diff --git a/.bundle/config b/.bundle/config
new file mode 100644
index 0000000..2369228
--- /dev/null
+++ b/.bundle/config
@@ -0,0 +1,2 @@
+---
+BUNDLE_PATH: "vendor/bundle"
diff --git a/Gemfile.lock b/Gemfile.lock
index 78ab10e..c179281 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,23 +1,25 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    CFPropertyList (3.0.2)
-    activesupport (5.2.4.4)
+    CFPropertyList (3.0.5)
+      rexml
+    activesupport (6.1.4.6)
       concurrent-ruby (~> 1.0, >= 1.0.2)
-      i18n (>= 0.7, < 2)
-      minitest (~> 5.1)
-      tzinfo (~> 1.1)
+      i18n (>= 1.6, < 2)
+      minitest (>= 5.1)
+      tzinfo (~> 2.0)
+      zeitwerk (~> 2.3)
     addressable (2.8.0)
       public_suffix (>= 2.0.2, < 5.0)
     algoliasearch (1.27.5)
       httpclient (~> 2.8, >= 2.8.3)
       json (>= 1.5.1)
     atomos (0.1.3)
-    claide (1.0.3)
-    cocoapods (1.10.0)
-      addressable (~> 2.6)
+    claide (1.1.0)
+    cocoapods (1.11.2)
+      addressable (~> 2.8)
       claide (>= 1.0.2, < 2.0)
-      cocoapods-core (= 1.10.0)
+      cocoapods-core (= 1.11.2)
       cocoapods-deintegrate (>= 1.0.3, < 2.0)
       cocoapods-downloader (>= 1.4.0, < 2.0)
       cocoapods-plugins (>= 1.0.0, < 2.0)
@@ -28,60 +30,62 @@ GEM
       escape (~> 0.0.4)
       fourflusher (>= 2.3.0, < 3.0)
       gh_inspector (~> 1.0)
-      molinillo (~> 0.6.6)
+      molinillo (~> 0.8.0)
       nap (~> 1.0)
-      ruby-macho (~> 1.4)
-      xcodeproj (>= 1.19.0, < 2.0)
-    cocoapods-core (1.10.0)
-      activesupport (> 5.0, < 6)
-      addressable (~> 2.6)
+      ruby-macho (>= 1.0, < 3.0)
+      xcodeproj (>= 1.21.0, < 2.0)
+    cocoapods-core (1.11.2)
+      activesupport (>= 5.0, < 7)
+      addressable (~> 2.8)
       algoliasearch (~> 1.0)
       concurrent-ruby (~> 1.1)
       fuzzy_match (~> 2.0.4)
       nap (~> 1.0)
       netrc (~> 0.11)
-      public_suffix
+      public_suffix (~> 4.0)
       typhoeus (~> 1.0)
-    cocoapods-deintegrate (1.0.4)
-    cocoapods-downloader (1.4.0)
+    cocoapods-deintegrate (1.0.5)
+    cocoapods-downloader (1.5.1)
     cocoapods-plugins (1.0.0)
       nap
-    cocoapods-search (1.0.0)
-    cocoapods-trunk (1.5.0)
+    cocoapods-search (1.0.1)
+    cocoapods-trunk (1.6.0)
       nap (>= 0.8, < 2.0)
       netrc (~> 0.11)
     cocoapods-try (1.2.0)
     colored2 (3.1.2)
-    concurrent-ruby (1.1.7)
+    concurrent-ruby (1.1.9)
     escape (0.0.4)
-    ethon (0.12.0)
-      ffi (>= 1.3.0)
-    ffi (1.13.1)
+    ethon (0.15.0)
+      ffi (>= 1.15.0)
+    ffi (1.15.5)
     fourflusher (2.3.1)
     fuzzy_match (2.0.4)
     gh_inspector (1.1.3)
     httpclient (2.8.3)
-    i18n (1.8.5)
+    i18n (1.10.0)
       concurrent-ruby (~> 1.0)
-    json (2.3.1)
-    minitest (5.14.2)
-    molinillo (0.6.6)
+    json (2.6.1)
+    minitest (5.15.0)
+    molinillo (0.8.0)
     nanaimo (0.3.0)
     nap (1.1.0)
     netrc (0.11.0)
     public_suffix (4.0.6)
-    ruby-macho (1.4.0)
-    thread_safe (0.3.6)
+    rexml (3.2.5)
+    ruby-macho (2.5.1)
     typhoeus (1.4.0)
       ethon (>= 0.9.0)
-    tzinfo (1.2.8)
-      thread_safe (~> 0.1)
-    xcodeproj (1.19.0)
+    tzinfo (2.0.4)
+      concurrent-ruby (~> 1.0)
+    xcodeproj (1.21.0)
       CFPropertyList (>= 2.3.3, < 4.0)
       atomos (~> 0.1.3)
       claide (>= 1.0.2, < 2.0)
       colored2 (~> 3.1)
       nanaimo (~> 0.3.0)
+      rexml (~> 3.2.4)
+    zeitwerk (2.5.4)
 
 PLATFORMS
   ruby
@@ -90,4 +94,4 @@ DEPENDENCIES
   cocoapods
 
 BUNDLED WITH
-   2.1.4
+   2.3.6
diff --git a/Zephyr.podspec b/Zephyr.podspec
index 9fc6087..9ebd3bd 100755
--- a/Zephyr.podspec
+++ b/Zephyr.podspec
@@ -1,6 +1,6 @@
 Pod::Spec.new do |s|
   # Version
-  s.version = "3.6.2"
+  s.version = "3.6.3"
   s.swift_version = "5.3"
 
   # Meta
diff --git a/vendor/bundle/ruby/2.6.0/bin/fuzzy_match b/vendor/bundle/ruby/2.6.0/bin/fuzzy_match
new file mode 100755
index 0000000..c340ac1
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/bin/fuzzy_match
@@ -0,0 +1,27 @@
+#!/usr/bin/env ruby
+#
+# This file was generated by RubyGems.
+#
+# The application 'fuzzy_match' is installed as part of a gem, and
+# this file is here to facilitate running it.
+#
+
+require 'rubygems'
+
+version = ">= 0.a"
+
+str = ARGV.first
+if str
+  str = str.b[/\A_(.*)_\z/, 1]
+  if str and Gem::Version.correct?(str)
+    version = str
+    ARGV.shift
+  end
+end
+
+if Gem.respond_to?(:activate_bin_path)
+load Gem.activate_bin_path('fuzzy_match', 'fuzzy_match', version)
+else
+gem "fuzzy_match", version
+load Gem.bin_path("fuzzy_match", "fuzzy_match", version)
+end
diff --git a/vendor/bundle/ruby/2.6.0/bin/httpclient b/vendor/bundle/ruby/2.6.0/bin/httpclient
new file mode 100755
index 0000000..a37f4dd
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/bin/httpclient
@@ -0,0 +1,27 @@
+#!/usr/bin/env ruby
+#
+# This file was generated by RubyGems.
+#
+# The application 'httpclient' is installed as part of a gem, and
+# this file is here to facilitate running it.
+#
+
+require 'rubygems'
+
+version = ">= 0.a"
+
+str = ARGV.first
+if str
+  str = str.b[/\A_(.*)_\z/, 1]
+  if str and Gem::Version.correct?(str)
+    version = str
+    ARGV.shift
+  end
+end
+
+if Gem.respond_to?(:activate_bin_path)
+load Gem.activate_bin_path('httpclient', 'httpclient', version)
+else
+gem "httpclient", version
+load Gem.bin_path("httpclient", "httpclient", version)
+end
diff --git a/vendor/bundle/ruby/2.6.0/cache/CFPropertyList-3.0.2.gem b/vendor/bundle/ruby/2.6.0/cache/CFPropertyList-3.0.2.gem
new file mode 100644
index 0000000..01cb910
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/CFPropertyList-3.0.2.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/activesupport-5.2.4.4.gem b/vendor/bundle/ruby/2.6.0/cache/activesupport-5.2.4.4.gem
new file mode 100644
index 0000000..3df1726
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/activesupport-5.2.4.4.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/addressable-2.8.0.gem b/vendor/bundle/ruby/2.6.0/cache/addressable-2.8.0.gem
new file mode 100644
index 0000000..1e41e1c
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/addressable-2.8.0.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/algoliasearch-1.27.5.gem b/vendor/bundle/ruby/2.6.0/cache/algoliasearch-1.27.5.gem
new file mode 100644
index 0000000..4e068e8
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/algoliasearch-1.27.5.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/atomos-0.1.3.gem b/vendor/bundle/ruby/2.6.0/cache/atomos-0.1.3.gem
new file mode 100644
index 0000000..c12e2f0
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/atomos-0.1.3.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/claide-1.0.3.gem b/vendor/bundle/ruby/2.6.0/cache/claide-1.0.3.gem
new file mode 100644
index 0000000..16aeff7
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/claide-1.0.3.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/concurrent-ruby-1.1.7.gem b/vendor/bundle/ruby/2.6.0/cache/concurrent-ruby-1.1.7.gem
new file mode 100644
index 0000000..ae9b370
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/concurrent-ruby-1.1.7.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/ffi-1.13.1.gem b/vendor/bundle/ruby/2.6.0/cache/ffi-1.13.1.gem
new file mode 100644
index 0000000..e59187e
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/ffi-1.13.1.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/fuzzy_match-2.0.4.gem b/vendor/bundle/ruby/2.6.0/cache/fuzzy_match-2.0.4.gem
new file mode 100644
index 0000000..bb9e714
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/fuzzy_match-2.0.4.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/httpclient-2.8.3.gem b/vendor/bundle/ruby/2.6.0/cache/httpclient-2.8.3.gem
new file mode 100644
index 0000000..9c19ad4
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/httpclient-2.8.3.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/i18n-1.8.5.gem b/vendor/bundle/ruby/2.6.0/cache/i18n-1.8.5.gem
new file mode 100644
index 0000000..f5c89ea
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/i18n-1.8.5.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/json-2.3.1.gem b/vendor/bundle/ruby/2.6.0/cache/json-2.3.1.gem
new file mode 100644
index 0000000..1f89633
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/json-2.3.1.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/json-2.6.1.gem b/vendor/bundle/ruby/2.6.0/cache/json-2.6.1.gem
new file mode 100644
index 0000000..0e3430c
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/json-2.6.1.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/minitest-5.14.2.gem b/vendor/bundle/ruby/2.6.0/cache/minitest-5.14.2.gem
new file mode 100644
index 0000000..66232f7
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/minitest-5.14.2.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/nap-1.1.0.gem b/vendor/bundle/ruby/2.6.0/cache/nap-1.1.0.gem
new file mode 100644
index 0000000..1f6dd74
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/nap-1.1.0.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/netrc-0.11.0.gem b/vendor/bundle/ruby/2.6.0/cache/netrc-0.11.0.gem
new file mode 100644
index 0000000..78226f3
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/netrc-0.11.0.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/public_suffix-4.0.6.gem b/vendor/bundle/ruby/2.6.0/cache/public_suffix-4.0.6.gem
new file mode 100644
index 0000000..6f183f4
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/public_suffix-4.0.6.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/thread_safe-0.3.6.gem b/vendor/bundle/ruby/2.6.0/cache/thread_safe-0.3.6.gem
new file mode 100644
index 0000000..7ee950f
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/thread_safe-0.3.6.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/cache/tzinfo-1.2.8.gem b/vendor/bundle/ruby/2.6.0/cache/tzinfo-1.2.8.gem
new file mode 100644
index 0000000..f24bf8e
Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/cache/tzinfo-1.2.8.gem differ
diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/gem_make 2.out b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/gem_make 2.out
new file mode 100644
index 0000000..73913df
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/gem_make 2.out
@@ -0,0 +1,81 @@
+current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c
+/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/bin/ruby -I /System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib/ruby/2.6.0 -r ./siteconf20211106-25171-18c367c.rb extconf.rb
+checking for ffi_prep_closure_loc() in -lffi... no
+checking for ffi_prep_closure_loc() in -llibffi... no
+checking for ffi_prep_closure_loc() in -llibffi-8...
no +creating extconf.h +creating Makefile + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c +make "DESTDIR=" clean + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c +make "DESTDIR=" +Configuring libffi +/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure: line 18632: readelf: command not found +clang: error: unsupported option '-print-multi-os-directory' +clang: error: no input files +cd "/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21" && /Applications/Xcode.app/Contents/Developer/usr/bin/make +/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive +Making all in include +make[3]: Nothing to be done for `all'. +Making all in testsuite +make[3]: Nothing to be done for `all'. +Making all in man +make[3]: Nothing to be done for `all'. +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/prep_cif.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c -fno-common -DPIC -o src/.libs/prep_cif.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/types.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c -fno-common -DPIC -o src/.libs/types.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. 
-I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/raw_api.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c -fno-common -DPIC -o src/.libs/raw_api.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/java_raw_api.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c -fno-common -DPIC -o src/.libs/java_raw_api.o +/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c:328:46: warning: 'ffi_java_raw_size' is deprecated [-Wdeprecated-declarations] + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ^ +include/ffi.h:299:56: note: 'ffi_java_raw_size' has been explicitly marked deprecated here +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + ^ +/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c:331:3: warning: 'ffi_java_ptrarray_to_raw' is deprecated [-Wdeprecated-declarations] + ffi_java_ptrarray_to_raw (cif, avalue, raw); + ^ +include/ffi.h:295:93: note: 'ffi_java_ptrarray_to_raw' has been explicitly marked deprecated here +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); + ^ +2 warnings generated. +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/closures.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c -fno-common -DPIC -o src/.libs/closures.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/arm/ffi.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c -fno-common -DPIC -o src/arm/.libs/ffi.o +/bin/sh ./libtool --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -c -o src/arm/sysv.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S -fno-common -DPIC -o src/arm/.libs/sysv.o +/bin/sh ./libtool --tag=CC --mode=link xcrun clang -Wall -fexceptions -L/usr/local/opt/libffi/lib -o libffi_convenience.la src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/arm/ffi.lo src/arm/sysv.lo +libtool: link: ar cru .libs/libffi_convenience.a src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/arm/.libs/ffi.o src/arm/.libs/sysv.o +libtool: link: ranlib .libs/libffi_convenience.a +libtool: link: ( cd ".libs" && rm -f "libffi_convenience.la" && ln -s "../libffi_convenience.la" "libffi_convenience.la" ) +/bin/sh ./libtool --tag=CC --mode=link xcrun clang -Wall -fexceptions -no-undefined -version-info `grep -v '^#' /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version` '-L/usr/local/opt/libffi/lib' -L/usr/local/opt/libffi/lib -o libffi.la -rpath /usr/local/lib src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/arm/ffi.lo src/arm/sysv.lo +libtool: link: xcrun clang -dynamiclib -o .libs/libffi.8.dylib src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/arm/.libs/ffi.o src/arm/.libs/sysv.o -L/usr/local/opt/libffi/lib -install_name /usr/local/lib/libffi.8.dylib -compatibility_version 10 -current_version 10.0 -Wl,-single_module +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +Undefined symbols for architecture arm64: + "_ffi_call", referenced from: + _ffi_raw_call in raw_api.o + _ffi_java_raw_call in java_raw_api.o + "_ffi_closure_trampoline_table_page", referenced from: + _ffi_trampoline_table_alloc in closures.o + "_ffi_prep_cif_machdep", referenced from: + _ffi_prep_cif_core in prep_cif.o + "_ffi_prep_cif_machdep_var", referenced from: + _ffi_prep_cif_core in prep_cif.o + "_ffi_prep_closure_loc", referenced from: + _ffi_prep_closure in prep_cif.o + _ffi_prep_raw_closure_loc in raw_api.o + _ffi_prep_java_raw_closure_loc in java_raw_api.o +ld: symbol(s) not found for architecture arm64 +clang: error: linker command failed with exit code 1 (use -v to see invocation) +make[3]: *** [libffi.la] Error 1 +make[2]: *** [all-recursive] Error 1 +make[1]: *** [all] Error 2 +make: *** ["/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21"/.libs/libffi_convenience.a] Error 2 + +make failed, exit code 2 diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/gem_make.out b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/gem_make.out new file mode 100644 index 0000000..2fbb4e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/gem_make.out @@ -0,0 +1,81 @@ +current directory: 
/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c +/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/bin/ruby -I /System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib/ruby/2.6.0 -r ./siteconf20211106-32129-13eyyi4.rb extconf.rb +checking for ffi_prep_closure_loc() in -lffi... no +checking for ffi_prep_closure_loc() in -llibffi... no +checking for ffi_prep_closure_loc() in -llibffi-8... no +creating extconf.h +creating Makefile + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c +make "DESTDIR=" clean + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c +make "DESTDIR=" +Configuring libffi +/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure: line 18632: readelf: command not found +clang: error: unsupported option '-print-multi-os-directory' +clang: error: no input files +cd "/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21" && /Applications/Xcode.app/Contents/Developer/usr/bin/make +/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive +Making all in include +make[3]: Nothing to be done for `all'. +Making all in testsuite +make[3]: Nothing to be done for `all'. +Making all in man +make[3]: Nothing to be done for `all'. +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/prep_cif.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c -fno-common -DPIC -o src/.libs/prep_cif.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/types.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c -fno-common -DPIC -o src/.libs/types.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/raw_api.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c -fno-common -DPIC -o src/.libs/raw_api.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/java_raw_api.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c -fno-common -DPIC -o src/.libs/java_raw_api.o +/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c:328:46: warning: 'ffi_java_raw_size' is deprecated [-Wdeprecated-declarations] + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ^ +include/ffi.h:299:56: note: 'ffi_java_raw_size' has been explicitly marked deprecated here +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + ^ +/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c:331:3: warning: 'ffi_java_ptrarray_to_raw' is deprecated [-Wdeprecated-declarations] + ffi_java_ptrarray_to_raw (cif, avalue, raw); + ^ +include/ffi.h:295:93: note: 'ffi_java_ptrarray_to_raw' has been explicitly marked deprecated here +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); + ^ +2 warnings generated. +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/closures.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c -fno-common -DPIC -o src/.libs/closures.o +/bin/sh ./libtool --tag=CC --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/arm/ffi.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c -fno-common -DPIC -o src/arm/.libs/ffi.o +/bin/sh ./libtool --mode=compile xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -c -o src/arm/sysv.lo /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S +libtool: compile: xcrun clang -DHAVE_CONFIG_H -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -c /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S -fno-common -DPIC -o src/arm/.libs/sysv.o +/bin/sh ./libtool --tag=CC --mode=link xcrun clang -Wall -fexceptions -L/usr/local/opt/libffi/lib -o libffi_convenience.la src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/arm/ffi.lo src/arm/sysv.lo +libtool: link: ar cru .libs/libffi_convenience.a src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/arm/.libs/ffi.o src/arm/.libs/sysv.o +libtool: link: ranlib .libs/libffi_convenience.a +libtool: link: ( cd ".libs" && rm -f "libffi_convenience.la" && ln -s "../libffi_convenience.la" "libffi_convenience.la" ) +/bin/sh ./libtool --tag=CC --mode=link xcrun clang -Wall -fexceptions -no-undefined -version-info `grep -v '^#' /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version` '-L/usr/local/opt/libffi/lib' -L/usr/local/opt/libffi/lib -o libffi.la -rpath /usr/local/lib src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/arm/ffi.lo src/arm/sysv.lo +libtool: link: xcrun clang -dynamiclib -o .libs/libffi.8.dylib src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/arm/.libs/ffi.o src/arm/.libs/sysv.o -L/usr/local/opt/libffi/lib -install_name /usr/local/lib/libffi.8.dylib -compatibility_version 10 -current_version 10.0 -Wl,-single_module +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' 
+Undefined symbols for architecture arm64: + "_ffi_call", referenced from: + _ffi_raw_call in raw_api.o + _ffi_java_raw_call in java_raw_api.o + "_ffi_closure_trampoline_table_page", referenced from: + _ffi_trampoline_table_alloc in closures.o + "_ffi_prep_cif_machdep", referenced from: + _ffi_prep_cif_core in prep_cif.o + "_ffi_prep_cif_machdep_var", referenced from: + _ffi_prep_cif_core in prep_cif.o + "_ffi_prep_closure_loc", referenced from: + _ffi_prep_closure in prep_cif.o + _ffi_prep_raw_closure_loc in raw_api.o + _ffi_prep_java_raw_closure_loc in java_raw_api.o +ld: symbol(s) not found for architecture arm64 +clang: error: linker command failed with exit code 1 (use -v to see invocation) +make[3]: *** [libffi.la] Error 1 +make[2]: *** [all-recursive] Error 1 +make[1]: *** [all] Error 2 +make: *** ["/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21"/.libs/libffi_convenience.a] Error 2 + +make failed, exit code 2 diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/mkmf.log b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/mkmf.log new file mode 100644 index 0000000..1ede79c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/ffi-1.13.1/mkmf.log @@ -0,0 +1,212 @@ +"pkg-config --exists libffi" +| pkg-config --libs libffi +=> "-lffi\n" +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lruby.2.6 " +ld: warning: directory not found for option '-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib' +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return 0; +6: } +/* end */ + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. 
-L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lruby.2.6 -lffi " +ld: warning: directory not found for option '-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib' +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return 0; +6: } +/* end */ + +| pkg-config --cflags-only-I libffi +=> "-I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi\n" +| pkg-config --cflags-only-other libffi +=> "\n" +| pkg-config --libs-only-l libffi +=> "-lffi\n" +package configuration for libffi +cflags: +ldflags: +libs: -lffi + +have_library: checking for ffi_prep_closure_loc() in -lffi... -------------------- no + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lffi -lruby.2.6 -lffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. 
-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lffi -lruby.2.6 -lffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +have_library: checking for ffi_prep_closure_loc() in -llibffi... -------------------- no + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lffi -lruby.2.6 -llibffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. 
-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lffi -lruby.2.6 -llibffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +have_library: checking for ffi_prep_closure_loc() in -llibffi-8... -------------------- no + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lffi -lruby.2.6 -llibffi-8 -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. 
-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lffi -lruby.2.6 -llibffi-8 -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +extconf.h is: +/* begin */ +1: #ifndef EXTCONF_H +2: #define EXTCONF_H +3: #define HAVE_FFI_PREP_CIF_VAR 1 +4: #define USE_INTERNAL_LIBFFI 1 +5: #endif +/* end */ + diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem.build_complete b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem.build_complete new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem_make 2.out b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem_make 2.out new file mode 100644 index 0000000..6b7d344 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem_make 2.out @@ -0,0 +1,21 @@ +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator +/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/bin/ruby -I /System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib/ruby/2.6.0 -r ./siteconf20211106-75622-17rtlo1.rb extconf.rb +creating Makefile + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator +make "DESTDIR=" clean + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator +make "DESTDIR=" +compiling generator.c +In file included from generator.c:1: +In file included from ./../fbuffer/fbuffer.h:5: +In file included from /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby.h:33: +/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/ruby.h:24:10: fatal error: 'ruby/config.h' file not found +#include "ruby/config.h" + ^~~~~~~~~~~~~~~ +/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/ruby.h:24:10: note: did not find header 'config.h' in framework 'ruby' (loaded from '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks') +1 error generated. 
+make: *** [generator.o] Error 1 + +make failed, exit code 2 diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem_make.out b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem_make.out new file mode 100644 index 0000000..4289192 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/gem_make.out @@ -0,0 +1,13 @@ +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json +/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/bin/ruby -I /System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib/ruby/2.6.0 -r ./siteconf20211106-25171-koumu6.rb extconf.rb +creating Makefile + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json +make "DESTDIR=" clean + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json +make "DESTDIR=" +make: Nothing to be done for `all'. + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json +make "DESTDIR=" install diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/json/ext/generator.bundle b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/json/ext/generator.bundle new file mode 100755 index 0000000..09c8b98 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/json/ext/generator.bundle differ diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/json/ext/parser.bundle b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/json/ext/parser.bundle new file mode 100755 index 0000000..2cda116 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/json/ext/parser.bundle differ diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/mkmf.log b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/mkmf.log new file mode 100644 index 0000000..537ffb2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.3.1/mkmf.log @@ -0,0 +1,64 @@ +have_func: checking for rb_enc_raise() in ruby.h... -------------------- yes + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. 
-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lruby.2.6 " +ld: warning: directory not found for option '-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib' +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return 0; +6: } +/* end */ + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lruby.2.6 " +conftest.c:16:57: error: use of undeclared identifier 'rb_enc_raise' +int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_raise; return !p; } + ^ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_raise; return !p; } +/* end */ + +"xcrun clang -o conftest -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/backward -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 -I. -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC conftest.c -L. -L/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib -L. 
-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib -lruby.2.6 " +ld: warning: directory not found for option '-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib' +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return 0; +15: } +16: extern void rb_enc_raise(); +17: int t(void) { rb_enc_raise(); return 0; } +/* end */ + +-------------------- + diff --git a/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.6.1/gem_make.out b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.6.1/gem_make.out new file mode 100644 index 0000000..d577dc8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/extensions/universal-darwin-21/2.6.0/json-2.6.1/gem_make.out @@ -0,0 +1,21 @@ +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator +/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/bin/ruby -I /System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib/ruby/2.6.0 -r ./siteconf20211106-75777-md22l0.rb extconf.rb +creating Makefile + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator +make "DESTDIR=" clean + +current directory: /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator +make "DESTDIR=" +compiling generator.c +In file included from generator.c:1: +In file included from ./../fbuffer/fbuffer.h:5: +In file included from /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby.h:33: +/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/ruby.h:24:10: fatal error: 'ruby/config.h' file not found +#include "ruby/config.h" + ^~~~~~~~~~~~~~~ +/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/ruby/ruby.h:24:10: note: did not find header 'config.h' in framework 'ruby' (loaded from '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks') +1 error generated. 
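One detail worth noting in the `rb_enc_raise` portion of the mkmf.log above: `have_func` tries two probe programs. The first references the symbol exactly as the headers declare it and fails with "use of undeclared identifier"; the second re-declares it `extern` and calls it, which links, so the function is still reported as present. A hedged sketch of the corresponding check on the json side (the `create_makefile` target is assumed):

```ruby
require 'mkmf'

# Two-stage probe: take the symbol from the headers first, then fall back to
# an extern declaration, as seen in the log above.
have_func('rb_enc_raise', 'ruby.h')    # -> #define HAVE_RB_ENC_RAISE 1 on success
create_makefile('json/ext/generator')  # target name assumed
```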
+make: *** [generator.o] Error 1 + +make failed, exit code 2 diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/LICENSE b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/LICENSE new file mode 100644 index 0000000..ba6ffb2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2010 Christian Kruse, + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/README.md b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/README.md new file mode 100644 index 0000000..49f0c07 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/README.md @@ -0,0 +1,79 @@ +CFPropertyList implementation +class to read, manipulate and write both XML and binary property list +files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +for more documentation. + +# Caution! + +In version 3.0.0 we dropped Ruby 1.8 compatibility. If you are using +Ruby 1.8 consider to update Ruby; if you can't upgrade, don't upgrade +CFPropertyList. + +# Installation + +You could either use ruby gems and install it via + +```bash +gem install CFPropertyList +``` + +or you could clone this repository and place it somewhere in your load path. 
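If you take the clone route instead of installing the gem, a minimal sketch of wiring the checkout into the load path by hand (the checkout location below is hypothetical):

```ruby
# Hypothetical checkout location; adjust to wherever the repository was cloned.
$LOAD_PATH.unshift File.expand_path('vendor/CFPropertyList/lib', __dir__)
require 'cfpropertylist'
```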
+ +Example: +```ruby +require 'cfpropertylist' +``` + +If you're using Rails, you can add it into your Gemfile + +```ruby +gem 'CFPropertyList' +``` + +# Usage + + ## create a arbitrary data structure of basic data types + +```ruby +data = { + 'name' => 'John Doe', + 'missing' => true, + 'last_seen' => Time.now, + 'friends' => ['Jane Doe','Julian Doe'], + 'likes' => { + 'me' => false + } +} +``` + +## create CFPropertyList::List object + +```ruby +plist = CFPropertyList::List.new +``` + +## call CFPropertyList.guess() to create corresponding CFType values + +```ruby +plist.value = CFPropertyList.guess(data) +``` + +## write plist to file +```ruby +plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) +``` + +## … later, read it again +```ruby +plist = CFPropertyList::List.new(:file => "example.plist") +data = CFPropertyList.native_types(plist.value) +``` + +# Author and license + +**Author:** Christian Kruse (mailto:cjk@wwwtech.de) + +**Copyright:** Copyright (c) 2010 + +**License:** MIT License + diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/README.rdoc b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/README.rdoc new file mode 100644 index 0000000..a71005c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/README.rdoc @@ -0,0 +1,43 @@ +CFPropertyList implementation +class to read, manipulate and write both XML and binary property list +files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +for more documentation. + +== Installation + +You could either use ruby gems and install it via + + gem install CFPropertyList + +or you could clone this repository and place it somewhere in your load path. + +== Example + require 'cfpropertylist' + + # create a arbitrary data structure of basic data types + data = { + 'name' => 'John Doe', + 'missing' => true, + 'last_seen' => Time.now, + 'friends' => ['Jane Doe','Julian Doe'], + 'likes' => { + 'me' => false + } + } + + # create CFPropertyList::List object + plist = CFPropertyList::List.new + + # call CFPropertyList.guess() to create corresponding CFType values + plist.value = CFPropertyList.guess(data) + + # write plist to file + plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) + + # … later, read it again + plist = CFPropertyList::List.new(:file => "example.plist") + data = CFPropertyList.native_types(plist.value) + +Author:: Christian Kruse (mailto:cjk@wwwtech.de) +Copyright:: Copyright (c) 2010 +License:: MIT License diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/THANKS b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/THANKS new file mode 100644 index 0000000..c981a51 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/THANKS @@ -0,0 +1,7 @@ +Special thanks to: + +Steve Madsen for providing a lot of performance patches and bugfixes! 
+Have a look at his Github account: + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist.rb new file mode 100644 index 0000000..f83fef7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist.rb @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +require 'cfpropertylist/rbCFPropertyList' + + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbBinaryCFPropertyList.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbBinaryCFPropertyList.rb new file mode 100644 index 0000000..da84b63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbBinaryCFPropertyList.rb @@ -0,0 +1,594 @@ +# -*- coding: utf-8 -*- + +require 'stringio' + +module CFPropertyList + # Binary PList parser class + class Binary + # Read a binary plist file + def load(opts) + @unique_table = {} + @count_objects = 0 + @object_refs = 0 + + @written_object_count = 0 + @object_table = [] + @object_ref_size = 0 + + @offsets = [] + + fd = nil + if(opts.has_key?(:file)) + fd = File.open(opts[:file],"rb") + file = opts[:file] + else + fd = StringIO.new(opts[:data],"rb") + file = "" + end + + # first, we read the trailer: 32 byte from the end + fd.seek(-32,IO::SEEK_END) + buff = fd.read(32) + + offset_size, object_ref_size, number_of_objects, top_object, table_offset = buff.unpack "x6CCx4Nx4Nx4N" + + # after that, get the offset table + fd.seek(table_offset, IO::SEEK_SET) + coded_offset_table = fd.read(number_of_objects * offset_size) + raise CFFormatError.new("#{file}: Format error!") unless coded_offset_table.bytesize == number_of_objects * offset_size + + @count_objects = number_of_objects + + # decode offset table + if(offset_size != 3) + formats = ["","C*","n*","","N*"] + @offsets = coded_offset_table.unpack(formats[offset_size]) + else + @offsets = coded_offset_table.unpack("C*").each_slice(3).map { + |x,y,z| (x << 16) | (y << 8) | z + } + end + + @object_ref_size = object_ref_size + val = read_binary_object_at(file,fd,top_object) + + fd.close + val + end + + + # Convert CFPropertyList to binary format; since we have to count our objects we simply unique CFDictionary and CFArray + def to_str(opts={}) + @unique_table = {} + @count_objects = 0 + @object_refs = 0 + + @written_object_count = 0 + @object_table = [] + + @offsets = [] + + binary_str = "bplist00" + + @object_refs = count_object_refs(opts[:root]) + + opts[:root].to_binary(self) + + next_offset = 8 + offsets = @object_table.map do |object| + offset = next_offset + next_offset += object.bytesize + offset + end + binary_str << @object_table.join + + table_offset = next_offset + offset_size = Binary.bytes_needed(table_offset) + + if offset_size < 8 + # Fast path: encode the entire offset array at once. + binary_str << offsets.pack((%w(C n N N)[offset_size - 1]) + '*') + else + # Slow path: host may be little or big endian, must pack each offset + # separately. + offsets.each do |offset| + binary_str << "#{Binary.pack_it_with_size(offset_size,offset)}" + end + end + + binary_str << [offset_size, object_ref_size(@object_refs)].pack("x6CC") + binary_str << [@object_table.size].pack("x4N") + binary_str << [0].pack("x4N") + binary_str << [table_offset].pack("x4N") + + binary_str + end + + def object_ref_size object_refs + Binary.bytes_needed(object_refs) + end + + # read a „null” type (i.e. 
null byte, marker byte, bool value) + def read_binary_null_type(length) + case length + when 0 then 0 # null byte + when 8 then CFBoolean.new(false) + when 9 then CFBoolean.new(true) + when 15 then 15 # fill type + else + raise CFFormatError.new("unknown null type: #{length}") + end + end + protected :read_binary_null_type + + # read a binary int value + def read_binary_int(fname,fd,length) + if length > 4 + raise CFFormatError.new("Integer greater than 16 bytes: #{length}") + end + + nbytes = 1 << length + + buff = fd.read(nbytes) + + CFInteger.new( + case length + when 0 then buff.unpack("C")[0] + when 1 then buff.unpack("n")[0] + when 2 then buff.unpack("N")[0] + # 8 byte integers are always signed + when 3 then buff.unpack("q>")[0] + # 16 byte integers are used to represent unsigned 8 byte integers + # where the unsigned value is stored in the lower 8 bytes and the + # upper 8 bytes are unused. + when 4 then buff.unpack("Q>Q>")[1] + end + ) + end + protected :read_binary_int + + # read a binary real value + def read_binary_real(fname,fd,length) + raise CFFormatError.new("Real greater than 8 bytes: #{length}") if length > 3 + + nbytes = 1 << length + buff = fd.read(nbytes) + + CFReal.new( + case length + when 0 # 1 byte float? must be an error + raise CFFormatError.new("got #{length+1} byte float, must be an error!") + when 1 # 2 byte float? must be an error + raise CFFormatError.new("got #{length+1} byte float, must be an error!") + when 2 then + buff.reverse.unpack("e")[0] + when 3 then + buff.reverse.unpack("E")[0] + else + fail "unexpected length: #{length}" + end + ) + end + protected :read_binary_real + + # read a binary date value + def read_binary_date(fname,fd,length) + raise CFFormatError.new("Date greater than 8 bytes: #{length}") if length > 3 + + nbytes = 1 << length + buff = fd.read(nbytes) + + CFDate.new( + case length + when 0 then # 1 byte CFDate is an error + raise CFFormatError.new("#{length+1} byte CFDate, error") + when 1 then # 2 byte CFDate is an error + raise CFFormatError.new("#{length+1} byte CFDate, error") + when 2 then + buff.reverse.unpack("e")[0] + when 3 then + buff.reverse.unpack("E")[0] + end, + CFDate::TIMESTAMP_APPLE + ) + end + protected :read_binary_date + + # Read a binary data value + def read_binary_data(fname,fd,length) + CFData.new(read_fd(fd, length), CFData::DATA_RAW) + end + protected :read_binary_data + + def read_fd fd, length + length > 0 ? fd.read(length) : "" + end + + # Read a binary string value + def read_binary_string(fname,fd,length) + buff = read_fd fd, length + @unique_table[buff] = true unless @unique_table.has_key?(buff) + CFString.new(buff) + end + protected :read_binary_string + + # Convert the given string from one charset to another + def Binary.charset_convert(str,from,to="UTF-8") + return str.dup.force_encoding(from).encode(to) if str.respond_to?("encode") + Iconv.conv(to,from,str) + end + + # Count characters considering character set + def Binary.charset_strlen(str,charset="UTF-8") + if str.respond_to?(:encode) + size = str.length + else + utf8_str = Iconv.conv("UTF-8",charset,str) + size = utf8_str.scan(/./mu).size + end + + # UTF-16 code units in the range D800-DBFF are the beginning of + # a surrogate pair, and count as one additional character for + # length calculation. 
+ if charset =~ /^UTF-16/ + if str.respond_to?(:encode) + str.bytes.to_a.each_slice(2) { |pair| size += 1 if (0xd8..0xdb).include?(pair[0]) } + else + str.split('').each_slice(2) { |pair| size += 1 if ("\xd8".."\xdb").include?(pair[0]) } + end + end + + size + end + + # Read a unicode string value, coded as UTF-16BE + def read_binary_unicode_string(fname,fd,length) + # The problem is: we get the length of the string IN CHARACTERS; + # since a char in UTF-16 can be 16 or 32 bit long, we don't really know + # how long the string is in bytes + buff = fd.read(2*length) + + @unique_table[buff] = true unless @unique_table.has_key?(buff) + CFString.new(Binary.charset_convert(buff,"UTF-16BE","UTF-8")) + end + protected :read_binary_unicode_string + + def unpack_with_size(nbytes, buff) + format = ["C*", "n*", "N*", "N*"][nbytes - 1]; + + if nbytes == 3 + buff = "\0" + buff.scan(/.{1,3}/).join("\0") + end + + return buff.unpack(format) + end + + # Read an binary array value, including contained objects + def read_binary_array(fname,fd,length) + ary = [] + + # first: read object refs + if(length != 0) + buff = fd.read(length * @object_ref_size) + objects = unpack_with_size(@object_ref_size, buff) #buff.unpack(@object_ref_size == 1 ? "C*" : "n*") + + # now: read objects + 0.upto(length-1) do |i| + object = read_binary_object_at(fname,fd,objects[i]) + ary.push object + end + end + + CFArray.new(ary) + end + protected :read_binary_array + + # Read a dictionary value, including contained objects + def read_binary_dict(fname,fd,length) + dict = {} + + # first: read keys + if(length != 0) then + buff = fd.read(length * @object_ref_size) + keys = unpack_with_size(@object_ref_size, buff) + + # second: read object refs + buff = fd.read(length * @object_ref_size) + objects = unpack_with_size(@object_ref_size, buff) + + # read real keys and objects + 0.upto(length-1) do |i| + key = read_binary_object_at(fname,fd,keys[i]) + object = read_binary_object_at(fname,fd,objects[i]) + dict[key.value] = object + end + end + + CFDictionary.new(dict) + end + protected :read_binary_dict + + # Read an object type byte, decode it and delegate to the correct + # reader function + def read_binary_object(fname,fd) + # first: read the marker byte + buff = fd.read(1) + + object_length = buff.unpack("C*") + object_length = object_length[0] & 0xF + + buff = buff.unpack("H*") + object_type = buff[0][0].chr + + if(object_type != "0" && object_length == 15) then + object_length = read_binary_object(fname,fd) + object_length = object_length.value + end + + case object_type + when '0' # null, false, true, fillbyte + read_binary_null_type(object_length) + when '1' # integer + read_binary_int(fname,fd,object_length) + when '2' # real + read_binary_real(fname,fd,object_length) + when '3' # date + read_binary_date(fname,fd,object_length) + when '4' # data + read_binary_data(fname,fd,object_length) + when '5' # byte string, usually utf8 encoded + read_binary_string(fname,fd,object_length) + when '6' # unicode string (utf16be) + read_binary_unicode_string(fname,fd,object_length) + when '8' + CFUid.new(read_binary_int(fname, fd, object_length).value) + when 'a' # array + read_binary_array(fname,fd,object_length) + when 'd' # dictionary + read_binary_dict(fname,fd,object_length) + end + end + protected :read_binary_object + + # Read an object type byte at position $pos, decode it and delegate to the correct reader function + def read_binary_object_at(fname,fd,pos) + position = @offsets[pos] + fd.seek(position,IO::SEEK_SET) + 
read_binary_object(fname,fd) + end + protected :read_binary_object_at + + # pack an +int+ of +nbytes+ with size + def Binary.pack_it_with_size(nbytes,int) + case nbytes + when 1 then [int].pack('c') + when 2 then [int].pack('n') + when 4 then [int].pack('N') + when 8 + [int >> 32, int & 0xFFFFFFFF].pack('NN') + else + raise CFFormatError.new("Don't know how to pack #{nbytes} byte integer") + end + end + + def Binary.pack_int_array_with_size(nbytes, array) + case nbytes + when 1 then array.pack('C*') + when 2 then array.pack('n*') + when 4 then array.pack('N*') + when 8 + array.map { |int| [int >> 32, int & 0xFFFFFFFF].pack('NN') }.join + else + raise CFFormatError.new("Don't know how to pack #{nbytes} byte integer") + end + end + + # calculate how many bytes are needed to save +count+ + def Binary.bytes_needed(count) + case + when count < 2**8 then 1 + when count < 2**16 then 2 + when count < 2**32 then 4 + when count < 2**64 then 8 + else + raise CFFormatError.new("Data size too large: #{count}") + end + end + + # Create a type byte for binary format as defined by apple + def Binary.type_bytes(type, length) + if length < 15 + [(type << 4) | length].pack('C') + else + bytes = [(type << 4) | 0xF] + if length <= 0xFF + bytes.push(0x10, length).pack('CCC') # 1 byte length + elsif length <= 0xFFFF + bytes.push(0x11, length).pack('CCn') # 2 byte length + elsif length <= 0xFFFFFFFF + bytes.push(0x12, length).pack('CCN') # 4 byte length + elsif length <= 0x7FFFFFFFFFFFFFFF + bytes.push(0x13, length >> 32, length & 0xFFFFFFFF).pack('CCNN') # 8 byte length + else + raise CFFormatError.new("Integer too large: #{int}") + end + end + end + + def count_object_refs(object) + case object + when CFArray + contained_refs = 0 + object.value.each do |element| + if CFArray === element || CFDictionary === element + contained_refs += count_object_refs(element) + end + end + return object.value.size + contained_refs + when CFDictionary + contained_refs = 0 + object.value.each_value do |value| + if CFArray === value || CFDictionary === value + contained_refs += count_object_refs(value) + end + end + return object.value.keys.size * 2 + contained_refs + else + return 0 + end + end + + def Binary.ascii_string?(str) + if str.respond_to?(:ascii_only?) + str.ascii_only? + else + str !~ /[\x80-\xFF]/mn + end + end + + # Uniques and transforms a string value to binary format and adds it to the object table + def string_to_binary(val) + val = val.to_s + + @unique_table[val] ||= begin + if !Binary.ascii_string?(val) + val = Binary.charset_convert(val,"UTF-8","UTF-16BE") + bdata = Binary.type_bytes(0b0110, Binary.charset_strlen(val,"UTF-16BE")) + + val.force_encoding("ASCII-8BIT") if val.respond_to?("encode") + @object_table[@written_object_count] = bdata << val + else + bdata = Binary.type_bytes(0b0101,val.bytesize) + @object_table[@written_object_count] = bdata << val + end + + @written_object_count += 1 + @written_object_count - 1 + end + end + + # Codes an integer to binary format + def int_to_binary(value) + # Note: nbytes is actually an exponent. number of bytes = 2**nbytes. 
+ nbytes = 0 + nbytes = 1 if value > 0xFF # 1 byte unsigned integer + nbytes += 1 if value > 0xFFFF # 4 byte unsigned integer + nbytes += 1 if value > 0xFFFFFFFF # 8 byte unsigned integer + nbytes += 1 if value > 0x7FFFFFFFFFFFFFFF # 8 byte unsigned integer, stored in lower half of 16 bytes + nbytes = 3 if value < 0 # signed integers always stored in 8 bytes + + Binary.type_bytes(0b0001, nbytes) << + if nbytes < 4 + [value].pack(["C", "n", "N", "q>"][nbytes]) + else # nbytes == 4 + [0,value].pack("Q>Q>") + end + end + + # Codes a real value to binary format + def real_to_binary(val) + Binary.type_bytes(0b0010,3) << [val].pack("E").reverse + end + + # Converts a numeric value to binary and adds it to the object table + def num_to_binary(value) + @object_table[@written_object_count] = + if value.is_a?(CFInteger) + int_to_binary(value.value) + else + real_to_binary(value.value) + end + + @written_object_count += 1 + @written_object_count - 1 + end + + def uid_to_binary(value) + nbytes = 0 + nbytes = 1 if value > 0xFF # 1 byte integer + nbytes += 1 if value > 0xFFFF # 4 byte integer + nbytes += 1 if value > 0xFFFFFFFF # 8 byte integer + nbytes = 3 if value < 0 # 8 byte integer, since signed + + @object_table[@written_object_count] = Binary.type_bytes(0b1000, nbytes) << + if nbytes < 3 + [value].pack( + if nbytes == 0 then "C" + elsif nbytes == 1 then "n" + else "N" + end + ) + else + # 64 bit signed integer; we need the higher and the lower 32 bit of the value + high_word = value >> 32 + low_word = value & 0xFFFFFFFF + [high_word,low_word].pack("NN") + end + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert date value (apple format) to binary and adds it to the object table + def date_to_binary(val) + val = val.getutc.to_f - CFDate::DATE_DIFF_APPLE_UNIX # CFDate is a real, number of seconds since 01/01/2001 00:00:00 GMT + + @object_table[@written_object_count] = + (Binary.type_bytes(0b0011, 3) << [val].pack("E").reverse) + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert a bool value to binary and add it to the object table + def bool_to_binary(val) + + @object_table[@written_object_count] = val ? 
"\x9" : "\x8" # 0x9 is 1001, type indicator for true; 0x8 is 1000, type indicator for false + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert data value to binary format and add it to the object table + def data_to_binary(val) + @object_table[@written_object_count] = + (Binary.type_bytes(0b0100, val.bytesize) << val) + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert array to binary format and add it to the object table + def array_to_binary(val) + saved_object_count = @written_object_count + @written_object_count += 1 + #@object_refs += val.value.size + + values = val.value.map { |v| v.to_binary(self) } + bdata = Binary.type_bytes(0b1010, val.value.size) << + Binary.pack_int_array_with_size(object_ref_size(@object_refs), + values) + + @object_table[saved_object_count] = bdata + saved_object_count + end + + # Convert dictionary to binary format and add it to the object table + def dict_to_binary(val) + saved_object_count = @written_object_count + @written_object_count += 1 + + #@object_refs += val.value.keys.size * 2 + + keys_and_values = val.value.keys.map { |k| CFString.new(k).to_binary(self) } + keys_and_values.concat(val.value.values.map { |v| v.to_binary(self) }) + + bdata = Binary.type_bytes(0b1101,val.value.size) << + Binary.pack_int_array_with_size(object_ref_size(@object_refs), keys_and_values) + + @object_table[saved_object_count] = bdata + return saved_object_count + end + end +end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPlistError.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPlistError.rb new file mode 100644 index 0000000..e276565 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPlistError.rb @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# +# Exceptions used: +# CFPlistError:: General base exception +# CFFormatError:: Format error +# CFTypeError:: Type error +# +# Easy and simple :-) +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2010 +# License:: MIT License + +# general plist error. All exceptions thrown are derived from this class. +class CFPlistError < StandardError +end + +# Exception thrown when format errors occur +class CFFormatError < CFPlistError +end + +# Exception thrown when type errors occur +class CFTypeError < CFPlistError +end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPropertyList.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPropertyList.rb new file mode 100644 index 0000000..43eb1d3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPropertyList.rb @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- + +require 'kconv' +require 'date' +require 'time' + +# +# CFPropertyList implementation +# +# class to read, manipulate and write both XML and binary property list +# files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +# for more documentation. 
+# +# == Example +# require 'cfpropertylist' +# +# # create a arbitrary data structure of basic data types +# data = { +# 'name' => 'John Doe', +# 'missing' => true, +# 'last_seen' => Time.now, +# 'friends' => ['Jane Doe','Julian Doe'], +# 'likes' => { +# 'me' => false +# } +# } +# +# # create CFPropertyList::List object +# plist = CFPropertyList::List.new +# +# # call CFPropertyList.guess() to create corresponding CFType values +# # pass in optional :convert_unknown_to_string => true to convert things like symbols into strings. +# plist.value = CFPropertyList.guess(data) +# +# # write plist to file +# plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) +# +# # … later, read it again +# plist = CFPropertyList::List.new(:file => "example.plist") +# data = CFPropertyList.native_types(plist.value) +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2010 +# License:: MIT License +module CFPropertyList + class << self + attr_accessor :xml_parser_interface + end + + # interface class for PList parsers + class ParserInterface + # load a plist + def load(opts={}) + return "" + end + + # convert a plist to string + def to_str(opts={}) + return true + end + end + + class XMLParserInterface < ParserInterface + def new_node(name) + end + + def new_text(val) + end + + def append_node(parent, child) + end + end +end + +dirname = File.dirname(__FILE__) +require dirname + '/rbCFPlistError.rb' +require dirname + '/rbCFTypes.rb' +require dirname + '/rbBinaryCFPropertyList.rb' +require dirname + '/rbPlainCFPropertyList.rb' + +begin + require dirname + '/rbLibXMLParser.rb' + temp = LibXML::XML::Parser::Options::NOBLANKS # check if we have a version with parser options + temp = false # avoid a warning + try_nokogiri = false + CFPropertyList.xml_parser_interface = CFPropertyList::LibXMLParser +rescue LoadError, NameError + try_nokogiri = true +end + +if try_nokogiri then + begin + require dirname + '/rbNokogiriParser.rb' + CFPropertyList.xml_parser_interface = CFPropertyList::NokogiriXMLParser + rescue LoadError + require dirname + '/rbREXMLParser.rb' + CFPropertyList.xml_parser_interface = CFPropertyList::ReXMLParser + end +end + + +module CFPropertyList + # Create CFType hierarchy by guessing the correct CFType, e.g. + # + # x = { + # 'a' => ['b','c','d'] + # } + # cftypes = CFPropertyList.guess(x) + # + # pass optional options hash. 
Only possible value actually: + # +convert_unknown_to_string+:: Convert unknown objects to string calling to_str() + # +converter_method+:: Convert unknown objects to known objects calling +method_name+ + # + # cftypes = CFPropertyList.guess(x,:convert_unknown_to_string => true,:converter_method => :to_hash, :converter_with_opts => true) + def guess(object, options = {}) + case object + when Integer then CFInteger.new(object) + when UidFixnum then CFUid.new(object) + when Float then CFReal.new(object) + when TrueClass, FalseClass then CFBoolean.new(object) + + when Blob + CFData.new(object, CFData::DATA_RAW) + + when String, Symbol + CFString.new(object.to_s) + + when Time, DateTime, Date + CFDate.new(object) + + when Array, Enumerator + ary = Array.new + object.each do |o| + ary.push CFPropertyList.guess(o, options) + end + CFArray.new(ary) + + when Hash + hsh = Hash.new + object.each_pair do |k,v| + k = k.to_s if k.is_a?(Symbol) + hsh[k] = CFPropertyList.guess(v, options) + end + CFDictionary.new(hsh) + else + case + when Object.const_defined?('BigDecimal') && object.is_a?(BigDecimal) + CFReal.new(object) + when object.respond_to?(:read) + raw_data = object.read + # treat the data as a bytestring (ASCII-8BIT) if Ruby supports it. Do this by forcing + # the encoding, on the assumption that the bytes were read correctly, and just tagged with + # an inappropriate encoding, rather than transcoding. + raw_data.force_encoding(Encoding::ASCII_8BIT) if raw_data.respond_to?(:force_encoding) + CFData.new(raw_data, CFData::DATA_RAW) + when options[:converter_method] && object.respond_to?(options[:converter_method]) + if options[:converter_with_opts] + CFPropertyList.guess(object.send(options[:converter_method],options),options) + else + CFPropertyList.guess(object.send(options[:converter_method]),options) + end + when options[:convert_unknown_to_string] + CFString.new(object.to_s) + else + raise CFTypeError.new("Unknown class #{object.class.to_s}. Try using :convert_unknown_to_string if you want to use unknown object types!") + end + end + end + + # Converts a CFType hiercharchy to native Ruby types + def native_types(object,keys_as_symbols=false) + return if object.nil? + + if(object.is_a?(CFDate) || object.is_a?(CFString) || object.is_a?(CFInteger) || object.is_a?(CFReal) || object.is_a?(CFBoolean)) || object.is_a?(CFUid) then + return object.value + elsif(object.is_a?(CFData)) then + return CFPropertyList::Blob.new(object.decoded_value) + elsif(object.is_a?(CFArray)) then + ary = [] + object.value.each do + |v| + ary.push CFPropertyList.native_types(v) + end + + return ary + elsif(object.is_a?(CFDictionary)) then + hsh = {} + object.value.each_pair do + |k,v| + k = k.to_sym if keys_as_symbols + hsh[k] = CFPropertyList.native_types(v) + end + + return hsh + end + end + + module_function :guess, :native_types + + # Class representing a CFPropertyList. 
Instantiate with #new + class List + # Format constant for binary format + FORMAT_BINARY = 1 + + # Format constant for XML format + FORMAT_XML = 2 + + # Format constant for the old plain format + FORMAT_PLAIN = 3 + + # Format constant for automatic format recognizing + FORMAT_AUTO = 0 + + @@parsers = [Binary, CFPropertyList.xml_parser_interface, PlainParser] + + # Path of PropertyList + attr_accessor :filename + # the original format of the PropertyList + attr_accessor :format + # the root value in the plist file + attr_accessor :value + # default value for XML generation; if true generate formatted XML + attr_accessor :formatted + + # initialize a new CFPropertyList, arguments are: + # + # :file:: Parse a file + # :format:: Format is one of FORMAT_BINARY or FORMAT_XML. Defaults to FORMAT_AUTO + # :data:: Parse a string + # + # All arguments are optional + def initialize(opts={}) + @filename = opts[:file] + @format = opts[:format] || FORMAT_AUTO + @data = opts[:data] + @formatted = opts[:formatted] + + load(@filename) unless @filename.nil? + load_str(@data) unless @data.nil? + end + + # returns a list of registered parsers + def self.parsers + @@parsers + end + + # set a list of parsers + def self.parsers=(val) + @@parsers = val + end + + # Load an XML PropertyList + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_xml(filename=nil) + load(filename,List::FORMAT_XML) + end + + # read a binary plist file + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_binary(filename=nil) + load(filename,List::FORMAT_BINARY) + end + + # read a plain plist file + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_plain(filename=nil) + load(filename,List::FORMAT_PLAIN) + end + + # load a plist from a XML string + # str:: The string containing the plist + def load_xml_str(str=nil) + load_str(str,List::FORMAT_XML) + end + + # load a plist from a binary string + # str:: The string containing the plist + def load_binary_str(str=nil) + load_str(str,List::FORMAT_BINARY) + end + + # load a plist from a plain string + # str:: The string containing the plist + def load_plain_str(str=nil) + load_str(str,List::FORMAT_PLAIN) + end + + # load a plist from a string + # str = nil:: The string containing the plist + # format = nil:: The format of the plist + def load_str(str=nil,format=nil) + str = @data if str.nil? + format = @format if format.nil? + + @value = {} + case format + when List::FORMAT_BINARY, List::FORMAT_XML, List::FORMAT_PLAIN then + prsr = @@parsers[format-1].new + @value = prsr.load({:data => str}) + + when List::FORMAT_AUTO then # what we now do is ugly, but neccessary to recognize the file format + filetype = str[0..5] + version = str[6..7] + + prsr = nil + + if filetype == "bplist" then + raise CFFormatError.new("Wrong file version #{version}") unless version == "00" + prsr = Binary.new + @format = List::FORMAT_BINARY + else + if str =~ /^<(\?xml|!DOCTYPE|plist)/ + prsr = CFPropertyList.xml_parser_interface.new + @format = List::FORMAT_XML + else + prsr = PlainParser.new + @format = List::FORMAT_PLAIN + end + end + + @value = prsr.load({:data => str}) + end + end + + # Read a plist file + # file = nil:: The filename of the file to read. If nil, use +filename+ instance variable + # format = nil:: The format of the plist file. 
Auto-detect if nil + def load(file=nil,format=nil) + file = @filename if file.nil? + format = @format if format.nil? + @value = {} + + raise IOError.new("File #{file} not readable!") unless File.readable? file + + case format + when List::FORMAT_BINARY, List::FORMAT_XML, List::FORMAT_PLAIN then + prsr = @@parsers[format-1].new + @value = prsr.load({:file => file}) + + when List::FORMAT_AUTO then # what we now do is ugly, but neccessary to recognize the file format + magic_number = IO.read(file,12) + raise IOError.new("File #{file} is empty.") unless magic_number + filetype = magic_number[0..5] + version = magic_number[6..7] + + prsr = nil + if filetype == "bplist" then + raise CFFormatError.new("Wrong file version #{version}") unless version == "00" + prsr = Binary.new + @format = List::FORMAT_BINARY + else + if magic_number =~ /^<(\?xml|!DOCTYPE|plist)/ + prsr = CFPropertyList.xml_parser_interface.new + @format = List::FORMAT_XML + else + prsr = PlainParser.new + @format = List::FORMAT_PLAIN + end + end + + @value = prsr.load({:file => file}) + end + + raise CFFormatError.new("Invalid format or parser error!") if @value.nil? + end + + # Serialize CFPropertyList object to specified format and write it to file + # file = nil:: The filename of the file to write to. Uses +filename+ instance variable if nil + # format = nil:: The format to save in. Uses +format+ instance variable if nil + def save(file=nil,format=nil,opts={}) + format = @format if format.nil? + file = @filename if file.nil? + + if format != FORMAT_BINARY && format != FORMAT_XML && format != FORMAT_PLAIN + raise CFFormatError.new("Format #{format} not supported, use List::FORMAT_BINARY or List::FORMAT_XML") + end + + if(!File.exists?(file)) then + raise IOError.new("File #{file} not writable!") unless File.writable?(File.dirname(file)) + elsif(!File.writable?(file)) then + raise IOError.new("File #{file} not writable!") + end + + opts[:root] = @value + opts[:formatted] = @formatted unless opts.has_key?(:formatted) + + prsr = @@parsers[format-1].new + + content = prsr.to_str(opts) + + File.open(file, 'wb') { + |fd| + fd.write content + } + end + + # convert plist to string + # format = List::FORMAT_BINARY:: The format to save the plist + # opts={}:: Pass parser options + def to_str(format=List::FORMAT_BINARY,opts={}) + if format != FORMAT_BINARY && format != FORMAT_XML && format != FORMAT_PLAIN + raise CFFormatError.new("Format #{format} not supported, use List::FORMAT_BINARY or List::FORMAT_XML") + end + + prsr = @@parsers[format-1].new + + opts[:root] = @value + opts[:formatted] = @formatted unless opts.has_key?(:formatted) + + return prsr.to_str(opts) + end + end +end + + +class Array + # convert an array to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end +end + +class Enumerator + # convert an array to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end +end + +class Hash + # convert a hash to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end 
+end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFTypes.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFTypes.rb new file mode 100644 index 0000000..8563721 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFTypes.rb @@ -0,0 +1,349 @@ +# -*- coding: utf-8 -*- +# +# CFTypes, e.g. CFString, CFInteger +# needed to create unambiguous plists +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2009 +# License:: MIT License + +require 'base64' + +module CFPropertyList + ## + # Blob is intended to distinguish between a Ruby String instance that should + # be converted to a CFString type and a Ruby String instance that should be + # converted to a CFData type + class Blob < String + end + + ## + # UidFixnum is intended to distinguish between a Ruby Integer + # instance that should be converted to a CFInteger/CFReal type and a + # Ruby Integer instance that should be converted to a CFUid type. + class UidFixnum < Integer + end + + # This class defines the base class for all CFType classes + # + class CFType + # value of the type + attr_accessor :value + + def initialize(value=nil) + @value = value + end + + def to_xml(parser) + end + + def to_binary(bplist) + end + + def to_plain(plist) + end + end + + # This class holds string values, both, UTF-8 and UTF-16BE + # It will convert the value to UTF-16BE if necessary (i.e. if non-ascii char contained) + class CFString < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('string') + n = parser.append_node(n, parser.new_text(@value)) unless @value.nil? + n + end + + # convert to binary + def to_binary(bplist) + bplist.string_to_binary(@value); + end + + def to_plain(plist) + if @value =~ /^\w+$/ + @value + else + quoted + end + end + + def quoted + str = '"' + @value.each_char do |c| + str << case c + when '"' + '\\"' + when '\\' + '\\' + when "\a" + "\\a" + when "\b" + "\\b" + when "\f" + "\\f" + when "\n" + "\n" + when "\v" + "\\v" + when "\r" + "\\r" + when "\t" + "\\t" + else + c + end + end + + str << '"' + end + end + + # This class holds integer/fixnum values + class CFInteger < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('integer') + n = parser.append_node(n, parser.new_text(@value.to_s)) + n + end + + # convert to binary + def to_binary(bplist) + bplist.num_to_binary(self) + end + + def to_plain(plist) + @value.to_s + end + end + + # This class holds float values + class CFReal < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('real') + n = parser.append_node(n, parser.new_text(@value.to_s)) + n + end + + # convert to binary + def to_binary(bplist) + bplist.num_to_binary(self) + end + + def to_plain(plist) + @value.to_s + end + end + + # This class holds Time values. While Apple uses seconds since 2001, + # the rest of the world uses seconds since 1970. So if you access value + # directly, you get the Time class. 
If you access via get_value you either + # geht the timestamp or the Apple timestamp + class CFDate < CFType + TIMESTAMP_APPLE = 0 + TIMESTAMP_UNIX = 1 + DATE_DIFF_APPLE_UNIX = 978307200 + + # create a XML date strimg from a time object + def CFDate.date_string(val) + # 2009-05-13T20:23:43Z + val.getutc.strftime("%Y-%m-%dT%H:%M:%SZ") + end + + # parse a XML date string + def CFDate.parse_date(val) + # 2009-05-13T20:23:43Z + val =~ %r{^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})Z$} + year,month,day,hour,min,sec = $1, $2, $3, $4, $5, $6 + return Time.utc(year,month,day,hour,min,sec).getlocal + end + + # set value to defined state + def initialize(value = nil,format=CFDate::TIMESTAMP_UNIX) + if(value.is_a?(Time) || value.nil?) then + @value = value.nil? ? Time.now : value + elsif value.instance_of? Date + @value = Time.utc(value.year, value.month, value.day, 0, 0, 0) + elsif value.instance_of? DateTime + @value = value.to_time.utc + else + set_value(value,format) + end + end + + # set value with timestamp, either Apple or UNIX + def set_value(value,format=CFDate::TIMESTAMP_UNIX) + if(format == CFDate::TIMESTAMP_UNIX) then + @value = Time.at(value) + else + @value = Time.at(value + CFDate::DATE_DIFF_APPLE_UNIX) + end + end + + # get timestamp, either UNIX or Apple timestamp + def get_value(format=CFDate::TIMESTAMP_UNIX) + if(format == CFDate::TIMESTAMP_UNIX) then + @value.to_i + else + @value.to_f - CFDate::DATE_DIFF_APPLE_UNIX + end + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('date') + n = parser.append_node(n, parser.new_text(CFDate::date_string(@value))) + n + end + + # convert to binary + def to_binary(bplist) + bplist.date_to_binary(@value) + end + + def to_plain(plist) + @value.strftime("%Y-%m-%d %H:%M:%S %z") + end + end + + # This class contains a boolean value + class CFBoolean < CFType + # convert to XML + def to_xml(parser) + parser.new_node(@value ? 'true' : 'false') + end + + # convert to binary + def to_binary(bplist) + bplist.bool_to_binary(@value); + end + + def to_plain(plist) + @value ? 
"true" : "false" + end + end + + # This class contains binary data values + class CFData < CFType + # Base64 encoded data + DATA_BASE64 = 0 + # Raw data + DATA_RAW = 1 + + # set value to defined state, either base64 encoded or raw + def initialize(value=nil,format=DATA_BASE64) + if(format == DATA_RAW) + @raw_value = value + else + @value = value + end + end + + # get base64 encoded value + def encoded_value + @value ||= "\n#{Base64.encode64(@raw_value).gsub("\n", '').scan(/.{1,76}/).join("\n")}\n" + end + + # get base64 decoded value + def decoded_value + @raw_value ||= Blob.new(Base64.decode64(@value)) + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('data') + n = parser.append_node(n, parser.new_text(encoded_value())) + n + end + + # convert to binary + def to_binary(bplist) + bplist.data_to_binary(decoded_value()) + end + + def to_plain(plist) + "<" + decoded_value.unpack("H*").join("") + ">" + end + end + + # This class contains an array of values + class CFArray < CFType + # create a new array CFType + def initialize(val=[]) + @value = val + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('array') + @value.each do |v| + n = parser.append_node(n, v.to_xml(parser)) + end + n + end + + # convert to binary + def to_binary(bplist) + bplist.array_to_binary(self) + end + + def to_plain(plist) + ary = @value.map { |v| v.to_plain(plist) } + "( " + ary.join(", ") + " )" + end + end + + # this class contains a hash of values + class CFDictionary < CFType + # Create new CFDictonary type. + def initialize(value={}) + @value = value + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('dict') + @value.each_pair do |key, value| + k = parser.append_node(parser.new_node('key'), parser.new_text(key.to_s)) + n = parser.append_node(n, k) + n = parser.append_node(n, value.to_xml(parser)) + end + n + end + + # convert to binary + def to_binary(bplist) + bplist.dict_to_binary(self) + end + + def to_plain(plist) + str = "{ " + cfstr = CFString.new() + + @value.each do |k,v| + cfstr.value = k + str << cfstr.to_plain(plist) + " = " + v.to_plain(plist) + "; " + end + + str << "}" + end + end + + class CFUid < CFType + def to_xml(parser) + CFDictionary.new({'CF$UID' => CFInteger.new(@value)}).to_xml(parser) + end + + # convert to binary + def to_binary(bplist) + bplist.uid_to_binary(@value) + end + + def to_plain(plist) + CFDictionary.new({'CF$UID' => CFInteger.new(@value)}).to_plain(plist) + end + end +end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbLibXMLParser.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbLibXMLParser.rb new file mode 100644 index 0000000..0654f08 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbLibXMLParser.rb @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +require 'libxml' + +module CFPropertyList + # XML parser + class LibXMLParser < XMLParserInterface + PARSER_OPTIONS = LibXML::XML::Parser::Options::NOBLANKS|LibXML::XML::Parser::Options::NONET + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + doc = nil + + if(opts.has_key?(:file)) then + doc = LibXML::XML::Document.file(opts[:file],:options => PARSER_OPTIONS) + else + doc = LibXML::XML::Document.string(opts[:data],:options => PARSER_OPTIONS) + end + + if doc + root = doc.root.first + return import_xml(root) + end + rescue LibXML::XML::Error => e + raise CFFormatError.new('invalid XML: 
' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = LibXML::XML::Document.new + + doc.root = LibXML::XML::Node.new('plist') + doc.encoding = LibXML::XML::Encoding::UTF_8 + + doc.root['version'] = '1.0' + doc.root << opts[:root].to_xml(self) + + # ugly hack, but there's no other possibility I know + str = doc.to_s(:indent => opts[:formatted]) + str1 = String.new + first = false + str.each_line do |line| + str1 << line + unless(first) then + str1 << "\n" if line =~ /^\s*<\?xml/ + end + + first = true + end + + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + return str1 + end + + def new_node(name) + LibXML::XML::Node.new(name) + end + + def new_text(val) + LibXML::XML::Node.new_text(val) + end + + def append_node(parent, child) + parent << child + end + + protected + + # get the value of a DOM node + def get_value(n) + content = if n.children? + n.first.content + else + n.content + end + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + + if node.children? then + node.children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? + + if n.name == "key" then + key = get_value(n) + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + + if node.children? then + node.children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? 
+ ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbNokogiriParser.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbNokogiriParser.rb new file mode 100644 index 0000000..e2de688 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbNokogiriParser.rb @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +require 'nokogiri' + +module CFPropertyList + # XML parser + class NokogiriXMLParser < ParserInterface + PARSER_OPTIONS = Nokogiri::XML::ParseOptions::NOBLANKS|Nokogiri::XML::ParseOptions::NONET + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + doc = nil + if(opts.has_key?(:file)) then + File.open(opts[:file], "rb") { |fd| doc = Nokogiri::XML::Document.parse(fd, nil, nil, PARSER_OPTIONS) } + else + doc = Nokogiri::XML::Document.parse(opts[:data], nil, nil, PARSER_OPTIONS) + end + + if doc + root = doc.root.children.first + return import_xml(root) + end + rescue Nokogiri::XML::SyntaxError => e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = Nokogiri::XML::Document.new + @doc = doc + + doc.root = doc.create_element 'plist', :version => '1.0' + doc.encoding = 'UTF-8' + + doc.root << opts[:root].to_xml(self) + + # ugly hack, but there's no other possibility I know + s_opts = Nokogiri::XML::Node::SaveOptions::AS_XML + s_opts |= Nokogiri::XML::Node::SaveOptions::FORMAT if opts[:formatted] + + str = doc.serialize(:save_with => s_opts) + str1 = String.new + first = false + str.each_line do |line| + str1 << line + unless(first) then + str1 << "\n" if line =~ /^\s*<\?xml/ + end + + first = true + end + + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + return str1 + end + + def new_node(name) + @doc.create_element name + end + + def new_text(val) + @doc.create_text_node val + end + + def append_node(parent, child) + parent << child + end + + protected + + # get the value of a DOM node + def get_value(n) + content = if n.children.empty? + n.content + else + n.children.first.content + end + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + children = node.children + + unless children.empty? then + children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? + + if n.name == "key" then + key = get_value(n) + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + children = node.children + + unless children.empty? then + children.each do |n| + next if n.text? 
# avoid a bug of libxml + next if n.comment? + ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbPlainCFPropertyList.rb b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbPlainCFPropertyList.rb new file mode 100644 index 0000000..fa698ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbPlainCFPropertyList.rb @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- + +require 'strscan' + +module CFPropertyList + # XML parser + class PlainParser < XMLParserInterface + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + @doc = nil + + if(opts.has_key?(:file)) then + File.open(opts[:file], :external_encoding => "ASCII") do |fd| + @doc = StringScanner.new(fd.read) + end + else + @doc = StringScanner.new(opts[:data]) + end + + if @doc + root = import_plain + raise CFFormatError.new('content after root object') unless @doc.eos? + + return root + end + + raise CFFormatError.new('invalid plist string or file not found') + end + + SPACES_AND_COMMENTS = %r{((?:/\*.*?\*/)|(?://.*?$\n?)|(?:\s*))+}x + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + opts[:root].to_plain(self) + end + + protected + def skip_whitespaces + @doc.skip SPACES_AND_COMMENTS + end + + def read_dict + skip_whitespaces + hsh = {} + + while not @doc.scan(/\}/) + key = import_plain + raise CFFormatError.new("invalid dictionary format") if !key + + if key.is_a?(CFString) + key = key.value + elsif key.is_a?(CFInteger) or key.is_a?(CFReal) + key = key.value.to_s + else + raise CFFormatError.new("invalid key format") + end + + skip_whitespaces + + raise CFFormatError.new("invalid dictionary format") unless @doc.scan(/=/) + + skip_whitespaces + val = import_plain + + skip_whitespaces + raise CFFormatError.new("invalid dictionary format") unless @doc.scan(/;/) + skip_whitespaces + + hsh[key] = val + raise CFFormatError.new("invalid dictionary format") if @doc.eos? + end + + CFDictionary.new(hsh) + end + + def read_array + skip_whitespaces + ary = [] + + while not @doc.scan(/\)/) + val = import_plain + + return nil if not val or not val.value + skip_whitespaces + + if not @doc.skip(/,\s*/) + if @doc.scan(/\)/) + ary << val + return CFArray.new(ary) + end + + raise CFFormatError.new("invalid array format") + end + + ary << val + raise CFFormatError.new("invalid array format") if @doc.eos? + end + + CFArray.new(ary) + end + + def escape_char + case @doc.matched + when '"' + '"' + when '\\' + '\\' + when 'a' + "\a" + when 'b' + "\b" + when 'f' + "\f" + when 'n' + "\n" + when 'v' + "\v" + when 'r' + "\r" + when 't' + "\t" + when 'U' + @doc.scan(/.{4}/).hex.chr('utf-8') + end + end + + def read_quoted + str = '' + + while not @doc.scan(/"/) + if @doc.scan(/\\/) + @doc.scan(/./) + str << escape_char + + elsif @doc.eos? 
+ raise CFFormatError.new("unterminated string") + + else @doc.scan(/./) + str << @doc.matched + end + end + + CFString.new(str) + end + + def read_unquoted + raise CFFormatError.new("unexpected end of file") if @doc.eos? + + if @doc.scan(/(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)(?:\s+(\+|-)(\d\d)(\d\d))?/) + year,month,day,hour,min,sec,pl_min,tz_hour, tz_min = @doc[1], @doc[2], @doc[3], @doc[4], @doc[5], @doc[6], @doc[7], @doc[8], @doc[9] + CFDate.new(Time.new(year, month, day, hour, min, sec, pl_min ? sprintf("%s%s:%s", pl_min, tz_hour, tz_min) : nil)) + + elsif @doc.scan(/-?\d+?\.\d+\b/) + CFReal.new(@doc.matched.to_f) + + elsif @doc.scan(/-?\d+\b/) + CFInteger.new(@doc.matched.to_i) + + elsif @doc.scan(/\b(true|false)\b/) + CFBoolean.new(@doc.matched == 'true') + else + CFString.new(@doc.scan(/\w+/)) + end + end + + def read_binary + @doc.scan(/(.*?)>/) + + hex_str = @doc[1].gsub(/ /, '') + CFData.new([hex_str].pack("H*"), CFData::DATA_RAW) + end + + # import the XML values + def import_plain + skip_whitespaces + ret = nil + + if @doc.scan(/\{/) # dict + ret = read_dict + elsif @doc.scan(/\(/) # array + ret = read_array + elsif @doc.scan(/"/) # string + ret = read_quoted + elsif @doc.scan(/ e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = REXML::Document.new + @doc = doc + + doc.context[:attribute_quote] = :quote + + doc.add_element 'plist', {'version' => '1.0'} + doc.root << opts[:root].to_xml(self) + + formatter = if opts[:formatted] then + f = REXML::Formatters::Pretty.new(2) + f.compact = true + f.width = Float::INFINITY + f + else + REXML::Formatters::Default.new + end + + str = formatter.write(doc.root, "") + str1 = "\n\n" + str + "\n" + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + + return str1 + end + + def new_node(name) + REXML::Element.new(name) + end + + def new_text(val) + val + end + + def append_node(parent, child) + if child.is_a?(String) then + parent.add_text child + else + parent.elements << child + end + parent + end + + protected + + # get the value of a DOM node + def get_value(n) + content = n.text + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + + if node.has_elements? then + node.elements.each do |n| + next if n.name == '#text' # avoid a bug of libxml + next if n.name == '#comment' + + if n.name == "key" then + key = get_value(n) + key = '' if key.nil? # REXML returns nil if key is empty + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + + if node.has_elements? then + node.elements.each do |n| + next if n.name == '#text' # avoid a bug of libxml + ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + ret.value = '' if ret.value.nil? 
# REXML returns nil for empty elements' .text attribute + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/CHANGELOG.md new file mode 100644 index 0000000..686b31d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/CHANGELOG.md @@ -0,0 +1,653 @@ +## Rails 5.2.4.4 (September 09, 2020) ## + +* No changes. + + +## Rails 5.2.4.3 (May 18, 2020) ## + +* [CVE-2020-8165] Deprecate Marshal.load on raw cache read in RedisCacheStore + +* [CVE-2020-8165] Avoid Marshal.load on raw cache value in MemCacheStore + +## Rails 5.2.4.1 (December 18, 2019) ## + +* No changes. + + +## Rails 5.2.4 (November 27, 2019) ## + +* Make ActiveSupport::Logger Fiber-safe. Fixes #36752. + + Use `Fiber.current.__id__` in `ActiveSupport::Logger#local_level=` in order + to make log level local to Ruby Fibers in addition to Threads. + + Example: + + logger = ActiveSupport::Logger.new(STDOUT) + logger.level = 1 + p "Main is debug? #{logger.debug?}" + + Fiber.new { + logger.local_level = 0 + p "Thread is debug? #{logger.debug?}" + }.resume + + p "Main is debug? #{logger.debug?}" + + Before: + + Main is debug? false + Thread is debug? true + Main is debug? true + + After: + + Main is debug? false + Thread is debug? true + Main is debug? false + + *Alexander Varnin* + + +## Rails 5.2.3 (March 27, 2019) ## + +* Add `ActiveSupport::HashWithIndifferentAccess#assoc`. + + `assoc` can now be called with either a string or a symbol. + + *Stefan Schüßler* + +* Fix `String#safe_constantize` throwing a `LoadError` for incorrectly cased constant references. + + *Keenan Brock* + +* Allow Range#=== and Range#cover? on Range + + `Range#cover?` can now accept a range argument like `Range#include?` and + `Range#===`. `Range#===` works correctly on Ruby 2.6. `Range#include?` is moved + into a new file, with these two methods. + + *utilum* + +* If the same block is `included` multiple times for a Concern, an exception is no longer raised. + + *Mark J. Titorenko*, *Vlad Bokov* + + +## Rails 5.2.2.1 (March 11, 2019) ## + +* No changes. + + +## Rails 5.2.2 (December 04, 2018) ## + +* Fix bug where `#to_options` for `ActiveSupport::HashWithIndifferentAccess` + would not act as alias for `#symbolize_keys`. + + *Nick Weiland* + +* Improve the logic that detects non-autoloaded constants. + + *Jan Habermann*, *Xavier Noria* + +* Fix bug where `URI.unescape` would fail with mixed Unicode/escaped character input: + + URI.unescape("\xe3\x83\x90") # => "バ" + URI.unescape("%E3%83%90") # => "バ" + URI.unescape("\xe3\x83\x90%E3%83%90") # => Encoding::CompatibilityError + + *Ashe Connor*, *Aaron Patterson* + + +## Rails 5.2.1.1 (November 27, 2018) ## + +* No changes. + + +## Rails 5.2.1 (August 07, 2018) ## + +* Redis cache store: `delete_matched` no longer blocks the Redis server. + (Switches from evaled Lua to a batched SCAN + DEL loop.) + + *Gleb Mazovetskiy* + +* Fix bug where `ActiveSupport::Timezone.all` would fail when tzinfo data for + any timezone defined in `ActiveSupport::TimeZone::MAPPING` is missing. + + *Dominik Sander* + +* Fix bug where `ActiveSupport::Cache` will massively inflate the storage + size when compression is enabled (which is true by default). This patch + does not attempt to repair existing data: please manually flush the cache + to clear out the problematic entries. 
+ + *Godfrey Chan* + +* Fix `ActiveSupport::Cache#read_multi` bug with local cache enabled that was + returning instances of `ActiveSupport::Cache::Entry` instead of the raw values. + + *Jason Lee* + + +## Rails 5.2.0 (April 09, 2018) ## + +* Caching: MemCache and Redis `read_multi` and `fetch_multi` speedup. + Read from the local in-memory cache before consulting the backend. + + *Gabriel Sobrinho* + +* Return all mappings for a timezone identifier in `country_zones`. + + Some timezones like `Europe/London` have multiple mappings in + `ActiveSupport::TimeZone::MAPPING` so return all of them instead + of the first one found by using `Hash#value`. e.g: + + # Before + ActiveSupport::TimeZone.country_zones("GB") # => ["Edinburgh"] + + # After + ActiveSupport::TimeZone.country_zones("GB") # => ["Edinburgh", "London"] + + Fixes #31668. + + *Andrew White* + +* Add support for connection pooling on RedisCacheStore. + + *fatkodima* + +* Support hash as first argument in `assert_difference`. This allows to specify multiple + numeric differences in the same assertion. + + assert_difference ->{ Article.count } => 1, ->{ Post.count } => 2 + + *Julien Meichelbeck* + +* Add missing instrumentation for `read_multi` in `ActiveSupport::Cache::Store`. + + *Ignatius Reza Lesmana* + +* `assert_changes` will always assert that the expression changes, + regardless of `from:` and `to:` argument combinations. + + *Daniel Ma* + +* Use SHA-1 to generate non-sensitive digests, such as the ETag header. + + Enabled by default for new apps; upgrading apps can opt in by setting + `config.active_support.use_sha1_digests = true`. + + *Dmitri Dolguikh*, *Eugene Kenny* + +* Changed default behaviour of `ActiveSupport::SecurityUtils.secure_compare`, + to make it not leak length information even for variable length string. + + Renamed old `ActiveSupport::SecurityUtils.secure_compare` to `fixed_length_secure_compare`, + and started raising `ArgumentError` in case of length mismatch of passed strings. + + *Vipul A M* + +* Make `ActiveSupport::TimeZone.all` return only time zones that are in + `ActiveSupport::TimeZone::MAPPING`. + + Fixes #7245. + + *Chris LaRose* + +* MemCacheStore: Support expiring counters. + + Pass `expires_in: [seconds]` to `#increment` and `#decrement` options + to set the Memcached TTL (time-to-live) if the counter doesn't exist. + If the counter exists, Memcached doesn't extend its expiry when it's + incremented or decremented. + + ``` + Rails.cache.increment("my_counter", 1, expires_in: 2.minutes) + ``` + + *Takumasa Ochi* + +* Handle `TZInfo::AmbiguousTime` errors. + + Make `ActiveSupport::TimeWithZone` match Ruby's handling of ambiguous + times by choosing the later period, e.g. + + Ruby: + ``` + ENV["TZ"] = "Europe/Moscow" + Time.local(2014, 10, 26, 1, 0, 0) # => 2014-10-26 01:00:00 +0300 + ``` + + Before: + ``` + >> "2014-10-26 01:00:00".in_time_zone("Moscow") + TZInfo::AmbiguousTime: 26/10/2014 01:00 is an ambiguous local time. + ``` + + After: + ``` + >> "2014-10-26 01:00:00".in_time_zone("Moscow") + => Sun, 26 Oct 2014 01:00:00 MSK +03:00 + ``` + + Fixes #17395. + + *Andrew White* + +* Redis cache store. + + ``` + # Defaults to `redis://localhost:6379/0`. Only use for dev/test. + config.cache_store = :redis_cache_store + + # Supports all common cache store options (:namespace, :compress, + # :compress_threshold, :expires_in, :race_condition_ttl) and all + # Redis options. 
+ cache_password = Rails.application.secrets.redis_cache_password + config.cache_store = :redis_cache_store, driver: :hiredis, + namespace: 'myapp-cache', compress: true, timeout: 1, + url: "redis://:#{cache_password}@myapp-cache-1:6379/0" + + # Supports Redis::Distributed with multiple hosts + config.cache_store = :redis_cache_store, driver: :hiredis + namespace: 'myapp-cache', compress: true, + url: %w[ + redis://myapp-cache-1:6379/0 + redis://myapp-cache-1:6380/0 + redis://myapp-cache-2:6379/0 + redis://myapp-cache-2:6380/0 + redis://myapp-cache-3:6379/0 + redis://myapp-cache-3:6380/0 + ] + + # Or pass a builder block + config.cache_store = :redis_cache_store, + namespace: 'myapp-cache', compress: true, + redis: -> { Redis.new … } + ``` + + Deployment note: Take care to use a *dedicated Redis cache* rather + than pointing this at your existing Redis server. It won't cope well + with mixed usage patterns and it won't expire cache entries by default. + + Redis cache server setup guide: https://redis.io/topics/lru-cache + + *Jeremy Daer* + +* Cache: Enable compression by default for values > 1kB. + + Compression has long been available, but opt-in and at a 16kB threshold. + It wasn't enabled by default due to CPU cost. Today it's cheap and typical + cache data is eminently compressible, such as HTML or JSON fragments. + Compression dramatically reduces Memcached/Redis mem usage, which means + the same cache servers can store more data, which means higher hit rates. + + To disable compression, pass `compress: false` to the initializer. + + *Jeremy Daer* + +* Allow `Range#include?` on TWZ ranges. + + In #11474 we prevented TWZ ranges being iterated over which matched + Ruby's handling of Time ranges and as a consequence `include?` + stopped working with both Time ranges and TWZ ranges. However in + ruby/ruby@b061634 support was added for `include?` to use `cover?` + for 'linear' objects. Since we have no way of making Ruby consider + TWZ instances as 'linear' we have to override `Range#include?`. + + Fixes #30799. + + *Andrew White* + +* Fix acronym support in `humanize`. + + Acronym inflections are stored with lowercase keys in the hash but + the match wasn't being lowercased before being looked up in the hash. + This shouldn't have any performance impact because before it would + fail to find the acronym and perform the `downcase` operation anyway. + + Fixes #31052. + + *Andrew White* + +* Add same method signature for `Time#prev_year` and `Time#next_year` + in accordance with `Date#prev_year`, `Date#next_year`. + + Allows pass argument for `Time#prev_year` and `Time#next_year`. + + Before: + ``` + Time.new(2017, 9, 16, 17, 0).prev_year # => 2016-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_year(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + + Time.new(2017, 9, 16, 17, 0).next_year # => 2018-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_year(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + ``` + + After: + ``` + Time.new(2017, 9, 16, 17, 0).prev_year # => 2016-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_year(1) # => 2016-09-16 17:00:00 +0300 + + Time.new(2017, 9, 16, 17, 0).next_year # => 2018-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_year(1) # => 2018-09-16 17:00:00 +0300 + ``` + + *bogdanvlviv* + +* Add same method signature for `Time#prev_month` and `Time#next_month` + in accordance with `Date#prev_month`, `Date#next_month`. 
+ + Allows pass argument for `Time#prev_month` and `Time#next_month`. + + Before: + ``` + Time.new(2017, 9, 16, 17, 0).prev_month # => 2017-08-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_month(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + + Time.new(2017, 9, 16, 17, 0).next_month # => 2017-10-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_month(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + ``` + + After: + ``` + Time.new(2017, 9, 16, 17, 0).prev_month # => 2017-08-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_month(1) # => 2017-08-16 17:00:00 +0300 + + Time.new(2017, 9, 16, 17, 0).next_month # => 2017-10-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_month(1) # => 2017-10-16 17:00:00 +0300 + ``` + + *bogdanvlviv* + +* Add same method signature for `Time#prev_day` and `Time#next_day` + in accordance with `Date#prev_day`, `Date#next_day`. + + Allows pass argument for `Time#prev_day` and `Time#next_day`. + + Before: + ``` + Time.new(2017, 9, 16, 17, 0).prev_day # => 2017-09-15 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_day(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + + Time.new(2017, 9, 16, 17, 0).next_day # => 2017-09-17 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_day(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + ``` + + After: + ``` + Time.new(2017, 9, 16, 17, 0).prev_day # => 2017-09-15 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_day(1) # => 2017-09-15 17:00:00 +0300 + + Time.new(2017, 9, 16, 17, 0).next_day # => 2017-09-17 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_day(1) # => 2017-09-17 17:00:00 +0300 + ``` + + *bogdanvlviv* + +* `IO#to_json` now returns the `to_s` representation, rather than + attempting to convert to an array. This fixes a bug where `IO#to_json` + would raise an `IOError` when called on an unreadable object. + + Fixes #26132. + + *Paul Kuruvilla* + +* Remove deprecated `halt_callback_chains_on_return_false` option. + + *Rafael Mendonça França* + +* Remove deprecated `:if` and `:unless` string filter for callbacks. + + *Rafael Mendonça França* + +* `Hash#slice` now falls back to Ruby 2.5+'s built-in definition if defined. + + *Akira Matsuda* + +* Deprecate `secrets.secret_token`. + + The architecture for secrets had a big upgrade between Rails 3 and Rails 4, + when the default changed from using `secret_token` to `secret_key_base`. + + `secret_token` has been soft deprecated in documentation for four years + but is still in place to support apps created before Rails 4. + Deprecation warnings have been added to help developers upgrade their + applications to `secret_key_base`. + + *claudiob*, *Kasper Timm Hansen* + +* Return an instance of `HashWithIndifferentAccess` from `HashWithIndifferentAccess#transform_keys`. + + *Yuji Yaginuma* + +* Add key rotation support to `MessageEncryptor` and `MessageVerifier`. + + This change introduces a `rotate` method to both the `MessageEncryptor` and + `MessageVerifier` classes. This method accepts the same arguments and + options as the given classes' constructor. The `encrypt_and_verify` method + for `MessageEncryptor` and the `verified` method for `MessageVerifier` also + accept an optional keyword argument `:on_rotation` block which is called + when a rotated instance is used to decrypt or verify the message. + + *Michael J Coyne* + +* Deprecate `Module#reachable?` method. 
+ + *bogdanvlviv* + +* Add `config/credentials.yml.enc` to store production app secrets. + + Allows saving any authentication credentials for third party services + directly in repo encrypted with `config/master.key` or `ENV["RAILS_MASTER_KEY"]`. + + This will eventually replace `Rails.application.secrets` and the encrypted + secrets introduced in Rails 5.1. + + *DHH*, *Kasper Timm Hansen* + +* Add `ActiveSupport::EncryptedFile` and `ActiveSupport::EncryptedConfiguration`. + + Allows for stashing encrypted files or configuration directly in repo by + encrypting it with a key. + + Backs the new credentials setup above, but can also be used independently. + + *DHH*, *Kasper Timm Hansen* + +* `Module#delegate_missing_to` now raises `DelegationError` if target is nil, + similar to `Module#delegate`. + + *Anton Khamets* + +* Update `String#camelize` to provide feedback when wrong option is passed. + + `String#camelize` was returning nil without any feedback when an + invalid option was passed as a parameter. + + Previously: + + 'one_two'.camelize(true) + # => nil + + Now: + + 'one_two'.camelize(true) + # => ArgumentError: Invalid option, use either :upper or :lower. + + *Ricardo Díaz* + +* Fix modulo operations involving durations. + + Rails 5.1 introduced `ActiveSupport::Duration::Scalar` as a wrapper + around numeric values as a way of ensuring a duration was the outcome of + an expression. However, the implementation was missing support for modulo + operations. This support has now been added and should result in a duration + being returned from expressions involving modulo operations. + + Prior to Rails 5.1: + + 5.minutes % 2.minutes + # => 60 + + Now: + + 5.minutes % 2.minutes + # => 1 minute + + Fixes #29603 and #29743. + + *Sayan Chakraborty*, *Andrew White* + +* Fix division where a duration is the denominator. + + PR #29163 introduced a change in behavior when a duration was the denominator + in a calculation - this was incorrect as dividing by a duration should always + return a `Numeric`. The behavior of previous versions of Rails has been restored. + + Fixes #29592. + + *Andrew White* + +* Add purpose and expiry support to `ActiveSupport::MessageVerifier` and + `ActiveSupport::MessageEncryptor`. + + For instance, to ensure a message is only usable for one intended purpose: + + token = @verifier.generate("x", purpose: :shipping) + + @verifier.verified(token, purpose: :shipping) # => "x" + @verifier.verified(token) # => nil + + Or make it expire after a set time: + + @verifier.generate("x", expires_in: 1.month) + @verifier.generate("y", expires_at: Time.now.end_of_year) + + Showcased with `ActiveSupport::MessageVerifier`, but works the same for + `ActiveSupport::MessageEncryptor`'s `encrypt_and_sign` and `decrypt_and_verify`. + + Pull requests: #29599, #29854 + + *Assain Jaleel* + +* Make the order of `Hash#reverse_merge!` consistent with `HashWithIndifferentAccess`. + + *Erol Fornoles* + +* Add `freeze_time` helper which freezes time to `Time.now` in tests. + + *Prathamesh Sonpatki* + +* Default `ActiveSupport::MessageEncryptor` to use AES 256 GCM encryption. + + On for new Rails 5.2 apps. Upgrading apps can find the config as a new + framework default. + + *Assain Jaleel* + +* Cache: `write_multi`. + + Rails.cache.write_multi foo: 'bar', baz: 'qux' + + Plus faster fetch_multi with stores that implement `write_multi_entries`. + Keys that aren't found may be written to the cache store in one shot + instead of separate writes. 
+ + The default implementation simply calls `write_entry` for each entry. + Stores may override if they're capable of one-shot bulk writes, like + Redis `MSET`. + + *Jeremy Daer* + +* Add default option to module and class attribute accessors. + + mattr_accessor :settings, default: {} + + Works for `mattr_reader`, `mattr_writer`, `cattr_accessor`, `cattr_reader`, + and `cattr_writer` as well. + + *Genadi Samokovarov* + +* Add `Date#prev_occurring` and `Date#next_occurring` to return specified next/previous occurring day of week. + + *Shota Iguchi* + +* Add default option to `class_attribute`. + + Before: + + class_attribute :settings + self.settings = {} + + Now: + + class_attribute :settings, default: {} + + *DHH* + +* `#singularize` and `#pluralize` now respect uncountables for the specified locale. + + *Eilis Hamilton* + +* Add `ActiveSupport::CurrentAttributes` to provide a thread-isolated attributes singleton. + Primary use case is keeping all the per-request attributes easily available to the whole system. + + *DHH* + +* Fix implicit coercion calculations with scalars and durations. + + Previously, calculations where the scalar is first would be converted to a duration + of seconds, but this causes issues with dates being converted to times, e.g: + + Time.zone = "Beijing" # => Asia/Shanghai + date = Date.civil(2017, 5, 20) # => Mon, 20 May 2017 + 2 * 1.day # => 172800 seconds + date + 2 * 1.day # => Mon, 22 May 2017 00:00:00 CST +08:00 + + Now, the `ActiveSupport::Duration::Scalar` calculation methods will try to maintain + the part structure of the duration where possible, e.g: + + Time.zone = "Beijing" # => Asia/Shanghai + date = Date.civil(2017, 5, 20) # => Mon, 20 May 2017 + 2 * 1.day # => 2 days + date + 2 * 1.day # => Mon, 22 May 2017 + + Fixes #29160, #28970. + + *Andrew White* + +* Add support for versioned cache entries. This enables the cache stores to recycle cache keys, greatly saving + on storage in cases with frequent churn. Works together with the separation of `#cache_key` and `#cache_version` + in Active Record and its use in Action Pack's fragment caching. + + *DHH* + +* Pass gem name and deprecation horizon to deprecation notifications. + + *Willem van Bergen* + +* Add support for `:offset` and `:zone` to `ActiveSupport::TimeWithZone#change`. + + *Andrew White* + +* Add support for `:offset` to `Time#change`. + + Fixes #28723. + + *Andrew White* + +* Add `fetch_values` for `HashWithIndifferentAccess`. + + The method was originally added to `Hash` in Ruby 2.3.0. + + *Josh Pencheon* + + +Please check [5-1-stable](https://github.com/rails/rails/blob/5-1-stable/activesupport/CHANGELOG.md) for previous changes. 
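Several of the 5.2.0 entries above introduce APIs without an inline example. The following is a minimal sketch of the thread-isolated attributes singleton and the new `default:` option for module attribute accessors; the `Current` and `AppConfig` names are illustrative only and are not part of Active Support:

```
require "active_support"
require "active_support/core_ext/module/attribute_accessors"

# Thread-isolated singleton holding per-request attributes (ActiveSupport::CurrentAttributes).
class Current < ActiveSupport::CurrentAttributes
  attribute :account, :user
end

Current.user = "alice"
Current.user        # => "alice", readable anywhere on the same thread
Current.reset       # attributes are reset between requests by the Rails executor

# Default values for mattr_accessor / cattr_accessor (and class_attribute).
module AppConfig
  mattr_accessor :settings, default: {}
end

AppConfig.settings  # => {}
```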
diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/MIT-LICENSE b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/MIT-LICENSE new file mode 100644 index 0000000..8f769c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2005-2018 David Heinemeier Hansson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/README.rdoc b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/README.rdoc new file mode 100644 index 0000000..bbe25ac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/README.rdoc @@ -0,0 +1,39 @@ += Active Support -- Utility classes and Ruby extensions from Rails + +Active Support is a collection of utility classes and standard library +extensions that were found useful for the Rails framework. These additions +reside in this package so they can be loaded as needed in Ruby projects +outside of Rails. 
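For example, a script that is not a Rails application can pull in just the pieces it
needs (a minimal sketch; the requires below are one possible selection):

  require "active_support"
  require "active_support/core_ext/string/inflections"
  require "active_support/core_ext/numeric/time"

  "cache_store".camelize   # => "CacheStore"
  2.days                   # => 2 days (an ActiveSupport::Duration)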
+ + +== Download and installation + +The latest version of Active Support can be installed with RubyGems: + + $ gem install activesupport + +Source code can be downloaded as part of the Rails project on GitHub: + +* https://github.com/rails/rails/tree/5-2-stable/activesupport + + +== License + +Active Support is released under the MIT license: + +* https://opensource.org/licenses/MIT + + +== Support + +API documentation is at: + +* http://api.rubyonrails.org + +Bug reports for the Ruby on Rails project can be filed here: + +* https://github.com/rails/rails/issues + +Feature requests should be discussed on the rails-core mailing list here: + +* https://groups.google.com/forum/?fromgroups#!forum/rubyonrails-core diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support.rb new file mode 100644 index 0000000..a4fb697 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +#-- +# Copyright (c) 2005-2018 David Heinemeier Hansson +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +#++ + +require "securerandom" +require "active_support/dependencies/autoload" +require "active_support/version" +require "active_support/logger" +require "active_support/lazy_load_hooks" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + extend ActiveSupport::Autoload + + autoload :Concern + autoload :CurrentAttributes + autoload :Dependencies + autoload :DescendantsTracker + autoload :ExecutionWrapper + autoload :Executor + autoload :FileUpdateChecker + autoload :EventedFileUpdateChecker + autoload :LogSubscriber + autoload :Notifications + autoload :Reloader + + eager_autoload do + autoload :BacktraceCleaner + autoload :ProxyObject + autoload :Benchmarkable + autoload :Cache + autoload :Callbacks + autoload :Configurable + autoload :Deprecation + autoload :Digest + autoload :Gzip + autoload :Inflector + autoload :JSON + autoload :KeyGenerator + autoload :MessageEncryptor + autoload :MessageVerifier + autoload :Multibyte + autoload :NumberHelper + autoload :OptionMerger + autoload :OrderedHash + autoload :OrderedOptions + autoload :StringInquirer + autoload :TaggedLogging + autoload :XmlMini + autoload :ArrayInquirer + end + + autoload :Rescuable + autoload :SafeBuffer, "active_support/core_ext/string/output_safety" + autoload :TestCase + + def self.eager_load! 
+ super + + NumberHelper.eager_load! + end + + cattr_accessor :test_order # :nodoc: + + def self.to_time_preserves_timezone + DateAndTime::Compatibility.preserve_timezone + end + + def self.to_time_preserves_timezone=(value) + DateAndTime::Compatibility.preserve_timezone = value + end +end + +autoload :I18n, "active_support/i18n" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/all.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/all.rb new file mode 100644 index 0000000..4adf446 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/all.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/time" +require "active_support/core_ext" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/array_inquirer.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/array_inquirer.rb new file mode 100644 index 0000000..b2b9e9c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/array_inquirer.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module ActiveSupport + # Wrapping an array in an +ArrayInquirer+ gives a friendlier way to check + # its string-like contents: + # + # variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet]) + # + # variants.phone? # => true + # variants.tablet? # => true + # variants.desktop? # => false + class ArrayInquirer < Array + # Passes each element of +candidates+ collection to ArrayInquirer collection. + # The method returns true if any element from the ArrayInquirer collection + # is equal to the stringified or symbolized form of any element in the +candidates+ collection. + # + # If +candidates+ collection is not given, method returns true. + # + # variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet]) + # + # variants.any? # => true + # variants.any?(:phone, :tablet) # => true + # variants.any?('phone', 'desktop') # => true + # variants.any?(:desktop, :watch) # => false + def any?(*candidates) + if candidates.none? + super + else + candidates.any? do |candidate| + include?(candidate.to_sym) || include?(candidate.to_s) + end + end + end + + private + def respond_to_missing?(name, include_private = false) + (name[-1] == "?") || super + end + + def method_missing(name, *args) + if name[-1] == "?" + any?(name[0..-2]) + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/backtrace_cleaner.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/backtrace_cleaner.rb new file mode 100644 index 0000000..16dd733 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/backtrace_cleaner.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module ActiveSupport + # Backtraces often include many lines that are not relevant for the context + # under review. This makes it hard to find the signal amongst the backtrace + # noise, and adds debugging time. With a BacktraceCleaner, filters and + # silencers are used to remove the noisy lines, so that only the most relevant + # lines remain. + # + # Filters are used to modify lines of data, while silencers are used to remove + # lines entirely. The typical filter use case is to remove lengthy path + # information from the start of each line, and view file paths relevant to the + # app directory instead of the file system root. 
The typical silencer use case + # is to exclude the output of a noisy library from the backtrace, so that you + # can focus on the rest. + # + # bc = ActiveSupport::BacktraceCleaner.new + # bc.add_filter { |line| line.gsub(Rails.root.to_s, '') } # strip the Rails.root prefix + # bc.add_silencer { |line| line =~ /puma|rubygems/ } # skip any lines from puma or rubygems + # bc.clean(exception.backtrace) # perform the cleanup + # + # To reconfigure an existing BacktraceCleaner (like the default one in Rails) + # and show as much data as possible, you can always call + # BacktraceCleaner#remove_silencers!, which will restore the + # backtrace to a pristine state. If you need to reconfigure an existing + # BacktraceCleaner so that it does not filter or modify the paths of any lines + # of the backtrace, you can call BacktraceCleaner#remove_filters! + # These two methods will give you a completely untouched backtrace. + # + # Inspired by the Quiet Backtrace gem by thoughtbot. + class BacktraceCleaner + def initialize + @filters, @silencers = [], [] + end + + # Returns the backtrace after all filters and silencers have been run + # against it. Filters run first, then silencers. + def clean(backtrace, kind = :silent) + filtered = filter_backtrace(backtrace) + + case kind + when :silent + silence(filtered) + when :noise + noise(filtered) + else + filtered + end + end + alias :filter :clean + + # Adds a filter from the block provided. Each line in the backtrace will be + # mapped against this filter. + # + # # Will turn "/my/rails/root/app/models/person.rb" into "/app/models/person.rb" + # backtrace_cleaner.add_filter { |line| line.gsub(Rails.root, '') } + def add_filter(&block) + @filters << block + end + + # Adds a silencer from the block provided. If the silencer returns +true+ + # for a given line, it will be excluded from the clean backtrace. + # + # # Will reject all lines that include the word "puma", like "/gems/puma/server.rb" or "/app/my_puma_server/rb" + # backtrace_cleaner.add_silencer { |line| line =~ /puma/ } + def add_silencer(&block) + @silencers << block + end + + # Removes all silencers, but leaves in the filters. Useful if your + # context of debugging suddenly expands as you suspect a bug in one of + # the libraries you use. + def remove_silencers! + @silencers = [] + end + + # Removes all filters, but leaves in the silencers. Useful if you suddenly + # need to see entire filepaths in the backtrace that you had already + # filtered out. + def remove_filters! + @filters = [] + end + + private + def filter_backtrace(backtrace) + @filters.each do |f| + backtrace = backtrace.map { |line| f.call(line) } + end + + backtrace + end + + def silence(backtrace) + @silencers.each do |s| + backtrace = backtrace.reject { |line| s.call(line) } + end + + backtrace + end + + def noise(backtrace) + backtrace - silence(backtrace) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/benchmarkable.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/benchmarkable.rb new file mode 100644 index 0000000..f481d68 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/benchmarkable.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/core_ext/benchmark" +require "active_support/core_ext/hash/keys" + +module ActiveSupport + module Benchmarkable + # Allows you to measure the execution time of a block in a template and + # records the result to the log. 
Wrap this block around expensive operations + # or possible bottlenecks to get a time reading for the operation. For + # example, let's say you thought your file processing method was taking too + # long; you could wrap it in a benchmark block. + # + # <% benchmark 'Process data files' do %> + # <%= expensive_files_operation %> + # <% end %> + # + # That would add something like "Process data files (345.2ms)" to the log, + # which you can then use to compare timings when optimizing your code. + # + # You may give an optional logger level (:debug, :info, + # :warn, :error) as the :level option. The + # default logger level value is :info. + # + # <% benchmark 'Low-level files', level: :debug do %> + # <%= lowlevel_files_operation %> + # <% end %> + # + # Finally, you can pass true as the third argument to silence all log + # activity (other than the timing information) from inside the block. This + # is great for boiling down a noisy block to just a single statement that + # produces one log line: + # + # <% benchmark 'Process data files', level: :info, silence: true do %> + # <%= expensive_and_chatty_files_operation %> + # <% end %> + def benchmark(message = "Benchmarking", options = {}) + if logger + options.assert_valid_keys(:level, :silence) + options[:level] ||= :info + + result = nil + ms = Benchmark.ms { result = options[:silence] ? logger.silence { yield } : yield } + logger.send(options[:level], "%s (%.1fms)" % [ message, ms ]) + result + else + yield + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/builder.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/builder.rb new file mode 100644 index 0000000..3fa7e6b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/builder.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +begin + require "builder" +rescue LoadError => e + $stderr.puts "You don't have builder installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache.rb new file mode 100644 index 0000000..d06a497 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache.rb @@ -0,0 +1,808 @@ +# frozen_string_literal: true + +require "zlib" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/numeric/bytes" +require "active_support/core_ext/numeric/time" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # See ActiveSupport::Cache::Store for documentation. + module Cache + autoload :FileStore, "active_support/cache/file_store" + autoload :MemoryStore, "active_support/cache/memory_store" + autoload :MemCacheStore, "active_support/cache/mem_cache_store" + autoload :NullStore, "active_support/cache/null_store" + autoload :RedisCacheStore, "active_support/cache/redis_cache_store" + + # These options mean something to all cache implementations. Individual cache + # implementations may support additional options. 
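    # For example, every store accepts the universal options on reads and
    # writes (a brief sketch; +5.minutes+ assumes the numeric time core
    # extension is loaded, as it is in a Rails app):
    #
    #   cache = ActiveSupport::Cache::MemoryStore.new(namespace: "app")
    #   cache.write("greeting", "hello", expires_in: 5.minutes, compress: false)
    #   cache.read("greeting")   # => "hello"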
+ UNIVERSAL_OPTIONS = [:namespace, :compress, :compress_threshold, :expires_in, :race_condition_ttl] + + module Strategy + autoload :LocalCache, "active_support/cache/strategy/local_cache" + end + + class << self + # Creates a new Store object according to the given options. + # + # If no arguments are passed to this method, then a new + # ActiveSupport::Cache::MemoryStore object will be returned. + # + # If you pass a Symbol as the first argument, then a corresponding cache + # store class under the ActiveSupport::Cache namespace will be created. + # For example: + # + # ActiveSupport::Cache.lookup_store(:memory_store) + # # => returns a new ActiveSupport::Cache::MemoryStore object + # + # ActiveSupport::Cache.lookup_store(:mem_cache_store) + # # => returns a new ActiveSupport::Cache::MemCacheStore object + # + # Any additional arguments will be passed to the corresponding cache store + # class's constructor: + # + # ActiveSupport::Cache.lookup_store(:file_store, '/tmp/cache') + # # => same as: ActiveSupport::Cache::FileStore.new('/tmp/cache') + # + # If the first argument is not a Symbol, then it will simply be returned: + # + # ActiveSupport::Cache.lookup_store(MyOwnCacheStore.new) + # # => returns MyOwnCacheStore.new + def lookup_store(*store_option) + store, *parameters = *Array.wrap(store_option).flatten + + case store + when Symbol + retrieve_store_class(store).new(*parameters) + when nil + ActiveSupport::Cache::MemoryStore.new + else + store + end + end + + # Expands out the +key+ argument into a key that can be used for the + # cache store. Optionally accepts a namespace, and all keys will be + # scoped within that namespace. + # + # If the +key+ argument provided is an array, or responds to +to_a+, then + # each of elements in the array will be turned into parameters/keys and + # concatenated into a single key. For example: + # + # ActiveSupport::Cache.expand_cache_key([:foo, :bar]) # => "foo/bar" + # ActiveSupport::Cache.expand_cache_key([:foo, :bar], "namespace") # => "namespace/foo/bar" + # + # The +key+ argument can also respond to +cache_key+ or +to_param+. + def expand_cache_key(key, namespace = nil) + expanded_cache_key = (namespace ? "#{namespace}/" : "").dup + + if prefix = ENV["RAILS_CACHE_ID"] || ENV["RAILS_APP_VERSION"] + expanded_cache_key << "#{prefix}/" + end + + expanded_cache_key << retrieve_cache_key(key) + expanded_cache_key + end + + private + def retrieve_cache_key(key) + case + when key.respond_to?(:cache_key_with_version) then key.cache_key_with_version + when key.respond_to?(:cache_key) then key.cache_key + when key.is_a?(Array) then key.map { |element| retrieve_cache_key(element) }.to_param + when key.respond_to?(:to_a) then retrieve_cache_key(key.to_a) + else key.to_param + end.to_s + end + + # Obtains the specified cache store class, given the name of the +store+. + # Raises an error when the store class cannot be found. + def retrieve_store_class(store) + # require_relative cannot be used here because the class might be + # provided by another gem, like redis-activesupport for example. + require "active_support/cache/#{store}" + rescue LoadError => e + raise "Could not find cache store adapter for #{store} (#{e})" + else + ActiveSupport::Cache.const_get(store.to_s.camelize) + end + end + + # An abstract cache store class. There are multiple cache store + # implementations, each having its own additional features. See the classes + # under the ActiveSupport::Cache module, e.g. + # ActiveSupport::Cache::MemCacheStore. 
MemCacheStore is currently the most + # popular cache store for large production websites. + # + # Some implementations may not support all methods beyond the basic cache + # methods of +fetch+, +write+, +read+, +exist?+, and +delete+. + # + # ActiveSupport::Cache::Store can store any serializable Ruby object. + # + # cache = ActiveSupport::Cache::MemoryStore.new + # + # cache.read('city') # => nil + # cache.write('city', "Duckburgh") + # cache.read('city') # => "Duckburgh" + # + # Keys are always translated into Strings and are case sensitive. When an + # object is specified as a key and has a +cache_key+ method defined, this + # method will be called to define the key. Otherwise, the +to_param+ + # method will be called. Hashes and Arrays can also be used as keys. The + # elements will be delimited by slashes, and the elements within a Hash + # will be sorted by key so they are consistent. + # + # cache.read('city') == cache.read(:city) # => true + # + # Nil values can be cached. + # + # If your cache is on a shared infrastructure, you can define a namespace + # for your cache entries. If a namespace is defined, it will be prefixed on + # to every key. The namespace can be either a static value or a Proc. If it + # is a Proc, it will be invoked when each key is evaluated so that you can + # use application logic to invalidate keys. + # + # cache.namespace = -> { @last_mod_time } # Set the namespace to a variable + # @last_mod_time = Time.now # Invalidate the entire cache by changing namespace + # + # Cached data larger than 1kB are compressed by default. To turn off + # compression, pass compress: false to the initializer or to + # individual +fetch+ or +write+ method calls. The 1kB compression + # threshold is configurable with the :compress_threshold option, + # specified in bytes. + class Store + cattr_accessor :logger, instance_writer: true + + attr_reader :silence, :options + alias :silence? :silence + + class << self + private + def retrieve_pool_options(options) + {}.tap do |pool_options| + pool_options[:size] = options.delete(:pool_size) if options[:pool_size] + pool_options[:timeout] = options.delete(:pool_timeout) if options[:pool_timeout] + end + end + + def ensure_connection_pool_added! + require "connection_pool" + rescue LoadError => e + $stderr.puts "You don't have connection_pool installed in your application. Please add it to your Gemfile and run bundle install" + raise e + end + end + + # Creates a new cache. The options will be passed to any write method calls + # except for :namespace which can be used to set the global + # namespace for the cache. + def initialize(options = nil) + @options = options ? options.dup : {} + end + + # Silences the logger. + def silence! + @silence = true + self + end + + # Silences the logger within a block. + def mute + previous_silence, @silence = defined?(@silence) && @silence, true + yield + ensure + @silence = previous_silence + end + + # Fetches data from the cache, using the given key. If there is data in + # the cache with the given key, then that data is returned. + # + # If there is no such data in the cache (a cache miss), then +nil+ will be + # returned. However, if a block has been passed, that block will be passed + # the key and executed in the event of a cache miss. The return value of the + # block will be written to the cache under the given cache key, and that + # return value will be returned. 
+ # + # cache.write('today', 'Monday') + # cache.fetch('today') # => "Monday" + # + # cache.fetch('city') # => nil + # cache.fetch('city') do + # 'Duckburgh' + # end + # cache.fetch('city') # => "Duckburgh" + # + # You may also specify additional options via the +options+ argument. + # Setting force: true forces a cache "miss," meaning we treat + # the cache value as missing even if it's present. Passing a block is + # required when +force+ is true so this always results in a cache write. + # + # cache.write('today', 'Monday') + # cache.fetch('today', force: true) { 'Tuesday' } # => 'Tuesday' + # cache.fetch('today', force: true) # => ArgumentError + # + # The +:force+ option is useful when you're calling some other method to + # ask whether you should force a cache write. Otherwise, it's clearer to + # just call Cache#write. + # + # Setting compress: false disables compression of the cache entry. + # + # Setting :expires_in will set an expiration time on the cache. + # All caches support auto-expiring content after a specified number of + # seconds. This value can be specified as an option to the constructor + # (in which case all entries will be affected), or it can be supplied to + # the +fetch+ or +write+ method to effect just one entry. + # + # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 5.minutes) + # cache.write(key, value, expires_in: 1.minute) # Set a lower value for one entry + # + # Setting :version verifies the cache stored under name + # is of the same version. nil is returned on mismatches despite contents. + # This feature is used to support recyclable cache keys. + # + # Setting :race_condition_ttl is very useful in situations where + # a cache entry is used very frequently and is under heavy load. If a + # cache expires and due to heavy load several different processes will try + # to read data natively and then they all will try to write to cache. To + # avoid that case the first process to find an expired cache entry will + # bump the cache expiration time by the value set in :race_condition_ttl. + # Yes, this process is extending the time for a stale value by another few + # seconds. Because of extended life of the previous cache, other processes + # will continue to use slightly stale data for a just a bit longer. In the + # meantime that first process will go ahead and will write into cache the + # new value. After that all the processes will start getting the new value. + # The key is to keep :race_condition_ttl small. + # + # If the process regenerating the entry errors out, the entry will be + # regenerated after the specified number of seconds. Also note that the + # life of stale cache is extended only if it expired recently. Otherwise + # a new value is generated and :race_condition_ttl does not play + # any role. + # + # # Set all values to expire after one minute. 
+ # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 1.minute) + # + # cache.write('foo', 'original value') + # val_1 = nil + # val_2 = nil + # sleep 60 + # + # Thread.new do + # val_1 = cache.fetch('foo', race_condition_ttl: 10.seconds) do + # sleep 1 + # 'new value 1' + # end + # end + # + # Thread.new do + # val_2 = cache.fetch('foo', race_condition_ttl: 10.seconds) do + # 'new value 2' + # end + # end + # + # cache.fetch('foo') # => "original value" + # sleep 10 # First thread extended the life of cache by another 10 seconds + # cache.fetch('foo') # => "new value 1" + # val_1 # => "new value 1" + # val_2 # => "original value" + # + # Other options will be handled by the specific cache store implementation. + # Internally, #fetch calls #read_entry, and calls #write_entry on a cache + # miss. +options+ will be passed to the #read and #write calls. + # + # For example, MemCacheStore's #write method supports the +:raw+ + # option, which tells the memcached server to store all values as strings. + # We can use this option with #fetch too: + # + # cache = ActiveSupport::Cache::MemCacheStore.new + # cache.fetch("foo", force: true, raw: true) do + # :bar + # end + # cache.fetch('foo') # => "bar" + def fetch(name, options = nil) + if block_given? + options = merged_options(options) + key = normalize_key(name, options) + + entry = nil + instrument(:read, name, options) do |payload| + cached_entry = read_entry(key, options) unless options[:force] + entry = handle_expired_entry(cached_entry, key, options) + entry = nil if entry && entry.mismatched?(normalize_version(name, options)) + payload[:super_operation] = :fetch if payload + payload[:hit] = !!entry if payload + end + + if entry + get_entry_value(entry, name, options) + else + save_block_result_to_cache(name, options) { |_name| yield _name } + end + elsif options && options[:force] + raise ArgumentError, "Missing block: Calling `Cache#fetch` with `force: true` requires a block." + else + read(name, options) + end + end + + # Reads data from the cache, using the given key. If there is data in + # the cache with the given key, then that data is returned. Otherwise, + # +nil+ is returned. + # + # Note, if data was written with the :expires_in or :version options, + # both of these conditions are applied before the data is returned. + # + # Options are passed to the underlying cache implementation. + def read(name, options = nil) + options = merged_options(options) + key = normalize_key(name, options) + version = normalize_version(name, options) + + instrument(:read, name, options) do |payload| + entry = read_entry(key, options) + + if entry + if entry.expired? + delete_entry(key, options) + payload[:hit] = false if payload + nil + elsif entry.mismatched?(version) + payload[:hit] = false if payload + nil + else + payload[:hit] = true if payload + entry.value + end + else + payload[:hit] = false if payload + nil + end + end + end + + # Reads multiple values at once from the cache. Options can be passed + # in the last argument. + # + # Some cache implementation may optimize this method. + # + # Returns a hash mapping the names provided to the values found. + def read_multi(*names) + options = names.extract_options! + options = merged_options(options) + + instrument :read_multi, names, options do |payload| + read_multi_entries(names, options).tap do |results| + payload[:hits] = results.keys + end + end + end + + # Cache Storage API to write multiple values at once. 
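      # For example (a brief usage sketch):
      #
      #   cache.write_multi({ "rate/usd" => 1.0, "rate/eur" => 0.9 }, expires_in: 10.minutes)
      #
      # Each value is wrapped in an Entry with the merged options before being
      # handed to +write_multi_entries+, which stores may override for one-shot
      # bulk writes.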
+ def write_multi(hash, options = nil) + options = merged_options(options) + + instrument :write_multi, hash, options do |payload| + entries = hash.each_with_object({}) do |(name, value), memo| + memo[normalize_key(name, options)] = Entry.new(value, options.merge(version: normalize_version(name, options))) + end + + write_multi_entries entries, options + end + end + + # Fetches data from the cache, using the given keys. If there is data in + # the cache with the given keys, then that data is returned. Otherwise, + # the supplied block is called for each key for which there was no data, + # and the result will be written to the cache and returned. + # Therefore, you need to pass a block that returns the data to be written + # to the cache. If you do not want to write the cache when the cache is + # not found, use #read_multi. + # + # Options are passed to the underlying cache implementation. + # + # Returns a hash with the data for each of the names. For example: + # + # cache.write("bim", "bam") + # cache.fetch_multi("bim", "unknown_key") do |key| + # "Fallback value for key: #{key}" + # end + # # => { "bim" => "bam", + # # "unknown_key" => "Fallback value for key: unknown_key" } + # + def fetch_multi(*names) + raise ArgumentError, "Missing block: `Cache#fetch_multi` requires a block." unless block_given? + + options = names.extract_options! + options = merged_options(options) + + instrument :read_multi, names, options do |payload| + read_multi_entries(names, options).tap do |results| + payload[:hits] = results.keys + payload[:super_operation] = :fetch_multi + + writes = {} + + (names - results.keys).each do |name| + results[name] = writes[name] = yield(name) + end + + write_multi writes, options + end + end + end + + # Writes the value to the cache, with the key. + # + # Options are passed to the underlying cache implementation. + def write(name, value, options = nil) + options = merged_options(options) + + instrument(:write, name, options) do + entry = Entry.new(value, options.merge(version: normalize_version(name, options))) + write_entry(normalize_key(name, options), entry, options) + end + end + + # Deletes an entry in the cache. Returns +true+ if an entry is deleted. + # + # Options are passed to the underlying cache implementation. + def delete(name, options = nil) + options = merged_options(options) + + instrument(:delete, name) do + delete_entry(normalize_key(name, options), options) + end + end + + # Returns +true+ if the cache contains an entry for the given key. + # + # Options are passed to the underlying cache implementation. + def exist?(name, options = nil) + options = merged_options(options) + + instrument(:exist?, name) do + entry = read_entry(normalize_key(name, options), options) + (entry && !entry.expired? && !entry.mismatched?(normalize_version(name, options))) || false + end + end + + # Deletes all entries with keys matching the pattern. + # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def delete_matched(matcher, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support delete_matched") + end + + # Increments an integer value in the cache. + # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def increment(name, amount = 1, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support increment") + end + + # Decrements an integer value in the cache. 
+ # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def decrement(name, amount = 1, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support decrement") + end + + # Cleanups the cache by removing expired entries. + # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def cleanup(options = nil) + raise NotImplementedError.new("#{self.class.name} does not support cleanup") + end + + # Clears the entire cache. Be careful with this method since it could + # affect other processes if shared cache is being used. + # + # The options hash is passed to the underlying cache implementation. + # + # All implementations may not support this method. + def clear(options = nil) + raise NotImplementedError.new("#{self.class.name} does not support clear") + end + + private + # Adds the namespace defined in the options to a pattern designed to + # match keys. Implementations that support delete_matched should call + # this method to translate a pattern that matches names into one that + # matches namespaced keys. + def key_matcher(pattern, options) # :doc: + prefix = options[:namespace].is_a?(Proc) ? options[:namespace].call : options[:namespace] + if prefix + source = pattern.source + if source.start_with?("^") + source = source[1, source.length] + else + source = ".*#{source[0, source.length]}" + end + Regexp.new("^#{Regexp.escape(prefix)}:#{source}", pattern.options) + else + pattern + end + end + + # Reads an entry from the cache implementation. Subclasses must implement + # this method. + def read_entry(key, options) + raise NotImplementedError.new + end + + # Writes an entry to the cache implementation. Subclasses must implement + # this method. + def write_entry(key, entry, options) + raise NotImplementedError.new + end + + # Reads multiple entries from the cache implementation. Subclasses MAY + # implement this method. + def read_multi_entries(names, options) + results = {} + names.each do |name| + key = normalize_key(name, options) + version = normalize_version(name, options) + entry = read_entry(key, options) + + if entry + if entry.expired? + delete_entry(key, options) + elsif entry.mismatched?(version) + # Skip mismatched versions + else + results[name] = entry.value + end + end + end + results + end + + # Writes multiple entries to the cache implementation. Subclasses MAY + # implement this method. + def write_multi_entries(hash, options) + hash.each do |key, entry| + write_entry key, entry, options + end + end + + # Deletes an entry from the cache implementation. Subclasses must + # implement this method. + def delete_entry(key, options) + raise NotImplementedError.new + end + + # Merges the default options with ones specific to a method call. + def merged_options(call_options) + if call_options + options.merge(call_options) + else + options.dup + end + end + + # Expands and namespaces the cache key. May be overridden by + # cache stores to do additional normalization. 
+ def normalize_key(key, options = nil) + namespace_key expanded_key(key), options + end + + # Prefix the key with a namespace string: + # + # namespace_key 'foo', namespace: 'cache' + # # => 'cache:foo' + # + # With a namespace block: + # + # namespace_key 'foo', namespace: -> { 'cache' } + # # => 'cache:foo' + def namespace_key(key, options = nil) + options = merged_options(options) + namespace = options[:namespace] + + if namespace.respond_to?(:call) + namespace = namespace.call + end + + if namespace + "#{namespace}:#{key}" + else + key + end + end + + # Expands key to be a consistent string value. Invokes +cache_key+ if + # object responds to +cache_key+. Otherwise, +to_param+ method will be + # called. If the key is a Hash, then keys will be sorted alphabetically. + def expanded_key(key) + return key.cache_key.to_s if key.respond_to?(:cache_key) + + case key + when Array + if key.size > 1 + key = key.collect { |element| expanded_key(element) } + else + key = key.first + end + when Hash + key = key.sort_by { |k, _| k.to_s }.collect { |k, v| "#{k}=#{v}" } + end + + key.to_param + end + + def normalize_version(key, options = nil) + (options && options[:version].try(:to_param)) || expanded_version(key) + end + + def expanded_version(key) + case + when key.respond_to?(:cache_version) then key.cache_version.to_param + when key.is_a?(Array) then key.map { |element| expanded_version(element) }.compact.to_param + when key.respond_to?(:to_a) then expanded_version(key.to_a) + end + end + + def instrument(operation, key, options = nil) + log { "Cache #{operation}: #{normalize_key(key, options)}#{options.blank? ? "" : " (#{options.inspect})"}" } + + payload = { key: key } + payload.merge!(options) if options.is_a?(Hash) + ActiveSupport::Notifications.instrument("cache_#{operation}.active_support", payload) { yield(payload) } + end + + def log + return unless logger && logger.debug? && !silence? + logger.debug(yield) + end + + def handle_expired_entry(entry, key, options) + if entry && entry.expired? + race_ttl = options[:race_condition_ttl].to_i + if (race_ttl > 0) && (Time.now.to_f - entry.expires_at <= race_ttl) + # When an entry has a positive :race_condition_ttl defined, put the stale entry back into the cache + # for a brief period while the entry is being recalculated. + entry.expires_at = Time.now + race_ttl + write_entry(key, entry, expires_in: race_ttl * 2) + else + delete_entry(key, options) + end + entry = nil + end + entry + end + + def get_entry_value(entry, name, options) + instrument(:fetch_hit, name, options) {} + entry.value + end + + def save_block_result_to_cache(name, options) + result = instrument(:generate, name, options) do + yield(name) + end + + write(name, result, options) + result + end + end + + # This class is used to represent cache entries. Cache entries have a value, an optional + # expiration time, and an optional version. The expiration time is used to support the :race_condition_ttl option + # on the cache. The version is used to support the :version option on the cache for rejecting + # mismatches. + # + # Since cache entries in most instances will be serialized, the internals of this class are highly optimized + # using short instance variable names that are lazily defined. + class Entry # :nodoc: + attr_reader :version + + DEFAULT_COMPRESS_LIMIT = 1.kilobyte + + # Creates a new cache entry for the specified value. Options supported are + # +:compress+, +:compress_threshold+, +:version+ and +:expires_in+. 
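The key-expansion rules above (arrays joined, hash keys sorted, namespace prefixed) are easiest to observe through the public API. A small sketch using a MemoryStore and made-up keys.

require "active_support"
require "active_support/cache"

cache = ActiveSupport::Cache::MemoryStore.new(namespace: "app")

cache.write(["users", 5], "alice")      # expands to the namespaced key "app:users/5"
cache.read(["users", 5])                # => "alice"

cache.write({ b: 2, a: 1 }, "sorted")   # hash keys are sorted, so ordering is irrelevant
cache.read({ a: 1, b: 2 })              # => "sorted"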
+ def initialize(value, compress: true, compress_threshold: DEFAULT_COMPRESS_LIMIT, version: nil, expires_in: nil, **) + @value = value + @version = version + @created_at = Time.now.to_f + @expires_in = expires_in && expires_in.to_f + + compress!(compress_threshold) if compress + end + + def value + compressed? ? uncompress(@value) : @value + end + + def mismatched?(version) + @version && version && @version != version + end + + # Checks if the entry is expired. The +expires_in+ parameter can override + # the value set when the entry was created. + def expired? + @expires_in && @created_at + @expires_in <= Time.now.to_f + end + + def expires_at + @expires_in ? @created_at + @expires_in : nil + end + + def expires_at=(value) + if value + @expires_in = value.to_f - @created_at + else + @expires_in = nil + end + end + + # Returns the size of the cached value. This could be less than + # value.size if the data is compressed. + def size + case value + when NilClass + 0 + when String + @value.bytesize + else + @s ||= Marshal.dump(@value).bytesize + end + end + + # Duplicates the value in a class. This is used by cache implementations that don't natively + # serialize entries to protect against accidental cache modifications. + def dup_value! + if @value && !compressed? && !(@value.is_a?(Numeric) || @value == true || @value == false) + if @value.is_a?(String) + @value = @value.dup + else + @value = Marshal.load(Marshal.dump(@value)) + end + end + end + + private + def compress!(compress_threshold) + case @value + when nil, true, false, Numeric + uncompressed_size = 0 + when String + uncompressed_size = @value.bytesize + else + serialized = Marshal.dump(@value) + uncompressed_size = serialized.bytesize + end + + if uncompressed_size >= compress_threshold + serialized ||= Marshal.dump(@value) + compressed = Zlib::Deflate.deflate(serialized) + + if compressed.bytesize < uncompressed_size + @value = compressed + @compressed = true + end + end + end + + def compressed? + defined?(@compressed) + end + + def uncompress(value) + Marshal.load(Zlib::Inflate.inflate(value)) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/file_store.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/file_store.rb new file mode 100644 index 0000000..a0f44aa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/file_store.rb @@ -0,0 +1,196 @@ +# frozen_string_literal: true + +require "active_support/core_ext/marshal" +require "active_support/core_ext/file/atomic" +require "active_support/core_ext/string/conversions" +require "uri/common" + +module ActiveSupport + module Cache + # A cache store implementation which stores everything on the filesystem. + # + # FileStore implements the Strategy::LocalCache strategy which implements + # an in-memory cache inside of a block. + class FileStore < Store + prepend Strategy::LocalCache + attr_reader :cache_path + + DIR_FORMATTER = "%03X" + FILENAME_MAX_SIZE = 228 # max filename size on file system is 255, minus room for timestamp and random characters appended by Tempfile (used by atomic write) + FILEPATH_MAX_SIZE = 900 # max is 1024, plus some room + EXCLUDED_DIRS = [".", ".."].freeze + GITKEEP_FILES = [".gitkeep", ".keep"].freeze + + def initialize(cache_path, options = nil) + super(options) + @cache_path = cache_path.to_s + end + + # Deletes all items from the cache. 
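Compression is governed entirely by the :compress/:compress_threshold options handled in Entry above. A sketch with an artificially low threshold so the effect is visible; the default threshold is 1 kilobyte.

require "active_support"
require "active_support/cache"

cache = ActiveSupport::Cache::MemoryStore.new(compress: true, compress_threshold: 64)

cache.write("big", "x" * 1_000)   # over the 64-byte threshold: deflated before storage
cache.write("tiny", "x")          # under the threshold: stored as-is
cache.read("big")                 # => the original 1000-character string, inflated on read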
In this case it deletes all the entries in the specified + # file store directory except for .keep or .gitkeep. Be careful which directory is specified in your + # config file when using +FileStore+ because everything in that directory will be deleted. + def clear(options = nil) + root_dirs = exclude_from(cache_path, EXCLUDED_DIRS + GITKEEP_FILES) + FileUtils.rm_r(root_dirs.collect { |f| File.join(cache_path, f) }) + rescue Errno::ENOENT + end + + # Preemptively iterates through all stored keys and removes the ones which have expired. + def cleanup(options = nil) + options = merged_options(options) + search_dir(cache_path) do |fname| + entry = read_entry(fname, options) + delete_entry(fname, options) if entry && entry.expired? + end + end + + # Increments an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def increment(name, amount = 1, options = nil) + modify_value(name, amount, options) + end + + # Decrements an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def decrement(name, amount = 1, options = nil) + modify_value(name, -amount, options) + end + + def delete_matched(matcher, options = nil) + options = merged_options(options) + instrument(:delete_matched, matcher.inspect) do + matcher = key_matcher(matcher, options) + search_dir(cache_path) do |path| + key = file_path_key(path) + delete_entry(path, options) if key.match(matcher) + end + end + end + + private + + def read_entry(key, options) + if File.exist?(key) + File.open(key) { |f| Marshal.load(f) } + end + rescue => e + logger.error("FileStoreError (#{e}): #{e.message}") if logger + nil + end + + def write_entry(key, entry, options) + return false if options[:unless_exist] && File.exist?(key) + ensure_cache_path(File.dirname(key)) + File.atomic_write(key, cache_path) { |f| Marshal.dump(entry, f) } + true + end + + def delete_entry(key, options) + if File.exist?(key) + begin + File.delete(key) + delete_empty_directories(File.dirname(key)) + true + rescue => e + # Just in case the error was caused by another process deleting the file first. + raise e if File.exist?(key) + false + end + end + end + + # Lock a file for a block so only one process can modify it at a time. + def lock_file(file_name, &block) + if File.exist?(file_name) + File.open(file_name, "r+") do |f| + begin + f.flock File::LOCK_EX + yield + ensure + f.flock File::LOCK_UN + end + end + else + yield + end + end + + # Translate a key into a file path. + def normalize_key(key, options) + key = super + fname = URI.encode_www_form_component(key) + + if fname.size > FILEPATH_MAX_SIZE + fname = ActiveSupport::Digest.hexdigest(key) + end + + hash = Zlib.adler32(fname) + hash, dir_1 = hash.divmod(0x1000) + dir_2 = hash.modulo(0x1000) + fname_paths = [] + + # Make sure file name doesn't exceed file system limits. + begin + fname_paths << fname[0, FILENAME_MAX_SIZE] + fname = fname[FILENAME_MAX_SIZE..-1] + end until fname.blank? + + File.join(cache_path, DIR_FORMATTER % dir_1, DIR_FORMATTER % dir_2, *fname_paths) + end + + # Translate a file path into a key. + def file_path_key(path) + fname = path[cache_path.to_s.size..-1].split(File::SEPARATOR, 4).last + URI.decode_www_form_component(fname, Encoding::UTF_8) + end + + # Delete empty directories in the cache. + def delete_empty_directories(dir) + return if File.realpath(dir) == File.realpath(cache_path) + if exclude_from(dir, EXCLUDED_DIRS).empty? 
+ Dir.delete(dir) rescue nil + delete_empty_directories(File.dirname(dir)) + end + end + + # Make sure a file path's directories exist. + def ensure_cache_path(path) + FileUtils.makedirs(path) unless File.exist?(path) + end + + def search_dir(dir, &callback) + return if !File.exist?(dir) + Dir.foreach(dir) do |d| + next if EXCLUDED_DIRS.include?(d) + name = File.join(dir, d) + if File.directory?(name) + search_dir(name, &callback) + else + callback.call name + end + end + end + + # Modifies the amount of an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def modify_value(name, amount, options) + file_name = normalize_key(name, options) + + lock_file(file_name) do + options = merged_options(options) + + if num = read(name, options) + num = num.to_i + amount + write(name, num, options) + num + end + end + end + + # Exclude entries from source directory + def exclude_from(source, excludes) + Dir.entries(source).reject { |f| excludes.include?(f) } + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/mem_cache_store.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/mem_cache_store.rb new file mode 100644 index 0000000..22c47e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/mem_cache_store.rb @@ -0,0 +1,197 @@ +# frozen_string_literal: true + +begin + require "dalli" +rescue LoadError => e + $stderr.puts "You don't have dalli installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end + +require "active_support/core_ext/array/extract_options" + +module ActiveSupport + module Cache + # A cache store implementation which stores data in Memcached: + # https://memcached.org + # + # This is currently the most popular cache store for production websites. + # + # Special features: + # - Clustering and load balancing. One can specify multiple memcached servers, + # and MemCacheStore will load balance between all available servers. If a + # server goes down, then MemCacheStore will ignore it until it comes back up. + # + # MemCacheStore implements the Strategy::LocalCache strategy which implements + # an in-memory cache inside of a block. + class MemCacheStore < Store + # Provide support for raw values in the local cache strategy. + module LocalCacheWithRaw # :nodoc: + private + def write_entry(key, entry, options) + if options[:raw] && local_cache + raw_entry = Entry.new(entry.value.to_s) + raw_entry.expires_at = entry.expires_at + super(key, raw_entry, options) + else + super + end + end + end + + prepend Strategy::LocalCache + prepend LocalCacheWithRaw + + ESCAPE_KEY_CHARS = /[\x00-\x20%\x7F-\xFF]/n + + # Creates a new Dalli::Client instance with specified addresses and options. + # By default address is equal localhost:11211. + # + # ActiveSupport::Cache::MemCacheStore.build_mem_cache + # # => # + # ActiveSupport::Cache::MemCacheStore.build_mem_cache('localhost:10290') + # # => # + def self.build_mem_cache(*addresses) # :nodoc: + addresses = addresses.flatten + options = addresses.extract_options! + addresses = ["localhost:11211"] if addresses.empty? + pool_options = retrieve_pool_options(options) + + if pool_options.empty? + Dalli::Client.new(addresses, options) + else + ensure_connection_pool_added! 
+ ConnectionPool.new(pool_options) { Dalli::Client.new(addresses, options.merge(threadsafe: false)) } + end + end + + # Creates a new MemCacheStore object, with the given memcached server + # addresses. Each address is either a host name, or a host-with-port string + # in the form of "host_name:port". For example: + # + # ActiveSupport::Cache::MemCacheStore.new("localhost", "server-downstairs.localnetwork:8229") + # + # If no addresses are specified, then MemCacheStore will connect to + # localhost port 11211 (the default memcached port). + def initialize(*addresses) + addresses = addresses.flatten + options = addresses.extract_options! + super(options) + + unless [String, Dalli::Client, NilClass].include?(addresses.first.class) + raise ArgumentError, "First argument must be an empty array, an array of hosts or a Dalli::Client instance." + end + if addresses.first.is_a?(Dalli::Client) + @data = addresses.first + else + mem_cache_options = options.dup + UNIVERSAL_OPTIONS.each { |name| mem_cache_options.delete(name) } + @data = self.class.build_mem_cache(*(addresses + [mem_cache_options])) + end + end + + # Increment a cached value. This method uses the memcached incr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + def increment(name, amount = 1, options = nil) + options = merged_options(options) + instrument(:increment, name, amount: amount) do + rescue_error_with nil do + @data.with { |c| c.incr(normalize_key(name, options), amount, options[:expires_in]) } + end + end + end + + # Decrement a cached value. This method uses the memcached decr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + def decrement(name, amount = 1, options = nil) + options = merged_options(options) + instrument(:decrement, name, amount: amount) do + rescue_error_with nil do + @data.with { |c| c.decr(normalize_key(name, options), amount, options[:expires_in]) } + end + end + end + + # Clear the entire cache on all memcached servers. This method should + # be used with care when shared cache is being used. + def clear(options = nil) + rescue_error_with(nil) { @data.with { |c| c.flush_all } } + end + + # Get the statistics from the memcached servers. + def stats + @data.with { |c| c.stats } + end + + private + # Read an entry from the cache. + def read_entry(key, options) + rescue_error_with(nil) { deserialize_entry(@data.with { |c| c.get(key, options) }) } + end + + # Write an entry to the cache. + def write_entry(key, entry, options) + method = options && options[:unless_exist] ? :add : :set + value = options[:raw] ? entry.value.to_s : entry + expires_in = options[:expires_in].to_i + if expires_in > 0 && !options[:raw] + # Set the memcache expire a few minutes in the future to support race condition ttls on read + expires_in += 5.minutes + end + rescue_error_with false do + @data.with { |c| c.send(method, key, value, expires_in, options) } + end + end + + # Reads multiple entries from the cache implementation. + def read_multi_entries(names, options) + keys_to_names = Hash[names.map { |name| [normalize_key(name, options), name] }] + + raw_values = @data.with { |c| c.get_multi(keys_to_names.keys) } + values = {} + + raw_values.each do |key, value| + entry = deserialize_entry(value) + + unless entry.expired? 
|| entry.mismatched?(normalize_version(keys_to_names[key], options)) + values[keys_to_names[key]] = entry.value + end + end + + values + end + + # Delete an entry from the cache. + def delete_entry(key, options) + rescue_error_with(false) { @data.with { |c| c.delete(key) } } + end + + # Memcache keys are binaries. So we need to force their encoding to binary + # before applying the regular expression to ensure we are escaping all + # characters properly. + def normalize_key(key, options) + key = super.dup + key = key.force_encoding(Encoding::ASCII_8BIT) + key = key.gsub(ESCAPE_KEY_CHARS) { |match| "%#{match.getbyte(0).to_s(16).upcase}" } + key = "#{key[0, 213]}:md5:#{ActiveSupport::Digest.hexdigest(key)}" if key.size > 250 + key + end + + def deserialize_entry(entry) + if entry + entry.is_a?(Entry) ? entry : Entry.new(entry) + end + end + + def rescue_error_with(fallback) + yield + rescue Dalli::DalliError => e + logger.error("DalliError (#{e}): #{e.message}") if logger + fallback + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/memory_store.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/memory_store.rb new file mode 100644 index 0000000..564ac17 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/memory_store.rb @@ -0,0 +1,169 @@ +# frozen_string_literal: true + +require "monitor" + +module ActiveSupport + module Cache + # A cache store implementation which stores everything into memory in the + # same process. If you're running multiple Ruby on Rails server processes + # (which is the case if you're using Phusion Passenger or puma clustered mode), + # then this means that Rails server process instances won't be able + # to share cache data with each other and this may not be the most + # appropriate cache in that scenario. + # + # This cache has a bounded size specified by the :size options to the + # initializer (default is 32Mb). When the cache exceeds the allotted size, + # a cleanup will occur which tries to prune the cache down to three quarters + # of the maximum size by removing the least recently used entries. + # + # MemoryStore is thread-safe. + class MemoryStore < Store + def initialize(options = nil) + options ||= {} + super(options) + @data = {} + @key_access = {} + @max_size = options[:size] || 32.megabytes + @max_prune_time = options[:max_prune_time] || 2 + @cache_size = 0 + @monitor = Monitor.new + @pruning = false + end + + # Delete all data stored in a given cache store. + def clear(options = nil) + synchronize do + @data.clear + @key_access.clear + @cache_size = 0 + end + end + + # Preemptively iterates through all stored keys and removes the ones which have expired. + def cleanup(options = nil) + options = merged_options(options) + instrument(:cleanup, size: @data.size) do + keys = synchronize { @data.keys } + keys.each do |key| + entry = @data[key] + delete_entry(key, options) if entry && entry.expired? + end + end + end + + # To ensure entries fit within the specified memory prune the cache by removing the least + # recently accessed entries. + def prune(target_size, max_time = nil) + return if pruning? 
+ @pruning = true + begin + start_time = Time.now + cleanup + instrument(:prune, target_size, from: @cache_size) do + keys = synchronize { @key_access.keys.sort { |a, b| @key_access[a].to_f <=> @key_access[b].to_f } } + keys.each do |key| + delete_entry(key, options) + return if @cache_size <= target_size || (max_time && Time.now - start_time > max_time) + end + end + ensure + @pruning = false + end + end + + # Returns true if the cache is currently being pruned. + def pruning? + @pruning + end + + # Increment an integer value in the cache. + def increment(name, amount = 1, options = nil) + modify_value(name, amount, options) + end + + # Decrement an integer value in the cache. + def decrement(name, amount = 1, options = nil) + modify_value(name, -amount, options) + end + + # Deletes cache entries if the cache key matches a given pattern. + def delete_matched(matcher, options = nil) + options = merged_options(options) + instrument(:delete_matched, matcher.inspect) do + matcher = key_matcher(matcher, options) + keys = synchronize { @data.keys } + keys.each do |key| + delete_entry(key, options) if key.match(matcher) + end + end + end + + def inspect # :nodoc: + "<##{self.class.name} entries=#{@data.size}, size=#{@cache_size}, options=#{@options.inspect}>" + end + + # Synchronize calls to the cache. This should be called wherever the underlying cache implementation + # is not thread safe. + def synchronize(&block) # :nodoc: + @monitor.synchronize(&block) + end + + private + + PER_ENTRY_OVERHEAD = 240 + + def cached_size(key, entry) + key.to_s.bytesize + entry.size + PER_ENTRY_OVERHEAD + end + + def read_entry(key, options) + entry = @data[key] + synchronize do + if entry + @key_access[key] = Time.now.to_f + else + @key_access.delete(key) + end + end + entry + end + + def write_entry(key, entry, options) + entry.dup_value! + synchronize do + old_entry = @data[key] + return false if @data.key?(key) && options[:unless_exist] + if old_entry + @cache_size -= (old_entry.size - entry.size) + else + @cache_size += cached_size(key, entry) + end + @key_access[key] = Time.now.to_f + @data[key] = entry + prune(@max_size * 0.75, @max_prune_time) if @cache_size > @max_size + true + end + end + + def delete_entry(key, options) + synchronize do + @key_access.delete(key) + entry = @data.delete(key) + @cache_size -= cached_size(key, entry) if entry + !!entry + end + end + + def modify_value(name, amount, options) + synchronize do + options = merged_options(options) + if num = read(name, options) + num = num.to_i + amount + write(name, num, options) + num + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/null_store.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/null_store.rb new file mode 100644 index 0000000..1a5983d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/null_store.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module ActiveSupport + module Cache + # A cache store implementation which doesn't actually store anything. Useful in + # development and test environments where you don't want caching turned on but + # need to go through the caching interface. + # + # This cache does implement the local cache strategy, so values will actually + # be cached inside blocks that utilize this strategy. See + # ActiveSupport::Cache::Strategy::LocalCache for more details. 
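A short usage sketch for MemoryStore as implemented above; the :size value and keys are made up, and most applications keep the 32 MB default.

require "active_support"
require "active_support/cache"

store = ActiveSupport::Cache::MemoryStore.new(size: 4096)   # bytes; pruning kicks in past this

store.write("counter", 0)
store.increment("counter")       # => 1
store.decrement("counter", 2)    # => -1
store.clear
store.read("counter")            # => nil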
+ class NullStore < Store + prepend Strategy::LocalCache + + def clear(options = nil) + end + + def cleanup(options = nil) + end + + def increment(name, amount = 1, options = nil) + end + + def decrement(name, amount = 1, options = nil) + end + + def delete_matched(matcher, options = nil) + end + + private + def read_entry(key, options) + end + + def write_entry(key, entry, options) + true + end + + def delete_entry(key, options) + false + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/redis_cache_store.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/redis_cache_store.rb new file mode 100644 index 0000000..825fc9f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/redis_cache_store.rb @@ -0,0 +1,466 @@ +# frozen_string_literal: true + +begin + gem "redis", ">= 4.0.1" + require "redis" + require "redis/distributed" +rescue LoadError + warn "The Redis cache store requires the redis gem, version 4.0.1 or later. Please add it to your Gemfile: `gem \"redis\", \"~> 4.0\"`" + raise +end + +# Prefer the hiredis driver but don't require it. +begin + require "redis/connection/hiredis" +rescue LoadError +end + +require "digest/sha2" +require "active_support/core_ext/marshal" +require "active_support/core_ext/hash/transform_values" + +module ActiveSupport + module Cache + module ConnectionPoolLike + def with + yield self + end + end + + ::Redis.include(ConnectionPoolLike) + ::Redis::Distributed.include(ConnectionPoolLike) + + # Redis cache store. + # + # Deployment note: Take care to use a *dedicated Redis cache* rather + # than pointing this at your existing Redis server. It won't cope well + # with mixed usage patterns and it won't expire cache entries by default. + # + # Redis cache server setup guide: https://redis.io/topics/lru-cache + # + # * Supports vanilla Redis, hiredis, and Redis::Distributed. + # * Supports Memcached-like sharding across Redises with Redis::Distributed. + # * Fault tolerant. If the Redis server is unavailable, no exceptions are + # raised. Cache fetches are all misses and writes are dropped. + # * Local cache. Hot in-memory primary cache within block/middleware scope. + # * +read_multi+ and +write_multi+ support for Redis mget/mset. Use Redis::Distributed + # 4.0.1+ for distributed mget support. + # * +delete_matched+ support for Redis KEYS globs. + class RedisCacheStore < Store + # Keys are truncated with their own SHA2 digest if they exceed 1kB + MAX_KEY_BYTESIZE = 1024 + + DEFAULT_REDIS_OPTIONS = { + connect_timeout: 20, + read_timeout: 1, + write_timeout: 1, + reconnect_attempts: 0, + } + + DEFAULT_ERROR_HANDLER = -> (method:, returning:, exception:) do + if logger + logger.error { "RedisCacheStore: #{method} failed, returned #{returning.inspect}: #{exception.class}: #{exception.message}" } + end + end + + # The maximum number of entries to receive per SCAN call. + SCAN_BATCH_SIZE = 1000 + private_constant :SCAN_BATCH_SIZE + + # Support raw values in the local cache strategy. 
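A hedged configuration sketch for the Redis store described above. It assumes the redis gem (>= 4.0.1) is installed and a server is reachable; the URL, namespace, TTL and handler body are all invented for illustration.

require "active_support"
require "active_support/cache"

cache = ActiveSupport::Cache::RedisCacheStore.new(
  url: ENV.fetch("REDIS_URL", "redis://localhost:6379/1"),
  namespace: "myapp-cache",                 # recommended when the Redis server is shared
  expires_in: 3600,                         # seconds; no expiry is set by default
  error_handler: ->(method:, returning:, exception:) {
    warn "cache #{method} failed: #{exception.class}"    # invoked instead of raising
  }
)

cache.write("greeting", "hello")
cache.read("greeting")    # => "hello", or nil if Redis is unreachable (reads fail soft)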
+ module LocalCacheWithRaw # :nodoc: + private + def write_entry(key, entry, options) + if options[:raw] && local_cache + raw_entry = Entry.new(serialize_entry(entry, raw: true)) + raw_entry.expires_at = entry.expires_at + super(key, raw_entry, options) + else + super + end + end + + def write_multi_entries(entries, options) + if options[:raw] && local_cache + raw_entries = entries.map do |key, entry| + raw_entry = Entry.new(serialize_entry(entry, raw: true)) + raw_entry.expires_at = entry.expires_at + end.to_h + + super(raw_entries, options) + else + super + end + end + end + + prepend Strategy::LocalCache + prepend LocalCacheWithRaw + + class << self + # Factory method to create a new Redis instance. + # + # Handles four options: :redis block, :redis instance, single :url + # string, and multiple :url strings. + # + # Option Class Result + # :redis Proc -> options[:redis].call + # :redis Object -> options[:redis] + # :url String -> Redis.new(url: …) + # :url Array -> Redis::Distributed.new([{ url: … }, { url: … }, …]) + # + def build_redis(redis: nil, url: nil, **redis_options) #:nodoc: + urls = Array(url) + + if redis.is_a?(Proc) + redis.call + elsif redis + redis + elsif urls.size > 1 + build_redis_distributed_client urls: urls, **redis_options + else + build_redis_client url: urls.first, **redis_options + end + end + + private + def build_redis_distributed_client(urls:, **redis_options) + ::Redis::Distributed.new([], DEFAULT_REDIS_OPTIONS.merge(redis_options)).tap do |dist| + urls.each { |u| dist.add_node url: u } + end + end + + def build_redis_client(url:, **redis_options) + ::Redis.new DEFAULT_REDIS_OPTIONS.merge(redis_options.merge(url: url)) + end + end + + attr_reader :redis_options + attr_reader :max_key_bytesize + + # Creates a new Redis cache store. + # + # Handles three options: block provided to instantiate, single URL + # provided, and multiple URLs provided. + # + # :redis Proc -> options[:redis].call + # :url String -> Redis.new(url: …) + # :url Array -> Redis::Distributed.new([{ url: … }, { url: … }, …]) + # + # No namespace is set by default. Provide one if the Redis cache + # server is shared with other apps: namespace: 'myapp-cache'. + # + # Compression is enabled by default with a 1kB threshold, so cached + # values larger than 1kB are automatically compressed. Disable by + # passing compress: false or change the threshold by passing + # compress_threshold: 4.kilobytes. + # + # No expiry is set on cache entries by default. Redis is expected to + # be configured with an eviction policy that automatically deletes + # least-recently or -frequently used keys when it reaches max memory. + # See https://redis.io/topics/lru-cache for cache server setup. + # + # Race condition TTL is not set by default. This can be used to avoid + # "thundering herd" cache writes when hot cache entries are expired. + # See ActiveSupport::Cache::Store#fetch for more. + def initialize(namespace: nil, compress: true, compress_threshold: 1.kilobyte, expires_in: nil, race_condition_ttl: nil, error_handler: DEFAULT_ERROR_HANDLER, **redis_options) + @redis_options = redis_options + + @max_key_bytesize = MAX_KEY_BYTESIZE + @error_handler = error_handler + + super namespace: namespace, + compress: compress, compress_threshold: compress_threshold, + expires_in: expires_in, race_condition_ttl: race_condition_ttl + end + + def redis + @redis ||= begin + pool_options = self.class.send(:retrieve_pool_options, redis_options) + + if pool_options.any? + self.class.send(:ensure_connection_pool_added!) 
+ ::ConnectionPool.new(pool_options) { self.class.build_redis(**redis_options) } + else + self.class.build_redis(**redis_options) + end + end + end + + def inspect + instance = @redis || @redis_options + "<##{self.class} options=#{options.inspect} redis=#{instance.inspect}>" + end + + # Cache Store API implementation. + # + # Read multiple values at once. Returns a hash of requested keys -> + # fetched values. + def read_multi(*names) + if mget_capable? + instrument(:read_multi, names, options) do |payload| + read_multi_mget(*names).tap do |results| + payload[:hits] = results.keys + end + end + else + super + end + end + + # Cache Store API implementation. + # + # Supports Redis KEYS glob patterns: + # + # h?llo matches hello, hallo and hxllo + # h*llo matches hllo and heeeello + # h[ae]llo matches hello and hallo, but not hillo + # h[^e]llo matches hallo, hbllo, ... but not hello + # h[a-b]llo matches hallo and hbllo + # + # Use \ to escape special characters if you want to match them verbatim. + # + # See https://redis.io/commands/KEYS for more. + # + # Failsafe: Raises errors. + def delete_matched(matcher, options = nil) + instrument :delete_matched, matcher do + unless String === matcher + raise ArgumentError, "Only Redis glob strings are supported: #{matcher.inspect}" + end + redis.with do |c| + pattern = namespace_key(matcher, options) + cursor = "0" + # Fetch keys in batches using SCAN to avoid blocking the Redis server. + begin + cursor, keys = c.scan(cursor, match: pattern, count: SCAN_BATCH_SIZE) + c.del(*keys) unless keys.empty? + end until cursor == "0" + end + end + end + + # Cache Store API implementation. + # + # Increment a cached value. This method uses the Redis incr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + # + # Failsafe: Raises errors. + def increment(name, amount = 1, options = nil) + instrument :increment, name, amount: amount do + failsafe :increment do + redis.with { |c| c.incrby normalize_key(name, options), amount } + end + end + end + + # Cache Store API implementation. + # + # Decrement a cached value. This method uses the Redis decr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + # + # Failsafe: Raises errors. + def decrement(name, amount = 1, options = nil) + instrument :decrement, name, amount: amount do + failsafe :decrement do + redis.with { |c| c.decrby normalize_key(name, options), amount } + end + end + end + + # Cache Store API implementation. + # + # Removes expired entries. Handled natively by Redis least-recently-/ + # least-frequently-used expiry, so manual cleanup is not supported. + def cleanup(options = nil) + super + end + + # Clear the entire cache on all Redis servers. Safe to use on + # shared servers if the cache is namespaced. + # + # Failsafe: Raises errors. + def clear(options = nil) + failsafe :clear do + if namespace = merged_options(options)[:namespace] + delete_matched "*", namespace: namespace + else + redis.with { |c| c.flushdb } + end + end + end + + def mget_capable? #:nodoc: + set_redis_capabilities unless defined? @mget_capable + @mget_capable + end + + def mset_capable? #:nodoc: + set_redis_capabilities unless defined? 
@mset_capable + @mset_capable + end + + private + def set_redis_capabilities + case redis + when Redis::Distributed + @mget_capable = true + @mset_capable = false + else + @mget_capable = true + @mset_capable = true + end + end + + # Store provider interface: + # Read an entry from the cache. + def read_entry(key, options = nil) + failsafe :read_entry do + raw = options&.fetch(:raw, false) + deserialize_entry(redis.with { |c| c.get(key) }, raw: raw) + end + end + + def read_multi_entries(names, _options) + if mget_capable? + read_multi_mget(*names) + else + super + end + end + + def read_multi_mget(*names) + options = names.extract_options! + options = merged_options(options) + raw = options&.fetch(:raw, false) + + keys = names.map { |name| normalize_key(name, options) } + + values = failsafe(:read_multi_mget, returning: {}) do + redis.with { |c| c.mget(*keys) } + end + + names.zip(values).each_with_object({}) do |(name, value), results| + if value + entry = deserialize_entry(value, raw: raw) + unless entry.nil? || entry.expired? || entry.mismatched?(normalize_version(name, options)) + results[name] = entry.value + end + end + end + end + + # Write an entry to the cache. + # + # Requires Redis 2.6.12+ for extended SET options. + def write_entry(key, entry, unless_exist: false, raw: false, expires_in: nil, race_condition_ttl: nil, **options) + serialized_entry = serialize_entry(entry, raw: raw) + + # If race condition TTL is in use, ensure that cache entries + # stick around a bit longer after they would have expired + # so we can purposefully serve stale entries. + if race_condition_ttl && expires_in && expires_in > 0 && !raw + expires_in += 5.minutes + end + + failsafe :write_entry, returning: false do + if unless_exist || expires_in + modifiers = {} + modifiers[:nx] = unless_exist + modifiers[:px] = (1000 * expires_in.to_f).ceil if expires_in + + redis.with { |c| c.set key, serialized_entry, modifiers } + else + redis.with { |c| c.set key, serialized_entry } + end + end + end + + # Delete an entry from the cache. + def delete_entry(key, options) + failsafe :delete_entry, returning: false do + redis.with { |c| c.del key } + end + end + + # Nonstandard store provider API to write multiple values at once. + def write_multi_entries(entries, expires_in: nil, **options) + if entries.any? + if mset_capable? && expires_in.nil? + failsafe :write_multi_entries do + redis.with { |c| c.mapped_mset(serialize_entries(entries, raw: options[:raw])) } + end + else + super + end + end + end + + # Truncate keys that exceed 1kB. + def normalize_key(key, options) + truncate_key super.b + end + + def truncate_key(key) + if key.bytesize > max_key_bytesize + suffix = ":sha2:#{::Digest::SHA2.hexdigest(key)}" + truncate_at = max_key_bytesize - suffix.bytesize + "#{key.byteslice(0, truncate_at)}#{suffix}" + else + key + end + end + + def deserialize_entry(serialized_entry, raw:) + if serialized_entry + entry = Marshal.load(serialized_entry) rescue serialized_entry + + written_raw = serialized_entry.equal?(entry) + if raw != written_raw + ActiveSupport::Deprecation.warn(<<-MSG.squish) + Using a different value for the raw option when reading and writing + to a cache key is deprecated for :redis_cache_store and Rails 6.0 + will stop automatically detecting the format when reading to avoid + marshal loading untrusted raw strings. + MSG + end + + entry.is_a?(Entry) ? 
entry : Entry.new(entry) + end + end + + def serialize_entry(entry, raw: false) + if raw + entry.value.to_s + else + Marshal.dump(entry) + end + end + + def serialize_entries(entries, raw: false) + entries.transform_values do |entry| + serialize_entry entry, raw: raw + end + end + + def failsafe(method, returning: nil) + yield + rescue ::Redis::BaseConnectionError => e + handle_exception exception: e, method: method, returning: returning + returning + end + + def handle_exception(exception:, method:, returning:) + if @error_handler + @error_handler.(method: method, exception: exception, returning: returning) + end + rescue => failsafe + warn "RedisCacheStore ignored exception in handle_exception: #{failsafe.class}: #{failsafe.message}\n #{failsafe.backtrace.join("\n ")}" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache.rb new file mode 100644 index 0000000..39b32fc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache.rb @@ -0,0 +1,194 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" +require "active_support/core_ext/string/inflections" +require "active_support/per_thread_registry" + +module ActiveSupport + module Cache + module Strategy + # Caches that implement LocalCache will be backed by an in-memory cache for the + # duration of a block. Repeated calls to the cache for the same key will hit the + # in-memory cache for faster access. + module LocalCache + autoload :Middleware, "active_support/cache/strategy/local_cache_middleware" + + # Class for storing and registering the local caches. + class LocalCacheRegistry # :nodoc: + extend ActiveSupport::PerThreadRegistry + + def initialize + @registry = {} + end + + def cache_for(local_cache_key) + @registry[local_cache_key] + end + + def set_cache_for(local_cache_key, value) + @registry[local_cache_key] = value + end + + def self.set_cache_for(l, v); instance.set_cache_for l, v; end + def self.cache_for(l); instance.cache_for l; end + end + + # Simple memory backed cache. This cache is not thread safe and is intended only + # for serving as a temporary memory cache for a single thread. + class LocalStore < Store + def initialize + super + @data = {} + end + + # Don't allow synchronizing since it isn't thread safe. + def synchronize # :nodoc: + yield + end + + def clear(options = nil) + @data.clear + end + + def read_entry(key, options) + @data[key] + end + + def read_multi_entries(keys, options) + values = {} + + keys.each do |name| + entry = read_entry(name, options) + values[name] = entry.value if entry + end + + values + end + + def write_entry(key, value, options) + @data[key] = value + true + end + + def delete_entry(key, options) + !!@data.delete(key) + end + + def fetch_entry(key, options = nil) # :nodoc: + @data.fetch(key) { @data[key] = yield } + end + end + + # Use a local cache for the duration of block. + def with_local_cache + use_temporary_local_cache(LocalStore.new) { yield } + end + + # Middleware class can be inserted as a Rack handler to be local cache for the + # duration of request. 
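NullStore (shown earlier) prepends this strategy, which makes the effect easy to observe: nothing is ever persisted, yet values written inside a with_local_cache block are served from the block-local memory cache. A minimal sketch with made-up keys.

require "active_support"
require "active_support/cache"

store = ActiveSupport::Cache::NullStore.new

store.write("token", "abc")
store.read("token")            # => nil, nothing is actually stored

store.with_local_cache do
  store.write("token", "abc")
  store.read("token")          # => "abc", served from the temporary LocalStore
end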
+ def middleware + @middleware ||= Middleware.new( + "ActiveSupport::Cache::Strategy::LocalCache", + local_cache_key) + end + + def clear(options = nil) # :nodoc: + return super unless cache = local_cache + cache.clear(options) + super + end + + def cleanup(options = nil) # :nodoc: + return super unless cache = local_cache + cache.clear + super + end + + def increment(name, amount = 1, options = nil) # :nodoc: + return super unless local_cache + value = bypass_local_cache { super } + write_cache_value(name, value, options) + value + end + + def decrement(name, amount = 1, options = nil) # :nodoc: + return super unless local_cache + value = bypass_local_cache { super } + write_cache_value(name, value, options) + value + end + + private + def read_entry(key, options) + if cache = local_cache + cache.fetch_entry(key) { super } + else + super + end + end + + def read_multi_entries(keys, options) + return super unless local_cache + + local_entries = local_cache.read_multi_entries(keys, options) + missed_keys = keys - local_entries.keys + + if missed_keys.any? + local_entries.merge!(super(missed_keys, options)) + else + local_entries + end + end + + def write_entry(key, entry, options) + if options[:unless_exist] + local_cache.delete_entry(key, options) if local_cache + else + local_cache.write_entry(key, entry, options) if local_cache + end + + super + end + + def delete_entry(key, options) + local_cache.delete_entry(key, options) if local_cache + super + end + + def write_cache_value(name, value, options) + name = normalize_key(name, options) + cache = local_cache + cache.mute do + if value + cache.write(name, value, options) + else + cache.delete(name, options) + end + end + end + + def local_cache_key + @local_cache_key ||= "#{self.class.name.underscore}_local_cache_#{object_id}".gsub(/[\/-]/, "_").to_sym + end + + def local_cache + LocalCacheRegistry.cache_for(local_cache_key) + end + + def bypass_local_cache + use_temporary_local_cache(nil) { yield } + end + + def use_temporary_local_cache(temporary_cache) + save_cache = LocalCacheRegistry.cache_for(local_cache_key) + begin + LocalCacheRegistry.set_cache_for(local_cache_key, temporary_cache) + yield + ensure + LocalCacheRegistry.set_cache_for(local_cache_key, save_cache) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache_middleware.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache_middleware.rb new file mode 100644 index 0000000..62542bd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache_middleware.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "rack/body_proxy" +require "rack/utils" + +module ActiveSupport + module Cache + module Strategy + module LocalCache + #-- + # This class wraps up local storage for middlewares. Only the middleware method should + # construct them. 
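The same behaviour is available per request through the middleware accessor above. A hedged config.ru sketch for a plain Rack app; the FileStore path and cache key are hypothetical.

# config.ru
require "active_support"
require "active_support/cache"

CACHE = ActiveSupport::Cache::FileStore.new("tmp/cache")

use CACHE.middleware    # wraps each request in a fresh block-local cache

run lambda { |env|
  body = CACHE.fetch("motd") { "hello" }   # repeated reads within one request stay local
  [200, { "Content-Type" => "text/plain" }, [body]]
}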
+ class Middleware # :nodoc: + attr_reader :name, :local_cache_key + + def initialize(name, local_cache_key) + @name = name + @local_cache_key = local_cache_key + @app = nil + end + + def new(app) + @app = app + self + end + + def call(env) + LocalCacheRegistry.set_cache_for(local_cache_key, LocalStore.new) + response = @app.call(env) + response[2] = ::Rack::BodyProxy.new(response[2]) do + LocalCacheRegistry.set_cache_for(local_cache_key, nil) + end + cleanup_on_body_close = true + response + rescue Rack::Utils::InvalidParameterError + [400, {}, []] + ensure + LocalCacheRegistry.set_cache_for(local_cache_key, nil) unless + cleanup_on_body_close + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/callbacks.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/callbacks.rb new file mode 100644 index 0000000..9a3728d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/callbacks.rb @@ -0,0 +1,845 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/descendants_tracker" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/string/filters" +require "active_support/deprecation" +require "thread" + +module ActiveSupport + # Callbacks are code hooks that are run at key points in an object's life cycle. + # The typical use case is to have a base class define a set of callbacks + # relevant to the other functionality it supplies, so that subclasses can + # install callbacks that enhance or modify the base functionality without + # needing to override or redefine methods of the base class. + # + # Mixing in this module allows you to define the events in the object's + # life cycle that will support callbacks (via +ClassMethods.define_callbacks+), + # set the instance methods, procs, or callback objects to be called (via + # +ClassMethods.set_callback+), and run the installed callbacks at the + # appropriate times (via +run_callbacks+). + # + # Three kinds of callbacks are supported: before callbacks, run before a + # certain event; after callbacks, run after the event; and around callbacks, + # blocks that surround the event, triggering it when they yield. Callback code + # can be contained in instance methods, procs or lambdas, or callback objects + # that respond to certain predetermined methods. See +ClassMethods.set_callback+ + # for details. + # + # class Record + # include ActiveSupport::Callbacks + # define_callbacks :save + # + # def save + # run_callbacks :save do + # puts "- save" + # end + # end + # end + # + # class PersonRecord < Record + # set_callback :save, :before, :saving_message + # def saving_message + # puts "saving..." + # end + # + # set_callback :save, :after do |object| + # puts "saved" + # end + # end + # + # person = PersonRecord.new + # person.save + # + # Output: + # saving... + # - save + # saved + module Callbacks + extend Concern + + included do + extend ActiveSupport::DescendantsTracker + class_attribute :__callbacks, instance_writer: false, default: {} + end + + CALLBACK_FILTER_TYPES = [:before, :after, :around] + + # Runs the callbacks for the given event. 
+ # + # Calls the before and around callbacks in the order they were set, yields + # the block (if given one), and then runs the after callbacks in reverse + # order. + # + # If the callback chain was halted, returns +false+. Otherwise returns the + # result of the block, +nil+ if no callbacks have been set, or +true+ + # if callbacks have been set but no block is given. + # + # run_callbacks :save do + # save + # end + # + #-- + # + # As this method is used in many places, and often wraps large portions of + # user code, it has an additional design goal of minimizing its impact on + # the visible call stack. An exception from inside a :before or :after + # callback can be as noisy as it likes -- but when control has passed + # smoothly through and into the supplied block, we want as little evidence + # as possible that we were here. + def run_callbacks(kind) + callbacks = __callbacks[kind.to_sym] + + if callbacks.empty? + yield if block_given? + else + env = Filters::Environment.new(self, false, nil) + next_sequence = callbacks.compile + + invoke_sequence = Proc.new do + skipped = nil + while true + current = next_sequence + current.invoke_before(env) + if current.final? + env.value = !env.halted && (!block_given? || yield) + elsif current.skip?(env) + (skipped ||= []) << current + next_sequence = next_sequence.nested + next + else + next_sequence = next_sequence.nested + begin + target, block, method, *arguments = current.expand_call_template(env, invoke_sequence) + target.send(method, *arguments, &block) + ensure + next_sequence = current + end + end + current.invoke_after(env) + skipped.pop.invoke_after(env) while skipped && skipped.first + break env.value + end + end + + # Common case: no 'around' callbacks defined + if next_sequence.final? + next_sequence.invoke_before(env) + env.value = !env.halted && (!block_given? || yield) + next_sequence.invoke_after(env) + env.value + else + invoke_sequence.call + end + end + end + + private + + # A hook invoked every time a before callback is halted. + # This can be overridden in ActiveSupport::Callbacks implementors in order + # to provide better debugging/logging. + def halted_callback_hook(filter) + end + + module Conditionals # :nodoc: + class Value + def initialize(&block) + @block = block + end + def call(target, value); @block.call(value); end + end + end + + module Filters + Environment = Struct.new(:target, :halted, :value) + + class Before + def self.build(callback_sequence, user_callback, user_conditions, chain_config, filter) + halted_lambda = chain_config[:terminator] + + if user_conditions.any? + halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter) + else + halting(callback_sequence, user_callback, halted_lambda, filter) + end + end + + def self.halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter) + callback_sequence.before do |env| + target = env.target + value = env.value + halted = env.halted + + if !halted && user_conditions.all? 
{ |c| c.call(target, value) } + result_lambda = -> { user_callback.call target, value } + env.halted = halted_lambda.call(target, result_lambda) + if env.halted + target.send :halted_callback_hook, filter + end + end + + env + end + end + private_class_method :halting_and_conditional + + def self.halting(callback_sequence, user_callback, halted_lambda, filter) + callback_sequence.before do |env| + target = env.target + value = env.value + halted = env.halted + + unless halted + result_lambda = -> { user_callback.call target, value } + env.halted = halted_lambda.call(target, result_lambda) + + if env.halted + target.send :halted_callback_hook, filter + end + end + + env + end + end + private_class_method :halting + end + + class After + def self.build(callback_sequence, user_callback, user_conditions, chain_config) + if chain_config[:skip_after_callbacks_if_terminated] + if user_conditions.any? + halting_and_conditional(callback_sequence, user_callback, user_conditions) + else + halting(callback_sequence, user_callback) + end + else + if user_conditions.any? + conditional callback_sequence, user_callback, user_conditions + else + simple callback_sequence, user_callback + end + end + end + + def self.halting_and_conditional(callback_sequence, user_callback, user_conditions) + callback_sequence.after do |env| + target = env.target + value = env.value + halted = env.halted + + if !halted && user_conditions.all? { |c| c.call(target, value) } + user_callback.call target, value + end + + env + end + end + private_class_method :halting_and_conditional + + def self.halting(callback_sequence, user_callback) + callback_sequence.after do |env| + unless env.halted + user_callback.call env.target, env.value + end + + env + end + end + private_class_method :halting + + def self.conditional(callback_sequence, user_callback, user_conditions) + callback_sequence.after do |env| + target = env.target + value = env.value + + if user_conditions.all? { |c| c.call(target, value) } + user_callback.call target, value + end + + env + end + end + private_class_method :conditional + + def self.simple(callback_sequence, user_callback) + callback_sequence.after do |env| + user_callback.call env.target, env.value + + env + end + end + private_class_method :simple + end + end + + class Callback #:nodoc:# + def self.build(chain, filter, kind, options) + if filter.is_a?(String) + raise ArgumentError, <<-MSG.squish + Passing string to define a callback is not supported. See the `.set_callback` + documentation to see supported values. 
+ MSG + end + + new chain.name, filter, kind, options, chain.config + end + + attr_accessor :kind, :name + attr_reader :chain_config + + def initialize(name, filter, kind, options, chain_config) + @chain_config = chain_config + @name = name + @kind = kind + @filter = filter + @key = compute_identifier filter + @if = check_conditionals(Array(options[:if])) + @unless = check_conditionals(Array(options[:unless])) + end + + def filter; @key; end + def raw_filter; @filter; end + + def merge_conditional_options(chain, if_option:, unless_option:) + options = { + if: @if.dup, + unless: @unless.dup + } + + options[:if].concat Array(unless_option) + options[:unless].concat Array(if_option) + + self.class.build chain, @filter, @kind, options + end + + def matches?(_kind, _filter) + @kind == _kind && filter == _filter + end + + def duplicates?(other) + case @filter + when Symbol + matches?(other.kind, other.filter) + else + false + end + end + + # Wraps code with filter + def apply(callback_sequence) + user_conditions = conditions_lambdas + user_callback = CallTemplate.build(@filter, self) + + case kind + when :before + Filters::Before.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config, @filter) + when :after + Filters::After.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config) + when :around + callback_sequence.around(user_callback, user_conditions) + end + end + + def current_scopes + Array(chain_config[:scope]).map { |s| public_send(s) } + end + + private + def check_conditionals(conditionals) + if conditionals.any? { |c| c.is_a?(String) } + raise ArgumentError, <<-MSG.squish + Passing string to be evaluated in :if and :unless conditional + options is not supported. Pass a symbol for an instance method, + or a lambda, proc or block, instead. + MSG + end + + conditionals + end + + def compute_identifier(filter) + case filter + when ::Proc + filter.object_id + else + filter + end + end + + def conditions_lambdas + @if.map { |c| CallTemplate.build(c, self).make_lambda } + + @unless.map { |c| CallTemplate.build(c, self).inverted_lambda } + end + end + + # A future invocation of user-supplied code (either as a callback, + # or a condition filter). + class CallTemplate # :nodoc: + def initialize(target, method, arguments, block) + @override_target = target + @method_name = method + @arguments = arguments + @override_block = block + end + + # Return the parts needed to make this call, with the given + # input values. + # + # Returns an array of the form: + # + # [target, block, method, *arguments] + # + # This array can be used as such: + # + # target.send(method, *arguments, &block) + # + # The actual invocation is left up to the caller to minimize + # call stack pollution. + def expand(target, value, block) + result = @arguments.map { |arg| + case arg + when :value; value + when :target; target + when :block; block || raise(ArgumentError) + end + } + + result.unshift @method_name + result.unshift @override_block || block + result.unshift @override_target || target + + # target, block, method, *arguments = result + # target.send(method, *arguments, &block) + result + end + + # Return a lambda that will make this call when given the input + # values. 
+ def make_lambda + lambda do |target, value, &block| + target, block, method, *arguments = expand(target, value, block) + target.send(method, *arguments, &block) + end + end + + # Return a lambda that will make this call when given the input + # values, but then return the boolean inverse of that result. + def inverted_lambda + lambda do |target, value, &block| + target, block, method, *arguments = expand(target, value, block) + ! target.send(method, *arguments, &block) + end + end + + # Filters support: + # + # Symbols:: A method to call. + # Procs:: A proc to call with the object. + # Objects:: An object with a before_foo method on it to call. + # + # All of these objects are converted into a CallTemplate and handled + # the same after this point. + def self.build(filter, callback) + case filter + when Symbol + new(nil, filter, [], nil) + when Conditionals::Value + new(filter, :call, [:target, :value], nil) + when ::Proc + if filter.arity > 1 + new(nil, :instance_exec, [:target, :block], filter) + elsif filter.arity > 0 + new(nil, :instance_exec, [:target], filter) + else + new(nil, :instance_exec, [], filter) + end + else + method_to_call = callback.current_scopes.join("_") + + new(filter, method_to_call, [:target], nil) + end + end + end + + # Execute before and after filters in a sequence instead of + # chaining them with nested lambda calls, see: + # https://github.com/rails/rails/issues/18011 + class CallbackSequence # :nodoc: + def initialize(nested = nil, call_template = nil, user_conditions = nil) + @nested = nested + @call_template = call_template + @user_conditions = user_conditions + + @before = [] + @after = [] + end + + def before(&before) + @before.unshift(before) + self + end + + def after(&after) + @after.push(after) + self + end + + def around(call_template, user_conditions) + CallbackSequence.new(self, call_template, user_conditions) + end + + def skip?(arg) + arg.halted || !@user_conditions.all? { |c| c.call(arg.target, arg.value) } + end + + def nested + @nested + end + + def final? 
+ !@call_template + end + + def expand_call_template(arg, block) + @call_template.expand(arg.target, arg.value, block) + end + + def invoke_before(arg) + @before.each { |b| b.call(arg) } + end + + def invoke_after(arg) + @after.each { |a| a.call(arg) } + end + end + + class CallbackChain #:nodoc:# + include Enumerable + + attr_reader :name, :config + + def initialize(name, config) + @name = name + @config = { + scope: [:kind], + terminator: default_terminator + }.merge!(config) + @chain = [] + @callbacks = nil + @mutex = Mutex.new + end + + def each(&block); @chain.each(&block); end + def index(o); @chain.index(o); end + def empty?; @chain.empty?; end + + def insert(index, o) + @callbacks = nil + @chain.insert(index, o) + end + + def delete(o) + @callbacks = nil + @chain.delete(o) + end + + def clear + @callbacks = nil + @chain.clear + self + end + + def initialize_copy(other) + @callbacks = nil + @chain = other.chain.dup + @mutex = Mutex.new + end + + def compile + @callbacks || @mutex.synchronize do + final_sequence = CallbackSequence.new + @callbacks ||= @chain.reverse.inject(final_sequence) do |callback_sequence, callback| + callback.apply callback_sequence + end + end + end + + def append(*callbacks) + callbacks.each { |c| append_one(c) } + end + + def prepend(*callbacks) + callbacks.each { |c| prepend_one(c) } + end + + protected + def chain; @chain; end + + private + + def append_one(callback) + @callbacks = nil + remove_duplicates(callback) + @chain.push(callback) + end + + def prepend_one(callback) + @callbacks = nil + remove_duplicates(callback) + @chain.unshift(callback) + end + + def remove_duplicates(callback) + @callbacks = nil + @chain.delete_if { |c| callback.duplicates?(c) } + end + + def default_terminator + Proc.new do |target, result_lambda| + terminate = true + catch(:abort) do + result_lambda.call + terminate = false + end + terminate + end + end + end + + module ClassMethods + def normalize_callback_params(filters, block) # :nodoc: + type = CALLBACK_FILTER_TYPES.include?(filters.first) ? filters.shift : :before + options = filters.extract_options! + filters.unshift(block) if block + [type, filters, options.dup] + end + + # This is used internally to append, prepend and skip callbacks to the + # CallbackChain. + def __update_callbacks(name) #:nodoc: + ([self] + ActiveSupport::DescendantsTracker.descendants(self)).reverse_each do |target| + chain = target.get_callbacks name + yield target, chain.dup + end + end + + # Install a callback for the given event. + # + # set_callback :save, :before, :before_method + # set_callback :save, :after, :after_method, if: :condition + # set_callback :save, :around, ->(r, block) { stuff; result = block.call; stuff } + # + # The second argument indicates whether the callback is to be run +:before+, + # +:after+, or +:around+ the event. If omitted, +:before+ is assumed. This + # means the first example above can also be written as: + # + # set_callback :save, :before_method + # + # The callback can be specified as a symbol naming an instance method; as a + # proc, lambda, or block; or as an object that responds to a certain method + # determined by the :scope argument to +define_callbacks+. + # + # If a proc, lambda, or block is given, its body is evaluated in the context + # of the current object. It can also optionally accept the current object as + # an argument. + # + # Before and around callbacks are called in the order that they are set; + # after callbacks are called in the reverse order. 
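To make the ordering concrete, here is a minimal, self-contained sketch (illustrative only, not part of the vendored gem source; the Record class and its method names are invented, and it assumes this vendored activesupport is on the load path):

require "active_support/callbacks"

class Record
  include ActiveSupport::Callbacks
  define_callbacks :save

  # Before (and around) callbacks run in the order they are set...
  set_callback :save, :before, :log_first
  set_callback :save, :before, :log_second
  # ...while after callbacks run in the reverse order.
  set_callback :save, :after, :notify_first
  set_callback :save, :after, :notify_second

  def log_first;     puts "before 1"; end
  def log_second;    puts "before 2"; end
  def notify_first;  puts "after 1";  end
  def notify_second; puts "after 2";  end

  def save
    run_callbacks(:save) { puts "saving" }
  end
end

Record.new.save
# prints: before 1, before 2, saving, after 2, after 1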
+ # + # Around callbacks can access the return value from the event, if it + # wasn't halted, from the +yield+ call. + # + # ===== Options + # + # * :if - A symbol or an array of symbols, each naming an instance + # method or a proc; the callback will be called only when they all return + # a true value. + # * :unless - A symbol or an array of symbols, each naming an + # instance method or a proc; the callback will be called only when they + # all return a false value. + # * :prepend - If +true+, the callback will be prepended to the + # existing chain rather than appended. + def set_callback(name, *filter_list, &block) + type, filters, options = normalize_callback_params(filter_list, block) + + self_chain = get_callbacks name + mapped = filters.map do |filter| + Callback.build(self_chain, filter, type, options) + end + + __update_callbacks(name) do |target, chain| + options[:prepend] ? chain.prepend(*mapped) : chain.append(*mapped) + target.set_callbacks name, chain + end + end + + # Skip a previously set callback. Like +set_callback+, :if or + # :unless options may be passed in order to control when the + # callback is skipped. + # + # class Writer < Person + # skip_callback :validate, :before, :check_membership, if: -> { age > 18 } + # end + # + # An ArgumentError will be raised if the callback has not + # already been set (unless the :raise option is set to false). + def skip_callback(name, *filter_list, &block) + type, filters, options = normalize_callback_params(filter_list, block) + + options[:raise] = true unless options.key?(:raise) + + __update_callbacks(name) do |target, chain| + filters.each do |filter| + callback = chain.find { |c| c.matches?(type, filter) } + + if !callback && options[:raise] + raise ArgumentError, "#{type.to_s.capitalize} #{name} callback #{filter.inspect} has not been defined" + end + + if callback && (options.key?(:if) || options.key?(:unless)) + new_callback = callback.merge_conditional_options(chain, if_option: options[:if], unless_option: options[:unless]) + chain.insert(chain.index(callback), new_callback) + end + + chain.delete(callback) + end + target.set_callbacks name, chain + end + end + + # Remove all set callbacks for the given event. + def reset_callbacks(name) + callbacks = get_callbacks name + + ActiveSupport::DescendantsTracker.descendants(self).each do |target| + chain = target.get_callbacks(name).dup + callbacks.each { |c| chain.delete(c) } + target.set_callbacks name, chain + end + + set_callbacks(name, callbacks.dup.clear) + end + + # Define sets of events in the object life cycle that support callbacks. + # + # define_callbacks :validate + # define_callbacks :initialize, :save, :destroy + # + # ===== Options + # + # * :terminator - Determines when a before filter will halt the + # callback chain, preventing following before and around callbacks from + # being called and the event from being triggered. + # This should be a lambda to be executed. + # The current object and the result lambda of the callback will be provided + # to the terminator lambda. + # + # define_callbacks :validate, terminator: ->(target, result_lambda) { result_lambda.call == false } + # + # In this example, if any before validate callbacks returns +false+, + # any successive before and around callback is not executed. + # + # The default terminator halts the chain when a callback throws +:abort+. + # + # * :skip_after_callbacks_if_terminated - Determines if after + # callbacks should be terminated by the :terminator option. 
By + # default after callbacks are executed no matter if callback chain was + # terminated or not. This option has no effect if :terminator + # option is set to +nil+. + # + # * :scope - Indicates which methods should be executed when an + # object is used as a callback. + # + # class Audit + # def before(caller) + # puts 'Audit: before is called' + # end + # + # def before_save(caller) + # puts 'Audit: before_save is called' + # end + # end + # + # class Account + # include ActiveSupport::Callbacks + # + # define_callbacks :save + # set_callback :save, :before, Audit.new + # + # def save + # run_callbacks :save do + # puts 'save in main' + # end + # end + # end + # + # In the above case whenever you save an account the method + # Audit#before will be called. On the other hand + # + # define_callbacks :save, scope: [:kind, :name] + # + # would trigger Audit#before_save instead. That's constructed + # by calling #{kind}_#{name} on the given instance. In this + # case "kind" is "before" and "name" is "save". In this context +:kind+ + # and +:name+ have special meanings: +:kind+ refers to the kind of + # callback (before/after/around) and +:name+ refers to the method on + # which callbacks are being defined. + # + # A declaration like + # + # define_callbacks :save, scope: [:name] + # + # would call Audit#save. + # + # ===== Notes + # + # +names+ passed to +define_callbacks+ must not end with + # !, ? or =. + # + # Calling +define_callbacks+ multiple times with the same +names+ will + # overwrite previous callbacks registered with +set_callback+. + def define_callbacks(*names) + options = names.extract_options! + + names.each do |name| + name = name.to_sym + + set_callbacks name, CallbackChain.new(name, options) + + module_eval <<-RUBY, __FILE__, __LINE__ + 1 + def _run_#{name}_callbacks(&block) + run_callbacks #{name.inspect}, &block + end + + def self._#{name}_callbacks + get_callbacks(#{name.inspect}) + end + + def self._#{name}_callbacks=(value) + set_callbacks(#{name.inspect}, value) + end + + def _#{name}_callbacks + __callbacks[#{name.inspect}] + end + RUBY + end + end + + protected + + def get_callbacks(name) # :nodoc: + __callbacks[name.to_sym] + end + + def set_callbacks(name, callbacks) # :nodoc: + self.__callbacks = __callbacks.merge(name.to_sym => callbacks) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concern.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concern.rb new file mode 100644 index 0000000..5d356a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concern.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true + +module ActiveSupport + # A typical module looks like this: + # + # module M + # def self.included(base) + # base.extend ClassMethods + # base.class_eval do + # scope :disabled, -> { where(disabled: true) } + # end + # end + # + # module ClassMethods + # ... + # end + # end + # + # By using ActiveSupport::Concern the above module could instead be + # written as: + # + # require 'active_support/concern' + # + # module M + # extend ActiveSupport::Concern + # + # included do + # scope :disabled, -> { where(disabled: true) } + # end + # + # class_methods do + # ... + # end + # end + # + # Moreover, it gracefully handles module dependencies. 
Given a +Foo+ module + # and a +Bar+ module which depends on the former, we would typically write the + # following: + # + # module Foo + # def self.included(base) + # base.class_eval do + # def self.method_injected_by_foo + # ... + # end + # end + # end + # end + # + # module Bar + # def self.included(base) + # base.method_injected_by_foo + # end + # end + # + # class Host + # include Foo # We need to include this dependency for Bar + # include Bar # Bar is the module that Host really needs + # end + # + # But why should +Host+ care about +Bar+'s dependencies, namely +Foo+? We + # could try to hide these from +Host+ directly including +Foo+ in +Bar+: + # + # module Bar + # include Foo + # def self.included(base) + # base.method_injected_by_foo + # end + # end + # + # class Host + # include Bar + # end + # + # Unfortunately this won't work, since when +Foo+ is included, its base + # is the +Bar+ module, not the +Host+ class. With ActiveSupport::Concern, + # module dependencies are properly resolved: + # + # require 'active_support/concern' + # + # module Foo + # extend ActiveSupport::Concern + # included do + # def self.method_injected_by_foo + # ... + # end + # end + # end + # + # module Bar + # extend ActiveSupport::Concern + # include Foo + # + # included do + # self.method_injected_by_foo + # end + # end + # + # class Host + # include Bar # It works, now Bar takes care of its dependencies + # end + module Concern + class MultipleIncludedBlocks < StandardError #:nodoc: + def initialize + super "Cannot define multiple 'included' blocks for a Concern" + end + end + + def self.extended(base) #:nodoc: + base.instance_variable_set(:@_dependencies, []) + end + + def append_features(base) + if base.instance_variable_defined?(:@_dependencies) + base.instance_variable_get(:@_dependencies) << self + false + else + return false if base < self + @_dependencies.each { |dep| base.include(dep) } + super + base.extend const_get(:ClassMethods) if const_defined?(:ClassMethods) + base.class_eval(&@_included_block) if instance_variable_defined?(:@_included_block) + end + end + + def included(base = nil, &block) + if base.nil? + if instance_variable_defined?(:@_included_block) + if @_included_block.source_location != block.source_location + raise MultipleIncludedBlocks + end + else + @_included_block = block + end + else + super + end + end + + def class_methods(&class_methods_module_definition) + mod = const_defined?(:ClassMethods, false) ? + const_get(:ClassMethods) : + const_set(:ClassMethods, Module.new) + + mod.module_eval(&class_methods_module_definition) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/load_interlock_aware_monitor.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/load_interlock_aware_monitor.rb new file mode 100644 index 0000000..a8455c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/load_interlock_aware_monitor.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require "monitor" + +module ActiveSupport + module Concurrency + # A monitor that will permit dependency loading while blocked waiting for + # the lock. 
+ class LoadInterlockAwareMonitor < Monitor + # Enters an exclusive section, but allows dependency loading while blocked + def mon_enter + mon_try_enter || + ActiveSupport::Dependencies.interlock.permit_concurrent_loads { super } + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/share_lock.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/share_lock.rb new file mode 100644 index 0000000..f18ccf1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/share_lock.rb @@ -0,0 +1,227 @@ +# frozen_string_literal: true + +require "thread" +require "monitor" + +module ActiveSupport + module Concurrency + # A share/exclusive lock, otherwise known as a read/write lock. + # + # https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock + class ShareLock + include MonitorMixin + + # We track Thread objects, instead of just using counters, because + # we need exclusive locks to be reentrant, and we need to be able + # to upgrade share locks to exclusive. + + def raw_state # :nodoc: + synchronize do + threads = @sleeping.keys | @sharing.keys | @waiting.keys + threads |= [@exclusive_thread] if @exclusive_thread + + data = {} + + threads.each do |thread| + purpose, compatible = @waiting[thread] + + data[thread] = { + thread: thread, + sharing: @sharing[thread], + exclusive: @exclusive_thread == thread, + purpose: purpose, + compatible: compatible, + waiting: !!@waiting[thread], + sleeper: @sleeping[thread], + } + end + + # NB: Yields while holding our *internal* synchronize lock, + # which is supposed to be used only for a few instructions at + # a time. This allows the caller to inspect additional state + # without things changing out from underneath, but would have + # disastrous effects upon normal operation. Fortunately, this + # method is only intended to be called when things have + # already gone wrong. + yield data + end + end + + def initialize + super() + + @cv = new_cond + + @sharing = Hash.new(0) + @waiting = {} + @sleeping = {} + @exclusive_thread = nil + @exclusive_depth = 0 + end + + # Returns false if +no_wait+ is set and the lock is not + # immediately available. Otherwise, returns true after the lock + # has been acquired. + # + # +purpose+ and +compatible+ work together; while this thread is + # waiting for the exclusive lock, it will yield its share (if any) + # to any other attempt whose +purpose+ appears in this attempt's + # +compatible+ list. This allows a "loose" upgrade, which, being + # less strict, prevents some classes of deadlocks. + # + # For many resources, loose upgrades are sufficient: if a thread + # is awaiting a lock, it is not running any other code. With + # +purpose+ matching, it is possible to yield only to other + # threads whose activity will not interfere. + def start_exclusive(purpose: nil, compatible: [], no_wait: false) + synchronize do + unless @exclusive_thread == Thread.current + if busy_for_exclusive?(purpose) + return false if no_wait + + yield_shares(purpose: purpose, compatible: compatible, block_share: true) do + wait_for(:start_exclusive) { busy_for_exclusive?(purpose) } + end + end + @exclusive_thread = Thread.current + end + @exclusive_depth += 1 + + true + end + end + + # Relinquish the exclusive lock. Must only be called by the thread + # that called start_exclusive (and currently holds the lock). 
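The share/exclusive behaviour described above can be exercised with a short sketch (illustrative only, not part of the vendored source; ShareLock is rarely used directly outside Rails internals, and the :load purpose symbol here is an arbitrary example):

require "active_support/concurrency/share_lock"

lock = ActiveSupport::Concurrency::ShareLock.new

# A reader holds the lock in share mode for a moment...
reader = Thread.new do
  lock.sharing { sleep 0.1 }
end
sleep 0.01 # best-effort: let the reader acquire its share first

# ...so the writer blocks here until that share is released.
lock.exclusive(purpose: :load, compatible: [:load]) do
  puts "exclusive section: no other thread is sharing"
end

reader.join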
+ def stop_exclusive(compatible: []) + synchronize do + raise "invalid unlock" if @exclusive_thread != Thread.current + + @exclusive_depth -= 1 + if @exclusive_depth == 0 + @exclusive_thread = nil + + if eligible_waiters?(compatible) + yield_shares(compatible: compatible, block_share: true) do + wait_for(:stop_exclusive) { @exclusive_thread || eligible_waiters?(compatible) } + end + end + @cv.broadcast + end + end + end + + def start_sharing + synchronize do + if @sharing[Thread.current] > 0 || @exclusive_thread == Thread.current + # We already hold a lock; nothing to wait for + elsif @waiting[Thread.current] + # We're nested inside a +yield_shares+ call: we'll resume as + # soon as there isn't an exclusive lock in our way + wait_for(:start_sharing) { @exclusive_thread } + else + # This is an initial / outermost share call: any outstanding + # requests for an exclusive lock get to go first + wait_for(:start_sharing) { busy_for_sharing?(false) } + end + @sharing[Thread.current] += 1 + end + end + + def stop_sharing + synchronize do + if @sharing[Thread.current] > 1 + @sharing[Thread.current] -= 1 + else + @sharing.delete Thread.current + @cv.broadcast + end + end + end + + # Execute the supplied block while holding the Exclusive lock. If + # +no_wait+ is set and the lock is not immediately available, + # returns +nil+ without yielding. Otherwise, returns the result of + # the block. + # + # See +start_exclusive+ for other options. + def exclusive(purpose: nil, compatible: [], after_compatible: [], no_wait: false) + if start_exclusive(purpose: purpose, compatible: compatible, no_wait: no_wait) + begin + yield + ensure + stop_exclusive(compatible: after_compatible) + end + end + end + + # Execute the supplied block while holding the Share lock. + def sharing + start_sharing + begin + yield + ensure + stop_sharing + end + end + + # Temporarily give up all held Share locks while executing the + # supplied block, allowing any +compatible+ exclusive lock request + # to proceed. + def yield_shares(purpose: nil, compatible: [], block_share: false) + loose_shares = previous_wait = nil + synchronize do + if loose_shares = @sharing.delete(Thread.current) + if previous_wait = @waiting[Thread.current] + purpose = nil unless purpose == previous_wait[0] + compatible &= previous_wait[1] + end + compatible |= [false] unless block_share + @waiting[Thread.current] = [purpose, compatible] + end + + @cv.broadcast + end + + begin + yield + ensure + synchronize do + wait_for(:yield_shares) { @exclusive_thread && @exclusive_thread != Thread.current } + + if previous_wait + @waiting[Thread.current] = previous_wait + else + @waiting.delete Thread.current + end + @sharing[Thread.current] = loose_shares if loose_shares + end + end + end + + private + + # Must be called within synchronize + def busy_for_exclusive?(purpose) + busy_for_sharing?(purpose) || + @sharing.size > (@sharing[Thread.current] > 0 ? 1 : 0) + end + + def busy_for_sharing?(purpose) + (@exclusive_thread && @exclusive_thread != Thread.current) || + @waiting.any? { |t, (_, c)| t != Thread.current && !c.include?(purpose) } + end + + def eligible_waiters?(compatible) + @waiting.any? { |t, (p, _)| compatible.include?(p) && @waiting.all? 
{ |t2, (_, c2)| t == t2 || c2.include?(p) } } + end + + def wait_for(method) + @sleeping[Thread.current] = method + @cv.wait_while { yield } + ensure + @sleeping.delete Thread.current + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/configurable.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/configurable.rb new file mode 100644 index 0000000..4d6f781 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/configurable.rb @@ -0,0 +1,150 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/ordered_options" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/regexp" + +module ActiveSupport + # Configurable provides a config method to store and retrieve + # configuration options as an OrderedHash. + module Configurable + extend ActiveSupport::Concern + + class Configuration < ActiveSupport::InheritableOptions + def compile_methods! + self.class.compile_methods!(keys) + end + + # Compiles reader methods so we don't have to go through method_missing. + def self.compile_methods!(keys) + keys.reject { |m| method_defined?(m) }.each do |key| + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{key}; _get(#{key.inspect}); end + RUBY + end + end + end + + module ClassMethods + def config + @_config ||= if respond_to?(:superclass) && superclass.respond_to?(:config) + superclass.config.inheritable_copy + else + # create a new "anonymous" class that will host the compiled reader methods + Class.new(Configuration).new + end + end + + def configure + yield config + end + + # Allows you to add shortcut so that you don't have to refer to attribute + # through config. Also look at the example for config to contrast. + # + # Defines both class and instance config accessors. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access + # end + # + # User.allowed_access # => nil + # User.allowed_access = false + # User.allowed_access # => false + # + # user = User.new + # user.allowed_access # => false + # user.allowed_access = true + # user.allowed_access # => true + # + # User.allowed_access # => false + # + # The attribute name must be a valid method name in Ruby. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :"1_Badname" + # end + # # => NameError: invalid config attribute name + # + # To opt out of the instance writer method, pass instance_writer: false. + # To opt out of the instance reader method, pass instance_reader: false. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access, instance_reader: false, instance_writer: false + # end + # + # User.allowed_access = false + # User.allowed_access # => false + # + # User.new.allowed_access = true # => NoMethodError + # User.new.allowed_access # => NoMethodError + # + # Or pass instance_accessor: false, to opt out both instance methods. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access, instance_accessor: false + # end + # + # User.allowed_access = false + # User.allowed_access # => false + # + # User.new.allowed_access = true # => NoMethodError + # User.new.allowed_access # => NoMethodError + # + # Also you can pass a block to set up the attribute with a default value. 
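A brief sketch of config_accessor combining the options described above (illustrative only; the Connection class and its attribute names are invented):

require "active_support/configurable"

class Connection
  include ActiveSupport::Configurable
  config_accessor :timeout                      # class and instance accessors
  config_accessor :retries, instance_writer: false do
    3                                           # block supplies the default value
  end
end

Connection.timeout = 5
Connection.new.timeout        # => 5
Connection.retries            # => 3
Connection.new.retries        # => 3
Connection.new.retries = 1    # => NoMethodError (instance_writer: false)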
+ # + # class User + # include ActiveSupport::Configurable + # config_accessor :hair_colors do + # [:brown, :black, :blonde, :red] + # end + # end + # + # User.hair_colors # => [:brown, :black, :blonde, :red] + def config_accessor(*names) + options = names.extract_options! + + names.each do |name| + raise NameError.new("invalid config attribute name") unless /\A[_A-Za-z]\w*\z/.match?(name) + + reader, reader_line = "def #{name}; config.#{name}; end", __LINE__ + writer, writer_line = "def #{name}=(value); config.#{name} = value; end", __LINE__ + + singleton_class.class_eval reader, __FILE__, reader_line + singleton_class.class_eval writer, __FILE__, writer_line + + unless options[:instance_accessor] == false + class_eval reader, __FILE__, reader_line unless options[:instance_reader] == false + class_eval writer, __FILE__, writer_line unless options[:instance_writer] == false + end + send("#{name}=", yield) if block_given? + end + end + private :config_accessor + end + + # Reads and writes attributes from a configuration OrderedHash. + # + # require 'active_support/configurable' + # + # class User + # include ActiveSupport::Configurable + # end + # + # user = User.new + # + # user.config.allowed_access = true + # user.config.level = 1 + # + # user.config.allowed_access # => true + # user.config.level # => 1 + def config + @_config ||= self.class.config.inheritable_copy + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext.rb new file mode 100644 index 0000000..f590605 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +Dir.glob(File.expand_path("core_ext/*.rb", __dir__)).each do |path| + require path +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array.rb new file mode 100644 index 0000000..6d83b76 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/array/access" +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/array/grouping" +require "active_support/core_ext/array/prepend_and_append" +require "active_support/core_ext/array/inquiry" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/access.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/access.rb new file mode 100644 index 0000000..b7ff7a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/access.rb @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +class Array + # Returns the tail of the array from +position+. + # + # %w( a b c d ).from(0) # => ["a", "b", "c", "d"] + # %w( a b c d ).from(2) # => ["c", "d"] + # %w( a b c d ).from(10) # => [] + # %w().from(0) # => [] + # %w( a b c d ).from(-2) # => ["c", "d"] + # %w( a b c ).from(-10) # => [] + def from(position) + self[position, length] || [] + end + + # Returns the beginning of the array up to +position+. 
+ # + # %w( a b c d ).to(0) # => ["a"] + # %w( a b c d ).to(2) # => ["a", "b", "c"] + # %w( a b c d ).to(10) # => ["a", "b", "c", "d"] + # %w().to(0) # => [] + # %w( a b c d ).to(-2) # => ["a", "b", "c"] + # %w( a b c ).to(-10) # => [] + def to(position) + if position >= 0 + take position + 1 + else + self[0..position] + end + end + + # Returns a copy of the Array without the specified elements. + # + # people = ["David", "Rafael", "Aaron", "Todd"] + # people.without "Aaron", "Todd" + # # => ["David", "Rafael"] + # + # Note: This is an optimization of Enumerable#without that uses Array#- + # instead of Array#reject for performance reasons. + def without(*elements) + self - elements + end + + # Equal to self[1]. + # + # %w( a b c d e ).second # => "b" + def second + self[1] + end + + # Equal to self[2]. + # + # %w( a b c d e ).third # => "c" + def third + self[2] + end + + # Equal to self[3]. + # + # %w( a b c d e ).fourth # => "d" + def fourth + self[3] + end + + # Equal to self[4]. + # + # %w( a b c d e ).fifth # => "e" + def fifth + self[4] + end + + # Equal to self[41]. Also known as accessing "the reddit". + # + # (1..42).to_a.forty_two # => 42 + def forty_two + self[41] + end + + # Equal to self[-3]. + # + # %w( a b c d e ).third_to_last # => "c" + def third_to_last + self[-3] + end + + # Equal to self[-2]. + # + # %w( a b c d e ).second_to_last # => "d" + def second_to_last + self[-2] + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/conversions.rb new file mode 100644 index 0000000..ea688ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/conversions.rb @@ -0,0 +1,213 @@ +# frozen_string_literal: true + +require "active_support/xml_mini" +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" + +class Array + # Converts the array to a comma-separated sentence where the last element is + # joined by the connector word. + # + # You can pass the following options to change the default behavior. If you + # pass an option key that doesn't exist in the list below, it will raise an + # ArgumentError. + # + # ==== Options + # + # * :words_connector - The sign or word used to join the elements + # in arrays with two or more elements (default: ", "). + # * :two_words_connector - The sign or word used to join the elements + # in arrays with two elements (default: " and "). + # * :last_word_connector - The sign or word used to join the last element + # in arrays with three or more elements (default: ", and "). + # * :locale - If +i18n+ is available, you can set a locale and use + # the connector options defined on the 'support.array' namespace in the + # corresponding dictionary file. + # + # ==== Examples + # + # [].to_sentence # => "" + # ['one'].to_sentence # => "one" + # ['one', 'two'].to_sentence # => "one and two" + # ['one', 'two', 'three'].to_sentence # => "one, two, and three" + # + # ['one', 'two'].to_sentence(passing: 'invalid option') + # # => ArgumentError: Unknown key: :passing. 
Valid keys are: :words_connector, :two_words_connector, :last_word_connector, :locale + # + # ['one', 'two'].to_sentence(two_words_connector: '-') + # # => "one-two" + # + # ['one', 'two', 'three'].to_sentence(words_connector: ' or ', last_word_connector: ' or at least ') + # # => "one or two or at least three" + # + # Using :locale option: + # + # # Given this locale dictionary: + # # + # # es: + # # support: + # # array: + # # words_connector: " o " + # # two_words_connector: " y " + # # last_word_connector: " o al menos " + # + # ['uno', 'dos'].to_sentence(locale: :es) + # # => "uno y dos" + # + # ['uno', 'dos', 'tres'].to_sentence(locale: :es) + # # => "uno o dos o al menos tres" + def to_sentence(options = {}) + options.assert_valid_keys(:words_connector, :two_words_connector, :last_word_connector, :locale) + + default_connectors = { + words_connector: ", ", + two_words_connector: " and ", + last_word_connector: ", and " + } + if defined?(I18n) + i18n_connectors = I18n.translate(:'support.array', locale: options[:locale], default: {}) + default_connectors.merge!(i18n_connectors) + end + options = default_connectors.merge!(options) + + case length + when 0 + "" + when 1 + "#{self[0]}" + when 2 + "#{self[0]}#{options[:two_words_connector]}#{self[1]}" + else + "#{self[0...-1].join(options[:words_connector])}#{options[:last_word_connector]}#{self[-1]}" + end + end + + # Extends Array#to_s to convert a collection of elements into a + # comma separated id list if :db argument is given as the format. + # + # Blog.all.to_formatted_s(:db) # => "1,2,3" + # Blog.none.to_formatted_s(:db) # => "null" + # [1,2].to_formatted_s # => "[1, 2]" + def to_formatted_s(format = :default) + case format + when :db + if empty? + "null" + else + collect(&:id).join(",") + end + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Returns a string that represents the array in XML by invoking +to_xml+ + # on each element. Active Record collections delegate their representation + # in XML to this method. + # + # All elements are expected to respond to +to_xml+, if any of them does + # not then an exception is raised. + # + # The root node reflects the class name of the first element in plural + # if all elements belong to the same type and that's not Hash: + # + # customer.projects.to_xml + # + # + # + # + # 20000.0 + # 1567 + # 2008-04-09 + # ... + # + # + # 57230.0 + # 1567 + # 2008-04-15 + # ... + # + # + # + # Otherwise the root element is "objects": + # + # [{ foo: 1, bar: 2}, { baz: 3}].to_xml + # + # + # + # + # 2 + # 1 + # + # + # 3 + # + # + # + # If the collection is empty the root element is "nil-classes" by default: + # + # [].to_xml + # + # + # + # + # To ensure a meaningful root element use the :root option: + # + # customer_with_no_projects.projects.to_xml(root: 'projects') + # + # + # + # + # By default name of the node for the children of root is root.singularize. + # You can change it with the :children option. + # + # The +options+ hash is passed downwards: + # + # Message.all.to_xml(skip_types: true) + # + # + # + # + # 2008-03-07T09:58:18+01:00 + # 1 + # 1 + # 2008-03-07T09:58:18+01:00 + # 1 + # + # + # + def to_xml(options = {}) + require "active_support/builder" unless defined?(Builder) + + options = options.dup + options[:indent] ||= 2 + options[:builder] ||= Builder::XmlMarkup.new(indent: options[:indent]) + options[:root] ||= \ + if first.class != Hash && all? 
{ |e| e.is_a?(first.class) } + underscored = ActiveSupport::Inflector.underscore(first.class.name) + ActiveSupport::Inflector.pluralize(underscored).tr("/", "_") + else + "objects" + end + + builder = options[:builder] + builder.instruct! unless options.delete(:skip_instruct) + + root = ActiveSupport::XmlMini.rename_key(options[:root].to_s, options) + children = options.delete(:children) || root.singularize + attributes = options[:skip_types] ? {} : { type: "array" } + + if empty? + builder.tag!(root, attributes) + else + builder.tag!(root, attributes) do + each { |value| ActiveSupport::XmlMini.to_tag(children, value, options) } + yield builder if block_given? + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/extract_options.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/extract_options.rb new file mode 100644 index 0000000..8c7cb2e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/extract_options.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class Hash + # By default, only instances of Hash itself are extractable. + # Subclasses of Hash may implement this method and return + # true to declare themselves as extractable. If a Hash + # is extractable, Array#extract_options! pops it from + # the Array when it is the last element of the Array. + def extractable_options? + instance_of?(Hash) + end +end + +class Array + # Extracts options from a set of arguments. Removes and returns the last + # element in the array if it's a hash, otherwise returns a blank hash. + # + # def options(*args) + # args.extract_options! + # end + # + # options(1, 2) # => {} + # options(1, 2, a: :b) # => {:a=>:b} + def extract_options! + if last.is_a?(Hash) && last.extractable_options? + pop + else + {} + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/grouping.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/grouping.rb new file mode 100644 index 0000000..67e760b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/grouping.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +class Array + # Splits or iterates over the array in groups of size +number+, + # padding any remaining slots with +fill_with+ unless it is +false+. + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups_of(3) {|group| p group} + # ["1", "2", "3"] + # ["4", "5", "6"] + # ["7", "8", "9"] + # ["10", nil, nil] + # + # %w(1 2 3 4 5).in_groups_of(2, ' ') {|group| p group} + # ["1", "2"] + # ["3", "4"] + # ["5", " "] + # + # %w(1 2 3 4 5).in_groups_of(2, false) {|group| p group} + # ["1", "2"] + # ["3", "4"] + # ["5"] + def in_groups_of(number, fill_with = nil) + if number.to_i <= 0 + raise ArgumentError, + "Group size must be a positive integer, was #{number.inspect}" + end + + if fill_with == false + collection = self + else + # size % number gives how many extra we have; + # subtracting from number gives how many to add; + # modulo number ensures we don't add group of just fill. + padding = (number - size % number) % number + collection = dup.concat(Array.new(padding, fill_with)) + end + + if block_given? 
+ collection.each_slice(number) { |slice| yield(slice) } + else + collection.each_slice(number).to_a + end + end + + # Splits or iterates over the array in +number+ of groups, padding any + # remaining slots with +fill_with+ unless it is +false+. + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups(3) {|group| p group} + # ["1", "2", "3", "4"] + # ["5", "6", "7", nil] + # ["8", "9", "10", nil] + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups(3, ' ') {|group| p group} + # ["1", "2", "3", "4"] + # ["5", "6", "7", " "] + # ["8", "9", "10", " "] + # + # %w(1 2 3 4 5 6 7).in_groups(3, false) {|group| p group} + # ["1", "2", "3"] + # ["4", "5"] + # ["6", "7"] + def in_groups(number, fill_with = nil) + # size.div number gives minor group size; + # size % number gives how many objects need extra accommodation; + # each group hold either division or division + 1 items. + division = size.div number + modulo = size % number + + # create a new array avoiding dup + groups = [] + start = 0 + + number.times do |index| + length = division + (modulo > 0 && modulo > index ? 1 : 0) + groups << last_group = slice(start, length) + last_group << fill_with if fill_with != false && + modulo > 0 && length == division + start += length + end + + if block_given? + groups.each { |g| yield(g) } + else + groups + end + end + + # Divides the array into one or more subarrays based on a delimiting +value+ + # or the result of an optional block. + # + # [1, 2, 3, 4, 5].split(3) # => [[1, 2], [4, 5]] + # (1..10).to_a.split { |i| i % 3 == 0 } # => [[1, 2], [4, 5], [7, 8], [10]] + def split(value = nil) + arr = dup + result = [] + if block_given? + while (idx = arr.index { |i| yield i }) + result << arr.shift(idx) + arr.shift + end + else + while (idx = arr.index(value)) + result << arr.shift(idx) + arr.shift + end + end + result << arr + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/inquiry.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/inquiry.rb new file mode 100644 index 0000000..92c61bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/inquiry.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +require "active_support/array_inquirer" + +class Array + # Wraps the array in an +ArrayInquirer+ object, which gives a friendlier way + # to check its string-like contents. + # + # pets = [:cat, :dog].inquiry + # + # pets.cat? # => true + # pets.ferret? # => false + # + # pets.any?(:cat, :ferret) # => true + # pets.any?(:ferret, :alligator) # => false + def inquiry + ActiveSupport::ArrayInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/prepend_and_append.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/prepend_and_append.rb new file mode 100644 index 0000000..661971d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/prepend_and_append.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +class Array + # The human way of thinking about adding stuff to the end of a list is with append. + alias_method :append, :push unless [].respond_to?(:append) + + # The human way of thinking about adding stuff to the beginning of a list is with prepend. 
+ alias_method :prepend, :unshift unless [].respond_to?(:prepend) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/wrap.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/wrap.rb new file mode 100644 index 0000000..d62f97e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/wrap.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +class Array + # Wraps its argument in an array unless it is already an array (or array-like). + # + # Specifically: + # + # * If the argument is +nil+ an empty array is returned. + # * Otherwise, if the argument responds to +to_ary+ it is invoked, and its result returned. + # * Otherwise, returns an array with the argument as its single element. + # + # Array.wrap(nil) # => [] + # Array.wrap([1, 2, 3]) # => [1, 2, 3] + # Array.wrap(0) # => [0] + # + # This method is similar in purpose to Kernel#Array, but there are some differences: + # + # * If the argument responds to +to_ary+ the method is invoked. Kernel#Array + # moves on to try +to_a+ if the returned value is +nil+, but Array.wrap returns + # an array with the argument as its single element right away. + # * If the returned value from +to_ary+ is neither +nil+ nor an +Array+ object, Kernel#Array + # raises an exception, while Array.wrap does not, it just returns the value. + # * It does not call +to_a+ on the argument, if the argument does not respond to +to_ary+ + # it returns an array with the argument as its single element. + # + # The last point is easily explained with some enumerables: + # + # Array(foo: :bar) # => [[:foo, :bar]] + # Array.wrap(foo: :bar) # => [{:foo=>:bar}] + # + # There's also a related idiom that uses the splat operator: + # + # [*object] + # + # which returns [] for +nil+, but calls to Array(object) otherwise. + # + # The differences with Kernel#Array explained above + # apply to the rest of objects. + def self.wrap(object) + if object.nil? + [] + elsif object.respond_to?(:to_ary) + object.to_ary || [object] + else + [object] + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/benchmark.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/benchmark.rb new file mode 100644 index 0000000..641b58c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/benchmark.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "benchmark" + +class << Benchmark + # Benchmark realtime in milliseconds. 
+ # + # Benchmark.realtime { User.all } + # # => 8.0e-05 + # + # Benchmark.ms { User.all } + # # => 0.074 + def ms + 1000 * realtime { yield } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal.rb new file mode 100644 index 0000000..9e6a9d6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal/conversions.rb new file mode 100644 index 0000000..52bd229 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal/conversions.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "bigdecimal" +require "bigdecimal/util" + +module ActiveSupport + module BigDecimalWithDefaultFormat #:nodoc: + def to_s(format = "F") + super(format) + end + end +end + +BigDecimal.prepend(ActiveSupport::BigDecimalWithDefaultFormat) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class.rb new file mode 100644 index 0000000..1c110fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/class/subclasses" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute.rb new file mode 100644 index 0000000..7928efb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute.rb @@ -0,0 +1,146 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/array/extract_options" + +class Class + # Declare a class-level attribute whose value is inheritable by subclasses. + # Subclasses can change their own value and it will not impact parent class. + # + # ==== Options + # + # * :instance_reader - Sets the instance reader method (defaults to true). + # * :instance_writer - Sets the instance writer method (defaults to true). + # * :instance_accessor - Sets both instance methods (defaults to true). + # * :instance_predicate - Sets a predicate method (defaults to true). + # * :default - Sets a default value for the attribute (defaults to nil). + # + # ==== Examples + # + # class Base + # class_attribute :setting + # end + # + # class Subclass < Base + # end + # + # Base.setting = true + # Subclass.setting # => true + # Subclass.setting = false + # Subclass.setting # => false + # Base.setting # => true + # + # In the above case as long as Subclass does not assign a value to setting + # by performing Subclass.setting = _something_, Subclass.setting + # would read value assigned to parent class. Once Subclass assigns a value then + # the value assigned by Subclass would be returned. 
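A compact sketch of the inheritance behaviour and options described above (illustrative only; the Shape and Circle names are invented):

require "active_support/core_ext/class/attribute"

class Shape
  class_attribute :sides, default: 4, instance_writer: false
end

class Circle < Shape
  self.sides = nil
end

Shape.sides          # => 4     (the default: option)
Circle.sides         # => nil   (subclass override does not touch the parent)
Shape.sides?         # => true  (predicate, unless instance_predicate: false)
Circle.sides?        # => false
Circle.new.sides = 1 # => NoMethodError (instance_writer: false)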
+ # + # This matches normal Ruby method inheritance: think of writing an attribute + # on a subclass as overriding the reader method. However, you need to be aware + # when using +class_attribute+ with mutable structures as +Array+ or +Hash+. + # In such cases, you don't want to do changes in place. Instead use setters: + # + # Base.setting = [] + # Base.setting # => [] + # Subclass.setting # => [] + # + # # Appending in child changes both parent and child because it is the same object: + # Subclass.setting << :foo + # Base.setting # => [:foo] + # Subclass.setting # => [:foo] + # + # # Use setters to not propagate changes: + # Base.setting = [] + # Subclass.setting += [:foo] + # Base.setting # => [] + # Subclass.setting # => [:foo] + # + # For convenience, an instance predicate method is defined as well. + # To skip it, pass instance_predicate: false. + # + # Subclass.setting? # => false + # + # Instances may overwrite the class value in the same way: + # + # Base.setting = true + # object = Base.new + # object.setting # => true + # object.setting = false + # object.setting # => false + # Base.setting # => true + # + # To opt out of the instance reader method, pass instance_reader: false. + # + # object.setting # => NoMethodError + # object.setting? # => NoMethodError + # + # To opt out of the instance writer method, pass instance_writer: false. + # + # object.setting = false # => NoMethodError + # + # To opt out of both instance methods, pass instance_accessor: false. + # + # To set a default value for the attribute, pass default:, like so: + # + # class_attribute :settings, default: {} + def class_attribute(*attrs) + options = attrs.extract_options! + instance_reader = options.fetch(:instance_accessor, true) && options.fetch(:instance_reader, true) + instance_writer = options.fetch(:instance_accessor, true) && options.fetch(:instance_writer, true) + instance_predicate = options.fetch(:instance_predicate, true) + default_value = options.fetch(:default, nil) + + attrs.each do |name| + singleton_class.silence_redefinition_of_method(name) + define_singleton_method(name) { nil } + + singleton_class.silence_redefinition_of_method("#{name}?") + define_singleton_method("#{name}?") { !!public_send(name) } if instance_predicate + + ivar = "@#{name}" + + singleton_class.silence_redefinition_of_method("#{name}=") + define_singleton_method("#{name}=") do |val| + singleton_class.class_eval do + redefine_method(name) { val } + end + + if singleton_class? + class_eval do + redefine_method(name) do + if instance_variable_defined? ivar + instance_variable_get ivar + else + singleton_class.send name + end + end + end + end + val + end + + if instance_reader + redefine_method(name) do + if instance_variable_defined?(ivar) + instance_variable_get ivar + else + self.class.public_send name + end + end + + redefine_method("#{name}?") { !!public_send(name) } if instance_predicate + end + + if instance_writer + redefine_method("#{name}=") do |val| + instance_variable_set ivar, val + end + end + + unless default_value.nil? 
+ self.send("#{name}=", default_value) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute_accessors.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute_accessors.rb new file mode 100644 index 0000000..a77354e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute_accessors.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +# cattr_* became mattr_* aliases in 7dfbd91b0780fbd6a1dd9bfbc176e10894871d2d, +# but we keep this around for libraries that directly require it knowing they +# want cattr_*. No need to deprecate. +require "active_support/core_ext/module/attribute_accessors" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/subclasses.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/subclasses.rb new file mode 100644 index 0000000..75e6533 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/subclasses.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +class Class + begin + # Test if this Ruby supports each_object against singleton_class + ObjectSpace.each_object(Numeric.singleton_class) {} + + # Returns an array with all classes that are < than its receiver. + # + # class C; end + # C.descendants # => [] + # + # class B < C; end + # C.descendants # => [B] + # + # class A < B; end + # C.descendants # => [B, A] + # + # class D < C; end + # C.descendants # => [B, A, D] + def descendants + descendants = [] + ObjectSpace.each_object(singleton_class) do |k| + next if k.singleton_class? + descendants.unshift k unless k == self + end + descendants + end + rescue StandardError # JRuby 9.0.4.0 and earlier + def descendants + descendants = [] + ObjectSpace.each_object(Class) do |k| + descendants.unshift k if k < self + end + descendants.uniq! + descendants + end + end + + # Returns an array with the direct children of +self+. + # + # class Foo; end + # class Bar < Foo; end + # class Baz < Bar; end + # + # Foo.subclasses # => [Bar] + def subclasses + subclasses, chain = [], descendants + chain.each do |k| + subclasses << k unless chain.any? { |c| c > k } + end + subclasses + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date.rb new file mode 100644 index 0000000..cce73f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date/acts_like" +require "active_support/core_ext/date/blank" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/date/conversions" +require "active_support/core_ext/date/zones" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/acts_like.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/acts_like.rb new file mode 100644 index 0000000..c8077f3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Date + # Duck-types as a Date-like class. See Object#acts_like?. 
+ def acts_like_date? + true + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/blank.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/blank.rb new file mode 100644 index 0000000..e6271c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/blank.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "date" + +class Date #:nodoc: + # No Date is blank: + # + # Date.today.blank? # => false + # + # @return [false] + def blank? + false + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/calculations.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/calculations.rb new file mode 100644 index 0000000..1cd7acb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/calculations.rb @@ -0,0 +1,145 @@ +# frozen_string_literal: true + +require "date" +require "active_support/duration" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date/zones" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" + +class Date + include DateAndTime::Calculations + + class << self + attr_accessor :beginning_of_week_default + + # Returns the week start (e.g. :monday) for the current request, if this has been set (via Date.beginning_of_week=). + # If Date.beginning_of_week has not been set for the current request, returns the week start specified in config.beginning_of_week. + # If no config.beginning_of_week was specified, returns :monday. + def beginning_of_week + Thread.current[:beginning_of_week] || beginning_of_week_default || :monday + end + + # Sets Date.beginning_of_week to a week start (e.g. :monday) for current request/thread. + # + # This method accepts any of the following day symbols: + # :monday, :tuesday, :wednesday, :thursday, :friday, :saturday, :sunday + def beginning_of_week=(week_start) + Thread.current[:beginning_of_week] = find_beginning_of_week!(week_start) + end + + # Returns week start day symbol (e.g. :monday), or raises an +ArgumentError+ for invalid day symbol. + def find_beginning_of_week!(week_start) + raise ArgumentError, "Invalid beginning of week: #{week_start}" unless ::Date::DAYS_INTO_WEEK.key?(week_start) + week_start + end + + # Returns a new Date representing the date 1 day ago (i.e. yesterday's date). + def yesterday + ::Date.current.yesterday + end + + # Returns a new Date representing the date 1 day after today (i.e. tomorrow's date). + def tomorrow + ::Date.current.tomorrow + end + + # Returns Time.zone.today when Time.zone or config.time_zone are set, otherwise just returns Date.today. + def current + ::Time.zone ? ::Time.zone.today : ::Date.today + end + end + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + # and then subtracts the specified number of seconds. 
+ def ago(seconds) + in_time_zone.since(-seconds) + end + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + # and then adds the specified number of seconds + def since(seconds) + in_time_zone.since(seconds) + end + alias :in :since + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + def beginning_of_day + in_time_zone + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the middle of the day (12:00) + def middle_of_day + in_time_zone.middle_of_day + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the end of the day (23:59:59) + def end_of_day + in_time_zone.end_of_day + end + alias :at_end_of_day :end_of_day + + def plus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + plus_with_duration(-other) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Provides precise Date calculations for years, months, and days. The +options+ parameter takes a hash with + # any of these keys: :years, :months, :weeks, :days. + def advance(options) + options = options.dup + d = self + d = d >> options.delete(:years) * 12 if options[:years] + d = d >> options.delete(:months) if options[:months] + d = d + options.delete(:weeks) * 7 if options[:weeks] + d = d + options.delete(:days) if options[:days] + d + end + + # Returns a new Date where one or more of the elements have been changed according to the +options+ parameter. + # The +options+ parameter is a hash with a combination of these keys: :year, :month, :day. + # + # Date.new(2007, 5, 12).change(day: 1) # => Date.new(2007, 5, 1) + # Date.new(2007, 5, 12).change(year: 2005, month: 1) # => Date.new(2005, 1, 12) + def change(options) + ::Date.new( + options.fetch(:year, year), + options.fetch(:month, month), + options.fetch(:day, day) + ) + end + + # Allow Date to be compared with Time by converting to DateTime and relying on the <=> from there. 
+ def compare_with_coercion(other) + if other.is_a?(Time) + to_datetime <=> other + else + compare_without_coercion(other) + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/conversions.rb new file mode 100644 index 0000000..870119d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/conversions.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true + +require "date" +require "active_support/inflector/methods" +require "active_support/core_ext/date/zones" +require "active_support/core_ext/module/redefine_method" + +class Date + DATE_FORMATS = { + short: "%d %b", + long: "%B %d, %Y", + db: "%Y-%m-%d", + number: "%Y%m%d", + long_ordinal: lambda { |date| + day_format = ActiveSupport::Inflector.ordinalize(date.day) + date.strftime("%B #{day_format}, %Y") # => "April 25th, 2007" + }, + rfc822: "%d %b %Y", + iso8601: lambda { |date| date.iso8601 } + } + + # Convert to a formatted string. See DATE_FORMATS for predefined formats. + # + # This method is aliased to to_s. + # + # date = Date.new(2007, 11, 10) # => Sat, 10 Nov 2007 + # + # date.to_formatted_s(:db) # => "2007-11-10" + # date.to_s(:db) # => "2007-11-10" + # + # date.to_formatted_s(:short) # => "10 Nov" + # date.to_formatted_s(:number) # => "20071110" + # date.to_formatted_s(:long) # => "November 10, 2007" + # date.to_formatted_s(:long_ordinal) # => "November 10th, 2007" + # date.to_formatted_s(:rfc822) # => "10 Nov 2007" + # date.to_formatted_s(:iso8601) # => "2007-11-10" + # + # == Adding your own date formats to to_formatted_s + # You can add your own formats to the Date::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a date argument as the value. + # + # # config/initializers/date_formats.rb + # Date::DATE_FORMATS[:month_and_year] = '%B %Y' + # Date::DATE_FORMATS[:short_ordinal] = ->(date) { date.strftime("%B #{date.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = DATE_FORMATS[format] + if formatter.respond_to?(:call) + formatter.call(self).to_s + else + strftime(formatter) + end + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005" + def readable_inspect + strftime("%a, %d %b %Y") + end + alias_method :default_inspect, :inspect + alias_method :inspect, :readable_inspect + + silence_redefinition_of_method :to_time + + # Converts a Date instance to a Time, where the time is set to the beginning of the day. + # The timezone can be either :local or :utc (default :local). + # + # date = Date.new(2007, 11, 10) # => Sat, 10 Nov 2007 + # + # date.to_time # => 2007-11-10 00:00:00 0800 + # date.to_time(:local) # => 2007-11-10 00:00:00 0800 + # + # date.to_time(:utc) # => 2007-11-10 00:00:00 UTC + # + # NOTE: The :local timezone is Ruby's *process* timezone, i.e. ENV['TZ']. + # If the *application's* timezone is needed, then use +in_time_zone+ instead. + def to_time(form = :local) + raise ArgumentError, "Expected :local or :utc, got #{form.inspect}." 
unless [:local, :utc].include?(form) + ::Time.send(form, year, month, day) + end + + silence_redefinition_of_method :xmlschema + + # Returns a string which represents the time in used time zone as DateTime + # defined by XML Schema: + # + # date = Date.new(2015, 05, 23) # => Sat, 23 May 2015 + # date.xmlschema # => "2015-05-23T00:00:00+04:00" + def xmlschema + in_time_zone.xmlschema + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/zones.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/zones.rb new file mode 100644 index 0000000..2dcf97c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/zones.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/date_and_time/zones" + +class Date + include DateAndTime::Zones +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/calculations.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/calculations.rb new file mode 100644 index 0000000..f6cb1a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/calculations.rb @@ -0,0 +1,374 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/try" + +module DateAndTime + module Calculations + DAYS_INTO_WEEK = { + monday: 0, + tuesday: 1, + wednesday: 2, + thursday: 3, + friday: 4, + saturday: 5, + sunday: 6 + } + WEEKEND_DAYS = [ 6, 0 ] + + # Returns a new date/time representing yesterday. + def yesterday + advance(days: -1) + end + + # Returns a new date/time the specified number of days ago. + def prev_day(days = 1) + advance(days: -days) + end + + # Returns a new date/time representing tomorrow. + def tomorrow + advance(days: 1) + end + + # Returns a new date/time the specified number of days in the future. + def next_day(days = 1) + advance(days: days) + end + + # Returns true if the date/time is today. + def today? + to_date == ::Date.current + end + + # Returns true if the date/time is in the past. + def past? + self < self.class.current + end + + # Returns true if the date/time is in the future. + def future? + self > self.class.current + end + + # Returns true if the date/time falls on a Saturday or Sunday. + def on_weekend? + WEEKEND_DAYS.include?(wday) + end + + # Returns true if the date/time does not fall on a Saturday or Sunday. + def on_weekday? + !WEEKEND_DAYS.include?(wday) + end + + # Returns a new date/time the specified number of days ago. + def days_ago(days) + advance(days: -days) + end + + # Returns a new date/time the specified number of days in the future. + def days_since(days) + advance(days: days) + end + + # Returns a new date/time the specified number of weeks ago. + def weeks_ago(weeks) + advance(weeks: -weeks) + end + + # Returns a new date/time the specified number of weeks in the future. + def weeks_since(weeks) + advance(weeks: weeks) + end + + # Returns a new date/time the specified number of months ago. + def months_ago(months) + advance(months: -months) + end + + # Returns a new date/time the specified number of months in the future. + def months_since(months) + advance(months: months) + end + + # Returns a new date/time the specified number of years ago. + def years_ago(years) + advance(years: -years) + end + + # Returns a new date/time the specified number of years in the future. 
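The custom-format hook from conversions.rb above, sketched end to end; the :month_and_year key mirrors the example in the comments:

    require "active_support/core_ext/date/conversions"

    date = Date.new(2007, 11, 10)
    date.to_s(:db)                                 # => "2007-11-10"
    Date::DATE_FORMATS[:month_and_year] = "%B %Y"  # register a custom format
    date.to_s(:month_and_year)                     # => "November 2007"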
+ def years_since(years) + advance(years: years) + end + + # Returns a new date/time at the start of the month. + # + # today = Date.today # => Thu, 18 Jun 2015 + # today.beginning_of_month # => Mon, 01 Jun 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Thu, 18 Jun 2015 15:23:13 +0000 + # now.beginning_of_month # => Mon, 01 Jun 2015 00:00:00 +0000 + def beginning_of_month + first_hour(change(day: 1)) + end + alias :at_beginning_of_month :beginning_of_month + + # Returns a new date/time at the start of the quarter. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.beginning_of_quarter # => Wed, 01 Jul 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.beginning_of_quarter # => Wed, 01 Jul 2015 00:00:00 +0000 + def beginning_of_quarter + first_quarter_month = [10, 7, 4, 1].detect { |m| m <= month } + beginning_of_month.change(month: first_quarter_month) + end + alias :at_beginning_of_quarter :beginning_of_quarter + + # Returns a new date/time at the end of the quarter. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.end_of_quarter # => Wed, 30 Sep 2015 + # + # +DateTime+ objects will have a time set to 23:59:59. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.end_of_quarter # => Wed, 30 Sep 2015 23:59:59 +0000 + def end_of_quarter + last_quarter_month = [3, 6, 9, 12].detect { |m| m >= month } + beginning_of_month.change(month: last_quarter_month).end_of_month + end + alias :at_end_of_quarter :end_of_quarter + + # Returns a new date/time at the beginning of the year. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.beginning_of_year # => Thu, 01 Jan 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.beginning_of_year # => Thu, 01 Jan 2015 00:00:00 +0000 + def beginning_of_year + change(month: 1).beginning_of_month + end + alias :at_beginning_of_year :beginning_of_year + + # Returns a new date/time representing the given day in the next week. + # + # today = Date.today # => Thu, 07 May 2015 + # today.next_week # => Mon, 11 May 2015 + # + # The +given_day_in_next_week+ defaults to the beginning of the week + # which is determined by +Date.beginning_of_week+ or +config.beginning_of_week+ + # when set. + # + # today = Date.today # => Thu, 07 May 2015 + # today.next_week(:friday) # => Fri, 15 May 2015 + # + # +DateTime+ objects have their time set to 0:00 unless +same_time+ is true. + # + # now = DateTime.current # => Thu, 07 May 2015 13:31:16 +0000 + # now.next_week # => Mon, 11 May 2015 00:00:00 +0000 + def next_week(given_day_in_next_week = Date.beginning_of_week, same_time: false) + result = first_hour(weeks_since(1).beginning_of_week.days_since(days_span(given_day_in_next_week))) + same_time ? copy_time_to(result) : result + end + + # Returns a new date/time representing the next weekday. + def next_weekday + if next_day.on_weekend? + next_week(:monday, same_time: true) + else + next_day + end + end + + # Returns a new date/time the specified number of months in the future. + def next_month(months = 1) + advance(months: months) + end + + # Short-hand for months_since(3) + def next_quarter + months_since(3) + end + + # Returns a new date/time the specified number of years in the future. 
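A quick sketch of the quarter and next_week helpers documented above, reusing the Fri, 10 Jul 2015 date from the comments and assuming the default Monday week start:

    require "active_support/core_ext/date/calculations"

    d = Date.new(2015, 7, 10)   # a Friday
    d.beginning_of_quarter      # => Wed, 01 Jul 2015
    d.end_of_quarter            # => Wed, 30 Sep 2015
    d.next_week(:friday)        # => Fri, 17 Jul 2015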
+ def next_year(years = 1) + advance(years: years) + end + + # Returns a new date/time representing the given day in the previous week. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # DateTime objects have their time set to 0:00 unless +same_time+ is true. + def prev_week(start_day = Date.beginning_of_week, same_time: false) + result = first_hour(weeks_ago(1).beginning_of_week.days_since(days_span(start_day))) + same_time ? copy_time_to(result) : result + end + alias_method :last_week, :prev_week + + # Returns a new date/time representing the previous weekday. + def prev_weekday + if prev_day.on_weekend? + copy_time_to(beginning_of_week(:friday)) + else + prev_day + end + end + alias_method :last_weekday, :prev_weekday + + # Returns a new date/time the specified number of months ago. + def prev_month(months = 1) + advance(months: -months) + end + + # Short-hand for months_ago(1). + def last_month + months_ago(1) + end + + # Short-hand for months_ago(3). + def prev_quarter + months_ago(3) + end + alias_method :last_quarter, :prev_quarter + + # Returns a new date/time the specified number of years ago. + def prev_year(years = 1) + advance(years: -years) + end + + # Short-hand for years_ago(1). + def last_year + years_ago(1) + end + + # Returns the number of days to the start of the week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + def days_to_week_start(start_day = Date.beginning_of_week) + start_day_number = DAYS_INTO_WEEK[start_day] + current_day_number = wday != 0 ? wday - 1 : 6 + (current_day_number - start_day_number) % 7 + end + + # Returns a new date/time representing the start of this week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # +DateTime+ objects have their time set to 0:00. + def beginning_of_week(start_day = Date.beginning_of_week) + result = days_ago(days_to_week_start(start_day)) + acts_like?(:time) ? result.midnight : result + end + alias :at_beginning_of_week :beginning_of_week + + # Returns Monday of this week assuming that week starts on Monday. + # +DateTime+ objects have their time set to 0:00. + def monday + beginning_of_week(:monday) + end + + # Returns a new date/time representing the end of this week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # DateTime objects have their time set to 23:59:59. + def end_of_week(start_day = Date.beginning_of_week) + last_hour(days_since(6 - days_to_week_start(start_day))) + end + alias :at_end_of_week :end_of_week + + # Returns Sunday of this week assuming that week starts on Monday. + # +DateTime+ objects have their time set to 23:59:59. + def sunday + end_of_week(:monday) + end + + # Returns a new date/time representing the end of the month. + # DateTime objects will have a time set to 23:59:59. + def end_of_month + last_day = ::Time.days_in_month(month, year) + last_hour(days_since(last_day - day)) + end + alias :at_end_of_month :end_of_month + + # Returns a new date/time representing the end of the year. + # DateTime objects will have a time set to 23:59:59. + def end_of_year + change(month: 12).end_of_month + end + alias :at_end_of_year :end_of_year + + # Returns a Range representing the whole day of the current date/time. 
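The week-boundary arithmetic above in one small sketch, again with the documented sample Friday and the default :monday week start:

    require "active_support/core_ext/date/calculations"

    d = Date.new(2015, 7, 10)      # a Friday
    d.days_to_week_start           # => 4, counting back to Monday
    d.days_to_week_start(:sunday)  # => 5
    d.beginning_of_week            # => Mon, 06 Jul 2015
    d.sunday                       # => Sun, 12 Jul 2015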
+ def all_day + beginning_of_day..end_of_day + end + + # Returns a Range representing the whole week of the current date/time. + # Week starts on start_day, default is Date.beginning_of_week or config.beginning_of_week when set. + def all_week(start_day = Date.beginning_of_week) + beginning_of_week(start_day)..end_of_week(start_day) + end + + # Returns a Range representing the whole month of the current date/time. + def all_month + beginning_of_month..end_of_month + end + + # Returns a Range representing the whole quarter of the current date/time. + def all_quarter + beginning_of_quarter..end_of_quarter + end + + # Returns a Range representing the whole year of the current date/time. + def all_year + beginning_of_year..end_of_year + end + + # Returns a new date/time representing the next occurrence of the specified day of week. + # + # today = Date.today # => Thu, 14 Dec 2017 + # today.next_occurring(:monday) # => Mon, 18 Dec 2017 + # today.next_occurring(:thursday) # => Thu, 21 Dec 2017 + def next_occurring(day_of_week) + current_day_number = wday != 0 ? wday - 1 : 6 + from_now = DAYS_INTO_WEEK.fetch(day_of_week) - current_day_number + from_now += 7 unless from_now > 0 + advance(days: from_now) + end + + # Returns a new date/time representing the previous occurrence of the specified day of week. + # + # today = Date.today # => Thu, 14 Dec 2017 + # today.prev_occurring(:monday) # => Mon, 11 Dec 2017 + # today.prev_occurring(:thursday) # => Thu, 07 Dec 2017 + def prev_occurring(day_of_week) + current_day_number = wday != 0 ? wday - 1 : 6 + ago = current_day_number - DAYS_INTO_WEEK.fetch(day_of_week) + ago += 7 unless ago > 0 + advance(days: -ago) + end + + private + def first_hour(date_or_time) + date_or_time.acts_like?(:time) ? date_or_time.beginning_of_day : date_or_time + end + + def last_hour(date_or_time) + date_or_time.acts_like?(:time) ? date_or_time.end_of_day : date_or_time + end + + def days_span(day) + (DAYS_INTO_WEEK[day] - DAYS_INTO_WEEK[Date.beginning_of_week]) % 7 + end + + def copy_time_to(other) + other.change(hour: hour, min: min, sec: sec, nsec: try(:nsec)) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/compatibility.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/compatibility.rb new file mode 100644 index 0000000..d33c36e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/compatibility.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" + +module DateAndTime + module Compatibility + # If true, +to_time+ preserves the timezone offset of receiver. + # + # NOTE: With Ruby 2.4+ the default for +to_time+ changed from + # converting to the local system time, to preserving the offset + # of the receiver. For backwards compatibility we're overriding + # this behavior, but new apps will have an initializer that sets + # this to true, because the new behavior is preferred. 
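A minimal sketch of the flag declared just below; the module-level accessor comes from mattr_accessor, and new Rails apps flip it to true via an initializer as the comment notes:

    require "active_support/core_ext/date_and_time/compatibility"

    DateAndTime::Compatibility.preserve_timezone        # => false, the default declared just below
    DateAndTime::Compatibility.preserve_timezone = true # opt in to offset-preserving #to_time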
+ mattr_accessor :preserve_timezone, instance_writer: false, default: false + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/zones.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/zones.rb new file mode 100644 index 0000000..894fd9b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/zones.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module DateAndTime + module Zones + # Returns the simultaneous time in Time.zone if a zone is given or + # if Time.zone_default is set. Otherwise, it returns the current time. + # + # Time.zone = 'Hawaii' # => 'Hawaii' + # Time.utc(2000).in_time_zone # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Date.new(2000).in_time_zone # => Sat, 01 Jan 2000 00:00:00 HST -10:00 + # + # This method is similar to Time#localtime, except that it uses Time.zone as the local zone + # instead of the operating system's time zone. + # + # You can also pass in a TimeZone instance or string that identifies a TimeZone as an argument, + # and the conversion will be based on that zone instead of Time.zone. + # + # Time.utc(2000).in_time_zone('Alaska') # => Fri, 31 Dec 1999 15:00:00 AKST -09:00 + # Date.new(2000).in_time_zone('Alaska') # => Sat, 01 Jan 2000 00:00:00 AKST -09:00 + def in_time_zone(zone = ::Time.zone) + time_zone = ::Time.find_zone! zone + time = acts_like?(:time) ? self : nil + + if time_zone + time_with_zone(time, time_zone) + else + time || to_time + end + end + + private + + def time_with_zone(time, zone) + if time + ActiveSupport::TimeWithZone.new(time.utc? ? time : time.getutc, zone) + else + ActiveSupport::TimeWithZone.new(nil, zone, to_time(:utc)) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time.rb new file mode 100644 index 0000000..790dbee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_time/acts_like" +require "active_support/core_ext/date_time/blank" +require "active_support/core_ext/date_time/calculations" +require "active_support/core_ext/date_time/compatibility" +require "active_support/core_ext/date_time/conversions" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/acts_like.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/acts_like.rb new file mode 100644 index 0000000..5dccdfe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/acts_like.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/object/acts_like" + +class DateTime + # Duck-types as a Date-like class. See Object#acts_like?. + def acts_like_date? + true + end + + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? 
+ true + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/blank.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/blank.rb new file mode 100644 index 0000000..a52c8bc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/blank.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "date" + +class DateTime #:nodoc: + # No DateTime is ever blank: + # + # DateTime.now.blank? # => false + # + # @return [false] + def blank? + false + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/calculations.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/calculations.rb new file mode 100644 index 0000000..e61b23f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/calculations.rb @@ -0,0 +1,211 @@ +# frozen_string_literal: true + +require "date" + +class DateTime + class << self + # Returns Time.zone.now.to_datetime when Time.zone or + # config.time_zone are set, otherwise returns + # Time.now.to_datetime. + def current + ::Time.zone ? ::Time.zone.now.to_datetime : ::Time.now.to_datetime + end + end + + # Returns the number of seconds since 00:00:00. + # + # DateTime.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0 + # DateTime.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296 + # DateTime.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399 + def seconds_since_midnight + sec + (min * 60) + (hour * 3600) + end + + # Returns the number of seconds until 23:59:59. + # + # DateTime.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # DateTime.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # DateTime.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # DateTime.new(2012, 8, 29, 0, 0, 0.5).subsec # => (1/2) + def subsec + sec_fraction + end + + # Returns a new DateTime where one or more of the elements have been changed + # according to the +options+ parameter. The time options (:hour, + # :min, :sec) reset cascadingly, so if only the hour is + # passed, then minute and sec is set to 0. If the hour and minute is passed, + # then sec is set to 0. The +options+ parameter takes a hash with any of these + # keys: :year, :month, :day, :hour, + # :min, :sec, :offset, :start. + # + # DateTime.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => DateTime.new(2012, 8, 1, 22, 35, 0) + # DateTime.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => DateTime.new(1981, 8, 1, 22, 35, 0) + # DateTime.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => DateTime.new(1981, 8, 29, 0, 0, 0) + def change(options) + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_fraction = Rational(new_nsec, 1000000000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 
0 : Rational(nsec, 1000)) + new_fraction = Rational(new_usec, 1000000) + end + + raise ArgumentError, "argument out of range" if new_fraction >= 1 + + ::DateTime.civil( + options.fetch(:year, year), + options.fetch(:month, month), + options.fetch(:day, day), + options.fetch(:hour, hour), + options.fetch(:min, options[:hour] ? 0 : min), + options.fetch(:sec, (options[:hour] || options[:min]) ? 0 : sec) + new_fraction, + options.fetch(:offset, offset), + options.fetch(:start, start) + ) + end + + # Uses Date to provide precise Time calculations for years, months, and days. + # The +options+ parameter takes a hash with any of these keys: :years, + # :months, :weeks, :days, :hours, + # :minutes, :seconds. + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.advance(options) + datetime_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? + datetime_advanced_by_date + else + datetime_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new DateTime representing the time a number of seconds ago. + # Do not use this method in combination with x.months, use months_ago instead! + def ago(seconds) + since(-seconds) + end + + # Returns a new DateTime representing the time a number of seconds since the + # instance time. Do not use this method in combination with x.months, use + # months_since instead! + def since(seconds) + self + Rational(seconds.round, 86400) + end + alias :in :since + + # Returns a new DateTime representing the start of the day (0:00). + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new DateTime representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new DateTime representing the end of the day (23:59:59). + def end_of_day + change(hour: 23, min: 59, sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_day :end_of_day + + # Returns a new DateTime representing the start of the hour (hh:00:00). + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new DateTime representing the end of the hour (hh:59:59). + def end_of_hour + change(min: 59, sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new DateTime representing the start of the minute (hh:mm:00). + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new DateTime representing the end of the minute (hh:mm:59). + def end_of_minute + change(sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_minute :end_of_minute + + # Returns a Time instance of the simultaneous time in the system timezone. 
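A small sketch of the DateTime calculations above; it loads "active_support/time" (an entry point required elsewhere in this diff) so that Date#advance is available to DateTime#advance, and the sample timestamp is arbitrary:

    require "active_support/time"

    dt = DateTime.new(2012, 8, 29, 22, 35, 0)
    dt.seconds_since_midnight        # => 81300
    dt.change(day: 1)                # same wall-clock time on 2012-08-01
    dt.advance(months: 1, hours: 2)  # => 2012-09-30 00:35:00 +0000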
+ def localtime(utc_offset = nil) + utc = new_offset(0) + + Time.utc( + utc.year, utc.month, utc.day, + utc.hour, utc.min, utc.sec + utc.sec_fraction + ).getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns a Time instance of the simultaneous time in the UTC timezone. + # + # DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)) # => Mon, 21 Feb 2005 10:11:12 -0600 + # DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)).utc # => Mon, 21 Feb 2005 16:11:12 UTC + def utc + utc = new_offset(0) + + Time.utc( + utc.year, utc.month, utc.day, + utc.hour, utc.min, utc.sec + utc.sec_fraction + ) + end + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns +true+ if offset == 0. + def utc? + offset == 0 + end + + # Returns the offset value in seconds. + def utc_offset + (offset * 86400).to_i + end + + # Layers additional behavior on DateTime#<=> so that Time and + # ActiveSupport::TimeWithZone instances can be compared with a DateTime. + def <=>(other) + if other.respond_to? :to_datetime + super other.to_datetime rescue nil + else + super + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/compatibility.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/compatibility.rb new file mode 100644 index 0000000..7600a06 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/compatibility.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class DateTime + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return an instance of +Time+ with the same UTC offset + # as +self+ or an instance of +Time+ representing the same time + # in the local system timezone depending on the setting of + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? getlocal(utc_offset) : getlocal + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/conversions.rb new file mode 100644 index 0000000..29725c8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/conversions.rb @@ -0,0 +1,107 @@ +# frozen_string_literal: true + +require "date" +require "active_support/inflector/methods" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/date_time/calculations" +require "active_support/values/time_zone" + +class DateTime + # Convert to a formatted string. See Time::DATE_FORMATS for predefined formats. + # + # This method is aliased to to_s. 
+ # + # === Examples + # datetime = DateTime.civil(2007, 12, 4, 0, 0, 0, 0) # => Tue, 04 Dec 2007 00:00:00 +0000 + # + # datetime.to_formatted_s(:db) # => "2007-12-04 00:00:00" + # datetime.to_s(:db) # => "2007-12-04 00:00:00" + # datetime.to_s(:number) # => "20071204000000" + # datetime.to_formatted_s(:short) # => "04 Dec 00:00" + # datetime.to_formatted_s(:long) # => "December 04, 2007 00:00" + # datetime.to_formatted_s(:long_ordinal) # => "December 4th, 2007 00:00" + # datetime.to_formatted_s(:rfc822) # => "Tue, 04 Dec 2007 00:00:00 +0000" + # datetime.to_formatted_s(:iso8601) # => "2007-12-04T00:00:00+00:00" + # + # == Adding your own datetime formats to to_formatted_s + # DateTime formats are shared with Time. You can add your own to the + # Time::DATE_FORMATS hash. Use the format name as the hash key and + # either a strftime string or Proc instance that takes a time or + # datetime argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = lambda { |time| time.strftime("%B #{time.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + to_default_s + end + end + alias_method :to_default_s, :to_s if instance_methods(false).include?(:to_s) + alias_method :to_s, :to_formatted_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # datetime = DateTime.civil(2000, 1, 1, 0, 0, 0, Rational(-6, 24)) + # datetime.formatted_offset # => "-06:00" + # datetime.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005 14:30:00 +0000". + def readable_inspect + to_s(:rfc822) + end + alias_method :default_inspect, :inspect + alias_method :inspect, :readable_inspect + + # Returns DateTime with local offset for given year if format is local else + # offset is zero. + # + # DateTime.civil_from_format :local, 2012 + # # => Sun, 01 Jan 2012 00:00:00 +0300 + # DateTime.civil_from_format :local, 2012, 12, 17 + # # => Mon, 17 Dec 2012 00:00:00 +0000 + def self.civil_from_format(utc_or_local, year, month = 1, day = 1, hour = 0, min = 0, sec = 0) + if utc_or_local.to_sym == :local + offset = ::Time.local(year, month, day).utc_offset.to_r / 86400 + else + offset = 0 + end + civil(year, month, day, hour, min, sec, offset) + end + + # Converts +self+ to a floating-point number of seconds, including fractional microseconds, since the Unix epoch. + def to_f + seconds_since_unix_epoch.to_f + sec_fraction + end + + # Converts +self+ to an integer number of seconds since the Unix epoch. 
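The formatting helpers above in a compact sketch, with values taken from the documented examples:

    require "active_support/core_ext/date_time/conversions"

    dt = DateTime.civil(2007, 12, 4, 0, 0, 0, 0)
    dt.to_s(:db)         # => "2007-12-04 00:00:00"
    dt.to_s(:iso8601)    # => "2007-12-04T00:00:00+00:00"
    dt.formatted_offset  # => "+00:00"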
+ def to_i + seconds_since_unix_epoch.to_i + end + + # Returns the fraction of a second as microseconds + def usec + (sec_fraction * 1_000_000).to_i + end + + # Returns the fraction of a second as nanoseconds + def nsec + (sec_fraction * 1_000_000_000).to_i + end + + private + + def offset_in_seconds + (offset * 86400).to_i + end + + def seconds_since_unix_epoch + (jd - 2440588) * 86400 - offset_in_seconds + seconds_since_midnight + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest.rb new file mode 100644 index 0000000..ce1427e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/digest/uuid" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest/uuid.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest/uuid.rb new file mode 100644 index 0000000..6e949a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest/uuid.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +require "securerandom" + +module Digest + module UUID + DNS_NAMESPACE = "k\xA7\xB8\x10\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + URL_NAMESPACE = "k\xA7\xB8\x11\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + OID_NAMESPACE = "k\xA7\xB8\x12\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + X500_NAMESPACE = "k\xA7\xB8\x14\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + + # Generates a v5 non-random UUID (Universally Unique IDentifier). + # + # Using Digest::MD5 generates version 3 UUIDs; Digest::SHA1 generates version 5 UUIDs. + # uuid_from_hash always generates the same UUID for a given name and namespace combination. + # + # See RFC 4122 for details of UUID at: https://www.ietf.org/rfc/rfc4122.txt + def self.uuid_from_hash(hash_class, uuid_namespace, name) + if hash_class == Digest::MD5 + version = 3 + elsif hash_class == Digest::SHA1 + version = 5 + else + raise ArgumentError, "Expected Digest::SHA1 or Digest::MD5, got #{hash_class.name}." + end + + hash = hash_class.new + hash.update(uuid_namespace) + hash.update(name) + + ary = hash.digest.unpack("NnnnnN") + ary[2] = (ary[2] & 0x0FFF) | (version << 12) + ary[3] = (ary[3] & 0x3FFF) | 0x8000 + + "%08x-%04x-%04x-%04x-%04x%08x" % ary + end + + # Convenience method for uuid_from_hash using Digest::MD5. + def self.uuid_v3(uuid_namespace, name) + uuid_from_hash(Digest::MD5, uuid_namespace, name) + end + + # Convenience method for uuid_from_hash using Digest::SHA1. + def self.uuid_v5(uuid_namespace, name) + uuid_from_hash(Digest::SHA1, uuid_namespace, name) + end + + # Convenience method for SecureRandom.uuid. + def self.uuid_v4 + SecureRandom.uuid + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/enumerable.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/enumerable.rb new file mode 100644 index 0000000..cea6f98 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/enumerable.rb @@ -0,0 +1,164 @@ +# frozen_string_literal: true + +module Enumerable + # Enumerable#sum was added in Ruby 2.4, but it only works with Numeric elements + # when we omit an identity. 
+ # + # We tried shimming it to attempt the fast native method, rescue TypeError, + # and fall back to the compatible implementation, but that's much slower than + # just calling the compat method in the first place. + if Enumerable.instance_methods(false).include?(:sum) && !((?a..?b).sum rescue false) + # :stopdoc: + + # We can't use Refinements here because Refinements with Module which will be prepended + # doesn't work well https://bugs.ruby-lang.org/issues/13446 + alias :_original_sum_with_required_identity :sum + private :_original_sum_with_required_identity + + # :startdoc: + + # Calculates a sum from the elements. + # + # payments.sum { |p| p.price * p.tax_rate } + # payments.sum(&:price) + # + # The latter is a shortcut for: + # + # payments.inject(0) { |sum, p| sum + p.price } + # + # It can also calculate the sum without the use of a block. + # + # [5, 15, 10].sum # => 30 + # ['foo', 'bar'].sum # => "foobar" + # [[1, 2], [3, 1, 5]].sum # => [1, 2, 3, 1, 5] + # + # The default sum of an empty list is zero. You can override this default: + # + # [].sum(Payment.new(0)) { |i| i.amount } # => Payment.new(0) + def sum(identity = nil, &block) + if identity + _original_sum_with_required_identity(identity, &block) + elsif block_given? + map(&block).sum(identity) + else + inject(:+) || 0 + end + end + else + def sum(identity = nil, &block) + if block_given? + map(&block).sum(identity) + else + sum = identity ? inject(identity, :+) : inject(:+) + sum || identity || 0 + end + end + end + + # Convert an enumerable to a hash. + # + # people.index_by(&:login) + # # => { "nextangle" => , "chade-" => , ...} + # people.index_by { |person| "#{person.first_name} #{person.last_name}" } + # # => { "Chade- Fowlersburg-e" => , "David Heinemeier Hansson" => , ...} + def index_by + if block_given? + result = {} + each { |elem| result[yield(elem)] = elem } + result + else + to_enum(:index_by) { size if respond_to?(:size) } + end + end + + # Returns +true+ if the enumerable has more than 1 element. Functionally + # equivalent to enum.to_a.size > 1. Can be called with a block too, + # much like any?, so people.many? { |p| p.age > 26 } returns +true+ + # if more than one person is over 26. + def many? + cnt = 0 + if block_given? + any? do |element| + cnt += 1 if yield element + cnt > 1 + end + else + any? { (cnt += 1) > 1 } + end + end + + # The negative of the Enumerable#include?. Returns +true+ if the + # collection does not include the object. + def exclude?(object) + !include?(object) + end + + # Returns a copy of the enumerable without the specified elements. + # + # ["David", "Rafael", "Aaron", "Todd"].without "Aaron", "Todd" + # # => ["David", "Rafael"] + # + # {foo: 1, bar: 2, baz: 3}.without :bar + # # => {foo: 1, baz: 3} + def without(*elements) + reject { |element| elements.include?(element) } + end + + # Convert an enumerable to an array based on the given key. + # + # [{ name: "David" }, { name: "Rafael" }, { name: "Aaron" }].pluck(:name) + # # => ["David", "Rafael", "Aaron"] + # + # [{ id: 1, name: "David" }, { id: 2, name: "Rafael" }].pluck(:id, :name) + # # => [[1, "David"], [2, "Rafael"]] + def pluck(*keys) + if keys.many? + map { |element| keys.map { |key| element[key] } } + else + map { |element| element[keys.first] } + end + end +end + +class Range #:nodoc: + # Optimize range sum to use arithmetic progression if a block is not given and + # we have a range of numeric values. + def sum(identity = nil) + if block_given? 
|| !(first.is_a?(Integer) && last.is_a?(Integer)) + super + else + actual_last = exclude_end? ? (last - 1) : last + if actual_last >= first + sum = identity || 0 + sum + (actual_last - first + 1) * (actual_last + first) / 2 + else + identity || 0 + end + end + end +end + +# Array#sum was added in Ruby 2.4 but it only works with Numeric elements. +# +# We tried shimming it to attempt the fast native method, rescue TypeError, +# and fall back to the compatible implementation, but that's much slower than +# just calling the compat method in the first place. +if Array.instance_methods(false).include?(:sum) && !(%w[a].sum rescue false) + # Using Refinements here in order not to expose our internal method + using Module.new { + refine Array do + alias :orig_sum :sum + end + } + + class Array + def sum(init = nil, &block) #:nodoc: + if init.is_a?(Numeric) || first.is_a?(Numeric) + init ||= 0 + orig_sum(init, &block) + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file.rb new file mode 100644 index 0000000..64553bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/file/atomic" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file/atomic.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file/atomic.rb new file mode 100644 index 0000000..9deceb1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file/atomic.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require "fileutils" + +class File + # Write to a file atomically. Useful for situations where you don't + # want other processes or threads to see half-written files. + # + # File.atomic_write('important.file') do |file| + # file.write('hello') + # end + # + # This method needs to create a temporary file. By default it will create it + # in the same directory as the destination file. If you don't like this + # behavior you can provide a different directory but it must be on the + # same physical filesystem as the file you're trying to write. + # + # File.atomic_write('/data/something.important', '/data/tmp') do |file| + # file.write('hello') + # end + def self.atomic_write(file_name, temp_dir = dirname(file_name)) + require "tempfile" unless defined?(Tempfile) + + Tempfile.open(".#{basename(file_name)}", temp_dir) do |temp_file| + temp_file.binmode + return_val = yield temp_file + temp_file.close + + old_stat = if exist?(file_name) + # Get original file permissions + stat(file_name) + else + # If not possible, probe which are the default permissions in the + # destination directory. + probe_stat_in(dirname(file_name)) + end + + if old_stat + # Set correct permissions on new file + begin + chown(old_stat.uid, old_stat.gid, temp_file.path) + # This operation will affect filesystem ACL's + chmod(old_stat.mode, temp_file.path) + rescue Errno::EPERM, Errno::EACCES + # Changing file ownership failed, moving on. + end + end + + # Overwrite original file with temp file + rename(temp_file.path, file_name) + return_val + end + end + + # Private utility method. 
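A minimal sketch of the atomic-write helper documented above; the file name is arbitrary, and the temp file is created next to it and renamed into place:

    require "active_support/core_ext/file/atomic"

    File.atomic_write("important.file") do |file|
      file.write("hello")   # readers never observe a half-written file
    end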
+ def self.probe_stat_in(dir) #:nodoc: + basename = [ + ".permissions_check", + Thread.current.object_id, + Process.pid, + rand(1000000) + ].join(".") + + file_name = join(dir, basename) + FileUtils.touch(file_name) + stat(file_name) + ensure + FileUtils.rm_f(file_name) if file_name + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash.rb new file mode 100644 index 0000000..e19aeaa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/compact" +require "active_support/core_ext/hash/conversions" +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/indifferent_access" +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/hash/slice" +require "active_support/core_ext/hash/transform_values" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/compact.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/compact.rb new file mode 100644 index 0000000..d6364dd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/compact.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +class Hash + unless Hash.instance_methods(false).include?(:compact) + # Returns a hash with non +nil+ values. + # + # hash = { a: true, b: false, c: nil } + # hash.compact # => { a: true, b: false } + # hash # => { a: true, b: false, c: nil } + # { c: nil }.compact # => {} + # { c: true }.compact # => { c: true } + def compact + select { |_, value| !value.nil? } + end + end + + unless Hash.instance_methods(false).include?(:compact!) + # Replaces current hash with non +nil+ values. + # Returns +nil+ if no changes were made, otherwise returns the hash. + # + # hash = { a: true, b: false, c: nil } + # hash.compact! # => { a: true, b: false } + # hash # => { a: true, b: false } + # { c: true }.compact! # => nil + def compact! + reject! { |_, value| value.nil? } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/conversions.rb new file mode 100644 index 0000000..5b48254 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/conversions.rb @@ -0,0 +1,263 @@ +# frozen_string_literal: true + +require "active_support/xml_mini" +require "active_support/time" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/string/inflections" + +class Hash + # Returns a string containing an XML representation of its receiver: + # + # { foo: 1, bar: 2 }.to_xml + # # => + # # <?xml version="1.0" encoding="UTF-8"?> + # # <hash> + # # <foo type="integer">1</foo> + # # <bar type="integer">2</bar> + # # </hash> + # + # To do so, the method loops over the pairs and builds nodes that depend on + # the _values_. Given a pair +key+, +value+: + # + # * If +value+ is a hash there's a recursive call with +key+ as :root.
+ # + # * If +value+ is an array there's a recursive call with +key+ as :root, + # and +key+ singularized as :children. + # + # * If +value+ is a callable object it must expect one or two arguments. Depending + # on the arity, the callable is invoked with the +options+ hash as first argument + # with +key+ as :root, and +key+ singularized as second argument. The + # callable can add nodes by using options[:builder]. + # + # {foo: lambda { |options, key| options[:builder].b(key) }}.to_xml + # # => "<b>foo</b>" + # + # * If +value+ responds to +to_xml+ the method is invoked with +key+ as :root. + # + # class Foo + # def to_xml(options) + # options[:builder].bar 'fooing!' + # end + # end + # + # { foo: Foo.new }.to_xml(skip_instruct: true) + # # => + # # <hash> + # # <bar>fooing!</bar> + # # </hash> + # + # * Otherwise, a node with +key+ as tag is created with a string representation of + # +value+ as text node. If +value+ is +nil+ an attribute "nil" set to "true" is added. + # Unless the option :skip_types exists and is true, an attribute "type" is + # added as well according to the following mapping: + # + # XML_TYPE_NAMES = { + # "Symbol" => "symbol", + # "Integer" => "integer", + # "BigDecimal" => "decimal", + # "Float" => "float", + # "TrueClass" => "boolean", + # "FalseClass" => "boolean", + # "Date" => "date", + # "DateTime" => "dateTime", + # "Time" => "dateTime" + # } + # + # By default the root node is "hash", but that's configurable via the :root option. + # + # The default XML builder is a fresh instance of Builder::XmlMarkup. You can + # configure your own builder with the :builder option. The method also accepts + # options like :dasherize and friends, they are forwarded to the builder. + def to_xml(options = {}) + require "active_support/builder" unless defined?(Builder) + + options = options.dup + options[:indent] ||= 2 + options[:root] ||= "hash" + options[:builder] ||= Builder::XmlMarkup.new(indent: options[:indent]) + + builder = options[:builder] + builder.instruct! unless options.delete(:skip_instruct) + + root = ActiveSupport::XmlMini.rename_key(options[:root].to_s, options) + + builder.tag!(root) do + each { |key, value| ActiveSupport::XmlMini.to_tag(key, value, options) } + yield builder if block_given? + end + end + + class << self + # Returns a Hash containing a collection of pairs when the key is the node name and the value is + # its content + # + # xml = <<-XML + # <?xml version="1.0" encoding="UTF-8"?> + # <hash> + # <foo type="integer">1</foo> + # <bar type="integer">2</bar> + # </hash> + # XML + # + # hash = Hash.from_xml(xml) + # # => {"hash"=>{"foo"=>1, "bar"=>2}} + # + # +DisallowedType+ is raised if the XML contains attributes with type="yaml" or + # type="symbol". Use Hash.from_trusted_xml to + # parse this XML. + # + # Custom +disallowed_types+ can also be passed in the form of an + # array. + # + # xml = <<-XML + # <?xml version="1.0" encoding="UTF-8"?> + # <hash> + # <foo type="integer">1</foo> + # <bar type="string">"David"</bar> + # </hash> + # XML + # + # hash = Hash.from_xml(xml, ['integer']) + # # => ActiveSupport::XMLConverter::DisallowedType: Disallowed type attribute: "integer" + # + # Note that passing custom disallowed types will override the default types, + # which are Symbol and YAML. + def from_xml(xml, disallowed_types = nil) + ActiveSupport::XMLConverter.new(xml, disallowed_types).to_h + end + + # Builds a Hash from XML just like Hash.from_xml, but also allows Symbol and YAML. + def from_trusted_xml(xml) + from_xml xml, [] + end + end +end + +module ActiveSupport + class XMLConverter # :nodoc: + # Raised if the XML contains attributes with type="yaml" or + # type="symbol". Read Hash#from_xml for more details.
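A round-trip sketch of the conversions above, assuming the builder gem is also available at runtime for to_xml:

    require "active_support/core_ext/hash/conversions"

    xml = { foo: 1, bar: 2 }.to_xml   # uses Builder::XmlMarkup under the hood
    Hash.from_xml(xml)                # => { "hash" => { "foo" => 1, "bar" => 2 } }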
+ class DisallowedType < StandardError + def initialize(type) + super "Disallowed type attribute: #{type.inspect}" + end + end + + DISALLOWED_TYPES = %w(symbol yaml) + + def initialize(xml, disallowed_types = nil) + @xml = normalize_keys(XmlMini.parse(xml)) + @disallowed_types = disallowed_types || DISALLOWED_TYPES + end + + def to_h + deep_to_h(@xml) + end + + private + def normalize_keys(params) + case params + when Hash + Hash[params.map { |k, v| [k.to_s.tr("-", "_"), normalize_keys(v)] } ] + when Array + params.map { |v| normalize_keys(v) } + else + params + end + end + + def deep_to_h(value) + case value + when Hash + process_hash(value) + when Array + process_array(value) + when String + value + else + raise "can't typecast #{value.class.name} - #{value.inspect}" + end + end + + def process_hash(value) + if value.include?("type") && !value["type"].is_a?(Hash) && @disallowed_types.include?(value["type"]) + raise DisallowedType, value["type"] + end + + if become_array?(value) + _, entries = Array.wrap(value.detect { |k, v| not v.is_a?(String) }) + if entries.nil? || value["__content__"].try(:empty?) + [] + else + case entries + when Array + entries.collect { |v| deep_to_h(v) } + when Hash + [deep_to_h(entries)] + else + raise "can't typecast #{entries.inspect}" + end + end + elsif become_content?(value) + process_content(value) + + elsif become_empty_string?(value) + "" + elsif become_hash?(value) + xml_value = Hash[value.map { |k, v| [k, deep_to_h(v)] }] + + # Turn { files: { file: # } } into { files: # } so it is compatible with + # how multipart uploaded files from HTML appear + xml_value["file"].is_a?(StringIO) ? xml_value["file"] : xml_value + end + end + + def become_content?(value) + value["type"] == "file" || (value["__content__"] && (value.keys.size == 1 || value["__content__"].present?)) + end + + def become_array?(value) + value["type"] == "array" + end + + def become_empty_string?(value) + # { "string" => true } + # No tests fail when the second term is removed. + value["type"] == "string" && value["nil"] != "true" + end + + def become_hash?(value) + !nothing?(value) && !garbage?(value) + end + + def nothing?(value) + # blank or nil parsed values are represented by nil + value.blank? || value["nil"] == "true" + end + + def garbage?(value) + # If the type is the only element which makes it then + # this still makes the value nil, except if type is + # an XML node(where type['value'] is a Hash) + value["type"] && !value["type"].is_a?(::Hash) && value.size == 1 + end + + def process_content(value) + content = value["__content__"] + if parser = ActiveSupport::XmlMini::PARSING[value["type"]] + parser.arity == 1 ? parser.call(content) : parser.call(content, value) + else + content + end + end + + def process_array(value) + value.map! { |i| deep_to_h(i) } + value.length > 1 ? value : value.first + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/deep_merge.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/deep_merge.rb new file mode 100644 index 0000000..9bc50b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/deep_merge.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with +self+ and +other_hash+ merged recursively. 
+ # + # h1 = { a: true, b: { c: [1, 2, 3] } } + # h2 = { a: false, b: { x: [3, 4, 5] } } + # + # h1.deep_merge(h2) # => { a: false, b: { c: [1, 2, 3], x: [3, 4, 5] } } + # + # Like with Hash#merge in the standard library, a block can be provided + # to merge values: + # + # h1 = { a: 100, b: 200, c: { c1: 100 } } + # h2 = { b: 250, c: { c1: 200 } } + # h1.deep_merge(h2) { |key, this_val, other_val| this_val + other_val } + # # => { a: 100, b: 450, c: { c1: 300 } } + def deep_merge(other_hash, &block) + dup.deep_merge!(other_hash, &block) + end + + # Same as +deep_merge+, but modifies +self+. + def deep_merge!(other_hash, &block) + merge!(other_hash) do |key, this_val, other_val| + if this_val.is_a?(Hash) && other_val.is_a?(Hash) + this_val.deep_merge(other_val, &block) + elsif block_given? + block.call(key, this_val, other_val) + else + other_val + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/except.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/except.rb new file mode 100644 index 0000000..6258610 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/except.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +class Hash + # Returns a hash that includes everything except given keys. + # hash = { a: true, b: false, c: nil } + # hash.except(:c) # => { a: true, b: false } + # hash.except(:a, :b) # => { c: nil } + # hash # => { a: true, b: false, c: nil } + # + # This is useful for limiting a set of parameters to everything but a few known toggles: + # @person.update(params[:person].except(:admin)) + def except(*keys) + dup.except!(*keys) + end + + # Removes the given keys from hash and returns it. + # hash = { a: true, b: false, c: nil } + # hash.except!(:c) # => { a: true, b: false } + # hash # => { a: true, b: false } + def except!(*keys) + keys.each { |key| delete(key) } + self + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/indifferent_access.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/indifferent_access.rb new file mode 100644 index 0000000..a38f33f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/indifferent_access.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "active_support/hash_with_indifferent_access" + +class Hash + # Returns an ActiveSupport::HashWithIndifferentAccess out of its receiver: + # + # { a: 1 }.with_indifferent_access['a'] # => 1 + def with_indifferent_access + ActiveSupport::HashWithIndifferentAccess.new(self) + end + + # Called when object is nested under an object that receives + # #with_indifferent_access. This method will be called on the current object + # by the enclosing object and is aliased to #with_indifferent_access by + # default. Subclasses of Hash may overwrite this method to return +self+ if + # converting to an ActiveSupport::HashWithIndifferentAccess would not be + # desirable. 
+ # + # b = { b: 1 } + # { a: b }.with_indifferent_access['a'] # calls b.nested_under_indifferent_access + # # => {"b"=>1} + alias nested_under_indifferent_access with_indifferent_access +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/keys.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/keys.rb new file mode 100644 index 0000000..bdf196e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/keys.rb @@ -0,0 +1,172 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with all keys converted using the +block+ operation. + # + # hash = { name: 'Rob', age: '28' } + # + # hash.transform_keys { |key| key.to_s.upcase } # => {"NAME"=>"Rob", "AGE"=>"28"} + # + # If you do not provide a +block+, it will return an Enumerator + # for chaining with other methods: + # + # hash.transform_keys.with_index { |k, i| [k, i].join } # => {"name0"=>"Rob", "age1"=>"28"} + def transform_keys + return enum_for(:transform_keys) { size } unless block_given? + result = {} + each_key do |key| + result[yield(key)] = self[key] + end + result + end unless method_defined? :transform_keys + + # Destructively converts all keys using the +block+ operations. + # Same as +transform_keys+ but modifies +self+. + def transform_keys! + return enum_for(:transform_keys!) { size } unless block_given? + keys.each do |key| + self[yield(key)] = delete(key) + end + self + end unless method_defined? :transform_keys! + + # Returns a new hash with all keys converted to strings. + # + # hash = { name: 'Rob', age: '28' } + # + # hash.stringify_keys + # # => {"name"=>"Rob", "age"=>"28"} + def stringify_keys + transform_keys(&:to_s) + end + + # Destructively converts all keys to strings. Same as + # +stringify_keys+, but modifies +self+. + def stringify_keys! + transform_keys!(&:to_s) + end + + # Returns a new hash with all keys converted to symbols, as long as + # they respond to +to_sym+. + # + # hash = { 'name' => 'Rob', 'age' => '28' } + # + # hash.symbolize_keys + # # => {:name=>"Rob", :age=>"28"} + def symbolize_keys + transform_keys { |key| key.to_sym rescue key } + end + alias_method :to_options, :symbolize_keys + + # Destructively converts all keys to symbols, as long as they respond + # to +to_sym+. Same as +symbolize_keys+, but modifies +self+. + def symbolize_keys! + transform_keys! { |key| key.to_sym rescue key } + end + alias_method :to_options!, :symbolize_keys! + + # Validates all keys in a hash match *valid_keys, raising + # +ArgumentError+ on a mismatch. + # + # Note that keys are treated differently than HashWithIndifferentAccess, + # meaning that string and symbol keys will not match. + # + # { name: 'Rob', years: '28' }.assert_valid_keys(:name, :age) # => raises "ArgumentError: Unknown key: :years. Valid keys are: :name, :age" + # { name: 'Rob', age: '28' }.assert_valid_keys('name', 'age') # => raises "ArgumentError: Unknown key: :name. Valid keys are: 'name', 'age'" + # { name: 'Rob', age: '28' }.assert_valid_keys(:name, :age) # => passes, raises nothing + def assert_valid_keys(*valid_keys) + valid_keys.flatten! + each_key do |k| + unless valid_keys.include?(k) + raise ArgumentError.new("Unknown key: #{k.inspect}. Valid keys are: #{valid_keys.map(&:inspect).join(', ')}") + end + end + end + + # Returns a new hash with all keys converted by the block operation. + # This includes the keys from the root hash and from all + # nested hashes and arrays. 
+ # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_transform_keys{ |key| key.to_s.upcase } + # # => {"PERSON"=>{"NAME"=>"Rob", "AGE"=>"28"}} + def deep_transform_keys(&block) + _deep_transform_keys_in_object(self, &block) + end + + # Destructively converts all keys by using the block operation. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_transform_keys!(&block) + _deep_transform_keys_in_object!(self, &block) + end + + # Returns a new hash with all keys converted to strings. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_stringify_keys + # # => {"person"=>{"name"=>"Rob", "age"=>"28"}} + def deep_stringify_keys + deep_transform_keys(&:to_s) + end + + # Destructively converts all keys to strings. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_stringify_keys! + deep_transform_keys!(&:to_s) + end + + # Returns a new hash with all keys converted to symbols, as long as + # they respond to +to_sym+. This includes the keys from the root hash + # and from all nested hashes and arrays. + # + # hash = { 'person' => { 'name' => 'Rob', 'age' => '28' } } + # + # hash.deep_symbolize_keys + # # => {:person=>{:name=>"Rob", :age=>"28"}} + def deep_symbolize_keys + deep_transform_keys { |key| key.to_sym rescue key } + end + + # Destructively converts all keys to symbols, as long as they respond + # to +to_sym+. This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_symbolize_keys! + deep_transform_keys! { |key| key.to_sym rescue key } + end + + private + # support methods for deep transforming nested hashes and arrays + def _deep_transform_keys_in_object(object, &block) + case object + when Hash + object.each_with_object({}) do |(key, value), result| + result[yield(key)] = _deep_transform_keys_in_object(value, &block) + end + when Array + object.map { |e| _deep_transform_keys_in_object(e, &block) } + else + object + end + end + + def _deep_transform_keys_in_object!(object, &block) + case object + when Hash + object.keys.each do |key| + value = object.delete(key) + object[yield(key)] = _deep_transform_keys_in_object!(value, &block) + end + object + when Array + object.map! { |e| _deep_transform_keys_in_object!(e, &block) } + else + object + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/reverse_merge.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/reverse_merge.rb new file mode 100644 index 0000000..ef8d592 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/reverse_merge.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class Hash + # Merges the caller into +other_hash+. For example, + # + # options = options.reverse_merge(size: 25, velocity: 10) + # + # is equivalent to + # + # options = { size: 25, velocity: 10 }.merge(options) + # + # This is particularly useful for initializing an options hash + # with default values. + def reverse_merge(other_hash) + other_hash.merge(self) + end + alias_method :with_defaults, :reverse_merge + + # Destructive +reverse_merge+. + def reverse_merge!(other_hash) + replace(reverse_merge(other_hash)) + end + alias_method :reverse_update, :reverse_merge! + alias_method :with_defaults!, :reverse_merge! 
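The Hash extensions vendored in the files above compose naturally. A minimal sketch of how they combine, assuming the bundled activesupport 5.2 is on the load path (for example under bundle exec); the option names are made up for illustration:

# Illustrative only; not part of the vendored gem source.
require "active_support/core_ext/hash/deep_merge"
require "active_support/core_ext/hash/except"
require "active_support/core_ext/hash/keys"
require "active_support/core_ext/hash/reverse_merge"

defaults = { retries: 3, timeouts: { open: 2, read: 5 } }
options  = { "timeouts" => { read: 10 }, verbose: true }

# Normalize string keys, drop unwanted entries, then layer user options over defaults.
defaults.deep_merge(options.symbolize_keys.except(:verbose))
# => { retries: 3, timeouts: { open: 2, read: 10 } }

# with_defaults goes the other way: the receiver wins over the supplied defaults.
{ verbose: true }.with_defaults(retries: 3)
# => { retries: 3, verbose: true }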
+end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/slice.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/slice.rb new file mode 100644 index 0000000..2bd0a56 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/slice.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +class Hash + # Slices a hash to include only the given keys. Returns a hash containing + # the given keys. + # + # { a: 1, b: 2, c: 3, d: 4 }.slice(:a, :b) + # # => {:a=>1, :b=>2} + # + # This is useful for limiting an options hash to valid keys before + # passing to a method: + # + # def search(criteria = {}) + # criteria.assert_valid_keys(:mass, :velocity, :time) + # end + # + # search(options.slice(:mass, :velocity, :time)) + # + # If you have an array of keys you want to limit to, you should splat them: + # + # valid_keys = [:mass, :velocity, :time] + # search(options.slice(*valid_keys)) + def slice(*keys) + keys.each_with_object(Hash.new) { |k, hash| hash[k] = self[k] if has_key?(k) } + end unless method_defined?(:slice) + + # Replaces the hash with only the given keys. + # Returns a hash containing the removed key/value pairs. + # + # { a: 1, b: 2, c: 3, d: 4 }.slice!(:a, :b) + # # => {:c=>3, :d=>4} + def slice!(*keys) + omit = slice(*self.keys - keys) + hash = slice(*keys) + hash.default = default + hash.default_proc = default_proc if default_proc + replace(hash) + omit + end + + # Removes and returns the key/value pairs matching the given keys. + # + # { a: 1, b: 2, c: 3, d: 4 }.extract!(:a, :b) # => {:a=>1, :b=>2} + # { a: 1, b: 2 }.extract!(:a, :x) # => {:a=>1} + def extract!(*keys) + keys.each_with_object(self.class.new) { |key, result| result[key] = delete(key) if has_key?(key) } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/transform_values.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/transform_values.rb new file mode 100644 index 0000000..4b19c9f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/transform_values.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with the results of running +block+ once for every value. + # The keys are unchanged. + # + # { a: 1, b: 2, c: 3 }.transform_values { |x| x * 2 } # => { a: 2, b: 4, c: 6 } + # + # If you do not provide a +block+, it will return an Enumerator + # for chaining with other methods: + # + # { a: 1, b: 2 }.transform_values.with_index { |v, i| [v, i].join.to_i } # => { a: 10, b: 21 } + def transform_values + return enum_for(:transform_values) { size } unless block_given? + return {} if empty? + result = self.class.new + each do |key, value| + result[key] = yield(value) + end + result + end unless method_defined? :transform_values + + # Destructively converts all values using the +block+ operations. + # Same as +transform_values+ but modifies +self+. + def transform_values! + return enum_for(:transform_values!) { size } unless block_given? + each do |key, value| + self[key] = yield(value) + end + end unless method_defined? :transform_values! + # TODO: Remove this file when supporting only Ruby 2.4+. 
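A quick sketch of the slicing helpers defined above. Note that slice and transform_values are guarded with method_defined?, so on Ruby 2.6 the native implementations are kept and only slice!, extract! and friends come from this file; the keys here are illustrative:

# Illustrative only; not part of the vendored gem source.
require "active_support/core_ext/hash/slice"
require "active_support/core_ext/hash/transform_values"

params = { mass: "10", velocity: "3", time: "2", debug: "1" }

params.slice(:mass, :velocity)              # => { mass: "10", velocity: "3" }
params.extract!(:debug)                     # => { debug: "1" } (removed from params)
params                                      # => { mass: "10", velocity: "3", time: "2" }
params.transform_values { |v| Integer(v) }  # => { mass: 10, velocity: 3, time: 2 }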
+end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer.rb new file mode 100644 index 0000000..d227013 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support/core_ext/integer/multiple" +require "active_support/core_ext/integer/inflections" +require "active_support/core_ext/integer/time" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/inflections.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/inflections.rb new file mode 100644 index 0000000..aef3266 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/inflections.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require "active_support/inflector" + +class Integer + # Ordinalize turns a number into an ordinal string used to denote the + # position in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # 1.ordinalize # => "1st" + # 2.ordinalize # => "2nd" + # 1002.ordinalize # => "1002nd" + # 1003.ordinalize # => "1003rd" + # -11.ordinalize # => "-11th" + # -1001.ordinalize # => "-1001st" + def ordinalize + ActiveSupport::Inflector.ordinalize(self) + end + + # Ordinal returns the suffix used to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # 1.ordinal # => "st" + # 2.ordinal # => "nd" + # 1002.ordinal # => "nd" + # 1003.ordinal # => "rd" + # -11.ordinal # => "th" + # -1001.ordinal # => "st" + def ordinal + ActiveSupport::Inflector.ordinal(self) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/multiple.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/multiple.rb new file mode 100644 index 0000000..e760666 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/multiple.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +class Integer + # Check whether the integer is evenly divisible by the argument. + # + # 0.multiple_of?(0) # => true + # 6.multiple_of?(5) # => false + # 10.multiple_of?(2) # => true + def multiple_of?(number) + number != 0 ? self % number == 0 : zero? + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/time.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/time.rb new file mode 100644 index 0000000..5efb89c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/time.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/numeric/time" + +class Integer + # Returns a Duration instance matching the number of months provided. + # + # 2.months # => 2 months + def months + ActiveSupport::Duration.months(self) + end + alias :month :months + + # Returns a Duration instance matching the number of years provided. 
+ # + # 2.years # => 2 years + def years + ActiveSupport::Duration.years(self) + end + alias :year :years +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel.rb new file mode 100644 index 0000000..0f4356f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/agnostics" +require "active_support/core_ext/kernel/concern" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/kernel/singleton_class" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/agnostics.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/agnostics.rb new file mode 100644 index 0000000..403b5f3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/agnostics.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +class Object + # Makes backticks behave (somewhat more) similarly on all platforms. + # On win32 `nonexistent_command` raises Errno::ENOENT; on Unix, the + # spawned shell prints a message to stderr and sets $?. We emulate + # Unix on the former but not the latter. + def `(command) #:nodoc: + super + rescue Errno::ENOENT => e + STDERR.puts "#$0: #{e}" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/concern.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/concern.rb new file mode 100644 index 0000000..0b2baed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/concern.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/concerning" + +module Kernel + module_function + + # A shortcut to define a toplevel concern, not within a module. + # + # See Module::Concerning for more. + def concern(topic, &module_definition) + Object.concern topic, &module_definition + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/reporting.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/reporting.rb new file mode 100644 index 0000000..9155bd6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/reporting.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Kernel + module_function + + # Sets $VERBOSE to +nil+ for the duration of the block and back to its original + # value afterwards. + # + # silence_warnings do + # value = noisy_call # no warning voiced + # end + # + # noisy_call # warning voiced + def silence_warnings + with_warnings(nil) { yield } + end + + # Sets $VERBOSE to +true+ for the duration of the block and back to its + # original value afterwards. + def enable_warnings + with_warnings(true) { yield } + end + + # Sets $VERBOSE for the duration of the block and back to its original + # value afterwards. + def with_warnings(flag) + old_verbose, $VERBOSE = $VERBOSE, flag + yield + ensure + $VERBOSE = old_verbose + end + + # Blocks and ignores any exception passed as argument if raised within the block. 
+ # + # suppress(ZeroDivisionError) do + # 1/0 + # puts 'This code is NOT reached' + # end + # + # puts 'This code gets executed and nothing related to ZeroDivisionError was seen' + def suppress(*exception_classes) + yield + rescue *exception_classes + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/singleton_class.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/singleton_class.rb new file mode 100644 index 0000000..6715eba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/singleton_class.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +module Kernel + # class_eval on an object acts like singleton_class.class_eval. + def class_eval(*args, &block) + singleton_class.class_eval(*args, &block) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/load_error.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/load_error.rb new file mode 100644 index 0000000..750f858 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/load_error.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +class LoadError + # Returns true if the given path name (except perhaps for the ".rb" + # extension) is the missing file which caused the exception to be raised. + def is_missing?(location) + location.sub(/\.rb$/, "".freeze) == path.sub(/\.rb$/, "".freeze) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/marshal.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/marshal.rb new file mode 100644 index 0000000..0c72cd7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/marshal.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module ActiveSupport + module MarshalWithAutoloading # :nodoc: + def load(source, proc = nil) + super(source, proc) + rescue ArgumentError, NameError => exc + if exc.message.match(%r|undefined class/module (.+?)(?:::)?\z|) + # try loading the class/module + loaded = $1.constantize + + raise unless $1 == loaded.name + + # if it is an IO we need to go back to read the object + source.rewind if source.respond_to?(:rewind) + retry + else + raise exc + end + end + end +end + +Marshal.singleton_class.prepend(ActiveSupport::MarshalWithAutoloading) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module.rb new file mode 100644 index 0000000..d91e3fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/module/introspection" +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/module/reachable" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/attribute_accessors_per_thread" +require "active_support/core_ext/module/attr_internal" +require "active_support/core_ext/module/concerning" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/module/deprecation" +require "active_support/core_ext/module/redefine_method" +require 
"active_support/core_ext/module/remove_method" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/aliasing.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/aliasing.rb new file mode 100644 index 0000000..6f64d11 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/aliasing.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class Module + # Allows you to make aliases for attributes, which includes + # getter, setter, and a predicate. + # + # class Content < ActiveRecord::Base + # # has a title attribute + # end + # + # class Email < Content + # alias_attribute :subject, :title + # end + # + # e = Email.find(1) + # e.title # => "Superstars" + # e.subject # => "Superstars" + # e.subject? # => true + # e.subject = "Megastars" + # e.title # => "Megastars" + def alias_attribute(new_name, old_name) + # The following reader methods use an explicit `self` receiver in order to + # support aliases that start with an uppercase letter. Otherwise, they would + # be resolved as constants instead. + module_eval <<-STR, __FILE__, __LINE__ + 1 + def #{new_name}; self.#{old_name}; end # def subject; self.title; end + def #{new_name}?; self.#{old_name}?; end # def subject?; self.title?; end + def #{new_name}=(v); self.#{old_name} = v; end # def subject=(v); self.title = v; end + STR + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/anonymous.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/anonymous.rb new file mode 100644 index 0000000..d1c86b8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/anonymous.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +class Module + # A module may or may not have a name. + # + # module M; end + # M.name # => "M" + # + # m = Module.new + # m.name # => nil + # + # +anonymous?+ method returns true if module does not have a name, false otherwise: + # + # Module.new.anonymous? # => true + # + # module M; end + # M.anonymous? # => false + # + # A module gets a name when it is first assigned to a constant. Either + # via the +module+ or +class+ keyword or by an explicit assignment: + # + # m = Module.new # creates an anonymous module + # m.anonymous? # => true + # M = m # m gets a name here as a side-effect + # m.name # => "M" + # m.anonymous? # => false + def anonymous? + name.nil? + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attr_internal.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attr_internal.rb new file mode 100644 index 0000000..7801f6d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attr_internal.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +class Module + # Declares an attribute reader backed by an internally-named instance variable. + def attr_internal_reader(*attrs) + attrs.each { |attr_name| attr_internal_define(attr_name, :reader) } + end + + # Declares an attribute writer backed by an internally-named instance variable. + def attr_internal_writer(*attrs) + attrs.each { |attr_name| attr_internal_define(attr_name, :writer) } + end + + # Declares an attribute reader and writer backed by an internally-named instance + # variable. 
+ def attr_internal_accessor(*attrs) + attr_internal_reader(*attrs) + attr_internal_writer(*attrs) + end + alias_method :attr_internal, :attr_internal_accessor + + class << self; attr_accessor :attr_internal_naming_format end + self.attr_internal_naming_format = "@_%s" + + private + def attr_internal_ivar_name(attr) + Module.attr_internal_naming_format % attr + end + + def attr_internal_define(attr_name, type) + internal_name = attr_internal_ivar_name(attr_name).sub(/\A@/, "") + # use native attr_* methods as they are faster on some Ruby implementations + send("attr_#{type}", internal_name) + attr_name, internal_name = "#{attr_name}=", "#{internal_name}=" if type == :writer + alias_method attr_name, internal_name + remove_method internal_name + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors.rb new file mode 100644 index 0000000..580baff --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors.rb @@ -0,0 +1,215 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/regexp" + +# Extends the module object with class/module and instance accessors for +# class/module attributes, just like the native attr* accessors for instance +# attributes. +class Module + # Defines a class attribute and creates a class and instance reader methods. + # The underlying class variable is set to +nil+, if it is not previously + # defined. All class and instance methods created will be public, even if + # this method is called with a private or protected access modifier. + # + # module HairColors + # mattr_reader :hair_colors + # end + # + # HairColors.hair_colors # => nil + # HairColors.class_variable_set("@@hair_colors", [:brown, :black]) + # HairColors.hair_colors # => [:brown, :black] + # + # The attribute name must be a valid method name in Ruby. + # + # module Foo + # mattr_reader :"1_Badname" + # end + # # => NameError: invalid attribute name: 1_Badname + # + # If you want to opt out the creation on the instance reader method, pass + # instance_reader: false or instance_accessor: false. + # + # module HairColors + # mattr_reader :hair_colors, instance_reader: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_reader :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors # => [:brown, :black, :blonde, :red] + def mattr_reader(*syms, instance_reader: true, instance_accessor: true, default: nil) + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /\A[_A-Za-z]\w*\z/.match?(sym) + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + @@#{sym} = nil unless defined? @@#{sym} + + def self.#{sym} + @@#{sym} + end + EOS + + if instance_reader && instance_accessor + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym} + @@#{sym} + end + EOS + end + + sym_default_value = (block_given? && default.nil?) ? yield : default + class_variable_set("@@#{sym}", sym_default_value) unless sym_default_value.nil? 
+ end + end + alias :cattr_reader :mattr_reader + + # Defines a class attribute and creates a class and instance writer methods to + # allow assignment to the attribute. All class and instance methods created + # will be public, even if this method is called with a private or protected + # access modifier. + # + # module HairColors + # mattr_writer :hair_colors + # end + # + # class Person + # include HairColors + # end + # + # HairColors.hair_colors = [:brown, :black] + # Person.class_variable_get("@@hair_colors") # => [:brown, :black] + # Person.new.hair_colors = [:blonde, :red] + # HairColors.class_variable_get("@@hair_colors") # => [:blonde, :red] + # + # If you want to opt out the instance writer method, pass + # instance_writer: false or instance_accessor: false. + # + # module HairColors + # mattr_writer :hair_colors, instance_writer: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:blonde, :red] # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_writer :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.class_variable_get("@@hair_colors") # => [:brown, :black, :blonde, :red] + def mattr_writer(*syms, instance_writer: true, instance_accessor: true, default: nil) + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /\A[_A-Za-z]\w*\z/.match?(sym) + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + @@#{sym} = nil unless defined? @@#{sym} + + def self.#{sym}=(obj) + @@#{sym} = obj + end + EOS + + if instance_writer && instance_accessor + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym}=(obj) + @@#{sym} = obj + end + EOS + end + + sym_default_value = (block_given? && default.nil?) ? yield : default + send("#{sym}=", sym_default_value) unless sym_default_value.nil? + end + end + alias :cattr_writer :mattr_writer + + # Defines both class and instance accessors for class attributes. + # All class and instance methods created will be public, even if + # this method is called with a private or protected access modifier. + # + # module HairColors + # mattr_accessor :hair_colors + # end + # + # class Person + # include HairColors + # end + # + # HairColors.hair_colors = [:brown, :black, :blonde, :red] + # HairColors.hair_colors # => [:brown, :black, :blonde, :red] + # Person.new.hair_colors # => [:brown, :black, :blonde, :red] + # + # If a subclass changes the value then that would also change the value for + # parent class. Similarly if parent class changes the value then that would + # change the value of subclasses too. + # + # class Male < Person + # end + # + # Male.new.hair_colors << :blue + # Person.new.hair_colors # => [:brown, :black, :blonde, :red, :blue] + # + # To opt out of the instance writer method, pass instance_writer: false. + # To opt out of the instance reader method, pass instance_reader: false. + # + # module HairColors + # mattr_accessor :hair_colors, instance_writer: false, instance_reader: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:brown] # => NoMethodError + # Person.new.hair_colors # => NoMethodError + # + # Or pass instance_accessor: false, to opt out both instance methods. 
+ # + # module HairColors + # mattr_accessor :hair_colors, instance_accessor: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:brown] # => NoMethodError + # Person.new.hair_colors # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_accessor :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.class_variable_get("@@hair_colors") # => [:brown, :black, :blonde, :red] + def mattr_accessor(*syms, instance_reader: true, instance_writer: true, instance_accessor: true, default: nil, &blk) + mattr_reader(*syms, instance_reader: instance_reader, instance_accessor: instance_accessor, default: default, &blk) + mattr_writer(*syms, instance_writer: instance_writer, instance_accessor: instance_accessor, default: default) + end + alias :cattr_accessor :mattr_accessor +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb new file mode 100644 index 0000000..4b9b6ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb @@ -0,0 +1,150 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/regexp" + +# Extends the module object with class/module and instance accessors for +# class/module attributes, just like the native attr* accessors for instance +# attributes, but does so on a per-thread basis. +# +# So the values are scoped within the Thread.current space under the class name +# of the module. +class Module + # Defines a per-thread class attribute and creates class and instance reader methods. + # The underlying per-thread class variable is set to +nil+, if it is not previously defined. + # + # module Current + # thread_mattr_reader :user + # end + # + # Current.user # => nil + # Thread.current[:attr_Current_user] = "DHH" + # Current.user # => "DHH" + # + # The attribute name must be a valid method name in Ruby. + # + # module Foo + # thread_mattr_reader :"1_Badname" + # end + # # => NameError: invalid attribute name: 1_Badname + # + # If you want to opt out of the creation of the instance reader method, pass + # instance_reader: false or instance_accessor: false. + # + # class Current + # thread_mattr_reader :user, instance_reader: false + # end + # + # Current.new.user # => NoMethodError + def thread_mattr_reader(*syms) # :nodoc: + options = syms.extract_options! + + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /^[_A-Za-z]\w*$/.match?(sym) + + # The following generated method concatenates `name` because we want it + # to work with inheritance via polymorphism. + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def self.#{sym} + Thread.current["attr_" + name + "_#{sym}"] + end + EOS + + unless options[:instance_reader] == false || options[:instance_accessor] == false + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym} + self.class.#{sym} + end + EOS + end + end + end + alias :thread_cattr_reader :thread_mattr_reader + + # Defines a per-thread class attribute and creates a class and instance writer methods to + # allow assignment to the attribute. 
+ # + # module Current + # thread_mattr_writer :user + # end + # + # Current.user = "DHH" + # Thread.current[:attr_Current_user] # => "DHH" + # + # If you want to opt out of the creation of the instance writer method, pass + # instance_writer: false or instance_accessor: false. + # + # class Current + # thread_mattr_writer :user, instance_writer: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + def thread_mattr_writer(*syms) # :nodoc: + options = syms.extract_options! + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /^[_A-Za-z]\w*$/.match?(sym) + + # The following generated method concatenates `name` because we want it + # to work with inheritance via polymorphism. + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def self.#{sym}=(obj) + Thread.current["attr_" + name + "_#{sym}"] = obj + end + EOS + + unless options[:instance_writer] == false || options[:instance_accessor] == false + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym}=(obj) + self.class.#{sym} = obj + end + EOS + end + end + end + alias :thread_cattr_writer :thread_mattr_writer + + # Defines both class and instance accessors for class attributes. + # + # class Account + # thread_mattr_accessor :user + # end + # + # Account.user = "DHH" + # Account.user # => "DHH" + # Account.new.user # => "DHH" + # + # If a subclass changes the value, the parent class' value is not changed. + # Similarly, if the parent class changes the value, the value of subclasses + # is not changed. + # + # class Customer < Account + # end + # + # Customer.user = "Rafael" + # Customer.user # => "Rafael" + # Account.user # => "DHH" + # + # To opt out of the instance writer method, pass instance_writer: false. + # To opt out of the instance reader method, pass instance_reader: false. + # + # class Current + # thread_mattr_accessor :user, instance_writer: false, instance_reader: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + # Current.new.user # => NoMethodError + # + # Or pass instance_accessor: false, to opt out both instance methods. + # + # class Current + # mattr_accessor :user, instance_accessor: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + # Current.new.user # => NoMethodError + def thread_mattr_accessor(*syms) + thread_mattr_reader(*syms) + thread_mattr_writer(*syms) + end + alias :thread_cattr_accessor :thread_mattr_accessor +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/concerning.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/concerning.rb new file mode 100644 index 0000000..7bbbf32 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/concerning.rb @@ -0,0 +1,134 @@ +# frozen_string_literal: true + +require "active_support/concern" + +class Module + # = Bite-sized separation of concerns + # + # We often find ourselves with a medium-sized chunk of behavior that we'd + # like to extract, but only mix in to a single class. + # + # Extracting a plain old Ruby object to encapsulate it and collaborate or + # delegate to the original object is often a good choice, but when there's + # no additional state to encapsulate or we're making DSL-style declarations + # about the parent class, introducing new collaborators can obfuscate rather + # than simplify. + # + # The typical route is to just dump everything in a monolithic class, perhaps + # with a comment, as a least-bad alternative. 
Using modules in separate files + # means tedious sifting to get a big-picture view. + # + # = Dissatisfying ways to separate small concerns + # + # == Using comments: + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # ## Event tracking + # has_many :events + # + # before_create :track_creation + # + # private + # def track_creation + # # ... + # end + # end + # + # == With an inline module: + # + # Noisy syntax. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # module EventTracking + # extend ActiveSupport::Concern + # + # included do + # has_many :events + # before_create :track_creation + # end + # + # private + # def track_creation + # # ... + # end + # end + # include EventTracking + # end + # + # == Mix-in noise exiled to its own file: + # + # Once our chunk of behavior starts pushing the scroll-to-understand-it + # boundary, we give in and move it to a separate file. At this size, the + # increased overhead can be a reasonable tradeoff even if it reduces our + # at-a-glance perception of how things work. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # include TodoEventTracking + # end + # + # = Introducing Module#concerning + # + # By quieting the mix-in noise, we arrive at a natural, low-ceremony way to + # separate bite-sized concerns. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # concerning :EventTracking do + # included do + # has_many :events + # before_create :track_creation + # end + # + # private + # def track_creation + # # ... + # end + # end + # end + # + # Todo.ancestors + # # => [Todo, Todo::EventTracking, ApplicationRecord, Object] + # + # This small step has some wonderful ripple effects. We can + # * grok the behavior of our class in one glance, + # * clean up monolithic junk-drawer classes by separating their concerns, and + # * stop leaning on protected/private for crude "this is internal stuff" modularity. + module Concerning + # Define a new concern and mix it in. + def concerning(topic, &block) + include concern(topic, &block) + end + + # A low-cruft shortcut to define a concern. + # + # concern :EventTracking do + # ... + # end + # + # is equivalent to + # + # module EventTracking + # extend ActiveSupport::Concern + # + # ... + # end + def concern(topic, &module_definition) + const_set topic, Module.new { + extend ::ActiveSupport::Concern + module_eval(&module_definition) + } + end + end + include Concerning +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/delegation.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/delegation.rb new file mode 100644 index 0000000..4310df3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/delegation.rb @@ -0,0 +1,287 @@ +# frozen_string_literal: true + +require "set" +require "active_support/core_ext/regexp" + +class Module + # Error generated by +delegate+ when a method is called on +nil+ and +allow_nil+ + # option is not used. + class DelegationError < NoMethodError; end + + RUBY_RESERVED_KEYWORDS = %w(alias and BEGIN begin break case class def defined? 
do + else elsif END end ensure false for if in module next nil not or redo rescue retry + return self super then true undef unless until when while yield) + DELEGATION_RESERVED_KEYWORDS = %w(_ arg args block) + DELEGATION_RESERVED_METHOD_NAMES = Set.new( + RUBY_RESERVED_KEYWORDS + DELEGATION_RESERVED_KEYWORDS + ).freeze + + # Provides a +delegate+ class method to easily expose contained objects' + # public methods as your own. + # + # ==== Options + # * :to - Specifies the target object + # * :prefix - Prefixes the new method with the target name or a custom prefix + # * :allow_nil - if set to true, prevents a +Module::DelegationError+ + # from being raised + # + # The macro receives one or more method names (specified as symbols or + # strings) and the name of the target object via the :to option + # (also a symbol or string). + # + # Delegation is particularly useful with Active Record associations: + # + # class Greeter < ActiveRecord::Base + # def hello + # 'hello' + # end + # + # def goodbye + # 'goodbye' + # end + # end + # + # class Foo < ActiveRecord::Base + # belongs_to :greeter + # delegate :hello, to: :greeter + # end + # + # Foo.new.hello # => "hello" + # Foo.new.goodbye # => NoMethodError: undefined method `goodbye' for # + # + # Multiple delegates to the same target are allowed: + # + # class Foo < ActiveRecord::Base + # belongs_to :greeter + # delegate :hello, :goodbye, to: :greeter + # end + # + # Foo.new.goodbye # => "goodbye" + # + # Methods can be delegated to instance variables, class variables, or constants + # by providing them as a symbols: + # + # class Foo + # CONSTANT_ARRAY = [0,1,2,3] + # @@class_array = [4,5,6,7] + # + # def initialize + # @instance_array = [8,9,10,11] + # end + # delegate :sum, to: :CONSTANT_ARRAY + # delegate :min, to: :@@class_array + # delegate :max, to: :@instance_array + # end + # + # Foo.new.sum # => 6 + # Foo.new.min # => 4 + # Foo.new.max # => 11 + # + # It's also possible to delegate a method to the class by using +:class+: + # + # class Foo + # def self.hello + # "world" + # end + # + # delegate :hello, to: :class + # end + # + # Foo.new.hello # => "world" + # + # Delegates can optionally be prefixed using the :prefix option. If the value + # is true, the delegate methods are prefixed with the name of the object being + # delegated to. + # + # Person = Struct.new(:name, :address) + # + # class Invoice < Struct.new(:client) + # delegate :name, :address, to: :client, prefix: true + # end + # + # john_doe = Person.new('John Doe', 'Vimmersvej 13') + # invoice = Invoice.new(john_doe) + # invoice.client_name # => "John Doe" + # invoice.client_address # => "Vimmersvej 13" + # + # It is also possible to supply a custom prefix. + # + # class Invoice < Struct.new(:client) + # delegate :name, :address, to: :client, prefix: :customer + # end + # + # invoice = Invoice.new(john_doe) + # invoice.customer_name # => 'John Doe' + # invoice.customer_address # => 'Vimmersvej 13' + # + # If the target is +nil+ and does not respond to the delegated method a + # +Module::DelegationError+ is raised. If you wish to instead return +nil+, + # use the :allow_nil option. 
+ # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :age, to: :profile + # end + # + # User.new.age + # # => Module::DelegationError: User#age delegated to profile.age, but profile is nil + # + # But if not having a profile yet is fine and should not be an error + # condition: + # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :age, to: :profile, allow_nil: true + # end + # + # User.new.age # nil + # + # Note that if the target is not +nil+ then the call is attempted regardless of the + # :allow_nil option, and thus an exception is still raised if said object + # does not respond to the method: + # + # class Foo + # def initialize(bar) + # @bar = bar + # end + # + # delegate :name, to: :@bar, allow_nil: true + # end + # + # Foo.new("Bar").name # raises NoMethodError: undefined method `name' + # + # The target method must be public, otherwise it will raise +NoMethodError+. + def delegate(*methods, to: nil, prefix: nil, allow_nil: nil) + unless to + raise ArgumentError, "Delegation needs a target. Supply an options hash with a :to key as the last argument (e.g. delegate :hello, to: :greeter)." + end + + if prefix == true && /^[^a-z_]/.match?(to) + raise ArgumentError, "Can only automatically set the delegation prefix when delegating to a method." + end + + method_prefix = \ + if prefix + "#{prefix == true ? to : prefix}_" + else + "" + end + + location = caller_locations(1, 1).first + file, line = location.path, location.lineno + + to = to.to_s + to = "self.#{to}" if DELEGATION_RESERVED_METHOD_NAMES.include?(to) + + methods.map do |method| + # Attribute writer methods only accept one argument. Makes sure []= + # methods still accept two arguments. + definition = /[^\]]=$/.match?(method) ? "arg" : "*args, &block" + + # The following generated method calls the target exactly once, storing + # the returned value in a dummy variable. + # + # Reason is twofold: On one hand doing less calls is in general better. + # On the other hand it could be that the target has side-effects, + # whereas conceptually, from the user point of view, the delegator should + # be doing one call. + if allow_nil + method_def = [ + "def #{method_prefix}#{method}(#{definition})", + "_ = #{to}", + "if !_.nil? || nil.respond_to?(:#{method})", + " _.#{method}(#{definition})", + "end", + "end" + ].join ";" + else + exception = %(raise DelegationError, "#{self}##{method_prefix}#{method} delegated to #{to}.#{method}, but #{to} is nil: \#{self.inspect}") + + method_def = [ + "def #{method_prefix}#{method}(#{definition})", + " _ = #{to}", + " _.#{method}(#{definition})", + "rescue NoMethodError => e", + " if _.nil? 
&& e.name == :#{method}", + " #{exception}", + " else", + " raise", + " end", + "end" + ].join ";" + end + + module_eval(method_def, file, line) + end + end + + # When building decorators, a common pattern may emerge: + # + # class Partition + # def initialize(event) + # @event = event + # end + # + # def person + # @event.detail.person || @event.creator + # end + # + # private + # def respond_to_missing?(name, include_private = false) + # @event.respond_to?(name, include_private) + # end + # + # def method_missing(method, *args, &block) + # @event.send(method, *args, &block) + # end + # end + # + # With Module#delegate_missing_to, the above is condensed to: + # + # class Partition + # delegate_missing_to :@event + # + # def initialize(event) + # @event = event + # end + # + # def person + # @event.detail.person || @event.creator + # end + # end + # + # The target can be anything callable within the object, e.g. instance + # variables, methods, constants, etc. + # + # The delegated method must be public on the target, otherwise it will + # raise +NoMethodError+. + def delegate_missing_to(target) + target = target.to_s + target = "self.#{target}" if DELEGATION_RESERVED_METHOD_NAMES.include?(target) + + module_eval <<-RUBY, __FILE__, __LINE__ + 1 + def respond_to_missing?(name, include_private = false) + # It may look like an oversight, but we deliberately do not pass + # +include_private+, because they do not get delegated. + + #{target}.respond_to?(name) || super + end + + def method_missing(method, *args, &block) + if #{target}.respond_to?(method) + #{target}.public_send(method, *args, &block) + else + begin + super + rescue NoMethodError + if #{target}.nil? + raise DelegationError, "\#{method} delegated to #{target}, but #{target} is nil" + else + raise + end + end + end + end + RUBY + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/deprecation.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/deprecation.rb new file mode 100644 index 0000000..71c42eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/deprecation.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class Module + # deprecate :foo + # deprecate bar: 'message' + # deprecate :foo, :bar, baz: 'warning!', qux: 'gone!' + # + # You can also use custom deprecator instance: + # + # deprecate :foo, deprecator: MyLib::Deprecator.new + # deprecate :foo, bar: "warning!", deprecator: MyLib::Deprecator.new + # + # \Custom deprecators must respond to deprecation_warning(deprecated_method_name, message, caller_backtrace) + # method where you can implement your custom warning behavior. 
+ # + # class MyLib::Deprecator + # def deprecation_warning(deprecated_method_name, message, caller_backtrace = nil) + # message = "#{deprecated_method_name} is deprecated and will be removed from MyLibrary | #{message}" + # Kernel.warn message + # end + # end + def deprecate(*method_names) + ActiveSupport::Deprecation.deprecate_methods(self, *method_names) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/introspection.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/introspection.rb new file mode 100644 index 0000000..c5bb598 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/introspection.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +require "active_support/inflector" + +class Module + # Returns the name of the module containing this one. + # + # M::N.parent_name # => "M" + def parent_name + if defined?(@parent_name) + @parent_name + else + parent_name = name =~ /::[^:]+\Z/ ? $`.freeze : nil + @parent_name = parent_name unless frozen? + parent_name + end + end + + # Returns the module which contains this one according to its name. + # + # module M + # module N + # end + # end + # X = M::N + # + # M::N.parent # => M + # X.parent # => M + # + # The parent of top-level and anonymous modules is Object. + # + # M.parent # => Object + # Module.new.parent # => Object + def parent + parent_name ? ActiveSupport::Inflector.constantize(parent_name) : Object + end + + # Returns all the parents of this module according to its name, ordered from + # nested outwards. The receiver is not contained within the result. + # + # module M + # module N + # end + # end + # X = M::N + # + # M.parents # => [Object] + # M::N.parents # => [M, Object] + # X.parents # => [M, Object] + def parents + parents = [] + if parent_name + parts = parent_name.split("::") + until parts.empty? + parents << ActiveSupport::Inflector.constantize(parts * "::") + parts.pop + end + end + parents << Object unless parents.include? Object + parents + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/reachable.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/reachable.rb new file mode 100644 index 0000000..e9cbda5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/reachable.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/string/inflections" + +class Module + def reachable? #:nodoc: + !anonymous? && name.safe_constantize.equal?(self) + end + deprecate :reachable? +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/redefine_method.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/redefine_method.rb new file mode 100644 index 0000000..a0a6622 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/redefine_method.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +class Module + if RUBY_VERSION >= "2.3" + # Marks the named method as intended to be redefined, if it exists. + # Suppresses the Ruby method redefinition warning. Prefer + # #redefine_method where possible. 
+ def silence_redefinition_of_method(method) + if method_defined?(method) || private_method_defined?(method) + # This suppresses the "method redefined" warning; the self-alias + # looks odd, but means we don't need to generate a unique name + alias_method method, method + end + end + else + def silence_redefinition_of_method(method) + if method_defined?(method) || private_method_defined?(method) + alias_method :__rails_redefine, method + remove_method :__rails_redefine + end + end + end + + # Replaces the existing method definition, if there is one, with the passed + # block as its body. + def redefine_method(method, &block) + visibility = method_visibility(method) + silence_redefinition_of_method(method) + define_method(method, &block) + send(visibility, method) + end + + # Replaces the existing singleton method definition, if there is one, with + # the passed block as its body. + def redefine_singleton_method(method, &block) + singleton_class.redefine_method(method, &block) + end + + def method_visibility(method) # :nodoc: + case + when private_method_defined?(method) + :private + when protected_method_defined?(method) + :protected + else + :public + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/remove_method.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/remove_method.rb new file mode 100644 index 0000000..97eb5f9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/remove_method.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" + +class Module + # Removes the named method, if it exists. + def remove_possible_method(method) + if method_defined?(method) || private_method_defined?(method) + undef_method(method) + end + end + + # Removes the named singleton method, if it exists. + def remove_possible_singleton_method(method) + singleton_class.remove_possible_method(method) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/name_error.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/name_error.rb new file mode 100644 index 0000000..6d37cd9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/name_error.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +class NameError + # Extract the name of the missing constant from the exception message. + # + # begin + # HelloWorld + # rescue NameError => e + # e.missing_name + # end + # # => "HelloWorld" + def missing_name + # Since ruby v2.3.0 `did_you_mean` gem is loaded by default. + # It extends NameError#message with spell corrections which are SLOW. + # We should use original_message message instead. + message = respond_to?(:original_message) ? original_message : self.message + + if /undefined local variable or method/ !~ message + $1 if /((::)?([A-Z]\w*)(::[A-Z]\w*)*)$/ =~ message + end + end + + # Was this exception raised because the given name was missing? + # + # begin + # HelloWorld + # rescue NameError => e + # e.missing_name?("HelloWorld") + # end + # # => true + def missing_name?(name) + if name.is_a? 
Symbol + self.name == name + else + missing_name == name.to_s + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric.rb new file mode 100644 index 0000000..0b04e35 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/numeric/bytes" +require "active_support/core_ext/numeric/time" +require "active_support/core_ext/numeric/inquiry" +require "active_support/core_ext/numeric/conversions" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/bytes.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/bytes.rb new file mode 100644 index 0000000..b002eba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/bytes.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +class Numeric + KILOBYTE = 1024 + MEGABYTE = KILOBYTE * 1024 + GIGABYTE = MEGABYTE * 1024 + TERABYTE = GIGABYTE * 1024 + PETABYTE = TERABYTE * 1024 + EXABYTE = PETABYTE * 1024 + + # Enables the use of byte calculations and declarations, like 45.bytes + 2.6.megabytes + # + # 2.bytes # => 2 + def bytes + self + end + alias :byte :bytes + + # Returns the number of bytes equivalent to the kilobytes provided. + # + # 2.kilobytes # => 2048 + def kilobytes + self * KILOBYTE + end + alias :kilobyte :kilobytes + + # Returns the number of bytes equivalent to the megabytes provided. + # + # 2.megabytes # => 2_097_152 + def megabytes + self * MEGABYTE + end + alias :megabyte :megabytes + + # Returns the number of bytes equivalent to the gigabytes provided. + # + # 2.gigabytes # => 2_147_483_648 + def gigabytes + self * GIGABYTE + end + alias :gigabyte :gigabytes + + # Returns the number of bytes equivalent to the terabytes provided. + # + # 2.terabytes # => 2_199_023_255_552 + def terabytes + self * TERABYTE + end + alias :terabyte :terabytes + + # Returns the number of bytes equivalent to the petabytes provided. + # + # 2.petabytes # => 2_251_799_813_685_248 + def petabytes + self * PETABYTE + end + alias :petabyte :petabytes + + # Returns the number of bytes equivalent to the exabytes provided. + # + # 2.exabytes # => 2_305_843_009_213_693_952 + def exabytes + self * EXABYTE + end + alias :exabyte :exabytes +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/conversions.rb new file mode 100644 index 0000000..f6c2713 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/conversions.rb @@ -0,0 +1,140 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/number_helper" +require "active_support/core_ext/module/deprecation" + +module ActiveSupport::NumericWithFormat + # Provides options for converting numbers into formatted strings. + # Options are provided for phone numbers, currency, percentage, + # precision, positional notation, file size and pretty printing. 
+ # + # ==== Options + # + # For details on which formats use which options, see ActiveSupport::NumberHelper + # + # ==== Examples + # + # Phone Numbers: + # 5551234.to_s(:phone) # => "555-1234" + # 1235551234.to_s(:phone) # => "123-555-1234" + # 1235551234.to_s(:phone, area_code: true) # => "(123) 555-1234" + # 1235551234.to_s(:phone, delimiter: ' ') # => "123 555 1234" + # 1235551234.to_s(:phone, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # 1235551234.to_s(:phone, country_code: 1) # => "+1-123-555-1234" + # 1235551234.to_s(:phone, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # Currency: + # 1234567890.50.to_s(:currency) # => "$1,234,567,890.50" + # 1234567890.506.to_s(:currency) # => "$1,234,567,890.51" + # 1234567890.506.to_s(:currency, precision: 3) # => "$1,234,567,890.506" + # 1234567890.506.to_s(:currency, locale: :fr) # => "1 234 567 890,51 €" + # -1234567890.50.to_s(:currency, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # 1234567890.50.to_s(:currency, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # 1234567890.50.to_s(:currency, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + # + # Percentage: + # 100.to_s(:percentage) # => "100.000%" + # 100.to_s(:percentage, precision: 0) # => "100%" + # 1000.to_s(:percentage, delimiter: '.', separator: ',') # => "1.000,000%" + # 302.24398923423.to_s(:percentage, precision: 5) # => "302.24399%" + # 1000.to_s(:percentage, locale: :fr) # => "1 000,000%" + # 100.to_s(:percentage, format: '%n %') # => "100.000 %" + # + # Delimited: + # 12345678.to_s(:delimited) # => "12,345,678" + # 12345678.05.to_s(:delimited) # => "12,345,678.05" + # 12345678.to_s(:delimited, delimiter: '.') # => "12.345.678" + # 12345678.to_s(:delimited, delimiter: ',') # => "12,345,678" + # 12345678.05.to_s(:delimited, separator: ' ') # => "12,345,678 05" + # 12345678.05.to_s(:delimited, locale: :fr) # => "12 345 678,05" + # 98765432.98.to_s(:delimited, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # + # Rounded: + # 111.2345.to_s(:rounded) # => "111.235" + # 111.2345.to_s(:rounded, precision: 2) # => "111.23" + # 13.to_s(:rounded, precision: 5) # => "13.00000" + # 389.32314.to_s(:rounded, precision: 0) # => "389" + # 111.2345.to_s(:rounded, significant: true) # => "111" + # 111.2345.to_s(:rounded, precision: 1, significant: true) # => "100" + # 13.to_s(:rounded, precision: 5, significant: true) # => "13.000" + # 111.234.to_s(:rounded, locale: :fr) # => "111,234" + # 13.to_s(:rounded, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # 389.32314.to_s(:rounded, precision: 4, significant: true) # => "389.3" + # 1111.2345.to_s(:rounded, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + # + # Human-friendly size in Bytes: + # 123.to_s(:human_size) # => "123 Bytes" + # 1234.to_s(:human_size) # => "1.21 KB" + # 12345.to_s(:human_size) # => "12.1 KB" + # 1234567.to_s(:human_size) # => "1.18 MB" + # 1234567890.to_s(:human_size) # => "1.15 GB" + # 1234567890123.to_s(:human_size) # => "1.12 TB" + # 1234567890123456.to_s(:human_size) # => "1.1 PB" + # 1234567890123456789.to_s(:human_size) # => "1.07 EB" + # 1234567.to_s(:human_size, precision: 2) # => "1.2 MB" + # 483989.to_s(:human_size, precision: 2) # => "470 KB" + # 1234567.to_s(:human_size, precision: 2, separator: ',') # => "1,2 MB" + # 1234567890123.to_s(:human_size, precision: 5) # => "1.1228 TB" + # 524288000.to_s(:human_size, 
precision: 5) # => "500 MB" + # + # Human-friendly format: + # 123.to_s(:human) # => "123" + # 1234.to_s(:human) # => "1.23 Thousand" + # 12345.to_s(:human) # => "12.3 Thousand" + # 1234567.to_s(:human) # => "1.23 Million" + # 1234567890.to_s(:human) # => "1.23 Billion" + # 1234567890123.to_s(:human) # => "1.23 Trillion" + # 1234567890123456.to_s(:human) # => "1.23 Quadrillion" + # 1234567890123456789.to_s(:human) # => "1230 Quadrillion" + # 489939.to_s(:human, precision: 2) # => "490 Thousand" + # 489939.to_s(:human, precision: 4) # => "489.9 Thousand" + # 1234567.to_s(:human, precision: 4, + # significant: false) # => "1.2346 Million" + # 1234567.to_s(:human, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + def to_s(format = nil, options = nil) + case format + when nil + super() + when Integer, String + super(format) + when :phone + ActiveSupport::NumberHelper.number_to_phone(self, options || {}) + when :currency + ActiveSupport::NumberHelper.number_to_currency(self, options || {}) + when :percentage + ActiveSupport::NumberHelper.number_to_percentage(self, options || {}) + when :delimited + ActiveSupport::NumberHelper.number_to_delimited(self, options || {}) + when :rounded + ActiveSupport::NumberHelper.number_to_rounded(self, options || {}) + when :human + ActiveSupport::NumberHelper.number_to_human(self, options || {}) + when :human_size + ActiveSupport::NumberHelper.number_to_human_size(self, options || {}) + when Symbol + super() + else + super(format) + end + end +end + +# Ruby 2.4+ unifies Fixnum & Bignum into Integer. +if 0.class == Integer + Integer.prepend ActiveSupport::NumericWithFormat +else + Fixnum.prepend ActiveSupport::NumericWithFormat + Bignum.prepend ActiveSupport::NumericWithFormat +end +Float.prepend ActiveSupport::NumericWithFormat +BigDecimal.prepend ActiveSupport::NumericWithFormat diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/inquiry.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/inquiry.rb new file mode 100644 index 0000000..15334c9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/inquiry.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +unless 1.respond_to?(:positive?) # TODO: Remove this file when we drop support to ruby < 2.3 + class Numeric + # Returns true if the number is positive. + # + # 1.positive? # => true + # 0.positive? # => false + # -1.positive? # => false + def positive? + self > 0 + end + + # Returns true if the number is negative. + # + # -1.negative? # => true + # 0.negative? # => false + # 1.negative? # => false + def negative? + self < 0 + end + end + + class Complex + undef :positive? + undef :negative? + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/time.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/time.rb new file mode 100644 index 0000000..bc4627f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/time.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/date/acts_like" + +class Numeric + # Returns a Duration instance matching the number of seconds provided. 
+ # + # 2.seconds # => 2 seconds + def seconds + ActiveSupport::Duration.seconds(self) + end + alias :second :seconds + + # Returns a Duration instance matching the number of minutes provided. + # + # 2.minutes # => 2 minutes + def minutes + ActiveSupport::Duration.minutes(self) + end + alias :minute :minutes + + # Returns a Duration instance matching the number of hours provided. + # + # 2.hours # => 2 hours + def hours + ActiveSupport::Duration.hours(self) + end + alias :hour :hours + + # Returns a Duration instance matching the number of days provided. + # + # 2.days # => 2 days + def days + ActiveSupport::Duration.days(self) + end + alias :day :days + + # Returns a Duration instance matching the number of weeks provided. + # + # 2.weeks # => 2 weeks + def weeks + ActiveSupport::Duration.weeks(self) + end + alias :week :weeks + + # Returns a Duration instance matching the number of fortnights provided. + # + # 2.fortnights # => 4 weeks + def fortnights + ActiveSupport::Duration.weeks(self * 2) + end + alias :fortnight :fortnights + + # Returns the number of milliseconds equivalent to the seconds provided. + # Used with the standard time durations. + # + # 2.in_milliseconds # => 2000 + # 1.hour.in_milliseconds # => 3600000 + def in_milliseconds + self * 1000 + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object.rb new file mode 100644 index 0000000..efd34cc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/object/duplicable" +require "active_support/core_ext/object/deep_dup" +require "active_support/core_ext/object/try" +require "active_support/core_ext/object/inclusion" + +require "active_support/core_ext/object/conversions" +require "active_support/core_ext/object/instance_variables" + +require "active_support/core_ext/object/json" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/object/with_options" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/acts_like.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/acts_like.rb new file mode 100644 index 0000000..403ee20 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/acts_like.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +class Object + # A duck-type assistant method. For example, Active Support extends Date + # to define an acts_like_date? method, and extends Time to define + # acts_like_time?. As a result, we can do x.acts_like?(:time) and + # x.acts_like?(:date) to do duck-type-safe comparisons, since classes that + # we want to act like Time simply need to define an acts_like_time? method. + def acts_like?(duck) + case duck + when :time + respond_to? :acts_like_time? + when :date + respond_to? :acts_like_date? + when :string + respond_to? :acts_like_string? + else + respond_to? :"acts_like_#{duck}?" 
+ end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/blank.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/blank.rb new file mode 100644 index 0000000..2ca431a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/blank.rb @@ -0,0 +1,156 @@ +# frozen_string_literal: true + +require "active_support/core_ext/regexp" +require "concurrent/map" + +class Object + # An object is blank if it's false, empty, or a whitespace string. + # For example, +false+, '', ' ', +nil+, [], and {} are all blank. + # + # This simplifies + # + # !address || address.empty? + # + # to + # + # address.blank? + # + # @return [true, false] + def blank? + respond_to?(:empty?) ? !!empty? : !self + end + + # An object is present if it's not blank. + # + # @return [true, false] + def present? + !blank? + end + + # Returns the receiver if it's present otherwise returns +nil+. + # object.presence is equivalent to + # + # object.present? ? object : nil + # + # For example, something like + # + # state = params[:state] if params[:state].present? + # country = params[:country] if params[:country].present? + # region = state || country || 'US' + # + # becomes + # + # region = params[:state].presence || params[:country].presence || 'US' + # + # @return [Object] + def presence + self if present? + end +end + +class NilClass + # +nil+ is blank: + # + # nil.blank? # => true + # + # @return [true] + def blank? + true + end +end + +class FalseClass + # +false+ is blank: + # + # false.blank? # => true + # + # @return [true] + def blank? + true + end +end + +class TrueClass + # +true+ is not blank: + # + # true.blank? # => false + # + # @return [false] + def blank? + false + end +end + +class Array + # An array is blank if it's empty: + # + # [].blank? # => true + # [1,2,3].blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? +end + +class Hash + # A hash is blank if it's empty: + # + # {}.blank? # => true + # { key: 'value' }.blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? +end + +class String + BLANK_RE = /\A[[:space:]]*\z/ + ENCODED_BLANKS = Concurrent::Map.new do |h, enc| + h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) + end + + # A string is blank if it's empty or contains whitespaces only: + # + # ''.blank? # => true + # ' '.blank? # => true + # "\t\n\r".blank? # => true + # ' blah '.blank? # => false + # + # Unicode whitespace is supported: + # + # "\u00a0".blank? # => true + # + # @return [true, false] + def blank? + # The regexp that matches blank strings is expensive. For the case of empty + # strings we can speed up this method (~3.5x) with an empty? call. The + # penalty for the rest of strings is marginal. + empty? || + begin + BLANK_RE.match?(self) + rescue Encoding::CompatibilityError + ENCODED_BLANKS[self.encoding].match?(self) + end + end +end + +class Numeric #:nodoc: + # No number is blank: + # + # 1.blank? # => false + # 0.blank? # => false + # + # @return [false] + def blank? + false + end +end + +class Time #:nodoc: + # No Time is blank: + # + # Time.now.blank? # => false + # + # @return [false] + def blank? 
+ false + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/conversions.rb new file mode 100644 index 0000000..624fb8d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/conversions.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/hash/conversions" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/deep_dup.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/deep_dup.rb new file mode 100644 index 0000000..c66c5eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/deep_dup.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" + +class Object + # Returns a deep copy of object if it's duplicable. If it's + # not duplicable, returns +self+. + # + # object = Object.new + # dup = object.deep_dup + # dup.instance_variable_set(:@a, 1) + # + # object.instance_variable_defined?(:@a) # => false + # dup.instance_variable_defined?(:@a) # => true + def deep_dup + duplicable? ? dup : self + end +end + +class Array + # Returns a deep copy of array. + # + # array = [1, [2, 3]] + # dup = array.deep_dup + # dup[1][2] = 4 + # + # array[1][2] # => nil + # dup[1][2] # => 4 + def deep_dup + map(&:deep_dup) + end +end + +class Hash + # Returns a deep copy of hash. + # + # hash = { a: { b: 'b' } } + # dup = hash.deep_dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => nil + # dup[:a][:c] # => "c" + def deep_dup + hash = dup + each_pair do |key, value| + if key.frozen? && ::String === key + hash[key] = value.deep_dup + else + hash.delete(key) + hash[key.deep_dup] = value.deep_dup + end + end + hash + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/duplicable.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/duplicable.rb new file mode 100644 index 0000000..9bb9908 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/duplicable.rb @@ -0,0 +1,156 @@ +# frozen_string_literal: true + +#-- +# Most objects are cloneable, but not all. For example you can't dup methods: +# +# method(:puts).dup # => TypeError: allocator undefined for Method +# +# Classes may signal their instances are not duplicable removing +dup+/+clone+ +# or raising exceptions from them. So, to dup an arbitrary object you normally +# use an optimistic approach and are ready to catch an exception, say: +# +# arbitrary_object.dup rescue object +# +# Rails dups objects in a few critical spots where they are not that arbitrary. +# That rescue is very expensive (like 40 times slower than a predicate), and it +# is often triggered. +# +# That's why we hardcode the following cases and check duplicable? instead of +# using that rescue idiom. +#++ +class Object + # Can you safely dup this object? + # + # False for method objects; + # true otherwise. + def duplicable? + true + end +end + +class NilClass + begin + nil.dup + rescue TypeError + + # +nil+ is not duplicable: + # + # nil.duplicable? 
# => false + # nil.dup # => TypeError: can't dup NilClass + def duplicable? + false + end + end +end + +class FalseClass + begin + false.dup + rescue TypeError + + # +false+ is not duplicable: + # + # false.duplicable? # => false + # false.dup # => TypeError: can't dup FalseClass + def duplicable? + false + end + end +end + +class TrueClass + begin + true.dup + rescue TypeError + + # +true+ is not duplicable: + # + # true.duplicable? # => false + # true.dup # => TypeError: can't dup TrueClass + def duplicable? + false + end + end +end + +class Symbol + begin + :symbol.dup # Ruby 2.4.x. + "symbol_from_string".to_sym.dup # Some symbols can't `dup` in Ruby 2.4.0. + rescue TypeError + + # Symbols are not duplicable: + # + # :my_symbol.duplicable? # => false + # :my_symbol.dup # => TypeError: can't dup Symbol + def duplicable? + false + end + end +end + +class Numeric + begin + 1.dup + rescue TypeError + + # Numbers are not duplicable: + # + # 3.duplicable? # => false + # 3.dup # => TypeError: can't dup Integer + def duplicable? + false + end + end +end + +require "bigdecimal" +class BigDecimal + # BigDecimals are duplicable: + # + # BigDecimal("1.2").duplicable? # => true + # BigDecimal("1.2").dup # => # + def duplicable? + true + end +end + +class Method + # Methods are not duplicable: + # + # method(:puts).duplicable? # => false + # method(:puts).dup # => TypeError: allocator undefined for Method + def duplicable? + false + end +end + +class Complex + begin + Complex(1).dup + rescue TypeError + + # Complexes are not duplicable: + # + # Complex(1).duplicable? # => false + # Complex(1).dup # => TypeError: can't copy Complex + def duplicable? + false + end + end +end + +class Rational + begin + Rational(1).dup + rescue TypeError + + # Rationals are not duplicable: + # + # Rational(1).duplicable? # => false + # Rational(1).dup # => TypeError: can't copy Rational + def duplicable? + false + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/inclusion.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/inclusion.rb new file mode 100644 index 0000000..6064e92 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/inclusion.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +class Object + # Returns true if this object is included in the argument. Argument must be + # any object which responds to +#include?+. Usage: + # + # characters = ["Konata", "Kagami", "Tsukasa"] + # "Konata".in?(characters) # => true + # + # This will throw an +ArgumentError+ if the argument doesn't respond + # to +#include?+. + def in?(another_object) + another_object.include?(self) + rescue NoMethodError + raise ArgumentError.new("The parameter passed to #in? must respond to #include?") + end + + # Returns the receiver if it's included in the argument otherwise returns +nil+. + # Argument must be any object which responds to +#include?+. Usage: + # + # params[:bucket_type].presence_in %w( project calendar ) + # + # This will throw an +ArgumentError+ if the argument doesn't respond to +#include?+. + # + # @return [Object] + def presence_in(another_object) + in?(another_object) ? 
self : nil + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/instance_variables.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/instance_variables.rb new file mode 100644 index 0000000..12fdf84 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/instance_variables.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +class Object + # Returns a hash with string keys that maps instance variable names without "@" to their + # corresponding values. + # + # class C + # def initialize(x, y) + # @x, @y = x, y + # end + # end + # + # C.new(0, 1).instance_values # => {"x" => 0, "y" => 1} + def instance_values + Hash[instance_variables.map { |name| [name[1..-1], instance_variable_get(name)] }] + end + + # Returns an array of instance variable names as strings including "@". + # + # class C + # def initialize(x, y) + # @x, @y = x, y + # end + # end + # + # C.new(0, 1).instance_variable_names # => ["@y", "@x"] + def instance_variable_names + instance_variables.map(&:to_s) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/json.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/json.rb new file mode 100644 index 0000000..f7c623f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/json.rb @@ -0,0 +1,227 @@ +# frozen_string_literal: true + +# Hack to load json gem first so we can overwrite its to_json. +require "json" +require "bigdecimal" +require "uri/generic" +require "pathname" +require "active_support/core_ext/big_decimal/conversions" # for #to_s +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +require "active_support/core_ext/object/instance_variables" +require "time" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/date_time/conversions" +require "active_support/core_ext/date/conversions" + +# The JSON gem adds a few modules to Ruby core classes containing :to_json definition, overwriting +# their default behavior. That said, we need to define the basic to_json method in all of them, +# otherwise they will always use to_json gem implementation, which is backwards incompatible in +# several cases (for instance, the JSON implementation for Hash does not work) with inheritance +# and consequently classes as ActiveSupport::OrderedHash cannot be serialized to json. +# +# On the other hand, we should avoid conflict with ::JSON.{generate,dump}(obj). Unfortunately, the +# JSON gem's encoder relies on its own to_json implementation to encode objects. Since it always +# passes a ::JSON::State object as the only argument to to_json, we can detect that and forward the +# calls to the original to_json method. +# +# It should be noted that when using ::JSON.{generate,dump} directly, ActiveSupport's encoder is +# bypassed completely. This means that as_json won't be invoked and the JSON gem will simply +# ignore any options it does not natively understand. This also means that ::JSON.{generate,dump} +# should give exactly the same results with or without active support. 
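+ #
+ # A minimal sketch of the difference described above, assuming Active Support is loaded
+ # (this example is illustrative and not part of the upstream file):
+ #
+ #   { value: Float::INFINITY }.to_json      # => "{\"value\":null}"  (as_json maps non-finite floats to nil)
+ #   ::JSON.generate(value: Float::INFINITY) # as_json is never consulted; the JSON gem raises JSON::GeneratorError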
+ +module ActiveSupport + module ToJsonWithActiveSupportEncoder # :nodoc: + def to_json(options = nil) + if options.is_a?(::JSON::State) + # Called from JSON.{generate,dump}, forward it to JSON gem's to_json + super(options) + else + # to_json is being invoked directly, use ActiveSupport's encoder + ActiveSupport::JSON.encode(self, options) + end + end + end +end + +[Object, Array, FalseClass, Float, Hash, Integer, NilClass, String, TrueClass, Enumerable].reverse_each do |klass| + klass.prepend(ActiveSupport::ToJsonWithActiveSupportEncoder) +end + +class Object + def as_json(options = nil) #:nodoc: + if respond_to?(:to_hash) + to_hash.as_json(options) + else + instance_values.as_json(options) + end + end +end + +class Struct #:nodoc: + def as_json(options = nil) + Hash[members.zip(values)].as_json(options) + end +end + +class TrueClass + def as_json(options = nil) #:nodoc: + self + end +end + +class FalseClass + def as_json(options = nil) #:nodoc: + self + end +end + +class NilClass + def as_json(options = nil) #:nodoc: + self + end +end + +class String + def as_json(options = nil) #:nodoc: + self + end +end + +class Symbol + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Numeric + def as_json(options = nil) #:nodoc: + self + end +end + +class Float + # Encoding Infinity or NaN to JSON should return "null". The default returns + # "Infinity" or "NaN" which are not valid JSON. + def as_json(options = nil) #:nodoc: + finite? ? self : nil + end +end + +class BigDecimal + # A BigDecimal would be naturally represented as a JSON number. Most libraries, + # however, parse non-integer JSON numbers directly as floats. Clients using + # those libraries would get in general a wrong number and no way to recover + # other than manually inspecting the string with the JSON code itself. + # + # That's why a JSON string is returned. The JSON literal is not numeric, but + # if the other end knows by contract that the data is supposed to be a + # BigDecimal, it still has the chance to post-process the string and get the + # real value. + def as_json(options = nil) #:nodoc: + finite? ? to_s : nil + end +end + +class Regexp + def as_json(options = nil) #:nodoc: + to_s + end +end + +module Enumerable + def as_json(options = nil) #:nodoc: + to_a.as_json(options) + end +end + +class IO + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Range + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Array + def as_json(options = nil) #:nodoc: + map { |v| options ? v.as_json(options.dup) : v.as_json } + end +end + +class Hash + def as_json(options = nil) #:nodoc: + # create a subset of the hash by applying :only or :except + subset = if options + if attrs = options[:only] + slice(*Array(attrs)) + elsif attrs = options[:except] + except(*Array(attrs)) + else + self + end + else + self + end + + Hash[subset.map { |k, v| [k.to_s, options ? 
v.as_json(options.dup) : v.as_json] }] + end +end + +class Time + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end +end + +class Date + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + strftime("%Y-%m-%d") + else + strftime("%Y/%m/%d") + end + end +end + +class DateTime + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + strftime("%Y/%m/%d %H:%M:%S %z") + end + end +end + +class URI::Generic #:nodoc: + def as_json(options = nil) + to_s + end +end + +class Pathname #:nodoc: + def as_json(options = nil) + to_s + end +end + +class Process::Status #:nodoc: + def as_json(options = nil) + { exitstatus: exitstatus, pid: pid } + end +end + +class Exception + def as_json(options = nil) + to_s + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_param.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_param.rb new file mode 100644 index 0000000..6d2bdd7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_param.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/to_query" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_query.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_query.rb new file mode 100644 index 0000000..bac6ff9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_query.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +require "cgi" + +class Object + # Alias of to_s. + def to_param + to_s + end + + # Converts an object into a string suitable for use as a URL query string, + # using the given key as the param name. + def to_query(key) + "#{CGI.escape(key.to_param)}=#{CGI.escape(to_param.to_s)}" + end +end + +class NilClass + # Returns +self+. + def to_param + self + end +end + +class TrueClass + # Returns +self+. + def to_param + self + end +end + +class FalseClass + # Returns +self+. + def to_param + self + end +end + +class Array + # Calls to_param on all its elements and joins the result with + # slashes. This is used by url_for in Action Pack. + def to_param + collect(&:to_param).join "/" + end + + # Converts an array into a string suitable for use as a URL query string, + # using the given +key+ as the param name. + # + # ['Rails', 'coding'].to_query('hobbies') # => "hobbies%5B%5D=Rails&hobbies%5B%5D=coding" + def to_query(key) + prefix = "#{key}[]" + + if empty? 
+ nil.to_query(prefix) + else + collect { |value| value.to_query(prefix) }.join "&" + end + end +end + +class Hash + # Returns a string representation of the receiver suitable for use as a URL + # query string: + # + # {name: 'David', nationality: 'Danish'}.to_query + # # => "name=David&nationality=Danish" + # + # An optional namespace can be passed to enclose key names: + # + # {name: 'David', nationality: 'Danish'}.to_query('user') + # # => "user%5Bname%5D=David&user%5Bnationality%5D=Danish" + # + # The string pairs "key=value" that conform the query string + # are sorted lexicographically in ascending order. + # + # This method is also aliased as +to_param+. + def to_query(namespace = nil) + query = collect do |key, value| + unless (value.is_a?(Hash) || value.is_a?(Array)) && value.empty? + value.to_query(namespace ? "#{namespace}[#{key}]" : key) + end + end.compact + + query.sort! unless namespace.to_s.include?("[]") + query.join("&") + end + + alias_method :to_param, :to_query +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/try.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/try.rb new file mode 100644 index 0000000..c874691 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/try.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true + +require "delegate" + +module ActiveSupport + module Tryable #:nodoc: + def try(*a, &b) + try!(*a, &b) if a.empty? || respond_to?(a.first) + end + + def try!(*a, &b) + if a.empty? && block_given? + if b.arity == 0 + instance_eval(&b) + else + yield self + end + else + public_send(*a, &b) + end + end + end +end + +class Object + include ActiveSupport::Tryable + + ## + # :method: try + # + # :call-seq: + # try(*a, &b) + # + # Invokes the public method whose name goes as first argument just like + # +public_send+ does, except that if the receiver does not respond to it the + # call returns +nil+ rather than raising an exception. + # + # This method is defined to be able to write + # + # @person.try(:name) + # + # instead of + # + # @person.name if @person + # + # +try+ calls can be chained: + # + # @person.try(:spouse).try(:name) + # + # instead of + # + # @person.spouse.name if @person && @person.spouse + # + # +try+ will also return +nil+ if the receiver does not respond to the method: + # + # @person.try(:non_existing_method) # => nil + # + # instead of + # + # @person.non_existing_method if @person.respond_to?(:non_existing_method) # => nil + # + # +try+ returns +nil+ when called on +nil+ regardless of whether it responds + # to the method: + # + # nil.try(:to_i) # => nil, rather than 0 + # + # Arguments and blocks are forwarded to the method if invoked: + # + # @posts.try(:each_slice, 2) do |a, b| + # ... + # end + # + # The number of arguments in the signature must match. If the object responds + # to the method the call is attempted and +ArgumentError+ is still raised + # in case of argument mismatch. + # + # If +try+ is called without arguments it yields the receiver to a given + # block unless it is +nil+: + # + # @person.try do |p| + # ... + # end + # + # You can also call try with a block without accepting an argument, and the block + # will be instance_eval'ed instead: + # + # @person.try { upcase.truncate(50) } + # + # Please also note that +try+ is defined on +Object+. 
Therefore, it won't work + # with instances of classes that do not have +Object+ among their ancestors, + # like direct subclasses of +BasicObject+. + + ## + # :method: try! + # + # :call-seq: + # try!(*a, &b) + # + # Same as #try, but raises a +NoMethodError+ exception if the receiver is + # not +nil+ and does not implement the tried method. + # + # "a".try!(:upcase) # => "A" + # nil.try!(:upcase) # => nil + # 123.try!(:upcase) # => NoMethodError: undefined method `upcase' for 123:Integer +end + +class Delegator + include ActiveSupport::Tryable + + ## + # :method: try + # + # :call-seq: + # try(a*, &b) + # + # See Object#try + + ## + # :method: try! + # + # :call-seq: + # try!(a*, &b) + # + # See Object#try! +end + +class NilClass + # Calling +try+ on +nil+ always returns +nil+. + # It becomes especially helpful when navigating through associations that may return +nil+. + # + # nil.try(:name) # => nil + # + # Without +try+ + # @person && @person.children.any? && @person.children.first.name + # + # With +try+ + # @person.try(:children).try(:first).try(:name) + def try(*args) + nil + end + + # Calling +try!+ on +nil+ always returns +nil+. + # + # nil.try!(:name) # => nil + def try!(*args) + nil + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/with_options.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/with_options.rb new file mode 100644 index 0000000..2838fd7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/with_options.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +require "active_support/option_merger" + +class Object + # An elegant way to factor duplication out of options passed to a series of + # method calls. Each method called in the block, with the block variable as + # the receiver, will have its options merged with the default +options+ hash + # provided. Each method called on the block variable must take an options + # hash as its final argument. + # + # Without with_options, this code contains duplication: + # + # class Account < ActiveRecord::Base + # has_many :customers, dependent: :destroy + # has_many :products, dependent: :destroy + # has_many :invoices, dependent: :destroy + # has_many :expenses, dependent: :destroy + # end + # + # Using with_options, we can remove the duplication: + # + # class Account < ActiveRecord::Base + # with_options dependent: :destroy do |assoc| + # assoc.has_many :customers + # assoc.has_many :products + # assoc.has_many :invoices + # assoc.has_many :expenses + # end + # end + # + # It can also be used with an explicit receiver: + # + # I18n.with_options locale: user.locale, scope: 'newsletter' do |i18n| + # subject i18n.t :subject + # body i18n.t :body, user_name: user.name + # end + # + # When you don't pass an explicit receiver, it executes the whole block + # in merging options context: + # + # class Account < ActiveRecord::Base + # with_options dependent: :destroy do + # has_many :customers + # has_many :products + # has_many :invoices + # has_many :expenses + # end + # end + # + # with_options can also be nested since the call is forwarded to its receiver. + # + # NOTE: Each nesting level will merge inherited defaults in addition to their own. + # + # class Post < ActiveRecord::Base + # with_options if: :persisted?, length: { minimum: 50 } do + # validates :content, if: -> { content.present? 
} + # end + # end + # + # The code is equivalent to: + # + # validates :content, length: { minimum: 50 }, if: -> { content.present? } + # + # Hence the inherited default for +if+ key is ignored. + # + # NOTE: You cannot call class methods implicitly inside of with_options. + # You can access these methods using the class name instead: + # + # class Phone < ActiveRecord::Base + # enum phone_number_type: [home: 0, office: 1, mobile: 2] + # + # with_options presence: true do + # validates :phone_number_type, inclusion: { in: Phone.phone_number_types.keys } + # end + # end + # + def with_options(options, &block) + option_merger = ActiveSupport::OptionMerger.new(self, options) + block.arity.zero? ? option_merger.instance_eval(&block) : block.call(option_merger) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range.rb new file mode 100644 index 0000000..78814fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/range/conversions" +require "active_support/core_ext/range/compare_range" +require "active_support/core_ext/range/include_time_with_zone" +require "active_support/core_ext/range/overlaps" +require "active_support/core_ext/range/each" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/compare_range.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/compare_range.rb new file mode 100644 index 0000000..704041f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/compare_range.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +module ActiveSupport + module CompareWithRange #:nodoc: + # Extends the default Range#=== to support range comparisons. + # (1..5) === (1..5) # => true + # (1..5) === (2..3) # => true + # (1..5) === (2..6) # => false + # + # The native Range#=== behavior is untouched. + # ('a'..'f') === ('c') # => true + # (5..9) === (11) # => false + def ===(value) + if value.is_a?(::Range) + # 1...10 includes 1..9 but it does not include 1..10. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + super(value.first) && value.last.send(operator, last) + else + super + end + end + + # Extends the default Range#include? to support range comparisons. + # (1..5).include?(1..5) # => true + # (1..5).include?(2..3) # => true + # (1..5).include?(2..6) # => false + # + # The native Range#include? behavior is untouched. + # ('a'..'f').include?('c') # => true + # (5..9).include?(11) # => false + def include?(value) + if value.is_a?(::Range) + # 1...10 includes 1..9 but it does not include 1..10. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + super(value.first) && value.last.send(operator, last) + else + super + end + end + + # Extends the default Range#cover? to support range comparisons. + # (1..5).cover?(1..5) # => true + # (1..5).cover?(2..3) # => true + # (1..5).cover?(2..6) # => false + # + # The native Range#cover? behavior is untouched. + # ('a'..'f').cover?('c') # => true + # (5..9).cover?(11) # => false + def cover?(value) + if value.is_a?(::Range) + # 1...10 covers 1..9 but it does not cover 1..10. + operator = exclude_end? && !value.exclude_end? ? 
:< : :<= + super(value.first) && value.last.send(operator, last) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::CompareWithRange) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/conversions.rb new file mode 100644 index 0000000..8832fbc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/conversions.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module ActiveSupport::RangeWithFormat + RANGE_FORMATS = { + db: -> (start, stop) do + case start + when String then "BETWEEN '#{start}' AND '#{stop}'" + else + "BETWEEN '#{start.to_s(:db)}' AND '#{stop.to_s(:db)}'" + end + end + } + + # Convert range to a formatted string. See RANGE_FORMATS for predefined formats. + # + # range = (1..100) # => 1..100 + # + # range.to_s # => "1..100" + # range.to_s(:db) # => "BETWEEN '1' AND '100'" + # + # == Adding your own range formats to to_s + # You can add your own formats to the Range::RANGE_FORMATS hash. + # Use the format name as the hash key and a Proc instance. + # + # # config/initializers/range_formats.rb + # Range::RANGE_FORMATS[:short] = ->(start, stop) { "Between #{start.to_s(:db)} and #{stop.to_s(:db)}" } + def to_s(format = :default) + if formatter = RANGE_FORMATS[format] + formatter.call(first, last) + else + super() + end + end + + alias_method :to_default_s, :to_s + alias_method :to_formatted_s, :to_s +end + +Range.prepend(ActiveSupport::RangeWithFormat) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/each.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/each.rb new file mode 100644 index 0000000..2f22cd0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/each.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" + +module ActiveSupport + module EachTimeWithZone #:nodoc: + def each(&block) + ensure_iteration_allowed + super + end + + def step(n = 1, &block) + ensure_iteration_allowed + super + end + + private + + def ensure_iteration_allowed + raise TypeError, "can't iterate from #{first.class}" if first.is_a?(TimeWithZone) + end + end +end + +Range.prepend(ActiveSupport::EachTimeWithZone) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_range.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_range.rb new file mode 100644 index 0000000..4812e27 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_range.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/range/compare_range" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_time_with_zone.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_time_with_zone.rb new file mode 100644 index 0000000..5f80acf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_time_with_zone.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" + +module ActiveSupport + module IncludeTimeWithZone #:nodoc: + # Extends the default Range#include? 
to support ActiveSupport::TimeWithZone. + # + # (1.hour.ago..1.hour.from_now).include?(Time.current) # => true + # + def include?(value) + if first.is_a?(TimeWithZone) + cover?(value) + elsif last.is_a?(TimeWithZone) + cover?(value) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::IncludeTimeWithZone) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/overlaps.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/overlaps.rb new file mode 100644 index 0000000..f753607 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/overlaps.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +class Range + # Compare two ranges and see if they overlap each other + # (1..5).overlaps?(4..6) # => true + # (1..5).overlaps?(7..9) # => false + def overlaps?(other) + cover?(other.first) || other.cover?(first) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/regexp.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/regexp.rb new file mode 100644 index 0000000..efbd708 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/regexp.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +class Regexp #:nodoc: + def multiline? + options & MULTILINE == MULTILINE + end + + def match?(string, pos = 0) + !!match(string, pos) + end unless //.respond_to?(:match?) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/securerandom.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/securerandom.rb new file mode 100644 index 0000000..b4a491f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/securerandom.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require "securerandom" + +module SecureRandom + BASE58_ALPHABET = ("0".."9").to_a + ("A".."Z").to_a + ("a".."z").to_a - ["0", "O", "I", "l"] + # SecureRandom.base58 generates a random base58 string. + # + # The argument _n_ specifies the length, of the random string to be generated. + # + # If _n_ is not specified or is +nil+, 16 is assumed. It may be larger in the future. 
+ # + # The result may contain alphanumeric characters except 0, O, I and l + # + # p SecureRandom.base58 # => "4kUgL2pdQMSCQtjE" + # p SecureRandom.base58(24) # => "77TMHrHJFvFDwodq8w7Ev2m7" + # + def self.base58(n = 16) + SecureRandom.random_bytes(n).unpack("C*").map do |byte| + idx = byte % 64 + idx = SecureRandom.random_number(58) if idx >= 58 + BASE58_ALPHABET[idx] + end.join + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string.rb new file mode 100644 index 0000000..757d15c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/filters" +require "active_support/core_ext/string/multibyte" +require "active_support/core_ext/string/starts_ends_with" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/string/output_safety" +require "active_support/core_ext/string/exclude" +require "active_support/core_ext/string/strip" +require "active_support/core_ext/string/inquiry" +require "active_support/core_ext/string/indent" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/access.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/access.rb new file mode 100644 index 0000000..58591bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/access.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +class String + # If you pass a single integer, returns a substring of one character at that + # position. The first character of the string is at position 0, the next at + # position 1, and so on. If a range is supplied, a substring containing + # characters at offsets given by the range is returned. In both cases, if an + # offset is negative, it is counted from the end of the string. Returns +nil+ + # if the initial offset falls outside the string. Returns an empty string if + # the beginning of the range is greater than the end of the string. + # + # str = "hello" + # str.at(0) # => "h" + # str.at(1..3) # => "ell" + # str.at(-2) # => "l" + # str.at(-2..-1) # => "lo" + # str.at(5) # => nil + # str.at(5..-1) # => "" + # + # If a Regexp is given, the matching portion of the string is returned. + # If a String is given, that given string is returned if it occurs in + # the string. In both cases, +nil+ is returned if there is no match. + # + # str = "hello" + # str.at(/lo/) # => "lo" + # str.at(/ol/) # => nil + # str.at("lo") # => "lo" + # str.at("ol") # => nil + def at(position) + self[position] + end + + # Returns a substring from the given position to the end of the string. + # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.from(0) # => "hello" + # str.from(3) # => "lo" + # str.from(-2) # => "lo" + # + # You can mix it with +to+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def from(position) + self[position..-1] + end + + # Returns a substring from the beginning of the string to the given position. 
+ # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.to(0) # => "h" + # str.to(3) # => "hell" + # str.to(-2) # => "hell" + # + # You can mix it with +from+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def to(position) + self[0..position] + end + + # Returns the first character. If a limit is supplied, returns a substring + # from the beginning of the string until it reaches the limit value. If the + # given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.first # => "h" + # str.first(1) # => "h" + # str.first(2) # => "he" + # str.first(0) # => "" + # str.first(6) # => "hello" + def first(limit = 1) + if limit == 0 + "" + elsif limit >= size + dup + else + to(limit - 1) + end + end + + # Returns the last character of the string. If a limit is supplied, returns a substring + # from the end of the string until it reaches the limit value (counting backwards). If + # the given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.last # => "o" + # str.last(1) # => "o" + # str.last(2) # => "lo" + # str.last(0) # => "" + # str.last(6) # => "hello" + def last(limit = 1) + if limit == 0 + "" + elsif limit >= size + dup + else + from(-limit) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/behavior.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/behavior.rb new file mode 100644 index 0000000..35a5aa7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/behavior.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +class String + # Enables more predictable duck-typing on String-like classes. See Object#acts_like?. + def acts_like_string? + true + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/conversions.rb new file mode 100644 index 0000000..29a88b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/conversions.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/time/calculations" + +class String + # Converts a string to a Time value. + # The +form+ can be either :utc or :local (default :local). + # + # The time is parsed using Time.parse method. + # If +form+ is :local, then the time is in the system timezone. + # If the date part is missing then the current date is used and if + # the time part is missing then it is assumed to be 00:00:00. + # + # "13-12-2012".to_time # => 2012-12-13 00:00:00 +0100 + # "06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13 06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time(:utc) # => 2012-12-13 06:12:00 UTC + # "12/13/2012".to_time # => ArgumentError: argument out of range + def to_time(form = :local) + parts = Date._parse(self, false) + used_keys = %i(year mon mday hour min sec sec_fraction offset) + return if (parts.keys & used_keys).empty? 
+ + now = Time.now + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, form == :utc ? 0 : nil) + ) + + form == :utc ? time.utc : time.to_time + end + + # Converts a string to a Date value. + # + # "1-1-2012".to_date # => Sun, 01 Jan 2012 + # "01/01/2012".to_date # => Sun, 01 Jan 2012 + # "2012-12-13".to_date # => Thu, 13 Dec 2012 + # "12/13/2012".to_date # => ArgumentError: invalid date + def to_date + ::Date.parse(self, false) unless blank? + end + + # Converts a string to a DateTime value. + # + # "1-1-2012".to_datetime # => Sun, 01 Jan 2012 00:00:00 +0000 + # "01/01/2012 23:59:59".to_datetime # => Sun, 01 Jan 2012 23:59:59 +0000 + # "2012-12-13 12:50".to_datetime # => Thu, 13 Dec 2012 12:50:00 +0000 + # "12/13/2012".to_datetime # => ArgumentError: invalid date + def to_datetime + ::DateTime.parse(self, false) unless blank? + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/exclude.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/exclude.rb new file mode 100644 index 0000000..8e46268 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/exclude.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +class String + # The inverse of String#include?. Returns true if the string + # does not include the other string. + # + # "hello".exclude? "lo" # => false + # "hello".exclude? "ol" # => true + # "hello".exclude? ?h # => false + def exclude?(string) + !include?(string) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/filters.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/filters.rb new file mode 100644 index 0000000..66e721e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/filters.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +class String + # Returns the string, first removing all whitespace on both ends of + # the string, and then changing remaining consecutive whitespace + # groups into one space each. + # + # Note that it handles both ASCII and Unicode whitespace. + # + # %{ Multi-line + # string }.squish # => "Multi-line string" + # " foo bar \n \t boo".squish # => "foo bar boo" + def squish + dup.squish! + end + + # Performs a destructive squish. See String#squish. + # str = " foo bar \n \t boo" + # str.squish! # => "foo bar boo" + # str # => "foo bar boo" + def squish! + gsub!(/[[:space:]]+/, " ") + strip! + self + end + + # Returns a new string with all occurrences of the patterns removed. + # str = "foo bar test" + # str.remove(" test") # => "foo bar" + # str.remove(" test", /bar/) # => "foo " + # str # => "foo bar test" + def remove(*patterns) + dup.remove!(*patterns) + end + + # Alters the string by removing all occurrences of the patterns. + # str = "foo bar test" + # str.remove!(" test", /bar/) # => "foo " + # str # => "foo " + def remove!(*patterns) + patterns.each do |pattern| + gsub! pattern, "" + end + + self + end + + # Truncates a given +text+ after a given length if +text+ is longer than length: + # + # 'Once upon a time in a world far far away'.truncate(27) + # # => "Once upon a time in a wo..." 
+ # + # Pass a string or regexp :separator to truncate +text+ at a natural break: + # + # 'Once upon a time in a world far far away'.truncate(27, separator: ' ') + # # => "Once upon a time in a..." + # + # 'Once upon a time in a world far far away'.truncate(27, separator: /\s/) + # # => "Once upon a time in a..." + # + # The last characters will be replaced with the :omission string (defaults to "...") + # for a total length not exceeding length: + # + # 'And they found that many people were sleeping better.'.truncate(25, omission: '... (continued)') + # # => "And they f... (continued)" + def truncate(truncate_at, options = {}) + return dup unless length > truncate_at + + omission = options[:omission] || "..." + length_with_room_for_omission = truncate_at - omission.length + stop = \ + if options[:separator] + rindex(options[:separator], length_with_room_for_omission) || length_with_room_for_omission + else + length_with_room_for_omission + end + + "#{self[0, stop]}#{omission}" + end + + # Truncates a given +text+ after a given number of words (words_count): + # + # 'Once upon a time in a world far far away'.truncate_words(4) + # # => "Once upon a time..." + # + # Pass a string or regexp :separator to specify a different separator of words: + # + # 'Once
<br>upon<br>a<br>time<br>in<br>a<br>world'.truncate_words(5, separator: '<br>') + # # => "Once<br>upon<br>a<br>time<br>
in..." + # + # The last characters will be replaced with the :omission string (defaults to "..."): + # + # 'And they found that many people were sleeping better.'.truncate_words(5, omission: '... (continued)') + # # => "And they found that many... (continued)" + def truncate_words(words_count, options = {}) + sep = options[:separator] || /\s+/ + sep = Regexp.escape(sep.to_s) unless Regexp === sep + if self =~ /\A((?>.+?#{sep}){#{words_count - 1}}.+?)#{sep}.*/m + $1 + (options[:omission] || "...") + else + dup + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/indent.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/indent.rb new file mode 100644 index 0000000..af9d181 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/indent.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +class String + # Same as +indent+, except it indents the receiver in-place. + # + # Returns the indented string, or +nil+ if there was nothing to indent. + def indent!(amount, indent_string = nil, indent_empty_lines = false) + indent_string = indent_string || self[/^[ \t]/] || " " + re = indent_empty_lines ? /^/ : /^(?!$)/ + gsub!(re, indent_string * amount) + end + + # Indents the lines in the receiver: + # + # < + # def some_method + # some_code + # end + # + # The second argument, +indent_string+, specifies which indent string to + # use. The default is +nil+, which tells the method to make a guess by + # peeking at the first indented line, and fallback to a space if there is + # none. + # + # " foo".indent(2) # => " foo" + # "foo\n\t\tbar".indent(2) # => "\t\tfoo\n\t\t\t\tbar" + # "foo".indent(2, "\t") # => "\t\tfoo" + # + # While +indent_string+ is typically one space or tab, it may be any string. + # + # The third argument, +indent_empty_lines+, is a flag that says whether + # empty lines should be indented. Default is false. + # + # "foo\n\nbar".indent(2) # => " foo\n\n bar" + # "foo\n\nbar".indent(2, nil, true) # => " foo\n \n bar" + # + def indent(amount, indent_string = nil, indent_empty_lines = false) + dup.tap { |_| _.indent!(amount, indent_string, indent_empty_lines) } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inflections.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inflections.rb new file mode 100644 index 0000000..8af3017 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inflections.rb @@ -0,0 +1,254 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" +require "active_support/inflector/transliterate" + +# String inflections define new methods on the String class to transform names for different purposes. +# For instance, you can figure out the name of a table from the name of a class. +# +# 'ScaleScore'.tableize # => "scale_scores" +# +class String + # Returns the plural form of the word in the string. + # + # If the optional parameter +count+ is specified, + # the singular form will be returned if count == 1. + # For any other value of +count+ the plural will be returned. + # + # If the optional parameter +locale+ is specified, + # the word will be pluralized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. 
+ # + # 'post'.pluralize # => "posts" + # 'octopus'.pluralize # => "octopi" + # 'sheep'.pluralize # => "sheep" + # 'words'.pluralize # => "words" + # 'the blue mailman'.pluralize # => "the blue mailmen" + # 'CamelOctopus'.pluralize # => "CamelOctopi" + # 'apple'.pluralize(1) # => "apple" + # 'apple'.pluralize(2) # => "apples" + # 'ley'.pluralize(:es) # => "leyes" + # 'ley'.pluralize(1, :es) # => "ley" + def pluralize(count = nil, locale = :en) + locale = count if count.is_a?(Symbol) + if count == 1 + dup + else + ActiveSupport::Inflector.pluralize(self, locale) + end + end + + # The reverse of +pluralize+, returns the singular form of a word in a string. + # + # If the optional parameter +locale+ is specified, + # the word will be singularized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. + # + # 'posts'.singularize # => "post" + # 'octopi'.singularize # => "octopus" + # 'sheep'.singularize # => "sheep" + # 'word'.singularize # => "word" + # 'the blue mailmen'.singularize # => "the blue mailman" + # 'CamelOctopi'.singularize # => "CamelOctopus" + # 'leyes'.singularize(:es) # => "ley" + def singularize(locale = :en) + ActiveSupport::Inflector.singularize(self, locale) + end + + # +constantize+ tries to find a declared constant with the name specified + # in the string. It raises a NameError when the name is not in CamelCase + # or is not initialized. See ActiveSupport::Inflector.constantize + # + # 'Module'.constantize # => Module + # 'Class'.constantize # => Class + # 'blargle'.constantize # => NameError: wrong constant name blargle + def constantize + ActiveSupport::Inflector.constantize(self) + end + + # +safe_constantize+ tries to find a declared constant with the name specified + # in the string. It returns +nil+ when the name is not in CamelCase + # or is not initialized. See ActiveSupport::Inflector.safe_constantize + # + # 'Module'.safe_constantize # => Module + # 'Class'.safe_constantize # => Class + # 'blargle'.safe_constantize # => nil + def safe_constantize + ActiveSupport::Inflector.safe_constantize(self) + end + + # By default, +camelize+ converts strings to UpperCamelCase. If the argument to camelize + # is set to :lower then camelize produces lowerCamelCase. + # + # +camelize+ will also convert '/' to '::' which is useful for converting paths to namespaces. + # + # 'active_record'.camelize # => "ActiveRecord" + # 'active_record'.camelize(:lower) # => "activeRecord" + # 'active_record/errors'.camelize # => "ActiveRecord::Errors" + # 'active_record/errors'.camelize(:lower) # => "activeRecord::Errors" + def camelize(first_letter = :upper) + case first_letter + when :upper + ActiveSupport::Inflector.camelize(self, true) + when :lower + ActiveSupport::Inflector.camelize(self, false) + else + raise ArgumentError, "Invalid option, use either :upper or :lower." + end + end + alias_method :camelcase, :camelize + + # Capitalizes all the words and replaces some characters in the string to create + # a nicer looking title. +titleize+ is meant for creating pretty output. It is not + # used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # +titleize+ is also aliased as +titlecase+. 
+ # + # 'man from the boondocks'.titleize # => "Man From The Boondocks" + # 'x-men: the last stand'.titleize # => "X Men: The Last Stand" + # 'string_ending_with_id'.titleize(keep_id_suffix: true) # => "String Ending With Id" + def titleize(keep_id_suffix: false) + ActiveSupport::Inflector.titleize(self, keep_id_suffix: keep_id_suffix) + end + alias_method :titlecase, :titleize + + # The reverse of +camelize+. Makes an underscored, lowercase form from the expression in the string. + # + # +underscore+ will also change '::' to '/' to convert namespaces to paths. + # + # 'ActiveModel'.underscore # => "active_model" + # 'ActiveModel::Errors'.underscore # => "active_model/errors" + def underscore + ActiveSupport::Inflector.underscore(self) + end + + # Replaces underscores with dashes in the string. + # + # 'puni_puni'.dasherize # => "puni-puni" + def dasherize + ActiveSupport::Inflector.dasherize(self) + end + + # Removes the module part from the constant expression in the string. + # + # 'ActiveSupport::Inflector::Inflections'.demodulize # => "Inflections" + # 'Inflections'.demodulize # => "Inflections" + # '::Inflections'.demodulize # => "Inflections" + # ''.demodulize # => '' + # + # See also +deconstantize+. + def demodulize + ActiveSupport::Inflector.demodulize(self) + end + + # Removes the rightmost segment from the constant expression in the string. + # + # 'Net::HTTP'.deconstantize # => "Net" + # '::Net::HTTP'.deconstantize # => "::Net" + # 'String'.deconstantize # => "" + # '::String'.deconstantize # => "" + # ''.deconstantize # => "" + # + # See also +demodulize+. + def deconstantize + ActiveSupport::Inflector.deconstantize(self) + end + + # Replaces special characters in a string so that it may be used as part of a 'pretty' URL. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize}" + # end + # end + # + # @person = Person.find(1) + # # => # + # + # <%= link_to(@person.name, person_path) %> + # # => Donald E. Knuth + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize(preserve_case: true)}" + # end + # end + # + # @person = Person.find(1) + # # => # + # + # <%= link_to(@person.name, person_path) %> + # # => Donald E. Knuth + def parameterize(separator: "-", preserve_case: false) + ActiveSupport::Inflector.parameterize(self, separator: separator, preserve_case: preserve_case) + end + + # Creates the name of a table like Rails does for models to table names. This method + # uses the +pluralize+ method on the last word in the string. + # + # 'RawScaledScorer'.tableize # => "raw_scaled_scorers" + # 'ham_and_egg'.tableize # => "ham_and_eggs" + # 'fancyCategory'.tableize # => "fancy_categories" + def tableize + ActiveSupport::Inflector.tableize(self) + end + + # Creates a class name from a plural table name like Rails does for table names to models. + # Note that this returns a string and not a class. (To convert to an actual class + # follow +classify+ with +constantize+.) + # + # 'ham_and_eggs'.classify # => "HamAndEgg" + # 'posts'.classify # => "Post" + def classify + ActiveSupport::Inflector.classify(self) + end + + # Capitalizes the first word, turns underscores into spaces, and (by default)strips a + # trailing '_id' if present. + # Like +titleize+, this is meant for creating pretty output. + # + # The capitalization of the first word can be turned off by setting the + # optional parameter +capitalize+ to false. + # By default, this parameter is true. 
+ # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # 'employee_salary'.humanize # => "Employee salary" + # 'author_id'.humanize # => "Author" + # 'author_id'.humanize(capitalize: false) # => "author" + # '_id'.humanize # => "Id" + # 'author_id'.humanize(keep_id_suffix: true) # => "Author Id" + def humanize(capitalize: true, keep_id_suffix: false) + ActiveSupport::Inflector.humanize(self, capitalize: capitalize, keep_id_suffix: keep_id_suffix) + end + + # Converts just the first character to uppercase. + # + # 'what a Lovely Day'.upcase_first # => "What a Lovely Day" + # 'w'.upcase_first # => "W" + # ''.upcase_first # => "" + def upcase_first + ActiveSupport::Inflector.upcase_first(self) + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # 'Message'.foreign_key # => "message_id" + # 'Message'.foreign_key(false) # => "messageid" + # 'Admin::Post'.foreign_key # => "post_id" + def foreign_key(separate_class_name_and_id_with_underscore = true) + ActiveSupport::Inflector.foreign_key(self, separate_class_name_and_id_with_underscore) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inquiry.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inquiry.rb new file mode 100644 index 0000000..a796d5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inquiry.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/string_inquirer" + +class String + # Wraps the current string in the ActiveSupport::StringInquirer class, + # which gives you a prettier way to test for equality. + # + # env = 'production'.inquiry + # env.production? # => true + # env.development? # => false + def inquiry + ActiveSupport::StringInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/multibyte.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/multibyte.rb new file mode 100644 index 0000000..07c0d16 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/multibyte.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require "active_support/multibyte" + +class String + # == Multibyte proxy + # + # +mb_chars+ is a multibyte safe proxy for string methods. + # + # It creates and returns an instance of the ActiveSupport::Multibyte::Chars class which + # encapsulates the original string. A Unicode safe version of all the String methods are defined on this proxy + # class. If the proxy class doesn't respond to a certain method, it's forwarded to the encapsulated string. + # + # >> "lj".upcase + # => "lj" + # >> "lj".mb_chars.upcase.to_s + # => "LJ" + # + # NOTE: An above example is useful for pre Ruby 2.4. Ruby 2.4 supports Unicode case mappings. + # + # == Method chaining + # + # All the methods on the Chars proxy which normally return a string will return a Chars object. This allows + # method chaining on the result of any of these methods. 
+ # + # name.mb_chars.reverse.length # => 12 + # + # == Interoperability and configuration + # + # The Chars object tries to be as interchangeable with String objects as possible: sorting and comparing between + # String and Char work like expected. The bang! methods change the internal string representation in the Chars + # object. Interoperability problems can be resolved easily with a +to_s+ call. + # + # For more information about the methods defined on the Chars proxy see ActiveSupport::Multibyte::Chars. For + # information about how to change the default Multibyte behavior see ActiveSupport::Multibyte. + def mb_chars + ActiveSupport::Multibyte.proxy_class.new(self) + end + + # Returns +true+ if string has utf_8 encoding. + # + # utf_8_str = "some string".encode "UTF-8" + # iso_str = "some string".encode "ISO-8859-1" + # + # utf_8_str.is_utf8? # => true + # iso_str.is_utf8? # => false + def is_utf8? + case encoding + when Encoding::UTF_8 + valid_encoding? + when Encoding::ASCII_8BIT, Encoding::US_ASCII + dup.force_encoding(Encoding::UTF_8).valid_encoding? + else + false + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/output_safety.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/output_safety.rb new file mode 100644 index 0000000..f3bdc29 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/output_safety.rb @@ -0,0 +1,258 @@ +# frozen_string_literal: true + +require "erb" +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/module/redefine_method" +require "active_support/multibyte/unicode" + +class ERB + module Util + HTML_ESCAPE = { "&" => "&", ">" => ">", "<" => "<", '"' => """, "'" => "'" } + JSON_ESCAPE = { "&" => '\u0026', ">" => '\u003e', "<" => '\u003c', "\u2028" => '\u2028', "\u2029" => '\u2029' } + HTML_ESCAPE_ONCE_REGEXP = /["><']|&(?!([a-zA-Z]+|(#\d+)|(#[xX][\dA-Fa-f]+));)/ + JSON_ESCAPE_REGEXP = /[\u2028\u2029&><]/u + + # A utility method for escaping HTML tag characters. + # This method is also aliased as h. + # + # puts html_escape('is a > 0 & a < 10?') + # # => is a > 0 & a < 10? + def html_escape(s) + unwrapped_html_escape(s).html_safe + end + + silence_redefinition_of_method :h + alias h html_escape + + module_function :h + + singleton_class.silence_redefinition_of_method :html_escape + module_function :html_escape + + # HTML escapes strings but doesn't wrap them with an ActiveSupport::SafeBuffer. + # This method is not for public consumption! Seriously! + def unwrapped_html_escape(s) # :nodoc: + s = s.to_s + if s.html_safe? + s + else + CGI.escapeHTML(ActiveSupport::Multibyte::Unicode.tidy_bytes(s)) + end + end + module_function :unwrapped_html_escape + + # A utility method for escaping HTML without affecting existing escaped entities. + # + # html_escape_once('1 < 2 & 3') + # # => "1 < 2 & 3" + # + # html_escape_once('<< Accept & Checkout') + # # => "<< Accept & Checkout" + def html_escape_once(s) + result = ActiveSupport::Multibyte::Unicode.tidy_bytes(s.to_s).gsub(HTML_ESCAPE_ONCE_REGEXP, HTML_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :html_escape_once + + # A utility method for escaping HTML entities in JSON strings. Specifically, the + # &, > and < characters are replaced with their equivalent unicode escaped form - + # \u0026, \u003e, and \u003c. 
The Unicode sequences \u2028 and \u2029 are also + # escaped as they are treated as newline characters in some JavaScript engines. + # These sequences have identical meaning as the original characters inside the + # context of a JSON string, so assuming the input is a valid and well-formed + # JSON value, the output will have equivalent meaning when parsed: + # + # json = JSON.generate({ name: ""}) + # # => "{\"name\":\"\"}" + # + # json_escape(json) + # # => "{\"name\":\"\\u003C/script\\u003E\\u003Cscript\\u003Ealert('PWNED!!!')\\u003C/script\\u003E\"}" + # + # JSON.parse(json) == JSON.parse(json_escape(json)) + # # => true + # + # The intended use case for this method is to escape JSON strings before including + # them inside a script tag to avoid XSS vulnerability: + # + # + # + # It is necessary to +raw+ the result of +json_escape+, so that quotation marks + # don't get converted to " entities. +json_escape+ doesn't + # automatically flag the result as HTML safe, since the raw value is unsafe to + # use inside HTML attributes. + # + # If your JSON is being used downstream for insertion into the DOM, be aware of + # whether or not it is being inserted via +html()+. Most jQuery plugins do this. + # If that is the case, be sure to +html_escape+ or +sanitize+ any user-generated + # content returned by your JSON. + # + # If you need to output JSON elsewhere in your HTML, you can just do something + # like this, as any unsafe characters (including quotation marks) will be + # automatically escaped for you: + # + #
<div data-user-info="<%= h current_user.to_json %>">...</div>
+ # + # WARNING: this helper only works with valid JSON. Using this on non-JSON values + # will open up serious XSS vulnerabilities. For example, if you replace the + # +current_user.to_json+ in the example above with user input instead, the browser + # will happily eval() that string as JavaScript. + # + # The escaping performed in this method is identical to those performed in the + # Active Support JSON encoder when +ActiveSupport.escape_html_entities_in_json+ is + # set to true. Because this transformation is idempotent, this helper can be + # applied even if +ActiveSupport.escape_html_entities_in_json+ is already true. + # + # Therefore, when you are unsure if +ActiveSupport.escape_html_entities_in_json+ + # is enabled, or if you are unsure where your JSON string originated from, it + # is recommended that you always apply this helper (other libraries, such as the + # JSON gem, do not provide this kind of protection by default; also some gems + # might override +to_json+ to bypass Active Support's encoder). + def json_escape(s) + result = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :json_escape + end +end + +class Object + def html_safe? + false + end +end + +class Numeric + def html_safe? + true + end +end + +module ActiveSupport #:nodoc: + class SafeBuffer < String + UNSAFE_STRING_METHODS = %w( + capitalize chomp chop delete downcase gsub lstrip next reverse rstrip + slice squeeze strip sub succ swapcase tr tr_s upcase + ) + + alias_method :original_concat, :concat + private :original_concat + + # Raised when ActiveSupport::SafeBuffer#safe_concat is called on unsafe buffers. + class SafeConcatError < StandardError + def initialize + super "Could not concatenate to the buffer because it is not html safe." + end + end + + def [](*args) + if args.size < 2 + super + elsif html_safe? + new_safe_buffer = super + + if new_safe_buffer + new_safe_buffer.instance_variable_set :@html_safe, true + end + + new_safe_buffer + else + to_str[*args] + end + end + + def safe_concat(value) + raise SafeConcatError unless html_safe? + original_concat(value) + end + + def initialize(str = "") + @html_safe = true + super + end + + def initialize_copy(other) + super + @html_safe = other.html_safe? + end + + def clone_empty + self[0, 0] + end + + def concat(value) + super(html_escape_interpolated_argument(value)) + end + alias << concat + + def prepend(value) + super(html_escape_interpolated_argument(value)) + end + + def +(other) + dup.concat(other) + end + + def %(args) + case args + when Hash + escaped_args = Hash[args.map { |k, arg| [k, html_escape_interpolated_argument(arg)] }] + else + escaped_args = Array(args).map { |arg| html_escape_interpolated_argument(arg) } + end + + self.class.new(super(escaped_args)) + end + + def html_safe? + defined?(@html_safe) && @html_safe + end + + def to_s + self + end + + def to_param + to_str + end + + def encode_with(coder) + coder.represent_object nil, to_str + end + + UNSAFE_STRING_METHODS.each do |unsafe_method| + if unsafe_method.respond_to?(unsafe_method) + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{unsafe_method}(*args, &block) # def capitalize(*args, &block) + to_str.#{unsafe_method}(*args, &block) # to_str.capitalize(*args, &block) + end # end + + def #{unsafe_method}!(*args) # def capitalize!(*args) + @html_safe = false # @html_safe = false + super # super + end # end + EOT + end + end + + private + + def html_escape_interpolated_argument(arg) + (!html_safe? || arg.html_safe?) ? 
arg : CGI.escapeHTML(arg.to_s) + end + end +end + +class String + # Marks a string as trusted safe. It will be inserted into HTML with no + # additional escaping performed. It is your responsibility to ensure that the + # string contains no malicious content. This method is equivalent to the + # +raw+ helper in views. It is recommended that you use +sanitize+ instead of + # this method. It should never be called on user input. + def html_safe + ActiveSupport::SafeBuffer.new(self) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/starts_ends_with.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/starts_ends_with.rb new file mode 100644 index 0000000..919eb7a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/starts_ends_with.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +class String + alias_method :starts_with?, :start_with? + alias_method :ends_with?, :end_with? +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/strip.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/strip.rb new file mode 100644 index 0000000..cc26274 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/strip.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class String + # Strips indentation in heredocs. + # + # For example in + # + # if options[:usage] + # puts <<-USAGE.strip_heredoc + # This command does such and such. + # + # Supported options are: + # -h This message + # ... + # USAGE + # end + # + # the user would see the usage message aligned against the left margin. + # + # Technically, it looks for the least indented non-empty line + # in the whole string, and removes that amount of leading whitespace. 
+ def strip_heredoc + gsub(/^#{scan(/^[ \t]*(?=\S)/).min}/, "".freeze) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/zones.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/zones.rb new file mode 100644 index 0000000..55dc231 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/zones.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/time/zones" + +class String + # Converts String to a TimeWithZone in the current zone if Time.zone or Time.zone_default + # is set, otherwise converts String to a Time via String#to_time + def in_time_zone(zone = ::Time.zone) + if zone + ::Time.find_zone!(zone).parse(self) + else + to_time + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time.rb new file mode 100644 index 0000000..c809def --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/compatibility" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/time/zones" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/acts_like.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/acts_like.rb new file mode 100644 index 0000000..8572b49 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Time + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? + true + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/calculations.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/calculations.rb new file mode 100644 index 0000000..120768d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/calculations.rb @@ -0,0 +1,315 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/conversions" +require "active_support/time_with_zone" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" +require "active_support/core_ext/date/calculations" + +class Time + include DateAndTime::Calculations + + COMMON_YEAR_DAYS_IN_MONTH = [nil, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + + class << self + # Overriding case equality method so that it returns true for ActiveSupport::TimeWithZone instances + def ===(other) + super || (self == Time && other.is_a?(ActiveSupport::TimeWithZone)) + end + + # Returns the number of days in the given month. + # If no year is specified, it will use the current year. + def days_in_month(month, year = current.year) + if month == 2 && ::Date.gregorian_leap?(year) + 29 + else + COMMON_YEAR_DAYS_IN_MONTH[month] + end + end + + # Returns the number of days in the given year. 
+ # If no year is specified, it will use the current year. + def days_in_year(year = current.year) + days_in_month(2, year) + 337 + end + + # Returns Time.zone.now when Time.zone or config.time_zone are set, otherwise just returns Time.now. + def current + ::Time.zone ? ::Time.zone.now : ::Time.now + end + + # Layers additional behavior on Time.at so that ActiveSupport::TimeWithZone and DateTime + # instances can be used when called with a single argument + def at_with_coercion(*args) + return at_without_coercion(*args) if args.size != 1 + + # Time.at can be called with a time or numerical value + time_or_number = args.first + + if time_or_number.is_a?(ActiveSupport::TimeWithZone) || time_or_number.is_a?(DateTime) + at_without_coercion(time_or_number.to_f).getlocal + else + at_without_coercion(time_or_number) + end + end + alias_method :at_without_coercion, :at + alias_method :at, :at_with_coercion + + # Creates a +Time+ instance from an RFC 3339 string. + # + # Time.rfc3339('1999-12-31T14:00:00-10:00') # => 2000-01-01 00:00:00 -1000 + # + # If the time or offset components are missing then an +ArgumentError+ will be raised. + # + # Time.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + end + end + + # Returns the number of seconds since 00:00:00. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0.0 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296.0 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399.0 + def seconds_since_midnight + to_i - change(hour: 0).to_i + (usec / 1.0e+6) + end + + # Returns the number of seconds until 23:59:59. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # Time.new(2012, 8, 29, 0, 0, 0.5).sec_fraction # => (1/2) + def sec_fraction + subsec + end + + # Returns a new Time where one or more of the elements have been changed according + # to the +options+ parameter. The time options (:hour, :min, + # :sec, :usec, :nsec) reset cascadingly, so if only + # the hour is passed, then minute, sec, usec and nsec is set to 0. If the hour + # and minute is passed, then sec, usec and nsec is set to 0. The +options+ parameter + # takes a hash with any of these keys: :year, :month, :day, + # :hour, :min, :sec, :usec, :nsec, + # :offset. Pass either :usec or :nsec, not both. + # + # Time.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => Time.new(2012, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => Time.new(1981, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => Time.new(1981, 8, 29, 0, 0, 0) + def change(options) + new_year = options.fetch(:year, year) + new_month = options.fetch(:month, month) + new_day = options.fetch(:day, day) + new_hour = options.fetch(:hour, hour) + new_min = options.fetch(:min, options[:hour] ? 0 : min) + new_sec = options.fetch(:sec, (options[:hour] || options[:min]) ? 
0 : sec) + new_offset = options.fetch(:offset, nil) + + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_usec = Rational(new_nsec, 1000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 0 : Rational(nsec, 1000)) + end + + raise ArgumentError, "argument out of range" if new_usec >= 1000000 + + new_sec += Rational(new_usec, 1000000) + + if new_offset + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, new_offset) + elsif utc? + ::Time.utc(new_year, new_month, new_day, new_hour, new_min, new_sec) + elsif zone + ::Time.local(new_year, new_month, new_day, new_hour, new_min, new_sec) + else + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, utc_offset) + end + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The +options+ parameter + # takes a hash with any of these keys: :years, :months, + # :weeks, :days, :hours, :minutes, + # :seconds. + # + # Time.new(2015, 8, 1, 14, 35, 0).advance(seconds: 1) # => 2015-08-01 14:35:01 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(minutes: 1) # => 2015-08-01 14:36:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(hours: 1) # => 2015-08-01 15:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(days: 1) # => 2015-08-02 14:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(weeks: 1) # => 2015-08-08 14:35:00 -0700 + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.advance(options) + d = d.gregorian if d.julian? + time_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? 
+ time_advanced_by_date + else + time_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new Time representing the time a number of seconds ago, this is basically a wrapper around the Numeric extension + def ago(seconds) + since(-seconds) + end + + # Returns a new Time representing the time a number of seconds since the instance time + def since(seconds) + self + seconds + rescue + to_datetime.since(seconds) + end + alias :in :since + + # Returns a new Time representing the start of the day (0:00) + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new Time representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new Time representing the end of the day, 23:59:59.999999 + def end_of_day + change( + hour: 23, + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_day :end_of_day + + # Returns a new Time representing the start of the hour (x:00) + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new Time representing the end of the hour, x:59:59.999999 + def end_of_hour + change( + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new Time representing the start of the minute (x:xx:00) + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new Time representing the end of the minute, x:xx:59.999999 + def end_of_minute + change( + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_minute :end_of_minute + + def plus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.until(self) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Time#- can also be used to determine the number of seconds between two Time instances. + # We're layering on additional behavior so that ActiveSupport::TimeWithZone instances + # are coerced into values that Time#- will recognize + def minus_with_coercion(other) + other = other.comparable_time if other.respond_to?(:comparable_time) + other.is_a?(DateTime) ? to_f - other.to_f : minus_without_coercion(other) + end + alias_method :minus_without_coercion, :- + alias_method :-, :minus_with_coercion + + # Layers additional behavior on Time#<=> so that DateTime and ActiveSupport::TimeWithZone instances + # can be chronologically compared with a Time + def compare_with_coercion(other) + # we're avoiding Time#to_datetime and Time#to_time because they're expensive + if other.class == Time + compare_without_coercion(other) + elsif other.is_a?(Time) + compare_without_coercion(other.to_time) + else + to_datetime <=> other + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion + + # Layers additional behavior on Time#eql? so that ActiveSupport::TimeWithZone instances + # can be eql? 
to an equivalent Time + def eql_with_coercion(other) + # if other is an ActiveSupport::TimeWithZone, coerce a Time instance from it so we can do eql? comparison + other = other.comparable_time if other.respond_to?(:comparable_time) + eql_without_coercion(other) + end + alias_method :eql_without_coercion, :eql? + alias_method :eql?, :eql_with_coercion +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/compatibility.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/compatibility.rb new file mode 100644 index 0000000..495e4f3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/compatibility.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class Time + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return +self+ or the time in the local system timezone depending + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? self : getlocal + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/conversions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/conversions.rb new file mode 100644 index 0000000..345cb28 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/conversions.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" +require "active_support/values/time_zone" + +class Time + DATE_FORMATS = { + db: "%Y-%m-%d %H:%M:%S", + number: "%Y%m%d%H%M%S", + nsec: "%Y%m%d%H%M%S%9N", + usec: "%Y%m%d%H%M%S%6N", + time: "%H:%M", + short: "%d %b %H:%M", + long: "%B %d, %Y %H:%M", + long_ordinal: lambda { |time| + day_format = ActiveSupport::Inflector.ordinalize(time.day) + time.strftime("%B #{day_format}, %Y %H:%M") + }, + rfc822: lambda { |time| + offset_format = time.formatted_offset(false) + time.strftime("%a, %d %b %Y %H:%M:%S #{offset_format}") + }, + iso8601: lambda { |time| time.iso8601 } + } + + # Converts to a formatted string. See DATE_FORMATS for built-in formats. + # + # This method is aliased to to_s. + # + # time = Time.now # => 2007-01-18 06:10:17 -06:00 + # + # time.to_formatted_s(:time) # => "06:10" + # time.to_s(:time) # => "06:10" + # + # time.to_formatted_s(:db) # => "2007-01-18 06:10:17" + # time.to_formatted_s(:number) # => "20070118061017" + # time.to_formatted_s(:short) # => "18 Jan 06:10" + # time.to_formatted_s(:long) # => "January 18, 2007 06:10" + # time.to_formatted_s(:long_ordinal) # => "January 18th, 2007 06:10" + # time.to_formatted_s(:rfc822) # => "Thu, 18 Jan 2007 06:10:17 -0600" + # time.to_formatted_s(:iso8601) # => "2007-01-18T06:10:17-06:00" + # + # == Adding your own time formats to +to_formatted_s+ + # You can add your own formats to the Time::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a time argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = ->(time) { time.strftime("%B #{time.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = DATE_FORMATS[format] + formatter.respond_to?(:call) ? 
formatter.call(self).to_s : strftime(formatter) + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.local(2000).formatted_offset # => "-06:00" + # Time.local(2000).formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Aliased to +xmlschema+ for compatibility with +DateTime+ + alias_method :rfc3339, :xmlschema +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/zones.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/zones.rb new file mode 100644 index 0000000..a5588fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/zones.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date_and_time/zones" + +class Time + include DateAndTime::Zones + class << self + attr_accessor :zone_default + + # Returns the TimeZone for the current request, if this has been set (via Time.zone=). + # If Time.zone has not been set for the current request, returns the TimeZone specified in config.time_zone. + def zone + Thread.current[:time_zone] || zone_default + end + + # Sets Time.zone to a TimeZone object for the current request/thread. + # + # This method accepts any of the following: + # + # * A Rails TimeZone object. + # * An identifier for a Rails TimeZone object (e.g., "Eastern Time (US & Canada)", -5.hours). + # * A TZInfo::Timezone object. + # * An identifier for a TZInfo::Timezone object (e.g., "America/New_York"). + # + # Here's an example of how you might set Time.zone on a per request basis and reset it when the request is done. + # current_user.time_zone just needs to return a string identifying the user's preferred time zone: + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # def set_time_zone + # if logged_in? + # Time.use_zone(current_user.time_zone) { yield } + # else + # yield + # end + # end + # end + def zone=(time_zone) + Thread.current[:time_zone] = find_zone!(time_zone) + end + + # Allows override of Time.zone locally inside supplied block; + # resets Time.zone to existing value when done. + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # private + # + # def set_time_zone + # Time.use_zone(current_user.timezone) { yield } + # end + # end + # + # NOTE: This won't affect any ActiveSupport::TimeWithZone + # objects that have already been created, e.g. any model timestamp + # attributes that have been read before the block will remain in + # the application's default timezone. + def use_zone(time_zone) + new_zone = find_zone!(time_zone) + begin + old_zone, ::Time.zone = ::Time.zone, new_zone + yield + ensure + ::Time.zone = old_zone + end + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Raises an +ArgumentError+ for invalid time zones. + # + # Time.find_zone! "America/New_York" # => # + # Time.find_zone! "EST" # => # + # Time.find_zone! -5.hours # => # + # Time.find_zone! 
nil # => nil + # Time.find_zone! false # => false + # Time.find_zone! "NOT-A-TIMEZONE" # => ArgumentError: Invalid Timezone: NOT-A-TIMEZONE + def find_zone!(time_zone) + if !time_zone || time_zone.is_a?(ActiveSupport::TimeZone) + time_zone + else + # Look up the timezone based on the identifier (unless we've been + # passed a TZInfo::Timezone) + unless time_zone.respond_to?(:period_for_local) + time_zone = ActiveSupport::TimeZone[time_zone] || TZInfo::Timezone.get(time_zone) + end + + # Return if a TimeZone instance, or wrap in a TimeZone instance if a TZInfo::Timezone + if time_zone.is_a?(ActiveSupport::TimeZone) + time_zone + else + ActiveSupport::TimeZone.create(time_zone.name, nil, time_zone) + end + end + rescue TZInfo::InvalidTimezoneIdentifier + raise ArgumentError, "Invalid Timezone: #{time_zone}" + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Returns +nil+ for invalid time zones. + # + # Time.find_zone "America/New_York" # => # + # Time.find_zone "NOT-A-TIMEZONE" # => nil + def find_zone(time_zone) + find_zone!(time_zone) rescue nil + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/uri.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/uri.rb new file mode 100644 index 0000000..9def947 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/uri.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "uri" +if RUBY_VERSION < "2.6.0" + require "active_support/core_ext/module/redefine_method" + URI::Parser.class_eval do + silence_redefinition_of_method :unescape + def unescape(str, escaped = /%[a-fA-F\d]{2}/) + # TODO: Are we actually sure that ASCII == UTF-8? + # YK: My initial experiments say yes, but let's be sure please + enc = str.encoding + enc = Encoding::UTF_8 if enc == Encoding::US_ASCII + str.dup.force_encoding(Encoding::ASCII_8BIT).gsub(escaped) { |match| [match[1, 2].hex].pack("C") }.force_encoding(enc) + end + end +end + +module URI + class << self + def parser + @parser ||= URI::Parser.new + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/current_attributes.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/current_attributes.rb new file mode 100644 index 0000000..4e6d8e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/current_attributes.rb @@ -0,0 +1,195 @@ +# frozen_string_literal: true + +module ActiveSupport + # Abstract super class that provides a thread-isolated attributes singleton, which resets automatically + # before and after each request. This allows you to keep all the per-request attributes easily + # available to the whole system. 
+ # + # The following full app-like example demonstrates how to use a Current class to + # facilitate easy access to the global, per-request attributes without passing them deeply + # around everywhere: + # + # # app/models/current.rb + # class Current < ActiveSupport::CurrentAttributes + # attribute :account, :user + # attribute :request_id, :user_agent, :ip_address + # + # resets { Time.zone = nil } + # + # def user=(user) + # super + # self.account = user.account + # Time.zone = user.time_zone + # end + # end + # + # # app/controllers/concerns/authentication.rb + # module Authentication + # extend ActiveSupport::Concern + # + # included do + # before_action :authenticate + # end + # + # private + # def authenticate + # if authenticated_user = User.find_by(id: cookies.encrypted[:user_id]) + # Current.user = authenticated_user + # else + # redirect_to new_session_url + # end + # end + # end + # + # # app/controllers/concerns/set_current_request_details.rb + # module SetCurrentRequestDetails + # extend ActiveSupport::Concern + # + # included do + # before_action do + # Current.request_id = request.uuid + # Current.user_agent = request.user_agent + # Current.ip_address = request.ip + # end + # end + # end + # + # class ApplicationController < ActionController::Base + # include Authentication + # include SetCurrentRequestDetails + # end + # + # class MessagesController < ApplicationController + # def create + # Current.account.messages.create(message_params) + # end + # end + # + # class Message < ApplicationRecord + # belongs_to :creator, default: -> { Current.user } + # after_create { |message| Event.create(record: message) } + # end + # + # class Event < ApplicationRecord + # before_create do + # self.request_id = Current.request_id + # self.user_agent = Current.user_agent + # self.ip_address = Current.ip_address + # end + # end + # + # A word of caution: It's easy to overdo a global singleton like Current and tangle your model as a result. + # Current should only be used for a few, top-level globals, like account, user, and request details. + # The attributes stuck in Current should be used by more or less all actions on all requests. If you start + # sticking controller-specific attributes in there, you're going to create a mess. + class CurrentAttributes + include ActiveSupport::Callbacks + define_callbacks :reset + + class << self + # Returns singleton instance for this class in this thread. If none exists, one is created. + def instance + current_instances[name] ||= new + end + + # Declares one or more attributes that will be given both class and instance accessor methods. + def attribute(*names) + generated_attribute_methods.module_eval do + names.each do |name| + define_method(name) do + attributes[name.to_sym] + end + + define_method("#{name}=") do |attribute| + attributes[name.to_sym] = attribute + end + end + end + + names.each do |name| + define_singleton_method(name) do + instance.public_send(name) + end + + define_singleton_method("#{name}=") do |attribute| + instance.public_send("#{name}=", attribute) + end + end + end + + # Calls this block after #reset is called on the instance. Used for resetting external collaborators, like Time.zone. 
+ def resets(&block) + set_callback :reset, :after, &block + end + + delegate :set, :reset, to: :instance + + def reset_all # :nodoc: + current_instances.each_value(&:reset) + end + + def clear_all # :nodoc: + reset_all + current_instances.clear + end + + private + def generated_attribute_methods + @generated_attribute_methods ||= Module.new.tap { |mod| include mod } + end + + def current_instances + Thread.current[:current_attributes_instances] ||= {} + end + + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + end + + attr_accessor :attributes + + def initialize + @attributes = {} + end + + # Expose one or more attributes within a block. Old values are returned after the block concludes. + # Example demonstrating the common use of needing to set Current attributes outside the request-cycle: + # + # class Chat::PublicationJob < ApplicationJob + # def perform(attributes, room_number, creator) + # Current.set(person: creator) do + # Chat::Publisher.publish(attributes: attributes, room_number: room_number) + # end + # end + # end + def set(set_attributes) + old_attributes = compute_attributes(set_attributes.keys) + assign_attributes(set_attributes) + yield + ensure + assign_attributes(old_attributes) + end + + # Reset all attributes. Should be called before and after actions, when used as a per-request singleton. + def reset + run_callbacks :reset do + self.attributes = {} + end + end + + private + def assign_attributes(new_attributes) + new_attributes.each { |key, value| public_send("#{key}=", value) } + end + + def compute_attributes(keys) + keys.collect { |key| [ key, public_send(key) ] }.to_h + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies.rb new file mode 100644 index 0000000..3957700 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies.rb @@ -0,0 +1,753 @@ +# frozen_string_literal: true + +require "set" +require "thread" +require "concurrent/map" +require "pathname" +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/introspection" +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/load_error" +require "active_support/core_ext/name_error" +require "active_support/core_ext/string/starts_ends_with" +require "active_support/dependencies/interlock" +require "active_support/inflector" + +module ActiveSupport #:nodoc: + module Dependencies #:nodoc: + extend self + + mattr_accessor :interlock, default: Interlock.new + + # :doc: + + # Execute the supplied block without interference from any + # concurrent loads. + def self.run_interlock + Dependencies.interlock.running { yield } + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. 
+ def self.load_interlock + Dependencies.interlock.loading { yield } + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. + def self.unload_interlock + Dependencies.interlock.unloading { yield } + end + + # :nodoc: + + # Should we turn on Ruby warnings on the first load of dependent files? + mattr_accessor :warnings_on_first_load, default: false + + # All files ever loaded. + mattr_accessor :history, default: Set.new + + # All files currently loaded. + mattr_accessor :loaded, default: Set.new + + # Stack of files being loaded. + mattr_accessor :loading, default: [] + + # Should we load files or require them? + mattr_accessor :mechanism, default: ENV["NO_RELOAD"] ? :require : :load + + # The set of directories from which we may automatically load files. Files + # under these directories will be reloaded on each request in development mode, + # unless the directory also appears in autoload_once_paths. + mattr_accessor :autoload_paths, default: [] + + # The set of directories from which automatically loaded constants are loaded + # only once. All directories in this set must also be present in +autoload_paths+. + mattr_accessor :autoload_once_paths, default: [] + + # An array of qualified constant names that have been loaded. Adding a name + # to this array will cause it to be unloaded the next time Dependencies are + # cleared. + mattr_accessor :autoloaded_constants, default: [] + + # An array of constant names that need to be unloaded on every request. Used + # to allow arbitrary constants to be marked for unloading. + mattr_accessor :explicitly_unloadable_constants, default: [] + + # The WatchStack keeps a stack of the modules being watched as files are + # loaded. If a file in the process of being loaded (parent.rb) triggers the + # load of another file (child.rb) the stack will ensure that child.rb + # handles the new constants. + # + # If child.rb is being autoloaded, its constants will be added to + # autoloaded_constants. If it was being required, they will be discarded. + # + # This is handled by walking back up the watch stack and adding the constants + # found by child.rb to the list of original constants in parent.rb. + class WatchStack + include Enumerable + + # @watching is a stack of lists of constants being watched. For instance, + # if parent.rb is autoloaded, the stack will look like [[Object]]. If + # parent.rb then requires namespace/child.rb, the stack will look like + # [[Object], [Namespace]]. + + attr_reader :watching + + def initialize + @watching = [] + @stack = Hash.new { |h, k| h[k] = [] } + end + + def each(&block) + @stack.each(&block) + end + + def watching? + !@watching.empty? + end + + # Returns a list of new constants found since the last call to + # watch_namespaces. + def new_constants + constants = [] + + # Grab the list of namespaces that we're looking for new constants under + @watching.last.each do |namespace| + # Retrieve the constants that were present under the namespace when watch_namespaces + # was originally called + original_constants = @stack[namespace].last + + mod = Inflector.constantize(namespace) if Dependencies.qualified_const_defined?(namespace) + next unless mod.is_a?(Module) + + # Get a list of the constants that were added + new_constants = mod.constants(false) - original_constants + + # @stack[namespace] returns an Array of the constants that are being evaluated + # for that namespace. 
For instance, if parent.rb requires child.rb, the first + # element of @stack[Object] will be an Array of the constants that were present + # before parent.rb was required. The second element will be an Array of the + # constants that were present before child.rb was required. + @stack[namespace].each do |namespace_constants| + namespace_constants.concat(new_constants) + end + + # Normalize the list of new constants, and add them to the list we will return + new_constants.each do |suffix| + constants << ([namespace, suffix] - ["Object"]).join("::".freeze) + end + end + constants + ensure + # A call to new_constants is always called after a call to watch_namespaces + pop_modules(@watching.pop) + end + + # Add a set of modules to the watch stack, remembering the initial + # constants. + def watch_namespaces(namespaces) + @watching << namespaces.map do |namespace| + module_name = Dependencies.to_constant_name(namespace) + original_constants = Dependencies.qualified_const_defined?(module_name) ? + Inflector.constantize(module_name).constants(false) : [] + + @stack[module_name] << original_constants + module_name + end + end + + private + def pop_modules(modules) + modules.each { |mod| @stack[mod].pop } + end + end + + # An internal stack used to record which constants are loaded by any block. + mattr_accessor :constant_watch_stack, default: WatchStack.new + + # Module includes this module. + module ModuleConstMissing #:nodoc: + def self.append_features(base) + base.class_eval do + # Emulate #exclude via an ivar + return if defined?(@_const_missing) && @_const_missing + @_const_missing = instance_method(:const_missing) + remove_method(:const_missing) + end + super + end + + def self.exclude_from(base) + base.class_eval do + define_method :const_missing, @_const_missing + @_const_missing = nil + end + end + + def const_missing(const_name) + from_mod = anonymous? ? guess_for_anonymous(const_name) : self + Dependencies.load_missing_constant(from_mod, const_name) + end + + # We assume that the name of the module reflects the nesting + # (unless it can be proven that is not the case) and the path to the file + # that defines the constant. Anonymous modules cannot follow these + # conventions and therefore we assume that the user wants to refer to a + # top-level constant. + def guess_for_anonymous(const_name) + if Object.const_defined?(const_name) + raise NameError.new "#{const_name} cannot be autoloaded from an anonymous class or module", const_name + else + Object + end + end + + def unloadable(const_desc = self) + super(const_desc) + end + end + + # Object includes this module. + module Loadable #:nodoc: + def self.exclude_from(base) + base.class_eval do + define_method(:load, Kernel.instance_method(:load)) + private :load + end + end + + def require_or_load(file_name) + Dependencies.require_or_load(file_name) + end + + # :doc: + + # Interprets a file using mechanism and marks its defined + # constants as autoloaded. file_name can be either a string or + # respond to to_path. + # + # Use this method in code that absolutely needs a certain constant to be + # defined at that point. A typical use case is to make constant name + # resolution deterministic for constants with the same relative name in + # different namespaces whose evaluation would depend on load order + # otherwise. 
+ def require_dependency(file_name, message = "No such file to load -- %s.rb") + file_name = file_name.to_path if file_name.respond_to?(:to_path) + unless file_name.is_a?(String) + raise ArgumentError, "the file name must either be a String or implement #to_path -- you passed #{file_name.inspect}" + end + + Dependencies.depend_on(file_name, message) + end + + # :nodoc: + + def load_dependency(file) + if Dependencies.load? && Dependencies.constant_watch_stack.watching? + descs = Dependencies.constant_watch_stack.watching.flatten.uniq + + Dependencies.new_constants_in(*descs) { yield } + else + yield + end + rescue Exception => exception # errors from loading file + exception.blame_file! file if exception.respond_to? :blame_file! + raise + end + + # Mark the given constant as unloadable. Unloadable constants are removed + # each time dependencies are cleared. + # + # Note that marking a constant for unloading need only be done once. Setup + # or init scripts may list each unloadable constant that may need unloading; + # each constant will be removed for every subsequent clear, as opposed to + # for the first clear. + # + # The provided constant descriptor may be a (non-anonymous) module or class, + # or a qualified constant name as a string or symbol. + # + # Returns +true+ if the constant was not previously marked for unloading, + # +false+ otherwise. + def unloadable(const_desc) + Dependencies.mark_for_unload const_desc + end + + private + + def load(file, wrap = false) + result = false + load_dependency(file) { result = super } + result + end + + def require(file) + result = false + load_dependency(file) { result = super } + result + end + end + + # Exception file-blaming. + module Blamable #:nodoc: + def blame_file!(file) + (@blamed_files ||= []).unshift file + end + + def blamed_files + @blamed_files ||= [] + end + + def describe_blame + return nil if blamed_files.empty? + "This error occurred while loading the following files:\n #{blamed_files.join "\n "}" + end + + def copy_blame!(exc) + @blamed_files = exc.blamed_files.clone + self + end + end + + def hook! + Object.class_eval { include Loadable } + Module.class_eval { include ModuleConstMissing } + Exception.class_eval { include Blamable } + end + + def unhook! + ModuleConstMissing.exclude_from(Module) + Loadable.exclude_from(Object) + end + + def load? + mechanism == :load + end + + def depend_on(file_name, message = "No such file to load -- %s.rb") + path = search_for_file(file_name) + require_or_load(path || file_name) + rescue LoadError => load_error + if file_name = load_error.message[/ -- (.*?)(\.rb)?$/, 1] + load_error.message.replace(message % file_name) + load_error.copy_blame!(load_error) + end + raise + end + + def clear + Dependencies.unload_interlock do + loaded.clear + loading.clear + remove_unloadable_constants! + end + end + + def require_or_load(file_name, const_path = nil) + file_name = $` if file_name =~ /\.rb\z/ + expanded = File.expand_path(file_name) + return if loaded.include?(expanded) + + Dependencies.load_interlock do + # Maybe it got loaded while we were waiting for our lock: + return if loaded.include?(expanded) + + # Record that we've seen this file *before* loading it to avoid an + # infinite loop with mutual dependencies. + loaded << expanded + loading << expanded + + begin + if load? + # Enable warnings if this file has not been loaded before and + # warnings_on_first_load is set. + load_args = ["#{file_name}.rb"] + load_args << const_path unless const_path.nil? 
+ + if !warnings_on_first_load || history.include?(expanded) + result = load_file(*load_args) + else + enable_warnings { result = load_file(*load_args) } + end + else + result = require file_name + end + rescue Exception + loaded.delete expanded + raise + ensure + loading.pop + end + + # Record history *after* loading so first load gets warnings. + history << expanded + result + end + end + + # Is the provided constant path defined? + def qualified_const_defined?(path) + Object.const_defined?(path, false) + end + + # Given +path+, a filesystem path to a ruby file, return an array of + # constant paths which would cause Dependencies to attempt to load this + # file. + def loadable_constants_for_path(path, bases = autoload_paths) + path = $` if path =~ /\.rb\z/ + expanded_path = File.expand_path(path) + paths = [] + + bases.each do |root| + expanded_root = File.expand_path(root) + next unless expanded_path.start_with?(expanded_root) + + root_size = expanded_root.size + next if expanded_path[root_size] != ?/.freeze + + nesting = expanded_path[(root_size + 1)..-1] + paths << nesting.camelize unless nesting.blank? + end + + paths.uniq! + paths + end + + # Search for a file in autoload_paths matching the provided suffix. + def search_for_file(path_suffix) + path_suffix = path_suffix.sub(/(\.rb)?$/, ".rb".freeze) + + autoload_paths.each do |root| + path = File.join(root, path_suffix) + return path if File.file? path + end + nil # Gee, I sure wish we had first_match ;-) + end + + # Does the provided path_suffix correspond to an autoloadable module? + # Instead of returning a boolean, the autoload base for this module is + # returned. + def autoloadable_module?(path_suffix) + autoload_paths.each do |load_path| + return load_path if File.directory? File.join(load_path, path_suffix) + end + nil + end + + def load_once_path?(path) + # to_s works around a ruby issue where String#starts_with?(Pathname) + # will raise a TypeError: no implicit conversion of Pathname into String + autoload_once_paths.any? { |base| path.starts_with? base.to_s } + end + + # Attempt to autoload the provided module name by searching for a directory + # matching the expected path suffix. If found, the module is created and + # assigned to +into+'s constants with the name +const_name+. Provided that + # the directory was loaded from a reloadable base path, it is added to the + # set of constants that are to be unloaded. + def autoload_module!(into, const_name, qualified_name, path_suffix) + return nil unless base_path = autoloadable_module?(path_suffix) + mod = Module.new + into.const_set const_name, mod + autoloaded_constants << qualified_name unless autoload_once_paths.include?(base_path) + autoloaded_constants.uniq! + mod + end + + # Load the file at the provided path. +const_paths+ is a set of qualified + # constant names. When loading the file, Dependencies will watch for the + # addition of these constants. Each that is defined will be marked as + # autoloaded, and will be removed when Dependencies.clear is next called. + # + # If the second parameter is left off, then Dependencies will construct a + # set of names that the file at +path+ may define. See + # +loadable_constants_for_path+ for more details. + def load_file(path, const_paths = loadable_constants_for_path(path)) + const_paths = [const_paths].compact unless const_paths.is_a? 
Array + parent_paths = const_paths.collect { |const_path| const_path[/.*(?=::)/] || ::Object } + + result = nil + newly_defined_paths = new_constants_in(*parent_paths) do + result = Kernel.load path + end + + autoloaded_constants.concat newly_defined_paths unless load_once_path?(path) + autoloaded_constants.uniq! + result + end + + # Returns the constant path for the provided parent and constant name. + def qualified_name_for(mod, name) + mod_name = to_constant_name mod + mod_name == "Object" ? name.to_s : "#{mod_name}::#{name}" + end + + # Load the constant named +const_name+ which is missing from +from_mod+. If + # it is not possible to load the constant into from_mod, try its parent + # module using +const_missing+. + def load_missing_constant(from_mod, const_name) + unless qualified_const_defined?(from_mod.name) && Inflector.constantize(from_mod.name).equal?(from_mod) + raise ArgumentError, "A copy of #{from_mod} has been removed from the module tree but is still active!" + end + + qualified_name = qualified_name_for from_mod, const_name + path_suffix = qualified_name.underscore + + file_path = search_for_file(path_suffix) + + if file_path + expanded = File.expand_path(file_path) + expanded.sub!(/\.rb\z/, "".freeze) + + if loading.include?(expanded) + raise "Circular dependency detected while autoloading constant #{qualified_name}" + else + require_or_load(expanded, qualified_name) + raise LoadError, "Unable to autoload constant #{qualified_name}, expected #{file_path} to define it" unless from_mod.const_defined?(const_name, false) + return from_mod.const_get(const_name) + end + elsif mod = autoload_module!(from_mod, const_name, qualified_name, path_suffix) + return mod + elsif (parent = from_mod.parent) && parent != from_mod && + ! from_mod.parents.any? { |p| p.const_defined?(const_name, false) } + # If our parents do not have a constant named +const_name+ then we are free + # to attempt to load upwards. If they do have such a constant, then this + # const_missing must be due to from_mod::const_name, which should not + # return constants from from_mod's parents. + begin + # Since Ruby does not pass the nesting at the point the unknown + # constant triggered the callback we cannot fully emulate constant + # name lookup and need to make a trade-off: we are going to assume + # that the nesting in the body of Foo::Bar is [Foo::Bar, Foo] even + # though it might not be. Counterexamples are + # + # class Foo::Bar + # Module.nesting # => [Foo::Bar] + # end + # + # or + # + # module M::N + # module S::T + # Module.nesting # => [S::T, M::N] + # end + # end + # + # for example. + return parent.const_missing(const_name) + rescue NameError => e + raise unless e.missing_name? qualified_name_for(parent, const_name) + end + end + + name_error = NameError.new("uninitialized constant #{qualified_name}", const_name) + name_error.set_backtrace(caller.reject { |l| l.starts_with? __FILE__ }) + raise name_error + end + + # Remove the constants that have been autoloaded, and those that have been + # marked for unloading. Before each constant is removed a callback is sent + # to its class/module if it implements +before_remove_const+. + # + # The callback implementation should be restricted to cleaning up caches, etc. + # as the environment will be in an inconsistent state, e.g. other constants + # may have already been unloaded and not accessible. + def remove_unloadable_constants! + autoloaded_constants.each { |const| remove_constant const } + autoloaded_constants.clear + Reference.clear! 
+ explicitly_unloadable_constants.each { |const| remove_constant const } + end + + class ClassCache + def initialize + @store = Concurrent::Map.new + end + + def empty? + @store.empty? + end + + def key?(key) + @store.key?(key) + end + + def get(key) + key = key.name if key.respond_to?(:name) + @store[key] ||= Inflector.constantize(key) + end + alias :[] :get + + def safe_get(key) + key = key.name if key.respond_to?(:name) + @store[key] ||= Inflector.safe_constantize(key) + end + + def store(klass) + return self unless klass.respond_to?(:name) + raise(ArgumentError, "anonymous classes cannot be cached") if klass.name.empty? + @store[klass.name] = klass + self + end + + def clear! + @store.clear + end + end + + Reference = ClassCache.new + + # Store a reference to a class +klass+. + def reference(klass) + Reference.store klass + end + + # Get the reference for class named +name+. + # Raises an exception if referenced class does not exist. + def constantize(name) + Reference.get(name) + end + + # Get the reference for class named +name+ if one exists. + # Otherwise returns +nil+. + def safe_constantize(name) + Reference.safe_get(name) + end + + # Determine if the given constant has been automatically loaded. + def autoloaded?(desc) + return false if desc.is_a?(Module) && desc.anonymous? + name = to_constant_name desc + return false unless qualified_const_defined?(name) + autoloaded_constants.include?(name) + end + + # Will the provided constant descriptor be unloaded? + def will_unload?(const_desc) + autoloaded?(const_desc) || + explicitly_unloadable_constants.include?(to_constant_name(const_desc)) + end + + # Mark the provided constant name for unloading. This constant will be + # unloaded on each request, not just the next one. + def mark_for_unload(const_desc) + name = to_constant_name const_desc + if explicitly_unloadable_constants.include? name + false + else + explicitly_unloadable_constants << name + true + end + end + + # Run the provided block and detect the new constants that were loaded during + # its execution. Constants may only be regarded as 'new' once -- so if the + # block calls +new_constants_in+ again, then the constants defined within the + # inner call will not be reported in this one. + # + # If the provided block does not run to completion, and instead raises an + # exception, any new constants are regarded as being only partially defined + # and will be removed immediately. + def new_constants_in(*descs) + constant_watch_stack.watch_namespaces(descs) + success = false + + begin + yield # Now yield to the code that is to define new constants. + success = true + ensure + new_constants = constant_watch_stack.new_constants + + return new_constants if success + + # Remove partially loaded constants. + new_constants.each { |c| remove_constant(c) } + end + end + + # Convert the provided const desc to a qualified constant name (as a string). + # A module, class, symbol, or string may be provided. + def to_constant_name(desc) #:nodoc: + case desc + when String then desc.sub(/^::/, "") + when Symbol then desc.to_s + when Module + desc.name || + raise(ArgumentError, "Anonymous modules have no name to be referenced by") + else raise TypeError, "Not a valid constant descriptor: #{desc.inspect}" + end + end + + def remove_constant(const) #:nodoc: + # Normalize ::Foo, ::Object::Foo, Object::Foo, Object::Object::Foo, etc. as Foo. 
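+    # e.g. (illustrative): "::Object::Foo::Bar" becomes "Foo::Bar", and
+    # "Object::Object::Foo" becomes "Foo".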
+ normalized = const.to_s.sub(/\A::/, "") + normalized.sub!(/\A(Object::)+/, "") + + constants = normalized.split("::") + to_remove = constants.pop + + # Remove the file path from the loaded list. + file_path = search_for_file(const.underscore) + if file_path + expanded = File.expand_path(file_path) + expanded.sub!(/\.rb\z/, "") + loaded.delete(expanded) + end + + if constants.empty? + parent = Object + else + # This method is robust to non-reachable constants. + # + # Non-reachable constants may be passed if some of the parents were + # autoloaded and already removed. It is easier to do a sanity check + # here than require the caller to be clever. We check the parent + # rather than the very const argument because we do not want to + # trigger Kernel#autoloads, see the comment below. + parent_name = constants.join("::") + return unless qualified_const_defined?(parent_name) + parent = constantize(parent_name) + end + + # In an autoloaded user.rb like this + # + # autoload :Foo, 'foo' + # + # class User < ActiveRecord::Base + # end + # + # we correctly register "Foo" as being autoloaded. But if the app does + # not use the "Foo" constant we need to be careful not to trigger + # loading "foo.rb" ourselves. While #const_defined? and #const_get? do + # require the file, #autoload? and #remove_const don't. + # + # We are going to remove the constant nonetheless ---which exists as + # far as Ruby is concerned--- because if the user removes the macro + # call from a class or module that were not autoloaded, as in the + # example above with Object, accessing to that constant must err. + unless parent.autoload?(to_remove) + begin + constantized = parent.const_get(to_remove, false) + rescue NameError + # The constant is no longer reachable, just skip it. + return + else + constantized.before_remove_const if constantized.respond_to?(:before_remove_const) + end + end + + begin + parent.instance_eval { remove_const to_remove } + rescue NameError + # The constant is no longer reachable, just skip it. + end + end + end +end + +ActiveSupport::Dependencies.hook! diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/autoload.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/autoload.rb new file mode 100644 index 0000000..1cee85d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/autoload.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" + +module ActiveSupport + # Autoload and eager load conveniences for your library. + # + # This module allows you to define autoloads based on + # Rails conventions (i.e. no need to define the path + # it is automatically guessed based on the filename) + # and also define a set of constants that needs to be + # eager loaded: + # + # module MyLib + # extend ActiveSupport::Autoload + # + # autoload :Model + # + # eager_autoload do + # autoload :Cache + # end + # end + # + # Then your library can be eager loaded by simply calling: + # + # MyLib.eager_load! 
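+  #
+  # In the example above, +autoload :Model+ is roughly equivalent to the plain
+  # Ruby call below; the path is guessed by underscoring the nesting
+  # (+MyLib+ is a hypothetical library name):
+  #
+  #   autoload :Model, "my_lib/model"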
+ module Autoload + def self.extended(base) # :nodoc: + base.class_eval do + @_autoloads = {} + @_under_path = nil + @_at_path = nil + @_eager_autoload = false + end + end + + def autoload(const_name, path = @_at_path) + unless path + full = [name, @_under_path, const_name.to_s].compact.join("::") + path = Inflector.underscore(full) + end + + if @_eager_autoload + @_autoloads[const_name] = path + end + + super const_name, path + end + + def autoload_under(path) + @_under_path, old_path = path, @_under_path + yield + ensure + @_under_path = old_path + end + + def autoload_at(path) + @_at_path, old_path = path, @_at_path + yield + ensure + @_at_path = old_path + end + + def eager_autoload + old_eager, @_eager_autoload = @_eager_autoload, true + yield + ensure + @_eager_autoload = old_eager + end + + def eager_load! + @_autoloads.each_value { |file| require file } + end + + def autoloads + @_autoloads + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/interlock.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/interlock.rb new file mode 100644 index 0000000..948be75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/interlock.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require "active_support/concurrency/share_lock" + +module ActiveSupport #:nodoc: + module Dependencies #:nodoc: + class Interlock + def initialize # :nodoc: + @lock = ActiveSupport::Concurrency::ShareLock.new + end + + def loading + @lock.exclusive(purpose: :load, compatible: [:load], after_compatible: [:load]) do + yield + end + end + + def unloading + @lock.exclusive(purpose: :unload, compatible: [:load, :unload], after_compatible: [:load, :unload]) do + yield + end + end + + def start_unloading + @lock.start_exclusive(purpose: :unload, compatible: [:load, :unload]) + end + + def done_unloading + @lock.stop_exclusive(compatible: [:load, :unload]) + end + + def start_running + @lock.start_sharing + end + + def done_running + @lock.stop_sharing + end + + def running + @lock.sharing do + yield + end + end + + def permit_concurrent_loads + @lock.yield_shares(compatible: [:load]) do + yield + end + end + + def raw_state(&block) # :nodoc: + @lock.raw_state(&block) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation.rb new file mode 100644 index 0000000..a1ad2ca --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require "singleton" + +module ActiveSupport + # \Deprecation specifies the API used by Rails to deprecate methods, instance + # variables, objects and constants. + class Deprecation + # active_support.rb sets an autoload for ActiveSupport::Deprecation. + # + # If these requires were at the top of the file the constant would not be + # defined by the time their files were loaded. Since some of them reopen + # ActiveSupport::Deprecation its autoload would be triggered, resulting in + # a circular require warning for active_support/deprecation.rb. + # + # So, we define the constant first, and load dependencies later. 
+ require "active_support/deprecation/instance_delegator" + require "active_support/deprecation/behaviors" + require "active_support/deprecation/reporting" + require "active_support/deprecation/constant_accessor" + require "active_support/deprecation/method_wrappers" + require "active_support/deprecation/proxy_wrappers" + require "active_support/core_ext/module/deprecation" + + include Singleton + include InstanceDelegator + include Behavior + include Reporting + include MethodWrapper + + # The version number in which the deprecated behavior will be removed, by default. + attr_accessor :deprecation_horizon + + # It accepts two parameters on initialization. The first is a version of library + # and the second is a library name. + # + # ActiveSupport::Deprecation.new('2.0', 'MyLibrary') + def initialize(deprecation_horizon = "6.0", gem_name = "Rails") + self.gem_name = gem_name + self.deprecation_horizon = deprecation_horizon + # By default, warnings are not silenced and debugging is off. + self.silenced = false + self.debug = false + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/behaviors.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/behaviors.rb new file mode 100644 index 0000000..3abd25a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/behaviors.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require "active_support/notifications" + +module ActiveSupport + # Raised when ActiveSupport::Deprecation::Behavior#behavior is set with :raise. + # You would set :raise, as a behavior to raise errors and proactively report exceptions from deprecations. + class DeprecationException < StandardError + end + + class Deprecation + # Default warning behaviors per Rails.env. + DEFAULT_BEHAVIORS = { + raise: ->(message, callstack, deprecation_horizon, gem_name) { + e = DeprecationException.new(message) + e.set_backtrace(callstack.map(&:to_s)) + raise e + }, + + stderr: ->(message, callstack, deprecation_horizon, gem_name) { + $stderr.puts(message) + $stderr.puts callstack.join("\n ") if debug + }, + + log: ->(message, callstack, deprecation_horizon, gem_name) { + logger = + if defined?(Rails.logger) && Rails.logger + Rails.logger + else + require "active_support/logger" + ActiveSupport::Logger.new($stderr) + end + logger.warn message + logger.debug callstack.join("\n ") if debug + }, + + notify: ->(message, callstack, deprecation_horizon, gem_name) { + notification_name = "deprecation.#{gem_name.underscore.tr('/', '_')}" + ActiveSupport::Notifications.instrument(notification_name, + message: message, + callstack: callstack, + gem_name: gem_name, + deprecation_horizon: deprecation_horizon) + }, + + silence: ->(message, callstack, deprecation_horizon, gem_name) {}, + } + + # Behavior module allows to determine how to display deprecation messages. + # You can create a custom behavior or set any from the +DEFAULT_BEHAVIORS+ + # constant. Available behaviors are: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to +$stderr+. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. + # + # Setting behaviors only affects deprecations that happen after boot time. + # For more information you can read the documentation of the +behavior=+ method. 
+ module Behavior + # Whether to print a backtrace along with the warning. + attr_accessor :debug + + # Returns the current behavior or if one isn't set, defaults to +:stderr+. + def behavior + @behavior ||= [DEFAULT_BEHAVIORS[:stderr]] + end + + # Sets the behavior to the specified value. Can be a single value, array, + # or an object that responds to +call+. + # + # Available behaviors: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to +$stderr+. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. + # + # Setting behaviors only affects deprecations that happen after boot time. + # Deprecation warnings raised by gems are not affected by this setting + # because they happen before Rails boots up. + # + # ActiveSupport::Deprecation.behavior = :stderr + # ActiveSupport::Deprecation.behavior = [:stderr, :log] + # ActiveSupport::Deprecation.behavior = MyCustomHandler + # ActiveSupport::Deprecation.behavior = ->(message, callstack, deprecation_horizon, gem_name) { + # # custom stuff + # } + def behavior=(behavior) + @behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || arity_coerce(b) } + end + + private + def arity_coerce(behavior) + unless behavior.respond_to?(:call) + raise ArgumentError, "#{behavior.inspect} is not a valid deprecation behavior." + end + + if behavior.arity == 4 || behavior.arity == -1 + behavior + else + -> message, callstack, _, _ { behavior.call(message, callstack) } + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/constant_accessor.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/constant_accessor.rb new file mode 100644 index 0000000..1ed0015 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/constant_accessor.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + # DeprecatedConstantAccessor transforms a constant into a deprecated one by + # hooking +const_missing+. + # + # It takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. + # + # The deprecated constant now returns the same object as the new one rather + # than a proxy object, so it can be used transparently in +rescue+ blocks + # etc. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # include ActiveSupport::Deprecation::DeprecatedConstantAccessor + # deprecate_constant 'PLANETS', 'PLANETS_POST_2006' + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. 
+ # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + module DeprecatedConstantAccessor + def self.included(base) + require "active_support/inflector/methods" + + extension = Module.new do + def const_missing(missing_const_name) + if class_variable_defined?(:@@_deprecated_constants) + if (replacement = class_variable_get(:@@_deprecated_constants)[missing_const_name.to_s]) + replacement[:deprecator].warn(replacement[:message] || "#{name}::#{missing_const_name} is deprecated! Use #{replacement[:new]} instead.", caller_locations) + return ActiveSupport::Inflector.constantize(replacement[:new].to_s) + end + end + super + end + + def deprecate_constant(const_name, new_constant, message: nil, deprecator: ActiveSupport::Deprecation.instance) + class_variable_set(:@@_deprecated_constants, {}) unless class_variable_defined?(:@@_deprecated_constants) + class_variable_get(:@@_deprecated_constants)[const_name.to_s] = { new: new_constant, message: message, deprecator: deprecator } + end + end + base.singleton_class.prepend extension + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/instance_delegator.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/instance_delegator.rb new file mode 100644 index 0000000..8beda37 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/instance_delegator.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class Deprecation + module InstanceDelegator # :nodoc: + def self.included(base) + base.extend(ClassMethods) + base.singleton_class.prepend(OverrideDelegators) + base.public_class_method :new + end + + module ClassMethods # :nodoc: + def include(included_module) + included_module.instance_methods.each { |m| method_added(m) } + super + end + + def method_added(method_name) + singleton_class.delegate(method_name, to: :instance) + end + end + + module OverrideDelegators # :nodoc: + def warn(message = nil, callstack = nil) + callstack ||= caller_locations(2) + super + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/method_wrappers.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/method_wrappers.rb new file mode 100644 index 0000000..3c5d9ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/method_wrappers.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/array/extract_options" + +module ActiveSupport + class Deprecation + module MethodWrapper + # Declare that a method has been deprecated. + # + # class Fred + # def aaa; end + # def bbb; end + # def ccc; end + # def ddd; end + # def eee; end + # end + # + # Using the default deprecator: + # ActiveSupport::Deprecation.deprecate_methods(Fred, :aaa, bbb: :zzz, ccc: 'use Bar#ccc instead') + # # => Fred + # + # Fred.new.aaa + # # DEPRECATION WARNING: aaa is deprecated and will be removed from Rails 5.1. 
(called from irb_binding at (irb):10) + # # => nil + # + # Fred.new.bbb + # # DEPRECATION WARNING: bbb is deprecated and will be removed from Rails 5.1 (use zzz instead). (called from irb_binding at (irb):11) + # # => nil + # + # Fred.new.ccc + # # DEPRECATION WARNING: ccc is deprecated and will be removed from Rails 5.1 (use Bar#ccc instead). (called from irb_binding at (irb):12) + # # => nil + # + # Passing in a custom deprecator: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # ActiveSupport::Deprecation.deprecate_methods(Fred, ddd: :zzz, deprecator: custom_deprecator) + # # => [:ddd] + # + # Fred.new.ddd + # DEPRECATION WARNING: ddd is deprecated and will be removed from MyGem next-release (use zzz instead). (called from irb_binding at (irb):15) + # # => nil + # + # Using a custom deprecator directly: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # custom_deprecator.deprecate_methods(Fred, eee: :zzz) + # # => [:eee] + # + # Fred.new.eee + # DEPRECATION WARNING: eee is deprecated and will be removed from MyGem next-release (use zzz instead). (called from irb_binding at (irb):18) + # # => nil + def deprecate_methods(target_module, *method_names) + options = method_names.extract_options! + deprecator = options.delete(:deprecator) || self + method_names += options.keys + mod = Module.new + + method_names.each do |method_name| + if target_module.method_defined?(method_name) || target_module.private_method_defined?(method_name) + aliased_method, punctuation = method_name.to_s.sub(/([?!=])$/, ""), $1 + with_method = "#{aliased_method}_with_deprecation#{punctuation}" + without_method = "#{aliased_method}_without_deprecation#{punctuation}" + + target_module.send(:define_method, with_method) do |*args, &block| + deprecator.deprecation_warning(method_name, options[method_name]) + send(without_method, *args, &block) + end + + target_module.send(:alias_method, without_method, method_name) + target_module.send(:alias_method, method_name, with_method) + + case + when target_module.protected_method_defined?(without_method) + target_module.send(:protected, method_name) + when target_module.private_method_defined?(without_method) + target_module.send(:private, method_name) + end + else + mod.send(:define_method, method_name) do |*args, &block| + deprecator.deprecation_warning(method_name, options[method_name]) + super(*args, &block) + end + end + end + + target_module.prepend(mod) unless mod.instance_methods(false).empty? + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/proxy_wrappers.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/proxy_wrappers.rb new file mode 100644 index 0000000..896c0d2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/proxy_wrappers.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +require "active_support/core_ext/regexp" + +module ActiveSupport + class Deprecation + class DeprecationProxy #:nodoc: + def self.new(*args, &block) + object = args.first + + return object unless object + super + end + + instance_methods.each { |m| undef_method m unless /^__|^object_id$/.match?(m) } + + # Don't give a deprecation warning on inspect since test/unit and error + # logs rely on it for diagnostics. 
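+      #
+      #   # Illustrative, for a hypothetical +proxy+ built by a subclass:
+      #   proxy.inspect # delegates straight to the target, no warning
+      #   proxy.to_s    # goes through method_missing and emits a warning first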
+ def inspect + target.inspect + end + + private + def method_missing(called, *args, &block) + warn caller_locations, called, args + target.__send__(called, *args, &block) + end + end + + # DeprecatedObjectProxy transforms an object into a deprecated one. It + # takes an object, a deprecation message and optionally a deprecator. The + # deprecator defaults to +ActiveSupport::Deprecator+ if none is specified. + # + # deprecated_object = ActiveSupport::Deprecation::DeprecatedObjectProxy.new(Object.new, "This object is now deprecated") + # # => # + # + # deprecated_object.to_s + # DEPRECATION WARNING: This object is now deprecated. + # (Backtrace) + # # => "#" + class DeprecatedObjectProxy < DeprecationProxy + def initialize(object, message, deprecator = ActiveSupport::Deprecation.instance) + @object = object + @message = message + @deprecator = deprecator + end + + private + def target + @object + end + + def warn(callstack, called, args) + @deprecator.warn(@message, callstack) + end + end + + # DeprecatedInstanceVariableProxy transforms an instance variable into a + # deprecated one. It takes an instance of a class, a method on that class + # and an instance variable. It optionally takes a deprecator as the last + # argument. The deprecator defaults to +ActiveSupport::Deprecator+ if none + # is specified. + # + # class Example + # def initialize + # @request = ActiveSupport::Deprecation::DeprecatedInstanceVariableProxy.new(self, :request, :@request) + # @_request = :special_request + # end + # + # def request + # @_request + # end + # + # def old_request + # @request + # end + # end + # + # example = Example.new + # # => # + # + # example.old_request.to_s + # # => DEPRECATION WARNING: @request is deprecated! Call request.to_s instead of + # @request.to_s + # (Backtrace information…) + # "special_request" + # + # example.request.to_s + # # => "special_request" + class DeprecatedInstanceVariableProxy < DeprecationProxy + def initialize(instance, method, var = "@#{method}", deprecator = ActiveSupport::Deprecation.instance) + @instance = instance + @method = method + @var = var + @deprecator = deprecator + end + + private + def target + @instance.__send__(@method) + end + + def warn(callstack, called, args) + @deprecator.warn("#{@var} is deprecated! Call #{@method}.#{called} instead of #{@var}.#{called}. Args: #{args.inspect}", callstack) + end + end + + # DeprecatedConstantProxy transforms a constant into a deprecated one. It + # takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. The deprecated constant + # now returns the value of the new one. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. + # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + class DeprecatedConstantProxy < DeprecationProxy + def initialize(old_const, new_const, deprecator = ActiveSupport::Deprecation.instance, message: "#{old_const} is deprecated! 
Use #{new_const} instead.") + require "active_support/inflector/methods" + + @old_const = old_const + @new_const = new_const + @deprecator = deprecator + @message = message + end + + # Returns the class of the new constant. + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # PLANETS.class # => Array + def class + target.class + end + + private + def target + ActiveSupport::Inflector.constantize(@new_const.to_s) + end + + def warn(callstack, called, args) + @deprecator.warn(@message, callstack) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/reporting.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/reporting.rb new file mode 100644 index 0000000..7075b5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/reporting.rb @@ -0,0 +1,114 @@ +# frozen_string_literal: true + +require "rbconfig" + +module ActiveSupport + class Deprecation + module Reporting + # Whether to print a message (silent mode) + attr_accessor :silenced + # Name of gem where method is deprecated + attr_accessor :gem_name + + # Outputs a deprecation warning to the output configured by + # ActiveSupport::Deprecation.behavior. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + def warn(message = nil, callstack = nil) + return if silenced + + callstack ||= caller_locations(2) + deprecation_message(callstack, message).tap do |m| + behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) } + end + end + + # Silence deprecation warnings within the block. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + # + # ActiveSupport::Deprecation.silence do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => nil + def silence + old_silenced, @silenced = @silenced, true + yield + ensure + @silenced = old_silenced + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + deprecated_method_warning(deprecated_method_name, message).tap do |msg| + warn(msg, caller_backtrace) + end + end + + private + # Outputs a deprecation warning message + # + # deprecated_method_warning(:method_name) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon}" + # deprecated_method_warning(:method_name, :another_method) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (use another_method instead)" + # deprecated_method_warning(:method_name, "Optional message") + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (Optional message)" + def deprecated_method_warning(method_name, message = nil) + warning = "#{method_name} is deprecated and will be removed from #{gem_name} #{deprecation_horizon}" + case message + when Symbol then "#{warning} (use #{message} instead)" + when String then "#{warning} (#{message})" + else warning + end + end + + def deprecation_message(callstack, message = nil) + message ||= "You are using deprecated behavior which will be removed from the next major or minor release." 
+ "DEPRECATION WARNING: #{message} #{deprecation_caller_message(callstack)}" + end + + def deprecation_caller_message(callstack) + file, line, method = extract_callstack(callstack) + if file + if line && method + "(called from #{method} at #{file}:#{line})" + else + "(called from #{file}:#{line})" + end + end + end + + def extract_callstack(callstack) + return _extract_callstack(callstack) if callstack.first.is_a? String + + offending_line = callstack.find { |frame| + frame.absolute_path && !ignored_callstack(frame.absolute_path) + } || callstack.first + + [offending_line.path, offending_line.lineno, offending_line.label] + end + + def _extract_callstack(callstack) + warn "Please pass `caller_locations` to the deprecation API" if $VERBOSE + offending_line = callstack.find { |line| !ignored_callstack(line) } || callstack.first + + if offending_line + if md = offending_line.match(/^(.+?):(\d+)(?::in `(.*?)')?/) + md.captures + else + offending_line + end + end + end + + RAILS_GEM_ROOT = File.expand_path("../../../..", __dir__) + "/" + + def ignored_callstack(path) + path.start_with?(RAILS_GEM_ROOT) || path.start_with?(RbConfig::CONFIG["rubylibdir"]) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/descendants_tracker.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/descendants_tracker.rb new file mode 100644 index 0000000..a4cee78 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/descendants_tracker.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +module ActiveSupport + # This module provides an internal implementation to track descendants + # which is faster than iterating through ObjectSpace. + module DescendantsTracker + @@direct_descendants = {} + + class << self + def direct_descendants(klass) + @@direct_descendants[klass] || [] + end + + def descendants(klass) + arr = [] + accumulate_descendants(klass, arr) + arr + end + + def clear + if defined? ActiveSupport::Dependencies + @@direct_descendants.each do |klass, descendants| + if ActiveSupport::Dependencies.autoloaded?(klass) + @@direct_descendants.delete(klass) + else + descendants.reject! { |v| ActiveSupport::Dependencies.autoloaded?(v) } + end + end + else + @@direct_descendants.clear + end + end + + # This is the only method that is not thread safe, but is only ever called + # during the eager loading phase. 
+ def store_inherited(klass, descendant) + (@@direct_descendants[klass] ||= []) << descendant + end + + private + def accumulate_descendants(klass, acc) + if direct_descendants = @@direct_descendants[klass] + acc.concat(direct_descendants) + direct_descendants.each { |direct_descendant| accumulate_descendants(direct_descendant, acc) } + end + end + end + + def inherited(base) + DescendantsTracker.store_inherited(self, base) + super + end + + def direct_descendants + DescendantsTracker.direct_descendants(self) + end + + def descendants + DescendantsTracker.descendants(self) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/digest.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/digest.rb new file mode 100644 index 0000000..fba10fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/digest.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + class Digest #:nodoc: + class <(other) + if Scalar === other || Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + else + nil + end + end + + def +(other) + if Duration === other + seconds = value + other.parts[:seconds] + new_parts = other.parts.merge(seconds: seconds) + new_value = value + other.value + + Duration.new(new_value, new_parts) + else + calculate(:+, other) + end + end + + def -(other) + if Duration === other + seconds = value - other.parts[:seconds] + new_parts = other.parts.map { |part, other_value| [part, -other_value] }.to_h + new_parts = new_parts.merge(seconds: seconds) + new_value = value - other.value + + Duration.new(new_value, new_parts) + else + calculate(:-, other) + end + end + + def *(other) + if Duration === other + new_parts = other.parts.map { |part, other_value| [part, value * other_value] }.to_h + new_value = value * other.value + + Duration.new(new_value, new_parts) + else + calculate(:*, other) + end + end + + def /(other) + if Duration === other + value / other.value + else + calculate(:/, other) + end + end + + def %(other) + if Duration === other + Duration.build(value % other.value) + else + calculate(:%, other) + end + end + + private + def calculate(op, other) + if Scalar === other + Scalar.new(value.public_send(op, other.value)) + elsif Numeric === other + Scalar.new(value.public_send(op, other)) + else + raise_type_error(other) + end + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end + + SECONDS_PER_MINUTE = 60 + SECONDS_PER_HOUR = 3600 + SECONDS_PER_DAY = 86400 + SECONDS_PER_WEEK = 604800 + SECONDS_PER_MONTH = 2629746 # 1/12 of a gregorian year + SECONDS_PER_YEAR = 31556952 # length of a gregorian year (365.2425 days) + + PARTS_IN_SECONDS = { + seconds: 1, + minutes: SECONDS_PER_MINUTE, + hours: SECONDS_PER_HOUR, + days: SECONDS_PER_DAY, + weeks: SECONDS_PER_WEEK, + months: SECONDS_PER_MONTH, + years: SECONDS_PER_YEAR + }.freeze + + PARTS = [:years, :months, :weeks, :days, :hours, :minutes, :seconds].freeze + + attr_accessor :value, :parts + + autoload :ISO8601Parser, "active_support/duration/iso8601_parser" + autoload :ISO8601Serializer, "active_support/duration/iso8601_serializer" + + class << self + # Creates a new Duration from string formatted according to ISO 8601 Duration. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # This method allows negative parts to be present in pattern. 
+ # If invalid string is provided, it will raise +ActiveSupport::Duration::ISO8601Parser::ParsingError+. + def parse(iso8601duration) + parts = ISO8601Parser.new(iso8601duration).parse! + new(calculate_total_seconds(parts), parts) + end + + def ===(other) #:nodoc: + other.is_a?(Duration) + rescue ::NoMethodError + false + end + + def seconds(value) #:nodoc: + new(value, [[:seconds, value]]) + end + + def minutes(value) #:nodoc: + new(value * SECONDS_PER_MINUTE, [[:minutes, value]]) + end + + def hours(value) #:nodoc: + new(value * SECONDS_PER_HOUR, [[:hours, value]]) + end + + def days(value) #:nodoc: + new(value * SECONDS_PER_DAY, [[:days, value]]) + end + + def weeks(value) #:nodoc: + new(value * SECONDS_PER_WEEK, [[:weeks, value]]) + end + + def months(value) #:nodoc: + new(value * SECONDS_PER_MONTH, [[:months, value]]) + end + + def years(value) #:nodoc: + new(value * SECONDS_PER_YEAR, [[:years, value]]) + end + + # Creates a new Duration from a seconds value that is converted + # to the individual parts: + # + # ActiveSupport::Duration.build(31556952).parts # => {:years=>1} + # ActiveSupport::Duration.build(2716146).parts # => {:months=>1, :days=>1} + # + def build(value) + parts = {} + remainder = value.to_f + + PARTS.each do |part| + unless part == :seconds + part_in_seconds = PARTS_IN_SECONDS[part] + parts[part] = remainder.div(part_in_seconds) + remainder = (remainder % part_in_seconds).round(9) + end + end + + parts[:seconds] = remainder + + new(value, parts) + end + + private + + def calculate_total_seconds(parts) + parts.inject(0) do |total, (part, value)| + total + value * PARTS_IN_SECONDS[part] + end + end + end + + def initialize(value, parts) #:nodoc: + @value, @parts = value, parts.to_h + @parts.default = 0 + @parts.reject! { |k, v| v.zero? } + end + + def coerce(other) #:nodoc: + if Scalar === other + [other, self] + else + [Scalar.new(other), self] + end + end + + # Compares one Duration with another or a Numeric to this Duration. + # Numeric values are treated as seconds. + def <=>(other) + if Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + end + end + + # Adds another Duration or a Numeric to this Duration. Numeric values + # are treated as seconds. + def +(other) + if Duration === other + parts = @parts.dup + other.parts.each do |(key, value)| + parts[key] += value + end + Duration.new(value + other.value, parts) + else + seconds = @parts[:seconds] + other + Duration.new(value + other, @parts.merge(seconds: seconds)) + end + end + + # Subtracts another Duration or a Numeric from this Duration. Numeric + # values are treated as seconds. + def -(other) + self + (-other) + end + + # Multiplies this Duration by a Numeric and returns a new Duration. + def *(other) + if Scalar === other || Duration === other + Duration.new(value * other.value, parts.map { |type, number| [type, number * other.value] }) + elsif Numeric === other + Duration.new(value * other, parts.map { |type, number| [type, number * other] }) + else + raise_type_error(other) + end + end + + # Divides this Duration by a Numeric and returns a new Duration. + def /(other) + if Scalar === other + Duration.new(value / other.value, parts.map { |type, number| [type, number / other.value] }) + elsif Duration === other + value / other.value + elsif Numeric === other + Duration.new(value / other, parts.map { |type, number| [type, number / other] }) + else + raise_type_error(other) + end + end + + # Returns the modulo of this Duration by another Duration or Numeric. 
+ # Numeric values are treated as seconds. + def %(other) + if Duration === other || Scalar === other + Duration.build(value % other.value) + elsif Numeric === other + Duration.build(value % other) + else + raise_type_error(other) + end + end + + def -@ #:nodoc: + Duration.new(-value, parts.map { |type, number| [type, -number] }) + end + + def is_a?(klass) #:nodoc: + Duration == klass || value.is_a?(klass) + end + alias :kind_of? :is_a? + + def instance_of?(klass) # :nodoc: + Duration == klass || value.instance_of?(klass) + end + + # Returns +true+ if +other+ is also a Duration instance with the + # same +value+, or if other == value. + def ==(other) + if Duration === other + other.value == value + else + other == value + end + end + + # Returns the amount of seconds a duration covers as a string. + # For more information check to_i method. + # + # 1.day.to_s # => "86400" + def to_s + @value.to_s + end + + # Returns the number of seconds that this Duration represents. + # + # 1.minute.to_i # => 60 + # 1.hour.to_i # => 3600 + # 1.day.to_i # => 86400 + # + # Note that this conversion makes some assumptions about the + # duration of some periods, e.g. months are always 1/12 of year + # and years are 365.2425 days: + # + # # equivalent to (1.year / 12).to_i + # 1.month.to_i # => 2629746 + # + # # equivalent to 365.2425.days.to_i + # 1.year.to_i # => 31556952 + # + # In such cases, Ruby's core + # Date[http://ruby-doc.org/stdlib/libdoc/date/rdoc/Date.html] and + # Time[http://ruby-doc.org/stdlib/libdoc/time/rdoc/Time.html] should be used for precision + # date and time arithmetic. + def to_i + @value.to_i + end + + # Returns +true+ if +other+ is also a Duration instance, which has the + # same parts as this one. + def eql?(other) + Duration === other && other.value.eql?(value) + end + + def hash + @value.hash + end + + # Calculates a new Time or Date that is as far in the future + # as this Duration represents. + def since(time = ::Time.current) + sum(1, time) + end + alias :from_now :since + alias :after :since + + # Calculates a new Time or Date that is as far in the past + # as this Duration represents. + def ago(time = ::Time.current) + sum(-1, time) + end + alias :until :ago + alias :before :ago + + def inspect #:nodoc: + return "0 seconds" if parts.empty? + + parts. + reduce(::Hash.new(0)) { |h, (l, r)| h[l] += r; h }. + sort_by { |unit, _ | PARTS.index(unit) }. + map { |unit, val| "#{val} #{val == 1 ? unit.to_s.chop : unit.to_s}" }. + to_sentence(locale: ::I18n.default_locale) + end + + def as_json(options = nil) #:nodoc: + to_i + end + + def init_with(coder) #:nodoc: + initialize(coder["value"], coder["parts"]) + end + + def encode_with(coder) #:nodoc: + coder.map = { "value" => @value, "parts" => @parts } + end + + # Build ISO 8601 Duration string for this duration. + # The +precision+ parameter can be used to limit seconds' precision of duration. 
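+    #
+    #   # Illustrative (assumes the numeric core extensions, e.g. +1.hour+, are loaded):
+    #   (1.hour + 30.minutes).iso8601           # => "PT1H30M"
+    #   10.000001.seconds.iso8601(precision: 2) # => "PT10.00S"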
+ def iso8601(precision: nil) + ISO8601Serializer.new(self, precision: precision).serialize + end + + private + + def sum(sign, time = ::Time.current) + parts.inject(time) do |t, (type, number)| + if t.acts_like?(:time) || t.acts_like?(:date) + if type == :seconds + t.since(sign * number) + elsif type == :minutes + t.since(sign * number * 60) + elsif type == :hours + t.since(sign * number * 3600) + else + t.advance(type => sign * number) + end + else + raise ::ArgumentError, "expected a time or date, got #{time.inspect}" + end + end + end + + def respond_to_missing?(method, _) + value.respond_to?(method) + end + + def method_missing(method, *args, &block) + value.public_send(method, *args, &block) + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_parser.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_parser.rb new file mode 100644 index 0000000..1847eea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_parser.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require "strscan" +require "active_support/core_ext/regexp" + +module ActiveSupport + class Duration + # Parses a string formatted according to ISO 8601 Duration into the hash. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # + # This parser allows negative parts to be present in pattern. + class ISO8601Parser # :nodoc: + class ParsingError < ::ArgumentError; end + + PERIOD_OR_COMMA = /\.|,/ + PERIOD = ".".freeze + COMMA = ",".freeze + + SIGN_MARKER = /\A\-|\+|/ + DATE_MARKER = /P/ + TIME_MARKER = /T/ + DATE_COMPONENT = /(\-?\d+(?:[.,]\d+)?)(Y|M|D|W)/ + TIME_COMPONENT = /(\-?\d+(?:[.,]\d+)?)(H|M|S)/ + + DATE_TO_PART = { "Y" => :years, "M" => :months, "W" => :weeks, "D" => :days } + TIME_TO_PART = { "H" => :hours, "M" => :minutes, "S" => :seconds } + + DATE_COMPONENTS = [:years, :months, :days] + TIME_COMPONENTS = [:hours, :minutes, :seconds] + + attr_reader :parts, :scanner + attr_accessor :mode, :sign + + def initialize(string) + @scanner = StringScanner.new(string) + @parts = {} + @mode = :start + @sign = 1 + end + + def parse! + while !finished? + case mode + when :start + if scan(SIGN_MARKER) + self.sign = (scanner.matched == "-") ? -1 : 1 + self.mode = :sign + else + raise_parsing_error + end + + when :sign + if scan(DATE_MARKER) + self.mode = :date + else + raise_parsing_error + end + + when :date + if scan(TIME_MARKER) + self.mode = :time + elsif scan(DATE_COMPONENT) + parts[DATE_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + when :time + if scan(TIME_COMPONENT) + parts[TIME_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + end + end + + validate! + parts + end + + private + + def finished? + scanner.eos? + end + + # Parses number which can be a float with either comma or period. + def number + PERIOD_OR_COMMA.match?(scanner[1]) ? scanner[1].tr(COMMA, PERIOD).to_f : scanner[1].to_i + end + + def scan(pattern) + scanner.scan(pattern) + end + + def raise_parsing_error(reason = nil) + raise ParsingError, "Invalid ISO 8601 duration: #{scanner.string.inspect} #{reason}".strip + end + + # Checks for various semantic errors as stated in ISO 8601 standard. + def validate! + raise_parsing_error("is empty duration") if parts.empty? 
+ + # Mixing any of Y, M, D with W is invalid. + if parts.key?(:weeks) && (parts.keys & DATE_COMPONENTS).any? + raise_parsing_error("mixing weeks with other date parts not allowed") + end + + # Specifying an empty T part is invalid. + if mode == :time && (parts.keys & TIME_COMPONENTS).empty? + raise_parsing_error("time part marker is present but time part is empty") + end + + fractions = parts.values.reject(&:zero?).select { |a| (a % 1) != 0 } + unless fractions.empty? || (fractions.size == 1 && fractions.last == @parts.values.reject(&:zero?).last) + raise_parsing_error "(only last part can be fractional)" + end + + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_serializer.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_serializer.rb new file mode 100644 index 0000000..bb177ae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_serializer.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" +require "active_support/core_ext/hash/transform_values" + +module ActiveSupport + class Duration + # Serializes duration to string according to ISO 8601 Duration format. + class ISO8601Serializer # :nodoc: + def initialize(duration, precision: nil) + @duration = duration + @precision = precision + end + + # Builds and returns output string. + def serialize + parts, sign = normalize + return "PT0S".freeze if parts.empty? + + output = "P".dup + output << "#{parts[:years]}Y" if parts.key?(:years) + output << "#{parts[:months]}M" if parts.key?(:months) + output << "#{parts[:weeks]}W" if parts.key?(:weeks) + output << "#{parts[:days]}D" if parts.key?(:days) + time = "".dup + time << "#{parts[:hours]}H" if parts.key?(:hours) + time << "#{parts[:minutes]}M" if parts.key?(:minutes) + if parts.key?(:seconds) + time << "#{sprintf(@precision ? "%0.0#{@precision}f" : '%g', parts[:seconds])}S" + end + output << "T#{time}" unless time.empty? + "#{sign}#{output}" + end + + private + + # Return pair of duration's parts and whole duration sign. + # Parts are summarized (as they can become repetitive due to addition, etc). + # Zero parts are removed as not significant. + # If all parts are negative it will negate all of them and return minus as a sign. + def normalize + parts = @duration.parts.each_with_object(Hash.new(0)) do |(k, v), p| + p[k] += v unless v.zero? + end + # If all parts are negative - let's make a negative duration + sign = "" + if parts.values.all? 
{ |v| v < 0 } + sign = "-" + parts.transform_values!(&:-@) + end + [parts, sign] + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_configuration.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_configuration.rb new file mode 100644 index 0000000..b17695f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_configuration.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +require "yaml" +require "active_support/encrypted_file" +require "active_support/ordered_options" +require "active_support/core_ext/object/inclusion" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class EncryptedConfiguration < EncryptedFile + delegate :[], :fetch, to: :config + delegate_missing_to :options + + def initialize(config_path:, key_path:, env_key:, raise_if_missing_key:) + super content_path: config_path, key_path: key_path, + env_key: env_key, raise_if_missing_key: raise_if_missing_key + end + + # Allow a config to be started without a file present + def read + super + rescue ActiveSupport::EncryptedFile::MissingContentError + "" + end + + def write(contents) + deserialize(contents) + + super + end + + def config + @config ||= deserialize(read).deep_symbolize_keys + end + + private + def options + @options ||= ActiveSupport::InheritableOptions.new(config) + end + + def serialize(config) + config.present? ? YAML.dump(config) : "" + end + + def deserialize(config) + YAML.load(config).presence || {} + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_file.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_file.rb new file mode 100644 index 0000000..c66f1b5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_file.rb @@ -0,0 +1,99 @@ +# frozen_string_literal: true + +require "pathname" +require "active_support/message_encryptor" + +module ActiveSupport + class EncryptedFile + class MissingContentError < RuntimeError + def initialize(content_path) + super "Missing encrypted content file in #{content_path}." + end + end + + class MissingKeyError < RuntimeError + def initialize(key_path:, env_key:) + super \ + "Missing encryption key to decrypt file with. " + + "Ask your team for your master key and write it to #{key_path} or put it in the ENV['#{env_key}']." + end + end + + CIPHER = "aes-128-gcm" + + def self.generate_key + SecureRandom.hex(ActiveSupport::MessageEncryptor.key_len(CIPHER)) + end + + + attr_reader :content_path, :key_path, :env_key, :raise_if_missing_key + + def initialize(content_path:, key_path:, env_key:, raise_if_missing_key:) + @content_path, @key_path = Pathname.new(content_path), Pathname.new(key_path) + @env_key, @raise_if_missing_key = env_key, raise_if_missing_key + end + + def key + read_env_key || read_key_file || handle_missing_key + end + + def read + if !key.nil? && content_path.exist? 
+ decrypt content_path.binread + else + raise MissingContentError, content_path + end + end + + def write(contents) + IO.binwrite "#{content_path}.tmp", encrypt(contents) + FileUtils.mv "#{content_path}.tmp", content_path + end + + def change(&block) + writing read, &block + end + + + private + def writing(contents) + tmp_file = "#{Process.pid}.#{content_path.basename.to_s.chomp('.enc')}" + tmp_path = Pathname.new File.join(Dir.tmpdir, tmp_file) + tmp_path.binwrite contents + + yield tmp_path + + updated_contents = tmp_path.binread + + write(updated_contents) if updated_contents != contents + ensure + FileUtils.rm(tmp_path) if tmp_path.exist? + end + + + def encrypt(contents) + encryptor.encrypt_and_sign contents + end + + def decrypt(contents) + encryptor.decrypt_and_verify contents + end + + def encryptor + @encryptor ||= ActiveSupport::MessageEncryptor.new([ key ].pack("H*"), cipher: CIPHER) + end + + + def read_env_key + ENV[env_key] + end + + def read_key_file + key_path.binread.strip if key_path.exist? + end + + def handle_missing_key + raise MissingKeyError, key_path: key_path, env_key: env_key if raise_if_missing_key + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/evented_file_update_checker.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/evented_file_update_checker.rb new file mode 100644 index 0000000..97e982e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/evented_file_update_checker.rb @@ -0,0 +1,205 @@ +# frozen_string_literal: true + +require "set" +require "pathname" +require "concurrent/atomic/atomic_boolean" + +module ActiveSupport + # Allows you to "listen" to changes in a file system. + # The evented file updater does not hit disk when checking for updates + # instead it uses platform specific file system events to trigger a change + # in state. + # + # The file checker takes an array of files to watch or a hash specifying directories + # and file extensions to watch. It also takes a block that is called when + # EventedFileUpdateChecker#execute is run or when EventedFileUpdateChecker#execute_if_updated + # is run and there have been changes to the file system. + # + # Note: Forking will cause the first call to `updated?` to return `true`. + # + # Example: + # + # checker = ActiveSupport::EventedFileUpdateChecker.new(["/tmp/foo"]) { puts "changed" } + # checker.updated? + # # => false + # checker.execute_if_updated + # # => nil + # + # FileUtils.touch("/tmp/foo") + # + # checker.updated? + # # => true + # checker.execute_if_updated + # # => "changed" + # + class EventedFileUpdateChecker #:nodoc: all + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize an EventedFileUpdateChecker" + end + + @ph = PathHelper.new + @files = files.map { |f| @ph.xpath(f) }.to_set + + @dirs = {} + dirs.each do |dir, exts| + @dirs[@ph.xpath(dir)] = Array(exts).map { |ext| @ph.normalize_extension(ext) } + end + + @block = block + @updated = Concurrent::AtomicBoolean.new(false) + @lcsp = @ph.longest_common_subpath(@dirs.keys) + @pid = Process.pid + @boot_mutex = Mutex.new + + if (@dtw = directories_to_watch).any? + # Loading listen triggers warnings. These are originated by a legit + # usage of attr_* macros for private attributes, but adds a lot of noise + # to our test suite. Thus, we lazy load it and disable warnings locally. 
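+          # A sketch of what the host application typically needs for this
+          # require to succeed (illustrative, not taken from this diff; the
+          # version constraint is the common Rails 5.2 default and is an
+          # assumption):
+          #
+          #   # Gemfile
+          #   group :development do
+          #     gem 'listen', '>= 3.0.5', '< 3.2'
+          #   end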
+ silence_warnings do + begin + require "listen" + rescue LoadError => e + raise LoadError, "Could not load the 'listen' gem. Add `gem 'listen'` to the development group of your Gemfile", e.backtrace + end + end + end + boot! + end + + def updated? + @boot_mutex.synchronize do + if @pid != Process.pid + boot! + @pid = Process.pid + @updated.make_true + end + end + @updated.true? + end + + def execute + @updated.make_false + @block.call + end + + def execute_if_updated + if updated? + yield if block_given? + execute + true + end + end + + private + def boot! + Listen.to(*@dtw, &method(:changed)).start + end + + def changed(modified, added, removed) + unless updated? + @updated.make_true if (modified + added + removed).any? { |f| watching?(f) } + end + end + + def watching?(file) + file = @ph.xpath(file) + + if @files.member?(file) + true + elsif file.directory? + false + else + ext = @ph.normalize_extension(file.extname) + + file.dirname.ascend do |dir| + if @dirs.fetch(dir, []).include?(ext) + break true + elsif dir == @lcsp || dir.root? + break false + end + end + end + end + + def directories_to_watch + dtw = (@files + @dirs.keys).map { |f| @ph.existing_parent(f) } + dtw.compact! + dtw.uniq! + + normalized_gem_paths = Gem.path.map { |path| File.join path, "" } + dtw = dtw.reject do |path| + normalized_gem_paths.any? { |gem_path| path.to_s.start_with?(gem_path) } + end + + @ph.filter_out_descendants(dtw) + end + + class PathHelper + def xpath(path) + Pathname.new(path).expand_path + end + + def normalize_extension(ext) + ext.to_s.sub(/\A\./, "") + end + + # Given a collection of Pathname objects returns the longest subpath + # common to all of them, or +nil+ if there is none. + def longest_common_subpath(paths) + return if paths.empty? + + lcsp = Pathname.new(paths[0]) + + paths[1..-1].each do |path| + until ascendant_of?(lcsp, path) + if lcsp.root? + # If we get here a root directory is not an ascendant of path. + # This may happen if there are paths in different drives on + # Windows. + return + else + lcsp = lcsp.parent + end + end + end + + lcsp + end + + # Returns the deepest existing ascendant, which could be the argument itself. + def existing_parent(dir) + dir.ascend do |ascendant| + break ascendant if ascendant.directory? + end + end + + # Filters out directories which are descendants of others in the collection (stable). + def filter_out_descendants(dirs) + return dirs if dirs.length < 2 + + dirs_sorted_by_nparts = dirs.sort_by { |dir| dir.each_filename.to_a.length } + descendants = [] + + until dirs_sorted_by_nparts.empty? + dir = dirs_sorted_by_nparts.shift + + dirs_sorted_by_nparts.reject! do |possible_descendant| + ascendant_of?(dir, possible_descendant) && descendants << possible_descendant + end + end + + # Array#- preserves order. 
+ dirs - descendants + end + + private + + def ascendant_of?(base, other) + base != other && other.ascend do |ascendant| + break true if base == ascendant + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/execution_wrapper.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/execution_wrapper.rb new file mode 100644 index 0000000..f48c586 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/execution_wrapper.rb @@ -0,0 +1,128 @@ +# frozen_string_literal: true + +require "active_support/callbacks" + +module ActiveSupport + class ExecutionWrapper + include ActiveSupport::Callbacks + + Null = Object.new # :nodoc: + def Null.complete! # :nodoc: + end + + define_callbacks :run + define_callbacks :complete + + def self.to_run(*args, &block) + set_callback(:run, *args, &block) + end + + def self.to_complete(*args, &block) + set_callback(:complete, *args, &block) + end + + RunHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + hook_state[hook] = hook.run + end + end + + CompleteHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + if hook_state.key?(hook) + hook.complete hook_state[hook] + end + end + alias after before + end + + # Register an object to be invoked during both the +run+ and + # +complete+ steps. + # + # +hook.complete+ will be passed the value returned from +hook.run+, + # and will only be invoked if +run+ has previously been called. + # (Mostly, this means it won't be invoked if an exception occurs in + # a preceding +to_run+ block; all ordinary +to_complete+ blocks are + # invoked in that situation.) + def self.register_hook(hook, outer: false) + if outer + to_run RunHook.new(hook), prepend: true + to_complete :after, CompleteHook.new(hook) + else + to_run RunHook.new(hook) + to_complete CompleteHook.new(hook) + end + end + + # Run this execution. + # + # Returns an instance, whose +complete!+ method *must* be invoked + # after the work has been performed. + # + # Where possible, prefer +wrap+. + def self.run! + if active? + Null + else + new.tap do |instance| + success = nil + begin + instance.run! + success = true + ensure + instance.complete! unless success + end + end + end + end + + # Perform the work in the supplied block as an execution. + def self.wrap + return yield if active? + + instance = run! + begin + yield + ensure + instance.complete! + end + end + + class << self # :nodoc: + attr_accessor :active + end + + def self.inherited(other) # :nodoc: + super + other.active = Concurrent::Hash.new + end + + self.active = Concurrent::Hash.new + + def self.active? # :nodoc: + @active[Thread.current] + end + + def run! # :nodoc: + self.class.active[Thread.current] = true + run_callbacks(:run) + end + + # Complete this in-flight execution. This method *must* be called + # exactly once on the result of any call to +run!+. + # + # Where possible, prefer +wrap+. + def complete! 
+ run_callbacks(:complete) + ensure + self.class.active.delete Thread.current + end + + private + def hook_state + @_hook_state ||= {} + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/executor.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/executor.rb new file mode 100644 index 0000000..ce391b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/executor.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" + +module ActiveSupport + class Executor < ExecutionWrapper + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/file_update_checker.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/file_update_checker.rb new file mode 100644 index 0000000..1a0bb10 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/file_update_checker.rb @@ -0,0 +1,163 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/calculations" + +module ActiveSupport + # FileUpdateChecker specifies the API used by Rails to watch files + # and control reloading. The API depends on four methods: + # + # * +initialize+ which expects two parameters and one block as + # described below. + # + # * +updated?+ which returns a boolean if there were updates in + # the filesystem or not. + # + # * +execute+ which executes the given block on initialization + # and updates the latest watched files and timestamp. + # + # * +execute_if_updated+ which just executes the block if it was updated. + # + # After initialization, a call to +execute_if_updated+ must execute + # the block only if there was really a change in the filesystem. + # + # This class is used by Rails to reload the I18n framework whenever + # they are changed upon a new request. + # + # i18n_reloader = ActiveSupport::FileUpdateChecker.new(paths) do + # I18n.reload! + # end + # + # ActiveSupport::Reloader.to_prepare do + # i18n_reloader.execute_if_updated + # end + class FileUpdateChecker + # It accepts two parameters on initialization. The first is an array + # of files and the second is an optional hash of directories. The hash must + # have directories as keys and the value is an array of extensions to be + # watched under that directory. + # + # This method must also receive a block that will be called once a path + # changes. The array of files and list of directories cannot be changed + # after FileUpdateChecker has been initialized. + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize a FileUpdateChecker" + end + + @files = files.freeze + @glob = compile_glob(dirs) + @block = block + + @watched = nil + @updated_at = nil + + @last_watched = watched + @last_update_at = updated_at(@last_watched) + end + + # Check if any of the entries were updated. If so, the watched and/or + # updated_at values are cached until the block is executed via +execute+ + # or +execute_if_updated+. + def updated? + current_watched = watched + if @last_watched.size != current_watched.size + @watched = current_watched + true + else + current_updated_at = updated_at(current_watched) + if @last_update_at < current_updated_at + @watched = current_watched + @updated_at = current_updated_at + true + else + false + end + end + end + + # Executes the given block and updates the latest watched files and + # timestamp. 
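+    # A minimal usage sketch (the watched path is hypothetical and
+    # FileUtils must be required separately):
+    #
+    #   checker = ActiveSupport::FileUpdateChecker.new([], "config/locales" => [:yml]) { I18n.reload! }
+    #   FileUtils.touch("config/locales/en.yml")
+    #   checker.execute_if_updated # => true, runs the block and re-caches timestamps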
+ def execute + @last_watched = watched + @last_update_at = updated_at(@last_watched) + @block.call + ensure + @watched = nil + @updated_at = nil + end + + # Execute the block given if updated. + def execute_if_updated + if updated? + yield if block_given? + execute + true + else + false + end + end + + private + + def watched + @watched || begin + all = @files.select { |f| File.exist?(f) } + all.concat(Dir[@glob]) if @glob + all + end + end + + def updated_at(paths) + @updated_at || max_mtime(paths) || Time.at(0) + end + + # This method returns the maximum mtime of the files in +paths+, or +nil+ + # if the array is empty. + # + # Files with a mtime in the future are ignored. Such abnormal situation + # can happen for example if the user changes the clock by hand. It is + # healthy to consider this edge case because with mtimes in the future + # reloading is not triggered. + def max_mtime(paths) + time_now = Time.now + max_mtime = nil + + # Time comparisons are performed with #compare_without_coercion because + # AS redefines these operators in a way that is much slower and does not + # bring any benefit in this particular code. + # + # Read t1.compare_without_coercion(t2) < 0 as t1 < t2. + paths.each do |path| + mtime = File.mtime(path) + + next if time_now.compare_without_coercion(mtime) < 0 + + if max_mtime.nil? || max_mtime.compare_without_coercion(mtime) < 0 + max_mtime = mtime + end + end + + max_mtime + end + + def compile_glob(hash) + hash.freeze # Freeze so changes aren't accidentally pushed + return if hash.empty? + + globs = hash.map do |key, value| + "#{escape(key)}/**/*#{compile_ext(value)}" + end + "{#{globs.join(",")}}" + end + + def escape(key) + key.gsub(",", '\,') + end + + def compile_ext(array) + array = Array(array) + return if array.empty? + ".{#{array.join(",")}}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/gem_version.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/gem_version.rb new file mode 100644 index 0000000..9e53a55 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/gem_version.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module ActiveSupport + # Returns the version of the currently loaded Active Support as a Gem::Version. + def self.gem_version + Gem::Version.new VERSION::STRING + end + + module VERSION + MAJOR = 5 + MINOR = 2 + TINY = 4 + PRE = "4" + + STRING = [MAJOR, MINOR, TINY, PRE].compact.join(".") + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/gzip.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/gzip.rb new file mode 100644 index 0000000..7ffa6d9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/gzip.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "zlib" +require "stringio" + +module ActiveSupport + # A convenient wrapper for the zlib standard library that allows + # compression/decompression of strings with gzip. + # + # gzip = ActiveSupport::Gzip.compress('compress me!') + # # => "\x1F\x8B\b\x00o\x8D\xCDO\x00\x03K\xCE\xCF-(J-.V\xC8MU\x04\x00R>n\x83\f\x00\x00\x00" + # + # ActiveSupport::Gzip.decompress(gzip) + # # => "compress me!" + module Gzip + class Stream < StringIO + def initialize(*) + super + set_encoding "BINARY" + end + def close; rewind; end + end + + # Decompresses a gzipped string. 
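+    # For example (a sketch; "file.gz" is a hypothetical path):
+    #
+    #   ActiveSupport::Gzip.decompress(File.binread("file.gz")) # => original contents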
+ def self.decompress(source) + Zlib::GzipReader.wrap(StringIO.new(source), &:read) + end + + # Compresses a string using gzip. + def self.compress(source, level = Zlib::DEFAULT_COMPRESSION, strategy = Zlib::DEFAULT_STRATEGY) + output = Stream.new + gz = Zlib::GzipWriter.new(output, level, strategy) + gz.write(source) + gz.close + output.string + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/hash_with_indifferent_access.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/hash_with_indifferent_access.rb new file mode 100644 index 0000000..24d3aed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/hash_with_indifferent_access.rb @@ -0,0 +1,395 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" + +module ActiveSupport + # Implements a hash where keys :foo and "foo" are considered + # to be the same. + # + # rgb = ActiveSupport::HashWithIndifferentAccess.new + # + # rgb[:black] = '#000000' + # rgb[:black] # => '#000000' + # rgb['black'] # => '#000000' + # + # rgb['white'] = '#FFFFFF' + # rgb[:white] # => '#FFFFFF' + # rgb['white'] # => '#FFFFFF' + # + # Internally symbols are mapped to strings when used as keys in the entire + # writing interface (calling []=, merge, etc). This + # mapping belongs to the public interface. For example, given: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # + # You are guaranteed that the key is returned as a string: + # + # hash.keys # => ["a"] + # + # Technically other types of keys are accepted: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # hash[0] = 0 + # hash # => {"a"=>1, 0=>0} + # + # but this class is intended for use cases where strings or symbols are the + # expected keys and it is convenient to understand both as the same. For + # example the +params+ hash in Ruby on Rails. + # + # Note that core extensions define Hash#with_indifferent_access: + # + # rgb = { black: '#000000', white: '#FFFFFF' }.with_indifferent_access + # + # which may be handy. + # + # To access this class outside of Rails, require the core extension with: + # + # require "active_support/core_ext/hash/indifferent_access" + # + # which will, in turn, require this file. + class HashWithIndifferentAccess < Hash + # Returns +true+ so that Array#extract_options! finds members of + # this class. + def extractable_options? + true + end + + def with_indifferent_access + dup + end + + def nested_under_indifferent_access + self + end + + def initialize(constructor = {}) + if constructor.respond_to?(:to_hash) + super() + update(constructor) + + hash = constructor.to_hash + self.default = hash.default if hash.default + self.default_proc = hash.default_proc if hash.default_proc + else + super(constructor) + end + end + + def self.[](*args) + new.merge!(Hash[*args]) + end + + alias_method :regular_writer, :[]= unless method_defined?(:regular_writer) + alias_method :regular_update, :update unless method_defined?(:regular_update) + + # Assigns a new value to the hash: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:key] = 'value' + # + # This value can be later fetched using either +:key+ or 'key'. 
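+    #
+    #   hash = ActiveSupport::HashWithIndifferentAccess.new
+    #   hash[:key] = 'value'
+    #   hash['key'] # => "value"
+    #
+    # Hash values assigned here are converted as well, so nested lookup is
+    # also indifferent (a sketch of the conversion performed below):
+    #
+    #   hash[:nested] = { a: 1 }
+    #   hash[:nested]['a'] # => 1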
+ def []=(key, value) + regular_writer(convert_key(key), convert_value(value, for: :assignment)) + end + + alias_method :store, :[]= + + # Updates the receiver in-place, merging in the hash passed as argument: + # + # hash_1 = ActiveSupport::HashWithIndifferentAccess.new + # hash_1[:key] = 'value' + # + # hash_2 = ActiveSupport::HashWithIndifferentAccess.new + # hash_2[:key] = 'New Value!' + # + # hash_1.update(hash_2) # => {"key"=>"New Value!"} + # + # The argument can be either an + # ActiveSupport::HashWithIndifferentAccess or a regular +Hash+. + # In either case the merge respects the semantics of indifferent access. + # + # If the argument is a regular hash with keys +:key+ and +"key"+ only one + # of the values end up in the receiver, but which one is unspecified. + # + # When given a block, the value for duplicated keys will be determined + # by the result of invoking the block with the duplicated key, the value + # in the receiver, and the value in +other_hash+. The rules for duplicated + # keys follow the semantics of indifferent access: + # + # hash_1[:key] = 10 + # hash_2['key'] = 12 + # hash_1.update(hash_2) { |key, old, new| old + new } # => {"key"=>22} + def update(other_hash) + if other_hash.is_a? HashWithIndifferentAccess + super(other_hash) + else + other_hash.to_hash.each_pair do |key, value| + if block_given? && key?(key) + value = yield(convert_key(key), self[key], value) + end + regular_writer(convert_key(key), convert_value(value)) + end + self + end + end + + alias_method :merge!, :update + + # Checks the hash for a key matching the argument passed in: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['key'] = 'value' + # hash.key?(:key) # => true + # hash.key?('key') # => true + def key?(key) + super(convert_key(key)) + end + + alias_method :include?, :key? + alias_method :has_key?, :key? + alias_method :member?, :key? 
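+    # A quick sketch combining the predicates above with the core extension
+    # entry point (assumes "active_support/core_ext/hash/indifferent_access"
+    # has been required):
+    #
+    #   params = { user: { name: "Ada" } }.with_indifferent_access
+    #   params.key?("user")           # => true
+    #   params[:user].member?("name") # => true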
+ + # Same as Hash#[] where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters['foo'] # => 1 + # counters[:foo] # => 1 + # counters[:zoo] # => nil + def [](key) + super(convert_key(key)) + end + + # Same as Hash#assoc where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.assoc('foo') # => ["foo", 1] + # counters.assoc(:foo) # => ["foo", 1] + # counters.assoc(:zoo) # => nil + def assoc(key) + super(convert_key(key)) + end + + # Same as Hash#fetch where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.fetch('foo') # => 1 + # counters.fetch(:bar, 0) # => 0 + # counters.fetch(:bar) { |key| 0 } # => 0 + # counters.fetch(:zoo) # => KeyError: key not found: "zoo" + def fetch(key, *extras) + super(convert_key(key), *extras) + end + + if Hash.new.respond_to?(:dig) + # Same as Hash#dig where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = { bar: 1 } + # + # counters.dig('foo', 'bar') # => 1 + # counters.dig(:foo, :bar) # => 1 + # counters.dig(:zoo) # => nil + def dig(*args) + args[0] = convert_key(args[0]) if args.size > 0 + super(*args) + end + end + + # Same as Hash#default where the key passed as argument can be + # either a string or a symbol: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(1) + # hash.default # => 1 + # + # hash = ActiveSupport::HashWithIndifferentAccess.new { |hash, key| key } + # hash.default # => nil + # hash.default('foo') # => 'foo' + # hash.default(:foo) # => 'foo' + def default(*args) + super(*args.map { |arg| convert_key(arg) }) + end + + # Returns an array of the values at the specified indices: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.values_at('a', 'b') # => ["x", "y"] + def values_at(*indices) + indices.collect { |key| self[convert_key(key)] } + end + + # Returns an array of the values at the specified indices, but also + # raises an exception when one of the keys can't be found. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.fetch_values('a', 'b') # => ["x", "y"] + # hash.fetch_values('a', 'c') { |key| 'z' } # => ["x", "z"] + # hash.fetch_values('a', 'c') # => KeyError: key not found: "c" + def fetch_values(*indices, &block) + indices.collect { |key| fetch(key, &block) } + end if Hash.method_defined?(:fetch_values) + + # Returns a shallow copy of the hash. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new({ a: { b: 'b' } }) + # dup = hash.dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => "c" + # dup[:a][:c] # => "c" + def dup + self.class.new(self).tap do |new_hash| + set_defaults(new_hash) + end + end + + # This method has the same semantics of +update+, except it does not + # modify the receiver but rather returns a new hash with indifferent + # access with the result of the merge. 
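+    #
+    #   hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1)
+    #   merged = hash.merge(b: 2)
+    #   merged[:b]    # => 2
+    #   hash.key?(:b) # => false (the receiver is left untouched)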
+ def merge(hash, &block) + dup.update(hash, &block) + end + + # Like +merge+ but the other way around: Merges the receiver into the + # argument and returns a new hash with indifferent access as result: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['a'] = nil + # hash.reverse_merge(a: 0, b: 1) # => {"a"=>nil, "b"=>1} + def reverse_merge(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults, :reverse_merge + + # Same semantics as +reverse_merge+ but modifies the receiver in-place. + def reverse_merge!(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults!, :reverse_merge! + + # Replaces the contents of this hash with other_hash. + # + # h = { "a" => 100, "b" => 200 } + # h.replace({ "c" => 300, "d" => 400 }) # => {"c"=>300, "d"=>400} + def replace(other_hash) + super(self.class.new(other_hash)) + end + + # Removes the specified key from the hash. + def delete(key) + super(convert_key(key)) + end + + def stringify_keys!; self end + def deep_stringify_keys!; self end + def stringify_keys; dup end + def deep_stringify_keys; dup end + undef :symbolize_keys! + undef :deep_symbolize_keys! + def symbolize_keys; to_hash.symbolize_keys! end + alias_method :to_options, :symbolize_keys + def deep_symbolize_keys; to_hash.deep_symbolize_keys! end + def to_options!; self end + + def select(*args, &block) + return to_enum(:select) unless block_given? + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + return to_enum(:reject) unless block_given? + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def transform_values(*args, &block) + return to_enum(:transform_values) unless block_given? + dup.tap { |hash| hash.transform_values!(*args, &block) } + end + + def transform_keys(*args, &block) + return to_enum(:transform_keys) unless block_given? + dup.tap { |hash| hash.transform_keys!(*args, &block) } + end + + def transform_keys! + return enum_for(:transform_keys!) { size } unless block_given? + keys.each do |key| + self[yield(key)] = delete(key) + end + self + end + + def slice(*keys) + keys.map! { |key| convert_key(key) } + self.class.new(super) + end + + def slice!(*keys) + keys.map! { |key| convert_key(key) } + super + end + + def compact + dup.tap(&:compact!) + end + + # Convert to a regular hash with string keys. + def to_hash + _new_hash = Hash.new + set_defaults(_new_hash) + + each do |key, value| + _new_hash[key] = convert_value(value, for: :to_hash) + end + _new_hash + end + + private + def convert_key(key) # :doc: + key.kind_of?(Symbol) ? key.to_s : key + end + + def convert_value(value, options = {}) # :doc: + if value.is_a? Hash + if options[:for] == :to_hash + value.to_hash + else + value.nested_under_indifferent_access + end + elsif value.is_a?(Array) + if options[:for] != :assignment || value.frozen? + value = value.dup + end + value.map! 
{ |e| convert_value(e, options) } + else + value + end + end + + def set_defaults(target) # :doc: + if default_proc + target.default_proc = default_proc.dup + else + target.default = default + end + end + end +end + +# :stopdoc: + +HashWithIndifferentAccess = ActiveSupport::HashWithIndifferentAccess diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/i18n.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/i18n.rb new file mode 100644 index 0000000..d60b3ef --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/i18n.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +begin + require "i18n" +rescue LoadError => e + $stderr.puts "The i18n gem is not available. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/lazy_load_hooks" + +ActiveSupport.run_load_hooks(:i18n) +I18n.load_path << File.expand_path("locale/en.yml", __dir__) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/i18n_railtie.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/i18n_railtie.rb new file mode 100644 index 0000000..ee24ebd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/i18n_railtie.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/file_update_checker" +require "active_support/core_ext/array/wrap" + +# :enddoc: + +module I18n + class Railtie < Rails::Railtie + config.i18n = ActiveSupport::OrderedOptions.new + config.i18n.railties_load_path = [] + config.i18n.load_path = [] + config.i18n.fallbacks = ActiveSupport::OrderedOptions.new + + # Set the i18n configuration after initialization since a lot of + # configuration is still usually done in application initializers. + config.after_initialize do |app| + I18n::Railtie.initialize_i18n(app) + end + + # Trigger i18n config before any eager loading has happened + # so it's ready if any classes require it when eager loaded. + config.before_eager_load do |app| + I18n::Railtie.initialize_i18n(app) + end + + @i18n_inited = false + + # Setup i18n configuration. + def self.initialize_i18n(app) + return if @i18n_inited + + fallbacks = app.config.i18n.delete(:fallbacks) + + # Avoid issues with setting the default_locale by disabling available locales + # check while configuring. + enforce_available_locales = app.config.i18n.delete(:enforce_available_locales) + enforce_available_locales = I18n.enforce_available_locales if enforce_available_locales.nil? + I18n.enforce_available_locales = false + + reloadable_paths = [] + app.config.i18n.each do |setting, value| + case setting + when :railties_load_path + reloadable_paths = value + app.config.i18n.load_path.unshift(*value.flat_map(&:existent)) + when :load_path + I18n.load_path += value + else + I18n.send("#{setting}=", value) + end + end + + init_fallbacks(fallbacks) if fallbacks && validate_fallbacks(fallbacks) + + # Restore available locales check so it will take place from now on. + I18n.enforce_available_locales = enforce_available_locales + + directories = watched_dirs_with_extensions(reloadable_paths) + reloader = app.config.file_watcher.new(I18n.load_path.dup, directories) do + I18n.load_path.keep_if { |p| File.exist?(p) } + I18n.load_path |= reloadable_paths.flat_map(&:existent) + + I18n.reload! 
+ end + + app.reloaders << reloader + app.reloader.to_run do + reloader.execute_if_updated { require_unload_lock! } + end + reloader.execute + + @i18n_inited = true + end + + def self.include_fallbacks_module + I18n.backend.class.include(I18n::Backend::Fallbacks) + end + + def self.init_fallbacks(fallbacks) + include_fallbacks_module + + args = \ + case fallbacks + when ActiveSupport::OrderedOptions + [*(fallbacks[:defaults] || []) << fallbacks[:map]].compact + when Hash, Array + Array.wrap(fallbacks) + else # TrueClass + [I18n.default_locale] + end + + if args.empty? || args.first.is_a?(Hash) + args.unshift I18n.default_locale + end + + I18n.fallbacks = I18n::Locale::Fallbacks.new(*args) + end + + def self.validate_fallbacks(fallbacks) + case fallbacks + when ActiveSupport::OrderedOptions + !fallbacks.empty? + when TrueClass, Array, Hash + true + else + raise "Unexpected fallback type #{fallbacks.inspect}" + end + end + + def self.watched_dirs_with_extensions(paths) + paths.each_with_object({}) do |path, result| + result[path.absolute_current] = path.extensions + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflections.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflections.rb new file mode 100644 index 0000000..baf1cb3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflections.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "active_support/inflector/inflections" + +#-- +# Defines the standard inflection rules. These are the starting point for +# new projects and are not considered complete. The current set of inflection +# rules is frozen. This means, we do not change them to become more complete. +# This is a safety measure to keep existing applications from breaking. 
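+# Applications that need different behaviour are expected to add their own
+# rules instead. A minimal sketch of a typical config/initializers/inflections.rb
+# (the file name and the sample rules are illustrative assumptions):
+#
+#   ActiveSupport::Inflector.inflections(:en) do |inflect|
+#     inflect.irregular "octopus", "octopuses"
+#     inflect.uncountable "equipment"
+#   end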
+#++ +module ActiveSupport + Inflector.inflections(:en) do |inflect| + inflect.plural(/$/, "s") + inflect.plural(/s$/i, "s") + inflect.plural(/^(ax|test)is$/i, '\1es') + inflect.plural(/(octop|vir)us$/i, '\1i') + inflect.plural(/(octop|vir)i$/i, '\1i') + inflect.plural(/(alias|status)$/i, '\1es') + inflect.plural(/(bu)s$/i, '\1ses') + inflect.plural(/(buffal|tomat)o$/i, '\1oes') + inflect.plural(/([ti])um$/i, '\1a') + inflect.plural(/([ti])a$/i, '\1a') + inflect.plural(/sis$/i, "ses") + inflect.plural(/(?:([^f])fe|([lr])f)$/i, '\1\2ves') + inflect.plural(/(hive)$/i, '\1s') + inflect.plural(/([^aeiouy]|qu)y$/i, '\1ies') + inflect.plural(/(x|ch|ss|sh)$/i, '\1es') + inflect.plural(/(matr|vert|ind)(?:ix|ex)$/i, '\1ices') + inflect.plural(/^(m|l)ouse$/i, '\1ice') + inflect.plural(/^(m|l)ice$/i, '\1ice') + inflect.plural(/^(ox)$/i, '\1en') + inflect.plural(/^(oxen)$/i, '\1') + inflect.plural(/(quiz)$/i, '\1zes') + + inflect.singular(/s$/i, "") + inflect.singular(/(ss)$/i, '\1') + inflect.singular(/(n)ews$/i, '\1ews') + inflect.singular(/([ti])a$/i, '\1um') + inflect.singular(/((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$/i, '\1sis') + inflect.singular(/(^analy)(sis|ses)$/i, '\1sis') + inflect.singular(/([^f])ves$/i, '\1fe') + inflect.singular(/(hive)s$/i, '\1') + inflect.singular(/(tive)s$/i, '\1') + inflect.singular(/([lr])ves$/i, '\1f') + inflect.singular(/([^aeiouy]|qu)ies$/i, '\1y') + inflect.singular(/(s)eries$/i, '\1eries') + inflect.singular(/(m)ovies$/i, '\1ovie') + inflect.singular(/(x|ch|ss|sh)es$/i, '\1') + inflect.singular(/^(m|l)ice$/i, '\1ouse') + inflect.singular(/(bus)(es)?$/i, '\1') + inflect.singular(/(o)es$/i, '\1') + inflect.singular(/(shoe)s$/i, '\1') + inflect.singular(/(cris|test)(is|es)$/i, '\1is') + inflect.singular(/^(a)x[ie]s$/i, '\1xis') + inflect.singular(/(octop|vir)(us|i)$/i, '\1us') + inflect.singular(/(alias|status)(es)?$/i, '\1') + inflect.singular(/^(ox)en/i, '\1') + inflect.singular(/(vert|ind)ices$/i, '\1ex') + inflect.singular(/(matr)ices$/i, '\1ix') + inflect.singular(/(quiz)zes$/i, '\1') + inflect.singular(/(database)s$/i, '\1') + + inflect.irregular("person", "people") + inflect.irregular("man", "men") + inflect.irregular("child", "children") + inflect.irregular("sex", "sexes") + inflect.irregular("move", "moves") + inflect.irregular("zombie", "zombies") + + inflect.uncountable(%w(equipment information rice money species series fish sheep jeans police)) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector.rb new file mode 100644 index 0000000..d77f04c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +# in case active_support/inflector is required without the rest of active_support +require "active_support/inflector/inflections" +require "active_support/inflector/transliterate" +require "active_support/inflector/methods" + +require "active_support/inflections" +require "active_support/core_ext/string/inflections" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/inflections.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/inflections.rb new file mode 100644 index 0000000..7e5dff1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/inflections.rb @@ -0,0 +1,260 @@ +# 
frozen_string_literal: true + +require "concurrent/map" +require "active_support/core_ext/array/prepend_and_append" +require "active_support/core_ext/regexp" +require "active_support/i18n" +require "active_support/deprecation" + +module ActiveSupport + module Inflector + extend self + + # A singleton instance of this class is yielded by Inflector.inflections, + # which can then be used to specify additional inflection rules. If passed + # an optional locale, rules for other languages can be specified. The + # default locale is :en. Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.plural /^(ox)$/i, '\1\2en' + # inflect.singular /^(ox)en/i, '\1' + # + # inflect.irregular 'octopus', 'octopi' + # + # inflect.uncountable 'equipment' + # end + # + # New rules are added at the top. So in the example above, the irregular + # rule for octopus will now be the first of the pluralization and + # singularization rules that is runs. This guarantees that your rules run + # before any of the rules that may already have been loaded. + class Inflections + @__instance__ = Concurrent::Map.new + + class Uncountables < Array + def initialize + @regex_array = [] + super + end + + def delete(entry) + super entry + @regex_array.delete(to_regex(entry)) + end + + def <<(*word) + add(word) + end + + def add(words) + words = words.flatten.map(&:downcase) + concat(words) + @regex_array += words.map { |word| to_regex(word) } + self + end + + def uncountable?(str) + @regex_array.any? { |regex| regex.match? str } + end + + private + def to_regex(string) + /\b#{::Regexp.escape(string)}\Z/i + end + end + + def self.instance(locale = :en) + @__instance__[locale] ||= new + end + + attr_reader :plurals, :singulars, :uncountables, :humans, :acronyms, :acronym_regex + deprecate :acronym_regex + + attr_reader :acronyms_camelize_regex, :acronyms_underscore_regex # :nodoc: + + def initialize + @plurals, @singulars, @uncountables, @humans, @acronyms = [], [], Uncountables.new, [], {} + define_acronym_regex_patterns + end + + # Private, for the test suite. + def initialize_dup(orig) # :nodoc: + %w(plurals singulars uncountables humans acronyms).each do |scope| + instance_variable_set("@#{scope}", orig.send(scope).dup) + end + define_acronym_regex_patterns + end + + # Specifies a new acronym. An acronym must be specified as it will appear + # in a camelized string. An underscore string that contains the acronym + # will retain the acronym when passed to +camelize+, +humanize+, or + # +titleize+. A camelized string that contains the acronym will maintain + # the acronym when titleized or humanized, and will convert the acronym + # into a non-delimited single lowercase word when passed to +underscore+. + # + # acronym 'HTML' + # titleize 'html' # => 'HTML' + # camelize 'html' # => 'HTML' + # underscore 'MyHTML' # => 'my_html' + # + # The acronym, however, must occur as a delimited unit and not be part of + # another word for conversions to recognize it: + # + # acronym 'HTTP' + # camelize 'my_http_delimited' # => 'MyHTTPDelimited' + # camelize 'https' # => 'Https', not 'HTTPs' + # underscore 'HTTPS' # => 'http_s', not 'https' + # + # acronym 'HTTPS' + # camelize 'https' # => 'HTTPS' + # underscore 'HTTPS' # => 'https' + # + # Note: Acronyms that are passed to +pluralize+ will no longer be + # recognized, since the acronym will not occur as a delimited unit in the + # pluralized result. 
To work around this, you must specify the pluralized + # form as an acronym as well: + # + # acronym 'API' + # camelize(pluralize('api')) # => 'Apis' + # + # acronym 'APIs' + # camelize(pluralize('api')) # => 'APIs' + # + # +acronym+ may be used to specify any word that contains an acronym or + # otherwise needs to maintain a non-standard capitalization. The only + # restriction is that the word must begin with a capital letter. + # + # acronym 'RESTful' + # underscore 'RESTful' # => 'restful' + # underscore 'RESTfulController' # => 'restful_controller' + # titleize 'RESTfulController' # => 'RESTful Controller' + # camelize 'restful' # => 'RESTful' + # camelize 'restful_controller' # => 'RESTfulController' + # + # acronym 'McDonald' + # underscore 'McDonald' # => 'mcdonald' + # camelize 'mcdonald' # => 'McDonald' + def acronym(word) + @acronyms[word.downcase] = word + define_acronym_regex_patterns + end + + # Specifies a new pluralization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def plural(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @plurals.prepend([rule, replacement]) + end + + # Specifies a new singularization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def singular(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @singulars.prepend([rule, replacement]) + end + + # Specifies a new irregular that applies to both pluralization and + # singularization at the same time. This can only be used for strings, not + # regular expressions. You simply pass the irregular in singular and + # plural form. + # + # irregular 'octopus', 'octopi' + # irregular 'person', 'people' + def irregular(singular, plural) + @uncountables.delete(singular) + @uncountables.delete(plural) + + s0 = singular[0] + srest = singular[1..-1] + + p0 = plural[0] + prest = plural[1..-1] + + if s0.upcase == p0.upcase + plural(/(#{s0})#{srest}$/i, '\1' + prest) + plural(/(#{p0})#{prest}$/i, '\1' + prest) + + singular(/(#{s0})#{srest}$/i, '\1' + srest) + singular(/(#{p0})#{prest}$/i, '\1' + srest) + else + plural(/#{s0.upcase}(?i)#{srest}$/, p0.upcase + prest) + plural(/#{s0.downcase}(?i)#{srest}$/, p0.downcase + prest) + plural(/#{p0.upcase}(?i)#{prest}$/, p0.upcase + prest) + plural(/#{p0.downcase}(?i)#{prest}$/, p0.downcase + prest) + + singular(/#{s0.upcase}(?i)#{srest}$/, s0.upcase + srest) + singular(/#{s0.downcase}(?i)#{srest}$/, s0.downcase + srest) + singular(/#{p0.upcase}(?i)#{prest}$/, s0.upcase + srest) + singular(/#{p0.downcase}(?i)#{prest}$/, s0.downcase + srest) + end + end + + # Specifies words that are uncountable and should not be inflected. + # + # uncountable 'money' + # uncountable 'money', 'information' + # uncountable %w( money information rice ) + def uncountable(*words) + @uncountables.add(words) + end + + # Specifies a humanized form of a string by a regular expression rule or + # by a string mapping. When using a regular expression based replacement, + # the normal humanize formatting is called after the replacement. When a + # string is used, the human form should be specified as desired (example: + # 'The name', not 'the_name'). 
+ # + # human /_cnt$/i, '\1_count' + # human 'legacy_col_person_name', 'Name' + def human(rule, replacement) + @humans.prepend([rule, replacement]) + end + + # Clears the loaded inflections within a given scope (default is + # :all). Give the scope as a symbol of the inflection type, the + # options are: :plurals, :singulars, :uncountables, + # :humans. + # + # clear :all + # clear :plurals + def clear(scope = :all) + case scope + when :all + @plurals, @singulars, @uncountables, @humans = [], [], Uncountables.new, [] + else + instance_variable_set "@#{scope}", [] + end + end + + private + + def define_acronym_regex_patterns + @acronym_regex = @acronyms.empty? ? /(?=a)b/ : /#{@acronyms.values.join("|")}/ + @acronyms_camelize_regex = /^(?:#{@acronym_regex}(?=\b|[A-Z_])|\w)/ + @acronyms_underscore_regex = /(?:(?<=([A-Za-z\d]))|\b)(#{@acronym_regex})(?=\b|[^a-z])/ + end + end + + # Yields a singleton instance of Inflector::Inflections so you can specify + # additional inflector rules. If passed an optional locale, rules for other + # languages can be specified. If not specified, defaults to :en. + # Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.uncountable 'rails' + # end + def inflections(locale = :en) + if block_given? + yield Inflections.instance(locale) + else + Inflections.instance(locale) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/methods.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/methods.rb new file mode 100644 index 0000000..ad90a0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/methods.rb @@ -0,0 +1,410 @@ +# frozen_string_literal: true + +require "active_support/inflections" +require "active_support/core_ext/regexp" + +module ActiveSupport + # The Inflector transforms words from singular to plural, class names to table + # names, modularized class names to ones without, and class names to foreign + # keys. The default inflections for pluralization, singularization, and + # uncountable words are kept in inflections.rb. + # + # The Rails core team has stated patches for the inflections library will not + # be accepted in order to avoid breaking legacy applications which may be + # relying on errant inflections. If you discover an incorrect inflection and + # require it for your application or wish to define rules for languages other + # than English, please correct or add them yourself (explained below). + module Inflector + extend self + + # Returns the plural form of the word in the string. + # + # If passed an optional +locale+ parameter, the word will be + # pluralized using rules defined for that language. By default, + # this parameter is set to :en. + # + # pluralize('post') # => "posts" + # pluralize('octopus') # => "octopi" + # pluralize('sheep') # => "sheep" + # pluralize('words') # => "words" + # pluralize('CamelOctopus') # => "CamelOctopi" + # pluralize('ley', :es) # => "leyes" + def pluralize(word, locale = :en) + apply_inflections(word, inflections(locale).plurals, locale) + end + + # The reverse of #pluralize, returns the singular form of a word in a + # string. + # + # If passed an optional +locale+ parameter, the word will be + # singularized using rules defined for that language. By default, + # this parameter is set to :en. 
+ # + # singularize('posts') # => "post" + # singularize('octopi') # => "octopus" + # singularize('sheep') # => "sheep" + # singularize('word') # => "word" + # singularize('CamelOctopi') # => "CamelOctopus" + # singularize('leyes', :es) # => "ley" + def singularize(word, locale = :en) + apply_inflections(word, inflections(locale).singulars, locale) + end + + # Converts strings to UpperCamelCase. + # If the +uppercase_first_letter+ parameter is set to false, then produces + # lowerCamelCase. + # + # Also converts '/' to '::' which is useful for converting + # paths to namespaces. + # + # camelize('active_model') # => "ActiveModel" + # camelize('active_model', false) # => "activeModel" + # camelize('active_model/errors') # => "ActiveModel::Errors" + # camelize('active_model/errors', false) # => "activeModel::Errors" + # + # As a rule of thumb you can think of +camelize+ as the inverse of + # #underscore, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def camelize(term, uppercase_first_letter = true) + string = term.to_s + if uppercase_first_letter + string = string.sub(/^[a-z\d]*/) { |match| inflections.acronyms[match] || match.capitalize } + else + string = string.sub(inflections.acronyms_camelize_regex) { |match| match.downcase } + end + string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{inflections.acronyms[$2] || $2.capitalize}" } + string.gsub!("/".freeze, "::".freeze) + string + end + + # Makes an underscored, lowercase form from the expression in the string. + # + # Changes '::' to '/' to convert namespaces to paths. + # + # underscore('ActiveModel') # => "active_model" + # underscore('ActiveModel::Errors') # => "active_model/errors" + # + # As a rule of thumb you can think of +underscore+ as the inverse of + # #camelize, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def underscore(camel_cased_word) + return camel_cased_word unless /[A-Z-]|::/.match?(camel_cased_word) + word = camel_cased_word.to_s.gsub("::".freeze, "/".freeze) + word.gsub!(inflections.acronyms_underscore_regex) { "#{$1 && '_'.freeze }#{$2.downcase}" } + word.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2'.freeze) + word.gsub!(/([a-z\d])([A-Z])/, '\1_\2'.freeze) + word.tr!("-".freeze, "_".freeze) + word.downcase! + word + end + + # Tweaks an attribute name for display to end users. + # + # Specifically, performs these transformations: + # + # * Applies human inflection rules to the argument. + # * Deletes leading underscores, if any. + # * Removes a "_id" suffix if present. + # * Replaces underscores with spaces, if any. + # * Downcases all words except acronyms. + # * Capitalizes the first word. + # The capitalization of the first word can be turned off by setting the + # +:capitalize+ option to false (default is true). + # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true (default is false). 
+ # + # humanize('employee_salary') # => "Employee salary" + # humanize('author_id') # => "Author" + # humanize('author_id', capitalize: false) # => "author" + # humanize('_id') # => "Id" + # humanize('author_id', keep_id_suffix: true) # => "Author Id" + # + # If "SSL" was defined to be an acronym: + # + # humanize('ssl_error') # => "SSL error" + # + def humanize(lower_case_and_underscored_word, capitalize: true, keep_id_suffix: false) + result = lower_case_and_underscored_word.to_s.dup + + inflections.humans.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + + result.sub!(/\A_+/, "".freeze) + unless keep_id_suffix + result.sub!(/_id\z/, "".freeze) + end + result.tr!("_".freeze, " ".freeze) + + result.gsub!(/([a-z\d]*)/i) do |match| + "#{inflections.acronyms[match.downcase] || match.downcase}" + end + + if capitalize + result.sub!(/\A\w/) { |match| match.upcase } + end + + result + end + + # Converts just the first character to uppercase. + # + # upcase_first('what a Lovely Day') # => "What a Lovely Day" + # upcase_first('w') # => "W" + # upcase_first('') # => "" + def upcase_first(string) + string.length > 0 ? string[0].upcase.concat(string[1..-1]) : "" + end + + # Capitalizes all the words and replaces some characters in the string to + # create a nicer looking title. +titleize+ is meant for creating pretty + # output. It is not used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # +titleize+ is also aliased as +titlecase+. + # + # titleize('man from the boondocks') # => "Man From The Boondocks" + # titleize('x-men: the last stand') # => "X Men: The Last Stand" + # titleize('TheManWithoutAPast') # => "The Man Without A Past" + # titleize('raiders_of_the_lost_ark') # => "Raiders Of The Lost Ark" + # titleize('string_ending_with_id', keep_id_suffix: true) # => "String Ending With Id" + def titleize(word, keep_id_suffix: false) + humanize(underscore(word), keep_id_suffix: keep_id_suffix).gsub(/\b(? "raw_scaled_scorers" + # tableize('ham_and_egg') # => "ham_and_eggs" + # tableize('fancyCategory') # => "fancy_categories" + def tableize(class_name) + pluralize(underscore(class_name)) + end + + # Creates a class name from a plural table name like Rails does for table + # names to models. Note that this returns a string and not a Class (To + # convert to an actual class follow +classify+ with #constantize). + # + # classify('ham_and_eggs') # => "HamAndEgg" + # classify('posts') # => "Post" + # + # Singular names are not handled correctly: + # + # classify('calculus') # => "Calculus" + def classify(table_name) + # strip out any leading schema name + camelize(singularize(table_name.to_s.sub(/.*\./, "".freeze))) + end + + # Replaces underscores with dashes in the string. + # + # dasherize('puni_puni') # => "puni-puni" + def dasherize(underscored_word) + underscored_word.tr("_".freeze, "-".freeze) + end + + # Removes the module part from the expression in the string. + # + # demodulize('ActiveSupport::Inflector::Inflections') # => "Inflections" + # demodulize('Inflections') # => "Inflections" + # demodulize('::Inflections') # => "Inflections" + # demodulize('') # => "" + # + # See also #deconstantize. + def demodulize(path) + path = path.to_s + if i = path.rindex("::") + path[(i + 2)..-1] + else + path + end + end + + # Removes the rightmost segment from the constant expression in the string. 
+ # + # deconstantize('Net::HTTP') # => "Net" + # deconstantize('::Net::HTTP') # => "::Net" + # deconstantize('String') # => "" + # deconstantize('::String') # => "" + # deconstantize('') # => "" + # + # See also #demodulize. + def deconstantize(path) + path.to_s[0, path.rindex("::") || 0] # implementation based on the one in facets' Module#spacename + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # foreign_key('Message') # => "message_id" + # foreign_key('Message', false) # => "messageid" + # foreign_key('Admin::Post') # => "post_id" + def foreign_key(class_name, separate_class_name_and_id_with_underscore = true) + underscore(demodulize(class_name)) + (separate_class_name_and_id_with_underscore ? "_id" : "id") + end + + # Tries to find a constant with the name specified in the argument string. + # + # constantize('Module') # => Module + # constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # constantize('C') # => 'outside', same as ::C + # end + # + # NameError is raised when the name is not in CamelCase or the constant is + # unknown. + def constantize(camel_cased_word) + names = camel_cased_word.split("::".freeze) + + # Trigger a built-in NameError exception including the ill-formed constant in the message. + Object.const_get(camel_cased_word) if names.empty? + + # Remove the first blank element in case of '::ClassName' notation. + names.shift if names.size > 1 && names.first.empty? + + names.inject(Object) do |constant, name| + if constant == Object + constant.const_get(name) + else + candidate = constant.const_get(name) + next candidate if constant.const_defined?(name, false) + next candidate unless Object.const_defined?(name) + + # Go down the ancestors to check if it is owned directly. The check + # stops when we reach Object or the end of ancestors tree. + constant = constant.ancestors.inject(constant) do |const, ancestor| + break const if ancestor == Object + break ancestor if ancestor.const_defined?(name, false) + const + end + + # owner is in Object, so raise + constant.const_get(name, false) + end + end + end + + # Tries to find a constant with the name specified in the argument string. + # + # safe_constantize('Module') # => Module + # safe_constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # safe_constantize('C') # => 'outside', same as ::C + # end + # + # +nil+ is returned when the name is not in CamelCase or the constant (or + # part of it) is unknown. 
+ # + # safe_constantize('blargle') # => nil + # safe_constantize('UnknownModule') # => nil + # safe_constantize('UnknownModule::Foo::Bar') # => nil + def safe_constantize(camel_cased_word) + constantize(camel_cased_word) + rescue NameError => e + raise if e.name && !(camel_cased_word.to_s.split("::").include?(e.name.to_s) || + e.name.to_s == camel_cased_word.to_s) + rescue ArgumentError => e + raise unless /not missing constant #{const_regexp(camel_cased_word)}!$/.match?(e.message) + rescue LoadError => e + raise unless /Unable to autoload constant #{const_regexp(camel_cased_word)}/.match?(e.message) + end + + # Returns the suffix that should be added to a number to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinal(1) # => "st" + # ordinal(2) # => "nd" + # ordinal(1002) # => "nd" + # ordinal(1003) # => "rd" + # ordinal(-11) # => "th" + # ordinal(-1021) # => "st" + def ordinal(number) + abs_number = number.to_i.abs + + if (11..13).include?(abs_number % 100) + "th" + else + case abs_number % 10 + when 1; "st" + when 2; "nd" + when 3; "rd" + else "th" + end + end + end + + # Turns a number into an ordinal string used to denote the position in an + # ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinalize(1) # => "1st" + # ordinalize(2) # => "2nd" + # ordinalize(1002) # => "1002nd" + # ordinalize(1003) # => "1003rd" + # ordinalize(-11) # => "-11th" + # ordinalize(-1021) # => "-1021st" + def ordinalize(number) + "#{number}#{ordinal(number)}" + end + + private + + # Mounts a regular expression, returned as a string to ease interpolation, + # that will match part by part the given constant. + # + # const_regexp("Foo::Bar::Baz") # => "Foo(::Bar(::Baz)?)?" + # const_regexp("::") # => "::" + def const_regexp(camel_cased_word) + parts = camel_cased_word.split("::".freeze) + + return Regexp.escape(camel_cased_word) if parts.blank? + + last = parts.pop + + parts.reverse.inject(last) do |acc, part| + part.empty? ? acc : "#{part}(::#{acc})?" + end + end + + # Applies inflection rules for +singularize+ and +pluralize+. + # + # If passed an optional +locale+ parameter, the uncountables will be + # found for that locale. + # + # apply_inflections('post', inflections.plurals, :en) # => "posts" + # apply_inflections('posts', inflections.singulars, :en) # => "post" + def apply_inflections(word, rules, locale = :en) + result = word.to_s.dup + + if word.empty? || inflections(locale).uncountables.uncountable?(result) + result + else + rules.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/transliterate.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/transliterate.rb new file mode 100644 index 0000000..6f2ca49 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/transliterate.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/multibyte" +require "active_support/i18n" + +module ActiveSupport + module Inflector + # Replaces non-ASCII characters with an ASCII approximation, or if none + # exists, a replacement character which defaults to "?". + # + # transliterate('Ærøskøbing') + # # => "AEroskobing" + # + # Default approximations are provided for Western/Latin characters, + # e.g, "ø", "ñ", "é", "ß", etc. 
+ # + # This method is I18n aware, so you can set up custom approximations for a + # locale. This can be useful, for example, to transliterate German's "ü" + # and "ö" to "ue" and "oe", or to add support for transliterating Russian + # to ASCII. + # + # In order to make your custom transliterations available, you must set + # them as the i18n.transliterate.rule i18n key: + # + # # Store the transliterations in locales/de.yml + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # # Or set them using Ruby + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: { + # 'ü' => 'ue', + # 'ö' => 'oe' + # } + # } + # }) + # + # The value for i18n.transliterate.rule can be a simple Hash that + # maps characters to ASCII approximations as shown above, or, for more + # complex requirements, a Proc: + # + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: ->(string) { MyTransliterator.transliterate(string) } + # } + # }) + # + # Now you can have different transliterations for each locale: + # + # I18n.locale = :en + # transliterate('Jürgen') + # # => "Jurgen" + # + # I18n.locale = :de + # transliterate('Jürgen') + # # => "Juergen" + def transliterate(string, replacement = "?".freeze) + raise ArgumentError, "Can only transliterate strings. Received #{string.class.name}" unless string.is_a?(String) + + I18n.transliterate( + ActiveSupport::Multibyte::Unicode.normalize( + ActiveSupport::Multibyte::Unicode.tidy_bytes(string), :c), + replacement: replacement) + end + + # Replaces special characters in a string so that it may be used as part of + # a 'pretty' URL. + # + # parameterize("Donald E. Knuth") # => "donald-e-knuth" + # parameterize("^très|Jolie-- ") # => "tres-jolie" + # + # To use a custom separator, override the +separator+ argument. + # + # parameterize("Donald E. Knuth", separator: '_') # => "donald_e_knuth" + # parameterize("^très|Jolie__ ", separator: '_') # => "tres_jolie" + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # parameterize("Donald E. Knuth", preserve_case: true) # => "Donald-E-Knuth" + # parameterize("^très|Jolie-- ", preserve_case: true) # => "tres-Jolie" + # + # It preserves dashes and underscores unless they are used as separators: + # + # parameterize("^très|Jolie__ ") # => "tres-jolie__" + # parameterize("^très|Jolie-- ", separator: "_") # => "tres_jolie--" + # parameterize("^très_Jolie-- ", separator: ".") # => "tres_jolie--" + # + def parameterize(string, separator: "-", preserve_case: false) + # Replace accented chars with their ASCII equivalents. + parameterized_string = transliterate(string) + + # Turn unwanted chars into the separator. + parameterized_string.gsub!(/[^a-z0-9\-_]+/i, separator) + + unless separator.nil? || separator.empty? + if separator == "-".freeze + re_duplicate_separator = /-{2,}/ + re_leading_trailing_separator = /^-|-$/i + else + re_sep = Regexp.escape(separator) + re_duplicate_separator = /#{re_sep}{2,}/ + re_leading_trailing_separator = /^#{re_sep}|#{re_sep}$/i + end + # No more than one of the separator in a row. + parameterized_string.gsub!(re_duplicate_separator, separator) + # Remove leading/trailing separator. + parameterized_string.gsub!(re_leading_trailing_separator, "".freeze) + end + + parameterized_string.downcase! 
unless preserve_case + parameterized_string + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json.rb new file mode 100644 index 0000000..d788717 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/json/decoding" +require "active_support/json/encoding" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json/decoding.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json/decoding.rb new file mode 100644 index 0000000..8c0e016 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json/decoding.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/delegation" +require "json" + +module ActiveSupport + # Look for and parse json strings that look like ISO 8601 times. + mattr_accessor :parse_json_times + + module JSON + # matches YAML-formatted dates + DATE_REGEX = /^\d{4}-\d{2}-\d{2}$/ + DATETIME_REGEX = /^(?:\d{4}-\d{2}-\d{2}|\d{4}-\d{1,2}-\d{1,2}[T \t]+\d{1,2}:\d{2}:\d{2}(\.[0-9]*)?(([ \t]*)Z|[-+]\d{2}?(:\d{2})?)?)$/ + + class << self + # Parses a JSON string (JavaScript Object Notation) into a hash. + # See http://www.json.org for more info. + # + # ActiveSupport::JSON.decode("{\"team\":\"rails\",\"players\":\"36\"}") + # => {"team" => "rails", "players" => "36"} + def decode(json) + data = ::JSON.parse(json, quirks_mode: true) + + if ActiveSupport.parse_json_times + convert_dates_from(data) + else + data + end + end + + # Returns the class of the error that will be raised when there is an + # error in decoding JSON. Using this method means you won't directly + # depend on the ActiveSupport's JSON implementation, in case it changes + # in the future. + # + # begin + # obj = ActiveSupport::JSON.decode(some_string) + # rescue ActiveSupport::JSON.parse_error + # Rails.logger.warn("Attempted to decode invalid JSON: #{some_string}") + # end + def parse_error + ::JSON::ParserError + end + + private + + def convert_dates_from(data) + case data + when nil + nil + when DATE_REGEX + begin + Date.parse(data) + rescue ArgumentError + data + end + when DATETIME_REGEX + begin + Time.zone.parse(data) + rescue ArgumentError + data + end + when Array + data.map! { |d| convert_dates_from(d) } + when Hash + data.each do |key, value| + data[key] = convert_dates_from(value) + end + else + data + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json/encoding.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json/encoding.rb new file mode 100644 index 0000000..1339c75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/json/encoding.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/json" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class << self + delegate :use_standard_json_time_format, :use_standard_json_time_format=, + :time_precision, :time_precision=, + :escape_html_entities_in_json, :escape_html_entities_in_json=, + :json_encoder, :json_encoder=, + to: :'ActiveSupport::JSON::Encoding' + end + + module JSON + # Dumps objects in JSON (JavaScript Object Notation). 
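# A quick round-trip sketch for the ActiveSupport::JSON decoding/encoding files
# in this hunk (assumes the vendored activesupport gem is loadable; output
# mirrors the RDoc examples).
require "active_support"
require "active_support/json"

payload = ActiveSupport::JSON.encode(team: "rails", players: "36")
# => "{\"team\":\"rails\",\"players\":\"36\"}"
ActiveSupport::JSON.decode(payload)
# => {"team"=>"rails", "players"=>"36"}
ActiveSupport::JSON.parse_error
# => JSON::ParserError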
+ # See http://www.json.org for more info. + # + # ActiveSupport::JSON.encode({ team: 'rails', players: '36' }) + # # => "{\"team\":\"rails\",\"players\":\"36\"}" + def self.encode(value, options = nil) + Encoding.json_encoder.new(options).encode(value) + end + + module Encoding #:nodoc: + class JSONGemEncoder #:nodoc: + attr_reader :options + + def initialize(options = nil) + @options = options || {} + end + + # Encode the given object into a JSON string + def encode(value) + stringify jsonify value.as_json(options.dup) + end + + private + # Rails does more escaping than the JSON gem natively does (we + # escape \u2028 and \u2029 and optionally >, <, & to work around + # certain browser problems). + ESCAPED_CHARS = { + "\u2028" => '\u2028', + "\u2029" => '\u2029', + ">" => '\u003e', + "<" => '\u003c', + "&" => '\u0026', + } + + ESCAPE_REGEX_WITH_HTML_ENTITIES = /[\u2028\u2029><&]/u + ESCAPE_REGEX_WITHOUT_HTML_ENTITIES = /[\u2028\u2029]/u + + # This class wraps all the strings we see and does the extra escaping + class EscapedString < String #:nodoc: + def to_json(*) + if Encoding.escape_html_entities_in_json + super.gsub ESCAPE_REGEX_WITH_HTML_ENTITIES, ESCAPED_CHARS + else + super.gsub ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, ESCAPED_CHARS + end + end + + def to_s + self + end + end + + # Mark these as private so we don't leak encoding-specific constructs + private_constant :ESCAPED_CHARS, :ESCAPE_REGEX_WITH_HTML_ENTITIES, + :ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, :EscapedString + + # Convert an object into a "JSON-ready" representation composed of + # primitives like Hash, Array, String, Numeric, + # and +true+/+false+/+nil+. + # Recursively calls #as_json to the object to recursively build a + # fully JSON-ready object. + # + # This allows developers to implement #as_json without having to + # worry about what base types of objects they are allowed to return + # or having to remember to call #as_json recursively. + # + # Note: the +options+ hash passed to +object.to_json+ is only passed + # to +object.as_json+, not any of this method's recursive +#as_json+ + # calls. + def jsonify(value) + case value + when String + EscapedString.new(value) + when Numeric, NilClass, TrueClass, FalseClass + value.as_json + when Hash + Hash[value.map { |k, v| [jsonify(k), jsonify(v)] }] + when Array + value.map { |v| jsonify(v) } + else + jsonify value.as_json + end + end + + # Encode a "jsonified" Ruby data structure using the JSON gem + def stringify(jsonified) + ::JSON.generate(jsonified, quirks_mode: true, max_nesting: false) + end + end + + class << self + # If true, use ISO 8601 format for dates and times. Otherwise, fall back + # to the Active Support legacy format. + attr_accessor :use_standard_json_time_format + + # If true, encode >, <, & as escaped unicode sequences (e.g. > as \u003e) + # as a safety measure. + attr_accessor :escape_html_entities_in_json + + # Sets the precision of encoded time values. + # Defaults to 3 (equivalent to millisecond precision) + attr_accessor :time_precision + + # Sets the encoder used by Rails to encode Ruby objects into JSON strings + # in +Object#to_json+ and +ActiveSupport::JSON.encode+. 
+ attr_accessor :json_encoder + end + + self.use_standard_json_time_format = true + self.escape_html_entities_in_json = true + self.json_encoder = JSONGemEncoder + self.time_precision = 3 + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/key_generator.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/key_generator.rb new file mode 100644 index 0000000..78f7d7c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/key_generator.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +require "concurrent/map" +require "openssl" + +module ActiveSupport + # KeyGenerator is a simple wrapper around OpenSSL's implementation of PBKDF2. + # It can be used to derive a number of keys for various purposes from a given secret. + # This lets Rails applications have a single secure secret, but avoid reusing that + # key in multiple incompatible contexts. + class KeyGenerator + def initialize(secret, options = {}) + @secret = secret + # The default iterations are higher than required for our key derivation uses + # on the off chance someone uses this for password storage + @iterations = options[:iterations] || 2**16 + end + + # Returns a derived key suitable for use. The default key_size is chosen + # to be compatible with the default settings of ActiveSupport::MessageVerifier. + # i.e. OpenSSL::Digest::SHA1#block_length + def generate_key(salt, key_size = 64) + OpenSSL::PKCS5.pbkdf2_hmac_sha1(@secret, salt, @iterations, key_size) + end + end + + # CachingKeyGenerator is a wrapper around KeyGenerator which allows users to avoid + # re-executing the key generation process when it's called using the same salt and + # key_size. + class CachingKeyGenerator + def initialize(key_generator) + @key_generator = key_generator + @cache_keys = Concurrent::Map.new + end + + # Returns a derived key suitable for use. + def generate_key(*args) + @cache_keys[args.join] ||= @key_generator.generate_key(*args) + end + end + + class LegacyKeyGenerator # :nodoc: + SECRET_MIN_LENGTH = 30 # Characters + + def initialize(secret) + ensure_secret_secure(secret) + @secret = secret + end + + def generate_key(salt) + @secret + end + + private + + # To prevent users from using something insecure like "Password" we make sure that the + # secret they've provided is at least 30 characters in length. + def ensure_secret_secure(secret) + if secret.blank? + raise ArgumentError, "A secret is required to generate an integrity hash " \ + "for cookie session data. Set a secret_key_base of at least " \ + "#{SECRET_MIN_LENGTH} characters in via `bin/rails credentials:edit`." + end + + if secret.length < SECRET_MIN_LENGTH + raise ArgumentError, "Secret should be something secure, " \ + "like \"#{SecureRandom.hex(16)}\". The value you " \ + "provided, \"#{secret}\", is shorter than the minimum length " \ + "of #{SECRET_MIN_LENGTH} characters." + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/lazy_load_hooks.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/lazy_load_hooks.rb new file mode 100644 index 0000000..dc8080c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/lazy_load_hooks.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +module ActiveSupport + # lazy_load_hooks allows Rails to lazily load a lot of components and thus + # making the app boot faster. 
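# A small sketch of the key derivation wrappers from the key_generator.rb hunk
# above (assumes the vendored activesupport gem; the secret and salt here are
# placeholders).
require "active_support"
require "active_support/key_generator"
require "securerandom"

generator = ActiveSupport::KeyGenerator.new("s3Krit", iterations: 1000)
cached    = ActiveSupport::CachingKeyGenerator.new(generator)
salt      = SecureRandom.random_bytes(64)

key = cached.generate_key(salt, 32)        # 32-byte key via PBKDF2-HMAC-SHA1
key.bytesize                               # => 32
cached.generate_key(salt, 32).equal?(key)  # => true, served from the cache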
Because of this feature now there is no need to + # require ActiveRecord::Base at boot time purely to apply + # configuration. Instead a hook is registered that applies configuration once + # ActiveRecord::Base is loaded. Here ActiveRecord::Base is + # used as example but this feature can be applied elsewhere too. + # + # Here is an example where +on_load+ method is called to register a hook. + # + # initializer 'active_record.initialize_timezone' do + # ActiveSupport.on_load(:active_record) do + # self.time_zone_aware_attributes = true + # self.default_timezone = :utc + # end + # end + # + # When the entirety of +ActiveRecord::Base+ has been + # evaluated then +run_load_hooks+ is invoked. The very last line of + # +ActiveRecord::Base+ is: + # + # ActiveSupport.run_load_hooks(:active_record, ActiveRecord::Base) + module LazyLoadHooks + def self.extended(base) # :nodoc: + base.class_eval do + @load_hooks = Hash.new { |h, k| h[k] = [] } + @loaded = Hash.new { |h, k| h[k] = [] } + @run_once = Hash.new { |h, k| h[k] = [] } + end + end + + # Declares a block that will be executed when a Rails component is fully + # loaded. + # + # Options: + # + # * :yield - Yields the object that run_load_hooks to +block+. + # * :run_once - Given +block+ will run only once. + def on_load(name, options = {}, &block) + @loaded[name].each do |base| + execute_hook(name, base, options, block) + end + + @load_hooks[name] << [block, options] + end + + def run_load_hooks(name, base = Object) + @loaded[name] << base + @load_hooks[name].each do |hook, options| + execute_hook(name, base, options, hook) + end + end + + private + + def with_execution_control(name, block, once) + unless @run_once[name].include?(block) + @run_once[name] << block if once + + yield + end + end + + def execute_hook(name, base, options, block) + with_execution_control(name, block, options[:run_once]) do + if options[:yield] + block.call(base) + else + base.instance_eval(&block) + end + end + end + end + + extend LazyLoadHooks +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/locale/en.yml b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/locale/en.yml new file mode 100644 index 0000000..c64b759 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/locale/en.yml @@ -0,0 +1,135 @@ +en: + date: + formats: + # Use the strftime parameters for formats. + # When no format has been given, it uses default. + # You can provide other formats here if you like! + default: "%Y-%m-%d" + short: "%b %d" + long: "%B %d, %Y" + + day_names: [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday] + abbr_day_names: [Sun, Mon, Tue, Wed, Thu, Fri, Sat] + + # Don't forget the nil at the beginning; there's no such thing as a 0th month + month_names: [~, January, February, March, April, May, June, July, August, September, October, November, December] + abbr_month_names: [~, Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec] + # Used in date_select and datetime_select. + order: + - year + - month + - day + + time: + formats: + default: "%a, %d %b %Y %H:%M:%S %z" + short: "%d %b %H:%M" + long: "%B %d, %Y %H:%M" + am: "am" + pm: "pm" + +# Used in array.to_sentence. 
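# A tiny sketch of the lazy-load hook flow from the lazy_load_hooks.rb hunk
# above (assumes the vendored activesupport gem; :my_component and MyComponent
# are made-up names).
require "active_support"
require "active_support/lazy_load_hooks"

ActiveSupport.on_load(:my_component) do
  # `self` is whatever gets passed to run_load_hooks below.
  @configured = true
end

class MyComponent
  ActiveSupport.run_load_hooks(:my_component, self)
end

MyComponent.instance_variable_get(:@configured) # => true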
+ support: + array: + words_connector: ", " + two_words_connector: " and " + last_word_connector: ", and " + number: + # Used in NumberHelper.number_to_delimited() + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: "." + # Delimits thousands (e.g. 1,000,000 is a million) (always in groups of three) + delimiter: "," + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3 + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false + # If set, the zeros after the decimal separator will always be stripped (eg.: 1.200 will be 1.2) + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_currency() + currency: + format: + # Where is the currency sign? %u is the currency unit, %n the number (default: $5.00) + format: "%u%n" + unit: "$" + # These five are to override number.format and are optional + separator: "." + delimiter: "," + precision: 2 + significant: false + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_percentage() + percentage: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + format: "%n%" + + # Used in NumberHelper.number_to_rounded() + precision: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_human_size() and NumberHelper.number_to_human() + human: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + precision: 3 + significant: true + strip_insignificant_zeros: true + # Used in number_to_human_size() + storage_units: + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u" + units: + byte: + one: "Byte" + other: "Bytes" + kb: "KB" + mb: "MB" + gb: "GB" + tb: "TB" + pb: "PB" + eb: "EB" + # Used in NumberHelper.number_to_human() + decimal_units: + format: "%n %u" + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. 
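# A brief sketch of how the number formats declared in this en.yml hunk are
# consumed through ActiveSupport::NumberHelper (assumes the vendored
# activesupport and i18n gems; values follow the documented defaults).
require "active_support"
require "active_support/i18n"          # appends this en.yml to I18n.load_path
require "active_support/number_helper"

ActiveSupport::NumberHelper.number_to_delimited(1234567)  # => "1,234,567"
ActiveSupport::NumberHelper.number_to_currency(1234.5)    # => "$1,234.50"
ActiveSupport::NumberHelper.number_to_human_size(1234567) # => "1.18 MB"
ActiveSupport::NumberHelper.number_to_human(1234567)      # => "1.23 Million"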
+ units: + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "" + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: Thousand + million: Million + billion: Billion + trillion: Trillion + quadrillion: Quadrillion diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber.rb new file mode 100644 index 0000000..0f7be06 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber.rb @@ -0,0 +1,112 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/class/attribute" +require "active_support/subscriber" + +module ActiveSupport + # ActiveSupport::LogSubscriber is an object set to consume + # ActiveSupport::Notifications with the sole purpose of logging them. + # The log subscriber dispatches notifications to a registered object based + # on its given namespace. + # + # An example would be Active Record log subscriber responsible for logging + # queries: + # + # module ActiveRecord + # class LogSubscriber < ActiveSupport::LogSubscriber + # def sql(event) + # "#{event.payload[:name]} (#{event.duration}) #{event.payload[:sql]}" + # end + # end + # end + # + # And it's finally registered as: + # + # ActiveRecord::LogSubscriber.attach_to :active_record + # + # Since we need to know all instance methods before attaching the log + # subscriber, the line above should be called after your + # ActiveRecord::LogSubscriber definition. + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event (ActiveSupport::Notifications::Event) to + # the sql method. + # + # Log subscriber also has some helpers to deal with logging and automatically + # flushes all logs when the request finishes (via action_dispatch.callback + # notification) in a Rails environment. + class LogSubscriber < Subscriber + # Embed in a String to clear all previous ANSI sequences. + CLEAR = "\e[0m" + BOLD = "\e[1m" + + # Colors + BLACK = "\e[30m" + RED = "\e[31m" + GREEN = "\e[32m" + YELLOW = "\e[33m" + BLUE = "\e[34m" + MAGENTA = "\e[35m" + CYAN = "\e[36m" + WHITE = "\e[37m" + + mattr_accessor :colorize_logging, default: true + + class << self + def logger + @logger ||= if defined?(Rails) && Rails.respond_to?(:logger) + Rails.logger + end + end + + attr_writer :logger + + def log_subscribers + subscribers + end + + # Flush all log_subscribers' logger. + def flush_all! + logger.flush if logger.respond_to?(:flush) + end + end + + def logger + LogSubscriber.logger + end + + def start(name, id, payload) + super if logger + end + + def finish(name, id, payload) + super if logger + rescue => e + if logger + logger.error "Could not log #{name.inspect} event. #{e.class}: #{e.message} #{e.backtrace}" + end + end + + private + + %w(info debug warn error fatal unknown).each do |level| + class_eval <<-METHOD, __FILE__, __LINE__ + 1 + def #{level}(progname = nil, &block) + logger.#{level}(progname, &block) if logger + end + METHOD + end + + # Set color by using a symbol or one of the defined constants. If a third + # option is set to +true+, it also adds bold to the string. This is based + # on the Highline implementation and will automatically append CLEAR to the + # end of the returned String. 
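# A compact sketch of attaching a log subscriber to a namespace, following the
# pattern in the LogSubscriber class comment above (the :my_engine namespace
# and MyEngineLogSubscriber class are made up).
require "active_support"
require "active_support/log_subscriber"
require "active_support/notifications"

class MyEngineLogSubscriber < ActiveSupport::LogSubscriber
  # Invoked for "request.my_engine" notifications once attached below.
  def request(event)
    info color("my_engine request (#{event.duration.round(1)}ms)", GREEN)
  end
end

ActiveSupport::LogSubscriber.logger = ActiveSupport::Logger.new(STDOUT)
MyEngineLogSubscriber.attach_to :my_engine

ActiveSupport::Notifications.instrument("request.my_engine") { }
# writes something like "my_engine request (0.1ms)" in green to STDOUT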
+ def color(text, color, bold = false) # :doc: + return text unless colorize_logging + color = self.class.const_get(color.upcase) if color.is_a?(Symbol) + bold = bold ? BOLD : "" + "#{bold}#{color}#{text}#{CLEAR}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber/test_helper.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber/test_helper.rb new file mode 100644 index 0000000..3f19ef5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber/test_helper.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "active_support/log_subscriber" +require "active_support/logger" +require "active_support/notifications" + +module ActiveSupport + class LogSubscriber + # Provides some helpers to deal with testing log subscribers by setting up + # notifications. Take for instance Active Record subscriber tests: + # + # class SyncLogSubscriberTest < ActiveSupport::TestCase + # include ActiveSupport::LogSubscriber::TestHelper + # + # setup do + # ActiveRecord::LogSubscriber.attach_to(:active_record) + # end + # + # def test_basic_query_logging + # Developer.all.to_a + # wait + # assert_equal 1, @logger.logged(:debug).size + # assert_match(/Developer Load/, @logger.logged(:debug).last) + # assert_match(/SELECT \* FROM "developers"/, @logger.logged(:debug).last) + # end + # end + # + # All you need to do is to ensure that your log subscriber is added to + # Rails::Subscriber, as in the second line of the code above. The test + # helpers are responsible for setting up the queue, subscriptions and + # turning colors in logs off. + # + # The messages are available in the @logger instance, which is a logger with + # limited powers (it actually does not send anything to your output), and + # you can collect them doing @logger.logged(level), where level is the level + # used in logging, like info, debug, warn and so on. + module TestHelper + def setup # :nodoc: + @logger = MockLogger.new + @notifier = ActiveSupport::Notifications::Fanout.new + + ActiveSupport::LogSubscriber.colorize_logging = false + + @old_notifier = ActiveSupport::Notifications.notifier + set_logger(@logger) + ActiveSupport::Notifications.notifier = @notifier + end + + def teardown # :nodoc: + set_logger(nil) + ActiveSupport::Notifications.notifier = @old_notifier + end + + class MockLogger + include ActiveSupport::Logger::Severity + + attr_reader :flush_count + attr_accessor :level + + def initialize(level = DEBUG) + @flush_count = 0 + @level = level + @logged = Hash.new { |h, k| h[k] = [] } + end + + def method_missing(level, message = nil) + if block_given? + @logged[level] << yield + else + @logged[level] << message + end + end + + def logged(level) + @logged[level].compact.map { |l| l.to_s.strip } + end + + def flush + @flush_count += 1 + end + + ActiveSupport::Logger::Severity.constants.each do |severity| + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{severity.downcase}? + #{severity} >= @level + end + EOT + end + end + + # Wait notifications to be published. + def wait + @notifier.wait + end + + # Overwrite if you use another logger in your log subscriber. 
+ # + # def logger + # ActiveRecord::Base.logger = @logger + # end + def set_logger(logger) + ActiveSupport::LogSubscriber.logger = logger + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger.rb new file mode 100644 index 0000000..8152a18 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger.rb @@ -0,0 +1,108 @@ +# frozen_string_literal: true + +require "active_support/logger_silence" +require "active_support/logger_thread_safe_level" +require "logger" + +module ActiveSupport + class Logger < ::Logger + include ActiveSupport::LoggerThreadSafeLevel + include LoggerSilence + + # Returns true if the logger destination matches one of the sources + # + # logger = Logger.new(STDOUT) + # ActiveSupport::Logger.logger_outputs_to?(logger, STDOUT) + # # => true + def self.logger_outputs_to?(logger, *sources) + logdev = logger.instance_variable_get("@logdev") + logger_source = logdev.dev if logdev.respond_to?(:dev) + sources.any? { |source| source == logger_source } + end + + # Broadcasts logs to multiple loggers. + def self.broadcast(logger) # :nodoc: + Module.new do + define_method(:add) do |*args, &block| + logger.add(*args, &block) + super(*args, &block) + end + + define_method(:<<) do |x| + logger << x + super(x) + end + + define_method(:close) do + logger.close + super() + end + + define_method(:progname=) do |name| + logger.progname = name + super(name) + end + + define_method(:formatter=) do |formatter| + logger.formatter = formatter + super(formatter) + end + + define_method(:level=) do |level| + logger.level = level + super(level) + end + + define_method(:local_level=) do |level| + logger.local_level = level if logger.respond_to?(:local_level=) + super(level) if respond_to?(:local_level=) + end + + define_method(:silence) do |level = Logger::ERROR, &block| + if logger.respond_to?(:silence) + logger.silence(level) do + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + else + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + end + end + end + + def initialize(*args) + super + @formatter = SimpleFormatter.new + after_initialize if respond_to? :after_initialize + end + + def add(severity, message = nil, progname = nil, &block) + return true if @logdev.nil? || (severity || UNKNOWN) < level + super + end + + Logger::Severity.constants.each do |severity| + class_eval(<<-EOT, __FILE__, __LINE__ + 1) + def #{severity.downcase}? # def debug? + Logger::#{severity} >= level # DEBUG >= level + end # end + EOT + end + + # Simple formatter which only displays the message. + class SimpleFormatter < ::Logger::Formatter + # This method is invoked when a log event occurs + def call(severity, timestamp, progname, msg) + "#{String === msg ? 
msg : msg.inspect}\n" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger_silence.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger_silence.rb new file mode 100644 index 0000000..89f32b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger_silence.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/module/attribute_accessors" +require "concurrent" + +module LoggerSilence + extend ActiveSupport::Concern + + included do + cattr_accessor :silencer, default: true + end + + # Silences the logger for the duration of the block. + def silence(temporary_level = Logger::ERROR) + if silencer + begin + old_local_level = local_level + self.local_level = temporary_level + + yield self + ensure + self.local_level = old_local_level + end + else + yield self + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger_thread_safe_level.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger_thread_safe_level.rb new file mode 100644 index 0000000..52295d7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/logger_thread_safe_level.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "fiber" + +module ActiveSupport + module LoggerThreadSafeLevel # :nodoc: + extend ActiveSupport::Concern + + def after_initialize + @local_levels = Concurrent::Map.new(initial_capacity: 2) + end + + def local_log_id + Fiber.current.__id__ + end + + def local_level + @local_levels[local_log_id] + end + + def local_level=(level) + if level + @local_levels[local_log_id] = level + else + @local_levels.delete(local_log_id) + end + end + + def level + local_level || super + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/message_encryptor.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/message_encryptor.rb new file mode 100644 index 0000000..8b73270 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/message_encryptor.rb @@ -0,0 +1,229 @@ +# frozen_string_literal: true + +require "openssl" +require "base64" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/message_verifier" +require "active_support/messages/metadata" + +module ActiveSupport + # MessageEncryptor is a simple way to encrypt values which get stored + # somewhere you don't trust. + # + # The cipher text and initialization vector are base64 encoded and returned + # to you. + # + # This can be used in situations similar to the MessageVerifier, but + # where you don't want users to be able to determine the value of the payload. + # + # len = ActiveSupport::MessageEncryptor.key_len + # salt = SecureRandom.random_bytes(len) + # key = ActiveSupport::KeyGenerator.new('password').generate_key(salt, len) # => "\x89\xE0\x156\xAC..." + # crypt = ActiveSupport::MessageEncryptor.new(key) # => # + # encrypted_data = crypt.encrypt_and_sign('my secret data') # => "NlFBTTMwOUV5UlA1QlNEN2xkY2d6eThYWWh..." + # crypt.decrypt_and_verify(encrypted_data) # => "my secret data" + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. 
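# A short sketch of the logger utilities above: logger_outputs_to? from
# logger.rb and silence from logger_silence.rb (assumes the vendored
# activesupport gem; STDOUT is an arbitrary destination).
require "active_support"
require "active_support/logger"

logger = ActiveSupport::Logger.new(STDOUT)
ActiveSupport::Logger.logger_outputs_to?(logger, STDOUT) # => true

logger.silence do                # level is raised to ERROR inside the block
  logger.info  "not written"
  logger.error "still written"
end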
But they can also be + # confined to a specific +:purpose+. + # + # token = crypt.encrypt_and_sign("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # crypt.decrypt_and_verify(token, purpose: :login) # => "this is the chair" + # crypt.decrypt_and_verify(token, purpose: :shipping) # => nil + # crypt.decrypt_and_verify(token) # => nil + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = crypt.encrypt_and_sign("the conversation is lively") + # crypt.decrypt_and_verify(token, purpose: :scare_tactics) # => nil + # crypt.decrypt_and_verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # crypt.encrypt_and_sign(parcel, expires_in: 1.month) + # crypt.encrypt_and_sign(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned upto the expire time. + # Thereafter, verifying returns +nil+. + # + # === Rotating keys + # + # MessageEncryptor also supports rotating out old configurations by falling + # back to a stack of encryptors. Call +rotate+ to build and add an encryptor + # so +decrypt_and_verify+ will also try the fallback. + # + # By default any rotated encryptors use the values of the primary + # encryptor unless specified otherwise. + # + # You'd give your encryptor the new defaults: + # + # crypt = ActiveSupport::MessageEncryptor.new(@secret, cipher: "aes-256-gcm") + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # crypt.rotate old_secret # Fallback to an old secret instead of @secret. + # crypt.rotate cipher: "aes-256-cbc" # Fallback to an old cipher instead of aes-256-gcm. + # + # Though if both the secret and the cipher was changed at the same time, + # the above should be combined into: + # + # crypt.rotate old_secret, cipher: "aes-256-cbc" + class MessageEncryptor + prepend Messages::Rotator::Encryptor + + cattr_accessor :use_authenticated_message_encryption, instance_accessor: false, default: false + + class << self + def default_cipher #:nodoc: + if use_authenticated_message_encryption + "aes-256-gcm" + else + "aes-256-cbc" + end + end + end + + module NullSerializer #:nodoc: + def self.load(value) + value + end + + def self.dump(value) + value + end + end + + module NullVerifier #:nodoc: + def self.verify(value) + value + end + + def self.generate(value) + value + end + end + + class InvalidMessage < StandardError; end + OpenSSLCipherError = OpenSSL::Cipher::CipherError + + # Initialize a new MessageEncryptor. +secret+ must be at least as long as + # the cipher key size. For the default 'aes-256-gcm' cipher, this is 256 + # bits. If you are using a user-entered secret, you can generate a suitable + # key by using ActiveSupport::KeyGenerator or a similar key + # derivation function. + # + # First additional parameter is used as the signature key for +MessageVerifier+. + # This allows you to specify keys to encrypt and sign data. + # + # ActiveSupport::MessageEncryptor.new('secret', 'signature_secret') + # + # Options: + # * :cipher - Cipher to use. Can be any cipher returned by + # OpenSSL::Cipher.ciphers. Default is 'aes-256-gcm'. 
+ # * :digest - String of digest to use for signing. Default is + # +SHA1+. Ignored when using an AEAD cipher like 'aes-256-gcm'. + # * :serializer - Object serializer to use. Default is +Marshal+. + def initialize(secret, *signature_key_or_options) + options = signature_key_or_options.extract_options! + sign_secret = signature_key_or_options.first + @secret = secret + @sign_secret = sign_secret + @cipher = options[:cipher] || self.class.default_cipher + @digest = options[:digest] || "SHA1" unless aead_mode? + @verifier = resolve_verifier + @serializer = options[:serializer] || Marshal + end + + # Encrypt and sign a message. We need to sign the message in order to avoid + # padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def encrypt_and_sign(value, expires_at: nil, expires_in: nil, purpose: nil) + verifier.generate(_encrypt(value, expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + end + + # Decrypt and verify a message. We need to verify the message in order to + # avoid padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def decrypt_and_verify(data, purpose: nil, **) + _decrypt(verifier.verify(data), purpose) + end + + # Given a cipher, returns the key length of the cipher to help generate the key of desired size + def self.key_len(cipher = default_cipher) + OpenSSL::Cipher.new(cipher).key_len + end + + private + def _encrypt(value, **metadata_options) + cipher = new_cipher + cipher.encrypt + cipher.key = @secret + + # Rely on OpenSSL for the initialization vector + iv = cipher.random_iv + cipher.auth_data = "" if aead_mode? + + encrypted_data = cipher.update(Messages::Metadata.wrap(@serializer.dump(value), metadata_options)) + encrypted_data << cipher.final + + blob = "#{::Base64.strict_encode64 encrypted_data}--#{::Base64.strict_encode64 iv}" + blob = "#{blob}--#{::Base64.strict_encode64 cipher.auth_tag}" if aead_mode? + blob + end + + def _decrypt(encrypted_message, purpose) + cipher = new_cipher + encrypted_data, iv, auth_tag = encrypted_message.split("--".freeze).map { |v| ::Base64.strict_decode64(v) } + + # Currently the OpenSSL bindings do not raise an error if auth_tag is + # truncated, which would allow an attacker to easily forge it. See + # https://github.com/ruby/openssl/issues/63 + raise InvalidMessage if aead_mode? && (auth_tag.nil? || auth_tag.bytes.length != 16) + + cipher.decrypt + cipher.key = @secret + cipher.iv = iv + if aead_mode? + cipher.auth_tag = auth_tag + cipher.auth_data = "" + end + + decrypted_data = cipher.update(encrypted_data) + decrypted_data << cipher.final + + message = Messages::Metadata.verify(decrypted_data, purpose) + @serializer.load(message) if message + rescue OpenSSLCipherError, TypeError, ArgumentError + raise InvalidMessage + end + + def new_cipher + OpenSSL::Cipher.new(@cipher) + end + + def verifier + @verifier + end + + def aead_mode? + @aead_mode ||= new_cipher.authenticated? + end + + def resolve_verifier + if aead_mode? 
+ NullVerifier + else + MessageVerifier.new(@sign_secret || @secret, digest: @digest, serializer: NullSerializer) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/message_verifier.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/message_verifier.rb new file mode 100644 index 0000000..83c39c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/message_verifier.rb @@ -0,0 +1,205 @@ +# frozen_string_literal: true + +require "base64" +require "active_support/core_ext/object/blank" +require "active_support/security_utils" +require "active_support/messages/metadata" +require "active_support/messages/rotator" + +module ActiveSupport + # +MessageVerifier+ makes it easy to generate and verify messages which are + # signed to prevent tampering. + # + # This is useful for cases like remember-me tokens and auto-unsubscribe links + # where the session store isn't suitable or available. + # + # Remember Me: + # cookies[:remember_me] = @verifier.generate([@user.id, 2.weeks.from_now]) + # + # In the authentication filter: + # + # id, time = @verifier.verify(cookies[:remember_me]) + # if Time.now < time + # self.current_user = User.find(id) + # end + # + # By default it uses Marshal to serialize the message. If you want to use + # another serialization method, you can set the serializer in the options + # hash upon initialization: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', serializer: YAML) + # + # +MessageVerifier+ creates HMAC signatures using SHA1 hash algorithm by default. + # If you want to use a different hash algorithm, you can change it by providing + # +:digest+ key as an option while initializing the verifier: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', digest: 'SHA256') + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. But they can also be + # confined to a specific +:purpose+. + # + # token = @verifier.generate("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # @verifier.verified(token, purpose: :login) # => "this is the chair" + # @verifier.verified(token, purpose: :shipping) # => nil + # @verifier.verified(token) # => nil + # + # @verifier.verify(token, purpose: :login) # => "this is the chair" + # @verifier.verify(token, purpose: :shipping) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => ActiveSupport::MessageVerifier::InvalidSignature + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = @verifier.generate("the conversation is lively") + # @verifier.verified(token, purpose: :scare_tactics) # => nil + # @verifier.verified(token) # => "the conversation is lively" + # + # @verifier.verify(token, purpose: :scare_tactics) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # @verifier.generate(parcel, expires_in: 1.month) + # @verifier.generate(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned upto the expire time. 
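# An end-to-end sketch of MessageEncryptor, condensed from its class comment
# above (assumes the vendored activesupport gem; "password" and the salt are
# placeholders).
require "active_support"
require "active_support/key_generator"
require "active_support/message_encryptor"
require "securerandom"

len   = ActiveSupport::MessageEncryptor.key_len
salt  = SecureRandom.random_bytes(len)
key   = ActiveSupport::KeyGenerator.new("password").generate_key(salt, len)
crypt = ActiveSupport::MessageEncryptor.new(key)

data = crypt.encrypt_and_sign("my secret data")
crypt.decrypt_and_verify(data) # => "my secret data"
# Per the docs above, encrypt_and_sign also accepts :purpose and
# :expires_in/:expires_at, and #rotate adds fallback configurations.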
+ # Thereafter, the +verified+ method returns +nil+ while +verify+ raises + # ActiveSupport::MessageVerifier::InvalidSignature. + # + # === Rotating keys + # + # MessageVerifier also supports rotating out old configurations by falling + # back to a stack of verifiers. Call +rotate+ to build and add a verifier to + # so either +verified+ or +verify+ will also try verifying with the fallback. + # + # By default any rotated verifiers use the values of the primary + # verifier unless specified otherwise. + # + # You'd give your verifier the new defaults: + # + # verifier = ActiveSupport::MessageVerifier.new(@secret, digest: "SHA512", serializer: JSON) + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # verifier.rotate old_secret # Fallback to an old secret instead of @secret. + # verifier.rotate digest: "SHA256" # Fallback to an old digest instead of SHA512. + # verifier.rotate serializer: Marshal # Fallback to an old serializer instead of JSON. + # + # Though the above would most likely be combined into one rotation: + # + # verifier.rotate old_secret, digest: "SHA256", serializer: Marshal + class MessageVerifier + prepend Messages::Rotator::Verifier + + class InvalidSignature < StandardError; end + + def initialize(secret, options = {}) + raise ArgumentError, "Secret should not be nil." unless secret + @secret = secret + @digest = options[:digest] || "SHA1" + @serializer = options[:serializer] || Marshal + end + + # Checks if a signed message could have been generated by signing an object + # with the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # verifier.valid_message?(signed_message) # => true + # + # tampered_message = signed_message.chop # editing the message invalidates the signature + # verifier.valid_message?(tampered_message) # => false + def valid_message?(signed_message) + return if signed_message.nil? || !signed_message.valid_encoding? || signed_message.blank? + + data, digest = signed_message.split("--".freeze) + data.present? && digest.present? && ActiveSupport::SecurityUtils.secure_compare(digest, generate_digest(data)) + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # + # signed_message = verifier.generate 'a private message' + # verifier.verified(signed_message) # => 'a private message' + # + # Returns +nil+ if the message was not signed with the same secret. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verified(signed_message) # => nil + # + # Returns +nil+ if the message is not Base64-encoded. + # + # invalid_message = "f--46a0120593880c733a53b6dad75b42ddc1c8996d" + # verifier.verified(invalid_message) # => nil + # + # Raises any error raised while decoding the signed message. 
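# A minimal sketch of MessageVerifier, condensed from the examples above
# (assumes the vendored activesupport gem; "s3Krit" is a placeholder secret).
require "active_support"
require "active_support/message_verifier"

verifier = ActiveSupport::MessageVerifier.new("s3Krit")
token    = verifier.generate("a private message")

verifier.verified(token)      # => "a private message"
verifier.verified(token.chop) # => nil, the signature no longer matches
verifier.verify(token.chop)   # raises ActiveSupport::MessageVerifier::InvalidSignature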
+ # + # incompatible_message = "test--dad7b06c94abba8d46a15fafaef56c327665d5ff" + # verifier.verified(incompatible_message) # => TypeError: incompatible marshal file format + def verified(signed_message, purpose: nil, **) + if valid_message?(signed_message) + begin + data = signed_message.split("--".freeze)[0] + message = Messages::Metadata.verify(decode(data), purpose) + @serializer.load(message) if message + rescue ArgumentError => argument_error + return if argument_error.message.include?("invalid base64") + raise + end + end + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # + # verifier.verify(signed_message) # => 'a private message' + # + # Raises +InvalidSignature+ if the message was not signed with the same + # secret or was not Base64-encoded. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verify(signed_message) # => ActiveSupport::MessageVerifier::InvalidSignature + def verify(*args) + verified(*args) || raise(InvalidSignature) + end + + # Generates a signed message for the provided value. + # + # The message is signed with the +MessageVerifier+'s secret. Without knowing + # the secret, the original value cannot be extracted from the message. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # verifier.generate 'a private message' # => "BAhJIhRwcml2YXRlLW1lc3NhZ2UGOgZFVA==--e2d724331ebdee96a10fb99b089508d1c72bd772" + def generate(value, expires_at: nil, expires_in: nil, purpose: nil) + data = encode(Messages::Metadata.wrap(@serializer.dump(value), expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + "#{data}--#{generate_digest(data)}" + end + + private + def encode(data) + ::Base64.strict_encode64(data) + end + + def decode(data) + ::Base64.strict_decode64(data) + end + + def generate_digest(data) + require "openssl" unless defined?(OpenSSL) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.const_get(@digest).new, @secret, data) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/metadata.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/metadata.rb new file mode 100644 index 0000000..e97caac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/metadata.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require "time" + +module ActiveSupport + module Messages #:nodoc: + class Metadata #:nodoc: + def initialize(message, expires_at = nil, purpose = nil) + @message, @expires_at, @purpose = message, expires_at, purpose + end + + def as_json(options = {}) + { _rails: { message: @message, exp: @expires_at, pur: @purpose } } + end + + class << self + def wrap(message, expires_at: nil, expires_in: nil, purpose: nil) + if expires_at || expires_in || purpose + JSON.encode new(encode(message), pick_expiry(expires_at, expires_in), purpose) + else + message + end + end + + def verify(message, purpose) + extract_metadata(message).verify(purpose) + end + + private + def pick_expiry(expires_at, expires_in) + if expires_at + expires_at.utc.iso8601(3) + elsif expires_in + Time.now.utc.advance(seconds: expires_in).iso8601(3) + end + end + + def extract_metadata(message) + data = JSON.decode(message) rescue nil + + if data.is_a?(Hash) && data.key?("_rails") + new(decode(data["_rails"]["message"]), data["_rails"]["exp"], 
data["_rails"]["pur"]) + else + new(message) + end + end + + def encode(message) + ::Base64.strict_encode64(message) + end + + def decode(message) + ::Base64.strict_decode64(message) + end + end + + def verify(purpose) + @message if match?(purpose) && fresh? + end + + private + def match?(purpose) + @purpose.to_s == purpose.to_s + end + + def fresh? + @expires_at.nil? || Time.now.utc < Time.iso8601(@expires_at) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotation_configuration.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotation_configuration.rb new file mode 100644 index 0000000..bd50d6d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotation_configuration.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + class RotationConfiguration # :nodoc: + attr_reader :signed, :encrypted + + def initialize + @signed, @encrypted = [], [] + end + + def rotate(kind, *args) + case kind + when :signed + @signed << args + when :encrypted + @encrypted << args + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotator.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotator.rb new file mode 100644 index 0000000..823a399 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotator.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + module Rotator # :nodoc: + def initialize(*, **options) + super + + @options = options + @rotations = [] + end + + def rotate(*secrets, **options) + @rotations << build_rotation(*secrets, @options.merge(options)) + end + + module Encryptor + include Rotator + + def decrypt_and_verify(*args, on_rotation: nil, **options) + super + rescue MessageEncryptor::InvalidMessage, MessageVerifier::InvalidSignature + run_rotations(on_rotation) { |encryptor| encryptor.decrypt_and_verify(*args, options) } || raise + end + + private + def build_rotation(secret = @secret, sign_secret = @sign_secret, options) + self.class.new(secret, sign_secret, options) + end + end + + module Verifier + include Rotator + + def verified(*args, on_rotation: nil, **options) + super || run_rotations(on_rotation) { |verifier| verifier.verified(*args, options) } + end + + private + def build_rotation(secret = @secret, options) + self.class.new(secret, options) + end + end + + private + def run_rotations(on_rotation) + @rotations.find do |rotation| + if message = yield(rotation) rescue next + on_rotation.call if on_rotation + return message + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte.rb new file mode 100644 index 0000000..3fe3a05 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module ActiveSupport #:nodoc: + module Multibyte + autoload :Chars, "active_support/multibyte/chars" + autoload :Unicode, "active_support/multibyte/unicode" + + # The proxy class returned when calling mb_chars. You can use this accessor + # to configure your own proxy class so you can support other encodings. 
See + # the ActiveSupport::Multibyte::Chars implementation for an example how to + # do this. + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + def self.proxy_class=(klass) + @proxy_class = klass + end + + # Returns the current proxy class. + def self.proxy_class + @proxy_class ||= ActiveSupport::Multibyte::Chars + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/chars.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/chars.rb new file mode 100644 index 0000000..8152b8f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/chars.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +require "active_support/json" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/regexp" + +module ActiveSupport #:nodoc: + module Multibyte #:nodoc: + # Chars enables you to work transparently with UTF-8 encoding in the Ruby + # String class without having extensive knowledge about the encoding. A + # Chars object accepts a string upon initialization and proxies String + # methods in an encoding safe manner. All the normal String methods are also + # implemented on the proxy. + # + # String methods are proxied through the Chars object, and can be accessed + # through the +mb_chars+ method. Methods which would normally return a + # String object now return a Chars object so methods can be chained. + # + # 'The Perfect String '.mb_chars.downcase.strip.normalize + # # => # + # + # Chars objects are perfectly interchangeable with String objects as long as + # no explicit class checks are made. If certain methods do explicitly check + # the class, call +to_s+ before you pass chars objects to them. + # + # bad.explicit_checking_method 'T'.mb_chars.downcase.to_s + # + # The default Chars implementation assumes that the encoding of the string + # is UTF-8, if you want to handle different encodings you can write your own + # multibyte string handler and configure it through + # ActiveSupport::Multibyte.proxy_class. + # + # class CharsForUTF32 + # def size + # @wrapped_string.size / 4 + # end + # + # def self.accepts?(string) + # string.length % 4 == 0 + # end + # end + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + class Chars + include Comparable + attr_reader :wrapped_string + alias to_s wrapped_string + alias to_str wrapped_string + + delegate :<=>, :=~, :acts_like_string?, to: :wrapped_string + + # Creates a new Chars instance by wrapping _string_. + def initialize(string) + @wrapped_string = string + @wrapped_string.force_encoding(Encoding::UTF_8) unless @wrapped_string.frozen? + end + + # Forward all undefined methods to the wrapped string. + def method_missing(method, *args, &block) + result = @wrapped_string.__send__(method, *args, &block) + if /!$/.match?(method) + self if result + else + result.kind_of?(String) ? chars(result) : result + end + end + + # Returns +true+ if _obj_ responds to the given method. Private methods + # are included in the search only if the optional second parameter + # evaluates to +true+. + def respond_to_missing?(method, include_private) + @wrapped_string.respond_to?(method, include_private) + end + + # Returns +true+ when the proxy class can handle the string. Returns + # +false+ otherwise. 
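# A small sketch of the Chars proxy described above (assumes the vendored
# activesupport gem with its String multibyte core extension loaded).
require "active_support"
require "active_support/core_ext/string/multibyte"

chars = "The Perfect String  ".mb_chars
chars.class                                       # => ActiveSupport::Multibyte::Chars
chars.downcase.strip.to_s                         # => "the perfect string"
ActiveSupport::Multibyte::Chars.consumes?("Café") # => true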
+ def self.consumes?(string) + string.encoding == Encoding::UTF_8 + end + + # Works just like String#split, with the exception that the items + # in the resulting list are Chars instances instead of String. This makes + # chaining methods easier. + # + # 'Café périferôl'.mb_chars.split(/é/).map { |part| part.upcase.to_s } # => ["CAF", " P", "RIFERÔL"] + def split(*args) + @wrapped_string.split(*args).map { |i| self.class.new(i) } + end + + # Works like String#slice!, but returns an instance of + # Chars, or +nil+ if the string was not modified. The string will not be + # modified if the range given is out of bounds + # + # string = 'Welcome' + # string.mb_chars.slice!(3) # => # + # string # => 'Welome' + # string.mb_chars.slice!(0..3) # => # + # string # => 'me' + def slice!(*args) + string_sliced = @wrapped_string.slice!(*args) + if string_sliced + chars(string_sliced) + end + end + + # Reverses all characters in the string. + # + # 'Café'.mb_chars.reverse.to_s # => 'éfaC' + def reverse + chars(Unicode.unpack_graphemes(@wrapped_string).reverse.flatten.pack("U*")) + end + + # Limits the byte size of the string to a number of bytes without breaking + # characters. Usable when the storage for a string is limited for some + # reason. + # + # 'こんにちは'.mb_chars.limit(7).to_s # => "こん" + def limit(limit) + slice(0...translate_offset(limit)) + end + + # Converts characters in the string to uppercase. + # + # 'Laurent, où sont les tests ?'.mb_chars.upcase.to_s # => "LAURENT, OÙ SONT LES TESTS ?" + def upcase + chars Unicode.upcase(@wrapped_string) + end + + # Converts characters in the string to lowercase. + # + # 'VĚDA A VÝZKUM'.mb_chars.downcase.to_s # => "věda a výzkum" + def downcase + chars Unicode.downcase(@wrapped_string) + end + + # Converts characters in the string to the opposite case. + # + # 'El Cañón'.mb_chars.swapcase.to_s # => "eL cAÑÓN" + def swapcase + chars Unicode.swapcase(@wrapped_string) + end + + # Converts the first character to uppercase and the remainder to lowercase. + # + # 'über'.mb_chars.capitalize.to_s # => "Über" + def capitalize + (slice(0) || chars("")).upcase + (slice(1..-1) || chars("")).downcase + end + + # Capitalizes the first letter of every word, when possible. + # + # "ÉL QUE SE ENTERÓ".mb_chars.titleize.to_s # => "Él Que Se Enteró" + # "日本語".mb_chars.titleize.to_s # => "日本語" + def titleize + chars(downcase.to_s.gsub(/\b('?\S)/u) { Unicode.upcase($1) }) + end + alias_method :titlecase, :titleize + + # Returns the KC normalization of the string by default. NFKC is + # considered the best normalization form for passing strings to databases + # and validations. + # + # * form - The form you want to normalize in. Should be one of the following: + # :c, :kc, :d, or :kd. Default is + # ActiveSupport::Multibyte::Unicode.default_normalization_form + def normalize(form = nil) + chars(Unicode.normalize(@wrapped_string, form)) + end + + # Performs canonical decomposition on all the characters. + # + # 'é'.length # => 2 + # 'é'.mb_chars.decompose.to_s.length # => 3 + def decompose + chars(Unicode.decompose(:canonical, @wrapped_string.codepoints.to_a).pack("U*")) + end + + # Performs composition on all the characters. + # + # 'é'.length # => 3 + # 'é'.mb_chars.compose.to_s.length # => 2 + def compose + chars(Unicode.compose(@wrapped_string.codepoints.to_a).pack("U*")) + end + + # Returns the number of grapheme clusters in the string. 
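# The RDoc examples above, gathered into one runnable sketch (assumes the
# vendored activesupport gem).
require "active_support"
require "active_support/core_ext/string/multibyte"

"Café".mb_chars.reverse.to_s              # => "éfaC"
"über".mb_chars.capitalize.to_s           # => "Über"
"ÉL QUE SE ENTERÓ".mb_chars.titleize.to_s # => "Él Que Se Enteró"
"こんにちは".mb_chars.limit(7).to_s         # => "こん"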
+ # + # 'क्षि'.mb_chars.length # => 4 + # 'क्षि'.mb_chars.grapheme_length # => 3 + def grapheme_length + Unicode.unpack_graphemes(@wrapped_string).length + end + + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(force = false) + chars(Unicode.tidy_bytes(@wrapped_string, force)) + end + + def as_json(options = nil) #:nodoc: + to_s.as_json(options) + end + + %w(capitalize downcase reverse tidy_bytes upcase).each do |method| + define_method("#{method}!") do |*args| + @wrapped_string = send(method, *args).to_s + self + end + end + + private + + def translate_offset(byte_offset) + return nil if byte_offset.nil? + return 0 if @wrapped_string == "" + + begin + @wrapped_string.byteslice(0...byte_offset).unpack("U*").length + rescue ArgumentError + byte_offset -= 1 + retry + end + end + + def chars(string) + self.class.new(string) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/unicode.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/unicode.rb new file mode 100644 index 0000000..f923061 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/unicode.rb @@ -0,0 +1,394 @@ +# frozen_string_literal: true + +module ActiveSupport + module Multibyte + module Unicode + extend self + + # A list of all available normalization forms. + # See http://www.unicode.org/reports/tr15/tr15-29.html for more + # information about normalization. + NORMALIZATION_FORMS = [:c, :kc, :d, :kd] + + # The Unicode version that is supported by the implementation + UNICODE_VERSION = "9.0.0" + + # The default normalization used for operations that require + # normalization. It can be set to any of the normalizations + # in NORMALIZATION_FORMS. + # + # ActiveSupport::Multibyte::Unicode.default_normalization_form = :c + attr_accessor :default_normalization_form + @default_normalization_form = :kc + + # Hangul character boundaries and properties + HANGUL_SBASE = 0xAC00 + HANGUL_LBASE = 0x1100 + HANGUL_VBASE = 0x1161 + HANGUL_TBASE = 0x11A7 + HANGUL_LCOUNT = 19 + HANGUL_VCOUNT = 21 + HANGUL_TCOUNT = 28 + HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT + HANGUL_SCOUNT = 11172 + HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT + + # Detect whether the codepoint is in a certain character class. Returns + # +true+ when it's in the specified character class and +false+ otherwise. + # Valid character classes are: :cr, :lf, :l, + # :v, :lv, :lvt and :t. + # + # Primarily used by the grapheme cluster support. + def in_char_class?(codepoint, classes) + classes.detect { |c| database.boundary[c] === codepoint } ? true : false + end + + # Unpack the string at grapheme boundaries. Returns a list of character + # lists. + # + # Unicode.unpack_graphemes('क्षि') # => [[2325, 2381], [2359], [2367]] + # Unicode.unpack_graphemes('Café') # => [[67], [97], [102], [233]] + def unpack_graphemes(string) + codepoints = string.codepoints.to_a + unpacked = [] + pos = 0 + marker = 0 + eoc = codepoints.length + while (pos < eoc) + pos += 1 + previous = codepoints[pos - 1] + current = codepoints[pos] + + # See http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundary_Rules + should_break = + if pos == eoc + true + # GB3. 
CR X LF + elsif previous == database.boundary[:cr] && current == database.boundary[:lf] + false + # GB4. (Control|CR|LF) ÷ + elsif previous && in_char_class?(previous, [:control, :cr, :lf]) + true + # GB5. ÷ (Control|CR|LF) + elsif in_char_class?(current, [:control, :cr, :lf]) + true + # GB6. L X (L|V|LV|LVT) + elsif database.boundary[:l] === previous && in_char_class?(current, [:l, :v, :lv, :lvt]) + false + # GB7. (LV|V) X (V|T) + elsif in_char_class?(previous, [:lv, :v]) && in_char_class?(current, [:v, :t]) + false + # GB8. (LVT|T) X (T) + elsif in_char_class?(previous, [:lvt, :t]) && database.boundary[:t] === current + false + # GB9. X (Extend | ZWJ) + elsif in_char_class?(current, [:extend, :zwj]) + false + # GB9a. X SpacingMark + elsif database.boundary[:spacingmark] === current + false + # GB9b. Prepend X + elsif database.boundary[:prepend] === previous + false + # GB10. (E_Base | EBG) Extend* X E_Modifier + elsif (marker...pos).any? { |i| in_char_class?(codepoints[i], [:e_base, :e_base_gaz]) && codepoints[i + 1...pos].all? { |c| database.boundary[:extend] === c } } && database.boundary[:e_modifier] === current + false + # GB11. ZWJ X (Glue_After_Zwj | EBG) + elsif database.boundary[:zwj] === previous && in_char_class?(current, [:glue_after_zwj, :e_base_gaz]) + false + # GB12. ^ (RI RI)* RI X RI + # GB13. [^RI] (RI RI)* RI X RI + elsif codepoints[marker..pos].all? { |c| database.boundary[:regional_indicator] === c } && codepoints[marker..pos].count { |c| database.boundary[:regional_indicator] === c }.even? + false + # GB999. Any ÷ Any + else + true + end + + if should_break + unpacked << codepoints[marker..pos - 1] + marker = pos + end + end + unpacked + end + + # Reverse operation of unpack_graphemes. + # + # Unicode.pack_graphemes(Unicode.unpack_graphemes('क्षि')) # => 'क्षि' + def pack_graphemes(unpacked) + unpacked.flatten.pack("U*") + end + + # Re-order codepoints so the string becomes canonical. + def reorder_characters(codepoints) + length = codepoints.length - 1 + pos = 0 + while pos < length do + cp1, cp2 = database.codepoints[codepoints[pos]], database.codepoints[codepoints[pos + 1]] + if (cp1.combining_class > cp2.combining_class) && (cp2.combining_class > 0) + codepoints[pos..pos + 1] = cp2.code, cp1.code + pos += (pos > 0 ? -1 : 1) + else + pos += 1 + end + end + codepoints + end + + # Decompose composed characters to the decomposed form. + def decompose(type, codepoints) + codepoints.inject([]) do |decomposed, cp| + # if it's a hangul syllable starter character + if HANGUL_SBASE <= cp && cp < HANGUL_SLAST + sindex = cp - HANGUL_SBASE + ncp = [] # new codepoints + ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT + ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT + tindex = sindex % HANGUL_TCOUNT + ncp << (HANGUL_TBASE + tindex) unless tindex == 0 + decomposed.concat ncp + # if the codepoint is decomposable in with the current decomposition type + elsif (ncp = database.codepoints[cp].decomp_mapping) && (!database.codepoints[cp].decomp_type || type == :compatibility) + decomposed.concat decompose(type, ncp.dup) + else + decomposed << cp + end + end + end + + # Compose decomposed characters to the composed form. 
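+ # Editor's illustrative sketch (not part of the upstream file): composing a
+ # canonical decomposition round-trips back to the precomposed form, e.g.
+ #
+ #   decomposed = ActiveSupport::Multibyte::Unicode.decompose(:canonical, 'é'.codepoints)  # => [101, 769]
+ #   ActiveSupport::Multibyte::Unicode.compose(decomposed).pack('U*')                      # => 'é'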
+ def compose(codepoints) + pos = 0 + eoa = codepoints.length - 1 + starter_pos = 0 + starter_char = codepoints[0] + previous_combining_class = -1 + while pos < eoa + pos += 1 + lindex = starter_char - HANGUL_LBASE + # -- Hangul + if 0 <= lindex && lindex < HANGUL_LCOUNT + vindex = codepoints[starter_pos + 1] - HANGUL_VBASE rescue vindex = -1 + if 0 <= vindex && vindex < HANGUL_VCOUNT + tindex = codepoints[starter_pos + 2] - HANGUL_TBASE rescue tindex = -1 + if 0 <= tindex && tindex < HANGUL_TCOUNT + j = starter_pos + 2 + eoa -= 2 + else + tindex = 0 + j = starter_pos + 1 + eoa -= 1 + end + codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE + end + starter_pos += 1 + starter_char = codepoints[starter_pos] + # -- Other characters + else + current_char = codepoints[pos] + current = database.codepoints[current_char] + if current.combining_class > previous_combining_class + if ref = database.composition_map[starter_char] + composition = ref[current_char] + else + composition = nil + end + unless composition.nil? + codepoints[starter_pos] = composition + starter_char = composition + codepoints.delete_at pos + eoa -= 1 + pos -= 1 + previous_combining_class = -1 + else + previous_combining_class = current.combining_class + end + else + previous_combining_class = current.combining_class + end + if current.combining_class == 0 + starter_pos = pos + starter_char = codepoints[pos] + end + end + end + codepoints + end + + # Rubinius' String#scrub, however, doesn't support ASCII-incompatible chars. + if !defined?(Rubinius) + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(string, force = false) + return string if string.empty? + return recode_windows1252_chars(string) if force + string.scrub { |bad| recode_windows1252_chars(bad) } + end + else + def tidy_bytes(string, force = false) + return string if string.empty? + return recode_windows1252_chars(string) if force + + # We can't transcode to the same format, so we choose a nearly-identical encoding. + # We're going to 'transcode' bytes from UTF-8 when possible, then fall back to + # CP1252 when we get errors. The final string will be 'converted' back to UTF-8 + # before returning. + reader = Encoding::Converter.new(Encoding::UTF_8, Encoding::UTF_16LE) + + source = string.dup + out = "".force_encoding(Encoding::UTF_16LE) + + loop do + reader.primitive_convert(source, out) + _, _, _, error_bytes, _ = reader.primitive_errinfo + break if error_bytes.nil? + out << error_bytes.encode(Encoding::UTF_16LE, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + + reader.finish + + out.encode!(Encoding::UTF_8) + end + end + + # Returns the KC normalization of the string by default. NFKC is + # considered the best normalization form for passing strings to databases + # and validations. + # + # * string - The string to perform normalization on. + # * form - The form you want to normalize in. Should be one of + # the following: :c, :kc, :d, or :kd. + # Default is ActiveSupport::Multibyte::Unicode.default_normalization_form. 
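+ # Editor's illustrative sketch (not part of the upstream file):
+ #
+ #   ActiveSupport::Multibyte::Unicode.normalize('é', :d).codepoints  # => [101, 769]
+ #   ActiveSupport::Multibyte::Unicode.normalize("e\u0301", :c)       # => 'é'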
+ def normalize(string, form = nil) + form ||= @default_normalization_form + # See http://www.unicode.org/reports/tr15, Table 1 + codepoints = string.codepoints.to_a + case form + when :d + reorder_characters(decompose(:canonical, codepoints)) + when :c + compose(reorder_characters(decompose(:canonical, codepoints))) + when :kd + reorder_characters(decompose(:compatibility, codepoints)) + when :kc + compose(reorder_characters(decompose(:compatibility, codepoints))) + else + raise ArgumentError, "#{form} is not a valid normalization variant", caller + end.pack("U*".freeze) + end + + def downcase(string) + apply_mapping string, :lowercase_mapping + end + + def upcase(string) + apply_mapping string, :uppercase_mapping + end + + def swapcase(string) + apply_mapping string, :swapcase_mapping + end + + # Holds data about a codepoint in the Unicode database. + class Codepoint + attr_accessor :code, :combining_class, :decomp_type, :decomp_mapping, :uppercase_mapping, :lowercase_mapping + + # Initializing Codepoint object with default values + def initialize + @combining_class = 0 + @uppercase_mapping = 0 + @lowercase_mapping = 0 + end + + def swapcase_mapping + uppercase_mapping > 0 ? uppercase_mapping : lowercase_mapping + end + end + + # Holds static data from the Unicode database. + class UnicodeDatabase + ATTRIBUTES = :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252 + + attr_writer(*ATTRIBUTES) + + def initialize + @codepoints = Hash.new(Codepoint.new) + @composition_exclusion = [] + @composition_map = {} + @boundary = {} + @cp1252 = {} + end + + # Lazy load the Unicode database so it's only loaded when it's actually used + ATTRIBUTES.each do |attr_name| + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{attr_name} # def codepoints + load # load + @#{attr_name} # @codepoints + end # end + EOS + end + + # Loads the Unicode database and returns all the internal objects of + # UnicodeDatabase. + def load + begin + @codepoints, @composition_exclusion, @composition_map, @boundary, @cp1252 = File.open(self.class.filename, "rb") { |f| Marshal.load f.read } + rescue => e + raise IOError.new("Couldn't load the Unicode tables for UTF8Handler (#{e.message}), ActiveSupport::Multibyte is unusable") + end + + # Redefine the === method so we can write shorter rules for grapheme cluster breaks + @boundary.each_key do |k| + @boundary[k].instance_eval do + def ===(other) + detect { |i| i === other } ? true : false + end + end if @boundary[k].kind_of?(Array) + end + + # define attr_reader methods for the instance variables + class << self + attr_reader(*ATTRIBUTES) + end + end + + # Returns the directory in which the data files are stored. + def self.dirname + File.expand_path("../values", __dir__) + end + + # Returns the filename for the data file for this version. 
+ def self.filename + File.expand_path File.join(dirname, "unicode_tables.dat") + end + end + + private + + def apply_mapping(string, mapping) + database.codepoints + string.each_codepoint.map do |codepoint| + cp = database.codepoints[codepoint] + if cp && (ncp = cp.send(mapping)) && ncp > 0 + ncp + else + codepoint + end + end.pack("U*") + end + + def recode_windows1252_chars(string) + string.encode(Encoding::UTF_8, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + + def database + @database ||= UnicodeDatabase.new + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications.rb new file mode 100644 index 0000000..6207de8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications.rb @@ -0,0 +1,216 @@ +# frozen_string_literal: true + +require "active_support/notifications/instrumenter" +require "active_support/notifications/fanout" +require "active_support/per_thread_registry" + +module ActiveSupport + # = Notifications + # + # ActiveSupport::Notifications provides an instrumentation API for + # Ruby. + # + # == Instrumenters + # + # To instrument an event you just need to do: + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # That first executes the block and then notifies all subscribers once done. + # + # In the example above +render+ is the name of the event, and the rest is called + # the _payload_. The payload is a mechanism that allows instrumenters to pass + # extra information to subscribers. Payloads consist of a hash whose contents + # are arbitrary and generally depend on the event. + # + # == Subscribers + # + # You can consume those events and the information they provide by registering + # a subscriber. + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for this notification + # payload # => Hash, the payload + # end + # + # For instance, let's store all "render" events in an array: + # + # events = [] + # + # ActiveSupport::Notifications.subscribe('render') do |*args| + # events << ActiveSupport::Notifications::Event.new(*args) + # end + # + # That code returns right away, you are just subscribing to "render" events. + # The block is saved and will be called whenever someone instruments "render": + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # event = events.first + # event.name # => "render" + # event.duration # => 10 (in milliseconds) + # event.payload # => { extra: :information } + # + # The block in the subscribe call gets the name of the event, start + # timestamp, end timestamp, a string with a unique identifier for that event + # (something like "535801666f04d0298cd6"), and a hash with the payload, in + # that order. + # + # If an exception happens during that particular instrumentation the payload will + # have a key :exception with an array of two elements as value: a string with + # the name of the exception class, and the exception message. + # The :exception_object key of the payload will have the exception + # itself as the value. 
+ # + # As the previous example depicts, the class ActiveSupport::Notifications::Event + # is able to take the arguments as they come and provide an object-oriented + # interface to that data. + # + # It is also possible to pass an object which responds to call method + # as the second parameter to the subscribe method instead of a block: + # + # module ActionController + # class PageRequest + # def call(name, started, finished, unique_id, payload) + # Rails.logger.debug ['notification:', name, started, finished, unique_id, payload].join(' ') + # end + # end + # end + # + # ActiveSupport::Notifications.subscribe('process_action.action_controller', ActionController::PageRequest.new) + # + # resulting in the following output within the logs including a hash with the payload: + # + # notification: process_action.action_controller 2012-04-13 01:08:35 +0300 2012-04-13 01:08:35 +0300 af358ed7fab884532ec7 { + # controller: "Devise::SessionsController", + # action: "new", + # params: {"action"=>"new", "controller"=>"devise/sessions"}, + # format: :html, + # method: "GET", + # path: "/login/sign_in", + # status: 200, + # view_runtime: 279.3080806732178, + # db_runtime: 40.053 + # } + # + # You can also subscribe to all events whose name matches a certain regexp: + # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # ... + # end + # + # and even pass no argument to subscribe, in which case you are subscribing + # to all events. + # + # == Temporary Subscriptions + # + # Sometimes you do not want to subscribe to an event for the entire life of + # the application. There are two ways to unsubscribe. + # + # WARNING: The instrumentation framework is designed for long-running subscribers, + # use this feature sparingly because it wipes some internal caches and that has + # a negative impact on performance. + # + # === Subscribe While a Block Runs + # + # You can subscribe to some event temporarily while some block runs. For + # example, in + # + # callback = lambda {|*args| ... } + # ActiveSupport::Notifications.subscribed(callback, "sql.active_record") do + # ... + # end + # + # the callback will be called for all "sql.active_record" events instrumented + # during the execution of the block. The callback is unsubscribed automatically + # after that. + # + # === Manual Unsubscription + # + # The +subscribe+ method returns a subscriber object: + # + # subscriber = ActiveSupport::Notifications.subscribe("render") do |*args| + # ... + # end + # + # To prevent that block from being called anymore, just unsubscribe passing + # that reference: + # + # ActiveSupport::Notifications.unsubscribe(subscriber) + # + # You can also unsubscribe by passing the name of the subscriber object. Note + # that this will unsubscribe all subscriptions with the given name: + # + # ActiveSupport::Notifications.unsubscribe("render") + # + # == Default Queue + # + # Notifications ships with a queue implementation that consumes and publishes events + # to all log subscribers. You can use any queue implementation you want. + # + module Notifications + class << self + attr_accessor :notifier + + def publish(name, *args) + notifier.publish(name, *args) + end + + def instrument(name, payload = {}) + if notifier.listening?(name) + instrumenter.instrument(name, payload) { yield payload if block_given? } + else + yield payload if block_given? 
+ end + end + + def subscribe(*args, &block) + notifier.subscribe(*args, &block) + end + + def subscribed(callback, *args, &block) + subscriber = subscribe(*args, &callback) + yield + ensure + unsubscribe(subscriber) + end + + def unsubscribe(subscriber_or_name) + notifier.unsubscribe(subscriber_or_name) + end + + def instrumenter + InstrumentationRegistry.instance.instrumenter_for(notifier) + end + end + + # This class is a registry which holds all of the +Instrumenter+ objects + # in a particular thread local. To access the +Instrumenter+ object for a + # particular +notifier+, you can call the following method: + # + # InstrumentationRegistry.instrumenter_for(notifier) + # + # The instrumenters for multiple notifiers are held in a single instance of + # this class. + class InstrumentationRegistry # :nodoc: + extend ActiveSupport::PerThreadRegistry + + def initialize + @registry = {} + end + + def instrumenter_for(notifier) + @registry[notifier] ||= Instrumenter.new(notifier) + end + end + + self.notifier = Fanout.new + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/fanout.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/fanout.rb new file mode 100644 index 0000000..f4a766f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/fanout.rb @@ -0,0 +1,159 @@ +# frozen_string_literal: true + +require "mutex_m" +require "concurrent/map" + +module ActiveSupport + module Notifications + # This is a default queue implementation that ships with Notifications. + # It just pushes events to all registered log subscribers. + # + # This class is thread safe. All methods are reentrant. + class Fanout + include Mutex_m + + def initialize + @subscribers = [] + @listeners_for = Concurrent::Map.new + super + end + + def subscribe(pattern = nil, callable = nil, &block) + subscriber = Subscribers.new(pattern, callable || block) + synchronize do + @subscribers << subscriber + @listeners_for.clear + end + subscriber + end + + def unsubscribe(subscriber_or_name) + synchronize do + case subscriber_or_name + when String + @subscribers.reject! { |s| s.matches?(subscriber_or_name) } + else + @subscribers.delete(subscriber_or_name) + end + + @listeners_for.clear + end + end + + def start(name, id, payload) + listeners_for(name).each { |s| s.start(name, id, payload) } + end + + def finish(name, id, payload, listeners = listeners_for(name)) + listeners.each { |s| s.finish(name, id, payload) } + end + + def publish(name, *args) + listeners_for(name).each { |s| s.publish(name, *args) } + end + + def listeners_for(name) + # this is correctly done double-checked locking (Concurrent::Map's lookups have volatile semantics) + @listeners_for[name] || synchronize do + # use synchronisation when accessing @subscribers + @listeners_for[name] ||= @subscribers.select { |s| s.subscribed_to?(name) } + end + end + + def listening?(name) + listeners_for(name).any? + end + + # This is a sync queue, so there is no waiting. 
+ def wait + end + + module Subscribers # :nodoc: + def self.new(pattern, listener) + if listener.respond_to?(:start) && listener.respond_to?(:finish) + subscriber = Evented.new pattern, listener + else + subscriber = Timed.new pattern, listener + end + + unless pattern + AllMessages.new(subscriber) + else + subscriber + end + end + + class Evented #:nodoc: + def initialize(pattern, delegate) + @pattern = pattern + @delegate = delegate + @can_publish = delegate.respond_to?(:publish) + end + + def publish(name, *args) + if @can_publish + @delegate.publish name, *args + end + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def subscribed_to?(name) + @pattern === name + end + + def matches?(name) + @pattern && @pattern === name + end + end + + class Timed < Evented # :nodoc: + def publish(name, *args) + @delegate.call name, *args + end + + def start(name, id, payload) + timestack = Thread.current[:_timestack] ||= [] + timestack.push Time.now + end + + def finish(name, id, payload) + timestack = Thread.current[:_timestack] + started = timestack.pop + @delegate.call(name, started, Time.now, id, payload) + end + end + + class AllMessages # :nodoc: + def initialize(delegate) + @delegate = delegate + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def publish(name, *args) + @delegate.publish name, *args + end + + def subscribed_to?(name) + true + end + + alias :matches? :=== + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/instrumenter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/instrumenter.rb new file mode 100644 index 0000000..e99f5ee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/instrumenter.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require "securerandom" + +module ActiveSupport + module Notifications + # Instrumenters are stored in a thread local. + class Instrumenter + attr_reader :id + + def initialize(notifier) + @id = unique_id + @notifier = notifier + end + + # Instrument the given block by measuring the time taken to execute it + # and publish it. Notice that events get sent even if an error occurs + # in the passed-in block. + def instrument(name, payload = {}) + # some of the listeners might have state + listeners_state = start name, payload + begin + yield payload + rescue Exception => e + payload[:exception] = [e.class.name, e.message] + payload[:exception_object] = e + raise e + ensure + finish_with_state listeners_state, name, payload + end + end + + # Send a start notification with +name+ and +payload+. + def start(name, payload) + @notifier.start name, @id, payload + end + + # Send a finish notification with +name+ and +payload+. 
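+ # Editor's illustrative sketch (not part of the upstream file): +start+ and
+ # +finish+ can bracket work manually when wrapping it in a block is
+ # inconvenient, e.g.
+ #
+ #   instrumenter = ActiveSupport::Notifications.instrumenter
+ #   instrumenter.start('render', {})
+ #   # ... perform the work to be measured ...
+ #   instrumenter.finish('render', {})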
+ def finish(name, payload) + @notifier.finish name, @id, payload + end + + def finish_with_state(listeners_state, name, payload) + @notifier.finish name, @id, payload, listeners_state + end + + private + + def unique_id + SecureRandom.hex(10) + end + end + + class Event + attr_reader :name, :time, :transaction_id, :payload, :children + attr_accessor :end + + def initialize(name, start, ending, transaction_id, payload) + @name = name + @payload = payload.dup + @time = start + @transaction_id = transaction_id + @end = ending + @children = [] + @duration = nil + end + + # Returns the difference in milliseconds between when the execution of the + # event started and when it ended. + # + # ActiveSupport::Notifications.subscribe('wait') do |*args| + # @event = ActiveSupport::Notifications::Event.new(*args) + # end + # + # ActiveSupport::Notifications.instrument('wait') do + # sleep 1 + # end + # + # @event.duration # => 1000.138 + def duration + @duration ||= 1000.0 * (self.end - time) + end + + def <<(event) + @children << event + end + + def parent_of?(event) + @children.include? event + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper.rb new file mode 100644 index 0000000..8fd6e93 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper.rb @@ -0,0 +1,371 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + extend ActiveSupport::Autoload + + eager_autoload do + autoload :NumberConverter + autoload :RoundingHelper + autoload :NumberToRoundedConverter + autoload :NumberToDelimitedConverter + autoload :NumberToHumanConverter + autoload :NumberToHumanSizeConverter + autoload :NumberToPhoneConverter + autoload :NumberToCurrencyConverter + autoload :NumberToPercentageConverter + end + + extend self + + # Formats a +number+ into a phone number (US by default e.g., (555) + # 123-9876). You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :area_code - Adds parentheses around the area code. + # * :delimiter - Specifies the delimiter to use + # (defaults to "-"). + # * :extension - Specifies an extension to add to the + # end of the generated number. + # * :country_code - Sets the country code for the phone + # number. + # * :pattern - Specifies how the number is divided into three + # groups with the custom regexp to override the default format. + # ==== Examples + # + # number_to_phone(5551234) # => "555-1234" + # number_to_phone('5551234') # => "555-1234" + # number_to_phone(1235551234) # => "123-555-1234" + # number_to_phone(1235551234, area_code: true) # => "(123) 555-1234" + # number_to_phone(1235551234, delimiter: ' ') # => "123 555 1234" + # number_to_phone(1235551234, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # number_to_phone(1235551234, country_code: 1) # => "+1-123-555-1234" + # number_to_phone('123a456') # => "123a456" + # + # number_to_phone(1235551234, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # number_to_phone(75561234567, pattern: /(\d{1,4})(\d{4})(\d{4})$/, area_code: true) + # # => "(755) 6123-4567" + # number_to_phone(13312345678, pattern: /(\d{3})(\d{4})(\d{4})$/) + # # => "133-1234-5678" + def number_to_phone(number, options = {}) + NumberToPhoneConverter.convert(number, options) + end + + # Formats a +number+ into a currency string (e.g., $13.65). 
You + # can customize the format in the +options+ hash. + # + # The currency unit and number formatting of the current locale will be used + # unless otherwise specified in the provided options. No currency conversion + # is performed. If the user is given a way to change their locale, they will + # also be able to change the relative value of the currency displayed with + # this helper. If your application will ever support multiple locales, you + # may want to specify a constant :locale option or consider + # using a library capable of currency conversion. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the level of precision (defaults + # to 2). + # * :unit - Sets the denomination of the currency + # (defaults to "$"). + # * :separator - Sets the separator between the units + # (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :format - Sets the format for non-negative numbers + # (defaults to "%u%n"). Fields are %u for the + # currency, and %n for the number. + # * :negative_format - Sets the format for negative + # numbers (defaults to prepending a hyphen to the formatted + # number given by :format). Accepts the same fields + # than :format, except %n is here the + # absolute value of the number. + # + # ==== Examples + # + # number_to_currency(1234567890.50) # => "$1,234,567,890.50" + # number_to_currency(1234567890.506) # => "$1,234,567,890.51" + # number_to_currency(1234567890.506, precision: 3) # => "$1,234,567,890.506" + # number_to_currency(1234567890.506, locale: :fr) # => "1 234 567 890,51 €" + # number_to_currency('123a456') # => "$123a456" + # + # number_to_currency(-1234567890.50, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + def number_to_currency(number, options = {}) + NumberToCurrencyConverter.convert(number, options) + end + + # Formats a +number+ as a percentage string (e.g., 65%). You can + # customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). + # * :format - Specifies the format of the percentage + # string The number field is %n (defaults to "%n%"). 
+ # + # ==== Examples + # + # number_to_percentage(100) # => "100.000%" + # number_to_percentage('98') # => "98.000%" + # number_to_percentage(100, precision: 0) # => "100%" + # number_to_percentage(1000, delimiter: '.', separator: ',') # => "1.000,000%" + # number_to_percentage(302.24398923423, precision: 5) # => "302.24399%" + # number_to_percentage(1000, locale: :fr) # => "1000,000%" + # number_to_percentage(1000, precision: nil) # => "1000%" + # number_to_percentage('98a') # => "98a%" + # number_to_percentage(100, format: '%n %') # => "100.000 %" + def number_to_percentage(number, options = {}) + NumberToPercentageConverter.convert(number, options) + end + + # Formats a +number+ with grouped thousands using +delimiter+ + # (e.g., 12,324). You can customize the format in the +options+ + # hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter_pattern - Sets a custom regular expression used for + # deriving the placement of delimiter. Helpful when using currency formats + # like INR. + # + # ==== Examples + # + # number_to_delimited(12345678) # => "12,345,678" + # number_to_delimited('123456') # => "123,456" + # number_to_delimited(12345678.05) # => "12,345,678.05" + # number_to_delimited(12345678, delimiter: '.') # => "12.345.678" + # number_to_delimited(12345678, delimiter: ',') # => "12,345,678" + # number_to_delimited(12345678.05, separator: ' ') # => "12,345,678 05" + # number_to_delimited(12345678.05, locale: :fr) # => "12 345 678,05" + # number_to_delimited('112a') # => "112a" + # number_to_delimited(98765432.98, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # number_to_delimited("123456.78", + # delimiter_pattern: /(\d+?)(?=(\d\d)+(\d)(?!\d))/) + # # => "1,23,456.78" + def number_to_delimited(number, options = {}) + NumberToDelimitedConverter.convert(number, options) + end + + # Formats a +number+ with the specified level of + # :precision (e.g., 112.32 has a precision of 2 if + # +:significant+ is +false+, and 5 if +:significant+ is +true+). + # You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). 
+ # + # ==== Examples + # + # number_to_rounded(111.2345) # => "111.235" + # number_to_rounded(111.2345, precision: 2) # => "111.23" + # number_to_rounded(13, precision: 5) # => "13.00000" + # number_to_rounded(389.32314, precision: 0) # => "389" + # number_to_rounded(111.2345, significant: true) # => "111" + # number_to_rounded(111.2345, precision: 1, significant: true) # => "100" + # number_to_rounded(13, precision: 5, significant: true) # => "13.000" + # number_to_rounded(13, precision: nil) # => "13" + # number_to_rounded(111.234, locale: :fr) # => "111,234" + # + # number_to_rounded(13, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # + # number_to_rounded(389.32314, precision: 4, significant: true) # => "389.3" + # number_to_rounded(1111.2345, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + def number_to_rounded(number, options = {}) + NumberToRoundedConverter.convert(number, options) + end + + # Formats the bytes in +number+ into a more understandable + # representation (e.g., giving it 1500 yields 1.5 KB). This + # method is useful for reporting file sizes to users. You can + # customize the format in the +options+ hash. + # + # See number_to_human if you want to pretty-print a + # generic number. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # + # ==== Examples + # + # number_to_human_size(123) # => "123 Bytes" + # number_to_human_size(1234) # => "1.21 KB" + # number_to_human_size(12345) # => "12.1 KB" + # number_to_human_size(1234567) # => "1.18 MB" + # number_to_human_size(1234567890) # => "1.15 GB" + # number_to_human_size(1234567890123) # => "1.12 TB" + # number_to_human_size(1234567890123456) # => "1.1 PB" + # number_to_human_size(1234567890123456789) # => "1.07 EB" + # number_to_human_size(1234567, precision: 2) # => "1.2 MB" + # number_to_human_size(483989, precision: 2) # => "470 KB" + # number_to_human_size(1234567, precision: 2, separator: ',') # => "1,2 MB" + # number_to_human_size(1234567890123, precision: 5) # => "1.1228 TB" + # number_to_human_size(524288000, precision: 5) # => "500 MB" + def number_to_human_size(number, options = {}) + NumberToHumanSizeConverter.convert(number, options) + end + + # Pretty prints (formats and approximates) a number in a way it + # is more readable by humans (eg.: 1200000000 becomes "1.2 + # Billion"). This is useful for numbers that can get very large + # (and too hard to read). + # + # See number_to_human_size if you want to print a file + # size. + # + # You can also define your own unit-quantifier names if you want + # to use other decimal units (eg.: 1500 becomes "1.5 + # kilometers", 0.150 becomes "150 milliliters", etc). You may + # define a wide range of unit quantifiers, even fractional ones + # (centi, deci, mili, etc). + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). 
+ # * :precision - Sets the precision of the number + # (defaults to 3). + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # * :units - A Hash of unit quantifier names. Or a + # string containing an i18n scope where to find this hash. It + # might have the following keys: + # * *integers*: :unit, :ten, + # :hundred, :thousand, :million, + # :billion, :trillion, + # :quadrillion + # * *fractionals*: :deci, :centi, + # :mili, :micro, :nano, + # :pico, :femto + # * :format - Sets the format of the output string + # (defaults to "%n %u"). The field types are: + # * %u - The quantifier (ex.: 'thousand') + # * %n - The number + # + # ==== Examples + # + # number_to_human(123) # => "123" + # number_to_human(1234) # => "1.23 Thousand" + # number_to_human(12345) # => "12.3 Thousand" + # number_to_human(1234567) # => "1.23 Million" + # number_to_human(1234567890) # => "1.23 Billion" + # number_to_human(1234567890123) # => "1.23 Trillion" + # number_to_human(1234567890123456) # => "1.23 Quadrillion" + # number_to_human(1234567890123456789) # => "1230 Quadrillion" + # number_to_human(489939, precision: 2) # => "490 Thousand" + # number_to_human(489939, precision: 4) # => "489.9 Thousand" + # number_to_human(1234567, precision: 4, + # significant: false) # => "1.2346 Million" + # number_to_human(1234567, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + # + # number_to_human(500000000, precision: 5) # => "500 Million" + # number_to_human(12345012345, significant: false) # => "12.345 Billion" + # + # Non-significant zeros after the decimal separator are stripped + # out by default (set :strip_insignificant_zeros to + # +false+ to change that): + # + # number_to_human(12.00001) # => "12" + # number_to_human(12.00001, strip_insignificant_zeros: false) # => "12.0" + # + # ==== Custom Unit Quantifiers + # + # You can also use your own custom unit quantifiers: + # number_to_human(500000, units: { unit: 'ml', thousand: 'lt' }) # => "500 lt" + # + # If in your I18n locale you have: + # + # distance: + # centi: + # one: "centimeter" + # other: "centimeters" + # unit: + # one: "meter" + # other: "meters" + # thousand: + # one: "kilometer" + # other: "kilometers" + # billion: "gazillion-distance" + # + # Then you could do: + # + # number_to_human(543934, units: :distance) # => "544 kilometers" + # number_to_human(54393498, units: :distance) # => "54400 kilometers" + # number_to_human(54393498000, units: :distance) # => "54.4 gazillion-distance" + # number_to_human(343, units: :distance, precision: 1) # => "300 meters" + # number_to_human(1, units: :distance) # => "1 meter" + # number_to_human(0.34, units: :distance) # => "34 centimeters" + def number_to_human(number, options = {}) + NumberToHumanConverter.convert(number, options) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_converter.rb new file mode 100644 index 0000000..06ba797 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_converter.rb @@ -0,0 +1,184 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/hash/keys" +require "active_support/i18n" +require "active_support/core_ext/class/attribute" + +module ActiveSupport + module NumberHelper + class NumberConverter # :nodoc: + # Default and i18n option namespace per class + class_attribute :namespace + + # Does the object need a number that is a valid float? + class_attribute :validate_float + + attr_reader :number, :opts + + DEFAULTS = { + # Used in number_to_delimited + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: { + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: ".", + # Delimits thousands (e.g. 1,000,000 is a million) (always in groups of three) + delimiter: ",", + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3, + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false, + # If set, the zeros after the decimal separator will always be stripped (eg.: 1.200 will be 1.2) + strip_insignificant_zeros: false + }, + + # Used in number_to_currency + currency: { + format: { + format: "%u%n", + negative_format: "-%u%n", + unit: "$", + # These five are to override number.format and are optional + separator: ".", + delimiter: ",", + precision: 2, + significant: false, + strip_insignificant_zeros: false + } + }, + + # Used in number_to_percentage + percentage: { + format: { + delimiter: "", + format: "%n%" + } + }, + + # Used in number_to_rounded + precision: { + format: { + delimiter: "" + } + }, + + # Used in number_to_human_size and number_to_human + human: { + format: { + # These five are to override number.format and are optional + delimiter: "", + precision: 3, + significant: true, + strip_insignificant_zeros: true + }, + # Used in number_to_human_size + storage_units: { + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u", + units: { + byte: "Bytes", + kb: "KB", + mb: "MB", + gb: "GB", + tb: "TB" + } + }, + # Used in number_to_human + decimal_units: { + format: "%n %u", + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. + units: { + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "", + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: "Thousand", + million: "Million", + billion: "Billion", + trillion: "Trillion", + quadrillion: "Quadrillion" + } + } + } + } + + def self.convert(number, options) + new(number, options).execute + end + + def initialize(number, options) + @number = number + @opts = options.symbolize_keys + end + + def execute + if !number + nil + elsif validate_float? && !valid_float? 
+ number + else + convert + end + end + + private + + def options + @options ||= format_options.merge(opts) + end + + def format_options + default_format_options.merge!(i18n_format_options) + end + + def default_format_options + options = DEFAULTS[:format].dup + options.merge!(DEFAULTS[namespace][:format]) if namespace + options + end + + def i18n_format_options + locale = opts[:locale] + options = I18n.translate(:'number.format', locale: locale, default: {}).dup + + if namespace + options.merge!(I18n.translate(:"number.#{namespace}.format", locale: locale, default: {})) + end + + options + end + + def translate_number_value_with_default(key, i18n_options = {}) + I18n.translate(key, { default: default_value(key), scope: :number }.merge!(i18n_options)) + end + + def translate_in_locale(key, i18n_options = {}) + translate_number_value_with_default(key, { locale: options[:locale] }.merge(i18n_options)) + end + + def default_value(key) + key.split(".").reduce(DEFAULTS) { |defaults, k| defaults[k.to_sym] } + end + + def valid_float? + Float(number) + rescue ArgumentError, TypeError + false + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_currency_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_currency_converter.rb new file mode 100644 index 0000000..3f037c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_currency_converter.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require "active_support/core_ext/numeric/inquiry" + +module ActiveSupport + module NumberHelper + class NumberToCurrencyConverter < NumberConverter # :nodoc: + self.namespace = :currency + + def convert + number = self.number.to_s.strip + format = options[:format] + + if number.to_f.negative? + format = options[:negative_format] + number = absolute_value(number) + end + + rounded_number = NumberToRoundedConverter.convert(number, options) + format.gsub("%n".freeze, rounded_number).gsub("%u".freeze, options[:unit]) + end + + private + + def absolute_value(number) + number.respond_to?(:abs) ? 
number.abs : number.sub(/\A-/, "") + end + + def options + @options ||= begin + defaults = default_format_options.merge(i18n_opts) + # Override negative format if format options are given + defaults[:negative_format] = "-#{opts[:format]}" if opts[:format] + defaults.merge!(opts) + end + end + + def i18n_opts + # Set International negative format if it does not exist + i18n = i18n_format_options + i18n[:negative_format] ||= "-#{i18n[:format]}" if i18n[:format] + i18n + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_delimited_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_delimited_converter.rb new file mode 100644 index 0000000..d5b5706 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_delimited_converter.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToDelimitedConverter < NumberConverter #:nodoc: + self.validate_float = true + + DEFAULT_DELIMITER_REGEX = /(\d)(?=(\d\d\d)+(?!\d))/ + + def convert + parts.join(options[:separator]) + end + + private + + def parts + left, right = number.to_s.split(".".freeze) + left.gsub!(delimiter_pattern) do |digit_to_delimit| + "#{digit_to_delimit}#{options[:delimiter]}" + end + [left, right].compact + end + + def delimiter_pattern + options.fetch(:delimiter_pattern, DEFAULT_DELIMITER_REGEX) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_converter.rb new file mode 100644 index 0000000..03eb667 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_converter.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToHumanConverter < NumberConverter # :nodoc: + DECIMAL_UNITS = { 0 => :unit, 1 => :ten, 2 => :hundred, 3 => :thousand, 6 => :million, 9 => :billion, 12 => :trillion, 15 => :quadrillion, + -1 => :deci, -2 => :centi, -3 => :mili, -6 => :micro, -9 => :nano, -12 => :pico, -15 => :femto } + INVERTED_DECIMAL_UNITS = DECIMAL_UNITS.invert + + self.namespace = :human + self.validate_float = true + + def convert # :nodoc: + @number = RoundingHelper.new(options).round(number) + @number = Float(number) + + # for backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + units = opts[:units] + exponent = calculate_exponent(units) + @number = number / (10**exponent) + + rounded_number = NumberToRoundedConverter.convert(number, options) + unit = determine_unit(units, exponent) + format.gsub("%n".freeze, rounded_number).gsub("%u".freeze, unit).strip + end + + private + + def format + options[:format] || translate_in_locale("human.decimal_units.format") + end + + def determine_unit(units, exponent) + exp = DECIMAL_UNITS[exponent] + case units + when Hash + units[exp] || "" + when String, Symbol + I18n.translate("#{units}.#{exp}", locale: options[:locale], count: number.to_i) + else + translate_in_locale("human.decimal_units.units.#{exp}", count: number.to_i) + end + end + + def calculate_exponent(units) + exponent = number != 0 ? 
Math.log10(number.abs).floor : 0 + unit_exponents(units).find { |e| exponent >= e } || 0 + end + + def unit_exponents(units) + case units + when Hash + units + when String, Symbol + I18n.translate(units.to_s, locale: options[:locale], raise: true) + when nil + translate_in_locale("human.decimal_units.units", raise: true) + else + raise ArgumentError, ":units must be a Hash or String translation scope." + end.keys.map { |e_name| INVERTED_DECIMAL_UNITS[e_name] }.sort_by(&:-@) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_size_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_size_converter.rb new file mode 100644 index 0000000..842f2fc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_size_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToHumanSizeConverter < NumberConverter #:nodoc: + STORAGE_UNITS = [:byte, :kb, :mb, :gb, :tb, :pb, :eb] + + self.namespace = :human + self.validate_float = true + + def convert + @number = Float(number) + + # for backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + if smaller_than_base? + number_to_format = number.to_i.to_s + else + human_size = number / (base**exponent) + number_to_format = NumberToRoundedConverter.convert(human_size, options) + end + conversion_format.gsub("%n".freeze, number_to_format).gsub("%u".freeze, unit) + end + + private + + def conversion_format + translate_number_value_with_default("human.storage_units.format", locale: options[:locale], raise: true) + end + + def unit + translate_number_value_with_default(storage_unit_key, locale: options[:locale], count: number.to_i, raise: true) + end + + def storage_unit_key + key_end = smaller_than_base? ? "byte" : STORAGE_UNITS[exponent] + "human.storage_units.units.#{key_end}" + end + + def exponent + max = STORAGE_UNITS.size - 1 + exp = (Math.log(number) / Math.log(base)).to_i + exp = max if exp > max # avoid overflow for the highest unit + exp + end + + def smaller_than_base? 
+ number.to_i < base + end + + def base + 1024 + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_percentage_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_percentage_converter.rb new file mode 100644 index 0000000..4dcdad2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_percentage_converter.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToPercentageConverter < NumberConverter # :nodoc: + self.namespace = :percentage + + def convert + rounded_number = NumberToRoundedConverter.convert(number, options) + options[:format].gsub("%n".freeze, rounded_number) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_phone_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_phone_converter.rb new file mode 100644 index 0000000..96410f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_phone_converter.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToPhoneConverter < NumberConverter #:nodoc: + def convert + str = country_code(opts[:country_code]).dup + str << convert_to_phone_number(number.to_s.strip) + str << phone_ext(opts[:extension]) + end + + private + + def convert_to_phone_number(number) + if opts[:area_code] + convert_with_area_code(number) + else + convert_without_area_code(number) + end + end + + def convert_with_area_code(number) + default_pattern = /(\d{1,3})(\d{3})(\d{4}$)/ + number.gsub!(regexp_pattern(default_pattern), + "(\\1) \\2#{delimiter}\\3") + number + end + + def convert_without_area_code(number) + default_pattern = /(\d{0,3})(\d{3})(\d{4})$/ + number.gsub!(regexp_pattern(default_pattern), + "\\1#{delimiter}\\2#{delimiter}\\3") + number.slice!(0, 1) if start_with_delimiter?(number) + number + end + + def start_with_delimiter?(number) + delimiter.present? && number.start_with?(delimiter) + end + + def delimiter + opts[:delimiter] || "-" + end + + def country_code(code) + code.blank? ? "" : "+#{code}#{delimiter}" + end + + def phone_ext(ext) + ext.blank? ? 
"" : " x #{ext}" + end + + def regexp_pattern(default_pattern) + opts.fetch :pattern, default_pattern + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_rounded_converter.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_rounded_converter.rb new file mode 100644 index 0000000..eb528a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_rounded_converter.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToRoundedConverter < NumberConverter # :nodoc: + self.namespace = :precision + self.validate_float = true + + def convert + helper = RoundingHelper.new(options) + rounded_number = helper.round(number) + + if precision = options[:precision] + if options[:significant] && precision > 0 + digits = helper.digit_count(rounded_number) + precision -= digits + precision = 0 if precision < 0 # don't let it be negative + end + + formatted_string = + if BigDecimal === rounded_number && rounded_number.finite? + s = rounded_number.to_s("F") + s << "0".freeze * precision + a, b = s.split(".".freeze, 2) + a << ".".freeze + a << b[0, precision] + else + "%00.#{precision}f" % rounded_number + end + else + formatted_string = rounded_number + end + + delimited_number = NumberToDelimitedConverter.convert(formatted_string, options) + format_number(delimited_number) + end + + private + + def strip_insignificant_zeros + options[:strip_insignificant_zeros] + end + + def format_number(number) + if strip_insignificant_zeros + escaped_separator = Regexp.escape(options[:separator]) + number.sub(/(#{escaped_separator})(\d*[1-9])?0+\z/, '\1\2').sub(/#{escaped_separator}\z/, "") + else + number + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/rounding_helper.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/rounding_helper.rb new file mode 100644 index 0000000..2ad8d49 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/rounding_helper.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class RoundingHelper # :nodoc: + attr_reader :options + + def initialize(options) + @options = options + end + + def round(number) + return number unless precision + number = convert_to_decimal(number) + if significant && precision > 0 + round_significant(number) + else + round_without_significant(number) + end + end + + def digit_count(number) + return 1 if number.zero? + (Math.log10(absolute_number(number)) + 1).floor + end + + private + def round_without_significant(number) + number = number.round(precision) + number = number.to_i if precision == 0 && number.finite? + number = number.abs if number.zero? # prevent showing negative zeros + number + end + + def round_significant(number) + return 0 if number.zero? 
+ digits = digit_count(number) + multiplier = 10**(digits - precision) + (number / BigDecimal(multiplier.to_f.to_s)).round * multiplier + end + + def convert_to_decimal(number) + case number + when Float, String + BigDecimal(number.to_s) + when Rational + BigDecimal(number, digit_count(number.to_i) + precision) + else + number.to_d + end + end + + def precision + options[:precision] + end + + def significant + options[:significant] + end + + def absolute_number(number) + number.respond_to?(:abs) ? number.abs : number.to_d.abs + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/option_merger.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/option_merger.rb new file mode 100644 index 0000000..ab9ca72 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/option_merger.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" + +module ActiveSupport + class OptionMerger #:nodoc: + instance_methods.each do |method| + undef_method(method) if method !~ /^(__|instance_eval|class|object_id)/ + end + + def initialize(context, options) + @context, @options = context, options + end + + private + def method_missing(method, *arguments, &block) + if arguments.first.is_a?(Proc) + proc = arguments.pop + arguments << lambda { |*args| @options.deep_merge(proc.call(*args)) } + else + arguments << (arguments.last.respond_to?(:to_hash) ? @options.deep_merge(arguments.pop) : @options.dup) + end + + @context.__send__(method, *arguments, &block) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_hash.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_hash.rb new file mode 100644 index 0000000..5758513 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_hash.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require "yaml" + +YAML.add_builtin_type("omap") do |type, val| + ActiveSupport::OrderedHash[val.map { |v| v.to_a.first }] +end + +module ActiveSupport + # DEPRECATED: ActiveSupport::OrderedHash implements a hash that preserves + # insertion order. + # + # oh = ActiveSupport::OrderedHash.new + # oh[:a] = 1 + # oh[:b] = 2 + # oh.keys # => [:a, :b], this order is guaranteed + # + # Also, maps the +omap+ feature for YAML files + # (See http://yaml.org/type/omap.html) to support ordered items + # when loading from yaml. + # + # ActiveSupport::OrderedHash is namespaced to prevent conflicts + # with other implementations. + class OrderedHash < ::Hash + def to_yaml_type + "!tag:yaml.org,2002:omap" + end + + def encode_with(coder) + coder.represent_seq "!omap", map { |k, v| { k => v } } + end + + def select(*args, &block) + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def nested_under_indifferent_access + self + end + + # Returns true to make sure that this hash is extractable via Array#extract_options! + def extractable_options? 
+ true + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_options.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_options.rb new file mode 100644 index 0000000..6ba1346 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_options.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" + +module ActiveSupport + # Usually key value pairs are handled something like this: + # + # h = {} + # h[:boy] = 'John' + # h[:girl] = 'Mary' + # h[:boy] # => 'John' + # h[:girl] # => 'Mary' + # h[:dog] # => nil + # + # Using +OrderedOptions+, the above code could be reduced to: + # + # h = ActiveSupport::OrderedOptions.new + # h.boy = 'John' + # h.girl = 'Mary' + # h.boy # => 'John' + # h.girl # => 'Mary' + # h.dog # => nil + # + # To raise an exception when the value is blank, append a + # bang to the key name, like: + # + # h.dog! # => raises KeyError: :dog is blank + # + class OrderedOptions < Hash + alias_method :_get, :[] # preserve the original #[] method + protected :_get # make it protected + + def []=(key, value) + super(key.to_sym, value) + end + + def [](key) + super(key.to_sym) + end + + def method_missing(name, *args) + name_string = name.to_s.dup + if name_string.chomp!("=") + self[name_string] = args.first + else + bangs = name_string.chomp!("!") + + if bangs + self[name_string].presence || raise(KeyError.new(":#{name_string} is blank")) + else + self[name_string] + end + end + end + + def respond_to_missing?(name, include_private) + true + end + end + + # +InheritableOptions+ provides a constructor to build an +OrderedOptions+ + # hash inherited from another hash. + # + # Use this if you already have some hash and you want to create a new one based on it. + # + # h = ActiveSupport::InheritableOptions.new({ girl: 'Mary', boy: 'John' }) + # h.girl # => 'Mary' + # h.boy # => 'John' + class InheritableOptions < OrderedOptions + def initialize(parent = nil) + if parent.kind_of?(OrderedOptions) + # use the faster _get when dealing with OrderedOptions + super() { |h, k| parent._get(k) } + elsif parent + super() { |h, k| parent[k] } + else + super() + end + end + + def inheritable_copy + self.class.new(self) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/per_thread_registry.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/per_thread_registry.rb new file mode 100644 index 0000000..eb92fb4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/per_thread_registry.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" + +module ActiveSupport + # NOTE: This approach has been deprecated for end-user code in favor of {thread_mattr_accessor}[rdoc-ref:Module#thread_mattr_accessor] and friends. + # Please use that approach instead. + # + # This module is used to encapsulate access to thread local variables. + # + # Instead of polluting the thread locals namespace: + # + # Thread.current[:connection_handler] + # + # you define a class that extends this module: + # + # module ActiveRecord + # class RuntimeRegistry + # extend ActiveSupport::PerThreadRegistry + # + # attr_accessor :connection_handler + # end + # end + # + # and invoke the declared instance accessors as class methods. 
So + # + # ActiveRecord::RuntimeRegistry.connection_handler = connection_handler + # + # sets a connection handler local to the current thread, and + # + # ActiveRecord::RuntimeRegistry.connection_handler + # + # returns a connection handler local to the current thread. + # + # This feature is accomplished by instantiating the class and storing the + # instance as a thread local keyed by the class name. In the example above + # a key "ActiveRecord::RuntimeRegistry" is stored in Thread.current. + # The class methods proxy to said thread local instance. + # + # If the class has an initializer, it must accept no arguments. + module PerThreadRegistry + def self.extended(object) + object.instance_variable_set "@per_thread_registry_key", object.name.freeze + end + + def instance + Thread.current[@per_thread_registry_key] ||= new + end + + private + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/proxy_object.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/proxy_object.rb new file mode 100644 index 0000000..0965fcd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/proxy_object.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module ActiveSupport + # A class with no predefined methods that behaves similarly to Builder's + # BlankSlate. Used for proxy classes. + class ProxyObject < ::BasicObject + undef_method :== + undef_method :equal? + + # Let ActiveSupport::ProxyObject at least raise exceptions. + def raise(*args) + ::Object.send(:raise, *args) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/rails.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/rails.rb new file mode 100644 index 0000000..5c34a0a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/rails.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +# This is private interface. +# +# Rails components cherry pick from Active Support as needed, but there are a +# few features that are used for sure in some way or another and it is not worth +# putting individual requires absolutely everywhere. Think blank? for example. +# +# This file is loaded by every Rails component except Active Support itself, +# but it does not belong to the Rails public interface. It is internal to +# Rails and can change anytime. + +# Defines Object#blank? and Object#present?. +require "active_support/core_ext/object/blank" + +# Rails own autoload, eager_load, etc. +require "active_support/dependencies/autoload" + +# Support for ClassMethods and the included macro. +require "active_support/concern" + +# Defines Class#class_attribute. +require "active_support/core_ext/class/attribute" + +# Defines Module#delegate. +require "active_support/core_ext/module/delegation" + +# Defines ActiveSupport::Deprecation. +require "active_support/deprecation" + +# Defines Regexp#match?. +# +# This should be removed when Rails needs Ruby 2.4 or later, and the require +# added where other Regexp extensions are being used (easy to grep). 
+require "active_support/core_ext/regexp" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/railtie.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/railtie.rb new file mode 100644 index 0000000..605b50d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/railtie.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/i18n_railtie" + +module ActiveSupport + class Railtie < Rails::Railtie # :nodoc: + config.active_support = ActiveSupport::OrderedOptions.new + + config.eager_load_namespaces << ActiveSupport + + initializer "active_support.set_authenticated_message_encryption" do |app| + config.after_initialize do + unless app.config.active_support.use_authenticated_message_encryption.nil? + ActiveSupport::MessageEncryptor.use_authenticated_message_encryption = + app.config.active_support.use_authenticated_message_encryption + end + end + end + + initializer "active_support.reset_all_current_attributes_instances" do |app| + app.reloader.before_class_unload { ActiveSupport::CurrentAttributes.clear_all } + app.executor.to_run { ActiveSupport::CurrentAttributes.reset_all } + app.executor.to_complete { ActiveSupport::CurrentAttributes.reset_all } + end + + initializer "active_support.deprecation_behavior" do |app| + if deprecation = app.config.active_support.deprecation + ActiveSupport::Deprecation.behavior = deprecation + end + end + + # Sets the default value for Time.zone + # If assigned value cannot be matched to a TimeZone, an exception will be raised. + initializer "active_support.initialize_time_zone" do |app| + begin + TZInfo::DataSource.get + rescue TZInfo::DataSourceNotFound => e + raise e.exception "tzinfo-data is not present. Please add gem 'tzinfo-data' to your Gemfile and run bundle install" + end + require "active_support/core_ext/time/zones" + Time.zone_default = Time.find_zone!(app.config.time_zone) + end + + # Sets the default week start + # If assigned value is not a valid day symbol (e.g. :sunday, :monday, ...), an exception will be raised. + initializer "active_support.initialize_beginning_of_week" do |app| + require "active_support/core_ext/date/calculations" + beginning_of_week_default = Date.find_beginning_of_week!(app.config.beginning_of_week) + + Date.beginning_of_week_default = beginning_of_week_default + end + + initializer "active_support.require_master_key" do |app| + if app.config.respond_to?(:require_master_key) && app.config.require_master_key + begin + app.credentials.key + rescue ActiveSupport::EncryptedFile::MissingKeyError => error + $stderr.puts error.message + exit 1 + end + end + end + + initializer "active_support.set_configs" do |app| + app.config.active_support.each do |k, v| + k = "#{k}=" + ActiveSupport.send(k, v) if ActiveSupport.respond_to? 
k + end + end + + initializer "active_support.set_hash_digest_class" do |app| + config.after_initialize do + if app.config.active_support.use_sha1_digests + ActiveSupport::Digest.hash_digest_class = ::Digest::SHA1 + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/reloader.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/reloader.rb new file mode 100644 index 0000000..b26d9c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/reloader.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" + +module ActiveSupport + #-- + # This class defines several callbacks: + # + # to_prepare -- Run once at application startup, and also from + # +to_run+. + # + # to_run -- Run before a work run that is reloading. If + # +reload_classes_only_on_change+ is true (the default), the class + # unload will have already occurred. + # + # to_complete -- Run after a work run that has reloaded. If + # +reload_classes_only_on_change+ is false, the class unload will + # have occurred after the work run, but before this callback. + # + # before_class_unload -- Run immediately before the classes are + # unloaded. + # + # after_class_unload -- Run immediately after the classes are + # unloaded. + # + class Reloader < ExecutionWrapper + define_callbacks :prepare + + define_callbacks :class_unload + + # Registers a callback that will run once at application startup and every time the code is reloaded. + def self.to_prepare(*args, &block) + set_callback(:prepare, *args, &block) + end + + # Registers a callback that will run immediately before the classes are unloaded. + def self.before_class_unload(*args, &block) + set_callback(:class_unload, *args, &block) + end + + # Registers a callback that will run immediately after the classes are unloaded. + def self.after_class_unload(*args, &block) + set_callback(:class_unload, :after, *args, &block) + end + + to_run(:after) { self.class.prepare! } + + # Initiate a manual reload + def self.reload! + executor.wrap do + new.tap do |instance| + begin + instance.run! + ensure + instance.complete! + end + end + end + prepare! + end + + def self.run! # :nodoc: + if check! + super + else + Null + end + end + + # Run the supplied block as a work unit, reloading code as needed + def self.wrap + executor.wrap do + super + end + end + + class_attribute :executor, default: Executor + class_attribute :check, default: lambda { false } + + def self.check! # :nodoc: + @should_reload ||= check.call + end + + def self.reloaded! # :nodoc: + @should_reload = false + end + + def self.prepare! # :nodoc: + new.run_callbacks(:prepare) + end + + def initialize + super + @locked = false + end + + # Acquire the ActiveSupport::Dependencies::Interlock unload lock, + # ensuring it will be released automatically + def require_unload_lock! + unless @locked + ActiveSupport::Dependencies.interlock.start_unloading + @locked = true + end + end + + # Release the unload lock if it has been previously obtained + def release_unload_lock! + if @locked + @locked = false + ActiveSupport::Dependencies.interlock.done_unloading + end + end + + def run! # :nodoc: + super + release_unload_lock! + end + + def class_unload!(&block) # :nodoc: + require_unload_lock! + run_callbacks(:class_unload, &block) + end + + def complete! # :nodoc: + super + self.class.reloaded! + ensure + release_unload_lock! 
+ end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/rescuable.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/rescuable.rb new file mode 100644 index 0000000..e0fa29c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/rescuable.rb @@ -0,0 +1,174 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # Rescuable module adds support for easier exception handling. + module Rescuable + extend Concern + + included do + class_attribute :rescue_handlers, default: [] + end + + module ClassMethods + # Rescue exceptions raised in controller actions. + # + # rescue_from receives a series of exception classes or class + # names, and a trailing :with option with the name of a method + # or a Proc object to be called to handle them. Alternatively a block can + # be given. + # + # Handlers that take one argument will be called with the exception, so + # that the exception can be inspected when dealing with it. + # + # Handlers are inherited. They are searched from right to left, from + # bottom to top, and up the hierarchy. The handler of the first class for + # which exception.is_a?(klass) holds true is the one invoked, if + # any. + # + # class ApplicationController < ActionController::Base + # rescue_from User::NotAuthorized, with: :deny_access # self defined exception + # rescue_from ActiveRecord::RecordInvalid, with: :show_errors + # + # rescue_from 'MyAppError::Base' do |exception| + # render xml: exception, status: 500 + # end + # + # private + # def deny_access + # ... + # end + # + # def show_errors(exception) + # exception.record.new_record? ? ... + # end + # end + # + # Exceptions raised inside exception handlers are not propagated up. + def rescue_from(*klasses, with: nil, &block) + unless with + if block_given? + with = block + else + raise ArgumentError, "Need a handler. Pass the with: keyword argument or provide a block." + end + end + + klasses.each do |klass| + key = if klass.is_a?(Module) && klass.respond_to?(:===) + klass.name + elsif klass.is_a?(String) + klass + else + raise ArgumentError, "#{klass.inspect} must be an Exception class or a String referencing an Exception class" + end + + # Put the new handler at the end because the list is read in reverse. + self.rescue_handlers += [[key, with]] + end + end + + # Matches an exception to a handler based on the exception class. + # + # If no handler matches the exception, check for a handler matching the + # (optional) exception.cause. If no handler matches the exception or its + # cause, this returns +nil+, so you can deal with unhandled exceptions. + # Be sure to re-raise unhandled exceptions if this is what you expect. + # + # begin + # … + # rescue => exception + # rescue_with_handler(exception) || raise + # end + # + # Returns the exception if it was handled and +nil+ if it was not. 
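# A minimal usage sketch, not taken from the vendored ActiveSupport source:
# Rescuable mixed into a plain Ruby class outside of a controller. The
# JobRunner class, its TimeoutError, and #process are invented for this example.
require "active_support"
require "active_support/rescuable"

class JobRunner
  include ActiveSupport::Rescuable

  class TimeoutError < StandardError; end

  rescue_from TimeoutError do |exception|
    puts "retrying after: #{exception.message}"
  end

  def process
    raise TimeoutError, "upstream took too long"
  rescue => exception
    # Re-raise anything no handler matches, as the documentation above recommends.
    rescue_with_handler(exception) || raise
  end
end

JobRunner.new.process # prints "retrying after: upstream took too long"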
+ def rescue_with_handler(exception, object: self, visited_exceptions: []) + visited_exceptions << exception + + if handler = handler_for_rescue(exception, object: object) + handler.call exception + exception + elsif exception + if visited_exceptions.include?(exception.cause) + nil + else + rescue_with_handler(exception.cause, object: object, visited_exceptions: visited_exceptions) + end + end + end + + def handler_for_rescue(exception, object: self) #:nodoc: + case rescuer = find_rescue_handler(exception) + when Symbol + method = object.method(rescuer) + if method.arity == 0 + -> e { method.call } + else + method + end + when Proc + if rescuer.arity == 0 + -> e { object.instance_exec(&rescuer) } + else + -> e { object.instance_exec(e, &rescuer) } + end + end + end + + private + def find_rescue_handler(exception) + if exception + # Handlers are in order of declaration but the most recently declared + # is the highest priority match, so we search for matching handlers + # in reverse. + _, handler = rescue_handlers.reverse_each.detect do |class_or_name, _| + if klass = constantize_rescue_handler_class(class_or_name) + klass === exception + end + end + + handler + end + end + + def constantize_rescue_handler_class(class_or_name) + case class_or_name + when String, Symbol + begin + # Try a lexical lookup first since we support + # + # class Super + # rescue_from 'Error', with: … + # end + # + # class Sub + # class Error < StandardError; end + # end + # + # so an Error raised in Sub will hit the 'Error' handler. + const_get class_or_name + rescue NameError + class_or_name.safe_constantize + end + else + class_or_name + end + end + end + + # Delegates to the class method, but uses the instance as the subject for + # rescue_from handlers (method calls, instance_exec blocks). + def rescue_with_handler(exception) + self.class.rescue_with_handler exception, object: self + end + + # Internal handler lookup. Delegates to class method. Some libraries call + # this directly, so keeping it around for compatibility. + def handler_for_rescue(exception) #:nodoc: + self.class.handler_for_rescue exception, object: self + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/security_utils.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/security_utils.rb new file mode 100644 index 0000000..20b6b9c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/security_utils.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require "digest/sha2" + +module ActiveSupport + module SecurityUtils + # Constant time string comparison, for fixed length strings. + # + # The values compared should be of fixed length, such as strings + # that have already been processed by HMAC. Raises in case of length mismatch. + def fixed_length_secure_compare(a, b) + raise ArgumentError, "string length mismatch." unless a.bytesize == b.bytesize + + l = a.unpack "C#{a.bytesize}" + + res = 0 + b.each_byte { |byte| res |= byte ^ l.shift } + res == 0 + end + module_function :fixed_length_secure_compare + + # Constant time string comparison, for variable length strings. + # + # The values are first processed by SHA256, so that we don't leak length info + # via timing attacks. 
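# A minimal usage sketch, not taken from the vendored ActiveSupport source:
# comparing a user-supplied token against a stored secret without leaking
# timing information. The token values here are invented for illustration.
require "active_support/security_utils"

stored_token   = "2f1a76ab88c9e1f5"
supplied_token = "2f1a76ab88c9e1f5"

# Variable-length safe comparison (both sides are hashed first).
ActiveSupport::SecurityUtils.secure_compare(stored_token, supplied_token) # => true

# The fixed-length variant raises if the byte sizes differ.
begin
  ActiveSupport::SecurityUtils.fixed_length_secure_compare("abc", "abcd")
rescue ArgumentError => e
  e.message # => "string length mismatch."
end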
+ def secure_compare(a, b) + fixed_length_secure_compare(::Digest::SHA256.hexdigest(a), ::Digest::SHA256.hexdigest(b)) && a == b + end + module_function :secure_compare + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/string_inquirer.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/string_inquirer.rb new file mode 100644 index 0000000..a3af367 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/string_inquirer.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +module ActiveSupport + # Wrapping a string in this class gives you a prettier way to test + # for equality. The value returned by Rails.env is wrapped + # in a StringInquirer object, so instead of calling this: + # + # Rails.env == 'production' + # + # you can call this: + # + # Rails.env.production? + # + # == Instantiating a new StringInquirer + # + # vehicle = ActiveSupport::StringInquirer.new('car') + # vehicle.car? # => true + # vehicle.bike? # => false + class StringInquirer < String + private + + def respond_to_missing?(method_name, include_private = false) + (method_name[-1] == "?") || super + end + + def method_missing(method_name, *arguments) + if method_name[-1] == "?" + self == method_name[0..-2] + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/subscriber.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/subscriber.rb new file mode 100644 index 0000000..9291b48 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/subscriber.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/per_thread_registry" +require "active_support/notifications" + +module ActiveSupport + # ActiveSupport::Subscriber is an object set to consume + # ActiveSupport::Notifications. The subscriber dispatches notifications to + # a registered object based on its given namespace. + # + # An example would be an Active Record subscriber responsible for collecting + # statistics about queries: + # + # module ActiveRecord + # class StatsSubscriber < ActiveSupport::Subscriber + # attach_to :active_record + # + # def sql(event) + # Statsd.timing("sql.#{event.payload[:name]}", event.duration) + # end + # end + # end + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event (ActiveSupport::Notifications::Event) to + # the +sql+ method. + class Subscriber + class << self + # Attach the subscriber to a namespace. + def attach_to(namespace, subscriber = new, notifier = ActiveSupport::Notifications) + @namespace = namespace + @subscriber = subscriber + @notifier = notifier + + subscribers << subscriber + + # Add event subscribers for all existing methods on the class. + subscriber.public_methods(false).each do |event| + add_event_subscriber(event) + end + end + + # Adds event subscribers for all new methods added to the class. + def method_added(event) + # Only public methods are added as subscribers, and only if a notifier + # has been set up. This means that subscribers will only be set up for + # classes that call #attach_to. + if public_method_defined?(event) && notifier + add_event_subscriber(event) + end + end + + def subscribers + @@subscribers ||= [] + end + + # TODO Change this to private once we've dropped Ruby 2.2 support. + # Workaround for Ruby 2.2 "private attribute?" warning. 
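# A minimal usage sketch, not taken from the vendored ActiveSupport source:
# a subscriber that reports the duration of "sql.active_record" events.
# MyApp::QueryTimingSubscriber and the instrumented block are invented here.
require "active_support"
require "active_support/subscriber"
require "active_support/notifications"

module MyApp
  class QueryTimingSubscriber < ActiveSupport::Subscriber
    attach_to :active_record

    # Called for every "sql.active_record" notification.
    def sql(event)
      puts "#{event.payload[:name]}: #{event.duration.round(2)}ms"
    end
  end
end

# Any instrumented block in the :active_record namespace now reaches #sql:
ActiveSupport::Notifications.instrument("sql.active_record", name: "User Load") do
  sleep 0.01
end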
+ protected + + attr_reader :subscriber, :notifier, :namespace + + private + + def add_event_subscriber(event) # :doc: + return if %w{ start finish }.include?(event.to_s) + + pattern = "#{event}.#{namespace}" + + # Don't add multiple subscribers (eg. if methods are redefined). + return if subscriber.patterns.include?(pattern) + + subscriber.patterns << pattern + notifier.subscribe(pattern, subscriber) + end + end + + attr_reader :patterns # :nodoc: + + def initialize + @queue_key = [self.class.name, object_id].join "-" + @patterns = [] + super + end + + def start(name, id, payload) + e = ActiveSupport::Notifications::Event.new(name, now, nil, id, payload) + parent = event_stack.last + parent << e if parent + + event_stack.push e + end + + def finish(name, id, payload) + finished = now + event = event_stack.pop + event.end = finished + event.payload.merge!(payload) + + method = name.split(".".freeze).first + send(method, event) + end + + private + + def event_stack + SubscriberQueueRegistry.instance.get_queue(@queue_key) + end + + def now + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + end + + # This is a registry for all the event stacks kept for subscribers. + # + # See the documentation of ActiveSupport::PerThreadRegistry + # for further details. + class SubscriberQueueRegistry # :nodoc: + extend PerThreadRegistry + + def initialize + @registry = {} + end + + def get_queue(queue_key) + @registry[queue_key] ||= [] + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/tagged_logging.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/tagged_logging.rb new file mode 100644 index 0000000..8561cba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/tagged_logging.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/object/blank" +require "logger" +require "active_support/logger" + +module ActiveSupport + # Wraps any standard Logger object to provide tagging capabilities. + # + # logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT)) + # logger.tagged('BCX') { logger.info 'Stuff' } # Logs "[BCX] Stuff" + # logger.tagged('BCX', "Jason") { logger.info 'Stuff' } # Logs "[BCX] [Jason] Stuff" + # logger.tagged('BCX') { logger.tagged('Jason') { logger.info 'Stuff' } } # Logs "[BCX] [Jason] Stuff" + # + # This is used by the default Rails.logger as configured by Railties to make + # it easy to stamp log lines with subdomains, request ids, and anything else + # to aid debugging of multi-user production applications. + module TaggedLogging + module Formatter # :nodoc: + # This method is invoked when a log event occurs. + def call(severity, timestamp, progname, msg) + super(severity, timestamp, progname, "#{tags_text}#{msg}") + end + + def tagged(*tags) + new_tags = push_tags(*tags) + yield self + ensure + pop_tags(new_tags.size) + end + + def push_tags(*tags) + tags.flatten.reject(&:blank?).tap do |new_tags| + current_tags.concat new_tags + end + end + + def pop_tags(size = 1) + current_tags.pop size + end + + def clear_tags! + current_tags.clear + end + + def current_tags + # We use our object ID here to avoid conflicting with other instances + thread_key = @thread_key ||= "activesupport_tagged_logging_tags:#{object_id}".freeze + Thread.current[thread_key] ||= [] + end + + def tags_text + tags = current_tags + if tags.any? 
+ tags.collect { |tag| "[#{tag}] " }.join + end + end + end + + def self.new(logger) + # Ensure we set a default formatter so we aren't extending nil! + logger.formatter ||= ActiveSupport::Logger::SimpleFormatter.new + logger.formatter.extend Formatter + logger.extend(self) + end + + delegate :push_tags, :pop_tags, :clear_tags!, to: :formatter + + def tagged(*tags) + formatter.tagged(*tags) { yield self } + end + + def flush + clear_tags! + super if defined?(super) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/test_case.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/test_case.rb new file mode 100644 index 0000000..69d69c8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/test_case.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +gem "minitest" # make sure we get the gem, not stdlib +require "minitest" +require "active_support/testing/tagged_logging" +require "active_support/testing/setup_and_teardown" +require "active_support/testing/assertions" +require "active_support/testing/deprecation" +require "active_support/testing/declarative" +require "active_support/testing/isolation" +require "active_support/testing/constant_lookup" +require "active_support/testing/time_helpers" +require "active_support/testing/file_fixtures" + +module ActiveSupport + class TestCase < ::Minitest::Test + Assertion = Minitest::Assertion + + class << self + # Sets the order in which test cases are run. + # + # ActiveSupport::TestCase.test_order = :random # => :random + # + # Valid values are: + # * +:random+ (to run tests in random order) + # * +:parallel+ (to run tests in parallel) + # * +:sorted+ (to run tests alphabetically by method name) + # * +:alpha+ (equivalent to +:sorted+) + def test_order=(new_order) + ActiveSupport.test_order = new_order + end + + # Returns the order in which test cases are run. + # + # ActiveSupport::TestCase.test_order # => :random + # + # Possible values are +:random+, +:parallel+, +:alpha+, +:sorted+. + # Defaults to +:random+. 
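# A minimal usage sketch, not taken from the vendored ActiveSupport source:
# forcing a deterministic test order, e.g. while chasing an order-dependent
# failure. WidgetTest and its single test are invented for this example.
require "active_support"
require "active_support/test_case"
require "active_support/testing/autorun"

# Run test methods alphabetically instead of the default :random order.
ActiveSupport::TestCase.test_order = :sorted
ActiveSupport::TestCase.test_order # => :sorted

class WidgetTest < ActiveSupport::TestCase
  test "assert_not passes for nil and false" do
    assert_not nil
    assert_not false
  end
end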
+ def test_order + ActiveSupport.test_order ||= :random + end + end + + alias_method :method_name, :name + + include ActiveSupport::Testing::TaggedLogging + prepend ActiveSupport::Testing::SetupAndTeardown + include ActiveSupport::Testing::Assertions + include ActiveSupport::Testing::Deprecation + include ActiveSupport::Testing::TimeHelpers + include ActiveSupport::Testing::FileFixtures + extend ActiveSupport::Testing::Declarative + + # test/unit backwards compatibility methods + alias :assert_raise :assert_raises + alias :assert_not_empty :refute_empty + alias :assert_not_equal :refute_equal + alias :assert_not_in_delta :refute_in_delta + alias :assert_not_in_epsilon :refute_in_epsilon + alias :assert_not_includes :refute_includes + alias :assert_not_instance_of :refute_instance_of + alias :assert_not_kind_of :refute_kind_of + alias :assert_no_match :refute_match + alias :assert_not_nil :refute_nil + alias :assert_not_operator :refute_operator + alias :assert_not_predicate :refute_predicate + alias :assert_not_respond_to :refute_respond_to + alias :assert_not_same :refute_same + + ActiveSupport.run_load_hooks(:active_support_test_case, self) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/assertions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/assertions.rb new file mode 100644 index 0000000..a891ff6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/assertions.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Assertions + UNTRACKED = Object.new # :nodoc: + + # Asserts that an expression is not truthy. Passes if object is + # +nil+ or +false+. "Truthy" means "considered true in a conditional" + # like if foo. + # + # assert_not nil # => true + # assert_not false # => true + # assert_not 'foo' # => Expected "foo" to be nil or false + # + # An error message can be specified. + # + # assert_not foo, 'foo should be false' + def assert_not(object, message = nil) + message ||= "Expected #{mu_pp(object)} to be nil or false" + assert !object, message + end + + # Assertion that the block should not raise an exception. + # + # Passes if evaluated code in the yielded block raises no exception. + # + # assert_nothing_raised do + # perform_service(param: 'no_exception') + # end + def assert_nothing_raised + yield + end + + # Test numeric difference between the return value of an expression as a + # result of what is evaluated in the yielded block. + # + # assert_difference 'Article.count' do + # post :create, params: { article: {...} } + # end + # + # An arbitrary expression is passed in and evaluated. + # + # assert_difference 'Article.last.comments(:reload).size' do + # post :create, params: { comment: {...} } + # end + # + # An arbitrary positive or negative difference can be specified. + # The default is 1. + # + # assert_difference 'Article.count', -1 do + # post :delete, params: { id: ... } + # end + # + # An array of expressions can also be passed in and evaluated. + # + # assert_difference [ 'Article.count', 'Post.count' ], 2 do + # post :create, params: { article: {...} } + # end + # + # A hash of expressions/numeric differences can also be passed in and evaluated. 
+ # + # assert_difference ->{ Article.count } => 1, ->{ Notification.count } => 2 do + # post :create, params: { article: {...} } + # end + # + # A lambda or a list of lambdas can be passed in and evaluated: + # + # assert_difference ->{ Article.count }, 2 do + # post :create, params: { article: {...} } + # end + # + # assert_difference [->{ Article.count }, ->{ Post.count }], 2 do + # post :create, params: { article: {...} } + # end + # + # An error message can be specified. + # + # assert_difference 'Article.count', -1, 'An Article should be destroyed' do + # post :delete, params: { id: ... } + # end + def assert_difference(expression, *args, &block) + expressions = + if expression.is_a?(Hash) + message = args[0] + expression + else + difference = args[0] || 1 + message = args[1] + Hash[Array(expression).map { |e| [e, difference] }] + end + + exps = expressions.keys.map { |e| + e.respond_to?(:call) ? e : lambda { eval(e, block.binding) } + } + before = exps.map(&:call) + + retval = yield + + expressions.zip(exps, before) do |(code, diff), exp, before_value| + error = "#{code.inspect} didn't change by #{diff}" + error = "#{message}.\n#{error}" if message + assert_equal(before_value + diff, exp.call, error) + end + + retval + end + + # Assertion that the numeric result of evaluating an expression is not + # changed before and after invoking the passed in block. + # + # assert_no_difference 'Article.count' do + # post :create, params: { article: invalid_attributes } + # end + # + # An error message can be specified. + # + # assert_no_difference 'Article.count', 'An Article should not be created' do + # post :create, params: { article: invalid_attributes } + # end + def assert_no_difference(expression, message = nil, &block) + assert_difference expression, 0, message, &block + end + + # Assertion that the result of evaluating an expression is changed before + # and after invoking the passed in block. + # + # assert_changes 'Status.all_good?' do + # post :create, params: { status: { ok: false } } + # end + # + # You can pass the block as a string to be evaluated in the context of + # the block. A lambda can be passed for the block as well. + # + # assert_changes -> { Status.all_good? } do + # post :create, params: { status: { ok: false } } + # end + # + # The assertion is useful to test side effects. The passed block can be + # anything that can be converted to string with #to_s. + # + # assert_changes :@object do + # @object = 42 + # end + # + # The keyword arguments :from and :to can be given to specify the + # expected initial value and the expected value after the block was + # executed. + # + # assert_changes :@object, from: nil, to: :foo do + # @object = :foo + # end + # + # An error message can be specified. + # + # assert_changes -> { Status.all_good? }, 'Expected the status to be bad' do + # post :create, params: { status: { incident: true } } + # end + def assert_changes(expression, message = nil, from: UNTRACKED, to: UNTRACKED, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = yield + + unless from == UNTRACKED + error = "#{expression.inspect} isn't #{from.inspect}" + error = "#{message}.\n#{error}" if message + assert from === before, error + end + + after = exp.call + + error = "#{expression.inspect} didn't change" + error = "#{error}. 
It was already #{to}" if before == to + error = "#{message}.\n#{error}" if message + assert before != after, error + + unless to == UNTRACKED + error = "#{expression.inspect} didn't change to #{to}" + error = "#{message}.\n#{error}" if message + assert to === after, error + end + + retval + end + + # Assertion that the result of evaluating an expression is not changed before + # and after invoking the passed in block. + # + # assert_no_changes 'Status.all_good?' do + # post :create, params: { status: { ok: true } } + # end + # + # An error message can be specified. + # + # assert_no_changes -> { Status.all_good? }, 'Expected the status to be good' do + # post :create, params: { status: { ok: false } } + # end + def assert_no_changes(expression, message = nil, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = yield + after = exp.call + + error = "#{expression.inspect} did change to #{after}" + error = "#{message}.\n#{error}" if message + assert before == after, error + + retval + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/autorun.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/autorun.rb new file mode 100644 index 0000000..889b416 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/autorun.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +gem "minitest" + +require "minitest" + +Minitest.autorun diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/constant_lookup.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/constant_lookup.rb new file mode 100644 index 0000000..51167e9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/constant_lookup.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/inflector" + +module ActiveSupport + module Testing + # Resolves a constant from a minitest spec name. + # + # Given the following spec-style test: + # + # describe WidgetsController, :index do + # describe "authenticated user" do + # describe "returns widgets" do + # it "has a controller that exists" do + # assert_kind_of WidgetsController, @controller + # end + # end + # end + # end + # + # The test will have the following name: + # + # "WidgetsController::index::authenticated user::returns widgets" + # + # The constant WidgetsController can be resolved from the name. 
+ # The following code will resolve the constant: + # + # controller = determine_constant_from_test_name(name) do |constant| + # Class === constant && constant < ::ActionController::Metal + # end + module ConstantLookup + extend ::ActiveSupport::Concern + + module ClassMethods # :nodoc: + def determine_constant_from_test_name(test_name) + names = test_name.split "::" + while names.size > 0 do + names.last.sub!(/Test$/, "") + begin + constant = names.join("::").safe_constantize + break(constant) if yield(constant) + ensure + names.pop + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/declarative.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/declarative.rb new file mode 100644 index 0000000..7c34036 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/declarative.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Declarative + unless defined?(Spec) + # Helper to define a test method using a String. Under the hood, it replaces + # spaces with underscores and defines the test method. + # + # test "verify something" do + # ... + # end + def test(name, &block) + test_name = "test_#{name.gsub(/\s+/, '_')}".to_sym + defined = method_defined? test_name + raise "#{test_name} is already defined in #{self}" if defined + if block_given? + define_method(test_name, &block) + else + define_method(test_name) do + flunk "No implementation provided for #{name}" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/deprecation.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/deprecation.rb new file mode 100644 index 0000000..f655435 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/deprecation.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require "active_support/deprecation" +require "active_support/core_ext/regexp" + +module ActiveSupport + module Testing + module Deprecation #:nodoc: + def assert_deprecated(match = nil, deprecator = nil, &block) + result, warnings = collect_deprecations(deprecator, &block) + assert !warnings.empty?, "Expected a deprecation warning within the block but received none" + if match + match = Regexp.new(Regexp.escape(match)) unless match.is_a?(Regexp) + assert warnings.any? 
{ |w| match.match?(w) }, "No deprecation warning matched #{match}: #{warnings.join(', ')}" + end + result + end + + def assert_not_deprecated(deprecator = nil, &block) + result, deprecations = collect_deprecations(deprecator, &block) + assert deprecations.empty?, "Expected no deprecation warning within the block but received #{deprecations.size}: \n #{deprecations * "\n "}" + result + end + + def collect_deprecations(deprecator = nil) + deprecator ||= ActiveSupport::Deprecation + old_behavior = deprecator.behavior + deprecations = [] + deprecator.behavior = Proc.new do |message, callstack| + deprecations << message + end + result = yield + [result, deprecations] + ensure + deprecator.behavior = old_behavior + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/file_fixtures.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/file_fixtures.rb new file mode 100644 index 0000000..ad923d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/file_fixtures.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + # Adds simple access to sample files called file fixtures. + # File fixtures are normal files stored in + # ActiveSupport::TestCase.file_fixture_path. + # + # File fixtures are represented as +Pathname+ objects. + # This makes it easy to extract specific information: + # + # file_fixture("example.txt").read # get the file's content + # file_fixture("example.mp3").size # get the file size + module FileFixtures + extend ActiveSupport::Concern + + included do + class_attribute :file_fixture_path, instance_writer: false + end + + # Returns a +Pathname+ to the fixture file named +fixture_name+. + # + # Raises +ArgumentError+ if +fixture_name+ can't be found. + def file_fixture(fixture_name) + path = Pathname.new(File.join(file_fixture_path, fixture_name)) + + if path.exist? + path + else + msg = "the directory '%s' does not contain a file named '%s'" + raise ArgumentError, msg % [file_fixture_path, fixture_name] + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/isolation.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/isolation.rb new file mode 100644 index 0000000..562f985 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/isolation.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Isolation + require "thread" + + def self.included(klass) #:nodoc: + klass.class_eval do + parallelize_me! + end + end + + def self.forking_env? + !ENV["NO_FORK"] && Process.respond_to?(:fork) + end + + def run + serialized = run_in_isolation do + super + end + + Marshal.load(serialized) + end + + module Forking + def run_in_isolation(&blk) + read, write = IO.pipe + read.binmode + write.binmode + + pid = fork do + read.close + yield + begin + if error? + failures.map! { |e| + begin + Marshal.dump e + e + rescue TypeError + ex = Exception.new e.message + ex.set_backtrace e.backtrace + Minitest::UnexpectedError.new ex + end + } + end + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + result = Marshal.dump(test_result) + end + + write.puts [result].pack("m") + exit! 
+ end + + write.close + result = read.read + Process.wait2(pid) + result.unpack("m")[0] + end + end + + module Subprocess + ORIG_ARGV = ARGV.dup unless defined?(ORIG_ARGV) + + # Crazy H4X to get this working in windows / jruby with + # no forking. + def run_in_isolation(&blk) + require "tempfile" + + if ENV["ISOLATION_TEST"] + yield + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + File.open(ENV["ISOLATION_OUTPUT"], "w") do |file| + file.puts [Marshal.dump(test_result)].pack("m") + end + exit! + else + Tempfile.open("isolation") do |tmpfile| + env = { + "ISOLATION_TEST" => self.class.name, + "ISOLATION_OUTPUT" => tmpfile.path + } + + test_opts = "-n#{self.class.name}##{name}" + + load_path_args = [] + $-I.each do |p| + load_path_args << "-I" + load_path_args << File.expand_path(p) + end + + child = IO.popen([env, Gem.ruby, *load_path_args, $0, *ORIG_ARGV, test_opts]) + + begin + Process.wait(child.pid) + rescue Errno::ECHILD # The child process may exit before we wait + nil + end + + return tmpfile.read.unpack("m")[0] + end + end + end + end + + include forking_env? ? Forking : Subprocess + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/method_call_assertions.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/method_call_assertions.rb new file mode 100644 index 0000000..c635800 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/method_call_assertions.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +require "minitest/mock" + +module ActiveSupport + module Testing + module MethodCallAssertions # :nodoc: + private + def assert_called(object, method_name, message = nil, times: 1, returns: nil) + times_called = 0 + + object.stub(method_name, proc { times_called += 1; returns }) { yield } + + error = "Expected #{method_name} to be called #{times} times, " \ + "but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + assert_equal times, times_called, error + end + + def assert_called_with(object, method_name, args = [], returns: nil) + mock = Minitest::Mock.new + + if args.all? { |arg| arg.is_a?(Array) } + args.each { |arg| mock.expect(:call, returns, arg) } + else + mock.expect(:call, returns, args) + end + + object.stub(method_name, mock) { yield } + + mock.verify + end + + def assert_not_called(object, method_name, message = nil, &block) + assert_called(object, method_name, message, times: 0, &block) + end + + def stub_any_instance(klass, instance: klass.new) + klass.stub(:new, instance) { yield instance } + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/setup_and_teardown.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/setup_and_teardown.rb new file mode 100644 index 0000000..35321cd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/setup_and_teardown.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/callbacks" + +module ActiveSupport + module Testing + # Adds support for +setup+ and +teardown+ callbacks. + # These callbacks serve as a replacement to overwriting the + # #setup and #teardown methods of your TestCase. + # + # class ExampleTest < ActiveSupport::TestCase + # setup do + # # ... + # end + # + # teardown do + # # ... 
+ # end + # end + module SetupAndTeardown + def self.prepended(klass) + klass.include ActiveSupport::Callbacks + klass.define_callbacks :setup, :teardown + klass.extend ClassMethods + end + + module ClassMethods + # Add a callback, which runs before TestCase#setup. + def setup(*args, &block) + set_callback(:setup, :before, *args, &block) + end + + # Add a callback, which runs after TestCase#teardown. + def teardown(*args, &block) + set_callback(:teardown, :after, *args, &block) + end + end + + def before_setup # :nodoc: + super + run_callbacks :setup + end + + def after_teardown # :nodoc: + begin + run_callbacks :teardown + rescue => e + self.failures << Minitest::UnexpectedError.new(e) + end + + super + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/stream.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/stream.rb new file mode 100644 index 0000000..d070a17 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/stream.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Stream #:nodoc: + private + + def silence_stream(stream) + old_stream = stream.dup + stream.reopen(IO::NULL) + stream.sync = true + yield + ensure + stream.reopen(old_stream) + old_stream.close + end + + def quietly + silence_stream(STDOUT) do + silence_stream(STDERR) do + yield + end + end + end + + def capture(stream) + stream = stream.to_s + captured_stream = Tempfile.new(stream) + stream_io = eval("$#{stream}") + origin_stream = stream_io.dup + stream_io.reopen(captured_stream) + + yield + + stream_io.rewind + return captured_stream.read + ensure + captured_stream.close + captured_stream.unlink + stream_io.reopen(origin_stream) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/tagged_logging.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/tagged_logging.rb new file mode 100644 index 0000000..9ca50c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/tagged_logging.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + # Logs a "PostsControllerTest: test name" heading before each test to + # make test.log easier to search and follow along with. + module TaggedLogging #:nodoc: + attr_writer :tagged_logger + + def before_setup + if tagged_logger && tagged_logger.info? 
+ heading = "#{self.class}: #{name}" + divider = "-" * heading.size + tagged_logger.info divider + tagged_logger.info heading + tagged_logger.info divider + end + super + end + + private + def tagged_logger + @tagged_logger ||= (defined?(Rails.logger) && Rails.logger) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/time_helpers.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/time_helpers.rb new file mode 100644 index 0000000..998a51a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/testing/time_helpers.rb @@ -0,0 +1,200 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/string/strip" # for strip_heredoc +require "active_support/core_ext/time/calculations" +require "concurrent/map" + +module ActiveSupport + module Testing + class SimpleStubs # :nodoc: + Stub = Struct.new(:object, :method_name, :original_method) + + def initialize + @stubs = Concurrent::Map.new { |h, k| h[k] = {} } + end + + def stub_object(object, method_name, &block) + if stub = stubbing(object, method_name) + unstub_object(stub) + end + + new_name = "__simple_stub__#{method_name}" + + @stubs[object.object_id][method_name] = Stub.new(object, method_name, new_name) + + object.singleton_class.send :alias_method, new_name, method_name + object.define_singleton_method(method_name, &block) + end + + def unstub_all! + @stubs.each_value do |object_stubs| + object_stubs.each_value do |stub| + unstub_object(stub) + end + end + @stubs.clear + end + + def stubbing(object, method_name) + @stubs[object.object_id][method_name] + end + + private + + def unstub_object(stub) + singleton_class = stub.object.singleton_class + singleton_class.send :silence_redefinition_of_method, stub.method_name + singleton_class.send :alias_method, stub.method_name, stub.original_method + singleton_class.send :undef_method, stub.original_method + end + end + + # Contains helpers that help you test passage of time. + module TimeHelpers + def after_teardown + travel_back + super + end + + # Changes current time to the time in the future or in the past by a given time difference by + # stubbing +Time.now+, +Date.today+, and +DateTime.now+. The stubs are automatically removed + # at the end of the test. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day + # Time.current # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # Date.current # => Sun, 10 Nov 2013 + # DateTime.current # => Sun, 10 Nov 2013 15:34:49 -0500 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day do + # User.create.created_at # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel(duration, &block) + travel_to Time.now + duration, &block + end + + # Changes current time to the given time by stubbing +Time.now+, + # +Date.today+, and +DateTime.now+ to return the time or date passed into this method. + # The stubs are automatically removed at the end of the test. 
+ # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 01, 04, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # Date.current # => Wed, 24 Nov 2004 + # DateTime.current # => Wed, 24 Nov 2004 01:04:44 -0500 + # + # Dates are taken as their timestamp at the beginning of the day in the + # application time zone. Time.current returns said timestamp, + # and Time.now its equivalent in the system time zone. Similarly, + # Date.current returns a date equal to the argument, and + # Date.today the date according to Time.now, which may + # be different. (Note that you rarely want to deal with Time.now, + # or Date.today, in order to honor the application time zone + # please always use Time.current and Date.current.) + # + # Note that the usec for the time passed will be set to 0 to prevent rounding + # errors with external services, like MySQL (which will round instead of floor, + # leading to off-by-one-second errors). + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 01, 04, 44) do + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel_to(date_or_time) + if block_given? && simple_stubs.stubbing(Time, :now) + travel_to_nested_block_call = <<-MSG.strip_heredoc + + Calling `travel_to` with a block, when we have previously already made a call to `travel_to`, can lead to confusing time stubbing. + + Instead of: + + travel_to 2.days.from_now do + # 2 days from today + travel_to 3.days.from_now do + # 5 days from today + end + end + + preferred way to achieve above is: + + travel 2.days do + # 2 days from today + end + + travel 5.days do + # 5 days from today + end + + MSG + raise travel_to_nested_block_call + end + + if date_or_time.is_a?(Date) && !date_or_time.is_a?(DateTime) + now = date_or_time.midnight.to_time + else + now = date_or_time.to_time.change(usec: 0) + end + + simple_stubs.stub_object(Time, :now) { at(now.to_i) } + simple_stubs.stub_object(Date, :today) { jd(now.to_date.jd) } + simple_stubs.stub_object(DateTime, :now) { jd(now.to_date.jd, now.hour, now.min, now.sec, Rational(now.utc_offset, 86400)) } + + if block_given? + begin + yield + ensure + travel_back + end + end + end + + # Returns the current time back to its original state, by removing the stubs added by + # +travel+ and +travel_to+. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 01, 04, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # travel_back + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel_back + simple_stubs.unstub_all! + end + + # Calls +travel_to+ with +Time.now+. 
+ # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time + # sleep(1) + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time do + # sleep(1) + # User.create.created_at # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # end + # Time.current # => Sun, 09 Jul 2017 15:34:50 EST -05:00 + def freeze_time(&block) + travel_to Time.now, &block + end + + private + + def simple_stubs + @simple_stubs ||= SimpleStubs.new + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/time.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/time.rb new file mode 100644 index 0000000..5185467 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/time.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + autoload :Duration, "active_support/duration" + autoload :TimeWithZone, "active_support/time_with_zone" + autoload :TimeZone, "active_support/values/time_zone" +end + +require "date" +require "time" + +require "active_support/core_ext/time" +require "active_support/core_ext/date" +require "active_support/core_ext/date_time" + +require "active_support/core_ext/integer/time" +require "active_support/core_ext/numeric/time" + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/time_with_zone.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/time_with_zone.rb new file mode 100644 index 0000000..20650ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/time_with_zone.rb @@ -0,0 +1,551 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/values/time_zone" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + # A Time-like class that can represent a time in any time zone. Necessary + # because standard Ruby Time instances are limited to UTC and the + # system's ENV['TZ'] zone. + # + # You shouldn't ever need to create a TimeWithZone instance directly via +new+. + # Instead use methods +local+, +parse+, +at+ and +now+ on TimeZone instances, + # and +in_time_zone+ on Time and DateTime instances. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.local(2007, 2, 10, 15, 30, 45) # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # Time.zone.parse('2007-02-10 15:30:45') # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # Time.zone.at(1171139445) # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # Time.zone.now # => Sun, 18 May 2008 13:07:55 EDT -04:00 + # Time.utc(2007, 2, 10, 20, 30, 45).in_time_zone # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # + # See Time and TimeZone for further documentation of these methods. + # + # TimeWithZone instances implement the same API as Ruby Time instances, so + # that Time and TimeWithZone instances are interchangeable. + # + # t = Time.zone.now # => Sun, 18 May 2008 13:27:25 EDT -04:00 + # t.hour # => 13 + # t.dst? 
# => true + # t.utc_offset # => -14400 + # t.zone # => "EDT" + # t.to_s(:rfc822) # => "Sun, 18 May 2008 13:27:25 -0400" + # t + 1.day # => Mon, 19 May 2008 13:27:25 EDT -04:00 + # t.beginning_of_year # => Tue, 01 Jan 2008 00:00:00 EST -05:00 + # t > Time.utc(1999) # => true + # t.is_a?(Time) # => true + # t.is_a?(ActiveSupport::TimeWithZone) # => true + class TimeWithZone + # Report class name as 'Time' to thwart type checking. + def self.name + "Time" + end + + PRECISIONS = Hash.new { |h, n| h[n] = "%FT%T.%#{n}N".freeze } + PRECISIONS[0] = "%FT%T".freeze + + include Comparable, DateAndTime::Compatibility + attr_reader :time_zone + + def initialize(utc_time, time_zone, local_time = nil, period = nil) + @utc = utc_time ? transfer_time_values_to_utc_constructor(utc_time) : nil + @time_zone, @time = time_zone, local_time + @period = @utc ? period : get_period_and_ensure_valid_local_time(period) + end + + # Returns a Time instance that represents the time in +time_zone+. + def time + @time ||= period.to_local(@utc) + end + + # Returns a Time instance of the simultaneous time in the UTC timezone. + def utc + @utc ||= period.to_utc(@time) + end + alias_method :comparable_time, :utc + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns the underlying TZInfo::TimezonePeriod. + def period + @period ||= time_zone.period_for_utc(@utc) + end + + # Returns the simultaneous time in Time.zone, or the specified zone. + def in_time_zone(new_zone = ::Time.zone) + return self if time_zone == new_zone + utc.in_time_zone(new_zone) + end + + # Returns a Time instance of the simultaneous time in the system timezone. + def localtime(utc_offset = nil) + utc.getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns true if the current time is within Daylight Savings Time for the + # specified time zone. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.parse("2012-5-30").dst? # => true + # Time.zone.parse("2012-11-30").dst? # => false + def dst? + period.dst? + end + alias_method :isdst, :dst? + + # Returns true if the current time zone is set to UTC. + # + # Time.zone = 'UTC' # => 'UTC' + # Time.zone.now.utc? # => true + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.now.utc? # => false + def utc? + period.offset.abbreviation == :UTC || period.offset.abbreviation == :UCT + end + alias_method :gmt?, :utc? + + # Returns the offset from current time to UTC time in seconds. + def utc_offset + period.utc_total_offset + end + alias_method :gmt_offset, :utc_offset + alias_method :gmtoff, :utc_offset + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.formatted_offset(true) # => "-05:00" + # Time.zone.now.formatted_offset(false) # => "-0500" + # Time.zone = 'UTC' # => "UTC" + # Time.zone.now.formatted_offset(true, "0") # => "0" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Returns the time zone abbreviation. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.zone # => "EST" + def zone + period.zone_identifier.to_s + end + + # Returns a string of the object's date, time, zone, and offset from UTC. 
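# Illustrative sketch, not part of the diff: the reader methods above in action. Assumes
# the activesupport and tzinfo gems are installed; abbreviations such as "EST" come from
# the local tzdata.
require "active_support"
require "active_support/time"

Time.zone = "Eastern Time (US & Canada)"
t = Time.zone.local(2007, 2, 10, 15, 30, 45) # an ActiveSupport::TimeWithZone

puts t.zone                   # => "EST"
puts t.utc_offset             # => -18000 (seconds)
puts t.formatted_offset       # => "-05:00"
puts t.utc                    # the simultaneous UTC Time
puts t.in_time_zone("Hawaii") # same instant viewed from another zone
puts t.dst?                   # => false in February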
+ # + # Time.zone.now.inspect # => "Thu, 04 Dec 2014 11:00:25 EST -05:00" + def inspect + "#{time.strftime('%a, %d %b %Y %H:%M:%S')} #{zone} #{formatted_offset}" + end + + # Returns a string of the object's date and time in the ISO 8601 standard + # format. + # + # Time.zone.now.xmlschema # => "2014-12-04T11:02:37-05:00" + def xmlschema(fraction_digits = 0) + "#{time.strftime(PRECISIONS[fraction_digits.to_i])}#{formatted_offset(true, 'Z'.freeze)}" + end + alias_method :iso8601, :xmlschema + alias_method :rfc3339, :xmlschema + + # Coerces time to a string for JSON encoding. The default format is ISO 8601. + # You can get %Y/%m/%d %H:%M:%S +offset style by setting + # ActiveSupport::JSON::Encoding.use_standard_json_time_format + # to +false+. + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = true + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005-02-01T05:15:10.000-10:00" + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = false + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005/02/01 05:15:10 -1000" + def as_json(options = nil) + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{time.strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end + + def init_with(coder) #:nodoc: + initialize(coder["utc"], coder["zone"], coder["time"]) + end + + def encode_with(coder) #:nodoc: + coder.tag = "!ruby/object:ActiveSupport::TimeWithZone" + coder.map = { "utc" => utc, "zone" => time_zone, "time" => time } + end + + # Returns a string of the object's date and time in the format used by + # HTTP requests. + # + # Time.zone.now.httpdate # => "Tue, 01 Jan 2013 04:39:43 GMT" + def httpdate + utc.httpdate + end + + # Returns a string of the object's date and time in the RFC 2822 standard + # format. + # + # Time.zone.now.rfc2822 # => "Tue, 01 Jan 2013 04:51:39 +0000" + def rfc2822 + to_s(:rfc822) + end + alias_method :rfc822, :rfc2822 + + # Returns a string of the object's date and time. + # Accepts an optional format: + # * :default - default value, mimics Ruby Time#to_s format. + # * :db - format outputs time in UTC :db time. See Time#to_formatted_s(:db). + # * Any key in Time::DATE_FORMATS can be used. See active_support/core_ext/time/conversions.rb. + def to_s(format = :default) + if format == :db + utc.to_s(format) + elsif formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + "#{time.strftime("%Y-%m-%d %H:%M:%S")} #{formatted_offset(false, 'UTC')}" # mimicking Ruby Time#to_s format + end + end + alias_method :to_formatted_s, :to_s + + # Replaces %Z directive with +zone before passing to Time#strftime, + # so that zone information is correct. + def strftime(format) + format = format.gsub(/((?:\A|[^%])(?:%%)*)%Z/, "\\1#{zone}") + getlocal(utc_offset).strftime(format) + end + + # Use the time in UTC for comparisons. + def <=>(other) + utc <=> other + end + + # Returns true if the current object's time is within the specified + # +min+ and +max+ time. + def between?(min, max) + utc.between?(min, max) + end + + # Returns true if the current object's time is in the past. + def past? + utc.past? + end + + # Returns true if the current object's time falls within + # the current day. + def today? + time.today? + end + + # Returns true if the current object's time is in the future. + def future? + utc.future? 
+ end + + # Returns +true+ if +other+ is equal to current object. + def eql?(other) + other.eql?(utc) + end + + def hash + utc.hash + end + + # Adds an interval of time to the current object's time and returns that + # value as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now + 1000 # => Sun, 02 Nov 2014 01:43:08 EDT -04:00 + # + # If we're adding a Duration of variable length (i.e., years, months, days), + # move forward from #time, otherwise move forward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time + 24.hours will advance exactly 24 hours, while a + # time + 1.day will advance 23-25 hours, depending on the day. + # + # now + 24.hours # => Mon, 03 Nov 2014 00:26:28 EST -05:00 + # now + 1.day # => Mon, 03 Nov 2014 01:26:28 EST -05:00 + def +(other) + if duration_of_variable_length?(other) + method_missing(:+, other) + else + result = utc.acts_like?(:date) ? utc.since(other) : utc + other rescue utc.since(other) + result.in_time_zone(time_zone) + end + end + alias_method :since, :+ + alias_method :in, :+ + + # Returns a new TimeWithZone object that represents the difference between + # the current object's time and the +other+ time. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28 EST -05:00 + # now - 1000 # => Mon, 03 Nov 2014 00:09:48 EST -05:00 + # + # If subtracting a Duration of variable length (i.e., years, months, days), + # move backward from #time, otherwise move backward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time - 24.hours will go subtract exactly 24 hours, while a + # time - 1.day will subtract 23-25 hours, depending on the day. + # + # now - 24.hours # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now - 1.day # => Sun, 02 Nov 2014 00:26:28 EDT -04:00 + def -(other) + if other.acts_like?(:time) + to_time - other.to_time + elsif duration_of_variable_length?(other) + method_missing(:-, other) + else + result = utc.acts_like?(:date) ? utc.ago(other) : utc - other rescue utc.ago(other) + result.in_time_zone(time_zone) + end + end + + # Subtracts an interval of time from the current object's time and returns + # the result as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28 EST -05:00 + # now.ago(1000) # => Mon, 03 Nov 2014 00:09:48 EST -05:00 + # + # If we're subtracting a Duration of variable length (i.e., years, months, + # days), move backward from #time, otherwise move backward from #utc, for + # accuracy when moving across DST boundaries. + # + # For instance, time.ago(24.hours) will move back exactly 24 hours, + # while time.ago(1.day) will move back 23-25 hours, depending on + # the day. + # + # now.ago(24.hours) # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now.ago(1.day) # => Sun, 02 Nov 2014 00:26:28 EDT -04:00 + def ago(other) + since(-other) + end + + # Returns a new +ActiveSupport::TimeWithZone+ where one or more of the elements have + # been changed according to the +options+ parameter. The time options (:hour, + # :min, :sec, :usec, :nsec) reset cascadingly, + # so if only the hour is passed, then minute, sec, usec and nsec is set to 0. If the + # hour and minute is passed, then sec, usec and nsec is set to 0. 
The +options+ + # parameter takes a hash with any of these keys: :year, :month, + # :day, :hour, :min, :sec, :usec, + # :nsec, :offset, :zone. Pass either :usec + # or :nsec, not both. Similarly, pass either :zone or + # :offset, not both. + # + # t = Time.zone.now # => Fri, 14 Apr 2017 11:45:15 EST -05:00 + # t.change(year: 2020) # => Tue, 14 Apr 2020 11:45:15 EST -05:00 + # t.change(hour: 12) # => Fri, 14 Apr 2017 12:00:00 EST -05:00 + # t.change(min: 30) # => Fri, 14 Apr 2017 11:30:00 EST -05:00 + # t.change(offset: "-10:00") # => Fri, 14 Apr 2017 11:45:15 HST -10:00 + # t.change(zone: "Hawaii") # => Fri, 14 Apr 2017 11:45:15 HST -10:00 + def change(options) + if options[:zone] && options[:offset] + raise ArgumentError, "Can't change both :offset and :zone at the same time: #{options.inspect}" + end + + new_time = time.change(options) + + if options[:zone] + new_zone = ::Time.find_zone(options[:zone]) + elsif options[:offset] + new_zone = ::Time.find_zone(new_time.utc_offset) + end + + new_zone ||= time_zone + periods = new_zone.periods_for_local(new_time) + + self.class.new(nil, new_zone, new_time, periods.include?(period) ? period : nil) + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The result is returned as a + # new TimeWithZone object. + # + # The +options+ parameter takes a hash with any of these keys: + # :years, :months, :weeks, :days, + # :hours, :minutes, :seconds. + # + # If advancing by a value of variable length (i.e., years, weeks, months, + # days), move forward from #time, otherwise move forward from #utc, for + # accuracy when moving across DST boundaries. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now.advance(seconds: 1) # => Sun, 02 Nov 2014 01:26:29 EDT -04:00 + # now.advance(minutes: 1) # => Sun, 02 Nov 2014 01:27:28 EDT -04:00 + # now.advance(hours: 1) # => Sun, 02 Nov 2014 01:26:28 EST -05:00 + # now.advance(days: 1) # => Mon, 03 Nov 2014 01:26:28 EST -05:00 + # now.advance(weeks: 1) # => Sun, 09 Nov 2014 01:26:28 EST -05:00 + # now.advance(months: 1) # => Tue, 02 Dec 2014 01:26:28 EST -05:00 + # now.advance(years: 1) # => Mon, 02 Nov 2015 01:26:28 EST -05:00 + def advance(options) + # If we're advancing a value of variable length (i.e., years, weeks, months, days), advance from #time, + # otherwise advance from #utc, for accuracy when moving across DST boundaries + if options.values_at(:years, :weeks, :months, :days).any? + method_missing(:advance, options) + else + utc.advance(options).in_time_zone(time_zone) + end + end + + %w(year mon month day mday wday yday hour min sec usec nsec to_date).each do |method_name| + class_eval <<-EOV, __FILE__, __LINE__ + 1 + def #{method_name} # def month + time.#{method_name} # time.month + end # end + EOV + end + + # Returns Array of parts of Time in sequence of + # [seconds, minutes, hours, day, month, year, weekday, yearday, dst?, zone]. + # + # now = Time.zone.now # => Tue, 18 Aug 2015 02:29:27 UTC +00:00 + # now.to_a # => [27, 29, 2, 18, 8, 2015, 2, 230, false, "UTC"] + def to_a + [time.sec, time.min, time.hour, time.day, time.mon, time.year, time.wday, time.yday, dst?, zone] + end + + # Returns the object's date and time as a floating point number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). 
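# Illustrative sketch, not part of the diff: the fixed- vs variable-length arithmetic
# contract described above, shown around a US "fall back" transition. Assumes the
# activesupport and tzinfo gems are installed.
require "active_support"
require "active_support/time"

Time.zone = "Eastern Time (US & Canada)"
now = Time.zone.local(2014, 11, 2, 0, 26, 28) # a couple of hours before clocks fall back

puts(now + 24.hours)      # exactly 24 elapsed hours; the extra DST hour keeps it on 2 Nov
puts(now + 1.day)         # one calendar day later (25 elapsed hours across this boundary)
puts now.advance(days: 1) # calendar-day semantics, same result as + 1.day
puts now.change(hour: 12) # cascading reset: minutes, seconds and usec drop to zero
puts now.ago(24.hours)    # back exactly 24 elapsed hours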
+ # + # Time.zone.now.to_f # => 1417709320.285418 + def to_f + utc.to_f + end + + # Returns the object's date and time as an integer number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_i # => 1417709320 + def to_i + utc.to_i + end + alias_method :tv_sec, :to_i + + # Returns the object's date and time as a rational number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_r # => (708854548642709/500000) + def to_r + utc.to_r + end + + # Returns an instance of DateTime with the timezone's UTC offset + # + # Time.zone.now.to_datetime # => Tue, 18 Aug 2015 02:32:20 +0000 + # Time.current.in_time_zone('Hawaii').to_datetime # => Mon, 17 Aug 2015 16:32:20 -1000 + def to_datetime + @to_datetime ||= utc.to_datetime.new_offset(Rational(utc_offset, 86_400)) + end + + # Returns an instance of +Time+, either with the same UTC offset + # as +self+ or in the local system timezone depending on the setting + # of +ActiveSupport.to_time_preserves_timezone+. + def to_time + if preserve_timezone + @to_time_with_instance_offset ||= getlocal(utc_offset) + else + @to_time_with_system_offset ||= getlocal + end + end + + # So that +self+ acts_like?(:time). + def acts_like_time? + true + end + + # Say we're a Time to thwart type checking. + def is_a?(klass) + klass == ::Time || super + end + alias_method :kind_of?, :is_a? + + # An instance of ActiveSupport::TimeWithZone is never blank + def blank? + false + end + + def freeze + # preload instance variables before freezing + period; utc; time; to_datetime; to_time + super + end + + def marshal_dump + [utc, time_zone.name, time] + end + + def marshal_load(variables) + initialize(variables[0].utc, ::Time.find_zone(variables[1]), variables[2].utc) + end + + # respond_to_missing? is not called in some cases, such as when type conversion is + # performed with Kernel#String + def respond_to?(sym, include_priv = false) + # ensure that we're not going to throw and rescue from NoMethodError in method_missing which is slow + return false if sym.to_sym == :to_str + super + end + + # Ensure proxy class responds to all methods that underlying time instance + # responds to. + def respond_to_missing?(sym, include_priv) + return false if sym.to_sym == :acts_like_date? + time.respond_to?(sym, include_priv) + end + + # Send the missing method to +time+ instance, and wrap result in a new + # TimeWithZone with the existing +time_zone+. + def method_missing(sym, *args, &block) + wrap_with_time_zone time.__send__(sym, *args, &block) + rescue NoMethodError => e + raise e, e.message.sub(time.inspect, inspect), e.backtrace + end + + private + def get_period_and_ensure_valid_local_time(period) + # we don't want a Time.local instance enforcing its own DST rules as well, + # so transfer time values to a utc constructor if necessary + @time = transfer_time_values_to_utc_constructor(@time) unless @time.utc? + begin + period || @time_zone.period_for_local(@time) + rescue ::TZInfo::PeriodNotFound + # time is in the "spring forward" hour gap, so we're moving the time forward one hour and trying again + @time += 1.hour + retry + end + end + + def transfer_time_values_to_utc_constructor(time) + # avoid creating another Time object if possible + return time if time.instance_of?(::Time) && time.utc? + ::Time.utc(time.year, time.month, time.day, time.hour, time.min, time.sec + time.subsec) + end + + def duration_of_variable_length?(obj) + ActiveSupport::Duration === obj && obj.parts.any? 
{ |p| [:years, :months, :weeks, :days].include?(p[0]) } + end + + def wrap_with_time_zone(time) + if time.acts_like?(:time) + periods = time_zone.periods_for_local(time) + self.class.new(nil, time_zone, time, periods.include?(period) ? period : nil) + elsif time.is_a?(Range) + wrap_with_time_zone(time.begin)..wrap_with_time_zone(time.end) + else + time + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/values/time_zone.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/values/time_zone.rb new file mode 100644 index 0000000..90501bc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/values/time_zone.rb @@ -0,0 +1,565 @@ +# frozen_string_literal: true + +require "tzinfo" +require "concurrent/map" +require "active_support/core_ext/object/blank" + +module ActiveSupport + # The TimeZone class serves as a wrapper around TZInfo::Timezone instances. + # It allows us to do the following: + # + # * Limit the set of zones provided by TZInfo to a meaningful subset of 134 + # zones. + # * Retrieve and display zones with a friendlier name + # (e.g., "Eastern Time (US & Canada)" instead of "America/New_York"). + # * Lazily load TZInfo::Timezone instances only when they're needed. + # * Create ActiveSupport::TimeWithZone instances via TimeZone's +local+, + # +parse+, +at+ and +now+ methods. + # + # If you set config.time_zone in the Rails Application, you can + # access this TimeZone object via Time.zone: + # + # # application.rb: + # class Application < Rails::Application + # config.time_zone = 'Eastern Time (US & Canada)' + # end + # + # Time.zone # => # + # Time.zone.name # => "Eastern Time (US & Canada)" + # Time.zone.now # => Sun, 18 May 2008 14:30:44 EDT -04:00 + class TimeZone + # Keys are Rails TimeZone names, values are TZInfo identifiers. + MAPPING = { + "International Date Line West" => "Etc/GMT+12", + "Midway Island" => "Pacific/Midway", + "American Samoa" => "Pacific/Pago_Pago", + "Hawaii" => "Pacific/Honolulu", + "Alaska" => "America/Juneau", + "Pacific Time (US & Canada)" => "America/Los_Angeles", + "Tijuana" => "America/Tijuana", + "Mountain Time (US & Canada)" => "America/Denver", + "Arizona" => "America/Phoenix", + "Chihuahua" => "America/Chihuahua", + "Mazatlan" => "America/Mazatlan", + "Central Time (US & Canada)" => "America/Chicago", + "Saskatchewan" => "America/Regina", + "Guadalajara" => "America/Mexico_City", + "Mexico City" => "America/Mexico_City", + "Monterrey" => "America/Monterrey", + "Central America" => "America/Guatemala", + "Eastern Time (US & Canada)" => "America/New_York", + "Indiana (East)" => "America/Indiana/Indianapolis", + "Bogota" => "America/Bogota", + "Lima" => "America/Lima", + "Quito" => "America/Lima", + "Atlantic Time (Canada)" => "America/Halifax", + "Caracas" => "America/Caracas", + "La Paz" => "America/La_Paz", + "Santiago" => "America/Santiago", + "Newfoundland" => "America/St_Johns", + "Brasilia" => "America/Sao_Paulo", + "Buenos Aires" => "America/Argentina/Buenos_Aires", + "Montevideo" => "America/Montevideo", + "Georgetown" => "America/Guyana", + "Puerto Rico" => "America/Puerto_Rico", + "Greenland" => "America/Godthab", + "Mid-Atlantic" => "Atlantic/South_Georgia", + "Azores" => "Atlantic/Azores", + "Cape Verde Is." 
=> "Atlantic/Cape_Verde", + "Dublin" => "Europe/Dublin", + "Edinburgh" => "Europe/London", + "Lisbon" => "Europe/Lisbon", + "London" => "Europe/London", + "Casablanca" => "Africa/Casablanca", + "Monrovia" => "Africa/Monrovia", + "UTC" => "Etc/UTC", + "Belgrade" => "Europe/Belgrade", + "Bratislava" => "Europe/Bratislava", + "Budapest" => "Europe/Budapest", + "Ljubljana" => "Europe/Ljubljana", + "Prague" => "Europe/Prague", + "Sarajevo" => "Europe/Sarajevo", + "Skopje" => "Europe/Skopje", + "Warsaw" => "Europe/Warsaw", + "Zagreb" => "Europe/Zagreb", + "Brussels" => "Europe/Brussels", + "Copenhagen" => "Europe/Copenhagen", + "Madrid" => "Europe/Madrid", + "Paris" => "Europe/Paris", + "Amsterdam" => "Europe/Amsterdam", + "Berlin" => "Europe/Berlin", + "Bern" => "Europe/Zurich", + "Zurich" => "Europe/Zurich", + "Rome" => "Europe/Rome", + "Stockholm" => "Europe/Stockholm", + "Vienna" => "Europe/Vienna", + "West Central Africa" => "Africa/Algiers", + "Bucharest" => "Europe/Bucharest", + "Cairo" => "Africa/Cairo", + "Helsinki" => "Europe/Helsinki", + "Kyiv" => "Europe/Kiev", + "Riga" => "Europe/Riga", + "Sofia" => "Europe/Sofia", + "Tallinn" => "Europe/Tallinn", + "Vilnius" => "Europe/Vilnius", + "Athens" => "Europe/Athens", + "Istanbul" => "Europe/Istanbul", + "Minsk" => "Europe/Minsk", + "Jerusalem" => "Asia/Jerusalem", + "Harare" => "Africa/Harare", + "Pretoria" => "Africa/Johannesburg", + "Kaliningrad" => "Europe/Kaliningrad", + "Moscow" => "Europe/Moscow", + "St. Petersburg" => "Europe/Moscow", + "Volgograd" => "Europe/Volgograd", + "Samara" => "Europe/Samara", + "Kuwait" => "Asia/Kuwait", + "Riyadh" => "Asia/Riyadh", + "Nairobi" => "Africa/Nairobi", + "Baghdad" => "Asia/Baghdad", + "Tehran" => "Asia/Tehran", + "Abu Dhabi" => "Asia/Muscat", + "Muscat" => "Asia/Muscat", + "Baku" => "Asia/Baku", + "Tbilisi" => "Asia/Tbilisi", + "Yerevan" => "Asia/Yerevan", + "Kabul" => "Asia/Kabul", + "Ekaterinburg" => "Asia/Yekaterinburg", + "Islamabad" => "Asia/Karachi", + "Karachi" => "Asia/Karachi", + "Tashkent" => "Asia/Tashkent", + "Chennai" => "Asia/Kolkata", + "Kolkata" => "Asia/Kolkata", + "Mumbai" => "Asia/Kolkata", + "New Delhi" => "Asia/Kolkata", + "Kathmandu" => "Asia/Kathmandu", + "Astana" => "Asia/Dhaka", + "Dhaka" => "Asia/Dhaka", + "Sri Jayawardenepura" => "Asia/Colombo", + "Almaty" => "Asia/Almaty", + "Novosibirsk" => "Asia/Novosibirsk", + "Rangoon" => "Asia/Rangoon", + "Bangkok" => "Asia/Bangkok", + "Hanoi" => "Asia/Bangkok", + "Jakarta" => "Asia/Jakarta", + "Krasnoyarsk" => "Asia/Krasnoyarsk", + "Beijing" => "Asia/Shanghai", + "Chongqing" => "Asia/Chongqing", + "Hong Kong" => "Asia/Hong_Kong", + "Urumqi" => "Asia/Urumqi", + "Kuala Lumpur" => "Asia/Kuala_Lumpur", + "Singapore" => "Asia/Singapore", + "Taipei" => "Asia/Taipei", + "Perth" => "Australia/Perth", + "Irkutsk" => "Asia/Irkutsk", + "Ulaanbaatar" => "Asia/Ulaanbaatar", + "Seoul" => "Asia/Seoul", + "Osaka" => "Asia/Tokyo", + "Sapporo" => "Asia/Tokyo", + "Tokyo" => "Asia/Tokyo", + "Yakutsk" => "Asia/Yakutsk", + "Darwin" => "Australia/Darwin", + "Adelaide" => "Australia/Adelaide", + "Canberra" => "Australia/Melbourne", + "Melbourne" => "Australia/Melbourne", + "Sydney" => "Australia/Sydney", + "Brisbane" => "Australia/Brisbane", + "Hobart" => "Australia/Hobart", + "Vladivostok" => "Asia/Vladivostok", + "Guam" => "Pacific/Guam", + "Port Moresby" => "Pacific/Port_Moresby", + "Magadan" => "Asia/Magadan", + "Srednekolymsk" => "Asia/Srednekolymsk", + "Solomon Is." 
=> "Pacific/Guadalcanal", + "New Caledonia" => "Pacific/Noumea", + "Fiji" => "Pacific/Fiji", + "Kamchatka" => "Asia/Kamchatka", + "Marshall Is." => "Pacific/Majuro", + "Auckland" => "Pacific/Auckland", + "Wellington" => "Pacific/Auckland", + "Nuku'alofa" => "Pacific/Tongatapu", + "Tokelau Is." => "Pacific/Fakaofo", + "Chatham Is." => "Pacific/Chatham", + "Samoa" => "Pacific/Apia" + } + + UTC_OFFSET_WITH_COLON = "%s%02d:%02d" + UTC_OFFSET_WITHOUT_COLON = UTC_OFFSET_WITH_COLON.tr(":", "") + + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + + class << self + # Assumes self represents an offset from UTC in seconds (as returned from + # Time#utc_offset) and turns this into an +HH:MM formatted string. + # + # ActiveSupport::TimeZone.seconds_to_utc_offset(-21_600) # => "-06:00" + def seconds_to_utc_offset(seconds, colon = true) + format = colon ? UTC_OFFSET_WITH_COLON : UTC_OFFSET_WITHOUT_COLON + sign = (seconds < 0 ? "-" : "+") + hours = seconds.abs / 3600 + minutes = (seconds.abs % 3600) / 60 + format % [sign, hours, minutes] + end + + def find_tzinfo(name) + TZInfo::Timezone.new(MAPPING[name] || name) + end + + alias_method :create, :new + + # Returns a TimeZone instance with the given name, or +nil+ if no + # such TimeZone instance exists. (This exists to support the use of + # this class with the +composed_of+ macro.) + def new(name) + self[name] + end + + # Returns an array of all TimeZone objects. There are multiple + # TimeZone objects per time zone, in many cases, to make it easier + # for users to find their own time zone. + def all + @zones ||= zones_map.values.sort + end + + # Locate a specific time zone object. If the argument is a string, it + # is interpreted to mean the name of the timezone to locate. If it is a + # numeric value it is either the hour offset, or the second offset, of the + # timezone to find. (The first one with that offset will be returned.) + # Returns +nil+ if no such time zone is known to the system. + def [](arg) + case arg + when String + begin + @lazy_zones_map[arg] ||= create(arg) + rescue TZInfo::InvalidTimezoneIdentifier + nil + end + when Numeric, ActiveSupport::Duration + arg *= 3600 if arg.abs <= 13 + all.find { |z| z.utc_offset == arg.to_i } + else + raise ArgumentError, "invalid argument to TimeZone[]: #{arg.inspect}" + end + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the USA. + def us_zones + country_zones(:us) + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the country specified by its ISO 3166-1 Alpha2 code. + def country_zones(country_code) + code = country_code.to_s.upcase + @country_zones[code] ||= load_country_zones(code) + end + + def clear #:nodoc: + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + @zones = nil + @zones_map = nil + end + + private + def load_country_zones(code) + country = TZInfo::Country.get(code) + country.zone_identifiers.map do |tz_id| + if MAPPING.value?(tz_id) + MAPPING.inject([]) do |memo, (key, value)| + memo << self[key] if value == tz_id + memo + end + else + create(tz_id, nil, TZInfo::Timezone.new(tz_id)) + end + end.flatten(1).sort! + end + + def zones_map + @zones_map ||= MAPPING.each_with_object({}) do |(name, _), zones| + timezone = self[name] + zones[name] = timezone if timezone + end + end + end + + include Comparable + attr_reader :name + attr_reader :tzinfo + + # Create a new TimeZone object with the given name and offset. 
The + # offset is the number of seconds that this time zone is offset from UTC + # (GMT). Seconds were chosen as the offset unit because that is the unit + # that Ruby uses to represent time zone offsets (see Time#utc_offset). + def initialize(name, utc_offset = nil, tzinfo = nil) + @name = name + @utc_offset = utc_offset + @tzinfo = tzinfo || TimeZone.find_tzinfo(name) + end + + # Returns the offset of this time zone from UTC in seconds. + def utc_offset + if @utc_offset + @utc_offset + else + tzinfo.current_period.utc_offset if tzinfo && tzinfo.current_period + end + end + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # zone = ActiveSupport::TimeZone['Central Time (US & Canada)'] + # zone.formatted_offset # => "-06:00" + # zone.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc_offset == 0 && alternate_utc_string || self.class.seconds_to_utc_offset(utc_offset, colon) + end + + # Compare this time zone to the parameter. The two are compared first on + # their offsets, and then by name. + def <=>(zone) + return unless zone.respond_to? :utc_offset + result = (utc_offset <=> zone.utc_offset) + result = (name <=> zone.name) if result == 0 + result + end + + # Compare #name and TZInfo identifier to a supplied regexp, returning +true+ + # if a match is found. + def =~(re) + re === name || re === MAPPING[name] + end + + # Returns a textual representation of this time zone. + def to_s + "(GMT#{formatted_offset}) #{name}" + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from given values. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.local(2007, 2, 1, 15, 30, 45) # => Thu, 01 Feb 2007 15:30:45 HST -10:00 + def local(*args) + time = Time.utc(*args) + ActiveSupport::TimeWithZone.new(nil, self, time) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from number of seconds since the Unix epoch. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.utc(2000).to_f # => 946684800.0 + # Time.zone.at(946684800.0) # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + def at(secs) + Time.at(secs).utc.in_time_zone(self) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an ISO 8601 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31T14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time components are missing then they will be set to zero. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31') # => Fri, 31 Dec 1999 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ will be raised unlike +parse+ + # which usually returns +nil+ when given an invalid date string. + def iso8601(str) + parts = Date._iso8601(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + + if parts[:offset] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from parsed string. 
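# Illustrative sketch, not part of the diff: the per-zone constructors documented above,
# each returning an ActiveSupport::TimeWithZone pinned to that zone. Assumes activesupport
# and tzinfo are installed.
require "active_support"
require "active_support/time"

zone = ActiveSupport::TimeZone["Hawaii"]
puts zone.to_s                           # => "(GMT-10:00) Hawaii"
puts zone.local(2007, 2, 1, 15, 30, 45)  # civil time interpreted in HST
puts zone.at(946_684_800)                # Unix seconds; 2000-01-01 00:00 UTC is 14:00 HST
puts zone.iso8601("1999-12-31T14:00:00") # ISO 8601 string; a missing offset is read as HST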
+ # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.parse('1999-12-31 14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.parse('22:30:00') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.parse('Mar 2000') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ could be raised. + def parse(str, now = now()) + parts_to_time(Date._parse(str, false), now) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an RFC 3339 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('2000-01-01T00:00:00Z') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time or zone components are missing then an +ArgumentError+ will + # be raised. This is much stricter than either +parse+ or +iso8601+ which + # allow for missing components. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + + TimeWithZone.new(time.utc, self) + end + + # Parses +str+ according to +format+ and returns an ActiveSupport::TimeWithZone. + # + # Assumes that +str+ is a time in the time zone +self+, + # unless +format+ includes an explicit time zone. + # (This is the same behavior as +parse+.) + # In either case, the returned TimeWithZone has the timezone of +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.strptime('1999-12-31 14:00:00', '%Y-%m-%d %H:%M:%S') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.strptime('22:30:00', '%H:%M:%S') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.strptime('Mar 2000', '%b %Y') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + def strptime(str, format, now = now()) + parts_to_time(DateTime._strptime(str, format), now) + end + + # Returns an ActiveSupport::TimeWithZone instance representing the current + # time in the time zone represented by +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.now # => Wed, 23 Jan 2008 20:24:27 HST -10:00 + def now + time_now.utc.in_time_zone(self) + end + + # Returns the current date in this time zone. + def today + tzinfo.now.to_date + end + + # Returns the next date in this time zone. + def tomorrow + today + 1 + end + + # Returns the previous date in this time zone. + def yesterday + today - 1 + end + + # Adjust the given time to the simultaneous time in the time zone + # represented by +self+. Returns a Time.utc() instance -- if you want an + # ActiveSupport::TimeWithZone instance, use Time#in_time_zone() instead. 
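# Illustrative sketch, not part of the diff: the string-parsing helpers documented above.
# Assumes activesupport and tzinfo are installed.
require "active_support"
require "active_support/time"

zone = ActiveSupport::TimeZone["Hawaii"]
puts zone.parse("1999-12-31 14:00:00")                   # lenient parse, result in HST
puts zone.strptime("1999-12-31 14:00", "%Y-%m-%d %H:%M") # explicit format, same zone rules
puts zone.rfc3339("2000-01-01T00:00:00Z")                # strict: offset required, shown in HST
puts zone.today                                          # today's Date in that zone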
+ def utc_to_local(time) + tzinfo.utc_to_local(time) + end + + # Adjust the given time to the simultaneous time in UTC. Returns a + # Time.utc() instance. + def local_to_utc(time, dst = true) + tzinfo.local_to_utc(time, dst) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_utc(time) + tzinfo.period_for_utc(time) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_local(time, dst = true) + tzinfo.period_for_local(time, dst) { |periods| periods.last } + end + + def periods_for_local(time) #:nodoc: + tzinfo.periods_for_local(time) + end + + def init_with(coder) #:nodoc: + initialize(coder["name"]) + end + + def encode_with(coder) #:nodoc: + coder.tag = "!ruby/object:#{self.class}" + coder.map = { "name" => tzinfo.name } + end + + private + def parts_to_time(parts, now) + raise ArgumentError, "invalid date" if parts.nil? + return if parts.empty? + + if parts[:seconds] + time = Time.at(parts[:seconds]) + else + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, parts[:year] || parts[:mon] ? 1 : now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + end + + if parts[:offset] || parts[:seconds] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + def time_now + Time.now + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/values/unicode_tables.dat b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/values/unicode_tables.dat new file mode 100644 index 0000000..f7d9c48 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/values/unicode_tables.dat differ diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/version.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/version.rb new file mode 100644 index 0000000..928838c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/version.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require_relative "gem_version" + +module ActiveSupport + # Returns the version of the currently loaded ActiveSupport as a Gem::Version + def self.version + gem_version + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini.rb new file mode 100644 index 0000000..f087e2c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini.rb @@ -0,0 +1,209 @@ +# frozen_string_literal: true + +require "time" +require "base64" +require "bigdecimal" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/date_time/calculations" + +module ActiveSupport + # = XmlMini + # + # To use the much faster libxml parser: + # gem 'libxml-ruby', '=0.9.7' + # XmlMini.backend = 'LibXML' + module XmlMini + extend self + + # This module decorates files deserialized using Hash.from_xml with + # the original_filename and content_type methods. 
+ module FileLike #:nodoc: + attr_writer :original_filename, :content_type + + def original_filename + @original_filename || "untitled" + end + + def content_type + @content_type || "application/octet-stream" + end + end + + DEFAULT_ENCODINGS = { + "binary" => "base64" + } unless defined?(DEFAULT_ENCODINGS) + + unless defined?(TYPE_NAMES) + TYPE_NAMES = { + "Symbol" => "symbol", + "Integer" => "integer", + "BigDecimal" => "decimal", + "Float" => "float", + "TrueClass" => "boolean", + "FalseClass" => "boolean", + "Date" => "date", + "DateTime" => "dateTime", + "Time" => "dateTime", + "Array" => "array", + "Hash" => "hash" + } + + # No need to map these on Ruby 2.4+ + TYPE_NAMES["Fixnum"] = "integer" unless 0.class == Integer + TYPE_NAMES["Bignum"] = "integer" unless 0.class == Integer + end + + FORMATTING = { + "symbol" => Proc.new { |symbol| symbol.to_s }, + "date" => Proc.new { |date| date.to_s(:db) }, + "dateTime" => Proc.new { |time| time.xmlschema }, + "binary" => Proc.new { |binary| ::Base64.encode64(binary) }, + "yaml" => Proc.new { |yaml| yaml.to_yaml } + } unless defined?(FORMATTING) + + # TODO use regexp instead of Date.parse + unless defined?(PARSING) + PARSING = { + "symbol" => Proc.new { |symbol| symbol.to_s.to_sym }, + "date" => Proc.new { |date| ::Date.parse(date) }, + "datetime" => Proc.new { |time| Time.xmlschema(time).utc rescue ::DateTime.parse(time).utc }, + "integer" => Proc.new { |integer| integer.to_i }, + "float" => Proc.new { |float| float.to_f }, + "decimal" => Proc.new do |number| + if String === number + begin + BigDecimal(number) + rescue ArgumentError + BigDecimal(number.to_f.to_s) + end + else + BigDecimal(number) + end + end, + "boolean" => Proc.new { |boolean| %w(1 true).include?(boolean.to_s.strip) }, + "string" => Proc.new { |string| string.to_s }, + "yaml" => Proc.new { |yaml| YAML.load(yaml) rescue yaml }, + "base64Binary" => Proc.new { |bin| ::Base64.decode64(bin) }, + "binary" => Proc.new { |bin, entity| _parse_binary(bin, entity) }, + "file" => Proc.new { |file, entity| _parse_file(file, entity) } + } + + PARSING.update( + "double" => PARSING["float"], + "dateTime" => PARSING["datetime"] + ) + end + + attr_accessor :depth + self.depth = 100 + + delegate :parse, to: :backend + + def backend + current_thread_backend || @backend + end + + def backend=(name) + backend = name && cast_backend_name_to_module(name) + self.current_thread_backend = backend if current_thread_backend + @backend = backend + end + + def with_backend(name) + old_backend = current_thread_backend + self.current_thread_backend = name && cast_backend_name_to_module(name) + yield + ensure + self.current_thread_backend = old_backend + end + + def to_tag(key, value, options) + type_name = options.delete(:type) + merged_options = options.merge(root: key, skip_instruct: true) + + if value.is_a?(::Method) || value.is_a?(::Proc) + if value.arity == 1 + value.call(merged_options) + else + value.call(merged_options, key.to_s.singularize) + end + elsif value.respond_to?(:to_xml) + value.to_xml(merged_options) + else + type_name ||= TYPE_NAMES[value.class.name] + type_name ||= value.class.name if value && !value.respond_to?(:to_str) + type_name = type_name.to_s if type_name + type_name = "dateTime" if type_name == "datetime" + + key = rename_key(key.to_s, options) + + attributes = options[:skip_types] || type_name.nil? ? {} : { type: type_name } + attributes[:nil] = true if value.nil? 
+ + encoding = options[:encoding] || DEFAULT_ENCODINGS[type_name] + attributes[:encoding] = encoding if encoding + + formatted_value = FORMATTING[type_name] && !value.nil? ? + FORMATTING[type_name].call(value) : value + + options[:builder].tag!(key, formatted_value, attributes) + end + end + + def rename_key(key, options = {}) + camelize = options[:camelize] + dasherize = !options.has_key?(:dasherize) || options[:dasherize] + if camelize + key = true == camelize ? key.camelize : key.camelize(camelize) + end + key = _dasherize(key) if dasherize + key + end + + private + + def _dasherize(key) + # $2 must be a non-greedy regex for this to work + left, middle, right = /\A(_*)(.*?)(_*)\Z/.match(key.strip)[1, 3] + "#{left}#{middle.tr('_ ', '--')}#{right}" + end + + # TODO: Add support for other encodings + def _parse_binary(bin, entity) + case entity["encoding"] + when "base64" + ::Base64.decode64(bin) + else + bin + end + end + + def _parse_file(file, entity) + f = StringIO.new(::Base64.decode64(file)) + f.extend(FileLike) + f.original_filename = entity["name"] + f.content_type = entity["content_type"] + f + end + + def current_thread_backend + Thread.current[:xml_mini_backend] + end + + def current_thread_backend=(name) + Thread.current[:xml_mini_backend] = name && cast_backend_name_to_module(name) + end + + def cast_backend_name_to_module(name) + if name.is_a?(Module) + name + else + require "active_support/xml_mini/#{name.downcase}" + ActiveSupport.const_get("XmlMini_#{name}") + end + end + end + + XmlMini.backend = "REXML" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/jdom.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/jdom.rb new file mode 100644 index 0000000..7f94a64 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/jdom.rb @@ -0,0 +1,183 @@ +# frozen_string_literal: true + +raise "JRuby is required to use the JDOM backend for XmlMini" unless RUBY_PLATFORM.include?("java") + +require "jruby" +include Java + +require "active_support/core_ext/object/blank" + +java_import javax.xml.parsers.DocumentBuilder unless defined? DocumentBuilder +java_import javax.xml.parsers.DocumentBuilderFactory unless defined? DocumentBuilderFactory +java_import java.io.StringReader unless defined? StringReader +java_import org.xml.sax.InputSource unless defined? InputSource +java_import org.xml.sax.Attributes unless defined? Attributes +java_import org.w3c.dom.Node unless defined? Node + +module ActiveSupport + module XmlMini_JDOM #:nodoc: + extend self + + CONTENT_KEY = "__content__".freeze + + NODE_TYPE_NAMES = %w{ATTRIBUTE_NODE CDATA_SECTION_NODE COMMENT_NODE DOCUMENT_FRAGMENT_NODE + DOCUMENT_NODE DOCUMENT_TYPE_NODE ELEMENT_NODE ENTITY_NODE ENTITY_REFERENCE_NODE NOTATION_NODE + PROCESSING_INSTRUCTION_NODE TEXT_NODE} + + node_type_map = {} + NODE_TYPE_NAMES.each { |type| node_type_map[Node.send(type)] = type } + + # Parse an XML Document string or IO into a simple hash using Java's jdom. + # data:: + # XML Document string or IO to parse + def parse(data) + if data.respond_to?(:read) + data = data.read + end + + if data.blank? 
+ {} + else + @dbf = DocumentBuilderFactory.new_instance + # secure processing of java xml + # https://archive.is/9xcQQ + @dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) + @dbf.setFeature("http://xml.org/sax/features/external-general-entities", false) + @dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false) + @dbf.setFeature(javax.xml.XMLConstants::FEATURE_SECURE_PROCESSING, true) + xml_string_reader = StringReader.new(data) + xml_input_source = InputSource.new(xml_string_reader) + doc = @dbf.new_document_builder.parse(xml_input_source) + merge_element!({ CONTENT_KEY => "" }, doc.document_element, XmlMini.depth) + end + end + + private + + # Convert an XML element and merge into the hash + # + # hash:: + # Hash to merge the converted element into. + # element:: + # XML element to merge into hash + def merge_element!(hash, element, depth) + raise "Document too deep!" if depth == 0 + delete_empty(hash) + merge!(hash, element.tag_name, collapse(element, depth)) + end + + def delete_empty(hash) + hash.delete(CONTENT_KEY) if hash[CONTENT_KEY] == "" + end + + # Actually converts an XML document element into a data structure. + # + # element:: + # The document element to be collapsed. + def collapse(element, depth) + hash = get_attributes(element) + + child_nodes = element.child_nodes + if child_nodes.length > 0 + (0...child_nodes.length).each do |i| + child = child_nodes.item(i) + merge_element!(hash, child, depth - 1) unless child.node_type == Node.TEXT_NODE + end + merge_texts!(hash, element) unless empty_content?(element) + hash + else + merge_texts!(hash, element) + end + end + + # Merge all the texts of an element into the hash + # + # hash:: + # Hash to add the converted element to. + # element:: + # XML element whose texts are to me merged into the hash + def merge_texts!(hash, element) + delete_empty(hash) + text_children = texts(element) + if text_children.join.empty? + hash + else + # must use value to prevent double-escaping + merge!(hash, CONTENT_KEY, text_children.join) + end + end + + # Adds a new key/value pair to an existing Hash. If the key to be added + # already exists and the existing value associated with key is not + # an Array, it will be wrapped in an Array. Then the new value is + # appended to that Array. + # + # hash:: + # Hash to add key/value pair to. + # key:: + # Key to be added. + # value:: + # Value to be associated with key. + def merge!(hash, key, value) + if hash.has_key?(key) + if hash[key].instance_of?(Array) + hash[key] << value + else + hash[key] = [hash[key], value] + end + elsif value.instance_of?(Array) + hash[key] = [value] + else + hash[key] = value + end + hash + end + + # Converts the attributes array of an XML element into a hash. + # Returns an empty Hash if node has no attributes. + # + # element:: + # XML element to extract attributes from. + def get_attributes(element) + attribute_hash = {} + attributes = element.attributes + (0...attributes.length).each do |i| + attribute_hash[CONTENT_KEY] ||= "" + attribute_hash[attributes.item(i).name] = attributes.item(i).value + end + attribute_hash + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. 
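# Illustrative sketch, not part of the diff: the XmlMini entry points defined above. REXML
# is the default backend wired up at the bottom of xml_mini.rb; the backend swap below
# additionally assumes the nokogiri gem is installed.
require "active_support"
require "active_support/xml_mini"

xml = %(<note priority="high"><to>Team</to></note>)

p ActiveSupport::XmlMini.parse(xml)
# => {"note"=>{"priority"=>"high", "to"=>{"__content__"=>"Team"}}}

ActiveSupport::XmlMini.with_backend("Nokogiri") do # thread-local override for the block
  p ActiveSupport::XmlMini.parse(xml)              # same hash shape from the Nokogiri backend
end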
+ def texts(element) + texts = [] + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + texts << item.get_data + end + end + texts + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + text = "".dup + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + text << item.get_data.strip + end + end + text.strip.length == 0 + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxml.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxml.rb new file mode 100644 index 0000000..0b000fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxml.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXML #:nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Parser.io(data).parse.to_hash + end + end + end +end + +module LibXML #:nodoc: + module Conversions #:nodoc: + module Document #:nodoc: + def to_hash + root.to_hash + end + end + + module Node #:nodoc: + CONTENT_ROOT = "__content__".freeze + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + each_child do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= "".dup + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? + node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + each_attr { |a| node_hash[a.name] = a.value } + + hash + end + end + end +end + +# :enddoc: + +LibXML::XML::Document.include(LibXML::Conversions::Document) +LibXML::XML::Node.include(LibXML::Conversions::Node) diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxmlsax.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxmlsax.rb new file mode 100644 index 0000000..dcf16e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxmlsax.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXMLSAX #:nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. 
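# Illustrative sketch, not part of the diff: selecting the SAX-based backend described
# above. Assumes the libxml-ruby gem is installed; "LibXMLSAX" resolves to the
# XmlMini_LibXMLSAX module via cast_backend_name_to_module.
require "active_support"
require "active_support/xml_mini"

ActiveSupport::XmlMini.with_backend("LibXMLSAX") do
  p ActiveSupport::XmlMini.parse("<list><item>1</item><item>2</item></list>")
  # repeated elements collapse into an Array: "item" => [{"__content__"=>"1"}, ...]
end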
+ class HashBuilder + include LibXML::XML::SaxParser::Callbacks + + CONTENT_KEY = "__content__".freeze + HASH_SIZE_KEY = "__hash_size__".freeze + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def on_start_document + @hash = { CONTENT_KEY => "".dup } + @hash_stack = [@hash] + end + + def on_end_document + @hash = @hash_stack.pop + @hash.delete(CONTENT_KEY) + end + + def on_start_element(name, attrs = {}) + new_hash = { CONTENT_KEY => "".dup }.merge!(attrs) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def on_end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def on_characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :on_cdata_block, :on_characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Error.set_handler(&LibXML::XML::Error::QUIET_HANDLER) + parser = LibXML::XML::SaxParser.io(data) + document = document_class.new + + parser.callbacks = document + parser.parse + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogiri.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogiri.rb new file mode 100644 index 0000000..5ee6fc8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogiri.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_Nokogiri #:nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml / nokogiri. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + doc = Nokogiri::XML(data) + raise doc.errors.first if doc.errors.length > 0 + doc.to_hash + end + end + + module Conversions #:nodoc: + module Document #:nodoc: + def to_hash + root.to_hash + end + end + + module Node #:nodoc: + CONTENT_ROOT = "__content__".freeze + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + children.each do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= "".dup + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank and there are child tags + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? 
+ node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + attribute_nodes.each { |a| node_hash[a.node_name] = a.value } + + hash + end + end + end + + Nokogiri::XML::Document.include(Conversions::Document) + Nokogiri::XML::Node.include(Conversions::Node) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogirisax.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogirisax.rb new file mode 100644 index 0000000..b01ed00 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogirisax.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_NokogiriSAX #:nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. + class HashBuilder < Nokogiri::XML::SAX::Document + CONTENT_KEY = "__content__".freeze + HASH_SIZE_KEY = "__hash_size__".freeze + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def start_document + @hash = {} + @hash_stack = [@hash] + end + + def end_document + raise "Parse stack not empty!" if @hash_stack.size > 1 + end + + def error(error_message) + raise error_message + end + + def start_element(name, attrs = []) + new_hash = { CONTENT_KEY => "".dup }.merge!(Hash[attrs]) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :cdata_block, :characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + document = document_class.new + parser = Nokogiri::XML::SAX::Parser.new(document) + parser.parse(data) + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/rexml.rb b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/rexml.rb new file mode 100644 index 0000000..32458d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/rexml.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_REXML #:nodoc: + extend self + + CONTENT_KEY = "__content__".freeze + + # Parse an XML Document string or IO into a simple hash. + # + # Same as XmlSimple::xml_in but doesn't shoot itself in the foot, + # and uses the defaults from Active Support. + # + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? 
+ {} + else + silence_warnings { require "rexml/document" } unless defined?(REXML::Document) + doc = REXML::Document.new(data) + + if doc.root + merge_element!({}, doc.root, XmlMini.depth) + else + raise REXML::ParseException, + "The document #{doc.to_s.inspect} does not have a valid root" + end + end + end + + private + # Convert an XML element and merge into the hash + # + # hash:: + # Hash to merge the converted element into. + # element:: + # XML element to merge into hash + def merge_element!(hash, element, depth) + raise REXML::ParseException, "The document is too deep" if depth == 0 + merge!(hash, element.name, collapse(element, depth)) + end + + # Actually converts an XML document element into a data structure. + # + # element:: + # The document element to be collapsed. + def collapse(element, depth) + hash = get_attributes(element) + + if element.has_elements? + element.each_element { |child| merge_element!(hash, child, depth - 1) } + merge_texts!(hash, element) unless empty_content?(element) + hash + else + merge_texts!(hash, element) + end + end + + # Merge all the texts of an element into the hash + # + # hash:: + # Hash to add the converted element to. + # element:: + # XML element whose texts are to me merged into the hash + def merge_texts!(hash, element) + unless element.has_text? + hash + else + # must use value to prevent double-escaping + texts = "".dup + element.texts.each { |t| texts << t.value } + merge!(hash, CONTENT_KEY, texts) + end + end + + # Adds a new key/value pair to an existing Hash. If the key to be added + # already exists and the existing value associated with key is not + # an Array, it will be wrapped in an Array. Then the new value is + # appended to that Array. + # + # hash:: + # Hash to add key/value pair to. + # key:: + # Key to be added. + # value:: + # Value to be associated with key. + def merge!(hash, key, value) + if hash.has_key?(key) + if hash[key].instance_of?(Array) + hash[key] << value + else + hash[key] = [hash[key], value] + end + elsif value.instance_of?(Array) + hash[key] = [value] + else + hash[key] = value + end + hash + end + + # Converts the attributes array of an XML element into a hash. + # Returns an empty Hash if node has no attributes. + # + # element:: + # XML element to extract attributes from. + def get_attributes(element) + attributes = {} + element.attributes.each { |n, v| attributes[n] = v } + attributes + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + element.texts.join.blank? 
+ end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/CHANGELOG.md new file mode 100644 index 0000000..4a9f866 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/CHANGELOG.md @@ -0,0 +1,246 @@ +# Addressable 2.8.0 +- fixes ReDoS vulnerability in Addressable::Template#match +- no longer replaces `+` with spaces in queries for non-http(s) schemes +- fixed encoding ipv6 literals +- the `:compacted` flag for `normalized_query` now dedupes parameters +- fix broken `escape_component` alias +- dropping support for Ruby 2.0 and 2.1 +- adding Ruby 3.0 compatibility for development tasks +- drop support for `rack-mount` and remove Addressable::Template#generate +- performance improvements +- switch CI/CD to GitHub Actions + +# Addressable 2.7.0 +- added `:compacted` flag to `normalized_query` +- `heuristic_parse` handles `mailto:` more intuitively +- dropped explicit support for JRuby 9.0.5.0 +- compatibility w/ public_suffix 4.x +- performance improvements + +# Addressable 2.6.0 +- added `tld=` method to allow assignment to the public suffix +- most `heuristic_parse` patterns are now case-insensitive +- `heuristic_parse` handles more `file://` URI variations +- fixes bug in `heuristic_parse` when uri starts with digit +- fixes bug in `request_uri=` with query strings +- fixes template issues with `nil` and `?` operator +- `frozen_string_literal` pragmas added +- minor performance improvements in regexps +- fixes to eliminate warnings + +# Addressable 2.5.2 +- better support for frozen string literals +- fixed bug w/ uppercase characters in scheme +- IDNA errors w/ emoji URLs +- compatibility w/ public_suffix 3.x + +# Addressable 2.5.1 +- allow unicode normalization to be disabled for URI Template expansion +- removed duplicate test + +# Addressable 2.5.0 +- dropping support for Ruby 1.9 +- adding support for Ruby 2.4 preview +- add support for public suffixes and tld; first runtime dependency +- hostname escaping should match RFC; underscores in hostnames no longer escaped +- paths beginning with // and missing an authority are now considered invalid +- validation now also takes place after setting a path +- handle backslashes in authority more like a browser for `heuristic_parse` +- unescaped backslashes in host now raise an `InvalidURIError` +- `merge!`, `join!`, `omit!` and `normalize!` don't disable deferred validation +- `heuristic_parse` now trims whitespace before parsing +- host parts longer than 63 bytes will be ignored and not passed to libidn +- normalized values always encoded as UTF-8 + +# Addressable 2.4.0 +- support for 1.8.x dropped +- double quotes in a host now raises an error +- newlines in host will no longer get unescaped during normalization +- stricter handling of bogus scheme values +- stricter handling of encoded port values +- calling `require 'addressable'` will now load both the URI and Template files +- assigning to the `hostname` component with an `IPAddr` object is now supported +- assigning to the `origin` component is now supported +- fixed minor bug where an exception would be thrown for a missing ACE suffix +- better partial expansion of URI templates + +# Addressable 2.3.8 +- fix warnings +- update dependency gems +- support for 1.8.x officially deprecated + +# Addressable 2.3.7 +- fix scenario in which invalid URIs don't get an exception until inspected +- handle hostnames with two adjacent periods correctly +- upgrade of RSpec + +# Addressable 2.3.6 +- 
normalization drops empty query string +- better handling in template extract for missing values +- template modifier for `'?'` now treated as optional +- fixed issue where character class parameters were modified +- templates can now be tested for equality +- added `:sorted` option to normalization of query strings +- fixed issue with normalization of hosts given in `'example.com.'` form + +# Addressable 2.3.5 +- added Addressable::URI#empty? method +- Addressable::URI#hostname methods now strip square brackets from IPv6 hosts +- compatibility with Net::HTTP in Ruby 2.0.0 +- Addressable::URI#route_from should always give relative URIs + +# Addressable 2.3.4 +- fixed issue with encoding altering its inputs +- query string normalization now leaves ';' characters alone +- FakeFS is detected before attempting to load unicode tables +- additional testing to ensure frozen objects don't cause problems + +# Addressable 2.3.3 +- fixed issue with converting common primitives during template expansion +- fixed port encoding issue +- removed a few warnings +- normalize should now ignore %2B in query strings +- the IDNA logic should now be handled by libidn in Ruby 1.9 +- no template match should now result in nil instead of an empty MatchData +- added license information to gemspec + +# Addressable 2.3.2 +- added Addressable::URI#default_port method +- fixed issue with Marshalling Unicode data on Windows +- improved heuristic parsing to better handle IPv4 addresses + +# Addressable 2.3.1 +- fixed missing unicode data file + +# Addressable 2.3.0 +- updated Addressable::Template to use RFC 6570, level 4 +- fixed compatibility problems with some versions of Ruby +- moved unicode tables into a data file for performance reasons +- removing support for multiple query value notations + +# Addressable 2.2.8 +- fixed issues with dot segment removal code +- form encoding can now handle multiple values per key +- updated development environment + +# Addressable 2.2.7 +- fixed issues related to Addressable::URI#query_values= +- the Addressable::URI.parse method is now polymorphic + +# Addressable 2.2.6 +- changed the way ambiguous paths are handled +- fixed bug with frozen URIs +- https supported in heuristic parsing + +# Addressable 2.2.5 +- 'parsing' a pre-parsed URI object is now a dup operation +- introduced conditional support for libidn +- fixed normalization issue on ampersands in query strings +- added additional tests around handling of query strings + +# Addressable 2.2.4 +- added origin support from draft-ietf-websec-origin-00 +- resolved issue with attempting to navigate below root +- fixed bug with string splitting in query strings + +# Addressable 2.2.3 +- added :flat_array notation for query strings + +# Addressable 2.2.2 +- fixed issue with percent escaping of '+' character in query strings + +# Addressable 2.2.1 +- added support for application/x-www-form-urlencoded. 
+ +# Addressable 2.2.0 +- added site methods +- improved documentation + +# Addressable 2.1.2 +- added HTTP request URI methods +- better handling of Windows file paths +- validation_deferred boolean replaced with defer_validation block +- normalization of percent-encoded paths should now be correct +- fixed issue with constructing URIs with relative paths +- fixed warnings + +# Addressable 2.1.1 +- more type checking changes +- fixed issue with unicode normalization +- added method to find template defaults +- symbolic keys are now allowed in template mappings +- numeric values and symbolic values are now allowed in template mappings + +# Addressable 2.1.0 +- refactored URI template support out into its own class +- removed extract method due to being useless and unreliable +- removed Addressable::URI.expand_template +- removed Addressable::URI#extract_mapping +- added partial template expansion +- fixed minor bugs in the parse and heuristic_parse methods +- fixed incompatibility with Ruby 1.9.1 +- fixed bottleneck in Addressable::URI#hash and Addressable::URI#to_s +- fixed unicode normalization exception +- updated query_values methods to better handle subscript notation +- worked around issue with freezing URIs +- improved specs + +# Addressable 2.0.2 +- fixed issue with URI template expansion +- fixed issue with percent escaping characters 0-15 + +# Addressable 2.0.1 +- fixed issue with query string assignment +- fixed issue with improperly encoded components + +# Addressable 2.0.0 +- the initialize method now takes an options hash as its only parameter +- added query_values method to URI class +- completely replaced IDNA implementation with pure Ruby +- renamed Addressable::ADDRESSABLE_VERSION to Addressable::VERSION +- completely reworked the Rakefile +- changed the behavior of the port method significantly +- Addressable::URI.encode_segment, Addressable::URI.unencode_segment renamed +- documentation is now in YARD format +- more rigorous type checking +- to_str method implemented, implicit conversion to Strings now allowed +- Addressable::URI#omit method added, Addressable::URI#merge method replaced +- updated URI Template code to match v 03 of the draft spec +- added a bunch of new specifications + +# Addressable 1.0.4 +- switched to using RSpec's pending system for specs that rely on IDN +- fixed issue with creating URIs with paths that are not prefixed with '/' + +# Addressable 1.0.3 +- implemented a hash method + +# Addressable 1.0.2 +- fixed minor bug with the extract_mapping method + +# Addressable 1.0.1 +- fixed minor bug with the extract_mapping method + +# Addressable 1.0.0 +- heuristic parse method added +- parsing is slightly more strict +- replaced to_h with to_hash +- fixed routing methods +- improved specifications +- improved heckle rake task +- no surviving heckle mutations + +# Addressable 0.1.2 +- improved normalization +- fixed bug in joining algorithm +- updated specifications + +# Addressable 0.1.1 +- updated documentation +- added URI Template variable extraction + +# Addressable 0.1.0 +- initial release +- implementation based on RFC 3986, 3987 +- support for IRIs via libidn +- support for the URI Template draft spec diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/Gemfile b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/Gemfile new file mode 100644 index 0000000..2684c15 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/Gemfile @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +gemspec(path: 
__FILE__ == "(eval)" ? ".." : ".") + +group :test do + gem 'rspec', '~> 3.8' + gem 'rspec-its', '~> 1.3' +end + +group :coverage do + gem "coveralls", "> 0.7", require: false, platforms: :mri + gem "simplecov", require: false +end + +group :development do + gem 'launchy', '~> 2.4', '>= 2.4.3' + gem 'redcarpet', :platform => :mri_19 + gem 'yard' +end + +group :test, :development do + gem 'memory_profiler' + gem "rake", ">= 12.3.3" +end + +gem "idn-ruby", platform: :mri diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/LICENSE.txt b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/LICENSE.txt new file mode 100644 index 0000000..ef51da2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/README.md b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/README.md new file mode 100644 index 0000000..9892f61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/README.md @@ -0,0 +1,121 @@ +# Addressable + +
+<dl>
+  <dt>Homepage</dt><dd>github.com/sporkmonger/addressable</dd>
+  <dt>Author</dt><dd>Bob Aman</dd>
+  <dt>Copyright</dt><dd>Copyright © Bob Aman</dd>
+  <dt>License</dt><dd>Apache 2.0</dd>
+</dl>
+ +[![Gem Version](https://img.shields.io/gem/dt/addressable.svg)][gem] +[![Build Status](https://github.com/sporkmonger/addressable/workflows/CI/badge.svg)][actions] +[![Test Coverage Status](https://img.shields.io/coveralls/sporkmonger/addressable.svg)][coveralls] +[![Documentation Coverage Status](https://inch-ci.org/github/sporkmonger/addressable.svg?branch=master)][inch] + +[gem]: https://rubygems.org/gems/addressable +[actions]: https://github.com/sporkmonger/addressable/actions +[coveralls]: https://coveralls.io/r/sporkmonger/addressable +[inch]: https://inch-ci.org/github/sporkmonger/addressable + +# Description + +Addressable is an alternative implementation to the URI implementation +that is part of Ruby's standard library. It is flexible, offers heuristic +parsing, and additionally provides extensive support for IRIs and URI templates. + +Addressable closely conforms to RFC 3986, RFC 3987, and RFC 6570 (level 4). + +# Reference + +- {Addressable::URI} +- {Addressable::Template} + +# Example usage + +```ruby +require "addressable/uri" + +uri = Addressable::URI.parse("http://example.com/path/to/resource/") +uri.scheme +#=> "http" +uri.host +#=> "example.com" +uri.path +#=> "/path/to/resource/" + +uri = Addressable::URI.parse("http://www.詹姆斯.com/") +uri.normalize +#=> # +``` + + +# URI Templates + +For more details, see [RFC 6570](https://www.rfc-editor.org/rfc/rfc6570.txt). + + +```ruby + +require "addressable/template" + +template = Addressable::Template.new("http://example.com/{?query*}") +template.expand({ + "query" => { + 'foo' => 'bar', + 'color' => 'red' + } +}) +#=> # + +template = Addressable::Template.new("http://example.com/{?one,two,three}") +template.partial_expand({"one" => "1", "three" => 3}).pattern +#=> "http://example.com/?one=1{&two}&three=3" + +template = Addressable::Template.new( + "http://{host}{/segments*}/{?one,two,bogus}{#fragment}" +) +uri = Addressable::URI.parse( + "http://example.com/a/b/c/?one=1&two=2#foo" +) +template.extract(uri) +#=> +# { +# "host" => "example.com", +# "segments" => ["a", "b", "c"], +# "one" => "1", +# "two" => "2", +# "fragment" => "foo" +# } +``` + +# Install + +```console +$ gem install addressable +``` + +You may optionally turn on native IDN support by installing libidn and the +idn gem: + +```console +$ sudo apt-get install libidn11-dev # Debian/Ubuntu +$ brew install libidn # OS X +$ gem install idn-ruby +``` + +# Semantic Versioning + +This project uses [Semantic Versioning](https://semver.org/). 
You can (and should) specify your +dependency using a pessimistic version constraint covering the major and minor +values: + +```ruby +spec.add_dependency 'addressable', '~> 2.7' +``` + +If you need a specific bug fix, you can also specify minimum tiny versions +without preventing updates to the latest minor release: + +```ruby +spec.add_dependency 'addressable', '~> 2.3', '>= 2.3.7' +``` diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/Rakefile b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/Rakefile new file mode 100644 index 0000000..b7e0ff3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/Rakefile @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +require 'rubygems' +require 'rake' + +require File.join(File.dirname(__FILE__), 'lib', 'addressable', 'version') + +PKG_DISPLAY_NAME = 'Addressable' +PKG_NAME = PKG_DISPLAY_NAME.downcase +PKG_VERSION = Addressable::VERSION::STRING +PKG_FILE_NAME = "#{PKG_NAME}-#{PKG_VERSION}" + +RELEASE_NAME = "REL #{PKG_VERSION}" + +PKG_SUMMARY = "URI Implementation" +PKG_DESCRIPTION = <<-TEXT +Addressable is an alternative implementation to the URI implementation that is +part of Ruby's standard library. It is flexible, offers heuristic parsing, and +additionally provides extensive support for IRIs and URI templates. +TEXT + +PKG_FILES = FileList[ + "lib/**/*", "spec/**/*", "vendor/**/*", "data/**/*", + "tasks/**/*", + "[A-Z]*", "Rakefile" +].exclude(/pkg/).exclude(/database\.yml/). + exclude(/Gemfile\.lock/).exclude(/[_\.]git$/) + +task :default => "spec" + +WINDOWS = (RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/) rescue false +SUDO = WINDOWS ? '' : ('sudo' unless ENV['SUDOLESS']) + +Dir['tasks/**/*.rake'].each { |rake| load rake } diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/addressable.gemspec b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/addressable.gemspec new file mode 100644 index 0000000..12666a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/addressable.gemspec @@ -0,0 +1,37 @@ +# -*- encoding: utf-8 -*- +# stub: addressable 2.8.0 ruby lib + +Gem::Specification.new do |s| + s.name = "addressable".freeze + s.version = "2.8.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Bob Aman".freeze] + s.date = "2021-07-03" + s.description = "Addressable is an alternative implementation to the URI implementation that is\npart of Ruby's standard library. 
It is flexible, offers heuristic parsing, and\nadditionally provides extensive support for IRIs and URI templates.\n".freeze + s.email = "bob@sporkmonger.com".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["CHANGELOG.md".freeze, "Gemfile".freeze, "LICENSE.txt".freeze, "README.md".freeze, "Rakefile".freeze, "addressable.gemspec".freeze, "data/unicode.data".freeze, "lib/addressable.rb".freeze, "lib/addressable/idna.rb".freeze, "lib/addressable/idna/native.rb".freeze, "lib/addressable/idna/pure.rb".freeze, "lib/addressable/template.rb".freeze, "lib/addressable/uri.rb".freeze, "lib/addressable/version.rb".freeze, "spec/addressable/idna_spec.rb".freeze, "spec/addressable/net_http_compat_spec.rb".freeze, "spec/addressable/security_spec.rb".freeze, "spec/addressable/template_spec.rb".freeze, "spec/addressable/uri_spec.rb".freeze, "spec/spec_helper.rb".freeze, "tasks/clobber.rake".freeze, "tasks/gem.rake".freeze, "tasks/git.rake".freeze, "tasks/metrics.rake".freeze, "tasks/profile.rake".freeze, "tasks/rspec.rake".freeze, "tasks/yard.rake".freeze] + s.homepage = "https://github.com/sporkmonger/addressable".freeze + s.licenses = ["Apache-2.0".freeze] + s.rdoc_options = ["--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.0.3".freeze + s.summary = "URI Implementation".freeze + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q.freeze, [">= 2.0.2", "< 5.0"]) + s.add_development_dependency(%q.freeze, [">= 1.0", "< 3.0"]) + else + s.add_dependency(%q.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q.freeze, [">= 1.0", "< 3.0"]) + end + else + s.add_dependency(%q.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q.freeze, [">= 1.0", "< 3.0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/data/unicode.data b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/data/unicode.data new file mode 100644 index 0000000..cdfc224 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/data/unicode.data differ diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable.rb new file mode 100644 index 0000000..b4e98b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require 'addressable/uri' +require 'addressable/template' diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna.rb new file mode 100644 index 0000000..e41c1f5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#++ + + +begin + require "addressable/idna/native" +rescue LoadError + # libidn or the idn gem was not available, fall back on a pure-Ruby + # implementation... + require "addressable/idna/pure" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna/native.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna/native.rb new file mode 100644 index 0000000..84de8e8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna/native.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "idn" + +module Addressable + module IDNA + def self.punycode_encode(value) + IDN::Punycode.encode(value.to_s) + end + + def self.punycode_decode(value) + IDN::Punycode.decode(value.to_s) + end + + def self.unicode_normalize_kc(value) + IDN::Stringprep.nfkc_normalize(value.to_s) + end + + def self.to_ascii(value) + value.to_s.split('.', -1).map do |segment| + if segment.size > 0 && segment.size < 64 + IDN::Idna.toASCII(segment, IDN::Idna::ALLOW_UNASSIGNED) + elsif segment.size >= 64 + segment + else + '' + end + end.join('.') + end + + def self.to_unicode(value) + value.to_s.split('.', -1).map do |segment| + if segment.size > 0 && segment.size < 64 + IDN::Idna.toUnicode(segment, IDN::Idna::ALLOW_UNASSIGNED) + elsif segment.size >= 64 + segment + else + '' + end + end.join('.') + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna/pure.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna/pure.rb new file mode 100644 index 0000000..7a0c1fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/idna/pure.rb @@ -0,0 +1,678 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +module Addressable + module IDNA + # This module is loosely based on idn_actionmailer by Mick Staugaard, + # the unicode library by Yoshida Masato, and the punycode implementation + # by Kazuhiro Nishiyama. Most of the code was copied verbatim, but + # some reformatting was done, and some translation from C was done. + # + # Without their code to work from as a base, we'd all still be relying + # on the presence of libidn. Which nobody ever seems to have installed. 
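Whichever IDNA backend ends up loaded, it exposes the same small `to_ascii` / `to_unicode` surface. A minimal illustrative sketch (not taken from the vendored files; the punycoded host matches the one used in this gem's own README example):

```ruby
# Sketch only: converting between Unicode and ASCII (punycode) hostnames.
require "addressable/idna"

Addressable::IDNA.to_ascii("www.詹姆斯.com")
# => "www.xn--8ws00zhy3a.com"

Addressable::IDNA.to_unicode("www.xn--8ws00zhy3a.com")
# => "www.詹姆斯.com"
```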
+ # + # Original sources: + # http://github.com/staugaard/idn_actionmailer + # http://www.yoshidam.net/Ruby.html#unicode + # http://rubyforge.org/frs/?group_id=2550 + + + UNICODE_TABLE = File.expand_path( + File.join(File.dirname(__FILE__), '../../..', 'data/unicode.data') + ) + + ACE_PREFIX = "xn--" + + UTF8_REGEX = /\A(?: + [\x09\x0A\x0D\x20-\x7E] # ASCII + | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4nil5 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )*\z/mnx + + UTF8_REGEX_MULTIBYTE = /(?: + [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4nil5 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )/mnx + + # :startdoc: + + # Converts from a Unicode internationalized domain name to an ASCII + # domain name as described in RFC 3490. + def self.to_ascii(input) + input = input.to_s unless input.is_a?(String) + input = input.dup + if input.respond_to?(:force_encoding) + input.force_encoding(Encoding::ASCII_8BIT) + end + if input =~ UTF8_REGEX && input =~ UTF8_REGEX_MULTIBYTE + parts = unicode_downcase(input).split('.') + parts.map! do |part| + if part.respond_to?(:force_encoding) + part.force_encoding(Encoding::ASCII_8BIT) + end + if part =~ UTF8_REGEX && part =~ UTF8_REGEX_MULTIBYTE + ACE_PREFIX + punycode_encode(unicode_normalize_kc(part)) + else + part + end + end + parts.join('.') + else + input + end + end + + # Converts from an ASCII domain name to a Unicode internationalized + # domain name as described in RFC 3490. + def self.to_unicode(input) + input = input.to_s unless input.is_a?(String) + parts = input.split('.') + parts.map! do |part| + if part =~ /^#{ACE_PREFIX}(.+)/ + begin + punycode_decode(part[/^#{ACE_PREFIX}(.+)/, 1]) + rescue Addressable::IDNA::PunycodeBadInput + # toUnicode is explicitly defined as never-fails by the spec + part + end + else + part + end + end + output = parts.join('.') + if output.respond_to?(:force_encoding) + output.force_encoding(Encoding::UTF_8) + end + output + end + + # Unicode normalization form KC. + def self.unicode_normalize_kc(input) + input = input.to_s unless input.is_a?(String) + unpacked = input.unpack("U*") + unpacked = + unicode_compose(unicode_sort_canonical(unicode_decompose(unpacked))) + return unpacked.pack("U*") + end + + ## + # Unicode aware downcase method. + # + # @api private + # @param [String] input + # The input string. + # @return [String] The downcased result. + def self.unicode_downcase(input) + input = input.to_s unless input.is_a?(String) + unpacked = input.unpack("U*") + unpacked.map! 
{ |codepoint| lookup_unicode_lowercase(codepoint) } + return unpacked.pack("U*") + end + private_class_method :unicode_downcase + + def self.unicode_compose(unpacked) + unpacked_result = [] + length = unpacked.length + + return unpacked if length == 0 + + starter = unpacked[0] + starter_cc = lookup_unicode_combining_class(starter) + starter_cc = 256 if starter_cc != 0 + for i in 1...length + ch = unpacked[i] + + if (starter_cc == 0 && + (composite = unicode_compose_pair(starter, ch)) != nil) + starter = composite + else + unpacked_result << starter + starter = ch + end + end + unpacked_result << starter + return unpacked_result + end + private_class_method :unicode_compose + + def self.unicode_compose_pair(ch_one, ch_two) + if ch_one >= HANGUL_LBASE && ch_one < HANGUL_LBASE + HANGUL_LCOUNT && + ch_two >= HANGUL_VBASE && ch_two < HANGUL_VBASE + HANGUL_VCOUNT + # Hangul L + V + return HANGUL_SBASE + ( + (ch_one - HANGUL_LBASE) * HANGUL_VCOUNT + (ch_two - HANGUL_VBASE) + ) * HANGUL_TCOUNT + elsif ch_one >= HANGUL_SBASE && + ch_one < HANGUL_SBASE + HANGUL_SCOUNT && + (ch_one - HANGUL_SBASE) % HANGUL_TCOUNT == 0 && + ch_two >= HANGUL_TBASE && ch_two < HANGUL_TBASE + HANGUL_TCOUNT + # Hangul LV + T + return ch_one + (ch_two - HANGUL_TBASE) + end + + p = [] + + ucs4_to_utf8(ch_one, p) + ucs4_to_utf8(ch_two, p) + + return lookup_unicode_composition(p) + end + private_class_method :unicode_compose_pair + + def self.ucs4_to_utf8(char, buffer) + if char < 128 + buffer << char + elsif char < 2048 + buffer << (char >> 6 | 192) + buffer << (char & 63 | 128) + elsif char < 0x10000 + buffer << (char >> 12 | 224) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + elsif char < 0x200000 + buffer << (char >> 18 | 240) + buffer << (char >> 12 & 63 | 128) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + elsif char < 0x4000000 + buffer << (char >> 24 | 248) + buffer << (char >> 18 & 63 | 128) + buffer << (char >> 12 & 63 | 128) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + elsif char < 0x80000000 + buffer << (char >> 30 | 252) + buffer << (char >> 24 & 63 | 128) + buffer << (char >> 18 & 63 | 128) + buffer << (char >> 12 & 63 | 128) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + end + end + private_class_method :ucs4_to_utf8 + + def self.unicode_sort_canonical(unpacked) + unpacked = unpacked.dup + i = 1 + length = unpacked.length + + return unpacked if length < 2 + + while i < length + last = unpacked[i-1] + ch = unpacked[i] + last_cc = lookup_unicode_combining_class(last) + cc = lookup_unicode_combining_class(ch) + if cc != 0 && last_cc != 0 && last_cc > cc + unpacked[i] = last + unpacked[i-1] = ch + i -= 1 if i > 1 + else + i += 1 + end + end + return unpacked + end + private_class_method :unicode_sort_canonical + + def self.unicode_decompose(unpacked) + unpacked_result = [] + for cp in unpacked + if cp >= HANGUL_SBASE && cp < HANGUL_SBASE + HANGUL_SCOUNT + l, v, t = unicode_decompose_hangul(cp) + unpacked_result << l + unpacked_result << v if v + unpacked_result << t if t + else + dc = lookup_unicode_compatibility(cp) + unless dc + unpacked_result << cp + else + unpacked_result.concat(unicode_decompose(dc.unpack("U*"))) + end + end + end + return unpacked_result + end + private_class_method :unicode_decompose + + def self.unicode_decompose_hangul(codepoint) + sindex = codepoint - HANGUL_SBASE; + if sindex < 0 || sindex >= HANGUL_SCOUNT + l = codepoint + v = t = nil + return l, v, t + end + l = HANGUL_LBASE + sindex / HANGUL_NCOUNT + v 
= HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT + t = HANGUL_TBASE + sindex % HANGUL_TCOUNT + if t == HANGUL_TBASE + t = nil + end + return l, v, t + end + private_class_method :unicode_decompose_hangul + + def self.lookup_unicode_combining_class(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ? + (codepoint_data[UNICODE_DATA_COMBINING_CLASS] || 0) : + 0) + end + private_class_method :lookup_unicode_combining_class + + def self.lookup_unicode_compatibility(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ? + codepoint_data[UNICODE_DATA_COMPATIBILITY] : nil) + end + private_class_method :lookup_unicode_compatibility + + def self.lookup_unicode_lowercase(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ? + (codepoint_data[UNICODE_DATA_LOWERCASE] || codepoint) : + codepoint) + end + private_class_method :lookup_unicode_lowercase + + def self.lookup_unicode_composition(unpacked) + return COMPOSITION_TABLE[unpacked] + end + private_class_method :lookup_unicode_composition + + HANGUL_SBASE = 0xac00 + HANGUL_LBASE = 0x1100 + HANGUL_LCOUNT = 19 + HANGUL_VBASE = 0x1161 + HANGUL_VCOUNT = 21 + HANGUL_TBASE = 0x11a7 + HANGUL_TCOUNT = 28 + HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT # 588 + HANGUL_SCOUNT = HANGUL_LCOUNT * HANGUL_NCOUNT # 11172 + + UNICODE_DATA_COMBINING_CLASS = 0 + UNICODE_DATA_EXCLUSION = 1 + UNICODE_DATA_CANONICAL = 2 + UNICODE_DATA_COMPATIBILITY = 3 + UNICODE_DATA_UPPERCASE = 4 + UNICODE_DATA_LOWERCASE = 5 + UNICODE_DATA_TITLECASE = 6 + + begin + if defined?(FakeFS) + fakefs_state = FakeFS.activated? + FakeFS.deactivate! + end + # This is a sparse Unicode table. Codepoints without entries are + # assumed to have the value: [0, 0, nil, nil, nil, nil, nil] + UNICODE_DATA = File.open(UNICODE_TABLE, "rb") do |file| + Marshal.load(file.read) + end + ensure + if defined?(FakeFS) + FakeFS.activate! if fakefs_state + end + end + + COMPOSITION_TABLE = {} + UNICODE_DATA.each do |codepoint, data| + canonical = data[UNICODE_DATA_CANONICAL] + exclusion = data[UNICODE_DATA_EXCLUSION] + + if canonical && exclusion == 0 + COMPOSITION_TABLE[canonical.unpack("C*")] = codepoint + end + end + + UNICODE_MAX_LENGTH = 256 + ACE_MAX_LENGTH = 256 + + PUNYCODE_BASE = 36 + PUNYCODE_TMIN = 1 + PUNYCODE_TMAX = 26 + PUNYCODE_SKEW = 38 + PUNYCODE_DAMP = 700 + PUNYCODE_INITIAL_BIAS = 72 + PUNYCODE_INITIAL_N = 0x80 + PUNYCODE_DELIMITER = 0x2D + + PUNYCODE_MAXINT = 1 << 64 + + PUNYCODE_PRINT_ASCII = + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" + + " !\"\#$%&'()*+,-./" + + "0123456789:;<=>?" + + "@ABCDEFGHIJKLMNO" + + "PQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmno" + + "pqrstuvwxyz{|}~\n" + + # Input is invalid. + class PunycodeBadInput < StandardError; end + # Output would exceed the space provided. + class PunycodeBigOutput < StandardError; end + # Input needs wider integers to process. + class PunycodeOverflow < StandardError; end + + def self.punycode_encode(unicode) + unicode = unicode.to_s unless unicode.is_a?(String) + input = unicode.unpack("U*") + output = [0] * (ACE_MAX_LENGTH + 1) + input_length = input.size + output_length = [ACE_MAX_LENGTH] + + # Initialize the state + n = PUNYCODE_INITIAL_N + delta = out = 0 + max_out = output_length[0] + bias = PUNYCODE_INITIAL_BIAS + + # Handle the basic code points: + input_length.times do |j| + if punycode_basic?(input[j]) + if max_out - out < 2 + raise PunycodeBigOutput, + "Output would exceed the space provided." 
+ end + output[out] = input[j] + out += 1 + end + end + + h = b = out + + # h is the number of code points that have been handled, b is the + # number of basic code points, and out is the number of characters + # that have been output. + + if b > 0 + output[out] = PUNYCODE_DELIMITER + out += 1 + end + + # Main encoding loop: + + while h < input_length + # All non-basic code points < n have been + # handled already. Find the next larger one: + + m = PUNYCODE_MAXINT + input_length.times do |j| + m = input[j] if (n...m) === input[j] + end + + # Increase delta enough to advance the decoder's + # state to , but guard against overflow: + + if m - n > (PUNYCODE_MAXINT - delta) / (h + 1) + raise PunycodeOverflow, "Input needs wider integers to process." + end + delta += (m - n) * (h + 1) + n = m + + input_length.times do |j| + # Punycode does not need to check whether input[j] is basic: + if input[j] < n + delta += 1 + if delta == 0 + raise PunycodeOverflow, + "Input needs wider integers to process." + end + end + + if input[j] == n + # Represent delta as a generalized variable-length integer: + + q = delta; k = PUNYCODE_BASE + while true + if out >= max_out + raise PunycodeBigOutput, + "Output would exceed the space provided." + end + t = ( + if k <= bias + PUNYCODE_TMIN + elsif k >= bias + PUNYCODE_TMAX + PUNYCODE_TMAX + else + k - bias + end + ) + break if q < t + output[out] = + punycode_encode_digit(t + (q - t) % (PUNYCODE_BASE - t)) + out += 1 + q = (q - t) / (PUNYCODE_BASE - t) + k += PUNYCODE_BASE + end + + output[out] = punycode_encode_digit(q) + out += 1 + bias = punycode_adapt(delta, h + 1, h == b) + delta = 0 + h += 1 + end + end + + delta += 1 + n += 1 + end + + output_length[0] = out + + outlen = out + outlen.times do |j| + c = output[j] + unless c >= 0 && c <= 127 + raise StandardError, "Invalid output char." + end + unless PUNYCODE_PRINT_ASCII[c] + raise PunycodeBadInput, "Input is invalid." + end + end + + output[0..outlen].map { |x| x.chr }.join("").sub(/\0+\z/, "") + end + private_class_method :punycode_encode + + def self.punycode_decode(punycode) + input = [] + output = [] + + if ACE_MAX_LENGTH * 2 < punycode.size + raise PunycodeBigOutput, "Output would exceed the space provided." + end + punycode.each_byte do |c| + unless c >= 0 && c <= 127 + raise PunycodeBadInput, "Input is invalid." + end + input.push(c) + end + + input_length = input.length + output_length = [UNICODE_MAX_LENGTH] + + # Initialize the state + n = PUNYCODE_INITIAL_N + + out = i = 0 + max_out = output_length[0] + bias = PUNYCODE_INITIAL_BIAS + + # Handle the basic code points: Let b be the number of input code + # points before the last delimiter, or 0 if there is none, then + # copy the first b code points to the output. + + b = 0 + input_length.times do |j| + b = j if punycode_delimiter?(input[j]) + end + if b > max_out + raise PunycodeBigOutput, "Output would exceed the space provided." + end + + b.times do |j| + unless punycode_basic?(input[j]) + raise PunycodeBadInput, "Input is invalid." + end + output[out] = input[j] + out+=1 + end + + # Main decoding loop: Start just after the last delimiter if any + # basic code points were copied; start at the beginning otherwise. + + in_ = b > 0 ? b + 1 : 0 + while in_ < input_length + + # in_ is the index of the next character to be consumed, and + # out is the number of code points in the output array. + + # Decode a generalized variable-length integer into delta, + # which gets added to i. 
The overflow checking is easier + # if we increase i as we go, then subtract off its starting + # value at the end to obtain delta. + + oldi = i; w = 1; k = PUNYCODE_BASE + while true + if in_ >= input_length + raise PunycodeBadInput, "Input is invalid." + end + digit = punycode_decode_digit(input[in_]) + in_+=1 + if digit >= PUNYCODE_BASE + raise PunycodeBadInput, "Input is invalid." + end + if digit > (PUNYCODE_MAXINT - i) / w + raise PunycodeOverflow, "Input needs wider integers to process." + end + i += digit * w + t = ( + if k <= bias + PUNYCODE_TMIN + elsif k >= bias + PUNYCODE_TMAX + PUNYCODE_TMAX + else + k - bias + end + ) + break if digit < t + if w > PUNYCODE_MAXINT / (PUNYCODE_BASE - t) + raise PunycodeOverflow, "Input needs wider integers to process." + end + w *= PUNYCODE_BASE - t + k += PUNYCODE_BASE + end + + bias = punycode_adapt(i - oldi, out + 1, oldi == 0) + + # I was supposed to wrap around from out + 1 to 0, + # incrementing n each time, so we'll fix that now: + + if i / (out + 1) > PUNYCODE_MAXINT - n + raise PunycodeOverflow, "Input needs wider integers to process." + end + n += i / (out + 1) + i %= out + 1 + + # Insert n at position i of the output: + + # not needed for Punycode: + # raise PUNYCODE_INVALID_INPUT if decode_digit(n) <= base + if out >= max_out + raise PunycodeBigOutput, "Output would exceed the space provided." + end + + #memmove(output + i + 1, output + i, (out - i) * sizeof *output) + output[i + 1, out - i] = output[i, out - i] + output[i] = n + i += 1 + + out += 1 + end + + output_length[0] = out + + output.pack("U*") + end + private_class_method :punycode_decode + + def self.punycode_basic?(codepoint) + codepoint < 0x80 + end + private_class_method :punycode_basic? + + def self.punycode_delimiter?(codepoint) + codepoint == PUNYCODE_DELIMITER + end + private_class_method :punycode_delimiter? + + def self.punycode_encode_digit(d) + d + 22 + 75 * ((d < 26) ? 1 : 0) + end + private_class_method :punycode_encode_digit + + # Returns the numeric value of a basic codepoint + # (for use in representing integers) in the range 0 to + # base - 1, or PUNYCODE_BASE if codepoint does not represent a value. + def self.punycode_decode_digit(codepoint) + if codepoint - 48 < 10 + codepoint - 22 + elsif codepoint - 65 < 26 + codepoint - 65 + elsif codepoint - 97 < 26 + codepoint - 97 + else + PUNYCODE_BASE + end + end + private_class_method :punycode_decode_digit + + # Bias adaptation method + def self.punycode_adapt(delta, numpoints, firsttime) + delta = firsttime ? delta / PUNYCODE_DAMP : delta >> 1 + # delta >> 1 is a faster way of doing delta / 2 + delta += delta / numpoints + difference = PUNYCODE_BASE - PUNYCODE_TMIN + + k = 0 + while delta > (difference * PUNYCODE_TMAX) / 2 + delta /= difference + k += PUNYCODE_BASE + end + + k + (difference + 1) * delta / (delta + PUNYCODE_SKEW) + end + private_class_method :punycode_adapt + end + # :startdoc: +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/template.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/template.rb new file mode 100644 index 0000000..45f6ae6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/template.rb @@ -0,0 +1,1031 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "addressable/version" +require "addressable/uri" + +module Addressable + ## + # This is an implementation of a URI template based on + # RFC 6570 (http://tools.ietf.org/html/rfc6570). + class Template + # Constants used throughout the template code. + anything = + Addressable::URI::CharacterClasses::RESERVED + + Addressable::URI::CharacterClasses::UNRESERVED + + + variable_char_class = + Addressable::URI::CharacterClasses::ALPHA + + Addressable::URI::CharacterClasses::DIGIT + '_' + + var_char = + "(?>(?:[#{variable_char_class}]|%[a-fA-F0-9][a-fA-F0-9])+)" + RESERVED = + "(?:[#{anything}]|%[a-fA-F0-9][a-fA-F0-9])" + UNRESERVED = + "(?:[#{ + Addressable::URI::CharacterClasses::UNRESERVED + }]|%[a-fA-F0-9][a-fA-F0-9])" + variable = + "(?:#{var_char}(?:\\.?#{var_char})*)" + varspec = + "(?:(#{variable})(\\*|:\\d+)?)" + VARNAME = + /^#{variable}$/ + VARSPEC = + /^#{varspec}$/ + VARIABLE_LIST = + /^#{varspec}(?:,#{varspec})*$/ + operator = + "+#./;?&=,!@|" + EXPRESSION = + /\{([#{operator}])?(#{varspec}(?:,#{varspec})*)\}/ + + + LEADERS = { + '?' => '?', + '/' => '/', + '#' => '#', + '.' => '.', + ';' => ';', + '&' => '&' + } + JOINERS = { + '?' => '&', + '.' => '.', + ';' => ';', + '&' => '&', + '/' => '/' + } + + ## + # Raised if an invalid template value is supplied. + class InvalidTemplateValueError < StandardError + end + + ## + # Raised if an invalid template operator is used in a pattern. + class InvalidTemplateOperatorError < StandardError + end + + ## + # Raised if an invalid template operator is used in a pattern. + class TemplateOperatorAbortedError < StandardError + end + + ## + # This class represents the data that is extracted when a Template + # is matched against a URI. + class MatchData + ## + # Creates a new MatchData object. + # MatchData objects should never be instantiated directly. + # + # @param [Addressable::URI] uri + # The URI that the template was matched against. + def initialize(uri, template, mapping) + @uri = uri.dup.freeze + @template = template + @mapping = mapping.dup.freeze + end + + ## + # @return [Addressable::URI] + # The URI that the Template was matched against. + attr_reader :uri + + ## + # @return [Addressable::Template] + # The Template used for the match. + attr_reader :template + + ## + # @return [Hash] + # The mapping that resulted from the match. + # Note that this mapping does not include keys or values for + # variables that appear in the Template, but are not present + # in the URI. + attr_reader :mapping + + ## + # @return [Array] + # The list of variables that were present in the Template. + # Note that this list will include variables which do not appear + # in the mapping because they were not present in URI. + def variables + self.template.variables + end + alias_method :keys, :variables + alias_method :names, :variables + + ## + # @return [Array] + # The list of values that were captured by the Template. + # Note that this list will include nils for any variables which + # were in the Template, but did not appear in the URI. 
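      #
      # Editor's sketch (not part of the vendored gem), assuming a simple
      # two-variable template, to show what a full match yields here:
      #
      #   t = Addressable::Template.new("http://example.com/{first}/{second}/")
      #   t.match("http://example.com/a/b/").values
      #   #=> ["a", "b"]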
+ def values + @values ||= self.variables.inject([]) do |accu, key| + accu << self.mapping[key] + accu + end + end + alias_method :captures, :values + + ## + # Accesses captured values by name or by index. + # + # @param [String, Symbol, Fixnum] key + # Capture index or name. Note that when accessing by with index + # of 0, the full URI will be returned. The intention is to mimic + # the ::MatchData#[] behavior. + # + # @param [#to_int, nil] len + # If provided, an array of values will be returend with the given + # parameter used as length. + # + # @return [Array, String, nil] + # The captured value corresponding to the index or name. If the + # value was not provided or the key is unknown, nil will be + # returned. + # + # If the second parameter is provided, an array of that length will + # be returned instead. + def [](key, len = nil) + if len + to_a[key, len] + elsif String === key or Symbol === key + mapping[key.to_s] + else + to_a[key] + end + end + + ## + # @return [Array] + # Array with the matched URI as first element followed by the captured + # values. + def to_a + [to_s, *values] + end + + ## + # @return [String] + # The matched URI as String. + def to_s + uri.to_s + end + alias_method :string, :to_s + + # Returns multiple captured values at once. + # + # @param [String, Symbol, Fixnum] *indexes + # Indices of the captures to be returned + # + # @return [Array] + # Values corresponding to given indices. + # + # @see Addressable::Template::MatchData#[] + def values_at(*indexes) + indexes.map { |i| self[i] } + end + + ## + # Returns a String representation of the MatchData's state. + # + # @return [String] The MatchData's state, as a String. + def inspect + sprintf("#<%s:%#0x RESULT:%s>", + self.class.to_s, self.object_id, self.mapping.inspect) + end + + ## + # Dummy method for code expecting a ::MatchData instance + # + # @return [String] An empty string. + def pre_match + "" + end + alias_method :post_match, :pre_match + end + + ## + # Creates a new Addressable::Template object. + # + # @param [#to_str] pattern The URI Template pattern. + # + # @return [Addressable::Template] The initialized Template object. + def initialize(pattern) + if !pattern.respond_to?(:to_str) + raise TypeError, "Can't convert #{pattern.class} into String." + end + @pattern = pattern.to_str.dup.freeze + end + + ## + # Freeze URI, initializing instance variables. + # + # @return [Addressable::URI] The frozen URI object. + def freeze + self.variables + self.variable_defaults + self.named_captures + super + end + + ## + # @return [String] The Template object's pattern. + attr_reader :pattern + + ## + # Returns a String representation of the Template object's state. + # + # @return [String] The Template object's state, as a String. + def inspect + sprintf("#<%s:%#0x PATTERN:%s>", + self.class.to_s, self.object_id, self.pattern) + end + + ## + # Returns true if the Template objects are equal. This method + # does NOT normalize either Template before doing the comparison. + # + # @param [Object] template The Template to compare. + # + # @return [TrueClass, FalseClass] + # true if the Templates are equivalent, false + # otherwise. + def ==(template) + return false unless template.kind_of?(Template) + return self.pattern == template.pattern + end + + ## + # Addressable::Template makes no distinction between `==` and `eql?`. + # + # @see #== + alias_method :eql?, :== + + ## + # Extracts a mapping from the URI using a URI Template pattern. 
+ # + # @param [Addressable::URI, #to_str] uri + # The URI to extract from. + # + # @param [#restore, #match] processor + # A template processor object may optionally be supplied. + # + # The object should respond to either the restore or + # match messages or both. The restore method should + # take two parameters: `[String] name` and `[String] value`. + # The restore method should reverse any transformations that + # have been performed on the value to ensure a valid URI. + # The match method should take a single + # parameter: `[String] name`. The match method should return + # a String containing a regular expression capture group for + # matching on that particular variable. The default value is `".*?"`. + # The match method has no effect on multivariate operator + # expansions. + # + # @return [Hash, NilClass] + # The Hash mapping that was extracted from the URI, or + # nil if the URI didn't match the template. + # + # @example + # class ExampleProcessor + # def self.restore(name, value) + # return value.gsub(/\+/, " ") if name == "query" + # return value + # end + # + # def self.match(name) + # return ".*?" if name == "first" + # return ".*" + # end + # end + # + # uri = Addressable::URI.parse( + # "http://example.com/search/an+example+search+query/" + # ) + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).extract(uri, ExampleProcessor) + # #=> {"query" => "an example search query"} + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # Addressable::Template.new( + # "http://example.com/{first}/{second}/" + # ).extract(uri, ExampleProcessor) + # #=> {"first" => "a", "second" => "b/c"} + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # Addressable::Template.new( + # "http://example.com/{first}/{-list|/|second}/" + # ).extract(uri) + # #=> {"first" => "a", "second" => ["b", "c"]} + def extract(uri, processor=nil) + match_data = self.match(uri, processor) + return (match_data ? match_data.mapping : nil) + end + + ## + # Extracts match data from the URI using a URI Template pattern. + # + # @param [Addressable::URI, #to_str] uri + # The URI to extract from. + # + # @param [#restore, #match] processor + # A template processor object may optionally be supplied. + # + # The object should respond to either the restore or + # match messages or both. The restore method should + # take two parameters: `[String] name` and `[String] value`. + # The restore method should reverse any transformations that + # have been performed on the value to ensure a valid URI. + # The match method should take a single + # parameter: `[String] name`. The match method should return + # a String containing a regular expression capture group for + # matching on that particular variable. The default value is `".*?"`. + # The match method has no effect on multivariate operator + # expansions. + # + # @return [Hash, NilClass] + # The Hash mapping that was extracted from the URI, or + # nil if the URI didn't match the template. + # + # @example + # class ExampleProcessor + # def self.restore(name, value) + # return value.gsub(/\+/, " ") if name == "query" + # return value + # end + # + # def self.match(name) + # return ".*?" 
if name == "first" + # return ".*" + # end + # end + # + # uri = Addressable::URI.parse( + # "http://example.com/search/an+example+search+query/" + # ) + # match = Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).match(uri, ExampleProcessor) + # match.variables + # #=> ["query"] + # match.captures + # #=> ["an example search query"] + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # match = Addressable::Template.new( + # "http://example.com/{first}/{+second}/" + # ).match(uri, ExampleProcessor) + # match.variables + # #=> ["first", "second"] + # match.captures + # #=> ["a", "b/c"] + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # match = Addressable::Template.new( + # "http://example.com/{first}{/second*}/" + # ).match(uri) + # match.variables + # #=> ["first", "second"] + # match.captures + # #=> ["a", ["b", "c"]] + def match(uri, processor=nil) + uri = Addressable::URI.parse(uri) unless uri.is_a?(Addressable::URI) + mapping = {} + + # First, we need to process the pattern, and extract the values. + expansions, expansion_regexp = + parse_template_pattern(pattern, processor) + + return nil unless uri.to_str.match(expansion_regexp) + unparsed_values = uri.to_str.scan(expansion_regexp).flatten + + if uri.to_str == pattern + return Addressable::Template::MatchData.new(uri, self, mapping) + elsif expansions.size > 0 + index = 0 + expansions.each do |expansion| + _, operator, varlist = *expansion.match(EXPRESSION) + varlist.split(',').each do |varspec| + _, name, modifier = *varspec.match(VARSPEC) + mapping[name] ||= nil + case operator + when nil, '+', '#', '/', '.' + unparsed_value = unparsed_values[index] + name = varspec[VARSPEC, 1] + value = unparsed_value + value = value.split(JOINERS[operator]) if value && modifier == '*' + when ';', '?', '&' + if modifier == '*' + if unparsed_values[index] + value = unparsed_values[index].split(JOINERS[operator]) + value = value.inject({}) do |acc, v| + key, val = v.split('=') + val = "" if val.nil? + acc[key] = val + acc + end + end + else + if (unparsed_values[index]) + name, value = unparsed_values[index].split('=') + value = "" if value.nil? + end + end + end + if processor != nil && processor.respond_to?(:restore) + value = processor.restore(name, value) + end + if processor == nil + if value.is_a?(Hash) + value = value.inject({}){|acc, (k, v)| + acc[Addressable::URI.unencode_component(k)] = + Addressable::URI.unencode_component(v) + acc + } + elsif value.is_a?(Array) + value = value.map{|v| Addressable::URI.unencode_component(v) } + else + value = Addressable::URI.unencode_component(value) + end + end + if !mapping.has_key?(name) || mapping[name].nil? + # Doesn't exist, set to value (even if value is nil) + mapping[name] = value + end + index = index + 1 + end + end + return Addressable::Template::MatchData.new(uri, self, mapping) + else + return nil + end + end + + ## + # Expands a URI template into another URI template. + # + # @param [Hash] mapping The mapping that corresponds to the pattern. + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. 
The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError + # exception will be raised if the value is invalid. The transform + # method should return the transformed variable value as a String. + # If a transform method is used, the value will not be percent + # encoded automatically. Unicode normalization will be performed both + # before and after sending the value to the transform method. + # + # @return [Addressable::Template] The partially expanded URI template. + # + # @example + # Addressable::Template.new( + # "http://example.com/{one}/{two}/" + # ).partial_expand({"one" => "1"}).pattern + # #=> "http://example.com/1/{two}/" + # + # Addressable::Template.new( + # "http://example.com/{?one,two}/" + # ).partial_expand({"one" => "1"}).pattern + # #=> "http://example.com/?one=1{&two}/" + # + # Addressable::Template.new( + # "http://example.com/{?one,two,three}/" + # ).partial_expand({"one" => "1", "three" => 3}).pattern + # #=> "http://example.com/?one=1{&two}&three=3" + def partial_expand(mapping, processor=nil, normalize_values=true) + result = self.pattern.dup + mapping = normalize_keys(mapping) + result.gsub!( EXPRESSION ) do |capture| + transform_partial_capture(mapping, capture, processor, normalize_values) + end + return Addressable::Template.new(result) + end + + ## + # Expands a URI template into a full URI. + # + # @param [Hash] mapping The mapping that corresponds to the pattern. + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError + # exception will be raised if the value is invalid. The transform + # method should return the transformed variable value as a String. + # If a transform method is used, the value will not be percent + # encoded automatically. Unicode normalization will be performed both + # before and after sending the value to the transform method. + # + # @return [Addressable::URI] The expanded URI template. 
+ # + # @example + # class ExampleProcessor + # def self.validate(name, value) + # return !!(value =~ /^[\w ]+$/) if name == "query" + # return true + # end + # + # def self.transform(name, value) + # return value.gsub(/ /, "+") if name == "query" + # return value + # end + # end + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "an example search query"}, + # ExampleProcessor + # ).to_str + # #=> "http://example.com/search/an+example+search+query/" + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "an example search query"} + # ).to_str + # #=> "http://example.com/search/an%20example%20search%20query/" + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "bogus!"}, + # ExampleProcessor + # ).to_str + # #=> Addressable::Template::InvalidTemplateValueError + def expand(mapping, processor=nil, normalize_values=true) + result = self.pattern.dup + mapping = normalize_keys(mapping) + result.gsub!( EXPRESSION ) do |capture| + transform_capture(mapping, capture, processor, normalize_values) + end + return Addressable::URI.parse(result) + end + + ## + # Returns an Array of variables used within the template pattern. + # The variables are listed in the Array in the order they appear within + # the pattern. Multiple occurrences of a variable within a pattern are + # not represented in this Array. + # + # @return [Array] The variables present in the template's pattern. + def variables + @variables ||= ordered_variable_defaults.map { |var, val| var }.uniq + end + alias_method :keys, :variables + alias_method :names, :variables + + ## + # Returns a mapping of variables to their default values specified + # in the template. Variables without defaults are not returned. + # + # @return [Hash] Mapping of template variables to their defaults + def variable_defaults + @variable_defaults ||= + Hash[*ordered_variable_defaults.reject { |k, v| v.nil? }.flatten] + end + + ## + # Coerces a template into a `Regexp` object. This regular expression will + # behave very similarly to the actual template, and should match the same + # URI values, but it cannot fully handle, for example, values that would + # extract to an `Array`. + # + # @return [Regexp] A regular expression which should match the template. + def to_regexp + _, source = parse_template_pattern(pattern) + Regexp.new(source) + end + + ## + # Returns the source of the coerced `Regexp`. + # + # @return [String] The source of the `Regexp` given by {#to_regexp}. + # + # @api private + def source + self.to_regexp.source + end + + ## + # Returns the named captures of the coerced `Regexp`. + # + # @return [Hash] The named captures of the `Regexp` given by {#to_regexp}. + # + # @api private + def named_captures + self.to_regexp.named_captures + end + + private + def ordered_variable_defaults + @ordered_variable_defaults ||= begin + expansions, _ = parse_template_pattern(pattern) + expansions.map do |capture| + _, _, varlist = *capture.match(EXPRESSION) + varlist.split(',').map do |varspec| + varspec[VARSPEC, 1] + end + end.flatten + end + end + + + ## + # Loops through each capture and expands any values available in mapping + # + # @param [Hash] mapping + # Set of keys to expand + # @param [String] capture + # The expression to expand + # @param [#validate, #transform] processor + # An optional processor object may be supplied. 
+ # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError exception + # will be raised if the value is invalid. The transform method + # should return the transformed variable value as a String. If a + # transform method is used, the value will not be percent encoded + # automatically. Unicode normalization will be performed both before and + # after sending the value to the transform method. + # + # @return [String] The expanded expression + def transform_partial_capture(mapping, capture, processor = nil, + normalize_values = true) + _, operator, varlist = *capture.match(EXPRESSION) + + vars = varlist.split(",") + + if operator == "?" + # partial expansion of form style query variables sometimes requires a + # slight reordering of the variables to produce a valid url. + first_to_expand = vars.find { |varspec| + _, name, _ = *varspec.match(VARSPEC) + mapping.key?(name) && !mapping[name].nil? + } + + vars = [first_to_expand] + vars.reject {|varspec| varspec == first_to_expand} if first_to_expand + end + + vars. + inject("".dup) do |acc, varspec| + _, name, _ = *varspec.match(VARSPEC) + next_val = if mapping.key? name + transform_capture(mapping, "{#{operator}#{varspec}}", + processor, normalize_values) + else + "{#{operator}#{varspec}}" + end + # If we've already expanded at least one '?' operator with non-empty + # value, change to '&' + operator = "&" if (operator == "?") && (next_val != "") + acc << next_val + end + end + + ## + # Transforms a mapped value so that values can be substituted into the + # template. + # + # @param [Hash] mapping The mapping to replace captures + # @param [String] capture + # The expression to replace + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError exception + # will be raised if the value is invalid. The transform method + # should return the transformed variable value as a String. If a + # transform method is used, the value will not be percent encoded + # automatically. Unicode normalization will be performed both before and + # after sending the value to the transform method. 
+ # + # @return [String] The expanded expression + def transform_capture(mapping, capture, processor=nil, + normalize_values=true) + _, operator, varlist = *capture.match(EXPRESSION) + return_value = varlist.split(',').inject([]) do |acc, varspec| + _, name, modifier = *varspec.match(VARSPEC) + value = mapping[name] + unless value == nil || value == {} + allow_reserved = %w(+ #).include?(operator) + # Common primitives where the .to_s output is well-defined + if Numeric === value || Symbol === value || + value == true || value == false + value = value.to_s + end + length = modifier.gsub(':', '').to_i if modifier =~ /^:\d+/ + + unless (Hash === value) || + value.respond_to?(:to_ary) || value.respond_to?(:to_str) + raise TypeError, + "Can't convert #{value.class} into String or Array." + end + + value = normalize_value(value) if normalize_values + + if processor == nil || !processor.respond_to?(:transform) + # Handle percent escaping + if allow_reserved + encode_map = + Addressable::URI::CharacterClasses::RESERVED + + Addressable::URI::CharacterClasses::UNRESERVED + else + encode_map = Addressable::URI::CharacterClasses::UNRESERVED + end + if value.kind_of?(Array) + transformed_value = value.map do |val| + if length + Addressable::URI.encode_component(val[0...length], encode_map) + else + Addressable::URI.encode_component(val, encode_map) + end + end + unless modifier == "*" + transformed_value = transformed_value.join(',') + end + elsif value.kind_of?(Hash) + transformed_value = value.map do |key, val| + if modifier == "*" + "#{ + Addressable::URI.encode_component( key, encode_map) + }=#{ + Addressable::URI.encode_component( val, encode_map) + }" + else + "#{ + Addressable::URI.encode_component( key, encode_map) + },#{ + Addressable::URI.encode_component( val, encode_map) + }" + end + end + unless modifier == "*" + transformed_value = transformed_value.join(',') + end + else + if length + transformed_value = Addressable::URI.encode_component( + value[0...length], encode_map) + else + transformed_value = Addressable::URI.encode_component( + value, encode_map) + end + end + end + + # Process, if we've got a processor + if processor != nil + if processor.respond_to?(:validate) + if !processor.validate(name, value) + display_value = value.kind_of?(Array) ? value.inspect : value + raise InvalidTemplateValueError, + "#{name}=#{display_value} is an invalid template value." + end + end + if processor.respond_to?(:transform) + transformed_value = processor.transform(name, value) + if normalize_values + transformed_value = normalize_value(transformed_value) + end + end + end + acc << [name, transformed_value] + end + acc + end + return "" if return_value.empty? + join_values(operator, return_value) + end + + ## + # Takes a set of values, and joins them together based on the + # operator. + # + # @param [String, Nil] operator One of the operators from the set + # (?,&,+,#,;,/,.), or nil if there wasn't one. + # @param [Array] return_value + # The set of return values (as [variable_name, value] tuples) that will + # be joined together. + # + # @return [String] The transformed mapped value + def join_values(operator, return_value) + leader = LEADERS.fetch(operator, '') + joiner = JOINERS.fetch(operator, ',') + case operator + when '&', '?' 
+ leader + return_value.map{|k,v| + if v.is_a?(Array) && v.first =~ /=/ + v.join(joiner) + elsif v.is_a?(Array) + v.map{|inner_value| "#{k}=#{inner_value}"}.join(joiner) + else + "#{k}=#{v}" + end + }.join(joiner) + when ';' + return_value.map{|k,v| + if v.is_a?(Array) && v.first =~ /=/ + ';' + v.join(";") + elsif v.is_a?(Array) + ';' + v.map{|inner_value| "#{k}=#{inner_value}"}.join(";") + else + v && v != '' ? ";#{k}=#{v}" : ";#{k}" + end + }.join + else + leader + return_value.map{|k,v| v}.join(joiner) + end + end + + ## + # Takes a set of values, and joins them together based on the + # operator. + # + # @param [Hash, Array, String] value + # Normalizes keys and values with IDNA#unicode_normalize_kc + # + # @return [Hash, Array, String] The normalized values + def normalize_value(value) + unless value.is_a?(Hash) + value = value.respond_to?(:to_ary) ? value.to_ary : value.to_str + end + + # Handle unicode normalization + if value.kind_of?(Array) + value.map! { |val| Addressable::IDNA.unicode_normalize_kc(val) } + elsif value.kind_of?(Hash) + value = value.inject({}) { |acc, (k, v)| + acc[Addressable::IDNA.unicode_normalize_kc(k)] = + Addressable::IDNA.unicode_normalize_kc(v) + acc + } + else + value = Addressable::IDNA.unicode_normalize_kc(value) + end + value + end + + ## + # Generates a hash with string keys + # + # @param [Hash] mapping A mapping hash to normalize + # + # @return [Hash] + # A hash with stringified keys + def normalize_keys(mapping) + return mapping.inject({}) do |accu, pair| + name, value = pair + if Symbol === name + name = name.to_s + elsif name.respond_to?(:to_str) + name = name.to_str + else + raise TypeError, + "Can't convert #{name.class} into String." + end + accu[name] = value + accu + end + end + + ## + # Generates the Regexp that parses a template pattern. Memoizes the + # value if template processor not set (processors may not be deterministic) + # + # @param [String] pattern The URI template pattern. + # @param [#match] processor The template processor to use. + # + # @return [Array, Regexp] + # An array of expansion variables nad a regular expression which may be + # used to parse a template pattern + def parse_template_pattern(pattern, processor = nil) + if processor.nil? && pattern == @pattern + @cached_template_parse ||= + parse_new_template_pattern(pattern, processor) + else + parse_new_template_pattern(pattern, processor) + end + end + + ## + # Generates the Regexp that parses a template pattern. + # + # @param [String] pattern The URI template pattern. + # @param [#match] processor The template processor to use. + # + # @return [Array, Regexp] + # An array of expansion variables nad a regular expression which may be + # used to parse a template pattern + def parse_new_template_pattern(pattern, processor = nil) + # Escape the pattern. The two gsubs restore the escaped curly braces + # back to their original form. Basically, escape everything that isn't + # within an expansion. + escaped_pattern = Regexp.escape( + pattern + ).gsub(/\\\{(.*?)\\\}/) do |escaped| + escaped.gsub(/\\(.)/, "\\1") + end + + expansions = [] + + # Create a regular expression that captures the values of the + # variables in the URI. 
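      # (Editor's note, illustrative only and not part of the vendored
      # source: for a pattern like "http://example.com/{first}/" the gsub
      # below rewrites "{first}" (no operator, no modifier) into an
      # optional named group built from the UNRESERVED fragment, roughly
      #
      #   (?:|(?<first>(?:[a-zA-Z0-9\-\.\_\~]|%[a-fA-F0-9][a-fA-F0-9])*?)?)
      #
      # while the literal text around the expression stays escaped as-is.)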
+ regexp_string = escaped_pattern.gsub( EXPRESSION ) do |expansion| + + expansions << expansion + _, operator, varlist = *expansion.match(EXPRESSION) + leader = Regexp.escape(LEADERS.fetch(operator, '')) + joiner = Regexp.escape(JOINERS.fetch(operator, ',')) + combined = varlist.split(',').map do |varspec| + _, name, modifier = *varspec.match(VARSPEC) + + result = processor && processor.respond_to?(:match) ? processor.match(name) : nil + if result + "(?<#{name}>#{ result })" + else + group = case operator + when '+' + "#{ RESERVED }*?" + when '#' + "#{ RESERVED }*?" + when '/' + "#{ UNRESERVED }*?" + when '.' + "#{ UNRESERVED.gsub('\.', '') }*?" + when ';' + "#{ UNRESERVED }*=?#{ UNRESERVED }*?" + when '?' + "#{ UNRESERVED }*=#{ UNRESERVED }*?" + when '&' + "#{ UNRESERVED }*=#{ UNRESERVED }*?" + else + "#{ UNRESERVED }*?" + end + if modifier == '*' + "(?<#{name}>#{group}(?:#{joiner}?#{group})*)?" + else + "(?<#{name}>#{group})?" + end + end + end.join("#{joiner}?") + "(?:|#{leader}#{combined})" + end + + # Ensure that the regular expression matches the whole URI. + regexp_string = "^#{regexp_string}$" + return expansions, Regexp.new(regexp_string) + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/uri.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/uri.rb new file mode 100644 index 0000000..6b5f4fa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/uri.rb @@ -0,0 +1,2556 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "addressable/version" +require "addressable/idna" +require "public_suffix" + +## +# Addressable is a library for processing links and URIs. +module Addressable + ## + # This is an implementation of a URI parser based on + # RFC 3986, + # RFC 3987. + class URI + ## + # Raised if something other than a uri is supplied. + class InvalidURIError < StandardError + end + + ## + # Container for the character classes specified in + # RFC 3986. + module CharacterClasses + ALPHA = "a-zA-Z" + DIGIT = "0-9" + GEN_DELIMS = "\\:\\/\\?\\#\\[\\]\\@" + SUB_DELIMS = "\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=" + RESERVED = GEN_DELIMS + SUB_DELIMS + UNRESERVED = ALPHA + DIGIT + "\\-\\.\\_\\~" + PCHAR = UNRESERVED + SUB_DELIMS + "\\:\\@" + SCHEME = ALPHA + DIGIT + "\\-\\+\\." + HOST = UNRESERVED + SUB_DELIMS + "\\[\\:\\]" + AUTHORITY = PCHAR + "\\[\\:\\]" + PATH = PCHAR + "\\/" + QUERY = PCHAR + "\\/\\?" + FRAGMENT = PCHAR + "\\/\\?" 
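      # (Editor's note, not part of the vendored source: these constants
      # are character-class fragments meant to be interpolated between
      # square brackets rather than used as full patterns, e.g.
      #
      #   /[^#{Addressable::URI::CharacterClasses::UNRESERVED}]/
      #   # => matches any character that must be percent encoded
      #
      # which is exactly how the NormalizeCharacterClasses module below
      # builds its precompiled regular expressions.)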
+ end + + module NormalizeCharacterClasses + HOST = /[^#{CharacterClasses::HOST}]/ + UNRESERVED = /[^#{CharacterClasses::UNRESERVED}]/ + PCHAR = /[^#{CharacterClasses::PCHAR}]/ + SCHEME = /[^#{CharacterClasses::SCHEME}]/ + FRAGMENT = /[^#{CharacterClasses::FRAGMENT}]/ + QUERY = %r{[^a-zA-Z0-9\-\.\_\~\!\$\'\(\)\*\+\,\=\:\@\/\?%]|%(?!2B|2b)} + end + + SLASH = '/' + EMPTY_STR = '' + + URIREGEX = /^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$/ + + PORT_MAPPING = { + "http" => 80, + "https" => 443, + "ftp" => 21, + "tftp" => 69, + "sftp" => 22, + "ssh" => 22, + "svn+ssh" => 22, + "telnet" => 23, + "nntp" => 119, + "gopher" => 70, + "wais" => 210, + "ldap" => 389, + "prospero" => 1525 + }.freeze + + ## + # Returns a URI object based on the parsed string. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI string to parse. + # No parsing is performed if the object is already an + # Addressable::URI. + # + # @return [Addressable::URI] The parsed URI. + def self.parse(uri) + # If we were given nil, return nil. + return nil unless uri + # If a URI object is passed, just return itself. + return uri.dup if uri.kind_of?(self) + + # If a URI object of the Ruby standard library variety is passed, + # convert it to a string, then parse the string. + # We do the check this way because we don't want to accidentally + # cause a missing constant exception to be thrown. + if uri.class.name =~ /^URI\b/ + uri = uri.to_s + end + + # Otherwise, convert to a String + begin + uri = uri.to_str + rescue TypeError, NoMethodError + raise TypeError, "Can't convert #{uri.class} into String." + end if not uri.is_a? String + + # This Regexp supplied as an example in RFC 3986, and it works great. + scan = uri.scan(URIREGEX) + fragments = scan[0] + scheme = fragments[1] + authority = fragments[3] + path = fragments[4] + query = fragments[6] + fragment = fragments[8] + user = nil + password = nil + host = nil + port = nil + if authority != nil + # The Regexp above doesn't split apart the authority. + userinfo = authority[/^([^\[\]]*)@/, 1] + if userinfo != nil + user = userinfo.strip[/^([^:]*):?/, 1] + password = userinfo.strip[/:(.*)$/, 1] + end + host = authority.sub( + /^([^\[\]]*)@/, EMPTY_STR + ).sub( + /:([^:@\[\]]*?)$/, EMPTY_STR + ) + port = authority[/:([^:@\[\]]*?)$/, 1] + end + if port == EMPTY_STR + port = nil + end + + return new( + :scheme => scheme, + :user => user, + :password => password, + :host => host, + :port => port, + :path => path, + :query => query, + :fragment => fragment + ) + end + + ## + # Converts an input to a URI. The input does not have to be a valid + # URI — the method will use heuristics to guess what URI was intended. + # This is not standards-compliant, merely user-friendly. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI string to parse. + # No parsing is performed if the object is already an + # Addressable::URI. + # @param [Hash] hints + # A Hash of hints to the heuristic parser. + # Defaults to {:scheme => "http"}. + # + # @return [Addressable::URI] The parsed URI. + def self.heuristic_parse(uri, hints={}) + # If we were given nil, return nil. + return nil unless uri + # If a URI object is passed, just return itself. + return uri.dup if uri.kind_of?(self) + + # If a URI object of the Ruby standard library variety is passed, + # convert it to a string, then parse the string. + # We do the check this way because we don't want to accidentally + # cause a missing constant exception to be thrown. 
+ if uri.class.name =~ /^URI\b/ + uri = uri.to_s + end + + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + # Otherwise, convert to a String + uri = uri.to_str.dup.strip + hints = { + :scheme => "http" + }.merge(hints) + case uri + when /^http:\//i + uri.sub!(/^http:\/+/i, "http://") + when /^https:\//i + uri.sub!(/^https:\/+/i, "https://") + when /^feed:\/+http:\//i + uri.sub!(/^feed:\/+http:\/+/i, "feed:http://") + when /^feed:\//i + uri.sub!(/^feed:\/+/i, "feed://") + when %r[^file:/{4}]i + uri.sub!(%r[^file:/+]i, "file:////") + when %r[^file://localhost/]i + uri.sub!(%r[^file://localhost/+]i, "file:///") + when %r[^file:/+]i + uri.sub!(%r[^file:/+]i, "file:///") + when /^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/ + uri.sub!(/^/, hints[:scheme] + "://") + when /\A\d+\..*:\d+\z/ + uri = "#{hints[:scheme]}://#{uri}" + end + match = uri.match(URIREGEX) + fragments = match.captures + authority = fragments[3] + if authority && authority.length > 0 + new_authority = authority.tr("\\", "/").gsub(" ", "%20") + # NOTE: We want offset 4, not 3! + offset = match.offset(4) + uri = uri.dup + uri[offset[0]...offset[1]] = new_authority + end + parsed = self.parse(uri) + if parsed.scheme =~ /^[^\/?#\.]+\.[^\/?#]+$/ + parsed = self.parse(hints[:scheme] + "://" + uri) + end + if parsed.path.include?(".") + if parsed.path[/\b@\b/] + parsed.scheme = "mailto" unless parsed.scheme + elsif new_host = parsed.path[/^([^\/]+\.[^\/]*)/, 1] + parsed.defer_validation do + new_path = parsed.path.sub( + Regexp.new("^" + Regexp.escape(new_host)), EMPTY_STR) + parsed.host = new_host + parsed.path = new_path + parsed.scheme = hints[:scheme] unless parsed.scheme + end + end + end + return parsed + end + + ## + # Converts a path to a file scheme URI. If the path supplied is + # relative, it will be returned as a relative URI. If the path supplied + # is actually a non-file URI, it will parse the URI as if it had been + # parsed with Addressable::URI.parse. Handles all of the + # various Microsoft-specific formats for specifying paths. + # + # @param [String, Addressable::URI, #to_str] path + # Typically a String path to a file or directory, but + # will return a sensible return value if an absolute URI is supplied + # instead. + # + # @return [Addressable::URI] + # The parsed file scheme URI or the original URI if some other URI + # scheme was provided. + # + # @example + # base = Addressable::URI.convert_path("/absolute/path/") + # uri = Addressable::URI.convert_path("relative/path") + # (base + uri).to_s + # #=> "file:///absolute/path/relative/path" + # + # Addressable::URI.convert_path( + # "c:\\windows\\My Documents 100%20\\foo.txt" + # ).to_s + # #=> "file:///c:/windows/My%20Documents%20100%20/foo.txt" + # + # Addressable::URI.convert_path("http://example.com/").to_s + # #=> "http://example.com/" + def self.convert_path(path) + # If we were given nil, return nil. + return nil unless path + # If a URI object is passed, just return itself. + return path if path.kind_of?(self) + if !path.respond_to?(:to_str) + raise TypeError, "Can't convert #{path.class} into String." 
+ end + # Otherwise, convert to a String + path = path.to_str.strip + + path.sub!(/^file:\/?\/?/, EMPTY_STR) if path =~ /^file:\/?\/?/ + path = SLASH + path if path =~ /^([a-zA-Z])[\|:]/ + uri = self.parse(path) + + if uri.scheme == nil + # Adjust windows-style uris + uri.path.sub!(/^\/?([a-zA-Z])[\|:][\\\/]/) do + "/#{$1.downcase}:/" + end + uri.path.tr!("\\", SLASH) + if File.exist?(uri.path) && + File.stat(uri.path).directory? + uri.path.chomp!(SLASH) + uri.path = uri.path + '/' + end + + # If the path is absolute, set the scheme and host. + if uri.path.start_with?(SLASH) + uri.scheme = "file" + uri.host = EMPTY_STR + end + uri.normalize! + end + + return uri + end + + ## + # Joins several URIs together. + # + # @param [String, Addressable::URI, #to_str] *uris + # The URIs to join. + # + # @return [Addressable::URI] The joined URI. + # + # @example + # base = "http://example.com/" + # uri = Addressable::URI.parse("relative/path") + # Addressable::URI.join(base, uri) + # #=> # + def self.join(*uris) + uri_objects = uris.collect do |uri| + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + uri.kind_of?(self) ? uri : self.parse(uri.to_str) + end + result = uri_objects.shift.dup + for uri in uri_objects + result.join!(uri) + end + return result + end + + ## + # Tables used to optimize encoding operations in `self.encode_component` + # and `self.normalize_component` + SEQUENCE_ENCODING_TABLE = Hash.new do |hash, sequence| + hash[sequence] = sequence.unpack("C*").map do |c| + format("%02x", c) + end.join + end + + SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE = Hash.new do |hash, sequence| + hash[sequence] = sequence.unpack("C*").map do |c| + format("%%%02X", c) + end.join + end + + ## + # Percent encodes a URI component. + # + # @param [String, #to_str] component The URI component to encode. + # + # @param [String, Regexp] character_class + # The characters which are not percent encoded. If a String + # is passed, the String must be formatted as a regular + # expression character class. (Do not include the surrounding square + # brackets.) For example, "b-zB-Z0-9" would cause + # everything but the letters 'b' through 'z' and the numbers '0' through + # '9' to be percent encoded. If a Regexp is passed, the + # value /[^b-zB-Z0-9]/ would have the same effect. A set of + # useful String values may be found in the + # Addressable::URI::CharacterClasses module. The default + # value is the reserved plus unreserved character classes specified in + # RFC 3986. + # + # @param [Regexp] upcase_encoded + # A string of characters that may already be percent encoded, and whose + # encodings should be upcased. This allows normalization of percent + # encodings for characters not included in the + # character_class. + # + # @return [String] The encoded component. + # + # @example + # Addressable::URI.encode_component("simple/example", "b-zB-Z0-9") + # => "simple%2Fex%61mple" + # Addressable::URI.encode_component("simple/example", /[^b-zB-Z0-9]/) + # => "simple%2Fex%61mple" + # Addressable::URI.encode_component( + # "simple/example", Addressable::URI::CharacterClasses::UNRESERVED + # ) + # => "simple%2Fexample" + def self.encode_component(component, character_class= + CharacterClasses::RESERVED + CharacterClasses::UNRESERVED, + upcase_encoded='') + return nil if component.nil? 
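      # (Editor's note, not part of the vendored source: the lookup tables
      # defined above memoize per-byte encodings, e.g.
      #
      #   SEQUENCE_ENCODING_TABLE["/"]                  #=> "2f"
      #   SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE["/"]  #=> "%2F"
      #
      # and the second one is what the character-class gsub below uses.)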
+ + begin + if component.kind_of?(Symbol) || + component.kind_of?(Numeric) || + component.kind_of?(TrueClass) || + component.kind_of?(FalseClass) + component = component.to_s + else + component = component.to_str + end + rescue TypeError, NoMethodError + raise TypeError, "Can't convert #{component.class} into String." + end if !component.is_a? String + + if ![String, Regexp].include?(character_class.class) + raise TypeError, + "Expected String or Regexp, got #{character_class.inspect}" + end + if character_class.kind_of?(String) + character_class = /[^#{character_class}]/ + end + # We can't perform regexps on invalid UTF sequences, but + # here we need to, so switch to ASCII. + component = component.dup + component.force_encoding(Encoding::ASCII_8BIT) + # Avoiding gsub! because there are edge cases with frozen strings + component = component.gsub(character_class) do |sequence| + SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE[sequence] + end + if upcase_encoded.length > 0 + upcase_encoded_chars = upcase_encoded.chars.map do |char| + SEQUENCE_ENCODING_TABLE[char] + end + component = component.gsub(/%(#{upcase_encoded_chars.join('|')})/, + &:upcase) + end + return component + end + + class << self + alias_method :escape_component, :encode_component + end + + ## + # Unencodes any percent encoded characters within a URI component. + # This method may be used for unencoding either components or full URIs, + # however, it is recommended to use the unencode_component + # alias when unencoding components. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI or component to unencode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @param [String] leave_encoded + # A string of characters to leave encoded. If a percent encoded character + # in this list is encountered then it will remain percent encoded. + # + # @return [String, Addressable::URI] + # The unencoded component or URI. + # The return type is determined by the return_type + # parameter. + def self.unencode(uri, return_type=String, leave_encoded='') + return nil if uri.nil? + + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri = uri.dup + # Seriously, only use UTF-8. I'm really not kidding! + uri.force_encoding("utf-8") + + unless leave_encoded.empty? + leave_encoded = leave_encoded.dup.force_encoding("utf-8") + end + + result = uri.gsub(/%[0-9a-f]{2}/iu) do |sequence| + c = sequence[1..3].to_i(16).chr + c.force_encoding("utf-8") + leave_encoded.include?(c) ? sequence : c + end + result.force_encoding("utf-8") + if return_type == String + return result + elsif return_type == ::Addressable::URI + return ::Addressable::URI.parse(result) + end + end + + class << self + alias_method :unescape, :unencode + alias_method :unencode_component, :unencode + alias_method :unescape_component, :unencode + end + + + ## + # Normalizes the encoding of a URI component. + # + # @param [String, #to_str] component The URI component to encode. + # + # @param [String, Regexp] character_class + # The characters which are not percent encoded. If a String + # is passed, the String must be formatted as a regular + # expression character class. 
(Do not include the surrounding square + # brackets.) For example, "b-zB-Z0-9" would cause + # everything but the letters 'b' through 'z' and the numbers '0' + # through '9' to be percent encoded. If a Regexp is passed, + # the value /[^b-zB-Z0-9]/ would have the same effect. A + # set of useful String values may be found in the + # Addressable::URI::CharacterClasses module. The default + # value is the reserved plus unreserved character classes specified in + # RFC 3986. + # + # @param [String] leave_encoded + # When character_class is a String then + # leave_encoded is a string of characters that should remain + # percent encoded while normalizing the component; if they appear percent + # encoded in the original component, then they will be upcased ("%2f" + # normalized to "%2F") but otherwise left alone. + # + # @return [String] The normalized component. + # + # @example + # Addressable::URI.normalize_component("simpl%65/%65xampl%65", "b-zB-Z") + # => "simple%2Fex%61mple" + # Addressable::URI.normalize_component( + # "simpl%65/%65xampl%65", /[^b-zB-Z]/ + # ) + # => "simple%2Fex%61mple" + # Addressable::URI.normalize_component( + # "simpl%65/%65xampl%65", + # Addressable::URI::CharacterClasses::UNRESERVED + # ) + # => "simple%2Fexample" + # Addressable::URI.normalize_component( + # "one%20two%2fthree%26four", + # "0-9a-zA-Z &/", + # "/" + # ) + # => "one two%2Fthree&four" + def self.normalize_component(component, character_class= + CharacterClasses::RESERVED + CharacterClasses::UNRESERVED, + leave_encoded='') + return nil if component.nil? + + begin + component = component.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{component.class} into String." + end if !component.is_a? String + + if ![String, Regexp].include?(character_class.class) + raise TypeError, + "Expected String or Regexp, got #{character_class.inspect}" + end + if character_class.kind_of?(String) + leave_re = if leave_encoded.length > 0 + character_class = "#{character_class}%" unless character_class.include?('%') + + "|%(?!#{leave_encoded.chars.map do |char| + seq = SEQUENCE_ENCODING_TABLE[char] + [seq.upcase, seq.downcase] + end.flatten.join('|')})" + end + + character_class = if leave_re + /[^#{character_class}]#{leave_re}/ + else + /[^#{character_class}]/ + end + end + # We can't perform regexps on invalid UTF sequences, but + # here we need to, so switch to ASCII. + component = component.dup + component.force_encoding(Encoding::ASCII_8BIT) + unencoded = self.unencode_component(component, String, leave_encoded) + begin + encoded = self.encode_component( + Addressable::IDNA.unicode_normalize_kc(unencoded), + character_class, + leave_encoded + ) + rescue ArgumentError + encoded = self.encode_component(unencoded) + end + encoded.force_encoding(Encoding::UTF_8) + return encoded + end + + ## + # Percent encodes any special characters in the URI. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI to encode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @return [String, Addressable::URI] + # The encoded URI. + # The return type is determined by the return_type + # parameter. + def self.encode(uri, return_type=String) + return nil if uri.nil? + + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? 
String + + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri_object = uri.kind_of?(self) ? uri : self.parse(uri) + encoded_uri = Addressable::URI.new( + :scheme => self.encode_component(uri_object.scheme, + Addressable::URI::CharacterClasses::SCHEME), + :authority => self.encode_component(uri_object.authority, + Addressable::URI::CharacterClasses::AUTHORITY), + :path => self.encode_component(uri_object.path, + Addressable::URI::CharacterClasses::PATH), + :query => self.encode_component(uri_object.query, + Addressable::URI::CharacterClasses::QUERY), + :fragment => self.encode_component(uri_object.fragment, + Addressable::URI::CharacterClasses::FRAGMENT) + ) + if return_type == String + return encoded_uri.to_s + elsif return_type == ::Addressable::URI + return encoded_uri + end + end + + class << self + alias_method :escape, :encode + end + + ## + # Normalizes the encoding of a URI. Characters within a hostname are + # not percent encoded to allow for internationalized domain names. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI to encode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @return [String, Addressable::URI] + # The encoded URI. + # The return type is determined by the return_type + # parameter. + def self.normalized_encode(uri, return_type=String) + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri_object = uri.kind_of?(self) ? uri : self.parse(uri) + components = { + :scheme => self.unencode_component(uri_object.scheme), + :user => self.unencode_component(uri_object.user), + :password => self.unencode_component(uri_object.password), + :host => self.unencode_component(uri_object.host), + :port => (uri_object.port.nil? ? 
nil : uri_object.port.to_s), + :path => self.unencode_component(uri_object.path), + :query => self.unencode_component(uri_object.query), + :fragment => self.unencode_component(uri_object.fragment) + } + components.each do |key, value| + if value != nil + begin + components[key] = + Addressable::IDNA.unicode_normalize_kc(value.to_str) + rescue ArgumentError + # Likely a malformed UTF-8 character, skip unicode normalization + components[key] = value.to_str + end + end + end + encoded_uri = Addressable::URI.new( + :scheme => self.encode_component(components[:scheme], + Addressable::URI::CharacterClasses::SCHEME), + :user => self.encode_component(components[:user], + Addressable::URI::CharacterClasses::UNRESERVED), + :password => self.encode_component(components[:password], + Addressable::URI::CharacterClasses::UNRESERVED), + :host => components[:host], + :port => components[:port], + :path => self.encode_component(components[:path], + Addressable::URI::CharacterClasses::PATH), + :query => self.encode_component(components[:query], + Addressable::URI::CharacterClasses::QUERY), + :fragment => self.encode_component(components[:fragment], + Addressable::URI::CharacterClasses::FRAGMENT) + ) + if return_type == String + return encoded_uri.to_s + elsif return_type == ::Addressable::URI + return encoded_uri + end + end + + ## + # Encodes a set of key/value pairs according to the rules for the + # application/x-www-form-urlencoded MIME type. + # + # @param [#to_hash, #to_ary] form_values + # The form values to encode. + # + # @param [TrueClass, FalseClass] sort + # Sort the key/value pairs prior to encoding. + # Defaults to false. + # + # @return [String] + # The encoded value. + def self.form_encode(form_values, sort=false) + if form_values.respond_to?(:to_hash) + form_values = form_values.to_hash.to_a + elsif form_values.respond_to?(:to_ary) + form_values = form_values.to_ary + else + raise TypeError, "Can't convert #{form_values.class} into Array." + end + + form_values = form_values.inject([]) do |accu, (key, value)| + if value.kind_of?(Array) + value.each do |v| + accu << [key.to_s, v.to_s] + end + else + accu << [key.to_s, value.to_s] + end + accu + end + + if sort + # Useful for OAuth and optimizing caching systems + form_values = form_values.sort + end + escaped_form_values = form_values.map do |(key, value)| + # Line breaks are CRLF pairs + [ + self.encode_component( + key.gsub(/(\r\n|\n|\r)/, "\r\n"), + CharacterClasses::UNRESERVED + ).gsub("%20", "+"), + self.encode_component( + value.gsub(/(\r\n|\n|\r)/, "\r\n"), + CharacterClasses::UNRESERVED + ).gsub("%20", "+") + ] + end + return escaped_form_values.map do |(key, value)| + "#{key}=#{value}" + end.join("&") + end + + ## + # Decodes a String according to the rules for the + # application/x-www-form-urlencoded MIME type. + # + # @param [String, #to_str] encoded_value + # The form values to decode. + # + # @return [Array] + # The decoded values. + # This is not a Hash because of the possibility for + # duplicate keys. + def self.form_unencode(encoded_value) + if !encoded_value.respond_to?(:to_str) + raise TypeError, "Can't convert #{encoded_value.class} into String." + end + encoded_value = encoded_value.to_str + split_values = encoded_value.split("&").map do |pair| + pair.split("=", 2) + end + return split_values.map do |(key, value)| + [ + key ? self.unencode_component( + key.gsub("+", "%20")).gsub(/(\r\n|\n|\r)/, "\n") : nil, + value ? 
(self.unencode_component( + value.gsub("+", "%20")).gsub(/(\r\n|\n|\r)/, "\n")) : nil + ] + end + end + + ## + # Creates a new uri object from component parts. + # + # @option [String, #to_str] scheme The scheme component. + # @option [String, #to_str] user The user component. + # @option [String, #to_str] password The password component. + # @option [String, #to_str] userinfo + # The userinfo component. If this is supplied, the user and password + # components must be omitted. + # @option [String, #to_str] host The host component. + # @option [String, #to_str] port The port component. + # @option [String, #to_str] authority + # The authority component. If this is supplied, the user, password, + # userinfo, host, and port components must be omitted. + # @option [String, #to_str] path The path component. + # @option [String, #to_str] query The query component. + # @option [String, #to_str] fragment The fragment component. + # + # @return [Addressable::URI] The constructed URI object. + def initialize(options={}) + if options.has_key?(:authority) + if (options.keys & [:userinfo, :user, :password, :host, :port]).any? + raise ArgumentError, + "Cannot specify both an authority and any of the components " + + "within the authority." + end + end + if options.has_key?(:userinfo) + if (options.keys & [:user, :password]).any? + raise ArgumentError, + "Cannot specify both a userinfo and either the user or password." + end + end + + self.defer_validation do + # Bunch of crazy logic required because of the composite components + # like userinfo and authority. + self.scheme = options[:scheme] if options[:scheme] + self.user = options[:user] if options[:user] + self.password = options[:password] if options[:password] + self.userinfo = options[:userinfo] if options[:userinfo] + self.host = options[:host] if options[:host] + self.port = options[:port] if options[:port] + self.authority = options[:authority] if options[:authority] + self.path = options[:path] if options[:path] + self.query = options[:query] if options[:query] + self.query_values = options[:query_values] if options[:query_values] + self.fragment = options[:fragment] if options[:fragment] + end + self.to_s + end + + ## + # Freeze URI, initializing instance variables. + # + # @return [Addressable::URI] The frozen URI object. + def freeze + self.normalized_scheme + self.normalized_user + self.normalized_password + self.normalized_userinfo + self.normalized_host + self.normalized_port + self.normalized_authority + self.normalized_site + self.normalized_path + self.normalized_query + self.normalized_fragment + self.hash + super + end + + ## + # The scheme component for this URI. + # + # @return [String] The scheme component. + def scheme + return defined?(@scheme) ? @scheme : nil + end + + ## + # The scheme component for this URI, normalized. + # + # @return [String] The scheme component, normalized. + def normalized_scheme + return nil unless self.scheme + @normalized_scheme ||= begin + if self.scheme =~ /^\s*ssh\+svn\s*$/i + "svn+ssh".dup + else + Addressable::URI.normalize_component( + self.scheme.strip.downcase, + Addressable::URI::NormalizeCharacterClasses::SCHEME + ) + end + end + # All normalized values should be UTF-8 + @normalized_scheme.force_encoding(Encoding::UTF_8) if @normalized_scheme + @normalized_scheme + end + + ## + # Sets the scheme component for this URI. + # + # @param [String, #to_str] new_scheme The new scheme component. 
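    #
    # Editor's sketch (not part of the vendored gem) of the validation this
    # setter performs; "ht tp" below is just a hypothetical bad value:
    #
    #   uri = Addressable::URI.parse("http://example.com/")
    #   uri.scheme = "https"   # accepted
    #   uri.scheme = "ht tp"   # raises Addressable::URI::InvalidURIError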
+ def scheme=(new_scheme) + if new_scheme && !new_scheme.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_scheme.class} into String." + elsif new_scheme + new_scheme = new_scheme.to_str + end + if new_scheme && new_scheme !~ /\A[a-z][a-z0-9\.\+\-]*\z/i + raise InvalidURIError, "Invalid scheme format: '#{new_scheme}'" + end + @scheme = new_scheme + @scheme = nil if @scheme.to_s.strip.empty? + + # Reset dependent values + remove_instance_variable(:@normalized_scheme) if defined?(@normalized_scheme) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The user component for this URI. + # + # @return [String] The user component. + def user + return defined?(@user) ? @user : nil + end + + ## + # The user component for this URI, normalized. + # + # @return [String] The user component, normalized. + def normalized_user + return nil unless self.user + return @normalized_user if defined?(@normalized_user) + @normalized_user ||= begin + if normalized_scheme =~ /https?/ && self.user.strip.empty? && + (!self.password || self.password.strip.empty?) + nil + else + Addressable::URI.normalize_component( + self.user.strip, + Addressable::URI::NormalizeCharacterClasses::UNRESERVED + ) + end + end + # All normalized values should be UTF-8 + @normalized_user.force_encoding(Encoding::UTF_8) if @normalized_user + @normalized_user + end + + ## + # Sets the user component for this URI. + # + # @param [String, #to_str] new_user The new user component. + def user=(new_user) + if new_user && !new_user.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_user.class} into String." + end + @user = new_user ? new_user.to_str : nil + + # You can't have a nil user with a non-nil password + if password != nil + @user = EMPTY_STR if @user.nil? + end + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_user) if defined?(@normalized_user) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The password component for this URI. + # + # @return [String] The password component. + def password + return defined?(@password) ? @password : nil + end + + ## + # The password component for this URI, normalized. + # + # @return [String] The password component, normalized. + def normalized_password + return nil unless self.password + return @normalized_password if defined?(@normalized_password) + @normalized_password ||= begin + if self.normalized_scheme =~ /https?/ && self.password.strip.empty? && + (!self.user || self.user.strip.empty?) + nil + else + Addressable::URI.normalize_component( + self.password.strip, + Addressable::URI::NormalizeCharacterClasses::UNRESERVED + ) + end + end + # All normalized values should be UTF-8 + if @normalized_password + @normalized_password.force_encoding(Encoding::UTF_8) + end + @normalized_password + end + + ## + # Sets the password component for this URI. + # + # @param [String, #to_str] new_password The new password component. + def password=(new_password) + if new_password && !new_password.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_password.class} into String." + end + @password = new_password ? 
new_password.to_str : nil + + # You can't have a nil user with a non-nil password + @password ||= nil + @user ||= nil + if @password != nil + @user = EMPTY_STR if @user.nil? + end + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_password) if defined?(@normalized_password) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The userinfo component for this URI. + # Combines the user and password components. + # + # @return [String] The userinfo component. + def userinfo + current_user = self.user + current_password = self.password + (current_user || current_password) && @userinfo ||= begin + if current_user && current_password + "#{current_user}:#{current_password}" + elsif current_user && !current_password + "#{current_user}" + end + end + end + + ## + # The userinfo component for this URI, normalized. + # + # @return [String] The userinfo component, normalized. + def normalized_userinfo + return nil unless self.userinfo + return @normalized_userinfo if defined?(@normalized_userinfo) + @normalized_userinfo ||= begin + current_user = self.normalized_user + current_password = self.normalized_password + if !current_user && !current_password + nil + elsif current_user && current_password + "#{current_user}:#{current_password}".dup + elsif current_user && !current_password + "#{current_user}".dup + end + end + # All normalized values should be UTF-8 + if @normalized_userinfo + @normalized_userinfo.force_encoding(Encoding::UTF_8) + end + @normalized_userinfo + end + + ## + # Sets the userinfo component for this URI. + # + # @param [String, #to_str] new_userinfo The new userinfo component. + def userinfo=(new_userinfo) + if new_userinfo && !new_userinfo.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_userinfo.class} into String." + end + new_user, new_password = if new_userinfo + [ + new_userinfo.to_str.strip[/^(.*):/, 1], + new_userinfo.to_str.strip[/:(.*)$/, 1] + ] + else + [nil, nil] + end + + # Password assigned first to ensure validity in case of nil + self.password = new_password + self.user = new_user + + # Reset dependent values + remove_instance_variable(:@authority) if defined?(@authority) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The host component for this URI. + # + # @return [String] The host component. + def host + return defined?(@host) ? @host : nil + end + + ## + # The host component for this URI, normalized. + # + # @return [String] The host component, normalized. + def normalized_host + return nil unless self.host + + @normalized_host ||= begin + if !self.host.strip.empty? + result = ::Addressable::IDNA.to_ascii( + URI.unencode_component(self.host.strip.downcase) + ) + if result =~ /[^\.]\.$/ + # Single trailing dots are unnecessary. + result = result[0...-1] + end + result = Addressable::URI.normalize_component( + result, + NormalizeCharacterClasses::HOST + ) + result + else + EMPTY_STR.dup + end + end + # All normalized values should be UTF-8 + if @normalized_host && !@normalized_host.empty? + @normalized_host.force_encoding(Encoding::UTF_8) + end + @normalized_host + end + + ## + # Sets the host component for this URI. + # + # @param [String, #to_str] new_host The new host component. 
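+    # Usage sketch for #host= (illustrative addition; hostnames are placeholders):
+    # @example
+    #   uri = Addressable::URI.parse("http://example.com/path")
+    #   uri.host = "www.example.org"
+    #   uri.authority   # => "www.example.org"
+    #   uri.to_s        # => "http://www.example.org/path"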
+ def host=(new_host) + if new_host && !new_host.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_host.class} into String." + end + @host = new_host ? new_host.to_str : nil + + # Reset dependent values + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_host) if defined?(@normalized_host) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # This method is same as URI::Generic#host except + # brackets for IPv6 (and 'IPvFuture') addresses are removed. + # + # @see Addressable::URI#host + # + # @return [String] The hostname for this URI. + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + + ## + # This method is same as URI::Generic#host= except + # the argument can be a bare IPv6 address (or 'IPvFuture'). + # + # @see Addressable::URI#host= + # + # @param [String, #to_str] new_hostname The new hostname for this URI. + def hostname=(new_hostname) + if new_hostname && + (new_hostname.respond_to?(:ipv4?) || new_hostname.respond_to?(:ipv6?)) + new_hostname = new_hostname.to_s + elsif new_hostname && !new_hostname.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_hostname.class} into String." + end + v = new_hostname ? new_hostname.to_str : nil + v = "[#{v}]" if /\A\[.*\]\z/ !~ v && /:/ =~ v + self.host = v + end + + ## + # Returns the top-level domain for this host. + # + # @example + # Addressable::URI.parse("http://www.example.co.uk").tld # => "co.uk" + def tld + PublicSuffix.parse(self.host, ignore_private: true).tld + end + + ## + # Sets the top-level domain for this URI. + # + # @param [String, #to_str] new_tld The new top-level domain. + def tld=(new_tld) + replaced_tld = host.sub(/#{tld}\z/, new_tld) + self.host = PublicSuffix::Domain.new(replaced_tld).to_s + end + + ## + # Returns the public suffix domain for this host. + # + # @example + # Addressable::URI.parse("http://www.example.co.uk").domain # => "example.co.uk" + def domain + PublicSuffix.domain(self.host, ignore_private: true) + end + + ## + # The authority component for this URI. + # Combines the user, password, host, and port components. + # + # @return [String] The authority component. + def authority + self.host && @authority ||= begin + authority = String.new + if self.userinfo != nil + authority << "#{self.userinfo}@" + end + authority << self.host + if self.port != nil + authority << ":#{self.port}" + end + authority + end + end + + ## + # The authority component for this URI, normalized. + # + # @return [String] The authority component, normalized. + def normalized_authority + return nil unless self.authority + @normalized_authority ||= begin + authority = String.new + if self.normalized_userinfo != nil + authority << "#{self.normalized_userinfo}@" + end + authority << self.normalized_host + if self.normalized_port != nil + authority << ":#{self.normalized_port}" + end + authority + end + # All normalized values should be UTF-8 + if @normalized_authority + @normalized_authority.force_encoding(Encoding::UTF_8) + end + @normalized_authority + end + + ## + # Sets the authority component for this URI. + # + # @param [String, #to_str] new_authority The new authority component. + def authority=(new_authority) + if new_authority + if !new_authority.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_authority.class} into String." 
+ end + new_authority = new_authority.to_str + new_userinfo = new_authority[/^([^\[\]]*)@/, 1] + if new_userinfo + new_user = new_userinfo.strip[/^([^:]*):?/, 1] + new_password = new_userinfo.strip[/:(.*)$/, 1] + end + new_host = new_authority.sub( + /^([^\[\]]*)@/, EMPTY_STR + ).sub( + /:([^:@\[\]]*?)$/, EMPTY_STR + ) + new_port = + new_authority[/:([^:@\[\]]*?)$/, 1] + end + + # Password assigned first to ensure validity in case of nil + self.password = defined?(new_password) ? new_password : nil + self.user = defined?(new_user) ? new_user : nil + self.host = defined?(new_host) ? new_host : nil + self.port = defined?(new_port) ? new_port : nil + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The origin for this URI, serialized to ASCII, as per + # RFC 6454, section 6.2. + # + # @return [String] The serialized origin. + def origin + if self.scheme && self.authority + if self.normalized_port + "#{self.normalized_scheme}://#{self.normalized_host}" + + ":#{self.normalized_port}" + else + "#{self.normalized_scheme}://#{self.normalized_host}" + end + else + "null" + end + end + + ## + # Sets the origin for this URI, serialized to ASCII, as per + # RFC 6454, section 6.2. This assignment will reset the `userinfo` + # component. + # + # @param [String, #to_str] new_origin The new origin component. + def origin=(new_origin) + if new_origin + if !new_origin.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_origin.class} into String." + end + new_origin = new_origin.to_str + new_scheme = new_origin[/^([^:\/?#]+):\/\//, 1] + unless new_scheme + raise InvalidURIError, 'An origin cannot omit the scheme.' + end + new_host = new_origin[/:\/\/([^\/?#:]+)/, 1] + unless new_host + raise InvalidURIError, 'An origin cannot omit the host.' + end + new_port = new_origin[/:([^:@\[\]\/]*?)$/, 1] + end + + self.scheme = defined?(new_scheme) ? new_scheme : nil + self.host = defined?(new_host) ? new_host : nil + self.port = defined?(new_port) ? new_port : nil + self.userinfo = nil + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_authority) if defined?(@normalized_authority) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + # Returns an array of known ip-based schemes. These schemes typically + # use a similar URI form: + # //:@:/ + def self.ip_based_schemes + return self.port_mapping.keys + end + + # Returns a hash of common IP-based schemes and their default port + # numbers. Adding new schemes to this hash, as necessary, will allow + # for better URI normalization. + def self.port_mapping + PORT_MAPPING + end + + ## + # The port component for this URI. + # This is the port number actually given in the URI. This does not + # infer port numbers from default values. + # + # @return [Integer] The port component. + def port + return defined?(@port) ? @port : nil + end + + ## + # The port component for this URI, normalized. + # + # @return [Integer] The port component, normalized. 
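+    # Sketch of default-port elision (illustrative addition; assumes the default
+    # PORT_MAPPING, in which "http" maps to 80):
+    # @example
+    #   Addressable::URI.parse("http://example.com:80/").normalized_port    # => nil
+    #   Addressable::URI.parse("http://example.com:8080/").normalized_port  # => 8080
+    #   Addressable::URI.parse("http://example.com:80/").normalize.to_s     # => "http://example.com/"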
+ def normalized_port + return nil unless self.port + return @normalized_port if defined?(@normalized_port) + @normalized_port ||= begin + if URI.port_mapping[self.normalized_scheme] == self.port + nil + else + self.port + end + end + end + + ## + # Sets the port component for this URI. + # + # @param [String, Integer, #to_s] new_port The new port component. + def port=(new_port) + if new_port != nil && new_port.respond_to?(:to_str) + new_port = Addressable::URI.unencode_component(new_port.to_str) + end + + if new_port.respond_to?(:valid_encoding?) && !new_port.valid_encoding? + raise InvalidURIError, "Invalid encoding in port" + end + + if new_port != nil && !(new_port.to_s =~ /^\d+$/) + raise InvalidURIError, + "Invalid port number: #{new_port.inspect}" + end + + @port = new_port.to_s.to_i + @port = nil if @port == 0 + + # Reset dependent values + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_port) if defined?(@normalized_port) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The inferred port component for this URI. + # This method will normalize to the default port for the URI's scheme if + # the port isn't explicitly specified in the URI. + # + # @return [Integer] The inferred port component. + def inferred_port + if self.port.to_i == 0 + self.default_port + else + self.port.to_i + end + end + + ## + # The default port for this URI's scheme. + # This method will always returns the default port for the URI's scheme + # regardless of the presence of an explicit port in the URI. + # + # @return [Integer] The default port. + def default_port + URI.port_mapping[self.scheme.strip.downcase] if self.scheme + end + + ## + # The combination of components that represent a site. + # Combines the scheme, user, password, host, and port components. + # Primarily useful for HTTP and HTTPS. + # + # For example, "http://example.com/path?query" would have a + # site value of "http://example.com". + # + # @return [String] The components that identify a site. + def site + (self.scheme || self.authority) && @site ||= begin + site_string = "".dup + site_string << "#{self.scheme}:" if self.scheme != nil + site_string << "//#{self.authority}" if self.authority != nil + site_string + end + end + + ## + # The normalized combination of components that represent a site. + # Combines the scheme, user, password, host, and port components. + # Primarily useful for HTTP and HTTPS. + # + # For example, "http://example.com/path?query" would have a + # site value of "http://example.com". + # + # @return [String] The normalized components that identify a site. + def normalized_site + return nil unless self.site + @normalized_site ||= begin + site_string = "".dup + if self.normalized_scheme != nil + site_string << "#{self.normalized_scheme}:" + end + if self.normalized_authority != nil + site_string << "//#{self.normalized_authority}" + end + site_string + end + # All normalized values should be UTF-8 + @normalized_site.force_encoding(Encoding::UTF_8) if @normalized_site + @normalized_site + end + + ## + # Sets the site value for this URI. + # + # @param [String, #to_str] new_site The new site value. + def site=(new_site) + if new_site + if !new_site.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_site.class} into String." 
+ end + new_site = new_site.to_str + # These two regular expressions derived from the primary parsing + # expression + self.scheme = new_site[/^(?:([^:\/?#]+):)?(?:\/\/(?:[^\/?#]*))?$/, 1] + self.authority = new_site[ + /^(?:(?:[^:\/?#]+):)?(?:\/\/([^\/?#]*))?$/, 1 + ] + else + self.scheme = nil + self.authority = nil + end + end + + ## + # The path component for this URI. + # + # @return [String] The path component. + def path + return defined?(@path) ? @path : EMPTY_STR + end + + NORMPATH = /^(?!\/)[^\/:]*:.*$/ + ## + # The path component for this URI, normalized. + # + # @return [String] The path component, normalized. + def normalized_path + @normalized_path ||= begin + path = self.path.to_s + if self.scheme == nil && path =~ NORMPATH + # Relative paths with colons in the first segment are ambiguous. + path = path.sub(":", "%2F") + end + # String#split(delimeter, -1) uses the more strict splitting behavior + # found by default in Python. + result = path.strip.split(SLASH, -1).map do |segment| + Addressable::URI.normalize_component( + segment, + Addressable::URI::NormalizeCharacterClasses::PCHAR + ) + end.join(SLASH) + + result = URI.normalize_path(result) + if result.empty? && + ["http", "https", "ftp", "tftp"].include?(self.normalized_scheme) + result = SLASH.dup + end + result + end + # All normalized values should be UTF-8 + @normalized_path.force_encoding(Encoding::UTF_8) if @normalized_path + @normalized_path + end + + ## + # Sets the path component for this URI. + # + # @param [String, #to_str] new_path The new path component. + def path=(new_path) + if new_path && !new_path.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_path.class} into String." + end + @path = (new_path || EMPTY_STR).to_str + if !@path.empty? && @path[0..0] != SLASH && host != nil + @path = "/#{@path}" + end + + # Reset dependent values + remove_instance_variable(:@normalized_path) if defined?(@normalized_path) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The basename, if any, of the file in the path component. + # + # @return [String] The path's basename. + def basename + # Path cannot be nil + return File.basename(self.path).sub(/;[^\/]*$/, EMPTY_STR) + end + + ## + # The extname, if any, of the file in the path component. + # Empty string if there is no extension. + # + # @return [String] The path's extname. + def extname + return nil unless self.path + return File.extname(self.basename) + end + + ## + # The query component for this URI. + # + # @return [String] The query component. + def query + return defined?(@query) ? @query : nil + end + + ## + # The query component for this URI, normalized. + # + # @return [String] The query component, normalized. + def normalized_query(*flags) + return nil unless self.query + return @normalized_query if defined?(@normalized_query) + @normalized_query ||= begin + modified_query_class = Addressable::URI::CharacterClasses::QUERY.dup + # Make sure possible key-value pair delimiters are escaped. + modified_query_class.sub!("\\&", "").sub!("\\;", "") + pairs = (query || "").split("&", -1) + pairs.delete_if(&:empty?).uniq! if flags.include?(:compacted) + pairs.sort! if flags.include?(:sorted) + component = pairs.map do |pair| + Addressable::URI.normalize_component( + pair, + Addressable::URI::NormalizeCharacterClasses::QUERY, + "+" + ) + end.join("&") + component == "" ? 
nil : component + end + # All normalized values should be UTF-8 + @normalized_query.force_encoding(Encoding::UTF_8) if @normalized_query + @normalized_query + end + + ## + # Sets the query component for this URI. + # + # @param [String, #to_str] new_query The new query component. + def query=(new_query) + if new_query && !new_query.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_query.class} into String." + end + @query = new_query ? new_query.to_str : nil + + # Reset dependent values + remove_instance_variable(:@normalized_query) if defined?(@normalized_query) + remove_composite_values + end + + ## + # Converts the query component to a Hash value. + # + # @param [Class] return_type The return type desired. Value must be either + # `Hash` or `Array`. + # + # @return [Hash, Array, nil] The query string parsed as a Hash or Array + # or nil if the query string is blank. + # + # @example + # Addressable::URI.parse("?one=1&two=2&three=3").query_values + # #=> {"one" => "1", "two" => "2", "three" => "3"} + # Addressable::URI.parse("?one=two&one=three").query_values(Array) + # #=> [["one", "two"], ["one", "three"]] + # Addressable::URI.parse("?one=two&one=three").query_values(Hash) + # #=> {"one" => "three"} + # Addressable::URI.parse("?").query_values + # #=> {} + # Addressable::URI.parse("").query_values + # #=> nil + def query_values(return_type=Hash) + empty_accumulator = Array == return_type ? [] : {} + if return_type != Hash && return_type != Array + raise ArgumentError, "Invalid return type. Must be Hash or Array." + end + return nil if self.query == nil + split_query = self.query.split("&").map do |pair| + pair.split("=", 2) if pair && !pair.empty? + end.compact + return split_query.inject(empty_accumulator.dup) do |accu, pair| + # I'd rather use key/value identifiers instead of array lookups, + # but in this case I really want to maintain the exact pair structure, + # so it's best to make all changes in-place. + pair[0] = URI.unencode_component(pair[0]) + if pair[1].respond_to?(:to_str) + value = pair[1].to_str + # I loathe the fact that I have to do this. Stupid HTML 4.01. + # Treating '+' as a space was just an unbelievably bad idea. + # There was nothing wrong with '%20'! + # If it ain't broke, don't fix it! + value = value.tr("+", " ") if ["http", "https", nil].include?(scheme) + pair[1] = URI.unencode_component(value) + end + if return_type == Hash + accu[pair[0]] = pair[1] + else + accu << pair + end + accu + end + end + + ## + # Sets the query component for this URI from a Hash object. + # An empty Hash or Array will result in an empty query string. + # + # @param [Hash, #to_hash, Array] new_query_values The new query values. + # + # @example + # uri.query_values = {:a => "a", :b => ["c", "d", "e"]} + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['a', 'a'], ['b', 'c'], ['b', 'd'], ['b', 'e']] + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['a', 'a'], ['b', ['c', 'd', 'e']]] + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['flag'], ['key', 'value']] + # uri.query + # # => "flag&key=value" + def query_values=(new_query_values) + if new_query_values == nil + self.query = nil + return nil + end + + if !new_query_values.is_a?(Array) + if !new_query_values.respond_to?(:to_hash) + raise TypeError, + "Can't convert #{new_query_values.class} into Hash." 
+ end + new_query_values = new_query_values.to_hash + new_query_values = new_query_values.map do |key, value| + key = key.to_s if key.kind_of?(Symbol) + [key, value] + end + # Useful default for OAuth and caching. + # Only to be used for non-Array inputs. Arrays should preserve order. + new_query_values.sort! + end + + # new_query_values have form [['key1', 'value1'], ['key2', 'value2']] + buffer = "".dup + new_query_values.each do |key, value| + encoded_key = URI.encode_component( + key, CharacterClasses::UNRESERVED + ) + if value == nil + buffer << "#{encoded_key}&" + elsif value.kind_of?(Array) + value.each do |sub_value| + encoded_value = URI.encode_component( + sub_value, CharacterClasses::UNRESERVED + ) + buffer << "#{encoded_key}=#{encoded_value}&" + end + else + encoded_value = URI.encode_component( + value, CharacterClasses::UNRESERVED + ) + buffer << "#{encoded_key}=#{encoded_value}&" + end + end + self.query = buffer.chop + end + + ## + # The HTTP request URI for this URI. This is the path and the + # query string. + # + # @return [String] The request URI required for an HTTP request. + def request_uri + return nil if self.absolute? && self.scheme !~ /^https?$/i + return ( + (!self.path.empty? ? self.path : SLASH) + + (self.query ? "?#{self.query}" : EMPTY_STR) + ) + end + + ## + # Sets the HTTP request URI for this URI. + # + # @param [String, #to_str] new_request_uri The new HTTP request URI. + def request_uri=(new_request_uri) + if !new_request_uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_request_uri.class} into String." + end + if self.absolute? && self.scheme !~ /^https?$/i + raise InvalidURIError, + "Cannot set an HTTP request URI for a non-HTTP URI." + end + new_request_uri = new_request_uri.to_str + path_component = new_request_uri[/^([^\?]*)\??(?:.*)$/, 1] + query_component = new_request_uri[/^(?:[^\?]*)\?(.*)$/, 1] + path_component = path_component.to_s + path_component = (!path_component.empty? ? path_component : SLASH) + self.path = path_component + self.query = query_component + + # Reset dependent values + remove_composite_values + end + + ## + # The fragment component for this URI. + # + # @return [String] The fragment component. + def fragment + return defined?(@fragment) ? @fragment : nil + end + + ## + # The fragment component for this URI, normalized. + # + # @return [String] The fragment component, normalized. + def normalized_fragment + return nil unless self.fragment + return @normalized_fragment if defined?(@normalized_fragment) + @normalized_fragment ||= begin + component = Addressable::URI.normalize_component( + self.fragment, + Addressable::URI::NormalizeCharacterClasses::FRAGMENT + ) + component == "" ? nil : component + end + # All normalized values should be UTF-8 + if @normalized_fragment + @normalized_fragment.force_encoding(Encoding::UTF_8) + end + @normalized_fragment + end + + ## + # Sets the fragment component for this URI. + # + # @param [String, #to_str] new_fragment The new fragment component. + def fragment=(new_fragment) + if new_fragment && !new_fragment.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_fragment.class} into String." + end + @fragment = new_fragment ? new_fragment.to_str : nil + + # Reset dependent values + remove_instance_variable(:@normalized_fragment) if defined?(@normalized_fragment) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # Determines if the scheme indicates an IP-based protocol. 
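+    # Illustrative sketch (added; relies on the default PORT_MAPPING, which
+    # includes "http" but not "mailto"):
+    # @example
+    #   Addressable::URI.parse("http://example.com/").ip_based?       # => true
+    #   Addressable::URI.parse("mailto:user@example.com").ip_based?   # => false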
+ # + # @return [TrueClass, FalseClass] + # true if the scheme indicates an IP-based protocol. + # false otherwise. + def ip_based? + if self.scheme + return URI.ip_based_schemes.include?( + self.scheme.strip.downcase) + end + return false + end + + ## + # Determines if the URI is relative. + # + # @return [TrueClass, FalseClass] + # true if the URI is relative. false + # otherwise. + def relative? + return self.scheme.nil? + end + + ## + # Determines if the URI is absolute. + # + # @return [TrueClass, FalseClass] + # true if the URI is absolute. false + # otherwise. + def absolute? + return !relative? + end + + ## + # Joins two URIs together. + # + # @param [String, Addressable::URI, #to_str] The URI to join with. + # + # @return [Addressable::URI] The joined URI. + def join(uri) + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + if !uri.kind_of?(URI) + # Otherwise, convert to a String, then parse. + uri = URI.parse(uri.to_str) + end + if uri.to_s.empty? + return self.dup + end + + joined_scheme = nil + joined_user = nil + joined_password = nil + joined_host = nil + joined_port = nil + joined_path = nil + joined_query = nil + joined_fragment = nil + + # Section 5.2.2 of RFC 3986 + if uri.scheme != nil + joined_scheme = uri.scheme + joined_user = uri.user + joined_password = uri.password + joined_host = uri.host + joined_port = uri.port + joined_path = URI.normalize_path(uri.path) + joined_query = uri.query + else + if uri.authority != nil + joined_user = uri.user + joined_password = uri.password + joined_host = uri.host + joined_port = uri.port + joined_path = URI.normalize_path(uri.path) + joined_query = uri.query + else + if uri.path == nil || uri.path.empty? + joined_path = self.path + if uri.query != nil + joined_query = uri.query + else + joined_query = self.query + end + else + if uri.path[0..0] == SLASH + joined_path = URI.normalize_path(uri.path) + else + base_path = self.path.dup + base_path = EMPTY_STR if base_path == nil + base_path = URI.normalize_path(base_path) + + # Section 5.2.3 of RFC 3986 + # + # Removes the right-most path segment from the base path. + if base_path.include?(SLASH) + base_path.sub!(/\/[^\/]+$/, SLASH) + else + base_path = EMPTY_STR + end + + # If the base path is empty and an authority segment has been + # defined, use a base path of SLASH + if base_path.empty? && self.authority != nil + base_path = SLASH + end + + joined_path = URI.normalize_path(base_path + uri.path) + end + joined_query = uri.query + end + joined_user = self.user + joined_password = self.password + joined_host = self.host + joined_port = self.port + end + joined_scheme = self.scheme + end + joined_fragment = uri.fragment + + return self.class.new( + :scheme => joined_scheme, + :user => joined_user, + :password => joined_password, + :host => joined_host, + :port => joined_port, + :path => joined_path, + :query => joined_query, + :fragment => joined_fragment + ) + end + alias_method :+, :join + + ## + # Destructive form of join. + # + # @param [String, Addressable::URI, #to_str] The URI to join with. + # + # @return [Addressable::URI] The joined URI. + # + # @see Addressable::URI#join + def join!(uri) + replace_self(self.join(uri)) + end + + ## + # Merges a URI with a Hash of components. + # This method has different behavior from join. Any + # components present in the hash parameter will override the + # original components. The path component is not treated specially. 
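+    # Usage sketch for #merge (illustrative addition; the URL and components are
+    # placeholders):
+    # @example
+    #   uri = Addressable::URI.parse("http://user@example.com/path?query")
+    #   uri.merge(:scheme => "https", :port => 8443).to_s
+    #   # => "https://user@example.com:8443/path?query"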
+ # + # @param [Hash, Addressable::URI, #to_hash] The components to merge with. + # + # @return [Addressable::URI] The merged URI. + # + # @see Hash#merge + def merge(hash) + if !hash.respond_to?(:to_hash) + raise TypeError, "Can't convert #{hash.class} into Hash." + end + hash = hash.to_hash + + if hash.has_key?(:authority) + if (hash.keys & [:userinfo, :user, :password, :host, :port]).any? + raise ArgumentError, + "Cannot specify both an authority and any of the components " + + "within the authority." + end + end + if hash.has_key?(:userinfo) + if (hash.keys & [:user, :password]).any? + raise ArgumentError, + "Cannot specify both a userinfo and either the user or password." + end + end + + uri = self.class.new + uri.defer_validation do + # Bunch of crazy logic required because of the composite components + # like userinfo and authority. + uri.scheme = + hash.has_key?(:scheme) ? hash[:scheme] : self.scheme + if hash.has_key?(:authority) + uri.authority = + hash.has_key?(:authority) ? hash[:authority] : self.authority + end + if hash.has_key?(:userinfo) + uri.userinfo = + hash.has_key?(:userinfo) ? hash[:userinfo] : self.userinfo + end + if !hash.has_key?(:userinfo) && !hash.has_key?(:authority) + uri.user = + hash.has_key?(:user) ? hash[:user] : self.user + uri.password = + hash.has_key?(:password) ? hash[:password] : self.password + end + if !hash.has_key?(:authority) + uri.host = + hash.has_key?(:host) ? hash[:host] : self.host + uri.port = + hash.has_key?(:port) ? hash[:port] : self.port + end + uri.path = + hash.has_key?(:path) ? hash[:path] : self.path + uri.query = + hash.has_key?(:query) ? hash[:query] : self.query + uri.fragment = + hash.has_key?(:fragment) ? hash[:fragment] : self.fragment + end + + return uri + end + + ## + # Destructive form of merge. + # + # @param [Hash, Addressable::URI, #to_hash] The components to merge with. + # + # @return [Addressable::URI] The merged URI. + # + # @see Addressable::URI#merge + def merge!(uri) + replace_self(self.merge(uri)) + end + + ## + # Returns the shortest normalized relative form of this URI that uses the + # supplied URI as a base for resolution. Returns an absolute URI if + # necessary. This is effectively the opposite of route_to. + # + # @param [String, Addressable::URI, #to_str] uri The URI to route from. + # + # @return [Addressable::URI] + # The normalized relative URI that is equivalent to the original URI. + def route_from(uri) + uri = URI.parse(uri).normalize + normalized_self = self.normalize + if normalized_self.relative? + raise ArgumentError, "Expected absolute URI, got: #{self.to_s}" + end + if uri.relative? + raise ArgumentError, "Expected absolute URI, got: #{uri.to_s}" + end + if normalized_self == uri + return Addressable::URI.parse("##{normalized_self.fragment}") + end + components = normalized_self.to_hash + if normalized_self.scheme == uri.scheme + components[:scheme] = nil + if normalized_self.authority == uri.authority + components[:user] = nil + components[:password] = nil + components[:host] = nil + components[:port] = nil + if normalized_self.path == uri.path + components[:path] = nil + if normalized_self.query == uri.query + components[:query] = nil + end + else + if uri.path != SLASH and components[:path] + self_splitted_path = split_path(components[:path]) + uri_splitted_path = split_path(uri.path) + self_dir = self_splitted_path.shift + uri_dir = uri_splitted_path.shift + while !self_splitted_path.empty? && !uri_splitted_path.empty? 
and self_dir == uri_dir + self_dir = self_splitted_path.shift + uri_dir = uri_splitted_path.shift + end + components[:path] = (uri_splitted_path.fill('..') + [self_dir] + self_splitted_path).join(SLASH) + end + end + end + end + # Avoid network-path references. + if components[:host] != nil + components[:scheme] = normalized_self.scheme + end + return Addressable::URI.new( + :scheme => components[:scheme], + :user => components[:user], + :password => components[:password], + :host => components[:host], + :port => components[:port], + :path => components[:path], + :query => components[:query], + :fragment => components[:fragment] + ) + end + + ## + # Returns the shortest normalized relative form of the supplied URI that + # uses this URI as a base for resolution. Returns an absolute URI if + # necessary. This is effectively the opposite of route_from. + # + # @param [String, Addressable::URI, #to_str] uri The URI to route to. + # + # @return [Addressable::URI] + # The normalized relative URI that is equivalent to the supplied URI. + def route_to(uri) + return URI.parse(uri).route_from(self) + end + + ## + # Returns a normalized URI object. + # + # NOTE: This method does not attempt to fully conform to specifications. + # It exists largely to correct other people's failures to read the + # specifications, and also to deal with caching issues since several + # different URIs may represent the same resource and should not be + # cached multiple times. + # + # @return [Addressable::URI] The normalized URI. + def normalize + # This is a special exception for the frequently misused feed + # URI scheme. + if normalized_scheme == "feed" + if self.to_s =~ /^feed:\/*http:\/*/ + return URI.parse( + self.to_s[/^feed:\/*(http:\/*.*)/, 1] + ).normalize + end + end + + return self.class.new( + :scheme => normalized_scheme, + :authority => normalized_authority, + :path => normalized_path, + :query => normalized_query, + :fragment => normalized_fragment + ) + end + + ## + # Destructively normalizes this URI object. + # + # @return [Addressable::URI] The normalized URI. + # + # @see Addressable::URI#normalize + def normalize! + replace_self(self.normalize) + end + + ## + # Creates a URI suitable for display to users. If semantic attacks are + # likely, the application should try to detect these and warn the user. + # See RFC 3986, + # section 7.6 for more information. + # + # @return [Addressable::URI] A URI suitable for display purposes. + def display_uri + display_uri = self.normalize + display_uri.host = ::Addressable::IDNA.to_unicode(display_uri.host) + return display_uri + end + + ## + # Returns true if the URI objects are equal. This method + # normalizes both URIs before doing the comparison, and allows comparison + # against Strings. + # + # @param [Object] uri The URI to compare. + # + # @return [TrueClass, FalseClass] + # true if the URIs are equivalent, false + # otherwise. + def ===(uri) + if uri.respond_to?(:normalize) + uri_string = uri.normalize.to_s + else + begin + uri_string = ::Addressable::URI.parse(uri).normalize.to_s + rescue InvalidURIError, TypeError + return false + end + end + return self.normalize.to_s == uri_string + end + + ## + # Returns true if the URI objects are equal. This method + # normalizes both URIs before doing the comparison. + # + # @param [Object] uri The URI to compare. + # + # @return [TrueClass, FalseClass] + # true if the URIs are equivalent, false + # otherwise. 
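+    # Sketch contrasting #== with #eql? (illustrative addition; URLs are placeholders):
+    # @example
+    #   a = Addressable::URI.parse("HTTP://example.com:80/")
+    #   b = Addressable::URI.parse("http://example.com/")
+    #   a == b       # => true   (both URIs are normalized before comparison)
+    #   a.eql?(b)    # => false  (compares the exact, unnormalized forms)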
+ def ==(uri) + return false unless uri.kind_of?(URI) + return self.normalize.to_s == uri.normalize.to_s + end + + ## + # Returns true if the URI objects are equal. This method + # does NOT normalize either URI before doing the comparison. + # + # @param [Object] uri The URI to compare. + # + # @return [TrueClass, FalseClass] + # true if the URIs are equivalent, false + # otherwise. + def eql?(uri) + return false unless uri.kind_of?(URI) + return self.to_s == uri.to_s + end + + ## + # A hash value that will make a URI equivalent to its normalized + # form. + # + # @return [Integer] A hash of the URI. + def hash + @hash ||= self.to_s.hash * -1 + end + + ## + # Clones the URI object. + # + # @return [Addressable::URI] The cloned URI. + def dup + duplicated_uri = self.class.new( + :scheme => self.scheme ? self.scheme.dup : nil, + :user => self.user ? self.user.dup : nil, + :password => self.password ? self.password.dup : nil, + :host => self.host ? self.host.dup : nil, + :port => self.port, + :path => self.path ? self.path.dup : nil, + :query => self.query ? self.query.dup : nil, + :fragment => self.fragment ? self.fragment.dup : nil + ) + return duplicated_uri + end + + ## + # Omits components from a URI. + # + # @param [Symbol] *components The components to be omitted. + # + # @return [Addressable::URI] The URI with components omitted. + # + # @example + # uri = Addressable::URI.parse("http://example.com/path?query") + # #=> # + # uri.omit(:scheme, :authority) + # #=> # + def omit(*components) + invalid_components = components - [ + :scheme, :user, :password, :userinfo, :host, :port, :authority, + :path, :query, :fragment + ] + unless invalid_components.empty? + raise ArgumentError, + "Invalid component names: #{invalid_components.inspect}." + end + duplicated_uri = self.dup + duplicated_uri.defer_validation do + components.each do |component| + duplicated_uri.send((component.to_s + "=").to_sym, nil) + end + duplicated_uri.user = duplicated_uri.normalized_user + end + duplicated_uri + end + + ## + # Destructive form of omit. + # + # @param [Symbol] *components The components to be omitted. + # + # @return [Addressable::URI] The URI with components omitted. + # + # @see Addressable::URI#omit + def omit!(*components) + replace_self(self.omit(*components)) + end + + ## + # Determines if the URI is an empty string. + # + # @return [TrueClass, FalseClass] + # Returns true if empty, false otherwise. + def empty? + return self.to_s.empty? + end + + ## + # Converts the URI to a String. + # + # @return [String] The URI's String representation. + def to_s + if self.scheme == nil && self.path != nil && !self.path.empty? && + self.path =~ NORMPATH + raise InvalidURIError, + "Cannot assemble URI string with ambiguous path: '#{self.path}'" + end + @uri_string ||= begin + uri_string = String.new + uri_string << "#{self.scheme}:" if self.scheme != nil + uri_string << "//#{self.authority}" if self.authority != nil + uri_string << self.path.to_s + uri_string << "?#{self.query}" if self.query != nil + uri_string << "##{self.fragment}" if self.fragment != nil + uri_string.force_encoding(Encoding::UTF_8) + uri_string + end + end + + ## + # URI's are glorified Strings. Allow implicit conversion. + alias_method :to_str, :to_s + + ## + # Returns a Hash of the URI components. + # + # @return [Hash] The URI as a Hash of components. 
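+    # Usage sketch for #to_hash (illustrative addition; the URL is a placeholder):
+    # @example
+    #   Addressable::URI.parse("http://example.com/path?q=1").to_hash
+    #   # => { :scheme => "http", :user => nil, :password => nil,
+    #   #      :host => "example.com", :port => nil, :path => "/path",
+    #   #      :query => "q=1", :fragment => nil }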
+ def to_hash + return { + :scheme => self.scheme, + :user => self.user, + :password => self.password, + :host => self.host, + :port => self.port, + :path => self.path, + :query => self.query, + :fragment => self.fragment + } + end + + ## + # Returns a String representation of the URI object's state. + # + # @return [String] The URI object's state, as a String. + def inspect + sprintf("#<%s:%#0x URI:%s>", URI.to_s, self.object_id, self.to_s) + end + + ## + # This method allows you to make several changes to a URI simultaneously, + # which separately would cause validation errors, but in conjunction, + # are valid. The URI will be revalidated as soon as the entire block has + # been executed. + # + # @param [Proc] block + # A set of operations to perform on a given URI. + def defer_validation + raise LocalJumpError, "No block given." unless block_given? + @validation_deferred = true + yield + @validation_deferred = false + validate + return nil + end + + protected + SELF_REF = '.' + PARENT = '..' + + RULE_2A = /\/\.\/|\/\.$/ + RULE_2B_2C = /\/([^\/]*)\/\.\.\/|\/([^\/]*)\/\.\.$/ + RULE_2D = /^\.\.?\/?/ + RULE_PREFIXED_PARENT = /^\/\.\.?\/|^(\/\.\.?)+\/?$/ + + ## + # Resolves paths to their simplest form. + # + # @param [String] path The path to normalize. + # + # @return [String] The normalized path. + def self.normalize_path(path) + # Section 5.2.4 of RFC 3986 + + return nil if path.nil? + normalized_path = path.dup + begin + mod = nil + mod ||= normalized_path.gsub!(RULE_2A, SLASH) + + pair = normalized_path.match(RULE_2B_2C) + parent, current = pair[1], pair[2] if pair + if pair && ((parent != SELF_REF && parent != PARENT) || + (current != SELF_REF && current != PARENT)) + mod ||= normalized_path.gsub!( + Regexp.new( + "/#{Regexp.escape(parent.to_s)}/\\.\\./|" + + "(/#{Regexp.escape(current.to_s)}/\\.\\.$)" + ), SLASH + ) + end + + mod ||= normalized_path.gsub!(RULE_2D, EMPTY_STR) + # Non-standard, removes prefixed dotted segments from path. + mod ||= normalized_path.gsub!(RULE_PREFIXED_PARENT, SLASH) + end until mod.nil? + + return normalized_path + end + + ## + # Ensures that the URI is valid. + def validate + return if !!@validation_deferred + if self.scheme != nil && self.ip_based? && + (self.host == nil || self.host.empty?) && + (self.path == nil || self.path.empty?) + raise InvalidURIError, + "Absolute URI missing hierarchical segment: '#{self.to_s}'" + end + if self.host == nil + if self.port != nil || + self.user != nil || + self.password != nil + raise InvalidURIError, "Hostname not supplied: '#{self.to_s}'" + end + end + if self.path != nil && !self.path.empty? && self.path[0..0] != SLASH && + self.authority != nil + raise InvalidURIError, + "Cannot have a relative path with an authority set: '#{self.to_s}'" + end + if self.path != nil && !self.path.empty? && + self.path[0..1] == SLASH + SLASH && self.authority == nil + raise InvalidURIError, + "Cannot have a path with two leading slashes " + + "without an authority set: '#{self.to_s}'" + end + unreserved = CharacterClasses::UNRESERVED + sub_delims = CharacterClasses::SUB_DELIMS + if !self.host.nil? && (self.host =~ /[<>{}\/\\\?\#\@"[[:space:]]]/ || + (self.host[/^\[(.*)\]$/, 1] != nil && self.host[/^\[(.*)\]$/, 1] !~ + Regexp.new("^[#{unreserved}#{sub_delims}:]*$"))) + raise InvalidURIError, "Invalid character in host: '#{self.host.to_s}'" + end + return nil + end + + ## + # Replaces the internal state of self with the specified URI's state. + # Used in destructive operations to avoid massive code repetition. 
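+    # Usage sketch for #defer_validation, documented above (illustrative addition;
+    # the host and port are placeholders):
+    # @example
+    #   uri = Addressable::URI.new
+    #   uri.defer_validation do
+    #     uri.port = 8080            # on its own this would raise InvalidURIError (no host yet)
+    #     uri.host = "example.com"
+    #   end
+    #   uri.to_s   # => "//example.com:8080"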
+ # + # @param [Addressable::URI] uri The URI to replace self with. + # + # @return [Addressable::URI] self. + def replace_self(uri) + # Reset dependent values + instance_variables.each do |var| + if instance_variable_defined?(var) && var != :@validation_deferred + remove_instance_variable(var) + end + end + + @scheme = uri.scheme + @user = uri.user + @password = uri.password + @host = uri.host + @port = uri.port + @path = uri.path + @query = uri.query + @fragment = uri.fragment + return self + end + + ## + # Splits path string with "/" (slash). + # It is considered that there is empty string after last slash when + # path ends with slash. + # + # @param [String] path The path to split. + # + # @return [Array] An array of parts of path. + def split_path(path) + splitted = path.split(SLASH) + splitted << EMPTY_STR if path.end_with? SLASH + splitted + end + + ## + # Resets composite values for the entire URI + # + # @api private + def remove_composite_values + remove_instance_variable(:@uri_string) if defined?(@uri_string) + remove_instance_variable(:@hash) if defined?(@hash) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/version.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/version.rb new file mode 100644 index 0000000..2efe434 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/lib/addressable/version.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +# Used to prevent the class/module from being loaded more than once +if !defined?(Addressable::VERSION) + module Addressable + module VERSION + MAJOR = 2 + MINOR = 8 + TINY = 0 + + STRING = [MAJOR, MINOR, TINY].join('.') + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/idna_spec.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/idna_spec.rb new file mode 100644 index 0000000..4104b37 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/idna_spec.rb @@ -0,0 +1,302 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +# Have to use RubyGems to load the idn gem. 
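+# Quick sketch of the API exercised by these specs (illustrative addition; the
+# expected values are taken from the examples below):
+#
+#   Addressable::IDNA.to_ascii("www.詹姆斯.com")             # => "www.xn--8ws00zhy3a.com"
+#   Addressable::IDNA.to_unicode("www.xn--8ws00zhy3a.com")  # => "www.詹姆斯.com"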
+require "rubygems" + +require "addressable/idna" + +shared_examples_for "converting from unicode to ASCII" do + it "should convert 'www.google.com' correctly" do + expect(Addressable::IDNA.to_ascii("www.google.com")).to eq("www.google.com") + end + + long = 'AcinusFallumTrompetumNullunCreditumVisumEstAtCuadLongumEtCefallum.com' + it "should convert '#{long}' correctly" do + expect(Addressable::IDNA.to_ascii(long)).to eq(long) + end + + it "should convert 'www.詹姆斯.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "www.詹姆斯.com" + )).to eq("www.xn--8ws00zhy3a.com") + end + + it "should convert 'www.Iñtërnâtiônàlizætiøn.com' correctly" do + "www.Iñtërnâtiônàlizætiøn.com" + expect(Addressable::IDNA.to_ascii( + "www.I\xC3\xB1t\xC3\xABrn\xC3\xA2ti\xC3\xB4" + + "n\xC3\xA0liz\xC3\xA6ti\xC3\xB8n.com" + )).to eq("www.xn--itrntinliztin-vdb0a5exd8ewcye.com") + end + + it "should convert 'www.Iñtërnâtiônàlizætiøn.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "www.In\xCC\x83te\xCC\x88rna\xCC\x82tio\xCC\x82n" + + "a\xCC\x80liz\xC3\xA6ti\xC3\xB8n.com" + )).to eq("www.xn--itrntinliztin-vdb0a5exd8ewcye.com") + end + + it "should convert " + + "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " + + "correctly" do + expect(Addressable::IDNA.to_ascii( + "www.\343\201\273\343\202\223\343\201\250\343\201\206\343\201\253\343" + + "\201\252\343\201\214\343\201\204\343\202\217\343\201\221\343\201\256" + + "\343\202\217\343\201\213\343\202\211\343\201\252\343\201\204\343\201" + + "\251\343\202\201\343\201\204\343\202\223\343\202\201\343\201\204\343" + + "\201\256\343\202\211\343\201\271\343\202\213\343\201\276\343\201\240" + + "\343\201\252\343\201\214\343\201\217\343\201\227\343\201\252\343\201" + + "\204\343\201\250\343\201\237\343\202\212\343\201\252\343\201\204." + + "w3.mag.keio.ac.jp" + )).to eq( + "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" + + "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end + + it "should convert " + + "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " + + "correctly" do + expect(Addressable::IDNA.to_ascii( + "www.\343\201\273\343\202\223\343\201\250\343\201\206\343\201\253\343" + + "\201\252\343\201\213\343\202\231\343\201\204\343\202\217\343\201\221" + + "\343\201\256\343\202\217\343\201\213\343\202\211\343\201\252\343\201" + + "\204\343\201\250\343\202\231\343\202\201\343\201\204\343\202\223\343" + + "\202\201\343\201\204\343\201\256\343\202\211\343\201\270\343\202\231" + + "\343\202\213\343\201\276\343\201\237\343\202\231\343\201\252\343\201" + + "\213\343\202\231\343\201\217\343\201\227\343\201\252\343\201\204\343" + + "\201\250\343\201\237\343\202\212\343\201\252\343\201\204." 
+ + "w3.mag.keio.ac.jp" + )).to eq( + "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" + + "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end + + it "should convert '点心和烤鸭.w3.mag.keio.ac.jp' correctly" do + expect(Addressable::IDNA.to_ascii( + "点心和烤鸭.w3.mag.keio.ac.jp" + )).to eq("xn--0trv4xfvn8el34t.w3.mag.keio.ac.jp") + end + + it "should convert '가각갂갃간갅갆갇갈갉힢힣.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "가각갂갃간갅갆갇갈갉힢힣.com" + )).to eq("xn--o39acdefghijk5883jma.com") + end + + it "should convert " + + "'\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com" + )).to eq("xn--9cs565brid46mda086o.com") + end + + it "should convert 'リ宠퐱〹.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "\357\276\230\345\256\240\355\220\261\343\200\271.com" + )).to eq("xn--eek174hoxfpr4k.com") + end + + it "should convert 'リ宠퐱卄.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "\343\203\252\345\256\240\355\220\261\345\215\204.com" + )).to eq("xn--eek174hoxfpr4k.com") + end + + it "should convert 'ᆵ' correctly" do + expect(Addressable::IDNA.to_ascii( + "\341\206\265" + )).to eq("xn--4ud") + end + + it "should convert 'ᆵ' correctly" do + expect(Addressable::IDNA.to_ascii( + "\357\276\257" + )).to eq("xn--4ud") + end + + it "should convert '🌹🌹🌹.ws' correctly" do + expect(Addressable::IDNA.to_ascii( + "\360\237\214\271\360\237\214\271\360\237\214\271.ws" + )).to eq("xn--2h8haa.ws") + end + + it "should handle two adjacent '.'s correctly" do + expect(Addressable::IDNA.to_ascii( + "example..host" + )).to eq("example..host") + end +end + +shared_examples_for "converting from ASCII to unicode" do + long = 'AcinusFallumTrompetumNullunCreditumVisumEstAtCuadLongumEtCefallum.com' + it "should convert '#{long}' correctly" do + expect(Addressable::IDNA.to_unicode(long)).to eq(long) + end + + it "should return the identity conversion when punycode decode fails" do + expect(Addressable::IDNA.to_unicode("xn--zckp1cyg1.sblo.jp")).to eq( + "xn--zckp1cyg1.sblo.jp") + end + + it "should return the identity conversion when the ACE prefix has no suffix" do + expect(Addressable::IDNA.to_unicode("xn--...-")).to eq("xn--...-") + end + + it "should convert 'www.google.com' correctly" do + expect(Addressable::IDNA.to_unicode("www.google.com")).to eq( + "www.google.com") + end + + it "should convert 'www.詹姆斯.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "www.xn--8ws00zhy3a.com" + )).to eq("www.詹姆斯.com") + end + + it "should convert '詹姆斯.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--8ws00zhy3a.com" + )).to eq("詹姆斯.com") + end + + it "should convert 'www.iñtërnâtiônàlizætiøn.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "www.xn--itrntinliztin-vdb0a5exd8ewcye.com" + )).to eq("www.iñtërnâtiônàlizætiøn.com") + end + + it "should convert 'iñtërnâtiônàlizætiøn.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--itrntinliztin-vdb0a5exd8ewcye.com" + )).to eq("iñtërnâtiônàlizætiøn.com") + end + + it "should convert " + + "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " + + "correctly" do + expect(Addressable::IDNA.to_unicode( + "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" + + "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + )).to eq( + "www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp" + ) + end + + it "should convert '点心和烤鸭.w3.mag.keio.ac.jp' correctly" do + 
expect(Addressable::IDNA.to_unicode( + "xn--0trv4xfvn8el34t.w3.mag.keio.ac.jp" + )).to eq("点心和烤鸭.w3.mag.keio.ac.jp") + end + + it "should convert '가각갂갃간갅갆갇갈갉힢힣.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--o39acdefghijk5883jma.com" + )).to eq("가각갂갃간갅갆갇갈갉힢힣.com") + end + + it "should convert " + + "'\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--9cs565brid46mda086o.com" + )).to eq( + "\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com" + ) + end + + it "should convert 'リ宠퐱卄.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--eek174hoxfpr4k.com" + )).to eq("\343\203\252\345\256\240\355\220\261\345\215\204.com") + end + + it "should convert 'ᆵ' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--4ud" + )).to eq("\341\206\265") + end + + it "should convert '🌹🌹🌹.ws' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--2h8haa.ws" + )).to eq("\360\237\214\271\360\237\214\271\360\237\214\271.ws") + end + + it "should handle two adjacent '.'s correctly" do + expect(Addressable::IDNA.to_unicode( + "example..host" + )).to eq("example..host") + end + + it "should normalize 'string' correctly" do + expect(Addressable::IDNA.unicode_normalize_kc(:'string')).to eq("string") + expect(Addressable::IDNA.unicode_normalize_kc("string")).to eq("string") + end +end + +describe Addressable::IDNA, "when using the pure-Ruby implementation" do + before do + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + + it_should_behave_like "converting from unicode to ASCII" + it_should_behave_like "converting from ASCII to unicode" + + begin + require "fiber" + + it "should not blow up inside fibers" do + f = Fiber.new do + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + f.resume + end + rescue LoadError + # Fibers aren't supported in this version of Ruby, skip this test. + warn('Fibers unsupported.') + end +end + +begin + require "idn" + + describe Addressable::IDNA, "when using the native-code implementation" do + before do + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/native.rb" + end + + it_should_behave_like "converting from unicode to ASCII" + it_should_behave_like "converting from ASCII to unicode" + end +rescue LoadError => error + raise error if ENV["CI"] && TestHelper.native_supported? + + # Cannot test the native implementation without libidn support. + warn('Could not load native IDN implementation.') +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/net_http_compat_spec.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/net_http_compat_spec.rb new file mode 100644 index 0000000..8663a86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/net_http_compat_spec.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "addressable/uri" +require "net/http" + +describe Net::HTTP do + it "should be compatible with Addressable" do + response_body = + Net::HTTP.get(Addressable::URI.parse('http://www.google.com/')) + expect(response_body).not_to be_nil + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/security_spec.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/security_spec.rb new file mode 100644 index 0000000..601e808 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/security_spec.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "addressable/uri" + +describe Addressable::URI, "when created with a URI known to cause crashes " + + "in certain browsers" do + it "should parse correctly" do + uri = Addressable::URI.parse('%%30%30') + expect(uri.path).to eq('%%30%30') + expect(uri.normalize.path).to eq('%2500') + end + + it "should parse correctly as a full URI" do + uri = Addressable::URI.parse('http://www.example.com/%%30%30') + expect(uri.path).to eq('/%%30%30') + expect(uri.normalize.path).to eq('/%2500') + end +end + +describe Addressable::URI, "when created with a URI known to cause crashes " + + "in certain browsers" do + it "should parse correctly" do + uri = Addressable::URI.parse('لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.path).to eq('لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.normalize.path).to eq( + '%D9%84%D9%8F%D8%B5%D9%91%D8%A8%D9%8F%D9%84%D9%8F%D9%84%D8%B5%D9%91' + + '%D8%A8%D9%8F%D8%B1%D8%B1%D9%8B%20%E0%A5%A3%20%E0%A5%A3h%20%E0%A5' + + '%A3%20%E0%A5%A3%20%E5%86%97' + ) + end + + it "should parse correctly as a full URI" do + uri = Addressable::URI.parse('http://www.example.com/لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.path).to eq('/لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.normalize.path).to eq( + '/%D9%84%D9%8F%D8%B5%D9%91%D8%A8%D9%8F%D9%84%D9%8F%D9%84%D8%B5%D9%91' + + '%D8%A8%D9%8F%D8%B1%D8%B1%D9%8B%20%E0%A5%A3%20%E0%A5%A3h%20%E0%A5' + + '%A3%20%E0%A5%A3%20%E5%86%97' + ) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/template_spec.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/template_spec.rb new file mode 100644 index 0000000..d47589a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/template_spec.rb @@ -0,0 +1,1460 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "bigdecimal" +require "timeout" +require "addressable/template" + +shared_examples_for 'expands' do |tests| + tests.each do |template, expansion| + exp = expansion.is_a?(Array) ? expansion.first : expansion + it "#{template} to #{exp}" do + tmpl = Addressable::Template.new(template).expand(subject) + if expansion.is_a?(Array) + expect(expansion.any?{|i| i == tmpl.to_str}).to be true + else + expect(tmpl.to_str).to eq(expansion) + end + end + end +end + +describe "eql?" do + let(:template) { Addressable::Template.new('https://www.example.com/{foo}') } + it 'is equal when the pattern matches' do + other_template = Addressable::Template.new('https://www.example.com/{foo}') + expect(template).to be_eql(other_template) + expect(other_template).to be_eql(template) + end + it 'is not equal when the pattern differs' do + other_template = Addressable::Template.new('https://www.example.com/{bar}') + expect(template).to_not be_eql(other_template) + expect(other_template).to_not be_eql(template) + end + it 'is not equal to non-templates' do + uri = 'https://www.example.com/foo/bar' + addressable_template = Addressable::Template.new uri + addressable_uri = Addressable::URI.parse uri + expect(addressable_template).to_not be_eql(addressable_uri) + expect(addressable_uri).to_not be_eql(addressable_template) + end +end + +describe "==" do + let(:template) { Addressable::Template.new('https://www.example.com/{foo}') } + it 'is equal when the pattern matches' do + other_template = Addressable::Template.new('https://www.example.com/{foo}') + expect(template).to eq other_template + expect(other_template).to eq template + end + it 'is not equal when the pattern differs' do + other_template = Addressable::Template.new('https://www.example.com/{bar}') + expect(template).not_to eq other_template + expect(other_template).not_to eq template + end + it 'is not equal to non-templates' do + uri = 'https://www.example.com/foo/bar' + addressable_template = Addressable::Template.new uri + addressable_uri = Addressable::URI.parse uri + expect(addressable_template).not_to eq addressable_uri + expect(addressable_uri).not_to eq addressable_template + end +end + +describe "Type conversion" do + subject { + { + :var => true, + :hello => 1234, + :nothing => nil, + :sym => :symbolic, + :decimal => BigDecimal('1') + } + } + + it_behaves_like 'expands', { + '{var}' => 'true', + '{hello}' => '1234', + '{nothing}' => '', + '{sym}' => 'symbolic', + '{decimal}' => RUBY_VERSION < '2.4.0' ? 
'0.1E1' : '0.1e1' + } +end + +describe "Level 1:" do + subject { + {:var => "value", :hello => "Hello World!"} + } + it_behaves_like 'expands', { + '{var}' => 'value', + '{hello}' => 'Hello%20World%21' + } +end + +describe "Level 2" do + subject { + { + :var => "value", + :hello => "Hello World!", + :path => "/foo/bar" + } + } + context "Operator +:" do + it_behaves_like 'expands', { + '{+var}' => 'value', + '{+hello}' => 'Hello%20World!', + '{+path}/here' => '/foo/bar/here', + 'here?ref={+path}' => 'here?ref=/foo/bar' + } + end + context "Operator #:" do + it_behaves_like 'expands', { + 'X{#var}' => 'X#value', + 'X{#hello}' => 'X#Hello%20World!' + } + end +end + +describe "Level 3" do + subject { + { + :var => "value", + :hello => "Hello World!", + :empty => "", + :path => "/foo/bar", + :x => "1024", + :y => "768" + } + } + context "Operator nil (multiple vars):" do + it_behaves_like 'expands', { + 'map?{x,y}' => 'map?1024,768', + '{x,hello,y}' => '1024,Hello%20World%21,768' + } + end + context "Operator + (multiple vars):" do + it_behaves_like 'expands', { + '{+x,hello,y}' => '1024,Hello%20World!,768', + '{+path,x}/here' => '/foo/bar,1024/here' + } + end + context "Operator # (multiple vars):" do + it_behaves_like 'expands', { + '{#x,hello,y}' => '#1024,Hello%20World!,768', + '{#path,x}/here' => '#/foo/bar,1024/here' + } + end + context "Operator ." do + it_behaves_like 'expands', { + 'X{.var}' => 'X.value', + 'X{.x,y}' => 'X.1024.768' + } + end + context "Operator /" do + it_behaves_like 'expands', { + '{/var}' => '/value', + '{/var,x}/here' => '/value/1024/here' + } + end + context "Operator ;" do + it_behaves_like 'expands', { + '{;x,y}' => ';x=1024;y=768', + '{;x,y,empty}' => ';x=1024;y=768;empty' + } + end + context "Operator ?" do + it_behaves_like 'expands', { + '{?x,y}' => '?x=1024&y=768', + '{?x,y,empty}' => '?x=1024&y=768&empty=' + } + end + context "Operator &" do + it_behaves_like 'expands', { + '?fixed=yes{&x}' => '?fixed=yes&x=1024', + '{&x,y,empty}' => '&x=1024&y=768&empty=' + } + end +end + +describe "Level 4" do + subject { + { + :var => "value", + :hello => "Hello World!", + :path => "/foo/bar", + :semi => ";", + :list => %w(red green blue), + :keys => {"semi" => ';', "dot" => '.', "comma" => ','} + } + } + context "Expansion with value modifiers" do + it_behaves_like 'expands', { + '{var:3}' => 'val', + '{var:30}' => 'value', + '{list}' => 'red,green,blue', + '{list*}' => 'red,green,blue', + '{keys}' => [ + 'semi,%3B,dot,.,comma,%2C', + 'dot,.,semi,%3B,comma,%2C', + 'comma,%2C,semi,%3B,dot,.', + 'semi,%3B,comma,%2C,dot,.', + 'dot,.,comma,%2C,semi,%3B', + 'comma,%2C,dot,.,semi,%3B' + ], + '{keys*}' => [ + 'semi=%3B,dot=.,comma=%2C', + 'dot=.,semi=%3B,comma=%2C', + 'comma=%2C,semi=%3B,dot=.', + 'semi=%3B,comma=%2C,dot=.', + 'dot=.,comma=%2C,semi=%3B', + 'comma=%2C,dot=.,semi=%3B' + ] + } + end + context "Operator + with value modifiers" do + it_behaves_like 'expands', { + '{+path:6}/here' => '/foo/b/here', + '{+list}' => 'red,green,blue', + '{+list*}' => 'red,green,blue', + '{+keys}' => [ + 'semi,;,dot,.,comma,,', + 'dot,.,semi,;,comma,,', + 'comma,,,semi,;,dot,.', + 'semi,;,comma,,,dot,.', + 'dot,.,comma,,,semi,;', + 'comma,,,dot,.,semi,;' + ], + '{+keys*}' => [ + 'semi=;,dot=.,comma=,', + 'dot=.,semi=;,comma=,', + 'comma=,,semi=;,dot=.', + 'semi=;,comma=,,dot=.', + 'dot=.,comma=,,semi=;', + 'comma=,,dot=.,semi=;' + ] + } + end + context "Operator # with value modifiers" do + it_behaves_like 'expands', { + '{#path:6}/here' => '#/foo/b/here', + '{#list}' => 
'#red,green,blue', + '{#list*}' => '#red,green,blue', + '{#keys}' => [ + '#semi,;,dot,.,comma,,', + '#dot,.,semi,;,comma,,', + '#comma,,,semi,;,dot,.', + '#semi,;,comma,,,dot,.', + '#dot,.,comma,,,semi,;', + '#comma,,,dot,.,semi,;' + ], + '{#keys*}' => [ + '#semi=;,dot=.,comma=,', + '#dot=.,semi=;,comma=,', + '#comma=,,semi=;,dot=.', + '#semi=;,comma=,,dot=.', + '#dot=.,comma=,,semi=;', + '#comma=,,dot=.,semi=;' + ] + } + end + context "Operator . with value modifiers" do + it_behaves_like 'expands', { + 'X{.var:3}' => 'X.val', + 'X{.list}' => 'X.red,green,blue', + 'X{.list*}' => 'X.red.green.blue', + 'X{.keys}' => [ + 'X.semi,%3B,dot,.,comma,%2C', + 'X.dot,.,semi,%3B,comma,%2C', + 'X.comma,%2C,semi,%3B,dot,.', + 'X.semi,%3B,comma,%2C,dot,.', + 'X.dot,.,comma,%2C,semi,%3B', + 'X.comma,%2C,dot,.,semi,%3B' + ], + 'X{.keys*}' => [ + 'X.semi=%3B.dot=..comma=%2C', + 'X.dot=..semi=%3B.comma=%2C', + 'X.comma=%2C.semi=%3B.dot=.', + 'X.semi=%3B.comma=%2C.dot=.', + 'X.dot=..comma=%2C.semi=%3B', + 'X.comma=%2C.dot=..semi=%3B' + ] + } + end + context "Operator / with value modifiers" do + it_behaves_like 'expands', { + '{/var:1,var}' => '/v/value', + '{/list}' => '/red,green,blue', + '{/list*}' => '/red/green/blue', + '{/list*,path:4}' => '/red/green/blue/%2Ffoo', + '{/keys}' => [ + '/semi,%3B,dot,.,comma,%2C', + '/dot,.,semi,%3B,comma,%2C', + '/comma,%2C,semi,%3B,dot,.', + '/semi,%3B,comma,%2C,dot,.', + '/dot,.,comma,%2C,semi,%3B', + '/comma,%2C,dot,.,semi,%3B' + ], + '{/keys*}' => [ + '/semi=%3B/dot=./comma=%2C', + '/dot=./semi=%3B/comma=%2C', + '/comma=%2C/semi=%3B/dot=.', + '/semi=%3B/comma=%2C/dot=.', + '/dot=./comma=%2C/semi=%3B', + '/comma=%2C/dot=./semi=%3B' + ] + } + end + context "Operator ; with value modifiers" do + it_behaves_like 'expands', { + '{;hello:5}' => ';hello=Hello', + '{;list}' => ';list=red,green,blue', + '{;list*}' => ';list=red;list=green;list=blue', + '{;keys}' => [ + ';keys=semi,%3B,dot,.,comma,%2C', + ';keys=dot,.,semi,%3B,comma,%2C', + ';keys=comma,%2C,semi,%3B,dot,.', + ';keys=semi,%3B,comma,%2C,dot,.', + ';keys=dot,.,comma,%2C,semi,%3B', + ';keys=comma,%2C,dot,.,semi,%3B' + ], + '{;keys*}' => [ + ';semi=%3B;dot=.;comma=%2C', + ';dot=.;semi=%3B;comma=%2C', + ';comma=%2C;semi=%3B;dot=.', + ';semi=%3B;comma=%2C;dot=.', + ';dot=.;comma=%2C;semi=%3B', + ';comma=%2C;dot=.;semi=%3B' + ] + } + end + context "Operator ? 
with value modifiers" do + it_behaves_like 'expands', { + '{?var:3}' => '?var=val', + '{?list}' => '?list=red,green,blue', + '{?list*}' => '?list=red&list=green&list=blue', + '{?keys}' => [ + '?keys=semi,%3B,dot,.,comma,%2C', + '?keys=dot,.,semi,%3B,comma,%2C', + '?keys=comma,%2C,semi,%3B,dot,.', + '?keys=semi,%3B,comma,%2C,dot,.', + '?keys=dot,.,comma,%2C,semi,%3B', + '?keys=comma,%2C,dot,.,semi,%3B' + ], + '{?keys*}' => [ + '?semi=%3B&dot=.&comma=%2C', + '?dot=.&semi=%3B&comma=%2C', + '?comma=%2C&semi=%3B&dot=.', + '?semi=%3B&comma=%2C&dot=.', + '?dot=.&comma=%2C&semi=%3B', + '?comma=%2C&dot=.&semi=%3B' + ] + } + end + context "Operator & with value modifiers" do + it_behaves_like 'expands', { + '{&var:3}' => '&var=val', + '{&list}' => '&list=red,green,blue', + '{&list*}' => '&list=red&list=green&list=blue', + '{&keys}' => [ + '&keys=semi,%3B,dot,.,comma,%2C', + '&keys=dot,.,semi,%3B,comma,%2C', + '&keys=comma,%2C,semi,%3B,dot,.', + '&keys=semi,%3B,comma,%2C,dot,.', + '&keys=dot,.,comma,%2C,semi,%3B', + '&keys=comma,%2C,dot,.,semi,%3B' + ], + '{&keys*}' => [ + '&semi=%3B&dot=.&comma=%2C', + '&dot=.&semi=%3B&comma=%2C', + '&comma=%2C&semi=%3B&dot=.', + '&semi=%3B&comma=%2C&dot=.', + '&dot=.&comma=%2C&semi=%3B', + '&comma=%2C&dot=.&semi=%3B' + ] + } + end +end +describe "Modifiers" do + subject { + { + :var => "value", + :semi => ";", + :year => %w(1965 2000 2012), + :dom => %w(example com) + } + } + context "length" do + it_behaves_like 'expands', { + '{var:3}' => 'val', + '{var:30}' => 'value', + '{var}' => 'value', + '{semi}' => '%3B', + '{semi:2}' => '%3B' + } + end + context "explode" do + it_behaves_like 'expands', { + 'find{?year*}' => 'find?year=1965&year=2000&year=2012', + 'www{.dom*}' => 'www.example.com', + } + end +end +describe "Expansion" do + subject { + { + :count => ["one", "two", "three"], + :dom => ["example", "com"], + :dub => "me/too", + :hello => "Hello World!", + :half => "50%", + :var => "value", + :who => "fred", + :base => "http://example.com/home/", + :path => "/foo/bar", + :list => ["red", "green", "blue"], + :keys => {"semi" => ";","dot" => ".","comma" => ","}, + :v => "6", + :x => "1024", + :y => "768", + :empty => "", + :empty_keys => {}, + :undef => nil + } + } + context "concatenation" do + it_behaves_like 'expands', { + '{count}' => 'one,two,three', + '{count*}' => 'one,two,three', + '{/count}' => '/one,two,three', + '{/count*}' => '/one/two/three', + '{;count}' => ';count=one,two,three', + '{;count*}' => ';count=one;count=two;count=three', + '{?count}' => '?count=one,two,three', + '{?count*}' => '?count=one&count=two&count=three', + '{&count*}' => '&count=one&count=two&count=three' + } + end + context "simple expansion" do + it_behaves_like 'expands', { + '{var}' => 'value', + '{hello}' => 'Hello%20World%21', + '{half}' => '50%25', + 'O{empty}X' => 'OX', + 'O{undef}X' => 'OX', + '{x,y}' => '1024,768', + '{x,hello,y}' => '1024,Hello%20World%21,768', + '?{x,empty}' => '?1024,', + '?{x,undef}' => '?1024', + '?{undef,y}' => '?768', + '{var:3}' => 'val', + '{var:30}' => 'value', + '{list}' => 'red,green,blue', + '{list*}' => 'red,green,blue', + '{keys}' => [ + 'semi,%3B,dot,.,comma,%2C', + 'dot,.,semi,%3B,comma,%2C', + 'comma,%2C,semi,%3B,dot,.', + 'semi,%3B,comma,%2C,dot,.', + 'dot,.,comma,%2C,semi,%3B', + 'comma,%2C,dot,.,semi,%3B' + ], + '{keys*}' => [ + 'semi=%3B,dot=.,comma=%2C', + 'dot=.,semi=%3B,comma=%2C', + 'comma=%2C,semi=%3B,dot=.', + 'semi=%3B,comma=%2C,dot=.', + 'dot=.,comma=%2C,semi=%3B', + 'comma=%2C,dot=.,semi=%3B' + ] + } + end + context 
"reserved expansion (+)" do + it_behaves_like 'expands', { + '{+var}' => 'value', + '{+hello}' => 'Hello%20World!', + '{+half}' => '50%25', + '{base}index' => 'http%3A%2F%2Fexample.com%2Fhome%2Findex', + '{+base}index' => 'http://example.com/home/index', + 'O{+empty}X' => 'OX', + 'O{+undef}X' => 'OX', + '{+path}/here' => '/foo/bar/here', + 'here?ref={+path}' => 'here?ref=/foo/bar', + 'up{+path}{var}/here' => 'up/foo/barvalue/here', + '{+x,hello,y}' => '1024,Hello%20World!,768', + '{+path,x}/here' => '/foo/bar,1024/here', + '{+path:6}/here' => '/foo/b/here', + '{+list}' => 'red,green,blue', + '{+list*}' => 'red,green,blue', + '{+keys}' => [ + 'semi,;,dot,.,comma,,', + 'dot,.,semi,;,comma,,', + 'comma,,,semi,;,dot,.', + 'semi,;,comma,,,dot,.', + 'dot,.,comma,,,semi,;', + 'comma,,,dot,.,semi,;' + ], + '{+keys*}' => [ + 'semi=;,dot=.,comma=,', + 'dot=.,semi=;,comma=,', + 'comma=,,semi=;,dot=.', + 'semi=;,comma=,,dot=.', + 'dot=.,comma=,,semi=;', + 'comma=,,dot=.,semi=;' + ] + } + end + context "fragment expansion (#)" do + it_behaves_like 'expands', { + '{#var}' => '#value', + '{#hello}' => '#Hello%20World!', + '{#half}' => '#50%25', + 'foo{#empty}' => 'foo#', + 'foo{#undef}' => 'foo', + '{#x,hello,y}' => '#1024,Hello%20World!,768', + '{#path,x}/here' => '#/foo/bar,1024/here', + '{#path:6}/here' => '#/foo/b/here', + '{#list}' => '#red,green,blue', + '{#list*}' => '#red,green,blue', + '{#keys}' => [ + '#semi,;,dot,.,comma,,', + '#dot,.,semi,;,comma,,', + '#comma,,,semi,;,dot,.', + '#semi,;,comma,,,dot,.', + '#dot,.,comma,,,semi,;', + '#comma,,,dot,.,semi,;' + ], + '{#keys*}' => [ + '#semi=;,dot=.,comma=,', + '#dot=.,semi=;,comma=,', + '#comma=,,semi=;,dot=.', + '#semi=;,comma=,,dot=.', + '#dot=.,comma=,,semi=;', + '#comma=,,dot=.,semi=;' + ] + } + end + context "label expansion (.)" do + it_behaves_like 'expands', { + '{.who}' => '.fred', + '{.who,who}' => '.fred.fred', + '{.half,who}' => '.50%25.fred', + 'www{.dom*}' => 'www.example.com', + 'X{.var}' => 'X.value', + 'X{.empty}' => 'X.', + 'X{.undef}' => 'X', + 'X{.var:3}' => 'X.val', + 'X{.list}' => 'X.red,green,blue', + 'X{.list*}' => 'X.red.green.blue', + 'X{.keys}' => [ + 'X.semi,%3B,dot,.,comma,%2C', + 'X.dot,.,semi,%3B,comma,%2C', + 'X.comma,%2C,semi,%3B,dot,.', + 'X.semi,%3B,comma,%2C,dot,.', + 'X.dot,.,comma,%2C,semi,%3B', + 'X.comma,%2C,dot,.,semi,%3B' + ], + 'X{.keys*}' => [ + 'X.semi=%3B.dot=..comma=%2C', + 'X.dot=..semi=%3B.comma=%2C', + 'X.comma=%2C.semi=%3B.dot=.', + 'X.semi=%3B.comma=%2C.dot=.', + 'X.dot=..comma=%2C.semi=%3B', + 'X.comma=%2C.dot=..semi=%3B' + ], + 'X{.empty_keys}' => 'X', + 'X{.empty_keys*}' => 'X' + } + end + context "path expansion (/)" do + it_behaves_like 'expands', { + '{/who}' => '/fred', + '{/who,who}' => '/fred/fred', + '{/half,who}' => '/50%25/fred', + '{/who,dub}' => '/fred/me%2Ftoo', + '{/var}' => '/value', + '{/var,empty}' => '/value/', + '{/var,undef}' => '/value', + '{/var,x}/here' => '/value/1024/here', + '{/var:1,var}' => '/v/value', + '{/list}' => '/red,green,blue', + '{/list*}' => '/red/green/blue', + '{/list*,path:4}' => '/red/green/blue/%2Ffoo', + '{/keys}' => [ + '/semi,%3B,dot,.,comma,%2C', + '/dot,.,semi,%3B,comma,%2C', + '/comma,%2C,semi,%3B,dot,.', + '/semi,%3B,comma,%2C,dot,.', + '/dot,.,comma,%2C,semi,%3B', + '/comma,%2C,dot,.,semi,%3B' + ], + '{/keys*}' => [ + '/semi=%3B/dot=./comma=%2C', + '/dot=./semi=%3B/comma=%2C', + '/comma=%2C/semi=%3B/dot=.', + '/semi=%3B/comma=%2C/dot=.', + '/dot=./comma=%2C/semi=%3B', + '/comma=%2C/dot=./semi=%3B' + ] + } + end + context "path-style expansion 
(;)" do + it_behaves_like 'expands', { + '{;who}' => ';who=fred', + '{;half}' => ';half=50%25', + '{;empty}' => ';empty', + '{;v,empty,who}' => ';v=6;empty;who=fred', + '{;v,bar,who}' => ';v=6;who=fred', + '{;x,y}' => ';x=1024;y=768', + '{;x,y,empty}' => ';x=1024;y=768;empty', + '{;x,y,undef}' => ';x=1024;y=768', + '{;hello:5}' => ';hello=Hello', + '{;list}' => ';list=red,green,blue', + '{;list*}' => ';list=red;list=green;list=blue', + '{;keys}' => [ + ';keys=semi,%3B,dot,.,comma,%2C', + ';keys=dot,.,semi,%3B,comma,%2C', + ';keys=comma,%2C,semi,%3B,dot,.', + ';keys=semi,%3B,comma,%2C,dot,.', + ';keys=dot,.,comma,%2C,semi,%3B', + ';keys=comma,%2C,dot,.,semi,%3B' + ], + '{;keys*}' => [ + ';semi=%3B;dot=.;comma=%2C', + ';dot=.;semi=%3B;comma=%2C', + ';comma=%2C;semi=%3B;dot=.', + ';semi=%3B;comma=%2C;dot=.', + ';dot=.;comma=%2C;semi=%3B', + ';comma=%2C;dot=.;semi=%3B' + ] + } + end + context "form query expansion (?)" do + it_behaves_like 'expands', { + '{?who}' => '?who=fred', + '{?half}' => '?half=50%25', + '{?x,y}' => '?x=1024&y=768', + '{?x,y,empty}' => '?x=1024&y=768&empty=', + '{?x,y,undef}' => '?x=1024&y=768', + '{?var:3}' => '?var=val', + '{?list}' => '?list=red,green,blue', + '{?list*}' => '?list=red&list=green&list=blue', + '{?keys}' => [ + '?keys=semi,%3B,dot,.,comma,%2C', + '?keys=dot,.,semi,%3B,comma,%2C', + '?keys=comma,%2C,semi,%3B,dot,.', + '?keys=semi,%3B,comma,%2C,dot,.', + '?keys=dot,.,comma,%2C,semi,%3B', + '?keys=comma,%2C,dot,.,semi,%3B' + ], + '{?keys*}' => [ + '?semi=%3B&dot=.&comma=%2C', + '?dot=.&semi=%3B&comma=%2C', + '?comma=%2C&semi=%3B&dot=.', + '?semi=%3B&comma=%2C&dot=.', + '?dot=.&comma=%2C&semi=%3B', + '?comma=%2C&dot=.&semi=%3B' + ] + } + end + context "form query expansion (&)" do + it_behaves_like 'expands', { + '{&who}' => '&who=fred', + '{&half}' => '&half=50%25', + '?fixed=yes{&x}' => '?fixed=yes&x=1024', + '{&x,y,empty}' => '&x=1024&y=768&empty=', + '{&x,y,undef}' => '&x=1024&y=768', + '{&var:3}' => '&var=val', + '{&list}' => '&list=red,green,blue', + '{&list*}' => '&list=red&list=green&list=blue', + '{&keys}' => [ + '&keys=semi,%3B,dot,.,comma,%2C', + '&keys=dot,.,semi,%3B,comma,%2C', + '&keys=comma,%2C,semi,%3B,dot,.', + '&keys=semi,%3B,comma,%2C,dot,.', + '&keys=dot,.,comma,%2C,semi,%3B', + '&keys=comma,%2C,dot,.,semi,%3B' + ], + '{&keys*}' => [ + '&semi=%3B&dot=.&comma=%2C', + '&dot=.&semi=%3B&comma=%2C', + '&comma=%2C&semi=%3B&dot=.', + '&semi=%3B&comma=%2C&dot=.', + '&dot=.&comma=%2C&semi=%3B', + '&comma=%2C&dot=.&semi=%3B' + ] + } + end + context "non-string key in match data" do + subject {Addressable::Template.new("http://example.com/{one}")} + + it "raises TypeError" do + expect { subject.expand(Object.new => "1") }.to raise_error TypeError + end + end +end + +class ExampleTwoProcessor + def self.restore(name, value) + return value.gsub(/-/, " ") if name == "query" + return value + end + + def self.match(name) + return ".*?" if name == "first" + return ".*" + end + def self.validate(name, value) + return !!(value =~ /^[\w ]+$/) if name == "query" + return true + end + + def self.transform(name, value) + return value.gsub(/ /, "+") if name == "query" + return value + end +end + +class DumbProcessor + def self.match(name) + return ".*?" 
if name == "first" + end +end + +describe Addressable::Template do + describe 'initialize' do + context 'with a non-string' do + it 'raises a TypeError' do + expect { Addressable::Template.new(nil) }.to raise_error(TypeError) + end + end + end + + describe 'freeze' do + subject { Addressable::Template.new("http://example.com/{first}/{+second}/") } + it 'freezes the template' do + expect(subject.freeze).to be_frozen + end + end + + describe "Matching" do + let(:uri){ + Addressable::URI.parse( + "http://example.com/search/an-example-search-query/" + ) + } + let(:uri2){ + Addressable::URI.parse("http://example.com/a/b/c/") + } + let(:uri3){ + Addressable::URI.parse("http://example.com/;a=1;b=2;c=3;first=foo") + } + let(:uri4){ + Addressable::URI.parse("http://example.com/?a=1&b=2&c=3&first=foo") + } + let(:uri5){ + "http://example.com/foo" + } + context "first uri with ExampleTwoProcessor" do + subject { + Addressable::Template.new( + "http://example.com/search/{query}/" + ).match(uri, ExampleTwoProcessor) + } + its(:variables){ should == ["query"] } + its(:captures){ should == ["an example search query"] } + end + + context "second uri with ExampleTwoProcessor" do + subject { + Addressable::Template.new( + "http://example.com/{first}/{+second}/" + ).match(uri2, ExampleTwoProcessor) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", "b/c"] } + end + + context "second uri with DumbProcessor" do + subject { + Addressable::Template.new( + "http://example.com/{first}/{+second}/" + ).match(uri2, DumbProcessor) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", "b/c"] } + end + + context "second uri" do + subject { + Addressable::Template.new( + "http://example.com/{first}{/second*}/" + ).match(uri2) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", ["b","c"]] } + end + context "third uri" do + subject { + Addressable::Template.new( + "http://example.com/{;hash*,first}" + ).match(uri3) + } + its(:variables){ should == ["hash", "first"] } + its(:captures){ should == [ + {"a" => "1", "b" => "2", "c" => "3", "first" => "foo"}, nil] } + end + # Note that this expansion is impossible to revert deterministically - the + # * operator means first could have been a key of hash or a separate key. + # Semantically, a separate key is more likely, but both are possible. 
+ context "fourth uri" do + subject { + Addressable::Template.new( + "http://example.com/{?hash*,first}" + ).match(uri4) + } + its(:variables){ should == ["hash", "first"] } + its(:captures){ should == [ + {"a" => "1", "b" => "2", "c" => "3", "first"=> "foo"}, nil] } + end + context "fifth uri" do + subject { + Addressable::Template.new( + "http://example.com/{path}{?hash*,first}" + ).match(uri5) + } + its(:variables){ should == ["path", "hash", "first"] } + its(:captures){ should == ["foo", nil, nil] } + end + end + + describe 'match' do + subject { Addressable::Template.new('http://example.com/first/second/') } + context 'when the URI is the same as the template' do + it 'returns the match data itself with an empty mapping' do + uri = Addressable::URI.parse('http://example.com/first/second/') + match_data = subject.match(uri) + expect(match_data).to be_an Addressable::Template::MatchData + expect(match_data.uri).to eq(uri) + expect(match_data.template).to eq(subject) + expect(match_data.mapping).to be_empty + expect(match_data.inspect).to be_an String + end + end + end + + describe "extract" do + let(:template) { + Addressable::Template.new( + "http://{host}{/segments*}/{?one,two,bogus}{#fragment}" + ) + } + let(:uri){ "http://example.com/a/b/c/?one=1&two=2#foo" } + let(:uri2){ "http://example.com/a/b/c/#foo" } + it "should be able to extract with queries" do + expect(template.extract(uri)).to eq({ + "host" => "example.com", + "segments" => %w(a b c), + "one" => "1", + "bogus" => nil, + "two" => "2", + "fragment" => "foo" + }) + end + it "should be able to extract without queries" do + expect(template.extract(uri2)).to eq({ + "host" => "example.com", + "segments" => %w(a b c), + "one" => nil, + "bogus" => nil, + "two" => nil, + "fragment" => "foo" + }) + end + + context "issue #137" do + subject { Addressable::Template.new('/path{?page,per_page}') } + + it "can match empty" do + data = subject.extract("/path") + expect(data["page"]).to eq(nil) + expect(data["per_page"]).to eq(nil) + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match first var" do + data = subject.extract("/path?page=1") + expect(data["page"]).to eq("1") + expect(data["per_page"]).to eq(nil) + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match second var" do + data = subject.extract("/path?per_page=1") + expect(data["page"]).to eq(nil) + expect(data["per_page"]).to eq("1") + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match both vars" do + data = subject.extract("/path?page=2&per_page=1") + expect(data["page"]).to eq("2") + expect(data["per_page"]).to eq("1") + expect(data.keys.sort).to eq(['page', 'per_page']) + end + end + end + + describe "Partial expand with symbols" do + context "partial_expand with two simple values" do + subject { + Addressable::Template.new("http://example.com/{one}/{two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/1/{two}/" + ) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1", :three => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand form style query with missing param at beginning" do + subject { + Addressable::Template.new("http://example.com/{?one,two}/") + } + it "builds a new pattern" do + 
expect(subject.partial_expand(:two => "2").pattern).to eq( + "http://example.com/?two=2{&one}/" + ) + end + end + context "issue #307 - partial_expand form query with nil params" do + subject do + Addressable::Template.new("http://example.com/{?one,two,three}/") + end + it "builds a new pattern with two=nil" do + expect(subject.partial_expand(two: nil).pattern).to eq( + "http://example.com/{?one}{&three}/" + ) + end + it "builds a new pattern with one=nil and two=nil" do + expect(subject.partial_expand(one: nil, two: nil).pattern).to eq( + "http://example.com/{?three}/" + ) + end + it "builds a new pattern with one=1 and two=nil" do + expect(subject.partial_expand(one: 1, two: nil).pattern).to eq( + "http://example.com/?one=1{&three}/" + ) + end + it "builds a new pattern with one=nil and two=2" do + expect(subject.partial_expand(one: nil, two: 2).pattern).to eq( + "http://example.com/?two=2{&three}/" + ) + end + it "builds a new pattern with one=nil" do + expect(subject.partial_expand(one: nil).pattern).to eq( + "http://example.com/{?two}{&three}/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + context "partial expand with unicode values" do + subject do + Addressable::Template.new("http://example.com/{resource}/{query}/") + end + it "normalizes unicode by default" do + template = subject.partial_expand("query" => "Cafe\u0301") + expect(template.pattern).to eq( + "http://example.com/{resource}/Caf%C3%A9/" + ) + end + + it "does not normalize unicode when byte semantics requested" do + template = subject.partial_expand({"query" => "Cafe\u0301"}, nil, false) + expect(template.pattern).to eq( + "http://example.com/{resource}/Cafe%CC%81/" + ) + end + end + end + describe "Partial expand with strings" do + context "partial_expand with two simple values" do + subject { + Addressable::Template.new("http://example.com/{one}/{two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1/{two}/" + ) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1", "three" => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + end + describe "Expand" do + context "expand with unicode values" do + subject do + Addressable::Template.new("http://example.com/search/{query}/") + end + it "normalizes unicode by 
default" do + uri = subject.expand("query" => "Cafe\u0301").to_str + expect(uri).to eq("http://example.com/search/Caf%C3%A9/") + end + + it "does not normalize unicode when byte semantics requested" do + uri = subject.expand({ "query" => "Cafe\u0301" }, nil, false).to_str + expect(uri).to eq("http://example.com/search/Cafe%CC%81/") + end + end + context "expand with a processor" do + subject { + Addressable::Template.new("http://example.com/search/{query}/") + } + it "processes spaces" do + expect(subject.expand({"query" => "an example search query"}, + ExampleTwoProcessor).to_str).to eq( + "http://example.com/search/an+example+search+query/" + ) + end + it "validates" do + expect{ + subject.expand({"query" => "Bogus!"}, + ExampleTwoProcessor).to_str + }.to raise_error(Addressable::Template::InvalidTemplateValueError) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1", "three" => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + end + context "Matching with operators" do + describe "Level 1:" do + subject { Addressable::Template.new("foo{foo}/{bar}baz") } + it "can match" do + data = subject.match("foofoo/bananabaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("banana") + end + it "can fail" do + expect(subject.match("bar/foo")).to be_nil + expect(subject.match("foobaz")).to be_nil + end + it "can match empty" do + data = subject.match("foo/baz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq(nil) + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + + describe "Level 2:" do + subject { Addressable::Template.new("foo{+foo}{#bar}baz") } + it "can match" do + data = subject.match("foo/test/banana#bazbaz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq("baz") + end + it "can match empty level 2 #" do + data = subject.match("foo/test/bananabaz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq(nil) + data = subject.match("foo/test/banana#baz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq("") + end + it "can match empty level 2 +" do + data = subject.match("foobaz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq(nil) + data = subject.match("foo#barbaz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + + describe "Level 3:" do + context "no operator" do + subject { Addressable::Template.new("foo{foo,bar}baz") } + it "can match" do + data = subject.match("foofoo,barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" 
do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "+ operator" do + subject { Addressable::Template.new("foo{+foo,bar}baz") } + it "can match" do + data = subject.match("foofoo/bar,barbaz") + expect(data.mapping["bar"]).to eq("foo/bar,bar") + expect(data.mapping["foo"]).to eq("") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context ". operator" do + subject { Addressable::Template.new("foo{.foo,bar}baz") } + it "can match" do + data = subject.match("foo.foo.barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "/ operator" do + subject { Addressable::Template.new("foo{/foo,bar}baz") } + it "can match" do + data = subject.match("foo/foo/barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "; operator" do + subject { Addressable::Template.new("foo{;foo,bar,baz}baz") } + it "can match" do + data = subject.match("foo;foo=bar%20baz;bar=foo;bazbaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + expect(data.mapping["baz"]).to eq("") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar baz)) + end + end + context "? operator" do + context "test" do + subject { Addressable::Template.new("foo{?foo,bar}baz") } + it "can match" do + data = subject.match("foo?foo=bar%20baz&bar=foobaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar)) + end + end + + context "issue #137" do + subject { Addressable::Template.new('/path{?page,per_page}') } + + it "can match empty" do + data = subject.match("/path") + expect(data.mapping["page"]).to eq(nil) + expect(data.mapping["per_page"]).to eq(nil) + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match first var" do + data = subject.match("/path?page=1") + expect(data.mapping["page"]).to eq("1") + expect(data.mapping["per_page"]).to eq(nil) + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match second var" do + data = subject.match("/path?per_page=1") + expect(data.mapping["page"]).to eq(nil) + expect(data.mapping["per_page"]).to eq("1") + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match both vars" do + data = subject.match("/path?page=2&per_page=1") + expect(data.mapping["page"]).to eq("2") + expect(data.mapping["per_page"]).to eq("1") + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + end + + context "issue #71" do + subject { Addressable::Template.new("http://cyberscore.dev/api/users{?username}") } + it "can match" do + data = subject.match("http://cyberscore.dev/api/users?username=foobaz") + expect(data.mapping["username"]).to eq("foobaz") + end + it "lists vars" do + expect(subject.variables).to eq(%w(username)) + expect(subject.keys).to eq(%w(username)) + end + end + end + context "& operator" do + subject { Addressable::Template.new("foo{&foo,bar}baz") } + it "can match" do + data = subject.match("foo&foo=bar%20baz&bar=foobaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar)) + end + end + end + end + + context 
"support regexes:" do + context "EXPRESSION" do + subject { Addressable::Template::EXPRESSION } + it "should be able to match an expression" do + expect(subject).to match("{foo}") + expect(subject).to match("{foo,9}") + expect(subject).to match("{foo.bar,baz}") + expect(subject).to match("{+foo.bar,baz}") + expect(subject).to match("{foo,foo%20bar}") + expect(subject).to match("{#foo:20,baz*}") + expect(subject).to match("stuff{#foo:20,baz*}things") + end + it "should fail on non vars" do + expect(subject).not_to match("!{foo") + expect(subject).not_to match("{foo.bar.}") + expect(subject).not_to match("!{}") + end + end + context "VARNAME" do + subject { Addressable::Template::VARNAME } + it "should be able to match a variable" do + expect(subject).to match("foo") + expect(subject).to match("9") + expect(subject).to match("foo.bar") + expect(subject).to match("foo_bar") + expect(subject).to match("foo_bar.baz") + expect(subject).to match("foo%20bar") + expect(subject).to match("foo%20bar.baz") + end + it "should fail on non vars" do + expect(subject).not_to match("!foo") + expect(subject).not_to match("foo.bar.") + expect(subject).not_to match("foo%2%00bar") + expect(subject).not_to match("foo_ba%r") + expect(subject).not_to match("foo_bar*") + expect(subject).not_to match("foo_bar:20") + end + + it 'should parse in a reasonable time' do + expect do + Timeout.timeout(0.1) do + expect(subject).not_to match("0"*25 + "!") + end + end.not_to raise_error + end + end + context "VARIABLE_LIST" do + subject { Addressable::Template::VARIABLE_LIST } + it "should be able to match a variable list" do + expect(subject).to match("foo,bar") + expect(subject).to match("foo") + expect(subject).to match("foo,bar*,baz") + expect(subject).to match("foo.bar,bar_baz*,baz:12") + end + it "should fail on non vars" do + expect(subject).not_to match(",foo,bar*,baz") + expect(subject).not_to match("foo,*bar,baz") + expect(subject).not_to match("foo,,bar*,baz") + end + end + context "VARSPEC" do + subject { Addressable::Template::VARSPEC } + it "should be able to match a variable with modifier" do + expect(subject).to match("9:8") + expect(subject).to match("foo.bar*") + expect(subject).to match("foo_bar:12") + expect(subject).to match("foo_bar.baz*") + expect(subject).to match("foo%20bar:12") + expect(subject).to match("foo%20bar.baz*") + end + it "should fail on non vars" do + expect(subject).not_to match("!foo") + expect(subject).not_to match("*foo") + expect(subject).not_to match("fo*o") + expect(subject).not_to match("fo:o") + expect(subject).not_to match("foo:") + end + end + end +end + +describe Addressable::Template::MatchData do + let(:template) { Addressable::Template.new('{foo}/{bar}') } + subject(:its) { template.match('ab/cd') } + its(:uri) { should == Addressable::URI.parse('ab/cd') } + its(:template) { should == template } + its(:mapping) { should == { 'foo' => 'ab', 'bar' => 'cd' } } + its(:variables) { should == ['foo', 'bar'] } + its(:keys) { should == ['foo', 'bar'] } + its(:names) { should == ['foo', 'bar'] } + its(:values) { should == ['ab', 'cd'] } + its(:captures) { should == ['ab', 'cd'] } + its(:to_a) { should == ['ab/cd', 'ab', 'cd'] } + its(:to_s) { should == 'ab/cd' } + its(:string) { should == its.to_s } + its(:pre_match) { should == "" } + its(:post_match) { should == "" } + + describe 'values_at' do + it 'returns an array with the values' do + expect(its.values_at(0, 2)).to eq(['ab/cd', 'cd']) + end + it 'allows mixing integer an string keys' do + expect(its.values_at('foo', 1)).to 
eq(['ab', 'ab']) + end + it 'accepts unknown keys' do + expect(its.values_at('baz', 'foo')).to eq([nil, 'ab']) + end + end + + describe '[]' do + context 'string key' do + it 'returns the corresponding capture' do + expect(its['foo']).to eq('ab') + expect(its['bar']).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its['baz']).to be_nil + end + end + context 'symbol key' do + it 'returns the corresponding capture' do + expect(its[:foo]).to eq('ab') + expect(its[:bar]).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its[:baz]).to be_nil + end + end + context 'integer key' do + it 'returns the full URI for index 0' do + expect(its[0]).to eq('ab/cd') + end + it 'returns the corresponding capture' do + expect(its[1]).to eq('ab') + expect(its[2]).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its[3]).to be_nil + end + end + context 'other key' do + it 'raises an exception' do + expect { its[Object.new] }.to raise_error(TypeError) + end + end + context 'with length' do + it 'returns an array starting at index with given length' do + expect(its[0, 2]).to eq(['ab/cd', 'ab']) + expect(its[2, 1]).to eq(['cd']) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/uri_spec.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/uri_spec.rb new file mode 100644 index 0000000..00baaac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/addressable/uri_spec.rb @@ -0,0 +1,6665 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +require "spec_helper" + +require "addressable/uri" +require "uri" +require "ipaddr" + +if !"".respond_to?("force_encoding") + class String + def force_encoding(encoding) + @encoding = encoding + end + + def encoding + @encoding ||= Encoding::ASCII_8BIT + end + end + + class Encoding + def initialize(name) + @name = name + end + + def to_s + return @name + end + + UTF_8 = Encoding.new("UTF-8") + ASCII_8BIT = Encoding.new("US-ASCII") + end +end + +module Fake + module URI + class HTTP + def initialize(uri) + @uri = uri + end + + def to_s + return @uri.to_s + end + + alias :to_str :to_s + end + end +end + +describe Addressable::URI, "when created with a non-numeric port number" do + it "should raise an error" do + expect do + Addressable::URI.new(:port => "bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a invalid encoded port number" do + it "should raise an error" do + expect do + Addressable::URI.new(:port => "%eb") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a non-string scheme" do + it "should raise an error" do + expect do + Addressable::URI.new(:scheme => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string user" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string password" do + it "should raise an error" do + expect do + Addressable::URI.new(:password => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string userinfo" do + it "should raise an error" do + expect do + Addressable::URI.new(:userinfo => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string host" do + it "should raise an error" do + expect do + Addressable::URI.new(:host => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string authority" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string path" do + it "should raise an error" do + expect do + Addressable::URI.new(:path => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string query" do + it "should raise an error" do + expect do + Addressable::URI.new(:query => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string fragment" do + it "should raise an error" do + expect do + Addressable::URI.new(:fragment => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a scheme but no hierarchical " + + "segment" do + it "should raise an error" do + expect do + Addressable::URI.parse("http:") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "quote handling" do + describe 'in host name' do + it "should raise an error for single quote" do + expect do + Addressable::URI.parse("http://local\"host/") + end.to raise_error(Addressable::URI::InvalidURIError) + end + end +end + +describe Addressable::URI, "newline normalization" do + it "should not accept newlines in scheme" do + expect do + 
Addressable::URI.parse("ht%0atp://localhost/") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not unescape newline in path" do + uri = Addressable::URI.parse("http://localhost/%0a").normalize + expect(uri.to_s).to eq("http://localhost/%0A") + end + + it "should not unescape newline in hostname" do + uri = Addressable::URI.parse("http://local%0ahost/").normalize + expect(uri.to_s).to eq("http://local%0Ahost/") + end + + it "should not unescape newline in username" do + uri = Addressable::URI.parse("http://foo%0abar@localhost/").normalize + expect(uri.to_s).to eq("http://foo%0Abar@localhost/") + end + + it "should not unescape newline in username" do + uri = Addressable::URI.parse("http://example:foo%0abar@example/").normalize + expect(uri.to_s).to eq("http://example:foo%0Abar@example/") + end + + it "should not accept newline in hostname" do + uri = Addressable::URI.parse("http://localhost/") + expect do + uri.host = "local\nhost" + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with ambiguous path" do + it "should raise an error" do + expect do + Addressable::URI.parse("::http") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with an invalid host" do + it "should raise an error" do + expect do + Addressable::URI.new(:host => "") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host consisting of " + + "sub-delims characters" do + it "should not raise an error" do + expect do + Addressable::URI.new( + :host => Addressable::URI::CharacterClasses::SUB_DELIMS.gsub(/\\/, '') + ) + end.not_to raise_error + end +end + +describe Addressable::URI, "when created with a host consisting of " + + "unreserved characters" do + it "should not raise an error" do + expect do + Addressable::URI.new( + :host => Addressable::URI::CharacterClasses::UNRESERVED.gsub(/\\/, '') + ) + end.not_to raise_error + end +end + +describe Addressable::URI, "when created from nil components" do + before do + @uri = Addressable::URI.new + end + + it "should have a nil site value" do + expect(@uri.site).to eq(nil) + end + + it "should have an empty path" do + expect(@uri.path).to eq("") + end + + it "should be an empty uri" do + expect(@uri.to_s).to eq("") + end + + it "should have a nil default port" do + expect(@uri.default_port).to eq(nil) + end + + it "should be empty" do + expect(@uri).to be_empty + end + + it "should raise an error if the scheme is set to whitespace" do + expect do + @uri.scheme = "\t \n" + end.to raise_error(Addressable::URI::InvalidURIError, /'\t \n'/) + end + + it "should raise an error if the scheme is set to all digits" do + expect do + @uri.scheme = "123" + end.to raise_error(Addressable::URI::InvalidURIError, /'123'/) + end + + it "should raise an error if the scheme begins with a digit" do + expect do + @uri.scheme = "1scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'1scheme'/) + end + + it "should raise an error if the scheme begins with a plus" do + expect do + @uri.scheme = "+scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'\+scheme'/) + end + + it "should raise an error if the scheme begins with a dot" do + expect do + @uri.scheme = ".scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'\.scheme'/) + end + + it "should raise an error if the scheme begins with a dash" do + expect do + @uri.scheme = "-scheme" + end.to 
raise_error(Addressable::URI::InvalidURIError, /'-scheme'/) + end + + it "should raise an error if the scheme contains an illegal character" do + expect do + @uri.scheme = "scheme!" + end.to raise_error(Addressable::URI::InvalidURIError, /'scheme!'/) + end + + it "should raise an error if the scheme contains whitespace" do + expect do + @uri.scheme = "sch eme" + end.to raise_error(Addressable::URI::InvalidURIError, /'sch eme'/) + end + + it "should raise an error if the scheme contains a newline" do + expect do + @uri.scheme = "sch\neme" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.user = "user" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.password = "pass" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.scheme = "http" + @uri.fragment = "fragment" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.fragment = "fragment" + @uri.scheme = "http" + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when initialized from individual components" do + before do + @uri = Addressable::URI.new( + :scheme => "http", + :user => "user", + :password => "password", + :host => "example.com", + :port => 8080, + :path => "/path", + :query => "query=value", + :fragment => "fragment" + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'com' for #tld" do + expect(@uri.tld).to eq("com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + 
end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when initialized from " + + "frozen individual components" do + before do + @uri = Addressable::URI.new( + :scheme => "http".freeze, + :user => "user".freeze, + :password => "password".freeze, + :host => "example.com".freeze, + :port => "8080".freeze, + :path => "/path".freeze, + :query => "query=value".freeze, + :fragment => "fragment".freeze + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to 
eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from a frozen string" do + before do + @uri = Addressable::URI.parse( + "http://user:password@example.com:8080/path?query=value#fragment".freeze + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + 
expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when frozen" do + before do + @uri = Addressable::URI.new.freeze + end + + it "returns nil for #scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "returns nil for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq(nil) + end + + it "returns nil for #user" do + expect(@uri.user).to eq(nil) + end + + it "returns nil for #normalized_user" do + expect(@uri.normalized_user).to eq(nil) + end + + it "returns nil for #password" do + expect(@uri.password).to eq(nil) + end + + it "returns nil for #normalized_password" do + expect(@uri.normalized_password).to eq(nil) + end + + it "returns nil for #userinfo" do + expect(@uri.userinfo).to eq(nil) + end + + it "returns nil for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "returns nil for #host" do + expect(@uri.host).to eq(nil) + end + + it "returns nil for #normalized_host" do + expect(@uri.normalized_host).to eq(nil) + end + + it "returns nil for #authority" do + expect(@uri.authority).to eq(nil) + end + + it "returns nil for #normalized_authority" do + expect(@uri.normalized_authority).to eq(nil) + end + + it "returns nil for #port" do + expect(@uri.port).to eq(nil) + end + + it "returns nil for #normalized_port" do + expect(@uri.normalized_port).to eq(nil) + end + + it "returns nil for #default_port" do + expect(@uri.default_port).to eq(nil) + end + + it "returns nil for #site" do + expect(@uri.site).to eq(nil) + end + + it "returns nil for #normalized_site" do + expect(@uri.normalized_site).to eq(nil) + end + + it "returns '' for #path" do + expect(@uri.path).to eq('') + end + + it "returns '' for #normalized_path" do + expect(@uri.normalized_path).to eq('') + end + + it "returns nil for #query" do + expect(@uri.query).to eq(nil) + end + + it "returns nil for #normalized_query" do + expect(@uri.normalized_query).to eq(nil) + end + + it "returns nil for #fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "returns nil for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq(nil) + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq('') + end + + it "should be empty" do + expect(@uri).to be_empty + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not be frozen after duping" do + expect(@uri.dup).not_to be_frozen + end + + it "should not allow destructive operations" do + expect { @uri.normalize! 
}.to raise_error { |error| + expect(error.message).to match(/can't modify frozen/) + expect(error).to satisfy { |e| RuntimeError === e || TypeError === e } + } + end +end + +describe Addressable::URI, "when frozen" do + before do + @uri = Addressable::URI.parse( + "HTTP://example.com.:%38%30/%70a%74%68?a=%31#1%323" + ).freeze + end + + it "returns 'HTTP' for #scheme" do + expect(@uri.scheme).to eq("HTTP") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + expect(@uri.normalize.scheme).to eq("http") + end + + it "returns nil for #user" do + expect(@uri.user).to eq(nil) + end + + it "returns nil for #normalized_user" do + expect(@uri.normalized_user).to eq(nil) + end + + it "returns nil for #password" do + expect(@uri.password).to eq(nil) + end + + it "returns nil for #normalized_password" do + expect(@uri.normalized_password).to eq(nil) + end + + it "returns nil for #userinfo" do + expect(@uri.userinfo).to eq(nil) + end + + it "returns nil for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "returns 'example.com.' for #host" do + expect(@uri.host).to eq("example.com.") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + expect(@uri.normalize.host).to eq("example.com") + end + + it "returns 'example.com.:80' for #authority" do + expect(@uri.authority).to eq("example.com.:80") + end + + it "returns 'example.com' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("example.com") + expect(@uri.normalize.authority).to eq("example.com") + end + + it "returns 80 for #port" do + expect(@uri.port).to eq(80) + end + + it "returns nil for #normalized_port" do + expect(@uri.normalized_port).to eq(nil) + expect(@uri.normalize.port).to eq(nil) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'HTTP://example.com.:80' for #site" do + expect(@uri.site).to eq("HTTP://example.com.:80") + end + + it "returns 'http://example.com' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://example.com") + expect(@uri.normalize.site).to eq("http://example.com") + end + + it "returns '/%70a%74%68' for #path" do + expect(@uri.path).to eq("/%70a%74%68") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + expect(@uri.normalize.path).to eq("/path") + end + + it "returns 'a=%31' for #query" do + expect(@uri.query).to eq("a=%31") + end + + it "returns 'a=1' for #normalized_query" do + expect(@uri.normalized_query).to eq("a=1") + expect(@uri.normalize.query).to eq("a=1") + end + + it "returns '/%70a%74%68?a=%31' for #request_uri" do + expect(@uri.request_uri).to eq("/%70a%74%68?a=%31") + end + + it "returns '1%323' for #fragment" do + expect(@uri.fragment).to eq("1%323") + end + + it "returns '123' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("123") + expect(@uri.normalize.fragment).to eq("123") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq('HTTP://example.com.:80/%70a%74%68?a=%31#1%323') + expect(@uri.normalize.to_s).to eq('http://example.com/path?a=1#123') + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not be frozen after duping" do + expect(@uri.dup).not_to be_frozen + end + + it "should not allow destructive operations" do + expect
{ @uri.normalize! }.to raise_error { |error| + expect(error.message).to match(/can't modify frozen/) + expect(error).to satisfy { |e| RuntimeError === e || TypeError === e } + } + end +end + +describe Addressable::URI, "when created from string components" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :host => "example.com" + ) + end + + it "should have a site value of 'http://example.com'" do + expect(@uri.site).to eq("http://example.com") + end + + it "should be equal to the equivalent parsed URI" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should raise an error if invalid components omitted" do + expect do + @uri.omit(:bogus) + end.to raise_error(ArgumentError) + expect do + @uri.omit(:scheme, :bogus, :path) + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with a nil host but " + + "non-nil authority components" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => "user", :password => "pass", :port => 80) + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with both an authority and a user" do + it "should raise an error" do + expect do + Addressable::URI.new( + :user => "user", :authority => "user@example.com:80" + ) + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with an authority and no port" do + before do + @uri = Addressable::URI.new(:authority => "user@example.com") + end + + it "should not infer a port" do + expect(@uri.port).to eq(nil) + expect(@uri.default_port).to eq(nil) + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a site value of '//user@example.com'" do + expect(@uri.site).to eq("//user@example.com") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when created with a host with trailing dots" do + before do + @uri = Addressable::URI.new(:authority => "example...") + end + + it "should have a stable normalized form" do + expect(@uri.normalize.normalize.normalize.host).to eq( + @uri.normalize.host + ) + end +end + +describe Addressable::URI, "when created with a host with a backslash" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example\\example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host with a slash" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example/example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host with a space" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with both a userinfo and a user" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => "user", :userinfo => "user:pass") + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with a path that hasn't been " + + "prefixed with a '/' but a host specified" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :host => "example.com", :path => "path" + ) + end + + it "should prefix a '/' to the path" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/path")) + end + + it "should have a site value of 
'http://example.com'" do + expect(@uri.site).to eq("http://example.com") + end + + it "should have an origin of 'http://example.com" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when created with a path that hasn't been " + + "prefixed with a '/' but no host specified" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :path => "path" + ) + end + + it "should not prefix a '/' to the path" do + expect(@uri).to eq(Addressable::URI.parse("http:path")) + end + + it "should have a site value of 'http:'" do + expect(@uri.site).to eq("http:") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from an Addressable::URI object" do + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.parse(original_uri) + new_uri.host = 'www.example.com' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('http://www.example.com/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.heuristic_parse(original_uri) + new_uri.host = 'www.example.com' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('http://www.example.com/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.parse(original_uri) + new_uri.origin = 'https://www.example.com:8080' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('https://www.example.com:8080/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.heuristic_parse(original_uri) + new_uri.origin = 'https://www.example.com:8080' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('https://www.example.com:8080/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end +end + +describe Addressable::URI, "when parsed from something that looks " + + "like a URI object" do + it "should parse without error" do + uri = Addressable::URI.parse(Fake::URI::HTTP.new("http://example.com/")) + expect do + Addressable::URI.parse(uri) + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from a standard library URI object" do + it "should parse without error" do + uri = Addressable::URI.parse(URI.parse("http://example.com/")) + expect do + Addressable::URI.parse(uri) + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from ''" do + before do + @uri = Addressable::URI.parse("") + end + + it "should have no scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should be considered relative" do + expect(@uri).to 
be_relative + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'ftp://ftp.is.co.za/rfc/rfc1808.txt'" do + before do + @uri = Addressable::URI.parse("ftp://ftp.is.co.za/rfc/rfc1808.txt") + end + + it "should use the 'ftp' scheme" do + expect(@uri.scheme).to eq("ftp") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of 'ftp.is.co.za'" do + expect(@uri.host).to eq("ftp.is.co.za") + end + + it "should have inferred_port of 21" do + expect(@uri.inferred_port).to eq(21) + end + + it "should have a path of '/rfc/rfc1808.txt'" do + expect(@uri.path).to eq("/rfc/rfc1808.txt") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'ftp://ftp.is.co.za'" do + expect(@uri.origin).to eq('ftp://ftp.is.co.za') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'http://www.ietf.org/rfc/rfc2396.txt'" do + before do + @uri = Addressable::URI.parse("http://www.ietf.org/rfc/rfc2396.txt") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of 'www.ietf.org'" do + expect(@uri.host).to eq("www.ietf.org") + end + + it "should have inferred_port of 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/rfc/rfc2396.txt'" do + expect(@uri.path).to eq("/rfc/rfc2396.txt") + end + + it "should have a request URI of '/rfc/rfc2396.txt'" do + expect(@uri.request_uri).to eq("/rfc/rfc2396.txt") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should correctly omit components" do + expect(@uri.omit(:scheme).to_s).to eq("//www.ietf.org/rfc/rfc2396.txt") + expect(@uri.omit(:path).to_s).to eq("http://www.ietf.org") + end + + it "should correctly omit components destructively" do + @uri.omit!(:scheme) + expect(@uri.to_s).to eq("//www.ietf.org/rfc/rfc2396.txt") + end + + it "should have an origin of 'http://www.ietf.org'" do + expect(@uri.origin).to eq('http://www.ietf.org') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'ldap://[2001:db8::7]/c=GB?objectClass?one'" do + before do + @uri = Addressable::URI.parse("ldap://[2001:db8::7]/c=GB?objectClass?one") + end + + it "should use the 'ldap' scheme" do + expect(@uri.scheme).to eq("ldap") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of '[2001:db8::7]'" do + expect(@uri.host).to eq("[2001:db8::7]") + end + + it "should have inferred_port of 389" do + expect(@uri.inferred_port).to eq(389) + end + + it "should have a path of '/c=GB'" do + expect(@uri.path).to eq("/c=GB") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should not allow request URI assignment" do + expect do + @uri.request_uri = "/" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should have a query of 'objectClass?one'" do + expect(@uri.query).to eq("objectClass?one") + end + + it "should be 
considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should correctly omit components" do + expect(@uri.omit(:scheme, :authority).to_s).to eq("/c=GB?objectClass?one") + expect(@uri.omit(:path).to_s).to eq("ldap://[2001:db8::7]?objectClass?one") + end + + it "should correctly omit components destructively" do + @uri.omit!(:scheme, :authority) + expect(@uri.to_s).to eq("/c=GB?objectClass?one") + end + + it "should raise an error if omission would create an invalid URI" do + expect do + @uri.omit(:authority, :path) + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should have an origin of 'ldap://[2001:db8::7]'" do + expect(@uri.origin).to eq('ldap://[2001:db8::7]') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'mailto:John.Doe@example.com'" do + before do + @uri = Addressable::URI.parse("mailto:John.Doe@example.com") + end + + it "should use the 'mailto' scheme" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of 'John.Doe@example.com'" do + expect(@uri.path).to eq("John.Doe@example.com") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 2 of RFC 6068 +describe Addressable::URI, "when parsed from " + + "'mailto:?to=addr1@an.example,addr2@an.example'" do + before do + @uri = Addressable::URI.parse( + "mailto:?to=addr1@an.example,addr2@an.example" + ) + end + + it "should use the 'mailto' scheme" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should have the To: field value parameterized" do + expect(@uri.query_values(Hash)["to"]).to eq( + "addr1@an.example,addr2@an.example" + ) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'news:comp.infosystems.www.servers.unix'" do + before do + @uri = Addressable::URI.parse("news:comp.infosystems.www.servers.unix") + end + + it "should use the 'news' scheme" do + expect(@uri.scheme).to eq("news") + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of 'comp.infosystems.www.servers.unix'" do + expect(@uri.path).to eq("comp.infosystems.www.servers.unix") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe 
Addressable::URI, "when parsed from " + + "'tel:+1-816-555-1212'" do + before do + @uri = Addressable::URI.parse("tel:+1-816-555-1212") + end + + it "should use the 'tel' scheme" do + expect(@uri.scheme).to eq("tel") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of '+1-816-555-1212'" do + expect(@uri.path).to eq("+1-816-555-1212") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'telnet://192.0.2.16:80/'" do + before do + @uri = Addressable::URI.parse("telnet://192.0.2.16:80/") + end + + it "should use the 'telnet' scheme" do + expect(@uri.scheme).to eq("telnet") + end + + it "should have a host of '192.0.2.16'" do + expect(@uri.host).to eq("192.0.2.16") + end + + it "should have a port of 80" do + expect(@uri.port).to eq(80) + end + + it "should have a inferred_port of 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a default_port of 23" do + expect(@uri.default_port).to eq(23) + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'telnet://192.0.2.16:80'" do + expect(@uri.origin).to eq('telnet://192.0.2.16:80') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'urn:oasis:names:specification:docbook:dtd:xml:4.1.2'" do + before do + @uri = Addressable::URI.parse( + "urn:oasis:names:specification:docbook:dtd:xml:4.1.2") + end + + it "should use the 'urn' scheme" do + expect(@uri.scheme).to eq("urn") + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of " + + "'oasis:names:specification:docbook:dtd:xml:4.1.2'" do + expect(@uri.path).to eq("oasis:names:specification:docbook:dtd:xml:4.1.2") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when heuristically parsed from " + + "'192.0.2.16:8000/path'" do + before do + @uri = Addressable::URI.heuristic_parse("192.0.2.16:8000/path") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a host of '192.0.2.16'" do + expect(@uri.host).to eq("192.0.2.16") + end + + it "should have a port of '8000'" do + expect(@uri.port).to eq(8000) + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/path'" do + expect(@uri.path).to eq("/path") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it 
"should have an origin of 'http://192.0.2.16:8000'" do + expect(@uri.origin).to eq('http://192.0.2.16:8000') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com'" do + before do + @uri = Addressable::URI.parse("http://example.com") + end + + it "when inspected, should have the correct URI" do + expect(@uri.inspect).to include("http://example.com") + end + + it "when inspected, should have the correct class name" do + expect(@uri.inspect).to include("Addressable::URI") + end + + it "when inspected, should have the correct object id" do + expect(@uri.inspect).to include("%#0x" % @uri.object_id) + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should be considered ip-based" do + expect(@uri).to be_ip_based + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not have a specified port" do + expect(@uri.port).to eq(nil) + end + + it "should have an empty path" do + expect(@uri.path).to eq("") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + expect(@uri.query_values).to eq(nil) + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should not be exactly equal to 42" do + expect(@uri.eql?(42)).to eq(false) + end + + it "should not be equal to 42" do + expect(@uri == 42).to eq(false) + end + + it "should not be roughly equal to 42" do + expect(@uri === 42).to eq(false) + end + + it "should be exactly equal to http://example.com" do + expect(@uri.eql?(Addressable::URI.parse("http://example.com"))).to eq(true) + end + + it "should be roughly equal to http://example.com/" do + expect(@uri === Addressable::URI.parse("http://example.com/")).to eq(true) + end + + it "should be roughly equal to the string 'http://example.com/'" do + expect(@uri === "http://example.com/").to eq(true) + end + + it "should not be roughly equal to the string " + + "'http://example.com:bogus/'" do + expect do + expect(@uri === "http://example.com:bogus/").to eq(false) + end.not_to raise_error + end + + it "should result in itself when joined with itself" do + expect(@uri.join(@uri).to_s).to eq("http://example.com") + expect(@uri.join!(@uri).to_s).to eq("http://example.com") + end + + it "should be equivalent to http://EXAMPLE.com" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com")) + end + + it "should be equivalent to http://EXAMPLE.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com:80/")) + end + + it "should have the same hash as http://example.com" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have the same hash as http://EXAMPLE.com after assignment" do + @uri.origin = "http://EXAMPLE.com" + expect(@uri.hash).to 
eq(Addressable::URI.parse("http://EXAMPLE.com").hash) + end + + it "should have a different hash from http://EXAMPLE.com" do + expect(@uri.hash).not_to eq(Addressable::URI.parse("http://EXAMPLE.com").hash) + end + + it "should not allow origin assignment without scheme" do + expect do + @uri.origin = "example.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow origin assignment without host" do + expect do + @uri.origin = "http://" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow origin assignment with bogus type" do + expect do + @uri.origin = :bogus + end.to raise_error(TypeError) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Section 6.2.2.1 of RFC 3986 + it "should be equivalent to http://EXAMPLE.COM/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.COM/")) + end + + it "should have a route of '/path/' to 'http://example.com/path/'" do + expect(@uri.route_to("http://example.com/path/")).to eq( + Addressable::URI.parse("/path/") + ) + end + + it "should have a route of '..' from 'http://example.com/path/'" do + expect(@uri.route_from("http://example.com/path/")).to eq( + Addressable::URI.parse("..") + ) + end + + it "should have a route of '#' to 'http://example.com/'" do + expect(@uri.route_to("http://example.com/")).to eq( + Addressable::URI.parse("#") + ) + end + + it "should have a route of 'http://elsewhere.com/' to " + + "'http://elsewhere.com/'" do + expect(@uri.route_to("http://elsewhere.com/")).to eq( + Addressable::URI.parse("http://elsewhere.com/") + ) + end + + it "when joined with 'relative/path' should be " + + "'http://example.com/relative/path'" do + expect(@uri.join('relative/path')).to eq( + Addressable::URI.parse("http://example.com/relative/path") + ) + end + + it "when joined with a bogus object a TypeError should be raised" do + expect do + @uri.join(42) + end.to raise_error(TypeError) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should have the correct username after assignment" do + @uri.user = "user@123!" + expect(@uri.user).to eq("user@123!") + expect(@uri.normalized_user).to eq("user%40123%21") + expect(@uri.password).to eq(nil) + expect(@uri.normalize.to_s).to eq("http://user%40123%21@example.com/") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "#secret@123!" 
+ expect(@uri.password).to eq("#secret@123!") + expect(@uri.normalized_password).to eq("%23secret%40123%21") + expect(@uri.user).to eq("") + expect(@uri.normalize.to_s).to eq("http://:%23secret%40123%21@example.com/") + expect(@uri.omit(:password).to_s).to eq("http://example.com") + end + + it "should have the correct user/pass after repeated assignment" do + @uri.user = nil + expect(@uri.user).to eq(nil) + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + # Username cannot be nil if the password is set + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = nil + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = "" + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + @uri.user = nil + # Username cannot be nil if the password is set + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct user/pass after userinfo assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + @uri.userinfo = nil + expect(@uri.userinfo).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +# Section 5.1.2 of RFC 2616 +describe Addressable::URI, "when parsed from " + + "'HTTP://www.w3.org/pub/WWW/TheProject.html'" do + before do + @uri = Addressable::URI.parse("HTTP://www.w3.org/pub/WWW/TheProject.html") + end + + it "should have the correct request URI" do + expect(@uri.request_uri).to eq("/pub/WWW/TheProject.html") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/pub/WWW/TheProject.html?" 
+ expect(@uri.request_uri).to eq("/pub/WWW/TheProject.html?") + expect(@uri.path).to eq("/pub/WWW/TheProject.html") + expect(@uri.query).to eq("") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/some/where/else.html" + expect(@uri.request_uri).to eq("/some/where/else.html") + expect(@uri.path).to eq("/some/where/else.html") + expect(@uri.query).to eq(nil) + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/some/where/else.html?query?string" + expect(@uri.request_uri).to eq("/some/where/else.html?query?string") + expect(@uri.path).to eq("/some/where/else.html") + expect(@uri.query).to eq("query?string") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "?x=y" + expect(@uri.request_uri).to eq("/?x=y") + expect(@uri.path).to eq("/") + expect(@uri.query).to eq("x=y") + end + + it "should raise an error if the site value is set to something bogus" do + expect do + @uri.site = 42 + end.to raise_error(TypeError) + end + + it "should raise an error if the request URI is set to something bogus" do + expect do + @uri.request_uri = 42 + end.to raise_error(TypeError) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "HTTP", + :user => nil, + :password => nil, + :host => "www.w3.org", + :port => nil, + :path => "/pub/WWW/TheProject.html", + :query => nil, + :fragment => nil + }) + end + + it "should have an origin of 'http://www.w3.org'" do + expect(@uri.origin).to eq('http://www.w3.org') + end +end + +describe Addressable::URI, "when parsing IPv6 addresses" do + it "should not raise an error for " + + "'http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[fe80:0:0:0:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[fe80:0:0:0:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[fe80::200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[fe80::200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[::1]/'" do + Addressable::URI.parse("http://[::1]/") + end + + it "should not raise an error for " + + "'http://[fe80::1]/'" do + Addressable::URI.parse("http://[fe80::1]/") + end + + it "should raise an error for " + + "'http://[]/'" do + expect do + Addressable::URI.parse("http://[]/") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsing IPv6 address" do + subject { Addressable::URI.parse("http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") } + its(:host) { should == '[3ffe:1900:4545:3:200:f8ff:fe21:67cf]' } + its(:hostname) { should == '3ffe:1900:4545:3:200:f8ff:fe21:67cf' } +end + +describe Addressable::URI, "when assigning IPv6 address" do + it "should allow to set bare IPv6 address as hostname" do + uri = Addressable::URI.parse("http://[::1]/") + uri.hostname = '3ffe:1900:4545:3:200:f8ff:fe21:67cf' + expect(uri.to_s).to eq('http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/') + end + + it "should allow to set bare IPv6 address as hostname with IPAddr object" do + uri = Addressable::URI.parse("http://[::1]/") + uri.hostname = IPAddr.new('3ffe:1900:4545:3:200:f8ff:fe21:67cf') + expect(uri.to_s).to eq('http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/') + end + + it "should not allow to set bare IPv6 address as host" do + uri = Addressable::URI.parse("http://[::1]/") + skip "not 
checked" + expect do + uri.host = '3ffe:1900:4545:3:200:f8ff:fe21:67cf' + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsing IPvFuture addresses" do + it "should not raise an error for " + + "'http://[v9.3ffe:1900:4545:3:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[v9.3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[vff.fe80:0:0:0:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[vff.fe80:0:0:0:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[v12.fe80::200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[v12.fe80::200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[va0.::1]/'" do + Addressable::URI.parse("http://[va0.::1]/") + end + + it "should not raise an error for " + + "'http://[v255.fe80::1]/'" do + Addressable::URI.parse("http://[v255.fe80::1]/") + end + + it "should raise an error for " + + "'http://[v0.]/'" do + expect do + Addressable::URI.parse("http://[v0.]/") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/'" do + before do + @uri = Addressable::URI.parse("http://example.com/") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to HTTP://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("HTTP://example.com/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://Example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://Example.com/")) + end + + it "should have the correct username after assignment" do + @uri.user = nil + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should have the correct password after assignment" do + @uri.password = nil + expect(@uri.password).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have the same hash as its duplicate" do + expect(@uri.hash).to eq(@uri.dup.hash) + end + + it "should have a different hash from its equivalent String value" do + expect(@uri.hash).not_to eq(@uri.to_s.hash) + end + + it "should have the same hash as an equal URI" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/").hash) + end + + it "should be equivalent to http://EXAMPLE.com" do 
+ expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com")) + end + + it "should be equivalent to http://EXAMPLE.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com:80/")) + end + + it "should have the same hash as http://example.com/" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/").hash) + end + + it "should have the same hash as http://example.com after assignment" do + @uri.path = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have the same hash as http://example.com/? after assignment" do + @uri.query = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/?").hash) + end + + it "should have the same hash as http://example.com/? after assignment" do + @uri.query_values = {} + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/?").hash) + end + + it "should have the same hash as http://example.com/# after assignment" do + @uri.fragment = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/#").hash) + end + + it "should have a different hash from http://example.com" do + expect(@uri.hash).not_to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com?#'" do + before do + @uri = Addressable::URI.parse("http://example.com?#") + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "", + :query => "", + :fragment => "" + }) + end + + it "should have a request URI of '/?'" do + expect(@uri.request_uri).to eq("/?") + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq("http://example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://@example.com/'" do + before do + @uri = Addressable::URI.parse("http://@example.com/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => "", + :password => nil, + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com./'" do + before do + @uri = Addressable::URI.parse("http://example.com./") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:@example.com/'" do + before do + @uri = 
Addressable::URI.parse("http://:@example.com/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => "", + :password => "", + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'HTTP://EXAMPLE.COM/'" do + before do + @uri = Addressable::URI.parse("HTTP://EXAMPLE.COM/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "HTTP", + :user => nil, + :password => nil, + :host => "EXAMPLE.COM", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end + + it "should have a tld of 'com'" do + expect(@uri.tld).to eq('com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.example.co.uk/'" do + before do + @uri = Addressable::URI.parse("http://www.example.co.uk/") + end + + it "should have an origin of 'http://www.example.co.uk'" do + expect(@uri.origin).to eq('http://www.example.co.uk') + end + + it "should have a tld of 'co.uk'" do + expect(@uri.tld).to eq('co.uk') + end + + it "should have a domain of 'example.co.uk'" do + expect(@uri.domain).to eq('example.co.uk') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://sub_domain.blogspot.com/'" do + before do + @uri = Addressable::URI.parse("http://sub_domain.blogspot.com/") + end + + it "should have an origin of 'http://sub_domain.blogspot.com'" do + expect(@uri.origin).to eq('http://sub_domain.blogspot.com') + end + + it "should have a tld of 'com'" do + expect(@uri.tld).to eq('com') + end + + it "should have a domain of 'blogspot.com'" do + expect(@uri.domain).to eq('blogspot.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/~smith/'" do + before do + @uri = Addressable::URI.parse("http://example.com/~smith/") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com/%7Esmith/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/%7Esmith/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com/%7esmith/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/%7esmith/")) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%E8'" do + before do + @uri = Addressable::URI.parse("http://example.com/%E8") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + 
expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/%E8" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/%E8" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path%2Fsegment/'" do + before do + @uri = Addressable::URI.parse("http://example.com/path%2Fsegment/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should be equal to 'http://example.com/path%2Fsegment/'" do + expect(@uri.normalize).to be_eql( + Addressable::URI.parse("http://example.com/path%2Fsegment/") + ) + end + + it "should not be equal to 'http://example.com/path/segment/'" do + expect(@uri).not_to eq( + Addressable::URI.parse("http://example.com/path/segment/") + ) + end + + it "should not be equal to 'http://example.com/path/segment/'" do + expect(@uri.normalize).not_to be_eql( + Addressable::URI.parse("http://example.com/path/segment/") + ) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?%F6'" do + before do + @uri = Addressable::URI.parse("http://example.com/?%F6") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/?%F6" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/?%F6" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/#%F6'" do + before do + @uri = Addressable::URI.parse("http://example.com/#%F6") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/#%F6" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/#%F6" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%C3%87'" do + before do + @uri = Addressable::URI.parse("http://example.com/%C3%87") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to 'http://example.com/C%CC%A7'" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/C%CC%A7")) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/%C3%87" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/%C3%87" + end + + it "should raise an error if encoding with an unexpected return type" do + expect do + Addressable::URI.normalized_encode(@uri, Integer) + end.to raise_error(TypeError) + end + + it "if percent encoded should be 'http://example.com/%25C3%2587'" do + expect(Addressable::URI.encode(@uri).to_s).to eq( + "http://example.com/%25C3%2587" + ) + end + + it "if percent encoded should be 'http://example.com/%25C3%2587'" do + expect(Addressable::URI.encode(@uri, Addressable::URI)).to eq( +
Addressable::URI.parse("http://example.com/%25C3%2587") + ) + end + + it "should raise an error if encoding with an unexpected return type" do + expect do + Addressable::URI.encode(@uri, Integer) + end.to raise_error(TypeError) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=string'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=string") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have a query string of 'q=string'" do + expect(@uri.query).to eq("q=string") + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:80/'" do + before do + @uri = Addressable::URI.parse("http://example.com:80/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com:80'" do + expect(@uri.authority).to eq("example.com:80") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have explicit port 80" do + expect(@uri.port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:80/" do + expect(@uri.eql?(Addressable::URI.parse("http://example.com:80/"))).to eq(true) + end + + it "should be roughly equal to http://example.com/" do + expect(@uri === Addressable::URI.parse("http://example.com/")).to eq(true) + end + + it "should be roughly equal to the string 'http://example.com/'" do + expect(@uri === "http://example.com/").to eq(true) + end + + it "should not be roughly equal to the string " + + "'http://example.com:bogus/'" do + expect do + expect(@uri === "http://example.com:bogus/").to eq(false) + end.not_to raise_error + end + + it "should result in itself when joined with itself" do + expect(@uri.join(@uri).to_s).to eq("http://example.com:80/") + expect(@uri.join!(@uri).to_s).to 
eq("http://example.com:80/") + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Section 6.2.2.1 of RFC 3986 + it "should be equal to http://EXAMPLE.COM/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.COM/")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => 80, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com:80/" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com:80/" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:8080/'" do + before do + @uri = Addressable::URI.parse("http://example.com:8080/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com:8080'" do + expect(@uri.authority).to eq("example.com:8080") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 8080" do + expect(@uri.inferred_port).to eq(8080) + end + + it "should have explicit port 8080" do + expect(@uri.port).to eq(8080) + end + + it "should have default port 80" do + expect(@uri.default_port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:8080/" do + expect(@uri.eql?(Addressable::URI.parse( + "http://example.com:8080/"))).to eq(true) + end + + it "should have a route of 'http://example.com:8080/' from " + + "'http://example.com/path/to/'" do + expect(@uri.route_from("http://example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should have a route of 'http://example.com:8080/' from " + + "'http://example.com:80/path/to/'" do + expect(@uri.route_from("http://example.com:80/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should have a route of '../../' from " + + "'http://example.com:8080/path/to/'" do + expect(@uri.route_from("http://example.com:8080/path/to/")).to eq( + Addressable::URI.parse("../../") + ) + end + + it "should have a route of 'http://example.com:8080/' from " + + 
"'http://user:pass@example.com/path/to/'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => 8080, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com:8080'" do + expect(@uri.origin).to eq('http://example.com:8080') + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com:8080/" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com:8080/" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:%38%30/'" do + before do + @uri = Addressable::URI.parse("http://example.com:%38%30/") + end + + it "should have the correct port" do + expect(@uri.port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%2E/'" do + before do + @uri = Addressable::URI.parse("http://example.com/%2E/") + end + + it "should be considered to be in normal form" do + skip( + 'path segment normalization should happen before ' + + 'percent escaping normalization' + ) + @uri.normalize.should be_eql(@uri) + end + + it "should normalize to 'http://example.com/%2E/'" do + skip( + 'path segment normalization should happen before ' + + 'percent escaping normalization' + ) + expect(@uri.normalize).to eq("http://example.com/%2E/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/../..'" do + before do + @uri = Addressable::URI.parse("http://example.com/../..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path(/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/path(/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end 
+end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/(path)/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/(path)/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path(/../'" do + before do + @uri = Addressable::URI.parse("http://example.com/path(/../") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/(path)/../'" do + before do + @uri = Addressable::URI.parse("http://example.com/(path)/../") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'/..//example.com'" do + before do + @uri = Addressable::URI.parse("/..//example.com") + end + + it "should become invalid when normalized" do + expect do + @uri.normalize + end.to raise_error(Addressable::URI::InvalidURIError, /authority/) + end + + it "should have a path of '/..//example.com'" do + expect(@uri.path).to eq("/..//example.com") + end +end + +describe Addressable::URI, "when parsed from '/a/b/c/./../../g'" do + before do + @uri = Addressable::URI.parse("/a/b/c/./../../g") + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + # Section 5.2.4 of RFC 3986 + it "should normalize to '/a/g'" do + expect(@uri.normalize.to_s).to eq("/a/g") + end +end + +describe Addressable::URI, "when parsed from 'mid/content=5/../6'" do + before do + @uri = Addressable::URI.parse("mid/content=5/../6") + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + # Section 5.2.4 of RFC 3986 + it "should normalize to 'mid/6'" do + expect(@uri.normalize.to_s).to eq("mid/6") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.example.com///../'" do + before do + @uri = Addressable::URI.parse('http://www.example.com///../') + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://www.example.com//'" do + expect(@uri.normalize.to_s).to eq("http://www.example.com//") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path/to/resource/'" do + before do + @uri = Addressable::URI.parse("http://example.com/path/to/resource/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it 
"should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/path/to/resource/'" do + expect(@uri.path).to eq("/path/to/resource/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:8080/" do + expect(@uri.eql?(Addressable::URI.parse( + "http://example.com/path/to/resource/"))).to eq(true) + end + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/'" do + expect(@uri.route_from("http://example.com/path/to/")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of '../' from " + + "'http://example.com/path/to/resource/sub'" do + expect(@uri.route_from("http://example.com/path/to/resource/sub")).to eq( + Addressable::URI.parse("../") + ) + end + + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/another'" do + expect(@uri.route_from("http://example.com/path/to/another")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/res'" do + expect(@uri.route_from("http://example.com/path/to/res")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'resource/' from " + + "'http://example.com:80/path/to/'" do + expect(@uri.route_from("http://example.com:80/path/to/")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'http://example.com/path/to/' from " + + "'http://example.com:8080/path/to/'" do + expect(@uri.route_from("http://example.com:8080/path/to/")).to eq( + Addressable::URI.parse("http://example.com/path/to/resource/") + ) + end + + it "should have a route of 'http://example.com/path/to/' from " + + "'http://user:pass@example.com/path/to/'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com/path/to/resource/") + ) + end + + it "should have a route of '../../path/to/resource/' from " + + "'http://example.com/to/resource/'" do + expect(@uri.route_from("http://example.com/to/resource/")).to eq( + Addressable::URI.parse("../../path/to/resource/") + ) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "/path/to/resource/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'relative/path/to/resource'" do + before do + @uri = Addressable::URI.parse("relative/path/to/resource") + end + + it "should not have a scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an authority segment" do + expect(@uri.authority).to eq(nil) + end + + it "should not have a host" do + expect(@uri.host).to eq(nil) + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no 
password" do + expect(@uri.password).to eq(nil) + end + + it "should not have a port" do + expect(@uri.port).to eq(nil) + end + + it "should have a path of 'relative/path/to/resource'" do + expect(@uri.path).to eq("relative/path/to/resource") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should not be considered absolute" do + expect(@uri).not_to be_absolute + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should raise an error if routing is attempted" do + expect do + @uri.route_to("http://example.com/") + end.to raise_error(ArgumentError, /relative\/path\/to\/resource/) + expect do + @uri.route_from("http://example.com/") + end.to raise_error(ArgumentError, /relative\/path\/to\/resource/) + end + + it "when joined with 'another/relative/path' should be " + + "'relative/path/to/another/relative/path'" do + expect(@uri.join('another/relative/path')).to eq( + Addressable::URI.parse("relative/path/to/another/relative/path") + ) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'relative_path_with_no_slashes'" do + before do + @uri = Addressable::URI.parse("relative_path_with_no_slashes") + end + + it "should not have a scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an authority segment" do + expect(@uri.authority).to eq(nil) + end + + it "should not have a host" do + expect(@uri.host).to eq(nil) + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not have a port" do + expect(@uri.port).to eq(nil) + end + + it "should have a path of 'relative_path_with_no_slashes'" do + expect(@uri.path).to eq("relative_path_with_no_slashes") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should not be considered absolute" do + expect(@uri).not_to be_absolute + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "when joined with 'another_relative_path' should be " + + "'another_relative_path'" do + expect(@uri.join('another_relative_path')).to eq( + Addressable::URI.parse("another_relative_path") + ) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt'" do + expect(@uri.path).to eq("/file.txt") + end + + it "should have a basename of 'file.txt'" do + expect(@uri.basename).to eq("file.txt") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + 
it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt;parameter'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt;parameter") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt;parameter'" do + expect(@uri.path).to eq("/file.txt;parameter") + end + + it "should have a basename of 'file.txt'" do + expect(@uri.basename).to eq("file.txt") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt;x=y'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt;x=y") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt;x=y'" do + expect(@uri.path).to eq("/file.txt;x=y") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'svn+ssh://developername@rubyforge.org/var/svn/project'" do + before do + @uri = Addressable::URI.parse( + "svn+ssh://developername@rubyforge.org/var/svn/project" + ) + end + + it "should have a scheme of 'svn+ssh'" do + expect(@uri.scheme).to eq("svn+ssh") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/var/svn/project'" do + expect(@uri.path).to eq("/var/svn/project") + end + + it "should have a username of 'developername'" do + expect(@uri.user).to eq("developername") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'ssh+svn://developername@RUBYFORGE.ORG/var/svn/project'" do + before do + @uri = Addressable::URI.parse( + 
"ssh+svn://developername@RUBYFORGE.ORG/var/svn/project" + ) + end + + it "should have a scheme of 'ssh+svn'" do + expect(@uri.scheme).to eq("ssh+svn") + end + + it "should have a normalized scheme of 'svn+ssh'" do + expect(@uri.normalized_scheme).to eq("svn+ssh") + end + + it "should have a normalized site of 'svn+ssh'" do + expect(@uri.normalized_site).to eq("svn+ssh://developername@rubyforge.org") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of '/var/svn/project'" do + expect(@uri.path).to eq("/var/svn/project") + end + + it "should have a username of 'developername'" do + expect(@uri.user).to eq("developername") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'mailto:user@example.com'" do + before do + @uri = Addressable::URI.parse("mailto:user@example.com") + end + + it "should have a scheme of 'mailto'" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of 'user@example.com'" do + expect(@uri.path).to eq("user@example.com") + end + + it "should have no user" do + expect(@uri.user).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'tag:example.com,2006-08-18:/path/to/something'" do + before do + @uri = Addressable::URI.parse( + "tag:example.com,2006-08-18:/path/to/something") + end + + it "should have a scheme of 'tag'" do + expect(@uri.scheme).to eq("tag") + end + + it "should be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of " + + "'example.com,2006-08-18:/path/to/something'" do + expect(@uri.path).to eq("example.com,2006-08-18:/path/to/something") + end + + it "should have no user" do + expect(@uri.user).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/x;y/'" do + before do + @uri = Addressable::URI.parse("http://example.com/x;y/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?x=1&y=2'" do + before do + @uri = Addressable::URI.parse("http://example.com/?x=1&y=2") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'view-source:http://example.com/'" do + before do + @uri = Addressable::URI.parse("view-source:http://example.com/") + end + + it "should have a scheme of 'view-source'" do + expect(@uri.scheme).to eq("view-source") + end + + it "should have a path of 'http://example.com/'" do + expect(@uri.path).to eq("http://example.com/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + 
"'http://user:pass@example.com/path/to/resource?query=x#fragment'" do + before do + @uri = Addressable::URI.parse( + "http://user:pass@example.com/path/to/resource?query=x#fragment") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'user:pass@example.com'" do + expect(@uri.authority).to eq("user:pass@example.com") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have a password of 'pass'" do + expect(@uri.password).to eq("pass") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/path/to/resource'" do + expect(@uri.path).to eq("/path/to/resource") + end + + it "should have a query string of 'query=x'" do + expect(@uri.query).to eq("query=x") + end + + it "should have a fragment of 'fragment'" do + expect(@uri.fragment).to eq("fragment") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a route of '../../' to " + + "'http://user:pass@example.com/path/'" do + expect(@uri.route_to("http://user:pass@example.com/path/")).to eq( + Addressable::URI.parse("../../") + ) + end + + it "should have a route of 'to/resource?query=x#fragment' " + + "from 'http://user:pass@example.com/path/'" do + expect(@uri.route_from("http://user:pass@example.com/path/")).to eq( + Addressable::URI.parse("to/resource?query=x#fragment") + ) + end + + it "should have a route of '?query=x#fragment' " + + "from 'http://user:pass@example.com/path/to/resource'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/resource")).to eq( + Addressable::URI.parse("?query=x#fragment") + ) + end + + it "should have a route of '#fragment' " + + "from 'http://user:pass@example.com/path/to/resource?query=x'" do + expect(@uri.route_from( + "http://user:pass@example.com/path/to/resource?query=x")).to eq( + Addressable::URI.parse("#fragment") + ) + end + + it "should have a route of '#fragment' from " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment'" do + expect(@uri.route_from( + "http://user:pass@example.com/path/to/resource?query=x#fragment" + )).to eq(Addressable::URI.parse("#fragment")) + end + + it "should have a route of 'http://elsewhere.com/' to " + + "'http://elsewhere.com/'" do + expect(@uri.route_to("http://elsewhere.com/")).to eq( + Addressable::URI.parse("http://elsewhere.com/") + ) + end + + it "should have a route of " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment' " + + "from 'http://example.com/path/to/'" do + expect(@uri.route_from("http://elsewhere.com/path/to/")).to eq( + Addressable::URI.parse( + "http://user:pass@example.com/path/to/resource?query=x#fragment") + ) + end + + it "should have the correct scheme after assignment" do + @uri.scheme = "ftp" + expect(@uri.scheme).to eq("ftp") + expect(@uri.to_s).to eq( + "ftp://user:pass@example.com/path/to/resource?query=x#fragment" + ) + expect(@uri.to_str).to eq( + "ftp://user:pass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct site segment after assignment" do + @uri.site = "https://newuser:newpass@example.com:443" + expect(@uri.scheme).to eq("https") + expect(@uri.authority).to eq("newuser:newpass@example.com:443") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + 
expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.normalized_userinfo).to eq("newuser:newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(443) + expect(@uri.inferred_port).to eq(443) + expect(@uri.to_s).to eq( + "https://newuser:newpass@example.com:443" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser:newpass@example.com:80" + expect(@uri.authority).to eq("newuser:newpass@example.com:80") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.normalized_userinfo).to eq("newuser:newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(80) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq( + "http://newuser:newpass@example.com:80" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct userinfo segment after assignment" do + @uri.userinfo = "newuser:newpass" + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.authority).to eq("newuser:newpass@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq( + "http://newuser:newpass@example.com" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.authority).to eq("newuser:pass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.authority).to eq("user:newpass@example.com") + end + + it "should have the correct host after assignment" do + @uri.host = "newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.authority).to eq("user:pass@newexample.com") + end + + it "should have the correct host after assignment" do + @uri.hostname = "newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.hostname).to eq("newexample.com") + expect(@uri.authority).to eq("user:pass@newexample.com") + end + + it "should raise an error if assigning a bogus object to the hostname" do + expect do + @uri.hostname = Object.new + end.to raise_error(TypeError) + end + + it "should have the correct port after assignment" do + @uri.port = 8080 + expect(@uri.port).to eq(8080) + expect(@uri.authority).to eq("user:pass@example.com:8080") + end + + it "should have the correct origin after assignment" do + @uri.origin = "http://newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.authority).to eq("newexample.com") + end + + it "should have the correct path after assignment" do + @uri.path = "/newpath/to/resource" + expect(@uri.path).to eq("/newpath/to/resource") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/newpath/to/resource?query=x#fragment" + ) + end + + it "should have the correct scheme and authority after nil assignment" do + @uri.site = nil + expect(@uri.scheme).to eq(nil) + expect(@uri.authority).to eq(nil) + expect(@uri.to_s).to eq("/path/to/resource?query=x#fragment") + end + + it "should have the correct scheme and authority after assignment" do + @uri.site = "file://" + expect(@uri.scheme).to eq("file") + expect(@uri.authority).to eq("") + expect(@uri.to_s).to 
eq("file:///path/to/resource?query=x#fragment") + end + + it "should have the correct path after nil assignment" do + @uri.path = nil + expect(@uri.path).to eq("") + expect(@uri.to_s).to eq( + "http://user:pass@example.com?query=x#fragment" + ) + end + + it "should have the correct query string after assignment" do + @uri.query = "newquery=x" + expect(@uri.query).to eq("newquery=x") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?newquery=x#fragment" + ) + @uri.query = nil + expect(@uri.query).to eq(nil) + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource#fragment" + ) + end + + it "should have the correct query string after hash assignment" do + @uri.query_values = {"?uestion mark" => "=sign", "hello" => "g\xC3\xBCnther"} + expect(@uri.query.split("&")).to include("%3Fuestion%20mark=%3Dsign") + expect(@uri.query.split("&")).to include("hello=g%C3%BCnther") + expect(@uri.query_values).to eq({ + "?uestion mark" => "=sign", "hello" => "g\xC3\xBCnther" + }) + end + + it "should have the correct query string after flag hash assignment" do + @uri.query_values = {'flag?1' => nil, 'fl=ag2' => nil, 'flag3' => nil} + expect(@uri.query.split("&")).to include("flag%3F1") + expect(@uri.query.split("&")).to include("fl%3Dag2") + expect(@uri.query.split("&")).to include("flag3") + expect(@uri.query_values(Array).sort).to eq([["fl=ag2"], ["flag3"], ["flag?1"]]) + expect(@uri.query_values(Hash)).to eq({ + 'flag?1' => nil, 'fl=ag2' => nil, 'flag3' => nil + }) + end + + it "should raise an error if query values are set to a bogus type" do + expect do + @uri.query_values = "bogus" + end.to raise_error(TypeError) + end + + it "should have the correct fragment after assignment" do + @uri.fragment = "newfragment" + expect(@uri.fragment).to eq("newfragment") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#newfragment" + ) + + @uri.fragment = nil + expect(@uri.fragment).to eq(nil) + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:fragment => "newfragment").to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#newfragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:fragment => nil).to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:userinfo => "newuser:newpass").to_s).to eq( + "http://newuser:newpass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:userinfo => nil).to_s).to eq( + "http://example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:path => "newpath").to_s).to eq( + "http://user:pass@example.com/newpath?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:port => "42", :path => "newpath", :query => "").to_s).to eq( + "http://user:pass@example.com:42/newpath?#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:authority => "foo:bar@baz:42").to_s).to eq( + "http://foo:bar@baz:42/path/to/resource?query=x#fragment" + ) + # Ensure the operation was not destructive + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#fragment" + ) + end + + it 
"should have the correct values after a destructive merge" do + @uri.merge!(:authority => "foo:bar@baz:42") + # Ensure the operation was destructive + expect(@uri.to_s).to eq( + "http://foo:bar@baz:42/path/to/resource?query=x#fragment" + ) + end + + it "should fail to merge with bogus values" do + expect do + @uri.merge(:port => "bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should fail to merge with bogus values" do + expect do + @uri.merge(:authority => "bar@baz:bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should fail to merge with bogus parameters" do + expect do + @uri.merge(42) + end.to raise_error(TypeError) + end + + it "should fail to merge with bogus parameters" do + expect do + @uri.merge("http://example.com/") + end.to raise_error(TypeError) + end + + it "should fail to merge with both authority and subcomponents" do + expect do + @uri.merge(:authority => "foo:bar@baz:42", :port => "42") + end.to raise_error(ArgumentError) + end + + it "should fail to merge with both userinfo and subcomponents" do + expect do + @uri.merge(:userinfo => "foo:bar", :user => "foo") + end.to raise_error(ArgumentError) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/search?q=Q%26A'" do + + before do + @uri = Addressable::URI.parse("http://example.com/search?q=Q%26A") + end + + it "should have a query of 'q=Q%26A'" do + expect(@uri.query).to eq("q=Q%26A") + end + + it "should have query_values of {'q' => 'Q&A'}" do + expect(@uri.query_values).to eq({ 'q' => 'Q&A' }) + end + + it "should normalize to the original uri " + + "(with the ampersand properly percent-encoded)" do + expect(@uri.normalize.to_s).to eq("http://example.com/search?q=Q%26A") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?&x=b") + end + + it "should have a query of '&x=b'" do + expect(@uri.query).to eq("&x=b") + end + + it "should have query_values of {'x' => 'b'}" do + expect(@uri.query_values).to eq({'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q='one;two'&x=1'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q='one;two'&x=1") + end + + it "should have a query of 'q='one;two'&x=1'" do + expect(@uri.query).to eq("q='one;two'&x=1") + end + + it "should have query_values of {\"q\" => \"'one;two'\", \"x\" => \"1\"}" do + expect(@uri.query_values).to eq({"q" => "'one;two'", "x" => "1"}) + end + + it "should escape the ';' character when normalizing to avoid ambiguity " + + "with the W3C HTML 4.01 specification" do + # HTML 4.01 Section B.2.2 + expect(@uri.normalize.query).to eq("q='one%3Btwo'&x=1") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?&&x=b") + end + + it "should have a query of '&&x=b'" do + expect(@uri.query).to eq("&&x=b") + end + + it "should have query_values of {'x' => 'b'}" do + expect(@uri.query_values).to eq({'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a&&x=b") + end + + it "should have 
a query of 'q=a&&x=b'" do + expect(@uri.query).to eq("q=a&&x=b") + end + + it "should have query_values of {'q' => 'a, 'x' => 'b'}" do + expect(@uri.query_values).to eq({'q' => 'a', 'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q&&x=b") + end + + it "should have a query of 'q&&x=b'" do + expect(@uri.query).to eq("q&&x=b") + end + + it "should have query_values of {'q' => true, 'x' => 'b'}" do + expect(@uri.query_values).to eq({'q' => nil, 'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a+b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a+b") + end + + it "should have a query of 'q=a+b'" do + expect(@uri.query).to eq("q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq({'q' => 'a b'}) + end + + it "should have a normalized query of 'q=a+b'" do + expect(@uri.normalized_query).to eq("q=a+b") + end +end + +describe Addressable::URI, "when parsed from 'https://example.com/?q=a+b'" do + before do + @uri = Addressable::URI.parse("https://example.com/?q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq("q" => "a b") + end +end + +describe Addressable::URI, "when parsed from 'example.com?q=a+b'" do + before do + @uri = Addressable::URI.parse("example.com?q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq("q" => "a b") + end +end + +describe Addressable::URI, "when parsed from 'mailto:?q=a+b'" do + before do + @uri = Addressable::URI.parse("mailto:?q=a+b") + end + + it "should have query_values of {'q' => 'a+b'}" do + expect(@uri.query_values).to eq("q" => "a+b") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a%2bb'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a%2bb") + end + + it "should have a query of 'q=a+b'" do + expect(@uri.query).to eq("q=a%2bb") + end + + it "should have query_values of {'q' => 'a+b'}" do + expect(@uri.query_values).to eq({'q' => 'a+b'}) + end + + it "should have a normalized query of 'q=a%2Bb'" do + expect(@uri.normalized_query).to eq("q=a%2Bb") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?v=%7E&w=%&x=%25&y=%2B&z=C%CC%A7'" do + before do + @uri = Addressable::URI.parse("http://example.com/?v=%7E&w=%&x=%25&y=%2B&z=C%CC%A7") + end + + it "should have a normalized query of 'v=~&w=%25&x=%25&y=%2B&z=%C3%87'" do + expect(@uri.normalized_query).to eq("v=~&w=%25&x=%25&y=%2B&z=%C3%87") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?v=%7E&w=%&x=%25&y=+&z=C%CC%A7'" do + before do + @uri = Addressable::URI.parse("http://example.com/?v=%7E&w=%&x=%25&y=+&z=C%CC%A7") + end + + it "should have a normalized query of 'v=~&w=%25&x=%25&y=+&z=%C3%87'" do + expect(@uri.normalized_query).to eq("v=~&w=%25&x=%25&y=+&z=%C3%87") + end +end + +describe Addressable::URI, "when parsed from 'http://example/?b=1&a=2&c=3'" do + before do + @uri = Addressable::URI.parse("http://example/?b=1&a=2&c=3") + end + + it "should have a sorted normalized query of 'a=2&b=1&c=3'" do + expect(@uri.normalized_query(:sorted)).to eq("a=2&b=1&c=3") + end +end + +describe Addressable::URI, "when parsed from 'http://example/?&a&&c&'" do + before do + @uri = Addressable::URI.parse("http://example/?&a&&c&") + end + + it 
"should have a compacted normalized query of 'a&c'" do + expect(@uri.normalized_query(:compacted)).to eq("a&c") + end +end + +describe Addressable::URI, "when parsed from 'http://example.com/?a=1&a=1'" do + before do + @uri = Addressable::URI.parse("http://example.com/?a=1&a=1") + end + + it "should have a compacted normalized query of 'a=1'" do + expect(@uri.normalized_query(:compacted)).to eq("a=1") + end +end + +describe Addressable::URI, "when parsed from 'http://example.com/?a=1&a=2'" do + before do + @uri = Addressable::URI.parse("http://example.com/?a=1&a=2") + end + + it "should have a compacted normalized query of 'a=1&a=2'" do + expect(@uri.normalized_query(:compacted)).to eq("a=1&a=2") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/sound%2bvision'" do + before do + @uri = Addressable::URI.parse("http://example.com/sound%2bvision") + end + + it "should have a normalized path of '/sound+vision'" do + expect(@uri.normalized_path).to eq('/sound+vision') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q='" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=") + end + + it "should have a query of 'q='" do + expect(@uri.query).to eq("q=") + end + + it "should have query_values of {'q' => ''}" do + expect(@uri.query_values).to eq({'q' => ''}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user@example.com'" do + before do + @uri = Addressable::URI.parse("http://user@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should have a userinfo of 'user'" do + expect(@uri.userinfo).to eq("user") + end + + it "should have a normalized userinfo of 'user'" do + expect(@uri.normalized_userinfo).to eq("user") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have default_port 80" do + expect(@uri.default_port).to eq(80) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.to_s).to eq("http://user:newpass@example.com") + end + + it "should have the correct userinfo segment after assignment" do + @uri.userinfo = "newuser:newpass" + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser:newpass@example.com") + end + + it "should have the correct userinfo segment after nil assignment" do + @uri.userinfo = nil + expect(@uri.userinfo).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser@example.com" + 
expect(@uri.authority).to eq("newuser@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should raise an error after nil assignment of authority segment" do + expect do + # This would create an invalid URI + @uri.authority = nil + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user:@example.com'" do + before do + @uri = Addressable::URI.parse("http://user:@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have a password of ''" do + expect(@uri.password).to eq("") + end + + it "should have a normalized userinfo of 'user:'" do + expect(@uri.normalized_userinfo).to eq("user:") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.to_s).to eq("http://user:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser:@example.com" + expect(@uri.authority).to eq("newuser:@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser:@example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:pass@example.com'" do + before do + @uri = Addressable::URI.parse("http://:pass@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of ''" do + expect(@uri.user).to eq("") + end + + it "should have a password of 'pass'" do + expect(@uri.password).to eq("pass") + end + + it "should have a userinfo of ':pass'" do + expect(@uri.userinfo).to eq(":pass") + end + + it "should have a normalized userinfo of ':pass'" do + expect(@uri.normalized_userinfo).to eq(":pass") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("pass") + expect(@uri.to_s).to eq("http://newuser:pass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = ":newpass@example.com" + expect(@uri.authority).to eq(":newpass@example.com") + expect(@uri.user).to eq("") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to 
eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://:newpass@example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:@example.com'" do + before do + @uri = Addressable::URI.parse("http://:@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of ''" do + expect(@uri.user).to eq("") + end + + it "should have a password of ''" do + expect(@uri.password).to eq("") + end + + it "should have a normalized userinfo of nil" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = ":@newexample.com" + expect(@uri.authority).to eq(":@newexample.com") + expect(@uri.user).to eq("") + expect(@uri.password).to eq("") + expect(@uri.host).to eq("newexample.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://:@newexample.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'#example'" do + before do + @uri = Addressable::URI.parse("#example") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have a host of nil" do + expect(@uri.host).to eq(nil) + end + + it "should have a site of nil" do + expect(@uri.site).to eq(nil) + end + + it "should have a normalized_site of nil" do + expect(@uri.normalized_site).to eq(nil) + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should have a query string of nil" do + expect(@uri.query).to eq(nil) + end + + it "should have a fragment of 'example'" do + expect(@uri.fragment).to eq("example") + end +end + +describe Addressable::URI, "when parsed from " + + "the network-path reference '//example.com/'" do + before do + @uri = Addressable::URI.parse("//example.com/") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should raise an error if routing is attempted" do + expect do + @uri.route_to("http://example.com/") + end.to raise_error(ArgumentError, /\/\/example.com\//) + expect do + @uri.route_from("http://example.com/") + end.to raise_error(ArgumentError, /\/\/example.com\//) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'feed://http://example.com/'" do + before do + @uri = Addressable::URI.parse("feed://http://example.com/") + end + + it "should have a host of 'http'" do + expect(@uri.host).to eq("http") + end + + it "should have a path of '//example.com/'" do + expect(@uri.path).to eq("//example.com/") + end +end + +describe 
Addressable::URI, "when parsed from " + + "'feed:http://example.com/'" do + before do + @uri = Addressable::URI.parse("feed:http://example.com/") + end + + it "should have a path of 'http://example.com/'" do + expect(@uri.path).to eq("http://example.com/") + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + expect(@uri.normalize!.to_s).to eq("http://example.com/") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'example://a/b/c/%7Bfoo%7D'" do + before do + @uri = Addressable::URI.parse("example://a/b/c/%7Bfoo%7D") + end + + # Section 6.2.2 of RFC 3986 + it "should be equivalent to eXAMPLE://a/./b/../b/%63/%7bfoo%7d" do + expect(@uri).to eq( + Addressable::URI.parse("eXAMPLE://a/./b/../b/%63/%7bfoo%7d") + ) + end + + it "should have an origin of 'example://a'" do + expect(@uri.origin).to eq('example://a') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/indirect/path/./to/../resource/'" do + before do + @uri = Addressable::URI.parse( + "http://example.com/indirect/path/./to/../resource/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/indirect/path/./to/../resource/'" do + expect(@uri.path).to eq("/indirect/path/./to/../resource/") + end + + # Section 6.2.2.3 of RFC 3986 + it "should have a normalized path of '/indirect/path/resource/'" do + expect(@uri.normalize.path).to eq("/indirect/path/resource/") + expect(@uri.normalize!.path).to eq("/indirect/path/resource/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://under_score.example.com/'" do + it "should not cause an error" do + expect do + Addressable::URI.parse("http://under_score.example.com/") + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from " + + "'./this:that'" do + before do + @uri = Addressable::URI.parse("./this:that") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have no scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'this:that'" do + before do + @uri = Addressable::URI.parse("this:that") + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should have a scheme of 'this'" do + expect(@uri.scheme).to eq("this") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?'" do + before do + @uri = Addressable::URI.parse("?") + end + + it "should normalize to ''" do + expect(@uri.normalize.to_s).to eq("") + end + + it "should have the correct return type" do + expect(@uri.query_values).to eq({}) + expect(@uri.query_values(Hash)).to eq({}) + expect(@uri.query_values(Array)).to eq([]) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?one=1&two=2&three=3'" do + before do + @uri = Addressable::URI.parse("?one=1&two=2&three=3") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one" => "1", 
"two" => "2", "three" => "3"}) + end + + it "should raise an error for invalid return type values" do + expect do + @uri.query_values(Integer) + end.to raise_error(ArgumentError) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one", "1"], ["two", "2"], ["three", "3"] + ]) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?one=1=uno&two=2=dos'" do + before do + @uri = Addressable::URI.parse("?one=1=uno&two=2=dos") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one" => "1=uno", "two" => "2=dos"}) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one", "1=uno"], ["two", "2=dos"] + ]) + end +end + +describe Addressable::URI, "when parsed from '?one[two][three]=four'" do + before do + @uri = Addressable::URI.parse("?one[two][three]=four") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one[two][three]" => "four"}) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three]", "four"] + ]) + end +end + +describe Addressable::URI, "when parsed from '?one.two.three=four'" do + before do + @uri = Addressable::URI.parse("?one.two.three=four") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one.two.three" => "four" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one.two.three", "four"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three]=four&one[two][five]=six'" do + before do + @uri = Addressable::URI.parse("?one[two][three]=four&one[two][five]=six") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three]" => "four", "one[two][five]" => "six" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three]", "four"], ["one[two][five]", "six"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one.two.three=four&one.two.five=six'" do + before do + @uri = Addressable::URI.parse("?one.two.three=four&one.two.five=six") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one.two.three" => "four", "one.two.five" => "six" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one.two.three", "four"], ["one.two.five", "six"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one=two&one=three'" do + before do + @uri = Addressable::URI.parse( + "?one=two&one=three&one=four" + ) + end + + it "should have correct array query values" do + expect(@uri.query_values(Array)).to eq( + [['one', 'two'], ['one', 'three'], ['one', 'four']] + ) + end + + it "should have correct hash query values" do + skip("This is probably more desirable behavior.") + expect(@uri.query_values(Hash)).to eq( + {'one' => ['two', 'three', 'four']} + ) + end + + it "should handle assignment with keys of mixed type" do + @uri.query_values = @uri.query_values(Hash).merge({:one => 'three'}) + expect(@uri.query_values(Hash)).to eq({'one' => 'three'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][]=four&one[two][three][]=five'" do + before do + @uri = 
Addressable::URI.parse( + "?one[two][three][]=four&one[two][three][]=five" + ) + end + + it "should have correct query values" do + expect(@uri.query_values(Hash)).to eq({"one[two][three][]" => "five"}) + end + + it "should have correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three][]", "four"], ["one[two][three][]", "five"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][0]=four&one[two][three][1]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][0]=four&one[two][three][1]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][0]" => "four", "one[two][three][1]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][1]=four&one[two][three][0]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][1]=four&one[two][three][0]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][1]" => "four", "one[two][three][0]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][2]=four&one[two][three][1]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][2]=four&one[two][three][1]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][2]" => "four", "one[two][three][1]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/") + end + + it "should be equivalent to 'http://www.xn--8ws00zhy3a.com/'" do + expect(@uri).to eq( + Addressable::URI.parse("http://www.xn--8ws00zhy3a.com/") + ) + end + + it "should not have domain name encoded during normalization" do + expect(Addressable::URI.normalized_encode(@uri.to_s)).to eq( + "http://www.詹姆斯.com/" + ) + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/ some spaces /'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/ some spaces /") + end + + it "should be equivalent to " + + "'http://www.xn--8ws00zhy3a.com/%20some%20spaces%20/'" do + expect(@uri).to eq( + Addressable::URI.parse( + "http://www.xn--8ws00zhy3a.com/%20some%20spaces%20/") + ) + end + + it "should not have domain name encoded during normalization" do + expect(Addressable::URI.normalized_encode(@uri.to_s)).to eq( + "http://www.詹姆斯.com/%20some%20spaces%20/" + ) + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.xn--8ws00zhy3a.com/'" do + before do + @uri = Addressable::URI.parse("http://www.xn--8ws00zhy3a.com/") + end + + it "should be displayed as http://www.詹姆斯.com/" do + expect(@uri.display_uri.to_s).to eq("http://www.詹姆斯.com/") + end + + it "should properly force the encoding" do + display_string = @uri.display_uri.to_str + expect(display_string).to eq("http://www.詹姆斯.com/") + if display_string.respond_to?(:encoding) + expect(display_string.encoding.to_s).to eq(Encoding::UTF_8.to_s) + end + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to 
eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/atomtests/iri/詹.html'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/atomtests/iri/詹.html") + end + + it "should normalize to " + + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" do + expect(@uri.normalize.to_s).to eq( + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" + ) + expect(@uri.normalize!.to_s).to eq( + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" + ) + end +end + +describe Addressable::URI, "when parsed from a percent-encoded IRI" do + before do + @uri = Addressable::URI.parse( + "http://www.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA" + + "%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3" + + "%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82" + + "%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0" + + "%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3" + + "%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp" + ) + end + + it "should normalize to something sane" do + expect(@uri.normalize.to_s).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/" + ) + expect(@uri.normalize!.to_s).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/" + ) + end + + it "should have the correct origin" do + expect(@uri.origin).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end +end + +describe Addressable::URI, "with a base uri of 'http://a/b/c/d;p?q'" do + before do + @uri = Addressable::URI.parse("http://a/b/c/d;p?q") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g:h' should resolve to g:h" do + expect((@uri + "g:h").to_s).to eq("g:h") + expect(Addressable::URI.join(@uri, "g:h").to_s).to eq("g:h") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g' should resolve to http://a/b/c/g" do + expect((@uri + "g").to_s).to eq("http://a/b/c/g") + expect(Addressable::URI.join(@uri.to_s, "g").to_s).to eq("http://a/b/c/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with './g' should resolve to http://a/b/c/g" do + expect((@uri + "./g").to_s).to eq("http://a/b/c/g") + expect(Addressable::URI.join(@uri.to_s, "./g").to_s).to eq("http://a/b/c/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g/' should resolve to http://a/b/c/g/" do + expect((@uri + "g/").to_s).to eq("http://a/b/c/g/") + expect(Addressable::URI.join(@uri.to_s, "g/").to_s).to eq("http://a/b/c/g/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '/g' should resolve to http://a/g" do + expect((@uri + "/g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/g").to_s).to eq("http://a/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '//g' should resolve to http://g" do + expect((@uri + "//g").to_s).to eq("http://g") + expect(Addressable::URI.join(@uri.to_s, "//g").to_s).to eq("http://g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '?y' should resolve to http://a/b/c/d;p?y" do + expect((@uri + "?y").to_s).to eq("http://a/b/c/d;p?y") + expect(Addressable::URI.join(@uri.to_s, "?y").to_s).to eq("http://a/b/c/d;p?y") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g?y' should resolve to http://a/b/c/g?y" do + expect((@uri + "g?y").to_s).to eq("http://a/b/c/g?y") + 
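# Each RFC 3986 reference-resolution example in this block asserts the same
# result twice: once through the `+` operator (the operator form of URI#join)
# and once through the Addressable::URI.join class method, as in the line
# that follows. A minimal sketch of that API, assuming addressable ~> 2.8
# behaves as the surrounding expectations describe:
require "addressable/uri"
base = Addressable::URI.parse("http://a/b/c/d;p?q")
(base + "g?y").to_s                            # => "http://a/b/c/g?y"
Addressable::URI.join(base, "g?y").to_s        # => "http://a/b/c/g?y"
Addressable::URI.join(base.to_s, "../g").to_s  # => "http://a/b/g"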
expect(Addressable::URI.join(@uri.to_s, "g?y").to_s).to eq("http://a/b/c/g?y") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '#s' should resolve to http://a/b/c/d;p?q#s" do + expect((@uri + "#s").to_s).to eq("http://a/b/c/d;p?q#s") + expect(Addressable::URI.join(@uri.to_s, "#s").to_s).to eq( + "http://a/b/c/d;p?q#s" + ) + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g#s' should resolve to http://a/b/c/g#s" do + expect((@uri + "g#s").to_s).to eq("http://a/b/c/g#s") + expect(Addressable::URI.join(@uri.to_s, "g#s").to_s).to eq("http://a/b/c/g#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g?y#s' should resolve to http://a/b/c/g?y#s" do + expect((@uri + "g?y#s").to_s).to eq("http://a/b/c/g?y#s") + expect(Addressable::URI.join( + @uri.to_s, "g?y#s").to_s).to eq("http://a/b/c/g?y#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with ';x' should resolve to http://a/b/c/;x" do + expect((@uri + ";x").to_s).to eq("http://a/b/c/;x") + expect(Addressable::URI.join(@uri.to_s, ";x").to_s).to eq("http://a/b/c/;x") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g;x' should resolve to http://a/b/c/g;x" do + expect((@uri + "g;x").to_s).to eq("http://a/b/c/g;x") + expect(Addressable::URI.join(@uri.to_s, "g;x").to_s).to eq("http://a/b/c/g;x") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g;x?y#s' should resolve to http://a/b/c/g;x?y#s" do + expect((@uri + "g;x?y#s").to_s).to eq("http://a/b/c/g;x?y#s") + expect(Addressable::URI.join( + @uri.to_s, "g;x?y#s").to_s).to eq("http://a/b/c/g;x?y#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '' should resolve to http://a/b/c/d;p?q" do + expect((@uri + "").to_s).to eq("http://a/b/c/d;p?q") + expect(Addressable::URI.join(@uri.to_s, "").to_s).to eq("http://a/b/c/d;p?q") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '.' should resolve to http://a/b/c/" do + expect((@uri + ".").to_s).to eq("http://a/b/c/") + expect(Addressable::URI.join(@uri.to_s, ".").to_s).to eq("http://a/b/c/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with './' should resolve to http://a/b/c/" do + expect((@uri + "./").to_s).to eq("http://a/b/c/") + expect(Addressable::URI.join(@uri.to_s, "./").to_s).to eq("http://a/b/c/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '..' should resolve to http://a/b/" do + expect((@uri + "..").to_s).to eq("http://a/b/") + expect(Addressable::URI.join(@uri.to_s, "..").to_s).to eq("http://a/b/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../' should resolve to http://a/b/" do + expect((@uri + "../").to_s).to eq("http://a/b/") + expect(Addressable::URI.join(@uri.to_s, "../").to_s).to eq("http://a/b/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../g' should resolve to http://a/b/g" do + expect((@uri + "../g").to_s).to eq("http://a/b/g") + expect(Addressable::URI.join(@uri.to_s, "../g").to_s).to eq("http://a/b/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../..' 
should resolve to http://a/" do + expect((@uri + "../..").to_s).to eq("http://a/") + expect(Addressable::URI.join(@uri.to_s, "../..").to_s).to eq("http://a/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../../' should resolve to http://a/" do + expect((@uri + "../../").to_s).to eq("http://a/") + expect(Addressable::URI.join(@uri.to_s, "../../").to_s).to eq("http://a/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../../g' should resolve to http://a/g" do + expect((@uri + "../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '../../../g' should resolve to http://a/g" do + expect((@uri + "../../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../../../g").to_s).to eq("http://a/g") + end + + it "when joined with '../.././../g' should resolve to http://a/g" do + expect((@uri + "../.././../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../.././../g").to_s).to eq( + "http://a/g" + ) + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '../../../../g' should resolve to http://a/g" do + expect((@uri + "../../../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join( + @uri.to_s, "../../../../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '/./g' should resolve to http://a/g" do + expect((@uri + "/./g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/./g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '/../g' should resolve to http://a/g" do + expect((@uri + "/../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g.' should resolve to http://a/b/c/g." do + expect((@uri + "g.").to_s).to eq("http://a/b/c/g.") + expect(Addressable::URI.join(@uri.to_s, "g.").to_s).to eq("http://a/b/c/g.") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '.g' should resolve to http://a/b/c/.g" do + expect((@uri + ".g").to_s).to eq("http://a/b/c/.g") + expect(Addressable::URI.join(@uri.to_s, ".g").to_s).to eq("http://a/b/c/.g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g..' should resolve to http://a/b/c/g.." do + expect((@uri + "g..").to_s).to eq("http://a/b/c/g..") + expect(Addressable::URI.join(@uri.to_s, "g..").to_s).to eq("http://a/b/c/g..") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '..g' should resolve to http://a/b/c/..g" do + expect((@uri + "..g").to_s).to eq("http://a/b/c/..g") + expect(Addressable::URI.join(@uri.to_s, "..g").to_s).to eq("http://a/b/c/..g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with './../g' should resolve to http://a/b/g" do + expect((@uri + "./../g").to_s).to eq("http://a/b/g") + expect(Addressable::URI.join(@uri.to_s, "./../g").to_s).to eq("http://a/b/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with './g/.' 
should resolve to http://a/b/c/g/" do + expect((@uri + "./g/.").to_s).to eq("http://a/b/c/g/") + expect(Addressable::URI.join(@uri.to_s, "./g/.").to_s).to eq("http://a/b/c/g/") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g/./h' should resolve to http://a/b/c/g/h" do + expect((@uri + "g/./h").to_s).to eq("http://a/b/c/g/h") + expect(Addressable::URI.join(@uri.to_s, "g/./h").to_s).to eq("http://a/b/c/g/h") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g/../h' should resolve to http://a/b/c/h" do + expect((@uri + "g/../h").to_s).to eq("http://a/b/c/h") + expect(Addressable::URI.join(@uri.to_s, "g/../h").to_s).to eq("http://a/b/c/h") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g;x=1/./y' " + + "should resolve to http://a/b/c/g;x=1/y" do + expect((@uri + "g;x=1/./y").to_s).to eq("http://a/b/c/g;x=1/y") + expect(Addressable::URI.join( + @uri.to_s, "g;x=1/./y").to_s).to eq("http://a/b/c/g;x=1/y") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g;x=1/../y' should resolve to http://a/b/c/y" do + expect((@uri + "g;x=1/../y").to_s).to eq("http://a/b/c/y") + expect(Addressable::URI.join( + @uri.to_s, "g;x=1/../y").to_s).to eq("http://a/b/c/y") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g?y/./x' " + + "should resolve to http://a/b/c/g?y/./x" do + expect((@uri + "g?y/./x").to_s).to eq("http://a/b/c/g?y/./x") + expect(Addressable::URI.join( + @uri.to_s, "g?y/./x").to_s).to eq("http://a/b/c/g?y/./x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g?y/../x' " + + "should resolve to http://a/b/c/g?y/../x" do + expect((@uri + "g?y/../x").to_s).to eq("http://a/b/c/g?y/../x") + expect(Addressable::URI.join( + @uri.to_s, "g?y/../x").to_s).to eq("http://a/b/c/g?y/../x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g#s/./x' " + + "should resolve to http://a/b/c/g#s/./x" do + expect((@uri + "g#s/./x").to_s).to eq("http://a/b/c/g#s/./x") + expect(Addressable::URI.join( + @uri.to_s, "g#s/./x").to_s).to eq("http://a/b/c/g#s/./x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g#s/../x' " + + "should resolve to http://a/b/c/g#s/../x" do + expect((@uri + "g#s/../x").to_s).to eq("http://a/b/c/g#s/../x") + expect(Addressable::URI.join( + @uri.to_s, "g#s/../x").to_s).to eq("http://a/b/c/g#s/../x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'http:g' should resolve to http:g" do + expect((@uri + "http:g").to_s).to eq("http:g") + expect(Addressable::URI.join(@uri.to_s, "http:g").to_s).to eq("http:g") + end + + # Edge case to be sure + it "when joined with '//example.com/' should " + + "resolve to http://example.com/" do + expect((@uri + "//example.com/").to_s).to eq("http://example.com/") + expect(Addressable::URI.join( + @uri.to_s, "//example.com/").to_s).to eq("http://example.com/") + end + + it "when joined with a bogus object a TypeError should be raised" do + expect do + Addressable::URI.join(@uri, 42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when converting the path " + + "'relative/path/to/something'" do + before do + @path = 'relative/path/to/something' + end + + it "should convert to " + + "\'relative/path/to/something\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("relative/path/to/something") + end + + it "should join with an absolute file path correctly" do + @base = Addressable::URI.convert_path("/absolute/path/") + @uri = Addressable::URI.convert_path(@path) + expect((@base + @uri).to_str).to eq( + 
"file:///absolute/path/relative/path/to/something" + ) + end +end + +describe Addressable::URI, "when converting a bogus path" do + it "should raise a TypeError" do + expect do + Addressable::URI.convert_path(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when given a UNIX root directory" do + before do + @path = "/" + end + + it "should convert to \'file:///\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given a Windows root directory" do + before do + @path = "C:\\" + end + + it "should convert to \'file:///c:/\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path '/one/two/'" do + before do + @path = '/one/two/' + end + + it "should convert to " + + "\'file:///one/two/\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///one/two/") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the tld " do + it "'uk' should have a tld of 'uk'" do + uri = Addressable::URI.parse("http://example.com") + uri.tld = "uk" + + expect(uri.tld).to eq("uk") + end + + context "which " do + let (:uri) { Addressable::URI.parse("http://www.comrade.net/path/to/source/") } + + it "contains a subdomain" do + uri.tld = "co.uk" + + expect(uri.to_s).to eq("http://www.comrade.co.uk/path/to/source/") + end + + it "is part of the domain" do + uri.tld = "com" + + expect(uri.to_s).to eq("http://www.comrade.com/path/to/source/") + end + end +end + +describe Addressable::URI, "when given the path " + + "'c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file://c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file://c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file:c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an 
origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:/c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file:/c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:///c|/windows/My%20Documents%20100%20/foo.txt'" do + before do + @path = "file:///c|/windows/My%20Documents%20100%20/foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given an http protocol URI" do + before do + @path = "http://example.com/" + end + + it "should not do any conversion at all" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("http://example.com/") + end +end + +class SuperString + def initialize(string) + @string = string.to_s + end + + def to_str + return @string + end +end + +describe Addressable::URI, "when parsing a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.parse(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.parse(42) + end.to raise_error(TypeError) + end + + it "should correctly parse heuristically anything with a 'to_str' method" do + Addressable::URI.heuristic_parse(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.heuristic_parse(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when form encoding a hash" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + [["&one", "/1"], ["=two", "?2"], [":three", "#3"]] + )).to eq("%26one=%2F1&%3Dtwo=%3F2&%3Athree=%233") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"q" => "one two three"} + )).to eq("q=one+two+three") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"key" => nil} + )).to eq("key=") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"q" => ["one", "two", "three"]} + )).to eq("q=one&q=two&q=three") + end + + it "should result in correctly encoded newlines" do + expect(Addressable::URI.form_encode( + {"text" => "one\ntwo\rthree\r\nfour\n\r"} + )).to eq("text=one%0D%0Atwo%0D%0Athree%0D%0Afour%0D%0A%0D%0A") + end + + it "should result in a sorted percent encoded sequence" do + expect(Addressable::URI.form_encode( + [["a", "1"], ["dup", "3"], ["dup", "2"]], true + )).to eq("a=1&dup=2&dup=3") + end +end + +describe Addressable::URI, "when form encoding a non-Array object" do + it "should raise a TypeError for 
objects than cannot be converted" do + expect do + Addressable::URI.form_encode(42) + end.to raise_error(TypeError) + end +end + +# See https://tools.ietf.org/html/rfc6749#appendix-B +describe Addressable::URI, "when form encoding the example value from OAuth 2" do + it "should result in correct values" do + expect(Addressable::URI.form_encode( + {"value" => " %&+£€"} + )).to eq("value=+%25%26%2B%C2%A3%E2%82%AC") + end +end + +# See https://tools.ietf.org/html/rfc6749#appendix-B +describe Addressable::URI, "when form unencoding the example value from OAuth 2" do + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "value=+%25%26%2B%C2%A3%E2%82%AC" + )).to eq([["value", " %&+£€"]]) + end +end + +describe Addressable::URI, "when form unencoding a string" do + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "%26one=%2F1&%3Dtwo=%3F2&%3Athree=%233" + )).to eq([["&one", "/1"], ["=two", "?2"], [":three", "#3"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "q=one+two+three" + )).to eq([["q", "one two three"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "text=one%0D%0Atwo%0D%0Athree%0D%0Afour%0D%0A%0D%0A" + )).to eq([["text", "one\ntwo\nthree\nfour\n\n"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "a=1&dup=2&dup=3" + )).to eq([["a", "1"], ["dup", "2"], ["dup", "3"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "key" + )).to eq([["key", nil]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode("GivenName=Ren%C3%A9")).to eq( + [["GivenName", "René"]] + ) + end +end + +describe Addressable::URI, "when form unencoding a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.form_unencode(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.form_unencode(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when normalizing a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.normalize_component(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.normalize_component(42) + end.to raise_error(TypeError) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.normalize_component("component", 42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when normalizing a path with an encoded slash" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.parse("/path%2Fsegment/").normalize.path).to eq( + "/path%2Fsegment/" + ) + end +end + +describe Addressable::URI, "when normalizing a partially encoded string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "partially % encoded%21" + )).to eq("partially%20%25%20encoded!") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "partially %25 encoded!" 
+ )).to eq("partially%20%25%20encoded!") + end +end + +describe Addressable::URI, "when normalizing a unicode sequence" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "/C%CC%A7" + )).to eq("/%C3%87") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "/%C3%87" + )).to eq("/%C3%87") + end +end + +describe Addressable::URI, "when normalizing a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("günther")).to eq( + "g%C3%BCnther" + ) + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("g%C3%BCnther")).to eq( + "g%C3%BCnther" + ) + end +end + +describe Addressable::URI, "when normalizing a string but leaving some characters encoded" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("%58X%59Y%5AZ", "0-9a-zXY", "Y")).to eq( + "XX%59Y%5A%5A" + ) + end + + it "should not modify the character class" do + character_class = "0-9a-zXY" + + character_class_copy = character_class.dup + + Addressable::URI.normalize_component("%58X%59Y%5AZ", character_class, "Y") + + expect(character_class).to eq(character_class_copy) + end +end + +describe Addressable::URI, "when encoding IP literals" do + it "should work for IPv4" do + input = "http://127.0.0.1/" + expect(Addressable::URI.encode(input)).to eq(input) + end + + it "should work for IPv6" do + input = "http://[fe80::200:f8ff:fe21:67cf]/" + expect(Addressable::URI.encode(input)).to eq(input) + end +end + +describe Addressable::URI, "when encoding a string with existing encodings to upcase" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("JK%4c", "0-9A-IKM-Za-z%", "L")).to eq("%4AK%4C") + end +end + +describe Addressable::URI, "when encoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("günther")).to eq("g%C3%BCnther") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component( + "günther", /[^a-zA-Z0-9\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\-\.\_\~]/ + )).to eq("g%C3%BCnther") + end +end + +describe Addressable::URI, "when form encoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode({"GivenName" => "René"})).to eq( + "GivenName=Ren%C3%A9" + ) + end +end + +describe Addressable::URI, "when encoding a string with ASCII chars 0-15" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("one\ntwo")).to eq("one%0Atwo") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component( + "one\ntwo", /[^a-zA-Z0-9\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\-\.\_\~]/ + )).to eq("one%0Atwo") + end +end + +describe Addressable::URI, "when unencoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.unencode_component("g%C3%BCnther")).to eq("günther") + end + + it "should consistently use UTF-8 internally" do + expect(Addressable::URI.unencode_component("ski=%BA%DAɫ")).to eq("ski=\xBA\xDAɫ") + end + + it "should result in correct percent encoded sequence as a URI" do + expect(Addressable::URI.unencode( + "/path?g%C3%BCnther", ::Addressable::URI + 
)).to eq(Addressable::URI.new( + :path => "/path", :query => "günther" + )) + end +end + +describe Addressable::URI, "when partially unencoding a string" do + it "should unencode all characters by default" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String)).to eq('%%~~++') + end + + it "should unencode characters not in leave_encoded" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String, '~')).to eq('%%~%7e++') + end + + it "should leave characters in leave_encoded alone" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String, '%~+')).to eq('%%25~%7e+%2b') + end +end + +describe Addressable::URI, "when unencoding a bogus object" do + it "should raise a TypeError" do + expect do + Addressable::URI.unencode_component(42) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.unencode("/path?g%C3%BCnther", Integer) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when encoding a bogus object" do + it "should raise a TypeError" do + expect do + Addressable::URI.encode(Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.normalized_encode(Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.encode_component("günther", Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.encode_component(Object.new) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when given the input " + + "'http://example.com/'" do + before do + @input = "http://example.com/" + end + + it "should heuristically parse to 'http://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should not raise error when frozen" do + expect do + Addressable::URI.heuristic_parse(@input).freeze.to_s + end.not_to raise_error + end +end + +describe Addressable::URI, "when given the input " + + "'https://example.com/'" do + before do + @input = "https://example.com/" + end + + it "should heuristically parse to 'https://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("https://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http:example.com/'" do + before do + @input = "http:example.com/" + end + + it "should heuristically parse to 'http://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should heuristically parse to 'http://example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'https:example.com/'" do + before do + @input = "https:example.com/" + end + + it "should heuristically parse to 'https://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("https://example.com/") + end + + it "should heuristically parse to 'https://example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("https://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://example.com/example.com/'" do + before do + @input = "http://example.com/example.com/" + 
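# heuristic_parse is the forgiving entry point exercised by this group of
# examples: unlike parse, it repairs scheme-less or slightly malformed input
# before building the URI, and an explicit scheme in the input wins over the
# :scheme hint. A small sketch, assuming addressable ~> 2.8 and the behaviour
# asserted in the surrounding expectations:
require "addressable/uri"
Addressable::URI.heuristic_parse("http:example.com/").to_s
# => "http://example.com/"
Addressable::URI.heuristic_parse("https:example.com/", {:scheme => 'ftp'}).to_s
# => "https://example.com/"
Addressable::URI.heuristic_parse("example.com").to_s
# => "http://example.com"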
end + + it "should heuristically parse to 'http://example.com/example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix\\.example.com/'" do + before do + @input = "http://prefix\\.example.com/" + end + + it "should heuristically parse to 'http://prefix/.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix") + expect(@uri.to_s).to eq("http://prefix/.example.com/") + end + + it "should heuristically parse to 'http://prefix/.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix/.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p:\\/'" do + before do + @input = "http://p:\\/" + end + + it "should heuristically parse to 'http://p//'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//") + end + + it "should heuristically parse to 'http://p//' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p://'" do + before do + @input = "http://p://" + end + + it "should heuristically parse to 'http://p//'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//") + end + + it "should heuristically parse to 'http://p//' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p://p'" do + before do + @input = "http://p://p" + end + + it "should heuristically parse to 'http://p//p'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//p") + end + + it "should heuristically parse to 'http://p//p' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//p") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix .example.com/'" do + before do + @input = "http://prefix .example.com/" + end + + # Justification here being that no browser actually tries to resolve this. + # They all treat this as a web search. 
+ it "should heuristically parse to 'http://prefix%20.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix%20.example.com") + expect(@uri.to_s).to eq("http://prefix%20.example.com/") + end + + it "should heuristically parse to 'http://prefix%20.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix%20.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "' http://www.example.com/ '" do + before do + @input = " http://www.example.com/ " + end + + it "should heuristically parse to 'http://prefix%20.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.scheme).to eq("http") + expect(@uri.path).to eq("/") + expect(@uri.to_s).to eq("http://www.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix%2F.example.com/'" do + before do + @input = "http://prefix%2F.example.com/" + end + + it "should heuristically parse to 'http://prefix%2F.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix%2F.example.com") + expect(@uri.to_s).to eq("http://prefix%2F.example.com/") + end + + it "should heuristically parse to 'http://prefix%2F.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix%2F.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'/path/to/resource'" do + before do + @input = "/path/to/resource" + end + + it "should heuristically parse to '/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'relative/path/to/resource'" do + before do + @input = "relative/path/to/resource" + end + + it "should heuristically parse to 'relative/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("relative/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com'" do + before do + @input = "example.com" + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com' and a scheme hint of 'ftp'" do + before do + @input = "example.com" + @hints = {:scheme => 'ftp'} + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input, @hints) + expect(@uri.to_s).to eq("ftp://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com:21' and a scheme hint of 'ftp'" do + before do + @input = "example.com:21" + @hints = {:scheme => 'ftp'} + end + + it "should heuristically parse to 'http://example.com:21'" do + @uri = Addressable::URI.heuristic_parse(@input, @hints) + expect(@uri.to_s).to eq("ftp://example.com:21") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com/path/to/resource'" do + before do + @input = "example.com/path/to/resource" + end + + it "should heuristically parse to 'http://example.com/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to 
eq("http://example.com/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'http:///example.com'" do + before do + @input = "http:///example.com" + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input which "\ + "start with digits and has specified port" do + before do + @input = "7777.example.org:8089" + end + + it "should heuristically parse to 'http://7777.example.org:8089'" do + uri = Addressable::URI.heuristic_parse(@input) + expect(uri.to_s).to eq("http://7777.example.org:8089") + end +end + +describe Addressable::URI, "when given the input " + + "'feed:///example.com'" do + before do + @input = "feed:///example.com" + end + + it "should heuristically parse to 'feed://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("feed://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'file://localhost/path/to/resource/'" do + before do + @input = "file://localhost/path/to/resource/" + end + + it "should heuristically parse to 'file:///path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:///path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'file://path/to/resource/'" do + before do + @input = "file://path/to/resource/" + end + + it "should heuristically parse to 'file:///path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:///path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'file://///path/to/resource/'" do + before do + @input = "file:///////path/to/resource/" + end + + it "should heuristically parse to 'file:////path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:////path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'feed://http://example.com'" do + before do + @input = "feed://http://example.com" + end + + it "should heuristically parse to 'feed:http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("feed:http://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "::URI.parse('http://example.com')" do + before do + @input = ::URI.parse('http://example.com') + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input: 'user@domain.com'" do + before do + @input = "user@domain.com" + end + + context "for heuristic parse" do + it "should remain 'mailto:user@domain.com'" do + uri = Addressable::URI.heuristic_parse("mailto:#{@input}") + expect(uri.to_s).to eq("mailto:user@domain.com") + end + + it "should have a scheme of 'mailto'" do + uri = Addressable::URI.heuristic_parse(@input) + expect(uri.to_s).to eq("mailto:user@domain.com") + expect(uri.scheme).to eq("mailto") + end + + it "should remain 'acct:user@domain.com'" do + uri = Addressable::URI.heuristic_parse("acct:#{@input}") + expect(uri.to_s).to eq("acct:user@domain.com") + end + + context "HTTP" do + before do + @uri = Addressable::URI.heuristic_parse("http://#{@input}/") + end + + it "should remain 'http://user@domain.com/'" do + 
expect(@uri.to_s).to eq("http://user@domain.com/") + end + + it "should have the username 'user' for HTTP basic authentication" do + expect(@uri.user).to eq("user") + end + end + end +end + +describe Addressable::URI, "when assigning query values" do + before do + @uri = Addressable::URI.new + end + + it "should correctly assign {:a => 'a', :b => ['c', 'd', 'e']}" do + @uri.query_values = {:a => "a", :b => ["c", "d", "e"]} + expect(@uri.query).to eq("a=a&b=c&b=d&b=e") + end + + it "should raise an error attempting to assign {'a' => {'b' => ['c']}}" do + expect do + @uri.query_values = { 'a' => {'b' => ['c'] } } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:b => '2', :a => {:c => '1'}}" do + expect do + @uri.query_values = {:b => '2', :a => {:c => '1'}} + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => [{:c => 'c', :d => 'd'}, " + + "{:e => 'e', :f => 'f'}]}" do + expect do + @uri.query_values = { + :a => "a", :b => [{:c => "c", :d => "d"}, {:e => "e", :f => "f"}] + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => [{:c => true, :d => 'd'}, " + + "{:e => 'e', :f => 'f'}]}" do + expect do + @uri.query_values = { + :a => 'a', :b => [{:c => true, :d => 'd'}, {:e => 'e', :f => 'f'}] + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => {:c => true, :d => 'd'}}" do + expect do + @uri.query_values = { + :a => 'a', :b => {:c => true, :d => 'd'} + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => {:c => true, :d => 'd'}}" do + expect do + @uri.query_values = { + :a => 'a', :b => {:c => true, :d => 'd'} + } + end.to raise_error(TypeError) + end + + it "should correctly assign {:a => 1, :b => 1.5}" do + @uri.query_values = { :a => 1, :b => 1.5 } + expect(@uri.query).to eq("a=1&b=1.5") + end + + it "should raise an error attempting to assign " + + "{:z => 1, :f => [2, {999.1 => [3,'4']}, ['h', 'i']], " + + ":a => {:b => ['c', 'd'], :e => true, :y => 0.5}}" do + expect do + @uri.query_values = { + :z => 1, + :f => [ 2, {999.1 => [3,'4']}, ['h', 'i'] ], + :a => { :b => ['c', 'd'], :e => true, :y => 0.5 } + } + end.to raise_error(TypeError) + end + + it "should correctly assign {}" do + @uri.query_values = {} + expect(@uri.query).to eq('') + end + + it "should correctly assign nil" do + @uri.query_values = nil + expect(@uri.query).to eq(nil) + end + + it "should correctly sort {'ab' => 'c', :ab => 'a', :a => 'x'}" do + @uri.query_values = {'ab' => 'c', :ab => 'a', :a => 'x'} + expect(@uri.query).to eq("a=x&ab=a&ab=c") + end + + it "should correctly assign " + + "[['b', 'c'], ['b', 'a'], ['a', 'a']]" do + # Order can be guaranteed in this format, so preserve it. 
+ @uri.query_values = [['b', 'c'], ['b', 'a'], ['a', 'a']] + expect(@uri.query).to eq("b=c&b=a&a=a") + end + + it "should preserve query string order" do + query_string = (('a'..'z').to_a.reverse.map { |e| "#{e}=#{e}" }).join("&") + @uri.query = query_string + original_uri = @uri.to_s + @uri.query_values = @uri.query_values(Array) + expect(@uri.to_s).to eq(original_uri) + end + + describe 'when a hash with mixed types is assigned to query_values' do + it 'should not raise an error' do + skip 'Issue #94' + expect { subject.query_values = { "page" => "1", :page => 2 } }.to_not raise_error + end + end +end + +describe Addressable::URI, "when assigning path values" do + before do + @uri = Addressable::URI.new + end + + it "should correctly assign paths containing colons" do + @uri.path = "acct:bob@sporkmonger.com" + expect(@uri.path).to eq("acct:bob@sporkmonger.com") + expect(@uri.normalize.to_str).to eq("acct%2Fbob@sporkmonger.com") + expect { @uri.to_s }.to raise_error( + Addressable::URI::InvalidURIError + ) + end + + it "should correctly assign paths containing colons" do + @uri.path = "/acct:bob@sporkmonger.com" + @uri.authority = "example.com" + expect(@uri.normalize.to_str).to eq("//example.com/acct:bob@sporkmonger.com") + end + + it "should correctly assign paths containing colons" do + @uri.path = "acct:bob@sporkmonger.com" + @uri.scheme = "something" + expect(@uri.normalize.to_str).to eq("something:acct:bob@sporkmonger.com") + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.scheme = "http" + @uri.host = "example.com" + @uri.path = "acct:bob@sporkmonger.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.path = "acct:bob@sporkmonger.com" + @uri.scheme = "http" + @uri.host = "example.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.path = "uuid:0b3ecf60-3f93-11df-a9c3-001f5bfffe12" + @uri.scheme = "urn" + end.not_to raise_error + end +end + +describe Addressable::URI, "when initializing a subclass of Addressable::URI" do + before do + @uri = Class.new(Addressable::URI).new + end + + it "should have the same class after being parsed" do + expect(@uri.class).to eq(Addressable::URI.parse(@uri).class) + end + + it "should have the same class as its duplicate" do + expect(@uri.class).to eq(@uri.dup.class) + end + + it "should have the same class after being normalized" do + expect(@uri.class).to eq(@uri.normalize.class) + end + + it "should have the same class after being merged" do + expect(@uri.class).to eq(@uri.merge(:path => 'path').class) + end + + it "should have the same class after being joined" do + expect(@uri.class).to eq(@uri.join('path').class) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/spec_helper.rb b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/spec_helper.rb new file mode 100644 index 0000000..bd8e395 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/spec/spec_helper.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'bundler/setup' +require 'rspec/its' + +begin + require 'coveralls' + Coveralls.wear! 
do + add_filter "spec/" + add_filter "vendor/" + end +rescue LoadError + warn "warning: coveralls gem not found; skipping Coveralls" + require 'simplecov' + SimpleCov.start do + add_filter "spec/" + add_filter "vendor/" + end +end if Gem.loaded_specs.key?("simplecov") + +class TestHelper + def self.native_supported? + mri = RUBY_ENGINE == "ruby" + windows = RUBY_PLATFORM.include?("mingw") + + mri && !windows + end +end + +RSpec.configure do |config| + config.warnings = true + config.filter_run_when_matching :focus +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/clobber.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/clobber.rake new file mode 100644 index 0000000..a9e32b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/clobber.rake @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +desc "Remove all build products" +task "clobber" diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/gem.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/gem.rake new file mode 100644 index 0000000..1f793ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/gem.rake @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +require "rubygems/package_task" + +namespace :gem do + GEM_SPEC = Gem::Specification.new do |s| + s.name = PKG_NAME + s.version = PKG_VERSION + s.summary = PKG_SUMMARY + s.description = PKG_DESCRIPTION + + s.files = PKG_FILES.to_a + + s.extra_rdoc_files = %w( README.md ) + s.rdoc_options.concat ["--main", "README.md"] + + if !s.respond_to?(:add_development_dependency) + puts "Cannot build Gem with this version of RubyGems." + exit(1) + end + + s.required_ruby_version = ">= 2.0" + + s.add_runtime_dependency "public_suffix", ">= 2.0.2", "< 5.0" + s.add_development_dependency "bundler", ">= 1.0", "< 3.0" + + s.require_path = "lib" + + s.author = "Bob Aman" + s.email = "bob@sporkmonger.com" + s.homepage = "https://github.com/sporkmonger/addressable" + s.license = "Apache-2.0" + end + + Gem::PackageTask.new(GEM_SPEC) do |p| + p.gem_spec = GEM_SPEC + p.need_tar = true + p.need_zip = true + end + + desc "Generates .gemspec file" + task :gemspec do + spec_string = GEM_SPEC.to_ruby + File.open("#{GEM_SPEC.name}.gemspec", "w") do |file| + file.write spec_string + end + end + + desc "Show information about the gem" + task :debug do + puts GEM_SPEC.to_ruby + end + + desc "Install the gem" + task :install => ["clobber", "gem:package"] do + sh "#{SUDO} gem install --local pkg/#{GEM_SPEC.full_name}" + end + + desc "Uninstall the gem" + task :uninstall do + installed_list = Gem.source_index.find_name(PKG_NAME) + if installed_list && + (installed_list.collect { |s| s.version.to_s}.include?(PKG_VERSION)) + sh( + "#{SUDO} gem uninstall --version '#{PKG_VERSION}' " + + "--ignore-dependencies --executables #{PKG_NAME}" + ) + end + end + + desc "Reinstall the gem" + task :reinstall => [:uninstall, :install] + + desc "Package for release" + task :release => ["gem:package", "gem:gemspec"] do |t| + v = ENV["VERSION"] or abort "Must supply VERSION=x.y.z" + abort "Versions don't match #{v} vs #{PROJ.version}" if v != PKG_VERSION + pkg = "pkg/#{GEM_SPEC.full_name}" + + changelog = File.open("CHANGELOG.md") { |file| file.read } + + puts "Releasing #{PKG_NAME} v. 
#{PKG_VERSION}" + Rake::Task["git:tag:create"].invoke + end +end + +desc "Alias to gem:package" +task "gem" => "gem:package" + +task "gem:release" => "gem:gemspec" + +task "clobber" => ["gem:clobber_package"] diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/git.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/git.rake new file mode 100644 index 0000000..1238c8d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/git.rake @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +namespace :git do + namespace :tag do + desc "List tags from the Git repository" + task :list do + tags = `git tag -l` + tags.gsub!("\r", "") + tags = tags.split("\n").sort {|a, b| b <=> a } + puts tags.join("\n") + end + + desc "Create a new tag in the Git repository" + task :create do + changelog = File.open("CHANGELOG.md", "r") { |file| file.read } + puts "-" * 80 + puts changelog + puts "-" * 80 + puts + + v = ENV["VERSION"] or abort "Must supply VERSION=x.y.z" + abort "Versions don't match #{v} vs #{PKG_VERSION}" if v != PKG_VERSION + + git_status = `git status` + if git_status !~ /^nothing to commit/ + abort "Working directory isn't clean." + end + + tag = "#{PKG_NAME}-#{PKG_VERSION}" + msg = "Release #{PKG_NAME}-#{PKG_VERSION}" + + existing_tags = `git tag -l #{PKG_NAME}-*`.split('\n') + if existing_tags.include?(tag) + warn("Tag already exists, deleting...") + unless system "git tag -d #{tag}" + abort "Tag deletion failed." + end + end + puts "Creating git tag '#{tag}'..." + unless system "git tag -a -m \"#{msg}\" #{tag}" + abort "Tag creation failed." + end + end + end +end + +task "gem:release" => "git:tag:create" diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/metrics.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/metrics.rake new file mode 100644 index 0000000..107cc24 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/metrics.rake @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +namespace :metrics do + task :lines do + lines, codelines, total_lines, total_codelines = 0, 0, 0, 0 + for file_name in FileList["lib/**/*.rb"] + f = File.open(file_name) + while line = f.gets + lines += 1 + next if line =~ /^\s*$/ + next if line =~ /^\s*#/ + codelines += 1 + end + puts "L: #{sprintf("%4d", lines)}, " + + "LOC #{sprintf("%4d", codelines)} | #{file_name}" + total_lines += lines + total_codelines += codelines + + lines, codelines = 0, 0 + end + + puts "Total: Lines #{total_lines}, LOC #{total_codelines}" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/profile.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/profile.rake new file mode 100644 index 0000000..b697d48 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/profile.rake @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +namespace :profile do + desc "Profile Template match memory allocations" + task :template_match_memory do + require "memory_profiler" + require "addressable/template" + + start_at = Time.now.to_f + template = Addressable::Template.new("http://example.com/{?one,two,three}") + report = MemoryProfiler.report do + 30_000.times do + template.match( + "http://example.com/?one=one&two=floo&three=me" + ) + end + end + end_at = Time.now.to_f + print_options = { scale_bytes: true, normalize_paths: true } + puts "\n\n" + + if ENV["CI"] + report.pretty_print(print_options) + else + t_allocated = report.scale_bytes(report.total_allocated_memsize) + t_retained = 
report.scale_bytes(report.total_retained_memsize) + + puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)" + puts "Total retained: #{t_retained} (#{report.total_retained} objects)" + puts "Took #{end_at - start_at} seconds" + + FileUtils.mkdir_p("tmp") + report.pretty_print(to_file: "tmp/memprof.txt", **print_options) + end + end + + desc "Profile URI parse memory allocations" + task :memory do + require "memory_profiler" + require "addressable/uri" + if ENV["IDNA_MODE"] == "pure" + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + + start_at = Time.now.to_f + report = MemoryProfiler.report do + 30_000.times do + Addressable::URI.parse( + "http://google.com/stuff/../?with_lots=of¶ms=asdff#!stuff" + ).normalize + end + end + end_at = Time.now.to_f + print_options = { scale_bytes: true, normalize_paths: true } + puts "\n\n" + + if ENV["CI"] + report.pretty_print(**print_options) + else + t_allocated = report.scale_bytes(report.total_allocated_memsize) + t_retained = report.scale_bytes(report.total_retained_memsize) + + puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)" + puts "Total retained: #{t_retained} (#{report.total_retained} objects)" + puts "Took #{end_at - start_at} seconds" + + FileUtils.mkdir_p("tmp") + report.pretty_print(to_file: "tmp/memprof.txt", **print_options) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/rspec.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/rspec.rake new file mode 100644 index 0000000..e3d9f01 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/rspec.rake @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +require "rspec/core/rake_task" + +namespace :spec do + RSpec::Core::RakeTask.new(:simplecov) do |t| + t.pattern = FileList['spec/**/*_spec.rb'] + t.rspec_opts = %w[--color --format documentation] unless ENV["CI"] + end + + namespace :simplecov do + desc "Browse the code coverage report." 
+ task :browse => "spec:simplecov" do + require "launchy" + Launchy.open("coverage/index.html") + end + end +end + +desc "Alias to spec:simplecov" +task "spec" => "spec:simplecov" + +task "clobber" => ["spec:clobber_simplecov"] diff --git a/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/yard.rake b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/yard.rake new file mode 100644 index 0000000..515f960 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/addressable-2.8.0/tasks/yard.rake @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require "rake" + +begin + require "yard" + require "yard/rake/yardoc_task" + + namespace :doc do + desc "Generate Yardoc documentation" + YARD::Rake::YardocTask.new do |yardoc| + yardoc.name = "yard" + yardoc.options = ["--verbose", "--markup", "markdown"] + yardoc.files = FileList[ + "lib/**/*.rb", "ext/**/*.c", + "README.md", "CHANGELOG.md", "LICENSE.txt" + ].exclude(/idna/) + end + end + + task "clobber" => ["doc:clobber_yard"] + + desc "Alias to doc:yard" + task "doc" => "doc:yard" +rescue LoadError + # If yard isn't available, it's not the end of the world + desc "Alias to doc:rdoc" + task "doc" => "doc:rdoc" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/.rspec b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/.rspec new file mode 100644 index 0000000..397921f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/.rspec @@ -0,0 +1,2 @@ +--color +--format d diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/.travis.yml new file mode 100644 index 0000000..4c090ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/.travis.yml @@ -0,0 +1,33 @@ +language: ruby + +branches: + only: + - master + +rvm: +- 1.9.3 +- 2.0 +- 2.1 +- 2.2 +- 2.3 +- 2.4 +- 2.5 +- jruby +- rbx-3 + +matrix: + allow_failures: + - rvm: rbx-3 + - rvm: jruby + include: + - rvm: 1.8.7 + dist: precise + + +cache: bundler + +before_script: + - wget https://alg.li/algolia-keys && chmod +x algolia-keys + +script: + - if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [[ ! "$TRAVIS_PULL_REQUEST_SLUG" =~ ^algolia\/ ]]; then eval $(./algolia-keys export) && bundle exec rspec --tag ~maintainers_only; else bundle exec rspec; fi diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/CHANGELOG.md new file mode 100755 index 0000000..26969f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/CHANGELOG.md @@ -0,0 +1,454 @@ +# ChangeLog + +## Unreleased + +## [1.27.4](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.3...1.27.4) (2020-09-16) + +**Fixed** + +* Retrieve all objects when using `copy_index` from `AccountClient` class ([399](https://github.com/algolia/algoliasearch-client-ruby/pull/399)) + +## [1.27.3](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.2...1.27.3) (2020-06-03) + +**Fixed** + +* Replace expired certificate within embedded certificate chain ([9087dd1](https://github.com/algolia/algoliasearch-client-ruby/commit/9087dd14a97bf77c9391a3360c4803edf686086d)) + +## [1.27.2](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.1...1.27.2) (2020-04-28) + +**Fixed** + +* In `search_user_id`, retrieve param `cluster` instead of `clusterName`. 
[368](https://github.com/algolia/algoliasearch-client-ruby/issues/368) + +## [1.27.1](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.0...1.27.1) (2019-09-26) + +**Fixed** + +* Update `Algolia::Index.exists` method to `Algolia::Index.exists?`. [364](https://github.com/algolia/algoliasearch-client-ruby/issues/364) + +## [1.27.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.27.0) (2019-09-16) + +**Added** + +* Introduce `Algolia::Index.exists` method. [358](https://github.com/algolia/algoliasearch-client-ruby/issues/358) + + Check whether an index exists or not. + +* Introduce `Algolia::Index.find_object` method. [359](https://github.com/algolia/algoliasearch-client-ruby/issues/359) + + Find object by the given condition. + +* Introduce `Algolia::Index.get_object_position` method. [359](https://github.com/algolia/algoliasearch-client-ruby/issues/359) + + Retrieve the given object position in a set of results. + +* Introduce `Algolia.get_secured_api_key_remaining_validity` method. [361](https://github.com/algolia/algoliasearch-client-ruby/issues/361) + + Returns the remaining validity time for the given API key in seconds. + + +## [1.26.1](https://github.com/algolia/algoliasearch-client-ruby/compare/1.26.0...1.26.1) (2019-07-31) + +### Chore + +- stop using coveralls because of a GPL-licensed transitive dep ([d2fbe8c](https://github.com/algolia/algoliasearch-client-ruby/commit/d2fbe8c)) + + +[1.26.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.26.0) (2019-02-12) + +**Added** + +* Introduce `Algolia.restore_api_key` method. + + If you delete your API key by mistake, you can now restore it via + this new method. This especially useful if this key is used in a + mobile app or somewhere you can't update easily. 
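To make the `restore_api_key` entry above concrete, here is a minimal illustrative sketch (not part of the original changelog). The application ID, admin API key, and the deleted key value are placeholders, and the single-argument call form is an assumption based on the method name:

```ruby
require 'algoliasearch'

# Placeholders: substitute your own application ID and admin API key.
Algolia.init(application_id: 'YourApplicationID', api_key: 'YourAdminAPIKey')

# Restore an API key that was deleted by mistake (added in 1.26.0).
# Assumed argument form: the value of the deleted key.
Algolia.restore_api_key('TheDeletedKeyValue')
```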
+ + +## [1.25.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.2) (2018-12-19) + +## [1.25.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.1) (2018-12-19) + +**Fixed** + +* Missing `insights.rb` in gemspec - [7d2f3ab](https://github.com/algolia/algoliasearch-client-ruby/commit/7d2f3abe6e4338f0f7364f6f52ac1d371f066464) + +## [1.25.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.0) (2018-12-19) + +**Added** + +* Introduce Insights client to send events to Algolia Insights API - [326](https://github.com/algolia/algoliasearch-client-ruby/issues/326) + +* Add `multiple_get_objects` to retrieve objects by objectID across multiple indices - [329](https://github.com/algolia/algoliasearch-client-ruby/issues/329) + +**Modified** + +* Use the correct `hitsPerPage` key when exporting index resources - [319](https://github.com/algolia/algoliasearch-client-ruby/issues/319) + +## [1.24.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.24.0) (2018-11-28) + +* Feat(adds-account-client-copy-index): adds `copy_index` to account client ([#324](https://github.com/algolia/algoliasearch-client-ruby/pull/324)) +* Feat(replace-all-methods): adds `replace_all_rules`, `replace_all_objects` and `replace_all_synonyms` to search index ([#323](https://github.com/algolia/algoliasearch-client-ruby/pull/323)) +* Feat(scoped-copy-methods): adds `copy_settings`, `copy_synonyms` and `copy_rules` to search client ([#322](https://github.com/algolia/algoliasearch-client-ruby/pull/322)) + +## [1.23.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.23.2) (2018-06-19) + +* Fix(analytics): gem without new analytics class (#306) + +## [1.23.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.23.0) (2018-06-19) + +* Feat(analytics): introduce new analytics class +* Chore(rake): use unshift to keep compat with older ruby versions +* Ruby version must be after client version in ua +* Fix ua tests with new format +* Rewrite ua +* Feat(ua): add ruby version +* Fix(syntax): this isn't php +* Tests(mcm): use unique userid everytime +* Tests(mcm): introduce auto_retry for more stable tests +* Feat(client): expose waittask in the client (#302) +* Fix(travis): always test on the latest patches (#295) + +## [1.22.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.22.0) (2018-05-30) + +* Rename license file (#297) +* Fix release task (#294) +* Introduce multi cluster management (#285) +* Fix(browse): ensure cursor is passed in the body (#288) +* Chore(md): update contribution-related files (#293) + +## [1.21.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.21.0) (2018-05-24) + +* Fix(tests): fix warning for unspecified exception (#287) +* Fix release task missing github link (#291) +* Api review (#292) + +## [1.20.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.20.1) (2018-05-15) + +* Fix changelog link in gemspec (#290) +* Utils: move to changelog.md and add rake task for release (#289) + +## [1.20.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.20.0) (2018-05-07) + +* Feat: deprecate api keys methods on index in favor of client ones (#286) +* Chore(gemfile): remove useless dependencies (#280) +* Fix(env): adding default env var (#279) +* Chore(travis): test against Rubinius 3 (#281) +* Fix: prevent saving a rule with an empty `objectID` (#283) + +## 
[1.19.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.2) (2018-04-03) + +* Fix `Algolia.delete_index` wrong number of arguments (#277) + +## [1.19.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.1) (2017-12-18) + +* Fix hard dependency on `hashdiff` (#262) + +## [1.19.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.0) (2017-12-15) + +* Add request options to any method using API calls (#213) +* Add `export_synonyms` index method (#260) +* Add `export_rules` index method (#261) + +## [1.18.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.5) (2017-12-07) + +* Fix missing requirement + +## [1.18.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.4) (2017-12-06) + +* Remove remaining unnecessary requirements (#256) +* Remove Gemfile.lock (#257) + +## [1.18.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.3) (2017-12-04) + +* Remove Bundler and RubyGems requirements (#253) + +## [1.18.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.2) (2017-11-28) + +* Add (undocumented) gzip option to disable gzip (#240) + +## [1.18.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.1) (2017-11-15) + +* Fix `get_logs` always returning type `all` (#244) +* New scopes to `copy_index` method (#243) + +## [1.18.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.0) (2017-11-02) + +* Allow to reuse the webmocks using `Algolia::WebMock.mock!` (#256) + +## [1.17.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.17.0) (2017-10-10) + +* New `delete_by` method + +## [1.16.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.16.0) (2017-09-14) + +* New Query Rules API + +## [1.15.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.15.1) (2017-08-17) + +* Fixed regression introduced in 1.15.0 + +## [1.15.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.15.0) (2017-08-17) + +* Make `delete_by_query` not `wait_task` by default (also, make it mockable) +* Add a new `delete_by_query!` doing the last `wait_task` + +## [1.14.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.14.0) (2017-07-31) + +* Ability to override the underlying user-agent + +## [1.13.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.13.0) (2017-03-17) + +* Add a `index.get_task_status(taskID)` method (#199) + +## [1.12.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.7) (2017-03-01) + +* Renamed all `*_user_key` methods to `*_api_key` + +## [1.12.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.6) (2017-01-25) + +* Upgraded `httpclient` to 2.8.3 + +## [1.12.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.5) (2016-12-07) + +* Fixed retry strategy not keeping the `current_host` first (#163) + +## [1.12.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.4) (2016-12-07) + +* Fix DNS tests + +## [1.12.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.3) (2016-12-06) + +* Allow for multiple clients on different app ids on the same thread + +## [1.12.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.2) (2016-12-05) + +* Fix client scoped methods + +## [1.12.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.1) (2016-11-25) + +* Rename `search_facet` to 
`search_for_facet_values` + +## [1.12.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.0) (2016-10-31) + +* Add `search_facet` + +## [1.11.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.11.0) (2016-08-21) + +* Upgraded to httpclient 2.8.1 to avoid reseting the keep-alive while changing timeouts + +## [1.10.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.10.0) (2016-07-11) + +* `{get,set}_settings` now take optional custom query parameters + +## [1.9.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.9.0) (2016-06-17) + +* New synonyms API + +## [1.8.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.8.1) (2016-04-14) + +* Ensure we're using an absolute path for the `ca-bundle.crt` file (could fix some `OpenSSL::X509::StoreError: system lib` errors) + +## [1.8.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.8.0) (2016-04-06) + +* Upgraded to `httpclient` 2.7.1 (includes ruby 2.3.0 deprecation fixes) +* Upgraded WebMock URLs + +## [1.7.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.7.0) (2016-01-09) + +* New `generate_secured_api_key` embedding the filters in the resulting key + +## [1.6.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.6.1) (2015-08-01) + +* Search queries are now using POST requests + +## [1.6.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.6.0) (2015-07-19) + +* Ability to instantiate multiple API clients in the same process (was using a class variable before). + +## [1.5.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.5.1) (2015-07-14) + +* Ability to retrieve a single page from a cursor with `browse_from` + +## [1.5.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.5.0) (2015-06-05) + +* New cursor-based `browse()` implementation taking query parameters + +## [1.4.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.3) (2015-05-27) + +* Do not call `WebMock.disable!` in the helper + +## [1.4.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4,2) (2015-05-04) + +* Add new methods to add/update api key +* Add batch method to target multiple indices +* Add strategy parameter for the multipleQueries +* Add new method to generate secured api key from query parameters + +## [1.4.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.1) (2015-04-10) + +* Force the default connect/read/write/search/batch timeouts to Algolia-specific values + +## [1.4.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.0) (2015-03-17) + +* High-available DNS: search queries are now targeting `APPID-DSN.algolia.net` first, then the main cluster using NSOne, then the main cluster using Route53. +* High-available DNS: Indexing queries are targeting `APPID.algolia.net` first, then the main cluster using NSOne, then the main cluster using Route53. 
+ +## [1.3.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.3.1) (2014-11-29) + +* Fixed wrong deployed version (1.3.0 was based on 1.2.13 instead of 1.2.14) + +## [1.3.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.3.0) (2014-11-29) + +* Use `algolia.net` domain instead of `algolia.io` + +## [1.2.14](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.14) (2014-11-10) + +* Force the underlying `httpclient` dependency to be >= 2.4 in the gemspec as well +* Ability to force the SSL version + +## [1.2.13](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.13) (2014-10-22) + +* Fix the loop on hosts to retry when the http code is different than 200, 201, 400, 403, 404 + +## [1.2.12](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.12) (2014-10-08) + +* Upgrade to `httpclient` 2.4 +* Do not reset the timeout on each requests + +## [1.2.11](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.11) (2014-09-14) + +* Ability to update API keys + +## [1.2.10](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.10) (2014-08-22) + +* Using Digest to remove "Digest::Digest is deprecated; Use Digest" warning (author: @dglancy) + +## [1.2.9](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.9) (2014-07-10) + +* Expose `connect_timeout`, `receive_timeout` and `send_timeout` +* Add new `delete_by_query` method to delete all objects matching a specific query +* Add new `get_objects` method to retrieve a list of objects from a single API call +* Add a helper to perform disjunctive faceting + +## [1.2.8](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.8) (2014-03-27) + +* Catch all exceptions before retrying with another host + +## [1.2.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.7) (2014-03-24) + +* Ruby 1.8 compatibility + +## [1.2.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.6) (2014-03-19) + +* Raise an exception if no `APPLICATION_ID` is provided +* Ability to get last API call errors +* Ability to send multiple queries using a single API call +* Secured API keys generation is now based on secured HMAC-SHA256 + +## [1.2.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.5) (2014-02-24) + +* Ability to generate secured API key from a list of tags + optional `user_token` +* Ability to specify a list of indexes targeted by the user key + +## [1.2.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.4) (2014-02-21) + +* Add `delete_objects` + +## [1.2.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.3) (2014-02-10) + +* `add_object`: POST request if `objectID` is `nil` OR `empty` + +## [1.2.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.2) (2014-01-11) + +* Expose `batch` requests + +## [1.2.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.1) (2014-01-07) + +* Removed `jeweler` since it doesn't support platform specific deps (see https://github.com/technicalpickles/jeweler/issues/170) + +## [1.2.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.0) (2014-01-07) + +* Removed `curb` dependency and switched on `httpclient` to avoid fork-safety issue (see issue #5) + +## [1.1.18](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.18) (2014-01-06) + +* Fixed batch request builder (broken since last refactoring) + +## 
[1.1.17](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.17) (2014-01-02) + +* Ability to use IP rate limit behind a proxy forwarding the end-user's IP +* Add documentation for the new `attributeForDistinct` setting and `distinct` search parameter + +## [1.1.16](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.16) (2013-12-16) + +* Add arguments type-checking +* Normalize save_object/partial_update/add_object signatures +* Force dependencies versions + +## [1.1.15](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.15) (2013-12-16) + +* Embed ca-bundle.crt + +## [1.1.14](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.14) (2013-12-11) + +* Added `index.add_user_key(acls, validity, rate_limit, maxApiCalls)`` + +## [1.1.13](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.13) (2013-12-10) + +* WebMock integration + +## [1.1.12](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.12) (2013-12-05) + +* Add `browse` command + +## [1.1.11](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.11) (2013-11-29) + +* Remove rubysl (rbx required dependencies) + +## [1.1.10](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.10) (2013-11-29) + +* Fixed gzip handling bug + +## [1.1.9](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.9) (2013-11-28) + +* Added gzip support + +## [1.1.8](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.8) (2013-11-26) + +* Add `partial_update_objects` method + +## [1.1.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.7) (2013-11-08) + +* Search params: encode array-based parameters (`tagFilters`, `facetFilters`, ...) 
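As an illustrative sketch of the array-based parameter encoding mentioned in the 1.1.7 entry above (not part of the original changelog); the credentials, index name, query, and tag values are placeholders:

```ruby
require 'algoliasearch'

# Placeholders: substitute your own credentials and index name.
Algolia.init(application_id: 'YourApplicationID', api_key: 'YourAPIKey')
index = Algolia::Index.new('contacts')

# `tagFilters` is an array-based search parameter; the client encodes it
# before sending the request. A nested array expresses an OR between tags.
index.search('algolia', tagFilters: ['ruby', ['beta', 'stable']])
```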
+ +## [1.1.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.6) (2013-11-07) + +* Index: `clear` and `clear!` methods can now be used the delete the whole content of an index +* User keys: plug new `maxQueriesPerIPPerHour` and `maxHitsPerQuery` parameters + +## [1.1.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.5) (2013-10-17) + +* Version is now part of the user-agent + +## [1.1.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.4) (2013-10-17) + +* Fixed `wait_task` not sleeping at all + +## [1.1.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.3) (2013-10-15) + +* Fixed thread-safety issues +* Curl sessions are now thread-local + +## [1.1.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.2) (2013-10-02) + +* Fixed instance/class method conflict + +## [1.1.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.1) (2013-10-01) + +* Updated documentation +* Plug copy/move index + +## [1.1.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.0) (2013-09-17) + +* Initial import diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Gemfile b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Gemfile new file mode 100644 index 0000000..e05662e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Gemfile @@ -0,0 +1,28 @@ +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# Load algoliasearch.gemspec dependencies +gemspec + +# See https://github.com/algolia/algoliasearch-client-ruby/pull/257/files/36bcd0b1c4d05776dcbdb362c15a609c81f41cde +if Gem::Version.new(RUBY_VERSION) <= Gem::Version.new('1.9.3') + gem 'hashdiff', '< 0.3.6' # Hashdiff 0.3.6 no longer supports Ruby 1.8 + gem 'highline', '< 1.7.0' + gem 'mime-types', '< 2.0' + gem 'rubysl', '~> 2.0', :platform => :rbx +else + gem 'rubysl', '~> 2.2', :platform => :rbx +end + +group :development do + gem 'rake' + gem 'rdoc' + gem 'travis' +end + +group :test do + gem 'rspec', '>= 2.5.0' + gem 'webmock' + gem 'simplecov' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Gemfile.lock b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Gemfile.lock new file mode 100644 index 0000000..998dc7c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Gemfile.lock @@ -0,0 +1,99 @@ +PATH + remote: . 
+ specs: + algoliasearch (1.27.4) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + +GEM + remote: https://rubygems.org/ + specs: + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + backports (3.18.2) + connection_pool (2.2.3) + crack (0.4.3) + safe_yaml (~> 1.0.0) + diff-lcs (1.4.4) + docile (1.3.2) + ethon (0.12.0) + ffi (>= 1.3.0) + faraday (0.17.3) + multipart-post (>= 1.2, < 3) + faraday_middleware (0.14.0) + faraday (>= 0.7.4, < 1.0) + ffi (1.13.1) + gh (0.14.0) + addressable + backports + faraday (~> 0.8) + multi_json (~> 1.0) + net-http-persistent (>= 2.7) + net-http-pipeline + hashdiff (1.0.1) + highline (1.7.10) + httpclient (2.8.3) + json (2.3.1) + launchy (2.5.0) + addressable (~> 2.7) + multi_json (1.15.0) + multipart-post (2.1.1) + net-http-persistent (4.0.0) + connection_pool (~> 2.2) + net-http-pipeline (1.0.1) + public_suffix (4.0.6) + pusher-client (0.6.2) + json + websocket (~> 1.0) + rake (13.0.1) + rdoc (6.2.1) + rspec (3.9.0) + rspec-core (~> 3.9.0) + rspec-expectations (~> 3.9.0) + rspec-mocks (~> 3.9.0) + rspec-core (3.9.2) + rspec-support (~> 3.9.3) + rspec-expectations (3.9.2) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-mocks (3.9.1) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-support (3.9.3) + safe_yaml (1.0.5) + simplecov (0.19.0) + docile (~> 1.1) + simplecov-html (~> 0.11) + simplecov-html (0.12.2) + travis (1.8.13) + backports + faraday (~> 0.9) + faraday_middleware (~> 0.9, >= 0.9.1) + gh (~> 0.13) + highline (~> 1.6) + launchy (~> 2.1) + pusher-client (~> 0.4) + typhoeus (~> 0.6, >= 0.6.8) + typhoeus (0.8.0) + ethon (>= 0.8.0) + webmock (3.8.3) + addressable (>= 2.3.6) + crack (>= 0.3.2) + hashdiff (>= 0.4.0, < 2.0.0) + websocket (1.2.8) + +PLATFORMS + ruby + +DEPENDENCIES + algoliasearch! + rake + rdoc + rspec (>= 2.5.0) + rubysl (~> 2.2) + simplecov + travis + webmock + +BUNDLED WITH + 1.17.2 diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/LICENSE b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/LICENSE new file mode 100644 index 0000000..fddf416 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-Present Algolia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/README.md b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/README.md new file mode 100644 index 0000000..a19b944 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/README.md @@ -0,0 +1,61 @@ +
+ <!-- README header: centered HTML banner and badge markup stripped in this view -->
+ # Algolia for Ruby
+
+ The perfect starting point to integrate Algolia within your Ruby project
+
+ Badges: Build Status • Gem Version • License
+
+ Links: Documentation • Rails • Community Forum • Stack Overflow • Report a bug • FAQ • Support
+ +## ✨ Features + +- Thin & minimal low-level HTTP client to interact with Algolia's API +- Supports Ruby `^1.8.7`. + +## 💡 Getting Started + +First, install Algolia Ruby API Client via the [RubyGems](https://rubygems.org/) package manager: +```bash +gem install algoliasearch +``` + +Then, create objects on your index: + + +```ruby +Algolia.init(application_id: 'YourApplicationID', + api_key: 'YourAPIKey') +index = Algolia::Index.new('your_index_name') + +index.save_objects([objectID: 1, name: 'Foo']) +``` + +Finally, you may begin searching a object using the `search` method: +```ruby +objects = index.search('Fo') +``` + +For full documentation, visit the **[Algolia Ruby API Client](https://www.algolia.com/doc/api-client/getting-started/install/ruby/)**. + +## ❓ Troubleshooting + +Encountering an issue? Before reaching out to support, we recommend heading to our [FAQ](https://www.algolia.com/doc/api-client/troubleshooting/faq/ruby/) where you will find answers for the most common issues and gotchas with the client. + +## 📄 License + +Algolia Ruby API Client is an open-sourced software licensed under the [MIT license](LICENSE.md). diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Rakefile b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Rakefile new file mode 100644 index 0000000..b328a8b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/Rakefile @@ -0,0 +1,111 @@ +# encoding: utf-8 + +require 'bundler/gem_tasks' + +begin + Bundler.setup(:default, :development) +rescue Bundler::BundlerError => e + $stderr.puts e.message + $stderr.puts "Run `bundle install` to install missing gems" + exit e.status_code +end +require 'rake' + +require File.expand_path('../lib/algolia/version', __FILE__) + +require 'rake/testtask' +Rake::TestTask.new(:test) do |test| + test.libs << 'lib' << 'test' + test.pattern = 'test/**/test_*.rb' + test.verbose = true +end + +require 'rdoc/task' +Rake::RDocTask.new do |rdoc| + version = Algolia::VERSION + rdoc.rdoc_dir = 'rdoc' + rdoc.title = "algoliasearch #{version}" + rdoc.rdoc_files.include('README*') + rdoc.rdoc_files.include('lib/**/*.rb') +end + +require 'rspec/core/rake_task' +RSpec::Core::RakeTask.new(:spec) + +task :default => :spec + +namespace :algolia do + GEM_VERSION_FILE = File.expand_path('../lib/algolia/version.rb', __FILE__) + GIT_TAG_URL = 'https://github.com/algolia/algoliasearch-client-ruby/releases/tag/' + + def last_commit_date + `git log -1 --date=short --format=%cd`.chomp + end + + def latest_tag + `git describe --tags --abbrev=0`.chomp + end + + def changelog(git_start = latest_tag(), git_end = 'HEAD', format = '%s') + `git log --no-decorate --no-merges --pretty=format:#{format} #{git_start}..#{git_end}` + end + + desc 'Write latest changes to CHANGELOG.md' + task :changelog, [:version] do |t, args| + # Filters-out commits containing some keywords and adds header + exceptions_regexp = Regexp.union(['README']) + title = "## [%s](%s%s) (%s)\n\n" % [args[:version], GIT_TAG_URL, args[:version], last_commit_date] + changes = changelog.each_line + .map { |line| (exceptions_regexp === line) ? nil : "* #{line.capitalize}" } + .unshift(title) + .append("\n\n") + .join + + puts changes + puts "\n\e[31mDo you want to update the CHANGELOG.md with the text above? 
[y/N]\e[0m" + exit if STDIN.gets.chomp.downcase != 'y' + + # Rewrite CHANGELOG.md + old_changes = File.readlines('CHANGELOG.md', 'r').join + File.open('CHANGELOG.md', 'w') { |file| file.write(changes, old_changes) } + + puts 'CHANGELOG.md successfully updated' + end + + desc 'Bump gem version' + task :semver, [:version] do |t, args| + + File.open(GEM_VERSION_FILE, 'w') do |file| + file.write <<~SEMVER + module Algolia + VERSION = "#{args[:version]}" + end + SEMVER + end + + # This force to reload the file with the newest version. + # Hence, `gemspec.version` invoked by Bundler later on will be correct. + load GEM_VERSION_FILE + + puts "Bumped gem version from #{latest_tag} to #{args[:version]}" + end + + desc 'Release a new version of this gem' + task :release, [:version] => [:changelog, :semver] do |t, args| + `git add #{File.expand_path('../lib/algolia/version.rb', __FILE__)} CHANGELOG.md` + `git commit -m "Bump to version #{args[:version]}"` + + # Invoke Bundler :release task + # https://github.com/bundler/bundler/blob/master/lib/bundler/gem_helper.rb + # + Rake::Task[:release].invoke + end +end + +module Bundler + class GemHelper + def version_tag + "#{version}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec new file mode 100644 index 0000000..eeb620c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec @@ -0,0 +1,86 @@ +# -*- encoding: utf-8 -*- + +require 'date' +require File.join(File.dirname(__FILE__), 'lib', 'algolia', 'version') + +Gem::Specification.new do |s| + s.name = "algoliasearch" + s.version = Algolia::VERSION + s.authors = ["Algolia"] + s.email = "contact@algolia.com" + + s.date = Date.today + s.licenses = ["MIT"] + s.summary = "A simple Ruby client for the algolia.com REST API" + s.description = "A simple Ruby client for the algolia.com REST API" + s.homepage = "https://github.com/algolia/algoliasearch-client-ruby" + + s.metadata = { + "bug_tracker_uri" => "https://github.com/algolia/algoliasearch-client-ruby/issues", + "changelog_uri" => "https://github.com/algolia/algoliasearch-client-ruby/blob/master/CHANGELOG.md", + "documentation_uri" => "http://www.rubydoc.info/gems/algoliasearch", + "homepage_uri" => "https://www.algolia.com/doc/api-client/ruby/getting-started/", + "source_code_uri" => "https://github.com/algolia/algoliasearch-client-ruby" + } + + s.post_install_message = "A new major version is available for Algolia! Please now use the https://rubygems.org/gems/algolia gem to get the latest features." + + s.require_paths = ["lib"] + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + + s.extra_rdoc_files = [ + "CHANGELOG.md", + "LICENSE", + "README.md" + ] + s.files = [ + ".rspec", + ".travis.yml", + "CHANGELOG.md", + "Gemfile", + "Gemfile.lock", + "LICENSE", + "README.md", + "Rakefile", + "algoliasearch.gemspec", + "contacts.json", + "lib/algolia/analytics.rb", + "lib/algolia/account_client.rb", + "lib/algolia/client.rb", + "lib/algolia/error.rb", + "lib/algolia/index.rb", + "lib/algolia/insights.rb", + "lib/algolia/protocol.rb", + "lib/algolia/version.rb", + "lib/algolia/webmock.rb", + "lib/algoliasearch.rb", + "resources/ca-bundle.crt", + "spec/account_client_spec.rb", + "spec/client_spec.rb", + "spec/mock_spec.rb", + "spec/spec_helper.rb", + "spec/stub_spec.rb" + ] + + if s.respond_to? 
:specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + if defined?(RUBY_VERSION) && RUBY_VERSION < '2.0' + s.add_runtime_dependency 'json', '>= 1.5.1', '< 2.3' + else + s.add_runtime_dependency 'json', '>= 1.5.1' + end + s.add_runtime_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_development_dependency 'travis', '~> 0' + s.add_development_dependency 'rake', '~> 0' + s.add_development_dependency 'rdoc', '~> 0' + else + s.add_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_dependency 'json', '>= 1.5.1', '< 2.3' + end + else + s.add_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_dependency 'json', '>= 1.5.1', '< 2.3' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/contacts.json b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/contacts.json new file mode 100644 index 0000000..8ba54fa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/contacts.json @@ -0,0 +1,7504 @@ +[ + { + "firstname": "Essie", + "lastname": "Vaill", + "company": "Litronic Industries", + "address": "14225 Hancock Dr", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99515", + "phone": "907-345-0962", + "fax": "907-345-1215", + "email": "essie@vaill.com", + "web": "http://www.essievaill.com", + "followers": 3574 + }, + { + "firstname": "Cruz", + "lastname": "Roudabush", + "company": "Meridian Products", + "address": "2202 S Central Ave", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85004", + "phone": "602-252-4827", + "fax": "602-252-4009", + "email": "cruz@roudabush.com", + "web": "http://www.cruzroudabush.com", + "followers": 6548 + }, + { + "firstname": "Billie", + "lastname": "Tinnes", + "company": "D & M Plywood Inc", + "address": "28 W 27th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10001", + "phone": "212-889-5775", + "fax": "212-889-5764", + "email": "billie@tinnes.com", + "web": "http://www.billietinnes.com", + "followers": 3536 + }, + { + "firstname": "Zackary", + "lastname": "Mockus", + "company": "Metropolitan Elevator Co", + "address": "286 State St", + "city": "Perth Amboy", + "county": "Middlesex", + "state": "NJ", + "zip": "08861", + "phone": "732-442-0638", + "fax": "732-442-5218", + "email": "zackary@mockus.com", + "web": "http://www.zackarymockus.com", + "followers": 1497 + }, + { + "firstname": "Rosemarie", + "lastname": "Fifield", + "company": "Technology Services", + "address": "3131 N Nimitz Hwy #-105", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-836-8966", + "fax": "808-836-6008", + "email": "rosemarie@fifield.com", + "web": "http://www.rosemariefifield.com", + "followers": 4812 + }, + { + "firstname": "Bernard", + "lastname": "Laboy", + "company": "Century 21 Keewaydin Prop", + "address": "22661 S Frontage Rd", + "city": "Channahon", + "county": "Will", + "state": "IL", + "zip": "60410", + "phone": "815-467-0487", + "fax": "815-467-1244", + "email": "bernard@laboy.com", + "web": "http://www.bernardlaboy.com", + "followers": 6891 + }, + { + "firstname": "Sue", + "lastname": "Haakinson", + "company": "Kim Peacock Beringhause", + "address": "9617 N Metro Pky W", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85051", + "phone": "602-953-2753", + "fax": "602-953-0355", + "email": "sue@haakinson.com", + "web": "http://www.suehaakinson.com", + "followers": 5787 + }, + { + "firstname": "Valerie", + 
"lastname": "Pou", + "company": "Sea Port Record One Stop Inc", + "address": "7475 Hamilton Blvd", + "city": "Trexlertown", + "county": "Lehigh", + "state": "PA", + "zip": "18087", + "phone": "610-395-8743", + "fax": "610-395-6995", + "email": "valerie@pou.com", + "web": "http://www.valeriepou.com", + "followers": 8990 + }, + { + "firstname": "Lashawn", + "lastname": "Hasty", + "company": "Kpff Consulting Engineers", + "address": "815 S Glendora Ave", + "city": "West Covina", + "county": "Los Angeles", + "state": "CA", + "zip": "91790", + "phone": "626-960-6738", + "fax": "626-960-1503", + "email": "lashawn@hasty.com", + "web": "http://www.lashawnhasty.com", + "followers": 2131 + }, + { + "firstname": "Marianne", + "lastname": "Earman", + "company": "Albers Technologies Corp", + "address": "6220 S Orange Blossom Trl", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32809", + "phone": "407-857-0431", + "fax": "407-857-2506", + "email": "marianne@earman.com", + "web": "http://www.marianneearman.com", + "followers": 1992 + }, + { + "firstname": "Justina", + "lastname": "Dragaj", + "company": "Uchner, David D Esq", + "address": "2552 Poplar Ave", + "city": "Memphis", + "county": "Shelby", + "state": "TN", + "zip": "38112", + "phone": "901-327-5336", + "fax": "901-327-2911", + "email": "justina@dragaj.com", + "web": "http://www.justinadragaj.com", + "followers": 7149 + }, + { + "firstname": "Mandy", + "lastname": "Mcdonnell", + "company": "Southern Vermont Surveys", + "address": "343 Bush St Se", + "city": "Salem", + "county": "Marion", + "state": "OR", + "zip": "97302", + "phone": "503-371-8219", + "fax": "503-371-1118", + "email": "mandy@mcdonnell.com", + "web": "http://www.mandymcdonnell.com", + "followers": 1329 + }, + { + "firstname": "Conrad", + "lastname": "Lanfear", + "company": "Kahler, Karen T Esq", + "address": "49 Roche Way", + "city": "Youngstown", + "county": "Mahoning", + "state": "OH", + "zip": "44512", + "phone": "330-758-0314", + "fax": "330-758-3536", + "email": "conrad@lanfear.com", + "web": "http://www.conradlanfear.com", + "followers": 2906 + }, + { + "firstname": "Cyril", + "lastname": "Behen", + "company": "National Paper & Envelope Corp", + "address": "1650 S Harbor Blvd", + "city": "Anaheim", + "county": "Orange", + "state": "CA", + "zip": "92802", + "phone": "714-772-5050", + "fax": "714-772-3859", + "email": "cyril@behen.com", + "web": "http://www.cyrilbehen.com", + "followers": 7784 + }, + { + "firstname": "Shelley", + "lastname": "Groden", + "company": "Norton, Robert L Esq", + "address": "110 Broadway St", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78205", + "phone": "210-229-3017", + "fax": "210-229-9757", + "email": "shelley@groden.com", + "web": "http://www.shelleygroden.com", + "followers": 2012 + }, + { + "firstname": "Rosalind", + "lastname": "Krenzke", + "company": "Waldein Manufacturing", + "address": "7000 Bass Lake Rd #-200", + "city": "Minneapolis", + "county": "Hennepin", + "state": "MN", + "zip": "55428", + "phone": "763-537-4194", + "fax": "763-537-3885", + "email": "rosalind@krenzke.com", + "web": "http://www.rosalindkrenzke.com", + "followers": 5547 + }, + { + "firstname": "Davis", + "lastname": "Brevard", + "company": "Transit Trading Corp", + "address": "6715 Tippecanoe Rd", + "city": "Canfield", + "county": "Mahoning", + "state": "OH", + "zip": "44406", + "phone": "330-533-6346", + "fax": "330-533-8211", + "email": "davis@brevard.com", + "web": "http://www.davisbrevard.com", + "followers": 4259 
+ }, + { + "firstname": "Winnie", + "lastname": "Reich", + "company": "Pacific Seating Co", + "address": "1535 Hawkins Blvd", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79925", + "phone": "915-771-2730", + "fax": "915-771-5729", + "email": "winnie@reich.com", + "web": "http://www.winniereich.com", + "followers": 6621 + }, + { + "firstname": "Trudy", + "lastname": "Worlds", + "company": "Shapiro, Mark R Esq", + "address": "24907 Tibbitts Aven #-b", + "city": "Valencia", + "county": "Los Angeles", + "state": "CA", + "zip": "91355", + "phone": "661-257-3083", + "fax": "661-257-0924", + "email": "trudy@worlds.com", + "web": "http://www.trudyworlds.com", + "followers": 5782 + }, + { + "firstname": "Deshawn", + "lastname": "Inafuku", + "company": "Telair Div Of Teleflex Inc", + "address": "3508 Leopard St", + "city": "Corpus Christi", + "county": "Nueces", + "state": "TX", + "zip": "78408", + "phone": "361-884-8433", + "fax": "361-884-3985", + "email": "deshawn@inafuku.com", + "web": "http://www.deshawninafuku.com", + "followers": 1195 + }, + { + "firstname": "Claudio", + "lastname": "Loose", + "company": "Audiotek Electronics", + "address": "286 State St", + "city": "Perth Amboy", + "county": "Middlesex", + "state": "NJ", + "zip": "08861", + "phone": "732-442-8514", + "fax": "732-442-1775", + "email": "claudio@loose.com", + "web": "http://www.claudioloose.com", + "followers": 6043 + }, + { + "firstname": "Sal", + "lastname": "Pindell", + "company": "Wrigley, Robert I Esq", + "address": "1112 Se 1st St", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47713", + "phone": "812-421-4804", + "fax": "812-421-7625", + "email": "sal@pindell.com", + "web": "http://www.salpindell.com", + "followers": 4359 + }, + { + "firstname": "Cristina", + "lastname": "Sharper", + "company": "Tax Office", + "address": "111 W 40th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10018", + "phone": "212-719-3952", + "fax": "212-719-0754", + "email": "cristina@sharper.com", + "web": "http://www.cristinasharper.com", + "followers": 4823 + }, + { + "firstname": "Betty Jane", + "lastname": "Mccamey", + "company": "Vita Foods Inc", + "address": "100 E Broad St", + "city": "Columbus", + "county": "Franklin", + "state": "OH", + "zip": "43215", + "phone": "614-225-0900", + "fax": "614-225-1612", + "email": "cary@mccamey.com", + "web": "http://www.carymccamey.com", + "followers": 8863 + }, + { + "firstname": "Haley", + "lastname": "Rocheford", + "company": "Davis, Robert L Esq", + "address": "6030 Greenwood Plaza Blvd", + "city": "Englewood", + "county": "Arapahoe", + "state": "CO", + "zip": "80111", + "phone": "303-689-7729", + "fax": "303-689-5219", + "email": "haley@rocheford.com", + "web": "http://www.haleyrocheford.com", + "followers": 9872 + }, + { + "firstname": "Dorothea", + "lastname": "Sweem", + "company": "Ehrmann, Rolfe F Esq", + "address": "100 Thanet Circ", + "city": "Trenton", + "county": "Mercer", + "state": "NJ", + "zip": "08648", + "phone": "609-896-5871", + "fax": "609-896-2099", + "email": "dorothea@sweem.com", + "web": "http://www.dorotheasweem.com", + "followers": 8897 + }, + { + "firstname": "Fannie", + "lastname": "Steese", + "company": "Chiapete, W Richard Esq", + "address": "926 E Park Ave", + "city": "Tallahassee", + "county": "Leon", + "state": "FL", + "zip": "32301", + "phone": "850-222-8103", + "fax": "850-222-0105", + "email": "fannie@steese.com", + "web": "http://www.fanniesteese.com", + "followers": 7140 + }, + 
{ + "firstname": "Allyson", + "lastname": "Gillispie", + "company": "De Friese Theo & Sons", + "address": "1722 White Horse Mercerville R", + "city": "Trenton", + "county": "Mercer", + "state": "NJ", + "zip": "08619", + "phone": "609-584-1794", + "fax": "609-584-0645", + "email": "allyson@gillispie.com", + "web": "http://www.allysongillispie.com", + "followers": 1414 + }, + { + "firstname": "Roger", + "lastname": "Seid", + "company": "Yoshida, Gerald C Esq", + "address": "3738 N Monroe St", + "city": "Tallahassee", + "county": "Leon", + "state": "FL", + "zip": "32303", + "phone": "850-422-1535", + "fax": "850-422-8152", + "email": "roger@seid.com", + "web": "http://www.rogerseid.com", + "followers": 6661 + }, + { + "firstname": "Dollie", + "lastname": "Daquino", + "company": "Jd Edwards & Co", + "address": "1005 Congress Ave", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78701", + "phone": "512-478-9636", + "fax": "512-478-9874", + "email": "dollie@daquino.com", + "web": "http://www.dolliedaquino.com", + "followers": 5262 + }, + { + "firstname": "Eva", + "lastname": "Seahorn", + "company": "Saunders Appraisal Inc", + "address": "3 Northern Blvd", + "city": "Amherst", + "county": "Hillsborough", + "state": "NH", + "zip": "03031", + "phone": "603-673-6072", + "fax": "603-673-5009", + "email": "eva@seahorn.com", + "web": "http://www.evaseahorn.com", + "followers": 9192 + }, + { + "firstname": "Mamie", + "lastname": "Mcintee", + "company": "Jacobs, Brian Realtor", + "address": "2810 Jacobs Ave", + "city": "Eureka", + "county": "Humboldt", + "state": "CA", + "zip": "95501", + "phone": "707-443-0621", + "fax": "707-443-9147", + "email": "mamie@mcintee.com", + "web": "http://www.mamiemcintee.com", + "followers": 6954 + }, + { + "firstname": "Lyndon", + "lastname": "Bellerdine", + "company": "A B C Lock & Key", + "address": "200 California St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94111", + "phone": "415-705-1956", + "fax": "415-705-2887", + "email": "lyndon@bellerdine.com", + "web": "http://www.lyndonbellerdine.com", + "followers": 146 + }, + { + "firstname": "Lashonda", + "lastname": "Derouen", + "company": "Travel Agent Magazine", + "address": "101 Royal St", + "city": "Alexandria", + "county": "Alexandria City", + "state": "VA", + "zip": "22314", + "phone": "703-684-3394", + "fax": "703-684-0307", + "email": "lashonda@derouen.com", + "web": "http://www.lashondaderouen.com", + "followers": 3792 + }, + { + "firstname": "Jacklyn", + "lastname": "Emayo", + "company": "Super 8 Motel Temple", + "address": "101 Us Highway 46", + "city": "Fairfield", + "county": "Essex", + "state": "NJ", + "zip": "07004", + "phone": "973-882-3960", + "fax": "973-882-1908", + "email": "jacklyn@emayo.com", + "web": "http://www.jacklynemayo.com", + "followers": 4575 + }, + { + "firstname": "Rubin", + "lastname": "Crotts", + "company": "Misko, Charles G Esq", + "address": "303 N Indian Canyon Dr", + "city": "Palm Springs", + "county": "Riverside", + "state": "CA", + "zip": "92262", + "phone": "760-327-0337", + "fax": "760-327-0929", + "email": "rubin@crotts.com", + "web": "http://www.rubincrotts.com", + "followers": 4736 + }, + { + "firstname": "Boris", + "lastname": "Catino", + "company": "Dream Homes Usa Inc", + "address": "645 Church St", + "city": "Norfolk", + "county": "Norfolk City", + "state": "VA", + "zip": "23510", + "phone": "757-627-8408", + "fax": "757-627-6195", + "email": "boris@catino.com", + "web": "http://www.boriscatino.com", + 
"followers": 2330 + }, + { + "firstname": "Jannie", + "lastname": "Bowditch", + "company": "Lindsays Landing Rv Pk & Mrna", + "address": "1102 Main St", + "city": "Grandview", + "county": "Jackson", + "state": "MO", + "zip": "64030", + "phone": "816-765-0961", + "fax": "816-765-3469", + "email": "jannie@bowditch.com", + "web": "http://www.janniebowditch.com", + "followers": 7302 + }, + { + "firstname": "Colin", + "lastname": "Altonen", + "company": "Smith, Arthur D Esq", + "address": "1201 18th St", + "city": "Denver", + "county": "Denver", + "state": "CO", + "zip": "80202", + "phone": "303-292-5477", + "fax": "303-292-4239", + "email": "colin@altonen.com", + "web": "http://www.colinaltonen.com", + "followers": 2587 + }, + { + "firstname": "Kerry", + "lastname": "Evertt", + "company": "Washington Crossing Inn", + "address": "337 S North Lake Blvd", + "city": "Altamonte Springs", + "county": "Seminole", + "state": "FL", + "zip": "32701", + "phone": "407-332-9851", + "fax": "407-332-1718", + "email": "kerry@evertt.com", + "web": "http://www.kerryevertt.com", + "followers": 6663 + }, + { + "firstname": "Kathie", + "lastname": "Argenti", + "company": "Microtel Franchise & Dev Corp", + "address": "410 Front St", + "city": "Brainerd", + "county": "Crow Wing", + "state": "MN", + "zip": "56401", + "phone": "218-828-9253", + "fax": "218-828-1401", + "email": "kathie@argenti.com", + "web": "http://www.kathieargenti.com", + "followers": 6260 + }, + { + "firstname": "Henrietta", + "lastname": "Cintora", + "company": "Keyes, Judith Droz Esq", + "address": "1063 Fm Wzzw", + "city": "Milton", + "county": "Cabell", + "state": "WV", + "zip": "25541", + "phone": "304-743-5440", + "fax": "304-743-7475", + "email": "henrietta@cintora.com", + "web": "http://www.henriettacintora.com", + "followers": 9622 + }, + { + "firstname": "Mariano", + "lastname": "Maury", + "company": "Donut & Sandwich Shop", + "address": "1092 Saint Georges Ave", + "city": "Rahway", + "county": "Union", + "state": "NJ", + "zip": "07065", + "phone": "732-388-1579", + "fax": "732-388-9355", + "email": "mariano@maury.com", + "web": "http://www.marianomaury.com", + "followers": 6254 + }, + { + "firstname": "Lottie", + "lastname": "Voll", + "company": "Mason, Joseph G Esq", + "address": "55 E 10th Ave", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97401", + "phone": "541-342-7282", + "fax": "541-342-4657", + "email": "lottie@voll.com", + "web": "http://www.lottievoll.com", + "followers": 1092 + }, + { + "firstname": "Ofelia", + "lastname": "Sheffler", + "company": "Rimpsey Agency", + "address": "628 Pacific Ave", + "city": "Oxnard", + "county": "Ventura", + "state": "CA", + "zip": "93030", + "phone": "805-483-7161", + "fax": "805-483-5693", + "email": "ofelia@sheffler.com", + "web": "http://www.ofeliasheffler.com", + "followers": 1096 + }, + { + "firstname": "Gaston", + "lastname": "Cieloszyk", + "company": "Center For Hope Hospice Inc", + "address": "1160 Wccs", + "city": "Homer City", + "county": "Indiana", + "state": "PA", + "zip": "15748", + "phone": "724-479-0355", + "fax": "724-479-7077", + "email": "gaston@cieloszyk.com", + "web": "http://www.gastoncieloszyk.com", + "followers": 7409 + }, + { + "firstname": "Karla", + "lastname": "Ken", + "company": "Nicollet Process Engineering", + "address": "2135 11th St", + "city": "Rockford", + "county": "Winnebago", + "state": "IL", + "zip": "61104", + "phone": "815-968-0369", + "fax": "815-968-7904", + "email": "karla@ken.com", + "web": "http://www.karlaken.com", + 
"followers": 1296 + }, + { + "firstname": "Avery", + "lastname": "Parbol", + "company": "Schlackman, William H", + "address": "22343 Se Stark St", + "city": "Gresham", + "county": "Multnomah", + "state": "OR", + "zip": "97030", + "phone": "503-666-1948", + "fax": "503-666-9241", + "email": "avery@parbol.com", + "web": "http://www.averyparbol.com", + "followers": 3515 + }, + { + "firstname": "John", + "lastname": "Chipley", + "company": "Manpower Temporary Services", + "address": "2 Horizon Rd #-2", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-224-7741", + "fax": "201-224-7282", + "email": "john@chipley.com", + "web": "http://www.johnchipley.com", + "followers": 7710 + }, + { + "firstname": "Luella", + "lastname": "Pliner", + "company": "United Waste Systems", + "address": "3437 N 12th Ave", + "city": "Pensacola", + "county": "Escambia", + "state": "FL", + "zip": "32503", + "phone": "850-434-2521", + "fax": "850-434-5228", + "email": "luella@pliner.com", + "web": "http://www.luellapliner.com", + "followers": 5191 + }, + { + "firstname": "Elvira", + "lastname": "Blumenthal", + "company": "Stell Mortgage Investors", + "address": "108 Washington St", + "city": "Keokuk", + "county": "Lee", + "state": "IA", + "zip": "52632", + "phone": "319-524-6237", + "fax": "319-524-9435", + "email": "elvira@blumenthal.com", + "web": "http://www.elvirablumenthal.com", + "followers": 6616 + }, + { + "firstname": "Tyree", + "lastname": "Courrege", + "company": "Stitch Craft", + "address": "13201 Northwest Fwy", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77040", + "phone": "713-690-9216", + "fax": "713-690-4043", + "email": "tyree@courrege.com", + "web": "http://www.tyreecourrege.com", + "followers": 5210 + }, + { + "firstname": "Ramon", + "lastname": "Amaral", + "company": "Air Academy Federal Credit Un", + "address": "700 W Highway 287", + "city": "Lander", + "county": "Fremont", + "state": "WY", + "zip": "82520", + "phone": "307-332-2667", + "fax": "307-332-3893", + "email": "ramon@amaral.com", + "web": "http://www.ramonamaral.com", + "followers": 8659 + }, + { + "firstname": "Wilfredo", + "lastname": "Gidley", + "company": "Mclaughlin, John F Esq", + "address": "2255 Kuhio Ave #-1203", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-924-2610", + "fax": "808-924-7666", + "email": "wilfredo@gidley.com", + "web": "http://www.wilfredogidley.com", + "followers": 8860 + }, + { + "firstname": "Gracie", + "lastname": "Ehn", + "company": "P C Systems", + "address": "Kahala", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96816", + "phone": "808-247-8624", + "fax": "808-247-7982", + "email": "gracie@ehn.com", + "web": "http://www.gracieehn.com", + "followers": 2870 + }, + { + "firstname": "Dorthy", + "lastname": "Alexy", + "company": "Frank Siviglia & Co", + "address": "Pearlridge", + "city": "Aiea", + "county": "Honolulu", + "state": "HI", + "zip": "96701", + "phone": "808-247-4421", + "fax": "808-247-7192", + "email": "dorthy@alexy.com", + "web": "http://www.dorthyalexy.com", + "followers": 1029 + }, + { + "firstname": "Bertie", + "lastname": "Luby", + "company": "Puckett, Dennis L Esq", + "address": "Windward", + "city": "Kaneohe", + "county": "Honolulu", + "state": "HI", + "zip": "96744", + "phone": "808-247-8062", + "fax": "808-247-2529", + "email": "bertie@luby.com", + "web": "http://www.bertieluby.com", + "followers": 2660 + }, + { + "firstname": "Rudy", + 
"lastname": "Kuhle", + "company": "General Insurcorp Inc", + "address": "1418 3rd Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10028", + "phone": "212-628-9987", + "fax": "212-628-1234", + "email": "rudy@kuhle.com", + "web": "http://www.rudykuhle.com", + "followers": 7201 + }, + { + "firstname": "Gale", + "lastname": "Nolau", + "company": "Lust, C James Esq", + "address": "104 N Aurora St", + "city": "Ithaca", + "county": "Tompkins", + "state": "NY", + "zip": "14850", + "phone": "607-277-1567", + "fax": "607-277-6524", + "email": "gale@nolau.com", + "web": "http://www.galenolau.com", + "followers": 6842 + }, + { + "firstname": "Kenya", + "lastname": "Bruni", + "company": "Hurley, Thomas J Jr", + "address": "280 N Midland Ave", + "city": "Saddle Brook", + "county": "Bergen", + "state": "NJ", + "zip": "07663", + "phone": "201-646-9077", + "fax": "201-646-8526", + "email": "kenya@bruni.com", + "web": "http://www.kenyabruni.com", + "followers": 9368 + }, + { + "firstname": "Tricia", + "lastname": "Kruss", + "company": "Edwards, Elwood L", + "address": "4685 Ne 14th St", + "city": "Des Moines", + "county": "Polk", + "state": "IA", + "zip": "50313", + "phone": "515-262-3267", + "fax": "515-262-6264", + "email": "tricia@kruss.com", + "web": "http://www.triciakruss.com", + "followers": 9671 + }, + { + "firstname": "Mack", + "lastname": "Jurasin", + "company": "Sherman, Michael D Esq", + "address": "1180 Dora Hwy", + "city": "Pulaski", + "county": "Pulaski", + "state": "VA", + "zip": "24301", + "phone": "540-980-4958", + "fax": "540-980-2978", + "email": "mack@jurasin.com", + "web": "http://www.mackjurasin.com", + "followers": 4557 + }, + { + "firstname": "Margarito", + "lastname": "Kornbau", + "company": "Acker Knitting Mills Inc", + "address": "303 W 15th St", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78701", + "phone": "512-478-0371", + "fax": "512-478-4449", + "email": "margarito@kornbau.com", + "web": "http://www.margaritokornbau.com", + "followers": 2072 + }, + { + "firstname": "Lucien", + "lastname": "Iurato", + "company": "Anderson Consulting", + "address": "3918 16th Ave", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11218", + "phone": "718-871-7952", + "fax": "718-871-3483", + "email": "lucien@iurato.com", + "web": "http://www.lucieniurato.com", + "followers": 9434 + }, + { + "firstname": "Jarvis", + "lastname": "Galas", + "company": "Younghans & Burke", + "address": "307 E President St", + "city": "Savannah", + "county": "Chatham", + "state": "GA", + "zip": "31401", + "phone": "912-236-8524", + "fax": "912-236-8705", + "email": "jarvis@galas.com", + "web": "http://www.jarvisgalas.com", + "followers": 2359 + }, + { + "firstname": "Billie", + "lastname": "Cowley", + "company": "Spears, Robert M Esq", + "address": "1700 Street Rd", + "city": "Warrington", + "county": "Bucks", + "state": "PA", + "zip": "18976", + "phone": "215-548-0842", + "fax": "215-548-4706", + "email": "billie@cowley.com", + "web": "http://www.billiecowley.com", + "followers": 2416 + }, + { + "firstname": "Jacinto", + "lastname": "Gawron", + "company": "Matt Kokkonen Insurance Agency", + "address": "1740 House", + "city": "Lumberville", + "county": "Bucks", + "state": "PA", + "zip": "18933", + "phone": "215-297-0120", + "fax": "215-297-5442", + "email": "jacinto@gawron.com", + "web": "http://www.jacintogawron.com", + "followers": 310 + }, + { + "firstname": "Randall", + "lastname": "Kluemper", + "company": "Lifestyles Organization", + 
"address": "Rt 16", + "city": "North Conway", + "county": "Carroll", + "state": "NH", + "zip": "03860", + "phone": "603-356-3217", + "fax": "603-356-6174", + "email": "randall@kluemper.com", + "web": "http://www.randallkluemper.com", + "followers": 5669 + }, + { + "firstname": "Enrique", + "lastname": "Oroark", + "company": "Callaghan, Kathleen M Esq", + "address": "34 W 17th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10011", + "phone": "212-366-5568", + "fax": "212-366-6877", + "email": "enrique@oroark.com", + "web": "http://www.enriqueoroark.com", + "followers": 3911 + }, + { + "firstname": "Alva", + "lastname": "Pennigton", + "company": "Citizens Savings Bank", + "address": "1275 County Road 210 W", + "city": "Jacksonville", + "county": "Saint Johns", + "state": "FL", + "zip": "32259", + "phone": "904-260-2345", + "fax": "904-260-3735", + "email": "alva@pennigton.com", + "web": "http://www.alvapennigton.com", + "followers": 7564 + }, + { + "firstname": "Socorro", + "lastname": "Balandran", + "company": "Mooring", + "address": "401 S Main St", + "city": "Greensburg", + "county": "Westmoreland", + "state": "PA", + "zip": "15601", + "phone": "724-834-6908", + "fax": "724-834-8831", + "email": "socorro@balandran.com", + "web": "http://www.socorrobalandran.com", + "followers": 7056 + }, + { + "firstname": "Nadia", + "lastname": "Wilshire", + "company": "Midwest Undercar Distributors", + "address": "1801 West Ave S", + "city": "La Crosse", + "county": "La Crosse", + "state": "WI", + "zip": "54601", + "phone": "608-788-4965", + "fax": "608-788-5946", + "email": "nadia@wilshire.com", + "web": "http://www.nadiawilshire.com", + "followers": 9311 + }, + { + "firstname": "Reginald", + "lastname": "Humes", + "company": "Cowley & Chidester", + "address": "44 N Main St", + "city": "Wolfeboro", + "county": "Carroll", + "state": "NH", + "zip": "03894", + "phone": "603-569-7730", + "fax": "603-569-8142", + "email": "reginald@humes.com", + "web": "http://www.reginaldhumes.com", + "followers": 8347 + }, + { + "firstname": "Lynda", + "lastname": "Caraway", + "company": "Lowe Art Museum", + "address": "1822 Spring Garden St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19130", + "phone": "215-564-3171", + "fax": "215-564-2241", + "email": "lynda@caraway.com", + "web": "http://www.lyndacaraway.com", + "followers": 3853 + }, + { + "firstname": "Saundra", + "lastname": "Mcaulay", + "company": "Rcf Inc", + "address": "2401 Cleveland Rd W", + "city": "Huron", + "county": "Erie", + "state": "OH", + "zip": "44839", + "phone": "419-433-5558", + "fax": "419-433-9756", + "email": "saundra@mcaulay.com", + "web": "http://www.saundramcaulay.com", + "followers": 1620 + }, + { + "firstname": "Allan", + "lastname": "Schwantd", + "company": "Micro Wire Products", + "address": "406 Ne 3rd St", + "city": "McMinnville", + "county": "Yamhill", + "state": "OR", + "zip": "97128", + "phone": "503-434-9666", + "fax": "503-434-3863", + "email": "allan@schwantd.com", + "web": "http://www.allanschwantd.com", + "followers": 6069 + }, + { + "firstname": "Wilmer", + "lastname": "Constantineau", + "company": "Nutra Source", + "address": "1745 W 18th Ave", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97402", + "phone": "541-345-4729", + "fax": "541-345-4884", + "email": "wilmer@constantineau.com", + "web": "http://www.wilmerconstantineau.com", + "followers": 1648 + }, + { + "firstname": "Savannah", + "lastname": "Kesich", + "company": "Wbnd Am", + 
"address": "221 Main", + "city": "Park City", + "county": "Summit", + "state": "UT", + "zip": "84060", + "phone": "435-645-0986", + "fax": "435-645-9504", + "email": "savannah@kesich.com", + "web": "http://www.savannahkesich.com", + "followers": 7325 + }, + { + "firstname": "Dwain", + "lastname": "Cuttitta", + "company": "Kintech Stamping Inc", + "address": "1919 Connecticut Ave Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20009", + "phone": "202-265-7854", + "fax": "202-265-9475", + "email": "dwain@cuttitta.com", + "web": "http://www.dwaincuttitta.com", + "followers": 8300 + }, + { + "firstname": "Krystle", + "lastname": "Stika", + "company": "Signature Inn", + "address": "3730 Fm", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77068", + "phone": "281-537-5324", + "fax": "281-537-3235", + "email": "krystle@stika.com", + "web": "http://www.krystlestika.com", + "followers": 2603 + }, + { + "firstname": "Felipe", + "lastname": "Gould", + "company": "Black, Ronald H", + "address": "2308 Bienville Blvd", + "city": "Ocean Springs", + "county": "Jackson", + "state": "MS", + "zip": "39564", + "phone": "228-875-2811", + "fax": "228-875-6402", + "email": "felipe@gould.com", + "web": "http://www.felipegould.com", + "followers": 9237 + }, + { + "firstname": "Steve", + "lastname": "Schorr", + "company": "Midwest Fire Protection Inc", + "address": "1810 N King St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-842-7045", + "fax": "808-842-7338", + "email": "steve@schorr.com", + "web": "http://www.steveschorr.com", + "followers": 1468 + }, + { + "firstname": "Naomi", + "lastname": "Caetano", + "company": "Bashlin Industries Inc", + "address": "50 Spring St #-1", + "city": "Cresskill", + "county": "Bergen", + "state": "NJ", + "zip": "07626", + "phone": "201-569-3572", + "fax": "201-569-5795", + "email": "naomi@caetano.com", + "web": "http://www.naomicaetano.com", + "followers": 1743 + }, + { + "firstname": "Melody", + "lastname": "Saddat", + "company": "Richards, Edward W Esq", + "address": "3540 S 84th St", + "city": "Omaha", + "county": "Douglas", + "state": "NE", + "zip": "68124", + "phone": "402-397-0581", + "fax": "402-397-8391", + "email": "melody@saddat.com", + "web": "http://www.melodysaddat.com", + "followers": 2442 + }, + { + "firstname": "Mitchel", + "lastname": "Harnar", + "company": "Copycat Quick Print", + "address": "1810 Pioneer Ave", + "city": "Cheyenne", + "county": "Laramie", + "state": "WY", + "zip": "82001", + "phone": "307-632-0256", + "fax": "307-632-2516", + "email": "mitchel@harnar.com", + "web": "http://www.mitchelharnar.com", + "followers": 4662 + }, + { + "firstname": "Sharlene", + "lastname": "Circelli", + "company": "Calibron Systems Inc", + "address": "4018 W Clearwater Ave", + "city": "Kennewick", + "county": "Benton", + "state": "WA", + "zip": "99336", + "phone": "509-783-5167", + "fax": "509-783-7346", + "email": "sharlene@circelli.com", + "web": "http://www.sharlenecircelli.com", + "followers": 6539 + }, + { + "firstname": "Sean", + "lastname": "Bonnet", + "company": "Corporate Alternatives Inc", + "address": "3043 Ridge Rd", + "city": "Lansing", + "county": "Cook", + "state": "IL", + "zip": "60438", + "phone": "708-474-4766", + "fax": "708-474-0011", + "email": "sean@bonnet.com", + "web": "http://www.seanbonnet.com", + "followers": 867 + }, + { + "firstname": "Travis", + "lastname": "Brockert", + "company": "Santa Cruz Title Co", + "address": "7828 
N 19th Ave", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85021", + "phone": "602-995-1362", + "fax": "602-995-0966", + "email": "travis@brockert.com", + "web": "http://www.travisbrockert.com", + "followers": 7421 + }, + { + "firstname": "Candice", + "lastname": "Bruckman", + "company": "Fernando Foods Inc", + "address": "611 1st Ave N", + "city": "Humboldt", + "county": "Humboldt", + "state": "IA", + "zip": "50548", + "phone": "515-332-0809", + "fax": "515-332-9083", + "email": "candice@bruckman.com", + "web": "http://www.candicebruckman.com", + "followers": 7084 + }, + { + "firstname": "Mabel", + "lastname": "Weeden", + "company": "Pepsi Cola Gen Bottlers Inc", + "address": "300 E Phillips St", + "city": "Richardson", + "county": "Dallas", + "state": "TX", + "zip": "75081", + "phone": "972-235-5619", + "fax": "972-235-1843", + "email": "mabel@weeden.com", + "web": "http://www.mabelweeden.com", + "followers": 2674 + }, + { + "firstname": "Armando", + "lastname": "Papik", + "company": "Cryogenic Society Of America", + "address": "615 W Markham St", + "city": "Little Rock", + "county": "Pulaski", + "state": "AR", + "zip": "72201", + "phone": "501-376-4154", + "fax": "501-376-0608", + "email": "armando@papik.com", + "web": "http://www.armandopapik.com", + "followers": 7152 + }, + { + "firstname": "Kevin", + "lastname": "Edd", + "company": "Peebles, William J Esq", + "address": "64 Dyerville Ave", + "city": "Johnston", + "county": "Providence", + "state": "RI", + "zip": "02919", + "phone": "401-453-8514", + "fax": "401-453-7085", + "email": "kevin@edd.com", + "web": "http://www.kevinedd.com", + "followers": 3568 + }, + { + "firstname": "Raphael", + "lastname": "Bickel", + "company": "S Shamash & Sons Inc", + "address": "550 N Brand Blvd #-800", + "city": "Glendale", + "county": "Los Angeles", + "state": "CA", + "zip": "91203", + "phone": "818-246-1195", + "fax": "818-246-4734", + "email": "raphael@bickel.com", + "web": "http://www.raphaelbickel.com", + "followers": 1365 + }, + { + "firstname": "Darren", + "lastname": "Merlin", + "company": "Pozzuolo & Perkiss Pc", + "address": "550 N Edward St", + "city": "Decatur", + "county": "Macon", + "state": "IL", + "zip": "62522", + "phone": "217-428-0453", + "fax": "217-428-1491", + "email": "darren@merlin.com", + "web": "http://www.darrenmerlin.com", + "followers": 7653 + }, + { + "firstname": "Francis", + "lastname": "Soo", + "company": "Allen Industrial Supply", + "address": "218 W Main St", + "city": "Sparta", + "county": "Monroe", + "state": "WI", + "zip": "54656", + "phone": "608-269-7306", + "fax": "608-269-3359", + "email": "francis@soo.com", + "web": "http://www.francissoo.com", + "followers": 2482 + }, + { + "firstname": "Nelly", + "lastname": "Jakuboski", + "company": "Hammerman, Stanley M Esq", + "address": "103 Main St", + "city": "Ridgefield", + "county": "Fairfield", + "state": "CT", + "zip": "06877", + "phone": "203-438-9250", + "fax": "203-438-5109", + "email": "nelly@jakuboski.com", + "web": "http://www.nellyjakuboski.com", + "followers": 5338 + }, + { + "firstname": "Mitzi", + "lastname": "Ihenyen", + "company": "Helm, Norman O", + "address": "979 3rd Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-838-8303", + "fax": "212-838-3221", + "email": "mitzi@ihenyen.com", + "web": "http://www.mitziihenyen.com", + "followers": 9264 + }, + { + "firstname": "Kathleen", + "lastname": "Beresnyak", + "company": "R & E Associates", + "address": "100 W 25th Ave", + 
"city": "San Mateo", + "county": "San Mateo", + "state": "CA", + "zip": "94403", + "phone": "650-349-6809", + "fax": "650-349-5962", + "email": "kathleen@beresnyak.com", + "web": "http://www.kathleenberesnyak.com", + "followers": 2853 + }, + { + "firstname": "Adela", + "lastname": "Cervantsz", + "company": "Arizona Awards", + "address": "102 5th St N", + "city": "Clanton", + "county": "Chilton", + "state": "AL", + "zip": "35045", + "phone": "205-755-4137", + "fax": "205-755-1034", + "email": "adela@cervantsz.com", + "web": "http://www.adelacervantsz.com", + "followers": 9876 + }, + { + "firstname": "Randal", + "lastname": "Gansen", + "company": "Quik Print", + "address": "1 First Federal Plz", + "city": "Rochester", + "county": "Monroe", + "state": "NY", + "zip": "14614", + "phone": "585-238-8558", + "fax": "585-238-7764", + "email": "randal@gansen.com", + "web": "http://www.randalgansen.com", + "followers": 4019 + }, + { + "firstname": "Alyssa", + "lastname": "Biasotti", + "company": "Johnson Hardware Co", + "address": "22 James St", + "city": "Middletown", + "county": "Orange", + "state": "NY", + "zip": "10940", + "phone": "845-343-1878", + "fax": "845-343-5354", + "email": "alyssa@biasotti.com", + "web": "http://www.alyssabiasotti.com", + "followers": 3684 + }, + { + "firstname": "Janet", + "lastname": "Schaffter", + "company": "Hall, Camden M Esq", + "address": "131 Rimbach St", + "city": "Hammond", + "county": "Lake", + "state": "IN", + "zip": "46320", + "phone": "219-853-9283", + "fax": "219-853-9329", + "email": "janet@schaffter.com", + "web": "http://www.janetschaffter.com", + "followers": 2431 + }, + { + "firstname": "Armando", + "lastname": "Kolm", + "company": "Cooper & Cooper Cpas", + "address": "201 N Main St", + "city": "Anderson", + "county": "Anderson", + "state": "SC", + "zip": "29621", + "phone": "864-260-3642", + "fax": "864-260-9205", + "email": "armando@kolm.com", + "web": "http://www.armandokolm.com", + "followers": 4357 + }, + { + "firstname": "Gil", + "lastname": "Scarpa", + "company": "Hughes, James D Esq", + "address": "12 E Broad St", + "city": "Hazleton", + "county": "Luzerne", + "state": "PA", + "zip": "18201", + "phone": "570-459-9281", + "fax": "570-459-5191", + "email": "gil@scarpa.com", + "web": "http://www.gilscarpa.com", + "followers": 7691 + }, + { + "firstname": "Vanessa", + "lastname": "Lewallen", + "company": "Fargo Glass & Paint Co", + "address": "5 E Main", + "city": "Centerburg", + "county": "Knox", + "state": "OH", + "zip": "43011", + "phone": "740-625-8098", + "fax": "740-625-1696", + "email": "vanessa@lewallen.com", + "web": "http://www.vanessalewallen.com", + "followers": 2710 + }, + { + "firstname": "Burton", + "lastname": "Brining", + "company": "Corcoran Machine Company", + "address": "135 E Liberty St", + "city": "Wooster", + "county": "Wayne", + "state": "OH", + "zip": "44691", + "phone": "330-262-5481", + "fax": "330-262-7555", + "email": "burton@brining.com", + "web": "http://www.burtonbrining.com", + "followers": 8158 + }, + { + "firstname": "Rosalie", + "lastname": "Krigger", + "company": "Aaron, William Esq", + "address": "330 Route 211 E", + "city": "Middletown", + "county": "Orange", + "state": "NY", + "zip": "10940", + "phone": "845-343-2313", + "fax": "845-343-2979", + "email": "rosalie@krigger.com", + "web": "http://www.rosaliekrigger.com", + "followers": 1411 + }, + { + "firstname": "Tammie", + "lastname": "Schwartzwalde", + "company": "Cox, Thomas E", + "address": "415 Center St", + "city": "Ironton", + "county": "Lawrence", + 
"state": "OH", + "zip": "45638", + "phone": "740-532-5488", + "fax": "740-532-0319", + "email": "tammie@schwartzwalde.com", + "web": "http://www.tammieschwartzwalde.com", + "followers": 1367 + }, + { + "firstname": "Darrin", + "lastname": "Neiss", + "company": "Delaney, James J Jr", + "address": "101 W Central Blvd", + "city": "Kewanee", + "county": "Henry", + "state": "IL", + "zip": "61443", + "phone": "309-852-5127", + "fax": "309-852-8638", + "email": "darrin@neiss.com", + "web": "http://www.darrinneiss.com", + "followers": 5748 + }, + { + "firstname": "Rosalia", + "lastname": "Kennemur", + "company": "Reagan, Thomas J Esq", + "address": "222 S 10th St", + "city": "Oakdale", + "county": "Allen", + "state": "LA", + "zip": "71463", + "phone": "318-335-5586", + "fax": "318-335-1873", + "email": "rosalia@kennemur.com", + "web": "http://www.rosaliakennemur.com", + "followers": 5984 + }, + { + "firstname": "Callie", + "lastname": "Leboeuf", + "company": "Town Motors", + "address": "100 S 2nd Ave", + "city": "Alpena", + "county": "Alpena", + "state": "MI", + "zip": "49707", + "phone": "989-354-3344", + "fax": "989-354-3712", + "email": "callie@leboeuf.com", + "web": "http://www.callieleboeuf.com", + "followers": 3607 + }, + { + "firstname": "Patty", + "lastname": "Bernasconi", + "company": "Porter Wright Morris & Arthur", + "address": "851 Fort Street Mall", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96813", + "phone": "808-531-2621", + "fax": "808-531-6234", + "email": "patty@bernasconi.com", + "web": "http://www.pattybernasconi.com", + "followers": 3012 + }, + { + "firstname": "Elmo", + "lastname": "Gabouer", + "company": "Conduit & Foundation Corp", + "address": "275 W Bridge St", + "city": "New Hope", + "county": "Bucks", + "state": "PA", + "zip": "18938", + "phone": "215-862-6538", + "fax": "215-862-7006", + "email": "elmo@gabouer.com", + "web": "http://www.elmogabouer.com", + "followers": 9593 + }, + { + "firstname": "Logan", + "lastname": "Muhl", + "company": "Brown, Phillip C Esq", + "address": "126 S Bellevue Ave", + "city": "Langhorne", + "county": "Bucks", + "state": "PA", + "zip": "19047", + "phone": "215-757-6124", + "fax": "215-757-2785", + "email": "logan@muhl.com", + "web": "http://www.loganmuhl.com", + "followers": 741 + }, + { + "firstname": "Vivian", + "lastname": "Brzostowski", + "company": "Savage, Philip M Iii", + "address": "118 Mill St", + "city": "Bristol", + "county": "Bucks", + "state": "PA", + "zip": "19007", + "phone": "215-788-2791", + "fax": "215-788-3902", + "email": "vivian@brzostowski.com", + "web": "http://www.vivianbrzostowski.com", + "followers": 1121 + }, + { + "firstname": "Efren", + "lastname": "Baucher", + "company": "R O Binson Inc", + "address": "Rts 232 & 413", + "city": "Newtown", + "county": "Bucks", + "state": "PA", + "zip": "18940", + "phone": "215-598-4644", + "fax": "215-598-5929", + "email": "efren@baucher.com", + "web": "http://www.efrenbaucher.com", + "followers": 8199 + }, + { + "firstname": "Kurtis", + "lastname": "Mcbay", + "company": "P C Enterprises Ltd", + "address": "737 Levittown Ctr", + "city": "Levittown", + "county": "Bucks", + "state": "PA", + "zip": "19055", + "phone": "215-946-6048", + "fax": "215-946-6458", + "email": "kurtis@mcbay.com", + "web": "http://www.kurtismcbay.com", + "followers": 8315 + }, + { + "firstname": "Guillermo", + "lastname": "Tsang", + "company": "Gillis, Donald W Esq", + "address": "16 Highland Park Way", + "city": "Levittown", + "county": "Bucks", + "state": "PA", + "zip": 
"19056", + "phone": "215-949-7912", + "fax": "215-949-8919", + "email": "guillermo@tsang.com", + "web": "http://www.guillermotsang.com", + "followers": 4007 + }, + { + "firstname": "Milton", + "lastname": "Kuhlman", + "company": "Imag Corp", + "address": "237 Jackson St Sw", + "city": "Camden", + "county": "Ouachita", + "state": "AR", + "zip": "71701", + "phone": "870-836-9021", + "fax": "870-836-2283", + "email": "milton@kuhlman.com", + "web": "http://www.miltonkuhlman.com", + "followers": 7034 + }, + { + "firstname": "Naomi", + "lastname": "Greenly", + "company": "Kpmg Peat Marwick Llp", + "address": "1400 Gault Ave N", + "city": "Fort Payne", + "county": "De Kalb", + "state": "AL", + "zip": "35967", + "phone": "256-845-1216", + "fax": "256-845-2469", + "email": "naomi@greenly.com", + "web": "http://www.naomigreenly.com", + "followers": 916 + }, + { + "firstname": "Mary", + "lastname": "Maurizio", + "company": "Carey Filter White & Boland", + "address": "404 Main St", + "city": "Delta", + "county": "Fulton", + "state": "OH", + "zip": "43515", + "phone": "419-822-7176", + "fax": "419-822-0591", + "email": "mary@maurizio.com", + "web": "http://www.marymaurizio.com", + "followers": 6083 + }, + { + "firstname": "Caitlin", + "lastname": "Reiniger", + "company": "White, Lawrence R Esq", + "address": "140 N Columbus St", + "city": "Galion", + "county": "Crawford", + "state": "OH", + "zip": "44833", + "phone": "419-468-6910", + "fax": "419-468-9018", + "email": "caitlin@reiniger.com", + "web": "http://www.caitlinreiniger.com", + "followers": 641 + }, + { + "firstname": "Coleman", + "lastname": "Cuneo", + "company": "M & M Mars", + "address": "25 E High St", + "city": "Waynesburg", + "county": "Greene", + "state": "PA", + "zip": "15370", + "phone": "724-627-4378", + "fax": "724-627-2305", + "email": "coleman@cuneo.com", + "web": "http://www.colemancuneo.com", + "followers": 8657 + }, + { + "firstname": "Rachel", + "lastname": "Larrison", + "company": "Ipa The Editing House", + "address": "3721 Oberlin Ave", + "city": "Lorain", + "county": "Lorain", + "state": "OH", + "zip": "44053", + "phone": "440-282-3729", + "fax": "440-282-6918", + "email": "rachel@larrison.com", + "web": "http://www.rachellarrison.com", + "followers": 4562 + }, + { + "firstname": "Dwayne", + "lastname": "Maddalena", + "company": "Ebbeson, James O Esq", + "address": "532 Court St", + "city": "Pekin", + "county": "Tazewell", + "state": "IL", + "zip": "61554", + "phone": "309-347-1137", + "fax": "309-347-9282", + "email": "dwayne@maddalena.com", + "web": "http://www.dwaynemaddalena.com", + "followers": 7384 + }, + { + "firstname": "Angelique", + "lastname": "Schermerhorn", + "company": "Safety Direct Inc", + "address": "511 Saint Johns Ave", + "city": "Palatka", + "county": "Putnam", + "state": "FL", + "zip": "32177", + "phone": "386-328-7869", + "fax": "386-328-1499", + "email": "angelique@schermerhorn.com", + "web": "http://www.angeliqueschermerhorn.com", + "followers": 6181 + }, + { + "firstname": "Junior", + "lastname": "Wadlinger", + "company": "Sonos Music", + "address": "185 E Market St", + "city": "Warren", + "county": "Trumbull", + "state": "OH", + "zip": "44481", + "phone": "330-393-9794", + "fax": "330-393-6808", + "email": "junior@wadlinger.com", + "web": "http://www.juniorwadlinger.com", + "followers": 7690 + }, + { + "firstname": "Darrel", + "lastname": "Tork", + "company": "S & T Machining", + "address": "2121 S Mannheim Rd", + "city": "Westchester", + "county": "Cook", + "state": "IL", + "zip": "60154", + 
"phone": "708-865-8091", + "fax": "708-865-8984", + "email": "darrel@tork.com", + "web": "http://www.darreltork.com", + "followers": 9708 + }, + { + "firstname": "Lana", + "lastname": "Garrigus", + "company": "Russell Builders & Hardware", + "address": "118 Ne 3rd St", + "city": "McMinnville", + "county": "Yamhill", + "state": "OR", + "zip": "97128", + "phone": "503-434-2642", + "fax": "503-434-8121", + "email": "lana@garrigus.com", + "web": "http://www.lanagarrigus.com", + "followers": 3048 + }, + { + "firstname": "Jonathon", + "lastname": "Waldall", + "company": "Mission Hills Escrow", + "address": "300 Hampton St", + "city": "Walterboro", + "county": "Colleton", + "state": "SC", + "zip": "29488", + "phone": "843-549-9461", + "fax": "843-549-0125", + "email": "jonathon@waldall.com", + "web": "http://www.jonathonwaldall.com", + "followers": 8039 + }, + { + "firstname": "Kristine", + "lastname": "Paker", + "company": "Chagrin Valley Massotherapy", + "address": "301 N Pine St", + "city": "Creston", + "county": "Union", + "state": "IA", + "zip": "50801", + "phone": "641-782-7169", + "fax": "641-782-7962", + "email": "kristine@paker.com", + "web": "http://www.kristinepaker.com", + "followers": 7977 + }, + { + "firstname": "Dwain", + "lastname": "Agricola", + "company": "Beatty Satchell Everngam & Co", + "address": "211 N Main St", + "city": "Leitchfield", + "county": "Grayson", + "state": "KY", + "zip": "42754", + "phone": "270-259-5194", + "fax": "270-259-0821", + "email": "dwain@agricola.com", + "web": "http://www.dwainagricola.com", + "followers": 8410 + }, + { + "firstname": "Jewel", + "lastname": "Agresta", + "company": "Md Assn Cert Pub Accts Inc", + "address": "4565 Harrison St", + "city": "Hillside", + "county": "Cook", + "state": "IL", + "zip": "60162", + "phone": "708-449-7139", + "fax": "708-449-2963", + "email": "jewel@agresta.com", + "web": "http://www.jewelagresta.com", + "followers": 293 + }, + { + "firstname": "Georgette", + "lastname": "Bandyk", + "company": "Specialty Alumn Castings Inc", + "address": "1965 Wakefield Ave", + "city": "Petersburg", + "county": "Petersburg City", + "state": "VA", + "zip": "23805", + "phone": "804-796-2746", + "fax": "804-796-5351", + "email": "georgette@bandyk.com", + "web": "http://www.georgettebandyk.com", + "followers": 9865 + }, + { + "firstname": "Geri", + "lastname": "Forness", + "company": "Quality Dynamics Group", + "address": "Capitol Ave", + "city": "Corydon", + "county": "Harrison", + "state": "IN", + "zip": "47112", + "phone": "812-738-9416", + "fax": "812-738-4816", + "email": "geri@forness.com", + "web": "http://www.geriforness.com", + "followers": 7788 + }, + { + "firstname": "Modesto", + "lastname": "Scroggie", + "company": "Bulloch, Bruce Cpa", + "address": "300 Orlando Dr", + "city": "Raritan", + "county": "Somerset", + "state": "NJ", + "zip": "08869", + "phone": "908-980-5621", + "fax": "908-980-9842", + "email": "modesto@scroggie.com", + "web": "http://www.modestoscroggie.com", + "followers": 5671 + }, + { + "firstname": "Curt", + "lastname": "Menedez", + "company": "J & J Machinery Repair Inc", + "address": "207 Yukon", + "city": "Tampa", + "county": "Hillsborough", + "state": "FL", + "zip": "33604", + "phone": "813-932-8602", + "fax": "813-932-4548", + "email": "curt@menedez.com", + "web": "http://www.curtmenedez.com", + "followers": 1311 + }, + { + "firstname": "Karen", + "lastname": "Zombo", + "company": "Healthcare Family Credit Union", + "address": "3112 W Kennedy Blvd", + "city": "Tampa", + "county": "Hillsborough", + 
"state": "FL", + "zip": "33609", + "phone": "813-872-4288", + "fax": "813-872-8262", + "email": "karen@zombo.com", + "web": "http://www.karenzombo.com", + "followers": 2543 + }, + { + "firstname": "Lora", + "lastname": "Lendor", + "company": "Advanced Electromagnetics Inc", + "address": "7 W Darlington Ave", + "city": "Kissimmee", + "county": "Osceola", + "state": "FL", + "zip": "34741", + "phone": "407-870-0382", + "fax": "407-870-6229", + "email": "lora@lendor.com", + "web": "http://www.loralendor.com", + "followers": 5947 + }, + { + "firstname": "Felipe", + "lastname": "Mahone", + "company": "Apartment Mart", + "address": "1001 Bishop St #-2850", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96813", + "phone": "808-536-3239", + "fax": "808-536-1231", + "email": "felipe@mahone.com", + "web": "http://www.felipemahone.com", + "followers": 4427 + }, + { + "firstname": "Rosalyn", + "lastname": "Daulton", + "company": "Rodgard Corp", + "address": "300 Broadacres Dr", + "city": "Bloomfield", + "county": "Essex", + "state": "NJ", + "zip": "07003", + "phone": "973-338-8552", + "fax": "973-338-1603", + "email": "rosalyn@daulton.com", + "web": "http://www.rosalyndaulton.com", + "followers": 2667 + }, + { + "firstname": "Marquita", + "lastname": "Bousman", + "company": "Constantine, Katherine A Esq", + "address": "30 Highland Ave", + "city": "Warwick", + "county": "Orange", + "state": "NY", + "zip": "10990", + "phone": "845-986-0909", + "fax": "845-986-2447", + "email": "marquita@bousman.com", + "web": "http://www.marquitabousman.com", + "followers": 4315 + }, + { + "firstname": "Carla", + "lastname": "Sirbaugh", + "company": "Urso, Natale L Esq", + "address": "110 S La Brea Ave #-22", + "city": "Inglewood", + "county": "Los Angeles", + "state": "CA", + "zip": "90301", + "phone": "310-412-6653", + "fax": "310-412-1067", + "email": "carla@sirbaugh.com", + "web": "http://www.carlasirbaugh.com", + "followers": 9701 + }, + { + "firstname": "Wes", + "lastname": "Fontanella", + "company": "Woodside Travel Trust", + "address": "1369 W Redondo Beach Blvd", + "city": "Gardena", + "county": "Los Angeles", + "state": "CA", + "zip": "90247", + "phone": "310-515-3065", + "fax": "310-515-2515", + "email": "wes@fontanella.com", + "web": "http://www.wesfontanella.com", + "followers": 1717 + }, + { + "firstname": "Meredith", + "lastname": "Ivrin", + "company": "Hamilton Financial Corp", + "address": "323 N Gilbert St", + "city": "Danville", + "county": "Vermilion", + "state": "IL", + "zip": "61832", + "phone": "217-446-7172", + "fax": "217-446-2369", + "email": "meredith@ivrin.com", + "web": "http://www.meredithivrin.com", + "followers": 7827 + }, + { + "firstname": "Laurie", + "lastname": "Bigg", + "company": "Essc Inc", + "address": "14500 Lakeside Cir", + "city": "Sterling Heights", + "county": "Macomb", + "state": "MI", + "zip": "48313", + "phone": "586-247-6171", + "fax": "586-247-9791", + "email": "laurie@bigg.com", + "web": "http://www.lauriebigg.com", + "followers": 8684 + }, + { + "firstname": "Barton", + "lastname": "Friesner", + "company": "Optical Supply", + "address": "1 Summit Ct", + "city": "Fishkill", + "county": "Dutchess", + "state": "NY", + "zip": "12524", + "phone": "845-896-6652", + "fax": "845-896-1692", + "email": "barton@friesner.com", + "web": "http://www.bartonfriesner.com", + "followers": 4889 + }, + { + "firstname": "Sophie", + "lastname": "Langner", + "company": "Kapetanakis, Alexander Esq", + "address": "535 Ward Ave #-204", + "city": "Honolulu", + "county": 
"Honolulu", + "state": "HI", + "zip": "96814", + "phone": "808-545-7695", + "fax": "808-545-8636", + "email": "sophie@langner.com", + "web": "http://www.sophielangner.com", + "followers": 1596 + }, + { + "firstname": "Garfield", + "lastname": "Lijewski", + "company": "Denker, Aaron Esq", + "address": "6401 N Lincoln Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60645", + "phone": "773-976-3827", + "fax": "773-976-5586", + "email": "garfield@lijewski.com", + "web": "http://www.garfieldlijewski.com", + "followers": 5955 + }, + { + "firstname": "Warren", + "lastname": "Speach", + "company": "E Norwalk Crmc Tile & Mrbl Co", + "address": "361 Park Ave", + "city": "Scotch Plains", + "county": "Union", + "state": "NJ", + "zip": "07076", + "phone": "908-322-3846", + "fax": "908-322-6744", + "email": "warren@speach.com", + "web": "http://www.warrenspeach.com", + "followers": 6741 + }, + { + "firstname": "Madonna", + "lastname": "Cosby", + "company": "Emanuel Reider Architects Inc", + "address": "135 Main St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94105", + "phone": "415-956-4437", + "fax": "415-956-5134", + "email": "madonna@cosby.com", + "web": "http://www.madonnacosby.com", + "followers": 3985 + }, + { + "firstname": "Valeria", + "lastname": "Lingbeek", + "company": "Recreation Director", + "address": "state", + "city": "Newtown", + "county": "Bucks", + "state": "PA", + "zip": "18940", + "phone": "215-968-8421", + "fax": "215-968-1567", + "email": "valeria@lingbeek.com", + "web": "http://www.valerialingbeek.com", + "followers": 8824 + }, + { + "firstname": "Heath", + "lastname": "Vanalphen", + "company": "California Stat Min & Mnrl Mus", + "address": "227 Commercial St", + "city": "Provincetown", + "county": "Barnstable", + "state": "MA", + "zip": "02657", + "phone": "508-487-6010", + "fax": "508-487-0597", + "email": "heath@vanalphen.com", + "web": "http://www.heathvanalphen.com", + "followers": 6846 + }, + { + "firstname": "Marisa", + "lastname": "Woldridge", + "company": "Wegner, Tim Esq", + "address": "153 Baltimore St", + "city": "Cumberland", + "county": "Allegany", + "state": "MD", + "zip": "21502", + "phone": "301-759-7421", + "fax": "301-759-9676", + "email": "marisa@woldridge.com", + "web": "http://www.marisawoldridge.com", + "followers": 6009 + }, + { + "firstname": "Rene", + "lastname": "Dummermuth", + "company": "Super 8 Motel", + "address": "2 Ridgedale Ave", + "city": "Cedar Knolls", + "county": "Morris", + "state": "NJ", + "zip": "07927", + "phone": "973-292-7918", + "fax": "973-292-5898", + "email": "rene@dummermuth.com", + "web": "http://www.renedummermuth.com", + "followers": 1687 + }, + { + "firstname": "Helga", + "lastname": "Windle", + "company": "Loew, Andrea H Esq", + "address": "99185 Moanalua Rd #-101", + "city": "Aiea", + "county": "Honolulu", + "state": "HI", + "zip": "96701", + "phone": "808-487-7779", + "fax": "808-487-6258", + "email": "helga@windle.com", + "web": "http://www.helgawindle.com", + "followers": 56 + }, + { + "firstname": "Margot", + "lastname": "Arenburg", + "company": "Mcivor, Carolyn Md", + "address": "736 N Mills Ave", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32803", + "phone": "407-896-1593", + "fax": "407-896-6679", + "email": "margot@arenburg.com", + "web": "http://www.margotarenburg.com", + "followers": 7445 + }, + { + "firstname": "Sheila", + "lastname": "Holloran", + "company": "Warehouse On Wheels", + "address": "126 S Main St", + "city": 
"Clyde", + "county": "Sandusky", + "state": "OH", + "zip": "43410", + "phone": "419-547-9428", + "fax": "419-547-4835", + "email": "sheila@holloran.com", + "web": "http://www.sheilaholloran.com", + "followers": 9682 + }, + { + "firstname": "Melinda", + "lastname": "Carleton", + "company": "Cenol Co", + "address": "395 Revilo Ave", + "city": "Shirley", + "county": "Suffolk", + "state": "NY", + "zip": "11967", + "phone": "631-399-1636", + "fax": "631-399-6025", + "email": "melinda@carleton.com", + "web": "http://www.melindacarleton.com", + "followers": 7154 + }, + { + "firstname": "Ike", + "lastname": "Zeolla", + "company": "Halpin, Irene A Esq", + "address": "1900 L St Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20036", + "phone": "202-331-1409", + "fax": "202-331-7781", + "email": "ike@zeolla.com", + "web": "http://www.ikezeolla.com", + "followers": 7418 + }, + { + "firstname": "Elmo", + "lastname": "Dagenais", + "company": "P C Routing Inc", + "address": "12914 Old Stage Rd", + "city": "Chester", + "county": "Chesterfield", + "state": "VA", + "zip": "23831", + "phone": "804-796-5647", + "fax": "804-796-9493", + "email": "elmo@dagenais.com", + "web": "http://www.elmodagenais.com", + "followers": 7355 + }, + { + "firstname": "Valentine", + "lastname": "Granberry", + "company": "Sunnyvale Travel", + "address": "1019 Shadick Dr", + "city": "Orange City", + "county": "Volusia", + "state": "FL", + "zip": "32763", + "phone": "407-775-4269", + "fax": "407-775-0598", + "email": "valentine@granberry.com", + "web": "http://www.valentinegranberry.com", + "followers": 7021 + }, + { + "firstname": "Waldo", + "lastname": "Sisk", + "company": "Muller Drugs Inc", + "address": "2211 Us Highway 19", + "city": "Holiday", + "county": "Pasco", + "state": "FL", + "zip": "34691", + "phone": "727-934-3827", + "fax": "727-934-7181", + "email": "waldo@sisk.com", + "web": "http://www.waldosisk.com", + "followers": 2109 + }, + { + "firstname": "Robt", + "lastname": "Braithwaite", + "company": "Meyer, Janet Md", + "address": "320 W Mclane St", + "city": "Osceola", + "county": "Clarke", + "state": "IA", + "zip": "50213", + "phone": "641-342-1276", + "fax": "641-342-6031", + "email": "robt@braithwaite.com", + "web": "http://www.robtbraithwaite.com", + "followers": 1336 + }, + { + "firstname": "Corinne", + "lastname": "Cowan", + "company": "Ward Equipment Co", + "address": "20 Montana Ave", + "city": "Laurel", + "county": "Yellowstone", + "state": "MT", + "zip": "59044", + "phone": "406-628-4030", + "fax": "406-628-9418", + "email": "corinne@cowan.com", + "web": "http://www.corinnecowan.com", + "followers": 7049 + }, + { + "firstname": "Rebeca", + "lastname": "Brumet", + "company": "Kingston Office Supplies Inc", + "address": "936 N Western Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60622", + "phone": "773-772-4015", + "fax": "773-772-1603", + "email": "rebeca@brumet.com", + "web": "http://www.rebecabrumet.com", + "followers": 202 + }, + { + "firstname": "Lynn", + "lastname": "Saulsberry", + "company": "Printing Factory Inc", + "address": "2725 W Mcdowell Rd", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85009", + "phone": "602-272-8326", + "fax": "602-272-3143", + "email": "lynn@saulsberry.com", + "web": "http://www.lynnsaulsberry.com", + "followers": 5265 + }, + { + "firstname": "Hannah", + "lastname": "Facio", + "company": "Cmptr Pros For Scl", + "address": "115 E Church St", + "city": "Elberton", + "county": 
"Elbert", + "state": "GA", + "zip": "30635", + "phone": "706-283-8280", + "fax": "706-283-6916", + "email": "hannah@facio.com", + "web": "http://www.hannahfacio.com", + "followers": 4321 + }, + { + "firstname": "Benjamin", + "lastname": "Schkade", + "company": "Port Brownsville Pub Scale Inc", + "address": "1636 E 1st Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-278-8687", + "fax": "907-278-7166", + "email": "benjamin@schkade.com", + "web": "http://www.benjaminschkade.com", + "followers": 5846 + }, + { + "firstname": "Athena", + "lastname": "Fontanilla", + "company": "Willamette Hobbies", + "address": "5020 Germantown Ave", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19144", + "phone": "215-438-9675", + "fax": "215-438-1716", + "email": "athena@fontanilla.com", + "web": "http://www.athenafontanilla.com", + "followers": 5342 + }, + { + "firstname": "Alene", + "lastname": "Rabeck", + "company": "Bucks County Of", + "address": "475 E 162nd St", + "city": "South Holland", + "county": "Cook", + "state": "IL", + "zip": "60473", + "phone": "708-333-8056", + "fax": "708-333-2125", + "email": "alene@rabeck.com", + "web": "http://www.alenerabeck.com", + "followers": 2815 + }, + { + "firstname": "Yvette", + "lastname": "Kokoska", + "company": "Automation Products Inc", + "address": "200 Valley Dr", + "city": "Brisbane", + "county": "San Mateo", + "state": "CA", + "zip": "94005", + "phone": "650-468-3592", + "fax": "650-468-7716", + "email": "yvette@kokoska.com", + "web": "http://www.yvettekokoska.com", + "followers": 6175 + }, + { + "firstname": "Petra", + "lastname": "Clemmens", + "company": "Belton Industries Inc", + "address": "980 N Federal Hwy", + "city": "Boca Raton", + "county": "Palm Beach", + "state": "FL", + "zip": "33432", + "phone": "561-394-2152", + "fax": "561-394-1574", + "email": "petra@clemmens.com", + "web": "http://www.petraclemmens.com", + "followers": 5263 + }, + { + "firstname": "Carmel", + "lastname": "Overfelt", + "company": "Woodworkers Supply Inc", + "address": "6801 Lake Worth Rd", + "city": "Lake Worth", + "county": "Palm Beach", + "state": "FL", + "zip": "33467", + "phone": "561-965-5167", + "fax": "561-965-1433", + "email": "carmel@overfelt.com", + "web": "http://www.carmeloverfelt.com", + "followers": 5868 + }, + { + "firstname": "Danette", + "lastname": "Fostervold", + "company": "Flach, Douglas Esq", + "address": "6920 Santa Teresa Blvd", + "city": "San Jose", + "county": "Santa Clara", + "state": "CA", + "zip": "95119", + "phone": "408-225-1319", + "fax": "408-225-5205", + "email": "danette@fostervold.com", + "web": "http://www.danettefostervold.com", + "followers": 1315 + }, + { + "firstname": "Vince", + "lastname": "Ettel", + "company": "Breen Trucking", + "address": "408 Main St", + "city": "Springfield", + "county": "Sarpy", + "state": "NE", + "zip": "68059", + "phone": "402-399-6999", + "fax": "402-399-6478", + "email": "vince@ettel.com", + "web": "http://www.vinceettel.com", + "followers": 7780 + }, + { + "firstname": "Davis", + "lastname": "Heideman", + "company": "Dennis J Wall Atty At Law Pa", + "address": "801 W 5th St", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76102", + "phone": "817-332-7902", + "fax": "817-332-5439", + "email": "davis@heideman.com", + "web": "http://www.davisheideman.com", + "followers": 4778 + }, + { + "firstname": "Bradly", + "lastname": "Hasselvander", + "company": "Public Works Department Office", + 
"address": "2302 Artesia Blvd", + "city": "Redondo Beach", + "county": "Los Angeles", + "state": "CA", + "zip": "90278", + "phone": "310-374-2374", + "fax": "310-374-2363", + "email": "bradly@hasselvander.com", + "web": "http://www.bradlyhasselvander.com", + "followers": 7831 + }, + { + "firstname": "Nathanial", + "lastname": "Phoenix", + "company": "Precision Steel Rule Die Co", + "address": "1000 Nw 105th St", + "city": "Oklahoma City", + "county": "Oklahoma", + "state": "OK", + "zip": "73114", + "phone": "405-748-7637", + "fax": "405-748-1856", + "email": "nathanial@phoenix.com", + "web": "http://www.nathanialphoenix.com", + "followers": 8308 + }, + { + "firstname": "Lamar", + "lastname": "Mckibben", + "company": "Battaglia, Jack M Esq", + "address": "1620 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-864-7338", + "fax": "415-864-7623", + "email": "lamar@mckibben.com", + "web": "http://www.lamarmckibben.com", + "followers": 4193 + }, + { + "firstname": "Shanna", + "lastname": "Numkena", + "company": "Anderson Independent Mail", + "address": "1426 5th Pl Nw", + "city": "Rochester", + "county": "Olmsted", + "state": "MN", + "zip": "55901", + "phone": "507-280-1856", + "fax": "507-280-6844", + "email": "shanna@numkena.com", + "web": "http://www.shannanumkena.com", + "followers": 1364 + }, + { + "firstname": "Helena", + "lastname": "Suermann", + "company": "Stubenberge, James A Esq", + "address": "897 Independence Ave", + "city": "Mountain View", + "county": "Santa Clara", + "state": "CA", + "zip": "94043", + "phone": "650-965-0255", + "fax": "650-965-3368", + "email": "helena@suermann.com", + "web": "http://www.helenasuermann.com", + "followers": 4536 + }, + { + "firstname": "Delphine", + "lastname": "Helmich", + "company": "Friends Hospital", + "address": "50 Aviation Way", + "city": "Watsonville", + "county": "Santa Cruz", + "state": "CA", + "zip": "95076", + "phone": "831-763-4348", + "fax": "831-763-0923", + "email": "delphine@helmich.com", + "web": "http://www.delphinehelmich.com", + "followers": 7383 + }, + { + "firstname": "Barbara", + "lastname": "Hindley", + "company": "Kirin Amgen", + "address": "904 N Lake St", + "city": "Burbank", + "county": "Los Angeles", + "state": "CA", + "zip": "91502", + "phone": "818-841-8886", + "fax": "818-841-8221", + "email": "barbara@hindley.com", + "web": "http://www.barbarahindley.com", + "followers": 9155 + }, + { + "firstname": "Sheryl", + "lastname": "Sisofo", + "company": "Thrifty Sign Stop", + "address": "1049 S Mccord Rd", + "city": "Holland", + "county": "Lucas", + "state": "OH", + "zip": "43528", + "phone": "419-865-8702", + "fax": "419-865-1836", + "email": "sheryl@sisofo.com", + "web": "http://www.sherylsisofo.com", + "followers": 5693 + }, + { + "firstname": "Robyn", + "lastname": "Christophel", + "company": "Woodward, John C Esq", + "address": "3420 E Flamingo Rd", + "city": "Las Vegas", + "county": "Clark", + "state": "NV", + "zip": "89121", + "phone": "702-458-1072", + "fax": "702-458-2093", + "email": "robyn@christophel.com", + "web": "http://www.robynchristophel.com", + "followers": 3971 + }, + { + "firstname": "Gayla", + "lastname": "Geimer", + "company": "Ortman Mccain Co", + "address": "1280 Price Ave", + "city": "Pomona", + "county": "Los Angeles", + "state": "CA", + "zip": "91767", + "phone": "909-620-6453", + "fax": "909-620-2768", + "email": "gayla@geimer.com", + "web": "http://www.gaylageimer.com", + "followers": 8969 + }, + { + "firstname": "Evan", + 
"lastname": "Pyfrom", + "company": "Nevada Baking Co", + "address": "5430 Alpha Rd", + "city": "Dallas", + "county": "Dallas", + "state": "TX", + "zip": "75240", + "phone": "214-661-4625", + "fax": "214-661-8804", + "email": "evan@pyfrom.com", + "web": "http://www.evanpyfrom.com", + "followers": 2516 + }, + { + "firstname": "Chad", + "lastname": "Miklas", + "company": "Red Carpet Inn", + "address": "31 S Grove St", + "city": "East Aurora", + "county": "Erie", + "state": "NY", + "zip": "14052", + "phone": "716-655-2736", + "fax": "716-655-2749", + "email": "chad@miklas.com", + "web": "http://www.chadmiklas.com", + "followers": 5357 + }, + { + "firstname": "Trey", + "lastname": "Tout", + "company": "Breen, Sean E Esq", + "address": "100 Mbc Dr", + "city": "Shawano", + "county": "Shawano", + "state": "WI", + "zip": "54166", + "phone": "715-526-6806", + "fax": "715-526-2421", + "email": "trey@tout.com", + "web": "http://www.treytout.com", + "followers": 205 + }, + { + "firstname": "Isabell", + "lastname": "Armout", + "company": "True Electric Corp", + "address": "7895 S Cessna Ave", + "city": "Gaithersburg", + "county": "Montgomery", + "state": "MD", + "zip": "20879", + "phone": "301-921-0406", + "fax": "301-921-1251", + "email": "isabell@armout.com", + "web": "http://www.isabellarmout.com", + "followers": 4878 + }, + { + "firstname": "Alejandro", + "lastname": "Mascall", + "company": "Railway Educational Bureau", + "address": "2350 Duke St", + "city": "Alexandria", + "county": "Alexandria City", + "state": "VA", + "zip": "22314", + "phone": "703-684-2882", + "fax": "703-684-8561", + "email": "alejandro@mascall.com", + "web": "http://www.alejandromascall.com", + "followers": 3512 + }, + { + "firstname": "Kennith", + "lastname": "Kirklin", + "company": "Sears Roebuck And Co", + "address": "2303 21st Ave S", + "city": "Nashville", + "county": "Davidson", + "state": "TN", + "zip": "37212", + "phone": "615-385-1598", + "fax": "615-385-6946", + "email": "kennith@kirklin.com", + "web": "http://www.kennithkirklin.com", + "followers": 5087 + }, + { + "firstname": "Ike", + "lastname": "Benthin", + "company": "Lee, Harry Esq", + "address": "1062 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-255-5277", + "fax": "415-255-6543", + "email": "ike@benthin.com", + "web": "http://www.ikebenthin.com", + "followers": 8473 + }, + { + "firstname": "Donald", + "lastname": "Sherretts", + "company": "Nylonge Corporation", + "address": "1062 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-255-7718", + "fax": "415-255-7088", + "email": "donald@sherretts.com", + "web": "http://www.donaldsherretts.com", + "followers": 2332 + }, + { + "firstname": "Lina", + "lastname": "Hybarger", + "company": "L & H Central Office", + "address": "1828 Jefferson Pl Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20036", + "phone": "202-833-4983", + "fax": "202-833-3174", + "email": "lina@hybarger.com", + "web": "http://www.linahybarger.com", + "followers": 9793 + }, + { + "firstname": "Rebekah", + "lastname": "Padley", + "company": "Reed Engineering Inc", + "address": "200 E Delawr Pl", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60611", + "phone": "312-944-1877", + "fax": "312-944-1477", + "email": "rebekah@padley.com", + "web": "http://www.rebekahpadley.com", + "followers": 3839 + }, + { + "firstname": "Marion", + "lastname": 
"Gaulden", + "company": "Madden, John H Jr", + "address": "200 W South St", + "city": "Charlottesville", + "county": "Charlottesville City", + "state": "VA", + "zip": "22902", + "phone": "434-979-9335", + "fax": "434-979-2694", + "email": "marion@gaulden.com", + "web": "http://www.mariongaulden.com", + "followers": 5625 + }, + { + "firstname": "Maurine", + "lastname": "Monroy", + "company": "Central Distribution System", + "address": "2000 Linwood Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-947-8922", + "fax": "201-947-4235", + "email": "maurine@monroy.com", + "web": "http://www.maurinemonroy.com", + "followers": 5828 + }, + { + "firstname": "Rosanna", + "lastname": "Sandrock", + "company": "Computer X Consulting", + "address": "1797 Lakewood Ter Se", + "city": "Atlanta", + "county": "Fulton", + "state": "GA", + "zip": "30315", + "phone": "404-627-4604", + "fax": "404-627-4276", + "email": "rosanna@sandrock.com", + "web": "http://www.rosannasandrock.com", + "followers": 3044 + }, + { + "firstname": "Marcelino", + "lastname": "Maggs", + "company": "Rascher & Betzold Inc", + "address": "201 E Pine St", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32801", + "phone": "407-420-1152", + "fax": "407-420-7195", + "email": "marcelino@maggs.com", + "web": "http://www.marcelinomaggs.com", + "followers": 5320 + }, + { + "firstname": "Florine", + "lastname": "Willardson", + "company": "Lunt, Donald C Esq", + "address": "5605 Ne 105th Ave", + "city": "Portland", + "county": "Multnomah", + "state": "OR", + "zip": "97220", + "phone": "503-256-6559", + "fax": "503-256-8982", + "email": "florine@willardson.com", + "web": "http://www.florinewillardson.com", + "followers": 2336 + }, + { + "firstname": "Jude", + "lastname": "Haza", + "company": "Howard Fabrication", + "address": "1348 Liberty Pike", + "city": "Franklin", + "county": "Williamson", + "state": "TN", + "zip": "37067", + "phone": "615-790-3984", + "fax": "615-790-3042", + "email": "jude@haza.com", + "web": "http://www.judehaza.com", + "followers": 7311 + }, + { + "firstname": "Eldon", + "lastname": "Sutch", + "company": "Friesen And Kane Public Accts", + "address": "1818 E Atlantic St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19134", + "phone": "215-743-2414", + "fax": "215-743-2529", + "email": "eldon@sutch.com", + "web": "http://www.eldonsutch.com", + "followers": 6895 + }, + { + "firstname": "Lashonda", + "lastname": "Enote", + "company": "Nichols Village The Inn", + "address": "6301 Owensmouth Ave", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91367", + "phone": "818-704-8490", + "fax": "818-704-7539", + "email": "lashonda@enote.com", + "web": "http://www.lashondaenote.com", + "Note": "Ancien Dailymotion, recontré à LeWeb London 2012", + "followers": 6383 + }, + { + "firstname": "Marla", + "lastname": "Folz", + "company": "Odonoghue C Kevin", + "address": "201 Electronics Blvd Sw", + "city": "Huntsville", + "county": "Madison", + "state": "AL", + "zip": "35824", + "phone": "256-464-3329", + "fax": "256-464-6964", + "email": "marla@folz.com", + "web": "http://www.marlafolz.com", + "Note": "Product Manager at Sage France & WebMaster of ConseilsMarketing.Fr Interview at LEWeb", + "followers": 5861 + }, + { + "firstname": "Reginald", + "lastname": "Lunan", + "company": "Healey Chevy Olds Buick Geo", + "address": "985 Parker Ct", + "city": "Santa Clara", + "county": "Santa Clara", + "state": 
"CA", + "zip": "95050", + "phone": "408-727-1747", + "fax": "408-727-0884", + "email": "reginald@lunan.com", + "web": "http://www.reginaldlunan.com", + "followers": 7075 + }, + { + "firstname": "Kyle", + "lastname": "Lindauer", + "company": "Gem Tec Inc", + "address": "2000 E Jefferson St", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85034", + "phone": "602-258-5196", + "fax": "602-258-8609", + "email": "kyle@lindauer.com", + "web": "http://www.kylelindauer.com", + "followers": 6277 + }, + { + "firstname": "Son", + "lastname": "Marschke", + "company": "Evenings Dlght Fireplaces", + "address": "1119 Wheeler Ave", + "city": "Scranton", + "county": "Lackawanna", + "state": "PA", + "zip": "18510", + "phone": "570-969-0886", + "fax": "570-969-8176", + "email": "son@marschke.com", + "web": "http://www.sonmarschke.com", + "followers": 3481 + }, + { + "firstname": "Johnie", + "lastname": "Minaai", + "company": "Darling, Pamela E", + "address": "2100 Linwood Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-592-4771", + "fax": "201-592-8423", + "email": "johnie@minaai.com", + "web": "http://www.johnieminaai.com", + "followers": 5903 + }, + { + "firstname": "Kelli", + "lastname": "Varrato", + "company": "Frances Meyer Inc", + "address": "2505 Congress St", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92110", + "phone": "858-298-3969", + "fax": "858-298-6695", + "email": "kelli@varrato.com", + "web": "http://www.kellivarrato.com", + "followers": 9891 + }, + { + "firstname": "Neva", + "lastname": "Marsell", + "company": "Comfort Inn Wilshire", + "address": "1312 W Lincoln Ave", + "city": "Olivia", + "county": "Renville", + "state": "MN", + "zip": "56277", + "phone": "320-523-4975", + "fax": "320-523-8378", + "email": "neva@marsell.com", + "web": "http://www.nevamarsell.com", + "followers": 4114 + }, + { + "firstname": "Brice", + "lastname": "Hedglin", + "company": "Cupkovic, Walter D Esq", + "address": "2809 Granny White Pike", + "city": "Nashville", + "county": "Davidson", + "state": "TN", + "zip": "37204", + "phone": "615-292-9016", + "fax": "615-292-9027", + "email": "brice@hedglin.com", + "web": "http://www.bricehedglin.com", + "followers": 7730 + }, + { + "firstname": "Terrance", + "lastname": "Nimmer", + "company": "C D Short Foods Inc", + "address": "1400 N Woodward Ave", + "city": "Bloomfield Hills", + "county": "Oakland", + "state": "MI", + "zip": "48304", + "phone": "248-647-0653", + "fax": "248-647-1999", + "email": "terrance@nimmer.com", + "web": "http://www.terrancenimmer.com", + "followers": 7388 + }, + { + "firstname": "Carol", + "lastname": "Krisman", + "company": "Uniglobe Transeas Travel", + "address": "100 E 85th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10028", + "phone": "212-472-7877", + "fax": "212-472-9579", + "email": "carol@krisman.com", + "web": "http://www.carolkrisman.com", + "followers": 5985 + }, + { + "firstname": "Dollie", + "lastname": "Pillitteri", + "company": "Jiffy Moving & Storage Company", + "address": "4024 Merchant Rd", + "city": "Fort Wayne", + "county": "Allen", + "state": "IN", + "zip": "46818", + "phone": "260-489-3094", + "fax": "260-489-4697", + "email": "dollie@pillitteri.com", + "web": "http://www.dolliepillitteri.com", + "followers": 2624 + }, + { + "firstname": "Mellissa", + "lastname": "Sule", + "company": "Dowse, Geoffrey Esq", + "address": "92 Argonaut #-270", + "city": "Aliso Viejo", + "county": 
"Orange", + "state": "CA", + "zip": "92656", + "phone": "949-768-6176", + "fax": "949-768-8107", + "email": "mellissa@sule.com", + "web": "http://www.mellissasule.com", + "followers": 2709 + }, + { + "firstname": "Antony", + "lastname": "Thierauf", + "company": "Gutzwiller, Robert H Esq", + "address": "4915 Industrial Way", + "city": "Coeur d Alene", + "county": "Kootenai", + "state": "ID", + "zip": "83814", + "phone": "208-667-5252", + "fax": "208-667-5935", + "email": "antony@thierauf.com", + "web": "http://www.antonythierauf.com", + "followers": 1044 + }, + { + "firstname": "Reina", + "lastname": "Reisenauer", + "company": "Terrance Fox", + "address": "207 N Main St", + "city": "Hutchins", + "county": "Dallas", + "state": "TX", + "zip": "75141", + "phone": "972-225-9930", + "fax": "972-225-9569", + "email": "reina@reisenauer.com", + "web": "http://www.reinareisenauer.com", + "followers": 2953 + }, + { + "firstname": "Zane", + "lastname": "Sulikowski", + "company": "Meijer Associates Credit Union", + "address": "2375 3rd St", + "city": "Riverside", + "county": "Riverside", + "state": "CA", + "zip": "92507", + "phone": "951-683-4479", + "fax": "951-683-9932", + "email": "zane@sulikowski.com", + "web": "http://www.zanesulikowski.com", + "followers": 7275 + }, + { + "firstname": "Hilario", + "lastname": "Cassa", + "company": "Independence Assocaites Inc", + "address": "2222 Santa Monica Blvd", + "city": "Santa Monica", + "county": "Los Angeles", + "state": "CA", + "zip": "90404", + "phone": "310-828-6710", + "fax": "310-828-1895", + "email": "hilario@cassa.com", + "web": "http://www.hilariocassa.com", + "followers": 994 + }, + { + "firstname": "Veronica", + "lastname": "Radman", + "company": "Martin, Anthony D Esq", + "address": "235 W Main St", + "city": "Charlottesville", + "county": "Charlottesville City", + "state": "VA", + "zip": "22902", + "phone": "434-979-3306", + "fax": "434-979-9777", + "email": "veronica@radman.com", + "web": "http://www.veronicaradman.com", + "followers": 7568 + }, + { + "firstname": "Teri", + "lastname": "Erlewine", + "company": "League Of Kans Municipalities", + "address": "370 34th St St", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33711", + "phone": "727-327-3850", + "fax": "727-327-8494", + "email": "teri@erlewine.com", + "web": "http://www.terierlewine.com", + "followers": 6077 + }, + { + "firstname": "Alissa", + "lastname": "Mountjoy", + "company": "Technical & Mgmt Svc Corp", + "address": "6585 Commerce Blvd", + "city": "Rohnert Park", + "county": "Sonoma", + "state": "CA", + "zip": "94928", + "phone": "707-585-9715", + "fax": "707-585-7011", + "email": "alissa@mountjoy.com", + "web": "http://www.alissamountjoy.com", + "followers": 4886 + }, + { + "firstname": "Helene", + "lastname": "Iberg", + "company": "Spec Check Inc", + "address": "24800 Rockside Rd", + "city": "Bedford", + "county": "Cuyahoga", + "state": "OH", + "zip": "44146", + "phone": "440-786-6052", + "fax": "440-786-9246", + "email": "helene@iberg.com", + "web": "http://www.heleneiberg.com", + "followers": 716 + }, + { + "firstname": "Lona", + "lastname": "Scronce", + "company": "L & L Builders", + "address": "Rte 6 & 209", + "city": "Matamoras", + "county": "Pike", + "state": "PA", + "zip": "18336", + "phone": "570-296-4820", + "fax": "570-296-2054", + "email": "lona@scronce.com", + "web": "http://www.lonascronce.com", + "followers": 4687 + }, + { + "firstname": "Jeremy", + "lastname": "Lampi", + "company": "E Henderson Inc", + "address": "150 Sawkill 
Ave", + "city": "Milford", + "county": "Pike", + "state": "PA", + "zip": "18337", + "phone": "570-296-7797", + "fax": "570-296-4647", + "email": "jeremy@lampi.com", + "web": "http://www.jeremylampi.com", + "followers": 4714 + }, + { + "firstname": "Mitch", + "lastname": "Schattner", + "company": "Cosgrove Eisenberg & Kiley Pc", + "address": "3001 Geary Blvd", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94118", + "phone": "415-668-8105", + "fax": "415-668-5841", + "email": "mitch@schattner.com", + "web": "http://www.mitchschattner.com", + "followers": 4388 + }, + { + "firstname": "Hans", + "lastname": "Carlan", + "company": "Midlen & Guillot Chartered", + "address": "509 W 4th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-276-2956", + "fax": "907-276-6002", + "email": "hans@carlan.com", + "web": "http://www.hanscarlan.com", + "followers": 985 + }, + { + "firstname": "Concetta", + "lastname": "Sarchett", + "company": "Barco/chromatics Inc", + "address": "2405 Grand Blvd", + "city": "Kansas City", + "county": "Jackson", + "state": "MO", + "zip": "64108", + "phone": "816-274-3833", + "fax": "816-274-6897", + "email": "concetta@sarchett.com", + "web": "http://www.concettasarchett.com", + "followers": 3086 + }, + { + "firstname": "Isaac", + "lastname": "Zackery", + "company": "Holiday Inn Of Issaquah", + "address": "321 Palmer Rd", + "city": "Denville", + "county": "Morris", + "state": "NJ", + "zip": "07834", + "phone": "973-328-5943", + "fax": "973-328-1903", + "email": "isaac@zackery.com", + "web": "http://www.isaaczackery.com", + "followers": 501 + }, + { + "firstname": "Doug", + "lastname": "Matrisciano", + "company": "Cefpi", + "address": "12500 Ne 10th Pl", + "city": "Bellevue", + "county": "King", + "state": "WA", + "zip": "98005", + "phone": "425-451-5906", + "fax": "425-451-1273", + "email": "doug@matrisciano.com", + "web": "http://www.dougmatrisciano.com", + "followers": 9054 + }, + { + "firstname": "Devon", + "lastname": "Samrah", + "company": "Software Pursuits Inc", + "address": "1219 Pine Ave", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32824", + "phone": "407-240-2401", + "fax": "407-240-8312", + "email": "devon@samrah.com", + "web": "http://www.devonsamrah.com", + "followers": 6795 + }, + { + "firstname": "Amos", + "lastname": "Linnan", + "company": "Quincy, Jim", + "address": "3960 W 26th St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60623", + "phone": "773-277-8332", + "fax": "773-277-4756", + "email": "amos@linnan.com", + "web": "http://www.amoslinnan.com", + "followers": 6297 + }, + { + "firstname": "Manuel", + "lastname": "Dienhart", + "company": "Bohning Co Ltd", + "address": "40 E Mcmicken Ave", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45210", + "phone": "513-357-4669", + "fax": "513-357-7989", + "email": "manuel@dienhart.com", + "web": "http://www.manueldienhart.com", + "followers": 6771 + }, + { + "firstname": "Audra", + "lastname": "Cantu", + "company": "Chesapeake Telephone Systems", + "address": "935 S 2nd St", + "city": "Plainfield", + "county": "Union", + "state": "NJ", + "zip": "07063", + "phone": "908-756-1816", + "fax": "908-756-5441", + "email": "audra@cantu.com", + "web": "http://www.audracantu.com", + "followers": 3088 + }, + { + "firstname": "Keisha", + "lastname": "Ransonet", + "company": "Hsk Decker", + "address": "1049 Lakloey Dr", + "city": "North Pole", + "county": 
"Fairbanks North Star", + "state": "AK", + "zip": "99705", + "phone": "907-488-6897", + "fax": "907-488-2093", + "email": "keisha@ransonet.com", + "web": "http://www.keisharansonet.com", + "followers": 5340 + }, + { + "firstname": "Rolando", + "lastname": "Baumann", + "company": "Erie Brush Co", + "address": "921 Sw Washington St #-321", + "city": "Portland", + "county": "Multnomah", + "state": "OR", + "zip": "97205", + "phone": "503-241-6723", + "fax": "503-241-7691", + "email": "rolando@baumann.com", + "web": "http://www.rolandobaumann.com", + "followers": 9754 + }, + { + "firstname": "Maryanne", + "lastname": "Whyman", + "company": "Walls, Robert E Esq", + "address": "1008 S San Pedro St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-748-1137", + "fax": "213-748-0447", + "email": "maryanne@whyman.com", + "web": "http://www.maryannewhyman.com", + "followers": 844 + }, + { + "firstname": "Kurtis", + "lastname": "Asberry", + "company": "Budgetel Inns", + "address": "Box #-37223", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79937", + "phone": "915-591-1621", + "fax": "915-591-3614", + "email": "kurtis@asberry.com", + "web": "http://www.kurtisasberry.com", + "followers": 8502 + }, + { + "firstname": "Ed", + "lastname": "Gompf", + "company": "Corro Therm Inc", + "address": "3 B Floor Care & Hskpg Svc", + "city": "Catawissa", + "county": "Columbia", + "state": "PA", + "zip": "17820", + "phone": "570-799-2838", + "fax": "570-799-4583", + "email": "ed@gompf.com", + "web": "http://www.edgompf.com", + "followers": 8705 + }, + { + "firstname": "Norman", + "lastname": "Betance", + "company": "Precision Electric Co Inc", + "address": "7949 E Acoma Dr", + "city": "Scottsdale", + "county": "Maricopa", + "state": "AZ", + "zip": "85260", + "phone": "480-991-7884", + "fax": "480-991-6547", + "email": "norman@betance.com", + "web": "http://www.normanbetance.com", + "followers": 4602 + }, + { + "firstname": "Berta", + "lastname": "Karczewski", + "company": "Sather Eng Inc", + "address": "1035 N Mcqueen Rd #-133", + "city": "Gilbert", + "county": "Maricopa", + "state": "AZ", + "zip": "85233", + "phone": "480-926-0770", + "fax": "480-926-7533", + "email": "berta@karczewski.com", + "web": "http://www.bertakarczewski.com", + "followers": 1093 + }, + { + "firstname": "Mac", + "lastname": "Marksberry", + "company": "Pursell, David B Esq", + "address": "112 W Plum", + "city": "Doniphan", + "county": "Hall", + "state": "NE", + "zip": "68832", + "phone": "402-845-4275", + "fax": "402-845-8229", + "email": "mac@marksberry.com", + "web": "http://www.macmarksberry.com", + "followers": 6081 + }, + { + "firstname": "Sandra", + "lastname": "Graen", + "company": "Action Remediation Co", + "address": "106 Erie St", + "city": "Hutchinson", + "county": "McLeod", + "state": "MN", + "zip": "55350", + "phone": "320-587-3844", + "fax": "320-587-7201", + "email": "sandra@graen.com", + "web": "http://www.sandragraen.com", + "followers": 4844 + }, + { + "firstname": "Lee", + "lastname": "Javens", + "company": "Dyer, James R Esq", + "address": "6086 N Lyons Rd", + "city": "Burlington", + "county": "Racine", + "state": "WI", + "zip": "53105", + "phone": "262-763-9582", + "fax": "262-763-3845", + "email": "lee@javens.com", + "web": "http://www.leejavens.com", + "followers": 2954 + }, + { + "firstname": "Fran", + "lastname": "Zanders", + "company": "River City Body Co", + "address": "6312 S Yellowstone Hwy", + "city": "Idaho Falls", + "county": 
"Bonneville", + "state": "ID", + "zip": "83402", + "phone": "208-525-6418", + "fax": "208-525-5501", + "email": "fran@zanders.com", + "web": "http://www.franzanders.com", + "followers": 9590 + }, + { + "firstname": "Lane", + "lastname": "Brantz", + "company": "Kings Inn", + "address": "106 Erie St", + "city": "Hutchinson", + "county": "McLeod", + "state": "MN", + "zip": "55350", + "phone": "320-587-2903", + "fax": "320-587-3448", + "email": "lane@brantz.com", + "web": "http://www.lanebrantz.com", + "followers": 361 + }, + { + "firstname": "Bess", + "lastname": "Marso", + "company": "Lesher Printers Inc", + "address": "15542 Chemical Ln", + "city": "Huntington Beach", + "county": "Orange", + "state": "CA", + "zip": "92649", + "phone": "714-895-4582", + "fax": "714-895-8188", + "email": "bess@marso.com", + "web": "http://www.bessmarso.com", + "followers": 9552 + }, + { + "firstname": "Tamara", + "lastname": "Declue", + "company": "Glen Burnie The Bank Of", + "address": "1900 W Loop S #-600", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77027", + "phone": "713-871-3958", + "fax": "713-871-6355", + "email": "tamara@declue.com", + "web": "http://www.tamaradeclue.com", + "followers": 3229 + }, + { + "firstname": "Denise", + "lastname": "Speegle", + "company": "Shipley Oil Company", + "address": "2078 Foster Ave", + "city": "Wheeling", + "county": "Cook", + "state": "IL", + "zip": "60090", + "phone": "847-870-8743", + "fax": "847-870-6026", + "email": "denise@speegle.com", + "web": "http://www.denisespeegle.com", + "followers": 1139 + }, + { + "firstname": "Lynda", + "lastname": "Youtsey", + "company": "Accurate Color Inc", + "address": "1370 S Bertelsen Rd", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97402", + "phone": "541-342-0606", + "fax": "541-342-0655", + "email": "lynda@youtsey.com", + "web": "http://www.lyndayoutsey.com", + "followers": 378 + }, + { + "firstname": "Diann", + "lastname": "Burigsay", + "company": "Snyder, Stephen E Esq", + "address": "13026 S Normandie Ave", + "city": "Gardena", + "county": "Los Angeles", + "state": "CA", + "zip": "90249", + "phone": "310-321-8278", + "fax": "310-321-0564", + "email": "diann@burigsay.com", + "web": "http://www.diannburigsay.com", + "followers": 6193 + }, + { + "firstname": "Mari", + "lastname": "Hwang", + "company": "Play Craft Pontoon Co", + "address": "23352 El Toro Rd", + "city": "Lake Forest", + "county": "Orange", + "state": "CA", + "zip": "92630", + "phone": "949-583-6901", + "fax": "949-583-7758", + "email": "mari@hwang.com", + "web": "http://www.marihwang.com", + "followers": 6719 + }, + { + "firstname": "Shanna", + "lastname": "Neundorfer", + "company": "Door Systems", + "address": "833 E Allegheny Ave", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19134", + "phone": "215-426-9722", + "fax": "215-426-8416", + "email": "shanna@neundorfer.com", + "web": "http://www.shannaneundorfer.com", + "followers": 9759 + }, + { + "firstname": "Sherwood", + "lastname": "Detillier", + "company": "Minteq International", + "address": "Box #-851", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91365", + "phone": "818-703-9160", + "fax": "818-703-0447", + "email": "sherwood@detillier.com", + "web": "http://www.sherwooddetillier.com", + "followers": 6816 + }, + { + "firstname": "Walton", + "lastname": "Schwallie", + "company": "Nielsen, Laura W Md", + "address": "154 Main St", + "city": "Upton", + "county": "Worcester", + "state": 
"MA", + "zip": "01568", + "phone": "508-529-8783", + "fax": "508-529-8368", + "email": "walton@schwallie.com", + "web": "http://www.waltonschwallie.com", + "followers": 4498 + }, + { + "firstname": "Judy", + "lastname": "Gartenmayer", + "company": "Pro Infusion Pharm Inc", + "address": "3260 W New Haven Ave", + "city": "Melbourne", + "county": "Brevard", + "state": "FL", + "zip": "32904", + "phone": "321-676-3091", + "fax": "321-676-3378", + "email": "judy@gartenmayer.com", + "web": "http://www.judygartenmayer.com", + "followers": 5730 + }, + { + "firstname": "Antione", + "lastname": "Mccleary", + "company": "Cerberus Pyrotronics", + "address": "1110 25th Ave N", + "city": "Fargo", + "county": "Cass", + "state": "ND", + "zip": "58102", + "phone": "701-293-8410", + "fax": "701-293-7439", + "email": "antione@mccleary.com", + "web": "http://www.antionemccleary.com", + "followers": 5273 + }, + { + "firstname": "Kay", + "lastname": "Ganguli", + "company": "Hanson, Bruce Esq", + "address": "3763 Scripps Dr", + "city": "Las Vegas", + "county": "Clark", + "state": "NV", + "zip": "89103", + "phone": "702-876-3089", + "fax": "702-876-9367", + "email": "kay@ganguli.com", + "web": "http://www.kayganguli.com", + "followers": 6368 + }, + { + "firstname": "Oma", + "lastname": "Duffy", + "company": "Laun Law Offices", + "address": "255 Industrial Dr", + "city": "Franklin", + "county": "Warren", + "state": "OH", + "zip": "45005", + "phone": "937-746-7537", + "fax": "937-746-4129", + "email": "oma@duffy.com", + "web": "http://www.omaduffy.com", + "followers": 9375 + }, + { + "firstname": "Devon", + "lastname": "Teston", + "company": "Bilton, Dean H Esq", + "address": "1900 W Loop S", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77027", + "phone": "713-871-9773", + "fax": "713-871-0838", + "email": "devon@teston.com", + "web": "http://www.devonteston.com", + "followers": 3426 + }, + { + "firstname": "Jade", + "lastname": "Erlebach", + "company": "Vickery Tape & Label Co Inc", + "address": "975 Flynn Rd", + "city": "Camarillo", + "county": "Ventura", + "state": "CA", + "zip": "93012", + "phone": "805-445-8331", + "fax": "805-445-9961", + "email": "jade@erlebach.com", + "web": "http://www.jadeerlebach.com", + "followers": 4178 + }, + { + "firstname": "Roseann", + "lastname": "Jerko", + "company": "Larry Farmer Appraisal Co Inc", + "address": "850 Glen Ave", + "city": "Moorestown", + "county": "Burlington", + "state": "NJ", + "zip": "08057", + "phone": "856-866-4945", + "fax": "856-866-1542", + "email": "roseann@jerko.com", + "web": "http://www.roseannjerko.com", + "followers": 5397 + }, + { + "firstname": "Ruthie", + "lastname": "Zortman", + "company": "Review Monterey Peninsula", + "address": "4508 Enterprise St", + "city": "Fremont", + "county": "Alameda", + "state": "CA", + "zip": "94538", + "phone": "510-651-1410", + "fax": "510-651-1242", + "email": "ruthie@zortman.com", + "web": "http://www.ruthiezortman.com", + "followers": 6079 + }, + { + "firstname": "Leif", + "lastname": "Arguin", + "company": "Cmplt Cmptg Solutions Ne", + "address": "4508 Enterprise St", + "city": "Fremont", + "county": "Alameda", + "state": "CA", + "zip": "94538", + "phone": "510-651-4937", + "fax": "510-651-8302", + "email": "leif@arguin.com", + "web": "http://www.leifarguin.com", + "followers": 6571 + }, + { + "firstname": "Millicent", + "lastname": "Ekstrom", + "company": "Personal Creations Inc", + "address": "151 Brown St #-b", + "city": "Lawrenceburg", + "county": "Dearborn", + "state": "IN", + "zip": 
"47025", + "phone": "812-537-7287", + "fax": "812-537-5442", + "email": "millicent@ekstrom.com", + "web": "http://www.millicentekstrom.com", + "followers": 5739 + }, + { + "firstname": "Val", + "lastname": "Oborne", + "company": "Stone Container Corporation", + "address": "7846 Clybourn Ave", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-767-1347", + "fax": "818-767-5123", + "email": "val@oborne.com", + "web": "http://www.valoborne.com", + "followers": 6746 + }, + { + "firstname": "Bridgett", + "lastname": "Retort", + "company": "Womens Resource & Refrl Ntwrk", + "address": "6747 Signat Dr", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77041", + "phone": "713-466-7259", + "fax": "713-466-3278", + "email": "bridgett@retort.com", + "web": "http://www.bridgettretort.com", + "followers": 3060 + }, + { + "firstname": "Tia", + "lastname": "Lino", + "company": "Superior Gundrilling", + "address": "35375 Highway 228", + "city": "Brownsville", + "county": "Linn", + "state": "OR", + "zip": "97327", + "phone": "541-466-2483", + "fax": "541-466-1661", + "email": "tia@lino.com", + "web": "http://www.tialino.com", + "followers": 8942 + }, + { + "firstname": "Jarrett", + "lastname": "Kenzie", + "company": "Young Men Christian Assn Cnty", + "address": "11551 Riverpark Way", + "city": "Chesterfield", + "county": "Chesterfield", + "state": "VA", + "zip": "23838", + "phone": "804-739-3007", + "fax": "804-739-7905", + "email": "jarrett@kenzie.com", + "web": "http://www.jarrettkenzie.com", + "followers": 9459 + }, + { + "firstname": "Mara", + "lastname": "Vanderzwaag", + "company": "Brown & Brown Law Office", + "address": "2550 E Lucas Dr", + "city": "Beaumont", + "county": "Jefferson", + "state": "TX", + "zip": "77703", + "phone": "409-892-1231", + "fax": "409-892-8492", + "email": "mara@vanderzwaag.com", + "web": "http://www.maravanderzwaag.com", + "followers": 2331 + }, + { + "firstname": "Tiffany", + "lastname": "Knust", + "company": "Vantage Products", + "address": "1425 Koll Cir #-107", + "city": "San Jose", + "county": "Santa Clara", + "state": "CA", + "zip": "95112", + "phone": "408-453-0357", + "fax": "408-453-1525", + "email": "tiffany@knust.com", + "web": "http://www.tiffanyknust.com", + "followers": 2896 + }, + { + "firstname": "Fabian", + "lastname": "Mcshaw", + "company": "Bodik, Michael G Esq", + "address": "6023 Garfield Ave", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90040", + "phone": "323-726-5319", + "fax": "323-726-8499", + "email": "fabian@mcshaw.com", + "web": "http://www.fabianmcshaw.com", + "followers": 4892 + }, + { + "firstname": "Annabelle", + "lastname": "Coger", + "company": "Center For Resource Management", + "address": "1200 Shreveport Barksdale Hwy", + "city": "Shreveport", + "county": "Caddo", + "state": "LA", + "zip": "71105", + "phone": "318-865-8418", + "fax": "318-865-7381", + "email": "annabelle@coger.com", + "web": "http://www.annabellecoger.com", + "followers": 2623 + }, + { + "firstname": "Marisa", + "lastname": "Smiler", + "company": "Star Systems Inc", + "address": "200 Broadhollow Rd", + "city": "Melville", + "county": "Suffolk", + "state": "NY", + "zip": "11747", + "phone": "631-673-3339", + "fax": "631-673-1556", + "email": "marisa@smiler.com", + "web": "http://www.marisasmiler.com", + "followers": 4693 + }, + { + "firstname": "Samantha", + "lastname": "Bordwell", + "company": "Interwest Freight System Inc", + "address": "23405 Sw 152nd Ct", + 
"city": "Homestead", + "county": "Miami-Dade", + "state": "FL", + "zip": "33032", + "phone": "305-247-8402", + "fax": "305-247-4599", + "email": "samantha@bordwell.com", + "web": "http://www.samanthabordwell.com", + "followers": 6109 + }, + { + "firstname": "Felecia", + "lastname": "Riedl", + "company": "Benson, John S", + "address": "333 Andrew Ave", + "city": "Salt Lake City", + "county": "Salt Lake", + "state": "UT", + "zip": "84115", + "phone": "801-486-6484", + "fax": "801-486-6755", + "email": "felecia@riedl.com", + "web": "http://www.feleciariedl.com", + "followers": 7849 + }, + { + "firstname": "Kris", + "lastname": "Persson", + "company": "Tweel, Ronald R Esq", + "address": "1765 Sw Highway 97", + "city": "Madras", + "county": "Jefferson", + "state": "OR", + "zip": "97741", + "phone": "541-475-8404", + "fax": "541-475-0021", + "email": "kris@persson.com", + "web": "http://www.krispersson.com", + "followers": 232 + }, + { + "firstname": "Kylie", + "lastname": "Bridgeman", + "company": "Thomas & Libowitz Pa", + "address": "118 Lenzner Ct", + "city": "Sewickley", + "county": "Allegheny", + "state": "PA", + "zip": "15143", + "phone": "412-741-4604", + "fax": "412-741-4236", + "email": "kylie@bridgeman.com", + "web": "http://www.kyliebridgeman.com", + "followers": 9868 + }, + { + "firstname": "Eduardo", + "lastname": "Bellendir", + "company": "Powers & Assocs", + "address": "1800 Pine Run Rd", + "city": "Wilkes Barre", + "county": "Luzerne", + "state": "PA", + "zip": "18702", + "phone": "570-822-0721", + "fax": "570-822-6267", + "email": "eduardo@bellendir.com", + "web": "http://www.eduardobellendir.com", + "followers": 597 + }, + { + "firstname": "Waldo", + "lastname": "Edberg", + "company": "Bush Building Corporation", + "address": "2017 W Jackson St", + "city": "Tupelo", + "county": "Lee", + "state": "MS", + "zip": "38801", + "phone": "662-842-4133", + "fax": "662-842-8645", + "email": "waldo@edberg.com", + "web": "http://www.waldoedberg.com", + "followers": 6609 + }, + { + "firstname": "Brent", + "lastname": "Vaidya", + "company": "Crain Industries", + "address": "45 Church St", + "city": "Stamford", + "county": "Fairfield", + "state": "CT", + "zip": "06906", + "phone": "203-359-2824", + "fax": "203-359-6466", + "email": "brent@vaidya.com", + "web": "http://www.brentvaidya.com", + "followers": 3729 + }, + { + "firstname": "Bette", + "lastname": "Barcelona", + "company": "Fischer, William R Esq", + "address": "432 Lignite Ave", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-6738", + "fax": "907-456-4144", + "email": "bette@barcelona.com", + "web": "http://www.bettebarcelona.com", + "followers": 5857 + }, + { + "firstname": "Rich", + "lastname": "Gleave", + "company": "Finkelstein, Bernard A Cpa", + "address": "827 E 10th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-277-9294", + "fax": "907-277-2227", + "email": "rich@gleave.com", + "web": "http://www.richgleave.com", + "followers": 488 + }, + { + "firstname": "Lyman", + "lastname": "Whittley", + "company": "Berry, Robert A Esq", + "address": "3030 Bridgeway", + "city": "Sausalito", + "county": "Marin", + "state": "CA", + "zip": "94965", + "phone": "415-332-9570", + "fax": "415-332-7303", + "email": "lyman@whittley.com", + "web": "http://www.lymanwhittley.com", + "followers": 7950 + }, + { + "firstname": "Maryann", + "lastname": "Garnette", + "company": "Catholic University Of", + "address": "582 Centerville 
Rd", + "city": "Lancaster", + "county": "Lancaster", + "state": "PA", + "zip": "17601", + "phone": "717-560-6671", + "fax": "717-560-5625", + "email": "maryann@garnette.com", + "web": "http://www.maryanngarnette.com", + "followers": 5412 + }, + { + "firstname": "Jimmie", + "lastname": "Zarzycki", + "company": "John C Auth", + "address": "215 E Pikes Peak Ave", + "city": "Colorado Springs", + "county": "El Paso", + "state": "CO", + "zip": "80903", + "phone": "719-632-0667", + "fax": "719-632-5612", + "email": "jimmie@zarzycki.com", + "web": "http://www.jimmiezarzycki.com", + "followers": 4291 + }, + { + "firstname": "Gisela", + "lastname": "Kosicki", + "company": "Lisher, John L Esq", + "address": "22140 Ventura Blvd #-4", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91364", + "phone": "818-713-6306", + "fax": "818-713-8346", + "email": "gisela@kosicki.com", + "web": "http://www.giselakosicki.com", + "followers": 377 + }, + { + "firstname": "Marlene", + "lastname": "Hammeren", + "company": "Chamot, Philip S Esq", + "address": "1000 Monte Sano Blvd Se", + "city": "Huntsville", + "county": "Madison", + "state": "AL", + "zip": "35801", + "phone": "256-533-8674", + "fax": "256-533-1176", + "email": "marlene@hammeren.com", + "web": "http://www.marlenehammeren.com", + "followers": 428 + }, + { + "firstname": "Kris", + "lastname": "Stanzak", + "company": "Cenref Labs", + "address": "3100 Dodge St", + "city": "Dubuque", + "county": "Dubuque", + "state": "IA", + "zip": "52003", + "phone": "563-557-2588", + "fax": "563-557-6308", + "email": "kris@stanzak.com", + "web": "http://www.krisstanzak.com", + "followers": 368 + }, + { + "firstname": "Roman", + "lastname": "Simone", + "company": "Spottswood, William B Esq", + "address": "610 W Main St", + "city": "Batavia", + "county": "Clermont", + "state": "OH", + "zip": "45103", + "phone": "513-732-3089", + "fax": "513-732-2547", + "email": "roman@simone.com", + "web": "http://www.romansimone.com", + "followers": 3460 + }, + { + "firstname": "Cathryn", + "lastname": "Nicolaus", + "company": "Perkins Photo/graphics Inc", + "address": "2575 State Highway 32", + "city": "Chico", + "county": "Butte", + "state": "CA", + "zip": "95973", + "phone": "530-345-4627", + "fax": "530-345-8372", + "email": "cathryn@nicolaus.com", + "web": "http://www.cathrynnicolaus.com", + "followers": 9494 + }, + { + "firstname": "Lana", + "lastname": "Keels", + "company": "Veron, J Michael Esq", + "address": "711 Park Ave", + "city": "Freehold", + "county": "Monmouth", + "state": "NJ", + "zip": "07728", + "phone": "732-462-1106", + "fax": "732-462-3575", + "email": "lana@keels.com", + "web": "http://www.lanakeels.com", + "followers": 2796 + }, + { + "firstname": "Malissa", + "lastname": "Ziesemer", + "company": "Electron Rentals Inc", + "address": "330 S Ocean Blvd", + "city": "Palm Beach", + "county": "Palm Beach", + "state": "FL", + "zip": "33480", + "phone": "561-655-6443", + "fax": "561-655-9129", + "email": "malissa@ziesemer.com", + "web": "http://www.malissaziesemer.com", + "followers": 7525 + }, + { + "firstname": "Pamala", + "lastname": "Brodtmann", + "company": "Gagen, William E Jr", + "address": "342 Seaside Ave", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-926-0776", + "fax": "808-926-6173", + "email": "pamala@brodtmann.com", + "web": "http://www.pamalabrodtmann.com", + "followers": 134 + }, + { + "firstname": "Heriberto", + "lastname": "Tivis", + "company": "Newood", + "address": 
"1135 Kildaire Farm Rd", + "city": "Cary", + "county": "Wake", + "state": "NC", + "zip": "27511", + "phone": "919-460-8104", + "fax": "919-460-4304", + "email": "heriberto@tivis.com", + "web": "http://www.heribertotivis.com", + "followers": 2418 + }, + { + "firstname": "Edgardo", + "lastname": "Prudente", + "company": "R A Hamilton Corp", + "address": "1110 N Highway 360", + "city": "Grand Prairie", + "county": "Dallas", + "state": "TX", + "zip": "75050", + "phone": "972-660-3960", + "fax": "972-660-0934", + "email": "edgardo@prudente.com", + "web": "http://www.edgardoprudente.com", + "followers": 3269 + }, + { + "firstname": "Fred", + "lastname": "Kunde", + "company": "Swanson, Victoria C Esq", + "address": "5321 Sterling Center Dr", + "city": "Westlake Village", + "county": "Ventura", + "state": "CA", + "zip": "91361", + "phone": "805-991-9740", + "fax": "805-991-4665", + "email": "fred@kunde.com", + "web": "http://www.fredkunde.com", + "followers": 7741 + }, + { + "firstname": "Pilar", + "lastname": "Suddeth", + "company": "Slant Fin Corp", + "address": "3700 Campus Dr", + "city": "Newport Beach", + "county": "Orange", + "state": "CA", + "zip": "92660", + "phone": "949-852-5463", + "fax": "949-852-9027", + "email": "pilar@suddeth.com", + "web": "http://www.pilarsuddeth.com", + "followers": 4094 + }, + { + "firstname": "Eliseo", + "lastname": "Wice", + "company": "A Limousine Service", + "address": "711 W 38th St", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78705", + "phone": "512-458-0034", + "fax": "512-458-7226", + "email": "eliseo@wice.com", + "web": "http://www.eliseowice.com", + "followers": 7508 + }, + { + "firstname": "Bridget", + "lastname": "Knightly", + "company": "Teamsters Union Local 20", + "address": "66 Flint St", + "city": "Asheville", + "county": "Buncombe", + "state": "NC", + "zip": "28801", + "phone": "828-251-0817", + "fax": "828-251-4242", + "email": "bridget@knightly.com", + "web": "http://www.bridgetknightly.com", + "followers": 1212 + }, + { + "firstname": "Tori", + "lastname": "Villaescusa", + "company": "Church Point Whol Gr Co Inc", + "address": "1006 S San Pedro St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-748-7617", + "fax": "213-748-5668", + "email": "tori@villaescusa.com", + "web": "http://www.torivillaescusa.com", + "followers": 9093 + }, + { + "firstname": "Claire", + "lastname": "Moyerman", + "company": "Grt Nthrn Shoe Rpr & Dry Clnrs", + "address": "2280 S Xanadu Way #-300", + "city": "Aurora", + "county": "Arapahoe", + "state": "CO", + "zip": "80014", + "phone": "303-337-5701", + "fax": "303-337-5796", + "email": "claire@moyerman.com", + "web": "http://www.clairemoyerman.com", + "followers": 5959 + }, + { + "firstname": "Judi", + "lastname": "Kivel", + "company": "Postal Place At 111th Square", + "address": "125 N Emporia St", + "city": "Wichita", + "county": "Sedgwick", + "state": "KS", + "zip": "67202", + "phone": "316-267-2178", + "fax": "316-267-5183", + "email": "judi@kivel.com", + "web": "http://www.judikivel.com", + "followers": 6139 + }, + { + "firstname": "Terrell", + "lastname": "Rodda", + "company": "Woodland Village Nursing Home", + "address": "200 Cottontail Ln", + "city": "Somerset", + "county": "Somerset", + "state": "NJ", + "zip": "08873", + "phone": "732-563-5361", + "fax": "732-563-1293", + "email": "terrell@rodda.com", + "web": "http://www.terrellrodda.com", + "followers": 3803 + }, + { + "firstname": "Jasmin", + "lastname": "Gum", + "company": 
"White, Samuel I Esq", + "address": "43 Parmenter Rd", + "city": "Hudson", + "county": "Middlesex", + "state": "MA", + "zip": "01749", + "phone": "978-562-2524", + "fax": "978-562-5062", + "email": "jasmin@gum.com", + "web": "http://www.jasmingum.com", + "followers": 9074 + }, + { + "firstname": "Bridget", + "lastname": "Bottella", + "company": "Mid State Construction Prod", + "address": "112 E Pecan St", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78205", + "phone": "210-224-9708", + "fax": "210-224-4881", + "email": "bridget@bottella.com", + "web": "http://www.bridgetbottella.com", + "followers": 5951 + }, + { + "firstname": "Tami", + "lastname": "Trybus", + "company": "Seawright, Rodney", + "address": "3963 Virginia Ave", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45227", + "phone": "513-561-1096", + "fax": "513-561-7531", + "email": "tami@trybus.com", + "web": "http://www.tamitrybus.com", + "followers": 370 + }, + { + "firstname": "Maynard", + "lastname": "Kaewprasert", + "company": "Decatur Studio Inc", + "address": "1224 S Hope St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-747-6026", + "fax": "213-747-3088", + "email": "maynard@kaewprasert.com", + "web": "http://www.maynardkaewprasert.com", + "followers": 711 + }, + { + "firstname": "Viola", + "lastname": "Mcsorley", + "company": "First Bank Of Brunswick", + "address": "6850 S Harlem Ave", + "city": "Summit Argo", + "county": "Cook", + "state": "IL", + "zip": "60501", + "phone": "708-496-7428", + "fax": "708-496-8587", + "email": "viola@mcsorley.com", + "web": "http://www.violamcsorley.com", + "followers": 7687 + }, + { + "firstname": "Reggie", + "lastname": "Streu", + "company": "Als Motors Inc", + "address": "15 Henderson Dr", + "city": "Caldwell", + "county": "Essex", + "state": "NJ", + "zip": "07006", + "phone": "973-575-5898", + "fax": "973-575-9328", + "email": "reggie@streu.com", + "web": "http://www.reggiestreu.com", + "followers": 971 + }, + { + "firstname": "Rena", + "lastname": "Griffeth", + "company": "Reliable Optical", + "address": "19901 Nordhoff St", + "city": "Northridge", + "county": "Los Angeles", + "state": "CA", + "zip": "91324", + "phone": "818-709-9165", + "fax": "818-709-2649", + "email": "rena@griffeth.com", + "web": "http://www.renagriffeth.com", + "followers": 6336 + }, + { + "firstname": "Pierre", + "lastname": "Salera", + "company": "Ridley Ridley & Burnette", + "address": "1601 S Shamrock Ave", + "city": "Monrovia", + "county": "Los Angeles", + "state": "CA", + "zip": "91016", + "phone": "626-303-9233", + "fax": "626-303-2569", + "email": "pierre@salera.com", + "web": "http://www.pierresalera.com", + "followers": 6542 + }, + { + "firstname": "Carolina", + "lastname": "Kinlaw", + "company": "Loftus, Daniel B Esq", + "address": "3m County", + "city": "Belle Mead", + "county": "Somerset", + "state": "NJ", + "zip": "08502", + "phone": "908-874-0864", + "fax": "908-874-4873", + "email": "carolina@kinlaw.com", + "web": "http://www.carolinakinlaw.com", + "followers": 8944 + }, + { + "firstname": "Alejandra", + "lastname": "Prenatt", + "company": "Mitchell De Burring Co", + "address": "6850 S Harlem Ave", + "city": "Summit Argo", + "county": "Cook", + "state": "IL", + "zip": "60501", + "phone": "708-496-6958", + "fax": "708-496-4617", + "email": "alejandra@prenatt.com", + "web": "http://www.alejandraprenatt.com", + "followers": 4118 + }, + { + "firstname": "Quintin", + "lastname": "Isacson", 
+ "company": "Spectrum Constrctn & Dev Corp", + "address": "1015 N Cahuenga Blvd", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90038", + "phone": "323-469-0643", + "fax": "323-469-3082", + "email": "quintin@isacson.com", + "web": "http://www.quintinisacson.com", + "followers": 94 + }, + { + "firstname": "Robin", + "lastname": "Grotz", + "company": "Hackett, Peter J", + "address": "2601 Summerhill Rd", + "city": "Texarkana", + "county": "Bowie", + "state": "TX", + "zip": "75503", + "phone": "903-792-2081", + "fax": "903-792-5309", + "email": "robin@grotz.com", + "web": "http://www.robingrotz.com", + "followers": 2532 + }, + { + "firstname": "Lacy", + "lastname": "Woodfin", + "company": "Enviro Dynamics", + "address": "130 Wyoming Ave", + "city": "Scranton", + "county": "Lackawanna", + "state": "PA", + "zip": "18503", + "phone": "570-348-3754", + "fax": "570-348-4204", + "email": "lacy@woodfin.com", + "web": "http://www.lacywoodfin.com", + "followers": 4644 + }, + { + "firstname": "Daniel", + "lastname": "Zill", + "company": "Safety Team Inc", + "address": "550 N Brand Blvd #-1940", + "city": "Glendale", + "county": "Los Angeles", + "state": "CA", + "zip": "91203", + "phone": "818-507-7207", + "fax": "818-507-1925", + "email": "daniel@zill.com", + "web": "http://www.danielzill.com", + "followers": 2051 + }, + { + "firstname": "Reina", + "lastname": "Wolchesky", + "company": "Highland Management Group", + "address": "305 W Washington St", + "city": "Brainerd", + "county": "Crow Wing", + "state": "MN", + "zip": "56401", + "phone": "218-828-7281", + "fax": "218-828-3231", + "email": "reina@wolchesky.com", + "web": "http://www.reinawolchesky.com", + "followers": 9240 + }, + { + "firstname": "Marc", + "lastname": "Wanger", + "company": "Welsh Company", + "address": "33 Harrison Ave", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02111", + "phone": "617-426-6393", + "fax": "617-426-1114", + "email": "marc@wanger.com", + "web": "http://www.marcwanger.com", + "followers": 2846 + }, + { + "firstname": "Damion", + "lastname": "Matkin", + "company": "Data Ware Development Inc", + "address": "5830 Downing St #-d", + "city": "Denver", + "county": "Denver", + "state": "CO", + "zip": "80216", + "phone": "303-295-4797", + "fax": "303-295-3867", + "email": "damion@matkin.com", + "web": "http://www.damionmatkin.com", + "followers": 5752 + }, + { + "firstname": "Lucius", + "lastname": "Winchester", + "company": "Barrett Paving Materials Inc", + "address": "670 S Barrington Rd", + "city": "Streamwood", + "county": "Cook", + "state": "IL", + "zip": "60107", + "phone": "630-289-9458", + "fax": "630-289-9033", + "email": "lucius@winchester.com", + "web": "http://www.luciuswinchester.com", + "followers": 8815 + }, + { + "firstname": "Petra", + "lastname": "Mcnichol", + "company": "Hyacinth Foundation Aids Proj", + "address": "670 S Barrington Rd", + "city": "Streamwood", + "county": "Cook", + "state": "IL", + "zip": "60107", + "phone": "630-289-8190", + "fax": "630-289-8985", + "email": "petra@mcnichol.com", + "web": "http://www.petramcnichol.com", + "followers": 9459 + }, + { + "firstname": "Katina", + "lastname": "Ramano", + "company": "Fuhrmann, Chris C Esq", + "address": "580 Fountain Ave", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11208", + "phone": "718-272-2553", + "fax": "718-272-8498", + "email": "katina@ramano.com", + "web": "http://www.katinaramano.com", + "followers": 924 + }, + { + "firstname": "Leslie", + 
"lastname": "Cackowski", + "company": "Re, Matthew R Esq", + "address": "2103 W Main St", + "city": "Farmington", + "county": "San Juan", + "state": "NM", + "zip": "87401", + "phone": "505-325-3933", + "fax": "505-325-9042", + "email": "leslie@cackowski.com", + "web": "http://www.lesliecackowski.com", + "followers": 3028 + }, + { + "firstname": "Cristopher", + "lastname": "Wiget", + "company": "Roche, Patrick Esq", + "address": "4354 Highway 64", + "city": "Kirtland", + "county": "San Juan", + "state": "NM", + "zip": "87417", + "phone": "505-598-9742", + "fax": "505-598-3063", + "email": "cristopher@wiget.com", + "web": "http://www.cristopherwiget.com", + "followers": 2445 + }, + { + "firstname": "Garth", + "lastname": "Skiffington", + "company": "Fox & Killbride", + "address": "22 Mill St", + "city": "Paterson", + "county": "Passaic", + "state": "NJ", + "zip": "07501", + "phone": "973-684-7654", + "fax": "973-684-6309", + "email": "garth@skiffington.com", + "web": "http://www.garthskiffington.com", + "followers": 1725 + }, + { + "firstname": "Brendan", + "lastname": "Qin", + "company": "Allegro Copy & Print", + "address": "305 Griffin Ave Sw", + "city": "Eastman", + "county": "Dodge", + "state": "GA", + "zip": "31023", + "phone": "478-374-5686", + "fax": "478-374-6992", + "email": "brendan@qin.com", + "web": "http://www.brendanqin.com", + "followers": 8035 + }, + { + "firstname": "Chase", + "lastname": "Furler", + "company": "Hostetler & Kowalik", + "address": "3333 Se 21st St", + "city": "Topeka", + "county": "Shawnee", + "state": "KS", + "zip": "66607", + "phone": "785-354-7091", + "fax": "785-354-3042", + "email": "chase@furler.com", + "web": "http://www.chasefurler.com", + "followers": 2977 + }, + { + "firstname": "Marietta", + "lastname": "Bjornberg", + "company": "Consolidated Mechanical Inc", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-0501", + "fax": "406-728-5507", + "email": "marietta@bjornberg.com", + "web": "http://www.mariettabjornberg.com", + "followers": 54 + }, + { + "firstname": "Carmella", + "lastname": "Wishman", + "company": "Meicher Cpa", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-6772", + "fax": "406-728-7668", + "email": "carmella@wishman.com", + "web": "http://www.carmellawishman.com", + "followers": 624 + }, + { + "firstname": "Erica", + "lastname": "Eyrich", + "company": "C & I Computer Services Inc", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-7293", + "fax": "406-728-4789", + "email": "erica@eyrich.com", + "web": "http://www.ericaeyrich.com", + "followers": 8217 + }, + { + "firstname": "Lucius", + "lastname": "Bagnoli", + "company": "American Music Teacher", + "address": "404 W Boonville New Harmony Rd", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47711", + "phone": "812-867-2916", + "fax": "812-867-2593", + "email": "lucius@bagnoli.com", + "web": "http://www.luciusbagnoli.com", + "followers": 8666 + }, + { + "firstname": "Bart", + "lastname": "Hachey", + "company": "Pip Printing", + "address": "490 S Broad St", + "city": "Canfield", + "county": "Mahoning", + "state": "OH", + "zip": "44406", + "phone": "330-533-9769", + "fax": "330-533-6543", + "email": "bart@hachey.com", + "web": "http://www.barthachey.com", + "followers": 8457 + }, + { + "firstname": "Isiah", + 
"lastname": "Phernetton", + "company": "Wyckoff Florist & Gifts", + "address": "246 Griffing Ave", + "city": "Riverhead", + "county": "Suffolk", + "state": "NY", + "zip": "11901", + "phone": "631-727-0917", + "fax": "631-727-2147", + "email": "isiah@phernetton.com", + "web": "http://www.isiahphernetton.com", + "followers": 6662 + }, + { + "firstname": "Morton", + "lastname": "Crummell", + "company": "Boykin Management Co", + "address": "5485 Conestoga Ct", + "city": "Boulder", + "county": "Boulder", + "state": "CO", + "zip": "80301", + "phone": "303-546-3698", + "fax": "303-546-1589", + "email": "morton@crummell.com", + "web": "http://www.mortoncrummell.com", + "followers": 8143 + }, + { + "firstname": "Prince", + "lastname": "Kauk", + "company": "Mckesson Drug Co", + "address": "2320 W Louise Dr", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85027", + "phone": "623-581-7435", + "fax": "623-581-2472", + "email": "prince@kauk.com", + "web": "http://www.princekauk.com", + "followers": 177 + }, + { + "firstname": "Marta", + "lastname": "Horner", + "company": "Scisci, Pasquale M", + "address": "4694 Alvarado Canyon Rd #-f", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92120", + "phone": "858-265-2270", + "fax": "858-265-6604", + "email": "marta@horner.com", + "web": "http://www.martahorner.com", + "followers": 5749 + }, + { + "firstname": "Teodoro", + "lastname": "Gaboury", + "company": "Bostek, Eva M Dvm", + "address": "4251 Glenwood Ave", + "city": "Youngstown", + "county": "Mahoning", + "state": "OH", + "zip": "44512", + "phone": "330-783-8123", + "fax": "330-783-9674", + "email": "teodoro@gaboury.com", + "web": "http://www.teodorogaboury.com", + "followers": 3663 + }, + { + "firstname": "Jess", + "lastname": "Assad", + "company": "Livingston City", + "address": "9100 F St", + "city": "Omaha", + "county": "Douglas", + "state": "NE", + "zip": "68127", + "phone": "402-331-0470", + "fax": "402-331-4974", + "email": "jess@assad.com", + "web": "http://www.jessassad.com", + "followers": 6985 + }, + { + "firstname": "Freeman", + "lastname": "Soula", + "company": "Henson, Claudia", + "address": "Hwy 8e E", + "city": "Mena", + "county": "Polk", + "state": "AR", + "zip": "71953", + "phone": "479-394-6308", + "fax": "479-394-7509", + "email": "freeman@soula.com", + "web": "http://www.freemansoula.com", + "followers": 7262 + }, + { + "firstname": "Rita", + "lastname": "Center", + "company": "R D Playman Co", + "address": "4013 Cameron St #-b", + "city": "Lafayette", + "county": "Lafayette", + "state": "LA", + "zip": "70506", + "phone": "337-269-8825", + "fax": "337-269-7011", + "email": "rita@center.com", + "web": "http://www.ritacenter.com", + "followers": 4484 + }, + { + "firstname": "Kira", + "lastname": "Papen", + "company": "Tile City & Carpet Of Pa", + "address": "8387 University Ave", + "city": "La Mesa", + "county": "San Diego", + "state": "CA", + "zip": "91941", + "phone": "619-464-3649", + "fax": "619-464-8499", + "email": "kira@papen.com", + "web": "http://www.kirapapen.com", + "followers": 5711 + }, + { + "firstname": "Miquel", + "lastname": "Demicco", + "company": "Welk Resort Center", + "address": "5921 S Middlefield Rd", + "city": "Littleton", + "county": "Jefferson", + "state": "CO", + "zip": "80123", + "phone": "303-730-8080", + "fax": "303-730-8087", + "email": "miquel@demicco.com", + "web": "http://www.miqueldemicco.com", + "followers": 6863 + }, + { + "firstname": "William", + "lastname": "Mahmud", + "company": "Campbell Inn", 
+ "address": "1003 Northern Blvd", + "city": "Manhasset", + "county": "Nassau", + "state": "NY", + "zip": "11030", + "phone": "516-365-3496", + "fax": "516-365-0493", + "email": "william@mahmud.com", + "web": "http://www.williammahmud.com", + "followers": 3591 + }, + { + "firstname": "Lacy", + "lastname": "Belmont", + "company": "House Boat Rentals Inc", + "address": "585 Bedford Rd", + "city": "Bedford Hills", + "county": "Westchester", + "state": "NY", + "zip": "10507", + "phone": "914-241-8888", + "fax": "914-241-2272", + "email": "lacy@belmont.com", + "web": "http://www.lacybelmont.com", + "followers": 1399 + }, + { + "firstname": "Van", + "lastname": "Leanen", + "company": "Mail Boxes Etc", + "address": "Bus Rt 54 & Hh", + "city": "Lake Ozark", + "county": "Camden", + "state": "MO", + "zip": "65049", + "phone": "573-365-0319", + "fax": "573-365-1055", + "email": "van@leanen.com", + "web": "http://www.vanleanen.com", + "followers": 1949 + }, + { + "firstname": "Mayme", + "lastname": "Staub", + "company": "River House Hotel", + "address": "2470 Lamington Rd", + "city": "Bedminster", + "county": "Somerset", + "state": "NJ", + "zip": "07921", + "phone": "908-234-9338", + "fax": "908-234-9433", + "email": "mayme@staub.com", + "web": "http://www.maymestaub.com", + "followers": 2216 + }, + { + "firstname": "Gregg", + "lastname": "Guevarra", + "company": "Arbor Center", + "address": "221 S Kerr Ave", + "city": "Wilmington", + "county": "New Hanover", + "state": "NC", + "zip": "28403", + "phone": "910-799-9811", + "fax": "910-799-1965", + "email": "gregg@guevarra.com", + "web": "http://www.greggguevarra.com", + "followers": 2704 + }, + { + "firstname": "Minh", + "lastname": "Leclare", + "company": "Osach, Ronald C Esq", + "address": "106 S 4th St", + "city": "Forest City", + "county": "Winnebago", + "state": "IA", + "zip": "50436", + "phone": "641-582-0973", + "fax": "641-582-0424", + "email": "minh@leclare.com", + "web": "http://www.minhleclare.com", + "followers": 7681 + }, + { + "firstname": "Joey", + "lastname": "Sedore", + "company": "Goodwill Speciality Co", + "address": "1647 E Palmdale Blvd", + "city": "Palmdale", + "county": "Los Angeles", + "state": "CA", + "zip": "93550", + "phone": "661-273-4188", + "fax": "661-273-6263", + "email": "joey@sedore.com", + "web": "http://www.joeysedore.com", + "followers": 273 + }, + { + "firstname": "Jeanie", + "lastname": "Dalen", + "company": "Ivs Media Inc", + "address": "2900 Ford Rd", + "city": "Bristol", + "county": "Bucks", + "state": "PA", + "zip": "19007", + "phone": "215-788-5062", + "fax": "215-788-7666", + "email": "jeanie@dalen.com", + "web": "http://www.jeaniedalen.com", + "followers": 290 + }, + { + "firstname": "Eddie", + "lastname": "Gauer", + "company": "Easy Mail", + "address": "506 Kellogg Ave", + "city": "Ames", + "county": "Story", + "state": "IA", + "zip": "50010", + "phone": "515-233-2381", + "fax": "515-233-6551", + "email": "eddie@gauer.com", + "web": "http://www.eddiegauer.com", + "followers": 2350 + }, + { + "firstname": "Jessie", + "lastname": "Barkle", + "company": "Days Inn Airport By Mall Amer", + "address": "1002 S Treadaway Blvd", + "city": "Abilene", + "county": "Taylor", + "state": "TX", + "zip": "79602", + "phone": "325-677-1190", + "fax": "325-677-2343", + "email": "jessie@barkle.com", + "web": "http://www.jessiebarkle.com", + "followers": 5427 + }, + { + "firstname": "Deandre", + "lastname": "Resendiz", + "company": "Sav Mart", + "address": "4 Trvl Svc Carlson Trvl Ways", + "city": "Twin Falls", + "county": 
"Twin Falls", + "state": "ID", + "zip": "83301", + "phone": "208-733-8306", + "fax": "208-733-3476", + "email": "deandre@resendiz.com", + "web": "http://www.deandreresendiz.com", + "followers": 1120 + }, + { + "firstname": "Janet", + "lastname": "Rathrock", + "company": "Ryder, Edward A Esq", + "address": "2181 Harlem Rd", + "city": "Loves Park", + "county": "Winnebago", + "state": "IL", + "zip": "61111", + "phone": "815-877-4376", + "fax": "815-877-9538", + "email": "janet@rathrock.com", + "web": "http://www.janetrathrock.com", + "followers": 2725 + }, + { + "firstname": "Denice", + "lastname": "Nordlinger", + "company": "Bickel, Daniel R Cpa", + "address": "7210 Gateway Blvd E", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79915", + "phone": "915-593-2344", + "fax": "915-593-8069", + "email": "denice@nordlinger.com", + "web": "http://www.denicenordlinger.com", + "followers": 2003 + }, + { + "firstname": "Danny", + "lastname": "Dales", + "company": "Barrett Bindery Co", + "address": "3530 E Washington St", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85034", + "phone": "602-225-9543", + "fax": "602-225-9028", + "email": "danny@dales.com", + "web": "http://www.dannydales.com", + "followers": 2843 + }, + { + "firstname": "Robbie", + "lastname": "Deshay", + "company": "Cornhusker Press", + "address": "1700 Terminal St", + "city": "West Sacramento", + "county": "Yolo", + "state": "CA", + "zip": "95691", + "phone": "916-372-5032", + "fax": "916-372-1333", + "email": "robbie@deshay.com", + "web": "http://www.robbiedeshay.com", + "followers": 3460 + }, + { + "firstname": "Carla", + "lastname": "Humble", + "company": "Express Printing Center", + "address": "11796 Sheldon St", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-768-0662", + "fax": "818-768-1832", + "email": "carla@humble.com", + "web": "http://www.carlahumble.com", + "followers": 4639 + }, + { + "firstname": "Ashley", + "lastname": "Leonesio", + "company": "Martinique Resort Hotel", + "address": "15 Park Row", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10038", + "phone": "212-227-3681", + "fax": "212-227-4343", + "email": "ashley@leonesio.com", + "web": "http://www.ashleyleonesio.com", + "followers": 4801 + }, + { + "firstname": "Josephine", + "lastname": "Sotlar", + "company": "Shipp Storage", + "address": "400 1st St Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20001", + "phone": "202-783-2772", + "fax": "202-783-8805", + "email": "josephine@sotlar.com", + "web": "http://www.josephinesotlar.com", + "followers": 515 + }, + { + "firstname": "Derek", + "lastname": "Kreutzbender", + "company": "Recycle Metals Corp", + "address": "411 E Wisconsin Ave", + "city": "Milwaukee", + "county": "Milwaukee", + "state": "WI", + "zip": "53202", + "phone": "414-271-5253", + "fax": "414-271-6234", + "email": "derek@kreutzbender.com", + "web": "http://www.derekkreutzbender.com", + "followers": 2430 + }, + { + "firstname": "Kira", + "lastname": "Staffon", + "company": "International Management Assoc", + "address": "933 Wiliwili St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96826", + "phone": "808-949-0941", + "fax": "808-949-8257", + "email": "kira@staffon.com", + "web": "http://www.kirastaffon.com", + "followers": 5769 + }, + { + "firstname": "Isaac", + "lastname": "Davensizer", + "company": "Dillon Measurement Instruments", + "address": 
"444 Nahua St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-544-5794", + "fax": "808-544-6357", + "email": "isaac@davensizer.com", + "web": "http://www.isaacdavensizer.com", + "followers": 3453 + }, + { + "firstname": "Reva", + "lastname": "Bayer", + "company": "Hirt, Stanley Esq", + "address": "4011 Wallingford Ave N", + "city": "Seattle", + "county": "King", + "state": "WA", + "zip": "98103", + "phone": "206-634-9998", + "fax": "206-634-2589", + "email": "reva@bayer.com", + "web": "http://www.revabayer.com", + "followers": 7415 + }, + { + "firstname": "Melvin", + "lastname": "Auteri", + "company": "Interstate Unltd Fed Crdt Un", + "address": "463 Beacon St", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02115", + "phone": "617-247-8022", + "fax": "617-247-6002", + "email": "melvin@auteri.com", + "web": "http://www.melvinauteri.com", + "followers": 2705 + }, + { + "firstname": "Stephen", + "lastname": "Seiters", + "company": "Us Mortgage Corp", + "address": "814 Blue Mound Rd", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76131", + "phone": "817-947-3102", + "fax": "817-947-6272", + "email": "stephen@seiters.com", + "web": "http://www.stephenseiters.com", + "followers": 9568 + }, + { + "firstname": "Lucas", + "lastname": "Santellana", + "company": "Constantino, James P Esq", + "address": "4820 E Mcdowell Rd #-300", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85008", + "phone": "602-225-3469", + "fax": "602-225-8897", + "email": "lucas@santellana.com", + "web": "http://www.lucassantellana.com", + "followers": 8420 + }, + { + "firstname": "Traci", + "lastname": "Toomey", + "company": "Pea Press", + "address": "Box #-1948", + "city": "Jackson", + "county": "Teton", + "state": "WY", + "zip": "83001", + "phone": "307-733-9708", + "fax": "307-733-6525", + "email": "traci@toomey.com", + "web": "http://www.tracitoomey.com", + "followers": 7718 + }, + { + "firstname": "Vernice", + "lastname": "Resendes", + "company": "Sorrento Cheese Co Inc", + "address": "11796 Sheldon St", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-768-6234", + "fax": "818-768-7585", + "email": "vernice@resendes.com", + "web": "http://www.verniceresendes.com", + "followers": 912 + }, + { + "firstname": "Hillary", + "lastname": "Holmes", + "company": "Dennis N Brager Law Offices Of", + "address": "7565 Green Valley Rd", + "city": "Placerville", + "county": "El Dorado", + "state": "CA", + "zip": "95667", + "phone": "530-626-1934", + "fax": "530-626-6128", + "email": "hillary@holmes.com", + "web": "http://www.hillaryholmes.com", + "followers": 7918 + }, + { + "firstname": "Robin", + "lastname": "Schartz", + "company": "Fairmount Country Club", + "address": "10175 Joerschke Dr", + "city": "Grass Valley", + "county": "Nevada", + "state": "CA", + "zip": "95945", + "phone": "530-477-9983", + "fax": "530-477-7396", + "email": "robin@schartz.com", + "web": "http://www.robinschartz.com", + "followers": 2169 + }, + { + "firstname": "Sabrina", + "lastname": "Deppert", + "company": "Americold", + "address": "2701 E Thomas Rd #-j", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85016", + "phone": "602-954-4343", + "fax": "602-954-6266", + "email": "sabrina@deppert.com", + "web": "http://www.sabrinadeppert.com", + "followers": 5488 + }, + { + "firstname": "Luciano", + "lastname": "Truiolo", + "company": "Currier Gallery Of Art", 
+ "address": "435 Mira Vista Ter", + "city": "Pasadena", + "county": "Los Angeles", + "state": "CA", + "zip": "91105", + "phone": "626-792-6850", + "fax": "626-792-5166", + "email": "luciano@truiolo.com", + "web": "http://www.lucianotruiolo.com", + "followers": 9047 + }, + { + "firstname": "Ezekiel", + "lastname": "Mildon", + "company": "Ace Pro Pest Cntrl Inc", + "address": "10 Rogers St", + "city": "Cambridge", + "county": "Middlesex", + "state": "MA", + "zip": "02142", + "phone": "617-494-5618", + "fax": "617-494-3365", + "email": "ezekiel@mildon.com", + "web": "http://www.ezekielmildon.com", + "followers": 399 + }, + { + "firstname": "Hanna", + "lastname": "Cinkan", + "company": "Northbrook Flowers", + "address": "1150 Nw 72nd Ave #-333", + "city": "Miami", + "county": "Miami-Dade", + "state": "FL", + "zip": "33126", + "phone": "305-477-7869", + "fax": "305-477-7089", + "email": "hanna@cinkan.com", + "web": "http://www.hannacinkan.com", + "followers": 4636 + }, + { + "firstname": "Kory", + "lastname": "Wooldridge", + "company": "Paper Stock Dealers Inc", + "address": "1525 E 53rd St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60615", + "phone": "312-753-6734", + "fax": "312-753-2693", + "email": "kory@wooldridge.com", + "web": "http://www.korywooldridge.com", + "followers": 4528 + }, + { + "firstname": "Darrel", + "lastname": "Ruffins", + "company": "Brickman, Arthur Cpa", + "address": "2800 4th St N", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33704", + "phone": "727-821-8502", + "fax": "727-821-5104", + "email": "darrel@ruffins.com", + "web": "http://www.darrelruffins.com", + "followers": 6468 + }, + { + "firstname": "Miranda", + "lastname": "Hammitt", + "company": "Steindel, Carl R Md", + "address": "964 E Saint Francis St", + "city": "Brownsville", + "county": "Cameron", + "state": "TX", + "zip": "78520", + "phone": "956-541-5918", + "fax": "956-541-9021", + "email": "miranda@hammitt.com", + "web": "http://www.mirandahammitt.com", + "followers": 8847 + }, + { + "firstname": "Sadie", + "lastname": "Rowlett", + "company": "Port City Taxi Inc", + "address": "2659 Webster Ave", + "city": "Bronx", + "county": "Bronx", + "state": "NY", + "zip": "10458", + "phone": "718-365-1753", + "fax": "718-365-5353", + "email": "sadie@rowlett.com", + "web": "http://www.sadierowlett.com", + "followers": 9836 + }, + { + "firstname": "Deanna", + "lastname": "Gerbi", + "company": "Centerville Historical Society", + "address": "264 Broadway", + "city": "Jersey City", + "county": "Hudson", + "state": "NJ", + "zip": "07306", + "phone": "201-433-0391", + "fax": "201-433-3619", + "email": "deanna@gerbi.com", + "web": "http://www.deannagerbi.com", + "followers": 9112 + }, + { + "firstname": "Alfonso", + "lastname": "Griglen", + "company": "Paul D Friedman", + "address": "577 Township Road #-30s", + "city": "Ada", + "county": "Hardin", + "state": "OH", + "zip": "45810", + "phone": "419-634-3513", + "fax": "419-634-5733", + "email": "alfonso@griglen.com", + "web": "http://www.alfonsogriglen.com", + "followers": 7361 + }, + { + "firstname": "Vernon", + "lastname": "Engelman", + "company": "Atlas Metal Cutting Inc", + "address": "321 Watson St", + "city": "Ripon", + "county": "Fond du Lac", + "state": "WI", + "zip": "54971", + "phone": "920-748-1387", + "fax": "920-748-3703", + "email": "vernon@engelman.com", + "web": "http://www.vernonengelman.com", + "followers": 656 + }, + { + "firstname": "Johnnie", + "lastname": "Rheaves", + "company": "Roberts, 
James A Cpa", + "address": "2910 E La Cresta Ave", + "city": "Anaheim", + "county": "Orange", + "state": "CA", + "zip": "92806", + "phone": "714-632-1291", + "fax": "714-632-7337", + "email": "johnnie@rheaves.com", + "web": "http://www.johnnierheaves.com", + "followers": 2812 + }, + { + "firstname": "Ella", + "lastname": "Pahnke", + "company": "Davis, Randle S Esq", + "address": "2640 Junction Hwy", + "city": "Kerrville", + "county": "Kerr", + "state": "TX", + "zip": "78028", + "phone": "830-367-8513", + "fax": "830-367-9231", + "email": "ella@pahnke.com", + "web": "http://www.ellapahnke.com", + "followers": 4762 + }, + { + "firstname": "Veronica", + "lastname": "Achorn", + "company": "Guy Spradling", + "address": "5201 Hanawalt Dr", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79903", + "phone": "915-772-3217", + "fax": "915-772-2346", + "email": "veronica@achorn.com", + "web": "http://www.veronicaachorn.com", + "followers": 9145 + }, + { + "firstname": "Kasey", + "lastname": "Nguyen", + "company": "Pettine, Paul A Iii", + "address": "5603 Arapahoe Ave", + "city": "Boulder", + "county": "Boulder", + "state": "CO", + "zip": "80303", + "phone": "303-440-3916", + "fax": "303-440-2452", + "email": "kasey@nguyen.com", + "web": "http://www.kaseynguyen.com", + "followers": 2867 + }, + { + "firstname": "Frankie", + "lastname": "Morein", + "company": "Oliver, Jerrold B Esq", + "address": "104 North St", + "city": "Stamford", + "county": "Fairfield", + "state": "CT", + "zip": "06902", + "phone": "203-975-3712", + "fax": "203-975-4688", + "email": "frankie@morein.com", + "web": "http://www.frankiemorein.com", + "followers": 5663 + }, + { + "firstname": "Elaine", + "lastname": "Renzi", + "company": "Delvel Chem Co", + "address": "221 E 59th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-826-7966", + "fax": "212-826-2043", + "email": "elaine@renzi.com", + "web": "http://www.elainerenzi.com", + "followers": 9525 + }, + { + "firstname": "Timothy", + "lastname": "Janski", + "company": "Snyder Chevrolet Olds Geo Co", + "address": "800 E Dimond Blvd", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99515", + "phone": "907-344-4330", + "fax": "907-344-4086", + "email": "timothy@janski.com", + "web": "http://www.timothyjanski.com", + "followers": 1000 + }, + { + "firstname": "Warren", + "lastname": "Hacher", + "company": "Josel, Stephen C Esq", + "address": "4600 S 1st St", + "city": "Abilene", + "county": "Taylor", + "state": "TX", + "zip": "79605", + "phone": "325-691-7220", + "fax": "325-691-7394", + "email": "warren@hacher.com", + "web": "http://www.warrenhacher.com", + "followers": 6287 + }, + { + "firstname": "Brant", + "lastname": "Darnel", + "company": "Cody, Daniel S Esq", + "address": "8250 Tyler Blvd", + "city": "Mentor", + "county": "Lake", + "state": "OH", + "zip": "44060", + "phone": "440-974-8416", + "fax": "440-974-7476", + "email": "brant@darnel.com", + "web": "http://www.brantdarnel.com", + "followers": 5217 + }, + { + "firstname": "Mara", + "lastname": "Rineheart", + "company": "Berry Naturipe Growers", + "address": "39 W 21st St", + "city": "Northampton", + "county": "Northampton", + "state": "PA", + "zip": "18067", + "phone": "610-262-2444", + "fax": "610-262-6836", + "email": "mara@rineheart.com", + "web": "http://www.mararineheart.com", + "followers": 9213 + }, + { + "firstname": "Karen", + "lastname": "Flierl", + "company": "Cook Vetter Doerhoff", + "address": "501 N I #-h35", + 
"city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78702", + "phone": "512-477-1826", + "fax": "512-477-1407", + "email": "karen@flierl.com", + "web": "http://www.karenflierl.com", + "followers": 754 + }, + { + "firstname": "Virgil", + "lastname": "Chinni", + "company": "Dorfman Abrams Music & Co", + "address": "248 Libby St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-841-9811", + "fax": "808-841-9646", + "email": "virgil@chinni.com", + "web": "http://www.virgilchinni.com", + "followers": 6740 + }, + { + "firstname": "Jimmie", + "lastname": "Kertzman", + "company": "Oregon Pacific Trading Co", + "address": "279 Puuhale Rd", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-841-2883", + "fax": "808-841-1772", + "email": "jimmie@kertzman.com", + "web": "http://www.jimmiekertzman.com", + "followers": 4795 + }, + { + "firstname": "Leif", + "lastname": "Bachta", + "company": "Hislop, Lorna Brumfield Esq", + "address": "91246 Oihana St", + "city": "Kapolei", + "county": "Honolulu", + "state": "HI", + "zip": "96707", + "phone": "808-682-8942", + "fax": "808-682-2789", + "email": "leif@bachta.com", + "web": "http://www.leifbachta.com", + "followers": 6016 + }, + { + "firstname": "Ione", + "lastname": "Kucera", + "company": "Tweedy Penney & Crawford", + "address": "94210 Pupukahi St #-201a", + "city": "Waipahu", + "county": "Honolulu", + "state": "HI", + "zip": "96797", + "phone": "808-671-5253", + "fax": "808-671-3048", + "email": "ione@kucera.com", + "web": "http://www.ionekucera.com", + "followers": 17 + }, + { + "firstname": "Doreen", + "lastname": "Sakurai", + "company": "Airlifter", + "address": "211 E 50th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-759-4757", + "fax": "212-759-7548", + "email": "doreen@sakurai.com", + "web": "http://www.doreensakurai.com", + "followers": 8051 + }, + { + "firstname": "Joel", + "lastname": "Nardo", + "company": "New Era Canning Co", + "address": "5150 Town Center Cir", + "city": "Boca Raton", + "county": "Palm Beach", + "state": "FL", + "zip": "33486", + "phone": "561-395-2277", + "fax": "561-395-7825", + "email": "joel@nardo.com", + "web": "http://www.joelnardo.com", + "followers": 9041 + }, + { + "firstname": "Neil", + "lastname": "Backus", + "company": "Smith Capital Management", + "address": "521 5th Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10175", + "phone": "212-537-4955", + "fax": "212-537-1181", + "email": "neil@backus.com", + "web": "http://www.neilbackus.com", + "followers": 9529 + }, + { + "firstname": "Fausto", + "lastname": "Marks", + "company": "Donohue, Brian C Esq", + "address": "200 Stuart St", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02116", + "phone": "617-451-9353", + "fax": "617-451-2136", + "email": "fausto@marks.com", + "web": "http://www.faustomarks.com", + "followers": 9763 + }, + { + "firstname": "Christa", + "lastname": "Bodenschatz", + "company": "Stationers Inc", + "address": "Tilton Rd", + "city": "Danville", + "county": "Vermilion", + "state": "IL", + "zip": "61832", + "phone": "217-443-6280", + "fax": "217-443-6382", + "email": "christa@bodenschatz.com", + "web": "http://www.christabodenschatz.com", + "followers": 1744 + }, + { + "firstname": "Chi", + "lastname": "Greenlaw", + "company": "E & T Screw Machine Products", + "address": "4300 N Miller Rd #-143", + "city": "Scottsdale", + 
"county": "Maricopa", + "state": "AZ", + "zip": "85251", + "phone": "480-946-1537", + "fax": "480-946-1657", + "email": "chi@greenlaw.com", + "web": "http://www.chigreenlaw.com", + "followers": 2707 + }, + { + "firstname": "Kyle", + "lastname": "Ferri", + "company": "Ames Plumbing & Heating", + "address": "1801 E 5th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-272-2216", + "fax": "907-272-7109", + "email": "kyle@ferri.com", + "web": "http://www.kyleferri.com", + "followers": 7533 + }, + { + "firstname": "Freida", + "lastname": "Michelfelder", + "company": "Fun Discovery Inc", + "address": "117 Martin Luther King Jr Dr S", + "city": "Atlanta", + "county": "Fulton", + "state": "GA", + "zip": "30303", + "phone": "404-521-3372", + "fax": "404-521-3223", + "email": "freida@michelfelder.com", + "web": "http://www.freidamichelfelder.com", + "followers": 4298 + }, + { + "firstname": "Bryant", + "lastname": "Bouliouris", + "company": "Medlin, Charles K Jr", + "address": "480 W Pearl Ave", + "city": "Jackson", + "county": "Teton", + "state": "WY", + "zip": "83001", + "phone": "307-733-8286", + "fax": "307-733-6041", + "email": "bryant@bouliouris.com", + "web": "http://www.bryantbouliouris.com", + "followers": 9520 + }, + { + "firstname": "Emilia", + "lastname": "Oxley", + "company": "Buckeye Reserve Title", + "address": "14651 Dallas Pky", + "city": "Dallas", + "county": "Dallas", + "state": "TX", + "zip": "75240", + "phone": "214-702-8125", + "fax": "214-702-4766", + "email": "emilia@oxley.com", + "web": "http://www.emiliaoxley.com", + "followers": 6873 + }, + { + "firstname": "Naomi", + "lastname": "Mcraven", + "company": "Oak Brook Capital Corp", + "address": "13456 Se 27th Pl", + "city": "Bellevue", + "county": "King", + "state": "WA", + "zip": "98005", + "phone": "425-641-5463", + "fax": "425-641-5923", + "email": "naomi@mcraven.com", + "web": "http://www.naomimcraven.com", + "followers": 7029 + }, + { + "firstname": "Dionne", + "lastname": "Borycz", + "company": "Pepsi Cola Dr Pepper Bottling", + "address": "77 S Washington St #-207", + "city": "Rockville", + "county": "Montgomery", + "state": "MD", + "zip": "20850", + "phone": "301-294-0154", + "fax": "301-294-7523", + "email": "dionne@borycz.com", + "web": "http://www.dionneborycz.com", + "followers": 6973 + }, + { + "firstname": "Jimmy", + "lastname": "Hrobsky", + "company": "Howard Johnson", + "address": "8253 Ronson Rd", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92111", + "phone": "858-268-4663", + "fax": "858-268-4964", + "email": "jimmy@hrobsky.com", + "web": "http://www.jimmyhrobsky.com", + "followers": 544 + }, + { + "firstname": "Peggy", + "lastname": "Hohlstein", + "company": "Steritek Inc", + "address": "Rt 20", + "city": "Westfield", + "county": "Hampden", + "state": "MA", + "zip": "01085", + "phone": "413-543-2933", + "fax": "413-543-3805", + "email": "peggy@hohlstein.com", + "web": "http://www.peggyhohlstein.com", + "followers": 8885 + }, + { + "firstname": "Genevieve", + "lastname": "Kekiwi", + "company": "Lawson, John F Esq", + "address": "8300 Bell Ter", + "city": "Newburgh", + "county": "Warrick", + "state": "IN", + "zip": "47630", + "phone": "812-477-3620", + "fax": "812-477-3646", + "email": "genevieve@kekiwi.com", + "web": "http://www.genevievekekiwi.com", + "followers": 8134 + }, + { + "firstname": "Terra", + "lastname": "Plagge", + "company": "Beach, Jeffrey E", + "address": "60 Minute Photo Colormax", + "city": "Evansville", + 
"county": "Vanderburgh", + "state": "IN", + "zip": "47715", + "phone": "812-477-9524", + "fax": "812-477-9617", + "email": "terra@plagge.com", + "web": "http://www.terraplagge.com", + "followers": 4323 + }, + { + "firstname": "Allie", + "lastname": "Pumphrey", + "company": "Asher, Ronald L Md", + "address": "501 N Weinbach Ave", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47711", + "phone": "812-477-0753", + "fax": "812-477-4604", + "email": "allie@pumphrey.com", + "web": "http://www.alliepumphrey.com", + "followers": 3985 + }, + { + "firstname": "Katina", + "lastname": "Survant", + "company": "Kgtv Channel 10", + "address": "590 N 2nd E", + "city": "Mountain Home", + "county": "Elmore", + "state": "ID", + "zip": "83647", + "phone": "208-587-3734", + "fax": "208-587-5574", + "email": "katina@survant.com", + "web": "http://www.katinasurvant.com", + "followers": 3440 + }, + { + "firstname": "Marta", + "lastname": "Warran", + "company": "Wirth, John T Esq", + "address": "2929 W Kennewick Ave", + "city": "Kennewick", + "county": "Benton", + "state": "WA", + "zip": "99336", + "phone": "509-735-8388", + "fax": "509-735-9193", + "email": "marta@warran.com", + "web": "http://www.martawarran.com", + "followers": 9891 + }, + { + "firstname": "Rebekah", + "lastname": "Lindboe", + "company": "Granite Corporation", + "address": "600 Las Colinas Blvd E", + "city": "Irving", + "county": "Dallas", + "state": "TX", + "zip": "75039", + "phone": "972-556-1121", + "fax": "972-556-0801", + "email": "rebekah@lindboe.com", + "web": "http://www.rebekahlindboe.com", + "followers": 180 + }, + { + "firstname": "Roxie", + "lastname": "Varenhorst", + "company": "Good Neighbor Real Estate", + "address": "1301 Dublin Rd", + "city": "Columbus", + "county": "Franklin", + "state": "OH", + "zip": "43215", + "phone": "614-487-2917", + "fax": "614-487-4227", + "email": "roxie@varenhorst.com", + "web": "http://www.roxievarenhorst.com", + "followers": 1352 + }, + { + "firstname": "Kennith", + "lastname": "Peto", + "company": "Kirschbaum, Thomas A Esq", + "address": "2080 Peachtree Industrial Ct", + "city": "Atlanta", + "county": "Dekalb", + "state": "GA", + "zip": "30341", + "phone": "770-455-4277", + "fax": "770-455-6746", + "email": "kennith@peto.com", + "web": "http://www.kennithpeto.com", + "followers": 8164 + }, + { + "firstname": "Darrell", + "lastname": "Amrich", + "company": "Harris, James P Iii", + "address": "64 W Convenient", + "city": "Apex", + "county": "Wake", + "state": "NC", + "zip": "27502", + "phone": "919-362-8201", + "fax": "919-362-7475", + "email": "darrell@amrich.com", + "web": "http://www.darrellamrich.com", + "followers": 9098 + }, + { + "firstname": "Savannah", + "lastname": "Loffier", + "company": "Saint Charles Catv", + "address": "501 S Johnstone Ave", + "city": "Bartlesville", + "county": "Washington", + "state": "OK", + "zip": "74003", + "phone": "918-337-3201", + "fax": "918-337-4947", + "email": "savannah@loffier.com", + "web": "http://www.savannahloffier.com", + "followers": 7227 + }, + { + "firstname": "Martin", + "lastname": "Carley", + "company": "Heil, John P Esq", + "address": "680 Country W", + "city": "Sylva", + "county": "Jackson", + "state": "NC", + "zip": "28779", + "phone": "828-586-3914", + "fax": "828-586-8059", + "email": "martin@carley.com", + "web": "http://www.martincarley.com", + "followers": 7412 + }, + { + "firstname": "Lacy", + "lastname": "Hyten", + "company": "Buy & Sell Press", + "address": "1 Palmer Sq", + "city": "Princeton", + 
"county": "Mercer", + "state": "NJ", + "zip": "08542", + "phone": "609-683-3558", + "fax": "609-683-0649", + "email": "lacy@hyten.com", + "web": "http://www.lacyhyten.com", + "followers": 2184 + }, + { + "firstname": "Forest", + "lastname": "Orea", + "company": "Clark, Francis J", + "address": "6858 S Ashland Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60636", + "phone": "773-436-4531", + "fax": "773-436-2636", + "email": "forest@orea.com", + "web": "http://www.forestorea.com", + "followers": 8308 + }, + { + "firstname": "Courtney", + "lastname": "Shishido", + "company": "Beymers Jewelry", + "address": "145 W 6th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-279-2737", + "fax": "907-279-2025", + "email": "courtney@shishido.com", + "web": "http://www.courtneyshishido.com", + "followers": 6286 + }, + { + "firstname": "Annette", + "lastname": "Frietas", + "company": "Monsanto Chemical Company", + "address": "45 W 46th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10036", + "phone": "212-944-8670", + "fax": "212-944-9464", + "email": "annette@frietas.com", + "web": "http://www.annettefrietas.com", + "followers": 9185 + }, + { + "firstname": "Karyn", + "lastname": "Jinks", + "company": "Maslen, David Esq", + "address": "2318 N Galloway Ave", + "city": "Mesquite", + "county": "Dallas", + "state": "TX", + "zip": "75150", + "phone": "972-289-4090", + "fax": "972-289-3319", + "email": "karyn@jinks.com", + "web": "http://www.karynjinks.com", + "followers": 3549 + }, + { + "firstname": "Edwin", + "lastname": "Lavelli", + "company": "Letterguide Co", + "address": "W 1st St", + "city": "East Liverpool", + "county": "Columbiana", + "state": "OH", + "zip": "43920", + "phone": "330-385-4581", + "fax": "330-385-9959", + "email": "edwin@lavelli.com", + "web": "http://www.edwinlavelli.com", + "followers": 1645 + }, + { + "firstname": "Jimmie", + "lastname": "Barninger", + "company": "California Paint & Wlpaper Str", + "address": "Box #-4038", + "city": "Modesto", + "county": "Stanislaus", + "state": "CA", + "zip": "95352", + "phone": "209-525-7568", + "fax": "209-525-4389", + "email": "jimmie@barninger.com", + "web": "http://www.jimmiebarninger.com", + "followers": 3947 + }, + { + "firstname": "Merle", + "lastname": "Wyrosdick", + "company": "Keil, James J Esq", + "address": "1350 Campus Pky", + "city": "Neptune", + "county": "Monmouth", + "state": "NJ", + "zip": "07753", + "phone": "732-938-7301", + "fax": "732-938-7237", + "email": "merle@wyrosdick.com", + "web": "http://www.merlewyrosdick.com", + "followers": 5762 + }, + { + "firstname": "Amelia", + "lastname": "Caputo", + "company": "Security Marketing Agency", + "address": "1800 Airport Way", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-1748", + "fax": "907-456-7535", + "email": "amelia@caputo.com", + "web": "http://www.ameliacaputo.com", + "followers": 9583 + }, + { + "firstname": "Germaine", + "lastname": "Bruski", + "company": "Alloy Founders", + "address": "2301 S Cushman St", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-8225", + "fax": "907-456-9261", + "email": "germaine@bruski.com", + "web": "http://www.germainebruski.com", + "followers": 5075 + }, + { + "firstname": "Willa", + "lastname": "Dutt", + "company": "Gutmann Leather Co Inc", + "address": "2110 Peger Rd", + "city": "Fairbanks", + 
"county": "Fairbanks North Star", + "state": "AK", + "zip": "99709", + "phone": "907-456-2885", + "fax": "907-456-2187", + "email": "willa@dutt.com", + "web": "http://www.willadutt.com", + "followers": 2775 + }, + { + "firstname": "Cherie", + "lastname": "Fuhri", + "company": "Continental Baking Co", + "address": "3679 College Rd", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99709", + "phone": "907-456-9072", + "fax": "907-456-8467", + "email": "cherie@fuhri.com", + "web": "http://www.cheriefuhri.com", + "followers": 5839 + }, + { + "firstname": "Tyron", + "lastname": "Quillman", + "company": "Analysts International Corp", + "address": "5300 Shawnee Rd", + "city": "Alexandria", + "county": "Fairfax", + "state": "VA", + "zip": "22312", + "phone": "703-354-9266", + "fax": "703-354-2554", + "email": "tyron@quillman.com", + "web": "http://www.tyronquillman.com", + "followers": 9439 + }, + { + "firstname": "Charity", + "lastname": "Dyckman", + "company": "Marriott, Frank Jr", + "address": "6927 Old Seward Hwy", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99518", + "phone": "907-349-9880", + "fax": "907-349-4449", + "email": "charity@dyckman.com", + "web": "http://www.charitydyckman.com", + "followers": 2058 + }, + { + "firstname": "Nanette", + "lastname": "Turansky", + "company": "Sheraton Society Hill Hotel", + "address": "52 S 2nd St", + "city": "Easton", + "county": "Northampton", + "state": "PA", + "zip": "18042", + "phone": "610-250-6188", + "fax": "610-250-4334", + "email": "nanette@turansky.com", + "web": "http://www.nanetteturansky.com", + "followers": 5905 + }, + { + "firstname": "Cherie", + "lastname": "Schronce", + "company": "Varda, John Duncan Esq", + "address": "21603 Devonshire St", + "city": "Chatsworth", + "county": "Los Angeles", + "state": "CA", + "zip": "91311", + "phone": "818-718-2001", + "fax": "818-718-7339", + "email": "cherie@schronce.com", + "web": "http://www.cherieschronce.com", + "followers": 9409 + }, + { + "firstname": "Theron", + "lastname": "Hambright", + "company": "Sensor Oil And Gas Inc", + "address": "905 Brooks Ave", + "city": "Holland", + "county": "Ottawa", + "state": "MI", + "zip": "49423", + "phone": "616-392-2074", + "fax": "616-392-0226", + "email": "theron@hambright.com", + "web": "http://www.theronhambright.com", + "followers": 6532 + }, + { + "firstname": "Laurie", + "lastname": "Bibbs", + "company": "Action Nursing Care Llc", + "address": "2101 Claremont Ave Ne", + "city": "Albuquerque", + "county": "Bernalillo", + "state": "NM", + "zip": "87107", + "phone": "505-881-2899", + "fax": "505-881-3771", + "email": "laurie@bibbs.com", + "web": "http://www.lauriebibbs.com", + "followers": 5422 + }, + { + "firstname": "Angelo", + "lastname": "Ferentz", + "company": "Reagan, William L Esq", + "address": "3220 E 26th St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90023", + "phone": "323-262-5047", + "fax": "323-262-8693", + "email": "angelo@ferentz.com", + "web": "http://www.angeloferentz.com", + "followers": 6580 + }, + { + "firstname": "Denver", + "lastname": "Topete", + "company": "Insight Cablevision", + "address": "5770 Morehouse Dr", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92121", + "phone": "858-457-3538", + "fax": "858-457-0465", + "email": "denver@topete.com", + "web": "http://www.denvertopete.com", + "followers": 6542 + }, + { + "firstname": "Tommie", + "lastname": "Reuland", + "company": "Schaff, 
Michael D Esq", + "address": "1166 Arroyo St", + "city": "San Fernando", + "county": "Los Angeles", + "state": "CA", + "zip": "91340", + "phone": "818-361-4035", + "fax": "818-361-7493", + "email": "tommie@reuland.com", + "web": "http://www.tommiereuland.com", + "followers": 7735 + }, + { + "firstname": "Delmer", + "lastname": "Delucas", + "company": "American Processing Co Inc", + "address": "2770 Walden Ave", + "city": "Buffalo", + "county": "Erie", + "state": "NY", + "zip": "14225", + "phone": "716-874-1439", + "fax": "716-874-3467", + "email": "delmer@delucas.com", + "web": "http://www.delmerdelucas.com", + "followers": 8605 + }, + { + "firstname": "Latisha", + "lastname": "Bahls", + "company": "Search South Inc", + "address": "3 E 4th St", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45202", + "phone": "513-784-5007", + "fax": "513-784-5275", + "email": "latisha@bahls.com", + "web": "http://www.latishabahls.com", + "followers": 8504 + }, + { + "firstname": "Simone", + "lastname": "Lundie", + "company": "Casebolt, Victor S Esq", + "address": "701 S 17th St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19146", + "phone": "215-732-9026", + "fax": "215-732-8257", + "email": "simone@lundie.com", + "web": "http://www.simonelundie.com", + "followers": 2165 + }, + { + "firstname": "Ross", + "lastname": "Spurger", + "company": "Hoover Group Inc", + "address": "710 S Illinois Ave", + "city": "Carbondale", + "county": "Jackson", + "state": "IL", + "zip": "62901", + "phone": "618-453-9968", + "fax": "618-453-6144", + "email": "ross@spurger.com", + "web": "http://www.rossspurger.com", + "followers": 361 + }, + { + "firstname": "Abel", + "lastname": "Tuter", + "company": "Wernsing Plumbing & Heating", + "address": "2457 Perkiomen Ave", + "city": "Reading", + "county": "Berks", + "state": "PA", + "zip": "19606", + "phone": "610-370-6549", + "fax": "610-370-0856", + "email": "abel@tuter.com", + "web": "http://www.abeltuter.com", + "followers": 6215 + }, + { + "firstname": "Beverley", + "lastname": "Bunche", + "company": "Nurses Organization Vets Affrs", + "address": "1727 Nw 79th Ave", + "city": "Miami", + "county": "Miami-Dade", + "state": "FL", + "zip": "33126", + "phone": "305-591-4141", + "fax": "305-591-7751", + "email": "beverley@bunche.com", + "web": "http://www.beverleybunche.com", + "followers": 8121 + }, + { + "firstname": "Lizzie", + "lastname": "Torregrossa", + "company": "Salerno & Son", + "address": "78 Faunce Corner Rd", + "city": "North Dartmouth", + "county": "Bristol", + "state": "MA", + "zip": "02747", + "phone": "508-997-1409", + "fax": "508-997-9846", + "email": "lizzie@torregrossa.com", + "web": "http://www.lizzietorregrossa.com", + "followers": 1134 + }, + { + "firstname": "Tia", + "lastname": "Neumaier", + "company": "Carterville Mini Storage", + "address": "2500 Maitland Center Pky", + "city": "Maitland", + "county": "Orange", + "state": "FL", + "zip": "32751", + "phone": "407-660-7426", + "fax": "407-660-7628", + "email": "tia@neumaier.com", + "web": "http://www.tianeumaier.com", + "followers": 3010 + }, + { + "firstname": "Lesa", + "lastname": "Chantry", + "company": "Lutz Cichy Selig & Zeronda", + "address": "1119 N Bodine St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19123", + "phone": "215-923-0136", + "fax": "215-923-9492", + "email": "lesa@chantry.com", + "web": "http://www.lesachantry.com", + "followers": 5201 + }, + { + "firstname": "Marcelo", + "lastname": 
"Arostegui", + "company": "Moraschs Quality Meats", + "address": "76 Mall Comp", + "city": "Branson", + "county": "Taney", + "state": "MO", + "zip": "65616", + "phone": "417-336-9702", + "fax": "417-336-2664", + "email": "marcelo@arostegui.com", + "web": "http://www.marceloarostegui.com", + "followers": 1195 + }, + { + "firstname": "Jimmie", + "lastname": "Hardgrove", + "company": "Prosthodontic Associates", + "address": "305 E 47th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10017", + "phone": "212-980-0445", + "fax": "212-980-6914", + "email": "jimmie@hardgrove.com", + "web": "http://www.jimmiehardgrove.com", + "followers": 1002 + }, + { + "firstname": "Renae", + "lastname": "Eldrige", + "company": "Pexco Packaging Corp", + "address": "1716 Rt 77", + "city": "Attica", + "county": "Wyoming", + "state": "NY", + "zip": "14011", + "phone": "585-591-3118", + "fax": "585-591-9104", + "email": "renae@eldrige.com", + "web": "http://www.renaeeldrige.com", + "followers": 4105 + }, + { + "firstname": "Tisha", + "lastname": "Gorder", + "company": "Paroly Rampart Sec Systems", + "address": "77 W Huron St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60610", + "phone": "312-642-2897", + "fax": "312-642-2664", + "email": "tisha@gorder.com", + "web": "http://www.tishagorder.com", + "followers": 3305 + }, + { + "firstname": "Clarice", + "lastname": "Knower", + "company": "Yaffa, Andrew B Esq", + "address": "210 W 79th St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60620", + "phone": "773-846-1489", + "fax": "773-846-1462", + "email": "clarice@knower.com", + "web": "http://www.clariceknower.com", + "followers": 5497 + }, + { + "firstname": "Sybil", + "lastname": "Marmerchant", + "company": "Blvd Distillers & Importers", + "address": "12 E 7th Ave", + "city": "York", + "county": "York", + "state": "PA", + "zip": "17404", + "phone": "717-845-4718", + "fax": "717-845-4736", + "email": "sybil@marmerchant.com", + "web": "http://www.sybilmarmerchant.com", + "followers": 4656 + }, + { + "firstname": "Floyd", + "lastname": "Veazey", + "company": "Ramada Inn San Fran Arprt N", + "address": "2877 E Florence Ave", + "city": "Huntington Park", + "county": "Los Angeles", + "state": "CA", + "zip": "90255", + "phone": "323-862-9133", + "fax": "323-862-5589", + "email": "floyd@veazey.com", + "web": "http://www.floydveazey.com", + "followers": 4208 + }, + { + "firstname": "Reyna", + "lastname": "Bangle", + "company": "C & R Contractors Inc", + "address": "85 Long Island Expy", + "city": "New Hyde Park", + "county": "Nassau", + "state": "NY", + "zip": "11040", + "phone": "516-627-8715", + "fax": "516-627-9033", + "email": "reyna@bangle.com", + "web": "http://www.reynabangle.com", + "followers": 9261 + }, + { + "firstname": "Owen", + "lastname": "Sparacino", + "company": "Murtha, Thomas D Esq", + "address": "2865 Poplar Ave", + "city": "Memphis", + "county": "Shelby", + "state": "TN", + "zip": "38111", + "phone": "901-324-9274", + "fax": "901-324-4381", + "email": "owen@sparacino.com", + "web": "http://www.owensparacino.com", + "followers": 154 + }, + { + "firstname": "Eli", + "lastname": "Bettner", + "company": "Shea, David J Esq", + "address": "825 W Main Ave", + "city": "Brewster", + "county": "Okanogan", + "state": "WA", + "zip": "98812", + "phone": "509-689-7964", + "fax": "509-689-1394", + "email": "eli@bettner.com", + "web": "http://www.elibettner.com", + "followers": 9176 + }, + { + "firstname": "Taylor", + "lastname": "Fogerty", + 
"company": "Khoury Factory Outlet", + "address": "2000 W 120th Ave", + "city": "Denver", + "county": "Adams", + "state": "CO", + "zip": "80234", + "phone": "303-465-0070", + "fax": "303-465-2109", + "email": "taylor@fogerty.com", + "web": "http://www.taylorfogerty.com", + "followers": 2464 + }, + { + "firstname": "Reva", + "lastname": "Lecates", + "company": "First American Rlty Assoc Inc", + "address": "829 S 14th St", + "city": "Fernandina Beach", + "county": "Nassau", + "state": "FL", + "zip": "32034", + "phone": "904-261-0604", + "fax": "904-261-2123", + "email": "reva@lecates.com", + "web": "http://www.revalecates.com", + "followers": 8963 + }, + { + "firstname": "Rodrigo", + "lastname": "Wildrick", + "company": "Midwest Wrecking Company Inc", + "address": "1201 34th St N", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33713", + "phone": "727-323-5060", + "fax": "727-323-5982", + "email": "rodrigo@wildrick.com", + "web": "http://www.rodrigowildrick.com", + "followers": 1406 + }, + { + "firstname": "George", + "lastname": "Tukis", + "company": "Andow Personnel Services", + "address": "8 Inn Of Americus", + "city": "Americus", + "county": "Sumter", + "state": "GA", + "zip": "31709", + "phone": "229-924-0263", + "fax": "229-924-4251", + "email": "george@tukis.com", + "web": "http://www.georgetukis.com", + "followers": 5243 + }, + { + "firstname": "Titus", + "lastname": "Rodreguez", + "company": "Vasquez & Co", + "address": "7677 Engineer Rd", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92111", + "phone": "858-571-0819", + "fax": "858-571-9047", + "email": "titus@rodreguez.com", + "web": "http://www.titusrodreguez.com", + "followers": 9258 + }, + { + "firstname": "Emilio", + "lastname": "Lampkin", + "company": "Hendricks, Kenneth J Cpa", + "address": "834 Lois Dr", + "city": "Williamstown", + "county": "Gloucester", + "state": "NJ", + "zip": "08094", + "phone": "856-629-8933", + "fax": "856-629-3337", + "email": "emilio@lampkin.com", + "web": "http://www.emiliolampkin.com", + "followers": 5810 + }, + { + "firstname": "Maryjane", + "lastname": "Arata", + "company": "Larson, John R Esq", + "address": "342 Wolverine Way", + "city": "Sparks", + "county": "Washoe", + "state": "NV", + "zip": "89431", + "phone": "775-352-5822", + "fax": "775-352-1557", + "email": "maryjane@arata.com", + "web": "http://www.maryjanearata.com", + "followers": 5044 + }, + { + "firstname": "Marcie", + "lastname": "Shulz", + "company": "United Printers Inc", + "address": "Blanco Rd", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78216", + "phone": "210-524-6711", + "fax": "210-524-1693", + "email": "marcie@shulz.com", + "web": "http://www.marcieshulz.com", + "followers": 4807 + }, + { + "firstname": "Celia", + "lastname": "Slavin", + "company": "Acra Aerospace Inc", + "address": "8750 W Bryn Mawr Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60631", + "phone": "773-256-3550", + "fax": "773-256-2162", + "email": "celia@slavin.com", + "web": "http://www.celiaslavin.com", + "followers": 3493 + }, + { + "firstname": "Suzette", + "lastname": "Devaughan", + "company": "Anchor Graphics Inc", + "address": "385 Prospect Ave", + "city": "Hackensack", + "county": "Bergen", + "state": "NJ", + "zip": "07601", + "phone": "201-342-8964", + "fax": "201-342-9862", + "email": "suzette@devaughan.com", + "web": "http://www.suzettedevaughan.com", + "followers": 6410 + }, + { + "firstname": "Christian", + "lastname": 
"Marnell", + "company": "Wenick, George D Esq", + "address": "2750 Springboro Rd", + "city": "Dayton", + "county": "Montgomery", + "state": "OH", + "zip": "45439", + "phone": "937-293-9728", + "fax": "937-293-6782", + "email": "christian@marnell.com", + "web": "http://www.christianmarnell.com", + "followers": 3071 + }, + { + "firstname": "Misty", + "lastname": "Ericksen", + "company": "Graham & Associates Inc", + "address": "1808 2nd", + "city": "Cheney", + "county": "Spokane", + "state": "WA", + "zip": "99004", + "phone": "509-235-8873", + "fax": "509-235-1856", + "email": "misty@ericksen.com", + "web": "http://www.mistyericksen.com", + "followers": 1779 + }, + { + "firstname": "Bert", + "lastname": "Schadle", + "company": "Guaranty Chevrolet Geo", + "address": "500 Sw Loop #-820", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76115", + "phone": "817-921-5560", + "fax": "817-921-5913", + "email": "bert@schadle.com", + "web": "http://www.bertschadle.com", + "followers": 9753 + }, + { + "firstname": "Bertram", + "lastname": "Quertermous", + "company": "Florida Mining & Materials", + "address": "222 Delaware Ave", + "city": "Wilmington", + "county": "New Castle", + "state": "DE", + "zip": "19801", + "phone": "302-655-8039", + "fax": "302-655-4522", + "email": "bertram@quertermous.com", + "web": "http://www.bertramquertermous.com", + "followers": 4349 + }, + { + "firstname": "Buster", + "lastname": "Wubbel", + "company": "Collins, Joseph B Esq", + "address": "315 Us Rt 1", + "city": "Fairless Hills", + "county": "Bucks", + "state": "PA", + "zip": "19030", + "phone": "215-943-3689", + "fax": "215-943-6049", + "email": "buster@wubbel.com", + "web": "http://www.busterwubbel.com", + "followers": 6911 + }, + { + "firstname": "Mildred", + "lastname": "Gallegas", + "company": "Rogers, William A Jr", + "address": "310 Ridge Rd", + "city": "Claymont", + "county": "New Castle", + "state": "DE", + "zip": "19703", + "phone": "302-792-8044", + "fax": "302-792-1282", + "email": "mildred@gallegas.com", + "web": "http://www.mildredgallegas.com", + "followers": 8618 + }, + { + "firstname": "Pat", + "lastname": "Hoshaw", + "company": "Jorgensen, James L Esq", + "address": "30 Matthews Rd", + "city": "Malvern", + "county": "Chester", + "state": "PA", + "zip": "19355", + "phone": "610-644-7836", + "fax": "610-644-3252", + "email": "pat@hoshaw.com", + "web": "http://www.pathoshaw.com", + "followers": 6030 + }, + { + "firstname": "Marshall", + "lastname": "Hutch", + "company": "Nako, Joy Y", + "address": "Route 519", + "city": "Eighty Four", + "county": "Washington", + "state": "PA", + "zip": "15330", + "phone": "724-225-1729", + "fax": "724-225-7064", + "email": "marshall@hutch.com", + "web": "http://www.marshallhutch.com", + "followers": 6225 + }, + { + "firstname": "Don", + "lastname": "Mestler", + "company": "Coldwell Bnkr Hearthside Rltrs", + "address": "State Hwy #-31", + "city": "Pennington", + "county": "Mercer", + "state": "NJ", + "zip": "08534", + "phone": "609-737-2033", + "fax": "609-737-2374", + "email": "don@mestler.com", + "web": "http://www.donmestler.com", + "followers": 6967 + }, + { + "firstname": "Emery", + "lastname": "Reek", + "company": "Metri Tech Engineering Inc", + "address": "85 S Beachview Dr", + "city": "Jekyll Island", + "county": "Glynn", + "state": "GA", + "zip": "31527", + "phone": "912-635-3866", + "fax": "912-635-4039", + "email": "emery@reek.com", + "web": "http://www.emeryreek.com", + "followers": 2476 + }, + { + "firstname": "Ray", + "lastname": 
"Srock", + "company": "Tilt Lock Inc", + "address": "8700 E Pinnacle Peak Rd", + "city": "Scottsdale", + "county": "Maricopa", + "state": "AZ", + "zip": "85255", + "phone": "480-585-6138", + "fax": "480-585-4983", + "email": "ray@srock.com", + "web": "http://www.raysrock.com", + "followers": 2387 + }, + { + "firstname": "Nickolas", + "lastname": "Khosravi", + "company": "Brennan, Mary V Esq", + "address": "120 Tustin Ave", + "city": "Newport Beach", + "county": "Orange", + "state": "CA", + "zip": "92663", + "phone": "949-646-6578", + "fax": "949-646-0043", + "email": "nickolas@khosravi.com", + "web": "http://www.nickolaskhosravi.com", + "followers": 3074 + }, + { + "firstname": "Aileen", + "lastname": "Mottern", + "company": "Bennett Hallmark Cards", + "address": "2053 Lemoine Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-944-1664", + "fax": "201-944-3382", + "email": "aileen@mottern.com", + "web": "http://www.aileenmottern.com", + "followers": 6519 + }, + { + "firstname": "Chad", + "lastname": "Araiza", + "company": "Christiansen, David L Cpa", + "address": "536 Grand Ave", + "city": "Schofield", + "county": "Marathon", + "state": "WI", + "zip": "54476", + "phone": "715-359-8700", + "fax": "715-359-3579", + "email": "chad@araiza.com", + "web": "http://www.chadaraiza.com", + "followers": 4865 + }, + { + "firstname": "Beverly", + "lastname": "Cambel", + "company": "Mcintyre Mcintyre & Mcintyre", + "address": "630 W 8th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-272-3953", + "fax": "907-272-3618", + "email": "beverly@cambel.com", + "web": "http://www.beverlycambel.com", + "followers": 2393 + }, + { + "firstname": "Janice", + "lastname": "Twiet", + "company": "Henley, Cal Pa", + "address": "136 S Riverside Ave", + "city": "Rialto", + "county": "San Bernardino", + "state": "CA", + "zip": "92376", + "phone": "909-874-6739", + "fax": "909-874-2594", + "email": "janice@twiet.com", + "web": "http://www.janicetwiet.com", + "followers": 7340 + }, + { + "firstname": "Byron", + "lastname": "Fortuna", + "company": "Jackson & Collins Pa", + "address": "700 Sw Higgins Ave", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59803", + "phone": "406-549-8320", + "fax": "406-549-4641", + "email": "byron@fortuna.com", + "web": "http://www.byronfortuna.com", + "followers": 8913 + }, + { + "firstname": "Lynette", + "lastname": "Setlock", + "company": "George S Olive & Co", + "address": "614 W Superior Ave", + "city": "Cleveland", + "county": "Cuyahoga", + "state": "OH", + "zip": "44113", + "phone": "216-566-2265", + "fax": "216-566-2299", + "email": "lynette@setlock.com", + "web": "http://www.lynettesetlock.com", + "followers": 8002 + }, + { + "firstname": "Willard", + "lastname": "Roughen", + "company": "Nakamura Oyama & Assocs Inc", + "address": "9 5officce Product Cent", + "city": "Arlington", + "county": "Tarrant", + "state": "TX", + "zip": "76012", + "phone": "817-265-1847", + "fax": "817-265-0322", + "email": "willard@roughen.com", + "web": "http://www.willardroughen.com", + "followers": 861 + }, + { + "firstname": "Elisa", + "lastname": "Gracely", + "company": "Alexander, Christine T Esq", + "address": "1805 Kings Hwy", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11229", + "phone": "718-627-1421", + "fax": "718-627-9346", + "email": "elisa@gracely.com", + "web": "http://www.elisagracely.com", + "followers": 5321 + }, + { + "firstname": 
"Jeri", + "lastname": "Farstvedt", + "company": "Regan, Denis J Esq", + "address": "16133 Ventura Blvd #-700", + "city": "Encino", + "county": "Los Angeles", + "state": "CA", + "zip": "91436", + "phone": "818-986-8843", + "fax": "818-986-6786", + "email": "jeri@farstvedt.com", + "web": "http://www.jerifarstvedt.com", + "followers": 9529 + }, + { + "firstname": "Stacey", + "lastname": "Blow", + "company": "Schechter, Jeffrey S Esq", + "address": "136 S Riverside Ave", + "city": "Rialto", + "county": "San Bernardino", + "state": "CA", + "zip": "92376", + "phone": "909-874-0274", + "fax": "909-874-8538", + "email": "stacey@blow.com", + "web": "http://www.staceyblow.com", + "followers": 6685 + }, + { + "firstname": "Bryan", + "lastname": "Rovell", + "company": "All N All Shop", + "address": "90 Hackensack St", + "city": "East Rutherford", + "county": "Bergen", + "state": "NJ", + "zip": "07073", + "phone": "201-939-2788", + "fax": "201-939-9079", + "email": "bryan@rovell.com", + "web": "http://www.bryanrovell.com", + "followers": 2687 + }, + { + "firstname": "Joey", + "lastname": "Bolick", + "company": "Utility Trailer Sales", + "address": "7700 N Council Rd", + "city": "Oklahoma City", + "county": "Oklahoma", + "state": "OK", + "zip": "73132", + "phone": "405-728-5972", + "fax": "405-728-5244", + "email": "joey@bolick.com", + "web": "http://www.joeybolick.com", + "followers": 8465 + } +] diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb new file mode 100644 index 0000000..2039b59 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb @@ -0,0 +1,99 @@ +require 'algolia/error' + +module Algolia + # + # A class which encapsulates the HTTPS communication with the Algolia + # API server for cross-app operations. + # + class AccountClient + class << self + # + # Copies settings, synonyms, rules and objects from the source index to the + # destination index. The replicas of the source index should not be copied. + # + # Throw an exception if the destination index already exists + # Throw an exception if the indices are on the same application + # + # @param source_index the source index object + # @param destination_index the destination index object + # @param request_options contains extra parameters to send with your query + # + def copy_index(source_index, destination_index, request_options = {}) + raise AlgoliaError.new('The indexes are in the same application. Use Algolia::Client.copy_index instead.') if source_index.client.application_id == destination_index.client.application_id + + begin + settings = destination_index.get_settings() + rescue AlgoliaError + # Destination index does not exists. We can proceed. + else + raise AlgoliaError.new("Destination index already exists. 
Please delete it before copying index across applications.") + end + + responses = [] + + settings = source_index.get_settings() + responses << destination_index.set_settings(settings, {}, request_options) + + synonyms = [] + source_index.export_synonyms(100, request_options) do |synonym| + synonym.delete('_highlightResult') + synonyms << synonym + end + + responses << destination_index.batch_synonyms(synonyms, false, false, request_options) + + rules = [] + source_index.export_rules(100, request_options) do |rule| + rule.delete('_highlightResult') + rules << rule + end + responses << destination_index.batch_rules(rules, false, false, request_options) + + # Copy objects + responses = [] + batch = [] + batch_size = 1000 + count = 0 + + source_index.browse do |obj| + batch << obj + count += 1 + + if count == batch_size + responses << destination_index.save_objects(batch, request_options) + batch = [] + count = 0 + end + end + + if batch.any? + responses << destination_index.save_objects(batch, request_options) + end + + responses + end + + # + # The method copy settings, synonyms, rules and objects from the source index + # to the destination index and wait end of indexing. The replicas of the + # source index should not be copied + # + # Throw an exception if the destination index already exists + # Throw an exception if the indices are on the same application + # + # @param source_index the source index object + # @param destination_index the destination index object + # @param request_options contains extra parameters to send with your query + # + def copy_index!(source_index, destination_index, request_options = {}) + responses = self.copy_index(source_index, destination_index, request_options) + + responses.each do |res| + destination_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + responses + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb new file mode 100644 index 0000000..5c9c8b2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb @@ -0,0 +1,75 @@ +module Algolia + + class Analytics + attr_reader :ssl, :ssl_version, :headers + API_URL='https://analytics.algolia.com' + + def initialize(client, params) + @client = client + @headers = params[:headers] + end + + def get_ab_tests(params = {}) + params = { + :offset => 0, + :limit => 10, + }.merge(params) + + perform_request(:GET, Protocol.ab_tests_uri, params) + end + + def get_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == '' + + perform_request(:GET, Protocol.ab_tests_uri(ab_test_id)) + end + + def add_ab_test(ab_test) + perform_request(:POST, Protocol.ab_tests_uri, {}, ab_test.to_json) + end + + def stop_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == '' + + perform_request(:POST, Protocol.ab_tests_stop_uri(ab_test_id)) + end + + def delete_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? 
|| ab_test_id == '' + + perform_request(:DELETE, Protocol.ab_tests_uri(ab_test_id)) + end + + def wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + @client.wait_task(index_name, taskID, time_before_retry, request_options) + end + + private + + def perform_request(method, url, params = {}, data = {}) + http = HTTPClient.new + + url = API_URL + url + + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + url << "?" + Protocol.to_query(encoded_params) + + response = case method + when :GET + http.get(url, { :header => @headers }) + when :POST + http.post(url, { :body => data, :header => @headers }) + when :DELETE + http.delete(url, { :header => @headers }) + end + + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content}") + end + + JSON.parse(response.content) + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb new file mode 100644 index 0000000..08c72f5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb @@ -0,0 +1,1131 @@ +require 'algolia/protocol' +require 'algolia/error' +require 'algolia/version' +require 'json' +require 'zlib' +require 'openssl' +require 'base64' + +module Algolia + WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY = 100 + + # + # A class which encapsulates the HTTPS communication with the Algolia + # API server. Uses the HTTPClient library for low-level HTTP communication. + # + class Client + attr_reader :ssl, :ssl_version, :hosts, :search_hosts, :application_id, :api_key, :headers, :connect_timeout, :send_timeout, :receive_timeout, :search_timeout, :batch_timeout + + DEFAULT_CONNECT_TIMEOUT = 2 + DEFAULT_RECEIVE_TIMEOUT = 30 + DEFAULT_SEND_TIMEOUT = 30 + DEFAULT_BATCH_TIMEOUT = 120 + DEFAULT_SEARCH_TIMEOUT = 5 + DEFAULT_USER_AGENT = ["Algolia for Ruby (#{::Algolia::VERSION})", "Ruby (#{RUBY_VERSION})"] + + def initialize(data = {}) + raise ArgumentError.new('No APPLICATION_ID provided, please set :application_id') if data[:application_id].nil? + + @ssl = data[:ssl].nil? ? true : data[:ssl] + @ssl_version = data[:ssl_version].nil? ? nil : data[:ssl_version] + @gzip = data[:gzip].nil? ? 
true : data[:gzip] + @application_id = data[:application_id] + @api_key = data[:api_key] + @hosts = data[:hosts] || (["#{@application_id}.algolia.net"] + 1.upto(3).map { |i| "#{@application_id}-#{i}.algolianet.com" }.shuffle) + @search_hosts = data[:search_hosts] || data[:hosts] || (["#{@application_id}-dsn.algolia.net"] + 1.upto(3).map { |i| "#{@application_id}-#{i}.algolianet.com" }.shuffle) + @connect_timeout = data[:connect_timeout] || DEFAULT_CONNECT_TIMEOUT + @send_timeout = data[:send_timeout] || DEFAULT_SEND_TIMEOUT + @batch_timeout = data[:batch_timeout] || DEFAULT_BATCH_TIMEOUT + @receive_timeout = data[:receive_timeout] || DEFAULT_RECEIVE_TIMEOUT + @search_timeout = data[:search_timeout] || DEFAULT_SEARCH_TIMEOUT + @headers = { + Protocol::HEADER_API_KEY => api_key, + Protocol::HEADER_APP_ID => application_id, + 'Content-Type' => 'application/json; charset=utf-8', + 'User-Agent' => DEFAULT_USER_AGENT.push(data[:user_agent]).compact.join('; ') + } + end + + def destroy + Thread.current["algolia_search_hosts_#{application_id}"] = nil + Thread.current["algolia_hosts_#{application_id}"] = nil + Thread.current["algolia_host_index_#{application_id}"] = nil + Thread.current["algolia_search_host_index_#{application_id}"] = nil + end + + # + # Initialize a new index + # + def init_index(name) + Index.new(name, self) + end + + # + # Initialize analytics helper + # + def init_analytics() + Analytics.new(self, { :headers => @headers }) + end + + # + # Allow to set custom headers + # + def set_extra_header(key, value) + headers[key] = value + end + + # + # Allow to use IP rate limit when you have a proxy between end-user and Algolia. + # This option will set the X-Forwarded-For HTTP header with the client IP and the X-Forwarded-API-Key with the API Key having rate limits. 
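+ # + # A minimal usage sketch, not from the gem itself; the application id, API keys and end-user IP below are placeholders: + # client = Algolia::Client.new(:application_id => 'YourApplicationID', :api_key => 'YourSearchOnlyAPIKey') + # client.enable_rate_limit_forward('YourAdminAPIKey', '198.51.100.24', 'YourRateLimitedAPIKey') + # client.init_index('contacts').search('jimmie') + # client.disable_rate_limit_forward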
+ # + # @param admin_api_key the admin API Key you can find in your dashboard + # @param end_user_ip the end user IP (you can use both IPV4 or IPV6 syntax) + # @param rate_limit_api_key the API key on which you have a rate limit + # + def enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + headers[Protocol::HEADER_API_KEY] = admin_api_key + headers[Protocol::HEADER_FORWARDED_IP] = end_user_ip + headers[Protocol::HEADER_FORWARDED_API_KEY] = rate_limit_api_key + end + + # + # Disable IP rate limit enabled with enableRateLimitForward() function + # + def disable_rate_limit_forward + headers[Protocol::HEADER_API_KEY] = api_key + headers.delete(Protocol::HEADER_FORWARDED_IP) + headers.delete(Protocol::HEADER_FORWARDED_API_KEY) + end + + # + # Convenience method thats wraps enable_rate_limit_forward/disable_rate_limit_forward + # + def with_rate_limits(end_user_ip, rate_limit_api_key, &block) + enable_rate_limit_forward(api_key, end_user_ip, rate_limit_api_key) + begin + yield + ensure + disable_rate_limit_forward + end + end + + # + # This method allows to query multiple indexes with one API call + # + # @param queries the array of hash representing the query and associated index name + # @param options - accepts those keys: + # - index_name_key the name of the key used to fetch the index_name (:index_name by default) + # - strategy define the strategy applied on the sequential searches (none by default) + # - request_options contains extra parameters to send with your query + # + def multiple_queries(queries, options = nil, strategy = nil) + if options.is_a?(Hash) + index_name_key = options.delete(:index_name_key) || options.delete('index_name_key') + strategy = options.delete(:strategy) || options.delete('strategy') + request_options = options.delete(:request_options) || options.delete('request_options') + else + # Deprecated def multiple_queries(queries, index_name_key, strategy) + index_name_key = options + end + index_name_key ||= :index_name + strategy ||= 'none' + request_options ||= {} + + requests = { + :requests => queries.map do |query| + query = query.dup + index_name = query.delete(index_name_key) || query.delete(index_name_key.to_s) + raise ArgumentError.new("Missing '#{index_name_key}' option") if index_name.nil? + encoded_params = Hash[query.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + { :indexName => index_name, :params => Protocol.to_query(encoded_params) } + end + } + post(Protocol.multiple_queries_uri(strategy), requests.to_json, :search, request_options) + end + + # + # Get objects by objectID across multiple indexes + # + # @param requests [ + # { "indexName" => index_name_1, "objectID" => "obj1" }, + # { "indexName" => index_name_2, "objectID" => "obj2" } + # ] + # + def multiple_get_objects(requests, request_options = {}) + post(Protocol.objects_uri, {:requests => requests}.to_json, :search, request_options) + end + + # + # List all existing indexes + # return an Answer object with answer in the form + # {"items": [{ "name": "contacts", "createdAt": "2013-01-18T15:33:13.556Z"}, + # {"name": "notes", "createdAt": "2013-01-18T15:33:13.556Z"}]} + # + # @param request_options contains extra parameters to send with your query + # + def list_indexes(request_options = {}) + get(Protocol.indexes_uri, :read, request_options) + end + + # + # Move an existing index. + # + # @param src_index the name of index to copy. 
+ # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def move_index(src_index, dst_index, request_options = {}) + request = { 'operation' => 'move', 'destination' => dst_index } + post(Protocol.index_operation_uri(src_index), request.to_json, :write, request_options) + end + + # + # Move an existing index and wait until the move has been processed + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def move_index!(src_index, dst_index, request_options = {}) + res = move_index(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def copy_index(src_index, dst_index, scope = nil, request_options = {}) + request = { 'operation' => 'copy', 'destination' => dst_index } + request['scope'] = scope unless scope.nil? + post(Protocol.index_operation_uri(src_index), request.to_json, :write, request_options) + end + + # + # Copy an existing index and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def copy_index!(src_index, dst_index, scope = nil, request_options = {}) + res = copy_index(src_index, dst_index, scope, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index settings. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName's settings (destination's settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_settings(src_index, dst_index, request_options = {}) + copy_index(src_index, dst_index, ['settings'], request_options) + end + + # + # Copy an existing index settings and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_settings!(src_index, dst_index, request_options = {}) + res = copy_settings(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index synonyms. + # + # @param src_index the name of index to copy. 
+ # @param dst_index the new index name that will contains a copy of srcIndexName's synonyms (destination's synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_synonyms(src_index, dst_index, request_options = {}) + copy_index(src_index, dst_index, ['synonyms'], request_options) + end + + # + # Copy an existing index synonyms and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName synonyms (destination synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_synonyms!(src_index, dst_index, request_options = {}) + res = copy_synonyms(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index rules. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName's rules (destination's rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_rules(src_index, dst_index, request_options = {}) + copy_index(src_index, dst_index, ['rules'], request_options) + end + + # + # Copy an existing index rules and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_rules!(src_index, dst_index, request_options = {}) + res = copy_rules(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete an index + # @param name the name of the index to delete + # @param request_options contains extra parameters to send with your query + # + def delete_index(name, request_options = {}) + init_index(name).delete(request_options) + end + + # + # Delete an index and wait until the deletion has been processed. + # @param name the name of the index to delete + # @param request_options contains extra parameters to send with your query + # + def delete_index!(name, request_options = {}) + init_index(name).delete!(request_options) + end + + # + # Return last logs entries. + # + # @param options - accepts those keys: + # - offset Specify the first entry to retrieve (0-based, 0 is the most recent log entry) - Default = 0 + # - length Specify the maximum number of entries to retrieve starting at offset. Maximum allowed value: 1000 - Default = 10 + # - type Type of log entries to retrieve ("all", "query", "build" or "error") - Default = 'all' + # - request_options contains extra parameters to send with your query + # + def get_logs(options = nil, length = nil, type = nil) + if options.is_a?(Hash) + offset = options.delete('offset') || options.delete(:offset) + length = options.delete('length') || options.delete(:length) + type = options.delete('type') || options.delete(:type) + request_options = options.delete('request_options') || options.delete(:request_options) + else + # Deprecated def get_logs(offset, length, type) + offset = options + end + length ||= 10 + type = 'all' if type.nil? + type = type ? 
'error' : 'all' if type.is_a?(true.class) + request_options ||= {} + + get(Protocol.logs(offset, length, type), :write, request_options) + end + + # + # List all existing user keys with their associated ACLs + # + # @param request_options contains extra parameters to send with your query + # + def list_api_keys(request_options = {}) + get(Protocol.keys_uri, :read, request_options) + end + + # + # Get ACL of a user key + # + # @param request_options contains extra parameters to send with your query + # + def get_api_key(key, request_options = {}) + get(Protocol.key_uri(key), :read, request_options) + end + + # + # Create a new user key + # + # Deprecated call was add_api_key(acl, validity, maxQueriesPerIPPerHour, maxHitsPerQuery, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param object The list of parameters for this key. + # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def add_api_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + validity = 0 + unless request_options.is_a?(Hash) + validity = request_options + request_options = {} + end + + params[:indexes] = indexes if indexes + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + post(Protocol.keys_uri, params.to_json, :write, request_options) + end + + # + # Update a user key + # + # Deprecated call was update_api_key(key, acl, validity, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param key API Key to update + # @param object The list of parameters for this key. 
+ # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def update_api_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + validity = 0 + unless request_options.is_a?(Hash) + validity = request_options + request_options = {} + end + + params[:indexes] = indexes if indexes + params['validity'] = validity.to_i if validity != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + + put(Protocol.key_uri(key), params.to_json, :write, request_options) + end + + # + # Delete an existing user key + # + def delete_api_key(key, request_options = {}) + delete(Protocol.key_uri(key), :write, request_options) + end + + # + # Restore a deleted api key + # + def restore_api_key(key, request_options = {}) + post(Protocol.restore_key_uri(key), :write, request_options) + end + + # + # Send a batch request targeting multiple indices + # + def batch(operations, request_options = {}) + post(Protocol.batch_uri, { 'requests' => operations }.to_json, :batch, request_options) + end + + # + # Send a batch request targeting multiple indices and wait the end of the indexing + # + def batch!(operations, request_options = {}) + res = batch(operations, request_options) + res['taskID'].each do |index, taskID| + wait_task(index, taskID, WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + end + + # + # Check the status of a task on the server. + # All server task are asynchronous and you can check the status of a task with this method. + # + # @param index_name the index name owning the taskID + # @param taskID the id of the task returned by server + # @param request_options contains extra parameters to send with your query + # + def get_task_status(index_name, taskID, request_options = {}) + get(Protocol.task_uri(index_name, taskID), :read, request_options)['status'] + end + + # + # Wait the publication of a task on the server. + # All server task are asynchronous and you can check with this method that the task is published. 
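+ # The call simply polls get_task_status every `time_before_retry` milliseconds until the status is 'published'.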
+ # + # @param index_name the index name owning the taskID + # @param taskID the id of the task returned by server + # @param time_before_retry the time in milliseconds before retry (default = 100ms) + # @param request_options contains extra parameters to send with your query + # + def wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + loop do + status = get_task_status(index_name, taskID, request_options) + if status == 'published' + return + end + sleep(time_before_retry.to_f / 1000) + end + end + + def get_personalization_strategy(request_options = {}) + get(Protocol.personalization_strategy_uri, :read, request_options) + end + + def set_personalization_strategy(strategy, request_options = {}) + post(Protocol.personalization_strategy_uri, strategy.to_json, :write, request_options) + end + + # + # Multicluster management + # + def list_clusters(request_options = {}) + get(Protocol.clusters_uri, :read, request_options) + end + + def list_user_ids(page = 0, hits_per_page = 20, request_options = {}) + get(Protocol.list_ids_uri(page, hits_per_page), :read, request_options) + end + + def get_top_user_ids(request_options = {}) + get(Protocol.cluster_top_user_uri, :read, request_options) + end + + def assign_user_id(user_id, cluster_name, request_options = {}) + request_options = add_header_to_request_options(request_options, { :'X-Algolia-User-ID' => user_id}) + + body = { :cluster => cluster_name } + post(Protocol.cluster_mapping_uri, body.to_json, :write, request_options) + end + + def get_user_id(user_id, request_options = {}) + get(Protocol.cluster_mapping_uri(user_id), :read, request_options) + end + + def remove_user_id(user_id, request_options = {}) + request_options = add_header_to_request_options(request_options, { :'X-Algolia-User-ID' => user_id}) + + delete(Protocol.cluster_mapping_uri, :write, request_options) + end + + def search_user_id(query, cluster_name = nil, page = nil, hits_per_page = nil, request_options = {}) + body = { :query => query } + body[:cluster] = cluster_name unless cluster_name.nil? + body[:page] = page unless page.nil? + body[:hitsPerPage] = hits_per_page unless hits_per_page.nil? + post(Protocol.search_user_id_uri, body.to_json, :read, request_options) + end + + # Perform an HTTP request for the given uri and method + # with common basic response handling. Will raise a + # AlgoliaProtocolError if the response has an error status code, + # and will return the parsed JSON body on success, if there is one. + # + def request(uri, method, data = nil, type = :write, request_options = {}) + exceptions = [] + + connect_timeout = @connect_timeout + send_timeout = if type == :search + @search_timeout + elsif type == :batch + type = :write + @batch_timeout + else + @send_timeout + end + receive_timeout = type == :search ? @search_timeout : @receive_timeout + + thread_local_hosts(type != :write).each_with_index do |host, i| + connect_timeout += 2 if i == 2 + send_timeout += 10 if i == 2 + receive_timeout += 10 if i == 2 + + thread_index_key = type != :write ? 
"algolia_search_host_index_#{application_id}" : "algolia_host_index_#{application_id}" + Thread.current[thread_index_key] = host[:index] + host[:last_call] = Time.now.to_i + + host[:session].connect_timeout = connect_timeout + host[:session].send_timeout = send_timeout + host[:session].receive_timeout = receive_timeout + begin + return perform_request(host[:session], host[:base_url] + uri, method, data, request_options) + rescue AlgoliaProtocolError => e + raise if e.code / 100 == 4 + exceptions << e + rescue => e + exceptions << e + end + host[:session].reset_all + end + raise AlgoliaProtocolError.new(0, "Cannot reach any host: #{exceptions.map { |e| e.to_s }.join(', ')}") + end + + def get(uri, type = :write, request_options = {}) + request(uri, :GET, nil, type, request_options) + end + + def post(uri, body = {}, type = :write, request_options = {}) + request(uri, :POST, body, type, request_options) + end + + def put(uri, body = {}, type = :write, request_options = {}) + request(uri, :PUT, body, type, request_options) + end + + def delete(uri, type = :write, request_options = {}) + request(uri, :DELETE, nil, type, request_options) + end + + private + + # + # This method returns a thread-local array of sessions + # + def thread_local_hosts(read) + thread_hosts_key = read ? "algolia_search_hosts_#{application_id}" : "algolia_hosts_#{application_id}" + Thread.current[thread_hosts_key] ||= (read ? search_hosts : hosts).each_with_index.map do |host, i| + client = HTTPClient.new + client.ssl_config.ssl_version = @ssl_version if @ssl && @ssl_version + client.transparent_gzip_decompression = @gzip + client.ssl_config.add_trust_ca File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'resources', 'ca-bundle.crt')) + { + :index => i, + :base_url => "http#{@ssl ? 's' : ''}://#{host}", + :session => client, + :last_call => nil + } + end + hosts = Thread.current[thread_hosts_key] + thread_index_key = read ? "algolia_search_host_index_#{application_id}" : "algolia_host_index_#{application_id}" + current_host = Thread.current[thread_index_key].to_i # `to_i` to ensure first call is 0 + # we want to always target host 0 first + # if the current host is not 0, then we want to use it first only if (we never used it OR we're using it since less than 1 minute) + if current_host != 0 && (hosts[current_host][:last_call].nil? 
|| hosts[current_host][:last_call] > Time.now.to_i - 60) + # first host will be `current_host` + first = hosts[current_host] + [first] + hosts.reject { |h| h[:index] == 0 || h == first } + hosts.select { |h| h[:index] == 0 } + else + # first host will be `0` + hosts + end + end + + def perform_request(session, url, method, data, request_options) + hs = {} + extra_headers = request_options[:headers] || request_options['headers'] || {} + @headers.each { |key, val| hs[key.to_s] = val } + extra_headers.each { |key, val| hs[key.to_s] = val } + response = case method + when :GET + session.get(url, { :header => hs }) + when :POST + session.post(url, { :body => data, :header => hs }) + when :PUT + session.put(url, { :body => data, :header => hs }) + when :DELETE + session.delete(url, { :header => hs }) + end + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content} (#{response.code})") + end + return JSON.parse(response.content) + end + + def add_header_to_request_options(request_options, headers_to_add) + if !request_options['headers'].is_a?(Hash) + if request_options[:headers].is_a?(Hash) + request_options['headers'] = request_options[:headers] + request_options.delete(:headers) + else + request_options['headers'] = {} + end + end + + request_options['headers'].merge!(headers_to_add) + request_options + end + + # Deprecated + alias_method :list_user_keys, :list_api_keys + alias_method :get_user_key, :get_api_key + alias_method :add_user_key, :add_api_key + alias_method :update_user_key, :update_api_key + alias_method :delete_user_key, :delete_api_key + end + + # Module methods + # ------------------------------------------------------------ + + # A singleton client + # Always use Algolia.client to retrieve the client object. + @@client = nil + + # + # Initialize the singleton instance of Client which is used by all API methods + # + def Algolia.init(options = {}) + application_id = ENV['ALGOLIA_APP_ID'] || ENV['ALGOLIA_API_ID'] || ENV['ALGOLIA_APPLICATION_ID'] + api_key = ENV['ALGOLIA_REST_API_KEY'] || ENV['ALGOLIA_API_KEY'] + + defaulted = { :application_id => application_id, :api_key => api_key } + defaulted.merge!(options) + + @@client = Client.new(defaulted) + end + + # + # Allow to set custom headers + # + def Algolia.set_extra_header(key, value) + Algolia.client.set_extra_header(key, value) + end + + # + # Allow to use IP rate limit when you have a proxy between end-user and Algolia. + # This option will set the X-Forwarded-For HTTP header with the client IP and the + # X-Forwarded-API-Key with the API Key having rate limits. 
+ # + # @param admin_api_key the admin API Key you can find in your dashboard + # @param end_user_ip the end user IP (you can use both IPV4 or IPV6 syntax) + # @param rate_limit_api_key the API key on which you have a rate limit + # + def Algolia.enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + Algolia.client.enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + end + + # + # Disable IP rate limit enabled with enableRateLimitForward() function + # + def Algolia.disable_rate_limit_forward + Algolia.client.disable_rate_limit_forward + end + + # + # Convenience method thats wraps enable_rate_limit_forward/disable_rate_limit_forward + # + def Algolia.with_rate_limits(end_user_ip, rate_limit_api_key, &block) + Algolia.client.with_rate_limits(end_user_ip, rate_limit_api_key, &block) + end + + # + # Generate a secured and public API Key from a list of tagFilters and an + # optional user token identifying the current user + # + # @param private_api_key your private API Key + # @param tag_filters the list of tags applied to the query (used as security) + # @param user_token an optional token identifying the current user + # + def Algolia.generate_secured_api_key(private_api_key, tag_filters_or_params, user_token = nil) + if tag_filters_or_params.is_a?(Hash) && user_token.nil? + encoded_params = Hash[tag_filters_or_params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + query_str = Protocol.to_query(encoded_params) + hmac = OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), private_api_key, query_str) + Base64.encode64("#{hmac}#{query_str}").gsub("\n", '') + else + tag_filters = if tag_filters_or_params.is_a?(Array) + tag_filters = tag_filters_or_params.map { |t| t.is_a?(Array) ? "(#{t.join(',')})" : t }.join(',') + else + tag_filters_or_params + end + raise ArgumentError.new('Attribute "tag_filters" must be a list of tags') if !tag_filters.is_a?(String) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), private_api_key, "#{tag_filters}#{user_token.to_s}") + end + end + + # + # Returns the remaining validity time for the given API key in seconds + # + # @param [String] secured_api_key the secured API key to check + # + # @return [Integer] remaining validity in seconds + # + def Algolia.get_secured_api_key_remaining_validity(secured_api_key) + now = Time.now.to_i + decoded_key = Base64.decode64(secured_api_key) + regex = 'validUntil=(\d+)' + matches = decoded_key.match(regex) + + if matches === nil + raise ValidUntilNotFoundError.new('The SecuredAPIKey doesn\'t have a validUntil parameter.') + end + + valid_until = matches[1].to_i + + valid_until - now + end + + # + # This method allows to query multiple indexes with one API call + # + def Algolia.multiple_queries(queries, options = nil, strategy = nil) + Algolia.client.multiple_queries(queries, options, strategy) + end + + # + # This method allows to get objects (records) via objectID across + # multiple indexes with one API call + # + def Algolia.multiple_get_objects(requests, request_options = {}) + Algolia.client.multiple_get_objects(requests, request_options) + end + + # + # List all existing indexes + # return an Answer object with answer in the form + # {"items": [{ "name": "contacts", "createdAt": "2013-01-18T15:33:13.556Z"}, + # {"name": "notes", "createdAt": "2013-01-18T15:33:13.556Z"}]} + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.list_indexes(request_options = {}) + Algolia.client.list_indexes(request_options) + end + 
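+ #
+ # Example (illustrative only; the application ID, API key and index contents below are placeholders):
+ #
+ #   Algolia.init(:application_id => 'YourApplicationID', :api_key => 'YourAPIKey')
+ #
+ #   # a search-only key restricted to one user, valid for one hour
+ #   secured_key = Algolia.generate_secured_api_key('YourSearchOnlyAPIKey',
+ #     'filters'    => 'user_id:42',
+ #     'validUntil' => Time.now.to_i + 3600)
+ #
+ #   Algolia.get_secured_api_key_remaining_validity(secured_key)  # => roughly 3600
+ #
+ #   Algolia.list_indexes['items'].each { |index| puts index['name'] }
+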
+ # + # Move an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.move_index(src_index, dst_index, request_options = {}) + Algolia.client.move_index(src_index, dst_index, request_options) + end + + # + # Move an existing index and wait until the move has been processed + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.move_index!(src_index, dst_index, request_options = {}) + Algolia.client.move_index!(src_index, dst_index, request_options) + end + + # + # Copy an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_index(src_index, dst_index, scope = nil, request_options = {}) + Algolia.client.copy_index(src_index, dst_index, scope, request_options) + end + + # + # Copy an existing index and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_index!(src_index, dst_index, scope = nil, request_options = {}) + Algolia.client.copy_index!(src_index, dst_index, scope, request_options) + end + + # + # Copy an existing index settings. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_settings(src_index, dst_index, request_options = {}) + Algolia.client.copy_settings(src_index, dst_index, request_options) + end + + # + # Copy an existing index settings and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_settings!(src_index, dst_index, request_options = {}) + Algolia.client.copy_settings!(src_index, dst_index, request_options) + end + + # + # Copy an existing index synonyms. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name synonyms (destination synonyms will be overriten if it already exist). 
+ # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_synonyms(src_index, dst_index, request_options = {}) + Algolia.client.copy_synonyms(src_index, dst_index, request_options) + end + + # + # Copy an existing index synonyms and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name synonyms (destination synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_synonyms!(src_index, dst_index, request_options = {}) + Algolia.client.copy_synonyms!(src_index, dst_index, request_options) + end + + # + # Copy an existing index rules. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_rules(src_index, dst_index, request_options = {}) + Algolia.client.copy_rules(src_index, dst_index, request_options) + end + + # + # Copy an existing index rules and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_rules!(src_index, dst_index, request_options = {}) + Algolia.client.copy_rules!(src_index, dst_index, request_options) + end + + # + # Delete an index + # + def Algolia.delete_index(name, request_options = {}) + Algolia.client.delete_index(name, request_options) + end + + # + # Delete an index and wait until the deletion has been processed. + # + def Algolia.delete_index!(name, request_options = {}) + Algolia.client.delete_index!(name, request_options) + end + + # + # Return last logs entries. + # + # @param offset Specify the first entry to retrieve (0-based, 0 is the most recent log entry). + # @param length Specify the maximum number of entries to retrieve starting at offset. Maximum allowed value: 1000. 
+ # @param type Specify the type of entries you want to retrieve - default: "all" + # @param request_options contains extra parameters to send with your query + # + def Algolia.get_logs(options = nil, length = nil, type = nil) + Algolia.client.get_logs(options, length, type) + end + + # + # List all existing user keys with their associated ACLs + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.list_api_keys(request_options = {}) + Algolia.client.list_api_keys(request_options) + end + + # + # Deprecated + # + def Algolia.list_user_keys(request_options = {}) + Algolia.client.list_api_keys(request_options) + end + + # + # Get ACL of a user key + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.get_api_key(key, request_options = {}) + Algolia.client.get_api_key(key, request_options) + end + + # + # Deprecated + # + def Algolia.get_user_key(key, request_options = {}) + Algolia.client.get_user_key(key, request_options) + end + + # + # Create a new user key + # + # Deprecated call was add_api_key(acl, validity, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a NSDictionary that + # can contains the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - max_hits_per_query: integer + # - queryParameters: string + # - max_queries_per_IP_per_hour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def Algolia.add_api_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.add_api_key(object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Deprecated + # + def Algolia.add_user_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.add_api_key(object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Update a user key + # + # Deprecated call was update_api_key(key, acl, validity, maxQueriesPerIPPerHour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param key API Key to update + # @param object The list of parameters for this key. 
+ # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - max_hits_per_query: integer + # - queryParameters: string + # - max_queries_per_IP_per_hour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def Algolia.update_api_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.update_api_key(key, object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Deprecated + # + def Algolia.update_user_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.update_api_key(key, object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Delete an existing user key + # + def Algolia.delete_api_key(key, request_options = {}) + Algolia.client.delete_api_key(key, request_options) + end + + # + # Restore an existing api key + # + def Algolia.restore_api_key(key, request_options = {}) + Algolia.client.restore_api_key(key, request_options) + end + + # + # Deprecated + # + def Algolia.delete_user_key(key, request_options = {}) + Algolia.client.delete_api_key(key, request_options) + end + + # + # Send a batch request targeting multiple indices + # + def Algolia.batch(requests, request_options = {}) + Algolia.client.batch(requests, request_options) + end + + # + # Send a batch request targeting multiple indices and wait the end of the indexing + # + def Algolia.batch!(requests, request_options = {}) + Algolia.client.batch!(requests, request_options) + end + + # + # Wait until task is completed by the engine + # + def Algolia.wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + Algolia.client.wait_task(index_name, taskID, time_before_retry, request_options) + end + + def Algolia.get_task_status(index_name, taskID, request_options = {}) + Algolia.client.get_task_status(index_name, taskID, request_options = {}) + end + # + # Used mostly for testing. Lets you delete the api key global vars. + # + def Algolia.destroy + @@client.destroy unless @@client.nil? + @@client = nil + self + end + + def Algolia.client + if !@@client + raise AlgoliaError, 'API not initialized' + end + @@client + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb new file mode 100644 index 0000000..4b76b7b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb @@ -0,0 +1,31 @@ +module Algolia + + # Base exception class for errors thrown by the Algolia + # client library. AlgoliaError will be raised by any + # network operation if Algolia.init() has not been called. + class AlgoliaError < StandardError #Exception ... why? A:http://www.skorks.com/2009/09/ruby-exceptions-and-exception-handling/ + end + + # An exception class raised when the REST API returns an error. + # The error code and message will be parsed out of the HTTP response, + # which is also included in the response attribute. 
+ class AlgoliaProtocolError < AlgoliaError + attr_accessor :code + attr_accessor :message + + def initialize(code, message) + self.code = code + self.message = message + super("#{self.code}: #{self.message}") + end + end + + # An exception class raised when the given object was not found. + class AlgoliaObjectNotFoundError < AlgoliaError + end + + # An exception class raised when the validUntil parameter is not found + class ValidUntilNotFoundError < AlgoliaError + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb new file mode 100644 index 0000000..dbc64d0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb @@ -0,0 +1,1355 @@ +require 'algolia/client' +require 'algolia/error' + +module Algolia + + class Index + attr_accessor :name, :client + + def initialize(name, client = nil) + self.name = name + self.client = client || Algolia.client + end + + # + # Delete an index + # + # @param request_options contains extra parameters to send with your query + # + # return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" } + # + def delete(request_options = {}) + client.delete(Protocol.index_uri(name), :write, request_options) + end + alias_method :delete_index, :delete + + # + # Delete an index and wait until the deletion has been processed + # + # @param request_options contains extra parameters to send with your query + # + # return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" } + # + def delete!(request_options = {}) + res = delete(request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + alias_method :delete_index!, :delete! + + # + # Add an object in this index + # + # @param object the object to add to the index. + # The object is represented by an associative array + # @param objectID (optional) an objectID you want to attribute to this object + # (if the attribute already exist the old object will be overridden) + # @param request_options contains extra parameters to send with your query + # + def add_object(object, objectID = nil, request_options = {}) + check_object(object) + if objectID.nil? || objectID.to_s.empty? + client.post(Protocol.index_uri(name), object.to_json, :write, request_options) + else + client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options) + end + end + + # + # Add an object in this index and wait end of indexing + # + # @param object the object to add to the index. + # The object is represented by an associative array + # @param objectID (optional) an objectID you want to attribute to this object + # (if the attribute already exist the old object will be overridden) + # @param Request options object. Contains extra URL parameters or headers + # + def add_object!(object, objectID = nil, request_options = {}) + res = add_object(object, objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Add several objects in this index + # + # @param objects the array of objects to add inside the index. 
+ # Each object is represented by an associative array + # @param request_options contains extra parameters to send with your query + # + def add_objects(objects, request_options = {}) + batch(build_batch('addObject', objects, false), request_options) + end + + # + # Add several objects in this index and wait end of indexing + # + # @param objects the array of objects to add inside the index. + # Each object is represented by an associative array + # @param request_options contains extra parameters to send with your query + # + def add_objects!(objects, request_options = {}) + res = add_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Search inside the index + # + # @param query the full text query + # @param args (optional) if set, contains an associative array with query parameters: + # - page: (integer) Pagination parameter used to select the page to retrieve. + # Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9 + # - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20. + # - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size). + # Attributes are separated with a comma (for example "name,address"). + # You can also use a string array encoding (for example ["name","address"]). + # By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index. + # - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query. + # Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]). + # If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted. + # You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted. + # A matchLevel is returned for each highlighted attribute and can contain: + # - full: if all the query terms were found in the attribute, + # - partial: if only some of the query terms were found, + # - none: if none of the query terms were found. + # - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`). + # Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10). + # You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed. + # - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3. + # - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7. + # - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute. + # - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma). + # For example aroundLatLng=47.316669,5.016670). 
+ # You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision + # (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter). + # At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}}) + # - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng). + # For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201). + # At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}}) + # - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma. + # The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`. + # You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000. + # You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]). + # - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas. + # To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3). + # You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3). + # At indexing, tags should be added in the _tags** attribute of objects (for example {"_tags":["tag1","tag2"]}). + # - facetFilters: filter the query by a list of facets. + # Facets are separated by commas and each facet is encoded as `attributeName:value`. + # For example: `facetFilters=category:Book,author:John%20Doe`. + # You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`). + # - facets: List of object attributes that you want to use for faceting. + # Attributes are separated with a comma (for example `"category,author"` ). + # You can also use a JSON string array encoding (for example ["category","author"]). + # Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter. + # You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**. + # - queryType: select how the query words are interpreted, it can be one of the following value: + # - prefixAll: all query words are interpreted as prefixes, + # - prefixLast: only the last word is interpreted as a prefix (default behavior), + # - prefixNone: no query word is interpreted as a prefix. This option is not recommended. + # - optionalWords: a string that contains the list of words that should be considered as optional when found in the query. + # The list of words is comma separated. + # - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set. + # This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter, + # all hits containing a duplicate value for the attributeForDistinct attribute are removed from results. + # For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best + # one is kept and others are removed. 
+ # @param request_options contains extra parameters to send with your query + # + def search(query, params = {}, request_options = {}) + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + encoded_params[:query] = query + client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options) + end + + class IndexBrowser + def initialize(client, name, params) + @client = client + @name = name + @params = params + @cursor = params[:cursor] || params['cursor'] || nil + end + + def browse(request_options = {}, &block) + loop do + answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options) + answer['hits'].each do |hit| + if block.arity == 2 + yield hit, @cursor + else + yield hit + end + end + @cursor = answer['cursor'] + break if @cursor.nil? + end + end + end + + # + # Browse all index content + # + # @param queryParameters The hash of query parameters to use to browse + # To browse from a specific cursor, just add a ":cursor" parameters + # @param queryParameters An optional second parameters hash here for backward-compatibility (which will be merged with the first) + # @param request_options contains extra parameters to send with your query + # + # @DEPRECATED: + # @param page Pagination parameter used to select the page to retrieve. + # @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000. + # + def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block) + params = {} + if page_or_query_parameters.is_a?(Hash) + params.merge!(page_or_query_parameters) + else + params[:page] = page_or_query_parameters unless page_or_query_parameters.nil? + end + if hits_per_page.is_a?(Hash) + params.merge!(hits_per_page) + else + params[:hitsPerPage] = hits_per_page unless hits_per_page.nil? + end + + if block_given? + IndexBrowser.new(client, name, params).browse(request_options, &block) + else + params[:page] ||= 0 + params[:hitsPerPage] ||= 1000 + client.get(Protocol.browse_uri(name, params), :read, request_options) + end + end + + # + # Browse a single page from a specific cursor + # + # @param request_options contains extra parameters to send with your query + # + def browse_from(cursor, hits_per_page = 1000, request_options = {}) + client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options) + end + + # + # Get an object from this index + # + # @param objectID the unique identifier of the object to retrieve + # @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by "," + # @param request_options contains extra parameters to send with your query + # + def get_object(objectID, attributes_to_retrieve = nil, request_options = {}) + attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array) + if attributes_to_retrieve.nil? 
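+ # no specific attributes requested: fetch the full object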
+ client.get(Protocol.object_uri(name, objectID, nil), :read, request_options) + else + client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options) + end + end + + # + # Get a list of objects from this index + # + # @param objectIDs the array of unique identifier of the objects to retrieve + # @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by "," + # @param request_options contains extra parameters to send with your query + # + def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {}) + attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array) + requests = objectIDs.map do |objectID| + req = { :indexName => name, :objectID => objectID.to_s } + req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil? + req + end + client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results'] + end + + # + # Find object by the given condition. + # + # Options can be passed in request_options body: + # - query (string): pass a query + # - paginate (bool): choose if you want to iterate through all the + # documents (true) or only the first page (false). Default is true. + # The function takes a block to filter the results from search query + # Usage example: + # index.find_object({'query' => '', 'paginate' => true}) {|obj| obj.key?('company') and obj['company'] == 'Apple'} + # + # @param request_options contains extra parameters to send with your query + # + # @return [Hash] the matching object and its position in the result set + # + def find_object(request_options = {}) + paginate = true + page = 0 + + query = request_options[:query] || request_options['query'] || '' + request_options.delete(:query) + request_options.delete('query') + + if request_options.has_key? :paginate + paginate = request_options[:paginate] + end + + if request_options.has_key? 'paginate' + paginate = request_options['paginate'] + end + + request_options.delete(:paginate) + request_options.delete('paginate') + + while true + request_options['page'] = page + res = search(query, request_options) + + res['hits'].each_with_index do |hit, i| + if yield(hit) + return { + 'object' => hit, + 'position' => i, + 'page' => page, + } + end + end if block_given? + + has_next_page = page + 1 < res['nbPages'] + if !paginate || !has_next_page + raise AlgoliaObjectNotFoundError.new('Object not found') + end + + page += 1 + end + end + + # + # Retrieve the given object position in a set of results. + # + # @param [Array] objects the result set to browse + # @param [String] object_id the object to look for + # + # @return [Integer] position of the object, or -1 if it's not in the array + # + def self.get_object_position(objects, object_id) + objects['hits'].find_index { |hit| hit['objectID'] == object_id } || -1 + end + + # + # Check the status of a task on the server. + # All server task are asynchronous and you can check the status of a task with this method. + # + # @param taskID the id of the task returned by server + # @param request_options contains extra parameters to send with your query + # + def get_task_status(taskID, request_options = {}) + client.get_task_status(name, taskID, request_options) + end + + # + # Wait the publication of a task on the server. + # All server task are asynchronous and you can check with this method that the task is published. 
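+ # It delegates to Client#wait_task, which does the actual polling.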
+ # + # @param taskID the id of the task returned by server + # @param time_before_retry the time in milliseconds before retry (default = 100ms) + # @param request_options contains extra parameters to send with your query + # + def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + client.wait_task(name, taskID, time_before_retry, request_options) + end + + # + # Override the content of an object + # + # @param object the object to save + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_object(object, objectID = nil, request_options = {}) + client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options) + end + + # + # Override the content of object and wait end of indexing + # + # @param object the object to save + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_object!(object, objectID = nil, request_options = {}) + res = save_object(object, objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Override the content of several objects + # + # @param objects the array of objects to save, each object must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_objects(objects, request_options = {}) + batch(build_batch('updateObject', objects, true), request_options) + end + + # + # Override the content of several objects and wait end of indexing + # + # @param objects the array of objects to save, each object must contain an objectID attribute + # @param request_options contains extra parameters to send with your query + # + def save_objects!(objects, request_options = {}) + res = save_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Override the current objects by the given array of objects and wait end of indexing. Settings, + # synonyms and query rules are untouched. The objects are replaced without any downtime. + # + # @param objects the array of objects to save + # @param request_options contains extra parameters to send with your query + # + def replace_all_objects(objects, request_options = {}) + safe = request_options[:safe] || request_options['safe'] || false + request_options.delete(:safe) + request_options.delete('safe') + + tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s) + + responses = [] + + scope = ['settings', 'synonyms', 'rules'] + res = @client.copy_index(@name, tmp_index.name, scope, request_options) + responses << res + + if safe + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + batch = [] + batch_size = 1000 + count = 0 + + objects.each do |object| + batch << object + count += 1 + if count == batch_size + res = tmp_index.add_objects(batch, request_options) + responses << res + batch = [] + count = 0 + end + end + + if batch.any? 
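+ # index the remaining objects that did not fill a complete batch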
+ res = tmp_index.add_objects(batch, request_options) + responses << res + end + + if safe + responses.each do |res| + tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + end + + res = @client.move_index(tmp_index.name, @name, request_options) + responses << res + + if safe + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + responses + end + + # + # Override the current objects by the given array of objects and wait end of indexing + # + # @param objects the array of objects to save + # @param request_options contains extra parameters to send with your query + # + def replace_all_objects!(objects, request_options = {}) + replace_all_objects(objects, request_options.merge(:safe => true)) + end + + # + # Update partially an object (only update attributes passed in argument) + # + # @param object the object attributes to override + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {}) + client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options) + end + + # + # Partially override the content of several objects + # + # @param objects an array of objects to update (each object must contains a objectID attribute) + # @param create_if_not_exits a boolean, if true create the objects if they don't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_objects(objects, create_if_not_exits = true, request_options = {}) + if create_if_not_exits + batch(build_batch('partialUpdateObject', objects, true), request_options) + else + batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options) + end + end + + # + # Partially override the content of several objects and wait end of indexing + # + # @param objects an array of objects to update (each object must contains a objectID attribute) + # @param create_if_not_exits a boolean, if true create the objects if they don't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_objects!(objects, create_if_not_exits = true, request_options = {}) + res = partial_update_objects(objects, create_if_not_exits, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Update partially an object (only update attributes passed in argument) and wait indexing + # + # @param object the attributes to override + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {}) + res = partial_update_object(object, objectID, create_if_not_exits, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete an object from the index + # + # @param objectID the unique identifier of object to delete + # @param request_options contains extra parameters to send 
with your query + # + def delete_object(objectID, request_options = {}) + raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == '' + client.delete(Protocol.object_uri(name, objectID), :write, request_options) + end + + # + # Delete an object from the index and wait end of indexing + # + # @param objectID the unique identifier of object to delete + # @param request_options contains extra parameters to send with your query + # + def delete_object!(objectID, request_options = {}) + res = delete_object(objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete several objects + # + # @param objects an array of objectIDs + # @param request_options contains extra parameters to send with your query + # + def delete_objects(objects, request_options = {}) + check_array(objects) + batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options) + end + + # + # Delete several objects and wait end of indexing + # + # @param objects an array of objectIDs + # @param request_options contains extra parameters to send with your query + # + def delete_objects!(objects, request_options = {}) + res = delete_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete all objects matching a query + # This method retrieves all objects synchronously but deletes in batch + # asynchronously + # + # @param query the query string + # @param params the optional query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by_query(query, params = nil, request_options = {}) + raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil? + params = sanitized_delete_by_query_params(params) + + params[:query] = query + params[:hitsPerPage] = 1000 + params[:distinct] = false + params[:attributesToRetrieve] = ['objectID'] + params[:cursor] = '' + ids = [] + + while params[:cursor] != nil + result = browse(params, nil, request_options) + + params[:cursor] = result['cursor'] + + hits = result['hits'] + break if hits.empty? + + ids += hits.map { |hit| hit['objectID'] } + end + + delete_objects(ids, request_options) + end + + # + # Delete all objects matching a query and wait end of indexing + # + # @param query the query string + # @param params the optional query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by_query!(query, params = nil, request_options = {}) + res = delete_by_query(query, params, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res + res + end + + # + # Delete all objects matching a query (doesn't work with actual text queries) + # This method deletes every record matching the filters provided + # + # @param params query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by(params, request_options = {}) + raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil? 
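+ # reuse the same parameter sanitization as delete_by_query before posting the filters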
+ params = sanitized_delete_by_query_params(params) + client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options) + end + + # + # Delete all objects matching a query (doesn't work with actual text queries) + # This method deletes every record matching the filters provided and waits for the end of indexing + # @param params query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by!(params, request_options = {}) + res = delete_by(params, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res + res + end + + # + # Delete the index content + # + # @param request_options contains extra parameters to send with your query + # + def clear(request_options = {}) + client.post(Protocol.clear_uri(name), {}, :write, request_options) + end + alias_method :clear_index, :clear + + # + # Delete the index content and wait end of indexing + # + def clear!(request_options = {}) + res = clear(request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + alias_method :clear_index!, :clear! + + # + # Set settings for this index + # + def set_settings(new_settings, options = {}, request_options = {}) + client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options) + end + + # + # Set settings for this index and wait end of indexing + # + def set_settings!(new_settings, options = {}, request_options = {}) + res = set_settings(new_settings, options, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Get settings of this index + # + def get_settings(options = {}, request_options = {}) + options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion'] + client.get(Protocol.settings_uri(name, options).to_s, :read, request_options) + end + + # + # List all existing user keys with their associated ACLs + # + # Deprecated: Please us `client.list_api_keys` instead. + def list_api_keys(request_options = {}) + client.get(Protocol.index_keys_uri(name), :read, request_options) + end + + # + # Get ACL of a user key + # + # Deprecated: Please us `client.get_api_key` instead. + def get_api_key(key, request_options = {}) + client.get(Protocol.index_key_uri(name, key), :read, request_options) + end + + # + # Create a new user key + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a Hash that can + # contains the following values: + # - acl: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # Or the list of ACL for this key. 
Defined by an array of String that + # can contain the following values: + # - search: allows to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key) + # @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited) + # @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited) + # @param request_options contains extra parameters to send with your query + #
 # Deprecated: Please use `client.add_api_key` instead + def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {}) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options) + end + + # + # Update a user key + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a Hash that + # can contains the following values: + # - acl: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # Or the list of ACL for this key. Defined by an array of String that + # can contains the following values: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key) + # @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited) + # @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited) + # @param request_options contains extra parameters to send with your query + # + # Deprecated: Please use `client.update_api_key` instead + def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {}) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options) + end + + # + # Delete an existing user key + # + # Deprecated: Please use `client.delete_api_key` instead + def delete_api_key(key, request_options = {}) + client.delete(Protocol.index_key_uri(name, key), :write, request_options) + end + + # + # Send a batch request + # + def batch(request, request_options = {}) + client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options) + end + + # + # Send a batch request and wait the end of the indexing + # + def batch!(request, request_options = {}) + res = batch(request, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Search for facet values + # + # @param facet_name Name of the facet to search. It must have been declared in the + # index's`attributesForFaceting` setting with the `searchable()` modifier. + # @param facet_query Text to search for in the facet's values + # @param search_parameters An optional query to take extra search parameters into account. 
+ # These parameters apply to index objects like in a regular search query. + # Only facet values contained in the matched objects will be returned. + # @param request_options contains extra parameters to send with your query + # + def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {}) + params = search_parameters.clone + params['facetQuery'] = facet_query + client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options) + end + + # deprecated + alias_method :search_facet, :search_for_facet_values + + # + # Perform a search with disjunctive facets generating as many queries as number of disjunctive facets + # + # @param query the query + # @param disjunctive_facets the array of disjunctive facets + # @param params a hash representing the regular query parameters + # @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements + # ex: { "my_facet1" => ["my_value1", ["my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] } + # @param request_options contains extra parameters to send with your query + # + def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {}) + raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array) + raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty? + + # extract disjunctive facets & associated refinements + disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String) + disjunctive_refinements = {} + refinements.each do |k, v| + disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s) + end + + # build queries + queries = [] + ## hits + regular facets query + filters = [] + refinements.to_a.each do |k, values| + r = values.map { |v| "#{k}:#{v}" } + if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym] + # disjunctive refinements are ORed + filters << r + else + # regular refinements are ANDed + filters += r + end + end + queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters }) + ## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet) + disjunctive_facets.each do |disjunctive_facet| + filters = [] + refinements.each do |k, values| + if k.to_s != disjunctive_facet.to_s + r = values.map { |v| "#{k}:#{v}" } + if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym] + # disjunctive refinements are ORed + filters << r + else + # regular refinements are ANDed + filters += r + end + end + end + queries << params.merge({ + :index_name => self.name, + :query => query, + :page => 0, + :hitsPerPage => 1, + :attributesToRetrieve => [], + :attributesToHighlight => [], + :attributesToSnippet => [], + :facets => disjunctive_facet, + :facetFilters => filters, + :analytics => false + }) + end + answers = client.multiple_queries(queries, { :request_options => request_options }) + + # aggregate answers + ## first answer stores the hits + regular facets + aggregated_answer = answers['results'][0] + ## others store the disjunctive facets + aggregated_answer['disjunctiveFacets'] = {} + answers['results'].each_with_index do |a, i| + next if i == 0 + a['facets'].each do |facet, values| + ## add the 
facet to the disjunctive facet hash + aggregated_answer['disjunctiveFacets'][facet] = values + ## concatenate missing refinements + (disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r| + if aggregated_answer['disjunctiveFacets'][facet][r].nil? + aggregated_answer['disjunctiveFacets'][facet][r] = 0 + end + end + end + end + + aggregated_answer + end + + # + # Alias of Algolia.list_indexes + # + # @param request_options contains extra parameters to send with your query + # + def Index.all(request_options = {}) + Algolia.list_indexes(request_options) + end + + # + # Search synonyms + # + # @param query the query + # @param params an optional hash of :type, :page, :hitsPerPage + # @param request_options contains extra parameters to send with your query + # + def search_synonyms(query, params = {}, request_options = {}) + type = params[:type] || params['type'] + type = type.join(',') if type.is_a?(Array) + page = params[:page] || params['page'] || 0 + hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20 + params = { + :query => query, + :type => type.to_s, + :page => page, + :hitsPerPage => hits_per_page + } + client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options) + end + + # + # Get a synonym + # + # @param objectID the synonym objectID + # @param request_options contains extra parameters to send with your query + def get_synonym(objectID, request_options = {}) + client.get(Protocol.synonym_uri(name, objectID), :read, request_options) + end + + # + # Delete a synonym + # + # @param objectID the synonym objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_synonym(objectID, forward_to_replicas = false, request_options = {}) + client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options) + end + + # + # Delete a synonym and wait the end of indexing + # + # @param objectID the synonym objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_synonym!(objectID, forward_to_replicas = false, request_options = {}) + res = delete_synonym(objectID, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Save a synonym + # + # @param objectID the synonym objectID + # @param synonym the synonym + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {}) + client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options) + end + + # + # Save a synonym and wait the end of indexing + # + # @param objectID the synonym objectID + # @param synonym the synonym + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {}) + res = save_synonym(objectID, synonym, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + 
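[Reviewer note] For context while reading this hunk: a minimal usage sketch of the synonym helpers defined above, driven the way application code typically calls the classic v1 `algoliasearch` API. The application ID, API key, index name, and synonym payload below are placeholders for illustration, not part of this diff.

# Illustrative only -- credentials and index name are assumptions.
require 'algoliasearch'

Algolia.init(:application_id => 'YourApplicationID', :api_key => 'YourAdminAPIKey')
index = Algolia::Index.new('products')

# Save one synonym and block until indexing finishes; the bang variant
# calls wait_task internally, as defined above.
index.save_synonym!('color-synonym', {
  :objectID => 'color-synonym',
  :type     => 'synonym',
  :synonyms => ['grey', 'gray']
})

# export_synonyms (defined further down in this file) pages through every
# synonym, yields each record, and also returns the full array.
index.export_synonyms(100) { |synonym| puts synonym['objectID'] }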
+ # + # Clear all synonyms + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_synonyms(forward_to_replicas = false, request_options = {}) + client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options) + end + + # + # Clear all synonyms and wait the end of indexing + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_synonyms!(forward_to_replicas = false, request_options = {}) + res = clear_synonyms(forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Add/Update an array of synonyms + # + # @param synonyms the array of synonyms to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {}) + client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options) + end + + # + # Add/Update an array of synonyms and wait the end of indexing + # + # @param synonyms the array of synonyms to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {}) + res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Replace synonyms in the index by the given array of synonyms + # + # @param synonyms the array of synonyms to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_synonyms(synonyms, request_options = {}) + forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false + batch_synonyms(synonyms, forward_to_replicas, true, request_options) + end + + # + # Replace synonyms in the index by the given array of synonyms and wait the end of indexing + # + # @param synonyms the array of synonyms to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_synonyms!(synonyms, request_options = {}) + res = replace_all_synonyms(synonyms, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Export the full list of synonyms + # Accepts an optional block to which it will pass each synonym + # Also returns an array with all the synonyms + # + # @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100 + # @param request_options contains extra parameters to send with your query - Optional + # + def export_synonyms(hits_per_page = 100, request_options = {}, &_block) + 
res = [] + page = 0 + loop do + curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] + curr.each do |synonym| + res << synonym + yield synonym if block_given? + end + break if curr.size < hits_per_page + page += 1 + end + res + end + + # + # Search rules + # + # @param query the query + # @param params an optional hash of :anchoring, :context, :page, :hitsPerPage + # @param request_options contains extra parameters to send with your query + # + def search_rules(query, params = {}, request_options = {}) + anchoring = params[:anchoring] + context = params[:context] + page = params[:page] || params['page'] || 0 + hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20 + params = { + :query => query, + :page => page, + :hitsPerPage => hits_per_page + } + params[:anchoring] = anchoring unless anchoring.nil? + params[:context] = context unless context.nil? + client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options) + end + + # + # Get a rule + # + # @param objectID the rule objectID + # @param request_options contains extra parameters to send with your query + # + def get_rule(objectID, request_options = {}) + client.get(Protocol.rule_uri(name, objectID), :read, request_options) + end + + # + # Delete a rule + # + # @param objectID the rule objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_rule(objectID, forward_to_replicas = false, request_options = {}) + client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options) + end + + # + # Delete a rule and wait the end of indexing + # + # @param objectID the rule objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_rule!(objectID, forward_to_replicas = false, request_options = {}) + res = delete_rule(objectID, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Save a rule + # + # @param objectID the rule objectID + # @param rule the rule + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_rule(objectID, rule, forward_to_replicas = false, request_options = {}) + raise ArgumentError.new('objectID must not be blank') if objectID.nil? 
|| objectID == '' + client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options) + end + + # + # Save a rule and wait the end of indexing + # + # @param objectID the rule objectID + # @param rule the rule + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {}) + res = save_rule(objectID, rule, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Clear all rules + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_rules(forward_to_replicas = false, request_options = {}) + client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options) + end + + # + # Clear all rules and wait the end of indexing + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_rules!(forward_to_replicas = false, request_options = {}) + res = clear_rules(forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Add/Update an array of rules + # + # @param rules the array of rules to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param clear_existing_rules should we clear the existing rules before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {}) + client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options) + end + + # + # Add/Update an array of rules and wait the end of indexing + # + # @param rules the array of rules to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param clear_existing_rules should we clear the existing rules before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {}) + res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Replace rules in the index by the given array of rules + # + # @param rules the array of rules to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_rules(rules, request_options = {}) + forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false + batch_rules(rules, forward_to_replicas, true, request_options) + end + + # + # Replace rules in the index by the given array of rules and wait the end of indexing + # + # @param rules the array of rules to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_rules!(rules, request_options = {}) + res = 
replace_all_rules(rules, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Export the full list of rules + # Accepts an optional block to which it will pass each rule + # Also returns an array with all the rules + # + # @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100 + # @param request_options contains extra parameters to send with your query - Optional + # + def export_rules(hits_per_page = 100, request_options = {}, &_block) + res = [] + page = 0 + loop do + curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] + curr.each do |rule| + res << rule + yield rule if block_given? + end + break if curr.size < hits_per_page + page += 1 + end + res + end + + # + # Check whether an index exists or not + # + # @return [Boolean] + # + def exists + begin + get_settings + rescue AlgoliaProtocolError => e + if e.code === 404 + return false + end + + raise e + end + return true + end + + # + # Aliases the exists method + # + alias :exists? :exists + + # Deprecated + alias_method :get_user_key, :get_api_key + alias_method :list_user_keys, :list_api_keys + alias_method :add_user_key, :add_api_key + alias_method :update_user_key, :update_api_key + alias_method :delete_user_key, :delete_api_key + + private + + def check_array(object) + raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array) + end + + def check_object(object, in_array = false) + case object + when Array + raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array') + when String, Integer, Float, TrueClass, FalseClass, NilClass + raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}") + else + # ok + end + end + + def get_objectID(object, objectID = nil) + check_object(object) + objectID ||= object[:objectID] || object['objectID'] + raise ArgumentError.new("Missing 'objectID'") if objectID.nil? 
+ return objectID + end + + def build_batch(action, objects, with_object_id = false) + check_array(objects) + { + :requests => objects.map { |object| + check_object(object, true) + h = { :action => action, :body => object } + h[:objectID] = get_objectID(object).to_s if with_object_id + h + } + } + end + + def sanitized_delete_by_query_params(params) + params ||= {} + params.delete(:hitsPerPage) + params.delete('hitsPerPage') + params.delete(:attributesToRetrieve) + params.delete('attributesToRetrieve') + params + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb new file mode 100644 index 0000000..a3d5883 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb @@ -0,0 +1,131 @@ +module Algolia + + class Insights + MIN_RUBY_VERSION = '1.9.0' + + def initialize(app_id, api_key, region = 'us', params = {}) + headers = params[:headers] || {} + @app_id = app_id + @api_key = api_key + @url = "https://insights.#{region}.algolia.io" + @headers = headers.merge({ + Protocol::HEADER_APP_ID => app_id, + Protocol::HEADER_API_KEY => api_key, + 'Content-Type' => 'application/json; charset=utf-8', + 'User-Agent' => ["Algolia for Ruby (#{::Algolia::VERSION})", "Ruby (#{RUBY_VERSION})"].join('; ') + }) + end + + def user(user_token) + UserInsights.new(self, user_token) + end + + def send_event(event) + send_events([event]) + end + + def send_events(events) + perform_request(:POST, '/1/events', {}, { 'events' => events }.to_json) + end + + private + + def perform_request(method, path, params = {}, data = {}) + http = HTTPClient.new + + url = @url + path + + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + url << "?" 
+ Protocol.to_query(encoded_params) + + response = case method + when :POST + http.post(url, { :body => data, :header => @headers }) + end + + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content}") + end + + JSON.parse(response.content) + end + end + + class UserInsights + def initialize(insights, user_token) + @insights = insights + @user_token = user_token + end + + def clicked_object_ids(event_name, index_name, object_ids, request_options = {}) + clicked({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def clicked_object_ids_after_search(event_name, index_name, object_ids, positions, query_id, request_options = {}) + clicked({ + 'objectIDs' => object_ids, + 'positions' => positions, + 'queryID' => query_id, + }, event_name, index_name, request_options) + end + + def clicked_filters(event_name, index_name, filters, request_options = {}) + clicked({ 'filters' => filters }, event_name, index_name, request_options) + end + + def converted_object_ids(event_name, index_name, object_ids, request_options = {}) + converted({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def converted_object_ids_after_search(event_name, index_name, object_ids, query_id, request_options = {}) + converted({ + 'objectIDs' => object_ids, + 'queryID' => query_id, + }, event_name, index_name, request_options) + end + + def converted_filters(event_name, index_name, filters, request_options = {}) + converted({ 'filters' => filters }, event_name, index_name, request_options) + end + + def viewed_object_ids(event_name, index_name, object_ids, request_options = {}) + viewed({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def viewed_filters(event_name, index_name, filters, request_options = {}) + viewed({ 'filters' => filters }, event_name, index_name, request_options) + end + + private + + def clicked(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'click', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def converted(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'conversion', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def viewed(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'view', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def send_event(event) + @insights.send_event(event.merge({ 'userToken' => @user_token})) + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb new file mode 100644 index 0000000..849ee17 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb @@ -0,0 +1,211 @@ +require 'cgi' + +module Algolia + # A module which encapsulates the specifics of Algolia's REST API. + module Protocol + + # Basics + + # The version of the REST API implemented by this module. + VERSION = 1 + + # HTTP Headers + # ---------------------------------------- + + # The HTTP header used for passing your application ID to the Algolia API. + HEADER_APP_ID = "X-Algolia-Application-Id" + + # The HTTP header used for passing your API key to the Algolia API. 
+ HEADER_API_KEY = "X-Algolia-API-Key" + HEADER_FORWARDED_IP = "X-Forwarded-For" + HEADER_FORWARDED_API_KEY = "X-Forwarded-API-Key" + + # HTTP ERROR CODES + # ---------------------------------------- + + ERROR_BAD_REQUEST = 400 + ERROR_FORBIDDEN = 403 + ERROR_NOT_FOUND = 404 + + # URI Helpers + # ---------------------------------------- + + # Construct a uri to list available indexes + def Protocol.indexes_uri + "/#{VERSION}/indexes" + end + + def Protocol.multiple_queries_uri(strategy = 'none') + "/#{VERSION}/indexes/*/queries?strategy=#{strategy}" + end + + def Protocol.objects_uri + "/#{VERSION}/indexes/*/objects" + end + + # Construct a uri referencing a given Algolia index + def Protocol.index_uri(index) + "/#{VERSION}/indexes/#{CGI.escape(index)}" + end + + def Protocol.batch_uri(index = nil) + "#{index.nil? ? "/#{VERSION}/indexes/*" : index_uri(index)}/batch" + end + + def Protocol.index_operation_uri(index) + "#{index_uri(index)}/operation" + end + + def Protocol.task_uri(index, task_id) + "#{index_uri(index)}/task/#{task_id}" + end + + def Protocol.object_uri(index, object_id, params = {}) + params = params.nil? || params.size == 0 ? '' : "?#{to_query(params)}" + "#{index_uri(index)}/#{CGI.escape(object_id.to_s)}#{params}" + end + + def Protocol.search_uri(index, query, params = {}) + params = params.nil? || params.size == 0 ? '' : "&#{to_query(params)}" + "#{index_uri(index)}?query=#{CGI.escape(query)}&#{params}" + end + + def Protocol.search_post_uri(index) + "#{index_uri(index)}/query" + end + + def Protocol.browse_uri(index, params = {}) + params = params.nil? || params.size == 0 ? '' : "?#{to_query(params)}" + "#{index_uri(index)}/browse#{params}" + end + + def Protocol.search_facet_uri(index, facet) + "#{index_uri(index)}/facets/#{CGI.escape(facet)}/query" + end + + def Protocol.partial_object_uri(index, object_id, create_if_not_exits = true) + params = create_if_not_exits ? '' : '?createIfNotExists=false' + "#{index_uri(index)}/#{CGI.escape(object_id.to_s)}/partial#{params}" + end + + def Protocol.settings_uri(index, params = {}) + params = params.nil? || params.size == 0 ? 
'' : "?#{to_query(params)}" + "#{index_uri(index)}/settings#{params}" + end + + def Protocol.clear_uri(index) + "#{index_uri(index)}/clear" + end + + def Protocol.logs(offset, length, type) + "/#{VERSION}/logs?offset=#{offset}&length=#{length}&type=#{type}" + end + + def Protocol.keys_uri + "/#{VERSION}/keys" + end + + def Protocol.key_uri(key) + "/#{VERSION}/keys/#{key}" + end + + def Protocol.restore_key_uri(key) + "/#{VERSION}/keys/#{key}/restore" + end + + def Protocol.index_key_uri(index, key) + "#{index_uri(index)}/keys/#{key}" + end + + def Protocol.index_keys_uri(index) + "#{index_uri(index)}/keys" + end + + def Protocol.to_query(params) + params.map do |k, v| + "#{CGI.escape(k.to_s)}=#{CGI.escape(v.to_s)}" + end.join('&') + end + + def Protocol.synonyms_uri(index) + "#{index_uri(index)}/synonyms" + end + + def Protocol.synonym_uri(index, object_id) + "#{synonyms_uri(index)}/#{CGI.escape(object_id.to_s)}" + end + + def Protocol.search_synonyms_uri(index) + "#{synonyms_uri(index)}/search" + end + + def Protocol.clear_synonyms_uri(index) + "#{synonyms_uri(index)}/clear" + end + + def Protocol.batch_synonyms_uri(index) + "#{synonyms_uri(index)}/batch" + end + + def Protocol.rules_uri(index) + "#{index_uri(index)}/rules" + end + + def Protocol.rule_uri(index, object_id) + "#{rules_uri(index)}/#{CGI.escape(object_id.to_s)}" + end + + def Protocol.search_rules_uri(index) + "#{rules_uri(index)}/search" + end + + def Protocol.clear_rules_uri(index) + "#{rules_uri(index)}/clear" + end + + def Protocol.batch_rules_uri(index) + "#{rules_uri(index)}/batch" + end + + def Protocol.delete_by_uri(index) + "#{index_uri(index)}/deleteByQuery" + end + + def Protocol.personalization_strategy_uri + "/1/recommendation/personalization/strategy" + end + + def Protocol.clusters_uri + "/#{VERSION}/clusters" + end + + def Protocol.cluster_mapping_uri(user_id = nil) + user_id = "/#{CGI.escape(user_id)}" if user_id + + "/#{VERSION}/clusters/mapping" + user_id.to_s + end + + def Protocol.list_ids_uri(page, hits_per_page) + Protocol.cluster_mapping_uri+"?page=#{CGI.escape(page.to_s)}&hitsPerPage=#{CGI.escape(hits_per_page.to_s)}" + end + + def Protocol.cluster_top_user_uri + "/#{VERSION}/clusters/mapping/top" + end + + def Protocol.search_user_id_uri + "/#{VERSION}/clusters/mapping/search" + end + + def Protocol.ab_tests_uri(ab_test = nil) + ab_test = "/#{ab_test}" if ab_test + + "/2/abtests" + ab_test.to_s + end + + def Protocol.ab_tests_stop_uri(ab_test) + "/2/abtests/#{ab_test}/stop" + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb new file mode 100644 index 0000000..3926b62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb @@ -0,0 +1,3 @@ +module Algolia + VERSION = "1.27.5" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb new file mode 100644 index 0000000..e3d112f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb @@ -0,0 +1,54 @@ +begin + require 'webmock' +rescue LoadError + puts 'WebMock was not found, please add "gem \'webmock\'" to your Gemfile.' + exit 1 +end + +module Algolia + class WebMock + def self.mock! 
+ # list indexes + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes/).to_return(:body => '{ "items": [] }') + # query index + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "hits": [ { "objectID": 42 } ], "page": 1, "hitsPerPage": 1, "nbHits": 1, "nbPages": 1 }') + # delete index + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # clear index + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/clear/).to_return(:body => '{ "taskID": 42 }') + # add object + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # save object + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # partial update + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+\/partial/).to_return(:body => '{ "taskID": 42 }') + # get object + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{}') + # delete object + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # batch + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/batch/).to_return(:body => '{ "taskID": 42 }') + # settings + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/settings/).to_return(:body => '{}') + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/settings/).to_return(:body => '{ "taskID": 42 }') + # browse + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/browse/).to_return(:body => '{ "hits": [] }') + # operations + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/operation/).to_return(:body => '{ "taskID": 42 }') + # tasks + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/task\/[^\/]+/).to_return(:body => '{ "status": "published" }') + # index keys + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/keys/).to_return(:body => '{ }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/keys/).to_return(:body => '{ "keys": [] }') + # global keys + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/keys/).to_return(:body => '{ }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/keys/).to_return(:body => '{ "keys": [] }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/keys\/[^\/]+/).to_return(:body => '{ }') + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/keys\/[^\/]+/).to_return(:body => '{ }') + # query POST + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/query/).to_return(:body => '{ "hits": [ { "objectID": 42 } ], "page": 1, "hitsPerPage": 1, "nbHits": 1, "nbPages": 1 }') + end + end +end + +Algolia::WebMock.mock! 
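[Reviewer note] The `algolia/webmock` file above registers blanket WebMock stubs for the Algolia REST endpoints as soon as it is loaded (it calls `Algolia::WebMock.mock!` at require time). A minimal test-suite sketch follows; RSpec and the `spec_helper` wiring are assumptions for illustration, only `Algolia::WebMock.mock!` and the `require 'algolia/webmock'` entry point come from this diff.

# spec/spec_helper.rb -- illustrative only.
require 'webmock/rspec'   # WebMock must be available; see the LoadError guard above
require 'algolia/webmock' # loading this file immediately stubs the Algolia endpoints

RSpec.configure do |config|
  config.before(:each) do
    # webmock/rspec resets stubs after each example, so re-register them here.
    Algolia::WebMock.mock!
  end
end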
diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb new file mode 100644 index 0000000..f000b23 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb @@ -0,0 +1,26 @@ +## ---------------------------------------------------------------------- +## +## Ruby client for algolia.com +## A quick library for playing with algolia.com's REST API for object storage. +## Thanks to Sylvain Utard for the initial version of the library +## ---------------------------------------------------------------------- +require 'json' +if !defined?(RUBY_ENGINE) && defined?(RUBY_VERSION) && RUBY_VERSION == '1.8.7' + # work-around a limitation from nahi/httpclient, using the undefined RUBY_ENGINE constant + RUBY_ENGINE = 'ruby1.8' + require 'httpclient' + Object.send(:remove_const, :RUBY_ENGINE) +else + require 'httpclient' +end +require 'date' +require 'cgi' +require 'pathname' + +cwd = Pathname(__FILE__).dirname +$:.unshift(cwd.to_s) unless $:.include?(cwd.to_s) || $:.include?(cwd.expand_path.to_s) + +require 'algolia/index' +require 'algolia/analytics' +require 'algolia/insights' +require 'algolia/account_client' diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt new file mode 100644 index 0000000..52e0c4e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt @@ -0,0 +1,3908 @@ +## +## ca-bundle.crt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Sat Dec 29 20:03:40 2012 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1 +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. 
+## + +# @(#) $RCSfile: certdata.txt,v $ $Revision: 1.87 $ $Date: 2012/12/29 16:32:45 $ + +USERTrust RSA root CA +===================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE 
+AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm +SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Digital Signature Trust Co. 
Global CA 1 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENnAVljANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMTAeFw05ODEy +MTAxODEwMjNaFw0xODEyMTAxODQwMjNaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUxMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQCgbIGpzzQeJN3+hijM3oMv+V7UQtLodGBmE5gGHKlREmlvMVW5SXIACH7TpWJE +NySZj9mDSI+ZbZUTu0M7LklOiDfBu1h//uG9+LthzfNHwJmm8fOR6Hh8AMthyUQncWlVSn5JTe2i +o74CTADKAqjuAQIxZA9SLRN0dja1erQtcQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTExDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMTAxODEwMjNagQ8yMDE4MTIxMDE4MTAyM1owCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFGp5fpFpRhgTCgJ3pVlbYJglDqL4MB0GA1UdDgQWBBRqeX6RaUYYEwoCd6VZW2CYJQ6i+DAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +ACIS2Hod3IEGtgllsofIH160L+nEHvI8wbsEkBFKg05+k7lNQseSJqBcNJo4cvj9axY+IO6CizEq +kzaFI4iKPANo08kJD038bKTaKHKTDomAsH3+gG9lbRgzl4vCa4nuYD3Im+9/KzJic5PLPON74nZ4 +RbyhkwS7hp86W0N6w4pl +-----END CERTIFICATE----- + +Digital Signature Trust Co. Global CA 3 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENm7TzjANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMjAeFw05ODEy +MDkxOTE3MjZaFw0xODEyMDkxOTQ3MjZaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUyMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQC/k48Xku8zExjrEH9OFr//Bo8qhbxe+SSmJIi2A7fBw18DW9Fvrn5C6mYjuGOD +VvsoLeE4i7TuqAHhzhy2iCoiRoX7n6dwqUcUP87eZfCocfdPJmyMvMa1795JJ/9IKn3oTQPMx7JS +xhcxEzu1TdvIxPbDDyQq2gyd55FbgM2UnQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTIxDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMDkxOTE3MjZagQ8yMDE4MTIwOTE5MTcyNlowCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFB6CTShlgDzJQW6sNS5ay97u+DlbMB0GA1UdDgQWBBQegk0oZYA8yUFurDUuWsve7vg5WzAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +AEeNg61i8tuwnkUiBbmi1gMOOHLnnvx75pO2mqWilMg0HZHRxdf0CiUPPXiBng+xZ8SQTGPdXqfi +up/1902lMXucKS1M/mQ+7LZT/uqb7YLbdHVLB3luHtgZg3Pe9T7Qtd7nS2h9Qy4qIOF+oHhEngj1 +mPnHfxsb1gYgAlihw6ID +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G2 
+============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEEzH6qqYPnHTkxD4PTqJkZIwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCq0Lq+Fi24g9TK0g+8djHKlNgd +k4xWArzZbxpvUjZudVYKVdPfQ4chEWWKfo+9Id5rMj8bhDSVBZ1BNeuS65bdqlk/AVNtmU/t5eIq +WpDBucSmFc/IReumXY6cPvBkJHalzasab7bYe1FhbqZ/h8jit+U03EGI6glAvnOSPWvndQIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAKlPww3HZ74sy9mozS11534Vnjty637rXC0Jh9ZrbWB85a7FkCMM +XErQr7Fd88e2CtvgFZMN3QO8x3aKtd1Pw5sTdbgBwObJW2uluIncrKTdcu1OofdPvAbT6shkdHvC +lUGcZXNY8ZCaPGqxmMnEh7zPRW1F4m4iP/68DzFc6PLZ +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAzCCAmwCEQC5L2DMiJ+hekYJuFtwbIqvMA0GCSqGSIb3DQEBBQUAMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazAeFw05ODA1MTgwMDAwMDBaFw0yODA4MDEyMzU5NTlaMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAp4gBIXQs5xoD8JjhlzwPIQjx +nNuX6Zr8wgQGE75fUsjMHiwSViy4AWkszJkfrbCWrnkE8hM5wXuYuggs6MKEEyyqaekJ9MepAqRC +wiNPStjwDqL7MWzJ5m+ZJwf15vRMeJ5t60aG+rmGyVTyssSv1EYcWskVMP8NbPUtDm3Of3cCAwEA +ATANBgkqhkiG9w0BAQUFAAOBgQByLvl/0fFx+8Se9sVeUYpAmLho+Jscg9jinb3/7aHmZuovCfTK +1+qlK5X2JGCGTUQug6XELaDTrnhpb3LabK4I8GOSN+a7xDAXrXfMSTWqz9iP0b63GJZHc2pUIjRk +LbYWm1lbtFFZOrMLFPQS32eg9K0yZF6xRnInjBJ7xUS0rg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT 
+1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD +Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 
+LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkg 
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAN2E1Lm0+afY8wR4nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/E +bRrsC+MO8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjVojYJ +rKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjbPG7PoBMAGrgnoeS+ +Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP26KbqxzcSXKMpHgLZ2x87tNcPVkeB +FQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vrn5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +q2aN17O6x5q25lXQBfGfMY1aqtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/N +y9Sn2WCVhDr4wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrspSCAaWihT37h +a88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4E1Z5T21Q6huwtVexN2ZYI/Pc +D98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29y +azE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ug +b25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9y +aXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArwoNwtUs22e5LeWUJ92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6 +tW8UvxDOJxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUYwZF7 +C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9okoqQHgiBVrKtaaNS +0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjNqWm6o+sdDZykIKbBoMXRRkwXbdKs +Zj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/ESrg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0 +JhU8wI1NQ0kdvekhktdmnLfexbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf +0xwLRtxyID+u7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RIsH/7NiXaldDx +JBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTPcjnhsUPgKM+351psE2tJs//j +GHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc 
+cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l +dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl 
+cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0xOTEyMjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo3QwcjARBglghkgBhvhC +AQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGAvtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdER +gL7YibkIozH5oSQJFrlwMB0GCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0B +AQUFAAOCAQEAWUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQh7A6tcOdBTcS +o8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18f3v/rxzP5tsHrV7bhZ3QKw0z +2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfNB/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjX +OP/swNlQ8C5LWK5Gb9Auw2DaclVyvUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp 
+-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 2 +============================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEXMBUGA1UE +ChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0y +MB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoT +DkVxdWlmYXggU2VjdXJlMSYwJAYDVQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn +2Z0GvxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/BPO3QSQ5 +BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0CAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUx +JjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTkwNjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9e +uSBIplBqy/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAAyGgq3oThr1 +jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia +78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUm +V+GRMOrN +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU 
+cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 
+KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E 
+FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t 
+JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + 
+UTN-USER First-Network Applications +=================================== +-----BEGIN CERTIFICATE----- +MIIEZDCCA0ygAwIBAgIQRL4Mi1AAJLQR0zYwS8AzdzANBgkqhkiG9w0BAQUFADCBozELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzAp +BgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBBcHBsaWNhdGlvbnMwHhcNOTkwNzA5MTg0ODM5 +WhcNMTkwNzA5MTg1NzQ5WjCBozELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5T +YWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzApBgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBB +cHBsaWNhdGlvbnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCz+5Gh5DZVhawGNFug +mliy+LUPBXeDrjKxdpJo7CNKyXY/45y2N3kDuatpjQclthln5LAbGHNhSuh+zdMvZOOmfAz6F4Cj +DUeJT1FxL+78P/m4FoCHiZMlIJpDgmkkdihZNaEdwH+DBmQWICzTSaSFtMBhf1EI+GgVkYDLpdXu +Ozr0hAReYFmnjDRy7rh4xdE7EkpvfmUnuaRVxblvQ6TFHSyZwFKkeEwVs0CYCGtDxgGwenv1axwi +P8vv/6jQOkt2FZ7S0cYu49tXGzKiuG/ohqY/cKvlcJKrRB5AUPuco2LkbG6gyN7igEL66S/ozjIE +j3yNtxyjNTwV3Z7DrpelAgMBAAGjgZEwgY4wCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8w +HQYDVR0OBBYEFPqGydvguul49Uuo1hXf8NPhahQ8ME8GA1UdHwRIMEYwRKBCoECGPmh0dHA6Ly9j +cmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LU5ldHdvcmtBcHBsaWNhdGlvbnMuY3JsMA0G +CSqGSIb3DQEBBQUAA4IBAQCk8yXM0dSRgyLQzDKrm5ZONJFUICU0YV8qAhXhi6r/fWRRzwr/vH3Y +IWp4yy9Rb/hCHTO967V7lMPDqaAt39EpHx3+jz+7qEUqf9FuVSTiuwL7MT++6LzsQCv4AdRWOOTK +RIK1YSAhZ2X28AvnNPilwpyjXEAfhZOVBt5P1CeptqX8Fs1zMT+4ZSfP1FMa8Kxun08FDAOBp4Qp +xFq9ZFdyrTvPNximmMatBrTcCKME1SmklpoSZ0qMYEWd8SOasACcaLWYUNPvji6SZbFIPiG+FTAq +DbUMo2s/rn9X9R+WfN9v3YIwLGUbQErNaLly7HF27FSOH4UMAWr6pjisH8SE +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg 
+T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg +JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL 
+kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm 
+aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu 
+BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp 
+p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 1 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBJDANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MxIENBMB4XDTAxMDQwNjEwNDkxM1oXDTIxMDQw +NjEwNDkxM1owOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALWJHytPZwp5/8Ue+H88 +7dF+2rDNbS82rDTG29lkFwhjMDMiikzujrsPDUJVyZ0upe/3p4zDq7mXy47vPxVnqIJyY1MPQYx9 +EJUkoVqlBvqSV536pQHydekfvFYmUk54GWVYVQNYwBSujHxVX3BbdyMGNpfzJLWaRpXk3w0LBUXl 
+0fIdgrvGE+D+qnr9aTCU89JFhfzyMlsy3uhsXR/LpCJ0sICOXZT3BgBLqdReLjVQCfOAl/QMF645 +2F/NM8EcyonCIvdFEu1eEpOdY6uCLrnrQkFEy0oaAIINnvmLVz5MxxftLItyM19yejhW1ebZrgUa +HXVFsculJRwSVzb9IjcCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQIR+IMi/ZT +iFIwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCLGrLJXWG04bkruVPRsoWdd44W7hE9 +28Jj2VuXZfsSZ9gqXLar5V7DtxYvyOirHYr9qxp81V9jz9yw3Xe5qObSIjiHBxTZ/75Wtf0HDjxV +yhbMp6Z3N/vbXB9OWQaHowND9Rart4S9Tu+fMTfwRvFAttEMpWT4Y14h21VOTzF2nBBhjrZTOqMR +vq9tfB69ri3iDGnHhVNoomG6xT60eVR4ngrHAr5i0RGCS2UvkVrCqIexVmiUefkl98HVrhq4uz2P +qYo4Ffdz0Fpg0YCw8NzVUM1O7pJIae2yIx4wzMiUyLb1O4Z/P6Yun/Y+LLWSlj7fLJOK/4GMDw9Z +IRlXvVWa +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx 
+NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +TDC OCES Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFGTCCBAGgAwIBAgIEPki9xDANBgkqhkiG9w0BAQUFADAxMQswCQYDVQQGEwJESzEMMAoGA1UE +ChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTAeFw0wMzAyMTEwODM5MzBaFw0zNzAyMTEwOTA5 +MzBaMDExCzAJBgNVBAYTAkRLMQwwCgYDVQQKEwNUREMxFDASBgNVBAMTC1REQyBPQ0VTIENBMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGL2YSCyz8DGhdfjeebM7fI5kqSXLmSjhFuH +nEz9pPPEXyG9VhDr2y5h7JNp46PMvZnDBfwGuMo2HP6QjklMxFaaL1a8z3sM8W9Hpg1DTeLpHTk0 +zY0s2RKY+ePhwUp8hjjEqcRhiNJerxomTdXkoCJHhNlktxmW/OwZ5LKXJk5KTMuPJItUGBxIYXvV +iGjaXbXqzRowwYCDdlCqT9HU3Tjw7xb04QxQBr/q+3pJoSgrHPb8FTKjdGqPqcNiKXEx5TukYBde +dObaE+3pHx8b0bJoc8YQNHVGEBDjkAB2QMuLt0MJIf+rTpPGWOmlgtt3xDqZsXKVSQTwtyv6e1mO +3QIDAQABo4ICNzCCAjMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwgewGA1UdIASB +5DCB4TCB3gYIKoFQgSkBAQEwgdEwLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuY2VydGlmaWthdC5k +ay9yZXBvc2l0b3J5MIGdBggrBgEFBQcCAjCBkDAKFgNUREMwAwIBARqBgUNlcnRpZmlrYXRlciBm +cmEgZGVubmUgQ0EgdWRzdGVkZXMgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4xLiBDZXJ0aWZp +Y2F0ZXMgZnJvbSB0aGlzIENBIGFyZSBpc3N1ZWQgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4x +LjARBglghkgBhvhCAQEEBAMCAAcwgYEGA1UdHwR6MHgwSKBGoESkQjBAMQswCQYDVQQGEwJESzEM +MAoGA1UEChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTENMAsGA1UEAxMEQ1JMMTAsoCqgKIYm +aHR0cDovL2NybC5vY2VzLmNlcnRpZmlrYXQuZGsvb2Nlcy5jcmwwKwYDVR0QBCQwIoAPMjAwMzAy +MTEwODM5MzBagQ8yMDM3MDIxMTA5MDkzMFowHwYDVR0jBBgwFoAUYLWF7FZkfhIZJ2cdUBVLc647 ++RIwHQYDVR0OBBYEFGC1hexWZH4SGSdnHVAVS3OuO/kSMB0GCSqGSIb2fQdBAAQQMA4bCFY2LjA6 +NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEACromJkbTc6gJ82sLMJn9iuFXehHTuJTXCRBuo7E4 +A9G28kNBKWKnctj7fAXmMXAnVBhOinxO5dHKjHiIzxvTkIvmI/gLDjNDfZziChmPyQE+dF10yYsc +A+UYyAFMP8uXBV2YcaaYb7Z8vTd/vuGTJW1v8AqtFxjhA7wHKcitJuj4YfD9IQl+mo6paH1IYnK9 +AOoBmbgGglGBTvH1tJFUuSN6AJqfXY3gPGS5GhKSKseCRHI53OI8xthV9RVOyAUO28bQYqbsFbS1 +AoLbrIyigfCbmTH1ICCoiGEKB5+U/NDXG8wuF/MEJ3Zn61SD/aSQfgY9BKNDLdr8C2LqL19iUw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ 
+BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Email Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0 +BgNVBAMTLVVUTi1VU0VSRmlyc3QtQ2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05 +OTA3MDkxNzI4NTBaFw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQx +FzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsx +ITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UEAxMtVVROLVVTRVJGaXJz +dC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWlsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3BYHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIx +B8dOtINknS4p1aJkxIW9hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8 +om+rWV6lL8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLmSGHG +TPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM1tZUOt4KpLoDd7Nl +yP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws6wIDAQABo4G5MIG2MAsGA1UdDwQE +AwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNV +HR8EUTBPME2gS6BJhkdodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGll +bnRBdXRoZW50aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u7mFVbwQ+zzne +xRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0xtcgBEXkzYABurorbs6q15L+ +5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQrfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarV +NZ1yQAOJujEdxRBoUp7fooXFXAimeOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZ +w7JHpsIyYdfHb0gkUSeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx 
+OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +UTN USERFirst Object Root CA +============================ +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAb +BgNVBAMTFFVUTi1VU0VSRmlyc3QtT2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAz +NlowgZUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkx +HjAcBgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3dy51c2Vy +dHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicPHxzfOpuCaDDASmEd8S8O+r5596Uj71VR +loTN2+O5bj4x2AogZ8f02b+U60cEPgLOKqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQ +w5ujm9M89RKZd7G3CeBo5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vu +lBe3/IW+pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehbkkj7 +RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUCAwEAAaOBrzCBrDAL +BgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU2u1kdBScFDyr3ZmpvVsoTYs8 +ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtT2JqZWN0LmNybDApBgNVHSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQw +DQYJKoZIhvcNAQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXBmMiKVl0+7kNO +PmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU4U3GDZlDAQ0Slox4nb9QorFE +qmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK581OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCG +hU3IfdeLA/5u1fedFqySLKAj5ZyRUh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC 
+AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Qualified (Class QA) Root +================================= +-----BEGIN CERTIFICATE----- +MIIG0TCCBbmgAwIBAgIBezANBgkqhkiG9w0BAQUFADCByTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMUIwQAYDVQQDEzlOZXRMb2NrIE1pbm9zaXRldHQgS296amVn +eXpvaSAoQ2xhc3MgUUEpIFRhbnVzaXR2YW55a2lhZG8xHjAcBgkqhkiG9w0BCQEWD2luZm9AbmV0 +bG9jay5odTAeFw0wMzAzMzAwMTQ3MTFaFw0yMjEyMTUwMTQ3MTFaMIHJMQswCQYDVQQGEwJIVTER 
+MA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRvbnNhZ2kgS2Z0 +LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxQjBABgNVBAMTOU5ldExvY2sgTWlub3NpdGV0 +dCBLb3pqZWd5em9pIChDbGFzcyBRQSkgVGFudXNpdHZhbnlraWFkbzEeMBwGCSqGSIb3DQEJARYP +aW5mb0BuZXRsb2NrLmh1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx1Ilstg91IRV +CacbvWy5FPSKAtt2/GoqeKvld/Bu4IwjZ9ulZJm53QE+b+8tmjwi8F3JV6BVQX/yQ15YglMxZc4e +8ia6AFQer7C8HORSjKAyr7c3sVNnaHRnUPYtLmTeriZ539+Zhqurf4XsoPuAzPS4DB6TRWO53Lhb +m+1bOdRfYrCnjnxmOCyqsQhjF2d9zL2z8cM/z1A57dEZgxXbhxInlrfa6uWdvLrqOU+L73Sa58XQ +0uqGURzk/mQIKAR5BevKxXEOC++r6uwSEaEYBTJp0QwsGj0lmT+1fMptsK6ZmfoIYOcZwvK9UdPM +0wKswREMgM6r3JSda6M5UzrWhQIDAMV9o4ICwDCCArwwEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAQYwggJ1BglghkgBhvhCAQ0EggJmFoICYkZJR1lFTEVNISBFemVuIHRhbnVzaXR2 +YW55IGEgTmV0TG9jayBLZnQuIE1pbm9zaXRldHQgU3pvbGdhbHRhdGFzaSBTemFiYWx5emF0YWJh +biBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBBIG1pbm9zaXRldHQgZWxla3Ryb25p +a3VzIGFsYWlyYXMgam9naGF0YXMgZXJ2ZW55ZXN1bGVzZW5laywgdmFsYW1pbnQgZWxmb2dhZGFz +YW5hayBmZWx0ZXRlbGUgYSBNaW5vc2l0ZXR0IFN6b2xnYWx0YXRhc2kgU3phYmFseXphdGJhbiwg +YXogQWx0YWxhbm9zIFN6ZXJ6b2Rlc2kgRmVsdGV0ZWxla2JlbiBlbG9pcnQgZWxsZW5vcnplc2kg +ZWxqYXJhcyBtZWd0ZXRlbGUuIEEgZG9rdW1lbnR1bW9rIG1lZ3RhbGFsaGF0b2sgYSBodHRwczov +L3d3dy5uZXRsb2NrLmh1L2RvY3MvIGNpbWVuIHZhZ3kga2VyaGV0b2sgYXogaW5mb0BuZXRsb2Nr +Lm5ldCBlLW1haWwgY2ltZW4uIFdBUk5JTkchIFRoZSBpc3N1YW5jZSBhbmQgdGhlIHVzZSBvZiB0 +aGlzIGNlcnRpZmljYXRlIGFyZSBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIFF1YWxpZmllZCBDUFMg +YXZhaWxhYmxlIGF0IGh0dHBzOi8vd3d3Lm5ldGxvY2suaHUvZG9jcy8gb3IgYnkgZS1tYWlsIGF0 +IGluZm9AbmV0bG9jay5uZXQwHQYDVR0OBBYEFAlqYhaSsFq7VQ7LdTI6MuWyIckoMA0GCSqGSIb3 +DQEBBQUAA4IBAQCRalCc23iBmz+LQuM7/KbD7kPgz/PigDVJRXYC4uMvBcXxKufAQTPGtpvQMznN +wNuhrWw3AkxYQTvyl5LGSKjN5Yo5iWH5Upfpvfb5lHTocQ68d4bDBsxafEp+NFAwLvt/MpqNPfMg +W/hqyobzMUwsWYACff44yTB1HLdV47yfuqhthCgFdbOLDcCRVCHnpgu0mfVRQdzNo0ci2ccBgcTc +R08m6h/t280NmPSjnLRzMkqWmf68f8glWPhY83ZmiVSkpj7EUFy6iRiCdUgh0k8T6GB+B3bbELVR +5qq5aKrN9p2QdRLqOBrKROi3macqaJVmlaut74nLYKkGEsaUR+ko +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 
+ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD +KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB 
+jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 
+qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ 
+untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk 
+7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Firmaprofesional Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEVzCCAz+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBnTELMAkGA1UEBhMCRVMxIjAgBgNVBAcT +GUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMTOUF1dG9yaWRhZCBkZSBDZXJ0aWZp +Y2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODEmMCQGCSqGSIb3DQEJARYXY2FA +ZmlybWFwcm9mZXNpb25hbC5jb20wHhcNMDExMDI0MjIwMDAwWhcNMTMxMDI0MjIwMDAwWjCBnTEL +MAkGA1UEBhMCRVMxIjAgBgNVBAcTGUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMT +OUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2 +ODEmMCQGCSqGSIb3DQEJARYXY2FAZmlybWFwcm9mZXNpb25hbC5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDnIwNvbyOlXnjOlSztlB5uCp4Bx+ow0Syd3Tfom5h5VtP8c9/Qit5V +j1H5WuretXDE7aTt/6MNbg9kUDGvASdYrv5sp0ovFy3Tc9UTHI9ZpTQsHVQERc1ouKDAA6XPhUJH +lShbz++AbOCQl4oBPB3zhxAwJkh91/zpnZFx/0GaqUC1N5wpIE8fUuOgfRNtVLcK3ulqTgesrBlf +3H5idPayBQC6haD9HThuy1q7hryUZzM1gywfI834yJFxzJeL764P3CkDG8A563DtwW4O2GcLiam8 +NeTvtjS0pbbELaW+0MOUJEjb35bTALVmGotmBQ/dPz/LP6pemkr4tErvlTcbAgMBAAGjgZ8wgZww +KgYDVR0RBCMwIYYfaHR0cDovL3d3dy5maXJtYXByb2Zlc2lvbmFsLmNvbTASBgNVHRMBAf8ECDAG +AQH/AgEBMCsGA1UdEAQkMCKADzIwMDExMDI0MjIwMDAwWoEPMjAxMzEwMjQyMjAwMDBaMA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUMwugZtHq2s7eYpMEKFK1FH84aLcwDQYJKoZIhvcNAQEFBQAD +ggEBAEdz/o0nVPD11HecJ3lXV7cVVuzH2Fi3AQL0M+2TUIiefEaxvT8Ub/GzR0iLjJcG1+p+o1wq +u00vR+L4OQbJnC4xGgN49Lw4xiKLMzHwFgQEffl25EvXwOaD7FnMP97/T2u3Z36mhoEyIwOdyPdf +wUpgpZKpsaSgYMN4h7Mi8yrrW6ntBas3D7Hi05V2Y1Z0jFhyGzflZKG+TQyTmAyX9odtsz/ny4Cm +7YjHX1BiAuiZdBbQ5rQ58SfLyEDW44YQqSMSkuBpQWOnryULwMWSyx6Yo1q6xTMPoJcB3X/ge9YG +VM+h4k0460tQtcsm9MracEpqoeJ5quGnM/b9Sh/22WA= +-----END CERTIFICATE----- + +Wells Fargo Root CA +=================== +-----BEGIN CERTIFICATE----- +MIID5TCCAs2gAwIBAgIEOeSXnjANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC1dlbGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTEvMC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDAxMDExMTY0MTI4WhcNMjEwMTE0MTY0MTI4WjCBgjELMAkGA1UEBhMCVVMxFDASBgNVBAoTC1dl +bGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEv +MC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVqDM7Jvk0/82bfuUER84A4n135zHCLielTWi5MbqNQ1mX +x3Oqfz1cQJ4F5aHiidlMuD+b+Qy0yGIZLEWukR5zcUHESxP9cMIlrCL1dQu3U+SlK93OvRw6esP3 +E48mVJwWa2uv+9iWsWCaSOAlIiR5NM4OJgALTqv9i86C1y8IcGjBqAr5dE8Hq6T54oN+J3N0Prj5 +OEL8pahbSCOz6+MlsoCultQKnMJ4msZoGK43YjdeUXWoWGPAUe5AeH6orxqg4bB4nVCMe+ez/I4j +sNtlAHCEAQgAFG5Uhpq6zPk3EPbg3oQtnaSFN9OH4xXQwReQfhkhahKpdv0SAulPIV4XAgMBAAGj +YTBfMA8GA1UdEwEB/wQFMAMBAf8wTAYDVR0gBEUwQzBBBgtghkgBhvt7hwcBCzAyMDAGCCsGAQUF +BwIBFiRodHRwOi8vd3d3LndlbGxzZmFyZ28uY29tL2NlcnRwb2xpY3kwDQYJKoZIhvcNAQEFBQAD +ggEBANIn3ZwKdyu7IvICtUpKkfnRLb7kuxpo7w6kAOnu5+/u9vnldKTC2FJYxHT7zmu1Oyl5GFrv +m+0fazbuSCUlFLZWohDo7qd/0D+j0MNdJu4HzMPBJCGHHt8qElNvQRbn7a6U+oxy+hNH8Dx+rn0R +OhPs7fpvcmR7nX1/Jv16+yWt6j4pf0zjAFcysLPp7VMX2YuyFA4w6OXVE8Zkr8QA1dhYJPz1j+zx +x32l2w8n0cbyQIjmH/ZhqPRCyLk306m+LFZ4wnKbWV01QIroTmMatukgalHizqSQ33ZwmVxwQ023 +tqcZZE6St8WRPH9IFmV7Fv3L/PvZ1dZPIWU7Sn9Ho/s= +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG 
+EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- 
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e 
+e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- 
+MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Platinum CA - G2 +========================== +-----BEGIN CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWduIFBsYXRpbnVtIENBIC0gRzIw +HhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAwWjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMM +U3dpc3NTaWduIEFHMSMwIQYDVQQDExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu 
+669yIIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2HtnIuJpX+UF +eNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+6ixuEFGSzH7VozPY1kne +WCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5objM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIo +j5+saCB9bzuohTEJfwvH6GXp43gOCWcwizSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/6 +8++QHkwFix7qepF6w9fl+zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34T +aNhxKFrYzt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaPpZjy +domyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtFKwH3HBqi7Ri6Cr2D ++m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuWae5ogObnmLo2t/5u7Su9IPhlGdpV +CX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMBAAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCv +zAeHFUdvOMW0ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW +IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUAA4ICAQAIhab1 +Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0uMoI3LQwnkAHFmtllXcBrqS3 +NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4 +U99REJNi54Av4tHgvI42Rncz7Lj7jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8 +KV2LwUvJ4ooTHbG/u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl +9x8DYSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1puEa+S1B +aYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXaicYwu+uPyyIIoK6q8QNs +OktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbGDI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSY +Mdp08YSTcU1f+2BY0fvEwW2JorsgH51xkcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAci +IfNAChs0B0QTwoRqjt8ZWr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf 
+KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN 
+NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC 
+Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB 
+AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc 
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +S-TRUST Authentication and Encryption Root CA 2005 PN +===================================================== +-----BEGIN CERTIFICATE----- +MIIEezCCA2OgAwIBAgIQNxkY5lNUfBq1uMtZWts1tzANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCREUxIDAeBgNVBAgTF0JhZGVuLVd1ZXJ0dGVtYmVyZyAoQlcpMRIwEAYDVQQHEwlTdHV0dGdh +cnQxKTAnBgNVBAoTIERldXRzY2hlciBTcGFya2Fzc2VuIFZlcmxhZyBHbWJIMT4wPAYDVQQDEzVT +LVRSVVNUIEF1dGhlbnRpY2F0aW9uIGFuZCBFbmNyeXB0aW9uIFJvb3QgQ0EgMjAwNTpQTjAeFw0w +NTA2MjIwMDAwMDBaFw0zMDA2MjEyMzU5NTlaMIGuMQswCQYDVQQGEwJERTEgMB4GA1UECBMXQmFk +ZW4tV3VlcnR0ZW1iZXJnIChCVykxEjAQBgNVBAcTCVN0dXR0Z2FydDEpMCcGA1UEChMgRGV1dHNj +aGVyIFNwYXJrYXNzZW4gVmVybGFnIEdtYkgxPjA8BgNVBAMTNVMtVFJVU1QgQXV0aGVudGljYXRp +b24gYW5kIEVuY3J5cHRpb24gUm9vdCBDQSAyMDA1OlBOMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA2bVKwdMz6tNGs9HiTNL1toPQb9UY6ZOvJ44TzbUlNlA0EmQpoVXhOmCTnijJ4/Ob 
+4QSwI7+Vio5bG0F/WsPoTUzVJBY+h0jUJ67m91MduwwA7z5hca2/OnpYH5Q9XIHV1W/fuJvS9eXL +g3KSwlOyggLrra1fFi2SU3bxibYs9cEv4KdKb6AwajLrmnQDaHgTncovmwsdvs91DSaXm8f1Xgqf +eN+zvOyauu9VjxuapgdjKRdZYgkqeQd3peDRF2npW932kKvimAoA0SVtnteFhy+S8dF2g08LOlk3 +KC8zpxdQ1iALCvQm+Z845y2kuJuJja2tyWp9iRe79n+Ag3rm7QIDAQABo4GSMIGPMBIGA1UdEwEB +/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgEGMCkGA1UdEQQiMCCkHjAcMRowGAYDVQQDExFTVFJv +bmxpbmUxLTIwNDgtNTAdBgNVHQ4EFgQUD8oeXHngovMpttKFswtKtWXsa1IwHwYDVR0jBBgwFoAU +D8oeXHngovMpttKFswtKtWXsa1IwDQYJKoZIhvcNAQEFBQADggEBAK8B8O0ZPCjoTVy7pWMciDMD +pwCHpB8gq9Yc4wYfl35UvbfRssnV2oDsF9eK9XvCAPbpEW+EoFolMeKJ+aQAPzFoLtU96G7m1R08 +P7K9n3frndOMusDXtk3sU5wPBG7qNWdX4wple5A64U8+wwCSersFiXOMy6ZNwPv2AtawB6MDwidA +nwzkhYItr5pCHdDHjfhA7p0GVxzZotiAFP7hYy0yh9WUUpY6RsZxlj33mA6ykaqP2vROJAA5Veit +F7nTNCtKqUDMFypVZUF0Qn71wK/Ik63yGFs9iQzbRzkk+OBM8h+wPQrKBU6JIRrjKpms/H+h8Q8b +Hz2eBIPdltkdOpQ= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a 
+86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. +====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 
+MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i 
+SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy +vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign CA +========== +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0MRMwEQYDVQQD 
+EwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTMy +MThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMTCkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNp +Z24xCzAJBgNVBAYTAklMMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49q +ROR+WCf4C9DklBKK8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTy +P2Q298CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb2CEJKHxN +GGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxCejVb7Us6eva1jsz/D3zk +YDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7KpiXd3DTKaCQeQzC6zJMw9kglcq/QytNuEM +rkvF7zuZ2SOzW120V+x0cAwqTwIDAQABo4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAy +oDCgLoYsaHR0cDovL2ZlZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0P +AQH/BAQDAgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRLAZs+ +VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWdfoPPbrxHbvUanlR2 +QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0McXS6hMTXcpuEfDhOZAYnKuGntewI +mbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb +/627HOkthIDYIb6FUtnUdLlphbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VG +zT2ouvDzuFYkRes3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G 
+A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF 
+n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 
+htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt 
+vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI 
+hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 
+============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC +VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU 
+cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil +e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 
+qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh 
+AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD 
+T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCED9pHoGc8JpK83P/uUii5N0wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAxIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDlGb9to1ZhLZlIcfZn3rmN67eehoAKkQ76OCWvRoiC5XOooJskXQ0fzGVuDLDQ +VoQYh5oGmxChc9+0WDlrbsH2FdWoqD+qEgaNMax/sDTXjzRniAnNFBHiTkVWaR94AoDa3EeRKbs2 +yWNcxeDXLYd7obcysHswuiovMaruo2fa2wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFgVKTk8d6Pa +XCUDfGD67gmZPCcQcMgMCeazh88K4hiWNWLMv5sneYlfycQJ9M61Hd8qveXbhpxoJeUwfLaJFf5n +0a3hUKw8fGJLj7qE1xIVGx/KXQ/BUpQqEZnae88MNhPVNdwQGVnqlMEAv3WP2fr9dgTbYruQagPZ +RjXZ+Hxb +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 
+f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv 
+YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +TC TrustCenter Universal CA III +=============================== +-----BEGIN CERTIFICATE----- +MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAe +Fw0wOTA5MDkwODE1MjdaFw0yOTEyMzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNU +QyBUcnVzdENlbnRlciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0Ex +KDAmBgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF5+cvAqBNLaT6hdqbJYUt +QCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYvDIRlzg9uwliT6CwLOunBjvvya8o84pxO +juT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8vzArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+Eut +CHnNaYlAJ/Uqwa1D7KRTyGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1 +M4BDj5yjdipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBhMB8G +A1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI4jANBgkqhkiG9w0BAQUFAAOCAQEA +g8ev6n9NCjw5sWi+e22JLumzCecYV42FmhfzdkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+ +KGwWaODIl0YgoGhnYIg5IFHYaAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhK +BgePxLcHsU0GDeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV +CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPHLQNjO9Po5KIq +woIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI 
+plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z 
+WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf 
+VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- 
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV 
+C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK 
++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q 
+c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH 
+iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ 
++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI 
+1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ 
+RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib 
+D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI 
+osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA 
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb new file mode 100644 index 0000000..26a1f58 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb @@ -0,0 +1,89 @@ +# encoding: UTF-8 +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) +require 'securerandom' + +describe 'Account client' do + before(:all) do + client_1 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_1'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_1'] + }) + + @index_1 = client_1.init_index(index_name('account_client_1')) + + client_2 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_1'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_1'] + }) + + @index_2 = client_2.init_index(index_name('account_client_2')) + + client_3 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_2'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_2'] + }) + + @index_3 = client_3.init_index(index_name('account_client_3')) + + @index_1.delete_index rescue 'not fatal' + @index_2.delete_index rescue 'not fatal' + @index_3.delete_index rescue 'not fatal' + end + + after(:all) do + @index_1.delete_index rescue 'not fatal' + @index_2.delete_index rescue 'not fatal' + @index_3.delete_index rescue 'not fatal' + end + + it 'should not allow operations in the same application' do + expect { + 
Algolia::AccountClient.copy_index!(@index_1, @index_2) + }.to raise_exception( + Algolia::AlgoliaError, + 'The indexes are in the same application. Use Algolia::Client.copy_index instead.' + ) + end + + it 'should perform a cross app copy index and assert that destination must not exist' do + + @index_1.save_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + + @index_1.batch_rules! ([ + { + :objectID => 'one', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + } + ]) + + @index_1.batch_synonyms! ([ + {:objectID => 'one', :type => 'synonym', :synonyms => ['San Francisco', 'SF']} + ]) + + @index_1.set_settings! ({:searchableAttributes => ['objectID']}) + + Algolia::AccountClient.copy_index!(@index_1, @index_3) + + res = @index_3.search('') + res['nbHits'].should eq(1500) + + res = @index_3.search_rules('')['hits'] + res.size.should eq(1) + res[0]['objectID'].should eq('one') + + res = @index_3.search_synonyms('')['hits'] + res.size.should eq(1) + res[0]['objectID'].should eq('one') + + @index_3.get_settings['searchableAttributes'].should eq(['objectID']) + + expect { + Algolia::AccountClient.copy_index!(@index_1, @index_3) + }.to raise_exception( + Algolia::AlgoliaError, + 'Destination index already exists. Please delete it before copying index across applications.' + ) + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/client_spec.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/client_spec.rb new file mode 100644 index 0000000..fb664ff --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/client_spec.rb @@ -0,0 +1,1426 @@ +# encoding: UTF-8 +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) +require 'base64' + +def is_include(array, attr, value) + array.each do |elt| + if elt[attr] == value + return true + end + end + return false +end + +describe 'API keys', :maintainers_only => true do + before(:all) do + @index = Algolia::Index.new(safe_index_name("àlgol?a")) + @index.delete_index rescue "not fatal" + end + + after(:all) do + @index.delete_index rescue "not fatal" + end + + def wait_key(index, key, &block) + 1.upto(60) do # do not wait too long + begin + k = index.get_api_key(key) + if block_given? + return if yield k + # not found + sleep 1 + next + end + return + rescue + # not found + sleep 1 + end + end + end + + def wait_key_missing(index, key) + 1.upto(60) do # do not wait too long + begin + k = index.get_api_key(key) + sleep 1 + rescue + # not found + return + end + end + end + + def wait_global_key(key, &block) + 1.upto(60) do # do not wait too long + begin + k = Algolia.get_api_key(key) + if block_given? 
+ return if yield k + # not found + sleep 1 + next + end + return + rescue + # not found + sleep 1 + end + end + end + + def wait_global_key_missing(key) + 1.upto(60) do # do not wait too long + begin + k = Algolia.get_api_key(key) + sleep 1 + rescue + # not found + return + end + end + end + + it "should test index keys" do + @index.set_settings!({}) # ensure the index exists + + resIndex = @index.list_api_keys + newIndexKey = @index.add_api_key(['search']) + newIndexKey['key'].should_not eq("") + wait_key(@index, newIndexKey['key']) + resIndexAfter = @index.list_api_keys + is_include(resIndex['keys'], 'value', newIndexKey['key']).should eq(false) + is_include(resIndexAfter['keys'], 'value', newIndexKey['key']).should eq(true) + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey['acl'][0].should eq('search') + @index.update_api_key(newIndexKey['key'], ['addObject']) + wait_key(@index, newIndexKey['key']) do |key| + key['acl'] == ['addObject'] + end + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey['acl'][0].should eq('addObject') + @index.delete_api_key(newIndexKey['key']) + wait_key_missing(@index, newIndexKey['key']) + resIndexEnd = @index.list_api_keys + is_include(resIndexEnd['keys'], 'value', newIndexKey['key']).should eq(false) + end + + it "should test global keys" do + res = Algolia.list_api_keys + newKey = Algolia.add_api_key(['search']) + newKey['key'].should_not eq("") + wait_global_key(newKey['key']) + resAfter = Algolia.list_api_keys + is_include(res['keys'], 'value', newKey['key']).should eq(false) + is_include(resAfter['keys'], 'value', newKey['key']).should eq(true) + key = Algolia.get_api_key(newKey['key']) + key['acl'][0].should eq('search') + Algolia.update_api_key(newKey['key'], ['addObject']) + wait_global_key(newKey['key']) do |key| + key['acl'] == ['addObject'] + end + key = Algolia.get_api_key(newKey['key']) + key['acl'][0].should eq('addObject') + Algolia.delete_api_key(newKey['key']) + wait_global_key_missing(newKey['key']) + resEnd = Algolia.list_api_keys + is_include(resEnd['keys'], 'value', newKey['key']).should eq(false) + + # Restore the deleted key + Algolia.restore_api_key(newKey['key']) + wait_global_key(newKey['key']) + key_end = Algolia.list_api_keys + is_include(key_end['keys'], 'value', newKey['key']).should eq(true) + + # Re-delete the key + Algolia.delete_api_key(newKey['key']) + end + + it "Check add keys" do + newIndexKey = @index.add_api_key(['search']) + newIndexKey.should have_key('key') + newIndexKey['key'].should be_a(String) + newIndexKey.should have_key('createdAt') + newIndexKey['createdAt'].should be_a(String) + sleep 5 # no task ID here + resIndex = @index.list_api_keys + resIndex.should have_key('keys') + resIndex['keys'].should be_a(Array) + resIndex['keys'][0].should have_key('value') + resIndex['keys'][0]['value'].should be_a(String) + resIndex['keys'][0].should have_key('acl') + resIndex['keys'][0]['acl'].should be_a(Array) + resIndex['keys'][0].should have_key('validity') + resIndex['keys'][0]['validity'].should be_a(Integer) + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey.should have_key('value') + indexKey['value'].should be_a(String) + indexKey.should have_key('acl') + indexKey['acl'].should be_a(Array) + indexKey.should have_key('validity') + indexKey['validity'].should be_a(Integer) + task = @index.delete_api_key(newIndexKey['key']) + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + end +end + +describe 'Client' do + before(:all) do + @index = 
Algolia::Index.new(safe_index_name("àlgol?a")) + @index.delete_index rescue "not fatal" + end + + after(:all) do + @index.delete_index rescue "not fatal" + end + + it "should tell if index exists" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + expect(@index.exists?).to be true + end + + it "should tell if index does not exist" do + index = Algolia::Index.new('nonexistent_index') + expect(index.exists?).to be false + end + + it "should add a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = @index.search("john") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = @index.search("john") + res["hits"].length.should eq(1) + @index.partial_update_object!({ :name => "Robert Doe"}, "1") + res = @index.search("robert") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object, or add it if it doesn't exist" do + res = @index.search("tonny@parker.org") + res["hits"].length.should eq(0) + @index.partial_update_object!({ :email => "tonny@parker.org" }, "1") + res = @index.search("tonny@parker.org") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object, but don't add it if it doesn't exist" do + @index.partial_update_object!({ :email => "alex@boom.org" }, "51", false) + res = @index.search("alex@boom.org") + res["hits"].length.should eq(0) + end + + it "should partial update a batch of objects, and add them if they don't exist" do + batch = [ + { :objectID => "1", :email => "john@wanna.org" }, + { :objectID => "2", :email => "robert@wanna.org" } + ] + @index.partial_update_objects!(batch) + res = @index.search("@wanna.org") + res["hits"].length.should eq(2) + end + + it "should partial update a batch of objects, but don't add them if they don't exist" do + create_if_not_exits = false + batch = [ + { :objectID => "11", :email => "john@be.org" }, + { :objectID => "22", :email => "robert@be.org" } + ] + @index.partial_update_objects!(batch, create_if_not_exits) + res = @index.search("@be.org") + res["hits"].length.should eq(0) + end + + it "should add a set of objects" do + @index.add_objects!([ + { :name => "Another", :email => "another1@example.org" }, + { :name => "Another", :email => "another2@example.org" } + ]) + res = @index.search("another") + res["hits"].length.should eq(2) + end + + it "should partial update a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "2") + res = @index.search("john") + res["hits"].length.should eq(2) + @index.partial_update_objects!([{ :name => "Robert Doe", :objectID => "1"}, { :name => "Robert Doe", :objectID => "2"}]) + res = @index.search("robert") + res["hits"].length.should eq(2) + end + + it "should save a set of objects with their ids" do + @index.save_object!({ :name => "objectid", :email => "objectid1@example.org", :objectID => 101 }) + res = @index.search("objectid") + res["hits"].length.should eq(1) + end + + it "should save a set of objects with their ids" do + @index.save_objects!([ + { :name => "objectid", :email => "objectid1@example.org", :objectID => 101 }, + { :name => "objectid", :email => "objectid2@example.org", :objectID => 102 } + ]) + res = @index.search("objectid") + res["hits"].length.should eq(2) + end + + it "should replace all objects" do + 
@index.save_objects!([{:objectID => '1'}, {:objectID => '2'}]) + @index.replace_all_objects!([{'color' => 'black'}, {:objectID => '4', 'color' => 'green'}]) + + res = @index.search('') + res["hits"].length.should eq(2) + res = @index.search('black') + res["hits"][0]['color'].should eq('black') + @index.get_object('4')['color'].should eq('green') + end + + it "should throw an exception if invalid argument" do + expect { @index.add_object!([ {:name => "test"} ]) }.to raise_error(ArgumentError) + expect { @index.add_objects!([ [ {:name => "test"} ] ]) }.to raise_error(ArgumentError) + expect { @index.save_object(1) }.to raise_error(ArgumentError) + expect { @index.save_object("test") }.to raise_error(ArgumentError) + expect { @index.save_object({ :objectID => 42 }.to_json) }.to raise_error(ArgumentError) + expect { @index.save_objects([{}, ""]) }.to raise_error(ArgumentError) + expect { @index.save_objects([1]) }.to raise_error(ArgumentError) + expect { @index.save_objects!([1]) }.to raise_error(ArgumentError) + expect { @index.save_object({ :foo => 42 }) }.to raise_error(ArgumentError) # missing objectID + end + + it "should be thread safe" do + @index.clear! + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + + threads = [] + 64.times do + t = Thread.new do + 10.times do + res = @index.search("john") + res["hits"].length.should eq(2) + end + end + threads << t + end + threads.each { |t| t.join } + end + + if !defined?(RUBY_ENGINE) || RUBY_ENGINE != 'jruby' + it "should be fork safe" do + 8.times do + Process.fork do + 10.times do + res = @index.search("john") + res["hits"].length.should eq(2) + end + end + end + Process.waitall + end + end + + it "should clear the index" do + @index.clear! + @index.search("")["hits"].length.should eq(0) + end + + it "should have another index after" do + index = Algolia::Index.new(safe_index_name("àlgol?a")) + begin + index.delete_index! 
+ rescue + # friends_2 does not exist + end + res = Algolia.list_indexes + is_include(res['items'], 'name', safe_index_name('àlgol?a')).should eq(false) + index.add_object!({ :name => "Robert" }) + resAfter = Algolia.list_indexes + is_include(resAfter['items'], 'name', safe_index_name('àlgol?a')).should eq(true) + end + + it "should get a object" do + @index.clear_index + @index.add_object!({:firstname => "Robert"}) + @index.add_object!({:firstname => "Robert2"}) + res = @index.search('') + res["nbHits"].should eq(2) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq(res['hits'][0]['firstname']) + + object = @index.get_object(res['hits'][0]['objectID'], 'firstname') + object['firstname'].should eq(res['hits'][0]['firstname']) + + objects = @index.get_objects([ res['hits'][0]['objectID'], res['hits'][1]['objectID'] ]) + objects.size.should eq(2) + end + + it "should restrict attributesToRetrieve" do + @index.clear_index + @index.add_object({:firstname => "Robert", :lastname => "foo", :objectID => 1}) + @index.add_object!({:firstname => "Robert2", :lastname => "bar", :objectID => 2}) + objects = @index.get_objects([1, 2], ['firstname']) + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "objectID"=>"2"}) + + objects = @index.get_objects([1, 2], [:firstname]) + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "objectID"=>"2"}) + + objects = @index.get_objects(["1", "2"], 'firstname,lastname') + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "lastname"=>"foo", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "lastname"=>"bar", "objectID"=>"2"}) + end + + it "should delete the object" do + @index.clear + @index.add_object!({:firstname => "Robert"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(1) + @index.delete_object!(res['hits'][0]['objectID']) + @index.search('')['nbHits'].should eq(0) + end + + it "should not delete the index because the objectID is blank" do + @index.clear + @index.add_object!({:firstname => "Robert"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(1) + expect { @index.delete_object('') }.to raise_error(ArgumentError) + expect { @index.delete_object!(nil) }.to raise_error(ArgumentError) + @index.search('')['nbHits'].should eq(1) + end + + it "should delete several objects" do + @index.clear + @index.add_object!({:firstname => "Robert1"}) + @index.add_object!({:firstname => "Robert2"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(2) + @index.delete_objects!(res['hits'].map { |h| h['objectID'] }) + @index.search('')['nbHits'].should eq(0) + end + + it "should delete several objects by query" do + @index.clear + @index.add_object({:firstname => "Robert1"}) + @index.add_object!({:firstname => "Robert2"}) + @index.search('')['nbHits'].should eq(2) + @index.delete_by_query!('rob') + @index.search('')['nbHits'].should eq(0) + end + + it "should not wipe the entire index with delete_by_query" do + expect { @index.delete_by_query(nil) }.to raise_error(ArgumentError) + end + + context 'delete_by' do + it 'should not wipe the entire index' do + expect { @index.delete_by(nil) }.to raise_error(ArgumentError) + end + + it 'should fail with query passed' do + @index.clear + @index.add_object({:firstname => 'Robert1'}) + @index.add_object!({:firstname => 'Robert2'}) + 
@index.search('')['nbHits'].should eq(2) + expect { @index.delete_by({ 'query' => 'abc' }) }.to raise_error(Algolia::AlgoliaProtocolError) + @index.search('')['nbHits'].should eq(2) + end + + it 'should work with filters' do + @index.clear + @index.set_settings!({:attributesForFaceting => ['firstname']}) + @index.add_object({:firstname => 'Robert1'}) + @index.add_object!({:firstname => 'Robert2'}) + @index.search('')['nbHits'].should eq(2) + @index.delete_by!({ 'filters' => 'firstname:Robert1' }) + @index.search('')['nbHits'].should eq(1) + end + end + + it 'should find objects when needed' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + + index.save_objects!([ + {:company => 'Algolia', :name => 'Julien Lemoine', :objectID => 'julien-lemoine'}, + {:company => 'Algolia', :name => 'Nicolas Dessaigne', :objectID => 'nicolas-dessaigne'}, + {:company => 'Amazon', :name =>' "Jeff Bezos', :objectID => '162590850'}, + {:company => 'Apple', :name => 'Steve Jobs', :objectID => '162590860'}, + {:company => 'Apple', :name => 'Steve Wozniak', :objectID => '162590870'}, + {:company => 'Arista Networks', :name => 'Jayshree Ullal', :objectID => '162590880'}, + {:company => 'Google', :name => 'Larry Page', :objectID => '162590890'}, + {:company => 'Google', :name => 'Rob Pike', :objectID => '162590900'}, + {:company => 'Google', :name => 'Sergueï Brin', :objectID => '162590910'}, + {:company => 'Microsoft', :name => 'Bill Gates', :objectID => '162590920'}, + {:company => 'SpaceX', :name => 'Elon Musk', :objectID => '162590930'}, + {:company => 'Tesla', :name => 'Elon Musk', :objectID => '162590940'}, + {:company => 'Yahoo', :name => 'Marissa Mayer', :objectID => '162590950'}, + ]) + + res = index.search('algolia') + Algolia::Index.get_object_position(res, 'nicolas-dessaigne').should eq(0) + Algolia::Index.get_object_position(res, 'julien-lemoine').should eq(1) + Algolia::Index.get_object_position(res, '').should eq(-1) + + expect { + index.find_object({'query' => '', 'paginate' => true}) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + expect { + index.find_object({'query' => '', 'paginate' => true}) { false } + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + obj = index.find_object({'query' => '', 'paginate' => true}) { true } + obj['position'].should eq(0) + obj['page'].should eq(0) + + # we use a lambda and convert it to a block with `&` + # so as not to repeat the condition + condition = lambda do |obj| + obj.key?('company') and obj['company'] == 'Apple' + end + + expect { + index.find_object({'query' => 'algolia', 'paginate' => true}, &condition) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + expect { + index.find_object({'query' => '', 'paginate' => false, 'hitsPerPage' => 5}, &condition) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + obj = index.find_object({'query' => '', 'paginate' => true, 'hitsPerPage' => 5}, &condition) + obj['position'].should eq(0) + obj['page'].should eq(2) + end + + it "should copy the index" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.search('')['nbHits'].should eq(1) + + Algolia.copy_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à")) + @index.delete_index! 
+ + index.search('')['nbHits'].should eq(1) + index.delete_index! + end + + it "should copy only settings" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + res = @index.set_settings!({ + 'searchableAttributes' => ['one'], + }) + + @index.wait_task(res['taskID']) + Algolia.copy_settings!(@index.name, index.name) + @index.delete_index! + + index.get_settings['searchableAttributes'].should eq(['one']) + index.delete_index! + end + + it "should copy only synonyms" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + @index.save_synonym!('foo', { + :objectID => 'foo', :synonyms => ['car', 'vehicle', 'auto'], :type => 'synonym', + }) + + Algolia.copy_synonyms!(@index.name, index.name) + @index.delete_index! + + index.get_synonym('foo')['objectID'].should eq('foo') + index.delete_index! + end + + it "should copy only rules" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + @index.save_rule!('bar', { + :objectID => 'bar', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + }) + + Algolia.copy_rules!(@index.name, index.name) + @index.delete_index! + + index.get_rule('bar')['objectID'].should eq('bar') + index.delete_index! + end + + it "should copy parts of the index only" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index! index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.batch_synonyms! [ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + @index.search('')['nbHits'].should eq(1) + @index.search_synonyms('')['nbHits'].should eq(2) + + res = Algolia.copy_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à"), ["synonyms"]) + + @index.delete_index! + + index.search_synonyms('')['nbHits'].should eq(2) + index.delete_index! + end + + it "should move the index" do + @index.clear_index rescue "friends does not exist" + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + Algolia.delete_index! index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.search('')['nbHits'].should eq(1) + + Algolia.move_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à")) + + index.search('')['nbHits'].should eq(1) + index.delete_index + end + + it "should retrieve the object" do + @index.clear_index rescue "friends does not exist" + @index.add_object!({:firstname => "Robert"}) + + res = @index.browse + + res['hits'].size.should eq(1) + res['hits'][0]['firstname'].should eq("Robert") + end + + it "should get logs" do + + expect { + Algolia::Index.new(safe_index_name('thisdefinitelyshouldntexist')).get_settings + }.to raise_error(Algolia::AlgoliaProtocolError) + res = Algolia.get_logs(0, 20, true) + + res['logs'].size.should > 0 + (res['logs'][0]['answer_code'].to_i / 100).should eq(4) + end + + it "should search on multipleIndex" do + @index.clear_index! 
rescue "Not fatal" + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = Algolia.multiple_queries([{:index_name => safe_index_name("àlgol?a"), "query" => ""}]) + res["results"][0]["hits"].length.should eq(1) + + res = Algolia.multiple_queries([{"indexName" => safe_index_name("àlgol?a"), "query" => ""}], "indexName") + res["results"][0]["hits"].length.should eq(1) + end + + it "should get multiple objectIDs" do + index_name_1 = safe_index_name("àlgol?a-multi") + index_1 = Algolia::Index.new(index_name_1) + index_1.save_object!({:objectID => "obj1-multi-get", :name => 'test'}) + + index_name_2 = safe_index_name("àlgol?a-multi") + index_2 = Algolia::Index.new(index_name_2) + index_2.save_object!({:objectID => "obj2-multi-get", :name => 'another index'}) + + requests = [ + { "indexName" => index_name_1, "objectID" => "obj1-multi-get" }, + { "indexName" => index_name_2, "objectID" => "obj2-multi-get" } + ] + + response = Algolia.multiple_get_objects(requests) + + response['results'].count.should eq(2) + + index_1.delete_index rescue "not fatal" + index_2.delete_index rescue "not fatal" + end + + it "should throw if the index_name is missing in multiple_queries" do + expect { Algolia.multiple_queries([{"query" => ""}]) }.to raise_error(ArgumentError) + end + + it "should accept custom batch" do + @index.clear_index! rescue "Not fatal" + request = { "requests" => [ + { + "action" => "addObject", + "body" => {"firstname" => "Jimmie", + "lastname" => "Barninger"} + }, + { + "action" => "addObject", + "body" => {"firstname" => "Warren", + "lastname" => "Speach"} + }, + { + "action" => "updateObject", + "body" => {"firstname" => "Jimmie", + "lastname" => "Barninger", + "objectID" => "43"} + }, + { + "action" => "updateObject", + "body" => {"firstname" => "Warren", + "lastname" => "Speach"}, + "objectID" => "42" + } + ]} + res = @index.batch!(request) + @index.search('')['nbHits'].should eq(4) + end + + it "should allow an array of tags" do + @index.add_object!({ :name => "P1", :_tags => "t1" }) + @index.add_object!({ :name => "P2", :_tags => "t1" }) + @index.add_object!({ :name => "P3", :_tags => "t2" }) + @index.add_object!({ :name => "P4", :_tags => "t3" }) + @index.add_object!({ :name => "P5", :_tags => ["t3", "t4"] }) + + @index.search("", { :tagFilters => ["t1"] })['hits'].length.should eq(2) # t1 + @index.search("", { :tagFilters => ["t1", "t2"] })['hits'].length.should eq(0) # t1 AND t2 + @index.search("", { :tagFilters => ["t3", "t4"] })['hits'].length.should eq(1) # t3 AND t4 + @index.search("", { :tagFilters => [["t1", "t2"]] })['hits'].length.should eq(3) # t1 OR t2 + end + + it "should be facetable" do + @index.clear! 
+ @index.set_settings( { :attributesForFacetting => ["f", "g"] }) + @index.add_object!({ :name => "P1", :f => "f1", :g => "g1" }) + @index.add_object!({ :name => "P2", :f => "f1", :g => "g2" }) + @index.add_object!({ :name => "P3", :f => "f2", :g => "g2" }) + @index.add_object!({ :name => "P4", :f => "f3", :g => "g2" }) + + res = @index.search("", { :facets => "f" }) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should eq(1) + res['facets']['f']['f3'].should eq(1) + + res = @index.search("", { :facets => "f", :facetFilters => ["f:f1"] }) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + + res = @index.search("", { :facets => "f", :facetFilters => ["f:f1", "g:g2"] }) + res['facets']['f']['f1'].should eq(1) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + + res = @index.search("", { :facets => "f,g", :facetFilters => [["f:f1", "g:g2"]] }) + res['nbHits'].should eq(4) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should eq(1) + res['facets']['f']['f3'].should eq(1) + + res = @index.search("", { :facets => "f,g", :facetFilters => [["f:f1", "g:g2"], "g:g1"] }) + res['nbHits'].should eq(1) + res['facets']['f']['f1'].should eq(1) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + res['facets']['g']['g1'].should eq(1) + res['facets']['g']['g2'].should be_nil + end + + it "should handle slash in objectId" do + @index.clear_index!() + @index.add_object!({:firstname => "Robert", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('Robert') + object = @index.get_object(res['hits'][0]['objectID'], 'firstname') + object['firstname'].should eq('Robert') + + @index.save_object!({:firstname => "George", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('George') + + @index.partial_update_object!({:firstname => "Sylvain", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('Sylvain') + + end + + it "Check attributes list_indexes:" do + res = Algolia::Index.all + res.should have_key('items') + res['items'][0].should have_key('name') + res['items'][0]['name'].should be_a(String) + res['items'][0].should have_key('createdAt') + res['items'][0]['createdAt'].should be_a(String) + res['items'][0].should have_key('updatedAt') + res['items'][0]['updatedAt'].should be_a(String) + res['items'][0].should have_key('entries') + res['items'][0]['entries'].should be_a(Integer) + res['items'][0].should have_key('pendingTask') + [true, false].should include(res['items'][0]['pendingTask']) + end + + it 'Check attributes search : ' do + res = @index.search('') + res.should have_key('hits') + res['hits'].should be_a(Array) + res.should have_key('page') + res['page'].should be_a(Integer) + res.should have_key('nbHits') + res['nbHits'].should be_a(Integer) + res.should have_key('nbPages') + res['nbPages'].should be_a(Integer) + res.should have_key('hitsPerPage') + res['hitsPerPage'].should be_a(Integer) + res.should have_key('processingTimeMS') + res['processingTimeMS'].should be_a(Integer) + res.should have_key('query') + res['query'].should be_a(String) + 
res.should have_key('params') + res['params'].should be_a(String) + end + + it 'Check attributes delete_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à2")) + index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = index.delete_index() + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes clear_index : ' do + task = @index.clear_index + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes add object : ' do + task = @index.add_object({ :name => "John Doe", :email => "john@doe.org" }) + task.should have_key('createdAt') + task['createdAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectID') + task['objectID'].should be_a(String) + end + + it 'Check attributes add object id: ' do + task = @index.add_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + #task.to_s.should eq("") + task.should have_key('objectID') + task['objectID'].should be_a(String) + task['objectID'].should eq("1") + end + + it 'Check attributes partial update: ' do + task = @index.partial_update_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectID') + task['objectID'].should be_a(String) + task['objectID'].should eq("1") + end + + it 'Check attributes delete object: ' do + @index.add_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = @index.delete_object("1") + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes add objects: ' do + task = @index.add_objects([{ :name => "John Doe", :email => "john@doe.org", :objectID => "1" }]) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectIDs') + task['objectIDs'].should be_a(Array) + end + + it 'Check attributes browse: ' do + res = @index.browse() + res.should have_key('hits') + res['hits'].should be_a(Array) + res.should have_key('page') + res['page'].should be_a(Integer) + res.should have_key('nbHits') + res['nbHits'].should be_a(Integer) + res.should have_key('nbPages') + res['nbPages'].should be_a(Integer) + res.should have_key('hitsPerPage') + res['hitsPerPage'].should be_a(Integer) + res.should have_key('processingTimeMS') + res['processingTimeMS'].should be_a(Integer) + res.should have_key('query') + res['query'].should be_a(String) + res.should have_key('params') + res['params'].should be_a(String) + end + + it 'Check attributes get settings: ' do + task = @index.set_settings({}) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + end + + it 'Check attributes move_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + index2 = Algolia::Index.new(safe_index_name("àlgol?à2")) + index2.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = 
Algolia.move_index!(safe_index_name("àlgol?à2"), safe_index_name("àlgol?à")) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + index.delete_index + end + + it 'Check attributes copy_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + index2 = Algolia::Index.new(safe_index_name("àlgol?à2")) + index2.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = Algolia.copy_index!(safe_index_name("àlgol?à2"), safe_index_name("àlgol?à")) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + index.delete_index + index2.delete_index + end + + it 'Check attributes wait_task : ' do + task = @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = Algolia.client.get(Algolia::Protocol.task_uri(safe_index_name("àlgol?a"), task['objectID'])) + task.should have_key('status') + task['status'].should be_a(String) + task.should have_key('pendingTask') + [true, false].should include(task['pendingTask']) + end + + it 'Check attributes get_task_status' do + task = @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + status = @index.get_task_status(task["taskID"]) + status.should be_a(String) + end + + it 'Check attributes log : ' do + logs = Algolia.get_logs() + logs.should have_key('logs') + logs['logs'].should be_a(Array) + logs['logs'][0].should have_key('timestamp') + logs['logs'][0]['timestamp'].should be_a(String) + logs['logs'][0].should have_key('method') + logs['logs'][0]['method'].should be_a(String) + logs['logs'][0].should have_key('answer_code') + logs['logs'][0]['answer_code'].should be_a(String) + logs['logs'][0].should have_key('query_body') + logs['logs'][0]['query_body'].should be_a(String) + logs['logs'][0].should have_key('answer') + logs['logs'][0]['answer'].should be_a(String) + logs['logs'][0].should have_key('url') + logs['logs'][0]['url'].should be_a(String) + logs['logs'][0].should have_key('ip') + logs['logs'][0]['ip'].should be_a(String) + logs['logs'][0].should have_key('query_headers') + logs['logs'][0]['query_headers'].should be_a(String) + logs['logs'][0].should have_key('sha1') + logs['logs'][0]['sha1'].should be_a(String) + end + + it 'should generate secured api keys (old syntax)' do + key = Algolia.generate_secured_api_key('my_api_key', '(public,user1)') + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', '(public,user1)')) + key = Algolia.generate_secured_api_key('my_api_key', '(public,user1)', 42) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', '(public,user1)42')) + key = Algolia.generate_secured_api_key('my_api_key', ['public']) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'public')) + key = Algolia.generate_secured_api_key('my_api_key', ['public', ['premium','vip']]) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'public,(premium,vip)')) + end + + it 'should generate secured api keys (new syntax)' do + key = Algolia.generate_secured_api_key('my_api_key', :tagFilters => '(public,user1)') + key.should eq(Base64.encode64("#{OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'tagFilters=%28public%2Cuser1%29')}tagFilters=%28public%2Cuser1%29").gsub("\n", '')) + key = Algolia.generate_secured_api_key('182634d8894831d5dbce3b3185c50881', 
:tagFilters => '(public,user1)', :userToken => 42) + # in ruby 1.8.7, the map iteration doesn't have the same ordering, + # making the hash slightly different + expected_keys = [ + 'ZDU0N2YzZjA3NGZkZGM2OTUxNzY3NzhkZDI3YWFkMjhhNzU5OTBiOGIyYTgyYzFmMjFjZTY4NTA0ODNiN2I1ZnVzZXJUb2tlbj00MiZ0YWdGaWx0ZXJzPSUyOHB1YmxpYyUyQ3VzZXIxJTI5', + 'OGYwN2NlNTdlOGM2ZmM4MjA5NGM0ZmYwNTk3MDBkNzMzZjQ0MDI3MWZjNTNjM2Y3YTAzMWM4NTBkMzRiNTM5YnRhZ0ZpbHRlcnM9JTI4cHVibGljJTJDdXNlcjElMjkmdXNlclRva2VuPTQy' + ] + expected_keys.include?(key).should eq(true) + end + + it 'Check attributes multipleQueries' do + res = Algolia.multiple_queries([{:index_name => safe_index_name("àlgol?a"), "query" => ""}]) + res.should have_key('results') + res['results'].should be_a(Array) + res['results'][0].should have_key('hits') + res['results'][0]['hits'].should be_a(Array) + res['results'][0].should have_key('page') + res['results'][0]['page'].should be_a(Integer) + res['results'][0].should have_key('nbHits') + res['results'][0]['nbHits'].should be_a(Integer) + res['results'][0].should have_key('nbPages') + res['results'][0]['nbPages'].should be_a(Integer) + res['results'][0].should have_key('hitsPerPage') + res['results'][0]['hitsPerPage'].should be_a(Integer) + res['results'][0].should have_key('processingTimeMS') + res['results'][0]['processingTimeMS'].should be_a(Integer) + res['results'][0].should have_key('query') + res['results'][0]['query'].should be_a(String) + res['results'][0].should have_key('params') + res['results'][0]['params'].should be_a(String) + end + + it 'should handle facet search' do + objects = { + :snoopy => { + :objectID => '1', + 'name' => 'Snoopy', + :kind => ['dog', 'animal'], + :born => 1950, + :series => 'Peanuts' + }, + :woodstock => { + :objectID => '2', + :name => 'Woodstock', + :kind => ['bird', 'animal'], + :born => 1960, + :series => 'Peanuts' + }, + :charlie => { + :objectID => '3', + :name => 'Charlie Brown', + :kind => ['human'], + :born => 1950, + :series => 'Peanuts' + }, + :hobbes => { + :objectID => '4', + :name => 'Hobbes', + :kind => ['tiger', 'animal', 'teddy'], + :born => 1985, + :series => 'Calvin & Hobbes' + }, + :calvin => { + :objectID => '5', + :name => 'Calvin', + :kind => ['human'], + :born => 1985, + :series => 'Calvin & Hobbes' + } + } + + index = Algolia::Index.new(safe_index_name('test_facet_search')) + index.set_settings({ + :attributesForFaceting => [ + 'searchable(series)', + 'kind' + ] + }) + index.add_objects! objects.values + + query = { + :facetFilters => ['kind:animal'], + :numericFilters => ['born >= 1955'] + } + answer = index.search_for_facet_values 'series', 'Peanutz', query + expect(answer['facetHits'].size).to eq(1) + expect(answer['facetHits'].first['value']).to eq('Peanuts') + expect(answer['facetHits'].first['count']).to eq(1) + end + + it 'should handle disjunctive faceting' do + index = Algolia::Index.new(safe_index_name("test_hotels")) + index.set_settings :attributesForFacetting => ['city', 'stars', 'facilities'] + index.clear_index rescue nil + index.add_objects! 
[ + { :name => 'Hotel A', :stars => '*', :facilities => ['wifi', 'bath', 'spa'], :city => 'Paris' }, + { :name => 'Hotel B', :stars => '*', :facilities => ['wifi'], :city => 'Paris' }, + { :name => 'Hotel C', :stars => '**', :facilities => ['bath'], :city => 'San Francisco' }, + { :name => 'Hotel D', :stars => '****', :facilities => ['spa'], :city => 'Paris' }, + { :name => 'Hotel E', :stars => '****', :facilities => ['spa'], :city => 'New York' }, + ] + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }) + answer['nbHits'].should eq(5) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*'] }) + answer['nbHits'].should eq(2) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['**'].should eq(1) + answer['disjunctiveFacets']['stars']['****'].should eq(2) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*'], :city => ['Paris'] }) + answer['nbHits'].should eq(2) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['****'].should eq(1) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*', '****'], :city => ['Paris'] }) + answer['nbHits'].should eq(3) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['****'].should eq(1) + end + + it 'should apply jobs one after another if synchronous' do + index = Algolia::Index.new(safe_index_name("sync")) + begin + index.add_object! :objectID => 1 + answer = index.search('') + answer['nbHits'].should eq(1) + answer['hits'][0]['objectID'].to_i.should eq(1) + index.clear_index! + index.add_object! :objectID => 2 + index.add_object! 
:objectID => 3 + answer = index.search('') + answer['nbHits'].should eq(2) + answer['hits'][0]['objectID'].to_i.should_not eq(1) + ensure + index.delete_index + end + end + + it "should send a custom batch" do + batch = [ + {:action => "addObject", :indexName => @index.name, :body => { :objectID => "11", :email => "john@be.org" }}, + {:action => "addObject", :indexName => @index.name, :body => { :objectID => "22", :email => "robert@be.org" }} + ] + Algolia.batch!(batch) + res = @index.search("@be.org") + res["hits"].length.should eq(2) + end + + def test_browse(expected, *args) + @index.clear + @index.add_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + hits = {} + @index.browse(*args) do |hit| + hits[hit['objectID']] = true + end + hits.size.should eq(expected) + end + + it "should browse the index using cursors" do + test_browse(1500) + test_browse(500, 1, 1000) + test_browse(0, 2, 1000) + end + + it "should browse the index using cursors specifying hitsPerPage" do + test_browse(1500, { :hitsPerPage => 500 }) + end + + it "should browse the index using cursors specifying params" do + test_browse(1, { :hitsPerPage => 500, :numericFilters => 'i=42' }) + test_browse(42, { :numericFilters => 'i<=42' }) + end + + it "should browse the index using cursors from a cursor" do + @index.clear + @index.add_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + answer = @index.browse(0, 1000) + + hits = {} + @index.browse(:cursor => answer['cursor']) do |hit, cursor| + hits[hit['objectID']] = true + cursor.should eq(answer['cursor']) + end + hits.size.should eq(500) + + @index.browse_from(answer['cursor'])['hits'].size.should eq(500) + end + + it "should test synonyms" do + @index.add_object! :name => '589 Howard St., San Francisco' + @index.search('Howard St San Francisco')['nbHits'].should eq(1) + @index.batch_synonyms! [ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + synonyms_search = @index.search_synonyms('')['hits'] + synonyms_search.size.should eq(2) + @index.search('Howard St SF')['nbHits'].should eq(1) + + synonym = @index.get_synonym('city') + synonym['objectID'].should eq('city') + synonym['type'].should eq('synonym') + + @index.search('Howard Street')['nbHits'].should eq(1) + + synonyms_block = [] + synonyms_ret = @index.export_synonyms(1) do |s| + synonyms_block << s + end + + s0 = synonyms_search.map { |s| s['objectID'] }.sort + s1 = synonyms_block.map { |s| s['objectID'] }.sort + s2 = synonyms_ret.map { |s| s['objectID'] }.sort + + s0.should eq(s1) + s1.should eq(s2) + + @index.delete_synonym! 'city' + @index.search('Howard Street SF')['nbHits'].should eq(0) + + @index.clear_synonyms! + @index.search_synonyms('')['nbHits'].should eq(0) + end + + it "should replace all synonyms" do + @index.batch_synonyms! ([ + {:objectID => '1', :type => 'synonym', :synonyms => ['San Francisco', 'SF']}, + {:objectID => '2', :type => 'altCorrection1', :word => 'foo', :corrections => ['st']} + ]) + + @index.replace_all_synonyms! ([ + {:objectID => '3', :type => 'synonym', :synonyms => ['San Francisco', 'SF']}, + {:objectID => '4', :type => 'altCorrection1', :word => 'bar', :corrections => ['st']} + ]) + + synonym = @index.get_synonym('4')['objectID'].should eq('4') + synonyms_search = @index.search_synonyms('')['hits'] + synonyms_search.size.should eq(2) + end + + it 'should test synonyms Export Query' do + @index.batch_synonyms! 
[ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'us', :type => 'synonym', :synonyms => ['US', 'USA', 'Untied States of America'] }, + { :objectID => 'ie', :type => 'synonym', :synonyms => ['IE', 'IRL', 'Ireland'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + + expect(@index).to receive(:search_synonyms).and_call_original.at_least(4) + @index.export_synonyms(1) + + @index.clear_synonyms! + end + + it 'should test Query Rules' do + rule_1 = { + :objectID => '42', + :condition => { :pattern => 'test', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'this is better' } } + } + rule_2 = { + :objectID => '2', + :condition => { :pattern => 'Pura', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'Pura Vida' } } + } + + result = @index.save_rule!(rule_1[:objectID], rule_1) + result.should have_key('taskID') + result.should have_key('updatedAt') + + @index.get_rule(rule_1[:objectID])['objectID'].should eq(rule_1[:objectID]) + + @index.search_rules('better')['nbHits'].should eq(1) + @index.search_rules('', { :anchoring => 'contains' })['nbHits'].should eq(1) + + @index.delete_rule!(rule_1[:objectID]) + @index.search_rules('')['nbHits'].should eq(0) + + @index.batch_rules!([rule_1, rule_2]) + rules_search = @index.search_rules('')['hits'] + rules_search.size.should eq(2) + + rules_block = [] + rules_ret = @index.export_rules(1) do |r| + rules_block << r + end + + r0 = rules_search.map { |r| r['objectID'] }.sort + r1 = rules_block.map { |r| r['objectID'] }.sort + r2 = rules_ret.map { |r| r['objectID'] }.sort + + r0.should eq(r1) + r1.should eq(r2) + + @index.clear_rules! + @index.search_rules('')['nbHits'].should eq(0) + end + + it "should replace all rules" do + rule_1 = { + :objectID => '1', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + } + rule_2 = { + :objectID => '2', + :condition => {:pattern => 'Pura', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'Pura Vida'}} + } + + @index.batch_rules! 
[rule_1, rule_2] + + rule_1[:objectID] = '3' + rule_2[:objectID] = '4' + @index.replace_all_rules!([rule_1, rule_2]) + + @index.get_rule('4')['objectID'].should eq('4') + rules_search = @index.search_rules('')['hits'] + rules_search.size.should eq(2) + end + + it 'should not save a query rule with an empty objectID' do + rule = { + :objectID => '', + :condition => { :pattern => 'test', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'this is better' } } + } + + expect { @index.save_rule!(nil, rule) }.to raise_error(ArgumentError) + expect { @index.save_rule!(rule[:objectID], rule) }.to raise_error(ArgumentError) + end + + it "should use request options" do + expect{Algolia.list_indexes}.to_not raise_error + + expect{Algolia.list_indexes('headers' => { 'X-Algolia-API-Key' => 'NotExistentAPIKey' })}.to raise_error(Algolia::AlgoliaProtocolError) + end + + it 'should retrieve the remaining validity time in seconds' do + now = Time.now.to_i + + key = Algolia.generate_secured_api_key('foo', :validUntil => now - (10 * 60)) + expect(Algolia.get_secured_api_key_remaining_validity(key)).to be < 0 + + key = Algolia.generate_secured_api_key('foo', :validUntil => now + (10 * 60)) + expect(Algolia.get_secured_api_key_remaining_validity(key)).to be > 0 + + key = Algolia.generate_secured_api_key('foo', []) + expect { Algolia.get_secured_api_key_remaining_validity(key) }.to raise_error(Algolia::ValidUntilNotFoundError) + end + + context 'DNS timeout' do + before(:each) do + @client = Algolia::Client.new :application_id => ENV['ALGOLIA_APPLICATION_ID'], :api_key => ENV['ALGOLIA_API_KEY'], + :hosts => [ + "10.0.0.1", # this will timeout + "#{ENV['ALGOLIA_APPLICATION_ID']}.algolia.net", + "#{ENV['ALGOLIA_APPLICATION_ID']}-1.algolianet.com", + "#{ENV['ALGOLIA_APPLICATION_ID']}-2.algolianet.com", + "#{ENV['ALGOLIA_APPLICATION_ID']}-3.algolianet.com" + ], + :connect_timeout => 5 + @client.destroy # make sure the thread-local vars are reseted + end + + it "should fallback to the 2nd host after a few seconds" do + start_time = Time.now + @client.list_indexes # fallback on the second host after 5 sec (connection timeout) + expect(start_time.to_i + 5).to be <= Time.now.to_i + 1 + end + + it "should re-use the working (2nd) host after the 1st one failed" do + start_time = Time.now + @client.list_indexes # fallback on the second host after 5 sec (connection timeout) + expect(start_time.to_i + 5).to be <= Time.now.to_i + 1 + start_time = Time.now + @client.list_indexes # re-use the 2nd one + expect(start_time.to_i).to be <= Time.now.to_i + 1 + end + end + + context 'Custom User Agent' do + before(:all) do + WebMock.enable! + end + + before(:each) do + @client = Algolia::Client.new( + :application_id => ENV['ALGOLIA_APPLICATION_ID'], + :api_key => ENV['ALGOLIA_API_KEY'], + :user_agent => 'test agent' + ) + @client.destroy # make sure the thread-local vars are reseted + end + + it "should use a custom user-agent" do + WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes/). + to_return(:status => 200, :body => '{}') + @client.list_indexes + expect(WebMock).to have_requested(:get, /https:\/\/.+-dsn.algolia(net\.com|\.net)\/1\/indexes/). + with(:headers => { 'User-Agent' => "Algolia for Ruby (#{::Algolia::VERSION}); Ruby (#{RUBY_VERSION}); test agent" }) + end + + after(:all) do + WebMock.disable! 
+ end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb new file mode 100644 index 0000000..8dae58a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb @@ -0,0 +1,31 @@ +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) + +describe 'With a mocked client' do + + before(:all) do + WebMock.enable! + Algolia::WebMock.mock! + # reset session objects + app_id = Algolia.client.application_id + Thread.current["algolia_hosts_#{app_id}"] = nil + Thread.current["algolia_search_hosts_#{app_id}"] = nil + Thread.current["algolia_host_index_#{app_id}"] = nil + Thread.current["algolia_search_host_index_#{app_id}"] = nil + end + + it "should add a simple object" do + index = Algolia::Index.new("friends") + index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + index.search('').should == { "hits" => [ { "objectID" => 42 } ], "page" => 1, "hitsPerPage" => 1, "nbHits"=>1, "nbPages"=>1 } # mocked + index.list_api_keys + index.browse + index.clear + index.delete + index.delete_by_query 'test' + end + + after(:all) do + WebMock.disable! + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb new file mode 100644 index 0000000..e317c1e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb @@ -0,0 +1,69 @@ + +if ENV['COVERAGE'] + require 'simplecov' + SimpleCov.start +end + +require 'bundler/setup' + +Bundler.setup :test + +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) + +require 'algoliasearch' +require 'rspec' +require 'webmock/rspec' +require 'algolia/webmock' +require 'time' + +raise 'missing ALGOLIA_APPLICATION_ID or ALGOLIA_API_KEY environment variables' if ENV['ALGOLIA_APPLICATION_ID'].nil? || ENV['ALGOLIA_API_KEY'].nil? +Algolia.init :application_id => ENV['ALGOLIA_APPLICATION_ID'], :api_key => ENV['ALGOLIA_API_KEY'] + +RSpec.configure do |config| + config.mock_with :rspec + + config.before(:suite) do + WebMock.disable! + end + + config.after(:suite) do + WebMock.disable! + end +end + +# avoid concurrent access to the same index +def safe_index_name(name) + return name if ENV['TRAVIS'].to_s != "true" + id = ENV['TRAVIS_JOB_NUMBER'] + "TRAVIS_RUBY_#{name}-#{id}" +end + +# avoid concurrent access to the same index and follows the CTS standards. +def index_name(name) + date = DateTime.now.strftime('%Y-%m-%d_%H:%M:%S') + + instance = ENV['TRAVIS'].to_s == 'true' ? ENV['TRAVIS_JOB_NUMBER'] : 'unknown' + + 'ruby_%s_%s_%s' % [date, instance, name] +end + +def auto_retry(options = {}) + return if !block_given? + + max_retry = options[:max_retry] || 10 + retry_count = 0 + + loop do + begin + return yield + rescue => e + retry_count += 1 + if retry_count >= max_retry + raise e + else + sleep retry_count + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb new file mode 100644 index 0000000..9c92efa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb @@ -0,0 +1,51 @@ +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) + +require 'webmock' + +describe 'With a rate limited client' do + + before(:each) do + WebMock.enable! 
+ # reset session objects + app_id = Algolia.client.application_id + Thread.current["algolia_hosts_#{app_id}"] = nil + Thread.current["algolia_search_hosts_#{app_id}"] = nil + Thread.current["algolia_host_index_#{app_id}"] = nil + Thread.current["algolia_search_host_index_#{app_id}"] = nil + end + + it "should pass the right headers" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'], 'X-Forwarded-Api-Key'=>'ratelimitapikey', 'X-Forwarded-For'=>'1.2.3.4'}). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 1 }", :headers => {}) + Algolia.enable_rate_limit_forward ENV['ALGOLIA_API_KEY'], "1.2.3.4", "ratelimitapikey" + index = Algolia::Index.new("friends") + index.search('foo')['fakeAttribute'].should == 1 + index.search('bar')['fakeAttribute'].should == 1 + end + + it "should use original headers" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'] }). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 2 }", :headers => {}) + Algolia.disable_rate_limit_forward + index = Algolia::Index.new("friends") + index.search('bar')['fakeAttribute'].should == 2 + end + + it "should pass the right headers in the scope" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'], 'X-Forwarded-Api-Key'=>'ratelimitapikey', 'X-Forwarded-For'=>'1.2.3.4'}). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 1 }", :headers => {}) + Algolia.with_rate_limits "1.2.3.4", "ratelimitapikey" do + index = Algolia::Index.new("friends") + index.search('foo')['fakeAttribute'].should == 1 + index.search('bar')['fakeAttribute'].should == 1 + end + end + + after(:each) do + WebMock.disable! 
+ end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.gitignore b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.gitignore new file mode 100644 index 0000000..b04a8c8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.gitignore @@ -0,0 +1,11 @@ +/.bundle/ +/.yardoc +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ + +# rspec failure tracking +.rspec_status diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rspec b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rspec new file mode 100644 index 0000000..34c5164 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rspec @@ -0,0 +1,3 @@ +--format documentation +--color +--require spec_helper diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rubocop.yml b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rubocop.yml new file mode 100644 index 0000000..3ffe2b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rubocop.yml @@ -0,0 +1,2 @@ +inherit_from: .rubocop_todo.yml + diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rubocop_todo.yml b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rubocop_todo.yml new file mode 100644 index 0000000..826a7a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.rubocop_todo.yml @@ -0,0 +1,32 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2018-02-02 08:32:23 -0800 using RuboCop version 0.52.1. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +# Configuration parameters: Include. +# Include: **/*.gemspec +Gemspec/RequiredRubyVersion: + Exclude: + - 'atomos.gemspec' + +# Offense count: 1 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 14 + +# Offense count: 1 +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + - 'lib/atomos.rb' + +# Offense count: 7 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 97 diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.travis.yml new file mode 100644 index 0000000..6a8e36f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/.travis.yml @@ -0,0 +1,5 @@ +sudo: false +language: ruby +rvm: + - 2.5.0 +before_install: gem install bundler -v 1.16.1 diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..3399e24 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at segiddins@squareup.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Gemfile b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Gemfile new file mode 100644 index 0000000..2d1d7e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Gemfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# Specify your gem's dependencies in atomos.gemspec +gemspec diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Gemfile.lock b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Gemfile.lock new file mode 100644 index 0000000..edfb2e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Gemfile.lock @@ -0,0 +1,51 @@ +PATH + remote: . + specs: + atomos (0.1.3) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.3.0) + diff-lcs (1.3) + parallel (1.12.1) + parser (2.4.0.2) + ast (~> 2.3) + powerpack (0.1.1) + rainbow (3.0.0) + rake (10.5.0) + rspec (3.7.0) + rspec-core (~> 3.7.0) + rspec-expectations (~> 3.7.0) + rspec-mocks (~> 3.7.0) + rspec-core (3.7.1) + rspec-support (~> 3.7.0) + rspec-expectations (3.7.0) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.7.0) + rspec-mocks (3.7.0) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.7.0) + rspec-support (3.7.0) + rubocop (0.52.1) + parallel (~> 1.10) + parser (>= 2.4.0.2, < 3.0) + powerpack (~> 0.1) + rainbow (>= 2.2.2, < 4.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-progressbar (1.9.0) + unicode-display_width (1.3.0) + +PLATFORMS + ruby + +DEPENDENCIES + atomos! + bundler (~> 1.16) + rake (~> 10.0) + rspec (~> 3.0) + rubocop + +BUNDLED WITH + 1.16.3 diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/LICENSE.txt b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/LICENSE.txt new file mode 100644 index 0000000..7a54c62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Samuel Giddins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/README.md b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/README.md new file mode 100644 index 0000000..de832a5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/README.md @@ -0,0 +1,43 @@ +# Atomos + +Welcome to your new gem! In this directory, you'll find the files you need to be able to package up your Ruby library into a gem. Put your Ruby code in the file `lib/atomos`. To experiment with that code, run `bin/console` for an interactive prompt. + +TODO: Delete this and the text above, and describe your gem + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'atomos' +``` + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install atomos + +## Usage + +TODO: Write usage instructions here + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/atomos. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct. + +## License + +The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT). + +## Code of Conduct + +Everyone interacting in the Atomos project’s codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/[USERNAME]/atomos/blob/master/CODE_OF_CONDUCT.md). 
diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Rakefile b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Rakefile new file mode 100644 index 0000000..8ce173e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/Rakefile @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require 'bundler/gem_tasks' + +require 'rspec/core/rake_task' +require 'rubocop/rake_task' + +RSpec::Core::RakeTask.new +RuboCop::RakeTask.new + +task default: %i[rubocop spec] diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/VERSION b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/VERSION new file mode 100644 index 0000000..b1e80bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/VERSION @@ -0,0 +1 @@ +0.1.3 diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/atomos.gemspec b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/atomos.gemspec new file mode 100644 index 0000000..7ad4922 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/atomos.gemspec @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +Gem::Specification.new do |spec| + spec.name = 'atomos' + spec.version = File.read(File.expand_path('../VERSION', __FILE__)) + spec.authors = ['Samuel Giddins'] + spec.email = ['segiddins@segiddins.me'] + + spec.summary = 'A simple gem to atomically write files' + spec.homepage = 'https://github.com/segiddins/atomos' + spec.license = 'MIT' + + spec.files = `git ls-files -z`.split("\x0").reject do |f| + f.match(%r{^(test|spec|features)/}) + end + spec.bindir = 'exe' + spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } + spec.require_paths = ['lib'] + + spec.required_ruby_version = '>= 2.0' + + spec.add_development_dependency 'bundler', '~> 1.16' + spec.add_development_dependency 'rake', '~> 10.0' + spec.add_development_dependency 'rspec', '~> 3.0' + spec.add_development_dependency 'rubocop' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/console b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/console new file mode 100755 index 0000000..535613d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/console @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require 'bundler/setup' +require 'atomos' + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require 'irb' +IRB.start(__FILE__) diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rake b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rake new file mode 100755 index 0000000..8226b57 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rake @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rake' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. 
+Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rake', 'rake') diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rspec b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rspec new file mode 100755 index 0000000..d086973 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rspec @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rspec' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rspec-core', 'rspec') diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rubocop b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rubocop new file mode 100755 index 0000000..8424d87 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/rubocop @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rubocop' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rubocop', 'rubocop') diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/setup b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/setup new file mode 100755 index 0000000..dce67d8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/bin/setup @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' +set -vx + +bundle install + +# Do any other automated setup that you need to do here diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/lib/atomos.rb b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/lib/atomos.rb new file mode 100644 index 0000000..4b56d05 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/lib/atomos.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +require 'atomos/version' + +module Atomos + module_function + + # rubocop:disable Metrics/MethodLength + def atomic_write(dest, contents = nil, tmpdir: nil, &block) + unless contents.nil? ^ block.nil? 
+ raise ArgumentError, 'must provide either contents or a block' + end + + tmpdir = Atomos.default_tmpdir_for_file(dest, tmpdir) + + require 'tempfile' + Tempfile.open(".atomos.#{File.basename(dest)}", tmpdir) do |tmpfile| + if contents + tmpfile << contents + else + retval = yield tmpfile + end + + tmpfile.close + + File.rename(tmpfile.path, dest) + + retval + end + end + # rubocop:enable Metrics/MethodLength + + def self.default_tmpdir_for_file(dest, tmpdir) + tmpdir ||= begin + require 'tmpdir' + Dir.tmpdir + end + + # Ensure the destination is on the same device as tmpdir + if File.stat(tmpdir).dev != File.stat(File.dirname(dest)).dev + # If not, use the directory of the destination as the tmpdir. + tmpdir = File.dirname(dest) + end + + tmpdir + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/lib/atomos/version.rb b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/lib/atomos/version.rb new file mode 100644 index 0000000..f52f703 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/atomos-0.1.3/lib/atomos/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Atomos + VERSION = File.read(File.expand_path('../../../VERSION', __FILE__)) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.gitignore b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.kick b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.kick new file mode 100644 index 0000000..0686cce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.kick @@ -0,0 +1,30 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bundle exec bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/[^/]*/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. +process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) +ignore(/^tmp/) + diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop.yml b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop.yml new file mode 100644 index 0000000..e3dc640 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop_cocoapods.yml b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop_cocoapods.yml new file mode 100644 index 0000000..2da3d10 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop_cocoapods.yml @@ -0,0 +1,133 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. 
+SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop_todo.yml b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop_todo.yml new file mode 100644 index 0000000..e5a31c1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.rubocop_todo.yml @@ -0,0 +1,70 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2016-03-09 18:40:14 -0600 using RuboCop version 0.38.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 3 +Lint/IneffectiveAccessModifier: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. 
+Lint/UnneededDisable: + Exclude: + - 'spec/command/banner_spec.rb' + +# Offense count: 1 +Performance/FixedSize: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Performance/StringReplacement: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: prefer_alias, prefer_alias_method +Style/Alias: + Exclude: + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: SingleLineConditionsOnly. +Style/ConditionalAssignment: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +Style/IfInsideElse: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 9 +# Cop supports --auto-correct. +Style/MutableConstant: + Exclude: + - 'lib/claide/ansi.rb' + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ParallelAssignment: + Exclude: + - 'lib/claide/command/argument_suggester.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/UnneededInterpolation: + Exclude: + - 'lib/claide/command/argument_suggester.rb' diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.travis.yml new file mode 100644 index 0000000..562ab72 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.travis.yml @@ -0,0 +1,27 @@ +language: ruby + +dist: trusty + +addons: + code_climate: + repo_token: 46c8b29dd6711f35704e7c5a541486cbbf2cff8b2df8ce755bfc09917d3c1cbb +branches: + only: + - master + - /.+-stable$/ +rvm: + - 2.0.0-p647 + - 2.1.10 + - 2.2.9 + - 2.3.8 + - 2.4.5 + - 2.5.3 + - 2.6.2 +bundler_args: --without development +before_install: + # There is a bug in travis. When using system ruby, bundler is not + # installed and causes the default install action to fail. + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem install "bundler:~> 1.15.0"; else gem install "bundler:~> 1.15.0"; fi + # RubyGems 2.0.14 isn't a fun time on 2.0.0p648 + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem update --system; fi +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.yardopts b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.yardopts new file mode 100644 index 0000000..a647564 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/.yardopts @@ -0,0 +1 @@ +--markup markdown --protected --charset=utf-8 lib diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/CHANGELOG.md new file mode 100644 index 0000000..8a63e0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/CHANGELOG.md @@ -0,0 +1,254 @@ +# CLAide Changelog + +## 1.0.3 (2019-08-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Correctly handle `--help` flags when using `argv.remainder!` after initialization + [Eric Amorde](https://github.com/amorde), + [tripleCC](https://github.com/tripleCC) + [#87](https://github.com/CocoaPods/CLAide/pull/87) + + +## 1.0.2 (2017-06-06) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Avoid a method redefinition warning when requiring `claide`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.1 (2016-10-10) + +##### Bug Fixes + +* Adds a fix for older versions of Rubygems when CLAide crashes. 
+ [Samuel Giddins](https://github.com/segiddins) + [#73](https://github.com/CocoaPods/CLAide/issues/73) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix circular require of `claide/ansi` in `claide/ansi/string_escaper`. + [bootstraponline](https://github.com/bootstraponline) + [#66](https://github.com/CocoaPods/CLAide/issues/66) + + +## 1.0.0.beta.3 (2016-03-15) + +##### Enhancements + +* Added `Command.option` to easily add a single option to a command class. + [Samuel Giddins](https://github.com/segiddins) + [#64](https://github.com/CocoaPods/CLAide/issues/64) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-03-08) + +##### Bug Fixes + +* Attempt to get the terminal width without shelling out to `tput`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Bug Fixes + +* The plugin manager will now properly activate plugin gems, ensuring all of + their files are requirable. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.1 (2015-07-05) + +##### Bug Fixes + +* Fix a regression when contradictory flags were given in `ARGV` -- the last + flag given will once again be the value returned, and all entries for that key + are removed. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.0 (2015-07-02) + +##### Enhancements + +* Properly parse everything in `ARGV` after `--` as an argument. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/CLAide/issues/48) + +* Allow parsing an option that occurs multiple times. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.2 (2015-06-27) + +##### Enhancements + +* Add `ARGV#remainder!`, which returns all the remaining arguments, deleting + them from the receiver. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.1 (2015-02-25) + +###### Bug Fixes + +* Silence errors while loading plugins. + [Clément Beffa](https://github.com/cl3m) + [#44](https://github.com/CocoaPods/CLAide/issues/44) + + +## 0.8.0 (2014-12-25) + +###### Breaking + +* Removes the `ShellCompletionHelper` along with completion script for ZSH. This is out of the scope of CLAide. + [Eloy Durán](https://github.com/alloy) + [#43](https://github.com/CocoaPods/CLAide/issues/43) + +* Various refactoring replacing “Helper” API’s which specialised classes such as ArgumentSuggester, TextWrapper and PluginManager. + [Eloy Durán](https://github.com/alloy) + +###### Enhancements + +* Added convenience method to invoke commands more easily. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/40) + +* Changes to the PluginManager to handle multiple plugin prefixes, which by default adds the `clad` plugin prefix. + [Eloy Durán](https://github.com/alloy) + +## 0.7.0 (2014-09-11) + +###### Breaking + +* Plugins are now expected to include the `cocoapods-plugin.rb` file in + `./lib`. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +###### Enhancements + +* Improved messages for exceptions generated by plugins. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +* Use the Argument class to describe arguments. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* Support for argument alternatives and repeatable arguments (ellipsis). 
+ [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* No stack trace if --help and --vebose are combined. + [Marius Rackwitz](https://github.com/mrackwitz) + [#36](https://github.com/CocoaPods/CLAide/issues/36) + + +## 0.6.1 (2014-05-20) + +###### Bug Fixes + +* Respect the ANSI flag for the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#34](https://github.com/CocoaPods/CLAide/issues/34) + +* Underline the colon of the titles of the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + +## 0.6.0 (2014-05-19) + +###### Enhancements + +* Use an array to describe arguments. + [Fabio Pelosin][fabiopelosin] + [#26](https://github.com/CocoaPods/CLAide/issues/26) + +* Improved layout and contents of help banner + [Fabio Pelosin](https://github.com/fabiopelosin) + [#25](https://github.com/CocoaPods/CLAide/pull/25) + +* Colorize option, arguments, and example commands in the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#12](https://github.com/CocoaPods/CLAide/issues/12) + +* Add support for ANSI escape sequences. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#17](https://github.com/CocoaPods/CLAide/issues/17) + [#20](https://github.com/CocoaPods/CLAide/pull/20) + [#24](https://github.com/CocoaPods/CLAide/pull/24) + +* Add support for completion script + [Fabio Pelosin](https://github.com/fabiopelosin) + [#19](https://github.com/CocoaPods/CLAide/pull/19) + +* Add support for version logic via the introduction of the `version` class + attribute to the `CLAide::Commmand` class. If a value for the attribute is + specified the `--version` flag is added. The `--version --verbose` flags + include the version of the plugins in the output. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#13](https://github.com/CocoaPods/CLAide/issues/13) + [#14](https://github.com/CocoaPods/CLAide/issues/14) + +## 0.5.0 (2014-03-26) + +###### Enhancements + +* Add a `ignore_in_command_lookup` option to commands, which makes it possible + to have anonymous command classes that are or only meant to provide common + functionality, but are otherwise completely ignored during parsing, command + lookup, and help banner printing. + [Eloy Durán](https://github.com/alloy) + +* Deprecate the `color` option in favor of `ansi`. This is more abstract and + can be used for commands that only prettify output by using, for instance, + the bold ANSI code. This applies to the `CLAide` APIs as well. + [Eloy Durán](https://github.com/alloy) + +* Add more hooks that allow the user to customize how to prettify output. + [Eloy Durán](https://github.com/alloy) + +* Word wrap option descriptions to terminal width. + [Eloy Durán](https://github.com/alloy) + [#6](https://github.com/CocoaPods/CLAide/issues/6) + + +## 0.4.0 (2013-11-14) + +###### Enhancements + +* Added support for plugins. 
+ [Les Hill](https://github.com/leshill) + [#1](https://github.com/CocoaPods/CLAide/pull/1) diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Gemfile b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Gemfile new file mode 100644 index 0000000..6cc443e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Gemfile @@ -0,0 +1,23 @@ +source 'https://rubygems.org' + +gemspec + +gem 'rake' + +group :development do + gem 'kicker' + gem 'colored' # for examples +end + +group :spec do + gem 'bacon' + gem 'json', '< 2' + gem 'mocha-on-bacon' + gem 'prettybacon' + + install_if RUBY_VERSION >= '1.9.3' do + gem 'rubocop' + gem 'codeclimate-test-reporter', :require => nil + gem 'simplecov' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Gemfile.lock b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Gemfile.lock new file mode 100644 index 0000000..e8a9c08 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Gemfile.lock @@ -0,0 +1,74 @@ +PATH + remote: . + specs: + claide (1.0.3) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.2.0) + bacon (1.2.0) + codeclimate-test-reporter (0.4.1) + simplecov (>= 0.7.1, < 1.0.0) + colored (1.2) + docile (1.1.5) + ffi (1.9.6) + json (1.8.6) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + multi_json (1.10.1) + notify (0.5.2) + parser (2.3.0.6) + ast (~> 2.2) + powerpack (0.1.1) + prettybacon (0.0.2) + bacon (~> 1.2) + rainbow (2.1.0) + rake (10.3.2) + rb-fsevent (0.9.4) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.3) + ffi (>= 0.5.0) + rubocop (0.38.0) + parser (>= 2.3.0.6, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-progressbar (1.7.5) + simplecov (0.9.1) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.8.0) + simplecov-html (0.8.0) + unicode-display_width (1.0.1) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + claide! + codeclimate-test-reporter + colored + json (< 2) + kicker + mocha-on-bacon + prettybacon + rake + rubocop + simplecov + +BUNDLED WITH + 1.14.6 diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/LICENSE b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/README.markdown b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/README.markdown new file mode 100644 index 0000000..5ffd237 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/README.markdown @@ -0,0 +1,118 @@ +# Hi, I’m Claide, your command-line tool aide. + +[![Build Status](https://img.shields.io/travis/CocoaPods/CLAide/master.svg?style=flat)](https://travis-ci.org/CocoaPods/CLAide) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/CLAide.svg?style=flat)](https://codeclimate.com/github/CocoaPods/CLAide) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/CLAide.svg?style=flat)](https://codeclimate.com/github/CocoaPods/CLAide) + +I was born out of a need for a _simple_ option and command parser, while still +providing an API that allows you to quickly create a full featured command-line +interface. + +## Install + +``` +$ [sudo] gem install claide +``` + + +## Usage + +For full documentation, on the API of CLAide, visit [rubydoc.info][docs]. + + +### Argument handling + +At its core, a library, such as myself, needs to parse the parameters specified +by the user. + +Working with parameters is done through the `CLAide::ARGV` class. It takes an +array of parameters and parses them as either flags, options, or arguments. + +| Parameter | Description | +| :---: | :---: | +| `--milk`, `--no-milk` | A boolean ‘flag’, which may be negated. | +| `--sweetner=honey` | A ‘option’ consists of a key, a ‘=’, and a value. | +| `tea` | A ‘argument’ is just a value. | + + +Accessing flags, options, and arguments, with the following methods, will also +remove the parameter from the remaining unprocessed parameters. + +```ruby +argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) +argv.shift_argument # => 'tea' +argv.shift_argument # => nil +argv.flag?('milk') # => false +argv.flag?('milk') # => nil +argv.option('sweetner') # => 'honey' +argv.option('sweetner') # => nil +``` + + +In case the requested flag or option is not present, `nil` is returned. You can +specify a default value to be used as the optional second method parameter: + +```ruby +argv = CLAide::ARGV.new(['tea']) +argv.flag?('milk', true) # => true +argv.option('sweetner', 'sugar') # => 'sugar' +``` + + +Unlike flags and options, accessing all of the arguments can be done in either +a preserving or mutating way: + +```ruby +argv = CLAide::ARGV.new(['tea', 'coffee']) +argv.arguments # => ['tea', 'coffee'] +argv.arguments! # => ['tea', 'coffee'] +argv.arguments # => [] +``` + + +### Command handling + +Commands are actions that a tool can perform. Every command is represented by +its own command class. + +Commands may be nested, in which case they inherit from the ‘super command’ +class. Some of these nested commands may not actually perform any work +themselves, but are rather used as ‘super commands’ _only_, in which case they +are ‘abtract commands’. + +Running commands is typically done through the `CLAide::Command.run(argv)` +method, which performs the following three steps: + +1. Parses the given parameters, finds the command class matching the parameters, + and instantiates it with the remaining parameters. 
It’s each nested command + class’ responsibility to remove the parameters it handles from the remaining + parameters, _before_ calling the `super` implementation. + +2. Asks the command instance to validate its parameters, but only _after_ + calling the `super` implementation. The `super` implementation will show a + help banner in case the `--help` flag is specified, not all parameters were + removed from the parameter list, or the command is an abstract command. + +3. Calls the `run` method on the command instance, where it may do its work. + +4. Catches _any_ uncaught exception and shows it to user in a meaningful way. + * A `Help` exception triggers a help banner to be shown for the command. + * A exception that includes the `InformativeError` module will show _only_ + the message, unless disabled with the `--verbose` flag; and in red, + depending on the color configuration. + * Any other type of exception will be passed to `Command.report_error(error)` + for custom error reporting (such as the one in [CocoaPods][report-error]). + +In case you want to call commands from _inside_ other commands, you should use +the `CLAide::Command.parse(argv)` method to retrieve an instance of the command +and call `run` on it. Unless you are using user-supplied parameters, there +should not be a need to validate the parameters. + +See the [example][example] for a illustration of how to define commands. + + +[travis]: https://secure.travis-ci.org/CocoaPods/CLAide +[travis-status]: https://secure.travis-ci.org/CocoaPods/CLAide.png +[docs]: http://www.rubydoc.info/github/CocoaPods/CLAide/index +[example]: https://github.com/CocoaPods/CLAide/blob/master/examples/make.rb +[report-error]: https://github.com/CocoaPods/CocoaPods/blob/054fe5c861d932219ec40a91c0439a7cfc3a420c/lib/cocoapods/command.rb#L36 diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Rakefile b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Rakefile new file mode 100644 index 0000000..6aba28a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/Rakefile @@ -0,0 +1,57 @@ +# encoding: utf-8 + +#-- Bootstrap --------------------------------------------------------------# + +desc 'Initializes your working copy to run the specs' +task :bootstrap do + if system('which bundle') + title 'Installing gems' + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + require 'bundler/gem_tasks' + task :default => :spec + + #-- Specs ------------------------------------------------------------------# + + desc 'Run specs' + task :spec do + title 'Running Unit Tests' + files = FileList['spec/**/*_spec.rb'].shuffle.join(' ') + sh "bundle exec bacon #{files}" + + Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3' + end + + #-- Rubocop ----------------------------------------------------------------# + + desc 'Check code against RuboCop rules' + task :rubocop do + sh 'bundle exec rubocop' + end + +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' 
\ + "\e[0m" +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/claide.gemspec b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/claide.gemspec new file mode 100644 index 0000000..f40343c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/claide.gemspec @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +$:.unshift File.expand_path('../lib', __FILE__) +require 'claide' + +Gem::Specification.new do |s| + s.name = "claide" + s.version = CLAide::VERSION + s.license = "MIT" + s.email = ["eloy.de.enige@gmail.com", "fabiopelosin@gmail.com"] + s.homepage = "https://github.com/CocoaPods/CLAide" + s.authors = ["Eloy Duran", "Fabio Pelosin"] + + s.summary = "A small command-line interface framework." + + s.files = `git ls-files -z`.split("\0").reject { |f| f =~ /\A(spec|examples)/i } + + ## Make sure you can build the gem on older versions of RubyGems too: + s.rubygems_version = "1.6.2" + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.specification_version = 3 if s.respond_to? :specification_version +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide.rb new file mode 100644 index 0000000..fd56e7c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide.rb @@ -0,0 +1,19 @@ +# encoding: utf-8 + +# The mods of interest are {CLAide::ARGV}, {CLAide::Command}, and +# {CLAide::InformativeError} +# +module CLAide + # @return [String] + # + # CLAide’s version, following [semver](http://semver.org). + # + VERSION = '1.0.3'.freeze + + require 'claide/ansi' + require 'claide/argument' + require 'claide/argv' + require 'claide/command' + require 'claide/help' + require 'claide/informative_error' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi.rb new file mode 100644 index 0000000..0839ed6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi.rb @@ -0,0 +1,126 @@ +# encoding: utf-8 + +require 'claide/ansi/cursor' +require 'claide/ansi/graphics' + +module CLAide + # Provides support for ANSI Escape sequences + # + # For more information see: + # + # - http://ascii-table.com/ansi-escape-sequences.php + # - http://en.wikipedia.org/wiki/ANSI_escape_code + # + # This functionality has been inspired and derived from the following gems: + # + # - colored + # - colorize + # + class ANSI + extend Cursor + extend Graphics + + class << self + # @return [Bool] Wether the string mixin should be disabled to return the + # original string. This method is intended to offer a central location + # where to disable ANSI logic without needed to implement conditionals + # across the code base of clients. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # ANSI.disabled = true + # "example".ansi.yellow #=> "example" + # + attr_accessor :disabled + end + + # @return [Hash{Symbol => Fixnum}] The text attributes codes by their + # English name. + # + TEXT_ATTRIBUTES = { + :bold => 1, + :underline => 4, + :blink => 5, + :reverse => 7, + :hidden => 8, + } + + # @return [Hash{Symbol => Fixnum}] The codes to disable a text attribute by + # their name. 
+ # + TEXT_DISABLE_ATTRIBUTES = { + :bold => 21, + :underline => 24, + :blink => 25, + :reverse => 27, + :hidden => 28, + } + + # Return [String] The escape sequence to reset the graphics. + # + RESET_SEQUENCE = "\e[0m" + + # @return [Hash{Symbol => Fixnum}] The colors codes by their English name. + # + COLORS = { + :black => 0, + :red => 1, + :green => 2, + :yellow => 3, + :blue => 4, + :magenta => 5, + :cyan => 6, + :white => 7, + } + + # Return [String] The escape sequence for the default foreground color. + # + DEFAULT_FOREGROUND_COLOR = "\e[39m" + + # Return [String] The escape sequence for the default background color. + # + DEFAULT_BACKGROUND_COLOR = "\e[49m" + + # @return [Fixnum] The code of a key given the map. + # + # @param [Symbol] key + # The key for which the code is needed. + # + # @param [Hash{Symbol => Fixnum}] map + # A hash which associates each code to each key. + # + # @raise If the key is not provided. + # @raise If the key is not present in the map. + # + def self.code_for_key(key, map) + unless key + raise ArgumentError, 'A key must be provided' + end + code = map[key] + unless code + raise ArgumentError, "Unsupported key: `#{key}`" + end + code + end + end +end + +#-- String mixin -------------------------------------------------------------# + +require 'claide/ansi/string_escaper' + +class String + # @return [StringEscaper] An object which provides convenience methods to + # wrap the receiver in ANSI sequences. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # "example".ansi.on_red #=> "\e[41mexample\e[49m" + # "example".ansi.bold #=> "\e[1mexample\e[21m" + # + def ansi + CLAide::ANSI::StringEscaper.new(self) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/cursor.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/cursor.rb new file mode 100644 index 0000000..acfd5b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/cursor.rb @@ -0,0 +1,69 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the position + # of the cursor and to erase parts of text. + # + module Cursor + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] line + # The line where to place the cursor. + # + # @param [Fixnum] column + # The column where to place the cursor. + # + def self.set_cursor_position(line = 0, column = 0) + "\e[#{line};#{column}H" + end + + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] lines + # The amount of lines the cursor should be moved to. + # Negative values indicate up direction and positive ones + # down direction. + # + # @param [Fixnum] columns + # The amount of columns the cursor should be moved to. + # Negative values indicate left direction and positive ones + # right direction. + # + def self.move_cursor(lines, columns = 0) + lines_code = lines < 0 ? 'A' : 'B' + columns_code = columns > 0 ? 'C' : 'D' + "\e[#{lines.abs}#{lines_code};#{columns.abs}#{columns_code}" + end + + # @return [String] The escape sequence to save the cursor position. + # + def self.save_cursor_position + "\e[s" + end + + # @return [String] The escape sequence to restore the cursor to the + # previously saved position. This sequence also clears all the + # output after the position. + # + def self.restore_cursor_position + "\e[u" + end + + # @return [String] The escape sequence to erase the display. 
+ # + def self.erase_display + "\e[2J" + end + + # @return [String] The escape sequence to erase a line form the + # cursor position to then end. + # + def self.erase_line + "\e[K" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/graphics.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/graphics.rb new file mode 100644 index 0000000..e5c2d15 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/graphics.rb @@ -0,0 +1,72 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the graphic + # mode. + # + module Graphics + # @return [String] The escape sequence for a text attribute. + # + # @param [Symbol] key + # The name of the text attribute. + # + def self.text_attribute(key) + code = ANSI.code_for_key(key, TEXT_ATTRIBUTES) + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color. + # + # @param [Symbol] key + # The name of the color. + # + def self.foreground_color(key) + code = ANSI.code_for_key(key, COLORS) + 30 + graphics_mode(code) + end + + # @return [String] The escape sequence for a background color. + # + # @param [Symbol] key + # The name of the color. + # + def self.background_color(key) + code = ANSI.code_for_key(key, COLORS) + 40 + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color using the + # xterm-256 format. + # + # @param [Fixnum] color + # The value of the color. + # + def self.foreground_color_256(color) + code = [38, 5, color] + graphics_mode(code) + end + + # @return [String] The escape sequence for a background color using the + # xterm-256 format. + # + # @param [Fixnum] color + # The value of the color. + # + def self.background_color_256(color) + code = [48, 5, color] + graphics_mode(code) + end + + # @return [String] The escape sequence for a single or a list of codes. + # + # @param [Fixnum, Array] codes + # The code(s). + # + def self.graphics_mode(codes) + codes = Array(codes) + "\e[#{codes.join(';')}m" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/string_escaper.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/string_escaper.rb new file mode 100644 index 0000000..b6f461c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/ansi/string_escaper.rb @@ -0,0 +1,79 @@ +module CLAide + class ANSI + # Provides support to wrap strings in ANSI sequences according to the + # `ANSI.disabled` setting. + # + class StringEscaper < String + # @param [String] string The string to wrap. + # + def initialize(string) + super + end + + # @return [StringEscaper] Wraps a string in the given ANSI sequences, + # taking care of handling existing sequences for the same + # family of attributes (i.e. attributes terminated by the + # same sequence). + # + def wrap_in_ansi_sequence(open, close) + if ANSI.disabled + self + else + gsub!(close, open) + insert(0, open).insert(-1, close) + end + end + + # @return [StringEscaper] + # + # @param [Array] keys + # One or more keys corresponding to ANSI codes to apply to the + # string. + # + def apply(*keys) + keys.flatten.each do |key| + send(key) + end + self + end + + ANSI::COLORS.each_key do |key| + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each foreground color (e.g. #blue). + # + # The methods handle nesting of ANSI sequences. 
+ # + define_method key do + open = Graphics.foreground_color(key) + close = ANSI::DEFAULT_FOREGROUND_COLOR + wrap_in_ansi_sequence(open, close) + end + + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each background color (e.g. #on_blue). + # + # The methods handle nesting of ANSI sequences. + # + define_method "on_#{key}" do + open = Graphics.background_color(key) + close = ANSI::DEFAULT_BACKGROUND_COLOR + wrap_in_ansi_sequence(open, close) + end + end + + ANSI::TEXT_ATTRIBUTES.each_key do |key| + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each text attribute (e.g. #bold). + # + # The methods handle nesting of ANSI sequences. + # + define_method key do + open = Graphics.text_attribute(key) + close_code = TEXT_DISABLE_ATTRIBUTES[key] + close = Graphics.graphics_mode(close_code) + wrap_in_ansi_sequence(open, close) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/argument.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/argument.rb new file mode 100644 index 0000000..4d54f29 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/argument.rb @@ -0,0 +1,62 @@ +# encoding: utf-8 + +module CLAide + # This class is used to represent individual arguments to present to + # the command help banner + # + class Argument + # The string used for ellipsis / repeatable arguments in the banner + # + ELLIPSIS = '...' + + # @return [Array] + # List of alternate names for the parameters + attr_reader :names + + # @return [Boolean] + # Indicates if the argument is required (not optional) + # + attr_accessor :required + alias_method :required?, :required + + # @return [Boolean] + # Indicates if the argument is repeatable (= can appear multiple + # times in the command, which is indicated by '...' in the banner) + # + attr_accessor :repeatable + alias_method :repeatable?, :repeatable + + # @param [String,Array] names + # List of the names of each parameter alternatives. + # For convenience, if there is only one alternative for that + # parameter, we can use a String instead of a 1-item Array + # + # @param [Boolean] required + # true if the parameter is required, false if it is optional + # + # @param [Boolean] repeatable + # If true, the argument can appear multiple times in the command. + # In that case, an ellipsis will be appended after the argument + # in the help banner. + # + # @example + # + # # A required parameter that can be either a NAME or URL + # Argument.new(%(NAME URL), true) + # + def initialize(names, required, repeatable = false) + @names = Array(names) + @required = required + @repeatable = repeatable + end + + # @return [Boolean] true on equality + # + # @param [Argument] other the Argument compared against + # + def ==(other) + other.is_a?(Argument) && + names == other.names && required == other.required + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/argv.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/argv.rb new file mode 100644 index 0000000..35374f0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/argv.rb @@ -0,0 +1,329 @@ +# encoding: utf-8 + +module CLAide + # This class is responsible for parsing the parameters specified by the user, + # accessing individual parameters, and keep state by removing handled + # parameters. + # + class ARGV + # @return [ARGV] Coerces an object to the ARGV class if needed. 
+ # + # @param [Object] argv + # The object which should be converted to the ARGV class. + # + def self.coerce(argv) + if argv.is_a?(ARGV) + argv + else + ARGV.new(argv) + end + end + + # @param [Array<#to_s>] argv + # A list of parameters. + # + def initialize(argv) + @entries = Parser.parse(argv) + end + + # @return [Boolean] Whether or not there are any remaining unhandled + # parameters. + # + def empty? + @entries.empty? + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format a user specifies it in. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder # => ['--no-milk', '--sweetner=honey'] + # + def remainder + @entries.map do |type, (key, value)| + case type + when :arg + key + when :flag + "--#{'no-' if value == false}#{key}" + when :option + "--#{key}=#{value}" + end + end + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format the user specified them. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder! # => ['--no-milk', '--sweetner=honey'] + # argv.remainder # => [] + # + def remainder! + remainder.tap { @entries.clear } + end + + # @return [Hash] A hash that consists of the remaining flags and options + # and their values. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.options # => { 'milk' => false, 'sweetner' => 'honey' } + # + def options + options = {} + @entries.each do |type, (key, value)| + options[key] = value unless type == :arg + end + options + end + + # @return [Array] A list of the remaining arguments. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white', 'biscuit'] + # + def arguments + @entries.map { |type, value| value if type == :arg }.compact + end + + # @return [Array] A list of the remaining arguments. + # + # @note This version also removes the arguments from the remaining + # parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.arguments # => ['tea', 'white', 'biscuit'] + # argv.arguments! # => ['tea', 'white', 'biscuit'] + # argv.arguments # => [] + # + def arguments! + arguments = [] + while arg = shift_argument + arguments << arg + end + arguments + end + + # @return [String] The first argument in the remaining parameters. + # + # @note This will remove the argument from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white'] + # + def shift_argument + if index = @entries.find_index { |type, _| type == :arg } + entry = @entries[index] + @entries.delete_at(index) + entry.last + end + end + + # @return [Boolean, nil] Returns `true` if the flag by the specified `name` + # is among the remaining parameters and is not negated. + # + # @param [String] name + # The name of the flag to look for among the remaining parameters. + # + # @param [Boolean] default + # The value that is returned in case the flag is not among the + # remaining parameters. + # + # @note This will remove the flag from the remaining parameters. 
+ # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.flag?('milk') # => false + # argv.flag?('milk') # => nil + # argv.flag?('milk', true) # => true + # argv.remainder # => ['tea', '--sweetner=honey'] + # + def flag?(name, default = nil) + delete_entry(:flag, name, default, true) + end + + # @return [String, nil] Returns the value of the option by the specified + # `name` is among the remaining parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @param [String] default + # The value that is returned in case the option is not among the + # remaining parameters. + # + # @note This will remove the option from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.option('sweetner') # => 'honey' + # argv.option('sweetner') # => nil + # argv.option('sweetner', 'sugar') # => 'sugar' + # argv.remainder # => ['tea', '--no-milk'] + # + def option(name, default = nil) + delete_entry(:option, name, default) + end + + # @return [Array] Returns an array of all the values of the option + # with the specified `name` among the remaining + # parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @note This will remove the option from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['--ignore=foo', '--ignore=bar']) + # argv.all_options('include') # => [] + # argv.all_options('ignore') # => ['bar', 'foo'] + # argv.remainder # => [] + # + def all_options(name) + options = [] + while entry = option(name) + options << entry + end + options + end + + private + + # @return [Array>] A list of tuples for each + # non consumed parameter, where the first entry is the `type` and + # the second entry the actual parsed parameter. + # + attr_reader :entries + + # @return [Bool, String, Nil] Removes an entry from the entries list and + # returns its value or the default value if the entry was not + # present. + # + # @param [Symbol] requested_type + # The type of the entry. + # + # @param [String] requested_key + # The key of the entry. + # + # @param [Bool, String, Nil] default + # The value which should be returned if the entry is not present. + # + # @param [Bool] delete_all + # Whether all values matching `requested_type` and `requested_key` + # should be deleted. + # + def delete_entry(requested_type, requested_key, default, delete_all = false) + pred = proc do |type, (key, _value)| + requested_key == key && requested_type == type + end + entry = entries.reverse_each.find(&pred) + delete_all ? entries.delete_if(&pred) : entries.delete(entry) + + entry.nil? ? default : entry.last.last + end + + module Parser + # @return [Array>] A list of tuples for each + # parameter, where the first entry is the `type` and the second + # entry the actual parsed parameter. + # + # @example + # + # list = parse(['tea', '--no-milk', '--sweetner=honey']) + # list # => [[:arg, "tea"], + # [:flag, ["milk", false]], + # [:option, ["sweetner", "honey"]]] + # + def self.parse(argv) + entries = [] + copy = argv.map(&:to_s) + double_dash = false + while argument = copy.shift + next if !double_dash && double_dash = (argument == '--') + type = double_dash ? :arg : argument_type(argument) + parsed_argument = parse_argument(type, argument) + entries << [type, parsed_argument] + end + entries + end + + # @return [Symbol] Returns the type of an argument. 
The types can be + # either: `:arg`, `:flag`, `:option`. + # + # @param [String] argument + # The argument to check. + # + def self.argument_type(argument) + if argument.start_with?('--') + if argument.include?('=') + :option + else + :flag + end + else + :arg + end + end + + # @return [String, Array] Returns the argument itself for + # normal arguments (like commands) and a tuple with the key and + # the value for options and flags. + # + # @param [Symbol] type + # The type of the argument. + # + # @param [String] argument + # The argument to check. + # + def self.parse_argument(type, argument) + case type + when :arg + return argument + when :flag + return parse_flag(argument) + when :option + return argument[2..-1].split('=', 2) + end + end + + # @return [String, Array] Returns the parameter + # describing a flag arguments. + # + # @param [String] argument + # The flag argument to check. + # + def self.parse_flag(argument) + if argument.start_with?('--no-') + key = argument[5..-1] + value = false + else + key = argument[2..-1] + value = true + end + [key, value] + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command.rb new file mode 100644 index 0000000..6414b5c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command.rb @@ -0,0 +1,669 @@ +# encoding: utf-8 + +require 'claide/command/banner' +require 'claide/command/plugin_manager' +require 'claide/command/argument_suggester' + +module CLAide + # This class is used to build a command-line interface + # + # Each command is represented by a subclass of this class, which may be + # nested to create more granular commands. + # + # Following is an overview of the types of commands and what they should do. + # + # ### Any command type + # + # * Inherit from the command class under which the command should be nested. + # * Set {Command.summary} to a brief description of the command. + # * Override {Command.options} to return the options it handles and their + # descriptions and prepending them to the results of calling `super`. + # * Override {Command#initialize} if it handles any parameters. + # * Override {Command#validate!} to check if the required parameters the + # command handles are valid, or call {Command#help!} in case they’re not. + # + # ### Abstract command + # + # The following is needed for an abstract command: + # + # * Set {Command.abstract_command} to `true`. + # * Subclass the command. + # + # When the optional {Command.description} is specified, it will be shown at + # the top of the command’s help banner. + # + # ### Normal command + # + # The following is needed for a normal command: + # + # * Set {Command.arguments} to the description of the arguments this command + # handles. + # * Override {Command#run} to perform the actual work. + # + # When the optional {Command.description} is specified, it will be shown + # underneath the usage section of the command’s help banner. Otherwise this + # defaults to {Command.summary}. + # + class Command + class << self + # @return [Boolean] Indicates whether or not this command can actually + # perform work of itself, or that it only contains subcommands. + # + attr_accessor :abstract_command + alias_method :abstract_command?, :abstract_command + + # @return [Boolean] Indicates whether or not this command is used during + # command parsing and whether or not it should be shown in the + # help banner or to show its subcommands instead. 
+ # + # Setting this to `true` implies it’s an abstract command. + # + attr_reader :ignore_in_command_lookup + alias_method :ignore_in_command_lookup?, :ignore_in_command_lookup + def ignore_in_command_lookup=(flag) + @ignore_in_command_lookup = self.abstract_command = flag + end + + # @return [String] The subcommand which an abstract command should invoke + # by default. + # + attr_accessor :default_subcommand + + # @return [String] A brief description of the command, which is shown + # next to the command in the help banner of a parent command. + # + attr_accessor :summary + + # @return [String] A longer description of the command, which is shown + # underneath the usage section of the command’s help banner. Any + # indentation in this value will be ignored. + # + attr_accessor :description + + # @return [Array] The prefixes used to search for CLAide plugins. + # Plugins are loaded via their `_plugin.rb` file. + # Defaults to search for `claide` plugins. + # + def plugin_prefixes + @plugin_prefixes ||= ['claide'] + end + attr_writer :plugin_prefixes + + # @return [Array] + # A list of arguments the command handles. This is shown + # in the usage section of the command’s help banner. + # Each Argument in the array represents an argument by its name + # (or list of alternatives) and whether it's required or optional + # + def arguments + @arguments ||= [] + end + + # @param [Array] arguments + # An array listing the command arguments. + # Each Argument object describe the argument by its name + # (or list of alternatives) and whether it's required or optional + # + # @todo Remove deprecation + # + def arguments=(arguments) + if arguments.is_a?(Array) + if arguments.empty? || arguments[0].is_a?(Argument) + @arguments = arguments + else + self.arguments_array = arguments + end + else + self.arguments_string = arguments + end + end + + # @return [Boolean] The default value for {Command#ansi_output}. This + # defaults to `true` if `STDOUT` is connected to a TTY and + # `String` has the instance methods `#red`, `#green`, and + # `#yellow` (which are defined by, for instance, the + # [colored](https://github.com/defunkt/colored) gem). + # + def ansi_output + if @ansi_output.nil? + @ansi_output = STDOUT.tty? + end + @ansi_output + end + attr_writer :ansi_output + alias_method :ansi_output?, :ansi_output + + # @return [String] The name of the command. Defaults to a snake-cased + # version of the class’ name. + # + def command + @command ||= name.split('::').last.gsub(/[A-Z]+[a-z]*/) do |part| + part.downcase << '-' + end[0..-2] + end + attr_writer :command + + # @return [String] The version of the command. This value will be printed + # by the `--version` flag if used for the root command. + # + attr_accessor :version + end + + #-------------------------------------------------------------------------# + + # @return [String] The full command up-to this command, as it would be + # looked up during parsing. + # + # @note (see #ignore_in_command_lookup) + # + # @example + # + # BevarageMaker::Tea.full_command # => "beverage-maker tea" + # + def self.full_command + if superclass == Command + ignore_in_command_lookup? ? '' : command + else + if ignore_in_command_lookup? + superclass.full_command + else + "#{superclass.full_command} #{command}" + end + end + end + + # @return [Bool] Whether this is the root command class + # + def self.root_command? + superclass == CLAide::Command + end + + # @return [Array] A list of all command classes that are nested + # under this command. 
+ # + def self.subcommands + @subcommands ||= [] + end + + # @return [Array] A list of command classes that are nested under + # this command _or_ the subcommands of those command classes in + # case the command class should be ignored in command lookup. + # + def self.subcommands_for_command_lookup + subcommands.map do |subcommand| + if subcommand.ignore_in_command_lookup? + subcommand.subcommands_for_command_lookup + else + subcommand + end + end.flatten + end + + # Searches the list of subcommands that should not be ignored for command + # lookup for a subcommand with the given `name`. + # + # @param [String] name + # The name of the subcommand to be found. + # + # @return [CLAide::Command, nil] The subcommand, if found. + # + def self.find_subcommand(name) + subcommands_for_command_lookup.find { |sc| sc.command == name } + end + + # @visibility private + # + # Automatically registers a subclass as a subcommand. + # + def self.inherited(subcommand) + subcommands << subcommand + end + + DEFAULT_ROOT_OPTIONS = [ + ['--version', 'Show the version of the tool'], + ] + + DEFAULT_OPTIONS = [ + ['--verbose', 'Show more debugging information'], + ['--no-ansi', 'Show output without ANSI codes'], + ['--help', 'Show help banner of specified command'], + ] + + # Should be overridden by a subclass if it handles any options. + # + # The subclass has to combine the result of calling `super` and its own + # list of options. The recommended way of doing this is by concatenating + # to this classes’ own options. + # + # @return [Array] + # + # A list of option name and description tuples. + # + # @example + # + # def self.options + # [ + # ['--verbose', 'Print more info'], + # ['--help', 'Print help banner'], + # ].concat(super) + # end + # + def self.options + if root_command? + DEFAULT_ROOT_OPTIONS + DEFAULT_OPTIONS + else + DEFAULT_OPTIONS + end + end + + # Adds a new option for the current command. + # + # This method can be used in conjunction with overriding `options`. + # + # @return [void] + # + # @example + # + # option '--help', 'Print help banner ' + # + def self.option(name, description) + mod = Module.new do + define_method(:options) do + [ + [name, description], + ].concat(super()) + end + end + extend(mod) + end + private_class_method :option + + # Handles root commands options if appropriate. + # + # @param [ARGV] argv + # The parameters of the command. + # + # @return [Bool] Whether any root command option was handled. + # + def handle_root_options(argv) + return false unless self.class.root_command? + if argv.flag?('version') + print_version + return true + end + false + end + + # Prints the version of the command optionally including plugins. + # + def print_version + puts self.class.version + if verbose? + PluginManager.specifications.each do |spec| + puts "#{spec.name}: #{spec.version}" + end + end + end + + # Instantiates the command class matching the parameters through + # {Command.parse}, validates it through {Command#validate!}, and runs it + # through {Command#run}. + # + # @note The ANSI support is configured before running a command to allow + # the same process to run multiple commands with different + # settings. For example a process with ANSI output enabled might + # want to programmatically invoke another command with the output + # enabled. + # + # @param [Array, ARGV] argv + # A list of parameters. For instance, the standard `ARGV` constant, + # which contains the parameters passed to the program. 
+ # + # @return [void] + # + def self.run(argv = []) + plugin_prefixes.each do |plugin_prefix| + PluginManager.load_plugins(plugin_prefix) + end + + argv = ARGV.coerce(argv) + command = parse(argv) + ANSI.disabled = !command.ansi_output? + unless command.handle_root_options(argv) + command.validate! + command.run + end + rescue Object => exception + handle_exception(command, exception) + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] An instance of the command class that was matched by + # going through the arguments in the parameters and drilling down + # command classes. + # + def self.parse(argv) + argv = ARGV.coerce(argv) + cmd = argv.arguments.first + if cmd && subcommand = find_subcommand(cmd) + argv.shift_argument + subcommand.parse(argv) + elsif abstract_command? && default_subcommand + load_default_subcommand(argv) + else + new(argv) + end + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] Returns the default subcommand initialized with the + # given arguments. + # + def self.load_default_subcommand(argv) + unless subcommand = find_subcommand(default_subcommand) + raise 'Unable to find the default subcommand ' \ + "`#{default_subcommand}` for command `#{self}`." + end + result = subcommand.parse(argv) + result.invoked_as_default = true + result + end + + # Presents an exception to the user in a short manner in case of an + # `InformativeError` or in long form in other cases, + # + # @param [Command, nil] command + # The command from where the exception originated. + # + # @param [Object] exception + # The exception to present. + # + # @return [void] + # + def self.handle_exception(command, exception) + if exception.is_a?(InformativeError) + puts exception.message + if command.nil? || command.verbose? + puts + puts(*exception.backtrace) + end + exit exception.exit_status + else + report_error(exception) + end + end + + # Allows the application to perform custom error reporting, by overriding + # this method. + # + # @param [Exception] exception + # + # An exception that occurred while running a command through + # {Command.run}. + # + # @raise + # + # By default re-raises the specified exception. + # + # @return [void] + # + def self.report_error(exception) + plugins = PluginManager.plugins_involved_in_exception(exception) + unless plugins.empty? + puts '[!] The exception involves the following plugins:' \ + "\n - #{plugins.join("\n - ")}\n".ansi.yellow + end + raise exception + end + + # @visibility private + # + # @param [String] error_message + # The error message to show to the user. + # + # @param [Class] help_class + # The class to use to raise a ‘help’ error. + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def self.help!(error_message = nil, help_class = Help) + raise help_class.new(banner, error_message) + end + + # @visibility private + # + # Returns the banner for the command. + # + # @param [Class] banner_class + # The class to use to format help banners. + # + # @return [String] The banner for the command. + # + def self.banner(banner_class = Banner) + banner_class.new(self).formatted_banner + end + + # @visibility private + # + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def self.banner! 
+ puts banner + exit 0 + end + + #-------------------------------------------------------------------------# + + # Set to `true` if the user specifies the `--verbose` option. + # + # @note + # + # If you want to make use of this value for your own configuration, you + # should check the value _after_ calling the `super` {Command#initialize} + # implementation. + # + # @return [Boolean] + # + # Wether or not backtraces should be included when presenting the user an + # exception that includes the {InformativeError} module. + # + attr_accessor :verbose + alias_method :verbose?, :verbose + + # Set to `true` if {Command.ansi_output} returns `true` and the user + # did **not** specify the `--no-ansi` option. + # + # @note (see #verbose) + # + # @return [Boolean] + # + # Whether or not to use ANSI codes to prettify output. For instance, by + # default {InformativeError} exception messages will be colored red and + # subcommands in help banners green. + # + attr_accessor :ansi_output + alias_method :ansi_output?, :ansi_output + + # Set to `true` if initialized with a `--help` flag + # + # @return [Boolean] + # + # Whether the command was initialized with argv containing --help + # + attr_accessor :help_arg + alias_method :help?, :help_arg + + # Subclasses should override this method to remove the arguments/options + # they support from `argv` _before_ calling `super`. + # + # The `super` implementation sets the {#verbose} attribute based on whether + # or not the `--verbose` option is specified; and the {#ansi_output} + # attribute to `false` if {Command.ansi_output} returns `true`, but the + # user specified the `--no-ansi` option. + # + # @param [ARGV, Array] argv + # + # A list of (user-supplied) params that should be handled. + # + def initialize(argv) + argv = ARGV.coerce(argv) + @verbose = argv.flag?('verbose') + @ansi_output = argv.flag?('ansi', Command.ansi_output?) + @argv = argv + @help_arg = argv.flag?('help') + end + + # Convenience method. + # Instantiate the command and run it with the provided arguments at once. + # + # @note This method validate! the command before running it, but contrary to + # CLAide::Command::run, it does not load plugins nor exit on failure. + # It is up to the caller to rescue any possible exception raised. + # + # @param [String..., Array] args + # The arguments to initialize the command with + # + # @raise [Help] If validate! fails + # + def self.invoke(*args) + command = new(ARGV.new(args.flatten)) + command.validate! + command.run + end + + # @return [Bool] Whether the command was invoked by an abstract command by + # default. + # + attr_accessor :invoked_as_default + alias_method :invoked_as_default?, :invoked_as_default + + # Raises a Help exception if the `--help` option is specified, if `argv` + # still contains remaining arguments/options by the time it reaches this + # implementation, or when called on an ‘abstract command’. + # + # Subclasses should call `super` _before_ doing their own validation. This + # way when the user specifies the `--help` flag a help banner is shown, + # instead of possible actual validation errors. + # + # @raise [Help] + # + # @return [void] + # + def validate! + banner! if help? + unless @argv.empty? + argument = @argv.remainder.first + help! ArgumentSuggester.new(argument, self.class).suggestion + end + help! if self.class.abstract_command? + end + + # This method should be overridden by the command class to perform its + # work. 
+ # + # @return [void] + # + def run + raise 'A subclass should override the `CLAide::Command#run` method to ' \ + 'actually perform some work.' + end + + protected + + # Returns the class of the invoked command + # + # @return [Command] + # + def invoked_command_class + if invoked_as_default? + self.class.superclass + else + self.class + end + end + + # @param [String] error_message + # A custom optional error message + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def help!(error_message = nil) + invoked_command_class.help!(error_message) + end + + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def banner! + invoked_command_class.banner! + end + + #-------------------------------------------------------------------------# + + # Handle deprecated form of self.arguments as an + # Array> like in: + # + # self.arguments = [ ['NAME', :required], ['QUERY', :optional] ] + # + # @todo Remove deprecated format support + # + def self.arguments_array=(arguments) + warn '[!] The signature of CLAide#arguments has changed. ' \ + "Use CLAide::Argument (#{self}: `#{arguments}`)".ansi.yellow + @arguments = arguments.map do |(name_str, type)| + names = name_str.split('|') + required = (type == :required) + Argument.new(names, required) + end + end + + # Handle deprecated form of self.arguments as a String, like in: + # + # self.arguments = 'NAME [QUERY]' + # + # @todo Remove deprecated format support + # + def self.arguments_string=(arguments) + warn '[!] The specification of arguments as a string has been' \ + " deprecated #{self}: `#{arguments}`".ansi.yellow + @arguments = arguments.split(' ').map do |argument| + if argument.start_with?('[') + Argument.new(argument.sub(/\[(.*)\]/, '\1').split('|'), false) + else + Argument.new(argument.split('|'), true) + end + end + end + + # Handle depracted form of assigning a plugin prefix. + # + # @todo Remove deprecated form. + # + def self.plugin_prefix=(prefix) + warn '[!] The specification of a singular plugin prefix has been ' \ + "deprecated. Use `#{self}::plugin_prefixes` instead." + plugin_prefixes << prefix + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/argument_suggester.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/argument_suggester.rb new file mode 100644 index 0000000..a13575c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/argument_suggester.rb @@ -0,0 +1,99 @@ +# encoding: utf-8 + +module CLAide + class Command + class ArgumentSuggester + # @param [String] argument + # The unrecognized argument for which to make a suggestion. + # + # @param [Class] command_class + # The class of the command which encountered the unrecognized + # arguments. + # + def initialize(argument, command_class) + @argument, @command_class = argument, command_class + @argument_type = ARGV::Parser.argument_type(@argument) + end + + # @return [Array] The list of the valid arguments for a command + # according to the type of the argument. + # + def possibilities + case @argument_type + when :option, :flag + @command_class.options.map(&:first) + when :arg + @command_class.subcommands_for_command_lookup.map(&:command) + end + end + + # @return [String] Returns a suggested argument from `possibilities` based + # on the `levenshtein_distance` score. 
+ # + def suggested_argument + possibilities.sort_by do |element| + self.class.levenshtein_distance(@argument, element) + end.first + end + + # @return [String] Returns a message including a suggestion for the given + # suggestion. + # + def suggestion + argument_description = @argument_type == :arg ? 'command' : 'option' + if suggestion = suggested_argument + pretty_suggestion = self.class.prettify_suggestion(suggestion, + @argument_type) + "Unknown #{argument_description}: `#{@argument}`\n" \ + "Did you mean: #{pretty_suggestion}?" + else + "Unknown #{argument_description}: `#{@argument}`" + end + end + + # Prettifies the given validation suggestion according to the type. + # + # @param [String] suggestion + # The suggestion to prettify. + # + # @param [Type] argument_type + # The type of the suggestion: either `:command` or `:option`. + # + # @return [String] A handsome suggestion. + # + def self.prettify_suggestion(suggestion, argument_type) + case argument_type + when :option, :flag + suggestion = suggestion.to_s + suggestion.ansi.blue + when :arg + suggestion.ansi.green + end + end + + # Returns the Levenshtein distance between the given strings. + # From: http://rosettacode.org/wiki/Levenshtein_distance#Ruby + # + # @param [String] a + # The first string to compare. + # + # @param [String] b + # The second string to compare. + # + # @return [Fixnum] The distance between the strings. + def self.levenshtein_distance(a, b) + a, b = a.downcase, b.downcase + costs = Array(0..b.length) + (1..a.length).each do |i| + costs[0], nw = i, i - 1 + (1..b.length).each do |j| + costs[j], nw = [ + costs[j] + 1, costs[j - 1] + 1, a[i - 1] == b[j - 1] ? nw : nw + 1 + ].min, costs[j] + end + end + costs[b.length] + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/banner.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/banner.rb new file mode 100644 index 0000000..d87c699 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/banner.rb @@ -0,0 +1,307 @@ +# encoding: utf-8 + +module CLAide + class Command + # Creates the formatted banner to present as help of the provided command + # class. + # + class Banner + # @return [Class] The command for which the banner should be created. + # + attr_accessor :command + + # @param [Class] command @see command + # + def initialize(command) + @command = command + end + + # @return [String] The banner for the command. + # + def formatted_banner + sections = [ + ['Usage', formatted_usage_description], + ['Commands', formatted_subcommand_summaries], + ['Options', formatted_options_description], + ] + banner = sections.map do |(title, body)| + [prettify_title("#{title}:"), body] unless body.empty? + end.compact.join("\n\n") + banner + end + + private + + # @!group Banner sections + #-----------------------------------------------------------------------# + + # @return [String] The indentation of the text. + # + TEXT_INDENT = 6 + + # @return [Fixnum] The maximum width of the text. + # + MAX_WIDTH = TEXT_INDENT + 80 + + # @return [Fixnum] The minimum between a name and its description. + # + DESCRIPTION_SPACES = 3 + + # @return [Fixnum] The minimum between a name and its description. + # + SUBCOMMAND_BULLET_SIZE = 2 + + # @return [String] The section describing the usage of the command. 
+ # + def formatted_usage_description + message = command.description || command.summary || '' + message = TextWrapper.wrap_formatted_text(message, + TEXT_INDENT, + MAX_WIDTH) + message = prettify_message(command, message) + "#{signature}\n\n#{message}" + end + + # @return [String] The signature of the command. + # + def signature + full_command = command.full_command + sub_command = signature_sub_command + arguments = signature_arguments + result = prettify_signature(full_command, sub_command, arguments) + result.insert(0, '$ ') + result.insert(0, ' ' * (TEXT_INDENT - '$ '.size)) + end + + # @return [String] The subcommand indicator of the signature. + # + def signature_sub_command + return '[COMMAND]' if command.default_subcommand + return 'COMMAND' if command.subcommands.any? + end + + # @return [String] The arguments of the signature. + # + def signature_arguments + command.arguments.map do |arg| + names = arg.names.join('|') + names.concat(' ' + Argument::ELLIPSIS) if arg.repeatable? + arg.required? ? names : "[#{names}]" + end.join(' ') + end + + # @return [String] The section describing the subcommands of the command. + # + # @note The plus sign emphasizes the that the subcommands are added to + # the command. The square brackets conveys a sense of direction + # and indicates the gravitational force towards the default + # command. + # + def formatted_subcommand_summaries + subcommands = subcommands_for_banner + subcommands.map do |subcommand| + name = subcommand.command + bullet = (name == command.default_subcommand) ? '>' : '+' + name = "#{bullet} #{name}" + pretty_name = prettify_subcommand(name) + entry_description(pretty_name, subcommand.summary, name.size) + end.join("\n") + end + + # @return [String] The section describing the options of the command. + # + def formatted_options_description + options = command.options + options.map do |name, description| + pretty_name = prettify_option_name(name) + entry_description(pretty_name, description, name.size) + end.join("\n") + end + + # @return [String] The line describing a single entry (subcommand or + # option). + # + def entry_description(name, description, name_width) + max_name_width = compute_max_name_width + desc_start = max_name_width + (TEXT_INDENT - 2) + DESCRIPTION_SPACES + result = ' ' * (TEXT_INDENT - 2) + result << name + result << ' ' * DESCRIPTION_SPACES + result << ' ' * (max_name_width - name_width) + result << TextWrapper.wrap_with_indent(description, + desc_start, + MAX_WIDTH) + end + + # @!group Overrides + #-----------------------------------------------------------------------# + + # @return [String] A decorated title. + # + def prettify_title(title) + title.ansi.underline + end + + # @return [String] A decorated textual representation of the subcommand + # name. + # + def prettify_subcommand(name) + name.chomp.ansi.green + end + + # @return [String] A decorated textual representation of the option name. + # + # + def prettify_option_name(name) + name.chomp.ansi.blue + end + + # @return [String] A decorated textual representation of the command. + # + def prettify_signature(command, subcommand, argument) + components = [ + [command, :green], + [subcommand, :green], + [argument, :magenta], + ] + components.reduce('') do |memo, (string, ansi_key)| + next memo if !string || string.empty? + memo << ' ' << string.ansi.apply(ansi_key) + end.lstrip + end + + # @return [String] A decorated command description. 
+ # + def prettify_message(command, message) + message = message.dup + command.arguments.each do |arg| + arg.names.each do |name| + message.gsub!("`#{name.gsub(/\.{3}$/, '')}`", '\0'.ansi.magenta) + end + end + command.options.each do |(name, _description)| + message.gsub!("`#{name}`", '\0'.ansi.blue) + end + message + end + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # @return [Array] The list of the subcommands to use in the + # banner. + # + def subcommands_for_banner + command.subcommands_for_command_lookup.reject do |subcommand| + subcommand.summary.nil? + end.sort_by(&:command) + end + + # @return [Fixnum] The width of the largest command name or of the + # largest option name. Used to align all the descriptions. + # + def compute_max_name_width + widths = [] + widths << command.options.map { |option| option.first.size } + widths << subcommands_for_banner.map do |cmd| + cmd.command.size + SUBCOMMAND_BULLET_SIZE + end.max + widths.flatten.compact.max || 1 + end + + module TextWrapper + # @return [String] Wraps a formatted string (e.g. markdown) by stripping + # heredoc indentation and wrapping by word to the terminal width + # taking into account a maximum one, and indenting the string. + # Code lines (i.e. indented by four spaces) are not wrapped. + # + # @param [String] string + # The string to format. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_formatted_text(string, indent = 0, max_width = 80) + paragraphs = strip_heredoc(string).split("\n\n") + paragraphs = paragraphs.map do |paragraph| + if paragraph.start_with?(' ' * 4) + paragraph.gsub!(/\n/, "\n#{' ' * indent}") + else + paragraph = wrap_with_indent(paragraph, indent, max_width) + end + paragraph.insert(0, ' ' * indent).rstrip + end + paragraphs.join("\n\n") + end + + # @return [String] Wraps a string to the terminal width taking into + # account the given indentation. + # + # @param [String] string + # The string to indent. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_with_indent(string, indent = 0, max_width = 80) + if terminal_width == 0 + width = max_width + else + width = [terminal_width, max_width].min + end + + full_line = string.gsub("\n", ' ') + available_width = width - indent + space = ' ' * indent + word_wrap(full_line, available_width).split("\n").join("\n#{space}") + end + + # @return [String] Lifted straight from ActionView. Thanks guys! + # + def self.word_wrap(line, line_width) + line.gsub(/(.{1,#{line_width}})(\s+|$)/, "\\1\n").strip + end + + # @return [String] Lifted straight from ActiveSupport. Thanks guys! + # + def self.strip_heredoc(string) + if min = string.scan(/^[ \t]*(?=\S)/).min + string.gsub(/^[ \t]{#{min.size}}/, '') + else + string + end + end + + # @!group Private helpers + #---------------------------------------------------------------------# + + # @return [Fixnum] The width of the current terminal unless being piped. + # + def self.terminal_width + @terminal_width ||= + (!ENV['CLAIDE_DISABLE_AUTO_WRAP'] && + STDOUT.tty? 
&& + calculate_terminal_width) || 0 + end + + def self.calculate_terminal_width + require 'io/console' + STDOUT.winsize.last + rescue LoadError + (system('which tput > /dev/null 2>&1') && `tput cols`.to_i) || 0 + rescue + 0 + end + private_class_method :calculate_terminal_width + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/plugin_manager.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/plugin_manager.rb new file mode 100644 index 0000000..fd16047 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/command/plugin_manager.rb @@ -0,0 +1,123 @@ +# encoding: utf-8 + +module CLAide + class Command + # Handles plugin related logic logic for the `Command` class. + # + # Plugins are loaded the first time a command run and are identified by the + # prefix specified in the command class. Plugins must adopt the following + # conventions: + # + # - Support being loaded by a file located under the + # `lib/#{plugin_prefix}_plugin` relative path. + # - Be stored in a folder named after the plugin. + # + class PluginManager + # @return [Hash] The loaded plugins, + # grouped by plugin prefix. + # + def self.loaded_plugins + @loaded_plugins ||= {} + end + + # @return [Array] Loads plugins via RubyGems looking + # for files named after the `PLUGIN_PREFIX_plugin` and returns the + # specifications of the gems loaded successfully. + # Plugins are required safely. + # + def self.load_plugins(plugin_prefix) + loaded_plugins[plugin_prefix] ||= + plugin_gems_for_prefix(plugin_prefix).map do |spec, paths| + spec if safe_activate_and_require(spec, paths) + end.compact + end + + # @return [Array] The RubyGems specifications for the + # loaded plugins. + # + def self.specifications + loaded_plugins.values.flatten.uniq + end + + # @return [Array] The RubyGems specifications for the + # installed plugins that match the given `plugin_prefix`. + # + def self.installed_specifications_for_prefix(plugin_prefix) + loaded_plugins[plugin_prefix] || + plugin_gems_for_prefix(plugin_prefix).map(&:first) + end + + # @return [Array] The list of the plugins whose root path appears + # in the backtrace of an exception. + # + # @param [Exception] exception + # The exception to analyze. + # + def self.plugins_involved_in_exception(exception) + specifications.select do |gemspec| + exception.backtrace.any? do |line| + full_require_paths_for(gemspec).any? do |plugin_path| + line.include?(plugin_path) + end + end + end.map(&:name) + end + + # @group Helper Methods + + # @return [Array<[Gem::Specification, Array]>] + # Returns an array of tuples containing the specifications and + # plugin files to require for a given plugin prefix. + # + def self.plugin_gems_for_prefix(prefix) + glob = "#{prefix}_plugin#{Gem.suffix_pattern}" + Gem::Specification.latest_specs(true).map do |spec| + matches = spec.matches_for_glob(glob) + [spec, matches] unless matches.empty? + end.compact + end + + # Activates the given spec and requires the given paths. + # If any exception occurs it is caught and an + # informative message is printed. + # + # @param [Gem::Specification] spec + # The spec to be activated. + # + # @param [String] paths + # The paths to require. + # + # @return [Bool] Whether activation and requiring succeeded. 
+ # + def self.safe_activate_and_require(spec, paths) + spec.activate + paths.each { |path| require(path) } + true + rescue Exception => exception # rubocop:disable RescueException + message = "\n---------------------------------------------" + message << "\nError loading the plugin `#{spec.full_name}`.\n" + message << "\n#{exception.class} - #{exception.message}" + message << "\n#{exception.backtrace.join("\n")}" + message << "\n---------------------------------------------\n" + warn message.ansi.yellow + false + end + + def self.full_require_paths_for(gemspec) + if gemspec.respond_to?(:full_require_paths) + return gemspec.full_require_paths + end + + # RubyGems < 2.2 + gemspec.require_paths.map do |require_path| + if require_path.include?(gemspec.full_gem_path) + require_path + else + File.join(gemspec.full_gem_path, require_path) + end + end + end + private_class_method :full_require_paths_for + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/help.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/help.rb new file mode 100644 index 0000000..7d3b183 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/help.rb @@ -0,0 +1,58 @@ +# encoding: utf-8 + +module CLAide + require 'claide/informative_error' + + # The exception class that is raised to indicate a help banner should be + # shown while running {Command.run}. + # + class Help < StandardError + include InformativeError + + # @return [String] The banner containing the usage instructions of the + # command to show in the help. + # + attr_reader :banner + + # @return [String] An optional error message that will be shown before the + # help banner. + # + attr_reader :error_message + + # @param [String] banner @see banner + # @param [String] error_message @see error_message + # + # @note If an error message is provided, the exit status, used to + # terminate the program with, will be set to `1`, otherwise a {Help} + # exception is treated as not being a real error and exits with `0`. + # + def initialize(banner, error_message = nil) + @banner = banner + @error_message = error_message + @exit_status = @error_message.nil? ? 0 : 1 + end + + # @return [String] The optional error message, colored in red if + # {Command.ansi_output} is set to `true`. + # + def formatted_error_message + if error_message + message = "[!] #{error_message}" + prettify_error_message(message) + end + end + + # @return [String] + # + def prettify_error_message(message) + message.ansi.red + end + + # @return [String] The optional error message, combined with the help + # banner of the command. + # + def message + [formatted_error_message, banner].compact.join("\n\n") + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/informative_error.rb b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/informative_error.rb new file mode 100644 index 0000000..8a92bd0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/claide-1.0.3/lib/claide/informative_error.rb @@ -0,0 +1,21 @@ +# encoding: utf-8 + +module CLAide + # Including this module into an exception class will ensure that when raised, + # while running {Command.run}, only the message of the exception will be + # shown to the user. Unless disabled with the `--verbose` flag. + # + # In addition, the message will be colored red, if {Command.ansi_output} + # is set to `true`. + # + module InformativeError + # @return [Numeric] The exist status code that should be used to terminate + # the program with. Defaults to `1`. 
+ # + attr_writer :exit_status + + def exit_status + @exit_status ||= 1 + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/CHANGELOG.md new file mode 100644 index 0000000..f7d4921 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/CHANGELOG.md @@ -0,0 +1,511 @@ +## Current + +## Release v1.1.7 (6 August 2020) + +concurrent-ruby: + +* (#879) Consider falsy value on `Concurrent::Map#compute_if_absent` for fast non-blocking path +* (#876) Reset Async queue on forking, makes Async fork-safe +* (#856) Avoid running problematic code in RubyThreadLocalVar on MRI that occasionally results in segfault +* (#853) Introduce ThreadPoolExecutor without a Queue + +## Release v1.1.6, edge v0.6.0 (10 Feb 2020) + +concurrent-ruby: + +* (#841) Concurrent.disable_at_exit_handlers! is no longer needed and was deprecated. +* (#841) AbstractExecutorService#auto_terminate= was deprecated and has no effect. + Set :auto_terminate option instead when executor is initialized. + +## Release v1.1.6.pre1, edge v0.6.0.pre1 (26 Jan 2020) + +concurrent-ruby: + +* (#828) Allow to name executors, the name is also used to name their threads +* (#838) Implement #dup and #clone for structs +* (#821) Safer finalizers for thread local variables +* Documentation fixes +* (#814) Use Ruby's Etc.nprocessors if available +* (#812) Fix directory structure not to mess with packaging tools +* (#840) Fix termination of pools on JRuby + +concurrent-ruby-edge: + +* Add WrappingExecutor (#830) + +## Release v1.1.5, edge v0.5.0 (10 Mar 2019) + +concurrent-ruby: + +* fix potential leak of context on JRuby and Java 7 + +concurrent-ruby-edge: + +* Add finalized Concurrent::Cancellation +* Add finalized Concurrent::Throttle +* Add finalized Concurrent::Promises::Channel +* Add new Concurrent::ErlangActor + +## Release v1.1.4 (14 Dec 2018) + +* (#780) Remove java_alias of 'submit' method of Runnable to let executor service work on java 11 +* (#776) Fix NameError on defining a struct with a name which is already taken in an ancestor + +## Release v1.1.3 (7 Nov 2018) + +* (#775) fix partial require of the gem (although not officially supported) + +## Release v1.1.2 (6 Nov 2018) + +* (#773) more defensive 1.9.3 support + +## Release v1.1.1, edge v0.4.1 (1 Nov 2018) + +* (#768) add support for 1.9.3 back + +## Release v1.1.0, edge v0.4.0 (31 OCt 2018) (yanked) + +* (#768) yanked because of issues with removed 1.9.3 support + +## Release v1.1.0.pre2, edge v0.4.0.pre2 (18 Sep 2018) + +concurrent-ruby: + +* fixed documentation and README links +* fix Set for TruffleRuby and Rubinius +* use properly supported TruffleRuby APIs + +concurrent-ruby-edge: + +* add Promises.zip_futures_over_on + +## Release v1.1.0.pre1, edge v0.4.0.pre1 (15 Aug 2018) + +concurrent-ruby: + +* requires at least Ruby 2.0 +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/1.1.0/Concurrent/Promises.html) + are moved from `concurrent-ruby-edge` to `concurrent-ruby` +* Add support for TruffleRuby + * (#734) Fix Array/Hash/Set construction broken on TruffleRuby + * AtomicReference fixed +* CI stabilization +* remove sharp dependency edge -> core +* remove warnings +* documentation updates +* Exchanger is no longer documented as edge since it was already available in + `concurrent-ruby` +* (#644) Fix Map#each and #each_pair not returning enumerator outside of MRI +* (#659) Edge promises fail during error handling +* (#741) Raise on recursive Delay#value 
call +* (#727) #717 fix global IO executor on JRuby +* (#740) Drop support for CRuby 1.9, JRuby 1.7, Rubinius. +* (#737) Move AtomicMarkableReference out of Edge +* (#708) Prefer platform specific memory barriers +* (#735) Fix wrong expected exception in channel spec assertion +* (#729) Allow executor option in `Promise#then` +* (#725) fix timeout check to use timeout_interval +* (#719) update engine detection +* (#660) Add specs for Promise#zip/Promise.zip ordering +* (#654) Promise.zip execution changes +* (#666) Add thread safe set implementation +* (#651) #699 #to_s, #inspect should not output negative object IDs. +* (#685) Avoid RSpec warnings about raise_error +* (#680) Avoid RSpec monkey patching, persist spec results locally, use RSpec + v3.7.0 +* (#665) Initialize the monitor for new subarrays on Rubinius +* (#661) Fix error handling in edge promises + +concurrent-ruby-edge: + +* (#659) Edge promises fail during error handling +* Edge files clearly separated in `lib-edge` +* added ReInclude + +## Release v1.0.5, edge v0.3.1 (26 Feb 2017) + +concurrent-ruby: + +* Documentation for Event and Semaphore +* Use Unsafe#fullFence and #loadFence directly since the shortcuts were removed in JRuby +* Do not depend on org.jruby.util.unsafe.UnsafeHolder + +concurrent-ruby-edge: + +* (#620) Actors on Pool raise an error +* (#624) Delayed promises did not interact correctly with flatting + * Fix arguments yielded by callback methods +* Overridable default executor in promises factory methods +* Asking actor to terminate will always resolve to `true` + +## Release v1.0.4, edge v0.3.0 (27 Dec 2016) + +concurrent-ruby: + +* Nothing + +concurrent-ruby-edge: + +* New promises' API renamed, lots of improvements, edge bumped to 0.3.0 + * **Incompatible** with previous 0.2.3 version + * see https://github.com/ruby-concurrency/concurrent-ruby/pull/522 + +## Release v1.0.3 (17 Dec 2016) + +* Trigger execution of flattened delayed futures +* Avoid forking for processor_count if possible +* Semaphore Mutex and JRuby parity +* Adds Map#each as alias to Map#each_pair +* Fix uninitialized instance variables +* Make Fixnum, Bignum merger ready +* Allows Promise#then to receive an executor +* TimerSet now survives a fork +* Reject promise on any exception +* Allow ThreadLocalVar to be initialized with a block +* Support Alpha with `Concurrent::processor_count` +* Fixes format-security error when compiling ruby_193_compatible.h +* Concurrent::Atom#swap fixed: reraise the exceptions from block + +## Release v1.0.2 (2 May 2016) + +* Fix bug with `Concurrent::Map` MRI backend `#inspect` method +* Fix bug with `Concurrent::Map` MRI backend using `Hash#value?` +* Improved documentation and examples +* Minor updates to Edge + +## Release v1.0.1 (27 February 2016) + +* Fix "uninitialized constant Concurrent::ReentrantReadWriteLock" error. +* Better handling of `autoload` vs. `require`. +* Improved API for Edge `Future` zipping. +* Fix reference leak in Edge `Future` constructor . +* Fix bug which prevented thread pools from surviving a `fork`. +* Fix bug in which `TimerTask` did not correctly specify all its dependencies. +* Improved support for JRuby+Truffle +* Improved error messages. +* Improved documentation. +* Updated README and CONTRIBUTING. + +## Release v1.0.0 (13 November 2015) + +* Rename `attr_volatile_with_cas` to `attr_atomic` +* Add `clear_each` to `LockFreeStack` +* Update `AtomicReference` documentation +* Further updates and improvements to the synchronization layer. 
+* Performance and memory usage performance with `Actor` logging. +* Fixed `ThreadPoolExecutor` task count methods. +* Improved `Async` performance for both short and long-lived objects. +* Fixed bug in `LockFreeLinkedSet`. +* Fixed bug in which `Agent#await` triggered a validation failure. +* Further `Channel` updates. +* Adopted a project Code of Conduct +* Cleared interpreter warnings +* Fixed bug in `ThreadPoolExecutor` task count methods +* Fixed bug in 'LockFreeLinkedSet' +* Improved Java extension loading +* Handle Exception children in Edge::Future +* Continued improvements to channel +* Removed interpreter warnings. +* Shared constants now in `lib/concurrent/constants.rb` +* Refactored many tests. +* Improved synchronization layer/memory model documentation. +* Bug fix in Edge `Future#flat` +* Brand new `Channel` implementation in Edge gem. +* Simplification of `RubySingleThreadExecutor` +* `Async` improvements + - Each object uses its own `SingleThreadExecutor` instead of the global thread pool. + - No longers supports executor injection + - Much better documentation +* `Atom` updates + - No longer `Dereferenceable` + - Now `Observable` + - Added a `#reset` method +* Brand new `Agent` API and implementation. Now functionally equivalent to Clojure. +* Continued improvements to the synchronization layer +* Merged in the `thread_safe` gem + - `Concurrent::Array` + - `Concurrent::Hash` + - `Concurrent::Map` (formerly ThreadSafe::Cache) + - `Concurrent::Tuple` +* Minor improvements to Concurrent::Map +* Complete rewrite of `Exchanger` +* Removed all deprecated code (classes, methods, constants, etc.) +* Updated Agent, MutexAtomic, and BufferedChannel to inherit from Synchronization::Object. +* Many improved tests +* Some internal reorganization + +## Release v0.9.1 (09 August 2015) + +* Fixed a Rubiniux bug in synchronization object +* Fixed all interpreter warnings (except circular references) +* Fixed require statements when requiring `Atom` alone +* Significantly improved `ThreadLocalVar` on non-JRuby platforms +* Fixed error handling in Edge `Concurrent.zip` +* `AtomicFixnum` methods `#increment` and `#decrement` now support optional delta +* New `AtomicFixnum#update` method +* Minor optimizations in `ReadWriteLock` +* New `ReentrantReadWriteLock` class +* `ThreadLocalVar#bind` method is now public +* Refactored many tests + +## Release v0.9.0 (10 July 2015) + +* Updated `AtomicReference` + - `AtomicReference#try_update` now simply returns instead of raising exception + - `AtomicReference#try_update!` was added to raise exceptions if an update + fails. Note: this is the same behavior as the old `try_update` +* Pure Java implementations of + - `AtomicBoolean` + - `AtomicFixnum` + - `Semaphore` +* Fixed bug when pruning Ruby thread pools +* Fixed bug in time calculations within `ScheduledTask` +* Default `count` in `CountDownLatch` to 1 +* Use monotonic clock for all timers via `Concurrent.monotonic_time` + - Use `Process.clock_gettime(Process::CLOCK_MONOTONIC)` when available + - Fallback to `java.lang.System.nanoTime()` on unsupported JRuby versions + - Pure Ruby implementation for everything else + - Effects `Concurrent.timer`, `Concurrent.timeout`, `TimerSet`, `TimerTask`, and `ScheduledTask` +* Deprecated all clock-time based timer scheduling + - Only support scheduling by delay + - Effects `Concurrent.timer`, `TimerSet`, and `ScheduledTask` +* Added new `ReadWriteLock` class +* Consistent `at_exit` behavior for Java and Ruby thread pools. 
+* Added `at_exit` handler to Ruby thread pools (already in Java thread pools) + - Ruby handler stores the object id and retrieves from `ObjectSpace` + - JRuby disables `ObjectSpace` by default so that handler stores the object reference +* Added a `:stop_on_exit` option to thread pools to enable/disable `at_exit` handler +* Updated thread pool docs to better explain shutting down thread pools +* Simpler `:executor` option syntax for all abstractions which support this option +* Added `Executor#auto_terminate?` predicate method (for thread pools) +* Added `at_exit` handler to `TimerSet` +* Simplified auto-termination of the global executors + - Can now disable auto-termination of global executors + - Added shutdown/kill/wait_for_termination variants for global executors +* Can now disable auto-termination for *all* executors (the nuclear option) +* Simplified auto-termination of the global executors +* Deprecated terms "task pool" and "operation pool" + - New terms are "io executor" and "fast executor" + - New functions added with new names + - Deprecation warnings added to functions referencing old names +* Moved all thread pool related functions from `Concurrent::Configuration` to `Concurrent` + - Old functions still exist with deprecation warnings + - New functions have updated names as appropriate +* All high-level abstractions default to the "io executor" +* Fixed bug in `Actor` causing it to prematurely warm global thread pools on gem load + - This also fixed a `RejectedExecutionError` bug when running with minitest/autorun via JRuby +* Moved global logger up to the `Concurrent` namespace and refactored the code +* Optimized the performance of `Delay` + - Fixed a bug in which no executor option on construction caused block execution on a global thread pool +* Numerous improvements and bug fixes to `TimerSet` +* Fixed deadlock of `Future` when the handler raises Exception +* Added shared specs for more classes +* New concurrency abstractions including: + - `Atom` + - `Maybe` + - `ImmutableStruct` + - `MutableStruct` + - `SettableStruct` +* Created an Edge gem for unstable abstractions including + - `Actor` + - `Agent` + - `Channel` + - `Exchanger` + - `LazyRegister` + - **new Future Framework** - unified + implementation of Futures and Promises which combines Features of previous `Future`, + `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into single framework. It uses extensively + new synchronization layer to make all the paths **lock-free** with exception of blocking threads on `#wait`. + It offers better performance and does not block threads when not required. +* Actor framework changes: + - fixed reset loop in Pool + - Pool can use any actor as a worker, abstract worker class is no longer needed. + - Actor events not have format `[:event_name, *payload]` instead of just the Symbol. + - Actor now uses new Future/Promise Framework instead of `IVar` for better interoperability + - Behaviour definition array was simplified to `[BehaviourClass1, [BehaviourClass2, *initialization_args]]` + - Linking behavior responds to :linked message by returning array of linked actors + - Supervised behavior is removed in favour of just Linking + - RestartingContext is supervised by default now, `supervise: true` is not required any more + - Events can be private and public, so far only difference is that Linking will + pass to linked actors only public messages. 
Adding private :restarting and + :resetting events which are send before the actor restarts or resets allowing + to add callbacks to cleanup current child actors. + - Print also object_id in Reference to_s + - Add AbstractContext#default_executor to be able to override executor class wide + - Add basic IO example + - Documentation somewhat improved + - All messages should have same priority. It's now possible to send `actor << job1 << job2 << :terminate!` and + be sure that both jobs are processed first. +* Refactored `Channel` to use newer synchronization objects +* Added `#reset` and `#cancel` methods to `TimerSet` +* Added `#cancel` method to `Future` and `ScheduledTask` +* Refactored `TimerSet` to use `ScheduledTask` +* Updated `Async` with a factory that initializes the object +* Deprecated `Concurrent.timer` and `Concurrent.timeout` +* Reduced max threads on pure-Ruby thread pools (abends around 14751 threads) +* Moved many private/internal classes/modules into "namespace" modules +* Removed brute-force killing of threads in tests +* Fixed a thread pool bug when the operating system cannot allocate more threads + +## Release v0.8.0 (25 January 2015) + +* C extension for MRI have been extracted into the `concurrent-ruby-ext` companion gem. + Please see the README for more detail. +* Better variable isolation in `Promise` and `Future` via an `:args` option +* Continued to update intermittently failing tests + +## Release v0.7.2 (24 January 2015) + +* New `Semaphore` class based on [java.util.concurrent.Semaphore](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Semaphore.html) +* New `Promise.all?` and `Promise.any?` class methods +* Renamed `:overflow_policy` on thread pools to `:fallback_policy` +* Thread pools still accept the `:overflow_policy` option but display a warning +* Thread pools now implement `fallback_policy` behavior when not running (rather than universally rejecting tasks) +* Fixed minor `set_deref_options` constructor bug in `Promise` class +* Fixed minor `require` bug in `ThreadLocalVar` class +* Fixed race condition bug in `TimerSet` class +* Fixed race condition bug in `TimerSet` class +* Fixed signal bug in `TimerSet#post` method +* Numerous non-functional updates to clear warning when running in debug mode +* Fixed more intermittently failing tests +* Tests now run on new Travis build environment +* Multiple documentation updates + +## Release v0.7.1 (4 December 2014) + +Please see the [roadmap](https://github.com/ruby-concurrency/concurrent-ruby/issues/142) for more information on the next planned release. 
+ +* Added `flat_map` method to `Promise` +* Added `zip` method to `Promise` +* Fixed bug with logging in `Actor` +* Improvements to `Promise` tests +* Removed actor-experimental warning +* Added an `IndirectImmediateExecutor` class +* Allow disabling auto termination of global executors +* Fix thread leaking in `ThreadLocalVar` (uses `Ref` gem on non-JRuby systems) +* Fix thread leaking when pruning pure-Ruby thread pools +* Prevent `Actor` from using an `ImmediateExecutor` (causes deadlock) +* Added missing synchronizations to `TimerSet` +* Fixed bug with return value of `Concurrent::Actor::Utils::Pool#ask` +* Fixed timing bug in `TimerTask` +* Fixed bug when creating a `JavaThreadPoolExecutor` with minimum pool size of zero +* Removed confusing warning when not using native extenstions +* Improved documentation + +## Release v0.7.0 (13 August 2014) + +* Merge the [atomic](https://github.com/ruby-concurrency/atomic) gem + - Pure Ruby `MutexAtomic` atomic reference class + - Platform native atomic reference classes `CAtomic`, `JavaAtomic`, and `RbxAtomic` + - Automated [build process](https://github.com/ruby-concurrency/rake-compiler-dev-box) + - Fat binary releases for [multiple platforms](https://rubygems.org/gems/concurrent-ruby/versions) including Windows (32/64), Linux (32/64), OS X (64-bit), Solaris (64-bit), and JRuby +* C native `CAtomicBoolean` +* C native `CAtomicFixnum` +* Refactored intermittently failing tests +* Added `dataflow!` and `dataflow_with!` methods to match `Future#value!` method +* Better handling of timeout in `Agent` +* Actor Improvements + - Fine-grained implementation using chain of behaviors. Each behavior is responsible for single aspect like: `Termination`, `Pausing`, `Linking`, `Supervising`, etc. Users can create custom Actors easily based on their needs. + - Supervision was added. `RestartingContext` will pause on error waiting on its supervisor to decide what to do next ( options are `:terminate!`, `:resume!`, `:reset!`, `:restart!`). Supervising behavior also supports strategies `:one_for_one` and `:one_for_all`. + - Linking was added to be able to monitor actor's events like: `:terminated`, `:paused`, `:restarted`, etc. + - Dead letter routing added. Rejected envelopes are collected in a configurable actor (default: `Concurrent::Actor.root.ask!(:dead_letter_routing)`) + - Old `Actor` class removed and replaced by new implementation previously called `Actress`. `Actress` was kept as an alias for `Actor` to keep compatibility. + - `Utils::Broadcast` actor which allows Publish–subscribe pattern. 
+* More executors for managing serialized operations + - `SerializedExecution` mixin module + - `SerializedExecutionDelegator` for serializing *any* executor +* Updated `Async` with serialized execution +* Updated `ImmediateExecutor` and `PerThreadExecutor` with full executor service lifecycle +* Added a `Delay` to root `Actress` initialization +* Minor bug fixes to thread pools +* Refactored many intermittently failing specs +* Removed Java interop warning `executor.rb:148 warning: ambiguous Java methods found, using submit(java.lang.Runnable)` +* Fixed minor bug in `RubyCachedThreadPool` overflow policy +* Updated tests to use [RSpec 3.0](http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3) +* Removed deprecated `Actor` class +* Better support for Rubinius + +## Release v0.6.1 (14 June 2014) + +* Many improvements to `Concurrent::Actress` +* Bug fixes to `Concurrent::RubyThreadPoolExecutor` +* Fixed several brittle tests +* Moved documentation to http://ruby-concurrency.github.io/concurrent-ruby/frames.html + +## Release v0.6.0 (25 May 2014) + +* Added `Concurrent::Observable` to encapsulate our thread safe observer sets +* Improvements to new `Channel` +* Major improvements to `CachedThreadPool` and `FixedThreadPool` +* Added `SingleThreadExecutor` +* Added `Current::timer` function +* Added `TimerSet` executor +* Added `AtomicBoolean` +* `ScheduledTask` refactoring +* Pure Ruby and JRuby-optimized `PriorityQueue` classes +* Updated `Agent` behavior to more closely match Clojure +* Observer sets support block callbacks to the `add_observer` method +* New algorithm for thread creation in `RubyThreadPoolExecutor` +* Minor API updates to `Event` +* Rewritten `TimerTask` now an `Executor` instead of a `Runnable` +* Fixed many brittle specs +* Renamed `FixedThreadPool` and `CachedThreadPool` to `RubyFixedThreadPool` and `RubyCachedThreadPool` +* Created JRuby optimized `JavaFixedThreadPool` and `JavaCachedThreadPool` +* Consolidated fixed thread pool tests into `spec/concurrent/fixed_thread_pool_shared.rb` and `spec/concurrent/cached_thread_pool_shared.rb` +* `FixedThreadPool` now subclasses `RubyFixedThreadPool` or `JavaFixedThreadPool` as appropriate +* `CachedThreadPool` now subclasses `RubyCachedThreadPool` or `JavaCachedThreadPool` as appropriate +* New `Delay` class +* `Concurrent::processor_count` helper function +* New `Async` module +* Renamed `NullThreadPool` to `PerThreadExecutor` +* Deprecated `Channel` (we are planning a new implementation based on [Go](http://golangtutorials.blogspot.com/2011/06/channels-in-go.html)) +* Added gem-level [configuration](http://robots.thoughtbot.com/mygem-configure-block) +* Deprecated `$GLOBAL_THREAD_POOL` in lieu of gem-level configuration +* Removed support for Ruby [1.9.2](https://www.ruby-lang.org/en/news/2013/12/17/maintenance-of-1-8-7-and-1-9-2/) +* New `RubyThreadPoolExecutor` and `JavaThreadPoolExecutor` classes +* All thread pools now extend the appropriate thread pool executor classes +* All thread pools now support `:overflow_policy` (based on Java's [reject policies](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html)) +* Deprecated `UsesGlobalThreadPool` in lieu of explicit `:executor` option (dependency injection) on `Future`, `Promise`, and `Agent` +* Added `Concurrent::dataflow_with(executor, *inputs)` method to support executor dependency injection for dataflow +* Software transactional memory with `TVar` and `Concurrent::atomically` +* First implementation of [new, 
high-performance](https://github.com/ruby-concurrency/concurrent-ruby/pull/49) `Channel` +* `Actor` is deprecated in favor of new experimental actor implementation [#73](https://github.com/ruby-concurrency/concurrent-ruby/pull/73). To avoid namespace collision it is living in `Actress` namespace until `Actor` is removed in next release. + +## Release v0.5.0 + +This is the most significant release of this gem since its inception. This release includes many improvements and optimizations. It also includes several bug fixes. The major areas of focus for this release were: + +* Stability improvements on Ruby versions with thread-level parallelism ([JRuby](http://jruby.org/) and [Rubinius](http://rubini.us/)) +* Creation of new low-level concurrency abstractions +* Internal refactoring to use the new low-level abstractions + +Most of these updates had no effect on the gem API. There are a few notable exceptions which were unavoidable. Please read the [release notes](API-Updates-in-v0.5.0) for more information. + +Specific changes include: + +* New class `IVar` +* New class `MVar` +* New class `ThreadLocalVar` +* New class `AtomicFixnum` +* New class method `dataflow` +* New class `Condition` +* New class `CountDownLatch` +* New class `DependencyCounter` +* New class `SafeTaskExecutor` +* New class `CopyOnNotifyObserverSet` +* New class `CopyOnWriteObserverSet` +* `Future` updated with `execute` API +* `ScheduledTask` updated with `execute` API +* New `Promise` API +* `Future` now extends `IVar` +* `Postable#post?` now returns an `IVar` +* Thread safety fixes to `Dereferenceable` +* Thread safety fixes to `Obligation` +* Thread safety fixes to `Supervisor` +* Thread safety fixes to `Event` +* Various other thread safety (race condition) fixes +* Refactored brittle tests +* Implemented pending tests +* Added JRuby and Rubinius as Travis CI build targets +* Added [CodeClimate](https://codeclimate.com/) code review +* Improved YARD documentation diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/Gemfile b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/Gemfile new file mode 100644 index 0000000..1b8d9fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/Gemfile @@ -0,0 +1,42 @@ +source 'https://rubygems.org' + +require File.join(File.dirname(__FILE__), 'lib/concurrent-ruby/concurrent/version') +require File.join(File.dirname(__FILE__ ), 'lib/concurrent-ruby-edge/concurrent/edge/version') +require File.join(File.dirname(__FILE__ ), 'lib/concurrent-ruby/concurrent/utility/engine') + +no_path = ENV['NO_PATH'] +options = no_path ? {} : { path: '.' } + +gem 'concurrent-ruby', Concurrent::VERSION, options +gem 'concurrent-ruby-edge', Concurrent::EDGE_VERSION, options +gem 'concurrent-ruby-ext', Concurrent::VERSION, options.merge(platform: :mri) + +group :development do + gem 'rake', (Concurrent.ruby_version :<, 2, 2, 0) ? 
'~> 12.0' : '~> 13.0' + gem 'rake-compiler', '~> 1.0', '>= 1.0.7' + gem 'rake-compiler-dock', '~> 1.0' + gem 'pry', '~> 0.11', platforms: :mri +end + +group :documentation, optional: true do + gem 'yard', '~> 0.9.0', require: false + gem 'redcarpet', '~> 3.0', platforms: :mri # understands github markdown + gem 'md-ruby-eval', '~> 0.6' +end + +group :testing do + gem 'rspec', '~> 3.7' + gem 'timecop', '~> 0.7.4' + gem 'sigdump', require: false +end + +# made opt-in since it will not install on jruby 1.7 +group :coverage, optional: !ENV['COVERAGE'] do + gem 'simplecov', '~> 0.16.0', require: false + gem 'coveralls', '~> 0.8.2', require: false +end + +group :benchmarks, optional: true do + gem 'benchmark-ips', '~> 2.7' + gem 'bench9000' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/LICENSE.txt b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/LICENSE.txt new file mode 100644 index 0000000..1026f28 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) Jerry D'Antonio -- released under the MIT license. + +http://www.opensource.org/licenses/mit-license.php + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/README.md b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/README.md new file mode 100644 index 0000000..2e89c2a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/README.md @@ -0,0 +1,384 @@ +# Concurrent Ruby + +[![Gem Version](https://badge.fury.io/rb/concurrent-ruby.svg)](http://badge.fury.io/rb/concurrent-ruby) +[![Build Status](https://travis-ci.org/ruby-concurrency/concurrent-ruby.svg?branch=master)](https://travis-ci.org/ruby-concurrency/concurrent-ruby) +[![Build status](https://ci.appveyor.com/api/projects/status/iq8aboyuu3etad4w?svg=true)](https://ci.appveyor.com/project/rubyconcurrency/concurrent-ruby) +[![License](https://img.shields.io/badge/license-MIT-green.svg)](http://opensource.org/licenses/MIT) +[![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-devs%20%26%20users-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) + +Modern concurrency tools for Ruby. 
Inspired by +[Erlang](http://www.erlang.org/doc/reference_manual/processes.html), +[Clojure](http://clojure.org/concurrent_programming), +[Scala](http://akka.io/), +[Haskell](http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell), +[F#](http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx), +[C#](http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx), +[Java](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html), +and classic concurrency patterns. + + + +The design goals of this gem are: + +* Be an 'unopinionated' toolbox that provides useful utilities without debating which is better + or why +* Remain free of external gem dependencies +* Stay true to the spirit of the languages providing inspiration +* But implement in a way that makes sense for Ruby +* Keep the semantics as idiomatic Ruby as possible +* Support features that make sense in Ruby +* Exclude features that don't make sense in Ruby +* Be small, lean, and loosely coupled +* Thread-safety +* Backward compatibility + +## Contributing + +**This gem depends on +[contributions](https://github.com/ruby-concurrency/concurrent-ruby/graphs/contributors) and we +appreciate your help. Would you like to contribute? Great! Have a look at +[issues with `looking-for-contributor` label](https://github.com/ruby-concurrency/concurrent-ruby/issues?q=is%3Aissue+is%3Aopen+label%3Alooking-for-contributor).** And if you pick something up let us know on the issue. + +## Thread Safety + +*Concurrent Ruby makes one of the strongest thread safety guarantees of any Ruby concurrency +library, providing consistent behavior and guarantees on all four of the main Ruby interpreters +(MRI/CRuby, JRuby, Rubinius, TruffleRuby).* + +Every abstraction in this library is thread safe. Specific thread safety guarantees are documented +with each abstraction. + +It is critical to remember, however, that Ruby is a language of mutable references. *No* +concurrency library for Ruby can ever prevent the user from making thread safety mistakes (such as +sharing a mutable object between threads and modifying it on both threads) or from creating +deadlocks through incorrect use of locks. All the library can do is provide safe abstractions which +encourage safe practices. Concurrent Ruby provides more safe concurrency abstractions than any +other Ruby library, many of which support the mantra of +["Do not communicate by sharing memory; instead, share memory by communicating"](https://blog.golang.org/share-memory-by-communicating). +Concurrent Ruby is also the only Ruby library which provides a full suite of thread safe and +immutable variable types and data structures. + +We've also initiated discussion to document [memory model](docs-source/synchronization.md) of Ruby which +would provide consistent behaviour and guarantees on all four of the main Ruby interpreters +(MRI/CRuby, JRuby, Rubinius, TruffleRuby). + +## Features & Documentation + +**The primary site for documentation is the automatically generated +[API documentation](http://ruby-concurrency.github.io/concurrent-ruby/index.html) which is up to +date with latest release.** This readme matches the master so may contain new stuff not yet +released. + +We also have a [IRC (gitter)](https://gitter.im/ruby-concurrency/concurrent-ruby). 
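+
+As a quick, minimal sketch of the kind of thread-safe abstraction documented there, many threads
+can update a single counter without an explicit lock by using `Concurrent::AtomicFixnum`:
+
+```ruby
+require 'concurrent'
+
+# Illustrative sketch only: ten threads increment one shared, atomic counter.
+counter = Concurrent::AtomicFixnum.new(0)
+
+threads = 10.times.map do
+  Thread.new { 1_000.times { counter.increment } }
+end
+threads.each(&:join)
+
+puts counter.value # => 10000 on MRI, JRuby, and TruffleRuby alike; a plain Integer
+                   # updated with `+= 1` carries no such guarantee on any of them
+```
+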
+ +### Versioning + +* `concurrent-ruby` uses [Semantic Versioning](http://semver.org/) +* `concurrent-ruby-ext` has always same version as `concurrent-ruby` +* `concurrent-ruby-edge` will always be 0.y.z therefore following + [point 4](http://semver.org/#spec-item-4) applies *"Major version zero + (0.y.z) is for initial development. Anything may change at any time. The + public API should not be considered stable."* However we additionally use + following rules: + * Minor version increment means incompatible changes were made + * Patch version increment means only compatible changes were made + + +#### General-purpose Concurrency Abstractions + +* [Async](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Async.html): + A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's + [gen_server](http://www.erlang.org/doc/man/gen_server.html). +* [ScheduledTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ScheduledTask.html): + Like a Future scheduled for a specific future time. +* [TimerTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TimerTask.html): + A Thread that periodically wakes up to perform work at regular intervals. +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html): + Unified implementation of futures and promises which combines features of previous `Future`, + `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and (partially) `TimerTask` into a single + framework. It extensively uses the new synchronization layer to make all the features + **non-blocking** and **lock-free**, with the exception of obviously blocking operations like + `#wait`, `#value`. It also offers better performance. + +#### Thread-safe Value Objects, Structures, and Collections + +Collection classes that were originally part of the (deprecated) `thread_safe` gem: + +* [Array](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Array.html) A thread-safe + subclass of Ruby's standard [Array](http://ruby-doc.org/core/Array.html). +* [Hash](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Hash.html) A thread-safe + subclass of Ruby's standard [Hash](http://ruby-doc.org/core/Hash.html). +* [Set](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Set.html) A thread-safe + subclass of Ruby's standard [Set](http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html). +* [Map](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Map.html) A hash-like object + that should have much better performance characteristics, especially under high concurrency, + than `Concurrent::Hash`. +* [Tuple](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Tuple.html) A fixed size + array with volatile (synchronized, thread safe) getters/setters. + +Value objects inspired by other languages: + +* [Maybe](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Maybe.html) A thread-safe, + immutable object representing an optional value, based on + [Haskell Data.Maybe](https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html). + +Structure classes derived from Ruby's [Struct](http://ruby-doc.org/core/Struct.html): + +* [ImmutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ImmutableStruct.html) + Immutable struct where values are set at construction and cannot be changed later. 
+* [MutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MutableStruct.html) + Synchronized, mutable struct where values can be safely changed at any time. +* [SettableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/SettableStruct.html) + Synchronized, write-once struct where values can be set at most once, either at construction + or any time thereafter. + +Thread-safe variables: + +* [Agent](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Agent.html): A way to + manage shared, mutable, *asynchronous*, independent state. Based on Clojure's + [Agent](http://clojure.org/agents). +* [Atom](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Atom.html): A way to manage + shared, mutable, *synchronous*, independent state. Based on Clojure's + [Atom](http://clojure.org/atoms). +* [AtomicBoolean](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicBoolean.html) + A boolean value that can be updated atomically. +* [AtomicFixnum](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicFixnum.html) + A numeric value that can be updated atomically. +* [AtomicReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicReference.html) + An object reference that may be updated atomically. +* [Exchanger](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Exchanger.html) + A synchronization point at which threads can pair and swap elements within pairs. Based on + Java's [Exchanger](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html). +* [MVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MVar.html) A synchronized + single element container. Based on Haskell's + [MVar](https://hackage.haskell.org/package/base-4.8.1.0/docs/Control-Concurrent-MVar.html) and + Scala's [MVar](http://docs.typelevel.org/api/scalaz/nightly/index.html#scalaz.concurrent.MVar$). +* [ThreadLocalVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ThreadLocalVar.html) + A variable where the value is different for each thread. +* [TVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TVar.html) A transactional + variable implementing software transactional memory (STM). Based on Clojure's + [Ref](http://clojure.org/refs). + +#### Java-inspired ThreadPools and Other Executors + +* See the [thread pool](http://ruby-concurrency.github.io/concurrent-ruby/master/file.thread_pools.html) + overview, which also contains a list of other Executors available. + +#### Thread Synchronization Classes and Algorithms + +* [CountDownLatch](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CountDownLatch.html) + A synchronization object that allows one thread to wait on multiple other threads. +* [CyclicBarrier](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CyclicBarrier.html) + A synchronization aid that allows a set of threads to all wait for each other to reach a common barrier point. +* [Event](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Event.html) Old school + kernel-style event. +* [ReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReadWriteLock.html) + A lock that supports multiple readers but only one writer. +* [ReentrantReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReentrantReadWriteLock.html) + A read/write lock with reentrant and upgrade features. 
+* [Semaphore](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Semaphore.html) + A counting-based locking mechanism that uses permits. +* [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicMarkableReference.html) + +#### Deprecated + +Deprecated features are still available and bugs are being fixed, but new features will not be added. + +* ~~[Future](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Future.html): + An asynchronous operation that produces a value.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). + * ~~[.dataflow](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent.html#dataflow-class_method): + Built on Futures, Dataflow allows you to create a task that will be scheduled when all of + its data dependencies are available.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[Promise](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promise.html): Similar + to Futures, with more features.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[Delay](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Delay.html) Lazy evaluation + of a block yielding an immutable result. Based on Clojure's + [delay](https://clojuredocs.org/clojure.core/delay).~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[IVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/IVar.html) Similar to a + "future" but can be manually assigned once, after which it becomes immutable.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). + +### Edge Features + +These are available in the `concurrent-ruby-edge` companion gem. + +These features are under active development and may change frequently. They are expected not to +keep backward compatibility (there may also lack tests and documentation). Semantic versions will +be obeyed though. Features developed in `concurrent-ruby-edge` are expected to move to +`concurrent-ruby` when final. + +* [Actor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Actor.html): Implements + the Actor Model, where concurrent actors exchange messages. + *Status: Partial documentation and tests; depends on new future/promise framework; stability is good.* +* [Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Channel.html): + Communicating Sequential Processes ([CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes)). + Functionally equivalent to Go [channels](https://tour.golang.org/concurrency/2) with additional + inspiration from Clojure [core.async](https://clojure.github.io/core.async/). 
+ *Status: Partial documentation and tests.* +* [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LazyRegister.html) +* [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Edge/LockFreeLinkedSet.html) + *Status: will be moved to core soon.* +* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LockFreeStack.html) + *Status: missing documentation and tests.* +* [Promises::Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises/Channel.html) + A first-in-first-out channel that accepts messages with the push family of methods and returns + messages with the pop family of methods. + Pop and push operations can be represented as futures; see `#pop_op` and `#push_op`. + The capacity of the channel can be limited to support back pressure; use the capacity option in `#initialize`. + The `#pop` method blocks and `#pop_op` returns a pending future if there is no message in the channel. + If the capacity is limited, the `#push` method blocks and `#push_op` returns a pending future. +* [Cancellation](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Cancellation.html) + The Cancellation abstraction provides cooperative cancellation. + + The standard methods `Thread#raise` and `Thread#kill` available in Ruby + are very dangerous (see the blog posts linked below). + Therefore concurrent-ruby provides an alternative. + + * + * + * + + It provides an object which represents a task which can be executed; the task has to get a reference to the object and periodically, cooperatively check that it has not been cancelled. + Good practices to make tasks cancellable: + * check cancellation on every cycle of a loop which does significant work, + * do all blocking actions in a loop with a timeout, then on timeout check cancellation + and, if not cancelled, block again with the timeout +* [Throttle](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Throttle.html) + A tool for managing the concurrency level of tasks. +* [ErlangActor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ErlangActor.html) + An actor implementation which precisely matches Erlang actor behaviour. + Requires at least Ruby 2.1, otherwise it is not loaded. +* [WrappingExecutor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/WrappingExecutor.html) + A delegating executor which modifies each task before the task is given to + the target executor it delegates to. + +## Supported Ruby versions + +* MRI 2.0 and above +* JRuby 9000 +* TruffleRuby +* Any Ruby interpreter that is compliant with Ruby 2.0 or newer + +In practice we still support MRI 1.9.3 and JRuby 1.7.27, but we are looking at ways to drop that support. +Java 8 is preferred for JRuby, but every Java version on which JRuby 9000 runs is supported. + +Legacy support for Rubinius is kept but no longer maintained; if you would like to help, +please respond to [#739](https://github.com/ruby-concurrency/concurrent-ruby/issues/739). + +## Usage + +Everything within this gem can be loaded simply by requiring it: + +```ruby +require 'concurrent' +``` + +*Requiring only specific abstractions from Concurrent Ruby is not yet supported.* + +To use the tools in the Edge gem it must be required separately: + +```ruby +require 'concurrent-edge' +``` + +If the library does not behave as expected, `Concurrent.use_stdlib_logger(Logger::DEBUG)` could +help reveal the problem.
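+
+For example, the `Promises` framework mentioned above can be exercised right after the require.
+The snippet below is only an illustrative sketch (the `sleep` stands in for real work), not an
+excerpt from the API documentation:
+
+```ruby
+require 'concurrent'
+
+# Build a small asynchronous pipeline: the first block runs on a background
+# thread pool and `then` chains a further step without blocking the caller.
+future = Concurrent::Promises.
+  future { sleep 0.1; 21 }.
+  then { |v| v * 2 }
+
+puts future.value! # blocks only here, re-raises any error from the chain => 42
+```
+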
+ +## Installation + +```shell +gem install concurrent-ruby +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby', require: 'concurrent' +``` + +and run `bundle install` from your shell. + +### Edge Gem Installation + +The Edge gem must be installed separately from the core gem: + +```shell +gem install concurrent-ruby-edge +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby-edge', require: 'concurrent-edge' +``` + +and run `bundle install` from your shell. + + +### C Extensions for MRI + +Potential performance improvements may be achieved under MRI by installing optional C extensions. +To minimise installation errors the C extensions are available in the `concurrent-ruby-ext` +extension gem. `concurrent-ruby` and `concurrent-ruby-ext` are always released together with same +version. Simply install the extension gem too: + +```ruby +gem install concurrent-ruby-ext +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby-ext' +``` + +and run `bundle install` from your shell. + +In code it is only necessary to + +```ruby +require 'concurrent' +``` + +The `concurrent-ruby` gem will automatically detect the presence of the `concurrent-ruby-ext` gem +and load the appropriate C extensions. + +#### Note For gem developers + +No gems should depend on `concurrent-ruby-ext`. Doing so will force C extensions on your users. The +best practice is to depend on `concurrent-ruby` and let users to decide if they want C extensions. + +## Maintainers + +* [Petr Chalupa](https://github.com/pitr-ch) (lead maintainer, point-of-contact) +* [Jerry D'Antonio](https://github.com/jdantonio) (creator) +* [Chris Seaton](https://github.com/chrisseaton) + +### Special Thanks to + +* [Brian Durand](https://github.com/bdurand) for the `ref` gem +* [Charles Oliver Nutter](https://github.com/headius) for the `atomic` and `thread_safe` gems +* [thedarkone](https://github.com/thedarkone) for the `thread_safe` gem + +and to the past maintainers + +* [Michele Della Torre](https://github.com/mighe) +* [Paweł Obrok](https://github.com/obrok) +* [Lucas Allan](https://github.com/lucasallan) + +and to [Ruby Association](https://www.ruby.or.jp/en/) for sponsoring a project +["Enhancing Ruby’s concurrency tooling"](https://www.ruby.or.jp/en/news/20181106) in 2018. + +## License and Copyright + +*Concurrent Ruby* is free software released under the +[MIT License](http://www.opensource.org/licenses/MIT). + +The *Concurrent Ruby* [logo](https://raw.githubusercontent.com/ruby-concurrency/concurrent-ruby/master/docs-source/logo/concurrent-ruby-logo-300x300.png) was +designed by [David Jones](https://twitter.com/zombyboy). It is Copyright © 2014 +[Jerry D'Antonio](https://twitter.com/jerrydantonio). All Rights Reserved. 
diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/Rakefile b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/Rakefile new file mode 100644 index 0000000..2ce9ab6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/Rakefile @@ -0,0 +1,332 @@ +require_relative 'lib/concurrent-ruby/concurrent/version' +require_relative 'lib/concurrent-ruby-edge/concurrent/edge/version' +require_relative 'lib/concurrent-ruby/concurrent/utility/engine' + +if Concurrent.ruby_version :<, 2, 0, 0 + # @!visibility private + module Kernel + def __dir__ + File.dirname __FILE__ + end + end +end + +core_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby.gemspec') +ext_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-ext.gemspec') +edge_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-edge.gemspec') + +require 'rake/javaextensiontask' + +ENV['JRUBY_HOME'] = ENV['CONCURRENT_JRUBY_HOME'] if ENV['CONCURRENT_JRUBY_HOME'] && !Concurrent.on_jruby? + +Rake::JavaExtensionTask.new('concurrent_ruby', core_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' +end + +unless Concurrent.on_jruby? + require 'rake/extensiontask' + + Rake::ExtensionTask.new('concurrent_ruby_ext', ext_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby-ext' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' + ext.source_pattern = '*.{c,h}' + + ext.cross_compile = true + ext.cross_platform = ['x86-mingw32', 'x64-mingw32'] + end +end + +require 'rake_compiler_dock' +namespace :repackage do + desc '* with Windows fat distributions' + task :all do + Dir.chdir(__dir__) do + # store gems in vendor cache for docker + sh 'bundle package' + + # build only the jar file not the whole gem for java platform, the jar is part the concurrent-ruby-x.y.z.gem + Rake::Task['lib/concurrent-ruby/concurrent/concurrent_ruby.jar'].invoke + + # build all gem files + RakeCompilerDock.sh 'bundle install --local && bundle exec rake cross native package --trace' + end + end +end + +require 'rubygems' +require 'rubygems/package_task' + +Gem::PackageTask.new(core_gemspec) {} if core_gemspec +Gem::PackageTask.new(ext_gemspec) {} if ext_gemspec && !Concurrent.on_jruby? +Gem::PackageTask.new(edge_gemspec) {} if edge_gemspec + +CLEAN.include('lib/concurrent-ruby/concurrent/2.*', 'lib/concurrent-ruby/concurrent/*.jar') + +begin + require 'rspec' + require 'rspec/core/rake_task' + + RSpec::Core::RakeTask.new(:spec) + + namespace :spec do + desc '* Configured for ci' + RSpec::Core::RakeTask.new(:ci) do |t| + options = %w[ --color + --backtrace + --order defined + --format documentation + --tag ~notravis ] + t.rspec_opts = [*options].join(' ') + end + + desc '* test packaged and installed gems instead of local files' + task :installed do + Dir.chdir(__dir__) do + sh "gem install pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem install pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" if Concurrent.on_cruby? 
+ sh "gem install pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" + ENV['NO_PATH'] = 'true' + sh 'bundle update' + sh 'bundle exec rake spec:ci' + end + end + end + + desc 'executed in CI' + task :ci => [:compile, 'spec:ci'] + + task :default => [:clobber, :compile, :spec] +rescue LoadError => e + puts 'RSpec is not installed, skipping test task definitions: ' + e.message +end + +current_yard_version_name = Concurrent::VERSION + +begin + require 'yard' + require 'md_ruby_eval' + require_relative 'support/yard_full_types' + + common_yard_options = ['--no-yardopts', + '--no-document', + '--no-private', + '--embed-mixins', + '--markup', 'markdown', + '--title', 'Concurrent Ruby', + '--template', 'default', + '--template-path', 'yard-template', + '--default-return', 'undocumented'] + + desc 'Generate YARD Documentation (signpost, master)' + task :yard => ['yard:signpost', 'yard:master'] + + namespace :yard do + + desc '* eval markdown files' + task :eval_md do + Dir.chdir File.join(__dir__, 'docs-source') do + sh 'bundle exec md-ruby-eval --auto' + end + end + + task :update_readme do + Dir.chdir __dir__ do + content = File.read(File.join('README.md')). + gsub(/\[([\w ]+)\]\(http:\/\/ruby-concurrency\.github\.io\/concurrent-ruby\/master\/.*\)/) do |_| + case $1 + when 'LockFreeLinkedSet' + "{Concurrent::Edge::#{$1} #{$1}}" + when '.dataflow' + '{Concurrent.dataflow Concurrent.dataflow}' + when 'thread pool' + '{file:thread_pools.md thread pool}' + else + "{Concurrent::#{$1} #{$1}}" + end + end + FileUtils.mkpath 'tmp' + File.write 'tmp/README.md', content + end + end + + define_yard_task = -> name do + output_dir = "docs/#{name}" + + removal_name = "remove.#{name}" + task removal_name do + Dir.chdir __dir__ do + FileUtils.rm_rf output_dir + end + end + + desc "* of #{name} into subdir #{name}" + YARD::Rake::YardocTask.new(name) do |yard| + yard.options.push( + '--output-dir', output_dir, + '--main', 'tmp/README.md', + *common_yard_options) + yard.files = ['./lib/concurrent-ruby/**/*.rb', + './lib/concurrent-ruby-edge/**/*.rb', + './ext/concurrent_ruby_ext/**/*.c', + '-', + 'docs-source/thread_pools.md', + 'docs-source/promises.out.md', + 'docs-source/medium-example.out.rb', + 'LICENSE.txt', + 'CHANGELOG.md'] + end + Rake::Task[name].prerequisites.push removal_name, + # 'yard:eval_md', + 'yard:update_readme' + end + + define_yard_task.call current_yard_version_name + define_yard_task.call 'master' + + desc "* signpost for versions" + YARD::Rake::YardocTask.new(:signpost) do |yard| + yard.options.push( + '--output-dir', 'docs', + '--main', 'docs-source/signpost.md', + *common_yard_options) + yard.files = ['no-lib'] + end + + define_uptodate_task = -> name do + namespace name do + desc "** ensure that #{name} generated documentation is matching the source code" + task :uptodate do + Dir.chdir(__dir__) do + begin + FileUtils.cp_r 'docs', 'docs-copy', verbose: true + Rake::Task["yard:#{name}"].invoke + sh 'diff -r docs/ docs-copy/' do |ok, res| + unless ok + begin + STDOUT.puts 'Command failed. Continue? 
(y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + ensure + FileUtils.rm_rf 'docs-copy', verbose: true + end + end + end + end + end + + define_uptodate_task.call current_yard_version_name + define_uptodate_task.call 'master' + end + +rescue LoadError => e + puts 'YARD is not installed, skipping documentation task definitions: ' + e.message +end + +desc 'build, test, and publish the gem' +task :release => ['release:checks', 'release:build', 'release:test', 'release:publish'] + +namespace :release do + # Depends on environment of @pitr-ch + + mri_version = '2.6.5' + jruby_version = 'jruby-9.2.9.0' + + task :checks => "yard:#{current_yard_version_name}:uptodate" do + Dir.chdir(__dir__) do + sh 'test -z "$(git status --porcelain)"' do |ok, res| + unless ok + begin + STDOUT.puts 'Command failed. Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + sh 'git fetch' + sh 'test $(git show-ref --verify --hash refs/heads/master) = ' + + '$(git show-ref --verify --hash refs/remotes/origin/master)' do |ok, res| + unless ok + begin + STDOUT.puts 'Command failed. Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + end + end + + desc '* build all *.gem files necessary for release' + task :build => [:clobber, 'repackage:all'] + + desc '* test actual installed gems instead of cloned repository on MRI and JRuby' + task :test do + Dir.chdir(__dir__) do + old = ENV['RBENV_VERSION'] + + ENV['RBENV_VERSION'] = mri_version + sh 'rbenv version' + sh 'bundle exec rake spec:installed' + + ENV['RBENV_VERSION'] = jruby_version + sh 'rbenv version' + sh 'bundle exec rake spec:installed' + + puts 'Windows build is untested' + + ENV['RBENV_VERSION'] = old + end + end + + desc '* do all nested steps' + task :publish => ['publish:ask', 'publish:tag', 'publish:rubygems', 'publish:post_steps'] + + namespace :publish do + publish_edge = false + + task :ask do + begin + STDOUT.puts 'Do you want to publish anything? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + begin + STDOUT.puts 'Do you want to publish edge? 
(y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + publish_edge = input == 'y' + end + + desc '** tag HEAD with current version and push to github' + task :tag do + Dir.chdir(__dir__) do + sh "git tag v#{Concurrent::VERSION}" + sh "git push origin v#{Concurrent::VERSION}" + sh "git tag edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + sh "git push origin edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + end + end + + desc '** push all *.gem files to rubygems' + task :rubygems do + Dir.chdir(__dir__) do + sh "gem push pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem push pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" if publish_edge + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x64-mingw32.gem" + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x86-mingw32.gem" + end + end + + desc '** print post release steps' + task :post_steps do + puts 'Manually: create a release on GitHub with relevant changelog part' + puts 'Manually: send email same as release with relevant changelog part' + puts 'Manually: tweet' + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/ConcurrentRubyService.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/ConcurrentRubyService.java new file mode 100644 index 0000000..fb6be96 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/ConcurrentRubyService.java @@ -0,0 +1,17 @@ +import org.jruby.Ruby; +import org.jruby.runtime.load.BasicLibraryService; + +import java.io.IOException; + +public class ConcurrentRubyService implements BasicLibraryService { + + public boolean basicLoad(final Ruby runtime) throws IOException { + new com.concurrent_ruby.ext.AtomicReferenceLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicBooleanLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicFixnumLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaSemaphoreLibrary().load(runtime, false); + new com.concurrent_ruby.ext.SynchronizationLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JRubyMapBackendLibrary().load(runtime, false); + return true; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java new file mode 100644 index 0000000..dfa9e77 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java @@ -0,0 +1,175 @@ +package com.concurrent_ruby.ext; + +import java.lang.reflect.Field; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +/** + * This library adds an atomic reference type to JRuby for use in the atomic + * library. We do a native version to avoid the implicit value coercion that + * normally happens through JI. 
+ * + * @author headius + */ +public class AtomicReferenceLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicReference", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + try { + sun.misc.Unsafe.class.getMethod("getAndSetObject", Object.class); + atomicCls.setAllocator(JRUBYREFERENCE8_ALLOCATOR); + } catch (Exception e) { + // leave it as Java 6/7 version + } + atomicCls.defineAnnotatedMethods(JRubyReference.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBYREFERENCE8_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference8(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyReference", parent="Object") + public static class JRubyReference extends RubyObject { + volatile IRubyObject reference; + + static final sun.misc.Unsafe UNSAFE; + static final long referenceOffset; + + static { + try { + UNSAFE = UnsafeHolder.U; + Class k = JRubyReference.class; + referenceOffset = UNSAFE.objectFieldOffset(k.getDeclaredField("reference")); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public JRubyReference(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + UNSAFE.putObject(this, referenceOffset, context.nil); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + UNSAFE.putObject(this, referenceOffset, value); + return context.nil; + } + + @JRubyMethod(name = {"get", "value"}) + public IRubyObject get() { + return reference; + } + + @JRubyMethod(name = {"set", "value="}) + public IRubyObject set(IRubyObject newValue) { + UNSAFE.putObjectVolatile(this, referenceOffset, newValue); + return newValue; + } + + @JRubyMethod(name = {"compare_and_set", "compare_and_swap"}) + public IRubyObject compare_and_set(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + if (expectedValue instanceof RubyNumeric) { + // numerics are not always idempotent in Ruby, so we need to do slower logic + return compareAndSetNumeric(context, expectedValue, newValue); + } + + return runtime.newBoolean(UNSAFE.compareAndSwapObject(this, referenceOffset, expectedValue, newValue)); + } + + @JRubyMethod(name = {"get_and_set", "swap"}) + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // less-efficient version for Java 6 and 7 + while (true) { + IRubyObject oldValue = get(); + if (UNSAFE.compareAndSwapObject(this, referenceOffset, oldValue, newValue)) { + return oldValue; + } + } + } + + private IRubyObject compareAndSetNumeric(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + // loop until: + // * reference CAS would succeed for same-valued objects + // * current and expected have different values as determined by #equals + while (true) { + IRubyObject current = reference; + + if (!(current instanceof RubyNumeric)) { + // old value is not numeric, CAS fails + return runtime.getFalse(); + } + + RubyNumeric currentNumber = (RubyNumeric)current; + if 
(!currentNumber.equals(expectedValue)) { + // current number does not equal expected, fail CAS + return runtime.getFalse(); + } + + // check that current has not changed, or else allow loop to repeat + boolean success = UNSAFE.compareAndSwapObject(this, referenceOffset, current, newValue); + if (success) { + // value is same and did not change in interim...success + return runtime.getTrue(); + } + } + } + } + + private static final class UnsafeHolder { + private UnsafeHolder(){} + + public static final sun.misc.Unsafe U = loadUnsafe(); + + private static sun.misc.Unsafe loadUnsafe() { + try { + Class unsafeClass = Class.forName("sun.misc.Unsafe"); + Field f = unsafeClass.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + } catch (Exception e) { + return null; + } + } + } + + public static class JRubyReference8 extends JRubyReference { + public JRubyReference8(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @Override + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // efficient version for Java 8 + return (IRubyObject)UNSAFE.getAndSetObject(this, referenceOffset, newValue); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java new file mode 100644 index 0000000..a09f916 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java @@ -0,0 +1,248 @@ +package com.concurrent_ruby.ext; + +import org.jruby.*; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8; +import com.concurrent_ruby.ext.jsr166e.nounsafe.*; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.Map; + +import static org.jruby.runtime.Visibility.PRIVATE; + +/** + * Native Java implementation to avoid the JI overhead. 
+ * + * @author thedarkone + */ +public class JRubyMapBackendLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyModule thread_safeMod = concurrentMod.defineModuleUnder("Collection"); + RubyClass jrubyRefClass = thread_safeMod.defineClassUnder("JRubyMapBackend", runtime.getObject(), BACKEND_ALLOCATOR); + jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); + jrubyRefClass.defineAnnotatedMethods(JRubyMapBackend.class); + } + + private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyMapBackend(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyMapBackend", parent="Object") + public static class JRubyMapBackend extends RubyObject { + // Defaults used by the CHM + static final int DEFAULT_INITIAL_CAPACITY = 16; + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); + + private ConcurrentHashMap map; + + private static ConcurrentHashMap newCHM(int initialCapacity, float loadFactor) { + if (CAN_USE_UNSAFE_CHM) { + return new ConcurrentHashMapV8(initialCapacity, loadFactor); + } else { + return new com.concurrent_ruby.ext.jsr166e.nounsafe.ConcurrentHashMapV8(initialCapacity, loadFactor); + } + } + + private static ConcurrentHashMap newCHM() { + return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + private static boolean canUseUnsafeCHM() { + try { + new com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8(); // force class load and initialization + return true; + } catch (Throwable t) { // ensuring we really do catch everything + // Doug's Unsafe setup errors always have this "Could not ini.." message + if (isCausedBySecurityException(t)) { + return false; + } + throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t)); + } + } + + private static boolean isCausedBySecurityException(Throwable t) { + while (t != null) { + if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { + return true; + } + t = t.getCause(); + } + return false; + } + + public JRubyMapBackend(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + map = newCHM(); + return context.getRuntime().getNil(); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject options) { + map = toCHM(context, options); + return context.getRuntime().getNil(); + } + + private ConcurrentHashMap toCHM(ThreadContext context, IRubyObject options) { + Ruby runtime = context.getRuntime(); + if (!options.isNil() && options.respondsTo("[]")) { + IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); + IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); + int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; + float loadFactor = !rLoadFactor.isNil() ? 
(float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; + return newCHM(initialCapacity, loadFactor); + } else { + return newCHM(); + } + } + + @JRubyMethod(name = "[]", required = 1) + public IRubyObject op_aref(ThreadContext context, IRubyObject key) { + IRubyObject value; + return ((value = map.get(key)) == null) ? context.getRuntime().getNil() : value; + } + + @JRubyMethod(name = {"[]="}, required = 2) + public IRubyObject op_aset(IRubyObject key, IRubyObject value) { + map.put(key, value); + return value; + } + + @JRubyMethod + public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { + IRubyObject result = map.putIfAbsent(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { + return map.computeIfAbsent(key, new ConcurrentHashMap.Fun() { + @Override + public IRubyObject apply(IRubyObject key) { + return block.yieldSpecific(context); + } + }); + } + + @JRubyMethod + public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { + IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { + return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); + } + + @JRubyMethod(name = "key?", required = 1) + public RubyBoolean has_key_p(IRubyObject key) { + return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); + } + + @JRubyMethod + public IRubyObject key(IRubyObject value) { + final IRubyObject key = map.findKey(value); + return key == null ? getRuntime().getNil() : key; + } + + @JRubyMethod + public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { + IRubyObject result = map.replace(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { + IRubyObject result = map.put(key, value); + return result == null ? 
getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject delete(IRubyObject key) { + IRubyObject result = map.remove(key); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { + return getRuntime().newBoolean(map.remove(key, value)); + } + + @JRubyMethod + public IRubyObject clear() { + map.clear(); + return this; + } + + @JRubyMethod + public IRubyObject each_pair(ThreadContext context, Block block) { + for (Map.Entry entry : map.entrySet()) { + block.yieldSpecific(context, entry.getKey(), entry.getValue()); + } + return this; + } + + @JRubyMethod + public RubyFixnum size(ThreadContext context) { + return context.getRuntime().newFixnum(map.size()); + } + + @JRubyMethod + public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { + return map.getValueOrDefault(key, defaultValue); + } + + @JRubyMethod(visibility = PRIVATE) + public JRubyMapBackend initialize_copy(ThreadContext context, IRubyObject other) { + map = newCHM(); + return this; + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java new file mode 100644 index 0000000..b566076 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java @@ -0,0 +1,93 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNil; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class JavaAtomicBooleanLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicBoolean", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + atomicCls.defineAnnotatedMethods(JavaAtomicBoolean.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicBoolean(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicBoolean", parent = "Object") + public static class JavaAtomicBoolean extends RubyObject { + + private AtomicBoolean atomicBoolean; + + public JavaAtomicBoolean(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + atomicBoolean = new AtomicBoolean(convertRubyBooleanToJavaBoolean(value)); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + atomicBoolean = new AtomicBoolean(); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject value() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + + @JRubyMethod(name = "true?") + public IRubyObject isAtomicTrue() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + 
+ @JRubyMethod(name = "false?") + public IRubyObject isAtomicFalse() { + return getRuntime().newBoolean((atomicBoolean.get() == false)); + } + + @JRubyMethod(name = "value=") + public IRubyObject setAtomic(ThreadContext context, IRubyObject newValue) { + atomicBoolean.set(convertRubyBooleanToJavaBoolean(newValue)); + return context.nil; + } + + @JRubyMethod(name = "make_true") + public IRubyObject makeTrue() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(false, true)); + } + + @JRubyMethod(name = "make_false") + public IRubyObject makeFalse() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(true, false)); + } + + private boolean convertRubyBooleanToJavaBoolean(IRubyObject newValue) { + if (newValue instanceof RubyBoolean.False || newValue instanceof RubyNil) { + return false; + } else { + return true; + } + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java new file mode 100755 index 0000000..672bfc0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java @@ -0,0 +1,113 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import org.jruby.runtime.Block; + +public class JavaAtomicFixnumLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicFixnum", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaAtomicFixnum.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicFixnum(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicFixnum", parent = "Object") + public static class JavaAtomicFixnum extends RubyObject { + + private AtomicLong atomicLong; + + public JavaAtomicFixnum(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + this.atomicLong = new AtomicLong(0); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.atomicLong = new AtomicLong(rubyFixnumToLong(value)); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject getValue() { + return getRuntime().newFixnum(atomicLong.get()); + } + + @JRubyMethod(name = "value=") + public IRubyObject setValue(ThreadContext context, IRubyObject newValue) { + atomicLong.set(rubyFixnumToLong(newValue)); + return context.nil; + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject increment() { + return getRuntime().newFixnum(atomicLong.incrementAndGet()); + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject 
increment(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(delta)); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement() { + return getRuntime().newFixnum(atomicLong.decrementAndGet()); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(-delta)); + } + + @JRubyMethod(name = "compare_and_set") + public IRubyObject compareAndSet(ThreadContext context, IRubyObject expect, IRubyObject update) { + return getRuntime().newBoolean(atomicLong.compareAndSet(rubyFixnumToLong(expect), rubyFixnumToLong(update))); + } + + @JRubyMethod + public IRubyObject update(ThreadContext context, Block block) { + for (;;) { + long _oldValue = atomicLong.get(); + IRubyObject oldValue = getRuntime().newFixnum(_oldValue); + IRubyObject newValue = block.yield(context, oldValue); + if (atomicLong.compareAndSet(_oldValue, rubyFixnumToLong(newValue))) { + return newValue; + } + } + } + + private long rubyFixnumToLong(IRubyObject value) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError("value must be a Fixnum"); + } + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java new file mode 100755 index 0000000..a3e847d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java @@ -0,0 +1,159 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.Semaphore; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +public class JavaSemaphoreLibrary { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaSemaphore", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaSemaphore.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaSemaphore(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaSemaphore", parent = "Object") + public static class JavaSemaphore extends RubyObject { + + private JRubySemaphore semaphore; + + public JavaSemaphore(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.semaphore = new JRubySemaphore(rubyFixnumInt(value, "count")); + return context.nil; + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context, IRubyObject value) throws InterruptedException { + this.semaphore.acquire(rubyFixnumToPositiveInt(value, "permits")); + return context.nil; + } + + @JRubyMethod(name = 
"available_permits") + public IRubyObject availablePermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.availablePermits()); + } + + @JRubyMethod(name = "drain_permits") + public IRubyObject drainPermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.drainPermits()); + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context) throws InterruptedException { + this.semaphore.acquire(1); + return context.nil; + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context) throws InterruptedException { + return getRuntime().newBoolean(semaphore.tryAcquire(1)); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits) throws InterruptedException { + return getRuntime().newBoolean(semaphore.tryAcquire(rubyFixnumToPositiveInt(permits, "permits"))); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits, IRubyObject timeout) throws InterruptedException { + return getRuntime().newBoolean( + semaphore.tryAcquire( + rubyFixnumToPositiveInt(permits, "permits"), + rubyNumericToLong(timeout, "timeout"), + java.util.concurrent.TimeUnit.SECONDS) + ); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context) { + this.semaphore.release(1); + return getRuntime().newBoolean(true); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context, IRubyObject value) { + this.semaphore.release(rubyFixnumToPositiveInt(value, "permits")); + return getRuntime().newBoolean(true); + } + + @JRubyMethod(name = "reduce_permits") + public IRubyObject reducePermits(ThreadContext context, IRubyObject reduction) throws InterruptedException { + this.semaphore.publicReducePermits(rubyFixnumToNonNegativeInt(reduction, "reduction")); + return context.nil; + } + + private int rubyFixnumInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be integer"); + } + } + + private int rubyFixnumToNonNegativeInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() >= 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a non-negative integer"); + } + } + + private int rubyFixnumToPositiveInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() > 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be an integer greater than zero"); + } + } + + private long rubyNumericToLong(IRubyObject value, String paramName) { + if (value instanceof RubyNumeric && ((RubyNumeric) value).getDoubleValue() > 0) { + RubyNumeric fixNum = (RubyNumeric) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a float greater than zero"); + } + } + + class JRubySemaphore extends Semaphore { + + public JRubySemaphore(int permits) { + super(permits); + } + + public JRubySemaphore(int permits, boolean value) { + super(permits, value); + } + + public void publicReducePermits(int i) { + reducePermits(i); + } + + } + } +} diff --git 
a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java new file mode 100644 index 0000000..bfcc0d0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java @@ -0,0 +1,307 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBasicObject; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.RubyThread; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import sun.misc.Unsafe; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +public class SynchronizationLibrary implements Library { + + private static final Unsafe UNSAFE = loadUnsafe(); + private static final boolean FULL_FENCE = supportsFences(); + + private static Unsafe loadUnsafe() { + try { + Class ncdfe = Class.forName("sun.misc.Unsafe"); + Field f = ncdfe.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (Unsafe) f.get((java.lang.Object) null); + } catch (Exception var2) { + return null; + } catch (NoClassDefFoundError var3) { + return null; + } + } + + private static boolean supportsFences() { + if (UNSAFE == null) { + return false; + } else { + try { + Method m = UNSAFE.getClass().getDeclaredMethod("fullFence", new Class[0]); + if (m != null) { + return true; + } + } catch (Exception var1) { + // nothing + } + + return false; + } + } + + private static final ObjectAllocator JRUBY_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyObject(runtime, klazz); + } + }; + + private static final ObjectAllocator OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Object(runtime, klazz); + } + }; + + private static final ObjectAllocator ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new AbstractLockableObject(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBY_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyLockableObject(runtime, klazz); + } + }; + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule synchronizationModule = runtime. + defineModule("Concurrent"). 
+ defineModuleUnder("Synchronization"); + + RubyModule jrubyAttrVolatileModule = synchronizationModule.defineModuleUnder("JRubyAttrVolatile"); + jrubyAttrVolatileModule.defineAnnotatedMethods(JRubyAttrVolatile.class); + + defineClass(runtime, synchronizationModule, "AbstractObject", "JRubyObject", + JRubyObject.class, JRUBY_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "JRubyObject", "Object", + Object.class, OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "AbstractLockableObject", + AbstractLockableObject.class, ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "AbstractLockableObject", "JRubyLockableObject", + JRubyLockableObject.class, JRUBY_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "JRuby", + JRuby.class, new ObjectAllocator() { + @Override + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRuby(runtime, klazz); + } + }); + } + + private RubyClass defineClass( + Ruby runtime, + RubyModule namespace, + String parentName, + String name, + Class javaImplementation, + ObjectAllocator allocator) { + final RubyClass parentClass = namespace.getClass(parentName); + + if (parentClass == null) { + System.out.println("not found " + parentName); + throw runtime.newRuntimeError(namespace.toString() + "::" + parentName + " is missing"); + } + + final RubyClass newClass = namespace.defineClassUnder(name, parentClass, allocator); + newClass.defineAnnotatedMethods(javaImplementation); + return newClass; + } + + // Facts: + // - all ivar reads are without any synchronisation of fences see + // https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/VariableAccessor.java#L110-110 + // - writes depend on UnsafeHolder.U, null -> SynchronizedVariableAccessor, !null -> StampedVariableAccessor + // SynchronizedVariableAccessor wraps with synchronized block, StampedVariableAccessor uses fullFence or + // volatilePut + // TODO (pitr 16-Sep-2015): what do we do in Java 9 ? + + // module JRubyAttrVolatile + public static class JRubyAttrVolatile { + + // volatile threadContext is used as a memory barrier per the JVM memory model happens-before semantic + // on volatile fields. any volatile field could have been used but using the thread context is an + // attempt to avoid code elimination. + private static volatile int volatileField; + + @JRubyMethod(name = "full_memory_barrier", visibility = Visibility.PUBLIC) + public static IRubyObject fullMemoryBarrier(ThreadContext context, IRubyObject self) { + // Prevent reordering of ivar writes with publication of this instance + if (!FULL_FENCE) { + // Assuming that following volatile read and write is not eliminated it simulates fullFence. + // If it's eliminated it'll cause problems only on non-x86 platforms. 
+ // http://shipilev.net/blog/2014/jmm-pragmatics/#_happens_before_test_your_understanding + final int volatileRead = volatileField; + volatileField = context.getLine(); + } else { + UNSAFE.fullFence(); + } + return context.nil; + } + + @JRubyMethod(name = "instance_variable_get_volatile", visibility = Visibility.PUBLIC) + public static IRubyObject instanceVariableGetVolatile( + ThreadContext context, + IRubyObject self, + IRubyObject name) { + // Ensure we ses latest value with loadFence + if (!FULL_FENCE) { + // piggybacking on volatile read, simulating loadFence + final int volatileRead = volatileField; + return ((RubyBasicObject) self).instance_variable_get(context, name); + } else { + UNSAFE.loadFence(); + return ((RubyBasicObject) self).instance_variable_get(context, name); + } + } + + @JRubyMethod(name = "instance_variable_set_volatile", visibility = Visibility.PUBLIC) + public static IRubyObject InstanceVariableSetVolatile( + ThreadContext context, + IRubyObject self, + IRubyObject name, + IRubyObject value) { + // Ensure we make last update visible + if (!FULL_FENCE) { + // piggybacking on volatile write, simulating storeFence + final IRubyObject result = ((RubyBasicObject) self).instance_variable_set(name, value); + volatileField = context.getLine(); + return result; + } else { + // JRuby uses StampedVariableAccessor which calls fullFence + // so no additional steps needed. + // See https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/StampedVariableAccessor.java#L151-L159 + return ((RubyBasicObject) self).instance_variable_set(name, value); + } + } + } + + @JRubyClass(name = "JRubyObject", parent = "AbstractObject") + public static class JRubyObject extends RubyObject { + + public JRubyObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "Object", parent = "JRubyObject") + public static class Object extends JRubyObject { + + public Object(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "AbstractLockableObject", parent = "Object") + public static class AbstractLockableObject extends Object { + + public AbstractLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "JRubyLockableObject", parent = "AbstractLockableObject") + public static class JRubyLockableObject extends JRubyObject { + + public JRubyLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "synchronize", visibility = Visibility.PROTECTED) + public IRubyObject rubySynchronize(ThreadContext context, Block block) { + synchronized (this) { + return block.yield(context, null); + } + } + + @JRubyMethod(name = "ns_wait", optional = 1, visibility = Visibility.PROTECTED) + public IRubyObject nsWait(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.runtime; + if (args.length > 1) { + throw runtime.newArgumentError(args.length, 1); + } + Double timeout = null; + if (args.length > 0 && !args[0].isNil()) { + timeout = args[0].convertToFloat().getDoubleValue(); + if (timeout < 0) { + throw runtime.newArgumentError("time interval must be positive"); + } + } + if (Thread.interrupted()) { + throw runtime.newConcurrencyError("thread interrupted"); + } + boolean success = false; + try { + success = context.getThread().wait_timeout(this, timeout); + } catch (InterruptedException ie) { + throw runtime.newConcurrencyError(ie.getLocalizedMessage()); + } finally { + // An 
interrupt or timeout may have caused us to miss + // a notify that we consumed, so do another notify in + // case someone else is available to pick it up. + if (!success) { + this.notify(); + } + } + return this; + } + + @JRubyMethod(name = "ns_signal", visibility = Visibility.PROTECTED) + public IRubyObject nsSignal(ThreadContext context) { + notify(); + return this; + } + + @JRubyMethod(name = "ns_broadcast", visibility = Visibility.PROTECTED) + public IRubyObject nsBroadcast(ThreadContext context) { + notifyAll(); + return this; + } + } + + @JRubyClass(name = "JRuby") + public static class JRuby extends RubyObject { + public JRuby(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "sleep_interruptibly", visibility = Visibility.PUBLIC, module = true) + public static IRubyObject sleepInterruptibly(final ThreadContext context, IRubyObject receiver, final Block block) { + try { + context.getThread().executeBlockingTask(new RubyThread.BlockingTask() { + @Override + public void run() throws InterruptedException { + block.call(context); + } + + @Override + public void wakeup() { + context.getThread().getNativeThread().interrupt(); + } + }); + } catch (InterruptedException e) { + throw context.runtime.newThreadError("interrupted in Concurrent::Synchronization::JRuby.sleep_interruptibly"); + } + return context.nil; + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java new file mode 100644 index 0000000..e11e15a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java @@ -0,0 +1,31 @@ +package com.concurrent_ruby.ext.jsr166e; + +import java.util.Map; +import java.util.Set; + +public interface ConcurrentHashMap { + /** Interface describing a function of one argument */ + public interface Fun { T apply(A a); } + /** Interface describing a function of two arguments */ + public interface BiFun { T apply(A a, B b); } + + public V get(K key); + public V put(K key, V value); + public V putIfAbsent(K key, V value); + public V computeIfAbsent(K key, Fun mf); + public V computeIfPresent(K key, BiFun mf); + public V compute(K key, BiFun mf); + public V merge(K key, V value, BiFun mf); + public boolean replace(K key, V oldVal, V newVal); + public V replace(K key, V value); + public boolean containsKey(K key); + public boolean remove(Object key, Object value); + public V remove(K key); + public void clear(); + public Set> entrySet(); + public int size(); + public V getValueOrDefault(Object key, V defaultValue); + + public boolean containsValue(V value); + public K findKey(V value); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java new file mode 100644 index 0000000..86aa4eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java @@ -0,0 +1,3863 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + 
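+// Illustrative sketch, not part of the vendored source: this class implements the
+// jsr166e ConcurrentHashMap interface from the previous file, i.e. the map contract
+// consumed by the JRubyMapBackend earlier in this diff. Assuming <K,V>-style
+// generic parameters, its CAS-style methods behave roughly as follows:
+//
+//     ConcurrentHashMapV8<String, Integer> m = new ConcurrentHashMapV8<String, Integer>();
+//     m.putIfAbsent("hits", 0);              // returns the previous value, or null if absent
+//     m.replace("hits", 0, 1);               // true only if "hits" was still mapped to 0
+//     boolean gone = m.remove("hits", 1);    // removes the entry only if still mapped to 1
+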
+// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *
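+ *
+ * An illustrative sketch of the visibility guarantee above (the {@code int[]}
+ * payload and the variable names are hypothetical, not part of this class):
+ *
+ *   ConcurrentHashMapV8<String, int[]> map = new ConcurrentHashMapV8<String, int[]>();
+ *   // writer thread:
+ *   int[] data = new int[] { 42 };
+ *   map.put("data", data);                   // publishes the fully initialized array
+ *   // reader thread:
+ *   int[] seen = map.get("data");
+ *   if (seen != null) assert seen[0] == 42;  // follows from the happens-before edge
+ *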

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *

A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *
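+ *
+ * A pre-JDK8 sketch of that same frequency-map idiom, written against the nested
+ * {@code Fun} callback declared on the jsr166e {@code ConcurrentHashMap}
+ * interface (illustrative only; the generic parameters are assumed):
+ *
+ *   ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMapV8<String, LongAdder>();
+ *   String word = "zephyr";
+ *   freqs.computeIfAbsent(word, new ConcurrentHashMap.Fun<String, LongAdder>() {
+ *       public LongAdder apply(String k) { return new LongAdder(); }  // built at most once per absent key
+ *   }).increment();
+ *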

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

    + *
  • forEach: Perform a given action on each element. + * A variant form applies a given transformation on each element + * before performing the action.
  • + * + *
  • search: Return the first available non-null result of + * applying a given function on each element; skipping further + * search when a result is found.
  • + * + *
  • reduce: Accumulate each element. The supplied reduction + * function cannot rely on ordering (more formally, it should be + * both associative and commutative). There are five variants: + * + *
      + * + *
    • Plain reductions. (There is not a form of this method for + * (key, value) function arguments since there is no corresponding + * return type.)
    • + * + *
    • Mapped reductions that accumulate the results of a given + * function applied to each element.
    • + * + *
    • Reductions to scalar doubles, longs, and ints, using a + * given basis value.
    • + * + * + *
    + *
+ * + *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * {@code ConcurrentHashMapV8 m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int half = split >>> 1; half > 0; half >>>= 1)
+     *       (subtasks = new SumValues(s.split(), half, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node[] table; + + /** + * The counter maintaining number of elements. + */ + private transient final LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(Node[] tab, int i) { // used by Iter + return (Node)UNSAFE.getObjectVolatile(tab, ((long)i< 1 ? 64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. 
+ */ + final void tryAwaitLock(Node[] tab, int i) { + if (tab != null && i >= 0 && i < tab.length) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + + // Unsafe mechanics for casHash + private static final sun.misc.Unsafe UNSAFE; + private static final long hashOffset; + + static { + try { + UNSAFE = getUnsafe(); + Class k = Node.class; + hashOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("hash")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) 
The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (Node[] tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
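+     *
+     * As a quick reference, the public remove/replace methods further
+     * below call this with the following argument patterns:
+     *   remove(key)                  -> internalReplace(key, null, null)
+     *   remove(key, value)           -> internalReplace(key, null, value)
+     *   replace(key, value)          -> internalReplace(key, value, null)
+     *   replace(key, oldVal, newVal) -> internalReplace(key, newVal, oldVal)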
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
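+     *
+     * As a rough sketch only (each real method below adds its own
+     * validation, counting and resize handling), the shared loop from
+     * steps 1-5 above looks like:
+     *
+     *   for (Node[] tab = table;;) {
+     *     if (tab == null)               tab = initTable();               // 1
+     *     else if (bin head is null)     CAS in a new Node and break;     // 2
+     *     else if (head hash == MOVED)   relay to TreeBin or new table;   // 3, 4
+     *     else if (head is LOCKED)       checkForResize(); tryAwaitLock;
+     *     else if (casHash locks head)   scan bin, add or update, unlock; // 5
+     *   }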
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + 
replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } 
finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for 
the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + Node[] tab; int n, sc; + while ((tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
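+     *
+     * Illustrative example (not part of the original comment): when a
+     * table of length n = 16 is rebuilt into a nextTab of length 32,
+     * the split bit is 16, so a node at old index i with (hash & 16)
+     * == 0 stays at index i in nextTab, while one with (hash & 16)
+     * != 0 moves to index i + 16; splitBin and splitTreeBin below
+     * preserve exactly this invariant.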
+ * + * @return the new table + */ + private static final Node[] rebuild(Node[] tab) { + int n = tab.length; + Node[] nextTab = new Node[n << 1]; + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. + */ + private static void splitBin(Node[] nextTab, int i, Node e) { + int bit = nextTab.length >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. 
+ */ + private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { + int bit = nextTab.length >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. 
Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + Node[] tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; Node[] t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? 
i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. 
+ * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

 {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
 {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
 {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
 {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
 {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *     return (v == null) ? msg : v + msg; }});}
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
 {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
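Because the iterators handed out by {@code entrySet()}, {@code keySet()} and {@code values()} are weakly consistent, a traversal can safely overlap with updates from the same or other threads. A minimal sketch of that guarantee, assuming JDK 8's java.util.concurrent.ConcurrentHashMap, which documents the same iterator semantics:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class WeaklyConsistentIterationSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<Integer, String> map = new ConcurrentHashMap<>();
            for (int i = 0; i < 8; i++) {
                map.put(i, "v" + i);
            }

            // Removing entries mid-traversal is legal: the iterator never throws
            // ConcurrentModificationException, and may or may not reflect changes
            // made after it was constructed.
            for (Map.Entry<Integer, String> e : map.entrySet()) {
                if (e.getKey() % 2 == 0) {
                    map.remove(e.getKey());
                }
            }

            System.out.println(map.size()); // 4 (the odd keys remain)
        }
    }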
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == null) { + init = true; + Node[] tab = new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
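The KeySetView described here survives into JDK 8, where ConcurrentHashMap.newKeySet() builds a concurrent Set on top of an internal map and keySet(mappedValue) returns an add-enabled view over an existing map. A brief sketch against the standard class, assuming JDK 8+ and the same view semantics as this backport:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class KeySetViewSketch {
        public static void main(String[] args) {
            // A concurrent Set implemented as the key set of an internal map.
            Set<String> seen = ConcurrentHashMap.newKeySet();
            seen.add("alpha");
            seen.add("alpha");                    // duplicate adds are no-ops
            System.out.println(seen.size());      // 1

            // An add-enabled view: add() inserts the key with the common mapped value.
            ConcurrentHashMap<String, Boolean> flags = new ConcurrentHashMap<>();
            Set<String> enabled = flags.keySet(Boolean.TRUE);
            enabled.add("fast-path");
            System.out.println(flags.get("fast-path")); // true
        }
    }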
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long counterOffset; + private static final long sizeCtlOffset; + private static final long ABASE; + private static final int ASHIFT; + + static { + int ss; + try { + UNSAFE = getUnsafe(); + Class k = ConcurrentHashMapV8.class; + counterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("counter")); + sizeCtlOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + Class sc = Node[].class; + ABASE = UNSAFE.arrayBaseOffset(sc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java new file mode 100644 index 0000000..47a923c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java @@ -0,0 +1,203 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
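As the class comment explains, LongAdder gives up a precise moment-in-time snapshot and some memory in exchange for much better throughput than a single AtomicLong under write contention. A short usage sketch, shown against java.util.concurrent.atomic.LongAdder, the JDK 8 class this backport corresponds to (the add/increment/sum API is the same):

    import java.util.concurrent.atomic.LongAdder;

    public class LongAdderSketch {
        public static void main(String[] args) throws InterruptedException {
            final LongAdder hits = new LongAdder();

            Runnable worker = () -> {
                for (int i = 0; i < 100_000; i++) {
                    hits.increment();   // contended updates are striped across cells
                }
            };

            Thread t1 = new Thread(worker);
            Thread t2 = new Thread(worker);
            t1.start();
            t2.start();
            t1.join();
            t2.join();

            // sum() folds the base field and every cell into a single total.
            System.out.println(hits.sum()); // 200000
        }
    }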
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java new file mode 100644 index 0000000..93a277f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java @@ -0,0 +1,342 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. 
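The striping scheme sketched in this overview (a shared base for the uncontended case, plus a power-of-two table of cells indexed by a per-thread hash once CASes start failing) can be illustrated with standard atomics. This is a deliberately simplified sketch of the idea only; the real Striped64 also pads cells against false sharing, grows the table on contention, and rehashes colliding threads:

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicLongArray;

    class StripedCounterSketch {
        private final AtomicLong base = new AtomicLong();
        private final AtomicLongArray cells = new AtomicLongArray(
                Integer.highestOneBit(Runtime.getRuntime().availableProcessors()) * 2);

        void add(long x) {
            long b = base.get();
            // Uncontended fast path: a single CAS on the shared base.
            if (!base.compareAndSet(b, b + x)) {
                // Contended: fall back to a cell picked by masking a thread hash
                // into the power-of-two table, i.e. (n - 1) & h.
                int index = (cells.length() - 1) & Thread.currentThread().hashCode();
                cells.addAndGet(index, x);
            }
        }

        long sum() {
            long total = base.get();
            for (int i = 0; i < cells.length(); i++) {
                total += cells.get(i);
            }
            return total;
        }
    }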
Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. 
+ */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java new file mode 100644 index 0000000..b7fc5a9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java @@ -0,0 +1,3800 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. 
However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *
Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *
The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *
A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *
A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *
This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *
Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *
ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *
+ * + *
The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *
Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *
Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *
Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *
All arguments to all task methods must be non-null. + * + *
jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *
This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *
This interface exports a subset of expected JDK8 + * functionality. + * + *
Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *
+     * <pre> {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int d = split >>> 1; d > 0; d >>>= 1)
+     *       (subtasks = new SumValues(s.split(), d, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }</pre>
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
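The last point above, inserting the first node of an empty bin with a single CAS, is easy to picture with an AtomicReferenceArray, which is what this "nounsafe" variant imports in place of sun.misc.Unsafe intrinsics. A simplified, illustrative sketch only; the Node type and table handling here are stand-ins, not the class's real internals:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    class FirstNodeCasSketch {
        static final class Node {
            final int hash;
            final Object key;
            volatile Object val;
            volatile Node next;
            Node(int hash, Object key, Object val, Node next) {
                this.hash = hash; this.key = key; this.val = val; this.next = next;
            }
        }

        final AtomicReferenceArray<Node> table = new AtomicReferenceArray<>(16);

        boolean tryInsertFirst(int hash, Object key, Object val) {
            int i = (table.length() - 1) & hash;   // power-of-two index mask
            // Succeeds only while the bin is still empty; if another thread won
            // the race, callers must lock the existing head and append instead
            // (omitted here).
            return table.compareAndSet(i, null, new Node(hash, key, val, null));
        }
    }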
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
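The bin-occupancy figures quoted above follow directly from the Poisson formula exp(-0.5) * pow(0.5, k) / factorial(k); a few lines of Java reproduce the listed values:

    public class BinOccupancySketch {
        public static void main(String[] args) {
            double lambda = 0.5;       // expected nodes per bin at the 0.75 resize threshold
            double factorial = 1.0;
            for (int k = 0; k <= 8; k++) {
                if (k > 0) {
                    factorial *= k;
                }
                double p = Math.exp(-lambda) * Math.pow(lambda, k) / factorial;
                System.out.printf("%d: %.8f%n", k, p);   // 0: 0.60653066, 1: 0.30326533, ...
            }
        }
    }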
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
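A quick way to see why a transferred element either keeps its index or moves by exactly the old capacity: when the table doubles, only one extra hash bit (the old capacity itself) enters the power-of-two mask. A throwaway check with invented names:

class SplitIndexDemo {
    public static void main(String[] args) {
        int n = 16;                                       // old capacity (power of two)
        for (int h : new int[] {0x5A3C, 0x1234, 0x0007}) {
            int oldIndex = h & (n - 1);
            int newIndex = h & (2 * n - 1);
            System.out.println(Integer.toHexString(h) + ": " + oldIndex + " -> " + newIndex
                + ((h & n) == 0 ? " (stays)" : " (moves by +" + n + ")"));
        }
    }
}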
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
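The control-bit encoding spelled out above (and defined by the constants that follow) can be sanity-checked in isolation; a small sketch with an invented class name, reusing the same bit values:

class HashBitsDemo {
    static final int MOVED     = 0x80000000;
    static final int LOCKED    = 0x40000000;
    static final int WAITING   = 0xc0000000;
    static final int HASH_BITS = 0x3fffffff;

    public static void main(String[] args) {
        int h = 0x12345678 & HASH_BITS;                   // a normal node hash
        System.out.println((h & LOCKED) == 0);            // true: not locked
        int locked = h | LOCKED;                          // set the lock bit
        System.out.println((locked & HASH_BITS) == h);    // true: hash bits preserved
        System.out.println((locked | WAITING) == (h | WAITING)); // true: waiter bit stacks on the lock bit
    }
}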
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile AtomicReferenceArray table; + + /** + * The counter maintaining number of elements. + */ + private transient LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(AtomicReferenceArray tab, int i) { // used by Iter + return tab.get(i); + } + + private static final boolean casTabAt(AtomicReferenceArray tab, int i, Node c, Node v) { + return tab.compareAndSet(i, c, v); + } + + private static final void setTabAt(AtomicReferenceArray tab, int i, Node v) { + tab.set(i, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. + */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return HASH_UPDATER.compareAndSet(this, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 
64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. + */ + final void tryAwaitLock(AtomicReferenceArray tab, int i) { + if (tab != null && i >= 0 && i < tab.length()) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. 
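A stripped-down, runnable version of the AQS discipline just described (state 1 = one exclusive writer, negative state = reader count). This is only a sketch with an invented class name to show the mechanics in isolation; the real logic is in the TreeBin class that follows.

import java.util.concurrent.locks.AbstractQueuedSynchronizer;

class TinyReadWriteSync extends AbstractQueuedSynchronizer {
    @Override protected boolean isHeldExclusively() { return getState() > 0; }
    @Override protected boolean tryAcquire(int ignore) { return compareAndSetState(0, 1); }
    @Override protected boolean tryRelease(int ignore) { setState(0); return true; }
    @Override protected int tryAcquireShared(int ignore) {
        for (int c;;) {
            if ((c = getState()) > 0) return -1;          // writer holds the lock
            if (compareAndSetState(c, c - 1)) return 1;   // one more reader
        }
    }
    @Override protected boolean tryReleaseShared(int ignore) {
        int c;
        do {} while (!compareAndSetState(c = getState(), c + 1));
        return c == -1;                                   // last reader just left
    }

    public static void main(String[] args) {
        TinyReadWriteSync s = new TinyReadWriteSync();
        s.acquireShared(0); s.acquireShared(0);           // two concurrent readers
        System.out.println(s.getState());                 // -2
        s.releaseShared(0); s.releaseShared(0);
        s.acquire(0);                                     // exclusive writer
        System.out.println(s.isHeldExclusively());        // true
        s.release(0);
    }
}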
For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 
0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? 
null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(AtomicReferenceArray tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (AtomicReferenceArray tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (AtomicReferenceArray)ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
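The Float-key example mentioned in the spread() comment above is easy to reproduce: the hash codes of consecutive whole-number floats differ only in high bits, so with a small power-of-two mask and no spreading they all land in bin 0. A throwaway check (class name invented) reusing the same transform:

class SpreadDemo {
    static final int HASH_BITS = 0x3fffffff;

    static int spread(int h) {                            // same transform as above
        h ^= (h >>> 18) ^ (h >>> 12);
        return (h ^ (h >>> 10)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int mask = 15;                                    // a 16-bin table
        for (float f = 1f; f <= 4f; f++) {
            int h = Float.valueOf(f).hashCode();
            System.out.println(f + ": raw bin " + (h & mask)       // always 0
                + ", spread bin " + (spread(h) & mask));           // distributed
        }
    }
}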
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
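For reference, the conditional replace/remove contract that internalReplace provides is the same one the standard java.util.concurrent.ConcurrentHashMap exposes publicly (replace only when the current value matches, delete when the new value is null). A short usage sketch against the JDK class, not the vendored one:

import java.util.concurrent.ConcurrentHashMap;

class ReplaceSemanticsDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        System.out.println(m.replace("a", 1, 2));   // true  (expected value matched)
        System.out.println(m.replace("a", 1, 3));   // false (expected value is stale)
        System.out.println(m.remove("a", 99));      // false (conditional remove misses)
        System.out.println(m.remove("a", 2));       // true
    }
}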
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new 
Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + 
try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + 
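Similarly, the behaviour implemented by internalMerge above matches the public merge() contract of the JDK's ConcurrentHashMap: insert the value when the key is absent, otherwise combine old and new with the remapping function, and remove the entry when that function returns null. A usage sketch against the JDK class:

import java.util.concurrent.ConcurrentHashMap;

class MergeSemanticsDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
        counts.merge("x", 1, Integer::sum);               // absent -> 1
        counts.merge("x", 1, Integer::sum);               // present -> 1 + 1
        System.out.println(counts.get("x"));              // 2
        counts.merge("x", 1, (old, inc) -> null);         // remapping to null removes the entry
        System.out.println(counts.containsKey("x"));      // false
    }
}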
/** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final AtomicReferenceArray initTable() { + AtomicReferenceArray tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + AtomicReferenceArray tab; int n, sc; + while ((tab = table) != null && + (n = tab.length()) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + AtomicReferenceArray tab = table; int n; + if (tab == null || (n = tab.length()) == 0) { + n = (sc > c) ? sc : c; + if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
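tableSizeFor(), defined above, rounds a requested capacity up to the next power of two by smearing the highest set bit downward; a few spot checks (the MAXIMUM_CAPACITY clamp is omitted in this throwaway copy):

class TableSizeForDemo {
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return n + 1;
    }

    public static void main(String[] args) {
        for (int c : new int[] {1, 2, 3, 17, 100, 1000}) {
            System.out.println(c + " -> " + tableSizeFor(c));
        }
        // prints 1 -> 1, 2 -> 2, 3 -> 4, 17 -> 32, 100 -> 128, 1000 -> 1024
    }
}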
+ * + * @return the new table + */ + private static final AtomicReferenceArray rebuild(AtomicReferenceArray tab) { + int n = tab.length(); + AtomicReferenceArray nextTab = new AtomicReferenceArray(n << 1); + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. 
+ */ + private static void splitBin(AtomicReferenceArray nextTab, int i, Node e) { + int bit = nextTab.length() >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. + */ + private static void splitTreeBin(AtomicReferenceArray nextTab, int i, TreeBin t) { + int bit = nextTab.length() >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + AtomicReferenceArray tab = table; + while (tab != null && i < tab.length()) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). 
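The lastRun optimisation used by splitBin above can be seen on a tiny hand-built list: the trailing run of nodes whose new bit all agree is reused as-is, and only the nodes before it need cloning. A throwaway check with invented names:

class LastRunDemo {
    static final class Node {
        final int hash; final Node next;
        Node(int hash, Node next) { this.hash = hash; this.next = next; }
    }

    public static void main(String[] args) {
        int bit = 16;                                     // the extra mask bit after doubling
        Node e = null;
        for (int h : new int[] {49, 21, 17, 1}) e = new Node(h, e);   // list: 1 -> 17 -> 21 -> 49
        int runBit = e.hash & bit;
        Node lastRun = e;
        for (Node p = e.next; p != null; p = p.next) {
            int b = p.hash & bit;
            if (b != runBit) { runBit = b; lastRun = p; }
        }
        int cloned = 0;
        for (Node p = e; p != lastRun; p = p.next) cloned++;
        System.out.println("lastRun hash=" + lastRun.hash + ", nodes needing cloning=" + cloned);
        // prints lastRun hash=17, nodes needing cloning=1 (only the head node with hash 1)
    }
}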
Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + AtomicReferenceArray tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; AtomicReferenceArray t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length(); + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. 
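The weak-consistency contract described here is the same one the iterators of the JDK's ConcurrentHashMap follow, so it can be demonstrated without the vendored class: mutating the map mid-traversal never throws ConcurrentModificationException, and iterator.remove() is supported.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class WeaklyConsistentIterationDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<Integer, String> m = new ConcurrentHashMap<>();
        for (int i = 0; i < 4; i++) m.put(i, "v" + i);
        Iterator<Map.Entry<Integer, String>> it = m.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<Integer, String> e = it.next();
            if (e.getKey() < 4) m.put(100 + e.getKey(), "added mid-iteration"); // no CME
            if (e.getKey() == 1) it.remove();                                   // supported
        }
        System.out.println(m.size());   // 7: keys 0, 2, 3 plus the four added 100..103
    }
}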
+ */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + AtomicReferenceArray t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length(); + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length(); + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (AtomicReferenceArray)ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. 
+ * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

+     * <pre> {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
+     * <pre> {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}
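For instance, a minimal memoization sketch along these lines, written against this file's API; the Fun callback and its single apply method are assumed from the interface declarations elsewhere in this file, the generic signatures follow the upstream jsr166e sources, and ExpensiveEncoder with its helper is a hypothetical name, not part of this diff:

class ExpensiveEncoder {
    // Assumes this sketch lives in the same package as ConcurrentHashMapV8.
    final ConcurrentHashMapV8<String, byte[]> cache =
        new ConcurrentHashMapV8<String, byte[]>();

    byte[] tokenFor(String name) {
        // The mapping function is evaluated only when the key is absent,
        // and the computed value is installed atomically.
        return cache.computeIfAbsent(name, new ConcurrentHashMapV8.Fun<String, byte[]>() {
            public byte[] apply(String k) { return expensiveEncode(k); }
        });
    }

    byte[] expensiveEncode(String k) { return k.getBytes(); } // stand-in for a costly, pure computation
}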
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
+     * <pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * a remapping function may append new messages to an existing + * value mapping. + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
+     * <pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
+     * <pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *    return (v == null) ? msg : v + msg; }});}
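The same idiom adapts readily to a concurrent tally; a minimal sketch assuming the BiFun callback declared elsewhere in this file and the generic signatures of the upstream jsr166e class (HitCounter is a hypothetical name, not part of this diff):

class HitCounter {
    final ConcurrentHashMapV8<String, Integer> hits =
        new ConcurrentHashMapV8<String, Integer>();

    void record(String page) {
        hits.compute(page, new ConcurrentHashMapV8.BiFun<String, Integer, Integer>() {
            public Integer apply(String k, Integer current) {
                // current is null when no mapping exists; returning non-null
                // installs the new count atomically
                return (current == null) ? 1 : current + 1;
            }
        });
    }
}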
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
+     * <pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
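Spelled out as compilable (but deliberately non-atomic) Java, that equivalent sequence reads as below; the helper name and the generic signature of BiFun are assumptions based on the upstream jsr166e sources rather than part of this diff:

final class MergeEquivalent {
    // Reference-only expansion of the pseudocode above; the check-then-act steps
    // race with concurrent updates, which is exactly what the real merge() avoids.
    static <K, V> V mergeEquivalent(ConcurrentHashMapV8<K, V> map, K key, V value,
                                    ConcurrentHashMapV8.BiFun<? super V, ? super V, ? extends V> fn) {
        if (!map.containsKey(key)) {
            map.put(key, value);
            return value;
        }
        V newValue = fn.apply(map.get(key), value);
        if (newValue != null)
            map.put(key, newValue);
        else
            map.remove(key);
        return newValue;
    }
}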
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + this.counter = new LongAdder(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == null) { + init = true; + AtomicReferenceArray tab = new AtomicReferenceArray(n); + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + AtomicReferenceArray tab = table; + for (int i = 0; i < tab.length(); ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java new file mode 100644 index 0000000..ecf552a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java @@ -0,0 +1,204 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *

This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *

This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *

jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java new file mode 100644 index 0000000..f521642 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java @@ -0,0 +1,291 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. 
If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + + static AtomicLongFieldUpdater VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); + + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return VALUE_UPDATER.compareAndSet(this, cmp, val); + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. 
+ */ + transient volatile int busy; + + AtomicLongFieldUpdater BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); + AtomicIntegerFieldUpdater BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return BASE_UPDATER.compareAndSet(this, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return BUSY_UPDATER.compareAndSet(this, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java new file mode 100644 index 0000000..3ea409f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java @@ -0,0 +1,199 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.16 version + +package com.concurrent_ruby.ext.jsr166y; + +import java.util.Random; + +/** + * A random number generator isolated to the current thread. Like the + * global {@link java.util.Random} generator used by the {@link + * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized + * with an internally generated seed that may not otherwise be + * modified. When applicable, use of {@code ThreadLocalRandom} rather + * than shared {@code Random} objects in concurrent programs will + * typically encounter much less overhead and contention. Use of + * {@code ThreadLocalRandom} is particularly appropriate when multiple + * tasks (for example, each a {@link ForkJoinTask}) use random numbers + * in parallel in thread pools. + * + *

Usages of this class should typically be of the form: + * {@code ThreadLocalRandom.current().nextX(...)} (where + * {@code X} is {@code Int}, {@code Long}, etc). + * When all usages are of this form, it is never possible to + * accidentally share a {@code ThreadLocalRandom} across multiple threads. + * + *
This class also provides additional commonly used bounded random + * generation methods. + * + * @since 1.7 + * @author Doug Lea + */ +public class ThreadLocalRandom extends Random { + // same constants as Random, but must be redeclared because private + private static final long multiplier = 0x5DEECE66DL; + private static final long addend = 0xBL; + private static final long mask = (1L << 48) - 1; + + /** + * The random seed. We can't use super.seed. + */ + private long rnd; + + /** + * Initialization flag to permit calls to setSeed to succeed only + * while executing the Random constructor. We can't allow others + * since it would cause setting seed in one part of a program to + * unintentionally impact other usages by the thread. + */ + boolean initialized; + + // Padding to help avoid memory contention among seed updates in + // different TLRs in the common case that they are located near + // each other. + private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; + + /** + * The actual ThreadLocal + */ + private static final ThreadLocal localRandom = + new ThreadLocal() { + protected ThreadLocalRandom initialValue() { + return new ThreadLocalRandom(); + } + }; + + + /** + * Constructor called only by localRandom.initialValue. + */ + ThreadLocalRandom() { + super(); + initialized = true; + } + + /** + * Returns the current thread's {@code ThreadLocalRandom}. + * + * @return the current thread's {@code ThreadLocalRandom} + */ + public static ThreadLocalRandom current() { + return localRandom.get(); + } + + /** + * Throws {@code UnsupportedOperationException}. Setting seeds in + * this generator is not supported. + * + * @throws UnsupportedOperationException always + */ + public void setSeed(long seed) { + if (initialized) + throw new UnsupportedOperationException(); + rnd = (seed ^ multiplier) & mask; + } + + protected int next(int bits) { + rnd = (rnd * multiplier + addend) & mask; + return (int) (rnd >>> (48-bits)); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @throws IllegalArgumentException if least greater than or equal + * to bound + * @return the next value + */ + public int nextInt(int least, int bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextInt(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public long nextLong(long n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + // Divide n by two until small enough for nextInt. On each + // iteration (at most 31 of them but usually much less), + // randomly choose both whether to include high bit in result + // (offset) and whether to continue with the lower vs upper + // half (which makes a difference only if odd). + long offset = 0; + while (n >= Integer.MAX_VALUE) { + int bits = next(2); + long half = n >>> 1; + long nextn = ((bits & 2) == 0) ? half : n - half; + if ((bits & 1) == 0) + offset += n - nextn; + n = nextn; + } + return offset + nextInt((int) n); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). 
+ * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public long nextLong(long least, long bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextLong(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed {@code double} value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public double nextDouble(double n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + return nextDouble() * n; + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public double nextDouble(double least, double bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextDouble() * (bound - least) + least; + } + + private static final long serialVersionUID = -5851777807851030925L; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent-ruby.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent-ruby.rb new file mode 100644 index 0000000..08917e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent-ruby.rb @@ -0,0 +1 @@ +require_relative "./concurrent" diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent.rb new file mode 100644 index 0000000..87de46f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent.rb @@ -0,0 +1,134 @@ +require 'concurrent/version' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' + +require 'concurrent/atomics' +require 'concurrent/executors' +require 'concurrent/synchronization' + +require 'concurrent/atomic/atomic_markable_reference' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/agent' +require 'concurrent/atom' +require 'concurrent/array' +require 'concurrent/hash' +require 'concurrent/set' +require 'concurrent/map' +require 'concurrent/tuple' +require 'concurrent/async' +require 'concurrent/dataflow' +require 'concurrent/delay' +require 'concurrent/exchanger' +require 'concurrent/future' +require 'concurrent/immutable_struct' +require 'concurrent/ivar' +require 'concurrent/maybe' +require 'concurrent/mutable_struct' +require 'concurrent/mvar' +require 'concurrent/promise' +require 'concurrent/scheduled_task' +require 'concurrent/settable_struct' +require 'concurrent/timer_task' +require 'concurrent/tvar' +require 'concurrent/promises' + +require 'concurrent/thread_safe/synchronized_delegator' +require 'concurrent/thread_safe/util' + +require 'concurrent/options' + +# @!macro internal_implementation_note +# +# @note **Private Implementation:** This abstraction is a private, internal +# implementation detail. It should never be used directly. 
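# A brief sketch of the kind of top-level API the requires above wire
# together, assuming the documented Future and Map classes loaded by this
# entry file (the values shown are illustrative):
#
# ```
# require 'concurrent'
#
# # run a computation on the global thread pool and block for the result
# future = Concurrent::Future.execute { 1 + 1 }
# future.value #=> 2
#
# # a thread-safe, Hash-like collection
# map = Concurrent::Map.new
# map[:answer] = 42
# map[:answer] #=> 42
# ```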
+ +# @!macro monotonic_clock_warning +# +# @note Time calculations on all platforms and languages are sensitive to +# changes to the system clock. To alleviate the potential problems +# associated with changing the system clock while an application is running, +# most modern operating systems provide a monotonic clock that operates +# independently of the system clock. A monotonic clock cannot be used to +# determine human-friendly clock times. A monotonic clock is used exclusively +# for calculating time intervals. Not all Ruby platforms provide access to an +# operating system monotonic clock. On these platforms a pure-Ruby monotonic +# clock will be used as a fallback. An operating system monotonic clock is both +# faster and more reliable than the pure-Ruby implementation. The pure-Ruby +# implementation should be fast and reliable enough for most non-realtime +# operations. At this time the common Ruby platforms that provide access to an +# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions). +# +# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3) + +# @!macro copy_options +# +# ## Copy Options +# +# Object references in Ruby are mutable. This can lead to serious +# problems when the {#value} of an object is a mutable reference. Which +# is always the case unless the value is a `Fixnum`, `Symbol`, or similar +# "primitive" data type. Each instance can be configured with a few +# options that can help protect the program from potentially dangerous +# operations. Each of these options can be optionally set when the object +# instance is created: +# +# * `:dup_on_deref` When true the object will call the `#dup` method on +# the `value` object every time the `#value` method is called +# (default: false) +# * `:freeze_on_deref` When true the object will call the `#freeze` +# method on the `value` object every time the `#value` method is called +# (default: false) +# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run +# every time the `#value` method is called. The `Proc` will be given +# the current `value` as its only argument and the result returned by +# the block will be the return value of the `#value` call. When `nil` +# this option will be ignored (default: nil) +# +# When multiple deref options are set the order of operations is strictly defined. +# The order of deref operations is: +# * `:copy_on_deref` +# * `:dup_on_deref` +# * `:freeze_on_deref` +# +# Because of this ordering there is no need to `#freeze` an object created by a +# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`. +# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is +# as close to the behavior of a "pure" functional language (like Erlang, Clojure, +# or Haskell) as we are likely to get in Ruby. + +# @!macro deref_options +# +# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before +# returning the data from {#value} +# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before +# returning the data from {#value} +# @option opts [Proc] :copy_on_deref (nil) When calling the {#value} +# method, call the given proc passing the internal value as the sole +# argument then return the new value returned from the proc. + +# @!macro executor_and_deref_options +# +# @param [Hash] opts the options used to define the behavior at update and deref +# and to specify the executor on which to perform actions +# @option opts [Executor] :executor when set use the given `Executor` instance. 
+# Three special values are also supported: `:io` returns the global pool for +# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast +# operations, and `:immediate` returns the global `ImmediateExecutor` object. +# @!macro deref_options + +# @!macro warn.edge +# @api Edge +# @note **Edge Features** are under active development and may change frequently. +# +# - Deprecations are not added before incompatible changes. +# - Edge version: _major_ is always 0, _minor_ bump means incompatible change, +# _patch_ bump means compatible change. +# - Edge features may also lack tests and documentation. +# - Features developed in `concurrent-ruby-edge` are expected to move +# to `concurrent-ruby` when finalised. + + +# {include:file:README.md} +module Concurrent +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/agent.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/agent.rb new file mode 100644 index 0000000..815dca0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/agent.rb @@ -0,0 +1,587 @@ +require 'concurrent/configuration' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/thread_local_var' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents) + # function. An agent is a shared, mutable variable providing independent, + # uncoordinated, *asynchronous* change of individual values. Best used when + # the value will undergo frequent, complex updates. Suitable when the result + # of an update does not need to be known immediately. `Agent` is (mostly) + # functionally equivalent to Clojure's agent, except where the runtime + # prevents parity. + # + # Agents are reactive, not autonomous - there is no imperative message loop + # and no blocking receive. The state of an Agent should be itself immutable + # and the `#value` of an Agent is always immediately available for reading by + # any thread without any messages, i.e. observation does not require + # cooperation or coordination. + # + # Agent action dispatches are made using the various `#send` methods. These + # methods always return immediately. At some point later, in another thread, + # the following will happen: + # + # 1. The given `action` will be applied to the state of the Agent and the + # `args`, if any were supplied. + # 2. The return value of `action` will be passed to the validator lambda, + # if one has been set on the Agent. + # 3. If the validator succeeds or if no validator was given, the return value + # of the given `action` will become the new `#value` of the Agent. See + # `#initialize` for details. + # 4. If any observers were added to the Agent, they will be notified. See + # `#add_observer` for details. + # 5. If during the `action` execution any other dispatches are made (directly + # or indirectly), they will be held until after the `#value` of the Agent + # has been changed. + # + # If any exceptions are thrown by an action function, no nested dispatches + # will occur, and the exception will be cached in the Agent itself. When an + # Agent has errors cached, any subsequent interactions will immediately throw + # an exception, until the agent's errors are cleared. Agent errors can be + # examined with `#error` and the agent restarted with `#restart`. 
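  # A minimal sketch of the failure-and-restart cycle described above, using
  # only the methods documented on this class (the values shown are
  # illustrative):
  #
  # ```
  # agent = Concurrent::Agent.new(0, error_mode: :fail)
  #
  # agent.send { |value| raise 'boom' }   # the action raises; the error is cached
  # agent.await_for(1)                    # give the background action time to run
  #
  # agent.failed?                         #=> true
  # agent.error                           #=> #<RuntimeError: boom>
  # agent.send { |value| value + 1 }      #=> false, new dispatches are rejected
  #
  # agent.restart(0, clear_actions: true) # clear the error and any held actions
  # agent.failed?                         #=> false
  # ```
  #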
+ # + # The actions of all Agents get interleaved amongst threads in a thread pool. + # At any point in time, at most one action for each Agent is being executed. + # Actions dispatched to an agent from another single agent or thread will + # occur in the order they were sent, potentially interleaved with actions + # dispatched to the same agent from other sources. The `#send` method should + # be used for actions that are CPU limited, while the `#send_off` method is + # appropriate for actions that may block on IO. + # + # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an agent with an initial value + # agent = Concurrent::Agent.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # agent.send{|set| next_fibonacci(set) } + # end + # + # # wait for them to complete + # agent.await + # + # # get the current value + # agent.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Agents support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time an action dispatch returns and + # the new value is successfully validated. Observation will *not* occur if the + # action raises an exception, if validation fails, or when a {#restart} occurs. + # + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Agent when the action began + # processing. The `new_value` is the value to which the Agent was set when the + # action completed. Note that `old_value` and `new_value` may be the same. + # This is not an error. It simply means that the action returned the same + # value. + # + # ## Nested Actions + # + # It is possible for an Agent action to post further actions back to itself. + # The nested actions will be enqueued normally then processed *after* the + # outer action completes, in the order they were sent, possibly interleaved + # with action dispatches from other threads. Nested actions never deadlock + # with one another and a failure in a nested action will never affect the + # outer action. + # + # Nested actions can be called using the Agent reference from the enclosing + # scope or by passing the reference in as a "send" argument. Nested actions + # cannot be post using `self` from within the action block/proc/lambda; `self` + # in this context will not reference the Agent. The preferred method for + # dispatching nested actions is to pass the Agent as an argument. This allows + # Ruby to more effectively manage the closing scope. + # + # Prefer this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send(agent) do |value, this| + # this.send {|v| v + 42 } + # 3.14 + # end + # agent.value #=> 45.14 + # ``` + # + # Over this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send do |value| + # agent.send {|v| v + 42 } + # 3.14 + # end + # ``` + # + # @!macro agent_await_warning + # + # **NOTE** Never, *under any circumstances*, call any of the "await" methods + # ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action + # block/proc/lambda. The call will block the Agent and will always fail. 
+ # Calling either {#await} or {#wait} (with a timeout of `nil`) will + # hopelessly deadlock the Agent with no possibility of recovery. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/Agents Clojure Agents + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Agent < Synchronization::LockableObject + include Concern::Observable + + ERROR_MODES = [:continue, :fail].freeze + private_constant :ERROR_MODES + + AWAIT_FLAG = ::Object.new + private_constant :AWAIT_FLAG + + AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG } + private_constant :AWAIT_ACTION + + DEFAULT_ERROR_HANDLER = ->(agent, error) { nil } + private_constant :DEFAULT_ERROR_HANDLER + + DEFAULT_VALIDATOR = ->(value) { true } + private_constant :DEFAULT_VALIDATOR + + Job = Struct.new(:action, :args, :executor, :caller) + private_constant :Job + + # Raised during action processing or any other time in an Agent's lifecycle. + class Error < StandardError + def initialize(message = nil) + message ||= 'agent must be restarted before jobs can post' + super(message) + end + end + + # Raised when a new value obtained during action processing or at `#restart` + # fails validation. + class ValidationError < Error + def initialize(message = nil) + message ||= 'invalid value' + super(message) + end + end + + # The error mode this Agent is operating in. See {#initialize} for details. + attr_reader :error_mode + + # Create a new `Agent` with the given initial value and options. + # + # The `:validator` option must be `nil` or a side-effect free proc/lambda + # which takes one argument. On any intended value change the validator, if + # provided, will be called. If the new value is invalid the validator should + # return `false` or raise an error. + # + # The `:error_handler` option must be `nil` or a proc/lambda which takes two + # arguments. When an action raises an error or validation fails, either by + # returning false or raising an error, the error handler will be called. The + # arguments to the error handler will be a reference to the agent itself and + # the error object which was raised. + # + # The `:error_mode` may be either `:continue` (the default if an error + # handler is given) or `:fail` (the default if error handler nil or not + # given). + # + # If an action being run by the agent throws an error or doesn't pass + # validation the error handler, if present, will be called. After the + # handler executes if the error mode is `:continue` the Agent will continue + # as if neither the action that caused the error nor the error itself ever + # happened. + # + # If the mode is `:fail` the Agent will become {#failed?} and will stop + # accepting new action dispatches. Any previously queued actions will be + # held until {#restart} is called. The {#value} method will still work, + # returning the value of the Agent before the error. + # + # @param [Object] initial the initial value + # @param [Hash] opts the configuration options + # + # @option opts [Symbol] :error_mode either `:continue` or `:fail` + # @option opts [nil, Proc] :error_handler the (optional) error handler + # @option opts [nil, Proc] :validator the (optional) validation procedure + def initialize(initial, opts = {}) + super() + synchronize { ns_initialize(initial, opts) } + end + + # The current value (state) of the Agent, irrespective of any pending or + # in-progress actions. The value is always available and is non-blocking. 
+ # + # @return [Object] the current value + def value + @current.value # TODO (pitr 12-Sep-2015): broken unsafe read? + end + + alias_method :deref, :value + + # When {#failed?} and {#error_mode} is `:fail`, returns the error object + # which caused the failure, else `nil`. When {#error_mode} is `:continue` + # will *always* return `nil`. + # + # @return [nil, Error] the error which caused the failure when {#failed?} + def error + @error.value + end + + alias_method :reason, :error + + # @!macro agent_send + # + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Action dispatches are only allowed when the Agent + # is not {#failed?}. + # + # The action must be a block/proc/lambda which takes 1 or more arguments. + # The first argument is the current {#value} of the Agent. Any arguments + # passed to the send method via the `args` parameter will be passed to the + # action as the remaining arguments. The action must return the new value + # of the Agent. + # + # * {#send} and {#send!} should be used for actions that are CPU limited + # * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that + # may block on IO + # * {#send_via} and {#send_via!} are used when a specific executor is to + # be used for the action + # + # @param [Array] args zero or more arguments to be passed to + # the action + # @param [Proc] action the action dispatch to be enqueued + # + # @yield [agent, value, *args] process the old value and return the new + # @yieldparam [Object] value the current {#value} of the Agent + # @yieldparam [Array] args zero or more arguments to pass to the + # action + # @yieldreturn [Object] the new value of the Agent + # + # @!macro send_return + # @return [Boolean] true if the action is successfully enqueued, false if + # the Agent is {#failed?} + def send(*args, &action) + enqueue_action_job(action, args, Concurrent.global_fast_executor) + end + + # @!macro agent_send + # + # @!macro send_bang_return_and_raise + # @return [Boolean] true if the action is successfully enqueued + # @raise [Concurrent::Agent::Error] if the Agent is {#failed?} + def send!(*args, &action) + raise Error.new unless send(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + def send_off(*args, &action) + enqueue_action_job(action, args, Concurrent.global_io_executor) + end + + alias_method :post, :send_off + + # @!macro agent_send + # @!macro send_bang_return_and_raise + def send_off!(*args, &action) + raise Error.new unless send_off(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via(executor, *args, &action) + enqueue_action_job(action, args, executor) + end + + # @!macro agent_send + # @!macro send_bang_return_and_raise + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via!(executor, *args, &action) + raise Error.new unless send_via(executor, *args, &action) + true + end + + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Appropriate for actions that may block on IO. 
+ # + # @param [Proc] action the action dispatch to be enqueued + # @return [Concurrent::Agent] self + # @see #send_off + def <<(action) + send_off(&action) + self + end + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far, from this thread or nested by the Agent, have occurred. Will + # block when {#failed?}. Will never return if a failed Agent is {#restart} + # with `:clear_actions` true. + # + # Returns a reference to `self` to support method chaining: + # + # ``` + # current_value = agent.await.value + # ``` + # + # @return [Boolean] self + # + # @!macro agent_await_warning + def await + wait(nil) + self + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout) + wait(timeout.to_f) + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # + # @!macro agent_await_warning + def await_for!(timeout) + raise Concurrent::TimeoutError unless wait(timeout.to_f) + true + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. Will block indefinitely when timeout is nil or not given. + # + # Provided mainly for consistency with other classes in this library. Prefer + # the various `await` methods instead. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def wait(timeout = nil) + latch = Concurrent::CountDownLatch.new(1) + enqueue_await_job(latch) + latch.wait(timeout) + end + + # Is the Agent in a failed state? + # + # @see #restart + def failed? + !@error.value.nil? + end + + alias_method :stopped?, :failed? + + # When an Agent is {#failed?}, changes the Agent {#value} to `new_value` + # then un-fails the Agent so that action dispatches are allowed again. If + # the `:clear_actions` option is give and true, any actions queued on the + # Agent that were being held while it was failed will be discarded, + # otherwise those held actions will proceed. The `new_value` must pass the + # validator if any, or `restart` will raise an exception and the Agent will + # remain failed with its old {#value} and {#error}. Observers, if any, will + # not be notified of the new state. + # + # @param [Object] new_value the new value for the Agent once restarted + # @param [Hash] opts the configuration options + # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed + # actions should be discarded on restart, else false (default: false) + # @return [Boolean] true + # + # @raise [Concurrent:AgentError] when not failed + def restart(new_value, opts = {}) + clear_actions = opts.fetch(:clear_actions, false) + synchronize do + raise Error.new('agent is not failed') unless failed? 
+ raise ValidationError unless ns_validate(new_value) + @current.value = new_value + @error.value = nil + @queue.clear if clear_actions + ns_post_next_job unless @queue.empty? + end + true + end + + class << self + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far to all the given Agents, from this thread or nested by the + # given Agents, have occurred. Will block when any of the agents are + # failed. Will never return if a failed Agent is restart with + # `:clear_actions` true. + # + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true + # + # @!macro agent_await_warning + def await(*agents) + agents.each { |agent| agent.await } + true + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout, *agents) + end_at = Concurrent.monotonic_time + timeout.to_f + ok = agents.length.times do |i| + break false if (delay = end_at - Concurrent.monotonic_time) < 0 + break false unless agents[i].await_for(delay) + end + !!ok + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # @!macro agent_await_warning + def await_for!(timeout, *agents) + raise Concurrent::TimeoutError unless await_for(timeout, *agents) + true + end + end + + private + + def ns_initialize(initial, opts) + @error_mode = opts[:error_mode] + @error_handler = opts[:error_handler] + + if @error_mode && !ERROR_MODES.include?(@error_mode) + raise ArgumentError.new('unrecognized error mode') + elsif @error_mode.nil? + @error_mode = @error_handler ? :continue : :fail + end + + @error_handler ||= DEFAULT_ERROR_HANDLER + @validator = opts.fetch(:validator, DEFAULT_VALIDATOR) + @current = Concurrent::AtomicReference.new(initial) + @error = Concurrent::AtomicReference.new(nil) + @caller = Concurrent::ThreadLocalVar.new(nil) + @queue = [] + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + def enqueue_action_job(action, args, executor) + raise ArgumentError.new('no action given') unless action + job = Job.new(action, args, executor, @caller.value || Thread.current.object_id) + synchronize { ns_enqueue_job(job) } + end + + def enqueue_await_job(latch) + synchronize do + if (index = ns_find_last_job_for_thread) + job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor, + Thread.current.object_id) + ns_enqueue_job(job, index+1) + else + latch.count_down + true + end + end + end + + def ns_enqueue_job(job, index = nil) + # a non-nil index means this is an await job + return false if index.nil? && failed? 
+ index ||= @queue.length + @queue.insert(index, job) + # if this is the only job, post to executor + ns_post_next_job if @queue.length == 1 + true + end + + def ns_post_next_job + @queue.first.executor.post { execute_next_job } + end + + def execute_next_job + job = synchronize { @queue.first } + old_value = @current.value + + @caller.value = job.caller # for nested actions + new_value = job.action.call(old_value, *job.args) + @caller.value = nil + + return if new_value == AWAIT_FLAG + + if ns_validate(new_value) + @current.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + else + handle_error(ValidationError.new) + end + rescue => error + handle_error(error) + ensure + synchronize do + @queue.shift + unless failed? || @queue.empty? + ns_post_next_job + end + end + end + + def ns_validate(value) + @validator.call(value) + rescue + false + end + + def handle_error(error) + # stop new jobs from posting + @error.value = error if @error_mode == :fail + @error_handler.call(self, error) + rescue + # do nothing + end + + def ns_find_last_job_for_thread + @queue.rindex { |job| job.caller == Thread.current.object_id } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/array.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/array.rb new file mode 100644 index 0000000..60e5b56 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/array.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_array + # + # A thread-safe subclass of Array. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array` + # which is concatenation of `a` and `b`, then it writes the concatenation to `a`. + # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#concat` instead. + # + # @see http://ruby-doc.org/core/Array.html Ruby standard library `Array` + + # @!macro internal_implementation_note + ArrayImplementation = case + when Concurrent.on_cruby? + # Array is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Array + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyArray < ::Array + include JRuby::Synchronized + end + JRubyArray + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_rbx RbxArray + RbxArray + + when Concurrent.on_truffleruby? 
+ require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray + TruffleRubyArray + + else + warn 'Possibly unsupported Ruby implementation' + ::Array + end + private_constant :ArrayImplementation + + # @!macro concurrent_array + class Array < ArrayImplementation + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/async.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/async.rb new file mode 100644 index 0000000..5e125e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/async.rb @@ -0,0 +1,448 @@ +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # A mixin module that provides simple asynchronous behavior to a class, + # turning it into a simple actor. Loosely based on Erlang's + # [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without + # supervision or linking. + # + # A more feature-rich {Concurrent::Actor} is also available when the + # capabilities of `Async` are too limited. + # + # ```cucumber + # Feature: + # As a stateful, plain old Ruby class + # I want safe, asynchronous behavior + # So my long-running methods don't block the main thread + # ``` + # + # The `Async` module is a way to mix simple yet powerful asynchronous + # capabilities into any plain old Ruby object or class, turning each object + # into a simple Actor. Method calls are processed on a background thread. The + # caller is free to perform other actions while processing occurs in the + # background. + # + # Method calls to the asynchronous object are made via two proxy methods: + # `async` (alias `cast`) and `await` (alias `call`). These proxy methods post + # the method call to the object's background thread and return a "future" + # which will eventually contain the result of the method call. + # + # This behavior is loosely patterned after Erlang's `gen_server` behavior. + # When an Erlang module implements the `gen_server` behavior it becomes + # inherently asynchronous. The `start` or `start_link` function spawns a + # process (similar to a thread but much more lightweight and efficient) and + # returns the ID of the process. Using the process ID, other processes can + # send messages to the `gen_server` via the `cast` and `call` methods. Unlike + # Erlang's `gen_server`, however, `Async` classes do not support linking or + # supervision trees. + # + # ## Basic Usage + # + # When this module is mixed into a class, objects of the class become inherently + # asynchronous. Each object gets its own background thread on which to post + # asynchronous method calls. Asynchronous method calls are executed in the + # background one at a time in the order they are received. + # + # To create an asynchronous class, simply mix in the `Concurrent::Async` module: + # + # ``` + # class Hello + # include Concurrent::Async + # + # def hello(name) + # "Hello, #{name}!" + # end + # end + # ``` + # + # Mixing this module into a class provides each object two proxy methods: + # `async` and `await`. These methods are thread safe with respect to the + # enclosing object. The former proxy allows methods to be called + # asynchronously by posting to the object's internal thread. 
The latter proxy + # allows a method to be called synchronously but does so safely with respect + # to any pending asynchronous method calls and ensures proper ordering. Both + # methods return a {Concurrent::IVar} which can be inspected for the result + # of the proxied method call. Calling a method with `async` will return a + # `:pending` `IVar` whereas `await` will return a `:complete` `IVar`. + # + # ``` + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # ``` + # + # ## Let It Fail + # + # The `async` and `await` proxy methods have built-in error protection based + # on Erlang's famous "let it fail" philosophy. Instance methods should not be + # programmed defensively. When an exception is raised by a delegated method + # the proxy will rescue the exception, expose it to the caller as the `reason` + # attribute of the returned future, then process the next method call. + # + # ## Calling Methods Internally + # + # External method calls should *always* use the `async` and `await` proxy + # methods. When one method calls another method, the `async` proxy should + # rarely be used and the `await` proxy should *never* be used. + # + # When an object calls one of its own methods using the `await` proxy the + # second call will be enqueued *behind* the currently running method call. + # Any attempt to wait on the result will fail as the second call will never + # run until after the current call completes. + # + # Calling a method using the `await` proxy from within a method that was + # itself called using `async` or `await` will irreversibly deadlock the + # object. Do *not* do this, ever. + # + # ## Instance Variables and Attribute Accessors + # + # Instance variables do not need to be thread-safe so long as they are private. + # Asynchronous method calls are processed in the order they are received and + # are processed one at a time. Therefore private instance variables can only + # be accessed by one thread at a time. This is inherently thread-safe. + # + # When using private instance variables within asynchronous methods, the best + # practice is to read the instance variable into a local variable at the start + # of the method then update the instance variable at the *end* of the method. + # This way, should an exception be raised during method execution the internal + # state of the object will not have been changed. + # + # ### Reader Attributes + # + # The use of `attr_reader` is discouraged. Internal state exposed externally, + # when necessary, should be done through accessor methods. The instance + # variables exposed by these methods *must* be thread-safe, or they must be + # called using the `async` and `await` proxy methods. These two approaches are + # subtly different. + # + # When internal state is accessed via the `async` and `await` proxy methods, + # the returned value represents the object's state *at the time the call is + # processed*, which may *not* be the state of the object at the time the call + # is made. + # + # To get the state *at the current* time, irrespective of an enqueued method + # calls, a reader method must be called directly. 
This is inherently unsafe + # unless the instance variable is itself thread-safe, preferably using one + # of the thread-safe classes within this library. Because the thread-safe + # classes within this library are internally-locking or non-locking, they can + # be safely used from within asynchronous methods without causing deadlocks. + # + # Generally speaking, the best practice is to *not* expose internal state via + # reader methods. The best practice is to simply use the method's return value. + # + # ### Writer Attributes + # + # Writer attributes should never be used with asynchronous classes. Changing + # the state externally, even when done in the thread-safe way, is not logically + # consistent. Changes to state need to be timed with respect to all asynchronous + # method calls which my be in-process or enqueued. The only safe practice is to + # pass all necessary data to each method as arguments and let the method update + # the internal state as necessary. + # + # ## Class Constants, Variables, and Methods + # + # ### Class Constants + # + # Class constants do not need to be thread-safe. Since they are read-only and + # immutable they may be safely read both externally and from within + # asynchronous methods. + # + # ### Class Variables + # + # Class variables should be avoided. Class variables represent shared state. + # Shared state is anathema to concurrency. Should there be a need to share + # state using class variables they *must* be thread-safe, preferably + # using the thread-safe classes within this library. When updating class + # variables, never assign a new value/object to the variable itself. Assignment + # is not thread-safe in Ruby. Instead, use the thread-safe update functions + # of the variable itself to change the value. + # + # The best practice is to *never* use class variables with `Async` classes. + # + # ### Class Methods + # + # Class methods which are pure functions are safe. Class methods which modify + # class variables should be avoided, for all the reasons listed above. + # + # ## An Important Note About Thread Safe Guarantees + # + # > Thread safe guarantees can only be made when asynchronous method calls + # > are not mixed with direct method calls. Use only direct method calls + # > when the object is used exclusively on a single thread. Use only + # > `async` and `await` when the object is shared between threads. Once you + # > call a method using `async` or `await`, you should no longer call methods + # > directly on the object. Use `async` and `await` exclusively from then on. + # + # @example + # + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # + # @see Concurrent::Actor + # @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia + # @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server + # @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/ + module Async + + # @!method self.new(*args, &block) + # + # Instanciate a new object and ensure proper initialization of the + # synchronization mechanisms. 
+ # + # @param [Array] args Zero or more arguments to be passed to the + # object's initializer. + # @param [Proc] block Optional block to pass to the object's initializer. + # @return [Object] A properly initialized object of the asynchronous class. + + # Check for the presence of a method on an object and determine if a given + # set of arguments matches the required arity. + # + # @param [Object] obj the object to check against + # @param [Symbol] method the method to check the object for + # @param [Array] args zero or more arguments for the arity check + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + # + # @note This check is imperfect because of the way Ruby reports the arity of + # methods with a variable number of arguments. It is possible to determine + # if too few arguments are given but impossible to determine if too many + # arguments are given. This check may also fail to recognize dynamic behavior + # of the object, such as methods simulated with `method_missing`. + # + # @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity + # @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to? + # @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing + # + # @!visibility private + def self.validate_argc(obj, method, *args) + argc = args.length + arity = obj.method(method).arity + + if arity >= 0 && argc != arity + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})") + elsif arity < 0 && (arity = (arity + 1).abs) > argc + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)") + end + end + + # @!visibility private + def self.included(base) + base.singleton_class.send(:alias_method, :original_new, :new) + base.extend(ClassMethods) + super(base) + end + + # @!visibility private + module ClassMethods + def new(*args, &block) + obj = original_new(*args, &block) + obj.send(:init_synchronization) + obj + end + end + private_constant :ClassMethods + + # Delegates asynchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AsyncDelegator < Synchronization::LockableObject + safe_initialization! + + # Create a new delegator object wrapping the given delegate. + # + # @param [Object] delegate the object to wrap and delegate method calls to + def initialize(delegate) + super() + @delegate = delegate + @queue = [] + @executor = Concurrent.global_io_executor + @ruby_pid = $$ + end + + # Delegates method calls to the wrapped object. + # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + super unless @delegate.respond_to?(method) + Async::validate_argc(@delegate, method, *args) + + ivar = Concurrent::IVar.new + synchronize do + reset_if_forked + @queue.push [ivar, method, args, block] + @executor.post { perform } if @queue.length == 1 + end + + ivar + end + + # Check whether the method is responsive + # + # @param [Symbol] method the method being called + def respond_to_missing?(method, include_private = false) + @delegate.respond_to?(method) || super + end + + # Perform all enqueued tasks. 
+ # + # This method must be called from within the executor. It must not be + # called while already running. It will loop until the queue is empty. + def perform + loop do + ivar, method, args, block = synchronize { @queue.first } + break unless ivar # queue is empty + + begin + ivar.set(@delegate.send(method, *args, &block)) + rescue => error + ivar.fail(error) + end + + synchronize do + @queue.shift + return if @queue.empty? + end + end + end + + def reset_if_forked + if $$ != @ruby_pid + @queue.clear + @ruby_pid = $$ + end + end + end + private_constant :AsyncDelegator + + # Delegates synchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AwaitDelegator + + # Create a new delegator object wrapping the given delegate. + # + # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to + def initialize(delegate) + @delegate = delegate + end + + # Delegates method calls to the wrapped object. + # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + ivar = @delegate.send(method, *args, &block) + ivar.wait + ivar + end + + # Check whether the method is responsive + # + # @param [Symbol] method the method being called + def respond_to_missing?(method, include_private = false) + @delegate.respond_to?(method) || super + end + end + private_constant :AwaitDelegator + + # Causes the chained method call to be performed asynchronously on the + # object's thread. The delegated method will return a future in the + # `:pending` state and the method call will have been scheduled on the + # object's thread. The final disposition of the method call can be obtained + # by inspecting the returned future. + # + # @!macro async_thread_safety_warning + # @note The method call is guaranteed to be thread safe with respect to + # all other method calls against the same object that are called with + # either `async` or `await`. The mutable nature of Ruby references + # (and object orientation in general) prevent any other thread safety + # guarantees. Do NOT mix direct method calls with delegated method calls. + # Use *only* delegated method calls when sharing the object between threads. + # + # @return [Concurrent::IVar] the pending result of the asynchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of + # the requested method + def async + @__async_delegator__ + end + alias_method :cast, :async + + # Causes the chained method call to be performed synchronously on the + # current thread. The delegated will return a future in either the + # `:fulfilled` or `:rejected` state and the delegated method will have + # completed. The final disposition of the delegated method can be obtained + # by inspecting the returned future. 
+ # + # @!macro async_thread_safety_warning + # + # @return [Concurrent::IVar] the completed result of the synchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of the + # requested method + def await + @__await_delegator__ + end + alias_method :call, :await + + # Initialize the internal serializer and other stnchronization mechanisms. + # + # @note This method *must* be called immediately upon object construction. + # This is the only way thread-safe initialization can be guaranteed. + # + # @!visibility private + def init_synchronization + return self if defined?(@__async_initialized__) && @__async_initialized__ + @__async_initialized__ = true + @__async_delegator__ = AsyncDelegator.new(self) + @__await_delegator__ = AwaitDelegator.new(@__async_delegator__) + self + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atom.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atom.rb new file mode 100644 index 0000000..8a45730 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atom.rb @@ -0,0 +1,222 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +# @!macro thread_safe_variable_comparison +# +# ## Thread-safe Variable Classes +# +# Each of the thread-safe variable classes is designed to solve a different +# problem. In general: +# +# * *{Concurrent::Agent}:* Shared, mutable variable providing independent, +# uncoordinated, *asynchronous* change of individual values. Best used when +# the value will undergo frequent, complex updates. Suitable when the result +# of an update does not need to be known immediately. +# * *{Concurrent::Atom}:* Shared, mutable variable providing independent, +# uncoordinated, *synchronous* change of individual values. Best used when +# the value will undergo frequent reads but only occasional, though complex, +# updates. Suitable when the result of an update must be known immediately. +# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated +# atomically. Updates are synchronous but fast. Best used when updates a +# simple set operations. Not suitable when updates are complex. +# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar +# but optimized for the given data type. +# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used +# when two or more threads need to exchange data. The threads will pair then +# block on each other until the exchange is complete. +# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread +# must give a value to another, which must take the value. The threads will +# block on each other until the exchange is complete. +# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which +# holds a different value for each thread which has access. Often used as +# an instance variable in objects which must maintain different state +# for different threads. +# * *{Concurrent::TVar}:* Shared, mutable variables which provide +# *coordinated*, *synchronous*, change of *many* stated. Used when multiple +# value must change together, in an all-or-nothing transaction. 
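# A minimal sketch of the synchronous/asynchronous distinction drawn in the
# comparison above, using only the Atom and Agent APIs documented in this gem
# (the counter itself is illustrative):
#
# ```
# # Atom: the change is applied synchronously; the new value is visible as
# # soon as #swap returns.
# atom = Concurrent::Atom.new(0)
# atom.swap { |count| count + 1 }
# atom.value #=> 1
#
# # Agent: the change is applied asynchronously on a thread pool; reading the
# # final value may require waiting for pending actions to complete.
# agent = Concurrent::Agent.new(0)
# agent.send { |count| count + 1 }
# agent.await
# agent.value #=> 1
# ```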
+ + +module Concurrent + + # Atoms provide a way to manage shared, synchronous, independent state. + # + # An atom is initialized with an initial value and an optional validation + # proc. At any time the value of the atom can be synchronously and safely + # changed. If a validator is given at construction then any new value + # will be checked against the validator and will be rejected if the + # validator returns false or raises an exception. + # + # There are two ways to change the value of an atom: {#compare_and_set} and + # {#swap}. The former will set the new value if and only if it validates and + # the current value matches the new value. The latter will atomically set the + # new value to the result of running the given block if and only if that + # value validates. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an atom with an initial value + # atom = Concurrent::Atom.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # atom.swap{|set| next_fibonacci(set) } + # end + # + # # get the current value + # atom.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Atoms support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time the value of the Atom changes. + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Atom when the change began + # The `new_value` is the value to which the Atom was set when the change + # completed. Note that `old_value` and `new_value` may be the same. This is + # not an error. It simply means that the change operation returned the same + # value. + # + # Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/atoms Clojure Atoms + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Atom < Synchronization::Object + include Concern::Observable + + safe_initialization! + attr_atomic(:value) + private :value=, :swap_value, :compare_and_set_value, :update_value + public :value + alias_method :deref, :value + + # @!method value + # The current value of the atom. + # + # @return [Object] The current value. + + # Create a new atom with the given initial value. + # + # @param [Object] value The initial value + # @param [Hash] opts The options used to configure the atom + # @option opts [Proc] :validator (nil) Optional proc used to validate new + # values. It must accept one and only one argument which will be the + # intended new value. The validator will return true if the new value + # is acceptable else return false (preferrably) or raise an exception. + # + # @!macro deref_options + # + # @raise [ArgumentError] if the validator is not a `Proc` (when given) + def initialize(value, opts = {}) + super() + @Validator = opts.fetch(:validator, -> v { true }) + self.observers = Collection::CopyOnNotifyObserverSet.new + self.value = value + end + + # Atomically swaps the value of atom using the given block. The current + # value will be passed to the block, as will any arguments passed as + # arguments to the function. The new value will be validated against the + # (optional) validator proc given at construction. 
If validation fails the + # value will not be changed. + # + # Internally, {#swap} reads the current value, applies the block to it, and + # attempts to compare-and-set it in. Since another thread may have changed + # the value in the intervening time, it may have to retry, and does so in a + # spin loop. The net effect is that the value will always be the result of + # the application of the supplied block to a current value, atomically. + # However, because the block might be called multiple times, it must be free + # of side effects. + # + # @note The given block may be called multiple times, and thus should be free + # of side effects. + # + # @param [Object] args Zero or more arguments passed to the block. + # + # @yield [value, args] Calculates a new value for the atom based on the + # current value and any supplied arguments. + # @yieldparam value [Object] The current value of the atom. + # @yieldparam args [Object] All arguments passed to the function, in order. + # @yieldreturn [Object] The intended new value of the atom. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + # + # @raise [ArgumentError] When no block is given. + def swap(*args) + raise ArgumentError.new('no block given') unless block_given? + + loop do + old_value = value + new_value = yield(old_value, *args) + begin + break old_value unless valid?(new_value) + break new_value if compare_and_set(old_value, new_value) + rescue + break old_value + end + end + end + + # Atomically sets the value of atom to the new value if and only if the + # current value of the atom is identical to the old value and the new + # value successfully validates against the (optional) validator given + # at construction. + # + # @param [Object] old_value The expected current value. + # @param [Object] new_value The intended new value. + # + # @return [Boolean] True if the value is changed else false. + def compare_and_set(old_value, new_value) + if valid?(new_value) && compare_and_set_value(old_value, new_value) + observers.notify_observers(Time.now, old_value, new_value) + true + else + false + end + end + + # Atomically sets the value of atom to the new value without regard for the + # current value so long as the new value successfully validates against the + # (optional) validator given at construction. + # + # @param [Object] new_value The intended new value. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + def reset(new_value) + old_value = value + if valid?(new_value) + self.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + new_value + else + old_value + end + end + + private + + # Is the new value valid? + # + # @param [Object] new_value The intended new value. 
+ # @return [Boolean] false if the validator function returns false or raises + # an exception else true + def valid?(new_value) + @Validator.call(new_value) + rescue + false + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb new file mode 100644 index 0000000..fcdeed7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb @@ -0,0 +1,66 @@ +require 'concurrent/constants' + +module Concurrent + + # @!macro thread_local_var + # @!macro internal_implementation_note + # @!visibility private + class AbstractThreadLocalVar + + # @!macro thread_local_var_method_initialize + def initialize(default = nil, &default_block) + if default && block_given? + raise ArgumentError, "Cannot use both value and block as default value" + end + + if block_given? + @default_block = default_block + @default = nil + else + @default_block = nil + @default = default + end + + allocate_storage + end + + # @!macro thread_local_var_method_get + def value + raise NotImplementedError + end + + # @!macro thread_local_var_method_set + def value=(value) + raise NotImplementedError + end + + # @!macro thread_local_var_method_bind + def bind(value, &block) + if block_given? + old_value = self.value + begin + self.value = value + yield + ensure + self.value = old_value + end + end + end + + protected + + # @!visibility private + def allocate_storage + raise NotImplementedError + end + + # @!visibility private + def default + if @default_block + self.value = @default_block.call + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb new file mode 100644 index 0000000..0b0373d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb @@ -0,0 +1,126 @@ +require 'concurrent/atomic/mutex_atomic_boolean' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_boolean_method_initialize + # + # Creates a new `AtomicBoolean` with the given initial value. + # + # @param [Boolean] initial the initial value + + # @!macro atomic_boolean_method_value_get + # + # Retrieves the current `Boolean` value. + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_value_set + # + # Explicitly sets the value. + # + # @param [Boolean] value the new value to be set + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_true_question + # + # Is the current value `true` + # + # @return [Boolean] true if the current value is `true`, else false + + # @!macro atomic_boolean_method_false_question + # + # Is the current value `false` + # + # @return [Boolean] true if the current value is `false`, else false + + # @!macro atomic_boolean_method_make_true + # + # Explicitly sets the value to true. + # + # @return [Boolean] true if value has changed, otherwise false + + # @!macro atomic_boolean_method_make_false + # + # Explicitly sets the value to false. 
+ # + # @return [Boolean] true if value has changed, otherwise false + + ################################################################### + + # @!macro atomic_boolean_public_api + # + # @!method initialize(initial = false) + # @!macro atomic_boolean_method_initialize + # + # @!method value + # @!macro atomic_boolean_method_value_get + # + # @!method value=(value) + # @!macro atomic_boolean_method_value_set + # + # @!method true? + # @!macro atomic_boolean_method_true_question + # + # @!method false? + # @!macro atomic_boolean_method_false_question + # + # @!method make_true + # @!macro atomic_boolean_method_make_true + # + # @!method make_false + # @!macro atomic_boolean_method_make_false + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicBooleanImplementation = case + when defined?(JavaAtomicBoolean) + JavaAtomicBoolean + when defined?(CAtomicBoolean) + CAtomicBoolean + else + MutexAtomicBoolean + end + private_constant :AtomicBooleanImplementation + + # @!macro atomic_boolean + # + # A boolean value that can be updated atomically. Reads and writes to an atomic + # boolean and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicBoolean... + # 2.790000 0.000000 2.790000 ( 2.791454) + # Testing with Concurrent::CAtomicBoolean... + # 0.740000 0.000000 0.740000 ( 0.740206) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicBoolean... + # 5.240000 2.520000 7.760000 ( 3.683000) + # Testing with Concurrent::JavaAtomicBoolean... + # 3.340000 0.010000 3.350000 ( 0.855000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean + # + # @!macro atomic_boolean_public_api + class AtomicBoolean < AtomicBooleanImplementation + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb new file mode 100644 index 0000000..c67166d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb @@ -0,0 +1,143 @@ +require 'concurrent/atomic/mutex_atomic_fixnum' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_fixnum_method_initialize + # + # Creates a new `AtomicFixnum` with the given initial value. + # + # @param [Fixnum] initial the initial value + # @raise [ArgumentError] if the initial value is not a `Fixnum` + + # @!macro atomic_fixnum_method_value_get + # + # Retrieves the current `Fixnum` value. + # + # @return [Fixnum] the current value + + # @!macro atomic_fixnum_method_value_set + # + # Explicitly sets the value. + # + # @param [Fixnum] value the new value to be set + # + # @return [Fixnum] the current value + # + # @raise [ArgumentError] if the new value is not a `Fixnum` + + # @!macro atomic_fixnum_method_increment + # + # Increases the current value by the given amount (defaults to 1). 
+ # + # @param [Fixnum] delta the amount by which to increase the current value + # + # @return [Fixnum] the current value after incrementation + + # @!macro atomic_fixnum_method_decrement + # + # Decreases the current value by the given amount (defaults to 1). + # + # @param [Fixnum] delta the amount by which to decrease the current value + # + # @return [Fixnum] the current value after decrementation + + # @!macro atomic_fixnum_method_compare_and_set + # + # Atomically sets the value to the given updated value if the current + # value == the expected value. + # + # @param [Fixnum] expect the expected value + # @param [Fixnum] update the new value + # + # @return [Boolean] true if the value was updated else false + + # @!macro atomic_fixnum_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # + # @return [Object] the new value + + ################################################################### + + # @!macro atomic_fixnum_public_api + # + # @!method initialize(initial = 0) + # @!macro atomic_fixnum_method_initialize + # + # @!method value + # @!macro atomic_fixnum_method_value_get + # + # @!method value=(value) + # @!macro atomic_fixnum_method_value_set + # + # @!method increment(delta = 1) + # @!macro atomic_fixnum_method_increment + # + # @!method decrement(delta = 1) + # @!macro atomic_fixnum_method_decrement + # + # @!method compare_and_set(expect, update) + # @!macro atomic_fixnum_method_compare_and_set + # + # @!method update + # @!macro atomic_fixnum_method_update + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicFixnumImplementation = case + when defined?(JavaAtomicFixnum) + JavaAtomicFixnum + when defined?(CAtomicFixnum) + CAtomicFixnum + else + MutexAtomicFixnum + end + private_constant :AtomicFixnumImplementation + + # @!macro atomic_fixnum + # + # A numeric value that can be updated atomically. Reads and writes to an atomic + # fixnum and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicFixnum... + # 3.130000 0.000000 3.130000 ( 3.136505) + # Testing with Concurrent::CAtomicFixnum... + # 0.790000 0.000000 0.790000 ( 0.785550) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicFixnum... + # 5.460000 2.460000 7.920000 ( 3.715000) + # Testing with Concurrent::JavaAtomicFixnum... + # 4.520000 0.030000 4.550000 ( 1.187000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong + # + # @!macro atomic_fixnum_public_api + class AtomicFixnum < AtomicFixnumImplementation + # @return [String] Short string representation. 
+ def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb new file mode 100644 index 0000000..f20cd46 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb @@ -0,0 +1,164 @@ +module Concurrent + # An atomic reference which maintains an object reference along with a mark bit + # that can be updated atomically. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html + # java.util.concurrent.atomic.AtomicMarkableReference + class AtomicMarkableReference < ::Concurrent::Synchronization::Object + + attr_atomic(:reference) + private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference + + def initialize(value = nil, mark = false) + super() + self.reference = immutable_array(value, mark) + end + + # Atomically sets the value and mark to the given updated value and + # mark given both: + # - the current value == the expected value && + # - the current mark == the expected mark + # + # @param [Object] expected_val the expected value + # @param [Object] new_val the new value + # @param [Boolean] expected_mark the expected mark + # @param [Boolean] new_mark the new mark + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value or the + # actual mark was not equal to the expected mark + def compare_and_set(expected_val, new_val, expected_mark, new_mark) + # Memoize a valid reference to the current AtomicReference for + # later comparison. + current = reference + curr_val, curr_mark = current + + # Ensure that that the expected marks match. + return false unless expected_mark == curr_mark + + if expected_val.is_a? Numeric + # If the object is a numeric, we need to ensure we are comparing + # the numerical values + return false unless expected_val == curr_val + else + # Otherwise, we need to ensure we are comparing the object identity. + # Theoretically, this could be incorrect if a user monkey-patched + # `Object#equal?`, but they should know that they are playing with + # fire at that point. + return false unless expected_val.equal? curr_val + end + + prospect = immutable_array(new_val, new_mark) + + compare_and_set_reference current, prospect + end + + alias_method :compare_and_swap, :compare_and_set + + # Gets the current reference and marked values. + # + # @return [Array] the current reference and marked values + def get + reference + end + + # Gets the current value of the reference + # + # @return [Object] the current value of the reference + def value + reference[0] + end + + # Gets the current marked value + # + # @return [Boolean] the current marked value + def mark + reference[1] + end + + alias_method :marked?, :mark + + # _Unconditionally_ sets to the given value of both the reference and + # the mark. + # + # @param [Object] new_val the new value + # @param [Boolean] new_mark the new mark + # + # @return [Array] both the new value and the new mark + def set(new_val, new_mark) + self.reference = immutable_array(new_val, new_mark) + end + + # Pass the current value and marked state to the given block, replacing it + # with the block's results. 
May retry if the value changes during the + # block's execution. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and new mark + def update + loop do + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + if compare_and_set old_val, new_val, old_mark, new_mark + return immutable_array(new_val, new_mark) + end + end + end + + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state + # + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + unless compare_and_set old_val, new_val, old_mark, new_mark + fail ::Concurrent::ConcurrentUpdateError, + 'AtomicMarkableReference: Update failed due to race condition.', + 'Note: If you would like to guarantee an update, please use ' + + 'the `AtomicMarkableReference#update` method.' + end + + immutable_array(new_val, new_mark) + end + + # Pass the current value to the given block, replacing it with the + # block's result. Simply return nil if update fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state, or nil if + # the update failed + def try_update + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + return unless compare_and_set old_val, new_val, old_mark, new_mark + + immutable_array(new_val, new_mark) + end + + private + + def immutable_array(*args) + args.freeze + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb new file mode 100644 index 0000000..620c069 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb @@ -0,0 +1,204 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/engine' +require 'concurrent/atomic_reference/numeric_cas_wrapper' + +# Shim for TruffleRuby::AtomicReference +if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference) + # @!visibility private + module TruffleRuby + AtomicReference = Truffle::AtomicReference + end +end + +module Concurrent + + # Define update methods that use direct paths + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicDirectUpdate + + # @!macro atomic_reference_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. 
+ # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @return [Object] the new value + def update + true until compare_and_set(old_value = get, new_value = yield(old_value)) + new_value + end + + # @!macro atomic_reference_method_try_update + # + # Pass the current value to the given block, replacing it + # with the block's result. Return nil if the update fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This method was altered to avoid raising an exception by default. + # Instead, this method now returns `nil` in case of failure. For more info, + # please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value, or nil if update failed + def try_update + old_value = get + new_value = yield old_value + + return unless compare_and_set old_value, new_value + + new_value + end + + # @!macro atomic_reference_method_try_update! + # + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This behavior mimics the behavior of the original + # `AtomicReference#try_update` API. The reason this was changed was to + # avoid raising exceptions (which are inherently slow) by default. For more + # info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_value = get + new_value = yield old_value + unless compare_and_set(old_value, new_value) + if $VERBOSE + raise ConcurrentUpdateError, "Update failed" + else + raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE + end + end + new_value + end + end + + require 'concurrent/atomic_reference/mutex_atomic' + + # @!macro atomic_reference + # + # An object reference that may be updated atomically. All read and write + # operations have java volatile semantic. + # + # @!macro thread_safe_variable_comparison + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html + # + # @!method initialize(value = nil) + # @!macro atomic_reference_method_initialize + # @param [Object] value The initial value. + # + # @!method get + # @!macro atomic_reference_method_get + # Gets the current value. + # @return [Object] the current value + # + # @!method set(new_value) + # @!macro atomic_reference_method_set + # Sets to the given value. + # @param [Object] new_value the new value + # @return [Object] the new value + # + # @!method get_and_set(new_value) + # @!macro atomic_reference_method_get_and_set + # Atomically sets to the given value and returns the old value. + # @param [Object] new_value the new value + # @return [Object] the old value + # + # @!method compare_and_set(old_value, new_value) + # @!macro atomic_reference_method_compare_and_set + # + # Atomically sets the value to the given updated value if + # the current value == the expected value. 
+ # + # @param [Object] old_value the expected value + # @param [Object] new_value the new value + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value. + # + # @!method update + # @!macro atomic_reference_method_update + # + # @!method try_update + # @!macro atomic_reference_method_try_update + # + # @!method try_update! + # @!macro atomic_reference_method_try_update! + + + # @!macro internal_implementation_note + class ConcurrentUpdateError < ThreadError + # frozen pre-allocated backtrace to speed ConcurrentUpdateError + CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze + end + + # @!macro internal_implementation_note + AtomicReferenceImplementation = case + when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? + # @!visibility private + # @!macro internal_implementation_note + class CAtomicReference + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + end + CAtomicReference + when Concurrent.on_jruby? + # @!visibility private + # @!macro internal_implementation_note + class JavaAtomicReference + include AtomicDirectUpdate + end + JavaAtomicReference + when Concurrent.on_truffleruby? + class TruffleRubyAtomicReference < TruffleRuby::AtomicReference + include AtomicDirectUpdate + alias_method :value, :get + alias_method :value=, :set + alias_method :compare_and_swap, :compare_and_set + alias_method :swap, :get_and_set + end + when Concurrent.on_rbx? + # @note Extends `Rubinius::AtomicReference` version adding aliases + # and numeric logic. + # + # @!visibility private + # @!macro internal_implementation_note + class RbxAtomicReference < Rubinius::AtomicReference + alias_method :_compare_and_set, :compare_and_set + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :value, :get + alias_method :value=, :set + alias_method :swap, :get_and_set + alias_method :compare_and_swap, :compare_and_set + end + RbxAtomicReference + else + MutexAtomicReference + end + private_constant :AtomicReferenceImplementation + + # @!macro atomic_reference + class AtomicReference < AtomicReferenceImplementation + + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], get + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb new file mode 100644 index 0000000..d883aed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb @@ -0,0 +1,100 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/mutex_count_down_latch' +require 'concurrent/atomic/java_count_down_latch' + +module Concurrent + + ################################################################### + + # @!macro count_down_latch_method_initialize + # + # Create a new `CountDownLatch` with the initial `count`. + # + # @param [new] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro count_down_latch_method_wait + # + # Block on the latch until the counter reaches zero or until `timeout` is reached. 
+ # + # @param [Fixnum] timeout the number of seconds to wait for the counter or `nil` + # to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on `timeout` + + # @!macro count_down_latch_method_count_down + # + # Signal the latch to decrement the counter. Will signal all blocked threads when + # the `count` reaches zero. + + # @!macro count_down_latch_method_count + # + # The current value of the counter. + # + # @return [Fixnum] the current value of the counter + + ################################################################### + + # @!macro count_down_latch_public_api + # + # @!method initialize(count = 1) + # @!macro count_down_latch_method_initialize + # + # @!method wait(timeout = nil) + # @!macro count_down_latch_method_wait + # + # @!method count_down + # @!macro count_down_latch_method_count_down + # + # @!method count + # @!macro count_down_latch_method_count + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + CountDownLatchImplementation = case + when Concurrent.on_jruby? + JavaCountDownLatch + else + MutexCountDownLatch + end + private_constant :CountDownLatchImplementation + + # @!macro count_down_latch + # + # A synchronization object that allows one thread to wait on multiple other threads. + # The thread that will wait creates a `CountDownLatch` and sets the initial value + # (normally equal to the number of other threads). The initiating thread passes the + # latch to the other threads then waits for the other threads by calling the `#wait` + # method. Each of the other threads calls `#count_down` when done with its work. + # When the latch counter reaches zero the waiting thread is unblocked and continues + # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset. + # + # @!macro count_down_latch_public_api + # @example Waiter and Decrementer + # latch = Concurrent::CountDownLatch.new(3) + # + # waiter = Thread.new do + # latch.wait() + # puts ("Waiter released") + # end + # + # decrementer = Thread.new do + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # end + # + # [waiter, decrementer].each(&:join) + class CountDownLatch < CountDownLatchImplementation + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb new file mode 100644 index 0000000..42f5a94 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb @@ -0,0 +1,128 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # A synchronization aid that allows a set of threads to all wait for each + # other to reach a common barrier point. 
+ # @example + # barrier = Concurrent::CyclicBarrier.new(3) + # jobs = Array.new(3) { |i| -> { sleep i; p done: i } } + # process = -> (i) do + # # waiting to start at the same time + # barrier.wait + # # execute job + # jobs[i].call + # # wait for others to finish + # barrier.wait + # end + # threads = 2.times.map do |i| + # Thread.new(i, &process) + # end + # + # # use main as well + # process.call 2 + # + # # here we can be sure that all jobs are processed + class CyclicBarrier < Synchronization::LockableObject + + # @!visibility private + Generation = Struct.new(:status) + private_constant :Generation + + # Create a new `CyclicBarrier` that waits for `parties` threads + # + # @param [Fixnum] parties the number of parties + # @yield an optional block that will be executed that will be executed after + # the last thread arrives and before the others are released + # + # @raise [ArgumentError] if `parties` is not an integer or is less than zero + def initialize(parties, &block) + Utility::NativeInteger.ensure_integer_and_bounds parties + Utility::NativeInteger.ensure_positive_and_no_zero parties + + super(&nil) + synchronize { ns_initialize parties, &block } + end + + # @return [Fixnum] the number of threads needed to pass the barrier + def parties + synchronize { @parties } + end + + # @return [Fixnum] the number of threads currently waiting on the barrier + def number_waiting + synchronize { @number_waiting } + end + + # Blocks on the barrier until the number of waiting threads is equal to + # `parties` or until `timeout` is reached or `reset` is called + # If a block has been passed to the constructor, it will be executed once by + # the last arrived thread before releasing the others + # @param [Fixnum] timeout the number of seconds to wait for the counter or + # `nil` to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on + # `timeout` or on `reset` or if the barrier is broken + def wait(timeout = nil) + synchronize do + + return false unless @generation.status == :waiting + + @number_waiting += 1 + + if @number_waiting == @parties + @action.call if @action + ns_generation_done @generation, :fulfilled + true + else + generation = @generation + if ns_wait_until(timeout) { generation.status != :waiting } + generation.status == :fulfilled + else + ns_generation_done generation, :broken, false + false + end + end + end + end + + # resets the barrier to its initial state + # If there is at least one waiting thread, it will be woken up, the `wait` + # method will return false and the barrier will be broken + # If the barrier is broken, this method restores it to the original state + # + # @return [nil] + def reset + synchronize { ns_generation_done @generation, :reset } + end + + # A barrier can be broken when: + # - a thread called the `reset` method while at least one other thread was waiting + # - at least one thread timed out on `wait` method + # + # A broken barrier can be restored using `reset` it's safer to create a new one + # @return [Boolean] true if the barrier is broken otherwise false + def broken? 
+ synchronize { @generation.status != :waiting } + end + + protected + + def ns_generation_done(generation, status, continue = true) + generation.status = status + ns_next_generation if continue + ns_broadcast + end + + def ns_next_generation + @generation = Generation.new(:waiting) + @number_waiting = 0 + end + + def ns_initialize(parties, &block) + @parties = parties + @action = block + ns_next_generation + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/event.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/event.rb new file mode 100644 index 0000000..825f38a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/event.rb @@ -0,0 +1,109 @@ +require 'thread' +require 'concurrent/synchronization' + +module Concurrent + + # Old school kernel-style event reminiscent of Win32 programming in C++. + # + # When an `Event` is created it is in the `unset` state. Threads can choose to + # `#wait` on the event, blocking until released by another thread. When one + # thread wants to alert all blocking threads it calls the `#set` method which + # will then wake up all listeners. Once an `Event` has been set it remains set. + # New threads calling `#wait` will return immediately. An `Event` may be + # `#reset` at any time once it has been set. + # + # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx + # @example + # event = Concurrent::Event.new + # + # t1 = Thread.new do + # puts "t1 is waiting" + # event.wait(1) + # puts "event ocurred" + # end + # + # t2 = Thread.new do + # puts "t2 calling set" + # event.set + # end + # + # [t1, t2].each(&:join) + # + # # prints: + # # t2 calling set + # # t1 is waiting + # # event occurred + class Event < Synchronization::LockableObject + + # Creates a new `Event` in the unset state. Threads calling `#wait` on the + # `Event` will block. + def initialize + super + synchronize { ns_initialize } + end + + # Is the object in the set state? + # + # @return [Boolean] indicating whether or not the `Event` has been set + def set? + synchronize { @set } + end + + # Trigger the event, setting the state to `set` and releasing all threads + # waiting on the event. Has no effect if the `Event` has already been set. + # + # @return [Boolean] should always return `true` + def set + synchronize { ns_set } + end + + def try? + synchronize { @set ? false : ns_set } + end + + # Reset a previously set event back to the `unset` state. + # Has no effect if the `Event` has not yet been set. + # + # @return [Boolean] should always return `true` + def reset + synchronize do + if @set + @set = false + @iteration +=1 + end + true + end + end + + # Wait a given number of seconds for the `Event` to be set by another + # thread. Will wait forever when no `timeout` value is given. Returns + # immediately if the `Event` has already been set. 
+ # + # @return [Boolean] true if the `Event` was set before timeout else false + def wait(timeout = nil) + synchronize do + unless @set + iteration = @iteration + ns_wait_until(timeout) { iteration < @iteration || @set } + else + true + end + end + end + + protected + + def ns_set + unless @set + @set = true + ns_broadcast + end + true + end + + def ns_initialize + @set = false + @iteration = 0 + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb new file mode 100644 index 0000000..cb5b35a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb @@ -0,0 +1,42 @@ +if Concurrent.on_jruby? + + module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class JavaCountDownLatch + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds(count) + Utility::NativeInteger.ensure_positive(count) + @latch = java.util.concurrent.CountDownLatch.new(count) + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly { @latch.await } + result = true + else + Synchronization::JRuby.sleep_interruptibly do + result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + end + + # @!macro count_down_latch_method_count_down + def count_down + @latch.countDown + end + + # @!macro count_down_latch_method_count + def count + @latch.getCount + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb new file mode 100644 index 0000000..b41018f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb @@ -0,0 +1,37 @@ +require 'concurrent/atomic/abstract_thread_local_var' + +if Concurrent.on_jruby? + + module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class JavaThreadLocalVar < AbstractThreadLocalVar + + # @!macro thread_local_var_method_get + def value + value = @var.get + + if value.nil? 
+ default + elsif value == NULL + nil + else + value + end + end + + # @!macro thread_local_var_method_set + def value=(value) + @var.set(value) + end + + protected + + # @!visibility private + def allocate_storage + @var = java.lang.ThreadLocal.new + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb new file mode 100644 index 0000000..a033de4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb @@ -0,0 +1,62 @@ +require 'concurrent/synchronization' + +module Concurrent + + # @!macro atomic_boolean + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicBoolean < Synchronization::LockableObject + + # @!macro atomic_boolean_method_initialize + def initialize(initial = false) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_boolean_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_boolean_method_value_set + def value=(value) + synchronize { @value = !!value } + end + + # @!macro atomic_boolean_method_true_question + def true? + synchronize { @value } + end + + # @!macro atomic_boolean_method_false_question + def false? + synchronize { !@value } + end + + # @!macro atomic_boolean_method_make_true + def make_true + synchronize { ns_make_value(true) } + end + + # @!macro atomic_boolean_method_make_false + def make_false + synchronize { ns_make_value(false) } + end + + protected + + # @!visibility private + def ns_initialize(initial) + @value = !!initial + end + + private + + # @!visibility private + def ns_make_value(value) + old = @value + @value = value + old != @value + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb new file mode 100644 index 0000000..77b91d2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb @@ -0,0 +1,75 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro atomic_fixnum + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicFixnum < Synchronization::LockableObject + + # @!macro atomic_fixnum_method_initialize + def initialize(initial = 0) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_fixnum_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_fixnum_method_value_set + def value=(value) + synchronize { ns_set(value) } + end + + # @!macro atomic_fixnum_method_increment + def increment(delta = 1) + synchronize { ns_set(@value + delta.to_i) } + end + + alias_method :up, :increment + + # @!macro atomic_fixnum_method_decrement + def decrement(delta = 1) + synchronize { ns_set(@value - delta.to_i) } + end + + alias_method :down, :decrement + + # @!macro atomic_fixnum_method_compare_and_set + def compare_and_set(expect, update) + synchronize do + if @value == expect.to_i + @value = update.to_i + true + else + false + end + end + end + + # @!macro atomic_fixnum_method_update + def update + synchronize do + @value = yield @value + end + end + + protected + + # 
@!visibility private + def ns_initialize(initial) + ns_set(initial) + end + + private + + # @!visibility private + def ns_set(value) + Utility::NativeInteger.ensure_integer_and_bounds value + @value = value + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb new file mode 100644 index 0000000..e99744c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb @@ -0,0 +1,44 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class MutexCountDownLatch < Synchronization::LockableObject + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds count + Utility::NativeInteger.ensure_positive count + + super() + synchronize { ns_initialize count } + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + synchronize { ns_wait_until(timeout) { @count == 0 } } + end + + # @!macro count_down_latch_method_count_down + def count_down + synchronize do + @count -= 1 if @count > 0 + ns_broadcast if @count == 0 + end + end + + # @!macro count_down_latch_method_count + def count + synchronize { @count } + end + + protected + + def ns_initialize(count) + @count = count + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb new file mode 100644 index 0000000..2042f73 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb @@ -0,0 +1,115 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro semaphore + # @!visibility private + # @!macro internal_implementation_note + class MutexSemaphore < Synchronization::LockableObject + + # @!macro semaphore_method_initialize + def initialize(count) + Utility::NativeInteger.ensure_integer_and_bounds count + + super() + synchronize { ns_initialize count } + end + + # @!macro semaphore_method_acquire + def acquire(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + try_acquire_timed(permits, nil) + nil + end + end + + # @!macro semaphore_method_available_permits + def available_permits + synchronize { @free } + end + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + def drain_permits + synchronize do + @free.tap { |_| @free = 0 } + end + end + + # @!macro semaphore_method_try_acquire + def try_acquire(permits = 1, timeout = nil) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + if timeout.nil? 
+ try_acquire_now(permits) + else + try_acquire_timed(permits, timeout) + end + end + end + + # @!macro semaphore_method_release + def release(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + @free += permits + permits.times { ns_signal } + end + nil + end + + # Shrinks the number of available permits by the indicated reduction. + # + # @param [Fixnum] reduction Number of permits to remove. + # + # @raise [ArgumentError] if `reduction` is not an integer or is negative + # + # @raise [ArgumentError] if `@free` - `@reduction` is less than zero + # + # @return [nil] + # + # @!visibility private + def reduce_permits(reduction) + Utility::NativeInteger.ensure_integer_and_bounds reduction + Utility::NativeInteger.ensure_positive reduction + + synchronize { @free -= reduction } + nil + end + + protected + + # @!visibility private + def ns_initialize(count) + @free = count + end + + private + + # @!visibility private + def try_acquire_now(permits) + if @free >= permits + @free -= permits + true + else + false + end + end + + # @!visibility private + def try_acquire_timed(permits, timeout) + ns_wait_until(timeout) { try_acquire_now(permits) } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb new file mode 100644 index 0000000..246f21a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb @@ -0,0 +1,254 @@ +require 'thread' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # Ruby read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And if the "write" lock is taken, any readers who come along will have to wait) + # + # If readers are already active when a writer comes along, the writer will wait for + # all the readers to finish before going ahead. + # Any additional readers that come when the writer is already waiting, will also + # wait (so writers are not starved). + # + # This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @note Do **not** try to acquire the write lock while already holding a read lock + # **or** try to acquire the write lock while you already have it. + # This will lead to deadlock + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReadWriteLock < Synchronization::Object + + # @!visibility private + WAITING_WRITER = 1 << 15 + + # @!visibility private + RUNNING_WRITER = 1 << 29 + + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + safe_initialization! 
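Editorial aside, not part of the gem sources: the constants above pack the entire lock state into a single integer, and the implementation notes that follow spell out the scheme. A small standalone sketch of the arithmetic, with the constant definitions mirrored from the source above and an assumed state of three running readers plus one waiting writer, may help:

```
WAITING_WRITER = 1 << 15                    # 32_768
RUNNING_WRITER = 1 << 29                    # 536_870_912
MAX_READERS    = WAITING_WRITER - 1
MAX_WRITERS    = RUNNING_WRITER - MAX_READERS - 1

c = 3 + (1 * WAITING_WRITER)                #=> 32_771
c & MAX_READERS                             #=> 3      (running readers)
(c & MAX_WRITERS) / WAITING_WRITER          #=> 1      (waiting writers)
c >= RUNNING_WRITER                         #=> false  (no writer currently running)
```

These are the same expressions the private helpers at the bottom of the class (`running_readers`, `waiting_writers`, `running_writer?`) use to decode the counter.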
+ + # Implementation notes: + # A goal is to make the uncontended path for both readers/writers lock-free + # Only if there is reader-writer or writer-writer contention, should locks be used + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each reader increments the counter by 1 when acquiring a read lock + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + + # Create a new `ReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadLock = Synchronization::Lock.new + @WriteLock = Synchronization::Lock.new + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock has been acquired will block until + # it is released. Will not block if other read locks have been acquired. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def acquire_read_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting when we first queue up, we need to wait + if waiting_writer?(c) + @ReadLock.wait_until { !waiting_writer? } + + # after a reader has waited once, they are allowed to "barge" ahead of waiting writers + # but if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadLock.wait_until { !running_writer? } + else + return if @Counter.compare_and_set(c, c+1) + end + end + else + break if @Counter.compare_and_set(c, c+1) + end + end + true + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + while true + c = @Counter.value + if @Counter.compare_and_set(c, c-1) + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_writer?(c) && running_readers(c) == 1 + @WriteLock.signal + end + break + end + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. 
+ def acquire_write_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + if c == 0 # no readers OR writers running + # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead + break if @Counter.compare_and_set(0, RUNNING_WRITER) + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here, OR another writer could increment + @WriteLock.wait_until do + # So we have to do another check inside the synchronized section + # If a writer OR reader is running, then go to sleep + c = @Counter.value + !running_writer?(c) && !running_readers?(c) + end + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # Then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + end + break + end + end + true + end + + # Release a previously acquired write lock. + # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + return true unless running_writer? + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadLock.broadcast + @WriteLock.signal if waiting_writers(c) > 0 + true + end + + # Queries if the write lock is held by any thread. + # + # @return [Boolean] true if the write lock is held else false` + def write_locked? + @Counter.value >= RUNNING_WRITER + end + + # Queries whether any threads are waiting to acquire the read or write lock. + # + # @return [Boolean] true if any threads are waiting for a lock else false + def has_waiters? + waiting_writer?(@Counter.value) + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) / WAITING_WRITER + end + + # @!visibility private + def waiting_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb new file mode 100644 index 0000000..42d7f3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb @@ -0,0 +1,379 @@ +require 'thread' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/errors' +require 'concurrent/synchronization' +require 'concurrent/atomic/thread_local_var' + +module Concurrent + + # Re-entrant read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And while the "write" lock is taken, no read locks can be obtained either. 
+ # Hence, the write lock can also be called an "exclusive" lock.) + # + # If another thread has taken a read lock, any thread which wants a write lock + # will block until all the readers release their locks. However, once a thread + # starts waiting to obtain a write lock, any additional readers that come along + # will also wait (so writers are not starved). + # + # A thread can acquire both a read and write lock at the same time. A thread can + # also acquire a read lock OR a write lock more than once. Only when the read (or + # write) lock is released as many times as it was acquired, will the thread + # actually let it go, allowing other threads which might have been waiting + # to proceed. Therefore the lock can be upgraded by first acquiring + # read lock and then write lock and that the lock can be downgraded by first + # having both read and write lock a releasing just the write lock. + # + # If both read and write locks are acquired by the same thread, it is not strictly + # necessary to release them in the same order they were acquired. In other words, + # the following code is legal: + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.acquire_write_lock + # lock.acquire_read_lock + # lock.release_write_lock + # # At this point, the current thread is holding only a read lock, not a write + # # lock. So other threads can take read locks, but not a write lock. + # lock.release_read_lock + # # Now the current thread is not holding either a read or write lock, so + # # another thread could potentially acquire a write lock. + # + # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReentrantReadWriteLock < Synchronization::Object + + # Implementation notes: + # + # A goal is to make the uncontended path for both readers/writers mutex-free + # Only if there is reader-writer or writer-writer contention, should mutexes be used + # Otherwise, a single CAS operation is all we need to acquire/release a lock + # + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each thread which has one OR MORE read locks increments the counter by 1 + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + # + # Additionally, each thread uses a thread-local variable to count how many times + # it has acquired a read lock, AND how many times it has acquired a write lock. 
+ # It uses a similar trick; an increment of 1 means a read lock was taken, and + # an increment of (1 << 15) means a write lock was taken + # This is what makes re-entrancy possible + # + # 2 rules are followed to ensure good liveness properties: + # 1) Once a writer has queued up and is waiting for a write lock, no other thread + # can take a lock without waiting + # 2) When a write lock is released, readers are given the "first chance" to wake + # up and acquire a read lock + # Following these rules means readers and writers tend to "take turns", so neither + # can starve the other, even under heavy contention + + # @!visibility private + READER_BITS = 15 + # @!visibility private + WRITER_BITS = 14 + + # Used with @Counter: + # @!visibility private + WAITING_WRITER = 1 << READER_BITS + # @!visibility private + RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS) + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + # Used with @HeldCount: + # @!visibility private + WRITE_LOCK_HELD = 1 << READER_BITS + # @!visibility private + READ_LOCK_MASK = WRITE_LOCK_HELD - 1 + # @!visibility private + WRITE_LOCK_MASK = MAX_WRITERS + + safe_initialization! + + # Create a new `ReentrantReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadQueue = Synchronization::Lock.new # used to queue waiting readers + @WriteQueue = Synchronization::Lock.new # used to queue waiting writers + @HeldCount = ThreadLocalVar.new(0) # indicates # of R & W locks held by this thread + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock is held by another thread, will block + # until it is released. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. 
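+ # @example Re-entrant read locking (an illustrative sketch, assuming only the
+ #   behaviour documented above: a thread may take the read lock several times
+ #   and must release it the same number of times)
+ #   lock = Concurrent::ReentrantReadWriteLock.new
+ #   lock.acquire_read_lock
+ #   lock.acquire_read_lock      # same thread, taken a second time
+ #   lock.release_read_lock      # still held once by this thread
+ #   lock.release_read_lock      # now fully released; a writer may proceed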
+ def acquire_read_lock + if (held = @HeldCount.value) > 0 + # If we already have a lock, there's no need to wait + if held & READ_LOCK_MASK == 0 + # But we do need to update the counter, if we were holding a write + # lock but not a read lock + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting OR running when we first queue up, we need to wait + if waiting_or_running_writer?(c) + # Before going to sleep, check again with the ReadQueue mutex held + @ReadQueue.synchronize do + @ReadQueue.ns_wait if waiting_or_running_writer? + end + # Note: the above 'synchronize' block could have used #wait_until, + # but that waits repeatedly in a loop, checking the wait condition + # each time it wakes up (to protect against spurious wakeups) + # But we are already in a loop, which is only broken when we successfully + # acquire the lock! So we don't care about spurious wakeups, and would + # rather not pay the extra overhead of using #wait_until + + # After a reader has waited once, they are allowed to "barge" ahead of waiting writers + # But if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadQueue.synchronize do + @ReadQueue.ns_wait if running_writer? + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + end + + # Try to acquire a read lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_read_lock + if (held = @HeldCount.value) > 0 + if held & READ_LOCK_MASK == 0 + # If we hold a write lock, but not a read lock... + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + false + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + held = @HeldCount.value = @HeldCount.value - 1 + rlocks_held = held & READ_LOCK_MASK + if rlocks_held == 0 + c = @Counter.update { |counter| counter - 1 } + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_or_running_writer?(c) && running_readers(c) == 0 + @WriteQueue.signal + end + elsif rlocks_held == READ_LOCK_MASK + raise IllegalOperationError, "Cannot release a read lock which is not held" + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. 
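+ # @example Upgrading a read lock to a write lock (an illustrative sketch; it
+ #   assumes only the upgrade/downgrade behaviour described in the class
+ #   documentation above)
+ #   lock = Concurrent::ReentrantReadWriteLock.new
+ #   lock.acquire_read_lock
+ #   lock.acquire_write_lock     # upgrade: blocks until other readers are gone
+ #   begin
+ #     # exclusive access here
+ #   ensure
+ #     lock.release_write_lock   # downgrade back to a plain read lock
+ #     lock.release_read_lock
+ #   end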
+ def acquire_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + # if we already have a write (exclusive) lock, there's no need to wait + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + # To go ahead and take the lock without waiting, there must be no writer + # running right now, AND no writers who came before us still waiting to + # acquire the lock + # Additionally, if any read locks have been taken, we must hold all of them + if c == held + # If we successfully swap the RUNNING_WRITER bit on, then we can go ahead + if @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here + @WriteQueue.synchronize do + # So we have to do another check inside the synchronized section + # If a writer OR another reader is running, then go to sleep + c = @Counter.value + @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held + end + # Note: if you are thinking of replacing the above 'synchronize' block + # with #wait_until, read the comment in #acquire_read_lock first! + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + if !running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + end + end + end + + # Try to acquire a write lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + @HeldCount.value = held + WRITE_LOCK_HELD + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + false + end + + # Release a previously acquired write lock. 
+ # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD + wlocks_held = held & WRITE_LOCK_MASK + if wlocks_held == 0 + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadQueue.broadcast + @WriteQueue.signal if waiting_writers(c) > 0 + elsif wlocks_held == WRITE_LOCK_MASK + raise IllegalOperationError, "Cannot release a write lock which is not held" + end + true + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) >> READER_BITS + end + + # @!visibility private + def waiting_or_running_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb new file mode 100644 index 0000000..6d7601a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb @@ -0,0 +1,178 @@ +require 'thread' +require 'concurrent/atomic/abstract_thread_local_var' + +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class RubyThreadLocalVar < AbstractThreadLocalVar + + # Each thread has a (lazily initialized) array of thread-local variable values + # Each time a new thread-local var is created, we allocate an "index" for it + # For example, if the allocated index is 1, that means slot #1 in EVERY + # thread's thread-local array will be used for the value of that TLV + # + # The good thing about using a per-THREAD structure to hold values, rather + # than a per-TLV structure, is that no synchronization is needed when + # reading and writing those values (since the structure is only ever + # accessed by a single thread) + # + # Of course, when a TLV is GC'd, 1) we need to recover its index for use + # by other new TLVs (otherwise the thread-local arrays could get bigger + # and bigger with time), and 2) we need to null out all the references + # held in the now-unused slots (both to avoid blocking GC of those objects, + # and also to prevent "stale" values from being passed on to a new TLV + # when the index is reused) + # Because we need to null out freed slots, we need to keep references to + # ALL the thread-local arrays -- ARRAYS is for that + # But when a Thread is GC'd, we need to drop the reference to its thread-local + # array, so we don't leak memory + + FREE = [] + LOCK = Mutex.new + THREAD_LOCAL_ARRAYS = {} # used as a hash set + + # synchronize when not on MRI + # on MRI using lock in finalizer leads to "can't be called from trap context" error + # so the code is carefully written to be tread-safe on MRI relying on GIL + + if Concurrent.on_cruby? 
+ # @!visibility private + def self.semi_sync(&block) + block.call + end + else + # @!visibility private + def self.semi_sync(&block) + LOCK.synchronize(&block) + end + end + + private_constant :FREE, :LOCK, :THREAD_LOCAL_ARRAYS + + # @!macro thread_local_var_method_get + def value + if (array = get_threadlocal_array) + value = array[@index] + if value.nil? + default + elsif value.equal?(NULL) + nil + else + value + end + else + default + end + end + + # @!macro thread_local_var_method_set + def value=(value) + me = Thread.current + # We could keep the thread-local arrays in a hash, keyed by Thread + # But why? That would require locking + # Using Ruby's built-in thread-local storage is faster + unless (array = get_threadlocal_array(me)) + array = set_threadlocal_array([], me) + self.class.semi_sync { THREAD_LOCAL_ARRAYS[array.object_id] = array } + ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array.object_id)) + end + array[@index] = (value.nil? ? NULL : value) + value + end + + protected + + # @!visibility private + def allocate_storage + @index = FREE.pop || next_index + + ObjectSpace.define_finalizer(self, self.class.thread_local_finalizer(@index)) + end + + # @!visibility private + def self.thread_local_finalizer(index) + proc do + semi_sync do + # The cost of GC'ing a TLV is linear in the number of threads using TLVs + # But that is natural! More threads means more storage is used per TLV + # So naturally more CPU time is required to free more storage + THREAD_LOCAL_ARRAYS.each_value { |array| array[index] = nil } + # free index has to be published after the arrays are cleared + FREE.push(index) + end + end + end + + # @!visibility private + def self.thread_finalizer(id) + proc do + semi_sync do + # The thread which used this thread-local array is now gone + # So don't hold onto a reference to the array (thus blocking GC) + THREAD_LOCAL_ARRAYS.delete(id) + end + end + end + + private + + # noinspection RubyClassVariableUsageInspection + @@next = 0 + # noinspection RubyClassVariableUsageInspection + def next_index + LOCK.synchronize do + result = @@next + @@next += 1 + result + end + end + + if Thread.instance_methods.include?(:thread_variable_get) + + def get_threadlocal_array(thread = Thread.current) + thread.thread_variable_get(:__threadlocal_array__) + end + + def set_threadlocal_array(array, thread = Thread.current) + thread.thread_variable_set(:__threadlocal_array__, array) + end + + else + + def get_threadlocal_array(thread = Thread.current) + thread[:__threadlocal_array__] + end + + def set_threadlocal_array(array, thread = Thread.current) + thread[:__threadlocal_array__] = array + end + end + + # This exists only for use in testing + # @!visibility private + def value_for(thread) + if (array = get_threadlocal_array(thread)) + value = array[@index] + if value.nil? 
+ get_default + elsif value.equal?(NULL) + nil + else + value + end + else + get_default + end + end + + # @!visibility private + def get_default + if @default_block + raise "Cannot use default_for with default block" + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/semaphore.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/semaphore.rb new file mode 100644 index 0000000..1b2bd8c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/semaphore.rb @@ -0,0 +1,145 @@ +require 'concurrent/atomic/mutex_semaphore' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro semaphore_method_initialize + # + # Create a new `Semaphore` with the initial `count`. + # + # @param [Fixnum] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro semaphore_method_acquire + # + # Acquires the given number of permits from this semaphore, + # blocking until all are available. + # + # @param [Fixnum] permits Number of permits to acquire + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [nil] + + # @!macro semaphore_method_available_permits + # + # Returns the current number of permits available in this semaphore. + # + # @return [Integer] + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + + # @!macro semaphore_method_try_acquire + # + # Acquires the given number of permits from this semaphore, + # only if all are available at the time of invocation or within + # `timeout` interval + # + # @param [Fixnum] permits the number of permits to acquire + # + # @param [Fixnum] timeout the number of seconds to wait for the counter + # or `nil` to return immediately + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [Boolean] `false` if no permits are available, `true` when + # acquired a permit + + # @!macro semaphore_method_release + # + # Releases the given number of permits, returning them to the semaphore. + # + # @param [Fixnum] permits Number of permits to return to the semaphore. + # + # @raise [ArgumentError] if `permits` is not a number or is less than one + # + # @return [nil] + + ################################################################### + + # @!macro semaphore_public_api + # + # @!method initialize(count) + # @!macro semaphore_method_initialize + # + # @!method acquire(permits = 1) + # @!macro semaphore_method_acquire + # + # @!method available_permits + # @!macro semaphore_method_available_permits + # + # @!method drain_permits + # @!macro semaphore_method_drain_permits + # + # @!method try_acquire(permits = 1, timeout = nil) + # @!macro semaphore_method_try_acquire + # + # @!method release(permits = 1) + # @!macro semaphore_method_release + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + SemaphoreImplementation = case + when defined?(JavaSemaphore) + JavaSemaphore + else + MutexSemaphore + end + private_constant :SemaphoreImplementation + + # @!macro semaphore + # + # A counting semaphore. Conceptually, a semaphore maintains a set of + # permits. 
Each {#acquire} blocks if necessary until a permit is + # available, and then takes it. Each {#release} adds a permit, potentially + # releasing a blocking acquirer. + # However, no actual permit objects are used; the Semaphore just keeps a + # count of the number available and acts accordingly. + # + # @!macro semaphore_public_api + # @example + # semaphore = Concurrent::Semaphore.new(2) + # + # t1 = Thread.new do + # semaphore.acquire + # puts "Thread 1 acquired semaphore" + # end + # + # t2 = Thread.new do + # semaphore.acquire + # puts "Thread 2 acquired semaphore" + # end + # + # t3 = Thread.new do + # semaphore.acquire + # puts "Thread 3 acquired semaphore" + # end + # + # t4 = Thread.new do + # sleep(2) + # puts "Thread 4 releasing semaphore" + # semaphore.release + # end + # + # [t1, t2, t3, t4].each(&:join) + # + # # prints: + # # Thread 3 acquired semaphore + # # Thread 2 acquired semaphore + # # Thread 4 releasing semaphore + # # Thread 1 acquired semaphore + # + class Semaphore < SemaphoreImplementation + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb new file mode 100644 index 0000000..100cc8d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb @@ -0,0 +1,104 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/ruby_thread_local_var' +require 'concurrent/atomic/java_thread_local_var' + +module Concurrent + + ################################################################### + + # @!macro thread_local_var_method_initialize + # + # Creates a thread local variable. + # + # @param [Object] default the default value when otherwise unset + # @param [Proc] default_block Optional block that gets called to obtain the + # default value for each thread + + # @!macro thread_local_var_method_get + # + # Returns the value in the current thread's copy of this thread-local variable. + # + # @return [Object] the current value + + # @!macro thread_local_var_method_set + # + # Sets the current thread's copy of this thread-local variable to the specified value. + # + # @param [Object] value the value to set + # @return [Object] the new value + + # @!macro thread_local_var_method_bind + # + # Bind the given value to thread local storage during + # execution of the given block. + # + # @param [Object] value the value to bind + # @yield the operation to be performed with the bound variable + # @return [Object] the value + + + ################################################################### + + # @!macro thread_local_var_public_api + # + # @!method initialize(default = nil, &default_block) + # @!macro thread_local_var_method_initialize + # + # @!method value + # @!macro thread_local_var_method_get + # + # @!method value=(value) + # @!macro thread_local_var_method_set + # + # @!method bind(value, &block) + # @!macro thread_local_var_method_bind + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + ThreadLocalVarImplementation = case + when Concurrent.on_jruby? + JavaThreadLocalVar + else + RubyThreadLocalVar + end + private_constant :ThreadLocalVarImplementation + + # @!macro thread_local_var + # + # A `ThreadLocalVar` is a variable where the value is different for each thread. 
+ # Each variable may have a default value, but when you modify the variable only + # the current thread will ever see that change. + # + # @!macro thread_safe_variable_comparison + # + # @example + # v = ThreadLocalVar.new(14) + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # + # @example + # v = ThreadLocalVar.new(14) + # + # t1 = Thread.new do + # v.value #=> 14 + # v.value = 1 + # v.value #=> 1 + # end + # + # t2 = Thread.new do + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # end + # + # v.value #=> 14 + # + # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal + # + # @!macro thread_local_var_public_api + class ThreadLocalVar < ThreadLocalVarImplementation + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb new file mode 100644 index 0000000..d092aed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb @@ -0,0 +1,56 @@ +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicReference < Synchronization::LockableObject + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + + # @!macro atomic_reference_method_initialize + def initialize(value = nil) + super() + synchronize { ns_initialize(value) } + end + + # @!macro atomic_reference_method_get + def get + synchronize { @value } + end + alias_method :value, :get + + # @!macro atomic_reference_method_set + def set(new_value) + synchronize { @value = new_value } + end + alias_method :value=, :set + + # @!macro atomic_reference_method_get_and_set + def get_and_set(new_value) + synchronize do + old_value = @value + @value = new_value + old_value + end + end + alias_method :swap, :get_and_set + + # @!macro atomic_reference_method_compare_and_set + def _compare_and_set(old_value, new_value) + synchronize do + if @value.equal? old_value + @value = new_value + true + else + false + end + end + end + + protected + + def ns_initialize(value) + @value = value + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb new file mode 100644 index 0000000..709a382 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb @@ -0,0 +1,28 @@ +module Concurrent + + # Special "compare and set" handling of numeric values. + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicNumericCompareAndSetWrapper + + # @!macro atomic_reference_method_compare_and_set + def compare_and_set(old_value, new_value) + if old_value.kind_of? Numeric + while true + old = get + + return false unless old.kind_of? 
Numeric + + return false unless old == old_value + + result = _compare_and_set(old, new_value) + return result if result + end + else + _compare_and_set(old_value, new_value) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomics.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomics.rb new file mode 100644 index 0000000..16cbe66 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomics.rb @@ -0,0 +1,10 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/atomic/cyclic_barrier' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/atomic/event' +require 'concurrent/atomic/read_write_lock' +require 'concurrent/atomic/reentrant_read_write_lock' +require 'concurrent/atomic/semaphore' +require 'concurrent/atomic/thread_local_var' diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb new file mode 100644 index 0000000..50d52a6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb @@ -0,0 +1,107 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-read approach: + # observers are added and removed from a thread safe collection; every time + # a notification is required the internal data structure is copied to + # prevent concurrency issues + # + # @api private + class CopyOnNotifyObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + @observers[observer] = func + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + @observers.delete(observer) + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + synchronize do + @observers.clear + self + end + end + + # @!macro observable_count_observers + def count_observers + synchronize { @observers.count } + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + observers = duplicate_observers + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. 
+ # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + observers = duplicate_and_clear_observers + notify_to(observers, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def duplicate_and_clear_observers + synchronize do + observers = @observers.dup + @observers.clear + observers + end + end + + def duplicate_observers + synchronize { @observers.dup } + end + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? + observer.send(function, *args) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb new file mode 100644 index 0000000..3f3f7cc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb @@ -0,0 +1,111 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-write approach: + # every time an observer is added or removed the whole internal data structure is + # duplicated and replaced with a new one. + # + # @api private + class CopyOnWriteObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + new_observers = @observers.dup + new_observers[observer] = func + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + new_observers = @observers.dup + new_observers.delete(observer) + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + self.observers = {} + self + end + + # @!macro observable_count_observers + def count_observers + observers.count + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. + # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + old = clear_observers_and_return_old + notify_to(old, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? 
+ observer.send(function, *args) + end + end + + def observers + synchronize { @observers } + end + + def observers=(new_set) + synchronize { @observers = new_set } + end + + def clear_observers_and_return_old + synchronize do + old_observers = @observers + @observers = {} + old_observers + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..2be9e43 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb @@ -0,0 +1,84 @@ +if Concurrent.on_jruby? + + module Concurrent + module Collection + + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class JavaNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + if [:min, :low].include?(order) + @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity + else + @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder()) + end + end + + # @!macro priority_queue_method_clear + def clear + @queue.clear + true + end + + # @!macro priority_queue_method_delete + def delete(item) + found = false + while @queue.remove(item) do + found = true + end + found + end + + # @!macro priority_queue_method_empty + def empty? + @queue.size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.contains(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @queue.size + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + @queue.peek + end + + # @!macro priority_queue_method_pop + def pop + @queue.poll + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @queue.add(item) + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb new file mode 100644 index 0000000..d003d3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb @@ -0,0 +1,158 @@ +module Concurrent + + # @!macro warn.edge + class LockFreeStack < Synchronization::Object + + safe_initialization! + + class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? 
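+ # Each Node is a simple cons cell: it pairs a value with a reference to the
+ # next node, so the whole stack is a singly linked list whose head is swapped
+ # with compare-and-set. A minimal usage sketch of the surrounding stack class
+ # (illustrative only, using just the public API defined below):
+ #   stack = Concurrent::LockFreeStack.new
+ #   stack.push(:a).push(:b)   # head now points at Node[:b, Node[:a, EMPTY]]
+ #   stack.pop                 #=> :b
+ #   stack.to_a                #=> [:a]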
+ + # @return [Node] + attr_reader :next_node + + # @return [Object] + attr_reader :value + + # @!visibility private + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value + + def initialize(value, next_node) + @value = value + @next_node = next_node + end + + singleton_class.send :alias_method, :[], :new + end + + # The singleton for empty node + EMPTY = Node[nil, nil] + def EMPTY.next_node + self + end + + attr_atomic(:head) + private :head, :head=, :swap_head, :compare_and_set_head, :update_head + + # @!visibility private + def self.of1(value) + new Node[value, EMPTY] + end + + # @!visibility private + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end + + # @param [Node] head + def initialize(head = EMPTY) + super() + self.head = head + end + + # @param [Node] head + # @return [true, false] + def empty?(head = head()) + head.equal? EMPTY + end + + # @param [Node] head + # @param [Object] value + # @return [true, false] + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end + + # @param [Object] value + # @return [self] + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] + end + end + + # @return [Node] + def peek + head + end + + # @param [Node] head + # @return [true, false] + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end + + # @return [Object] + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node + end + end + + # @param [Node] head + # @return [true, false] + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end + + include Enumerable + + # @param [Node] head + # @return [self] + def each(head = nil) + return to_enum(:each, head) unless block_given? + it = head || peek + until it.equal?(EMPTY) + yield it.value + it = it.next_node + end + self + end + + # @return [true, false] + def clear + while true + current_head = head + return false if current_head == EMPTY + return true if compare_and_set_head current_head, EMPTY + end + end + + # @param [Node] head + # @return [true, false] + def clear_if(head) + compare_and_set_head head, EMPTY + end + + # @param [Node] head + # @param [Node] new_head + # @return [true, false] + def replace_if(head, new_head) + compare_and_set_head head, new_head + end + + # @return [self] + # @yield over the cleared stack + # @yieldparam [Object] value + def clear_each(&block) + while true + current_head = head + return self if current_head == EMPTY + if compare_and_set_head current_head, EMPTY + each current_head, &block + return self + end + end + end + + # @return [String] Short string representation. 
+ def to_s + format '%s %s>', super[0..-2], to_a.to_s + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb new file mode 100644 index 0000000..dc51893 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb @@ -0,0 +1,927 @@ +require 'concurrent/constants' +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/adder' +require 'concurrent/thread_safe/util/cheap_lockable' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module Collection + + # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 + # + # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose + # size exceeds a threshold). + # + # A hash table supporting full concurrency of retrievals and high expected + # concurrency for updates. However, even though all operations are + # thread-safe, retrieval operations do _not_ entail locking, and there is + # _not_ any support for locking the entire table in a way that prevents all + # access. + # + # Retrieval operations generally do not block, so may overlap with update + # operations. Retrievals reflect the results of the most recently _completed_ + # update operations holding upon their onset. (More formally, an update + # operation for a given key bears a _happens-before_ relation with any (non + # +nil+) retrieval for that key reporting the updated value.) For aggregate + # operations such as +clear()+, concurrent retrievals may reflect insertion or + # removal of only some entries. Similarly, the +each_pair+ iterator yields + # elements reflecting the state of the hash table at some point at or since + # the start of the +each_pair+. Bear in mind that the results of aggregate + # status methods including +size()+ and +empty?+} are typically useful only + # when a map is not undergoing concurrent updates in other threads. Otherwise + # the results of these methods reflect transient states that may be adequate + # for monitoring or estimation purposes, but not for program control. + # + # The table is dynamically expanded when there are too many collisions (i.e., + # keys that have distinct hash codes but fall into the same slot modulo the + # table size), with the expected average effect of maintaining roughly two + # bins per mapping (corresponding to a 0.75 load factor threshold for + # resizing). There may be much variance around this average as mappings are + # added and removed, but overall, this maintains a commonly accepted + # time/space tradeoff for hash tables. However, resizing this or any other + # kind of hash table may be a relatively slow operation. When possible, it is + # a good idea to provide a size estimate as an optional :initial_capacity + # initializer argument. 
An additional optional :load_factor constructor + # argument provides a further means of customizing initial table capacity by + # specifying the table density to be used in calculating the amount of space + # to allocate for the given number of elements. Note that using many keys with + # exactly the same +hash+ is a sure way to slow down performance of any hash + # table. + # + # ## Design overview + # + # The primary design goal of this hash table is to maintain concurrent + # readability (typically method +[]+, but also iteration and related methods) + # while minimizing update contention. Secondary goals are to keep space + # consumption about the same or better than plain +Hash+, and to support high + # initial insertion rates on an empty table by many threads. + # + # Each key-value mapping is held in a +Node+. The validation-based approach + # explained below leads to a lot of code sprawl because retry-control + # precludes factoring into smaller methods. + # + # The table is lazily initialized to a power-of-two size upon the first + # insertion. Each bin in the table normally contains a list of +Node+s (most + # often, the list has only zero or one +Node+). Table accesses require + # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are + # always accurately traversable under volatile reads, so long as lookups check + # hash code and non-nullness of value before checking key equality. + # + # We use the top two bits of +Node+ hash fields for control purposes -- they + # are available anyway because of addressing constraints. As explained further + # below, these top bits are used as follows: + # + # - 00 - Normal + # - 01 - Locked + # - 11 - Locked and may have a thread waiting for lock + # - 10 - +Node+ is a forwarding node + # + # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, + # except for forwarding nodes, for which the lower bits are zero (and so + # always have hash field == +MOVED+). + # + # Insertion (via +[]=+ or its variants) of the first node in an empty bin is + # performed by just CASing it to the bin. This is by far the most common case + # for put operations under most key/hash distributions. Other update + # operations (insert, delete, and replace) require locks. We do not want to + # waste the space required to associate a distinct lock object with each bin, + # so instead use the first node of a bin list itself as a lock. Blocking + # support for these locks relies +Concurrent::ThreadSafe::Util::CheapLockable. However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. + # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. 
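+ #
+ # As a concrete illustration of the hash-field encoding described above (the
+ # real constants are defined on +Node+ further below; this is only a sketch of
+ # the same arithmetic):
+ #   bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2
+ #   moved     = 0b10 << bit_shift       # forwarding node
+ #   locked    = 0b01 << bit_shift       # bin is locked
+ #   waiting   = 0b11 << bit_shift       # locked, and a thread is waiting
+ #   hash_bits = (1 << bit_shift) - 1    # usable bits of a normal node hash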
+ # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. 
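+ #
+ # For reference, the 0.75 threshold mentioned above shows up later in the code
+ # as plain integer arithmetic; a hypothetical walk-through with the default
+ # capacity of 16:
+ #   size = 16
+ #   size - (size >> 2)           #=> 12, the first resize threshold (75% of 16)
+ #   (size << 1) - (size >> 1)    #=> 24, the next threshold once the table has
+ #                                #   doubled to 32 slots (75% of 32)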
+ # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. + # + # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + # + # @!visibility private + class AtomicReferenceMapBackend + + # @!visibility private + class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. + # + # @!visibility private + class Node + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :hash, :value, :next + + include Concurrent::ThreadSafe::Util::CheapLockable + + bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? 
Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? + spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_acquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_acquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. + TRANSFER_BUFFER_SIZE = 32 + + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. + + # Table initialization and resizing control. When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Concurrent::ThreadSafe::Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? 
MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + + def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? 
+ value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
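+ #
+ # For orientation, a rough sketch of how the public entry points above reach
+ # these internals (illustrative only; the mapping is approximate):
+ #   map = Concurrent::Collection::AtomicReferenceMapBackend.new
+ #   map.compute_if_absent(:a) { 1 }  #=> 1   (CAS of a computed node, or
+ #                                    #        attempt_internal_compute_if_absent)
+ #   map[:a]                          #=> 1   (lock-free read via get_or_default)
+ #   map.delete(:a)                   #=> 1   (goes through internal_replace)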
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + 
check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Concurrent::ThreadSafe::Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. + def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= ::Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. + def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb new file mode 100644 index 0000000..903c1f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb @@ -0,0 +1,66 @@ +require 'thread' +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class 
MriMapBackend < NonConcurrentMapBackend + + def initialize(options = nil) + super(options) + @write_lock = Mutex.new + end + + def []=(key, value) + @write_lock.synchronize { super } + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) # fast non-blocking path for the most likely case + stored_value + else + @write_lock.synchronize { super } + end + end + + def compute_if_present(key) + @write_lock.synchronize { super } + end + + def compute(key) + @write_lock.synchronize { super } + end + + def merge_pair(key, value) + @write_lock.synchronize { super } + end + + def replace_pair(key, old_value, new_value) + @write_lock.synchronize { super } + end + + def replace_if_exists(key, new_value) + @write_lock.synchronize { super } + end + + def get_and_set(key, value) + @write_lock.synchronize { super } + end + + def delete(key) + @write_lock.synchronize { super } + end + + def delete_pair(key, value) + @write_lock.synchronize { super } + end + + def clear + @write_lock.synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb new file mode 100644 index 0000000..e7c62e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb @@ -0,0 +1,140 @@ +require 'concurrent/constants' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class NonConcurrentMapBackend + + # WARNING: all public methods of the class must operate on the @backend + # directly without calling each other. This is important because of the + # SynchronizedMapBackend which uses a non-reentrant mutex for performance + # reasons. 
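+ # An illustrative sketch of the contract these methods implement (the keys
+ # and values below are hypothetical, shown only to clarify the semantics;
+ # note that +replace_pair+ compares the expected old value by identity via
+ # +equal?+):
+ #
+ #   backend = Concurrent::Collection::NonConcurrentMapBackend.new
+ #   backend.compute_if_absent(:a) { 1 }  #=> 1, computed and stored
+ #   backend.compute_if_absent(:a) { 2 }  #=> 1, existing value wins
+ #   backend.replace_pair(:a, 1, 3)       #=> true, expected old value matched
+ #   backend.replace_pair(:a, 9, 4)       #=> false, mapping left unchanged
+ #   backend.get_or_default(:missing, 42) #=> 42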
+ def initialize(options = nil) + @backend = {} + end + + def [](key) + @backend[key] + end + + def []=(key, value) + @backend[key] = value + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + stored_value + else + @backend[key] = yield + end + end + + def replace_pair(key, old_value, new_value) + if pair?(key, old_value) + @backend[key] = new_value + true + else + false + end + end + + def replace_if_exists(key, new_value) + if NULL != (stored_value = @backend.fetch(key, NULL)) + @backend[key] = new_value + stored_value + end + end + + def compute_if_present(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + store_computed_value(key, yield(stored_value)) + end + end + + def compute(key) + store_computed_value(key, yield(@backend[key])) + end + + def merge_pair(key, value) + if NULL == (stored_value = @backend.fetch(key, NULL)) + @backend[key] = value + else + store_computed_value(key, yield(stored_value)) + end + end + + def get_and_set(key, value) + stored_value = @backend[key] + @backend[key] = value + stored_value + end + + def key?(key) + @backend.key?(key) + end + + def delete(key) + @backend.delete(key) + end + + def delete_pair(key, value) + if pair?(key, value) + @backend.delete(key) + true + else + false + end + end + + def clear + @backend.clear + self + end + + def each_pair + dupped_backend.each_pair do |k, v| + yield k, v + end + self + end + + def size + @backend.size + end + + def get_or_default(key, default_value) + @backend.fetch(key, default_value) + end + + alias_method :_get, :[] + alias_method :_set, :[]= + private :_get, :_set + private + def initialize_copy(other) + super + @backend = {} + self + end + + def dupped_backend + @backend.dup + end + + def pair?(key, expected_value) + NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) + end + + def store_computed_value(key, new_value) + if new_value.nil? + @backend.delete(key) + nil + else + @backend[key] = new_value + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb new file mode 100644 index 0000000..190c8d9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb @@ -0,0 +1,82 @@ +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class SynchronizedMapBackend < NonConcurrentMapBackend + + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. 
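+ # For example (a hypothetical rewrite, shown only to illustrate the
+ # constraint above): if +compute_if_absent+ delegated to the public
+ # reader/writer instead of operating on @backend through +super+, as in
+ #
+ #   def compute_if_absent(key)
+ #     synchronize { self[key] = yield unless key?(key) }
+ #   end
+ #
+ # the nested +key?+ and +[]=+ calls would try to re-acquire the Mutex_m
+ # lock already held by the outer +synchronize+, which a non-reentrant
+ # lock cannot do, so the call would fail instead of returning.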
+ + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb new file mode 100644 index 0000000..694cd7a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb @@ -0,0 +1,143 @@ +require 'concurrent/utility/engine' +require 'concurrent/collection/java_non_concurrent_priority_queue' +require 'concurrent/collection/ruby_non_concurrent_priority_queue' + +module Concurrent + module Collection + + # @!visibility private + # @!macro internal_implementation_note + NonConcurrentPriorityQueueImplementation = case + when Concurrent.on_jruby? + JavaNonConcurrentPriorityQueue + else + RubyNonConcurrentPriorityQueue + end + private_constant :NonConcurrentPriorityQueueImplementation + + # @!macro priority_queue + # + # A queue collection in which the elements are sorted based on their + # comparison (spaceship) operator `<=>`. Items are added to the queue + # at a position relative to their priority. On removal the element + # with the "highest" priority is removed. By default the sort order is + # from highest to lowest, but a lowest-to-highest sort order can be + # set on construction. + # + # The API is based on the `Queue` class from the Ruby standard library. + # + # The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm + # stored in an array. The algorithm is based on the work of Robert Sedgewick + # and Kevin Wayne. + # + # The JRuby native implementation is a thin wrapper around the standard + # library `java.util.NonConcurrentPriorityQueue`. + # + # When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`. + # When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`. + # + # @note This implementation is *not* thread safe. + # + # @see http://en.wikipedia.org/wiki/Priority_queue + # @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html + # + # @see http://algs4.cs.princeton.edu/24pq/index.php#2.6 + # @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html + # + # @!visibility private + class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation + + alias_method :has_priority?, :include? 
+ + alias_method :size, :length + + alias_method :deq, :pop + alias_method :shift, :pop + + alias_method :<<, :push + alias_method :enq, :push + + # @!method initialize(opts = {}) + # @!macro priority_queue_method_initialize + # + # Create a new priority queue with no items. + # + # @param [Hash] opts the options for creating the queue + # @option opts [Symbol] :order (:max) dictates the order in which items are + # stored: from highest to lowest when `:max` or `:high`; from lowest to + # highest when `:min` or `:low` + + # @!method clear + # @!macro priority_queue_method_clear + # + # Removes all of the elements from this priority queue. + + # @!method delete(item) + # @!macro priority_queue_method_delete + # + # Deletes all items from `self` that are equal to `item`. + # + # @param [Object] item the item to be removed from the queue + # @return [Object] true if the item is found else false + + # @!method empty? + # @!macro priority_queue_method_empty + # + # Returns `true` if `self` contains no elements. + # + # @return [Boolean] true if there are no items in the queue else false + + # @!method include?(item) + # @!macro priority_queue_method_include + # + # Returns `true` if the given item is present in `self` (that is, if any + # element == `item`), otherwise returns false. + # + # @param [Object] item the item to search for + # + # @return [Boolean] true if the item is found else false + + # @!method length + # @!macro priority_queue_method_length + # + # The current length of the queue. + # + # @return [Fixnum] the number of items in the queue + + # @!method peek + # @!macro priority_queue_method_peek + # + # Retrieves, but does not remove, the head of this queue, or returns `nil` + # if this queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method pop + # @!macro priority_queue_method_pop + # + # Retrieves and removes the head of this queue, or returns `nil` if this + # queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method push(item) + # @!macro priority_queue_method_push + # + # Inserts the specified element into this priority queue. + # + # @param [Object] item the item to insert onto the queue + + # @!method self.from_list(list, opts = {}) + # @!macro priority_queue_method_from_list + # + # Create a new priority queue from the given list. + # + # @param [Enumerable] list the list to build the queue from + # @param [Hash] opts the options for creating the queue + # + # @return [NonConcurrentPriorityQueue] the newly created and populated queue + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..bdf3cba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb @@ -0,0 +1,150 @@ +module Concurrent + module Collection + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class RubyNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + @comparator = [:min, :low].include?(order) ? 
-1 : 1 + clear + end + + # @!macro priority_queue_method_clear + def clear + @queue = [nil] + @length = 0 + true + end + + # @!macro priority_queue_method_delete + def delete(item) + return false if empty? + original_length = @length + k = 1 + while k <= @length + if @queue[k] == item + swap(k, @length) + @length -= 1 + sink(k) + @queue.pop + else + k += 1 + end + end + @length != original_length + end + + # @!macro priority_queue_method_empty + def empty? + size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.include?(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @length + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + empty? ? nil : @queue[1] + end + + # @!macro priority_queue_method_pop + def pop + return nil if empty? + max = @queue[1] + swap(1, @length) + @length -= 1 + sink(1) + @queue.pop + max + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @length += 1 + @queue << item + swim(@length) + true + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + + private + + # Exchange the values at the given indexes within the internal array. + # + # @param [Integer] x the first index to swap + # @param [Integer] y the second index to swap + # + # @!visibility private + def swap(x, y) + temp = @queue[x] + @queue[x] = @queue[y] + @queue[y] = temp + end + + # Are the items at the given indexes ordered based on the priority + # order specified at construction? + # + # @param [Integer] x the first index from which to retrieve a comparable value + # @param [Integer] y the second index from which to retrieve a comparable value + # + # @return [Boolean] true if the two elements are in the correct priority order + # else false + # + # @!visibility private + def ordered?(x, y) + (@queue[x] <=> @queue[y]) == @comparator + end + + # Percolate down to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def sink(k) + while (j = (2 * k)) <= @length do + j += 1 if j < @length && ! ordered?(j, j+1) + break if ordered?(k, j) + swap(k, j) + k = j + end + end + + # Percolate up to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def swim(k) + while k > 1 && ! ordered?(k/2, k) do + swap(k, k/2) + k = k/2 + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/deprecation.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/deprecation.rb new file mode 100644 index 0000000..35ae4b2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/deprecation.rb @@ -0,0 +1,34 @@ +require 'concurrent/concern/logging' + +module Concurrent + module Concern + + # @!visibility private + # @!macro internal_implementation_note + module Deprecation + # TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed. 
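+ # Usage sketch -- the +Widget+ class and its +create+/+old_create+ methods
+ # are hypothetical, only the +deprecated_method+ call is real:
+ #
+ #   class Widget
+ #     extend Concurrent::Concern::Deprecation
+ #
+ #     def self.old_create(*args)
+ #       deprecated_method :old_create, :create # logs a [DEPRECATED] warning
+ #       create(*args)
+ #     end
+ #   end
+ #
+ # The warning goes through Concurrent.global_logger at WARN level together
+ # with the caller's location.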
+ include Concern::Logging + + def deprecated(message, strip = 2) + caller_line = caller(strip).first if strip > 0 + klass = if Module === self + self + else + self.class + end + message = if strip > 0 + format("[DEPRECATED] %s\ncalled on: %s", message, caller_line) + else + format('[DEPRECATED] %s', message) + end + log WARN, klass.to_s, message + end + + def deprecated_method(old_name, new_name) + deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3 + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb new file mode 100644 index 0000000..dc172ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb @@ -0,0 +1,73 @@ +module Concurrent + module Concern + + # Object references in Ruby are mutable. This can lead to serious problems when + # the `#value` of a concurrent object is a mutable reference. Which is always the + # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type. + # Most classes in this library that expose a `#value` getter method do so using the + # `Dereferenceable` mixin module. + # + # @!macro copy_options + module Dereferenceable + # NOTE: This module is going away in 2.0. In the mean time we need it to + # play nicely with the synchronization layer. This means that the + # including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Return the value this object represents after applying the options specified + # by the `#set_deref_options` method. + # + # @return [Object] the current value of the object + def value + synchronize { apply_deref_options(@value) } + end + alias_method :deref, :value + + protected + + # Set the internal value of this object + # + # @param [Object] value the new value + def value=(value) + synchronize{ @value = value } + end + + # @!macro dereferenceable_set_deref_options + # Set the options which define the operations #value performs before + # returning data to the caller (dereferencing). + # + # @note Most classes that include this module will call `#set_deref_options` + # from within the constructor, thus allowing these options to be set at + # object creation. + # + # @param [Hash] opts the options defining dereference behavior. + # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def set_deref_options(opts = {}) + synchronize{ ns_set_deref_options(opts) } + end + + # @!macro dereferenceable_set_deref_options + # @!visibility private + def ns_set_deref_options(opts) + @dup_on_deref = opts[:dup_on_deref] || opts[:dup] + @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze] + @copy_on_deref = opts[:copy_on_deref] || opts[:copy] + @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref) + nil + end + + # @!visibility private + def apply_deref_options(value) + return nil if value.nil? 
+ return value if @do_nothing_on_deref + value = @copy_on_deref.call(value) if @copy_on_deref + value = value.dup if @dup_on_deref + value = value.freeze if @freeze_on_deref + value + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/logging.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/logging.rb new file mode 100644 index 0000000..2c74999 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/logging.rb @@ -0,0 +1,32 @@ +require 'logger' + +module Concurrent + module Concern + + # Include where logging is needed + # + # @!visibility private + module Logging + include Logger::Severity + + # Logs through {Concurrent.global_logger}, it can be overridden by setting @logger + # @param [Integer] level one of Logger::Severity constants + # @param [String] progname e.g. a path of an Actor + # @param [String, nil] message when nil block is used to generate the message + # @yieldreturn [String] a message + def log(level, progname, message = nil, &block) + #NOTE: Cannot require 'concurrent/configuration' above due to circular references. + # Assume that the gem has been initialized if we've gotten this far. + logger = if defined?(@logger) && @logger + @logger + else + Concurrent.global_logger + end + logger.call level, progname, message, &block + rescue => error + $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" + + "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/obligation.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/obligation.rb new file mode 100644 index 0000000..2c9ac12 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/obligation.rb @@ -0,0 +1,220 @@ +require 'thread' +require 'timeout' + +require 'concurrent/atomic/event' +require 'concurrent/concern/dereferenceable' + +module Concurrent + module Concern + + module Obligation + include Concern::Dereferenceable + # NOTE: The Dereferenceable module is going away in 2.0. In the mean time + # we need it to place nicely with the synchronization layer. This means + # that the including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Has the obligation been fulfilled? + # + # @return [Boolean] + def fulfilled? + state == :fulfilled + end + alias_method :realized?, :fulfilled? + + # Has the obligation been rejected? + # + # @return [Boolean] + def rejected? + state == :rejected + end + + # Is obligation completion still pending? + # + # @return [Boolean] + def pending? + state == :pending + end + + # Is the obligation still unscheduled? + # + # @return [Boolean] + def unscheduled? + state == :unscheduled + end + + # Has the obligation completed processing? + # + # @return [Boolean] + def complete? + [:fulfilled, :rejected].include? state + end + + # Is the obligation still awaiting completion of processing? + # + # @return [Boolean] + def incomplete? + ! complete? + end + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. + # + # @param [Numeric] timeout the maximum time in seconds to wait. 
+ # @return [Object] see Dereferenceable#deref + def value(timeout = nil) + wait timeout + deref + end + + # Wait until obligation is complete or the timeout has been reached. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + def wait(timeout = nil) + event.wait(timeout) if timeout != 0 && incomplete? + self + end + + # Wait until obligation is complete or the timeout is reached. Will re-raise + # any exceptions raised during processing (but will not raise an exception + # on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + # @raise [Exception] raises the reason when rejected + def wait!(timeout = nil) + wait(timeout).tap { raise self if rejected? } + end + alias_method :no_error!, :wait! + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. Will re-raise any exceptions + # raised during processing (but will not raise an exception on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + # @raise [Exception] raises the reason when rejected + def value!(timeout = nil) + wait(timeout) + if rejected? + raise self + else + deref + end + end + + # The current state of the obligation. + # + # @return [Symbol] the current state + def state + synchronize { @state } + end + + # If an exception was raised during processing this will return the + # exception object. Will return `nil` when the state is pending or if + # the obligation has been successfully fulfilled. + # + # @return [Exception] the exception raised during processing or `nil` + def reason + synchronize { @reason } + end + + # @example allows Obligation to be risen + # rejected_ivar = Ivar.new.fail + # raise rejected_ivar + def exception(*args) + raise 'obligation is not rejected' unless rejected? + reason.exception(*args) + end + + protected + + # @!visibility private + def get_arguments_from(opts = {}) + [*opts.fetch(:args, [])] + end + + # @!visibility private + def init_obligation + @event = Event.new + @value = @reason = nil + end + + # @!visibility private + def event + @event + end + + # @!visibility private + def set_state(success, value, reason) + if success + @value = value + @state = :fulfilled + else + @reason = reason + @state = :rejected + end + end + + # @!visibility private + def state=(value) + synchronize { ns_set_state(value) } + end + + # Atomic compare and set operation + # State is set to `next_state` only if `current state == expected_current`. + # + # @param [Symbol] next_state + # @param [Symbol] expected_current + # + # @return [Boolean] true is state is changed, false otherwise + # + # @!visibility private + def compare_and_set_state(next_state, *expected_current) + synchronize do + if expected_current.include? @state + @state = next_state + true + else + false + end + end + end + + # Executes the block within mutex if current state is included in expected_states + # + # @return block value if executed, false otherwise + # + # @!visibility private + def if_state(*expected_states) + synchronize do + raise ArgumentError.new('no block given') unless block_given? + + if expected_states.include? @state + yield + else + false + end + end + end + + protected + + # Am I in the current state? 
+ # + # @param [Symbol] expected The state to check against + # @return [Boolean] true if in the expected state else false + # + # @!visibility private + def ns_check_state?(expected) + @state == expected + end + + # @!visibility private + def ns_set_state(value) + @state = value + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/observable.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/observable.rb new file mode 100644 index 0000000..b513271 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/observable.rb @@ -0,0 +1,110 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/collection/copy_on_write_observer_set' + +module Concurrent + module Concern + + # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one + # of the most useful design patterns. + # + # The workflow is very simple: + # - an `observer` can register itself to a `subject` via a callback + # - many `observers` can be registered to the same `subject` + # - the `subject` notifies all registered observers when its status changes + # - an `observer` can deregister itself when is no more interested to receive + # event notifications + # + # In a single threaded environment the whole pattern is very easy: the + # `subject` can use a simple data structure to manage all its subscribed + # `observer`s and every `observer` can react directly to every event without + # caring about synchronization. + # + # In a multi threaded environment things are more complex. The `subject` must + # synchronize the access to its data structure and to do so currently we're + # using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet} + # and {Concurrent::Concern::CopyOnNotifyObserverSet}. + # + # When implementing and `observer` there's a very important rule to remember: + # **there are no guarantees about the thread that will execute the callback** + # + # Let's take this example + # ``` + # class Observer + # def initialize + # @count = 0 + # end + # + # def update + # @count += 1 + # end + # end + # + # obs = Observer.new + # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) } + # # execute [obj1, obj2, obj3, obj4] + # ``` + # + # `obs` is wrong because the variable `@count` can be accessed by different + # threads at the same time, so it should be synchronized (using either a Mutex + # or an AtomicFixum) + module Observable + + # @!macro observable_add_observer + # + # Adds an observer to this set. If a block is passed, the observer will be + # created by this method and no other params should be passed. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # Default is :update + # @return [Object] the added observer + def add_observer(observer = nil, func = :update, &block) + observers.add_observer(observer, func, &block) + end + + # As `#add_observer` but can be used for chaining. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. 
+ # @return [Observable] self + def with_observer(observer = nil, func = :update, &block) + add_observer(observer, func, &block) + self + end + + # @!macro observable_delete_observer + # + # Remove `observer` as an observer on this object so that it will no + # longer receive notifications. + # + # @param [Object] observer the observer to remove + # @return [Object] the deleted observer + def delete_observer(observer) + observers.delete_observer(observer) + end + + # @!macro observable_delete_observers + # + # Remove all observers associated with this object. + # + # @return [Observable] self + def delete_observers + observers.delete_observers + self + end + + # @!macro observable_count_observers + # + # Return the number of observers associated with this object. + # + # @return [Integer] the observers count + def count_observers + observers.count_observers + end + + protected + + attr_accessor :observers + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concurrent_ruby.jar b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concurrent_ruby.jar new file mode 100644 index 0000000..bd2b279 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concurrent_ruby.jar differ diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/configuration.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/configuration.rb new file mode 100644 index 0000000..a00dc84 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/configuration.rb @@ -0,0 +1,188 @@ +require 'thread' +require 'concurrent/delay' +require 'concurrent/errors' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/concern/logging' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/utility/processor_counter' + +module Concurrent + extend Concern::Logging + extend Concern::Deprecation + + autoload :Options, 'concurrent/options' + autoload :TimerSet, 'concurrent/executor/timer_set' + autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' + + # @return [Logger] Logger with provided level and output. + def self.create_simple_logger(level = Logger::FATAL, output = $stderr) + # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking + lambda do |severity, progname, message = nil, &block| + return false if severity < level + + message = block ? block.call : message + formatted_message = case message + when String + message + when Exception + format "%s (%s)\n%s", + message.message, message.class, (message.backtrace || []).join("\n") + else + message.inspect + end + + output.print format "[%s] %5s -- %s: %s\n", + Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), + Logger::SEV_LABEL[severity], + progname, + formatted_message + true + end + end + + # Use logger created by #create_simple_logger to log concurrent-ruby messages. + def self.use_simple_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_simple_logger level, output + end + + # @return [Logger] Logger with provided level and output. 
+ # @deprecated + def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) + logger = Logger.new(output) + logger.level = level + logger.formatter = lambda do |severity, datetime, progname, msg| + formatted_message = case msg + when String + msg + when Exception + format "%s (%s)\n%s", + msg.message, msg.class, (msg.backtrace || []).join("\n") + else + msg.inspect + end + format "[%s] %5s -- %s: %s\n", + datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), + severity, + progname, + formatted_message + end + + lambda do |loglevel, progname, message = nil, &block| + logger.add loglevel, message, progname, &block + end + end + + # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. + # @deprecated + def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_stdlib_logger level, output + end + + # TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods + + # Suppresses all output when used for logging. + NULL_LOGGER = lambda { |level, progname, message = nil, &block| } + + # @!visibility private + GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN)) + private_constant :GLOBAL_LOGGER + + def self.global_logger + GLOBAL_LOGGER.value + end + + def self.global_logger=(value) + GLOBAL_LOGGER.value = value + end + + # @!visibility private + GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor } + private_constant :GLOBAL_FAST_EXECUTOR + + # @!visibility private + GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor } + private_constant :GLOBAL_IO_EXECUTOR + + # @!visibility private + GLOBAL_TIMER_SET = Delay.new { TimerSet.new } + private_constant :GLOBAL_TIMER_SET + + # @!visibility private + GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new + private_constant :GLOBAL_IMMEDIATE_EXECUTOR + + # Disables AtExit handlers including pool auto-termination handlers. + # When disabled it will be the application programmer's responsibility + # to ensure that the handlers are shutdown properly prior to application + # exit by calling `AtExit.run` method. + # + # @note this option should be needed only because of `at_exit` ordering + # issues which may arise when running some of the testing frameworks. + # E.g. Minitest's test-suite runs itself in `at_exit` callback which + # executes after the pools are already terminated. Then auto termination + # needs to be disabled and called manually after test-suite ends. + # @note This method should *never* be called + # from within a gem. It should *only* be used from within the main + # application and even then it should be used only when necessary. + # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841. + # + def self.disable_at_exit_handlers! + deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841." + end + + # Global thread pool optimized for short, fast *operations*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_fast_executor + GLOBAL_FAST_EXECUTOR.value + end + + # Global thread pool optimized for long, blocking (IO) *tasks*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_io_executor + GLOBAL_IO_EXECUTOR.value + end + + def self.global_immediate_executor + GLOBAL_IMMEDIATE_EXECUTOR + end + + # Global thread pool user for global *timers*. 
+ # + # @return [Concurrent::TimerSet] the thread pool + def self.global_timer_set + GLOBAL_TIMER_SET.value + end + + # General access point to global executors. + # @param [Symbol, Executor] executor_identifier symbols: + # - :fast - {Concurrent.global_fast_executor} + # - :io - {Concurrent.global_io_executor} + # - :immediate - {Concurrent.global_immediate_executor} + # @return [Executor] + def self.executor(executor_identifier) + Options.executor(executor_identifier) + end + + def self.new_fast_executor(opts = {}) + FixedThreadPool.new( + [2, Concurrent.processor_count].max, + auto_terminate: opts.fetch(:auto_terminate, true), + idletime: 60, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "fast" + ) + end + + def self.new_io_executor(opts = {}) + CachedThreadPool.new( + auto_terminate: opts.fetch(:auto_terminate, true), + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "io" + ) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/constants.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/constants.rb new file mode 100644 index 0000000..676c2af --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/constants.rb @@ -0,0 +1,8 @@ +module Concurrent + + # Various classes within allows for +nil+ values to be stored, + # so a special +NULL+ token is required to indicate the "nil-ness". + # @!visibility private + NULL = ::Object.new + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/dataflow.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/dataflow.rb new file mode 100644 index 0000000..d55f19d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/dataflow.rb @@ -0,0 +1,81 @@ +require 'concurrent/future' +require 'concurrent/atomic/atomic_fixnum' + +module Concurrent + + # @!visibility private + class DependencyCounter # :nodoc: + + def initialize(count, &block) + @counter = AtomicFixnum.new(count) + @block = block + end + + def update(time, value, reason) + if @counter.decrement == 0 + @block.call + end + end + end + + # Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available. + # {include:file:docs-source/dataflow.md} + # + # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon + # + # @yield The operation to perform once all the dependencies are met + # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow + # @yieldreturn [Object] the result of the block operation + # + # @return [Object] the result of all the operations + # + # @raise [ArgumentError] if no block is given + # @raise [ArgumentError] if any of the inputs are not `IVar`s + def dataflow(*inputs, &block) + dataflow_with(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow + + def dataflow_with(executor, *inputs, &block) + call_dataflow(:value, executor, *inputs, &block) + end + module_function :dataflow_with + + def dataflow!(*inputs, &block) + dataflow_with!(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow! + + def dataflow_with!(executor, *inputs, &block) + call_dataflow(:value!, executor, *inputs, &block) + end + module_function :dataflow_with! 
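+ # A usage sketch of the methods above (the futures and the block are
+ # illustrative):
+ #
+ #   a = Concurrent::Future.execute { 2 }
+ #   b = Concurrent::Future.execute { 3 }
+ #   sum = Concurrent.dataflow(a, b) { |x, y| x + y }
+ #   sum.value #=> 5, computed only after both inputs are fulfilled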
+ + private + + def call_dataflow(method, executor, *inputs, &block) + raise ArgumentError.new('an executor must be provided') if executor.nil? + raise ArgumentError.new('no block given') unless block_given? + unless inputs.all? { |input| input.is_a? IVar } + raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }") + end + + result = Future.new(executor: executor) do + values = inputs.map { |input| input.send(method) } + block.call(*values) + end + + if inputs.empty? + result.execute + else + counter = DependencyCounter.new(inputs.size) { result.execute } + + inputs.each do |input| + input.add_observer counter + end + end + + result + end + module_function :call_dataflow +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/delay.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/delay.rb new file mode 100644 index 0000000..83799d0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/delay.rb @@ -0,0 +1,199 @@ +require 'thread' +require 'concurrent/concern/obligation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/synchronization' + +module Concurrent + + # This file has circular require issues. It must be autoloaded here. + autoload :Options, 'concurrent/options' + + # Lazy evaluation of a block yielding an immutable result. Useful for + # expensive operations that may never be needed. It may be non-blocking, + # supports the `Concern::Obligation` interface, and accepts the injection of + # custom executor upon which to execute the block. Processing of + # block will be deferred until the first time `#value` is called. + # At that time the caller can choose to return immediately and let + # the block execute asynchronously, block indefinitely, or block + # with a timeout. + # + # When a `Delay` is created its state is set to `pending`. The value and + # reason are both `nil`. The first time the `#value` method is called the + # enclosed opration will be run and the calling thread will block. Other + # threads attempting to call `#value` will block as well. Once the operation + # is complete the *value* will be set to the result of the operation or the + # *reason* will be set to the raised exception, as appropriate. All threads + # blocked on `#value` will return. Subsequent calls to `#value` will immediately + # return the cached value. The operation will only be run once. This means that + # any side effects created by the operation will only happen once as well. + # + # `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread + # safety of the reference returned by `#value`. + # + # @!macro copy_options + # + # @!macro delay_note_regarding_blocking + # @note The default behavior of `Delay` is to block indefinitely when + # calling either `value` or `wait`, executing the delayed operation on + # the current thread. This makes the `timeout` value completely + # irrelevant. To enable non-blocking behavior, use the `executor` + # constructor option. This will cause the delayed operation to be + # execute on the given executor, allowing the call to timeout. + # + # @see Concurrent::Concern::Dereferenceable + class Delay < Synchronization::LockableObject + include Concern::Obligation + + # NOTE: Because the global thread pools are lazy-loaded with these objects + # there is a performance hit every time we post a new task to one of these + # thread pools. 
Subsequently it is critical that `Delay` perform as fast + # as possible post-completion. This class has been highly optimized using + # the benchmark script `examples/lazy_and_delay.rb`. Do NOT attempt to + # DRY-up this class or perform other refactoring with running the + # benchmarks and ensuring that performance is not negatively impacted. + + # Create a new `Delay` in the `:pending` state. + # + # @!macro executor_and_deref_options + # + # @yield the delayed operation to perform + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(&nil) + synchronize { ns_initialize(opts, &block) } + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception this method will return nil. The execption object + # can be accessed via the `#reason` method. + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # + # @!macro delay_note_regarding_blocking + def value(timeout = nil) + if @executor # TODO (pitr 12-Sep-2015): broken unsafe read? + super + else + # this function has been optimized for performance and + # should not be modified without running new benchmarks + synchronize do + execute = @evaluation_started = true unless @evaluation_started + if execute + begin + set_state(true, @task.call, nil) + rescue => ex + set_state(false, nil, ex) + end + elsif incomplete? + raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay' + end + end + if @do_nothing_on_deref + @value + else + apply_deref_options(@value) + end + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception, this method will raise that exception (even when) + # the operation has already been executed). + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # @raise [Exception] when `#rejected?` raises `#reason` + # + # @!macro delay_note_regarding_blocking + def value!(timeout = nil) + if @executor + super + else + result = value + raise @reason if @reason + result + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. + # + # @param [Integer] timeout (nil) the maximum number of seconds to wait for + # the value to be computed. When `nil` the caller will block indefinitely. + # + # @return [Object] self + # + # @!macro delay_note_regarding_blocking + def wait(timeout = nil) + if @executor + execute_task_once + super(timeout) + else + value + end + self + end + + # Reconfigures the block returning the value if still `#incomplete?` + # + # @yield the delayed operation to perform + # @return [true, false] if success + def reconfigure(&block) + synchronize do + raise ArgumentError.new('no block given') unless block_given? 
+ unless @evaluation_started + @task = block + true + else + false + end + end + end + + protected + + def ns_initialize(opts, &block) + init_obligation + set_deref_options(opts) + @executor = opts[:executor] + + @task = block + @state = :pending + @evaluation_started = false + end + + private + + # @!visibility private + def execute_task_once # :nodoc: + # this function has been optimized for performance and + # should not be modified without running new benchmarks + execute = task = nil + synchronize do + execute = @evaluation_started = true unless @evaluation_started + task = @task + end + + if execute + executor = Options.executor_from_options(executor: @executor) + executor.post do + begin + result = task.call + success = true + rescue => ex + reason = ex + end + synchronize do + set_state(success, result, reason) + event.set + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/errors.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/errors.rb new file mode 100644 index 0000000..b69fec0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/errors.rb @@ -0,0 +1,69 @@ +module Concurrent + + Error = Class.new(StandardError) + + # Raised when errors occur during configuration. + ConfigurationError = Class.new(Error) + + # Raised when an asynchronous operation is cancelled before execution. + CancelledOperationError = Class.new(Error) + + # Raised when a lifecycle method (such as `stop`) is called in an improper + # sequence or when the object is in an inappropriate state. + LifecycleError = Class.new(Error) + + # Raised when an attempt is made to violate an immutability guarantee. + ImmutabilityError = Class.new(Error) + + # Raised when an operation is attempted which is not legal given the + # receiver's current state + IllegalOperationError = Class.new(Error) + + # Raised when an object's methods are called when it has not been + # properly initialized. + InitializationError = Class.new(Error) + + # Raised when an object with a start/stop lifecycle has been started an + # excessive number of times. Often used in conjunction with a restart + # policy or strategy. + MaxRestartFrequencyError = Class.new(Error) + + # Raised when an attempt is made to modify an immutable object + # (such as an `IVar`) after its final state has been set. + class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end + + # Raised by an `Executor` when it is unable to process a given task, + # possibly because of a reject policy or other internal error. + RejectedExecutionError = Class.new(Error) + + # Raised when any finite resource, such as a lock counter, exceeds its + # maximum limit/threshold. + ResourceLimitError = Class.new(Error) + + # Raised when an operation times out. + TimeoutError = Class.new(Error) + + # Aggregates multiple exceptions. 
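+ # For example (the wrapped errors below are illustrative):
+ #
+ #   errors = [ArgumentError.new('bad input'), Concurrent::TimeoutError.new]
+ #   raise Concurrent::MultipleErrors.new(errors)
+ #   # the message of the raised error begins with "2 errors" and then
+ #   # lists each wrapped error's message, class, and backtrace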
+ class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/exchanger.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/exchanger.rb new file mode 100644 index 0000000..5a99550 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/exchanger.rb @@ -0,0 +1,352 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/maybe' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/utility/engine' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro exchanger + # + # A synchronization point at which threads can pair and swap elements within + # pairs. Each thread presents some object on entry to the exchange method, + # matches with a partner thread, and receives its partner's object on return. + # + # @!macro thread_safe_variable_comparison + # + # This implementation is very simple, using only a single slot for each + # exchanger (unlike more advanced implementations which use an "arena"). + # This approach will work perfectly fine when there are only a few threads + # accessing a single `Exchanger`. Beyond a handful of threads the performance + # will degrade rapidly due to contention on the single slot, but the algorithm + # will remain correct. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # threads = [ + # Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar" + # Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo" + # ] + # threads.each {|t| t.join(2) } + + # @!visibility private + class AbstractExchanger < Synchronization::Object + + # @!visibility private + CANCEL = ::Object.new + private_constant :CANCEL + + def initialize + super + end + + # @!macro exchanger_method_do_exchange + # + # Waits for another thread to arrive at this exchange point (unless the + # current thread is interrupted), and then transfers the given object to + # it, receiving its object in return. The timeout value indicates the + # approximate number of seconds the method should block while waiting + # for the exchange. When the timeout value is `nil` the method will + # block indefinitely. + # + # @param [Object] value the value to exchange with another thread + # @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely + # + # @!macro exchanger_method_exchange + # + # In some edge cases when a `timeout` is given a return value of `nil` may be + # ambiguous. Specifically, if `nil` is a valid value in the exchange it will + # be impossible to tell whether `nil` is the actual return value or if it + # signifies timeout. When `nil` is a valid value in the exchange consider + # using {#exchange!} or {#try_exchange} instead. + # + # @return [Object] the value exchanged by the other thread or `nil` on timeout + def exchange(value, timeout = nil) + (value = do_exchange(value, timeout)) == CANCEL ? 
nil : value + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + # + # On timeout a {Concurrent::TimeoutError} exception will be raised. + # + # @return [Object] the value exchanged by the other thread + # @raise [Concurrent::TimeoutError] on timeout + def exchange!(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + raise Concurrent::TimeoutError + else + value + end + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + # + # The return value will be a {Concurrent::Maybe} set to `Just` on success or + # `Nothing` on timeout. + # + # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with + # the item exchanged by the other thread as `#value`; on timeout a + # `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason` + # + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # result = exchanger.exchange(:foo, 0.5) + # + # if result.just? + # puts result.value #=> :bar + # else + # puts 'timeout' + # end + def try_exchange(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + Concurrent::Maybe.nothing(Concurrent::TimeoutError) + else + Concurrent::Maybe.just(value) + end + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + raise NotImplementedError + end + end + + # @!macro internal_implementation_note + # @!visibility private + class RubyExchanger < AbstractExchanger + # A simplified version of java.util.concurrent.Exchanger written by + # Doug Lea, Bill Scherer, and Michael Scott with assistance from members + # of JCP JSR-166 Expert Group and released to the public domain. It does + # not include the arena or the multi-processor spin loops. + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java + + safe_initialization! + + class Node < Concurrent::Synchronization::Object + attr_atomic :value + safe_initialization! + + def initialize(item) + super() + @Item = item + @Latch = Concurrent::CountDownLatch.new + self.value = nil + end + + def latch + @Latch + end + + def item + @Item + end + end + private_constant :Node + + def initialize + super + end + + private + + attr_atomic(:slot) + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + + # ALGORITHM + # + # From the original Java version: + # + # > The basic idea is to maintain a "slot", which is a reference to + # > a Node containing both an Item to offer and a "hole" waiting to + # > get filled in. If an incoming "occupying" thread sees that the + # > slot is null, it CAS'es (compareAndSets) a Node there and waits + # > for another to invoke exchange. That second "fulfilling" thread + # > sees that the slot is non-null, and so CASes it back to null, + # > also exchanging items by CASing the hole, plus waking up the + # > occupying thread if it is blocked. In each case CAS'es may + # > fail because a slot at first appears non-null but is null upon + # > CAS, or vice-versa. So threads may need to retry these + # > actions. + # + # This version: + # + # An exchange occurs between an "occupier" thread and a "fulfiller" thread. + # The "slot" is used to setup this interaction. 
The first thread in the + # exchange puts itself into the slot (occupies) and waits for a fulfiller. + # The second thread removes the occupier from the slot and attempts to + # perform the exchange. Removing the occupier also frees the slot for + # another occupier/fulfiller pair. + # + # Because the occupier and the fulfiller are operating independently and + # because there may be contention with other threads, any failed operation + # indicates contention. Both the occupier and the fulfiller operate within + # spin loops. Any failed actions along the happy path will cause the thread + # to repeat the loop and try again. + # + # When a timeout value is given the thread must be cognizant of time spent + # in the spin loop. The remaining time is checked every loop. When the time + # runs out the thread will exit. + # + # A "node" is the data structure used to perform the exchange. Only the + # occupier's node is necessary. It's the node used for the exchange. + # Each node has an "item," a "hole" (self), and a "latch." The item is the + # node's initial value. It never changes. It's what the fulfiller returns on + # success. The occupier's hole is where the fulfiller put its item. It's the + # item that the occupier returns on success. The latch is used for synchronization. + # Because a thread may act as either an occupier or fulfiller (or possibly + # both in periods of high contention) every thread creates a node when + # the exchange method is first called. + # + # The following steps occur within the spin loop. If any actions fail + # the thread will loop and try again, so long as there is time remaining. + # If time runs out the thread will return CANCEL. + # + # Check the slot for an occupier: + # + # * If the slot is empty try to occupy + # * If the slot is full try to fulfill + # + # Attempt to occupy: + # + # * Attempt to CAS myself into the slot + # * Go to sleep and wait to be woken by a fulfiller + # * If the sleep is successful then the fulfiller completed its happy path + # - Return the value from my hole (the value given by the fulfiller) + # * When the sleep fails (time ran out) attempt to cancel the operation + # - Attempt to CAS myself out of the hole + # - If successful there is no contention + # - Return CANCEL + # - On failure, I am competing with a fulfiller + # - Attempt to CAS my hole to CANCEL + # - On success + # - Let the fulfiller deal with my cancel + # - Return CANCEL + # - On failure the fulfiller has completed its happy path + # - Return th value from my hole (the fulfiller's value) + # + # Attempt to fulfill: + # + # * Attempt to CAS the occupier out of the slot + # - On failure loop again + # * Attempt to CAS my item into the occupier's hole + # - On failure the occupier is trying to cancel + # - Loop again + # - On success we are on the happy path + # - Wake the sleeping occupier + # - Return the occupier's item + + value = NULL if value.nil? # The sentinel allows nil to be a valid value + me = Node.new(value) # create my node in case I need to occupy + end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up + + result = loop do + other = slot + if other && compare_and_set_slot(other, nil) + # try to fulfill + if other.compare_and_set_value(nil, value) + # happy path + other.latch.count_down + break other.item + end + elsif other.nil? 
&& compare_and_set_slot(nil, me) + # try to occupy + timeout = end_at - Concurrent.monotonic_time if timeout + if me.latch.wait(timeout) + # happy path + break me.value + else + # attempt to remove myself from the slot + if compare_and_set_slot(me, nil) + break CANCEL + elsif !me.compare_and_set_value(nil, CANCEL) + # I've failed to block the fulfiller + break me.value + end + end + end + break CANCEL if timeout && Concurrent.monotonic_time >= end_at + end + + result == NULL ? nil : result + end + end + + if Concurrent.on_jruby? + + # @!macro internal_implementation_note + # @!visibility private + class JavaExchanger < AbstractExchanger + + def initialize + @exchanger = java.util.concurrent.Exchanger.new + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value) + end + else + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + rescue java.util.concurrent.TimeoutException + CANCEL + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + ExchangerImplementation = case + when Concurrent.on_jruby? + JavaExchanger + else + RubyExchanger + end + private_constant :ExchangerImplementation + + # @!macro exchanger + class Exchanger < ExchangerImplementation + + # @!method initialize + # Creates exchanger instance + + # @!method exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange + + # @!method exchange!(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + + # @!method try_exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb new file mode 100644 index 0000000..6d0b047 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb @@ -0,0 +1,128 @@ +require 'concurrent/errors' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/executor_service' +require 'concurrent/synchronization' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class AbstractExecutorService < Synchronization::LockableObject + include ExecutorService + include Concern::Deprecation + + # The set of possible fallback policies that may be set at thread pool creation. + FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze + + # @!macro executor_service_attr_reader_fallback_policy + attr_reader :fallback_policy + + attr_reader :name + + # Create a new thread pool. + def initialize(opts = {}, &block) + super(&nil) + synchronize do + @auto_terminate = opts.fetch(:auto_terminate, true) + @name = opts.fetch(:name) if opts.key?(:name) + ns_initialize(opts, &block) + end + end + + def to_s + name ? 
"#{super[0..-2]} name: #{name}>" : super + end + + # @!macro executor_service_method_shutdown + def shutdown + raise NotImplementedError + end + + # @!macro executor_service_method_kill + def kill + raise NotImplementedError + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + raise NotImplementedError + end + + # @!macro executor_service_method_running_question + def running? + synchronize { ns_running? } + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + synchronize { ns_shuttingdown? } + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + synchronize { ns_shutdown? } + end + + # @!macro executor_service_method_auto_terminate_question + def auto_terminate? + synchronize { @auto_terminate } + end + + # @!macro executor_service_method_auto_terminate_setter + def auto_terminate=(value) + deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized." + end + + private + + # Handler which executes the `fallback_policy` once the queue size + # reaches `max_queue`. + # + # @param [Array] args the arguments to the task which is being handled. + # + # @!visibility private + def handle_fallback(*args) + case fallback_policy + when :abort + raise RejectedExecutionError + when :discard + false + when :caller_runs + begin + yield(*args) + rescue => ex + # let it fail + log DEBUG, ex + end + true + else + fail "Unknown fallback policy #{fallback_policy}" + end + end + + def ns_execute(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_ns_shutdown_execution + # + # Callback method called when an orderly shutdown has completed. + # The default behavior is to signal all waiting threads. + def ns_shutdown_execution + # do nothing + end + + # @!macro executor_service_method_ns_kill_execution + # + # Callback method called when the executor has been killed. + # The default behavior is to do nothing. + def ns_kill_execution + # do nothing + end + + def ns_auto_terminate? + @auto_terminate + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb new file mode 100644 index 0000000..de50ed1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb @@ -0,0 +1,62 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # A thread pool that dynamically grows and shrinks to fit the current workload. + # New threads are created as needed, existing threads are reused, and threads + # that remain idle for too long are killed and removed from the pool. These + # pools are particularly suited to applications that perform a high volume of + # short-lived tasks. + # + # On creation a `CachedThreadPool` has zero running threads. New threads are + # created on the pool as new operations are `#post`. The size of the pool + # will grow until `#max_length` threads are in the pool or until the number + # of threads exceeds the number of running and pending operations. When a new + # operation is post to the pool the first available idle thread will be tasked + # with the new operation. + # + # Should a thread crash for any reason the thread will immediately be removed + # from the pool. 
Similarly, threads which remain idle for an extended period + # of time will be killed and reclaimed. Thus these thread pools are very + # efficient at reclaiming unused resources. + # + # The API and behavior of this class are based on Java's `CachedThreadPool` + # + # @!macro thread_pool_options + class CachedThreadPool < ThreadPoolExecutor + + # @!macro cached_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options defining pool behavior. + # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool-- + def initialize(opts = {}) + defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: 0, + max_threads: DEFAULT_MAX_POOL_SIZE, + max_queue: DEFAULT_MAX_QUEUE_SIZE } + super(defaults.merge(opts).merge(overrides)) + end + + private + + # @!macro cached_thread_pool_method_initialize + # @!visibility private + def ns_initialize(opts) + super(opts) + if Concurrent.on_jruby? + @max_queue = 0 + @executor = java.util.concurrent.Executors.newCachedThreadPool( + DaemonThreadFactory.new(ns_auto_terminate?)) + @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new) + @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/executor_service.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/executor_service.rb new file mode 100644 index 0000000..7e34491 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/executor_service.rb @@ -0,0 +1,185 @@ +require 'concurrent/concern/logging' + +module Concurrent + + ################################################################### + + # @!macro executor_service_method_post + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + + # @!macro executor_service_method_left_shift + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Proc] task the asynchronous task to perform + # + # @return [self] returns itself + + # @!macro executor_service_method_can_overflow_question + # + # Does the task queue have a maximum size? + # + # @return [Boolean] True if the task queue has a maximum size else false. + + # @!macro executor_service_method_serialized_question + # + # Does this executor guarantee serialization of its operations? + # + # @return [Boolean] True if the executor guarantees that all operations + # will be post in the order they are received and no two operations may + # occur simultaneously. Else false. + + ################################################################### + + # @!macro executor_service_public_api + # + # @!method post(*args, &task) + # @!macro executor_service_method_post + # + # @!method <<(task) + # @!macro executor_service_method_left_shift + # + # @!method can_overflow? 
+ # @!macro executor_service_method_can_overflow_question + # + # @!method serialized? + # @!macro executor_service_method_serialized_question + + ################################################################### + + # @!macro executor_service_attr_reader_fallback_policy + # @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`. + + # @!macro executor_service_method_shutdown + # + # Begin an orderly shutdown. Tasks already in the queue will be executed, + # but no new tasks will be accepted. Has no additional effect if the + # thread pool is not running. + + # @!macro executor_service_method_kill + # + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + + # @!macro executor_service_method_wait_for_termination + # + # Block until executor shutdown is complete or until `timeout` seconds have + # passed. + # + # @note Does not initiate shutdown or termination. Either `shutdown` or `kill` + # must be called before this method (or on another thread). + # + # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete + # + # @return [Boolean] `true` if shutdown complete or false on `timeout` + + # @!macro executor_service_method_running_question + # + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + + # @!macro executor_service_method_shuttingdown_question + # + # Is the executor shuttingdown? + # + # @return [Boolean] `true` when not running and not shutdown, else `false` + + # @!macro executor_service_method_shutdown_question + # + # Is the executor shutdown? + # + # @return [Boolean] `true` when shutdown, `false` when shutting down or running + + # @!macro executor_service_method_auto_terminate_question + # + # Is the executor auto-terminate when the application exits? + # + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + # @!macro executor_service_method_auto_terminate_setter + # + # + # Set the auto-terminate behavior for this executor. + # @deprecated Has no effect + # @param [Boolean] value The new auto-terminate value to set for this executor. + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + ################################################################### + + # @!macro abstract_executor_service_public_api + # + # @!macro executor_service_public_api + # + # @!attribute [r] fallback_policy + # @!macro executor_service_attr_reader_fallback_policy + # + # @!method shutdown + # @!macro executor_service_method_shutdown + # + # @!method kill + # @!macro executor_service_method_kill + # + # @!method wait_for_termination(timeout = nil) + # @!macro executor_service_method_wait_for_termination + # + # @!method running? + # @!macro executor_service_method_running_question + # + # @!method shuttingdown? + # @!macro executor_service_method_shuttingdown_question + # + # @!method shutdown? + # @!macro executor_service_method_shutdown_question + # + # @!method auto_terminate? 
+ # @!macro executor_service_method_auto_terminate_question + # + # @!method auto_terminate=(value) + # @!macro executor_service_method_auto_terminate_setter + + ################################################################### + + # @!macro executor_service_public_api + # @!visibility private + module ExecutorService + include Concern::Logging + + # @!macro executor_service_method_post + def post(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_can_overflow_question + # + # @note Always returns `false` + def can_overflow? + false + end + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `false` + def serialized? + false + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb new file mode 100644 index 0000000..b665f6c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb @@ -0,0 +1,210 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # @!macro thread_pool_executor_constant_default_max_pool_size + # Default maximum number of threads that will be created in the pool. + + # @!macro thread_pool_executor_constant_default_min_pool_size + # Default minimum number of threads that will be retained in the pool. + + # @!macro thread_pool_executor_constant_default_max_queue_size + # Default maximum number of tasks that may be added to the task queue. + + # @!macro thread_pool_executor_constant_default_thread_timeout + # Default maximum number of seconds a thread in the pool may remain idle + # before being reclaimed. + + # @!macro thread_pool_executor_constant_default_synchronous + # Default value of the :synchronous option. + + # @!macro thread_pool_executor_attr_reader_max_length + # The maximum number of threads that may be created in the pool. + # @return [Integer] The maximum number of threads that may be created in the pool. + + # @!macro thread_pool_executor_attr_reader_min_length + # The minimum number of threads that may be retained in the pool. + # @return [Integer] The minimum number of threads that may be retained in the pool. + + # @!macro thread_pool_executor_attr_reader_largest_length + # The largest number of threads that have been created in the pool since construction. + # @return [Integer] The largest number of threads that have been created in the pool since construction. + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # The number of tasks that have been scheduled for execution on the pool since construction. + # @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction. + + # @!macro thread_pool_executor_attr_reader_completed_task_count + # The number of tasks that have been completed by the pool since construction. + # @return [Integer] The number of tasks that have been completed by the pool since construction. + + # @!macro thread_pool_executor_attr_reader_idletime + # The number of seconds that a thread may be idle before being reclaimed. + # @return [Integer] The number of seconds that a thread may be idle before being reclaimed. 
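+  # (Editor's note) A small sketch of how the readers documented by these macros
+  # are typically used, assuming the `Concurrent::ThreadPoolExecutor` facade
+  # defined elsewhere in this gem; the numbers are illustrative only:
+  #
+  #   pool = Concurrent::ThreadPoolExecutor.new(min_threads: 2, max_threads: 5, max_queue: 100)
+  #   pool.min_length          #=> 2
+  #   pool.max_length          #=> 5
+  #   pool.max_queue           #=> 100
+  #   pool.remaining_capacity  #=> 100 while the queue is empty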
+ + # @!macro thread_pool_executor_attr_reader_synchronous + # Whether or not a value of 0 for :max_queue option means the queue must perform direct hand-off or rather unbounded queue. + # @return [true, false] + + # @!macro thread_pool_executor_attr_reader_max_queue + # The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + # + # @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + + # @!macro thread_pool_executor_attr_reader_length + # The number of threads currently in the pool. + # @return [Integer] The number of threads currently in the pool. + + # @!macro thread_pool_executor_attr_reader_queue_length + # The number of tasks in the queue awaiting execution. + # @return [Integer] The number of tasks in the queue awaiting execution. + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + # + # @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + + + + + + # @!macro thread_pool_executor_public_api + # + # @!macro abstract_executor_service_public_api + # + # @!attribute [r] max_length + # @!macro thread_pool_executor_attr_reader_max_length + # + # @!attribute [r] min_length + # @!macro thread_pool_executor_attr_reader_min_length + # + # @!attribute [r] largest_length + # @!macro thread_pool_executor_attr_reader_largest_length + # + # @!attribute [r] scheduled_task_count + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # + # @!attribute [r] completed_task_count + # @!macro thread_pool_executor_attr_reader_completed_task_count + # + # @!attribute [r] idletime + # @!macro thread_pool_executor_attr_reader_idletime + # + # @!attribute [r] max_queue + # @!macro thread_pool_executor_attr_reader_max_queue + # + # @!attribute [r] length + # @!macro thread_pool_executor_attr_reader_length + # + # @!attribute [r] queue_length + # @!macro thread_pool_executor_attr_reader_queue_length + # + # @!attribute [r] remaining_capacity + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # + # @!method can_overflow? + # @!macro executor_service_method_can_overflow_question + + + + + # @!macro thread_pool_options + # + # **Thread Pool Options** + # + # Thread pools support several configuration options: + # + # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. + # * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and + # a `-worker-` name is given to its threads if supported by used Ruby + # implementation. `` is uniq for each thread. + # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at + # any one time. When the queue size reaches `max_queue` and no new threads can be created, + # subsequent tasks will be rejected in accordance with the configured `fallback_policy`. + # * `auto_terminate`: When true (default), the threads started will be marked as daemon. + # * `fallback_policy`: The policy defining how rejected tasks are handled. 
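+  #
+  # A configuration sketch added by the editor; the option values are
+  # illustrative rather than recommendations, and `do_work` is a placeholder:
+  #
+  # ```ruby
+  # pool = Concurrent::ThreadPoolExecutor.new(
+  #   name:            'worker-pool',
+  #   min_threads:     2,
+  #   max_threads:     8,
+  #   max_queue:       100,
+  #   idletime:        60,              # seconds before an idle thread is reclaimed
+  #   fallback_policy: :caller_runs     # run overflow tasks on the posting thread
+  # )
+  # pool.post { do_work }
+  # ```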
+ # + # Three fallback policies are supported: + # + # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. + # * `:discard`: Discard the task and return false. + # * `:caller_runs`: Execute the task on the calling thread. + # + # **Shutting Down Thread Pools** + # + # Killing a thread pool while tasks are still being processed, either by calling + # the `#kill` method or at application exit, will have unpredictable results. There + # is no way for the thread pool to know what resources are being used by the + # in-progress tasks. When those tasks are killed the impact on those resources + # cannot be predicted. The *best* practice is to explicitly shutdown all thread + # pools using the provided methods: + # + # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks + # * Call `#wait_for_termination` with an appropriate timeout interval an allow + # the orderly shutdown to complete + # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time + # + # On some runtime platforms (most notably the JVM) the application will not + # exit until all thread pools have been shutdown. To prevent applications from + # "hanging" on exit, all threads can be marked as daemon according to the + # `:auto_terminate` option. + # + # ```ruby + # pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon + # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon + # ``` + # + # @note Failure to properly shutdown a thread pool can lead to unpredictable results. + # Please read *Shutting Down Thread Pools* for more information. + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface + # @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean- + + + + + + # @!macro fixed_thread_pool + # + # A thread pool that reuses a fixed number of threads operating off an unbounded queue. + # At any point, at most `num_threads` will be active processing tasks. When all threads are busy new + # tasks `#post` to the thread pool are enqueued until a thread becomes available. + # Should a thread crash for any reason the thread will immediately be removed + # from the pool and replaced. + # + # The API and behavior of this class are based on Java's `FixedThreadPool` + # + # @!macro thread_pool_options + class FixedThreadPool < ThreadPoolExecutor + + # @!macro fixed_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Integer] num_threads the number of threads to allocate + # @param [Hash] opts the options defining pool behavior. 
+ # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `num_threads` is less than or equal to zero + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int- + def initialize(num_threads, opts = {}) + raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1 + defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE, + idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: num_threads, + max_threads: num_threads } + super(defaults.merge(opts).merge(overrides)) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb new file mode 100644 index 0000000..282df7a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb @@ -0,0 +1,66 @@ +require 'concurrent/atomic/event' +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/serial_executor_service' + +module Concurrent + + # An executor service which runs all operations on the current thread, + # blocking as necessary. Operations are performed in the order they are + # received and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used + # it immediately runs every `#post` operation on the current thread, blocking + # that thread until the operation is complete. This can be very beneficial + # during testing because it makes all operations deterministic. + # + # @note Intended for use primarily in testing and debugging. + class ImmediateExecutor < AbstractExecutorService + include SerialExecutorService + + # Creates a new executor + def initialize + @stopped = Concurrent::Event.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + task.call(*args) + true + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + ! shutdown? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + false + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? 
+ end + + # @!macro executor_service_method_shutdown + def shutdown + @stopped.set + true + end + alias_method :kill, :shutdown + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb new file mode 100644 index 0000000..4f9769f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb @@ -0,0 +1,44 @@ +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/simple_executor_service' + +module Concurrent + # An executor service which runs all operations on a new thread, blocking + # until it completes. Operations are performed in the order they are received + # and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used it + # immediately runs every `#post` operation on a new thread, blocking the + # current thread until the operation is complete. This is similar to how the + # ImmediateExecutor works, but the operation has the full stack of the new + # thread at its disposal. This can be helpful when the operations will spawn + # more operations on the same executor and so on - such a situation might + # overflow the single stack in case of an ImmediateExecutor, which is + # inconsistent with how it would behave for a threaded executor. + # + # @note Intended for use primarily in testing and debugging. + class IndirectImmediateExecutor < ImmediateExecutor + # Creates a new executor + def initialize + super + @internal_executor = SimpleExecutorService.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new("no block given") unless block_given? + return false unless running? + + event = Concurrent::Event.new + @internal_executor.post do + begin + task.call(*args) + ensure + event.set + end + end + event.wait + + true + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb new file mode 100644 index 0000000..9c0f310 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb @@ -0,0 +1,103 @@ +if Concurrent.on_jruby? + + require 'concurrent/errors' + require 'concurrent/utility/engine' + require 'concurrent/executor/abstract_executor_service' + + module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaExecutorService < AbstractExecutorService + java_import 'java.lang.Runnable' + + FALLBACK_POLICY_CLASSES = { + abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy, + discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy, + caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy + }.freeze + private_constant :FALLBACK_POLICY_CLASSES + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return handle_fallback(*args, &task) unless running? 
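+        # Hand the Ruby block to the underlying Java executor wrapped in the
+        # Runnable adapter (Job, defined below); a Java-side rejection is
+        # re-raised as Concurrent::RejectedExecutionError.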
+ @executor.submit Job.new(args, task) + true + rescue Java::JavaUtilConcurrent::RejectedExecutionException + raise RejectedExecutionError + end + + def wait_for_termination(timeout = nil) + if timeout.nil? + ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok + true + else + @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + + def shutdown + synchronize do + @executor.shutdown + nil + end + end + + def kill + synchronize do + @executor.shutdownNow + nil + end + end + + private + + def ns_running? + !(ns_shuttingdown? || ns_shutdown?) + end + + def ns_shuttingdown? + if @executor.respond_to? :isTerminating + @executor.isTerminating + else + false + end + end + + def ns_shutdown? + @executor.isShutdown || @executor.isTerminated + end + + class Job + include Runnable + def initialize(args, block) + @args = args + @block = block + end + + def run + @block.call(*@args) + end + end + private_constant :Job + end + + class DaemonThreadFactory + # hide include from YARD + send :include, java.util.concurrent.ThreadFactory + + def initialize(daemonize = true) + @daemonize = daemonize + end + + def newThread(runnable) + thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable) + thread.setDaemon(@daemonize) + return thread + end + end + + private_constant :DaemonThreadFactory + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb new file mode 100644 index 0000000..7aa24f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb @@ -0,0 +1,30 @@ +if Concurrent.on_jruby? + + require 'concurrent/executor/java_executor_service' + require 'concurrent/executor/serial_executor_service' + + module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaSingleThreadExecutor < JavaExecutorService + include SerialExecutorService + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + private + + def ns_initialize(opts) + @executor = java.util.concurrent.Executors.newSingleThreadExecutor( + DaemonThreadFactory.new(ns_auto_terminate?) + ) + @fallback_policy = opts.fetch(:fallback_policy, :discard) + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb new file mode 100644 index 0000000..e670663 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb @@ -0,0 +1,136 @@ +if Concurrent.on_jruby? 
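+  # (Editor's note) The executor defined below is only loaded on JRuby; it
+  # delegates directly to java.util.concurrent.ThreadPoolExecutor.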
+ + require 'concurrent/executor/java_executor_service' + + module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class JavaThreadPoolExecutor < JavaExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647 + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + @max_queue != 0 + end + + # @!macro thread_pool_executor_attr_reader_min_length + def min_length + @executor.getCorePoolSize + end + + # @!macro thread_pool_executor_attr_reader_max_length + def max_length + @executor.getMaximumPoolSize + end + + # @!macro thread_pool_executor_attr_reader_length + def length + @executor.getPoolSize + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + @executor.getLargestPoolSize + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + @executor.getTaskCount + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + @executor.getCompletedTaskCount + end + + # @!macro thread_pool_executor_attr_reader_idletime + def idletime + @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS) + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + @executor.getQueue.size + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + @max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity + end + + # @!macro executor_service_method_running_question + def running? 
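+        # running when the base executor checks pass and the Java pool is not
+        # in the middle of terminating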
+ super && !@executor.isTerminating + end + + private + + def ns_initialize(opts) + min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy) + + if @max_queue == 0 + if @synchronous + queue = java.util.concurrent.SynchronousQueue.new + else + queue = java.util.concurrent.LinkedBlockingQueue.new + end + else + queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue) + end + + @executor = java.util.concurrent.ThreadPoolExecutor.new( + min_length, + max_length, + idletime, + java.util.concurrent.TimeUnit::SECONDS, + queue, + DaemonThreadFactory.new(ns_auto_terminate?), + FALLBACK_POLICY_CLASSES[@fallback_policy].new) + + end + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb new file mode 100644 index 0000000..06658d3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb @@ -0,0 +1,76 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/atomic/event' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubyExecutorService < AbstractExecutorService + safe_initialization! + + def initialize(*args, &block) + super + @StopEvent = Event.new + @StoppedEvent = Event.new + end + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + synchronize do + # If the executor is shut down, reject this task + return handle_fallback(*args, &task) unless running? + ns_execute(*args, &task) + true + end + end + + def shutdown + synchronize do + break unless running? + stop_event.set + ns_shutdown_execution + end + true + end + + def kill + synchronize do + break if shutdown? + stop_event.set + ns_kill_execution + stopped_event.set + end + true + end + + def wait_for_termination(timeout = nil) + stopped_event.wait(timeout) + end + + private + + def stop_event + @StopEvent + end + + def stopped_event + @StoppedEvent + end + + def ns_shutdown_execution + stopped_event.set + end + + def ns_running? + !stop_event.set? + end + + def ns_shuttingdown? + !(ns_running? || ns_shutdown?) + end + + def ns_shutdown? + stopped_event.set? 
+ end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb new file mode 100644 index 0000000..916337d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb @@ -0,0 +1,21 @@ +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubySingleThreadExecutor < RubyThreadPoolExecutor + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super( + min_threads: 1, + max_threads: 1, + max_queue: 0, + idletime: DEFAULT_THREAD_IDLETIMEOUT, + fallback_policy: opts.fetch(:fallback_policy, :discard), + ) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb new file mode 100644 index 0000000..dc20d76 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb @@ -0,0 +1,377 @@ +require 'thread' +require 'concurrent/atomic/event' +require 'concurrent/concern/logging' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class RubyThreadPoolExecutor < RubyExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_min_length + attr_reader :min_length + + # @!macro thread_pool_executor_attr_reader_idletime + attr_reader :idletime + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + synchronize { @largest_length } + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + synchronize { @scheduled_task_count } + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + synchronize { @completed_task_count } + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + synchronize { ns_limited_queue? 
} + end + + # @!macro thread_pool_executor_attr_reader_length + def length + synchronize { @pool.length } + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + synchronize { @queue.length } + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + synchronize do + if ns_limited_queue? + @max_queue - @queue.length + else + -1 + end + end + end + + # @!visibility private + def remove_busy_worker(worker) + synchronize { ns_remove_busy_worker worker } + end + + # @!visibility private + def ready_worker(worker) + synchronize { ns_ready_worker worker } + end + + # @!visibility private + def worker_not_old_enough(worker) + synchronize { ns_worker_not_old_enough worker } + end + + # @!visibility private + def worker_died(worker) + synchronize { ns_worker_died worker } + end + + # @!visibility private + def worker_task_completed + synchronize { @completed_task_count += 1 } + end + + private + + # @!visibility private + def ns_initialize(opts) + @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy) + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + + @pool = [] # all workers + @ready = [] # used as a stash (most idle worker is at the start) + @queue = [] # used as queue + # @ready or @queue is empty at all times + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ # detects if Ruby has forked + + @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + # @!visibility private + def ns_limited_queue? + @max_queue != 0 + end + + # @!visibility private + def ns_execute(*args, &task) + ns_reset_if_forked + + if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task) + @scheduled_task_count += 1 + else + handle_fallback(*args, &task) + end + + ns_prune_pool if @next_gc_time < Concurrent.monotonic_time + end + + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + + if @pool.empty? + # nothing to do + stopped_event.set + end + + if @queue.empty? + # no more tasks will be accepted, just stop all workers + @pool.each(&:stop) + end + end + + # @!visibility private + def ns_kill_execution + # TODO log out unprocessed tasks in queue + # TODO try to shutdown first? 
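+        # Kill every worker thread outright and drop the pool bookkeeping;
+        # tasks still sitting in @queue will never be executed (see the TODOs above).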
+ @pool.each(&:kill) + @pool.clear + @ready.clear + end + + # tries to assign task to a worker, tries to get one from @ready or to create new one + # @return [true, false] if task is assigned to a worker + # + # @!visibility private + def ns_assign_worker(*args, &task) + # keep growing if the pool is not at the minimum yet + worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker + if worker + worker << [task, args] + true + else + false + end + rescue ThreadError + # Raised when the operating system refuses to create the new thread + return false + end + + # tries to enqueue task + # @return [true, false] if enqueued + # + # @!visibility private + def ns_enqueue(*args, &task) + return false if @synchronous + + if !ns_limited_queue? || @queue.size < @max_queue + @queue << [task, args] + true + else + false + end + end + + # @!visibility private + def ns_worker_died(worker) + ns_remove_busy_worker worker + replacement_worker = ns_add_busy_worker + ns_ready_worker replacement_worker, false if replacement_worker + end + + # creates new worker which has to receive work to do after it's added + # @return [nil, Worker] nil of max capacity is reached + # + # @!visibility private + def ns_add_busy_worker + return if @pool.size >= @max_length + + @workers_counter += 1 + @pool << (worker = Worker.new(self, @workers_counter)) + @largest_length = @pool.length if @pool.length > @largest_length + worker + end + + # handle ready worker, giving it new job or assigning back to @ready + # + # @!visibility private + def ns_ready_worker(worker, success = true) + task_and_args = @queue.shift + if task_and_args + worker << task_and_args + else + # stop workers when !running?, do not return them to @ready + if running? + @ready.push(worker) + else + worker.stop + end + end + end + + # returns back worker to @ready which was not idle for enough time + # + # @!visibility private + def ns_worker_not_old_enough(worker) + # let's put workers coming from idle_test back to the start (as the oldest worker) + @ready.unshift(worker) + true + end + + # removes a worker which is not in not tracked in @ready + # + # @!visibility private + def ns_remove_busy_worker(worker) + @pool.delete(worker) + stopped_event.set if @pool.empty? && !running? 
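+        # Once the last busy worker is removed during shutdown the stopped
+        # event fires, releasing anyone blocked in #wait_for_termination;
+        # removal itself always reports success (`true`) below.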
+ true + end + + # try oldest worker if it is idle for enough time, it's returned back at the start + # + # @!visibility private + def ns_prune_pool + return if @pool.size <= @min_length + + last_used = @ready.shift + last_used << :idle_test if last_used + + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + def ns_reset_if_forked + if $$ != @ruby_pid + @queue.clear + @ready.clear + @pool.clear + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ + end + end + + # @!visibility private + class Worker + include Concern::Logging + + def initialize(pool, id) + # instance variables accessed only under pool's lock so no need to sync here again + @queue = Queue.new + @pool = pool + @thread = create_worker @queue, pool, pool.idletime + + if @thread.respond_to?(:name=) + @thread.name = [pool.name, 'worker', id].compact.join('-') + end + end + + def <<(message) + @queue << message + end + + def stop + @queue << :stop + end + + def kill + @thread.kill + end + + private + + def create_worker(queue, pool, idletime) + Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime| + last_message = Concurrent.monotonic_time + catch(:stop) do + loop do + + case message = my_queue.pop + when :idle_test + if (Concurrent.monotonic_time - last_message) > my_idletime + my_pool.remove_busy_worker(self) + throw :stop + else + my_pool.worker_not_old_enough(self) + end + + when :stop + my_pool.remove_busy_worker(self) + throw :stop + + else + task, args = message + run_task my_pool, task, args + last_message = Concurrent.monotonic_time + + my_pool.ready_worker(self) + end + end + end + end + end + + def run_task(pool, task, args) + task.call(*args) + pool.worker_task_completed + rescue => ex + # let it fail + log DEBUG, ex + rescue Exception => ex + log ERROR, ex + pool.worker_died(self) + throw :stop + end + end + + private_constant :Worker + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb new file mode 100644 index 0000000..414aa64 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb @@ -0,0 +1,35 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A simple utility class that executes a callable and returns and array of three elements: + # success - indicating if the callable has been executed without errors + # value - filled by the callable result if it has been executed without errors, nil otherwise + # reason - the error risen by the callable if it has been executed with errors, nil otherwise + class SafeTaskExecutor < Synchronization::LockableObject + + def initialize(task, opts = {}) + @task = task + @exception_class = opts.fetch(:rescue_exception, false) ? 
Exception : StandardError + super() # ensures visibility + end + + # @return [Array] + def execute(*args) + synchronize do + success = false + value = reason = nil + + begin + value = @task.call(*args) + success = true + rescue @exception_class => ex + reason = ex + success = false + end + + [success, value, reason] + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb new file mode 100644 index 0000000..f1c38ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb @@ -0,0 +1,34 @@ +require 'concurrent/executor/executor_service' + +module Concurrent + + # Indicates that the including `ExecutorService` guarantees + # that all operations will occur in the order they are post and that no + # two operations may occur simultaneously. This module provides no + # functionality and provides no guarantees. That is the responsibility + # of the including class. This module exists solely to allow the including + # object to be interrogated for its serialization status. + # + # @example + # class Foo + # include Concurrent::SerialExecutor + # end + # + # foo = Foo.new + # + # foo.is_a? Concurrent::ExecutorService #=> true + # foo.is_a? Concurrent::SerialExecutor #=> true + # foo.serialized? #=> true + # + # @!visibility private + module SerialExecutorService + include ExecutorService + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `true` + def serialized? + true + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb new file mode 100644 index 0000000..d314e90 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb @@ -0,0 +1,107 @@ +require 'concurrent/errors' +require 'concurrent/concern/logging' +require 'concurrent/synchronization' + +module Concurrent + + # Ensures passed jobs in a serialized order never running at the same time. + class SerializedExecution < Synchronization::LockableObject + include Concern::Logging + + def initialize() + super() + synchronize { ns_initialize } + end + + Job = Struct.new(:executor, :args, :block) do + def call + block.call(*args) + end + end + + # Submit a task to the executor for asynchronous processing. + # + # @param [Executor] executor to be used for this job + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + def post(executor, *args, &task) + posts [[executor, args, task]] + true + end + + # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not + # be interleaved by other tasks. + # + # @param [Array, Proc)>] posts array of triplets where + # first is a {ExecutorService}, second is array of args for task, third is a task (Proc) + def posts(posts) + # if can_overflow? 
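+ # A hedged usage sketch for SerializedExecution (the executor choice and task
+ # bodies are illustrative; any ExecutorService accepted by #post should work):
+ #
+ #   serializer = Concurrent::SerializedExecution.new
+ #   serializer.post(Concurrent.global_io_executor) { step_one }
+ #   serializer.post(Concurrent.global_io_executor) { step_two }  # runs only after step_one finishes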
+ # raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow' + # end + + return nil if posts.empty? + + jobs = posts.map { |executor, args, task| Job.new executor, args, task } + + job_to_post = synchronize do + if @being_executed + @stash.push(*jobs) + nil + else + @being_executed = true + @stash.push(*jobs[1..-1]) + jobs.first + end + end + + call_job job_to_post if job_to_post + true + end + + private + + def ns_initialize + @being_executed = false + @stash = [] + end + + def call_job(job) + did_it_run = begin + job.executor.post { work(job) } + true + rescue RejectedExecutionError => ex + false + end + + # TODO not the best idea to run it myself + unless did_it_run + begin + work job + rescue => ex + # let it fail + log DEBUG, ex + end + end + end + + # ensures next job is executed if any is stashed + def work(job) + job.call + ensure + synchronize do + job = @stash.shift || (@being_executed = false) + end + + # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end + # of this block + call_job job if job + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb new file mode 100644 index 0000000..8197781 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb @@ -0,0 +1,28 @@ +require 'delegate' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' + +module Concurrent + + # A wrapper/delegator for any `ExecutorService` that + # guarantees serialized execution of tasks. + # + # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html) + # @see Concurrent::SerializedExecution + class SerializedExecutionDelegator < SimpleDelegator + include SerialExecutorService + + def initialize(executor) + @executor = executor + @serializer = SerializedExecution.new + super(executor) + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @serializer.post(@executor, *args, &task) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb new file mode 100644 index 0000000..b87fed5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb @@ -0,0 +1,100 @@ +require 'concurrent/atomics' +require 'concurrent/executor/executor_service' + +module Concurrent + + # An executor service in which every operation spawns a new, + # independently operating thread. + # + # This is perhaps the most inefficient executor service in this + # library. It exists mainly for testing an debugging. Thread creation + # and management is expensive in Ruby and this executor performs no + # resource pooling. This can be very beneficial during testing and + # debugging because it decouples the using code from the underlying + # executor implementation. In production this executor will likely + # lead to suboptimal performance. 
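+ # An illustrative sketch of the class described above (task bodies and arguments
+ # are placeholders); every #post below spawns its own thread:
+ #
+ #   executor = Concurrent::SimpleExecutorService.new
+ #   executor.post(:a, :b) { |x, y| handle(x, y) }
+ #   executor << proc { quick_job }
+ #   executor.shutdown
+ #   executor.wait_for_termination(1)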
+ # + # @note Intended for use primarily in testing and debugging. + class SimpleExecutorService < RubyExecutorService + + # @!macro executor_service_method_post + def self.post(*args) + raise ArgumentError.new('no block given') unless block_given? + Thread.new(*args) do + Thread.current.abort_on_exception = false + yield(*args) + end + true + end + + # @!macro executor_service_method_left_shift + def self.<<(task) + post(&task) + self + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @count.increment + Thread.new(*args) do + Thread.current.abort_on_exception = false + begin + yield(*args) + ensure + @count.decrement + @stopped.set if @running.false? && @count.value == 0 + end + end + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + @running.true? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + @running.false? && ! @stopped.set? + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? + end + + # @!macro executor_service_method_shutdown + def shutdown + @running.make_false + @stopped.set if @count.value == 0 + true + end + + # @!macro executor_service_method_kill + def kill + @running.make_false + @stopped.set + true + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + + private + + def ns_initialize(*args) + @running = Concurrent::AtomicBoolean.new(true) + @stopped = Concurrent::Event.new + @count = Concurrent::AtomicFixnum.new(0) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb new file mode 100644 index 0000000..f1474ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb @@ -0,0 +1,57 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_single_thread_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_single_thread_executor' + end + + SingleThreadExecutorImplementation = case + when Concurrent.on_jruby? + JavaSingleThreadExecutor + else + RubySingleThreadExecutor + end + private_constant :SingleThreadExecutorImplementation + + # @!macro single_thread_executor + # + # A thread pool with a single thread an unlimited queue. Should the thread + # die for any reason it will be removed and replaced, thus ensuring that + # the executor will always remain viable and available to process jobs. + # + # A common pattern for background processing is to create a single thread + # on which an infinite loop is run. The thread's loop blocks on an input + # source (perhaps blocking I/O or a queue) and processes each input as it + # is received. This pattern has several issues. The thread itself is highly + # susceptible to errors during processing. Also, the thread itself must be + # constantly monitored and restarted should it die. `SingleThreadExecutor` + # encapsulates all these bahaviors. The task processor is highly resilient + # to errors from within tasks. Also, should the thread die it will + # automatically be restarted. 
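+ # A minimal sketch of the pattern described above (the task body is a placeholder);
+ # tasks run one at a time, in submission order, on the single worker thread:
+ #
+ #   executor = Concurrent::SingleThreadExecutor.new
+ #   10.times { |i| executor.post { handle(i) } }
+ #   executor.shutdown
+ #   executor.wait_for_termination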
+ # + # The API and behavior of this class are based on Java's `SingleThreadExecutor`. + # + # @!macro abstract_executor_service_public_api + class SingleThreadExecutor < SingleThreadExecutorImplementation + + # @!macro single_thread_executor_method_initialize + # + # Create a new thread pool. + # + # @option opts [Symbol] :fallback_policy (:discard) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html + + # @!method initialize(opts = {}) + # @!macro single_thread_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb new file mode 100644 index 0000000..253d46a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb @@ -0,0 +1,88 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_thread_pool_executor' + end + + ThreadPoolExecutorImplementation = case + when Concurrent.on_jruby? + JavaThreadPoolExecutor + else + RubyThreadPoolExecutor + end + private_constant :ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor + # + # An abstraction composed of one or more threads and a task queue. Tasks + # (blocks or `proc` objects) are submitted to the pool and added to the queue. + # The threads in the pool remove the tasks and execute them in the order + # they were received. + # + # A `ThreadPoolExecutor` will automatically adjust the pool size according + # to the bounds set by `min-threads` and `max-threads`. When a new task is + # submitted and fewer than `min-threads` threads are running, a new thread + # is created to handle the request, even if other worker threads are idle. + # If there are more than `min-threads` but less than `max-threads` threads + # running, a new thread will be created only if the queue is full. + # + # Threads that are idle for too long will be garbage collected, down to the + # configured minimum options. Should a thread crash it, too, will be garbage collected. + # + # `ThreadPoolExecutor` is based on the Java class of the same name. From + # the official Java documentation; + # + # > Thread pools address two different problems: they usually provide + # > improved performance when executing large numbers of asynchronous tasks, + # > due to reduced per-task invocation overhead, and they provide a means + # > of bounding and managing the resources, including threads, consumed + # > when executing a collection of tasks. Each ThreadPoolExecutor also + # > maintains some basic statistics, such as the number of completed tasks. + # > + # > To be useful across a wide range of contexts, this class provides many + # > adjustable parameters and extensibility hooks. 
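+ # A hedged construction sketch for the options documented below in
+ # thread_pool_executor_method_initialize (values and the task body are
+ # illustrative assumptions, not upstream defaults):
+ #
+ #   pool = Concurrent::ThreadPoolExecutor.new(
+ #     min_threads:     4,
+ #     max_threads:     16,
+ #     max_queue:       1_000,
+ #     fallback_policy: :abort        # reject new tasks once the queue is full
+ #   )
+ #   pool.post { handle_request }
+ #   pool.shutdown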
However, programmers are + # > urged to use the more convenient Executors factory methods + # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), + # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single + # > background thread), that preconfigure settings for the most common usage + # > scenarios. + # + # @!macro thread_pool_options + # + # @!macro thread_pool_executor_public_api + class ThreadPoolExecutor < ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options which configure the thread pool. + # + # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum + # number of threads to be created + # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted + # and fewer than `min_threads` are running, a new thread is created + # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum + # number of seconds a thread may be idle before being reclaimed + # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum + # number of tasks allowed in the work queue at any one time; a value of + # zero means the queue may grow without bound + # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0 + # for :max_queue means the queue must perform direct hand-off rather than unbounded. + # @raise [ArgumentError] if `:max_threads` is less than one + # @raise [ArgumentError] if `:min_threads` is less than zero + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html + + # @!method initialize(opts = {}) + # @!macro thread_pool_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/timer_set.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/timer_set.rb new file mode 100644 index 0000000..0dfaf12 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/timer_set.rb @@ -0,0 +1,172 @@ +require 'concurrent/scheduled_task' +require 'concurrent/atomic/event' +require 'concurrent/collection/non_concurrent_priority_queue' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/single_thread_executor' + +require 'concurrent/options' + +module Concurrent + + # Executes a collection of tasks, each after a given delay. A master task + # monitors the set and schedules each task for execution at the appropriate + # time. Tasks are run on the global thread pool or on the supplied executor. + # Each task is represented as a `ScheduledTask`. + # + # @see Concurrent::ScheduledTask + # + # @!macro monotonic_clock_warning + class TimerSet < RubyExecutorService + + # Create a new set of timed tasks. + # + # @!macro executor_options + # + # @param [Hash] opts the options used to specify the executor on which to perform actions + # @option opts [Executor] :executor when set use the given `Executor` instance. 
+ # Three special values are also supported: `:task` returns the global task pool, + # `:operation` returns the global operation pool, and `:immediate` returns a new + # `ImmediateExecutor` object. + def initialize(opts = {}) + super(opts) + end + + # Post a task to be execute run after a given delay (in seconds). If the + # delay is less than 1/100th of a second the task will be immediately post + # to the executor. + # + # @param [Float] delay the number of seconds to wait for before executing the task. + # @param [Array] args the arguments passed to the task on execution. + # + # @yield the task to be performed. + # + # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post + # is successful; false after shutdown. + # + # @raise [ArgumentError] if the intended execution time is not in the future. + # @raise [ArgumentError] if no block is given. + def post(delay, *args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + opts = { executor: @task_executor, + args: args, + timer_set: self } + task = ScheduledTask.execute(delay, opts, &task) # may raise exception + task.unscheduled? ? false : task + end + + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + def kill + shutdown + end + + private :<< + + private + + # Initialize the object. + # + # @param [Hash] opts the options to create the object with. + # @!visibility private + def ns_initialize(opts) + @queue = Collection::NonConcurrentPriorityQueue.new(order: :min) + @task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @timer_executor = SingleThreadExecutor.new + @condition = Event.new + @ruby_pid = $$ # detects if Ruby has forked + end + + # Post the task to the internal queue. + # + # @note This is intended as a callback method from ScheduledTask + # only. It is not intended to be used directly. Post a task + # by using the `SchedulesTask#execute` method. + # + # @!visibility private + def post_task(task) + synchronize { ns_post_task(task) } + end + + # @!visibility private + def ns_post_task(task) + return false unless ns_running? + ns_reset_if_forked + if (task.initial_delay) <= 0.01 + task.executor.post { task.process_task } + else + @queue.push(task) + # only post the process method when the queue is empty + @timer_executor.post(&method(:process_tasks)) if @queue.length == 1 + @condition.set + end + true + end + + # Remove the given task from the queue. + # + # @note This is intended as a callback method from `ScheduledTask` + # only. It is not intended to be used directly. Cancel a task + # by using the `ScheduledTask#cancel` method. + # + # @!visibility private + def remove_task(task) + synchronize { @queue.delete(task) } + end + + # `ExecutorService` callback called during shutdown. + # + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + @queue.clear + @timer_executor.kill + stopped_event.set + end + + def ns_reset_if_forked + if $$ != @ruby_pid + @queue.clear + @condition.reset + @ruby_pid = $$ + end + end + + # Run a loop and execute tasks in the scheduled order and at the approximate + # scheduled time. If no tasks remain the thread will exit gracefully so that + # garbage collection can occur. If there are no ready tasks it will sleep + # for up to 60 seconds waiting for the next scheduled task. 
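+ # An illustrative sketch of the public TimerSet API documented above (the delay
+ # and task are placeholders):
+ #
+ #   timers = Concurrent::TimerSet.new
+ #   task   = timers.post(0.5) { 6 * 7 }   # returns a Concurrent::ScheduledTask
+ #   task.value                            # waits for the delay, then #=> 42
+ #   timers.shutdown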
+ # + # @!visibility private + def process_tasks + loop do + task = synchronize { @condition.reset; @queue.peek } + break unless task + + now = Concurrent.monotonic_time + diff = task.schedule_time - now + + if diff <= 0 + # We need to remove the task from the queue before passing + # it to the executor, to avoid race conditions where we pass + # the peek'ed task to the executor and then pop a different + # one that's been added in the meantime. + # + # Note that there's no race condition between the peek and + # this pop - this pop could retrieve a different task from + # the peek, but that task would be due to fire now anyway + # (because @queue is a priority queue, and this thread is + # the only reader, so whatever timer is at the head of the + # queue now must have the same pop time, or a closer one, as + # when we peeked). + task = synchronize { @queue.pop } + task.executor.post { task.process_task } + else + @condition.wait([diff, 60].min) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executors.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executors.rb new file mode 100644 index 0000000..eb1972c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executors.rb @@ -0,0 +1,20 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/fixed_thread_pool' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/indirect_immediate_executor' +require 'concurrent/executor/java_executor_service' +require 'concurrent/executor/java_single_thread_executor' +require 'concurrent/executor/java_thread_pool_executor' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/ruby_single_thread_executor' +require 'concurrent/executor/ruby_thread_pool_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' +require 'concurrent/executor/serialized_execution_delegator' +require 'concurrent/executor/single_thread_executor' +require 'concurrent/executor/thread_pool_executor' +require 'concurrent/executor/timer_set' diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/future.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/future.rb new file mode 100644 index 0000000..1af182e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/future.rb @@ -0,0 +1,141 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc. + + +module Concurrent + + # {include:file:docs-source/future.md} + # + # @!macro copy_options + # + # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module + # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future + class Future < IVar + + # Create a new `Future` in the `:unscheduled` state. 
+ # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(NULL, opts.merge(__task_from_block__: block), &nil) + end + + # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Future` is in any state other than `:unscheduled`. + # + # @return [Future] a reference to `self` + # + # @example Instance and execute in separate steps + # future = Concurrent::Future.new{ sleep(1); 42 } + # future.state #=> :unscheduled + # future.execute + # future.state #=> :pending + # + # @example Instance and execute in one line + # future = Concurrent::Future.new{ sleep(1); 42 }.execute + # future.state #=> :pending + def execute + if compare_and_set_state(:pending, :unscheduled) + @executor.post{ safe_execute(@task, @args) } + self + end + end + + # Create a new `Future` object with the given block, execute it, and return the + # `:pending` object. + # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + # + # @return [Future] the newly created `Future` in the `:pending` state + # + # @example + # future = Concurrent::Future.execute{ sleep(1); 42 } + # future.state #=> :pending + def self.execute(opts = {}, &block) + Future.new(opts, &block).execute + end + + # @!macro ivar_set_method + def set(value = NULL, &block) + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @task = block || Proc.new { value } + end + end + execute + end + + # Attempt to cancel the operation if it has not already processed. + # The operation can only be cancelled while still `pending`. It cannot + # be cancelled once it has begun processing or has completed. + # + # @return [Boolean] was the operation successfully cancelled. + def cancel + if compare_and_set_state(:cancelled, :pending) + complete(false, nil, CancelledOperationError.new) + true + else + false + end + end + + # Has the operation been successfully cancelled? + # + # @return [Boolean] + def cancelled? + state == :cancelled + end + + # Wait the given number of seconds for the operation to complete. + # On timeout attempt to cancel the operation. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Boolean] true if the operation completed before the timeout + # else false + def wait_or_cancel(timeout) + wait(timeout) + if complete? 
+ true + else + cancel + false + end + end + + protected + + def ns_initialize(value, opts) + super + @state = :unscheduled + @task = opts[:__task_from_block__] + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/hash.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/hash.rb new file mode 100644 index 0000000..92df66b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/hash.rb @@ -0,0 +1,59 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_hash + # + # A thread-safe subclass of Hash. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`, + # which takes the lock repeatedly when reading an item. + # + # @see http://ruby-doc.org/core/Hash.html Ruby standard library `Hash` + + # @!macro internal_implementation_note + HashImplementation = case + when Concurrent.on_cruby? + # Hash is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Hash + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyHash < ::Hash + include JRuby::Synchronized + end + JRubyHash + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxHash < ::Hash + end + ThreadSafe::Util.make_synchronized_on_rbx RbxHash + RbxHash + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyHash < ::Hash + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash + TruffleRubyHash + + else + warn 'Possibly unsupported Ruby implementation' + ::Hash + end + private_constant :HashImplementation + + # @!macro concurrent_hash + class Hash < HashImplementation + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/immutable_struct.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/immutable_struct.rb new file mode 100644 index 0000000..d275595 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/immutable_struct.rb @@ -0,0 +1,101 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # A thread-safe, immutable variation of Ruby's standard `Struct`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module ImmutableStruct + include Synchronization::AbstractStruct + + def self.included(base) + base.safe_initialization! 
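+ # A hedged usage sketch for ImmutableStruct (the struct name and members are
+ # hypothetical):
+ #
+ #   Point = Concurrent::ImmutableStruct.new(:x, :y)
+ #   p = Point.new(1, 2)
+ #   p.x       #=> 1
+ #   p.values  #=> [1, 2]
+ #   p.to_h    #=> {:x=>1, :y=>2}
+ #   # members cannot be reassigned after construction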
+ end + + # @!macro struct_values + def values + ns_values + end + + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + ns_values_at(indexes) + end + + # @!macro struct_inspect + def inspect + ns_inspect + end + + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + ns_merge(other, &block) + end + + # @!macro struct_to_h + def to_h + ns_to_h + end + + # @!macro struct_get + def [](member) + ns_get(member) + end + + # @!macro struct_equality + def ==(other) + ns_equality(other) + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + ns_each(&block) + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + ns_each_pair(&block) + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + ns_select(&block) + end + + private + + # @!visibility private + def initialize_copy(original) + super(original) + ns_initialize_copy + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block) + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/ivar.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/ivar.rb new file mode 100644 index 0000000..2a724db --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/ivar.rb @@ -0,0 +1,207 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/obligation' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # An `IVar` is like a future that you can assign. As a future is a value that + # is being computed that you can wait on, an `IVar` is a value that is waiting + # to be assigned, that you can wait on. `IVars` are single assignment and + # deterministic. + # + # Then, express futures as an asynchronous computation that assigns an `IVar`. + # The `IVar` becomes the primitive on which [futures](Future) and + # [dataflow](Dataflow) are built. + # + # An `IVar` is a single-element container that is normally created empty, and + # can only be set once. The I in `IVar` stands for immutable. Reading an + # `IVar` normally blocks until it is set. It is safe to set and read an `IVar` + # from different threads. + # + # If you want to have some parallel task set the value in an `IVar`, you want + # a `Future`. If you want to create a graph of parallel tasks all executed + # when the values they depend on are ready you want `dataflow`. `IVar` is + # generally a low-level primitive. + # + # ## Examples + # + # Create, set and get an `IVar` + # + # ```ruby + # ivar = Concurrent::IVar.new + # ivar.set 14 + # ivar.value #=> 14 + # ivar.set 2 # would now be an error + # ``` + # + # ## See Also + # + # 1. For the theory: Arvind, R. Nikhil, and K. Pingali. 
+ # [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). + # In Proceedings of Workshop on Graph Reduction, 1986. + # 2. For recent application: + # [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html). + class IVar < Synchronization::LockableObject + include Concern::Obligation + include Concern::Observable + + # Create a new `IVar` in the `:pending` state with the (optional) initial value. + # + # @param [Object] value the initial value + # @param [Hash] opts the options to create a message with + # @option opts [String] :dup_on_deref (false) call `#dup` before returning + # the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before + # returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def initialize(value = NULL, opts = {}, &block) + if value != NULL && block_given? + raise ArgumentError.new('provide only a value or a block') + end + super(&nil) + synchronize { ns_initialize(value, opts, &block) } + end + + # Add an observer on this object that will receive notification on update. + # + # Upon completion the `IVar` will notify all observers in a thread-safe way. + # The `func` method of the observer will be called with three arguments: the + # `Time` at which the `Future` completed the asynchronous operation, the + # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on + # fulfillment). + # + # @param [Object] observer the object that will be notified of changes + # @param [Symbol] func symbol naming the method to call when this + # `Observable` has changes` + def add_observer(observer = nil, func = :update, &block) + raise ArgumentError.new('cannot provide both an observer and a block') if observer && block + direct_notification = false + + if block + observer = block + func = :call + end + + synchronize do + if event.set? + direct_notification = true + else + observers.add_observer(observer, func) + end + end + + observer.send(func, Time.now, self.value, reason) if direct_notification + observer + end + + # @!macro ivar_set_method + # Set the `IVar` to a value and wake or notify all threads waiting on it. + # + # @!macro ivar_set_parameters_and_exceptions + # @param [Object] value the value to store in the `IVar` + # @yield A block operation to use for setting the value + # @raise [ArgumentError] if both a value and a block are given + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # + # @return [IVar] self + def set(value = NULL) + check_for_block_or_value!(block_given?, value) + raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending) + + begin + value = yield if block_given? + complete_without_notification(true, value, nil) + rescue => ex + complete_without_notification(false, nil, ex) + end + + notify_observers(self.value, reason) + self + end + + # @!macro ivar_fail_method + # Set the `IVar` to failed due to some error and wake or notify all threads waiting on it. + # + # @param [Object] reason for the failure + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # @return [IVar] self + def fail(reason = StandardError.new) + complete(false, nil, reason) + end + + # Attempt to set the `IVar` with the given value or block. 
Return a + # boolean indicating the success or failure of the set operation. + # + # @!macro ivar_set_parameters_and_exceptions + # + # @return [Boolean] true if the value was set else false + def try_set(value = NULL, &block) + set(value, &block) + true + rescue MultipleAssignmentError + false + end + + protected + + # @!visibility private + def ns_initialize(value, opts) + value = yield if block_given? + init_obligation + self.observers = Collection::CopyOnWriteObserverSet.new + set_deref_options(opts) + + @state = :pending + if value != NULL + ns_complete_without_notification(true, value, nil) + end + end + + # @!visibility private + def safe_execute(task, args = []) + if compare_and_set_state(:processing, :pending) + success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, val, reason) + yield(success, val, reason) if block_given? + end + end + + # @!visibility private + def complete(success, value, reason) + complete_without_notification(success, value, reason) + notify_observers(self.value, reason) + self + end + + # @!visibility private + def complete_without_notification(success, value, reason) + synchronize { ns_complete_without_notification(success, value, reason) } + self + end + + # @!visibility private + def notify_observers(value, reason) + observers.notify_and_delete_observers{ [Time.now, value, reason] } + end + + # @!visibility private + def ns_complete_without_notification(success, value, reason) + raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state + set_state(success, value, reason) + event.set + end + + # @!visibility private + def check_for_block_or_value!(block_given, value) # :nodoc: + if (block_given && value != NULL) || (! block_given && value == NULL) + raise ArgumentError.new('must set with either a value or a block') + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/map.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/map.rb new file mode 100644 index 0000000..3967cbf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/map.rb @@ -0,0 +1,337 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/synchronization' +require 'concurrent/utility/engine' + +module Concurrent + # @!visibility private + module Collection + + # @!visibility private + MapImplementation = case + when Concurrent.on_jruby? + # noinspection RubyResolve + JRubyMapBackend + when Concurrent.on_cruby? + require 'concurrent/collection/map/mri_map_backend' + MriMapBackend + when Concurrent.on_rbx? || Concurrent.on_truffleruby? + require 'concurrent/collection/map/atomic_reference_map_backend' + AtomicReferenceMapBackend + else + warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' + require 'concurrent/collection/map/synchronized_map_backend' + SynchronizedMapBackend + end + end + + # `Concurrent::Map` is a hash-like object and should have much better performance + # characteristics, especially under high concurrency, than `Concurrent::Hash`. + # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` + # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` + # does. For most uses it should do fine though, and we recommend you consider + # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. 
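+ # A brief illustrative sketch of the atomic operations documented below (keys and
+ # values are placeholders):
+ #
+ #   map = Concurrent::Map.new
+ #   map.put_if_absent(:a, 1)          #=> nil (key was absent, so 1 was stored)
+ #   map.compute_if_absent(:b) { 2 }   #=> 2
+ #   map.fetch_or_store(:c) { 3 }      #=> 3
+ #   map[:a]                           #=> 1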
+ class Map < Collection::MapImplementation + + # @!macro map.atomic_method + # This method is atomic. + + # @!macro map.atomic_method_with_block + # This method is atomic. + # @note Atomic methods taking a block do not allow the `self` instance + # to be used within the block. Doing so will cause a deadlock. + + # @!method compute_if_absent(key) + # Compute and store new value for key if the key is absent. + # @param [Object] key + # @yield new value + # @yieldreturn [Object] new value + # @return [Object] new value or current value + # @!macro map.atomic_method_with_block + + # @!method compute_if_present(key) + # Compute and store new value for key if the key is present. + # @param [Object] key + # @yield new value + # @yieldparam old_value [Object] + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method compute(key) + # Compute and store new value for key. + # @param [Object] key + # @yield compute new value from old one + # @yieldparam old_value [Object, nil] old_value, or nil when key is absent + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method merge_pair(key, value) + # If the key is absent, the value is stored, otherwise new value is + # computed with a block. + # @param [Object] key + # @param [Object] value + # @yield compute new value from old one + # @yieldparam old_value [Object] old value + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method replace_pair(key, old_value, new_value) + # Replaces old_value with new_value if key exists and current value + # matches old_value + # @param [Object] key + # @param [Object] old_value + # @param [Object] new_value + # @return [true, false] true if replaced + # @!macro map.atomic_method + + # @!method replace_if_exists(key, new_value) + # Replaces current value with new_value if key exists + # @param [Object] key + # @param [Object] new_value + # @return [Object, nil] old value or nil + # @!macro map.atomic_method + + # @!method get_and_set(key, value) + # Get the current value under key and set new value. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete(key) + # Delete key and its value. + # @param [Object] key + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete_pair(key, value) + # Delete pair and its value if current value equals the provided value. 
+ # @param [Object] key + # @param [Object] value + # @return [true, false] true if deleted + # @!macro map.atomic_method + + + def initialize(options = nil, &block) + if options.kind_of?(::Hash) + validate_options_hash!(options) + else + options = nil + end + + super(options) + @default_proc = block + end + + # Get a value with key + # @param [Object] key + # @return [Object] the value + def [](key) + if value = super # non-falsy value is an existing mapping, return it right away + value + # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call + # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value + # would be returned) + # note: nil == value check is not technically necessary + elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) + @default_proc.call(self, key) + else + value + end + end + + alias_method :get, :[] + # TODO (pitr-ch 30-Oct-2018): doc + alias_method :put, :[]= + + # Get a value with key, or default_value when key is absent, + # or fail when no default_value is given. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @raise [KeyError] when key is missing and no default_value is provided + # @!macro map_method_not_atomic + # @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended + # to be use as a concurrency primitive with strong happens-before + # guarantees. It is not intended to be used as a high-level abstraction + # supporting complex operations. All read and write operations are + # thread safe, but no guarantees are made regarding race conditions + # between the fetch operation and yielding to the block. Additionally, + # this method does not support recursion. This is due to internal + # constraints that are very unlikely to change in the near future. + def fetch(key, default_value = NULL) + if NULL != (value = get_or_default(key, NULL)) + value + elsif block_given? + yield key + elsif NULL != default_value + default_value + else + raise_fetch_no_key + end + end + + # Fetch value with key, or store default value when key is absent, + # or fail when no default_value is given. This is a two step operation, + # therefore not atomic. The store can overwrite other concurrently + # stored value. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @!macro map.atomic_method_with_block + def fetch_or_store(key, default_value = NULL) + fetch(key) do + put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) + end + end + + # Insert value into map with key if key is absent in one atomic step. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] the previous value when key was present or nil when there was no key + def put_if_absent(key, value) + computed = false + result = compute_if_absent(key) do + computed = true + value + end + computed ? nil : result + end unless method_defined?(:put_if_absent) + + # Is the value stored in the map. Iterates over all values. 
+ # @param [Object] value + # @return [true, false] + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end + + # All keys + # @return [::Array] keys + def keys + arr = [] + each_pair { |k, v| arr << k } + arr + end unless method_defined?(:keys) + + # All values + # @return [::Array] values + def values + arr = [] + each_pair { |k, v| arr << v } + arr + end unless method_defined?(:values) + + # Iterates over each key. + # @yield for each key in the map + # @yieldparam key [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_key + each_pair { |k, v| yield k } + end unless method_defined?(:each_key) + + # Iterates over each value. + # @yield for each value in the map + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_value + each_pair { |k, v| yield v } + end unless method_defined?(:each_value) + + # Iterates over each key value pair. + # @yield for each key value pair in the map + # @yieldparam key [Object] + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_pair + return enum_for :each_pair unless block_given? + super + end + + alias_method :each, :each_pair unless method_defined?(:each) + + # Find key of a value. + # @param [Object] value + # @return [Object, nil] key or nil when not found + def key(value) + each_pair { |k, v| return k if v == value } + nil + end unless method_defined?(:key) + alias_method :index, :key if RUBY_VERSION < '1.9' + + # Is map empty? + # @return [true, false] + def empty? + each_pair { |k, v| return false } + true + end unless method_defined?(:empty?) + + # The size of map. + # @return [Integer] size + def size + count = 0 + each_pair { |k, v| count += 1 } + count + end unless method_defined?(:size) + + # @!visibility private + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair { |k, v| h[k] = v } + h + end + + # @!visibility private + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + # @!visibility private + def inspect + format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect + end + + private + + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair { |k, v| self[k] = v } + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive Integer" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/maybe.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/maybe.rb new file mode 100644 index 0000000..7ba3d3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/maybe.rb @@ -0,0 +1,229 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A `Maybe` encapsulates an optional value. A `Maybe` either contains a value + # of (represented as `Just`), or it is empty (represented as `Nothing`). 
Using + # `Maybe` is a good way to deal with errors or exceptional cases without + # resorting to drastic measures such as exceptions. + # + # `Maybe` is a replacement for the use of `nil` with better type checking. + # + # For compatibility with {Concurrent::Concern::Obligation} the predicate and + # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and + # `reason`. + # + # ## Motivation + # + # A common pattern in languages with pattern matching, such as Erlang and + # Haskell, is to return *either* a value *or* an error from a function + # Consider this Erlang code: + # + # ```erlang + # case file:consult("data.dat") of + # {ok, Terms} -> do_something_useful(Terms); + # {error, Reason} -> lager:error(Reason) + # end. + # ``` + # + # In this example the standard library function `file:consult` returns a + # [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044) + # with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134) + # (similar to a ruby symbol) and a variable containing ancillary data. On + # success it returns the atom `ok` and the data from the file. On failure it + # returns `error` and a string with an explanation of the problem. With this + # pattern there is no ambiguity regarding success or failure. If the file is + # empty the return value cannot be misinterpreted as an error. And when an + # error occurs the return value provides useful information. + # + # In Ruby we tend to return `nil` when an error occurs or else we raise an + # exception. Both of these idioms are problematic. Returning `nil` is + # ambiguous because `nil` may also be a valid value. It also lacks + # information pertaining to the nature of the error. Raising an exception + # is both expensive and usurps the normal flow of control. All of these + # problems can be solved with the use of a `Maybe`. + # + # A `Maybe` is unambiguous with regard to whether or not it contains a value. + # When `Just` it contains a value, when `Nothing` it does not. When `Just` + # the value it contains may be `nil`, which is perfectly valid. When + # `Nothing` the reason for the lack of a value is contained as well. The + # previous Erlang example can be duplicated in Ruby in a principled way by + # having functions return `Maybe` objects: + # + # ```ruby + # result = MyFileUtils.consult("data.dat") # returns a Maybe + # if result.just? + # do_something_useful(result.value) # or result.just + # else + # logger.error(result.reason) # or result.nothing + # end + # ``` + # + # @example Returning a Maybe from a Function + # module MyFileUtils + # def self.consult(path) + # file = File.open(path, 'r') + # Concurrent::Maybe.just(file.read) + # rescue => ex + # return Concurrent::Maybe.nothing(ex) + # ensure + # file.close if file + # end + # end + # + # maybe = MyFileUtils.consult('bogus.file') + # maybe.just? #=> false + # maybe.nothing? #=> true + # maybe.reason #=> # + # + # maybe = MyFileUtils.consult('README.md') + # maybe.just? #=> true + # maybe.nothing? #=> false + # maybe.value #=> "# Concurrent Ruby\n[![Gem Version..." + # + # @example Using Maybe with a Block + # result = Concurrent::Maybe.from do + # Client.find(10) # Client is an ActiveRecord model + # end + # + # # -- if the record was found + # result.just? #=> true + # result.value #=> # + # + # # -- if the record was not found + # result.just? #=> false + # result.reason #=> ActiveRecord::RecordNotFound + # + # @example Using Maybe with the Null Object Pattern + # # In a Rails controller... 
+ # result = ClientService.new(10).find # returns a Maybe + # render json: result.or(NullClient.new) + # + # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe + # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe + class Maybe < Synchronization::Object + include Comparable + safe_initialization! + + # Indicates that the given attribute has not been set. + # When `Just` the {#nothing} getter will return `NONE`. + # When `Nothing` the {#just} getter will return `NONE`. + NONE = ::Object.new.freeze + + # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`. + attr_reader :just + + # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`. + attr_reader :nothing + + private_class_method :new + + # Create a new `Maybe` using the given block. + # + # Runs the given block passing all function arguments to the block as block + # arguments. If the block runs to completion without raising an exception + # a new `Just` is created with the value set to the return value of the + # block. If the block raises an exception a new `Nothing` is created with + # the reason being set to the raised exception. + # + # @param [Array] args Zero or more arguments to pass to the block. + # @yield The block from which to create a new `Maybe`. + # @yieldparam [Array] args Zero or more block arguments passed as + # arguments to the function. + # + # @return [Maybe] The newly created object. + # + # @raise [ArgumentError] when no block given. + def self.from(*args) + raise ArgumentError.new('no block given') unless block_given? + begin + value = yield(*args) + return new(value, NONE) + rescue => ex + return new(NONE, ex) + end + end + + # Create a new `Just` with the given value. + # + # @param [Object] value The value to set for the new `Maybe` object. + # + # @return [Maybe] The newly created object. + def self.just(value) + return new(value, NONE) + end + + # Create a new `Nothing` with the given (optional) reason. + # + # @param [Exception] error The reason to set for the new `Maybe` object. + # When given a string a new `StandardError` will be created with the + # argument as the message. When no argument is given a new + # `StandardError` with an empty message will be created. + # + # @return [Maybe] The newly created object. + def self.nothing(error = '') + if error.is_a?(Exception) + nothing = error + else + nothing = StandardError.new(error.to_s) + end + return new(NONE, nothing) + end + + # Is this `Maybe` a `Just` (successfully fulfilled with a value)? + # + # @return [Boolean] True if `Just` or false if `Nothing`. + def just? + ! nothing? + end + alias :fulfilled? :just? + + # Is this `Maybe` a `nothing` (rejected with an exception upon fulfillment)? + # + # @return [Boolean] True if `Nothing` or false if `Just`. + def nothing? + @nothing != NONE + end + alias :rejected? :nothing? + + alias :value :just + + alias :reason :nothing + + # Comparison operator. + # + # @return [Integer] 0 if self and other are both `Nothing`; + # -1 if self is `Nothing` and other is `Just`; + # 1 if self is `Just` and other is nothing; + # `self.just <=> other.just` if both self and other are `Just`. + def <=>(other) + if nothing? + other.nothing? ? 0 : -1 + else + other.nothing? ? 1 : just <=> other.just + end + end + + # Return either the value of self or the given default value. + # + # @return [Object] The value of self when `Just`; else the given default. + def or(other) + just? ? 
just : other + end + + private + + # Create a new `Maybe` with the given attributes. + # + # @param [Object] just The value when `Just` else `NONE`. + # @param [Exception, Object] nothing The exception when `Nothing` else `NONE`. + # + # @return [Maybe] The new `Maybe`. + # + # @!visibility private + def initialize(just, nothing) + @just = just + @nothing = nothing + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mutable_struct.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mutable_struct.rb new file mode 100644 index 0000000..55361e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mutable_struct.rb @@ -0,0 +1,239 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe variation of Ruby's standard `Struct`. Values can be set at + # construction or safely changed at any time during the object's lifecycle. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module MutableStruct + include Synchronization::AbstractStruct + + # @!macro struct_new + # + # Factory for creating new struct classes. + # + # ``` + # new([class_name] [, member_name]+>) -> StructClass click to toggle source + # new([class_name] [, member_name]+>) {|StructClass| block } -> StructClass + # new(value, ...) -> obj + # StructClass[value, ...] -> obj + # ``` + # + # The first two forms are used to create a new struct subclass `class_name` + # that can contain a value for each member_name . This subclass can be + # used to create instances of the structure like any other Class . + # + # If the `class_name` is omitted an anonymous struct class will be created. + # Otherwise, the name of this struct will appear as a constant in the struct class, + # so it must be unique for all structs under this base class and must start with a + # capital letter. Assigning a struct class to a constant also gives the class + # the name of the constant. + # + # If a block is given it will be evaluated in the context of `StructClass`, passing + # the created class as a parameter. This is the recommended way to customize a struct. + # Subclassing an anonymous struct creates an extra anonymous class that will never be used. + # + # The last two forms create a new instance of a struct subclass. The number of value + # parameters must be less than or equal to the number of attributes defined for the + # struct. Unset parameters default to nil. Passing more parameters than number of attributes + # will raise an `ArgumentError`. + # + # @see http://ruby-doc.org/core/Struct.html#method-c-new Ruby standard library `Struct#new` + + # @!macro struct_values + # + # Returns the values for this struct as an Array. + # + # @return [Array] the values for this struct + # + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + # + # Returns the struct member values for each selector as an Array. + # + # A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`). + # + # @param [Fixnum, Range] indexes the index(es) from which to obatin the values (in order) + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + # + # Describe the contents of this struct in a string. 
+ # + # @return [String] the contents of this struct in a string + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + # + # Returns a new struct containing the contents of `other` and the contents + # of `self`. If no block is specified, the value for entries with duplicate + # keys will be that of `other`. Otherwise the value for each duplicate key + # is determined by calling the block with the key, its value in `self` and + # its value in `other`. + # + # @param [Hash] other the hash from which to set the new values + # @yield an options block for resolving duplicate keys + # @yieldparam [String, Symbol] member the name of the member which is duplicated + # @yieldparam [Object] selfvalue the value of the member in `self` + # @yieldparam [Object] othervalue the value of the member in `other` + # + # @return [Synchronization::AbstractStruct] a new struct with the new values + # + # @raise [ArgumentError] of given a member that is not defined in the struct + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + # + # Returns a hash containing the names and values for the struct’s members. + # + # @return [Hash] the names and values for the struct’s members + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + # + # Attribute Reference + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the member does not exist + # @raise [IndexError] if the index is out of range. + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + # + # Equality + # + # @return [Boolean] true if other has the same struct subclass and has + # equal member values (according to `Object#==`) + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + # + # Yields the value of each struct member in order. If no block is given + # an enumerator is returned. + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + # + # Yields the name and value of each struct member in order. If no block is + # given an enumerator is returned. + # + # @yield the operation to be performed on each struct member/value pair + # @yieldparam [Object] member each struct member (in order) + # @yieldparam [Object] value each struct value (in order) + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + # + # Yields each member value from the struct to the block and returns an Array + # containing the member values from the struct for which the given block + # returns a true value (equivalent to `Enumerable#select`). + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + # + # @return [Array] an array containing each value for which the block returns true + def select(&block) + return enum_for(:select) unless block_given? 
+ synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # Attribute Assignment + # + # Sets the value of the given struct member or the member at the given index. + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the name does not exist + # @raise [IndexError] if the index is out of range. + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize { @values[member] = value } + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize { @values[index] = value } + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mvar.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mvar.rb new file mode 100644 index 0000000..9034711 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mvar.rb @@ -0,0 +1,242 @@ +require 'concurrent/concern/dereferenceable' +require 'concurrent/synchronization' + +module Concurrent + + # An `MVar` is a synchronized single element container. They are empty or + # contain one item. Taking a value from an empty `MVar` blocks, as does + # putting a value into a full one. You can either think of them as blocking + # queue of length one, or a special kind of mutable variable. + # + # On top of the fundamental `#put` and `#take` operations, we also provide a + # `#mutate` that is atomic with respect to operations on the same instance. + # These operations all support timeouts. + # + # We also support non-blocking operations `#try_put!` and `#try_take!`, a + # `#set!` that ignores existing values, a `#value` that returns the value + # without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields + # `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`. + # You shouldn't use these operations in the first instance. + # + # `MVar` is a [Dereferenceable](Dereferenceable). + # + # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala. + # + # Note that unlike the original Haskell paper, our `#take` is blocking. This is how + # Haskell and Scala do it today. 
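+  # A minimal usage sketch (added here for illustration only, not part of the
+  # upstream concurrent-ruby sources): the blocking `#put`/`#take` pair and the
+  # timeout behaviour described above.
+  #
+  #     require 'concurrent'
+  #
+  #     mvar = Concurrent::MVar.new   # starts empty
+  #     mvar.empty?                   #=> true
+  #     mvar.put(14)                  # would block if the MVar were already full
+  #     mvar.take                     #=> 14, leaving the MVar empty again
+  #     mvar.take(0.1)                #=> Concurrent::MVar::TIMEOUT after ~0.1 s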
+ # + # @!macro copy_options + # + # ## See Also + # + # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non- strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th + # ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991. + # + # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). + # In Proceedings of the 23rd Symposium on Principles of Programming Languages + # (PoPL), 1996. + class MVar < Synchronization::Object + include Concern::Dereferenceable + safe_initialization! + + # Unique value that represents that an `MVar` was empty + EMPTY = ::Object.new + + # Unique value that represents that an `MVar` timed out before it was able + # to produce a value. + TIMEOUT = ::Object.new + + # Create a new `MVar`, either empty or with an initial value. + # + # @param [Hash] opts the options controlling how the future will be processed + # + # @!macro deref_options + def initialize(value = EMPTY, opts = {}) + @value = value + @mutex = Mutex.new + @empty_condition = ConditionVariable.new + @full_condition = ConditionVariable.new + set_deref_options(opts) + end + + # Remove the value from an `MVar`, leaving it empty, and blocking if there + # isn't a value. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was taken, or `TIMEOUT` + def take(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # acquires lock on the from an `MVAR`, yields the value to provided block, + # and release lock. A timeout can be set to limit the time spent blocked, + # in which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value returned by the block, or `TIMEOUT` + def borrow(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # if we timeoud out we'll still be empty + if unlocked_full? + yield @value + else + TIMEOUT + end + end + end + + # Put a value into an `MVar`, blocking if there is already a value until + # it is empty. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was put, or `TIMEOUT` + def put(value, timeout = nil) + @mutex.synchronize do + wait_for_empty(timeout) + + # If we timed out we won't be empty + if unlocked_empty? + @value = value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Atomically `take`, yield the value to a block for transformation, and then + # `put` the transformed value. Returns the transformed value. A timeout can + # be set to limit the time spent blocked, in which case it returns `TIMEOUT` + # if the time is exceeded. + # @return [Object] the transformed value, or `TIMEOUT` + def modify(timeout = nil) + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = yield value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Non-blocking version of `take`, that returns `EMPTY` instead of blocking. + def try_take! 
+ @mutex.synchronize do + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + EMPTY + end + end + end + + # Non-blocking version of `put`, that returns whether or not it was successful. + def try_put!(value) + @mutex.synchronize do + if unlocked_empty? + @value = value + @full_condition.signal + true + else + false + end + end + end + + # Non-blocking version of `put` that will overwrite an existing value. + def set!(value) + @mutex.synchronize do + old_value = @value + @value = value + @full_condition.signal + apply_deref_options(old_value) + end + end + + # Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet. + def modify! + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + value = @value + @value = yield value + if unlocked_empty? + @empty_condition.signal + else + @full_condition.signal + end + apply_deref_options(value) + end + end + + # Returns if the `MVar` is currently empty. + def empty? + @mutex.synchronize { @value == EMPTY } + end + + # Returns if the `MVar` currently contains a value. + def full? + !empty? + end + + protected + + def synchronize(&block) + @mutex.synchronize(&block) + end + + private + + def unlocked_empty? + @value == EMPTY + end + + def unlocked_full? + ! unlocked_empty? + end + + def wait_for_full(timeout) + wait_while(@full_condition, timeout) { unlocked_empty? } + end + + def wait_for_empty(timeout) + wait_while(@empty_condition, timeout) { unlocked_full? } + end + + def wait_while(condition, timeout) + if timeout.nil? + while yield + condition.wait(@mutex) + end + else + stop = Concurrent.monotonic_time + timeout + while yield && timeout > 0.0 + condition.wait(@mutex, timeout) + timeout = stop - Concurrent.monotonic_time + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/options.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/options.rb new file mode 100644 index 0000000..bdd22a9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/options.rb @@ -0,0 +1,42 @@ +require 'concurrent/configuration' + +module Concurrent + + # @!visibility private + module Options + + # Get the requested `Executor` based on the values set in the options hash. + # + # @param [Hash] opts the options defining the requested executor + # @option opts [Executor] :executor when set use the given `Executor` instance. + # Three special values are also supported: `:fast` returns the global fast executor, + # `:io` returns the global io executor, and `:immediate` returns a new + # `ImmediateExecutor` object. 
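+    # Rough illustration (not part of the upstream file) of how the dispatch
+    # below behaves; `Options` is marked `@!visibility private`, so this is
+    # internal API shown only to clarify the option handling:
+    #
+    #     Concurrent::Options.executor_from_options(executor: :io)        # the global IO executor
+    #     Concurrent::Options.executor_from_options(executor: :immediate) # the global immediate executor
+    #     Concurrent::Options.executor_from_options({})                   #=> nil (no :executor key)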
+ # + # @return [Executor, nil] the requested thread pool, or nil when no option specified + # + # @!visibility private + def self.executor_from_options(opts = {}) # :nodoc: + if identifier = opts.fetch(:executor, nil) + executor(identifier) + else + nil + end + end + + def self.executor(executor_identifier) + case executor_identifier + when :fast + Concurrent.global_fast_executor + when :io + Concurrent.global_io_executor + when :immediate + Concurrent.global_immediate_executor + when Concurrent::ExecutorService + executor_identifier + else + raise ArgumentError, "executor not recognized by '#{executor_identifier}'" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promise.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promise.rb new file mode 100644 index 0000000..f5f31eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promise.rb @@ -0,0 +1,579 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +module Concurrent + + PromiseExecutionError = Class.new(StandardError) + + # Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A) + # and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications. + # + # > A promise represents the eventual value returned from the single + # > completion of an operation. + # + # Promises are similar to futures and share many of the same behaviours. + # Promises are far more robust, however. Promises can be chained in a tree + # structure where each promise may have zero or more children. Promises are + # chained using the `then` method. The result of a call to `then` is always + # another promise. Promises are resolved asynchronously (with respect to the + # main thread) but in a strict order: parents are guaranteed to be resolved + # before their children, children before their younger siblings. The `then` + # method takes two parameters: an optional block to be executed upon parent + # resolution and an optional callable to be executed upon parent failure. The + # result of each promise is passed to each of its children upon resolution. + # When a promise is rejected all its children will be summarily rejected and + # will receive the reason. + # + # Promises have several possible states: *:unscheduled*, *:pending*, + # *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as + # `#incomplete?` and `#complete?`. When a Promise is created it is set to + # *:unscheduled*. Once the `#execute` method is called the state becomes + # *:pending*. Once a job is pulled from the thread pool's queue and is given + # to a thread for processing (often immediately upon `#post`) the state + # becomes *:processing*. The future will remain in this state until processing + # is complete. A future that is in the *:unscheduled*, *:pending*, or + # *:processing* is considered `#incomplete?`. A `#complete?` Promise is either + # *:rejected*, indicating that an exception was thrown during processing, or + # *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value` + # will be updated to reflect the result of the operation. If *:rejected* the + # `reason` will be updated with a reference to the thrown exception. 
The + # predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and + # `#fulfilled?` can be called at any time to obtain the state of the Promise, + # as can the `#state` method, which returns a symbol. + # + # Retrieving the value of a promise is done through the `value` (alias: + # `deref`) method. Obtaining the value of a promise is a potentially blocking + # operation. When a promise is *rejected* a call to `value` will return `nil` + # immediately. When a promise is *fulfilled* a call to `value` will + # immediately return the current value. When a promise is *pending* a call to + # `value` will block until the promise is either *rejected* or *fulfilled*. A + # *timeout* value can be passed to `value` to limit how long the call will + # block. If `nil` the call will block indefinitely. If `0` the call will not + # block. Any other integer or float value will indicate the maximum number of + # seconds to block. + # + # Promises run on the global thread pool. + # + # @!macro copy_options + # + # ### Examples + # + # Start by requiring promises + # + # ```ruby + # require 'concurrent' + # ``` + # + # Then create one + # + # ```ruby + # p = Concurrent::Promise.execute do + # # do something + # 42 + # end + # ``` + # + # Promises can be chained using the `then` method. The `then` method accepts a + # block and an executor, to be executed on fulfillment, and a callable argument to be executed + # on rejection. The result of the each promise is passed as the block argument + # to chained promises. + # + # ```ruby + # p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute + # ``` + # + # And so on, and so on, and so on... + # + # ```ruby + # p = Concurrent::Promise.fulfill(20). + # then{|result| result - 10 }. + # then{|result| result * 3 }. + # then(executor: different_executor){|result| result % 5 }.execute + # ``` + # + # The initial state of a newly created Promise depends on the state of its parent: + # - if parent is *unscheduled* the child will be *unscheduled* + # - if parent is *pending* the child will be *pending* + # - if parent is *fulfilled* the child will be *pending* + # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*) + # + # Promises are executed asynchronously from the main thread. By the time a + # child Promise finishes intialization it may be in a different state than its + # parent (by the time a child is created its parent may have completed + # execution and changed state). Despite being asynchronous, however, the order + # of execution of Promise objects in a chain (or tree) is strictly defined. + # + # There are multiple ways to create and execute a new `Promise`. Both ways + # provide identical behavior: + # + # ```ruby + # # create, operate, then execute + # p1 = Concurrent::Promise.new{ "Hello World!" } + # p1.state #=> :unscheduled + # p1.execute + # + # # create and immediately execute + # p2 = Concurrent::Promise.new{ "Hello World!" }.execute + # + # # execute during creation + # p3 = Concurrent::Promise.execute{ "Hello World!" } + # ``` + # + # Once the `execute` method is called a `Promise` becomes `pending`: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # p.state #=> :pending + # p.pending? #=> true + # ``` + # + # Wait a little bit, and the promise will resolve and provide a value: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # sleep(0.1) + # + # p.state #=> :fulfilled + # p.fulfilled? #=> true + # p.value #=> "Hello, world!" 
+ # ``` + # + # If an exception occurs, the promise will be rejected and will provide + # a reason for the rejection: + # + # ```ruby + # p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") } + # sleep(0.1) + # + # p.state #=> :rejected + # p.rejected? #=> true + # p.reason #=> "#" + # ``` + # + # #### Rejection + # + # When a promise is rejected all its children will be rejected and will + # receive the rejection `reason` as the rejection callable parameter: + # + # ```ruby + # p = Concurrent::Promise.execute { Thread.pass; raise StandardError } + # + # c1 = p.then(-> reason { 42 }) + # c2 = p.then(-> reason { raise 'Boom!' }) + # + # c1.wait.state #=> :fulfilled + # c1.value #=> 45 + # c2.wait.state #=> :rejected + # c2.reason #=> # + # ``` + # + # Once a promise is rejected it will continue to accept children that will + # receive immediately rejection (they will be executed asynchronously). + # + # #### Aliases + # + # The `then` method is the most generic alias: it accepts a block to be + # executed upon parent fulfillment and a callable to be executed upon parent + # rejection. At least one of them should be passed. The default block is `{ + # |result| result }` that fulfills the child with the parent value. The + # default callable is `{ |reason| raise reason }` that rejects the child with + # the parent reason. + # + # - `on_success { |result| ... }` is the same as `then {|result| ... }` + # - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )` + # - `rescue` is aliased by `catch` and `on_error` + class Promise < IVar + + # Initialize a new Promise with the provided options. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @option opts [Promise] :parent the parent `Promise` when building a chain/tree + # @option opts [Proc] :on_fulfill fulfillment handler + # @option opts [Proc] :on_reject rejection handler + # @option opts [object, Array] :args zero or more arguments to be passed + # the task block on execution + # + # @yield The block operation to be performed asynchronously. + # + # @raise [ArgumentError] if no block is given + # + # @see http://wiki.commonjs.org/wiki/Promises/A + # @see http://promises-aplus.github.io/promises-spec/ + def initialize(opts = {}, &block) + opts.delete_if { |k, v| v.nil? } + super(NULL, opts.merge(__promise_body_from_block__: block), &nil) + end + + # Create a new `Promise` and fulfill it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.fulfill(value, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) } + end + + # Create a new `Promise` and reject it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.reject(reason, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) } + end + + # Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Promise` is in any state other than `:unscheduled`. + # + # @return [Promise] a reference to `self` + def execute + if root? 
+ if compare_and_set_state(:pending, :unscheduled) + set_pending + realize(@promise_body) + end + else + @parent.execute + end + self + end + + # @!macro ivar_set_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def set(value = NULL, &block) + raise PromiseExecutionError.new('supported only on root promise') unless root? + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @promise_body = block || Proc.new { |result| value } + end + end + execute + end + + # @!macro ivar_fail_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def fail(reason = StandardError.new) + set { raise reason } + end + + # Create a new `Promise` object with the given block, execute it, and return the + # `:pending` object. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @return [Promise] the newly created `Promise` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + # + # @example + # promise = Concurrent::Promise.execute{ sleep(1); 42 } + # promise.state #=> :pending + def self.execute(opts = {}, &block) + new(opts, &block).execute + end + + # Chain a new promise off the current promise. + # + # @return [Promise] the new promise + # @yield The block operation to be performed asynchronously. + # @overload then(rescuer, executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + # @overload then(rescuer, executor: executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + def then(*args, &block) + if args.last.is_a?(::Hash) + executor = args.pop[:executor] + rescuer = args.first + else + rescuer, executor = args + end + + executor ||= @executor + + raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given? + block = Proc.new { |result| result } unless block_given? + child = Promise.new( + parent: self, + executor: executor, + on_fulfill: block, + on_reject: rescuer + ) + + synchronize do + child.state = :pending if @state == :pending + child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled + child.on_reject(@reason) if @state == :rejected + @children << child + end + + child + end + + # Chain onto this promise an action to be undertaken on success + # (fulfillment). + # + # @yield The block to execute + # + # @return [Promise] self + def on_success(&block) + raise ArgumentError.new('no block given') unless block_given? + self.then(&block) + end + + # Chain onto this promise an action to be undertaken on failure + # (rejection). + # + # @yield The block to execute + # + # @return [Promise] self + def rescue(&block) + self.then(block) + end + + alias_method :catch, :rescue + alias_method :on_error, :rescue + + # Yield the successful result to the block that returns a promise. If that + # promise is also successful the result is the result of the yielded promise. + # If either part fails the whole also fails. + # + # @example + # Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! 
#=> 3 + # + # @return [Promise] + def flat_map(&block) + child = Promise.new( + parent: self, + executor: ImmediateExecutor.new, + ) + + on_error { |e| child.on_reject(e) } + on_success do |result1| + begin + inner = block.call(result1) + inner.execute + inner.on_success { |result2| child.on_fulfill(result2) } + inner.on_error { |e| child.on_reject(e) } + rescue => e + child.on_reject(e) + end + end + + child + end + + # Builds a promise that produces the result of promises in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] promises + # + # @overload zip(*promises, opts) + # @param [Array] promises + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def self.zip(*promises) + opts = promises.last.is_a?(::Hash) ? promises.pop.dup : {} + opts[:executor] ||= ImmediateExecutor.new + zero = if !opts.key?(:execute) || opts.delete(:execute) + fulfill([], opts) + else + Promise.new(opts) { [] } + end + + promises.reduce(zero) do |p1, p2| + p1.flat_map do |results| + p2.then do |next_result| + results << next_result + end + end + end + end + + # Builds a promise that produces the result of self and others in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] others + # + # @overload zip(*promises, opts) + # @param [Array] others + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def zip(*others) + self.class.zip(self, *others) + end + + # Aggregates a collection of promises and executes the `then` condition + # if all aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. + # + # @!macro promise_self_aggregate + # + # The returned promise will not yet have been executed. Additional `#then` + # and `#rescue` handlers may still be provided. Once the returned promise + # is execute the aggregate promises will be also be executed (if they have + # not been executed already). The results of the aggregate promises will + # be checked upon completion. The necessary `#then` and `#rescue` blocks + # on the aggregating promise will then be executed as appropriate. If the + # `#rescue` handlers are executed the raises exception will be + # `Concurrent::PromiseExecutionError`. + # + # @param [Array] promises Zero or more promises to aggregate + # @return [Promise] an unscheduled (not executed) promise that aggregates + # the promises given as arguments + def self.all?(*promises) + aggregate(:all?, *promises) + end + + # Aggregates a collection of promises and executes the `then` condition + # if any aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. 
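+    # A hedged example of the aggregation described above (illustrative only,
+    # not from the upstream docs); exact timing depends on the global thread pool:
+    #
+    #     p1  = Concurrent::Promise.execute { 1 }
+    #     p2  = Concurrent::Promise.execute { 2 }
+    #     agg = Concurrent::Promise.all?(p1, p2).then { 'both fulfilled' }.execute
+    #     agg.value #=> 'both fulfilled' once p1 and p2 have fulfilled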
+ # + # @!macro promise_self_aggregate + def self.any?(*promises) + aggregate(:any?, *promises) + end + + protected + + def ns_initialize(value, opts) + super + + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + + @parent = opts.fetch(:parent) { nil } + @on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } } + @on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } } + + @promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result } + @state = :unscheduled + @children = [] + end + + # Aggregate a collection of zero or more promises under a composite promise, + # execute the aggregated promises and collect them into a standard Ruby array, + # call the given Ruby `Ennnumerable` predicate (such as `any?`, `all?`, `none?`, + # or `one?`) on the collection checking for the success or failure of each, + # then executing the composite's `#then` handlers if the predicate returns + # `true` or executing the composite's `#rescue` handlers if the predicate + # returns false. + # + # @!macro promise_self_aggregate + def self.aggregate(method, *promises) + composite = Promise.new do + completed = promises.collect do |promise| + promise.execute if promise.unscheduled? + promise.wait + promise + end + unless completed.empty? || completed.send(method){|promise| promise.fulfilled? } + raise PromiseExecutionError + end + end + composite + end + + # @!visibility private + def set_pending + synchronize do + @state = :pending + @children.each { |c| c.set_pending } + end + end + + # @!visibility private + def root? # :nodoc: + @parent.nil? + end + + # @!visibility private + def on_fulfill(result) + realize Proc.new { @on_fulfill.call(result) } + nil + end + + # @!visibility private + def on_reject(reason) + realize Proc.new { @on_reject.call(reason) } + nil + end + + # @!visibility private + def notify_child(child) + if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) } + if_state(:rejected) { child.on_reject(@reason) } + end + + # @!visibility private + def complete(success, value, reason) + children_to_notify = synchronize do + set_state!(success, value, reason) + @children.dup + end + + children_to_notify.each { |child| notify_child(child) } + observers.notify_and_delete_observers{ [Time.now, self.value, reason] } + end + + # @!visibility private + def realize(task) + @executor.post do + success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, value, reason) + end + end + + # @!visibility private + def set_state!(success, value, reason) + set_state(success, value, reason) + event.set + end + + # @!visibility private + def synchronized_set_state!(success, value, reason) + synchronize { set_state!(success, value, reason) } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promises.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promises.rb new file mode 100644 index 0000000..76af4d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promises.rb @@ -0,0 +1,2167 @@ +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/collection/lock_free_stack' +require 'concurrent/errors' +require 'concurrent/re_include' + +module Concurrent + + # {include:file:docs-source/promises-main.md} + 
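+  # A short usage sketch (illustrative, not part of the included docs above):
+  #
+  #     require 'concurrent'
+  #
+  #     f = Concurrent::Promises.future(2) { |v| v * 3 } # runs on the :io executor
+  #     f.then { |v| v + 1 }.value!                      #=> 7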
module Promises + + # @!macro promises.param.default_executor + # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the + # global executor. Default executor propagates to chained futures unless overridden with + # executor parameter or changed with {AbstractEventFuture#with_default_executor}. + # + # @!macro promises.param.executor + # @param [Executor, :io, :fast] executor Instance of an executor or a name of the + # global executor. The task is executed on it, default executor remains unchanged. + # + # @!macro promises.param.args + # @param [Object] args arguments which are passed to the task when it's executed. + # (It might be prepended with other arguments, see the @yeild section). + # + # @!macro promises.shortcut.on + # Shortcut of {#$0_on} with default `:io` executor supplied. + # @see #$0_on + # + # @!macro promises.shortcut.using + # Shortcut of {#$0_using} with default `:io` executor supplied. + # @see #$0_using + # + # @!macro promise.param.task-future + # @yieldreturn will become result of the returned Future. + # Its returned value becomes {Future#value} fulfilling it, + # raised exception becomes {Future#reason} rejecting it. + # + # @!macro promise.param.callback + # @yieldreturn is forgotten. + + # Container of all {Future}, {Event} factory methods. They are never constructed directly with + # new. + module FactoryMethods + extend ReInclude + extend self + + module Configuration + # @return [Executor, :io, :fast] the executor which is used when none is supplied + # to a factory method. The method can be overridden in the receivers of + # `include FactoryMethod` + def default_executor + :io + end + end + + include Configuration + + # @!macro promises.shortcut.on + # @return [ResolvableEvent] + def resolvable_event + resolvable_event_on default_executor + end + + # Created resolvable event, user is responsible for resolving the event once by + # {Promises::ResolvableEvent#resolve}. + # + # @!macro promises.param.default_executor + # @return [ResolvableEvent] + def resolvable_event_on(default_executor = self.default_executor) + ResolvableEventPromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [ResolvableFuture] + def resolvable_future + resolvable_future_on default_executor + end + + # Creates resolvable future, user is responsible for resolving the future once by + # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill}, + # or {Promises::ResolvableFuture#reject} + # + # @!macro promises.param.default_executor + # @return [ResolvableFuture] + def resolvable_future_on(default_executor = self.default_executor) + ResolvableFuturePromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def future(*args, &task) + future_on(default_executor, *args, &task) + end + + # Constructs new Future which will be resolved after block is evaluated on default executor. + # Evaluation begins immediately. + # + # @!macro promises.param.default_executor + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + def future_on(default_executor, *args, &task) + ImmediateEventPromise.new(default_executor).future.then(*args, &task) + end + + # Creates resolved future with will be either fulfilled with the given value or rejection with + # the given reason. 
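+      # Illustrative sketch of the three resolved-future factories defined
+      # here (not from the upstream docs):
+      #
+      #     Concurrent::Promises.fulfilled_future(:val).value                      #=> :val
+      #     Concurrent::Promises.rejected_future(StandardError.new('boom')).reason # the error
+      #     Concurrent::Promises.resolved_future(true, :val, nil).fulfilled?       #=> true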
+ # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promises.param.default_executor + # @return [Future] + def resolved_future(fulfilled, value, reason, default_executor = self.default_executor) + ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future + end + + # Creates resolved future with will be fulfilled with the given value. + # + # @!macro promises.param.default_executor + # @param [Object] value + # @return [Future] + def fulfilled_future(value, default_executor = self.default_executor) + resolved_future true, value, nil, default_executor + end + + # Creates resolved future with will be rejected with the given reason. + # + # @!macro promises.param.default_executor + # @param [Object] reason + # @return [Future] + def rejected_future(reason, default_executor = self.default_executor) + resolved_future false, nil, reason, default_executor + end + + # Creates resolved event. + # + # @!macro promises.param.default_executor + # @return [Event] + def resolved_event(default_executor = self.default_executor) + ImmediateEventPromise.new(default_executor).event + end + + # General constructor. Behaves differently based on the argument's type. It's provided for convenience + # but it's better to be explicit. + # + # @see rejected_future, resolved_event, fulfilled_future + # @!macro promises.param.default_executor + # @return [Event, Future] + # + # @overload make_future(nil, default_executor = self.default_executor) + # @param [nil] nil + # @return [Event] resolved event. + # + # @overload make_future(a_future, default_executor = self.default_executor) + # @param [Future] a_future + # @return [Future] a future which will be resolved when a_future is. + # + # @overload make_future(an_event, default_executor = self.default_executor) + # @param [Event] an_event + # @return [Event] an event which will be resolved when an_event is. + # + # @overload make_future(exception, default_executor = self.default_executor) + # @param [Exception] exception + # @return [Future] a rejected future with the exception as its reason. + # + # @overload make_future(value, default_executor = self.default_executor) + # @param [Object] value when none of the above overloads fits + # @return [Future] a fulfilled future with the value. + def make_future(argument = nil, default_executor = self.default_executor) + case argument + when AbstractEventFuture + # returning wrapper would change nothing + argument + when Exception + rejected_future argument, default_executor + when nil + resolved_event default_executor + else + fulfilled_future argument, default_executor + end + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def delay(*args, &task) + delay_on default_executor, *args, &task + end + + # Creates new event or future which is resolved only after it is touched, + # see {Concurrent::AbstractEventFuture#touch}. + # + # @!macro promises.param.default_executor + # @overload delay_on(default_executor, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload delay_on(default_executor) + # If no task is provided, it returns an {Event} + # @return [Event] + def delay_on(default_executor, *args, &task) + event = DelayPromise.new(default_executor).event + task ? 
event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def schedule(intended_time, *args, &task) + schedule_on default_executor, intended_time, *args, &task + end + + # Creates new event or future which is resolved in intended_time. + # + # @!macro promises.param.default_executor + # @!macro promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. + # @overload schedule_on(default_executor, intended_time, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload schedule_on(default_executor, intended_time) + # If no task is provided, it returns an {Event} + # @return [Event] + def schedule_on(default_executor, intended_time, *args, &task) + event = ScheduledPromise.new(default_executor, intended_time).event + task ? event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. + # @!macro promises.event-conversion + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # @!macro promises.shortcut.on + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on default_executor, *futures_and_or_events + end + + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on default_executor, *futures_and_or_events + end + + alias_method :any, :any_resolved_future + + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. + # @!macro promises.any-touch + # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. 
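+      # Illustrative sketch (not from the upstream docs) of `zip` and the
+      # `any_*` combinators defined in this section:
+      #
+      #     a = Concurrent::Promises.fulfilled_future(1)
+      #     b = Concurrent::Promises.fulfilled_future(2)
+      #     Concurrent::Promises.zip(a, b).value                                              #=> [1, 2]
+      #     Concurrent::Promises.any_resolved_future(a, Concurrent::Promises.resolvable_future).value #=> 1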
+ # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_event(*futures_and_or_events) + any_event_on default_executor, *futures_and_or_events + end + + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + # TODO or rather a generic aggregator taking a function + end + + module InternalStates + # @!visibility private + class State + def resolved? + raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + # @!visibility private + class Pending < State + def resolved? + false + end + + def to_sym + :pending + end + end + + # @!visibility private + class Reserved < Pending + end + + # @!visibility private + class ResolvedWithResult < State + def resolved? + true + end + + def to_sym + :resolved + end + + def result + [fulfilled?, value, reason] + end + + def fulfilled? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + # @!visibility private + class Fulfilled < ResolvedWithResult + + def initialize(value) + @Value = value + end + + def fulfilled? + true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :fulfilled + end + end + + # @!visibility private + class FulfilledArray < Fulfilled + def apply(args, block) + block.call(*value, *args) + end + end + + # @!visibility private + class Rejected < ResolvedWithResult + def initialize(reason) + @Reason = reason + end + + def fulfilled? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :rejected + end + + def apply(args, block) + block.call reason, *args + end + end + + # @!visibility private + class PartiallyRejected < ResolvedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def fulfilled? 
+ false + end + + def to_sym + :rejected + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + # @!visibility private + PENDING = Pending.new + # @!visibility private + RESERVED = Reserved.new + # @!visibility private + RESOLVED = Fulfilled.new(nil) + + def RESOLVED.to_sym + :resolved + end + end + + private_constant :InternalStates + + # @!macro promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro promises.warn.blocks + # @note This function potentially blocks current thread until the Future is resolved. + # Be careful it can deadlock. Try to chain instead. + + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. + class AbstractEventFuture < Synchronization::Object + safe_initialization! + attr_atomic(:internal_state) + private :internal_state=, :swap_internal_state, :compare_and_set_internal_state, :update_internal_state + # @!method internal_state + # @!visibility private + + include InternalStates + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + @Callbacks = LockFreeStack.new + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + private :initialize + + # Returns its state. + # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :resolved] + # @overload a_future.state + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] + def state + internal_state.to_sym + end + + # Is it in pending state? + # @return [Boolean] + def pending? + !internal_state.resolved? + end + + # Is it in resolved state? + # @return [Boolean] + def resolved? + internal_state.resolved? + end + + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring resolved state, like {#wait}. + # @return [self] + def touch + @Promise.touch + self + end + + # @!macro promises.touches + # Calls {Concurrent::AbstractEventFuture#touch}. + + # @!macro promises.method.wait + # Wait (block the Thread) until receiver is {#resolved?}. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [self, true, false] self implies timeout was not used, true implies timeout was used + # and it was resolved, false implies it was not resolved within timeout. + def wait(timeout = nil) + result = wait_until_resolved(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor + # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on + # @see similar + def default_executor + @DefaultExecutor + end + + # @!macro promises.shortcut.on + # @return [Future] + def chain(*args, &task) + chain_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_on(executor, *args, &task) + # @yield [*args] to the task. 
+ # @overload a_future.chain_on(executor, *args, &task) + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def chain_on(executor, *args, &task) + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @return [String] Short string representation. + def to_s + format '%s %s>', super[0..-2], state + end + + alias_method :inspect, :to_s + + # Resolves the resolvable when receiver is resolved. + # + # @param [Resolvable] resolvable + # @return [self] + def chain_resolvable(resolvable) + on_resolution! { resolvable.resolve_with internal_state } + end + + alias_method :tangle, :chain_resolvable + + # @!macro promises.shortcut.using + # @return [self] + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution_using(executor, *args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution_using(executor, *args, &callback) + add_callback :async_callback_on_resolution, executor, args, callback + end + + # @!macro promises.method.with_default_executor + # Crates new object with same class with the executor set as its new default executor. + # Any futures depending on it will use the new default executor. + # @!macro promises.shortcut.event-future + # @abstract + # @return [AbstractEventFuture] + def with_default_executor(executor) + raise NotImplementedError + end + + # @!visibility private + def resolve_with(state, raise_on_reassign = true, reserved = false) + if compare_and_set_internal_state(reserved ? RESERVED : PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks state + else + return rejected_resolution(raise_on_reassign, state) + end + self + end + + # For inspection. + # @!visibility private + # @return [Array] + def blocks + @Callbacks.each_with_object([]) do |(method, args), promises| + promises.push(args[0]) if method == :callback_notify_blocked + end + end + + # For inspection. + # @!visibility private + def callbacks + @Callbacks.each.to_a + end + + # For inspection. + # @!visibility private + def promise + @Promise + end + + # For inspection. + # @!visibility private + def touched? + promise.touched? + end + + # For inspection. 
+ # @!visibility private + def waiting_threads + @Waiters.each.to_a + end + + # @!visibility private + def add_callback_notify_blocked(promise, index) + add_callback :callback_notify_blocked, promise, index + end + + # @!visibility private + def add_callback_clear_delayed_node(node) + add_callback(:callback_clear_delayed_node, node) + end + + # @!visibility private + def with_hidden_resolvable + # TODO (pitr-ch 10-Dec-2018): documentation, better name if in edge + self + end + + private + + def add_callback(method, *args) + state = internal_state + if state.resolved? + call_callback method, state, args + else + @Callbacks.push [method, args] + state = internal_state + # take back if it was resolved in the meanwhile + call_callbacks state if state.resolved? + end + self + end + + def callback_clear_delayed_node(state, node) + node.value = nil + end + + # @return [Boolean] + def wait_until_resolved(timeout) + return true if resolved? + + touch + + @Lock.synchronize do + @Waiters.increment + begin + unless resolved? + @Condition.wait @Lock, timeout + end + ensure + # JRuby may raise ConcurrencyError + @Waiters.decrement + end + end + resolved? + end + + def call_callback(method, state, args) + self.send method, state, *args + end + + def call_callbacks(state) + method, args = @Callbacks.pop + while method + call_callback method, state, args + method, args = @Callbacks.pop + end + end + + def with_async(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) + end + + def async_callback_on_resolution(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_resolution st, ar, cb + end + end + + def callback_notify_blocked(state, promise, index) + promise.on_blocker_resolution self, index + end + end + + # Represents an event which will happen in future (will be resolved). The event is either + # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and + # cancellation. + class Event < AbstractEventFuture + + alias_method :then, :chain + + + # @!macro promises.method.zip + # Creates a new event or a future which will be resolved when receiver and other are. + # Returns an event if receiver and other are events, otherwise returns a future. + # If just one of the parties is Future then the result + # of the returned future is equal to the result of the supplied future. If both are futures + # then the result is as described in {FactoryMethods#zip_futures_on}. + # + # @return [Future, Event] + def zip(other) + if other.is_a?(Future) + ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. + # + # @return [Event] + def any(event_or_future) + AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event + end + + alias_method :|, :any + + # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Event] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end + + # @!macro promise.method.schedule + # Creates new event dependent on receiver scheduled to execute on/in intended_time. 
+ # In time is interpreted from the moment the receiver is resolved, therefore it inserts + # delay into the chain. + # + # @!macro promises.param.intended_time + # @return [Event] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end.flat_event + end + + # Converts event to a future. The future is fulfilled when the event is resolved, the future may never fail. + # + # @return [Future] + def to_future + future = Promises.resolvable_future + ensure + chain_resolvable(future) + end + + # Returns self, since this is event + # @return [Event] + def to_event + self + end + + # @!macro promises.method.with_default_executor + # @return [Event] + def with_default_executor(executor) + EventWrapperPromise.new_blocked_by1(self, executor).event + end + + private + + def rejected_resolution(raise_on_reassign, state) + Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign + return false + end + + def callback_on_resolution(state, args, callback) + callback.call(*args) + end + end + + # Represents a value which will become available in future. May reject with a reason instead, + # e.g. when the tasks raises an exception. + class Future < AbstractEventFuture + + # Is it in fulfilled state? + # @return [Boolean] + def fulfilled? + state = internal_state + state.resolved? && state.fulfilled? + end + + # Is it in rejected state? + # @return [Boolean] + def rejected? + state = internal_state + state.resolved? && !state.fulfilled? + end + + # @!macro promises.warn.nil + # @note Make sure returned `nil` is not confused with timeout, no value when rejected, + # no reason when fulfilled, etc. + # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. + + # @!macro promises.method.value + # Return value of the future. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @!macro promises.param.timeout_value + # @param [Object] timeout_value a value returned by the method when it times out + # @return [Object, nil, timeout_value] the value of the Future when fulfilled, + # timeout_value on timeout, + # nil on rejection. + def value(timeout = nil, timeout_value = nil) + if wait_until_resolved timeout + internal_state.value + else + timeout_value + end + end + + # Returns reason of future's rejection. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @!macro promises.param.timeout_value + # @return [Object, timeout_value] the reason, or timeout_value on timeout, or nil on fulfillment. + def reason(timeout = nil, timeout_value = nil) + if wait_until_resolved timeout + internal_state.reason + else + timeout_value + end + end + + # Returns triplet fulfilled?, value, reason. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Array(Boolean, Object, Object), nil] triplet of fulfilled?, value, reason, or nil + # on timeout. + def result(timeout = nil) + internal_state.result if wait_until_resolved timeout + end + + # @!macro promises.method.wait + # @raise [Exception] {#reason} on rejection + def wait!(timeout = nil) + result = wait_until_resolved!(timeout) + timeout ? 
result : self + end + + # @!macro promises.method.value + # @return [Object, nil, timeout_value] the value of the Future when fulfilled, + # or nil on rejection, + # or timeout_value on timeout. + # @raise [Exception] {#reason} on rejection + def value!(timeout = nil, timeout_value = nil) + if wait_until_resolved! timeout + internal_state.value + else + timeout_value + end + end + + # Allows rejected Future to be risen with `raise` method. + # If the reason is not an exception `Runtime.new(reason)` is returned. + # + # @example + # raise Promises.rejected_future(StandardError.new("boom")) + # raise Promises.rejected_future("or just boom") + # @raise [Concurrent::Error] when raising not rejected future + # @return [Exception] + def exception(*args) + raise Concurrent::Error, 'it is not rejected' unless rejected? + raise ArgumentError unless args.size <= 1 + reason = Array(internal_state.reason).flatten.compact + if reason.size > 1 + ex = Concurrent::MultipleErrors.new reason + ex.set_backtrace(caller) + ex + else + ex = if reason[0].respond_to? :exception + reason[0].exception(*args) + else + RuntimeError.new(reason[0]).exception(*args) + end + ex.set_backtrace Array(ex.backtrace) + caller + ex + end + end + + # @!macro promises.shortcut.on + # @return [Future] + def then(*args, &task) + then_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it fulfills. Does not run + # the task if it rejects. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [value, *args] to the task. + def then_on(executor, *args, &task) + ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def rescue(*args, &task) + rescue_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it rejects. Does not run + # the task if it fulfills. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [reason, *args] to the task. + def rescue_on(executor, *args, &task) + RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.method.zip + # @return [Future] + def zip(other) + if other.is_a?(Future) + ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future + else + ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. Returning future will have value nil if event_or_future is event and resolves + # first. + # + # @return [Future] + def any(event_or_future) + AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future + end + + alias_method :|, :any + + # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. 
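+ # A rough sketch of the laziness described above; the chain, names and values are
+ # illustrative only.
+ # @example
+ #   lazy = Concurrent::Promises.future { 1 }.delay.then { |v| v + 1 }
+ #   lazy.resolved? # => false, nothing past the delay has run yet
+ #   lazy.value     # => 2, a blocking reader touches the chain and evaluates it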
+ # + # @return [Future] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end + + # @!macro promise.method.schedule + # @return [Future] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end.flat + end + + # @!macro promises.method.with_default_executor + # @return [Future] + def with_default_executor(executor) + FutureWrapperPromise.new_blocked_by1(self, executor).future + end + + # Creates new future which will have result of the future returned by receiver. If receiver + # rejects it will have its rejection. + # + # @param [Integer] level how many levels of futures should flatten + # @return [Future] + def flat_future(level = 1) + FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future + end + + alias_method :flat, :flat_future + + # Creates new event which will be resolved when the returned event by receiver is. + # Be careful if the receiver rejects it will just resolve since Event does not hold reason. + # + # @return [Event] + def flat_event + FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # @!macro promises.shortcut.using + # @return [self] + def on_fulfillment(*args, &callback) + on_fulfillment_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment!(*args, &callback) + add_callback :callback_on_fulfillment, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment_using(executor, *args, &callback) + add_callback :async_callback_on_fulfillment, executor, args, callback + end + + # @!macro promises.shortcut.using + # @return [self] + def on_rejection(*args, &callback) + on_rejection_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection!(*args, &callback) + add_callback :callback_on_rejection, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection_using(executor, *args, &callback) + add_callback :async_callback_on_rejection, executor, args, callback + end + + # Allows to use futures as green threads. The receiver has to evaluate to a future which + # represents what should be done next. It basically flattens indefinitely until non Future + # values is returned which becomes result of the returned future. Any encountered exception + # will become reason of the returned future. 
+ # + # @return [Future] + # @param [#call(value)] run_test + # an object which when called returns either Future to keep running with + # or nil, then the run completes with the value. + # The run_test can be used to extract the Future from deeper structure, + # or to distinguish Future which is a resulting value from a future + # which is suppose to continue running. + # @example + # body = lambda do |v| + # v += 1 + # v < 5 ? Promises.future(v, &body) : v + # end + # Promises.future(0, &body).run.value! # => 5 + def run(run_test = method(:run_test)) + RunFuturePromise.new_blocked_by1(self, @DefaultExecutor, run_test).future + end + + # @!visibility private + def apply(args, block) + internal_state.apply args, block + end + + # Converts future to event which is resolved when future is resolved by fulfillment or rejection. + # + # @return [Event] + def to_event + event = Promises.resolvable_event + ensure + chain_resolvable(event) + end + + # Returns self, since this is a future + # @return [Future] + def to_future + self + end + + # @return [String] Short string representation. + def to_s + if resolved? + format '%s with %s>', super[0..-2], (fulfilled? ? value : reason).inspect + else + super + end + end + + alias_method :inspect, :to_s + + private + + def run_test(v) + v if v.is_a?(Future) + end + + def rejected_resolution(raise_on_reassign, state) + if raise_on_reassign + if internal_state == RESERVED + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It is already reserved.") + else + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", + current_result: result, + new_result: state.result) + end + end + return false + end + + def wait_until_resolved!(timeout = nil) + result = wait_until_resolved(timeout) + raise self if rejected? + result + end + + def async_callback_on_fulfillment(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_fulfillment st, ar, cb + end + end + + def async_callback_on_rejection(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_rejection st, ar, cb + end + end + + def callback_on_fulfillment(state, args, callback) + state.apply args, callback if state.fulfilled? + end + + def callback_on_rejection(state, args, callback) + state.apply args, callback unless state.fulfilled? + end + + def callback_on_resolution(state, args, callback) + callback.call(*state.result, *args) + end + + end + + # Marker module of Future, Event resolved manually. + module Resolvable + include InternalStates + end + + # A Event which can be resolved by user. + class ResolvableEvent < Event + include Resolvable + + # @!macro raise_on_reassign + # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. + + # @!macro promise.param.raise_on_reassign + # @param [Boolean] raise_on_reassign should method raise exception if already resolved + # @return [self, false] false is returner when raise_on_reassign is false and the receiver + # is already resolved. + # + + # Makes the event resolved, which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + # @param [true, false] reserved + # Set to true if the resolvable is {#reserve}d by you, + # marks resolution of reserved resolvable events and futures explicitly. 
+ # Advanced feature, ignore unless you use {Resolvable#reserve} from edge. + def resolve(raise_on_reassign = true, reserved = false) + resolve_with RESOLVED, raise_on_reassign, reserved + end + + # Creates new event wrapping receiver, effectively hiding the resolve method. + # + # @return [Event] + def with_hidden_resolvable + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. + # + # @param [true, false] resolve_on_timeout + # If it times out and the argument is true it will also resolve the event. + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = false) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(false) + else + false + end + end + end + + # A Future which can be resolved by user. + class ResolvableFuture < Future + include Resolvable + + # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, + # which triggers all dependent futures. + # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true, reserved = false) + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign, reserved) + end + + # Makes the future fulfilled with `value`, + # which triggers all dependent futures. + # + # @param [Object] value + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def fulfill(value, raise_on_reassign = true, reserved = false) + resolve_with Fulfilled.new(value), raise_on_reassign, reserved + end + + # Makes the future rejected with `reason`, + # which triggers all dependent futures. + # + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def reject(reason, raise_on_reassign = true, reserved = false) + resolve_with Rejected.new(reason), raise_on_reassign, reserved + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + def evaluate_to(*args, &block) + promise.evaluate_to(*args, block) + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + # @raise [Exception] also raise reason on rejection. + def evaluate_to!(*args, &block) + promise.evaluate_to(*args, block).wait! + end + + # @!macro promises.resolvable.resolve_on_timeout + # @param [::Array(true, Object, nil), ::Array(false, nil, Exception), nil] resolve_on_timeout + # If it times out and the argument is not nil it will also resolve the future + # to the provided resolution. + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. 
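+ # A rough sketch of the resolve_on_timeout triplet; the timeout value and the error
+ # used here are illustrative only, assuming Concurrent::TimeoutError is available.
+ # @example
+ #   f = Concurrent::Promises.resolvable_future
+ #   # wait up to 0.1s; if nothing resolves it in time, reject it ourselves
+ #   f.wait(0.1, [false, nil, Concurrent::TimeoutError.new])
+ #   f.rejected? # => true when the timeout path was taken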
+ # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(*resolve_on_timeout, false) + else + false + end + end + + # Behaves as {Future#wait!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @raise [Exception] {#reason} on rejection + # @see Future#wait! + def wait!(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + if resolve(*resolve_on_timeout, false) + false + else + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + raise self if rejected? + true + end + else + false + end + end + + # Behaves as {Future#value} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @see Future#value + def value(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#value!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @raise [Exception] {#reason} on rejection + # @see Future#value! + def value!(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved! timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + raise self if rejected? + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#reason} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Exception, timeout_value, nil] + # @see Future#reason + def reason(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.reason + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.reason + end + end + timeout_value + end + end + + # Behaves as {Future#result} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [::Array(Boolean, Object, Exception), nil] + # @see Future#result + def result(timeout = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.result + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + internal_state.result + end + end + # otherwise returns nil + end + end + + # Creates new future wrapping receiver, effectively hiding the resolve method and similar. 
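+ # A rough sketch of the read-only view this provides; names are illustrative only.
+ # @example
+ #   writer = Concurrent::Promises.resolvable_future
+ #   reader = writer.with_hidden_resolvable
+ #   reader.respond_to?(:fulfill) # => false, consumers cannot resolve it
+ #   writer.fulfill :done
+ #   reader.value                 # => :done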
+ # + # @return [Future] + def with_hidden_resolvable + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future + end + end + + # @abstract + # @private + class AbstractPromise < Synchronization::Object + safe_initialization! + include InternalStates + + def initialize(future) + super() + @Future = future + end + + def future + @Future + end + + alias_method :event, :future + + def default_executor + future.default_executor + end + + def state + future.state + end + + def touch + end + + def to_s + format '%s %s>', super[0..-2], @Future + end + + alias_method :inspect, :to_s + + def delayed_because + nil + end + + private + + def resolve_with(new_state, raise_on_reassign = true) + @Future.resolve_with(new_state, raise_on_reassign) + end + + # @return [Future] + def evaluate_to(*args, block) + resolve_with Fulfilled.new(block.call(*args)) + rescue Exception => error + resolve_with Rejected.new(error) + raise error unless error.is_a?(StandardError) + end + end + + class ResolvableEventPromise < AbstractPromise + def initialize(default_executor) + super ResolvableEvent.new(self, default_executor) + end + end + + class ResolvableFuturePromise < AbstractPromise + def initialize(default_executor) + super ResolvableFuture.new(self, default_executor) + end + + public :evaluate_to + end + + # @abstract + class InnerPromise < AbstractPromise + end + + # @abstract + class BlockedPromise < InnerPromise + + private_class_method :new + + def self.new_blocked_by1(blocker, *args, &block) + blocker_delayed = blocker.promise.delayed_because + promise = new(blocker_delayed, 1, *args, &block) + blocker.add_callback_notify_blocked promise, 0 + promise + end + + def self.new_blocked_by2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed_because + blocker_delayed2 = blocker2.promise.delayed_because + delayed = if blocker_delayed1 && blocker_delayed2 + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) + LockFreeStack.of2(blocker_delayed1, blocker_delayed2) + else + blocker_delayed1 || blocker_delayed2 + end + promise = new(delayed, 2, *args, &block) + blocker1.add_callback_notify_blocked promise, 0 + blocker2.add_callback_notify_blocked promise, 1 + promise + end + + def self.new_blocked_by(blockers, *args, &block) + delayed = blockers.reduce(nil) { |d, f| add_delayed d, f.promise.delayed_because } + promise = new(delayed, blockers.size, *args, &block) + blockers.each_with_index { |f, i| f.add_callback_notify_blocked promise, i } + promise + end + + def self.add_delayed(delayed1, delayed2) + if delayed1 && delayed2 + delayed1.push delayed2 + delayed1 + else + delayed1 || delayed2 + end + end + + def initialize(delayed, blockers_count, future) + super(future) + @Delayed = delayed + @Countdown = AtomicFixnum.new blockers_count + end + + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) + resolvable = resolvable?(countdown, future, index) + + on_resolvable(future, index) if resolvable + end + + def delayed_because + @Delayed + end + + def touch + clear_and_propagate_touch + end + + # for inspection only + def blocked_by + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by + end + + private + + def clear_and_propagate_touch(stack_or_element = @Delayed) + return if stack_or_element.nil? + + if stack_or_element.is_a? 
LockFreeStack + stack_or_element.clear_each { |element| clear_and_propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end + end + + # @return [true,false] if resolvable + def resolvable?(countdown, future, index) + countdown.zero? + end + + def process_on_blocker_resolution(future, index) + @Countdown.decrement + end + + def on_resolvable(resolved_future, index) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super delayed, 1, Future.new(self, default_executor) + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.fulfilled? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.rejected? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_resolvable(resolved_future, index) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to(*args, task) + end + end + end + end + + # will be immediately resolved + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).resolve_with(RESOLVED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, fulfilled, value, reason) + super Future.new(self, default_executor). + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) + end + end + + class AbstractFlatPromise < BlockedPromise + + def initialize(delayed_because, blockers_count, event_or_future) + delayed = LockFreeStack.of1(self) + super(delayed, blockers_count, event_or_future) + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @DelayedBecause = delayed_because || LockFreeStack.new + + event_or_future.add_callback_clear_delayed_node delayed.peek + end + + def touch + if @Touched.make_true + clear_and_propagate_touch @DelayedBecause + end + end + + private + + def touched? + @Touched.value + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) + end + + def add_delayed_of(future) + delayed = future.promise.delayed_because + if touched? 
+ clear_and_propagate_touch delayed + else + BlockedPromise.add_delayed @DelayedBecause, delayed + clear_and_propagate_touch @DelayedBecause if touched? + end + end + + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with RESOLVED + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + resolve_with RESOLVED + end + end + countdown + end + + end + + class FlatFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with internal_state + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor, run_test) + super delayed, 1, Future.new(self, default_executor) + @RunTest = run_test + end + + def process_on_blocker_resolution(future, index) + internal_state = future.internal_state + + unless internal_state.fulfilled? 
+ resolve_with internal_state + return 0 + end + + value = internal_state.value + continuation_future = @RunTest.call value + + if continuation_future + add_delayed_of continuation_future + continuation_future.add_callback_notify_blocked self, nil + else + resolve_with internal_state + end + + 1 + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) + @result = nil + end + + private + + def process_on_blocker_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index + end + + def on_resolvable(resolved_future, index) + resolve_with @result + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count, nil) + + on_resolvable nil, nil if blockers_count == 0 + end + + def process_on_blocker_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? 
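+ # (Each blocker writes only its own pre-allocated slot here, and the volatile
+ # @Countdown decrement in `super` happens afterwards, which appears to be the
+ # ordering the inline note below relies on.)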
+ @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index + end + + def on_resolvable(resolved_future, index) + all_fulfilled = true + values = ::Array.new(@Resolutions.size) + reasons = ::Array.new(@Resolutions.size) + + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled + end + + if all_fulfilled + resolve_with FulfilledArray.new(values) + else + resolve_with PartiallyRejected.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + + on_resolvable nil, nil if blockers_count == 0 + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + # @abstract + class AbstractAnyPromise < BlockedPromise + end + + class AnyResolvedEventPromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED, false + end + end + + class AnyResolvedFuturePromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state, false + end + end + + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise + + private + + def resolvable?(countdown, future, index) + future.fulfilled? || + # inlined super from BlockedPromise + countdown.zero? + end + end + + class DelayPromise < InnerPromise + + def initialize(default_executor) + event = Event.new(self, default_executor) + @Delayed = LockFreeStack.of1(self) + super event + event.add_callback_clear_delayed_node @Delayed.peek + end + + def touch + @Future.resolve_with RESOLVED + end + + def delayed_because + @Delayed + end + + end + + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time + @IntendedTime + else + now + @IntendedTime + end + [0, schedule_time.to_f - now.to_f].max + end + + Concurrent.global_timer_set.post(in_seconds) do + @Future.resolve_with RESOLVED + end + end + end + + extend FactoryMethods + + private_constant :AbstractPromise, + :ResolvableEventPromise, + :ResolvableFuturePromise, + :InnerPromise, + :BlockedPromise, + :BlockedTaskPromise, + :ThenPromise, + :RescuePromise, + :ChainPromise, + :ImmediateEventPromise, + :ImmediateFuturePromise, + :AbstractFlatPromise, + :FlatFuturePromise, + :FlatEventPromise, + :RunFuturePromise, + :ZipEventEventPromise, + :ZipFutureEventPromise, + :EventWrapperPromise, + :FutureWrapperPromise, + :ZipFuturesPromise, + :ZipEventsPromise, + :AbstractAnyPromise, + :AnyResolvedFuturePromise, + :AnyFulfilledFuturePromise, + :AnyResolvedEventPromise, + :DelayPromise, + :ScheduledPromise + + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/re_include.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/re_include.rb new file mode 100644 index 0000000..516d58c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/re_include.rb @@ -0,0 +1,58 @@ +module Concurrent + + # Methods form module A included to a module B, which is already included into class C, + # will not be visible in the C class. If this module is extended to B then A's methods + # are correctly made visible to C. + # + # @example + # module A + # def a + # :a + # end + # end + # + # module B1 + # end + # + # class C1 + # include B1 + # end + # + # module B2 + # extend Concurrent::ReInclude + # end + # + # class C2 + # include B2 + # end + # + # B1.send :include, A + # B2.send :include, A + # + # C1.new.respond_to? :a # => false + # C2.new.respond_to? :a # => true + module ReInclude + # @!visibility private + def included(base) + (@re_include_to_bases ||= []) << [:include, base] + super(base) + end + + # @!visibility private + def extended(base) + (@re_include_to_bases ||= []) << [:extend, base] + super(base) + end + + # @!visibility private + def include(*modules) + result = super(*modules) + modules.reverse.each do |module_being_included| + (@re_include_to_bases ||= []).each do |method, mod| + mod.send method, module_being_included + end + end + result + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/scheduled_task.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/scheduled_task.rb new file mode 100644 index 0000000..90f78b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/scheduled_task.rb @@ -0,0 +1,318 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/utility/monotonic_time' + +require 'concurrent/options' + +module Concurrent + + # `ScheduledTask` is a close relative of `Concurrent::Future` but with one + # important difference: A `Future` is set to execute as soon as possible + # whereas a `ScheduledTask` is set to execute after a specified delay. This + # implementation is loosely based on Java's + # [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html). + # It is a more feature-rich variant of {Concurrent.timer}. 
+ # + # The *intended* schedule time of task execution is set on object construction + # with the `delay` argument. The delay is a numeric (floating point or integer) + # representing a number of seconds in the future. Any other value or a numeric + # equal to or less than zero will result in an exception. The *actual* schedule + # time of task execution is set when the `execute` method is called. + # + # The constructor can also be given zero or more processing options. Currently + # the only supported options are those recognized by the + # [Dereferenceable](Dereferenceable) module. + # + # The final constructor argument is a block representing the task to be performed. + # If no block is given an `ArgumentError` will be raised. + # + # **States** + # + # `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it + # "future" behavior. This includes the expected lifecycle states. `ScheduledTask` + # has one additional state, however. While the task (block) is being executed the + # state of the object will be `:processing`. This additional state is necessary + # because it has implications for task cancellation. + # + # **Cancellation** + # + # A `:pending` task can be cancelled using the `#cancel` method. A task in any + # other state, including `:processing`, cannot be cancelled. The `#cancel` + # method returns a boolean indicating the success of the cancellation attempt. + # A cancelled `ScheduledTask` cannot be restarted. It is immutable. + # + # **Obligation and Observation** + # + # The result of a `ScheduledTask` can be obtained either synchronously or + # asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation) + # module and the + # [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html) + # module from the Ruby standard library. With one exception `ScheduledTask` + # behaves identically to [Future](Observable) with regard to these modules. + # + # @!macro copy_options + # + # @example Basic usage + # + # require 'concurrent' + # require 'thread' # for Queue + # require 'open-uri' # for open(uri) + # + # class Ticker + # def get_year_end_closing(symbol, year) + # uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m" + # data = open(uri) {|f| f.collect{|line| line.strip } } + # data[1].split(',')[4].to_f + # end + # end + # + # # Future + # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) } + # price.state #=> :pending + # sleep(1) # do other stuff + # price.value #=> 63.65 + # price.state #=> :fulfilled + # + # # ScheduledTask + # task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) } + # task.state #=> :pending + # sleep(3) # do other stuff + # task.value #=> 25.96 + # + # @example Successful task execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.state #=> :unscheduled + # task.execute + # task.state #=> pending + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> true + # task.rejected? #=> false + # task.value #=> 'What does the fox say?' + # + # @example One line creation and execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute + # task.state #=> pending + # + # task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' 
} + # task.state #=> pending + # + # @example Failed task execution + # + # task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') } + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> false + # task.rejected? #=> true + # task.value #=> nil + # task.reason #=> # + # + # @example Task execution with observation + # + # observer = Class.new{ + # def update(time, value, reason) + # puts "The task completed at #{time} with value '#{value}'" + # end + # }.new + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.add_observer(observer) + # task.execute + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?' + # + # @!macro monotonic_clock_warning + # + # @see Concurrent.timer + class ScheduledTask < IVar + include Comparable + + # The executor on which to execute the task. + # @!visibility private + attr_reader :executor + + # Schedule a task for execution at a specified future time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @yield the task to be performed + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] When no block is given + # @raise [ArgumentError] When given a time that is in the past + def initialize(delay, opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + raise ArgumentError.new('seconds must be greater than zero') if delay.to_f < 0.0 + + super(NULL, opts, &nil) + + synchronize do + ns_set_state(:unscheduled) + @parent = opts.fetch(:timer_set, Concurrent.global_timer_set) + @args = get_arguments_from(opts) + @delay = delay.to_f + @task = task + @time = nil + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + self.observers = Collection::CopyOnNotifyObserverSet.new + end + end + + # The `delay` value given at instanciation. + # + # @return [Float] the initial delay. + def initial_delay + synchronize { @delay } + end + + # The monotonic time at which the the task is scheduled to be executed. + # + # @return [Float] the schedule time or nil if `unscheduled` + def schedule_time + synchronize { @time } + end + + # Comparator which orders by schedule time. + # + # @!visibility private + def <=>(other) + schedule_time <=> other.schedule_time + end + + # Has the task been cancelled? + # + # @return [Boolean] true if the task is in the given state else false + def cancelled? + synchronize { ns_check_state?(:cancelled) } + end + + # In the task execution in progress? + # + # @return [Boolean] true if the task is in the given state else false + def processing? + synchronize { ns_check_state?(:processing) } + end + + # Cancel this task and prevent it from executing. A task can only be + # cancelled if it is pending or unscheduled. + # + # @return [Boolean] true if successfully cancelled else false + def cancel + if compare_and_set_state(:cancelled, :pending, :unscheduled) + complete(false, nil, CancelledOperationError.new) + # To avoid deadlocks this call must occur outside of #synchronize + # Changing the state above should prevent redundant calls + @parent.send(:remove_task, self) + else + false + end + end + + # Reschedule the task using the original delay and the current time. 
+ # A task can only be reset while it is `:pending`. + # + # @return [Boolean] true if successfully rescheduled else false + def reset + synchronize{ ns_reschedule(@delay) } + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @raise [ArgumentError] When given a time that is in the past + def reschedule(delay) + delay = delay.to_f + raise ArgumentError.new('seconds must be greater than zero') if delay < 0.0 + synchronize{ ns_reschedule(delay) } + end + + # Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending` + # and starts counting down toward execution. Does nothing if the `ScheduledTask` is + # in any state other than `:unscheduled`. + # + # @return [ScheduledTask] a reference to `self` + def execute + if compare_and_set_state(:pending, :unscheduled) + synchronize{ ns_schedule(@delay) } + end + self + end + + # Create a new `ScheduledTask` object with the given block, execute it, and return the + # `:pending` object. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @!macro executor_and_deref_options + # + # @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + def self.execute(delay, opts = {}, &task) + new(delay, opts, &task).execute + end + + # Execute the task. + # + # @!visibility private + def process_task + safe_execute(@task, @args) + end + + protected :set, :try_set, :fail, :complete + + protected + + # Schedule the task using the given delay and the current time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_schedule(delay) + @delay = delay + @time = Concurrent.monotonic_time + @delay + @parent.send(:post_task, self) + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_reschedule(delay) + return false unless ns_check_state?(:pending) + @parent.send(:remove_task, self) && ns_schedule(delay) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/set.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/set.rb new file mode 100644 index 0000000..602d494 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/set.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' +require 'set' + +module Concurrent + + # @!macro concurrent_set + # + # A thread-safe subclass of Set. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set` + # which is union of `a` and `b`, then it writes the union to `a`. 
+ # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#merge` instead. + # + # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set` + + + # @!macro internal_implementation_note + SetImplementation = case + when Concurrent.on_cruby? + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + ::Set + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubySet < ::Set + include JRuby::Synchronized + end + JRubySet + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxSet < ::Set + end + ThreadSafe::Util.make_synchronized_on_rbx Concurrent::RbxSet + RbxSet + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_truffleruby Concurrent::TruffleRubySet + TruffleRubySet + + else + warn 'Possibly unsupported Ruby implementation' + ::Set + end + private_constant :SetImplementation + + # @!macro concurrent_set + class Set < SetImplementation + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/settable_struct.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/settable_struct.rb new file mode 100644 index 0000000..0012352 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/settable_struct.rb @@ -0,0 +1,139 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe, write-once variation of Ruby's standard `Struct`. + # Each member can have its value set at most once, either at construction + # or any time thereafter. Attempting to assign a value to a member + # that has already been set will result in a `Concurrent::ImmutabilityError`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + # @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword + module SettableStruct + include Synchronization::AbstractStruct + + # @!macro struct_values + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? 
+ synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # @raise [Concurrent::ImmutabilityError] if the given member has already been set + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize do + unless @values[member].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[member] = value + end + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize do + unless @values[index].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[index] = value + end + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization.rb new file mode 100644 index 0000000..49c68eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization.rb @@ -0,0 +1,30 @@ +require 'concurrent/utility/engine' + +require 'concurrent/synchronization/abstract_object' +require 'concurrent/utility/native_extension_loader' # load native parts first +Concurrent.load_native_extensions + +require 'concurrent/synchronization/mri_object' +require 'concurrent/synchronization/jruby_object' +require 'concurrent/synchronization/rbx_object' +require 'concurrent/synchronization/truffleruby_object' +require 'concurrent/synchronization/object' +require 'concurrent/synchronization/volatile' + +require 'concurrent/synchronization/abstract_lockable_object' +require 'concurrent/synchronization/mutex_lockable_object' +require 'concurrent/synchronization/jruby_lockable_object' +require 'concurrent/synchronization/rbx_lockable_object' + +require 'concurrent/synchronization/lockable_object' + +require 'concurrent/synchronization/condition' +require 'concurrent/synchronization/lock' + +module Concurrent + # {include:file:docs-source/synchronization.md} + # {include:file:docs-source/synchronization-notes.md} + module Synchronization + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb 
b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb new file mode 100644 index 0000000..bc12603 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb @@ -0,0 +1,98 @@ +module Concurrent + module Synchronization + + # @!visibility private + class AbstractLockableObject < Synchronization::Object + + protected + + # @!macro synchronization_object_method_synchronize + # + # @yield runs the block synchronized against this object, + # equivalent of java's `synchronize(this) {}` + # @note can by made public in descendants if required by `public :synchronize` + def synchronize + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_wait_until + # + # Wait until condition is met or timeout passes, + # protects against spurious wake-ups. + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @yield condition to be met + # @yieldreturn [true, false] + # @return [true, false] if condition met + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait_until(timeout = nil, &condition) + # synchronize { ns_wait_until(timeout, &condition) } + # end + # ``` + def ns_wait_until(timeout = nil, &condition) + if timeout + wait_until = Concurrent.monotonic_time + timeout + loop do + now = Concurrent.monotonic_time + condition_result = condition.call + return condition_result if now >= wait_until || condition_result + ns_wait wait_until - now + end + else + ns_wait timeout until condition.call + true + end + end + + # @!macro synchronization_object_method_ns_wait + # + # Wait until another thread calls #signal or #broadcast, + # spurious wake-ups can happen. + # + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait(timeout = nil) + # synchronize { ns_wait(timeout) } + # end + # ``` + def ns_wait(timeout = nil) + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_signal + # + # Signal one waiting thread. + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def signal + # synchronize { ns_signal } + # end + # ``` + def ns_signal + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_broadcast + # + # Broadcast to all waiting threads. 
+ # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def broadcast + # synchronize { ns_broadcast } + # end + # ``` + def ns_broadcast + raise NotImplementedError + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb new file mode 100644 index 0000000..532388b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb @@ -0,0 +1,24 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class AbstractObject + + # @abstract has to be implemented based on Ruby runtime + def initialize + raise NotImplementedError + end + + # @!visibility private + # @abstract + def full_memory_barrier + raise NotImplementedError + end + + def self.attr_volatile(*names) + raise NotImplementedError + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb new file mode 100644 index 0000000..1fe90c1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb @@ -0,0 +1,171 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module AbstractStruct + + # @!visibility private + def initialize(*values) + super() + ns_initialize(*values) + end + + # @!macro struct_length + # + # Returns the number of struct members. + # + # @return [Fixnum] the number of struct members + def length + self.class::MEMBERS.length + end + alias_method :size, :length + + # @!macro struct_members + # + # Returns the struct members as an array of symbols. + # + # @return [Array] the struct members as an array of symbols + def members + self.class::MEMBERS.dup + end + + protected + + # @!macro struct_values + # + # @!visibility private + def ns_values + @values.dup + end + + # @!macro struct_values_at + # + # @!visibility private + def ns_values_at(indexes) + @values.values_at(*indexes) + end + + # @!macro struct_to_h + # + # @!visibility private + def ns_to_h + length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo} + end + + # @!macro struct_get + # + # @!visibility private + def ns_get(member) + if member.is_a? 
Integer + if member >= @values.length + raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})") + end + @values[member] + else + send(member) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + # @!macro struct_equality + # + # @!visibility private + def ns_equality(other) + self.class == other.class && self.values == other.values + end + + # @!macro struct_each + # + # @!visibility private + def ns_each + values.each{|value| yield value } + end + + # @!macro struct_each_pair + # + # @!visibility private + def ns_each_pair + @values.length.times do |index| + yield self.class::MEMBERS[index], @values[index] + end + end + + # @!macro struct_select + # + # @!visibility private + def ns_select + values.select{|value| yield value } + end + + # @!macro struct_inspect + # + # @!visibility private + def ns_inspect + struct = pr_underscore(self.class.ancestors[1]) + clazz = ((self.class.to_s =~ /^#" + end + + # @!macro struct_merge + # + # @!visibility private + def ns_merge(other, &block) + self.class.new(*self.to_h.merge(other, &block).values) + end + + # @!visibility private + def ns_initialize_copy + @values = @values.map do |val| + begin + val.clone + rescue TypeError + val + end + end + end + + # @!visibility private + def pr_underscore(clazz) + word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229 + word.gsub!(/::/, '/') + word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') + word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') + word.tr!("-", "_") + word.downcase! + word + end + + # @!visibility private + def self.define_struct_class(parent, base, name, members, &block) + clazz = Class.new(base || Object) do + include parent + self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze) + def ns_initialize(*values) + raise ArgumentError.new('struct size differs') if values.length > length + @values = values.fill(nil, values.length..length-1) + end + end + unless name.nil? + begin + parent.send :remove_const, name if parent.const_defined?(name, false) + parent.const_set(name, clazz) + clazz + rescue NameError + raise NameError.new("identifier #{name} needs to be constant") + end + end + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + @values[index] + end + end + clazz.class_exec(&block) unless block.nil? + clazz.singleton_class.send :alias_method, :[], :new + clazz + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/condition.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/condition.rb new file mode 100644 index 0000000..f704b81 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/condition.rb @@ -0,0 +1,60 @@ +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Condition < LockableObject + safe_initialization! 
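+ # Each Condition is bound to the LockableObject that created it (stored in @Lock below);
+ # a wait or signal therefore acquires both that outer lock and this object's own internal
+ # lock, which is what the "locks two objects" TODO below refers to.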
+ + # TODO (pitr 12-Sep-2015): locks two objects, improve + # TODO (pitr 26-Sep-2015): study + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8-b132/java/util/concurrent/locks/AbstractQueuedSynchronizer.java#AbstractQueuedSynchronizer.Node + + singleton_class.send :alias_method, :private_new, :new + private_class_method :new + + def initialize(lock) + super() + @Lock = lock + end + + def wait(timeout = nil) + @Lock.synchronize { ns_wait(timeout) } + end + + def ns_wait(timeout = nil) + synchronize { super(timeout) } + end + + def wait_until(timeout = nil, &condition) + @Lock.synchronize { ns_wait_until(timeout, &condition) } + end + + def ns_wait_until(timeout = nil, &condition) + synchronize { super(timeout, &condition) } + end + + def signal + @Lock.synchronize { ns_signal } + end + + def ns_signal + synchronize { super } + end + + def broadcast + @Lock.synchronize { ns_broadcast } + end + + def ns_broadcast + synchronize { super } + end + end + + class LockableObject < LockableObjectImplementation + def new_condition + Condition.private_new(self) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb new file mode 100644 index 0000000..359a032 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb @@ -0,0 +1,13 @@ +module Concurrent + module Synchronization + + if Concurrent.on_jruby? && Concurrent.java_extensions_loaded? + + # @!visibility private + # @!macro internal_implementation_note + class JRubyLockableObject < AbstractLockableObject + + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb new file mode 100644 index 0000000..da91ac5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb @@ -0,0 +1,45 @@ +module Concurrent + module Synchronization + + if Concurrent.on_jruby? && Concurrent.java_extensions_loaded? 
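+ # The definitions below are only loaded when the JRuby Java extension is available,
+ # because the generated accessors rely on instance_variable_get_volatile /
+ # instance_variable_set_volatile, which that extension provides.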
+ + # @!visibility private + module JRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + instance_variable_get_volatile(:#{ivar}) + end + + def #{name}=(value) + instance_variable_set_volatile(:#{ivar}, value) + end + RUBY + + end + names.map { |n| [n, :"#{n}="] }.flatten + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + class JRubyObject < AbstractObject + include JRubyAttrVolatile + + def initialize + # nothing to do + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lock.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lock.rb new file mode 100644 index 0000000..0dbad2e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lock.rb @@ -0,0 +1,36 @@ +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Lock < LockableObject + # TODO use JavaReentrantLock on JRuby + + public :synchronize + + def wait(timeout = nil) + synchronize { ns_wait(timeout) } + end + + public :ns_wait + + def wait_until(timeout = nil, &condition) + synchronize { ns_wait_until(timeout, &condition) } + end + + public :ns_wait_until + + def signal + synchronize { ns_signal } + end + + public :ns_signal + + def broadcast + synchronize { ns_broadcast } + end + + public :ns_broadcast + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb new file mode 100644 index 0000000..cdbe4d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb @@ -0,0 +1,74 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + LockableObjectImplementation = case + when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3) + MonitorLockableObject + when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3) + MutexLockableObject + when Concurrent.on_jruby? + JRubyLockableObject + when Concurrent.on_rbx? + RbxLockableObject + when Concurrent.on_truffleruby? + MutexLockableObject + else + warn 'Possibly unsupported Ruby implementation' + MonitorLockableObject + end + private_constant :LockableObjectImplementation + + # Safe synchronization under any Ruby implementation. + # It provides methods like {#synchronize}, {#wait}, {#signal} and {#broadcast}. + # Provides a single layer which can improve its implementation over time without changes needed to + # the classes using it. Use {Synchronization::Object} not this abstract class. + # + # @note this object does not support usage together with + # [`Thread#wakeup`](http://ruby-doc.org/core/Thread.html#method-i-wakeup) + # and [`Thread#raise`](http://ruby-doc.org/core/Thread.html#method-i-raise). + # `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and + # `Thread#wakeup` will not work on all platforms. 
+ # + # @see Event implementation as an example of this class use + # + # @example simple + # class AnClass < Synchronization::Object + # def initialize + # super + # synchronize { @value = 'asd' } + # end + # + # def value + # synchronize { @value } + # end + # end + # + # @!visibility private + class LockableObject < LockableObjectImplementation + + # TODO (pitr 12-Sep-2015): make private for c-r, prohibit subclassing + # TODO (pitr 12-Sep-2015): we inherit too much ourselves :/ + + # @!method initialize(*args, &block) + # @!macro synchronization_object_method_initialize + + # @!method synchronize + # @!macro synchronization_object_method_synchronize + + # @!method wait_until(timeout = nil, &condition) + # @!macro synchronization_object_method_ns_wait_until + + # @!method wait(timeout = nil) + # @!macro synchronization_object_method_ns_wait + + # @!method signal + # @!macro synchronization_object_method_ns_signal + + # @!method broadcast + # @!macro synchronization_object_method_ns_broadcast + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb new file mode 100644 index 0000000..4b1d6c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb @@ -0,0 +1,44 @@ +module Concurrent + module Synchronization + + # @!visibility private + module MriAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + # relying on undocumented behavior of CRuby, GVL acquire has lock which ensures visibility of ivars + # https://github.com/ruby/ruby/blob/ruby_2_2/thread_pthread.c#L204-L211 + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MriObject < AbstractObject + include MriAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb new file mode 100644 index 0000000..f288c51 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb @@ -0,0 +1,76 @@ +module Concurrent + # noinspection RubyInstanceVariableNamingConvention + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module ConditionSignalling + protected + + def ns_signal + @__Condition__.signal + self + end + + def ns_broadcast + @__Condition__.broadcast + self + end + end + + + # @!visibility private + # @!macro internal_implementation_note + class MutexLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new + end + + protected + + def synchronize + if @__Lock__.owned? 
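+ # the current thread already holds the mutex: yield directly, because ::Mutex is
+ # not reentrant and locking it again from the same thread would raise ThreadError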
+ yield + else + @__Lock__.synchronize { yield } + end + end + + def ns_wait(timeout = nil) + @__Condition__.wait @__Lock__, timeout + self + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MonitorLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond + end + + protected + + def synchronize # TODO may be a problem with lock.synchronize { lock.wait } + @__Lock__.synchronize { yield } + end + + def ns_wait(timeout = nil) + @__Condition__.wait timeout + self + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/object.rb new file mode 100644 index 0000000..0e62112 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/object.rb @@ -0,0 +1,183 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + ObjectImplementation = case + when Concurrent.on_cruby? + MriObject + when Concurrent.on_jruby? + JRubyObject + when Concurrent.on_rbx? + RbxObject + when Concurrent.on_truffleruby? + TruffleRubyObject + else + warn 'Possibly unsupported Ruby implementation' + MriObject + end + private_constant :ObjectImplementation + + # Abstract object providing final, volatile, ans CAS extensions to build other concurrent abstractions. + # - final instance variables see {Object.safe_initialization!} + # - volatile instance variables see {Object.attr_volatile} + # - volatile instance variables see {Object.attr_atomic} + class Object < ObjectImplementation + # TODO make it a module if possible + + # @!method self.attr_volatile(*names) + # Creates methods for reading and writing (as `attr_accessor` does) to a instance variable with + # volatile (Java) semantic. The instance variable should be accessed only through generated methods. + # + # @param [::Array] names of the instance variables to be volatile + # @return [::Array] names of defined method names + + # Has to be called by children. + def initialize + super + __initialize_atomic_fields__ + end + + # By calling this method on a class, it and all its children are marked to be constructed safely. Meaning that + # all writes (ivar initializations) are made visible to all readers of newly constructed object. It ensures + # same behaviour as Java's final fields. + # @example + # class AClass < Concurrent::Synchronization::Object + # safe_initialization! + # + # def initialize + # @AFinalValue = 'value' # published safely, does not have to be synchronized + # end + # end + # @return [true] + def self.safe_initialization! + # define only once, and not again in children + return if safe_initialization? + + # @!visibility private + def self.new(*args, &block) + object = super(*args, &block) + ensure + object.full_memory_barrier if object + end + + @safe_initialization = true + end + + # @return [true, false] if this class is safely initialized. + def self.safe_initialization? + @safe_initialization = false unless defined? @safe_initialization + @safe_initialization || (superclass.respond_to?(:safe_initialization?) && superclass.safe_initialization?) + end + + # For testing purposes, quite slow. 
Injects assert code to new method which will raise if class instance contains + # any instance variables with CamelCase names and isn't {.safe_initialization?}. + # @raise when offend found + # @return [true] + def self.ensure_safe_initialization_when_final_fields_are_present + Object.class_eval do + def self.new(*args, &block) + object = super(*args, &block) + ensure + has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } + if has_final_field && !safe_initialization? + raise "there was an instance of #{object.class} with final field but not marked with safe_initialization!" + end + end + end + true + end + + # Creates methods for reading and writing to a instance variable with + # volatile (Java) semantic as {.attr_volatile} does. + # The instance variable should be accessed oly through generated methods. + # This method generates following methods: `value`, `value=(new_value) #=> new_value`, + # `swap_value(new_value) #=> old_value`, + # `compare_and_set_value(expected, value) #=> true || false`, `update_value(&block)`. + # @param [::Array] names of the instance variables to be volatile with CAS. + # @return [::Array] names of defined method names. + # @!macro attr_atomic + # @!method $1 + # @return [Object] The $1. + # @!method $1=(new_$1) + # Set the $1. + # @return [Object] new_$1. + # @!method swap_$1(new_$1) + # Set the $1 to new_$1 and return the old $1. + # @return [Object] old $1 + # @!method compare_and_set_$1(expected_$1, new_$1) + # Sets the $1 to new_$1 if the current $1 is expected_$1 + # @return [true, false] + # @!method update_$1(&block) + # Updates the $1 using the block. + # @yield [Object] Calculate a new $1 using given (old) $1 + # @yieldparam [Object] old $1 + # @return [Object] new $1 + def self.attr_atomic(*names) + @__atomic_fields__ ||= [] + @__atomic_fields__ += names + safe_initialization! + define_initialize_atomic_fields + + names.each do |name| + ivar = :"@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar}.get + end + + def #{name}=(value) + #{ivar}.set value + end + + def swap_#{name}(value) + #{ivar}.swap value + end + + def compare_and_set_#{name}(expected, value) + #{ivar}.compare_and_set expected, value + end + + def update_#{name}(&block) + #{ivar}.update(&block) + end + RUBY + end + names.flat_map { |n| [n, :"#{n}=", :"swap_#{n}", :"compare_and_set_#{n}", :"update_#{n}"] } + end + + # @param [true, false] inherited should inherited volatile with CAS fields be returned? + # @return [::Array] Returns defined volatile with CAS fields on this class. + def self.atomic_attributes(inherited = true) + @__atomic_fields__ ||= [] + ((superclass.atomic_attributes if superclass.respond_to?(:atomic_attributes) && inherited) || []) + @__atomic_fields__ + end + + # @return [true, false] is the attribute with name atomic? + def self.atomic_attribute?(name) + atomic_attributes.include? 
name + end + + private + + def self.define_initialize_atomic_fields + assignments = @__atomic_fields__.map do |name| + "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = Concurrent::AtomicReference.new(nil)" + end.join("\n") + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def __initialize_atomic_fields__ + super + #{assignments} + end + RUBY + end + + private_class_method :define_initialize_atomic_fields + + def __initialize_atomic_fields__ + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb new file mode 100644 index 0000000..8dbd3c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb @@ -0,0 +1,65 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class RbxLockableObject < AbstractLockableObject + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Waiters__ = [] + @__owner__ = nil + end + + protected + + def synchronize(&block) + if @__owner__ == Thread.current + yield + else + result = nil + Rubinius.synchronize(self) do + begin + @__owner__ = Thread.current + result = yield + ensure + @__owner__ = nil + end + end + result + end + end + + def ns_wait(timeout = nil) + wchan = Rubinius::Channel.new + + begin + @__Waiters__.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout timeout + ensure + Rubinius.lock(self) + + if !signaled && !@__Waiters__.delete(wchan) + # we timed out, but got signaled afterwards, + # so pass that signal on to the next waiter + @__Waiters__.shift << true unless @__Waiters__.empty? + end + end + + self + end + + def ns_signal + @__Waiters__.shift << true unless @__Waiters__.empty? + self + end + + def ns_broadcast + @__Waiters__.shift << true until @__Waiters__.empty? 
+ self + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb new file mode 100644 index 0000000..4b23f2a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb @@ -0,0 +1,49 @@ +module Concurrent + module Synchronization + + # @!visibility private + module RbxAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + Rubinius.memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + Rubinius.memory_barrier + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + + end + + def full_memory_barrier + # Rubinius instance variables are not volatile so we need to insert barrier + # TODO (pitr 26-Nov-2015): check comments like ^ + Rubinius.memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class RbxObject < AbstractObject + include RbxAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb new file mode 100644 index 0000000..3919c76 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb @@ -0,0 +1,47 @@ +module Concurrent + module Synchronization + + # @!visibility private + module TruffleRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + full_memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + full_memory_barrier + end + RUBY + end + + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + TruffleRuby.full_memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class TruffleRubyObject < AbstractObject + include TruffleRubyAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/volatile.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/volatile.rb new file mode 100644 index 0000000..9dffa91 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/volatile.rb @@ -0,0 +1,36 @@ +module Concurrent + module Synchronization + + # Volatile adds the attr_volatile class method when included. + # + # @example + # class Foo + # include Concurrent::Synchronization::Volatile + # + # attr_volatile :bar + # + # def initialize + # self.bar = 1 + # end + # end + # + # foo = Foo.new + # foo.bar + # => 1 + # foo.bar = 2 + # => 2 + + Volatile = case + when Concurrent.on_cruby? + MriAttrVolatile + when Concurrent.on_jruby? + JRubyAttrVolatile + when Concurrent.on_rbx? 
+ RbxAttrVolatile + when Concurrent.on_truffleruby? + TruffleRubyAttrVolatile + else + MriAttrVolatile + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000..92e7c45 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb @@ -0,0 +1,50 @@ +require 'delegate' +require 'monitor' + +module Concurrent + unless defined?(SynchronizedDelegator) + + # This class provides a trivial way to synchronize all calls to a given object + # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls + # around the delegated `#send`. Example: + # + # array = [] # not thread-safe on many impls + # array = SynchronizedDelegator.new([]) # thread-safe + # + # A simple `Monitor` provides a very coarse-grained way to synchronize a given + # object, in that it will cause synchronization for methods that have no need + # for it, but this is a trivial way to get thread-safety where none may exist + # currently on some implementations. + # + # This class is currently being considered for inclusion into stdlib, via + # https://bugs.ruby-lang.org/issues/8556 + # + # @!visibility private + class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util.rb new file mode 100644 index 0000000..c67084a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util.rb @@ -0,0 +1,16 @@ +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter + CPU_COUNT = 16 # is there a way to determine this? + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb new file mode 100644 index 0000000..7a6e8d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb @@ -0,0 +1,74 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/striped64' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 + # available in public domain. 
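+ #
+ # Illustrative usage sketch (added for clarity, not part of the upstream gem's
+ # documentation); it only uses the public methods defined below:
+ #
+ #   adder = Concurrent::ThreadSafe::Util::Adder.new
+ #   threads = 4.times.map { Thread.new { 1_000.times { adder.increment } } }
+ #   threads.each(&:join)
+ #   adder.sum #=> 4000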
+ # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + # + # @!visibility private + class Adder < Striped64 + # Adds the given value. + def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000..d9b4c58 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,118 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/volatile' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in. + # + # Usage: + # class A + # include CheapLockable + # + # def do_exlusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? + # do_something + # cheap_broadcast # wake up others + # end + # end + # end + # + # @!visibility private + module CheapLockable + private + engine = defined?(RUBY_ENGINE) && RUBY_ENGINE + if engine == 'rbx' + # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. 
+ def cheap_synchronize + Rubinius.lock(self) + begin + yield + ensure + Rubinius.unlock(self) + end + end + + def cheap_wait + wchan = Rubinius::Channel.new + + begin + waiters = @waiters ||= [] + waiters.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout nil + ensure + Rubinius.lock(self) + + unless signaled or waiters.delete(wchan) + # we timed out, but got signaled afterwards (e.g. while waiting to + # acquire @lock), so pass that signal on to the next waiter + waiters.shift << true unless waiters.empty? + end + end + + self + end + + def cheap_broadcast + waiters = @waiters ||= [] + waiters.shift << true until waiters.empty? + self + end + elsif engine == 'jruby' + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#syncrhonize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_broadcast+'s block. + def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_broadcast+'s block. + def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb new file mode 100644 index 0000000..ff1e8ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb @@ -0,0 +1,63 @@ +require 'concurrent/thread_safe/util' + +# Shim for TruffleRuby.synchronized +if Concurrent.on_truffleruby? && !TruffleRuby.respond_to?(:synchronized) + module TruffleRuby + def self.synchronized(object, &block) + Truffle::System.synchronized(object, &block) + end + end +end + +module Concurrent + module ThreadSafe + module Util + def self.make_synchronized_on_rbx(klass) + klass.class_eval do + private + + def _mon_initialize + @_monitor = Monitor.new unless @_monitor # avoid double initialisation + end + + def self.new(*args) + obj = super(*args) + obj.send(:_mon_initialize) + obj + end + end + + klass.superclass.instance_methods(false).each do |method| + case method + when :new_range, :new_reserved + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + obj = super + obj.send(:_mon_initialize) + obj + end + RUBY + else + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + monitor = @_monitor + monitor or raise("BUG: Internal monitor was not properly initialized. 
Please report this to the concurrent-ruby developers.") + monitor.synchronize { super } + end + RUBY + end + end + end + + def self.make_synchronized_on_truffleruby(klass) + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args, &block) + TruffleRuby.synchronized(self) { super(*args, &block) } + end + RUBY + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000..b54be39 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,38 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/tuple' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + class PowerOfTwoTuple < Concurrent::Tuple + + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb new file mode 100644 index 0000000..4169c3d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb @@ -0,0 +1,246 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. + # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. 
+ # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUS. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUS forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + # + # @!visibility private + class Striped64 + + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. + # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + # + # @!visibility private + class Cell < Concurrent::AtomicReference + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + + # @!visibility private + def self.padding + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created + # hide from yardoc in a method + attr_reader :padding_0, :padding_1, :padding_2, :padding_3, :padding_4, :padding_5, :padding_6, :padding_7, :padding_8, :padding_9, :padding_10, :padding_11 + end + padding + end + + extend Volatile + attr_volatile :cells, # Table of cells. 
When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. + # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+x+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? + collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. + def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? + end + + def try_initialize_cells(x, hash) + if free? 
&& !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb new file mode 100644 index 0000000..cdac2a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb @@ -0,0 +1,75 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + module Volatile + + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of +Concurrent::AtomicReference+. + # + # Usage: + # class Foo + # extend Concurrent::ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? 
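+ # build an anonymous module that backs every named attribute with a
+ # Concurrent::AtomicReference and defines a reader, a writer, and a
+ # compare_and_set_ (aliased cas_) method for each, then mix it into the host class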
+ include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000..bdde2dd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,50 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A xorshift random number (positive +Fixnum+s) generator, provides + # reasonably cheap way to generate thread local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. 
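+ # (the + 1 keeps the seed non-zero: zero is a fixed point of the xorshift transform below)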
+ def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/timer_task.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/timer_task.rb new file mode 100644 index 0000000..a0b7233 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/timer_task.rb @@ -0,0 +1,333 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/dereferenceable' +require 'concurrent/concern/observable' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/scheduled_task' + +module Concurrent + + # A very common concurrency pattern is to run a thread that performs a task at + # regular intervals. The thread that performs the task sleeps for the given + # interval then wakes up and performs the task. Lather, rinse, repeat... This + # pattern causes two problems. First, it is difficult to test the business + # logic of the task because the task itself is tightly coupled with the + # concurrency logic. Second, an exception raised while performing the task can + # cause the entire thread to abend. In a long-running application where the + # task thread is intended to run for days/weeks/years a crashed task thread + # can pose a significant problem. `TimerTask` alleviates both problems. + # + # When a `TimerTask` is launched it starts a thread for monitoring the + # execution interval. The `TimerTask` thread does not perform the task, + # however. Instead, the TimerTask launches the task on a separate thread. + # Should the task experience an unrecoverable crash only the task thread will + # crash. This makes the `TimerTask` very fault tolerant. Additionally, the + # `TimerTask` thread can respond to the success or failure of the task, + # performing logging or ancillary operations. `TimerTask` can also be + # configured with a timeout value allowing it to kill a task that runs too + # long. + # + # One other advantage of `TimerTask` is that it forces the business logic to + # be completely decoupled from the concurrency logic. The business logic can + # be tested separately then passed to the `TimerTask` for scheduling and + # running. + # + # In some cases it may be necessary for a `TimerTask` to affect its own + # execution cycle. To facilitate this, a reference to the TimerTask instance + # is passed as an argument to the provided block every time the task is + # executed. + # + # The `TimerTask` class includes the `Dereferenceable` mixin module so the + # result of the last execution is always available via the `#value` method. + # Dereferencing options can be passed to the `TimerTask` during construction or + # at any later time using the `#set_deref_options` method. 
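Looking back at xor_shift_random.rb above: the 64-bit branch of the transform can be tried standalone. This sketch assumes a 64-bit build and mirrors `Util::MAX_INT` with a local constant; it is an illustration of the transform, not the library's API.

```ruby
# Standalone sketch of the 64-bit xorshift branch shown in xor_shift_random.rb.
# MAX_INT here is an assumption mirroring Concurrent::ThreadSafe::Util::MAX_INT
# on a 64-bit MRI build.
MAX_INT = 2**(0.size * 8 - 2) - 1

def xorshift(x)
  x ^= x >> 1
  x ^= (x << 1) & MAX_INT   # cut off overflow past the native word size
  x ^= x >> 54
end

x = Kernel.rand(MAX_INT - 1) + 1   # non-zero seed, as in XorShiftRandom.get
5.times { x = xorshift(x); puts x }
```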
+ # + # `TimerTask` supports notification through the Ruby standard library + # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # Observable} module. On execution the `TimerTask` will notify the observers + # with three arguments: time of execution, the result of the block (or nil on + # failure), and any raised exceptions (or nil on success). If the timeout + # interval is exceeded the observer will receive a `Concurrent::TimeoutError` + # object as the third argument. + # + # @!macro copy_options + # + # @example Basic usage + # task = Concurrent::TimerTask.new{ puts 'Boom!' } + # task.execute + # + # task.execution_interval #=> 60 (default) + # task.timeout_interval #=> 30 (default) + # + # # wait 60 seconds... + # #=> 'Boom!' + # + # task.shutdown #=> true + # + # @example Configuring `:execution_interval` and `:timeout_interval` + # task = Concurrent::TimerTask.new(execution_interval: 5, timeout_interval: 5) do + # puts 'Boom!' + # end + # + # task.execution_interval #=> 5 + # task.timeout_interval #=> 5 + # + # @example Immediate execution with `:run_now` + # task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' } + # task.execute + # + # #=> 'Boom!' + # + # @example Last `#value` and `Dereferenceable` mixin + # task = Concurrent::TimerTask.new( + # dup_on_deref: true, + # execution_interval: 5 + # ){ Time.now } + # + # task.execute + # Time.now #=> 2013-11-07 18:06:50 -0500 + # sleep(10) + # task.value #=> 2013-11-07 18:06:55 -0500 + # + # @example Controlling execution from within the block + # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task| + # task.execution_interval.times{ print 'Boom! ' } + # print "\n" + # task.execution_interval += 1 + # if task.execution_interval > 5 + # puts 'Stopping...' + # task.shutdown + # end + # end + # + # timer_task.execute # blocking call - this task will stop itself + # #=> Boom! + # #=> Boom! Boom! + # #=> Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! Boom! + # #=> Stopping... 
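One behaviour the examples above do not show: the `execution_interval=` setter defined further down in this class coerces its argument with `to_f` and raises `ArgumentError` for non-positive values. A quick sketch:

```ruby
require 'concurrent'

task = Concurrent::TimerTask.new(execution_interval: 2) { Time.now }
task.execution_interval = 5        # accepted, coerced via #to_f
puts task.execution_interval       # => 5.0

begin
  task.execution_interval = 0      # rejected by the setter shown below
rescue ArgumentError => e
  puts e.message                   # => "must be greater than zero"
end
```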
+ # + # @example Observation + # class TaskObserver + # def update(time, result, ex) + # if result + # print "(#{time}) Execution successfully returned #{result}\n" + # elsif ex.is_a?(Concurrent::TimeoutError) + # print "(#{time}) Execution timed out\n" + # else + # print "(#{time}) Execution failed with error #{ex}\n" + # end + # end + # end + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ 42 } + # task.add_observer(TaskObserver.new) + # task.execute + # sleep 4 + # + # #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42 + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ sleep } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:07:25 -0400) Execution timed out + # #=> (2013-10-13 19:07:27 -0400) Execution timed out + # #=> (2013-10-13 19:07:29 -0400) Execution timed out + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError + # task.shutdown + # + # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html + class TimerTask < RubyExecutorService + include Concern::Dereferenceable + include Concern::Observable + + # Default `:execution_interval` in seconds. + EXECUTION_INTERVAL = 60 + + # Default `:timeout_interval` in seconds. + TIMEOUT_INTERVAL = 30 + + # Create a new TimerTask with the given task and configuration. + # + # @!macro timer_task_initialize + # @param [Hash] opts the options defining task execution. + # @option opts [Integer] :execution_interval number of seconds between + # task executions (default: EXECUTION_INTERVAL) + # @option opts [Integer] :timeout_interval number of seconds a task can + # run before it is considered to have failed (default: TIMEOUT_INTERVAL) + # @option opts [Boolean] :run_now Whether to run the task immediately + # upon instantiation or to wait until the first # execution_interval + # has passed (default: false) + # + # @!macro deref_options + # + # @raise ArgumentError when no block is given. + # + # @yield to the block after :execution_interval seconds have passed since + # the last yield + # @yieldparam task a reference to the `TimerTask` instance so that the + # block can control its own lifecycle. Necessary since `self` will + # refer to the execution context of the block rather than the running + # `TimerTask`. + # + # @return [TimerTask] the new `TimerTask` + def initialize(opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + super + set_deref_options opts + end + + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + def running? + @running.true? + end + + # Execute a previously created `TimerTask`. + # + # @return [TimerTask] a reference to `self` + # + # @example Instance and execute in separate steps + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" } + # task.running? 
#=> false + # task.execute + # task.running? #=> true + # + # @example Instance and execute in one line + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute + # task.running? #=> true + def execute + synchronize do + if @running.false? + @running.make_true + schedule_next_task(@run_now ? 0 : @execution_interval) + end + end + self + end + + # Create and execute a new `TimerTask`. + # + # @!macro timer_task_initialize + # + # @example + # task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> true + def self.execute(opts = {}, &task) + TimerTask.new(opts, &task).execute + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval + synchronize { @execution_interval } + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @execution_interval = value } + end + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval + synchronize { @timeout_interval } + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @timeout_interval = value } + end + end + + private :post, :<< + + private + + def ns_initialize(opts, &task) + set_deref_options(opts) + + self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL + self.timeout_interval = opts[:timeout] || opts[:timeout_interval] || TIMEOUT_INTERVAL + @run_now = opts[:now] || opts[:run_now] + @executor = Concurrent::SafeTaskExecutor.new(task) + @running = Concurrent::AtomicBoolean.new(false) + @value = nil + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + # @!visibility private + def ns_shutdown_execution + @running.make_false + super + end + + # @!visibility private + def ns_kill_execution + @running.make_false + super + end + + # @!visibility private + def schedule_next_task(interval = execution_interval) + ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task)) + nil + end + + # @!visibility private + def execute_task(completion) + return nil unless @running.true? + ScheduledTask.execute(timeout_interval, args: [completion], &method(:timeout_task)) + _success, value, reason = @executor.execute(self) + if completion.try? + self.value = value + schedule_next_task + time = Time.now + observers.notify_observers do + [time, self.value, reason] + end + end + nil + end + + # @!visibility private + def timeout_task(completion) + return unless @running.true? + if completion.try? 
+ schedule_next_task + observers.notify_observers(Time.now, nil, Concurrent::TimeoutError.new) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tuple.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tuple.rb new file mode 100644 index 0000000..f8c4c25 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tuple.rb @@ -0,0 +1,86 @@ +require 'concurrent/atomic/atomic_reference' + +module Concurrent + + # A fixed size array with volatile (synchronized, thread safe) getters/setters. + # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal. + # + # @example + # tuple = Concurrent::Tuple.new(16) + # + # tuple.set(0, :foo) #=> :foo | volatile write + # tuple.get(0) #=> :foo | volatile read + # tuple.compare_and_set(0, :foo, :bar) #=> true | strong CAS + # tuple.cas(0, :foo, :baz) #=> false | strong CAS + # tuple.get(0) #=> :bar | volatile read + # + # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia + # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple + # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable + class Tuple + include Enumerable + + # The (fixed) size of the tuple. + attr_reader :size + + # @!visibility private + Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : ::Array + private_constant :Tuple + + # Create a new tuple of the given size. + # + # @param [Integer] size the number of elements in the tuple + def initialize(size) + @size = size + @tuple = tuple = Tuple.new(size) + i = 0 + while i < size + tuple[i] = Concurrent::AtomicReference.new + i += 1 + end + end + + # Get the value of the element at the given index. + # + # @param [Integer] i the index from which to retrieve the value + # @return [Object] the value at the given index or nil if the index is out of bounds + def get(i) + return nil if i >= @size || i < 0 + @tuple[i].get + end + alias_method :volatile_get, :get + + # Set the element at the given index to the given value + # + # @param [Integer] i the index for the element to set + # @param [Object] value the value to set at the given index + # + # @return [Object] the new value of the element at the given index or nil if the index is out of bounds + def set(i, value) + return nil if i >= @size || i < 0 + @tuple[i].set(value) + end + alias_method :volatile_set, :set + + # Set the value at the given index to the new value if and only if the current + # value matches the given old value. + # + # @param [Integer] i the index for the element to set + # @param [Object] old_value the value to compare against the current value + # @param [Object] new_value the value to set at the given index + # + # @return [Boolean] true if the value at the given element was set else false + def compare_and_set(i, old_value, new_value) + return false if i >= @size || i < 0 + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + # Calls the given block once for each element in self, passing that element as a parameter. 
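Because `Tuple` mixes in `Enumerable`, the volatile reads compose with ordinary collection methods. A small sketch (values chosen arbitrarily):

```ruby
require 'concurrent'
require 'concurrent/tuple'

tuple = Concurrent::Tuple.new(3)
3.times { |i| tuple.set(i, i * i) }   # volatile writes

tuple.each { |value| puts value }     # => 0, 1, 4 (volatile reads)
p tuple.to_a                          # => [0, 1, 4]
p tuple.include?(4)                   # => true
p tuple.cas(2, 4, 42)                 # => true, strong CAS on index 2
p tuple.get(2)                        # => 42
```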
+ # + # @yieldparam [Object] ref the `Concurrent::AtomicReference` object at the current index + def each + @tuple.each {|ref| yield ref.get} + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tvar.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tvar.rb new file mode 100644 index 0000000..09138c8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tvar.rb @@ -0,0 +1,258 @@ +require 'set' +require 'concurrent/synchronization' + +module Concurrent + + # A `TVar` is a transactional variable - a single-element container that + # is used as part of a transaction - see `Concurrent::atomically`. + # + # @!macro thread_safe_variable_comparison + # + # {include:file:docs-source/tvar.md} + class TVar < Synchronization::Object + safe_initialization! + + # Create a new `TVar` with an initial value. + def initialize(value) + @value = value + @version = 0 + @lock = Mutex.new + end + + # Get the value of a `TVar`. + def value + Concurrent::atomically do + Transaction::current.read(self) + end + end + + # Set the value of a `TVar`. + def value=(value) + Concurrent::atomically do + Transaction::current.write(self, value) + end + end + + # @!visibility private + def unsafe_value # :nodoc: + @value + end + + # @!visibility private + def unsafe_value=(value) # :nodoc: + @value = value + end + + # @!visibility private + def unsafe_version # :nodoc: + @version + end + + # @!visibility private + def unsafe_increment_version # :nodoc: + @version += 1 + end + + # @!visibility private + def unsafe_lock # :nodoc: + @lock + end + + end + + # Run a block that reads and writes `TVar`s as a single atomic transaction. + # With respect to the value of `TVar` objects, the transaction is atomic, in + # that it either happens or it does not, consistent, in that the `TVar` + # objects involved will never enter an illegal state, and isolated, in that + # transactions never interfere with each other. You may recognise these + # properties from database transactions. + # + # There are some very important and unusual semantics that you must be aware of: + # + # * Most importantly, the block that you pass to atomically may be executed + # more than once. In most cases your code should be free of + # side-effects, except for via TVar. + # + # * If an exception escapes an atomically block it will abort the transaction. + # + # * It is undefined behaviour to use callcc or Fiber with atomically. + # + # * If you create a new thread within an atomically, it will not be part of + # the transaction. Creating a thread counts as a side-effect. + # + # Transactions within transactions are flattened to a single transaction. + # + # @example + # a = new TVar(100_000) + # b = new TVar(100) + # + # Concurrent::atomically do + # a.value -= 10 + # b.value += 10 + # end + def atomically + raise ArgumentError.new('no block given') unless block_given? + + # Get the current transaction + + transaction = Transaction::current + + # Are we not already in a transaction (not nested)? + + if transaction.nil? 
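Note that the account-transfer `@example` in the `atomically` documentation above is written as `a = new TVar(100_000)`, which is not valid Ruby. A runnable equivalent of that example:

```ruby
require 'concurrent'

a = Concurrent::TVar.new(100_000)
b = Concurrent::TVar.new(100)

Concurrent::atomically do
  a.value -= 10
  b.value += 10
end

p a.value  # => 99990
p b.value  # => 110
```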
+ # New transaction + + begin + # Retry loop + + loop do + + # Create a new transaction + + transaction = Transaction.new + Transaction::current = transaction + + # Run the block, aborting on exceptions + + begin + result = yield + rescue Transaction::AbortError => e + transaction.abort + result = Transaction::ABORTED + rescue Transaction::LeaveError => e + transaction.abort + break result + rescue => e + transaction.abort + raise e + end + # If we can commit, break out of the loop + + if result != Transaction::ABORTED + if transaction.commit + break result + end + end + end + ensure + # Clear the current transaction + + Transaction::current = nil + end + else + # Nested transaction - flatten it and just run the block + + yield + end + end + + # Abort a currently running transaction - see `Concurrent::atomically`. + def abort_transaction + raise Transaction::AbortError.new + end + + # Leave a transaction without committing or aborting - see `Concurrent::atomically`. + def leave_transaction + raise Transaction::LeaveError.new + end + + module_function :atomically, :abort_transaction, :leave_transaction + + private + + class Transaction + + ABORTED = ::Object.new + + ReadLogEntry = Struct.new(:tvar, :version) + + AbortError = Class.new(StandardError) + LeaveError = Class.new(StandardError) + + def initialize + @read_log = [] + @write_log = {} + end + + def read(tvar) + Concurrent::abort_transaction unless valid? + + if @write_log.has_key? tvar + @write_log[tvar] + else + @read_log.push(ReadLogEntry.new(tvar, tvar.unsafe_version)) + tvar.unsafe_value + end + end + + def write(tvar, value) + # Have we already written to this TVar? + + unless @write_log.has_key? tvar + # Try to lock the TVar + + unless tvar.unsafe_lock.try_lock + # Someone else is writing to this TVar - abort + Concurrent::abort_transaction + end + + # If we previously wrote to it, check the version hasn't changed + + @read_log.each do |log_entry| + if log_entry.tvar == tvar and tvar.unsafe_version > log_entry.version + Concurrent::abort_transaction + end + end + end + + # Record the value written + + @write_log[tvar] = value + end + + def abort + unlock + end + + def commit + return false unless valid? + + @write_log.each_pair do |tvar, value| + tvar.unsafe_value = value + tvar.unsafe_increment_version + end + + unlock + + true + end + + def valid? + @read_log.each do |log_entry| + unless @write_log.has_key? log_entry.tvar + if log_entry.tvar.unsafe_version > log_entry.version + return false + end + end + end + + true + end + + def unlock + @write_log.each_key do |tvar| + tvar.unsafe_lock.unlock + end + end + + def self.current + Thread.current[:current_tvar_transaction] + end + + def self.current=(transaction) + Thread.current[:current_tvar_transaction] = transaction + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/engine.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/engine.rb new file mode 100644 index 0000000..bc4173e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/engine.rb @@ -0,0 +1,56 @@ +module Concurrent + module Utility + + # @!visibility private + module EngineDetector + def on_jruby? + ruby_engine == 'jruby' + end + + def on_jruby_9000? + on_jruby? && ruby_version(JRUBY_VERSION, :>=, 9, 0, 0) + end + + def on_cruby? + ruby_engine == 'ruby' + end + + def on_rbx? + ruby_engine == 'rbx' + end + + def on_truffleruby? 
+ ruby_engine == 'truffleruby' + end + + def on_windows? + !(RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/).nil? + end + + def on_osx? + !(RbConfig::CONFIG['host_os'] =~ /darwin|mac os/).nil? + end + + def on_linux? + !(RbConfig::CONFIG['host_os'] =~ /linux/).nil? + end + + def ruby_engine + defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby' + end + + def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch) + result = (version.split('.').map(&:to_i) <=> [major, minor, patch]) + comparisons = { :== => [0], + :>= => [1, 0], + :<= => [-1, 0], + :> => [1], + :< => [-1] } + comparisons.fetch(comparison).include? result + end + end + end + + # @!visibility private + extend Utility::EngineDetector +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb new file mode 100644 index 0000000..c9f4b36 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb @@ -0,0 +1,58 @@ +require 'concurrent/synchronization' + +module Concurrent + + class_definition = Class.new(Synchronization::LockableObject) do + def initialize + @last_time = Time.now.to_f + super() + end + + if defined?(Process::CLOCK_MONOTONIC) + # @!visibility private + def get_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + elsif Concurrent.on_jruby? + # @!visibility private + def get_time + java.lang.System.nanoTime() / 1_000_000_000.0 + end + else + + # @!visibility private + def get_time + synchronize do + now = Time.now.to_f + if @last_time < now + @last_time = now + else # clock has moved back in time + @last_time += 0.000_001 + end + end + end + + end + end + + # Clock that cannot be set and represents monotonic time since + # some unspecified starting point. + # + # @!visibility private + GLOBAL_MONOTONIC_CLOCK = class_definition.new + private_constant :GLOBAL_MONOTONIC_CLOCK + + # @!macro monotonic_get_time + # + # Returns the current time a tracked by the application monotonic clock. + # + # @return [Float] The current monotonic time since some unspecified + # starting point + # + # @!macro monotonic_clock_warning + def monotonic_time + GLOBAL_MONOTONIC_CLOCK.get_time + end + + module_function :monotonic_time +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb new file mode 100644 index 0000000..a944bd7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb @@ -0,0 +1,79 @@ +require 'concurrent/utility/engine' + +module Concurrent + + module Utility + + # @!visibility private + module NativeExtensionLoader + + def allow_c_extensions? + Concurrent.on_cruby? + end + + def c_extensions_loaded? + defined?(@c_extensions_loaded) && @c_extensions_loaded + end + + def java_extensions_loaded? + defined?(@java_extensions_loaded) && @java_extensions_loaded + end + + def load_native_extensions + unless defined? Synchronization::AbstractObject + raise 'native_extension_loader loaded before Synchronization::AbstractObject' + end + + if Concurrent.on_cruby? && !c_extensions_loaded? 
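The engine/OS predicates in engine.rb and the clock in monotonic_time.rb (both shown above) are easiest to see from the caller's side. The predicates are `@!visibility private`, so that part is illustration only; `Concurrent.monotonic_time` is the public entry point:

```ruby
require 'concurrent'

# Engine/OS detection (private helpers, shown for illustration)
puts Concurrent.ruby_engine                 # e.g. "ruby" on MRI
puts Concurrent.on_cruby?                   # true on MRI
puts Concurrent.on_windows?                 # host_os based check
puts Concurrent.ruby_version(:>=, 2, 3, 0)  # version defaults to RUBY_VERSION

# Monotonic clock: safe for measuring elapsed time even if the wall clock changes
t0 = Concurrent.monotonic_time
sleep 0.05
puts Concurrent.monotonic_time - t0         # ~0.05
```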
+ ['concurrent/concurrent_ruby_ext', + "concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext" + ].each { |p| try_load_c_extension p } + end + + if Concurrent.on_jruby? && !java_extensions_loaded? + begin + require 'concurrent/concurrent_ruby.jar' + set_java_extensions_loaded + rescue LoadError => e + raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace + end + end + end + + private + + def load_error_path(error) + if error.respond_to? :path + error.path + else + error.message.split(' -- ').last + end + end + + def set_c_extensions_loaded + @c_extensions_loaded = true + end + + def set_java_extensions_loaded + @java_extensions_loaded = true + end + + def try_load_c_extension(path) + require path + set_c_extensions_loaded + rescue LoadError => e + if load_error_path(e) == path + # move on with pure-Ruby implementations + # TODO (pitr-ch 12-Jul-2018): warning on verbose? + else + raise e + end + end + + end + end + + # @!visibility private + extend Utility::NativeExtensionLoader +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_integer.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_integer.rb new file mode 100644 index 0000000..10719e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_integer.rb @@ -0,0 +1,53 @@ +module Concurrent + module Utility + # @private + module NativeInteger + # http://stackoverflow.com/questions/535721/ruby-max-integer + MIN_VALUE = -(2**(0.size * 8 - 2)) + MAX_VALUE = (2**(0.size * 8 - 2) - 1) + + def ensure_upper_bound(value) + if value > MAX_VALUE + raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}") + end + value + end + + def ensure_lower_bound(value) + if value < MIN_VALUE + raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}") + end + value + end + + def ensure_integer(value) + unless value.is_a?(Integer) + raise ArgumentError.new("#{value} is not an Integer") + end + value + end + + def ensure_integer_and_bounds(value) + ensure_integer value + ensure_upper_bound value + ensure_lower_bound value + end + + def ensure_positive(value) + if value < 0 + raise ArgumentError.new("#{value} cannot be negative") + end + value + end + + def ensure_positive_and_no_zero(value) + if value < 1 + raise ArgumentError.new("#{value} cannot be negative or zero") + end + value + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/processor_counter.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/processor_counter.rb new file mode 100644 index 0000000..531ca0a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/processor_counter.rb @@ -0,0 +1,163 @@ +require 'etc' +require 'rbconfig' +require 'concurrent/delay' + +module Concurrent + module Utility + + # @!visibility private + class ProcessorCounter + def initialize + @processor_count = Delay.new { compute_processor_count } + @physical_processor_count = Delay.new { compute_physical_processor_count } + end + + # Number of processors seen by the OS and used for process scheduling. For + # performance reasons the calculated value will be memoized on the first + # call. 
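The bounds helpers in native_integer.rb above and the processor counter documented here can be exercised directly. `NativeInteger` is marked `@private`, so that part is illustrative only; `Concurrent.processor_count` and `Concurrent.physical_processor_count` are public:

```ruby
require 'concurrent'
require 'concurrent/utility/native_integer'

# NativeInteger is private API; shown only to illustrate the bounds checks above.
NI = Concurrent::Utility::NativeInteger

p NI::MAX_VALUE                      # 2**62 - 1 on a 64-bit MRI build
p NI.ensure_integer_and_bounds(42)   # => 42 (valid input is returned unchanged)

begin
  NI.ensure_positive_and_no_zero(0)
rescue ArgumentError => e
  puts e.message                     # => "0 cannot be negative or zero"
end

# The processor counter documented here is public API:
puts Concurrent.processor_count
puts Concurrent.physical_processor_count
```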
+ # + # When running under JRuby the Java runtime call + # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According + # to the Java documentation this "value may change during a particular + # invocation of the virtual machine... [applications] should therefore + # occasionally poll this property." Subsequently the result will NOT be + # memoized under JRuby. + # + # Ruby's Etc.nprocessors will be used if available (MRI 2.2+). + # + # On Windows the Win32 API will be queried for the + # `NumberOfLogicalProcessors from Win32_Processor`. This will return the + # total number "logical processors for the current instance of the + # processor", which taked into account hyperthreading. + # + # * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev + # * Alpha: /usr/bin/nproc (/proc/cpuinfo exists but cannot be used) + # * BSD: /sbin/sysctl + # * Cygwin: /proc/cpuinfo + # * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl + # * HP-UX: /usr/sbin/ioscan + # * IRIX: /usr/sbin/sysconf + # * Linux: /proc/cpuinfo + # * Minix 3+: /proc/cpuinfo + # * Solaris: /usr/sbin/psrinfo + # * Tru64 UNIX: /usr/sbin/psrinfo + # * UnixWare: /usr/sbin/psrinfo + # + # @return [Integer] number of processors seen by the OS or Java runtime + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors() + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + def processor_count + @processor_count.value + end + + # Number of physical processor cores on the current system. For performance + # reasons the calculated value will be memoized on the first call. + # + # On Windows the Win32 API will be queried for the `NumberOfCores from + # Win32_Processor`. This will return the total number "of cores for the + # current instance of the processor." On Unix-like operating systems either + # the `hwprefs` or `sysctl` utility will be called in a subshell and the + # returned value will be used. In the rare case where none of these methods + # work or an exception is raised the function will simply return 1. + # + # @return [Integer] number physical processor cores on the current system + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + # @see http://www.unix.com/man-page/osx/1/HWPREFS/ + # @see http://linux.die.net/man/8/sysctl + def physical_processor_count + @physical_processor_count.value + end + + private + + def compute_processor_count + if Concurrent.on_jruby? 
+ java.lang.Runtime.getRuntime.availableProcessors + elsif Etc.respond_to?(:nprocessors) && (nprocessor = Etc.nprocessors rescue nil) + nprocessor + else + os_name = RbConfig::CONFIG["target_os"] + if os_name =~ /mingw|mswin/ + require 'win32ole' + result = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfLogicalProcessors from Win32_Processor") + result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+) + elsif File.readable?("/proc/cpuinfo") && (cpuinfo_count = IO.read("/proc/cpuinfo").scan(/^processor/).size) > 0 + cpuinfo_count + elsif File.executable?("/usr/bin/nproc") + IO.popen("/usr/bin/nproc --all", &:read).to_i + elsif File.executable?("/usr/bin/hwprefs") + IO.popen("/usr/bin/hwprefs thread_count", &:read).to_i + elsif File.executable?("/usr/sbin/psrinfo") + IO.popen("/usr/sbin/psrinfo", &:read).scan(/^.*on-*line/).size + elsif File.executable?("/usr/sbin/ioscan") + IO.popen("/usr/sbin/ioscan -kC processor", &:read).scan(/^.*processor/).size + elsif File.executable?("/usr/sbin/pmcycles") + IO.popen("/usr/sbin/pmcycles -m", &:read).count("\n") + elsif File.executable?("/usr/sbin/lsdev") + IO.popen("/usr/sbin/lsdev -Cc processor -S 1", &:read).count("\n") + elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i + IO.popen("/usr/sbin/sysconf NPROC_ONLN", &:read).to_i + elsif File.executable?("/usr/sbin/sysctl") + IO.popen("/usr/sbin/sysctl -n hw.ncpu", &:read).to_i + elsif File.executable?("/sbin/sysctl") + IO.popen("/sbin/sysctl -n hw.ncpu", &:read).to_i + else + # TODO (pitr-ch 05-Nov-2016): warn about failures + 1 + end + end + rescue + return 1 + end + + def compute_physical_processor_count + ppc = case RbConfig::CONFIG["target_os"] + when /darwin1/ + IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i + when /linux/ + cores = {} # unique physical ID / core ID combinations + phy = 0 + IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln| + if ln.start_with?("physical") + phy = ln[/\d+/] + elsif ln.start_with?("core") + cid = phy + ":" + ln[/\d+/] + cores[cid] = true if not cores[cid] + end + end + cores.count + when /mswin|mingw/ + require 'win32ole' + result_set = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfCores from Win32_Processor") + result_set.to_enum.collect(&:NumberOfCores).reduce(:+) + else + processor_count + end + # fall back to logical count if physical info is invalid + ppc > 0 ? 
ppc : processor_count + rescue + return 1 + end + end + end + + # create the default ProcessorCounter on load + @processor_counter = Utility::ProcessorCounter.new + singleton_class.send :attr_reader, :processor_counter + + def self.processor_count + processor_counter.processor_count + end + + def self.physical_processor_count + processor_counter.physical_processor_count + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/version.rb b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/version.rb new file mode 100644 index 0000000..d18c038 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/version.rb @@ -0,0 +1,3 @@ +module Concurrent + VERSION = '1.1.7' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.appveyor 2.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.appveyor 2.yml new file mode 100644 index 0000000..4e61390 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.appveyor 2.yml @@ -0,0 +1,30 @@ +install: + - SET PATH=C:\Ruby%RUBYVER%\bin;%PATH% + - SET RAKEOPT=-rdevkit + - ps: | + if ($env:RUBYVER -like "*head*") { + $(new-object net.webclient).DownloadFile("https://github.com/oneclick/rubyinstaller2/releases/download/rubyinstaller-head/rubyinstaller-$env:RUBYVER.exe", "$pwd/ruby-setup.exe") + cmd /c ruby-setup.exe /verysilent /dir=C:/Ruby$env:RUBYVER + } + - ridk version + - gem --version + - gem install bundler --quiet --no-document + - bundle install + # Update to libffi-3.3 since Appveyor version fails on LongDouble specs + - ridk exec pacman --sync --refresh --needed --noconfirm mingw-w64-x86_64-libffi mingw-w64-i686-libffi +build: off +build_script: + - bundle exec rake libffi compile -- %EXTCONFOPTS% || bundle exec rake compile -- %EXTCONFOPTS% +test_script: + - bundle exec rake test + - bundle exec rake types_conf && git --no-pager diff +environment: + matrix: + - RUBYVER: "head-x64" + EXTCONFOPTS: "--disable-system-libffi" + - RUBYVER: 24 + EXTCONFOPTS: "--disable-system-libffi" + - RUBYVER: 25-x64 + EXTCONFOPTS: "--enable-system-libffi" + - RUBYVER: 26 + EXTCONFOPTS: "--enable-system-libffi" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.appveyor.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.appveyor.yml new file mode 100644 index 0000000..4e61390 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.appveyor.yml @@ -0,0 +1,30 @@ +install: + - SET PATH=C:\Ruby%RUBYVER%\bin;%PATH% + - SET RAKEOPT=-rdevkit + - ps: | + if ($env:RUBYVER -like "*head*") { + $(new-object net.webclient).DownloadFile("https://github.com/oneclick/rubyinstaller2/releases/download/rubyinstaller-head/rubyinstaller-$env:RUBYVER.exe", "$pwd/ruby-setup.exe") + cmd /c ruby-setup.exe /verysilent /dir=C:/Ruby$env:RUBYVER + } + - ridk version + - gem --version + - gem install bundler --quiet --no-document + - bundle install + # Update to libffi-3.3 since Appveyor version fails on LongDouble specs + - ridk exec pacman --sync --refresh --needed --noconfirm mingw-w64-x86_64-libffi mingw-w64-i686-libffi +build: off +build_script: + - bundle exec rake libffi compile -- %EXTCONFOPTS% || bundle exec rake compile -- %EXTCONFOPTS% +test_script: + - bundle exec rake test + - bundle exec rake types_conf && git --no-pager diff +environment: + matrix: + - RUBYVER: "head-x64" + EXTCONFOPTS: "--disable-system-libffi" + - RUBYVER: 24 + EXTCONFOPTS: "--disable-system-libffi" + - RUBYVER: 25-x64 + EXTCONFOPTS: "--enable-system-libffi" + - RUBYVER: 26 + 
EXTCONFOPTS: "--enable-system-libffi" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.github/workflows/ci 2.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.github/workflows/ci 2.yml new file mode 100644 index 0000000..ee7d068 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.github/workflows/ci 2.yml @@ -0,0 +1,64 @@ +name: CI +on: [push, pull_request] +jobs: + system-libffi: + # Run on latest MRI with explicit selection of system or builtin libffi + strategy: + fail-fast: false + matrix: + os: [ ubuntu, macos, windows ] + extconfopts: [ --disable-system-libffi, --enable-system-libffi ] + runs-on: ${{ matrix.os }}-latest + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: 2.7 + + - run: brew install automake libffi pkg-config + if: matrix.os == 'macos' + - run: ridk exec pacman --sync --refresh --needed --noconfirm mingw-w64-x86_64-libffi + if: matrix.os == 'windows' && matrix.extconfopts == '--enable-system-libffi' + + - run: bundle install + - run: bundle exec rake libffi + - run: bundle exec rake compile -- ${{ matrix.extconfopts }} + env: + # work around misconfiguration of libffi on MacOS with homebrew + PKG_CONFIG_PATH: ${{ env.PKG_CONFIG_PATH }}:/usr/local/opt/libffi/lib/pkgconfig + - run: bundle exec rake test + - run: bundle exec rake types_conf && git --no-pager diff + + specs: + # Run all specs on all ruby implementations + # Use automatic libffi selection on MRI + strategy: + fail-fast: false + matrix: + os: [ ubuntu, macos, windows ] + ruby: [ 2.3, 2.4, 2.5, 2.6, 2.7, ruby-head, truffleruby-head ] + exclude: + - os: windows + ruby: truffleruby-head + - os: windows + ruby: 2.3 # compilation fails + runs-on: ${{ matrix.os }}-latest + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + + - run: brew install automake + if: matrix.os == 'macos' + + - run: bundle install + - run: bundle exec rake libffi + - run: bundle exec rake compile + + - run: bundle exec rake test + + - run: bundle exec rake bench:all + if: matrix.ruby != 'truffleruby-head' + env: + ITER: 10 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.github/workflows/ci.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.github/workflows/ci.yml new file mode 100644 index 0000000..ee7d068 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.github/workflows/ci.yml @@ -0,0 +1,64 @@ +name: CI +on: [push, pull_request] +jobs: + system-libffi: + # Run on latest MRI with explicit selection of system or builtin libffi + strategy: + fail-fast: false + matrix: + os: [ ubuntu, macos, windows ] + extconfopts: [ --disable-system-libffi, --enable-system-libffi ] + runs-on: ${{ matrix.os }}-latest + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: 2.7 + + - run: brew install automake libffi pkg-config + if: matrix.os == 'macos' + - run: ridk exec pacman --sync --refresh --needed --noconfirm mingw-w64-x86_64-libffi + if: matrix.os == 'windows' && matrix.extconfopts == '--enable-system-libffi' + + - run: bundle install + - run: bundle exec rake libffi + - run: bundle exec rake compile -- ${{ matrix.extconfopts }} + env: + # work around misconfiguration of libffi on MacOS with homebrew + PKG_CONFIG_PATH: ${{ env.PKG_CONFIG_PATH }}:/usr/local/opt/libffi/lib/pkgconfig + - run: bundle exec rake test + - run: bundle exec rake types_conf && git --no-pager diff + + specs: + # Run all specs on all ruby implementations + # Use automatic libffi selection on MRI + strategy: + 
fail-fast: false + matrix: + os: [ ubuntu, macos, windows ] + ruby: [ 2.3, 2.4, 2.5, 2.6, 2.7, ruby-head, truffleruby-head ] + exclude: + - os: windows + ruby: truffleruby-head + - os: windows + ruby: 2.3 # compilation fails + runs-on: ${{ matrix.os }}-latest + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + + - run: brew install automake + if: matrix.os == 'macos' + + - run: bundle install + - run: bundle exec rake libffi + - run: bundle exec rake compile + + - run: bundle exec rake test + + - run: bundle exec rake bench:all + if: matrix.ruby != 'truffleruby-head' + env: + ITER: 10 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.gitignore b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.gitignore new file mode 100644 index 0000000..1d1f393 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.gitignore @@ -0,0 +1,25 @@ +doc/ +bin/ +.yardoc +*.orig +nbproject/private +pkg +*.orig +*.rej +*.patch +*.diff +build +*.so +*.[oa] +core +lib/ffi/types.conf +lib/ffi_c.bundle +lib/ffi_c.so +vendor +.bundle +Gemfile.lock +types_log +*.gem +embed-test.log +spec/ffi/embed-test/ext/Makefile +spec/ffi/embed-test/ext/embed_test.bundle \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.gitmodules b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.gitmodules new file mode 100644 index 0000000..1d6cbc7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.gitmodules @@ -0,0 +1,4 @@ +[submodule "ext/ffi_c/libffi"] + path = ext/ffi_c/libffi + url = https://github.com/libffi/libffi.git + branch = master diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.travis.yml new file mode 100644 index 0000000..52f8913 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.travis.yml @@ -0,0 +1,58 @@ +dist: trusty +group: beta +language: ruby +git: + submodules: false + +script: + - bundle exec rake libffi + - bundle exec rake compile + - bundle exec rake test + - | + if [[ $(ruby -v) != *truffleruby* ]]; then + ITER=10 bundle exec rake bench:all + fi + - bundle exec rake types_conf && git --no-pager diff +os: + - linux + - osx +rvm: + - 2.3.8 + - 2.4.6 + - 2.5.5 + - 2.6.5 + - 2.7.0 + - ruby-head + - truffleruby-head + +env: + - CC=gcc + - CC=clang +matrix: + allow_failures: + - os: osx + rvm: ruby-head + - os: osx + rvm: 2.3.8 + - os: linux + rvm: ruby-head + include: + - name: powerpc + language: generic + before_install: | + docker run --rm --privileged multiarch/qemu-user-static:register --reset && + docker build --rm -t ffi-powerpc -f spec/env/Dockerfile.powerpc . + script: | + docker run --rm -t -v `pwd`:/ffi ffi-powerpc + - name: armhf + language: generic + before_install: | + docker run --rm --privileged multiarch/qemu-user-static:register --reset && + docker build --rm -t ffi-armhf -f spec/env/Dockerfile.armhf . 
+ script: | + docker run --rm -t -v `pwd`:/ffi ffi-armhf + exclude: + - os: osx + rvm: truffleruby-head +after_failure: + - "find build -name mkmf.log | xargs cat" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.yardopts b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.yardopts new file mode 100644 index 0000000..8ef32b9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/.yardopts @@ -0,0 +1,5 @@ +--title "Ruby FFI" +--charset UTF-8 +--private +lib/**/*.rb +ext/**/*.c \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/CHANGELOG.md new file mode 100644 index 0000000..f78b870 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/CHANGELOG.md @@ -0,0 +1,222 @@ +1.13.1 / 2020-06-09 +------------------- + +Changed: +* Revert use of `ucrtbase.dll` as default C library on Windows-MINGW. + `ucrtbase.dll` is still used on MSWIN target. #790 +* Test for `ffi_prep_closure_loc()` to make sure we can use this function. + This fixes incorrect use of system libffi on MacOS Mojave (10.14). #787 +* Update types.conf on x86_64-dragonflybsd + + +1.13.0 / 2020-06-01 +------------------- + +Added: +* Add TruffleRuby support. Almost all specs are running on TruffleRuby and succeed. #768 +* Add ruby source files to the java gem. This allows to ship the Ruby library code per platform java gem and add it as a default gem to JRuby. #763 +* Add FFI::Platform::LONG_DOUBLE_SIZE +* Add bounds checks for writing to an inline char[] . #756 +* Add long double as callback return value. #771 +* Update type definitions and add types from stdint.h and stddef.h on i386-windows, x86_64-windows, x86_64-darwin, x86_64-linux, arm-linux, powerpc-linux. #749 +* Add new type definitions for powerpc-openbsd and sparcv9-openbsd. #775, #778 + +Changed: +* Raise required ruby version to >= 2.3. +* Lots of cleanups and improvements in library, specs and benchmarks. +* Fix a lot of compiler warnings at the C-extension +* Fix several install issues on MacOS: + * Look for libffi in SDK paths, since recent versions of macOS removed it from `/usr/include` . #757 + * Fix error `ld: library not found for -lgcc_s.10.4` + * Don't built for i386 architecture as it is deprecated +* Several fixes for MSVC build on Windows. #779 +* Use `ucrtbase.dll` as default C library on Windows instead of old `msvcrt.dll`. #779 +* Update builtin libffi to fix a Powerpc issue with parameters of type long +* Allow unmodified sourcing of (the ruby code of) this gem in JRuby and TruffleRuby as a default gem. #747 +* Improve check to detect if a module has a #find_type method suitable for FFI. This fixes compatibility with stdlib `mkmf` . #776 + +Removed: +* Reject callback with `:string` return type at definition, because it didn't work so far and is not save to use. #751, #782 + + +1.12.2 / 2020-02-01 +------------------- + +* Fix possible segfault at FFI::Struct#[] and []= after GC.compact . #742 + + +1.12.1 / 2020-01-14 +------------------- + +Added: +* Add binary gem support for ruby-2.7 on Windows + + +1.12.0 / 2020-01-14 +------------------- + +Added: +* FFI::VERSION is defined as part of `require 'ffi'` now. + It is no longer necessary to `require 'ffi/version'` . + +Changed: +* Update libffi to latest master. + +Deprecated: +* Overwriting struct layouts is now warned and will be disallowed in ffi-2.0. #734, #735 + + +1.11.3 / 2019-11-25 +------------------- + +Removed: +* Remove support for tainted objects which cause deprecation warnings in ruby-2.7. 
#730 + + +1.11.2 / 2019-11-11 +------------------- + +Added: +* Add DragonFlyBSD as a platform. #724 + +Changed: +* Sort all types.conf files, so that files and changes are easier to compare. +* Regenerated type conf for freebsd12 and x86_64-linux targets. #722 +* Remove MACOSX_DEPLOYMENT_TARGET that was targeting very old version 10.4. #647 +* Fix library name mangling for non glibc Linux/UNIX. #727 +* Fix compiler warnings raised by ruby-2.7 +* Update libffi to latest master. + + +1.11.1 / 2019-05-20 +------------------- + +Changed: +* Raise required ruby version to >=2.0. #699, #700 +* Fix a possible linker error on ruby < 2.3 on Linux. + + +1.11.0 / 2019-05-17 +------------------- +This version was yanked on 2019-05-20 to fix an install issue on ruby-1.9.3. #700 + +Added: +* Add ability to disable or force use of system libffi. #669 + Use like `gem inst ffi -- --enable-system-libffi` . +* Add ability to call FFI callbacks from outside of FFI call frame. #584 +* Add proper documentation to FFI::Generator and ::Task +* Add gemspec metadata. #696, #698 + +Changed: +* Fix stdcall on Win32. #649, #669 +* Fix load paths for FFI::Generator::Task +* Fix FFI::Pointer#read_string(0) to return a binary String. #692 +* Fix benchmark suite so that it runs on ruby-2.x +* Move FFI::Platform::CPU from C to Ruby. #663 +* Move FFI::StructByReference to Ruby. #681 +* Move FFI::DataConverter to Ruby (#661) +* Various cleanups and improvements of specs and benchmarks + +Removed: +* Remove ruby-1.8 and 1.9 compatibility code. #683 +* Remove unused spec files. #684 + + +1.10.0 / 2019-01-06 +------------------- + +Added: +* Add /opt/local/lib/ to ffi's fallback library search path. #638 +* Add binary gem support for ruby-2.6 on Windows +* Add FreeBSD on AArch64 and ARM support. #644 +* Add FFI::LastError.winapi_error on Windows native or Cygwin. #633 + +Changed: +* Update to rake-compiler-dock-0.7.0 +* Use 64-bit inodes on FreeBSD >= 12. #644 +* Switch time_t and suseconds_t types to long on FreeBSD. #627 +* Make register_t long_long on 64-bit FreeBSD. #644 +* Fix Pointer#write_array_of_type #637 + +Removed: +* Drop binary gem support for ruby-2.0 and 2.1 on Windows + + +1.9.25 / 2018-06-03 +------------------- + +Changed: +* Revert closures via libffi. + This re-adds ClosurePool and fixes compat with SELinux enabled systems. #621 + + +1.9.24 / 2018-06-02 +------------------- + +Security Note: + +This update addresses vulnerability CVE-2018-1000201: DLL loading issue which can be hijacked on Windows OS, when a Symbol is used as DLL name instead of a String. Found by Matthew Bush. + +Added: +* Added a CHANGELOG file +* Add mips64(eb) support, and mips r6 support. (#601) + +Changed: +* Update libffi to latest changes on master. +* Don't search in hardcoded /usr paths on Windows. +* Don't treat Symbol args different to Strings in ffi_lib. +* Make sure size_t is defined in Thread.c. Fixes #609 + + +1.9.23 / 2018-02-25 +------------------- + +Changed: +* Fix unnecessary rebuild of configure in darwin multi arch. Fixes #605 + + +1.9.22 / 2018-02-22 +------------------- + +Changed: +* Update libffi to latest changes on master. +* Update detection of system libffi to match new requirements. Fixes #617 +* Prefer bundled libffi over system libffi on Mac OS. +* Do closures via libffi. This removes ClosurePool and fixes compat with PaX. #540 +* Use a more deterministic gem packaging. +* Fix unnecessary update of autoconf files at gem install. 
+ + +1.9.21 / 2018-02-06 +------------------- + +Added: +* Ruby-2.5 support by Windows binary gems. Fixes #598 +* Add missing win64 types. +* Added support for Bitmask. (#573) +* Add support for MSYS2 (#572) and Sparc64 Linux. (#574) + +Changed: +* Fix read_string to not throw an error on length 0. +* Don't use absolute paths for sh and env. Fixes usage on Adroid #528 +* Use Ruby implementation for `which` for better compat with Windows. Fixes #315 +* Fix compatibility with PPC64LE platform. (#577) +* Normalize sparc64 to sparcv9. (#575) + +Removed: +* Drop Ruby 1.8.7 support (#480) + + +1.9.18 / 2017-03-03 +------------------- + +Added: +* Add compatibility with Ruby-2.4. + +Changed: +* Add missing shlwapi.h include to fix Windows build. +* Avoid undefined behaviour of LoadLibrary() on Windows. #553 + + +1.9.17 / 2017-01-13 +------------------- diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/COPYING b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/COPYING new file mode 100644 index 0000000..7622318 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/COPYING @@ -0,0 +1,49 @@ +Copyright (c) 2008-2013, Ruby FFI project contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Ruby FFI project nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +libffi, used by this project, is licensed under the MIT license: + +libffi - Copyright (c) 1996-2011 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/Gemfile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/Gemfile new file mode 100644 index 0000000..ec88154 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/Gemfile @@ -0,0 +1,17 @@ +source 'https://rubygems.org' + +group :development do + gem 'rake', '~> 13.0' + gem 'rake-compiler', '~> 1.0.3' + gem 'rake-compiler-dock', '~> 1.0' + gem 'rspec', '~> 3.0' + # irb is a dependency of rubygems-tasks 0.2.5. + # irb versions > 1.1.1 depend on reline, + # which sometimes causes 'bundle install' to fail on Ruby <= 2.4: https://github.com/rubygems/rubygems/issues/3463 + gem 'rubygems-tasks', '>= 0.2', '< 0.2.5', :require => 'rubygems/tasks' +end + +group :doc do + gem 'kramdown' + gem 'yard', '~> 0.9' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/LICENSE b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/LICENSE new file mode 100644 index 0000000..20185fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2008-2016, Ruby FFI project contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Ruby FFI project nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/LICENSE.SPECS b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/LICENSE.SPECS new file mode 100644 index 0000000..5c9ffce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/LICENSE.SPECS @@ -0,0 +1,22 @@ +Copyright (c) 2008-2012 Ruby-FFI contributors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/README.md b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/README.md new file mode 100644 index 0000000..326ed47 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/README.md @@ -0,0 +1,124 @@ +# Ruby-FFI https://github.com/ffi/ffi/wiki [![Build Status](https://travis-ci.org/ffi/ffi.svg?branch=master)](https://travis-ci.org/ffi/ffi) [![Build status Windows](https://ci.appveyor.com/api/projects/status/r8wxn1sd4s794gg1/branch/master?svg=true)](https://ci.appveyor.com/project/larskanis/ffi-aofqa/branch/master) + +## Description + +Ruby-FFI is a gem for programmatically loading dynamically-linked native +libraries, binding functions within them, and calling those functions +from Ruby code. Moreover, a Ruby-FFI extension works without changes +on CRuby (MRI), JRuby, Rubinius and TruffleRuby. [Discover why you should write your next extension +using Ruby-FFI](https://github.com/ffi/ffi/wiki/why-use-ffi). + +## Features + +* Intuitive DSL +* Supports all C native types +* C structs (also nested), enums and global variables +* Callbacks from C to Ruby +* Automatic garbage collection of native memory + +## Synopsis + +```ruby +require 'ffi' + +module MyLib + extend FFI::Library + ffi_lib 'c' + attach_function :puts, [ :string ], :int +end + +MyLib.puts 'Hello, World using libc!' +``` + +For less minimalistic and more examples you may look at: + +* the `samples/` folder +* the examples on the [wiki](https://github.com/ffi/ffi/wiki) +* the projects using FFI listed on the wiki: https://github.com/ffi/ffi/wiki/projects-using-ffi + +## Requirements + +When installing the gem on CRuby (MRI), you will need: +* A C compiler (e.g., Xcode on macOS, `gcc` or `clang` on everything else) +Optionally (speeds up installation): +* The `libffi` library and development headers - this is commonly in the `libffi-dev` or `libffi-devel` packages + +The ffi gem comes with a builtin libffi version, which is used, when the system libffi library is not available or too old. 
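+A minimal sketch of the struct and callback features listed above, using only libc (`gettimeofday`, `qsort`) so it stays self-contained; the `TimeVal` and `Sys` names are illustrative, not part of the gem:
+
+```ruby
+require 'ffi'
+
+# Illustrative wrapper module; only the libc functions themselves are real.
+class TimeVal < FFI::Struct
+  layout :tv_sec,  :long,
+         :tv_usec, :long
+end
+
+module Sys
+  extend FFI::Library
+  ffi_lib FFI::Library::LIBC
+
+  # int gettimeofday(struct timeval *tv, struct timezone *tz);
+  attach_function :gettimeofday, [:pointer, :pointer], :int
+
+  # void qsort(void *base, size_t nmemb, size_t size,
+  #            int (*compar)(const void *, const void *));
+  callback :compar, [:pointer, :pointer], :int
+  attach_function :qsort, [:pointer, :size_t, :size_t, :compar], :void
+end
+
+tv = TimeVal.new
+Sys.gettimeofday(tv, nil)      # a Struct can be passed where a :pointer is expected
+puts tv[:tv_sec]
+
+ints = FFI::MemoryPointer.new(:int, 3)
+ints.write_array_of_int([3, 1, 2])
+cmp = FFI::Function.new(:int, [:pointer, :pointer]) { |a, b| a.read_int <=> b.read_int }
+Sys.qsort(ints, 3, ints.type_size, cmp)
+p ints.read_array_of_int(3)    # => [1, 2, 3]
+```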
+Use of the system libffi can be enforced by: +``` +gem install ffi -- --enable-system-libffi # to install the gem manually +bundle config build.ffi --enable-system-libffi # for bundle install +``` +or prevented by `--disable-system-libffi`. + +On Linux systems running with [PaX](https://en.wikipedia.org/wiki/PaX) (Gentoo, Alpine, etc.), FFI may trigger `mprotect` errors. You may need to disable [mprotect](https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options#Restrict_mprotect.28.29) for ruby (`paxctl -m [/path/to/ruby]`) for the time being until a solution is found. + +On FreeBSD systems pkgconf must be installed for the gem to be able to compile using clang. Install either via packages `pkg install pkgconf` or from ports via `devel/pkgconf`. + +On JRuby and TruffleRuby, there are no requirements to install the FFI gem, and `require 'ffi'` works even without installing the gem (i.e., the gem is preinstalled on these implementations). + +## Installation + +From rubygems: + + [sudo] gem install ffi + +or from the git repository on github: + + git clone git://github.com/ffi/ffi.git + git submodule update --init --recursive + cd ffi + rake install + +## License + +The ffi library is covered by the BSD license, also see the LICENSE file. +The specs are covered by the same license as [ruby/spec](https://github.com/ruby/spec), the MIT license. + +## Credits + +The following people have submitted code, bug reports, or otherwise contributed to the success of this project: + +* Alban Peignier +* Aman Gupta +* Andrea Fazzi +* Andreas Niederl +* Andrew Cholakian +* Antonio Terceiro +* Benoit Daloze +* Brian Candler +* Brian D. Burns +* Bryan Kearney +* Charlie Savage +* Chikanaga Tomoyuki +* Hongli Lai +* Ian MacLeod +* Jake Douglas +* Jean-Dominique Morani +* Jeremy Hinegardner +* Jesús García Sáez +* Joe Khoobyar +* Jurij Smakov +* KISHIMOTO, Makoto +* Kim Burgestrand +* Lars Kanis +* Luc Heinrich +* Luis Lavena +* Matijs van Zuijlen +* Matthew King +* Mike Dalessio +* NARUSE, Yui +* Park Heesob +* Shin Yee +* Stephen Bannasch +* Suraj N. 
Kurapati +* Sylvain Daubert +* Victor Costan +* beoran@gmail.com +* ctide +* emboss +* hobophobe +* meh +* postmodern +* wycats@gmail.com +* Wayne Meissner diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/Rakefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/Rakefile new file mode 100644 index 0000000..ce27f29 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/Rakefile @@ -0,0 +1,177 @@ +require 'rubygems/tasks' +require 'rbconfig' +require 'rake/clean' +require_relative "lib/ffi/version" + +require 'date' +require 'fileutils' +require 'rbconfig' +require 'rspec/core/rake_task' +require 'rubygems/package_task' + +BUILD_DIR = "build" +BUILD_EXT_DIR = File.join(BUILD_DIR, "#{RbConfig::CONFIG['arch']}", 'ffi_c', RUBY_VERSION) + +def gem_spec + @gem_spec ||= Gem::Specification.load('ffi.gemspec') +end + +RSpec::Core::RakeTask.new(:spec => :compile) do |config| + config.rspec_opts = YAML.load_file 'spec/spec.opts' +end + +desc "Build all packages" +task :package => %w[ gem:java gem:windows ] + +CLOBBER.include 'lib/ffi/types.conf' +CLOBBER.include 'pkg' +CLOBBER.include 'log' + +CLEAN.include 'build' +CLEAN.include 'conftest.dSYM' +CLEAN.include 'spec/ffi/fixtures/libtest.{dylib,so,dll}' +CLEAN.include 'spec/ffi/fixtures/*.o' +CLEAN.include 'spec/ffi/embed-test/ext/*.{o,def}' +CLEAN.include 'spec/ffi/embed-test/ext/Makefile' +CLEAN.include "pkg/ffi-*-{mingw32,java}" +CLEAN.include 'lib/1.*' +CLEAN.include 'lib/2.*' + +task :distclean => :clobber + +desc "Test the extension" +task :test => [ :spec ] + + +namespace :bench do + ITER = ENV['ITER'] ? ENV['ITER'].to_i : 100000 + bench_files = Dir["bench/bench_*.rb"].sort.reject { |f| f == "bench/bench_helper.rb" } + bench_files.each do |bench| + task File.basename(bench, ".rb")[6..-1] => :compile do + sh %{#{Gem.ruby} #{bench} #{ITER}} + end + end + task :all => :compile do + bench_files.each do |bench| + sh %{#{Gem.ruby} #{bench}} + end + end +end + +task 'spec:run' => :compile +task 'spec:specdoc' => :compile + +task :default => :spec + +namespace 'java' do + + java_gem_spec = gem_spec.dup.tap do |s| + s.files.reject! { |f| File.fnmatch?("ext/*", f) } + s.extensions = [] + s.platform = 'java' + end + + Gem::PackageTask.new(java_gem_spec) do |pkg| + pkg.need_zip = true + pkg.need_tar = true + pkg.package_dir = 'pkg' + end +end + +task 'gem:java' => 'java:gem' + +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + require 'rake/extensiontask' + Rake::ExtensionTask.new('ffi_c', gem_spec) do |ext| + ext.name = 'ffi_c' # indicate the name of the extension. + # ext.lib_dir = BUILD_DIR # put binaries into this folder. + ext.tmp_dir = BUILD_DIR # temporary folder used during compilation. + ext.cross_compile = true # enable cross compilation (requires cross compile toolchain) + ext.cross_platform = %w[i386-mingw32 x64-mingw32] # forces the Windows platform instead of the default one + ext.cross_compiling do |spec| + spec.files.reject! 
{ |path| File.fnmatch?('ext/*', path) } + end + end + + # To reduce the gem file size strip mingw32 dlls before packaging + ENV['RUBY_CC_VERSION'].to_s.split(':').each do |ruby_version| + task "build/i386-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" do |t| + sh "i686-w64-mingw32-strip -S build/i386-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" + end + + task "build/x64-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" do |t| + sh "x86_64-w64-mingw32-strip -S build/x64-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" + end + end +else + task :compile do + STDERR.puts "Nothing to compile on #{RUBY_ENGINE}" + end +end + +desc "build a windows gem without all the ceremony" +task "gem:windows" do + require "rake_compiler_dock" + sh "bundle package" + RakeCompilerDock.sh "sudo apt-get update && sudo apt-get install -y libltdl-dev && bundle --local && rake cross native gem MAKE='nice make -j`nproc`' RUBY_CC_VERSION=${RUBY_CC_VERSION/:2.2.2/}" +end + +directory "ext/ffi_c/libffi" +file "ext/ffi_c/libffi/autogen.sh" => "ext/ffi_c/libffi" do + warn "Downloading libffi ..." + sh "git submodule update --init --recursive" +end +task :libffi => "ext/ffi_c/libffi/autogen.sh" + +LIBFFI_GIT_FILES = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + +# Generate files in gemspec but not in libffi's git repo by running autogen.sh +gem_spec.files.select do |f| + f =~ /ext\/ffi_c\/libffi\/(.*)/ && !LIBFFI_GIT_FILES.include?($1) +end.each do |f| + file f => "ext/ffi_c/libffi/autogen.sh" do + chdir "ext/ffi_c/libffi" do + sh "sh ./autogen.sh" + end + touch f + if gem_spec.files != Gem::Specification.load('./ffi.gemspec').files + warn "gemspec files have changed -> Please restart rake!" + exit 1 + end + end +end + +require_relative "lib/ffi/platform" +types_conf = File.expand_path(File.join(FFI::Platform::CONF_DIR, 'types.conf')) +logfile = File.join(File.dirname(__FILE__), 'types_log') + +task types_conf do |task| + require 'fileutils' + require_relative "lib/ffi/tools/types_generator" + options = {} + FileUtils.mkdir_p(File.dirname(task.name), mode: 0755 ) + File.open(task.name, File::CREAT|File::TRUNC|File::RDWR, 0644) do |f| + f.puts FFI::TypesGenerator.generate(options) + end + File.open(logfile, 'w') do |log| + log.puts(types_conf) + end +end + +desc "Create or update type information for platform #{FFI::Platform::NAME}" +task :types_conf => types_conf + +Gem::Tasks.new do |t| + t.scm.tag.format = '%s' +end + +begin + require 'yard' + + namespace :doc do + YARD::Rake::YardocTask.new do |yard| + end + end +rescue LoadError + warn "[warn] YARD unavailable" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory 2.c new file mode 100644 index 0000000..f25c709 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory 2.c @@ -0,0 +1,1105 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Jake Douglas + * Copyright (C) 2008 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif + +#include +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Function.h" +#include "LongDouble.h" + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +static inline char* memory_address(VALUE self); +VALUE rbffi_AbstractMemoryClass = Qnil; +static VALUE NullPointerErrorClass = Qnil; +static ID id_to_ptr = 0, id_plus = 0, id_call = 0; + +static VALUE +memory_allocate(VALUE klass) +{ + AbstractMemory* memory; + VALUE obj; + obj = Data_Make_Struct(klass, AbstractMemory, NULL, -1, memory); + memory->flags = MEM_RD | MEM_WR; + + return obj; +} +#define VAL(x, swap) (unlikely(((memory->flags & MEM_SWAP) != 0)) ? 
swap((x)) : (x)) + +#define NUM_OP(name, type, toNative, fromNative, swap) \ +static void memory_op_put_##name(AbstractMemory* memory, long off, VALUE value); \ +static void \ +memory_op_put_##name(AbstractMemory* memory, long off, VALUE value) \ +{ \ + type tmp = (type) VAL(toNative(value), swap); \ + checkWrite(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(memory->address + off, &tmp, sizeof(tmp)); \ +} \ +static VALUE memory_put_##name(VALUE self, VALUE offset, VALUE value); \ +static VALUE \ +memory_put_##name(VALUE self, VALUE offset, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, NUM2LONG(offset), value); \ + return self; \ +} \ +static VALUE memory_write_##name(VALUE self, VALUE value); \ +static VALUE \ +memory_write_##name(VALUE self, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, 0, value); \ + return self; \ +} \ +static VALUE memory_op_get_##name(AbstractMemory* memory, long off); \ +static VALUE \ +memory_op_get_##name(AbstractMemory* memory, long off) \ +{ \ + type tmp; \ + checkRead(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(&tmp, memory->address + off, sizeof(tmp)); \ + return fromNative(VAL(tmp, swap)); \ +} \ +static VALUE memory_get_##name(VALUE self, VALUE offset); \ +static VALUE \ +memory_get_##name(VALUE self, VALUE offset) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, NUM2LONG(offset)); \ +} \ +static VALUE memory_read_##name(VALUE self); \ +static VALUE \ +memory_read_##name(VALUE self) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, 0); \ +} \ +static MemoryOp memory_op_##name = { memory_op_get_##name, memory_op_put_##name }; \ +\ +static VALUE memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary); \ +static VALUE \ +memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary) \ +{ \ + long count = RARRAY_LEN(ary); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + long i; \ + checkWrite(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; i++) { \ + type tmp = (type) VAL(toNative(RARRAY_PTR(ary)[i]), swap); \ + memcpy(memory->address + off + (i * sizeof(type)), &tmp, sizeof(tmp)); \ + } \ + return self; \ +} \ +static VALUE memory_write_array_of_##name(VALUE self, VALUE ary); \ +static VALUE \ +memory_write_array_of_##name(VALUE self, VALUE ary) \ +{ \ + return memory_put_array_of_##name(self, INT2FIX(0), ary); \ +} \ +static VALUE memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length); \ +static VALUE \ +memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length) \ +{ \ + long count = NUM2LONG(length); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + VALUE retVal = rb_ary_new2(count); \ + long i; \ + checkRead(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; ++i) { \ + type tmp; \ + memcpy(&tmp, memory->address + off + (i * sizeof(type)), sizeof(tmp)); \ + rb_ary_push(retVal, fromNative(VAL(tmp, swap))); \ + } \ + return retVal; \ +} \ +static VALUE memory_read_array_of_##name(VALUE self, VALUE length); \ +static VALUE \ +memory_read_array_of_##name(VALUE self, VALUE length) \ +{ \ + return memory_get_array_of_##name(self, INT2FIX(0), length); \ 
+} + +#define NOSWAP(x) (x) +#define bswap16(x) (((x) >> 8) & 0xff) | (((x) << 8) & 0xff00); +static inline int16_t +SWAPS16(int16_t x) +{ + return bswap16(x); +} + +static inline uint16_t +SWAPU16(uint16_t x) +{ + return bswap16(x); +} + +#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#define bswap32(x) \ + (((x << 24) & 0xff000000) | \ + ((x << 8) & 0x00ff0000) | \ + ((x >> 8) & 0x0000ff00) | \ + ((x >> 24) & 0x000000ff)) + +#define bswap64(x) \ + (((x << 56) & 0xff00000000000000ULL) | \ + ((x << 40) & 0x00ff000000000000ULL) | \ + ((x << 24) & 0x0000ff0000000000ULL) | \ + ((x << 8) & 0x000000ff00000000ULL) | \ + ((x >> 8) & 0x00000000ff000000ULL) | \ + ((x >> 24) & 0x0000000000ff0000ULL) | \ + ((x >> 40) & 0x000000000000ff00ULL) | \ + ((x >> 56) & 0x00000000000000ffULL)) + +static inline int32_t +SWAPS32(int32_t x) +{ + return bswap32(x); +} + +static inline uint32_t +SWAPU32(uint32_t x) +{ + return bswap32(x); +} + +static inline int64_t +SWAPS64(int64_t x) +{ + return bswap64(x); +} + +static inline uint64_t +SWAPU64(uint64_t x) +{ + return bswap64(x); +} + +#else +# define SWAPS32(x) ((int32_t) __builtin_bswap32(x)) +# define SWAPU32(x) ((uint32_t) __builtin_bswap32(x)) +# define SWAPS64(x) ((int64_t) __builtin_bswap64(x)) +# define SWAPU64(x) ((uint64_t) __builtin_bswap64(x)) +#endif + +#if LONG_MAX > INT_MAX +# define SWAPSLONG SWAPS64 +# define SWAPULONG SWAPU64 +#else +# define SWAPSLONG SWAPS32 +# define SWAPULONG SWAPU32 +#endif + +NUM_OP(int8, int8_t, NUM2INT, INT2NUM, NOSWAP); +NUM_OP(uint8, uint8_t, NUM2UINT, UINT2NUM, NOSWAP); +NUM_OP(int16, int16_t, NUM2INT, INT2NUM, SWAPS16); +NUM_OP(uint16, uint16_t, NUM2UINT, UINT2NUM, SWAPU16); +NUM_OP(int32, int32_t, NUM2INT, INT2NUM, SWAPS32); +NUM_OP(uint32, uint32_t, NUM2UINT, UINT2NUM, SWAPU32); +NUM_OP(int64, int64_t, NUM2LL, LL2NUM, SWAPS64); +NUM_OP(uint64, uint64_t, NUM2ULL, ULL2NUM, SWAPU64); +NUM_OP(long, long, NUM2LONG, LONG2NUM, SWAPSLONG); +NUM_OP(ulong, unsigned long, NUM2ULONG, ULONG2NUM, SWAPULONG); +NUM_OP(float32, float, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(float64, double, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(longdouble, long double, rbffi_num2longdouble, rbffi_longdouble_new, NOSWAP); + +static inline void* +get_pointer_value(VALUE value) +{ + const int type = TYPE(value); + if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_PointerClass)) { + return memory_address(value); + } else if (type == T_NIL) { + return NULL; + } else if (type == T_FIXNUM) { + return (void *) (uintptr_t) FIX2ULONG(value); + } else if (type == T_BIGNUM) { + return (void *) (uintptr_t) NUM2ULL(value); + } else if (rb_respond_to(value, id_to_ptr)) { + return MEMORY_PTR(rb_funcall2(value, id_to_ptr, 0, NULL)); + } else { + rb_raise(rb_eArgError, "value is not a pointer"); + return NULL; + } +} + +NUM_OP(pointer, void *, get_pointer_value, rbffi_Pointer_NewInstance, NOSWAP); + +static inline uint8_t +rbffi_bool_value(VALUE value) +{ + return RTEST(value); +} + +static inline VALUE +rbffi_bool_new(uint8_t value) +{ + return (value & 1) != 0 ? Qtrue : Qfalse; +} + +NUM_OP(bool, unsigned char, rbffi_bool_value, rbffi_bool_new, NOSWAP); + + +/* + * call-seq: memory.clear + * Set the memory to all-zero. 
+ * @return [self] + */ +static VALUE +memory_clear(VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + memset(ptr->address, 0, ptr->size); + return self; +} + +/* + * call-seq: memory.size + * Return memory size in bytes (alias: #total) + * @return [Numeric] + */ +static VALUE +memory_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return LONG2NUM(ptr->size); +} + +/* + * call-seq: memory.get(type, offset) + * Return data of given type contained in memory. + * @param [Symbol, Type] type_name type of data to get + * @param [Numeric] offset point in buffer to start from + * @return [Object] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_get(VALUE self, VALUE type_name, VALUE offset) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + return op->get(ptr, NUM2LONG(offset)); + +undefined_type: { + VALUE msg = rb_sprintf("undefined type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.put(type, offset, value) + * @param [Symbol, Type] type_name type of data to put + * @param [Numeric] offset point in buffer to start from + * @return [nil] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_put(VALUE self, VALUE type_name, VALUE offset, VALUE value) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + op->put(ptr, NUM2LONG(offset), value); + return Qnil; + +undefined_type: { + VALUE msg = rb_sprintf("unsupported type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.get_string(offset, length=nil) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_string(int argc, VALUE* argv, VALUE self) +{ + VALUE length = Qnil, offset = Qnil; + AbstractMemory* ptr = MEMORY(self); + long off, len; + char* end; + int nargs = rb_scan_args(argc, argv, "11", &offset, &length); + + off = NUM2LONG(offset); + len = nargs > 1 && length != Qnil ? NUM2LONG(length) : (ptr->size - off); + checkRead(ptr); + checkBounds(ptr, off, len); + + end = memchr(ptr->address + off, 0, len); + return rb_str_new((char *) ptr->address + off, + (end != NULL ? end - ptr->address - off : len)); +} + +/* + * call-seq: memory.get_array_of_string(offset, count=nil) + * Return an array of strings contained in memory. + * @param [Numeric] offset point in memory to start from + * @param [Numeric] count number of strings to get. 
If nil, return all strings + * @return [Array] + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE offset = Qnil, countnum = Qnil, retVal = Qnil; + AbstractMemory* ptr; + long off; + int count; + + rb_scan_args(argc, argv, "11", &offset, &countnum); + off = NUM2LONG(offset); + count = (countnum == Qnil ? 0 : NUM2INT(countnum)); + retVal = rb_ary_new2(count); + + Data_Get_Struct(self, AbstractMemory, ptr); + checkRead(ptr); + + if (countnum != Qnil) { + int i; + + checkBounds(ptr, off, count * sizeof (char*)); + + for (i = 0; i < count; ++i) { + const char* strptr = *((const char**) (ptr->address + off) + i); + rb_ary_push(retVal, (strptr == NULL ? Qnil : rb_str_new2(strptr))); + } + + } else { + checkBounds(ptr, off, sizeof (char*)); + for ( ; off < ptr->size - (long) sizeof (void *); off += (long) sizeof (void *)) { + const char* strptr = *(const char**) (ptr->address + off); + if (strptr == NULL) { + break; + } + rb_ary_push(retVal, rb_str_new2(strptr)); + } + } + + return retVal; +} + +/* + * call-seq: memory.read_array_of_string(count=nil) + * Return an array of strings contained in memory. Same as: + * memory.get_array_of_string(0, count) + * @param [Numeric] count number of strings to get. If nil, return all strings + * @return [Array] + */ +static VALUE +memory_read_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE* rargv = ALLOCA_N(VALUE, argc + 1); + int i; + + rargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + rargv[i + 1] = argv[i]; + } + + return memory_get_array_of_string(argc + 1, rargv, self); +} + + +/* + * call-seq: memory.put_string(offset, str) + * @param [Numeric] offset + * @param [String] str + * @return [self] + * @raise {SecurityError} when writing unsafe string to memory + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + * Put a string in memory. + */ +static VALUE +memory_put_string(VALUE self, VALUE offset, VALUE str) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + Check_Type(str, T_STRING); + off = NUM2LONG(offset); + len = RSTRING_LEN(str); + + checkWrite(ptr); + checkBounds(ptr, off, len + 1); + + memcpy(ptr->address + off, RSTRING_PTR(str), len); + *((char *) ptr->address + off + len) = '\0'; + + return self; +} + +/* + * call-seq: memory.get_bytes(offset, length) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. + * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_bytes(VALUE self, VALUE offset, VALUE length) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + off = NUM2LONG(offset); + len = NUM2LONG(length); + + checkRead(ptr); + checkBounds(ptr, off, len); + + return rb_str_new((char *) ptr->address + off, len); +} + +/* + * call-seq: memory.put_bytes(offset, str, index=0, length=nil) + * Put a string in memory. + * @param [Numeric] offset point in buffer to start from + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). 
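+ *
+ * A minimal sketch, assuming an FFI::MemoryPointer receiver:
+ *   buf = FFI::MemoryPointer.new(:char, 16)
+ *   buf.put_bytes(0, "hello world", 6, 5)   # copies "world" into buf at offset 0
+ *   buf.get_bytes(0, 5)                     # => "world"
+ *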
+ * @return [self] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + * @raise {RangeError} if +index+ is negative, or if index+length is greater than size of string + * @raise {SecurityError} when writing unsafe string to memory + */ +static VALUE +memory_put_bytes(int argc, VALUE* argv, VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + VALUE offset = Qnil, str = Qnil, rbIndex = Qnil, rbLength = Qnil; + long off, len, idx; + int nargs = rb_scan_args(argc, argv, "22", &offset, &str, &rbIndex, &rbLength); + + Check_Type(str, T_STRING); + + off = NUM2LONG(offset); + idx = nargs > 2 ? NUM2LONG(rbIndex) : 0; + if (idx < 0) { + rb_raise(rb_eRangeError, "index cannot be less than zero"); + return Qnil; + } + len = nargs > 3 ? NUM2LONG(rbLength) : (RSTRING_LEN(str) - idx); + if ((idx + len) > RSTRING_LEN(str)) { + rb_raise(rb_eRangeError, "index+length is greater than size of string"); + return Qnil; + } + + checkWrite(ptr); + checkBounds(ptr, off, len); + + memcpy(ptr->address + off, RSTRING_PTR(str) + idx, len); + + return self; +} + +/* + * call-seq: memory.read_bytes(length) + * @param [Numeric] length of string to return + * @return [String] + * equivalent to : + * memory.get_bytes(0, length) + */ +static VALUE +memory_read_bytes(VALUE self, VALUE length) +{ + return memory_get_bytes(self, INT2FIX(0), length); +} + +/* + * call-seq: memory.write_bytes(str, index=0, length=nil) + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [self] + * equivalent to : + * memory.put_bytes(0, str, index, length) + */ +static VALUE +memory_write_bytes(int argc, VALUE* argv, VALUE self) +{ + VALUE* wargv = ALLOCA_N(VALUE, argc + 1); + int i; + + wargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + wargv[i + 1] = argv[i]; + } + + return memory_put_bytes(argc + 1, wargv, self); +} + +/* + * call-seq: memory.type_size + * @return [Numeric] type size in bytes + * Get the memory's type size. + */ +static VALUE +memory_type_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return INT2NUM(ptr->typeSize); +} + +/* + * Document-method: [] + * call-seq: memory[idx] + * @param [Numeric] idx index to access in memory + * @return + * Memory read accessor. + */ +static VALUE +memory_aref(VALUE self, VALUE idx) +{ + AbstractMemory* ptr; + VALUE rbOffset = Qnil; + + Data_Get_Struct(self, AbstractMemory, ptr); + + rbOffset = ULONG2NUM(NUM2ULONG(idx) * ptr->typeSize); + + return rb_funcall2(self, id_plus, 1, &rbOffset); +} + +static inline char* +memory_address(VALUE obj) +{ + return ((AbstractMemory *) DATA_PTR(obj))->address; +} + +static VALUE +memory_copy_from(VALUE self, VALUE rbsrc, VALUE rblen) +{ + AbstractMemory* dst; + + Data_Get_Struct(self, AbstractMemory, dst); + + memcpy(dst->address, rbffi_AbstractMemory_Cast(rbsrc, rbffi_AbstractMemoryClass)->address, NUM2INT(rblen)); + + return self; +} + +AbstractMemory* +rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass) +{ + if (rb_obj_is_kind_of(obj, klass)) { + AbstractMemory* memory; + Data_Get_Struct(obj, AbstractMemory, memory); + return memory; + } + + rb_raise(rb_eArgError, "Invalid Memory object"); + return NULL; +} + +void +rbffi_AbstractMemory_Error(AbstractMemory *mem, int op) +{ + VALUE rbErrorClass = mem->address == NULL ? 
NullPointerErrorClass : rb_eRuntimeError; + if (op == MEM_RD) { + rb_raise(rbErrorClass, "invalid memory read at address=%p", mem->address); + } else if (op == MEM_WR) { + rb_raise(rbErrorClass, "invalid memory write at address=%p", mem->address); + } else { + rb_raise(rbErrorClass, "invalid memory access at address=%p", mem->address); + } +} + +static VALUE +memory_op_get_strptr(AbstractMemory* ptr, long offset) +{ + void* tmp = NULL; + + if (ptr != NULL && ptr->address != NULL) { + checkRead(ptr); + checkBounds(ptr, offset, sizeof(tmp)); + memcpy(&tmp, ptr->address + offset, sizeof(tmp)); + } + + return tmp != NULL ? rb_str_new2(tmp) : Qnil; +} + +static void +memory_op_put_strptr(AbstractMemory* ptr, long offset, VALUE value) +{ + rb_raise(rb_eArgError, "Cannot set :string fields"); +} + +static MemoryOp memory_op_strptr = { memory_op_get_strptr, memory_op_put_strptr }; + + +MemoryOps rbffi_AbstractMemoryOps = { + &memory_op_int8, /*.int8 */ + &memory_op_uint8, /* .uint8 */ + &memory_op_int16, /* .int16 */ + &memory_op_uint16, /* .uint16 */ + &memory_op_int32, /* .int32 */ + &memory_op_uint32, /* .uint32 */ + &memory_op_int64, /* .int64 */ + &memory_op_uint64, /* .uint64 */ + &memory_op_long, /* .slong */ + &memory_op_ulong, /* .uslong */ + &memory_op_float32, /* .float32 */ + &memory_op_float64, /* .float64 */ + &memory_op_longdouble, /* .longdouble */ + &memory_op_pointer, /* .pointer */ + &memory_op_strptr, /* .strptr */ + &memory_op_bool /* .boolOp */ +}; + +void +rbffi_AbstractMemory_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::AbstractMemory + * + * {AbstractMemory} is the base class for many memory management classes such as {Buffer}. + * + * This class has a lot of methods to work with integers : + * * put_intsize(offset, value) + * * get_intsize(offset) + * * put_uintsize(offset, value) + * * get_uintsize(offset) + * * writeuintsize(value) + * * read_intsize + * * write_uintsize(value) + * * read_uintsize + * * put_array_of_intsize(offset, ary) + * * get_array_of_intsize(offset, length) + * * put_array_of_uintsize(offset, ary) + * * get_array_of_uintsize(offset, length) + * * write_array_of_intsize(ary) + * * read_array_of_intsize(length) + * * write_array_of_uintsize(ary) + * * read_array_of_uintsize(length) + * where _size_ is 8, 16, 32 or 64. Same methods exist for long type. + * + * Aliases exist : _char_ for _int8_, _short_ for _int16_, _int_ for _int32_ and long_long for _int64_. + * + * Others methods are listed below. 
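+ *
+ * A minimal usage sketch of the accessors above, assuming an FFI::MemoryPointer
+ * (one concrete AbstractMemory subclass); all offsets are byte offsets:
+ *
+ *   ptr = FFI::MemoryPointer.new(:int32, 4)   # room for four 32-bit ints
+ *   ptr.put_int32(0, 42)                      # put_*/get_* take an explicit offset
+ *   ptr.get_int32(0)                          # => 42
+ *   ptr.write_array_of_int32([1, 2, 3, 4])    # write_*/read_* start at offset 0
+ *   ptr.read_array_of_int32(4)                # => [1, 2, 3, 4]
+ *   ptr.get_uint16(0)                         # unsigned variants carry a "u" prefix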
+ */ + VALUE classMemory = rb_define_class_under(moduleFFI, "AbstractMemory", rb_cObject); + rbffi_AbstractMemoryClass = classMemory; + /* + * Document-variable: FFI::AbstractMemory + */ + rb_global_variable(&rbffi_AbstractMemoryClass); + rb_define_alloc_func(classMemory, memory_allocate); + + NullPointerErrorClass = rb_define_class_under(moduleFFI, "NullPointerError", rb_eRuntimeError); + /* Document-variable: NullPointerError */ + rb_global_variable(&NullPointerErrorClass); + + +#undef INT +#define INT(type) \ + rb_define_method(classMemory, "put_" #type, memory_put_##type, 2); \ + rb_define_method(classMemory, "get_" #type, memory_get_##type, 1); \ + rb_define_method(classMemory, "put_u" #type, memory_put_u##type, 2); \ + rb_define_method(classMemory, "get_u" #type, memory_get_u##type, 1); \ + rb_define_method(classMemory, "write_" #type, memory_write_##type, 1); \ + rb_define_method(classMemory, "read_" #type, memory_read_##type, 0); \ + rb_define_method(classMemory, "write_u" #type, memory_write_u##type, 1); \ + rb_define_method(classMemory, "read_u" #type, memory_read_u##type, 0); \ + rb_define_method(classMemory, "put_array_of_" #type, memory_put_array_of_##type, 2); \ + rb_define_method(classMemory, "get_array_of_" #type, memory_get_array_of_##type, 2); \ + rb_define_method(classMemory, "put_array_of_u" #type, memory_put_array_of_u##type, 2); \ + rb_define_method(classMemory, "get_array_of_u" #type, memory_get_array_of_u##type, 2); \ + rb_define_method(classMemory, "write_array_of_" #type, memory_write_array_of_##type, 1); \ + rb_define_method(classMemory, "read_array_of_" #type, memory_read_array_of_##type, 1); \ + rb_define_method(classMemory, "write_array_of_u" #type, memory_write_array_of_u##type, 1); \ + rb_define_method(classMemory, "read_array_of_u" #type, memory_read_array_of_u##type, 1); + + INT(int8); + INT(int16); + INT(int32); + INT(int64); + INT(long); + +#define ALIAS(name, old) \ + rb_define_alias(classMemory, "put_" #name, "put_" #old); \ + rb_define_alias(classMemory, "get_" #name, "get_" #old); \ + rb_define_alias(classMemory, "put_u" #name, "put_u" #old); \ + rb_define_alias(classMemory, "get_u" #name, "get_u" #old); \ + rb_define_alias(classMemory, "write_" #name, "write_" #old); \ + rb_define_alias(classMemory, "read_" #name, "read_" #old); \ + rb_define_alias(classMemory, "write_u" #name, "write_u" #old); \ + rb_define_alias(classMemory, "read_u" #name, "read_u" #old); \ + rb_define_alias(classMemory, "put_array_of_" #name, "put_array_of_" #old); \ + rb_define_alias(classMemory, "get_array_of_" #name, "get_array_of_" #old); \ + rb_define_alias(classMemory, "put_array_of_u" #name, "put_array_of_u" #old); \ + rb_define_alias(classMemory, "get_array_of_u" #name, "get_array_of_u" #old); \ + rb_define_alias(classMemory, "write_array_of_" #name, "write_array_of_" #old); \ + rb_define_alias(classMemory, "read_array_of_" #name, "read_array_of_" #old); \ + rb_define_alias(classMemory, "write_array_of_u" #name, "write_array_of_u" #old); \ + rb_define_alias(classMemory, "read_array_of_u" #name, "read_array_of_u" #old); + + ALIAS(char, int8); + ALIAS(short, int16); + ALIAS(int, int32); + ALIAS(long_long, int64); + + /* + * Document-method: put_float32 + * call-seq: memory.put_float32offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 32-bit float in memory at offset +offset+ (alias: #put_float). 
+ */ + rb_define_method(classMemory, "put_float32", memory_put_float32, 2); + /* + * Document-method: get_float32 + * call-seq: memory.get_float32(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 32-bit float from memory at offset +offset+ (alias: #get_float). + */ + rb_define_method(classMemory, "get_float32", memory_get_float32, 1); + rb_define_alias(classMemory, "put_float", "put_float32"); + rb_define_alias(classMemory, "get_float", "get_float32"); + /* + * Document-method: write_float + * call-seq: memory.write_float(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 32-bit float in memory. + * + * Same as: + * memory.put_float(0, value) + */ + rb_define_method(classMemory, "write_float", memory_write_float32, 1); + /* + * Document-method: read_float + * call-seq: memory.read_float + * @return [Float] + * Read a 32-bit float from memory. + * + * Same as: + * memory.get_float(0) + */ + rb_define_method(classMemory, "read_float", memory_read_float32, 0); + /* + * Document-method: put_array_of_float32 + * call-seq: memory.put_array_of_float32(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 32-bit floats in memory from offset +offset+ (alias: #put_array_of_float). + */ + rb_define_method(classMemory, "put_array_of_float32", memory_put_array_of_float32, 2); + /* + * Document-method: get_array_of_float32 + * call-seq: memory.get_array_of_float32(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 32-bit floats in memory from offset +offset+ (alias: #get_array_of_float). + */ + rb_define_method(classMemory, "get_array_of_float32", memory_get_array_of_float32, 2); + /* + * Document-method: write_array_of_float + * call-seq: memory.write_array_of_float(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 32-bit floats in memory. + * + * Same as: + * memory.put_array_of_float(0, ary) + */ + rb_define_method(classMemory, "write_array_of_float", memory_write_array_of_float32, 1); + /* + * Document-method: read_array_of_float + * call-seq: memory.read_array_of_float(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 32-bit floats from memory. + * + * Same as: + * memory.get_array_of_float(0, ary) + */ + rb_define_method(classMemory, "read_array_of_float", memory_read_array_of_float32, 1); + rb_define_alias(classMemory, "put_array_of_float", "put_array_of_float32"); + rb_define_alias(classMemory, "get_array_of_float", "get_array_of_float32"); + /* + * Document-method: put_float64 + * call-seq: memory.put_float64(offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 64-bit float (double) in memory at offset +offset+ (alias: #put_double). + */ + rb_define_method(classMemory, "put_float64", memory_put_float64, 2); + /* + * Document-method: get_float64 + * call-seq: memory.get_float64(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 64-bit float (double) from memory at offset +offset+ (alias: #get_double). + */ + rb_define_method(classMemory, "get_float64", memory_get_float64, 1); + rb_define_alias(classMemory, "put_double", "put_float64"); + rb_define_alias(classMemory, "get_double", "get_float64"); + /* + * Document-method: write_double + * call-seq: memory.write_double(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 64-bit float (double) in memory. 
+ * + * Same as: + * memory.put_double(0, value) + */ + rb_define_method(classMemory, "write_double", memory_write_float64, 1); + /* + * Document-method: read_double + * call-seq: memory.read_double + * @return [Float] + * Read a 64-bit float (double) from memory. + * + * Same as: + * memory.get_double(0) + */ + rb_define_method(classMemory, "read_double", memory_read_float64, 0); + /* + * Document-method: put_array_of_float64 + * call-seq: memory.put_array_of_float64(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 64-bit floats (doubles) in memory from offset +offset+ (alias: #put_array_of_double). + */ + rb_define_method(classMemory, "put_array_of_float64", memory_put_array_of_float64, 2); + /* + * Document-method: get_array_of_float64 + * call-seq: memory.get_array_of_float64(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 64-bit floats (doubles) in memory from offset +offset+ (alias: #get_array_of_double). + */ + rb_define_method(classMemory, "get_array_of_float64", memory_get_array_of_float64, 2); + /* + * Document-method: write_array_of_double + * call-seq: memory.write_array_of_double(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 64-bit floats (doubles) in memory. + * + * Same as: + * memory.put_array_of_double(0, ary) + */ + rb_define_method(classMemory, "write_array_of_double", memory_write_array_of_float64, 1); + /* + * Document-method: read_array_of_double + * call-seq: memory.read_array_of_double(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 64-bit floats (doubles) from memory. + * + * Same as: + * memory.get_array_of_double(0, ary) + */ + rb_define_method(classMemory, "read_array_of_double", memory_read_array_of_float64, 1); + rb_define_alias(classMemory, "put_array_of_double", "put_array_of_float64"); + rb_define_alias(classMemory, "get_array_of_double", "get_array_of_float64"); + /* + * Document-method: put_pointer + * call-seq: memory.put_pointer(offset, value) + * @param [Numeric] offset + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Put +value+ in memory from +offset+.. + */ + rb_define_method(classMemory, "put_pointer", memory_put_pointer, 2); + /* + * Document-method: get_pointer + * call-seq: memory.get_pointer(offset) + * @param [Numeric] offset + * @return [Pointer] + * Get a {Pointer} to the memory from +offset+. + */ + rb_define_method(classMemory, "get_pointer", memory_get_pointer, 1); + /* + * Document-method: write_pointer + * call-seq: memory.write_pointer(value) + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Write +value+ in memory. + * + * Equivalent to: + * memory.put_pointer(0, value) + */ + rb_define_method(classMemory, "write_pointer", memory_write_pointer, 1); + /* + * Document-method: read_pointer + * call-seq: memory.read_pointer + * @return [Pointer] + * Get a {Pointer} to the memory from base address. + * + * Equivalent to: + * memory.get_pointer(0) + */ + rb_define_method(classMemory, "read_pointer", memory_read_pointer, 0); + /* + * Document-method: put_array_of_pointer + * call-seq: memory.put_array_of_pointer(offset, ary) + * @param [Numeric] offset + * @param [Array<#to_ptr>] ary + * @return [self] + * Put an array of {Pointer} into memory from +offset+. 
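+ *
+ * A minimal sketch, assuming an FFI::MemoryPointer with two pointer-sized slots:
+ *   argv = FFI::MemoryPointer.new(:pointer, 2)
+ *   argv.put_array_of_pointer(0, [FFI::MemoryPointer.from_string("-v"),
+ *                                 FFI::MemoryPointer.from_string("input.txt")])
+ *   argv.get_array_of_pointer(0, 2).map(&:read_string)   # => ["-v", "input.txt"]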
+ */ + rb_define_method(classMemory, "put_array_of_pointer", memory_put_array_of_pointer, 2); + /* + * Document-method: get_array_of_pointer + * call-seq: memory.get_array_of_pointer(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Array] + * Get an array of {Pointer} of length +length+ from +offset+. + */ + rb_define_method(classMemory, "get_array_of_pointer", memory_get_array_of_pointer, 2); + /* + * Document-method: write_array_of_pointer + * call-seq: memory.write_array_of_pointer(ary) + * @param [Array<#to_ptr>] ary + * @return [self] + * Write an array of {Pointer} into memory from +offset+. + * + * Same as : + * memory.put_array_of_pointer(0, ary) + */ + rb_define_method(classMemory, "write_array_of_pointer", memory_write_array_of_pointer, 1); + /* + * Document-method: read_array_of_pointer + * call-seq: memory.read_array_of_pointer(length) + * @param [Numeric] length + * @return [Array] + * Read an array of {Pointer} of length +length+. + * + * Same as: + * memory.get_array_of_pointer(0, length) + */ + rb_define_method(classMemory, "read_array_of_pointer", memory_read_array_of_pointer, 1); + + rb_define_method(classMemory, "get_string", memory_get_string, -1); + rb_define_method(classMemory, "put_string", memory_put_string, 2); + rb_define_method(classMemory, "get_bytes", memory_get_bytes, 2); + rb_define_method(classMemory, "put_bytes", memory_put_bytes, -1); + rb_define_method(classMemory, "read_bytes", memory_read_bytes, 1); + rb_define_method(classMemory, "write_bytes", memory_write_bytes, -1); + rb_define_method(classMemory, "get_array_of_string", memory_get_array_of_string, -1); + + rb_define_method(classMemory, "get", memory_get, 2); + rb_define_method(classMemory, "put", memory_put, 3); + + rb_define_method(classMemory, "clear", memory_clear, 0); + rb_define_method(classMemory, "total", memory_size, 0); + rb_define_alias(classMemory, "size", "total"); + rb_define_method(classMemory, "type_size", memory_type_size, 0); + rb_define_method(classMemory, "[]", memory_aref, 1); + rb_define_method(classMemory, "__copy_from__", memory_copy_from, 2); + + id_to_ptr = rb_intern("to_ptr"); + id_call = rb_intern("call"); + id_plus = rb_intern("+"); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.c new file mode 100644 index 0000000..f25c709 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.c @@ -0,0 +1,1105 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Jake Douglas + * Copyright (C) 2008 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif + +#include +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Function.h" +#include "LongDouble.h" + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +static inline char* memory_address(VALUE self); +VALUE rbffi_AbstractMemoryClass = Qnil; +static VALUE NullPointerErrorClass = Qnil; +static ID id_to_ptr = 0, id_plus = 0, id_call = 0; + +static VALUE +memory_allocate(VALUE klass) +{ + AbstractMemory* memory; + VALUE obj; + obj = Data_Make_Struct(klass, AbstractMemory, NULL, -1, memory); + memory->flags = MEM_RD | MEM_WR; + + return obj; +} +#define VAL(x, swap) (unlikely(((memory->flags & MEM_SWAP) != 0)) ? 
swap((x)) : (x)) + +#define NUM_OP(name, type, toNative, fromNative, swap) \ +static void memory_op_put_##name(AbstractMemory* memory, long off, VALUE value); \ +static void \ +memory_op_put_##name(AbstractMemory* memory, long off, VALUE value) \ +{ \ + type tmp = (type) VAL(toNative(value), swap); \ + checkWrite(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(memory->address + off, &tmp, sizeof(tmp)); \ +} \ +static VALUE memory_put_##name(VALUE self, VALUE offset, VALUE value); \ +static VALUE \ +memory_put_##name(VALUE self, VALUE offset, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, NUM2LONG(offset), value); \ + return self; \ +} \ +static VALUE memory_write_##name(VALUE self, VALUE value); \ +static VALUE \ +memory_write_##name(VALUE self, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, 0, value); \ + return self; \ +} \ +static VALUE memory_op_get_##name(AbstractMemory* memory, long off); \ +static VALUE \ +memory_op_get_##name(AbstractMemory* memory, long off) \ +{ \ + type tmp; \ + checkRead(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(&tmp, memory->address + off, sizeof(tmp)); \ + return fromNative(VAL(tmp, swap)); \ +} \ +static VALUE memory_get_##name(VALUE self, VALUE offset); \ +static VALUE \ +memory_get_##name(VALUE self, VALUE offset) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, NUM2LONG(offset)); \ +} \ +static VALUE memory_read_##name(VALUE self); \ +static VALUE \ +memory_read_##name(VALUE self) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, 0); \ +} \ +static MemoryOp memory_op_##name = { memory_op_get_##name, memory_op_put_##name }; \ +\ +static VALUE memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary); \ +static VALUE \ +memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary) \ +{ \ + long count = RARRAY_LEN(ary); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + long i; \ + checkWrite(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; i++) { \ + type tmp = (type) VAL(toNative(RARRAY_PTR(ary)[i]), swap); \ + memcpy(memory->address + off + (i * sizeof(type)), &tmp, sizeof(tmp)); \ + } \ + return self; \ +} \ +static VALUE memory_write_array_of_##name(VALUE self, VALUE ary); \ +static VALUE \ +memory_write_array_of_##name(VALUE self, VALUE ary) \ +{ \ + return memory_put_array_of_##name(self, INT2FIX(0), ary); \ +} \ +static VALUE memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length); \ +static VALUE \ +memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length) \ +{ \ + long count = NUM2LONG(length); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + VALUE retVal = rb_ary_new2(count); \ + long i; \ + checkRead(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; ++i) { \ + type tmp; \ + memcpy(&tmp, memory->address + off + (i * sizeof(type)), sizeof(tmp)); \ + rb_ary_push(retVal, fromNative(VAL(tmp, swap))); \ + } \ + return retVal; \ +} \ +static VALUE memory_read_array_of_##name(VALUE self, VALUE length); \ +static VALUE \ +memory_read_array_of_##name(VALUE self, VALUE length) \ +{ \ + return memory_get_array_of_##name(self, INT2FIX(0), length); \ 
+} + +#define NOSWAP(x) (x) +#define bswap16(x) (((x) >> 8) & 0xff) | (((x) << 8) & 0xff00); +static inline int16_t +SWAPS16(int16_t x) +{ + return bswap16(x); +} + +static inline uint16_t +SWAPU16(uint16_t x) +{ + return bswap16(x); +} + +#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#define bswap32(x) \ + (((x << 24) & 0xff000000) | \ + ((x << 8) & 0x00ff0000) | \ + ((x >> 8) & 0x0000ff00) | \ + ((x >> 24) & 0x000000ff)) + +#define bswap64(x) \ + (((x << 56) & 0xff00000000000000ULL) | \ + ((x << 40) & 0x00ff000000000000ULL) | \ + ((x << 24) & 0x0000ff0000000000ULL) | \ + ((x << 8) & 0x000000ff00000000ULL) | \ + ((x >> 8) & 0x00000000ff000000ULL) | \ + ((x >> 24) & 0x0000000000ff0000ULL) | \ + ((x >> 40) & 0x000000000000ff00ULL) | \ + ((x >> 56) & 0x00000000000000ffULL)) + +static inline int32_t +SWAPS32(int32_t x) +{ + return bswap32(x); +} + +static inline uint32_t +SWAPU32(uint32_t x) +{ + return bswap32(x); +} + +static inline int64_t +SWAPS64(int64_t x) +{ + return bswap64(x); +} + +static inline uint64_t +SWAPU64(uint64_t x) +{ + return bswap64(x); +} + +#else +# define SWAPS32(x) ((int32_t) __builtin_bswap32(x)) +# define SWAPU32(x) ((uint32_t) __builtin_bswap32(x)) +# define SWAPS64(x) ((int64_t) __builtin_bswap64(x)) +# define SWAPU64(x) ((uint64_t) __builtin_bswap64(x)) +#endif + +#if LONG_MAX > INT_MAX +# define SWAPSLONG SWAPS64 +# define SWAPULONG SWAPU64 +#else +# define SWAPSLONG SWAPS32 +# define SWAPULONG SWAPU32 +#endif + +NUM_OP(int8, int8_t, NUM2INT, INT2NUM, NOSWAP); +NUM_OP(uint8, uint8_t, NUM2UINT, UINT2NUM, NOSWAP); +NUM_OP(int16, int16_t, NUM2INT, INT2NUM, SWAPS16); +NUM_OP(uint16, uint16_t, NUM2UINT, UINT2NUM, SWAPU16); +NUM_OP(int32, int32_t, NUM2INT, INT2NUM, SWAPS32); +NUM_OP(uint32, uint32_t, NUM2UINT, UINT2NUM, SWAPU32); +NUM_OP(int64, int64_t, NUM2LL, LL2NUM, SWAPS64); +NUM_OP(uint64, uint64_t, NUM2ULL, ULL2NUM, SWAPU64); +NUM_OP(long, long, NUM2LONG, LONG2NUM, SWAPSLONG); +NUM_OP(ulong, unsigned long, NUM2ULONG, ULONG2NUM, SWAPULONG); +NUM_OP(float32, float, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(float64, double, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(longdouble, long double, rbffi_num2longdouble, rbffi_longdouble_new, NOSWAP); + +static inline void* +get_pointer_value(VALUE value) +{ + const int type = TYPE(value); + if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_PointerClass)) { + return memory_address(value); + } else if (type == T_NIL) { + return NULL; + } else if (type == T_FIXNUM) { + return (void *) (uintptr_t) FIX2ULONG(value); + } else if (type == T_BIGNUM) { + return (void *) (uintptr_t) NUM2ULL(value); + } else if (rb_respond_to(value, id_to_ptr)) { + return MEMORY_PTR(rb_funcall2(value, id_to_ptr, 0, NULL)); + } else { + rb_raise(rb_eArgError, "value is not a pointer"); + return NULL; + } +} + +NUM_OP(pointer, void *, get_pointer_value, rbffi_Pointer_NewInstance, NOSWAP); + +static inline uint8_t +rbffi_bool_value(VALUE value) +{ + return RTEST(value); +} + +static inline VALUE +rbffi_bool_new(uint8_t value) +{ + return (value & 1) != 0 ? Qtrue : Qfalse; +} + +NUM_OP(bool, unsigned char, rbffi_bool_value, rbffi_bool_new, NOSWAP); + + +/* + * call-seq: memory.clear + * Set the memory to all-zero. 
+ * @return [self] + */ +static VALUE +memory_clear(VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + memset(ptr->address, 0, ptr->size); + return self; +} + +/* + * call-seq: memory.size + * Return memory size in bytes (alias: #total) + * @return [Numeric] + */ +static VALUE +memory_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return LONG2NUM(ptr->size); +} + +/* + * call-seq: memory.get(type, offset) + * Return data of given type contained in memory. + * @param [Symbol, Type] type_name type of data to get + * @param [Numeric] offset point in buffer to start from + * @return [Object] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_get(VALUE self, VALUE type_name, VALUE offset) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + return op->get(ptr, NUM2LONG(offset)); + +undefined_type: { + VALUE msg = rb_sprintf("undefined type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.put(type, offset, value) + * @param [Symbol, Type] type_name type of data to put + * @param [Numeric] offset point in buffer to start from + * @return [nil] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_put(VALUE self, VALUE type_name, VALUE offset, VALUE value) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + op->put(ptr, NUM2LONG(offset), value); + return Qnil; + +undefined_type: { + VALUE msg = rb_sprintf("unsupported type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.get_string(offset, length=nil) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_string(int argc, VALUE* argv, VALUE self) +{ + VALUE length = Qnil, offset = Qnil; + AbstractMemory* ptr = MEMORY(self); + long off, len; + char* end; + int nargs = rb_scan_args(argc, argv, "11", &offset, &length); + + off = NUM2LONG(offset); + len = nargs > 1 && length != Qnil ? NUM2LONG(length) : (ptr->size - off); + checkRead(ptr); + checkBounds(ptr, off, len); + + end = memchr(ptr->address + off, 0, len); + return rb_str_new((char *) ptr->address + off, + (end != NULL ? end - ptr->address - off : len)); +} + +/* + * call-seq: memory.get_array_of_string(offset, count=nil) + * Return an array of strings contained in memory. + * @param [Numeric] offset point in memory to start from + * @param [Numeric] count number of strings to get. 
If nil, return all strings + * @return [Array] + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE offset = Qnil, countnum = Qnil, retVal = Qnil; + AbstractMemory* ptr; + long off; + int count; + + rb_scan_args(argc, argv, "11", &offset, &countnum); + off = NUM2LONG(offset); + count = (countnum == Qnil ? 0 : NUM2INT(countnum)); + retVal = rb_ary_new2(count); + + Data_Get_Struct(self, AbstractMemory, ptr); + checkRead(ptr); + + if (countnum != Qnil) { + int i; + + checkBounds(ptr, off, count * sizeof (char*)); + + for (i = 0; i < count; ++i) { + const char* strptr = *((const char**) (ptr->address + off) + i); + rb_ary_push(retVal, (strptr == NULL ? Qnil : rb_str_new2(strptr))); + } + + } else { + checkBounds(ptr, off, sizeof (char*)); + for ( ; off < ptr->size - (long) sizeof (void *); off += (long) sizeof (void *)) { + const char* strptr = *(const char**) (ptr->address + off); + if (strptr == NULL) { + break; + } + rb_ary_push(retVal, rb_str_new2(strptr)); + } + } + + return retVal; +} + +/* + * call-seq: memory.read_array_of_string(count=nil) + * Return an array of strings contained in memory. Same as: + * memory.get_array_of_string(0, count) + * @param [Numeric] count number of strings to get. If nil, return all strings + * @return [Array] + */ +static VALUE +memory_read_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE* rargv = ALLOCA_N(VALUE, argc + 1); + int i; + + rargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + rargv[i + 1] = argv[i]; + } + + return memory_get_array_of_string(argc + 1, rargv, self); +} + + +/* + * call-seq: memory.put_string(offset, str) + * @param [Numeric] offset + * @param [String] str + * @return [self] + * @raise {SecurityError} when writing unsafe string to memory + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + * Put a string in memory. + */ +static VALUE +memory_put_string(VALUE self, VALUE offset, VALUE str) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + Check_Type(str, T_STRING); + off = NUM2LONG(offset); + len = RSTRING_LEN(str); + + checkWrite(ptr); + checkBounds(ptr, off, len + 1); + + memcpy(ptr->address + off, RSTRING_PTR(str), len); + *((char *) ptr->address + off + len) = '\0'; + + return self; +} + +/* + * call-seq: memory.get_bytes(offset, length) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. + * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_bytes(VALUE self, VALUE offset, VALUE length) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + off = NUM2LONG(offset); + len = NUM2LONG(length); + + checkRead(ptr); + checkBounds(ptr, off, len); + + return rb_str_new((char *) ptr->address + off, len); +} + +/* + * call-seq: memory.put_bytes(offset, str, index=0, length=nil) + * Put a string in memory. + * @param [Numeric] offset point in buffer to start from + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). 
+ * @return [self] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + * @raise {RangeError} if +index+ is negative, or if index+length is greater than size of string + * @raise {SecurityError} when writing unsafe string to memory + */ +static VALUE +memory_put_bytes(int argc, VALUE* argv, VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + VALUE offset = Qnil, str = Qnil, rbIndex = Qnil, rbLength = Qnil; + long off, len, idx; + int nargs = rb_scan_args(argc, argv, "22", &offset, &str, &rbIndex, &rbLength); + + Check_Type(str, T_STRING); + + off = NUM2LONG(offset); + idx = nargs > 2 ? NUM2LONG(rbIndex) : 0; + if (idx < 0) { + rb_raise(rb_eRangeError, "index cannot be less than zero"); + return Qnil; + } + len = nargs > 3 ? NUM2LONG(rbLength) : (RSTRING_LEN(str) - idx); + if ((idx + len) > RSTRING_LEN(str)) { + rb_raise(rb_eRangeError, "index+length is greater than size of string"); + return Qnil; + } + + checkWrite(ptr); + checkBounds(ptr, off, len); + + memcpy(ptr->address + off, RSTRING_PTR(str) + idx, len); + + return self; +} + +/* + * call-seq: memory.read_bytes(length) + * @param [Numeric] length of string to return + * @return [String] + * equivalent to : + * memory.get_bytes(0, length) + */ +static VALUE +memory_read_bytes(VALUE self, VALUE length) +{ + return memory_get_bytes(self, INT2FIX(0), length); +} + +/* + * call-seq: memory.write_bytes(str, index=0, length=nil) + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [self] + * equivalent to : + * memory.put_bytes(0, str, index, length) + */ +static VALUE +memory_write_bytes(int argc, VALUE* argv, VALUE self) +{ + VALUE* wargv = ALLOCA_N(VALUE, argc + 1); + int i; + + wargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + wargv[i + 1] = argv[i]; + } + + return memory_put_bytes(argc + 1, wargv, self); +} + +/* + * call-seq: memory.type_size + * @return [Numeric] type size in bytes + * Get the memory's type size. + */ +static VALUE +memory_type_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return INT2NUM(ptr->typeSize); +} + +/* + * Document-method: [] + * call-seq: memory[idx] + * @param [Numeric] idx index to access in memory + * @return + * Memory read accessor. + */ +static VALUE +memory_aref(VALUE self, VALUE idx) +{ + AbstractMemory* ptr; + VALUE rbOffset = Qnil; + + Data_Get_Struct(self, AbstractMemory, ptr); + + rbOffset = ULONG2NUM(NUM2ULONG(idx) * ptr->typeSize); + + return rb_funcall2(self, id_plus, 1, &rbOffset); +} + +static inline char* +memory_address(VALUE obj) +{ + return ((AbstractMemory *) DATA_PTR(obj))->address; +} + +static VALUE +memory_copy_from(VALUE self, VALUE rbsrc, VALUE rblen) +{ + AbstractMemory* dst; + + Data_Get_Struct(self, AbstractMemory, dst); + + memcpy(dst->address, rbffi_AbstractMemory_Cast(rbsrc, rbffi_AbstractMemoryClass)->address, NUM2INT(rblen)); + + return self; +} + +AbstractMemory* +rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass) +{ + if (rb_obj_is_kind_of(obj, klass)) { + AbstractMemory* memory; + Data_Get_Struct(obj, AbstractMemory, memory); + return memory; + } + + rb_raise(rb_eArgError, "Invalid Memory object"); + return NULL; +} + +void +rbffi_AbstractMemory_Error(AbstractMemory *mem, int op) +{ + VALUE rbErrorClass = mem->address == NULL ? 
NullPointerErrorClass : rb_eRuntimeError; + if (op == MEM_RD) { + rb_raise(rbErrorClass, "invalid memory read at address=%p", mem->address); + } else if (op == MEM_WR) { + rb_raise(rbErrorClass, "invalid memory write at address=%p", mem->address); + } else { + rb_raise(rbErrorClass, "invalid memory access at address=%p", mem->address); + } +} + +static VALUE +memory_op_get_strptr(AbstractMemory* ptr, long offset) +{ + void* tmp = NULL; + + if (ptr != NULL && ptr->address != NULL) { + checkRead(ptr); + checkBounds(ptr, offset, sizeof(tmp)); + memcpy(&tmp, ptr->address + offset, sizeof(tmp)); + } + + return tmp != NULL ? rb_str_new2(tmp) : Qnil; +} + +static void +memory_op_put_strptr(AbstractMemory* ptr, long offset, VALUE value) +{ + rb_raise(rb_eArgError, "Cannot set :string fields"); +} + +static MemoryOp memory_op_strptr = { memory_op_get_strptr, memory_op_put_strptr }; + + +MemoryOps rbffi_AbstractMemoryOps = { + &memory_op_int8, /*.int8 */ + &memory_op_uint8, /* .uint8 */ + &memory_op_int16, /* .int16 */ + &memory_op_uint16, /* .uint16 */ + &memory_op_int32, /* .int32 */ + &memory_op_uint32, /* .uint32 */ + &memory_op_int64, /* .int64 */ + &memory_op_uint64, /* .uint64 */ + &memory_op_long, /* .slong */ + &memory_op_ulong, /* .uslong */ + &memory_op_float32, /* .float32 */ + &memory_op_float64, /* .float64 */ + &memory_op_longdouble, /* .longdouble */ + &memory_op_pointer, /* .pointer */ + &memory_op_strptr, /* .strptr */ + &memory_op_bool /* .boolOp */ +}; + +void +rbffi_AbstractMemory_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::AbstractMemory + * + * {AbstractMemory} is the base class for many memory management classes such as {Buffer}. + * + * This class has a lot of methods to work with integers : + * * put_intsize(offset, value) + * * get_intsize(offset) + * * put_uintsize(offset, value) + * * get_uintsize(offset) + * * writeuintsize(value) + * * read_intsize + * * write_uintsize(value) + * * read_uintsize + * * put_array_of_intsize(offset, ary) + * * get_array_of_intsize(offset, length) + * * put_array_of_uintsize(offset, ary) + * * get_array_of_uintsize(offset, length) + * * write_array_of_intsize(ary) + * * read_array_of_intsize(length) + * * write_array_of_uintsize(ary) + * * read_array_of_uintsize(length) + * where _size_ is 8, 16, 32 or 64. Same methods exist for long type. + * + * Aliases exist : _char_ for _int8_, _short_ for _int16_, _int_ for _int32_ and long_long for _int64_. + * + * Others methods are listed below. 
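+ *
+ * A minimal usage sketch (illustrative; MemoryPointer is one concrete subclass
+ * provided elsewhere in the gem):
+ *   ptr = FFI::MemoryPointer.new(:int32, 4)
+ *   ptr.put_int32(0, 42)
+ *   ptr.get_int32(0)                   # => 42
+ *   ptr.write_array_of_int32([1, 2, 3, 4])
+ *   ptr.read_array_of_int32(4)         # => [1, 2, 3, 4]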
+ */ + VALUE classMemory = rb_define_class_under(moduleFFI, "AbstractMemory", rb_cObject); + rbffi_AbstractMemoryClass = classMemory; + /* + * Document-variable: FFI::AbstractMemory + */ + rb_global_variable(&rbffi_AbstractMemoryClass); + rb_define_alloc_func(classMemory, memory_allocate); + + NullPointerErrorClass = rb_define_class_under(moduleFFI, "NullPointerError", rb_eRuntimeError); + /* Document-variable: NullPointerError */ + rb_global_variable(&NullPointerErrorClass); + + +#undef INT +#define INT(type) \ + rb_define_method(classMemory, "put_" #type, memory_put_##type, 2); \ + rb_define_method(classMemory, "get_" #type, memory_get_##type, 1); \ + rb_define_method(classMemory, "put_u" #type, memory_put_u##type, 2); \ + rb_define_method(classMemory, "get_u" #type, memory_get_u##type, 1); \ + rb_define_method(classMemory, "write_" #type, memory_write_##type, 1); \ + rb_define_method(classMemory, "read_" #type, memory_read_##type, 0); \ + rb_define_method(classMemory, "write_u" #type, memory_write_u##type, 1); \ + rb_define_method(classMemory, "read_u" #type, memory_read_u##type, 0); \ + rb_define_method(classMemory, "put_array_of_" #type, memory_put_array_of_##type, 2); \ + rb_define_method(classMemory, "get_array_of_" #type, memory_get_array_of_##type, 2); \ + rb_define_method(classMemory, "put_array_of_u" #type, memory_put_array_of_u##type, 2); \ + rb_define_method(classMemory, "get_array_of_u" #type, memory_get_array_of_u##type, 2); \ + rb_define_method(classMemory, "write_array_of_" #type, memory_write_array_of_##type, 1); \ + rb_define_method(classMemory, "read_array_of_" #type, memory_read_array_of_##type, 1); \ + rb_define_method(classMemory, "write_array_of_u" #type, memory_write_array_of_u##type, 1); \ + rb_define_method(classMemory, "read_array_of_u" #type, memory_read_array_of_u##type, 1); + + INT(int8); + INT(int16); + INT(int32); + INT(int64); + INT(long); + +#define ALIAS(name, old) \ + rb_define_alias(classMemory, "put_" #name, "put_" #old); \ + rb_define_alias(classMemory, "get_" #name, "get_" #old); \ + rb_define_alias(classMemory, "put_u" #name, "put_u" #old); \ + rb_define_alias(classMemory, "get_u" #name, "get_u" #old); \ + rb_define_alias(classMemory, "write_" #name, "write_" #old); \ + rb_define_alias(classMemory, "read_" #name, "read_" #old); \ + rb_define_alias(classMemory, "write_u" #name, "write_u" #old); \ + rb_define_alias(classMemory, "read_u" #name, "read_u" #old); \ + rb_define_alias(classMemory, "put_array_of_" #name, "put_array_of_" #old); \ + rb_define_alias(classMemory, "get_array_of_" #name, "get_array_of_" #old); \ + rb_define_alias(classMemory, "put_array_of_u" #name, "put_array_of_u" #old); \ + rb_define_alias(classMemory, "get_array_of_u" #name, "get_array_of_u" #old); \ + rb_define_alias(classMemory, "write_array_of_" #name, "write_array_of_" #old); \ + rb_define_alias(classMemory, "read_array_of_" #name, "read_array_of_" #old); \ + rb_define_alias(classMemory, "write_array_of_u" #name, "write_array_of_u" #old); \ + rb_define_alias(classMemory, "read_array_of_u" #name, "read_array_of_u" #old); + + ALIAS(char, int8); + ALIAS(short, int16); + ALIAS(int, int32); + ALIAS(long_long, int64); + + /* + * Document-method: put_float32 + * call-seq: memory.put_float32offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 32-bit float in memory at offset +offset+ (alias: #put_float). 
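+ * For instance (illustrative): memory.put_float32(0, 1.5) followed by
+ * memory.get_float32(0) yields 1.5, since 1.5 is exactly representable as a 32-bit float.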
+ */ + rb_define_method(classMemory, "put_float32", memory_put_float32, 2); + /* + * Document-method: get_float32 + * call-seq: memory.get_float32(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 32-bit float from memory at offset +offset+ (alias: #get_float). + */ + rb_define_method(classMemory, "get_float32", memory_get_float32, 1); + rb_define_alias(classMemory, "put_float", "put_float32"); + rb_define_alias(classMemory, "get_float", "get_float32"); + /* + * Document-method: write_float + * call-seq: memory.write_float(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 32-bit float in memory. + * + * Same as: + * memory.put_float(0, value) + */ + rb_define_method(classMemory, "write_float", memory_write_float32, 1); + /* + * Document-method: read_float + * call-seq: memory.read_float + * @return [Float] + * Read a 32-bit float from memory. + * + * Same as: + * memory.get_float(0) + */ + rb_define_method(classMemory, "read_float", memory_read_float32, 0); + /* + * Document-method: put_array_of_float32 + * call-seq: memory.put_array_of_float32(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 32-bit floats in memory from offset +offset+ (alias: #put_array_of_float). + */ + rb_define_method(classMemory, "put_array_of_float32", memory_put_array_of_float32, 2); + /* + * Document-method: get_array_of_float32 + * call-seq: memory.get_array_of_float32(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 32-bit floats in memory from offset +offset+ (alias: #get_array_of_float). + */ + rb_define_method(classMemory, "get_array_of_float32", memory_get_array_of_float32, 2); + /* + * Document-method: write_array_of_float + * call-seq: memory.write_array_of_float(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 32-bit floats in memory. + * + * Same as: + * memory.put_array_of_float(0, ary) + */ + rb_define_method(classMemory, "write_array_of_float", memory_write_array_of_float32, 1); + /* + * Document-method: read_array_of_float + * call-seq: memory.read_array_of_float(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 32-bit floats from memory. + * + * Same as: + * memory.get_array_of_float(0, ary) + */ + rb_define_method(classMemory, "read_array_of_float", memory_read_array_of_float32, 1); + rb_define_alias(classMemory, "put_array_of_float", "put_array_of_float32"); + rb_define_alias(classMemory, "get_array_of_float", "get_array_of_float32"); + /* + * Document-method: put_float64 + * call-seq: memory.put_float64(offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 64-bit float (double) in memory at offset +offset+ (alias: #put_double). + */ + rb_define_method(classMemory, "put_float64", memory_put_float64, 2); + /* + * Document-method: get_float64 + * call-seq: memory.get_float64(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 64-bit float (double) from memory at offset +offset+ (alias: #get_double). + */ + rb_define_method(classMemory, "get_float64", memory_get_float64, 1); + rb_define_alias(classMemory, "put_double", "put_float64"); + rb_define_alias(classMemory, "get_double", "get_float64"); + /* + * Document-method: write_double + * call-seq: memory.write_double(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 64-bit float (double) in memory. 
+ * + * Same as: + * memory.put_double(0, value) + */ + rb_define_method(classMemory, "write_double", memory_write_float64, 1); + /* + * Document-method: read_double + * call-seq: memory.read_double + * @return [Float] + * Read a 64-bit float (double) from memory. + * + * Same as: + * memory.get_double(0) + */ + rb_define_method(classMemory, "read_double", memory_read_float64, 0); + /* + * Document-method: put_array_of_float64 + * call-seq: memory.put_array_of_float64(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 64-bit floats (doubles) in memory from offset +offset+ (alias: #put_array_of_double). + */ + rb_define_method(classMemory, "put_array_of_float64", memory_put_array_of_float64, 2); + /* + * Document-method: get_array_of_float64 + * call-seq: memory.get_array_of_float64(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 64-bit floats (doubles) in memory from offset +offset+ (alias: #get_array_of_double). + */ + rb_define_method(classMemory, "get_array_of_float64", memory_get_array_of_float64, 2); + /* + * Document-method: write_array_of_double + * call-seq: memory.write_array_of_double(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 64-bit floats (doubles) in memory. + * + * Same as: + * memory.put_array_of_double(0, ary) + */ + rb_define_method(classMemory, "write_array_of_double", memory_write_array_of_float64, 1); + /* + * Document-method: read_array_of_double + * call-seq: memory.read_array_of_double(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 64-bit floats (doubles) from memory. + * + * Same as: + * memory.get_array_of_double(0, ary) + */ + rb_define_method(classMemory, "read_array_of_double", memory_read_array_of_float64, 1); + rb_define_alias(classMemory, "put_array_of_double", "put_array_of_float64"); + rb_define_alias(classMemory, "get_array_of_double", "get_array_of_float64"); + /* + * Document-method: put_pointer + * call-seq: memory.put_pointer(offset, value) + * @param [Numeric] offset + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Put +value+ in memory from +offset+.. + */ + rb_define_method(classMemory, "put_pointer", memory_put_pointer, 2); + /* + * Document-method: get_pointer + * call-seq: memory.get_pointer(offset) + * @param [Numeric] offset + * @return [Pointer] + * Get a {Pointer} to the memory from +offset+. + */ + rb_define_method(classMemory, "get_pointer", memory_get_pointer, 1); + /* + * Document-method: write_pointer + * call-seq: memory.write_pointer(value) + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Write +value+ in memory. + * + * Equivalent to: + * memory.put_pointer(0, value) + */ + rb_define_method(classMemory, "write_pointer", memory_write_pointer, 1); + /* + * Document-method: read_pointer + * call-seq: memory.read_pointer + * @return [Pointer] + * Get a {Pointer} to the memory from base address. + * + * Equivalent to: + * memory.get_pointer(0) + */ + rb_define_method(classMemory, "read_pointer", memory_read_pointer, 0); + /* + * Document-method: put_array_of_pointer + * call-seq: memory.put_array_of_pointer(offset, ary) + * @param [Numeric] offset + * @param [Array<#to_ptr>] ary + * @return [self] + * Put an array of {Pointer} into memory from +offset+. 
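+ * A short sketch (assuming +p1+ and +p2+ are existing Pointer instances):
+ *   memory.put_array_of_pointer(0, [p1, p2])
+ *   memory.get_array_of_pointer(0, 2)  # => two Pointers holding the same addresses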
+ */ + rb_define_method(classMemory, "put_array_of_pointer", memory_put_array_of_pointer, 2); + /* + * Document-method: get_array_of_pointer + * call-seq: memory.get_array_of_pointer(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Array] + * Get an array of {Pointer} of length +length+ from +offset+. + */ + rb_define_method(classMemory, "get_array_of_pointer", memory_get_array_of_pointer, 2); + /* + * Document-method: write_array_of_pointer + * call-seq: memory.write_array_of_pointer(ary) + * @param [Array<#to_ptr>] ary + * @return [self] + * Write an array of {Pointer} into memory from +offset+. + * + * Same as : + * memory.put_array_of_pointer(0, ary) + */ + rb_define_method(classMemory, "write_array_of_pointer", memory_write_array_of_pointer, 1); + /* + * Document-method: read_array_of_pointer + * call-seq: memory.read_array_of_pointer(length) + * @param [Numeric] length + * @return [Array] + * Read an array of {Pointer} of length +length+. + * + * Same as: + * memory.get_array_of_pointer(0, length) + */ + rb_define_method(classMemory, "read_array_of_pointer", memory_read_array_of_pointer, 1); + + rb_define_method(classMemory, "get_string", memory_get_string, -1); + rb_define_method(classMemory, "put_string", memory_put_string, 2); + rb_define_method(classMemory, "get_bytes", memory_get_bytes, 2); + rb_define_method(classMemory, "put_bytes", memory_put_bytes, -1); + rb_define_method(classMemory, "read_bytes", memory_read_bytes, 1); + rb_define_method(classMemory, "write_bytes", memory_write_bytes, -1); + rb_define_method(classMemory, "get_array_of_string", memory_get_array_of_string, -1); + + rb_define_method(classMemory, "get", memory_get, 2); + rb_define_method(classMemory, "put", memory_put, 3); + + rb_define_method(classMemory, "clear", memory_clear, 0); + rb_define_method(classMemory, "total", memory_size, 0); + rb_define_alias(classMemory, "size", "total"); + rb_define_method(classMemory, "type_size", memory_type_size, 0); + rb_define_method(classMemory, "[]", memory_aref, 1); + rb_define_method(classMemory, "__copy_from__", memory_copy_from, 2); + + id_to_ptr = rb_intern("to_ptr"); + id_call = rb_intern("call"); + id_plus = rb_intern("+"); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.h new file mode 100644 index 0000000..1119288 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_ABSTRACTMEMORY_H +#define RBFFI_ABSTRACTMEMORY_H + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _MSC_VER +#include +#endif + +#include "compat.h" +#include "Types.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#define MEM_RD 0x01 +#define MEM_WR 0x02 +#define MEM_CODE 0x04 +#define MEM_SWAP 0x08 +#define MEM_EMBED 0x10 + +typedef struct AbstractMemory_ AbstractMemory; + +typedef struct { + VALUE (*get)(AbstractMemory* ptr, long offset); + void (*put)(AbstractMemory* ptr, long offset, VALUE value); +} MemoryOp; + +typedef struct { + MemoryOp* int8; + MemoryOp* uint8; + MemoryOp* int16; + MemoryOp* uint16; + MemoryOp* int32; + MemoryOp* uint32; + MemoryOp* int64; + MemoryOp* uint64; + MemoryOp* slong; + MemoryOp* uslong; + MemoryOp* float32; + MemoryOp* float64; + MemoryOp* longdouble; + MemoryOp* pointer; + MemoryOp* strptr; + MemoryOp* boolOp; +} MemoryOps; + +struct AbstractMemory_ { + char* address; /* Use char* instead of void* to ensure adding to it works correctly */ + long size; + int flags; + int typeSize; +}; + + +extern VALUE rbffi_AbstractMemoryClass; +extern MemoryOps rbffi_AbstractMemoryOps; + +extern void rbffi_AbstractMemory_Init(VALUE ffiModule); + +extern AbstractMemory* rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass); + +extern void rbffi_AbstractMemory_Error(AbstractMemory *, int op); + +static inline void +checkBounds(AbstractMemory* mem, long off, long len) +{ + if (unlikely((off | len | (off + len) | (mem->size - (off + len))) < 0)) { + rb_raise(rb_eIndexError, "Memory access offset=%ld size=%ld is out of bounds", + off, len); + } +} + +static inline void +checkRead(AbstractMemory* mem) +{ + if (unlikely((mem->flags & MEM_RD) == 0)) { + rbffi_AbstractMemory_Error(mem, MEM_RD); + } +} + +static inline void +checkWrite(AbstractMemory* mem) +{ + if (unlikely((mem->flags & MEM_WR) == 0)) { + rbffi_AbstractMemory_Error(mem, MEM_WR); + } +} + +static inline MemoryOp* +get_memory_op(Type* type) +{ + switch (type->nativeType) { + case NATIVE_INT8: + return rbffi_AbstractMemoryOps.int8; + case NATIVE_UINT8: + return rbffi_AbstractMemoryOps.uint8; + case NATIVE_INT16: + return rbffi_AbstractMemoryOps.int16; + case NATIVE_UINT16: + return rbffi_AbstractMemoryOps.uint16; + case NATIVE_INT32: + return rbffi_AbstractMemoryOps.int32; + case NATIVE_UINT32: + return rbffi_AbstractMemoryOps.uint32; + case NATIVE_INT64: + return rbffi_AbstractMemoryOps.int64; + case NATIVE_UINT64: + return rbffi_AbstractMemoryOps.uint64; + case NATIVE_LONG: + return rbffi_AbstractMemoryOps.slong; + case NATIVE_ULONG: + return rbffi_AbstractMemoryOps.uslong; + case NATIVE_FLOAT32: + return rbffi_AbstractMemoryOps.float32; + case NATIVE_FLOAT64: + return rbffi_AbstractMemoryOps.float64; + case NATIVE_LONGDOUBLE: + 
return rbffi_AbstractMemoryOps.longdouble; + case NATIVE_POINTER: + return rbffi_AbstractMemoryOps.pointer; + case NATIVE_STRING: + return rbffi_AbstractMemoryOps.strptr; + case NATIVE_BOOL: + return rbffi_AbstractMemoryOps.boolOp; + default: + return NULL; + } +} + +#define MEMORY(obj) rbffi_AbstractMemory_Cast((obj), rbffi_AbstractMemoryClass) +#define MEMORY_PTR(obj) MEMORY((obj))->address +#define MEMORY_LEN(obj) MEMORY((obj))->size + + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_ABSTRACTMEMORY_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.c new file mode 100644 index 0000000..bfd666a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include "ArrayType.h" + +static VALUE array_type_s_allocate(VALUE klass); +static VALUE array_type_initialize(VALUE self, VALUE rbComponentType, VALUE rbLength); +static void array_type_mark(ArrayType *); +static void array_type_free(ArrayType *); + +VALUE rbffi_ArrayTypeClass = Qnil; + +static VALUE +array_type_s_allocate(VALUE klass) +{ + ArrayType* array; + VALUE obj; + + obj = Data_Make_Struct(klass, ArrayType, array_type_mark, array_type_free, array); + + array->base.nativeType = NATIVE_ARRAY; + array->base.ffiType = xcalloc(1, sizeof(*array->base.ffiType)); + array->base.ffiType->type = FFI_TYPE_STRUCT; + array->base.ffiType->size = 0; + array->base.ffiType->alignment = 0; + array->rbComponentType = Qnil; + + return obj; +} + +static void +array_type_mark(ArrayType *array) +{ + rb_gc_mark(array->rbComponentType); +} + +static void +array_type_free(ArrayType *array) +{ + xfree(array->base.ffiType); + xfree(array->ffiTypes); + xfree(array); +} + + +/* + * call-seq: initialize(component_type, length) + * @param [Type] component_type + * @param [Numeric] length + * @return [self] + * A new instance of ArrayType. + */ +static VALUE +array_type_initialize(VALUE self, VALUE rbComponentType, VALUE rbLength) +{ + ArrayType* array; + int i; + + Data_Get_Struct(self, ArrayType, array); + + array->length = NUM2UINT(rbLength); + array->rbComponentType = rbComponentType; + Data_Get_Struct(rbComponentType, Type, array->componentType); + + array->ffiTypes = xcalloc(array->length + 1, sizeof(*array->ffiTypes)); + array->base.ffiType->elements = array->ffiTypes; + array->base.ffiType->size = array->componentType->ffiType->size * array->length; + array->base.ffiType->alignment = array->componentType->ffiType->alignment; + + for (i = 0; i < array->length; ++i) { + array->ffiTypes[i] = array->componentType->ffiType; + } + + return self; +} + +/* + * call-seq: length + * @return [Numeric] + * Get array's length + */ +static VALUE +array_type_length(VALUE self) +{ + ArrayType* array; + + Data_Get_Struct(self, ArrayType, array); + + return UINT2NUM(array->length); +} + +/* + * call-seq: element_type + * @return [Type] + * Get element type. + */ +static VALUE +array_type_element_type(VALUE self) +{ + ArrayType* array; + + Data_Get_Struct(self, ArrayType, array); + + return array->rbComponentType; +} + +void +rbffi_ArrayType_Init(VALUE moduleFFI) +{ + VALUE ffi_Type; + + ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::ArrayType < FFI::Type + * + * This is a typed array. The type is a {NativeType native type}. 
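+ *
+ * A brief sketch (illustrative; FFI::Type::INT32 is one of the built-in type constants):
+ *   t = FFI::ArrayType.new(FFI::Type::INT32, 4)
+ *   t.length      # => 4
+ *   t.elem_type   # => FFI::Type::INT32
+ * In practice such types are usually created implicitly, for example by array
+ * members declared in a Struct layout.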
+ */ + rbffi_ArrayTypeClass = rb_define_class_under(moduleFFI, "ArrayType", ffi_Type); + /* + * Document-variable: FFI::ArrayType + */ + rb_global_variable(&rbffi_ArrayTypeClass); + /* + * Document-constant: FFI::Type::Array + */ + rb_define_const(ffi_Type, "Array", rbffi_ArrayTypeClass); + + rb_define_alloc_func(rbffi_ArrayTypeClass, array_type_s_allocate); + rb_define_method(rbffi_ArrayTypeClass, "initialize", array_type_initialize, 2); + rb_define_method(rbffi_ArrayTypeClass, "length", array_type_length, 0); + rb_define_method(rbffi_ArrayTypeClass, "elem_type", array_type_element_type, 0); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.h new file mode 100644 index 0000000..356ffb1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_ARRAYTYPE_H +#define RBFFI_ARRAYTYPE_H + +#include +#include +#include "Type.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct ArrayType_ { + Type base; + int length; + ffi_type** ffiTypes; + Type* componentType; + VALUE rbComponentType; +} ArrayType; + +extern void rbffi_ArrayType_Init(VALUE moduleFFI); +extern VALUE rbffi_ArrayTypeClass; + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_ARRAYTYPE_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Buffer.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Buffer.c new file mode 100644 index 0000000..6863dda --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Buffer.c @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (C) 2009 Aman Gupta + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include +#include "rbffi.h" +#include "rbffi_endian.h" +#include "AbstractMemory.h" + +#define BUFFER_EMBED_MAXLEN (8) +typedef struct Buffer { + AbstractMemory memory; + + union { + VALUE rbParent; /* link to parent buffer */ + char* storage; /* start of malloc area */ + long embed[BUFFER_EMBED_MAXLEN / sizeof(long)]; /* storage for tiny allocations */ + } data; +} Buffer; + +static VALUE buffer_allocate(VALUE klass); +static VALUE buffer_initialize(int argc, VALUE* argv, VALUE self); +static void buffer_release(Buffer* ptr); +static void buffer_mark(Buffer* ptr); +static VALUE buffer_free(VALUE self); + +static VALUE BufferClass = Qnil; + +static VALUE +buffer_allocate(VALUE klass) +{ + Buffer* buffer; + VALUE obj; + + obj = Data_Make_Struct(klass, Buffer, NULL, buffer_release, buffer); + buffer->data.rbParent = Qnil; + buffer->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +static void +buffer_release(Buffer* ptr) +{ + if ((ptr->memory.flags & MEM_EMBED) == 0 && ptr->data.storage != NULL) { + xfree(ptr->data.storage); + ptr->data.storage = NULL; + } + + xfree(ptr); +} + +/* + * call-seq: initialize(size, count=1, clear=false) + * @param [Integer, Symbol, #size] Type or size in bytes of a buffer cell + * @param [Fixnum] count number of cell in the Buffer + * @param [Boolean] clear if true, set the buffer to all-zero + * @return [self] + * @raise {NoMemoryError} if failed to allocate memory for Buffer + * A new instance of Buffer. + */ +static VALUE +buffer_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE rbSize = Qnil, rbCount = Qnil, rbClear = Qnil; + Buffer* p; + int nargs; + + Data_Get_Struct(self, Buffer, p); + + nargs = rb_scan_args(argc, argv, "12", &rbSize, &rbCount, &rbClear); + p->memory.typeSize = rbffi_type_size(rbSize); + p->memory.size = p->memory.typeSize * (nargs > 1 ? 
NUM2LONG(rbCount) : 1); + + if (p->memory.size > BUFFER_EMBED_MAXLEN) { + p->data.storage = xmalloc(p->memory.size + 7); + if (p->data.storage == NULL) { + rb_raise(rb_eNoMemError, "Failed to allocate memory size=%lu bytes", p->memory.size); + return Qnil; + } + + /* ensure the memory is aligned on at least a 8 byte boundary */ + p->memory.address = (void *) (((uintptr_t) p->data.storage + 0x7) & (uintptr_t) ~0x7ULL); + + if (p->memory.size > 0 && (nargs < 3 || RTEST(rbClear))) { + memset(p->memory.address, 0, p->memory.size); + } + + } else { + p->memory.flags |= MEM_EMBED; + p->memory.address = (void *) &p->data.embed[0]; + } + + if (rb_block_given_p()) { + return rb_ensure(rb_yield, self, buffer_free, self); + } + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [self] + * DO NOT CALL THIS METHOD. + */ +static VALUE +buffer_initialize_copy(VALUE self, VALUE other) +{ + AbstractMemory* src; + Buffer* dst; + + Data_Get_Struct(self, Buffer, dst); + src = rbffi_AbstractMemory_Cast(other, BufferClass); + if ((dst->memory.flags & MEM_EMBED) == 0 && dst->data.storage != NULL) { + xfree(dst->data.storage); + } + dst->data.storage = xmalloc(src->size + 7); + if (dst->data.storage == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate memory size=%lu bytes", src->size); + return Qnil; + } + + dst->memory.address = (void *) (((uintptr_t) dst->data.storage + 0x7) & (uintptr_t) ~0x7ULL); + dst->memory.size = src->size; + dst->memory.typeSize = src->typeSize; + + /* finally, copy the actual buffer contents */ + memcpy(dst->memory.address, src->address, src->size); + + return self; +} + +static VALUE +buffer_alloc_inout(int argc, VALUE* argv, VALUE klass) +{ + return buffer_initialize(argc, argv, buffer_allocate(klass)); +} + +static VALUE +slice(VALUE self, long offset, long len) +{ + Buffer* ptr; + Buffer* result; + VALUE obj = Qnil; + + Data_Get_Struct(self, Buffer, ptr); + checkBounds(&ptr->memory, offset, len); + + obj = Data_Make_Struct(BufferClass, Buffer, buffer_mark, -1, result); + result->memory.address = ptr->memory.address + offset; + result->memory.size = len; + result->memory.flags = ptr->memory.flags; + result->memory.typeSize = ptr->memory.typeSize; + result->data.rbParent = self; + + return obj; +} + +/* + * call-seq: + offset + * @param [Numeric] offset + * @return [Buffer] a new instance of Buffer pointing from offset until end of previous buffer. + * Add a Buffer with an offset + */ +static VALUE +buffer_plus(VALUE self, VALUE rbOffset) +{ + Buffer* ptr; + long offset = NUM2LONG(rbOffset); + + Data_Get_Struct(self, Buffer, ptr); + + return slice(self, offset, ptr->memory.size - offset); +} + +/* + * call-seq: slice(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Buffer] a new instance of Buffer + * Slice an existing Buffer. + */ +static VALUE +buffer_slice(VALUE self, VALUE rbOffset, VALUE rbLength) +{ + return slice(self, NUM2LONG(rbOffset), NUM2LONG(rbLength)); +} + +/* + * call-seq: inspect + * @return [String] + * Inspect a Buffer. + */ +static VALUE +buffer_inspect(VALUE self) +{ + char tmp[100]; + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + + snprintf(tmp, sizeof(tmp), "#", ptr, ptr->memory.address, ptr->memory.size); + + return rb_str_new2(tmp); +} + + +#if BYTE_ORDER == LITTLE_ENDIAN +# define SWAPPED_ORDER BIG_ENDIAN +#else +# define SWAPPED_ORDER LITTLE_ENDIAN +#endif + +/* + * Set or get endianness of Buffer. + * @overload order + * @return [:big, :little] + * Get endianness of Buffer. 
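+ *     A sketch (illustrative, on a little-endian host): +buf.order+ returns +:little+,
+ *     while +buf.order(:big)+ returns a Buffer sharing the same memory whose
+ *     multi-byte accessors byte-swap on read and write.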
+ * @overload order(order) + * @param [:big, :little, :network] order + * @return [self] + * Set endianness of Buffer (+:network+ is an alias for +:big+). + */ +static VALUE +buffer_order(int argc, VALUE* argv, VALUE self) +{ + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + if (argc == 0) { + int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER; + return order == BIG_ENDIAN ? ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little")); + } else { + VALUE rbOrder = Qnil; + int order = BYTE_ORDER; + + if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) { + rb_raise(rb_eArgError, "need byte order"); + } + if (SYMBOL_P(rbOrder)) { + ID id = SYM2ID(rbOrder); + if (id == rb_intern("little")) { + order = LITTLE_ENDIAN; + + } else if (id == rb_intern("big") || id == rb_intern("network")) { + order = BIG_ENDIAN; + } + } + if (order != BYTE_ORDER) { + Buffer* p2; + VALUE retval = slice(self, 0, ptr->memory.size); + + Data_Get_Struct(retval, Buffer, p2); + p2->memory.flags |= MEM_SWAP; + return retval; + } + + return self; + } +} + +/* Only used to free the buffer if the yield in the initializer throws an exception */ +static VALUE +buffer_free(VALUE self) +{ + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + if ((ptr->memory.flags & MEM_EMBED) == 0 && ptr->data.storage != NULL) { + xfree(ptr->data.storage); + ptr->data.storage = NULL; + } + + return self; +} + +static void +buffer_mark(Buffer* ptr) +{ + rb_gc_mark(ptr->data.rbParent); +} + +void +rbffi_Buffer_Init(VALUE moduleFFI) +{ + VALUE ffi_AbstractMemory = rbffi_AbstractMemoryClass; + + /* + * Document-class: FFI::Buffer < FFI::AbstractMemory + * + * A Buffer is a function argument type. It should be use with functions playing with C arrays. + */ + BufferClass = rb_define_class_under(moduleFFI, "Buffer", ffi_AbstractMemory); + + /* + * Document-variable: FFI::Buffer + */ + rb_global_variable(&BufferClass); + rb_define_alloc_func(BufferClass, buffer_allocate); + + /* + * Document-method: alloc_inout + * call-seq: alloc_inout(*args) + * Create a new Buffer for in and out arguments (alias : new_inout). + */ + rb_define_singleton_method(BufferClass, "alloc_inout", buffer_alloc_inout, -1); + /* + * Document-method: alloc_out + * call-seq: alloc_out(*args) + * Create a new Buffer for out arguments (alias : new_out). + */ + rb_define_singleton_method(BufferClass, "alloc_out", buffer_alloc_inout, -1); + /* + * Document-method: alloc_in + * call-seq: alloc_in(*args) + * Create a new Buffer for in arguments (alias : new_in). 
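+ * For example (illustrative; all three allocators currently share one implementation):
+ *   buf = FFI::Buffer.alloc_in(:char, 32)   # 32 one-byte cells
+ *   buf.put_string(0, "hello")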
+ */ + rb_define_singleton_method(BufferClass, "alloc_in", buffer_alloc_inout, -1); + rb_define_alias(rb_singleton_class(BufferClass), "new_in", "alloc_in"); + rb_define_alias(rb_singleton_class(BufferClass), "new_out", "alloc_out"); + rb_define_alias(rb_singleton_class(BufferClass), "new_inout", "alloc_inout"); + + rb_define_method(BufferClass, "initialize", buffer_initialize, -1); + rb_define_method(BufferClass, "initialize_copy", buffer_initialize_copy, 1); + rb_define_method(BufferClass, "order", buffer_order, -1); + rb_define_method(BufferClass, "inspect", buffer_inspect, 0); + rb_define_alias(BufferClass, "length", "total"); + rb_define_method(BufferClass, "+", buffer_plus, 1); + rb_define_method(BufferClass, "slice", buffer_slice, 2); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Call.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Call.c new file mode 100644 index 0000000..8db60f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Call.c @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Mike Dalessio + * Copyright (c) 2009, Aman Gupta. + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) +# include +# include +#endif +#include +#include "extconf.h" +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Struct.h" +#include "Function.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "MappedType.h" +#include "Thread.h" +#include "LongDouble.h" + +#ifdef USE_RAW +# ifndef __i386__ +# error "RAW argument packing only supported on i386" +# endif + +#define INT8_ADJ (4) +#define INT16_ADJ (4) +#define INT32_ADJ (4) +#define INT64_ADJ (8) +#define LONG_ADJ (sizeof(long)) +#define FLOAT32_ADJ (4) +#define FLOAT64_ADJ (8) +#define ADDRESS_ADJ (sizeof(void *)) +#define LONGDOUBLE_ADJ (ffi_type_longdouble.alignment) + +#endif /* USE_RAW */ + +#ifdef USE_RAW +# define ADJ(p, a) ((p) = (FFIStorage*) (((char *) p) + a##_ADJ)) +#else +# define ADJ(p, a) (++(p)) +#endif + +static void* callback_param(VALUE proc, VALUE cbinfo); +static inline void* getPointer(VALUE value, int type); + +static ID id_to_ptr, id_map_symbol, id_to_native; + +void +rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes, + FFIStorage* paramStorage, void** ffiValues, + VALUE* callbackParameters, int callbackCount, VALUE enums) +{ + VALUE callbackProc = Qnil; + FFIStorage* param = ¶mStorage[0]; + int i, argidx, cbidx, argCount; + + if (unlikely(paramCount != -1 && paramCount != argc)) { + if (argc == (paramCount - 1) && callbackCount == 1 && rb_block_given_p()) { + callbackProc = rb_block_proc(); + } else { + rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)", argc, paramCount); + } + } + + argCount = paramCount != -1 ? paramCount : argc; + + for (i = 0, argidx = 0, cbidx = 0; i < argCount; ++i) { + Type* paramType = paramTypes[i]; + int type; + + + if (unlikely(paramType->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { argv[argidx], Qnil }; + argv[argidx] = rb_funcall2(((MappedType *) paramType)->rbConverter, id_to_native, 2, values); + paramType = ((MappedType *) paramType)->type; + } + + type = argidx < argc ? 
TYPE(argv[argidx]) : T_NONE; + ffiValues[i] = param; + + switch (paramType->nativeType) { + + case NATIVE_INT8: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s8 = NUM2INT(value); + } else { + param->s8 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT8); + break; + + case NATIVE_INT16: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s16 = NUM2INT(value); + + } else { + param->s16 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT16); + break; + + case NATIVE_INT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s32 = NUM2INT(value); + + } else { + param->s32 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT32); + break; + + case NATIVE_BOOL: + if (type != T_TRUE && type != T_FALSE) { + rb_raise(rb_eTypeError, "wrong argument type (expected a boolean parameter)"); + } + param->s8 = argv[argidx++] == Qtrue; + ADJ(param, INT8); + break; + + case NATIVE_UINT8: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u8 = NUM2UINT(value); + } else { + param->u8 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT8); + ++argidx; + break; + + case NATIVE_UINT16: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u16 = NUM2UINT(value); + } else { + param->u16 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT16); + ++argidx; + break; + + case NATIVE_UINT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u32 = NUM2UINT(value); + } else { + param->u32 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT32); + ++argidx; + break; + + case NATIVE_INT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->i64 = NUM2LL(value); + } else { + param->i64 = NUM2LL(argv[argidx]); + } + + ADJ(param, INT64); + ++argidx; + break; + + case NATIVE_UINT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u64 = NUM2ULL(value); + } else { + param->u64 = NUM2ULL(argv[argidx]); + } + + ADJ(param, INT64); + ++argidx; + break; + + case NATIVE_LONG: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + *(ffi_sarg *) param = NUM2LONG(value); + } else { + *(ffi_sarg *) param = NUM2LONG(argv[argidx]); + } + + ADJ(param, LONG); + ++argidx; + break; + + case NATIVE_ULONG: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + *(ffi_arg *) param = NUM2ULONG(value); + } else { + *(ffi_arg *) param = NUM2ULONG(argv[argidx]); + } + + ADJ(param, LONG); + ++argidx; + break; + + case NATIVE_FLOAT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->f32 = (float) NUM2DBL(value); + } else { + param->f32 = (float) NUM2DBL(argv[argidx]); + } + + ADJ(param, FLOAT32); + ++argidx; + break; + + case NATIVE_FLOAT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->f64 = NUM2DBL(value); + } else { + param->f64 = 
NUM2DBL(argv[argidx]); + } + + ADJ(param, FLOAT64); + ++argidx; + break; + + case NATIVE_LONGDOUBLE: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->ld = rbffi_num2longdouble(value); + } else { + param->ld = rbffi_num2longdouble(argv[argidx]); + } + + ADJ(param, LONGDOUBLE); + ++argidx; + break; + + + case NATIVE_STRING: + if (type == T_NIL) { + param->ptr = NULL; + + } else { + param->ptr = StringValueCStr(argv[argidx]); + } + + ADJ(param, ADDRESS); + ++argidx; + break; + + case NATIVE_POINTER: + case NATIVE_BUFFER_IN: + case NATIVE_BUFFER_OUT: + case NATIVE_BUFFER_INOUT: + param->ptr = getPointer(argv[argidx++], type); + ADJ(param, ADDRESS); + break; + + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + if (callbackProc != Qnil) { + param->ptr = callback_param(callbackProc, callbackParameters[cbidx++]); + } else { + param->ptr = callback_param(argv[argidx], callbackParameters[cbidx++]); + ++argidx; + } + ADJ(param, ADDRESS); + break; + + case NATIVE_STRUCT: + ffiValues[i] = getPointer(argv[argidx++], type); + break; + + default: + rb_raise(rb_eArgError, "Invalid parameter type: %d", paramType->nativeType); + } + } +} + +static void * +call_blocking_function(void* data) +{ + rbffi_blocking_call_t* b = (rbffi_blocking_call_t *) data; + ffi_call(&b->cif, FFI_FN(b->function), b->retval, b->ffiValues); + + return NULL; +} + +VALUE +rbffi_do_blocking_call(VALUE data) +{ + rb_thread_call_without_gvl(call_blocking_function, (void*)data, (rb_unblock_function_t *) -1, NULL); + + return Qnil; +} + +VALUE +rbffi_save_frame_exception(VALUE data, VALUE exc) +{ + rbffi_frame_t* frame = (rbffi_frame_t *) data; + frame->exc = exc; + return Qnil; +} + +VALUE +rbffi_CallFunction(int argc, VALUE* argv, void* function, FunctionType* fnInfo) +{ + void* retval; + void** ffiValues; + FFIStorage* params; + VALUE rbReturnValue; + rbffi_frame_t frame = { 0 }; + + retval = alloca(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG)); + + if (unlikely(fnInfo->blocking)) { + rbffi_blocking_call_t* bc; + + /* allocate information passed to the blocking function on the stack */ + ffiValues = ALLOCA_N(void *, fnInfo->parameterCount); + params = ALLOCA_N(FFIStorage, fnInfo->parameterCount); + bc = ALLOCA_N(rbffi_blocking_call_t, 1); + bc->retval = retval; + bc->cif = fnInfo->ffi_cif; + bc->function = function; + bc->ffiValues = ffiValues; + bc->params = params; + bc->frame = &frame; + + rbffi_SetupCallParams(argc, argv, + fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues, + fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums); + + rbffi_frame_push(&frame); + rb_rescue2(rbffi_do_blocking_call, (VALUE) bc, rbffi_save_frame_exception, (VALUE) &frame, rb_eException, (VALUE) 0); + rbffi_frame_pop(&frame); + + } else { + + ffiValues = ALLOCA_N(void *, fnInfo->parameterCount); + params = ALLOCA_N(FFIStorage, fnInfo->parameterCount); + + rbffi_SetupCallParams(argc, argv, + fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues, + fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums); + + rbffi_frame_push(&frame); + ffi_call(&fnInfo->ffi_cif, FFI_FN(function), retval, ffiValues); + rbffi_frame_pop(&frame); + } + + if (unlikely(!fnInfo->ignoreErrno)) { + rbffi_save_errno(); + } + + if (RTEST(frame.exc) && frame.exc != Qnil) { + rb_exc_raise(frame.exc); + } + + RB_GC_GUARD(rbReturnValue) = rbffi_NativeValue_ToRuby(fnInfo->returnType, fnInfo->rbReturnType, retval); + RB_GC_GUARD(fnInfo->rbReturnType); 
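+    /*
+     * The two GC guards above keep the Ruby return-type object and the freshly
+     * converted result reachable on this stack frame, so the garbage collector
+     * cannot reclaim them before the value is handed back to Ruby.
+     */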
+ + return rbReturnValue; +} + +static inline void* +getPointer(VALUE value, int type) +{ + if (likely(type == T_DATA && rb_obj_is_kind_of(value, rbffi_AbstractMemoryClass))) { + + return ((AbstractMemory *) DATA_PTR(value))->address; + + } else if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_StructClass)) { + + AbstractMemory* memory = ((Struct *) DATA_PTR(value))->pointer; + return memory != NULL ? memory->address : NULL; + + } else if (type == T_STRING) { + + return StringValuePtr(value); + + } else if (type == T_NIL) { + + return NULL; + + } else if (rb_respond_to(value, id_to_ptr)) { + + VALUE ptr = rb_funcall2(value, id_to_ptr, 0, NULL); + if (rb_obj_is_kind_of(ptr, rbffi_AbstractMemoryClass) && TYPE(ptr) == T_DATA) { + return ((AbstractMemory *) DATA_PTR(ptr))->address; + } + rb_raise(rb_eArgError, "to_ptr returned an invalid pointer"); + } + + rb_raise(rb_eArgError, ":pointer argument is not a valid pointer"); + return NULL; +} + +Invoker +rbffi_GetInvoker(FunctionType *fnInfo) +{ + return rbffi_CallFunction; +} + + +static void* +callback_param(VALUE proc, VALUE cbInfo) +{ + VALUE callback ; + if (unlikely(proc == Qnil)) { + return NULL ; + } + + /* Handle Function pointers here */ + if (rb_obj_is_kind_of(proc, rbffi_FunctionClass)) { + AbstractMemory* ptr; + Data_Get_Struct(proc, AbstractMemory, ptr); + return ptr->address; + } + + callback = rbffi_Function_ForProc(cbInfo, proc); + RB_GC_GUARD(callback); + + return ((AbstractMemory *) DATA_PTR(callback))->address; +} + + +void +rbffi_Call_Init(VALUE moduleFFI) +{ + id_to_ptr = rb_intern("to_ptr"); + id_to_native = rb_intern("to_native"); + id_map_symbol = rb_intern("__map_symbol"); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Call.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Call.h new file mode 100644 index 0000000..b892d85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Call.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Mike Dalessio + * Copyright (c) 2009, Aman Gupta. + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_CALL_H +#define RBFFI_CALL_H + +#include "Thread.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__i386__) && \ + (defined(HAVE_RAW_API) || defined(USE_INTERNAL_LIBFFI)) && \ + !defined(_WIN32) && !defined(__WIN32__) +# define USE_RAW +#endif + +#if (defined(__i386__) || defined(__x86_64__)) && !(defined(_WIN32) || defined(__WIN32__)) +# define BYPASS_FFI 1 +#endif + +typedef union { +#ifdef USE_RAW + signed int s8, s16, s32; + unsigned int u8, u16, u32; +#else + signed char s8; + unsigned char u8; + signed short s16; + unsigned short u16; + signed int s32; + unsigned int u32; +#endif + signed long long i64; + unsigned long long u64; + signed long sl; + unsigned long ul; + void* ptr; + float f32; + double f64; + long double ld; +} FFIStorage; + +extern void rbffi_Call_Init(VALUE moduleFFI); + +extern void rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes, + FFIStorage* paramStorage, void** ffiValues, + VALUE* callbackParameters, int callbackCount, VALUE enums); + +struct FunctionType_; +extern VALUE rbffi_CallFunction(int argc, VALUE* argv, void* function, struct FunctionType_* fnInfo); + +typedef VALUE (*Invoker)(int argc, VALUE* argv, void* function, struct FunctionType_* fnInfo); + +Invoker rbffi_GetInvoker(struct FunctionType_* fnInfo); + +extern VALUE rbffi_GetEnumValue(VALUE enums, VALUE value); +extern int rbffi_GetSignedIntValue(VALUE value, int type, int minValue, int maxValue, const char* typeName, VALUE enums); + +typedef struct rbffi_blocking_call { + rbffi_frame_t* frame; + void* function; + ffi_cif cif; + void **ffiValues; + void* retval; + void* params; +} rbffi_blocking_call_t; + +VALUE rbffi_do_blocking_call(VALUE data); +VALUE rbffi_save_frame_exception(VALUE data, VALUE exc); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_CALL_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.c new file mode 100644 index 0000000..45411b4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +#endif +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +#else +# include +# define _WINSOCKAPI_ +# include +#endif +#include +#include + +#if defined(_MSC_VER) && !defined(INT8_MIN) +# include "win32/stdint.h" +#endif +#include +#include "rbffi.h" +#include "compat.h" + +#include "Function.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" + +#include "ClosurePool.h" + + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +typedef struct Memory { + void* code; + void* data; + struct Memory* next; +} Memory; + +struct ClosurePool_ { + void* ctx; + int closureSize; + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize); + struct Memory* blocks; /* Keeps track of all the allocated memory for this pool */ + Closure* list; + long refcnt; +}; + +static long pageSize; + +static void* allocatePage(void); +static bool freePage(void *); +static bool protectPage(void *); + +ClosurePool* +rbffi_ClosurePool_New(int closureSize, + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize), + void* ctx) +{ + ClosurePool* pool; + + pool = xcalloc(1, sizeof(*pool)); + pool->closureSize = closureSize; + pool->ctx = ctx; + pool->prep = prep; + pool->refcnt = 1; + + return pool; +} + +void +cleanup_closure_pool(ClosurePool* pool) +{ + Memory* memory; + + for (memory = pool->blocks; memory != NULL; ) { + Memory* next = memory->next; + freePage(memory->code); + free(memory->data); + free(memory); + memory = next; + } + xfree(pool); +} + +void +rbffi_ClosurePool_Free(ClosurePool* pool) +{ + if (pool != NULL) { + long refcnt = --(pool->refcnt); + if (refcnt == 0) { + cleanup_closure_pool(pool); + } + } +} + +Closure* +rbffi_Closure_Alloc(ClosurePool* pool) +{ + Closure *list = NULL; + Memory* block = NULL; + void *code = NULL; + char errmsg[256]; + int nclosures; + long trampolineSize; + int i; + + if (pool->list != NULL) { + Closure* closure = pool->list; + pool->list = pool->list->next; + pool->refcnt++; + + return closure; + } + + trampolineSize = roundup(pool->closureSize, 8); + nclosures = (int) (pageSize / trampolineSize); + block = calloc(1, sizeof(*block)); + list = calloc(nclosures, sizeof(*list)); + code = allocatePage(); + + if (block == NULL || list == NULL || code == NULL) { + snprintf(errmsg, sizeof(errmsg), "failed to allocate a page. 
errno=%d (%s)", errno, strerror(errno)); + goto error; + } + + for (i = 0; i < nclosures; ++i) { + Closure* closure = &list[i]; + closure->next = &list[i + 1]; + closure->pool = pool; + closure->code = ((char *)code + (i * trampolineSize)); + + if (!(*pool->prep)(pool->ctx, closure->code, closure, errmsg, sizeof(errmsg))) { + goto error; + } + } + + if (!protectPage(code)) { + goto error; + } + + /* Track the allocated page + Closure memory area */ + block->data = list; + block->code = code; + block->next = pool->blocks; + pool->blocks = block; + + /* Thread the new block onto the free list, apart from the first one. */ + list[nclosures - 1].next = pool->list; + pool->list = list->next; + pool->refcnt++; + + /* Use the first one as the new handle */ + return list; + +error: + free(block); + free(list); + if (code != NULL) { + freePage(code); + } + + + rb_raise(rb_eRuntimeError, "%s", errmsg); + return NULL; +} + +void +rbffi_Closure_Free(Closure* closure) +{ + if (closure != NULL) { + ClosurePool* pool = closure->pool; + long refcnt; + /* Just push it on the front of the free list */ + closure->next = pool->list; + pool->list = closure; + refcnt = --(pool->refcnt); + if (refcnt == 0) { + cleanup_closure_pool(pool); + } + } +} + +void* +rbffi_Closure_CodeAddress(Closure* handle) +{ + return handle->code; +} + + +static long +getPageSize() +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + return sysconf(_SC_PAGESIZE); +#endif +} + +static void* +allocatePage(void) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + return VirtualAlloc(NULL, pageSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else + void *page = mmap(NULL, pageSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + return (page != (void *) -1) ? page : NULL; +#endif +} + +static bool +freePage(void *addr) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + return VirtualFree(addr, 0, MEM_RELEASE); +#else + return munmap(addr, pageSize) == 0; +#endif +} + +static bool +protectPage(void* page) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + DWORD oldProtect; + return VirtualProtect(page, pageSize, PAGE_EXECUTE_READ, &oldProtect); +#else + return mprotect(page, pageSize, PROT_READ | PROT_EXEC) == 0; +#endif +} + +void +rbffi_ClosurePool_Init(VALUE module) +{ + pageSize = getPageSize(); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.h new file mode 100644 index 0000000..b842375 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RUBYFFI_CLOSUREPOOL_H +#define RUBYFFI_CLOSUREPOOL_H + +typedef struct ClosurePool_ ClosurePool; +typedef struct Closure_ Closure; + +struct Closure_ { + void* info; /* opaque handle for storing closure-instance specific data */ + void* function; /* closure-instance specific function, called by custom trampoline */ + void* code; /* The native trampoline code location */ + struct ClosurePool_* pool; + Closure* next; +}; + +void rbffi_ClosurePool_Init(VALUE module); + +ClosurePool* rbffi_ClosurePool_New(int closureSize, + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize), + void* ctx); + +void rbffi_ClosurePool_Free(ClosurePool *); + +Closure* rbffi_Closure_Alloc(ClosurePool *); +void rbffi_Closure_Free(Closure *); + +void* rbffi_Closure_GetCodeAddress(Closure *); + +#endif /* RUBYFFI_CLOSUREPOOL_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.c new file mode 100644 index 0000000..c717aec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.c @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#ifndef _MSC_VER +# include +#endif +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +# include +# define _WINSOCKAPI_ +# include +# include +#else +# include +#endif +#include +#if defined(_MSC_VER) && !defined(INT8_MIN) +# include "win32/stdint.h" +#endif + +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "DynamicLibrary.h" + +typedef struct LibrarySymbol_ { + Pointer base; + VALUE library; + VALUE name; +} LibrarySymbol; + +static VALUE library_initialize(VALUE self, VALUE libname, VALUE libflags); +static void library_free(Library* lib); + + +static VALUE symbol_allocate(VALUE klass); +static VALUE symbol_new(VALUE library, void* address, VALUE name); +static void symbol_mark(LibrarySymbol* sym); + +static VALUE LibraryClass = Qnil, SymbolClass = Qnil; + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +static void* dl_open(const char* name, int flags); +static void dl_error(char* buf, int size); +#define dl_sym(handle, name) GetProcAddress(handle, name) +#define dl_close(handle) FreeLibrary(handle) +#else +# define dl_open(name, flags) dlopen(name, flags != 0 ? flags : RTLD_LAZY) +# define dl_error(buf, size) do { snprintf(buf, size, "%s", dlerror()); } while(0) +# define dl_sym(handle, name) dlsym(handle, name) +# define dl_close(handle) dlclose(handle) +#endif + +static VALUE +library_allocate(VALUE klass) +{ + Library* library; + return Data_Make_Struct(klass, Library, NULL, library_free, library); +} + +/* + * call-seq: DynamicLibrary.open(libname, libflags) + * @param libname (see #initialize) + * @param libflags (see #initialize) + * @return [FFI::DynamicLibrary] + * @raise {LoadError} if +libname+ cannot be opened + * Open a library. + */ +static VALUE +library_open(VALUE klass, VALUE libname, VALUE libflags) +{ + return library_initialize(library_allocate(klass), libname, libflags); +} + +/* + * call-seq: initialize(libname, libflags) + * @param [String] libname name of library to open + * @param [Fixnum] libflags flags for library to open + * @return [FFI::DynamicLibrary] + * @raise {LoadError} if +libname+ cannot be opened + * A new DynamicLibrary instance. + */ +static VALUE +library_initialize(VALUE self, VALUE libname, VALUE libflags) +{ + Library* library; + int flags; + + Check_Type(libflags, T_FIXNUM); + + Data_Get_Struct(self, Library, library); + flags = libflags != Qnil ? NUM2UINT(libflags) : 0; + + library->handle = dl_open(libname != Qnil ? StringValueCStr(libname) : NULL, flags); + if (library->handle == NULL) { + char errmsg[1024]; + dl_error(errmsg, sizeof(errmsg)); + rb_raise(rb_eLoadError, "Could not open library '%s': %s", + libname != Qnil ? StringValueCStr(libname) : "[current process]", + errmsg); + } +#ifdef __CYGWIN__ + // On Cygwin 1.7.17 "dlsym(dlopen(0,0), 'getpid')" fails. (dlerror: "No such process") + // As a workaround we can use "dlsym(RTLD_DEFAULT, 'getpid')" instead. 
+ // Since 0 == RTLD_DEFAULT we won't call dl_close later. + if (libname == Qnil) { + dl_close(library->handle); + library->handle = RTLD_DEFAULT; + } +#endif + rb_iv_set(self, "@name", libname != Qnil ? libname : rb_str_new2("[current process]")); + return self; +} + +static VALUE +library_dlsym(VALUE self, VALUE name) +{ + Library* library; + void* address = NULL; + Check_Type(name, T_STRING); + + Data_Get_Struct(self, Library, library); + address = dl_sym(library->handle, StringValueCStr(name)); + + return address != NULL ? symbol_new(self, address, name) : Qnil; +} + +/* + * call-seq: last_error + * @return [String] library's last error string + */ +static VALUE +library_dlerror(VALUE self) +{ + char errmsg[1024]; + dl_error(errmsg, sizeof(errmsg)); + return rb_str_new2(errmsg); +} + +static void +library_free(Library* library) +{ + /* dlclose() on MacOS tends to segfault - avoid it */ +#ifndef __APPLE__ + if (library->handle != NULL) { + dl_close(library->handle); + } +#endif + xfree(library); +} + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +static void* +dl_open(const char* name, int flags) +{ + if (name == NULL) { + return GetModuleHandle(NULL); + } else { + DWORD dwFlags = PathIsRelativeA(name) ? 0 : LOAD_WITH_ALTERED_SEARCH_PATH; + return LoadLibraryExA(name, NULL, dwFlags); + } +} + +static void +dl_error(char* buf, int size) +{ + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), + 0, buf, size, NULL); +} +#endif + +static VALUE +symbol_allocate(VALUE klass) +{ + LibrarySymbol* sym; + VALUE obj = Data_Make_Struct(klass, LibrarySymbol, NULL, -1, sym); + sym->name = Qnil; + sym->library = Qnil; + sym->base.rbParent = Qnil; + + return obj; +} + + +/* + * call-seq: initialize_copy(other) + * @param [Object] other + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +symbol_initialize_copy(VALUE self, VALUE other) +{ + rb_raise(rb_eRuntimeError, "cannot duplicate symbol"); + return Qnil; +} + +static VALUE +symbol_new(VALUE library, void* address, VALUE name) +{ + LibrarySymbol* sym; + VALUE obj = Data_Make_Struct(SymbolClass, LibrarySymbol, symbol_mark, -1, sym); + + sym->base.memory.address = address; + sym->base.memory.size = LONG_MAX; + sym->base.memory.typeSize = 1; + sym->base.memory.flags = MEM_RD | MEM_WR; + sym->library = library; + sym->name = name; + + return obj; +} + +static void +symbol_mark(LibrarySymbol* sym) +{ + rb_gc_mark(sym->library); + rb_gc_mark(sym->name); +} + +/* + * call-seq: inspect + * @return [String] + * Inspect. + */ +static VALUE +symbol_inspect(VALUE self) +{ + LibrarySymbol* sym; + char buf[256]; + + Data_Get_Struct(self, LibrarySymbol, sym); + snprintf(buf, sizeof(buf), "#", + StringValueCStr(sym->name), sym->base.memory.address); + return rb_str_new2(buf); +} + +void +rbffi_DynamicLibrary_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::DynamicLibrary + */ + LibraryClass = rb_define_class_under(moduleFFI, "DynamicLibrary", rb_cObject); + rb_global_variable(&LibraryClass); + /* + * Document-class: FFI::DynamicLibrary::Symbol < FFI::Pointer + * + * An instance of this class represents a library symbol. It may be a {Pointer pointer} to + * a function or to a variable. 
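+ *
+ * A minimal usage sketch (the library name is illustrative and platform-specific;
+ * it assumes the Ruby-level API defined below):
+ *
+ *   lib = FFI::DynamicLibrary.open("libm.so.6", FFI::DynamicLibrary::RTLD_LAZY)
+ *   sym = lib.find_function("cos")   # => an FFI::DynamicLibrary::Symbol, or nil
+ *   sym.inspect if sym               # shows the symbol name and address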
+ */ + SymbolClass = rb_define_class_under(LibraryClass, "Symbol", rbffi_PointerClass); + rb_global_variable(&SymbolClass); + + /* + * Document-const: FFI::NativeLibrary + * Backward compatibility for FFI::DynamicLibrary + */ + rb_define_const(moduleFFI, "NativeLibrary", LibraryClass); /* backwards compat library */ + rb_define_alloc_func(LibraryClass, library_allocate); + rb_define_singleton_method(LibraryClass, "open", library_open, 2); + rb_define_singleton_method(LibraryClass, "last_error", library_dlerror, 0); + rb_define_method(LibraryClass, "initialize", library_initialize, 2); + /* + * Document-method: find_symbol + * call-seq: find_symbol(name) + * @param [String] name library symbol's name + * @return [FFI::DynamicLibrary::Symbol] library symbol + */ + rb_define_method(LibraryClass, "find_symbol", library_dlsym, 1); + /* + * Document-method: find_function + * call-seq: find_function(name) + * @param [String] name library function's name + * @return [FFI::DynamicLibrary::Symbol] library function symbol + */ + rb_define_method(LibraryClass, "find_function", library_dlsym, 1); + /* + * Document-method: find_variable + * call-seq: find_variable(name) + * @param [String] name library variable's name + * @return [FFI::DynamicLibrary::Symbol] library variable symbol + */ + rb_define_method(LibraryClass, "find_variable", library_dlsym, 1); + rb_define_method(LibraryClass, "last_error", library_dlerror, 0); + rb_define_attr(LibraryClass, "name", 1, 0); + + rb_define_alloc_func(SymbolClass, symbol_allocate); + rb_undef_method(SymbolClass, "new"); + rb_define_method(SymbolClass, "inspect", symbol_inspect, 0); + rb_define_method(SymbolClass, "initialize_copy", symbol_initialize_copy, 1); + +#define DEF(x) rb_define_const(LibraryClass, "RTLD_" #x, UINT2NUM(RTLD_##x)) + DEF(LAZY); + DEF(NOW); + DEF(GLOBAL); + DEF(LOCAL); + DEF(NOLOAD); + DEF(NODELETE); + DEF(FIRST); + DEF(DEEPBIND); + DEF(MEMBER); + DEF(BINDING_MASK); + DEF(LOCATION_MASK); + DEF(ALL_MASK); +#undef DEF + +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.h new file mode 100644 index 0000000..97bf7bc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LIBRARY_H +#define _LIBRARY_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* if these aren't defined (eg. windows), we need sensible defaults */ +#ifndef RTLD_LAZY +#define RTLD_LAZY 1 +#endif + +#ifndef RTLD_NOW +#define RTLD_NOW 2 +#endif + +#ifndef RTLD_LOCAL +#define RTLD_LOCAL 4 +#endif + +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 8 +#endif + +/* If these aren't defined, they're not supported so define as 0 */ +#ifndef RTLD_NOLOAD +#define RTLD_NOLOAD 0 +#endif + +#ifndef RTLD_NODELETE +#define RTLD_NODELETE 0 +#endif + +#ifndef RTLD_FIRST +#define RTLD_FIRST 0 +#endif + +#ifndef RTLD_DEEPBIND +#define RTLD_DEEPBIND 0 +#endif + +#ifndef RTLD_MEMBER +#define RTLD_MEMBER 0 +#endif + +/* convenience */ +#ifndef RTLD_BINDING_MASK +#define RTLD_BINDING_MASK (RTLD_LAZY | RTLD_NOW) +#endif + +#ifndef RTLD_LOCATION_MASK +#define RTLD_LOCATION_MASK (RTLD_LOCAL | RTLD_GLOBAL) +#endif + +#ifndef RTLD_ALL_MASK +#define RTLD_ALL_MASK (RTLD_BINDING_MASK | RTLD_LOCATION_MASK | RTLD_NOLOAD | RTLD_NODELETE | RTLD_FIRST | RTLD_DEEPBIND | RTLD_MEMBER) +#endif + +typedef struct Library { + void* handle; +} Library; + +extern void rbffi_DynamicLibrary_Init(VALUE ffiModule); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBRARY_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Function.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Function.c new file mode 100644 index 0000000..506d79b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Function.c @@ -0,0 +1,902 @@ +/* + * Copyright (c) 2009-2011 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _WIN32 +# include +# include +#endif + +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# if !defined(INT8_MIN) +# include "win32/stdint.h" +# endif +#endif +#include +#include + +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) +#include +#endif +#include + +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Struct.h" +#include "Platform.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "ClosurePool.h" +#include "MappedType.h" +#include "Thread.h" +#include "LongDouble.h" +#include "MethodHandle.h" +#include "Function.h" + +typedef struct Function_ { + Pointer base; + FunctionType* info; + MethodHandle* methodHandle; + bool autorelease; + Closure* closure; + VALUE rbProc; + VALUE rbFunctionInfo; +} Function; + +static void function_mark(Function *); +static void function_free(Function *); +static VALUE function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc); +static void callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data); +static bool callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize); +static void* callback_with_gvl(void* data); +static VALUE invoke_callback(VALUE data); +static VALUE save_callback_exception(VALUE data, VALUE exc); + +#define DEFER_ASYNC_CALLBACK 1 + + +#if defined(DEFER_ASYNC_CALLBACK) +static VALUE async_cb_event(void *); +static VALUE async_cb_call(void *); +#endif + +extern int ruby_thread_has_gvl_p(void); +extern int ruby_native_thread_p(void); + +VALUE rbffi_FunctionClass = Qnil; + +#if defined(DEFER_ASYNC_CALLBACK) +static VALUE async_cb_thread = Qnil; +#endif + +static ID id_call = 0, id_to_native = 0, id_from_native = 0, id_cbtable = 0, id_cb_ref = 0; + +struct gvl_callback { + Closure* closure; + void* retval; + void** parameters; + bool done; + rbffi_frame_t *frame; +#if defined(DEFER_ASYNC_CALLBACK) + struct gvl_callback* next; +# ifndef _WIN32 + pthread_cond_t async_cond; + pthread_mutex_t async_mutex; +# else + HANDLE async_event; +# endif +#endif +}; + + +#if defined(DEFER_ASYNC_CALLBACK) +static struct gvl_callback* async_cb_list = NULL; +# ifndef _WIN32 + static pthread_mutex_t async_cb_mutex = PTHREAD_MUTEX_INITIALIZER; + static pthread_cond_t async_cb_cond = PTHREAD_COND_INITIALIZER; +# else + static HANDLE async_cb_cond; + static CRITICAL_SECTION async_cb_lock; +# endif +#endif + + +static VALUE +function_allocate(VALUE klass) +{ + Function *fn; + VALUE obj; + + obj = Data_Make_Struct(klass, Function, function_mark, function_free, fn); + + fn->base.memory.flags = MEM_RD; + fn->base.rbParent = Qnil; + fn->rbProc = Qnil; + fn->rbFunctionInfo = Qnil; + fn->autorelease = true; + + return obj; +} + +static void +function_mark(Function *fn) +{ + rb_gc_mark(fn->base.rbParent); + rb_gc_mark(fn->rbProc); + rb_gc_mark(fn->rbFunctionInfo); +} + +static void +function_free(Function *fn) +{ + if (fn->methodHandle 
!= NULL) { + rbffi_MethodHandle_Free(fn->methodHandle); + } + + if (fn->closure != NULL && fn->autorelease) { + rbffi_Closure_Free(fn->closure); + } + + xfree(fn); +} + +/* + * @param [Type, Symbol] return_type return type for the function + * @param [Array] param_types array of parameters types + * @param [Hash] options see {FFI::FunctionType} for available options + * @return [self] + * A new Function instance. + * + * Define a function from a Proc or a block. + * + * @overload initialize(return_type, param_types, options = {}) { |i| ... } + * @yieldparam i parameters for the function + * @overload initialize(return_type, param_types, proc, options = {}) + * @param [Proc] proc + */ +static VALUE +function_initialize(int argc, VALUE* argv, VALUE self) +{ + + VALUE rbReturnType = Qnil, rbParamTypes = Qnil, rbProc = Qnil, rbOptions = Qnil; + VALUE rbFunctionInfo = Qnil; + VALUE infoArgv[3]; + int nargs; + + nargs = rb_scan_args(argc, argv, "22", &rbReturnType, &rbParamTypes, &rbProc, &rbOptions); + + /* + * Callback with block, + * e.g. Function.new(:int, [ :int ]) { |i| blah } + * or Function.new(:int, [ :int ], { :convention => :stdcall }) { |i| blah } + */ + if (rb_block_given_p()) { + if (nargs > 3) { + rb_raise(rb_eArgError, "cannot create function with both proc/address and block"); + } + rbOptions = rbProc; + rbProc = rb_block_proc(); + } else { + /* Callback with proc, or Function with address + * e.g. Function.new(:int, [ :int ], Proc.new { |i| }) + * Function.new(:int, [ :int ], Proc.new { |i| }, { :convention => :stdcall }) + * Function.new(:int, [ :int ], addr) + * Function.new(:int, [ :int ], addr, { :convention => :stdcall }) + */ + } + + infoArgv[0] = rbReturnType; + infoArgv[1] = rbParamTypes; + infoArgv[2] = rbOptions; + rbFunctionInfo = rb_class_new_instance(rbOptions != Qnil ? 3 : 2, infoArgv, rbffi_FunctionTypeClass); + + function_init(self, rbFunctionInfo, rbProc); + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +function_initialize_copy(VALUE self, VALUE other) +{ + rb_raise(rb_eRuntimeError, "cannot duplicate function instances"); + return Qnil; +} + +VALUE +rbffi_Function_NewInstance(VALUE rbFunctionInfo, VALUE rbProc) +{ + return function_init(function_allocate(rbffi_FunctionClass), rbFunctionInfo, rbProc); +} + +VALUE +rbffi_Function_ForProc(VALUE rbFunctionInfo, VALUE proc) +{ + VALUE callback, cbref, cbTable; + Function* fp; + + cbref = RTEST(rb_ivar_defined(proc, id_cb_ref)) ? rb_ivar_get(proc, id_cb_ref) : Qnil; + /* If the first callback reference has the same function function signature, use it */ + if (cbref != Qnil && CLASS_OF(cbref) == rbffi_FunctionClass) { + Data_Get_Struct(cbref, Function, fp); + if (fp->rbFunctionInfo == rbFunctionInfo) { + return cbref; + } + } + + cbTable = RTEST(rb_ivar_defined(proc, id_cbtable)) ? 
rb_ivar_get(proc, id_cbtable) : Qnil; + if (cbTable != Qnil && (callback = rb_hash_aref(cbTable, rbFunctionInfo)) != Qnil) { + return callback; + } + + /* No existing function for the proc with that signature, create a new one and cache it */ + callback = rbffi_Function_NewInstance(rbFunctionInfo, proc); + if (cbref == Qnil) { + /* If there is no other cb already cached for this proc, we can use the ivar slot */ + rb_ivar_set(proc, id_cb_ref, callback); + } else { + /* The proc instance has been used as more than one type of callback, store extras in a hash */ + cbTable = rb_hash_new(); + rb_ivar_set(proc, id_cbtable, cbTable); + rb_hash_aset(cbTable, rbFunctionInfo, callback); + } + + return callback; +} + +static VALUE +function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc) +{ + Function* fn = NULL; + + Data_Get_Struct(self, Function, fn); + + fn->rbFunctionInfo = rbFunctionInfo; + + Data_Get_Struct(fn->rbFunctionInfo, FunctionType, fn->info); + + if (rb_obj_is_kind_of(rbProc, rbffi_PointerClass)) { + Pointer* orig; + Data_Get_Struct(rbProc, Pointer, orig); + fn->base.memory = orig->memory; + fn->base.rbParent = rbProc; + + } else if (rb_obj_is_kind_of(rbProc, rb_cProc) || rb_respond_to(rbProc, id_call)) { + if (fn->info->closurePool == NULL) { + fn->info->closurePool = rbffi_ClosurePool_New(sizeof(ffi_closure), callback_prep, fn->info); + if (fn->info->closurePool == NULL) { + rb_raise(rb_eNoMemError, "failed to create closure pool"); + } + } + +#if defined(DEFER_ASYNC_CALLBACK) + if (async_cb_thread == Qnil) { + async_cb_thread = rb_thread_create(async_cb_event, NULL); + } +#endif + + fn->closure = rbffi_Closure_Alloc(fn->info->closurePool); + fn->closure->info = fn; + fn->base.memory.address = fn->closure->code; + fn->base.memory.size = sizeof(*fn->closure); + fn->autorelease = true; + + } else { + rb_raise(rb_eTypeError, "wrong argument type %s, expected pointer or proc", + rb_obj_classname(rbProc)); + } + + fn->rbProc = rbProc; + + return self; +} + +/* + * call-seq: call(*args) + * @param [Array] args function arguments + * @return [FFI::Type] + * Call the function + */ +static VALUE +function_call(int argc, VALUE* argv, VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + return (*fn->info->invoke)(argc, argv, fn->base.memory.address, fn->info); +} + +/* + * call-seq: attach(m, name) + * @param [Module] m + * @param [String] name + * @return [self] + * Attach a Function to the Module +m+ as +name+. 
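+ *
+ * A minimal sketch (module, library and function names are illustrative):
+ *
+ *   module MyMath; end
+ *   lib = FFI::DynamicLibrary.open("libm.so.6", 0)
+ *   fn  = FFI::Function.new(:double, [ :double ], lib.find_function("cos"))
+ *   fn.attach(MyMath, "cos")
+ *   MyMath.cos(0.0)   # => 1.0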
+ */ +static VALUE +function_attach(VALUE self, VALUE module, VALUE name) +{ + Function* fn; + char var[1024]; + + Data_Get_Struct(self, Function, fn); + + if (fn->info->parameterCount == -1) { + rb_raise(rb_eRuntimeError, "cannot attach variadic functions"); + return Qnil; + } + + if (!rb_obj_is_kind_of(module, rb_cModule)) { + rb_raise(rb_eRuntimeError, "trying to attach function to non-module"); + return Qnil; + } + + if (fn->methodHandle == NULL) { + fn->methodHandle = rbffi_MethodHandle_Alloc(fn->info, fn->base.memory.address); + } + + /* + * Stash the Function in a module variable so it does not get garbage collected + */ + snprintf(var, sizeof(var), "@@%s", StringValueCStr(name)); + rb_cv_set(module, var, self); + + rb_define_singleton_method(module, StringValueCStr(name), + rbffi_MethodHandle_CodeAddress(fn->methodHandle), -1); + + + rb_define_method(module, StringValueCStr(name), + rbffi_MethodHandle_CodeAddress(fn->methodHandle), -1); + + return self; +} + +/* + * call-seq: autorelease = autorelease + * @param [Boolean] autorelease + * @return [self] + * Set +autorelease+ attribute (See {Pointer}). + */ +static VALUE +function_set_autorelease(VALUE self, VALUE autorelease) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + fn->autorelease = RTEST(autorelease); + + return self; +} + +static VALUE +function_autorelease_p(VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + return fn->autorelease ? Qtrue : Qfalse; +} + +/* + * call-seq: free + * @return [self] + * Free memory allocated by Function. + */ +static VALUE +function_release(VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + if (fn->closure == NULL) { + rb_raise(rb_eRuntimeError, "cannot free function which was not allocated"); + } + + rbffi_Closure_Free(fn->closure); + fn->closure = NULL; + + return self; +} + +static void +callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data) +{ + struct gvl_callback cb = { 0 }; + + cb.closure = (Closure *) user_data; + cb.retval = retval; + cb.parameters = parameters; + cb.done = false; + cb.frame = rbffi_frame_current(); + + if (cb.frame != NULL) cb.frame->exc = Qnil; + + if (ruby_native_thread_p()) { + if(ruby_thread_has_gvl_p()) { + callback_with_gvl(&cb); + } else { + rb_thread_call_with_gvl(callback_with_gvl, &cb); + } +#if defined(DEFER_ASYNC_CALLBACK) && !defined(_WIN32) + } else { + bool empty = false; + + pthread_mutex_init(&cb.async_mutex, NULL); + pthread_cond_init(&cb.async_cond, NULL); + + /* Now signal the async callback thread */ + pthread_mutex_lock(&async_cb_mutex); + empty = async_cb_list == NULL; + cb.next = async_cb_list; + async_cb_list = &cb; + + pthread_cond_signal(&async_cb_cond); + pthread_mutex_unlock(&async_cb_mutex); + + /* Wait for the thread executing the ruby callback to signal it is done */ + pthread_mutex_lock(&cb.async_mutex); + while (!cb.done) { + pthread_cond_wait(&cb.async_cond, &cb.async_mutex); + } + pthread_mutex_unlock(&cb.async_mutex); + pthread_cond_destroy(&cb.async_cond); + pthread_mutex_destroy(&cb.async_mutex); + +#elif defined(DEFER_ASYNC_CALLBACK) && defined(_WIN32) + } else { + bool empty = false; + + cb.async_event = CreateEvent(NULL, FALSE, FALSE, NULL); + + /* Now signal the async callback thread */ + EnterCriticalSection(&async_cb_lock); + empty = async_cb_list == NULL; + cb.next = async_cb_list; + async_cb_list = &cb; + LeaveCriticalSection(&async_cb_lock); + + SetEvent(async_cb_cond); + + /* Wait for the thread executing the ruby callback to 
signal it is done */ + WaitForSingleObject(cb.async_event, INFINITE); + CloseHandle(cb.async_event); +#endif + } +} + +#if defined(DEFER_ASYNC_CALLBACK) +struct async_wait { + void* cb; + bool stop; +}; + +static void * async_cb_wait(void *); +static void async_cb_stop(void *); + +static VALUE +async_cb_event(void* unused) +{ + struct async_wait w = { 0 }; + + w.stop = false; + while (!w.stop) { + rb_thread_call_without_gvl(async_cb_wait, &w, async_cb_stop, &w); + if (w.cb != NULL) { + /* Start up a new ruby thread to run the ruby callback */ + rb_thread_create(async_cb_call, w.cb); + } + } + + return Qnil; +} + +#ifdef _WIN32 +static void * +async_cb_wait(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + w->cb = NULL; + + EnterCriticalSection(&async_cb_lock); + + while (!w->stop && async_cb_list == NULL) { + LeaveCriticalSection(&async_cb_lock); + WaitForSingleObject(async_cb_cond, INFINITE); + EnterCriticalSection(&async_cb_lock); + } + + if (async_cb_list != NULL) { + w->cb = async_cb_list; + async_cb_list = async_cb_list->next; + } + + LeaveCriticalSection(&async_cb_lock); + + return NULL; +} + +static void +async_cb_stop(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + EnterCriticalSection(&async_cb_lock); + w->stop = true; + LeaveCriticalSection(&async_cb_lock); + SetEvent(async_cb_cond); +} + +#else +static void * +async_cb_wait(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + w->cb = NULL; + + pthread_mutex_lock(&async_cb_mutex); + + while (!w->stop && async_cb_list == NULL) { + pthread_cond_wait(&async_cb_cond, &async_cb_mutex); + } + + if (async_cb_list != NULL) { + w->cb = async_cb_list; + async_cb_list = async_cb_list->next; + } + + pthread_mutex_unlock(&async_cb_mutex); + + return NULL; +} + +static void +async_cb_stop(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + pthread_mutex_lock(&async_cb_mutex); + w->stop = true; + pthread_cond_signal(&async_cb_cond); + pthread_mutex_unlock(&async_cb_mutex); +} +#endif + +static VALUE +async_cb_call(void *data) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + callback_with_gvl(data); + + /* Signal the original native thread that the ruby code has completed */ +#ifdef _WIN32 + SetEvent(cb->async_event); +#else + pthread_mutex_lock(&cb->async_mutex); + cb->done = true; + pthread_cond_signal(&cb->async_cond); + pthread_mutex_unlock(&cb->async_mutex); +#endif + + return Qnil; +} + +#endif + +static void * +callback_with_gvl(void* data) +{ + rb_rescue2(invoke_callback, (VALUE) data, save_callback_exception, (VALUE) data, rb_eException, (VALUE) 0); + return NULL; +} + +static VALUE +invoke_callback(VALUE data) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + Function* fn = (Function *) cb->closure->info; + FunctionType *cbInfo = fn->info; + Type* returnType = cbInfo->returnType; + void* retval = cb->retval; + void** parameters = cb->parameters; + VALUE* rbParams; + VALUE rbReturnType = cbInfo->rbReturnType; + VALUE rbReturnValue; + int i; + + rbParams = ALLOCA_N(VALUE, cbInfo->parameterCount); + for (i = 0; i < cbInfo->parameterCount; ++i) { + VALUE param; + Type* paramType = cbInfo->parameterTypes[i]; + VALUE rbParamType = rb_ary_entry(cbInfo->rbParameterTypes, i); + + if (unlikely(paramType->nativeType == NATIVE_MAPPED)) { + rbParamType = ((MappedType *) paramType)->rbType; + paramType = ((MappedType *) paramType)->type; + } + + switch (paramType->nativeType) { + case NATIVE_INT8: + param = INT2NUM(*(int8_t *) 
parameters[i]); + break; + case NATIVE_UINT8: + param = UINT2NUM(*(uint8_t *) parameters[i]); + break; + case NATIVE_INT16: + param = INT2NUM(*(int16_t *) parameters[i]); + break; + case NATIVE_UINT16: + param = UINT2NUM(*(uint16_t *) parameters[i]); + break; + case NATIVE_INT32: + param = INT2NUM(*(int32_t *) parameters[i]); + break; + case NATIVE_UINT32: + param = UINT2NUM(*(uint32_t *) parameters[i]); + break; + case NATIVE_INT64: + param = LL2NUM(*(int64_t *) parameters[i]); + break; + case NATIVE_UINT64: + param = ULL2NUM(*(uint64_t *) parameters[i]); + break; + case NATIVE_LONG: + param = LONG2NUM(*(long *) parameters[i]); + break; + case NATIVE_ULONG: + param = ULONG2NUM(*(unsigned long *) parameters[i]); + break; + case NATIVE_FLOAT32: + param = rb_float_new(*(float *) parameters[i]); + break; + case NATIVE_FLOAT64: + param = rb_float_new(*(double *) parameters[i]); + break; + case NATIVE_LONGDOUBLE: + param = rbffi_longdouble_new(*(long double *) parameters[i]); + break; + case NATIVE_STRING: + param = (*(void **) parameters[i] != NULL) ? rb_str_new2(*(char **) parameters[i]) : Qnil; + break; + case NATIVE_POINTER: + param = rbffi_Pointer_NewInstance(*(void **) parameters[i]); + break; + case NATIVE_BOOL: + param = (*(uint8_t *) parameters[i]) ? Qtrue : Qfalse; + break; + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + case NATIVE_STRUCT: + param = rbffi_NativeValue_ToRuby(paramType, rbParamType, parameters[i]); + break; + + default: + param = Qnil; + break; + } + + /* Convert the native value into a custom ruby value */ + if (unlikely(cbInfo->parameterTypes[i]->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { param, Qnil }; + param = rb_funcall2(((MappedType *) cbInfo->parameterTypes[i])->rbConverter, id_from_native, 2, values); + } + + rbParams[i] = param; + } + + rbReturnValue = rb_funcall2(fn->rbProc, id_call, cbInfo->parameterCount, rbParams); + + if (unlikely(returnType->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { rbReturnValue, Qnil }; + rbReturnValue = rb_funcall2(((MappedType *) returnType)->rbConverter, id_to_native, 2, values); + rbReturnType = ((MappedType *) returnType)->rbType; + returnType = ((MappedType* ) returnType)->type; + } + + if (rbReturnValue == Qnil || TYPE(rbReturnValue) == T_NIL) { + memset(retval, 0, returnType->ffiType->size); + } else switch (returnType->nativeType) { + case NATIVE_INT8: + case NATIVE_INT16: + case NATIVE_INT32: + *((ffi_sarg *) retval) = NUM2INT(rbReturnValue); + break; + case NATIVE_UINT8: + case NATIVE_UINT16: + case NATIVE_UINT32: + *((ffi_arg *) retval) = NUM2UINT(rbReturnValue); + break; + case NATIVE_INT64: + *((int64_t *) retval) = NUM2LL(rbReturnValue); + break; + case NATIVE_UINT64: + *((uint64_t *) retval) = NUM2ULL(rbReturnValue); + break; + case NATIVE_LONG: + *((ffi_sarg *) retval) = NUM2LONG(rbReturnValue); + break; + case NATIVE_ULONG: + *((ffi_arg *) retval) = NUM2ULONG(rbReturnValue); + break; + case NATIVE_FLOAT32: + *((float *) retval) = (float) NUM2DBL(rbReturnValue); + break; + case NATIVE_FLOAT64: + *((double *) retval) = NUM2DBL(rbReturnValue); + break; + case NATIVE_LONGDOUBLE: + *((long double *) retval) = rbffi_num2longdouble(rbReturnValue); + break; + case NATIVE_POINTER: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) { + *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address; + } else { + /* Default to returning NULL if not a value pointer object. 
handles nil case as well */ + *((void **) retval) = NULL; + } + break; + + case NATIVE_BOOL: + *((ffi_arg *) retval) = rbReturnValue == Qtrue; + break; + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) { + + *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address; + + } else if (rb_obj_is_kind_of(rbReturnValue, rb_cProc) || rb_respond_to(rbReturnValue, id_call)) { + VALUE function; + + function = rbffi_Function_ForProc(rbReturnType, rbReturnValue); + + *((void **) retval) = ((AbstractMemory *) DATA_PTR(function))->address; + } else { + *((void **) retval) = NULL; + } + break; + + case NATIVE_STRUCT: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_StructClass)) { + AbstractMemory* memory = ((Struct *) DATA_PTR(rbReturnValue))->pointer; + + if (memory->address != NULL) { + memcpy(retval, memory->address, returnType->ffiType->size); + + } else { + memset(retval, 0, returnType->ffiType->size); + } + + } else { + memset(retval, 0, returnType->ffiType->size); + } + break; + + default: + *((ffi_arg *) retval) = 0; + break; + } + + return Qnil; +} + +static VALUE +save_callback_exception(VALUE data, VALUE exc) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + memset(cb->retval, 0, ((Function *) cb->closure->info)->info->returnType->ffiType->size); + if (cb->frame != NULL) cb->frame->exc = exc; + + return Qnil; +} + +static bool +callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + FunctionType* fnInfo = (FunctionType *) ctx; + ffi_status ffiStatus; + + ffiStatus = ffi_prep_closure_loc(code, &fnInfo->ffi_cif, callback_invoke, closure, code); + if (ffiStatus != FFI_OK) { + snprintf(errmsg, errmsgsize, "ffi_prep_closure_loc failed. status=%#x", ffiStatus); + return false; + } + + return true; +} + +void +rbffi_Function_Init(VALUE moduleFFI) +{ + rbffi_FunctionInfo_Init(moduleFFI); + /* + * Document-class: FFI::Function < FFI::Pointer + */ + rbffi_FunctionClass = rb_define_class_under(moduleFFI, "Function", rbffi_PointerClass); + + rb_global_variable(&rbffi_FunctionClass); + rb_define_alloc_func(rbffi_FunctionClass, function_allocate); + + rb_define_method(rbffi_FunctionClass, "initialize", function_initialize, -1); + rb_define_method(rbffi_FunctionClass, "initialize_copy", function_initialize_copy, 1); + rb_define_method(rbffi_FunctionClass, "call", function_call, -1); + rb_define_method(rbffi_FunctionClass, "attach", function_attach, 2); + rb_define_method(rbffi_FunctionClass, "free", function_release, 0); + rb_define_method(rbffi_FunctionClass, "autorelease=", function_set_autorelease, 1); + /* + * call-seq: autorelease + * @return [Boolean] + * Get +autorelease+ attribute. + * Synonymous for {#autorelease?}. + */ + rb_define_method(rbffi_FunctionClass, "autorelease", function_autorelease_p, 0); + /* + * call-seq: autorelease? + * @return [Boolean] +autorelease+ attribute + * Get +autorelease+ attribute. 
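+ *
+ * For example (sketch):
+ *
+ *   fn = FFI::Function.new(:int, [ :int ]) { |i| i + 1 }
+ *   fn.autorelease?         # => true (the default)
+ *   fn.autorelease = false  # caller becomes responsible for calling #free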
+ */ + rb_define_method(rbffi_FunctionClass, "autorelease?", function_autorelease_p, 0); + + id_call = rb_intern("call"); + id_cbtable = rb_intern("@__ffi_callback_table__"); + id_cb_ref = rb_intern("@__ffi_callback__"); + id_to_native = rb_intern("to_native"); + id_from_native = rb_intern("from_native"); +#if defined(_WIN32) + InitializeCriticalSection(&async_cb_lock); + async_cb_cond = CreateEvent(NULL, FALSE, FALSE, NULL); +#endif +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Function.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Function.h new file mode 100644 index 0000000..052aaf6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Function.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_FUNCTION_H +#define RBFFI_FUNCTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +#endif + +#include + +typedef struct FunctionType_ FunctionType; + +#include "Type.h" +#include "Call.h" +#include "ClosurePool.h" + +struct FunctionType_ { + Type type; /* The native type of a FunctionInfo object */ + VALUE rbReturnType; + VALUE rbParameterTypes; + + Type* returnType; + Type** parameterTypes; + NativeType* nativeParameterTypes; + ffi_type* ffiReturnType; + ffi_type** ffiParameterTypes; + ffi_cif ffi_cif; + Invoker invoke; + ClosurePool* closurePool; + int parameterCount; + int flags; + ffi_abi abi; + int callbackCount; + VALUE* callbackParameters; + VALUE rbEnums; + bool ignoreErrno; + bool blocking; + bool hasStruct; +}; + +extern VALUE rbffi_FunctionTypeClass, rbffi_FunctionClass; + +void rbffi_Function_Init(VALUE moduleFFI); +VALUE rbffi_Function_NewInstance(VALUE functionInfo, VALUE proc); +VALUE rbffi_Function_ForProc(VALUE cbInfo, VALUE proc); +void rbffi_FunctionInfo_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_FUNCTION_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/FunctionInfo.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/FunctionInfo.c new file mode 100644 index 0000000..b7b23a7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/FunctionInfo.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (C) 2009 Andrea Fazzi + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +# include +#endif +#include +#include + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +#endif + +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Types.h" +#include "Type.h" +#include "StructByValue.h" +#include "Function.h" + +static VALUE fntype_allocate(VALUE klass); +static VALUE fntype_initialize(int argc, VALUE* argv, VALUE self); +static void fntype_mark(FunctionType*); +static void fntype_free(FunctionType *); + +VALUE rbffi_FunctionTypeClass = Qnil; + +static VALUE +fntype_allocate(VALUE klass) +{ + FunctionType* fnInfo; + VALUE obj = Data_Make_Struct(klass, FunctionType, fntype_mark, fntype_free, fnInfo); + + fnInfo->type.ffiType = &ffi_type_pointer; + fnInfo->type.nativeType = NATIVE_FUNCTION; + fnInfo->rbReturnType = Qnil; + fnInfo->rbParameterTypes = Qnil; + fnInfo->rbEnums = Qnil; + fnInfo->invoke = rbffi_CallFunction; + fnInfo->closurePool = NULL; + + return obj; +} + +static void +fntype_mark(FunctionType* fnInfo) +{ + rb_gc_mark(fnInfo->rbReturnType); + rb_gc_mark(fnInfo->rbParameterTypes); + rb_gc_mark(fnInfo->rbEnums); + if (fnInfo->callbackCount > 0 && fnInfo->callbackParameters != NULL) { + rb_gc_mark_locations(&fnInfo->callbackParameters[0], &fnInfo->callbackParameters[fnInfo->callbackCount]); + } +} + +static void +fntype_free(FunctionType* fnInfo) +{ + xfree(fnInfo->parameterTypes); + xfree(fnInfo->ffiParameterTypes); + xfree(fnInfo->nativeParameterTypes); + xfree(fnInfo->callbackParameters); + if (fnInfo->closurePool != NULL) { + rbffi_ClosurePool_Free(fnInfo->closurePool); + } + xfree(fnInfo); +} + +/* + * call-seq: initialize(return_type, param_types, options={}) + * @param [Type, Symbol] return_type return type for the function + * @param [Array] param_types array of parameters types + * @param [Hash] options + * @option options [Boolean] :blocking set to true if the C function is a blocking call + * @option options [Symbol] :convention calling convention see {FFI::Library#calling_convention} + * @option options [FFI::Enums] :enums + * @return [self] + * A new FunctionType instance. 
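+ *
+ * A brief sketch, using the option keys documented above:
+ *
+ *   type = FFI::FunctionType.new(:int, [ :pointer, :int ], :blocking => true)
+ *   type.result_type   # => the builtin int return type
+ *   type.param_types   # => [pointer type, int type]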
+ */ +static VALUE +fntype_initialize(int argc, VALUE* argv, VALUE self) +{ + FunctionType *fnInfo; + ffi_status status; + VALUE rbReturnType = Qnil, rbParamTypes = Qnil, rbOptions = Qnil; + VALUE rbEnums = Qnil, rbConvention = Qnil, rbBlocking = Qnil; +#if defined(X86_WIN32) + VALUE rbConventionStr; +#endif + int i, nargs; + + nargs = rb_scan_args(argc, argv, "21", &rbReturnType, &rbParamTypes, &rbOptions); + if (nargs >= 3 && rbOptions != Qnil) { + rbConvention = rb_hash_aref(rbOptions, ID2SYM(rb_intern("convention"))); + rbEnums = rb_hash_aref(rbOptions, ID2SYM(rb_intern("enums"))); + rbBlocking = rb_hash_aref(rbOptions, ID2SYM(rb_intern("blocking"))); + } + + Check_Type(rbParamTypes, T_ARRAY); + + Data_Get_Struct(self, FunctionType, fnInfo); + fnInfo->parameterCount = (int) RARRAY_LEN(rbParamTypes); + fnInfo->parameterTypes = xcalloc(fnInfo->parameterCount, sizeof(*fnInfo->parameterTypes)); + fnInfo->ffiParameterTypes = xcalloc(fnInfo->parameterCount, sizeof(ffi_type *)); + fnInfo->nativeParameterTypes = xcalloc(fnInfo->parameterCount, sizeof(*fnInfo->nativeParameterTypes)); + fnInfo->rbParameterTypes = rb_ary_new2(fnInfo->parameterCount); + fnInfo->rbEnums = rbEnums; + fnInfo->blocking = RTEST(rbBlocking); + fnInfo->hasStruct = false; + + for (i = 0; i < fnInfo->parameterCount; ++i) { + VALUE entry = rb_ary_entry(rbParamTypes, i); + VALUE type = rbffi_Type_Lookup(entry); + + if (!RTEST(type)) { + VALUE typeName = rb_funcall2(entry, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid parameter type (%s)", RSTRING_PTR(typeName)); + } + + if (rb_obj_is_kind_of(type, rbffi_FunctionTypeClass)) { + REALLOC_N(fnInfo->callbackParameters, VALUE, fnInfo->callbackCount + 1); + fnInfo->callbackParameters[fnInfo->callbackCount++] = type; + } + + if (rb_obj_is_kind_of(type, rbffi_StructByValueClass)) { + fnInfo->hasStruct = true; + } + + rb_ary_push(fnInfo->rbParameterTypes, type); + Data_Get_Struct(type, Type, fnInfo->parameterTypes[i]); + fnInfo->ffiParameterTypes[i] = fnInfo->parameterTypes[i]->ffiType; + fnInfo->nativeParameterTypes[i] = fnInfo->parameterTypes[i]->nativeType; + } + + fnInfo->rbReturnType = rbffi_Type_Lookup(rbReturnType); + if (!RTEST(fnInfo->rbReturnType)) { + VALUE typeName = rb_funcall2(rbReturnType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid return type (%s)", RSTRING_PTR(typeName)); + } + + if (rb_obj_is_kind_of(fnInfo->rbReturnType, rbffi_StructByValueClass)) { + fnInfo->hasStruct = true; + } + + Data_Get_Struct(fnInfo->rbReturnType, Type, fnInfo->returnType); + fnInfo->ffiReturnType = fnInfo->returnType->ffiType; + +#if defined(X86_WIN32) + rbConventionStr = (rbConvention != Qnil) ? rb_funcall2(rbConvention, rb_intern("to_s"), 0, NULL) : Qnil; + fnInfo->abi = (rbConventionStr != Qnil && strcmp(StringValueCStr(rbConventionStr), "stdcall") == 0) + ? 
FFI_STDCALL : FFI_DEFAULT_ABI; +#else + fnInfo->abi = FFI_DEFAULT_ABI; +#endif + + status = ffi_prep_cif(&fnInfo->ffi_cif, fnInfo->abi, fnInfo->parameterCount, + fnInfo->ffiReturnType, fnInfo->ffiParameterTypes); + switch (status) { + case FFI_BAD_ABI: + rb_raise(rb_eArgError, "Invalid ABI specified"); + case FFI_BAD_TYPEDEF: + rb_raise(rb_eArgError, "Invalid argument type specified"); + case FFI_OK: + break; + default: + rb_raise(rb_eArgError, "Unknown FFI error"); + } + + fnInfo->invoke = rbffi_GetInvoker(fnInfo); + + return self; +} + +/* + * call-seq: result_type + * @return [Type] + * Get the return type of the function type + */ +static VALUE +fntype_result_type(VALUE self) +{ + FunctionType* ft; + + Data_Get_Struct(self, FunctionType, ft); + + return ft->rbReturnType; +} + +/* + * call-seq: param_types + * @return [Array] + * Get parameters types. + */ +static VALUE +fntype_param_types(VALUE self) +{ + FunctionType* ft; + + Data_Get_Struct(self, FunctionType, ft); + + return rb_ary_dup(ft->rbParameterTypes); +} + +void +rbffi_FunctionInfo_Init(VALUE moduleFFI) +{ + VALUE ffi_Type; + + ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::FunctionType < FFI::Type + */ + rbffi_FunctionTypeClass = rb_define_class_under(moduleFFI, "FunctionType",ffi_Type); + rb_global_variable(&rbffi_FunctionTypeClass); + /* + * Document-const: FFI::CallbackInfo = FFI::FunctionType + */ + rb_define_const(moduleFFI, "CallbackInfo", rbffi_FunctionTypeClass); + /* + * Document-const: FFI::FunctionInfo = FFI::FunctionType + */ + rb_define_const(moduleFFI, "FunctionInfo", rbffi_FunctionTypeClass); + /* + * Document-const: FFI::Type::Function = FFI::FunctionType + */ + rb_define_const(ffi_Type, "Function", rbffi_FunctionTypeClass); + + rb_define_alloc_func(rbffi_FunctionTypeClass, fntype_allocate); + rb_define_method(rbffi_FunctionTypeClass, "initialize", fntype_initialize, -1); + rb_define_method(rbffi_FunctionTypeClass, "result_type", fntype_result_type, 0); + rb_define_method(rbffi_FunctionTypeClass, "param_types", fntype_param_types, 0); + +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LastError.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LastError.c new file mode 100644 index 0000000..8a460f3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LastError.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Aman Gupta + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +#endif +#include +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +#endif +#include +#include + +#include "LastError.h" + +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__) +# include +# define USE_PTHREAD_LOCAL +#endif + +#if defined(__CYGWIN__) +typedef uint32_t DWORD; +DWORD __stdcall GetLastError(void); +void __stdcall SetLastError(DWORD); +#endif + +typedef struct ThreadData { + int td_errno; +#if defined(_WIN32) || defined(__CYGWIN__) + DWORD td_winapi_errno; +#endif +} ThreadData; + +#if defined(USE_PTHREAD_LOCAL) +static pthread_key_t threadDataKey; +#endif + +static inline ThreadData* thread_data_get(void); + +#if defined(USE_PTHREAD_LOCAL) + +static ThreadData* +thread_data_init(void) +{ + ThreadData* td = xcalloc(1, sizeof(ThreadData)); + + pthread_setspecific(threadDataKey, td); + + return td; +} + + +static inline ThreadData* +thread_data_get(void) +{ + ThreadData* td = pthread_getspecific(threadDataKey); + return td != NULL ? td : thread_data_init(); +} + +static void +thread_data_free(void *ptr) +{ + xfree(ptr); +} + +#else +static ID id_thread_data; + +static ThreadData* +thread_data_init(void) +{ + ThreadData* td; + VALUE obj; + + obj = Data_Make_Struct(rb_cObject, ThreadData, NULL, -1, td); + rb_thread_local_aset(rb_thread_current(), id_thread_data, obj); + + return td; +} + +static inline ThreadData* +thread_data_get() +{ + VALUE obj = rb_thread_local_aref(rb_thread_current(), id_thread_data); + + if (obj != Qnil && TYPE(obj) == T_DATA) { + return (ThreadData *) DATA_PTR(obj); + } + + return thread_data_init(); +} + +#endif + + +/* + * call-seq: error + * @return [Numeric] + * Get +errno+ value. + */ +static VALUE +get_last_error(VALUE self) +{ + return INT2NUM(thread_data_get()->td_errno); +} + +#if defined(_WIN32) || defined(__CYGWIN__) +/* + * call-seq: winapi_error + * @return [Numeric] + * Get +GetLastError()+ value. Only Windows or Cygwin. + */ +static VALUE +get_last_winapi_error(VALUE self) +{ + return INT2NUM(thread_data_get()->td_winapi_errno); +} +#endif + + +/* + * call-seq: error(error) + * @param [Numeric] error + * @return [nil] + * Set +errno+ value. + */ +static VALUE +set_last_error(VALUE self, VALUE error) +{ + +#ifdef _WIN32 + SetLastError(NUM2INT(error)); +#else + errno = NUM2INT(error); +#endif + + return Qnil; +} + +#if defined(_WIN32) || defined(__CYGWIN__) +/* + * call-seq: error(error) + * @param [Numeric] error + * @return [nil] + * Set +GetLastError()+ value. Only on Windows and Cygwin. 
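+ *
+ * The example below is an editorial sketch rather than upstream documentation;
+ * it assumes a Windows or Cygwin host, where both the errno-style and the
+ * GetLastError()-style accessors defined by this module are available:
+ *
+ * @example Inspecting thread-local error state around a call
+ *   FFI::LastError.error = 0          # reset the saved errno-style value
+ *   FFI::LastError.winapi_error = 0   # reset the saved GetLastError() value
+ *   # ... call an attached native function here ...
+ *   FFI::LastError.error              # => error value saved for this thread
+ *   FFI::LastError.winapi_error       # => GetLastError() value saved for this thread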
+ */ +static VALUE +set_last_winapi_error(VALUE self, VALUE error) +{ + SetLastError(NUM2INT(error)); + return Qnil; +} +#endif + + +void +rbffi_save_errno(void) +{ + int error = 0; +#ifdef _WIN32 + error = GetLastError(); +#else + error = errno; +#endif + +#if defined(_WIN32) || defined(__CYGWIN__) + DWORD winapi_error = GetLastError(); + thread_data_get()->td_winapi_errno = winapi_error; +#endif + + thread_data_get()->td_errno = error; +} + +void +rbffi_LastError_Init(VALUE moduleFFI) +{ + /* + * Document-module: FFI::LastError + * This module defines a couple of method to set and get +errno+ + * for current thread. + */ + VALUE moduleError = rb_define_module_under(moduleFFI, "LastError"); + + rb_define_module_function(moduleError, "error", get_last_error, 0); + rb_define_module_function(moduleError, "error=", set_last_error, 1); + +#if defined(_WIN32) || defined(__CYGWIN__) + rb_define_module_function(moduleError, "winapi_error", get_last_winapi_error, 0); + rb_define_module_function(moduleError, "winapi_error=", set_last_winapi_error, 1); +#endif + +#if defined(USE_PTHREAD_LOCAL) + pthread_key_create(&threadDataKey, thread_data_free); +#else + id_thread_data = rb_intern("ffi_thread_local_data"); +#endif /* USE_PTHREAD_LOCAL */ +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LastError.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LastError.h new file mode 100644 index 0000000..ee1dfbb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LastError.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_LASTERROR_H +#define RBFFI_LASTERROR_H + +#ifdef __cplusplus +extern "C" { +#endif + + +void rbffi_LastError_Init(VALUE moduleFFI); + +void rbffi_save_errno(void); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_LASTERROR_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.c new file mode 100644 index 0000000..c95f2fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.c @@ -0,0 +1,65 @@ +#include "LongDouble.h" +#include +#include +#include + +#if defined (__CYGWIN__) || defined(__INTERIX) || defined(_MSC_VER) +# define strtold(str, endptr) ((long double) strtod((str), (endptr))) +#endif /* defined (__CYGWIN__) */ + +static VALUE rb_cBigDecimal = Qnil; +static VALUE bigdecimal_load(VALUE unused); +static VALUE bigdecimal_failed(VALUE value, VALUE exc); + +VALUE +rbffi_longdouble_new(long double ld) +{ + if (!RTEST(rb_cBigDecimal)) { + /* allow fallback if the bigdecimal library is unavailable in future ruby versions */ + rb_cBigDecimal = rb_rescue(bigdecimal_load, Qnil, bigdecimal_failed, rb_cObject); + } + + if (RTEST(rb_cBigDecimal) && rb_cBigDecimal != rb_cObject) { + char buf[128]; + return rb_funcall(rb_mKernel, rb_intern("BigDecimal"), 1, rb_str_new(buf, sprintf(buf, "%.35Le", ld))); + } + + /* Fall through to handling as a float */ + return rb_float_new(ld); +} + +long double +rbffi_num2longdouble(VALUE value) +{ + if (TYPE(value) == T_FLOAT) { + return rb_num2dbl(value); + } + + if (!RTEST(rb_cBigDecimal) && rb_const_defined(rb_cObject, rb_intern("BigDecimal"))) { + rb_cBigDecimal = rb_const_get(rb_cObject, rb_intern("BigDecimal")); + } + + if (RTEST(rb_cBigDecimal) && rb_cBigDecimal != rb_cObject && RTEST(rb_obj_is_kind_of(value, rb_cBigDecimal))) { + VALUE s = rb_funcall(value, rb_intern("to_s"), 1, rb_str_new2("E")); + long double ret = strtold(RSTRING_PTR(s), NULL); + RB_GC_GUARD(s); + return ret; + } + + /* Fall through to handling as a float */ + return rb_num2dbl(value); +} + + +static VALUE +bigdecimal_load(VALUE unused) +{ + rb_require("bigdecimal"); + return rb_const_get(rb_cObject, rb_intern("BigDecimal")); +} + +static VALUE +bigdecimal_failed(VALUE value, VALUE exc) +{ + return value; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.h new file mode 100644 index 0000000..079e890 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2012, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_LONGDOUBLE_H +#define RBFFI_LONGDOUBLE_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern VALUE rbffi_longdouble_new(long double ld); +extern long double rbffi_num2longdouble(VALUE value); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_LONGDOUBLE_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Makefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Makefile new file mode 100644 index 0000000..2823158 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Makefile @@ -0,0 +1,269 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 +hdrdir = $(topdir) +arch_hdrdir = /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20211106-32129-davzlr +sitelibdir = $(DESTDIR)./.gem.20211106-32129-davzlr +sitedir = $(DESTDIR)/Library/Ruby/Site +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(DESTDIR)/usr/share/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(DESTDIR)/usr/share/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(DESTDIR)/usr/include +includedir = 
$(DESTDIR)/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.sdk$(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(DESTDIR)/Library/Ruby/Site +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = xcrun clang +CXX = xcrun clang++ +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = extconf.h +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = $(optflags) $(debugflags) $(warnflags) +optflags = +debugflags = -g +warnflags = +cppflags = +CCDLFLAGS = +CFLAGS = $(CCDLFLAGS) -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi +DEFS = +CPPFLAGS = -DRUBY_EXTCONF_H=\"$(RUBY_EXTCONF_H)\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) -g -Os -pipe $(ARCH_FLAG) +ldflags = -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib +dldflags = $(ARCH_FLAG) -undefined dynamic_lookup -multiply_defined suppress +ARCH_FLAG = +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = libtool -static +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.2.6 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = universal-darwin21 +sitearch = $(arch) +ruby_version = 2.6.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h $(RUBY_EXTCONF_H) + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = mkdir -p +INSTALL = /usr/bin/install -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. 
-L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) -lffi +ORIG_SRCS = AbstractMemory.c ArrayType.c Buffer.c Call.c ClosurePool.c DynamicLibrary.c Function.c FunctionInfo.c LastError.c LongDouble.c MappedType.c MemoryPointer.c MethodHandle.c Platform.c Pointer.c Struct.c StructByValue.c StructLayout.c Thread.c Type.c Types.c Variadic.c ffi.c +SRCS = $(ORIG_SRCS) +OBJS = AbstractMemory.o ArrayType.o Buffer.o Call.o ClosurePool.o DynamicLibrary.o Function.o FunctionInfo.o LastError.o LongDouble.o MappedType.o MemoryPointer.o MethodHandle.o Platform.o Pointer.o Struct.o StructByValue.o StructLayout.o Thread.o Type.o Types.o Variadic.o ffi.o +HDRS = $(srcdir)/Type.h $(srcdir)/rbffi_endian.h $(srcdir)/MappedType.h $(srcdir)/Types.h $(srcdir)/LastError.h $(srcdir)/ArrayType.h $(srcdir)/extconf.h $(srcdir)/StructByValue.h $(srcdir)/AbstractMemory.h $(srcdir)/ClosurePool.h $(srcdir)/Call.h $(srcdir)/MethodHandle.h $(srcdir)/Struct.h $(srcdir)/rbffi.h $(srcdir)/Thread.h $(srcdir)/compat.h $(srcdir)/DynamicLibrary.h $(srcdir)/Platform.h $(srcdir)/Function.h $(srcdir)/LongDouble.h $(srcdir)/MemoryPointer.h $(srcdir)/Pointer.h +LOCAL_HDRS = +TARGET = ffi_c +TARGET_NAME = ffi_c +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . +BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) 
$(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object $(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +$(OBJS): $(HDRS) $(ruby_headers) +LIBFFI_HOST=--host= +include ${srcdir}/libffi.darwin.mk diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.c new file mode 100644 index 0000000..d1a4189 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2010, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include +#include "rbffi.h" + +#include "Type.h" +#include "MappedType.h" + + +static VALUE mapped_allocate(VALUE); +static VALUE mapped_initialize(VALUE, VALUE); +static void mapped_mark(MappedType *); +static ID id_native_type, id_to_native, id_from_native; + +VALUE rbffi_MappedTypeClass = Qnil; + +static VALUE +mapped_allocate(VALUE klass) +{ + MappedType* m; + + VALUE obj = Data_Make_Struct(klass, MappedType, mapped_mark, -1, m); + + m->rbConverter = Qnil; + m->rbType = Qnil; + m->type = NULL; + m->base.nativeType = NATIVE_MAPPED; + m->base.ffiType = &ffi_type_void; + + return obj; +} + +/* + * call-seq: initialize(converter) + * @param [#native_type, #to_native, #from_native] converter +converter+ must respond to + * all these methods + * @return [self] + */ +static VALUE +mapped_initialize(VALUE self, VALUE rbConverter) +{ + MappedType* m = NULL; + + if (!rb_respond_to(rbConverter, id_native_type)) { + rb_raise(rb_eNoMethodError, "native_type method not implemented"); + } + + if (!rb_respond_to(rbConverter, id_to_native)) { + rb_raise(rb_eNoMethodError, "to_native method not implemented"); + } + + if (!rb_respond_to(rbConverter, id_from_native)) { + rb_raise(rb_eNoMethodError, "from_native method not implemented"); + } + + Data_Get_Struct(self, MappedType, m); + m->rbType = rb_funcall2(rbConverter, id_native_type, 0, NULL); + if (!(rb_obj_is_kind_of(m->rbType, rbffi_TypeClass))) { + rb_raise(rb_eTypeError, "native_type did not return instance of FFI::Type"); + } + + m->rbConverter = rbConverter; + Data_Get_Struct(m->rbType, Type, m->type); + m->base.ffiType = m->type->ffiType; + + return self; +} + +static void +mapped_mark(MappedType* m) +{ + rb_gc_mark(m->rbType); + rb_gc_mark(m->rbConverter); +} + +/* + * call-seq: mapped_type.native_type + * @return [Type] + * Get native type of mapped type. 
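+ *
+ * The example below is an illustrative sketch added during editing, not part
+ * of the upstream gem documentation; BoolConverter is a hypothetical converter
+ * built with FFI::DataConverter, which supplies the native_type, to_native and
+ * from_native methods this class requires of its argument:
+ *
+ * @example Wrapping a converter in a mapped type
+ *   module BoolConverter
+ *     extend FFI::DataConverter
+ *     native_type FFI::Type::INT32
+ *     def self.to_native(value, ctx)
+ *       value ? 1 : 0
+ *     end
+ *     def self.from_native(value, ctx)
+ *       value != 0
+ *     end
+ *   end
+ *   mapped = FFI::Type::Mapped.new(BoolConverter)
+ *   mapped.native_type            # => FFI::Type::INT32
+ *   mapped.to_native(true, nil)   # => 1
+ *   mapped.from_native(0, nil)    # => false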
+ */ +static VALUE +mapped_native_type(VALUE self) +{ + MappedType*m = NULL; + Data_Get_Struct(self, MappedType, m); + + return m->rbType; +} + +/* + * call-seq: mapped_type.to_native(*args) + * @param args depends on {FFI::DataConverter} used to initialize +self+ + */ +static VALUE +mapped_to_native(int argc, VALUE* argv, VALUE self) +{ + MappedType*m = NULL; + + Data_Get_Struct(self, MappedType, m); + + return rb_funcall2(m->rbConverter, id_to_native, argc, argv); +} + +/* + * call-seq: mapped_type.from_native(*args) + * @param args depends on {FFI::DataConverter} used to initialize +self+ + */ +static VALUE +mapped_from_native(int argc, VALUE* argv, VALUE self) +{ + MappedType*m = NULL; + + Data_Get_Struct(self, MappedType, m); + + return rb_funcall2(m->rbConverter, id_from_native, argc, argv); +} + +void +rbffi_MappedType_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::Type::Mapped < FFI::Type + */ + rbffi_MappedTypeClass = rb_define_class_under(rbffi_TypeClass, "Mapped", rbffi_TypeClass); + + rb_global_variable(&rbffi_MappedTypeClass); + + id_native_type = rb_intern("native_type"); + id_to_native = rb_intern("to_native"); + id_from_native = rb_intern("from_native"); + + rb_define_alloc_func(rbffi_MappedTypeClass, mapped_allocate); + rb_define_method(rbffi_MappedTypeClass, "initialize", mapped_initialize, 1); + rb_define_method(rbffi_MappedTypeClass, "type", mapped_native_type, 0); + rb_define_method(rbffi_MappedTypeClass, "native_type", mapped_native_type, 0); + rb_define_method(rbffi_MappedTypeClass, "to_native", mapped_to_native, -1); + rb_define_method(rbffi_MappedTypeClass, "from_native", mapped_from_native, -1); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.h new file mode 100644 index 0000000..4b26cc1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2010, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_MAPPEDTYPE_H +#define RBFFI_MAPPEDTYPE_H + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct MappedType_ { + Type base; + Type* type; + VALUE rbConverter; + VALUE rbType; + +} MappedType; + +void rbffi_MappedType_Init(VALUE moduleFFI); + +extern VALUE rbffi_MappedTypeClass; + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_MAPPEDTYPE_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.c new file mode 100644 index 0000000..9dc59bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include +#include "rbffi.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" + + +static VALUE memptr_allocate(VALUE klass); +static void memptr_release(Pointer* ptr); +static VALUE memptr_malloc(VALUE self, long size, long count, bool clear); +static VALUE memptr_free(VALUE self); + +VALUE rbffi_MemoryPointerClass; + +#define MEMPTR(obj) ((MemoryPointer *) rbffi_AbstractMemory_Cast(obj, rbffi_MemoryPointerClass)) + +VALUE +rbffi_MemoryPointer_NewInstance(long size, long count, bool clear) +{ + return memptr_malloc(memptr_allocate(rbffi_MemoryPointerClass), size, count, clear); +} + +static VALUE +memptr_allocate(VALUE klass) +{ + Pointer* p; + VALUE obj = Data_Make_Struct(klass, Pointer, NULL, memptr_release, p); + p->rbParent = Qnil; + p->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +/* + * call-seq: initialize(size, count=1, clear=true) + * @param [Fixnum, Bignum, Symbol, FFI::Type] size size of a memory cell (in bytes, or type whom size will be used) + * @param [Numeric] count number of cells in memory + * @param [Boolean] clear set memory to all-zero if +true+ + * @return [self] + * A new instance of FFI::MemoryPointer. + */ +static VALUE +memptr_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE size = Qnil, count = Qnil, clear = Qnil; + int nargs = rb_scan_args(argc, argv, "12", &size, &count, &clear); + + memptr_malloc(self, rbffi_type_size(size), nargs > 1 ? NUM2LONG(count) : 1, + RTEST(clear) || clear == Qnil); + + if (rb_block_given_p()) { + return rb_ensure(rb_yield, self, memptr_free, self); + } + + return self; +} + +static VALUE +memptr_malloc(VALUE self, long size, long count, bool clear) +{ + Pointer* p; + unsigned long msize; + + Data_Get_Struct(self, Pointer, p); + + msize = size * count; + + p->storage = xmalloc(msize + 7); + if (p->storage == NULL) { + rb_raise(rb_eNoMemError, "Failed to allocate memory size=%ld bytes", msize); + return Qnil; + } + p->autorelease = true; + p->memory.typeSize = (int) size; + p->memory.size = msize; + /* ensure the memory is aligned on at least a 8 byte boundary */ + p->memory.address = (char *) (((uintptr_t) p->storage + 0x7) & (uintptr_t) ~0x7ULL); + p->allocated = true; + + if (clear && p->memory.size > 0) { + memset(p->memory.address, 0, p->memory.size); + } + + return self; +} + +static VALUE +memptr_free(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->allocated) { + if (ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + ptr->allocated = false; + } + + return self; +} + +static void +memptr_release(Pointer* ptr) +{ + if (ptr->autorelease && ptr->allocated && ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + xfree(ptr); +} + +/* + * call-seq: from_string(s) + * @param [String] s string + * @return [MemoryPointer] + * Create a {MemoryPointer} with +s+ inside. + */ +static VALUE +memptr_s_from_string(VALUE klass, VALUE to_str) +{ + VALUE s = StringValue(to_str); + VALUE args[] = { INT2FIX(1), LONG2NUM(RSTRING_LEN(s) + 1), Qfalse }; + VALUE obj = rb_class_new_instance(3, args, klass); + rb_funcall(obj, rb_intern("put_string"), 2, INT2FIX(0), s); + + return obj; +} + +void +rbffi_MemoryPointer_Init(VALUE moduleFFI) +{ + VALUE ffi_Pointer; + + ffi_Pointer = rbffi_PointerClass; + + /* + * Document-class: FFI::MemoryPointer < FFI::Pointer + * A MemoryPointer is a specific {Pointer}. 
It points to a memory composed of cells. All cells have the + * same size. + * + * @example Create a new MemoryPointer + * mp = FFI::MemoryPointer.new(:long, 16) # Create a pointer on a memory of 16 long ints. + * @example Create a new MemoryPointer from a String + * mp1 = FFI::MemoryPointer.from_string("this is a string") + * # same as: + * mp2 = FFI::MemoryPointer.new(:char,16) + * mp2.put_string("this is a string") + */ + rbffi_MemoryPointerClass = rb_define_class_under(moduleFFI, "MemoryPointer", ffi_Pointer); + rb_global_variable(&rbffi_MemoryPointerClass); + + rb_define_alloc_func(rbffi_MemoryPointerClass, memptr_allocate); + rb_define_method(rbffi_MemoryPointerClass, "initialize", memptr_initialize, -1); + rb_define_singleton_method(rbffi_MemoryPointerClass, "from_string", memptr_s_from_string, 1); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.h new file mode 100644 index 0000000..1257683 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2008, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_MEMORYPOINTER_H +#define RBFFI_MEMORYPOINTER_H + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +#endif +#include + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_MemoryPointer_Init(VALUE moduleFFI); + extern VALUE rbffi_MemoryPointerClass; + extern VALUE rbffi_MemoryPointer_NewInstance(long size, long count, bool clear); +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_MEMORYPOINTER_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.c new file mode 100644 index 0000000..4dc85fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _WIN32 +# include +#endif +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdint.h" +# include "win32/stdbool.h" +#endif +#ifndef _WIN32 +# include +#endif +#include +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__) +# include +#endif + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Function.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "ClosurePool.h" +#include "MethodHandle.h" + + +#define MAX_METHOD_FIXED_ARITY (6) + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +#ifdef USE_RAW +# define METHOD_CLOSURE ffi_raw_closure +# define METHOD_PARAMS ffi_raw* +#else +# define METHOD_CLOSURE ffi_closure +# define METHOD_PARAMS void** +#endif + + + +static bool prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize); +static long trampoline_size(void); + +#if defined(__x86_64__) && (defined(__linux__) || defined(__APPLE__)) +# define CUSTOM_TRAMPOLINE 1 +#endif + + +struct MethodHandle { + Closure* closure; +}; + +static ClosurePool* defaultClosurePool; + + +MethodHandle* +rbffi_MethodHandle_Alloc(FunctionType* fnInfo, void* function) +{ + MethodHandle* handle; + Closure* closure = rbffi_Closure_Alloc(defaultClosurePool); + if (closure == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate closure from pool"); + return NULL; + } + + handle = xcalloc(1, sizeof(*handle)); + handle->closure = closure; + closure->info = fnInfo; + closure->function = function; + + return handle; +} + +void +rbffi_MethodHandle_Free(MethodHandle* handle) +{ + if (handle != NULL) { + rbffi_Closure_Free(handle->closure); + } +} + +rbffi_function_anyargs rbffi_MethodHandle_CodeAddress(MethodHandle* handle) +{ + return (rbffi_function_anyargs) handle->closure->code; +} + +#ifndef CUSTOM_TRAMPOLINE +static void attached_method_invoke(ffi_cif* cif, void* retval, METHOD_PARAMS parameters, void* user_data); + +static ffi_type* methodHandleParamTypes[3]; + +static ffi_cif mh_cif; + +static bool +prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + ffi_status ffiStatus; + +#if defined(USE_RAW) + ffiStatus = ffi_prep_raw_closure(code, &mh_cif, attached_method_invoke, closure); +#else + ffiStatus = ffi_prep_closure_loc(code, &mh_cif, attached_method_invoke, closure, code); +#endif + if (ffiStatus != FFI_OK) { + snprintf(errmsg, errmsgsize, "ffi_prep_closure_loc failed. 
status=%#x", ffiStatus); + return false; + } + + return true; +} + + +static long +trampoline_size(void) +{ + return sizeof(METHOD_CLOSURE); +} + +/* + * attached_method_invoke is used functions with more than 6 parameters, or + * with struct param or return values + */ +static void +attached_method_invoke(ffi_cif* cif, void* mretval, METHOD_PARAMS parameters, void* user_data) +{ + Closure* handle = (Closure *) user_data; + FunctionType* fnInfo = (FunctionType *) handle->info; + +#ifdef USE_RAW + int argc = parameters[0].sint; + VALUE* argv = *(VALUE **) ¶meters[1]; +#else + int argc = *(int *) parameters[0]; + VALUE* argv = *(VALUE **) parameters[1]; +#endif + + *(VALUE *) mretval = (*fnInfo->invoke)(argc, argv, handle->function, fnInfo); +} + +#endif + + + +#if defined(CUSTOM_TRAMPOLINE) +#if defined(__x86_64__) + +static VALUE custom_trampoline(int argc, VALUE* argv, VALUE self, Closure*); + +#define TRAMPOLINE_CTX_MAGIC (0xfee1deadcafebabe) +#define TRAMPOLINE_FUN_MAGIC (0xfeedfacebeeff00d) + +/* + * This is a hand-coded trampoline to speedup entry from ruby to the FFI translation + * layer for x86_64 arches. + * + * Since a ruby function has exactly 3 arguments, and the first 6 arguments are + * passed in registers for x86_64, we can tack on a context pointer by simply + * putting a value in %rcx, then jumping to the C trampoline code. + * + * This results in approx a 30% speedup for x86_64 FFI dispatch + */ +__asm__( + ".text\n\t" + ".globl ffi_trampoline\n\t" + ".globl _ffi_trampoline\n\t" + "ffi_trampoline:\n\t" + "_ffi_trampoline:\n\t" + "movabsq $0xfee1deadcafebabe, %rcx\n\t" + "movabsq $0xfeedfacebeeff00d, %r11\n\t" + "jmpq *%r11\n\t" + ".globl ffi_trampoline_end\n\t" + "ffi_trampoline_end:\n\t" + ".globl _ffi_trampoline_end\n\t" + "_ffi_trampoline_end:\n\t" +); + +static VALUE +custom_trampoline(int argc, VALUE* argv, VALUE self, Closure* handle) +{ + FunctionType* fnInfo = (FunctionType *) handle->info; + VALUE rbReturnValue; + + RB_GC_GUARD(rbReturnValue) = (*fnInfo->invoke)(argc, argv, handle->function, fnInfo); + RB_GC_GUARD(self); + + return rbReturnValue; +} + +#elif defined(__i386__) && 0 + +static VALUE custom_trampoline(void *args, Closure*); +#define TRAMPOLINE_CTX_MAGIC (0xfee1dead) +#define TRAMPOLINE_FUN_MAGIC (0xbeefcafe) + +/* + * This is a hand-coded trampoline to speed-up entry from ruby to the FFI translation + * layer for i386 arches. + * + * This does not make a discernible difference vs a raw closure, so for now, + * it is not enabled. 
+ */ +__asm__( + ".text\n\t" + ".globl ffi_trampoline\n\t" + ".globl _ffi_trampoline\n\t" + "ffi_trampoline:\n\t" + "_ffi_trampoline:\n\t" + "subl $12, %esp\n\t" + "leal 16(%esp), %eax\n\t" + "movl %eax, (%esp)\n\t" + "movl $0xfee1dead, 4(%esp)\n\t" + "movl $0xbeefcafe, %eax\n\t" + "call *%eax\n\t" + "addl $12, %esp\n\t" + "ret\n\t" + ".globl ffi_trampoline_end\n\t" + "ffi_trampoline_end:\n\t" + ".globl _ffi_trampoline_end\n\t" + "_ffi_trampoline_end:\n\t" +); + +static VALUE +custom_trampoline(void *args, Closure* handle) +{ + FunctionType* fnInfo = (FunctionType *) handle->info; + return (*fnInfo->invoke)(*(int *) args, *(VALUE **) (args + 4), handle->function, fnInfo); +} + +#endif /* __x86_64__ else __i386__ */ + +extern void ffi_trampoline(int argc, VALUE* argv, VALUE self); +extern void ffi_trampoline_end(void); +static int trampoline_offsets(long *, long *); + +static long trampoline_ctx_offset, trampoline_func_offset; + +static long +trampoline_offset(int off, const long value) +{ + char *ptr; + for (ptr = (char *) &ffi_trampoline + off; ptr < (char *) &ffi_trampoline_end; ++ptr) { + if (*(long *) ptr == value) { + return ptr - (char *) &ffi_trampoline; + } + } + + return -1; +} + +static int +trampoline_offsets(long* ctxOffset, long* fnOffset) +{ + *ctxOffset = trampoline_offset(0, TRAMPOLINE_CTX_MAGIC); + if (*ctxOffset == -1) { + return -1; + } + + *fnOffset = trampoline_offset(0, TRAMPOLINE_FUN_MAGIC); + if (*fnOffset == -1) { + return -1; + } + + return 0; +} + +static bool +prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + memcpy(code, (void*) &ffi_trampoline, trampoline_size()); + /* Patch the context and function addresses into the stub code */ + *(intptr_t *)((char*)code + trampoline_ctx_offset) = (intptr_t) closure; + *(intptr_t *)((char*)code + trampoline_func_offset) = (intptr_t) custom_trampoline; + + return true; +} + +static long +trampoline_size(void) +{ + return (char *) &ffi_trampoline_end - (char *) &ffi_trampoline; +} + +#endif /* CUSTOM_TRAMPOLINE */ + + +void +rbffi_MethodHandle_Init(VALUE module) +{ +#ifndef CUSTOM_TRAMPOLINE + ffi_status ffiStatus; +#endif + + defaultClosurePool = rbffi_ClosurePool_New((int) trampoline_size(), prep_trampoline, NULL); + +#if defined(CUSTOM_TRAMPOLINE) + if (trampoline_offsets(&trampoline_ctx_offset, &trampoline_func_offset) != 0) { + rb_raise(rb_eFatal, "Could not locate offsets in trampoline code"); + } +#else + methodHandleParamTypes[0] = &ffi_type_sint; + methodHandleParamTypes[1] = &ffi_type_pointer; + methodHandleParamTypes[2] = &ffi_type_ulong; + + ffiStatus = ffi_prep_cif(&mh_cif, FFI_DEFAULT_ABI, 3, &ffi_type_ulong, + methodHandleParamTypes); + if (ffiStatus != FFI_OK) { + rb_raise(rb_eFatal, "ffi_prep_cif failed. status=%#x", ffiStatus); + } + +#endif +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.h new file mode 100644 index 0000000..0dcc058 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_METHODHANDLE_H +#define RBFFI_METHODHANDLE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "Function.h" + + +typedef struct MethodHandlePool MethodHandlePool; +typedef struct MethodHandle MethodHandle; +typedef VALUE (*rbffi_function_anyargs)(int argc, VALUE* argv, VALUE self); + + +MethodHandle* rbffi_MethodHandle_Alloc(FunctionType* fnInfo, void* function); +void rbffi_MethodHandle_Free(MethodHandle* handle); +rbffi_function_anyargs rbffi_MethodHandle_CodeAddress(MethodHandle* handle); +void rbffi_MethodHandle_Init(VALUE module); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_METHODHANDLE_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Platform.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Platform.c new file mode 100644 index 0000000..d873026 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Platform.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +#endif +# include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdint.h" +# include "win32/stdbool.h" +#endif +#include +#include +#include "rbffi_endian.h" +#include "Platform.h" + +#if defined(__GNU__) || defined(__GLIBC__) +# include +#endif + +static VALUE PlatformModule = Qnil; + +static void +export_primitive_types(VALUE module) +{ +#define S(name, T) do { \ + typedef struct { char c; T v; } s; \ + rb_define_const(module, #name "_ALIGN", INT2NUM((sizeof(s) - sizeof(T)) * 8)); \ + rb_define_const(module, #name "_SIZE", INT2NUM(sizeof(T)* 8)); \ +} while(0) + S(INT8, char); + S(INT16, short); + S(INT32, int); + S(INT64, long long); + S(LONG, long); + S(FLOAT, float); + S(DOUBLE, double); + S(LONG_DOUBLE, long double); + S(ADDRESS, void*); +#undef S +} + +void +rbffi_Platform_Init(VALUE moduleFFI) +{ + PlatformModule = rb_define_module_under(moduleFFI, "Platform"); + rb_define_const(PlatformModule, "BYTE_ORDER", INT2FIX(BYTE_ORDER)); + rb_define_const(PlatformModule, "LITTLE_ENDIAN", INT2FIX(LITTLE_ENDIAN)); + rb_define_const(PlatformModule, "BIG_ENDIAN", INT2FIX(BIG_ENDIAN)); +#if defined(__GNU__) || defined(__GLIBC__) + rb_define_const(PlatformModule, "GNU_LIBC", rb_str_new2(LIBC_SO)); +#endif + export_primitive_types(PlatformModule); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Platform.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Platform.h new file mode 100644 index 0000000..5575e34 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Platform.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
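Editor's note: export_primitive_types() in Platform.c above derives each primitive type's alignment without compiler extensions: in `struct { char c; T v; }` the padding inserted after `c` is, on the usual ABIs, exactly the alignment requirement of T, so `sizeof(s) - sizeof(T)` recovers it (the gem then multiplies by 8 to report bits). A standalone sketch of that padding trick, cross-checked against C11 `_Alignof`; the cross-check is not part of the gem.

#include <stdalign.h>
#include <stdio.h>

#define REPORT(T) do {                                              \
    struct s { char c; T v; };                                      \
    printf("%-12s size=%2zu  padded align=%2zu  _Alignof=%2zu\n",   \
           #T, sizeof(T), sizeof(struct s) - sizeof(T),             \
           alignof(T));                                             \
} while (0)

int
main(void)
{
    REPORT(short);
    REPORT(int);
    REPORT(long long);
    REPORT(double);
    REPORT(void *);
    return 0;
}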
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_PLATFORM_H +#define RBFFI_PLATFORM_H + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_Platform_Init(VALUE moduleFFI); + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_PLATFORM_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.c new file mode 100644 index 0000000..e5f24a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.c @@ -0,0 +1,508 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdint.h" +# include "win32/stdbool.h" +#endif +#include +#include +#include "rbffi.h" +#include "rbffi_endian.h" +#include "AbstractMemory.h" +#include "Pointer.h" + +#define POINTER(obj) rbffi_AbstractMemory_Cast((obj), rbffi_PointerClass) + +VALUE rbffi_PointerClass = Qnil; +VALUE rbffi_NullPointerSingleton = Qnil; + +static void ptr_release(Pointer* ptr); +static void ptr_mark(Pointer* ptr); + +VALUE +rbffi_Pointer_NewInstance(void* addr) +{ + Pointer* p; + VALUE obj; + + if (addr == NULL) { + return rbffi_NullPointerSingleton; + } + + obj = Data_Make_Struct(rbffi_PointerClass, Pointer, NULL, -1, p); + p->memory.address = addr; + p->memory.size = LONG_MAX; + p->memory.flags = (addr == NULL) ? 
0 : (MEM_RD | MEM_WR); + p->memory.typeSize = 1; + p->rbParent = Qnil; + + return obj; +} + +static VALUE +ptr_allocate(VALUE klass) +{ + Pointer* p; + VALUE obj; + + obj = Data_Make_Struct(klass, Pointer, ptr_mark, ptr_release, p); + p->rbParent = Qnil; + p->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +/* + * @overload initialize(pointer) + * @param [Pointer] pointer another pointer to initialize from + * Create a new pointer from another {Pointer}. + * @overload initialize(type, address) + * @param [Type] type type for pointer + * @param [Integer] address base address for pointer + * Create a new pointer from a {Type} and a base address + * @return [self] + * A new instance of Pointer. + */ +static VALUE +ptr_initialize(int argc, VALUE* argv, VALUE self) +{ + Pointer* p; + VALUE rbType = Qnil, rbAddress = Qnil; + int typeSize = 1; + + Data_Get_Struct(self, Pointer, p); + + switch (rb_scan_args(argc, argv, "11", &rbType, &rbAddress)) { + case 1: + rbAddress = rbType; + typeSize = 1; + break; + case 2: + typeSize = rbffi_type_size(rbType); + break; + default: + rb_raise(rb_eArgError, "Invalid arguments"); + } + + switch (TYPE(rbAddress)) { + case T_FIXNUM: + case T_BIGNUM: + p->memory.address = (void*) (uintptr_t) NUM2LL(rbAddress); + p->memory.size = LONG_MAX; + if (p->memory.address == NULL) { + p->memory.flags = 0; + } + break; + + default: + if (rb_obj_is_kind_of(rbAddress, rbffi_PointerClass)) { + Pointer* orig; + + p->rbParent = rbAddress; + Data_Get_Struct(rbAddress, Pointer, orig); + p->memory = orig->memory; + } else { + rb_raise(rb_eTypeError, "wrong argument type, expected Integer or FFI::Pointer"); + } + break; + } + + p->memory.typeSize = typeSize; + + return self; +} + +/* + * call-seq: ptr.initialize_copy(other) + * @param [Pointer] other source for cloning or dupping + * @return [self] + * @raise {RuntimeError} if +other+ is an unbounded memory area, or is unreadable/unwritable + * @raise {NoMemError} if failed to allocate memory for new object + * DO NOT CALL THIS METHOD. + * + * This method is internally used by #dup and #clone. Memory content is copied from +other+. + */ +static VALUE +ptr_initialize_copy(VALUE self, VALUE other) +{ + AbstractMemory* src; + Pointer* dst; + + Data_Get_Struct(self, Pointer, dst); + src = POINTER(other); + if (src->size == LONG_MAX) { + rb_raise(rb_eRuntimeError, "cannot duplicate unbounded memory area"); + return Qnil; + } + + if ((dst->memory.flags & (MEM_RD | MEM_WR)) != (MEM_RD | MEM_WR)) { + rb_raise(rb_eRuntimeError, "cannot duplicate unreadable/unwritable memory area"); + return Qnil; + } + + if (dst->storage != NULL) { + xfree(dst->storage); + dst->storage = NULL; + } + + dst->storage = xmalloc(src->size + 7); + if (dst->storage == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate memory size=%lu bytes", src->size); + return Qnil; + } + + dst->allocated = true; + dst->autorelease = true; + dst->memory.address = (void *) (((uintptr_t) dst->storage + 0x7) & (uintptr_t) ~0x7ULL); + dst->memory.size = src->size; + dst->memory.typeSize = src->typeSize; + + /* finally, copy the actual memory contents */ + memcpy(dst->memory.address, src->address, src->size); + + return self; +} + +static VALUE +slice(VALUE self, long offset, long size) +{ + AbstractMemory* ptr; + Pointer* p; + VALUE retval; + + Data_Get_Struct(self, AbstractMemory, ptr); + checkBounds(ptr, offset, size == LONG_MAX ? 
1 : size); + + retval = Data_Make_Struct(rbffi_PointerClass, Pointer, ptr_mark, -1, p); + + p->memory.address = ptr->address + offset; + p->memory.size = size; + p->memory.flags = ptr->flags; + p->memory.typeSize = ptr->typeSize; + p->rbParent = self; + + return retval; +} + +/* + * Document-method: + + * call-seq: ptr + offset + * @param [Numeric] offset + * @return [Pointer] + * Return a new {Pointer} from an existing pointer and an +offset+. + */ +static VALUE +ptr_plus(VALUE self, VALUE offset) +{ + AbstractMemory* ptr; + long off = NUM2LONG(offset); + + Data_Get_Struct(self, AbstractMemory, ptr); + + return slice(self, off, ptr->size == LONG_MAX ? LONG_MAX : ptr->size - off); +} + +/* + * call-seq: ptr.slice(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Pointer] + * Return a new {Pointer} from an existing one. This pointer points on same contents + * from +offset+ for a length +length+. + */ +static VALUE +ptr_slice(VALUE self, VALUE rbOffset, VALUE rbLength) +{ + return slice(self, NUM2LONG(rbOffset), NUM2LONG(rbLength)); +} + +/* + * call-seq: ptr.inspect + * @return [String] + * Inspect pointer object. + */ +static VALUE +ptr_inspect(VALUE self) +{ + char buf[100]; + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->memory.size != LONG_MAX) { + snprintf(buf, sizeof(buf), "#<%s address=%p size=%lu>", + rb_obj_classname(self), ptr->memory.address, ptr->memory.size); + } else { + snprintf(buf, sizeof(buf), "#<%s address=%p>", rb_obj_classname(self), ptr->memory.address); + } + + return rb_str_new2(buf); +} + +/* + * Document-method: null? + * call-seq: ptr.null? + * @return [Boolean] + * Return +true+ if +self+ is a {NULL} pointer. + */ +static VALUE +ptr_null_p(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ptr->memory.address == NULL ? Qtrue : Qfalse; +} + +/* + * Document-method: == + * call-seq: ptr == other + * @param [Pointer] other + * Check equality between +self+ and +other+. Equality is tested on {#address}. + */ +static VALUE +ptr_equals(VALUE self, VALUE other) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (NIL_P(other)) { + return ptr->memory.address == NULL ? Qtrue : Qfalse; + } + + return ptr->memory.address == POINTER(other)->address ? Qtrue : Qfalse; +} + +/* + * call-seq: ptr.address + * @return [Numeric] pointer's base address + * Return +self+'s base address (alias: #to_i). + */ +static VALUE +ptr_address(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ULL2NUM((uintptr_t) ptr->memory.address); +} + +#if BYTE_ORDER == LITTLE_ENDIAN +# define SWAPPED_ORDER BIG_ENDIAN +#else +# define SWAPPED_ORDER LITTLE_ENDIAN +#endif + +/* + * Get or set +self+'s endianness + * @overload order + * @return [:big, :little] endianness of +self+ + * @overload order(order) + * @param [Symbol] order endianness to set (+:little+, +:big+ or +:network+). +:big+ and +:network+ + * are synonymous. + * @return [self] + */ +static VALUE +ptr_order(int argc, VALUE* argv, VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + if (argc == 0) { + int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER; + return order == BIG_ENDIAN ? 
ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little")); + } else { + VALUE rbOrder = Qnil; + int order = BYTE_ORDER; + + if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) { + rb_raise(rb_eArgError, "need byte order"); + } + if (SYMBOL_P(rbOrder)) { + ID id = SYM2ID(rbOrder); + if (id == rb_intern("little")) { + order = LITTLE_ENDIAN; + + } else if (id == rb_intern("big") || id == rb_intern("network")) { + order = BIG_ENDIAN; + } + } + if (order != BYTE_ORDER) { + Pointer* p2; + VALUE retval = slice(self, 0, ptr->memory.size); + + Data_Get_Struct(retval, Pointer, p2); + p2->memory.flags |= MEM_SWAP; + return retval; + } + + return self; + } +} + + +/* + * call-seq: ptr.free + * @return [self] + * Free memory pointed by +self+. + */ +static VALUE +ptr_free(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->allocated) { + if (ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + ptr->allocated = false; + + } else { + VALUE caller = rb_funcall(rb_funcall(Qnil, rb_intern("caller"), 0), rb_intern("first"), 0); + + rb_warn("calling free on non allocated pointer %s from %s", RSTRING_PTR(ptr_inspect(self)), RSTRING_PTR(rb_str_to_str(caller))); + } + + return self; +} + +static VALUE +ptr_type_size(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return INT2NUM(ptr->memory.typeSize); +} + +/* + * call-seq: ptr.autorelease = autorelease + * @param [Boolean] autorelease + * @return [Boolean] +autorelease+ + * Set +autorelease+ attribute. See also Autorelease section. + */ +static VALUE +ptr_autorelease(VALUE self, VALUE autorelease) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + ptr->autorelease = autorelease == Qtrue; + + return autorelease; +} + +/* + * call-seq: ptr.autorelease? + * @return [Boolean] + * Get +autorelease+ attribute. See also Autorelease section. + */ +static VALUE +ptr_autorelease_p(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ptr->autorelease ? Qtrue : Qfalse; +} + + +static void +ptr_release(Pointer* ptr) +{ + if (ptr->autorelease && ptr->allocated && ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + xfree(ptr); +} + +static void +ptr_mark(Pointer* ptr) +{ + rb_gc_mark(ptr->rbParent); +} + +void +rbffi_Pointer_Init(VALUE moduleFFI) +{ + VALUE rbNullAddress = ULL2NUM(0); + VALUE ffi_AbstractMemory = rbffi_AbstractMemoryClass; + + /* + * Document-class: FFI::Pointer < FFI::AbstractMemory + * Pointer class is used to manage C pointers with ease. A {Pointer} object is defined by his + * {#address} (as a C pointer). It permits additions with an integer for pointer arithmetic. + * + * ==Autorelease + * A pointer object may autorelease his contents when freed (by default). This behaviour may be + * changed with {#autorelease=} method. 
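Editor's note: ptr_order() above reports :little or :big by comparing the compile-time BYTE_ORDER constant against the per-pointer MEM_SWAP flag, and when a different order is requested it hands back a slice carrying MEM_SWAP so that reads and writes through that view can be byte-swapped elsewhere in the gem. A small sketch of the two ingredients, host-order detection and a 32-bit byte reversal; the helper names are invented for the illustration.

#include <stdint.h>
#include <stdio.h>

/* Report the host byte order by inspecting how a known 32-bit value is
 * laid out in memory. */
static const char *
host_order(void)
{
    const uint32_t probe = 0x01020304;
    const unsigned char *bytes = (const unsigned char *) &probe;
    return bytes[0] == 0x04 ? "little" : "big";
}

/* Reverse the bytes of a 32-bit value, i.e. what reading through a
 * byte-swapped view conceptually does to each element. */
static uint32_t
swap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0x0000ff00u)
         | ((v << 8) & 0x00ff0000u) | (v << 24);
}

int
main(void)
{
    printf("host order: %s\n", host_order());
    printf("0x%08x swapped -> 0x%08x\n",
           (unsigned) 0xfee1deadu, (unsigned) swap32(0xfee1deadu));
    return 0;
}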
+ */ + rbffi_PointerClass = rb_define_class_under(moduleFFI, "Pointer", ffi_AbstractMemory); + /* + * Document-variable: Pointer + */ + rb_global_variable(&rbffi_PointerClass); + + rb_define_alloc_func(rbffi_PointerClass, ptr_allocate); + rb_define_method(rbffi_PointerClass, "initialize", ptr_initialize, -1); + rb_define_method(rbffi_PointerClass, "initialize_copy", ptr_initialize_copy, 1); + rb_define_method(rbffi_PointerClass, "inspect", ptr_inspect, 0); + rb_define_method(rbffi_PointerClass, "to_s", ptr_inspect, 0); + rb_define_method(rbffi_PointerClass, "+", ptr_plus, 1); + rb_define_method(rbffi_PointerClass, "slice", ptr_slice, 2); + rb_define_method(rbffi_PointerClass, "null?", ptr_null_p, 0); + rb_define_method(rbffi_PointerClass, "address", ptr_address, 0); + rb_define_alias(rbffi_PointerClass, "to_i", "address"); + rb_define_method(rbffi_PointerClass, "==", ptr_equals, 1); + rb_define_method(rbffi_PointerClass, "order", ptr_order, -1); + rb_define_method(rbffi_PointerClass, "autorelease=", ptr_autorelease, 1); + rb_define_method(rbffi_PointerClass, "autorelease?", ptr_autorelease_p, 0); + rb_define_method(rbffi_PointerClass, "free", ptr_free, 0); + rb_define_method(rbffi_PointerClass, "type_size", ptr_type_size, 0); + + rbffi_NullPointerSingleton = rb_class_new_instance(1, &rbNullAddress, rbffi_PointerClass); + /* + * NULL pointer + */ + rb_define_const(rbffi_PointerClass, "NULL", rbffi_NullPointerSingleton); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.h new file mode 100644 index 0000000..2d86851 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_POINTER_H +#define RBFFI_POINTER_H + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "AbstractMemory.h" + +extern void rbffi_Pointer_Init(VALUE moduleFFI); +extern VALUE rbffi_Pointer_NewInstance(void* addr); +extern VALUE rbffi_PointerClass; +extern VALUE rbffi_NullPointerSingleton; + +typedef struct Pointer { + AbstractMemory memory; + VALUE rbParent; + char* storage; /* start of malloc area */ + bool autorelease; + bool allocated; +} Pointer; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_POINTER_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Struct.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Struct.c new file mode 100644 index 0000000..c6ccd14 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Struct.c @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
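Editor's note: Pointer.h above places `AbstractMemory memory` as the first member of `struct Pointer`, the usual C idiom for struct-level inheritance: a pointer to the outer struct, suitably converted, points to its first member, which is what allows Pointer objects to be handled wherever an AbstractMemory is expected. A reduced sketch of that embedding pattern with stand-in types; Base and Derived are invented names, not the gem's.

#include <stdio.h>

/* Stand-ins for AbstractMemory and Pointer: the "derived" struct embeds
 * the "base" struct as its first member. */
typedef struct Base {
    char *address;
    long  size;
} Base;

typedef struct Derived {
    Base base;          /* must stay the first member for the cast to hold */
    int  flags;
} Derived;

/* Functions written against Base work on any struct that embeds it first. */
static long
base_size(const Base *b)
{
    return b->size;
}

int
main(void)
{
    Derived d = { { "hello", 5 }, 1 };

    /* &d and &d.base are the same address, so this view is well defined. */
    const Base *as_base = (const Base *) &d;
    printf("size via base view: %ld\n", base_size(as_base));
    return 0;
}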
+ */ + +#include +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Function.h" +#include "Types.h" +#include "Function.h" +#include "StructByValue.h" +#include "ArrayType.h" +#include "MappedType.h" +#include "Struct.h" + +typedef struct InlineArray_ { + VALUE rbMemory; + VALUE rbField; + + AbstractMemory* memory; + StructField* field; + MemoryOp *op; + Type* componentType; + ArrayType* arrayType; + int length; +} InlineArray; + + +static void struct_mark(Struct *); +static void struct_free(Struct *); +static VALUE struct_class_layout(VALUE klass); +static void struct_malloc(Struct* s); +static void inline_array_mark(InlineArray *); +static void store_reference_value(StructField* f, Struct* s, VALUE value); + +VALUE rbffi_StructClass = Qnil; + +VALUE rbffi_StructInlineArrayClass = Qnil; +VALUE rbffi_StructLayoutCharArrayClass = Qnil; + +static ID id_pointer_ivar = 0, id_layout_ivar = 0; +static ID id_get = 0, id_put = 0, id_to_ptr = 0, id_to_s = 0, id_layout = 0; + +static inline char* +memory_address(VALUE self) +{ + return ((AbstractMemory *)DATA_PTR((self)))->address; +} + +static VALUE +struct_allocate(VALUE klass) +{ + Struct* s; + VALUE obj = Data_Make_Struct(klass, Struct, struct_mark, struct_free, s); + + s->rbPointer = Qnil; + s->rbLayout = Qnil; + + return obj; +} + +/* + * call-seq: initialize + * @overload initialize(pointer, *args) + * @param [AbstractMemory] pointer + * @param [Array] args + * @return [self] + */ +static VALUE +struct_initialize(int argc, VALUE* argv, VALUE self) +{ + Struct* s; + VALUE rbPointer = Qnil, rest = Qnil, klass = CLASS_OF(self); + int nargs; + + Data_Get_Struct(self, Struct, s); + + nargs = rb_scan_args(argc, argv, "01*", &rbPointer, &rest); + + /* Call up into ruby code to adjust the layout */ + if (nargs > 1) { + s->rbLayout = rb_funcall2(CLASS_OF(self), id_layout, (int) RARRAY_LEN(rest), RARRAY_PTR(rest)); + } else { + s->rbLayout = struct_class_layout(klass); + } + + if (!rb_obj_is_kind_of(s->rbLayout, rbffi_StructLayoutClass)) { + rb_raise(rb_eRuntimeError, "Invalid Struct layout"); + } + + Data_Get_Struct(s->rbLayout, StructLayout, s->layout); + + if (rbPointer != Qnil) { + s->pointer = MEMORY(rbPointer); + s->rbPointer = rbPointer; + } else { + struct_malloc(s); + } + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +struct_initialize_copy(VALUE self, VALUE other) +{ + Struct* src; + Struct* dst; + + Data_Get_Struct(self, Struct, dst); + Data_Get_Struct(other, Struct, src); + if (dst == src) { + return self; + } + + dst->rbLayout = src->rbLayout; + dst->layout = src->layout; + + /* + * A new MemoryPointer instance is allocated here instead of just calling + * #dup on rbPointer, since the Pointer may not know its length, or may + * be longer than just this struct. 
+ */ + if (src->pointer->address != NULL) { + dst->rbPointer = rbffi_MemoryPointer_NewInstance(1, src->layout->size, false); + dst->pointer = MEMORY(dst->rbPointer); + memcpy(dst->pointer->address, src->pointer->address, src->layout->size); + } else { + dst->rbPointer = src->rbPointer; + dst->pointer = src->pointer; + } + + if (src->layout->referenceFieldCount > 0) { + dst->rbReferences = ALLOC_N(VALUE, dst->layout->referenceFieldCount); + memcpy(dst->rbReferences, src->rbReferences, dst->layout->referenceFieldCount * sizeof(VALUE)); + } + + return self; +} + +static VALUE +struct_class_layout(VALUE klass) +{ + VALUE layout; + if (!rb_ivar_defined(klass, id_layout_ivar)) { + rb_raise(rb_eRuntimeError, "no Struct layout configured for %s", rb_class2name(klass)); + } + + layout = rb_ivar_get(klass, id_layout_ivar); + if (!rb_obj_is_kind_of(layout, rbffi_StructLayoutClass)) { + rb_raise(rb_eRuntimeError, "invalid Struct layout for %s", rb_class2name(klass)); + } + + return layout; +} + +static StructLayout* +struct_layout(VALUE self) +{ + Struct* s = (Struct *) DATA_PTR(self); + if (s->layout != NULL) { + return s->layout; + } + + if (s->layout == NULL) { + s->rbLayout = struct_class_layout(CLASS_OF(self)); + Data_Get_Struct(s->rbLayout, StructLayout, s->layout); + } + + return s->layout; +} + +static Struct* +struct_validate(VALUE self) +{ + Struct* s; + Data_Get_Struct(self, Struct, s); + + if (struct_layout(self) == NULL) { + rb_raise(rb_eRuntimeError, "struct layout == null"); + } + + if (s->pointer == NULL) { + struct_malloc(s); + } + + return s; +} + +static void +struct_malloc(Struct* s) +{ + if (s->rbPointer == Qnil) { + s->rbPointer = rbffi_MemoryPointer_NewInstance(s->layout->size, 1, true); + + } else if (!rb_obj_is_kind_of(s->rbPointer, rbffi_AbstractMemoryClass)) { + rb_raise(rb_eRuntimeError, "invalid pointer in struct"); + } + + s->pointer = (AbstractMemory *) DATA_PTR(s->rbPointer); +} + +static void +struct_mark(Struct *s) +{ + rb_gc_mark(s->rbPointer); + rb_gc_mark(s->rbLayout); + if (s->rbReferences != NULL) { + rb_gc_mark_locations(&s->rbReferences[0], &s->rbReferences[s->layout->referenceFieldCount]); + } +} + +static void +struct_free(Struct* s) +{ + xfree(s->rbReferences); + xfree(s); +} + + +static void +store_reference_value(StructField* f, Struct* s, VALUE value) +{ + if (unlikely(f->referenceIndex == -1)) { + rb_raise(rb_eRuntimeError, "put_reference_value called for non-reference type"); + return; + } + if (s->rbReferences == NULL) { + int i; + s->rbReferences = ALLOC_N(VALUE, s->layout->referenceFieldCount); + for (i = 0; i < s->layout->referenceFieldCount; ++i) { + s->rbReferences[i] = Qnil; + } + } + + s->rbReferences[f->referenceIndex] = value; +} + + +static StructField * +struct_field(Struct* s, VALUE fieldName) +{ + StructLayout* layout = s->layout; + struct field_cache_entry *p_ce = FIELD_CACHE_LOOKUP(layout, fieldName); + + /* Do a hash lookup only if cache entry is empty or fieldName is unexpected? 
*/ + if (unlikely(!SYMBOL_P(fieldName) || !p_ce->fieldName || p_ce->fieldName != fieldName)) { + VALUE rbField = rb_hash_aref(layout->rbFieldMap, fieldName); + if (unlikely(NIL_P(rbField))) { + VALUE str = rb_funcall2(fieldName, id_to_s, 0, NULL); + rb_raise(rb_eArgError, "No such field '%s'", StringValuePtr(str)); + } + /* Write the retrieved coder to the cache */ + p_ce->fieldName = fieldName; + p_ce->field = (StructField *) DATA_PTR(rbField); + } + + return p_ce->field; +} + +/* + * call-seq: struct[field_name] + * @param field_name field to access + * Acces to a Struct field. + */ +static VALUE +struct_aref(VALUE self, VALUE fieldName) +{ + Struct* s; + StructField* f; + + s = struct_validate(self); + + f = struct_field(s, fieldName); + if (f->get != NULL) { + return (*f->get)(f, s); + + } else if (f->memoryOp != NULL) { + return (*f->memoryOp->get)(s->pointer, f->offset); + + } else { + VALUE rbField = rb_hash_aref(s->layout->rbFieldMap, fieldName); + /* call up to the ruby code to fetch the value */ + return rb_funcall2(rbField, id_get, 1, &s->rbPointer); + } +} + +/* + * call-seq: []=(field_name, value) + * @param field_name field to access + * @param value value to set to +field_name+ + * @return [value] + * Set a field in Struct. + */ +static VALUE +struct_aset(VALUE self, VALUE fieldName, VALUE value) +{ + Struct* s; + StructField* f; + + s = struct_validate(self); + + f = struct_field(s, fieldName); + if (f->put != NULL) { + (*f->put)(f, s, value); + + } else if (f->memoryOp != NULL) { + + (*f->memoryOp->put)(s->pointer, f->offset, value); + + } else { + VALUE rbField = rb_hash_aref(s->layout->rbFieldMap, fieldName); + /* call up to the ruby code to set the value */ + VALUE argv[2]; + argv[0] = s->rbPointer; + argv[1] = value; + rb_funcall2(rbField, id_put, 2, argv); + } + + if (f->referenceRequired) { + store_reference_value(f, s, value); + } + + return value; +} + +/* + * call-seq: pointer= pointer + * @param [AbstractMemory] pointer + * @return [self] + * Make Struct point to +pointer+. + */ +static VALUE +struct_set_pointer(VALUE self, VALUE pointer) +{ + Struct* s; + StructLayout* layout; + AbstractMemory* memory; + + if (!rb_obj_is_kind_of(pointer, rbffi_AbstractMemoryClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected Pointer or Buffer)", + rb_obj_classname(pointer)); + return Qnil; + } + + + Data_Get_Struct(self, Struct, s); + Data_Get_Struct(pointer, AbstractMemory, memory); + layout = struct_layout(self); + + if ((int) layout->base.ffiType->size > memory->size) { + rb_raise(rb_eArgError, "memory of %ld bytes too small for struct %s (expected at least %ld)", + memory->size, rb_obj_classname(self), (long) layout->base.ffiType->size); + } + + s->pointer = MEMORY(pointer); + s->rbPointer = pointer; + rb_ivar_set(self, id_pointer_ivar, pointer); + + return self; +} + +/* + * call-seq: pointer + * @return [AbstractMemory] + * Get pointer to Struct contents. + */ +static VALUE +struct_get_pointer(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->rbPointer; +} + +/* + * call-seq: layout= layout + * @param [StructLayout] layout + * @return [self] + * Set the Struct's layout. 
+ */ +static VALUE +struct_set_layout(VALUE self, VALUE layout) +{ + Struct* s; + Data_Get_Struct(self, Struct, s); + + if (!rb_obj_is_kind_of(layout, rbffi_StructLayoutClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected %s)", + rb_obj_classname(layout), rb_class2name(rbffi_StructLayoutClass)); + return Qnil; + } + + Data_Get_Struct(layout, StructLayout, s->layout); + rb_ivar_set(self, id_layout_ivar, layout); + + return self; +} + +/* + * call-seq: layout + * @return [StructLayout] + * Get the Struct's layout. + */ +static VALUE +struct_get_layout(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->rbLayout; +} + +/* + * call-seq: null? + * @return [Boolean] + * Test if Struct's pointer is NULL + */ +static VALUE +struct_null_p(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->pointer->address == NULL ? Qtrue : Qfalse; +} + +/* + * (see Pointer#order) + */ +static VALUE +struct_order(int argc, VALUE* argv, VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + if (argc == 0) { + return rb_funcall(s->rbPointer, rb_intern("order"), 0); + + } else { + VALUE retval = rb_obj_dup(self); + VALUE rbPointer = rb_funcall2(s->rbPointer, rb_intern("order"), argc, argv); + struct_set_pointer(retval, rbPointer); + + return retval; + } +} + +static VALUE +inline_array_allocate(VALUE klass) +{ + InlineArray* array; + VALUE obj; + + obj = Data_Make_Struct(klass, InlineArray, inline_array_mark, -1, array); + array->rbField = Qnil; + array->rbMemory = Qnil; + + return obj; +} + +static void +inline_array_mark(InlineArray* array) +{ + rb_gc_mark(array->rbField); + rb_gc_mark(array->rbMemory); +} + +/* + * Document-method: FFI::Struct::InlineArray#initialize + * call-seq: initialize(memory, field) + * @param [AbstractMemory] memory + * @param [StructField] field + * @return [self] + */ +static VALUE +inline_array_initialize(VALUE self, VALUE rbMemory, VALUE rbField) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + array->rbMemory = rbMemory; + array->rbField = rbField; + + Data_Get_Struct(rbMemory, AbstractMemory, array->memory); + Data_Get_Struct(rbField, StructField, array->field); + Data_Get_Struct(array->field->rbType, ArrayType, array->arrayType); + Data_Get_Struct(array->arrayType->rbComponentType, Type, array->componentType); + + array->op = get_memory_op(array->componentType); + if (array->op == NULL && array->componentType->nativeType == NATIVE_MAPPED) { + array->op = get_memory_op(((MappedType *) array->componentType)->type); + } + + array->length = array->arrayType->length; + + return self; +} + +/* + * call-seq: size + * @return [Numeric] + * Get size + */ +static VALUE +inline_array_size(VALUE self) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + return UINT2NUM(((ArrayType *) array->field->type)->length); +} + +static int +inline_array_offset(InlineArray* array, int index) +{ + if (index < 0 || (index >= array->length && array->length > 0)) { + rb_raise(rb_eIndexError, "index %d out of bounds", index); + } + + return (int) array->field->offset + (index * (int) array->componentType->ffiType->size); +} + +/* + * call-seq: [](index) + * @param [Numeric] index + * @return [Type, Struct] + */ +static VALUE +inline_array_aref(VALUE self, VALUE rbIndex) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + if (array->op != NULL) { + VALUE rbNativeValue = array->op->get(array->memory, + inline_array_offset(array, NUM2INT(rbIndex))); + if 
(unlikely(array->componentType->nativeType == NATIVE_MAPPED)) { + return rb_funcall(((MappedType *) array->componentType)->rbConverter, + rb_intern("from_native"), 2, rbNativeValue, Qnil); + } else { + return rbNativeValue; + } + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + VALUE rbOffset = INT2NUM(inline_array_offset(array, NUM2INT(rbIndex))); + VALUE rbLength = INT2NUM(array->componentType->ffiType->size); + VALUE rbPointer = rb_funcall(array->rbMemory, rb_intern("slice"), 2, rbOffset, rbLength); + + return rb_class_new_instance(1, &rbPointer, ((StructByValue *) array->componentType)->rbStructClass); + } else { + + rb_raise(rb_eArgError, "get not supported for %s", rb_obj_classname(array->arrayType->rbComponentType)); + return Qnil; + } +} + +/* + * call-seq: []=(index, value) + * @param [Numeric] index + * @param [Type, Struct] + * @return [value] + */ +static VALUE +inline_array_aset(VALUE self, VALUE rbIndex, VALUE rbValue) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + if (array->op != NULL) { + if (unlikely(array->componentType->nativeType == NATIVE_MAPPED)) { + rbValue = rb_funcall(((MappedType *) array->componentType)->rbConverter, + rb_intern("to_native"), 2, rbValue, Qnil); + } + array->op->put(array->memory, inline_array_offset(array, NUM2INT(rbIndex)), + rbValue); + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + int offset = inline_array_offset(array, NUM2INT(rbIndex)); + Struct* s; + + if (!rb_obj_is_kind_of(rbValue, rbffi_StructClass)) { + rb_raise(rb_eTypeError, "argument not an instance of struct"); + return Qnil; + } + + checkWrite(array->memory); + checkBounds(array->memory, offset, array->componentType->ffiType->size); + + Data_Get_Struct(rbValue, Struct, s); + checkRead(s->pointer); + checkBounds(s->pointer, 0, array->componentType->ffiType->size); + + memcpy(array->memory->address + offset, s->pointer->address, array->componentType->ffiType->size); + + } else { + ArrayType* arrayType; + Data_Get_Struct(array->field->rbType, ArrayType, arrayType); + + rb_raise(rb_eArgError, "set not supported for %s", rb_obj_classname(arrayType->rbComponentType)); + return Qnil; + } + + return rbValue; +} + +/* + * call-seq: each + * Yield block for each element of +self+. + */ +static VALUE +inline_array_each(VALUE self) +{ + InlineArray* array; + + int i; + + Data_Get_Struct(self, InlineArray, array); + + for (i = 0; i < array->length; ++i) { + rb_yield(inline_array_aref(self, INT2FIX(i))); + } + + return self; +} + +/* + * call-seq: to_a + * @return [Array] + * Convert +self+ to an array. + */ +static VALUE +inline_array_to_a(VALUE self) +{ + InlineArray* array; + VALUE obj; + int i; + + Data_Get_Struct(self, InlineArray, array); + obj = rb_ary_new2(array->length); + + + for (i = 0; i < array->length; ++i) { + rb_ary_push(obj, inline_array_aref(self, INT2FIX(i))); + } + + return obj; +} + +/* + * Document-method: FFI::StructLayout::CharArray#to_s + * call-seq: to_s + * @return [String] + * Convert +self+ to a string. 
+ */ +static VALUE +inline_array_to_s(VALUE self) +{ + InlineArray* array; + VALUE argv[2]; + + Data_Get_Struct(self, InlineArray, array); + + if (array->componentType->nativeType != NATIVE_INT8 && array->componentType->nativeType != NATIVE_UINT8) { + VALUE dummy = Qnil; + return rb_call_super(0, &dummy); + } + + argv[0] = UINT2NUM(array->field->offset); + argv[1] = UINT2NUM(array->length); + + return rb_funcall2(array->rbMemory, rb_intern("get_string"), 2, argv); +} + +/* + * call-seq: to_ptr + * @return [AbstractMemory] + * Get pointer to +self+ content. + */ +static VALUE +inline_array_to_ptr(VALUE self) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + return rb_funcall(array->rbMemory, rb_intern("slice"), 2, + UINT2NUM(array->field->offset), UINT2NUM(array->arrayType->base.ffiType->size)); +} + + +void +rbffi_Struct_Init(VALUE moduleFFI) +{ + VALUE StructClass; + + rbffi_StructLayout_Init(moduleFFI); + + /* + * Document-class: FFI::Struct + * + * A FFI::Struct means to mirror a C struct. + * + * A Struct is defined as: + * class MyStruct < FFI::Struct + * layout :value1, :int, + * :value2, :double + * end + * and is used as: + * my_struct = MyStruct.new + * my_struct[:value1] = 12 + * + * For more information, see http://github.com/ffi/ffi/wiki/Structs + */ + rbffi_StructClass = rb_define_class_under(moduleFFI, "Struct", rb_cObject); + StructClass = rbffi_StructClass; // put on a line alone to help RDoc + rb_global_variable(&rbffi_StructClass); + + /* + * Document-class: FFI::Struct::InlineArray + */ + rbffi_StructInlineArrayClass = rb_define_class_under(rbffi_StructClass, "InlineArray", rb_cObject); + rb_global_variable(&rbffi_StructInlineArrayClass); + + /* + * Document-class: FFI::StructLayout::CharArray < FFI::Struct::InlineArray + */ + rbffi_StructLayoutCharArrayClass = rb_define_class_under(rbffi_StructLayoutClass, "CharArray", + rbffi_StructInlineArrayClass); + rb_global_variable(&rbffi_StructLayoutCharArrayClass); + + + rb_define_alloc_func(StructClass, struct_allocate); + rb_define_method(StructClass, "initialize", struct_initialize, -1); + rb_define_method(StructClass, "initialize_copy", struct_initialize_copy, 1); + rb_define_method(StructClass, "order", struct_order, -1); + + rb_define_alias(rb_singleton_class(StructClass), "alloc_in", "new"); + rb_define_alias(rb_singleton_class(StructClass), "alloc_out", "new"); + rb_define_alias(rb_singleton_class(StructClass), "alloc_inout", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_in", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_out", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_inout", "new"); + + rb_define_method(StructClass, "pointer", struct_get_pointer, 0); + rb_define_private_method(StructClass, "pointer=", struct_set_pointer, 1); + + rb_define_method(StructClass, "layout", struct_get_layout, 0); + rb_define_private_method(StructClass, "layout=", struct_set_layout, 1); + + rb_define_method(StructClass, "[]", struct_aref, 1); + rb_define_method(StructClass, "[]=", struct_aset, 2); + rb_define_method(StructClass, "null?", struct_null_p, 0); + + rb_include_module(rbffi_StructInlineArrayClass, rb_mEnumerable); + rb_define_alloc_func(rbffi_StructInlineArrayClass, inline_array_allocate); + rb_define_method(rbffi_StructInlineArrayClass, "initialize", inline_array_initialize, 2); + rb_define_method(rbffi_StructInlineArrayClass, "[]", inline_array_aref, 1); + rb_define_method(rbffi_StructInlineArrayClass, "[]=", inline_array_aset, 2); + 
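Editor's note: inline_array_offset() above turns an index into a byte offset with `field offset + index * element size`, rejecting out-of-range indexes first; inline_array_aref()/inline_array_aset() then read or write at that offset. The same arithmetic in plain C, indexing a fixed array embedded in a struct; the struct and field names here are invented.

#include <stddef.h>
#include <stdio.h>

/* An invented struct with an inline (embedded) array, standing in for an
 * array field inside an FFI::Struct layout. */
struct record {
    int   id;
    short samples[8];
};

/* Byte offset of samples[index] inside struct record, with the same kind
 * of bounds check inline_array_offset() performs. */
static long
element_offset(int index)
{
    if (index < 0 || index >= 8) {
        return -1;                        /* out of bounds */
    }
    return (long) (offsetof(struct record, samples)
                   + (size_t) index * sizeof(short));
}

int
main(void)
{
    struct record r = { 42, { 0 } };
    long off = element_offset(3);

    if (off >= 0) {
        /* Write through the computed offset, then read back normally. */
        *(short *) ((char *) &r + off) = 7;
        printf("offset=%ld samples[3]=%d\n", off, r.samples[3]);
    }
    return 0;
}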
rb_define_method(rbffi_StructInlineArrayClass, "each", inline_array_each, 0); + rb_define_method(rbffi_StructInlineArrayClass, "size", inline_array_size, 0); + rb_define_method(rbffi_StructInlineArrayClass, "to_a", inline_array_to_a, 0); + rb_define_method(rbffi_StructInlineArrayClass, "to_ptr", inline_array_to_ptr, 0); + + rb_define_method(rbffi_StructLayoutCharArrayClass, "to_s", inline_array_to_s, 0); + rb_define_alias(rbffi_StructLayoutCharArrayClass, "to_str", "to_s"); + + id_pointer_ivar = rb_intern("@pointer"); + id_layout_ivar = rb_intern("@layout"); + id_layout = rb_intern("layout"); + id_get = rb_intern("get"); + id_put = rb_intern("put"); + id_to_ptr = rb_intern("to_ptr"); + id_to_s = rb_intern("to_s"); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Struct.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Struct.h new file mode 100644 index 0000000..eb6edf2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Struct.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_STRUCT_H +#define RBFFI_STRUCT_H + +#include "extconf.h" +#include "AbstractMemory.h" +#include "Type.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_Struct_Init(VALUE ffiModule); + extern void rbffi_StructLayout_Init(VALUE ffiModule); + typedef struct StructField_ StructField; + typedef struct StructLayout_ StructLayout; + typedef struct Struct_ Struct; + + struct StructField_ { + Type* type; + unsigned int offset; + + int referenceIndex; + + bool referenceRequired; + VALUE rbType; + VALUE rbName; + + VALUE (*get)(StructField* field, Struct* s); + void (*put)(StructField* field, Struct* s, VALUE value); + + MemoryOp* memoryOp; + }; + + struct StructLayout_ { + Type base; + StructField** fields; + int fieldCount; + int size; + int align; + ffi_type** ffiTypes; + + /* + * We use the fieldName's minor 8 Bits as index to a 256 entry cache. 
+ * This avoids full ruby hash lookups for repeated lookups. + */ + #define FIELD_CACHE_LOOKUP(this, sym) ( &(this)->cache_row[((sym) >> 8) & 0xff] ) + + struct field_cache_entry { + VALUE fieldName; + StructField *field; + } cache_row[0x100]; + + /** The number of reference tracking fields in this struct */ + int referenceFieldCount; + + VALUE rbFieldNames; + VALUE rbFieldMap; + VALUE rbFields; + }; + + struct Struct_ { + StructLayout* layout; + AbstractMemory* pointer; + VALUE* rbReferences; + + VALUE rbLayout; + VALUE rbPointer; + }; + + extern VALUE rbffi_StructClass, rbffi_StructLayoutClass; + extern VALUE rbffi_StructLayoutFieldClass, rbffi_StructLayoutFunctionFieldClass; + extern VALUE rbffi_StructLayoutArrayFieldClass; + extern VALUE rbffi_StructInlineArrayClass; + extern VALUE rbffi_StructLayoutCharArrayClass; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_STRUCT_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.c new file mode 100644 index 0000000..0d9fb9c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.c @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
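Editor's note: FIELD_CACHE_LOOKUP in Struct.h above is a 256-entry direct-mapped cache: the second-lowest byte of the field symbol, `(sym >> 8) & 0xff`, selects a slot, each slot remembers the last (symbol, field) pair stored there, and struct_field() in Struct.c falls back to the real rb_hash_aref lookup only when the stored symbol does not match. A reduced sketch of that cache shape using integer keys in place of Ruby symbols; slow_lookup() just fabricates a value so the sketch stays self-contained.

#include <stdint.h>
#include <stdio.h>

/* Direct-mapped cache: 256 slots, each remembering one (key, value) pair. */
struct cache_entry {
    uintptr_t key;      /* 0 means "empty", mirroring the unset-fieldName check */
    int       value;
};

static struct cache_entry cache[0x100];

/* Stand-in for the expensive hash-lookup path. */
static int
slow_lookup(uintptr_t key)
{
    return (int) (key % 1000);
}

static int
cached_lookup(uintptr_t key)
{
    /* Same slot selection as FIELD_CACHE_LOOKUP: bits 8..15 of the key. */
    struct cache_entry *e = &cache[(key >> 8) & 0xff];

    if (e->key != key) {              /* miss or collision: refill the slot */
        e->key = key;
        e->value = slow_lookup(key);
    }
    return e->value;
}

int
main(void)
{
    uintptr_t sym = 0x55aa11u;        /* an arbitrary "symbol" value */
    printf("first lookup:  %d\n", cached_lookup(sym));   /* fills the slot */
    printf("second lookup: %d\n", cached_lookup(sym));   /* served from cache */
    return 0;
}

A direct-mapped cache trades occasional collisions for a lookup that is just an index and a compare, which is why repeated access to the same struct fields stays cheap.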
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Type.h" +#include "StructByValue.h" +#include "Struct.h" + +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) + +static VALUE sbv_allocate(VALUE); +static VALUE sbv_initialize(VALUE, VALUE); +static void sbv_mark(StructByValue *); +static void sbv_free(StructByValue *); + +VALUE rbffi_StructByValueClass = Qnil; + +static VALUE +sbv_allocate(VALUE klass) +{ + StructByValue* sbv; + + VALUE obj = Data_Make_Struct(klass, StructByValue, sbv_mark, sbv_free, sbv); + + sbv->rbStructClass = Qnil; + sbv->rbStructLayout = Qnil; + sbv->base.nativeType = NATIVE_STRUCT; + + sbv->base.ffiType = xcalloc(1, sizeof(*sbv->base.ffiType)); + sbv->base.ffiType->size = 0; + sbv->base.ffiType->alignment = 1; + sbv->base.ffiType->type = FFI_TYPE_STRUCT; + + return obj; +} + +static VALUE +sbv_initialize(VALUE self, VALUE rbStructClass) +{ + StructByValue* sbv = NULL; + StructLayout* layout = NULL; + VALUE rbLayout = Qnil; + + rbLayout = rb_ivar_get(rbStructClass, rb_intern("@layout")); + if (!rb_obj_is_instance_of(rbLayout, rbffi_StructLayoutClass)) { + rb_raise(rb_eTypeError, "wrong type in @layout ivar (expected FFI::StructLayout)"); + } + + Data_Get_Struct(rbLayout, StructLayout, layout); + Data_Get_Struct(self, StructByValue, sbv); + sbv->rbStructClass = rbStructClass; + sbv->rbStructLayout = rbLayout; + + /* We can just use everything from the ffi_type directly */ + *sbv->base.ffiType = *layout->base.ffiType; + + return self; +} + +static void +sbv_mark(StructByValue *sbv) +{ + rb_gc_mark(sbv->rbStructClass); + rb_gc_mark(sbv->rbStructLayout); +} + +static void +sbv_free(StructByValue *sbv) +{ + xfree(sbv->base.ffiType); + xfree(sbv); +} + + +static VALUE +sbv_layout(VALUE self) +{ + StructByValue* sbv; + + Data_Get_Struct(self, StructByValue, sbv); + return sbv->rbStructLayout; +} + +static VALUE +sbv_struct_class(VALUE self) +{ + StructByValue* sbv; + + Data_Get_Struct(self, StructByValue, sbv); + + return sbv->rbStructClass; +} + +void +rbffi_StructByValue_Init(VALUE moduleFFI) +{ + rbffi_StructByValueClass = rb_define_class_under(moduleFFI, "StructByValue", rbffi_TypeClass); + rb_global_variable(&rbffi_StructByValueClass); + rb_define_const(rbffi_TypeClass, "Struct", rbffi_StructByValueClass); + + rb_define_alloc_func(rbffi_StructByValueClass, sbv_allocate); + rb_define_method(rbffi_StructByValueClass, "initialize", sbv_initialize, 1); + rb_define_method(rbffi_StructByValueClass, "layout", sbv_layout, 0); + rb_define_method(rbffi_StructByValueClass, "struct_class", sbv_struct_class, 0); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.h new file mode 100644 index 0000000..07b2763 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_STRUCTBYVALUE_H +#define RBFFI_STRUCTBYVALUE_H + +#include +#include "Type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct StructByValue_ { + Type base; + VALUE rbStructClass; + VALUE rbStructLayout; +} StructByValue; + +void rbffi_StructByValue_Init(VALUE moduleFFI); + +extern VALUE rbffi_StructByValueClass; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_STRUCTBYVALUE_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructLayout.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructLayout.c new file mode 100644 index 0000000..9b748bd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/StructLayout.c @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
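Editor's note: StructByValue.c above (and StructLayout.c below) define FFI_ALIGN(v, a) as `((((v)-1) | ((a)-1)) + 1)`, which rounds v up to the next multiple of a for power-of-two alignments; it is the same rounding ptr_initialize_copy applies earlier with the `+ 0x7` / `& ~0x7` mask, just written with OR instead of AND. A quick standalone check that the OR form agrees with the more familiar mask form; the check itself is not part of the gem.

#include <stddef.h>
#include <stdio.h>

/* The gem's formulation: round v up to a multiple of a (a must be a
 * power of two). */
#define FFI_ALIGN(v, a)  (((((size_t) (v)) - 1) | ((a) - 1)) + 1)

/* The more common mask-based formulation of the same rounding. */
#define MASK_ALIGN(v, a) ((((size_t) (v)) + (a) - 1) & ~((size_t) (a) - 1))

int
main(void)
{
    size_t alignments[] = { 1, 2, 4, 8, 16 };

    for (size_t v = 1; v <= 40; v++) {
        for (size_t i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++) {
            size_t a = alignments[i];
            if (FFI_ALIGN(v, a) != MASK_ALIGN(v, a)) {
                printf("mismatch at v=%zu a=%zu\n", v, a);
                return 1;
            }
        }
    }
    printf("FFI_ALIGN and the mask form agree for v=1..40\n");
    return 0;
}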
+ */ + +#include + +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Function.h" +#include "Types.h" +#include "StructByValue.h" +#include "ArrayType.h" +#include "Function.h" +#include "MappedType.h" +#include "Struct.h" + +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) + +static void struct_layout_mark(StructLayout *); +static void struct_layout_free(StructLayout *); +static void struct_field_mark(StructField* ); + +VALUE rbffi_StructLayoutFieldClass = Qnil; +VALUE rbffi_StructLayoutNumberFieldClass = Qnil, rbffi_StructLayoutPointerFieldClass = Qnil; +VALUE rbffi_StructLayoutStringFieldClass = Qnil; +VALUE rbffi_StructLayoutFunctionFieldClass = Qnil, rbffi_StructLayoutArrayFieldClass = Qnil; + +VALUE rbffi_StructLayoutClass = Qnil; + + +static VALUE +struct_field_allocate(VALUE klass) +{ + StructField* field; + VALUE obj; + + obj = Data_Make_Struct(klass, StructField, struct_field_mark, -1, field); + field->rbType = Qnil; + field->rbName = Qnil; + + return obj; +} + +static void +struct_field_mark(StructField* f) +{ + rb_gc_mark(f->rbType); + rb_gc_mark(f->rbName); +} + +/* + * call-seq: initialize(name, offset, type) + * @param [String,Symbol] name + * @param [Fixnum] offset + * @param [FFI::Type] type + * @return [self] + * A new FFI::StructLayout::Field instance. + */ +static VALUE +struct_field_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE rbOffset = Qnil, rbName = Qnil, rbType = Qnil; + StructField* field; + int nargs; + + Data_Get_Struct(self, StructField, field); + + nargs = rb_scan_args(argc, argv, "3", &rbName, &rbOffset, &rbType); + + if (TYPE(rbName) != T_SYMBOL && TYPE(rbName) != T_STRING) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected Symbol/String)", + rb_obj_classname(rbName)); + } + + Check_Type(rbOffset, T_FIXNUM); + + if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected FFI::Type)", + rb_obj_classname(rbType)); + } + + field->offset = NUM2UINT(rbOffset); + field->rbName = (TYPE(rbName) == T_SYMBOL) ? rbName : rb_str_intern(rbName); + field->rbType = rbType; + Data_Get_Struct(field->rbType, Type, field->type); + field->memoryOp = get_memory_op(field->type); + field->referenceIndex = -1; + + switch (field->type->nativeType == NATIVE_MAPPED ? ((MappedType *) field->type)->type->nativeType : field->type->nativeType) { + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + case NATIVE_POINTER: + field->referenceRequired = true; + break; + + default: + field->referenceRequired = (rb_respond_to(self, rb_intern("reference_required?")) + && RTEST(rb_funcall2(self, rb_intern("reference_required?"), 0, NULL))) + || (rb_respond_to(rbType, rb_intern("reference_required?")) + && RTEST(rb_funcall2(rbType, rb_intern("reference_required?"), 0, NULL))); + break; + } + + return self; +} + +/* + * call-seq: offset + * @return [Numeric] + * Get the field offset. + */ +static VALUE +struct_field_offset(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->offset); +} + +/* + * call-seq: size + * @return [Numeric] + * Get the field size. 
+ */ +static VALUE +struct_field_size(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->type->ffiType->size); +} + +/* + * call-seq: alignment + * @return [Numeric] + * Get the field alignment. + */ +static VALUE +struct_field_alignment(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->type->ffiType->alignment); +} + +/* + * call-seq: type + * @return [Type] + * Get the field type. + */ +static VALUE +struct_field_type(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + + return field->rbType; +} + +/* + * call-seq: name + * @return [Symbol] + * Get the field name. + */ +static VALUE +struct_field_name(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return field->rbName; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [Object] + * Get an object of type {#type} from memory pointed by +pointer+. + */ +static VALUE +struct_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + if (f->memoryOp == NULL) { + rb_raise(rb_eArgError, "get not supported for %s", rb_obj_classname(f->rbType)); + return Qnil; + } + + return (*f->memoryOp->get)(MEMORY(pointer), f->offset); +} + +/* + * call-seq: put(pointer, value) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @param [Object] value this object must be a kind of {#type} + * @return [self] + * Put an object to memory pointed by +pointer+. + */ +static VALUE +struct_field_put(VALUE self, VALUE pointer, VALUE value) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + if (f->memoryOp == NULL) { + rb_raise(rb_eArgError, "put not supported for %s", rb_obj_classname(f->rbType)); + return self; + } + + (*f->memoryOp->put)(MEMORY(pointer), f->offset, value); + + return self; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [Function] + * Get a {Function} from memory pointed by +pointer+. + */ +static VALUE +function_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + + return rbffi_Function_NewInstance(f->rbType, (*rbffi_AbstractMemoryOps.pointer->get)(MEMORY(pointer), f->offset)); +} + +/* + * call-seq: put(pointer, proc) + * @param [AbstractMemory] pointer pointer to a {Struct} + * @param [Function, Proc] proc + * @return [Function] + * Set a {Function} to memory pointed by +pointer+ as a function. + * + * If a Proc is submitted as +proc+, it is automatically transformed to a {Function}. 
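For reference, the Field#get/#put accessors documented above are what service struct member reads and writes at the Ruby level. A minimal sketch only; the Point struct below is a hypothetical example, not part of the vendored sources:

    require 'ffi'

    class Point < FFI::Struct
      layout :x, :int32,
             :y, :int32
    end

    pt = Point.new        # backed by freshly allocated native memory
    pt[:x] = 10           # each write dispatches to the :x field's put()
    pt[:y] = 20
    pt[:x] + pt[:y]       # each read dispatches to the field's get()   # => 30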
+ */ +static VALUE +function_field_put(VALUE self, VALUE pointer, VALUE proc) +{ + StructField* f; + VALUE value = Qnil; + + Data_Get_Struct(self, StructField, f); + + if (NIL_P(proc) || rb_obj_is_kind_of(proc, rbffi_FunctionClass)) { + value = proc; + } else if (rb_obj_is_kind_of(proc, rb_cProc) || rb_respond_to(proc, rb_intern("call"))) { + value = rbffi_Function_ForProc(f->rbType, proc); + } else { + rb_raise(rb_eTypeError, "wrong type (expected Proc or Function)"); + } + + (*rbffi_AbstractMemoryOps.pointer->put)(MEMORY(pointer), f->offset, value); + + return self; +} + +static inline bool +isCharArray(ArrayType* arrayType) +{ + return arrayType->componentType->nativeType == NATIVE_INT8 + || arrayType->componentType->nativeType == NATIVE_UINT8; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [FFI::StructLayout::CharArray, FFI::Struct::InlineArray] + * Get an array from a {Struct}. + */ +static VALUE +array_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + ArrayType* array; + VALUE argv[2]; + + Data_Get_Struct(self, StructField, f); + Data_Get_Struct(f->rbType, ArrayType, array); + + argv[0] = pointer; + argv[1] = self; + + return rb_class_new_instance(2, argv, isCharArray(array) + ? rbffi_StructLayoutCharArrayClass : rbffi_StructInlineArrayClass); +} + +/* + * call-seq: put(pointer, value) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @param [String, Array] value +value+ may be a String only if array's type is a kind of +int8+ + * @return [value] + * Set an array in a {Struct}. + */ +static VALUE +array_field_put(VALUE self, VALUE pointer, VALUE value) +{ + StructField* f; + ArrayType* array; + + + Data_Get_Struct(self, StructField, f); + Data_Get_Struct(f->rbType, ArrayType, array); + + if (isCharArray(array) && rb_obj_is_instance_of(value, rb_cString)) { + VALUE argv[2]; + + argv[0] = INT2FIX(f->offset); + argv[1] = value; + + if (RSTRING_LEN(value) < array->length) { + rb_funcall2(pointer, rb_intern("put_string"), 2, argv); + } else if (RSTRING_LEN(value) == array->length) { + rb_funcall2(pointer, rb_intern("put_bytes"), 2, argv); + } else { + rb_raise(rb_eIndexError, "String is longer (%ld bytes) than the char array (%d bytes)", RSTRING_LEN(value), array->length); + } + } else { +#ifdef notyet + MemoryOp* op; + int count = RARRAY_LEN(value); + int i; + AbstractMemory* memory = MEMORY(pointer); + + if (count > array->length) { + rb_raise(rb_eIndexError, "array too large"); + } + + /* clear the contents in case of a short write */ + checkWrite(memory); + checkBounds(memory, f->offset, f->type->ffiType->size); + if (count < array->length) { + memset(memory->address + f->offset + (count * array->componentType->ffiType->size), + 0, (array->length - count) * array->componentType->ffiType->size); + } + + /* now copy each element in */ + if ((op = get_memory_op(array->componentType)) != NULL) { + + for (i = 0; i < count; ++i) { + (*op->put)(memory, f->offset + (i * array->componentType->ffiType->size), rb_ary_entry(value, i)); + } + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + + for (i = 0; i < count; ++i) { + VALUE entry = rb_ary_entry(value, i); + Struct* s; + + if (!rb_obj_is_kind_of(entry, rbffi_StructClass)) { + rb_raise(rb_eTypeError, "array element not an instance of FFI::Struct"); + break; + } + + Data_Get_Struct(entry, Struct, s); + checkRead(s->pointer); + checkBounds(s->pointer, 0, array->componentType->ffiType->size); + + memcpy(memory->address + f->offset + (i * 
array->componentType->ffiType->size), + s->pointer->address, array->componentType->ffiType->size); + } + + } else { + rb_raise(rb_eNotImpError, "put not supported for arrays of type %s", rb_obj_classname(array->rbComponentType)); + } +#else + rb_raise(rb_eNotImpError, "cannot set array field"); +#endif + } + + return value; +} + + +static VALUE +struct_layout_allocate(VALUE klass) +{ + StructLayout* layout; + VALUE obj; + + obj = Data_Make_Struct(klass, StructLayout, struct_layout_mark, struct_layout_free, layout); + layout->rbFieldMap = Qnil; + layout->rbFieldNames = Qnil; + layout->rbFields = Qnil; + layout->base.ffiType = xcalloc(1, sizeof(*layout->base.ffiType)); + layout->base.ffiType->size = 0; + layout->base.ffiType->alignment = 0; + layout->base.ffiType->type = FFI_TYPE_STRUCT; + + return obj; +} + +/* + * call-seq: initialize(fields, size, align) + * @param [Array] fields + * @param [Numeric] size + * @param [Numeric] align + * @return [self] + * A new StructLayout instance. + */ +static VALUE +struct_layout_initialize(VALUE self, VALUE fields, VALUE size, VALUE align) +{ + StructLayout* layout; + ffi_type* ltype; + int i; + + Data_Get_Struct(self, StructLayout, layout); + layout->fieldCount = (int) RARRAY_LEN(fields); + layout->rbFieldMap = rb_hash_new(); + layout->rbFieldNames = rb_ary_new2(layout->fieldCount); + layout->size = (int) FFI_ALIGN(NUM2INT(size), NUM2INT(align)); + layout->align = NUM2INT(align); + layout->fields = xcalloc(layout->fieldCount, sizeof(StructField *)); + layout->ffiTypes = xcalloc(layout->fieldCount + 1, sizeof(ffi_type *)); + layout->rbFields = rb_ary_new2(layout->fieldCount); + layout->referenceFieldCount = 0; + layout->base.ffiType->elements = layout->ffiTypes; + layout->base.ffiType->size = layout->size; + layout->base.ffiType->alignment = layout->align; + + ltype = layout->base.ffiType; + for (i = 0; i < (int) layout->fieldCount; ++i) { + VALUE rbField = rb_ary_entry(fields, i); + VALUE rbName; + StructField* field; + ffi_type* ftype; + + + if (!rb_obj_is_kind_of(rbField, rbffi_StructLayoutFieldClass)) { + rb_raise(rb_eTypeError, "wrong type for field %d.", i); + } + rbName = rb_funcall2(rbField, rb_intern("name"), 0, NULL); + + Data_Get_Struct(rbField, StructField, field); + layout->fields[i] = field; + + if (field->type == NULL || field->type->ffiType == NULL) { + rb_raise(rb_eRuntimeError, "type of field %d not supported", i); + } + + ftype = field->type->ffiType; + if (ftype->size == 0 && i < ((int) layout->fieldCount - 1)) { + rb_raise(rb_eTypeError, "type of field %d has zero size", i); + } + + if (field->referenceRequired) { + field->referenceIndex = layout->referenceFieldCount++; + } + + + layout->ffiTypes[i] = ftype->size > 0 ? ftype : NULL; + rb_hash_aset(layout->rbFieldMap, rbName, rbField); + rb_ary_push(layout->rbFields, rbField); + rb_ary_push(layout->rbFieldNames, rbName); + } + + if (ltype->size == 0) { + rb_raise(rb_eRuntimeError, "Struct size is zero"); + } + + return self; +} + +/* + * call-seq: [](field) + * @param [Symbol] field + * @return [StructLayout::Field] + * Get a field from the layout. 
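For reference, the layout built by struct_layout_initialize above is what Ruby-level struct introspection reads from. A minimal sketch, reusing the hypothetical Point struct from the earlier example; exact offsets and padding are platform-dependent:

    Point.size           # overall size in bytes, padded to the struct alignment
    Point.members        # => [:x, :y], backed by the layout's field-name array
    Point.offset_of(:y)  # byte offset recorded in the :y StructLayout::Field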
+ */ +static VALUE +struct_layout_union_bang(VALUE self) +{ + const ffi_type *alignment_types[] = { &ffi_type_sint8, &ffi_type_sint16, &ffi_type_sint32, &ffi_type_sint64, + &ffi_type_float, &ffi_type_double, &ffi_type_longdouble, NULL }; + StructLayout* layout; + ffi_type *t = NULL; + int count, i; + + Data_Get_Struct(self, StructLayout, layout); + + for (i = 0; alignment_types[i] != NULL; ++i) { + if (alignment_types[i]->alignment == layout->align) { + t = (ffi_type *) alignment_types[i]; + break; + } + } + if (t == NULL) { + rb_raise(rb_eRuntimeError, "cannot create libffi union representation for alignment %d", layout->align); + return Qnil; + } + + count = (int) layout->size / (int) t->size; + xfree(layout->ffiTypes); + layout->ffiTypes = xcalloc(count + 1, sizeof(ffi_type *)); + layout->base.ffiType->elements = layout->ffiTypes; + + for (i = 0; i < count; ++i) { + layout->ffiTypes[i] = t; + } + + return self; +} + +static VALUE +struct_layout_aref(VALUE self, VALUE field) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_hash_aref(layout->rbFieldMap, field); +} + +/* + * call-seq: fields + * @return [Array] + * Get fields list. + */ +static VALUE +struct_layout_fields(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFields); +} + +/* + * call-seq: members + * @return [Array] + * Get list of field names. + */ +static VALUE +struct_layout_members(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFieldNames); +} + +/* + * call-seq: to_a + * @return [Array] + * Get an array of fields. + */ +static VALUE +struct_layout_to_a(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFields); +} + +static void +struct_layout_mark(StructLayout *layout) +{ + rb_gc_mark(layout->rbFieldMap); + rb_gc_mark(layout->rbFieldNames); + rb_gc_mark(layout->rbFields); + /* Clear the cache, to be safe from changes of fieldName VALUE by GC.compact. + * TODO: Move cache clearing to compactation callback provided by Ruby-2.7+. + */ + memset(&layout->cache_row, 0, sizeof(layout->cache_row)); +} + +static void +struct_layout_free(StructLayout *layout) +{ + xfree(layout->ffiTypes); + xfree(layout->base.ffiType); + xfree(layout->fields); + xfree(layout); +} + + +void +rbffi_StructLayout_Init(VALUE moduleFFI) +{ + VALUE ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::StructLayout < FFI::Type + * + * This class aims at defining a struct layout. + */ + rbffi_StructLayoutClass = rb_define_class_under(moduleFFI, "StructLayout", ffi_Type); + rb_global_variable(&rbffi_StructLayoutClass); + + /* + * Document-class: FFI::StructLayout::Field + * A field in a {StructLayout}. + */ + rbffi_StructLayoutFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Field", rb_cObject); + rb_global_variable(&rbffi_StructLayoutFieldClass); + + /* + * Document-class: FFI::StructLayout::Number + * A numeric {Field} in a {StructLayout}. + */ + rbffi_StructLayoutNumberFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Number", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutNumberFieldClass); + + /* + * Document-class: FFI::StructLayout::String + * A string {Field} in a {StructLayout}. 
+ */ + rbffi_StructLayoutStringFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "String", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutStringFieldClass); + + /* + * Document-class: FFI::StructLayout::Pointer + * A pointer {Field} in a {StructLayout}. + */ + rbffi_StructLayoutPointerFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Pointer", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutPointerFieldClass); + + /* + * Document-class: FFI::StructLayout::Function + * A function pointer {Field} in a {StructLayout}. + */ + rbffi_StructLayoutFunctionFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Function", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutFunctionFieldClass); + + /* + * Document-class: FFI::StructLayout::Array + * An array {Field} in a {StructLayout}. + */ + rbffi_StructLayoutArrayFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Array", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutArrayFieldClass); + + rb_define_alloc_func(rbffi_StructLayoutFieldClass, struct_field_allocate); + rb_define_method(rbffi_StructLayoutFieldClass, "initialize", struct_field_initialize, -1); + rb_define_method(rbffi_StructLayoutFieldClass, "offset", struct_field_offset, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "size", struct_field_size, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "alignment", struct_field_alignment, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "name", struct_field_name, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "type", struct_field_type, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "put", struct_field_put, 2); + rb_define_method(rbffi_StructLayoutFieldClass, "get", struct_field_get, 1); + + rb_define_method(rbffi_StructLayoutFunctionFieldClass, "put", function_field_put, 2); + rb_define_method(rbffi_StructLayoutFunctionFieldClass, "get", function_field_get, 1); + + rb_define_method(rbffi_StructLayoutArrayFieldClass, "get", array_field_get, 1); + rb_define_method(rbffi_StructLayoutArrayFieldClass, "put", array_field_put, 2); + + rb_define_alloc_func(rbffi_StructLayoutClass, struct_layout_allocate); + rb_define_method(rbffi_StructLayoutClass, "initialize", struct_layout_initialize, 3); + rb_define_method(rbffi_StructLayoutClass, "[]", struct_layout_aref, 1); + rb_define_method(rbffi_StructLayoutClass, "fields", struct_layout_fields, 0); + rb_define_method(rbffi_StructLayoutClass, "members", struct_layout_members, 0); + rb_define_method(rbffi_StructLayoutClass, "to_a", struct_layout_to_a, 0); + rb_define_method(rbffi_StructLayoutClass, "__union!", struct_layout_union_bang, 0); + +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Thread.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Thread.c new file mode 100644 index 0000000..aab3344 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Thread.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifndef _MSC_VER +#include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif + +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +# include +# include +# include +#else +# include +# define _WINSOCKAPI_ +# include +#endif +#include +#include "Thread.h" + +#ifdef _WIN32 +static volatile DWORD frame_thread_key = TLS_OUT_OF_INDEXES; +#else +static pthread_key_t thread_data_key; +struct thread_data { + rbffi_frame_t* frame; +}; +static inline struct thread_data* thread_data_get(void); + +#endif + +rbffi_frame_t* +rbffi_frame_current(void) +{ +#ifdef _WIN32 + return (rbffi_frame_t *) TlsGetValue(frame_thread_key); +#else + struct thread_data* td = (struct thread_data *) pthread_getspecific(thread_data_key); + return td != NULL ? td->frame : NULL; +#endif +} + +void +rbffi_frame_push(rbffi_frame_t* frame) +{ + memset(frame, 0, sizeof(*frame)); + frame->exc = Qnil; + +#ifdef _WIN32 + frame->prev = TlsGetValue(frame_thread_key); + TlsSetValue(frame_thread_key, frame); +#else + frame->td = thread_data_get(); + frame->prev = frame->td->frame; + frame->td->frame = frame; +#endif +} + +void +rbffi_frame_pop(rbffi_frame_t* frame) +{ +#ifdef _WIN32 + TlsSetValue(frame_thread_key, frame->prev); +#else + frame->td->frame = frame->prev; +#endif +} + +#ifndef _WIN32 +static struct thread_data* thread_data_init(void); + +static inline struct thread_data* +thread_data_get(void) +{ + struct thread_data* td = (struct thread_data *) pthread_getspecific(thread_data_key); + return td != NULL ? 
td : thread_data_init(); +} + +static struct thread_data* +thread_data_init(void) +{ + struct thread_data* td = calloc(1, sizeof(struct thread_data)); + + pthread_setspecific(thread_data_key, td); + + return td; +} + +static void +thread_data_free(void *ptr) +{ + free(ptr); +} +#endif + +void +rbffi_Thread_Init(VALUE moduleFFI) +{ +#ifdef _WIN32 + frame_thread_key = TlsAlloc(); +#else + pthread_key_create(&thread_data_key, thread_data_free); +#endif +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Thread.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Thread.h new file mode 100644 index 0000000..46a65c6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Thread.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_THREAD_H +#define RBFFI_THREAD_H + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include "extconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifdef _WIN32 +# include +#else +# include +#endif + +typedef struct { +#ifdef _WIN32 + DWORD id; +#else + pthread_t id; +#endif + bool valid; + bool has_gvl; + VALUE exc; +} rbffi_thread_t; + +typedef struct rbffi_frame { +#ifndef _WIN32 + struct thread_data* td; +#endif + struct rbffi_frame* prev; + VALUE exc; +} rbffi_frame_t; + +rbffi_frame_t* rbffi_frame_current(void); +void rbffi_frame_push(rbffi_frame_t* frame); +void rbffi_frame_pop(rbffi_frame_t* frame); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_THREAD_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Type.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Type.c new file mode 100644 index 0000000..bc82857 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Type.c @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif + +#include +#include +#include +#include "rbffi.h" +#include "compat.h" +#include "Types.h" +#include "Type.h" + + +typedef struct BuiltinType_ { + Type type; + char* name; +} BuiltinType; + +static void builtin_type_free(BuiltinType *); + +VALUE rbffi_TypeClass = Qnil; + +static VALUE classBuiltinType = Qnil; +static VALUE moduleNativeType = Qnil; +static VALUE typeMap = Qnil, sizeMap = Qnil; +static ID id_find_type = 0, id_type_size = 0, id_size = 0; + +static VALUE +type_allocate(VALUE klass) +{ + Type* type; + VALUE obj = Data_Make_Struct(klass, Type, NULL, -1, type); + + type->nativeType = -1; + type->ffiType = &ffi_type_void; + + return obj; +} + +/* + * Document-method: initialize + * call-seq: initialize(value) + * @param [Fixnum,Type] value + * @return [self] + */ +static VALUE +type_initialize(VALUE self, VALUE value) +{ + Type* type; + Type* other; + + Data_Get_Struct(self, Type, type); + + if (FIXNUM_P(value)) { + type->nativeType = FIX2INT(value); + } else if (rb_obj_is_kind_of(value, rbffi_TypeClass)) { + Data_Get_Struct(value, Type, other); + type->nativeType = other->nativeType; + type->ffiType = other->ffiType; + } else { + rb_raise(rb_eArgError, "wrong type"); + } + + return self; +} + +/* + * call-seq: type.size + * @return [Fixnum] + * Return type's size, in bytes. + */ +static VALUE +type_size(VALUE self) +{ + Type *type; + + Data_Get_Struct(self, Type, type); + + return INT2FIX(type->ffiType->size); +} + +/* + * call-seq: type.alignment + * @return [Fixnum] + * Get Type alignment. + */ +static VALUE +type_alignment(VALUE self) +{ + Type *type; + + Data_Get_Struct(self, Type, type); + + return INT2FIX(type->ffiType->alignment); +} + +/* + * call-seq: type.inspect + * @return [String] + * Inspect {Type} object. 
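For reference, the size, alignment, and inspect methods defined in Type.c surface through FFI::Type at the Ruby level. A minimal sketch; exact values are platform- and build-dependent:

    require 'ffi'

    FFI::Type::INT32.size        # => 4
    FFI::Type::INT32.alignment   # typically 4
    FFI.find_type(:uint64)       # resolves a symbol to an FFI::Type instance
    FFI::Type::POINTER.inspect   # e.g. "#<FFI::Type::Builtin:POINTER size=8 alignment=8>"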
+ */ +static VALUE +type_inspect(VALUE self) +{ + char buf[100]; + Type *type; + + Data_Get_Struct(self, Type, type); + + snprintf(buf, sizeof(buf), "#<%s:%p size=%d alignment=%d>", + rb_obj_classname(self), type, (int) type->ffiType->size, (int) type->ffiType->alignment); + + return rb_str_new2(buf); +} + +static VALUE +builtin_type_new(VALUE klass, int nativeType, ffi_type* ffiType, const char* name) +{ + BuiltinType* type; + VALUE obj = Qnil; + + obj = Data_Make_Struct(klass, BuiltinType, NULL, builtin_type_free, type); + + type->name = strdup(name); + type->type.nativeType = nativeType; + type->type.ffiType = ffiType; + + return obj; +} + +static void +builtin_type_free(BuiltinType *type) +{ + free(type->name); + xfree(type); +} + +/* + * call-seq: type.inspect + * @return [String] + * Inspect {Type::Builtin} object. + */ +static VALUE +builtin_type_inspect(VALUE self) +{ + char buf[100]; + BuiltinType *type; + + Data_Get_Struct(self, BuiltinType, type); + snprintf(buf, sizeof(buf), "#<%s:%s size=%d alignment=%d>", + rb_obj_classname(self), type->name, (int) type->type.ffiType->size, type->type.ffiType->alignment); + + return rb_str_new2(buf); +} + +int +rbffi_type_size(VALUE type) +{ + int t = TYPE(type); + + if (t == T_FIXNUM || t == T_BIGNUM) { + return NUM2INT(type); + + } else if (t == T_SYMBOL) { + /* + * Try looking up directly in the type and size maps + */ + VALUE nType; + if ((nType = rb_hash_lookup(typeMap, type)) != Qnil) { + if (rb_obj_is_kind_of(nType, rbffi_TypeClass)) { + Type* type; + Data_Get_Struct(nType, Type, type); + return (int) type->ffiType->size; + + } else if (rb_respond_to(nType, id_size)) { + return NUM2INT(rb_funcall2(nType, id_size, 0, NULL)); + } + } + + /* Not found - call up to the ruby version to resolve */ + return NUM2INT(rb_funcall2(rbffi_FFIModule, id_type_size, 1, &type)); + + } else { + return NUM2INT(rb_funcall2(type, id_size, 0, NULL)); + } +} + +VALUE +rbffi_Type_Lookup(VALUE name) +{ + int t = TYPE(name); + if (t == T_SYMBOL || t == T_STRING) { + /* + * Try looking up directly in the type Map + */ + VALUE nType; + if ((nType = rb_hash_lookup(typeMap, name)) != Qnil && rb_obj_is_kind_of(nType, rbffi_TypeClass)) { + return nType; + } + } else if (rb_obj_is_kind_of(name, rbffi_TypeClass)) { + + return name; + } + + /* Nothing found - let caller handle raising exceptions */ + return Qnil; +} + +void +rbffi_Type_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::Type + * This class manages C types. + * + * It embbed {FFI::Type::Builtin} objects as constants (for names, + * see {FFI::NativeType}). + */ + rbffi_TypeClass = rb_define_class_under(moduleFFI, "Type", rb_cObject); + + /* + * Document-constant: FFI::TypeDefs + */ + rb_define_const(moduleFFI, "TypeDefs", typeMap = rb_hash_new()); + rb_define_const(moduleFFI, "SizeTypes", sizeMap = rb_hash_new()); + rb_global_variable(&typeMap); + rb_global_variable(&sizeMap); + id_find_type = rb_intern("find_type"); + id_type_size = rb_intern("type_size"); + id_size = rb_intern("size"); + + /* + * Document-class: FFI::Type::Builtin + * Class for Built-in types. + */ + classBuiltinType = rb_define_class_under(rbffi_TypeClass, "Builtin", rbffi_TypeClass); + /* + * Document-module: FFI::NativeType + * This module defines constants for native (C) types. 
+ * + * ==Native type constants + * Native types are defined by constants : + * * INT8, SCHAR, CHAR + * * UINT8, UCHAR + * * INT16, SHORT, SSHORT + * * UINT16, USHORT + * * INT32,, INT, SINT + * * UINT32, UINT + * * INT64, LONG_LONG, SLONG_LONG + * * UINT64, ULONG_LONG + * * LONG, SLONG + * * ULONG + * * FLOAT32, FLOAT + * * FLOAT64, DOUBLE + * * POINTER + * * CALLBACK + * * FUNCTION + * * CHAR_ARRAY + * * BOOL + * * STRING (immutable string, nul terminated) + * * STRUCT (struct-b-value param or result) + * * ARRAY (array type definition) + * * MAPPED (custom native type) + * For function return type only : + * * VOID + * For function argument type only : + * * BUFFER_IN + * * BUFFER_OUT + * * VARARGS (function takes a variable number of arguments) + * + * All these constants are exported to {FFI} module prefixed with "TYPE_". + * They are objets from {FFI::Type::Builtin} class. + */ + moduleNativeType = rb_define_module_under(moduleFFI, "NativeType"); + + /* + * Document-global: FFI::Type + */ + rb_global_variable(&rbffi_TypeClass); + rb_global_variable(&classBuiltinType); + rb_global_variable(&moduleNativeType); + + rb_define_alloc_func(rbffi_TypeClass, type_allocate); + rb_define_method(rbffi_TypeClass, "initialize", type_initialize, 1); + rb_define_method(rbffi_TypeClass, "size", type_size, 0); + rb_define_method(rbffi_TypeClass, "alignment", type_alignment, 0); + rb_define_method(rbffi_TypeClass, "inspect", type_inspect, 0); + + /* Make Type::Builtin non-allocatable */ + rb_undef_method(CLASS_OF(classBuiltinType), "new"); + rb_define_method(classBuiltinType, "inspect", builtin_type_inspect, 0); + + rb_global_variable(&rbffi_TypeClass); + rb_global_variable(&classBuiltinType); + + /* Define all the builtin types */ + #define T(x, ffiType) do { \ + VALUE t = Qnil; \ + rb_define_const(rbffi_TypeClass, #x, t = builtin_type_new(classBuiltinType, NATIVE_##x, ffiType, #x)); \ + rb_define_const(moduleNativeType, #x, t); \ + rb_define_const(moduleFFI, "TYPE_" #x, t); \ + } while(0) + + #define A(old_type, new_type) do { \ + VALUE t = rb_const_get(rbffi_TypeClass, rb_intern(#old_type)); \ + rb_const_set(rbffi_TypeClass, rb_intern(#new_type), t); \ + } while(0) + + /* + * Document-constant: FFI::Type::Builtin::VOID + */ + T(VOID, &ffi_type_void); + T(INT8, &ffi_type_sint8); + A(INT8, SCHAR); + A(INT8, CHAR); + T(UINT8, &ffi_type_uint8); + A(UINT8, UCHAR); + + T(INT16, &ffi_type_sint16); + A(INT16, SHORT); + A(INT16, SSHORT); + T(UINT16, &ffi_type_uint16); + A(UINT16, USHORT); + T(INT32, &ffi_type_sint32); + A(INT32, INT); + A(INT32, SINT); + T(UINT32, &ffi_type_uint32); + A(UINT32, UINT); + T(INT64, &ffi_type_sint64); + A(INT64, LONG_LONG); + A(INT64, SLONG_LONG); + T(UINT64, &ffi_type_uint64); + A(UINT64, ULONG_LONG); + T(LONG, &ffi_type_slong); + A(LONG, SLONG); + T(ULONG, &ffi_type_ulong); + T(FLOAT32, &ffi_type_float); + A(FLOAT32, FLOAT); + T(FLOAT64, &ffi_type_double); + A(FLOAT64, DOUBLE); + T(LONGDOUBLE, &ffi_type_longdouble); + T(POINTER, &ffi_type_pointer); + T(STRING, &ffi_type_pointer); + T(BUFFER_IN, &ffi_type_pointer); + T(BUFFER_OUT, &ffi_type_pointer); + T(BUFFER_INOUT, &ffi_type_pointer); + T(BOOL, &ffi_type_uchar); + T(VARARGS, &ffi_type_void); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Type.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Type.h new file mode 100644 index 0000000..b81995a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Type.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (C) 
2009 Luc Heinrich + * + * This file is part of ruby-ffi. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the Evan Phoenix nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef RBFFI_TYPE_H +#define RBFFI_TYPE_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct Type_ Type; + +#include "Types.h" + +struct Type_ { + NativeType nativeType; + ffi_type* ffiType; +}; + +extern VALUE rbffi_TypeClass; +extern VALUE rbffi_Type_Lookup(VALUE type); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_TYPE_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Types.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Types.c new file mode 100644 index 0000000..bb757c9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Types.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Aman Gupta. + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include "Pointer.h" +#include "rbffi.h" +#include "Function.h" +#include "StructByValue.h" +#include "Types.h" +#include "Struct.h" +#include "MappedType.h" +#include "MemoryPointer.h" +#include "LongDouble.h" + +static ID id_from_native = 0; + + +VALUE +rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr) +{ + switch (type->nativeType) { + case NATIVE_VOID: + return Qnil; + case NATIVE_INT8: + return INT2NUM((signed char) *(ffi_sarg *) ptr); + case NATIVE_INT16: + return INT2NUM((signed short) *(ffi_sarg *) ptr); + case NATIVE_INT32: + return INT2NUM((signed int) *(ffi_sarg *) ptr); + case NATIVE_LONG: + return LONG2NUM((signed long) *(ffi_sarg *) ptr); + case NATIVE_INT64: + return LL2NUM(*(signed long long *) ptr); + + case NATIVE_UINT8: + return UINT2NUM((unsigned char) *(ffi_arg *) ptr); + case NATIVE_UINT16: + return UINT2NUM((unsigned short) *(ffi_arg *) ptr); + case NATIVE_UINT32: + return UINT2NUM((unsigned int) *(ffi_arg *) ptr); + case NATIVE_ULONG: + return ULONG2NUM((unsigned long) *(ffi_arg *) ptr); + case NATIVE_UINT64: + return ULL2NUM(*(unsigned long long *) ptr); + + case NATIVE_FLOAT32: + return rb_float_new(*(float *) ptr); + case NATIVE_FLOAT64: + return rb_float_new(*(double *) ptr); + + case NATIVE_LONGDOUBLE: + return rbffi_longdouble_new(*(long double *) ptr); + + case NATIVE_STRING: + return (*(void **) ptr != NULL) ? rb_str_new2(*(char **) ptr) : Qnil; + case NATIVE_POINTER: + return rbffi_Pointer_NewInstance(*(void **) ptr); + case NATIVE_BOOL: + return ((unsigned char) *(ffi_arg *) ptr) ? Qtrue : Qfalse; + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: { + return *(void **) ptr != NULL + ? 
rbffi_Function_NewInstance(rbType, rbffi_Pointer_NewInstance(*(void **) ptr)) + : Qnil; + } + + case NATIVE_STRUCT: { + StructByValue* sbv = (StructByValue *)type; + AbstractMemory* mem; + VALUE rbMemory = rbffi_MemoryPointer_NewInstance(1, sbv->base.ffiType->size, false); + + Data_Get_Struct(rbMemory, AbstractMemory, mem); + memcpy(mem->address, ptr, sbv->base.ffiType->size); + RB_GC_GUARD(rbMemory); + RB_GC_GUARD(rbType); + + return rb_class_new_instance(1, &rbMemory, sbv->rbStructClass); + } + + case NATIVE_MAPPED: { + /* + * For mapped types, first convert to the real native type, then upcall to + * ruby to convert to the expected return type + */ + MappedType* m = (MappedType *) type; + VALUE values[2], rbReturnValue; + + values[0] = rbffi_NativeValue_ToRuby(m->type, m->rbType, ptr); + values[1] = Qnil; + + + rbReturnValue = rb_funcall2(m->rbConverter, id_from_native, 2, values); + RB_GC_GUARD(values[0]); + RB_GC_GUARD(rbType); + + return rbReturnValue; + } + + default: + rb_raise(rb_eRuntimeError, "Unknown type: %d", type->nativeType); + return Qnil; + } +} + +void +rbffi_Types_Init(VALUE moduleFFI) +{ + id_from_native = rb_intern("from_native"); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Types.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Types.h new file mode 100644 index 0000000..0d4806f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Types.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_TYPES_H +#define RBFFI_TYPES_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + NATIVE_VOID, + NATIVE_INT8, + NATIVE_UINT8, + NATIVE_INT16, + NATIVE_UINT16, + NATIVE_INT32, + NATIVE_UINT32, + NATIVE_INT64, + NATIVE_UINT64, + NATIVE_LONG, + NATIVE_ULONG, + NATIVE_FLOAT32, + NATIVE_FLOAT64, + NATIVE_LONGDOUBLE, + NATIVE_POINTER, + NATIVE_CALLBACK, + NATIVE_FUNCTION, + NATIVE_BUFFER_IN, + NATIVE_BUFFER_OUT, + NATIVE_BUFFER_INOUT, + NATIVE_CHAR_ARRAY, + NATIVE_BOOL, + + /** An immutable string. Nul terminated, but only copies in to the native function */ + NATIVE_STRING, + + /** The function takes a variable number of arguments */ + NATIVE_VARARGS, + + /** Struct-by-value param or result */ + NATIVE_STRUCT, + + /** An array type definition */ + NATIVE_ARRAY, + + /** Custom native type */ + NATIVE_MAPPED, +} NativeType; + +#include +#include "Type.h" + +VALUE rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr); +void rbffi_Types_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_TYPES_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Variadic.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Variadic.c new file mode 100644 index 0000000..aafe072 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/Variadic.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (C) 2009 Andrea Fazzi + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include + +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "MethodHandle.h" +#include "Call.h" +#include "Thread.h" + +typedef struct VariadicInvoker_ { + VALUE rbAddress; + VALUE rbReturnType; + VALUE rbEnums; + + Type* returnType; + ffi_abi abi; + void* function; + int paramCount; + bool blocking; +} VariadicInvoker; + + +static VALUE variadic_allocate(VALUE klass); +static VALUE variadic_initialize(VALUE self, VALUE rbFunction, VALUE rbParameterTypes, + VALUE rbReturnType, VALUE options); +static void variadic_mark(VariadicInvoker *); + +static VALUE classVariadicInvoker = Qnil; + + +static VALUE +variadic_allocate(VALUE klass) +{ + VariadicInvoker *invoker; + VALUE obj = Data_Make_Struct(klass, VariadicInvoker, variadic_mark, -1, invoker); + + invoker->rbAddress = Qnil; + invoker->rbEnums = Qnil; + invoker->rbReturnType = Qnil; + invoker->blocking = false; + + return obj; +} + +static void +variadic_mark(VariadicInvoker *invoker) +{ + rb_gc_mark(invoker->rbEnums); + rb_gc_mark(invoker->rbAddress); + rb_gc_mark(invoker->rbReturnType); +} + +static VALUE +variadic_initialize(VALUE self, VALUE rbFunction, VALUE rbParameterTypes, VALUE rbReturnType, VALUE options) +{ + VariadicInvoker* invoker = NULL; + VALUE retval = Qnil; + VALUE convention = Qnil; + VALUE fixed = Qnil; +#if defined(X86_WIN32) + VALUE rbConventionStr; +#endif + int i; + + Check_Type(options, T_HASH); + convention = rb_hash_aref(options, ID2SYM(rb_intern("convention"))); + + Data_Get_Struct(self, VariadicInvoker, invoker); + invoker->rbEnums = rb_hash_aref(options, ID2SYM(rb_intern("enums"))); + invoker->rbAddress = rbFunction; + invoker->function = rbffi_AbstractMemory_Cast(rbFunction, rbffi_PointerClass)->address; + invoker->blocking = RTEST(rb_hash_aref(options, ID2SYM(rb_intern("blocking")))); + +#if defined(X86_WIN32) + rbConventionStr = rb_funcall2(convention, rb_intern("to_s"), 0, NULL); + invoker->abi = (RTEST(convention) && strcmp(StringValueCStr(rbConventionStr), "stdcall") == 0) + ? 
FFI_STDCALL : FFI_DEFAULT_ABI; +#else + invoker->abi = FFI_DEFAULT_ABI; +#endif + + invoker->rbReturnType = rbffi_Type_Lookup(rbReturnType); + if (!RTEST(invoker->rbReturnType)) { + VALUE typeName = rb_funcall2(rbReturnType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid return type (%s)", RSTRING_PTR(typeName)); + } + + Data_Get_Struct(rbReturnType, Type, invoker->returnType); + + invoker->paramCount = -1; + + fixed = rb_ary_new2(RARRAY_LEN(rbParameterTypes) - 1); + for (i = 0; i < RARRAY_LEN(rbParameterTypes); ++i) { + VALUE entry = rb_ary_entry(rbParameterTypes, i); + VALUE rbType = rbffi_Type_Lookup(entry); + Type* type; + + if (!RTEST(rbType)) { + VALUE typeName = rb_funcall2(entry, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid parameter type (%s)", RSTRING_PTR(typeName)); + } + Data_Get_Struct(rbType, Type, type); + if (type->nativeType != NATIVE_VARARGS) { + rb_ary_push(fixed, entry); + } + } + /* + * @fixed and @type_map are used by the parameter mangling ruby code + */ + rb_iv_set(self, "@fixed", fixed); + rb_iv_set(self, "@type_map", rb_hash_aref(options, ID2SYM(rb_intern("type_map")))); + + return retval; +} + +static VALUE +variadic_invoke(VALUE self, VALUE parameterTypes, VALUE parameterValues) +{ + VariadicInvoker* invoker; + FFIStorage* params; + void* retval; + ffi_cif cif; + void** ffiValues; + ffi_type** ffiParamTypes; + ffi_type* ffiReturnType; + Type** paramTypes; + VALUE* argv; + int paramCount = 0, fixedCount = 0, i; + ffi_status ffiStatus; + rbffi_frame_t frame = { 0 }; + + Check_Type(parameterTypes, T_ARRAY); + Check_Type(parameterValues, T_ARRAY); + + Data_Get_Struct(self, VariadicInvoker, invoker); + paramCount = (int) RARRAY_LEN(parameterTypes); + paramTypes = ALLOCA_N(Type *, paramCount); + ffiParamTypes = ALLOCA_N(ffi_type *, paramCount); + params = ALLOCA_N(FFIStorage, paramCount); + ffiValues = ALLOCA_N(void*, paramCount); + argv = ALLOCA_N(VALUE, paramCount); + retval = alloca(MAX(invoker->returnType->ffiType->size, FFI_SIZEOF_ARG)); + + for (i = 0; i < paramCount; ++i) { + VALUE rbType = rb_ary_entry(parameterTypes, i); + + if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) { + rb_raise(rb_eTypeError, "wrong type. 
Expected (FFI::Type)"); + } + Data_Get_Struct(rbType, Type, paramTypes[i]); + + switch (paramTypes[i]->nativeType) { + case NATIVE_INT8: + case NATIVE_INT16: + case NATIVE_INT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("INT32")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + case NATIVE_UINT8: + case NATIVE_UINT16: + case NATIVE_UINT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("UINT32")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + + case NATIVE_FLOAT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("DOUBLE")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + + default: + break; + } + + + ffiParamTypes[i] = paramTypes[i]->ffiType; + if (ffiParamTypes[i] == NULL) { + rb_raise(rb_eArgError, "Invalid parameter type #%x", paramTypes[i]->nativeType); + } + argv[i] = rb_ary_entry(parameterValues, i); + } + + ffiReturnType = invoker->returnType->ffiType; + if (ffiReturnType == NULL) { + rb_raise(rb_eArgError, "Invalid return type"); + } + + /*Get the number of fixed args from @fixed array*/ + fixedCount = RARRAY_LEN(rb_iv_get(self, "@fixed")); + +#ifdef HAVE_FFI_PREP_CIF_VAR + ffiStatus = ffi_prep_cif_var(&cif, invoker->abi, fixedCount, paramCount, ffiReturnType, ffiParamTypes); +#else + ffiStatus = ffi_prep_cif(&cif, invoker->abi, paramCount, ffiReturnType, ffiParamTypes); +#endif + switch (ffiStatus) { + case FFI_BAD_ABI: + rb_raise(rb_eArgError, "Invalid ABI specified"); + case FFI_BAD_TYPEDEF: + rb_raise(rb_eArgError, "Invalid argument type specified"); + case FFI_OK: + break; + default: + rb_raise(rb_eArgError, "Unknown FFI error"); + } + + rbffi_SetupCallParams(paramCount, argv, -1, paramTypes, params, + ffiValues, NULL, 0, invoker->rbEnums); + + rbffi_frame_push(&frame); + + if(unlikely(invoker->blocking)) { + rbffi_blocking_call_t* bc; + bc = ALLOCA_N(rbffi_blocking_call_t, 1); + bc->retval = retval; + bc->function = invoker->function; + bc->ffiValues = ffiValues; + bc->params = params; + bc->frame = &frame; + bc->cif = cif; + + rb_rescue2(rbffi_do_blocking_call, (VALUE) bc, rbffi_save_frame_exception, (VALUE) &frame, rb_eException, (VALUE) 0); + } else { + ffi_call(&cif, FFI_FN(invoker->function), retval, ffiValues); + } + + rbffi_frame_pop(&frame); + + rbffi_save_errno(); + + if (RTEST(frame.exc) && frame.exc != Qnil) { + rb_exc_raise(frame.exc); + } + + return rbffi_NativeValue_ToRuby(invoker->returnType, invoker->rbReturnType, retval); +} + + +void +rbffi_Variadic_Init(VALUE moduleFFI) +{ + classVariadicInvoker = rb_define_class_under(moduleFFI, "VariadicInvoker", rb_cObject); + rb_global_variable(&classVariadicInvoker); + + rb_define_alloc_func(classVariadicInvoker, variadic_allocate); + + rb_define_method(classVariadicInvoker, "initialize", variadic_initialize, 4); + rb_define_method(classVariadicInvoker, "invoke", variadic_invoke, 2); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/compat.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/compat.h new file mode 100644 index 0000000..3f7bbae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/compat.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_COMPAT_H +#define RBFFI_COMPAT_H + +#include + +#ifndef RARRAY_LEN +# define RARRAY_LEN(ary) RARRAY(ary)->len +#endif + +#ifndef RARRAY_PTR +# define RARRAY_PTR(ary) RARRAY(ary)->ptr +#endif + +#ifndef RSTRING_LEN +# define RSTRING_LEN(s) RSTRING(s)->len +#endif + +#ifndef RSTRING_PTR +# define RSTRING_PTR(s) RSTRING(s)->ptr +#endif + +#ifndef NUM2ULL +# define NUM2ULL(x) rb_num2ull((VALUE)x) +#endif + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +#ifdef __GNUC__ +# define likely(x) __builtin_expect((x), 1) +# define unlikely(x) __builtin_expect((x), 0) +#else +# define likely(x) (x) +# define unlikely(x) (x) +#endif + +#ifdef _MSC_VER +#define ffi_type_longdouble ffi_type_double +#endif + +#ifndef MAX +# define MAX(a, b) ((a) < (b) ? (b) : (a)) +#endif +#ifndef MIN +# define MIN(a, b) ((a) < (b) ? (a) : (b)) +#endif + +#ifndef RB_GC_GUARD +# define RB_GC_GUARD(x) (x) +#endif + +#endif /* RBFFI_COMPAT_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/extconf.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/extconf.h new file mode 100644 index 0000000..6f70c69 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/extconf.h @@ -0,0 +1,5 @@ +#ifndef EXTCONF_H +#define EXTCONF_H +#define HAVE_FFI_PREP_CIF_VAR 1 +#define USE_INTERNAL_LIBFFI 1 +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/extconf.rb new file mode 100644 index 0000000..6dd28aa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/extconf.rb @@ -0,0 +1,82 @@ +#!/usr/bin/env ruby + +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + require 'mkmf' + require 'rbconfig' + + def system_libffi_usable? 
+ # We need pkg_config or ffi.h + libffi_ok = pkg_config("libffi") || + have_header("ffi.h") || + find_header("ffi.h", "/usr/local/include", "/usr/include/ffi", + "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/ffi", + "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/ffi") || + (find_header("ffi.h", `xcrun --sdk macosx --show-sdk-path`.strip + "/usr/include/ffi") rescue false) + + # Ensure we can link to ffi_prep_closure_loc + libffi_ok &&= have_library("ffi", "ffi_prep_closure_loc", [ "ffi.h" ]) || + have_library("libffi", "ffi_prep_closure_loc", [ "ffi.h" ]) || + have_library("libffi-8", "ffi_prep_closure_loc", [ "ffi.h" ]) + + if RbConfig::CONFIG['host_os'] =~ /mswin/ + have_library('libffi_convenience') + have_library('shlwapi') + end + + libffi_ok + end + + dir_config("ffi_c") + + # recent versions of ruby add restrictive ansi and warning flags on a whim - kill them all + $warnflags = '' + $CFLAGS.gsub!(/[\s+]-ansi/, '') + $CFLAGS.gsub!(/[\s+]-std=[^\s]+/, '') + # solaris 10 needs -c99 for + $CFLAGS << " -std=c99" if RbConfig::CONFIG['host_os'] =~ /solaris(!?2\.11)/ + + # Check whether we use system libffi + system_libffi = enable_config('system-libffi', :try) + + if system_libffi == :try + system_libffi = ENV['RUBY_CC_VERSION'].nil? && system_libffi_usable? + elsif system_libffi + abort "system libffi is not usable" unless system_libffi_usable? + end + + if system_libffi + have_func('ffi_prep_cif_var') + $defs << "-DHAVE_RAW_API" if have_func("ffi_raw_call") && have_func("ffi_prep_raw_closure") + else + $defs << "-DHAVE_FFI_PREP_CIF_VAR" + $defs << "-DUSE_INTERNAL_LIBFFI" + end + + $defs << "-DHAVE_EXTCONF_H" if $defs.empty? # needed so create_header works + + create_header + create_makefile("ffi_c") + + unless system_libffi + File.open("Makefile", "a") do |mf| + mf.puts "LIBFFI_HOST=--host=#{RbConfig::CONFIG['host_alias']}" if RbConfig::CONFIG.has_key?("host_alias") + if RbConfig::CONFIG['host_os'].downcase =~ /darwin/ + mf.puts "include ${srcdir}/libffi.darwin.mk" + elsif RbConfig::CONFIG['host_os'].downcase =~ /bsd/ + mf.puts '.include "${srcdir}/libffi.bsd.mk"' + elsif RbConfig::CONFIG['host_os'].downcase =~ /mswin64/ + mf.puts '!include $(srcdir)/libffi.vc64.mk' + elsif RbConfig::CONFIG['host_os'].downcase =~ /mswin32/ + mf.puts '!include $(srcdir)/libffi.vc.mk' + else + mf.puts "include ${srcdir}/libffi.mk" + end + end + end + +else + File.open("Makefile", "w") do |mf| + mf.puts "# Dummy makefile for non-mri rubies" + mf.puts "all install::\n" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ffi.c new file mode 100644 index 0000000..22ea3bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/ffi.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include "rbffi.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Struct.h" +#include "StructByValue.h" +#include "DynamicLibrary.h" +#include "Platform.h" +#include "Types.h" +#include "LastError.h" +#include "Function.h" +#include "ClosurePool.h" +#include "MethodHandle.h" +#include "Call.h" +#include "ArrayType.h" +#include "MappedType.h" + +void Init_ffi_c(void); + +VALUE rbffi_FFIModule = Qnil; + +static VALUE moduleFFI = Qnil; + +void +Init_ffi_c(void) +{ + /* + * Document-module: FFI + * + * This module embbed type constants from {FFI::NativeType}. + */ + rbffi_FFIModule = moduleFFI = rb_define_module("FFI"); + rb_global_variable(&rbffi_FFIModule); + + rbffi_Thread_Init(rbffi_FFIModule); + + /* FFI::Type needs to be initialized before most other classes */ + rbffi_Type_Init(moduleFFI); + + rbffi_ArrayType_Init(moduleFFI); + rbffi_LastError_Init(moduleFFI); + rbffi_Call_Init(moduleFFI); + rbffi_ClosurePool_Init(moduleFFI); + rbffi_MethodHandle_Init(moduleFFI); + rbffi_Platform_Init(moduleFFI); + rbffi_AbstractMemory_Init(moduleFFI); + rbffi_Pointer_Init(moduleFFI); + rbffi_Function_Init(moduleFFI); + rbffi_MemoryPointer_Init(moduleFFI); + rbffi_Buffer_Init(moduleFFI); + rbffi_StructByValue_Init(moduleFFI); + rbffi_Struct_Init(moduleFFI); + rbffi_DynamicLibrary_Init(moduleFFI); + rbffi_Variadic_Init(moduleFFI); + rbffi_Types_Init(moduleFFI); + rbffi_MappedType_Init(moduleFFI); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi 2.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi 2.mk new file mode 100644 index 0000000..3b58227 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi 2.mk @@ -0,0 +1,18 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$@(D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! 
-f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + env CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + $(MAKE) -C "$(LIBFFI_BUILD_DIR)" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/.libs/libffi_convenience.la b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/.libs/libffi_convenience.la new file mode 120000 index 0000000..0727452 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/.libs/libffi_convenience.la @@ -0,0 +1 @@ +../libffi_convenience.la \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/Makefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/Makefile new file mode 100644 index 0000000..4606150 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/Makefile @@ -0,0 +1,1796 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + + + + +VPATH = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.1.0 +host_triplet = arm-apple-darwin21.1.0 +target_triplet = arm-apple-darwin21.1.0 +#am__append_1 = doc +#am__append_2 = src/debug.c +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +#am__append_3 = -DFFI_DEBUG +subdir = . 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(noinst_HEADERS) $(am__DIST_COMMON) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = fficonfig.h +CONFIG_CLEAN_FILES = libffi.pc +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \ + "$(DESTDIR)$(pkgconfigdir)" +LTLIBRARIES = $(noinst_LTLIBRARIES) $(toolexeclib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am__libffi_la_SOURCES_DIST = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c src/debug.c +am__dirstamp = $(am__leading_dot)dirstamp +#am__objects_1 = src/debug.lo +am_libffi_la_OBJECTS = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) +AM_V_lt = $(am__v_lt_$(V)) +am__v_lt_ = $(am__v_lt_$(AM_DEFAULT_VERBOSITY)) +am__v_lt_0 = --silent +am__v_lt_1 = +libffi_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(libffi_la_LDFLAGS) $(LDFLAGS) -o $@ +am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) +am__libffi_convenience_la_SOURCES_DIST = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c src/debug.c +am__objects_2 = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +am_libffi_convenience_la_OBJECTS = $(am__objects_2) +nodist_libffi_convenience_la_OBJECTS = +libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \ + $(nodist_libffi_convenience_la_OBJECTS) +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I. 
-I$(srcdir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) +LTCPPASCOMPILE = $(LIBTOOL) $(AM_V_lt) $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CCASFLAGS) $(CCASFLAGS) +AM_V_CPPAS = $(am__v_CPPAS_$(V)) +am__v_CPPAS_ = $(am__v_CPPAS_$(AM_DEFAULT_VERBOSITY)) +am__v_CPPAS_0 = @echo " CPPAS " $@; +am__v_CPPAS_1 = +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_$(V)) +am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY)) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_$(V)) +am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY)) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libffi_la_SOURCES) $(EXTRA_libffi_la_SOURCES) \ + $(libffi_convenience_la_SOURCES) \ + $(EXTRA_libffi_convenience_la_SOURCES) \ + $(nodist_libffi_convenience_la_SOURCES) +DIST_SOURCES = $(am__libffi_la_SOURCES_DIST) \ + $(EXTRA_libffi_la_SOURCES) \ + $(am__libffi_convenience_la_SOURCES_DIST) \ + $(EXTRA_libffi_convenience_la_SOURCES) +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(pkgconfig_DATA) +HEADERS = $(noinst_HEADERS) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + cscope distdir dist dist-all distcheck +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ + $(LISP)fficonfig.h.in +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +CSCOPE = cscope +DIST_SUBDIRS = include testsuite man doc +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/fficonfig.h.in \ + $(srcdir)/libffi.pc.in compile config.guess config.sub depcomp \ + install-sh ltmain.sh missing +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . 
-type f -print +ACLOCAL = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = xcrun clang +CCAS = xcrun clang +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = xcrun clang -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +abs_top_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = xcrun clang +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.1.0 +build_alias = +build_cpu = arm +build_os = darwin21.1.0 +build_vendor = apple +builddir = . 
+datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.1.0 +host_alias = +host_cpu = arm +host_os = darwin21.1.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.1.0 +target_alias = +target_cpu = arm +target_os = darwin21.1.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = +top_builddir = . +top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign subdir-objects +ACLOCAL_AMFLAGS = -I m4 +SUBDIRS = include testsuite man $(am__append_1) +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) +MAKEOVERRIDES = +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la +libffi_la_SOURCES = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c $(am__append_2) +noinst_HEADERS = \ + src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h \ + src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h \ + src/bfin/ffitarget.h \ + src/cris/ffitarget.h \ + src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h \ + src/m68k/ffitarget.h \ + src/m88k/ffitarget.h \ + src/metag/ffitarget.h \ + src/microblaze/ffitarget.h \ + src/mips/ffitarget.h \ + src/moxie/ffitarget.h \ + src/nios2/ffitarget.h \ + src/or1k/ffitarget.h \ + src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h src/powerpc/ffi_powerpc.h \ + src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h \ + src/sh/ffitarget.h \ + src/sh64/ffitarget.h \ + src/sparc/ffitarget.h src/sparc/internal.h \ + src/tile/ffitarget.h \ + src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h src/x86/asmnames.h \ + src/xtensa/ffitarget.h \ + src/dlmalloc.c + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S 
src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c \ + src/frv/eabi.S src/ia64/ffi.c src/ia64/unix.S src/m32r/ffi.c \ + src/m32r/sysv.S src/m68k/ffi.c src/m68k/sysv.S src/m88k/ffi.c \ + src/m88k/obsd.S src/metag/ffi.c src/metag/sysv.S \ + src/microblaze/ffi.c src/microblaze/sysv.S src/mips/ffi.c \ + src/mips/o32.S src/mips/n32.S src/moxie/ffi.c \ + src/moxie/eabi.S src/nios2/ffi.c src/nios2/sysv.S \ + src/or1k/ffi.c src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S \ + src/pa/hpux32.S src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S + +libffi_la_LIBADD = $(TARGET_OBJ) +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) +AM_CFLAGS = $(am__append_3) +libffi_version_script = +##libffi_version_script = -Wl,--version-script,libffi.map +##libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = +##libffi_version_dep = libffi.map +##libffi_version_dep = libffi.map-sun +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) +all: fficonfig.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + +.SUFFIXES: +.SUFFIXES: .S .c .lo .o .obj +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: # $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +fficonfig.h: stamp-h1 + @test -f $@ || rm -f stamp-h1 + @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 + +stamp-h1: $(srcdir)/fficonfig.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status fficonfig.h +$(srcdir)/fficonfig.h.in: # $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f fficonfig.h stamp-h1 +libffi.pc: $(top_builddir)/config.status $(srcdir)/libffi.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(toolexeclibdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \ + } + +uninstall-toolexeclibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \ + done + +clean-toolexeclibLTLIBRARIES: + -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES) + @list='$(toolexeclib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } +src/$(am__dirstamp): + @$(MKDIR_P) src + @: > src/$(am__dirstamp) +src/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/$(DEPDIR) + @: > src/$(DEPDIR)/$(am__dirstamp) +src/prep_cif.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/types.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/java_raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/closures.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/debug.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/aarch64/$(am__dirstamp): + @$(MKDIR_P) src/aarch64 
+ @: > src/aarch64/$(am__dirstamp) +src/aarch64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/aarch64/$(DEPDIR) + @: > src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/win64_armasm.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/alpha/$(am__dirstamp): + @$(MKDIR_P) src/alpha + @: > src/alpha/$(am__dirstamp) +src/alpha/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/alpha/$(DEPDIR) + @: > src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/ffi.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/osf.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/arc/$(am__dirstamp): + @$(MKDIR_P) src/arc + @: > src/arc/$(am__dirstamp) +src/arc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arc/$(DEPDIR) + @: > src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/ffi.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/arcompact.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arm/$(am__dirstamp): + @$(MKDIR_P) src/arm + @: > src/arm/$(am__dirstamp) +src/arm/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arm/$(DEPDIR) + @: > src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/ffi.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv_msvc_arm32.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/avr32/$(am__dirstamp): + @$(MKDIR_P) src/avr32 + @: > src/avr32/$(am__dirstamp) +src/avr32/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/avr32/$(DEPDIR) + @: > src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/ffi.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/sysv.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/bfin/$(am__dirstamp): + @$(MKDIR_P) src/bfin + @: > src/bfin/$(am__dirstamp) +src/bfin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/bfin/$(DEPDIR) + @: > src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/ffi.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/sysv.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/cris/$(am__dirstamp): + @$(MKDIR_P) src/cris + @: > src/cris/$(am__dirstamp) +src/cris/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/cris/$(DEPDIR) + @: > src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/ffi.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/sysv.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/frv/$(am__dirstamp): + @$(MKDIR_P) src/frv + @: > src/frv/$(am__dirstamp) +src/frv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/frv/$(DEPDIR) + @: > src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/ffi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/eabi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/ia64/$(am__dirstamp): + @$(MKDIR_P) src/ia64 + @: > src/ia64/$(am__dirstamp) +src/ia64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/ia64/$(DEPDIR) + @: > src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/ffi.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/unix.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/m32r/$(am__dirstamp): + @$(MKDIR_P) src/m32r + @: > src/m32r/$(am__dirstamp) +src/m32r/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m32r/$(DEPDIR) + @: > 
src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/ffi.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/sysv.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m68k/$(am__dirstamp): + @$(MKDIR_P) src/m68k + @: > src/m68k/$(am__dirstamp) +src/m68k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m68k/$(DEPDIR) + @: > src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/ffi.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/sysv.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m88k/$(am__dirstamp): + @$(MKDIR_P) src/m88k + @: > src/m88k/$(am__dirstamp) +src/m88k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m88k/$(DEPDIR) + @: > src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/ffi.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/obsd.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/metag/$(am__dirstamp): + @$(MKDIR_P) src/metag + @: > src/metag/$(am__dirstamp) +src/metag/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/metag/$(DEPDIR) + @: > src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/ffi.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/sysv.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/microblaze/$(am__dirstamp): + @$(MKDIR_P) src/microblaze + @: > src/microblaze/$(am__dirstamp) +src/microblaze/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/microblaze/$(DEPDIR) + @: > src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/ffi.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/sysv.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/mips/$(am__dirstamp): + @$(MKDIR_P) src/mips + @: > src/mips/$(am__dirstamp) +src/mips/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/mips/$(DEPDIR) + @: > src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/ffi.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/o32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/n32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/moxie/$(am__dirstamp): + @$(MKDIR_P) src/moxie + @: > src/moxie/$(am__dirstamp) +src/moxie/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/moxie/$(DEPDIR) + @: > src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/ffi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/eabi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/nios2/$(am__dirstamp): + @$(MKDIR_P) src/nios2 + @: > src/nios2/$(am__dirstamp) +src/nios2/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/nios2/$(DEPDIR) + @: > src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/ffi.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/sysv.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/or1k/$(am__dirstamp): + @$(MKDIR_P) src/or1k + @: > src/or1k/$(am__dirstamp) +src/or1k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/or1k/$(DEPDIR) + @: > src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/ffi.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/sysv.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/pa/$(am__dirstamp): + @$(MKDIR_P) src/pa + @: > src/pa/$(am__dirstamp) +src/pa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/pa/$(DEPDIR) + @: > src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/ffi.lo: src/pa/$(am__dirstamp) src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/linux.lo: 
src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/hpux32.lo: src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/powerpc/$(am__dirstamp): + @$(MKDIR_P) src/powerpc + @: > src/powerpc/$(am__dirstamp) +src/powerpc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/powerpc/$(DEPDIR) + @: > src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ppc_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/riscv/$(am__dirstamp): + @$(MKDIR_P) src/riscv + @: > src/riscv/$(am__dirstamp) +src/riscv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/riscv/$(DEPDIR) + @: > src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/ffi.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/sysv.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/s390/$(am__dirstamp): + @$(MKDIR_P) src/s390 + @: > src/s390/$(am__dirstamp) +src/s390/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/s390/$(DEPDIR) + @: > src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/ffi.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/sysv.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/sh/$(am__dirstamp): + @$(MKDIR_P) src/sh + @: > src/sh/$(am__dirstamp) +src/sh/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh/$(DEPDIR) + @: > src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/ffi.lo: src/sh/$(am__dirstamp) src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/sysv.lo: src/sh/$(am__dirstamp) \ + src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh64/$(am__dirstamp): + @$(MKDIR_P) src/sh64 + @: > src/sh64/$(am__dirstamp) +src/sh64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh64/$(DEPDIR) + @: > src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/ffi.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/sysv.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sparc/$(am__dirstamp): + @$(MKDIR_P) src/sparc + @: > src/sparc/$(am__dirstamp) +src/sparc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sparc/$(DEPDIR) + @: > src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi64.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v8.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v9.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/tile/$(am__dirstamp): + @$(MKDIR_P) src/tile + @: > src/tile/$(am__dirstamp) 
+src/tile/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/tile/$(DEPDIR) + @: > src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/ffi.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/tile.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/vax/$(am__dirstamp): + @$(MKDIR_P) src/vax + @: > src/vax/$(am__dirstamp) +src/vax/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/vax/$(DEPDIR) + @: > src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/ffi.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/elfbsd.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/x86/$(am__dirstamp): + @$(MKDIR_P) src/x86 + @: > src/x86/$(am__dirstamp) +src/x86/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/x86/$(DEPDIR) + @: > src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffiw64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/unix64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/xtensa/$(am__dirstamp): + @$(MKDIR_P) src/xtensa + @: > src/xtensa/$(am__dirstamp) +src/xtensa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/xtensa/$(DEPDIR) + @: > src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/ffi.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/sysv.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) + +libffi.la: $(libffi_la_OBJECTS) $(libffi_la_DEPENDENCIES) $(EXTRA_libffi_la_DEPENDENCIES) + $(AM_V_CCLD)$(libffi_la_LINK) -rpath $(toolexeclibdir) $(libffi_la_OBJECTS) $(libffi_la_LIBADD) $(LIBS) + +libffi_convenience.la: $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_DEPENDENCIES) $(EXTRA_libffi_convenience_la_DEPENDENCIES) + $(AM_V_CCLD)$(LINK) $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f src/*.$(OBJEXT) + -rm -f src/*.lo + -rm -f src/aarch64/*.$(OBJEXT) + -rm -f src/aarch64/*.lo + -rm -f src/alpha/*.$(OBJEXT) + -rm -f src/alpha/*.lo + -rm -f src/arc/*.$(OBJEXT) + -rm -f src/arc/*.lo + -rm -f src/arm/*.$(OBJEXT) + -rm -f src/arm/*.lo + -rm -f src/avr32/*.$(OBJEXT) + -rm -f src/avr32/*.lo + -rm -f src/bfin/*.$(OBJEXT) + -rm -f src/bfin/*.lo + -rm -f src/cris/*.$(OBJEXT) + -rm -f src/cris/*.lo + -rm -f src/frv/*.$(OBJEXT) + -rm -f src/frv/*.lo + -rm -f src/ia64/*.$(OBJEXT) + -rm -f src/ia64/*.lo + -rm -f src/m32r/*.$(OBJEXT) + -rm -f src/m32r/*.lo + -rm -f src/m68k/*.$(OBJEXT) + -rm -f src/m68k/*.lo + -rm -f src/m88k/*.$(OBJEXT) + -rm -f src/m88k/*.lo + -rm -f src/metag/*.$(OBJEXT) + -rm -f src/metag/*.lo + -rm -f src/microblaze/*.$(OBJEXT) + -rm -f src/microblaze/*.lo + -rm -f src/mips/*.$(OBJEXT) + -rm -f src/mips/*.lo + -rm -f src/moxie/*.$(OBJEXT) + -rm -f src/moxie/*.lo + -rm -f src/nios2/*.$(OBJEXT) + -rm -f src/nios2/*.lo + -rm -f src/or1k/*.$(OBJEXT) + -rm -f src/or1k/*.lo + -rm -f src/pa/*.$(OBJEXT) + -rm -f src/pa/*.lo + -rm -f src/powerpc/*.$(OBJEXT) + -rm -f src/powerpc/*.lo + -rm -f src/riscv/*.$(OBJEXT) + -rm -f src/riscv/*.lo + -rm -f src/s390/*.$(OBJEXT) + -rm 
-f src/s390/*.lo + -rm -f src/sh/*.$(OBJEXT) + -rm -f src/sh/*.lo + -rm -f src/sh64/*.$(OBJEXT) + -rm -f src/sh64/*.lo + -rm -f src/sparc/*.$(OBJEXT) + -rm -f src/sparc/*.lo + -rm -f src/tile/*.$(OBJEXT) + -rm -f src/tile/*.lo + -rm -f src/vax/*.$(OBJEXT) + -rm -f src/vax/*.lo + -rm -f src/x86/*.$(OBJEXT) + -rm -f src/x86/*.lo + -rm -f src/xtensa/*.$(OBJEXT) + -rm -f src/xtensa/*.lo + +distclean-compile: + -rm -f *.tab.c + +#include src/$(DEPDIR)/closures.Plo +#include src/$(DEPDIR)/debug.Plo +#include src/$(DEPDIR)/java_raw_api.Plo +#include src/$(DEPDIR)/prep_cif.Plo +#include src/$(DEPDIR)/raw_api.Plo +#include src/$(DEPDIR)/types.Plo +#include src/aarch64/$(DEPDIR)/ffi.Plo +#include src/aarch64/$(DEPDIR)/sysv.Plo +#include src/aarch64/$(DEPDIR)/win64_armasm.Plo +#include src/alpha/$(DEPDIR)/ffi.Plo +#include src/alpha/$(DEPDIR)/osf.Plo +#include src/arc/$(DEPDIR)/arcompact.Plo +#include src/arc/$(DEPDIR)/ffi.Plo +#include src/arm/$(DEPDIR)/ffi.Plo +#include src/arm/$(DEPDIR)/sysv.Plo +#include src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo +#include src/avr32/$(DEPDIR)/ffi.Plo +#include src/avr32/$(DEPDIR)/sysv.Plo +#include src/bfin/$(DEPDIR)/ffi.Plo +#include src/bfin/$(DEPDIR)/sysv.Plo +#include src/cris/$(DEPDIR)/ffi.Plo +#include src/cris/$(DEPDIR)/sysv.Plo +#include src/frv/$(DEPDIR)/eabi.Plo +#include src/frv/$(DEPDIR)/ffi.Plo +#include src/ia64/$(DEPDIR)/ffi.Plo +#include src/ia64/$(DEPDIR)/unix.Plo +#include src/m32r/$(DEPDIR)/ffi.Plo +#include src/m32r/$(DEPDIR)/sysv.Plo +#include src/m68k/$(DEPDIR)/ffi.Plo +#include src/m68k/$(DEPDIR)/sysv.Plo +#include src/m88k/$(DEPDIR)/ffi.Plo +#include src/m88k/$(DEPDIR)/obsd.Plo +#include src/metag/$(DEPDIR)/ffi.Plo +#include src/metag/$(DEPDIR)/sysv.Plo +#include src/microblaze/$(DEPDIR)/ffi.Plo +#include src/microblaze/$(DEPDIR)/sysv.Plo +#include src/mips/$(DEPDIR)/ffi.Plo +#include src/mips/$(DEPDIR)/n32.Plo +#include src/mips/$(DEPDIR)/o32.Plo +#include src/moxie/$(DEPDIR)/eabi.Plo +#include src/moxie/$(DEPDIR)/ffi.Plo +#include src/nios2/$(DEPDIR)/ffi.Plo +#include src/nios2/$(DEPDIR)/sysv.Plo +#include src/or1k/$(DEPDIR)/ffi.Plo +#include src/or1k/$(DEPDIR)/sysv.Plo +#include src/pa/$(DEPDIR)/ffi.Plo +#include src/pa/$(DEPDIR)/hpux32.Plo +#include src/pa/$(DEPDIR)/linux.Plo +#include src/powerpc/$(DEPDIR)/aix.Plo +#include src/powerpc/$(DEPDIR)/aix_closure.Plo +#include src/powerpc/$(DEPDIR)/darwin.Plo +#include src/powerpc/$(DEPDIR)/darwin_closure.Plo +#include src/powerpc/$(DEPDIR)/ffi.Plo +#include src/powerpc/$(DEPDIR)/ffi_darwin.Plo +#include src/powerpc/$(DEPDIR)/ffi_linux64.Plo +#include src/powerpc/$(DEPDIR)/ffi_sysv.Plo +#include src/powerpc/$(DEPDIR)/linux64.Plo +#include src/powerpc/$(DEPDIR)/linux64_closure.Plo +#include src/powerpc/$(DEPDIR)/ppc_closure.Plo +#include src/powerpc/$(DEPDIR)/sysv.Plo +#include src/riscv/$(DEPDIR)/ffi.Plo +#include src/riscv/$(DEPDIR)/sysv.Plo +#include src/s390/$(DEPDIR)/ffi.Plo +#include src/s390/$(DEPDIR)/sysv.Plo +#include src/sh/$(DEPDIR)/ffi.Plo +#include src/sh/$(DEPDIR)/sysv.Plo +#include src/sh64/$(DEPDIR)/ffi.Plo +#include src/sh64/$(DEPDIR)/sysv.Plo +#include src/sparc/$(DEPDIR)/ffi.Plo +#include src/sparc/$(DEPDIR)/ffi64.Plo +#include src/sparc/$(DEPDIR)/v8.Plo +#include src/sparc/$(DEPDIR)/v9.Plo +#include src/tile/$(DEPDIR)/ffi.Plo +#include src/tile/$(DEPDIR)/tile.Plo +#include src/vax/$(DEPDIR)/elfbsd.Plo +#include src/vax/$(DEPDIR)/ffi.Plo +#include src/x86/$(DEPDIR)/ffi.Plo +#include src/x86/$(DEPDIR)/ffi64.Plo +#include src/x86/$(DEPDIR)/ffiw64.Plo +#include 
src/x86/$(DEPDIR)/sysv.Plo +#include src/x86/$(DEPDIR)/sysv_intel.Plo +#include src/x86/$(DEPDIR)/unix64.Plo +#include src/x86/$(DEPDIR)/win64.Plo +#include src/x86/$(DEPDIR)/win64_intel.Plo +#include src/xtensa/$(DEPDIR)/ffi.Plo +#include src/xtensa/$(DEPDIR)/sysv.Plo + +.S.o: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +# $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CPPAS)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(CPPASCOMPILE) -c -o $@ $< + +.S.obj: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +# $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CPPAS)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.S.lo: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +# $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Plo +# $(AM_V_CPPAS)source='$<' object='$@' libtool=yes +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(LTCPPASCOMPILE) -c -o $@ $< + +.c.o: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +# $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CC)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(COMPILE) -c -o $@ $< + +.c.obj: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +# $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CC)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.c.lo: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +# $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Plo +# $(AM_V_CC)source='$<' object='$@' libtool=yes +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + -rm -rf src/.libs src/_libs + -rm -rf src/aarch64/.libs src/aarch64/_libs + -rm -rf src/alpha/.libs src/alpha/_libs + -rm -rf src/arc/.libs src/arc/_libs + -rm -rf src/arm/.libs src/arm/_libs + -rm -rf src/avr32/.libs src/avr32/_libs + -rm -rf src/bfin/.libs src/bfin/_libs + -rm -rf src/cris/.libs src/cris/_libs + -rm -rf src/frv/.libs src/frv/_libs + -rm -rf src/ia64/.libs src/ia64/_libs + -rm -rf src/m32r/.libs src/m32r/_libs + -rm -rf src/m68k/.libs src/m68k/_libs + -rm -rf src/m88k/.libs src/m88k/_libs + -rm -rf src/metag/.libs src/metag/_libs + -rm -rf src/microblaze/.libs src/microblaze/_libs + -rm -rf src/mips/.libs src/mips/_libs + -rm -rf src/moxie/.libs src/moxie/_libs + -rm -rf src/nios2/.libs src/nios2/_libs + -rm -rf src/or1k/.libs src/or1k/_libs + -rm -rf src/pa/.libs src/pa/_libs + -rm -rf src/powerpc/.libs src/powerpc/_libs + -rm -rf src/riscv/.libs src/riscv/_libs + -rm -rf src/s390/.libs src/s390/_libs + -rm -rf src/sh/.libs src/sh/_libs + -rm -rf src/sh64/.libs src/sh64/_libs + -rm -rf src/sparc/.libs src/sparc/_libs + -rm -rf src/tile/.libs src/tile/_libs + -rm -rf src/vax/.libs src/vax/_libs + -rm -rf src/x86/.libs src/x86/_libs + 
-rm -rf src/xtensa/.libs src/xtensa/_libs + +distclean-libtool: + -rm -f libtool config.lt +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! 
-s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-tarZ: distdir + @echo WARNING: "Support for distribution archives compressed with" \ + "legacy program 'compress' is deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + @echo WARNING: "Support for shar distribution archives is" \ + "deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. +distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build/sub \ + && ../../configure \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + --srcdir=../.. --prefix="$$dc_install_base" \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . ; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) fficonfig.h +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(pkgconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f src/$(DEPDIR)/$(am__dirstamp) + -rm -f src/$(am__dirstamp) + -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/aarch64/$(am__dirstamp) + -rm -f src/alpha/$(DEPDIR)/$(am__dirstamp) + -rm -f src/alpha/$(am__dirstamp) + -rm -f src/arc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arc/$(am__dirstamp) + -rm -f src/arm/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arm/$(am__dirstamp) + -rm -f src/avr32/$(DEPDIR)/$(am__dirstamp) + -rm -f src/avr32/$(am__dirstamp) + -rm -f src/bfin/$(DEPDIR)/$(am__dirstamp) + -rm -f src/bfin/$(am__dirstamp) + -rm -f src/cris/$(DEPDIR)/$(am__dirstamp) + -rm -f src/cris/$(am__dirstamp) + -rm -f src/frv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/frv/$(am__dirstamp) + -rm -f src/ia64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/ia64/$(am__dirstamp) + -rm -f src/m32r/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m32r/$(am__dirstamp) + -rm -f src/m68k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m68k/$(am__dirstamp) + -rm -f src/m88k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m88k/$(am__dirstamp) + -rm -f src/metag/$(DEPDIR)/$(am__dirstamp) + -rm -f src/metag/$(am__dirstamp) + -rm -f src/microblaze/$(DEPDIR)/$(am__dirstamp) + -rm -f src/microblaze/$(am__dirstamp) + -rm -f src/mips/$(DEPDIR)/$(am__dirstamp) + -rm -f src/mips/$(am__dirstamp) + -rm -f src/moxie/$(DEPDIR)/$(am__dirstamp) + -rm -f src/moxie/$(am__dirstamp) + -rm -f src/nios2/$(DEPDIR)/$(am__dirstamp) + -rm -f src/nios2/$(am__dirstamp) + -rm -f src/or1k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/or1k/$(am__dirstamp) + -rm -f src/pa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/pa/$(am__dirstamp) + -rm -f src/powerpc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/powerpc/$(am__dirstamp) + -rm -f src/riscv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/riscv/$(am__dirstamp) + -rm -f src/s390/$(DEPDIR)/$(am__dirstamp) + -rm -f src/s390/$(am__dirstamp) + -rm -f src/sh/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh/$(am__dirstamp) + -rm -f src/sh64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh64/$(am__dirstamp) + -rm -f src/sparc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sparc/$(am__dirstamp) + -rm -f src/tile/$(DEPDIR)/$(am__dirstamp) + -rm -f src/tile/$(am__dirstamp) + -rm -f src/vax/$(DEPDIR)/$(am__dirstamp) + -rm -f src/vax/$(am__dirstamp) + -rm -f src/x86/$(DEPDIR)/$(am__dirstamp) + -rm -f src/x86/$(am__dirstamp) + -rm -f src/xtensa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/xtensa/$(am__dirstamp) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-recursive + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-pkgconfigDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: install-toolexeclibLTLIBRARIES + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.MAKE: $(am__recursive_targets) all install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--refresh check check-am clean clean-cscope clean-generic \ + clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES cscope cscopelist-am ctags \ + ctags-am dist dist-all dist-bzip2 dist-gzip dist-hook \ + dist-lzip dist-shar dist-tarZ dist-xz dist-zip distcheck \ + distclean distclean-compile distclean-generic distclean-hdr \ + distclean-libtool distclean-tags distcleancheck distdir \ + distuninstallcheck dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkgconfigDATA install-ps \ + install-ps-am install-strip install-toolexeclibLTLIBRARIES \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile 
mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ + uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.PRECIOUS: Makefile + +##libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ +## $(libffi_la_OBJECTS) $(libffi_la_LIBADD) +## perl $(top_srcdir)/make_sunver.pl libffi.map \ +## `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ +## sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ +## > $@ || (rm -f $@ ; exit 1) + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $< + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' ; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/config.log b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/config.log new file mode 100644 index 0000000..b4921c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/config.log @@ -0,0 +1,1851 @@ +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libffi configure 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure --disable-static --with-pic=yes --disable-dependency-tracking --disable-docs --host= + +## --------- ## +## Platform. ## +## --------- ## + +hostname = Arthurs-MacBook-Air.local +uname -m = arm64 +uname -r = 21.1.0 +uname -s = Darwin +uname -v = Darwin Kernel Version 21.1.0: Wed Oct 13 17:33:24 PDT 2021; root:xnu-8019.41.5~1/RELEASE_ARM64_T8101 + +/usr/bin/uname -p = arm +/bin/uname -X = unknown + +/bin/arch = unknown +/usr/bin/arch -k = unknown +/usr/convex/getsysinfo = unknown +/usr/bin/hostinfo = Mach kernel version: + Darwin Kernel Version 21.1.0: Wed Oct 13 17:33:24 PDT 2021; root:xnu-8019.41.5~1/RELEASE_ARM64_T8101 +Kernel configured for up to 8 processors. +8 processors are physically available. +8 processors are logically available. +Processor type: arm64e (ARM64E) +Processors active: 0 1 2 3 4 5 6 7 +Primary memory available: 16.00 gigabytes +Default processor set: 630 tasks, 2563 threads, 8 processors +Load average: 16.66, Mach factor: 1.78 +/bin/machine = unknown +/usr/bin/oslevel = unknown +/bin/universe = unknown + +PATH: /opt/homebrew/bin +PATH: /usr/local/bin +PATH: /usr/bin +PATH: /bin +PATH: /usr/sbin +PATH: /sbin +PATH: /usr/local/go/bin +PATH: /Library/Apple/usr/bin +PATH: /Users/arthur/go/bin + + +## ----------- ## +## Core tests. 
## +## ----------- ## + +configure:2704: checking build system type +configure:2718: result: arm-apple-darwin21.1.0 +configure:2738: checking host system type +configure:2751: result: arm-apple-darwin21.1.0 +configure:2771: checking target system type +configure:2784: result: arm-apple-darwin21.1.0 +configure:2881: checking for gsed +configure:2912: result: sed +configure:2940: checking for a BSD-compatible install +configure:3008: result: /opt/homebrew/bin/ginstall -c +configure:3019: checking whether build environment is sane +configure:3074: result: yes +configure:3222: checking for a thread-safe mkdir -p +configure:3261: result: /opt/homebrew/bin/gmkdir -p +configure:3268: checking for gawk +configure:3298: result: no +configure:3268: checking for mawk +configure:3298: result: no +configure:3268: checking for nawk +configure:3298: result: no +configure:3268: checking for awk +configure:3284: found /usr/bin/awk +configure:3295: result: awk +configure:3306: checking whether make sets $(MAKE) +configure:3328: result: yes +configure:3357: checking whether make supports nested variables +configure:3374: result: yes +configure:3559: checking for gcc +configure:3586: result: xcrun clang +configure:3815: checking for C compiler version +configure:3824: xcrun clang --version >&5 +Apple clang version 13.0.0 (clang-1300.0.29.3) +Target: arm64-apple-darwin21.1.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:3835: $? = 0 +configure:3824: xcrun clang -v >&5 +Apple clang version 13.0.0 (clang-1300.0.29.3) +Target: arm64-apple-darwin21.1.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:3835: $? = 0 +configure:3824: xcrun clang -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:3835: $? = 1 +configure:3824: xcrun clang -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:3835: $? = 1 +configure:3855: checking whether the C compiler works +configure:3877: xcrun clang -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:3881: $? = 0 +configure:3929: result: yes +configure:3932: checking for C compiler default output file name +configure:3934: result: a.out +configure:3940: checking for suffix of executables +configure:3947: xcrun clang -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:3951: $? 
= 0 +configure:3973: result: +configure:3995: checking whether we are cross compiling +configure:4003: xcrun clang -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:4007: $? = 0 +configure:4014: ./conftest +configure:4018: $? = 0 +configure:4033: result: no +configure:4038: checking for suffix of object files +configure:4060: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4064: $? = 0 +configure:4085: result: o +configure:4089: checking whether we are using the GNU C compiler +configure:4108: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4108: $? = 0 +configure:4117: result: yes +configure:4126: checking whether xcrun clang accepts -g +configure:4146: xcrun clang -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4146: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4161: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4161: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4177: xcrun clang -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4177: $? 
= 0 +configure:4187: result: yes +configure:4204: checking for xcrun clang option to accept ISO C89 +configure:4267: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4267: $? = 0 +configure:4280: result: none needed +configure:4305: checking whether xcrun clang understands -c and -o together +configure:4327: xcrun clang -c conftest.c -o conftest2.o +configure:4330: $? = 0 +configure:4327: xcrun clang -c conftest.c -o conftest2.o +configure:4330: $? = 0 +configure:4342: result: yes +configure:4370: checking for style of include used by make +configure:4398: result: GNU +configure:4424: checking dependency style of xcrun clang +configure:4535: result: none +configure:4608: checking for g++ +configure:4624: found /usr/bin/g++ +configure:4635: result: g++ +configure:4662: checking for C++ compiler version +configure:4671: g++ --version >&5 +Apple clang version 13.0.0 (clang-1300.0.29.3) +Target: arm64-apple-darwin21.1.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/c++/4.2.1 +configure:4682: $? = 0 +configure:4671: g++ -v >&5 +Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/c++/4.2.1 +Apple clang version 13.0.0 (clang-1300.0.29.3) +Target: arm64-apple-darwin21.1.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:4682: $? = 0 +configure:4671: g++ -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:4682: $? = 1 +configure:4671: g++ -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:4682: $? = 1 +configure:4686: checking whether we are using the GNU C++ compiler +configure:4705: g++ -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4705: $? = 0 +configure:4714: result: yes +configure:4723: checking whether g++ accepts -g +configure:4743: g++ -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4743: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4758: g++ -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4758: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4774: g++ -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4774: $? = 0 +configure:4784: result: yes +configure:4809: checking dependency style of g++ +configure:4920: result: none +configure:4950: checking dependency style of xcrun clang +configure:5059: result: none +configure:5121: checking how to print strings +configure:5148: result: printf +configure:5169: checking for a sed that does not truncate output +configure:5233: result: /usr/bin/sed +configure:5251: checking for grep that handles long lines and -e +configure:5309: result: /usr/bin/grep +configure:5314: checking for egrep +configure:5376: result: /usr/bin/grep -E +configure:5381: checking for fgrep +configure:5443: result: /usr/bin/grep -F +configure:5478: checking for ld used by xcrun clang +configure:5545: result: ld +configure:5552: checking if the linker (ld) is GNU ld +configure:5567: result: no +configure:5579: checking for BSD- or MS-compatible name lister (nm) +configure:5633: result: /usr/bin/nm -B +configure:5763: checking the name lister (/usr/bin/nm -B) interface +configure:5770: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. 
+configure:5773: /usr/bin/nm -B "conftest.o" +configure:5776: output +0000000000000000 S _some_variable +0000000000000000 t ltmp0 +0000000000000000 s ltmp1 +configure:5783: result: BSD nm +configure:5786: checking whether ln -s works +configure:5790: result: yes +configure:5798: checking the maximum length of command line arguments +configure:5929: result: 786432 +configure:5977: checking how to convert arm-apple-darwin21.1.0 file names to arm-apple-darwin21.1.0 format +configure:6017: result: func_convert_file_noop +configure:6024: checking how to convert arm-apple-darwin21.1.0 file names to toolchain format +configure:6044: result: func_convert_file_noop +configure:6051: checking for ld option to reload object files +configure:6058: result: -r +configure:6132: checking for objdump +configure:6148: found /usr/bin/objdump +configure:6159: result: objdump +configure:6191: checking how to recognize dependent libraries +configure:6391: result: pass_all +configure:6476: checking for dlltool +configure:6506: result: no +configure:6536: checking how to associate runtime and link libraries +configure:6563: result: printf %s\n +configure:6624: checking for ar +configure:6640: found /usr/bin/ar +configure:6651: result: ar +configure:6688: checking for archiver @FILE support +configure:6705: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:6705: $? = 0 +configure:6708: ar cru libconftest.a @conftest.lst >&5 +ar: @conftest.lst: No such file or directory +configure:6711: $? = 1 +configure:6731: result: no +configure:6789: checking for strip +configure:6805: found /usr/bin/strip +configure:6816: result: strip +configure:6888: checking for ranlib +configure:6904: found /usr/bin/ranlib +configure:6915: result: ranlib +configure:7017: checking command to parse /usr/bin/nm -B output from xcrun clang object +configure:7170: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:7173: $? = 0 +configure:7177: /usr/bin/nm -B conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:7180: $? = 0 +cannot find nm_test_var in conftest.nm +configure:7170: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:7173: $? = 0 +configure:7177: /usr/bin/nm -B conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:7180: $? 
= 0 +configure:7246: xcrun clang -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c conftstm.o >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:7249: $? = 0 +configure:7287: result: ok +configure:7334: checking for sysroot +configure:7364: result: no +configure:7371: checking for a working dd +configure:7409: result: /bin/dd +configure:7413: checking how to truncate binary pipes +configure:7428: result: /bin/dd bs=4096 count=1 +configure:7757: checking for mt +configure:7787: result: no +configure:7807: checking if : is a manifest tool +configure:7813: : '-?' +configure:7821: result: no +configure:7877: checking for dsymutil +configure:7893: found /usr/bin/dsymutil +configure:7904: result: dsymutil +configure:7969: checking for nmedit +configure:7985: found /usr/bin/nmedit +configure:7996: result: nmedit +configure:8061: checking for lipo +configure:8077: found /usr/bin/lipo +configure:8088: result: lipo +configure:8153: checking for otool +configure:8169: found /usr/bin/otool +configure:8180: result: otool +configure:8245: checking for otool64 +configure:8275: result: no +configure:8320: checking for -single_module linker flag +xcrun clang -L/usr/local/opt/libffi/lib -o libconftest.dylib -dynamiclib -Wl,-single_module conftest.c +configure:8353: result: yes +configure:8356: checking for -exported_symbols_list linker flag +configure:8376: xcrun clang -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib -Wl,-exported_symbols_list,conftest.sym conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:8376: $? = 0 +configure:8386: result: yes +configure:8389: checking for -force_load linker flag +xcrun clang -c -o conftest.o conftest.c +ar cru libconftest.a conftest.o +ranlib libconftest.a +xcrun clang -L/usr/local/opt/libffi/lib -o conftest conftest.c -Wl,-force_load,./libconftest.a +configure:8421: result: yes +configure:8498: checking how to run the C preprocessor +configure:8529: xcrun clang -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8529: $? = 0 +configure:8543: xcrun clang -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:11:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:8543: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| #include +configure:8568: result: xcrun clang -E +configure:8588: xcrun clang -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8588: $? = 0 +configure:8602: xcrun clang -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:11:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:8602: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| #include +configure:8631: checking for ANSI C header files +configure:8651: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8651: $? = 0 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8724: xcrun clang -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:8724: $? = 0 +configure:8724: ./conftest +configure:8724: $? = 0 +configure:8735: result: yes +configure:8748: checking for sys/types.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? 
= 0 +configure:8748: result: yes +configure:8748: checking for sys/stat.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for stdlib.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for string.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for memory.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for strings.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for inttypes.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for stdint.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for unistd.h +configure:8748: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? 
= 0 +configure:8748: result: yes +configure:8762: checking for dlfcn.h +configure:8762: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8762: $? = 0 +configure:8762: result: yes +configure:9029: checking for objdir +configure:9044: result: .libs +configure:9308: checking if xcrun clang supports -fno-rtti -fno-exceptions +configure:9326: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-rtti -fno-exceptions conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9330: $? = 0 +configure:9343: result: yes +configure:9701: checking for xcrun clang option to produce PIC +configure:9708: result: -fno-common -DPIC +configure:9716: checking if xcrun clang PIC flag -fno-common -DPIC works +configure:9734: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-common -DPIC -DPIC conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9738: $? = 0 +configure:9751: result: yes +configure:9780: checking if xcrun clang static flag -static works +configure:9808: result: no +configure:9823: checking if xcrun clang supports -c -o file.o +configure:9844: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -o out/conftest2.o conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9848: $? = 0 +configure:9870: result: yes +configure:9878: checking if xcrun clang supports -c -o file.o +configure:9925: result: yes +configure:9958: checking whether the xcrun clang linker (ld) supports shared libraries +configure:11221: result: yes +configure:11461: checking dynamic linker characteristics +configure:12291: result: darwin21.1.0 dyld +configure:12413: checking how to hardcode library paths into programs +configure:12438: result: immediate +configure:12986: checking whether stripping libraries is possible +configure:13000: result: yes +configure:13026: checking if libtool supports shared libraries +configure:13028: result: yes +configure:13031: checking whether to build shared libraries +configure:13056: result: yes +configure:13059: checking whether to build static libraries +configure:13063: result: no +configure:13086: checking how to run the C++ preprocessor +configure:13113: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:13113: $? 
= 0 +configure:13127: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.cpp:23:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:13127: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| /* end confdefs.h. */ +| #include +configure:13152: result: g++ -E +configure:13172: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:13172: $? = 0 +configure:13186: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.cpp:23:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:13186: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| /* end confdefs.h. */ +| #include +configure:13348: checking for ld used by g++ +configure:13415: result: ld +configure:13422: checking if the linker (ld) is GNU ld +configure:13437: result: no +configure:13492: checking whether the g++ linker (ld) supports shared libraries +configure:14565: result: yes +configure:14601: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:14604: $? 
= 0 +configure:15085: checking for g++ option to produce PIC +configure:15092: result: -fno-common -DPIC +configure:15100: checking if g++ PIC flag -fno-common -DPIC works +configure:15118: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-common -DPIC -DPIC conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:15122: $? = 0 +configure:15135: result: yes +configure:15158: checking if g++ static flag -static works +configure:15186: result: no +configure:15198: checking if g++ supports -c -o file.o +configure:15219: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -o out/conftest2.o conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:15223: $? = 0 +configure:15245: result: yes +configure:15250: checking if g++ supports -c -o file.o +configure:15297: result: yes +configure:15327: checking whether the g++ linker (ld) supports shared libraries +configure:15370: result: yes +configure:15511: checking dynamic linker characteristics +configure:16268: result: darwin21.1.0 dyld +configure:16333: checking how to hardcode library paths into programs +configure:16358: result: immediate +configure:16426: checking size of size_t +configure:16431: xcrun clang -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:16431: $? = 0 +configure:16431: ./conftest +configure:16431: $? = 0 +configure:16445: result: 8 +configure:16456: checking for C compiler vendor +configure:16504: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| #if !(defined(__ICC) || defined(__ECC) || defined(__INTEL_COMPILER)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__xlc__) || defined(__xlC__) || defined(__IBMC__) || defined(__IBMCPP__)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__PATHCC__) || defined(__PATHSCALE__)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: xcrun clang -c -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:16504: $? 
= 0 +configure:16512: result: clang +configure:17357: checking CFLAGS for maximum warnings +configure:17377: xcrun clang -c -warn all -warn all -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +clang: error: unknown argument: '-warn' +clang: error: unknown argument: '-warn' +clang: error: no such file or directory: 'all' +clang: error: no such file or directory: 'all' +configure:17377: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:17377: xcrun clang -c -pedantic -Wall -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17377: $? = 0 +configure:17385: result: -Wall +configure:17405: : CFLAGS="$CFLAGS" +configure:17408: $? = 0 +configure:17445: checking whether to enable maintainer-specific portions of Makefiles +configure:17454: result: no +configure:17470: checking sys/mman.h usability +configure:17470: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17470: $? = 0 +configure:17470: result: yes +configure:17470: checking sys/mman.h presence +configure:17470: xcrun clang -E -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17470: $? = 0 +configure:17470: result: yes +configure:17470: checking for sys/mman.h +configure:17470: result: yes +configure:17483: checking for mmap +configure:17483: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17483: $? 
= 0 +configure:17483: result: yes +configure:17483: checking for mkostemp +configure:17483: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17483: $? = 0 +configure:17483: result: yes +configure:17493: checking for sys/mman.h +configure:17493: result: yes +configure:17501: checking for mmap +configure:17501: result: yes +configure:17514: checking whether read-only mmap of a plain file works +configure:17531: result: yes +configure:17533: checking whether mmap from /dev/zero works +configure:17555: result: no +configure:17559: checking for MAP_ANON(YMOUS) +configure:17582: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:38:5: warning: unused variable 'n' [-Wunused-variable] +int n = MAP_ANONYMOUS; + ^ +2 warnings generated. +configure:17582: $? = 0 +configure:17589: result: yes +configure:17595: checking whether mmap with MAP_ANON(YMOUS) works +configure:17612: result: yes +configure:17655: checking for ANSI C header files +configure:17759: result: yes +configure:17769: checking for memcpy +configure:17769: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:53:6: warning: incompatible redeclaration of library function 'memcpy' [-Wincompatible-library-redeclaration] +char memcpy (); + ^ +conftest.c:53:6: note: 'memcpy' is a builtin with type 'void *(void *, const void *, unsigned long)' +2 warnings generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17769: $? = 0 +configure:17769: result: yes +configure:17778: checking for size_t +configure:17778: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17778: $? = 0 +configure:17778: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:67:21: error: expected expression +if (sizeof ((size_t))) + ^ +1 warning and 1 error generated. +configure:17778: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| /* end confdefs.h. */ +| #include +| #ifdef HAVE_SYS_TYPES_H +| # include +| #endif +| #ifdef HAVE_SYS_STAT_H +| # include +| #endif +| #ifdef STDC_HEADERS +| # include +| # include +| #else +| # ifdef HAVE_STDLIB_H +| # include +| # endif +| #endif +| #ifdef HAVE_STRING_H +| # if !defined STDC_HEADERS && defined HAVE_MEMORY_H +| # include +| # endif +| # include +| #endif +| #ifdef HAVE_STRINGS_H +| # include +| #endif +| #ifdef HAVE_INTTYPES_H +| # include +| #endif +| #ifdef HAVE_STDINT_H +| # include +| #endif +| #ifdef HAVE_UNISTD_H +| # include +| #endif +| int +| main () +| { +| if (sizeof ((size_t))) +| return 0; +| ; +| return 0; +| } +configure:17778: result: yes +configure:17791: checking for working alloca.h +configure:17808: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17808: $? = 0 +configure:17816: result: yes +configure:17824: checking for alloca +configure:17861: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17861: $? = 0 +configure:17869: result: yes +configure:17980: checking size of double +configure:17985: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17985: $? = 0 +configure:17985: ./conftest +configure:17985: $? 
= 0 +configure:17999: result: 8 +configure:18013: checking size of long double +configure:18018: xcrun clang -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:18018: $? = 0 +configure:18018: ./conftest +configure:18018: $? = 0 +configure:18032: result: 8 +configure:18065: checking whether byte ordering is bigendian +configure:18080: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18080: $? = 0 +configure:18125: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18125: $? = 0 +configure:18143: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:42:4: error: use of undeclared identifier 'not' + not big endian + ^ +1 warning and 1 error generated. +configure:18143: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. */ +| #include +| #include +| +| int +| main () +| { +| #if BYTE_ORDER != BIG_ENDIAN +| not big endian +| #endif +| +| ; +| return 0; +| } +configure:18271: result: no +configure:18290: checking assembler .cfi pseudo-op support +configure:18308: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +:1:14: error: Expected an identifier +.cfi_sections + ^ +1 warning and 1 error generated. 
+configure:18308: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. */ +| asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc"); +| int +| main () +| { +| +| ; +| return 0; +| } +configure:18316: result: no +configure:18451: checking whether compiler supports pointer authentication +configure:18479: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:47:3: error: Pointer authentication not supported +# error Pointer authentication not supported + ^ +1 warning and 1 error generated. +configure:18479: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| #ifdef __clang__ +| # if __has_feature(ptrauth_calls) +| # define HAVE_PTRAUTH 1 +| # endif +| #endif +| +| #ifndef HAVE_PTRAUTH +| # error Pointer authentication not supported +| #endif +| +| ; +| return 0; +| } +configure:18487: result: no +configure:18506: checking for _ prefix in compiled symbols +configure:18516: xcrun clang -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:2:12: warning: expression result unused [-Wunused-value] +int main(){nm_test_func;return 0;} + ^~~~~~~~~~~~ +2 warnings generated. +configure:18519: $? = 0 +configure:18523: /usr/bin/nm -B conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:18526: $? = 0 +configure:18548: result: yes +configure:18623: checking whether .eh_frame section should be read-only +configure:18639: result: yes +configure:18654: checking for __attribute__((visibility("hidden"))) +configure:18663: xcrun clang -Werror -S conftest.c -o conftest.s 1>&5 +configure:18666: $? = 0 +configure:18675: result: yes +configure:18814: checking for ld used by xcrun clang +configure:18881: result: ld +configure:18888: checking if the linker (ld) is GNU ld +configure:18903: result: no +configure:19216: versioning on shared library symbols is no +configure:19337: checking that generated files are newer than configure +configure:19343: result: done +configure:19411: creating ./config.status + +## ---------------------- ## +## Running config.status. ## +## ---------------------- ## + +This file was extended by libffi config.status 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = + CONFIG_HEADERS = + CONFIG_LINKS = + CONFIG_COMMANDS = + $ ./config.status + +on Arthurs-MacBook-Air.local + +config.status:1221: creating include/Makefile +config.status:1221: creating include/ffi.h +config.status:1221: creating Makefile +config.status:1221: creating testsuite/Makefile +config.status:1221: creating man/Makefile +config.status:1221: creating doc/Makefile +config.status:1221: creating libffi.pc +config.status:1221: creating fficonfig.h +config.status:1435: executing buildir commands +config.status:1448: skipping top_srcdir/Makefile - not created +config.status:1435: executing depfiles commands +config.status:1435: executing libtool commands +config.status:1435: executing include commands +config.status:1435: executing src commands + +## ---------------- ## +## Cache variables. 
## +## ---------------- ## + +ac_cv_build=arm-apple-darwin21.1.0 +ac_cv_c_bigendian=no +ac_cv_c_compiler_gnu=yes +ac_cv_cflags_warn_all=-Wall +ac_cv_cxx_compiler_gnu=yes +ac_cv_decl_map_anon=yes +ac_cv_env_CCASFLAGS_set= +ac_cv_env_CCASFLAGS_value= +ac_cv_env_CCAS_set= +ac_cv_env_CCAS_value= +ac_cv_env_CPPFLAGS_set=set +ac_cv_env_CPPFLAGS_value='-DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' +ac_cv_env_CPP_set= +ac_cv_env_CPP_value= +ac_cv_env_CXXCPP_set= +ac_cv_env_CXXCPP_value= +ac_cv_env_LT_SYS_LIBRARY_PATH_set= +ac_cv_env_LT_SYS_LIBRARY_PATH_value= +ac_cv_env_build_alias_set= +ac_cv_env_build_alias_value= +ac_cv_env_host_alias_set=set +ac_cv_env_host_alias_value= +ac_cv_env_target_alias_set= +ac_cv_env_target_alias_value= +ac_cv_func_alloca_works=yes +ac_cv_func_memcpy=yes +ac_cv_func_mkostemp=yes +ac_cv_func_mmap=yes +ac_cv_func_mmap_anon=yes +ac_cv_func_mmap_dev_zero=no +ac_cv_func_mmap_file=yes +ac_cv_header_dlfcn_h=yes +ac_cv_header_inttypes_h=yes +ac_cv_header_memory_h=yes +ac_cv_header_stdc=yes +ac_cv_header_stdint_h=yes +ac_cv_header_stdlib_h=yes +ac_cv_header_string_h=yes +ac_cv_header_strings_h=yes +ac_cv_header_sys_mman_h=yes +ac_cv_header_sys_stat_h=yes +ac_cv_header_sys_types_h=yes +ac_cv_header_unistd_h=yes +ac_cv_host=arm-apple-darwin21.1.0 +ac_cv_objext=o +ac_cv_path_EGREP='/usr/bin/grep -E' +ac_cv_path_FGREP='/usr/bin/grep -F' +ac_cv_path_GREP=/usr/bin/grep +ac_cv_path_SED=/usr/bin/sed +ac_cv_path_ax_enable_builddir_sed=sed +ac_cv_path_install='/opt/homebrew/bin/ginstall -c' +ac_cv_path_lt_DD=/bin/dd +ac_cv_path_mkdir=/opt/homebrew/bin/gmkdir +ac_cv_prog_AWK=awk +ac_cv_prog_CPP='xcrun clang -E' +ac_cv_prog_CXXCPP='g++ -E' +ac_cv_prog_ac_ct_AR=ar +ac_cv_prog_ac_ct_CC='xcrun clang' +ac_cv_prog_ac_ct_CXX=g++ +ac_cv_prog_ac_ct_DSYMUTIL=dsymutil +ac_cv_prog_ac_ct_LIPO=lipo +ac_cv_prog_ac_ct_NMEDIT=nmedit +ac_cv_prog_ac_ct_OBJDUMP=objdump +ac_cv_prog_ac_ct_OTOOL=otool +ac_cv_prog_ac_ct_RANLIB=ranlib +ac_cv_prog_ac_ct_STRIP=strip +ac_cv_prog_cc_c89= +ac_cv_prog_cc_g=yes +ac_cv_prog_cxx_g=yes +ac_cv_prog_make_make_set=yes +ac_cv_sizeof_double=8 +ac_cv_sizeof_long_double=8 +ac_cv_sizeof_size_t=8 +ac_cv_target=arm-apple-darwin21.1.0 +ac_cv_type_size_t=yes +ac_cv_working_alloca_h=yes +am_cv_CCAS_dependencies_compiler_type=none +am_cv_CC_dependencies_compiler_type=none +am_cv_CXX_dependencies_compiler_type=none +am_cv_make_support_nested_variables=yes +am_cv_prog_cc_c_o=yes +ax_cv_c_compiler_vendor=clang +gcc_cv_as_cfi_pseudo_op=no +libffi_cv_as_ptrauth=no +libffi_cv_hidden_visibility_attribute=yes +libffi_cv_ro_eh_frame=yes +lt_cv_apple_cc_single_mod=yes +lt_cv_ar_at_file=no +lt_cv_deplibs_check_method=pass_all +lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_ld_exported_symbols_list=yes +lt_cv_ld_force_load=yes +lt_cv_ld_reload_flag=-r +lt_cv_nm_interface='BSD nm' +lt_cv_objdir=.libs +lt_cv_path_LD=ld +lt_cv_path_LDCXX=ld +lt_cv_path_NM='/usr/bin/nm -B' +lt_cv_path_mainfest_tool=no +lt_cv_prog_compiler_c_o=yes +lt_cv_prog_compiler_c_o_CXX=yes +lt_cv_prog_compiler_pic='-fno-common -DPIC' +lt_cv_prog_compiler_pic_CXX='-fno-common -DPIC' +lt_cv_prog_compiler_pic_works=yes +lt_cv_prog_compiler_pic_works_CXX=yes +lt_cv_prog_compiler_rtti_exceptions=yes +lt_cv_prog_compiler_static_works=no +lt_cv_prog_compiler_static_works_CXX=no +lt_cv_prog_gnu_ld=no +lt_cv_prog_gnu_ldcxx=no +lt_cv_sharedlib_from_linklib_cmd='printf %s\n' +lt_cv_sys_global_symbol_pipe='sed -n -e '\''s/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ 
]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_c_name_address='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_cdecl='sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import= +lt_cv_sys_max_cmd_len=786432 +lt_cv_sys_symbol_underscore=yes +lt_cv_to_host_file_cmd=func_convert_file_noop +lt_cv_to_tool_file_cmd=func_convert_file_noop +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' + +## ----------------- ## +## Output variables. ## +## ----------------- ## + +ACLOCAL='${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15' +ALLOCA='' +AMDEPBACKSLASH='' +AMDEP_FALSE='' +AMDEP_TRUE='#' +AMTAR='$${TAR-tar}' +AM_BACKSLASH='\' +AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +AM_DEFAULT_VERBOSITY='1' +AM_LTLDFLAGS='' +AM_RUNTESTFLAGS='' +AM_V='$(V)' +AR='ar' +AUTOCONF='${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf' +AUTOHEADER='${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader' +AUTOMAKE='${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15' +AWK='awk' +BUILD_DOCS_FALSE='' +BUILD_DOCS_TRUE='#' +CC='xcrun clang' +CCAS='xcrun clang' +CCASDEPMODE='depmode=none' +CCASFLAGS='' +CCDEPMODE='depmode=none' +CFLAGS=' -Wall -fexceptions' +CPP='xcrun clang -E' +CPPFLAGS='-DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' +CXX='g++' +CXXCPP='g++ -E' +CXXDEPMODE='depmode=none' +CXXFLAGS='-g -O2' +CYGPATH_W='echo' +DEFS='-DHAVE_CONFIG_H' +DEPDIR='.deps' +DLLTOOL='false' +DSYMUTIL='dsymutil' +DUMPBIN='' +ECHO_C='\c' +ECHO_N='' +ECHO_T='' +EGREP='/usr/bin/grep -E' +EXEEXT='' +FFI_DEBUG_FALSE='' +FFI_DEBUG_TRUE='#' +FFI_EXEC_TRAMPOLINE_TABLE='1' +FFI_EXEC_TRAMPOLINE_TABLE_FALSE='#' +FFI_EXEC_TRAMPOLINE_TABLE_TRUE='' +FGREP='/usr/bin/grep -F' +GREP='/usr/bin/grep' +HAVE_LONG_DOUBLE='0' +HAVE_LONG_DOUBLE_VARIANT='0' +INSTALL_DATA='${INSTALL} -m 644' +INSTALL_PROGRAM='${INSTALL}' +INSTALL_SCRIPT='${INSTALL}' +INSTALL_STRIP_PROGRAM='$(install_sh) -c -s' +LD='ld' +LDFLAGS='-L/usr/local/opt/libffi/lib' +LIBFFI_BUILD_VERSIONED_SHLIB_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE='#' +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE='#' +LIBFFI_BUILD_VERSIONED_SHLIB_TRUE='#' +LIBOBJS='' +LIBS='' +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +LIPO='lipo' +LN_S='ln -s' +LTLIBOBJS='' +LT_SYS_LIBRARY_PATH='' +MAINT='#' +MAINTAINER_MODE_FALSE='' +MAINTAINER_MODE_TRUE='#' +MAKEINFO='${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo' +MANIFEST_TOOL=':' +MKDIR_P='/opt/homebrew/bin/gmkdir -p' +NM='/usr/bin/nm -B' +NMEDIT='nmedit' +OBJDUMP='objdump' +OBJEXT='o' +OPT_LDFLAGS='' +OTOOL64=':' +OTOOL='otool' +PACKAGE='libffi' 
+PACKAGE_BUGREPORT='http://github.com/libffi/libffi/issues' +PACKAGE_NAME='libffi' +PACKAGE_STRING='libffi 3.3' +PACKAGE_TARNAME='libffi' +PACKAGE_URL='' +PACKAGE_VERSION='3.3' +PATH_SEPARATOR=':' +PRTDIAG='' +RANLIB='ranlib' +SECTION_LDFLAGS='' +SED='/usr/bin/sed' +SET_MAKE='' +SHELL='/bin/sh' +STRIP='strip' +TARGET='ARM' +TARGETDIR='arm' +TARGET_OBJ=' src/arm/ffi.lo src/arm/sysv.lo' +TESTSUBDIR_FALSE='#' +TESTSUBDIR_TRUE='' +VERSION='3.3' +ac_ct_AR='ar' +ac_ct_CC='xcrun clang' +ac_ct_CXX='g++' +ac_ct_DUMPBIN='' +am__EXEEXT_FALSE='' +am__EXEEXT_TRUE='#' +am__fastdepCCAS_FALSE='' +am__fastdepCCAS_TRUE='#' +am__fastdepCC_FALSE='' +am__fastdepCC_TRUE='#' +am__fastdepCXX_FALSE='' +am__fastdepCXX_TRUE='#' +am__include='include' +am__isrc=' -I$(srcdir)' +am__leading_dot='.' +am__nodep='' +am__quote='' +am__tar='$${TAR-tar} chof - "$$tardir"' +am__untar='$${TAR-tar} xf -' +ax_enable_builddir_sed='sed' +bindir='${exec_prefix}/bin' +build='arm-apple-darwin21.1.0' +build_alias='' +build_cpu='arm' +build_os='darwin21.1.0' +build_vendor='apple' +datadir='${datarootdir}' +datarootdir='${prefix}/share' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +dvidir='${docdir}' +exec_prefix='${prefix}' +host='arm-apple-darwin21.1.0' +host_alias='' +host_cpu='arm' +host_os='darwin21.1.0' +host_vendor='apple' +htmldir='${docdir}' +includedir='${prefix}/include' +infodir='${datarootdir}/info' +install_sh='${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh' +libdir='${exec_prefix}/lib' +libexecdir='${exec_prefix}/libexec' +localedir='${datarootdir}/locale' +localstatedir='${prefix}/var' +mandir='${datarootdir}/man' +mkdir_p='$(MKDIR_P)' +oldincludedir='/usr/include' +pdfdir='${docdir}' +prefix='/usr/local' +program_transform_name='s,x,x,' +psdir='${docdir}' +runstatedir='${localstatedir}/run' +sbindir='${exec_prefix}/sbin' +sharedstatedir='${prefix}/com' +sys_symbol_underscore='yes' +sysconfdir='${prefix}/etc' +target='arm-apple-darwin21.1.0' +target_alias='' +target_cpu='arm' +target_os='darwin21.1.0' +target_vendor='apple' +toolexecdir='${libdir}/gcc-lib/$(target_alias)' +toolexeclibdir='${libdir}' + +## ----------- ## +## confdefs.h. 
## +## ----------- ## + +/* confdefs.h */ +#define PACKAGE_NAME "libffi" +#define PACKAGE_TARNAME "libffi" +#define PACKAGE_VERSION "3.3" +#define PACKAGE_STRING "libffi 3.3" +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +#define PACKAGE_URL "" +#define PACKAGE "libffi" +#define VERSION "3.3" +#define STDC_HEADERS 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_STRINGS_H 1 +#define HAVE_INTTYPES_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_DLFCN_H 1 +#define LT_OBJDIR ".libs/" +#define SIZEOF_SIZE_T 8 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_MMAP 1 +#define HAVE_MKOSTEMP 1 +#define HAVE_MMAP_FILE 1 +#define HAVE_MMAP_ANON 1 +#define STDC_HEADERS 1 +#define HAVE_MEMCPY 1 +#define HAVE_ALLOCA_H 1 +#define HAVE_ALLOCA 1 +#define SIZEOF_DOUBLE 8 +#define SIZEOF_LONG_DOUBLE 8 +#define SYMBOL_UNDERSCORE 1 +#define FFI_EXEC_TRAMPOLINE_TABLE 1 +#define HAVE_RO_EH_FRAME 1 +#define EH_FRAME_FLAGS "a" +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +configure: exit 0 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/config.status b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/config.status new file mode 100755 index 0000000..f661589 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/config.status @@ -0,0 +1,2398 @@ +#! /bin/sh +# Generated by configure. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
+as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libffi $as_me 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +# Files that config.status was made for. +config_files=" include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc" +config_headers=" fficonfig.h" +config_commands=" buildir depfiles libtool include src" + +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." 
+ +ac_cs_config="'--disable-static' '--with-pic=yes' '--disable-dependency-tracking' '--disable-docs' '--host=' 'host_alias=' 'CPPFLAGS=-DRUBY_EXTCONF_H=\\\"extconf.h\\\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT '" +ac_cs_version="\ +libffi config.status 3.3 +configured by /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure, generated by GNU Autoconf 2.69, + with options \"$ac_cs_config\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21' +srcdir='/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi' +INSTALL='/opt/homebrew/bin/ginstall -c' +MKDIR_P='/opt/homebrew/bin/gmkdir -p' +AWK='awk' +test -n "$AWK" || AWK=awk +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +if $ac_cs_recheck; then + set X /bin/sh '/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure' '--disable-static' '--with-pic=yes' '--disable-dependency-tracking' '--disable-docs' '--host=' 'host_alias=' 'CPPFLAGS=-DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' $ac_configure_extra_args --no-create --no-recursion + shift + $as_echo "running CONFIG_SHELL=/bin/sh $*" >&6 + CONFIG_SHELL='/bin/sh' + export CONFIG_SHELL + exec "$@" +fi + +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +# +# INIT-COMMANDS +# +ax_enable_builddir_srcdir="/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi" # /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ax_enable_builddir_host="" # / arm-apple-darwin21.1.0 +ax_enable_builddir_version="3.3" # 3.3 +ax_enable_builddir_package="libffi" # libffi +ax_enable_builddir_auxdir="/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi" # +ax_enable_builddir_sed="sed" # /usr/bin/sed +ax_enable_builddir="." # + +AMDEP_TRUE="#" ac_aux_dir="/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' +double_quote_subst='s/\(["`\\]\)/\\\1/g' +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' +macro_version='2.4.6' +macro_revision='2.4.6' +enable_shared='yes' +enable_static='no' +pic_mode='yes' +enable_fast_install='needless' +shared_archive_member_spec='' +SHELL='/bin/sh' +ECHO='printf %s\n' +PATH_SEPARATOR=':' +host_alias='' +host='arm-apple-darwin21.1.0' +host_os='darwin21.1.0' +build_alias='' +build='arm-apple-darwin21.1.0' +build_os='darwin21.1.0' +SED='/usr/bin/sed' +Xsed='/usr/bin/sed -e 1s/^X//' +GREP='/usr/bin/grep' +EGREP='/usr/bin/grep -E' +FGREP='/usr/bin/grep -F' +LD='ld' +NM='/usr/bin/nm -B' +LN_S='ln -s' +max_cmd_len='786432' +ac_objext='o' +exeext='' +lt_unset='unset' +lt_SP2NL='tr \040 \012' +lt_NL2SP='tr \015\012 \040\040' +lt_cv_to_host_file_cmd='func_convert_file_noop' +lt_cv_to_tool_file_cmd='func_convert_file_noop' +reload_flag=' -r' +reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +OBJDUMP='objdump' +deplibs_check_method='pass_all' +file_magic_cmd='$MAGIC_CMD' +file_magic_glob='' +want_nocaseglob='no' +DLLTOOL='false' +sharedlib_from_linklib_cmd='printf %s\n' +AR='ar' +AR_FLAGS='cru' +archiver_list_spec='' +STRIP='strip' +RANLIB='ranlib' +old_postinstall_cmds='chmod 644 $oldlib~$RANLIB $tool_oldlib' +old_postuninstall_cmds='' +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +lock_old_archive_extraction='yes' +CC='xcrun clang' +CFLAGS=' -Wall -fexceptions' +compiler='g++' +GCC='yes' +lt_cv_sys_global_symbol_pipe='sed -n -e '\''s/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_cdecl='sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* 
\(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import='' +lt_cv_sys_global_symbol_to_c_name_address='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_nm_interface='BSD nm' +nm_file_list_spec='@' +lt_sysroot='' +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' +objdir='.libs' +MAGIC_CMD='file' +lt_prog_compiler_no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions' +lt_prog_compiler_pic=' -fno-common -DPIC' +lt_prog_compiler_wl='-Wl,' +lt_prog_compiler_static='' +lt_cv_prog_compiler_c_o='yes' +need_locks='no' +MANIFEST_TOOL=':' +DSYMUTIL='dsymutil' +NMEDIT='nmedit' +LIPO='lipo' +OTOOL='otool' +OTOOL64=':' +libext='a' +shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +extract_expsyms_cmds='' +archive_cmds_need_lc='no' +enable_shared_with_static_runtimes='no' +export_dynamic_flag_spec='' +whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object='no' +old_archive_from_new_cmds='' +old_archive_from_expsyms_cmds='' +archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds='sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds='sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld='no' +allow_undefined_flag='$wl-flat_namespace $wl-undefined ${wl}suppress' +no_undefined_flag='' +hardcode_libdir_flag_spec='' +hardcode_libdir_separator='' +hardcode_direct='no' +hardcode_direct_absolute='no' +hardcode_minus_L='no' +hardcode_shlibpath_var='unsupported' +hardcode_automatic='yes' +inherit_rpath='no' +link_all_deplibs='yes' +always_export_symbols='no' +export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms='' +prelink_cmds='' +postlink_cmds='' +file_list_spec='' +variables_saved_for_relink='PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH' +need_lib_prefix='no' +need_version='no' +version_type='darwin' +runpath_var='' +shlibpath_var='DYLD_LIBRARY_PATH' +shlibpath_overrides_runpath='yes' +libname_spec='lib$name' +library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +soname_spec='$libname$release$major$shared_ext' +install_override_mode='' +postinstall_cmds='' +postuninstall_cmds='' +finish_cmds='' +finish_eval='' +hardcode_into_libs='no' +sys_lib_search_path_spec='/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/13.0.0 /usr/local/lib' 
+configure_time_dlsearch_path='/usr/local/lib /lib /usr/lib' +configure_time_lt_sys_library_path='' +hardcode_action='immediate' +enable_dlopen='unknown' +enable_dlopen_self='unknown' +enable_dlopen_self_static='unknown' +old_striplib='strip -S' +striplib='strip -x' +compiler_lib_search_dirs='' +predep_objects='' +postdep_objects='' +predeps='' +postdeps='' +compiler_lib_search_path='' +LD_CXX='ld' +reload_flag_CXX=' -r' +reload_cmds_CXX='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +old_archive_cmds_CXX='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +compiler_CXX='g++' +GCC_CXX='yes' +lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' +lt_prog_compiler_pic_CXX=' -fno-common -DPIC' +lt_prog_compiler_wl_CXX='-Wl,' +lt_prog_compiler_static_CXX='' +lt_cv_prog_compiler_c_o_CXX='yes' +archive_cmds_need_lc_CXX='no' +enable_shared_with_static_runtimes_CXX='no' +export_dynamic_flag_spec_CXX='' +whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object_CXX='no' +old_archive_from_new_cmds_CXX='' +old_archive_from_expsyms_cmds_CXX='' +archive_cmds_CXX='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds_CXX='sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds_CXX='sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld_CXX='no' +allow_undefined_flag_CXX='$wl-flat_namespace $wl-undefined ${wl}suppress' +no_undefined_flag_CXX='' +hardcode_libdir_flag_spec_CXX='' +hardcode_libdir_separator_CXX='' +hardcode_direct_CXX='no' +hardcode_direct_absolute_CXX='no' +hardcode_minus_L_CXX='no' +hardcode_shlibpath_var_CXX='unsupported' +hardcode_automatic_CXX='yes' +inherit_rpath_CXX='no' +link_all_deplibs_CXX='yes' +always_export_symbols_CXX='no' +export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms_CXX='' +prelink_cmds_CXX='' +postlink_cmds_CXX='' +file_list_spec_CXX='' +hardcode_action_CXX='immediate' +compiler_lib_search_dirs_CXX='' +predep_objects_CXX='' +postdep_objects_CXX='' +predeps_CXX='' +postdeps_CXX='' +compiler_lib_search_path_CXX='' + +LTCC='xcrun clang' +LTCFLAGS='' +compiler='xcrun clang' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# Quote evaled strings. 
+for var in SHELL ECHO PATH_SEPARATOR SED GREP EGREP FGREP LD NM LN_S lt_SP2NL lt_NL2SP reload_flag OBJDUMP deplibs_check_method file_magic_cmd file_magic_glob want_nocaseglob DLLTOOL sharedlib_from_linklib_cmd AR AR_FLAGS archiver_list_spec STRIP RANLIB CC CFLAGS compiler lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl lt_cv_sys_global_symbol_to_import lt_cv_sys_global_symbol_to_c_name_address lt_cv_sys_global_symbol_to_c_name_address_lib_prefix lt_cv_nm_interface nm_file_list_spec lt_cv_truncate_bin lt_prog_compiler_no_builtin_flag lt_prog_compiler_pic lt_prog_compiler_wl lt_prog_compiler_static lt_cv_prog_compiler_c_o need_locks MANIFEST_TOOL DSYMUTIL NMEDIT LIPO OTOOL OTOOL64 shrext_cmds export_dynamic_flag_spec whole_archive_flag_spec compiler_needs_object with_gnu_ld allow_undefined_flag no_undefined_flag hardcode_libdir_flag_spec hardcode_libdir_separator exclude_expsyms include_expsyms file_list_spec variables_saved_for_relink libname_spec library_names_spec soname_spec install_override_mode finish_eval old_striplib striplib compiler_lib_search_dirs predep_objects postdep_objects predeps postdeps compiler_lib_search_path LD_CXX reload_flag_CXX compiler_CXX lt_prog_compiler_no_builtin_flag_CXX lt_prog_compiler_pic_CXX lt_prog_compiler_wl_CXX lt_prog_compiler_static_CXX lt_cv_prog_compiler_c_o_CXX export_dynamic_flag_spec_CXX whole_archive_flag_spec_CXX compiler_needs_object_CXX with_gnu_ld_CXX allow_undefined_flag_CXX no_undefined_flag_CXX hardcode_libdir_flag_spec_CXX hardcode_libdir_separator_CXX exclude_expsyms_CXX include_expsyms_CXX file_list_spec_CXX compiler_lib_search_dirs_CXX predep_objects_CXX postdep_objects_CXX predeps_CXX postdeps_CXX compiler_lib_search_path_CXX; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED \"\$sed_quote_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds old_postinstall_cmds old_postuninstall_cmds old_archive_cmds extract_expsyms_cmds old_archive_from_new_cmds old_archive_from_expsyms_cmds archive_cmds archive_expsym_cmds module_cmds module_expsym_cmds export_symbols_cmds prelink_cmds postlink_cmds postinstall_cmds postuninstall_cmds finish_cmds sys_lib_search_path_spec configure_time_dlsearch_path configure_time_lt_sys_library_path reload_cmds_CXX old_archive_cmds_CXX old_archive_from_new_cmds_CXX old_archive_from_expsyms_cmds_CXX archive_cmds_CXX archive_expsym_cmds_CXX module_cmds_CXX module_expsym_cmds_CXX export_symbols_cmds_CXX prelink_cmds_CXX postlink_cmds_CXX; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +ac_aux_dir='/Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='libffi' + VERSION='3.3' + RM='rm -f' + ofile='libtool' + + + + + +TARGETDIR="arm" + + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "fficonfig.h") CONFIG_HEADERS="$CONFIG_HEADERS fficonfig.h" ;; + "buildir") CONFIG_COMMANDS="$CONFIG_COMMANDS buildir" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "include") CONFIG_COMMANDS="$CONFIG_COMMANDS include" ;; + "src") CONFIG_COMMANDS="$CONFIG_COMMANDS src" ;; + "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; + "include/ffi.h") CONFIG_FILES="$CONFIG_FILES include/ffi.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "testsuite/Makefile") CONFIG_FILES="$CONFIG_FILES testsuite/Makefile" ;; + "man/Makefile") CONFIG_FILES="$CONFIG_FILES man/Makefile" ;; + "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; + "libffi.pc") CONFIG_FILES="$CONFIG_FILES libffi.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. 
+if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +cat >>"$ac_tmp/subs1.awk" <<\_ACAWK && +S["am__EXEEXT_FALSE"]="" +S["am__EXEEXT_TRUE"]="#" +S["LTLIBOBJS"]="" +S["LIBOBJS"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE"]="#" +S["LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE"]="#" +S["LIBFFI_BUILD_VERSIONED_SHLIB_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_TRUE"]="#" +S["OPT_LDFLAGS"]="" +S["SECTION_LDFLAGS"]="" +S["toolexeclibdir"]="${libdir}" +S["toolexecdir"]="${libdir}/gcc-lib/$(target_alias)" +S["FFI_DEBUG_FALSE"]="" +S["FFI_DEBUG_TRUE"]="#" +S["TARGET_OBJ"]=" src/arm/ffi.lo src/arm/sysv.lo" +S["TARGETDIR"]="arm" +S["TARGET"]="ARM" +S["BUILD_DOCS_FALSE"]="" +S["BUILD_DOCS_TRUE"]="#" +S["FFI_EXEC_TRAMPOLINE_TABLE"]="1" +S["FFI_EXEC_TRAMPOLINE_TABLE_FALSE"]="#" +S["FFI_EXEC_TRAMPOLINE_TABLE_TRUE"]="" +S["sys_symbol_underscore"]="yes" +S["HAVE_LONG_DOUBLE_VARIANT"]="0" +S["HAVE_LONG_DOUBLE"]="0" +S["ALLOCA"]="" +S["AM_LTLDFLAGS"]="" +S["AM_RUNTESTFLAGS"]="" +S["TESTSUBDIR_FALSE"]="#" +S["TESTSUBDIR_TRUE"]="" +S["MAINT"]="#" +S["MAINTAINER_MODE_FALSE"]="" +S["MAINTAINER_MODE_TRUE"]="#" +S["PRTDIAG"]="" +S["CXXCPP"]="g++ -E" +S["CPP"]="xcrun clang -E" +S["LT_SYS_LIBRARY_PATH"]="" +S["OTOOL64"]=":" +S["OTOOL"]="otool" +S["LIPO"]="lipo" +S["NMEDIT"]="nmedit" +S["DSYMUTIL"]="dsymutil" +S["MANIFEST_TOOL"]=":" +S["RANLIB"]="ranlib" +S["ac_ct_AR"]="ar" +S["AR"]="ar" +S["DLLTOOL"]="false" +S["OBJDUMP"]="objdump" +S["LN_S"]="ln -s" +S["NM"]="/usr/bin/nm -B" +S["ac_ct_DUMPBIN"]="" +S["DUMPBIN"]="" +S["LD"]="ld" +S["FGREP"]="/usr/bin/grep -F" +S["EGREP"]="/usr/bin/grep -E" +S["GREP"]="/usr/bin/grep" +S["SED"]="/usr/bin/sed" +S["LIBTOOL"]="$(SHELL) $(top_builddir)/libtool" +S["am__fastdepCCAS_FALSE"]="" +S["am__fastdepCCAS_TRUE"]="#" +S["CCASDEPMODE"]="depmode=none" +S["CCASFLAGS"]="" +S["CCAS"]="xcrun clang" +S["am__fastdepCXX_FALSE"]="" +S["am__fastdepCXX_TRUE"]="#" +S["CXXDEPMODE"]="depmode=none" +S["ac_ct_CXX"]="g++" +S["CXXFLAGS"]="-g -O2" +S["CXX"]="g++" +S["am__fastdepCC_FALSE"]="" +S["am__fastdepCC_TRUE"]="#" +S["CCDEPMODE"]="depmode=none" +S["am__nodep"]="" +S["AMDEPBACKSLASH"]="" +S["AMDEP_FALSE"]="" +S["AMDEP_TRUE"]="#" +S["am__quote"]="" +S["am__include"]="include" +S["DEPDIR"]=".deps" +S["OBJEXT"]="o" +S["EXEEXT"]="" +S["ac_ct_CC"]="xcrun clang" +S["CPPFLAGS"]="-DRUBY_EXTCONF_H=\\\"extconf.h\\\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT " +S["LDFLAGS"]="-L/usr/local/opt/libffi/lib" +S["CFLAGS"]=" -Wall -fexceptions" +S["CC"]="xcrun clang" +S["AM_BACKSLASH"]="\\" +S["AM_DEFAULT_VERBOSITY"]="1" +S["AM_DEFAULT_V"]="$(AM_DEFAULT_VERBOSITY)" +S["AM_V"]="$(V)" +S["am__untar"]="$${TAR-tar} xf -" +S["am__tar"]="$${TAR-tar} chof - \"$$tardir\"" +S["AMTAR"]="$${TAR-tar}" +S["am__leading_dot"]="." 
+S["SET_MAKE"]="" +S["AWK"]="awk" +S["mkdir_p"]="$(MKDIR_P)" +S["MKDIR_P"]="/opt/homebrew/bin/gmkdir -p" +S["INSTALL_STRIP_PROGRAM"]="$(install_sh) -c -s" +S["STRIP"]="strip" +S["install_sh"]="${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh" +S["MAKEINFO"]="${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo" +S["AUTOHEADER"]="${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader" +S["AUTOMAKE"]="${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15" +S["AUTOCONF"]="${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf" +S["ACLOCAL"]="${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15" +S["VERSION"]="3.3" +S["PACKAGE"]="libffi" +S["CYGPATH_W"]="echo" +S["am__isrc"]=" -I$(srcdir)" +S["INSTALL_DATA"]="${INSTALL} -m 644" +S["INSTALL_SCRIPT"]="${INSTALL}" +S["INSTALL_PROGRAM"]="${INSTALL}" +S["ax_enable_builddir_sed"]="sed" +S["target_os"]="darwin21.1.0" +S["target_vendor"]="apple" +S["target_cpu"]="arm" +S["target"]="arm-apple-darwin21.1.0" +S["host_os"]="darwin21.1.0" +S["host_vendor"]="apple" +S["host_cpu"]="arm" +S["host"]="arm-apple-darwin21.1.0" +S["build_os"]="darwin21.1.0" +S["build_vendor"]="apple" +S["build_cpu"]="arm" +S["build"]="arm-apple-darwin21.1.0" +S["target_alias"]="" +S["host_alias"]="" +S["build_alias"]="" +S["LIBS"]="" +S["ECHO_T"]="" +S["ECHO_N"]="" +S["ECHO_C"]="\\c" +S["DEFS"]="-DHAVE_CONFIG_H" +S["mandir"]="${datarootdir}/man" +S["localedir"]="${datarootdir}/locale" +S["libdir"]="${exec_prefix}/lib" +S["psdir"]="${docdir}" +S["pdfdir"]="${docdir}" +S["dvidir"]="${docdir}" +S["htmldir"]="${docdir}" +S["infodir"]="${datarootdir}/info" +S["docdir"]="${datarootdir}/doc/${PACKAGE_TARNAME}" +S["oldincludedir"]="/usr/include" +S["includedir"]="${prefix}/include" +S["runstatedir"]="${localstatedir}/run" +S["localstatedir"]="${prefix}/var" +S["sharedstatedir"]="${prefix}/com" +S["sysconfdir"]="${prefix}/etc" +S["datadir"]="${datarootdir}" +S["datarootdir"]="${prefix}/share" +S["libexecdir"]="${exec_prefix}/libexec" +S["sbindir"]="${exec_prefix}/sbin" +S["bindir"]="${exec_prefix}/bin" +S["program_transform_name"]="s,x,x," +S["prefix"]="/usr/local" +S["exec_prefix"]="${prefix}" +S["PACKAGE_URL"]="" +S["PACKAGE_BUGREPORT"]="http://github.com/libffi/libffi/issues" +S["PACKAGE_STRING"]="libffi 3.3" +S["PACKAGE_VERSION"]="3.3" +S["PACKAGE_TARNAME"]="libffi" +S["PACKAGE_NAME"]="libffi" +S["PATH_SEPARATOR"]=":" +S["SHELL"]="/bin/sh" +_ACAWK +cat >>"$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +D["PACKAGE_NAME"]=" \"libffi\"" +D["PACKAGE_TARNAME"]=" \"libffi\"" +D["PACKAGE_VERSION"]=" \"3.3\"" +D["PACKAGE_STRING"]=" \"libffi 3.3\"" +D["PACKAGE_BUGREPORT"]=" \"http://github.com/libffi/libffi/issues\"" +D["PACKAGE_URL"]=" \"\"" +D["PACKAGE"]=" \"libffi\"" +D["VERSION"]=" \"3.3\"" +D["STDC_HEADERS"]=" 1" +D["HAVE_SYS_TYPES_H"]=" 1" +D["HAVE_SYS_STAT_H"]=" 1" +D["HAVE_STDLIB_H"]=" 1" +D["HAVE_STRING_H"]=" 1" +D["HAVE_MEMORY_H"]=" 1" +D["HAVE_STRINGS_H"]=" 1" +D["HAVE_INTTYPES_H"]=" 1" +D["HAVE_STDINT_H"]=" 1" +D["HAVE_UNISTD_H"]=" 1" +D["HAVE_DLFCN_H"]=" 1" +D["LT_OBJDIR"]=" \".libs/\"" +D["SIZEOF_SIZE_T"]=" 8" +D["HAVE_SYS_MMAN_H"]=" 1" +D["HAVE_MMAP"]=" 1" +D["HAVE_MKOSTEMP"]=" 1" +D["HAVE_MMAP_FILE"]=" 1" +D["HAVE_MMAP_ANON"]=" 1" +D["STDC_HEADERS"]=" 1" +D["HAVE_MEMCPY"]=" 1" +D["HAVE_ALLOCA_H"]=" 1" +D["HAVE_ALLOCA"]=" 1" +D["SIZEOF_DOUBLE"]=" 8" +D["SIZEOF_LONG_DOUBLE"]=" 8" +D["SYMBOL_UNDERSCORE"]=" 1" +D["FFI_EXEC_TRAMPOLINE_TABLE"]=" 1" +D["HAVE_RO_EH_FRAME"]=" 1" +D["EH_FRAME_FLAGS"]=" \"a\"" +D["HAVE_HIDDEN_VISIBILITY_ATTRIBUTE"]=" 1" + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*([\t (]|$)/ { + line = $ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. 
+ test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} + ac_datarootdir_hack=' + s&@datadir@&${datarootdir}&g + s&@docdir@&${datarootdir}/doc/${PACKAGE_TARNAME}&g + s&@infodir@&${datarootdir}/info&g + s&@localedir@&${datarootdir}/locale&g + s&@mandir@&${datarootdir}/man&g + s&\${datarootdir}&${prefix}/share&g' ;; +esac +ac_sed_extra=" + +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. +_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "buildir":C) ac_top_srcdir="$ax_enable_builddir_srcdir" + if test ".$ax_enable_builddir" = ".." ; then + if test -f "$top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - left untouched" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - left untouched" >&6;} + else + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - not created" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - not created" >&6;} + fi + else + if test -f "$ac_top_srcdir/Makefile" ; then + a=`grep "^VERSION " "$ac_top_srcdir/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$ac_top_srcdir/Makefile" + fi + if test -f "$ac_top_srcdir/Makefile" ; then + echo "$ac_top_srcdir/Makefile : $ac_top_srcdir/Makefile.in" > $tmp/conftemp.mk + echo " @ echo 'REMOVED,,,' >\$@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$ac_top_srcdir/Makefile" >/dev/null + then rm $ac_top_srcdir/Makefile ; fi + cp $tmp/conftemp.mk $ac_top_srcdir/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$ac_top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: create top_srcdir/Makefile guessed from local Makefile" >&5 +$as_echo "$as_me: create top_srcdir/Makefile guessed from local Makefile" >&6;} + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[ ]*[\\#]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[:=]/!d +/^\\./d +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/ \\1 \\1-all\\2/g +s/^\\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/\\1 \\1-all\\2/ +s/ / /g +/^all all-all[ :]/i\\ +all-configured : all-all +s/ [a-zA-Z0-9-]*-all [a-zA-Z0-9-]*-all-all//g +/-all-all/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +/dist-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/dist-[a-zA-Z0-9]*-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/distclean-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefile.sed~" ## DEBUGGING + $ax_enable_builddir_sed -f $tmp/conftemp.sed Makefile >$ac_top_srcdir/Makefile + if test -f "$ac_top_srcdir/Makefile.mk" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&5 +$as_echo "$as_me: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&6;} + cat $ac_top_srcdir/Makefile.mk >>$ac_top_srcdir/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$ac_top_srcdir/Makefile + # sanity check + if grep '^; echo "MAKE ' $ac_top_srcdir/Makefile >/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: buggy sed found - it deletes tab in \"a\" text parts" >&5 +$as_echo "$as_me: buggy sed found - it deletes tab in \"a\" text parts" >&6;} + $ax_enable_builddir_sed -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $ac_top_srcdir/Makefile \ + >$ac_top_srcdir/Makefile~ + (test -s $ac_top_srcdir/Makefile~ && mv $ac_top_srcdir/Makefile~ $ac_top_srcdir/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [^|]* | *$ax_enable_builddir *\$!$xxxx ...... $ax_enable_builddir!" 
>$tmp/conftemp.sed + $ax_enable_builddir_sed -f "$tmp/conftemp.sed" "$ac_top_srcdir/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$ac_top_srcdir/makefiles.out~" ## DEBUGGING + if cmp -s "$ac_top_srcdir/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: keeping top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: keeping top_srcdir/Makefile from earlier configure" >&6;} + rm "$tmp/mkfile.tmp" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: reusing top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: reusing top_srcdir/Makefile from earlier configure" >&6;} + mv "$tmp/mkfile.tmp" "$ac_top_srcdir/Makefile" + fi + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&5 +$as_echo "$as_me: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&6;} + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$ax_enable_builddir" >>$ac_top_srcdir/Makefile + fi + ;; + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named 'Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running 'make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "$am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec=$shared_archive_member_spec + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. 
+NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. 
+OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? 
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. 
+postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. 
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. 
+always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + "include":C) test -d include || mkdir include ;; + "src":C) +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/fficonfig.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/fficonfig.h new file mode 100644 index 0000000..3589fe5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/fficonfig.h @@ -0,0 +1,218 @@ +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "a" + +/* Define this if you want extra debugging. */ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +#define FFI_EXEC_TRAMPOLINE_TABLE 1 + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +/* #undef FFI_MMAP_EXEC_WRIT */ + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. */ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +/* #undef HAVE_AS_CFI_PSEUDO_OP */ + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. 
*/ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +/* #undef HAVE_AS_X86_PCREL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +/* #undef HAVE_LONG_DOUBLE */ + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +#define HAVE_MKOSTEMP 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if your compiler supports pointer authentication. */ +/* #undef HAVE_PTRAUTH */ + +/* Define if .eh_frame sections should be read-only. */ +#define HAVE_RO_EH_FRAME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 8 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. 
+ STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/Makefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/Makefile new file mode 100644 index 0000000..baf5368 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/Makefile @@ -0,0 +1,605 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# include/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + + +VPATH = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.1.0 +host_triplet = arm-apple-darwin21.1.0 +target_triplet = arm-apple-darwin21.1.0 +subdir = include +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = ffi.h +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed 
"s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(includedir)" +HEADERS = $(nodist_include_HEADERS) $(noinst_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/ffi.h.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = xcrun clang +CCAS = xcrun clang +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = xcrun clang -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = 
-L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include +abs_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include +abs_top_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = xcrun clang +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.1.0 +build_alias = +build_cpu = arm +build_os = darwin21.1.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.1.0 +host_alias = +host_cpu = arm +host_os = darwin21.1.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.1.0 +target_alias = +target_cpu = arm +target_os = darwin21.1.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. 
+top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +DISTCLEANFILES = ffitarget.h +noinst_HEADERS = ffi_common.h ffi_cfi.h +EXTRA_DIST = ffi.h.in +nodist_include_HEADERS = ffi.h ffitarget.h +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign include/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +ffi.h: $(top_builddir)/config.status $(srcdir)/ffi.h.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-nodist_includeHEADERS: $(nodist_include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \ + done + +uninstall-nodist_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + 
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-nodist_includeHEADERS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-nodist_includeHEADERS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ + clean-libtool cscopelist-am ctags ctags-am distclean \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man \ + install-nodist_includeHEADERS install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ + uninstall-am uninstall-nodist_includeHEADERS + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/ffi.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/ffi.h new file mode 100644 index 0000000..b876025 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/ffi.h @@ -0,0 +1,523 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3 - Copyright (c) 2011, 2014, 2019 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE.
+ + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef ARM +#define ARM +#endif + +/* ---- System configuration information --------------------------------- */ + +#include <ffitarget.h> + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include <stddef.h> +#include <limits.h> + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file.
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 0 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 0 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +#if !FFI_NATIVE_RAW_API +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue) __attribute__((deprecated)); +#endif + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) __attribute__((deprecated)); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +#if defined(PA_LINUX) || defined(PA_HPUX) +#define FFI_CLOSURE_PTR(X) ((void *)((unsigned int)(X) | 2)) +#define FFI_RESTORE_PTR(X) ((void *)((unsigned int)(X) & ~3)) +#else +#define FFI_CLOSURE_PTR(X) (X) +#define FFI_RESTORE_PTR(X) (X) +#endif + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +#if !FFI_NATIVE_RAW_API +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) __attribute__((deprecated)); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) __attribute__((deprecated)); +#endif + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 0 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/ffitarget.h new file mode 100644 index 0000000..cb57b84 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/include/ffitarget.h @@ -0,0 +1,89 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
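The ffi.h header added above declares libffi's public call surface: ffi_type objects describe argument and return types, ffi_prep_cif packs them into a call interface (ffi_cif), ffi_call drives a forward call through an FFI_FN-cast function pointer, and ffi_closure_alloc / ffi_prep_closure_loc wrap a C handler in a freshly built trampoline. The following minimal C sketch shows how those pieces fit together; add_ints and add_handler are invented purely for illustration, and the program is assumed to be compiled against this vendored libffi 3.3 (for example with the flags advertised by the libffi.pc file that follows).

#include <ffi.h>
#include <stdio.h>

/* Hypothetical callee used only for this sketch. */
static int add_ints(int a, int b) { return a + b; }

/* Closure handler: libffi hands us the cif, a slot for the return value,
   an array of pointers to the incoming arguments, and the user_data
   registered with ffi_prep_closure_loc. */
static void add_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void)cif; (void)user_data;
    *(ffi_arg *)ret = *(int *)args[0] + *(int *)args[1];
}

int main(void)
{
    /* Describe the signature int(int, int) once; the same cif serves both
       the forward call and the closure below. */
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

    /* Forward call: marshal two ints and call add_ints through libffi. */
    int a = 2, b = 3;
    void *arg_values[2] = { &a, &b };
    ffi_arg result; /* small integer returns are widened to ffi_arg */
    ffi_call(&cif, FFI_FN(add_ints), &result, arg_values);
    printf("add_ints(2, 3) = %ld\n", (long)result);

    /* Closure: synthesize a callable int(*)(int, int) around add_handler. */
    void *code = NULL;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
    if (closure != NULL) {
        if (ffi_prep_closure_loc(closure, &cif, add_handler, NULL, code) == FFI_OK) {
            int (*fn)(int, int) = (int (*)(int, int))code;
            printf("closure(4, 5) = %d\n", fn(4, 5));
        }
        ffi_closure_free(closure);
    }
    return 0;
}

Because return values narrower than a machine word come back widened, the sketch reads the forward-call result through an ffi_arg rather than an int; that convention follows the ffi_call documentation rather than anything specific to this darwin21 build.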
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#if defined(__ARM_PCS_VFP) || defined(_M_ARM) + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#ifndef _M_ARM +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#ifdef _MSC_VER +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_FUNCTION 12 +#else +#define FFI_TRAMPOLINE_SIZE 12 +#endif +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libffi.pc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libffi.pc new file mode 100644 index 0000000..1f9d5ff --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libffi.pc @@ -0,0 +1,11 @@ +prefix=/usr/local +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +toolexeclibdir=${libdir} +includedir=${prefix}/include + +Name: libffi +Description: Library supporting Foreign Function Interfaces +Version: 3.3 +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libffi_convenience.la b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libffi_convenience.la new file mode 100644 index 0000000..9055742 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libffi_convenience.la @@ -0,0 +1,41 @@ +# libffi_convenience.la - a libtool library file +# Generated 
by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='' + +# Names of this library. +library_names='' + +# The name of the static archive. +old_library='libffi_convenience.a' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags=' ' + +# Libraries that this one depends upon. +dependency_libs=' -L/usr/local/opt/libffi/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libffi_convenience. +current= +age= +revision= + +# Is this an already installed library? +installed=no + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='' diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libtool b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libtool new file mode 100755 index 0000000..781b779 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/libtool @@ -0,0 +1,11813 @@ +#! /bin/sh +# Generated automatically by config.status (libffi) 3.3 +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + + +# The names of the tagged configurations supported by this script. +available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: ${LT_SYS_LIBRARY_PATH=""} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=2.4.6 +macro_revision=2.4.6 + +# Whether or not to build shared libraries. +build_libtool_libs=yes + +# Whether or not to build static libraries. +build_old_libs=no + +# What type of objects to build. +pic_mode=yes + +# Whether or not to optimize for fast installation. +fast_install=needless + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec= + +# Shell to use when invoking shell scripts. +SHELL="/bin/sh" + +# An echo program that protects backslashes. +ECHO="printf %s\\n" + +# The PATH separator for the build system. +PATH_SEPARATOR=":" + +# The host system.
+host_alias= +host=arm-apple-darwin21.1.0 +host_os=darwin21.1.0 + +# The build system. +build_alias= +build=arm-apple-darwin21.1.0 +build_os=darwin21.1.0 + +# A sed program that does not truncate output. +SED="/usr/bin/sed" + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP="/usr/bin/grep" + +# An ERE matcher. +EGREP="/usr/bin/grep -E" + +# A literal string matcher. +FGREP="/usr/bin/grep -F" + +# A BSD- or MS-compatible name lister. +NM="/usr/bin/nm -B" + +# Whether we need soft or hard links. +LN_S="ln -s" + +# What is the maximum length of a command? +max_cmd_len=786432 + +# Object file suffix (normally "o"). +objext=o + +# Executable file suffix (normally ""). +exeext= + +# whether the shell understands "unset". +lt_unset=unset + +# turn spaces into newlines. +SP2NL="tr \\040 \\012" + +# turn newlines into spaces. +NL2SP="tr \\015\\012 \\040\\040" + +# convert $build file names to $host format. +to_host_file_cmd=func_convert_file_noop + +# convert $build files to toolchain format. +to_tool_file_cmd=func_convert_file_noop + +# An object symbol dumper. +OBJDUMP="objdump" + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method="pass_all" + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd="\$MAGIC_CMD" + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob="" + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob="no" + +# DLL creation program. +DLLTOOL="false" + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd="printf %s\\n" + +# The archiver. +AR="ar" + +# Flags to create an archive. +AR_FLAGS="cru" + +# How to feed a file listing to the archiver. +archiver_list_spec="" + +# A symbol stripping program. +STRIP="strip" + +# Commands used to install an old-style archive. +RANLIB="ranlib" +old_postinstall_cmds="chmod 644 \$oldlib~\$RANLIB \$tool_oldlib" +old_postuninstall_cmds="" + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=yes + +# A C compiler. +LTCC="xcrun clang" + +# LTCC compiler flags. +LTCFLAGS=" -Wall -fexceptions" + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe="sed -n -e 's/^.*[ ]\\([BCDEGRST][BCDEGRST]*\\)[ ][ ]*_\\([_A-Za-z][_A-Za-z0-9]*\\)\$/\\1 _\\2 \\2/p' | sed '/ __gnu_lto/d'" + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl="sed -n -e 's/^T .* \\(.*\\)\$/extern int \\1();/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/extern char \\1;/p'" + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import="" + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address="sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p'" + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(lib.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/ {\"lib\\1\", (void *) \\&\\1},/p'" + +# The name lister interface. +nm_interface="BSD nm" + +# Specify filename containing input files for $NM. 
+nm_file_list_spec="@" + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot= + +# Command to truncate a binary pipe. +lt_truncate_bin="/bin/dd bs=4096 count=1" + +# The name of the directory that contains temporary libtool files. +objdir=.libs + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=file + +# Must we lock files when doing compilation? +need_locks="no" + +# Manifest tool. +MANIFEST_TOOL=":" + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL="dsymutil" + +# Tool to change global to local symbols on Mac OS X. +NMEDIT="nmedit" + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO="lipo" + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL="otool" + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=":" + +# Old archive suffix (normally "a"). +libext=a + +# Shared library suffix (normally ".so"). +shrext_cmds="\`test .\$module = .yes && echo .so || echo .dylib\`" + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds="" + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink="PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + +# Do we need the "lib" prefix for modules? +need_lib_prefix=no + +# Do we need a version for libraries? +need_version=no + +# Library versioning type. +version_type=darwin + +# Shared library runtime path variable. +runpath_var= + +# Shared library path variable. +shlibpath_var=DYLD_LIBRARY_PATH + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=yes + +# Format of library name prefix. +libname_spec="lib\$name" + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec="\$libname\$release\$major\$shared_ext \$libname\$shared_ext" + +# The coded name of the library, if different from the real name. +soname_spec="\$libname\$release\$major\$shared_ext" + +# Permission mode override for installation of shared libraries. +install_override_mode="" + +# Command to use after installation of a shared archive. +postinstall_cmds="" + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds="" + +# Commands used to finish a libtool library installation in a directory. +finish_cmds="" + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval="" + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=no + +# Compile-time system search path for libraries. +sys_lib_search_path_spec="/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/13.0.0 /usr/local/lib" + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec="/usr/local/lib /lib /usr/lib" + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path="" + +# Whether dlopen is supported. +dlopen_support=unknown + +# Whether dlopen of programs is supported. +dlopen_self=unknown + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=unknown + +# Commands to strip libraries. +old_striplib="strip -S" +striplib="strip -x" + + +# The linker used to build libraries. +LD="ld" + +# How to create reloadable object files. 
+reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="xcrun clang" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin -fno-rtti -fno-exceptions" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. +archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-flat_namespace \$wl-undefined \${wl}suppress" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. 
+hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs="" + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects="" +postdep_objects="" +predeps="" +postdeps="" + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path="" + +# ### END LIBTOOL CONFIG + + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +#! /bin/sh +## DO NOT EDIT - This file generated from ./build-aux/ltmain.in +## by inline-source v2014-01-03.01 + +# libtool (GNU libtool) 2.4.6 +# Provide generalized library-building support services. 
+# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.6 Debian-2.4.6-2" +package_revision=2.4.6 + + +## ------ ## +## Usage. ## +## ------ ## + +# Run './libtool --help' for help with using this script from the +# command line. + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# After configure completes, it has a better idea of some of the +# shell tools we need than the defaults used by the functions shared +# with bootstrap, so set those here where they can still be over- +# ridden by the user, but otherwise take precedence. + +: ${AUTOCONF="autoconf"} +: ${AUTOMAKE="automake"} + + +## -------------------------- ## +## Source external libraries. ## +## -------------------------- ## + +# Much of our low-level functionality needs to be sourced from external +# libraries, which are installed to $pkgauxdir. + +# Set a version string for this script. +scriptversion=2015-01-20.17; # UTC + +# General shell script boiler plate, and helper functions. +# Written by Gary V. Vaughan, 2004 + +# Copyright (C) 2004-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. + +# As a special exception to the GNU General Public License, if you distribute +# this file as part of a program or library that is built using GNU Libtool, +# you may include this file under the same distribution terms that you use +# for the rest of that program. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# Evaluate this file near the top of your script to gain access to +# the functions and variables defined here: +# +# . 
`echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh +# +# If you need to override any of the default environment variable +# settings, do that before evaluating this file. + + +## -------------------- ## +## Shell normalisation. ## +## -------------------- ## + +# Some shells need a little help to be as Bourne compatible as possible. +# Before doing anything else, make sure all that help has been provided! + +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac +fi + +# NLS nuisances: We save the old values in case they are required later. +_G_user_locale= +_G_safe_locale= +for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test set = \"\${$_G_var+set}\"; then + save_$_G_var=\$$_G_var + $_G_var=C + export $_G_var + _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" + _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" + fi" +done + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Make sure IFS has a sensible default +sp=' ' +nl=' +' +IFS="$sp $nl" + +# There are apparently some retarded systems that use ';' as a PATH separator! +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + + +## ------------------------- ## +## Locate command utilities. ## +## ------------------------- ## + + +# func_executable_p FILE +# ---------------------- +# Check that FILE is an executable regular file. +func_executable_p () +{ + test -f "$1" && test -x "$1" +} + + +# func_path_progs PROGS_LIST CHECK_FUNC [PATH] +# -------------------------------------------- +# Search for either a program that responds to --version with output +# containing "GNU", or else returned by CHECK_FUNC otherwise, by +# trying all the directories in PATH with each of the elements of +# PROGS_LIST. +# +# CHECK_FUNC should accept the path to a candidate program, and +# set $func_check_prog_result if it truncates its output less than +# $_G_path_prog_max characters. +func_path_progs () +{ + _G_progs_list=$1 + _G_check_func=$2 + _G_PATH=${3-"$PATH"} + + _G_path_prog_max=0 + _G_path_prog_found=false + _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} + for _G_dir in $_G_PATH; do + IFS=$_G_save_IFS + test -z "$_G_dir" && _G_dir=. + for _G_prog_name in $_G_progs_list; do + for _exeext in '' .EXE; do + _G_path_prog=$_G_dir/$_G_prog_name$_exeext + func_executable_p "$_G_path_prog" || continue + case `"$_G_path_prog" --version 2>&1` in + *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; + *) $_G_check_func $_G_path_prog + func_path_progs_result=$func_check_prog_result + ;; + esac + $_G_path_prog_found && break 3 + done + done + done + IFS=$_G_save_IFS + test -z "$func_path_progs_result" && { + echo "no acceptable sed could be found in \$PATH" >&2 + exit 1 + } +} + + +# We want to be able to use the functions in this file before configure +# has figured out where the best binaries are kept, which means we have +# to search for them ourselves - except when the results are already set +# where we skip the searches. 
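# A short illustrative note (not part of the generated script): the probes
# below only run while SED and GREP are still empty; in this generated copy
# the LIBTOOL CONFIG section above has already assigned both, and a caller
# can preset them in the environment to the same effect, e.g.:
#
#   SED=/usr/bin/sed GREP=/usr/bin/grep sh ./libtool --mode=compile cc -c foo.c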
+ +# Unless the user overrides by setting SED, search the path for either GNU +# sed, or the sed that truncates its output the least. +test -z "$SED" && { + _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for _G_i in 1 2 3 4 5 6 7; do + _G_sed_script=$_G_sed_script$nl$_G_sed_script + done + echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed + _G_sed_script= + + func_check_prog_sed () + { + _G_path_prog=$1 + + _G_count=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo '' >> conftest.nl + "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin + rm -f conftest.sed + SED=$func_path_progs_result +} + + +# Unless the user overrides by setting GREP, search the path for either GNU +# grep, or the grep that truncates its output the least. +test -z "$GREP" && { + func_check_prog_grep () + { + _G_path_prog=$1 + + _G_count=0 + _G_path_prog_max=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo 'GREP' >> conftest.nl + "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin + GREP=$func_path_progs_result +} + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# All uppercase variable names are used for environment variables. These +# variables can be overridden by the user before calling a script that +# uses them if a suitable command of that name is not already available +# in the command search PATH. + +: ${CP="cp -f"} +: ${ECHO="printf %s\n"} +: ${EGREP="$GREP -E"} +: ${FGREP="$GREP -F"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} + + +## -------------------- ## +## Useful sed snippets. ## +## -------------------- ## + +sed_dirname='s|/[^/]*$||' +sed_basename='s|^.*/||' + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s|\([`"$\\]\)|\\\1|g' + +# Same as above, but do not quote variable references. +sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. 
+sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' + +# Sed substitution that converts a w32 file name or path +# that contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-'\' parameter expansions in output of sed_double_quote_subst that +# were '\'-ed in input to the same. If an odd number of '\' preceded a +# '$' in input to sed_double_quote_subst, that '$' was protected from +# expansion. Since each input '\' is now two '\'s, look for any number +# of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. +_G_bs='\\' +_G_bs2='\\\\' +_G_bs4='\\\\\\\\' +_G_dollar='\$' +sed_double_backslash="\ + s/$_G_bs4/&\\ +/g + s/^$_G_bs2$_G_dollar/$_G_bs&/ + s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g + s/\n//g" + + +## ----------------- ## +## Global variables. ## +## ----------------- ## + +# Except for the global variables explicitly listed below, the following +# functions in the '^func_' namespace, and the '^require_' namespace +# variables initialised in the 'Resource management' section, sourcing +# this file will not pollute your global namespace with anything +# else. There's no portable way to scope variables in Bourne shell +# though, so actually running these functions will sometimes place +# results into a variable named after the function, and often use +# temporary variables in the '^_G_' namespace. If you are careful to +# avoid using those namespaces casually in your sourcing script, things +# should continue to work as you expect. And, of course, you can freely +# overwrite any of the functions or variables defined here before +# calling anything to customize them. + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +# Allow overriding, eg assuming that you follow the convention of +# putting '$debug_cmd' at the start of all your functions, you can get +# bash to show function call trace with: +# +# debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +debug_cmd=${debug_cmd-":"} +exit_cmd=: + +# By convention, finish your script with: +# +# exit $exit_status +# +# so that you can set exit_status to non-zero if you want to indicate +# something went wrong during execution without actually bailing out at +# the point of failure. +exit_status=$EXIT_SUCCESS + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath=$0 + +# The name of this program. +progname=`$ECHO "$progpath" |$SED "$sed_basename"` + +# Make sure we have an absolute progpath for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` + progdir=`cd "$progdir" && pwd` + progpath=$progdir/$progname + ;; + *) + _G_IFS=$IFS + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS=$_G_IFS + test -x "$progdir/$progname" && break + done + IFS=$_G_IFS + test -n "$progdir" || progdir=`pwd` + progpath=$progdir/$progname + ;; +esac + + +## ----------------- ## +## Standard options. 
## +## ----------------- ## + +# The following options affect the operation of the functions defined +# below, and should be set appropriately depending on run-time para- +# meters passed on the command line. + +opt_dry_run=false +opt_quiet=false +opt_verbose=false + +# Categories 'all' and 'none' are always available. Append any others +# you will pass as the first argument to func_warning from your own +# code. +warning_categories= + +# By default, display warnings according to 'opt_warning_types'. Set +# 'warning_func' to ':' to elide all warnings, or func_fatal_error to +# treat the next displayed warning as a fatal error. +warning_func=func_warn_and_continue + +# Set to 'all' to display all warnings, 'none' to suppress all +# warnings, or a space delimited list of some subset of +# 'warning_categories' to display only the listed warnings. +opt_warning_types=all + + +## -------------------- ## +## Resource management. ## +## -------------------- ## + +# This section contains definitions for functions that each ensure a +# particular resource (a file, or a non-empty configuration variable for +# example) is available, and if appropriate to extract default values +# from pertinent package files. Call them using their associated +# 'require_*' variable to ensure that they are executed, at most, once. +# +# It's entirely deliberate that calling these functions can set +# variables that don't obey the namespace limitations obeyed by the rest +# of this file, in order that that they be as useful as possible to +# callers. + + +# require_term_colors +# ------------------- +# Allow display of bold text on terminals that support it. +require_term_colors=func_require_term_colors +func_require_term_colors () +{ + $debug_cmd + + test -t 1 && { + # COLORTERM and USE_ANSI_COLORS environment variables take + # precedence, because most terminfo databases neglect to describe + # whether color sequences are supported. + test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} + + if test 1 = "$USE_ANSI_COLORS"; then + # Standard ANSI escape sequences + tc_reset='' + tc_bold=''; tc_standout='' + tc_red=''; tc_green='' + tc_blue=''; tc_cyan='' + else + # Otherwise trust the terminfo database after all. + test -n "`tput sgr0 2>/dev/null`" && { + tc_reset=`tput sgr0` + test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` + tc_standout=$tc_bold + test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` + test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` + test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` + test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` + test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` + } + fi + } + + require_term_colors=: +} + + +## ----------------- ## +## Function library. ## +## ----------------- ## + +# This section contains a variety of useful functions to call in your +# scripts. Take note of the portable wrappers for features provided by +# some modern shells, which will fall back to slower equivalents on +# less featureful shells. + + +# func_append VAR VALUE +# --------------------- +# Append VALUE onto the existing contents of VAR. 
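# A minimal usage sketch (illustrative only; 'opts' is a hypothetical
# variable), showing the append-by-name calling convention used throughout
# this script:
#
#   opts="-g"
#   func_append opts " -O2"     # opts is now "-g -O2"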
+ + # We should try to minimise forks, especially on Windows where they are + # unreasonably slow, so skip the feature probes when bash or zsh are + # being used: + if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then + : ${_G_HAVE_ARITH_OP="yes"} + : ${_G_HAVE_XSI_OPS="yes"} + # The += operator was introduced in bash 3.1 + case $BASH_VERSION in + [12].* | 3.0 | 3.0*) ;; + *) + : ${_G_HAVE_PLUSEQ_OP="yes"} + ;; + esac + fi + + # _G_HAVE_PLUSEQ_OP + # Can be empty, in which case the shell is probed, "yes" if += is + # useable or anything else if it does not work. + test -z "$_G_HAVE_PLUSEQ_OP" \ + && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ + && _G_HAVE_PLUSEQ_OP=yes + +if test yes = "$_G_HAVE_PLUSEQ_OP" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_append () + { + $debug_cmd + + eval "$1+=\$2" + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_append () + { + $debug_cmd + + eval "$1=\$$1\$2" + } +fi + + +# func_append_quoted VAR VALUE +# ---------------------------- +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +if test yes = "$_G_HAVE_PLUSEQ_OP"; then + eval 'func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1+=\\ \$func_quote_for_eval_result" + }' +else + func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1=\$$1\\ \$func_quote_for_eval_result" + } +fi + + +# func_append_uniq VAR VALUE +# -------------------------- +# Append unique VALUE onto the existing contents of VAR, assuming +# entries are delimited by the first character of VALUE. For example: +# +# func_append_uniq options " --another-option option-argument" +# +# will only append to $options if " --another-option option-argument " +# is not already present somewhere in $options already (note spaces at +# each end implied by leading space in second argument). +func_append_uniq () +{ + $debug_cmd + + eval _G_current_value='`$ECHO $'$1'`' + _G_delim=`expr "$2" : '\(.\)'` + + case $_G_delim$_G_current_value$_G_delim in + *"$2$_G_delim"*) ;; + *) func_append "$@" ;; + esac +} + + +# func_arith TERM... +# ------------------ +# Set func_arith_result to the result of evaluating TERMs. + test -z "$_G_HAVE_ARITH_OP" \ + && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ + && _G_HAVE_ARITH_OP=yes + +if test yes = "$_G_HAVE_ARITH_OP"; then + eval 'func_arith () + { + $debug_cmd + + func_arith_result=$(( $* )) + }' +else + func_arith () + { + $debug_cmd + + func_arith_result=`expr "$@"` + } +fi + + +# func_basename FILE +# ------------------ +# Set func_basename_result to FILE with everything up to and including +# the last / stripped. +if test yes = "$_G_HAVE_XSI_OPS"; then + # If this shell supports suffix pattern removal, then use it to avoid + # forking. Hide the definitions single quotes in case the shell chokes + # on unsupported syntax... + _b='func_basename_result=${1##*/}' + _d='case $1 in + */*) func_dirname_result=${1%/*}$2 ;; + * ) func_dirname_result=$3 ;; + esac' + +else + # ...otherwise fall back to using sed. 
+ _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' + _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` + if test "X$func_dirname_result" = "X$1"; then + func_dirname_result=$3 + else + func_append func_dirname_result "$2" + fi' +fi + +eval 'func_basename () +{ + $debug_cmd + + '"$_b"' +}' + + +# func_dirname FILE APPEND NONDIR_REPLACEMENT +# ------------------------------------------- +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +eval 'func_dirname () +{ + $debug_cmd + + '"$_d"' +}' + + +# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT +# -------------------------------------------------------- +# Perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# For efficiency, we do not delegate to the functions above but instead +# duplicate the functionality here. +eval 'func_dirname_and_basename () +{ + $debug_cmd + + '"$_b"' + '"$_d"' +}' + + +# func_echo ARG... +# ---------------- +# Echo program name prefixed message. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_echo_all ARG... +# -------------------- +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + + +# func_echo_infix_1 INFIX ARG... +# ------------------------------ +# Echo program name, followed by INFIX on the first line, with any +# additional lines not showing INFIX. +func_echo_infix_1 () +{ + $debug_cmd + + $require_term_colors + + _G_infix=$1; shift + _G_indent=$_G_infix + _G_prefix="$progname: $_G_infix: " + _G_message=$* + + # Strip color escape sequences before counting printable length + for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" + do + test -n "$_G_tc" && { + _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` + _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` + } + done + _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes + + func_echo_infix_1_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_infix_1_IFS + $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 + _G_prefix=$_G_indent + done + IFS=$func_echo_infix_1_IFS +} + + +# func_error ARG... +# ----------------- +# Echo program name prefixed message to standard error. +func_error () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 +} + + +# func_fatal_error ARG... +# ----------------------- +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + $debug_cmd + + func_error "$*" + exit $EXIT_FAILURE +} + + +# func_grep EXPRESSION FILENAME +# ----------------------------- +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $debug_cmd + + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_len STRING +# --------------- +# Set func_len_result to the length of STRING. STRING may not +# start with a hyphen. 
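# A minimal usage sketch (illustrative only): the length is returned in the
# func_len_result variable rather than on stdout, e.g.:
#
#   func_len "libfoo.la"
#   test "$func_len_result" -eq 9 && :   # "libfoo.la" has 9 characters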
+ test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_len () + { + $debug_cmd + + func_len_result=${#1} + }' +else + func_len () + { + $debug_cmd + + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` + } +fi + + +# func_mkdir_p DIRECTORY-PATH +# --------------------------- +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + $debug_cmd + + _G_directory_path=$1 + _G_dir_list= + + if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then + + # Protect directory names starting with '-' + case $_G_directory_path in + -*) _G_directory_path=./$_G_directory_path ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$_G_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + _G_dir_list=$_G_directory_path:$_G_dir_list + + # If the last portion added has no slash in it, the list is done + case $_G_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` + done + _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` + + func_mkdir_p_IFS=$IFS; IFS=: + for _G_dir in $_G_dir_list; do + IFS=$func_mkdir_p_IFS + # mkdir can fail with a 'File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$_G_dir" 2>/dev/null || : + done + IFS=$func_mkdir_p_IFS + + # Bail out if we (or some other process) failed to create a directory. + test -d "$_G_directory_path" || \ + func_fatal_error "Failed to create '$1'" + fi +} + + +# func_mktempdir [BASENAME] +# ------------------------- +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, BASENAME is the basename for that directory. +func_mktempdir () +{ + $debug_cmd + + _G_template=${TMPDIR-/tmp}/${1-$progname} + + if test : = "$opt_dry_run"; then + # Return a directory name, but don't create it in dry-run mode + _G_tmpdir=$_G_template-$$ + else + + # If mktemp works, use that first and foremost + _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` + + if test ! -d "$_G_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + _G_tmpdir=$_G_template-${RANDOM-0}$$ + + func_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$_G_tmpdir" + umask $func_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$_G_tmpdir" || \ + func_fatal_error "cannot create temporary directory '$_G_tmpdir'" + fi + + $ECHO "$_G_tmpdir" +} + + +# func_normal_abspath PATH +# ------------------------ +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +func_normal_abspath () +{ + $debug_cmd + + # These SED scripts presuppose an absolute path with a trailing slash. + _G_pathcar='s|^/\([^/]*\).*$|\1|' + _G_pathcdr='s|^/[^/]*||' + _G_removedotparts=':dotsl + s|/\./|/|g + t dotsl + s|/\.$|/|' + _G_collapseslashes='s|/\{1,\}|/|g' + _G_finalslash='s|/*$|/|' + + # Start from root dir and reassemble the path. 
+ func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` + while :; do + # Processed it all yet? + if test / = "$func_normal_abspath_tpath"; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result"; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + + +# func_notquiet ARG... +# -------------------- +# Echo program name prefixed message only when not in quiet mode. +func_notquiet () +{ + $debug_cmd + + $opt_quiet || func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + + +# func_relative_path SRCDIR DSTDIR +# -------------------------------- +# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. +func_relative_path () +{ + $debug_cmd + + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. 
+ fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=$func_dirname_result + if test -z "$func_relative_path_tlibdir"; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test -n "$func_stripname_result"; then + func_append func_relative_path_result "/$func_stripname_result" + fi + + # Normalisation. If bindir is libdir, return '.' else relative path. + if test -n "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + fi + + test -n "$func_relative_path_result" || func_relative_path_result=. + + : +} + + +# func_quote_for_eval ARG... +# -------------------------- +# Aesthetically quote ARGs to be evaled later. +# This function returns two values: +# i) func_quote_for_eval_result +# double-quoted, suitable for a subsequent eval +# ii) func_quote_for_eval_unquoted_result +# has all characters that are still active within double +# quotes backslashified. +func_quote_for_eval () +{ + $debug_cmd + + func_quote_for_eval_unquoted_result= + func_quote_for_eval_result= + while test 0 -lt $#; do + case $1 in + *[\\\`\"\$]*) + _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; + *) + _G_unquoted_arg=$1 ;; + esac + if test -n "$func_quote_for_eval_unquoted_result"; then + func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" + else + func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" + fi + + case $_G_unquoted_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and variable expansion + # for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_quoted_arg=\"$_G_unquoted_arg\" + ;; + *) + _G_quoted_arg=$_G_unquoted_arg + ;; + esac + + if test -n "$func_quote_for_eval_result"; then + func_append func_quote_for_eval_result " $_G_quoted_arg" + else + func_append func_quote_for_eval_result "$_G_quoted_arg" + fi + shift + done +} + + +# func_quote_for_expand ARG +# ------------------------- +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + $debug_cmd + + case $1 in + *[\\\`\"]*) + _G_arg=`$ECHO "$1" | $SED \ + -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; + *) + _G_arg=$1 ;; + esac + + case $_G_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_arg=\"$_G_arg\" + ;; + esac + + func_quote_for_expand_result=$_G_arg +} + + +# func_stripname PREFIX SUFFIX NAME +# --------------------------------- +# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. 
+# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_stripname () + { + $debug_cmd + + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary variable first. + func_stripname_result=$3 + func_stripname_result=${func_stripname_result#"$1"} + func_stripname_result=${func_stripname_result%"$2"} + }' +else + func_stripname () + { + $debug_cmd + + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; + esac + } +fi + + +# func_show_eval CMD [FAIL_EXP] +# ----------------------------- +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + func_quote_for_expand "$_G_cmd" + eval "func_notquiet $func_quote_for_expand_result" + + $opt_dry_run || { + eval "$_G_cmd" + _G_status=$? + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_show_eval_locale CMD [FAIL_EXP] +# ------------------------------------ +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + $opt_quiet || { + func_quote_for_expand "$_G_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + $opt_dry_run || { + eval "$_G_user_locale + $_G_cmd" + _G_status=$? + eval "$_G_safe_locale" + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_tr_sh +# ---------- +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + $debug_cmd + + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_verbose ARG... +# ------------------- +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $debug_cmd + + $opt_verbose && func_echo "$*" + + : +} + + +# func_warn_and_continue ARG... +# ----------------------------- +# Echo program name prefixed warning message to standard error. +func_warn_and_continue () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 +} + + +# func_warning CATEGORY ARG... +# ---------------------------- +# Echo program name prefixed warning message to standard error. Warning +# messages can be filtered according to CATEGORY, where this function +# elides messages where CATEGORY is not listed in the global variable +# 'opt_warning_types'. +func_warning () +{ + $debug_cmd + + # CATEGORY must be in the warning_categories list! 
+ case " $warning_categories " in + *" $1 "*) ;; + *) func_internal_error "invalid warning category '$1'" ;; + esac + + _G_category=$1 + shift + + case " $opt_warning_types " in + *" $_G_category "*) $warning_func ${1+"$@"} ;; + esac +} + + +# func_sort_ver VER1 VER2 +# ----------------------- +# 'sort -V' is not generally available. +# Note this deviates from the version comparison in automake +# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a +# but this should suffice as we won't be specifying old +# version formats or redundant trailing .0 in bootstrap.conf. +# If we did want full compatibility then we should probably +# use m4_version_compare from autoconf. +func_sort_ver () +{ + $debug_cmd + + printf '%s\n%s\n' "$1" "$2" \ + | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n +} + +# func_lt_ver PREV CURR +# --------------------- +# Return true if PREV and CURR are in the correct order according to +# func_sort_ver, otherwise false. Use it like this: +# +# func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." +func_lt_ver () +{ + $debug_cmd + + test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: +#! /bin/sh + +# Set a version string for this script. +scriptversion=2014-01-07.03; # UTC + +# A portable, pluggable option parser for Bourne shell. +# Written by Gary V. Vaughan, 2010 + +# Copyright (C) 2010-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# This file is a library for parsing options in your shell scripts along +# with assorted other useful supporting features that you can make use +# of too. +# +# For the simplest scripts you might need only: +# +# #!/bin/sh +# . relative/path/to/funclib.sh +# . relative/path/to/options-parser +# scriptversion=1.0 +# func_options ${1+"$@"} +# eval set dummy "$func_options_result"; shift +# ...rest of your script... +# +# In order for the '--version' option to work, you will need to have a +# suitably formatted comment like the one at the top of this file +# starting with '# Written by ' and ending with '# warranty; '. +# +# For '-h' and '--help' to work, you will also need a one line +# description of your script's purpose in a comment directly above the +# '# Written by ' line, like the one at the top of this file. 
+# +# The default options also support '--debug', which will turn on shell +# execution tracing (see the comment above debug_cmd below for another +# use), and '--verbose' and the func_verbose function to allow your script +# to display verbose messages only when your user has specified +# '--verbose'. +# +# After sourcing this file, you can plug processing for additional +# options by amending the variables from the 'Configuration' section +# below, and following the instructions in the 'Option parsing' +# section further down. + +## -------------- ## +## Configuration. ## +## -------------- ## + +# You should override these variables in your script after sourcing this +# file so that they reflect the customisations you have added to the +# option parser. + +# The usage line for option parsing errors and the start of '-h' and +# '--help' output messages. You can embed shell variables for delayed +# expansion at the time the message is displayed, but you will need to +# quote other shell meta-characters carefully to prevent them being +# expanded when the contents are evaled. +usage='$progpath [OPTION]...' + +# Short help message in response to '-h' and '--help'. Add to this or +# override it after sourcing this library to reflect the full set of +# options your script accepts. +usage_message="\ + --debug enable verbose shell tracing + -W, --warnings=CATEGORY + report the warnings falling in CATEGORY [all] + -v, --verbose verbosely report processing + --version print version information and exit + -h, --help print short or long help message and exit +" + +# Additional text appended to 'usage_message' in response to '--help'. +long_help_message=" +Warning categories include: + 'all' show all warnings + 'none' turn off all the warnings + 'error' warnings are treated as fatal errors" + +# Help message printed before fatal option parsing errors. +fatal_help="Try '\$progname --help' for more information." + + + +## ------------------------- ## +## Hook function management. ## +## ------------------------- ## + +# This section contains functions for adding, removing, and running hooks +# to the main code. A hook is just a named list of of function, that can +# be run in order later on. + +# func_hookable FUNC_NAME +# ----------------------- +# Declare that FUNC_NAME will run hooks added with +# 'func_add_hook FUNC_NAME ...'. +func_hookable () +{ + $debug_cmd + + func_append hookable_fns " $1" +} + + +# func_add_hook FUNC_NAME HOOK_FUNC +# --------------------------------- +# Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must +# first have been declared "hookable" by a call to 'func_hookable'. +func_add_hook () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not accept hook functions." ;; + esac + + eval func_append ${1}_hooks '" $2"' +} + + +# func_remove_hook FUNC_NAME HOOK_FUNC +# ------------------------------------ +# Remove HOOK_FUNC from the list of functions called by FUNC_NAME. +func_remove_hook () +{ + $debug_cmd + + eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' +} + + +# func_run_hooks FUNC_NAME [ARG]... +# --------------------------------- +# Run all hook functions registered to FUNC_NAME. +# It is assumed that the list of hook functions contains nothing more +# than a whitespace-delimited list of legal shell function names, and +# no effort is wasted trying to catch shell meta-characters or preserve +# whitespace. 
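# A minimal usage sketch (illustrative only; my_cmd and my_hook are
# hypothetical names).  A hook receives the current positional parameters
# and must hand a quoted copy back in <hook>_result:
#
#   func_hookable my_cmd
#
#   my_hook ()
#   {
#     # ...inspect or rewrite "$@" here...
#     func_quote_for_eval ${1+"$@"}
#     my_hook_result=$func_quote_for_eval_result
#   }
#   func_add_hook my_cmd my_hook
#
#   # inside my_cmd:
#   func_run_hooks my_cmd ${1+"$@"}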
+func_run_hooks () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not support hook funcions.n" ;; + esac + + eval _G_hook_fns=\$$1_hooks; shift + + for _G_hook in $_G_hook_fns; do + eval $_G_hook '"$@"' + + # store returned options list back into positional + # parameters for next 'cmd' execution. + eval _G_hook_result=\$${_G_hook}_result + eval set dummy "$_G_hook_result"; shift + done + + func_quote_for_eval ${1+"$@"} + func_run_hooks_result=$func_quote_for_eval_result +} + + + +## --------------- ## +## Option parsing. ## +## --------------- ## + +# In order to add your own option parsing hooks, you must accept the +# full positional parameter list in your hook function, remove any +# options that you action, and then pass back the remaining unprocessed +# options in '_result', escaped suitably for +# 'eval'. Like this: +# +# my_options_prep () +# { +# $debug_cmd +# +# # Extend the existing usage message. +# usage_message=$usage_message' +# -s, --silent don'\''t print informational messages +# ' +# +# func_quote_for_eval ${1+"$@"} +# my_options_prep_result=$func_quote_for_eval_result +# } +# func_add_hook func_options_prep my_options_prep +# +# +# my_silent_option () +# { +# $debug_cmd +# +# # Note that for efficiency, we parse as many options as we can +# # recognise in a loop before passing the remainder back to the +# # caller on the first unrecognised argument we encounter. +# while test $# -gt 0; do +# opt=$1; shift +# case $opt in +# --silent|-s) opt_silent=: ;; +# # Separate non-argument short options: +# -s*) func_split_short_opt "$_G_opt" +# set dummy "$func_split_short_opt_name" \ +# "-$func_split_short_opt_arg" ${1+"$@"} +# shift +# ;; +# *) set dummy "$_G_opt" "$*"; shift; break ;; +# esac +# done +# +# func_quote_for_eval ${1+"$@"} +# my_silent_option_result=$func_quote_for_eval_result +# } +# func_add_hook func_parse_options my_silent_option +# +# +# my_option_validation () +# { +# $debug_cmd +# +# $opt_silent && $opt_verbose && func_fatal_help "\ +# '--silent' and '--verbose' options are mutually exclusive." +# +# func_quote_for_eval ${1+"$@"} +# my_option_validation_result=$func_quote_for_eval_result +# } +# func_add_hook func_validate_options my_option_validation +# +# You'll alse need to manually amend $usage_message to reflect the extra +# options you parse. It's preferable to append if you can, so that +# multiple option parsing hooks can be added safely. + + +# func_options [ARG]... +# --------------------- +# All the functions called inside func_options are hookable. See the +# individual implementations for details. +func_hookable func_options +func_options () +{ + $debug_cmd + + func_options_prep ${1+"$@"} + eval func_parse_options \ + ${func_options_prep_result+"$func_options_prep_result"} + eval func_validate_options \ + ${func_parse_options_result+"$func_parse_options_result"} + + eval func_run_hooks func_options \ + ${func_validate_options_result+"$func_validate_options_result"} + + # save modified positional parameters for caller + func_options_result=$func_run_hooks_result +} + + +# func_options_prep [ARG]... +# -------------------------- +# All initialisations required before starting the option parse loop. +# Note that when calling hook functions, we pass through the list of +# positional parameters. If a hook function modifies that list, and +# needs to propogate that back to rest of this script, then the complete +# modified list must be put in 'func_run_hooks_result' before +# returning. 
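# A minimal usage sketch (illustrative only; my_prep and opt_silent are
# hypothetical names).  A client script can register its own preparation
# hook to set additional option defaults before parsing starts:
#
#   my_prep ()
#   {
#     opt_silent=false
#     func_quote_for_eval ${1+"$@"}
#     my_prep_result=$func_quote_for_eval_result
#   }
#   func_add_hook func_options_prep my_prep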
+func_hookable func_options_prep +func_options_prep () +{ + $debug_cmd + + # Option defaults: + opt_verbose=false + opt_warning_types= + + func_run_hooks func_options_prep ${1+"$@"} + + # save modified positional parameters for caller + func_options_prep_result=$func_run_hooks_result +} + + +# func_parse_options [ARG]... +# --------------------------- +# The main option parsing loop. +func_hookable func_parse_options +func_parse_options () +{ + $debug_cmd + + func_parse_options_result= + + # this just eases exit handling + while test $# -gt 0; do + # Defer to hook functions for initial option parsing, so they + # get priority in the event of reusing an option name. + func_run_hooks func_parse_options ${1+"$@"} + + # Adjust func_parse_options positional parameters to match + eval set dummy "$func_run_hooks_result"; shift + + # Break out of the loop if we already parsed every option. + test $# -gt 0 || break + + _G_opt=$1 + shift + case $_G_opt in + --debug|-x) debug_cmd='set -x' + func_echo "enabling shell trace mode" + $debug_cmd + ;; + + --no-warnings|--no-warning|--no-warn) + set dummy --warnings none ${1+"$@"} + shift + ;; + + --warnings|--warning|-W) + test $# = 0 && func_missing_arg $_G_opt && break + case " $warning_categories $1" in + *" $1 "*) + # trailing space prevents matching last $1 above + func_append_uniq opt_warning_types " $1" + ;; + *all) + opt_warning_types=$warning_categories + ;; + *none) + opt_warning_types=none + warning_func=: + ;; + *error) + opt_warning_types=$warning_categories + warning_func=func_fatal_error + ;; + *) + func_fatal_error \ + "unsupported warning category: '$1'" + ;; + esac + shift + ;; + + --verbose|-v) opt_verbose=: ;; + --version) func_version ;; + -\?|-h) func_usage ;; + --help) func_help ;; + + # Separate optargs to long options (plugins may need this): + --*=*) func_split_equals "$_G_opt" + set dummy "$func_split_equals_lhs" \ + "$func_split_equals_rhs" ${1+"$@"} + shift + ;; + + # Separate optargs to short options: + -W*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-v*|-x*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + esac + done + + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + func_parse_options_result=$func_quote_for_eval_result +} + + +# func_validate_options [ARG]... +# ------------------------------ +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +func_hookable func_validate_options +func_validate_options () +{ + $debug_cmd + + # Display all warnings if -W was not given. + test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" + + func_run_hooks func_validate_options ${1+"$@"} + + # Bail if the options were screwed! + $exit_cmd $EXIT_FAILURE + + # save modified positional parameters for caller + func_validate_options_result=$func_run_hooks_result +} + + + +## ----------------- ## +## Helper functions. ## +## ----------------- ## + +# This section contains the helper functions used by the rest of the +# hookable option parser framework in ascii-betical order. + + +# func_fatal_help ARG... 
+# ---------------------- +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + eval \$ECHO \""$fatal_help"\" + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + + +# func_help +# --------- +# Echo long help message to standard output and exit. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message" + exit 0 +} + + +# func_missing_arg ARGNAME +# ------------------------ +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $debug_cmd + + func_error "Missing argument for '$1'." + exit_cmd=exit +} + + +# func_split_equals STRING +# ------------------------ +# Set func_split_equals_lhs and func_split_equals_rhs shell variables after +# splitting STRING at the '=' sign. +test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=${1%%=*} + func_split_equals_rhs=${1#*=} + test "x$func_split_equals_lhs" = "x$1" \ + && func_split_equals_rhs= + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` + func_split_equals_rhs= + test "x$func_split_equals_lhs" = "x$1" \ + || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` + } +fi #func_split_equals + + +# func_split_short_opt SHORTOPT +# ----------------------------- +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"} + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` + func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` + } +fi #func_split_short_opt + + +# func_usage +# ---------- +# Echo short help message to standard output and exit. +func_usage () +{ + $debug_cmd + + func_usage_message + $ECHO "Run '$progname --help |${PAGER-more}' for full usage" + exit 0 +} + + +# func_usage_message +# ------------------ +# Echo short help message to standard output. +func_usage_message () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + echo + $SED -n 's|^# || + /^Written by/{ + x;p;x + } + h + /^Written by/q' < "$progpath" + echo + eval \$ECHO \""$usage_message"\" +} + + +# func_version +# ------------ +# Echo version message to standard output and exit. +func_version () +{ + $debug_cmd + + printf '%s\n' "$progname $scriptversion" + $SED -n ' + /(C)/!b go + :more + /\./!{ + N + s|\n# | | + b more + } + :go + /^# Written by /,/# warranty; / { + s|^# || + s|^# *$|| + s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| + p + } + /^# Written by / { + s|^# || + p + } + /^warranty; /q' < "$progpath" + + exit $? 
+}
+
+
+# Local variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC"
+# time-stamp-time-zone: "UTC"
+# End:
+
+# Set a version string.
+scriptversion='(GNU libtool) 2.4.6'
+
+
+# func_echo ARG...
+# ----------------
+# Libtool also displays the current mode in messages, so override
+# funclib.sh func_echo with this custom definition.
+func_echo ()
+{
+    $debug_cmd
+
+    _G_message=$*
+
+    func_echo_IFS=$IFS
+    IFS=$nl
+    for _G_line in $_G_message; do
+      IFS=$func_echo_IFS
+      $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line"
+    done
+    IFS=$func_echo_IFS
+}
+
+
+# func_warning ARG...
+# -------------------
+# Libtool warnings are not categorized, so override funclib.sh
+# func_warning with this simpler definition.
+func_warning ()
+{
+    $debug_cmd
+
+    $warning_func ${1+"$@"}
+}
+
+
+## ---------------- ##
+## Options parsing. ##
+## ---------------- ##
+
+# Hook in the functions to make sure our own options are parsed during
+# the option parsing loop.
+
+usage='$progpath [OPTION]... [MODE-ARG]...'
+
+# Short help message in response to '-h'.
+usage_message="Options:
+       --config             show all configuration variables
+       --debug              enable verbose shell tracing
+   -n, --dry-run            display commands without modifying any files
+       --features           display basic configuration information and exit
+       --mode=MODE          use operation mode MODE
+       --no-warnings        equivalent to '-Wnone'
+       --preserve-dup-deps  don't remove duplicate dependency libraries
+       --quiet, --silent    don't print informational messages
+       --tag=TAG            use configuration variables from tag TAG
+   -v, --verbose            print more informational messages than default
+       --version            print version information
+   -W, --warnings=CATEGORY  report the warnings falling in CATEGORY [all]
+   -h, --help, --help-all   print short, long, or detailed help message
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+func_help ()
+{
+    $debug_cmd
+
+    func_usage_message
+    $ECHO "$long_help_message
+
+MODE must be one of the following:
+
+        clean           remove files from the build directory
+        compile         compile a source file into a libtool object
+        execute         automatically set library path, then run a program
+        finish          complete the installation of libtool libraries
+        install         install libraries or executables
+        link            create a library or an executable
+        uninstall       remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. When passed as first option,
+'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that.
+Try '$progname --help --mode=MODE' for a more detailed description of MODE.
+
+When reporting a bug, please describe a test case to reproduce it and
+include the following information:
+
+        host-triplet:   $host
+        shell:          $SHELL
+        compiler:       $LTCC
+        compiler flags: $LTCFLAGS
+        linker:         $LD (gnu? $with_gnu_ld)
+        version:        $progname $scriptversion Debian-2.4.6-2
+        automake:       `($AUTOMAKE --version) 2>/dev/null |$SED 1q`
+        autoconf:       `($AUTOCONF --version) 2>/dev/null |$SED 1q`
+
+Report bugs to <bug-libtool@gnu.org>.
+GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+General help using GNU software: <http://www.gnu.org/gethelp/>."
+    exit 0
+}
+
+
+# func_lo2o OBJECT-NAME
+# ---------------------
+# Transform OBJECT-NAME from a '.lo' suffix to the platform specific
+# object suffix.
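For example (illustrative values; assumes a host where $objext is 'o'), the transformations defined just below behave as follows:

    func_lo2o  foo.lo   # func_lo2o_result=foo.o
    func_lo2o  bar.c    # func_lo2o_result=bar.c   (no '.lo' suffix, left unchanged)
    func_xform baz.cpp  # func_xform_result=baz.lo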
+ +lo2o=s/\\.lo\$/.$objext/ +o2lo=s/\\.$objext\$/.lo/ + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_lo2o () + { + case $1 in + *.lo) func_lo2o_result=${1%.lo}.$objext ;; + * ) func_lo2o_result=$1 ;; + esac + }' + + # func_xform LIBOBJ-OR-SOURCE + # --------------------------- + # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise) + # suffix to a '.lo' libtool-object suffix. + eval 'func_xform () + { + func_xform_result=${1%.*}.lo + }' +else + # ...otherwise fall back to using sed. + func_lo2o () + { + func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"` + } + + func_xform () + { + func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'` + } +fi + + +# func_fatal_configuration ARG... +# ------------------------------- +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func__fatal_error ${1+"$@"} \ + "See the $PACKAGE documentation for more information." \ + "Fatal configuration error." +} + + +# func_config +# ----------- +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + + +# func_features +# ------------- +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test yes = "$build_libtool_libs"; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test yes = "$build_old_libs"; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + + +# func_enable_tag TAGNAME +# ----------------------- +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname=$1 + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf=/$re_begincf/,/$re_endcf/p + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + + +# func_check_version_match +# ------------------------ +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. 
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# libtool_options_prep [ARG]... +# ----------------------------- +# Preparation for options parsed by libtool. +libtool_options_prep () +{ + $debug_mode + + # Option defaults: + opt_config=false + opt_dlopen= + opt_dry_run=false + opt_help=false + opt_mode= + opt_preserve_dup_deps=false + opt_quiet=false + + nonopt= + preserve_args= + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + esac + + # Pass back the list of options. + func_quote_for_eval ${1+"$@"} + libtool_options_prep_result=$func_quote_for_eval_result +} +func_add_hook func_options_prep libtool_options_prep + + +# libtool_parse_options [ARG]... +# --------------------------------- +# Provide handling for libtool specific options. +libtool_parse_options () +{ + $debug_cmd + + # Perform our own loop to consume as many options as possible in + # each iteration. 
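A concrete invocation that the loop below handles (hypothetical file names, sketch only):

    libtool --mode compile --tag CC gcc -c foo.c -o foo.lo

The loop consumes '--mode compile' and '--tag CC' (setting opt_mode and calling func_enable_tag), and passes 'gcc -c foo.c -o foo.lo' back unchanged for the selected mode to process.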
+ while test $# -gt 0; do + _G_opt=$1 + shift + case $_G_opt in + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + + --config) func_config ;; + + --dlopen|-dlopen) + opt_dlopen="${opt_dlopen+$opt_dlopen +}$1" + shift + ;; + + --preserve-dup-deps) + opt_preserve_dup_deps=: ;; + + --features) func_features ;; + + --finish) set dummy --mode finish ${1+"$@"}; shift ;; + + --help) opt_help=: ;; + + --help-all) opt_help=': help-all' ;; + + --mode) test $# = 0 && func_missing_arg $_G_opt && break + opt_mode=$1 + case $1 in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $_G_opt" + exit_cmd=exit + break + ;; + esac + shift + ;; + + --no-silent|--no-quiet) + opt_quiet=false + func_append preserve_args " $_G_opt" + ;; + + --no-warnings|--no-warning|--no-warn) + opt_warning=false + func_append preserve_args " $_G_opt" + ;; + + --no-verbose) + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --silent|--quiet) + opt_quiet=: + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --tag) test $# = 0 && func_missing_arg $_G_opt && break + opt_tag=$1 + func_append preserve_args " $_G_opt $1" + func_enable_tag "$1" + shift + ;; + + --verbose|-v) opt_quiet=false + opt_verbose=: + func_append preserve_args " $_G_opt" + ;; + + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + esac + done + + + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + libtool_parse_options_result=$func_quote_for_eval_result +} +func_add_hook func_parse_options libtool_parse_options + + + +# libtool_validate_options [ARG]... +# --------------------------------- +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +libtool_validate_options () +{ + # save first non-option argument + if test 0 -lt $#; then + nonopt=$1 + shift + fi + + # preserve --debug + test : = "$debug_cmd" || func_append preserve_args " --debug" + + case $host in + # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 + # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + test yes != "$build_libtool_libs" \ + && test yes != "$build_old_libs" \ + && func_fatal_configuration "not configured to build any kind of library" + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test execute != "$opt_mode"; then + func_error "unrecognized option '-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help=$help + help="Try '$progname --help --mode=$opt_mode' for more information." + } + + # Pass back the unparsed argument list + func_quote_for_eval ${1+"$@"} + libtool_validate_options_result=$func_quote_for_eval_result +} +func_add_hook func_validate_options libtool_validate_options + + +# Process options as early as possible so that --help and --version +# can return quickly. +func_options ${1+"$@"} +eval set dummy "$func_options_result"; shift + + + +## ----------- ## +## Main. 
## +## ----------- ## + +magic='%%%MAGIC variable%%%' +magic_exe='%%%MAGIC EXE variable%%%' + +# Global variables. +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# func_generated_by_libtool +# True iff stdin has been generated by Libtool. This function is only +# a basic sanity check; it will hardly flush out determined imposters. +func_generated_by_libtool_p () +{ + $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if 'file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case $lalib_p_line in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test yes = "$lalib_p" +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + test -f "$1" && + $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! 
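As a concrete illustration (not taken from libtool itself), variables such as finish_cmds hold several commands separated by '~', and the function defined here runs them one at a time through func_show_eval:

    cmds='echo step one~echo step two~echo step three'
    func_execute_cmds "$cmds" 'func_error "a finish command failed"'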
+func_execute_cmds () +{ + $debug_cmd + + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# 'FILE.' does not work on cygwin managed mounts. +func_source () +{ + $debug_cmd + + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case $lt_sysroot:$1 in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result='='$func_stripname_result + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $debug_cmd + + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
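As an illustration (hypothetical tag set): if this script was generated with an extra 'CXX' tag whose configured compiler is 'g++', then a command line such as

    libtool --mode=compile g++ -c bar.cpp

does not match the default $CC, so the loop above evaluates each tagged configuration until the 'CXX' entry matches and sets tagname=CXX, without the user passing '--tag' explicitly.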
+ if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with '--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=$1 + if test yes = "$build_libtool_libs"; then + write_lobj=\'$2\' + else + write_lobj=none + fi + + if test yes = "$build_old_libs"; then + write_oldobj=\'$3\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. +func_convert_core_path_wine_to_w32 () +{ + $debug_cmd + + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result= + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result"; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $debug_cmd + + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" 
-ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $debug_cmd + + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $debug_cmd + + if test -z "$2" && test -n "$1"; then + func_error "Could not determine host file name corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result=$1 + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $debug_cmd + + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " '$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result=$3 + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $debug_cmd + + case $4 in + $1 ) func_to_host_path_result=$3$func_to_host_path_result + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via '$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $debug_cmd + + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. 
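For instance, the compile code later in this script calls the function defined below like this (the call appears verbatim in func_mode_compile):

    func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
    srcfile=$func_to_tool_file_result

Because func_convert_file_msys_to_w32 is listed as LAZY, the conversion is skipped whenever that happens to be the conversion already in use.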
+func_to_tool_file () +{ + $debug_cmd + + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result=$1 +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result=$func_convert_core_msys_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result=$func_convert_core_file_wine_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via '$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. 
+# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $debug_cmd + + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd=func_convert_path_$func_stripname_result + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $debug_cmd + + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result=$1 +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. +func_convert_path_msys_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_msys_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. 
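To make the naming convention described above concrete (illustrative values only): if $to_host_file_cmd is func_convert_file_cygwin_to_w32, then func_init_to_host_path_cmd strips the 'func_convert_file_' prefix and derives the matching path converter:

    to_host_file_cmd=func_convert_file_cygwin_to_w32
    func_init_to_host_path_cmd
    # to_host_path_cmd is now func_convert_path_cygwin_to_w32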
+func_convert_path_nix_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_path_wine_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_path_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_dll_def_p FILE +# True iff FILE is a Windows DLL '.def' file. +# Keep in sync with _LT_DLL_DEF_P in libtool.m4 +func_dll_def_p () +{ + $debug_cmd + + func_dll_def_p_tmp=`$SED -n \ + -e 's/^[ ]*//' \ + -e '/^\(;.*\)*$/d' \ + -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ + -e q \ + "$1"` + test DEF = "$func_dll_def_p_tmp" +} + + +# func_mode_compile arg... +func_mode_compile () +{ + $debug_cmd + + # Get the compilation command and the source file. + base_compile= + srcfile=$nonopt # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg=$arg + arg_mode=normal + ;; + + target ) + libobj=$arg + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. 
+ case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify '-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs=$IFS; IFS=, + for arg in $args; do + IFS=$save_ifs + func_append_quoted lastarg "$arg" + done + IFS=$save_ifs + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg=$srcfile + srcfile=$arg + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with '-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj=$func_basename_result + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from '$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test yes = "$build_libtool_libs" \ + || func_fatal_configuration "cannot build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name '$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname=$func_basename_result + xdir=$func_dirname_result + lobj=$xdir$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. 
+ if test yes = "$build_old_libs"; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test no = "$compiler_c_o"; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext + lockfile=$output_obj.lock + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test yes = "$need_locks"; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test warn = "$need_locks"; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test yes = "$build_libtool_libs"; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test no != "$pic_mode"; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. 
+ if test yes = "$suppress_opt"; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test yes = "$build_old_libs"; then + if test yes != "$pic_mode"; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test yes = "$compiler_c_o"; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test no != "$need_locks"; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test compile = "$opt_mode" && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a '.o' file suitable for static linking + -static only build a '.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a 'standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix '.c' with the +library object suffix, '.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. 
+ +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to '-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the '--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the 'install' or 'cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE use a list of object files found in FILE to specify objects + -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak 
LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with '-') are ignored. + +Every other argument is treated as a filename. Files ending in '.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in '.la', then a libtool library is created, +only library objects ('.lo' files) may be specified, and '-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created +using 'ar' and 'ranlib', or on Windows using 'lib'. + +If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode '$opt_mode'" + ;; + esac + + echo + $ECHO "Try '$progname --help' for more information about other modes." +} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test : = "$opt_help"; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | $SED -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + $SED '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $debug_cmd + + # The first argument is the command name. + cmd=$nonopt + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "'$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "'$file' was not linked with '-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir=$func_dirname_result + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." 
+ dir=$func_dirname_result + ;; + + *) + func_warning "'-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir=$absdir + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic=$magic + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file=$progdir/$program + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file=$progdir/$program + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if $opt_dry_run; then + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + else + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd=\$cmd$args + fi +} + +test execute = "$opt_mode" && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $debug_cmd + + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "'$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument '$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and '=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
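+  # With --quiet (or --silent) there is nothing more to report: the
+  # explanatory banner printed below is informational only.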
+ $opt_quiet && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the '-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the '$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the '$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the '$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test finish = "$opt_mode" && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $debug_cmd + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac + then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=false + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=: ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test X-m = "X$prev" && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
+ func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the '$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=: + if $isdir; then + destdir=$dest + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir=$func_dirname_result + destname=$func_basename_result + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "'$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "'$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir=$func_dirname_result + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. 
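+	  # A non-empty inst_prefix_dir indicates a staged install (for
+	  # example 'make install DESTDIR=/tmp/stage'); passing it through
+	  # -inst-prefix-dir lets the relink step locate dependent libraries
+	  # inside the staging tree instead of their final install location.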
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking '$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname=$1 + shift + + srcname=$realname + test -n "$relink_command" && srcname=${realname}T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme=$stripme + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme= + ;; + esac + ;; + os2*) + case $realname in + *_dll.a) + tstripme= + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try 'ln -sf' first, because the 'ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib=$destdir/$realname + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name=$func_basename_result + instname=$dir/${name}i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest=$destfile + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to '$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test yes = "$build_old_libs"; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext= + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=.exe + fi + ;; + esac + + # Do a test to see if this is really a libtool program. 
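+      # On Windows hosts the file on disk may be the compiled cwrapper
+      # executable, so locate its companion wrapper script first; that
+      # script is what records notinst_deplibs and the relink command
+      # consulted below.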
+ case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script '$wrapper'" + + finalize=: + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "'$lib' has not been installed in '$libdir'" + finalize=false + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test no = "$fast_install" && test -n "$relink_command"; then + $opt_dry_run || { + if $finalize; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file=$func_basename_result + outputname=$tmpdir/$file + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_quiet || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink '$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file=$outputname + else + func_warning "cannot relink '$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name=$func_basename_result + + # Set up the ranlib parameters. + oldlib=$destdir/$name + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run '$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test install = "$opt_mode" && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. 
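+# The generated table, lt_<prefix>_LTX_preloaded_symbols, maps symbol
+# names to addresses so that a dlopen/dlsym emulation (such as libltdl's
+# dlpreopen support) can resolve symbols without the runtime loader.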
+func_generate_dlsyms () +{ + $debug_cmd + + my_outputname=$1 + my_originator=$2 + my_pic_p=${3-false} + my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms=${my_outputname}S.c + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist=$output_objdir/$my_outputname.nm + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. */\ +" + + if test yes = "$dlself"; then + func_verbose "generating symbol list for '$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
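+	  # Scan the program's own objects with $NM so that symbols the
+	  # executable itself exports end up in the preloaded-symbol table.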
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from '$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols=$output_objdir/$outputname.exp + $opt_dry_run || { + $RM $export_symbols + eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from '$dlprefile'" + func_basename "$dlprefile" + name=$func_basename_result + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename= + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname"; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename=$func_basename_result + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename"; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. 
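+	# The raw $NM output collected in $nlist is filtered, sorted and
+	# uniquified below, then converted into C declarations through
+	# $global_symbol_to_cdecl.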
+ test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + func_show_eval '$RM "${nlist}I"' + if test -n "$global_symbol_to_import"; then + eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I' + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[];\ +" + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ +static void lt_syminit(void) +{ + LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols; + for (; symbol->name; ++symbol) + {" + $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms" + echo >> "$output_objdir/$my_dlsyms" "\ + } +}" + fi + echo >> "$output_objdir/$my_dlsyms" "\ +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{ {\"$my_originator\", (void *) 0}," + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ + {\"@INIT@\", (void *) <_syminit}," + fi + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + $my_pic_p && pic_flag_for_symtable=" $pic_flag" + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' + + # Transform the symbol file into the correct name. 
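+	# Substitute the @SYMFILE@ placeholder in the link commands with the
+	# object just compiled (plus the generated .def file on Windows hosts).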
+ symfileobj=$output_objdir/${my_outputname}S.$objext + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for '$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $debug_cmd + + win32_libid_type=unknown + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + case $nm_interface in + "MS dumpbin") + if func_cygming_ms_implib_p "$1" || + func_cygming_gnu_implib_p "$1" + then + win32_nmres=import + else + win32_nmres= + fi + ;; + *) + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s|.*|import| + p + q + } + }'` + ;; + esac + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... 
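+      # file(1) also labels shell scripts "executable", so require the
+      # "MS Windows PE" signature before classifying the file as a DLL.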
+ case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $debug_cmd + + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $debug_cmd + + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive that possess that section. Heuristic: eliminate + # all those that have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. 
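+# It recovers the DLL name by dumping the .idata section of the import
+# library with $OBJDUMP (see func_cygming_dll_for_implib_fallback_core).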
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $debug_cmd + + if func_cygming_gnu_implib_p "$1"; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1"; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result= + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $debug_cmd + + f_ex_an_ar_dir=$1; shift + f_ex_an_ar_oldlib=$1 + if test yes = "$lock_old_archive_extraction"; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test yes = "$lock_old_archive_extraction"; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $debug_cmd + + my_gentop=$1; shift + my_oldlibs=${1+"$@"} + my_oldobjs= + my_xlib= + my_xabs= + my_xdir= + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib=$func_basename_result + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir=$my_gentop/$my_xlib_u + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
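+	  # For fat (universal) archives: split the archive per architecture
+	  # with $LIPO -thin, extract each thin archive, then merge the
+	  # per-architecture objects back into fat objects with $LIPO -create.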
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + func_basename "$darwin_archive" + darwin_base_archive=$func_basename_result + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches; do + func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" + $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" + cd "unfat-$$/$darwin_base_archive-$darwin_arch" + func_extract_an_archive "`pwd`" "$darwin_base_archive" + cd "$darwin_curdir" + $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result=$my_oldobjs +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory where it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ that is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options that match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test yes = "$fast_install"; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + \$ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. 
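+  # The emitted wrapper prepends $temp_rpath (the directories holding the
+  # still-uninstalled libtool libraries) to $shlibpath_var (typically
+  # LD_LIBRARY_PATH; DYLD_LIBRARY_PATH on Darwin) so the real binary can
+  # run before 'make install'.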
+ if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* declarations of non-ANSI functions */ +#if defined __MINGW32__ +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined __CYGWIN__ +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined other_platform || defined ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined _MSC_VER +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +#elif defined __MINGW32__ +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined __CYGWIN__ +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined other platforms ... 
*/ +#endif + +#if defined PATH_MAX +# define LT_PATHMAX PATH_MAX +#elif defined MAXPATHLEN +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ + defined __OS2__ +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free (stale); stale = 0; } \ +} while (0) + +#if defined LT_DEBUGWRAPPER +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + size_t tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined HAVE_DOS_BASED_FILE_SYSTEM + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined HAVE_DOS_BASED_FILE_SYSTEM + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = (size_t) (q - p); + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (STREQ (str, pat)) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + size_t len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + size_t orig_value_len = strlen (orig_value); + size_t add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + size_t len = strlen (new_value); + while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[--len] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $debug_cmd + + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_suncc_cstd_abi +# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! +# Several compiler flags select an ABI that is incompatible with the +# Cstd library. Avoid specifying it if any are in CXXFLAGS. 
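+# Flags such as -compat=g, -std=c++NN, -library=stdcxx4 or
+# -library=stlport4 already imply a non-Cstd ABI, so suncc_use_cstd_abi
+# is set to 'no' and the caller can avoid requesting the Cstd runtime.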
+func_suncc_cstd_abi () +{ + $debug_cmd + + case " $compile_command " in + *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) + suncc_use_cstd_abi=no + ;; + *) + suncc_use_cstd_abi=yes + ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $debug_cmd + + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # what system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll that has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + os2dllname= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=false + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module=$wl-single_module + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test yes != "$build_libtool_libs" \ + && func_fatal_configuration "cannot build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg=$1 + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. 
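+ # For example, '-rpath /usr/local/lib' is consumed in two iterations:
+ # the '-rpath' case further below sets prev=rpath, and on the next pass
+ # this block stores the directory via its 'rpath | xrpath' branch.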
+ if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir=$arg + prev= + continue + ;; + dlfiles|dlprefiles) + $preload || { + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=: + } + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test no = "$dlself"; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test dlprefiles = "$prev"; then + dlself=yes + elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test dlfiles = "$prev"; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols=$arg + test -f "$arg" \ + || func_fatal_error "symbol file '$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex=$arg + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir=$arg + prev= + continue + ;; + mllvm) + # Clang does not use LLVM to link, so we can simply discard any + # '-mllvm $arg' options when doing the link step. + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + if test none != "$pic_object"; then + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + fi + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
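+ # Illustrative example: for a hypothetical argument 'foo.lo' in the
+ # current directory, xdir is empty and func_lo2o yields 'foo.o', so the
+ # guessed objects become "$objdir/foo.o" (objdir is normally '.libs')
+ # and 'foo.o'.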
+ func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file '$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + os2dllname) + os2dllname=$arg + prev= + continue + ;; + precious_regex) + precious_files_regex=$arg + prev= + continue + ;; + release) + release=-$arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test rpath = "$prev"; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds=$arg + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg=$arg + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "'-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test X-export-symbols = "X$arg"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between '-L' and '$1'" + else + func_fatal_error "need path for '-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
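+ # For instance (hypothetical path), '-L../staging/lib' given from
+ # /home/user/proj is resolved to /home/user/staging/lib below; that
+ # absolute directory is recorded in $lib_search_path, and on the
+ # Windows-style hosts handled further down both it and its sibling
+ # /home/user/staging/bin are appended to $dllsearchpath.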
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of '$dir'" + dir=$absdir + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test X-lc = "X$arg" || test X-lm = "X$arg"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test X-lc = "X$arg" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc due to us having libc/libc_r. + test X-lc = "X$arg" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test X-lc = "X$arg" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test X-lc = "X$arg" && continue + ;; + esac + elif test X-lc_r = "X$arg"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -mllvm) + prev=mllvm + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module=$wl-multi_module + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "'-no-install' is ignored for $host" + func_warning "assuming '-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -os2dllname) + prev=os2dllname + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # -fstack-protector* stack protector flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + # -specs=* GCC specs files + # -stdlib=* select c++ std lib with clang + # -fsanitize=* Clang/GCC memory and address 
sanitizer + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ + -specs=*|-fsanitize=*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + -Z*) + if test os2 = "`expr $host : '.*\(os2\)'`"; then + # OS/2 uses -Zxxx to specify OS/2-specific options + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case $arg in + -Zlinker | -Zstack) + prev=xcompiler + ;; + esac + continue + else + # Otherwise treat like 'Some other compiler flag' below + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + fi + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + test none = "$pic_object" || { + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + } + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test dlfiles = "$prev"; then + # This library was specified with -dlopen. 
+ func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test dlprefiles = "$prev"; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the '$prevarg' option requires an argument" + + if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname=$func_basename_result + libobjs_save=$libobjs + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + # Definition is injected by LT_CONFIG during libtool generation. + func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" + + func_dirname "$output" "/" "" + output_objdir=$func_dirname_result$objdir + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test lib = "$linkmode"; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
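+ # Illustrative example: if "$predeps $postdeps" held
+ # '-lgcc_s -lc -lgcc_s', the repeated -lgcc_s would end up in
+ # $specialdeplibs, so the duplicate-pruning at the end of the pass keeps
+ # every occurrence of it instead of collapsing them into one.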
+ pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=false + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... + if test lib,link = "$linkmode,$pass"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs=$tmp_deplibs + fi + + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass"; then + libs=$deplibs + deplibs= + fi + if test prog = "$linkmode"; then + case $pass in + dlopen) libs=$dlfiles ;; + dlpreopen) libs=$dlprefiles ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test lib,dlpreopen = "$linkmode,$pass"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs=$dlprefiles + fi + if test dlopen = "$pass"; then + # Collect dlpreopened libraries + save_deplibs=$deplibs + deplibs= + fi + + for deplib in $libs; do + lib= + found=false + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test lib != "$linkmode" && test prog != "$linkmode"; then + func_warning "'-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test lib = "$linkmode"; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext 
.so .a; do + # Search the libtool library + lib=$searchdir/lib$name$search_ext + if test -f "$lib"; then + if test .la = "$search_ext"; then + found=: + else + found=false + fi + break 2 + fi + done + done + if $found; then + # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll=$l + done + if test "X$ll" = "X$old_library"; then # only static version available + found=false + func_dirname "$lib" "" "." + ladir=$func_dirname_result + lib=$ladir/$old_library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + else + # deplib doesn't seem to be a libtool library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + ;; # -l + *.ltframework) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test conv = "$pass" && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + if test scan = "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "'-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test link = "$pass"; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. 
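+ # Illustrative example: with deplibs_check_method=pass_all (typical on
+ # GNU/Linux), a bare /usr/lib/libbar.a on a shared-library link line is
+ # accepted into $deplibs below with only the 'is not portable!' warning;
+ # under a match_pattern method it must match the configured regex first.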
+ case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=false + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=: + fi + ;; + pass_all) + valid_a_lib=: + ;; + esac + if $valid_a_lib; then + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + else + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + fi + ;; + esac + continue + ;; + prog) + if test link != "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + elif test prog = "$linkmode"; then + if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=: + continue + ;; + esac # case $deplib + + $found || test -f "$lib" \ + || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "'$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir=$func_dirname_result + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass" || + { test prog != "$linkmode" && test lib != "$linkmode"; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test conv = "$pass"; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test prog != "$linkmode" && test lib != "$linkmode"; then + func_fatal_error "'$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test yes = "$prefer_static_libs" || + test built,no = "$prefer_static_libs,$installed"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib=$l + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + + # This library was specified with -dlopen. + if test dlopen = "$pass"; then + test -z "$libdir" \ + && func_fatal_error "cannot -dlopen a convenience library: '$lib'" + if test -z "$dlname" || + test yes != "$dlopen_support" || + test no = "$build_libtool_libs" + then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of '$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir=$ladir + fi + ;; + esac + func_basename "$lib" + laname=$func_basename_result + + # Find the relevant object directory and library name. + if test yes = "$installed"; then + if test ! 
-f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library '$lib' was moved." + dir=$ladir + absdir=$abs_ladir + libdir=$abs_ladir + else + dir=$lt_sysroot$libdir + absdir=$lt_sysroot$libdir + fi + test yes = "$hardcode_automatic" && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir=$ladir + absdir=$abs_ladir + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir=$ladir/$objdir + absdir=$abs_ladir/$objdir + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test dlpreopen = "$pass"; then + if test -z "$libdir" && test prog = "$linkmode"; then + func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" + fi + case $host in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test lib = "$linkmode"; then + deplibs="$dir/$old_library $deplibs" + elif test prog,link = "$linkmode,$pass"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test prog = "$linkmode" && test link != "$pass"; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=false + if test no != "$link_all_deplibs" || test -z "$library_names" || + test no = "$build_libtool_libs"; then + linkalldeplibs=: + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if $linkalldeplibs; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test prog,link = "$linkmode,$pass"; then + if test -n "$library_names" && + { { test no = "$prefer_static_libs" || + test built,yes = "$prefer_static_libs,$installed"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then + # Make sure the rpath contains only unique directories. + case $temp_rpath: in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if $alldeplibs && + { test pass_all = "$deplibs_check_method" || + { test yes = "$build_libtool_libs" && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test built = "$use_static_libs" && test yes = "$installed"; then + use_static_libs=no + fi + if test -n "$library_names" && + { test no = "$use_static_libs" || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc* | *os2*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test no = "$installed"; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule= + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule=$dlpremoduletest + break + fi + done + if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then + echo + if test prog = "$linkmode"; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test lib = "$linkmode" && + test yes = "$hardcode_into_libs"; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
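+ # For instance, a dependency already installed in /usr/lib (listed in
+ # $sys_lib_dlsearch_path) adds nothing here, while one in /opt/foo/lib
+ # (hypothetical) puts its build-tree directory into $compile_rpath and
+ # its install directory into $finalize_rpath.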
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname=$1 + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname=$dlname + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc* | *os2*) + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + esac + eval soname=\"$soname_spec\" + else + soname=$realname + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot=$soname + func_basename "$soroot" + soname=$func_basename_result + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from '$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for '$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test prog = "$linkmode" || test relink != "$opt_mode"; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test no = "$hardcode_direct"; then + add=$dir/$linklib + case $host in + *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; + *-*-sysv4*uw2*) add_dir=-L$dir ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir=-L$dir ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we cannot + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library"; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add=$dir/$old_library + fi + elif test -n "$old_library"; then + add=$dir/$old_library + fi + fi + esac + elif test no = "$hardcode_minus_L"; then + case $host in + *-*-sunos*) add_shlibpath=$dir ;; + esac + add_dir=-L$dir + add=-l$name + elif test no = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + relink) + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$dir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$absdir + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test yes != "$lib_linked"; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test prog = "$linkmode"; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test yes != "$hardcode_direct" && + test yes != "$hardcode_minus_L" && + test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test prog = "$linkmode" || test relink = "$opt_mode"; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$libdir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$libdir + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add=-l$name + elif test yes = "$hardcode_automatic"; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib"; then + add=$inst_prefix_dir$libdir/$linklib + else + add=$libdir/$linklib + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir=-L$libdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + fi + + if test prog = "$linkmode"; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test prog = "$linkmode"; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test unsupported != "$hardcode_direct"; then + test -n "$old_library" && linklib=$old_library + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test yes = "$build_libtool_libs"; then + # Not a shared library + if test pass_all != "$deplibs_check_method"; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system cannot link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. 
But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test yes = "$module"; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test lib = "$linkmode"; then + if test -n "$dependency_libs" && + { test yes != "$hardcode_into_libs" || + test yes = "$build_old_libs" || + test yes = "$link_static"; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs=$temp_deplibs + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test no != "$link_all_deplibs"; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path=$deplib ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of '$dir'" + absdir=$dir + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names"; then + for tmp in $deplibrary_names; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl"; then + depdepl=$absdir/$objdir/$depdepl + darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" + func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" + path= + fi + fi + ;; + *) + path=-L$absdir/$objdir + ;; + esac + else + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "'$deplib' seems to be moved" + + path=-L$absdir + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test link = "$pass"; then + if test prog = "$linkmode"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs=$newdependency_libs + if test dlpreopen = "$pass"; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test dlopen != "$pass"; then + test conv = "$pass" || { + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + } + + if test prog,link = "$linkmode,$pass"; then + vars="compile_deplibs finalize_deplibs" + else + vars=deplibs + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + + # Add Sun CC postdeps if required: + test CXX = "$tagname" && { + case $host_os in + linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C++ 5.9 + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + + solaris*) + func_cc_basename "$CC" + case $func_cc_basename_result in + CC* | sunCC*) + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + esac + } + + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i= + ;; + esac + if test -n "$i"; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test prog = "$linkmode"; then + dlfiles=$newdlfiles + fi + if test prog = "$linkmode" || test lib = "$linkmode"; then + dlprefiles=$newdlprefiles + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "'-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "'-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs=$output + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form 'libNAME.la'. 
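+ # For example (hypothetical names), outputname=libfoo.la yields name=foo
+ # and, with the usual libname_spec='lib$name', libname=libfoo; a name
+ # such as plugin.la is only accepted when -module was given, and it keeps
+ # its own name unless $need_lib_prefix demands the 'lib' prefix.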
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test no = "$module" \ + && func_fatal_help "libtool library '$output' must begin with 'lib'" + + if test no != "$need_lib_prefix"; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test pass_all != "$deplibs_check_method"; then + func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test no = "$dlself" \ + || func_warning "'-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test 1 -lt "$#" \ + && func_warning "ignoring multiple '-rpath's for a libtool library" + + install_libdir=$1 + + oldlibs= + if test -z "$rpath"; then + if test yes = "$build_libtool_libs"; then + # Building a libtool convenience library. + # Some compilers have problems with a '.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "'-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs=$IFS; IFS=: + set dummy $vinfo 0 0 0 + shift + IFS=$save_ifs + + test -n "$7" && \ + func_fatal_help "too many parameters to '-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major=$1 + number_minor=$2 + number_revision=$3 + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # that has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|freebsd-elf|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_revision + ;; + freebsd-aout|qnx|sunos) + current=$number_major + revision=$number_minor + age=0 + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_minor + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type '$version_type'" + ;; + esac + ;; + no) + current=$1 + revision=$2 + age=$3 + ;; + esac + + # Check that each of the things are valid numbers. 
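+      # For illustration, the values checked below might be, e.g., current=5,
+      # revision=1, age=2 from '-version-info 5:1:2', or (on a gnu/linux-style
+      # version_type) current=3+2=5, age=2, revision=1 from
+      # '-version-number 3:2:1'; example values only, the mapping is
+      # host-dependent as shown above.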
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT '$current' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION '$revision' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE '$age' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE '$age' is greater than the current interface number '$current'" + func_fatal_error "'$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + # On Darwin other compilers + case $CC in + nagfor*) + verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + ;; + *) + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + esac + ;; + + freebsd-aout) + major=.$current + versuffix=.$current.$revision + ;; + + freebsd-elf) + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + irix | nonstopux) + if test no = "$lt_irix_increment"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring=$verstring_prefix$major.$revision + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test 0 -ne "$loop"; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring_prefix$major.$iface:$verstring + done + + # Before this point, $major must not contain '.'. + major=.$major + versuffix=$major.$revision + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=.$current.$age.$revision + verstring=$current.$age.$revision + + # Add in all the interfaces that we are compatible with. + loop=$age + while test 0 -ne "$loop"; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring:$iface.0 + done + + # Make executables depend on our current version. 
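+          # Illustration with example values only: for current=5, age=2,
+          # revision=1 the loop above yields verstring=5.2.1:3.0:4.0, and the
+          # append below extends it to 5.2.1:3.0:4.0:5.0.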
+ func_append verstring ":$current.0" + ;; + + qnx) + major=.$current + versuffix=.$current + ;; + + sco) + major=.$current + versuffix=.$current + ;; + + sunos) + major=.$current + versuffix=.$current.$revision + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 file systems. + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + + *) + func_fatal_configuration "unknown library version type '$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring=0.0 + ;; + esac + if test no = "$need_version"; then + versuffix= + else + versuffix=.0.0 + fi + fi + + # Remove version info from name if versioning should be avoided + if test yes,no = "$avoid_version,$need_version"; then + major= + versuffix= + verstring= + fi + + # Check to see if the archive will have undefined symbols. + if test yes = "$allow_undefined"; then + if test unsupported = "$allow_undefined_flag"; then + if test yes = "$build_old_libs"; then + func_warning "undefined symbols not allowed in $host shared libraries; building static only" + build_libtool_libs=no + else + func_fatal_error "can't build $host shared library unless -no-undefined is specified" + fi + fi + else + # Don't allow undefined symbols. + allow_undefined_flag=$no_undefined_flag + fi + + fi + + func_generate_dlsyms "$libname" "$libname" : + func_append libobjs " $symfileobj" + test " " = "$libobjs" && libobjs= + + if test relink != "$opt_mode"; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) + if test -n "$precious_files_regex"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles=$dlfiles + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles=$dlprefiles + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test yes = "$build_libtool_libs"; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test yes = "$build_libtool_need_lc"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release= + versuffix= + major= + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib=$potent_lib + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | $SED 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; + *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib= + ;; + esac + fi + if test -n "$a_deplib"; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib=$potent_lib # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs= + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + for i in $predeps $postdeps; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test none = "$deplibs_check_method"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test yes = "$droppeddeps"; then + if test yes = "$module"; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test no = "$allow_undefined"; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs=$new_libs + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test yes = "$build_libtool_libs"; then + # Remove $wl instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test yes = "$hardcode_into_libs"; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath=$finalize_rpath + test relink = "$opt_mode" || rpath=$compile_rpath$rpath + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath=$finalize_shlibpath + test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
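+      # Illustration, assuming a typical GNU/Linux library_names_spec:
+      # library_names could expand to 'libfoo.so.1.2.3 libfoo.so.1 libfoo.so',
+      # giving realname=libfoo.so.1.2.3, soname=libfoo.so.1 (via soname_spec)
+      # and linknames='libfoo.so.1 libfoo.so'; example values only.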
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname=$1 + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname=$realname + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib=$output_objdir/$realname + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols=$output_objdir/$libname.uexp + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + func_dll_def_p "$export_symbols" || { + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols=$export_symbols + export_symbols= + always_export_symbols=yes + } + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs=$IFS; IFS='~' + for cmd1 in $cmds; do + IFS=$save_ifs + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test yes = "$try_normal_branch" \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=$output_objdir/$output_la.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS=$save_ifs + if test -n "$export_symbols_regex" && test : != "$skipped_export"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test : != "$skipped_export" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs=$tmp_deplibs + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test yes = "$compiler_needs_object" && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test : != "$skipped_export" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
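+            # For illustration, the GNU ld script emitted below has the form
+            # (object file names are examples only):
+            #   INPUT (
+            #   a.o
+            #   b.o
+            #   )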
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then + output=$output_objdir/$output_la.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then + output=$output_objdir/$output_la.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test yes = "$compiler_needs_object"; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-$k.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test -z "$objlist" || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test 1 -eq "$k"; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-$k.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-$k.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + ${skipped_export-false} && { + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + } + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs=$IFS; IFS='~' + for cmd in $concat_cmds; do + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + ${skipped_export-false} && { + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + } + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. 
+ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs=$IFS; IFS='~' + for cmd in $cmds; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test yes = "$module" || test yes = "$export_dynamic"; then + # On all known operating systems, these are identical. + dlname=$soname + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "'-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object '$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj=$output + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # if reload_cmds runs $LD directly, get rid of -Wl from + # whole_archive_flag_spec and hope we can get by with turning comma + # into space. 
+ case $reload_cmds in + *\$LD[\ \$]*) wl= ;; + esac + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags + else + gentop=$output_objdir/${obj}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test yes = "$build_libtool_libs" || libobjs=$non_pic_objects + + # Create the old-style object. + reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs + + output=$obj + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + test yes = "$build_libtool_libs" || { + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + } + + if test -n "$pic_flag" || test default != "$pic_mode"; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output=$libobj + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "'-release' is ignored for programs" + + $preload \ + && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ + && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test CXX = "$tagname"; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " $wl-bind_at_load" + func_append finalize_command " $wl-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs=$new_libs + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath=$rpath + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath=$rpath + + if test -n "$libobjs" && test yes = "$build_old_libs"; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" false + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=: + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=false + ;; + *cygwin* | *mingw* ) + test yes = "$build_libtool_libs" || wrappers_required=false + ;; + *) + if test no = "$need_relink" || test yes != "$build_libtool_libs"; then + wrappers_required=false + fi + ;; + esac + $wrappers_required || { + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command=$compile_command$compile_rpath + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.$objext"; then + func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' + fi + + exit $exit_status + } + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test yes = "$no_install"; then + # We don't need to create a wrapper script. + link_command=$compile_var$compile_command$compile_rpath + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + case $hardcode_action,$fast_install in + relink,*) + # Fast installation is not supported + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "'$output' will be relinked during installation" + ;; + *,yes) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + ;; + *,no) + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + ;; + *,needless) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command= + ;; + esac + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource=$output_path/$objdir/lt-$output_name.c + cwrapper=$output_path/$output_name.exe + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. 
If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host"; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + case $build_libtool_libs in + convenience) + oldobjs="$libobjs_save $symfileobj" + addlibs=$convenience + build_libtool_libs=no + ;; + module) + oldobjs=$libobjs_save + addlibs=$old_convenience + build_libtool_libs=no + ;; + *) + oldobjs="$old_deplibs $non_pic_objects" + $preload && test -f "$symfileobj" \ + && func_append oldobjs " $symfileobj" + addlibs=$old_convenience + ;; + esac + + if test -n "$addlibs"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase=$func_basename_result + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! 
-f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj"; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test -z "$oldobjs"; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test yes = "$build_old_libs" && old_library=$libname.$libext + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test yes = "$hardcode_automatic"; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test yes = "$installed"; then + if test -z "$install_libdir"; then + break + fi + output=$output_objdir/${outputname}i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name=$func_basename_result + func_resolve_sysroot "$deplib" + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs=$newdependency_libs + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles=$newdlprefiles + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles=$newdlprefiles + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test -n "$bindir"; then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result/$dlname + else + # Otherwise fall back on heuristic. 
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test no,yes = "$installed,$need_relink"; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +if test link = "$opt_mode" || test relink = "$opt_mode"; then + func_mode_link ${1+"$@"} +fi + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $debug_cmd + + RM=$nonopt + files= + rmforce=false + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=: ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir=$func_dirname_result + if test . = "$dir"; then + odir=$objdir + else + odir=$dir/$objdir + fi + func_basename "$file" + name=$func_basename_result + test uninstall = "$opt_mode" && odir=$dir + + # Remember odir for removal later, being careful to avoid duplicates + if test clean = "$opt_mode"; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif $rmforce; then + continue + fi + + rmfiles=$file + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case $opt_mode in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && test none != "$pic_object"; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && test none != "$non_pic_object"; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test clean = "$opt_mode"; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.$objext" + if test yes = "$fast_install" && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name"; then + func_append rmfiles " $odir/lt-$noexename.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the $objdir's in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then + func_mode_uninstall ${1+"$@"} +fi + +test -z "$opt_mode" && { + help=$generic_help + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode '$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# where we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD="ld" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="g++" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. +archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-flat_namespace \$wl-undefined \${wl}suppress" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. 
+hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs="" + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects="" +postdep_objects="" +predeps="" +postdeps="" + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path="" + +# ### END LIBTOOL TAG CONFIG: CXX diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/local.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/local.exp new file mode 100644 index 0000000..4889cba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/local.exp @@ -0,0 +1,2 @@ +set CC_FOR_TARGET "xcrun clang" +set CXX_FOR_TARGET "g++" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man/Makefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man/Makefile new file mode 100644 index 0000000..4f21cc2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man/Makefile @@ -0,0 +1,559 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# man/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.1.0 +host_triplet = arm-apple-darwin21.1.0 +target_triplet = arm-apple-darwin21.1.0 +subdir = man +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) 
+am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +man3dir = $(mandir)/man3 +am__installdirs = "$(DESTDIR)$(man3dir)" +NROFF = nroff +MANS = $(man_MANS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = xcrun clang +CCAS = xcrun clang +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = xcrun clang -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # 
+MAKEINFO = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man +abs_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +abs_top_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = xcrun clang +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.1.0 +build_alias = +build_cpu = arm +build_os = darwin21.1.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.1.0 +host_alias = +host_cpu = arm +host_os = darwin21.1.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.1.0 +target_alias = +target_cpu = arm +target_os = darwin21.1.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. 
+top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign man/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign man/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-man3: $(man_MANS) + @$(NORMAL_INSTALL) + @list1=''; \ + list2='$(man_MANS)'; \ + test -n "$(man3dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.3[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \ + done; } + +uninstall-man3: + @$(NORMAL_UNINSTALL) + @list=''; test -n "$(man3dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ + sed -n '/\.3[a-z]*$$/p'; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in 
\ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(MANS) +installdirs: + for dir in "$(DESTDIR)$(man3dir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-man + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: install-man3 + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-man + +uninstall-man: uninstall-man3 + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + cscopelist-am ctags-am distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-man3 install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags-am uninstall uninstall-am uninstall-man \ + uninstall-man3 + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man/Makefile 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man/Makefile 2 new file mode 100644 index 0000000..4f21cc2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man/Makefile 2 @@ -0,0 +1,559 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# man/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.1.0 +host_triplet = arm-apple-darwin21.1.0 +target_triplet = arm-apple-darwin21.1.0 +subdir = man +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; 
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +man3dir = $(mandir)/man3 +am__installdirs = "$(DESTDIR)$(man3dir)" +NROFF = nroff +MANS = $(man_MANS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = xcrun clang +CCAS = xcrun clang +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = xcrun clang -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET 
= ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/man +abs_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +abs_top_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = xcrun clang +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.1.0 +build_alias = +build_cpu = arm +build_os = darwin21.1.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.1.0 +host_alias = +host_cpu = arm +host_os = darwin21.1.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.1.0 +target_alias = +target_cpu = arm +target_os = darwin21.1.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign man/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign man/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-man3: $(man_MANS) + @$(NORMAL_INSTALL) + @list1=''; \ + list2='$(man_MANS)'; \ + test -n "$(man3dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.3[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \ + done; } + +uninstall-man3: + @$(NORMAL_UNINSTALL) + @list=''; test -n "$(man3dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ + sed -n '/\.3[a-z]*$$/p'; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(MANS) +installdirs: + for dir in "$(DESTDIR)$(man3dir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-man + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: install-man3 + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-man + +uninstall-man: uninstall-man3 + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + cscopelist-am ctags-am distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-man3 install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags-am uninstall uninstall-am uninstall-man \ + uninstall-man3 + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/.deps/.dirstamp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/.deps/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/.deps/.dirstamp 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/.deps/.dirstamp 2 new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/.dirstamp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/.deps/.dirstamp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/.deps/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/.deps/.dirstamp 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/.deps/.dirstamp 2 new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/.dirstamp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/ffi 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/ffi 2.lo new file mode 100644 index 0000000..45384d2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/ffi 2.lo @@ -0,0 +1,12 @@ +# src/arm/ffi.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/ffi.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/ffi.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/ffi.lo new file mode 100644 index 0000000..45384d2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/ffi.lo @@ -0,0 +1,12 @@ +# src/arm/ffi.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/ffi.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/sysv 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/sysv 2.lo new file mode 100644 index 0000000..74823df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/sysv 2.lo @@ -0,0 +1,12 @@ +# src/arm/sysv.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. 
+pic_object='.libs/sysv.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/sysv.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/sysv.lo new file mode 100644 index 0000000..74823df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/arm/sysv.lo @@ -0,0 +1,12 @@ +# src/arm/sysv.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/sysv.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/closures 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/closures 2.lo new file mode 100644 index 0000000..613cbdf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/closures 2.lo @@ -0,0 +1,12 @@ +# src/closures.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/closures.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/closures.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/closures.lo new file mode 100644 index 0000000..613cbdf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/closures.lo @@ -0,0 +1,12 @@ +# src/closures.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/closures.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/java_raw_api 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/java_raw_api 2.lo new file mode 100644 index 0000000..0b99622 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/java_raw_api 2.lo @@ -0,0 +1,12 @@ +# src/java_raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/java_raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/java_raw_api.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/java_raw_api.lo new file mode 100644 index 0000000..0b99622 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/java_raw_api.lo @@ -0,0 +1,12 @@ +# src/java_raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. 
+pic_object='.libs/java_raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/prep_cif 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/prep_cif 2.lo new file mode 100644 index 0000000..1ab7932 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/prep_cif 2.lo @@ -0,0 +1,12 @@ +# src/prep_cif.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/prep_cif.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/prep_cif.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/prep_cif.lo new file mode 100644 index 0000000..1ab7932 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/prep_cif.lo @@ -0,0 +1,12 @@ +# src/prep_cif.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/prep_cif.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/raw_api 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/raw_api 2.lo new file mode 100644 index 0000000..e7e6231 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/raw_api 2.lo @@ -0,0 +1,12 @@ +# src/raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/raw_api.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/raw_api.lo new file mode 100644 index 0000000..e7e6231 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/raw_api.lo @@ -0,0 +1,12 @@ +# src/raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/types 2.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/types 2.lo new file mode 100644 index 0000000..40b7aec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/types 2.lo @@ -0,0 +1,12 @@ +# src/types.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. 
+pic_object='.libs/types.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/types.lo b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/types.lo new file mode 100644 index 0000000..40b7aec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/src/types.lo @@ -0,0 +1,12 @@ +# src/types.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/types.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/stamp-h1 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/stamp-h1 new file mode 100644 index 0000000..e43c201 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/stamp-h1 @@ -0,0 +1 @@ +timestamp for fficonfig.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/stamp-h1 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/stamp-h1 2 new file mode 100644 index 0000000..e43c201 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/stamp-h1 2 @@ -0,0 +1 @@ +timestamp for fficonfig.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite/Makefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite/Makefile new file mode 100644 index 0000000..68b98e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite/Makefile @@ -0,0 +1,645 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# testsuite/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.1.0 +host_triplet = arm-apple-darwin21.1.0 +target_triplet = arm-apple-darwin21.1.0 +subdir = testsuite +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DEJATOOL = $(PACKAGE) +RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir +EXPECT = expect +RUNTEST = runtest 
+am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = xcrun clang +CCAS = xcrun clang +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = xcrun clang -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite +abs_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +abs_top_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = xcrun clang +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.1.0 +build_alias = +build_cpu = arm +build_os = darwin21.1.0 +build_vendor = apple +builddir = . 
+datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.1.0 +host_alias = +host_cpu = arm +host_os = darwin21.1.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.1.0 +target_alias = +target_cpu = arm +target_os = darwin21.1.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign dejagnu +EXTRA_DEJAGNU_SITE_CONFIG = ../local.exp +CLEANFILES = *.exe core* *.log *.sum +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ 
+libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ 
+libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign testsuite/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign testsuite/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +check-DEJAGNU: site.exp + srcdir='$(srcdir)'; export srcdir; \ + EXPECT=$(EXPECT); export EXPECT; \ + if $(SHELL) -c "$(RUNTEST) --version" > /dev/null 2>&1; then \ + exit_status=0; l='$(DEJATOOL)'; for tool in $$l; do \ + if $(RUNTEST) $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \ + then :; else exit_status=1; fi; \ + done; \ + else echo "WARNING: could not find '$(RUNTEST)'" 1>&2; :;\ + fi; \ + exit $$exit_status +site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG) + @echo 'Making a new site.exp file ...' + @echo '## these variables are automatically generated by make ##' >site.tmp + @echo '# Do not edit here. 
If you wish to override these values' >>site.tmp + @echo '# edit the last section' >>site.tmp + @echo 'set srcdir "$(srcdir)"' >>site.tmp + @echo "set objdir `pwd`" >>site.tmp + @echo 'set build_alias "$(build_alias)"' >>site.tmp + @echo 'set build_triplet $(build_triplet)' >>site.tmp + @echo 'set host_alias "$(host_alias)"' >>site.tmp + @echo 'set host_triplet $(host_triplet)' >>site.tmp + @echo 'set target_alias "$(target_alias)"' >>site.tmp + @echo 'set target_triplet $(target_triplet)' >>site.tmp + @list='$(EXTRA_DEJAGNU_SITE_CONFIG)'; for f in $$list; do \ + echo "## Begin content included from file $$f. Do not modify. ##" \ + && cat `test -f "$$f" || echo '$(srcdir)/'`$$f \ + && echo "## End content included from file $$f. ##" \ + || exit 1; \ + done >> site.tmp + @echo "## End of auto-generated content; you can edit from here. ##" >> site.tmp + @if test -f site.exp; then \ + sed -e '1,/^## End of auto-generated content.*##/d' site.exp >> site.tmp; \ + fi + @-rm -f site.bak + @test ! -f site.exp || mv site.exp site.bak + @mv site.tmp site.exp + +distclean-DEJAGNU: + -rm -f site.exp site.bak + -l='$(DEJATOOL)'; for tool in $$l; do \ + rm -f $$tool.sum $$tool.log; \ + done + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-DEJAGNU +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-DEJAGNU distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: all all-am check check-DEJAGNU check-am clean clean-generic \ + clean-libtool cscopelist-am ctags-am distclean \ + distclean-DEJAGNU distclean-generic distclean-libtool distdir \ + dvi dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite/Makefile 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite/Makefile 2 new file mode 100644 index 0000000..68b98e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite/Makefile 2 @@ -0,0 +1,645 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# testsuite/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.1.0 +host_triplet = arm-apple-darwin21.1.0 +target_triplet = arm-apple-darwin21.1.0 +subdir = testsuite +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DEJATOOL = $(PACKAGE) +RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir +EXPECT = expect +RUNTEST = runtest 
+am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = xcrun clang +CCAS = xcrun clang +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = xcrun clang -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21/testsuite +abs_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +abs_top_builddir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi-universal-darwin21 +abs_top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = xcrun clang +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.1.0 +build_alias = +build_cpu = arm +build_os = darwin21.1.0 +build_vendor = apple +builddir = . 
+datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.1.0 +host_alias = +host_cpu = arm +host_os = darwin21.1.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.1.0 +target_alias = +target_cpu = arm +target_os = darwin21.1.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Documents/zephyr/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign dejagnu +EXTRA_DEJAGNU_SITE_CONFIG = ../local.exp +CLEANFILES = *.exe core* *.log *.sum +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ 
+libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ 
+libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign testsuite/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign testsuite/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +check-DEJAGNU: site.exp + srcdir='$(srcdir)'; export srcdir; \ + EXPECT=$(EXPECT); export EXPECT; \ + if $(SHELL) -c "$(RUNTEST) --version" > /dev/null 2>&1; then \ + exit_status=0; l='$(DEJATOOL)'; for tool in $$l; do \ + if $(RUNTEST) $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \ + then :; else exit_status=1; fi; \ + done; \ + else echo "WARNING: could not find '$(RUNTEST)'" 1>&2; :;\ + fi; \ + exit $$exit_status +site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG) + @echo 'Making a new site.exp file ...' + @echo '## these variables are automatically generated by make ##' >site.tmp + @echo '# Do not edit here. 
If you wish to override these values' >>site.tmp + @echo '# edit the last section' >>site.tmp + @echo 'set srcdir "$(srcdir)"' >>site.tmp + @echo "set objdir `pwd`" >>site.tmp + @echo 'set build_alias "$(build_alias)"' >>site.tmp + @echo 'set build_triplet $(build_triplet)' >>site.tmp + @echo 'set host_alias "$(host_alias)"' >>site.tmp + @echo 'set host_triplet $(host_triplet)' >>site.tmp + @echo 'set target_alias "$(target_alias)"' >>site.tmp + @echo 'set target_triplet $(target_triplet)' >>site.tmp + @list='$(EXTRA_DEJAGNU_SITE_CONFIG)'; for f in $$list; do \ + echo "## Begin content included from file $$f. Do not modify. ##" \ + && cat `test -f "$$f" || echo '$(srcdir)/'`$$f \ + && echo "## End content included from file $$f. ##" \ + || exit 1; \ + done >> site.tmp + @echo "## End of auto-generated content; you can edit from here. ##" >> site.tmp + @if test -f site.exp; then \ + sed -e '1,/^## End of auto-generated content.*##/d' site.exp >> site.tmp; \ + fi + @-rm -f site.bak + @test ! -f site.exp || mv site.exp site.bak + @mv site.tmp site.exp + +distclean-DEJAGNU: + -rm -f site.exp site.bak + -l='$(DEJATOOL)'; for tool in $$l; do \ + rm -f $$tool.sum $$tool.log; \ + done + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-DEJAGNU +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-DEJAGNU distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: all all-am check check-DEJAGNU check-am clean clean-generic \ + clean-libtool cscopelist-am ctags-am distclean \ + distclean-DEJAGNU distclean-generic distclean-libtool distdir \ + dvi dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd 2.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd 2.mk new file mode 100644 index 0000000..ab66256 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd 2.mk @@ -0,0 +1,40 @@ +# -*- makefile -*- +# +# Makefile for BSD systems +# + +LOCAL_LIBS += ${LIBFFI} -lpthread + +LIBFFI_CFLAGS = ${FFI_MMAP_EXEC} -pthread +LIBFFI_BUILD_DIR = ${.CURDIR}/libffi-${arch} +INCFLAGS := -I${LIBFFI_BUILD_DIR}/include -I${INCFLAGS} + +.if ${srcdir} == "." + LIBFFI_SRC_DIR := ${.CURDIR}/libffi +.else + LIBFFI_SRC_DIR := ${srcdir}/libffi +.endif + + +LIBFFI = ${LIBFFI_BUILD_DIR}/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = ${LIBFFI_SRC_DIR}/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): ${LIBFFI} + +$(LIBFFI): + @mkdir -p ${LIBFFI_BUILD_DIR} + @if [ ! -f $(LIBFFI_SRC_DIR)/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! 
-f ${LIBFFI_BUILD_DIR}/Makefile ]; then \ + echo "Configuring libffi"; \ + cd ${LIBFFI_BUILD_DIR} && \ + /usr/bin/env CC="${CC}" LD="${LD}" CFLAGS="${LIBFFI_CFLAGS}" GREP_OPTIONS="" \ + /bin/sh ${LIBFFI_CONFIGURE} ${LIBFFI_HOST} > /dev/null; \ + fi + @cd ${LIBFFI_BUILD_DIR} && ${MAKE} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd.mk new file mode 100644 index 0000000..ab66256 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd.mk @@ -0,0 +1,40 @@ +# -*- makefile -*- +# +# Makefile for BSD systems +# + +LOCAL_LIBS += ${LIBFFI} -lpthread + +LIBFFI_CFLAGS = ${FFI_MMAP_EXEC} -pthread +LIBFFI_BUILD_DIR = ${.CURDIR}/libffi-${arch} +INCFLAGS := -I${LIBFFI_BUILD_DIR}/include -I${INCFLAGS} + +.if ${srcdir} == "." + LIBFFI_SRC_DIR := ${.CURDIR}/libffi +.else + LIBFFI_SRC_DIR := ${srcdir}/libffi +.endif + + +LIBFFI = ${LIBFFI_BUILD_DIR}/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = ${LIBFFI_SRC_DIR}/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): ${LIBFFI} + +$(LIBFFI): + @mkdir -p ${LIBFFI_BUILD_DIR} + @if [ ! -f $(LIBFFI_SRC_DIR)/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! -f ${LIBFFI_BUILD_DIR}/Makefile ]; then \ + echo "Configuring libffi"; \ + cd ${LIBFFI_BUILD_DIR} && \ + /usr/bin/env CC="${CC}" LD="${LD}" CFLAGS="${LIBFFI_CFLAGS}" GREP_OPTIONS="" \ + /bin/sh ${LIBFFI_CONFIGURE} ${LIBFFI_HOST} > /dev/null; \ + fi + @cd ${LIBFFI_BUILD_DIR} && ${MAKE} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin 2.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin 2.mk new file mode 100644 index 0000000..893a8e1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin 2.mk @@ -0,0 +1,105 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +CCACHE := $(shell type -p ccache) +BUILD_DIR := $(shell pwd) + +INCFLAGS += -I"$(BUILD_DIR)" + +# Work out which arches we need to compile the lib for +ARCHES := +ARCHFLAGS ?= $(filter -arch %, $(CFLAGS)) + +ifneq ($(findstring -arch ppc,$(ARCHFLAGS)),) + ARCHES += ppc +endif + +ifneq ($(findstring -arch i386,$(ARCHFLAGS)),) + ARCHES += i386 +endif + +ifneq ($(findstring -arch x86_64,$(ARCHFLAGS)),) + ARCHES += x86_64 +endif + +ifeq ($(strip $(ARCHES)),) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +# Just build the one (default) architecture +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$(@D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! -f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + /usr/bin/env CC="$(CC)" LD="$(LD)" CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + /bin/sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + cd "$(LIBFFI_BUILD_DIR)" && $(MAKE) + +else +LIBTARGETS = $(foreach arch,$(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + +# Build a fat binary and assemble +build_ffi = \ + mkdir -p "$(BUILD_DIR)"/libffi-$(1); \ + (if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi); \ + (if [ ! 
-f "$(BUILD_DIR)"/libffi-$(1)/Makefile ]; then \ + echo "Configuring libffi for $(1)"; \ + cd "$(BUILD_DIR)"/libffi-$(1) && \ + env CC="$(CCACHE) $(CC)" CFLAGS="-arch $(1) $(LIBFFI_CFLAGS)" LDFLAGS="-arch $(1)" \ + $(LIBFFI_CONFIGURE) --host=$(1)-apple-darwin > /dev/null; \ + fi); \ + $(MAKE) -C "$(BUILD_DIR)"/libffi-$(1) + +target_ffi = "$(BUILD_DIR)"/libffi-$(1)/.libs/libffi_convenience.a:; $(call build_ffi,$(1)) + +# Work out which arches we need to compile the lib for +ifneq ($(findstring ppc,$(ARCHES)),) + $(call target_ffi,ppc) +endif + +ifneq ($(findstring i386,$(ARCHES)),) + $(call target_ffi,i386) +endif + +ifneq ($(findstring x86_64,$(ARCHES)),) + $(call target_ffi,x86_64) +endif + + +$(LIBFFI): $(LIBTARGETS) + # Assemble into a FAT (x86_64, i386, ppc) library + @mkdir -p "$(@D)" + /usr/bin/libtool -static -o $@ \ + $(foreach arch, $(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + @mkdir -p "$(LIBFFI_BUILD_DIR)"/include + $(RM) "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffi.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffi.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffi.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffitarget.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffitarget.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffitarget.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffitarget.h + +endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin.mk new file mode 100644 index 0000000..893a8e1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin.mk @@ -0,0 +1,105 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +CCACHE := $(shell type -p ccache) +BUILD_DIR := $(shell pwd) + +INCFLAGS += -I"$(BUILD_DIR)" + +# Work out which arches we need to compile the lib for +ARCHES := +ARCHFLAGS ?= $(filter -arch %, $(CFLAGS)) + +ifneq ($(findstring -arch ppc,$(ARCHFLAGS)),) + ARCHES += ppc +endif + +ifneq ($(findstring -arch i386,$(ARCHFLAGS)),) + ARCHES += i386 +endif + +ifneq ($(findstring -arch x86_64,$(ARCHFLAGS)),) + ARCHES += x86_64 +endif + +ifeq ($(strip $(ARCHES)),) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +# Just build the one (default) architecture +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$(@D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! -f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + /usr/bin/env CC="$(CC)" LD="$(LD)" CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + /bin/sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + cd "$(LIBFFI_BUILD_DIR)" && $(MAKE) + +else +LIBTARGETS = $(foreach arch,$(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + +# Build a fat binary and assemble +build_ffi = \ + mkdir -p "$(BUILD_DIR)"/libffi-$(1); \ + (if [ ! 
-f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi); \ + (if [ ! -f "$(BUILD_DIR)"/libffi-$(1)/Makefile ]; then \ + echo "Configuring libffi for $(1)"; \ + cd "$(BUILD_DIR)"/libffi-$(1) && \ + env CC="$(CCACHE) $(CC)" CFLAGS="-arch $(1) $(LIBFFI_CFLAGS)" LDFLAGS="-arch $(1)" \ + $(LIBFFI_CONFIGURE) --host=$(1)-apple-darwin > /dev/null; \ + fi); \ + $(MAKE) -C "$(BUILD_DIR)"/libffi-$(1) + +target_ffi = "$(BUILD_DIR)"/libffi-$(1)/.libs/libffi_convenience.a:; $(call build_ffi,$(1)) + +# Work out which arches we need to compile the lib for +ifneq ($(findstring ppc,$(ARCHES)),) + $(call target_ffi,ppc) +endif + +ifneq ($(findstring i386,$(ARCHES)),) + $(call target_ffi,i386) +endif + +ifneq ($(findstring x86_64,$(ARCHES)),) + $(call target_ffi,x86_64) +endif + + +$(LIBFFI): $(LIBTARGETS) + # Assemble into a FAT (x86_64, i386, ppc) library + @mkdir -p "$(@D)" + /usr/bin/libtool -static -o $@ \ + $(foreach arch, $(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + @mkdir -p "$(LIBFFI_BUILD_DIR)"/include + $(RM) "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffi.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffi.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffi.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffitarget.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffitarget.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffitarget.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffitarget.h + +endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu 2.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu 2.mk new file mode 100644 index 0000000..473b8fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu 2.mk @@ -0,0 +1,32 @@ +# -*- makefile -*- +# +# Common definitions for all systems that use GNU make +# + + +# Tack the extra deps onto the autogenerated variables +LOCAL_LIBS += $(LIBFFI) +BUILD_DIR = $(shell pwd) +LIBFFI_CFLAGS = $(FFI_MMAP_EXEC) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +INCFLAGS := -I"$(LIBFFI_BUILD_DIR)"/include $(INCFLAGS) + +ifeq ($(srcdir),.) + LIBFFI_SRC_DIR := $(shell pwd)/libffi +else ifeq ($(srcdir),..) 
+ LIBFFI_SRC_DIR := $(shell pwd)/../libffi +else + LIBFFI_SRC_DIR := $(realpath $(srcdir)/libffi) +endif + +LIBFFI = "$(LIBFFI_BUILD_DIR)"/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = "$(LIBFFI_SRC_DIR)"/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): $(LIBFFI) + +# +# libffi.mk or libffi.darwin.mk contains rules for building the actual library +# + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu.mk new file mode 100644 index 0000000..473b8fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu.mk @@ -0,0 +1,32 @@ +# -*- makefile -*- +# +# Common definitions for all systems that use GNU make +# + + +# Tack the extra deps onto the autogenerated variables +LOCAL_LIBS += $(LIBFFI) +BUILD_DIR = $(shell pwd) +LIBFFI_CFLAGS = $(FFI_MMAP_EXEC) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +INCFLAGS := -I"$(LIBFFI_BUILD_DIR)"/include $(INCFLAGS) + +ifeq ($(srcdir),.) + LIBFFI_SRC_DIR := $(shell pwd)/libffi +else ifeq ($(srcdir),..) + LIBFFI_SRC_DIR := $(shell pwd)/../libffi +else + LIBFFI_SRC_DIR := $(realpath $(srcdir)/libffi) +endif + +LIBFFI = "$(LIBFFI_BUILD_DIR)"/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = "$(LIBFFI_SRC_DIR)"/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): $(LIBFFI) + +# +# libffi.mk or libffi.darwin.mk contains rules for building the actual library +# + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.mk new file mode 100644 index 0000000..3b58227 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.mk @@ -0,0 +1,18 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$(@D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! -f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + env CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + $(MAKE) -C "$(LIBFFI_BUILD_DIR)" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc 2.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc 2.mk new file mode 100644 index 0000000..8cd4603 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc 2.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." 
+LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc.mk new file mode 100644 index 0000000..8cd4603 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64 2.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64 2.mk new file mode 100644 index 0000000..6f3dbbc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64 2.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc64 + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64.mk b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64.mk new file mode 100644 index 0000000..6f3dbbc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc64 + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor 2.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor 2.yml new file mode 100644 index 0000000..ece8a94 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor 2.yml @@ -0,0 +1,66 @@ +shallow_clone: true + +# We're currently only testing libffi built with Microsoft's +# tools. +# This matrix should be expanded to include at least: +# 32- and 64-bit gcc/cygwin +# 32- and 64-bit gcc/mingw +# 32- and 64-bit clang/mingw +# and perhaps more. 
+ +image: Visual Studio 2017 +platform: + - x64 + - x86 + - arm + - arm64 + +environment: + global: + CYG_ROOT: C:/cygwin + CYG_CACHE: C:/cygwin/var/cache/setup + CYG_MIRROR: http://mirrors.kernel.org/sourceware/cygwin/ + matrix: + - VSVER: 15 + +install: + - ps: >- + If ($env:Platform -Match "x86") { + $env:VCVARS_PLATFORM="x86" + $env:BUILD="i686-pc-cygwin" + $env:HOST="i686-pc-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh" + $env:SRC_ARCHITECTURE="x86" + } ElseIf ($env:Platform -Match "arm64") { + $env:VCVARS_PLATFORM="x86_arm64" + $env:BUILD="i686-pc-cygwin" + $env:HOST="aarch64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm64" + $env:SRC_ARCHITECTURE="aarch64" + } ElseIf ($env:Platform -Match "arm") { + $env:VCVARS_PLATFORM="x86_arm" + $env:BUILD="i686-pc-cygwin" + $env:HOST="arm-w32-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm" + $env:SRC_ARCHITECTURE="arm" + } Else { + $env:VCVARS_PLATFORM="amd64" + $env:BUILD="x86_64-w64-cygwin" + $env:HOST="x86_64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -m64" + $env:SRC_ARCHITECTURE="x86" + } + - 'appveyor DownloadFile https://cygwin.com/setup-x86.exe -FileName setup.exe' + - 'setup.exe -qnNdO -R "%CYG_ROOT%" -s "%CYG_MIRROR%" -l "%CYG_CACHE%" -P dejagnu >NUL' + - '%CYG_ROOT%/bin/bash -lc "cygcheck -dc cygwin"' + - echo call VsDevCmd to set VS150COMNTOOLS + - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat" + - ps: $env:VSCOMNTOOLS=(Get-Content ("env:VS" + "$env:VSVER" + "0COMNTOOLS")) + - echo "Using Visual Studio %VSVER%.0 at %VSCOMNTOOLS%" + - call "%VSCOMNTOOLS%..\..\vc\Auxiliary\Build\vcvarsall.bat" %VCVARS_PLATFORM% + +build_script: + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./autogen.sh;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./configure CC='%MSVCC%' CXX='%MSVCC%' LD='link' CPP='cl -nologo -EP' CXXCPP='cl -nologo -EP' CPPFLAGS='-DFFI_BUILDING_DLL' AR='/cygdrive/c/projects/libffi/.travis/ar-lib lib' NM='dumpbin -symbols' STRIP=':' --build=$BUILD --host=$HOST;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp src/%SRC_ARCHITECTURE%/ffitarget.h include; make; find .;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp `find . -name 'libffi-?.dll'` $HOST/testsuite/; make check; cat `find ./ -name libffi.log`)" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor.yml new file mode 100644 index 0000000..ece8a94 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor.yml @@ -0,0 +1,66 @@ +shallow_clone: true + +# We're currently only testing libffi built with Microsoft's +# tools. +# This matrix should be expanded to include at least: +# 32- and 64-bit gcc/cygwin +# 32- and 64-bit gcc/mingw +# 32- and 64-bit clang/mingw +# and perhaps more. 
+ +image: Visual Studio 2017 +platform: + - x64 + - x86 + - arm + - arm64 + +environment: + global: + CYG_ROOT: C:/cygwin + CYG_CACHE: C:/cygwin/var/cache/setup + CYG_MIRROR: http://mirrors.kernel.org/sourceware/cygwin/ + matrix: + - VSVER: 15 + +install: + - ps: >- + If ($env:Platform -Match "x86") { + $env:VCVARS_PLATFORM="x86" + $env:BUILD="i686-pc-cygwin" + $env:HOST="i686-pc-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh" + $env:SRC_ARCHITECTURE="x86" + } ElseIf ($env:Platform -Match "arm64") { + $env:VCVARS_PLATFORM="x86_arm64" + $env:BUILD="i686-pc-cygwin" + $env:HOST="aarch64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm64" + $env:SRC_ARCHITECTURE="aarch64" + } ElseIf ($env:Platform -Match "arm") { + $env:VCVARS_PLATFORM="x86_arm" + $env:BUILD="i686-pc-cygwin" + $env:HOST="arm-w32-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm" + $env:SRC_ARCHITECTURE="arm" + } Else { + $env:VCVARS_PLATFORM="amd64" + $env:BUILD="x86_64-w64-cygwin" + $env:HOST="x86_64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -m64" + $env:SRC_ARCHITECTURE="x86" + } + - 'appveyor DownloadFile https://cygwin.com/setup-x86.exe -FileName setup.exe' + - 'setup.exe -qnNdO -R "%CYG_ROOT%" -s "%CYG_MIRROR%" -l "%CYG_CACHE%" -P dejagnu >NUL' + - '%CYG_ROOT%/bin/bash -lc "cygcheck -dc cygwin"' + - echo call VsDevCmd to set VS150COMNTOOLS + - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat" + - ps: $env:VSCOMNTOOLS=(Get-Content ("env:VS" + "$env:VSVER" + "0COMNTOOLS")) + - echo "Using Visual Studio %VSVER%.0 at %VSCOMNTOOLS%" + - call "%VSCOMNTOOLS%..\..\vc\Auxiliary\Build\vcvarsall.bat" %VCVARS_PLATFORM% + +build_script: + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./autogen.sh;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./configure CC='%MSVCC%' CXX='%MSVCC%' LD='link' CPP='cl -nologo -EP' CXXCPP='cl -nologo -EP' CPPFLAGS='-DFFI_BUILDING_DLL' AR='/cygdrive/c/projects/libffi/.travis/ar-lib lib' NM='dumpbin -symbols' STRIP=':' --build=$BUILD --host=$HOST;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp src/%SRC_ARCHITECTURE%/ffitarget.h include; make; find .;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp `find . 
-name 'libffi-?.dll'` $HOST/testsuite/; make check; cat `find ./ -name libffi.log`)" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes new file mode 100644 index 0000000..f7d3833 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +*.sln text eol=crlf +*.vcxproj* text eol=crlf diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes 2 new file mode 100644 index 0000000..f7d3833 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes 2 @@ -0,0 +1,4 @@ +* text=auto + +*.sln text eol=crlf +*.vcxproj* text eol=crlf diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template 2.md b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template 2.md new file mode 100644 index 0000000..e197e2c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template 2.md @@ -0,0 +1,10 @@ +## System Details + + + + +## Problems Description + + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template.md b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template.md new file mode 100644 index 0000000..e197e2c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template.md @@ -0,0 +1,10 @@ +## System Details + + + + +## Problems Description + + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore new file mode 100644 index 0000000..5d39689 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore @@ -0,0 +1,38 @@ +.libs +.deps +*.o +*.lo +.dirstamp +*.la +Makefile +!testsuite/libffi.bhaible/Makefile +Makefile.in +aclocal.m4 +compile +!.travis/compile +configure +depcomp +doc/libffi.info +*~ +fficonfig.h.in +fficonfig.h +include/ffi.h +include/ffitarget.h +install-sh +libffi.pc +libtool +libtool-ldflags +ltmain.sh +m4/libtool.m4 +m4/lt*.m4 +mdate-sh +missing +stamp-h1 +libffi*gz +autom4te.cache +libffi.xcodeproj/xcuserdata +libffi.xcodeproj/project.xcworkspace +build_*/ +darwin_*/ +src/arm/trampoline.S +**/texinfo.tex diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore 2 new file mode 100644 index 0000000..5d39689 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore 2 @@ -0,0 +1,38 @@ +.libs +.deps +*.o +*.lo +.dirstamp +*.la +Makefile +!testsuite/libffi.bhaible/Makefile +Makefile.in +aclocal.m4 +compile +!.travis/compile +configure +depcomp +doc/libffi.info +*~ +fficonfig.h.in +fficonfig.h +include/ffi.h +include/ffitarget.h +install-sh +libffi.pc +libtool +libtool-ldflags +ltmain.sh +m4/libtool.m4 +m4/lt*.m4 +mdate-sh +missing +stamp-h1 +libffi*gz +autom4te.cache +libffi.xcodeproj/xcuserdata +libffi.xcodeproj/project.xcworkspace +build_*/ +darwin_*/ +src/arm/trampoline.S +**/texinfo.tex diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis 2.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis 2.yml new file mode 100644 index 0000000..8db2ddf --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis 2.yml @@ -0,0 +1,83 @@ +--- +sudo: required + +language: cpp + +# For qemu-powered targets, get the list of supported processors from +# travis by setting QEMU_CPU=help, then set -mcpu= for the compilers +# accordingly. + +matrix: + include: + - os: linux + env: HOST=powerpc-eabisim RUNTESTFLAGS="--target_board powerpc-eabisim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=or1k-elf RUNTESTFLAGS="--target_board or1k-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=m32r-elf RUNTESTFLAGS="--target_board m32r-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=bfin-elf RUNTESTFLAGS="--target_board bfin-sim" DEJAGNU="/opt/.travis/site.exp" +# This configuration is still using the native x86 toolchain? +# - os: osx +# env: HOST=aarch64-apple-darwin13 + - os: osx + env: HOST=x86_64-apple-darwin10 + - os: linux + env: HOST=x86_64-w64-mingw32 MEVAL='export CC="x86_64-w64-mingw32-gcc" && CXX="x86_64-w64-mingw32-g++" RUNTESTFLAGS="--target_board wine-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" CONFIGURE_OPTIONS=--disable-shared LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=sh4-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/sh4-linux-gnu + - os: linux + env: HOST=alpha-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/alpha-linux-gnu + - os: linux + env: HOST=m68k-linux-gnu MEVAL='export CC="m68k-linux-gnu-gcc-8 -mcpu=547x" && CXX="m68k-linux-gnu-g++-8 -mcpu=547x"' CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/m68k-linux-gnu QEMU_CPU=cfv4e + - os: linux + arch: s390x + env: HOST=s390x-linux-gnu + - os: linux + arch: ppc64le + env: HOST=ppc64le-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + compiler: clang + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O0" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2 -fomit-frame-pointer" +# The sparc64 linux system in the GCC compile farm is non-responsive. +# - os: linux +# env: HOST=sparc64-linux-gnu +# The mips64 linux system in the GCC compile farm is not allowing logins +# - os: linux +# env: HOST=mips64el-linux-gnu + - os: linux + compiler: gcc + env: HOST=i386-pc-linux-gnu MEVAL='export CC="$CC -m32" && CXX="$CXX -m32"' + - os: linux + compiler: gcc + - os: linux + compiler: gcc + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + compiler: clang + - os: linux + compiler: clang + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + env: HOST=moxie-elf MEVAL='export PATH=/opt/moxielogic/bin:$PATH && CC=moxie-elf-gcc && CXX=moxie-elf-g++' LDFLAGS=-Tsim.ld RUNTESTFLAGS="--target_board moxie-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" + +before_install: + - if test x"$MEVAL" != x; then eval ${MEVAL}; fi + +install: + - travis_wait 30 ./.travis/install.sh + +script: + - if ! 
test x"$MEVAL" = x; then eval ${MEVAL}; fi + - travis_wait 115 sleep infinity & + - ./.travis/build.sh diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis.yml new file mode 100644 index 0000000..8db2ddf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis.yml @@ -0,0 +1,83 @@ +--- +sudo: required + +language: cpp + +# For qemu-powered targets, get the list of supported processors from +# travis by setting QEMU_CPU=help, then set -mcpu= for the compilers +# accordingly. + +matrix: + include: + - os: linux + env: HOST=powerpc-eabisim RUNTESTFLAGS="--target_board powerpc-eabisim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=or1k-elf RUNTESTFLAGS="--target_board or1k-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=m32r-elf RUNTESTFLAGS="--target_board m32r-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=bfin-elf RUNTESTFLAGS="--target_board bfin-sim" DEJAGNU="/opt/.travis/site.exp" +# This configuration is still using the native x86 toolchain? +# - os: osx +# env: HOST=aarch64-apple-darwin13 + - os: osx + env: HOST=x86_64-apple-darwin10 + - os: linux + env: HOST=x86_64-w64-mingw32 MEVAL='export CC="x86_64-w64-mingw32-gcc" && CXX="x86_64-w64-mingw32-g++" RUNTESTFLAGS="--target_board wine-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" CONFIGURE_OPTIONS=--disable-shared LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=sh4-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/sh4-linux-gnu + - os: linux + env: HOST=alpha-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/alpha-linux-gnu + - os: linux + env: HOST=m68k-linux-gnu MEVAL='export CC="m68k-linux-gnu-gcc-8 -mcpu=547x" && CXX="m68k-linux-gnu-g++-8 -mcpu=547x"' CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/m68k-linux-gnu QEMU_CPU=cfv4e + - os: linux + arch: s390x + env: HOST=s390x-linux-gnu + - os: linux + arch: ppc64le + env: HOST=ppc64le-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + compiler: clang + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O0" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2 -fomit-frame-pointer" +# The sparc64 linux system in the GCC compile farm is non-responsive. +# - os: linux +# env: HOST=sparc64-linux-gnu +# The mips64 linux system in the GCC compile farm is not allowing logins +# - os: linux +# env: HOST=mips64el-linux-gnu + - os: linux + compiler: gcc + env: HOST=i386-pc-linux-gnu MEVAL='export CC="$CC -m32" && CXX="$CXX -m32"' + - os: linux + compiler: gcc + - os: linux + compiler: gcc + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + compiler: clang + - os: linux + compiler: clang + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + env: HOST=moxie-elf MEVAL='export PATH=/opt/moxielogic/bin:$PATH && CC=moxie-elf-gcc && CXX=moxie-elf-g++' LDFLAGS=-Tsim.ld RUNTESTFLAGS="--target_board moxie-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" + +before_install: + - if test x"$MEVAL" != x; then eval ${MEVAL}; fi + +install: + - travis_wait 30 ./.travis/install.sh + +script: + - if ! 
test x"$MEVAL" = x; then eval ${MEVAL}; fi + - travis_wait 115 sleep infinity & + - ./.travis/build.sh diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/ar-lib b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/ar-lib new file mode 100755 index 0000000..0baa4f6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/ar-lib @@ -0,0 +1,270 @@ +#! /bin/sh +# Wrapper for Microsoft lib.exe + +me=ar-lib +scriptversion=2012-03-01.08; # UTC + +# Copyright (C) 2010-2018 Free Software Foundation, Inc. +# Written by Peter Rosin . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + + +# func_error message +func_error () +{ + echo "$me: $1" 1>&2 + exit 1 +} + +file_conv= + +# func_file_conv build_file +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv in + mingw) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_at_file at_file operation archive +# Iterate over all members in AT_FILE performing OPERATION on ARCHIVE +# for each of them. +# When interpreting the content of the @FILE, do NOT use func_file_conv, +# since the user would need to supply preconverted file names to +# binutils ar, at least for MinGW. +func_at_file () +{ + operation=$2 + archive=$3 + at_file_contents=`cat "$1"` + eval set x "$at_file_contents" + shift + + for member + do + $AR -NOLOGO $operation:"$member" "$archive" || exit $? + done +} + +case $1 in + '') + func_error "no command. Try '$0 --help' for more information." + ;; + -h | --h*) + cat <. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. 
If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + + +# func_error message +func_error () +{ + echo "$me: $1" 1>&2 + exit 1 +} + +file_conv= + +# func_file_conv build_file +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv in + mingw) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_at_file at_file operation archive +# Iterate over all members in AT_FILE performing OPERATION on ARCHIVE +# for each of them. +# When interpreting the content of the @FILE, do NOT use func_file_conv, +# since the user would need to supply preconverted file names to +# binutils ar, at least for MinGW. +func_at_file () +{ + operation=$2 + archive=$3 + at_file_contents=`cat "$1"` + eval set x "$at_file_contents" + shift + + for member + do + $AR -NOLOGO $operation:"$member" "$archive" || exit $? + done +} + +case $1 in + '') + func_error "no command. Try '$0 --help' for more information." + ;; + -h | --h*) + cat < libffi.log + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git libffi.log + exit $? +} + +function build_linux() +{ + ./autogen.sh + ./configure ${HOST+--host=$HOST} ${CONFIGURE_OPTIONS} || cat */config.log + make + make dist + make check RUNTESTFLAGS="-a $RUNTESTFLAGS" + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_foreign_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" $2 bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_cross_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e HOST="${HOST}" -e CC="${HOST}-gcc-8 ${GCC_OPTIONS}" -e CXX="${HOST}-g++-8 ${GCC_OPTIONS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" moxielogic/cross-ci-build-container:latest bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? 
+} + +function build_cross() +{ + ${DOCKER} pull quay.io/moxielogic/libffi-ci-${HOST} + ${DOCKER} run --rm -t -i -v $(pwd):/opt -e HOST="${HOST}" -e CC="${HOST}-gcc ${GCC_OPTIONS}" -e CXX="${HOST}-g++ ${GCC_OPTIONS}" -e TRAVIS_BUILD_DIR=/opt -e DEJAGNU="${DEJAGNU}" -e RUNTESTFLAGS="${RUNTESTFLAGS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" quay.io/moxielogic/libffi-ci-${HOST} bash -c /opt/.travis/build-cross-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_ios() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-ios + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-iOS" -configuration Release -sdk iphoneos11.4 + exit $? +} + +function build_macosx() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-osx + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-Mac" -configuration Release -sdk macosx10.13 + echo "Finished build" + exit $? +} + +case "$HOST" in + arm-apple-darwin*) + ./autogen.sh + build_ios + ;; + x86_64-apple-darwin*) + ./autogen.sh + build_macosx + ;; + arm32v7-linux-gnu) + ./autogen.sh + build_foreign_linux arm moxielogic/arm32v7-ci-build-container:latest + ;; + mips64el-linux-gnu | sparc64-linux-gnu) + build_cfarm + ;; + bfin-elf ) + ./autogen.sh + GCC_OPTIONS=-msim build_cross + ;; + m32r-elf ) + ./autogen.sh + build_cross + ;; + or1k-elf ) + ./autogen.sh + build_cross + ;; + powerpc-eabisim ) + ./autogen.sh + build_cross + ;; + m68k-linux-gnu ) + ./autogen.sh + GCC_OPTIONS=-mcpu=547x build_cross_linux + ;; + alpha-linux-gnu | sh4-linux-gnu ) + ./autogen.sh + build_cross_linux + ;; + *) + ./autogen.sh + build_linux + ;; +esac diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-cross-in-container 2.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-cross-in-container 2.sh new file mode 100755 index 0000000..7e2252c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-cross-in-container 2.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +cd /opt + +echo $PATH +export PATH=/usr/local/bin:$PATH +echo $PATH + +./configure --host=${HOST} || cat */config.log +make +make dist +make check RUNTESTFLAGS="-a $RUNTESTFLAGS" || true + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-cross-in-container.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-cross-in-container.sh new file mode 100755 index 0000000..7e2252c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-cross-in-container.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +cd /opt + +echo $PATH +export PATH=/usr/local/bin:$PATH +echo $PATH + +./configure --host=${HOST} || cat */config.log +make +make dist +make check RUNTESTFLAGS="-a $RUNTESTFLAGS" || true + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-in-container 2.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-in-container 2.sh new file mode 100755 index 0000000..1a7fa76 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-in-container 2.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +cd /opt + +export QEMU_LD_PREFIX=/usr/${HOST} + +./configure ${HOST+--host=$HOST 
--disable-shared} +make +make dist +make check RUNTESTFLAGS="-a $RUNTESTFLAGS" || true + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-in-container.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-in-container.sh new file mode 100755 index 0000000..1a7fa76 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build-in-container.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +cd /opt + +export QEMU_LD_PREFIX=/usr/${HOST} + +./configure ${HOST+--host=$HOST --disable-shared} +make +make dist +make check RUNTESTFLAGS="-a $RUNTESTFLAGS" || true + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build.sh new file mode 100755 index 0000000..5f9a86e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/build.sh @@ -0,0 +1,142 @@ +#!/bin/bash + +set -x + +# This is a policy bound API key. It can only be used with +# https://github.com/libffi/rlgl-policy.git. +RLGL_KEY=0LIBFFI-0LIBFFI-0LIBFFI-0LIBFFI + +if [ -z ${QEMU_CPU+x} ]; then + export SET_QEMU_CPU= +else + export SET_QEMU_CPU="-e QEMU_CPU=${QEMU_CPU}" +fi + +export DOCKER=docker + +function build_cfarm() +{ + curl -u ${CFARM_AUTH} https://cfarm-test-libffi-libffi.apps.home.labdroid.net/test?host=${HOST}\&commit=${TRAVIS_COMMIT} | tee build.log + echo ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + echo $(tail build.log | grep '^==LOGFILE==') + echo $(tail build.log | grep '^==LOGFILE==' | cut -b13-) + echo ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + curl -u ${CFARM_AUTH} "$(tail build.log | grep '^==LOGFILE==' | cut -b13-)" > libffi.log + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git libffi.log + exit $? +} + +function build_linux() +{ + ./autogen.sh + ./configure ${HOST+--host=$HOST} ${CONFIGURE_OPTIONS} || cat */config.log + make + make dist + make check RUNTESTFLAGS="-a $RUNTESTFLAGS" + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_foreign_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" $2 bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_cross_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e HOST="${HOST}" -e CC="${HOST}-gcc-8 ${GCC_OPTIONS}" -e CXX="${HOST}-g++-8 ${GCC_OPTIONS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" moxielogic/cross-ci-build-container:latest bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? 
+} + +function build_cross() +{ + ${DOCKER} pull quay.io/moxielogic/libffi-ci-${HOST} + ${DOCKER} run --rm -t -i -v $(pwd):/opt -e HOST="${HOST}" -e CC="${HOST}-gcc ${GCC_OPTIONS}" -e CXX="${HOST}-g++ ${GCC_OPTIONS}" -e TRAVIS_BUILD_DIR=/opt -e DEJAGNU="${DEJAGNU}" -e RUNTESTFLAGS="${RUNTESTFLAGS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" quay.io/moxielogic/libffi-ci-${HOST} bash -c /opt/.travis/build-cross-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_ios() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-ios + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-iOS" -configuration Release -sdk iphoneos11.4 + exit $? +} + +function build_macosx() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-osx + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-Mac" -configuration Release -sdk macosx10.13 + echo "Finished build" + exit $? +} + +case "$HOST" in + arm-apple-darwin*) + ./autogen.sh + build_ios + ;; + x86_64-apple-darwin*) + ./autogen.sh + build_macosx + ;; + arm32v7-linux-gnu) + ./autogen.sh + build_foreign_linux arm moxielogic/arm32v7-ci-build-container:latest + ;; + mips64el-linux-gnu | sparc64-linux-gnu) + build_cfarm + ;; + bfin-elf ) + ./autogen.sh + GCC_OPTIONS=-msim build_cross + ;; + m32r-elf ) + ./autogen.sh + build_cross + ;; + or1k-elf ) + ./autogen.sh + build_cross + ;; + powerpc-eabisim ) + ./autogen.sh + build_cross + ;; + m68k-linux-gnu ) + ./autogen.sh + GCC_OPTIONS=-mcpu=547x build_cross_linux + ;; + alpha-linux-gnu | sh4-linux-gnu ) + ./autogen.sh + build_cross_linux + ;; + *) + ./autogen.sh + build_linux + ;; +esac diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile new file mode 100755 index 0000000..655932a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile @@ -0,0 +1,351 @@ +#! /bin/sh +# Wrapper for compilers which do not understand '-c -o'. + +scriptversion=2018-03-27.18; # UTC + +# Copyright (C) 1999-2018 Free Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. 
+IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -warn) + eat=1 + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. 
+ +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? + +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile 2 new file mode 100755 index 0000000..655932a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile 2 @@ -0,0 +1,351 @@ +#! /bin/sh +# Wrapper for compilers which do not understand '-c -o'. + +scriptversion=2018-03-27.18; # UTC + +# Copyright (C) 1999-2018 Free Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. +IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. 
+ eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -warn) + eat=1 + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. + +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? 
+ +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install 2.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install 2.sh new file mode 100755 index 0000000..2420245 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install 2.sh @@ -0,0 +1,71 @@ +#!/bin/bash +set -x + +if [[ $TRAVIS_OS_NAME != 'linux' ]]; then + brew update > brew-update.log 2>&1 + # fix an issue with libtool on travis by reinstalling it + brew uninstall libtool; + brew install libtool dejagnu; + + # Download and extract the rlgl client + wget -qO - https://rl.gl/cli/rlgl-darwin-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + +else + # Download and extract the rlgl client + case $HOST in + aarch64-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-arm.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + ppc64le-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-ppc64le.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + s390x-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-s390x.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + *) + wget -qO - https://rl.gl/cli/rlgl-linux-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + esac + + sudo apt-get clean # clear the cache + sudo apt-get update + case $HOST in + mips64el-linux-gnu | sparc64-linux-gnu) + ;; + alpha-linux-gnu | arm32v7-linux-gnu | m68k-linux-gnu | sh4-linux-gnu) + sudo apt-get install qemu-user-static + ;; + hppa-linux-gnu ) + sudo apt-get install -y qemu-user-static g++-5-hppa-linux-gnu + ;; + i386-pc-linux-gnu) + sudo apt-get install gcc-multilib g++-multilib; + ;; + moxie-elf) + echo 'deb https://repos.moxielogic.org:7114/MoxieLogic moxiedev main' | sudo tee -a /etc/apt/sources.list + sudo apt-get clean # clear the cache + sudo apt-get update ## -qq + sudo apt-get update + sudo apt-get install -y --allow-unauthenticated moxielogic-moxie-elf-gcc moxielogic-moxie-elf-gcc-c++ moxielogic-moxie-elf-gcc-libstdc++ moxielogic-moxie-elf-gdb-sim + ;; + x86_64-w64-mingw32) + sudo apt-get install gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 wine; + ;; + i686-w32-mingw32) + sudo apt-get install gcc-mingw-w64-i686 g++-mingw-w64-i686 wine; + ;; + esac + case $HOST in + arm32v7-linux-gnu) + # don't install host tools + ;; + *) + sudo apt-get install dejagnu texinfo sharutils + ;; + esac +fi diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install.sh new file mode 100755 index 0000000..2420245 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install.sh @@ -0,0 +1,71 @@ +#!/bin/bash +set -x + +if [[ $TRAVIS_OS_NAME != 'linux' ]]; then + brew update > brew-update.log 2>&1 + # fix an issue with libtool on travis by reinstalling it + brew uninstall libtool; + brew install libtool dejagnu; + + # Download and extract the rlgl client + wget -qO - https://rl.gl/cli/rlgl-darwin-amd64.tgz | \ + tar 
--strip-components=2 -xvzf - ./rlgl/rlgl; + +else + # Download and extract the rlgl client + case $HOST in + aarch64-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-arm.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + ppc64le-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-ppc64le.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + s390x-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-s390x.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + *) + wget -qO - https://rl.gl/cli/rlgl-linux-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + esac + + sudo apt-get clean # clear the cache + sudo apt-get update + case $HOST in + mips64el-linux-gnu | sparc64-linux-gnu) + ;; + alpha-linux-gnu | arm32v7-linux-gnu | m68k-linux-gnu | sh4-linux-gnu) + sudo apt-get install qemu-user-static + ;; + hppa-linux-gnu ) + sudo apt-get install -y qemu-user-static g++-5-hppa-linux-gnu + ;; + i386-pc-linux-gnu) + sudo apt-get install gcc-multilib g++-multilib; + ;; + moxie-elf) + echo 'deb https://repos.moxielogic.org:7114/MoxieLogic moxiedev main' | sudo tee -a /etc/apt/sources.list + sudo apt-get clean # clear the cache + sudo apt-get update ## -qq + sudo apt-get update + sudo apt-get install -y --allow-unauthenticated moxielogic-moxie-elf-gcc moxielogic-moxie-elf-gcc-c++ moxielogic-moxie-elf-gcc-libstdc++ moxielogic-moxie-elf-gdb-sim + ;; + x86_64-w64-mingw32) + sudo apt-get install gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 wine; + ;; + i686-w32-mingw32) + sudo apt-get install gcc-mingw-w64-i686 g++-mingw-w64-i686 wine; + ;; + esac + case $HOST in + arm32v7-linux-gnu) + # don't install host tools + ;; + *) + sudo apt-get install dejagnu texinfo sharutils + ;; + esac +fi diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim 2.exp new file mode 100644 index 0000000..b3341f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim 2.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {m32r-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "m32r" is the name of the sim subdir in devo/sim. +setup_sim m32r + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. 
+ +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim.exp new file mode 100644 index 0000000..b3341f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {m32r-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "m32r" is the name of the sim subdir in devo/sim. +setup_sim m32r + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim 2.exp new file mode 100644 index 0000000..3a6042e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim 2.exp @@ -0,0 +1,60 @@ +# Copyright (C) 2010 Free Software Foundation, Inc. 
+# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {moxie-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "moxie" is the name of the sim subdir in devo/sim. +setup_sim moxie + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" +# No linker script needed. +set_board_info ldscript "-Tsim.ld" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim.exp new file mode 100644 index 0000000..3a6042e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim.exp @@ -0,0 +1,60 @@ +# Copyright (C) 2010 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {moxie-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. 
+load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "moxie" is the name of the sim subdir in devo/sim. +setup_sim moxie + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" +# No linker script needed. +set_board_info ldscript "-Tsim.ld" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim 2.exp new file mode 100644 index 0000000..64e1444 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim 2.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {or1k-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "or1k" is the name of the sim subdir in devo/sim. +setup_sim or1k + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. 
+ +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim.exp new file mode 100644 index 0000000..64e1444 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {or1k-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "or1k" is the name of the sim subdir in devo/sim. +setup_sim or1k + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. 
+set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim 2.exp new file mode 100644 index 0000000..bdc4389 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim 2.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {powerpc-eabisim} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "powerpc" is the name of the sim subdir in devo/sim. +setup_sim powerpc + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp new file mode 100644 index 0000000..bdc4389 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {powerpc-eabisim} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "powerpc" is the name of the sim subdir in devo/sim. +setup_sim powerpc + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site 2.exp new file mode 100644 index 0000000..644ec63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site 2.exp @@ -0,0 +1,27 @@ +# Copyright (C) 2008, 2010, 2018, 2019 Anthony Green + +# Make sure we look in the right place for the board description files. +if ![info exists boards_dir] { + set boards_dir {} +} + +lappend boards_dir $::env(TRAVIS_BUILD_DIR)/.travis + +verbose "Global Config File: target_triplet is $target_triplet" 2 +global target_list + +case "$target_triplet" in { + { "bfin-elf" } { + set target_list "bfin-sim" + } + { "m32r-elf" } { + set target_list "m32r-sim" + } + { "moxie-elf" } { + set target_list "moxie-sim" + } + { "or1k-elf" } { + set target_list "or1k-sim" + } +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site.exp new file mode 100644 index 0000000..644ec63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site.exp @@ -0,0 +1,27 @@ +# Copyright (C) 2008, 2010, 2018, 2019 Anthony Green + +# Make sure we look in the right place for the board description files. 
+if ![info exists boards_dir] { + set boards_dir {} +} + +lappend boards_dir $::env(TRAVIS_BUILD_DIR)/.travis + +verbose "Global Config File: target_triplet is $target_triplet" 2 +global target_list + +case "$target_triplet" in { + { "bfin-elf" } { + set target_list "bfin-sim" + } + { "m32r-elf" } { + set target_list "m32r-sim" + } + { "moxie-elf" } { + set target_list "moxie-sim" + } + { "or1k-elf" } { + set target_list "or1k-sim" + } +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim 2.exp new file mode 100644 index 0000000..8bdab67 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim 2.exp @@ -0,0 +1,55 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {i686-w64-mingw32} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +set_board_info sim "wineconsole --backend=curses" +set_board_info is_simulator 1 + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim.exp new file mode 100644 index 0000000..8bdab67 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim.exp @@ -0,0 +1,55 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {i686-w64-mingw32} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +set_board_info sim "wineconsole --backend=curses" +set_board_info is_simulator 1 + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog 2.old b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog 2.old new file mode 100644 index 0000000..8de1ca7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog 2.old @@ -0,0 +1,7407 @@ +Libffi change logs used to be maintained in separate ChangeLog files. +These days we generate them directly from the git commit messages. +The old ChangeLog files are saved here in order to maintain the historical +record. + +============================================================================= +From the old ChangeLog.libffi-3.1 file... + +2014-03-16 Josh Triplett + + * ChangeLog: Archive to ChangeLog.libffi-3.1 and delete. Future + changelogs will come from git, with autogenerated snapshots shipped in + distributed tarballs. + +2014-03-16 Josh Triplett + + Add support for stdcall, thiscall, and fastcall on non-Windows + x86-32. + + Linux supports the stdcall calling convention, either via + functions explicitly declared with the stdcall attribute, or via + code compiled with -mrtd which effectively makes stdcall the + default. + + This introduces FFI_STDCALL, FFI_THISCALL, and FFI_FASTCALL on + non-Windows x86-32 platforms, as non-default calling conventions. + + * Makefile.am: Compile in src/x86/win32.S on non-Windows x86-32. + * src/x86/ffitarget.h: Add FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32. Increase trampoline size to + accomodate these calling conventions, and unify some ifdeffery. + * src/x86/ffi.c: Add support for FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32 platforms; update ifdeffery. + * src/x86/win32.S: Support compiling on non-Windows x86-32 + platforms. 
On those platforms, avoid redefining the SYSV symbols + already provided by src/x86/sysv.S. + * testsuite/libffi.call/closure_stdcall.c: Run on non-Windows. + #define __stdcall if needed. + * testsuite/libffi.call/closure_thiscall.c: Run on non-Windows. + #define __fastcall if needed. + * testsuite/libffi.call/fastthis1_win32.c: Run on non-Windows. + * testsuite/libffi.call/fastthis2_win32.c: Ditto. + * testsuite/libffi.call/fastthis3_win32.c: Ditto. + * testsuite/libffi.call/many2_win32.c: Ditto. + * testsuite/libffi.call/many_win32.c: Ditto. + * testsuite/libffi.call/strlen2_win32.c: Ditto. + * testsuite/libffi.call/strlen_win32.c: Ditto. + * testsuite/libffi.call/struct1_win32.c: Ditto. + * testsuite/libffi.call/struct2_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * prep_cif.c: Remove unnecessary ifdef for X86_WIN32. + ffi_prep_cif_core had a special case for X86_WIN32, checking for + FFI_THISCALL in addition to the FFI_FIRST_ABI-to-FFI_LAST_ABI + range before returning FFI_BAD_ABI. However, on X86_WIN32, + FFI_THISCALL already falls in that range, making the special case + unnecessary. Remove it. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/closure_thiscall.c: Remove fragile stack + pointer checks. These files included inline assembly to save the + stack pointer before and after the call, and compare the values. + However, compilers can and do leave the stack in different states + for these two pieces of inline assembly, such as by saving a + temporary value on the stack across the call; observed with gcc + -Os, and verified as spurious through careful inspection of + disassembly. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/many.c: Avoid spurious failure due to + excess floating-point precision. + * testsuite/libffi.call/many_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * libtool-ldflags: Re-add. + +2014-03-16 Josh Triplett + + * Makefile.in, aclocal.m4, compile, config.guess, config.sub, + configure, depcomp, include/Makefile.in, install-sh, + libtool-ldflags, ltmain.sh, m4/libtool.m4, m4/ltoptions.m4, + m4/ltsugar.m4, m4/ltversion.m4, m4/lt~obsolete.m4, + man/Makefile.in, mdate-sh, missing, testsuite/Makefile.in: Delete + autogenerated files from version control. + * .gitignore: Add autogenerated files. + * autogen.sh: New script to generate the autogenerated files. + * README: Document requirement to run autogen.sh when building + directly from version control. + * .travis.yml: Run autogen.sh + +2014-03-14 Anthony Green + + * configure, Makefile.in: Rebuilt. + +2014-03-10 Mike Hommey + + * configure.ac: Allow building for mipsel with Android NDK r8. + * Makefile.am (AM_MAKEFLAGS): Replace double quotes with single + quotes. + +2014-03-10 Landry Breuil + + * configure.ac: Ensure the linker supports @unwind sections in libffi. + +2014-03-01 Anthony Green + + * Makefile.am (EXTRA_DIST): Replace old scripts with + generate-darwin-source-and-headers.py. + * Makefile.in: Rebuilt. + +2014-02-28 Anthony Green + + * Makefile.am (AM_CFLAGS): Reintroduce missing -DFFI_DEBUG for + --enable-debug builds. + * Makefile.in: Rebuilt. + +2014-02-28 Makoto Kato + + * src/closures.c: Fix build failure when using clang for Android. + +2014-02-28 Marcin Wojdyr + + * libffi.pc.in (toolexeclibdir): use -L${toolexeclibdir} instead + of -L${libdir}. + +2014-02-28 Paulo Pizarro + + * src/bfin/sysv.S: Calling functions in shared libraries requires + considering the GOT. 
+ +2014-02-28 Josh Triplett + + * src/x86/ffi64.c (classify_argument): Handle case where + FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE. + +2014-02-28 Anthony Green + + * ltmain.sh: Generate with libtool-2.4.2.418. + * m4/libtool.m4, m4/ltoptions.m4, m4/ltversion.m4: Ditto. + * configure: Rebuilt. + +2014-02-28 Dominik Vogt + + * configure.ac (AC_ARG_ENABLE struct): Fix typo in help + message. + (AC_ARG_ENABLE raw_api): Ditto. + * configure, fficonfig.h.in: Rebuilt. + +2014-02-28 Will Newton + + * src/arm/sysv.S: Initialize IP register with FP. + +2014-02-28 Yufeng Zhang + + * src/aarch64/sysv.S (ffi_closure_SYSV): Use x29 as the + main CFA reg; update cfi_rel_offset. + +2014-02-15 Marcus Comstedt + + * src/powerpc/ffi_linux64.c, src/powerpc/linux64_closure.S: Remove + assumption on contents of r11 in closure. + +2014-02-09 Heiher + + * src/mips/n32.S: Fix call floating point va function. + +2014-01-21 Zachary Waldowski + + * src/aarch64/ffi.c: Fix missing semicolons on assertions under + debug mode. + +2013-12-30 Zachary Waldowski + + * .gitignore: Exclude darwin_* generated source and build_* trees. + * src/aarch64/ffi.c, src/arm/ffi.c, src/x86/ffi.c: Inhibit Clang + previous prototype warnings. + * src/arm/ffi.c: Prevent NULL dereference, fix short type warning + * src/dlmalloc.c: Fix warnings from set_segment_flags return type, + and the native use of size_t for malloc on platforms + * src/arm/sysv.S: Use unified syntax. Clang clean-ups for + ARM_FUNC_START. + * generate-osx-source-and-headers.py: Remove. + * build-ios.sh: Remove. + * libffi.xcodeproj/project.pbxproj: Rebuild targets. Include + x86_64+aarch64 pieces in library. Export headers properly. + * src/x86/ffi64.c: More Clang warning clean-ups. + * src/closures.c (open_temp_exec_file_dir): Use size_t. + * src/prep_cif.c (ffi_prep_cif_core): Cast ALIGN result. + * src/aarch64/sysv.S: Use CNAME for global symbols. Only use + .size for ELF targets. + * src/aarch64/ffi.c: Clean up for double == long double. Clean up + from Clang warnings. Use Clang cache invalidation builtin. Use + size_t in place of unsigned in many places. Accommodate for + differences in Apple AArch64 ABI. + +2013-12-02 Daniel Rodríguez Troitiño + + * generate-darwin-source-and-headers.py: Clean up, modernize, + merged version of previous scripts. + +2013-11-21 Anthony Green + + * configure, Makefile.in, include/Makefile.in, include/ffi.h.in, + man/Makefile.in, testsuite/Makefile.in, fficonfig.h.in: Rebuilt. + +2013-11-21 Alan Modra + + * Makefile.am (EXTRA_DIST): Add new src/powerpc files. + (nodist_libffi_la_SOURCES ): Likewise. + * configure.ac (HAVE_LONG_DOUBLE_VARIANT): Define for powerpc. + * include/ffi.h.in (ffi_prep_types): Declare. + * src/prep_cif.c (ffi_prep_cif_core): Call ffi_prep_types. + * src/types.c (FFI_NONCONST_TYPEDEF): Define and use for + HAVE_LONG_DOUBLE_VARIANT. + * src/powerpc/ffi_powerpc.h: New file. + * src/powerpc/ffi.c: Split into.. + * src/powerpc/ffi_sysv.c: ..new file, and.. + * src/powerpc/ffi_linux64.c: ..new file, rewriting parts. + * src/powerpc/ffitarget.h (enum ffi_abi): Rewrite powerpc ABI + selection as bits controlling features. + * src/powerpc/linux64.S: For consistency, use POWERPC64 rather + than __powerpc64__. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. Move .note.FNU-stack + inside guard. + * src/powerpc/sysv.S: Likewise. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. 
+ +2013-11-20 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use + NUM_FPR_ARG_REGISTERS64 and NUM_GPR_ARG_REGISTERS64 not their + 32-bit versions for 64-bit code. + * src/powerpc/linux64_closure.S: Don't use the return value area + as a parameter save area on ELFv2. + +2013-11-18 Iain Sandoe + + * src/powerpc/darwin.S (EH): Correct use of pcrel FDE encoding. + * src/powerpc/darwin_closure.S (EH): Likewise. Modernise picbase + labels. + +2013-11-18 Anthony Green + + * src/arm/ffi.c (ffi_call): Hoist declaration of temp to top of + function. + * src/arm/ffi.c (ffi_closure_inner): Moderize function declaration + to appease compiler. + Thanks for Gregory P. Smith . + +2013-11-18 Anthony Green + + * README (tested): Mention PowerPC ELFv2. + +2013-11-16 Alan Modra + + * src/powerpc/ppc_closure.S: Move errant #endif to where it belongs. + Don't bl .Luint128. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use #if _CALL_ELF + test to select parameter save sizing for ELFv2 vs. ELFv1. + * src/powerpc/ffitarget.h (FFI_V2_TYPE_FLOAT_HOMOG, + FFI_V2_TYPE_DOUBLE_HOMOG, FFI_V2_TYPE_SMALL_STRUCT): Define. + (FFI_TRAMPOLINE_SIZE): Define variant for ELFv2. + * src/powerpc/ffi.c (FLAG_ARG_NEEDS_PSAVE): Define. + (discover_homogeneous_aggregate): New function. + (ffi_prep_args64): Adjust start of param save area for ELFv2. + Handle homogenous floating point struct parms. + (ffi_prep_cif_machdep_core): Adjust space calculation for ELFv2. + Handle ELFv2 return values. Set FLAG_ARG_NEEDS_PSAVE. Handle + homogenous floating point structs. + (ffi_call): Increase size of smst_buffer for ELFv2. Handle ELFv2. + (flush_icache): Compile for ELFv2. + (ffi_prep_closure_loc): Set up ELFv2 trampoline. + (ffi_closure_helper_LINUX64): Don't return all structs directly + to caller. Handle homogenous floating point structs. Handle + ELFv2 struct return values. + * src/powerpc/linux64.S (ffi_call_LINUX64): Set up r2 for + ELFv2. Adjust toc save location. Call function pointer using + r12. Handle FLAG_RETURNS_SMST. Don't predict branches. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Set up r2 + for ELFv2. Define ELFv2 versions of STACKFRAME, PARMSAVE, and + RETVAL. Handle possibly missing parameter save area. Handle + ELFv2 return values. + (.note.GNU-stack): Move inside outer #ifdef. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Revert 2013-02-08 + change. Do not consume an int arg when returning a small struct + for FFI_SYSV ABI. + (ffi_call): Only use bounce buffer when FLAG_RETURNS_SMST. + Properly copy bounce buffer to destination. + * src/powerpc/sysv.S: Revert 2013-02-08 change. + * src/powerpc/ppc_closure.S: Remove stray '+'. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Align struct parameters + according to __STRUCT_PARM_ALIGN__. + (ffi_prep_cif_machdep_core): Likewise. + (ffi_closure_helper_LINUX64): Likewise. + +2013-11-16 Alan Modra + + * src/powerpc/linux64.S (ffi_call_LINUX64): Tweak restore of r28. + (.note.GNU-stack): Move inside outer #ifdef. + * src/powerpc/linux64_closure.S (STACKFRAME, PARMSAVE, + RETVAL): Define and use throughout. + (ffi_closure_LINUX64): Save fprs before buying stack. + (.note.GNU-stack): Move inside outer #ifdef. + +2013-11-16 Alan Modra + + * src/powerpc/ffitarget.h (FFI_TARGET_SPECIFIC_VARIADIC): Define. + (FFI_EXTRA_CIF_FIELDS): Define. + * src/powerpc/ffi.c (ffi_prep_args64): Save fprs as per the + ABI, not to both fpr and param save area. 
+ (ffi_prep_cif_machdep_core): Renamed from ffi_prep_cif_machdep. + Keep initial flags. Formatting. Remove dead FFI_LINUX_SOFT_FLOAT + code. + (ffi_prep_cif_machdep, ffi_prep_cif_machdep_var): New functions. + (ffi_closure_helper_LINUX64): Pass floating point as per ABI, + not to both fpr and parameter save areas. + + * libffi/testsuite/libffi.call/cls_double_va.c (main): Correct + function cast and don't call ffi_prep_cif. + * libffi/testsuite/libffi.call/cls_longdouble_va.c (main): Likewise. + +2013-11-15 Andrew Haley + + * doc/libffi.texi (Closure Example): Fix the sample code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-15 Andrew Haley + + * testsuite/libffi.call/va_struct1.c (main): Fix broken test. + * testsuite/libffi.call/cls_uint_va.c (cls_ret_T_fn): Likewise + * testsuite/libffi.call/cls_struct_va1.c (test_fn): Likewise. + * testsuite/libffi.call/va_1.c (main): Likewise. + +2013-11-14 David Schneider + + * src/arm/ffi.c: Fix register allocation for mixed float and + doubles. + * testsuite/libffi.call/cls_many_mixed_float_double.c: Testcase + for many mixed float and double arguments. + +2013-11-13 Alan Modra + + * doc/libffi.texi (Simple Example): Correct example code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-13 Anthony Green + + * include/ffi_common.h: Respect HAVE_ALLOCA_H for GNU compiler + based build. (Thanks to tmr111116 on github) + +2013-11-09 Anthony Green + + * m4/libtool.m4: Refresh. + * configure, Makefile.in: Rebuilt. + * README: Add more notes about next release. + +2013-11-09 Shigeharu TAKENO + + * m4/ax_gcc_archflag.m4 (ax_gcc_arch): Don't recognize + UltraSPARC-IIi as ultrasparc3. + +2013-11-06 Mark Kettenis + + * src/x86/freebsd.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2013-11-06 Konstantin Belousov + + * src/x86/freebsd.S (ffi_closure_raw_SYSV): Mark the assembler + source as not requiring executable stack. + +2013-11-02 Anthony Green + + * doc/libffi.texi (The Basics): Clarify return value buffer size + requirements. Also, NULL result buffer pointers are no longer + supported. + * doc/libffi.info: Rebuilt. + +2013-11-02 Mischa Jonker + + * Makefile.am (nodist_libffi_la_SOURCES): Fix build error. + * Makefile.in: Rebuilt. + +2013-11-02 David Schneider + + * src/arm/ffi.c: more robust argument handling for closures on arm hardfloat + * testsuite/libffi.call/many_mixed.c: New file. + * testsuite/libffi.call/cls_many_mixed_args.c: More tests. + +2013-11-02 Vitaly Budovski + + * src/x86/ffi.c (ffi_prep_cif_machdep): Don't align stack for win32. + +2013-10-23 Mark H Weaver + + * src/mips/ffi.c: Fix handling of uint32_t arguments on the + MIPS N32 ABI. + +2013-10-13 Sandra Loosemore + + * README: Add Nios II to table of supported platforms. + * Makefile.am (EXTRA_DIST): Add nios2 files. + (nodist_libffi_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + * configure.ac (nios2*-linux*): New host. + (NIOS2): Add AM_CONDITIONAL. + * configure: Regenerated. + * src/nios2/ffi.c: New. + * src/nios2/ffitarget.h: New. + * src/nios2/sysv.S: New. + * src/prep_cif.c (initialize_aggregate): Handle extra structure + alignment via FFI_AGGREGATE_ALIGNMENT. + (ffi_prep_cif_core): Conditionalize structure return for NIOS2. + +2013-10-10 Sandra Loosemore + + * testsuite/libffi.call/cls_many_mixed_args.c (cls_ret_double_fn): + Fix uninitialized variable. + +2013-10-11 Marcus Shawcroft + + * testsuite/libffi.call/many.c (many): Replace * with +. 
+ +2013-10-08 Ondřej Bílka + + * src/aarch64/ffi.c, src/aarch64/sysv.S, src/arm/ffi.c, + src/arm/gentramp.sh, src/bfin/sysv.S, src/closures.c, + src/dlmalloc.c, src/ia64/ffi.c, src/microblaze/ffi.c, + src/microblaze/sysv.S, src/powerpc/darwin_closure.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/sh/ffi.c, + src/tile/tile.S, testsuite/libffi.call/nested_struct11.c: Fix + spelling errors. + +2013-10-08 Anthony Green + + * aclocal.m4, compile, config.guess, config.sub, depcomp, + install-sh, mdate-sh, missing, texinfo.tex: Update from upstream. + * configure.ac: Update version to 3.0.14-rc0. + * Makefile.in, configure, Makefile.in, include/Makefile.in, + man/Makefile.in, testsuite/Makefile.in: Rebuilt. + * README: Mention M88K and VAX. + +2013-07-15 Miod Vallat + + * Makefile.am, + configure.ac, + src/m88k/ffi.c, + src/m88k/ffitarget.h, + src/m88k/obsd.S, + src/vax/elfbsd.S, + src/vax/ffi.c, + src/vax/ffitarget.h: Add m88k and vax support. + +2013-06-24 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Move var declaration + before statements. + (ffi_prep_args64): Support little-endian. + (ffi_closure_helper_SYSV, ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Likewise. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Likewise. + +2013-06-12 Mischa Jonker + + * configure.ac: Add support for ARC. + * Makefile.am: Likewise. + * README: Add ARC details. + * src/arc/arcompact.S: New. + * src/arc/ffi.c: Likewise. + * src/arc/ffitarget.h: Likewise. + +2013-03-28 David Schneider + + * src/arm/ffi.c: Fix support for ARM hard-float calling convention. + * src/arm/sysv.S: call different methods for SYSV and VFP ABIs. + * testsuite/libffi.call/cls_many_mixed_args.c: testcase for a closure with + mixed arguments, many doubles. + * testsuite/libffi.call/many_double.c: testcase for calling a function using + more than 8 doubles. + * testcase/libffi.call/many.c: use absolute value to check result against an + epsilon + +2013-03-17 Anthony Green + + * README: Update for 3.0.13. + * configure.ac: Ditto. + * configure: Rebuilt. + * doc/*: Update version. + +2013-03-17 Dave Korn + + * src/closures.c (is_emutramp_enabled + [!FFI_MMAP_EXEC_EMUTRAMP_PAX]): Move default definition outside + enclosing #if scope. + +2013-03-17 Anthony Green + + * configure.ac: Only modify toolexecdir in certain cases. + * configure: Rebuilt. + +2013-03-16 Gilles Talis + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Don't use + fparg_count,etc on __NO_FPRS__ targets. + +2013-03-16 Alan Hourihane + + * src/m68k/sysv.S (epilogue): Don't use extb instruction on + m680000 machines. + +2013-03-16 Alex Gaynor + + * src/x86/ffi.c (ffi_prep_cif_machdep): Always align stack. + +2013-03-13 Markos Chandras + + * configure.ac: Add support for Imagination Technologies Meta. + * Makefile.am: Likewise. + * README: Add Imagination Technologies Meta details. + * src/metag/ffi.c: New. + * src/metag/ffitarget.h: Likewise. + * src/metag/sysv.S: Likewise. + +2013-02-24 Andreas Schwab + + * doc/libffi.texi (Structures): Fix missing category argument of + @deftp. + +2013-02-11 Anthony Green + + * configure.ac: Update release number to 3.0.12. + * configure: Rebuilt. + * README: Update release info. + +2013-02-10 Anthony Green + + * README: Add Moxie. + * src/moxie/ffi.c: Created. + * src/moxie/eabi.S: Created. + * src/moxie/ffitarget.h: Created. + * Makefile.am (nodist_libffi_la_SOURCES): Add Moxie. + * Makefile.in: Rebuilt. + * configure.ac: Add Moxie. + * configure: Rebuilt. 
+ * testsuite/libffi.call/huge_struct.c: Disable format string + warnings for moxie*-*-elf tests. + +2013-02-10 Anthony Green + + * Makefile.am (LTLDFLAGS): Fix reference. + * Makefile.in: Rebuilt. + +2013-02-10 Anthony Green + + * README: Update supported platforms. Update test results link. + +2013-02-09 Anthony Green + + * testsuite/libffi.call/negint.c: Remove forced -O2. + * testsuite/libffi.call/many2.c (foo): Remove GCCism. + * testsuite/libffi.call/ffitest.h: Add default PRIuPTR definition. + + * src/sparc/v8.S (ffi_closure_v8): Import ancient ulonglong + closure return type fix developed by Martin v. Löwis for cpython + fork. + +2013-02-08 Andreas Tobler + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix small struct + support. + * src/powerpc/sysv.S: Ditto. + +2013-02-08 Anthony Green + + * testsuite/libffi.call/cls_longdouble.c: Remove xfail for + arm*-*-*. + +2013-02-08 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Fix cache flushing for GCC. + +2013-02-08 Matthias Klose + + * man/ffi_prep_cif.3: Clean up for debian linter. + +2013-02-08 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Account for FP args pushed + on the stack. + +2013-02-08 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am (EXTRA_DIST): Ditto. + * Makefile.in: Rebuilt. + +2013-02-08 Anthony Green + + * configure.ac: Move sparc asm config checks to within functions + for compatibility with sun tools. + * configure: Rebuilt. + * src/sparc/ffi.c (ffi_prep_closure_loc): Flush cache on v9 + systems. + * src/sparc/v8.S (ffi_flush_icache): Implement a sparc v9 cache + flusher. + +2013-02-08 Nathan Rossi + + * src/microblaze/ffi.c (ffi_closure_call_SYSV): Fix handling of + small big-endian structures. + (ffi_prep_args): Ditto. + +2013-02-07 Anthony Green + + * src/sparc/v8.S (ffi_call_v8): Fix typo from last patch + (effectively hiding ffi_call_v8). + +2013-02-07 Anthony Green + + * configure.ac: Update bug reporting address. + * configure.in: Rebuild. + + * src/sparc/v8.S (ffi_flush_icache): Out-of-line cache flusher for + Sun compiler. + * src/sparc/ffi.c (ffi_call): Remove warning. + Call ffi_flush_icache for non-GCC builds. + (ffi_prep_closure_loc): Use ffi_flush_icache. + + * Makefile.am (EXTRA_DIST): Add libtool-ldflags. + * Makefile.in: Rebuilt. + * libtool-ldflags: New file. + +2013-02-07 Daniel Schepler + + * configure.ac: Correctly identify x32 systems as 64-bit. + * m4/libtool.m4: Remove libtool expr error. + * aclocal.m4, configure: Rebuilt. + +2013-02-07 Anthony Green + + * configure.ac: Fix GCC usage test. + * configure: Rebuilt. + * README: Mention LLVM/GCC x86_64 issue. + * testsuite/Makefile.in: Rebuilt. + +2013-02-07 Anthony Green + + * testsuite/libffi.call/cls_double_va.c (main): Replace // style + comments with /* */ for xlc compiler. + * testsuite/libffi.call/stret_large.c (main): Ditto. + * testsuite/libffi.call/stret_large2.c (main): Ditto. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/huge_struct.c (main): Ditto. + * testsuite/libffi.call/float_va.c (main): Ditto. + * testsuite/libffi.call/cls_struct_va1.c (main): Ditto. + * testsuite/libffi.call/cls_pointer_stack.c (main): Ditto. + * testsuite/libffi.call/cls_pointer.c (main): Ditto. + * testsuite/libffi.call/cls_longdouble_va.c (main): Ditto. + +2013-02-06 Anthony Green + + * man/ffi_prep_cif.3: Clean up for debian lintian checker. + +2013-02-06 Anthony Green + + * Makefile.am (pkgconfigdir): Add missing pkgconfig install bits. 
+ * Makefile.in: Rebuild. + +2013-02-02 Mark H Weaver + + * src/x86/ffi64.c (ffi_call): Sign-extend integer arguments passed + via general purpose registers. + +2013-01-21 Nathan Rossi + + * README: Add MicroBlaze details. + * Makefile.am: Add MicroBlaze support. + * configure.ac: Likewise. + * src/microblaze/ffi.c: New. + * src/microblaze/ffitarget.h: Likewise. + * src/microblaze/sysv.S: Likewise. + +2013-01-21 Nathan Rossi + * testsuite/libffi.call/return_uc.c: Fixed issue. + +2013-01-21 Chris Zankel + + * README: Add Xtensa support. + * Makefile.am: Likewise. + * configure.ac: Likewise. + * Makefile.in Regenerate. + * configure: Likewise. + * src/prep_cif.c: Handle Xtensa. + * src/xtensa: New directory. + * src/xtensa/ffi.c: New file. + * src/xtensa/ffitarget.h: Ditto. + * src/xtensa/sysv.S: Ditto. + +2013-01-11 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Replace // style + comments with /* */ for xlc compiler. + * src/powerpc/aix.S (ffi_call_AIX): Ditto. + * testsuite/libffi.call/ffitest.h (allocate_mmap): Delete + deprecated inline function. + * testsuite/libffi.special/ffitestcxx.h: Ditto. + * README: Add update for AIX support. + +2013-01-11 Anthony Green + + * configure.ac: Robustify pc relative reloc check. + * m4/ax_cc_maxopt.m4: Don't -malign-double. This is an ABI + changing option for 32-bit x86. + * aclocal.m4, configure: Rebuilt. + * README: Update supported target list. + +2013-01-10 Anthony Green + + * README (tested): Add Compiler column to table. + +2013-01-10 Anthony Green + + * src/x86/ffi64.c (struct register_args): Make sse array and array + of unions for sunpro compiler compatibility. + +2013-01-10 Anthony Green + + * configure.ac: Test target platform size_t size. Handle both 32 + and 64-bit builds for x86_64-* and i?86-* targets (allowing for + CFLAG option to change default settings). + * configure, aclocal.m4: Rebuilt. + +2013-01-10 Anthony Green + + * testsuite/libffi.special/special.exp: Only run exception + handling tests when using GNU compiler. + + * m4/ax_compiler_vendor.m4: New file. + * configure.ac: Test for compiler vendor and don't use + AX_CFLAGS_WARN_ALL with the sun compiler. + * aclocal.m4, configure: Rebuilt. + +2013-01-10 Anthony Green + + * include/ffi_common.h: Don't use GCCisms to define types when + building with the SUNPRO compiler. + +2013-01-10 Anthony Green + + * configure.ac: Put local.exp in the right place. + * configure: Rebuilt. + + * src/x86/ffi.c: Update comment about regparm function attributes. + * src/x86/sysv.S (ffi_closure_SYSV): The SUNPRO compiler requires + that all function arguments be passed on the stack (no regparm + support). + +2013-01-08 Anthony Green + + * configure.ac: Generate local.exp. This sets CC_FOR_TARGET + when we are using the vendor compiler. + * testsuite/Makefile.am (EXTRA_DEJAGNU_SITE_CONFIG): Point to + ../local.exp. + * configure, testsuite/Makefile.in: Rebuilt. + + * testsuite/libffi.call/call.exp: Run tests with different + options, depending on whether or not we are using gcc or the + vendor compiler. + * testsuite/lib/libffi.exp (libffi-init): Set using_gcc based on + whether or not we are building/testing with gcc. + +2013-01-08 Anthony Green + + * configure.ac: Switch x86 solaris target to X86 by default. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * configure.ac: Fix test for read-only eh_frame. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * src/x86/sysv.S, src/x86/unix64.S: Only emit DWARF unwind info + when building with the GNU toolchain. 
+ * testsuite/libffi.call/ffitest.h (CHECK): Fix for Solaris vendor + compiler. + +2013-01-07 Thorsten Glaser + + * testsuite/libffi.call/cls_uchar_va.c, + testsuite/libffi.call/cls_ushort_va.c, + testsuite/libffi.call/va_1.c: Testsuite fixes. + +2013-01-07 Thorsten Glaser + + * src/m68k/ffi.c (CIF_FLAGS_SINT8, CIF_FLAGS_SINT16): Define. + (ffi_prep_cif_machdep): Fix 8-bit and 16-bit signed calls. + * src/m68k/sysv.S (ffi_call_SYSV, ffi_closure_SYSV): Ditto. + +2013-01-04 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't automatically add -fexceptions + and -Wall. This is set in the configure script after testing for + GCC. + * Makefile.in: Rebuilt. + +2013-01-02 rofl0r + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix build error on ppc + when long double == double. + +2013-01-02 Reini Urban + + * Makefile.am (libffi_la_LDFLAGS): Add -no-undefined to LDFLAGS + (required for shared libs on cygwin/mingw). + * Makefile.in: Rebuilt. + +2012-10-31 Alan Modra + + * src/powerpc/linux64_closure.S: Add new ABI support. + * src/powerpc/linux64.S: Likewise. + +2012-10-30 Magnus Granberg + Pavel Labushev + + * configure.ac: New options pax_emutramp + * configure, fficonfig.h.in: Regenerated + * src/closures.c: New function emutramp_enabled_check() and + checks. + +2012-10-30 Frederick Cheung + + * configure.ac: Enable FFI_MAP_EXEC_WRIT for Darwin 12 (mountain + lion) and future version. + * configure: Rebuild. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * README: Add details of aarch64 port. + * src/aarch64/ffi.c: New. + * src/aarch64/ffitarget.h: Likewise. + * src/aarch64/sysv.S: Likewise. + * Makefile.am: Support aarch64. + * configure.ac: Support aarch64. + * Makefile.in, configure: Rebuilt. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * testsuite/lib/libffi.exp: Add support for aarch64. + * testsuite/libffi.call/cls_struct_va1.c: New. + * testsuite/libffi.call/cls_uchar_va.c: Likewise. + * testsuite/libffi.call/cls_uint_va.c: Likewise. + * testsuite/libffi.call/cls_ulong_va.c: Likewise. + * testsuite/libffi.call/cls_ushort_va.c: Likewise. + * testsuite/libffi.call/nested_struct11.c: Likewise. + * testsuite/libffi.call/uninitialized.c: Likewise. + * testsuite/libffi.call/va_1.c: Likewise. + * testsuite/libffi.call/va_struct1.c: Likewise. + * testsuite/libffi.call/va_struct2.c: Likewise. + * testsuite/libffi.call/va_struct3.c: Likewise. + +2012-10-12 Walter Lee + + * Makefile.am: Add TILE-Gx/TILEPro support. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + * src/prep_cif.c (ffi_prep_cif_core): Handle TILE-Gx/TILEPro. + * src/tile: New directory. + * src/tile/ffi.c: New file. + * src/tile/ffitarget.h: Ditto. + * src/tile/tile.S: Ditto. + +2012-10-12 Matthias Klose + + * generate-osx-source-and-headers.py: Normalize whitespace. + +2012-09-14 David Edelsohn + + * configure: Regenerated. + +2012-08-26 Andrew Pinski + + PR libffi/53014 + * src/mips/ffi.c (ffi_prep_closure_loc): Allow n32 with soft-float and n64 with + soft-float. + +2012-08-08 Uros Bizjak + + * src/s390/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-07-18 H.J. Lu + + PR libffi/53982 + PR libffi/53973 + * src/x86/ffitarget.h: Check __ILP32__ instead of __LP64__ for x32. + (FFI_SIZEOF_JAVA_RAW): Defined to 4 for x32. + +2012-05-16 H.J. Lu + + * configure: Regenerated. + +2012-05-05 Nicolas Lelong + + * libffi.xcodeproj/project.pbxproj: Fixes. + * README: Update for iOS builds. + +2012-04-23 Alexandre Keunecke I. 
de Mendonca + + * configure.ac: Add Blackfin/sysv support + * Makefile.am: Add Blackfin/sysv support + * src/bfin/ffi.c: Add Blackfin/sysv support + * src/bfin/ffitarget.h: Add Blackfin/sysv support + +2012-04-11 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new script. + * Makefile.in: Rebuilt. + +2012-04-11 Zachary Waldowski + + * generate-ios-source-and-headers.py, + libffi.xcodeproj/project.pbxproj: Support a Mac static library via + Xcode. Set iOS compatibility to 4.0. Move iOS trampoline + generation into an Xcode "run script" phase. Include both as + Xcode build scripts. Don't always regenerate config files. + +2012-04-10 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Add missing semicolon. + +2012-04-06 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new iOS/xcode files. + * Makefile.in: Rebuilt. + +2012-04-06 Mike Lewis + + * generate-ios-source-and-headers.py: New file. + * libffi.xcodeproj/project.pbxproj: New file. + * README: Update instructions on building iOS binary. + * build-ios.sh: Delete. + +2012-04-06 Anthony Green + + * src/x86/ffi64.c (UINT128): Define differently for Intel and GNU + compilers, then use it. + +2012-04-06 H.J. Lu + + * m4/libtool.m4 (_LT_ENABLE_LOCK): Support x32. + +2012-04-06 Anthony Green + + * testsuite/Makefile.am (EXTRA_DIST): Add missing test cases. + * testsuite/Makefile.in: Rebuilt. + +2012-04-05 Zachary Waldowski + + * include/ffi.h.in: Add missing trampoline table fields. + * src/arm/sysv.S: Fix ENTRY definition, and wrap symbol references + in CNAME. + * src/x86/ffi.c: Wrap Windows specific code in ifdefs. + +2012-04-02 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Declare double_tmp. + Silence casting pointer to integer of different size warning. + Delete goto to previously deleted label. + (ffi_call): Silence possibly undefined warning. + (ffi_closure_helper_SYSV): Declare variable type. + +2012-04-02 Peter Rosin + + * src/x86/win32.S (ffi_call_win32): Sign/zero extend the return + value in the Intel version as is already done for the AT&T version. + (ffi_closure_SYSV): Likewise. + (ffi_closure_raw_SYSV): Likewise. + (ffi_closure_STDCALL): Likewise. + +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_THISCALL): Unify the frame + generation, fix the ENDP label and remove the surplus third arg + from the 'lea' insn. + +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_SYSV): Make the 'stubraw' label + visible outside the PROC, so that ffi_closure_raw_THISCALL can see + it. Also instruct the assembler to add a frame to the function. + +2012-03-23 Peter Rosin + + * Makefile.am (AM_CPPFLAGS): Add -DFFI_BUILDING. + * Makefile.in: Rebuilt. + * include/ffi.h.in [MSVC]: Add __declspec(dllimport) decorations + to all data exports, when compiling libffi clients using MSVC. + +2012-03-29 Peter Rosin + + * src/x86/ffitarget.h (ffi_abi): Add new ABI FFI_MS_CDECL and + make it the default for MSVC. + (FFI_TYPE_MS_STRUCT): New structure return convention. + * src/x86/ffi.c (ffi_prep_cif_machdep): Tweak the structure + return convention for FFI_MS_CDECL to be FFI_TYPE_MS_STRUCT + instead of an ordinary FFI_TYPE_STRUCT. + (ffi_prep_args): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_call): Likewise. + (ffi_prep_incoming_args_SYSV): Likewise. + (ffi_raw_call): Likewise. + (ffi_prep_closure_loc): Treat FFI_MS_CDECL as FFI_SYSV. 
+ * src/x86/win32.S (ffi_closure_SYSV): For FFI_TYPE_MS_STRUCT, + return a pointer to the result structure in eax and don't pop + that pointer from the stack, the caller takes care of it. + (ffi_call_win32): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_closure_raw_SYSV): Likewise. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/closure_stdcall.c [MSVC]: Add inline + assembly version with Intel syntax. + * testsuite/libffi.call/closure_thiscall.c [MSVC]: Likewise. + +2012-03-23 Peter Rosin + + * testsuite/libffi.call/ffitest.h: Provide abstration of + __attribute__((fastcall)) in the form of a __FASTCALL__ + define. Define it to __fastcall for MSVC. + * testsuite/libffi.call/fastthis1_win32.c: Use the above. + * testsuite/libffi.call/fastthis2_win32.c: Likewise. + * testsuite/libffi.call/fastthis3_win32.c: Likewise. + * testsuite/libffi.call/strlen2_win32.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + +2012-03-22 Peter Rosin + + * src/x86/win32.S [MSVC] (ffi_closure_THISCALL): Remove the manual + frame on function entry, MASM adds one automatically. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/ffitest.h [MSVC]: Add kludge for missing + bits in the MSVC headers. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/cls_12byte.c: Adjust to the C89 style + with no declarations after statements. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5_1_byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_dbls_struct.c: Likewise. + * testsuite/libffi.call/cls_pointer_stack.c: Likewise. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. 
+ * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct10.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.call/testclosure.c: Likewise. + +2012-03-21 Peter Rosin + + * testsuite/libffi.call/float_va.c (float_va_fn): Use %f when + printing doubles (%lf is for long doubles). + (main): Likewise. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. + * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. + * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-19 Alan Hourihane + + * src/m68k/ffi.c: Add MINT support. + * src/m68k/sysv.S: Ditto. + +2012-03-06 Chung-Lin Tang + + * src/arm/ffi.c (ffi_call): Add __ARM_EABI__ guard around call to + ffi_call_VFP(). + (ffi_prep_closure_loc): Add __ARM_EABI__ guard around use of + ffi_closure_VFP. + * src/arm/sysv.S: Add __ARM_EABI__ guard around VFP code. + +2012-03-19 chennam + + * src/powerpc/ffi_darwin.c (ffi_prep_closure_loc): Fix AIX closure + support. + +2012-03-13 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/sh64/ffi.c (ffi_prep_closure_loc): Ditto. + +2012-03-09 David Edelsohn + + * src/powerpc/aix_closure.S (ffi_closure_ASM): Adjust for Darwin64 + change to return value of ffi_closure_helper_DARWIN and load type + from return type. + +2012-03-03 H.J. Lu + + * src/x86/ffi64.c (ffi_call): Cast the return value to unsigned + long. + (ffi_prep_closure_loc): Cast to 64bit address in trampoline. + (ffi_closure_unix64_inner): Cast return pointer to unsigned long + first. 
+ + * src/x86/ffitarget.h (FFI_SIZEOF_ARG): Defined to 8 for x32. + (ffi_arg): Set to unsigned long long for x32. + (ffi_sarg): Set to long long for x32. + +2012-03-03 H.J. Lu + + * src/prep_cif.c (ffi_prep_cif_core): Properly check bad ABI. + +2012-03-03 Andoni Morales Alastruey + + * configure.ac: Add -no-undefined for both 32- and 64-bit x86 + windows-like hosts. + * configure: Rebuilt. + +2012-02-27 Mikael Pettersson + + PR libffi/52223 + * Makefile.am (FLAGS_TO_PASS): Define. + * Makefile.in: Regenerate. + +2012-02-23 Anthony Green + + * src/*/ffitarget.h: Ensure that users never include ffitarget.h + directly. + +2012-02-23 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_closure_raw_THISCALL): New + prototype. + (ffi_prep_raw_closure_loc): Use ffi_closure_raw_THISCALL for + thiscall-convention. + (ffi_raw_call): Use ffi_prep_args_raw. + * src/x86/win32.S (ffi_closure_raw_THISCALL): Add + implementation for stub. + +2012-02-10 Kai Tietz + + * configure.ac (AM_LTLDFLAGS): Add -no-undefine for x64 + windows target. + * configure: Regenerated. + +2012-02-08 Kai Tietz + + * src/prep_cif.c (ffi_prep_cif): Allow for X86_WIN32 + also FFI_THISCALL. + * src/x86/ffi.c (ffi_closure_THISCALL): Add prototype. + (FFI_INIT_TRAMPOLINE_THISCALL): New trampoline code. + (ffi_prep_closure_loc): Add FFI_THISCALL support. + * src/x86/ffitarget.h (FFI_TRAMPOLINE_SIZE): Adjust size. + * src/x86/win32.S (ffi_closure_THISCALL): New closure code + for thiscall-calling convention. + * testsuite/libffi.call/closure_thiscall.c: New test. + +2012-01-28 Kai Tietz + + * src/libffi/src/x86/ffi.c (ffi_call_win32): Add new + argument to prototype for specify calling-convention. + (ffi_call): Add support for stdcall/thiscall convention. + (ffi_prep_args): Likewise. + (ffi_raw_call): Likewise. + * src/x86/ffitarget.h (ffi_abi): Add FFI_THISCALL and + FFI_FASTCALL. + * src/x86/win32.S (_ffi_call_win32): Add support for + fastcall/thiscall calling-convention calls. + * testsuite/libffi.call/fastthis1_win32.c: New test. + * testsuite/libffi.call/fastthis2_win32.c: New test. + * testsuite/libffi.call/fastthis3_win32.c: New test. + * testsuite/libffi.call/strlen2_win32.c: New test. + * testsuite/libffi.call/many2_win32.c: New test. + * testsuite/libffi.call/struct1_win32.c: New test. + * testsuite/libffi.call/struct2_win32.c: New test. + +2012-01-23 Uros Bizjak + + * src/alpha/ffi.c (ffi_prep_closure_loc): Check for bad ABI. + +2012-01-23 Anthony Green + Chris Young + + * configure.ac: Add Amiga support. + * configure: Rebuilt. + +2012-01-23 Dmitry Nadezhin + + * include/ffi_common.h (LIKELY, UNLIKELY): Fix definitions. + +2012-01-23 Andreas Schwab + + * src/m68k/sysv.S (ffi_call_SYSV): Properly test for plain + mc68000. Test for __HAVE_68881__ in addition to __MC68881__. + +2012-01-19 Jakub Jelinek + + PR rtl-optimization/48496 + * src/ia64/ffi.c (ffi_call): Fix up aliasing violations. + +2012-01-09 Rainer Orth + + * configure.ac (i?86-*-*): Set TARGET to X86_64. + * configure: Regenerate. + +2011-12-07 Andrew Pinski + + PR libffi/50051 + * src/mips/n32.S: Add ".set mips4". + +2011-11-21 Andreas Tobler + + * configure: Regenerate. 
+ +2011-11-12 David Gilbert + + * doc/libffi.texi, include/ffi.h.in, include/ffi_common.h, + man/Makefile.am, man/ffi.3, man/ffi_prep_cif.3, + man/ffi_prep_cif_var.3, src/arm/ffi.c, src/arm/ffitarget.h, + src/cris/ffi.c, src/prep_cif.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/float_va.c: Many changes to support variadic + function calls. + +2011-11-12 Kyle Moffett + + * src/powerpc/ffi.c, src/powerpc/ffitarget.h, + src/powerpc/ppc_closure.S, src/powerpc/sysv.S: Many changes for + softfloat powerpc variants. + +2011-11-12 Petr Salinger + + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Fix kfreebsd support. + * configure: Rebuilt. + +2011-11-12 Timothy Wall + + * src/arm/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): Max + alignment of 4 for wince on ARM. + +2011-11-12 Kyle Moffett + Anthony Green + + * src/ppc/sysv.S, src/ppc/ffi.c: Remove use of ppc string + instructions (not available on some cores, like the PPC440). + +2011-11-12 Kimura Wataru + + * m4/ax_enable_builddir: Change from string comparison to numeric + comparison for wc output. + * configure.ac: Enable FFI_MMAP_EXEC_WRIT for darwin11 aka Mac OS + X 10.7. + * configure: Rebuilt. + +2011-11-12 Anthony Green + + * Makefile.am (AM_CCASFLAGS): Add -g option to build assembly + files with debug info. + * Makefile.in: Rebuilt. + +2011-11-12 Jasper Lievisse Adriaanse + + * README: Update list of supported OpenBSD systems. + +2011-11-12 Anthony Green + + * libtool-version: Update. + * Makefile.am (nodist_libffi_la_SOURCES): Add src/debug.c if + FFI_DEBUG. + (libffi_la_SOURCES): Remove src/debug.c + (EXTRA_DIST): Add src/debug.c + * Makefile.in: Rebuilt. + * README: Update for 3.0.11. + +2011-11-10 Richard Henderson + + * configure.ac (GCC_AS_CFI_PSEUDO_OP): Use it instead of inline check. + * configure, aclocal.m4: Rebuild. + +2011-09-04 Iain Sandoe + + PR libffi/49594 + * src/powerpc/darwin_closure.S (stubs): Make the stub binding + helper reference track the architecture pointer size. + +2011-08-25 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Remove hard-coded assembly + instructions. + * src/arm/sysv.S (ffi_arm_trampoline): Put them here instead. + +2011-07-11 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Clear icache. + +2011-06-29 Rainer Orth + + * testsuite/libffi.call/cls_double_va.c: Move PR number to comment. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. + +2011-06-29 Rainer Orth + + PR libffi/46660 + * testsuite/libffi.call/cls_double_va.c: xfail dg-output on + mips-sgi-irix6*. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. + +2011-06-14 Rainer Orth + + * testsuite/libffi.call/huge_struct.c (test_large_fn): Use PRIu8, + PRId8 instead of %hhu, %hhd. + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRId8, + PRIu8): Define. + [__sgi__] (PRId8, PRIu8): Define. + +2011-04-29 Rainer Orth + + * src/alpha/osf.S (UA_SI, FDE_ENCODING, FDE_ENCODE, FDE_ARANGE): + Define. + Use them to handle ELF vs. ECOFF differences. + [__osf__] (_GLOBAL__F_ffi_call_osf): Define. + +2011-03-30 Timothy Wall + + * src/powerpc/darwin.S: Fix unknown FDE encoding. + * src/powerpc/darwin_closure.S: ditto. + +2011-02-25 Anthony Green + + * src/powerpc/ffi.c (ffi_prep_closure_loc): Allow for more + 32-bit ABIs. + +2011-02-15 Anthony Green + + * m4/ax_cc_maxopt.m4: Don't -malign-double or use -ffast-math. + * configure: Rebuilt. + +2011-02-13 Ralf Wildenhues + + * configure: Regenerate. 
+ +2011-02-13 Anthony Green + + * include/ffi_common.h (UNLIKELY, LIKELY): Define. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Remove definition. + * src/prep_cif.c (UNLIKELY, LIKELY): Remove definition. + + * src/prep_cif.c (initialize_aggregate): Convert assertion into + FFI_BAD_TYPEDEF return. Initialize arg size and alignment to 0. + + * src/pa/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/arm/ffi.c (ffi_prep_closure_loc): Ditto. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Ditto. + * src/mips/ffi.c (ffi_prep_closure_loc): Ditto. + * src/ia64/ffi.c (ffi_prep_closure_loc): Ditto. + * src/avr32/ffi.c (ffi_prep_closure_loc): Ditto. + +2011-02-11 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-02-11 Eric Botcazou + + * src/sparc/v9.S (STACKFRAME): Bump to 176. + +2011-02-09 Stuart Shelton + + http://bugs.gentoo.org/show_bug.cgi?id=286911 + * src/mips/ffitarget.h: Clean up error messages. + * src/java_raw_api.c (ffi_java_translate_args): Cast raw arg to + ffi_raw*. + * include/ffi.h.in: Add pragma for SGI compiler. + +2011-02-09 Anthony Green + + * configure.ac: Add powerpc64-*-darwin* support. + +2011-02-09 Anthony Green + + * README: Mention Interix. + +2011-02-09 Jonathan Callen + + * configure.ac: Add Interix to win32/cygwin/mingw case. + * configure: Ditto. + * src/closures.c: Treat Interix like Cygwin, instead of as a + generic win32. + +2011-02-09 Anthony Green + + * testsuite/libffi.call/err_bad_typedef.c: Remove xfail. + * testsuite/libffi.call/err_bad_abi.c: Remove xfail. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Define. + (ffi_prep_closure_loc): Check for bad ABI. + * src/prep_cif.c (UNLIKELY, LIKELY): Define. + (initialize_aggregate): Check for bad types. + +2011-02-09 Landon Fuller + + * Makefile.am (EXTRA_DIST): Add build-ios.sh, src/arm/gentramp.sh, + src/arm/trampoline.S. + (nodist_libffi_la_SOURCES): Add src/arc/trampoline.S. + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Define. + * src/arm/ffi.c (ffi_trampoline_table) + (ffi_closure_trampoline_table_page, ffi_trampoline_table_entry) + (FFI_TRAMPOLINE_CODELOC_CONFIG, FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET) + (FFI_TRAMPOLINE_COUNT, ffi_trampoline_lock, ffi_trampoline_tables) + (ffi_trampoline_table_alloc, ffi_closure_alloc, ffi_closure_free): + Define for FFI_EXEC_TRAMPOLINE_TABLE case (iOS). + (ffi_prep_closure_loc): Handl FFI_EXEC_TRAMPOLINE_TABLE case + separately. + * src/arm/sysv.S: Handle Apple iOS host. + * src/closures.c: Handle FFI_EXEC_TRAMPOLINE_TABLE case. + * build-ios.sh: New file. + * fficonfig.h.in, configure, Makefile.in: Rebuilt. + * README: Mention ARM iOS. + +2011-02-08 Oren Held + + * src/dlmalloc.c (_STRUCT_MALLINFO): Define in order to avoid + redefinition of mallinfo on HP-UX. + +2011-02-08 Ginn Chen + + * src/sparc/ffi.c (ffi_call): Make compatible with Solaris Studio + aggregate return ABI. Flush cache. + (ffi_prep_closure_loc): Flush cache. + +2011-02-11 Anthony Green + + From Tom Honermann : + * src/powerpc/aix.S (ffi_call_AIX): Support for xlc toolchain on + AIX. Declare .ffi_prep_args. Insert nops after branch + instructions so that the AIX linker can insert TOC reload + instructions. + * src/powerpc/aix_closure.S: Declare .ffi_closure_helper_DARWIN. + +2011-02-08 Ed + + * src/powerpc/asm.h: Fix grammar nit in comment. + +2011-02-08 Uli Link + + * include/ffi.h.in (FFI_64_BIT_MAX): Define and use. 
+ +2011-02-09 Rainer Orth + + PR libffi/46661 + * testsuite/libffi.call/cls_pointer.c (main): Cast void * to + uintptr_t first. + * testsuite/libffi.call/cls_pointer_stack.c (main): Likewise. + +2011-02-08 Rafael Avila de Espindola + + * configure.ac: Fix x86 test for pc related relocs. + * configure: Rebuilt. + +2011-02-07 Joel Sherrill + + * libffi/src/m68k/ffi.c: Add RTEMS support for cache flushing. + Handle case when CPU variant does not have long double support. + * libffi/src/m68k/sysv.S: Add support for mc68000, Coldfire, + and cores with soft floating point. + +2011-02-07 Joel Sherrill + + * configure.ac: Add mips*-*-rtems* support. + * configure: Regenerate. + * src/mips/ffitarget.h: Ensure needed constants are available + for targets which do not have sgidefs.h. + +2011-01-26 Dave Korn + + PR target/40125 + * configure.ac (AM_LTLDFLAGS): Add -bindir option for windows DLLs. + * configure: Regenerate. + +2010-12-18 Iain Sandoe + + PR libffi/29152 + PR libffi/42378 + * src/powerpc/darwin_closure.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffitarget.h (POWERPC_DARWIN64): New, + (FFI_TRAMPOLINE_SIZE): Update for Darwin64. + * src/powerpc/darwin.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffi_darwin.c: Likewise. + +2010-12-06 Rainer Orth + + * configure.ac (libffi_cv_as_ascii_pseudo_op): Use double + backslashes. + (libffi_cv_as_string_pseudo_op): Likewise. + * configure: Regenerate. + +2010-12-03 Chung-Lin Tang + + * src/arm/sysv.S (ffi_closure_SYSV): Add UNWIND to .pad directive. + (ffi_closure_VFP): Same. + (ffi_call_VFP): Move down to before ffi_closure_VFP. Add '.fpu vfp' + directive. + +2010-12-01 Rainer Orth + + * testsuite/libffi.call/ffitest.h [__sgi] (PRId64, PRIu64): Define. + (PRIuPTR): Define. + +2010-11-29 Richard Henderson + Rainer Orth + + * src/x86/sysv.S (FDE_ENCODING, FDE_ENCODE): Define. + (.eh_frame): Use FDE_ENCODING. + (.LASFDE1, .LASFDE2, LASFDE3): Simplify with FDE_ENCODE. + +2010-11-22 Jacek Caban + + * configure.ac: Check for symbol underscores on mingw-w64. + * configure: Rebuilt. + * src/x86/win64.S: Correctly access extern symbols in respect to + underscores. + +2010-11-15 Rainer Orth + + * testsuite/lib/libffi-dg.exp: Rename ... + * testsuite/lib/libffi.exp: ... to this. + * libffi/testsuite/libffi.call/call.exp: Don't load libffi-dg.exp. + * libffi/testsuite/libffi.special/special.exp: Likewise. + +2010-10-28 Chung-Lin Tang + + * src/arm/ffi.c (ffi_prep_args): Add VFP register argument handling + code, new parameter, and return value. Update comments. + (ffi_prep_cif_machdep): Add case for VFP struct return values. Add + call to layout_vfp_args(). + (ffi_call_SYSV): Update declaration. + (ffi_call_VFP): New declaration. + (ffi_call): Add VFP struct return conditions. Call ffi_call_VFP() + when ABI is FFI_VFP. + (ffi_closure_VFP): New declaration. + (ffi_closure_SYSV_inner): Add new vfp_args parameter, update call to + ffi_prep_incoming_args_SYSV(). + (ffi_prep_incoming_args_SYSV): Update parameters. Add VFP argument + case handling. + (ffi_prep_closure_loc): Pass ffi_closure_VFP to trampoline + construction under VFP hard-float. + (rec_vfp_type_p): New function. + (vfp_type_p): Same. + (place_vfp_arg): Same. + (layout_vfp_args): Same. + * src/arm/ffitarget.h (ffi_abi): Add FFI_VFP. Define FFI_DEFAULT_ABI + based on __ARM_PCS_VFP. + (FFI_EXTRA_CIF_FIELDS): Define for adding VFP hard-float specific + fields. + (FFI_TYPE_STRUCT_VFP_FLOAT): Define internally used type code. 
+ (FFI_TYPE_STRUCT_VFP_DOUBLE): Same. + * src/arm/sysv.S (ffi_call_SYSV): Change call of ffi_prep_args() to + direct call. Move function pointer load upwards. + (ffi_call_VFP): New function. + (ffi_closure_VFP): Same. + + * testsuite/lib/libffi-dg.exp (check-flags): New function. + (dg-skip-if): New function. + * testsuite/libffi.call/cls_double_va.c: Skip if target is arm*-*-* + and compiler options include -mfloat-abi=hard. + * testsuite/libffi.call/cls_longdouble_va.c: Same. + +2010-10-01 Jakub Jelinek + + PR libffi/45677 + * src/x86/ffi64.c (ffi_prep_cif_machdep): Ensure cif->bytes is + a multiple of 8. + * testsuite/libffi.call/many2.c: New test. + +2010-08-20 Mark Wielaard + + * src/closures.c (open_temp_exec_file_mnt): Check if getmntent_r + returns NULL. + +2010-08-09 Andreas Tobler + + * configure.ac: Add target powerpc64-*-freebsd*. + * configure: Regenerate. + * testsuite/libffi.call/cls_align_longdouble_split.c: Pass + -mlong-double-128 only to linux targets. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_longdouble.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + +2010-08-05 Dan Witte + + * Makefile.am: Pass FFI_DEBUG define to msvcc.sh for linking to the + debug CRT when --enable-debug is given. + * configure.ac: Define it. + * msvcc.sh: Translate -g and -DFFI_DEBUG appropriately. + +2010-08-04 Dan Witte + + * src/x86/ffitarget.h: Add X86_ANY define for all x86/x86_64 + platforms. + * src/x86/ffi.c: Remove redundant ifdef checks. + * src/prep_cif.c: Push stack space computation into src/x86/ffi.c + for X86_ANY so return value space doesn't get added twice. + +2010-08-03 Neil Rashbrooke + + * msvcc.sh: Don't pass -safeseh to ml64 because behavior is buggy. + +2010-07-22 Dan Witte + + * src/*/ffitarget.h: Make FFI_LAST_ABI one past the last valid ABI. + * src/prep_cif.c: Fix ABI assertion. + * src/cris/ffi.c: Ditto. + +2010-07-10 Evan Phoenix + + * src/closures.c (selinux_enabled_check): Fix strncmp usage bug. + +2010-07-07 Dan Horák + + * include/ffi.h.in: Protect #define with #ifndef. + * src/powerpc/ffitarget.h: Ditto. + * src/s390/ffitarget.h: Ditto. + * src/sparc/ffitarget.h: Ditto. + +2010-07-07 Neil Roberts + + * src/x86/sysv.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2010-07-02 Jakub Jelinek + + * Makefile.am (AM_MAKEFLAGS): Pass also mandir to submakes. + * Makefile.in: Regenerated. + +2010-05-19 Rainer Orth + + * configure.ac (libffi_cv_as_x86_pcrel): Check for illegal in as + output, too. + (libffi_cv_as_ascii_pseudo_op): Check for .ascii. + (libffi_cv_as_string_pseudo_op): Check for .string. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/x86/sysv.S (.eh_frame): Use .ascii, .string or error. + +2010-05-11 Dan Witte + + * doc/libffi.tex: Document previous change. + +2010-05-11 Makoto Kato + + * src/x86/ffi.c (ffi_call): Don't copy structs passed by value. + +2010-05-05 Michael Kohler + + * src/dlmalloc.c (dlfree): Fix spelling. + * src/ia64/ffi.c (ffi_prep_cif_machdep): Ditto. + * configure.ac: Ditto. + * configure: Rebuilt. + +2010-04-13 Dan Witte + + * msvcc.sh: Build with -W3 instead of -Wall. + * src/powerpc/ffi_darwin.c: Remove build warnings. + * src/x86/ffi.c: Ditto. + * src/x86/ffitarget.h: Ditto. + +2010-04-12 Dan Witte + Walter Meinl + + * configure.ac: Add OS/2 support. + * configure: Rebuilt. + * src/closures.c: Ditto. + * src/dlmalloc.c: Ditto. + * src/x86/win32.S: Ditto. 
+ +2010-04-07 Jakub Jelinek + + * testsuite/libffi.call/err_bad_abi.c: Remove unused args variable. + +2010-04-02 Ralf Wildenhues + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2010-03-30 Dan Witte + + * msvcc.sh: Disable build warnings. + * README (tested): Clarify windows build procedure. + +2010-03-15 Rainer Orth + + * configure.ac (libffi_cv_as_x86_64_unwind_section_type): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * libffi/src/x86/unix64.S (.eh_frame) + [HAVE_AS_X86_64_UNWIND_SECTION_TYPE]: Use @unwind section type. + +2010-03-14 Matthias Klose + + * src/x86/ffi64.c: Fix typo in comment. + * src/x86/ffi.c: Use /* ... */ comment style. + +2010-02-24 Rainer Orth + + * doc/libffi.texi (The Closure API): Fix typo. + * doc/libffi.info: Remove. + +2010-02-15 Matthias Klose + + * src/arm/sysv.S (__ARM_ARCH__): Define for processor + __ARM_ARCH_7EM__. + +2010-01-15 Anthony Green + + * README: Add notes on building with Microsoft Visual C++. + +2010-01-15 Daniel Witte + + * msvcc.sh: New file. + + * src/x86/win32.S: Port assembly routines to MSVC and #ifdef. + * src/x86/ffi.c: Tweak function declaration and remove excess + parens. + * include/ffi.h.in: Add __declspec(align(8)) to typedef struct + ffi_closure. + + * src/x86/ffi.c: Merge ffi_call_SYSV and ffi_call_STDCALL into new + function ffi_call_win32 on X86_WIN32. + * src/x86/win32.S (ffi_call_SYSV): Rename to ffi_call_win32. + (ffi_call_STDCALL): Remove. + + * src/prep_cif.c (ffi_prep_cif): Move stack space allocation code + to ffi_prep_cif_machdep for x86. + * src/x86/ffi.c (ffi_prep_cif_machdep): To here. + +2010-01-15 Oliver Kiddle + + * src/x86/ffitarget.h (ffi_abi): Check for __i386 and __amd64 for + Sun Studio compiler compatibility. + +2010-01-12 Conrad Irwin + + * doc/libffi.texi: Add closure example. + +2010-01-07 Rainer Orth + + PR libffi/40701 + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRIdLL, + PRIuLL, PRId64, PRIu64, PRIuPTR): Define. + * testsuite/libffi.call/cls_align_sint64.c: Add -Wno-format on + alpha*-dec-osf*. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/return_ll1.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.special/ffitestcxx.h (allocate_mmap): Cast + MAP_FAILED to char *. + +2010-01-06 Rainer Orth + + * src/mips/n32.S: Use .abicalls and .eh_frame with __GNUC__. + +2009-12-31 Anthony Green + + * README: Update for libffi 3.0.9. + +2009-12-27 Matthias Klose + + * configure.ac (HAVE_LONG_DOUBLE): Define for mips when + appropriate. + * configure: Rebuilt. + +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_longdouble_va.c: Mark as xfail for + avr32*-*-*. + * testsuite/libffi.call/cls_double_va.c: Ditto. + +2009-12-26 Andreas Tobler + + * testsuite/libffi.call/ffitest.h: Conditionally include stdint.h + and inttypes.h. + * testsuite/libffi.special/unwindtest.cc: Ditto. + +2009-12-26 Andreas Tobler + + * configure.ac: Add amd64-*-openbsd*. + * configure: Rebuilt. + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Link + openbsd programs with -lpthread. 
+ +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: Remove xfail for + mips*-*-* and arm*-*-*. + * testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c: Remove xfail for arm*-*-*. + +2009-12-31 Kay Tietz + + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRuLL): Fix + definitions. + +2009-12-31 Carlo Bramini + + * configure.ac (AM_LTLDFLAGS): Define for windows hosts. + * Makefile.am (libffi_la_LDFLAGS): Add AM_LTLDFLAGS. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + +2009-12-31 Anthony Green + Blake Chaffin. + + * testsuite/libffi.call/huge_struct.c: New test case from Blake + Chaffin @ Apple. + +2009-12-28 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Copy abi and nargs to + local variables. + (aix_adjust_aggregate_sizes): New function. + (ffi_prep_cif_machdep): Call it. + +2009-12-26 Andreas Tobler + + * configure.ac: Define FFI_MMAP_EXEC_WRIT for the given targets. + * configure: Regenerate. + * fficonfig.h.in: Likewise. + * src/closures.c: Remove the FFI_MMAP_EXEC_WRIT definition for + Solaris/x86. + +2009-12-26 Andreas Schwab + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Advance intarg_count + when a float arguments is passed in memory. + (ffi_closure_helper_SYSV): Mark general registers as used up when + a 64bit or soft-float long double argument is passed in memory. + +2009-12-25 Matthias Klose + + * man/ffi_call.3: Fix #include in examples. + * doc/libffi.texi: Add dircategory. + +2009-12-25 Frank Everdij + + * include/ffi.h.in: Placed '__GNUC__' ifdef around + '__attribute__((aligned(8)))' in ffi_closure, fixes compile for + IRIX MIPSPro c99. + * include/ffi_common.h: Added '__sgi' define to non + '__attribute__((__mode__()))' integer typedefs. + * src/mips/ffi.c (ffi_call, ffi_closure_mips_inner_O32, + ffi_closure_mips_inner_N32): Added 'defined(_MIPSEB)' to BE check. + (ffi_closure_mips_inner_O32, ffi_closure_mips_inner_N32): Added + FFI_LONGDOUBLE support and alignment(N32 only). + * src/mips/ffitarget.h: Corrected '#include ' for IRIX and + fixed non '__attribute__((__mode__()))' integer typedefs. + * src/mips/n32.S: Put '#ifdef linux' around '.abicalls' and '.eh_frame' + since they are Linux/GNU Assembler specific. + +2009-12-25 Bradley Smith + + * configure.ac, Makefile.am, src/avr32/ffi.c, + src/avr32/ffitarget.h, + src/avr32/sysv.S: Add AVR32 port. + * configure, Makefile.in: Rebuilt. + +2009-12-21 Andreas Tobler + + * configure.ac: Make i?86 build on FreeBSD and OpenBSD. + * configure: Regenerate. + +2009-12-15 John David Anglin + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on PA HP-UX. + +2009-12-13 John David Anglin + + * src/pa/ffi.c (ffi_closure_inner_pa32): Handle FFI_TYPE_LONGDOUBLE + type on HP-UX. + +2012-02-13 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_prep_raw_closure_loc): Add thiscall + support for X86_WIN32. + (FFI_INIT_TRAMPOLINE_THISCALL): Fix displacement. + +2009-12-11 Eric Botcazou + + * src/sparc/ffi.c (ffi_closure_sparc_inner_v9): Properly align 'long + double' arguments. + +2009-12-11 Eric Botcazou + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on Solaris < 10. 
+ +2009-12-10 Rainer Orth + + PR libffi/40700 + * src/closures.c [X86_64 && __sun__ && __svr4__] + (FFI_MMAP_EXEC_WRIT): Define. + +2009-12-08 David Daney + + * testsuite/libffi.call/stret_medium.c: Remove xfail for mips*-*-* + * testsuite/libffi.call/cls_align_longdouble_split2.c: Same. + * testsuite/libffi.call/stret_large.c: Same. + * testsuite/libffi.call/cls_align_longdouble_split.c: Same. + * testsuite/libffi.call/stret_large2.c: Same. + * testsuite/libffi.call/stret_medium2.c: Same. + +2009-12-07 David Edelsohn + + * src/powerpc/aix_closure.S (libffi_closure_ASM): Fix tablejump + typo. + +2009-12-05 David Edelsohn + + * src/powerpc/aix.S: Update AIX32 code to be consistent with AIX64 + code. + * src/powerpc/aix_closure.S: Same. + +2009-12-05 Ralf Wildenhues + + * Makefile.in: Regenerate. + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2009-12-04 David Edelsohn + + * src/powerpc/aix_closure.S: Reorganize 64-bit code to match + linux64_closure.S. + +2009-12-04 Uros Bizjak + + PR libffi/41908 + * src/x86/ffi64.c (classify_argument): Update from + gcc/config/i386/i386.c. + (ffi_closure_unix64_inner): Do not use the address of two consecutive + SSE registers directly. + * testsuite/libffi.call/cls_dbls_struct.c (main): Remove xfail + for x86_64 linux targets. + +2009-12-04 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_closure_helper_DARWIN): Increment + pfr for long double split between fpr13 and stack. + +2009-12-03 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Increment next_arg and + fparg_count twice for long double. + +2009-12-03 David Edelsohn + + PR libffi/42243 + * src/powerpc/ffi_darwin.c (ffi_prep_args): Remove extra parentheses. + +2009-12-03 Uros Bizjak + + * testsuite/libffi.call/cls_longdouble_va.c (main): Fix format string. + Remove xfails for x86 linux targets. + +2009-12-02 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Fix typo in INT64 + case. + +2009-12-01 David Edelsohn + + * src/powerpc/aix.S (ffi_call_AIX): Convert to more standard + register usage. Call ffi_prep_args directly. Add long double + return value support. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Double arg increment + applies to FFI_TYPE_DOUBLE. Correct fpr_base increment typo. + Separate FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases. + (ffi_prep_cif_machdep): Only 16 byte stack alignment in 64 bit + mode. + (ffi_closure_helper_DARWIN): Remove nf and ng counters. Move temp + into case. + * src/powerpc/aix_closure.S: Maintain 16 byte stack alignment. + Allocate result area between params and FPRs. + +2009-11-30 David Edelsohn + + PR target/35484 + * src/powerpc/ffitarget.h (POWERPC64): Define for PPC64 Linux and + AIX64. + * src/powerpc/aix.S: Implement AIX64 version. + * src/powerpc/aix_closure.S: Implement AIX64 version. + (ffi_closure_ASM): Use extsb, lha and displament addresses. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Implement AIX64 + support. + (ffi_prep_cif_machdep): Same. + (ffi_call): Same. + (ffi_closure_helper_DARWIN): Same. + +2009-11-02 Andreas Tobler + + PR libffi/41908 + * testsuite/libffi.call/testclosure.c: New test. + +2009-09-28 Kai Tietz + + * src/x86/win64.S (_ffi_call_win64 stack): Remove for gnu + assembly version use of ___chkstk. 
+ +2009-09-23 Matthias Klose + + PR libffi/40242, PR libffi/41443 + * src/arm/sysv.S (__ARM_ARCH__): Define for processors + __ARM_ARCH_6T2__, __ARM_ARCH_6M__, __ARM_ARCH_7__, + __ARM_ARCH_7A__, __ARM_ARCH_7R__, __ARM_ARCH_7M__. + Change the conditionals to __SOFTFP__ || __ARM_EABI__ + for -mfloat-abi=softfp to work. + +2009-09-17 Loren J. Rittle + + PR testsuite/32843 (strikes again) + * src/x86/ffi.c (ffi_prep_cif_machdep): Add X86_FREEBSD to + enable proper extension on char and short. + +2009-09-15 David Daney + + * src/java_raw_api.c (ffi_java_raw_to_rvalue): Remove special + handling for FFI_TYPE_POINTER. + * src/mips/ffitarget.h (FFI_TYPE_STRUCT_D_SOFT, + FFI_TYPE_STRUCT_F_SOFT, FFI_TYPE_STRUCT_DD_SOFT, + FFI_TYPE_STRUCT_FF_SOFT, FFI_TYPE_STRUCT_FD_SOFT, + FFI_TYPE_STRUCT_DF_SOFT, FFI_TYPE_STRUCT_SOFT): New defines. + (FFI_N32_SOFT_FLOAT, FFI_N64_SOFT_FLOAT): New ffi_abi enumerations. + (enum ffi_abi): Set FFI_DEFAULT_ABI for soft-float. + * src/mips/n32.S (ffi_call_N32): Add handling for soft-float + structure and pointer returns. + (ffi_closure_N32): Add handling for pointer returns. + * src/mips/ffi.c (ffi_prep_args, calc_n32_struct_flags, + calc_n32_return_struct_flags): Handle soft-float. + (ffi_prep_cif_machdep): Handle soft-float, fix pointer handling. + (ffi_call_N32): Declare proper argument types. + (ffi_call, copy_struct_N32, ffi_closure_mips_inner_N32): Handle + soft-float. + +2009-08-24 Ralf Wildenhues + + * configure.ac (AC_PREREQ): Bump to 2.64. + +2009-08-22 Ralf Wildenhues + + * Makefile.am (install-html, install-pdf): Remove. + * Makefile.in: Regenerate. + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2011-08-22 Jasper Lievisse Adriaanse + + * configure.ac: Add OpenBSD/hppa and OpenBSD/powerpc support. + * configure: Rebuilt. + +2009-07-30 Ralf Wildenhues + + * configure.ac (_AC_ARG_VAR_PRECIOUS): Use m4_rename_force. + +2009-07-24 Dave Korn + + PR libffi/40807 + * src/x86/ffi.c (ffi_prep_cif_machdep): Also use sign/zero-extending + return types for X86_WIN32. + * src/x86/win32.S (_ffi_call_SYSV): Handle omitted return types. + (_ffi_call_STDCALL, _ffi_closure_SYSV, _ffi_closure_raw_SYSV, + _ffi_closure_STDCALL): Likewise. + + * src/closures.c (is_selinux_enabled): Define to const 0 for Cygwin. + (dlmmap, dlmunmap): Also use these functions on Cygwin. + +2009-07-11 Richard Sandiford + + PR testsuite/40699 + PR testsuite/40707 + PR testsuite/40709 + * testsuite/lib/libffi-dg.exp: Revert 2009-07-02, 2009-07-01 and + 2009-06-30 commits. + +2009-07-01 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Set ld_library_path + to "" before adding paths. (This reinstates an assignment that + was removed by my 2009-06-30 commit, but changes the initial + value from "." to "".) + +2009-07-01 H.J. Lu + + PR testsuite/40601 + * testsuite/lib/libffi-dg.exp (libffi-init): Properly set + gccdir. Adjust ld_library_path for gcc only if gccdir isn't + empty. + +2009-06-30 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Don't add "." + to ld_library_path. Use add_path. Add just find_libgcc_s + to ld_library_path, not every libgcc multilib directory. + +2009-06-16 Wim Lewis + + * src/powerpc/ffi.c: Avoid clobbering cr3 and cr4, which are + supposed to be callee-saved. + * src/powerpc/sysv.S (small_struct_return_value): Fix overrun of + return buffer for odd-size structs. 
+ +2009-06-16 Andreas Tobler + + PR libffi/40444 + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Add + allow_stack_execute for Darwin. + +2009-06-16 Andrew Haley + + * configure.ac (TARGETDIR): Add missing blank lines. + * configure: Regenerate. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-15 Andrew Haley + + * testsuite/libffi.call/err_bad_typedef.c: xfail everywhere. + * testsuite/libffi.call/err_bad_abi.c: Likewise. + +2009-06-12 Andrew Haley + + * Makefile.am: Remove info_TEXINFOS. + +2009-06-12 Andrew Haley + + * ChangeLog.libffi: testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. 
+ + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + 
testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-11 Kaz Kojima + + * testsuite/libffi.call/cls_longdouble_va.c: Add xfail sh*-*-linux-*. + * testsuite/libffi.call/err_bad_abi.c: Add xfail sh*-*-*. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + +2009-06-09 Andrew Haley + + * src/x86/freebsd.S: Add missing file. + +2009-06-08 Andrew Haley + + Import from libffi 3.0.8: + + * doc/libffi.texi: New file. + * doc/libffi.info: Likewise. + * doc/stamp-vti: Likewise. + * man/Makefile.am: New file. + * man/ffi_call.3: New file. + + * Makefile.am (EXTRA_DIST): Add src/x86/darwin64.S, + src/dlmalloc.c. + (nodist_libffi_la_SOURCES): Add X86_FREEBSD. + + * configure.ac: Bump version to 3.0.8. + parisc*-*-linux*: Add. + i386-*-freebsd* | i386-*-openbsd*: Add. + powerpc-*-beos*: Add. + AM_CONDITIONAL X86_FREEBSD: Add. + AC_CONFIG_FILES: Add man/Makefile. + + * include/ffi.h.in (FFI_FN): Change void (*)() to void (*)(void). + +2009-06-08 Andrew Haley + + * README: Import from libffi 3.0.8. + +2009-06-08 Andrew Haley + + * testsuite/libffi.call/err_bad_abi.c: Add xfails. + * testsuite/libffi.call/cls_longdouble_va.c: Add xfails. + * testsuite/libffi.call/cls_dbls_struct.c: Add xfail x86_64-*-linux-*. + * testsuite/libffi.call/err_bad_typedef.c: Add xfails. + + * testsuite/libffi.call/stret_medium2.c: Add __UNUSED__ to args. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. 
+ +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2009-06-05 Andrew Haley + + * src/x86/ffitarget.h, src/x86/ffi.c: Merge stdcall changes from + libffi. + +2009-06-04 Andrew Haley + + * src/x86/ffitarget.h, src/x86/win32.S, src/x86/ffi.c: Back out + stdcall changes. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2009-06-04 Andrew Haley + + * include/ffi.h.in: Change void (*)() to void (*)(void). + * src/x86/ffi.c: Likewise. + +2009-06-04 Andrew Haley + + * src/powerpc/ppc_closure.S: Insert licence header. + * src/powerpc/linux64_closure.S: Likewise. + * src/m68k/sysv.S: Likewise. + + * src/sh64/ffi.c: Change void (*)() to void (*)(void). + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/m32r/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/alpha/ffi.c: Likewise. + * src/alpha/osf.S: Likewise. + * src/frv/ffi.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/pa/hpux32.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/ia64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2009-06-04 Andrew Haley + + include/ffi.h.in, + src/arm/ffitarget.h, + src/arm/ffi.c, + src/arm/sysv.S, + src/powerpc/ffitarget.h, + src/closures.c, + src/sh64/ffitarget.h, + src/sh64/ffi.c, + src/sh64/sysv.S, + src/types.c, + src/x86/ffi64.c, + src/x86/ffitarget.h, + src/x86/win32.S, + src/x86/darwin.S, + src/x86/ffi.c, + src/x86/sysv.S, + src/x86/unix64.S, + src/alpha/ffitarget.h, + src/alpha/ffi.c, + src/alpha/osf.S, + src/m68k/ffitarget.h, + src/frv/ffitarget.h, + src/frv/ffi.c, + src/s390/ffitarget.h, + src/s390/sysv.S, + src/cris/ffitarget.h, + src/pa/linux.S, + src/pa/ffitarget.h, + src/pa/ffi.c, + src/raw_api.c, + src/ia64/ffitarget.h, + src/ia64/unix.S, + src/ia64/ffi.c, + src/ia64/ia64_flags.h, + src/java_raw_api.c, + src/debug.c, + src/sparc/v9.S, + src/sparc/ffitarget.h, + src/sparc/ffi.c, + src/sparc/v8.S, + src/mips/ffitarget.h, + src/mips/n32.S, + src/mips/o32.S, + src/mips/ffi.c, + src/prep_cif.c, + src/sh/ffitarget.h, + src/sh/ffi.c, + src/sh/sysv.S: Update license text. 
+ +2009-05-22 Dave Korn + + * src/x86/win32.S (_ffi_closure_STDCALL): New function. + (.eh_frame): Add FDE for it. + +2009-05-22 Dave Korn + + * configure.ac: Also check if assembler supports pc-relative + relocs on X86_WIN32 targets. + * configure: Regenerate. + * src/x86/win32.S (ffi_prep_args): Declare extern, not global. + (_ffi_call_SYSV): Add missing function type symbol .def and + add EH markup labels. + (_ffi_call_STDCALL): Likewise. + (_ffi_closure_SYSV): Likewise. + (_ffi_closure_raw_SYSV): Likewise. + (.eh_frame): Add hand-crafted EH data. + +2009-04-09 Jakub Jelinek + + * testsuite/lib/libffi-dg.exp: Change copyright header to refer to + version 3 of the GNU General Public License and to point readers + at the COPYING3 file and the FSF's license web page. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.special/special.exp: Likewise. + +2009-03-01 Ralf Wildenhues + + * configure: Regenerate. + +2008-12-18 Rainer Orth + + PR libffi/26048 + * configure.ac (HAVE_AS_X86_PCREL): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/x86/sysv.S [!FFI_NO_RAW_API]: Precalculate + RAW_CLOSURE_CIF_OFFSET, RAW_CLOSURE_FUN_OFFSET, + RAW_CLOSURE_USER_DATA_OFFSET for the Solaris 10/x86 assembler. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + * src/x86/unix64.S (.Lstore_table): Move to .text section. + (.Lload_table): Likewise. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + +2008-12-18 Ralf Wildenhues + + * configure: Regenerate. + +2008-11-21 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_cif_machdep): Add support for + signed/unsigned int8/16 return values. + * src/sparc/v8.S (ffi_call_v8): Likewise. + (ffi_closure_v8): Likewise. + +2008-09-26 Peter O'Gorman + Steve Ellcey + + * configure: Regenerate for new libtool. + * Makefile.in: Ditto. + * include/Makefile.in: Ditto. + * aclocal.m4: Ditto. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. + (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-06-17 Ralf Wildenhues + + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2008-06-07 Joseph Myers + + * configure.ac (parisc*-*-linux*, powerpc-*-sysv*, + powerpc-*-beos*): Remove. + * configure: Regenerate. + +2008-05-09 Julian Brown + + * Makefile.am (LTLDFLAGS): New. + (libffi_la_LDFLAGS): Use above. + * Makefile.in: Regenerate. + +2008-04-18 Paolo Bonzini + + PR bootstrap/35457 + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2008-03-26 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-03-26 Daniel Jacobowitz + + * src/arm/sysv.S: Fix ARM comment marker. + +2008-03-26 Jakub Jelinek + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. 
+ * src/m68k/sysv.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-03-16 Ralf Wildenhues + + * aclocal.m4: Regenerate. + * configure: Likewise. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2008-02-12 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-24 David Edelsohn + + * configure: Regenerate. + +2008-01-06 Andreas Tobler + + * src/x86/ffi.c (ffi_prep_cif_machdep): Fix thinko. + +2008-01-05 Andreas Tobler + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): Add code for + signed/unsigned int8/16 for X86_DARWIN. + Updated copyright info. + Handle one and two byte structs with special cif->flags. + * src/x86/ffitarget.h: Add special types for one and two byte structs. + Updated copyright info. + * src/x86/darwin.S (ffi_call_SYSV): Rewrite to use a jump table like + sysv.S + Remove code to pop args from the stack after call. + Special-case signed/unsigned for int8/16, one and two byte structs. + (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + Updated copyright info. + +2007-12-08 David Daney + + * src/mips/n32.S (ffi_call_N32): Replace dadd with ADDU, dsub with + SUBU, add with ADDU and use smaller code sequences. + +2007-12-07 David Daney + + * src/mips/ffi.c (ffi_prep_cif_machdep): Handle long double return + type. + +2007-12-06 David Daney + + * include/ffi.h.in (FFI_SIZEOF_JAVA_RAW): Define if not already + defined. + (ffi_java_raw): New typedef. + (ffi_java_raw_call, ffi_java_ptrarray_to_raw, + ffi_java_raw_to_ptrarray): Change parameter types from ffi_raw to + ffi_java_raw. + (ffi_java_raw_closure) : Same. + (ffi_prep_java_raw_closure, ffi_prep_java_raw_closure_loc): Change + parameter types. + * src/java_raw_api.c (ffi_java_raw_size): Replace FFI_SIZEOF_ARG with + FFI_SIZEOF_JAVA_RAW. + (ffi_java_raw_to_ptrarray): Change type of raw to ffi_java_raw. + Replace FFI_SIZEOF_ARG with FFI_SIZEOF_JAVA_RAW. Use + sizeof(ffi_java_raw) for alignment calculations. + (ffi_java_ptrarray_to_raw): Same. + (ffi_java_rvalue_to_raw): Add special handling for FFI_TYPE_POINTER + if FFI_SIZEOF_JAVA_RAW == 4. + (ffi_java_raw_to_rvalue): Same. + (ffi_java_raw_call): Change type of raw to ffi_java_raw. + (ffi_java_translate_args): Same. + (ffi_prep_java_raw_closure_loc, ffi_prep_java_raw_closure): Change + parameter types. + * src/mips/ffitarget.h (FFI_SIZEOF_JAVA_RAW): Define for N32 ABI. + +2007-12-06 David Daney + + * src/mips/n32.S (ffi_closure_N32): Use 64-bit add instruction on + pointer values. + +2007-12-01 Andreas Tobler + + PR libffi/31937 + * src/powerpc/ffitarget.h: Introduce new ABI FFI_LINUX_SOFT_FLOAT. + Add local FFI_TYPE_UINT128 to handle soft-float long-double-128. + * src/powerpc/ffi.c: Distinguish between __NO_FPRS__ and not and + set the NUM_FPR_ARG_REGISTERS according to. + Add support for potential soft-float support under hard-float + architecture. + (ffi_prep_args_SYSV): Set NUM_FPR_ARG_REGISTERS to 0 in case of + FFI_LINUX_SOFT_FLOAT, handle float, doubles and long-doubles according + to the FFI_LINUX_SOFT_FLOAT ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_SYSV): Likewise. 
+ * src/powerpc/ppc_closure.S: Make sure not to store float/double
+ on archs where __NO_FPRS__ is true.
+ Add FFI_TYPE_UINT128 support.
+ * src/powerpc/sysv.S: Add support for soft-float long-double-128.
+ Adjust copyright notice.
+
+2007-11-25 Andreas Tobler
+
+ * src/closures.c: Move definition of MAYBE_UNUSED from here to ...
+ * include/ffi_common.h: ... here.
+ Update copyright.
+
+2007-11-17 Andreas Tobler
+
+ * src/powerpc/sysv.S: Load correct cr to compare if we have long double.
+ * src/powerpc/linux64.S: Likewise.
+ * src/powerpc/ffi.c: Add a comment to show which part goes into cr6.
+ * testsuite/libffi.call/return_ldl.c: New test.
+
+2007-09-04
+
+ * src/arm/sysv.S (UNWIND): New.
+ (Whole file): Conditionally compile unwinder directives.
+ * src/arm/sysv.S: Add unwinder directives.
+
+ * src/arm/ffi.c (ffi_prep_args): Align structs by at least 4 bytes.
+ Only treat r0 as a struct address if we're actually returning a
+ struct by address.
+ Only copy the bytes that are actually within a struct.
+ (ffi_prep_cif_machdep): A Composite Type not larger than 4 bytes
+ is returned in r0, not passed by address.
+ (ffi_call): Allocate a word-sized temporary for the case where
+ a composite is returned in r0.
+ (ffi_prep_incoming_args_SYSV): Align as necessary.
+
+2007-08-05 Steven Newbury
+
+ * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Use __clear_cache instead of
+ directly using the sys_cacheflush syscall.
+
+2007-07-27 Andrew Haley
+
+ * src/arm/sysv.S (ffi_closure_SYSV): Add soft-float.
+
+2007-09-03 Maciej W. Rozycki
+
+ * Makefile.am: Unify MIPS_IRIX and MIPS_LINUX into MIPS.
+ * configure.ac: Likewise.
+ * Makefile.in: Regenerate.
+ * include/Makefile.in: Likewise.
+ * testsuite/Makefile.in: Likewise.
+ * configure: Likewise.
+
+2007-08-24 David Daney
+
+ * testsuite/libffi.call/return_sl.c: New test.
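The return_ldl.c and return_sl.c tests added above exercise return values through libffi's public call interface. A minimal, hypothetical sketch of that pattern (the callee and values below are invented for illustration and are not the contents of either test): integral results are written into an ffi_arg-sized slot and cast back to the native type before use, the same convention the later return-value test fixes in this log rely on.

    #include <ffi.h>
    #include <stdio.h>

    /* Hypothetical callee standing in for whatever a real test would call. */
    static long negate(long x) { return -x; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[1] = { &ffi_type_slong };
        long x = 42;
        void *arg_values[1] = { &x };
        ffi_arg result;  /* integral returns are read through an ffi_arg-sized slot */

        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                         &ffi_type_slong, arg_types) == FFI_OK) {
            ffi_call(&cif, FFI_FN(negate), &result, arg_values);
            printf("negate(42) = %ld\n", (long)result);  /* cast back before printing */
        }
        return 0;
    }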
+ +2007-08-10 David Daney + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Remove xfail for mips64*-*-*. + +2007-08-10 David Daney + + PR libffi/28313 + * configure.ac: Don't treat mips64 as a special case. + * Makefile.am (nodist_libffi_la_SOURCES): Add n32.S. + * configure: Regenerate + * Makefile.in: Ditto. + * fficonfig.h.in: Ditto. + * src/mips/ffitarget.h (REG_L, REG_S, SUBU, ADDU, SRL, LI): Indent. + (LA, EH_FRAME_ALIGN, FDE_ADDR_BYTES): New preprocessor macros. + (FFI_DEFAULT_ABI): Set for n64 case. + (FFI_CLOSURES, FFI_TRAMPOLINE_SIZE): Define for n32 and n64 cases. + * src/mips/n32.S (ffi_call_N32): Add debug macros and labels for FDE. + (ffi_closure_N32): New function. + (.eh_frame): New section + * src/mips/o32.S: Clean up comments. 
+ (ffi_closure_O32): Pass ffi_closure parameter in $12. + * src/mips/ffi.c: Use FFI_MIPS_N32 instead of + _MIPS_SIM == _ABIN32 throughout. + (FFI_MIPS_STOP_HERE): New, use in place of + ffi_stop_here. + (ffi_prep_args): Use unsigned long to hold pointer values. Rewrite + to support n32/n64 ABIs. + (calc_n32_struct_flags): Rewrite. + (calc_n32_return_struct_flags): Remove unused variable. Reverse + position of flag bits. + (ffi_prep_cif_machdep): Rewrite n32 portion. + (ffi_call): Enable for n64. Add special handling for small structure + return values. + (ffi_prep_closure_loc): Add n32 and n64 support. + (ffi_closure_mips_inner_O32): Add cast to silence warning. + (copy_struct_N32, ffi_closure_mips_inner_N32): New functions. + +2007-08-08 David Daney + + * testsuite/libffi.call/ffitest.h (ffi_type_mylong): Remove definition. + * testsuite/libffi.call/cls_align_uint16.c (main): Use correct type + specifiers. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/cls_sint.c (main): Ditto. + * testsuite/libffi.call/nested_struct9.c (main): Ditto. + * testsuite/libffi.call/cls_20byte1.c (main): Ditto. + * testsuite/libffi.call/cls_9byte1.c (main): Ditto. + * testsuite/libffi.call/closure_fn1.c (main): Ditto. + * testsuite/libffi.call/closure_fn3.c (main): Ditto. + * testsuite/libffi.call/return_dbl2.c (main): Ditto. + * testsuite/libffi.call/cls_sshort.c (main): Ditto. + * testsuite/libffi.call/return_fl3.c (main): Ditto. + * testsuite/libffi.call/closure_fn5.c (main): Ditto. + * testsuite/libffi.call/nested_struct.c (main): Ditto. + * testsuite/libffi.call/nested_struct10.c (main): Ditto. + * testsuite/libffi.call/return_ll1.c (main): Ditto. + * testsuite/libffi.call/cls_8byte.c (main): Ditto. + * testsuite/libffi.call/cls_align_uint32.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint16.c (main): Ditto. + * testsuite/libffi.call/cls_20byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct2.c (main): Ditto. + * testsuite/libffi.call/cls_24byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct6.c (main): Ditto. + * testsuite/libffi.call/cls_uint.c (main): Ditto. + * testsuite/libffi.call/cls_12byte.c (main): Ditto. + * testsuite/libffi.call/cls_16byte.c (main): Ditto. + * testsuite/libffi.call/closure_fn0.c (main): Ditto. + * testsuite/libffi.call/cls_9byte2.c (main): Ditto. + * testsuite/libffi.call/closure_fn2.c (main): Ditto. + * testsuite/libffi.call/return_dbl1.c (main): Ditto. + * testsuite/libffi.call/closure_fn4.c (main): Ditto. + * testsuite/libffi.call/closure_fn6.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint32.c (main): Ditto. + +2007-08-07 Andrew Haley + + * src/x86/sysv.S (ffi_closure_raw_SYSV): Fix typo in previous + checkin. + +2007-08-06 Andrew Haley + + PR testsuite/32843 + * src/x86/sysv.S (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + +2007-08-02 David Daney + + * testsuite/libffi.call/return_ul.c (main): Define return type as + ffi_arg. Use proper printf conversion specifier. + +2007-07-30 Andrew Haley + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): in x86 case, add code for + signed/unsigned int8/16. + * src/x86/sysv.S (ffi_call_SYSV): Rewrite to: + Use a jump table. + Remove code to pop args from the stack after call. + Special-case signed/unsigned int8/16. + * testsuite/libffi.call/return_sc.c (main): Revert. 
+ +2007-07-26 Richard Guenther + + PR testsuite/32843 + * testsuite/libffi.call/return_sc.c (main): Verify call + result as signed char, not ffi_arg. + +2007-07-16 Rainer Orth + + * configure.ac (i?86-*-solaris2.1[0-9]): Set TARGET to X86_64. + * configure: Regenerate. + +2007-07-11 David Daney + + * src/mips/ffi.c: Don't include sys/cachectl.h. + (ffi_prep_closure_loc): Use __builtin___clear_cache() instead of + cacheflush(). + +2007-05-18 Aurelien Jarno + + * src/arm/ffi.c (ffi_prep_closure_loc): Renamed and ajusted + from (ffi_prep_closure): ... this. + (FFI_INIT_TRAMPOLINE): Adjust. + +2005-12-31 Phil Blundell + + * src/arm/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner, ffi_prep_closure): New, add closure support. + * src/arm/sysv.S(ffi_closure_SYSV): Likewise. + * src/arm/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-07-03 Andrew Haley + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + 
testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Enable for ARM. + +2007-07-05 H.J. Lu + + * aclocal.m4: Regenerated. + +2007-06-02 Paolo Bonzini + + * configure: Regenerate. + +2007-05-23 Steve Ellcey + + * Makefile.in: Regenerate. + * configure: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner,ffi_prep_closure): New, add closure support. + * src/m68k/sysv.S(ffi_closure_SYSV,ffi_closure_struct_SYSV): Likewise. + * src/m68k/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-05-10 Roman Zippel + + * configure.ac (HAVE_AS_CFI_PSEUDO_OP): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/m68k/sysv.S (CFI_STARTPROC,CFI_ENDPROC, + CFI_OFFSET,CFI_DEF_CFA): New macros. + (ffi_call_SYSV): Add callframe annotation. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_args,ffi_prep_cif_machdep): Fix + numerous test suite failures. + * src/m68k/sysv.S (ffi_call_SYSV): Likewise. + +2007-04-11 Paolo Bonzini + + * Makefile.am (EXTRA_DIST): Bring up to date. + * Makefile.in: Regenerate. + * src/frv/eabi.S: Remove RCS keyword. + +2007-04-06 Richard Henderson + + * configure.ac: Tidy target case. + (HAVE_LONG_DOUBLE): Allow the target to override. + * configure: Regenerate. + * include/ffi.h.in: Don't define ffi_type_foo if + LIBFFI_HIDE_BASIC_TYPES is defined. + (ffi_type_longdouble): If not HAVE_LONG_DOUBLE, define + to ffi_type_double. + * types.c (LIBFFI_HIDE_BASIC_TYPES): Define. + (FFI_TYPEDEF, ffi_type_void): Mark the data const. + (ffi_type_longdouble): Special case for Alpha. Don't define + if long double == double. + + * src/alpha/ffi.c (FFI_TYPE_LONGDOUBLE): Assert unique value. + (ffi_prep_cif_machdep): Handle it as the 128-bit type. + (ffi_call, ffi_closure_osf_inner): Likewise. + (ffi_closure_osf_inner): Likewise. Mark hidden. + (ffi_call_osf, ffi_closure_osf): Mark hidden. + * src/alpha/ffitarget.h (FFI_LAST_ABI): Tidy definition. + * src/alpha/osf.S (ffi_call_osf, ffi_closure_osf): Mark hidden. + (load_table): Handle 128-bit long double. + + * testsuite/libffi.call/float4.c: Add -mieee for alpha. + +2007-04-06 Tom Tromey + + PR libffi/31491: + * README: Fixed bug in example. + +2007-04-03 Jakub Jelinek + + * src/closures.c: Include sys/statfs.h. + (_GNU_SOURCE): Define on Linux. + (FFI_MMAP_EXEC_SELINUX): Define. + (selinux_enabled): New variable. + (selinux_enabled_check): New function. + (is_selinux_enabled): Define. + (dlmmap): Use it. + +2007-03-24 Uros Bizjak + + * testsuite/libffi.call/return_fl2.c (return_fl): Mark as static. + Use 'volatile float sum' to create sum of floats to avoid false + negative due to excess precision on ix86 targets. + (main): Ditto. + +2007-03-08 Alexandre Oliva + + * src/powerpc/ffi.c (flush_icache): Fix left-over from previous + patch. + (ffi_prep_closure_loc): Remove unneeded casts. Add needed ones. + +2007-03-07 Alexandre Oliva + + * include/ffi.h.in (ffi_closure_alloc, ffi_closure_free): New. + (ffi_prep_closure_loc): New. + (ffi_prep_raw_closure_loc): New. + (ffi_prep_java_raw_closure_loc): New. + * src/closures.c: New file. + * src/dlmalloc.c [FFI_MMAP_EXEC_WRIT] (struct malloc_segment): + Replace sflags with exec_offset. 
+ [FFI_MMAP_EXEC_WRIT] (mmap_exec_offset, add_segment_exec_offset, + sub_segment_exec_offset): New macros. + (get_segment_flags, set_segment_flags, check_segment_merge): New + macros. + (is_mmapped_segment, is_extern_segment): Use get_segment_flags. + (add_segment, sys_alloc, create_mspace, create_mspace_with_base, + destroy_mspace): Use new macros. + (sys_alloc): Silence warning. + * Makefile.am (libffi_la_SOURCES): Add src/closures.c. + * Makefile.in: Rebuilt. + * src/prep_cif [FFI_CLOSURES] (ffi_prep_closure): Implement in + terms of ffi_prep_closure_loc. + * src/raw_api.c (ffi_prep_raw_closure_loc): Renamed and adjusted + from... + (ffi_prep_raw_closure): ... this. Re-implement in terms of the + renamed version. + * src/java_raw_api (ffi_prep_java_raw_closure_loc): Renamed and + adjusted from... + (ffi_prep_java_raw_closure): ... this. Re-implement in terms of + the renamed version. + * src/alpha/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + * src/pa/ffi.c: Likewise. + * src/cris/ffi.c: Likewise. Adjust. + * src/frv/ffi.c: Likewise. + * src/ia64/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/x86/ffi.c: Likewise. + (FFI_INIT_TRAMPOLINE): Adjust. + (ffi_prep_raw_closure_loc): Renamed and adjusted from... + (ffi_prep_raw_closure): ... this. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + (flush_icache): Adjust. + +2007-03-07 Alexandre Oliva + + * src/dlmalloc.c: New file, imported version 2.8.3 of Doug + Lea's malloc. + +2007-03-01 Brooks Moses + + * Makefile.am: Add dummy install-pdf target. + * Makefile.in: Regenerate + +2007-02-13 Andreas Krebbel + + * src/s390/ffi.c (ffi_prep_args, ffi_prep_cif_machdep, + ffi_closure_helper_SYSV): Add long double handling. + +2007-02-02 Jakub Jelinek + + * src/powerpc/linux64.S (ffi_call_LINUX64): Move restore of r2 + immediately after bctrl instruction. + +2007-01-18 Alexandre Oliva + + * Makefile.am (all-recursive, install-recursive, + mostlyclean-recursive, clean-recursive, distclean-recursive, + maintainer-clean-recursive): Add missing targets. + * Makefile.in: Rebuilt. + +2006-12-14 Andreas Tobler + + * configure.ac: Add TARGET for x86_64-*-darwin*. + * Makefile.am (nodist_libffi_la_SOURCES): Add rules for 64-bit sources + for X86_DARWIN. + * src/x86/ffitarget.h: Set trampoline size for x86_64-*-darwin*. + * src/x86/darwin64.S: New file for x86_64-*-darwin* support. + * configure: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + * testsuite/libffi.special/unwindtest_ffi_call.cc: New test case for + ffi_call only. + +2006-12-13 Andreas Tobler + + * aclocal.m4: Regenerate with aclocal -I .. as written in the + Makefile.am. + +2006-10-31 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (darwin_adjust_aggregate_sizes): New. + (ffi_prep_cif_machdep): Call darwin_adjust_aggregate_sizes for + Darwin. + * testsuite/libffi.call/nested_struct4.c: Remove Darwin XFAIL. + * testsuite/libffi.call/nested_struct6.c: Remove Darwin XFAIL. + +2006-10-10 Paolo Bonzini + Sandro Tolaini + + * configure.ac [i*86-*-darwin*]: Set X86_DARWIN symbol and + conditional. + * configure: Regenerated. + * Makefile.am (nodist_libffi_la_SOURCES) [X86_DARWIN]: New case. + (EXTRA_DIST): Add src/x86/darwin.S. + * Makefile.in: Regenerated. 
+ * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + + * src/x86/ffi.c (ffi_prep_cif_machdep) [X86_DARWIN]: Treat like + X86_WIN32, and additionally align stack to 16 bytes. + * src/x86/darwin.S: New, based on sysv.S. + * src/prep_cif.c (ffi_prep_cif) [X86_DARWIN]: Align > 8-byte structs. + +2006-09-12 David Daney + + PR libffi/23935 + * include/Makefile.am: Install both ffi.h and ffitarget.h in + $(libdir)/gcc/$(target_alias)/$(gcc_version)/include. + * aclocal.m4: Regenerated for automake 1.9.6. + * Makefile.in: Regenerated. + * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + +2006-08-17 Andreas Tobler + + * include/ffi_common.h (struct): Revert accidental commit. + +2006-08-15 Andreas Tobler + + * include/ffi_common.h: Remove lint directives. + * include/ffi.h.in: Likewise. + +2006-07-25 Torsten Schoenfeld + + * include/ffi.h.in (ffi_type_ulong, ffi_type_slong): Define correctly + for 32-bit architectures. + * testsuite/libffi.call/return_ul.c: New test case. + +2006-07-19 David Daney + + * testsuite/libffi.call/closure_fn6.c: Remove xfail for mips, + xfail remains for mips64. + +2006-05-23 Carlos O'Donell + + * Makefile.am: Add install-html target. Add install-html to .PHONY + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2006-05-18 John David Anglin + + * pa/ffi.c (ffi_prep_args_pa32): Load floating point arguments from + stack slot. + +2006-04-22 Andreas Tobler + + * README: Remove notice about 'Crazy Comments'. + * src/debug.c: Remove lint directives. Cleanup white spaces. + * src/java_raw_api.c: Likewise. + * src/prep_cif.c: Likewise. + * src/raw_api.c: Likewise. + * src/ffitest.c: Delete. No longer needed, all test cases migrated + to the testsuite. + * src/arm/ffi.c: Remove lint directives. + * src/m32r/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + +2006-04-13 Andreas Tobler + + * src/pa/hpux32.S: Correct unwind offset calculation for + ffi_closure_pa32. + * src/pa/linux.S: Likewise. + +2006-04-12 James E Wilson + + PR libgcj/26483 + * src/ia64/ffi.c (stf_spill, ldf_fill): Rewrite as macros. + (hfa_type_load): Call stf_spill. + (hfa_type_store): Call ldf_fill. + (ffi_call): Adjust calls to above routines. Add local temps for + macro result. + +2006-04-10 Matthias Klose + + * testsuite/lib/libffi-dg.exp (libffi-init): Recognize multilib + directory names containing underscores. + +2006-04-07 James E Wilson + + * testsuite/libffi.call/float4.c: New testcase. + +2006-04-05 John David Anglin + Andreas Tobler + + * Makefile.am: Add PA_HPUX port. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add PA_HPUX rules. + * configure: Regenerate. + * src/pa/ffitarget.h: Rename linux target to PA_LINUX. + Add PA_HPUX and PA64_HPUX. + Rename FFI_LINUX ABI to FFI_PA32 ABI. + (FFI_TRAMPOLINE_SIZE): Define for 32-bit HP-UX targets. + (FFI_TYPE_SMALL_STRUCT2): Define. + (FFI_TYPE_SMALL_STRUCT4): Likewise. + (FFI_TYPE_SMALL_STRUCT8): Likewise. + (FFI_TYPE_SMALL_STRUCT3): Redefine. + (FFI_TYPE_SMALL_STRUCT5): Likewise. + (FFI_TYPE_SMALL_STRUCT6): Likewise. + (FFI_TYPE_SMALL_STRUCT7): Likewise. 
+ * src/pa/ffi.c (ROUND_DOWN): Delete. + (fldw, fstw, fldd, fstd): Use '__asm__'. + (ffi_struct_type): Add support for FFI_TYPE_SMALL_STRUCT2, + FFI_TYPE_SMALL_STRUCT4 and FFI_TYPE_SMALL_STRUCT8. + (ffi_prep_args_LINUX): Rename to ffi_prep_args_pa32. Update comment. + Simplify incrementing of stack slot variable. Change type of local + 'n' to unsigned int. + (ffi_size_stack_LINUX): Rename to ffi_size_stack_pa32. Handle long + double on PA_HPUX. + (ffi_prep_cif_machdep): Likewise. + (ffi_call): Likewise. + (ffi_closure_inner_LINUX): Rename to ffi_closure_inner_pa32. Change + return type to ffi_status. Simplify incrementing of stack slot + variable. Only copy floating point argument registers when PA_LINUX + is true. Reformat debug statement. + Add support for FFI_TYPE_SMALL_STRUCT2, FFI_TYPE_SMALL_STRUCT4 and + FFI_TYPE_SMALL_STRUCT8. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Add 'extern' to + declaration. + (ffi_prep_closure): Make linux trampoline conditional on PA_LINUX. + Add nops to cache flush. Add trampoline for PA_HPUX. + * src/pa/hpux32.S: New file. + * src/pa/linux.S (ffi_call_LINUX): Rename to ffi_call_pa32. Rename + ffi_prep_args_LINUX to ffi_prep_args_pa32. + Localize labels. Add support for 2, 4 and 8-byte small structs. Handle + unaligned destinations in 3, 5, 6 and 7-byte small structs. Order + argument type checks so that common argument types appear first. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Rename + ffi_closure_inner_LINUX to ffi_closure_inner_pa32. + +2006-03-24 Alan Modra + + * src/powerpc/ffitarget.h (enum ffi_abi): Add FFI_LINUX. Default + for 32-bit using IBM extended double format. Fix FFI_LAST_ABI. + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Handle linux variant of + FFI_TYPE_LONGDOUBLE. + (ffi_prep_args64): Assert using IBM extended double. + (ffi_prep_cif_machdep): Don't munge FFI_TYPE_LONGDOUBLE type. + Handle FFI_LINUX FFI_TYPE_LONGDOUBLE return and args. + (ffi_call): Handle FFI_LINUX. + (ffi_closure_helper_SYSV): Non FFI_LINUX long double return needs + gpr3 return pointer as for struct return. Handle FFI_LINUX + FFI_TYPE_LONGDOUBLE return and args. Don't increment "nf" + unnecessarily. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Load both f1 and f2 + for FFI_TYPE_LONGDOUBLE. Move epilogue insns into case table. + Don't use r6 as pointer to results, instead use sp offset. Don't + make a special call to load lr with case table address, instead + use offset from previous call. + * src/powerpc/sysv.S (ffi_call_SYSV): Save long double return. + * src/powerpc/linux64.S (ffi_call_LINUX64): Simplify long double + return. + +2006-03-15 Kaz Kojima + + * src/sh64/ffi.c (ffi_prep_cif_machdep): Handle float arguments + passed with FP registers correctly. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S: Likewise. + +2006-03-01 Andreas Tobler + + * testsuite/libffi.special/unwindtest.cc (closure_test_fn): Mark cif, + args and userdata unused. + (closure_test_fn1): Mark cif and userdata unused. + (main): Remove unused res. + +2006-02-28 Andreas Tobler + + * testsuite/libffi.call/call.exp: Adjust FSF address. Add test runs for + -O2, -O3, -Os and the warning flags -W -Wall. + * testsuite/libffi.special/special.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Add an __UNUSED__ macro to mark + unused parameter unused for gcc or else do nothing. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.call/cls_12byte.c (cls_struct_12byte_gn): Mark cif + and userdata unused. 
+ * testsuite/libffi.call/cls_16byte.c (cls_struct_16byte_gn): Likewise. + * testsuite/libffi.call/cls_18byte.c (cls_struct_18byte_gn): Likewise. + * testsuite/libffi.call/cls_19byte.c (cls_struct_19byte_gn): Likewise. + * testsuite/libffi.call/cls_1_1byte.c (cls_struct_1_1byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte1.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_24byte.c (cls_struct_24byte_gn): Likewise. + * testsuite/libffi.call/cls_2byte.c (cls_struct_2byte_gn): Likewise. + * testsuite/libffi.call/cls_3_1byte.c (cls_struct_3_1byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte1.c (cls_struct_3byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte2.c (cls_struct_3byte_gn1): Likewise. + * testsuite/libffi.call/cls_4_1byte.c (cls_struct_4_1byte_gn): Likewise. + * testsuite/libffi.call/cls_4byte.c (cls_struct_4byte_gn): Likewise. + * testsuite/libffi.call/cls_5_1_byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_5byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_64byte.c (cls_struct_64byte_gn): Likewise. + * testsuite/libffi.call/cls_6_1_byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_6byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_7_1_byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_7byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_8byte.c (cls_struct_8byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte1.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte2.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_align_double.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_float.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_longdouble.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_pointer.c (cls_struct_align_fn): Cast + void* to avoid compiler warning. + (main): Likewise. + (cls_struct_align_gn): Mark cif and userdata unused. + * testsuite/libffi.call/cls_align_sint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint64.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_double.c (cls_ret_double_fn): Likewise. + * testsuite/libffi.call/cls_float.c (cls_ret_float_fn): Likewise. + * testsuite/libffi.call/cls_multi_schar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_uchar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_schar.c (cls_ret_schar_fn): Mark cif and + userdata unused. 
+ (cls_ret_schar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sint.c (cls_ret_sint_fn): Mark cif and + userdata unused. + (cls_ret_sint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sshort.c (cls_ret_sshort_fn): Mark cif and + userdata unused. + (cls_ret_sshort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uchar.c (cls_ret_uchar_fn): Mark cif and + userdata unused. + (cls_ret_uchar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Mark cif and + userdata unused. + (cls_ret_uint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_ulonglong.c (cls_ret_ulonglong_fn): Mark cif + and userdata unused. + * testsuite/libffi.call/cls_ushort.c (cls_ret_ushort_fn): Mark cif and + userdata unused. + (cls_ret_ushort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/float.c (floating): Remove unused parameter e. + * testsuite/libffi.call/float1.c (main): Remove unused variable i. + Cleanup white spaces. + * testsuite/libffi.call/negint.c (checking): Remove unused variable i. + * testsuite/libffi.call/nested_struct.c (cls_struct_combined_gn): Mark + cif and userdata unused. + * testsuite/libffi.call/nested_struct1.c (cls_struct_combined_gn): + Likewise. + * testsuite/libffi.call/nested_struct10.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct2.c (B_fn): Adjust printf + formatters to silence gcc. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct3.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct4.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct5.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct6.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct7.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct8.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct9.c (B_gn): Likewise. + * testsuite/libffi.call/problem1.c (stub): Likewise. + * testsuite/libffi.call/pyobjc-tc.c (main): Cast the result to silence + gcc. + * testsuite/libffi.call/return_fl2.c (return_fl): Add the note mentioned + in the last commit for this test case in the test case itself. + * testsuite/libffi.call/closure_fn0.c (closure_test_fn0): Mark cif as + unused. + * testsuite/libffi.call/closure_fn1.c (closure_test_fn1): Likewise. + * testsuite/libffi.call/closure_fn2.c (closure_test_fn2): Likewise. + * testsuite/libffi.call/closure_fn3.c (closure_test_fn3): Likewise. + * testsuite/libffi.call/closure_fn4.c (closure_test_fn0): Likewise. + * testsuite/libffi.call/closure_fn5.c (closure_test_fn5): Likewise. + * testsuite/libffi.call/closure_fn6.c (closure_test_fn0): Likewise. + +2006-02-22 Kaz Kojima + + * src/sh/sysv.S: Fix register numbers in the FDE for + ffi_closure_SYSV. + +2006-02-20 Andreas Tobler + + * testsuite/libffi.call/return_fl2.c (return_fl): Remove static + declaration to avoid a false negative on ix86. See PR323. + +2006-02-18 Kaz Kojima + + * src/sh/ffi.c (ffi_closure_helper_SYSV): Remove unused variable + and cast integer to void * if needed. Update the pointer to + the FP register saved area correctly. + +2006-02-17 Andreas Tobler + + * testsuite/libffi.call/nested_struct6.c: XFAIL this test until PR25630 + is fixed. + * testsuite/libffi.call/nested_struct4.c: Likewise. 
+ +2006-02-16 Andreas Tobler + + * testsuite/libffi.call/return_dbl.c: New test case. + * testsuite/libffi.call/return_dbl1.c: Likewise. + * testsuite/libffi.call/return_dbl2.c: Likewise. + * testsuite/libffi.call/return_fl.c: Likewise. + * testsuite/libffi.call/return_fl1.c: Likewise. + * testsuite/libffi.call/return_fl2.c: Likewise. + * testsuite/libffi.call/return_fl3.c: Likewise. + * testsuite/libffi.call/closure_fn6.c: Likewise. + + * testsuite/libffi.call/nested_struct2.c: Remove ffi_type_mylong + definition. + * testsuite/libffi.call/ffitest.h: Add ffi_type_mylong definition + here to be used by other test cases too. + + * testsuite/libffi.call/nested_struct10.c: New test case. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + +2006-01-21 Andreas Tobler + + * configure.ac: Enable libffi for sparc64-*-freebsd*. + * configure: Rebuilt. + +2006-01-18 Jakub Jelinek + + * src/powerpc/sysv.S (smst_two_register): Don't call __ashldi3, + instead do the shifting inline. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't compute %r5 + shift count unconditionally. Simplify load sequences for 1, 2, 3, 4 + and 8 byte structs, for the remaining struct sizes don't call + __lshrdi3, instead do the shifting inline. + +2005-12-07 Thiemo Seufer + + * src/mips/ffitarget.h: Remove obsolete sgidefs.h include. Add + missing parentheses. + * src/mips/o32.S (ffi_call_O32): Code formatting. Define + and use A3_OFF, FP_OFF, RA_OFF. Micro-optimizations. + (ffi_closure_O32): Likewise, but with newly defined A3_OFF2, + A2_OFF2, A1_OFF2, A0_OFF2, RA_OFF2, FP_OFF2, S0_OFF2, GP_OFF2, + V1_OFF2, V0_OFF2, FA_1_1_OFF2, FA_1_0_OFF2, FA_0_1_OFF2, + FA_0_0_OFF2. + * src/mips/ffi.c (ffi_prep_args): Code formatting. Fix + endianness bugs. + (ffi_prep_closure): Improve trampoline instruction scheduling. + (ffi_closure_mips_inner_O32): Fix endianness bugs. + +2005-12-03 Alan Modra + + * src/powerpc/ffi.c: Formatting. + (ffi_prep_args_SYSV): Avoid possible aliasing problems by using unions. + (ffi_prep_args64): Likewise. + +2005-09-30 Geoffrey Keating + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): For + darwin, use -shared-libgcc not -lgcc_s, and explain why. + +2005-09-26 Tom Tromey + + * testsuite/libffi.call/float1.c (value_type): New typedef. + (CANARY): New define. + (main): Check for result buffer overflow. + * src/powerpc/linux64.S: Handle linux64 long double returns. + * src/powerpc/ffi.c (FLAG_RETURNS_128BITS): New constant. + (ffi_prep_cif_machdep): Handle linux64 long double returns. + +2005-08-25 Alan Modra + + PR target/23404 + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Correct placement of stack + homed fp args. + (ffi_status ffi_prep_cif_machdep): Correct stack sizing for same. + +2005-08-11 Jakub Jelinek + + * configure.ac (HAVE_HIDDEN_VISIBILITY_ATTRIBUTE): New test. + (AH_BOTTOM): Add FFI_HIDDEN definition. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * src/powerpc/ffi.c (hidden): Remove. + (ffi_closure_LINUX64, ffi_prep_args64, ffi_call_LINUX64, + ffi_closure_helper_LINUX64): Use FFI_HIDDEN instead of hidden. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64, + .ffi_closure_LINUX64): Use FFI_HIDDEN instead of .hidden. 
+ * src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV): Remove, + add FFI_HIDDEN to its prototype. + (ffi_closure_SYSV_inner): New. + * src/x86/sysv.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + * src/x86/win32.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + +2005-08-10 Alfred M. Szmidt + + PR libffi/21819: + * configure: Rebuilt. + * configure.ac: Handle i*86-*-gnu*. + +2005-08-09 Jakub Jelinek + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Use + DW_CFA_offset_extended_sf rather than + DW_CFA_GNU_negative_offset_extended. + * src/powerpc/sysv.S (ffi_call_SYSV): Likewise. + +2005-07-22 SUGIOKA Toshinobu + + * src/sh/sysv.S (ffi_call_SYSV): Stop argument popping correctly + on sh3. + (ffi_closure_SYSV): Change the stack layout for sh3 struct argument. + * src/sh/ffi.c (ffi_prep_args): Fix sh3 argument copy, when it is + partially on register. + (ffi_closure_helper_SYSV): Likewise. + (ffi_prep_cif_machdep): Don't set too many cif->flags. + +2005-07-20 Kaz Kojima + + * src/sh/ffi.c (ffi_call): Handle small structures correctly. + Remove empty line. + * src/sh64/ffi.c (simple_type): Remove. + (return_type): Handle small structures correctly. + (ffi_prep_args): Likewise. + (ffi_call): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S (ffi_call_SYSV): Handle 1, 2 and 4-byte return. + Emit position independent code if PIC and remove wrong datalabel + prefixes from EH data. + +2005-07-19 Andreas Tobler + + * Makefile.am (nodist_libffi_la_SOURCES): Add POWERPC_FREEBSD. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add POWERPC_FREEBSD rules. + * configure: Regenerate. + * src/powerpc/ffitarget.h: Add POWERPC_FREEBSD rules. + (FFI_SYSV_TYPE_SMALL_STRUCT): Define. + * src/powerpc/ffi.c: Add flags to handle small structure returns + in ffi_call_SYSV. + (ffi_prep_cif_machdep): Handle small structures for SYSV 4 ABI. + Aka FFI_SYSV. + (ffi_closure_helper_SYSV): Likewise. + * src/powerpc/ppc_closure.S: Add return types for small structures. + * src/powerpc/sysv.S: Add bits to handle small structures for + final SYSV 4 ABI. + +2005-07-10 Andreas Tobler + + * testsuite/libffi.call/cls_5_1_byte.c: New test file. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + +2005-07-05 Randolph Chung + + * src/pa/ffi.c (ffi_struct_type): Rename FFI_TYPE_SMALL_STRUCT1 + as FFI_TYPE_SMALL_STRUCT3. Break out handling for 5-7 byte + structures. Kill compilation warnings. + (ffi_closure_inner_LINUX): Print return values as hex in debug + message. Rename FFI_TYPE_SMALL_STRUCT1 as FFI_TYPE_SMALL_STRUCT3. + Properly handle 5-7 byte structure returns. + * src/pa/ffitarget.h (FFI_TYPE_SMALL_STRUCT1) + (FFI_TYPE_SMALL_STRUCT2): Remove. + (FFI_TYPE_SMALL_STRUCT3, FFI_TYPE_SMALL_STRUCT5) + (FFI_TYPE_SMALL_STRUCT6, FFI_TYPE_SMALL_STRUCT7): Define. + * src/pa/linux.S: Mark source file as using PA1.1 assembly. + (checksmst1, checksmst2): Remove. + (checksmst3): Optimize handling of 3-byte struct returns. + (checksmst567): Properly handle 5-7 byte struct returns. + +2005-06-15 Rainer Orth + + PR libgcj/21943 + * src/mips/n32.S: Enforce PIC code. + * src/mips/o32.S: Likewise. + +2005-06-15 Rainer Orth + + * configure.ac: Treat i*86-*-solaris2.10 and up as X86_64. + * configure: Regenerate. + +2005-06-01 Alan Modra + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't use JUMPTARGET + to call ffi_closure_helper_SYSV. Append @local instead. 
+ * src/powerpc/sysv.S (ffi_call_SYSV): Likewise for ffi_prep_args_SYSV. + +2005-05-17 Kelley Cook + + * configure.ac: Use AC_C_BIGENDIAN instead of AC_C_BIGENDIAN_CROSS. + Use AC_CHECK_SIZEOF instead of AC_COMPILE_CHECK_SIZEOF. + * Makefile.am (ACLOCAL_AMFLAGS): Remove -I ../config. + * aclocal.m4, configure, fficonfig.h.in, Makefile.in, + include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2005-05-09 Mike Stump + + * configure: Regenerate. + +2005-05-08 Richard Henderson + + PR libffi/21285 + * src/alpha/osf.S: Update unwind into to match code. + +2005-05-04 Andreas Degert + Richard Henderson + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Save sse-used flag in + bit 11 of flags. + (ffi_call): Mask return type field. Pass ssecount to ffi_call_unix64. + (ffi_prep_closure): Set carry bit if sse-used flag set. + * src/x86/unix64.S (ffi_call_unix64): Add ssecount argument. + Only load sse registers if ssecount non-zero. + (ffi_closure_unix64): Only save sse registers if carry set on entry. + +2005-04-29 Ralf Corsepius + + * configure.ac: Add i*86-*-rtems*, sparc*-*-rtems*, + powerpc-*rtems*, arm*-*-rtems*, sh-*-rtems*. + * configure: Regenerate. + +2005-04-20 Hans-Peter Nilsson + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): In regsub use, + have Tcl8.3-compatible intermediate variable. + +2005-04-18 Simon Posnjak + Hans-Peter Nilsson + + * Makefile.am: Add CRIS support. + * configure.ac: Likewise. + * Makefile.in, configure, testsuite/Makefile.in, + include/Makefile.in: Regenerate. + * src/cris: New directory. + * src/cris/ffi.c, src/cris/sysv.S, src/cris/ffitarget.h: New files. + * src/prep_cif.c (ffi_prep_cif): Wrap in #ifndef __CRIS__. + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): Replace \n with + \r?\n in output tests. + +2005-04-12 Mike Stump + + * configure: Regenerate. + +2005-03-30 Hans Boehm + + * src/ia64/ffitarget.h (ffi_arg): Use long long instead of DI. + +2005-03-30 Steve Ellcey + + * src/ia64/ffitarget.h (ffi_arg) ADD DI attribute. + (ffi_sarg) Ditto. + * src/ia64/unix.S (ffi_closure_unix): Extend gp + to 64 bits in ILP32 mode. + Load 64 bits even for short data. + +2005-03-23 Mike Stump + + * src/powerpc/darwin.S: Update for -m64 multilib. + * src/powerpc/darwin_closure.S: Likewise. + +2005-03-21 Zack Weinberg + + * configure.ac: Do not invoke TL_AC_GCC_VERSION. + Do not set tool_include_dir. + * aclocal.m4, configure, Makefile.in, testsuite/Makefile.in: + Regenerate. + * include/Makefile.am: Set gcc_version and toollibffidir. + * include/Makefile.in: Regenerate. + +2005-02-22 Andrew Haley + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Bump alignment to + odd-numbered register pairs for 64-bit integer types. + +2005-02-23 Andreas Tobler + + PR libffi/20104 + * testsuite/libffi.call/return_ll1.c: New test case. + +2005-02-11 Janis Johnson + + * testsuite/libffi.call/cls_align_longdouble.c: Remove dg-options. + * testsuite/libffi.call/float.c: Ditto. + * testsuite/libffi.call/float2.c: Ditto. + * testsuite/libffi.call/float3.c: Ditto. + +2005-02-08 Andreas Tobler + + * src/frv/ffitarget.h: Remove PPC stuff which does not belong to frv. + +2005-01-12 Eric Botcazou + + * testsuite/libffi.special/special.exp (cxx_options): Add + -shared-libgcc. + +2004-12-31 Richard Henderson + + * src/types.c (FFI_AGGREGATE_TYPEDEF): Remove. + (FFI_TYPEDEF): Rename from FFI_INTEGRAL_TYPEDEF. Replace size and + offset parameters with a type parameter; deduce size and structure + alignment. Update all users. 
+ +2004-12-31 Richard Henderson + + * src/types.c (FFI_TYPE_POINTER): Define with sizeof. + (FFI_TYPE_LONGDOUBLE): Fix for ia64. + * src/ia64/ffitarget.h (struct ffi_ia64_trampoline_struct): Move + into ffi_prep_closure. + * src/ia64/ia64_flags.h, src/ia64/ffi.c, src/ia64/unix.S: Rewrite + from scratch. + +2004-12-27 Richard Henderson + + * src/x86/unix64.S: Fix typo in unwind info. + +2004-12-25 Richard Henderson + + * src/x86/ffi64.c (struct register_args): Rename from stackLayout. + (enum x86_64_reg_class): Add X86_64_COMPLEX_X87_CLASS. + (merge_classes): Check for it. + (SSE_CLASS_P): New. + (classify_argument): Pass byte_offset by value; perform all updates + inside struct case. + (examine_argument): Add classes argument; handle + X86_64_COMPLEX_X87_CLASS. + (ffi_prep_args): Merge into ... + (ffi_call): ... here. Share stack frame with ffi_call_unix64. + (ffi_prep_cif_machdep): Setup cif->flags for proper structure return. + (ffi_fill_return_value): Remove. + (ffi_prep_closure): Remove dead assert. + (ffi_closure_unix64_inner): Rename from ffi_closure_UNIX64_inner. + Rewrite to use struct register_args instead of va_list. Create + flags for handling structure returns. + * src/x86/unix64.S: Remove dead strings. + (ffi_call_unix64): Rename from ffi_call_UNIX64. Rewrite to share + stack frame with ffi_call. Handle structure returns properly. + (float2sse, floatfloat2sse, double2sse): Remove. + (sse2float, sse2double, sse2floatfloat): Remove. + (ffi_closure_unix64): Rename from ffi_closure_UNIX64. Rewrite + to handle structure returns properly. + +2004-12-08 David Edelsohn + + * Makefile.am (AM_MAKEFLAGS): Remove duplicate LIBCFLAGS and + PICFLAG. + * Makefile.in: Regenerated. + +2004-12-02 Richard Sandiford + + * configure.ac: Use TL_AC_GCC_VERSION to set gcc_version. + * configure, aclocal.m4, Makefile.in: Regenerate. + * include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2004-11-29 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-25 Kelley Cook + + * configure: Regenerate for libtool reversion. + +2004-11-24 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-23 John David Anglin + + * testsuite/lib/libffi-dg.exp: Use new procs in target-libpath.exp. + +2004-11-23 Richard Sandiford + + * src/mips/o32.S (ffi_call_O32, ffi_closure_O32): Use jalr instead + of jal. Use an absolute encoding for the frame information. + +2004-11-23 Kelley Cook + + * Makefile.am: Remove no-dependencies. Add ACLOCAL_AMFLAGS. + * acinclude.m4: Delete logic for sincludes. + * aclocal.m4, Makefile.in, configure: Regenerate. + * include/Makefile: Likewise. + * testsuite/Makefile: Likewise. + +2004-11-22 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_closure): Align doubles and 64-bit integers + on a 8-byte boundary. + * src/sparc/v8.S (ffi_closure_v8): Reserve frame space for arguments. + +2004-10-27 Richard Earnshaw + + * src/arm/ffi.c (ffi_prep_cif_machdep): Handle functions that return + long long values. Round stack allocation to a multiple of 8 bytes + for ATPCS compatibility. + * src/arm/sysv.S (ffi_call_SYSV): Rework to avoid use of APCS register + names. Handle returning long long types. Add Thumb and interworking + support. Improve soft-float code. + +2004-10-27 Richard Earnshaw + + * testsuite/lib/libffi-db.exp (load_gcc_lib): New function. + (libffi_exit): New function. + (libffi_init): Build the testglue wrapper if needed. + +2004-10-25 Eric Botcazou + + PR other/18138 + * testsuite/lib/libffi-dg.exp: Accept more than one multilib libgcc. 
+
+2004-10-25 Kazuhiro Inaoka
+
+ * src/m32r/libffitarget.h (FFI_CLOSURES): Set to 0.
+
+2004-10-20 Kaz Kojima
+
+ * src/sh/sysv.S (ffi_call_SYSV): Don't align for double data.
+ * testsuite/libffi.call/float3.c: New test case.
+
+2004-10-18 Kaz Kojima
+
+ * src/sh/ffi.c (ffi_prep_closure): Set T bit in trampoline for
+ the function returning a structure pointed with R2.
+ * src/sh/sysv.S (ffi_closure_SYSV): Use R2 as the pointer to
+ the structure return value if T bit set. Emit position
+ independent code and EH data if PIC.
+
+2004-10-13 Kazuhiro Inaoka
+
+ * Makefile.am: Add m32r support.
+ * configure.ac: Likewise.
+ * Makefile.in: Regenerate.
+ * configure: Regenerate.
+ * src/types.c: Add m32r port to FFI_INTERNAL_TYPEDEF
+ (uint64, sint64, double, longdouble)
+ * src/m32r: New directory.
+ * src/m32r/ffi.c: New file.
+ * src/m32r/sysv.S: Likewise.
+ * src/m32r/ffitarget.h: Likewise.
+
+2004-10-02 Kaz Kojima
+
+ * testsuite/libffi.call/negint.c: New test case.
+
+2004-09-14 H.J. Lu
+
+ PR libgcj/17465
+ * testsuite/lib/libffi-dg.exp: Don't use global ld_library_path.
+ Set up LD_LIBRARY_PATH, SHLIB_PATH, LD_LIBRARYN32_PATH,
+ LD_LIBRARY64_PATH, LD_LIBRARY_PATH_32, LD_LIBRARY_PATH_64 and
+ DYLD_LIBRARY_PATH.
+
+2004-09-05 Andreas Tobler
+
+ * testsuite/libffi.call/many_win32.c: Remove whitespaces.
+ * testsuite/libffi.call/promotion.c: Likewise.
+ * testsuite/libffi.call/return_ll.c: Remove unused var. Cleanup
+ whitespaces.
+ * testsuite/libffi.call/return_sc.c: Likewise.
+ * testsuite/libffi.call/return_uc.c: Likewise.
+
+2004-09-05 Andreas Tobler
+
+ * src/powerpc/darwin.S: Fix comments and indentation.
+ * src/powerpc/darwin_closure.S: Likewise.
+
+2004-09-02 Andreas Tobler
+
+ * src/powerpc/ffi_darwin.c: Add flag for longdouble return values.
+ (ffi_prep_args): Handle longdouble arguments.
+ (ffi_prep_cif_machdep): Set flags for longdouble. Calculate space for
+ longdouble.
+ (ffi_closure_helper_DARWIN): Add closure handling for longdouble.
+ * src/powerpc/darwin.S (_ffi_call_DARWIN): Add handling of longdouble
+ values.
+ * src/powerpc/darwin_closure.S (_ffi_closure_ASM): Likewise.
+ * src/types.c: Defined longdouble size and alignment for darwin.
+
+2004-09-02 Andreas Tobler
+
+ * src/powerpc/aix.S: Remove whitespaces.
+ * src/powerpc/aix_closure.S: Likewise.
+ * src/powerpc/asm.h: Likewise.
+ * src/powerpc/ffi.c: Likewise.
+ * src/powerpc/ffitarget.h: Likewise.
+ * src/powerpc/linux64.S: Likewise.
+ * src/powerpc/linux64_closure.S: Likewise.
+ * src/powerpc/ppc_closure.S: Likewise.
+ * src/powerpc/sysv.S: Likewise.
+
+2004-08-30 Anthony Green
+
+ * Makefile.am: Add frv support.
+ * Makefile.in, testsuite/Makefile.in: Rebuilt.
+ * configure.ac: Read configure.host.
+ * configure.in: Read configure.host.
+ * configure.host: New file. frv-elf needs libgloss.
+ * include/ffi.h.in: Force ffi_closure to have a nice big (8)
+ alignment. This is needed by frv and shouldn't harm the others.
+ * include/ffi_common.h (ALIGN_DOWN): New macro.
+ * src/frv/ffi.c, src/frv/ffitarget.h, src/frv/eabi.S: New files.
+
+2004-08-24 David Daney
+
+ * testsuite/libffi.call/closure_fn0.c: Xfail mips64* instead of mips*.
+ * testsuite/libffi.call/closure_fn1.c: Likewise.
+ * testsuite/libffi.call/closure_fn2.c: Likewise.
+ * testsuite/libffi.call/closure_fn3.c: Likewise.
+ * testsuite/libffi.call/closure_fn4.c: Likewise.
+ * testsuite/libffi.call/closure_fn5.c: Likewise.
+ * testsuite/libffi.call/cls_18byte.c: Likewise.
+ * testsuite/libffi.call/cls_19byte.c: Likewise.
+ * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_sshort.c: Likewise. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise and set return value + to zero. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + +2004-08-23 David Daney + + PR libgcj/13141 + * src/mips/ffitarget.h (FFI_O32_SOFT_FLOAT): New ABI. + * src/mips/ffi.c (ffi_prep_args): Fix alignment calculation. + (ffi_prep_cif_machdep): Handle FFI_O32_SOFT_FLOAT floating point + parameters and return types. + (ffi_call): Handle FFI_O32_SOFT_FLOAT ABI. + (ffi_prep_closure): Ditto. + (ffi_closure_mips_inner_O32): Handle FFI_O32_SOFT_FLOAT ABI, fix + alignment calculations. + * src/mips/o32.S (ffi_closure_O32): Don't use floating point + instructions if FFI_O32_SOFT_FLOAT, make stack frame ABI compliant. + +2004-08-14 Casey Marshall + + * src/mips/ffi.c (ffi_pref_cif_machdep): set `cif->flags' to + contain `FFI_TYPE_UINT64' as return type for any 64-bit + integer (O32 ABI only). + (ffi_prep_closure): new function. + (ffi_closure_mips_inner_O32): new function. 
+ * src/mips/ffitarget.h: Define `FFI_CLOSURES' and + `FFI_TRAMPOLINE_SIZE' appropriately if the ABI is o32. + * src/mips/o32.S (ffi_call_O32): add labels for .eh_frame. Return + 64 bit integers correctly. + (ffi_closure_O32): new function. + Added DWARF-2 unwind info for both functions. + +2004-08-10 Andrew Haley + + * src/x86/ffi64.c (ffi_prep_args ): 8-align all stack arguments. + +2004-08-01 Robert Millan + + * configure.ac: Detect knetbsd-gnu and kfreebsd-gnu. + * configure: Regenerate. + +2004-07-30 Maciej W. Rozycki + + * acinclude.m4 (AC_FUNC_MMAP_BLACKLIST): Check for + and mmap() explicitly instead of relying on preset autoconf cache + variables. + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2004-07-11 Ulrich Weigand + + * src/s390/ffi.c (ffi_prep_args): Fix C aliasing violation. + (ffi_check_float_struct): Remove unused prototype. + +2004-06-30 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (flush_icache): ';' is a comment + character on Darwin, use '\n\t' instead. + +2004-06-26 Matthias Klose + + * libtool-version: Fix typo in revision/age. + +2004-06-17 Matthias Klose + + * libtool-version: New. + * Makefile.am (libffi_la_LDFLAGS): Use -version-info for soname. + * Makefile.in: Regenerate. + +2004-06-15 Paolo Bonzini + + * Makefile.am: Remove useless multilib rules. + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate with automake 1.8.5. + * configure.ac: Remove useless multilib configury. + * configure: Regenerate. + +2004-06-15 Paolo Bonzini + + * .cvsignore: New file. + +2004-06-10 Jakub Jelinek + + * src/ia64/unix.S (ffi_call_unix): Insert group barrier break + fp_done. + (ffi_closure_UNIX): Fix f14/f15 adjustment if FLOAT_SZ is ever + changed from 8. + +2004-06-06 Sean McNeil + + * configure.ac: Add x86_64-*-freebsd* support. + * configure: Regenerate. + +2004-04-26 Joe Buck + + Bug 15093 + * configure.ac: Test for existence of mmap and sys/mman.h before + checking blacklist. Fix suggested by Jim Wilson. + * configure: Regenerate. + +2004-04-26 Matt Austern + + * src/powerpc/darwin.S: Go through a non-lazy pointer for initial + FDE location. + * src/powerpc/darwin_closure.S: Likewise. + +2004-04-24 Andreas Tobler + + * testsuite/libffi.call/cls_multi_schar.c (main): Fix initialization + error. Reported by Thomas Heller . + * testsuite/libffi.call/cls_multi_sshort.c (main): Likewise. + * testsuite/libffi.call/cls_multi_ushort.c (main): Likewise. + +2004-03-20 Matthias Klose + + * src/pa/linux.S: Fix typo. + +2004-03-19 Matthias Klose + + * Makefile.am: Update. + * Makefile.in: Regenerate. + * src/pa/ffi.h.in: Remove. + * src/pa/ffitarget.h: New file. + +2004-02-10 Randolph Chung + + * Makefile.am: Add PA support. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * configure.ac: Add PA target. + * configure: Regenerate. + * src/pa/ffi.c: New file. + * src/pa/ffi.h.in: Add PA support. + * src/pa/linux.S: New file. + * prep_cif.c: Add PA support. + +2004-03-16 Hosaka Yuji + + * src/types.c: Fix alignment size of X86_WIN32 case int64 and + double. + * src/x86/ffi.c (ffi_prep_args): Replace ecif->cif->rtype->type + with ecif->cif->flags. + (ffi_call, ffi_prep_incoming_args_SYSV): Replace cif->rtype->type + with cif->flags. + (ffi_prep_cif_machdep): Add X86_WIN32 struct case. + (ffi_closure_SYSV): Add 1 or 2-bytes struct case for X86_WIN32. + * src/x86/win32.S (retstruct1b, retstruct2b, sc_retstruct1b, + sc_retstruct2b): Add for 1 or 2-bytes struct case. + +2004-03-15 Kelley Cook + + * configure.in: Rename file to ... + * configure.ac: ... 
this. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2004-03-12 Matt Austern + + * src/powerpc/darwin.S: Fix EH information so it corresponds to + changes in EH format resulting from addition of linkonce support. + * src/powerpc/darwin_closure.S: Likewise. + +2004-03-11 Andreas Tobler + Paolo Bonzini + + * Makefile.am (AUTOMAKE_OPTIONS): Set them. + Remove VPATH. Remove rules for object files. Remove multilib support. + (AM_CCASFLAGS): Add. + * configure.in (AC_CONFIG_HEADERS): Relace AM_CONFIG_HEADER. + (AC_PREREQ): Bump version to 2.59. + (AC_INIT): Fill with version info and bug address. + (ORIGINAL_LD_FOR_MULTILIBS): Remove. + (AM_ENABLE_MULTILIB): Use this instead of AC_ARG_ENABLE. + De-precious CC so that the right flags are passed down to multilibs. + (AC_MSG_ERROR): Replace obsolete macro AC_ERROR. + (AC_CONFIG_FILES): Replace obsolete macro AC_LINK_FILES. + (AC_OUTPUT): Reorganize the output with AC_CONFIG_COMMANDS. + * configure: Rebuilt. + * aclocal.m4: Likewise. + * Makefile.in, include/Makefile.in, testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2004-03-11 Andreas Schwab + + * src/ia64/ffi.c (ffi_prep_incoming_args_UNIX): Get floating point + arguments from fp registers only for the first 8 parameter slots. + Don't convert a float parameter when passed in memory. + +2004-03-09 Hans-Peter Nilsson + + * configure: Regenerate for config/accross.m4 correction. + +2004-02-25 Matt Kraai + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Change + ecif->cif->bytes to bytes. + (ffi_prep_cif_machdep): Add braces around nested if statement. + +2004-02-09 Alan Modra + + * src/types.c (pointer): POWERPC64 has 8 byte pointers. + + * src/powerpc/ffi.c (ffi_prep_args64): Correct long double handling. + (ffi_closure_helper_LINUX64): Fix typo. + * testsuite/libffi.call/cls_align_longdouble.c: Pass -mlong-double-128 + for powerpc64-*-*. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + +2004-02-08 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep ): Correct + long double function return and long double arg handling. + (ffi_closure_helper_LINUX64): Formatting. Delete unused "ng" var. + Use "end_pfr" instead of "nf". Correct long double handling. + Localise "temp". + * src/powerpc/linux64.S (ffi_call_LINUX64): Save f2 long double + return value. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Allocate + space for long double return value. Adjust stack frame and offsets. + Load f2 long double return. + +2004-02-07 Alan Modra + + * src/types.c: Use 16 byte long double for POWERPC64. + +2004-01-25 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_args_v9): Shift the parameter array + when the structure return address is passed in %o0. + (ffi_V9_return_struct): Rename into ffi_v9_layout_struct. + (ffi_v9_layout_struct): Align the field following a nested structure + on a word boundary. Use memmove instead of memcpy. + (ffi_call): Update call to ffi_V9_return_struct. + (ffi_prep_closure): Define 'ctx' only for V8. + (ffi_closure_sparc_inner): Clone into ffi_closure_sparc_inner_v8 + and ffi_closure_sparc_inner_v9. + (ffi_closure_sparc_inner_v8): Return long doubles by reference. + Always skip the structure return address. For structures and long + doubles, copy the argument directly. + (ffi_closure_sparc_inner_v9): Skip the structure return address only + if required. Shift the maximum floating-point slot accordingly. 
For + big structures, copy the argument directly; otherwise, left-justify the + argument and call ffi_v9_layout_struct to lay out the structure on + the stack. + * src/sparc/v8.S: Undef STACKFRAME before defining it. + (ffi_closure_v8): Pass the structure return address. Update call to + ffi_closure_sparc_inner_v8. Short-circuit FFI_TYPE_INT handling. + Skip the 'unimp' insn when returning long doubles and structures. + * src/sparc/v9.S: Undef STACKFRAME before defining it. + (ffi_closure_v9): Increase the frame size by 2 words. Short-circuit + FFI_TYPE_INT handling. Load structures both in integers and + floating-point registers on return. + * README: Update status of the SPARC port. + +2004-01-24 Andreas Tobler + + * testsuite/libffi.call/pyobjc-tc.c (main): Treat result value + as of type ffi_arg. + * testsuite/libffi.call/struct3.c (main): Fix CHECK. + +2004-01-22 Ulrich Weigand + + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Treat result + value as of type ffi_arg, not unsigned int. + +2004-01-21 Michael Ritzert + + * ffi64.c (ffi_prep_args): Cast the RHS of an assignment instead + of the LHS. + +2004-01-12 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_32 for + Solaris. + +2004-01-08 Rainer Orth + + * testsuite/libffi.call/ffitest.h (allocate_mmap): Cast MAP_FAILED + to void *. + +2003-12-10 Richard Henderson + + * testsuite/libffi.call/cls_align_pointer.c: Cast pointers to + size_t instead of int. + +2003-12-04 Hosaka Yuji + + * testsuite/libffi.call/many_win32.c: Include . + * testsuite/libffi.call/many_win32.c (main): Replace variable + int i with unsigned long ul. + + * testsuite/libffi.call/cls_align_uint64.c: New test case. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + +2003-12-02 Hosaka Yuji + + PR other/13221 + * src/x86/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): + Align arguments to 32 bits. + +2003-12-01 Andreas Tobler + + PR other/13221 + * testsuite/libffi.call/cls_multi_sshort.c: New test case. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Cosmetics. + +2003-11-26 Kaveh R. Ghazi + + * testsuite/libffi.call/ffitest.h: Include . + * testsuite/libffi.special/ffitestcxx.h: Likewise. + +2003-11-22 Andreas Tobler + + * Makefile.in: Rebuilt. + * configure: Likewise. + * testsuite/libffi.special/unwindtest.cc: Convert the mmap to + the right type. + +2003-11-21 Andreas Jaeger + Andreas Tobler + + * acinclude.m4: Add AC_FUNC_MMAP_BLACKLIST. + * configure.in: Call AC_FUNC_MMAP_BLACKLIST. + * Makefile.in: Rebuilt. + * aclocal.m4: Likewise. + * configure: Likewise. + * fficonfig.h.in: Likewise. + * testsuite/lib/libffi-dg.exp: Add include dir. + * testsuite/libffi.call/ffitest.h: Add MMAP definitions. + * testsuite/libffi.special/ffitestcxx.h: Likewise. 
+ * testsuite/libffi.call/closure_fn0.c: Use MMAP functionality + for ffi_closure if available. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + +2003-11-20 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Make the -lgcc_s conditional. + +2003-11-19 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Add DYLD_LIBRARY_PATH for darwin. + Add -lgcc_s to additional flags. + +2003-11-12 Andreas Tobler + + * configure.in, include/Makefile.am: PR libgcj/11147, install + the ffitarget.h header file in a gcc versioned and target + dependent place. + * configure: Regenerated. + * Makefile.in, include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2003-11-09 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Print result and check + with dg-output to make debugging easier. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. 
+ * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Make ffi_closure + static. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_9byte2.c: New test case. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_double.c: Do a check on the result. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/return_sc.c: Cleanup whitespaces. + +2003-11-06 Andreas Tobler + + * src/prep_cif.c (ffi_prep_cif): Move the validity check after + the initialization. + +2003-10-23 Andreas Tobler + + * src/java_raw_api.c (ffi_java_ptrarray_to_raw): Replace + FFI_ASSERT(FALSE) with FFI_ASSERT(0). + +2003-10-22 David Daney + + * src/mips/ffitarget.h: Replace undefined UINT32 and friends with + __attribute__((__mode__(__SI__))) and friends. + +2003-10-22 Andreas Schwab + + * src/ia64/ffi.c: Replace FALSE/TRUE with false/true. + +2003-10-21 Andreas Tobler + + * configure.in: AC_LINK_FILES(ffitarget.h). + * configure: Regenerate. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2003-10-21 Paolo Bonzini + Richard Henderson + + Avoid that ffi.h includes fficonfig.h. + + * Makefile.am (EXTRA_DIST): Include ffitarget.h files + (TARGET_SRC_MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (TARGET_SRC_MIPS_SGI): Removed. + (MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (MIPS_SGI): Removed. + (CLEANFILES): Removed. + (mostlyclean-am, clean-am, mostlyclean-sub, clean-sub): New + targets. + * acconfig.h: Removed. + * configure.in: Compute sizeofs only for double and long double. + Use them to define and subst HAVE_LONG_DOUBLE. Include comments + into AC_DEFINE instead of using acconfig.h. 
Create + include/ffitarget.h instead of include/fficonfig.h. Rename + MIPS_GCC to MIPS_IRIX, drop MIPS_SGI since we are in gcc's tree. + AC_DEFINE EH_FRAME_FLAGS. + * include/Makefile.am (DISTCLEANFILES): New automake macro. + (hack_DATA): Add ffitarget.h. + * include/ffi.h.in: Remove all system specific definitions. + Declare raw API even if it is not installed, why bother? + Use limits.h instead of SIZEOF_* to define ffi_type_*. Do + not define EH_FRAME_FLAGS, it is in fficonfig.h now. Include + ffitarget.h instead of fficonfig.h. Remove ALIGN macro. + (UINT_ARG, INT_ARG): Removed, use ffi_arg and ffi_sarg instead. + * include/ffi_common.h (bool): Do not define. + (ffi_assert): Accept failed assertion. + (ffi_type_test): Return void and accept file/line. + (FFI_ASSERT): Pass stringized failed assertion. + (FFI_ASSERT_AT): New macro. + (FFI_ASSERT_VALID_TYPE): New macro. + (UINT8, SINT8, UINT16, SINT16, UINT32, SINT32, + UINT64, SINT64): Define here with gcc's __attribute__ macro + instead of in ffi.h + (FLOAT32, ALIGN): Define here instead of in ffi.h + * include/ffi-mips.h: Removed. Its content moved to + src/mips/ffitarget.h after separating assembly and C sections. + * src/alpha/ffi.c, src/alpha/ffi.c, src/java_raw_api.c + src/prep_cif.c, src/raw_api.c, src/ia64/ffi.c, + src/mips/ffi.c, src/mips/n32.S, src/mips/o32.S, + src/mips/ffitarget.h, src/sparc/ffi.c, src/x86/ffi64.c: + SIZEOF_ARG -> FFI_SIZEOF_ARG. + * src/ia64/ffi.c: Include stdbool.h (provided by GCC 2.95+). + * src/debug.c (ffi_assert): Accept stringized failed assertion. + (ffi_type_test): Rewritten. + * src/prep-cif.c (initialize_aggregate, ffi_prep_cif): Call + FFI_ASSERT_VALID_TYPE. + * src/alpha/ffitarget.h, src/arm/ffitarget.h, + src/ia64/ffitarget.h, src/m68k/ffitarget.h, + src/mips/ffitarget.h, src/powerpc/ffitarget.h, + src/s390/ffitarget.h, src/sh/ffitarget.h, + src/sh64/ffitarget.h, src/sparc/ffitarget.h, + src/x86/ffitarget.h: New files. + * src/alpha/osf.S, src/arm/sysv.S, src/ia64/unix.S, + src/m68k/sysv.S, src/mips/n32.S, src/mips/o32.S, + src/powerpc/aix.S, src/powerpc/darwin.S, + src/powerpc/ffi_darwin.c, src/powerpc/linux64.S, + src/powerpc/linux64_closure.S, src/powerpc/ppc_closure.S, + src/powerpc/sysv.S, src/s390/sysv.S, src/sh/sysv.S, + src/sh64/sysv.S, src/sparc/v8.S, src/sparc/v9.S, + src/x86/sysv.S, src/x86/unix64.S, src/x86/win32.S: + include fficonfig.h + +2003-10-20 Rainer Orth + + * src/mips/ffi.c: Use _ABIN32, _ABIO32 instead of external + _MIPS_SIM_NABI32, _MIPS_SIM_ABI32. + +2003-10-19 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Declare bytes again. + Used when FFI_DEBUG = 1. + +2003-10-14 Alan Modra + + * src/types.c (double, longdouble): Default POWERPC64 to 8 byte size + and align. + +2003-10-06 Rainer Orth + + * include/ffi_mips.h: Define FFI_MIPS_N32 for N32/N64 ABIs, + FFI_MIPS_O32 for O32 ABI. + +2003-10-01 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_64 for + SPARC64. Cleanup whitespaces. + +2003-09-19 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Xfail mips, arm, + strongarm, xscale. Cleanup whitespaces. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. 
+ * testsuite/libffi.call/cls_2byte.c: Likewise.
+ * testsuite/libffi.call/cls_3_1byte.c: Likewise.
+ * testsuite/libffi.call/cls_3byte1.c: Likewise.
+ * testsuite/libffi.call/cls_3byte2.c: Likewise.
+ * testsuite/libffi.call/cls_4_1byte.c: Likewise.
+ * testsuite/libffi.call/cls_4byte.c: Likewise.
+ * testsuite/libffi.call/cls_5byte.c: Likewise.
+ * testsuite/libffi.call/cls_6byte.c: Likewise.
+ * testsuite/libffi.call/cls_7byte.c: Likewise.
+ * testsuite/libffi.call/cls_8byte.c: Likewise.
+ * testsuite/libffi.call/cls_double.c: Likewise.
+ * testsuite/libffi.call/cls_float.c: Likewise.
+ * testsuite/libffi.call/cls_uchar.c: Likewise.
+ * testsuite/libffi.call/cls_uint.c: Likewise.
+ * testsuite/libffi.call/cls_ulonglong.c: Likewise.
+ * testsuite/libffi.call/cls_ushort.c: Likewise.
+ * testsuite/libffi.call/nested_struct.c: Likewise.
+ * testsuite/libffi.call/nested_struct1.c: Likewise.
+ * testsuite/libffi.call/problem1.c: Likewise.
+ * testsuite/libffi.special/unwindtest.cc: Likewise.
+ * testsuite/libffi.call/pyobjc-tc.c: Cleanup whitespaces.
+
+2003-09-18 David Edelsohn
+
+ * src/powerpc/aix.S: Cleanup whitespaces.
+ * src/powerpc/aix_closure.S: Likewise.
+
+2003-09-18 Andreas Tobler
+
+ * src/powerpc/darwin.S: Cleanup whitespaces, comment formatting.
+ * src/powerpc/darwin_closure.S: Likewise.
+ * src/powerpc/ffi_darwin.c: Likewise.
+
+2003-09-18 Andreas Tobler
+ David Edelsohn
+
+ * src/types.c (double): Add AIX and Darwin to the right TYPEDEF.
+ * src/powerpc/aix_closure.S: Remove the pointer to the outgoing
+ parameter stack.
+ * src/powerpc/darwin_closure.S: Likewise.
+ * src/powerpc/ffi_darwin.c (ffi_prep_args): Handle structures
+ according to the Darwin/AIX ABI.
+ (ffi_prep_cif_machdep): Likewise.
+ (ffi_closure_helper_DARWIN): Likewise.
+ Remove the outgoing parameter stack logic. Simplify the evaluation
+ of the different CASE types.
+ (ffi_prep_closure): Avoid the casts on lvalues. Change the branch
+ statement in the trampoline code.
+
+2003-09-18 Kaz Kojima
+
+ * src/sh/ffi.c (ffi_prep_args): Take into account the alignment
+ for the register size.
+ (ffi_closure_helper_SYSV): Handle the structure return value
+ address correctly.
+ (ffi_closure_helper_SYSV): Return the appropriate type when
+ the registers are used for the structure return value.
+ * src/sh/sysv.S (ffi_closure_SYSV): Fix the stack layout for
+ the 64-bit return value. Update copyright years.
+
+2003-09-17 Rainer Orth
+
+ * testsuite/lib/libffi-dg.exp (libffi_target_compile): Search in
+ srcdir for ffi_mips.h.
+
+2003-09-12 Alan Modra
+
+ * src/prep_cif.c (initialize_aggregate): Include tail padding in
+ structure size.
+ * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Correct
+ placement of float result.
+ * testsuite/libffi.special/unwindtest.cc (closure_test_fn1): Correct
+ cast of "resp" for big-endian 64 bit machines.
+
+2003-09-11 Alan Modra
+
+ * src/types.c (double, longdouble): Merge identical SH and ARM
+ typedefs, and add POWERPC64.
+ * src/powerpc/ffi.c (ffi_prep_args64): Correct next_arg calc for
+ struct split over gpr and rest.
+ (ffi_prep_cif_machdep): Correct intarg_count for structures.
+ * src/powerpc/linux64.S (ffi_call_LINUX64): Fix gpr offsets.
+
+2003-09-09 Andreas Tobler
+
+ * src/powerpc/ffi.c (ffi_closure_helper_SYSV): Handle struct
+ passing correctly.
+
+2003-09-09 Alan Modra
+
+ * configure: Regenerate.
+
+2003-09-04 Andreas Tobler
+
+ * Makefile.am: Remove build rules for ffitest.
+ * Makefile.in: Rebuilt.
+ +2003-09-04 Andreas Tobler + + * src/java_raw_api.c: Include to fix compiler warning + about implicit declaration of abort(). + +2003-09-04 Andreas Tobler + + * Makefile.am: Add dejagnu test framework. Fixes PR other/11411. + * Makefile.in: Rebuilt. + * configure.in: Add dejagnu test framework. + * configure: Rebuilt. + + * testsuite/Makefile.am: New file. + * testsuite/Makefile.in: Built + * testsuite/lib/libffi-dg.exp: New file. + * testsuite/config/default.exp: Likewise. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Likewise. + * testsuite/libffi.call/closure_fn0.c: Likewise. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float1.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/many.c: Likewise. + * testsuite/libffi.call/many_win32.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/return_ll.c: Likewise. + * testsuite/libffi.call/return_sc.c: Likewise. + * testsuite/libffi.call/return_uc.c: Likewise. + * testsuite/libffi.call/strlen.c: Likewise. + * testsuite/libffi.call/strlen_win32.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.special/special.exp: New file. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + + +2003-08-13 Kaz Kojima + + * src/sh/ffi.c (OFS_INT16): Set 0 for little endian case. Update + copyright years. + +2003-08-02 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Modify for changed gcc + structure passing. + (ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64.S: Remove code writing to parm save area. 
+ * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Use return + address in lr from ffi_closure_helper_LINUX64 call to calculate + table address. Optimize function tail. + +2003-07-28 Andreas Tobler + + * src/sparc/ffi.c: Handle all floating point registers. + * src/sparc/v9.S: Likewise. Fixes second part of PR target/11410. + +2003-07-11 Gerald Pfeifer + + * README: Note that libffi is not part of GCC. Update the project + URL and status. + +2003-06-19 Franz Sirl + + * src/powerpc/ppc_closure.S: Include ffi.h. + +2003-06-13 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .uleb128/.sleb128 directives. + Use C style comments. + +2003-06-13 Kaz Kojima + + * Makefile.am: Add SHmedia support. Fix a typo of SH support. + * Makefile.in: Regenerate. + * configure.in (sh64-*-linux*, sh5*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SHmedia support. + * src/sh64/ffi.c: New file. + * src/sh64/sysv.S: New file. + +2003-05-16 Jakub Jelinek + + * configure.in (HAVE_RO_EH_FRAME): Check whether .eh_frame section + should be read-only. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * include/ffi.h.in (EH_FRAME_FLAGS): Define. + * src/alpha/osf.S: Use EH_FRAME_FLAGS. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. Include ffi.h. + * src/powerpc/sysv.S: Use EH_FRAME_FLAGS. Use pcrel encoding + if -fpic/-fPIC/-mrelocatable. + * src/powerpc/powerpc_closure.S: Likewise. + * src/sparc/v8.S: If HAVE_RO_EH_FRAME is defined, don't include + #write in .eh_frame flags. + * src/sparc/v9.S: Likewise. + * src/x86/unix64.S: Use EH_FRAME_FLAGS. + * src/x86/sysv.S: Likewise. Use pcrel encoding if -fpic/-fPIC. + * src/s390/sysv.S: Use EH_FRAME_FLAGS. Include ffi.h. + +2003-05-07 Jeff Sturm + + Fixes PR bootstrap/10656 + * configure.in (HAVE_AS_REGISTER_PSEUDO_OP): Test assembler + support for .register pseudo-op. + * src/sparc/v8.S: Use it. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2003-04-18 Jakub Jelinek + + * include/ffi.h.in (POWERPC64): Define if 64-bit. + (enum ffi_abi): Add FFI_LINUX64 on POWERPC. + Make it the default on POWERPC64. + (FFI_TRAMPOLINE_SIZE): Define to 24 on POWERPC64. + * configure.in: Change powerpc-*-linux* into powerpc*-*-linux*. + * configure: Rebuilt. + * src/powerpc/ffi.c (hidden): Define. + (ffi_prep_args_SYSV): Renamed from + ffi_prep_args. Cast pointers to unsigned long to shut up warnings. + (NUM_GPR_ARG_REGISTERS64, NUM_FPR_ARG_REGISTERS64, + ASM_NEEDS_REGISTERS64): New. + (ffi_prep_args64): New function. + (ffi_prep_cif_machdep): Handle FFI_LINUX64 ABI. + (ffi_call): Likewise. + (ffi_prep_closure): Likewise. + (flush_icache): Surround by #ifndef POWERPC64. + (ffi_dblfl): New union type. + (ffi_closure_helper_SYSV): Use it to avoid aliasing problems. + (ffi_closure_helper_LINUX64): New function. + * src/powerpc/ppc_closure.S: Surround whole file by #ifndef + __powerpc64__. + * src/powerpc/sysv.S: Likewise. + (ffi_call_SYSV): Rename ffi_prep_args to ffi_prep_args_SYSV. + * src/powerpc/linux64.S: New file. + * src/powerpc/linux64_closure.S: New file. + * Makefile.am (EXTRA_DIST): Add src/powerpc/linux64.S and + src/powerpc/linux64_closure.S. + (TARGET_SRC_POWERPC): Likewise. + + * src/ffitest.c (closure_test_fn, closure_test_fn1, closure_test_fn2, + closure_test_fn3): Fix result printing on big-endian 64-bit + machines. + (main): Print tst2_arg instead of uninitialized tst2_result. + + * src/ffitest.c (main): Hide what closure pointer really points to + from the compiler. 
+
+2003-04-16 Richard Earnshaw
+
+ * configure.in (arm-*-netbsdelf*): Add configuration.
+ (configure): Regenerated.
+
+2003-04-04 Loren J. Rittle
+
+ * include/Makefile.in: Regenerate.
+
+2003-03-21 Zdenek Dvorak
+
+ * libffi/include/ffi.h.in: Define X86 instead of X86_64 in 32
+ bit mode.
+ * libffi/src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV):
+ Receive closure pointer through parameter, read args using
+ __builtin_dwarf_cfa.
+ (FFI_INIT_TRAMPOLINE): Send closure reference through eax.
+
+2003-03-12 Andreas Schwab
+
+ * configure.in: Avoid trailing /. in toolexeclibdir.
+ * configure: Rebuilt.
+
+2003-03-03 Andreas Tobler
+
+ * src/powerpc/darwin_closure.S: Recode to fit dynamic libraries.
+
+2003-02-06 Andreas Tobler
+
+ * libffi/src/powerpc/darwin_closure.S:
+ Fix alignment bug, allocate 8 bytes for the result.
+ * libffi/src/powerpc/aix_closure.S:
+ Likewise.
+ * libffi/src/powerpc/ffi_darwin.c:
+ Update stackframe description for aix/darwin_closure.S.
+
+2003-02-06 Jakub Jelinek
+
+ * src/s390/ffi.c (ffi_closure_helper_SYSV): Add hidden visibility
+ attribute.
+
+2003-01-31 Christian Cornelssen,
+ Andreas Schwab
+
+ * configure.in: Adjust command to source config-ml.in to account
+ for changes to the libffi_basedir definition.
+ (libffi_basedir): Remove ${srcdir} from value and include trailing
+ slash if nonempty.
+
+ * configure: Regenerate.
+
+2003-01-29 Franz Sirl
+
+ * src/powerpc/ppc_closure.S: Recode to fit shared libs.
+
+2003-01-28 Andrew Haley
+
+ * include/ffi.h.in: Enable FFI_CLOSURES for x86_64.
+ * src/x86/ffi64.c (ffi_prep_closure): New.
+ (ffi_closure_UNIX64_inner): New.
+ * src/x86/unix64.S (ffi_closure_UNIX64): New.
+
+2003-01-27 Alexandre Oliva
+
+ * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST.
+ Remove USE_LIBDIR conditional.
+ * Makefile.am (toolexecdir, toolexeclibdir): Don't override.
+ * Makefile.in, configure: Rebuilt.
+
+2003-01-27 David Edelsohn
+
+ * Makefile.am (TARGET_SRC_POWERPC_AIX): Fix typo.
+ * Makefile.in: Regenerate.
+
+2003-01-22 Andrew Haley
+
+ * src/powerpc/darwin.S (_ffi_call_AIX): Add Augmentation size to
+ unwind info.
+
+2003-01-21 Andreas Tobler
+
+ * src/powerpc/darwin.S: Add unwind info.
+ * src/powerpc/darwin_closure.S: Likewise.
+
+2003-01-14 Andrew Haley
+
+ * src/x86/ffi64.c (ffi_prep_args): Check for void retval.
+ (ffi_prep_cif_machdep): Likewise.
+ * src/x86/unix64.S: Add unwind info.
+
+2003-01-14 Andreas Jaeger
+
+ * src/ffitest.c (main): Only use ffi_closures if those are
+ supported.
+
+2003-01-13 Andreas Tobler
+
+ * libffi/src/ffitest.c
+ add closure testcases
+
+2003-01-13 Kevin B. Hendricks
+
+ * libffi/src/powerpc/ffi.c
+ fix alignment bug for float (4 byte aligned iso 8 byte)
+
+2003-01-09 Geoffrey Keating
+
+ * src/powerpc/ffi_darwin.c: Remove RCS version string.
+ * src/powerpc/darwin.S: Remove RCS version string.
+
+2003-01-03 Jeff Sturm
+
+ * include/ffi.h.in: Add closure defines for SPARC, SPARC64.
+ * src/ffitest.c (main): Use static storage for closure.
+ * src/sparc/ffi.c (ffi_prep_closure, ffi_closure_sparc_inner): New.
+ * src/sparc/v8.S (ffi_closure_v8): New.
+ * src/sparc/v9.S (ffi_closure_v9): New.
+
+2002-11-10 Ranjit Mathew
+
+ * include/ffi.h.in: Added FFI_STDCALL ffi_type
+ enumeration for X86_WIN32.
+ * src/x86/win32.S: Added ffi_call_STDCALL function
+ definition.
+ * src/x86/ffi.c (ffi_call/ffi_raw_call): Added
+ switch cases for recognising FFI_STDCALL and
+ calling ffi_call_STDCALL if target is X86_WIN32.
+ * src/ffitest.c (my_stdcall_strlen/stdcall_many): + stdcall versions of the "my_strlen" and "many" + test functions (for X86_WIN32). + Added test cases to test stdcall invocation using + these functions. + +2002-12-02 Kaz Kojima + + * src/sh/sysv.S: Add DWARF2 unwind info. + +2002-11-27 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Make section read-only. + +2002-11-26 Jim Wilson + + * src/types.c (FFI_TYPE_POINTER): Has size 8 on IA64. + +2002-11-23 H.J. Lu + + * acinclude.m4: Add dummy AM_PROG_LIBTOOL. + Include ../config/accross.m4. + * aclocal.m4; Rebuild. + * configure: Likewise. + +2002-11-15 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Adapt to pcrel FDE encoding. + +2002-11-11 DJ Delorie + + * configure.in: Look for common files in the right place. + +2002-10-08 Ulrich Weigand + + * src/java_raw_api.c (ffi_java_raw_to_ptrarray): Interpret + raw data as _Jv_word values, not ffi_raw. + (ffi_java_ptrarray_to_raw): Likewise. + (ffi_java_rvalue_to_raw): New function. + (ffi_java_raw_call): Call it. + (ffi_java_raw_to_rvalue): New function. + (ffi_java_translate_args): Call it. + * src/ffitest.c (closure_test_fn): Interpret return value + as ffi_arg, not int. + * src/s390/ffi.c (ffi_prep_cif_machdep): Add missing + FFI_TYPE_POINTER case. + (ffi_closure_helper_SYSV): Likewise. Also, assume return + values extended to word size. + +2002-10-02 Andreas Jaeger + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Remove debug output. + +2002-10-01 Bo Thorsen + + * include/ffi.h.in: Fix i386 win32 compilation. + +2002-09-30 Ulrich Weigand + + * configure.in: Add s390x-*-linux-* target. + * configure: Regenerate. + * include/ffi.h.in: Define S390X for s390x targets. + (FFI_CLOSURES): Define for s390/s390x. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * src/prep_cif.c (ffi_prep_cif): Do not compute stack space for s390. + * src/types.c (FFI_TYPE_POINTER): Use 8-byte pointers on s390x. + * src/s390/ffi.c: Major rework of existing code. Add support for + s390x targets. Add closure support. + * src/s390/sysv.S: Likewise. + +2002-09-29 Richard Earnshaw + + * src/arm/sysv.S: Fix typo. + +2002-09-28 Richard Earnshaw + + * src/arm/sysv.S: If we don't have machine/asm.h and the pre-processor + has defined __USER_LABEL_PREFIX__, then use it in CNAME. + (ffi_call_SYSV): Handle soft-float. + +2002-09-27 Bo Thorsen + + * include/ffi.h.in: Fix multilib x86-64 support. + +2002-09-22 Kaveh R. Ghazi + + * Makefile.am (all-multi): Fix multilib parallel build. + +2002-07-19 Kaz Kojima + + * configure.in (sh[34]*-*-linux*): Add brackets. + * configure: Regenerate. + +2002-07-18 Kaz Kojima + + * Makefile.am: Add SH support. + * Makefile.in: Regenerate. + * configure.in (sh-*-linux*, sh[34]*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SH support. + * src/sh/ffi.c: New file. + * src/sh/sysv.S: New file. + * src/types.c: Add SH support. + +2002-07-16 Bo Thorsen + + * src/x86/ffi64.c: New file that adds x86-64 support. + * src/x86/unix64.S: New file that handles argument setup for + x86-64. + * src/x86/sysv.S: Don't use this on x86-64. + * src/x86/ffi.c: Don't use this on x86-64. + Remove unused vars. + * src/prep_cif.c (ffi_prep_cif): Don't do stack size calculation + for x86-64. + * src/ffitest.c (struct6): New test that tests a special case in + the x86-64 ABI. + (struct7): Likewise. + (struct8): Likewise. + (struct9): Likewise. + (closure_test_fn): Silence warning about this when it's not used. + (main): Add the new tests. 
+ (main): Fix a couple of wrong casts and silence some compiler warnings. + * include/ffi.h.in: Add x86-64 ABI definition. + * fficonfig.h.in: Regenerate. + * Makefile.am: Add x86-64 support. + * configure.in: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + +2002-06-24 Bo Thorsen + + * src/types.c: Merge settings for similar architectures. + Add x86-64 sizes and alignments. + +2002-06-23 Bo Thorsen + + * src/arm/ffi.c (ffi_prep_args): Remove unused vars. + * src/sparc/ffi.c (ffi_prep_args_v8): Likewise. + * src/mips/ffi.c (ffi_prep_args): Likewise. + * src/m68k/ffi.c (ffi_prep_args): Likewise. + +2002-07-18 H.J. Lu (hjl@gnu.org) + + * Makefile.am (TARGET_SRC_MIPS_LINUX): New. + (libffi_la_SOURCES): Support MIPS_LINUX. + (libffi_convenience_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + + * configure.in (mips64*-*): Skip. + (mips*-*-linux*): New. + * configure: Regenerated. + + * src/mips/ffi.c: Include . + +2002-06-06 Ulrich Weigand + + * src/s390/sysv.S: Save/restore %r6. Add DWARF-2 unwind info. + +2002-05-27 Roger Sayle + + * src/x86/ffi.c (ffi_prep_args): Remove reference to avn. + +2002-05-27 Bo Thorsen + + * src/x86/ffi.c (ffi_prep_args): Remove unused variable and + fix formatting. + +2002-05-13 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_closure): Declare fd at + beginning of function (for older apple cc). + +2002-05-08 Alexandre Oliva + + * configure.in (ORIGINAL_LD_FOR_MULTILIBS): Preserve LD at + script entry, and set LD to it when configuring multilibs. + * configure: Rebuilt. + +2002-05-05 Jason Thorpe + + * configure.in (sparc64-*-netbsd*): Add target. + (sparc-*-netbsdelf*): Likewise. + * configure: Regenerate. + +2002-04-28 David S. Miller + + * configure.in, configure: Fix SPARC test in previous change. + +2002-04-29 Gerhard Tonn + + * Makefile.am: Add Linux for S/390 support. + * Makefile.in: Regenerate. + * configure.in: Add Linux for S/390 support. + * configure: Regenerate. + * include/ffi.h.in: Add Linux for S/390 support. + * src/s390/ffi.c: New file from libffi CVS tree. + * src/s390/sysv.S: New file from libffi CVS tree. + +2002-04-28 Jakub Jelinek + + * configure.in (HAVE_AS_SPARC_UA_PCREL): Check for working + %r_disp32(). + * src/sparc/v8.S: Use it. + * src/sparc/v9.S: Likewise. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2002-04-08 Hans Boehm + + * src/java_raw_api.c (ffi_java_raw_size): Handle FFI_TYPE_DOUBLE + correctly. + * src/ia64/unix.S: Add unwind information. Fix comments. + Save sp in a way that's compatible with unwind info. + (ffi_call_unix): Correctly restore sp in all cases. + * src/ia64/ffi.c: Add, fix comments. + +2002-04-08 Jakub Jelinek + + * src/sparc/v8.S: Make .eh_frame dependent on target word size. + +2002-04-06 Jason Thorpe + + * configure.in (alpha*-*-netbsd*): Add target. + * configure: Regenerate. + +2002-04-04 Jeff Sturm + + * src/sparc/v8.S: Add unwind info. + * src/sparc/v9.S: Likewise. + +2002-03-30 Krister Walfridsson + + * configure.in: Enable i*86-*-netbsdelf*. + * configure: Rebuilt. + +2002-03-29 David Billinghurst + + PR other/2620 + * src/mips/n32.s: Delete + * src/mips/o32.s: Delete + +2002-03-21 Loren J. Rittle + + * configure.in: Enable alpha*-*-freebsd*. + * configure: Rebuilt. + +2002-03-17 Bryce McKinlay + + * Makefile.am: libfficonvenience -> libffi_convenience. + * Makefile.in: Rebuilt. + + * Makefile.am: Define ffitest_OBJECTS. + * Makefile.in: Rebuilt. + +2002-03-07 Andreas Tobler + David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX closure files. 
+ (TARGET_SRC_POWERPC_AIX): Add aix_closure.S. + (TARGET_SRC_POWERPC_DARWIN): Add darwin_closure.S. + * Makefile.in: Regenerate. + * include/ffi.h.in: Add AIX and Darwin closure definitions. + * src/powerpc/ffi_darwin.c (ffi_prep_closure): New function. + (flush_icache, flush_range): New functions. + (ffi_closure_helper_DARWIN): New function. + * src/powerpc/aix_closure.S: New file. + * src/powerpc/darwin_closure.S: New file. + +2002-02-24 Jeff Sturm + + * include/ffi.h.in: Add typedef for ffi_arg. + * src/ffitest.c (main): Declare rint with ffi_arg. + +2002-02-21 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Skip appropriate + number of GPRs for floating-point arguments. + +2002-01-31 Anthony Green + + * configure: Rebuilt. + * configure.in: Replace CHECK_SIZEOF and endian tests with + cross-compiler friendly macros. + * aclocal.m4 (AC_COMPILE_CHECK_SIZEOF, AC_C_BIGENDIAN_CROSS): New + macros. + +2002-01-18 David Edelsohn + + * src/powerpc/darwin.S (_ffi_call_AIX): New. + * src/powerpc/aix.S (ffi_call_DARWIN): New. + +2002-01-17 David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX files. + (TARGET_SRC_POWERPC_AIX): New. + (POWERPC_AIX): New stanza. + * Makefile.in: Regenerate. + * configure.in: Add AIX case. + * configure: Regenerate. + * include/ffi.h.in (ffi_abi): Add FFI_AIX. + * src/powerpc/ffi_darwin.c (ffi_status): Use "long" to scale frame + size. Fix "long double" support. + (ffi_call): Add FFI_AIX case. + * src/powerpc/aix.S: New. + +2001-10-09 John Hornkvist + + Implement Darwin PowerPC ABI. + * configure.in: Handle powerpc-*-darwin*. + * Makefile.am: Set source files for POWERPC_DARWIN. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + * include/ffi.h.in: Define FFI_DARWIN and FFI_DEFAULT_ABI for + POWERPC_DARWIN. + * src/powerpc/darwin.S: New file. + * src/powerpc/ffi_darwin.c: New file. + +2001-10-07 Joseph S. Myers + + * src/x86/ffi.c: Fix spelling error of "separate" as "seperate". + +2001-07-16 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .balign directive. + Use C style comments. + +2001-07-16 Rainer Orth + + * src/alpha/ffi.c (ffi_prep_closure): Avoid gas-only mnemonic. + Fixes PR bootstrap/3563. + +2001-06-26 Rainer Orth + + * src/alpha/osf.S (ffi_closure_osf): Use .rdata for ECOFF. + +2001-06-25 Rainer Orth + + * configure.in: Recognize sparc*-sun-* host. + * configure: Regenerate. + +2001-06-06 Andrew Haley + + * src/alpha/osf.S (__FRAME_BEGIN__): Conditionalize for ELF. + +2001-06-03 Andrew Haley + + * src/alpha/osf.S: Add unwind info. + * src/powerpc/sysv.S: Add unwind info. + * src/powerpc/ppc_closure.S: Likewise. + +2000-05-31 Jeff Sturm + + * configure.in: Fix AC_ARG_ENABLE usage. + * configure: Rebuilt. + +2001-05-06 Bryce McKinlay + + * configure.in: Remove warning about beta code. + * configure: Rebuilt. + +2001-04-25 Hans Boehm + + * src/ia64/unix.S: Restore stack pointer when returning from + ffi_closure_UNIX. + * src/ia64/ffi.c: Fix typo in comment. + +2001-04-18 Jim Wilson + + * src/ia64/unix.S: Delete unnecessary increment and decrement of loc2 + to eliminate RAW DV. + +2001-04-12 Bryce McKinlay + + * Makefile.am: Make a libtool convenience library. + * Makefile.in: Rebuilt. + +2001-03-29 Bryce McKinlay + + * configure.in: Use different syntax for subdirectory creation. + * configure: Rebuilt. + +2001-03-27 Jon Beniston + + * configure.in: Added X86_WIN32 target (Win32, CygWin, MingW). + * configure: Rebuilt. + * Makefile.am: Added X86_WIN32 target support. + * Makefile.in: Rebuilt. 
+ + * include/ffi.h.in: Added X86_WIN32 target support. + + * src/ffitest.c: Doesn't run structure tests for X86_WIN32 targets. + * src/types.c: Added X86_WIN32 target support. + + * src/x86/win32.S: New file. Based on sysv.S, but with EH + stuff removed and made to work with CygWin's gas. + +2001-03-26 Bryce McKinlay + + * configure.in: Make target subdirectory in build dir. + * Makefile.am: Override suffix based rules to specify correct output + subdirectory. + * Makefile.in: Rebuilt. + * configure: Rebuilt. + +2001-03-23 Kevin B Hendricks + + * src/powerpc/ppc_closure.S: New file. + * src/powerpc/ffi.c (ffi_prep_args): Fixed ABI compatibility bug + involving long long and register pairs. + (ffi_prep_closure): New function. + (flush_icache): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * include/ffi.h.in (FFI_CLOSURES): Define on PPC. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Added src/powerpc/ppc_closure.S. + (TARGET_SRC_POWERPC): Likewise. + +2001-03-19 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (ffitest_LDFLAGS): New macro. + +2001-03-02 Nick Clifton + + * include/ffi.h.in: Remove RCS ident string. + * include/ffi_mips.h: Remove RCS ident string. + * src/debug.c: Remove RCS ident string. + * src/ffitest.c: Remove RCS ident string. + * src/prep_cif.c: Remove RCS ident string. + * src/types.c: Remove RCS ident string. + * src/alpha/ffi.c: Remove RCS ident string. + * src/alpha/osf.S: Remove RCS ident string. + * src/arm/ffi.c: Remove RCS ident string. + * src/arm/sysv.S: Remove RCS ident string. + * src/mips/ffi.c: Remove RCS ident string. + * src/mips/n32.S: Remove RCS ident string. + * src/mips/o32.S: Remove RCS ident string. + * src/sparc/ffi.c: Remove RCS ident string. + * src/sparc/v8.S: Remove RCS ident string. + * src/sparc/v9.S: Remove RCS ident string. + * src/x86/ffi.c: Remove RCS ident string. + * src/x86/sysv.S: Remove RCS ident string. + +2001-02-08 Joseph S. Myers + + * include/ffi.h.in: Change sourceware.cygnus.com references to + gcc.gnu.org. + +2000-12-09 Richard Henderson + + * src/alpha/ffi.c (ffi_call): Simplify struct return test. + (ffi_closure_osf_inner): Index rather than increment avalue + and arg_types. Give ffi_closure_osf the raw return value type. + * src/alpha/osf.S (ffi_closure_osf): Handle return value type + promotion. + +2000-12-07 Richard Henderson + + * src/raw_api.c (ffi_translate_args): Fix typo. + (ffi_prep_closure): Likewise. + + * include/ffi.h.in [ALPHA]: Define FFI_CLOSURES and + FFI_TRAMPOLINE_SIZE. + * src/alpha/ffi.c (ffi_prep_cif_machdep): Adjust minimal + cif->bytes for new ffi_call_osf implementation. + (ffi_prep_args): Absorb into ... + (ffi_call): ... here. Do all stack allocation here and + avoid a callback function. + (ffi_prep_closure, ffi_closure_osf_inner): New. + * src/alpha/osf.S (ffi_call_osf): Reimplement with no callback. + (ffi_closure_osf): New. + +2000-09-10 Alexandre Oliva + + * config.guess, config.sub, install-sh: Removed. + * ltconfig, ltmain.sh, missing, mkinstalldirs: Likewise. + * Makefile.in: Rebuilt. + + * acinclude.m4: Include libtool macros from the top level. + * aclocal.m4, configure: Rebuilt. + +2000-08-22 Alexandre Oliva + + * configure.in [i*86-*-freebsd*] (TARGET, TARGETDIR): Set. + * configure: Rebuilt. + +2000-05-11 Scott Bambrough + + * libffi/src/arm/sysv.S (ffi_call_SYSV): Doubles are not saved to + memory correctly. Use conditional instructions, not branches where + possible. 
+ +2000-05-04 Tom Tromey + + * configure: Rebuilt. + * configure.in: Match `arm*-*-linux-*'. + From Chris Dornan . + +2000-04-28 Jakub Jelinek + + * Makefile.am (SUBDIRS): Define. + (AM_MAKEFLAGS): Likewise. + (Multilib support.): Add section. + * Makefile.in: Rebuilt. + * ltconfig (extra_compiler_flags, extra_compiler_flags_value): + New variables. Set for gcc using -print-multi-lib. Export them + to libtool. + (sparc64-*-linux-gnu*): Use libsuff 64 for search paths. + * ltmain.sh (B|b|V): Don't throw away gcc's -B, -b and -V options + for -shared links. + (extra_compiler_flags_value, extra_compiler_flags): Check these + for extra compiler options which need to be passed down in + compiler_flags. + +2000-04-16 Anthony Green + + * configure: Rebuilt. + * configure.in: Change i*86-pc-linux* to i*86-*-linux*. + +2000-04-14 Jakub Jelinek + + * include/ffi.h.in (SPARC64): Define for 64bit SPARC builds. + Set SPARC FFI_DEFAULT_ABI based on SPARC64 define. + * src/sparc/ffi.c (ffi_prep_args_v8): Renamed from ffi_prep_args. + Replace all void * sizeofs with sizeof(int). + Only compare type with FFI_TYPE_LONGDOUBLE if LONGDOUBLE is + different than DOUBLE. + Remove FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases (handled elsewhere). + (ffi_prep_args_v9): New function. + (ffi_prep_cif_machdep): Handle V9 ABI and long long on V8. + (ffi_V9_return_struct): New function. + (ffi_call): Handle FFI_V9 ABI from 64bit code and FFI_V8 ABI from + 32bit code (not yet cross-arch calls). + * src/sparc/v8.S: Add struct return delay nop. + Handle long long. + * src/sparc/v9.S: New file. + * src/prep_cif.c (ffi_prep_cif): Return structure pointer + is used on sparc64 only for structures larger than 32 bytes. + Pass by reference for structures is done for structure arguments + larger than 16 bytes. + * src/ffitest.c (main): Use 64bit rint on sparc64. + Run long long tests on sparc. + * src/types.c (FFI_TYPE_POINTER): Pointer is 64bit on alpha and + sparc64. + (FFI_TYPE_LONGDOUBLE): long double is 128 bit aligned to 128 bits + on sparc64. + * configure.in (sparc-*-linux*): New supported target. + (sparc64-*-linux*): Likewise. + * configure: Rebuilt. + * Makefile.am: Add v9.S to SPARC files. + * Makefile.in: Likewise. + (LINK): Surround $(CCLD) into double quotes, so that multilib + compiles work correctly. + +2000-04-04 Alexandre Petit-Bianco + + * configure: Rebuilt. + * configure.in: (i*86-*-solaris*): New libffi target. Patch + proposed by Bryce McKinlay. + +2000-03-20 Tom Tromey + + * Makefile.in: Hand edit for java_raw_api.lo. + +2000-03-08 Bryce McKinlay + + * config.guess, config.sub: Update from the gcc tree. + Fix for PR libgcj/168. + +2000-03-03 Tom Tromey + + * Makefile.in: Fixed ia64 by hand. + + * configure: Rebuilt. + * configure.in (--enable-multilib): New option. + (libffi_basedir): New subst. + (AC_OUTPUT): Added multilib code. + +2000-03-02 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (TARGET_SRC_IA64): Use `ia64', not `alpha', as + directory name. + +2000-02-25 Hans Boehm + + * src/ia64/ffi.c, src/ia64/ia64_flags.h, src/ia64/unix.S: New + files. + * src/raw_api.c (ffi_translate_args): Fixed typo in argument + list. + (ffi_prep_raw_closure): Use ffi_translate_args, not + ffi_closure_translate. + * src/java_raw_api.c: New file. + * src/ffitest.c (closure_test_fn): New function. + (main): Define `rint' as long long on IA64. Added new test when + FFI_CLOSURES is defined. + * include/ffi.h.in (ALIGN): Use size_t, not unsigned. + (ffi_abi): Recognize IA64. + (ffi_raw): Added `flt' field. 
+ Added "Java raw API" code. + * configure.in: Recognize ia64. + * Makefile.am (TARGET_SRC_IA64): New macro. + (libffi_la_common_SOURCES): Added java_raw_api.c. + (libffi_la_SOURCES): Define in IA64 case. + +2000-01-04 Tom Tromey + + * Makefile.in: Rebuilt with newer automake. + +1999-12-31 Tom Tromey + + * Makefile.am (INCLUDES): Added -I$(top_srcdir)/src. + +1999-09-01 Tom Tromey + + * include/ffi.h.in: Removed PACKAGE and VERSION defines and + undefs. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + * configure.in: Pass 3rd argument to AM_INIT_AUTOMAKE. + Use AM_PROG_LIBTOOL (automake 1.4 compatibility). + * acconfig.h: Don't #undef PACKAGE or VERSION. + +1999-08-09 Anthony Green + + * include/ffi.h.in: Try to work around messy header problem + with PACKAGE and VERSION. + + * configure: Rebuilt. + * configure.in: Change version to 2.00-beta. + + * fficonfig.h.in: Rebuilt. + * acconfig.h (FFI_NO_STRUCTS, FFI_NO_RAW_API): Define. + + * src/x86/ffi.c (ffi_raw_call): Rename. + +1999-08-02 Kresten Krab Thorup + + * src/x86/ffi.c (ffi_closure_SYSV): New function. + (ffi_prep_incoming_args_SYSV): Ditto. + (ffi_prep_closure): Ditto. + (ffi_closure_raw_SYSV): Ditto. + (ffi_prep_raw_closure): More ditto. + (ffi_call_raw): Final ditto. + + * include/ffi.h.in: Add definitions for closure and raw API. + + * src/x86/ffi.c (ffi_prep_cif_machdep): Added case for + FFI_TYPE_UINT64. + + * Makefile.am (libffi_la_common_SOURCES): Added raw_api.c + + * src/raw_api.c: New file. + + * include/ffi.h.in (ffi_raw): New type. + (UINT_ARG, SINT_ARG): New defines. + (ffi_closure, ffi_raw_closure): New types. + (ffi_prep_closure, ffi_prep_raw_closure): New declarations. + + * configure.in: Add check for endianness and sizeof void*. + + * src/x86/sysv.S (ffi_call_SYSV): Call fixup routine via argument, + instead of directly. + + * configure: Rebuilt. + +Thu Jul 8 14:28:42 1999 Anthony Green + + * configure.in: Add x86 and powerpc BeOS configurations. + From Makoto Kato . + +1999-05-09 Anthony Green + + * configure.in: Add warning about this being beta code. + Remove src/Makefile.am from the picture. + * configure: Rebuilt. + + * Makefile.am: Move logic from src/Makefile.am. Add changes + to support libffi as a target library. + * Makefile.in: Rebuilt. + + * aclocal.m4, config.guess, config.sub, ltconfig, ltmain.sh: + Upgraded to new autoconf, automake, libtool. + + * README: Tweaks. + + * LICENSE: Update copyright date. + + * src/Makefile.am, src/Makefile.in: Removed. + +1998-11-29 Anthony Green + + * include/ChangeLog: Removed. + * src/ChangeLog: Removed. + * src/mips/ChangeLog: Removed. + * src/sparc/ChangeLog: Remboved. + * src/x86/ChangeLog: Removed. + + * ChangeLog.v1: Created. + +============================================================================= +From the old ChangeLog.libffi file.... + +2011-02-08 Andreas Tobler + + * testsuite/lib/libffi.exp: Tweak for stand-alone mode. + +2009-12-25 Samuli Suominen + + * configure.ac: Undefine _AC_ARG_VAR_PRECIOUS for autoconf 2.64. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/huge_struct.c: Ad x86 XFAILs. + * testsuite/libffi.call/float2.c: Fix dg-excess-errors. 
+ * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-12 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + 
testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-04 Andrew Haley + + * src/powerpc/ffitarget.h: Fix misapplied merge from gcc. + +2009-06-04 Andrew Haley + + * src/mips/o32.S, + src/mips/n32.S: Fix licence formatting. + +2009-06-04 Andrew Haley + + * src/x86/darwin.S: Fix licence formatting. + src/x86/win32.S: Likewise. + src/sh64/sysv.S: Likewise. + src/sh/sysv.S: Likewise. + +2009-06-04 Andrew Haley + + * src/sh64/ffi.c: Remove lint directives. Was missing from merge + of Andreas Tobler's patch from 2006-04-22. + +2009-06-04 Andrew Haley + + * src/sh/ffi.c: Apply missing hunk from Alexandre Oliva's patch of + 2007-03-07. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-12-19 Anthony Green + + * configure.ac: Bump version to 3.0.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-11-11 Anthony Green + + * configure.ac: Bump version to 3.0.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. 
+ (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. + +2008-07-17 Anthony Green + + * configure.ac: Bump version to 3.0.6. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. Add documentation. + * README: Update for new release. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-07-16 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-04-03 Anthony Green + + * libffi.pc.in (Libs): Add -L${libdir}. + * configure.ac: Bump version to 3.0.5. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-04-03 Anthony Green + Xerces Ranby + + * include/ffi.h.in: Wrap definition of target architecture to + protect from double definitions. + +2008-03-22 Moriyoshi Koizumi + + * src/x86/ffi.c (ffi_prep_closure_loc): Fix for bug revealed in + closure_loc_fn0.c. + * testsuite/libffi.call/closure_loc_fn0.c (closure_loc_test_fn0): + New test. + +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/huge_struct.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2008-02-26 Jakub Jelinek + Anthony Green + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. + * src/m68k/sysv.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-26 Anthony Green + Thomas Heller + + * include/ffi.h.in: Change void (*)() to void (*)(void). + +2008-02-26 Anthony Green + Thomas Heller + + * src/alpha/ffi.c: Change void (*)() to void (*)(void). + src/alpha/osf.S, src/arm/ffi.c, src/frv/ffi.c, src/ia64/ffi.c, + src/ia64/unix.S, src/java_raw_api.c, src/m32r/ffi.c, + src/mips/ffi.c, src/pa/ffi.c, src/pa/hpux32.S, src/pa/linux.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/raw_api.c, + src/s390/ffi.c, src/sh/ffi.c, src/sh64/ffi.c, src/sparc/ffi.c, + src/x86/ffi.c, src/x86/unix64.S, src/x86/darwin64.S, + src/x86/ffi64.c: Ditto. + +2008-02-24 Anthony Green + + * configure.ac: Accept openbsd*, not just openbsd. 
+ Bump version to 3.0.4. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-22 Anthony Green + + * README: Clean up list of tested platforms. + +2008-02-22 Anthony Green + + * configure.ac: Bump version to 3.0.3. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. Clean up test docs. + +2008-02-22 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-02-22 Thomas Heller + + * configure.ac: Add x86 OpenBSD support. + * configure: Rebuilt. + +2008-02-21 Thomas Heller + + * README: Change "make test" to "make check". + +2008-02-21 Anthony Green + + * configure.ac: Bump version to 3.0.2. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-21 Björn König + + * src/x86/freebsd.S: New file. + * configure.ac: Add x86 FreeBSD support. + * Makefile.am: Ditto. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.1. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-15 David Daney + + * src/mips/ffi.c: Remove extra '>' from include directive. + (ffi_prep_closure_loc): Use clear_location instead of tramp. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.0. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2008-02-15 Anthony Green + + * man/ffi_call.3, man/ffi_prep_cif.3, man/ffi.3: + Update dates and remove all references to ffi_prep_closure. + * configure.ac: Bump version to 2.99.9. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 Anthony Green + + * man/ffi_prep_closure.3: Delete. + * man/Makefile.am (EXTRA_DIST): Remove ffi_prep_closure.3. + (man_MANS): Ditto. + * man/Makefile.in: Rebuilt. + * configure.ac: Bump version to 2.99.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * include/ffi.h.in LICENSE src/debug.c src/closures.c + src/ffitest.c src/s390/sysv.S src/s390/ffitarget.h + src/types.c src/m68k/ffitarget.h src/raw_api.c src/frv/ffi.c + src/frv/ffitarget.h src/sh/ffi.c src/sh/sysv.S + src/sh/ffitarget.h src/powerpc/ffitarget.h src/pa/ffi.c + src/pa/ffitarget.h src/pa/linux.S src/java_raw_api.c + src/cris/ffitarget.h src/x86/ffi.c src/x86/sysv.S + src/x86/unix64.S src/x86/win32.S src/x86/ffitarget.h + src/x86/ffi64.c src/x86/darwin.S src/ia64/ffi.c + src/ia64/ffitarget.h src/ia64/ia64_flags.h src/ia64/unix.S + src/sparc/ffi.c src/sparc/v9.S src/sparc/ffitarget.h + src/sparc/v8.S src/alpha/ffi.c src/alpha/ffitarget.h + src/alpha/osf.S src/sh64/ffi.c src/sh64/sysv.S + src/sh64/ffitarget.h src/mips/ffi.c src/mips/ffitarget.h + src/mips/n32.S src/mips/o32.S src/arm/ffi.c src/arm/sysv.S + src/arm/ffitarget.h src/prep_cif.c: Update license text. + +2008-02-14 Anthony Green + + * README: Update tested platforms. + * configure.ac: Bump version to 2.99.6. + * configure: Rebuilt. 
+ +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.5. + * configure: Rebuilt. + * Makefile.am (EXTRA_DIST): Add darwin64.S + * Makefile.in: Rebuilt. + * testsuite/lib/libffi-dg.exp: Remove libstdc++ bits from GCC tree. + * LICENSE: Update WARRANTY. + +2008-02-14 Anthony Green + + * libffi.pc.in (libdir): Fix libdir definition. + * configure.ac: Bump version to 2.99.4. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * README: Update. + * libffi.info: New file. + * doc/stamp-vti: New file. + * configure.ac: Bump version to 2.99.3. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * Makefile.am (SUBDIRS): Add man dir. + * Makefile.in: Rebuilt. + * configure.ac: Create Makefile. + * configure: Rebuilt. + * man/ffi_call.3 man/ffi_prep_cif.3 man/ffi_prep_closure.3 + man/Makefile.am man/Makefile.in: New files. + +2008-02-14 Tom Tromey + + * aclocal.m4, Makefile.in, configure, fficonfig.h.in: Rebuilt. + * mdate-sh, texinfo.tex: New files. + * Makefile.am (info_TEXINFOS): New variable. + * doc/libffi.texi: New file. + * doc/version.texi: Likewise. + +2008-02-14 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't compile with -D$(TARGET). + (lib_LTLIBRARIES): Define. + (toolexeclib_LIBRARIES): Undefine. + * Makefile.in: Rebuilt. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + +2008-02-14 Anthony Green + + * libffi.pc.in: Use @PACKAGE_NAME@ and @PACKAGE_VERSION@. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Add ChangeLog.libffi. + * Makefile.in: Rebuilt. + * LICENSE: Update copyright notice. + +2008-02-14 Anthony Green + + * include/Makefile.am (nodist_includes_HEADERS): Define. Don't + distribute ffitarget.h or ffi.h from the build include dir. + * Makefile.in: Rebuilt. + +2008-02-14 Anthony Green + + * include/Makefile.am (includesdir): Install headers under libdir. + (pkgconfigdir): Define. Install libffi.pc. + * include/Makefile.in: Rebuilt. + * libffi.pc.in: Create. + * libtool-version: Increment CURRENT + * configure.ac: Add libffi.pc.in + * configure: Rebuilt. + +2008-02-03 Anthony Green + + * include/Makefile.am (includesdir): Fix header install with + DESTDIR. + * include/Makefile.in: Rebuilt. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-02-01 Anthony Green + + * include/Makefile.am: Fix header installs. + * Makefile.am: Ditto. + * include/Makefile.in: Rebuilt. + * Makefile.in: Ditto. + +2008-02-01 Anthony Green + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL, + FFI_INIT_TRAMPOLINE): Revert my broken changes to twall's last + patch. + +2008-01-31 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am: Ditto. + * Makefile.in, testsuite/Makefile.in: Rebuilt. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-30 Anthony Green + + * Makefile.am, include/Makefile.am: Move headers to + libffi_la_SOURCES for new automake. + * Makefile.in, include/Makefile.in: Rebuilt. 
+ + * testsuite/lib/wrapper.exp: Copied from gcc tree to allow for + execution outside of gcc tree. + * testsuite/lib/target-libpath.exp: Ditto. + + * testsuite/lib/libffi-dg.exp: Many changes to allow for execution + outside of gcc tree. + + +============================================================================= +From the old ChangeLog.libgcj file.... + +2004-01-14 Kelley Cook + + * configure.in: Add in AC_PREREQ(2.13) + +2003-02-20 Alexandre Oliva + + * configure.in: Propagate ORIGINAL_LD_FOR_MULTILIBS to + config.status. + * configure: Rebuilt. + +2002-01-27 Alexandre Oliva + + * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST. + Remove USE_LIBDIR conditional. + * Makefile.am (toolexecdir, toolexeclibdir): Don't override. + * Makefile.in, configure: Rebuilt. + +Mon Aug 9 18:33:38 1999 Rainer Orth + + * include/Makefile.in: Rebuilt. + * Makefile.in: Rebuilt + * Makefile.am (toolexeclibdir): Add $(MULTISUBDIR) even for native + builds. + Use USE_LIBDIR. + + * configure: Rebuilt. + * configure.in (USE_LIBDIR): Define for native builds. + Use lowercase in configure --help explanations. + +1999-08-08 Anthony Green + + * include/ffi.h.in (FFI_FN): Remove `...'. + +1999-08-08 Anthony Green + + * Makefile.in: Rebuilt. + * Makefile.am (AM_CFLAGS): Compile with -fexceptions. + + * src/x86/sysv.S: Add exception handling metadata. + + +============================================================================= + +The libffi version 1 ChangeLog archive. + +Version 1 of libffi had per-directory ChangeLogs. Current and future +versions have a single ChangeLog file in the root directory. The +version 1 ChangeLogs have all been concatenated into this file for +future reference only. + +--- libffi ---------------------------------------------------------------- + +Mon Oct 5 02:17:50 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +Mon Oct 5 01:03:03 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +1998-07-25 Andreas Schwab + + * m68k/ffi.c (ffi_prep_cif_machdep): Use bitmask for cif->flags. + Correctly handle small structures. + (ffi_prep_args): Also handle small structures. + (ffi_call): Pass size of return type to ffi_call_SYSV. + * m68k/sysv.S: Adjust for above changes. Correctly align small + structures in the return value. + + * types.c (uint64, sint64) [M68K]: Change alignment to 4. + +Fri Apr 17 17:26:58 1998 Anthony Green + + * configure.in: Boosted rev. + * configure,Makefile.in,aclocal.m4: Rebuilt. + * README: Boosted rev and added release notes. + +Sun Feb 22 00:50:41 1998 Geoff Keating + + * configure.in: Add PowerPC config bits. + +1998-02-14 Andreas Schwab + + * configure.in: Add m68k config bits. Change AC_CANONICAL_SYSTEM + to AC_CANONICAL_HOST, this is not a compiler. Use $host instead + of $target. Remove AC_CHECK_SIZEOF(char), we already know the + result. Fix argument of AC_ARG_ENABLE. + * configure, fficonfig.h.in: Rebuilt. + +Tue Feb 10 20:53:40 1998 Richard Henderson + + * configure.in: Add Alpha config bits. + +Tue May 13 13:39:20 1997 Anthony Green + + * README: Updated dates and reworded Irix comments. + + * configure.in: Removed AC_PROG_RANLIB. + + * Makefile.in, aclocal.m4, config.guess, config.sub, configure, + ltmain.sh, */Makefile.in: libtoolized again and rebuilt with + automake and autoconf. 
+ +Sat May 10 18:44:50 1997 Tom Tromey + + * configure, aclocal.m4: Rebuilt. + * configure.in: Don't compute EXTRADIST; now handled in + src/Makefile.in. Removed macros implied by AM_INIT_AUTOMAKE. + Don't run AM_MAINTAINER_MODE. + +Thu May 8 14:34:05 1997 Anthony Green + + * missing, ltmain.sh, ltconfig.sh: Created. These are new files + required by automake and libtool. + + * README: Boosted rev to 1.14. Added notes. + + * acconfig.h: Moved PACKAGE and VERSION for new automake. + + * configure.in: Changes for libtool. + + * Makefile.am (check): make test now make check. Uses libtool now. + + * Makefile.in, configure.in, aclocal.h, fficonfig.h.in: Rebuilt. + +Thu May 1 16:27:07 1997 Anthony Green + + * missing: Added file required by new automake. + +Tue Nov 26 14:10:42 1996 Anthony Green + + * acconfig.h: Added USING_PURIFY flag. This is defined when + --enable-purify-safety was used at configure time. + + * configure.in (allsources): Added --enable-purify-safety switch. + (VERSION): Boosted rev to 1.13. + * configure: Rebuilt. + +Fri Nov 22 06:46:12 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.12. + Removed special CFLAGS hack for gcc. + * configure: Rebuilt. + + * README: Boosted rev to 1.12. Added notes. + + * Many files: Cygnus Support changed to Cygnus Solutions. + +Wed Oct 30 11:15:25 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.11. + * configure: Rebuilt. + + * README: Boosted rev to 1.11. Added notes about GNU make. + +Tue Oct 29 12:25:12 1996 Anthony Green + + * configure.in: Fixed -Wall trick. + (VERSION): Boosted rev. + * configure: Rebuilt + + * acconfig.h: Needed for --enable-debug configure switch. + + * README: Boosted rev to 1.09. Added more notes on building + libffi, and LCLint. + + * configure.in: Added --enable-debug switch. Boosted rev to + 1.09. + * configure: Rebuilt + +Tue Oct 15 13:11:28 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.08 + * configure: Rebuilt. + + * README: Added n32 bug fix notes. + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + +Mon Oct 14 10:54:46 1996 Anthony Green + + * README: Added web page reference. + + * configure.in, README: Boosted rev to 1.05 + * configure: Rebuilt. + + * README: Fixed n32 sample code. + +Fri Oct 11 17:09:28 1996 Anthony Green + + * README: Added sparc notes. + + * configure.in, README: Boosted rev to 1.04. + * configure: Rebuilt. + +Thu Oct 10 10:31:03 1996 Anthony Green + + * configure.in, README: Boosted rev to 1.03. + * configure: Rebuilt. + + * README: Added struct notes. + + * Makefile.am (EXTRA_DIST): Added LICENSE to distribution. + * Makefile.in: Rebuilt. + + * README: Removed Linux section. No special notes now + because aggregates arg/return types work. + +Wed Oct 9 16:16:42 1996 Anthony Green + + * README, configure.in (VERSION): Boosted rev to 1.02 + * configure: Rebuilt. + +Tue Oct 8 11:56:33 1996 Anthony Green + + * README (NOTE): Added n32 notes. + + * Makefile.am: Added test production. + * Makefile: Rebuilt + + * README: spell checked! + + * configure.in (VERSION): Boosted rev to 1.01 + * configure: Rebuilt. + +Mon Oct 7 15:50:22 1996 Anthony Green + + * configure.in: Added nasty bit to support SGI tools. + * configure: Rebuilt. + + * README: Added SGI notes. Added note about automake bug. + +Mon Oct 7 11:00:28 1996 Anthony Green + + * README: Rewrote intro, and fixed examples. + +Fri Oct 4 10:19:55 1996 Anthony Green + + * configure.in: -D$TARGET is no longer used as a compiler switch. 
+ It is now inserted into ffi.h at configure time. + * configure: Rebuilt. + + * FFI_ABI and FFI_STATUS are now ffi_abi and ffi_status. + +Thu Oct 3 13:47:34 1996 Anthony Green + + * README, LICENSE: Created. Wrote some docs. + + * configure.in: Don't barf on i586-unknown-linuxaout. + Added EXTRADIST code for "make dist". + * configure: Rebuilt. + + * */Makefile.in: Rebuilt with patched automake. + +Tue Oct 1 17:12:25 1996 Anthony Green + + * Makefile.am, aclocal.m4, config.guess, config.sub, + configure.in, fficonfig.h.in, install-sh, mkinstalldirs, + stamp-h.in: Created + * Makefile.in, configure: Generated + +--- libffi/include -------------------------------------------------------- + +Tue Feb 24 13:09:36 1998 Anthony Green + + * ffi_mips.h: Updated FFI_TYPE_STRUCT_* values based on + ffi.h.in changes. This is a work-around for SGI's "simple" + assembler. + +Sun Feb 22 00:51:55 1998 Geoff Keating + + * ffi.h.in: PowerPC support. + +1998-02-14 Andreas Schwab + + * ffi.h.in: Add m68k support. + (FFI_TYPE_LONGDOUBLE): Make it a separate value. + +Tue Feb 10 20:55:16 1998 Richard Henderson + + * ffi.h.in (SIZEOF_ARG): Use a pointer type by default. + + * ffi.h.in: Alpha support. + +Fri Nov 22 06:48:45 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Cygnus Support -> Cygnus Solutions. + +Wed Nov 20 22:31:01 1996 Anthony Green + + * ffi.h.in: Added ffi_type_void definition. + +Tue Oct 29 12:22:40 1996 Anthony Green + + * Makefile.am (hack_DATA): Always install ffi_mips.h. + + * ffi.h.in: Removed FFI_DEBUG. It's now in the correct + place (acconfig.h). + Added #include for size_t definition. + +Tue Oct 15 17:23:35 1996 Anthony Green + + * ffi.h.in, ffi_common.h, ffi_mips.h: More clean up. + Commented out #define of FFI_DEBUG. + +Tue Oct 15 13:01:06 1996 Anthony Green + + * ffi_common.h: Added bool definition. + + * ffi.h.in, ffi_common.h: Clean up based on LCLint output. + Added funny /*@...@*/ comments to annotate source. + +Mon Oct 14 12:29:23 1996 Anthony Green + + * ffi.h.in: Interface changes based on feedback from Jim + Blandy. + +Fri Oct 11 16:49:35 1996 Anthony Green + + * ffi.h.in: Small change for sparc support. + +Thu Oct 10 14:53:37 1996 Anthony Green + + * ffi_mips.h: Added FFI_TYPE_STRUCT_* definitions for + special structure return types. + +Wed Oct 9 13:55:57 1996 Anthony Green + + * ffi.h.in: Added SIZEOF_ARG definition for X86 + +Tue Oct 8 11:40:36 1996 Anthony Green + + * ffi.h.in (FFI_FN): Added macro for eliminating compiler warnings. + Use it to case your function pointers to the proper type. + + * ffi_mips.h (SIZEOF_ARG): Added magic to fix type promotion bug. + + * Makefile.am (EXTRA_DIST): Added ffi_mips.h to EXTRA_DIST. + * Makefile: Rebuilt. + + * ffi_mips.h: Created. Moved all common mips definitions here. + +Mon Oct 7 10:58:12 1996 Anthony Green + + * ffi.h.in: The SGI assember is very picky about parens. Redefined + some macros to avoid problems. + + * ffi.h.in: Added FFI_DEFAULT_ABI definitions. Also added + externs for pointer, and 64bit integral ffi_types. + +Fri Oct 4 09:51:37 1996 Anthony Green + + * ffi.h.in: Added FFI_ABI member to ffi_cif and changed + function prototypes accordingly. + Added #define @TARGET@. Now programs including ffi.h don't + have to specify this themselves. + +Thu Oct 3 15:36:44 1996 Anthony Green + + * ffi.h.in: Changed ffi_prep_cif's values from void* to void** + + * Makefile.am (EXTRA_DIST): Added EXTRA_DIST for "make dist" + to work. + * Makefile.in: Regenerated. 
+ +Wed Oct 2 10:16:59 1996 Anthony Green + + * Makefile.am: Created + * Makefile.in: Generated + + * ffi_common.h: Added rcsid comment + +Tue Oct 1 17:13:51 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Created + +--- libffi/src ------------------------------------------------------------ + +Mon Oct 5 02:17:50 1998 Anthony Green + + * arm/ffi.c, arm/sysv.S: Created. + + * Makefile.am: Added arm files. + * Makefile.in: Rebuilt. + +Mon Oct 5 01:41:38 1998 Anthony Green + + * Makefile.am (libffi_la_LDFLAGS): Incremented revision. + +Sun Oct 4 16:27:17 1998 Anthony Green + + * alpha/osf.S (ffi_call_osf): Patch for DU assembler. + + * ffitest.c (main): long long and long double return values work + for x86. + +Fri Apr 17 11:50:58 1998 Anthony Green + + * Makefile.in: Rebuilt. + + * ffitest.c (main): Floating point tests not executed for systems + with broken lond double (SunOS 4 w/ GCC). + + * types.c: Fixed x86 alignment info for long long types. + +Thu Apr 16 07:15:28 1998 Anthony Green + + * ffitest.c: Added more notes about GCC bugs under Irix 6. + +Wed Apr 15 08:42:22 1998 Anthony Green + + * ffitest.c (struct5): New test function. + (main): New test with struct5. + +Thu Mar 5 10:48:11 1998 Anthony Green + + * prep_cif.c (initialize_aggregate): Fix assertion for + nested structures. + +Tue Feb 24 16:33:41 1998 Anthony Green + + * prep_cif.c (ffi_prep_cif): Added long double support for sparc. + +Sun Feb 22 00:52:18 1998 Geoff Keating + + * powerpc/asm.h: New file. + * powerpc/ffi.c: New file. + * powerpc/sysv.S: New file. + * Makefile.am: PowerPC port. + * ffitest.c (main): Allow all tests to run even in presence of gcc + bug on PowerPC. + +1998-02-17 Anthony Green + + * mips/ffi.c: Fixed comment typo. + + * x86/ffi.c (ffi_prep_cif_machdep), x86/sysv.S (retfloat): + Fixed x86 long double return handling. + + * types.c: Fixed x86 long double alignment info. + +1998-02-14 Andreas Schwab + + * types.c: Add m68k support. + + * ffitest.c (floating): Add long double parameter. + (return_ll, ldblit): New functions to test long long and long + double return value. + (main): Fix type error in assignment of ts[1-4]_type.elements. + Add tests for long long and long double arguments and return + values. + + * prep_cif.c (ffi_prep_cif) [M68K]: Don't allocate argument for + struct value pointer. + + * m68k/ffi.c, m68k/sysv.S: New files. + * Makefile.am: Add bits for m68k port. Add kludge to work around + automake deficiency. + (test): Don't require "." in $PATH. + * Makefile.in: Rebuilt. + +Wed Feb 11 07:36:50 1998 Anthony Green + + * Makefile.in: Rebuilt. + +Tue Feb 10 20:56:00 1998 Richard Henderson + + * alpha/ffi.c, alpha/osf.S: New files. + * Makefile.am: Alpha port. + +Tue Nov 18 14:12:07 1997 Anthony Green + + * mips/ffi.c (ffi_prep_cif_machdep): Initialize rstruct_flag + for n32. + +Tue Jun 3 17:18:20 1997 Anthony Green + + * ffitest.c (main): Added hack to get structure tests working + correctly. + +Sat May 10 19:06:42 1997 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Explicitly list all distributable + files in subdirs. + (VERSION, CC): Removed. + +Thu May 8 17:19:01 1997 Anthony Green + + * Makefile.am: Many changes for new automake and libtool. + * Makefile.in: Rebuilt. + +Fri Nov 22 06:57:56 1996 Anthony Green + + * ffitest.c (main): Fixed test case for non mips machines. + +Wed Nov 20 22:31:59 1996 Anthony Green + + * types.c: Added ffi_type_void declaration. + +Tue Oct 29 13:07:19 1996 Anthony Green + + * ffitest.c (main): Fixed character constants. 
+ (main): Emit warning for structure test 3 failure on Sun. + + * Makefile.am (VPATH): Fixed VPATH def'n so automake won't + strip it out. + Moved distdir hack from libffi to automake. + (ffitest): Added missing -c for $(COMPILE) (change in automake). + * Makefile.in: Rebuilt. + +Tue Oct 15 13:08:20 1996 Anthony Green + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + + * prep_cif.c (STACK_ARG_SIZE): Improved STACK_ARG_SIZE macro. + Clean up based on LCLint output. Added funny /*@...@*/ comments to + annotate source. + + * ffitest.c, debug.c: Cleaned up code. + +Mon Oct 14 12:26:56 1996 Anthony Green + + * ffitest.c: Changes based on interface changes. + + * prep_cif.c (ffi_prep_cif): Cleaned up interface based on + feedback from Jim Blandy. + +Fri Oct 11 15:53:18 1996 Anthony Green + + * ffitest.c: Reordered tests while porting to sparc. + Made changes to handle lame structure passing for sparc. + Removed calls to fflush(). + + * prep_cif.c (ffi_prep_cif): Added special case for sparc + aggregate type arguments. + +Thu Oct 10 09:56:51 1996 Anthony Green + + * ffitest.c (main): Added structure passing/returning tests. + + * prep_cif.c (ffi_prep_cif): Perform proper initialization + of structure return types if needed. + (initialize_aggregate): Bug fix + +Wed Oct 9 16:04:20 1996 Anthony Green + + * types.c: Added special definitions for x86 (double doesn't + need double word alignment). + + * ffitest.c: Added many tests + +Tue Oct 8 09:19:22 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Fixed assertion. + + * debug.c (ffi_assert): Must return a non void now. + + * Makefile.am: Added test production. + * Makefile: Rebuilt. + + * ffitest.c (main): Created. + + * types.c: Created. Stripped common code out of */ffi.c. + + * prep_cif.c: Added missing stdlib.h include. + + * debug.c (ffi_type_test): Used "a" to eliminate compiler + warnings in non-debug builds. Included ffi_common.h. + +Mon Oct 7 15:36:42 1996 Anthony Green + + * Makefile.am: Added a rule for .s -> .o + This is required by the SGI compiler. + * Makefile: Rebuilt. + +Fri Oct 4 09:51:08 1996 Anthony Green + + * prep_cif.c (initialize_aggregate): Moved abi specification + to ffi_prep_cif(). + +Thu Oct 3 15:37:37 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Changed values from void* to void**. + (initialize_aggregate): Fixed aggregate type initialization. + + * Makefile.am (EXTRA_DIST): Added support code for "make dist". + * Makefile.in: Regenerated. + +Wed Oct 2 11:41:57 1996 Anthony Green + + * debug.c, prep_cif: Created. + + * Makefile.am: Added debug.o and prep_cif.o to OBJ. + * Makefile.in: Regenerated. + + * Makefile.am (INCLUDES): Added missing -I../include + * Makefile.in: Regenerated. + +Tue Oct 1 17:11:51 1996 Anthony Green + + * error.c, Makefile.am: Created. + * Makefile.in: Generated. + +--- libffi/src/x86 -------------------------------------------------------- + +Sun Oct 4 16:27:17 1998 Anthony Green + + * sysv.S (retlongdouble): Fixed long long return value support. + * ffi.c (ffi_prep_cif_machdep): Ditto. + +Wed May 13 04:30:33 1998 Anthony Green + + * ffi.c (ffi_prep_cif_machdep): Fixed long double return value + support. + +Wed Apr 15 08:43:20 1998 Anthony Green + + * ffi.c (ffi_prep_args): small struct support was missing. + +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Mon Dec 2 15:12:58 1996 Tom Tromey + + * sysv.S: Use .balign, for a.out Linux boxes. + +Tue Oct 15 13:06:50 1996 Anthony Green + + * ffi.c: Clean up based on LCLint output. 
+ Added funny /*@...@*/ comments to annotate source. + +Fri Oct 11 16:43:38 1996 Anthony Green + + * ffi.c (ffi_call): Added assertion for bad ABIs. + +Wed Oct 9 13:57:27 1996 Anthony Green + + * sysv.S (retdouble): Fixed double return problems. + + * ffi.c (ffi_call): Corrected fn arg definition. + (ffi_prep_cif_machdep): Fixed double return problems + +Tue Oct 8 12:12:49 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + +Mon Oct 7 15:53:06 1996 Anthony Green + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:54:53 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 10:07:05 1996 Anthony Green + + * ffi.c, sysv.S, objects.mak: Created. + (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep(). + +--- libffi/src/mips ------------------------------------------------------- + +Tue Feb 17 17:18:07 1998 Anthony Green + + * o32.S: Fixed typo in comment. + + * ffi.c (ffi_prep_cif_machdep): Fixed argument processing. + +Thu May 8 16:53:58 1997 Anthony Green + + * o32.s, n32.s: Wrappers for SGI tool support. + + * objects.mak: Removed. + +Tue Oct 29 14:37:45 1996 Anthony Green + + * ffi.c (ffi_prep_args): Changed int z to size_t z. + +Tue Oct 15 13:17:25 1996 Anthony Green + + * n32.S: Fixed bad stack munging. + + * ffi.c: Moved prototypes for ffi_call_?32() to here from + ffi_mips.h because extended_cif is not defined in ffi_mips.h. + +Mon Oct 14 12:42:02 1996 Anthony Green + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 11:22:16 1996 Anthony Green + + * n32.S, ffi.c: Lots of changes to support passing and + returning structures with the n32 calling convention. + + * n32.S: Fixed fn pointer bug. + + * ffi.c (ffi_prep_cif_machdep): Fix for o32 structure + return values. + (ffi_prep_args): Fixed n32 structure passing when structures + partially fit in registers. + +Wed Oct 9 13:49:25 1996 Anthony Green + + * objects.mak: Added n32.o. + + * n32.S: Created. + + * ffi.c (ffi_prep_args): Added magic to support proper + n32 processing. + +Tue Oct 8 10:37:35 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + + * o32.S: This code is only built for o32 compiles. + A lot of the #define cruft has moved to ffi_mips.h. + + * ffi.c (ffi_prep_cif_machdep): Fixed arg flags. Second arg + is only processed if the first is either a float or double. + +Mon Oct 7 15:33:59 1996 Anthony Green + + * o32.S: Modified to compile under each of o32, n32 and n64. + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:53:25 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 17:41:22 1996 Anthony Green + + * o32.S: Removed crufty definitions. + +Wed Oct 2 12:53:42 1996 Anthony Green + + * ffi.c (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved all machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep. Return types + of FFI_TYPE_STRUCT are no different than FFI_TYPE_INT. + +Tue Oct 1 17:11:02 1996 Anthony Green + + * ffi.c, o32.S, object.mak: Created + +--- libffi/src/sparc ------------------------------------------------------ + +Tue Feb 24 16:33:18 1998 Anthony Green + + * ffi.c (ffi_prep_args): Added long double support. 
+ +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Thu May 1 16:07:56 1997 Anthony Green + + * v8.S: Fixed minor portability problem reported by + Russ McManus . + +Tue Nov 26 14:12:43 1996 Anthony Green + + * v8.S: Used STACKFRAME define elsewhere. + + * ffi.c (ffi_prep_args): Zero out space when USING_PURIFY + is set. + (ffi_prep_cif_machdep): Allocate the correct stack frame + space for functions with < 6 args. + +Tue Oct 29 15:08:55 1996 Anthony Green + + * ffi.c (ffi_prep_args): int z is now size_t z. + +Mon Oct 14 13:31:24 1996 Anthony Green + + * v8.S (ffi_call_V8): Gordon rewrites this again. It looks + great now. + + * ffi.c (ffi_call): The comment about hijacked registers + is no longer valid after gordoni hacked v8.S. + + * v8.S (ffi_call_V8): Rewrote with gordoni. Much simpler. + + * v8.S, ffi.c: ffi_call() had changed to accept more than + two args, so v8.S had to change (because it hijacks incoming + arg registers). + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 17:48:16 1996 Anthony Green + + * ffi.c, v8.S, objects.mak: Created. + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog.old b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog.old new file mode 100644 index 0000000..8de1ca7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog.old @@ -0,0 +1,7407 @@ +Libffi change logs used to be maintained in separate ChangeLog files. +These days we generate them directly from the git commit messages. +The old ChangeLog files are saved here in order to maintain the historical +record. + +============================================================================= +From the old ChangeLog.libffi-3.1 file... + +2014-03-16 Josh Triplett + + * ChangeLog: Archive to ChangeLog.libffi-3.1 and delete. Future + changelogs will come from git, with autogenerated snapshots shipped in + distributed tarballs. + +2014-03-16 Josh Triplett + + Add support for stdcall, thiscall, and fastcall on non-Windows + x86-32. + + Linux supports the stdcall calling convention, either via + functions explicitly declared with the stdcall attribute, or via + code compiled with -mrtd which effectively makes stdcall the + default. + + This introduces FFI_STDCALL, FFI_THISCALL, and FFI_FASTCALL on + non-Windows x86-32 platforms, as non-default calling conventions. + + * Makefile.am: Compile in src/x86/win32.S on non-Windows x86-32. + * src/x86/ffitarget.h: Add FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32. Increase trampoline size to + accomodate these calling conventions, and unify some ifdeffery. + * src/x86/ffi.c: Add support for FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32 platforms; update ifdeffery. + * src/x86/win32.S: Support compiling on non-Windows x86-32 + platforms. On those platforms, avoid redefining the SYSV symbols + already provided by src/x86/sysv.S. + * testsuite/libffi.call/closure_stdcall.c: Run on non-Windows. + #define __stdcall if needed. + * testsuite/libffi.call/closure_thiscall.c: Run on non-Windows. + #define __fastcall if needed. + * testsuite/libffi.call/fastthis1_win32.c: Run on non-Windows. + * testsuite/libffi.call/fastthis2_win32.c: Ditto. + * testsuite/libffi.call/fastthis3_win32.c: Ditto. + * testsuite/libffi.call/many2_win32.c: Ditto. + * testsuite/libffi.call/many_win32.c: Ditto. + * testsuite/libffi.call/strlen2_win32.c: Ditto. + * testsuite/libffi.call/strlen_win32.c: Ditto. 
+ * testsuite/libffi.call/struct1_win32.c: Ditto. + * testsuite/libffi.call/struct2_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * prep_cif.c: Remove unnecessary ifdef for X86_WIN32. + ffi_prep_cif_core had a special case for X86_WIN32, checking for + FFI_THISCALL in addition to the FFI_FIRST_ABI-to-FFI_LAST_ABI + range before returning FFI_BAD_ABI. However, on X86_WIN32, + FFI_THISCALL already falls in that range, making the special case + unnecessary. Remove it. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/closure_thiscall.c: Remove fragile stack + pointer checks. These files included inline assembly to save the + stack pointer before and after the call, and compare the values. + However, compilers can and do leave the stack in different states + for these two pieces of inline assembly, such as by saving a + temporary value on the stack across the call; observed with gcc + -Os, and verified as spurious through careful inspection of + disassembly. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/many.c: Avoid spurious failure due to + excess floating-point precision. + * testsuite/libffi.call/many_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * libtool-ldflags: Re-add. + +2014-03-16 Josh Triplett + + * Makefile.in, aclocal.m4, compile, config.guess, config.sub, + configure, depcomp, include/Makefile.in, install-sh, + libtool-ldflags, ltmain.sh, m4/libtool.m4, m4/ltoptions.m4, + m4/ltsugar.m4, m4/ltversion.m4, m4/lt~obsolete.m4, + man/Makefile.in, mdate-sh, missing, testsuite/Makefile.in: Delete + autogenerated files from version control. + * .gitignore: Add autogenerated files. + * autogen.sh: New script to generate the autogenerated files. + * README: Document requirement to run autogen.sh when building + directly from version control. + * .travis.yml: Run autogen.sh + +2014-03-14 Anthony Green + + * configure, Makefile.in: Rebuilt. + +2014-03-10 Mike Hommey + + * configure.ac: Allow building for mipsel with Android NDK r8. + * Makefile.am (AM_MAKEFLAGS): Replace double quotes with single + quotes. + +2014-03-10 Landry Breuil + + * configure.ac: Ensure the linker supports @unwind sections in libffi. + +2014-03-01 Anthony Green + + * Makefile.am (EXTRA_DIST): Replace old scripts with + generate-darwin-source-and-headers.py. + * Makefile.in: Rebuilt. + +2014-02-28 Anthony Green + + * Makefile.am (AM_CFLAGS): Reintroduce missing -DFFI_DEBUG for + --enable-debug builds. + * Makefile.in: Rebuilt. + +2014-02-28 Makoto Kato + + * src/closures.c: Fix build failure when using clang for Android. + +2014-02-28 Marcin Wojdyr + + * libffi.pc.in (toolexeclibdir): use -L${toolexeclibdir} instead + of -L${libdir}. + +2014-02-28 Paulo Pizarro + + * src/bfin/sysv.S: Calling functions in shared libraries requires + considering the GOT. + +2014-02-28 Josh Triplett + + * src/x86/ffi64.c (classify_argument): Handle case where + FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE. + +2014-02-28 Anthony Green + + * ltmain.sh: Generate with libtool-2.4.2.418. + * m4/libtool.m4, m4/ltoptions.m4, m4/ltversion.m4: Ditto. + * configure: Rebuilt. + +2014-02-28 Dominik Vogt + + * configure.ac (AC_ARG_ENABLE struct): Fix typo in help + message. + (AC_ARG_ENABLE raw_api): Ditto. + * configure, fficonfig.h.in: Rebuilt. + +2014-02-28 Will Newton + + * src/arm/sysv.S: Initialize IP register with FP. + +2014-02-28 Yufeng Zhang + + * src/aarch64/sysv.S (ffi_closure_SYSV): Use x29 as the + main CFA reg; update cfi_rel_offset. 
+ +2014-02-15 Marcus Comstedt + + * src/powerpc/ffi_linux64.c, src/powerpc/linux64_closure.S: Remove + assumption on contents of r11 in closure. + +2014-02-09 Heiher + + * src/mips/n32.S: Fix call floating point va function. + +2014-01-21 Zachary Waldowski + + * src/aarch64/ffi.c: Fix missing semicolons on assertions under + debug mode. + +2013-12-30 Zachary Waldowski + + * .gitignore: Exclude darwin_* generated source and build_* trees. + * src/aarch64/ffi.c, src/arm/ffi.c, src/x86/ffi.c: Inhibit Clang + previous prototype warnings. + * src/arm/ffi.c: Prevent NULL dereference, fix short type warning + * src/dlmalloc.c: Fix warnings from set_segment_flags return type, + and the native use of size_t for malloc on platforms + * src/arm/sysv.S: Use unified syntax. Clang clean-ups for + ARM_FUNC_START. + * generate-osx-source-and-headers.py: Remove. + * build-ios.sh: Remove. + * libffi.xcodeproj/project.pbxproj: Rebuild targets. Include + x86_64+aarch64 pieces in library. Export headers properly. + * src/x86/ffi64.c: More Clang warning clean-ups. + * src/closures.c (open_temp_exec_file_dir): Use size_t. + * src/prep_cif.c (ffi_prep_cif_core): Cast ALIGN result. + * src/aarch64/sysv.S: Use CNAME for global symbols. Only use + .size for ELF targets. + * src/aarch64/ffi.c: Clean up for double == long double. Clean up + from Clang warnings. Use Clang cache invalidation builtin. Use + size_t in place of unsigned in many places. Accommodate for + differences in Apple AArch64 ABI. + +2013-12-02 Daniel Rodríguez Troitiño + + * generate-darwin-source-and-headers.py: Clean up, modernize, + merged version of previous scripts. + +2013-11-21 Anthony Green + + * configure, Makefile.in, include/Makefile.in, include/ffi.h.in, + man/Makefile.in, testsuite/Makefile.in, fficonfig.h.in: Rebuilt. + +2013-11-21 Alan Modra + + * Makefile.am (EXTRA_DIST): Add new src/powerpc files. + (nodist_libffi_la_SOURCES ): Likewise. + * configure.ac (HAVE_LONG_DOUBLE_VARIANT): Define for powerpc. + * include/ffi.h.in (ffi_prep_types): Declare. + * src/prep_cif.c (ffi_prep_cif_core): Call ffi_prep_types. + * src/types.c (FFI_NONCONST_TYPEDEF): Define and use for + HAVE_LONG_DOUBLE_VARIANT. + * src/powerpc/ffi_powerpc.h: New file. + * src/powerpc/ffi.c: Split into.. + * src/powerpc/ffi_sysv.c: ..new file, and.. + * src/powerpc/ffi_linux64.c: ..new file, rewriting parts. + * src/powerpc/ffitarget.h (enum ffi_abi): Rewrite powerpc ABI + selection as bits controlling features. + * src/powerpc/linux64.S: For consistency, use POWERPC64 rather + than __powerpc64__. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. Move .note.FNU-stack + inside guard. + * src/powerpc/sysv.S: Likewise. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + +2013-11-20 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use + NUM_FPR_ARG_REGISTERS64 and NUM_GPR_ARG_REGISTERS64 not their + 32-bit versions for 64-bit code. + * src/powerpc/linux64_closure.S: Don't use the return value area + as a parameter save area on ELFv2. + +2013-11-18 Iain Sandoe + + * src/powerpc/darwin.S (EH): Correct use of pcrel FDE encoding. + * src/powerpc/darwin_closure.S (EH): Likewise. Modernise picbase + labels. + +2013-11-18 Anthony Green + + * src/arm/ffi.c (ffi_call): Hoist declaration of temp to top of + function. + * src/arm/ffi.c (ffi_closure_inner): Moderize function declaration + to appease compiler. + Thanks for Gregory P. Smith . 
+ +2013-11-18 Anthony Green + + * README (tested): Mention PowerPC ELFv2. + +2013-11-16 Alan Modra + + * src/powerpc/ppc_closure.S: Move errant #endif to where it belongs. + Don't bl .Luint128. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use #if _CALL_ELF + test to select parameter save sizing for ELFv2 vs. ELFv1. + * src/powerpc/ffitarget.h (FFI_V2_TYPE_FLOAT_HOMOG, + FFI_V2_TYPE_DOUBLE_HOMOG, FFI_V2_TYPE_SMALL_STRUCT): Define. + (FFI_TRAMPOLINE_SIZE): Define variant for ELFv2. + * src/powerpc/ffi.c (FLAG_ARG_NEEDS_PSAVE): Define. + (discover_homogeneous_aggregate): New function. + (ffi_prep_args64): Adjust start of param save area for ELFv2. + Handle homogenous floating point struct parms. + (ffi_prep_cif_machdep_core): Adjust space calculation for ELFv2. + Handle ELFv2 return values. Set FLAG_ARG_NEEDS_PSAVE. Handle + homogenous floating point structs. + (ffi_call): Increase size of smst_buffer for ELFv2. Handle ELFv2. + (flush_icache): Compile for ELFv2. + (ffi_prep_closure_loc): Set up ELFv2 trampoline. + (ffi_closure_helper_LINUX64): Don't return all structs directly + to caller. Handle homogenous floating point structs. Handle + ELFv2 struct return values. + * src/powerpc/linux64.S (ffi_call_LINUX64): Set up r2 for + ELFv2. Adjust toc save location. Call function pointer using + r12. Handle FLAG_RETURNS_SMST. Don't predict branches. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Set up r2 + for ELFv2. Define ELFv2 versions of STACKFRAME, PARMSAVE, and + RETVAL. Handle possibly missing parameter save area. Handle + ELFv2 return values. + (.note.GNU-stack): Move inside outer #ifdef. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Revert 2013-02-08 + change. Do not consume an int arg when returning a small struct + for FFI_SYSV ABI. + (ffi_call): Only use bounce buffer when FLAG_RETURNS_SMST. + Properly copy bounce buffer to destination. + * src/powerpc/sysv.S: Revert 2013-02-08 change. + * src/powerpc/ppc_closure.S: Remove stray '+'. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Align struct parameters + according to __STRUCT_PARM_ALIGN__. + (ffi_prep_cif_machdep_core): Likewise. + (ffi_closure_helper_LINUX64): Likewise. + +2013-11-16 Alan Modra + + * src/powerpc/linux64.S (ffi_call_LINUX64): Tweak restore of r28. + (.note.GNU-stack): Move inside outer #ifdef. + * src/powerpc/linux64_closure.S (STACKFRAME, PARMSAVE, + RETVAL): Define and use throughout. + (ffi_closure_LINUX64): Save fprs before buying stack. + (.note.GNU-stack): Move inside outer #ifdef. + +2013-11-16 Alan Modra + + * src/powerpc/ffitarget.h (FFI_TARGET_SPECIFIC_VARIADIC): Define. + (FFI_EXTRA_CIF_FIELDS): Define. + * src/powerpc/ffi.c (ffi_prep_args64): Save fprs as per the + ABI, not to both fpr and param save area. + (ffi_prep_cif_machdep_core): Renamed from ffi_prep_cif_machdep. + Keep initial flags. Formatting. Remove dead FFI_LINUX_SOFT_FLOAT + code. + (ffi_prep_cif_machdep, ffi_prep_cif_machdep_var): New functions. + (ffi_closure_helper_LINUX64): Pass floating point as per ABI, + not to both fpr and parameter save areas. + + * libffi/testsuite/libffi.call/cls_double_va.c (main): Correct + function cast and don't call ffi_prep_cif. + * libffi/testsuite/libffi.call/cls_longdouble_va.c (main): Likewise. + +2013-11-15 Andrew Haley + + * doc/libffi.texi (Closure Example): Fix the sample code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. 
+ +2013-11-15 Andrew Haley + + * testsuite/libffi.call/va_struct1.c (main): Fix broken test. + * testsuite/libffi.call/cls_uint_va.c (cls_ret_T_fn): Likewise + * testsuite/libffi.call/cls_struct_va1.c (test_fn): Likewise. + * testsuite/libffi.call/va_1.c (main): Likewise. + +2013-11-14 David Schneider + + * src/arm/ffi.c: Fix register allocation for mixed float and + doubles. + * testsuite/libffi.call/cls_many_mixed_float_double.c: Testcase + for many mixed float and double arguments. + +2013-11-13 Alan Modra + + * doc/libffi.texi (Simple Example): Correct example code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-13 Anthony Green + + * include/ffi_common.h: Respect HAVE_ALLOCA_H for GNU compiler + based build. (Thanks to tmr111116 on github) + +2013-11-09 Anthony Green + + * m4/libtool.m4: Refresh. + * configure, Makefile.in: Rebuilt. + * README: Add more notes about next release. + +2013-11-09 Shigeharu TAKENO + + * m4/ax_gcc_archflag.m4 (ax_gcc_arch): Don't recognize + UltraSPARC-IIi as ultrasparc3. + +2013-11-06 Mark Kettenis + + * src/x86/freebsd.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2013-11-06 Konstantin Belousov + + * src/x86/freebsd.S (ffi_closure_raw_SYSV): Mark the assembler + source as not requiring executable stack. + +2013-11-02 Anthony Green + + * doc/libffi.texi (The Basics): Clarify return value buffer size + requirements. Also, NULL result buffer pointers are no longer + supported. + * doc/libffi.info: Rebuilt. + +2013-11-02 Mischa Jonker + + * Makefile.am (nodist_libffi_la_SOURCES): Fix build error. + * Makefile.in: Rebuilt. + +2013-11-02 David Schneider + + * src/arm/ffi.c: more robust argument handling for closures on arm hardfloat + * testsuite/libffi.call/many_mixed.c: New file. + * testsuite/libffi.call/cls_many_mixed_args.c: More tests. + +2013-11-02 Vitaly Budovski + + * src/x86/ffi.c (ffi_prep_cif_machdep): Don't align stack for win32. + +2013-10-23 Mark H Weaver + + * src/mips/ffi.c: Fix handling of uint32_t arguments on the + MIPS N32 ABI. + +2013-10-13 Sandra Loosemore + + * README: Add Nios II to table of supported platforms. + * Makefile.am (EXTRA_DIST): Add nios2 files. + (nodist_libffi_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + * configure.ac (nios2*-linux*): New host. + (NIOS2): Add AM_CONDITIONAL. + * configure: Regenerated. + * src/nios2/ffi.c: New. + * src/nios2/ffitarget.h: New. + * src/nios2/sysv.S: New. + * src/prep_cif.c (initialize_aggregate): Handle extra structure + alignment via FFI_AGGREGATE_ALIGNMENT. + (ffi_prep_cif_core): Conditionalize structure return for NIOS2. + +2013-10-10 Sandra Loosemore + + * testsuite/libffi.call/cls_many_mixed_args.c (cls_ret_double_fn): + Fix uninitialized variable. + +2013-10-11 Marcus Shawcroft + + * testsuite/libffi.call/many.c (many): Replace * with +. + +2013-10-08 Ondřej Bílka + + * src/aarch64/ffi.c, src/aarch64/sysv.S, src/arm/ffi.c, + src/arm/gentramp.sh, src/bfin/sysv.S, src/closures.c, + src/dlmalloc.c, src/ia64/ffi.c, src/microblaze/ffi.c, + src/microblaze/sysv.S, src/powerpc/darwin_closure.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/sh/ffi.c, + src/tile/tile.S, testsuite/libffi.call/nested_struct11.c: Fix + spelling errors. + +2013-10-08 Anthony Green + + * aclocal.m4, compile, config.guess, config.sub, depcomp, + install-sh, mdate-sh, missing, texinfo.tex: Update from upstream. + * configure.ac: Update version to 3.0.14-rc0. 
+ * Makefile.in, configure, Makefile.in, include/Makefile.in, + man/Makefile.in, testsuite/Makefile.in: Rebuilt. + * README: Mention M88K and VAX. + +2013-07-15 Miod Vallat + + * Makefile.am, + configure.ac, + src/m88k/ffi.c, + src/m88k/ffitarget.h, + src/m88k/obsd.S, + src/vax/elfbsd.S, + src/vax/ffi.c, + src/vax/ffitarget.h: Add m88k and vax support. + +2013-06-24 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Move var declaration + before statements. + (ffi_prep_args64): Support little-endian. + (ffi_closure_helper_SYSV, ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Likewise. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Likewise. + +2013-06-12 Mischa Jonker + + * configure.ac: Add support for ARC. + * Makefile.am: Likewise. + * README: Add ARC details. + * src/arc/arcompact.S: New. + * src/arc/ffi.c: Likewise. + * src/arc/ffitarget.h: Likewise. + +2013-03-28 David Schneider + + * src/arm/ffi.c: Fix support for ARM hard-float calling convention. + * src/arm/sysv.S: call different methods for SYSV and VFP ABIs. + * testsuite/libffi.call/cls_many_mixed_args.c: testcase for a closure with + mixed arguments, many doubles. + * testsuite/libffi.call/many_double.c: testcase for calling a function using + more than 8 doubles. + * testcase/libffi.call/many.c: use absolute value to check result against an + epsilon + +2013-03-17 Anthony Green + + * README: Update for 3.0.13. + * configure.ac: Ditto. + * configure: Rebuilt. + * doc/*: Update version. + +2013-03-17 Dave Korn + + * src/closures.c (is_emutramp_enabled + [!FFI_MMAP_EXEC_EMUTRAMP_PAX]): Move default definition outside + enclosing #if scope. + +2013-03-17 Anthony Green + + * configure.ac: Only modify toolexecdir in certain cases. + * configure: Rebuilt. + +2013-03-16 Gilles Talis + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Don't use + fparg_count,etc on __NO_FPRS__ targets. + +2013-03-16 Alan Hourihane + + * src/m68k/sysv.S (epilogue): Don't use extb instruction on + m680000 machines. + +2013-03-16 Alex Gaynor + + * src/x86/ffi.c (ffi_prep_cif_machdep): Always align stack. + +2013-03-13 Markos Chandras + + * configure.ac: Add support for Imagination Technologies Meta. + * Makefile.am: Likewise. + * README: Add Imagination Technologies Meta details. + * src/metag/ffi.c: New. + * src/metag/ffitarget.h: Likewise. + * src/metag/sysv.S: Likewise. + +2013-02-24 Andreas Schwab + + * doc/libffi.texi (Structures): Fix missing category argument of + @deftp. + +2013-02-11 Anthony Green + + * configure.ac: Update release number to 3.0.12. + * configure: Rebuilt. + * README: Update release info. + +2013-02-10 Anthony Green + + * README: Add Moxie. + * src/moxie/ffi.c: Created. + * src/moxie/eabi.S: Created. + * src/moxie/ffitarget.h: Created. + * Makefile.am (nodist_libffi_la_SOURCES): Add Moxie. + * Makefile.in: Rebuilt. + * configure.ac: Add Moxie. + * configure: Rebuilt. + * testsuite/libffi.call/huge_struct.c: Disable format string + warnings for moxie*-*-elf tests. + +2013-02-10 Anthony Green + + * Makefile.am (LTLDFLAGS): Fix reference. + * Makefile.in: Rebuilt. + +2013-02-10 Anthony Green + + * README: Update supported platforms. Update test results link. + +2013-02-09 Anthony Green + + * testsuite/libffi.call/negint.c: Remove forced -O2. + * testsuite/libffi.call/many2.c (foo): Remove GCCism. + * testsuite/libffi.call/ffitest.h: Add default PRIuPTR definition. + + * src/sparc/v8.S (ffi_closure_v8): Import ancient ulonglong + closure return type fix developed by Martin v. 
Löwis for cpython + fork. + +2013-02-08 Andreas Tobler + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix small struct + support. + * src/powerpc/sysv.S: Ditto. + +2013-02-08 Anthony Green + + * testsuite/libffi.call/cls_longdouble.c: Remove xfail for + arm*-*-*. + +2013-02-08 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Fix cache flushing for GCC. + +2013-02-08 Matthias Klose + + * man/ffi_prep_cif.3: Clean up for debian linter. + +2013-02-08 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Account for FP args pushed + on the stack. + +2013-02-08 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am (EXTRA_DIST): Ditto. + * Makefile.in: Rebuilt. + +2013-02-08 Anthony Green + + * configure.ac: Move sparc asm config checks to within functions + for compatibility with sun tools. + * configure: Rebuilt. + * src/sparc/ffi.c (ffi_prep_closure_loc): Flush cache on v9 + systems. + * src/sparc/v8.S (ffi_flush_icache): Implement a sparc v9 cache + flusher. + +2013-02-08 Nathan Rossi + + * src/microblaze/ffi.c (ffi_closure_call_SYSV): Fix handling of + small big-endian structures. + (ffi_prep_args): Ditto. + +2013-02-07 Anthony Green + + * src/sparc/v8.S (ffi_call_v8): Fix typo from last patch + (effectively hiding ffi_call_v8). + +2013-02-07 Anthony Green + + * configure.ac: Update bug reporting address. + * configure.in: Rebuild. + + * src/sparc/v8.S (ffi_flush_icache): Out-of-line cache flusher for + Sun compiler. + * src/sparc/ffi.c (ffi_call): Remove warning. + Call ffi_flush_icache for non-GCC builds. + (ffi_prep_closure_loc): Use ffi_flush_icache. + + * Makefile.am (EXTRA_DIST): Add libtool-ldflags. + * Makefile.in: Rebuilt. + * libtool-ldflags: New file. + +2013-02-07 Daniel Schepler + + * configure.ac: Correctly identify x32 systems as 64-bit. + * m4/libtool.m4: Remove libtool expr error. + * aclocal.m4, configure: Rebuilt. + +2013-02-07 Anthony Green + + * configure.ac: Fix GCC usage test. + * configure: Rebuilt. + * README: Mention LLVM/GCC x86_64 issue. + * testsuite/Makefile.in: Rebuilt. + +2013-02-07 Anthony Green + + * testsuite/libffi.call/cls_double_va.c (main): Replace // style + comments with /* */ for xlc compiler. + * testsuite/libffi.call/stret_large.c (main): Ditto. + * testsuite/libffi.call/stret_large2.c (main): Ditto. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/huge_struct.c (main): Ditto. + * testsuite/libffi.call/float_va.c (main): Ditto. + * testsuite/libffi.call/cls_struct_va1.c (main): Ditto. + * testsuite/libffi.call/cls_pointer_stack.c (main): Ditto. + * testsuite/libffi.call/cls_pointer.c (main): Ditto. + * testsuite/libffi.call/cls_longdouble_va.c (main): Ditto. + +2013-02-06 Anthony Green + + * man/ffi_prep_cif.3: Clean up for debian lintian checker. + +2013-02-06 Anthony Green + + * Makefile.am (pkgconfigdir): Add missing pkgconfig install bits. + * Makefile.in: Rebuild. + +2013-02-02 Mark H Weaver + + * src/x86/ffi64.c (ffi_call): Sign-extend integer arguments passed + via general purpose registers. + +2013-01-21 Nathan Rossi + + * README: Add MicroBlaze details. + * Makefile.am: Add MicroBlaze support. + * configure.ac: Likewise. + * src/microblaze/ffi.c: New. + * src/microblaze/ffitarget.h: Likewise. + * src/microblaze/sysv.S: Likewise. + +2013-01-21 Nathan Rossi + * testsuite/libffi.call/return_uc.c: Fixed issue. + +2013-01-21 Chris Zankel + + * README: Add Xtensa support. + * Makefile.am: Likewise. + * configure.ac: Likewise. + * Makefile.in Regenerate. 
+ * configure: Likewise. + * src/prep_cif.c: Handle Xtensa. + * src/xtensa: New directory. + * src/xtensa/ffi.c: New file. + * src/xtensa/ffitarget.h: Ditto. + * src/xtensa/sysv.S: Ditto. + +2013-01-11 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Replace // style + comments with /* */ for xlc compiler. + * src/powerpc/aix.S (ffi_call_AIX): Ditto. + * testsuite/libffi.call/ffitest.h (allocate_mmap): Delete + deprecated inline function. + * testsuite/libffi.special/ffitestcxx.h: Ditto. + * README: Add update for AIX support. + +2013-01-11 Anthony Green + + * configure.ac: Robustify pc relative reloc check. + * m4/ax_cc_maxopt.m4: Don't -malign-double. This is an ABI + changing option for 32-bit x86. + * aclocal.m4, configure: Rebuilt. + * README: Update supported target list. + +2013-01-10 Anthony Green + + * README (tested): Add Compiler column to table. + +2013-01-10 Anthony Green + + * src/x86/ffi64.c (struct register_args): Make sse array and array + of unions for sunpro compiler compatibility. + +2013-01-10 Anthony Green + + * configure.ac: Test target platform size_t size. Handle both 32 + and 64-bit builds for x86_64-* and i?86-* targets (allowing for + CFLAG option to change default settings). + * configure, aclocal.m4: Rebuilt. + +2013-01-10 Anthony Green + + * testsuite/libffi.special/special.exp: Only run exception + handling tests when using GNU compiler. + + * m4/ax_compiler_vendor.m4: New file. + * configure.ac: Test for compiler vendor and don't use + AX_CFLAGS_WARN_ALL with the sun compiler. + * aclocal.m4, configure: Rebuilt. + +2013-01-10 Anthony Green + + * include/ffi_common.h: Don't use GCCisms to define types when + building with the SUNPRO compiler. + +2013-01-10 Anthony Green + + * configure.ac: Put local.exp in the right place. + * configure: Rebuilt. + + * src/x86/ffi.c: Update comment about regparm function attributes. + * src/x86/sysv.S (ffi_closure_SYSV): The SUNPRO compiler requires + that all function arguments be passed on the stack (no regparm + support). + +2013-01-08 Anthony Green + + * configure.ac: Generate local.exp. This sets CC_FOR_TARGET + when we are using the vendor compiler. + * testsuite/Makefile.am (EXTRA_DEJAGNU_SITE_CONFIG): Point to + ../local.exp. + * configure, testsuite/Makefile.in: Rebuilt. + + * testsuite/libffi.call/call.exp: Run tests with different + options, depending on whether or not we are using gcc or the + vendor compiler. + * testsuite/lib/libffi.exp (libffi-init): Set using_gcc based on + whether or not we are building/testing with gcc. + +2013-01-08 Anthony Green + + * configure.ac: Switch x86 solaris target to X86 by default. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * configure.ac: Fix test for read-only eh_frame. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * src/x86/sysv.S, src/x86/unix64.S: Only emit DWARF unwind info + when building with the GNU toolchain. + * testsuite/libffi.call/ffitest.h (CHECK): Fix for Solaris vendor + compiler. + +2013-01-07 Thorsten Glaser + + * testsuite/libffi.call/cls_uchar_va.c, + testsuite/libffi.call/cls_ushort_va.c, + testsuite/libffi.call/va_1.c: Testsuite fixes. + +2013-01-07 Thorsten Glaser + + * src/m68k/ffi.c (CIF_FLAGS_SINT8, CIF_FLAGS_SINT16): Define. + (ffi_prep_cif_machdep): Fix 8-bit and 16-bit signed calls. + * src/m68k/sysv.S (ffi_call_SYSV, ffi_closure_SYSV): Ditto. + +2013-01-04 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't automatically add -fexceptions + and -Wall. This is set in the configure script after testing for + GCC. 
+ * Makefile.in: Rebuilt. + +2013-01-02 rofl0r + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix build error on ppc + when long double == double. + +2013-01-02 Reini Urban + + * Makefile.am (libffi_la_LDFLAGS): Add -no-undefined to LDFLAGS + (required for shared libs on cygwin/mingw). + * Makefile.in: Rebuilt. + +2012-10-31 Alan Modra + + * src/powerpc/linux64_closure.S: Add new ABI support. + * src/powerpc/linux64.S: Likewise. + +2012-10-30 Magnus Granberg + Pavel Labushev + + * configure.ac: New options pax_emutramp + * configure, fficonfig.h.in: Regenerated + * src/closures.c: New function emutramp_enabled_check() and + checks. + +2012-10-30 Frederick Cheung + + * configure.ac: Enable FFI_MAP_EXEC_WRIT for Darwin 12 (mountain + lion) and future version. + * configure: Rebuild. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * README: Add details of aarch64 port. + * src/aarch64/ffi.c: New. + * src/aarch64/ffitarget.h: Likewise. + * src/aarch64/sysv.S: Likewise. + * Makefile.am: Support aarch64. + * configure.ac: Support aarch64. + * Makefile.in, configure: Rebuilt. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * testsuite/lib/libffi.exp: Add support for aarch64. + * testsuite/libffi.call/cls_struct_va1.c: New. + * testsuite/libffi.call/cls_uchar_va.c: Likewise. + * testsuite/libffi.call/cls_uint_va.c: Likewise. + * testsuite/libffi.call/cls_ulong_va.c: Likewise. + * testsuite/libffi.call/cls_ushort_va.c: Likewise. + * testsuite/libffi.call/nested_struct11.c: Likewise. + * testsuite/libffi.call/uninitialized.c: Likewise. + * testsuite/libffi.call/va_1.c: Likewise. + * testsuite/libffi.call/va_struct1.c: Likewise. + * testsuite/libffi.call/va_struct2.c: Likewise. + * testsuite/libffi.call/va_struct3.c: Likewise. + +2012-10-12 Walter Lee + + * Makefile.am: Add TILE-Gx/TILEPro support. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + * src/prep_cif.c (ffi_prep_cif_core): Handle TILE-Gx/TILEPro. + * src/tile: New directory. + * src/tile/ffi.c: New file. + * src/tile/ffitarget.h: Ditto. + * src/tile/tile.S: Ditto. + +2012-10-12 Matthias Klose + + * generate-osx-source-and-headers.py: Normalize whitespace. + +2012-09-14 David Edelsohn + + * configure: Regenerated. + +2012-08-26 Andrew Pinski + + PR libffi/53014 + * src/mips/ffi.c (ffi_prep_closure_loc): Allow n32 with soft-float and n64 with + soft-float. + +2012-08-08 Uros Bizjak + + * src/s390/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-07-18 H.J. Lu + + PR libffi/53982 + PR libffi/53973 + * src/x86/ffitarget.h: Check __ILP32__ instead of __LP64__ for x32. + (FFI_SIZEOF_JAVA_RAW): Defined to 4 for x32. + +2012-05-16 H.J. Lu + + * configure: Regenerated. + +2012-05-05 Nicolas Lelong + + * libffi.xcodeproj/project.pbxproj: Fixes. + * README: Update for iOS builds. + +2012-04-23 Alexandre Keunecke I. de Mendonca + + * configure.ac: Add Blackfin/sysv support + * Makefile.am: Add Blackfin/sysv support + * src/bfin/ffi.c: Add Blackfin/sysv support + * src/bfin/ffitarget.h: Add Blackfin/sysv support + +2012-04-11 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new script. + * Makefile.in: Rebuilt. + +2012-04-11 Zachary Waldowski + + * generate-ios-source-and-headers.py, + libffi.xcodeproj/project.pbxproj: Support a Mac static library via + Xcode. Set iOS compatibility to 4.0. Move iOS trampoline + generation into an Xcode "run script" phase. Include both as + Xcode build scripts. Don't always regenerate config files. 
+ +2012-04-10 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Add missing semicolon. + +2012-04-06 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new iOS/xcode files. + * Makefile.in: Rebuilt. + +2012-04-06 Mike Lewis + + * generate-ios-source-and-headers.py: New file. + * libffi.xcodeproj/project.pbxproj: New file. + * README: Update instructions on building iOS binary. + * build-ios.sh: Delete. + +2012-04-06 Anthony Green + + * src/x86/ffi64.c (UINT128): Define differently for Intel and GNU + compilers, then use it. + +2012-04-06 H.J. Lu + + * m4/libtool.m4 (_LT_ENABLE_LOCK): Support x32. + +2012-04-06 Anthony Green + + * testsuite/Makefile.am (EXTRA_DIST): Add missing test cases. + * testsuite/Makefile.in: Rebuilt. + +2012-04-05 Zachary Waldowski + + * include/ffi.h.in: Add missing trampoline table fields. + * src/arm/sysv.S: Fix ENTRY definition, and wrap symbol references + in CNAME. + * src/x86/ffi.c: Wrap Windows specific code in ifdefs. + +2012-04-02 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Declare double_tmp. + Silence casting pointer to integer of different size warning. + Delete goto to previously deleted label. + (ffi_call): Silence possibly undefined warning. + (ffi_closure_helper_SYSV): Declare variable type. + +2012-04-02 Peter Rosin + + * src/x86/win32.S (ffi_call_win32): Sign/zero extend the return + value in the Intel version as is already done for the AT&T version. + (ffi_closure_SYSV): Likewise. + (ffi_closure_raw_SYSV): Likewise. + (ffi_closure_STDCALL): Likewise. + +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_THISCALL): Unify the frame + generation, fix the ENDP label and remove the surplus third arg + from the 'lea' insn. + +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_SYSV): Make the 'stubraw' label + visible outside the PROC, so that ffi_closure_raw_THISCALL can see + it. Also instruct the assembler to add a frame to the function. + +2012-03-23 Peter Rosin + + * Makefile.am (AM_CPPFLAGS): Add -DFFI_BUILDING. + * Makefile.in: Rebuilt. + * include/ffi.h.in [MSVC]: Add __declspec(dllimport) decorations + to all data exports, when compiling libffi clients using MSVC. + +2012-03-29 Peter Rosin + + * src/x86/ffitarget.h (ffi_abi): Add new ABI FFI_MS_CDECL and + make it the default for MSVC. + (FFI_TYPE_MS_STRUCT): New structure return convention. + * src/x86/ffi.c (ffi_prep_cif_machdep): Tweak the structure + return convention for FFI_MS_CDECL to be FFI_TYPE_MS_STRUCT + instead of an ordinary FFI_TYPE_STRUCT. + (ffi_prep_args): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_call): Likewise. + (ffi_prep_incoming_args_SYSV): Likewise. + (ffi_raw_call): Likewise. + (ffi_prep_closure_loc): Treat FFI_MS_CDECL as FFI_SYSV. + * src/x86/win32.S (ffi_closure_SYSV): For FFI_TYPE_MS_STRUCT, + return a pointer to the result structure in eax and don't pop + that pointer from the stack, the caller takes care of it. + (ffi_call_win32): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_closure_raw_SYSV): Likewise. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/closure_stdcall.c [MSVC]: Add inline + assembly version with Intel syntax. + * testsuite/libffi.call/closure_thiscall.c [MSVC]: Likewise. + +2012-03-23 Peter Rosin + + * testsuite/libffi.call/ffitest.h: Provide abstration of + __attribute__((fastcall)) in the form of a __FASTCALL__ + define. Define it to __fastcall for MSVC. + * testsuite/libffi.call/fastthis1_win32.c: Use the above. + * testsuite/libffi.call/fastthis2_win32.c: Likewise. 
+ * testsuite/libffi.call/fastthis3_win32.c: Likewise. + * testsuite/libffi.call/strlen2_win32.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + +2012-03-22 Peter Rosin + + * src/x86/win32.S [MSVC] (ffi_closure_THISCALL): Remove the manual + frame on function entry, MASM adds one automatically. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/ffitest.h [MSVC]: Add kludge for missing + bits in the MSVC headers. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/cls_12byte.c: Adjust to the C89 style + with no declarations after statements. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5_1_byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_dbls_struct.c: Likewise. + * testsuite/libffi.call/cls_pointer_stack.c: Likewise. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct10.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. 
+ * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.call/testclosure.c: Likewise. + +2012-03-21 Peter Rosin + + * testsuite/libffi.call/float_va.c (float_va_fn): Use %f when + printing doubles (%lf is for long doubles). + (main): Likewise. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. + * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. + * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-19 Alan Hourihane + + * src/m68k/ffi.c: Add MINT support. + * src/m68k/sysv.S: Ditto. + +2012-03-06 Chung-Lin Tang + + * src/arm/ffi.c (ffi_call): Add __ARM_EABI__ guard around call to + ffi_call_VFP(). + (ffi_prep_closure_loc): Add __ARM_EABI__ guard around use of + ffi_closure_VFP. + * src/arm/sysv.S: Add __ARM_EABI__ guard around VFP code. + +2012-03-19 chennam + + * src/powerpc/ffi_darwin.c (ffi_prep_closure_loc): Fix AIX closure + support. + +2012-03-13 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/sh64/ffi.c (ffi_prep_closure_loc): Ditto. + +2012-03-09 David Edelsohn + + * src/powerpc/aix_closure.S (ffi_closure_ASM): Adjust for Darwin64 + change to return value of ffi_closure_helper_DARWIN and load type + from return type. + +2012-03-03 H.J. Lu + + * src/x86/ffi64.c (ffi_call): Cast the return value to unsigned + long. + (ffi_prep_closure_loc): Cast to 64bit address in trampoline. + (ffi_closure_unix64_inner): Cast return pointer to unsigned long + first. + + * src/x86/ffitarget.h (FFI_SIZEOF_ARG): Defined to 8 for x32. + (ffi_arg): Set to unsigned long long for x32. + (ffi_sarg): Set to long long for x32. + +2012-03-03 H.J. Lu + + * src/prep_cif.c (ffi_prep_cif_core): Properly check bad ABI. + +2012-03-03 Andoni Morales Alastruey + + * configure.ac: Add -no-undefined for both 32- and 64-bit x86 + windows-like hosts. + * configure: Rebuilt. + +2012-02-27 Mikael Pettersson + + PR libffi/52223 + * Makefile.am (FLAGS_TO_PASS): Define. + * Makefile.in: Regenerate. + +2012-02-23 Anthony Green + + * src/*/ffitarget.h: Ensure that users never include ffitarget.h + directly. + +2012-02-23 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_closure_raw_THISCALL): New + prototype. + (ffi_prep_raw_closure_loc): Use ffi_closure_raw_THISCALL for + thiscall-convention. 
+ (ffi_raw_call): Use ffi_prep_args_raw. + * src/x86/win32.S (ffi_closure_raw_THISCALL): Add + implementation for stub. + +2012-02-10 Kai Tietz + + * configure.ac (AM_LTLDFLAGS): Add -no-undefine for x64 + windows target. + * configure: Regenerated. + +2012-02-08 Kai Tietz + + * src/prep_cif.c (ffi_prep_cif): Allow for X86_WIN32 + also FFI_THISCALL. + * src/x86/ffi.c (ffi_closure_THISCALL): Add prototype. + (FFI_INIT_TRAMPOLINE_THISCALL): New trampoline code. + (ffi_prep_closure_loc): Add FFI_THISCALL support. + * src/x86/ffitarget.h (FFI_TRAMPOLINE_SIZE): Adjust size. + * src/x86/win32.S (ffi_closure_THISCALL): New closure code + for thiscall-calling convention. + * testsuite/libffi.call/closure_thiscall.c: New test. + +2012-01-28 Kai Tietz + + * src/libffi/src/x86/ffi.c (ffi_call_win32): Add new + argument to prototype for specify calling-convention. + (ffi_call): Add support for stdcall/thiscall convention. + (ffi_prep_args): Likewise. + (ffi_raw_call): Likewise. + * src/x86/ffitarget.h (ffi_abi): Add FFI_THISCALL and + FFI_FASTCALL. + * src/x86/win32.S (_ffi_call_win32): Add support for + fastcall/thiscall calling-convention calls. + * testsuite/libffi.call/fastthis1_win32.c: New test. + * testsuite/libffi.call/fastthis2_win32.c: New test. + * testsuite/libffi.call/fastthis3_win32.c: New test. + * testsuite/libffi.call/strlen2_win32.c: New test. + * testsuite/libffi.call/many2_win32.c: New test. + * testsuite/libffi.call/struct1_win32.c: New test. + * testsuite/libffi.call/struct2_win32.c: New test. + +2012-01-23 Uros Bizjak + + * src/alpha/ffi.c (ffi_prep_closure_loc): Check for bad ABI. + +2012-01-23 Anthony Green + Chris Young + + * configure.ac: Add Amiga support. + * configure: Rebuilt. + +2012-01-23 Dmitry Nadezhin + + * include/ffi_common.h (LIKELY, UNLIKELY): Fix definitions. + +2012-01-23 Andreas Schwab + + * src/m68k/sysv.S (ffi_call_SYSV): Properly test for plain + mc68000. Test for __HAVE_68881__ in addition to __MC68881__. + +2012-01-19 Jakub Jelinek + + PR rtl-optimization/48496 + * src/ia64/ffi.c (ffi_call): Fix up aliasing violations. + +2012-01-09 Rainer Orth + + * configure.ac (i?86-*-*): Set TARGET to X86_64. + * configure: Regenerate. + +2011-12-07 Andrew Pinski + + PR libffi/50051 + * src/mips/n32.S: Add ".set mips4". + +2011-11-21 Andreas Tobler + + * configure: Regenerate. + +2011-11-12 David Gilbert + + * doc/libffi.texi, include/ffi.h.in, include/ffi_common.h, + man/Makefile.am, man/ffi.3, man/ffi_prep_cif.3, + man/ffi_prep_cif_var.3, src/arm/ffi.c, src/arm/ffitarget.h, + src/cris/ffi.c, src/prep_cif.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/float_va.c: Many changes to support variadic + function calls. + +2011-11-12 Kyle Moffett + + * src/powerpc/ffi.c, src/powerpc/ffitarget.h, + src/powerpc/ppc_closure.S, src/powerpc/sysv.S: Many changes for + softfloat powerpc variants. + +2011-11-12 Petr Salinger + + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Fix kfreebsd support. + * configure: Rebuilt. + +2011-11-12 Timothy Wall + + * src/arm/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): Max + alignment of 4 for wince on ARM. + +2011-11-12 Kyle Moffett + Anthony Green + + * src/ppc/sysv.S, src/ppc/ffi.c: Remove use of ppc string + instructions (not available on some cores, like the PPC440). + +2011-11-12 Kimura Wataru + + * m4/ax_enable_builddir: Change from string comparison to numeric + comparison for wc output. + * configure.ac: Enable FFI_MMAP_EXEC_WRIT for darwin11 aka Mac OS + X 10.7. 
+ * configure: Rebuilt. + +2011-11-12 Anthony Green + + * Makefile.am (AM_CCASFLAGS): Add -g option to build assembly + files with debug info. + * Makefile.in: Rebuilt. + +2011-11-12 Jasper Lievisse Adriaanse + + * README: Update list of supported OpenBSD systems. + +2011-11-12 Anthony Green + + * libtool-version: Update. + * Makefile.am (nodist_libffi_la_SOURCES): Add src/debug.c if + FFI_DEBUG. + (libffi_la_SOURCES): Remove src/debug.c + (EXTRA_DIST): Add src/debug.c + * Makefile.in: Rebuilt. + * README: Update for 3.0.11. + +2011-11-10 Richard Henderson + + * configure.ac (GCC_AS_CFI_PSEUDO_OP): Use it instead of inline check. + * configure, aclocal.m4: Rebuild. + +2011-09-04 Iain Sandoe + + PR libffi/49594 + * src/powerpc/darwin_closure.S (stubs): Make the stub binding + helper reference track the architecture pointer size. + +2011-08-25 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Remove hard-coded assembly + instructions. + * src/arm/sysv.S (ffi_arm_trampoline): Put them here instead. + +2011-07-11 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Clear icache. + +2011-06-29 Rainer Orth + + * testsuite/libffi.call/cls_double_va.c: Move PR number to comment. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. + +2011-06-29 Rainer Orth + + PR libffi/46660 + * testsuite/libffi.call/cls_double_va.c: xfail dg-output on + mips-sgi-irix6*. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. + +2011-06-14 Rainer Orth + + * testsuite/libffi.call/huge_struct.c (test_large_fn): Use PRIu8, + PRId8 instead of %hhu, %hhd. + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRId8, + PRIu8): Define. + [__sgi__] (PRId8, PRIu8): Define. + +2011-04-29 Rainer Orth + + * src/alpha/osf.S (UA_SI, FDE_ENCODING, FDE_ENCODE, FDE_ARANGE): + Define. + Use them to handle ELF vs. ECOFF differences. + [__osf__] (_GLOBAL__F_ffi_call_osf): Define. + +2011-03-30 Timothy Wall + + * src/powerpc/darwin.S: Fix unknown FDE encoding. + * src/powerpc/darwin_closure.S: ditto. + +2011-02-25 Anthony Green + + * src/powerpc/ffi.c (ffi_prep_closure_loc): Allow for more + 32-bit ABIs. + +2011-02-15 Anthony Green + + * m4/ax_cc_maxopt.m4: Don't -malign-double or use -ffast-math. + * configure: Rebuilt. + +2011-02-13 Ralf Wildenhues + + * configure: Regenerate. + +2011-02-13 Anthony Green + + * include/ffi_common.h (UNLIKELY, LIKELY): Define. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Remove definition. + * src/prep_cif.c (UNLIKELY, LIKELY): Remove definition. + + * src/prep_cif.c (initialize_aggregate): Convert assertion into + FFI_BAD_TYPEDEF return. Initialize arg size and alignment to 0. + + * src/pa/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/arm/ffi.c (ffi_prep_closure_loc): Ditto. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Ditto. + * src/mips/ffi.c (ffi_prep_closure_loc): Ditto. + * src/ia64/ffi.c (ffi_prep_closure_loc): Ditto. + * src/avr32/ffi.c (ffi_prep_closure_loc): Ditto. + +2011-02-11 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-02-11 Eric Botcazou + + * src/sparc/v9.S (STACKFRAME): Bump to 176. + +2011-02-09 Stuart Shelton + + http://bugs.gentoo.org/show_bug.cgi?id=286911 + * src/mips/ffitarget.h: Clean up error messages. + * src/java_raw_api.c (ffi_java_translate_args): Cast raw arg to + ffi_raw*. + * include/ffi.h.in: Add pragma for SGI compiler. 
+ +2011-02-09 Anthony Green + + * configure.ac: Add powerpc64-*-darwin* support. + +2011-02-09 Anthony Green + + * README: Mention Interix. + +2011-02-09 Jonathan Callen + + * configure.ac: Add Interix to win32/cygwin/mingw case. + * configure: Ditto. + * src/closures.c: Treat Interix like Cygwin, instead of as a + generic win32. + +2011-02-09 Anthony Green + + * testsuite/libffi.call/err_bad_typedef.c: Remove xfail. + * testsuite/libffi.call/err_bad_abi.c: Remove xfail. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Define. + (ffi_prep_closure_loc): Check for bad ABI. + * src/prep_cif.c (UNLIKELY, LIKELY): Define. + (initialize_aggregate): Check for bad types. + +2011-02-09 Landon Fuller + + * Makefile.am (EXTRA_DIST): Add build-ios.sh, src/arm/gentramp.sh, + src/arm/trampoline.S. + (nodist_libffi_la_SOURCES): Add src/arc/trampoline.S. + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Define. + * src/arm/ffi.c (ffi_trampoline_table) + (ffi_closure_trampoline_table_page, ffi_trampoline_table_entry) + (FFI_TRAMPOLINE_CODELOC_CONFIG, FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET) + (FFI_TRAMPOLINE_COUNT, ffi_trampoline_lock, ffi_trampoline_tables) + (ffi_trampoline_table_alloc, ffi_closure_alloc, ffi_closure_free): + Define for FFI_EXEC_TRAMPOLINE_TABLE case (iOS). + (ffi_prep_closure_loc): Handl FFI_EXEC_TRAMPOLINE_TABLE case + separately. + * src/arm/sysv.S: Handle Apple iOS host. + * src/closures.c: Handle FFI_EXEC_TRAMPOLINE_TABLE case. + * build-ios.sh: New file. + * fficonfig.h.in, configure, Makefile.in: Rebuilt. + * README: Mention ARM iOS. + +2011-02-08 Oren Held + + * src/dlmalloc.c (_STRUCT_MALLINFO): Define in order to avoid + redefinition of mallinfo on HP-UX. + +2011-02-08 Ginn Chen + + * src/sparc/ffi.c (ffi_call): Make compatible with Solaris Studio + aggregate return ABI. Flush cache. + (ffi_prep_closure_loc): Flush cache. + +2011-02-11 Anthony Green + + From Tom Honermann : + * src/powerpc/aix.S (ffi_call_AIX): Support for xlc toolchain on + AIX. Declare .ffi_prep_args. Insert nops after branch + instructions so that the AIX linker can insert TOC reload + instructions. + * src/powerpc/aix_closure.S: Declare .ffi_closure_helper_DARWIN. + +2011-02-08 Ed + + * src/powerpc/asm.h: Fix grammar nit in comment. + +2011-02-08 Uli Link + + * include/ffi.h.in (FFI_64_BIT_MAX): Define and use. + +2011-02-09 Rainer Orth + + PR libffi/46661 + * testsuite/libffi.call/cls_pointer.c (main): Cast void * to + uintptr_t first. + * testsuite/libffi.call/cls_pointer_stack.c (main): Likewise. + +2011-02-08 Rafael Avila de Espindola + + * configure.ac: Fix x86 test for pc related relocs. + * configure: Rebuilt. + +2011-02-07 Joel Sherrill + + * libffi/src/m68k/ffi.c: Add RTEMS support for cache flushing. + Handle case when CPU variant does not have long double support. + * libffi/src/m68k/sysv.S: Add support for mc68000, Coldfire, + and cores with soft floating point. + +2011-02-07 Joel Sherrill + + * configure.ac: Add mips*-*-rtems* support. + * configure: Regenerate. + * src/mips/ffitarget.h: Ensure needed constants are available + for targets which do not have sgidefs.h. + +2011-01-26 Dave Korn + + PR target/40125 + * configure.ac (AM_LTLDFLAGS): Add -bindir option for windows DLLs. + * configure: Regenerate. + +2010-12-18 Iain Sandoe + + PR libffi/29152 + PR libffi/42378 + * src/powerpc/darwin_closure.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffitarget.h (POWERPC_DARWIN64): New, + (FFI_TRAMPOLINE_SIZE): Update for Darwin64. 
+ * src/powerpc/darwin.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffi_darwin.c: Likewise. + +2010-12-06 Rainer Orth + + * configure.ac (libffi_cv_as_ascii_pseudo_op): Use double + backslashes. + (libffi_cv_as_string_pseudo_op): Likewise. + * configure: Regenerate. + +2010-12-03 Chung-Lin Tang + + * src/arm/sysv.S (ffi_closure_SYSV): Add UNWIND to .pad directive. + (ffi_closure_VFP): Same. + (ffi_call_VFP): Move down to before ffi_closure_VFP. Add '.fpu vfp' + directive. + +2010-12-01 Rainer Orth + + * testsuite/libffi.call/ffitest.h [__sgi] (PRId64, PRIu64): Define. + (PRIuPTR): Define. + +2010-11-29 Richard Henderson + Rainer Orth + + * src/x86/sysv.S (FDE_ENCODING, FDE_ENCODE): Define. + (.eh_frame): Use FDE_ENCODING. + (.LASFDE1, .LASFDE2, LASFDE3): Simplify with FDE_ENCODE. + +2010-11-22 Jacek Caban + + * configure.ac: Check for symbol underscores on mingw-w64. + * configure: Rebuilt. + * src/x86/win64.S: Correctly access extern symbols in respect to + underscores. + +2010-11-15 Rainer Orth + + * testsuite/lib/libffi-dg.exp: Rename ... + * testsuite/lib/libffi.exp: ... to this. + * libffi/testsuite/libffi.call/call.exp: Don't load libffi-dg.exp. + * libffi/testsuite/libffi.special/special.exp: Likewise. + +2010-10-28 Chung-Lin Tang + + * src/arm/ffi.c (ffi_prep_args): Add VFP register argument handling + code, new parameter, and return value. Update comments. + (ffi_prep_cif_machdep): Add case for VFP struct return values. Add + call to layout_vfp_args(). + (ffi_call_SYSV): Update declaration. + (ffi_call_VFP): New declaration. + (ffi_call): Add VFP struct return conditions. Call ffi_call_VFP() + when ABI is FFI_VFP. + (ffi_closure_VFP): New declaration. + (ffi_closure_SYSV_inner): Add new vfp_args parameter, update call to + ffi_prep_incoming_args_SYSV(). + (ffi_prep_incoming_args_SYSV): Update parameters. Add VFP argument + case handling. + (ffi_prep_closure_loc): Pass ffi_closure_VFP to trampoline + construction under VFP hard-float. + (rec_vfp_type_p): New function. + (vfp_type_p): Same. + (place_vfp_arg): Same. + (layout_vfp_args): Same. + * src/arm/ffitarget.h (ffi_abi): Add FFI_VFP. Define FFI_DEFAULT_ABI + based on __ARM_PCS_VFP. + (FFI_EXTRA_CIF_FIELDS): Define for adding VFP hard-float specific + fields. + (FFI_TYPE_STRUCT_VFP_FLOAT): Define internally used type code. + (FFI_TYPE_STRUCT_VFP_DOUBLE): Same. + * src/arm/sysv.S (ffi_call_SYSV): Change call of ffi_prep_args() to + direct call. Move function pointer load upwards. + (ffi_call_VFP): New function. + (ffi_closure_VFP): Same. + + * testsuite/lib/libffi-dg.exp (check-flags): New function. + (dg-skip-if): New function. + * testsuite/libffi.call/cls_double_va.c: Skip if target is arm*-*-* + and compiler options include -mfloat-abi=hard. + * testsuite/libffi.call/cls_longdouble_va.c: Same. + +2010-10-01 Jakub Jelinek + + PR libffi/45677 + * src/x86/ffi64.c (ffi_prep_cif_machdep): Ensure cif->bytes is + a multiple of 8. + * testsuite/libffi.call/many2.c: New test. + +2010-08-20 Mark Wielaard + + * src/closures.c (open_temp_exec_file_mnt): Check if getmntent_r + returns NULL. + +2010-08-09 Andreas Tobler + + * configure.ac: Add target powerpc64-*-freebsd*. + * configure: Regenerate. + * testsuite/libffi.call/cls_align_longdouble_split.c: Pass + -mlong-double-128 only to linux targets. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_longdouble.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. 
+ +2010-08-05 Dan Witte + + * Makefile.am: Pass FFI_DEBUG define to msvcc.sh for linking to the + debug CRT when --enable-debug is given. + * configure.ac: Define it. + * msvcc.sh: Translate -g and -DFFI_DEBUG appropriately. + +2010-08-04 Dan Witte + + * src/x86/ffitarget.h: Add X86_ANY define for all x86/x86_64 + platforms. + * src/x86/ffi.c: Remove redundant ifdef checks. + * src/prep_cif.c: Push stack space computation into src/x86/ffi.c + for X86_ANY so return value space doesn't get added twice. + +2010-08-03 Neil Rashbrooke + + * msvcc.sh: Don't pass -safeseh to ml64 because behavior is buggy. + +2010-07-22 Dan Witte + + * src/*/ffitarget.h: Make FFI_LAST_ABI one past the last valid ABI. + * src/prep_cif.c: Fix ABI assertion. + * src/cris/ffi.c: Ditto. + +2010-07-10 Evan Phoenix + + * src/closures.c (selinux_enabled_check): Fix strncmp usage bug. + +2010-07-07 Dan Horák + + * include/ffi.h.in: Protect #define with #ifndef. + * src/powerpc/ffitarget.h: Ditto. + * src/s390/ffitarget.h: Ditto. + * src/sparc/ffitarget.h: Ditto. + +2010-07-07 Neil Roberts + + * src/x86/sysv.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2010-07-02 Jakub Jelinek + + * Makefile.am (AM_MAKEFLAGS): Pass also mandir to submakes. + * Makefile.in: Regenerated. + +2010-05-19 Rainer Orth + + * configure.ac (libffi_cv_as_x86_pcrel): Check for illegal in as + output, too. + (libffi_cv_as_ascii_pseudo_op): Check for .ascii. + (libffi_cv_as_string_pseudo_op): Check for .string. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/x86/sysv.S (.eh_frame): Use .ascii, .string or error. + +2010-05-11 Dan Witte + + * doc/libffi.tex: Document previous change. + +2010-05-11 Makoto Kato + + * src/x86/ffi.c (ffi_call): Don't copy structs passed by value. + +2010-05-05 Michael Kohler + + * src/dlmalloc.c (dlfree): Fix spelling. + * src/ia64/ffi.c (ffi_prep_cif_machdep): Ditto. + * configure.ac: Ditto. + * configure: Rebuilt. + +2010-04-13 Dan Witte + + * msvcc.sh: Build with -W3 instead of -Wall. + * src/powerpc/ffi_darwin.c: Remove build warnings. + * src/x86/ffi.c: Ditto. + * src/x86/ffitarget.h: Ditto. + +2010-04-12 Dan Witte + Walter Meinl + + * configure.ac: Add OS/2 support. + * configure: Rebuilt. + * src/closures.c: Ditto. + * src/dlmalloc.c: Ditto. + * src/x86/win32.S: Ditto. + +2010-04-07 Jakub Jelinek + + * testsuite/libffi.call/err_bad_abi.c: Remove unused args variable. + +2010-04-02 Ralf Wildenhues + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2010-03-30 Dan Witte + + * msvcc.sh: Disable build warnings. + * README (tested): Clarify windows build procedure. + +2010-03-15 Rainer Orth + + * configure.ac (libffi_cv_as_x86_64_unwind_section_type): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * libffi/src/x86/unix64.S (.eh_frame) + [HAVE_AS_X86_64_UNWIND_SECTION_TYPE]: Use @unwind section type. + +2010-03-14 Matthias Klose + + * src/x86/ffi64.c: Fix typo in comment. + * src/x86/ffi.c: Use /* ... */ comment style. + +2010-02-24 Rainer Orth + + * doc/libffi.texi (The Closure API): Fix typo. + * doc/libffi.info: Remove. + +2010-02-15 Matthias Klose + + * src/arm/sysv.S (__ARM_ARCH__): Define for processor + __ARM_ARCH_7EM__. + +2010-01-15 Anthony Green + + * README: Add notes on building with Microsoft Visual C++. + +2010-01-15 Daniel Witte + + * msvcc.sh: New file. + + * src/x86/win32.S: Port assembly routines to MSVC and #ifdef. 
+ * src/x86/ffi.c: Tweak function declaration and remove excess + parens. + * include/ffi.h.in: Add __declspec(align(8)) to typedef struct + ffi_closure. + + * src/x86/ffi.c: Merge ffi_call_SYSV and ffi_call_STDCALL into new + function ffi_call_win32 on X86_WIN32. + * src/x86/win32.S (ffi_call_SYSV): Rename to ffi_call_win32. + (ffi_call_STDCALL): Remove. + + * src/prep_cif.c (ffi_prep_cif): Move stack space allocation code + to ffi_prep_cif_machdep for x86. + * src/x86/ffi.c (ffi_prep_cif_machdep): To here. + +2010-01-15 Oliver Kiddle + + * src/x86/ffitarget.h (ffi_abi): Check for __i386 and __amd64 for + Sun Studio compiler compatibility. + +2010-01-12 Conrad Irwin + + * doc/libffi.texi: Add closure example. + +2010-01-07 Rainer Orth + + PR libffi/40701 + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRIdLL, + PRIuLL, PRId64, PRIu64, PRIuPTR): Define. + * testsuite/libffi.call/cls_align_sint64.c: Add -Wno-format on + alpha*-dec-osf*. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/return_ll1.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.special/ffitestcxx.h (allocate_mmap): Cast + MAP_FAILED to char *. + +2010-01-06 Rainer Orth + + * src/mips/n32.S: Use .abicalls and .eh_frame with __GNUC__. + +2009-12-31 Anthony Green + + * README: Update for libffi 3.0.9. + +2009-12-27 Matthias Klose + + * configure.ac (HAVE_LONG_DOUBLE): Define for mips when + appropriate. + * configure: Rebuilt. + +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_longdouble_va.c: Mark as xfail for + avr32*-*-*. + * testsuite/libffi.call/cls_double_va.c: Ditto. + +2009-12-26 Andreas Tobler + + * testsuite/libffi.call/ffitest.h: Conditionally include stdint.h + and inttypes.h. + * testsuite/libffi.special/unwindtest.cc: Ditto. + +2009-12-26 Andreas Tobler + + * configure.ac: Add amd64-*-openbsd*. + * configure: Rebuilt. + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Link + openbsd programs with -lpthread. + +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: Remove xfail for + mips*-*-* and arm*-*-*. + * testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c: Remove xfail for arm*-*-*. + +2009-12-31 Kay Tietz + + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRuLL): Fix + definitions. + +2009-12-31 Carlo Bramini + + * configure.ac (AM_LTLDFLAGS): Define for windows hosts. + * Makefile.am (libffi_la_LDFLAGS): Add AM_LTLDFLAGS. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + +2009-12-31 Anthony Green + Blake Chaffin. + + * testsuite/libffi.call/huge_struct.c: New test case from Blake + Chaffin @ Apple. + +2009-12-28 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Copy abi and nargs to + local variables. + (aix_adjust_aggregate_sizes): New function. + (ffi_prep_cif_machdep): Call it. + +2009-12-26 Andreas Tobler + + * configure.ac: Define FFI_MMAP_EXEC_WRIT for the given targets. + * configure: Regenerate. + * fficonfig.h.in: Likewise. + * src/closures.c: Remove the FFI_MMAP_EXEC_WRIT definition for + Solaris/x86. 
+ +2009-12-26 Andreas Schwab + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Advance intarg_count + when a float arguments is passed in memory. + (ffi_closure_helper_SYSV): Mark general registers as used up when + a 64bit or soft-float long double argument is passed in memory. + +2009-12-25 Matthias Klose + + * man/ffi_call.3: Fix #include in examples. + * doc/libffi.texi: Add dircategory. + +2009-12-25 Frank Everdij + + * include/ffi.h.in: Placed '__GNUC__' ifdef around + '__attribute__((aligned(8)))' in ffi_closure, fixes compile for + IRIX MIPSPro c99. + * include/ffi_common.h: Added '__sgi' define to non + '__attribute__((__mode__()))' integer typedefs. + * src/mips/ffi.c (ffi_call, ffi_closure_mips_inner_O32, + ffi_closure_mips_inner_N32): Added 'defined(_MIPSEB)' to BE check. + (ffi_closure_mips_inner_O32, ffi_closure_mips_inner_N32): Added + FFI_LONGDOUBLE support and alignment(N32 only). + * src/mips/ffitarget.h: Corrected '#include ' for IRIX and + fixed non '__attribute__((__mode__()))' integer typedefs. + * src/mips/n32.S: Put '#ifdef linux' around '.abicalls' and '.eh_frame' + since they are Linux/GNU Assembler specific. + +2009-12-25 Bradley Smith + + * configure.ac, Makefile.am, src/avr32/ffi.c, + src/avr32/ffitarget.h, + src/avr32/sysv.S: Add AVR32 port. + * configure, Makefile.in: Rebuilt. + +2009-12-21 Andreas Tobler + + * configure.ac: Make i?86 build on FreeBSD and OpenBSD. + * configure: Regenerate. + +2009-12-15 John David Anglin + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on PA HP-UX. + +2009-12-13 John David Anglin + + * src/pa/ffi.c (ffi_closure_inner_pa32): Handle FFI_TYPE_LONGDOUBLE + type on HP-UX. + +2012-02-13 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_prep_raw_closure_loc): Add thiscall + support for X86_WIN32. + (FFI_INIT_TRAMPOLINE_THISCALL): Fix displacement. + +2009-12-11 Eric Botcazou + + * src/sparc/ffi.c (ffi_closure_sparc_inner_v9): Properly align 'long + double' arguments. + +2009-12-11 Eric Botcazou + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on Solaris < 10. + +2009-12-10 Rainer Orth + + PR libffi/40700 + * src/closures.c [X86_64 && __sun__ && __svr4__] + (FFI_MMAP_EXEC_WRIT): Define. + +2009-12-08 David Daney + + * testsuite/libffi.call/stret_medium.c: Remove xfail for mips*-*-* + * testsuite/libffi.call/cls_align_longdouble_split2.c: Same. + * testsuite/libffi.call/stret_large.c: Same. + * testsuite/libffi.call/cls_align_longdouble_split.c: Same. + * testsuite/libffi.call/stret_large2.c: Same. + * testsuite/libffi.call/stret_medium2.c: Same. + +2009-12-07 David Edelsohn + + * src/powerpc/aix_closure.S (libffi_closure_ASM): Fix tablejump + typo. + +2009-12-05 David Edelsohn + + * src/powerpc/aix.S: Update AIX32 code to be consistent with AIX64 + code. + * src/powerpc/aix_closure.S: Same. + +2009-12-05 Ralf Wildenhues + + * Makefile.in: Regenerate. + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2009-12-04 David Edelsohn + + * src/powerpc/aix_closure.S: Reorganize 64-bit code to match + linux64_closure.S. + +2009-12-04 Uros Bizjak + + PR libffi/41908 + * src/x86/ffi64.c (classify_argument): Update from + gcc/config/i386/i386.c. + (ffi_closure_unix64_inner): Do not use the address of two consecutive + SSE registers directly. + * testsuite/libffi.call/cls_dbls_struct.c (main): Remove xfail + for x86_64 linux targets. 
+ +2009-12-04 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_closure_helper_DARWIN): Increment + pfr for long double split between fpr13 and stack. + +2009-12-03 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Increment next_arg and + fparg_count twice for long double. + +2009-12-03 David Edelsohn + + PR libffi/42243 + * src/powerpc/ffi_darwin.c (ffi_prep_args): Remove extra parentheses. + +2009-12-03 Uros Bizjak + + * testsuite/libffi.call/cls_longdouble_va.c (main): Fix format string. + Remove xfails for x86 linux targets. + +2009-12-02 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Fix typo in INT64 + case. + +2009-12-01 David Edelsohn + + * src/powerpc/aix.S (ffi_call_AIX): Convert to more standard + register usage. Call ffi_prep_args directly. Add long double + return value support. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Double arg increment + applies to FFI_TYPE_DOUBLE. Correct fpr_base increment typo. + Separate FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases. + (ffi_prep_cif_machdep): Only 16 byte stack alignment in 64 bit + mode. + (ffi_closure_helper_DARWIN): Remove nf and ng counters. Move temp + into case. + * src/powerpc/aix_closure.S: Maintain 16 byte stack alignment. + Allocate result area between params and FPRs. + +2009-11-30 David Edelsohn + + PR target/35484 + * src/powerpc/ffitarget.h (POWERPC64): Define for PPC64 Linux and + AIX64. + * src/powerpc/aix.S: Implement AIX64 version. + * src/powerpc/aix_closure.S: Implement AIX64 version. + (ffi_closure_ASM): Use extsb, lha and displament addresses. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Implement AIX64 + support. + (ffi_prep_cif_machdep): Same. + (ffi_call): Same. + (ffi_closure_helper_DARWIN): Same. + +2009-11-02 Andreas Tobler + + PR libffi/41908 + * testsuite/libffi.call/testclosure.c: New test. + +2009-09-28 Kai Tietz + + * src/x86/win64.S (_ffi_call_win64 stack): Remove for gnu + assembly version use of ___chkstk. + +2009-09-23 Matthias Klose + + PR libffi/40242, PR libffi/41443 + * src/arm/sysv.S (__ARM_ARCH__): Define for processors + __ARM_ARCH_6T2__, __ARM_ARCH_6M__, __ARM_ARCH_7__, + __ARM_ARCH_7A__, __ARM_ARCH_7R__, __ARM_ARCH_7M__. + Change the conditionals to __SOFTFP__ || __ARM_EABI__ + for -mfloat-abi=softfp to work. + +2009-09-17 Loren J. Rittle + + PR testsuite/32843 (strikes again) + * src/x86/ffi.c (ffi_prep_cif_machdep): Add X86_FREEBSD to + enable proper extension on char and short. + +2009-09-15 David Daney + + * src/java_raw_api.c (ffi_java_raw_to_rvalue): Remove special + handling for FFI_TYPE_POINTER. + * src/mips/ffitarget.h (FFI_TYPE_STRUCT_D_SOFT, + FFI_TYPE_STRUCT_F_SOFT, FFI_TYPE_STRUCT_DD_SOFT, + FFI_TYPE_STRUCT_FF_SOFT, FFI_TYPE_STRUCT_FD_SOFT, + FFI_TYPE_STRUCT_DF_SOFT, FFI_TYPE_STRUCT_SOFT): New defines. + (FFI_N32_SOFT_FLOAT, FFI_N64_SOFT_FLOAT): New ffi_abi enumerations. + (enum ffi_abi): Set FFI_DEFAULT_ABI for soft-float. + * src/mips/n32.S (ffi_call_N32): Add handling for soft-float + structure and pointer returns. + (ffi_closure_N32): Add handling for pointer returns. + * src/mips/ffi.c (ffi_prep_args, calc_n32_struct_flags, + calc_n32_return_struct_flags): Handle soft-float. + (ffi_prep_cif_machdep): Handle soft-float, fix pointer handling. + (ffi_call_N32): Declare proper argument types. + (ffi_call, copy_struct_N32, ffi_closure_mips_inner_N32): Handle + soft-float. + +2009-08-24 Ralf Wildenhues + + * configure.ac (AC_PREREQ): Bump to 2.64. + +2009-08-22 Ralf Wildenhues + + * Makefile.am (install-html, install-pdf): Remove. 
+ * Makefile.in: Regenerate. + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2011-08-22 Jasper Lievisse Adriaanse + + * configure.ac: Add OpenBSD/hppa and OpenBSD/powerpc support. + * configure: Rebuilt. + +2009-07-30 Ralf Wildenhues + + * configure.ac (_AC_ARG_VAR_PRECIOUS): Use m4_rename_force. + +2009-07-24 Dave Korn + + PR libffi/40807 + * src/x86/ffi.c (ffi_prep_cif_machdep): Also use sign/zero-extending + return types for X86_WIN32. + * src/x86/win32.S (_ffi_call_SYSV): Handle omitted return types. + (_ffi_call_STDCALL, _ffi_closure_SYSV, _ffi_closure_raw_SYSV, + _ffi_closure_STDCALL): Likewise. + + * src/closures.c (is_selinux_enabled): Define to const 0 for Cygwin. + (dlmmap, dlmunmap): Also use these functions on Cygwin. + +2009-07-11 Richard Sandiford + + PR testsuite/40699 + PR testsuite/40707 + PR testsuite/40709 + * testsuite/lib/libffi-dg.exp: Revert 2009-07-02, 2009-07-01 and + 2009-06-30 commits. + +2009-07-01 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Set ld_library_path + to "" before adding paths. (This reinstates an assignment that + was removed by my 2009-06-30 commit, but changes the initial + value from "." to "".) + +2009-07-01 H.J. Lu + + PR testsuite/40601 + * testsuite/lib/libffi-dg.exp (libffi-init): Properly set + gccdir. Adjust ld_library_path for gcc only if gccdir isn't + empty. + +2009-06-30 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Don't add "." + to ld_library_path. Use add_path. Add just find_libgcc_s + to ld_library_path, not every libgcc multilib directory. + +2009-06-16 Wim Lewis + + * src/powerpc/ffi.c: Avoid clobbering cr3 and cr4, which are + supposed to be callee-saved. + * src/powerpc/sysv.S (small_struct_return_value): Fix overrun of + return buffer for odd-size structs. + +2009-06-16 Andreas Tobler + + PR libffi/40444 + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Add + allow_stack_execute for Darwin. + +2009-06-16 Andrew Haley + + * configure.ac (TARGETDIR): Add missing blank lines. + * configure: Regenerate. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-15 Andrew Haley + + * testsuite/libffi.call/err_bad_typedef.c: xfail everywhere. + * testsuite/libffi.call/err_bad_abi.c: Likewise. + +2009-06-12 Andrew Haley + + * Makefile.am: Remove info_TEXINFOS. + +2009-06-12 Andrew Haley + + * ChangeLog.libffi: testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. 
+ +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + 
testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-11 Kaz Kojima + + * testsuite/libffi.call/cls_longdouble_va.c: Add xfail sh*-*-linux-*. + * testsuite/libffi.call/err_bad_abi.c: Add xfail sh*-*-*. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + +2009-06-09 Andrew Haley + + * src/x86/freebsd.S: Add missing file. + +2009-06-08 Andrew Haley + + Import from libffi 3.0.8: + + * doc/libffi.texi: New file. + * doc/libffi.info: Likewise. + * doc/stamp-vti: Likewise. + * man/Makefile.am: New file. + * man/ffi_call.3: New file. + + * Makefile.am (EXTRA_DIST): Add src/x86/darwin64.S, + src/dlmalloc.c. + (nodist_libffi_la_SOURCES): Add X86_FREEBSD. + + * configure.ac: Bump version to 3.0.8. + parisc*-*-linux*: Add. + i386-*-freebsd* | i386-*-openbsd*: Add. + powerpc-*-beos*: Add. + AM_CONDITIONAL X86_FREEBSD: Add. + AC_CONFIG_FILES: Add man/Makefile. + + * include/ffi.h.in (FFI_FN): Change void (*)() to void (*)(void). + +2009-06-08 Andrew Haley + + * README: Import from libffi 3.0.8. + +2009-06-08 Andrew Haley + + * testsuite/libffi.call/err_bad_abi.c: Add xfails. + * testsuite/libffi.call/cls_longdouble_va.c: Add xfails. + * testsuite/libffi.call/cls_dbls_struct.c: Add xfail x86_64-*-linux-*. + * testsuite/libffi.call/err_bad_typedef.c: Add xfails. + + * testsuite/libffi.call/stret_medium2.c: Add __UNUSED__ to args. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. 
+ +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2009-06-05 Andrew Haley + + * src/x86/ffitarget.h, src/x86/ffi.c: Merge stdcall changes from + libffi. + +2009-06-04 Andrew Haley + + * src/x86/ffitarget.h, src/x86/win32.S, src/x86/ffi.c: Back out + stdcall changes. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2009-06-04 Andrew Haley + + * include/ffi.h.in: Change void (*)() to void (*)(void). + * src/x86/ffi.c: Likewise. + +2009-06-04 Andrew Haley + + * src/powerpc/ppc_closure.S: Insert licence header. + * src/powerpc/linux64_closure.S: Likewise. + * src/m68k/sysv.S: Likewise. + + * src/sh64/ffi.c: Change void (*)() to void (*)(void). + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/m32r/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/alpha/ffi.c: Likewise. + * src/alpha/osf.S: Likewise. + * src/frv/ffi.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/pa/hpux32.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/ia64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2009-06-04 Andrew Haley + + include/ffi.h.in, + src/arm/ffitarget.h, + src/arm/ffi.c, + src/arm/sysv.S, + src/powerpc/ffitarget.h, + src/closures.c, + src/sh64/ffitarget.h, + src/sh64/ffi.c, + src/sh64/sysv.S, + src/types.c, + src/x86/ffi64.c, + src/x86/ffitarget.h, + src/x86/win32.S, + src/x86/darwin.S, + src/x86/ffi.c, + src/x86/sysv.S, + src/x86/unix64.S, + src/alpha/ffitarget.h, + src/alpha/ffi.c, + src/alpha/osf.S, + src/m68k/ffitarget.h, + src/frv/ffitarget.h, + src/frv/ffi.c, + src/s390/ffitarget.h, + src/s390/sysv.S, + src/cris/ffitarget.h, + src/pa/linux.S, + src/pa/ffitarget.h, + src/pa/ffi.c, + src/raw_api.c, + src/ia64/ffitarget.h, + src/ia64/unix.S, + src/ia64/ffi.c, + src/ia64/ia64_flags.h, + src/java_raw_api.c, + src/debug.c, + src/sparc/v9.S, + src/sparc/ffitarget.h, + src/sparc/ffi.c, + src/sparc/v8.S, + src/mips/ffitarget.h, + src/mips/n32.S, + src/mips/o32.S, + src/mips/ffi.c, + src/prep_cif.c, + src/sh/ffitarget.h, + src/sh/ffi.c, + src/sh/sysv.S: Update license text. 
+ +2009-05-22 Dave Korn + + * src/x86/win32.S (_ffi_closure_STDCALL): New function. + (.eh_frame): Add FDE for it. + +2009-05-22 Dave Korn + + * configure.ac: Also check if assembler supports pc-relative + relocs on X86_WIN32 targets. + * configure: Regenerate. + * src/x86/win32.S (ffi_prep_args): Declare extern, not global. + (_ffi_call_SYSV): Add missing function type symbol .def and + add EH markup labels. + (_ffi_call_STDCALL): Likewise. + (_ffi_closure_SYSV): Likewise. + (_ffi_closure_raw_SYSV): Likewise. + (.eh_frame): Add hand-crafted EH data. + +2009-04-09 Jakub Jelinek + + * testsuite/lib/libffi-dg.exp: Change copyright header to refer to + version 3 of the GNU General Public License and to point readers + at the COPYING3 file and the FSF's license web page. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.special/special.exp: Likewise. + +2009-03-01 Ralf Wildenhues + + * configure: Regenerate. + +2008-12-18 Rainer Orth + + PR libffi/26048 + * configure.ac (HAVE_AS_X86_PCREL): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/x86/sysv.S [!FFI_NO_RAW_API]: Precalculate + RAW_CLOSURE_CIF_OFFSET, RAW_CLOSURE_FUN_OFFSET, + RAW_CLOSURE_USER_DATA_OFFSET for the Solaris 10/x86 assembler. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + * src/x86/unix64.S (.Lstore_table): Move to .text section. + (.Lload_table): Likewise. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + +2008-12-18 Ralf Wildenhues + + * configure: Regenerate. + +2008-11-21 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_cif_machdep): Add support for + signed/unsigned int8/16 return values. + * src/sparc/v8.S (ffi_call_v8): Likewise. + (ffi_closure_v8): Likewise. + +2008-09-26 Peter O'Gorman + Steve Ellcey + + * configure: Regenerate for new libtool. + * Makefile.in: Ditto. + * include/Makefile.in: Ditto. + * aclocal.m4: Ditto. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. + (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-06-17 Ralf Wildenhues + + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2008-06-07 Joseph Myers + + * configure.ac (parisc*-*-linux*, powerpc-*-sysv*, + powerpc-*-beos*): Remove. + * configure: Regenerate. + +2008-05-09 Julian Brown + + * Makefile.am (LTLDFLAGS): New. + (libffi_la_LDFLAGS): Use above. + * Makefile.in: Regenerate. + +2008-04-18 Paolo Bonzini + + PR bootstrap/35457 + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2008-03-26 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-03-26 Daniel Jacobowitz + + * src/arm/sysv.S: Fix ARM comment marker. + +2008-03-26 Jakub Jelinek + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. 
+ * src/m68k/sysv.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-03-16 Ralf Wildenhues + + * aclocal.m4: Regenerate. + * configure: Likewise. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2008-02-12 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-24 David Edelsohn + + * configure: Regenerate. + +2008-01-06 Andreas Tobler + + * src/x86/ffi.c (ffi_prep_cif_machdep): Fix thinko. + +2008-01-05 Andreas Tobler + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): Add code for + signed/unsigned int8/16 for X86_DARWIN. + Updated copyright info. + Handle one and two byte structs with special cif->flags. + * src/x86/ffitarget.h: Add special types for one and two byte structs. + Updated copyright info. + * src/x86/darwin.S (ffi_call_SYSV): Rewrite to use a jump table like + sysv.S + Remove code to pop args from the stack after call. + Special-case signed/unsigned for int8/16, one and two byte structs. + (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + Updated copyright info. + +2007-12-08 David Daney + + * src/mips/n32.S (ffi_call_N32): Replace dadd with ADDU, dsub with + SUBU, add with ADDU and use smaller code sequences. + +2007-12-07 David Daney + + * src/mips/ffi.c (ffi_prep_cif_machdep): Handle long double return + type. + +2007-12-06 David Daney + + * include/ffi.h.in (FFI_SIZEOF_JAVA_RAW): Define if not already + defined. + (ffi_java_raw): New typedef. + (ffi_java_raw_call, ffi_java_ptrarray_to_raw, + ffi_java_raw_to_ptrarray): Change parameter types from ffi_raw to + ffi_java_raw. + (ffi_java_raw_closure) : Same. + (ffi_prep_java_raw_closure, ffi_prep_java_raw_closure_loc): Change + parameter types. + * src/java_raw_api.c (ffi_java_raw_size): Replace FFI_SIZEOF_ARG with + FFI_SIZEOF_JAVA_RAW. + (ffi_java_raw_to_ptrarray): Change type of raw to ffi_java_raw. + Replace FFI_SIZEOF_ARG with FFI_SIZEOF_JAVA_RAW. Use + sizeof(ffi_java_raw) for alignment calculations. + (ffi_java_ptrarray_to_raw): Same. + (ffi_java_rvalue_to_raw): Add special handling for FFI_TYPE_POINTER + if FFI_SIZEOF_JAVA_RAW == 4. + (ffi_java_raw_to_rvalue): Same. + (ffi_java_raw_call): Change type of raw to ffi_java_raw. + (ffi_java_translate_args): Same. + (ffi_prep_java_raw_closure_loc, ffi_prep_java_raw_closure): Change + parameter types. + * src/mips/ffitarget.h (FFI_SIZEOF_JAVA_RAW): Define for N32 ABI. + +2007-12-06 David Daney + + * src/mips/n32.S (ffi_closure_N32): Use 64-bit add instruction on + pointer values. + +2007-12-01 Andreas Tobler + + PR libffi/31937 + * src/powerpc/ffitarget.h: Introduce new ABI FFI_LINUX_SOFT_FLOAT. + Add local FFI_TYPE_UINT128 to handle soft-float long-double-128. + * src/powerpc/ffi.c: Distinguish between __NO_FPRS__ and not and + set the NUM_FPR_ARG_REGISTERS according to. + Add support for potential soft-float support under hard-float + architecture. + (ffi_prep_args_SYSV): Set NUM_FPR_ARG_REGISTERS to 0 in case of + FFI_LINUX_SOFT_FLOAT, handle float, doubles and long-doubles according + to the FFI_LINUX_SOFT_FLOAT ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_SYSV): Likewise. 
+ * src/powerpc/ppc_closure.S: Make sure not to store float/double + on archs where __NO_FPRS__ is true. + Add FFI_TYPE_UINT128 support. + * src/powerpc/sysv.S: Add support for soft-float long-double-128. + Adjust copyright notice. + +2007-11-25 Andreas Tobler + + * src/closures.c: Move definition of MAYBE_UNUSED from here to ... + * include/ffi_common.h: ... here. + Update copyright. + +2007-11-17 Andreas Tobler + + * src/powerpc/sysv.S: Load correct cr to compare if we have long double. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/ffi.c: Add a comment to show which part goes into cr6. + * testsuite/libffi.call/return_ldl.c: New test. + +2007-09-04 + + * src/arm/sysv.S (UNWIND): New. + (Whole file): Conditionally compile unwinder directives. + * src/arm/sysv.S: Add unwinder directives. + + * src/arm/ffi.c (ffi_prep_args): Align structs by at least 4 bytes. + Only treat r0 as a struct address if we're actually returning a + struct by address. + Only copy the bytes that are actually within a struct. + (ffi_prep_cif_machdep): A Composite Type not larger than 4 bytes + is returned in r0, not passed by address. + (ffi_call): Allocate a word-sized temporary for the case where + a composite is returned in r0. + (ffi_prep_incoming_args_SYSV): Align as necessary. + +2007-08-05 Steven Newbury + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Use __clear_cache instead of + directly using the sys_cacheflush syscall. + +2007-07-27 Andrew Haley + + * src/arm/sysv.S (ffi_closure_SYSV): Add soft-float. + +2007-09-03 Maciej W. Rozycki + + * Makefile.am: Unify MIPS_IRIX and MIPS_LINUX into MIPS. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure: Likewise. + +2007-08-24 David Daney + + * testsuite/libffi.call/return_sl.c: New test.
+ +2007-08-10 David Daney + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Remove xfail for mips64*-*-*. + +2007-08-10 David Daney + + PR libffi/28313 + * configure.ac: Don't treat mips64 as a special case. + * Makefile.am (nodist_libffi_la_SOURCES): Add n32.S. + * configure: Regenerate + * Makefile.in: Ditto. + * fficonfig.h.in: Ditto. + * src/mips/ffitarget.h (REG_L, REG_S, SUBU, ADDU, SRL, LI): Indent. + (LA, EH_FRAME_ALIGN, FDE_ADDR_BYTES): New preprocessor macros. + (FFI_DEFAULT_ABI): Set for n64 case. + (FFI_CLOSURES, FFI_TRAMPOLINE_SIZE): Define for n32 and n64 cases. + * src/mips/n32.S (ffi_call_N32): Add debug macros and labels for FDE. + (ffi_closure_N32): New function. + (.eh_frame): New section + * src/mips/o32.S: Clean up comments. 
+ (ffi_closure_O32): Pass ffi_closure parameter in $12. + * src/mips/ffi.c: Use FFI_MIPS_N32 instead of + _MIPS_SIM == _ABIN32 throughout. + (FFI_MIPS_STOP_HERE): New, use in place of + ffi_stop_here. + (ffi_prep_args): Use unsigned long to hold pointer values. Rewrite + to support n32/n64 ABIs. + (calc_n32_struct_flags): Rewrite. + (calc_n32_return_struct_flags): Remove unused variable. Reverse + position of flag bits. + (ffi_prep_cif_machdep): Rewrite n32 portion. + (ffi_call): Enable for n64. Add special handling for small structure + return values. + (ffi_prep_closure_loc): Add n32 and n64 support. + (ffi_closure_mips_inner_O32): Add cast to silence warning. + (copy_struct_N32, ffi_closure_mips_inner_N32): New functions. + +2007-08-08 David Daney + + * testsuite/libffi.call/ffitest.h (ffi_type_mylong): Remove definition. + * testsuite/libffi.call/cls_align_uint16.c (main): Use correct type + specifiers. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/cls_sint.c (main): Ditto. + * testsuite/libffi.call/nested_struct9.c (main): Ditto. + * testsuite/libffi.call/cls_20byte1.c (main): Ditto. + * testsuite/libffi.call/cls_9byte1.c (main): Ditto. + * testsuite/libffi.call/closure_fn1.c (main): Ditto. + * testsuite/libffi.call/closure_fn3.c (main): Ditto. + * testsuite/libffi.call/return_dbl2.c (main): Ditto. + * testsuite/libffi.call/cls_sshort.c (main): Ditto. + * testsuite/libffi.call/return_fl3.c (main): Ditto. + * testsuite/libffi.call/closure_fn5.c (main): Ditto. + * testsuite/libffi.call/nested_struct.c (main): Ditto. + * testsuite/libffi.call/nested_struct10.c (main): Ditto. + * testsuite/libffi.call/return_ll1.c (main): Ditto. + * testsuite/libffi.call/cls_8byte.c (main): Ditto. + * testsuite/libffi.call/cls_align_uint32.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint16.c (main): Ditto. + * testsuite/libffi.call/cls_20byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct2.c (main): Ditto. + * testsuite/libffi.call/cls_24byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct6.c (main): Ditto. + * testsuite/libffi.call/cls_uint.c (main): Ditto. + * testsuite/libffi.call/cls_12byte.c (main): Ditto. + * testsuite/libffi.call/cls_16byte.c (main): Ditto. + * testsuite/libffi.call/closure_fn0.c (main): Ditto. + * testsuite/libffi.call/cls_9byte2.c (main): Ditto. + * testsuite/libffi.call/closure_fn2.c (main): Ditto. + * testsuite/libffi.call/return_dbl1.c (main): Ditto. + * testsuite/libffi.call/closure_fn4.c (main): Ditto. + * testsuite/libffi.call/closure_fn6.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint32.c (main): Ditto. + +2007-08-07 Andrew Haley + + * src/x86/sysv.S (ffi_closure_raw_SYSV): Fix typo in previous + checkin. + +2007-08-06 Andrew Haley + + PR testsuite/32843 + * src/x86/sysv.S (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + +2007-08-02 David Daney + + * testsuite/libffi.call/return_ul.c (main): Define return type as + ffi_arg. Use proper printf conversion specifier. + +2007-07-30 Andrew Haley + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): in x86 case, add code for + signed/unsigned int8/16. + * src/x86/sysv.S (ffi_call_SYSV): Rewrite to: + Use a jump table. + Remove code to pop args from the stack after call. + Special-case signed/unsigned int8/16. + * testsuite/libffi.call/return_sc.c (main): Revert. 
+ +2007-07-26 Richard Guenther + + PR testsuite/32843 + * testsuite/libffi.call/return_sc.c (main): Verify call + result as signed char, not ffi_arg. + +2007-07-16 Rainer Orth + + * configure.ac (i?86-*-solaris2.1[0-9]): Set TARGET to X86_64. + * configure: Regenerate. + +2007-07-11 David Daney + + * src/mips/ffi.c: Don't include sys/cachectl.h. + (ffi_prep_closure_loc): Use __builtin___clear_cache() instead of + cacheflush(). + +2007-05-18 Aurelien Jarno + + * src/arm/ffi.c (ffi_prep_closure_loc): Renamed and ajusted + from (ffi_prep_closure): ... this. + (FFI_INIT_TRAMPOLINE): Adjust. + +2005-12-31 Phil Blundell + + * src/arm/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner, ffi_prep_closure): New, add closure support. + * src/arm/sysv.S(ffi_closure_SYSV): Likewise. + * src/arm/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-07-03 Andrew Haley + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + 
testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Enable for ARM. + +2007-07-05 H.J. Lu + + * aclocal.m4: Regenerated. + +2007-06-02 Paolo Bonzini + + * configure: Regenerate. + +2007-05-23 Steve Ellcey + + * Makefile.in: Regenerate. + * configure: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner,ffi_prep_closure): New, add closure support. + * src/m68k/sysv.S(ffi_closure_SYSV,ffi_closure_struct_SYSV): Likewise. + * src/m68k/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-05-10 Roman Zippel + + * configure.ac (HAVE_AS_CFI_PSEUDO_OP): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/m68k/sysv.S (CFI_STARTPROC,CFI_ENDPROC, + CFI_OFFSET,CFI_DEF_CFA): New macros. + (ffi_call_SYSV): Add callframe annotation. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_args,ffi_prep_cif_machdep): Fix + numerous test suite failures. + * src/m68k/sysv.S (ffi_call_SYSV): Likewise. + +2007-04-11 Paolo Bonzini + + * Makefile.am (EXTRA_DIST): Bring up to date. + * Makefile.in: Regenerate. + * src/frv/eabi.S: Remove RCS keyword. + +2007-04-06 Richard Henderson + + * configure.ac: Tidy target case. + (HAVE_LONG_DOUBLE): Allow the target to override. + * configure: Regenerate. + * include/ffi.h.in: Don't define ffi_type_foo if + LIBFFI_HIDE_BASIC_TYPES is defined. + (ffi_type_longdouble): If not HAVE_LONG_DOUBLE, define + to ffi_type_double. + * types.c (LIBFFI_HIDE_BASIC_TYPES): Define. + (FFI_TYPEDEF, ffi_type_void): Mark the data const. + (ffi_type_longdouble): Special case for Alpha. Don't define + if long double == double. + + * src/alpha/ffi.c (FFI_TYPE_LONGDOUBLE): Assert unique value. + (ffi_prep_cif_machdep): Handle it as the 128-bit type. + (ffi_call, ffi_closure_osf_inner): Likewise. + (ffi_closure_osf_inner): Likewise. Mark hidden. + (ffi_call_osf, ffi_closure_osf): Mark hidden. + * src/alpha/ffitarget.h (FFI_LAST_ABI): Tidy definition. + * src/alpha/osf.S (ffi_call_osf, ffi_closure_osf): Mark hidden. + (load_table): Handle 128-bit long double. + + * testsuite/libffi.call/float4.c: Add -mieee for alpha. + +2007-04-06 Tom Tromey + + PR libffi/31491: + * README: Fixed bug in example. + +2007-04-03 Jakub Jelinek + + * src/closures.c: Include sys/statfs.h. + (_GNU_SOURCE): Define on Linux. + (FFI_MMAP_EXEC_SELINUX): Define. + (selinux_enabled): New variable. + (selinux_enabled_check): New function. + (is_selinux_enabled): Define. + (dlmmap): Use it. + +2007-03-24 Uros Bizjak + + * testsuite/libffi.call/return_fl2.c (return_fl): Mark as static. + Use 'volatile float sum' to create sum of floats to avoid false + negative due to excess precision on ix86 targets. + (main): Ditto. + +2007-03-08 Alexandre Oliva + + * src/powerpc/ffi.c (flush_icache): Fix left-over from previous + patch. + (ffi_prep_closure_loc): Remove unneeded casts. Add needed ones. + +2007-03-07 Alexandre Oliva + + * include/ffi.h.in (ffi_closure_alloc, ffi_closure_free): New. + (ffi_prep_closure_loc): New. + (ffi_prep_raw_closure_loc): New. + (ffi_prep_java_raw_closure_loc): New. + * src/closures.c: New file. + * src/dlmalloc.c [FFI_MMAP_EXEC_WRIT] (struct malloc_segment): + Replace sflags with exec_offset. 
+ [FFI_MMAP_EXEC_WRIT] (mmap_exec_offset, add_segment_exec_offset, + sub_segment_exec_offset): New macros. + (get_segment_flags, set_segment_flags, check_segment_merge): New + macros. + (is_mmapped_segment, is_extern_segment): Use get_segment_flags. + (add_segment, sys_alloc, create_mspace, create_mspace_with_base, + destroy_mspace): Use new macros. + (sys_alloc): Silence warning. + * Makefile.am (libffi_la_SOURCES): Add src/closures.c. + * Makefile.in: Rebuilt. + * src/prep_cif [FFI_CLOSURES] (ffi_prep_closure): Implement in + terms of ffi_prep_closure_loc. + * src/raw_api.c (ffi_prep_raw_closure_loc): Renamed and adjusted + from... + (ffi_prep_raw_closure): ... this. Re-implement in terms of the + renamed version. + * src/java_raw_api (ffi_prep_java_raw_closure_loc): Renamed and + adjusted from... + (ffi_prep_java_raw_closure): ... this. Re-implement in terms of + the renamed version. + * src/alpha/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + * src/pa/ffi.c: Likewise. + * src/cris/ffi.c: Likewise. Adjust. + * src/frv/ffi.c: Likewise. + * src/ia64/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/x86/ffi.c: Likewise. + (FFI_INIT_TRAMPOLINE): Adjust. + (ffi_prep_raw_closure_loc): Renamed and adjusted from... + (ffi_prep_raw_closure): ... this. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + (flush_icache): Adjust. + +2007-03-07 Alexandre Oliva + + * src/dlmalloc.c: New file, imported version 2.8.3 of Doug + Lea's malloc. + +2007-03-01 Brooks Moses + + * Makefile.am: Add dummy install-pdf target. + * Makefile.in: Regenerate + +2007-02-13 Andreas Krebbel + + * src/s390/ffi.c (ffi_prep_args, ffi_prep_cif_machdep, + ffi_closure_helper_SYSV): Add long double handling. + +2007-02-02 Jakub Jelinek + + * src/powerpc/linux64.S (ffi_call_LINUX64): Move restore of r2 + immediately after bctrl instruction. + +2007-01-18 Alexandre Oliva + + * Makefile.am (all-recursive, install-recursive, + mostlyclean-recursive, clean-recursive, distclean-recursive, + maintainer-clean-recursive): Add missing targets. + * Makefile.in: Rebuilt. + +2006-12-14 Andreas Tobler + + * configure.ac: Add TARGET for x86_64-*-darwin*. + * Makefile.am (nodist_libffi_la_SOURCES): Add rules for 64-bit sources + for X86_DARWIN. + * src/x86/ffitarget.h: Set trampoline size for x86_64-*-darwin*. + * src/x86/darwin64.S: New file for x86_64-*-darwin* support. + * configure: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + * testsuite/libffi.special/unwindtest_ffi_call.cc: New test case for + ffi_call only. + +2006-12-13 Andreas Tobler + + * aclocal.m4: Regenerate with aclocal -I .. as written in the + Makefile.am. + +2006-10-31 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (darwin_adjust_aggregate_sizes): New. + (ffi_prep_cif_machdep): Call darwin_adjust_aggregate_sizes for + Darwin. + * testsuite/libffi.call/nested_struct4.c: Remove Darwin XFAIL. + * testsuite/libffi.call/nested_struct6.c: Remove Darwin XFAIL. + +2006-10-10 Paolo Bonzini + Sandro Tolaini + + * configure.ac [i*86-*-darwin*]: Set X86_DARWIN symbol and + conditional. + * configure: Regenerated. + * Makefile.am (nodist_libffi_la_SOURCES) [X86_DARWIN]: New case. + (EXTRA_DIST): Add src/x86/darwin.S. + * Makefile.in: Regenerated. 
+ * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + + * src/x86/ffi.c (ffi_prep_cif_machdep) [X86_DARWIN]: Treat like + X86_WIN32, and additionally align stack to 16 bytes. + * src/x86/darwin.S: New, based on sysv.S. + * src/prep_cif.c (ffi_prep_cif) [X86_DARWIN]: Align > 8-byte structs. + +2006-09-12 David Daney + + PR libffi/23935 + * include/Makefile.am: Install both ffi.h and ffitarget.h in + $(libdir)/gcc/$(target_alias)/$(gcc_version)/include. + * aclocal.m4: Regenerated for automake 1.9.6. + * Makefile.in: Regenerated. + * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + +2006-08-17 Andreas Tobler + + * include/ffi_common.h (struct): Revert accidental commit. + +2006-08-15 Andreas Tobler + + * include/ffi_common.h: Remove lint directives. + * include/ffi.h.in: Likewise. + +2006-07-25 Torsten Schoenfeld + + * include/ffi.h.in (ffi_type_ulong, ffi_type_slong): Define correctly + for 32-bit architectures. + * testsuite/libffi.call/return_ul.c: New test case. + +2006-07-19 David Daney + + * testsuite/libffi.call/closure_fn6.c: Remove xfail for mips, + xfail remains for mips64. + +2006-05-23 Carlos O'Donell + + * Makefile.am: Add install-html target. Add install-html to .PHONY + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2006-05-18 John David Anglin + + * pa/ffi.c (ffi_prep_args_pa32): Load floating point arguments from + stack slot. + +2006-04-22 Andreas Tobler + + * README: Remove notice about 'Crazy Comments'. + * src/debug.c: Remove lint directives. Cleanup white spaces. + * src/java_raw_api.c: Likewise. + * src/prep_cif.c: Likewise. + * src/raw_api.c: Likewise. + * src/ffitest.c: Delete. No longer needed, all test cases migrated + to the testsuite. + * src/arm/ffi.c: Remove lint directives. + * src/m32r/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + +2006-04-13 Andreas Tobler + + * src/pa/hpux32.S: Correct unwind offset calculation for + ffi_closure_pa32. + * src/pa/linux.S: Likewise. + +2006-04-12 James E Wilson + + PR libgcj/26483 + * src/ia64/ffi.c (stf_spill, ldf_fill): Rewrite as macros. + (hfa_type_load): Call stf_spill. + (hfa_type_store): Call ldf_fill. + (ffi_call): Adjust calls to above routines. Add local temps for + macro result. + +2006-04-10 Matthias Klose + + * testsuite/lib/libffi-dg.exp (libffi-init): Recognize multilib + directory names containing underscores. + +2006-04-07 James E Wilson + + * testsuite/libffi.call/float4.c: New testcase. + +2006-04-05 John David Anglin + Andreas Tobler + + * Makefile.am: Add PA_HPUX port. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add PA_HPUX rules. + * configure: Regenerate. + * src/pa/ffitarget.h: Rename linux target to PA_LINUX. + Add PA_HPUX and PA64_HPUX. + Rename FFI_LINUX ABI to FFI_PA32 ABI. + (FFI_TRAMPOLINE_SIZE): Define for 32-bit HP-UX targets. + (FFI_TYPE_SMALL_STRUCT2): Define. + (FFI_TYPE_SMALL_STRUCT4): Likewise. + (FFI_TYPE_SMALL_STRUCT8): Likewise. + (FFI_TYPE_SMALL_STRUCT3): Redefine. + (FFI_TYPE_SMALL_STRUCT5): Likewise. + (FFI_TYPE_SMALL_STRUCT6): Likewise. + (FFI_TYPE_SMALL_STRUCT7): Likewise. 
+ * src/pa/ffi.c (ROUND_DOWN): Delete. + (fldw, fstw, fldd, fstd): Use '__asm__'. + (ffi_struct_type): Add support for FFI_TYPE_SMALL_STRUCT2, + FFI_TYPE_SMALL_STRUCT4 and FFI_TYPE_SMALL_STRUCT8. + (ffi_prep_args_LINUX): Rename to ffi_prep_args_pa32. Update comment. + Simplify incrementing of stack slot variable. Change type of local + 'n' to unsigned int. + (ffi_size_stack_LINUX): Rename to ffi_size_stack_pa32. Handle long + double on PA_HPUX. + (ffi_prep_cif_machdep): Likewise. + (ffi_call): Likewise. + (ffi_closure_inner_LINUX): Rename to ffi_closure_inner_pa32. Change + return type to ffi_status. Simplify incrementing of stack slot + variable. Only copy floating point argument registers when PA_LINUX + is true. Reformat debug statement. + Add support for FFI_TYPE_SMALL_STRUCT2, FFI_TYPE_SMALL_STRUCT4 and + FFI_TYPE_SMALL_STRUCT8. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Add 'extern' to + declaration. + (ffi_prep_closure): Make linux trampoline conditional on PA_LINUX. + Add nops to cache flush. Add trampoline for PA_HPUX. + * src/pa/hpux32.S: New file. + * src/pa/linux.S (ffi_call_LINUX): Rename to ffi_call_pa32. Rename + ffi_prep_args_LINUX to ffi_prep_args_pa32. + Localize labels. Add support for 2, 4 and 8-byte small structs. Handle + unaligned destinations in 3, 5, 6 and 7-byte small structs. Order + argument type checks so that common argument types appear first. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Rename + ffi_closure_inner_LINUX to ffi_closure_inner_pa32. + +2006-03-24 Alan Modra + + * src/powerpc/ffitarget.h (enum ffi_abi): Add FFI_LINUX. Default + for 32-bit using IBM extended double format. Fix FFI_LAST_ABI. + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Handle linux variant of + FFI_TYPE_LONGDOUBLE. + (ffi_prep_args64): Assert using IBM extended double. + (ffi_prep_cif_machdep): Don't munge FFI_TYPE_LONGDOUBLE type. + Handle FFI_LINUX FFI_TYPE_LONGDOUBLE return and args. + (ffi_call): Handle FFI_LINUX. + (ffi_closure_helper_SYSV): Non FFI_LINUX long double return needs + gpr3 return pointer as for struct return. Handle FFI_LINUX + FFI_TYPE_LONGDOUBLE return and args. Don't increment "nf" + unnecessarily. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Load both f1 and f2 + for FFI_TYPE_LONGDOUBLE. Move epilogue insns into case table. + Don't use r6 as pointer to results, instead use sp offset. Don't + make a special call to load lr with case table address, instead + use offset from previous call. + * src/powerpc/sysv.S (ffi_call_SYSV): Save long double return. + * src/powerpc/linux64.S (ffi_call_LINUX64): Simplify long double + return. + +2006-03-15 Kaz Kojima + + * src/sh64/ffi.c (ffi_prep_cif_machdep): Handle float arguments + passed with FP registers correctly. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S: Likewise. + +2006-03-01 Andreas Tobler + + * testsuite/libffi.special/unwindtest.cc (closure_test_fn): Mark cif, + args and userdata unused. + (closure_test_fn1): Mark cif and userdata unused. + (main): Remove unused res. + +2006-02-28 Andreas Tobler + + * testsuite/libffi.call/call.exp: Adjust FSF address. Add test runs for + -O2, -O3, -Os and the warning flags -W -Wall. + * testsuite/libffi.special/special.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Add an __UNUSED__ macro to mark + unused parameter unused for gcc or else do nothing. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.call/cls_12byte.c (cls_struct_12byte_gn): Mark cif + and userdata unused. 
+ * testsuite/libffi.call/cls_16byte.c (cls_struct_16byte_gn): Likewise. + * testsuite/libffi.call/cls_18byte.c (cls_struct_18byte_gn): Likewise. + * testsuite/libffi.call/cls_19byte.c (cls_struct_19byte_gn): Likewise. + * testsuite/libffi.call/cls_1_1byte.c (cls_struct_1_1byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte1.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_24byte.c (cls_struct_24byte_gn): Likewise. + * testsuite/libffi.call/cls_2byte.c (cls_struct_2byte_gn): Likewise. + * testsuite/libffi.call/cls_3_1byte.c (cls_struct_3_1byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte1.c (cls_struct_3byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte2.c (cls_struct_3byte_gn1): Likewise. + * testsuite/libffi.call/cls_4_1byte.c (cls_struct_4_1byte_gn): Likewise. + * testsuite/libffi.call/cls_4byte.c (cls_struct_4byte_gn): Likewise. + * testsuite/libffi.call/cls_5_1_byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_5byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_64byte.c (cls_struct_64byte_gn): Likewise. + * testsuite/libffi.call/cls_6_1_byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_6byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_7_1_byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_7byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_8byte.c (cls_struct_8byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte1.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte2.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_align_double.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_float.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_longdouble.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_pointer.c (cls_struct_align_fn): Cast + void* to avoid compiler warning. + (main): Likewise. + (cls_struct_align_gn): Mark cif and userdata unused. + * testsuite/libffi.call/cls_align_sint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint64.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_double.c (cls_ret_double_fn): Likewise. + * testsuite/libffi.call/cls_float.c (cls_ret_float_fn): Likewise. + * testsuite/libffi.call/cls_multi_schar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_uchar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_schar.c (cls_ret_schar_fn): Mark cif and + userdata unused. 
+ (cls_ret_schar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sint.c (cls_ret_sint_fn): Mark cif and + userdata unused. + (cls_ret_sint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sshort.c (cls_ret_sshort_fn): Mark cif and + userdata unused. + (cls_ret_sshort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uchar.c (cls_ret_uchar_fn): Mark cif and + userdata unused. + (cls_ret_uchar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Mark cif and + userdata unused. + (cls_ret_uint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_ulonglong.c (cls_ret_ulonglong_fn): Mark cif + and userdata unused. + * testsuite/libffi.call/cls_ushort.c (cls_ret_ushort_fn): Mark cif and + userdata unused. + (cls_ret_ushort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/float.c (floating): Remove unused parameter e. + * testsuite/libffi.call/float1.c (main): Remove unused variable i. + Cleanup white spaces. + * testsuite/libffi.call/negint.c (checking): Remove unused variable i. + * testsuite/libffi.call/nested_struct.c (cls_struct_combined_gn): Mark + cif and userdata unused. + * testsuite/libffi.call/nested_struct1.c (cls_struct_combined_gn): + Likewise. + * testsuite/libffi.call/nested_struct10.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct2.c (B_fn): Adjust printf + formatters to silence gcc. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct3.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct4.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct5.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct6.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct7.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct8.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct9.c (B_gn): Likewise. + * testsuite/libffi.call/problem1.c (stub): Likewise. + * testsuite/libffi.call/pyobjc-tc.c (main): Cast the result to silence + gcc. + * testsuite/libffi.call/return_fl2.c (return_fl): Add the note mentioned + in the last commit for this test case in the test case itself. + * testsuite/libffi.call/closure_fn0.c (closure_test_fn0): Mark cif as + unused. + * testsuite/libffi.call/closure_fn1.c (closure_test_fn1): Likewise. + * testsuite/libffi.call/closure_fn2.c (closure_test_fn2): Likewise. + * testsuite/libffi.call/closure_fn3.c (closure_test_fn3): Likewise. + * testsuite/libffi.call/closure_fn4.c (closure_test_fn0): Likewise. + * testsuite/libffi.call/closure_fn5.c (closure_test_fn5): Likewise. + * testsuite/libffi.call/closure_fn6.c (closure_test_fn0): Likewise. + +2006-02-22 Kaz Kojima + + * src/sh/sysv.S: Fix register numbers in the FDE for + ffi_closure_SYSV. + +2006-02-20 Andreas Tobler + + * testsuite/libffi.call/return_fl2.c (return_fl): Remove static + declaration to avoid a false negative on ix86. See PR323. + +2006-02-18 Kaz Kojima + + * src/sh/ffi.c (ffi_closure_helper_SYSV): Remove unused variable + and cast integer to void * if needed. Update the pointer to + the FP register saved area correctly. + +2006-02-17 Andreas Tobler + + * testsuite/libffi.call/nested_struct6.c: XFAIL this test until PR25630 + is fixed. + * testsuite/libffi.call/nested_struct4.c: Likewise. 
+ +2006-02-16 Andreas Tobler + + * testsuite/libffi.call/return_dbl.c: New test case. + * testsuite/libffi.call/return_dbl1.c: Likewise. + * testsuite/libffi.call/return_dbl2.c: Likewise. + * testsuite/libffi.call/return_fl.c: Likewise. + * testsuite/libffi.call/return_fl1.c: Likewise. + * testsuite/libffi.call/return_fl2.c: Likewise. + * testsuite/libffi.call/return_fl3.c: Likewise. + * testsuite/libffi.call/closure_fn6.c: Likewise. + + * testsuite/libffi.call/nested_struct2.c: Remove ffi_type_mylong + definition. + * testsuite/libffi.call/ffitest.h: Add ffi_type_mylong definition + here to be used by other test cases too. + + * testsuite/libffi.call/nested_struct10.c: New test case. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + +2006-01-21 Andreas Tobler + + * configure.ac: Enable libffi for sparc64-*-freebsd*. + * configure: Rebuilt. + +2006-01-18 Jakub Jelinek + + * src/powerpc/sysv.S (smst_two_register): Don't call __ashldi3, + instead do the shifting inline. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't compute %r5 + shift count unconditionally. Simplify load sequences for 1, 2, 3, 4 + and 8 byte structs, for the remaining struct sizes don't call + __lshrdi3, instead do the shifting inline. + +2005-12-07 Thiemo Seufer + + * src/mips/ffitarget.h: Remove obsolete sgidefs.h include. Add + missing parentheses. + * src/mips/o32.S (ffi_call_O32): Code formatting. Define + and use A3_OFF, FP_OFF, RA_OFF. Micro-optimizations. + (ffi_closure_O32): Likewise, but with newly defined A3_OFF2, + A2_OFF2, A1_OFF2, A0_OFF2, RA_OFF2, FP_OFF2, S0_OFF2, GP_OFF2, + V1_OFF2, V0_OFF2, FA_1_1_OFF2, FA_1_0_OFF2, FA_0_1_OFF2, + FA_0_0_OFF2. + * src/mips/ffi.c (ffi_prep_args): Code formatting. Fix + endianness bugs. + (ffi_prep_closure): Improve trampoline instruction scheduling. + (ffi_closure_mips_inner_O32): Fix endianness bugs. + +2005-12-03 Alan Modra + + * src/powerpc/ffi.c: Formatting. + (ffi_prep_args_SYSV): Avoid possible aliasing problems by using unions. + (ffi_prep_args64): Likewise. + +2005-09-30 Geoffrey Keating + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): For + darwin, use -shared-libgcc not -lgcc_s, and explain why. + +2005-09-26 Tom Tromey + + * testsuite/libffi.call/float1.c (value_type): New typedef. + (CANARY): New define. + (main): Check for result buffer overflow. + * src/powerpc/linux64.S: Handle linux64 long double returns. + * src/powerpc/ffi.c (FLAG_RETURNS_128BITS): New constant. + (ffi_prep_cif_machdep): Handle linux64 long double returns. + +2005-08-25 Alan Modra + + PR target/23404 + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Correct placement of stack + homed fp args. + (ffi_status ffi_prep_cif_machdep): Correct stack sizing for same. + +2005-08-11 Jakub Jelinek + + * configure.ac (HAVE_HIDDEN_VISIBILITY_ATTRIBUTE): New test. + (AH_BOTTOM): Add FFI_HIDDEN definition. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * src/powerpc/ffi.c (hidden): Remove. + (ffi_closure_LINUX64, ffi_prep_args64, ffi_call_LINUX64, + ffi_closure_helper_LINUX64): Use FFI_HIDDEN instead of hidden. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64, + .ffi_closure_LINUX64): Use FFI_HIDDEN instead of .hidden. 
+ * src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV): Remove, + add FFI_HIDDEN to its prototype. + (ffi_closure_SYSV_inner): New. + * src/x86/sysv.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + * src/x86/win32.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + +2005-08-10 Alfred M. Szmidt + + PR libffi/21819: + * configure: Rebuilt. + * configure.ac: Handle i*86-*-gnu*. + +2005-08-09 Jakub Jelinek + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Use + DW_CFA_offset_extended_sf rather than + DW_CFA_GNU_negative_offset_extended. + * src/powerpc/sysv.S (ffi_call_SYSV): Likewise. + +2005-07-22 SUGIOKA Toshinobu + + * src/sh/sysv.S (ffi_call_SYSV): Stop argument popping correctly + on sh3. + (ffi_closure_SYSV): Change the stack layout for sh3 struct argument. + * src/sh/ffi.c (ffi_prep_args): Fix sh3 argument copy, when it is + partially on register. + (ffi_closure_helper_SYSV): Likewise. + (ffi_prep_cif_machdep): Don't set too many cif->flags. + +2005-07-20 Kaz Kojima + + * src/sh/ffi.c (ffi_call): Handle small structures correctly. + Remove empty line. + * src/sh64/ffi.c (simple_type): Remove. + (return_type): Handle small structures correctly. + (ffi_prep_args): Likewise. + (ffi_call): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S (ffi_call_SYSV): Handle 1, 2 and 4-byte return. + Emit position independent code if PIC and remove wrong datalabel + prefixes from EH data. + +2005-07-19 Andreas Tobler + + * Makefile.am (nodist_libffi_la_SOURCES): Add POWERPC_FREEBSD. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add POWERPC_FREEBSD rules. + * configure: Regenerate. + * src/powerpc/ffitarget.h: Add POWERPC_FREEBSD rules. + (FFI_SYSV_TYPE_SMALL_STRUCT): Define. + * src/powerpc/ffi.c: Add flags to handle small structure returns + in ffi_call_SYSV. + (ffi_prep_cif_machdep): Handle small structures for SYSV 4 ABI. + Aka FFI_SYSV. + (ffi_closure_helper_SYSV): Likewise. + * src/powerpc/ppc_closure.S: Add return types for small structures. + * src/powerpc/sysv.S: Add bits to handle small structures for + final SYSV 4 ABI. + +2005-07-10 Andreas Tobler + + * testsuite/libffi.call/cls_5_1_byte.c: New test file. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + +2005-07-05 Randolph Chung + + * src/pa/ffi.c (ffi_struct_type): Rename FFI_TYPE_SMALL_STRUCT1 + as FFI_TYPE_SMALL_STRUCT3. Break out handling for 5-7 byte + structures. Kill compilation warnings. + (ffi_closure_inner_LINUX): Print return values as hex in debug + message. Rename FFI_TYPE_SMALL_STRUCT1 as FFI_TYPE_SMALL_STRUCT3. + Properly handle 5-7 byte structure returns. + * src/pa/ffitarget.h (FFI_TYPE_SMALL_STRUCT1) + (FFI_TYPE_SMALL_STRUCT2): Remove. + (FFI_TYPE_SMALL_STRUCT3, FFI_TYPE_SMALL_STRUCT5) + (FFI_TYPE_SMALL_STRUCT6, FFI_TYPE_SMALL_STRUCT7): Define. + * src/pa/linux.S: Mark source file as using PA1.1 assembly. + (checksmst1, checksmst2): Remove. + (checksmst3): Optimize handling of 3-byte struct returns. + (checksmst567): Properly handle 5-7 byte struct returns. + +2005-06-15 Rainer Orth + + PR libgcj/21943 + * src/mips/n32.S: Enforce PIC code. + * src/mips/o32.S: Likewise. + +2005-06-15 Rainer Orth + + * configure.ac: Treat i*86-*-solaris2.10 and up as X86_64. + * configure: Regenerate. + +2005-06-01 Alan Modra + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't use JUMPTARGET + to call ffi_closure_helper_SYSV. Append @local instead. 
+ * src/powerpc/sysv.S (ffi_call_SYSV): Likewise for ffi_prep_args_SYSV. + +2005-05-17 Kelley Cook + + * configure.ac: Use AC_C_BIGENDIAN instead of AC_C_BIGENDIAN_CROSS. + Use AC_CHECK_SIZEOF instead of AC_COMPILE_CHECK_SIZEOF. + * Makefile.am (ACLOCAL_AMFLAGS): Remove -I ../config. + * aclocal.m4, configure, fficonfig.h.in, Makefile.in, + include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2005-05-09 Mike Stump + + * configure: Regenerate. + +2005-05-08 Richard Henderson + + PR libffi/21285 + * src/alpha/osf.S: Update unwind into to match code. + +2005-05-04 Andreas Degert + Richard Henderson + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Save sse-used flag in + bit 11 of flags. + (ffi_call): Mask return type field. Pass ssecount to ffi_call_unix64. + (ffi_prep_closure): Set carry bit if sse-used flag set. + * src/x86/unix64.S (ffi_call_unix64): Add ssecount argument. + Only load sse registers if ssecount non-zero. + (ffi_closure_unix64): Only save sse registers if carry set on entry. + +2005-04-29 Ralf Corsepius + + * configure.ac: Add i*86-*-rtems*, sparc*-*-rtems*, + powerpc-*rtems*, arm*-*-rtems*, sh-*-rtems*. + * configure: Regenerate. + +2005-04-20 Hans-Peter Nilsson + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): In regsub use, + have Tcl8.3-compatible intermediate variable. + +2005-04-18 Simon Posnjak + Hans-Peter Nilsson + + * Makefile.am: Add CRIS support. + * configure.ac: Likewise. + * Makefile.in, configure, testsuite/Makefile.in, + include/Makefile.in: Regenerate. + * src/cris: New directory. + * src/cris/ffi.c, src/cris/sysv.S, src/cris/ffitarget.h: New files. + * src/prep_cif.c (ffi_prep_cif): Wrap in #ifndef __CRIS__. + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): Replace \n with + \r?\n in output tests. + +2005-04-12 Mike Stump + + * configure: Regenerate. + +2005-03-30 Hans Boehm + + * src/ia64/ffitarget.h (ffi_arg): Use long long instead of DI. + +2005-03-30 Steve Ellcey + + * src/ia64/ffitarget.h (ffi_arg) ADD DI attribute. + (ffi_sarg) Ditto. + * src/ia64/unix.S (ffi_closure_unix): Extend gp + to 64 bits in ILP32 mode. + Load 64 bits even for short data. + +2005-03-23 Mike Stump + + * src/powerpc/darwin.S: Update for -m64 multilib. + * src/powerpc/darwin_closure.S: Likewise. + +2005-03-21 Zack Weinberg + + * configure.ac: Do not invoke TL_AC_GCC_VERSION. + Do not set tool_include_dir. + * aclocal.m4, configure, Makefile.in, testsuite/Makefile.in: + Regenerate. + * include/Makefile.am: Set gcc_version and toollibffidir. + * include/Makefile.in: Regenerate. + +2005-02-22 Andrew Haley + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Bump alignment to + odd-numbered register pairs for 64-bit integer types. + +2005-02-23 Andreas Tobler + + PR libffi/20104 + * testsuite/libffi.call/return_ll1.c: New test case. + +2005-02-11 Janis Johnson + + * testsuite/libffi.call/cls_align_longdouble.c: Remove dg-options. + * testsuite/libffi.call/float.c: Ditto. + * testsuite/libffi.call/float2.c: Ditto. + * testsuite/libffi.call/float3.c: Ditto. + +2005-02-08 Andreas Tobler + + * src/frv/ffitarget.h: Remove PPC stuff which does not belong to frv. + +2005-01-12 Eric Botcazou + + * testsuite/libffi.special/special.exp (cxx_options): Add + -shared-libgcc. + +2004-12-31 Richard Henderson + + * src/types.c (FFI_AGGREGATE_TYPEDEF): Remove. + (FFI_TYPEDEF): Rename from FFI_INTEGRAL_TYPEDEF. Replace size and + offset parameters with a type parameter; deduce size and structure + alignment. Update all users. 
+ +2004-12-31 Richard Henderson + + * src/types.c (FFI_TYPE_POINTER): Define with sizeof. + (FFI_TYPE_LONGDOUBLE): Fix for ia64. + * src/ia64/ffitarget.h (struct ffi_ia64_trampoline_struct): Move + into ffi_prep_closure. + * src/ia64/ia64_flags.h, src/ia64/ffi.c, src/ia64/unix.S: Rewrite + from scratch. + +2004-12-27 Richard Henderson + + * src/x86/unix64.S: Fix typo in unwind info. + +2004-12-25 Richard Henderson + + * src/x86/ffi64.c (struct register_args): Rename from stackLayout. + (enum x86_64_reg_class): Add X86_64_COMPLEX_X87_CLASS. + (merge_classes): Check for it. + (SSE_CLASS_P): New. + (classify_argument): Pass byte_offset by value; perform all updates + inside struct case. + (examine_argument): Add classes argument; handle + X86_64_COMPLEX_X87_CLASS. + (ffi_prep_args): Merge into ... + (ffi_call): ... here. Share stack frame with ffi_call_unix64. + (ffi_prep_cif_machdep): Setup cif->flags for proper structure return. + (ffi_fill_return_value): Remove. + (ffi_prep_closure): Remove dead assert. + (ffi_closure_unix64_inner): Rename from ffi_closure_UNIX64_inner. + Rewrite to use struct register_args instead of va_list. Create + flags for handling structure returns. + * src/x86/unix64.S: Remove dead strings. + (ffi_call_unix64): Rename from ffi_call_UNIX64. Rewrite to share + stack frame with ffi_call. Handle structure returns properly. + (float2sse, floatfloat2sse, double2sse): Remove. + (sse2float, sse2double, sse2floatfloat): Remove. + (ffi_closure_unix64): Rename from ffi_closure_UNIX64. Rewrite + to handle structure returns properly. + +2004-12-08 David Edelsohn + + * Makefile.am (AM_MAKEFLAGS): Remove duplicate LIBCFLAGS and + PICFLAG. + * Makefile.in: Regenerated. + +2004-12-02 Richard Sandiford + + * configure.ac: Use TL_AC_GCC_VERSION to set gcc_version. + * configure, aclocal.m4, Makefile.in: Regenerate. + * include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2004-11-29 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-25 Kelley Cook + + * configure: Regenerate for libtool reversion. + +2004-11-24 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-23 John David Anglin + + * testsuite/lib/libffi-dg.exp: Use new procs in target-libpath.exp. + +2004-11-23 Richard Sandiford + + * src/mips/o32.S (ffi_call_O32, ffi_closure_O32): Use jalr instead + of jal. Use an absolute encoding for the frame information. + +2004-11-23 Kelley Cook + + * Makefile.am: Remove no-dependencies. Add ACLOCAL_AMFLAGS. + * acinclude.m4: Delete logic for sincludes. + * aclocal.m4, Makefile.in, configure: Regenerate. + * include/Makefile: Likewise. + * testsuite/Makefile: Likewise. + +2004-11-22 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_closure): Align doubles and 64-bit integers + on a 8-byte boundary. + * src/sparc/v8.S (ffi_closure_v8): Reserve frame space for arguments. + +2004-10-27 Richard Earnshaw + + * src/arm/ffi.c (ffi_prep_cif_machdep): Handle functions that return + long long values. Round stack allocation to a multiple of 8 bytes + for ATPCS compatibility. + * src/arm/sysv.S (ffi_call_SYSV): Rework to avoid use of APCS register + names. Handle returning long long types. Add Thumb and interworking + support. Improve soft-float code. + +2004-10-27 Richard Earnshaw + + * testsuite/lib/libffi-db.exp (load_gcc_lib): New function. + (libffi_exit): New function. + (libffi_init): Build the testglue wrapper if needed. + +2004-10-25 Eric Botcazou + + PR other/18138 + * testsuite/lib/libffi-dg.exp: Accept more than one multilib libgcc. 
+ +2004-10-25 Kazuhiro Inaoka + + * src/m32r/libffitarget.h (FFI_CLOSURES): Set to 0. + +2004-10-20 Kaz Kojima + + * src/sh/sysv.S (ffi_call_SYSV): Don't align for double data. + * testsuite/libffi.call/float3.c: New test case. + +2004-10-18 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure): Set T bit in trampoline for + the function returning a structure pointed with R2. + * src/sh/sysv.S (ffi_closure_SYSV): Use R2 as the pointer to + the structure return value if T bit set. Emit position + independent code and EH data if PIC. + +2004-10-13 Kazuhiro Inaoka + + * Makefile.am: Add m32r support. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * configure: Regenerate. + * src/types.c: Add m32r port to FFI_INTERNAL_TYPEDEF + (uint64, sint64, double, longdouble) + * src/m32r: New directory. + * src/m32r/ffi.c: New file. + * src/m32r/sysv.S: Likewise. + * src/m32r/ffitarget.h: Likewise. + +2004-10-02 Kaz Kojima + + * testsuite/libffi.call/negint.c: New test case. + +2004-09-14 H.J. Lu + + PR libgcj/17465 + * testsuite/lib/libffi-dg.exp: Don't use global ld_library_path. + Set up LD_LIBRARY_PATH, SHLIB_PATH, LD_LIBRARYN32_PATH, + LD_LIBRARY64_PATH, LD_LIBRARY_PATH_32, LD_LIBRARY_PATH_64 and + DYLD_LIBRARY_PATH. + +2004-09-05 Andreas Tobler + + * testsuite/libffi.call/many_win32.c: Remove whitespaces. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/return_ll.c: Remove unused var. Cleanup + whitespaces. + * testsuite/libffi.call/return_sc.c: Likewise. + * testsuite/libffi.call/return_uc.c: Likewise. + +2004-09-05 Andreas Tobler + + * src/powerpc/darwin.S: Fix comments and indentation. + * src/powerpc/darwin_closure.S: Likewise. + +2004-09-02 Andreas Tobler + + * src/powerpc/ffi_darwin.c: Add flag for longdouble return values. + (ffi_prep_args): Handle longdouble arguments. + (ffi_prep_cif_machdep): Set flags for longdouble. Calculate space for + longdouble. + (ffi_closure_helper_DARWIN): Add closure handling for longdouble. + * src/powerpc/darwin.S (_ffi_call_DARWIN): Add handling of longdouble + values. + * src/powerpc/darwin_closure.S (_ffi_closure_ASM): Likewise. + * src/types.c: Defined longdouble size and alignment for darwin. + +2004-09-02 Andreas Tobler + + * src/powerpc/aix.S: Remove whitespaces. + * src/powerpc/aix_closure.S: Likewise. + * src/powerpc/asm.h: Likewise. + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffitarget.h: Likewise. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + +2004-08-30 Anthony Green + + * Makefile.am: Add frv support. + * Makefile.in, testsuite/Makefile.in: Rebuilt. + * configure.ac: Read configure.host. + * configure.in: Read configure.host. + * configure.host: New file. frv-elf needs libgloss. + * include/ffi.h.in: Force ffi_closure to have a nice big (8) + alignment. This is needed to frv and shouldn't harm the others. + * include/ffi_common.h (ALIGN_DOWN): New macro. + * src/frv/ffi.c, src/frv/ffitarget.h, src/frv/eabi.S: New files. + +2004-08-24 David Daney + + * testsuite/libffi.call/closure_fn0.c: Xfail mips64* instead of mips*. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise.
+ * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_sshort.c: Likewise. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise and set return value + to zero. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + +2004-08-23 David Daney + + PR libgcj/13141 + * src/mips/ffitarget.h (FFI_O32_SOFT_FLOAT): New ABI. + * src/mips/ffi.c (ffi_prep_args): Fix alignment calculation. + (ffi_prep_cif_machdep): Handle FFI_O32_SOFT_FLOAT floating point + parameters and return types. + (ffi_call): Handle FFI_O32_SOFT_FLOAT ABI. + (ffi_prep_closure): Ditto. + (ffi_closure_mips_inner_O32): Handle FFI_O32_SOFT_FLOAT ABI, fix + alignment calculations. + * src/mips/o32.S (ffi_closure_O32): Don't use floating point + instructions if FFI_O32_SOFT_FLOAT, make stack frame ABI compliant. + +2004-08-14 Casey Marshall + + * src/mips/ffi.c (ffi_pref_cif_machdep): set `cif->flags' to + contain `FFI_TYPE_UINT64' as return type for any 64-bit + integer (O32 ABI only). + (ffi_prep_closure): new function. + (ffi_closure_mips_inner_O32): new function. 
+ * src/mips/ffitarget.h: Define `FFI_CLOSURES' and + `FFI_TRAMPOLINE_SIZE' appropriately if the ABI is o32. + * src/mips/o32.S (ffi_call_O32): add labels for .eh_frame. Return + 64 bit integers correctly. + (ffi_closure_O32): new function. + Added DWARF-2 unwind info for both functions. + +2004-08-10 Andrew Haley + + * src/x86/ffi64.c (ffi_prep_args ): 8-align all stack arguments. + +2004-08-01 Robert Millan + + * configure.ac: Detect knetbsd-gnu and kfreebsd-gnu. + * configure: Regenerate. + +2004-07-30 Maciej W. Rozycki + + * acinclude.m4 (AC_FUNC_MMAP_BLACKLIST): Check for + and mmap() explicitly instead of relying on preset autoconf cache + variables. + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2004-07-11 Ulrich Weigand + + * src/s390/ffi.c (ffi_prep_args): Fix C aliasing violation. + (ffi_check_float_struct): Remove unused prototype. + +2004-06-30 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (flush_icache): ';' is a comment + character on Darwin, use '\n\t' instead. + +2004-06-26 Matthias Klose + + * libtool-version: Fix typo in revision/age. + +2004-06-17 Matthias Klose + + * libtool-version: New. + * Makefile.am (libffi_la_LDFLAGS): Use -version-info for soname. + * Makefile.in: Regenerate. + +2004-06-15 Paolo Bonzini + + * Makefile.am: Remove useless multilib rules. + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate with automake 1.8.5. + * configure.ac: Remove useless multilib configury. + * configure: Regenerate. + +2004-06-15 Paolo Bonzini + + * .cvsignore: New file. + +2004-06-10 Jakub Jelinek + + * src/ia64/unix.S (ffi_call_unix): Insert group barrier break + fp_done. + (ffi_closure_UNIX): Fix f14/f15 adjustment if FLOAT_SZ is ever + changed from 8. + +2004-06-06 Sean McNeil + + * configure.ac: Add x86_64-*-freebsd* support. + * configure: Regenerate. + +2004-04-26 Joe Buck + + Bug 15093 + * configure.ac: Test for existence of mmap and sys/mman.h before + checking blacklist. Fix suggested by Jim Wilson. + * configure: Regenerate. + +2004-04-26 Matt Austern + + * src/powerpc/darwin.S: Go through a non-lazy pointer for initial + FDE location. + * src/powerpc/darwin_closure.S: Likewise. + +2004-04-24 Andreas Tobler + + * testsuite/libffi.call/cls_multi_schar.c (main): Fix initialization + error. Reported by Thomas Heller . + * testsuite/libffi.call/cls_multi_sshort.c (main): Likewise. + * testsuite/libffi.call/cls_multi_ushort.c (main): Likewise. + +2004-03-20 Matthias Klose + + * src/pa/linux.S: Fix typo. + +2004-03-19 Matthias Klose + + * Makefile.am: Update. + * Makefile.in: Regenerate. + * src/pa/ffi.h.in: Remove. + * src/pa/ffitarget.h: New file. + +2004-02-10 Randolph Chung + + * Makefile.am: Add PA support. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * configure.ac: Add PA target. + * configure: Regenerate. + * src/pa/ffi.c: New file. + * src/pa/ffi.h.in: Add PA support. + * src/pa/linux.S: New file. + * prep_cif.c: Add PA support. + +2004-03-16 Hosaka Yuji + + * src/types.c: Fix alignment size of X86_WIN32 case int64 and + double. + * src/x86/ffi.c (ffi_prep_args): Replace ecif->cif->rtype->type + with ecif->cif->flags. + (ffi_call, ffi_prep_incoming_args_SYSV): Replace cif->rtype->type + with cif->flags. + (ffi_prep_cif_machdep): Add X86_WIN32 struct case. + (ffi_closure_SYSV): Add 1 or 2-bytes struct case for X86_WIN32. + * src/x86/win32.S (retstruct1b, retstruct2b, sc_retstruct1b, + sc_retstruct2b): Add for 1 or 2-bytes struct case. + +2004-03-15 Kelley Cook + + * configure.in: Rename file to ... + * configure.ac: ... 
this. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2004-03-12 Matt Austern + + * src/powerpc/darwin.S: Fix EH information so it corresponds to + changes in EH format resulting from addition of linkonce support. + * src/powerpc/darwin_closure.S: Likewise. + +2004-03-11 Andreas Tobler + Paolo Bonzini + + * Makefile.am (AUTOMAKE_OPTIONS): Set them. + Remove VPATH. Remove rules for object files. Remove multilib support. + (AM_CCASFLAGS): Add. + * configure.in (AC_CONFIG_HEADERS): Relace AM_CONFIG_HEADER. + (AC_PREREQ): Bump version to 2.59. + (AC_INIT): Fill with version info and bug address. + (ORIGINAL_LD_FOR_MULTILIBS): Remove. + (AM_ENABLE_MULTILIB): Use this instead of AC_ARG_ENABLE. + De-precious CC so that the right flags are passed down to multilibs. + (AC_MSG_ERROR): Replace obsolete macro AC_ERROR. + (AC_CONFIG_FILES): Replace obsolete macro AC_LINK_FILES. + (AC_OUTPUT): Reorganize the output with AC_CONFIG_COMMANDS. + * configure: Rebuilt. + * aclocal.m4: Likewise. + * Makefile.in, include/Makefile.in, testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2004-03-11 Andreas Schwab + + * src/ia64/ffi.c (ffi_prep_incoming_args_UNIX): Get floating point + arguments from fp registers only for the first 8 parameter slots. + Don't convert a float parameter when passed in memory. + +2004-03-09 Hans-Peter Nilsson + + * configure: Regenerate for config/accross.m4 correction. + +2004-02-25 Matt Kraai + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Change + ecif->cif->bytes to bytes. + (ffi_prep_cif_machdep): Add braces around nested if statement. + +2004-02-09 Alan Modra + + * src/types.c (pointer): POWERPC64 has 8 byte pointers. + + * src/powerpc/ffi.c (ffi_prep_args64): Correct long double handling. + (ffi_closure_helper_LINUX64): Fix typo. + * testsuite/libffi.call/cls_align_longdouble.c: Pass -mlong-double-128 + for powerpc64-*-*. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + +2004-02-08 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep ): Correct + long double function return and long double arg handling. + (ffi_closure_helper_LINUX64): Formatting. Delete unused "ng" var. + Use "end_pfr" instead of "nf". Correct long double handling. + Localise "temp". + * src/powerpc/linux64.S (ffi_call_LINUX64): Save f2 long double + return value. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Allocate + space for long double return value. Adjust stack frame and offsets. + Load f2 long double return. + +2004-02-07 Alan Modra + + * src/types.c: Use 16 byte long double for POWERPC64. + +2004-01-25 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_args_v9): Shift the parameter array + when the structure return address is passed in %o0. + (ffi_V9_return_struct): Rename into ffi_v9_layout_struct. + (ffi_v9_layout_struct): Align the field following a nested structure + on a word boundary. Use memmove instead of memcpy. + (ffi_call): Update call to ffi_V9_return_struct. + (ffi_prep_closure): Define 'ctx' only for V8. + (ffi_closure_sparc_inner): Clone into ffi_closure_sparc_inner_v8 + and ffi_closure_sparc_inner_v9. + (ffi_closure_sparc_inner_v8): Return long doubles by reference. + Always skip the structure return address. For structures and long + doubles, copy the argument directly. + (ffi_closure_sparc_inner_v9): Skip the structure return address only + if required. Shift the maximum floating-point slot accordingly. 
For + big structures, copy the argument directly; otherwise, left-justify the + argument and call ffi_v9_layout_struct to lay out the structure on + the stack. + * src/sparc/v8.S: Undef STACKFRAME before defining it. + (ffi_closure_v8): Pass the structure return address. Update call to + ffi_closure_sparc_inner_v8. Short-circuit FFI_TYPE_INT handling. + Skip the 'unimp' insn when returning long doubles and structures. + * src/sparc/v9.S: Undef STACKFRAME before defining it. + (ffi_closure_v9): Increase the frame size by 2 words. Short-circuit + FFI_TYPE_INT handling. Load structures both in integers and + floating-point registers on return. + * README: Update status of the SPARC port. + +2004-01-24 Andreas Tobler + + * testsuite/libffi.call/pyobjc-tc.c (main): Treat result value + as of type ffi_arg. + * testsuite/libffi.call/struct3.c (main): Fix CHECK. + +2004-01-22 Ulrich Weigand + + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Treat result + value as of type ffi_arg, not unsigned int. + +2004-01-21 Michael Ritzert + + * ffi64.c (ffi_prep_args): Cast the RHS of an assignment instead + of the LHS. + +2004-01-12 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_32 for + Solaris. + +2004-01-08 Rainer Orth + + * testsuite/libffi.call/ffitest.h (allocate_mmap): Cast MAP_FAILED + to void *. + +2003-12-10 Richard Henderson + + * testsuite/libffi.call/cls_align_pointer.c: Cast pointers to + size_t instead of int. + +2003-12-04 Hosaka Yuji + + * testsuite/libffi.call/many_win32.c: Include . + * testsuite/libffi.call/many_win32.c (main): Replace variable + int i with unsigned long ul. + + * testsuite/libffi.call/cls_align_uint64.c: New test case. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + +2003-12-02 Hosaka Yuji + + PR other/13221 + * src/x86/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): + Align arguments to 32 bits. + +2003-12-01 Andreas Tobler + + PR other/13221 + * testsuite/libffi.call/cls_multi_sshort.c: New test case. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Cosmetics. + +2003-11-26 Kaveh R. Ghazi + + * testsuite/libffi.call/ffitest.h: Include . + * testsuite/libffi.special/ffitestcxx.h: Likewise. + +2003-11-22 Andreas Tobler + + * Makefile.in: Rebuilt. + * configure: Likewise. + * testsuite/libffi.special/unwindtest.cc: Convert the mmap to + the right type. + +2003-11-21 Andreas Jaeger + Andreas Tobler + + * acinclude.m4: Add AC_FUNC_MMAP_BLACKLIST. + * configure.in: Call AC_FUNC_MMAP_BLACKLIST. + * Makefile.in: Rebuilt. + * aclocal.m4: Likewise. + * configure: Likewise. + * fficonfig.h.in: Likewise. + * testsuite/lib/libffi-dg.exp: Add include dir. + * testsuite/libffi.call/ffitest.h: Add MMAP definitions. + * testsuite/libffi.special/ffitestcxx.h: Likewise. 
+ * testsuite/libffi.call/closure_fn0.c: Use MMAP functionality + for ffi_closure if available. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + +2003-11-20 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Make the -lgcc_s conditional. + +2003-11-19 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Add DYLD_LIBRARY_PATH for darwin. + Add -lgcc_s to additional flags. + +2003-11-12 Andreas Tobler + + * configure.in, include/Makefile.am: PR libgcj/11147, install + the ffitarget.h header file in a gcc versioned and target + dependent place. + * configure: Regenerated. + * Makefile.in, include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2003-11-09 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Print result and check + with dg-output to make debugging easier. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. 
+ * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Make ffi_closure + static. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_9byte2.c: New test case. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_double.c: Do a check on the result. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/return_sc.c: Cleanup whitespaces. + +2003-11-06 Andreas Tobler + + * src/prep_cif.c (ffi_prep_cif): Move the validity check after + the initialization. + +2003-10-23 Andreas Tobler + + * src/java_raw_api.c (ffi_java_ptrarray_to_raw): Replace + FFI_ASSERT(FALSE) with FFI_ASSERT(0). + +2003-10-22 David Daney + + * src/mips/ffitarget.h: Replace undefined UINT32 and friends with + __attribute__((__mode__(__SI__))) and friends. + +2003-10-22 Andreas Schwab + + * src/ia64/ffi.c: Replace FALSE/TRUE with false/true. + +2003-10-21 Andreas Tobler + + * configure.in: AC_LINK_FILES(ffitarget.h). + * configure: Regenerate. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2003-10-21 Paolo Bonzini + Richard Henderson + + Avoid that ffi.h includes fficonfig.h. + + * Makefile.am (EXTRA_DIST): Include ffitarget.h files + (TARGET_SRC_MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (TARGET_SRC_MIPS_SGI): Removed. + (MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (MIPS_SGI): Removed. + (CLEANFILES): Removed. + (mostlyclean-am, clean-am, mostlyclean-sub, clean-sub): New + targets. + * acconfig.h: Removed. + * configure.in: Compute sizeofs only for double and long double. + Use them to define and subst HAVE_LONG_DOUBLE. Include comments + into AC_DEFINE instead of using acconfig.h. 
Create + include/ffitarget.h instead of include/fficonfig.h. Rename + MIPS_GCC to MIPS_IRIX, drop MIPS_SGI since we are in gcc's tree. + AC_DEFINE EH_FRAME_FLAGS. + * include/Makefile.am (DISTCLEANFILES): New automake macro. + (hack_DATA): Add ffitarget.h. + * include/ffi.h.in: Remove all system specific definitions. + Declare raw API even if it is not installed, why bother? + Use limits.h instead of SIZEOF_* to define ffi_type_*. Do + not define EH_FRAME_FLAGS, it is in fficonfig.h now. Include + ffitarget.h instead of fficonfig.h. Remove ALIGN macro. + (UINT_ARG, INT_ARG): Removed, use ffi_arg and ffi_sarg instead. + * include/ffi_common.h (bool): Do not define. + (ffi_assert): Accept failed assertion. + (ffi_type_test): Return void and accept file/line. + (FFI_ASSERT): Pass stringized failed assertion. + (FFI_ASSERT_AT): New macro. + (FFI_ASSERT_VALID_TYPE): New macro. + (UINT8, SINT8, UINT16, SINT16, UINT32, SINT32, + UINT64, SINT64): Define here with gcc's __attribute__ macro + instead of in ffi.h + (FLOAT32, ALIGN): Define here instead of in ffi.h + * include/ffi-mips.h: Removed. Its content moved to + src/mips/ffitarget.h after separating assembly and C sections. + * src/alpha/ffi.c, src/alpha/ffi.c, src/java_raw_api.c + src/prep_cif.c, src/raw_api.c, src/ia64/ffi.c, + src/mips/ffi.c, src/mips/n32.S, src/mips/o32.S, + src/mips/ffitarget.h, src/sparc/ffi.c, src/x86/ffi64.c: + SIZEOF_ARG -> FFI_SIZEOF_ARG. + * src/ia64/ffi.c: Include stdbool.h (provided by GCC 2.95+). + * src/debug.c (ffi_assert): Accept stringized failed assertion. + (ffi_type_test): Rewritten. + * src/prep-cif.c (initialize_aggregate, ffi_prep_cif): Call + FFI_ASSERT_VALID_TYPE. + * src/alpha/ffitarget.h, src/arm/ffitarget.h, + src/ia64/ffitarget.h, src/m68k/ffitarget.h, + src/mips/ffitarget.h, src/powerpc/ffitarget.h, + src/s390/ffitarget.h, src/sh/ffitarget.h, + src/sh64/ffitarget.h, src/sparc/ffitarget.h, + src/x86/ffitarget.h: New files. + * src/alpha/osf.S, src/arm/sysv.S, src/ia64/unix.S, + src/m68k/sysv.S, src/mips/n32.S, src/mips/o32.S, + src/powerpc/aix.S, src/powerpc/darwin.S, + src/powerpc/ffi_darwin.c, src/powerpc/linux64.S, + src/powerpc/linux64_closure.S, src/powerpc/ppc_closure.S, + src/powerpc/sysv.S, src/s390/sysv.S, src/sh/sysv.S, + src/sh64/sysv.S, src/sparc/v8.S, src/sparc/v9.S, + src/x86/sysv.S, src/x86/unix64.S, src/x86/win32.S: + include fficonfig.h + +2003-10-20 Rainer Orth + + * src/mips/ffi.c: Use _ABIN32, _ABIO32 instead of external + _MIPS_SIM_NABI32, _MIPS_SIM_ABI32. + +2003-10-19 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Declare bytes again. + Used when FFI_DEBUG = 1. + +2003-10-14 Alan Modra + + * src/types.c (double, longdouble): Default POWERPC64 to 8 byte size + and align. + +2003-10-06 Rainer Orth + + * include/ffi_mips.h: Define FFI_MIPS_N32 for N32/N64 ABIs, + FFI_MIPS_O32 for O32 ABI. + +2003-10-01 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_64 for + SPARC64. Cleanup whitespaces. + +2003-09-19 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Xfail mips, arm, + strongarm, xscale. Cleanup whitespaces. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. 
+ * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Cleanup whitespaces. + +2003-09-18 David Edelsohn + + * src/powerpc/aix.S: Cleanup whitespaces. + * src/powerpc/aix_closure.S: Likewise. + +2003-09-18 Andreas Tobler + + * src/powerpc/darwin.S: Cleanup whitespaces, comment formatting. + * src/powerpc/darwin_closure.S: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + +2003-09-18 Andreas Tobler + David Edelsohn + + * src/types.c (double): Add AIX and Darwin to the right TYPEDEF. + * src/powerpc/aix_closure.S: Remove the pointer to the outgoing + parameter stack. + * src/powerpc/darwin_closure.S: Likewise. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Handle structures + according to the Darwin/AIX ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_DARWIN): Likewise. + Remove the outgoing parameter stack logic. Simplify the evaluation + of the different CASE types. + (ffi_prep_clousure): Avoid the casts on lvalues. Change the branch + statement in the trampoline code. + +2003-09-18 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_args): Take account into the alignement + for the register size. + (ffi_closure_helper_SYSV): Handle the structure return value + address correctly. + (ffi_closure_helper_SYSV): Return the appropriate type when + the registers are used for the structure return value. + * src/sh/sysv.S (ffi_closure_SYSV): Fix the stack layout for + the 64-bit return value. Update copyright years. + +2003-09-17 Rainer Orth + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Search in + srcdir for ffi_mips.h. + +2003-09-12 Alan Modra + + * src/prep_cif.c (initialize_aggregate): Include tail padding in + structure size. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Correct + placement of float result. + * testsuite/libffi.special/unwindtest.cc (closure_test_fn1): Correct + cast of "resp" for big-endian 64 bit machines. + +2003-09-11 Alan Modra + + * src/types.c (double, longdouble): Merge identical SH and ARM + typedefs, and add POWERPC64. + * src/powerpc/ffi.c (ffi_prep_args64): Correct next_arg calc for + struct split over gpr and rest. + (ffi_prep_cif_machdep): Correct intarg_count for structures. + * src/powerpc/linux64.S (ffi_call_LINUX64): Fix gpr offsets. + +2003-09-09 Andreas Tobler + + * src/powerpc/ffi.c (ffi_closure_helper_SYSV) Handle struct + passing correctly. + +2003-09-09 Alan Modra + + * configure: Regenerate. + +2003-09-04 Andreas Tobler + + * Makefile.am: Remove build rules for ffitest. + * Makefile.in: Rebuilt. 
+ +2003-09-04 Andreas Tobler + + * src/java_raw_api.c: Include to fix compiler warning + about implicit declaration of abort(). + +2003-09-04 Andreas Tobler + + * Makefile.am: Add dejagnu test framework. Fixes PR other/11411. + * Makefile.in: Rebuilt. + * configure.in: Add dejagnu test framework. + * configure: Rebuilt. + + * testsuite/Makefile.am: New file. + * testsuite/Makefile.in: Built + * testsuite/lib/libffi-dg.exp: New file. + * testsuite/config/default.exp: Likewise. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Likewise. + * testsuite/libffi.call/closure_fn0.c: Likewise. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float1.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/many.c: Likewise. + * testsuite/libffi.call/many_win32.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/return_ll.c: Likewise. + * testsuite/libffi.call/return_sc.c: Likewise. + * testsuite/libffi.call/return_uc.c: Likewise. + * testsuite/libffi.call/strlen.c: Likewise. + * testsuite/libffi.call/strlen_win32.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.special/special.exp: New file. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + + +2003-08-13 Kaz Kojima + + * src/sh/ffi.c (OFS_INT16): Set 0 for little endian case. Update + copyright years. + +2003-08-02 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Modify for changed gcc + structure passing. + (ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64.S: Remove code writing to parm save area. 
+ * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Use return + address in lr from ffi_closure_helper_LINUX64 call to calculate + table address. Optimize function tail. + +2003-07-28 Andreas Tobler + + * src/sparc/ffi.c: Handle all floating point registers. + * src/sparc/v9.S: Likewise. Fixes second part of PR target/11410. + +2003-07-11 Gerald Pfeifer + + * README: Note that libffi is not part of GCC. Update the project + URL and status. + +2003-06-19 Franz Sirl + + * src/powerpc/ppc_closure.S: Include ffi.h. + +2003-06-13 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .uleb128/.sleb128 directives. + Use C style comments. + +2003-06-13 Kaz Kojima + + * Makefile.am: Add SHmedia support. Fix a typo of SH support. + * Makefile.in: Regenerate. + * configure.in (sh64-*-linux*, sh5*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SHmedia support. + * src/sh64/ffi.c: New file. + * src/sh64/sysv.S: New file. + +2003-05-16 Jakub Jelinek + + * configure.in (HAVE_RO_EH_FRAME): Check whether .eh_frame section + should be read-only. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * include/ffi.h.in (EH_FRAME_FLAGS): Define. + * src/alpha/osf.S: Use EH_FRAME_FLAGS. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. Include ffi.h. + * src/powerpc/sysv.S: Use EH_FRAME_FLAGS. Use pcrel encoding + if -fpic/-fPIC/-mrelocatable. + * src/powerpc/powerpc_closure.S: Likewise. + * src/sparc/v8.S: If HAVE_RO_EH_FRAME is defined, don't include + #write in .eh_frame flags. + * src/sparc/v9.S: Likewise. + * src/x86/unix64.S: Use EH_FRAME_FLAGS. + * src/x86/sysv.S: Likewise. Use pcrel encoding if -fpic/-fPIC. + * src/s390/sysv.S: Use EH_FRAME_FLAGS. Include ffi.h. + +2003-05-07 Jeff Sturm + + Fixes PR bootstrap/10656 + * configure.in (HAVE_AS_REGISTER_PSEUDO_OP): Test assembler + support for .register pseudo-op. + * src/sparc/v8.S: Use it. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2003-04-18 Jakub Jelinek + + * include/ffi.h.in (POWERPC64): Define if 64-bit. + (enum ffi_abi): Add FFI_LINUX64 on POWERPC. + Make it the default on POWERPC64. + (FFI_TRAMPOLINE_SIZE): Define to 24 on POWERPC64. + * configure.in: Change powerpc-*-linux* into powerpc*-*-linux*. + * configure: Rebuilt. + * src/powerpc/ffi.c (hidden): Define. + (ffi_prep_args_SYSV): Renamed from + ffi_prep_args. Cast pointers to unsigned long to shut up warnings. + (NUM_GPR_ARG_REGISTERS64, NUM_FPR_ARG_REGISTERS64, + ASM_NEEDS_REGISTERS64): New. + (ffi_prep_args64): New function. + (ffi_prep_cif_machdep): Handle FFI_LINUX64 ABI. + (ffi_call): Likewise. + (ffi_prep_closure): Likewise. + (flush_icache): Surround by #ifndef POWERPC64. + (ffi_dblfl): New union type. + (ffi_closure_helper_SYSV): Use it to avoid aliasing problems. + (ffi_closure_helper_LINUX64): New function. + * src/powerpc/ppc_closure.S: Surround whole file by #ifndef + __powerpc64__. + * src/powerpc/sysv.S: Likewise. + (ffi_call_SYSV): Rename ffi_prep_args to ffi_prep_args_SYSV. + * src/powerpc/linux64.S: New file. + * src/powerpc/linux64_closure.S: New file. + * Makefile.am (EXTRA_DIST): Add src/powerpc/linux64.S and + src/powerpc/linux64_closure.S. + (TARGET_SRC_POWERPC): Likewise. + + * src/ffitest.c (closure_test_fn, closure_test_fn1, closure_test_fn2, + closure_test_fn3): Fix result printing on big-endian 64-bit + machines. + (main): Print tst2_arg instead of uninitialized tst2_result. + + * src/ffitest.c (main): Hide what closure pointer really points to + from the compiler. 
+
+2003-04-16 Richard Earnshaw
+
+ * configure.in (arm-*-netbsdelf*): Add configuration.
+ (configure): Regenerated.
+
+2003-04-04 Loren J. Rittle
+
+ * include/Makefile.in: Regenerate.
+
+2003-03-21 Zdenek Dvorak
+
+ * libffi/include/ffi.h.in: Define X86 instead of X86_64 in 32
+ bit mode.
+ * libffi/src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV):
+ Receive closure pointer through parameter, read args using
+ __builtin_dwarf_cfa.
+ (FFI_INIT_TRAMPOLINE): Send closure reference through eax.
+
+2003-03-12 Andreas Schwab
+
+ * configure.in: Avoid trailing /. in toolexeclibdir.
+ * configure: Rebuilt.
+
+2003-03-03 Andreas Tobler
+
+ * src/powerpc/darwin_closure.S: Recode to fit dynamic libraries.
+
+2003-02-06 Andreas Tobler
+
+ * libffi/src/powerpc/darwin_closure.S:
+ Fix alignment bug, allocate 8 bytes for the result.
+ * libffi/src/powerpc/aix_closure.S:
+ Likewise.
+ * libffi/src/powerpc/ffi_darwin.c:
+ Update stackframe description for aix/darwin_closure.S.
+
+2003-02-06 Jakub Jelinek
+
+ * src/s390/ffi.c (ffi_closure_helper_SYSV): Add hidden visibility
+ attribute.
+
+2003-01-31 Christian Cornelssen ,
+ Andreas Schwab
+
+ * configure.in: Adjust command to source config-ml.in to account
+ for changes to the libffi_basedir definition.
+ (libffi_basedir): Remove ${srcdir} from value and include trailing
+ slash if nonempty.
+
+ * configure: Regenerate.
+
+2003-01-29 Franz Sirl
+
+ * src/powerpc/ppc_closure.S: Recode to fit shared libs.
+
+2003-01-28 Andrew Haley
+
+ * include/ffi.h.in: Enable FFI_CLOSURES for x86_64.
+ * src/x86/ffi64.c (ffi_prep_closure): New.
+ (ffi_closure_UNIX64_inner): New.
+ * src/x86/unix64.S (ffi_closure_UNIX64): New.
+
+2003-01-27 Alexandre Oliva
+
+ * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST.
+ Remove USE_LIBDIR conditional.
+ * Makefile.am (toolexecdir, toolexeclibdir): Don't override.
+ * Makefile.in, configure: Rebuilt.
+
+2003-01-27 David Edelsohn
+
+ * Makefile.am (TARGET_SRC_POWERPC_AIX): Fix typo.
+ * Makefile.in: Regenerate.
+
+2003-01-22 Andrew Haley
+
+ * src/powerpc/darwin.S (_ffi_call_AIX): Add Augmentation size to
+ unwind info.
+
+2003-01-21 Andreas Tobler
+
+ * src/powerpc/darwin.S: Add unwind info.
+ * src/powerpc/darwin_closure.S: Likewise.
+
+2003-01-14 Andrew Haley
+
+ * src/x86/ffi64.c (ffi_prep_args): Check for void retval.
+ (ffi_prep_cif_machdep): Likewise.
+ * src/x86/unix64.S: Add unwind info.
+
+2003-01-14 Andreas Jaeger
+
+ * src/ffitest.c (main): Only use ffi_closures if those are
+ supported.
+
+2003-01-13 Andreas Tobler
+
+ * libffi/src/ffitest.c
+ add closure testcases
+
+2003-01-13 Kevin B. Hendricks
+
+ * libffi/src/powerpc/ffi.c
+ fix alignment bug for float (4 byte aligned iso 8 byte)
+
+2003-01-09 Geoffrey Keating
+
+ * src/powerpc/ffi_darwin.c: Remove RCS version string.
+ * src/powerpc/darwin.S: Remove RCS version string.
+
+2003-01-03 Jeff Sturm
+
+ * include/ffi.h.in: Add closure defines for SPARC, SPARC64.
+ * src/ffitest.c (main): Use static storage for closure.
+ * src/sparc/ffi.c (ffi_prep_closure, ffi_closure_sparc_inner): New.
+ * src/sparc/v8.S (ffi_closure_v8): New.
+ * src/sparc/v9.S (ffi_closure_v9): New.
+
+2002-11-10 Ranjit Mathew
+
+ * include/ffi.h.in: Added FFI_STDCALL ffi_type
+ enumeration for X86_WIN32.
+ * src/x86/win32.S: Added ffi_call_STDCALL function
+ definition.
+ * src/x86/ffi.c (ffi_call/ffi_raw_call): Added
+ switch cases for recognising FFI_STDCALL and
+ calling ffi_call_STDCALL if target is X86_WIN32.
+ * src/ffitest.c (my_stdcall_strlen/stdcall_many): + stdcall versions of the "my_strlen" and "many" + test functions (for X86_WIN32). + Added test cases to test stdcall invocation using + these functions. + +2002-12-02 Kaz Kojima + + * src/sh/sysv.S: Add DWARF2 unwind info. + +2002-11-27 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Make section read-only. + +2002-11-26 Jim Wilson + + * src/types.c (FFI_TYPE_POINTER): Has size 8 on IA64. + +2002-11-23 H.J. Lu + + * acinclude.m4: Add dummy AM_PROG_LIBTOOL. + Include ../config/accross.m4. + * aclocal.m4; Rebuild. + * configure: Likewise. + +2002-11-15 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Adapt to pcrel FDE encoding. + +2002-11-11 DJ Delorie + + * configure.in: Look for common files in the right place. + +2002-10-08 Ulrich Weigand + + * src/java_raw_api.c (ffi_java_raw_to_ptrarray): Interpret + raw data as _Jv_word values, not ffi_raw. + (ffi_java_ptrarray_to_raw): Likewise. + (ffi_java_rvalue_to_raw): New function. + (ffi_java_raw_call): Call it. + (ffi_java_raw_to_rvalue): New function. + (ffi_java_translate_args): Call it. + * src/ffitest.c (closure_test_fn): Interpret return value + as ffi_arg, not int. + * src/s390/ffi.c (ffi_prep_cif_machdep): Add missing + FFI_TYPE_POINTER case. + (ffi_closure_helper_SYSV): Likewise. Also, assume return + values extended to word size. + +2002-10-02 Andreas Jaeger + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Remove debug output. + +2002-10-01 Bo Thorsen + + * include/ffi.h.in: Fix i386 win32 compilation. + +2002-09-30 Ulrich Weigand + + * configure.in: Add s390x-*-linux-* target. + * configure: Regenerate. + * include/ffi.h.in: Define S390X for s390x targets. + (FFI_CLOSURES): Define for s390/s390x. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * src/prep_cif.c (ffi_prep_cif): Do not compute stack space for s390. + * src/types.c (FFI_TYPE_POINTER): Use 8-byte pointers on s390x. + * src/s390/ffi.c: Major rework of existing code. Add support for + s390x targets. Add closure support. + * src/s390/sysv.S: Likewise. + +2002-09-29 Richard Earnshaw + + * src/arm/sysv.S: Fix typo. + +2002-09-28 Richard Earnshaw + + * src/arm/sysv.S: If we don't have machine/asm.h and the pre-processor + has defined __USER_LABEL_PREFIX__, then use it in CNAME. + (ffi_call_SYSV): Handle soft-float. + +2002-09-27 Bo Thorsen + + * include/ffi.h.in: Fix multilib x86-64 support. + +2002-09-22 Kaveh R. Ghazi + + * Makefile.am (all-multi): Fix multilib parallel build. + +2002-07-19 Kaz Kojima + + * configure.in (sh[34]*-*-linux*): Add brackets. + * configure: Regenerate. + +2002-07-18 Kaz Kojima + + * Makefile.am: Add SH support. + * Makefile.in: Regenerate. + * configure.in (sh-*-linux*, sh[34]*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SH support. + * src/sh/ffi.c: New file. + * src/sh/sysv.S: New file. + * src/types.c: Add SH support. + +2002-07-16 Bo Thorsen + + * src/x86/ffi64.c: New file that adds x86-64 support. + * src/x86/unix64.S: New file that handles argument setup for + x86-64. + * src/x86/sysv.S: Don't use this on x86-64. + * src/x86/ffi.c: Don't use this on x86-64. + Remove unused vars. + * src/prep_cif.c (ffi_prep_cif): Don't do stack size calculation + for x86-64. + * src/ffitest.c (struct6): New test that tests a special case in + the x86-64 ABI. + (struct7): Likewise. + (struct8): Likewise. + (struct9): Likewise. + (closure_test_fn): Silence warning about this when it's not used. + (main): Add the new tests. 
+ (main): Fix a couple of wrong casts and silence some compiler warnings. + * include/ffi.h.in: Add x86-64 ABI definition. + * fficonfig.h.in: Regenerate. + * Makefile.am: Add x86-64 support. + * configure.in: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + +2002-06-24 Bo Thorsen + + * src/types.c: Merge settings for similar architectures. + Add x86-64 sizes and alignments. + +2002-06-23 Bo Thorsen + + * src/arm/ffi.c (ffi_prep_args): Remove unused vars. + * src/sparc/ffi.c (ffi_prep_args_v8): Likewise. + * src/mips/ffi.c (ffi_prep_args): Likewise. + * src/m68k/ffi.c (ffi_prep_args): Likewise. + +2002-07-18 H.J. Lu (hjl@gnu.org) + + * Makefile.am (TARGET_SRC_MIPS_LINUX): New. + (libffi_la_SOURCES): Support MIPS_LINUX. + (libffi_convenience_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + + * configure.in (mips64*-*): Skip. + (mips*-*-linux*): New. + * configure: Regenerated. + + * src/mips/ffi.c: Include . + +2002-06-06 Ulrich Weigand + + * src/s390/sysv.S: Save/restore %r6. Add DWARF-2 unwind info. + +2002-05-27 Roger Sayle + + * src/x86/ffi.c (ffi_prep_args): Remove reference to avn. + +2002-05-27 Bo Thorsen + + * src/x86/ffi.c (ffi_prep_args): Remove unused variable and + fix formatting. + +2002-05-13 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_closure): Declare fd at + beginning of function (for older apple cc). + +2002-05-08 Alexandre Oliva + + * configure.in (ORIGINAL_LD_FOR_MULTILIBS): Preserve LD at + script entry, and set LD to it when configuring multilibs. + * configure: Rebuilt. + +2002-05-05 Jason Thorpe + + * configure.in (sparc64-*-netbsd*): Add target. + (sparc-*-netbsdelf*): Likewise. + * configure: Regenerate. + +2002-04-28 David S. Miller + + * configure.in, configure: Fix SPARC test in previous change. + +2002-04-29 Gerhard Tonn + + * Makefile.am: Add Linux for S/390 support. + * Makefile.in: Regenerate. + * configure.in: Add Linux for S/390 support. + * configure: Regenerate. + * include/ffi.h.in: Add Linux for S/390 support. + * src/s390/ffi.c: New file from libffi CVS tree. + * src/s390/sysv.S: New file from libffi CVS tree. + +2002-04-28 Jakub Jelinek + + * configure.in (HAVE_AS_SPARC_UA_PCREL): Check for working + %r_disp32(). + * src/sparc/v8.S: Use it. + * src/sparc/v9.S: Likewise. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2002-04-08 Hans Boehm + + * src/java_raw_api.c (ffi_java_raw_size): Handle FFI_TYPE_DOUBLE + correctly. + * src/ia64/unix.S: Add unwind information. Fix comments. + Save sp in a way that's compatible with unwind info. + (ffi_call_unix): Correctly restore sp in all cases. + * src/ia64/ffi.c: Add, fix comments. + +2002-04-08 Jakub Jelinek + + * src/sparc/v8.S: Make .eh_frame dependent on target word size. + +2002-04-06 Jason Thorpe + + * configure.in (alpha*-*-netbsd*): Add target. + * configure: Regenerate. + +2002-04-04 Jeff Sturm + + * src/sparc/v8.S: Add unwind info. + * src/sparc/v9.S: Likewise. + +2002-03-30 Krister Walfridsson + + * configure.in: Enable i*86-*-netbsdelf*. + * configure: Rebuilt. + +2002-03-29 David Billinghurst + + PR other/2620 + * src/mips/n32.s: Delete + * src/mips/o32.s: Delete + +2002-03-21 Loren J. Rittle + + * configure.in: Enable alpha*-*-freebsd*. + * configure: Rebuilt. + +2002-03-17 Bryce McKinlay + + * Makefile.am: libfficonvenience -> libffi_convenience. + * Makefile.in: Rebuilt. + + * Makefile.am: Define ffitest_OBJECTS. + * Makefile.in: Rebuilt. + +2002-03-07 Andreas Tobler + David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX closure files. 
+ (TARGET_SRC_POWERPC_AIX): Add aix_closure.S. + (TARGET_SRC_POWERPC_DARWIN): Add darwin_closure.S. + * Makefile.in: Regenerate. + * include/ffi.h.in: Add AIX and Darwin closure definitions. + * src/powerpc/ffi_darwin.c (ffi_prep_closure): New function. + (flush_icache, flush_range): New functions. + (ffi_closure_helper_DARWIN): New function. + * src/powerpc/aix_closure.S: New file. + * src/powerpc/darwin_closure.S: New file. + +2002-02-24 Jeff Sturm + + * include/ffi.h.in: Add typedef for ffi_arg. + * src/ffitest.c (main): Declare rint with ffi_arg. + +2002-02-21 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Skip appropriate + number of GPRs for floating-point arguments. + +2002-01-31 Anthony Green + + * configure: Rebuilt. + * configure.in: Replace CHECK_SIZEOF and endian tests with + cross-compiler friendly macros. + * aclocal.m4 (AC_COMPILE_CHECK_SIZEOF, AC_C_BIGENDIAN_CROSS): New + macros. + +2002-01-18 David Edelsohn + + * src/powerpc/darwin.S (_ffi_call_AIX): New. + * src/powerpc/aix.S (ffi_call_DARWIN): New. + +2002-01-17 David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX files. + (TARGET_SRC_POWERPC_AIX): New. + (POWERPC_AIX): New stanza. + * Makefile.in: Regenerate. + * configure.in: Add AIX case. + * configure: Regenerate. + * include/ffi.h.in (ffi_abi): Add FFI_AIX. + * src/powerpc/ffi_darwin.c (ffi_status): Use "long" to scale frame + size. Fix "long double" support. + (ffi_call): Add FFI_AIX case. + * src/powerpc/aix.S: New. + +2001-10-09 John Hornkvist + + Implement Darwin PowerPC ABI. + * configure.in: Handle powerpc-*-darwin*. + * Makefile.am: Set source files for POWERPC_DARWIN. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + * include/ffi.h.in: Define FFI_DARWIN and FFI_DEFAULT_ABI for + POWERPC_DARWIN. + * src/powerpc/darwin.S: New file. + * src/powerpc/ffi_darwin.c: New file. + +2001-10-07 Joseph S. Myers + + * src/x86/ffi.c: Fix spelling error of "separate" as "seperate". + +2001-07-16 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .balign directive. + Use C style comments. + +2001-07-16 Rainer Orth + + * src/alpha/ffi.c (ffi_prep_closure): Avoid gas-only mnemonic. + Fixes PR bootstrap/3563. + +2001-06-26 Rainer Orth + + * src/alpha/osf.S (ffi_closure_osf): Use .rdata for ECOFF. + +2001-06-25 Rainer Orth + + * configure.in: Recognize sparc*-sun-* host. + * configure: Regenerate. + +2001-06-06 Andrew Haley + + * src/alpha/osf.S (__FRAME_BEGIN__): Conditionalize for ELF. + +2001-06-03 Andrew Haley + + * src/alpha/osf.S: Add unwind info. + * src/powerpc/sysv.S: Add unwind info. + * src/powerpc/ppc_closure.S: Likewise. + +2000-05-31 Jeff Sturm + + * configure.in: Fix AC_ARG_ENABLE usage. + * configure: Rebuilt. + +2001-05-06 Bryce McKinlay + + * configure.in: Remove warning about beta code. + * configure: Rebuilt. + +2001-04-25 Hans Boehm + + * src/ia64/unix.S: Restore stack pointer when returning from + ffi_closure_UNIX. + * src/ia64/ffi.c: Fix typo in comment. + +2001-04-18 Jim Wilson + + * src/ia64/unix.S: Delete unnecessary increment and decrement of loc2 + to eliminate RAW DV. + +2001-04-12 Bryce McKinlay + + * Makefile.am: Make a libtool convenience library. + * Makefile.in: Rebuilt. + +2001-03-29 Bryce McKinlay + + * configure.in: Use different syntax for subdirectory creation. + * configure: Rebuilt. + +2001-03-27 Jon Beniston + + * configure.in: Added X86_WIN32 target (Win32, CygWin, MingW). + * configure: Rebuilt. + * Makefile.am: Added X86_WIN32 target support. + * Makefile.in: Rebuilt. 
+ + * include/ffi.h.in: Added X86_WIN32 target support. + + * src/ffitest.c: Doesn't run structure tests for X86_WIN32 targets. + * src/types.c: Added X86_WIN32 target support. + + * src/x86/win32.S: New file. Based on sysv.S, but with EH + stuff removed and made to work with CygWin's gas. + +2001-03-26 Bryce McKinlay + + * configure.in: Make target subdirectory in build dir. + * Makefile.am: Override suffix based rules to specify correct output + subdirectory. + * Makefile.in: Rebuilt. + * configure: Rebuilt. + +2001-03-23 Kevin B Hendricks + + * src/powerpc/ppc_closure.S: New file. + * src/powerpc/ffi.c (ffi_prep_args): Fixed ABI compatibility bug + involving long long and register pairs. + (ffi_prep_closure): New function. + (flush_icache): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * include/ffi.h.in (FFI_CLOSURES): Define on PPC. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Added src/powerpc/ppc_closure.S. + (TARGET_SRC_POWERPC): Likewise. + +2001-03-19 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (ffitest_LDFLAGS): New macro. + +2001-03-02 Nick Clifton + + * include/ffi.h.in: Remove RCS ident string. + * include/ffi_mips.h: Remove RCS ident string. + * src/debug.c: Remove RCS ident string. + * src/ffitest.c: Remove RCS ident string. + * src/prep_cif.c: Remove RCS ident string. + * src/types.c: Remove RCS ident string. + * src/alpha/ffi.c: Remove RCS ident string. + * src/alpha/osf.S: Remove RCS ident string. + * src/arm/ffi.c: Remove RCS ident string. + * src/arm/sysv.S: Remove RCS ident string. + * src/mips/ffi.c: Remove RCS ident string. + * src/mips/n32.S: Remove RCS ident string. + * src/mips/o32.S: Remove RCS ident string. + * src/sparc/ffi.c: Remove RCS ident string. + * src/sparc/v8.S: Remove RCS ident string. + * src/sparc/v9.S: Remove RCS ident string. + * src/x86/ffi.c: Remove RCS ident string. + * src/x86/sysv.S: Remove RCS ident string. + +2001-02-08 Joseph S. Myers + + * include/ffi.h.in: Change sourceware.cygnus.com references to + gcc.gnu.org. + +2000-12-09 Richard Henderson + + * src/alpha/ffi.c (ffi_call): Simplify struct return test. + (ffi_closure_osf_inner): Index rather than increment avalue + and arg_types. Give ffi_closure_osf the raw return value type. + * src/alpha/osf.S (ffi_closure_osf): Handle return value type + promotion. + +2000-12-07 Richard Henderson + + * src/raw_api.c (ffi_translate_args): Fix typo. + (ffi_prep_closure): Likewise. + + * include/ffi.h.in [ALPHA]: Define FFI_CLOSURES and + FFI_TRAMPOLINE_SIZE. + * src/alpha/ffi.c (ffi_prep_cif_machdep): Adjust minimal + cif->bytes for new ffi_call_osf implementation. + (ffi_prep_args): Absorb into ... + (ffi_call): ... here. Do all stack allocation here and + avoid a callback function. + (ffi_prep_closure, ffi_closure_osf_inner): New. + * src/alpha/osf.S (ffi_call_osf): Reimplement with no callback. + (ffi_closure_osf): New. + +2000-09-10 Alexandre Oliva + + * config.guess, config.sub, install-sh: Removed. + * ltconfig, ltmain.sh, missing, mkinstalldirs: Likewise. + * Makefile.in: Rebuilt. + + * acinclude.m4: Include libtool macros from the top level. + * aclocal.m4, configure: Rebuilt. + +2000-08-22 Alexandre Oliva + + * configure.in [i*86-*-freebsd*] (TARGET, TARGETDIR): Set. + * configure: Rebuilt. + +2000-05-11 Scott Bambrough + + * libffi/src/arm/sysv.S (ffi_call_SYSV): Doubles are not saved to + memory correctly. Use conditional instructions, not branches where + possible. 
+ +2000-05-04 Tom Tromey + + * configure: Rebuilt. + * configure.in: Match `arm*-*-linux-*'. + From Chris Dornan . + +2000-04-28 Jakub Jelinek + + * Makefile.am (SUBDIRS): Define. + (AM_MAKEFLAGS): Likewise. + (Multilib support.): Add section. + * Makefile.in: Rebuilt. + * ltconfig (extra_compiler_flags, extra_compiler_flags_value): + New variables. Set for gcc using -print-multi-lib. Export them + to libtool. + (sparc64-*-linux-gnu*): Use libsuff 64 for search paths. + * ltmain.sh (B|b|V): Don't throw away gcc's -B, -b and -V options + for -shared links. + (extra_compiler_flags_value, extra_compiler_flags): Check these + for extra compiler options which need to be passed down in + compiler_flags. + +2000-04-16 Anthony Green + + * configure: Rebuilt. + * configure.in: Change i*86-pc-linux* to i*86-*-linux*. + +2000-04-14 Jakub Jelinek + + * include/ffi.h.in (SPARC64): Define for 64bit SPARC builds. + Set SPARC FFI_DEFAULT_ABI based on SPARC64 define. + * src/sparc/ffi.c (ffi_prep_args_v8): Renamed from ffi_prep_args. + Replace all void * sizeofs with sizeof(int). + Only compare type with FFI_TYPE_LONGDOUBLE if LONGDOUBLE is + different than DOUBLE. + Remove FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases (handled elsewhere). + (ffi_prep_args_v9): New function. + (ffi_prep_cif_machdep): Handle V9 ABI and long long on V8. + (ffi_V9_return_struct): New function. + (ffi_call): Handle FFI_V9 ABI from 64bit code and FFI_V8 ABI from + 32bit code (not yet cross-arch calls). + * src/sparc/v8.S: Add struct return delay nop. + Handle long long. + * src/sparc/v9.S: New file. + * src/prep_cif.c (ffi_prep_cif): Return structure pointer + is used on sparc64 only for structures larger than 32 bytes. + Pass by reference for structures is done for structure arguments + larger than 16 bytes. + * src/ffitest.c (main): Use 64bit rint on sparc64. + Run long long tests on sparc. + * src/types.c (FFI_TYPE_POINTER): Pointer is 64bit on alpha and + sparc64. + (FFI_TYPE_LONGDOUBLE): long double is 128 bit aligned to 128 bits + on sparc64. + * configure.in (sparc-*-linux*): New supported target. + (sparc64-*-linux*): Likewise. + * configure: Rebuilt. + * Makefile.am: Add v9.S to SPARC files. + * Makefile.in: Likewise. + (LINK): Surround $(CCLD) into double quotes, so that multilib + compiles work correctly. + +2000-04-04 Alexandre Petit-Bianco + + * configure: Rebuilt. + * configure.in: (i*86-*-solaris*): New libffi target. Patch + proposed by Bryce McKinlay. + +2000-03-20 Tom Tromey + + * Makefile.in: Hand edit for java_raw_api.lo. + +2000-03-08 Bryce McKinlay + + * config.guess, config.sub: Update from the gcc tree. + Fix for PR libgcj/168. + +2000-03-03 Tom Tromey + + * Makefile.in: Fixed ia64 by hand. + + * configure: Rebuilt. + * configure.in (--enable-multilib): New option. + (libffi_basedir): New subst. + (AC_OUTPUT): Added multilib code. + +2000-03-02 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (TARGET_SRC_IA64): Use `ia64', not `alpha', as + directory name. + +2000-02-25 Hans Boehm + + * src/ia64/ffi.c, src/ia64/ia64_flags.h, src/ia64/unix.S: New + files. + * src/raw_api.c (ffi_translate_args): Fixed typo in argument + list. + (ffi_prep_raw_closure): Use ffi_translate_args, not + ffi_closure_translate. + * src/java_raw_api.c: New file. + * src/ffitest.c (closure_test_fn): New function. + (main): Define `rint' as long long on IA64. Added new test when + FFI_CLOSURES is defined. + * include/ffi.h.in (ALIGN): Use size_t, not unsigned. + (ffi_abi): Recognize IA64. + (ffi_raw): Added `flt' field. 
+ Added "Java raw API" code.
+ * configure.in: Recognize ia64.
+ * Makefile.am (TARGET_SRC_IA64): New macro.
+ (libffi_la_common_SOURCES): Added java_raw_api.c.
+ (libffi_la_SOURCES): Define in IA64 case.
+
+2000-01-04 Tom Tromey
+
+ * Makefile.in: Rebuilt with newer automake.
+
+1999-12-31 Tom Tromey
+
+ * Makefile.am (INCLUDES): Added -I$(top_srcdir)/src.
+
+1999-09-01 Tom Tromey
+
+ * include/ffi.h.in: Removed PACKAGE and VERSION defines and
+ undefs.
+ * fficonfig.h.in: Rebuilt.
+ * configure: Rebuilt.
+ * configure.in: Pass 3rd argument to AM_INIT_AUTOMAKE.
+ Use AM_PROG_LIBTOOL (automake 1.4 compatibility).
+ * acconfig.h: Don't #undef PACKAGE or VERSION.
+
+1999-08-09 Anthony Green
+
+ * include/ffi.h.in: Try to work around messy header problem
+ with PACKAGE and VERSION.
+
+ * configure: Rebuilt.
+ * configure.in: Change version to 2.00-beta.
+
+ * fficonfig.h.in: Rebuilt.
+ * acconfig.h (FFI_NO_STRUCTS, FFI_NO_RAW_API): Define.
+
+ * src/x86/ffi.c (ffi_raw_call): Rename.
+
+1999-08-02 Kresten Krab Thorup
+
+ * src/x86/ffi.c (ffi_closure_SYSV): New function.
+ (ffi_prep_incoming_args_SYSV): Ditto.
+ (ffi_prep_closure): Ditto.
+ (ffi_closure_raw_SYSV): Ditto.
+ (ffi_prep_raw_closure): More ditto.
+ (ffi_call_raw): Final ditto.
+
+ * include/ffi.h.in: Add definitions for closure and raw API.
+
+ * src/x86/ffi.c (ffi_prep_cif_machdep): Added case for
+ FFI_TYPE_UINT64.
+
+ * Makefile.am (libffi_la_common_SOURCES): Added raw_api.c
+
+ * src/raw_api.c: New file.
+
+ * include/ffi.h.in (ffi_raw): New type.
+ (UINT_ARG, SINT_ARG): New defines.
+ (ffi_closure, ffi_raw_closure): New types.
+ (ffi_prep_closure, ffi_prep_raw_closure): New declarations.
+
+ * configure.in: Add check for endianness and sizeof void*.
+
+ * src/x86/sysv.S (ffi_call_SYSV): Call fixup routine via argument,
+ instead of directly.
+
+ * configure: Rebuilt.
+
+Thu Jul 8 14:28:42 1999 Anthony Green
+
+ * configure.in: Add x86 and powerpc BeOS configurations.
+ From Makoto Kato .
+
+1999-05-09 Anthony Green
+
+ * configure.in: Add warning about this being beta code.
+ Remove src/Makefile.am from the picture.
+ * configure: Rebuilt.
+
+ * Makefile.am: Move logic from src/Makefile.am. Add changes
+ to support libffi as a target library.
+ * Makefile.in: Rebuilt.
+
+ * aclocal.m4, config.guess, config.sub, ltconfig, ltmain.sh:
+ Upgraded to new autoconf, automake, libtool.
+
+ * README: Tweaks.
+
+ * LICENSE: Update copyright date.
+
+ * src/Makefile.am, src/Makefile.in: Removed.
+
+1998-11-29 Anthony Green
+
+ * include/ChangeLog: Removed.
+ * src/ChangeLog: Removed.
+ * src/mips/ChangeLog: Removed.
+ * src/sparc/ChangeLog: Removed.
+ * src/x86/ChangeLog: Removed.
+
+ * ChangeLog.v1: Created.
+
+=============================================================================
+From the old ChangeLog.libffi file....
+
+2011-02-08 Andreas Tobler
+
+ * testsuite/lib/libffi.exp: Tweak for stand-alone mode.
+
+2009-12-25 Samuli Suominen
+
+ * configure.ac: Undefine _AC_ARG_VAR_PRECIOUS for autoconf 2.64.
+ * configure: Rebuilt.
+ * fficonfig.h.in: Rebuilt.
+
+2009-06-16 Andrew Haley
+
+ * testsuite/libffi.call/cls_align_sint64.c,
+ testsuite/libffi.call/cls_align_uint64.c,
+ testsuite/libffi.call/cls_longdouble_va.c,
+ testsuite/libffi.call/cls_ulonglong.c,
+ testsuite/libffi.call/return_ll1.c,
+ testsuite/libffi.call/stret_medium2.c: Fix printf format
+ specifiers.
+ * testsuite/libffi.call/huge_struct.c: Add x86 XFAILs.
+ * testsuite/libffi.call/float2.c: Fix dg-excess-errors.
+ * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-12 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + 
testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-04 Andrew Haley + + * src/powerpc/ffitarget.h: Fix misapplied merge from gcc. + +2009-06-04 Andrew Haley + + * src/mips/o32.S, + src/mips/n32.S: Fix licence formatting. + +2009-06-04 Andrew Haley + + * src/x86/darwin.S: Fix licence formatting. + src/x86/win32.S: Likewise. + src/sh64/sysv.S: Likewise. + src/sh/sysv.S: Likewise. + +2009-06-04 Andrew Haley + + * src/sh64/ffi.c: Remove lint directives. Was missing from merge + of Andreas Tobler's patch from 2006-04-22. + +2009-06-04 Andrew Haley + + * src/sh/ffi.c: Apply missing hunk from Alexandre Oliva's patch of + 2007-03-07. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-12-19 Anthony Green + + * configure.ac: Bump version to 3.0.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-11-11 Anthony Green + + * configure.ac: Bump version to 3.0.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. 
+ (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. + +2008-07-17 Anthony Green + + * configure.ac: Bump version to 3.0.6. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. Add documentation. + * README: Update for new release. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-07-16 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-04-03 Anthony Green + + * libffi.pc.in (Libs): Add -L${libdir}. + * configure.ac: Bump version to 3.0.5. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-04-03 Anthony Green + Xerces Ranby + + * include/ffi.h.in: Wrap definition of target architecture to + protect from double definitions. + +2008-03-22 Moriyoshi Koizumi + + * src/x86/ffi.c (ffi_prep_closure_loc): Fix for bug revealed in + closure_loc_fn0.c. + * testsuite/libffi.call/closure_loc_fn0.c (closure_loc_test_fn0): + New test. + +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/huge_struct.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2008-02-26 Jakub Jelinek + Anthony Green + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. + * src/m68k/sysv.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-26 Anthony Green + Thomas Heller + + * include/ffi.h.in: Change void (*)() to void (*)(void). + +2008-02-26 Anthony Green + Thomas Heller + + * src/alpha/ffi.c: Change void (*)() to void (*)(void). + src/alpha/osf.S, src/arm/ffi.c, src/frv/ffi.c, src/ia64/ffi.c, + src/ia64/unix.S, src/java_raw_api.c, src/m32r/ffi.c, + src/mips/ffi.c, src/pa/ffi.c, src/pa/hpux32.S, src/pa/linux.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/raw_api.c, + src/s390/ffi.c, src/sh/ffi.c, src/sh64/ffi.c, src/sparc/ffi.c, + src/x86/ffi.c, src/x86/unix64.S, src/x86/darwin64.S, + src/x86/ffi64.c: Ditto. + +2008-02-24 Anthony Green + + * configure.ac: Accept openbsd*, not just openbsd. 
+ Bump version to 3.0.4. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-22 Anthony Green + + * README: Clean up list of tested platforms. + +2008-02-22 Anthony Green + + * configure.ac: Bump version to 3.0.3. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. Clean up test docs. + +2008-02-22 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-02-22 Thomas Heller + + * configure.ac: Add x86 OpenBSD support. + * configure: Rebuilt. + +2008-02-21 Thomas Heller + + * README: Change "make test" to "make check". + +2008-02-21 Anthony Green + + * configure.ac: Bump version to 3.0.2. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-21 Björn König + + * src/x86/freebsd.S: New file. + * configure.ac: Add x86 FreeBSD support. + * Makefile.am: Ditto. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.1. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-15 David Daney + + * src/mips/ffi.c: Remove extra '>' from include directive. + (ffi_prep_closure_loc): Use clear_location instead of tramp. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.0. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2008-02-15 Anthony Green + + * man/ffi_call.3, man/ffi_prep_cif.3, man/ffi.3: + Update dates and remove all references to ffi_prep_closure. + * configure.ac: Bump version to 2.99.9. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 Anthony Green + + * man/ffi_prep_closure.3: Delete. + * man/Makefile.am (EXTRA_DIST): Remove ffi_prep_closure.3. + (man_MANS): Ditto. + * man/Makefile.in: Rebuilt. + * configure.ac: Bump version to 2.99.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * include/ffi.h.in LICENSE src/debug.c src/closures.c + src/ffitest.c src/s390/sysv.S src/s390/ffitarget.h + src/types.c src/m68k/ffitarget.h src/raw_api.c src/frv/ffi.c + src/frv/ffitarget.h src/sh/ffi.c src/sh/sysv.S + src/sh/ffitarget.h src/powerpc/ffitarget.h src/pa/ffi.c + src/pa/ffitarget.h src/pa/linux.S src/java_raw_api.c + src/cris/ffitarget.h src/x86/ffi.c src/x86/sysv.S + src/x86/unix64.S src/x86/win32.S src/x86/ffitarget.h + src/x86/ffi64.c src/x86/darwin.S src/ia64/ffi.c + src/ia64/ffitarget.h src/ia64/ia64_flags.h src/ia64/unix.S + src/sparc/ffi.c src/sparc/v9.S src/sparc/ffitarget.h + src/sparc/v8.S src/alpha/ffi.c src/alpha/ffitarget.h + src/alpha/osf.S src/sh64/ffi.c src/sh64/sysv.S + src/sh64/ffitarget.h src/mips/ffi.c src/mips/ffitarget.h + src/mips/n32.S src/mips/o32.S src/arm/ffi.c src/arm/sysv.S + src/arm/ffitarget.h src/prep_cif.c: Update license text. + +2008-02-14 Anthony Green + + * README: Update tested platforms. + * configure.ac: Bump version to 2.99.6. + * configure: Rebuilt. 
+ +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.5. + * configure: Rebuilt. + * Makefile.am (EXTRA_DIST): Add darwin64.S + * Makefile.in: Rebuilt. + * testsuite/lib/libffi-dg.exp: Remove libstdc++ bits from GCC tree. + * LICENSE: Update WARRANTY. + +2008-02-14 Anthony Green + + * libffi.pc.in (libdir): Fix libdir definition. + * configure.ac: Bump version to 2.99.4. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * README: Update. + * libffi.info: New file. + * doc/stamp-vti: New file. + * configure.ac: Bump version to 2.99.3. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * Makefile.am (SUBDIRS): Add man dir. + * Makefile.in: Rebuilt. + * configure.ac: Create Makefile. + * configure: Rebuilt. + * man/ffi_call.3 man/ffi_prep_cif.3 man/ffi_prep_closure.3 + man/Makefile.am man/Makefile.in: New files. + +2008-02-14 Tom Tromey + + * aclocal.m4, Makefile.in, configure, fficonfig.h.in: Rebuilt. + * mdate-sh, texinfo.tex: New files. + * Makefile.am (info_TEXINFOS): New variable. + * doc/libffi.texi: New file. + * doc/version.texi: Likewise. + +2008-02-14 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't compile with -D$(TARGET). + (lib_LTLIBRARIES): Define. + (toolexeclib_LIBRARIES): Undefine. + * Makefile.in: Rebuilt. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + +2008-02-14 Anthony Green + + * libffi.pc.in: Use @PACKAGE_NAME@ and @PACKAGE_VERSION@. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Add ChangeLog.libffi. + * Makefile.in: Rebuilt. + * LICENSE: Update copyright notice. + +2008-02-14 Anthony Green + + * include/Makefile.am (nodist_includes_HEADERS): Define. Don't + distribute ffitarget.h or ffi.h from the build include dir. + * Makefile.in: Rebuilt. + +2008-02-14 Anthony Green + + * include/Makefile.am (includesdir): Install headers under libdir. + (pkgconfigdir): Define. Install libffi.pc. + * include/Makefile.in: Rebuilt. + * libffi.pc.in: Create. + * libtool-version: Increment CURRENT + * configure.ac: Add libffi.pc.in + * configure: Rebuilt. + +2008-02-03 Anthony Green + + * include/Makefile.am (includesdir): Fix header install with + DESTDIR. + * include/Makefile.in: Rebuilt. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-02-01 Anthony Green + + * include/Makefile.am: Fix header installs. + * Makefile.am: Ditto. + * include/Makefile.in: Rebuilt. + * Makefile.in: Ditto. + +2008-02-01 Anthony Green + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL, + FFI_INIT_TRAMPOLINE): Revert my broken changes to twall's last + patch. + +2008-01-31 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am: Ditto. + * Makefile.in, testsuite/Makefile.in: Rebuilt. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-30 Anthony Green + + * Makefile.am, include/Makefile.am: Move headers to + libffi_la_SOURCES for new automake. + * Makefile.in, include/Makefile.in: Rebuilt. 
+ + * testsuite/lib/wrapper.exp: Copied from gcc tree to allow for + execution outside of gcc tree. + * testsuite/lib/target-libpath.exp: Ditto. + + * testsuite/lib/libffi-dg.exp: Many changes to allow for execution + outside of gcc tree. + + +============================================================================= +From the old ChangeLog.libgcj file.... + +2004-01-14 Kelley Cook + + * configure.in: Add in AC_PREREQ(2.13) + +2003-02-20 Alexandre Oliva + + * configure.in: Propagate ORIGINAL_LD_FOR_MULTILIBS to + config.status. + * configure: Rebuilt. + +2002-01-27 Alexandre Oliva + + * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST. + Remove USE_LIBDIR conditional. + * Makefile.am (toolexecdir, toolexeclibdir): Don't override. + * Makefile.in, configure: Rebuilt. + +Mon Aug 9 18:33:38 1999 Rainer Orth + + * include/Makefile.in: Rebuilt. + * Makefile.in: Rebuilt + * Makefile.am (toolexeclibdir): Add $(MULTISUBDIR) even for native + builds. + Use USE_LIBDIR. + + * configure: Rebuilt. + * configure.in (USE_LIBDIR): Define for native builds. + Use lowercase in configure --help explanations. + +1999-08-08 Anthony Green + + * include/ffi.h.in (FFI_FN): Remove `...'. + +1999-08-08 Anthony Green + + * Makefile.in: Rebuilt. + * Makefile.am (AM_CFLAGS): Compile with -fexceptions. + + * src/x86/sysv.S: Add exception handling metadata. + + +============================================================================= + +The libffi version 1 ChangeLog archive. + +Version 1 of libffi had per-directory ChangeLogs. Current and future +versions have a single ChangeLog file in the root directory. The +version 1 ChangeLogs have all been concatenated into this file for +future reference only. + +--- libffi ---------------------------------------------------------------- + +Mon Oct 5 02:17:50 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +Mon Oct 5 01:03:03 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +1998-07-25 Andreas Schwab + + * m68k/ffi.c (ffi_prep_cif_machdep): Use bitmask for cif->flags. + Correctly handle small structures. + (ffi_prep_args): Also handle small structures. + (ffi_call): Pass size of return type to ffi_call_SYSV. + * m68k/sysv.S: Adjust for above changes. Correctly align small + structures in the return value. + + * types.c (uint64, sint64) [M68K]: Change alignment to 4. + +Fri Apr 17 17:26:58 1998 Anthony Green + + * configure.in: Boosted rev. + * configure,Makefile.in,aclocal.m4: Rebuilt. + * README: Boosted rev and added release notes. + +Sun Feb 22 00:50:41 1998 Geoff Keating + + * configure.in: Add PowerPC config bits. + +1998-02-14 Andreas Schwab + + * configure.in: Add m68k config bits. Change AC_CANONICAL_SYSTEM + to AC_CANONICAL_HOST, this is not a compiler. Use $host instead + of $target. Remove AC_CHECK_SIZEOF(char), we already know the + result. Fix argument of AC_ARG_ENABLE. + * configure, fficonfig.h.in: Rebuilt. + +Tue Feb 10 20:53:40 1998 Richard Henderson + + * configure.in: Add Alpha config bits. + +Tue May 13 13:39:20 1997 Anthony Green + + * README: Updated dates and reworded Irix comments. + + * configure.in: Removed AC_PROG_RANLIB. + + * Makefile.in, aclocal.m4, config.guess, config.sub, configure, + ltmain.sh, */Makefile.in: libtoolized again and rebuilt with + automake and autoconf. 
+ +Sat May 10 18:44:50 1997 Tom Tromey + + * configure, aclocal.m4: Rebuilt. + * configure.in: Don't compute EXTRADIST; now handled in + src/Makefile.in. Removed macros implied by AM_INIT_AUTOMAKE. + Don't run AM_MAINTAINER_MODE. + +Thu May 8 14:34:05 1997 Anthony Green + + * missing, ltmain.sh, ltconfig.sh: Created. These are new files + required by automake and libtool. + + * README: Boosted rev to 1.14. Added notes. + + * acconfig.h: Moved PACKAGE and VERSION for new automake. + + * configure.in: Changes for libtool. + + * Makefile.am (check): make test now make check. Uses libtool now. + + * Makefile.in, configure.in, aclocal.h, fficonfig.h.in: Rebuilt. + +Thu May 1 16:27:07 1997 Anthony Green + + * missing: Added file required by new automake. + +Tue Nov 26 14:10:42 1996 Anthony Green + + * acconfig.h: Added USING_PURIFY flag. This is defined when + --enable-purify-safety was used at configure time. + + * configure.in (allsources): Added --enable-purify-safety switch. + (VERSION): Boosted rev to 1.13. + * configure: Rebuilt. + +Fri Nov 22 06:46:12 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.12. + Removed special CFLAGS hack for gcc. + * configure: Rebuilt. + + * README: Boosted rev to 1.12. Added notes. + + * Many files: Cygnus Support changed to Cygnus Solutions. + +Wed Oct 30 11:15:25 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.11. + * configure: Rebuilt. + + * README: Boosted rev to 1.11. Added notes about GNU make. + +Tue Oct 29 12:25:12 1996 Anthony Green + + * configure.in: Fixed -Wall trick. + (VERSION): Boosted rev. + * configure: Rebuilt + + * acconfig.h: Needed for --enable-debug configure switch. + + * README: Boosted rev to 1.09. Added more notes on building + libffi, and LCLint. + + * configure.in: Added --enable-debug switch. Boosted rev to + 1.09. + * configure: Rebuilt + +Tue Oct 15 13:11:28 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.08 + * configure: Rebuilt. + + * README: Added n32 bug fix notes. + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + +Mon Oct 14 10:54:46 1996 Anthony Green + + * README: Added web page reference. + + * configure.in, README: Boosted rev to 1.05 + * configure: Rebuilt. + + * README: Fixed n32 sample code. + +Fri Oct 11 17:09:28 1996 Anthony Green + + * README: Added sparc notes. + + * configure.in, README: Boosted rev to 1.04. + * configure: Rebuilt. + +Thu Oct 10 10:31:03 1996 Anthony Green + + * configure.in, README: Boosted rev to 1.03. + * configure: Rebuilt. + + * README: Added struct notes. + + * Makefile.am (EXTRA_DIST): Added LICENSE to distribution. + * Makefile.in: Rebuilt. + + * README: Removed Linux section. No special notes now + because aggregates arg/return types work. + +Wed Oct 9 16:16:42 1996 Anthony Green + + * README, configure.in (VERSION): Boosted rev to 1.02 + * configure: Rebuilt. + +Tue Oct 8 11:56:33 1996 Anthony Green + + * README (NOTE): Added n32 notes. + + * Makefile.am: Added test production. + * Makefile: Rebuilt + + * README: spell checked! + + * configure.in (VERSION): Boosted rev to 1.01 + * configure: Rebuilt. + +Mon Oct 7 15:50:22 1996 Anthony Green + + * configure.in: Added nasty bit to support SGI tools. + * configure: Rebuilt. + + * README: Added SGI notes. Added note about automake bug. + +Mon Oct 7 11:00:28 1996 Anthony Green + + * README: Rewrote intro, and fixed examples. + +Fri Oct 4 10:19:55 1996 Anthony Green + + * configure.in: -D$TARGET is no longer used as a compiler switch. 
+ It is now inserted into ffi.h at configure time. + * configure: Rebuilt. + + * FFI_ABI and FFI_STATUS are now ffi_abi and ffi_status. + +Thu Oct 3 13:47:34 1996 Anthony Green + + * README, LICENSE: Created. Wrote some docs. + + * configure.in: Don't barf on i586-unknown-linuxaout. + Added EXTRADIST code for "make dist". + * configure: Rebuilt. + + * */Makefile.in: Rebuilt with patched automake. + +Tue Oct 1 17:12:25 1996 Anthony Green + + * Makefile.am, aclocal.m4, config.guess, config.sub, + configure.in, fficonfig.h.in, install-sh, mkinstalldirs, + stamp-h.in: Created + * Makefile.in, configure: Generated + +--- libffi/include -------------------------------------------------------- + +Tue Feb 24 13:09:36 1998 Anthony Green + + * ffi_mips.h: Updated FFI_TYPE_STRUCT_* values based on + ffi.h.in changes. This is a work-around for SGI's "simple" + assembler. + +Sun Feb 22 00:51:55 1998 Geoff Keating + + * ffi.h.in: PowerPC support. + +1998-02-14 Andreas Schwab + + * ffi.h.in: Add m68k support. + (FFI_TYPE_LONGDOUBLE): Make it a separate value. + +Tue Feb 10 20:55:16 1998 Richard Henderson + + * ffi.h.in (SIZEOF_ARG): Use a pointer type by default. + + * ffi.h.in: Alpha support. + +Fri Nov 22 06:48:45 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Cygnus Support -> Cygnus Solutions. + +Wed Nov 20 22:31:01 1996 Anthony Green + + * ffi.h.in: Added ffi_type_void definition. + +Tue Oct 29 12:22:40 1996 Anthony Green + + * Makefile.am (hack_DATA): Always install ffi_mips.h. + + * ffi.h.in: Removed FFI_DEBUG. It's now in the correct + place (acconfig.h). + Added #include for size_t definition. + +Tue Oct 15 17:23:35 1996 Anthony Green + + * ffi.h.in, ffi_common.h, ffi_mips.h: More clean up. + Commented out #define of FFI_DEBUG. + +Tue Oct 15 13:01:06 1996 Anthony Green + + * ffi_common.h: Added bool definition. + + * ffi.h.in, ffi_common.h: Clean up based on LCLint output. + Added funny /*@...@*/ comments to annotate source. + +Mon Oct 14 12:29:23 1996 Anthony Green + + * ffi.h.in: Interface changes based on feedback from Jim + Blandy. + +Fri Oct 11 16:49:35 1996 Anthony Green + + * ffi.h.in: Small change for sparc support. + +Thu Oct 10 14:53:37 1996 Anthony Green + + * ffi_mips.h: Added FFI_TYPE_STRUCT_* definitions for + special structure return types. + +Wed Oct 9 13:55:57 1996 Anthony Green + + * ffi.h.in: Added SIZEOF_ARG definition for X86 + +Tue Oct 8 11:40:36 1996 Anthony Green + + * ffi.h.in (FFI_FN): Added macro for eliminating compiler warnings. + Use it to case your function pointers to the proper type. + + * ffi_mips.h (SIZEOF_ARG): Added magic to fix type promotion bug. + + * Makefile.am (EXTRA_DIST): Added ffi_mips.h to EXTRA_DIST. + * Makefile: Rebuilt. + + * ffi_mips.h: Created. Moved all common mips definitions here. + +Mon Oct 7 10:58:12 1996 Anthony Green + + * ffi.h.in: The SGI assember is very picky about parens. Redefined + some macros to avoid problems. + + * ffi.h.in: Added FFI_DEFAULT_ABI definitions. Also added + externs for pointer, and 64bit integral ffi_types. + +Fri Oct 4 09:51:37 1996 Anthony Green + + * ffi.h.in: Added FFI_ABI member to ffi_cif and changed + function prototypes accordingly. + Added #define @TARGET@. Now programs including ffi.h don't + have to specify this themselves. + +Thu Oct 3 15:36:44 1996 Anthony Green + + * ffi.h.in: Changed ffi_prep_cif's values from void* to void** + + * Makefile.am (EXTRA_DIST): Added EXTRA_DIST for "make dist" + to work. + * Makefile.in: Regenerated. 
+ +Wed Oct 2 10:16:59 1996 Anthony Green + + * Makefile.am: Created + * Makefile.in: Generated + + * ffi_common.h: Added rcsid comment + +Tue Oct 1 17:13:51 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Created + +--- libffi/src ------------------------------------------------------------ + +Mon Oct 5 02:17:50 1998 Anthony Green + + * arm/ffi.c, arm/sysv.S: Created. + + * Makefile.am: Added arm files. + * Makefile.in: Rebuilt. + +Mon Oct 5 01:41:38 1998 Anthony Green + + * Makefile.am (libffi_la_LDFLAGS): Incremented revision. + +Sun Oct 4 16:27:17 1998 Anthony Green + + * alpha/osf.S (ffi_call_osf): Patch for DU assembler. + + * ffitest.c (main): long long and long double return values work + for x86. + +Fri Apr 17 11:50:58 1998 Anthony Green + + * Makefile.in: Rebuilt. + + * ffitest.c (main): Floating point tests not executed for systems + with broken lond double (SunOS 4 w/ GCC). + + * types.c: Fixed x86 alignment info for long long types. + +Thu Apr 16 07:15:28 1998 Anthony Green + + * ffitest.c: Added more notes about GCC bugs under Irix 6. + +Wed Apr 15 08:42:22 1998 Anthony Green + + * ffitest.c (struct5): New test function. + (main): New test with struct5. + +Thu Mar 5 10:48:11 1998 Anthony Green + + * prep_cif.c (initialize_aggregate): Fix assertion for + nested structures. + +Tue Feb 24 16:33:41 1998 Anthony Green + + * prep_cif.c (ffi_prep_cif): Added long double support for sparc. + +Sun Feb 22 00:52:18 1998 Geoff Keating + + * powerpc/asm.h: New file. + * powerpc/ffi.c: New file. + * powerpc/sysv.S: New file. + * Makefile.am: PowerPC port. + * ffitest.c (main): Allow all tests to run even in presence of gcc + bug on PowerPC. + +1998-02-17 Anthony Green + + * mips/ffi.c: Fixed comment typo. + + * x86/ffi.c (ffi_prep_cif_machdep), x86/sysv.S (retfloat): + Fixed x86 long double return handling. + + * types.c: Fixed x86 long double alignment info. + +1998-02-14 Andreas Schwab + + * types.c: Add m68k support. + + * ffitest.c (floating): Add long double parameter. + (return_ll, ldblit): New functions to test long long and long + double return value. + (main): Fix type error in assignment of ts[1-4]_type.elements. + Add tests for long long and long double arguments and return + values. + + * prep_cif.c (ffi_prep_cif) [M68K]: Don't allocate argument for + struct value pointer. + + * m68k/ffi.c, m68k/sysv.S: New files. + * Makefile.am: Add bits for m68k port. Add kludge to work around + automake deficiency. + (test): Don't require "." in $PATH. + * Makefile.in: Rebuilt. + +Wed Feb 11 07:36:50 1998 Anthony Green + + * Makefile.in: Rebuilt. + +Tue Feb 10 20:56:00 1998 Richard Henderson + + * alpha/ffi.c, alpha/osf.S: New files. + * Makefile.am: Alpha port. + +Tue Nov 18 14:12:07 1997 Anthony Green + + * mips/ffi.c (ffi_prep_cif_machdep): Initialize rstruct_flag + for n32. + +Tue Jun 3 17:18:20 1997 Anthony Green + + * ffitest.c (main): Added hack to get structure tests working + correctly. + +Sat May 10 19:06:42 1997 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Explicitly list all distributable + files in subdirs. + (VERSION, CC): Removed. + +Thu May 8 17:19:01 1997 Anthony Green + + * Makefile.am: Many changes for new automake and libtool. + * Makefile.in: Rebuilt. + +Fri Nov 22 06:57:56 1996 Anthony Green + + * ffitest.c (main): Fixed test case for non mips machines. + +Wed Nov 20 22:31:59 1996 Anthony Green + + * types.c: Added ffi_type_void declaration. + +Tue Oct 29 13:07:19 1996 Anthony Green + + * ffitest.c (main): Fixed character constants. 
+ (main): Emit warning for structure test 3 failure on Sun. + + * Makefile.am (VPATH): Fixed VPATH def'n so automake won't + strip it out. + Moved distdir hack from libffi to automake. + (ffitest): Added missing -c for $(COMPILE) (change in automake). + * Makefile.in: Rebuilt. + +Tue Oct 15 13:08:20 1996 Anthony Green + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + + * prep_cif.c (STACK_ARG_SIZE): Improved STACK_ARG_SIZE macro. + Clean up based on LCLint output. Added funny /*@...@*/ comments to + annotate source. + + * ffitest.c, debug.c: Cleaned up code. + +Mon Oct 14 12:26:56 1996 Anthony Green + + * ffitest.c: Changes based on interface changes. + + * prep_cif.c (ffi_prep_cif): Cleaned up interface based on + feedback from Jim Blandy. + +Fri Oct 11 15:53:18 1996 Anthony Green + + * ffitest.c: Reordered tests while porting to sparc. + Made changes to handle lame structure passing for sparc. + Removed calls to fflush(). + + * prep_cif.c (ffi_prep_cif): Added special case for sparc + aggregate type arguments. + +Thu Oct 10 09:56:51 1996 Anthony Green + + * ffitest.c (main): Added structure passing/returning tests. + + * prep_cif.c (ffi_prep_cif): Perform proper initialization + of structure return types if needed. + (initialize_aggregate): Bug fix + +Wed Oct 9 16:04:20 1996 Anthony Green + + * types.c: Added special definitions for x86 (double doesn't + need double word alignment). + + * ffitest.c: Added many tests + +Tue Oct 8 09:19:22 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Fixed assertion. + + * debug.c (ffi_assert): Must return a non void now. + + * Makefile.am: Added test production. + * Makefile: Rebuilt. + + * ffitest.c (main): Created. + + * types.c: Created. Stripped common code out of */ffi.c. + + * prep_cif.c: Added missing stdlib.h include. + + * debug.c (ffi_type_test): Used "a" to eliminate compiler + warnings in non-debug builds. Included ffi_common.h. + +Mon Oct 7 15:36:42 1996 Anthony Green + + * Makefile.am: Added a rule for .s -> .o + This is required by the SGI compiler. + * Makefile: Rebuilt. + +Fri Oct 4 09:51:08 1996 Anthony Green + + * prep_cif.c (initialize_aggregate): Moved abi specification + to ffi_prep_cif(). + +Thu Oct 3 15:37:37 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Changed values from void* to void**. + (initialize_aggregate): Fixed aggregate type initialization. + + * Makefile.am (EXTRA_DIST): Added support code for "make dist". + * Makefile.in: Regenerated. + +Wed Oct 2 11:41:57 1996 Anthony Green + + * debug.c, prep_cif: Created. + + * Makefile.am: Added debug.o and prep_cif.o to OBJ. + * Makefile.in: Regenerated. + + * Makefile.am (INCLUDES): Added missing -I../include + * Makefile.in: Regenerated. + +Tue Oct 1 17:11:51 1996 Anthony Green + + * error.c, Makefile.am: Created. + * Makefile.in: Generated. + +--- libffi/src/x86 -------------------------------------------------------- + +Sun Oct 4 16:27:17 1998 Anthony Green + + * sysv.S (retlongdouble): Fixed long long return value support. + * ffi.c (ffi_prep_cif_machdep): Ditto. + +Wed May 13 04:30:33 1998 Anthony Green + + * ffi.c (ffi_prep_cif_machdep): Fixed long double return value + support. + +Wed Apr 15 08:43:20 1998 Anthony Green + + * ffi.c (ffi_prep_args): small struct support was missing. + +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Mon Dec 2 15:12:58 1996 Tom Tromey + + * sysv.S: Use .balign, for a.out Linux boxes. + +Tue Oct 15 13:06:50 1996 Anthony Green + + * ffi.c: Clean up based on LCLint output. 
+ Added funny /*@...@*/ comments to annotate source. + +Fri Oct 11 16:43:38 1996 Anthony Green + + * ffi.c (ffi_call): Added assertion for bad ABIs. + +Wed Oct 9 13:57:27 1996 Anthony Green + + * sysv.S (retdouble): Fixed double return problems. + + * ffi.c (ffi_call): Corrected fn arg definition. + (ffi_prep_cif_machdep): Fixed double return problems + +Tue Oct 8 12:12:49 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + +Mon Oct 7 15:53:06 1996 Anthony Green + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:54:53 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 10:07:05 1996 Anthony Green + + * ffi.c, sysv.S, objects.mak: Created. + (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep(). + +--- libffi/src/mips ------------------------------------------------------- + +Tue Feb 17 17:18:07 1998 Anthony Green + + * o32.S: Fixed typo in comment. + + * ffi.c (ffi_prep_cif_machdep): Fixed argument processing. + +Thu May 8 16:53:58 1997 Anthony Green + + * o32.s, n32.s: Wrappers for SGI tool support. + + * objects.mak: Removed. + +Tue Oct 29 14:37:45 1996 Anthony Green + + * ffi.c (ffi_prep_args): Changed int z to size_t z. + +Tue Oct 15 13:17:25 1996 Anthony Green + + * n32.S: Fixed bad stack munging. + + * ffi.c: Moved prototypes for ffi_call_?32() to here from + ffi_mips.h because extended_cif is not defined in ffi_mips.h. + +Mon Oct 14 12:42:02 1996 Anthony Green + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 11:22:16 1996 Anthony Green + + * n32.S, ffi.c: Lots of changes to support passing and + returning structures with the n32 calling convention. + + * n32.S: Fixed fn pointer bug. + + * ffi.c (ffi_prep_cif_machdep): Fix for o32 structure + return values. + (ffi_prep_args): Fixed n32 structure passing when structures + partially fit in registers. + +Wed Oct 9 13:49:25 1996 Anthony Green + + * objects.mak: Added n32.o. + + * n32.S: Created. + + * ffi.c (ffi_prep_args): Added magic to support proper + n32 processing. + +Tue Oct 8 10:37:35 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + + * o32.S: This code is only built for o32 compiles. + A lot of the #define cruft has moved to ffi_mips.h. + + * ffi.c (ffi_prep_cif_machdep): Fixed arg flags. Second arg + is only processed if the first is either a float or double. + +Mon Oct 7 15:33:59 1996 Anthony Green + + * o32.S: Modified to compile under each of o32, n32 and n64. + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:53:25 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 17:41:22 1996 Anthony Green + + * o32.S: Removed crufty definitions. + +Wed Oct 2 12:53:42 1996 Anthony Green + + * ffi.c (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved all machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep. Return types + of FFI_TYPE_STRUCT are no different than FFI_TYPE_INT. + +Tue Oct 1 17:11:02 1996 Anthony Green + + * ffi.c, o32.S, object.mak: Created + +--- libffi/src/sparc ------------------------------------------------------ + +Tue Feb 24 16:33:18 1998 Anthony Green + + * ffi.c (ffi_prep_args): Added long double support. 
+ +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Thu May 1 16:07:56 1997 Anthony Green + + * v8.S: Fixed minor portability problem reported by + Russ McManus . + +Tue Nov 26 14:12:43 1996 Anthony Green + + * v8.S: Used STACKFRAME define elsewhere. + + * ffi.c (ffi_prep_args): Zero out space when USING_PURIFY + is set. + (ffi_prep_cif_machdep): Allocate the correct stack frame + space for functions with < 6 args. + +Tue Oct 29 15:08:55 1996 Anthony Green + + * ffi.c (ffi_prep_args): int z is now size_t z. + +Mon Oct 14 13:31:24 1996 Anthony Green + + * v8.S (ffi_call_V8): Gordon rewrites this again. It looks + great now. + + * ffi.c (ffi_call): The comment about hijacked registers + is no longer valid after gordoni hacked v8.S. + + * v8.S (ffi_call_V8): Rewrote with gordoni. Much simpler. + + * v8.S, ffi.c: ffi_call() had changed to accept more than + two args, so v8.S had to change (because it hijacks incoming + arg registers). + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 17:48:16 1996 Anthony Green + + * ffi.c, v8.S, objects.mak: Created. + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE new file mode 100644 index 0000000..4f0b762 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE @@ -0,0 +1,21 @@ +libffi - Copyright (c) 1996-2020 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE 2 new file mode 100644 index 0000000..4f0b762 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE 2 @@ -0,0 +1,21 @@ +libffi - Copyright (c) 1996-2020 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS new file mode 100644 index 0000000..d1d626e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS @@ -0,0 +1,353 @@ +The libffi source distribution contains certain code that is not part +of libffi, and is only used as tooling to assist with the building and +testing of libffi. This includes the msvcc.sh script used to wrap the +Microsoft compiler with GNU compatible command-line options, +make_sunver.pl, and the libffi test code distributed in the +testsuite/libffi.bhaible directory. This code is distributed with +libffi for the purpose of convenience only, and libffi is in no way +derived from this code. + +msvcc.sh an testsuite/libffi.bhaible are both distributed under the +terms of the GNU GPL version 2, as below. + + + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. 
+ + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year>  <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS 2
new file mode 100644
index 0000000..d1d626e
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS 2
@@ -0,0 +1,353 @@
+The libffi source distribution contains certain code that is not part
+of libffi, and is only used as tooling to assist with the building and
+testing of libffi. This includes the msvcc.sh script used to wrap the
+Microsoft compiler with GNU compatible command-line options,
+make_sunver.pl, and the libffi test code distributed in the
+testsuite/libffi.bhaible directory. This code is distributed with
+libffi for the purpose of convenience only, and libffi is in no way
+derived from this code.
+
+msvcc.sh an testsuite/libffi.bhaible are both distributed under the
+terms of the GNU GPL version 2, as below.
+
+
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. 
By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). 
+Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year>  <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile 2.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile 2.am new file mode 100644 index 0000000..563e9f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile 2.am @@ -0,0 +1,160 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS = foreign subdir-objects + +ACLOCAL_AMFLAGS = -I m4 + +SUBDIRS = include testsuite man +if BUILD_DOCS +## This hack is needed because it doesn't seem possible to make a +## conditional info_TEXINFOS in Automake. At least Automake 1.14 +## either gives errors -- if this attempted in the most +## straightforward way -- or simply unconditionally tries to build the +## info file. +SUBDIRS += doc +endif + +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) + +MAKEOVERRIDES= + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc + +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la + +libffi_la_SOURCES = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c + +if FFI_DEBUG +libffi_la_SOURCES += src/debug.c +endif + +noinst_HEADERS = \ + src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h \ + src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h \ + src/bfin/ffitarget.h \ + src/cris/ffitarget.h \ + src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h \ + src/m68k/ffitarget.h \ + src/m88k/ffitarget.h \ + src/metag/ffitarget.h \ + src/microblaze/ffitarget.h \ + src/mips/ffitarget.h \ + src/moxie/ffitarget.h \ + src/nios2/ffitarget.h \ + src/or1k/ffitarget.h \ + src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h src/powerpc/ffi_powerpc.h \ + src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h \ + src/sh/ffitarget.h \ + src/sh64/ffitarget.h \ + src/sparc/ffitarget.h src/sparc/internal.h \ + src/tile/ffitarget.h \ + src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h src/x86/asmnames.h \ + src/xtensa/ffitarget.h \ + src/dlmalloc.c + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S 
src/frv/ffi.c \ + src/frv/eabi.S src/ia64/ffi.c src/ia64/unix.S src/m32r/ffi.c \ + src/m32r/sysv.S src/m68k/ffi.c src/m68k/sysv.S src/m88k/ffi.c \ + src/m88k/obsd.S src/metag/ffi.c src/metag/sysv.S \ + src/microblaze/ffi.c src/microblaze/sysv.S src/mips/ffi.c \ + src/mips/o32.S src/mips/n32.S src/moxie/ffi.c \ + src/moxie/eabi.S src/nios2/ffi.c src/nios2/sysv.S \ + src/or1k/ffi.c src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S \ + src/pa/hpux32.S src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S + +TARGET_OBJ = @TARGET_OBJ@ +libffi_la_LIBADD = $(TARGET_OBJ) + +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) + +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) + +AM_CFLAGS = +if FFI_DEBUG +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +AM_CFLAGS += -DFFI_DEBUG +endif + +if LIBFFI_BUILD_VERSIONED_SHLIB +if LIBFFI_BUILD_VERSIONED_SHLIB_GNU +libffi_version_script = -Wl,--version-script,libffi.map +libffi_version_dep = libffi.map +endif +if LIBFFI_BUILD_VERSIONED_SHLIB_SUN +libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = libffi.map-sun +libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ + $(libffi_la_OBJECTS) $(libffi_la_LIBADD) + perl $(top_srcdir)/make_sunver.pl libffi.map \ + `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ + sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ + > $@ || (rm -f $@ ; exit 1) +endif +else +libffi_version_script = +libffi_version_dep = +endif +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $< + +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) + +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' 
; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile 2.in new file mode 100644 index 0000000..08aa75c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile 2.in @@ -0,0 +1,1796 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + + + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +@BUILD_DOCS_TRUE@am__append_1 = doc +@FFI_DEBUG_TRUE@am__append_2 = src/debug.c +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +@FFI_DEBUG_TRUE@am__append_3 = -DFFI_DEBUG +subdir = . 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(noinst_HEADERS) $(am__DIST_COMMON) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = fficonfig.h +CONFIG_CLEAN_FILES = libffi.pc +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \ + "$(DESTDIR)$(pkgconfigdir)" +LTLIBRARIES = $(noinst_LTLIBRARIES) $(toolexeclib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am__libffi_la_SOURCES_DIST = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c src/debug.c +am__dirstamp = $(am__leading_dot)dirstamp +@FFI_DEBUG_TRUE@am__objects_1 = src/debug.lo +am_libffi_la_OBJECTS = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +libffi_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(libffi_la_LDFLAGS) $(LDFLAGS) -o $@ +am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) +am__libffi_convenience_la_SOURCES_DIST = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c src/debug.c +am__objects_2 = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +am_libffi_convenience_la_OBJECTS = $(am__objects_2) +nodist_libffi_convenience_la_OBJECTS = +libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \ + $(nodist_libffi_convenience_la_OBJECTS) +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) +LTCPPASCOMPILE = $(LIBTOOL) $(AM_V_lt) $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CCASFLAGS) $(CCASFLAGS) +AM_V_CPPAS = $(am__v_CPPAS_@AM_V@) +am__v_CPPAS_ = $(am__v_CPPAS_@AM_DEFAULT_V@) +am__v_CPPAS_0 = @echo " CPPAS " $@; +am__v_CPPAS_1 = +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libffi_la_SOURCES) $(EXTRA_libffi_la_SOURCES) \ + $(libffi_convenience_la_SOURCES) \ + $(EXTRA_libffi_convenience_la_SOURCES) \ + $(nodist_libffi_convenience_la_SOURCES) +DIST_SOURCES = $(am__libffi_la_SOURCES_DIST) \ + $(EXTRA_libffi_la_SOURCES) \ + $(am__libffi_convenience_la_SOURCES_DIST) \ + $(EXTRA_libffi_convenience_la_SOURCES) +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive 
info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(pkgconfig_DATA) +HEADERS = $(noinst_HEADERS) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + cscope distdir dist dist-all distcheck +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ + $(LISP)fficonfig.h.in +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +CSCOPE = cscope +DIST_SUBDIRS = include testsuite man doc +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/fficonfig.h.in \ + $(srcdir)/libffi.pc.in compile config.guess config.sub depcomp \ + install-sh ltmain.sh missing +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . 
-type f -print +ACLOCAL = @ACLOCAL@ +ALLOCA = @ALLOCA@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AM_LTLDFLAGS = @AM_LTLDFLAGS@ +AM_RUNTESTFLAGS = @AM_RUNTESTFLAGS@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@ +HAVE_LONG_DOUBLE_VARIANT = @HAVE_LONG_DOUBLE_VARIANT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPT_LDFLAGS = @OPT_LDFLAGS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PRTDIAG = @PRTDIAG@ +RANLIB = @RANLIB@ +SECTION_LDFLAGS = @SECTION_LDFLAGS@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +TARGET = @TARGET@ +TARGETDIR = @TARGETDIR@ +TARGET_OBJ = @TARGET_OBJ@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_enable_builddir_sed = @ax_enable_builddir_sed@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sys_symbol_underscore = @sys_symbol_underscore@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +toolexecdir 
= @toolexecdir@ +toolexeclibdir = @toolexeclibdir@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign subdir-objects +ACLOCAL_AMFLAGS = -I m4 +SUBDIRS = include testsuite man $(am__append_1) +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) +MAKEOVERRIDES = +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la +libffi_la_SOURCES = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c $(am__append_2) +noinst_HEADERS = \ + src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h \ + src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h \ + src/bfin/ffitarget.h \ + src/cris/ffitarget.h \ + src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h \ + src/m68k/ffitarget.h \ + src/m88k/ffitarget.h \ + src/metag/ffitarget.h \ + src/microblaze/ffitarget.h \ + src/mips/ffitarget.h \ + src/moxie/ffitarget.h \ + src/nios2/ffitarget.h \ + src/or1k/ffitarget.h \ + src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h src/powerpc/ffi_powerpc.h \ + src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h \ + src/sh/ffitarget.h \ + src/sh64/ffitarget.h \ + src/sparc/ffitarget.h src/sparc/internal.h \ + src/tile/ffitarget.h \ + src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h src/x86/asmnames.h \ + src/xtensa/ffitarget.h \ + src/dlmalloc.c + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c \ + src/frv/eabi.S src/ia64/ffi.c src/ia64/unix.S src/m32r/ffi.c \ + src/m32r/sysv.S src/m68k/ffi.c src/m68k/sysv.S src/m88k/ffi.c \ + src/m88k/obsd.S src/metag/ffi.c src/metag/sysv.S \ + src/microblaze/ffi.c src/microblaze/sysv.S src/mips/ffi.c \ + src/mips/o32.S src/mips/n32.S src/moxie/ffi.c \ + src/moxie/eabi.S src/nios2/ffi.c src/nios2/sysv.S \ + src/or1k/ffi.c src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S \ + src/pa/hpux32.S src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S 
src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S + +libffi_la_LIBADD = $(TARGET_OBJ) +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) +AM_CFLAGS = $(am__append_3) +@LIBFFI_BUILD_VERSIONED_SHLIB_FALSE@libffi_version_script = +@LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@libffi_version_script = -Wl,--version-script,libffi.map +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@libffi_version_script = -Wl,-M,libffi.map-sun +@LIBFFI_BUILD_VERSIONED_SHLIB_FALSE@libffi_version_dep = +@LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@libffi_version_dep = libffi.map +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@libffi_version_dep = libffi.map-sun +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) +all: fficonfig.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + +.SUFFIXES: +.SUFFIXES: .S .c .lo .o .obj +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +fficonfig.h: stamp-h1 + @test -f $@ || rm -f stamp-h1 + @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 + +stamp-h1: $(srcdir)/fficonfig.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status fficonfig.h +$(srcdir)/fficonfig.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f fficonfig.h stamp-h1 +libffi.pc: $(top_builddir)/config.status $(srcdir)/libffi.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(toolexeclibdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \ + } + +uninstall-toolexeclibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \ + done + +clean-toolexeclibLTLIBRARIES: + -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES) + @list='$(toolexeclib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } +src/$(am__dirstamp): + @$(MKDIR_P) src + @: > src/$(am__dirstamp) +src/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/$(DEPDIR) + @: > src/$(DEPDIR)/$(am__dirstamp) +src/prep_cif.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/types.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/java_raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/closures.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/debug.lo: src/$(am__dirstamp) 
src/$(DEPDIR)/$(am__dirstamp) +src/aarch64/$(am__dirstamp): + @$(MKDIR_P) src/aarch64 + @: > src/aarch64/$(am__dirstamp) +src/aarch64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/aarch64/$(DEPDIR) + @: > src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/win64_armasm.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/alpha/$(am__dirstamp): + @$(MKDIR_P) src/alpha + @: > src/alpha/$(am__dirstamp) +src/alpha/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/alpha/$(DEPDIR) + @: > src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/ffi.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/osf.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/arc/$(am__dirstamp): + @$(MKDIR_P) src/arc + @: > src/arc/$(am__dirstamp) +src/arc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arc/$(DEPDIR) + @: > src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/ffi.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/arcompact.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arm/$(am__dirstamp): + @$(MKDIR_P) src/arm + @: > src/arm/$(am__dirstamp) +src/arm/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arm/$(DEPDIR) + @: > src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/ffi.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv_msvc_arm32.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/avr32/$(am__dirstamp): + @$(MKDIR_P) src/avr32 + @: > src/avr32/$(am__dirstamp) +src/avr32/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/avr32/$(DEPDIR) + @: > src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/ffi.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/sysv.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/bfin/$(am__dirstamp): + @$(MKDIR_P) src/bfin + @: > src/bfin/$(am__dirstamp) +src/bfin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/bfin/$(DEPDIR) + @: > src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/ffi.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/sysv.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/cris/$(am__dirstamp): + @$(MKDIR_P) src/cris + @: > src/cris/$(am__dirstamp) +src/cris/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/cris/$(DEPDIR) + @: > src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/ffi.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/sysv.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/frv/$(am__dirstamp): + @$(MKDIR_P) src/frv + @: > src/frv/$(am__dirstamp) +src/frv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/frv/$(DEPDIR) + @: > src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/ffi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/eabi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/ia64/$(am__dirstamp): + @$(MKDIR_P) src/ia64 + @: > src/ia64/$(am__dirstamp) +src/ia64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/ia64/$(DEPDIR) + @: > src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/ffi.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/unix.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/m32r/$(am__dirstamp): + @$(MKDIR_P) src/m32r + @: > 
src/m32r/$(am__dirstamp) +src/m32r/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m32r/$(DEPDIR) + @: > src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/ffi.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/sysv.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m68k/$(am__dirstamp): + @$(MKDIR_P) src/m68k + @: > src/m68k/$(am__dirstamp) +src/m68k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m68k/$(DEPDIR) + @: > src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/ffi.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/sysv.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m88k/$(am__dirstamp): + @$(MKDIR_P) src/m88k + @: > src/m88k/$(am__dirstamp) +src/m88k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m88k/$(DEPDIR) + @: > src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/ffi.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/obsd.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/metag/$(am__dirstamp): + @$(MKDIR_P) src/metag + @: > src/metag/$(am__dirstamp) +src/metag/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/metag/$(DEPDIR) + @: > src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/ffi.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/sysv.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/microblaze/$(am__dirstamp): + @$(MKDIR_P) src/microblaze + @: > src/microblaze/$(am__dirstamp) +src/microblaze/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/microblaze/$(DEPDIR) + @: > src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/ffi.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/sysv.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/mips/$(am__dirstamp): + @$(MKDIR_P) src/mips + @: > src/mips/$(am__dirstamp) +src/mips/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/mips/$(DEPDIR) + @: > src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/ffi.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/o32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/n32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/moxie/$(am__dirstamp): + @$(MKDIR_P) src/moxie + @: > src/moxie/$(am__dirstamp) +src/moxie/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/moxie/$(DEPDIR) + @: > src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/ffi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/eabi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/nios2/$(am__dirstamp): + @$(MKDIR_P) src/nios2 + @: > src/nios2/$(am__dirstamp) +src/nios2/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/nios2/$(DEPDIR) + @: > src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/ffi.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/sysv.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/or1k/$(am__dirstamp): + @$(MKDIR_P) src/or1k + @: > src/or1k/$(am__dirstamp) +src/or1k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/or1k/$(DEPDIR) + @: > src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/ffi.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/sysv.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/pa/$(am__dirstamp): + @$(MKDIR_P) src/pa + @: > src/pa/$(am__dirstamp) +src/pa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/pa/$(DEPDIR) + @: > src/pa/$(DEPDIR)/$(am__dirstamp) 
+src/pa/ffi.lo: src/pa/$(am__dirstamp) src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/linux.lo: src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/hpux32.lo: src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/powerpc/$(am__dirstamp): + @$(MKDIR_P) src/powerpc + @: > src/powerpc/$(am__dirstamp) +src/powerpc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/powerpc/$(DEPDIR) + @: > src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ppc_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/riscv/$(am__dirstamp): + @$(MKDIR_P) src/riscv + @: > src/riscv/$(am__dirstamp) +src/riscv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/riscv/$(DEPDIR) + @: > src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/ffi.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/sysv.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/s390/$(am__dirstamp): + @$(MKDIR_P) src/s390 + @: > src/s390/$(am__dirstamp) +src/s390/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/s390/$(DEPDIR) + @: > src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/ffi.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/sysv.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/sh/$(am__dirstamp): + @$(MKDIR_P) src/sh + @: > src/sh/$(am__dirstamp) +src/sh/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh/$(DEPDIR) + @: > src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/ffi.lo: src/sh/$(am__dirstamp) src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/sysv.lo: src/sh/$(am__dirstamp) \ + src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh64/$(am__dirstamp): + @$(MKDIR_P) src/sh64 + @: > src/sh64/$(am__dirstamp) +src/sh64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh64/$(DEPDIR) + @: > src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/ffi.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/sysv.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sparc/$(am__dirstamp): + @$(MKDIR_P) src/sparc + @: > src/sparc/$(am__dirstamp) +src/sparc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sparc/$(DEPDIR) + @: > src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi64.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v8.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v9.lo: src/sparc/$(am__dirstamp) \ + 
src/sparc/$(DEPDIR)/$(am__dirstamp) +src/tile/$(am__dirstamp): + @$(MKDIR_P) src/tile + @: > src/tile/$(am__dirstamp) +src/tile/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/tile/$(DEPDIR) + @: > src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/ffi.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/tile.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/vax/$(am__dirstamp): + @$(MKDIR_P) src/vax + @: > src/vax/$(am__dirstamp) +src/vax/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/vax/$(DEPDIR) + @: > src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/ffi.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/elfbsd.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/x86/$(am__dirstamp): + @$(MKDIR_P) src/x86 + @: > src/x86/$(am__dirstamp) +src/x86/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/x86/$(DEPDIR) + @: > src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffiw64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/unix64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/xtensa/$(am__dirstamp): + @$(MKDIR_P) src/xtensa + @: > src/xtensa/$(am__dirstamp) +src/xtensa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/xtensa/$(DEPDIR) + @: > src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/ffi.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/sysv.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) + +libffi.la: $(libffi_la_OBJECTS) $(libffi_la_DEPENDENCIES) $(EXTRA_libffi_la_DEPENDENCIES) + $(AM_V_CCLD)$(libffi_la_LINK) -rpath $(toolexeclibdir) $(libffi_la_OBJECTS) $(libffi_la_LIBADD) $(LIBS) + +libffi_convenience.la: $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_DEPENDENCIES) $(EXTRA_libffi_convenience_la_DEPENDENCIES) + $(AM_V_CCLD)$(LINK) $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f src/*.$(OBJEXT) + -rm -f src/*.lo + -rm -f src/aarch64/*.$(OBJEXT) + -rm -f src/aarch64/*.lo + -rm -f src/alpha/*.$(OBJEXT) + -rm -f src/alpha/*.lo + -rm -f src/arc/*.$(OBJEXT) + -rm -f src/arc/*.lo + -rm -f src/arm/*.$(OBJEXT) + -rm -f src/arm/*.lo + -rm -f src/avr32/*.$(OBJEXT) + -rm -f src/avr32/*.lo + -rm -f src/bfin/*.$(OBJEXT) + -rm -f src/bfin/*.lo + -rm -f src/cris/*.$(OBJEXT) + -rm -f src/cris/*.lo + -rm -f src/frv/*.$(OBJEXT) + -rm -f src/frv/*.lo + -rm -f src/ia64/*.$(OBJEXT) + -rm -f src/ia64/*.lo + -rm -f src/m32r/*.$(OBJEXT) + -rm -f src/m32r/*.lo + -rm -f src/m68k/*.$(OBJEXT) + -rm -f src/m68k/*.lo + -rm -f src/m88k/*.$(OBJEXT) + -rm -f src/m88k/*.lo + -rm -f src/metag/*.$(OBJEXT) + -rm -f src/metag/*.lo + -rm -f src/microblaze/*.$(OBJEXT) + -rm -f src/microblaze/*.lo + -rm -f src/mips/*.$(OBJEXT) + -rm -f src/mips/*.lo + -rm -f src/moxie/*.$(OBJEXT) + -rm -f src/moxie/*.lo + -rm -f src/nios2/*.$(OBJEXT) + -rm -f src/nios2/*.lo + -rm -f src/or1k/*.$(OBJEXT) + -rm -f src/or1k/*.lo + -rm -f src/pa/*.$(OBJEXT) + -rm -f src/pa/*.lo + -rm -f src/powerpc/*.$(OBJEXT) 
+ -rm -f src/powerpc/*.lo + -rm -f src/riscv/*.$(OBJEXT) + -rm -f src/riscv/*.lo + -rm -f src/s390/*.$(OBJEXT) + -rm -f src/s390/*.lo + -rm -f src/sh/*.$(OBJEXT) + -rm -f src/sh/*.lo + -rm -f src/sh64/*.$(OBJEXT) + -rm -f src/sh64/*.lo + -rm -f src/sparc/*.$(OBJEXT) + -rm -f src/sparc/*.lo + -rm -f src/tile/*.$(OBJEXT) + -rm -f src/tile/*.lo + -rm -f src/vax/*.$(OBJEXT) + -rm -f src/vax/*.lo + -rm -f src/x86/*.$(OBJEXT) + -rm -f src/x86/*.lo + -rm -f src/xtensa/*.$(OBJEXT) + -rm -f src/xtensa/*.lo + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/closures.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/debug.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/java_raw_api.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/prep_cif.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/raw_api.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/types.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/win64_armasm.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/osf.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/arc/$(DEPDIR)/arcompact.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/arc/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/avr32/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/avr32/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/bfin/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/bfin/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/cris/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/cris/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/frv/$(DEPDIR)/eabi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/frv/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/ia64/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/ia64/$(DEPDIR)/unix.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/m32r/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/m32r/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/m68k/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/m68k/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/m88k/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/m88k/$(DEPDIR)/obsd.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/metag/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/metag/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/microblaze/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/microblaze/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/mips/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/mips/$(DEPDIR)/n32.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@src/mips/$(DEPDIR)/o32.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/moxie/$(DEPDIR)/eabi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/moxie/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/nios2/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/nios2/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/or1k/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/or1k/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/pa/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/pa/$(DEPDIR)/hpux32.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/pa/$(DEPDIR)/linux.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/aix.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/aix_closure.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/darwin.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/darwin_closure.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/ffi_darwin.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/ffi_linux64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/ffi_sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/linux64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/linux64_closure.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/ppc_closure.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/powerpc/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/riscv/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/riscv/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/s390/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/s390/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sh/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sh/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sh64/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sh64/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/ffi64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/v8.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/v9.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/tile/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/tile/$(DEPDIR)/tile.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/vax/$(DEPDIR)/elfbsd.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/vax/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/ffi64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/ffiw64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/sysv.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/sysv_intel.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/unix64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/win64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@src/x86/$(DEPDIR)/win64_intel.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/xtensa/$(DEPDIR)/ffi.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/xtensa/$(DEPDIR)/sysv.Plo@am__quote@ + +.S.o: +@am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCCAS_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS@am__nodep@)$(CPPASCOMPILE) -c -o $@ $< + +.S.obj: +@am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCCAS_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS@am__nodep@)$(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.S.lo: +@am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCCAS_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS@am__nodep@)$(LTCPPASCOMPILE) -c -o $@ $< + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< + +.c.obj: +@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + -rm -rf src/.libs src/_libs + -rm -rf src/aarch64/.libs 
src/aarch64/_libs + -rm -rf src/alpha/.libs src/alpha/_libs + -rm -rf src/arc/.libs src/arc/_libs + -rm -rf src/arm/.libs src/arm/_libs + -rm -rf src/avr32/.libs src/avr32/_libs + -rm -rf src/bfin/.libs src/bfin/_libs + -rm -rf src/cris/.libs src/cris/_libs + -rm -rf src/frv/.libs src/frv/_libs + -rm -rf src/ia64/.libs src/ia64/_libs + -rm -rf src/m32r/.libs src/m32r/_libs + -rm -rf src/m68k/.libs src/m68k/_libs + -rm -rf src/m88k/.libs src/m88k/_libs + -rm -rf src/metag/.libs src/metag/_libs + -rm -rf src/microblaze/.libs src/microblaze/_libs + -rm -rf src/mips/.libs src/mips/_libs + -rm -rf src/moxie/.libs src/moxie/_libs + -rm -rf src/nios2/.libs src/nios2/_libs + -rm -rf src/or1k/.libs src/or1k/_libs + -rm -rf src/pa/.libs src/pa/_libs + -rm -rf src/powerpc/.libs src/powerpc/_libs + -rm -rf src/riscv/.libs src/riscv/_libs + -rm -rf src/s390/.libs src/s390/_libs + -rm -rf src/sh/.libs src/sh/_libs + -rm -rf src/sh64/.libs src/sh64/_libs + -rm -rf src/sparc/.libs src/sparc/_libs + -rm -rf src/tile/.libs src/tile/_libs + -rm -rf src/vax/.libs src/vax/_libs + -rm -rf src/x86/.libs src/x86/_libs + -rm -rf src/xtensa/.libs src/xtensa/_libs + +distclean-libtool: + -rm -f libtool config.lt +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! -s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-tarZ: distdir + @echo WARNING: "Support for distribution archives compressed with" \ + "legacy program 'compress' is deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + @echo WARNING: "Support for shar distribution archives is" \ + "deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. 
+distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build/sub \ + && ../../configure \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + --srcdir=../.. --prefix="$$dc_install_base" \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) fficonfig.h +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(pkgconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f src/$(DEPDIR)/$(am__dirstamp) + -rm -f src/$(am__dirstamp) + -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/aarch64/$(am__dirstamp) + -rm -f src/alpha/$(DEPDIR)/$(am__dirstamp) + -rm -f src/alpha/$(am__dirstamp) + -rm -f src/arc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arc/$(am__dirstamp) + -rm -f src/arm/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arm/$(am__dirstamp) + -rm -f src/avr32/$(DEPDIR)/$(am__dirstamp) + -rm -f src/avr32/$(am__dirstamp) + -rm -f src/bfin/$(DEPDIR)/$(am__dirstamp) + -rm -f src/bfin/$(am__dirstamp) + -rm -f src/cris/$(DEPDIR)/$(am__dirstamp) + -rm -f src/cris/$(am__dirstamp) + -rm -f src/frv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/frv/$(am__dirstamp) + -rm -f src/ia64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/ia64/$(am__dirstamp) + -rm -f src/m32r/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m32r/$(am__dirstamp) + -rm -f src/m68k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m68k/$(am__dirstamp) + -rm -f src/m88k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m88k/$(am__dirstamp) + -rm -f src/metag/$(DEPDIR)/$(am__dirstamp) + -rm -f src/metag/$(am__dirstamp) + -rm -f src/microblaze/$(DEPDIR)/$(am__dirstamp) + -rm -f src/microblaze/$(am__dirstamp) + -rm -f src/mips/$(DEPDIR)/$(am__dirstamp) + -rm -f src/mips/$(am__dirstamp) + -rm -f src/moxie/$(DEPDIR)/$(am__dirstamp) + -rm -f src/moxie/$(am__dirstamp) + -rm -f src/nios2/$(DEPDIR)/$(am__dirstamp) + -rm -f src/nios2/$(am__dirstamp) + -rm -f src/or1k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/or1k/$(am__dirstamp) + -rm -f src/pa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/pa/$(am__dirstamp) + -rm -f src/powerpc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/powerpc/$(am__dirstamp) + -rm -f src/riscv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/riscv/$(am__dirstamp) + -rm -f src/s390/$(DEPDIR)/$(am__dirstamp) + -rm -f src/s390/$(am__dirstamp) + -rm -f src/sh/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh/$(am__dirstamp) + -rm -f src/sh64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh64/$(am__dirstamp) + -rm -f src/sparc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sparc/$(am__dirstamp) + -rm -f src/tile/$(DEPDIR)/$(am__dirstamp) + -rm -f 
src/tile/$(am__dirstamp) + -rm -f src/vax/$(DEPDIR)/$(am__dirstamp) + -rm -f src/vax/$(am__dirstamp) + -rm -f src/x86/$(DEPDIR)/$(am__dirstamp) + -rm -f src/x86/$(am__dirstamp) + -rm -f src/xtensa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/xtensa/$(am__dirstamp) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-recursive + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-pkgconfigDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: install-toolexeclibLTLIBRARIES + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.MAKE: $(am__recursive_targets) all install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--refresh check check-am clean clean-cscope clean-generic \ + clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES cscope cscopelist-am ctags \ + ctags-am dist dist-all dist-bzip2 dist-gzip dist-hook \ + dist-lzip dist-shar dist-tarZ dist-xz dist-zip distcheck \ + distclean distclean-compile distclean-generic distclean-hdr \ + distclean-libtool distclean-tags distcleancheck distdir \ + distuninstallcheck dvi dvi-am html html-am 
info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkgconfigDATA install-ps \ + install-ps-am install-strip install-toolexeclibLTLIBRARIES \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ + uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.PRECIOUS: Makefile + +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@ $(libffi_la_OBJECTS) $(libffi_la_LIBADD) +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@ perl $(top_srcdir)/make_sunver.pl libffi.map \ +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@ `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@ sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ +@LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE@@LIBFFI_BUILD_VERSIONED_SHLIB_TRUE@ > $@ || (rm -f $@ ; exit 1) + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $< + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' ; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile.am new file mode 100644 index 0000000..563e9f2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile.am @@ -0,0 +1,160 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS = foreign subdir-objects + +ACLOCAL_AMFLAGS = -I m4 + +SUBDIRS = include testsuite man +if BUILD_DOCS +## This hack is needed because it doesn't seem possible to make a +## conditional info_TEXINFOS in Automake. At least Automake 1.14 +## either gives errors -- if this attempted in the most +## straightforward way -- or simply unconditionally tries to build the +## info file. 
+SUBDIRS += doc +endif + +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) + +MAKEOVERRIDES= + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc + +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la + +libffi_la_SOURCES = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c + +if FFI_DEBUG +libffi_la_SOURCES += src/debug.c +endif + +noinst_HEADERS = \ + src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h \ + src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h \ + src/bfin/ffitarget.h \ + src/cris/ffitarget.h \ + src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h \ + src/m68k/ffitarget.h \ + src/m88k/ffitarget.h \ + src/metag/ffitarget.h \ + src/microblaze/ffitarget.h \ + src/mips/ffitarget.h \ + src/moxie/ffitarget.h \ + src/nios2/ffitarget.h \ + src/or1k/ffitarget.h \ + src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h src/powerpc/ffi_powerpc.h \ + src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h \ + src/sh/ffitarget.h \ + src/sh64/ffitarget.h \ + src/sparc/ffitarget.h src/sparc/internal.h \ + src/tile/ffitarget.h \ + src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h src/x86/asmnames.h \ + src/xtensa/ffitarget.h \ + src/dlmalloc.c + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c \ + src/frv/eabi.S src/ia64/ffi.c src/ia64/unix.S src/m32r/ffi.c \ + src/m32r/sysv.S src/m68k/ffi.c src/m68k/sysv.S src/m88k/ffi.c \ + src/m88k/obsd.S src/metag/ffi.c src/metag/sysv.S \ + src/microblaze/ffi.c src/microblaze/sysv.S src/mips/ffi.c \ + src/mips/o32.S src/mips/n32.S src/moxie/ffi.c \ + src/moxie/eabi.S src/nios2/ffi.c src/nios2/sysv.S \ + src/or1k/ffi.c src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S \ + src/pa/hpux32.S src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S + +TARGET_OBJ = @TARGET_OBJ@ +libffi_la_LIBADD = $(TARGET_OBJ) + +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) 
+EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) + +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) + +AM_CFLAGS = +if FFI_DEBUG +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +AM_CFLAGS += -DFFI_DEBUG +endif + +if LIBFFI_BUILD_VERSIONED_SHLIB +if LIBFFI_BUILD_VERSIONED_SHLIB_GNU +libffi_version_script = -Wl,--version-script,libffi.map +libffi_version_dep = libffi.map +endif +if LIBFFI_BUILD_VERSIONED_SHLIB_SUN +libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = libffi.map-sun +libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ + $(libffi_la_OBJECTS) $(libffi_la_LIBADD) + perl $(top_srcdir)/make_sunver.pl libffi.map \ + `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ + sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ + > $@ || (rm -f $@ ; exit 1) +endif +else +libffi_version_script = +libffi_version_dep = +endif +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $< + +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) + +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' ; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README 2.md b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README 2.md new file mode 100644 index 0000000..66c9186 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README 2.md @@ -0,0 +1,482 @@ +Status +====== + +[![Build Status](https://travis-ci.org/libffi/libffi.svg?branch=master)](https://travis-ci.org/libffi/libffi) +[![Build status](https://ci.appveyor.com/api/projects/status/8lko9vagbx4w2kxq?svg=true)](https://ci.appveyor.com/project/atgreen/libffi) + +libffi-3.4 was released on TBD. Check the libffi web +page for updates: . + + +What is libffi? +=============== + +Compilers for high level languages generate code that follow certain +conventions. These conventions are necessary, in part, for separate +compilation to work. One such convention is the "calling +convention". The "calling convention" is essentially a set of +assumptions made by the compiler about where function arguments will +be found on entry to a function. A "calling convention" also specifies +where the return value for a function is found. + +Some programs may not know at the time of compilation what arguments +are to be passed to a function. For instance, an interpreter may be +told at run-time about the number and types of arguments used to call +a given function. Libffi can be used in such programs to provide a +bridge from the interpreter program to compiled code. 
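As a concrete illustration of that bridge, here is a minimal sketch, not taken from the vendored sources, of how a program might drive a compiled function through libffi's documented `ffi_prep_cif`/`ffi_call` interface. The message string and the compile command are invented for the example; everything else is the standard libffi API.

```c
/* Minimal libffi sketch: build a call interface at run time and use it
 * to invoke puts().  Build with something like: cc demo.c -lffi        */
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;                                    /* call interface description  */
    ffi_type *arg_types[1] = { &ffi_type_pointer }; /* puts() takes one char*      */
    void *arg_values[1];
    ffi_arg result;                                 /* widened integer return slot */
    char *message = "hello from libffi";

    /* Describe the call: default ABI, one argument, int return value. */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                     &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

    arg_values[0] = &message;                       /* pointer to each argument's value */
    ffi_call(&cif, FFI_FN(puts), &result, arg_values);
    printf("puts returned %d\n", (int) result);
    return 0;
}
```

An interpreter would do the same thing, except that the `ffi_type` array and the argument pointers would be filled in from values discovered at run time rather than hard-coded.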
+ +The libffi library provides a portable, high level programming +interface to various calling conventions. This allows a programmer to +call any function specified by a call interface description at run +time. + +FFI stands for Foreign Function Interface. A foreign function +interface is the popular name for the interface that allows code +written in one language to call code written in another language. The +libffi library really only provides the lowest, machine dependent +layer of a fully featured foreign function interface. A layer must +exist above libffi that handles type conversions for values passed +between the two languages. + + +Supported Platforms +=================== + +Libffi has been ported to many different platforms. + +At the time of release, the following basic configurations have been +tested: + +| Architecture | Operating System | Compiler | +| --------------- | ---------------- | ----------------------- | +| AArch64 (ARM64) | iOS | Clang | +| AArch64 | Linux | GCC | +| AArch64 | Windows | MSVC | +| Alpha | Linux | GCC | +| Alpha | Tru64 | GCC | +| ARC | Linux | GCC | +| ARM | Linux | GCC | +| ARM | iOS | GCC | +| ARM | Windows | MSVC | +| AVR32 | Linux | GCC | +| Blackfin | uClinux | GCC | +| HPPA | HPUX | GCC | +| IA-64 | Linux | GCC | +| M68K | FreeMiNT | GCC | +| M68K | Linux | GCC | +| M68K | RTEMS | GCC | +| M88K | OpenBSD/mvme88k | GCC | +| Meta | Linux | GCC | +| MicroBlaze | Linux | GCC | +| MIPS | IRIX | GCC | +| MIPS | Linux | GCC | +| MIPS | RTEMS | GCC | +| MIPS64 | Linux | GCC | +| Moxie | Bare metal | GCC | +| Nios II | Linux | GCC | +| OpenRISC | Linux | GCC | +| PowerPC 32-bit | AIX | IBM XL C | +| PowerPC 64-bit | AIX | IBM XL C | +| PowerPC | AMIGA | GCC | +| PowerPC | Linux | GCC | +| PowerPC | Mac OSX | GCC | +| PowerPC | FreeBSD | GCC | +| PowerPC 64-bit | FreeBSD | GCC | +| PowerPC 64-bit | Linux ELFv1 | GCC | +| PowerPC 64-bit | Linux ELFv2 | GCC | +| RISC-V 32-bit | Linux | GCC | +| RISC-V 64-bit | Linux | GCC | +| S390 | Linux | GCC | +| S390X | Linux | GCC | +| SPARC | Linux | GCC | +| SPARC | Solaris | GCC | +| SPARC | Solaris | Oracle Solaris Studio C | +| SPARC64 | Linux | GCC | +| SPARC64 | FreeBSD | GCC | +| SPARC64 | Solaris | Oracle Solaris Studio C | +| TILE-Gx/TILEPro | Linux | GCC | +| VAX | OpenBSD/vax | GCC | +| X86 | FreeBSD | GCC | +| X86 | GNU HURD | GCC | +| X86 | Interix | GCC | +| X86 | kFreeBSD | GCC | +| X86 | Linux | GCC | +| X86 | OpenBSD | GCC | +| X86 | OS/2 | GCC | +| X86 | Solaris | GCC | +| X86 | Solaris | Oracle Solaris Studio C | +| X86 | Windows/Cygwin | GCC | +| X86 | Windows/MingW | GCC | +| X86-64 | FreeBSD | GCC | +| X86-64 | Linux | GCC | +| X86-64 | Linux/x32 | GCC | +| X86-64 | OpenBSD | GCC | +| X86-64 | Solaris | Oracle Solaris Studio C | +| X86-64 | Windows/Cygwin | GCC | +| X86-64 | Windows/MingW | GCC | +| X86-64 | Mac OSX | GCC | +| Xtensa | Linux | GCC | + +Please send additional platform test results to +libffi-discuss@sourceware.org. + +Installing libffi +================= + +First you must configure the distribution for your particular +system. Go to the directory you wish to build libffi in and run the +"configure" program found in the root directory of the libffi source +distribution. Note that building libffi requires a C99 compatible +compiler. + +If you're building libffi directly from git hosted sources, configure +won't exist yet; run ./autogen.sh first. This will require that you +install autoconf, automake and libtool. 
+ +You may want to tell configure where to install the libffi library and +header files. To do that, use the ``--prefix`` configure switch. Libffi +will install under /usr/local by default. + +If you want to enable extra run-time debugging checks use the the +``--enable-debug`` configure switch. This is useful when your program dies +mysteriously while using libffi. + +Another useful configure switch is ``--enable-purify-safety``. Using this +will add some extra code which will suppress certain warnings when you +are using Purify with libffi. Only use this switch when using +Purify, as it will slow down the library. + +If you don't want to build documentation, use the ``--disable-docs`` +configure switch. + +It's also possible to build libffi on Windows platforms with +Microsoft's Visual C++ compiler. In this case, use the msvcc.sh +wrapper script during configuration like so: + + path/to/configure CC=path/to/msvcc.sh CXX=path/to/msvcc.sh LD=link CPP="cl -nologo -EP" CPPFLAGS="-DFFI_BUILDING_DLL" + +For 64-bit Windows builds, use ``CC="path/to/msvcc.sh -m64"`` and +``CXX="path/to/msvcc.sh -m64"``. You may also need to specify +``--build`` appropriately. + +It is also possible to build libffi on Windows platforms with the LLVM +project's clang-cl compiler, like below: + + path/to/configure CC="path/to/msvcc.sh -clang-cl" CXX="path/to/msvcc.sh -clang-cl" LD=link CPP="clang-cl -EP" + +When building with MSVC under a MingW environment, you may need to +remove the line in configure that sets 'fix_srcfile_path' to a 'cygpath' +command. ('cygpath' is not present in MingW, and is not required when +using MingW-style paths.) + +To build static library for ARM64 with MSVC using visual studio solution, msvc_build folder have + aarch64/Ffi_staticLib.sln + required header files in aarch64/aarch64_include/ + + +SPARC Solaris builds require the use of the GNU assembler and linker. +Point ``AS`` and ``LD`` environment variables at those tool prior to +configuration. + +For iOS builds, the ``libffi.xcodeproj`` Xcode project is available. + +Configure has many other options. Use ``configure --help`` to see them all. + +Once configure has finished, type "make". Note that you must be using +GNU make. You can ftp GNU make from ftp.gnu.org:/pub/gnu/make . + +To ensure that libffi is working as advertised, type "make check". +This will require that you have DejaGNU installed. + +To install the library and header files, type ``make install``. + + +History +======= + +See the git log for details at http://github.com/libffi/libffi. + + 3.4 TBD + Add support for Intel Control-flow Enforcement Technology (CET). + Add support for ARM Pointer Authentication (PA). + Fix 32-bit PPC regression. + Fix MIPS soft-float problem. + + 3.3 Nov-23-19 + Add RISC-V support. + New API in support of GO closures. + Add IEEE754 binary128 long double support for 64-bit Power + Default to Microsoft's 64 bit long double ABI with Visual C++. + GNU compiler uses 80 bits (128 in memory) FFI_GNUW64 ABI. + Add Windows on ARM64 (WOA) support. + Add Windows 32-bit ARM support. + Raw java (gcj) API deprecated. + Add pre-built PDF documentation to source distribution. + Many new test cases and bug fixes. + + 3.2.1 Nov-12-14 + Build fix for non-iOS AArch64 targets. + + 3.2 Nov-11-14 + Add C99 Complex Type support (currently only supported on + s390). + Add support for PASCAL and REGISTER calling conventions on x86 + Windows/Linux. + Add OpenRISC and Cygwin-64 support. + Bug fixes. + + 3.1 May-19-14 + Add AArch64 (ARM64) iOS support. 
+ Add Nios II support. + Add m88k and DEC VAX support. + Add support for stdcall, thiscall, and fastcall on non-Windows + 32-bit x86 targets such as Linux. + Various Android, MIPS N32, x86, FreeBSD and UltraSPARC IIi + fixes. + Make the testsuite more robust: eliminate several spurious + failures, and respect the $CC and $CXX environment variables. + Archive off the manually maintained ChangeLog in favor of git + log. + + 3.0.13 Mar-17-13 + Add Meta support. + Add missing Moxie bits. + Fix stack alignment bug on 32-bit x86. + Build fix for m68000 targets. + Build fix for soft-float Power targets. + Fix the install dir location for some platforms when building + with GCC (OS X, Solaris). + Fix Cygwin regression. + + 3.0.12 Feb-11-13 + Add Moxie support. + Add AArch64 support. + Add Blackfin support. + Add TILE-Gx/TILEPro support. + Add MicroBlaze support. + Add Xtensa support. + Add support for PaX enabled kernels with MPROTECT. + Add support for native vendor compilers on + Solaris and AIX. + Work around LLVM/GCC interoperability issue on x86_64. + + 3.0.11 Apr-11-12 + Lots of build fixes. + Add support for variadic functions (ffi_prep_cif_var). + Add Linux/x32 support. + Add thiscall, fastcall and MSVC cdecl support on Windows. + Add Amiga and newer MacOS support. + Add m68k FreeMiNT support. + Integration with iOS' xcode build tools. + Fix Octeon and MC68881 support. + Fix code pessimizations. + + 3.0.10 Aug-23-11 + Add support for Apple's iOS. + Add support for ARM VFP ABI. + Add RTEMS support for MIPS and M68K. + Fix instruction cache clearing problems on + ARM and SPARC. + Fix the N64 build on mips-sgi-irix6.5. + Enable builds with Microsoft's compiler. + Enable x86 builds with Oracle's Solaris compiler. + Fix support for calling code compiled with Oracle's Sparc + Solaris compiler. + Testsuite fixes for Tru64 Unix. + Additional platform support. + + 3.0.9 Dec-31-09 + Add AVR32 and win64 ports. Add ARM softfp support. + Many fixes for AIX, Solaris, HP-UX, *BSD. + Several PowerPC and x86-64 bug fixes. + Build DLL for windows. + + 3.0.8 Dec-19-08 + Add *BSD, BeOS, and PA-Linux support. + + 3.0.7 Nov-11-08 + Fix for ppc FreeBSD. + (thanks to Andreas Tobler) + + 3.0.6 Jul-17-08 + Fix for closures on sh. + Mark the sh/sh64 stack as non-executable. + (both thanks to Kaz Kojima) + + 3.0.5 Apr-3-08 + Fix libffi.pc file. + Fix #define ARM for IcedTea users. + Fix x86 closure bug. + + 3.0.4 Feb-24-08 + Fix x86 OpenBSD configury. + + 3.0.3 Feb-22-08 + Enable x86 OpenBSD thanks to Thomas Heller, and + x86-64 FreeBSD thanks to Björn König and Andreas Tobler. + Clean up test instruction in README. + + 3.0.2 Feb-21-08 + Improved x86 FreeBSD support. + Thanks to Björn König. + + 3.0.1 Feb-15-08 + Fix instruction cache flushing bug on MIPS. + Thanks to David Daney. + + 3.0.0 Feb-15-08 + Many changes, mostly thanks to the GCC project. + Cygnus Solutions is now Red Hat. + + [10 years go by...] + + 1.20 Oct-5-98 + Raffaele Sena produces ARM port. + + 1.19 Oct-5-98 + Fixed x86 long double and long long return support. + m68k bug fixes from Andreas Schwab. + Patch for DU assembler compatibility for the Alpha from Richard + Henderson. + + 1.18 Apr-17-98 + Bug fixes and MIPS configuration changes. + + 1.17 Feb-24-98 + Bug fixes and m68k port from Andreas Schwab. PowerPC port from + Geoffrey Keating. Various bug x86, Sparc and MIPS bug fixes. + + 1.16 Feb-11-98 + Richard Henderson produces Alpha port. + + 1.15 Dec-4-97 + Fixed an n32 ABI bug. New libtool, auto* support. 
+ + 1.14 May-13-97 + libtool is now used to generate shared and static libraries. + Fixed a minor portability problem reported by Russ McManus + . + + 1.13 Dec-2-96 + Added --enable-purify-safety to keep Purify from complaining + about certain low level code. + Sparc fix for calling functions with < 6 args. + Linux x86 a.out fix. + + 1.12 Nov-22-96 + Added missing ffi_type_void, needed for supporting void return + types. Fixed test case for non MIPS machines. Cygnus Support + is now Cygnus Solutions. + + 1.11 Oct-30-96 + Added notes about GNU make. + + 1.10 Oct-29-96 + Added configuration fix for non GNU compilers. + + 1.09 Oct-29-96 + Added --enable-debug configure switch. Clean-ups based on LCLint + feedback. ffi_mips.h is always installed. Many configuration + fixes. Fixed ffitest.c for sparc builds. + + 1.08 Oct-15-96 + Fixed n32 problem. Many clean-ups. + + 1.07 Oct-14-96 + Gordon Irlam rewrites v8.S again. Bug fixes. + + 1.06 Oct-14-96 + Gordon Irlam improved the sparc port. + + 1.05 Oct-14-96 + Interface changes based on feedback. + + 1.04 Oct-11-96 + Sparc port complete (modulo struct passing bug). + + 1.03 Oct-10-96 + Passing struct args, and returning struct values works for + all architectures/calling conventions. Expanded tests. + + 1.02 Oct-9-96 + Added SGI n32 support. Fixed bugs in both o32 and Linux support. + Added "make test". + + 1.01 Oct-8-96 + Fixed float passing bug in mips version. Restructured some + of the code. Builds cleanly with SGI tools. + + 1.00 Oct-7-96 + First release. No public announcement. + +Authors & Credits +================= + +libffi was originally written by Anthony Green . + +The developers of the GNU Compiler Collection project have made +innumerable valuable contributions. See the ChangeLog file for +details. + +Some of the ideas behind libffi were inspired by Gianni Mariani's free +gencall library for Silicon Graphics machines. + +The closure mechanism was designed and implemented by Kresten Krab +Thorup. + +Major processor architecture ports were contributed by the following +developers: + + aarch64 Marcus Shawcroft, James Greenhalgh + alpha Richard Henderson + arc Hackers at Synopsis + arm Raffaele Sena + avr32 Bradley Smith + blackfin Alexandre Keunecke I. de Mendonca + cris Simon Posnjak, Hans-Peter Nilsson + frv Anthony Green + ia64 Hans Boehm + m32r Kazuhiro Inaoka + m68k Andreas Schwab + m88k Miod Vallat + metag Hackers at Imagination Technologies + microblaze Nathan Rossi + mips Anthony Green, Casey Marshall + mips64 David Daney + moxie Anthony Green + nios ii Sandra Loosemore + openrisc Sebastian Macke + pa Randolph Chung, Dave Anglin, Andreas Tobler + powerpc Geoffrey Keating, Andreas Tobler, + David Edelsohn, John Hornkvist + powerpc64 Jakub Jelinek + riscv Michael Knyszek, Andrew Waterman, Stef O'Rear + s390 Gerhard Tonn, Ulrich Weigand + sh Kaz Kojima + sh64 Kaz Kojima + sparc Anthony Green, Gordon Irlam + tile-gx/tilepro Walter Lee + vax Miod Vallat + x86 Anthony Green, Jon Beniston + x86-64 Bo Thorsen + xtensa Chris Zankel + +Jesper Skov and Andrew Haley both did more than their fair share of +stepping through the code and tracking down bugs. + +Thanks also to Tom Tromey for bug fixes, documentation and +configuration help. + +Thanks to Jim Blandy, who provided some useful feedback on the libffi +interface. + +Andreas Tobler has done a tremendous amount of work on the testsuite. + +Alex Oliva solved the executable page problem for SElinux. + +The list above is almost certainly incomplete and inaccurate. 
I'm +happy to make corrections or additions upon request. + +If you have a problem, or have found a bug, please send a note to the +author at green@moxielogic.com, or the project mailing list at +libffi-discuss@sourceware.org. diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README.md b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README.md new file mode 100644 index 0000000..66c9186 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README.md @@ -0,0 +1,482 @@ +Status +====== + +[![Build Status](https://travis-ci.org/libffi/libffi.svg?branch=master)](https://travis-ci.org/libffi/libffi) +[![Build status](https://ci.appveyor.com/api/projects/status/8lko9vagbx4w2kxq?svg=true)](https://ci.appveyor.com/project/atgreen/libffi) + +libffi-3.4 was released on TBD. Check the libffi web +page for updates: . + + +What is libffi? +=============== + +Compilers for high level languages generate code that follow certain +conventions. These conventions are necessary, in part, for separate +compilation to work. One such convention is the "calling +convention". The "calling convention" is essentially a set of +assumptions made by the compiler about where function arguments will +be found on entry to a function. A "calling convention" also specifies +where the return value for a function is found. + +Some programs may not know at the time of compilation what arguments +are to be passed to a function. For instance, an interpreter may be +told at run-time about the number and types of arguments used to call +a given function. Libffi can be used in such programs to provide a +bridge from the interpreter program to compiled code. + +The libffi library provides a portable, high level programming +interface to various calling conventions. This allows a programmer to +call any function specified by a call interface description at run +time. + +FFI stands for Foreign Function Interface. A foreign function +interface is the popular name for the interface that allows code +written in one language to call code written in another language. The +libffi library really only provides the lowest, machine dependent +layer of a fully featured foreign function interface. A layer must +exist above libffi that handles type conversions for values passed +between the two languages. + + +Supported Platforms +=================== + +Libffi has been ported to many different platforms. 
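+As a concrete illustration of the call-interface description above, a minimal
+sketch of the typical usage pattern might look like the following. It assumes a
+standard libffi installation (``ffi.h`` on the include path, linking against
+``-lffi -lm``) and uses the C library's ``pow`` purely as an example target
+function:
+
+    /* Minimal libffi usage sketch: describe a call interface at run time
+       and invoke pow(2.0, 10.0) through it. */
+    #include <ffi.h>
+    #include <math.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        ffi_cif cif;
+        ffi_type *arg_types[2] = { &ffi_type_double, &ffi_type_double };
+        double x = 2.0, y = 10.0, result = 0.0;
+        void *arg_values[2] = { &x, &y };
+
+        /* Two double arguments, double return value, default ABI. */
+        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
+                         &ffi_type_double, arg_types) != FFI_OK)
+            return 1;
+
+        /* The same cif could be reused for any function with this signature. */
+        ffi_call(&cif, FFI_FN(pow), &result, arg_values);
+        printf("pow(%g, %g) = %g\n", x, y, result);
+        return 0;
+    }
+
+Compilation is typically along the lines of ``cc demo.c -lffi -lm``; the exact
+flags depend on where ``make install`` placed the headers and library (on many
+systems pkg-config's ``libffi`` module can report them).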
+ +At the time of release, the following basic configurations have been +tested: + +| Architecture | Operating System | Compiler | +| --------------- | ---------------- | ----------------------- | +| AArch64 (ARM64) | iOS | Clang | +| AArch64 | Linux | GCC | +| AArch64 | Windows | MSVC | +| Alpha | Linux | GCC | +| Alpha | Tru64 | GCC | +| ARC | Linux | GCC | +| ARM | Linux | GCC | +| ARM | iOS | GCC | +| ARM | Windows | MSVC | +| AVR32 | Linux | GCC | +| Blackfin | uClinux | GCC | +| HPPA | HPUX | GCC | +| IA-64 | Linux | GCC | +| M68K | FreeMiNT | GCC | +| M68K | Linux | GCC | +| M68K | RTEMS | GCC | +| M88K | OpenBSD/mvme88k | GCC | +| Meta | Linux | GCC | +| MicroBlaze | Linux | GCC | +| MIPS | IRIX | GCC | +| MIPS | Linux | GCC | +| MIPS | RTEMS | GCC | +| MIPS64 | Linux | GCC | +| Moxie | Bare metal | GCC | +| Nios II | Linux | GCC | +| OpenRISC | Linux | GCC | +| PowerPC 32-bit | AIX | IBM XL C | +| PowerPC 64-bit | AIX | IBM XL C | +| PowerPC | AMIGA | GCC | +| PowerPC | Linux | GCC | +| PowerPC | Mac OSX | GCC | +| PowerPC | FreeBSD | GCC | +| PowerPC 64-bit | FreeBSD | GCC | +| PowerPC 64-bit | Linux ELFv1 | GCC | +| PowerPC 64-bit | Linux ELFv2 | GCC | +| RISC-V 32-bit | Linux | GCC | +| RISC-V 64-bit | Linux | GCC | +| S390 | Linux | GCC | +| S390X | Linux | GCC | +| SPARC | Linux | GCC | +| SPARC | Solaris | GCC | +| SPARC | Solaris | Oracle Solaris Studio C | +| SPARC64 | Linux | GCC | +| SPARC64 | FreeBSD | GCC | +| SPARC64 | Solaris | Oracle Solaris Studio C | +| TILE-Gx/TILEPro | Linux | GCC | +| VAX | OpenBSD/vax | GCC | +| X86 | FreeBSD | GCC | +| X86 | GNU HURD | GCC | +| X86 | Interix | GCC | +| X86 | kFreeBSD | GCC | +| X86 | Linux | GCC | +| X86 | OpenBSD | GCC | +| X86 | OS/2 | GCC | +| X86 | Solaris | GCC | +| X86 | Solaris | Oracle Solaris Studio C | +| X86 | Windows/Cygwin | GCC | +| X86 | Windows/MingW | GCC | +| X86-64 | FreeBSD | GCC | +| X86-64 | Linux | GCC | +| X86-64 | Linux/x32 | GCC | +| X86-64 | OpenBSD | GCC | +| X86-64 | Solaris | Oracle Solaris Studio C | +| X86-64 | Windows/Cygwin | GCC | +| X86-64 | Windows/MingW | GCC | +| X86-64 | Mac OSX | GCC | +| Xtensa | Linux | GCC | + +Please send additional platform test results to +libffi-discuss@sourceware.org. + +Installing libffi +================= + +First you must configure the distribution for your particular +system. Go to the directory you wish to build libffi in and run the +"configure" program found in the root directory of the libffi source +distribution. Note that building libffi requires a C99 compatible +compiler. + +If you're building libffi directly from git hosted sources, configure +won't exist yet; run ./autogen.sh first. This will require that you +install autoconf, automake and libtool. + +You may want to tell configure where to install the libffi library and +header files. To do that, use the ``--prefix`` configure switch. Libffi +will install under /usr/local by default. + +If you want to enable extra run-time debugging checks use the the +``--enable-debug`` configure switch. This is useful when your program dies +mysteriously while using libffi. + +Another useful configure switch is ``--enable-purify-safety``. Using this +will add some extra code which will suppress certain warnings when you +are using Purify with libffi. Only use this switch when using +Purify, as it will slow down the library. + +If you don't want to build documentation, use the ``--disable-docs`` +configure switch. + +It's also possible to build libffi on Windows platforms with +Microsoft's Visual C++ compiler. 
In this case, use the msvcc.sh +wrapper script during configuration like so: + + path/to/configure CC=path/to/msvcc.sh CXX=path/to/msvcc.sh LD=link CPP="cl -nologo -EP" CPPFLAGS="-DFFI_BUILDING_DLL" + +For 64-bit Windows builds, use ``CC="path/to/msvcc.sh -m64"`` and +``CXX="path/to/msvcc.sh -m64"``. You may also need to specify +``--build`` appropriately. + +It is also possible to build libffi on Windows platforms with the LLVM +project's clang-cl compiler, like below: + + path/to/configure CC="path/to/msvcc.sh -clang-cl" CXX="path/to/msvcc.sh -clang-cl" LD=link CPP="clang-cl -EP" + +When building with MSVC under a MingW environment, you may need to +remove the line in configure that sets 'fix_srcfile_path' to a 'cygpath' +command. ('cygpath' is not present in MingW, and is not required when +using MingW-style paths.) + +To build static library for ARM64 with MSVC using visual studio solution, msvc_build folder have + aarch64/Ffi_staticLib.sln + required header files in aarch64/aarch64_include/ + + +SPARC Solaris builds require the use of the GNU assembler and linker. +Point ``AS`` and ``LD`` environment variables at those tool prior to +configuration. + +For iOS builds, the ``libffi.xcodeproj`` Xcode project is available. + +Configure has many other options. Use ``configure --help`` to see them all. + +Once configure has finished, type "make". Note that you must be using +GNU make. You can ftp GNU make from ftp.gnu.org:/pub/gnu/make . + +To ensure that libffi is working as advertised, type "make check". +This will require that you have DejaGNU installed. + +To install the library and header files, type ``make install``. + + +History +======= + +See the git log for details at http://github.com/libffi/libffi. + + 3.4 TBD + Add support for Intel Control-flow Enforcement Technology (CET). + Add support for ARM Pointer Authentication (PA). + Fix 32-bit PPC regression. + Fix MIPS soft-float problem. + + 3.3 Nov-23-19 + Add RISC-V support. + New API in support of GO closures. + Add IEEE754 binary128 long double support for 64-bit Power + Default to Microsoft's 64 bit long double ABI with Visual C++. + GNU compiler uses 80 bits (128 in memory) FFI_GNUW64 ABI. + Add Windows on ARM64 (WOA) support. + Add Windows 32-bit ARM support. + Raw java (gcj) API deprecated. + Add pre-built PDF documentation to source distribution. + Many new test cases and bug fixes. + + 3.2.1 Nov-12-14 + Build fix for non-iOS AArch64 targets. + + 3.2 Nov-11-14 + Add C99 Complex Type support (currently only supported on + s390). + Add support for PASCAL and REGISTER calling conventions on x86 + Windows/Linux. + Add OpenRISC and Cygwin-64 support. + Bug fixes. + + 3.1 May-19-14 + Add AArch64 (ARM64) iOS support. + Add Nios II support. + Add m88k and DEC VAX support. + Add support for stdcall, thiscall, and fastcall on non-Windows + 32-bit x86 targets such as Linux. + Various Android, MIPS N32, x86, FreeBSD and UltraSPARC IIi + fixes. + Make the testsuite more robust: eliminate several spurious + failures, and respect the $CC and $CXX environment variables. + Archive off the manually maintained ChangeLog in favor of git + log. + + 3.0.13 Mar-17-13 + Add Meta support. + Add missing Moxie bits. + Fix stack alignment bug on 32-bit x86. + Build fix for m68000 targets. + Build fix for soft-float Power targets. + Fix the install dir location for some platforms when building + with GCC (OS X, Solaris). + Fix Cygwin regression. + + 3.0.12 Feb-11-13 + Add Moxie support. + Add AArch64 support. + Add Blackfin support. 
+ Add TILE-Gx/TILEPro support. + Add MicroBlaze support. + Add Xtensa support. + Add support for PaX enabled kernels with MPROTECT. + Add support for native vendor compilers on + Solaris and AIX. + Work around LLVM/GCC interoperability issue on x86_64. + + 3.0.11 Apr-11-12 + Lots of build fixes. + Add support for variadic functions (ffi_prep_cif_var). + Add Linux/x32 support. + Add thiscall, fastcall and MSVC cdecl support on Windows. + Add Amiga and newer MacOS support. + Add m68k FreeMiNT support. + Integration with iOS' xcode build tools. + Fix Octeon and MC68881 support. + Fix code pessimizations. + + 3.0.10 Aug-23-11 + Add support for Apple's iOS. + Add support for ARM VFP ABI. + Add RTEMS support for MIPS and M68K. + Fix instruction cache clearing problems on + ARM and SPARC. + Fix the N64 build on mips-sgi-irix6.5. + Enable builds with Microsoft's compiler. + Enable x86 builds with Oracle's Solaris compiler. + Fix support for calling code compiled with Oracle's Sparc + Solaris compiler. + Testsuite fixes for Tru64 Unix. + Additional platform support. + + 3.0.9 Dec-31-09 + Add AVR32 and win64 ports. Add ARM softfp support. + Many fixes for AIX, Solaris, HP-UX, *BSD. + Several PowerPC and x86-64 bug fixes. + Build DLL for windows. + + 3.0.8 Dec-19-08 + Add *BSD, BeOS, and PA-Linux support. + + 3.0.7 Nov-11-08 + Fix for ppc FreeBSD. + (thanks to Andreas Tobler) + + 3.0.6 Jul-17-08 + Fix for closures on sh. + Mark the sh/sh64 stack as non-executable. + (both thanks to Kaz Kojima) + + 3.0.5 Apr-3-08 + Fix libffi.pc file. + Fix #define ARM for IcedTea users. + Fix x86 closure bug. + + 3.0.4 Feb-24-08 + Fix x86 OpenBSD configury. + + 3.0.3 Feb-22-08 + Enable x86 OpenBSD thanks to Thomas Heller, and + x86-64 FreeBSD thanks to Björn König and Andreas Tobler. + Clean up test instruction in README. + + 3.0.2 Feb-21-08 + Improved x86 FreeBSD support. + Thanks to Björn König. + + 3.0.1 Feb-15-08 + Fix instruction cache flushing bug on MIPS. + Thanks to David Daney. + + 3.0.0 Feb-15-08 + Many changes, mostly thanks to the GCC project. + Cygnus Solutions is now Red Hat. + + [10 years go by...] + + 1.20 Oct-5-98 + Raffaele Sena produces ARM port. + + 1.19 Oct-5-98 + Fixed x86 long double and long long return support. + m68k bug fixes from Andreas Schwab. + Patch for DU assembler compatibility for the Alpha from Richard + Henderson. + + 1.18 Apr-17-98 + Bug fixes and MIPS configuration changes. + + 1.17 Feb-24-98 + Bug fixes and m68k port from Andreas Schwab. PowerPC port from + Geoffrey Keating. Various bug x86, Sparc and MIPS bug fixes. + + 1.16 Feb-11-98 + Richard Henderson produces Alpha port. + + 1.15 Dec-4-97 + Fixed an n32 ABI bug. New libtool, auto* support. + + 1.14 May-13-97 + libtool is now used to generate shared and static libraries. + Fixed a minor portability problem reported by Russ McManus + . + + 1.13 Dec-2-96 + Added --enable-purify-safety to keep Purify from complaining + about certain low level code. + Sparc fix for calling functions with < 6 args. + Linux x86 a.out fix. + + 1.12 Nov-22-96 + Added missing ffi_type_void, needed for supporting void return + types. Fixed test case for non MIPS machines. Cygnus Support + is now Cygnus Solutions. + + 1.11 Oct-30-96 + Added notes about GNU make. + + 1.10 Oct-29-96 + Added configuration fix for non GNU compilers. + + 1.09 Oct-29-96 + Added --enable-debug configure switch. Clean-ups based on LCLint + feedback. ffi_mips.h is always installed. Many configuration + fixes. Fixed ffitest.c for sparc builds. 
+ + 1.08 Oct-15-96 + Fixed n32 problem. Many clean-ups. + + 1.07 Oct-14-96 + Gordon Irlam rewrites v8.S again. Bug fixes. + + 1.06 Oct-14-96 + Gordon Irlam improved the sparc port. + + 1.05 Oct-14-96 + Interface changes based on feedback. + + 1.04 Oct-11-96 + Sparc port complete (modulo struct passing bug). + + 1.03 Oct-10-96 + Passing struct args, and returning struct values works for + all architectures/calling conventions. Expanded tests. + + 1.02 Oct-9-96 + Added SGI n32 support. Fixed bugs in both o32 and Linux support. + Added "make test". + + 1.01 Oct-8-96 + Fixed float passing bug in mips version. Restructured some + of the code. Builds cleanly with SGI tools. + + 1.00 Oct-7-96 + First release. No public announcement. + +Authors & Credits +================= + +libffi was originally written by Anthony Green . + +The developers of the GNU Compiler Collection project have made +innumerable valuable contributions. See the ChangeLog file for +details. + +Some of the ideas behind libffi were inspired by Gianni Mariani's free +gencall library for Silicon Graphics machines. + +The closure mechanism was designed and implemented by Kresten Krab +Thorup. + +Major processor architecture ports were contributed by the following +developers: + + aarch64 Marcus Shawcroft, James Greenhalgh + alpha Richard Henderson + arc Hackers at Synopsis + arm Raffaele Sena + avr32 Bradley Smith + blackfin Alexandre Keunecke I. de Mendonca + cris Simon Posnjak, Hans-Peter Nilsson + frv Anthony Green + ia64 Hans Boehm + m32r Kazuhiro Inaoka + m68k Andreas Schwab + m88k Miod Vallat + metag Hackers at Imagination Technologies + microblaze Nathan Rossi + mips Anthony Green, Casey Marshall + mips64 David Daney + moxie Anthony Green + nios ii Sandra Loosemore + openrisc Sebastian Macke + pa Randolph Chung, Dave Anglin, Andreas Tobler + powerpc Geoffrey Keating, Andreas Tobler, + David Edelsohn, John Hornkvist + powerpc64 Jakub Jelinek + riscv Michael Knyszek, Andrew Waterman, Stef O'Rear + s390 Gerhard Tonn, Ulrich Weigand + sh Kaz Kojima + sh64 Kaz Kojima + sparc Anthony Green, Gordon Irlam + tile-gx/tilepro Walter Lee + vax Miod Vallat + x86 Anthony Green, Jon Beniston + x86-64 Bo Thorsen + xtensa Chris Zankel + +Jesper Skov and Andrew Haley both did more than their fair share of +stepping through the code and tracking down bugs. + +Thanks also to Tom Tromey for bug fixes, documentation and +configuration help. + +Thanks to Jim Blandy, who provided some useful feedback on the libffi +interface. + +Andreas Tobler has done a tremendous amount of work on the testsuite. + +Alex Oliva solved the executable page problem for SElinux. + +The list above is almost certainly incomplete and inaccurate. I'm +happy to make corrections or additions upon request. + +If you have a problem, or have found a bug, please send a note to the +author at green@moxielogic.com, or the project mailing list at +libffi-discuss@sourceware.org. diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude 2.m4 new file mode 100644 index 0000000..1a70efb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude 2.m4 @@ -0,0 +1,479 @@ +# mmap(2) blacklisting. Some platforms provide the mmap library routine +# but don't support all of the features we need from it. 
+AC_DEFUN([AC_FUNC_MMAP_BLACKLIST], +[ +AC_CHECK_HEADER([sys/mman.h], + [libffi_header_sys_mman_h=yes], [libffi_header_sys_mman_h=no]) +AC_CHECK_FUNC([mmap], [libffi_func_mmap=yes], [libffi_func_mmap=no]) +if test "$libffi_header_sys_mman_h" != yes \ + || test "$libffi_func_mmap" != yes; then + ac_cv_func_mmap_file=no + ac_cv_func_mmap_dev_zero=no + ac_cv_func_mmap_anon=no +else + AC_CACHE_CHECK([whether read-only mmap of a plain file works], + ac_cv_func_mmap_file, + [# Add a system to this blacklist if + # mmap(0, stat_size, PROT_READ, MAP_PRIVATE, fd, 0) doesn't return a + # memory area containing the same data that you'd get if you applied + # read() to the same fd. The only system known to have a problem here + # is VMS, where text files have record structure. + case "$host_os" in + vms* | ultrix*) + ac_cv_func_mmap_file=no ;; + *) + ac_cv_func_mmap_file=yes;; + esac]) + AC_CACHE_CHECK([whether mmap from /dev/zero works], + ac_cv_func_mmap_dev_zero, + [# Add a system to this blacklist if it has mmap() but /dev/zero + # does not exist, or if mmapping /dev/zero does not give anonymous + # zeroed pages with both the following properties: + # 1. If you map N consecutive pages in with one call, and then + # unmap any subset of those pages, the pages that were not + # explicitly unmapped remain accessible. + # 2. If you map two adjacent blocks of memory and then unmap them + # both at once, they must both go away. + # Systems known to be in this category are Windows (all variants), + # VMS, and Darwin. + case "$host_os" in + vms* | cygwin* | pe | mingw* | darwin* | ultrix* | hpux10* | hpux11.00) + ac_cv_func_mmap_dev_zero=no ;; + *) + ac_cv_func_mmap_dev_zero=yes;; + esac]) + + # Unlike /dev/zero, the MAP_ANON(YMOUS) defines can be probed for. + AC_CACHE_CHECK([for MAP_ANON(YMOUS)], ac_cv_decl_map_anon, + [AC_TRY_COMPILE( +[#include +#include +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +], +[int n = MAP_ANONYMOUS;], + ac_cv_decl_map_anon=yes, + ac_cv_decl_map_anon=no)]) + + if test $ac_cv_decl_map_anon = no; then + ac_cv_func_mmap_anon=no + else + AC_CACHE_CHECK([whether mmap with MAP_ANON(YMOUS) works], + ac_cv_func_mmap_anon, + [# Add a system to this blacklist if it has mmap() and MAP_ANON or + # MAP_ANONYMOUS, but using mmap(..., MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) + # doesn't give anonymous zeroed pages with the same properties listed + # above for use of /dev/zero. + # Systems known to be in this category are Windows, VMS, and SCO Unix. + case "$host_os" in + vms* | cygwin* | pe | mingw* | sco* | udk* ) + ac_cv_func_mmap_anon=no ;; + *) + ac_cv_func_mmap_anon=yes;; + esac]) + fi +fi + +if test $ac_cv_func_mmap_file = yes; then + AC_DEFINE(HAVE_MMAP_FILE, 1, + [Define if read-only mmap of a plain file works.]) +fi +if test $ac_cv_func_mmap_dev_zero = yes; then + AC_DEFINE(HAVE_MMAP_DEV_ZERO, 1, + [Define if mmap of /dev/zero works.]) +fi +if test $ac_cv_func_mmap_anon = yes; then + AC_DEFINE(HAVE_MMAP_ANON, 1, + [Define if mmap with MAP_ANON(YMOUS) works.]) +fi +]) + +dnl ---------------------------------------------------------------------- +dnl This whole bit snagged from libstdc++-v3, via libatomic. + +dnl +dnl LIBFFI_ENABLE +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, permit a|b|c) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, SHELL-CODE-HANDLER) +dnl +dnl See docs/html/17_intro/configury.html#enable for documentation. 
+dnl +m4_define([LIBFFI_ENABLE],[dnl +m4_define([_g_switch],[--enable-$1])dnl +m4_define([_g_help],[AC_HELP_STRING(_g_switch$3,[$4 @<:@default=$2@:>@])])dnl + AC_ARG_ENABLE($1,_g_help, + m4_bmatch([$5], + [^permit ], + [[ + case "$enableval" in + m4_bpatsubst([$5],[permit ])) ;; + *) AC_MSG_ERROR(Unknown argument to enable/disable $1) ;; + dnl Idea for future: generate a URL pointing to + dnl "onlinedocs/configopts.html#whatever" + esac + ]], + [^$], + [[ + case "$enableval" in + yes|no) ;; + *) AC_MSG_ERROR(Argument to enable/disable $1 must be yes or no) ;; + esac + ]], + [[$5]]), + [enable_]m4_bpatsubst([$1],-,_)[=][$2]) +m4_undefine([_g_switch])dnl +m4_undefine([_g_help])dnl +]) + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. + # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. 
In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + libat_ld_is_lld=no + if $LD --version 2>/dev/null | grep 'LLD '> /dev/null 2>&1; then + libat_ld_is_lld=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. 
+ # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl Add version tags to symbols in shared library (or not), additionally +dnl marking other symbols as private/local (or not). +dnl +dnl --enable-symvers=style adds a version script to the linker call when +dnl creating the shared library. The choice of version script is +dnl controlled by 'style'. +dnl --disable-symvers does not. +dnl + Usage: LIBFFI_ENABLE_SYMVERS[(DEFAULT)] +dnl Where DEFAULT is either 'yes' or 'no'. Passing `yes' tries to +dnl choose a default style based on linker characteristics. Passing +dnl 'no' disables versioning. +dnl +AC_DEFUN([LIBFFI_ENABLE_SYMVERS], [ + +LIBFFI_ENABLE(symvers,yes,[=STYLE], + [enables symbol versioning of the shared library], + [permit yes|no|gnu*|sun]) + +# If we never went through the LIBFFI_CHECK_LINKER_FEATURES macro, then we +# don't know enough about $LD to do tricks... +AC_REQUIRE([LIBFFI_CHECK_LINKER_FEATURES]) + +# Turn a 'yes' into a suitable default. +if test x$enable_symvers = xyes ; then + # FIXME The following test is too strict, in theory. + if test $enable_shared = no || test "x$LD" = x; then + enable_symvers=no + else + if test $with_gnu_ld = yes ; then + enable_symvers=gnu + else + case ${target_os} in + # Sun symbol versioning exists since Solaris 2.5. + solaris2.[[5-9]]* | solaris2.1[[0-9]]*) + enable_symvers=sun ;; + *) + enable_symvers=no ;; + esac + fi + fi +fi + +# Check if 'sun' was requested on non-Solaris 2 platforms. +if test x$enable_symvers = xsun ; then + case ${target_os} in + solaris2*) + # All fine. + ;; + *) + # Unlikely to work. + AC_MSG_WARN([=== You have requested Sun symbol versioning, but]) + AC_MSG_WARN([=== you are not targetting Solaris 2.]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + ;; + esac +fi + +# Check to see if libgcc_s exists, indicating that shared libgcc is possible. 
+if test $enable_symvers != no; then + AC_MSG_CHECKING([for shared libgcc]) + ac_save_CFLAGS="$CFLAGS" + CFLAGS=' -lgcc_s' + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes, libat_shared_libgcc=no) + CFLAGS="$ac_save_CFLAGS" + if test $libat_shared_libgcc = no; then + cat > conftest.c <&1 >/dev/null \ + | sed -n 's/^.* -lgcc_s\([^ ]*\) .*$/\1/p'` +changequote([,])dnl + rm -f conftest.c conftest.so + if test x${libat_libgcc_s_suffix+set} = xset; then + CFLAGS=" -lgcc_s$libat_libgcc_s_suffix" + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes) + CFLAGS="$ac_save_CFLAGS" + fi + fi + AC_MSG_RESULT($libat_shared_libgcc) +fi + +# For GNU ld, we need at least this version. The format is described in +# LIBFFI_CHECK_LINKER_FEATURES above. +libat_min_gnu_ld_version=21400 +# XXXXXXXXXXX libat_gnu_ld_version=21390 + +# Check to see if unspecified "yes" value can win, given results above. +# Change "yes" into either "no" or a style name. +if test $enable_symvers != no && test $libat_shared_libgcc = yes; then + if test $with_gnu_ld = yes; then + if test $libat_gnu_ld_version -ge $libat_min_gnu_ld_version ; then + enable_symvers=gnu + elif test $libat_ld_is_gold = yes ; then + enable_symvers=gnu + elif test $libat_ld_is_lld = yes ; then + enable_symvers=gnu + else + # The right tools, the right setup, but too old. Fallbacks? + AC_MSG_WARN(=== Linker version $libat_gnu_ld_version is too old for) + AC_MSG_WARN(=== full symbol versioning support in this release of GCC.) + AC_MSG_WARN(=== You would need to upgrade your binutils to version) + AC_MSG_WARN(=== $libat_min_gnu_ld_version or later and rebuild GCC.) + if test $libat_gnu_ld_version -ge 21200 ; then + # Globbing fix is present, proper block support is not. + dnl AC_MSG_WARN([=== Dude, you are soooo close. Maybe we can fake it.]) + dnl enable_symvers=??? + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + else + # 2.11 or older. + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi + fi + elif test $enable_symvers = sun; then + : All interesting versions of Sun ld support sun style symbol versioning. + else + # just fail for now + AC_MSG_WARN([=== You have requested some kind of symbol versioning, but]) + AC_MSG_WARN([=== either you are not using a supported linker, or you are]) + AC_MSG_WARN([=== not building a shared libgcc_s (which is required).]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi +fi +if test $enable_symvers = gnu; then + AC_DEFINE(LIBFFI_GNU_SYMBOL_VERSIONING, 1, + [Define to 1 if GNU symbol versioning is used for libatomic.]) +fi + +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB, test $enable_symvers != no) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_GNU, test $enable_symvers = gnu) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_SUN, test $enable_symvers = sun) +AC_MSG_NOTICE(versioning on shared library symbols is $enable_symvers) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude.m4 new file mode 100644 index 0000000..1a70efb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude.m4 @@ -0,0 +1,479 @@ +# mmap(2) blacklisting. Some platforms provide the mmap library routine +# but don't support all of the features we need from it. 
+AC_DEFUN([AC_FUNC_MMAP_BLACKLIST], +[ +AC_CHECK_HEADER([sys/mman.h], + [libffi_header_sys_mman_h=yes], [libffi_header_sys_mman_h=no]) +AC_CHECK_FUNC([mmap], [libffi_func_mmap=yes], [libffi_func_mmap=no]) +if test "$libffi_header_sys_mman_h" != yes \ + || test "$libffi_func_mmap" != yes; then + ac_cv_func_mmap_file=no + ac_cv_func_mmap_dev_zero=no + ac_cv_func_mmap_anon=no +else + AC_CACHE_CHECK([whether read-only mmap of a plain file works], + ac_cv_func_mmap_file, + [# Add a system to this blacklist if + # mmap(0, stat_size, PROT_READ, MAP_PRIVATE, fd, 0) doesn't return a + # memory area containing the same data that you'd get if you applied + # read() to the same fd. The only system known to have a problem here + # is VMS, where text files have record structure. + case "$host_os" in + vms* | ultrix*) + ac_cv_func_mmap_file=no ;; + *) + ac_cv_func_mmap_file=yes;; + esac]) + AC_CACHE_CHECK([whether mmap from /dev/zero works], + ac_cv_func_mmap_dev_zero, + [# Add a system to this blacklist if it has mmap() but /dev/zero + # does not exist, or if mmapping /dev/zero does not give anonymous + # zeroed pages with both the following properties: + # 1. If you map N consecutive pages in with one call, and then + # unmap any subset of those pages, the pages that were not + # explicitly unmapped remain accessible. + # 2. If you map two adjacent blocks of memory and then unmap them + # both at once, they must both go away. + # Systems known to be in this category are Windows (all variants), + # VMS, and Darwin. + case "$host_os" in + vms* | cygwin* | pe | mingw* | darwin* | ultrix* | hpux10* | hpux11.00) + ac_cv_func_mmap_dev_zero=no ;; + *) + ac_cv_func_mmap_dev_zero=yes;; + esac]) + + # Unlike /dev/zero, the MAP_ANON(YMOUS) defines can be probed for. + AC_CACHE_CHECK([for MAP_ANON(YMOUS)], ac_cv_decl_map_anon, + [AC_TRY_COMPILE( +[#include +#include +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +], +[int n = MAP_ANONYMOUS;], + ac_cv_decl_map_anon=yes, + ac_cv_decl_map_anon=no)]) + + if test $ac_cv_decl_map_anon = no; then + ac_cv_func_mmap_anon=no + else + AC_CACHE_CHECK([whether mmap with MAP_ANON(YMOUS) works], + ac_cv_func_mmap_anon, + [# Add a system to this blacklist if it has mmap() and MAP_ANON or + # MAP_ANONYMOUS, but using mmap(..., MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) + # doesn't give anonymous zeroed pages with the same properties listed + # above for use of /dev/zero. + # Systems known to be in this category are Windows, VMS, and SCO Unix. + case "$host_os" in + vms* | cygwin* | pe | mingw* | sco* | udk* ) + ac_cv_func_mmap_anon=no ;; + *) + ac_cv_func_mmap_anon=yes;; + esac]) + fi +fi + +if test $ac_cv_func_mmap_file = yes; then + AC_DEFINE(HAVE_MMAP_FILE, 1, + [Define if read-only mmap of a plain file works.]) +fi +if test $ac_cv_func_mmap_dev_zero = yes; then + AC_DEFINE(HAVE_MMAP_DEV_ZERO, 1, + [Define if mmap of /dev/zero works.]) +fi +if test $ac_cv_func_mmap_anon = yes; then + AC_DEFINE(HAVE_MMAP_ANON, 1, + [Define if mmap with MAP_ANON(YMOUS) works.]) +fi +]) + +dnl ---------------------------------------------------------------------- +dnl This whole bit snagged from libstdc++-v3, via libatomic. + +dnl +dnl LIBFFI_ENABLE +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, permit a|b|c) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, SHELL-CODE-HANDLER) +dnl +dnl See docs/html/17_intro/configury.html#enable for documentation. 
+dnl +m4_define([LIBFFI_ENABLE],[dnl +m4_define([_g_switch],[--enable-$1])dnl +m4_define([_g_help],[AC_HELP_STRING(_g_switch$3,[$4 @<:@default=$2@:>@])])dnl + AC_ARG_ENABLE($1,_g_help, + m4_bmatch([$5], + [^permit ], + [[ + case "$enableval" in + m4_bpatsubst([$5],[permit ])) ;; + *) AC_MSG_ERROR(Unknown argument to enable/disable $1) ;; + dnl Idea for future: generate a URL pointing to + dnl "onlinedocs/configopts.html#whatever" + esac + ]], + [^$], + [[ + case "$enableval" in + yes|no) ;; + *) AC_MSG_ERROR(Argument to enable/disable $1 must be yes or no) ;; + esac + ]], + [[$5]]), + [enable_]m4_bpatsubst([$1],-,_)[=][$2]) +m4_undefine([_g_switch])dnl +m4_undefine([_g_help])dnl +]) + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. + # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. 
In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + libat_ld_is_lld=no + if $LD --version 2>/dev/null | grep 'LLD '> /dev/null 2>&1; then + libat_ld_is_lld=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. 
+ # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl Add version tags to symbols in shared library (or not), additionally +dnl marking other symbols as private/local (or not). +dnl +dnl --enable-symvers=style adds a version script to the linker call when +dnl creating the shared library. The choice of version script is +dnl controlled by 'style'. +dnl --disable-symvers does not. +dnl + Usage: LIBFFI_ENABLE_SYMVERS[(DEFAULT)] +dnl Where DEFAULT is either 'yes' or 'no'. Passing `yes' tries to +dnl choose a default style based on linker characteristics. Passing +dnl 'no' disables versioning. +dnl +AC_DEFUN([LIBFFI_ENABLE_SYMVERS], [ + +LIBFFI_ENABLE(symvers,yes,[=STYLE], + [enables symbol versioning of the shared library], + [permit yes|no|gnu*|sun]) + +# If we never went through the LIBFFI_CHECK_LINKER_FEATURES macro, then we +# don't know enough about $LD to do tricks... +AC_REQUIRE([LIBFFI_CHECK_LINKER_FEATURES]) + +# Turn a 'yes' into a suitable default. +if test x$enable_symvers = xyes ; then + # FIXME The following test is too strict, in theory. + if test $enable_shared = no || test "x$LD" = x; then + enable_symvers=no + else + if test $with_gnu_ld = yes ; then + enable_symvers=gnu + else + case ${target_os} in + # Sun symbol versioning exists since Solaris 2.5. + solaris2.[[5-9]]* | solaris2.1[[0-9]]*) + enable_symvers=sun ;; + *) + enable_symvers=no ;; + esac + fi + fi +fi + +# Check if 'sun' was requested on non-Solaris 2 platforms. +if test x$enable_symvers = xsun ; then + case ${target_os} in + solaris2*) + # All fine. + ;; + *) + # Unlikely to work. + AC_MSG_WARN([=== You have requested Sun symbol versioning, but]) + AC_MSG_WARN([=== you are not targetting Solaris 2.]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + ;; + esac +fi + +# Check to see if libgcc_s exists, indicating that shared libgcc is possible. 
+if test $enable_symvers != no; then + AC_MSG_CHECKING([for shared libgcc]) + ac_save_CFLAGS="$CFLAGS" + CFLAGS=' -lgcc_s' + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes, libat_shared_libgcc=no) + CFLAGS="$ac_save_CFLAGS" + if test $libat_shared_libgcc = no; then + cat > conftest.c <&1 >/dev/null \ + | sed -n 's/^.* -lgcc_s\([^ ]*\) .*$/\1/p'` +changequote([,])dnl + rm -f conftest.c conftest.so + if test x${libat_libgcc_s_suffix+set} = xset; then + CFLAGS=" -lgcc_s$libat_libgcc_s_suffix" + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes) + CFLAGS="$ac_save_CFLAGS" + fi + fi + AC_MSG_RESULT($libat_shared_libgcc) +fi + +# For GNU ld, we need at least this version. The format is described in +# LIBFFI_CHECK_LINKER_FEATURES above. +libat_min_gnu_ld_version=21400 +# XXXXXXXXXXX libat_gnu_ld_version=21390 + +# Check to see if unspecified "yes" value can win, given results above. +# Change "yes" into either "no" or a style name. +if test $enable_symvers != no && test $libat_shared_libgcc = yes; then + if test $with_gnu_ld = yes; then + if test $libat_gnu_ld_version -ge $libat_min_gnu_ld_version ; then + enable_symvers=gnu + elif test $libat_ld_is_gold = yes ; then + enable_symvers=gnu + elif test $libat_ld_is_lld = yes ; then + enable_symvers=gnu + else + # The right tools, the right setup, but too old. Fallbacks? + AC_MSG_WARN(=== Linker version $libat_gnu_ld_version is too old for) + AC_MSG_WARN(=== full symbol versioning support in this release of GCC.) + AC_MSG_WARN(=== You would need to upgrade your binutils to version) + AC_MSG_WARN(=== $libat_min_gnu_ld_version or later and rebuild GCC.) + if test $libat_gnu_ld_version -ge 21200 ; then + # Globbing fix is present, proper block support is not. + dnl AC_MSG_WARN([=== Dude, you are soooo close. Maybe we can fake it.]) + dnl enable_symvers=??? + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + else + # 2.11 or older. + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi + fi + elif test $enable_symvers = sun; then + : All interesting versions of Sun ld support sun style symbol versioning. 
+ else + # just fail for now + AC_MSG_WARN([=== You have requested some kind of symbol versioning, but]) + AC_MSG_WARN([=== either you are not using a supported linker, or you are]) + AC_MSG_WARN([=== not building a shared libgcc_s (which is required).]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi +fi +if test $enable_symvers = gnu; then + AC_DEFINE(LIBFFI_GNU_SYMBOL_VERSIONING, 1, + [Define to 1 if GNU symbol versioning is used for libatomic.]) +fi + +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB, test $enable_symvers != no) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_GNU, test $enable_symvers = gnu) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_SUN, test $enable_symvers = sun) +AC_MSG_NOTICE(versioning on shared library symbols is $enable_symvers) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen 2.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen 2.sh new file mode 100755 index 0000000..fb014a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen 2.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec autoreconf -v -i diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen.sh new file mode 100755 index 0000000..fb014a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec autoreconf -v -i diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config 2.guess b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config 2.guess new file mode 100644 index 0000000..faa63aa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config 2.guess @@ -0,0 +1,1466 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2017 Free Software Foundation, Inc. + +timestamp='2017-05-11' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." 
+ +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2017 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. 
+ +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
+ echo "${machine}-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? 
+ echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). 
Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? ) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = hppa2.0w ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. 
hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 
ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + 
xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +cat >&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config 2.sub b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config 2.sub new file mode 100644 index 0000000..40ea5df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config 2.sub @@ -0,0 +1,1836 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2017 Free Software Foundation, Inc. + +timestamp='2017-04-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2017 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia16 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | wasm32 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | wasm32-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + 
;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + 
os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + nsx-tandem) + basic_machine=nsx-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + 
basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + wasm32) + basic_machine=wasm32-unknown + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + pru-*) + os=-elf + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.guess b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.guess new file mode 100644 index 0000000..faa63aa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.guess @@ -0,0 +1,1466 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2017 Free Software Foundation, Inc. + +timestamp='2017-05-11' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2017 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." 
+ +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. 
We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 
's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = hppa2.0w ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo 
${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + 
xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +cat >&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.sub b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.sub new file mode 100644 index 0000000..40ea5df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.sub @@ -0,0 +1,1836 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2017 Free Software Foundation, Inc. + +timestamp='2017-04-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2017 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia16 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | wasm32 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | wasm32-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + 
;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + 
os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + nsx-tandem) + basic_machine=nsx-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + 
basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + wasm32) + basic_machine=wasm32-unknown + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + pru-*) + os=-elf + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2 new file mode 100755 index 0000000..7f9cd3a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2 @@ -0,0 +1,21988 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for libffi 3.3. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. 
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. 
+ as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and +$0: http://github.com/libffi/libffi/issues about your +$0: system, including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + +SHELL=${CONFIG_SHELL-/bin/sh} + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='libffi' +PACKAGE_TARNAME='libffi' +PACKAGE_VERSION='3.3' +PACKAGE_STRING='libffi 3.3' +PACKAGE_BUGREPORT='http://github.com/libffi/libffi/issues' +PACKAGE_URL='' + +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='am__EXEEXT_FALSE +am__EXEEXT_TRUE +LTLIBOBJS +LIBOBJS +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE +LIBFFI_BUILD_VERSIONED_SHLIB_FALSE +LIBFFI_BUILD_VERSIONED_SHLIB_TRUE +OPT_LDFLAGS +SECTION_LDFLAGS +toolexeclibdir +toolexecdir +FFI_DEBUG_FALSE +FFI_DEBUG_TRUE +TARGET_OBJ +TARGETDIR +TARGET +BUILD_DOCS_FALSE +BUILD_DOCS_TRUE +FFI_EXEC_TRAMPOLINE_TABLE +FFI_EXEC_TRAMPOLINE_TABLE_FALSE +FFI_EXEC_TRAMPOLINE_TABLE_TRUE +sys_symbol_underscore +HAVE_LONG_DOUBLE_VARIANT +HAVE_LONG_DOUBLE +ALLOCA +AM_LTLDFLAGS +AM_RUNTESTFLAGS +TESTSUBDIR_FALSE +TESTSUBDIR_TRUE +MAINT +MAINTAINER_MODE_FALSE +MAINTAINER_MODE_TRUE +PRTDIAG +CXXCPP +CPP +LT_SYS_LIBRARY_PATH +OTOOL64 +OTOOL +LIPO +NMEDIT +DSYMUTIL +MANIFEST_TOOL +RANLIB +ac_ct_AR +AR +DLLTOOL +OBJDUMP +LN_S +NM +ac_ct_DUMPBIN +DUMPBIN +LD +FGREP +EGREP +GREP +SED +LIBTOOL +am__fastdepCCAS_FALSE +am__fastdepCCAS_TRUE +CCASDEPMODE +CCASFLAGS +CCAS +am__fastdepCXX_FALSE +am__fastdepCXX_TRUE +CXXDEPMODE +ac_ct_CXX +CXXFLAGS +CXX +am__fastdepCC_FALSE +am__fastdepCC_TRUE +CCDEPMODE +am__nodep +AMDEPBACKSLASH +AMDEP_FALSE +AMDEP_TRUE +am__quote +am__include +DEPDIR +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +AM_BACKSLASH +AM_DEFAULT_VERBOSITY +AM_DEFAULT_V +AM_V +am__untar +am__tar +AMTAR +am__leading_dot +SET_MAKE +AWK +mkdir_p +MKDIR_P +INSTALL_STRIP_PROGRAM +STRIP +install_sh +MAKEINFO +AUTOHEADER +AUTOMAKE +AUTOCONF +ACLOCAL +VERSION +PACKAGE +CYGPATH_W +am__isrc +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +ax_enable_builddir_sed +target_os +target_vendor +target_cpu +target +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +runstatedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +enable_builddir +enable_silent_rules +enable_dependency_tracking +enable_shared +enable_static +with_pic +enable_fast_install +with_aix_soname +with_gnu_ld +with_sysroot +enable_libtool_lock +enable_portable_binary +with_gcc_arch +enable_maintainer_mode +enable_pax_emutramp +enable_docs +enable_debug +enable_structs +enable_raw_api +enable_purify_safety +enable_multi_os_directory +enable_symvers +' + ac_precious_vars='build_alias +host_alias +target_alias +CCAS +CCASFLAGS +LT_SYS_LIBRARY_PATH +CPP +CPPFLAGS +CXXCPP' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. 
+cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | 
--localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | 
--sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? 
"unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? 
"cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures libffi 3.3 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/libffi] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] + --target=TARGET configure for building compilers for TARGET [HOST] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of libffi 3.3:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --disable-builddir disable automatic build in subdir of sources + + --enable-silent-rules less verbose build output (undo: "make V=1") + --disable-silent-rules verbose build output (undo: "make V=0") + --enable-dependency-tracking + do not reject slow dependency extractors + --disable-dependency-tracking + speeds up one-time build + --enable-shared[=PKGS] build shared libraries [default=yes] + --enable-static[=PKGS] build static libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + --enable-portable-binary + disable compiler optimizations that would produce + unportable binaries + --enable-maintainer-mode + enable make rules and dependencies not useful (and + sometimes confusing) to the casual installer + --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC + --disable-docs Disable building of docs (default: no) + --enable-debug debugging mode + --disable-structs omit code for struct support + --disable-raw-api make the raw api unavailable + --enable-purify-safety purify-safe mode + --disable-multi-os-directory + disable use of gcc --print-multi-os-directory to change the library installation directory + --enable-symvers=STYLE enables symbol versioning of the shared library + [default=yes] + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-pic[=PKGS] try to use only 
PIC/non-PIC objects [default=use + both] + --with-aix-soname=aix|svr4|both + shared library versioning (aka "SONAME") variant to + provide on AIX, [default=aix]. + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-sysroot[=DIR] Search for dependent libraries within DIR (or the + compiler's sysroot if not specified). + --with-gcc-arch=<arch> use architecture <arch> for gcc -march/-mtune, + instead of guessing + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a + nonstandard directory <lib dir> + LIBS libraries to pass to the linker, e.g. -l<library> + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if + you have headers in a nonstandard directory <include dir> + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CCAS assembler compiler command (defaults to CC) + CCASFLAGS assembler compiler flags (defaults to CFLAGS) + LT_SYS_LIBRARY_PATH + User-defined run-time library search path. + CPP C preprocessor + CXXCPP C++ preprocessor + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to <http://github.com/libffi/libffi/issues>. +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +libffi configure 3.3 +generated by GNU Autoconf 2.69 + +Copyright (C) 2012 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization.
## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case <limits.h> declares $2. + For example, HP-UX 11i <limits.h> declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + <limits.h> exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test !
-s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_cpp + +# ac_fn_cxx_try_link LINENO +# ------------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES +# -------------------------------------------- +# Tries to find the compile-time value of EXPR in a program that includes +# INCLUDES, setting VAR accordingly. Returns whether the value could be +# computed +ac_fn_c_compute_int () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=0 ac_mid=0 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid; break +else + as_fn_arith $ac_mid + 1 && ac_lo=$as_val + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) < 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=-1 ac_mid=-1 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=$ac_mid; break +else + as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + ac_lo= ac_hi= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid +else + as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in #(( +?*) eval "$3=\$ac_lo"; ac_retval=0 ;; +'') ac_retval=1 ;; +esac + else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +static long int longval () { return $2; } +static unsigned long int ulongval () { return $2; } +#include <stdio.h> +#include <stdlib.h> +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! f) + return 1; + if (($2) < 0) + { + long int i = longval (); + if (i != ($2)) + return 1; + fprintf (f, "%ld", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ($2)) + return 1; + fprintf (f, "%lu", i); + } + /* Do not output a trailing newline, as this causes \r\n confusion + on some platforms. */ + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + echo >>conftest.val; read $3 <conftest.val; ac_retval=0 +else + ac_retval=1 +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext conftest.val +rm -f conftest.val + + fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_compute_int + +# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_c_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!"
>&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} +( $as_echo "## ----------------------------------------------------- ## +## Report this to http://github.com/libffi/libffi/issues ## +## ----------------------------------------------------- ##" + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_mongrel + +# ac_fn_c_check_type LINENO TYPE VAR INCLUDES +# ------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_c_check_type () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof ($2)) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_type +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libffi $as_me 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_config_headers="$ac_config_headers fficonfig.h" + + +ac_aux_dir= +for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 +$as_echo_n "checking target system type... " >&6; } +if ${ac_cv_target+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$target_alias" = x; then + ac_cv_target=$ac_cv_host +else + ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 +$as_echo "$ac_cv_target" >&6; } +case $ac_cv_target in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical target" "$LINENO" 5;; +esac +target=$ac_cv_target +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_target +shift +target_cpu=$1 +target_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +target_os=$* +IFS=$ac_save_IFS +case $target_os in *\ *) target_os=`echo "$target_os" | sed 's/ /-/g'`;; esac + + +# The aliases save the names the user supplied, while $host etc. +# will get canonicalized. +test -n "$target_alias" && + test "$program_prefix$program_suffix$program_transform_name" = \ + NONENONEs,x,x, && + program_prefix=${target_alias}- + +target_alias=${target_alias-$host_alias} + +case "${host}" in + frv*-elf) + LDFLAGS=`echo $LDFLAGS | sed "s/\-B^ *libgloss\/frv\///"`\ -B`pwd`/../libgloss/frv/ + ;; +esac + + + # [$]@ is unusable in 2.60+ but earlier autoconf had no ac_configure_args + if test "${ac_configure_args+set}" != "set" ; then + ac_configure_args= + for ac_arg in ${1+"$@"}; do + ac_configure_args="$ac_configure_args '$ac_arg'" + done + fi + +# Expand $ac_aux_dir to an absolute path. +am_aux_dir=`cd "$ac_aux_dir" && pwd` + + +ax_enable_builddir="." +# Check whether --enable-builddir was given. +if test "${enable_builddir+set}" = set; then : + enableval=$enable_builddir; ax_enable_builddir="$enableval" +else + ax_enable_builddir="auto" +fi + +if test ".$ac_srcdir_defaulted" != ".no" ; then +if test ".$srcdir" = ".." ; then + if test -f config.status ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: toplevel srcdir already configured... skipping subdir build" >&5 +$as_echo "$as_me: toplevel srcdir already configured... 
skipping subdir build" >&6;} + else + test ".$ax_enable_builddir" = "." && ax_enable_builddir="." + test ".$ax_enable_builddir" = ".no" && ax_enable_builddir="." + test ".$TARGET" = "." && TARGET="$target" + test ".$ax_enable_builddir" = ".auto" && ax_enable_builddir="$TARGET" + if test ".$ax_enable_builddir" != ".." ; then # we know where to go and + as_dir=$ax_enable_builddir; as_fn_mkdir_p + echo __.$ax_enable_builddir.__ > $ax_enable_builddir/conftest.tmp + cd $ax_enable_builddir + if grep __.$ax_enable_builddir.__ conftest.tmp >/dev/null 2>/dev/null ; then + rm conftest.tmp + { $as_echo "$as_me:${as_lineno-$LINENO}: result: continue configure in default builddir \"./$ax_enable_builddir\"" >&5 +$as_echo "continue configure in default builddir \"./$ax_enable_builddir\"" >&6; } + else + as_fn_error $? "could not change to default builddir \"./$ax_enable_builddir\"" "$LINENO" 5 + fi + srcdir=`echo "$ax_enable_builddir" | + sed -e 's,^\./,,;s,[^/]$,&/,;s,[^/]*/,../,g;s,[/]$,,;'` + # going to restart from subdirectory location + test -f $srcdir/config.log && mv $srcdir/config.log . + test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h . + test -f $srcdir/conftest.log && mv $srcdir/conftest.log . + test -f $srcdir/$cache_file && mv $srcdir/$cache_file . + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ....exec $SHELL $srcdir/$0 \"--srcdir=$srcdir\" \"--enable-builddir=$ax_enable_builddir\" ${1+\"$@\"}" >&5 +$as_echo "....exec $SHELL $srcdir/$0 \"--srcdir=$srcdir\" \"--enable-builddir=$ax_enable_builddir\" ${1+\"$@\"}" >&6; } + case "$0" in # restart + [\\/]* | ?:[\\/]*) # Absolute name + eval $SHELL "'$0'" "'--srcdir=$srcdir'" "'--enable-builddir=$ax_enable_builddir'" $ac_configure_args ;; + *) eval $SHELL "'$srcdir/$0'" "'--srcdir=$srcdir'" "'--enable-builddir=$ax_enable_builddir'" $ac_configure_args ;; + esac ; exit $? + fi + fi +fi fi +test ".$ax_enable_builddir" = ".auto" && ax_enable_builddir="." +# Extract the first word of "gsed sed", so it can be a program name with args. +set dummy gsed sed; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ax_enable_builddir_sed+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ax_enable_builddir_sed in + [\\/]* | ?:[\\/]*) + ac_cv_path_ax_enable_builddir_sed="$ax_enable_builddir_sed" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_ax_enable_builddir_sed="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_ax_enable_builddir_sed" && ac_cv_path_ax_enable_builddir_sed="sed" + ;; +esac +fi +ax_enable_builddir_sed=$ac_cv_path_ax_enable_builddir_sed +if test -n "$ax_enable_builddir_sed"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_enable_builddir_sed" >&5 +$as_echo "$ax_enable_builddir_sed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +ax_enable_builddir_auxdir="$am_aux_dir" +ac_config_commands="$ac_config_commands buildir" + + +am__api_version='1.15' + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. 
But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +$as_echo_n "checking whether build environment is sane... " >&6; } +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. 
+am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken + alias in your environment" "$LINENO" 5 + fi + if test "$2" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. + sleep 1 + am_has_slept=yes + done + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi + +rm -f conftest.file + +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. +ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 +$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 +$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if ${ac_cv_path_mkdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue + case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir (GNU coreutils) '* | \ + 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. 
Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + MKDIR_P="$ac_install_sh -d" + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +$as_echo "$MKDIR_P" >&6; } + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +# Check whether --enable-silent-rules was given. +if test "${enable_silent_rules+set}" = set; then : + enableval=$enable_silent_rules; +fi + +case $enable_silent_rules in # ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=1;; +esac +am_make=${MAKE-make} +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +$as_echo_n "checking whether $am_make supports nested variables... 
" >&6; } +if ${am_cv_make_support_nested_variables+:} false; then : + $as_echo_n "(cached) " >&6 +else + if $as_echo 'TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +$as_echo "$am_cv_make_support_nested_variables" >&6; } +if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AM_BACKSLASH='\' + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='libffi' + VERSION='3.3' + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# For better backward compatibility. To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +mkdir_p='$(MKDIR_P)' + +# We need awk for the "check" target (and possibly the TAP driver). The +# system "awk" is bad on some platforms. +# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' + + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar pax cpio none' + +am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' + + + + + + +# POSIX will say in a future version that running "rm -f" with no argument +# is OK; and we want to be able to make that assumption in our Makefile +# recipes. So use an aggressive probe to check that the usage we want is +# actually supported "in the wild" to an acceptable degree. +# See automake bug#10828. +# To make any issue more visible, cause the running configure to be aborted +# by default if the 'rm' program in use doesn't match our expectations; the +# user can still override this though. +if rm -f && rm -fr && rm -rf; then : OK; else + cat >&2 <<'END' +Oops! + +Your 'rm' program seems unable to run without file operands specified +on the command line, even when the '-f' option is present. This is contrary +to the behaviour of most rm programs out there, and not conforming with +the upcoming POSIX standard: + +Please tell bug-automake@gnu.org about your system, including the value +of your $PATH and any error possibly output before this message. This +can help us improve future automake versions. 
+ +END + if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then + echo 'Configuration will proceed anyway, since you have set the' >&2 + echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 + echo >&2 + else + cat >&2 <<'END' +Aborting the configuration process, to ensure you take notice of the issue. + +You can download and install GNU coreutils to get an 'rm' implementation +that behaves properly: . + +If you want to complete the configuration process using your problematic +'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM +to "yes", and re-run configure. + +END + as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 + fi +fi + + +# The same as in boehm-gc and libstdc++. Have to borrow it from there. +# We must force CC to /not/ be precious variables; otherwise +# the wrong, non-multilib-adjusted value will be used in multilibs. +# As a side effect, we have to subst CFLAGS ourselves. +# Also save and restore CFLAGS, since AC_PROG_CC will come up with +# defaults of its own if none are provided. + + + +save_CFLAGS=$CFLAGS +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. 
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. 
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+if test -z "$ac_file"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$?
= $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } +if ${am_cv_prog_cc_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF + # Make sure it works both with $CC and with simple cc. + # Following AC_PROG_CC_C_O, we do the test twice because some + # compilers refuse to overwrite an existing .o file with -o, + # though they will create one. + am_cv_prog_cc_c_o=yes + for am_i in 1 2; do + if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 + ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } \ + && test -f conftest2.$ac_objext; then + : OK + else + am_cv_prog_cc_c_o=no + break + fi + done + rm -f core conftest* + unset am_i +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +$as_echo "$am_cv_prog_cc_c_o" >&6; } +if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. 
+ # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 +$as_echo_n "checking for style of include used by $am_make... " >&6; } +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from 'make'. +case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 +$as_echo "$_am_result" >&6; } +rm -f confinc confmf + +# Check whether --enable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +depcc="$CC" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CC_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. 
+ mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CXX" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CXX_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. 
+ am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + +CFLAGS=$save_CFLAGS + + + + + +# By default we simply use the C compiler to build assembly code. + +test "${CCAS+set}" = set || CCAS=$CC +test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS + + + +depcc="$CCAS" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CCAS_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. 
For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CCAS_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CCAS_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CCAS_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CCAS_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CCAS_dependencies_compiler_type" >&6; } +CCASDEPMODE=depmode=$am_cv_CCAS_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CCAS_dependencies_compiler_type" = gcc3; then + am__fastdepCCAS_TRUE= + am__fastdepCCAS_FALSE='#' +else + am__fastdepCCAS_TRUE='#' + am__fastdepCCAS_FALSE= +fi + + + +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4.6' +macro_revision='2.4.6' + + + + + + + + + + + + + +ltmain=$ac_aux_dir/ltmain.sh + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +$as_echo_n "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case $ECHO in + printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +$as_echo "printf" >&6; } ;; + print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +$as_echo "print -r" >&6; } ;; + *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +$as_echo "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... " >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. + # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? 
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... " >&6; } +if ${ac_cv_path_FGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue +# Check for GNU ac_path_FGREP and select it if it is found. 
+  # Check for GNU $ac_path_FGREP
+case `"$ac_path_FGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'FGREP' >> "conftest.nl"
+    "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_FGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_FGREP="$ac_path_FGREP"
+      ac_path_FGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_FGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_FGREP"; then
+    as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_FGREP=$FGREP
+fi
+
+   fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
+$as_echo "$ac_cv_path_FGREP" >&6; }
+ FGREP="$ac_cv_path_FGREP"
+
+
+test -z "$GREP" && GREP=grep
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+else
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test yes = "$GCC"; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return, which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+        ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD=$ac_prog
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test yes = "$with_gnu_ld"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$LD"; then
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD=$ac_dir/$ac_prog
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+        test no != "$with_gnu_ld" && break
+        ;;
+      *)
+        test yes != "$with_gnu_ld" && break
+        ;;
+      esac
+    fi
+  done
+  IFS=$lt_save_ifs
+else
+  lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi
+fi
+
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
"no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +if ${lt_cv_path_NM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM=$NM +else + lt_nm_to_check=${ac_tool_prefix}nm + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + tmp_nm=$ac_dir/$lt_tmp_nm + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the 'sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty + case $build_os in + mingw*) lt_bad_file=conftest.nm/nofile ;; + *) lt_bad_file=/dev/null ;; + esac + case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in + *$lt_bad_file* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break 2 + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break 2 + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS=$lt_save_ifs + done + : ${lt_cv_path_NM=no} +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test no != "$lt_cv_path_NM"; then + NM=$lt_cv_path_NM +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols -headers" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test : != "$DUMPBIN"; then + NM=$DUMPBIN + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... " >&6; } +if ${lt_cv_nm_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... 
" >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi + +# find the maximum length of command line arguments +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if ${lt_cv_sys_max_cmd_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + i=0 + teststring=ABCD + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test X`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test 17 != "$i" # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n "$lt_cv_sys_max_cmd_len"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +$as_echo_n "checking how to convert $build file names to $host format... 
" >&6; } +if ${lt_cv_to_host_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +$as_echo "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } +if ${lt_cv_to_tool_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +$as_echo "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... " >&6; } +if ${lt_cv_ld_reload_flag+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test yes != "$GCC"; then + reload_cmds=false + fi + ;; + darwin*) + if test yes = "$GCC"; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if ${lt_cv_deplibs_check_method+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# 'unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# that responds to the $file_magic_cmd with a given extended regex. +# If you have 'file' or equivalent on your system and you're not sure +# whether 'pass_all' will *always* work, you probably want this one. 
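+# Illustrative sketch only (not produced by autoconf): a 'file_magic' method
+# is consumed later, at link time, roughly as in the fragment below -- the
+# regex is peeled off the method string and matched against the output of
+# $file_magic_cmd for a candidate library.  The names $potlib, $a_deplib and
+# $newdeplibs are placeholders for this sketch, not variables set here.
+#
+#   file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+#   if eval "$file_magic_cmd \"\$potlib\"" 2>/dev/null |
+#        $EGREP "$file_magic_regex" >/dev/null; then
+#     newdeplibs="$newdeplibs $a_deplib"   # accept the dependency
+#   fi
+#
+# 'pass_all', by contrast, skips this probing and passes every dependency
+# through unchecked.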
+ +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd* | bitrig*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +os2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +$as_echo_n "checking how to associate runtime and link libraries... " >&6; } +if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh; + # decide which one to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd=$ECHO + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} +: ${AR_FLAGS=cru} + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +$as_echo_n "checking for archiver @FILE support... " >&6; } +if ${lt_cv_ar_at_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -eq "$ac_status"; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if test 0 -ne "$ac_status"; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +$as_echo "$lt_cv_ar_at_file" >&6; } + +if test no = "$lt_cv_ar_at_file"; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + bitrig* | openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } +if ${lt_cv_sys_global_symbol_pipe+:} false; then : + $as_echo_n "(cached) " >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test ia64 = "$host_cpu"; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Gets list of data symbols to import. + lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" + # Adjust the below global symbol transforms to fixup imported variables. + lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" + lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" + lt_c_name_lib_hook="\ + -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ + -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +else + # Disable hooks by default. + lt_cv_sys_global_symbol_to_import= + lt_cdecl_hook= + lt_c_name_hook= + lt_c_name_lib_hook= +fi + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n"\ +$lt_cdecl_hook\ +" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ +$lt_c_name_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" + +# Transform an extracted symbol line into symbol name with lib prefix and +# symbol address. +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ +$lt_c_name_lib_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function, + # D for any global variable and I for any imported variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. 
+ lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. 
*/ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS=conftstm.$ac_objext + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest$ac_exeext; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test yes = "$pipe_works"; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +$as_echo_n "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test "${with_sysroot+set}" = set; then : + withval=$with_sysroot; +else + with_sysroot=no +fi + + +lt_sysroot= +case $with_sysroot in #( + yes) + if test yes = "$GCC"; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 +$as_echo "$with_sysroot" >&6; } + as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 + ;; +esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +$as_echo "${lt_sysroot:-no}" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 +$as_echo_n "checking for a working dd... " >&6; } +if ${ac_cv_path_lt_DD+:} false; then : + $as_echo_n "(cached) " >&6 +else + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +: ${lt_DD:=$DD} +if test -z "$lt_DD"; then + ac_path_lt_DD_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+    for ac_prog in dd; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_lt_DD" || continue
+if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
+  cmp -s conftest.i conftest.out \
+  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
+fi
+      $ac_path_lt_DD_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_lt_DD"; then
+    :
+  fi
+else
+  ac_cv_path_lt_DD=$lt_DD
+fi
+
+rm -f conftest.i conftest2.i conftest.out
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5
+$as_echo "$ac_cv_path_lt_DD" >&6; }
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5
+$as_echo_n "checking how to truncate binary pipes... " >&6; }
+if ${lt_cv_truncate_bin+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  printf 0123456789abcdef0123456789abcdef >conftest.i
+cat conftest.i conftest.i >conftest2.i
+lt_cv_truncate_bin=
+if "$ac_cv_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
+  cmp -s conftest.i conftest.out \
+  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
+fi
+rm -f conftest.i conftest2.i conftest.out
+test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5
+$as_echo "$lt_cv_truncate_bin" >&6; }
+
+
+
+
+
+
+
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+func_cc_basename ()
+{
+    for cc_temp in $*""; do
+      case $cc_temp in
+        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+        \-*) ;;
+        *) break;;
+      esac
+    done
+    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+}
+
+# Check whether --enable-libtool-lock was given.
+if test "${enable_libtool_lock+set}" = set; then :
+  enableval=$enable_libtool_lock;
+fi
+
+test no = "$enable_libtool_lock" || enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+  # Find out what ABI is being produced by ac_compile, and set mode
+  # options accordingly.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.$ac_objext` in
+      *ELF-32*)
+        HPUX_IA64_MODE=32
+        ;;
+      *ELF-64*)
+        HPUX_IA64_MODE=64
+        ;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+*-*-irix6*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    if test yes = "$lt_cv_prog_gnu_ld"; then
+      case `/usr/bin/file conftest.$ac_objext` in
+        *32-bit*)
+          LD="${LD-ld} -melf32bsmip"
+          ;;
+        *N32*)
+          LD="${LD-ld} -melf32bmipn32"
+          ;;
+        *64-bit*)
+          LD="${LD-ld} -melf64bmip"
+          ;;
+      esac
+    else
+      case `/usr/bin/file conftest.$ac_objext` in
+        *32-bit*)
+          LD="${LD-ld} -32"
+          ;;
+        *N32*)
+          LD="${LD-ld} -n32"
+          ;;
+        *64-bit*)
+          LD="${LD-ld} -64"
+          ;;
+      esac
+    fi
+  fi
+  rm -rf conftest*
+  ;;
+
+mips64*-*linux*)
+  # Find out what ABI is being produced by ac_compile, and set linker
+  # options accordingly.
+ echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + emul=elf + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + emul="${emul}32" + ;; + *64-bit*) + emul="${emul}64" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *MSB*) + emul="${emul}btsmip" + ;; + *LSB*) + emul="${emul}ltsmip" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *N32*) + emul="${emul}n32" + ;; + esac + LD="${LD-ld} -m $emul" + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. Note that the listed cases only cover the + # situations where additional linker options are needed (such as when + # doing 32-bit compilation for a host where ld defaults to 64-bit, or + # vice versa); the common cases where no linker options are needed do + # not appear in the list. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if ${lt_cv_cc_needs_belf+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +else + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test yes != "$lt_cv_cc_needs_belf"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS=$SAVE_CFLAGS + fi + ;; +*-*solaris*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*|x86_64-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD=${LD-ld}_sol2 + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks=$enable_libtool_lock + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +$as_echo "$MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if ${lt_cv_path_mainfest_tool+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +$as_echo "$lt_cv_path_mainfest_tool" >&6; } +if test yes != "$lt_cv_path_mainfest_tool"; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if ${lt_cv_apple_cc_single_mod+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "$LT_MULTI_MODULE"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test 0 = "$_lt_result"; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if ${lt_cv_ld_exported_symbols_list+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +else + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +$as_echo_n "checking for -force_load linker flag... 
" >&6; } +if ${lt_cv_ld_force_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +$as_echo "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + 10.[012][,.]*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test yes = "$lt_cv_apple_cc_single_mod"; then + _lt_dar_single_mod='$single_module' + fi + if test yes = "$lt_cv_ld_exported_symbols_list"; then + _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' + fi + if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+  CPP=
+fi
+if test -z "$CPP"; then
+  if ${ac_cv_prog_CPP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+      # Double quotes because CPP needs to be expanded
+    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+    do
+      ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+                     Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+  break
+fi
+
+    done
+    ac_cv_prog_CPP=$CPP
+
+fi
+  CPP=$ac_cv_prog_CPP
+else
+  ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+                     Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_stdc=yes
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "free" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+  if test "$cross_compiling" = yes; then :
+  :
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+                   (('a' <= (c) && (c) <= 'i') \
+                     || ('j' <= (c) && (c) <= 'r') \
+                     || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+  int i;
+  for (i = 0; i < 256; i++)
+    if (XOR (islower (i), ISLOWER (i))
+        || toupper (i) != TOUPPER (i))
+      return 2;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in dlfcn.h +do : + ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + +fi + +done + + + + +func_stripname_cnf () +{ + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; + esac +} # func_stripname_cnf + + + + + +# Set options + + + + enable_dlopen=no + + + enable_win32_dll=no + + + # Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + enable_shared=yes +fi + + + + + + + + + + # Check whether --enable-static was given. +if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + enable_static=yes +fi + + + + + + + + + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for lt_pkg in $withval; do + IFS=$lt_save_ifs + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + pic_mode=default +fi + + + + + + + + + # Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + enable_fast_install=yes +fi + + + + + + + + + shared_archive_member_spec= +case $host,$enable_shared in +power*-*-aix[5-9]*,yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 +$as_echo_n "checking which variant of shared library versioning to provide... " >&6; } + +# Check whether --with-aix-soname was given. 
+if test "${with_aix_soname+set}" = set; then : + withval=$with_aix_soname; case $withval in + aix|svr4|both) + ;; + *) + as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 + ;; + esac + lt_cv_with_aix_soname=$with_aix_soname +else + if ${lt_cv_with_aix_soname+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_with_aix_soname=aix +fi + + with_aix_soname=$lt_cv_with_aix_soname +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 +$as_echo "$with_aix_soname" >&6; } + if test aix != "$with_aix_soname"; then + # For the AIX way of multilib, we name the shared archive member + # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', + # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. + # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, + # the AIX toolchain works better with OBJECT_MODE set (default 32). + if test 64 = "${OBJECT_MODE-32}"; then + shared_archive_member_spec=shr_64 + else + shared_archive_member_spec=shr + fi + fi + ;; +*) + with_aix_soname=aix + ;; +esac + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS=$ltmain + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +$as_echo_n "checking for objdir... " >&6; } +if ${lt_cv_objdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a '.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld=$lt_cv_prog_gnu_ld + +old_CC=$CC +old_CFLAGS=$CFLAGS + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +func_cc_basename $compiler +cc_basename=$func_cc_basename_result + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. 
+ ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/${ac_tool_prefix}file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi + +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +$as_echo_n "checking for file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi + +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC=$CC +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test yes = "$GCC"; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test yes = "$GCC"; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + lt_prog_compiler_pic='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + case $cc_basename in + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='$wl-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64, which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +$as_echo "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } +if ${lt_cv_prog_compiler_pic_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +if test yes = "$lt_cv_prog_compiler_pic_works"; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +if test yes = "$lt_cv_prog_compiler_static_works"; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test no = "$hard_links"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ' (' and ')$', so one must not match beginning or + # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', + # as well as any symbol that contains 'd'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test yes != "$GCC"; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd* | bitrig*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. 
+ lt_use_gnu_ld_interface=no + if test yes = "$with_gnu_ld"; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test yes = "$lt_use_gnu_ld_interface"; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='$wl' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + export_dynamic_flag_spec='$wl--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test ia64 != "$host_cpu"; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='$wl--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. 
Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test linux-dietlibc = "$host_os"; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test no = "$tmp_diet" + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + nagfor*) # NAGFOR 5.3 + tmp_sharedflag='-Wl,-shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + tcc*) + export_dynamic_flag_spec='-rdynamic' + ;; + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + 
hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test no = "$ld_shlibs"; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. 
+ # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then + aix_use_runtimelinking=yes + break + fi + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # traditional, no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + hardcode_direct=no + hardcode_direct_absolute=no + ;; + esac + + if test yes = "$GCC"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag="$shared_flag "'$wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' $wl-bernotok' + allow_undefined_flag=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. 
+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test yes = "$GCC"; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + export_dynamic_flag_spec='$wl-E' + ;; + + hpux10*) + if test yes,no = "$GCC,$with_gnu_ld"; then + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test yes,no = "$GCC,$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +$as_echo_n "checking if $CC understands -b... " >&6; } +if ${lt_cv_prog_compiler__b+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler__b=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +$as_echo "$lt_cv_prog_compiler__b" >&6; } + +if test yes = "$lt_cv_prog_compiler__b"; then + archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test yes = "$GCC"; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if ${lt_cv_irix_exported_symbol+:} false; then : + $as_echo_n "(cached) " >&6 +else + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_irix_exported_symbol=yes +else + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +$as_echo "$lt_cv_irix_exported_symbol" >&6; } + if test yes = "$lt_cv_irix_exported_symbol"; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' + fi + link_all_deplibs=no + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + linux*) + case $cc_basename in + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + ld_shlibs=yes + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs 
$compiler_flags $wl-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + else + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + ;; + + osf3*) + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO 
"-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test yes = "$GCC"; then + wlarc='$wl' + archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='$wl' + archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. GCC discards it without '$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test yes = "$GCC"; then + whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test sequent = "$host_vendor"; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='$wl-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag='$wl-z,text' + allow_undefined_flag='$wl-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test sni = "$host_vendor"; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='$wl-Blargedynsym' + ;; + esac + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test no = "$ld_shlibs" && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. 
If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test yes = "$GCC"; then + case $host_os in + darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; + *) lt_awk_arg='/^libraries:/' ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; + *) lt_sed_strip_eq='s|=/|/|g' ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary... + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + # ...but if some path component already ends with the multilib dir we assume + # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). 
+ case "$lt_multi_os_dir; $lt_search_path_spec " in + "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) + lt_multi_os_dir= + ;; + esac + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" + elif test -n "$lt_multi_os_dir"; then + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS = " "; FS = "/|\n";} { + lt_foo = ""; + lt_count = 0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo = "/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's|/\([A-Za-z]:\)|\1|g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + + + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. 
+ # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. + # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. 
+ need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test yes = "$hardcode_automatic"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && + test no != "$hardcode_minus_L"; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } + +if test relink = "$hardcode_action" || + test yes = "$inherit_rpath"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test yes != "$enable_dlopen"; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen=load_add_on + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen=LoadLibrary + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else + + lt_cv_dlopen=dyld + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + tpf*) + # Don't try to run any link tests for TPF. We know it's impossible + # because TPF is a cross-compiler, and we know how we open DSOs. + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + lt_cv_dlopen_self=no + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + lt_cv_dlopen=shl_load +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld +else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + lt_cv_dlopen=dlopen +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... " >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/
+#ifdef __cplusplus
+extern "C"
+#endif
+char dld_link ();
+int
+main ()
+{
+return dld_link ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_dld_dld_link=yes
+else
+ ac_cv_lib_dld_dld_link=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
+$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
+if test "x$ac_cv_lib_dld_dld_link" = xyes; then :
+ lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+ ;;
+ esac
+
+ if test no = "$lt_cv_dlopen"; then
+ enable_dlopen=no
+ else
+ enable_dlopen=yes
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS=$CPPFLAGS
+ test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS=$LDFLAGS
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS=$LIBS
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
+$as_echo_n "checking whether a program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test yes = "$cross_compiling"; then :
+ lt_cv_dlopen_self=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}
+_LT_EOF
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
+ (./conftest; exit; ) >&5 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
+$as_echo "$lt_cv_dlopen_self" >&6; }
+
+ if test yes = "$lt_cv_dlopen_self"; then
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
+$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self_static+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test yes = "$cross_compiling"; then :
+ lt_cv_dlopen_self_static=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}
+_LT_EOF
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
+ (./conftest; exit; ) >&5 2>/dev/null
+ lt_status=$?
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS=$save_CPPFLAGS + LDFLAGS=$save_LDFLAGS + LIBS=$save_LIBS + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP"; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report what library types will actually be built + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. 
+ test yes = "$enable_shared" || enable_static=yes + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } + + + + +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC=$lt_save_CC + + if test -n "$CXX" && ( test no != "$CXX" && + ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || + (test g++ != "$CXX"))); then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. 
+ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +else + _lt_caught_CXX_error=yes +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +compiler_needs_object_CXX=no +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_direct_absolute_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +inherit_rpath_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +reload_flag_CXX=$reload_flag +reload_cmds_CXX=$reload_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test yes != "$_lt_caught_CXX_error"; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + + # save warnings/boilerplate of simple test code + ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + + ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + + # Allow CC to be a program name with arguments. 
+ lt_save_CC=$CC
+ lt_save_CFLAGS=$CFLAGS
+ lt_save_LD=$LD
+ lt_save_GCC=$GCC
+ GCC=$GXX
+ lt_save_with_gnu_ld=$with_gnu_ld
+ lt_save_path_LD=$lt_cv_path_LD
+ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+ else
+ $as_unset lt_cv_prog_gnu_ld
+ fi
+ if test -n "${lt_cv_path_LDCXX+set}"; then
+ lt_cv_path_LD=$lt_cv_path_LDCXX
+ else
+ $as_unset lt_cv_path_LD
+ fi
+ test -z "${LDCXX+set}" || LD=$LDCXX
+ CC=${CXX-"c++"}
+ CFLAGS=$CXXFLAGS
+ compiler=$CC
+ compiler_CXX=$CC
+ func_cc_basename $compiler
+cc_basename=$func_cc_basename_result
+
+
+ if test -n "$compiler"; then
+ # We don't want -fno-exception when compiling C++ code, so set the
+ # no_builtin_flag separately
+ if test yes = "$GXX"; then
+ lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+ else
+ lt_prog_compiler_no_builtin_flag_CXX=
+ fi
+
+ if test yes = "$GXX"; then
+ # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+ withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test yes = "$GCC"; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return, which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | ?:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD=$ac_prog
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test yes = "$with_gnu_ld"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS=$lt_save_ifs
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD=$ac_dir/$ac_prog
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test no != "$with_gnu_ld" && break
+ ;;
+ *)
+ test yes != "$with_gnu_ld" && break
+ ;;
+ esac
+ fi
+ done
+ IFS=$lt_save_ifs
+else
+ lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi
+fi
+
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+ # Check if GNU C++ uses GNU ld as the underlying linker, since the
+ # archiving commands below assume that GNU ld is being used.
+ if test yes = "$with_gnu_ld"; then
+ archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+
+ hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+ export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+ # investigate it a little bit more. (MM)
+ wlarc='$wl'
+
+ # ancient GNU ld didn't support --whole-archive et. al.
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+ $GREP 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+ else
+ whole_archive_flag_spec_CXX=
+ fi
+ else
+ with_gnu_ld=no
+ wlarc=
+
+ # A generic and very simple default shared library creation
+ # command for GNU C++ for the case where it uses the native
+ # linker, instead of GNU ld. If possible, this setting should
+ # overridden to take advantage of the native linker features on
+ # the platform it is being used on.
+ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ fi
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+ else
+ GXX=no
+ with_gnu_ld=no
+ wlarc=
+ fi
+
+ # PORTME: fill in a description of your system's C++ link characteristics
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+ ld_shlibs_CXX=yes
+ case $host_os in
+ aix3*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ aix[4-9]*)
+ if test ia64 = "$host_cpu"; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # have runtime linking enabled, and use it for executables.
+ # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + file_list_spec_CXX='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + hardcode_direct_CXX=no + hardcode_direct_absolute_CXX=no + ;; + esac + + if test yes = "$GXX"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag=$shared_flag' $wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec_CXX='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + # The "-G" linker flag allows undefined symbols. + no_undefined_flag_CXX='-bernotok' + # Determine the default libpath from the value encoded in an empty + # executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' $wl-bernotok' + allow_undefined_flag_CXX=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes + archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared + # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. 
+ hardcode_libdir_flag_spec_CXX=' ' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=yes + file_list_spec_CXX='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' + enable_shared_with_static_runtimes_CXX=yes + # Don't use ranlib + old_postinstall_cmds_CXX='chmod 644 $oldlib' + postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + export_dynamic_flag_spec_CXX='$wl--export-all-symbols' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... 
+ archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + + + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes + allow_undefined_flag_CXX=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + if test yes != "$lt_cv_apple_cc_single_mod"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" + archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" + fi + + else + ld_shlibs_CXX=no + fi + + ;; + + os2*) + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_minus_L_CXX=yes + allow_undefined_flag_CXX=unsupported + shrext_cmds=.dll + archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > 
$output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes_CXX=yes + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + ld_shlibs_CXX=no + ;; + + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + + haiku*) + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) + hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='$wl-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + export_dynamic_flag_spec_CXX='$wl-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + interix[3-9]*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. 
To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. + case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' + 
export_dynamic_flag_spec_CXX='$wl--export-dynamic' + whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' + whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + ld_shlibs_CXX=yes + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='$wl-E' + whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + ld_shlibs_CXX=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + case $host in + osf3*) + allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' + archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ + $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac + + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes,no = "$GXX,$with_gnu_ld"; then + allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' + case $host in + osf3*) + archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + *) + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test yes,no = "$GXX,$with_gnu_ld"; then + no_undefined_flag_CXX=' $wl-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require '-G' NOT '-shared' on this + # platform. 
+ archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='$wl-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_CXX='$wl-z,text' + allow_undefined_flag_CXX='$wl-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ + '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ + '"$reload_cmds_CXX" + ;; + *) + archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } + test no = "$ld_shlibs_CXX" && can_build_shared=no + + GCC_CXX=$GXX + LD_CXX=$LD + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + # Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF + + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case $prev$p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test x-L = "$p" || + test x-R = "$p"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test no = "$pre_test_object_deps_done"; then + case $prev in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. 
The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX=$prev$p + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX=$prev$p + else + postdeps_CXX="${postdeps_CXX} $prev$p" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test no = "$pre_test_object_deps_done"; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX=$p + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX=$p + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +case $host_os in +interix[3-9]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + compiler_lib_search_dirs_CXX= +if test -n "${compiler_lib_search_path_CXX}"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! !g' -e 's!^ !!'` +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + + + # C++ specific cases for pic, static, wl, etc. + if test yes = "$GXX"; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + lt_prog_compiler_pic_CXX='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic_CXX='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static_CXX='$wl-static' + ;; + esac + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static_CXX= + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix[4-9]*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='$wl-a ${wl}archive' + if test ia64 != "$host_cpu"; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='$wl-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64, which still supported -KPIC. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. 
+ # ICC 10 doesn't accept -KPIC any more. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-qpic' + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } +lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + +# +# Check to make sure the PIC flag actually works. 
+# +if test -n "$lt_prog_compiler_pic_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works_CXX=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } + +if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works_CXX=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works_CXX=yes + fi + else + lt_cv_prog_compiler_static_works_CXX=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } + +if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then + : +else + lt_prog_compiler_static_CXX= +fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test no = "$hard_links"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. 
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX=$ltdll_cmds + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs_CXX=no + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } +test no = "$ld_shlibs_CXX" && can_build_shared=no + +with_gnu_ld_CXX=$with_gnu_ld + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no + else + lt_cv_archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + + + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. + # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. 
+ # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. 
+ need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec_CXX='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || + test yes = "$hardcode_automatic_CXX"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct_CXX" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && + test no != "$hardcode_minus_L_CXX"; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
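+  # Neither an -L/-rpath flag spec, a runpath variable, nor automatic hardcoding is available for the C++ tag, so the hardcode action is reported as unsupported.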
+ hardcode_action_CXX=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +$as_echo "$hardcode_action_CXX" >&6; } + +if test relink = "$hardcode_action_CXX" || + test yes = "$inherit_rpath_CXX"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test yes != "$_lt_caught_CXX_error" + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# Only expand once: + + + + +# Test for 64-bit build. +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of size_t" >&5 +$as_echo_n "checking size of size_t... " >&6; } +if ${ac_cv_sizeof_size_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (size_t))" "ac_cv_sizeof_size_t" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_size_t" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (size_t) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_size_t=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_size_t" >&5 +$as_echo "$ac_cv_sizeof_size_t" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t +_ACEOF + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler vendor" >&5 +$as_echo_n "checking for C compiler vendor... " >&6; } +if ${ax_cv_c_compiler_vendor+:} false; then : + $as_echo_n "(cached) " >&6 +else + # note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + cray: _CRAYC + fujitsu: __FUJITSU + sdcc: SDCC, __SDCC + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__CODEGEARC__,__TURBOC__ + comeau: __COMO__ + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + tcc: __TINYC__ + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + #if !($vencpp) + thisisanerror; + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done + ax_cv_c_compiler_vendor=`echo $vendor | cut -d: -f1` + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_c_compiler_vendor" >&5 +$as_echo "$ax_cv_c_compiler_vendor" >&6; } + + + + + + +# Check whether --enable-portable-binary was given. +if test "${enable_portable_binary+set}" = set; then : + enableval=$enable_portable_binary; acx_maxopt_portable=$enableval +else + acx_maxopt_portable=no +fi + + +# Try to determine "good" native compiler flags if none specified via CFLAGS +if test "$ac_test_CFLAGS" != "set"; then + CFLAGS="" + case $ax_cv_c_compiler_vendor in + dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host" + if test "x$acx_maxopt_portable" = xno; then + CFLAGS="$CFLAGS -arch host" + fi;; + + sun) CFLAGS="-native -fast -xO5 -dalign" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS -xarch=generic" + fi;; + + hp) CFLAGS="+Oall +Optrs_ansi +DSnative" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS +DAportable" + fi;; + + ibm) if test "x$acx_maxopt_portable" = xno; then + xlc_opt="-qarch=auto -qtune=auto" + else + xlc_opt="-qtune=auto" + fi + as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$xlc_opt" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $xlc_opt" >&5 +$as_echo_n "checking whether C compiler accepts $xlc_opt... " >&6; } +if eval \${$as_CACHEVAR+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CFLAGS + CFLAGS="$CFLAGS $xlc_opt" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$as_CACHEVAR=yes" +else + eval "$as_CACHEVAR=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CFLAGS=$ax_check_save_flags +fi +eval ac_res=\$$as_CACHEVAR + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_CACHEVAR"\" = x"yes"; then : + CFLAGS="-O3 -qansialias -w $xlc_opt" +else + CFLAGS="-O3 -qansialias -w" + echo "******************************************************" + echo "* You seem to have the IBM C compiler. It is *" + echo "* recommended for best performance that you use: *" + echo "* *" + echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *" + echo "* ^^^ ^^^ *" + echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *" + echo "* CPU you have. (Set the CFLAGS environment var. *" + echo "* and re-run configure.) For more info, man cc. *" + echo "******************************************************" +fi + + ;; + + intel) CFLAGS="-O3 -ansi_alias" + if test "x$acx_maxopt_portable" = xno; then + icc_archflag=unknown + icc_flags="" + case $host_cpu in + i686*|x86_64*) + # icc accepts gcc assembly syntax, so these should work: + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 0 output" >&5 +$as_echo_n "checking for x86 cpuid 0 output... 
" >&6; } +if ${ax_cv_gcc_x86_cpuid_0+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ax_cv_gcc_x86_cpuid_0=unknown +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + + int op = 0, level = 0, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ax_cv_gcc_x86_cpuid_0=`cat conftest_cpuid`; rm -f conftest_cpuid +else + ax_cv_gcc_x86_cpuid_0=unknown; rm -f conftest_cpuid +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_0" >&5 +$as_echo "$ax_cv_gcc_x86_cpuid_0" >&6; } +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 1 output" >&5 +$as_echo_n "checking for x86 cpuid 1 output... " >&6; } +if ${ax_cv_gcc_x86_cpuid_1+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ax_cv_gcc_x86_cpuid_1=unknown +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ + + int op = 1, level = 0, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ax_cv_gcc_x86_cpuid_1=`cat conftest_cpuid`; rm -f conftest_cpuid +else + ax_cv_gcc_x86_cpuid_1=unknown; rm -f conftest_cpuid +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_1" >&5 +$as_echo "$ax_cv_gcc_x86_cpuid_1" >&6; } +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *0?6[78ab]?:*:*:*|?6[78ab]?:*:*:*|6[78ab]?:*:*:*) icc_flags="-xK" ;; + *0?6[9d]?:*:*:*|?6[9d]?:*:*:*|6[9d]?:*:*:*|*1?65?:*:*:*) icc_flags="-xSSE2 -xB -xK" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) icc_flags="-xSSE3 -xP -xO -xB -xK" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) icc_flags="-xSSSE3 -xT -xB -xK" ;; + *1?6[7d]?:*:*:*) icc_flags="-xSSE4.1 -xS -xT -xB -xK" ;; + *1?6[aef]?:*:*:*|*2?6[5cef]?:*:*:*) icc_flags="-xSSE4.2 -xS -xT -xB -xK" ;; + *2?6[ad]?:*:*:*) icc_flags="-xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[ae]?:*:*:*) icc_flags="-xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[cf]?:*:*:*|*4?6[56]?:*:*:*) icc_flags="-xCORE-AVX2 -xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *000?f[346]?:*:*:*|?f[346]?:*:*:*|f[346]?:*:*:*) icc_flags="-xSSE3 -xP -xO -xN -xW -xK" ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) icc_flags="-xSSE2 -xN -xW -xK" ;; + esac ;; + esac ;; + esac + if test "x$icc_flags" != x; then + for flag in $icc_flags; do + as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$flag" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $flag" >&5 +$as_echo_n "checking whether C compiler accepts $flag... " >&6; } +if eval \${$as_CACHEVAR+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CFLAGS + CFLAGS="$CFLAGS $flag" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$as_CACHEVAR=yes" +else + eval "$as_CACHEVAR=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CFLAGS=$ax_check_save_flags +fi +eval ac_res=\$$as_CACHEVAR + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_CACHEVAR"\" = x"yes"; then : + icc_archflag=$flag; break +else + : +fi + + done + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for icc architecture flag" >&5 +$as_echo_n "checking for icc architecture flag... 
" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $icc_archflag" >&5 +$as_echo "$icc_archflag" >&6; } + if test "x$icc_archflag" != xunknown; then + CFLAGS="$CFLAGS $icc_archflag" + fi + fi + ;; + + gnu) + # default optimization flags for gcc on all systems + CFLAGS="-O3 -fomit-frame-pointer" + + # -malign-double for x86 systems + # libffi local change -- don't align double, as it changes the ABI + # AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double") + + # -fstrict-aliasing for gcc-2.95+ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts -fstrict-aliasing" >&5 +$as_echo_n "checking whether C compiler accepts -fstrict-aliasing... " >&6; } +if ${ax_cv_check_cflags___fstrict_aliasing+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CFLAGS + CFLAGS="$CFLAGS -fstrict-aliasing" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ax_cv_check_cflags___fstrict_aliasing=yes +else + ax_cv_check_cflags___fstrict_aliasing=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CFLAGS=$ax_check_save_flags +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cflags___fstrict_aliasing" >&5 +$as_echo "$ax_cv_check_cflags___fstrict_aliasing" >&6; } +if test "x$ax_cv_check_cflags___fstrict_aliasing" = xyes; then : + CFLAGS="$CFLAGS -fstrict-aliasing" +else + : +fi + + + # note that we enable "unsafe" fp optimization with other compilers, too + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts -ffast-math" >&5 +$as_echo_n "checking whether C compiler accepts -ffast-math... " >&6; } +if ${ax_cv_check_cflags___ffast_math+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CFLAGS + CFLAGS="$CFLAGS -ffast-math" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ax_cv_check_cflags___ffast_math=yes +else + ax_cv_check_cflags___ffast_math=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CFLAGS=$ax_check_save_flags +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cflags___ffast_math" >&5 +$as_echo "$ax_cv_check_cflags___ffast_math" >&6; } +if test "x$ax_cv_check_cflags___ffast_math" = xyes; then : + CFLAGS="$CFLAGS -ffast-math" +else + : +fi + + + + + + + + +# Check whether --with-gcc-arch was given. +if test "${with_gcc_arch+set}" = set; then : + withval=$with_gcc_arch; ax_gcc_arch=$withval +else + ax_gcc_arch=yes +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for gcc architecture flag" >&5 +$as_echo_n "checking for gcc architecture flag... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 +$as_echo "" >&6; } +if ${ax_cv_gcc_archflag+:} false; then : + $as_echo_n "(cached) " >&6 +else + +ax_cv_gcc_archflag="unknown" + +if test "$GCC" = yes; then + +if test "x$ax_gcc_arch" = xyes; then +ax_gcc_arch="" +if test "$cross_compiling" = no; then +case $host_cpu in + i[3456]86*|x86_64*|amd64*) # use cpuid codes + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 0 output" >&5 +$as_echo_n "checking for x86 cpuid 0 output... 
" >&6; } +if ${ax_cv_gcc_x86_cpuid_0+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ax_cv_gcc_x86_cpuid_0=unknown +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + + int op = 0, level = 0, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ax_cv_gcc_x86_cpuid_0=`cat conftest_cpuid`; rm -f conftest_cpuid +else + ax_cv_gcc_x86_cpuid_0=unknown; rm -f conftest_cpuid +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_0" >&5 +$as_echo "$ax_cv_gcc_x86_cpuid_0" >&6; } +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 1 output" >&5 +$as_echo_n "checking for x86 cpuid 1 output... " >&6; } +if ${ax_cv_gcc_x86_cpuid_1+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ax_cv_gcc_x86_cpuid_1=unknown +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ + + int op = 1, level = 0, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ax_cv_gcc_x86_cpuid_1=`cat conftest_cpuid`; rm -f conftest_cpuid +else + ax_cv_gcc_x86_cpuid_1=unknown; rm -f conftest_cpuid +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_1" >&5 +$as_echo "$ax_cv_gcc_x86_cpuid_1" >&6; } +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + case $ax_cv_gcc_x86_cpuid_0 in + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *5[4578]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;; + *5[123]?:*:*:*) ax_gcc_arch=pentium ;; + *0?61?:*:*:*|?61?:*:*:*|61?:*:*:*) ax_gcc_arch=pentiumpro ;; + *0?6[356]?:*:*:*|?6[356]?:*:*:*|6[356]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *0?6[78ab]?:*:*:*|?6[78ab]?:*:*:*|6[78ab]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *0?6[9d]?:*:*:*|?6[9d]?:*:*:*|6[9d]?:*:*:*|*1?65?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) ax_gcc_arch="yonah pentium-m pentium3 pentiumpro" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;; + *1?6[7d]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;; + *1?6[aef]?:*:*:*|*2?6e?:*:*:*) ax_gcc_arch="nehalem corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[5cf]?:*:*:*) ax_gcc_arch="westmere corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[ad]?:*:*:*) ax_gcc_arch="sandybridge corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[ae]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[cf]?:*:*:*|*4?6[56]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6d?:*:*:*|*4?6[7f]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *1?6c?:*:*:*|*2?6[67]?:*:*:*|*3?6[56]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;; + *3?67?:*:*:*|*[45]?6[ad]?:*:*:*) ax_gcc_arch="silvermont atom core2 pentium-m pentium3 pentiumpro" ;; + *000?f[012]?:*:*:*|?f[012]?:*:*:*|f[012]?:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + *000?f[346]?:*:*:*|?f[346]?:*:*:*|f[346]?:*:*:*) ax_gcc_arch="nocona prescott pentium4 pentiumpro" ;; + # fallback + *5??:*:*:*) ax_gcc_arch=pentium ;; + *??6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;; + *6??:*:*:*) ax_gcc_arch=pentiumpro ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + esac ;; + *:68747541:444d4163:69746e65) # AMD + case $ax_cv_gcc_x86_cpuid_1 in + *5[67]?:*:*:*) ax_gcc_arch=k6 ;; + *5[8]?:*:*:*) ax_gcc_arch="k6-2 k6" ;; + *5[9d]?:*:*:*) ax_gcc_arch="k6-3 k6" ;; + *6[12]?:*:*:*) ax_gcc_arch="athlon k7" ;; + *6[34]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;; + *6[678a]?:*:*:*) ax_gcc_arch="athlon-xp athlon-4 athlon 
k7" ;; + *000?f[4578bcef]?:*:*:*|?f[4578bcef]?:*:*:*|f[4578bcef]?:*:*:*|*001?f[4578bcf]?:*:*:*|1?f[4578bcf]?:*:*:*) ax_gcc_arch="athlon64 k8" ;; + *002?f[13457bcf]?:*:*:*|2?f[13457bcf]?:*:*:*|*004?f[138bcf]?:*:*:*|4?f[138bcf]?:*:*:*|*005?f[df]?:*:*:*|5?f[df]?:*:*:*|*006?f[8bcf]?:*:*:*|6?f[8bcf]?:*:*:*|*007?f[cf]?:*:*:*|7?f[cf]?:*:*:*|*00c?f1?:*:*:*|c?f1?:*:*:*|*020?f3?:*:*:*|20?f3?:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *010?f[245689a]?:*:*:*|10?f[245689a]?:*:*:*|*030?f1?:*:*:*|30?f1?:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *050?f[12]?:*:*:*|50?f[12]?:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f1?:*:*:*|60?f1?:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *060?f2?:*:*:*|60?f2?:*:*:*|*061?f[03]?:*:*:*|61?f[03]?:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *063?f0?:*:*:*|63?f0?:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *07[03]?f0?:*:*:*|7[03]?f0?:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + # fallback + *0[13]??f??:*:*:*|[13]??f??:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *020?f??:*:*:*|20?f??:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *05??f??:*:*:*|5??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f??:*:*:*|60?f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *061?f??:*:*:*|61?f??:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *06??f??:*:*:*|6??f??:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *070?f??:*:*:*|70?f??:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + *???f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;; + esac ;; + *:746e6543:736c7561:48727561) # IDT / VIA (Centaur) + case $ax_cv_gcc_x86_cpuid_1 in + *54?:*:*:*) ax_gcc_arch=winchip-c6 ;; + *5[89]?:*:*:*) ax_gcc_arch=winchip2 ;; + *66?:*:*:*) ax_gcc_arch=winchip2 ;; + *6[78]?:*:*:*) ax_gcc_arch=c3 ;; + *6[9adf]?:*:*:*) ax_gcc_arch="c3-2 c3" ;; + esac ;; + esac + if test x"$ax_gcc_arch" = x; then # fallback + case $host_cpu in + i586*) ax_gcc_arch=pentium ;; + i686*) ax_gcc_arch=pentiumpro ;; + esac + fi + ;; + + sparc*) + # Extract the first word of "prtdiag", so it can be a program name with args. +set dummy prtdiag; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PRTDIAG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PRTDIAG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PRTDIAG="$PRTDIAG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_dummy="$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/" +for as_dir in $as_dummy +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PRTDIAG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_PRTDIAG" && ac_cv_path_PRTDIAG="prtdiag" + ;; +esac +fi +PRTDIAG=$ac_cv_path_PRTDIAG +if test -n "$PRTDIAG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PRTDIAG" >&5 +$as_echo "$PRTDIAG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null` + cputype=`echo "$cputype" | tr -d ' -' | $SED 's/SPARCIIi/SPARCII/' |tr $as_cr_LETTERS $as_cr_letters` + case $cputype in + *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;; + *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;; + *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;; + *supersparc*|*tms390z5[05]*) ax_gcc_arch="supersparc v8" ;; + *hypersparc*|*rt62[056]*) ax_gcc_arch="hypersparc v8" ;; + *cypress*) ax_gcc_arch=cypress ;; + esac ;; + + alphaev5) ax_gcc_arch=ev5 ;; + alphaev56) ax_gcc_arch=ev56 ;; + alphapca56) ax_gcc_arch="pca56 ev56" ;; + alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;; + alphaev6) ax_gcc_arch=ev6 ;; + alphaev67) ax_gcc_arch=ev67 ;; + alphaev68) ax_gcc_arch="ev68 ev67" ;; + alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;; + alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;; + alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;; + + powerpc*) + cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | $SED 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null` + cputype=`echo $cputype | $SED -e 's/ppc//g;s/ *//g'` + case $cputype in + *750*) ax_gcc_arch="750 G3" ;; + *740[0-9]*) ax_gcc_arch="$cputype 7400 G4" ;; + *74[4-5][0-9]*) ax_gcc_arch="$cputype 7450 G4" ;; + *74[0-9][0-9]*) ax_gcc_arch="$cputype G4" ;; + *970*) ax_gcc_arch="970 G5 power4";; + *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";; + *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";; + 603ev|8240) ax_gcc_arch="$cputype 603e 603";; + *POWER7*) ax_gcc_arch="power7";; + *POWER8*) ax_gcc_arch="power8";; + *POWER9*) ax_gcc_arch="power9";; + *POWER10*) ax_gcc_arch="power10";; + *) ax_gcc_arch=$cputype ;; + esac + ax_gcc_arch="$ax_gcc_arch powerpc" + ;; + aarch64) + cpuimpl=`grep 'CPU implementer' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuarch=`grep 'CPU architecture' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuvar=`grep 'CPU variant' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + case $cpuimpl in + 0x42) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + 0x43) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx armv8-a native" ;; + 0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + esac + ;; +esac +fi # not cross-compiling +fi # guess arch + +if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then +if test "x$acx_maxopt_portable" = xyes; then # if we require portable code + flag_prefixes="-mtune=" + if test "x$ax_cv_c_compiler_vendor" = xclang; then flag_prefixes="-march="; fi + # -mcpu=$arch and m$arch generate 
nonportable code on every arch except + # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr. + case $host_cpu in i*86|x86_64*|amd64*) flag_prefixes="$flag_prefixes -mcpu= -m";; esac +else + flag_prefixes="-march= -mcpu= -m" +fi +for flag_prefix in $flag_prefixes; do + for arch in $ax_gcc_arch; do + flag="$flag_prefix$arch" + as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$flag" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $flag" >&5 +$as_echo_n "checking whether C compiler accepts $flag... " >&6; } +if eval \${$as_CACHEVAR+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CFLAGS + CFLAGS="$CFLAGS $flag" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$as_CACHEVAR=yes" +else + eval "$as_CACHEVAR=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CFLAGS=$ax_check_save_flags +fi +eval ac_res=\$$as_CACHEVAR + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_CACHEVAR"\" = x"yes"; then : + if test "x$ax_cv_c_compiler_vendor" = xclang; then + if test "x$acx_maxopt_portable" = xyes; then + if test "x$flag" = "x-march=$arch"; then flag=-mtune=$arch; fi + fi + fi; ax_cv_gcc_archflag=$flag; break +else + : +fi + + done + test "x$ax_cv_gcc_archflag" = xunknown || break +done +fi + +fi # $GCC=yes + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for gcc architecture flag" >&5 +$as_echo_n "checking for gcc architecture flag... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_archflag" >&5 +$as_echo "$ax_cv_gcc_archflag" >&6; } +if test "x$ax_cv_gcc_archflag" = xunknown; then + : +else + CFLAGS="$CFLAGS $ax_cv_gcc_archflag" +fi + + ;; + + microsoft) + # default optimization flags for MSVC opt builds + CFLAGS="-O2" + ;; + esac + + if test -z "$CFLAGS"; then + echo "" + echo "********************************************************" + echo "* WARNING: Don't know the best CFLAGS for this system *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "* (otherwise, a default of CFLAGS=-O3 will be used) *" + echo "********************************************************" + echo "" + CFLAGS="-O3" + fi + + as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$CFLAGS" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $CFLAGS" >&5 +$as_echo_n "checking whether C compiler accepts $CFLAGS... " >&6; } +if eval \${$as_CACHEVAR+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$as_CACHEVAR=yes" +else + eval "$as_CACHEVAR=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CFLAGS=$ax_check_save_flags +fi +eval ac_res=\$$as_CACHEVAR + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_CACHEVAR"\" = x"yes"; then : + : +else + + echo "" + echo "********************************************************" + echo "* WARNING: The guessed CFLAGS don't seem to work with *" + echo "* your compiler. *" + echo "* Use ./configure CFLAGS=... 
to specify your own flags *" + echo "********************************************************" + echo "" + CFLAGS="" + +fi + + +fi + +# The AX_CFLAGS_WARN_ALL macro doesn't currently work for sunpro +# compiler. +if test "$ax_cv_c_compiler_vendor" != "sun"; then + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking CFLAGS for maximum warnings" >&5 +$as_echo_n "checking CFLAGS for maximum warnings... " >&6; } +if ${ac_cv_cflags_warn_all+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_cflags_warn_all="no, unknown" +ac_save_CFLAGS="$CFLAGS" +for ac_arg in "-warn all % -warn all" "-pedantic % -Wall" "-xstrconst % -v" "-std1 % -verbose -w0 -warnprotos" "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" "-ansi -ansiE % -fullwarn" "+ESlit % +w1" "-Xc % -pvctl,fullmsg" "-h conform % -h msglevel 2" # +do CFLAGS="$ac_save_CFLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_cflags_warn_all=`echo $ac_arg | sed -e 's,.*% *,,'` ; break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +CFLAGS="$ac_save_CFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cflags_warn_all" >&5 +$as_echo "$ac_cv_cflags_warn_all" >&6; } + + +case ".$ac_cv_cflags_warn_all" in + .ok|.ok,*) ;; + .|.no|.no,*) ;; + *) +if ${CFLAGS+:} false; then : + + case " $CFLAGS " in #( + *" $ac_cv_cflags_warn_all "*) : + { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS already contains \$ac_cv_cflags_warn_all"; } >&5 + (: CFLAGS already contains $ac_cv_cflags_warn_all) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } ;; #( + *) : + + as_fn_append CFLAGS " $ac_cv_cflags_warn_all" + { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS=\"\$CFLAGS\""; } >&5 + (: CFLAGS="$CFLAGS") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + ;; +esac + +else + + CFLAGS=$ac_cv_cflags_warn_all + { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS=\"\$CFLAGS\""; } >&5 + (: CFLAGS="$CFLAGS") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + +fi + ;; +esac + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +fi + +if test "x$GCC" = "xyes"; then + CFLAGS="$CFLAGS -fexceptions" +fi + +cat > local.exp <&5 +$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } + # Check whether --enable-maintainer-mode was given. 
+if test "${enable_maintainer_mode+set}" = set; then : + enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval +else + USE_MAINTAINER_MODE=no +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 +$as_echo "$USE_MAINTAINER_MODE" >&6; } + if test $USE_MAINTAINER_MODE = yes; then + MAINTAINER_MODE_TRUE= + MAINTAINER_MODE_FALSE='#' +else + MAINTAINER_MODE_TRUE='#' + MAINTAINER_MODE_FALSE= +fi + + MAINT=$MAINTAINER_MODE_TRUE + + + +for ac_header in sys/mman.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_mman_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SYS_MMAN_H 1 +_ACEOF + +fi + +done + +for ac_func in mmap mkostemp +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_mman_h" = xyes; then : + libffi_header_sys_mman_h=yes +else + libffi_header_sys_mman_h=no +fi + + +ac_fn_c_check_func "$LINENO" "mmap" "ac_cv_func_mmap" +if test "x$ac_cv_func_mmap" = xyes; then : + libffi_func_mmap=yes +else + libffi_func_mmap=no +fi + +if test "$libffi_header_sys_mman_h" != yes \ + || test "$libffi_func_mmap" != yes; then + ac_cv_func_mmap_file=no + ac_cv_func_mmap_dev_zero=no + ac_cv_func_mmap_anon=no +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether read-only mmap of a plain file works" >&5 +$as_echo_n "checking whether read-only mmap of a plain file works... " >&6; } +if ${ac_cv_func_mmap_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Add a system to this blacklist if + # mmap(0, stat_size, PROT_READ, MAP_PRIVATE, fd, 0) doesn't return a + # memory area containing the same data that you'd get if you applied + # read() to the same fd. The only system known to have a problem here + # is VMS, where text files have record structure. + case "$host_os" in + vms* | ultrix*) + ac_cv_func_mmap_file=no ;; + *) + ac_cv_func_mmap_file=yes;; + esac +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_file" >&5 +$as_echo "$ac_cv_func_mmap_file" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mmap from /dev/zero works" >&5 +$as_echo_n "checking whether mmap from /dev/zero works... " >&6; } +if ${ac_cv_func_mmap_dev_zero+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Add a system to this blacklist if it has mmap() but /dev/zero + # does not exist, or if mmapping /dev/zero does not give anonymous + # zeroed pages with both the following properties: + # 1. If you map N consecutive pages in with one call, and then + # unmap any subset of those pages, the pages that were not + # explicitly unmapped remain accessible. + # 2. If you map two adjacent blocks of memory and then unmap them + # both at once, they must both go away. + # Systems known to be in this category are Windows (all variants), + # VMS, and Darwin. 
+ case "$host_os" in + vms* | cygwin* | pe | mingw* | darwin* | ultrix* | hpux10* | hpux11.00) + ac_cv_func_mmap_dev_zero=no ;; + *) + ac_cv_func_mmap_dev_zero=yes;; + esac +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_dev_zero" >&5 +$as_echo "$ac_cv_func_mmap_dev_zero" >&6; } + + # Unlike /dev/zero, the MAP_ANON(YMOUS) defines can be probed for. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MAP_ANON(YMOUS)" >&5 +$as_echo_n "checking for MAP_ANON(YMOUS)... " >&6; } +if ${ac_cv_decl_map_anon+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +int +main () +{ +int n = MAP_ANONYMOUS; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_decl_map_anon=yes +else + ac_cv_decl_map_anon=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_decl_map_anon" >&5 +$as_echo "$ac_cv_decl_map_anon" >&6; } + + if test $ac_cv_decl_map_anon = no; then + ac_cv_func_mmap_anon=no + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mmap with MAP_ANON(YMOUS) works" >&5 +$as_echo_n "checking whether mmap with MAP_ANON(YMOUS) works... " >&6; } +if ${ac_cv_func_mmap_anon+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Add a system to this blacklist if it has mmap() and MAP_ANON or + # MAP_ANONYMOUS, but using mmap(..., MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) + # doesn't give anonymous zeroed pages with the same properties listed + # above for use of /dev/zero. + # Systems known to be in this category are Windows, VMS, and SCO Unix. + case "$host_os" in + vms* | cygwin* | pe | mingw* | sco* | udk* ) + ac_cv_func_mmap_anon=no ;; + *) + ac_cv_func_mmap_anon=yes;; + esac +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_anon" >&5 +$as_echo "$ac_cv_func_mmap_anon" >&6; } + fi +fi + +if test $ac_cv_func_mmap_file = yes; then + +$as_echo "#define HAVE_MMAP_FILE 1" >>confdefs.h + +fi +if test $ac_cv_func_mmap_dev_zero = yes; then + +$as_echo "#define HAVE_MMAP_DEV_ZERO 1" >>confdefs.h + +fi +if test $ac_cv_func_mmap_anon = yes; then + +$as_echo "#define HAVE_MMAP_ANON 1" >>confdefs.h + +fi + + + if test -d $srcdir/testsuite; then + TESTSUBDIR_TRUE= + TESTSUBDIR_FALSE='#' +else + TESTSUBDIR_TRUE='#' + TESTSUBDIR_FALSE= +fi + + +TARGETDIR="unknown" +HAVE_LONG_DOUBLE_VARIANT=0 + +. ${srcdir}/configure.host + +if test -n "${UNSUPPORTED}"; then + as_fn_error $? "\"libffi has not been ported to $host.\"" "$LINENO" 5 +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +for ac_func in memcpy +do : + ac_fn_c_check_func "$LINENO" "memcpy" "ac_cv_func_memcpy" +if test "x$ac_cv_func_memcpy" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MEMCPY 1 +_ACEOF + +fi +done + +ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" +if test "x$ac_cv_type_size_t" = xyes; then : + +else + +cat >>confdefs.h <<_ACEOF +#define size_t unsigned int +_ACEOF + +fi + +# The Ultrix 4.2 mips builtin alloca declared by alloca.h only works +# for constant arguments. Useless! +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 +$as_echo_n "checking for working alloca.h... " >&6; } +if ${ac_cv_working_alloca_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +char *p = (char *) alloca (2 * sizeof (int)); + if (p) return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_working_alloca_h=yes +else + ac_cv_working_alloca_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 +$as_echo "$ac_cv_working_alloca_h" >&6; } +if test $ac_cv_working_alloca_h = yes; then + +$as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 +$as_echo_n "checking for alloca... " >&6; } +if ${ac_cv_func_alloca_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#ifdef __GNUC__ +# define alloca __builtin_alloca +#else +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef HAVE_ALLOCA_H +# include +# else +# ifdef _AIX + #pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +void *alloca (size_t); +# endif +# endif +# endif +# endif +#endif + +int +main () +{ +char *p = (char *) alloca (1); + if (p) return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_func_alloca_works=yes +else + ac_cv_func_alloca_works=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 +$as_echo "$ac_cv_func_alloca_works" >&6; } + +if test $ac_cv_func_alloca_works = yes; then + +$as_echo "#define HAVE_ALLOCA 1" >>confdefs.h + +else + # The SVR3 libPW and SVR4 libucb both contain incompatible functions +# that cause trouble. Some versions do not even contain alloca or +# contain a buggy version. If you still want to use their alloca, +# use ar to extract alloca.o from them instead of compiling alloca.c. + +ALLOCA=\${LIBOBJDIR}alloca.$ac_objext + +$as_echo "#define C_ALLOCA 1" >>confdefs.h + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 +$as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } +if ${ac_cv_os_cray+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#if defined CRAY && ! defined CRAY2 +webecray +#else +wenotbecray +#endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "webecray" >/dev/null 2>&1; then : + ac_cv_os_cray=yes +else + ac_cv_os_cray=no +fi +rm -f conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 +$as_echo "$ac_cv_os_cray" >&6; } +if test $ac_cv_os_cray = yes; then + for ac_func in _getb67 GETB67 getb67; do + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + +cat >>confdefs.h <<_ACEOF +#define CRAY_STACKSEG_END $ac_func +_ACEOF + + break +fi + + done +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 +$as_echo_n "checking stack direction for C alloca... " >&6; } +if ${ac_cv_c_stack_direction+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + ac_cv_c_stack_direction=0 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +find_stack_direction (int *addr, int depth) +{ + int dir, dummy = 0; + if (! addr) + addr = &dummy; + *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1; + dir = depth ? 
find_stack_direction (addr, depth - 1) : 0; + return dir + dummy; +} + +int +main (int argc, char **argv) +{ + return find_stack_direction (0, argc + !argv + 20) < 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_stack_direction=1 +else + ac_cv_c_stack_direction=-1 +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 +$as_echo "$ac_cv_c_stack_direction" >&6; } +cat >>confdefs.h <<_ACEOF +#define STACK_DIRECTION $ac_cv_c_stack_direction +_ACEOF + + +fi + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of double" >&5 +$as_echo_n "checking size of double... " >&6; } +if ${ac_cv_sizeof_double+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (double))" "ac_cv_sizeof_double" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_double" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (double) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_double=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_double" >&5 +$as_echo "$ac_cv_sizeof_double" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_DOUBLE $ac_cv_sizeof_double +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long double" >&5 +$as_echo_n "checking size of long double... " >&6; } +if ${ac_cv_sizeof_long_double+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long double))" "ac_cv_sizeof_long_double" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_long_double" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (long double) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_long_double=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_double" >&5 +$as_echo "$ac_cv_sizeof_long_double" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_LONG_DOUBLE $ac_cv_sizeof_long_double +_ACEOF + + + +# Also AC_SUBST this variable for ffi.h. +if test -z "$HAVE_LONG_DOUBLE"; then + HAVE_LONG_DOUBLE=0 + if test $ac_cv_sizeof_long_double != 0; then + if test $HAVE_LONG_DOUBLE_VARIANT != 0; then + +$as_echo "#define HAVE_LONG_DOUBLE_VARIANT 1" >>confdefs.h + + HAVE_LONG_DOUBLE=1 + else + if test $ac_cv_sizeof_double != $ac_cv_sizeof_long_double; then + HAVE_LONG_DOUBLE=1 + +$as_echo "#define HAVE_LONG_DOUBLE 1" >>confdefs.h + + fi + fi + fi +fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +$as_echo_n "checking whether byte ordering is bigendian... 
" >&6; } +if ${ac_cv_c_bigendian+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_bigendian=unknown + # See if we're dealing with a universal compiler. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + # Check for potential -arch flags. It is not universal unless + # there are at least two -arch flags with different values. + ac_arch= + ac_prev= + for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do + if test -n "$ac_prev"; then + case $ac_word in + i?86 | x86_64 | ppc | ppc64) + if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then + ac_arch=$ac_word + else + ac_cv_c_bigendian=universal + break + fi + ;; + esac + ac_prev= + elif test "x$ac_word" = "x-arch"; then + ac_prev=arch + fi + done +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + if test $ac_cv_c_bigendian = unknown; then + # See if sys/param.h defines the BYTE_ORDER macro. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + #include + +int +main () +{ +#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ + && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ + && LITTLE_ENDIAN) + bogus endian macros + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + #include + +int +main () +{ +#if BYTE_ORDER != BIG_ENDIAN + not big endian + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes +else + ac_cv_c_bigendian=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +int +main () +{ +#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) + bogus endian macros + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to _BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +int +main () +{ +#ifndef _BIG_ENDIAN + not big endian + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes +else + ac_cv_c_bigendian=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # Compile a test program. + if test "$cross_compiling" = yes; then : + # Try to guess by grepping values from an object file. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +short int ascii_mm[] = + { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; + short int ascii_ii[] = + { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; + int use_ascii (int i) { + return ascii_mm[i] + ascii_ii[i]; + } + short int ebcdic_ii[] = + { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; + short int ebcdic_mm[] = + { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; + int use_ebcdic (int i) { + return ebcdic_mm[i] + ebcdic_ii[i]; + } + extern int foo; + +int +main () +{ +return use_ascii (foo) == use_ebcdic (foo); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then + ac_cv_c_bigendian=yes + fi + if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then + if test "$ac_cv_c_bigendian" = unknown; then + ac_cv_c_bigendian=no + else + # finding both strings is unlikely to happen, but who knows? + ac_cv_c_bigendian=unknown + fi + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ + + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_bigendian=no +else + ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +$as_echo "$ac_cv_c_bigendian" >&6; } + case $ac_cv_c_bigendian in #( + yes) + $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h +;; #( + no) + ;; #( + universal) + +$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h + + ;; #( + *) + as_fn_error $? "unknown endianness + presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; + esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler .cfi pseudo-op support" >&5 +$as_echo_n "checking assembler .cfi pseudo-op support... " >&6; } +if ${gcc_cv_as_cfi_pseudo_op+:} false; then : + $as_echo_n "(cached) " >&6 +else + + gcc_cv_as_cfi_pseudo_op=unknown + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc"); +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + gcc_cv_as_cfi_pseudo_op=yes +else + gcc_cv_as_cfi_pseudo_op=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_cfi_pseudo_op" >&5 +$as_echo "$gcc_cv_as_cfi_pseudo_op" >&6; } + if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then + +$as_echo "#define HAVE_AS_CFI_PSEUDO_OP 1" >>confdefs.h + + fi + + +case "$TARGET" in + SPARC) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler and linker support unaligned pc related relocs" >&5 +$as_echo_n "checking assembler and linker support unaligned pc related relocs... " >&6; } +if ${libffi_cv_as_sparc_ua_pcrel+:} false; then : + $as_echo_n "(cached) " >&6 +else + + save_CFLAGS="$CFLAGS" + save_LDFLAGS="$LDFLAGS" + CFLAGS="$CFLAGS -fpic" + LDFLAGS="$LDFLAGS -shared" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +asm (".text; foo: nop; .data; .align 4; .byte 0; .uaword %r_disp32(foo); .text"); +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + libffi_cv_as_sparc_ua_pcrel=yes +else + libffi_cv_as_sparc_ua_pcrel=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS="$save_CFLAGS" + LDFLAGS="$save_LDFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_sparc_ua_pcrel" >&5 +$as_echo "$libffi_cv_as_sparc_ua_pcrel" >&6; } + if test "x$libffi_cv_as_sparc_ua_pcrel" = xyes; then + +$as_echo "#define HAVE_AS_SPARC_UA_PCREL 1" >>confdefs.h + + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler .register pseudo-op support" >&5 +$as_echo_n "checking assembler .register pseudo-op support... " >&6; } +if ${libffi_cv_as_register_pseudo_op+:} false; then : + $as_echo_n "(cached) " >&6 +else + + libffi_cv_as_register_pseudo_op=unknown + # Check if we have .register + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +asm (".register %g2, #scratch"); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + libffi_cv_as_register_pseudo_op=yes +else + libffi_cv_as_register_pseudo_op=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_register_pseudo_op" >&5 +$as_echo "$libffi_cv_as_register_pseudo_op" >&6; } + if test "x$libffi_cv_as_register_pseudo_op" = xyes; then + +$as_echo "#define HAVE_AS_REGISTER_PSEUDO_OP 1" >>confdefs.h + + fi + ;; + + X86*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler supports pc related relocs" >&5 +$as_echo_n "checking assembler supports pc related relocs... " >&6; } +if ${libffi_cv_as_x86_pcrel+:} false; then : + $as_echo_n "(cached) " >&6 +else + + libffi_cv_as_x86_pcrel=no + echo '.text; foo: nop; .data; .long foo-.; .text' > conftest.s + if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then + libffi_cv_as_x86_pcrel=yes + fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_x86_pcrel" >&5 +$as_echo "$libffi_cv_as_x86_pcrel" >&6; } + if test "x$libffi_cv_as_x86_pcrel" = xyes; then + +$as_echo "#define HAVE_AS_X86_PCREL 1" >>confdefs.h + + fi + ;; + + S390) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking compiler uses zarch features" >&5 +$as_echo_n "checking compiler uses zarch features... " >&6; } +if ${libffi_cv_as_s390_zarch+:} false; then : + $as_echo_n "(cached) " >&6 +else + + libffi_cv_as_s390_zarch=no + echo 'void foo(void) { bar(); bar(); }' > conftest.c + if $CC $CFLAGS -S conftest.c > /dev/null 2>&1; then + if grep -q brasl conftest.s; then + libffi_cv_as_s390_zarch=yes + fi + fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_s390_zarch" >&5 +$as_echo "$libffi_cv_as_s390_zarch" >&6; } + if test "x$libffi_cv_as_s390_zarch" = xyes; then + +$as_echo "#define HAVE_AS_S390_ZARCH 1" >>confdefs.h + + fi + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports pointer authentication" >&5 +$as_echo_n "checking whether compiler supports pointer authentication... " >&6; } +if ${libffi_cv_as_ptrauth+:} false; then : + $as_echo_n "(cached) " >&6 +else + + libffi_cv_as_ptrauth=unknown + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + +#ifdef __clang__ +# if __has_feature(ptrauth_calls) +# define HAVE_PTRAUTH 1 +# endif +#endif + +#ifndef HAVE_PTRAUTH +# error Pointer authentication not supported +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + libffi_cv_as_ptrauth=yes +else + libffi_cv_as_ptrauth=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_ptrauth" >&5 +$as_echo "$libffi_cv_as_ptrauth" >&6; } +if test "x$libffi_cv_as_ptrauth" = xyes; then + +$as_echo "#define HAVE_PTRAUTH 1" >>confdefs.h + +fi + +# On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. +# Check whether --enable-pax_emutramp was given. +if test "${enable_pax_emutramp+set}" = set; then : + enableval=$enable_pax_emutramp; if test "$enable_pax_emutramp" = "yes"; then + +$as_echo "#define FFI_MMAP_EXEC_EMUTRAMP_PAX 1" >>confdefs.h + + fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _ prefix in compiled symbols" >&5 +$as_echo_n "checking for _ prefix in compiled symbols... " >&6; } +if ${lt_cv_sys_symbol_underscore+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sys_symbol_underscore=no + cat > conftest.$ac_ext <<_LT_EOF +void nm_test_func(){} +int main(){nm_test_func;return 0;} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + ac_nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $ac_nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $ac_nlist) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$ac_nlist"; then + # See whether the symbols have a leading underscore. + if grep '^. _nm_test_func' "$ac_nlist" >/dev/null; then + lt_cv_sys_symbol_underscore=yes + else + if grep '^. nm_test_func ' "$ac_nlist" >/dev/null; then + : + else + echo "configure: cannot find nm_test_func in $ac_nlist" >&5 + fi + fi + else + echo "configure: cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "configure: failed program was:" >&5 + cat conftest.c >&5 + fi + rm -rf conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_symbol_underscore" >&5 +$as_echo "$lt_cv_sys_symbol_underscore" >&6; } + sys_symbol_underscore=$lt_cv_sys_symbol_underscore + + +if test "x$sys_symbol_underscore" = xyes; then + +$as_echo "#define SYMBOL_UNDERSCORE 1" >>confdefs.h + +fi + +FFI_EXEC_TRAMPOLINE_TABLE=0 +case "$target" in + *arm*-apple-* | aarch64-apple-*) + FFI_EXEC_TRAMPOLINE_TABLE=1 + +$as_echo "#define FFI_EXEC_TRAMPOLINE_TABLE 1" >>confdefs.h + + ;; + *-apple-* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris* | *-linux-android*) + +$as_echo "#define FFI_MMAP_EXEC_WRIT 1" >>confdefs.h + + ;; +esac + if test x$FFI_EXEC_TRAMPOLINE_TABLE = x1; then + FFI_EXEC_TRAMPOLINE_TABLE_TRUE= + FFI_EXEC_TRAMPOLINE_TABLE_FALSE='#' +else + FFI_EXEC_TRAMPOLINE_TABLE_TRUE='#' + FFI_EXEC_TRAMPOLINE_TABLE_FALSE= +fi + + + +if test x$TARGET = xX86_64; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking toolchain supports unwind section type" >&5 +$as_echo_n "checking toolchain supports unwind section type... 
" >&6; } +if ${libffi_cv_as_x86_64_unwind_section_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat > conftest1.s << EOF +.text +.globl foo +foo: +jmp bar +.section .eh_frame,"a",@unwind +bar: +EOF + + cat > conftest2.c << EOF +extern void foo(); +int main(){foo();} +EOF + + libffi_cv_as_x86_64_unwind_section_type=no + # we ensure that we can compile _and_ link an assembly file containing an @unwind section + # since the compiler can support it and not the linker (ie old binutils) + if $CC -Wa,--fatal-warnings $CFLAGS -c conftest1.s > /dev/null 2>&1 && \ + $CC conftest2.c conftest1.o > /dev/null 2>&1 ; then + libffi_cv_as_x86_64_unwind_section_type=yes + fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_x86_64_unwind_section_type" >&5 +$as_echo "$libffi_cv_as_x86_64_unwind_section_type" >&6; } + if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then + +$as_echo "#define HAVE_AS_X86_64_UNWIND_SECTION_TYPE 1" >>confdefs.h + + fi +fi + +if test "x$GCC" = "xyes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether .eh_frame section should be read-only" >&5 +$as_echo_n "checking whether .eh_frame section should be read-only... " >&6; } +if ${libffi_cv_ro_eh_frame+:} false; then : + $as_echo_n "(cached) " >&6 +else + + libffi_cv_ro_eh_frame=yes + echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c + if $CC $CFLAGS -c -fpic -fexceptions -o conftest.o conftest.c > /dev/null 2>&1; then + if readelf -WS conftest.o | grep -q -n 'eh_frame .* WA'; then + libffi_cv_ro_eh_frame=no + fi + fi + rm -f conftest.* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_ro_eh_frame" >&5 +$as_echo "$libffi_cv_ro_eh_frame" >&6; } + if test "x$libffi_cv_ro_eh_frame" = xyes; then + +$as_echo "#define HAVE_RO_EH_FRAME 1" >>confdefs.h + + +$as_echo "#define EH_FRAME_FLAGS \"a\"" >>confdefs.h + + else + +$as_echo "#define EH_FRAME_FLAGS \"aw\"" >>confdefs.h + + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((visibility(\"hidden\")))" >&5 +$as_echo_n "checking for __attribute__((visibility(\"hidden\")))... " >&6; } +if ${libffi_cv_hidden_visibility_attribute+:} false; then : + $as_echo_n "(cached) " >&6 +else + + echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c + libffi_cv_hidden_visibility_attribute=no + if { ac_try='${CC-cc} -Werror -S conftest.c -o conftest.s 1>&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 + (eval $ac_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + if egrep '(\.hidden|\.private_extern).*foo' conftest.s >/dev/null; then + libffi_cv_hidden_visibility_attribute=yes + fi + fi + rm -f conftest.* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_hidden_visibility_attribute" >&5 +$as_echo "$libffi_cv_hidden_visibility_attribute" >&6; } + if test $libffi_cv_hidden_visibility_attribute = yes; then + +$as_echo "#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1" >>confdefs.h + + fi +fi + +# Check whether --enable-docs was given. 
+if test "${enable_docs+set}" = set; then : + enableval=$enable_docs; enable_docs=no +else + enable_docs=yes +fi + + if test x$enable_docs = xyes; then + BUILD_DOCS_TRUE= + BUILD_DOCS_FALSE='#' +else + BUILD_DOCS_TRUE='#' + BUILD_DOCS_FALSE= +fi + + + + + + + + +TARGET_OBJ= +for i in $SOURCES; do + TARGET_OBJ="${TARGET_OBJ} src/${TARGETDIR}/"`echo $i | sed 's/[cS]$/lo/'` +done + + + + + +# Check whether --enable-debug was given. +if test "${enable_debug+set}" = set; then : + enableval=$enable_debug; if test "$enable_debug" = "yes"; then + +$as_echo "#define FFI_DEBUG 1" >>confdefs.h + + fi +fi + + if test "$enable_debug" = "yes"; then + FFI_DEBUG_TRUE= + FFI_DEBUG_FALSE='#' +else + FFI_DEBUG_TRUE='#' + FFI_DEBUG_FALSE= +fi + + +# Check whether --enable-structs was given. +if test "${enable_structs+set}" = set; then : + enableval=$enable_structs; if test "$enable_structs" = "no"; then + +$as_echo "#define FFI_NO_STRUCTS 1" >>confdefs.h + + fi +fi + + if test "$enable_debug" = "yes"; then + FFI_DEBUG_TRUE= + FFI_DEBUG_FALSE='#' +else + FFI_DEBUG_TRUE='#' + FFI_DEBUG_FALSE= +fi + + +# Check whether --enable-raw-api was given. +if test "${enable_raw_api+set}" = set; then : + enableval=$enable_raw_api; if test "$enable_raw_api" = "no"; then + +$as_echo "#define FFI_NO_RAW_API 1" >>confdefs.h + + fi +fi + + +# Check whether --enable-purify-safety was given. +if test "${enable_purify_safety+set}" = set; then : + enableval=$enable_purify_safety; if test "$enable_purify_safety" = "yes"; then + +$as_echo "#define USING_PURIFY 1" >>confdefs.h + + fi +fi + + +# Check whether --enable-multi-os-directory was given. +if test "${enable_multi_os_directory+set}" = set; then : + enableval=$enable_multi_os_directory; +fi + + +# These variables are only ever used when we cross-build to X86_WIN32. +# And we only support this with GCC, so... +if test "x$GCC" = "xyes"; then + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + toolexecdir='${exec_prefix}'/'$(target_alias)' + toolexeclibdir='${toolexecdir}'/lib + else + toolexecdir='${libdir}'/gcc-lib/'$(target_alias)' + toolexeclibdir='${libdir}' + fi + if test x"$enable_multi_os_directory" != x"no"; then + multi_os_directory=`$CC $CFLAGS -print-multi-os-directory` + case $multi_os_directory in + .) ;; # Avoid trailing /. + ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;; + esac + fi + +else + toolexeclibdir='${libdir}' +fi + + +# Check linker support. + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then : + withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test yes = "$GCC"; then + # Check if gcc -print-prog-name=ld gives a path. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +$as_echo_n "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return, which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD=$ac_prog + ;; + "") + # If it fails, then pretend we aren't using GCC. 
+ ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test yes = "$with_gnu_ld"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... " >&6; } +fi +if ${lt_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$LD"; then + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD=$ac_dir/$ac_prog + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + + + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + libat_ld_is_lld=no + if $LD --version 2>/dev/null | grep 'LLD '> /dev/null 2>&1; then + libat_ld_is_lld=yes + fi + + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) $3=0; print ($1*100+$2)*100+$3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. 
+ # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld that supports -Wl,--gc-sections" >&5 +$as_echo_n "checking for ld that supports -Wl,--gc-sections... " >&6; } + if test "$cross_compiling" = yes; then : + ac_sectionLDflags=yes +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_sectionLDflags=yes +else + ac_sectionLDflags=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_sectionLDflags" >&5 +$as_echo "$ac_sectionLDflags" >&6; } + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + + + + + + # Check whether --enable-symvers was given. +if test "${enable_symvers+set}" = set; then : + enableval=$enable_symvers; + case "$enableval" in + yes|no|gnu*|sun) ;; + *) as_fn_error $? "Unknown argument to enable/disable symvers" "$LINENO" 5 ;; + esac + +else + enable_symvers=yes +fi + + + +# If we never went through the LIBFFI_CHECK_LINKER_FEATURES macro, then we +# don't know enough about $LD to do tricks... + + +# Turn a 'yes' into a suitable default. +if test x$enable_symvers = xyes ; then + # FIXME The following test is too strict, in theory. + if test $enable_shared = no || test "x$LD" = x; then + enable_symvers=no + else + if test $with_gnu_ld = yes ; then + enable_symvers=gnu + else + case ${target_os} in + # Sun symbol versioning exists since Solaris 2.5. + solaris2.[5-9]* | solaris2.1[0-9]*) + enable_symvers=sun ;; + *) + enable_symvers=no ;; + esac + fi + fi +fi + +# Check if 'sun' was requested on non-Solaris 2 platforms. +if test x$enable_symvers = xsun ; then + case ${target_os} in + solaris2*) + # All fine. + ;; + *) + # Unlikely to work. + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === You have requested Sun symbol versioning, but" >&5 +$as_echo "$as_me: WARNING: === You have requested Sun symbol versioning, but" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === you are not targetting Solaris 2." >&5 +$as_echo "$as_me: WARNING: === you are not targetting Solaris 2." >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === Symbol versioning will be disabled." >&5 +$as_echo "$as_me: WARNING: === Symbol versioning will be disabled." >&2;} + enable_symvers=no + ;; + esac +fi + +# Check to see if libgcc_s exists, indicating that shared libgcc is possible. 
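The probe that follows links an empty program with -lgcc_s in CFLAGS; if that succeeds, a shared libgcc is available and symbol versioning stays an option (on failure the script retries with a suffixed -lgcc_s name extracted from the compiler driver's verbose output). A hand-run equivalent, sketched under the assumption that a GCC-compatible cc is on PATH; the file names here are made up for illustration:

# Probe for a shared libgcc by linking a trivial program against -lgcc_s.
cat > libgcc-probe.c <<'EOF'
int main (void) { return 0; }
EOF
if cc libgcc-probe.c -lgcc_s -o libgcc-probe 2>/dev/null; then
  echo "shared libgcc: yes"
else
  echo "shared libgcc: no"   # configure would then look for a suffixed libgcc_s
fi
rm -f libgcc-probe libgcc-probe.c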
+if test $enable_symvers != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared libgcc" >&5 +$as_echo_n "checking for shared libgcc... " >&6; } + ac_save_CFLAGS="$CFLAGS" + CFLAGS=' -lgcc_s' + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + libat_shared_libgcc=yes +else + libat_shared_libgcc=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS="$ac_save_CFLAGS" + if test $libat_shared_libgcc = no; then + cat > conftest.c <&1 >/dev/null \ + | sed -n 's/^.* -lgcc_s\([^ ]*\) .*$/\1/p'` + rm -f conftest.c conftest.so + if test x${libat_libgcc_s_suffix+set} = xset; then + CFLAGS=" -lgcc_s$libat_libgcc_s_suffix" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + libat_shared_libgcc=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS="$ac_save_CFLAGS" + fi + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $libat_shared_libgcc" >&5 +$as_echo "$libat_shared_libgcc" >&6; } +fi + +# For GNU ld, we need at least this version. The format is described in +# LIBFFI_CHECK_LINKER_FEATURES above. +libat_min_gnu_ld_version=21400 +# XXXXXXXXXXX libat_gnu_ld_version=21390 + +# Check to see if unspecified "yes" value can win, given results above. +# Change "yes" into either "no" or a style name. +if test $enable_symvers != no && test $libat_shared_libgcc = yes; then + if test $with_gnu_ld = yes; then + if test $libat_gnu_ld_version -ge $libat_min_gnu_ld_version ; then + enable_symvers=gnu + elif test $libat_ld_is_gold = yes ; then + enable_symvers=gnu + elif test $libat_ld_is_lld = yes ; then + enable_symvers=gnu + else + # The right tools, the right setup, but too old. Fallbacks? + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === Linker version $libat_gnu_ld_version is too old for" >&5 +$as_echo "$as_me: WARNING: === Linker version $libat_gnu_ld_version is too old for" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === full symbol versioning support in this release of GCC." >&5 +$as_echo "$as_me: WARNING: === full symbol versioning support in this release of GCC." >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === You would need to upgrade your binutils to version" >&5 +$as_echo "$as_me: WARNING: === You would need to upgrade your binutils to version" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === $libat_min_gnu_ld_version or later and rebuild GCC." >&5 +$as_echo "$as_me: WARNING: === $libat_min_gnu_ld_version or later and rebuild GCC." >&2;} + if test $libat_gnu_ld_version -ge 21200 ; then + # Globbing fix is present, proper block support is not. + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === Symbol versioning will be disabled." >&5 +$as_echo "$as_me: WARNING: === Symbol versioning will be disabled." >&2;} + enable_symvers=no + else + # 2.11 or older. + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === Symbol versioning will be disabled." >&5 +$as_echo "$as_me: WARNING: === Symbol versioning will be disabled." >&2;} + enable_symvers=no + fi + fi + elif test $enable_symvers = sun; then + : All interesting versions of Sun ld support sun style symbol versioning. 
+ else + # just fail for now + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === You have requested some kind of symbol versioning, but" >&5 +$as_echo "$as_me: WARNING: === You have requested some kind of symbol versioning, but" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === either you are not using a supported linker, or you are" >&5 +$as_echo "$as_me: WARNING: === either you are not using a supported linker, or you are" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === not building a shared libgcc_s (which is required)." >&5 +$as_echo "$as_me: WARNING: === not building a shared libgcc_s (which is required)." >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: === Symbol versioning will be disabled." >&5 +$as_echo "$as_me: WARNING: === Symbol versioning will be disabled." >&2;} + enable_symvers=no + fi +fi +if test $enable_symvers = gnu; then + +$as_echo "#define LIBFFI_GNU_SYMBOL_VERSIONING 1" >>confdefs.h + +fi + + if test $enable_symvers != no; then + LIBFFI_BUILD_VERSIONED_SHLIB_TRUE= + LIBFFI_BUILD_VERSIONED_SHLIB_FALSE='#' +else + LIBFFI_BUILD_VERSIONED_SHLIB_TRUE='#' + LIBFFI_BUILD_VERSIONED_SHLIB_FALSE= +fi + + if test $enable_symvers = gnu; then + LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE= + LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE='#' +else + LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE='#' + LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE= +fi + + if test $enable_symvers = sun; then + LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE= + LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE='#' +else + LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE='#' + LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE= +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: versioning on shared library symbols is $enable_symvers" >&5 +$as_echo "$as_me: versioning on shared library symbols is $enable_symvers" >&6;} + + +ac_config_commands="$ac_config_commands include" + +ac_config_commands="$ac_config_commands src" + + +ac_config_files="$ac_config_files include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc" + + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. 
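The code that follows serializes every ac_cv_* result into config.cache-style output so that later configure runs can skip the probes, and so that individual answers can be inspected or preset by hand. Typical usage, as a sketch (the option names are standard Autoconf; the grep pattern is just an example, and presetting a cache variable is exactly what the endianness check above suggests):

# Cache the probe results once, then inspect or preset them later.
./configure -C                              # -C is shorthand for --cache-file=config.cache
grep '^ac_cv_c_bigendian' config.cache      # look at one cached answer
ac_cv_c_bigendian=no ./configure -C         # preset a value to skip or override a probe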
+( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 +$as_echo_n "checking that generated files are newer than configure... " >&6; } + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. + wait $am_sleep_pid 2>/dev/null + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 +$as_echo "done" >&6; } + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCCAS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then + as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${TESTSUBDIR_TRUE}" && test -z "${TESTSUBDIR_FALSE}"; then + as_fn_error $? "conditional \"TESTSUBDIR\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +if test -z "${FFI_EXEC_TRAMPOLINE_TABLE_TRUE}" && test -z "${FFI_EXEC_TRAMPOLINE_TABLE_FALSE}"; then + as_fn_error $? "conditional \"FFI_EXEC_TRAMPOLINE_TABLE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_DOCS_TRUE}" && test -z "${BUILD_DOCS_FALSE}"; then + as_fn_error $? "conditional \"BUILD_DOCS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${FFI_DEBUG_TRUE}" && test -z "${FFI_DEBUG_FALSE}"; then + as_fn_error $? "conditional \"FFI_DEBUG\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${FFI_DEBUG_TRUE}" && test -z "${FFI_DEBUG_FALSE}"; then + as_fn_error $? "conditional \"FFI_DEBUG\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${LIBFFI_BUILD_VERSIONED_SHLIB_TRUE}" && test -z "${LIBFFI_BUILD_VERSIONED_SHLIB_FALSE}"; then + as_fn_error $? "conditional \"LIBFFI_BUILD_VERSIONED_SHLIB\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE}" && test -z "${LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE}"; then + as_fn_error $? "conditional \"LIBFFI_BUILD_VERSIONED_SHLIB_GNU\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE}" && test -z "${LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE}"; then + as_fn_error $? "conditional \"LIBFFI_BUILD_VERSIONED_SHLIB_SUN\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. 
## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. 
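CDPATH is cleared because a POSIX shell echoes the resolved directory whenever cd finds its argument through CDPATH, which corrupts command substitutions that capture `cd dir && pwd` (the script's later comments call out HP-UX ksh and the POSIX shell specifically). A small demonstration, assuming a shell with that behavior and arbitrary directory names chosen for the example:

# With CDPATH set, cd prints the directory it resolved to on stdout.
CDPATH=/tmp; export CDPATH
mkdir -p /tmp/cdpath-demo
captured=`cd cdpath-demo && pwd`   # on such shells this captures the path twice
echo "$captured"
unset CDPATH                       # the defensive move the script makes next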
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... 
but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libffi $as_me 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. 
Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +libffi config.status 3.3 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +ax_enable_builddir_srcdir="$srcdir" # $srcdir +ax_enable_builddir_host="$HOST" # $HOST / $host +ax_enable_builddir_version="$VERSION" # $VERSION +ax_enable_builddir_package="$PACKAGE" # $PACKAGE +ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX +ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED +ax_enable_builddir="$ax_enable_builddir" # $SUB + +AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' +host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' +exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' 
+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED 
"$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED 
"$delay_single_quote_subst"`' +runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' +configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' +predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' +postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' +predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' +postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' +LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' +reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' +reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' +GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' 
+export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' +inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' +link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' +exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' +postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' +predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' +postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. 
+for var in SHELL \ +ECHO \ +PATH_SEPARATOR \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +DLLTOOL \ +sharedlib_from_linklib_cmd \ +AR \ +AR_FLAGS \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_import \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +lt_cv_nm_interface \ +nm_file_list_spec \ +lt_cv_truncate_bin \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib \ +compiler_lib_search_dirs \ +predep_objects \ +postdep_objects \ +predeps \ +postdeps \ +compiler_lib_search_path \ +LD_CXX \ +reload_flag_CXX \ +compiler_CXX \ +lt_prog_compiler_no_builtin_flag_CXX \ +lt_prog_compiler_pic_CXX \ +lt_prog_compiler_wl_CXX \ +lt_prog_compiler_static_CXX \ +lt_cv_prog_compiler_c_o_CXX \ +export_dynamic_flag_spec_CXX \ +whole_archive_flag_spec_CXX \ +compiler_needs_object_CXX \ +with_gnu_ld_CXX \ +allow_undefined_flag_CXX \ +no_undefined_flag_CXX \ +hardcode_libdir_flag_spec_CXX \ +hardcode_libdir_separator_CXX \ +exclude_expsyms_CXX \ +include_expsyms_CXX \ +file_list_spec_CXX \ +compiler_lib_search_dirs_CXX \ +predep_objects_CXX \ +postdep_objects_CXX \ +predeps_CXX \ +postdeps_CXX \ +compiler_lib_search_path_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. 
+for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +configure_time_dlsearch_path \ +configure_time_lt_sys_library_path \ +reload_cmds_CXX \ +old_archive_cmds_CXX \ +old_archive_from_new_cmds_CXX \ +old_archive_from_expsyms_cmds_CXX \ +archive_cmds_CXX \ +archive_expsym_cmds_CXX \ +module_cmds_CXX \ +module_expsym_cmds_CXX \ +export_symbols_cmds_CXX \ +prelink_cmds_CXX \ +postlink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + RM='$RM' + ofile='$ofile' + + + + + +TARGETDIR="$TARGETDIR" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "fficonfig.h") CONFIG_HEADERS="$CONFIG_HEADERS fficonfig.h" ;; + "buildir") CONFIG_COMMANDS="$CONFIG_COMMANDS buildir" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "include") CONFIG_COMMANDS="$CONFIG_COMMANDS include" ;; + "src") CONFIG_COMMANDS="$CONFIG_COMMANDS src" ;; + "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; + "include/ffi.h") CONFIG_FILES="$CONFIG_FILES include/ffi.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "testsuite/Makefile") CONFIG_FILES="$CONFIG_FILES testsuite/Makefile" ;; + "man/Makefile") CONFIG_FILES="$CONFIG_FILES man/Makefile" ;; + "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; + "libffi.pc") CONFIG_FILES="$CONFIG_FILES libffi.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! 
-d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. + +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. 
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. +_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "buildir":C) ac_top_srcdir="$ax_enable_builddir_srcdir" + if test ".$ax_enable_builddir" = ".." ; then + if test -f "$top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - left untouched" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - left untouched" >&6;} + else + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - not created" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - not created" >&6;} + fi + else + if test -f "$ac_top_srcdir/Makefile" ; then + a=`grep "^VERSION " "$ac_top_srcdir/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$ac_top_srcdir/Makefile" + fi + if test -f "$ac_top_srcdir/Makefile" ; then + echo "$ac_top_srcdir/Makefile : $ac_top_srcdir/Makefile.in" > $tmp/conftemp.mk + echo " @ echo 'REMOVED,,,' >\$@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$ac_top_srcdir/Makefile" >/dev/null + then rm $ac_top_srcdir/Makefile ; fi + cp $tmp/conftemp.mk $ac_top_srcdir/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$ac_top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: create top_srcdir/Makefile guessed from local Makefile" >&5 +$as_echo "$as_me: create top_srcdir/Makefile guessed from local Makefile" >&6;} + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[ ]*[\\#]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[:=]/!d +/^\\./d +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/ \\1 \\1-all\\2/g +s/^\\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/\\1 \\1-all\\2/ +s/ / /g +/^all all-all[ :]/i\\ +all-configured : all-all +s/ [a-zA-Z0-9-]*-all [a-zA-Z0-9-]*-all-all//g +/-all-all/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." 
&& HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +/dist-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/dist-[a-zA-Z0-9]*-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/distclean-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefile.sed~" ## DEBUGGING + $ax_enable_builddir_sed -f $tmp/conftemp.sed Makefile >$ac_top_srcdir/Makefile + if test -f "$ac_top_srcdir/Makefile.mk" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&5 +$as_echo "$as_me: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&6;} + cat $ac_top_srcdir/Makefile.mk >>$ac_top_srcdir/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$ac_top_srcdir/Makefile + # sanity check + if grep '^; echo "MAKE ' $ac_top_srcdir/Makefile >/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: buggy sed found - it deletes tab in \"a\" text parts" >&5 +$as_echo "$as_me: buggy sed found - it deletes tab in \"a\" text parts" >&6;} + $ax_enable_builddir_sed -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $ac_top_srcdir/Makefile \ + >$ac_top_srcdir/Makefile~ + (test -s $ac_top_srcdir/Makefile~ && mv $ac_top_srcdir/Makefile~ $ac_top_srcdir/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [^|]* | *$ax_enable_builddir *\$!$xxxx ...... $ax_enable_builddir!" >$tmp/conftemp.sed + $ax_enable_builddir_sed -f "$tmp/conftemp.sed" "$ac_top_srcdir/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$ac_top_srcdir/makefiles.out~" ## DEBUGGING + if cmp -s "$ac_top_srcdir/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: keeping top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: keeping top_srcdir/Makefile from earlier configure" >&6;} + rm "$tmp/mkfile.tmp" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: reusing top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: reusing top_srcdir/Makefile from earlier configure" >&6;} + mv "$tmp/mkfile.tmp" "$ac_top_srcdir/Makefile" + fi + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&5 +$as_echo "$as_me: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&6;} + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$ax_enable_builddir" >>$ac_top_srcdir/Makefile + fi + ;; + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named 'Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. 
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running 'make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "$am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. 
+available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec=$shared_archive_member_spec + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. 
+global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. 
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. 
+hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
+func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. 
+allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + "include":C) test -d include || mkdir include ;; + "src":C) +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. 
+# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +# Copy this file instead of using AC_CONFIG_LINK in order to support +# compiling with MSVC, which won't understand cygwin style symlinks. +cp ${srcdir}/src/$TARGETDIR/ffitarget.h include/ffitarget.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2.ac b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2.ac new file mode 100644 index 0000000..3ca1f4f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2.ac @@ -0,0 +1,410 @@ +dnl Process this with autoconf to create configure + +AC_PREREQ(2.68) + +AC_INIT([libffi], [3.3], [http://github.com/libffi/libffi/issues]) +AC_CONFIG_HEADERS([fficonfig.h]) + +AC_CANONICAL_SYSTEM +target_alias=${target_alias-$host_alias} + +case "${host}" in + frv*-elf) + LDFLAGS=`echo $LDFLAGS | sed "s/\-B[^ ]*libgloss\/frv\///"`\ -B`pwd`/../libgloss/frv/ + ;; +esac + +AX_ENABLE_BUILDDIR + +AM_INIT_AUTOMAKE + +# The same as in boehm-gc and libstdc++. Have to borrow it from there. +# We must force CC to /not/ be precious variables; otherwise +# the wrong, non-multilib-adjusted value will be used in multilibs. +# As a side effect, we have to subst CFLAGS ourselves. +# Also save and restore CFLAGS, since AC_PROG_CC will come up with +# defaults of its own if none are provided. + +m4_rename([_AC_ARG_VAR_PRECIOUS],[real_PRECIOUS]) +m4_define([_AC_ARG_VAR_PRECIOUS],[]) +save_CFLAGS=$CFLAGS +AC_PROG_CC +AC_PROG_CXX +CFLAGS=$save_CFLAGS +m4_undefine([_AC_ARG_VAR_PRECIOUS]) +m4_rename_force([real_PRECIOUS],[_AC_ARG_VAR_PRECIOUS]) + +AC_SUBST(CFLAGS) + +AM_PROG_AS +AM_PROG_CC_C_O +AC_PROG_LIBTOOL +AC_CONFIG_MACRO_DIR([m4]) + +# Test for 64-bit build. +AC_CHECK_SIZEOF([size_t]) + +AX_COMPILER_VENDOR +AX_CC_MAXOPT +# The AX_CFLAGS_WARN_ALL macro doesn't currently work for sunpro +# compiler. 
+if test "$ax_cv_c_compiler_vendor" != "sun"; then + AX_CFLAGS_WARN_ALL +fi + +if test "x$GCC" = "xyes"; then + CFLAGS="$CFLAGS -fexceptions" +fi + +cat > local.exp < conftest.s + if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then + libffi_cv_as_x86_pcrel=yes + fi + ]) + if test "x$libffi_cv_as_x86_pcrel" = xyes; then + AC_DEFINE(HAVE_AS_X86_PCREL, 1, + [Define if your assembler supports PC relative relocs.]) + fi + ;; + + S390) + AC_CACHE_CHECK([compiler uses zarch features], + libffi_cv_as_s390_zarch, [ + libffi_cv_as_s390_zarch=no + echo 'void foo(void) { bar(); bar(); }' > conftest.c + if $CC $CFLAGS -S conftest.c > /dev/null 2>&1; then + if grep -q brasl conftest.s; then + libffi_cv_as_s390_zarch=yes + fi + fi + ]) + if test "x$libffi_cv_as_s390_zarch" = xyes; then + AC_DEFINE(HAVE_AS_S390_ZARCH, 1, + [Define if the compiler uses zarch features.]) + fi + ;; +esac + +AC_CACHE_CHECK([whether compiler supports pointer authentication], + libffi_cv_as_ptrauth, [ + libffi_cv_as_ptrauth=unknown + AC_TRY_COMPILE(,[ +#ifdef __clang__ +# if __has_feature(ptrauth_calls) +# define HAVE_PTRAUTH 1 +# endif +#endif + +#ifndef HAVE_PTRAUTH +# error Pointer authentication not supported +#endif + ], + [libffi_cv_as_ptrauth=yes], + [libffi_cv_as_ptrauth=no]) +]) +if test "x$libffi_cv_as_ptrauth" = xyes; then + AC_DEFINE(HAVE_PTRAUTH, 1, + [Define if your compiler supports pointer authentication.]) +fi + +# On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. +AC_ARG_ENABLE(pax_emutramp, + [ --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC], + if test "$enable_pax_emutramp" = "yes"; then + AC_DEFINE(FFI_MMAP_EXEC_EMUTRAMP_PAX, 1, + [Define this if you want to enable pax emulated trampolines]) + fi) + +LT_SYS_SYMBOL_USCORE +if test "x$sys_symbol_underscore" = xyes; then + AC_DEFINE(SYMBOL_UNDERSCORE, 1, [Define if symbols are underscored.]) +fi + +FFI_EXEC_TRAMPOLINE_TABLE=0 +case "$target" in + *arm*-apple-* | aarch64-apple-*) + FFI_EXEC_TRAMPOLINE_TABLE=1 + AC_DEFINE(FFI_EXEC_TRAMPOLINE_TABLE, 1, + [Cannot use PROT_EXEC on this target, so, we revert to + alternative means]) + ;; + *-apple-* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris* | *-linux-android*) + AC_DEFINE(FFI_MMAP_EXEC_WRIT, 1, + [Cannot use malloc on this target, so, we revert to + alternative means]) + ;; +esac +AM_CONDITIONAL(FFI_EXEC_TRAMPOLINE_TABLE, test x$FFI_EXEC_TRAMPOLINE_TABLE = x1) +AC_SUBST(FFI_EXEC_TRAMPOLINE_TABLE) + +if test x$TARGET = xX86_64; then + AC_CACHE_CHECK([toolchain supports unwind section type], + libffi_cv_as_x86_64_unwind_section_type, [ + cat > conftest1.s << EOF +.text +.globl foo +foo: +jmp bar +.section .eh_frame,"a",@unwind +bar: +EOF + + cat > conftest2.c << EOF +extern void foo(); +int main(){foo();} +EOF + + libffi_cv_as_x86_64_unwind_section_type=no + # we ensure that we can compile _and_ link an assembly file containing an @unwind section + # since the compiler can support it and not the linker (ie old binutils) + if $CC -Wa,--fatal-warnings $CFLAGS -c conftest1.s > /dev/null 2>&1 && \ + $CC conftest2.c conftest1.o > /dev/null 2>&1 ; then + libffi_cv_as_x86_64_unwind_section_type=yes + fi + ]) + if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then + AC_DEFINE(HAVE_AS_X86_64_UNWIND_SECTION_TYPE, 1, + [Define if your assembler supports unwind section type.]) + fi +fi + +if test "x$GCC" = "xyes"; then + AC_CACHE_CHECK([whether .eh_frame section should be read-only], + libffi_cv_ro_eh_frame, [ + libffi_cv_ro_eh_frame=yes + 
echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c + if $CC $CFLAGS -c -fpic -fexceptions -o conftest.o conftest.c > /dev/null 2>&1; then + if readelf -WS conftest.o | grep -q -n 'eh_frame .* WA'; then + libffi_cv_ro_eh_frame=no + fi + fi + rm -f conftest.* + ]) + if test "x$libffi_cv_ro_eh_frame" = xyes; then + AC_DEFINE(HAVE_RO_EH_FRAME, 1, + [Define if .eh_frame sections should be read-only.]) + AC_DEFINE(EH_FRAME_FLAGS, "a", + [Define to the flags needed for the .section .eh_frame directive. ]) + else + AC_DEFINE(EH_FRAME_FLAGS, "aw", + [Define to the flags needed for the .section .eh_frame directive. ]) + fi + + AC_CACHE_CHECK([for __attribute__((visibility("hidden")))], + libffi_cv_hidden_visibility_attribute, [ + echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c + libffi_cv_hidden_visibility_attribute=no + if AC_TRY_COMMAND(${CC-cc} -Werror -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then + if egrep '(\.hidden|\.private_extern).*foo' conftest.s >/dev/null; then + libffi_cv_hidden_visibility_attribute=yes + fi + fi + rm -f conftest.* + ]) + if test $libffi_cv_hidden_visibility_attribute = yes; then + AC_DEFINE(HAVE_HIDDEN_VISIBILITY_ATTRIBUTE, 1, + [Define if __attribute__((visibility("hidden"))) is supported.]) + fi +fi + +AC_ARG_ENABLE(docs, + AC_HELP_STRING([--disable-docs], + [Disable building of docs (default: no)]), + [enable_docs=no], + [enable_docs=yes]) +AM_CONDITIONAL(BUILD_DOCS, [test x$enable_docs = xyes]) + +AH_BOTTOM([ +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif +]) + +AC_SUBST(TARGET) +AC_SUBST(TARGETDIR) + +changequote(<,>) +TARGET_OBJ= +for i in $SOURCES; do + TARGET_OBJ="${TARGET_OBJ} src/${TARGETDIR}/"`echo $i | sed 's/[cS]$/lo/'` +done +changequote([,]) +AC_SUBST(TARGET_OBJ) + +AC_SUBST(SHELL) + +AC_ARG_ENABLE(debug, +[ --enable-debug debugging mode], + if test "$enable_debug" = "yes"; then + AC_DEFINE(FFI_DEBUG, 1, [Define this if you want extra debugging.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(structs, +[ --disable-structs omit code for struct support], + if test "$enable_structs" = "no"; then + AC_DEFINE(FFI_NO_STRUCTS, 1, [Define this if you do not want support for aggregate types.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(raw-api, +[ --disable-raw-api make the raw api unavailable], + if test "$enable_raw_api" = "no"; then + AC_DEFINE(FFI_NO_RAW_API, 1, [Define this if you do not want support for the raw API.]) + fi) + +AC_ARG_ENABLE(purify-safety, +[ --enable-purify-safety purify-safe mode], + if test "$enable_purify_safety" = "yes"; then + AC_DEFINE(USING_PURIFY, 1, [Define this if you are using Purify and want to suppress spurious messages.]) + fi) + +AC_ARG_ENABLE(multi-os-directory, +[ --disable-multi-os-directory + disable use of gcc --print-multi-os-directory to change the library installation directory]) + +# These variables are only ever used when we cross-build to X86_WIN32. +# And we only support this with GCC, so... 
+if test "x$GCC" = "xyes"; then + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + toolexecdir='${exec_prefix}'/'$(target_alias)' + toolexeclibdir='${toolexecdir}'/lib + else + toolexecdir='${libdir}'/gcc-lib/'$(target_alias)' + toolexeclibdir='${libdir}' + fi + if test x"$enable_multi_os_directory" != x"no"; then + multi_os_directory=`$CC $CFLAGS -print-multi-os-directory` + case $multi_os_directory in + .) ;; # Avoid trailing /. + ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;; + esac + fi + AC_SUBST(toolexecdir) +else + toolexeclibdir='${libdir}' +fi +AC_SUBST(toolexeclibdir) + +# Check linker support. +LIBFFI_ENABLE_SYMVERS + +AC_CONFIG_COMMANDS(include, [test -d include || mkdir include]) +AC_CONFIG_COMMANDS(src, [ +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR +], [TARGETDIR="$TARGETDIR"]) + +AC_CONFIG_FILES(include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc) + +AC_OUTPUT + +# Copy this file instead of using AC_CONFIG_LINK in order to support +# compiling with MSVC, which won't understand cygwin style symlinks. +cp ${srcdir}/src/$TARGETDIR/ffitarget.h include/ffitarget.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2.host b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2.host new file mode 100644 index 0000000..055e955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure 2.host @@ -0,0 +1,306 @@ +# configure.host +# +# This shell script handles all host based configuration for libffi. +# + +# THIS TABLE IS SORTED. KEEP IT THAT WAY. +# Most of the time we can define all the variables all at once... +case "${host}" in + aarch64*-*-cygwin* | aarch64*-*-mingw* | aarch64*-*-win* ) + TARGET=ARM_WIN64; TARGETDIR=aarch64 + MSVC=1 + ;; + + aarch64*-*-*) + TARGET=AARCH64; TARGETDIR=aarch64 + SOURCES="ffi.c sysv.S" + ;; + + alpha*-*-*) + TARGET=ALPHA; TARGETDIR=alpha; + # Support 128-bit long double, changeable via command-line switch. + HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)' + SOURCES="ffi.c osf.S" + ;; + + arc*-*-*) + TARGET=ARC; TARGETDIR=arc + SOURCES="ffi.c arcompact.S" + ;; + + arm*-*-cygwin* | arm*-*-mingw* | arm*-*-win* ) + TARGET=ARM_WIN32; TARGETDIR=arm + MSVC=1 + ;; + + arm*-*-*) + TARGET=ARM; TARGETDIR=arm + SOURCES="ffi.c sysv.S" + ;; + + avr32*-*-*) + TARGET=AVR32; TARGETDIR=avr32 + SOURCES="ffi.c sysv.S" + ;; + + bfin*) + TARGET=BFIN; TARGETDIR=bfin + SOURCES="ffi.c sysv.S" + ;; + + cris-*-*) + TARGET=LIBFFI_CRIS; TARGETDIR=cris + SOURCES="ffi.c sysv.S" + ;; + + frv-*-*) + TARGET=FRV; TARGETDIR=frv + SOURCES="ffi.c eabi.S" + ;; + + hppa*-*-linux* | parisc*-*-linux* | hppa*-*-openbsd*) + TARGET=PA_LINUX; TARGETDIR=pa + SOURCES="ffi.c linux.S" + ;; + hppa*64-*-hpux*) + TARGET=PA64_HPUX; TARGETDIR=pa + ;; + hppa*-*-hpux*) + TARGET=PA_HPUX; TARGETDIR=pa + SOURCES="ffi.c hpux32.S" + ;; + + i?86-*-freebsd* | i?86-*-openbsd*) + TARGET=X86_FREEBSD; TARGETDIR=x86 + ;; + + i?86-*-cygwin* | i?86-*-mingw* | i?86-*-win* | i?86-*-os2* | i?86-*-interix* \ + | x86_64-*-cygwin* | x86_64-*-mingw* | x86_64-*-win* ) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_WIN32 + else + TARGET=X86_WIN64 + fi + if test "${ax_cv_c_compiler_vendor}" = "microsoft"; then + MSVC=1 + fi + # All mingw/cygwin/win32 builds require -no-undefined for sharedlib. 
+ # We must also check with_cross_host to decide if this is a native + # or cross-build and select where to install dlls appropriately. + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"'; + else + AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"'; + fi + ;; + + i?86-*-darwin* | x86_64-*-darwin* | i?86-*-ios | x86_64-*-ios) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_DARWIN + else + TARGET=X86_64 + fi + ;; + + i?86-*-* | x86_64-*-* | amd64-*) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + case "$host" in + x86_64-*x32|x86_64-x32-*) + TARGET_X32=yes + TARGET=X86_64 + ;; + *) + echo 'int foo (void) { return __x86_64__; }' > conftest.c + if $CC $CFLAGS -Werror -S conftest.c -o conftest.s > /dev/null 2>&1; then + TARGET_X32=yes + TARGET=X86_64 + else + TARGET=X86; + fi + rm -f conftest.* + ;; + esac + else + TARGET=X86_64; + fi + ;; + + ia64*-*-*) + TARGET=IA64; TARGETDIR=ia64 + SOURCES="ffi.c unix.S" + ;; + + m32r*-*-*) + TARGET=M32R; TARGETDIR=m32r + SOURCES="ffi.c sysv.S" + ;; + + m68k-*-*) + TARGET=M68K; TARGETDIR=m68k + SOURCES="ffi.c sysv.S" + ;; + + m88k-*-*) + TARGET=M88K; TARGETDIR=m88k + SOURCES="ffi.c obsd.S" + ;; + + microblaze*-*-*) + TARGET=MICROBLAZE; TARGETDIR=microblaze + SOURCES="ffi.c sysv.S" + ;; + + moxie-*-*) + TARGET=MOXIE; TARGETDIR=moxie + SOURCES="ffi.c eabi.S" + ;; + + metag-*-*) + TARGET=METAG; TARGETDIR=metag + SOURCES="ffi.c sysv.S" + ;; + + mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*) + TARGET=MIPS; TARGETDIR=mips + ;; + mips*-*linux* | mips*-*-openbsd*) + # Support 128-bit long double for NewABI. + HAVE_LONG_DOUBLE='defined(__mips64)' + TARGET=MIPS; TARGETDIR=mips + ;; + + nios2*-linux*) + TARGET=NIOS2; TARGETDIR=nios2 + SOURCES="ffi.c sysv.S" + ;; + + or1k*-*-*) + TARGET=OR1K; TARGETDIR=or1k + SOURCES="ffi.c sysv.S" + ;; + + powerpc*-*-linux* | powerpc-*-sysv*) + TARGET=POWERPC; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc-*-amigaos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-eabi*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-beos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-darwin* | powerpc64-*-darwin*) + TARGET=POWERPC_DARWIN; TARGETDIR=powerpc + ;; + powerpc-*-aix* | rs6000-*-aix*) + TARGET=POWERPC_AIX; TARGETDIR=powerpc + ;; + powerpc-*-freebsd* | powerpc-*-openbsd* | powerpc-*-netbsd*) + TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc64-*-freebsd*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc*-*-rtems*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + + riscv*-*) + TARGET=RISCV; TARGETDIR=riscv + SOURCES="ffi.c sysv.S" + ;; + + s390-*-* | s390x-*-*) + TARGET=S390; TARGETDIR=s390 + SOURCES="ffi.c sysv.S" + ;; + + sh-*-* | sh[34]*-*-*) + TARGET=SH; TARGETDIR=sh + SOURCES="ffi.c sysv.S" + ;; + sh64-*-* | sh5*-*-*) + TARGET=SH64; TARGETDIR=sh64 + SOURCES="ffi.c sysv.S" + ;; + + sparc*-*-*) + TARGET=SPARC; TARGETDIR=sparc + SOURCES="ffi.c ffi64.c v8.S v9.S" + ;; + + tile*-*) + TARGET=TILE; TARGETDIR=tile + SOURCES="ffi.c tile.S" + ;; + + vax-*-*) + TARGET=VAX; TARGETDIR=vax + SOURCES="ffi.c elfbsd.S" + ;; + + xtensa*-*) + TARGET=XTENSA; TARGETDIR=xtensa + SOURCES="ffi.c sysv.S" + ;; +esac + +# ... but some of the cases above share configury. 
+case "${TARGET}" in + ARM_WIN32) + SOURCES="ffi.c sysv_msvc_arm32.S" + ;; + ARM_WIN64) + SOURCES="ffi.c win64_armasm.S" + ;; + MIPS) + SOURCES="ffi.c o32.S n32.S" + ;; + POWERPC) + SOURCES="ffi.c ffi_sysv.c ffi_linux64.c sysv.S ppc_closure.S" + SOURCES="${SOURCES} linux64.S linux64_closure.S" + ;; + POWERPC_AIX) + SOURCES="ffi_darwin.c aix.S aix_closure.S" + ;; + POWERPC_DARWIN) + SOURCES="ffi_darwin.c darwin.S darwin_closure.S" + ;; + POWERPC_FREEBSD) + SOURCES="ffi.c ffi_sysv.c sysv.S ppc_closure.S" + ;; + X86 | X86_DARWIN | X86_FREEBSD | X86_WIN32) + if test "$MSVC" = 1; then + SOURCES="ffi.c sysv_intel.S" + else + SOURCES="ffi.c sysv.S" + fi + ;; + X86_64) + if test x"$TARGET_X32" = xyes; then + SOURCES="ffi64.c unix64.S" + else + SOURCES="ffi64.c unix64.S ffiw64.c win64.S" + fi + ;; + X86_WIN64) + if test "$MSVC" = 1; then + SOURCES="ffiw64.c win64_intel.S" + else + SOURCES="ffiw64.c win64.S" + fi + ;; +esac + +# If we failed to configure SOURCES, we can't do anything. +if test -z "${SOURCES}"; then + UNSUPPORTED=1 +fi diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.ac b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.ac new file mode 100644 index 0000000..3ca1f4f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.ac @@ -0,0 +1,410 @@ +dnl Process this with autoconf to create configure + +AC_PREREQ(2.68) + +AC_INIT([libffi], [3.3], [http://github.com/libffi/libffi/issues]) +AC_CONFIG_HEADERS([fficonfig.h]) + +AC_CANONICAL_SYSTEM +target_alias=${target_alias-$host_alias} + +case "${host}" in + frv*-elf) + LDFLAGS=`echo $LDFLAGS | sed "s/\-B[^ ]*libgloss\/frv\///"`\ -B`pwd`/../libgloss/frv/ + ;; +esac + +AX_ENABLE_BUILDDIR + +AM_INIT_AUTOMAKE + +# The same as in boehm-gc and libstdc++. Have to borrow it from there. +# We must force CC to /not/ be precious variables; otherwise +# the wrong, non-multilib-adjusted value will be used in multilibs. +# As a side effect, we have to subst CFLAGS ourselves. +# Also save and restore CFLAGS, since AC_PROG_CC will come up with +# defaults of its own if none are provided. + +m4_rename([_AC_ARG_VAR_PRECIOUS],[real_PRECIOUS]) +m4_define([_AC_ARG_VAR_PRECIOUS],[]) +save_CFLAGS=$CFLAGS +AC_PROG_CC +AC_PROG_CXX +CFLAGS=$save_CFLAGS +m4_undefine([_AC_ARG_VAR_PRECIOUS]) +m4_rename_force([real_PRECIOUS],[_AC_ARG_VAR_PRECIOUS]) + +AC_SUBST(CFLAGS) + +AM_PROG_AS +AM_PROG_CC_C_O +AC_PROG_LIBTOOL +AC_CONFIG_MACRO_DIR([m4]) + +# Test for 64-bit build. +AC_CHECK_SIZEOF([size_t]) + +AX_COMPILER_VENDOR +AX_CC_MAXOPT +# The AX_CFLAGS_WARN_ALL macro doesn't currently work for sunpro +# compiler. 
+if test "$ax_cv_c_compiler_vendor" != "sun"; then + AX_CFLAGS_WARN_ALL +fi + +if test "x$GCC" = "xyes"; then + CFLAGS="$CFLAGS -fexceptions" +fi + +cat > local.exp < conftest.s + if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then + libffi_cv_as_x86_pcrel=yes + fi + ]) + if test "x$libffi_cv_as_x86_pcrel" = xyes; then + AC_DEFINE(HAVE_AS_X86_PCREL, 1, + [Define if your assembler supports PC relative relocs.]) + fi + ;; + + S390) + AC_CACHE_CHECK([compiler uses zarch features], + libffi_cv_as_s390_zarch, [ + libffi_cv_as_s390_zarch=no + echo 'void foo(void) { bar(); bar(); }' > conftest.c + if $CC $CFLAGS -S conftest.c > /dev/null 2>&1; then + if grep -q brasl conftest.s; then + libffi_cv_as_s390_zarch=yes + fi + fi + ]) + if test "x$libffi_cv_as_s390_zarch" = xyes; then + AC_DEFINE(HAVE_AS_S390_ZARCH, 1, + [Define if the compiler uses zarch features.]) + fi + ;; +esac + +AC_CACHE_CHECK([whether compiler supports pointer authentication], + libffi_cv_as_ptrauth, [ + libffi_cv_as_ptrauth=unknown + AC_TRY_COMPILE(,[ +#ifdef __clang__ +# if __has_feature(ptrauth_calls) +# define HAVE_PTRAUTH 1 +# endif +#endif + +#ifndef HAVE_PTRAUTH +# error Pointer authentication not supported +#endif + ], + [libffi_cv_as_ptrauth=yes], + [libffi_cv_as_ptrauth=no]) +]) +if test "x$libffi_cv_as_ptrauth" = xyes; then + AC_DEFINE(HAVE_PTRAUTH, 1, + [Define if your compiler supports pointer authentication.]) +fi + +# On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. +AC_ARG_ENABLE(pax_emutramp, + [ --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC], + if test "$enable_pax_emutramp" = "yes"; then + AC_DEFINE(FFI_MMAP_EXEC_EMUTRAMP_PAX, 1, + [Define this if you want to enable pax emulated trampolines]) + fi) + +LT_SYS_SYMBOL_USCORE +if test "x$sys_symbol_underscore" = xyes; then + AC_DEFINE(SYMBOL_UNDERSCORE, 1, [Define if symbols are underscored.]) +fi + +FFI_EXEC_TRAMPOLINE_TABLE=0 +case "$target" in + *arm*-apple-* | aarch64-apple-*) + FFI_EXEC_TRAMPOLINE_TABLE=1 + AC_DEFINE(FFI_EXEC_TRAMPOLINE_TABLE, 1, + [Cannot use PROT_EXEC on this target, so, we revert to + alternative means]) + ;; + *-apple-* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris* | *-linux-android*) + AC_DEFINE(FFI_MMAP_EXEC_WRIT, 1, + [Cannot use malloc on this target, so, we revert to + alternative means]) + ;; +esac +AM_CONDITIONAL(FFI_EXEC_TRAMPOLINE_TABLE, test x$FFI_EXEC_TRAMPOLINE_TABLE = x1) +AC_SUBST(FFI_EXEC_TRAMPOLINE_TABLE) + +if test x$TARGET = xX86_64; then + AC_CACHE_CHECK([toolchain supports unwind section type], + libffi_cv_as_x86_64_unwind_section_type, [ + cat > conftest1.s << EOF +.text +.globl foo +foo: +jmp bar +.section .eh_frame,"a",@unwind +bar: +EOF + + cat > conftest2.c << EOF +extern void foo(); +int main(){foo();} +EOF + + libffi_cv_as_x86_64_unwind_section_type=no + # we ensure that we can compile _and_ link an assembly file containing an @unwind section + # since the compiler can support it and not the linker (ie old binutils) + if $CC -Wa,--fatal-warnings $CFLAGS -c conftest1.s > /dev/null 2>&1 && \ + $CC conftest2.c conftest1.o > /dev/null 2>&1 ; then + libffi_cv_as_x86_64_unwind_section_type=yes + fi + ]) + if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then + AC_DEFINE(HAVE_AS_X86_64_UNWIND_SECTION_TYPE, 1, + [Define if your assembler supports unwind section type.]) + fi +fi + +if test "x$GCC" = "xyes"; then + AC_CACHE_CHECK([whether .eh_frame section should be read-only], + libffi_cv_ro_eh_frame, [ + libffi_cv_ro_eh_frame=yes + 
echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c + if $CC $CFLAGS -c -fpic -fexceptions -o conftest.o conftest.c > /dev/null 2>&1; then + if readelf -WS conftest.o | grep -q -n 'eh_frame .* WA'; then + libffi_cv_ro_eh_frame=no + fi + fi + rm -f conftest.* + ]) + if test "x$libffi_cv_ro_eh_frame" = xyes; then + AC_DEFINE(HAVE_RO_EH_FRAME, 1, + [Define if .eh_frame sections should be read-only.]) + AC_DEFINE(EH_FRAME_FLAGS, "a", + [Define to the flags needed for the .section .eh_frame directive. ]) + else + AC_DEFINE(EH_FRAME_FLAGS, "aw", + [Define to the flags needed for the .section .eh_frame directive. ]) + fi + + AC_CACHE_CHECK([for __attribute__((visibility("hidden")))], + libffi_cv_hidden_visibility_attribute, [ + echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c + libffi_cv_hidden_visibility_attribute=no + if AC_TRY_COMMAND(${CC-cc} -Werror -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then + if egrep '(\.hidden|\.private_extern).*foo' conftest.s >/dev/null; then + libffi_cv_hidden_visibility_attribute=yes + fi + fi + rm -f conftest.* + ]) + if test $libffi_cv_hidden_visibility_attribute = yes; then + AC_DEFINE(HAVE_HIDDEN_VISIBILITY_ATTRIBUTE, 1, + [Define if __attribute__((visibility("hidden"))) is supported.]) + fi +fi + +AC_ARG_ENABLE(docs, + AC_HELP_STRING([--disable-docs], + [Disable building of docs (default: no)]), + [enable_docs=no], + [enable_docs=yes]) +AM_CONDITIONAL(BUILD_DOCS, [test x$enable_docs = xyes]) + +AH_BOTTOM([ +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif +]) + +AC_SUBST(TARGET) +AC_SUBST(TARGETDIR) + +changequote(<,>) +TARGET_OBJ= +for i in $SOURCES; do + TARGET_OBJ="${TARGET_OBJ} src/${TARGETDIR}/"`echo $i | sed 's/[cS]$/lo/'` +done +changequote([,]) +AC_SUBST(TARGET_OBJ) + +AC_SUBST(SHELL) + +AC_ARG_ENABLE(debug, +[ --enable-debug debugging mode], + if test "$enable_debug" = "yes"; then + AC_DEFINE(FFI_DEBUG, 1, [Define this if you want extra debugging.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(structs, +[ --disable-structs omit code for struct support], + if test "$enable_structs" = "no"; then + AC_DEFINE(FFI_NO_STRUCTS, 1, [Define this if you do not want support for aggregate types.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(raw-api, +[ --disable-raw-api make the raw api unavailable], + if test "$enable_raw_api" = "no"; then + AC_DEFINE(FFI_NO_RAW_API, 1, [Define this if you do not want support for the raw API.]) + fi) + +AC_ARG_ENABLE(purify-safety, +[ --enable-purify-safety purify-safe mode], + if test "$enable_purify_safety" = "yes"; then + AC_DEFINE(USING_PURIFY, 1, [Define this if you are using Purify and want to suppress spurious messages.]) + fi) + +AC_ARG_ENABLE(multi-os-directory, +[ --disable-multi-os-directory + disable use of gcc --print-multi-os-directory to change the library installation directory]) + +# These variables are only ever used when we cross-build to X86_WIN32. +# And we only support this with GCC, so... 
+if test "x$GCC" = "xyes"; then + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + toolexecdir='${exec_prefix}'/'$(target_alias)' + toolexeclibdir='${toolexecdir}'/lib + else + toolexecdir='${libdir}'/gcc-lib/'$(target_alias)' + toolexeclibdir='${libdir}' + fi + if test x"$enable_multi_os_directory" != x"no"; then + multi_os_directory=`$CC $CFLAGS -print-multi-os-directory` + case $multi_os_directory in + .) ;; # Avoid trailing /. + ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;; + esac + fi + AC_SUBST(toolexecdir) +else + toolexeclibdir='${libdir}' +fi +AC_SUBST(toolexeclibdir) + +# Check linker support. +LIBFFI_ENABLE_SYMVERS + +AC_CONFIG_COMMANDS(include, [test -d include || mkdir include]) +AC_CONFIG_COMMANDS(src, [ +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR +], [TARGETDIR="$TARGETDIR"]) + +AC_CONFIG_FILES(include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc) + +AC_OUTPUT + +# Copy this file instead of using AC_CONFIG_LINK in order to support +# compiling with MSVC, which won't understand cygwin style symlinks. +cp ${srcdir}/src/$TARGETDIR/ffitarget.h include/ffitarget.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.host b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.host new file mode 100644 index 0000000..055e955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.host @@ -0,0 +1,306 @@ +# configure.host +# +# This shell script handles all host based configuration for libffi. +# + +# THIS TABLE IS SORTED. KEEP IT THAT WAY. +# Most of the time we can define all the variables all at once... +case "${host}" in + aarch64*-*-cygwin* | aarch64*-*-mingw* | aarch64*-*-win* ) + TARGET=ARM_WIN64; TARGETDIR=aarch64 + MSVC=1 + ;; + + aarch64*-*-*) + TARGET=AARCH64; TARGETDIR=aarch64 + SOURCES="ffi.c sysv.S" + ;; + + alpha*-*-*) + TARGET=ALPHA; TARGETDIR=alpha; + # Support 128-bit long double, changeable via command-line switch. + HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)' + SOURCES="ffi.c osf.S" + ;; + + arc*-*-*) + TARGET=ARC; TARGETDIR=arc + SOURCES="ffi.c arcompact.S" + ;; + + arm*-*-cygwin* | arm*-*-mingw* | arm*-*-win* ) + TARGET=ARM_WIN32; TARGETDIR=arm + MSVC=1 + ;; + + arm*-*-*) + TARGET=ARM; TARGETDIR=arm + SOURCES="ffi.c sysv.S" + ;; + + avr32*-*-*) + TARGET=AVR32; TARGETDIR=avr32 + SOURCES="ffi.c sysv.S" + ;; + + bfin*) + TARGET=BFIN; TARGETDIR=bfin + SOURCES="ffi.c sysv.S" + ;; + + cris-*-*) + TARGET=LIBFFI_CRIS; TARGETDIR=cris + SOURCES="ffi.c sysv.S" + ;; + + frv-*-*) + TARGET=FRV; TARGETDIR=frv + SOURCES="ffi.c eabi.S" + ;; + + hppa*-*-linux* | parisc*-*-linux* | hppa*-*-openbsd*) + TARGET=PA_LINUX; TARGETDIR=pa + SOURCES="ffi.c linux.S" + ;; + hppa*64-*-hpux*) + TARGET=PA64_HPUX; TARGETDIR=pa + ;; + hppa*-*-hpux*) + TARGET=PA_HPUX; TARGETDIR=pa + SOURCES="ffi.c hpux32.S" + ;; + + i?86-*-freebsd* | i?86-*-openbsd*) + TARGET=X86_FREEBSD; TARGETDIR=x86 + ;; + + i?86-*-cygwin* | i?86-*-mingw* | i?86-*-win* | i?86-*-os2* | i?86-*-interix* \ + | x86_64-*-cygwin* | x86_64-*-mingw* | x86_64-*-win* ) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_WIN32 + else + TARGET=X86_WIN64 + fi + if test "${ax_cv_c_compiler_vendor}" = "microsoft"; then + MSVC=1 + fi + # All mingw/cygwin/win32 builds require -no-undefined for sharedlib. + # We must also check with_cross_host to decide if this is a native + # or cross-build and select where to install dlls appropriately. 
+ if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"'; + else + AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"'; + fi + ;; + + i?86-*-darwin* | x86_64-*-darwin* | i?86-*-ios | x86_64-*-ios) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_DARWIN + else + TARGET=X86_64 + fi + ;; + + i?86-*-* | x86_64-*-* | amd64-*) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + case "$host" in + x86_64-*x32|x86_64-x32-*) + TARGET_X32=yes + TARGET=X86_64 + ;; + *) + echo 'int foo (void) { return __x86_64__; }' > conftest.c + if $CC $CFLAGS -Werror -S conftest.c -o conftest.s > /dev/null 2>&1; then + TARGET_X32=yes + TARGET=X86_64 + else + TARGET=X86; + fi + rm -f conftest.* + ;; + esac + else + TARGET=X86_64; + fi + ;; + + ia64*-*-*) + TARGET=IA64; TARGETDIR=ia64 + SOURCES="ffi.c unix.S" + ;; + + m32r*-*-*) + TARGET=M32R; TARGETDIR=m32r + SOURCES="ffi.c sysv.S" + ;; + + m68k-*-*) + TARGET=M68K; TARGETDIR=m68k + SOURCES="ffi.c sysv.S" + ;; + + m88k-*-*) + TARGET=M88K; TARGETDIR=m88k + SOURCES="ffi.c obsd.S" + ;; + + microblaze*-*-*) + TARGET=MICROBLAZE; TARGETDIR=microblaze + SOURCES="ffi.c sysv.S" + ;; + + moxie-*-*) + TARGET=MOXIE; TARGETDIR=moxie + SOURCES="ffi.c eabi.S" + ;; + + metag-*-*) + TARGET=METAG; TARGETDIR=metag + SOURCES="ffi.c sysv.S" + ;; + + mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*) + TARGET=MIPS; TARGETDIR=mips + ;; + mips*-*linux* | mips*-*-openbsd*) + # Support 128-bit long double for NewABI. + HAVE_LONG_DOUBLE='defined(__mips64)' + TARGET=MIPS; TARGETDIR=mips + ;; + + nios2*-linux*) + TARGET=NIOS2; TARGETDIR=nios2 + SOURCES="ffi.c sysv.S" + ;; + + or1k*-*-*) + TARGET=OR1K; TARGETDIR=or1k + SOURCES="ffi.c sysv.S" + ;; + + powerpc*-*-linux* | powerpc-*-sysv*) + TARGET=POWERPC; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc-*-amigaos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-eabi*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-beos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-darwin* | powerpc64-*-darwin*) + TARGET=POWERPC_DARWIN; TARGETDIR=powerpc + ;; + powerpc-*-aix* | rs6000-*-aix*) + TARGET=POWERPC_AIX; TARGETDIR=powerpc + ;; + powerpc-*-freebsd* | powerpc-*-openbsd* | powerpc-*-netbsd*) + TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc64-*-freebsd*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc*-*-rtems*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + + riscv*-*) + TARGET=RISCV; TARGETDIR=riscv + SOURCES="ffi.c sysv.S" + ;; + + s390-*-* | s390x-*-*) + TARGET=S390; TARGETDIR=s390 + SOURCES="ffi.c sysv.S" + ;; + + sh-*-* | sh[34]*-*-*) + TARGET=SH; TARGETDIR=sh + SOURCES="ffi.c sysv.S" + ;; + sh64-*-* | sh5*-*-*) + TARGET=SH64; TARGETDIR=sh64 + SOURCES="ffi.c sysv.S" + ;; + + sparc*-*-*) + TARGET=SPARC; TARGETDIR=sparc + SOURCES="ffi.c ffi64.c v8.S v9.S" + ;; + + tile*-*) + TARGET=TILE; TARGETDIR=tile + SOURCES="ffi.c tile.S" + ;; + + vax-*-*) + TARGET=VAX; TARGETDIR=vax + SOURCES="ffi.c elfbsd.S" + ;; + + xtensa*-*) + TARGET=XTENSA; TARGETDIR=xtensa + SOURCES="ffi.c sysv.S" + ;; +esac + +# ... but some of the cases above share configury. 
+case "${TARGET}" in + ARM_WIN32) + SOURCES="ffi.c sysv_msvc_arm32.S" + ;; + ARM_WIN64) + SOURCES="ffi.c win64_armasm.S" + ;; + MIPS) + SOURCES="ffi.c o32.S n32.S" + ;; + POWERPC) + SOURCES="ffi.c ffi_sysv.c ffi_linux64.c sysv.S ppc_closure.S" + SOURCES="${SOURCES} linux64.S linux64_closure.S" + ;; + POWERPC_AIX) + SOURCES="ffi_darwin.c aix.S aix_closure.S" + ;; + POWERPC_DARWIN) + SOURCES="ffi_darwin.c darwin.S darwin_closure.S" + ;; + POWERPC_FREEBSD) + SOURCES="ffi.c ffi_sysv.c sysv.S ppc_closure.S" + ;; + X86 | X86_DARWIN | X86_FREEBSD | X86_WIN32) + if test "$MSVC" = 1; then + SOURCES="ffi.c sysv_intel.S" + else + SOURCES="ffi.c sysv.S" + fi + ;; + X86_64) + if test x"$TARGET_X32" = xyes; then + SOURCES="ffi64.c unix64.S" + else + SOURCES="ffi64.c unix64.S ffiw64.c win64.S" + fi + ;; + X86_WIN64) + if test "$MSVC" = 1; then + SOURCES="ffiw64.c win64_intel.S" + else + SOURCES="ffiw64.c win64.S" + fi + ;; +esac + +# If we failed to configure SOURCES, we can't do anything. +if test -z "${SOURCES}"; then + UNSUPPORTED=1 +fi diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/fficonfig.h 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/fficonfig.h 2.in new file mode 100644 index 0000000..aa2d90a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/fficonfig.h 2.in @@ -0,0 +1,217 @@ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +#undef AC_APPLE_UNIVERSAL_BUILD + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +#undef CRAY_STACKSEG_END + +/* Define to 1 if using `alloca.c'. */ +#undef C_ALLOCA + +/* Define to the flags needed for the .section .eh_frame directive. */ +#undef EH_FRAME_FLAGS + +/* Define this if you want extra debugging. */ +#undef FFI_DEBUG + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +#undef FFI_EXEC_TRAMPOLINE_TABLE + +/* Define this if you want to enable pax emulated trampolines */ +#undef FFI_MMAP_EXEC_EMUTRAMP_PAX + +/* Cannot use malloc on this target, so, we revert to alternative means */ +#undef FFI_MMAP_EXEC_WRIT + +/* Define this if you do not want support for the raw API. */ +#undef FFI_NO_RAW_API + +/* Define this if you do not want support for aggregate types. */ +#undef FFI_NO_STRUCTS + +/* Define to 1 if you have `alloca', as a function or macro. */ +#undef HAVE_ALLOCA + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#undef HAVE_ALLOCA_H + +/* Define if your assembler supports .cfi_* directives. */ +#undef HAVE_AS_CFI_PSEUDO_OP + +/* Define if your assembler supports .register. */ +#undef HAVE_AS_REGISTER_PSEUDO_OP + +/* Define if the compiler uses zarch features. */ +#undef HAVE_AS_S390_ZARCH + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +#undef HAVE_AS_SPARC_UA_PCREL + +/* Define if your assembler supports unwind section type. */ +#undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE + +/* Define if your assembler supports PC relative relocs. */ +#undef HAVE_AS_X86_PCREL + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#undef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_INTTYPES_H + +/* Define if you have the long double type and it is bigger than a double */ +#undef HAVE_LONG_DOUBLE + +/* Define if you support more than one size of the long double type */ +#undef HAVE_LONG_DOUBLE_VARIANT + +/* Define to 1 if you have the `memcpy' function. */ +#undef HAVE_MEMCPY + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `mkostemp' function. */ +#undef HAVE_MKOSTEMP + +/* Define to 1 if you have the `mmap' function. */ +#undef HAVE_MMAP + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#undef HAVE_MMAP_ANON + +/* Define if mmap of /dev/zero works. */ +#undef HAVE_MMAP_DEV_ZERO + +/* Define if read-only mmap of a plain file works. */ +#undef HAVE_MMAP_FILE + +/* Define if your compiler supports pointer authentication. */ +#undef HAVE_PTRAUTH + +/* Define if .eh_frame sections should be read-only. */ +#undef HAVE_RO_EH_FRAME + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_MMAN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +#undef LIBFFI_GNU_SYMBOL_VERSIONING + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#undef LT_OBJDIR + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* The size of `double', as computed by sizeof. */ +#undef SIZEOF_DOUBLE + +/* The size of `long double', as computed by sizeof. */ +#undef SIZEOF_LONG_DOUBLE + +/* The size of `size_t', as computed by sizeof. */ +#undef SIZEOF_SIZE_T + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +#undef STACK_DIRECTION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Define if symbols are underscored. */ +#undef SYMBOL_UNDERSCORE + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +#undef USING_PURIFY + +/* Version number of package */ +#undef VERSION + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +# undef WORDS_BIGENDIAN +# endif +#endif + +/* Define to `unsigned int' if does not define. 
*/ +#undef size_t + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers 2.py b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers 2.py new file mode 100755 index 0000000..b8c28e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers 2.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +import subprocess +import os +import errno +import collections +import glob +import argparse + +class Platform(object): + pass + +class simulator_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'i386' + triple = 'i386-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class simulator64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'x86_64' + triple = 'x86_64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +class device_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'armv7' + triple = 'arm-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm__\n\n" + suffix = "\n\n#endif" + src_dir = 'arm' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class device64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'arm64' + triple = 'aarch64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm64__\n\n" + suffix = "\n\n#endif" + src_dir = 'aarch64' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class desktop32_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'i386' + triple = 'i386-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + + +class desktop64_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'x86_64' + triple = 'x86_64-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: # Python >2.5 + if exc.errno != errno.EEXIST: + raise + + +def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''): + mkdir_p(dst_dir) + out_filename = filename + + if file_suffix: + if filename in ['internal64.h', 'asmnames.h', 'internal.h']: + out_filename = filename + else: + split_name = os.path.splitext(filename) + out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1]) + + with open(os.path.join(src_dir, filename)) as in_file: + with open(os.path.join(dst_dir, out_filename), 'w') as out_file: + if prefix: + out_file.write(prefix) + + out_file.write(in_file.read()) 
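+            # The prefix/suffix strings supplied by each Platform class wrap the
+            # copied source in an architecture guard (e.g. "#ifdef __x86_64__" ...
+            # "#endif"), so per-arch copies of one file can coexist in a build tree.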
+ + if suffix: + out_file.write(suffix) + + +def list_files(src_dir, pattern=None, filelist=None): + if pattern: filelist = glob.iglob(os.path.join(src_dir, pattern)) + for file in filelist: + yield os.path.basename(file) + + +def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None): + for filename in list_files(src_dir, pattern=pattern, filelist=filelist): + move_file(src_dir, dst_dir, filename, file_suffix=file_suffix, prefix=prefix, suffix=suffix) + + +def copy_src_platform_files(platform): + src_dir = os.path.join('src', platform.src_dir) + dst_dir = os.path.join(platform.directory, 'src', platform.src_dir) + copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix) + + +def build_target(platform, platform_headers): + def xcrun_cmd(cmd): + return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch) + + tag='%s-%s' % (platform.sdk, platform.arch) + build_dir = 'build_%s' % tag + mkdir_p(build_dir) + env = dict(CC=xcrun_cmd('clang'), + LD=xcrun_cmd('ld'), + CFLAGS='%s -fembed-bitcode' % (platform.version_min)) + working_dir = os.getcwd() + try: + os.chdir(build_dir) + subprocess.check_call(['../configure', '-host', platform.triple], env=env) + finally: + os.chdir(working_dir) + + for src_dir in [build_dir, os.path.join(build_dir, 'include')]: + copy_files(src_dir, + os.path.join(platform.directory, 'include'), + pattern='*.h', + file_suffix=platform.arch, + prefix=platform.prefix, + suffix=platform.suffix) + + for filename in list_files(src_dir, pattern='*.h'): + platform_headers[filename].add((platform.prefix, platform.arch, platform.suffix)) + + +def generate_source_and_headers(generate_osx=True, generate_ios=True): + copy_files('src', 'darwin_common/src', pattern='*.c') + copy_files('include', 'darwin_common/include', pattern='*.h') + + if generate_ios: + copy_src_platform_files(simulator_platform) + copy_src_platform_files(simulator64_platform) + copy_src_platform_files(device_platform) + copy_src_platform_files(device64_platform) + if generate_osx: + copy_src_platform_files(desktop64_platform) + + platform_headers = collections.defaultdict(set) + + if generate_ios: + build_target(simulator_platform, platform_headers) + build_target(simulator64_platform, platform_headers) + build_target(device_platform, platform_headers) + build_target(device64_platform, platform_headers) + if generate_osx: + build_target(desktop64_platform, platform_headers) + + mkdir_p('darwin_common/include') + for header_name, tag_tuples in platform_headers.iteritems(): + basename, suffix = os.path.splitext(header_name) + with open(os.path.join('darwin_common/include', header_name), 'w') as header: + for tag_tuple in tag_tuples: + header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2])) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--only-ios', action='store_true', default=False) + parser.add_argument('--only-osx', action='store_true', default=False) + args = parser.parse_args() + + generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers.py b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers.py new file mode 100755 index 0000000..b8c28e6 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +import subprocess +import os +import errno +import collections +import glob +import argparse + +class Platform(object): + pass + +class simulator_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'i386' + triple = 'i386-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class simulator64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'x86_64' + triple = 'x86_64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +class device_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'armv7' + triple = 'arm-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm__\n\n" + suffix = "\n\n#endif" + src_dir = 'arm' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class device64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'arm64' + triple = 'aarch64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm64__\n\n" + suffix = "\n\n#endif" + src_dir = 'aarch64' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class desktop32_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'i386' + triple = 'i386-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + + +class desktop64_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'x86_64' + triple = 'x86_64-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: # Python >2.5 + if exc.errno != errno.EEXIST: + raise + + +def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''): + mkdir_p(dst_dir) + out_filename = filename + + if file_suffix: + if filename in ['internal64.h', 'asmnames.h', 'internal.h']: + out_filename = filename + else: + split_name = os.path.splitext(filename) + out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1]) + + with open(os.path.join(src_dir, filename)) as in_file: + with open(os.path.join(dst_dir, out_filename), 'w') as out_file: + if prefix: + out_file.write(prefix) + + out_file.write(in_file.read()) + + if suffix: + out_file.write(suffix) + + +def list_files(src_dir, pattern=None, filelist=None): + if pattern: filelist = glob.iglob(os.path.join(src_dir, pattern)) + for file in filelist: + yield os.path.basename(file) + + +def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None): + for filename in list_files(src_dir, pattern=pattern, filelist=filelist): + move_file(src_dir, dst_dir, filename, file_suffix=file_suffix, prefix=prefix, suffix=suffix) + + +def copy_src_platform_files(platform): + src_dir = os.path.join('src', platform.src_dir) + dst_dir = os.path.join(platform.directory, 
'src', platform.src_dir) + copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix) + + +def build_target(platform, platform_headers): + def xcrun_cmd(cmd): + return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch) + + tag='%s-%s' % (platform.sdk, platform.arch) + build_dir = 'build_%s' % tag + mkdir_p(build_dir) + env = dict(CC=xcrun_cmd('clang'), + LD=xcrun_cmd('ld'), + CFLAGS='%s -fembed-bitcode' % (platform.version_min)) + working_dir = os.getcwd() + try: + os.chdir(build_dir) + subprocess.check_call(['../configure', '-host', platform.triple], env=env) + finally: + os.chdir(working_dir) + + for src_dir in [build_dir, os.path.join(build_dir, 'include')]: + copy_files(src_dir, + os.path.join(platform.directory, 'include'), + pattern='*.h', + file_suffix=platform.arch, + prefix=platform.prefix, + suffix=platform.suffix) + + for filename in list_files(src_dir, pattern='*.h'): + platform_headers[filename].add((platform.prefix, platform.arch, platform.suffix)) + + +def generate_source_and_headers(generate_osx=True, generate_ios=True): + copy_files('src', 'darwin_common/src', pattern='*.c') + copy_files('include', 'darwin_common/include', pattern='*.h') + + if generate_ios: + copy_src_platform_files(simulator_platform) + copy_src_platform_files(simulator64_platform) + copy_src_platform_files(device_platform) + copy_src_platform_files(device64_platform) + if generate_osx: + copy_src_platform_files(desktop64_platform) + + platform_headers = collections.defaultdict(set) + + if generate_ios: + build_target(simulator_platform, platform_headers) + build_target(simulator64_platform, platform_headers) + build_target(device_platform, platform_headers) + build_target(device64_platform, platform_headers) + if generate_osx: + build_target(desktop64_platform, platform_headers) + + mkdir_p('darwin_common/include') + for header_name, tag_tuples in platform_headers.iteritems(): + basename, suffix = os.path.splitext(header_name) + with open(os.path.join('darwin_common/include', header_name), 'w') as header: + for tag_tuple in tag_tuples: + header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2])) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--only-ios', action='store_true', default=False) + parser.add_argument('--only-osx', action='store_true', default=False) + args = parser.parse_args() + + generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile 2.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile 2.am new file mode 100644 index 0000000..c59df9f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile 2.am @@ -0,0 +1,9 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +DISTCLEANFILES=ffitarget.h +noinst_HEADERS=ffi_common.h ffi_cfi.h +EXTRA_DIST=ffi.h.in + +nodist_include_HEADERS = ffi.h ffitarget.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile 2.in new file mode 100644 index 0000000..3e9ab5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile 2.in @@ -0,0 +1,605 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. 
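The generate-darwin-source-and-headers.py script vendored above copies each platform's sources into arch-suffixed file names, wraps every copied header in the platform's #ifdef prefix/suffix, runs ./configure once per sdk/arch pair via xcrun, and finally writes umbrella headers into darwin_common/include that dispatch on the compiler's architecture macros. Note it is Python 2 code (it calls dict.iteritems()), so rerunning it requires a python2 interpreter. A sketch of the kind of umbrella header its final loop emits, with hypothetical file names:

    /* darwin_common/include/ffi.h (hypothetical generated output):
       one guarded include per (prefix, arch, suffix) tuple recorded
       in platform_headers. */
    #ifdef __x86_64__

    #include <ffi_x86_64.h>

    #endif
    #ifdef __arm64__

    #include <ffi_arm64.h>

    #endif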
+# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = include +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \ + $(am__DIST_COMMON) 
+mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = ffi.h +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(includedir)" +HEADERS = $(nodist_include_HEADERS) $(noinst_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/ffi.h.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +ALLOCA = @ALLOCA@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AM_LTLDFLAGS = @AM_LTLDFLAGS@ +AM_RUNTESTFLAGS = @AM_RUNTESTFLAGS@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@ +HAVE_LONG_DOUBLE_VARIANT = @HAVE_LONG_DOUBLE_VARIANT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPT_LDFLAGS = @OPT_LDFLAGS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PRTDIAG = @PRTDIAG@ +RANLIB = @RANLIB@ +SECTION_LDFLAGS = @SECTION_LDFLAGS@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +TARGET = @TARGET@ +TARGETDIR = @TARGETDIR@ +TARGET_OBJ = @TARGET_OBJ@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_enable_builddir_sed = @ax_enable_builddir_sed@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ 
+psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sys_symbol_underscore = @sys_symbol_underscore@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +toolexecdir = @toolexecdir@ +toolexeclibdir = @toolexeclibdir@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +DISTCLEANFILES = ffitarget.h +noinst_HEADERS = ffi_common.h ffi_cfi.h +EXTRA_DIST = ffi.h.in +nodist_include_HEADERS = ffi.h ffitarget.h +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign include/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +ffi.h: $(top_builddir)/config.status $(srcdir)/ffi.h.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-nodist_includeHEADERS: $(nodist_include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \ + done + +uninstall-nodist_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: 
ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-nodist_includeHEADERS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-nodist_includeHEADERS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ + clean-libtool cscopelist-am ctags ctags-am distclean \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man \ + install-nodist_includeHEADERS install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ + uninstall-am uninstall-nodist_includeHEADERS + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile.am new file mode 100644 index 0000000..c59df9f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile.am @@ -0,0 +1,9 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +DISTCLEANFILES=ffitarget.h +noinst_HEADERS=ffi_common.h ffi_cfi.h +EXTRA_DIST=ffi.h.in + +nodist_include_HEADERS = ffi.h ffitarget.h diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi.h.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi.h.in new file mode 100644 index 0000000..38885b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi.h.in @@ -0,0 +1,523 @@ +/* -----------------------------------------------------------------*-C-*- + libffi @VERSION@ - Copyright (c) 2011, 2014, 2019 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef @TARGET@ +#define @TARGET@ +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if @HAVE_LONG_DOUBLE@ +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if @HAVE_LONG_DOUBLE@ +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
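The ffi_type and ffi_cif declarations above are the core of the call interface: an aggregate is described by pointing elements at a NULL-terminated list of member types and leaving size and alignment at zero so that ffi_prep_cif can compute them. A minimal sketch of that pattern (not part of the gem, and assuming the usual FFI_DEFAULT_ABI definition from the target-specific ffitarget.h):

    #include <ffi.h>
    #include <stdio.h>

    int main(void)
    {
        /* Describe struct { double d; int i; }: size/alignment left 0 so
           ffi_prep_cif fills them in from the NULL-terminated elements list. */
        ffi_type *elements[] = { &ffi_type_double, &ffi_type_sint32, NULL };
        ffi_type struct_type = { 0, 0, FFI_TYPE_STRUCT, elements };
        ffi_type *args[1]    = { &struct_type };
        ffi_cif cif;

        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, args) == FFI_OK)
            printf("size=%zu align=%u\n",
                   struct_type.size, (unsigned) struct_type.alignment);
        return 0;
    }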
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +#if !FFI_NATIVE_RAW_API +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue) __attribute__((deprecated)); +#endif + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) __attribute__((deprecated)); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +#if defined(PA_LINUX) || defined(PA_HPUX) +#define FFI_CLOSURE_PTR(X) ((void *)((unsigned int)(X) | 2)) +#define FFI_RESTORE_PTR(X) ((void *)((unsigned int)(X) & ~3)) +#else +#define FFI_CLOSURE_PTR(X) (X) +#define FFI_RESTORE_PTR(X) (X) +#endif + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
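The closure API declared above (ffi_closure_alloc, ffi_prep_closure_loc, ffi_closure_free) turns a C handler plus user data into a callable code pointer, which is the reverse direction of ffi_call. A minimal sketch, again assuming the ffi_arg and FFI_DEFAULT_ABI definitions that come from the target-specific ffitarget.h:

    #include <ffi.h>
    #include <stdio.h>

    /* Handler invoked through the generated trampoline: adds user_data
       to the first argument and returns the result. */
    static void handler(ffi_cif *cif, void *ret, void **args, void *user_data)
    {
        int a = *(int *) args[0];
        *(ffi_arg *) ret = a + *(int *) user_data;
    }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *argtypes[1] = { &ffi_type_sint };
        void *code;
        ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
        int bias = 10;

        if (closure &&
            ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) == FFI_OK &&
            ffi_prep_closure_loc(closure, &cif, handler, &bias, code) == FFI_OK)
        {
            int (*fn)(int) = (int (*)(int)) code;
            printf("%d\n", fn(32));   /* prints 42 */
        }
        if (closure)
            ffi_closure_free(closure);
        return 0;
    }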
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +#if !FFI_NATIVE_RAW_API +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) __attribute__((deprecated)); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) __attribute__((deprecated)); +#endif + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if @HAVE_LONG_DOUBLE@ +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi 2.h new file mode 100644 index 0000000..244ce57 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi 2.h @@ -0,0 +1,55 @@ +/* ----------------------------------------------------------------------- + ffi_cfi.h - Copyright (c) 2014 Red Hat, Inc. + + Conditionally assemble cfi directives. Only necessary for building libffi. 
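For the forward call direction, the public entry points ffi_prep_cif and ffi_call declared above are used roughly as follows. This is a sketch based on those declarations, not code shipped in the gem, with FFI_DEFAULT_ABI and ffi_arg assumed to come from the target-specific ffitarget.h:

    #include <ffi.h>
    #include <stdio.h>

    int main(void)
    {
        ffi_cif cif;
        ffi_type *args[1];
        void *values[1];
        char *s;
        ffi_arg rc;

        args[0]   = &ffi_type_pointer;   /* puts takes one char* argument */
        values[0] = &s;

        /* Describe the call: default ABI, 1 argument, int return. */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK)
        {
            s = "Hello from libffi";
            ffi_call(&cif, FFI_FN(puts), &rc, values);  /* rc holds puts' return,
                                                           widened to ffi_arg */
        }
        return 0;
    }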
+ ----------------------------------------------------------------------- */ + +#ifndef FFI_CFI_H +#define FFI_CFI_H + +#ifdef HAVE_AS_CFI_PSEUDO_OP + +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp +# define cfi_escape(...) .cfi_escape __VA_ARGS__ + +#else + +# define cfi_startproc +# define cfi_endproc +# define cfi_def_cfa(reg, off) +# define cfi_def_cfa_register(reg) +# define cfi_def_cfa_offset(off) +# define cfi_adjust_cfa_offset(off) +# define cfi_offset(reg, off) +# define cfi_rel_offset(reg, off) +# define cfi_register(r1, r2) +# define cfi_return_column(reg) +# define cfi_restore(reg) +# define cfi_same_value(reg) +# define cfi_undefined(reg) +# define cfi_remember_state +# define cfi_restore_state +# define cfi_window_save +# define cfi_personality(enc, exp) +# define cfi_lsda(enc, exp) +# define cfi_escape(...) + +#endif /* HAVE_AS_CFI_PSEUDO_OP */ +#endif /* FFI_CFI_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi.h new file mode 100644 index 0000000..244ce57 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi.h @@ -0,0 +1,55 @@ +/* ----------------------------------------------------------------------- + ffi_cfi.h - Copyright (c) 2014 Red Hat, Inc. + + Conditionally assemble cfi directives. Only necessary for building libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_CFI_H +#define FFI_CFI_H + +#ifdef HAVE_AS_CFI_PSEUDO_OP + +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp +# define cfi_escape(...) 
.cfi_escape __VA_ARGS__ + +#else + +# define cfi_startproc +# define cfi_endproc +# define cfi_def_cfa(reg, off) +# define cfi_def_cfa_register(reg) +# define cfi_def_cfa_offset(off) +# define cfi_adjust_cfa_offset(off) +# define cfi_offset(reg, off) +# define cfi_rel_offset(reg, off) +# define cfi_register(r1, r2) +# define cfi_return_column(reg) +# define cfi_restore(reg) +# define cfi_same_value(reg) +# define cfi_undefined(reg) +# define cfi_remember_state +# define cfi_restore_state +# define cfi_window_save +# define cfi_personality(enc, exp) +# define cfi_lsda(enc, exp) +# define cfi_escape(...) + +#endif /* HAVE_AS_CFI_PSEUDO_OP */ +#endif /* FFI_CFI_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common 2.h new file mode 100644 index 0000000..76b9dd6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common 2.h @@ -0,0 +1,153 @@ +/* ----------------------------------------------------------------------- + ffi_common.h - Copyright (C) 2011, 2012, 2013 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc + Copyright (c) 1996 Red Hat, Inc. + + Common internal definitions and macros. Only necessary for building + libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_COMMON_H +#define FFI_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Do not move this. Some versions of AIX are very picky about where + this is positioned. */ +#ifdef __GNUC__ +# if HAVE_ALLOCA_H +# include +# else + /* mingw64 defines this already in malloc.h. */ +# ifndef alloca +# define alloca __builtin_alloca +# endif +# endif +# define MAYBE_UNUSED __attribute__((__unused__)) +#else +# define MAYBE_UNUSED +# if HAVE_ALLOCA_H +# include +# else +# ifdef _AIX +# pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +# ifdef _MSC_VER +# define alloca _alloca +# else +char *alloca (); +# endif +# endif +# endif +# endif +#endif + +/* Check for the existence of memcpy. */ +#if STDC_HEADERS +# include +#else +# ifndef HAVE_MEMCPY +# define memcpy(d, s, n) bcopy ((s), (d), (n)) +# endif +#endif + +#if defined(FFI_DEBUG) +#include +#endif + +#ifdef FFI_DEBUG +void ffi_assert(char *expr, char *file, int line); +void ffi_stop_here(void); +void ffi_type_test(ffi_type *a, char *file, int line); + +#define FFI_ASSERT(x) ((x) ? (void)0 : ffi_assert(#x, __FILE__,__LINE__)) +#define FFI_ASSERT_AT(x, f, l) ((x) ? 0 : ffi_assert(#x, (f), (l))) +#define FFI_ASSERT_VALID_TYPE(x) ffi_type_test (x, __FILE__, __LINE__) +#else +#define FFI_ASSERT(x) +#define FFI_ASSERT_AT(x, f, l) +#define FFI_ASSERT_VALID_TYPE(x) +#endif + +/* v cast to size_t and aligned up to a multiple of a */ +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) +/* v cast to size_t and aligned down to a multiple of a */ +#define FFI_ALIGN_DOWN(v, a) (((size_t) (v)) & -a) + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif); +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs); + + +#if HAVE_LONG_DOUBLE_VARIANT +/* Used to adjust size/alignment of ffi types. 
*/ +void ffi_prep_types (ffi_abi abi); +#endif + +/* Used internally, but overridden by some architectures */ +ffi_status ffi_prep_cif_core(ffi_cif *cif, + ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +/* Translate a data pointer to a code pointer. Needed for closures on + some targets. */ +void *ffi_data_to_code_pointer (void *data) FFI_HIDDEN; + +/* Extended cif, used in callback from assembly routine */ +typedef struct +{ + ffi_cif *cif; + void *rvalue; + void **avalue; +} extended_cif; + +/* Terse sized type definitions. */ +#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C) +typedef unsigned char UINT8; +typedef signed char SINT8; +typedef unsigned short UINT16; +typedef signed short SINT16; +typedef unsigned int UINT32; +typedef signed int SINT32; +# ifdef _MSC_VER +typedef unsigned __int64 UINT64; +typedef signed __int64 SINT64; +# else +# include +typedef uint64_t UINT64; +typedef int64_t SINT64; +# endif +#else +typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); +typedef signed int SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); +typedef signed int SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); +typedef signed int SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); +typedef signed int SINT64 __attribute__((__mode__(__DI__))); +#endif + +typedef float FLOAT32; + +#ifndef __GNUC__ +#define __builtin_expect(x, expected_value) (x) +#endif +#define LIKELY(x) __builtin_expect(!!(x),1) +#define UNLIKELY(x) __builtin_expect((x)!=0,0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common.h new file mode 100644 index 0000000..76b9dd6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common.h @@ -0,0 +1,153 @@ +/* ----------------------------------------------------------------------- + ffi_common.h - Copyright (C) 2011, 2012, 2013 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc + Copyright (c) 1996 Red Hat, Inc. + + Common internal definitions and macros. Only necessary for building + libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_COMMON_H +#define FFI_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Do not move this. Some versions of AIX are very picky about where + this is positioned. */ +#ifdef __GNUC__ +# if HAVE_ALLOCA_H +# include +# else + /* mingw64 defines this already in malloc.h. */ +# ifndef alloca +# define alloca __builtin_alloca +# endif +# endif +# define MAYBE_UNUSED __attribute__((__unused__)) +#else +# define MAYBE_UNUSED +# if HAVE_ALLOCA_H +# include +# else +# ifdef _AIX +# pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +# ifdef _MSC_VER +# define alloca _alloca +# else +char *alloca (); +# endif +# endif +# endif +# endif +#endif + +/* Check for the existence of memcpy. 
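The FFI_ALIGN and FFI_ALIGN_DOWN macros in ffi_common.h above round a value up or down to a multiple of a power-of-two alignment. A small self-contained check of how they behave, with the macros copied verbatim from the header:

    #include <assert.h>
    #include <stddef.h>

    #define FFI_ALIGN(v, a)      (((((size_t) (v))-1) | ((a)-1))+1)
    #define FFI_ALIGN_DOWN(v, a) (((size_t) (v)) & -a)

    int main(void)
    {
        assert(FFI_ALIGN(13, 8) == 16);       /* rounds up to next multiple of 8 */
        assert(FFI_ALIGN(16, 8) == 16);       /* already aligned: unchanged */
        assert(FFI_ALIGN_DOWN(13, 8) == 8);   /* truncates to previous multiple */
        return 0;
    }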
*/ +#if STDC_HEADERS +# include +#else +# ifndef HAVE_MEMCPY +# define memcpy(d, s, n) bcopy ((s), (d), (n)) +# endif +#endif + +#if defined(FFI_DEBUG) +#include +#endif + +#ifdef FFI_DEBUG +void ffi_assert(char *expr, char *file, int line); +void ffi_stop_here(void); +void ffi_type_test(ffi_type *a, char *file, int line); + +#define FFI_ASSERT(x) ((x) ? (void)0 : ffi_assert(#x, __FILE__,__LINE__)) +#define FFI_ASSERT_AT(x, f, l) ((x) ? 0 : ffi_assert(#x, (f), (l))) +#define FFI_ASSERT_VALID_TYPE(x) ffi_type_test (x, __FILE__, __LINE__) +#else +#define FFI_ASSERT(x) +#define FFI_ASSERT_AT(x, f, l) +#define FFI_ASSERT_VALID_TYPE(x) +#endif + +/* v cast to size_t and aligned up to a multiple of a */ +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) +/* v cast to size_t and aligned down to a multiple of a */ +#define FFI_ALIGN_DOWN(v, a) (((size_t) (v)) & -a) + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif); +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs); + + +#if HAVE_LONG_DOUBLE_VARIANT +/* Used to adjust size/alignment of ffi types. */ +void ffi_prep_types (ffi_abi abi); +#endif + +/* Used internally, but overridden by some architectures */ +ffi_status ffi_prep_cif_core(ffi_cif *cif, + ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +/* Translate a data pointer to a code pointer. Needed for closures on + some targets. */ +void *ffi_data_to_code_pointer (void *data) FFI_HIDDEN; + +/* Extended cif, used in callback from assembly routine */ +typedef struct +{ + ffi_cif *cif; + void *rvalue; + void **avalue; +} extended_cif; + +/* Terse sized type definitions. */ +#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C) +typedef unsigned char UINT8; +typedef signed char SINT8; +typedef unsigned short UINT16; +typedef signed short SINT16; +typedef unsigned int UINT32; +typedef signed int SINT32; +# ifdef _MSC_VER +typedef unsigned __int64 UINT64; +typedef signed __int64 SINT64; +# else +# include +typedef uint64_t UINT64; +typedef int64_t SINT64; +# endif +#else +typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); +typedef signed int SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); +typedef signed int SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); +typedef signed int SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); +typedef signed int SINT64 __attribute__((__mode__(__DI__))); +#endif + +typedef float FLOAT32; + +#ifndef __GNUC__ +#define __builtin_expect(x, expected_value) (x) +#endif +#define LIKELY(x) __builtin_expect(!!(x),1) +#define UNLIKELY(x) __builtin_expect((x)!=0,0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh 2 new file mode 100755 index 0000000..59990a1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh 2 @@ -0,0 +1,508 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2014-09-12.12; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. 
+# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# 'make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +tab=' ' +nl=' +' +IFS=" $tab$nl" + +# Set DOITPROG to "echo" to test this script. + +doit=${DOITPROG-} +doit_exec=${doit:-exec} + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +is_target_a_directory=possibly + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. 
+ +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) + is_target_a_directory=always + dst_arg=$2 + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) is_target_a_directory=never;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +# We allow the use of options -d and -T together, by making -d +# take the precedence; this is for compatibility with GNU install. + +if test -n "$dir_arg"; then + if test -n "$dst_arg"; then + echo "$0: target directory not allowed when installing a directory." >&2 + exit 1 + fi +fi + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call 'install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + if test $# -gt 1 || test "$is_target_a_directory" = always; then + if test ! -d "$dst_arg"; then + echo "$0: $dst_arg: Is not a directory." >&2 + exit 1 + fi + fi +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for 'test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." 
>&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test "$is_target_a_directory" = never; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + dstdir=`dirname "$dst"` + test -d "$dstdir" + dstdir_status=$? + fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + # $RANDOM is not portable (e.g. dash); use it when possible to + # lower collision chance + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null; exit $ret' 0 + + # As "mkdir -p" follows symlinks and we work in /tmp possibly; so + # create the $tmpdir first (and fail if unsuccessful) to make sure + # that nobody tries to guess the $tmpdir name. + if (umask $mkdir_umask && + $mkdirprog $mkdir_mode "$tmpdir" && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + test_tmpdir="$tmpdir/a" + ls_ld_tmpdir=`ls -ld "$test_tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + oIFS=$IFS + IFS=/ + set -f + set fnord $dstdir + shift + set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. 
+ test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + set +f && + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi 2.xcodeproj/project.pbxproj b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi 2.xcodeproj/project.pbxproj new file mode 100644 index 0000000..480c4a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi 2.xcodeproj/project.pbxproj @@ -0,0 +1,997 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + DBFA714A187F1D8600A76262 /* ffi.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713F187F1D8600A76262 /* ffi_common.h */; }; + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7140187F1D8600A76262 /* fficonfig.h */; }; + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + DBFA714E187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA714F187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA715A187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA715B187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */; }; + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + 
FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715F187F1D9B00A76262 /* ffi_armv7.h */; }; + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */; }; + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = 
PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F4F1F5D846400EF414E /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDDB2F501F5D846400EF414E /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDDB2F511F5D846400EF414E /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; +/* End PBXBuildFile section */ + +/* Begin PBXCopyFilesBuildPhase section */ + DB13B1641849DF1E0010F42D /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */, + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */, + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */, + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FC11F6144FA00AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */, + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FE11F6156E000AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = 
"include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */, + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + 43E9A5DA1D35373600926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DB1D35374400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DC1D35375400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DD1D35375400926A8F /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + DB13B1661849DF1E0010F42D /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + DB13B1911849DF510010F42D /* ffi.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = ffi.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + DBFA713E187F1D8600A76262 /* ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi.h; sourceTree = ""; }; + DBFA713F187F1D8600A76262 /* ffi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_common.h; sourceTree = ""; }; + DBFA7140187F1D8600A76262 /* fficonfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig.h; sourceTree = ""; }; + DBFA7141187F1D8600A76262 /* ffitarget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget.h; sourceTree = ""; }; + DBFA7143187F1D8600A76262 /* closures.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = closures.c; sourceTree = ""; }; + DBFA7145187F1D8600A76262 /* dlmalloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dlmalloc.c; sourceTree = ""; }; + DBFA7147187F1D8600A76262 /* prep_cif.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = prep_cif.c; sourceTree = ""; }; + DBFA7148187F1D8600A76262 /* raw_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = raw_api.c; sourceTree = ""; }; + DBFA7149187F1D8600A76262 /* types.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = types.c; sourceTree = ""; }; + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = ffi_arm64.h; sourceTree = ""; }; + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_armv7.h; sourceTree = ""; }; + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_arm64.h; sourceTree = ""; }; + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_armv7.h; sourceTree = ""; }; + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_arm64.h; sourceTree = ""; }; + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_armv7.h; sourceTree = ""; }; + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA716C187F1D9B00A76262 /* ffi_arm64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_arm64.c; sourceTree = ""; }; + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_arm64.S; sourceTree = ""; }; + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_armv7.c; sourceTree = ""; }; + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_armv7.S; sourceTree = ""; }; + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + FDB52FC51F6144FA00AA92E6 /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asmnames.h; sourceTree 
= ""; }; + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + FDDB2F421F5D68C900EF414E /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + FDDB2F431F5D68C900EF414E /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + FDDB2F621F5D846400EF414E /* libffi.a */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXGroup section */ + DB13B15B1849DEB70010F42D = { + isa = PBXGroup; + children = ( + DBFA713C187F1D8600A76262 /* darwin_common */, + DBFA715C187F1D9B00A76262 /* darwin_ios */, + DBFA7180187F1DA100A76262 /* darwin_osx */, + DB13B1671849DF1E0010F42D /* Products */, + ); + sourceTree = ""; + }; + DB13B1671849DF1E0010F42D /* Products */ = { + isa = PBXGroup; + children = ( + DB13B1661849DF1E0010F42D /* libffi.a */, + DB13B1911849DF510010F42D /* ffi.dylib */, + FDDB2F621F5D846400EF414E /* libffi.a */, + FDB52FC51F6144FA00AA92E6 /* libffi.a */, + ); + name = Products; + sourceTree = ""; + }; + DBFA713C187F1D8600A76262 /* darwin_common */ = { + isa = PBXGroup; + children = ( + DBFA713D187F1D8600A76262 /* include */, + DBFA7142187F1D8600A76262 /* src */, + ); + path = darwin_common; + sourceTree = ""; + }; + DBFA713D187F1D8600A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA713E187F1D8600A76262 /* ffi.h */, + DBFA713F187F1D8600A76262 /* ffi_common.h */, + DBFA7140187F1D8600A76262 /* fficonfig.h */, + DBFA7141187F1D8600A76262 /* ffitarget.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7142187F1D8600A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7143187F1D8600A76262 /* closures.c */, + DBFA7145187F1D8600A76262 /* dlmalloc.c */, + DBFA7147187F1D8600A76262 /* prep_cif.c */, + DBFA7148187F1D8600A76262 /* raw_api.c */, + DBFA7149187F1D8600A76262 /* types.c */, + ); + path = src; + sourceTree = ""; + }; + DBFA715C187F1D9B00A76262 /* darwin_ios */ = { + isa = PBXGroup; + children = ( + DBFA715D187F1D9B00A76262 /* include */, + DBFA716A187F1D9B00A76262 /* src */, + ); + path = darwin_ios; + sourceTree = ""; + }; + DBFA715D187F1D9B00A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */, + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */, + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */, + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */, + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */, + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */, + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */, + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */, + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA716A187F1D9B00A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA716B187F1D9B00A76262 /* aarch64 */, + DBFA716E187F1D9B00A76262 /* arm */, + DBFA7172187F1D9B00A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA716B187F1D9B00A76262 /* aarch64 */ = { + isa = PBXGroup; + children = ( + 43E9A5DA1D35373600926A8F /* internal.h */, + DBFA716C187F1D9B00A76262 /* 
ffi_arm64.c */, + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */, + ); + path = aarch64; + sourceTree = ""; + }; + DBFA716E187F1D9B00A76262 /* arm */ = { + isa = PBXGroup; + children = ( + 43E9A5DB1D35374400926A8F /* internal.h */, + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */, + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */, + ); + path = arm; + sourceTree = ""; + }; + DBFA7172187F1D9B00A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + 43E9A5DC1D35375400926A8F /* internal.h */, + 43E9A5DD1D35375400926A8F /* internal64.h */, + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */, + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */, + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */, + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; + DBFA7180187F1DA100A76262 /* darwin_osx */ = { + isa = PBXGroup; + children = ( + DBFA7181187F1DA100A76262 /* include */, + DBFA7188187F1DA100A76262 /* src */, + ); + path = darwin_osx; + sourceTree = ""; + }; + DBFA7181187F1DA100A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */, + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */, + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7188187F1DA100A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7189187F1DA100A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA7189187F1DA100A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + FDDB2F431F5D68C900EF414E /* internal.h */, + FDDB2F421F5D68C900EF414E /* internal64.h */, + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */, + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */, + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */, + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */, + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + DB13B18F1849DF510010F42D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */, + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */, + DBFA714A187F1D8600A76262 /* ffi.h in Headers */, + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */, + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */, + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */, + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + DB13B1651849DF1E0010F42D /* libffi-iOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */; + buildPhases = ( + 43B5D3FB1D35480D00D1E1FD /* Run Script */, + DB13B1621849DF1E0010F42D /* Sources */, + DB13B1641849DF1E0010F42D /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-iOS"; + productName = ffi; + productReference = DB13B1661849DF1E0010F42D /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + DB13B1901849DF510010F42D /* libffi-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */; + buildPhases = ( + DB13B3061849E0490010F42D /* ShellScript */, + DB13B18D1849DF510010F42D /* Sources */, + DB13B18F1849DF510010F42D /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = 
"libffi-Mac"; + productName = ffi; + productReference = DB13B1911849DF510010F42D /* ffi.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */; + buildPhases = ( + FDB52FB11F6144FA00AA92E6 /* Run Script */, + FDB52FB21F6144FA00AA92E6 /* Sources */, + FDB52FC11F6144FA00AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-tvOS"; + productName = ffi; + productReference = FDB52FC51F6144FA00AA92E6 /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + FDDB2F471F5D846400EF414E /* libffi-static-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */; + buildPhases = ( + FDDB2F481F5D846400EF414E /* ShellScript */, + FDDB2F491F5D846400EF414E /* Sources */, + FDB52FE11F6156E000AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-static-Mac"; + productName = ffi; + productReference = FDDB2F621F5D846400EF414E /* libffi.a */; + productType = "com.apple.product-type.library.dynamic"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + DB13B15C1849DEB70010F42D /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0830; + }; + buildConfigurationList = DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = DB13B15B1849DEB70010F42D; + productRefGroup = DB13B1671849DF1E0010F42D /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + DB13B1651849DF1E0010F42D /* libffi-iOS */, + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */, + DB13B1901849DF510010F42D /* libffi-Mac */, + FDDB2F471F5D846400EF414E /* libffi-static-Mac */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 43B5D3FB1D35480D00D1E1FD /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + DB13B3061849E0490010F42D /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; + FDB52FB11F6144FA00AA92E6 /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! 
-f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + FDDB2F481F5D846400EF414E /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + DB13B1621849DF1E0010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */, + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */, + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */, + DBFA714E187F1D8600A76262 /* closures.c in Sources */, + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */, + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */, + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */, + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */, + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */, + DBFA715A187F1D8600A76262 /* types.c in Sources */, + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */, + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DB13B18D1849DF510010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */, + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */, + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */, + DBFA715B187F1D8600A76262 /* types.c in Sources */, + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */, + DBFA714F187F1D8600A76262 /* closures.c in Sources */, + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */, + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FB21F6144FA00AA92E6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */, + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */, + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */, + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */, + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */, + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */, + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */, + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */, + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */, + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */, + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */, + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDDB2F491F5D846400EF414E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */, + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */, + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */, + FDDB2F4F1F5D846400EF414E /* types.c in Sources */, + 
FDDB2F501F5D846400EF414E /* raw_api.c in Sources */, + FDDB2F511F5D846400EF414E /* closures.c in Sources */, + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */, + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + DB13B1601849DEB70010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + ONLY_ACTIVE_ARCH = YES; + }; + name = Debug; + }; + DB13B1611849DEB70010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + }; + name = Release; + }; + DB13B1871849DF1E0010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DSTROOT = /tmp/ffi.dst; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Debug; + }; + DB13B1881849DF1E0010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + 
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + DSTROOT = /tmp/ffi.dst; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALIDATE_PRODUCT = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Release; + }; + DB13B1B11849DF520010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + DB13B1B21849DF520010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; + FDB52FC31F6144FA00AA92E6 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + 
CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + }; + name = Debug; + }; + FDB52FC41F6144FA00AA92E6 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + FDDB2F601F5D846400EF414E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + FDDB2F611F5D846400EF414E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = 
YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1601849DEB70010F42D /* Debug */, + DB13B1611849DEB70010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1871849DF1E0010F42D /* Debug */, + DB13B1881849DF1E0010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1B11849DF520010F42D /* Debug */, + DB13B1B21849DF520010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDB52FC31F6144FA00AA92E6 /* Debug */, + FDB52FC41F6144FA00AA92E6 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDDB2F601F5D846400EF414E /* Debug */, + FDDB2F611F5D846400EF414E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = DB13B15C1849DEB70010F42D /* Project object */; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map 2.in new file mode 100644 index 0000000..de8778a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map 2.in @@ -0,0 +1,76 @@ +#define LIBFFI_ASM +#define LIBFFI_H +#include +#include + +/* These version numbers correspond to the libtool-version abi numbers, + not to the libffi release numbers. */ + +LIBFFI_BASE_8.0 { + global: + /* Exported data variables. */ + ffi_type_void; + ffi_type_uint8; + ffi_type_sint8; + ffi_type_uint16; + ffi_type_sint16; + ffi_type_uint32; + ffi_type_sint32; + ffi_type_uint64; + ffi_type_sint64; + ffi_type_float; + ffi_type_double; + ffi_type_longdouble; + ffi_type_pointer; + + /* Exported functions. 
*/ + ffi_call; + ffi_prep_cif; + ffi_prep_cif_var; + + ffi_raw_call; + ffi_ptrarray_to_raw; + ffi_raw_to_ptrarray; + ffi_raw_size; + + ffi_java_raw_call; + ffi_java_ptrarray_to_raw; + ffi_java_raw_to_ptrarray; + ffi_java_raw_size; + + ffi_get_struct_offsets; + local: + *; +}; + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +LIBFFI_COMPLEX_8.0 { + global: + /* Exported data variables. */ + ffi_type_complex_float; + ffi_type_complex_double; + ffi_type_complex_longdouble; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_CLOSURES +LIBFFI_CLOSURE_8.0 { + global: + ffi_closure_alloc; + ffi_closure_free; + ffi_prep_closure; + ffi_prep_closure_loc; + ffi_prep_raw_closure; + ffi_prep_raw_closure_loc; + ffi_prep_java_raw_closure; + ffi_prep_java_raw_closure_loc; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_GO_CLOSURES +LIBFFI_GO_CLOSURE_8.0 { + global: + ffi_call_go; + ffi_prep_go_closure; +} LIBFFI_CLOSURE_8.0; +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map.in new file mode 100644 index 0000000..de8778a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map.in @@ -0,0 +1,76 @@ +#define LIBFFI_ASM +#define LIBFFI_H +#include +#include + +/* These version numbers correspond to the libtool-version abi numbers, + not to the libffi release numbers. */ + +LIBFFI_BASE_8.0 { + global: + /* Exported data variables. */ + ffi_type_void; + ffi_type_uint8; + ffi_type_sint8; + ffi_type_uint16; + ffi_type_sint16; + ffi_type_uint32; + ffi_type_sint32; + ffi_type_uint64; + ffi_type_sint64; + ffi_type_float; + ffi_type_double; + ffi_type_longdouble; + ffi_type_pointer; + + /* Exported functions. */ + ffi_call; + ffi_prep_cif; + ffi_prep_cif_var; + + ffi_raw_call; + ffi_ptrarray_to_raw; + ffi_raw_to_ptrarray; + ffi_raw_size; + + ffi_java_raw_call; + ffi_java_ptrarray_to_raw; + ffi_java_raw_to_ptrarray; + ffi_java_raw_size; + + ffi_get_struct_offsets; + local: + *; +}; + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +LIBFFI_COMPLEX_8.0 { + global: + /* Exported data variables. 
*/ + ffi_type_complex_float; + ffi_type_complex_double; + ffi_type_complex_longdouble; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_CLOSURES +LIBFFI_CLOSURE_8.0 { + global: + ffi_closure_alloc; + ffi_closure_free; + ffi_prep_closure; + ffi_prep_closure_loc; + ffi_prep_raw_closure; + ffi_prep_raw_closure_loc; + ffi_prep_java_raw_closure; + ffi_prep_java_raw_closure_loc; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_GO_CLOSURES +LIBFFI_GO_CLOSURE_8.0 { + global: + ffi_call_go; + ffi_prep_go_closure; +} LIBFFI_CLOSURE_8.0; +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc 2.in new file mode 100644 index 0000000..6fad83b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc 2.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +toolexeclibdir=@toolexeclibdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Description: Library supporting Foreign Function Interfaces +Version: @PACKAGE_VERSION@ +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc.in new file mode 100644 index 0000000..6fad83b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +toolexeclibdir=@toolexeclibdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Description: Library supporting Foreign Function Interfaces +Version: @PACKAGE_VERSION@ +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj new file mode 100644 index 0000000..480c4a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj @@ -0,0 +1,997 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + DBFA714A187F1D8600A76262 /* ffi.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713F187F1D8600A76262 /* ffi_common.h */; }; + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7140187F1D8600A76262 /* fficonfig.h */; }; + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + DBFA714E187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA714F187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA715A187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA715B187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */; }; + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + 
FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715F187F1D9B00A76262 /* ffi_armv7.h */; }; + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */; }; + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = 
PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F4F1F5D846400EF414E /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDDB2F501F5D846400EF414E /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDDB2F511F5D846400EF414E /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; +/* End PBXBuildFile section */ + +/* Begin PBXCopyFilesBuildPhase section */ + DB13B1641849DF1E0010F42D /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */, + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */, + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */, + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FC11F6144FA00AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */, + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FE11F6156E000AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = 
"include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */, + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + 43E9A5DA1D35373600926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DB1D35374400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DC1D35375400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DD1D35375400926A8F /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + DB13B1661849DF1E0010F42D /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + DB13B1911849DF510010F42D /* ffi.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = ffi.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + DBFA713E187F1D8600A76262 /* ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi.h; sourceTree = ""; }; + DBFA713F187F1D8600A76262 /* ffi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_common.h; sourceTree = ""; }; + DBFA7140187F1D8600A76262 /* fficonfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig.h; sourceTree = ""; }; + DBFA7141187F1D8600A76262 /* ffitarget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget.h; sourceTree = ""; }; + DBFA7143187F1D8600A76262 /* closures.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = closures.c; sourceTree = ""; }; + DBFA7145187F1D8600A76262 /* dlmalloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dlmalloc.c; sourceTree = ""; }; + DBFA7147187F1D8600A76262 /* prep_cif.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = prep_cif.c; sourceTree = ""; }; + DBFA7148187F1D8600A76262 /* raw_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = raw_api.c; sourceTree = ""; }; + DBFA7149187F1D8600A76262 /* types.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = types.c; sourceTree = ""; }; + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = ffi_arm64.h; sourceTree = ""; }; + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_armv7.h; sourceTree = ""; }; + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_arm64.h; sourceTree = ""; }; + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_armv7.h; sourceTree = ""; }; + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_arm64.h; sourceTree = ""; }; + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_armv7.h; sourceTree = ""; }; + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA716C187F1D9B00A76262 /* ffi_arm64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_arm64.c; sourceTree = ""; }; + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_arm64.S; sourceTree = ""; }; + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_armv7.c; sourceTree = ""; }; + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_armv7.S; sourceTree = ""; }; + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + FDB52FC51F6144FA00AA92E6 /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asmnames.h; sourceTree 
= ""; }; + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + FDDB2F421F5D68C900EF414E /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + FDDB2F431F5D68C900EF414E /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + FDDB2F621F5D846400EF414E /* libffi.a */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXGroup section */ + DB13B15B1849DEB70010F42D = { + isa = PBXGroup; + children = ( + DBFA713C187F1D8600A76262 /* darwin_common */, + DBFA715C187F1D9B00A76262 /* darwin_ios */, + DBFA7180187F1DA100A76262 /* darwin_osx */, + DB13B1671849DF1E0010F42D /* Products */, + ); + sourceTree = ""; + }; + DB13B1671849DF1E0010F42D /* Products */ = { + isa = PBXGroup; + children = ( + DB13B1661849DF1E0010F42D /* libffi.a */, + DB13B1911849DF510010F42D /* ffi.dylib */, + FDDB2F621F5D846400EF414E /* libffi.a */, + FDB52FC51F6144FA00AA92E6 /* libffi.a */, + ); + name = Products; + sourceTree = ""; + }; + DBFA713C187F1D8600A76262 /* darwin_common */ = { + isa = PBXGroup; + children = ( + DBFA713D187F1D8600A76262 /* include */, + DBFA7142187F1D8600A76262 /* src */, + ); + path = darwin_common; + sourceTree = ""; + }; + DBFA713D187F1D8600A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA713E187F1D8600A76262 /* ffi.h */, + DBFA713F187F1D8600A76262 /* ffi_common.h */, + DBFA7140187F1D8600A76262 /* fficonfig.h */, + DBFA7141187F1D8600A76262 /* ffitarget.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7142187F1D8600A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7143187F1D8600A76262 /* closures.c */, + DBFA7145187F1D8600A76262 /* dlmalloc.c */, + DBFA7147187F1D8600A76262 /* prep_cif.c */, + DBFA7148187F1D8600A76262 /* raw_api.c */, + DBFA7149187F1D8600A76262 /* types.c */, + ); + path = src; + sourceTree = ""; + }; + DBFA715C187F1D9B00A76262 /* darwin_ios */ = { + isa = PBXGroup; + children = ( + DBFA715D187F1D9B00A76262 /* include */, + DBFA716A187F1D9B00A76262 /* src */, + ); + path = darwin_ios; + sourceTree = ""; + }; + DBFA715D187F1D9B00A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */, + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */, + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */, + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */, + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */, + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */, + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */, + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */, + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA716A187F1D9B00A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA716B187F1D9B00A76262 /* aarch64 */, + DBFA716E187F1D9B00A76262 /* arm */, + DBFA7172187F1D9B00A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA716B187F1D9B00A76262 /* aarch64 */ = { + isa = PBXGroup; + children = ( + 43E9A5DA1D35373600926A8F /* internal.h */, + DBFA716C187F1D9B00A76262 /* 
ffi_arm64.c */, + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */, + ); + path = aarch64; + sourceTree = ""; + }; + DBFA716E187F1D9B00A76262 /* arm */ = { + isa = PBXGroup; + children = ( + 43E9A5DB1D35374400926A8F /* internal.h */, + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */, + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */, + ); + path = arm; + sourceTree = ""; + }; + DBFA7172187F1D9B00A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + 43E9A5DC1D35375400926A8F /* internal.h */, + 43E9A5DD1D35375400926A8F /* internal64.h */, + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */, + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */, + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */, + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; + DBFA7180187F1DA100A76262 /* darwin_osx */ = { + isa = PBXGroup; + children = ( + DBFA7181187F1DA100A76262 /* include */, + DBFA7188187F1DA100A76262 /* src */, + ); + path = darwin_osx; + sourceTree = ""; + }; + DBFA7181187F1DA100A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */, + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */, + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7188187F1DA100A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7189187F1DA100A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA7189187F1DA100A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + FDDB2F431F5D68C900EF414E /* internal.h */, + FDDB2F421F5D68C900EF414E /* internal64.h */, + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */, + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */, + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */, + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */, + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + DB13B18F1849DF510010F42D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */, + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */, + DBFA714A187F1D8600A76262 /* ffi.h in Headers */, + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */, + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */, + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */, + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + DB13B1651849DF1E0010F42D /* libffi-iOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */; + buildPhases = ( + 43B5D3FB1D35480D00D1E1FD /* Run Script */, + DB13B1621849DF1E0010F42D /* Sources */, + DB13B1641849DF1E0010F42D /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-iOS"; + productName = ffi; + productReference = DB13B1661849DF1E0010F42D /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + DB13B1901849DF510010F42D /* libffi-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */; + buildPhases = ( + DB13B3061849E0490010F42D /* ShellScript */, + DB13B18D1849DF510010F42D /* Sources */, + DB13B18F1849DF510010F42D /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = 
"libffi-Mac"; + productName = ffi; + productReference = DB13B1911849DF510010F42D /* ffi.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */; + buildPhases = ( + FDB52FB11F6144FA00AA92E6 /* Run Script */, + FDB52FB21F6144FA00AA92E6 /* Sources */, + FDB52FC11F6144FA00AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-tvOS"; + productName = ffi; + productReference = FDB52FC51F6144FA00AA92E6 /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + FDDB2F471F5D846400EF414E /* libffi-static-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */; + buildPhases = ( + FDDB2F481F5D846400EF414E /* ShellScript */, + FDDB2F491F5D846400EF414E /* Sources */, + FDB52FE11F6156E000AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-static-Mac"; + productName = ffi; + productReference = FDDB2F621F5D846400EF414E /* libffi.a */; + productType = "com.apple.product-type.library.dynamic"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + DB13B15C1849DEB70010F42D /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0830; + }; + buildConfigurationList = DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = DB13B15B1849DEB70010F42D; + productRefGroup = DB13B1671849DF1E0010F42D /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + DB13B1651849DF1E0010F42D /* libffi-iOS */, + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */, + DB13B1901849DF510010F42D /* libffi-Mac */, + FDDB2F471F5D846400EF414E /* libffi-static-Mac */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 43B5D3FB1D35480D00D1E1FD /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + DB13B3061849E0490010F42D /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; + FDB52FB11F6144FA00AA92E6 /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! 
-f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + FDDB2F481F5D846400EF414E /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + DB13B1621849DF1E0010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */, + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */, + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */, + DBFA714E187F1D8600A76262 /* closures.c in Sources */, + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */, + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */, + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */, + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */, + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */, + DBFA715A187F1D8600A76262 /* types.c in Sources */, + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */, + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DB13B18D1849DF510010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */, + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */, + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */, + DBFA715B187F1D8600A76262 /* types.c in Sources */, + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */, + DBFA714F187F1D8600A76262 /* closures.c in Sources */, + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */, + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FB21F6144FA00AA92E6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */, + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */, + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */, + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */, + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */, + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */, + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */, + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */, + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */, + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */, + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */, + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDDB2F491F5D846400EF414E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */, + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */, + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */, + FDDB2F4F1F5D846400EF414E /* types.c in Sources */, + 
FDDB2F501F5D846400EF414E /* raw_api.c in Sources */, + FDDB2F511F5D846400EF414E /* closures.c in Sources */, + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */, + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + DB13B1601849DEB70010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + ONLY_ACTIVE_ARCH = YES; + }; + name = Debug; + }; + DB13B1611849DEB70010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + }; + name = Release; + }; + DB13B1871849DF1E0010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DSTROOT = /tmp/ffi.dst; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Debug; + }; + DB13B1881849DF1E0010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + 
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + DSTROOT = /tmp/ffi.dst; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALIDATE_PRODUCT = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Release; + }; + DB13B1B11849DF520010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + DB13B1B21849DF520010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; + FDB52FC31F6144FA00AA92E6 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + 
CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + }; + name = Debug; + }; + FDB52FC41F6144FA00AA92E6 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + FDDB2F601F5D846400EF414E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + FDDB2F611F5D846400EF414E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = 
YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1601849DEB70010F42D /* Debug */, + DB13B1611849DEB70010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1871849DF1E0010F42D /* Debug */, + DB13B1881849DF1E0010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1B11849DF520010F42D /* Debug */, + DB13B1B21849DF520010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDB52FC31F6144FA00AA92E6 /* Debug */, + FDB52FC41F6144FA00AA92E6 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDDB2F601F5D846400EF414E /* Debug */, + FDDB2F611F5D846400EF414E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = DB13B15C1849DEB70010F42D /* Project object */; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-ldflags 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-ldflags 2 new file mode 100755 index 0000000..e32e37b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-ldflags 2 @@ -0,0 +1,106 @@ +#! /bin/sh + +# Script to translate LDFLAGS into a form suitable for use with libtool. + +# Copyright (C) 2005 Free Software Foundation, Inc. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. + +# Contributed by CodeSourcery, LLC. + +# This script is designed to be used from a Makefile that uses libtool +# to build libraries as follows: +# +# LTLDFLAGS = $(shell libtool-ldflags $(LDFLAGS)) +# +# Then, use (LTLDFLAGS) in place of $(LDFLAGS) in your link line. + +# The output of the script. This string is built up as we process the +# arguments. +result= +prev_arg= + +for arg +do + case $arg in + -f*|--*) + # Libtool does not ascribe any special meaning options + # that begin with -f or with a double-dash. So, it will + # think these options are linker options, and prefix them + # with "-Wl,". Then, the compiler driver will ignore the + # options. So, we prefix these options with -Xcompiler to + # make clear to libtool that they are in fact compiler + # options. + case $prev_arg in + -Xpreprocessor|-Xcompiler|-Xlinker) + # This option is already prefixed; don't prefix it again. + ;; + *) + result="$result -Xcompiler" + ;; + esac + ;; + *) + # We do not want to add -Xcompiler to other options because + # that would prevent libtool itself from recognizing them. + ;; + esac + prev_arg=$arg + + # If $(LDFLAGS) is (say): + # a "b'c d" e + # then the user expects that: + # $(LD) $(LDFLAGS) + # will pass three arguments to $(LD): + # 1) a + # 2) b'c d + # 3) e + # We must ensure, therefore, that the arguments are appropriately + # quoted so that using: + # libtool --mode=link ... $(LTLDFLAGS) + # will result in the same number of arguments being passed to + # libtool. In other words, when this script was invoked, the shell + # removed one level of quoting, present in $(LDFLAGS); we have to put + # it back. + + # Quote any embedded single quotes. + case $arg in + *"'"*) + # The following command creates the script: + # 1s,^X,,;s|'|'"'"'|g + # which removes a leading X, and then quotes and embedded single + # quotes. + sed_script="1s,^X,,;s|'|'\"'\"'|g" + # Add a leading "X" so that if $arg starts with a dash, + # the echo command will not try to interpret the argument + # as a command-line option. + arg="X$arg" + # Generate the quoted string. + quoted_arg=`echo "$arg" | sed -e "$sed_script"` + ;; + *) + quoted_arg=$arg + ;; + esac + # Surround the entire argument with single quotes. + quoted_arg="'"$quoted_arg"'" + + # Add it to the string. + result="$result $quoted_arg" +done + +# Output the string we have built up. +echo "$result" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version new file mode 100644 index 0000000..607fee5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version @@ -0,0 +1,29 @@ +# This file is used to maintain libtool version info for libffi. See +# the libtool manual to understand the meaning of the fields. This is +# a separate file so that version updates don't involve re-running +# automake. +# +# Here are a set of rules to help you update your library version +# information: +# +# 1. Start with version information of `0:0:0' for each libtool library. +# +# 2. Update the version information only immediately before a public +# release of your software. More frequent updates are unnecessary, +# and only guarantee that the current interface number gets larger +# faster. +# +# 3. 
If the library source code has changed at all since the last +# update, then increment revision (`c:r:a' becomes `c:r+1:a'). +# +# 4. If any interfaces have been added, removed, or changed since the +# last update, increment current, and set revision to 0. +# +# 5. If any interfaces have been added since the last public release, +# then increment age. +# +# 6. If any interfaces have been removed since the last public +# release, then set age to 0. +# +# CURRENT:REVISION:AGE +9:0:1 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version 2 new file mode 100644 index 0000000..607fee5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version 2 @@ -0,0 +1,29 @@ +# This file is used to maintain libtool version info for libffi. See +# the libtool manual to understand the meaning of the fields. This is +# a separate file so that version updates don't involve re-running +# automake. +# +# Here are a set of rules to help you update your library version +# information: +# +# 1. Start with version information of `0:0:0' for each libtool library. +# +# 2. Update the version information only immediately before a public +# release of your software. More frequent updates are unnecessary, +# and only guarantee that the current interface number gets larger +# faster. +# +# 3. If the library source code has changed at all since the last +# update, then increment revision (`c:r:a' becomes `c:r+1:a'). +# +# 4. If any interfaces have been added, removed, or changed since the +# last update, increment current, and set revision to 0. +# +# 5. If any interfaces have been added since the last public release, +# then increment age. +# +# 6. If any interfaces have been removed since the last public +# release, then set age to 0. +# +# CURRENT:REVISION:AGE +9:0:1 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ltmain 2.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ltmain 2.sh new file mode 100644 index 0000000..a736cf9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ltmain 2.sh @@ -0,0 +1,11156 @@ +#! /bin/sh +## DO NOT EDIT - This file generated from ./build-aux/ltmain.in +## by inline-source v2014-01-03.01 + +# libtool (GNU libtool) 2.4.6 +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. 
If not, see . + + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.6 Debian-2.4.6-2" +package_revision=2.4.6 + + +## ------ ## +## Usage. ## +## ------ ## + +# Run './libtool --help' for help with using this script from the +# command line. + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# After configure completes, it has a better idea of some of the +# shell tools we need than the defaults used by the functions shared +# with bootstrap, so set those here where they can still be over- +# ridden by the user, but otherwise take precedence. + +: ${AUTOCONF="autoconf"} +: ${AUTOMAKE="automake"} + + +## -------------------------- ## +## Source external libraries. ## +## -------------------------- ## + +# Much of our low-level functionality needs to be sourced from external +# libraries, which are installed to $pkgauxdir. + +# Set a version string for this script. +scriptversion=2015-01-20.17; # UTC + +# General shell script boiler plate, and helper functions. +# Written by Gary V. Vaughan, 2004 + +# Copyright (C) 2004-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. + +# As a special exception to the GNU General Public License, if you distribute +# this file as part of a program or library that is built using GNU Libtool, +# you may include this file under the same distribution terms that you use +# for the rest of that program. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# Evaluate this file near the top of your script to gain access to +# the functions and variables defined here: +# +# . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh +# +# If you need to override any of the default environment variable +# settings, do that before evaluating this file. + + +## -------------------- ## +## Shell normalisation. ## +## -------------------- ## + +# Some shells need a little help to be as Bourne compatible as possible. +# Before doing anything else, make sure all that help has been provided! + +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac +fi + +# NLS nuisances: We save the old values in case they are required later. 
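+# The loop below saves each locale variable and forces it to C so that tool
+# output stays parseable; judging from the strings it builds, eval'ing
+# $_G_user_locale later restores the caller's settings, while $_G_safe_locale
+# re-imposes the C locale.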
+_G_user_locale= +_G_safe_locale= +for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test set = \"\${$_G_var+set}\"; then + save_$_G_var=\$$_G_var + $_G_var=C + export $_G_var + _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" + _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" + fi" +done + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Make sure IFS has a sensible default +sp=' ' +nl=' +' +IFS="$sp $nl" + +# There are apparently some retarded systems that use ';' as a PATH separator! +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + + +## ------------------------- ## +## Locate command utilities. ## +## ------------------------- ## + + +# func_executable_p FILE +# ---------------------- +# Check that FILE is an executable regular file. +func_executable_p () +{ + test -f "$1" && test -x "$1" +} + + +# func_path_progs PROGS_LIST CHECK_FUNC [PATH] +# -------------------------------------------- +# Search for either a program that responds to --version with output +# containing "GNU", or else returned by CHECK_FUNC otherwise, by +# trying all the directories in PATH with each of the elements of +# PROGS_LIST. +# +# CHECK_FUNC should accept the path to a candidate program, and +# set $func_check_prog_result if it truncates its output less than +# $_G_path_prog_max characters. +func_path_progs () +{ + _G_progs_list=$1 + _G_check_func=$2 + _G_PATH=${3-"$PATH"} + + _G_path_prog_max=0 + _G_path_prog_found=false + _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} + for _G_dir in $_G_PATH; do + IFS=$_G_save_IFS + test -z "$_G_dir" && _G_dir=. + for _G_prog_name in $_G_progs_list; do + for _exeext in '' .EXE; do + _G_path_prog=$_G_dir/$_G_prog_name$_exeext + func_executable_p "$_G_path_prog" || continue + case `"$_G_path_prog" --version 2>&1` in + *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; + *) $_G_check_func $_G_path_prog + func_path_progs_result=$func_check_prog_result + ;; + esac + $_G_path_prog_found && break 3 + done + done + done + IFS=$_G_save_IFS + test -z "$func_path_progs_result" && { + echo "no acceptable sed could be found in \$PATH" >&2 + exit 1 + } +} + + +# We want to be able to use the functions in this file before configure +# has figured out where the best binaries are kept, which means we have +# to search for them ourselves - except when the results are already set +# where we skip the searches. + +# Unless the user overrides by setting SED, search the path for either GNU +# sed, or the sed that truncates its output the least. 
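+# A caller that already knows a suitable sed can skip the probe below by
+# presetting SED in the environment, for example (hypothetical install path):
+#   SED=/opt/gnu/bin/sed ./libtool --mode=compile cc -c foo.c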
+test -z "$SED" && { + _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for _G_i in 1 2 3 4 5 6 7; do + _G_sed_script=$_G_sed_script$nl$_G_sed_script + done + echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed + _G_sed_script= + + func_check_prog_sed () + { + _G_path_prog=$1 + + _G_count=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo '' >> conftest.nl + "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin + rm -f conftest.sed + SED=$func_path_progs_result +} + + +# Unless the user overrides by setting GREP, search the path for either GNU +# grep, or the grep that truncates its output the least. +test -z "$GREP" && { + func_check_prog_grep () + { + _G_path_prog=$1 + + _G_count=0 + _G_path_prog_max=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo 'GREP' >> conftest.nl + "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin + GREP=$func_path_progs_result +} + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# All uppercase variable names are used for environment variables. These +# variables can be overridden by the user before calling a script that +# uses them if a suitable command of that name is not already available +# in the command search PATH. + +: ${CP="cp -f"} +: ${ECHO="printf %s\n"} +: ${EGREP="$GREP -E"} +: ${FGREP="$GREP -F"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} + + +## -------------------- ## +## Useful sed snippets. ## +## -------------------- ## + +sed_dirname='s|/[^/]*$||' +sed_basename='s|^.*/||' + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s|\([`"$\\]\)|\\\1|g' + +# Same as above, but do not quote variable references. +sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' + +# Sed substitution that converts a w32 file name or path +# that contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. 
+sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-'\' parameter expansions in output of sed_double_quote_subst that +# were '\'-ed in input to the same. If an odd number of '\' preceded a +# '$' in input to sed_double_quote_subst, that '$' was protected from +# expansion. Since each input '\' is now two '\'s, look for any number +# of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. +_G_bs='\\' +_G_bs2='\\\\' +_G_bs4='\\\\\\\\' +_G_dollar='\$' +sed_double_backslash="\ + s/$_G_bs4/&\\ +/g + s/^$_G_bs2$_G_dollar/$_G_bs&/ + s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g + s/\n//g" + + +## ----------------- ## +## Global variables. ## +## ----------------- ## + +# Except for the global variables explicitly listed below, the following +# functions in the '^func_' namespace, and the '^require_' namespace +# variables initialised in the 'Resource management' section, sourcing +# this file will not pollute your global namespace with anything +# else. There's no portable way to scope variables in Bourne shell +# though, so actually running these functions will sometimes place +# results into a variable named after the function, and often use +# temporary variables in the '^_G_' namespace. If you are careful to +# avoid using those namespaces casually in your sourcing script, things +# should continue to work as you expect. And, of course, you can freely +# overwrite any of the functions or variables defined here before +# calling anything to customize them. + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +# Allow overriding, eg assuming that you follow the convention of +# putting '$debug_cmd' at the start of all your functions, you can get +# bash to show function call trace with: +# +# debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +debug_cmd=${debug_cmd-":"} +exit_cmd=: + +# By convention, finish your script with: +# +# exit $exit_status +# +# so that you can set exit_status to non-zero if you want to indicate +# something went wrong during execution without actually bailing out at +# the point of failure. +exit_status=$EXIT_SUCCESS + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath=$0 + +# The name of this program. +progname=`$ECHO "$progpath" |$SED "$sed_basename"` + +# Make sure we have an absolute progpath for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` + progdir=`cd "$progdir" && pwd` + progpath=$progdir/$progname + ;; + *) + _G_IFS=$IFS + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS=$_G_IFS + test -x "$progdir/$progname" && break + done + IFS=$_G_IFS + test -n "$progdir" || progdir=`pwd` + progpath=$progdir/$progname + ;; +esac + + +## ----------------- ## +## Standard options. ## +## ----------------- ## + +# The following options affect the operation of the functions defined +# below, and should be set appropriately depending on run-time para- +# meters passed on the command line. + +opt_dry_run=false +opt_quiet=false +opt_verbose=false + +# Categories 'all' and 'none' are always available. 
Append any others +# you will pass as the first argument to func_warning from your own +# code. +warning_categories= + +# By default, display warnings according to 'opt_warning_types'. Set +# 'warning_func' to ':' to elide all warnings, or func_fatal_error to +# treat the next displayed warning as a fatal error. +warning_func=func_warn_and_continue + +# Set to 'all' to display all warnings, 'none' to suppress all +# warnings, or a space delimited list of some subset of +# 'warning_categories' to display only the listed warnings. +opt_warning_types=all + + +## -------------------- ## +## Resource management. ## +## -------------------- ## + +# This section contains definitions for functions that each ensure a +# particular resource (a file, or a non-empty configuration variable for +# example) is available, and if appropriate to extract default values +# from pertinent package files. Call them using their associated +# 'require_*' variable to ensure that they are executed, at most, once. +# +# It's entirely deliberate that calling these functions can set +# variables that don't obey the namespace limitations obeyed by the rest +# of this file, in order that that they be as useful as possible to +# callers. + + +# require_term_colors +# ------------------- +# Allow display of bold text on terminals that support it. +require_term_colors=func_require_term_colors +func_require_term_colors () +{ + $debug_cmd + + test -t 1 && { + # COLORTERM and USE_ANSI_COLORS environment variables take + # precedence, because most terminfo databases neglect to describe + # whether color sequences are supported. + test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} + + if test 1 = "$USE_ANSI_COLORS"; then + # Standard ANSI escape sequences + tc_reset='' + tc_bold=''; tc_standout='' + tc_red=''; tc_green='' + tc_blue=''; tc_cyan='' + else + # Otherwise trust the terminfo database after all. + test -n "`tput sgr0 2>/dev/null`" && { + tc_reset=`tput sgr0` + test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` + tc_standout=$tc_bold + test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` + test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` + test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` + test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` + test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` + } + fi + } + + require_term_colors=: +} + + +## ----------------- ## +## Function library. ## +## ----------------- ## + +# This section contains a variety of useful functions to call in your +# scripts. Take note of the portable wrappers for features provided by +# some modern shells, which will fall back to slower equivalents on +# less featureful shells. + + +# func_append VAR VALUE +# --------------------- +# Append VALUE onto the existing contents of VAR. + + # We should try to minimise forks, especially on Windows where they are + # unreasonably slow, so skip the feature probes when bash or zsh are + # being used: + if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then + : ${_G_HAVE_ARITH_OP="yes"} + : ${_G_HAVE_XSI_OPS="yes"} + # The += operator was introduced in bash 3.1 + case $BASH_VERSION in + [12].* | 3.0 | 3.0*) ;; + *) + : ${_G_HAVE_PLUSEQ_OP="yes"} + ;; + esac + fi + + # _G_HAVE_PLUSEQ_OP + # Can be empty, in which case the shell is probed, "yes" if += is + # useable or anything else if it does not work. 
+ test -z "$_G_HAVE_PLUSEQ_OP" \ + && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ + && _G_HAVE_PLUSEQ_OP=yes + +if test yes = "$_G_HAVE_PLUSEQ_OP" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_append () + { + $debug_cmd + + eval "$1+=\$2" + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_append () + { + $debug_cmd + + eval "$1=\$$1\$2" + } +fi + + +# func_append_quoted VAR VALUE +# ---------------------------- +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +if test yes = "$_G_HAVE_PLUSEQ_OP"; then + eval 'func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1+=\\ \$func_quote_for_eval_result" + }' +else + func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1=\$$1\\ \$func_quote_for_eval_result" + } +fi + + +# func_append_uniq VAR VALUE +# -------------------------- +# Append unique VALUE onto the existing contents of VAR, assuming +# entries are delimited by the first character of VALUE. For example: +# +# func_append_uniq options " --another-option option-argument" +# +# will only append to $options if " --another-option option-argument " +# is not already present somewhere in $options already (note spaces at +# each end implied by leading space in second argument). +func_append_uniq () +{ + $debug_cmd + + eval _G_current_value='`$ECHO $'$1'`' + _G_delim=`expr "$2" : '\(.\)'` + + case $_G_delim$_G_current_value$_G_delim in + *"$2$_G_delim"*) ;; + *) func_append "$@" ;; + esac +} + + +# func_arith TERM... +# ------------------ +# Set func_arith_result to the result of evaluating TERMs. + test -z "$_G_HAVE_ARITH_OP" \ + && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ + && _G_HAVE_ARITH_OP=yes + +if test yes = "$_G_HAVE_ARITH_OP"; then + eval 'func_arith () + { + $debug_cmd + + func_arith_result=$(( $* )) + }' +else + func_arith () + { + $debug_cmd + + func_arith_result=`expr "$@"` + } +fi + + +# func_basename FILE +# ------------------ +# Set func_basename_result to FILE with everything up to and including +# the last / stripped. +if test yes = "$_G_HAVE_XSI_OPS"; then + # If this shell supports suffix pattern removal, then use it to avoid + # forking. Hide the definitions single quotes in case the shell chokes + # on unsupported syntax... + _b='func_basename_result=${1##*/}' + _d='case $1 in + */*) func_dirname_result=${1%/*}$2 ;; + * ) func_dirname_result=$3 ;; + esac' + +else + # ...otherwise fall back to using sed. + _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' + _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` + if test "X$func_dirname_result" = "X$1"; then + func_dirname_result=$3 + else + func_append func_dirname_result "$2" + fi' +fi + +eval 'func_basename () +{ + $debug_cmd + + '"$_b"' +}' + + +# func_dirname FILE APPEND NONDIR_REPLACEMENT +# ------------------------------------------- +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +eval 'func_dirname () +{ + $debug_cmd + + '"$_d"' +}' + + +# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT +# -------------------------------------------------------- +# Perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. 
+# value retuned in "$func_basename_result" +# For efficiency, we do not delegate to the functions above but instead +# duplicate the functionality here. +eval 'func_dirname_and_basename () +{ + $debug_cmd + + '"$_b"' + '"$_d"' +}' + + +# func_echo ARG... +# ---------------- +# Echo program name prefixed message. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_echo_all ARG... +# -------------------- +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + + +# func_echo_infix_1 INFIX ARG... +# ------------------------------ +# Echo program name, followed by INFIX on the first line, with any +# additional lines not showing INFIX. +func_echo_infix_1 () +{ + $debug_cmd + + $require_term_colors + + _G_infix=$1; shift + _G_indent=$_G_infix + _G_prefix="$progname: $_G_infix: " + _G_message=$* + + # Strip color escape sequences before counting printable length + for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" + do + test -n "$_G_tc" && { + _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` + _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` + } + done + _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes + + func_echo_infix_1_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_infix_1_IFS + $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 + _G_prefix=$_G_indent + done + IFS=$func_echo_infix_1_IFS +} + + +# func_error ARG... +# ----------------- +# Echo program name prefixed message to standard error. +func_error () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 +} + + +# func_fatal_error ARG... +# ----------------------- +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + $debug_cmd + + func_error "$*" + exit $EXIT_FAILURE +} + + +# func_grep EXPRESSION FILENAME +# ----------------------------- +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $debug_cmd + + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_len STRING +# --------------- +# Set func_len_result to the length of STRING. STRING may not +# start with a hyphen. + test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_len () + { + $debug_cmd + + func_len_result=${#1} + }' +else + func_len () + { + $debug_cmd + + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` + } +fi + + +# func_mkdir_p DIRECTORY-PATH +# --------------------------- +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + $debug_cmd + + _G_directory_path=$1 + _G_dir_list= + + if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then + + # Protect directory names starting with '-' + case $_G_directory_path in + -*) _G_directory_path=./$_G_directory_path ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$_G_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. 
+ _G_dir_list=$_G_directory_path:$_G_dir_list + + # If the last portion added has no slash in it, the list is done + case $_G_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` + done + _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` + + func_mkdir_p_IFS=$IFS; IFS=: + for _G_dir in $_G_dir_list; do + IFS=$func_mkdir_p_IFS + # mkdir can fail with a 'File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$_G_dir" 2>/dev/null || : + done + IFS=$func_mkdir_p_IFS + + # Bail out if we (or some other process) failed to create a directory. + test -d "$_G_directory_path" || \ + func_fatal_error "Failed to create '$1'" + fi +} + + +# func_mktempdir [BASENAME] +# ------------------------- +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, BASENAME is the basename for that directory. +func_mktempdir () +{ + $debug_cmd + + _G_template=${TMPDIR-/tmp}/${1-$progname} + + if test : = "$opt_dry_run"; then + # Return a directory name, but don't create it in dry-run mode + _G_tmpdir=$_G_template-$$ + else + + # If mktemp works, use that first and foremost + _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` + + if test ! -d "$_G_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + _G_tmpdir=$_G_template-${RANDOM-0}$$ + + func_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$_G_tmpdir" + umask $func_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$_G_tmpdir" || \ + func_fatal_error "cannot create temporary directory '$_G_tmpdir'" + fi + + $ECHO "$_G_tmpdir" +} + + +# func_normal_abspath PATH +# ------------------------ +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +func_normal_abspath () +{ + $debug_cmd + + # These SED scripts presuppose an absolute path with a trailing slash. + _G_pathcar='s|^/\([^/]*\).*$|\1|' + _G_pathcdr='s|^/[^/]*||' + _G_removedotparts=':dotsl + s|/\./|/|g + t dotsl + s|/\.$|/|' + _G_collapseslashes='s|/\{1,\}|/|g' + _G_finalslash='s|/*$|/|' + + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. 
+ func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` + while :; do + # Processed it all yet? + if test / = "$func_normal_abspath_tpath"; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result"; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + + +# func_notquiet ARG... +# -------------------- +# Echo program name prefixed message only when not in quiet mode. +func_notquiet () +{ + $debug_cmd + + $opt_quiet || func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + + +# func_relative_path SRCDIR DSTDIR +# -------------------------------- +# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. +func_relative_path () +{ + $debug_cmd + + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=$func_dirname_result + if test -z "$func_relative_path_tlibdir"; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test -n "$func_stripname_result"; then + func_append func_relative_path_result "/$func_stripname_result" + fi + + # Normalisation. If bindir is libdir, return '.' else relative path. + if test -n "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + fi + + test -n "$func_relative_path_result" || func_relative_path_result=. + + : +} + + +# func_quote_for_eval ARG... 
+# -------------------------- +# Aesthetically quote ARGs to be evaled later. +# This function returns two values: +# i) func_quote_for_eval_result +# double-quoted, suitable for a subsequent eval +# ii) func_quote_for_eval_unquoted_result +# has all characters that are still active within double +# quotes backslashified. +func_quote_for_eval () +{ + $debug_cmd + + func_quote_for_eval_unquoted_result= + func_quote_for_eval_result= + while test 0 -lt $#; do + case $1 in + *[\\\`\"\$]*) + _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; + *) + _G_unquoted_arg=$1 ;; + esac + if test -n "$func_quote_for_eval_unquoted_result"; then + func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" + else + func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" + fi + + case $_G_unquoted_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and variable expansion + # for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_quoted_arg=\"$_G_unquoted_arg\" + ;; + *) + _G_quoted_arg=$_G_unquoted_arg + ;; + esac + + if test -n "$func_quote_for_eval_result"; then + func_append func_quote_for_eval_result " $_G_quoted_arg" + else + func_append func_quote_for_eval_result "$_G_quoted_arg" + fi + shift + done +} + + +# func_quote_for_expand ARG +# ------------------------- +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + $debug_cmd + + case $1 in + *[\\\`\"]*) + _G_arg=`$ECHO "$1" | $SED \ + -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; + *) + _G_arg=$1 ;; + esac + + case $_G_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_arg=\"$_G_arg\" + ;; + esac + + func_quote_for_expand_result=$_G_arg +} + + +# func_stripname PREFIX SUFFIX NAME +# --------------------------------- +# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_stripname () + { + $debug_cmd + + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary variable first. + func_stripname_result=$3 + func_stripname_result=${func_stripname_result#"$1"} + func_stripname_result=${func_stripname_result%"$2"} + }' +else + func_stripname () + { + $debug_cmd + + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; + esac + } +fi + + +# func_show_eval CMD [FAIL_EXP] +# ----------------------------- +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. 
+func_show_eval () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + func_quote_for_expand "$_G_cmd" + eval "func_notquiet $func_quote_for_expand_result" + + $opt_dry_run || { + eval "$_G_cmd" + _G_status=$? + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_show_eval_locale CMD [FAIL_EXP] +# ------------------------------------ +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + $opt_quiet || { + func_quote_for_expand "$_G_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + $opt_dry_run || { + eval "$_G_user_locale + $_G_cmd" + _G_status=$? + eval "$_G_safe_locale" + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_tr_sh +# ---------- +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + $debug_cmd + + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_verbose ARG... +# ------------------- +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $debug_cmd + + $opt_verbose && func_echo "$*" + + : +} + + +# func_warn_and_continue ARG... +# ----------------------------- +# Echo program name prefixed warning message to standard error. +func_warn_and_continue () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 +} + + +# func_warning CATEGORY ARG... +# ---------------------------- +# Echo program name prefixed warning message to standard error. Warning +# messages can be filtered according to CATEGORY, where this function +# elides messages where CATEGORY is not listed in the global variable +# 'opt_warning_types'. +func_warning () +{ + $debug_cmd + + # CATEGORY must be in the warning_categories list! + case " $warning_categories " in + *" $1 "*) ;; + *) func_internal_error "invalid warning category '$1'" ;; + esac + + _G_category=$1 + shift + + case " $opt_warning_types " in + *" $_G_category "*) $warning_func ${1+"$@"} ;; + esac +} + + +# func_sort_ver VER1 VER2 +# ----------------------- +# 'sort -V' is not generally available. +# Note this deviates from the version comparison in automake +# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a +# but this should suffice as we won't be specifying old +# version formats or redundant trailing .0 in bootstrap.conf. +# If we did want full compatibility then we should probably +# use m4_version_compare from autoconf. +func_sort_ver () +{ + $debug_cmd + + printf '%s\n%s\n' "$1" "$2" \ + | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n +} + +# func_lt_ver PREV CURR +# --------------------- +# Return true if PREV and CURR are in the correct order according to +# func_sort_ver, otherwise false. Use it like this: +# +# func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." 
+func_lt_ver () +{ + $debug_cmd + + test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: +#! /bin/sh + +# Set a version string for this script. +scriptversion=2014-01-07.03; # UTC + +# A portable, pluggable option parser for Bourne shell. +# Written by Gary V. Vaughan, 2010 + +# Copyright (C) 2010-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# This file is a library for parsing options in your shell scripts along +# with assorted other useful supporting features that you can make use +# of too. +# +# For the simplest scripts you might need only: +# +# #!/bin/sh +# . relative/path/to/funclib.sh +# . relative/path/to/options-parser +# scriptversion=1.0 +# func_options ${1+"$@"} +# eval set dummy "$func_options_result"; shift +# ...rest of your script... +# +# In order for the '--version' option to work, you will need to have a +# suitably formatted comment like the one at the top of this file +# starting with '# Written by ' and ending with '# warranty; '. +# +# For '-h' and '--help' to work, you will also need a one line +# description of your script's purpose in a comment directly above the +# '# Written by ' line, like the one at the top of this file. +# +# The default options also support '--debug', which will turn on shell +# execution tracing (see the comment above debug_cmd below for another +# use), and '--verbose' and the func_verbose function to allow your script +# to display verbose messages only when your user has specified +# '--verbose'. +# +# After sourcing this file, you can plug processing for additional +# options by amending the variables from the 'Configuration' section +# below, and following the instructions in the 'Option parsing' +# section further down. + +## -------------- ## +## Configuration. ## +## -------------- ## + +# You should override these variables in your script after sourcing this +# file so that they reflect the customisations you have added to the +# option parser. + +# The usage line for option parsing errors and the start of '-h' and +# '--help' output messages. You can embed shell variables for delayed +# expansion at the time the message is displayed, but you will need to +# quote other shell meta-characters carefully to prevent them being +# expanded when the contents are evaled. +usage='$progpath [OPTION]...' + +# Short help message in response to '-h' and '--help'. 
Add to this or
+# override it after sourcing this library to reflect the full set of
+# options your script accepts.
+usage_message="\
+       --debug        enable verbose shell tracing
+   -W, --warnings=CATEGORY
+                      report the warnings falling in CATEGORY [all]
+   -v, --verbose      verbosely report processing
+       --version      print version information and exit
+   -h, --help         print short or long help message and exit
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+long_help_message="
+Warning categories include:
+       'all'          show all warnings
+       'none'         turn off all the warnings
+       'error'        warnings are treated as fatal errors"
+
+# Help message printed before fatal option parsing errors.
+fatal_help="Try '\$progname --help' for more information."
+
+
+
+## ------------------------- ##
+## Hook function management. ##
+## ------------------------- ##
+
+# This section contains functions for adding, removing, and running hooks
+# to the main code. A hook is just a named list of functions that can
+# be run in order later on.
+
+# func_hookable FUNC_NAME
+# -----------------------
+# Declare that FUNC_NAME will run hooks added with
+# 'func_add_hook FUNC_NAME ...'.
+func_hookable ()
+{
+  $debug_cmd
+
+  func_append hookable_fns " $1"
+}
+
+
+# func_add_hook FUNC_NAME HOOK_FUNC
+# ---------------------------------
+# Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must
+# first have been declared "hookable" by a call to 'func_hookable'.
+func_add_hook ()
+{
+  $debug_cmd
+
+  case " $hookable_fns " in
+    *" $1 "*) ;;
+    *) func_fatal_error "'$1' does not accept hook functions." ;;
+  esac
+
+  eval func_append ${1}_hooks '" $2"'
+}
+
+
+# func_remove_hook FUNC_NAME HOOK_FUNC
+# ------------------------------------
+# Remove HOOK_FUNC from the list of functions called by FUNC_NAME.
+func_remove_hook ()
+{
+  $debug_cmd
+
+  eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`'
+}
+
+
+# func_run_hooks FUNC_NAME [ARG]...
+# ---------------------------------
+# Run all hook functions registered to FUNC_NAME.
+# It is assumed that the list of hook functions contains nothing more
+# than a whitespace-delimited list of legal shell function names, and
+# no effort is wasted trying to catch shell meta-characters or preserve
+# whitespace.
+func_run_hooks ()
+{
+  $debug_cmd
+
+  case " $hookable_fns " in
+    *" $1 "*) ;;
+    *) func_fatal_error "'$1' does not support hook functions." ;;
+  esac
+
+  eval _G_hook_fns=\$$1_hooks; shift
+
+  for _G_hook in $_G_hook_fns; do
+    eval $_G_hook '"$@"'
+
+    # store returned options list back into positional
+    # parameters for next 'cmd' execution.
+    eval _G_hook_result=\$${_G_hook}_result
+    eval set dummy "$_G_hook_result"; shift
+  done
+
+  func_quote_for_eval ${1+"$@"}
+  func_run_hooks_result=$func_quote_for_eval_result
+}
+
+
+
+## --------------- ##
+## Option parsing. ##
+## --------------- ##
+
+# In order to add your own option parsing hooks, you must accept the
+# full positional parameter list in your hook function, remove any
+# options that you action, and then pass back the remaining unprocessed
+# options in '<hooked_function_name>_result', escaped suitably for
+# 'eval'. Like this:
+#
+# my_options_prep ()
+# {
+#     $debug_cmd
+#
+#     # Extend the existing usage message.
+# usage_message=$usage_message' +# -s, --silent don'\''t print informational messages +# ' +# +# func_quote_for_eval ${1+"$@"} +# my_options_prep_result=$func_quote_for_eval_result +# } +# func_add_hook func_options_prep my_options_prep +# +# +# my_silent_option () +# { +# $debug_cmd +# +# # Note that for efficiency, we parse as many options as we can +# # recognise in a loop before passing the remainder back to the +# # caller on the first unrecognised argument we encounter. +# while test $# -gt 0; do +# opt=$1; shift +# case $opt in +# --silent|-s) opt_silent=: ;; +# # Separate non-argument short options: +# -s*) func_split_short_opt "$_G_opt" +# set dummy "$func_split_short_opt_name" \ +# "-$func_split_short_opt_arg" ${1+"$@"} +# shift +# ;; +# *) set dummy "$_G_opt" "$*"; shift; break ;; +# esac +# done +# +# func_quote_for_eval ${1+"$@"} +# my_silent_option_result=$func_quote_for_eval_result +# } +# func_add_hook func_parse_options my_silent_option +# +# +# my_option_validation () +# { +# $debug_cmd +# +# $opt_silent && $opt_verbose && func_fatal_help "\ +# '--silent' and '--verbose' options are mutually exclusive." +# +# func_quote_for_eval ${1+"$@"} +# my_option_validation_result=$func_quote_for_eval_result +# } +# func_add_hook func_validate_options my_option_validation +# +# You'll alse need to manually amend $usage_message to reflect the extra +# options you parse. It's preferable to append if you can, so that +# multiple option parsing hooks can be added safely. + + +# func_options [ARG]... +# --------------------- +# All the functions called inside func_options are hookable. See the +# individual implementations for details. +func_hookable func_options +func_options () +{ + $debug_cmd + + func_options_prep ${1+"$@"} + eval func_parse_options \ + ${func_options_prep_result+"$func_options_prep_result"} + eval func_validate_options \ + ${func_parse_options_result+"$func_parse_options_result"} + + eval func_run_hooks func_options \ + ${func_validate_options_result+"$func_validate_options_result"} + + # save modified positional parameters for caller + func_options_result=$func_run_hooks_result +} + + +# func_options_prep [ARG]... +# -------------------------- +# All initialisations required before starting the option parse loop. +# Note that when calling hook functions, we pass through the list of +# positional parameters. If a hook function modifies that list, and +# needs to propogate that back to rest of this script, then the complete +# modified list must be put in 'func_run_hooks_result' before +# returning. +func_hookable func_options_prep +func_options_prep () +{ + $debug_cmd + + # Option defaults: + opt_verbose=false + opt_warning_types= + + func_run_hooks func_options_prep ${1+"$@"} + + # save modified positional parameters for caller + func_options_prep_result=$func_run_hooks_result +} + + +# func_parse_options [ARG]... +# --------------------------- +# The main option parsing loop. +func_hookable func_parse_options +func_parse_options () +{ + $debug_cmd + + func_parse_options_result= + + # this just eases exit handling + while test $# -gt 0; do + # Defer to hook functions for initial option parsing, so they + # get priority in the event of reusing an option name. + func_run_hooks func_parse_options ${1+"$@"} + + # Adjust func_parse_options positional parameters to match + eval set dummy "$func_run_hooks_result"; shift + + # Break out of the loop if we already parsed every option. 
+ test $# -gt 0 || break + + _G_opt=$1 + shift + case $_G_opt in + --debug|-x) debug_cmd='set -x' + func_echo "enabling shell trace mode" + $debug_cmd + ;; + + --no-warnings|--no-warning|--no-warn) + set dummy --warnings none ${1+"$@"} + shift + ;; + + --warnings|--warning|-W) + test $# = 0 && func_missing_arg $_G_opt && break + case " $warning_categories $1" in + *" $1 "*) + # trailing space prevents matching last $1 above + func_append_uniq opt_warning_types " $1" + ;; + *all) + opt_warning_types=$warning_categories + ;; + *none) + opt_warning_types=none + warning_func=: + ;; + *error) + opt_warning_types=$warning_categories + warning_func=func_fatal_error + ;; + *) + func_fatal_error \ + "unsupported warning category: '$1'" + ;; + esac + shift + ;; + + --verbose|-v) opt_verbose=: ;; + --version) func_version ;; + -\?|-h) func_usage ;; + --help) func_help ;; + + # Separate optargs to long options (plugins may need this): + --*=*) func_split_equals "$_G_opt" + set dummy "$func_split_equals_lhs" \ + "$func_split_equals_rhs" ${1+"$@"} + shift + ;; + + # Separate optargs to short options: + -W*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-v*|-x*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + esac + done + + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + func_parse_options_result=$func_quote_for_eval_result +} + + +# func_validate_options [ARG]... +# ------------------------------ +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +func_hookable func_validate_options +func_validate_options () +{ + $debug_cmd + + # Display all warnings if -W was not given. + test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" + + func_run_hooks func_validate_options ${1+"$@"} + + # Bail if the options were screwed! + $exit_cmd $EXIT_FAILURE + + # save modified positional parameters for caller + func_validate_options_result=$func_run_hooks_result +} + + + +## ----------------- ## +## Helper functions. ## +## ----------------- ## + +# This section contains the helper functions used by the rest of the +# hookable option parser framework in ascii-betical order. + + +# func_fatal_help ARG... +# ---------------------- +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + eval \$ECHO \""$fatal_help"\" + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + + +# func_help +# --------- +# Echo long help message to standard output and exit. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message" + exit 0 +} + + +# func_missing_arg ARGNAME +# ------------------------ +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $debug_cmd + + func_error "Missing argument for '$1'." + exit_cmd=exit +} + + +# func_split_equals STRING +# ------------------------ +# Set func_split_equals_lhs and func_split_equals_rhs shell variables after +# splitting STRING at the '=' sign. 
+test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=${1%%=*} + func_split_equals_rhs=${1#*=} + test "x$func_split_equals_lhs" = "x$1" \ + && func_split_equals_rhs= + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` + func_split_equals_rhs= + test "x$func_split_equals_lhs" = "x$1" \ + || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` + } +fi #func_split_equals + + +# func_split_short_opt SHORTOPT +# ----------------------------- +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"} + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` + func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` + } +fi #func_split_short_opt + + +# func_usage +# ---------- +# Echo short help message to standard output and exit. +func_usage () +{ + $debug_cmd + + func_usage_message + $ECHO "Run '$progname --help |${PAGER-more}' for full usage" + exit 0 +} + + +# func_usage_message +# ------------------ +# Echo short help message to standard output. +func_usage_message () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + echo + $SED -n 's|^# || + /^Written by/{ + x;p;x + } + h + /^Written by/q' < "$progpath" + echo + eval \$ECHO \""$usage_message"\" +} + + +# func_version +# ------------ +# Echo version message to standard output and exit. +func_version () +{ + $debug_cmd + + printf '%s\n' "$progname $scriptversion" + $SED -n ' + /(C)/!b go + :more + /\./!{ + N + s|\n# | | + b more + } + :go + /^# Written by /,/# warranty; / { + s|^# || + s|^# *$|| + s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| + p + } + /^# Written by / { + s|^# || + p + } + /^warranty; /q' < "$progpath" + + exit $? +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: + +# Set a version string. +scriptversion='(GNU libtool) 2.4.6' + + +# func_echo ARG... +# ---------------- +# Libtool also displays the current mode in messages, so override +# funclib.sh func_echo with this custom definition. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_warning ARG... +# ------------------- +# Libtool warnings are not categorized, so override funclib.sh +# func_warning with this simpler definition. +func_warning () +{ + $debug_cmd + + $warning_func ${1+"$@"} +} + + +## ---------------- ## +## Options parsing. ## +## ---------------- ## + +# Hook in the functions to make sure our own options are parsed during +# the option parsing loop. 
+
+usage='$progpath [OPTION]... [MODE-ARG]...'
+
+# Short help message in response to '-h'.
+usage_message="Options:
+       --config             show all configuration variables
+       --debug              enable verbose shell tracing
+   -n, --dry-run            display commands without modifying any files
+       --features           display basic configuration information and exit
+       --mode=MODE          use operation mode MODE
+       --no-warnings        equivalent to '-Wnone'
+       --preserve-dup-deps  don't remove duplicate dependency libraries
+       --quiet, --silent    don't print informational messages
+       --tag=TAG            use configuration variables from tag TAG
+   -v, --verbose            print more informational messages than default
+       --version            print version information
+   -W, --warnings=CATEGORY  report the warnings falling in CATEGORY [all]
+   -h, --help, --help-all   print short, long, or detailed help message
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+func_help ()
+{
+  $debug_cmd
+
+  func_usage_message
+  $ECHO "$long_help_message
+
+MODE must be one of the following:
+
+       clean           remove files from the build directory
+       compile         compile a source file into a libtool object
+       execute         automatically set library path, then run a program
+       finish          complete the installation of libtool libraries
+       install         install libraries or executables
+       link            create a library or an executable
+       uninstall       remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. When passed as first option,
+'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that.
+Try '$progname --help --mode=MODE' for a more detailed description of MODE.
+
+When reporting a bug, please describe a test case to reproduce it and
+include the following information:
+
+       host-triplet:   $host
+       shell:          $SHELL
+       compiler:       $LTCC
+       compiler flags: $LTCFLAGS
+       linker:         $LD (gnu? $with_gnu_ld)
+       version:        $progname $scriptversion Debian-2.4.6-2
+       automake:       `($AUTOMAKE --version) 2>/dev/null |$SED 1q`
+       autoconf:       `($AUTOCONF --version) 2>/dev/null |$SED 1q`
+
+Report bugs to .
+GNU libtool home page: .
+General help using GNU software: ."
+  exit 0
+}
+
+
+# func_lo2o OBJECT-NAME
+# ---------------------
+# Transform OBJECT-NAME from a '.lo' suffix to the platform specific
+# object suffix.
+
+lo2o=s/\\.lo\$/.$objext/
+o2lo=s/\\.$objext\$/.lo/
+
+if test yes = "$_G_HAVE_XSI_OPS"; then
+  eval 'func_lo2o ()
+  {
+    case $1 in
+      *.lo) func_lo2o_result=${1%.lo}.$objext ;;
+      * ) func_lo2o_result=$1 ;;
+    esac
+  }'
+
+  # func_xform LIBOBJ-OR-SOURCE
+  # ---------------------------
+  # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise)
+  # suffix to a '.lo' libtool-object suffix.
+  eval 'func_xform ()
+  {
+    func_xform_result=${1%.*}.lo
+  }'
+else
+  # ...otherwise fall back to using sed.
+  func_lo2o ()
+  {
+    func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"`
+  }
+
+  func_xform ()
+  {
+    func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'`
+  }
+fi
+
+
+# func_fatal_configuration ARG...
+# -------------------------------
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+  func_fatal_error ${1+"$@"} \
+    "See the $PACKAGE documentation for more information." \
+    "Fatal configuration error."
+}
+
+
+# func_config
+# -----------
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+  re_begincf='^# ### BEGIN LIBTOOL'
+  re_endcf='^# ### END LIBTOOL'
+
+  # Default configuration.
+ $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + + +# func_features +# ------------- +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test yes = "$build_libtool_libs"; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test yes = "$build_old_libs"; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + + +# func_enable_tag TAGNAME +# ----------------------- +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname=$1 + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf=/$re_begincf/,/$re_endcf/p + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + + +# func_check_version_match +# ------------------------ +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# libtool_options_prep [ARG]... +# ----------------------------- +# Preparation for options parsed by libtool. 
+libtool_options_prep () +{ + $debug_mode + + # Option defaults: + opt_config=false + opt_dlopen= + opt_dry_run=false + opt_help=false + opt_mode= + opt_preserve_dup_deps=false + opt_quiet=false + + nonopt= + preserve_args= + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + esac + + # Pass back the list of options. + func_quote_for_eval ${1+"$@"} + libtool_options_prep_result=$func_quote_for_eval_result +} +func_add_hook func_options_prep libtool_options_prep + + +# libtool_parse_options [ARG]... +# --------------------------------- +# Provide handling for libtool specific options. +libtool_parse_options () +{ + $debug_cmd + + # Perform our own loop to consume as many options as possible in + # each iteration. + while test $# -gt 0; do + _G_opt=$1 + shift + case $_G_opt in + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + + --config) func_config ;; + + --dlopen|-dlopen) + opt_dlopen="${opt_dlopen+$opt_dlopen +}$1" + shift + ;; + + --preserve-dup-deps) + opt_preserve_dup_deps=: ;; + + --features) func_features ;; + + --finish) set dummy --mode finish ${1+"$@"}; shift ;; + + --help) opt_help=: ;; + + --help-all) opt_help=': help-all' ;; + + --mode) test $# = 0 && func_missing_arg $_G_opt && break + opt_mode=$1 + case $1 in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $_G_opt" + exit_cmd=exit + break + ;; + esac + shift + ;; + + --no-silent|--no-quiet) + opt_quiet=false + func_append preserve_args " $_G_opt" + ;; + + --no-warnings|--no-warning|--no-warn) + opt_warning=false + func_append preserve_args " $_G_opt" + ;; + + --no-verbose) + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --silent|--quiet) + opt_quiet=: + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --tag) test $# = 0 && func_missing_arg $_G_opt && break + opt_tag=$1 + func_append preserve_args " $_G_opt $1" + func_enable_tag "$1" + shift + ;; + + --verbose|-v) opt_quiet=false + opt_verbose=: + func_append preserve_args " $_G_opt" + ;; + + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + esac + done + + + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + libtool_parse_options_result=$func_quote_for_eval_result +} +func_add_hook func_parse_options libtool_parse_options + + + +# libtool_validate_options [ARG]... +# --------------------------------- +# Perform any sanity checks on option settings and/or unconsumed +# arguments. 
+libtool_validate_options () +{ + # save first non-option argument + if test 0 -lt $#; then + nonopt=$1 + shift + fi + + # preserve --debug + test : = "$debug_cmd" || func_append preserve_args " --debug" + + case $host in + # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 + # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + test yes != "$build_libtool_libs" \ + && test yes != "$build_old_libs" \ + && func_fatal_configuration "not configured to build any kind of library" + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test execute != "$opt_mode"; then + func_error "unrecognized option '-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help=$help + help="Try '$progname --help --mode=$opt_mode' for more information." + } + + # Pass back the unparsed argument list + func_quote_for_eval ${1+"$@"} + libtool_validate_options_result=$func_quote_for_eval_result +} +func_add_hook func_validate_options libtool_validate_options + + +# Process options as early as possible so that --help and --version +# can return quickly. +func_options ${1+"$@"} +eval set dummy "$func_options_result"; shift + + + +## ----------- ## +## Main. ## +## ----------- ## + +magic='%%%MAGIC variable%%%' +magic_exe='%%%MAGIC EXE variable%%%' + +# Global variables. +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# func_generated_by_libtool +# True iff stdin has been generated by Libtool. This function is only +# a basic sanity check; it will hardly flush out determined imposters. +func_generated_by_libtool_p () +{ + $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if 'file' does not exist. 
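+# Illustration (editorial sketch; the header shown is only an example of what
+# libtool-generated files look like): a file beginning
+#   # libfoo.la - a libtool library file
+#   # Generated by libtool (GNU libtool) ...
+# passes, because the loop below only looks for a '# Generated by ... $PACKAGE'
+# line among the first four lines of the file.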
+func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case $lalib_p_line in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test yes = "$lalib_p" +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + test -f "$1" && + $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $debug_cmd + + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# 'FILE.' does not work on cygwin managed mounts. +func_source () +{ + $debug_cmd + + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case $lt_sysroot:$1 in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result='='$func_stripname_result + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' 
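+# Usage sketch (editorial; the tag and compiler names are examples only): if
+# this script was configured with a CXX tag whose CC is 'g++', then
+#   func_infer_tag g++ -c -o bar.lo bar.cpp
+# sets tagname=CXX by matching the compiler at the front of the command against
+# each tagged configuration embedded in this file; if nothing matches, a fatal
+# error asks the caller to pass --tag explicitly.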
+func_infer_tag () +{ + $debug_cmd + + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with '--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=$1 + if test yes = "$build_libtool_libs"; then + write_lobj=\'$2\' + else + write_lobj=none + fi + + if test yes = "$build_old_libs"; then + write_oldobj=\'$3\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. 
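+# Example (editorial sketch; the drive mapping shown depends entirely on the
+# local wine configuration): converting '/home/user/lib:/usr/lib' produces
+# something like 'Z:\home\user\lib;Z:\usr\lib' -- each component is converted
+# individually via func_convert_core_file_wine_to_w32 and the results are
+# rejoined with ';', silently skipping components winepath cannot convert.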
+func_convert_core_path_wine_to_w32 () +{ + $debug_cmd + + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result= + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result"; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $debug_cmd + + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" -ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $debug_cmd + + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $debug_cmd + + if test -z "$2" && test -n "$1"; then + func_error "Could not determine host file name corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result=$1 + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $debug_cmd + + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " '$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. 
This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result=$3 + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $debug_cmd + + case $4 in + $1 ) func_to_host_path_result=$3$func_to_host_path_result + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via '$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $debug_cmd + + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. +func_to_tool_file () +{ + $debug_cmd + + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result=$1 +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result=$func_convert_core_msys_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. 
+func_convert_file_nix_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result=$func_convert_core_file_wine_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via '$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. +# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $debug_cmd + + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd=func_convert_path_$func_stripname_result + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $debug_cmd + + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result=$1 +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. 
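+# Example (editorial sketch, not upstream documentation): under MSYS,
+#   func_convert_path_msys_to_w32 /c/foo:/c/bar
+# leaves something like 'c:\foo;c:\bar' in func_to_host_path_result -- MSYS's
+# automatic argument conversion (via 'cmd //c echo') rewrites the path list,
+# the result is naively backslashified, and any leading or trailing separator
+# is stripped first and re-attached (converted) afterwards.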
+func_convert_path_msys_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_msys_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. +func_convert_path_nix_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_path_wine_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_path_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. 
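+      # (Editorial illustration: ':/opt/lib:' is reduced to '/opt/lib' here
+      # before conversion; the stripped separators are re-attached afterwards
+      # by func_convert_path_front_back_pathsep, still as ':' since Cygwin
+      # uses the same path separator.)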
+ func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_dll_def_p FILE +# True iff FILE is a Windows DLL '.def' file. +# Keep in sync with _LT_DLL_DEF_P in libtool.m4 +func_dll_def_p () +{ + $debug_cmd + + func_dll_def_p_tmp=`$SED -n \ + -e 's/^[ ]*//' \ + -e '/^\(;.*\)*$/d' \ + -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ + -e q \ + "$1"` + test DEF = "$func_dll_def_p_tmp" +} + + +# func_mode_compile arg... +func_mode_compile () +{ + $debug_cmd + + # Get the compilation command and the source file. + base_compile= + srcfile=$nonopt # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg=$arg + arg_mode=normal + ;; + + target ) + libobj=$arg + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify '-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs=$IFS; IFS=, + for arg in $args; do + IFS=$save_ifs + func_append_quoted lastarg "$arg" + done + IFS=$save_ifs + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg=$srcfile + srcfile=$arg + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with '-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj=$func_basename_result + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? 
| *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from '$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test yes = "$build_libtool_libs" \ + || func_fatal_configuration "cannot build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name '$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname=$func_basename_result + xdir=$func_dirname_result + lobj=$xdir$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. + if test yes = "$build_old_libs"; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test no = "$compiler_c_o"; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext + lockfile=$output_obj.lock + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test yes = "$need_locks"; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test warn = "$need_locks"; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test yes = "$build_libtool_libs"; then + # Without this assignment, base_compile gets emptied. 
+ fbsd_hideous_sh_bug=$base_compile + + if test no != "$pic_mode"; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. + if test yes = "$suppress_opt"; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test yes = "$build_old_libs"; then + if test yes != "$pic_mode"; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test yes = "$compiler_c_o"; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test no != "$need_locks"; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test compile = "$opt_mode" && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. 
+ +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a '.o' file suitable for static linking + -static only build a '.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a 'standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix '.c' with the +library object suffix, '.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to '-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the '--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the 'install' or 'cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. 
+ +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE use a list of object files found in FILE to specify objects + -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with '-') are ignored. + +Every other argument is treated as a filename. Files ending in '.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in '.la', then a libtool library is created, +only library objects ('.lo' files) may be specified, and '-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created +using 'ar' and 'ranlib', or on Windows using 'lib'. + +If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode '$opt_mode'" + ;; + esac + + echo + $ECHO "Try '$progname --help' for more information about other modes." 
+} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test : = "$opt_help"; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | $SED -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + $SED '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $debug_cmd + + # The first argument is the command name. + cmd=$nonopt + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "'$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "'$file' was not linked with '-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir=$func_dirname_result + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." + dir=$func_dirname_result + ;; + + *) + func_warning "'-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir=$absdir + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic=$magic + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file=$progdir/$program + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file=$progdir/$program + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if $opt_dry_run; then + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + else + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. 
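+  # (Editorial note: shlibpath_var is the platform's run-time library path
+  # variable -- typically LD_LIBRARY_PATH on GNU/Linux, for example -- so the
+  # uninstalled library directories collected above become visible to the
+  # dynamic loader of the command exec'd below.)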
+ eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd=\$cmd$args + fi +} + +test execute = "$opt_mode" && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $debug_cmd + + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "'$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument '$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and '=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. + $opt_quiet && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the '-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the '$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the '$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the '$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." 
+ ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test finish = "$opt_mode" && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $debug_cmd + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac + then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=false + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=: ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test X-m = "X$prev" && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. + func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the '$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=: + if $isdir; then + destdir=$dest + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir=$func_dirname_result + destname=$func_basename_result + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "'$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "'$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. 
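+  # (Editorial sketch of the dispatch below: '*.la' libtool archives get the
+  # relink/symlink treatment, '*.lo' objects are copied along with their
+  # old-style '.o' counterparts when static objects are built, '*.$libext'
+  # static archives are deferred to the end of this mode, and anything else
+  # is handled as a program or wrapper script.)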
+ case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir=$func_dirname_result + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking '$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname=$1 + shift + + srcname=$realname + test -n "$relink_command" && srcname=${realname}T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme=$stripme + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme= + ;; + esac + ;; + os2*) + case $realname in + *_dll.a) + tstripme= + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try 'ln -sf' first, because the 'ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib=$destdir/$realname + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name=$func_basename_result + instname=$dir/${name}i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. 
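+    # (Editorial note: old_library -- e.g. 'libfoo.a', a placeholder name --
+    # was read from the .la file by func_source above; it is queued here and
+    # installed with the other static archives at the end of this mode.)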
+ test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest=$destfile + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to '$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test yes = "$build_old_libs"; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext= + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=.exe + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script '$wrapper'" + + finalize=: + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "'$lib' has not been installed in '$libdir'" + finalize=false + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test no = "$fast_install" && test -n "$relink_command"; then + $opt_dry_run || { + if $finalize; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file=$func_basename_result + outputname=$tmpdir/$file + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_quiet || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink '$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file=$outputname + else + func_warning "cannot relink '$file'" + fi + } + else + # Install the binary that we compiled earlier. 
+ file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name=$func_basename_result + + # Set up the ranlib parameters. + oldlib=$destdir/$name + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run '$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test install = "$opt_mode" && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $debug_cmd + + my_outputname=$1 + my_originator=$2 + my_pic_p=${3-false} + my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms=${my_outputname}S.c + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist=$output_objdir/$my_outputname.nm + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. 
*/\
+"
+
+ if test yes = "$dlself"; then
+ func_verbose "generating symbol list for '$output'"
+
+ $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+ for progfile in $progfiles; do
+ func_to_tool_file "$progfile" func_convert_file_msys_to_w32
+ func_verbose "extracting global C symbols from '$func_to_tool_file_result'"
+ $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $opt_dry_run || {
+ eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ }
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $opt_dry_run || {
+ eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ }
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols=$output_objdir/$outputname.exp
+ $opt_dry_run || {
+ $RM $export_symbols
+ eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ }
+ else
+ $opt_dry_run || {
+ eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+ eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ }
+ fi
+ fi
+
+ for dlprefile in $dlprefiles; do
+ func_verbose "extracting global C symbols from '$dlprefile'"
+ func_basename "$dlprefile"
+ name=$func_basename_result
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ # if an import library, we need to obtain dlname
+ if func_win32_import_lib_p "$dlprefile"; then
+ func_tr_sh "$dlprefile"
+ eval "curr_lafile=\$libfile_$func_tr_sh_result"
+ dlprefile_dlbasename=
+ if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then
+ # Use subshell, to avoid clobbering current variable values
+ dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"`
+ if test -n "$dlprefile_dlname"; then
+ func_basename "$dlprefile_dlname"
+ dlprefile_dlbasename=$func_basename_result
+ else
+ # no lafile. user explicitly requested -dlpreopen <import library>.
+ $sharedlib_from_linklib_cmd "$dlprefile"
+ dlprefile_dlbasename=$sharedlib_from_linklib_result
+ fi
+ fi
+ $opt_dry_run || {
+ if test -n "$dlprefile_dlbasename"; then
+ eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
+ else
+ func_warning "Could not compute DLL name from $name"
+ eval '$ECHO ": $name " >> "$nlist"'
+ fi
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
+ $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
+ }
+ else # not an import lib
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ fi
+ ;;
+ *)
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ ;;
+ esac
+ done
+
+ $opt_dry_run || {
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $MV "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if $GREP -v "^: " < "$nlist" |
+ if sort -k 3 </dev/null >/dev/null 2>&1; then
+ sort -k 3
+ else
+ sort +2
+ fi |
+ uniq > "$nlist"S; then
+ :
+ else
+ $GREP -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
+ else
+ echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
+ fi
+
+ func_show_eval '$RM "${nlist}I"'
+ if test -n "$global_symbol_to_import"; then
+ eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I'
+ fi
+
+ echo >> "$output_objdir/$my_dlsyms" "\
+
+/* The mapping between symbol names and symbols. */
+typedef struct {
+ const char *name;
+ void *address;
+} lt_dlsymlist;
+extern LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[];\
+"
+
+ if test -s "$nlist"I; then
+ echo >> "$output_objdir/$my_dlsyms" "\
+static void lt_syminit(void)
+{
+ LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols;
+ for (; symbol->name; ++symbol)
+ {"
+ $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms"
+ echo >> "$output_objdir/$my_dlsyms" "\
+ }
+}"
+ fi
+ echo >> "$output_objdir/$my_dlsyms" "\
+LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[] =
+{ {\"$my_originator\", (void *) 0},"
+
+ if test -s "$nlist"I; then
+ echo >> "$output_objdir/$my_dlsyms" "\
+ {\"@INIT@\", (void *) &lt_syminit},"
+ fi
+
+ case $need_lib_prefix in
+ no)
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
+ ;;
+ *)
+ eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
+ ;;
+ esac
+ echo >> "$output_objdir/$my_dlsyms" "\
+ {0, (void *) 0}
+};
+
+/* This works around a problem in FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_${my_prefix}_LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ } # !$opt_dry_run
+
+ pic_flag_for_symtable=
+ case "$compile_command " in
+ *" -static "*) ;;
+ *)
+ case $host in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object.
But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + $my_pic_p && pic_flag_for_symtable=" $pic_flag" + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' + + # Transform the symbol file into the correct name. + symfileobj=$output_objdir/${my_outputname}S.$objext + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for '$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. 
+func_win32_libid () +{ + $debug_cmd + + win32_libid_type=unknown + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + case $nm_interface in + "MS dumpbin") + if func_cygming_ms_implib_p "$1" || + func_cygming_gnu_implib_p "$1" + then + win32_nmres=import + else + win32_nmres= + fi + ;; + *) + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s|.*|import| + p + q + } + }'` + ;; + esac + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $debug_cmd + + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $debug_cmd + + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive that possess that section. Heuristic: eliminate + # all those that have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' 
or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $debug_cmd + + if func_cygming_gnu_implib_p "$1"; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1"; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result= + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $debug_cmd + + f_ex_an_ar_dir=$1; shift + f_ex_an_ar_oldlib=$1 + if test yes = "$lock_old_archive_extraction"; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test yes = "$lock_old_archive_extraction"; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $debug_cmd + + my_gentop=$1; shift + my_oldlibs=${1+"$@"} + my_oldobjs= + my_xlib= + my_xabs= + my_xdir= + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib=$func_basename_result + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir=$my_gentop/$my_xlib_u + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + func_basename "$darwin_archive" + darwin_base_archive=$func_basename_result + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches; do + func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" + $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" + cd "unfat-$$/$darwin_base_archive-$darwin_arch" + func_extract_an_archive "`pwd`" "$darwin_base_archive" + cd "$darwin_curdir" + $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result=$my_oldobjs +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory where it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ that is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options that match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test yes = "$fast_install"; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + \$ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. 
+ if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* declarations of non-ANSI functions */ +#if defined __MINGW32__ +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined __CYGWIN__ +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined other_platform || defined ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined _MSC_VER +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +#elif defined __MINGW32__ +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined __CYGWIN__ +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined other platforms ... 
*/ +#endif + +#if defined PATH_MAX +# define LT_PATHMAX PATH_MAX +#elif defined MAXPATHLEN +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ + defined __OS2__ +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free (stale); stale = 0; } \ +} while (0) + +#if defined LT_DEBUGWRAPPER +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + size_t tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined HAVE_DOS_BASED_FILE_SYSTEM + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined HAVE_DOS_BASED_FILE_SYSTEM + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = (size_t) (q - p); + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (STREQ (str, pat)) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + size_t len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + size_t orig_value_len = strlen (orig_value); + size_t add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + size_t len = strlen (new_value); + while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[--len] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $debug_cmd + + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_suncc_cstd_abi +# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! +# Several compiler flags select an ABI that is incompatible with the +# Cstd library. Avoid specifying it if any are in CXXFLAGS. 
+func_suncc_cstd_abi () +{ + $debug_cmd + + case " $compile_command " in + *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) + suncc_use_cstd_abi=no + ;; + *) + suncc_use_cstd_abi=yes + ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $debug_cmd + + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # what system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll that has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + os2dllname= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=false + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module=$wl-single_module + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test yes != "$build_libtool_libs" \ + && func_fatal_configuration "cannot build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg=$1 + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. 
+ if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir=$arg + prev= + continue + ;; + dlfiles|dlprefiles) + $preload || { + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=: + } + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test no = "$dlself"; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test dlprefiles = "$prev"; then + dlself=yes + elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test dlfiles = "$prev"; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols=$arg + test -f "$arg" \ + || func_fatal_error "symbol file '$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex=$arg + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir=$arg + prev= + continue + ;; + mllvm) + # Clang does not use LLVM to link, so we can simply discard any + # '-mllvm $arg' options when doing the link step. + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + if test none != "$pic_object"; then + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + fi + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file '$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + os2dllname) + os2dllname=$arg + prev= + continue + ;; + precious_regex) + precious_files_regex=$arg + prev= + continue + ;; + release) + release=-$arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test rpath = "$prev"; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds=$arg + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg=$arg + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "'-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test X-export-symbols = "X$arg"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between '-L' and '$1'" + else + func_fatal_error "need path for '-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of '$dir'" + dir=$absdir + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test X-lc = "X$arg" || test X-lm = "X$arg"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test X-lc = "X$arg" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc due to us having libc/libc_r. + test X-lc = "X$arg" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test X-lc = "X$arg" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test X-lc = "X$arg" && continue + ;; + esac + elif test X-lc_r = "X$arg"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -mllvm) + prev=mllvm + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module=$wl-multi_module + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "'-no-install' is ignored for $host" + func_warning "assuming '-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -os2dllname) + prev=os2dllname + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # -fstack-protector* stack protector flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + # -specs=* GCC specs files + # -stdlib=* select c++ std lib with clang + # -fsanitize=* Clang/GCC memory and address 
sanitizer + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ + -specs=*|-fsanitize=*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + -Z*) + if test os2 = "`expr $host : '.*\(os2\)'`"; then + # OS/2 uses -Zxxx to specify OS/2-specific options + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case $arg in + -Zlinker | -Zstack) + prev=xcompiler + ;; + esac + continue + else + # Otherwise treat like 'Some other compiler flag' below + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + fi + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + test none = "$pic_object" || { + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + } + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test dlfiles = "$prev"; then + # This library was specified with -dlopen. 
+ func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test dlprefiles = "$prev"; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the '$prevarg' option requires an argument" + + if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname=$func_basename_result + libobjs_save=$libobjs + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + # Definition is injected by LT_CONFIG during libtool generation. + func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" + + func_dirname "$output" "/" "" + output_objdir=$func_dirname_result$objdir + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test lib = "$linkmode"; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
+ pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=false + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... + if test lib,link = "$linkmode,$pass"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs=$tmp_deplibs + fi + + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass"; then + libs=$deplibs + deplibs= + fi + if test prog = "$linkmode"; then + case $pass in + dlopen) libs=$dlfiles ;; + dlpreopen) libs=$dlprefiles ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test lib,dlpreopen = "$linkmode,$pass"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs=$dlprefiles + fi + if test dlopen = "$pass"; then + # Collect dlpreopened libraries + save_deplibs=$deplibs + deplibs= + fi + + for deplib in $libs; do + lib= + found=false + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test lib != "$linkmode" && test prog != "$linkmode"; then + func_warning "'-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test lib = "$linkmode"; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext 
.so .a; do + # Search the libtool library + lib=$searchdir/lib$name$search_ext + if test -f "$lib"; then + if test .la = "$search_ext"; then + found=: + else + found=false + fi + break 2 + fi + done + done + if $found; then + # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll=$l + done + if test "X$ll" = "X$old_library"; then # only static version available + found=false + func_dirname "$lib" "" "." + ladir=$func_dirname_result + lib=$ladir/$old_library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + else + # deplib doesn't seem to be a libtool library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + ;; # -l + *.ltframework) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test conv = "$pass" && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + if test scan = "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "'-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test link = "$pass"; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. 
+ case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=false + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=: + fi + ;; + pass_all) + valid_a_lib=: + ;; + esac + if $valid_a_lib; then + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + else + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + fi + ;; + esac + continue + ;; + prog) + if test link != "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + elif test prog = "$linkmode"; then + if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=: + continue + ;; + esac # case $deplib + + $found || test -f "$lib" \ + || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "'$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir=$func_dirname_result + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass" || + { test prog != "$linkmode" && test lib != "$linkmode"; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test conv = "$pass"; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test prog != "$linkmode" && test lib != "$linkmode"; then + func_fatal_error "'$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test yes = "$prefer_static_libs" || + test built,no = "$prefer_static_libs,$installed"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib=$l + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + + # This library was specified with -dlopen. + if test dlopen = "$pass"; then + test -z "$libdir" \ + && func_fatal_error "cannot -dlopen a convenience library: '$lib'" + if test -z "$dlname" || + test yes != "$dlopen_support" || + test no = "$build_libtool_libs" + then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of '$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir=$ladir + fi + ;; + esac + func_basename "$lib" + laname=$func_basename_result + + # Find the relevant object directory and library name. + if test yes = "$installed"; then + if test ! 
-f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library '$lib' was moved." + dir=$ladir + absdir=$abs_ladir + libdir=$abs_ladir + else + dir=$lt_sysroot$libdir + absdir=$lt_sysroot$libdir + fi + test yes = "$hardcode_automatic" && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir=$ladir + absdir=$abs_ladir + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir=$ladir/$objdir + absdir=$abs_ladir/$objdir + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test dlpreopen = "$pass"; then + if test -z "$libdir" && test prog = "$linkmode"; then + func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" + fi + case $host in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test lib = "$linkmode"; then + deplibs="$dir/$old_library $deplibs" + elif test prog,link = "$linkmode,$pass"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test prog = "$linkmode" && test link != "$pass"; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=false + if test no != "$link_all_deplibs" || test -z "$library_names" || + test no = "$build_libtool_libs"; then + linkalldeplibs=: + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if $linkalldeplibs; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test prog,link = "$linkmode,$pass"; then + if test -n "$library_names" && + { { test no = "$prefer_static_libs" || + test built,yes = "$prefer_static_libs,$installed"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then + # Make sure the rpath contains only unique directories. + case $temp_rpath: in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if $alldeplibs && + { test pass_all = "$deplibs_check_method" || + { test yes = "$build_libtool_libs" && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test built = "$use_static_libs" && test yes = "$installed"; then + use_static_libs=no + fi + if test -n "$library_names" && + { test no = "$use_static_libs" || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc* | *os2*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test no = "$installed"; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule= + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule=$dlpremoduletest + break + fi + done + if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then + echo + if test prog = "$linkmode"; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test lib = "$linkmode" && + test yes = "$hardcode_into_libs"; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname=$1 + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname=$dlname + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc* | *os2*) + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + esac + eval soname=\"$soname_spec\" + else + soname=$realname + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot=$soname + func_basename "$soroot" + soname=$func_basename_result + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from '$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for '$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test prog = "$linkmode" || test relink != "$opt_mode"; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test no = "$hardcode_direct"; then + add=$dir/$linklib + case $host in + *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; + *-*-sysv4*uw2*) add_dir=-L$dir ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir=-L$dir ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we cannot + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library"; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add=$dir/$old_library + fi + elif test -n "$old_library"; then + add=$dir/$old_library + fi + fi + esac + elif test no = "$hardcode_minus_L"; then + case $host in + *-*-sunos*) add_shlibpath=$dir ;; + esac + add_dir=-L$dir + add=-l$name + elif test no = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + relink) + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$dir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$absdir + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test yes != "$lib_linked"; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test prog = "$linkmode"; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test yes != "$hardcode_direct" && + test yes != "$hardcode_minus_L" && + test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test prog = "$linkmode" || test relink = "$opt_mode"; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$libdir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$libdir + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add=-l$name + elif test yes = "$hardcode_automatic"; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib"; then + add=$inst_prefix_dir$libdir/$linklib + else + add=$libdir/$linklib + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir=-L$libdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + fi + + if test prog = "$linkmode"; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test prog = "$linkmode"; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test unsupported != "$hardcode_direct"; then + test -n "$old_library" && linklib=$old_library + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test yes = "$build_libtool_libs"; then + # Not a shared library + if test pass_all != "$deplibs_check_method"; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system cannot link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. 
But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test yes = "$module"; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test lib = "$linkmode"; then + if test -n "$dependency_libs" && + { test yes != "$hardcode_into_libs" || + test yes = "$build_old_libs" || + test yes = "$link_static"; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs=$temp_deplibs + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test no != "$link_all_deplibs"; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path=$deplib ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of '$dir'" + absdir=$dir + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names"; then + for tmp in $deplibrary_names; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl"; then + depdepl=$absdir/$objdir/$depdepl + darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" + func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" + path= + fi + fi + ;; + *) + path=-L$absdir/$objdir + ;; + esac + else + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "'$deplib' seems to be moved" + + path=-L$absdir + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test link = "$pass"; then + if test prog = "$linkmode"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs=$newdependency_libs + if test dlpreopen = "$pass"; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test dlopen != "$pass"; then + test conv = "$pass" || { + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + } + + if test prog,link = "$linkmode,$pass"; then + vars="compile_deplibs finalize_deplibs" + else + vars=deplibs + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + + # Add Sun CC postdeps if required: + test CXX = "$tagname" && { + case $host_os in + linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C++ 5.9 + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + + solaris*) + func_cc_basename "$CC" + case $func_cc_basename_result in + CC* | sunCC*) + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + esac + } + + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i= + ;; + esac + if test -n "$i"; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test prog = "$linkmode"; then + dlfiles=$newdlfiles + fi + if test prog = "$linkmode" || test lib = "$linkmode"; then + dlprefiles=$newdlprefiles + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "'-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "'-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs=$output + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form 'libNAME.la'. 
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test no = "$module" \ + && func_fatal_help "libtool library '$output' must begin with 'lib'" + + if test no != "$need_lib_prefix"; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test pass_all != "$deplibs_check_method"; then + func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test no = "$dlself" \ + || func_warning "'-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test 1 -lt "$#" \ + && func_warning "ignoring multiple '-rpath's for a libtool library" + + install_libdir=$1 + + oldlibs= + if test -z "$rpath"; then + if test yes = "$build_libtool_libs"; then + # Building a libtool convenience library. + # Some compilers have problems with a '.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "'-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs=$IFS; IFS=: + set dummy $vinfo 0 0 0 + shift + IFS=$save_ifs + + test -n "$7" && \ + func_fatal_help "too many parameters to '-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major=$1 + number_minor=$2 + number_revision=$3 + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # that has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|freebsd-elf|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_revision + ;; + freebsd-aout|qnx|sunos) + current=$number_major + revision=$number_minor + age=0 + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_minor + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type '$version_type'" + ;; + esac + ;; + no) + current=$1 + revision=$2 + age=$3 + ;; + esac + + # Check that each of the things are valid numbers. 
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT '$current' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION '$revision' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE '$age' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE '$age' is greater than the current interface number '$current'" + func_fatal_error "'$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + # On Darwin other compilers + case $CC in + nagfor*) + verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + ;; + *) + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + esac + ;; + + freebsd-aout) + major=.$current + versuffix=.$current.$revision + ;; + + freebsd-elf) + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + irix | nonstopux) + if test no = "$lt_irix_increment"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring=$verstring_prefix$major.$revision + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test 0 -ne "$loop"; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring_prefix$major.$iface:$verstring + done + + # Before this point, $major must not contain '.'. + major=.$major + versuffix=$major.$revision + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=.$current.$age.$revision + verstring=$current.$age.$revision + + # Add in all the interfaces that we are compatible with. + loop=$age + while test 0 -ne "$loop"; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring:$iface.0 + done + + # Make executables depend on our current version. 
+ func_append verstring ":$current.0" + ;; + + qnx) + major=.$current + versuffix=.$current + ;; + + sco) + major=.$current + versuffix=.$current + ;; + + sunos) + major=.$current + versuffix=.$current.$revision + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 file systems. + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + + *) + func_fatal_configuration "unknown library version type '$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring=0.0 + ;; + esac + if test no = "$need_version"; then + versuffix= + else + versuffix=.0.0 + fi + fi + + # Remove version info from name if versioning should be avoided + if test yes,no = "$avoid_version,$need_version"; then + major= + versuffix= + verstring= + fi + + # Check to see if the archive will have undefined symbols. + if test yes = "$allow_undefined"; then + if test unsupported = "$allow_undefined_flag"; then + if test yes = "$build_old_libs"; then + func_warning "undefined symbols not allowed in $host shared libraries; building static only" + build_libtool_libs=no + else + func_fatal_error "can't build $host shared library unless -no-undefined is specified" + fi + fi + else + # Don't allow undefined symbols. + allow_undefined_flag=$no_undefined_flag + fi + + fi + + func_generate_dlsyms "$libname" "$libname" : + func_append libobjs " $symfileobj" + test " " = "$libobjs" && libobjs= + + if test relink != "$opt_mode"; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) + if test -n "$precious_files_regex"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles=$dlfiles + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles=$dlprefiles + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test yes = "$build_libtool_libs"; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test yes = "$build_libtool_need_lc"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release= + versuffix= + major= + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib=$potent_lib + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | $SED 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; + *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib= + ;; + esac + fi + if test -n "$a_deplib"; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib=$potent_lib # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs= + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + for i in $predeps $postdeps; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test none = "$deplibs_check_method"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test yes = "$droppeddeps"; then + if test yes = "$module"; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test no = "$allow_undefined"; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs=$new_libs + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test yes = "$build_libtool_libs"; then + # Remove $wl instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test yes = "$hardcode_into_libs"; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath=$finalize_rpath + test relink = "$opt_mode" || rpath=$compile_rpath$rpath + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath=$finalize_shlibpath + test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname=$1 + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname=$realname + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib=$output_objdir/$realname + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols=$output_objdir/$libname.uexp + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + func_dll_def_p "$export_symbols" || { + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols=$export_symbols + export_symbols= + always_export_symbols=yes + } + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs=$IFS; IFS='~' + for cmd1 in $cmds; do + IFS=$save_ifs + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test yes = "$try_normal_branch" \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=$output_objdir/$output_la.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS=$save_ifs + if test -n "$export_symbols_regex" && test : != "$skipped_export"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test : != "$skipped_export" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs=$tmp_deplibs + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test yes = "$compiler_needs_object" && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test : != "$skipped_export" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then + output=$output_objdir/$output_la.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then + output=$output_objdir/$output_la.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test yes = "$compiler_needs_object"; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-$k.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test -z "$objlist" || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test 1 -eq "$k"; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-$k.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-$k.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + ${skipped_export-false} && { + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + } + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs=$IFS; IFS='~' + for cmd in $concat_cmds; do + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + ${skipped_export-false} && { + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + } + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. 
+ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs=$IFS; IFS='~' + for cmd in $cmds; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test yes = "$module" || test yes = "$export_dynamic"; then + # On all known operating systems, these are identical. + dlname=$soname + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "'-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object '$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj=$output + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # if reload_cmds runs $LD directly, get rid of -Wl from + # whole_archive_flag_spec and hope we can get by with turning comma + # into space. 
+ case $reload_cmds in + *\$LD[\ \$]*) wl= ;; + esac + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags + else + gentop=$output_objdir/${obj}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test yes = "$build_libtool_libs" || libobjs=$non_pic_objects + + # Create the old-style object. + reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs + + output=$obj + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + test yes = "$build_libtool_libs" || { + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + } + + if test -n "$pic_flag" || test default != "$pic_mode"; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output=$libobj + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "'-release' is ignored for programs" + + $preload \ + && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ + && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test CXX = "$tagname"; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " $wl-bind_at_load" + func_append finalize_command " $wl-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs=$new_libs + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath=$rpath + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath=$rpath + + if test -n "$libobjs" && test yes = "$build_old_libs"; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" false + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=: + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=false + ;; + *cygwin* | *mingw* ) + test yes = "$build_libtool_libs" || wrappers_required=false + ;; + *) + if test no = "$need_relink" || test yes != "$build_libtool_libs"; then + wrappers_required=false + fi + ;; + esac + $wrappers_required || { + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command=$compile_command$compile_rpath + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.$objext"; then + func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' + fi + + exit $exit_status + } + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test yes = "$no_install"; then + # We don't need to create a wrapper script. + link_command=$compile_var$compile_command$compile_rpath + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + case $hardcode_action,$fast_install in + relink,*) + # Fast installation is not supported + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "'$output' will be relinked during installation" + ;; + *,yes) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + ;; + *,no) + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + ;; + *,needless) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command= + ;; + esac + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource=$output_path/$objdir/lt-$output_name.c + cwrapper=$output_path/$output_name.exe + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. 
If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host"; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + case $build_libtool_libs in + convenience) + oldobjs="$libobjs_save $symfileobj" + addlibs=$convenience + build_libtool_libs=no + ;; + module) + oldobjs=$libobjs_save + addlibs=$old_convenience + build_libtool_libs=no + ;; + *) + oldobjs="$old_deplibs $non_pic_objects" + $preload && test -f "$symfileobj" \ + && func_append oldobjs " $symfileobj" + addlibs=$old_convenience + ;; + esac + + if test -n "$addlibs"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase=$func_basename_result + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! 
-f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj"; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test -z "$oldobjs"; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test yes = "$build_old_libs" && old_library=$libname.$libext + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test yes = "$hardcode_automatic"; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test yes = "$installed"; then + if test -z "$install_libdir"; then + break + fi + output=$output_objdir/${outputname}i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name=$func_basename_result + func_resolve_sysroot "$deplib" + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs=$newdependency_libs + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles=$newdlprefiles + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles=$newdlprefiles + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test -n "$bindir"; then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result/$dlname + else + # Otherwise fall back on heuristic. 
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test no,yes = "$installed,$need_relink"; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +if test link = "$opt_mode" || test relink = "$opt_mode"; then + func_mode_link ${1+"$@"} +fi + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $debug_cmd + + RM=$nonopt + files= + rmforce=false + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=: ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir=$func_dirname_result + if test . = "$dir"; then + odir=$objdir + else + odir=$dir/$objdir + fi + func_basename "$file" + name=$func_basename_result + test uninstall = "$opt_mode" && odir=$dir + + # Remember odir for removal later, being careful to avoid duplicates + if test clean = "$opt_mode"; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif $rmforce; then + continue + fi + + rmfiles=$file + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case $opt_mode in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && test none != "$pic_object"; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && test none != "$non_pic_object"; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test clean = "$opt_mode"; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.$objext" + if test yes = "$fast_install" && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name"; then + func_append rmfiles " $odir/lt-$noexename.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the $objdir's in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then + func_mode_uninstall ${1+"$@"} +fi + +test -z "$opt_mode" && { + help=$generic_help + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode '$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# where we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi 2.m4 new file mode 100644 index 0000000..3e28602 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi 2.m4 @@ -0,0 +1,13 @@ +AC_DEFUN([GCC_AS_CFI_PSEUDO_OP], +[AC_CACHE_CHECK([assembler .cfi pseudo-op support], + gcc_cv_as_cfi_pseudo_op, [ + gcc_cv_as_cfi_pseudo_op=unknown + AC_TRY_COMPILE([asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc");],, + [gcc_cv_as_cfi_pseudo_op=yes], + [gcc_cv_as_cfi_pseudo_op=no]) + ]) + if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then + AC_DEFINE(HAVE_AS_CFI_PSEUDO_OP, 1, + [Define if your assembler supports .cfi_* directives.]) + fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi.m4 new file mode 100644 index 0000000..3e28602 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi.m4 @@ -0,0 +1,13 @@ +AC_DEFUN([GCC_AS_CFI_PSEUDO_OP], +[AC_CACHE_CHECK([assembler .cfi pseudo-op support], + gcc_cv_as_cfi_pseudo_op, [ + gcc_cv_as_cfi_pseudo_op=unknown + AC_TRY_COMPILE([asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc");],, + [gcc_cv_as_cfi_pseudo_op=yes], + [gcc_cv_as_cfi_pseudo_op=no]) + ]) + if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then + AC_DEFINE(HAVE_AS_CFI_PSEUDO_OP, 1, + [Define if your assembler supports .cfi_* directives.]) + fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag 2.m4 new file mode 100644 index 0000000..dd6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag 2.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 8 + +AC_DEFUN([AX_APPEND_FLAG], +[dnl +AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_SET_IF +AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])]) +AS_VAR_SET_IF(FLAGS,[ + AS_CASE([" AS_VAR_GET(FLAGS) "], + [*" $1 "*], [AC_RUN_LOG([: FLAGS already contains $1])], + [ + AS_VAR_APPEND(FLAGS,[" $1"]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) + ], + [ + AS_VAR_SET(FLAGS,[$1]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) +AS_VAR_POPDEF([FLAGS])dnl +])dnl AX_APPEND_FLAG diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag.m4 new file mode 100644 index 0000000..dd6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 8 + +AC_DEFUN([AX_APPEND_FLAG], +[dnl +AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_SET_IF +AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])]) +AS_VAR_SET_IF(FLAGS,[ + AS_CASE([" AS_VAR_GET(FLAGS) "], + [*" $1 "*], [AC_RUN_LOG([: FLAGS already contains $1])], + [ + AS_VAR_APPEND(FLAGS,[" $1"]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) + ], + [ + AS_VAR_SET(FLAGS,[$1]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) +AS_VAR_POPDEF([FLAGS])dnl +])dnl AX_APPEND_FLAG diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt 2.m4 new file mode 100644 index 0000000..9e7f1ee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt 2.m4 @@ -0,0 +1,194 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cc_maxopt.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CC_MAXOPT +# +# DESCRIPTION +# +# Try to turn on "good" C optimization flags for various compilers and +# architectures, for some definition of "good". (In our case, good for +# FFTW and hopefully for other scientific codes. Modify as needed.) +# +# The user can override the flags by setting the CFLAGS environment +# variable. The user can also specify --enable-portable-binary in order to +# disable any optimization flags that might result in a binary that only +# runs on the host architecture. +# +# Note also that the flags assume that ANSI C aliasing rules are followed +# by the code (e.g. 
for gcc's -fstrict-aliasing), and that floating-point +# computations can be re-ordered as needed. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_COMPILER_VENDOR, +# AX_GCC_ARCHFLAG, AX_GCC_X86_CPUID. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 17 + +AC_DEFUN([AX_CC_MAXOPT], +[ +AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AX_COMPILER_VENDOR]) +AC_REQUIRE([AC_CANONICAL_HOST]) + +AC_ARG_ENABLE(portable-binary, [AS_HELP_STRING([--enable-portable-binary], [disable compiler optimizations that would produce unportable binaries])], + acx_maxopt_portable=$enableval, acx_maxopt_portable=no) + +# Try to determine "good" native compiler flags if none specified via CFLAGS +if test "$ac_test_CFLAGS" != "set"; then + CFLAGS="" + case $ax_cv_c_compiler_vendor in + dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host" + if test "x$acx_maxopt_portable" = xno; then + CFLAGS="$CFLAGS -arch host" + fi;; + + sun) CFLAGS="-native -fast -xO5 -dalign" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS -xarch=generic" + fi;; + + hp) CFLAGS="+Oall +Optrs_ansi +DSnative" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS +DAportable" + fi;; + + ibm) if test "x$acx_maxopt_portable" = xno; then + xlc_opt="-qarch=auto -qtune=auto" + else + xlc_opt="-qtune=auto" + fi + AX_CHECK_COMPILE_FLAG($xlc_opt, + CFLAGS="-O3 -qansialias -w $xlc_opt", + [CFLAGS="-O3 -qansialias -w" + echo "******************************************************" + echo "* You seem to have the IBM C compiler. It is *" + echo "* recommended for best performance that you use: *" + echo "* *" + echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *" + echo "* ^^^ ^^^ *" + echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *" + echo "* CPU you have. (Set the CFLAGS environment var. *" + echo "* and re-run configure.) For more info, man cc. 
*" + echo "******************************************************"]) + ;; + + intel) CFLAGS="-O3 -ansi_alias" + if test "x$acx_maxopt_portable" = xno; then + icc_archflag=unknown + icc_flags="" + case $host_cpu in + i686*|x86_64*) + # icc accepts gcc assembly syntax, so these should work: + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) icc_flags="-xK" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) icc_flags="-xSSE2 -xB -xK" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) icc_flags="-xSSE3 -xP -xO -xB -xK" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) icc_flags="-xSSSE3 -xT -xB -xK" ;; + *1?6[[7d]]?:*:*:*) icc_flags="-xSSE4.1 -xS -xT -xB -xK" ;; + *1?6[[aef]]?:*:*:*|*2?6[[5cef]]?:*:*:*) icc_flags="-xSSE4.2 -xS -xT -xB -xK" ;; + *2?6[[ad]]?:*:*:*) icc_flags="-xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[ae]]?:*:*:*) icc_flags="-xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) icc_flags="-xCORE-AVX2 -xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) icc_flags="-xSSE3 -xP -xO -xN -xW -xK" ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) icc_flags="-xSSE2 -xN -xW -xK" ;; + esac ;; + esac ;; + esac + if test "x$icc_flags" != x; then + for flag in $icc_flags; do + AX_CHECK_COMPILE_FLAG($flag, [icc_archflag=$flag; break]) + done + fi + AC_MSG_CHECKING([for icc architecture flag]) + AC_MSG_RESULT($icc_archflag) + if test "x$icc_archflag" != xunknown; then + CFLAGS="$CFLAGS $icc_archflag" + fi + fi + ;; + + gnu) + # default optimization flags for gcc on all systems + CFLAGS="-O3 -fomit-frame-pointer" + + # -malign-double for x86 systems + # libffi local change -- don't align double, as it changes the ABI + # AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double") + + # -fstrict-aliasing for gcc-2.95+ + AX_CHECK_COMPILE_FLAG(-fstrict-aliasing, + CFLAGS="$CFLAGS -fstrict-aliasing") + + # note that we enable "unsafe" fp optimization with other compilers, too + AX_CHECK_COMPILE_FLAG(-ffast-math, CFLAGS="$CFLAGS -ffast-math") + + AX_GCC_ARCHFLAG($acx_maxopt_portable) + ;; + + microsoft) + # default optimization flags for MSVC opt builds + CFLAGS="-O2" + ;; + esac + + if test -z "$CFLAGS"; then + echo "" + echo "********************************************************" + echo "* WARNING: Don't know the best CFLAGS for this system *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "* (otherwise, a default of CFLAGS=-O3 will be used) *" + echo "********************************************************" + echo "" + CFLAGS="-O3" + fi + + AX_CHECK_COMPILE_FLAG($CFLAGS, [], [ + echo "" + echo "********************************************************" + echo "* WARNING: The guessed CFLAGS don't seem to work with *" + echo "* your compiler. *" + echo "* Use ./configure CFLAGS=... 
to specify your own flags *" + echo "********************************************************" + echo "" + CFLAGS="" + ]) + +fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 new file mode 100644 index 0000000..9e7f1ee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 @@ -0,0 +1,194 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cc_maxopt.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CC_MAXOPT +# +# DESCRIPTION +# +# Try to turn on "good" C optimization flags for various compilers and +# architectures, for some definition of "good". (In our case, good for +# FFTW and hopefully for other scientific codes. Modify as needed.) +# +# The user can override the flags by setting the CFLAGS environment +# variable. The user can also specify --enable-portable-binary in order to +# disable any optimization flags that might result in a binary that only +# runs on the host architecture. +# +# Note also that the flags assume that ANSI C aliasing rules are followed +# by the code (e.g. for gcc's -fstrict-aliasing), and that floating-point +# computations can be re-ordered as needed. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_COMPILER_VENDOR, +# AX_GCC_ARCHFLAG, AX_GCC_X86_CPUID. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
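A minimal configure.ac sketch for the AX_CC_MAXOPT macro above, assuming its companion macros (AX_CHECK_COMPILE_FLAG, AX_COMPILER_VENDOR, AX_GCC_ARCHFLAG, AX_GCC_X86_CPUID) are on the aclocal include path, as they are in this m4 directory; the project name is a placeholder:

    AC_INIT([demo], [1.0])
    AC_CANONICAL_HOST
    AC_PROG_CC
    # Chooses aggressive, vendor-specific CFLAGS unless the user set CFLAGS
    # explicitly; --enable-portable-binary keeps the result architecture-neutral.
    AX_CC_MAXOPT
    AC_CONFIG_FILES([Makefile])
    AC_OUTPUT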
+ +#serial 17 + +AC_DEFUN([AX_CC_MAXOPT], +[ +AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AX_COMPILER_VENDOR]) +AC_REQUIRE([AC_CANONICAL_HOST]) + +AC_ARG_ENABLE(portable-binary, [AS_HELP_STRING([--enable-portable-binary], [disable compiler optimizations that would produce unportable binaries])], + acx_maxopt_portable=$enableval, acx_maxopt_portable=no) + +# Try to determine "good" native compiler flags if none specified via CFLAGS +if test "$ac_test_CFLAGS" != "set"; then + CFLAGS="" + case $ax_cv_c_compiler_vendor in + dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host" + if test "x$acx_maxopt_portable" = xno; then + CFLAGS="$CFLAGS -arch host" + fi;; + + sun) CFLAGS="-native -fast -xO5 -dalign" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS -xarch=generic" + fi;; + + hp) CFLAGS="+Oall +Optrs_ansi +DSnative" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS +DAportable" + fi;; + + ibm) if test "x$acx_maxopt_portable" = xno; then + xlc_opt="-qarch=auto -qtune=auto" + else + xlc_opt="-qtune=auto" + fi + AX_CHECK_COMPILE_FLAG($xlc_opt, + CFLAGS="-O3 -qansialias -w $xlc_opt", + [CFLAGS="-O3 -qansialias -w" + echo "******************************************************" + echo "* You seem to have the IBM C compiler. It is *" + echo "* recommended for best performance that you use: *" + echo "* *" + echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *" + echo "* ^^^ ^^^ *" + echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *" + echo "* CPU you have. (Set the CFLAGS environment var. *" + echo "* and re-run configure.) For more info, man cc. *" + echo "******************************************************"]) + ;; + + intel) CFLAGS="-O3 -ansi_alias" + if test "x$acx_maxopt_portable" = xno; then + icc_archflag=unknown + icc_flags="" + case $host_cpu in + i686*|x86_64*) + # icc accepts gcc assembly syntax, so these should work: + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) icc_flags="-xK" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) icc_flags="-xSSE2 -xB -xK" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) icc_flags="-xSSE3 -xP -xO -xB -xK" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) icc_flags="-xSSSE3 -xT -xB -xK" ;; + *1?6[[7d]]?:*:*:*) icc_flags="-xSSE4.1 -xS -xT -xB -xK" ;; + *1?6[[aef]]?:*:*:*|*2?6[[5cef]]?:*:*:*) icc_flags="-xSSE4.2 -xS -xT -xB -xK" ;; + *2?6[[ad]]?:*:*:*) icc_flags="-xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[ae]]?:*:*:*) icc_flags="-xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) icc_flags="-xCORE-AVX2 -xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) icc_flags="-xSSE3 -xP -xO -xN -xW -xK" ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) icc_flags="-xSSE2 -xN -xW -xK" ;; + esac ;; + esac ;; + esac + if test "x$icc_flags" != x; then + for flag in $icc_flags; do + AX_CHECK_COMPILE_FLAG($flag, [icc_archflag=$flag; break]) + done + fi + AC_MSG_CHECKING([for icc architecture flag]) + AC_MSG_RESULT($icc_archflag) + if test "x$icc_archflag" != xunknown; then + CFLAGS="$CFLAGS $icc_archflag" + fi + fi + ;; + + gnu) + # default optimization flags for gcc on all systems + CFLAGS="-O3 -fomit-frame-pointer" + + # -malign-double for x86 systems + # libffi local change -- don't align double, as it changes the ABI + # 
AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double") + + # -fstrict-aliasing for gcc-2.95+ + AX_CHECK_COMPILE_FLAG(-fstrict-aliasing, + CFLAGS="$CFLAGS -fstrict-aliasing") + + # note that we enable "unsafe" fp optimization with other compilers, too + AX_CHECK_COMPILE_FLAG(-ffast-math, CFLAGS="$CFLAGS -ffast-math") + + AX_GCC_ARCHFLAG($acx_maxopt_portable) + ;; + + microsoft) + # default optimization flags for MSVC opt builds + CFLAGS="-O2" + ;; + esac + + if test -z "$CFLAGS"; then + echo "" + echo "********************************************************" + echo "* WARNING: Don't know the best CFLAGS for this system *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "* (otherwise, a default of CFLAGS=-O3 will be used) *" + echo "********************************************************" + echo "" + CFLAGS="-O3" + fi + + AX_CHECK_COMPILE_FLAG($CFLAGS, [], [ + echo "" + echo "********************************************************" + echo "* WARNING: The guessed CFLAGS don't seem to work with *" + echo "* your compiler. *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "********************************************************" + echo "" + CFLAGS="" + ]) + +fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all 2.m4 new file mode 100644 index 0000000..094577e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all 2.m4 @@ -0,0 +1,122 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cflags_warn_all.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_CXXFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_FCFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# +# DESCRIPTION +# +# Try to find a compiler option that enables most reasonable warnings. +# +# For the GNU compiler it will be -Wall (and -ansi -pedantic) The result +# is added to the shellvar being CFLAGS, CXXFLAGS, or FCFLAGS by default. +# +# Currently this macro knows about the GCC, Solaris, Digital Unix, AIX, +# HP-UX, IRIX, NEC SX-5 (Super-UX 10), Cray J90 (Unicos 10.0.0.8), and +# Intel compilers. For a given compiler, the Fortran flags are much more +# experimental than their C equivalents. +# +# - $1 shell-variable-to-add-to : CFLAGS, CXXFLAGS, or FCFLAGS +# - $2 add-value-if-not-found : nothing +# - $3 action-if-found : add value to shellvariable +# - $4 action-if-not-found : nothing +# +# NOTE: These macros depend on AX_APPEND_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2010 Rhys Ulerich +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 16 + +AC_DEFUN([AX_FLAGS_WARN_ALL],[dnl +AS_VAR_PUSHDEF([FLAGS],[_AC_LANG_PREFIX[]FLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_[]_AC_LANG_ABBREV[]flags_warn_all])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum warnings], +VAR,[VAR="no, unknown" +ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-warn all % -warn all" dnl Intel + "-pedantic % -Wall" dnl GCC + "-xstrconst % -v" dnl Solaris C + "-std1 % -verbose -w0 -warnprotos" dnl Digital Unix + "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX + "-ansi -ansiE % -fullwarn" dnl IRIX + "+ESlit % +w1" dnl HP-UX C + "-Xc % -pvctl[,]fullmsg" dnl NEC SX-5 (Super-UX 10) + "-h conform % -h msglevel 2" dnl Cray C (Unicos) + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done +FLAGS="$ac_save_[]FLAGS" +]) +AS_VAR_POPDEF([FLAGS])dnl +AX_REQUIRE_DEFINED([AX_APPEND_FLAG]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_default($4,[m4_ifval($2,[AX_APPEND_FLAG([$2], [$1])])]) ;; + *) m4_default($3,[AX_APPEND_FLAG([$VAR], [$1])]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +])dnl AX_FLAGS_WARN_ALL +dnl implementation tactics: +dnl the for-argument contains a list of options. The first part of +dnl these does only exist to detect the compiler - usually it is +dnl a global option to enable -ansi or -extrawarnings. All other +dnl compilers will fail about it. That was needed since a lot of +dnl compilers will give false positives for some option-syntax +dnl like -Woption or -Xoption as they think of it is a pass-through +dnl to later compile stages or something. The "%" is used as a +dnl delimiter. A non-option comment can be given after "%%" marks +dnl which will be shown but not added to the respective C/CXXFLAGS. 
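A minimal configure.ac sketch for AX_CFLAGS_WARN_ALL as implemented above, collecting the warning option in a separate substituted variable instead of CFLAGS; the variable name WARN_CFLAGS is an illustrative choice:

    AC_INIT([demo], [1.0])
    AC_PROG_CC
    # Probes for the compiler's "most warnings" option (-Wall on GCC) and
    # appends it to WARN_CFLAGS; with no argument it would go to CFLAGS.
    AX_CFLAGS_WARN_ALL([WARN_CFLAGS])
    AC_SUBST([WARN_CFLAGS])
    AC_OUTPUT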
+ +AC_DEFUN([AX_CFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C]) +]) + +AC_DEFUN([AX_CXXFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C++]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C++]) +]) + +AC_DEFUN([AX_FCFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([Fortran]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([Fortran]) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 new file mode 100644 index 0000000..094577e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 @@ -0,0 +1,122 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cflags_warn_all.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_CXXFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_FCFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# +# DESCRIPTION +# +# Try to find a compiler option that enables most reasonable warnings. +# +# For the GNU compiler it will be -Wall (and -ansi -pedantic) The result +# is added to the shellvar being CFLAGS, CXXFLAGS, or FCFLAGS by default. +# +# Currently this macro knows about the GCC, Solaris, Digital Unix, AIX, +# HP-UX, IRIX, NEC SX-5 (Super-UX 10), Cray J90 (Unicos 10.0.0.8), and +# Intel compilers. For a given compiler, the Fortran flags are much more +# experimental than their C equivalents. +# +# - $1 shell-variable-to-add-to : CFLAGS, CXXFLAGS, or FCFLAGS +# - $2 add-value-if-not-found : nothing +# - $3 action-if-found : add value to shellvariable +# - $4 action-if-not-found : nothing +# +# NOTE: These macros depend on AX_APPEND_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2010 Rhys Ulerich +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
+ +#serial 16 + +AC_DEFUN([AX_FLAGS_WARN_ALL],[dnl +AS_VAR_PUSHDEF([FLAGS],[_AC_LANG_PREFIX[]FLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_[]_AC_LANG_ABBREV[]flags_warn_all])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum warnings], +VAR,[VAR="no, unknown" +ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-warn all % -warn all" dnl Intel + "-pedantic % -Wall" dnl GCC + "-xstrconst % -v" dnl Solaris C + "-std1 % -verbose -w0 -warnprotos" dnl Digital Unix + "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX + "-ansi -ansiE % -fullwarn" dnl IRIX + "+ESlit % +w1" dnl HP-UX C + "-Xc % -pvctl[,]fullmsg" dnl NEC SX-5 (Super-UX 10) + "-h conform % -h msglevel 2" dnl Cray C (Unicos) + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done +FLAGS="$ac_save_[]FLAGS" +]) +AS_VAR_POPDEF([FLAGS])dnl +AX_REQUIRE_DEFINED([AX_APPEND_FLAG]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_default($4,[m4_ifval($2,[AX_APPEND_FLAG([$2], [$1])])]) ;; + *) m4_default($3,[AX_APPEND_FLAG([$VAR], [$1])]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +])dnl AX_FLAGS_WARN_ALL +dnl implementation tactics: +dnl the for-argument contains a list of options. The first part of +dnl these does only exist to detect the compiler - usually it is +dnl a global option to enable -ansi or -extrawarnings. All other +dnl compilers will fail about it. That was needed since a lot of +dnl compilers will give false positives for some option-syntax +dnl like -Woption or -Xoption as they think of it is a pass-through +dnl to later compile stages or something. The "%" is used as a +dnl delimiter. A non-option comment can be given after "%%" marks +dnl which will be shown but not added to the respective C/CXXFLAGS. + +AC_DEFUN([AX_CFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C]) +]) + +AC_DEFUN([AX_CXXFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C++]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C++]) +]) + +AC_DEFUN([AX_FCFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([Fortran]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([Fortran]) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag 2.m4 new file mode 100644 index 0000000..bd753b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag 2.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. 
+# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 new file mode 100644 index 0000000..bd753b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
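A minimal configure.ac sketch for AX_CHECK_COMPILE_FLAG, using the EXTRA-FLAGS slot to pass -Werror so that an "unknown option" warning cannot make the probe succeed by accident; the probed flag is an illustrative example and AX_APPEND_FLAG is assumed to be available, as it is in this m4 directory:

    AC_INIT([demo], [1.0])
    AC_PROG_CC
    # Only append -fvisibility=hidden to CFLAGS if the compiler accepts it.
    AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],
      [AX_APPEND_FLAG([-fvisibility=hidden])],
      [AC_MSG_NOTICE([compiler does not accept -fvisibility=hidden])],
      [-Werror])
    AC_OUTPUT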
+ +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor 2.m4 new file mode 100644 index 0000000..73efdb0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor 2.m4 @@ -0,0 +1,88 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_COMPILER_VENDOR +# +# DESCRIPTION +# +# Determine the vendor of the C/C++ compiler, e.g., gnu, intel, ibm, sun, +# hp, borland, comeau, dec, cray, kai, lcc, metrowerks, sgi, microsoft, +# watcom, etc. The vendor is returned in the cache variable +# $ax_cv_c_compiler_vendor for C and $ax_cv_cxx_compiler_vendor for C++. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
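A minimal configure.ac sketch for the AX_COMPILER_VENDOR macro described above; after the call the vendor name is available in $ax_cv_c_compiler_vendor (or $ax_cv_cxx_compiler_vendor for C++), and the per-vendor flags chosen below are purely illustrative:

    AC_INIT([demo], [1.0])
    AC_PROG_CC
    AX_COMPILER_VENDOR
    AS_CASE([$ax_cv_c_compiler_vendor],
      [gnu|clang], [CFLAGS="$CFLAGS -Wall"],
      [intel],     [CFLAGS="$CFLAGS -w2"],
      [AC_MSG_NOTICE([unhandled compiler vendor: $ax_cv_c_compiler_vendor])])
    AC_OUTPUT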
+ +#serial 17 + +AC_DEFUN([AX_COMPILER_VENDOR], +[AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, + dnl Please add if possible support to ax_compiler_version.m4 + [# note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + cray: _CRAYC + fujitsu: __FUJITSU + sdcc: SDCC, __SDCC + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__CODEGEARC__,__TURBOC__ + comeau: __COMO__ + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + tcc: __TINYC__ + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ + #if !($vencpp) + thisisanerror; + #endif + ])], [break]) + done + ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1` + ]) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 new file mode 100644 index 0000000..73efdb0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 @@ -0,0 +1,88 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_COMPILER_VENDOR +# +# DESCRIPTION +# +# Determine the vendor of the C/C++ compiler, e.g., gnu, intel, ibm, sun, +# hp, borland, comeau, dec, cray, kai, lcc, metrowerks, sgi, microsoft, +# watcom, etc. The vendor is returned in the cache variable +# $ax_cv_c_compiler_vendor for C and $ax_cv_cxx_compiler_vendor for C++. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. 
When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 17 + +AC_DEFUN([AX_COMPILER_VENDOR], +[AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, + dnl Please add if possible support to ax_compiler_version.m4 + [# note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + cray: _CRAYC + fujitsu: __FUJITSU + sdcc: SDCC, __SDCC + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__CODEGEARC__,__TURBOC__ + comeau: __COMO__ + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + tcc: __TINYC__ + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ + #if !($vencpp) + thisisanerror; + #endif + ])], [break]) + done + ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1` + ]) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args 2.m4 new file mode 100644 index 0000000..9237efe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args 2.m4 @@ -0,0 +1,49 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_configure_args.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CONFIGURE_ARGS +# +# DESCRIPTION +# +# Helper macro for AX_ENABLE_BUILDDIR. +# +# The traditional way of starting a subdir-configure is running the script +# with ${1+"$@"} but since autoconf 2.60 this is broken. Instead we have +# to rely on eval'ing $ac_configure_args however some old autoconf +# versions do not provide that. To ensure maximum portability of autoconf +# extension macros this helper can be AC_REQUIRE'd so that +# $ac_configure_args will always be present. +# +# Sadly, the traditional "exec $SHELL" of the enable_builddir macros is +# spoiled now and must be replaced by "eval + exit $?". +# +# Example: +# +# AC_DEFUN([AX_ENABLE_SUBDIR],[dnl +# AC_REQUIRE([AX_CONFIGURE_ARGS])dnl +# eval $SHELL $ac_configure_args || exit $? +# ...]) +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
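AX_CONFIGURE_ARGS itself only guarantees that $ac_configure_args is populated; the caller decides what to do with it. A small sketch in the spirit of the AX_ENABLE_SUBDIR example quoted above, where MY_CONFIGURE_SUBPROJECT and the subproject directory are hypothetical names:

    AC_DEFUN([MY_CONFIGURE_SUBPROJECT],[dnl
    AC_REQUIRE([AX_CONFIGURE_ARGS])dnl
    # Re-run the parent's configure arguments against a bundled sub-project.
    (cd subproject && eval $SHELL ./configure $ac_configure_args) || exit $?
    ])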
+ +#serial 14 + +AC_DEFUN([AX_CONFIGURE_ARGS],[ + # [$]@ is unusable in 2.60+ but earlier autoconf had no ac_configure_args + if test "${ac_configure_args+set}" != "set" ; then + ac_configure_args= + for ac_arg in ${1+"[$]@"}; do + ac_configure_args="$ac_configure_args '$ac_arg'" + done + fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args.m4 new file mode 100644 index 0000000..9237efe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args.m4 @@ -0,0 +1,49 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_configure_args.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CONFIGURE_ARGS +# +# DESCRIPTION +# +# Helper macro for AX_ENABLE_BUILDDIR. +# +# The traditional way of starting a subdir-configure is running the script +# with ${1+"$@"} but since autoconf 2.60 this is broken. Instead we have +# to rely on eval'ing $ac_configure_args however some old autoconf +# versions do not provide that. To ensure maximum portability of autoconf +# extension macros this helper can be AC_REQUIRE'd so that +# $ac_configure_args will always be present. +# +# Sadly, the traditional "exec $SHELL" of the enable_builddir macros is +# spoiled now and must be replaced by "eval + exit $?". +# +# Example: +# +# AC_DEFUN([AX_ENABLE_SUBDIR],[dnl +# AC_REQUIRE([AX_CONFIGURE_ARGS])dnl +# eval $SHELL $ac_configure_args || exit $? +# ...]) +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 14 + +AC_DEFUN([AX_CONFIGURE_ARGS],[ + # [$]@ is unusable in 2.60+ but earlier autoconf had no ac_configure_args + if test "${ac_configure_args+set}" != "set" ; then + ac_configure_args= + for ac_arg in ${1+"[$]@"}; do + ac_configure_args="$ac_configure_args '$ac_arg'" + done + fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir 2.m4 new file mode 100644 index 0000000..710384d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir 2.m4 @@ -0,0 +1,302 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_enable_builddir.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_ENABLE_BUILDDIR [(dirstring-or-command [,Makefile.mk [,-all]])] +# +# DESCRIPTION +# +# If the current configure was run within the srcdir then we move all +# configure-files into a subdir and let the configure steps continue +# there. We provide an option --disable-builddir to suppress the move into +# a separate builddir. +# +# Defaults: +# +# $1 = $host (overridden with $HOST) +# $2 = Makefile.mk +# $3 = -all +# +# This macro must be called before AM_INIT_AUTOMAKE. It creates a default +# toplevel srcdir Makefile from the information found in the created +# toplevel builddir Makefile. 
It just copies the variables and +# rule-targets, each extended with a default rule-execution that recurses +# into the build directory of the current "HOST". You can override the +# auto-detection through `config.guess` and build-time of course, as in +# +# make HOST=i386-mingw-cross +# +# which can of course set at configure time as well using +# +# configure --host=i386-mingw-cross +# +# After the default has been created, additional rules can be appended +# that will not just recurse into the subdirectories and only ever exist +# in the srcdir toplevel makefile - these parts are read from the $2 = +# Makefile.mk file +# +# The automatic rules are usually scanning the toplevel Makefile for lines +# like '#### $host |$builddir' to recognize the place where to recurse +# into. Usually, the last one is the only one used. However, almost all +# targets have an additional "*-all" rule which makes the script to +# recurse into _all_ variants of the current HOST (!!) setting. The "-all" +# suffix can be overridden for the macro as well. +# +# a special rule is only given for things like "dist" that will copy the +# tarball from the builddir to the sourcedir (or $(PUB)) for reason of +# convenience. +# +# LICENSE +# +# Copyright (c) 2009 Guido U. Draheim +# Copyright (c) 2009 Alan Jenkins +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 30 + +AC_DEFUN([AX_ENABLE_BUILDDIR],[ +AC_REQUIRE([AC_CANONICAL_HOST])[]dnl +AC_REQUIRE([AC_CANONICAL_TARGET])[]dnl +AC_REQUIRE([AX_CONFIGURE_ARGS])[]dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])[]dnl +AC_BEFORE([$0],[AM_INIT_AUTOMAKE])dnl +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +SUB="." +AC_ARG_ENABLE([builddir], AS_HELP_STRING( + [--disable-builddir],[disable automatic build in subdir of sources]) + ,[SUB="$enableval"], [SUB="auto"]) +if test ".$ac_srcdir_defaulted" != ".no" ; then +if test ".$srcdir" = ".." ; then + if test -f config.status ; then + AC_MSG_NOTICE(toplevel srcdir already configured... skipping subdir build) + else + test ".$SUB" = "." && SUB="." + test ".$SUB" = ".no" && SUB="." 
+ test ".$TARGET" = "." && TARGET="$target" + test ".$SUB" = ".auto" && SUB="m4_ifval([$1], [$1],[$TARGET])" + if test ".$SUB" != ".." ; then # we know where to go and + AS_MKDIR_P([$SUB]) + echo __.$SUB.__ > $SUB/conftest.tmp + cd $SUB + if grep __.$SUB.__ conftest.tmp >/dev/null 2>/dev/null ; then + rm conftest.tmp + AC_MSG_RESULT([continue configure in default builddir "./$SUB"]) + else + AC_MSG_ERROR([could not change to default builddir "./$SUB"]) + fi + srcdir=`echo "$SUB" | + sed -e 's,^\./,,;s,[[^/]]$,&/,;s,[[^/]]*/,../,g;s,[[/]]$,,;'` + # going to restart from subdirectory location + test -f $srcdir/config.log && mv $srcdir/config.log . + test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h . + test -f $srcdir/conftest.log && mv $srcdir/conftest.log . + test -f $srcdir/$cache_file && mv $srcdir/$cache_file . + AC_MSG_RESULT(....exec $SHELL $srcdir/[$]0 "--srcdir=$srcdir" "--enable-builddir=$SUB" ${1+"[$]@"}) + case "[$]0" in # restart + [[\\/]]* | ?:[[\\/]]*) # Absolute name + eval $SHELL "'[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + *) eval $SHELL "'$srcdir/[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + esac ; exit $? + fi + fi +fi fi +test ".$SUB" = ".auto" && SUB="." +dnl ac_path_prog uses "set dummy" to override $@ which would defeat the "exec" +AC_PATH_PROG(SED,gsed sed, sed) +AUX="$am_aux_dir" +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SUB])dnl +AC_CONFIG_COMMANDS([buildir],[dnl .............. config.status .............. +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([TOP],[top_srcdir])dnl +AS_VAR_PUSHDEF([SRC],[ac_top_srcdir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +pushdef([END],[Makefile.mk])dnl +pushdef([_ALL],[ifelse([$3],,[-all],[$3])])dnl + SRC="$ax_enable_builddir_srcdir" + if test ".$SUB" = ".." ; then + if test -f "$TOP/Makefile" ; then + AC_MSG_NOTICE([skipping TOP/Makefile - left untouched]) + else + AC_MSG_NOTICE([skipping TOP/Makefile - not created]) + fi + else + if test -f "$SRC/Makefile" ; then + a=`grep "^VERSION " "$SRC/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$SRC/Makefile" + fi + if test -f "$SRC/Makefile" ; then + echo "$SRC/Makefile : $SRC/Makefile.in" > $tmp/conftemp.mk + echo " []@ echo 'REMOVED,,,' >\$[]@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$SRC/Makefile" >/dev/null + then rm $SRC/Makefile ; fi + cp $tmp/conftemp.mk $SRC/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$SRC/Makefile" ; then + AC_MSG_NOTICE([create TOP/Makefile guessed from local Makefile]) + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[[ ]]*[[\\#]]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[[:=]]/!d +/^\\./d +dnl Now handle rules (i.e. lines containing ":" but not " = "). +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/ \\1 \\1[]_ALL\\2/g +s/^\\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/\\1 \\1[]_ALL\\2/ +s/ / /g +/^all all[]_ALL[[ :]]/i\\ +all-configured : all[]_ALL +dnl dist-all exists... and would make for dist-all-all +s/ [[a-zA-Z0-9-]]*[]_ALL [[a-zA-Z0-9-]]*[]_ALL[]_ALL//g +/[]_ALL[]_ALL/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." 
&& HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +dnl special rule add-on: "dist" copies the tarball to $(PUB). (source tree) +/dist[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "dist-foo" copies all the archives to $(PUB). (source tree) +/dist-[[a-zA-Z0-9]]*[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "distclean" removes all local builddirs completely +/distclean[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$SRC/makefile.sed~" ## DEBUGGING + $SED -f $tmp/conftemp.sed Makefile >$SRC/Makefile + if test -f "$SRC/m4_ifval([$2],[$2],[END])" ; then + AC_MSG_NOTICE([extend TOP/Makefile with TOP/m4_ifval([$2],[$2],[END])]) + cat $SRC/END >>$SRC/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$SRC/Makefile + # sanity check + if grep '^; echo "MAKE ' $SRC/Makefile >/dev/null ; then + AC_MSG_NOTICE([buggy sed found - it deletes tab in "a" text parts]) + $SED -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $SRC/Makefile \ + >$SRC/Makefile~ + (test -s $SRC/Makefile~ && mv $SRC/Makefile~ $SRC/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [[^|]]* | *$SUB *\$!$xxxx ...... $SUB!" >$tmp/conftemp.sed + $SED -f "$tmp/conftemp.sed" "$SRC/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$SRC/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$SRC/makefiles.out~" ## DEBUGGING + if cmp -s "$SRC/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + AC_MSG_NOTICE([keeping TOP/Makefile from earlier configure]) + rm "$tmp/mkfile.tmp" + else + AC_MSG_NOTICE([reusing TOP/Makefile from earlier configure]) + mv "$tmp/mkfile.tmp" "$SRC/Makefile" + fi + fi + AC_MSG_NOTICE([build in $SUB (HOST=$ax_enable_builddir_host)]) + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$SUB" >>$SRC/Makefile + fi +popdef([END])dnl +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SRC])dnl +AS_VAR_POPDEF([TOP])dnl +AS_VAR_POPDEF([SUB])dnl +],[dnl +ax_enable_builddir_srcdir="$srcdir" # $srcdir +ax_enable_builddir_host="$HOST" # $HOST / $host +ax_enable_builddir_version="$VERSION" # $VERSION +ax_enable_builddir_package="$PACKAGE" # $PACKAGE +ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX +ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED +ax_enable_builddir="$ax_enable_builddir" # $SUB +])dnl +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 new file mode 100644 index 0000000..710384d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 @@ -0,0 +1,302 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_enable_builddir.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_ENABLE_BUILDDIR [(dirstring-or-command [,Makefile.mk [,-all]])] +# +# DESCRIPTION +# +# If the current configure was run within the srcdir then we move all +# configure-files into a subdir and let the configure steps continue +# there. We provide an option --disable-builddir to suppress the move into +# a separate builddir. +# +# Defaults: +# +# $1 = $host (overridden with $HOST) +# $2 = Makefile.mk +# $3 = -all +# +# This macro must be called before AM_INIT_AUTOMAKE. It creates a default +# toplevel srcdir Makefile from the information found in the created +# toplevel builddir Makefile. It just copies the variables and +# rule-targets, each extended with a default rule-execution that recurses +# into the build directory of the current "HOST". 
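A minimal configure.ac sketch for AX_ENABLE_BUILDDIR as shipped above; the macro has to run before AM_INIT_AUTOMAKE, and when configure is started inside the source tree it re-executes itself in a per-host build subdirectory (suppressed with --disable-builddir). Project and aux-dir names are placeholders:

    AC_INIT([demo], [1.0])
    AC_CONFIG_AUX_DIR([build-aux])
    # Must precede AM_INIT_AUTOMAKE; the builddir defaults to the host triple.
    AX_ENABLE_BUILDDIR
    AM_INIT_AUTOMAKE([foreign])
    AC_PROG_CC
    AC_CONFIG_FILES([Makefile])
    AC_OUTPUT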
You can override the +# auto-detection through `config.guess` and build-time of course, as in +# +# make HOST=i386-mingw-cross +# +# which can of course set at configure time as well using +# +# configure --host=i386-mingw-cross +# +# After the default has been created, additional rules can be appended +# that will not just recurse into the subdirectories and only ever exist +# in the srcdir toplevel makefile - these parts are read from the $2 = +# Makefile.mk file +# +# The automatic rules are usually scanning the toplevel Makefile for lines +# like '#### $host |$builddir' to recognize the place where to recurse +# into. Usually, the last one is the only one used. However, almost all +# targets have an additional "*-all" rule which makes the script to +# recurse into _all_ variants of the current HOST (!!) setting. The "-all" +# suffix can be overridden for the macro as well. +# +# a special rule is only given for things like "dist" that will copy the +# tarball from the builddir to the sourcedir (or $(PUB)) for reason of +# convenience. +# +# LICENSE +# +# Copyright (c) 2009 Guido U. Draheim +# Copyright (c) 2009 Alan Jenkins +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 30 + +AC_DEFUN([AX_ENABLE_BUILDDIR],[ +AC_REQUIRE([AC_CANONICAL_HOST])[]dnl +AC_REQUIRE([AC_CANONICAL_TARGET])[]dnl +AC_REQUIRE([AX_CONFIGURE_ARGS])[]dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])[]dnl +AC_BEFORE([$0],[AM_INIT_AUTOMAKE])dnl +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +SUB="." +AC_ARG_ENABLE([builddir], AS_HELP_STRING( + [--disable-builddir],[disable automatic build in subdir of sources]) + ,[SUB="$enableval"], [SUB="auto"]) +if test ".$ac_srcdir_defaulted" != ".no" ; then +if test ".$srcdir" = ".." ; then + if test -f config.status ; then + AC_MSG_NOTICE(toplevel srcdir already configured... skipping subdir build) + else + test ".$SUB" = "." && SUB="." + test ".$SUB" = ".no" && SUB="." + test ".$TARGET" = "." && TARGET="$target" + test ".$SUB" = ".auto" && SUB="m4_ifval([$1], [$1],[$TARGET])" + if test ".$SUB" != ".." 
; then # we know where to go and + AS_MKDIR_P([$SUB]) + echo __.$SUB.__ > $SUB/conftest.tmp + cd $SUB + if grep __.$SUB.__ conftest.tmp >/dev/null 2>/dev/null ; then + rm conftest.tmp + AC_MSG_RESULT([continue configure in default builddir "./$SUB"]) + else + AC_MSG_ERROR([could not change to default builddir "./$SUB"]) + fi + srcdir=`echo "$SUB" | + sed -e 's,^\./,,;s,[[^/]]$,&/,;s,[[^/]]*/,../,g;s,[[/]]$,,;'` + # going to restart from subdirectory location + test -f $srcdir/config.log && mv $srcdir/config.log . + test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h . + test -f $srcdir/conftest.log && mv $srcdir/conftest.log . + test -f $srcdir/$cache_file && mv $srcdir/$cache_file . + AC_MSG_RESULT(....exec $SHELL $srcdir/[$]0 "--srcdir=$srcdir" "--enable-builddir=$SUB" ${1+"[$]@"}) + case "[$]0" in # restart + [[\\/]]* | ?:[[\\/]]*) # Absolute name + eval $SHELL "'[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + *) eval $SHELL "'$srcdir/[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + esac ; exit $? + fi + fi +fi fi +test ".$SUB" = ".auto" && SUB="." +dnl ac_path_prog uses "set dummy" to override $@ which would defeat the "exec" +AC_PATH_PROG(SED,gsed sed, sed) +AUX="$am_aux_dir" +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SUB])dnl +AC_CONFIG_COMMANDS([buildir],[dnl .............. config.status .............. +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([TOP],[top_srcdir])dnl +AS_VAR_PUSHDEF([SRC],[ac_top_srcdir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +pushdef([END],[Makefile.mk])dnl +pushdef([_ALL],[ifelse([$3],,[-all],[$3])])dnl + SRC="$ax_enable_builddir_srcdir" + if test ".$SUB" = ".." ; then + if test -f "$TOP/Makefile" ; then + AC_MSG_NOTICE([skipping TOP/Makefile - left untouched]) + else + AC_MSG_NOTICE([skipping TOP/Makefile - not created]) + fi + else + if test -f "$SRC/Makefile" ; then + a=`grep "^VERSION " "$SRC/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$SRC/Makefile" + fi + if test -f "$SRC/Makefile" ; then + echo "$SRC/Makefile : $SRC/Makefile.in" > $tmp/conftemp.mk + echo " []@ echo 'REMOVED,,,' >\$[]@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$SRC/Makefile" >/dev/null + then rm $SRC/Makefile ; fi + cp $tmp/conftemp.mk $SRC/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$SRC/Makefile" ; then + AC_MSG_NOTICE([create TOP/Makefile guessed from local Makefile]) + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[[ ]]*[[\\#]]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[[:=]]/!d +/^\\./d +dnl Now handle rules (i.e. lines containing ":" but not " = "). +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/ \\1 \\1[]_ALL\\2/g +s/^\\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/\\1 \\1[]_ALL\\2/ +s/ / /g +/^all all[]_ALL[[ :]]/i\\ +all-configured : all[]_ALL +dnl dist-all exists... and would make for dist-all-all +s/ [[a-zA-Z0-9-]]*[]_ALL [[a-zA-Z0-9-]]*[]_ALL[]_ALL//g +/[]_ALL[]_ALL/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." 
&& HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +dnl special rule add-on: "dist" copies the tarball to $(PUB). (source tree) +/dist[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "dist-foo" copies all the archives to $(PUB). (source tree) +/dist-[[a-zA-Z0-9]]*[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "distclean" removes all local builddirs completely +/distclean[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$SRC/makefile.sed~" ## DEBUGGING + $SED -f $tmp/conftemp.sed Makefile >$SRC/Makefile + if test -f "$SRC/m4_ifval([$2],[$2],[END])" ; then + AC_MSG_NOTICE([extend TOP/Makefile with TOP/m4_ifval([$2],[$2],[END])]) + cat $SRC/END >>$SRC/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$SRC/Makefile + # sanity check + if grep '^; echo "MAKE ' $SRC/Makefile >/dev/null ; then + AC_MSG_NOTICE([buggy sed found - it deletes tab in "a" text parts]) + $SED -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $SRC/Makefile \ + >$SRC/Makefile~ + (test -s $SRC/Makefile~ && mv $SRC/Makefile~ $SRC/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [[^|]]* | *$SUB *\$!$xxxx ...... $SUB!" >$tmp/conftemp.sed + $SED -f "$tmp/conftemp.sed" "$SRC/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$SRC/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$SRC/makefiles.out~" ## DEBUGGING + if cmp -s "$SRC/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + AC_MSG_NOTICE([keeping TOP/Makefile from earlier configure]) + rm "$tmp/mkfile.tmp" + else + AC_MSG_NOTICE([reusing TOP/Makefile from earlier configure]) + mv "$tmp/mkfile.tmp" "$SRC/Makefile" + fi + fi + AC_MSG_NOTICE([build in $SUB (HOST=$ax_enable_builddir_host)]) + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$SUB" >>$SRC/Makefile + fi +popdef([END])dnl +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SRC])dnl +AS_VAR_POPDEF([TOP])dnl +AS_VAR_POPDEF([SUB])dnl +],[dnl +ax_enable_builddir_srcdir="$srcdir" # $srcdir +ax_enable_builddir_host="$HOST" # $HOST / $host +ax_enable_builddir_version="$VERSION" # $VERSION +ax_enable_builddir_package="$PACKAGE" # $PACKAGE +ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX +ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED +ax_enable_builddir="$ax_enable_builddir" # $SUB +])dnl +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag 2.m4 new file mode 100644 index 0000000..c52b9b2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag 2.m4 @@ -0,0 +1,267 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_archflag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_ARCHFLAG([PORTABLE?], [ACTION-SUCCESS], [ACTION-FAILURE]) +# +# DESCRIPTION +# +# This macro tries to guess the "native" arch corresponding to the target +# architecture for use with gcc's -march=arch or -mtune=arch flags. If +# found, the cache variable $ax_cv_gcc_archflag is set to this flag and +# ACTION-SUCCESS is executed; otherwise $ax_cv_gcc_archflag is set to +# "unknown" and ACTION-FAILURE is executed. The default ACTION-SUCCESS is +# to add $ax_cv_gcc_archflag to the end of $CFLAGS. +# +# PORTABLE? should be either [yes] (default) or [no]. In the former case, +# the flag is set to -mtune (or equivalent) so that the architecture is +# only used for tuning, but the instruction set used is still portable. 
In +# the latter case, the flag is set to -march (or equivalent) so that +# architecture-specific instructions are enabled. +# +# The user can specify --with-gcc-arch=<arch> in order to override the +# macro's choice of architecture, or --without-gcc-arch to disable this. +# +# When cross-compiling, or if $CC is not gcc, then ACTION-FAILURE is +# called unless the user specified --with-gcc-arch manually. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID +# +# (The main emphasis here is on recent CPUs, on the principle that doing +# high-performance computing on old hardware is uncommon.) +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2014 Tsukasa Oi +# Copyright (c) 2017-2018 Alexey Kopytov +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see <https://www.gnu.org/licenses/>. +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well.
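For orientation, here is a minimal configure.ac sketch of how AX_GCC_ARCHFLAG is typically wired up. The project name, macro directory and messages are hypothetical, and it assumes the companion macros this file requires (AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID, AX_COMPILER_VENDOR) are also available to aclocal:

AC_INIT([demo], [1.0])
AC_CONFIG_MACRO_DIR([m4])
AC_PROG_CC
dnl PORTABLE?=yes asks for a -mtune-style flag, so the generated code still
dnl runs on other CPUs of the same family; the ACTION-SUCCESS below mirrors
dnl the documented default of appending the guessed flag to CFLAGS.
AX_GCC_ARCHFLAG([yes],
    [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"],
    [AC_MSG_WARN([could not guess a tuning flag; CFLAGS left unchanged])])
AC_OUTPUT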
+ +#serial 22 + +AC_DEFUN([AX_GCC_ARCHFLAG], +[AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_SED]) +AC_REQUIRE([AX_COMPILER_VENDOR]) + +AC_ARG_WITH(gcc-arch, [AS_HELP_STRING([--with-gcc-arch=], [use architecture for gcc -march/-mtune, instead of guessing])], + ax_gcc_arch=$withval, ax_gcc_arch=yes) + +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT([]) +AC_CACHE_VAL(ax_cv_gcc_archflag, +[ +ax_cv_gcc_archflag="unknown" + +if test "$GCC" = yes; then + +if test "x$ax_gcc_arch" = xyes; then +ax_gcc_arch="" +if test "$cross_compiling" = no; then +case $host_cpu in + i[[3456]]86*|x86_64*|amd64*) # use cpuid codes + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *5[[4578]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;; + *5[[123]]?:*:*:*) ax_gcc_arch=pentium ;; + *0?61?:*:*:*|?61?:*:*:*|61?:*:*:*) ax_gcc_arch=pentiumpro ;; + *0?6[[356]]?:*:*:*|?6[[356]]?:*:*:*|6[[356]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) ax_gcc_arch="yonah pentium-m pentium3 pentiumpro" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[7d]]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[aef]]?:*:*:*|*2?6e?:*:*:*) ax_gcc_arch="nehalem corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[5cf]]?:*:*:*) ax_gcc_arch="westmere corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[ad]]?:*:*:*) ax_gcc_arch="sandybridge corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[ae]]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6d?:*:*:*|*4?6[[7f]]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *1?6c?:*:*:*|*2?6[[67]]?:*:*:*|*3?6[[56]]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;; + *3?67?:*:*:*|*[[45]]?6[[ad]]?:*:*:*) ax_gcc_arch="silvermont atom core2 pentium-m pentium3 pentiumpro" ;; + *000?f[[012]]?:*:*:*|?f[[012]]?:*:*:*|f[[012]]?:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) ax_gcc_arch="nocona prescott pentium4 pentiumpro" ;; + # fallback + *5??:*:*:*) ax_gcc_arch=pentium ;; + *??6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;; + *6??:*:*:*) ax_gcc_arch=pentiumpro ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + esac ;; + *:68747541:444d4163:69746e65) # AMD + case $ax_cv_gcc_x86_cpuid_1 in + *5[[67]]?:*:*:*) ax_gcc_arch=k6 ;; + *5[[8]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;; + *5[[9d]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;; + *6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;; + *6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;; + *6[[678a]]?:*:*:*) ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;; + *000?f[[4578bcef]]?:*:*:*|?f[[4578bcef]]?:*:*:*|f[[4578bcef]]?:*:*:*|*001?f[[4578bcf]]?:*:*:*|1?f[[4578bcf]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;; + 
*002?f[[13457bcf]]?:*:*:*|2?f[[13457bcf]]?:*:*:*|*004?f[[138bcf]]?:*:*:*|4?f[[138bcf]]?:*:*:*|*005?f[[df]]?:*:*:*|5?f[[df]]?:*:*:*|*006?f[[8bcf]]?:*:*:*|6?f[[8bcf]]?:*:*:*|*007?f[[cf]]?:*:*:*|7?f[[cf]]?:*:*:*|*00c?f1?:*:*:*|c?f1?:*:*:*|*020?f3?:*:*:*|20?f3?:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *010?f[[245689a]]?:*:*:*|10?f[[245689a]]?:*:*:*|*030?f1?:*:*:*|30?f1?:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *050?f[[12]]?:*:*:*|50?f[[12]]?:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f1?:*:*:*|60?f1?:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *060?f2?:*:*:*|60?f2?:*:*:*|*061?f[[03]]?:*:*:*|61?f[[03]]?:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *063?f0?:*:*:*|63?f0?:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *07[[03]]?f0?:*:*:*|7[[03]]?f0?:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + # fallback + *0[[13]]??f??:*:*:*|[[13]]??f??:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *020?f??:*:*:*|20?f??:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *05??f??:*:*:*|5??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f??:*:*:*|60?f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *061?f??:*:*:*|61?f??:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *06??f??:*:*:*|6??f??:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *070?f??:*:*:*|70?f??:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + *???f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;; + esac ;; + *:746e6543:736c7561:48727561) # IDT / VIA (Centaur) + case $ax_cv_gcc_x86_cpuid_1 in + *54?:*:*:*) ax_gcc_arch=winchip-c6 ;; + *5[[89]]?:*:*:*) ax_gcc_arch=winchip2 ;; + *66?:*:*:*) ax_gcc_arch=winchip2 ;; + *6[[78]]?:*:*:*) ax_gcc_arch=c3 ;; + *6[[9adf]]?:*:*:*) ax_gcc_arch="c3-2 c3" ;; + esac ;; + esac + if test x"$ax_gcc_arch" = x; then # fallback + case $host_cpu in + i586*) ax_gcc_arch=pentium ;; + i686*) ax_gcc_arch=pentiumpro ;; + esac + fi + ;; + + sparc*) + AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/]) + cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null` + cputype=`echo "$cputype" | tr -d ' -' | $SED 's/SPARCIIi/SPARCII/' |tr $as_cr_LETTERS $as_cr_letters` + case $cputype in + *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;; + *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;; + *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;; + *supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;; + *hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;; + *cypress*) ax_gcc_arch=cypress ;; + esac ;; + + alphaev5) ax_gcc_arch=ev5 ;; + alphaev56) ax_gcc_arch=ev56 ;; + alphapca56) ax_gcc_arch="pca56 ev56" ;; + alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;; + alphaev6) ax_gcc_arch=ev6 ;; + alphaev67) ax_gcc_arch=ev67 ;; + alphaev68) ax_gcc_arch="ev68 ev67" ;; + alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;; + alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;; + alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;; + + powerpc*) + cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | $SED 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null` + cputype=`echo $cputype | $SED -e 's/ppc//g;s/ *//g'` + case $cputype in + *750*) ax_gcc_arch="750 G3" ;; + *740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;; + *74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;; + *74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;; + *970*) 
ax_gcc_arch="970 G5 power4";; + *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";; + *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";; + 603ev|8240) ax_gcc_arch="$cputype 603e 603";; + *POWER7*) ax_gcc_arch="power7";; + *POWER8*) ax_gcc_arch="power8";; + *POWER9*) ax_gcc_arch="power9";; + *POWER10*) ax_gcc_arch="power10";; + *) ax_gcc_arch=$cputype ;; + esac + ax_gcc_arch="$ax_gcc_arch powerpc" + ;; + aarch64) + cpuimpl=`grep 'CPU implementer' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuarch=`grep 'CPU architecture' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuvar=`grep 'CPU variant' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + case $cpuimpl in + 0x42) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + 0x43) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx armv8-a native" ;; + 0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + esac + ;; +esac +fi # not cross-compiling +fi # guess arch + +if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then +if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code + flag_prefixes="-mtune=" + if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then flag_prefixes="-march="; fi + # -mcpu=$arch and m$arch generate nonportable code on every arch except + # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr. + case $host_cpu in i*86|x86_64*|amd64*) flag_prefixes="$flag_prefixes -mcpu= -m";; esac +else + flag_prefixes="-march= -mcpu= -m" +fi +for flag_prefix in $flag_prefixes; do + for arch in $ax_gcc_arch; do + flag="$flag_prefix$arch" + AX_CHECK_COMPILE_FLAG($flag, [if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then + if test "x[]m4_default([$1],yes)" = xyes; then + if test "x$flag" = "x-march=$arch"; then flag=-mtune=$arch; fi + fi + fi; ax_cv_gcc_archflag=$flag; break]) + done + test "x$ax_cv_gcc_archflag" = xunknown || break +done +fi + +fi # $GCC=yes +]) +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT($ax_cv_gcc_archflag) +if test "x$ax_cv_gcc_archflag" = xunknown; then + m4_default([$3],:) +else + m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"]) +fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 new file mode 100644 index 0000000..c52b9b2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 @@ -0,0 +1,267 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_archflag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_ARCHFLAG([PORTABLE?], [ACTION-SUCCESS], [ACTION-FAILURE]) +# +# DESCRIPTION +# +# This macro tries to guess the "native" arch corresponding to the target +# architecture for use with gcc's -march=arch or -mtune=arch flags. If +# found, the cache variable $ax_cv_gcc_archflag is set to this flag and +# ACTION-SUCCESS is executed; otherwise $ax_cv_gcc_archflag is set to +# "unknown" and ACTION-FAILURE is executed. The default ACTION-SUCCESS is +# to add $ax_cv_gcc_archflag to the end of $CFLAGS. +# +# PORTABLE? should be either [yes] (default) or [no]. 
In the former case, +# the flag is set to -mtune (or equivalent) so that the architecture is +# only used for tuning, but the instruction set used is still portable. In +# the latter case, the flag is set to -march (or equivalent) so that +# architecture-specific instructions are enabled. +# +# The user can specify --with-gcc-arch=<arch> in order to override the +# macro's choice of architecture, or --without-gcc-arch to disable this. +# +# When cross-compiling, or if $CC is not gcc, then ACTION-FAILURE is +# called unless the user specified --with-gcc-arch manually. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID +# +# (The main emphasis here is on recent CPUs, on the principle that doing +# high-performance computing on old hardware is uncommon.) +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2014 Tsukasa Oi +# Copyright (c) 2017-2018 Alexey Kopytov +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see <https://www.gnu.org/licenses/>. +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well.
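The description above also covers the user-facing overrides; a hedged sketch of the non-portable variant follows, with the override invocations shown as comments (the architecture name core2 is only an example):

dnl With PORTABLE?=no the macro probes -march-style flags and, on success,
dnl appends the result to CFLAGS via the default ACTION-SUCCESS.
dnl Users can still steer the guess at configure time:
dnl   ./configure --with-gcc-arch=core2    (force a specific architecture)
dnl   ./configure --without-gcc-arch       (skip the probe entirely)
AX_GCC_ARCHFLAG([no])
AC_MSG_NOTICE([gcc architecture flag: $ax_cv_gcc_archflag])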
+ +#serial 22 + +AC_DEFUN([AX_GCC_ARCHFLAG], +[AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_SED]) +AC_REQUIRE([AX_COMPILER_VENDOR]) + +AC_ARG_WITH(gcc-arch, [AS_HELP_STRING([--with-gcc-arch=], [use architecture for gcc -march/-mtune, instead of guessing])], + ax_gcc_arch=$withval, ax_gcc_arch=yes) + +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT([]) +AC_CACHE_VAL(ax_cv_gcc_archflag, +[ +ax_cv_gcc_archflag="unknown" + +if test "$GCC" = yes; then + +if test "x$ax_gcc_arch" = xyes; then +ax_gcc_arch="" +if test "$cross_compiling" = no; then +case $host_cpu in + i[[3456]]86*|x86_64*|amd64*) # use cpuid codes + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *5[[4578]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;; + *5[[123]]?:*:*:*) ax_gcc_arch=pentium ;; + *0?61?:*:*:*|?61?:*:*:*|61?:*:*:*) ax_gcc_arch=pentiumpro ;; + *0?6[[356]]?:*:*:*|?6[[356]]?:*:*:*|6[[356]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) ax_gcc_arch="yonah pentium-m pentium3 pentiumpro" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[7d]]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[aef]]?:*:*:*|*2?6e?:*:*:*) ax_gcc_arch="nehalem corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[5cf]]?:*:*:*) ax_gcc_arch="westmere corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[ad]]?:*:*:*) ax_gcc_arch="sandybridge corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[ae]]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6d?:*:*:*|*4?6[[7f]]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *1?6c?:*:*:*|*2?6[[67]]?:*:*:*|*3?6[[56]]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;; + *3?67?:*:*:*|*[[45]]?6[[ad]]?:*:*:*) ax_gcc_arch="silvermont atom core2 pentium-m pentium3 pentiumpro" ;; + *000?f[[012]]?:*:*:*|?f[[012]]?:*:*:*|f[[012]]?:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) ax_gcc_arch="nocona prescott pentium4 pentiumpro" ;; + # fallback + *5??:*:*:*) ax_gcc_arch=pentium ;; + *??6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;; + *6??:*:*:*) ax_gcc_arch=pentiumpro ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + esac ;; + *:68747541:444d4163:69746e65) # AMD + case $ax_cv_gcc_x86_cpuid_1 in + *5[[67]]?:*:*:*) ax_gcc_arch=k6 ;; + *5[[8]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;; + *5[[9d]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;; + *6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;; + *6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;; + *6[[678a]]?:*:*:*) ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;; + *000?f[[4578bcef]]?:*:*:*|?f[[4578bcef]]?:*:*:*|f[[4578bcef]]?:*:*:*|*001?f[[4578bcf]]?:*:*:*|1?f[[4578bcf]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;; + 
*002?f[[13457bcf]]?:*:*:*|2?f[[13457bcf]]?:*:*:*|*004?f[[138bcf]]?:*:*:*|4?f[[138bcf]]?:*:*:*|*005?f[[df]]?:*:*:*|5?f[[df]]?:*:*:*|*006?f[[8bcf]]?:*:*:*|6?f[[8bcf]]?:*:*:*|*007?f[[cf]]?:*:*:*|7?f[[cf]]?:*:*:*|*00c?f1?:*:*:*|c?f1?:*:*:*|*020?f3?:*:*:*|20?f3?:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *010?f[[245689a]]?:*:*:*|10?f[[245689a]]?:*:*:*|*030?f1?:*:*:*|30?f1?:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *050?f[[12]]?:*:*:*|50?f[[12]]?:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f1?:*:*:*|60?f1?:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *060?f2?:*:*:*|60?f2?:*:*:*|*061?f[[03]]?:*:*:*|61?f[[03]]?:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *063?f0?:*:*:*|63?f0?:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *07[[03]]?f0?:*:*:*|7[[03]]?f0?:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + # fallback + *0[[13]]??f??:*:*:*|[[13]]??f??:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *020?f??:*:*:*|20?f??:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *05??f??:*:*:*|5??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f??:*:*:*|60?f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *061?f??:*:*:*|61?f??:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *06??f??:*:*:*|6??f??:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *070?f??:*:*:*|70?f??:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + *???f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;; + esac ;; + *:746e6543:736c7561:48727561) # IDT / VIA (Centaur) + case $ax_cv_gcc_x86_cpuid_1 in + *54?:*:*:*) ax_gcc_arch=winchip-c6 ;; + *5[[89]]?:*:*:*) ax_gcc_arch=winchip2 ;; + *66?:*:*:*) ax_gcc_arch=winchip2 ;; + *6[[78]]?:*:*:*) ax_gcc_arch=c3 ;; + *6[[9adf]]?:*:*:*) ax_gcc_arch="c3-2 c3" ;; + esac ;; + esac + if test x"$ax_gcc_arch" = x; then # fallback + case $host_cpu in + i586*) ax_gcc_arch=pentium ;; + i686*) ax_gcc_arch=pentiumpro ;; + esac + fi + ;; + + sparc*) + AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/]) + cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null` + cputype=`echo "$cputype" | tr -d ' -' | $SED 's/SPARCIIi/SPARCII/' |tr $as_cr_LETTERS $as_cr_letters` + case $cputype in + *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;; + *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;; + *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;; + *supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;; + *hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;; + *cypress*) ax_gcc_arch=cypress ;; + esac ;; + + alphaev5) ax_gcc_arch=ev5 ;; + alphaev56) ax_gcc_arch=ev56 ;; + alphapca56) ax_gcc_arch="pca56 ev56" ;; + alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;; + alphaev6) ax_gcc_arch=ev6 ;; + alphaev67) ax_gcc_arch=ev67 ;; + alphaev68) ax_gcc_arch="ev68 ev67" ;; + alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;; + alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;; + alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;; + + powerpc*) + cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | $SED 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null` + cputype=`echo $cputype | $SED -e 's/ppc//g;s/ *//g'` + case $cputype in + *750*) ax_gcc_arch="750 G3" ;; + *740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;; + *74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;; + *74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;; + *970*) 
ax_gcc_arch="970 G5 power4";; + *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";; + *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";; + 603ev|8240) ax_gcc_arch="$cputype 603e 603";; + *POWER7*) ax_gcc_arch="power7";; + *POWER8*) ax_gcc_arch="power8";; + *POWER9*) ax_gcc_arch="power9";; + *POWER10*) ax_gcc_arch="power10";; + *) ax_gcc_arch=$cputype ;; + esac + ax_gcc_arch="$ax_gcc_arch powerpc" + ;; + aarch64) + cpuimpl=`grep 'CPU implementer' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuarch=`grep 'CPU architecture' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuvar=`grep 'CPU variant' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + case $cpuimpl in + 0x42) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + 0x43) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx armv8-a native" ;; + 0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + esac + ;; +esac +fi # not cross-compiling +fi # guess arch + +if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then +if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code + flag_prefixes="-mtune=" + if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then flag_prefixes="-march="; fi + # -mcpu=$arch and m$arch generate nonportable code on every arch except + # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr. + case $host_cpu in i*86|x86_64*|amd64*) flag_prefixes="$flag_prefixes -mcpu= -m";; esac +else + flag_prefixes="-march= -mcpu= -m" +fi +for flag_prefix in $flag_prefixes; do + for arch in $ax_gcc_arch; do + flag="$flag_prefix$arch" + AX_CHECK_COMPILE_FLAG($flag, [if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then + if test "x[]m4_default([$1],yes)" = xyes; then + if test "x$flag" = "x-march=$arch"; then flag=-mtune=$arch; fi + fi + fi; ax_cv_gcc_archflag=$flag; break]) + done + test "x$ax_cv_gcc_archflag" = xunknown || break +done +fi + +fi # $GCC=yes +]) +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT($ax_cv_gcc_archflag) +if test "x$ax_cv_gcc_archflag" = xunknown; then + m4_default([$3],:) +else + m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"]) +fi +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid 2.m4 new file mode 100644 index 0000000..df95465 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid 2.m4 @@ -0,0 +1,89 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_x86_cpuid.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_X86_CPUID(OP) +# AX_GCC_X86_CPUID_COUNT(OP, COUNT) +# +# DESCRIPTION +# +# On Pentium and later x86 processors, with gcc or a compiler that has a +# compatible syntax for inline assembly instructions, run a small program +# that executes the cpuid instruction with input OP. This can be used to +# detect the CPU type. AX_GCC_X86_CPUID_COUNT takes an additional COUNT +# parameter that gets passed into register ECX before calling cpuid. 
+# +# On output, the values of the eax, ebx, ecx, and edx registers are stored +# as hexadecimal strings as "eax:ebx:ecx:edx" in the cache variable +# ax_cv_gcc_x86_cpuid_OP. +# +# If the cpuid instruction fails (because you are running a +# cross-compiler, or because you are not using gcc, or because you are on +# a processor that doesn't have this instruction), ax_cv_gcc_x86_cpuid_OP +# is set to the string "unknown". +# +# This macro mainly exists to be used in AX_GCC_ARCHFLAG. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2015 Michael Petch +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
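As a hedged illustration of the cache-variable format described above, here is a configure.ac fragment (hypothetical messages) that queries cpuid leaf 0 and branches on the vendor id; the patterns mirror the ones AX_GCC_ARCHFLAG itself matches, and on cross-compiles or non-GCC compilers the variable simply reads "unknown" and falls through to the last branch:

AX_GCC_X86_CPUID(0)
AS_CASE([$ax_cv_gcc_x86_cpuid_0],
    [*:756e6547:6c65746e:49656e69], [AC_MSG_NOTICE([Intel CPU detected])],
    [*:68747541:444d4163:69746e65], [AC_MSG_NOTICE([AMD CPU detected])],
    [AC_MSG_NOTICE([cpuid leaf 0 returned: $ax_cv_gcc_x86_cpuid_0])])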
+ +#serial 10 + +AC_DEFUN([AX_GCC_X86_CPUID], +[AX_GCC_X86_CPUID_COUNT($1, 0) +]) + +AC_DEFUN([AX_GCC_X86_CPUID_COUNT], +[AC_REQUIRE([AC_PROG_CC]) +AC_LANG_PUSH([C]) +AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1, + [AC_RUN_IFELSE([AC_LANG_PROGRAM([#include ], [ + int op = $1, level = $2, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; +])], + [ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown])]) +AC_LANG_POP([C]) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 new file mode 100644 index 0000000..df95465 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 @@ -0,0 +1,89 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_x86_cpuid.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_X86_CPUID(OP) +# AX_GCC_X86_CPUID_COUNT(OP, COUNT) +# +# DESCRIPTION +# +# On Pentium and later x86 processors, with gcc or a compiler that has a +# compatible syntax for inline assembly instructions, run a small program +# that executes the cpuid instruction with input OP. This can be used to +# detect the CPU type. AX_GCC_X86_CPUID_COUNT takes an additional COUNT +# parameter that gets passed into register ECX before calling cpuid. +# +# On output, the values of the eax, ebx, ecx, and edx registers are stored +# as hexadecimal strings as "eax:ebx:ecx:edx" in the cache variable +# ax_cv_gcc_x86_cpuid_OP. +# +# If the cpuid instruction fails (because you are running a +# cross-compiler, or because you are not using gcc, or because you are on +# a processor that doesn't have this instruction), ax_cv_gcc_x86_cpuid_OP +# is set to the string "unknown". +# +# This macro mainly exists to be used in AX_GCC_ARCHFLAG. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2015 Michael Petch +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. 
The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 10 + +AC_DEFUN([AX_GCC_X86_CPUID], +[AX_GCC_X86_CPUID_COUNT($1, 0) +]) + +AC_DEFUN([AX_GCC_X86_CPUID_COUNT], +[AC_REQUIRE([AC_PROG_CC]) +AC_LANG_PUSH([C]) +AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1, + [AC_RUN_IFELSE([AC_LANG_PROGRAM([#include ], [ + int op = $1, level = $2, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; +])], + [ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown])]) +AC_LANG_POP([C]) +]) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined 2.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined 2.m4 new file mode 100644 index 0000000..17c3eab --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined 2.m4 @@ -0,0 +1,37 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_REQUIRE_DEFINED], [dnl + m4_ifndef([$1], [m4_fatal([macro ]$1[ is not defined; is a m4 file missing?])]) +])dnl AX_REQUIRE_DEFINED diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined.m4 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined.m4 new file mode 100644 index 0000000..17c3eab --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined.m4 @@ -0,0 +1,37 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. 
This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_REQUIRE_DEFINED], [dnl + m4_ifndef([$1], [m4_fatal([macro ]$1[ is not defined; is a m4 file missing?])]) +])dnl AX_REQUIRE_DEFINED diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver 2.pl b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver 2.pl new file mode 100644 index 0000000..8a90b1f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver 2.pl @@ -0,0 +1,333 @@ +#!/usr/bin/perl -w + +# make_sunver.pl +# +# This script takes at least two arguments, a GNU style version script and +# a list of object and archive files, and generates a corresponding Sun +# style version script as follows: +# +# Each glob pattern, C++ mangled pattern or literal in the input script is +# matched against all global symbols in the input objects, emitting those +# that matched (or nothing if no match was found). +# A comment with the original pattern and its type is left in the output +# file to make it easy to understand the matches. +# +# It uses elfdump when present (native), GNU readelf otherwise. +# It depends on the GNU version of c++filt, since it must understand the +# GNU mangling style. + +use FileHandle; +use IPC::Open2; + +# Enforce C locale. +$ENV{'LC_ALL'} = "C"; +$ENV{'LANG'} = "C"; + +# Input version script, GNU style. +my $symvers = shift; + +########## +# Get all the symbols from the library, match them, and add them to a hash. + +my %sym_hash = (); + +# List of objects and archives to process. +my @OBJECTS = (); + +# List of shared objects to omit from processing. +my @SHAREDOBJS = (); + +# Filter out those input archives that have corresponding shared objects to +# avoid adding all symbols matched in the archive to the output map. +foreach $file (@ARGV) { + if (($so = $file) =~ s/\.a$/.so/ && -e $so) { + printf STDERR "omitted $file -> $so\n"; + push (@SHAREDOBJS, $so); + } else { + push (@OBJECTS, $file); + } +} + +# We need to detect and ignore hidden symbols. Solaris nm can only detect +# this in the harder to parse default output format, and GNU nm not at all, +# so use elfdump -s in the native case and GNU readelf -s otherwise. +# GNU objdump -t cannot be used since it produces a variable number of +# columns. + +# The path to elfdump. +my $elfdump = "/usr/ccs/bin/elfdump"; + +if (-f $elfdump) { + open ELFDUMP,$elfdump.' -s '.(join ' ',@OBJECTS).'|' or die $!; + my $skip_arsym = 0; + + while () { + chomp; + + # Ignore empty lines. + if (/^$/) { + # End of archive symbol table, stop skipping. + $skip_arsym = 0 if $skip_arsym; + next; + } + + # Keep skipping until end of archive symbol table. + next if ($skip_arsym); + + # Ignore object name header for individual objects and archives. + next if (/:$/); + + # Ignore table header lines. + next if (/^Symbol Table Section:/); + next if (/index.*value.*size/); + + # Start of archive symbol table: start skipping. 
+ if (/^Symbol Table: \(archive/) { + $skip_arsym = 1; + next; + } + + # Split table. + (undef, undef, undef, undef, $bind, $oth, undef, $shndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCL"); + # Ignore hidden symbols. + next if ($oth eq "H"); + # Ignore undefined symbols. + next if ($shndx eq "UNDEF"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOB|WEAK)/ or $oth ne "D") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. + $sym_hash{$name}++; + } + close ELFDUMP or die "$elfdump error"; +} else { + open READELF, 'readelf -s -W '.(join ' ',@OBJECTS).'|' or die $!; + # Process each symbol. + while () { + chomp; + + # Ignore empty lines. + next if (/^$/); + + # Ignore object name header. + next if (/^File: .*$/); + + # Ignore table header lines. + next if (/^Symbol table.*contains.*:/); + next if (/Num:.*Value.*Size/); + + # Split table. + (undef, undef, undef, undef, $bind, $vis, $ndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCAL"); + # Ignore hidden symbols. + next if ($vis eq "HIDDEN"); + # Ignore undefined symbols. + next if ($ndx eq "UND"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOBAL|WEAK)/ or $vis ne "DEFAULT") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. + $sym_hash{$name}++; + } + close READELF or die "readelf error"; +} + +########## +# The various types of glob patterns. +# +# A glob pattern that is to be applied to the demangled name: 'cxx'. +# A glob patterns that applies directly to the name in the .o files: 'glob'. +# This pattern is ignored; used for local variables (usually just '*'): 'ign'. + +# The type of the current pattern. +my $glob = 'glob'; + +# We're currently inside `extern "C++"', which Sun ld doesn't understand. +my $in_extern = 0; + +# The c++filt command to use. This *must* be GNU c++filt; the Sun Studio +# c++filt doesn't handle the GNU mangling style. +my $cxxfilt = $ENV{'CXXFILT'} || "c++filt"; + +# The current version name. +my $current_version = ""; + +# Was there any attempt to match a symbol to this version? +my $matches_attempted; + +# The number of versions which matched this symbol. +my $matched_symbols; + +open F,$symvers or die $!; + +# Print information about generating this file +print "# This file was generated by make_sunver.pl. DO NOT EDIT!\n"; +print "# It was generated by:\n"; +printf "# %s %s %s\n", $0, $symvers, (join ' ',@ARGV); +printf "# Omitted archives with corresponding shared libraries: %s\n", + (join ' ', @SHAREDOBJS) if $#SHAREDOBJS >= 0; +print "#\n\n"; + +while () { + # Lines of the form '};' + if (/^([ \t]*)(\}[ \t]*;[ \t]*)$/) { + $glob = 'glob'; + if ($in_extern) { + $in_extern--; + print "$1##$2\n"; + } else { + print; + } + next; + } + + # Lines of the form '} SOME_VERSION_NAME_1.0;' + if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) { + $glob = 'glob'; + # We tried to match symbols agains this version, but none matched. + # Emit dummy hidden symbol to avoid marking this version WEAK. 
+ if ($matches_attempted && $matched_symbols == 0) { + print " hidden:\n"; + print " .force_WEAK_off_$current_version = DATA S0x0 V0x0;\n"; + } + print; next; + } + + # Comment and blank lines + if (/^[ \t]*\#/) { print; next; } + if (/^[ \t]*$/) { print; next; } + + # Lines of the form '{' + if (/^([ \t]*){$/) { + if ($in_extern) { + print "$1##{\n"; + } else { + print; + } + next; + } + + # Lines of the form 'SOME_VERSION_NAME_1.1 {' + if (/^([A-Z0-9_.]+)[ \t]+{$/) { + # Record version name. + $current_version = $1; + # Reset match attempts, #matched symbols for this version. + $matches_attempted = 0; + $matched_symbols = 0; + print; + next; + } + + # Ignore 'global:' + if (/^[ \t]*global:$/) { print; next; } + + # After 'local:', globs should be ignored, they won't be exported. + if (/^[ \t]*local:$/) { + $glob = 'ign'; + print; + next; + } + + # After 'extern "C++"', globs are C++ patterns + if (/^([ \t]*)(extern \"C\+\+\"[ \t]*)$/) { + $in_extern++; + $glob = 'cxx'; + # Need to comment, Sun ld cannot handle this. + print "$1##$2\n"; next; + } + + # Chomp newline now we're done with passing through the input file. + chomp; + + # Catch globs. Note that '{}' is not allowed in globs by this script, + # so only '*' and '[]' are available. + if (/^([ \t]*)([^ \t;{}#]+);?[ \t]*$/) { + my $ws = $1; + my $ptn = $2; + # Turn the glob into a regex by replacing '*' with '.*', '?' with '.'. + # Keep $ptn so we can still print the original form. + ($pattern = $ptn) =~ s/\*/\.\*/g; + $pattern =~ s/\?/\./g; + + if ($glob eq 'ign') { + # We're in a local: * section; just continue. + print "$_\n"; + next; + } + + # Print the glob commented for human readers. + print "$ws##$ptn ($glob)\n"; + # We tried to match a symbol to this version. + $matches_attempted++; + + if ($glob eq 'glob') { + my %ptn_syms = (); + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # Maybe it matches one of the patterns based on the symbol in + # the .o file. + $ptn_syms{$sym}++ if ($sym =~ /^$pattern$/); + } + + foreach my $sym (sort keys(%ptn_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } elsif ($glob eq 'cxx') { + my %dem_syms = (); + + # Verify that we're actually using GNU c++filt. Other versions + # most likely cannot handle GNU style symbol mangling. + my $cxxout = `$cxxfilt --version 2>&1`; + $cxxout =~ m/GNU/ or die "$0 requires GNU c++filt to function"; + + # Talk to c++filt through a pair of file descriptors. + # Need to start a fresh instance per pattern, otherwise the + # process grows to 500+ MB. + my $pid = open2(*FILTIN, *FILTOUT, $cxxfilt) or die $!; + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # No? Well, maybe its demangled form matches one of those + # patterns. + printf FILTOUT "%s\n",$sym; + my $dem = ; + chomp $dem; + $dem_syms{$sym}++ if ($dem =~ /^$pattern$/); + } + + close FILTOUT or die "c++filt error"; + close FILTIN or die "c++filt error"; + # Need to wait for the c++filt process to avoid lots of zombies. + waitpid $pid, 0; + + foreach my $sym (sort keys(%dem_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } else { + # No? Well, then ignore it. + } + next; + } + # Important sanity check. This script can't handle lots of formats + # that GNU ld can, so be sure to error out if one is seen! 
+ die "strange line `$_'"; +} +close F; diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver.pl b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver.pl new file mode 100644 index 0000000..8a90b1f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver.pl @@ -0,0 +1,333 @@ +#!/usr/bin/perl -w + +# make_sunver.pl +# +# This script takes at least two arguments, a GNU style version script and +# a list of object and archive files, and generates a corresponding Sun +# style version script as follows: +# +# Each glob pattern, C++ mangled pattern or literal in the input script is +# matched against all global symbols in the input objects, emitting those +# that matched (or nothing if no match was found). +# A comment with the original pattern and its type is left in the output +# file to make it easy to understand the matches. +# +# It uses elfdump when present (native), GNU readelf otherwise. +# It depends on the GNU version of c++filt, since it must understand the +# GNU mangling style. + +use FileHandle; +use IPC::Open2; + +# Enforce C locale. +$ENV{'LC_ALL'} = "C"; +$ENV{'LANG'} = "C"; + +# Input version script, GNU style. +my $symvers = shift; + +########## +# Get all the symbols from the library, match them, and add them to a hash. + +my %sym_hash = (); + +# List of objects and archives to process. +my @OBJECTS = (); + +# List of shared objects to omit from processing. +my @SHAREDOBJS = (); + +# Filter out those input archives that have corresponding shared objects to +# avoid adding all symbols matched in the archive to the output map. +foreach $file (@ARGV) { + if (($so = $file) =~ s/\.a$/.so/ && -e $so) { + printf STDERR "omitted $file -> $so\n"; + push (@SHAREDOBJS, $so); + } else { + push (@OBJECTS, $file); + } +} + +# We need to detect and ignore hidden symbols. Solaris nm can only detect +# this in the harder to parse default output format, and GNU nm not at all, +# so use elfdump -s in the native case and GNU readelf -s otherwise. +# GNU objdump -t cannot be used since it produces a variable number of +# columns. + +# The path to elfdump. +my $elfdump = "/usr/ccs/bin/elfdump"; + +if (-f $elfdump) { + open ELFDUMP,$elfdump.' -s '.(join ' ',@OBJECTS).'|' or die $!; + my $skip_arsym = 0; + + while (<ELFDUMP>) { + chomp; + + # Ignore empty lines. + if (/^$/) { + # End of archive symbol table, stop skipping. + $skip_arsym = 0 if $skip_arsym; + next; + } + + # Keep skipping until end of archive symbol table. + next if ($skip_arsym); + + # Ignore object name header for individual objects and archives. + next if (/:$/); + + # Ignore table header lines. + next if (/^Symbol Table Section:/); + next if (/index.*value.*size/); + + # Start of archive symbol table: start skipping. + if (/^Symbol Table: \(archive/) { + $skip_arsym = 1; + next; + } + + # Split table. + (undef, undef, undef, undef, $bind, $oth, undef, $shndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCL"); + # Ignore hidden symbols. + next if ($oth eq "H"); + # Ignore undefined symbols. + next if ($shndx eq "UNDEF"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOB|WEAK)/ or $oth ne "D") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. + $sym_hash{$name}++; + } + close ELFDUMP or die "$elfdump error"; +} else { + open READELF, 'readelf -s -W '.(join ' ',@OBJECTS).'|' or die $!; + # Process each symbol.
+ while (<READELF>) { + chomp; + + # Ignore empty lines. + next if (/^$/); + + # Ignore object name header. + next if (/^File: .*$/); + + # Ignore table header lines. + next if (/^Symbol table.*contains.*:/); + next if (/Num:.*Value.*Size/); + + # Split table. + (undef, undef, undef, undef, $bind, $vis, $ndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCAL"); + # Ignore hidden symbols. + next if ($vis eq "HIDDEN"); + # Ignore undefined symbols. + next if ($ndx eq "UND"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOBAL|WEAK)/ or $vis ne "DEFAULT") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. + $sym_hash{$name}++; + } + close READELF or die "readelf error"; +} + +########## +# The various types of glob patterns. +# +# A glob pattern that is to be applied to the demangled name: 'cxx'. +# A glob pattern that applies directly to the name in the .o files: 'glob'. +# This pattern is ignored; used for local variables (usually just '*'): 'ign'. + +# The type of the current pattern. +my $glob = 'glob'; + +# We're currently inside `extern "C++"', which Sun ld doesn't understand. +my $in_extern = 0; + +# The c++filt command to use. This *must* be GNU c++filt; the Sun Studio +# c++filt doesn't handle the GNU mangling style. +my $cxxfilt = $ENV{'CXXFILT'} || "c++filt"; + +# The current version name. +my $current_version = ""; + +# Was there any attempt to match a symbol to this version? +my $matches_attempted; + +# The number of symbols matched to this version. +my $matched_symbols; + +open F,$symvers or die $!; + +# Print information about generating this file +print "# This file was generated by make_sunver.pl. DO NOT EDIT!\n"; +print "# It was generated by:\n"; +printf "# %s %s %s\n", $0, $symvers, (join ' ',@ARGV); +printf "# Omitted archives with corresponding shared libraries: %s\n", + (join ' ', @SHAREDOBJS) if $#SHAREDOBJS >= 0; +print "#\n\n"; + +while (<F>) { + # Lines of the form '};' + if (/^([ \t]*)(\}[ \t]*;[ \t]*)$/) { + $glob = 'glob'; + if ($in_extern) { + $in_extern--; + print "$1##$2\n"; + } else { + print; + } + next; + } + + # Lines of the form '} SOME_VERSION_NAME_1.0;' + if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) { + $glob = 'glob'; + # We tried to match symbols against this version, but none matched. + # Emit dummy hidden symbol to avoid marking this version WEAK. + if ($matches_attempted && $matched_symbols == 0) { + print " hidden:\n"; + print " .force_WEAK_off_$current_version = DATA S0x0 V0x0;\n"; + } + print; next; + } + + # Comment and blank lines + if (/^[ \t]*\#/) { print; next; } + if (/^[ \t]*$/) { print; next; } + + # Lines of the form '{' + if (/^([ \t]*){$/) { + if ($in_extern) { + print "$1##{\n"; + } else { + print; + } + next; + } + + # Lines of the form 'SOME_VERSION_NAME_1.1 {' + if (/^([A-Z0-9_.]+)[ \t]+{$/) { + # Record version name. + $current_version = $1; + # Reset match attempts, #matched symbols for this version. + $matches_attempted = 0; + $matched_symbols = 0; + print; + next; + } + + # Ignore 'global:' + if (/^[ \t]*global:$/) { print; next; } + + # After 'local:', globs should be ignored, they won't be exported. + if (/^[ \t]*local:$/) { + $glob = 'ign'; + print; + next; + } + + # After 'extern "C++"', globs are C++ patterns + if (/^([ \t]*)(extern \"C\+\+\"[ \t]*)$/) { + $in_extern++; + $glob = 'cxx'; + # Need to comment, Sun ld cannot handle this.
+ print "$1##$2\n"; next; + } + + # Chomp newline now we're done with passing through the input file. + chomp; + + # Catch globs. Note that '{}' is not allowed in globs by this script, + # so only '*' and '[]' are available. + if (/^([ \t]*)([^ \t;{}#]+);?[ \t]*$/) { + my $ws = $1; + my $ptn = $2; + # Turn the glob into a regex by replacing '*' with '.*', '?' with '.'. + # Keep $ptn so we can still print the original form. + ($pattern = $ptn) =~ s/\*/\.\*/g; + $pattern =~ s/\?/\./g; + + if ($glob eq 'ign') { + # We're in a local: * section; just continue. + print "$_\n"; + next; + } + + # Print the glob commented for human readers. + print "$ws##$ptn ($glob)\n"; + # We tried to match a symbol to this version. + $matches_attempted++; + + if ($glob eq 'glob') { + my %ptn_syms = (); + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # Maybe it matches one of the patterns based on the symbol in + # the .o file. + $ptn_syms{$sym}++ if ($sym =~ /^$pattern$/); + } + + foreach my $sym (sort keys(%ptn_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } elsif ($glob eq 'cxx') { + my %dem_syms = (); + + # Verify that we're actually using GNU c++filt. Other versions + # most likely cannot handle GNU style symbol mangling. + my $cxxout = `$cxxfilt --version 2>&1`; + $cxxout =~ m/GNU/ or die "$0 requires GNU c++filt to function"; + + # Talk to c++filt through a pair of file descriptors. + # Need to start a fresh instance per pattern, otherwise the + # process grows to 500+ MB. + my $pid = open2(*FILTIN, *FILTOUT, $cxxfilt) or die $!; + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # No? Well, maybe its demangled form matches one of those + # patterns. + printf FILTOUT "%s\n",$sym; + my $dem = <FILTIN>; + chomp $dem; + $dem_syms{$sym}++ if ($dem =~ /^$pattern$/); + } + + close FILTOUT or die "c++filt error"; + close FILTIN or die "c++filt error"; + # Need to wait for the c++filt process to avoid lots of zombies. + waitpid $pid, 0; + + foreach my $sym (sort keys(%dem_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } else { + # No? Well, then ignore it. + } + next; + } + # Important sanity check. This script can't handle lots of formats + # that GNU ld can, so be sure to error out if one is seen! + die "strange line `$_'"; +} +close F; diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile 2.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile 2.am new file mode 100644 index 0000000..afcbfb6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile 2.am @@ -0,0 +1,8 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile 2.in new file mode 100644 index 0000000..a439775 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile 2.in @@ -0,0 +1,559 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved.
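[editor's note] For readers skimming the vendored make_sunver.pl above: the script rewrites a GNU-style version script into a Sun ld mapfile by expanding each glob against the global symbols actually found in the object files, leaving the original pattern behind as a ## comment. A rough before/after illustration (the version name and symbol names here are hypothetical, not taken from libffi's real map):

```
# GNU-style input stanza
MYLIB_1.0 {
  global:
    my_call*;
  local:
    *;
};

# Sun-style output produced by make_sunver.pl for that stanza
MYLIB_1.0 {
  global:
    ##my_call* (glob)
    my_call;
    my_call_async;
  local:
    *;
};
```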
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = man +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) 
+am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +man3dir = $(mandir)/man3 +am__installdirs = "$(DESTDIR)$(man3dir)" +NROFF = nroff +MANS = $(man_MANS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +ALLOCA = @ALLOCA@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AM_LTLDFLAGS = @AM_LTLDFLAGS@ +AM_RUNTESTFLAGS = @AM_RUNTESTFLAGS@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@ +HAVE_LONG_DOUBLE_VARIANT = @HAVE_LONG_DOUBLE_VARIANT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPT_LDFLAGS = @OPT_LDFLAGS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PRTDIAG 
= @PRTDIAG@ +RANLIB = @RANLIB@ +SECTION_LDFLAGS = @SECTION_LDFLAGS@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +TARGET = @TARGET@ +TARGETDIR = @TARGETDIR@ +TARGET_OBJ = @TARGET_OBJ@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_enable_builddir_sed = @ax_enable_builddir_sed@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sys_symbol_underscore = @sys_symbol_underscore@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +toolexecdir = @toolexecdir@ +toolexeclibdir = @toolexeclibdir@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign man/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign man/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-man3: $(man_MANS) + @$(NORMAL_INSTALL) + @list1=''; \ + list2='$(man_MANS)'; \ + test -n "$(man3dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.3[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \ + done; } + +uninstall-man3: + @$(NORMAL_UNINSTALL) + @list=''; test -n "$(man3dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ + sed -n '/\.3[a-z]*$$/p'; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(MANS) +installdirs: + for dir in "$(DESTDIR)$(man3dir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-man + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: install-man3 + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-man + +uninstall-man: uninstall-man3 + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + cscopelist-am ctags-am distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-man3 install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags-am uninstall uninstall-am uninstall-man \ + uninstall-man3 + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile.am new file mode 100644 index 0000000..afcbfb6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile.am @@ -0,0 +1,8 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 new file mode 100644 index 0000000..1f1d303 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 @@ -0,0 +1,41 @@ +.Dd February 15, 2008 +.Dt FFI 3 +.Sh NAME +.Nm FFI +.Nd Foreign Function Interface +.Sh LIBRARY +libffi, -lffi +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The foreign function interface provides a mechanism by which a function can +generate a call to another function at runtime without requiring knowledge of +the called function's interface at compile time. +.Sh SEE ALSO +.Xr ffi_prep_cif 3 , +.Xr ffi_prep_cif_var 3 , +.Xr ffi_call 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 2 new file mode 100644 index 0000000..1f1d303 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 2 @@ -0,0 +1,41 @@ +.Dd February 15, 2008 +.Dt FFI 3 +.Sh NAME +.Nm FFI +.Nd Foreign Function Interface +.Sh LIBRARY +libffi, -lffi +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The foreign function interface provides a mechanism by which a function can +generate a call to another function at runtime without requiring knowledge of +the called function's interface at compile time. +.Sh SEE ALSO +.Xr ffi_prep_cif 3 , +.Xr ffi_prep_cif_var 3 , +.Xr ffi_call 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 new file mode 100644 index 0000000..5351513 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 @@ -0,0 +1,103 @@ +.Dd February 15, 2008 +.Dt ffi_call 3 +.Sh NAME +.Nm ffi_call +.Nd Invoke a foreign function. 
+.Sh SYNOPSIS +.In ffi.h +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_call +function provides a simple mechanism for invoking a function without +requiring knowledge of the function's interface at compile time. +.Fa fn +is called with the values retrieved from the pointers in the +.Fa avalue +array. The return value from +.Fa fn +is placed in storage pointed to by +.Fa rvalue . +.Fa cif +contains information describing the data types, sizes and alignments of the +arguments to and return value from +.Fa fn , +and must be initialized with +.Nm ffi_prep_cif +before it is used with +.Nm ffi_call . +.Pp +.Fa rvalue +must point to storage that is sizeof(ffi_arg) or larger for non-floating point +types. For smaller-sized return value types, the +.Nm ffi_arg +or +.Nm ffi_sarg +integral type must be used to hold +the return value. +.Sh EXAMPLES +.Bd -literal +#include +#include + +unsigned char +foo(unsigned int, float); + +int +main(int argc, const char **argv) +{ + ffi_cif cif; + ffi_type *arg_types[2]; + void *arg_values[2]; + ffi_status status; + + // Because the return value from foo() is smaller than sizeof(long), it + // must be passed as ffi_arg or ffi_sarg. + ffi_arg result; + + // Specify the data type of each argument. Available types are defined + // in . + arg_types[0] = &ffi_type_uint; + arg_types[1] = &ffi_type_float; + + // Prepare the ffi_cif structure. + if ((status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 2, &ffi_type_uint8, arg_types)) != FFI_OK) + { + // Handle the ffi_status error. + } + + // Specify the values of each argument. + unsigned int arg1 = 42; + float arg2 = 5.1; + + arg_values[0] = &arg1; + arg_values[1] = &arg2; + + // Invoke the function. + ffi_call(&cif, FFI_FN(foo), &result, arg_values); + + // The ffi_arg 'result' now contains the unsigned char returned from foo(), + // which can be accessed by a typecast. + printf("result is %hhu", (unsigned char)result); + + return 0; +} + +// The target function. +unsigned char +foo(unsigned int x, float y) +{ + unsigned char result = x - y; + return result; +} +.Ed +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 2 new file mode 100644 index 0000000..5351513 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 2 @@ -0,0 +1,103 @@ +.Dd February 15, 2008 +.Dt ffi_call 3 +.Sh NAME +.Nm ffi_call +.Nd Invoke a foreign function. +.Sh SYNOPSIS +.In ffi.h +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_call +function provides a simple mechanism for invoking a function without +requiring knowledge of the function's interface at compile time. +.Fa fn +is called with the values retrieved from the pointers in the +.Fa avalue +array. The return value from +.Fa fn +is placed in storage pointed to by +.Fa rvalue . +.Fa cif +contains information describing the data types, sizes and alignments of the +arguments to and return value from +.Fa fn , +and must be initialized with +.Nm ffi_prep_cif +before it is used with +.Nm ffi_call . +.Pp +.Fa rvalue +must point to storage that is sizeof(ffi_arg) or larger for non-floating point +types. 
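[editor's note] The C program in the EXAMPLES block of ffi_call.3 above lost the header names from its #include lines during extraction. A self-contained version of the same example, lightly reorganized and assuming the usual <ffi.h> and <stdio.h> headers, looks like this:

```c
#include <ffi.h>
#include <stdio.h>

/* The target function: takes an unsigned int and a float and returns an
   unsigned char.  Because the return type is smaller than sizeof(ffi_arg),
   the result must be read back through an ffi_arg. */
static unsigned char foo(unsigned int x, float y)
{
    return (unsigned char)(x - y);
}

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_uint, &ffi_type_float };
    void *arg_values[2];
    ffi_arg result;              /* holds the promoted return value */

    unsigned int arg1 = 42;
    float arg2 = 5.1f;
    arg_values[0] = &arg1;
    arg_values[1] = &arg2;

    /* Describe the call: default ABI, two arguments, uint8 return type. */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
                     &ffi_type_uint8, arg_types) != FFI_OK)
        return 1;

    /* Perform the call; the return value lands in 'result'. */
    ffi_call(&cif, FFI_FN(foo), &result, arg_values);

    printf("result is %hhu\n", (unsigned char)result);
    return 0;
}
```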
For smaller-sized return value types, the +.Nm ffi_arg +or +.Nm ffi_sarg +integral type must be used to hold +the return value. +.Sh EXAMPLES +.Bd -literal +#include +#include + +unsigned char +foo(unsigned int, float); + +int +main(int argc, const char **argv) +{ + ffi_cif cif; + ffi_type *arg_types[2]; + void *arg_values[2]; + ffi_status status; + + // Because the return value from foo() is smaller than sizeof(long), it + // must be passed as ffi_arg or ffi_sarg. + ffi_arg result; + + // Specify the data type of each argument. Available types are defined + // in . + arg_types[0] = &ffi_type_uint; + arg_types[1] = &ffi_type_float; + + // Prepare the ffi_cif structure. + if ((status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 2, &ffi_type_uint8, arg_types)) != FFI_OK) + { + // Handle the ffi_status error. + } + + // Specify the values of each argument. + unsigned int arg1 = 42; + float arg2 = 5.1; + + arg_values[0] = &arg1; + arg_values[1] = &arg2; + + // Invoke the function. + ffi_call(&cif, FFI_FN(foo), &result, arg_values); + + // The ffi_arg 'result' now contains the unsigned char returned from foo(), + // which can be accessed by a typecast. + printf("result is %hhu", (unsigned char)result); + + return 0; +} + +// The target function. +unsigned char +foo(unsigned int x, float y) +{ + unsigned char result = x - y; + return result; +} +.Ed +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 new file mode 100644 index 0000000..ab2be8a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 @@ -0,0 +1,68 @@ +.Dd February 15, 2008 +.Dt ffi_prep_cif 3 +.Sh NAME +.Nm ffi_prep_cif +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Fa abi +specifies a set of calling conventions to use. +.Fa atypes +is an array of +.Fa nargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. Note that to call a variadic function +.Nm ffi_prep_cif_var +must be used instead. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm . +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif_var 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 2 new file mode 100644 index 0000000..ab2be8a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 2 @@ -0,0 +1,68 @@ +.Dd February 15, 2008 +.Dt ffi_prep_cif 3 +.Sh NAME +.Nm ffi_prep_cif +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. 
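[editor's note] To make the RETURN VALUES discussion in ffi_prep_cif.3 above concrete, here is a minimal sketch (not part of the vendored man page) that checks each documented ffi_prep_cif status for a hypothetical double -> double call description:

```c
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *args[1] = { &ffi_type_double };

    /* Prepare a cif for a function taking one double and returning a double,
       then report which of the documented statuses came back. */
    switch (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_double, args)) {
    case FFI_OK:          puts("cif ready");                         break;
    case FFI_BAD_TYPEDEF: puts("cif, atypes or rtype is malformed"); break;
    case FFI_BAD_ABI:     puts("abi is not a valid ABI");            break;
    default:              puts("unexpected status");                 break;
    }
    return 0;
}
```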
+.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Fa abi +specifies a set of calling conventions to use. +.Fa atypes +is an array of +.Fa nargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. Note that to call a variadic function +.Nm ffi_prep_cif_var +must be used instead. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm . +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif_var 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 new file mode 100644 index 0000000..7e19d0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 @@ -0,0 +1,73 @@ +.Dd January 25, 2011 +.Dt ffi_prep_cif_var 3 +.Sh NAME +.Nm ffi_prep_cif_var +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif_var +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Fa abi +specifies a set of calling conventions to use. +.Fa atypes +is an array of +.Fa ntotalargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. +.Fa nfixedargs +must contain the number of fixed (non-variadic) arguments. +Note that to call a non-variadic function +.Nm ffi_prep_cif +must be used. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif_var +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm +. +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 2 new file mode 100644 index 0000000..7e19d0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 2 @@ -0,0 +1,73 @@ +.Dd January 25, 2011 +.Dt ffi_prep_cif_var 3 +.Sh NAME +.Nm ffi_prep_cif_var +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. 
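[editor's note] As a companion to the ffi_prep_cif_var page above, a minimal sketch of a variadic call, using printf as an illustrative target: the format string is the single fixed argument, and the variadic int is described with its default-promoted type.

```c
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_pointer, &ffi_type_sint };
    const char *fmt = "%d\n";
    int value = 42;
    void *arg_values[2] = { &fmt, &value };
    ffi_arg result;                       /* printf returns int */

    /* One fixed argument (the format string), two arguments in total. */
    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI,
                         1 /* nfixedargs */, 2 /* ntotalargs */,
                         &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

    /* Equivalent to printf("%d\n", 42). */
    ffi_call(&cif, FFI_FN(printf), &result, arg_values);
    return 0;
}
```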
+.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif_var +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Fa abi +specifies a set of calling conventions to use. +.Fa atypes +is an array of +.Fa ntotalargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. +.Fa nfixedargs +must contain the number of fixed (non-variadic) arguments. +Note that to call a non-variadic function +.Nm ffi_prep_cif +must be used. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif_var +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm +. +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing 2 new file mode 100755 index 0000000..f62bbae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing 2 @@ -0,0 +1,215 @@ +#! /bin/sh +# Common wrapper for a few potentially missing GNU programs. + +scriptversion=2013-10-28.13; # UTC + +# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Originally written by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +if test $# -eq 0; then + echo 1>&2 "Try '$0 --help' for more information" + exit 1 +fi + +case $1 in + + --is-lightweight) + # Used by our autoconf macros to check whether the available missing + # script is modern enough. + exit 0 + ;; + + --run) + # Back-compat with the calling convention used by older automake. + shift + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due +to PROGRAM being missing or too old. + +Options: + -h, --help display this help and exit + -v, --version output version information and exit + +Supported PROGRAM values: + aclocal autoconf autoheader autom4te automake makeinfo + bison yacc flex lex help2man + +Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and +'g' are ignored when checking the name. + +Send bug reports to ." + exit $? 
+ ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: unknown '$1' option" + echo 1>&2 "Try '$0 --help' for more information" + exit 1 + ;; + +esac + +# Run the given program, remember its exit status. +"$@"; st=$? + +# If it succeeded, we are done. +test $st -eq 0 && exit 0 + +# Also exit now if we it failed (or wasn't found), and '--version' was +# passed; such an option is passed most likely to detect whether the +# program is present and works. +case $2 in --version|--help) exit $st;; esac + +# Exit code 63 means version mismatch. This often happens when the user +# tries to use an ancient version of a tool on a file that requires a +# minimum version. +if test $st -eq 63; then + msg="probably too old" +elif test $st -eq 127; then + # Program was missing. + msg="missing on your system" +else + # Program was found and executed, but failed. Give up. + exit $st +fi + +perl_URL=http://www.perl.org/ +flex_URL=http://flex.sourceforge.net/ +gnu_software_URL=http://www.gnu.org/software + +program_details () +{ + case $1 in + aclocal|automake) + echo "The '$1' program is part of the GNU Automake package:" + echo "<$gnu_software_URL/automake>" + echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/autoconf>" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + autoconf|autom4te|autoheader) + echo "The '$1' program is part of the GNU Autoconf package:" + echo "<$gnu_software_URL/autoconf/>" + echo "It also requires GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + esac +} + +give_advice () +{ + # Normalize program name to check for. + normalized_program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + + printf '%s\n' "'$1' is $msg." + + configure_deps="'configure.ac' or m4 files included by 'configure.ac'" + case $normalized_program in + autoconf*) + echo "You should only need it if you modified 'configure.ac'," + echo "or m4 files included by it." + program_details 'autoconf' + ;; + autoheader*) + echo "You should only need it if you modified 'acconfig.h' or" + echo "$configure_deps." + program_details 'autoheader' + ;; + automake*) + echo "You should only need it if you modified 'Makefile.am' or" + echo "$configure_deps." + program_details 'automake' + ;; + aclocal*) + echo "You should only need it if you modified 'acinclude.m4' or" + echo "$configure_deps." + program_details 'aclocal' + ;; + autom4te*) + echo "You might have modified some maintainer files that require" + echo "the 'autom4te' program to be rebuilt." + program_details 'autom4te' + ;; + bison*|yacc*) + echo "You should only need it if you modified a '.y' file." + echo "You may want to install the GNU Bison package:" + echo "<$gnu_software_URL/bison/>" + ;; + lex*|flex*) + echo "You should only need it if you modified a '.l' file." + echo "You may want to install the Fast Lexical Analyzer package:" + echo "<$flex_URL>" + ;; + help2man*) + echo "You should only need it if you modified a dependency" \ + "of a man page." + echo "You may want to install the GNU Help2man package:" + echo "<$gnu_software_URL/help2man/>" + ;; + makeinfo*) + echo "You should only need it if you modified a '.texi' file, or" + echo "any other file indirectly affecting the aspect of the manual." 
+ echo "You might want to install the Texinfo package:" + echo "<$gnu_software_URL/texinfo/>" + echo "The spurious makeinfo call might also be the consequence of" + echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" + echo "want to install GNU make:" + echo "<$gnu_software_URL/make/>" + ;; + *) + echo "You might have modified some files without having the proper" + echo "tools for further handling them. Check the 'README' file, it" + echo "often tells you about the needed prerequisites for installing" + echo "this package. You may also peek at any GNU archive site, in" + echo "case some other package contains this missing '$1' program." + ;; + esac +} + +give_advice "$1" | sed -e '1s/^/WARNING: /' \ + -e '2,$s/^/ /' >&2 + +# Propagate the correct exit status (expected to be 127 for a program +# not found, 63 for a program that failed due to version mismatch). +exit $st + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib 2.sln b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib 2.sln new file mode 100644 index 0000000..d9119df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib 2.sln @@ -0,0 +1,33 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.28302.56 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Ffi_staticLib_arm64", "Ffi_staticLib.vcxproj", "{115502C0-BE05-4767-BF19-5C87D805FAD6}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|ARM64 = Debug|ARM64 + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|ARM64 = Release|ARM64 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.Build.0 = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x86.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.Build.0 = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x86.ActiveCfg = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {241C54C7-20DD-4897-9376-E6B6D1B43BD5} + EndGlobalSection +EndGlobal diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib 2.vcxproj b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib 2.vcxproj new file mode 100644 index 0000000..3187699 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib 2.vcxproj @@ -0,0 +1,130 @@ + + + + + Debug + ARM64 + + + Release + ARM64 + + + + 15.0 + {115502C0-BE05-4767-BF19-5C87D805FAD6} + Win32Proj + FfistaticLib + 
10.0.17763.0 + Ffi_staticLib_arm64 + + + + StaticLibrary + true + v141 + Unicode + + + StaticLibrary + false + v141 + true + Unicode + + + + + + + + + + + + + + + true + + + false + + + + NotUsing + Level3 + Disabled + true + FFI_BUILDING_DLL;_DEBUG;_LIB;USE_DL_PREFIX;ARM64;_M_ARM64;NDEBUG;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + false + true + + + false + + + Windows + true + + + + + NotUsing + Level3 + MaxSpeed + true + true + true + FFI_BUILDING_DLL;USE_DL_PREFIX;ARM64;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + true + Speed + true + ..\..\src;..\..\src\aarch64;%(AdditionalUsingDirectories) + + + Windows + true + true + true + + + true + + + + + + + + + + + + + + + + + + + + + + + cl /FA /EP /nologo /I"..\..\include" /I".\aarch64_include" /I"..\..\src\aarch64" "%(FullPath)" > $(IntDir)win64_armasm.i + armasm64 $(IntDir)win64_armasm.i /I"src\" /I"..\..\include" /I"..\..\src\aarch64" -o "$(IntDir)win64_armasm.obj" + + win64_armasm.obj;%(Outputs) + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln new file mode 100644 index 0000000..d9119df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln @@ -0,0 +1,33 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.28302.56 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Ffi_staticLib_arm64", "Ffi_staticLib.vcxproj", "{115502C0-BE05-4767-BF19-5C87D805FAD6}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|ARM64 = Debug|ARM64 + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|ARM64 = Release|ARM64 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.Build.0 = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x86.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.Build.0 = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x86.ActiveCfg = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {241C54C7-20DD-4897-9376-E6B6D1B43BD5} + EndGlobalSection +EndGlobal diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj new file mode 100644 index 0000000..3187699 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj @@ -0,0 +1,130 @@ + + + + + Debug + ARM64 + + + Release + ARM64 + + + + 15.0 + {115502C0-BE05-4767-BF19-5C87D805FAD6} + 
Win32Proj + FfistaticLib + 10.0.17763.0 + Ffi_staticLib_arm64 + + + + StaticLibrary + true + v141 + Unicode + + + StaticLibrary + false + v141 + true + Unicode + + + + + + + + + + + + + + + true + + + false + + + + NotUsing + Level3 + Disabled + true + FFI_BUILDING_DLL;_DEBUG;_LIB;USE_DL_PREFIX;ARM64;_M_ARM64;NDEBUG;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + false + true + + + false + + + Windows + true + + + + + NotUsing + Level3 + MaxSpeed + true + true + true + FFI_BUILDING_DLL;USE_DL_PREFIX;ARM64;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + true + Speed + true + ..\..\src;..\..\src\aarch64;%(AdditionalUsingDirectories) + + + Windows + true + true + true + + + true + + + + + + + + + + + + + + + + + + + + + + + cl /FA /EP /nologo /I"..\..\include" /I".\aarch64_include" /I"..\..\src\aarch64" "%(FullPath)" > $(IntDir)win64_armasm.i + armasm64 $(IntDir)win64_armasm.i /I"src\" /I"..\..\include" /I"..\..\src\aarch64" -o "$(IntDir)win64_armasm.obj" + + win64_armasm.obj;%(Outputs) + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj 2.filters b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj 2.filters new file mode 100644 index 0000000..1f8c6e1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj 2.filters @@ -0,0 +1,57 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj 2.user b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj 2.user new file mode 100644 index 0000000..be25078 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj 2.user @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters new file mode 100644 index 0000000..1f8c6e1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters @@ -0,0 +1,57 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + 
+ Source Files + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user new file mode 100644 index 0000000..be25078 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi 2.h new file mode 100644 index 0000000..02f26a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi 2.h @@ -0,0 +1,511 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef AARCH64 +#define AARCH64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. 
*/ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. */ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. 
*/ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#ifndef _M_ARM64 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. 
*/ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#ifndef _M_ARM64 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h new file mode 100644 index 0000000..02f26a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h @@ -0,0 +1,511 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. 
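/*
 * For context on the interface the header above declares: ffi_prep_cif
 * describes a call signature once, and ffi_call then invokes any function
 * pointer with that signature. A minimal illustrative sketch, in the spirit
 * of the classic puts example from the libffi documentation (error handling
 * trimmed; not part of the vendored sources themselves):
 */
#include <stdio.h>
#include <ffi.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *args[1] = { &ffi_type_pointer };    /* puts takes one char*      */
  char *s = "Hello from libffi";
  void *values[1] = { &s };                     /* addresses of the arguments */
  ffi_arg rc;                                   /* widened integer return slot */

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK)
    ffi_call(&cif, FFI_FN(puts), &rc, values);  /* rc holds puts' int result */
  return 0;
}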
+ + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef AARCH64 +#define AARCH64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. 
+ Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. */ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. 
*/ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#ifndef _M_ARM64 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. 
*/ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#ifndef _M_ARM64 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/fficonfig 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/fficonfig 2.h new file mode 100644 index 0000000..b3d89b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/fficonfig 2.h @@ -0,0 +1,219 @@ +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. 
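/*
 * Relating the FFI_API logic in the header above to a consumer: for MSVC
 * clients the header defaults to __declspec(dllimport), and the comment in
 * the header notes that a client which knows it links the static library
 * can define FFI_BUILDING first so the import decoration is dropped. A
 * short consumer-side sketch (the guard is only meaningful under MSVC):
 */
#if defined(_MSC_VER)
# define FFI_BUILDING   /* linking libffi statically, so skip dllimport */
#endif
#include <ffi.h>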
+ */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "a" + +/* Define this if you want extra debugging. */ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +/* #undef FFI_EXEC_TRAMPOLINE_TABLE */ + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +/* #undef FFI_MMAP_EXEC_WRIT */ + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. */ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +/*#define HAVE_ALLOCA_H 1 */ + +/* Define if your assembler supports .cfi_* directives. */ +#define HAVE_AS_CFI_PSEUDO_OP 1 + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. */ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +/* #undef HAVE_AS_X86_PCREL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +#define HAVE_LONG_DOUBLE 1 + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +#define HAVE_MKOSTEMP 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +#define HAVE_MMAP_DEV_ZERO 1 + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if .eh_frame sections should be read-only. */ +#define HAVE_RO_EH_FRAME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +//#define HAVE_STDLIB_H 0 +#define LACKS_STDLIB_H 1 +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. 
*/ +#define LIBFFI_GNU_SYMBOL_VERSIONING 1 + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Define to 1 if your C compiler doesn't accept -c and -o together. */ +/* #undef NO_MINUS_C_MINUS_O */ + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3-rc0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3-rc0" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 8 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +/* #undef SYMBOL_UNDERSCORE */ + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3-rc0" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc 2.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc 2.sh new file mode 100755 index 0000000..97facd6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc 2.sh @@ -0,0 +1,353 @@ +#!/bin/sh + +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the MSVC wrappificator. 
+# +# The Initial Developer of the Original Code is +# Timothy Wall . +# Portions created by the Initial Developer are Copyright (C) 2009 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Daniel Witte +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. +# +# ***** END LICENSE BLOCK ***** + +# +# GCC-compatible wrapper for cl.exe and ml.exe. Arguments are given in GCC +# format and translated into something sensible for cl or ml. +# + +args_orig=$@ +args="-nologo -W3" +linkargs= +static_crt= +debug_crt= +cl="cl" +ml="ml" +safeseh="-safeseh" +output= +libpaths= +libversion=7 +verbose= + +while [ $# -gt 0 ] +do + case $1 + in + --verbose) + verbose=1 + shift 1 + ;; + --version) + args="-help" + shift 1 + ;; + -fexceptions) + # Don't enable exceptions for now. + #args="$args -EHac" + shift 1 + ;; + -m32) + shift 1 + ;; + -m64) + ml="ml64" # "$MSVC/x86_amd64/ml64" + safeseh= + shift 1 + ;; + -marm) + ml='armasm' + safeseh= + shift 1 + ;; + -marm64) + ml='armasm64' + safeseh= + shift 1 + ;; + -clang-cl) + cl="clang-cl" + shift 1 + ;; + -O0) + args="$args -Od" + shift 1 + ;; + -O*) + # Runtime error checks (enabled by setting -RTC1 in the -DFFI_DEBUG + # case below) are not compatible with optimization flags and will + # cause the build to fail. Therefore, drop the optimization flag if + # -DFFI_DEBUG is also set. + case $args_orig in + *-DFFI_DEBUG*) + args="$args" + ;; + *) + # The ax_cc_maxopt.m4 macro from the upstream autoconf-archive + # project doesn't support MSVC and therefore ends up trying to + # use -O3. Use the equivalent "max optimization" flag for MSVC + # instead of erroring out. + case $1 in + -O3) + args="$args -O2" + ;; + *) + args="$args $1" + ;; + esac + opt="true" + ;; + esac + shift 1 + ;; + -g) + # Enable debug symbol generation. + args="$args -Zi" + shift 1 + ;; + -DFFI_DEBUG) + # Enable runtime error checks. + args="$args -RTC1" + defines="$defines $1" + shift 1 + ;; + -DUSE_STATIC_RTL) + # Link against static CRT. + static_crt=1 + shift 1 + ;; + -DUSE_DEBUG_RTL) + # Link against debug CRT. 
+ debug_crt=1 + shift 1 + ;; + -c) + args="$args -c" + args="$(echo $args | sed 's%/Fe%/Fo%g')" + single="-c" + shift 1 + ;; + -D*=*) + name="$(echo $1|sed 's/-D\([^=][^=]*\)=.*/\1/g')" + value="$(echo $1|sed 's/-D[^=][^=]*=//g')" + args="$args -D${name}='$value'" + defines="$defines -D${name}='$value'" + shift 1 + ;; + -D*) + args="$args $1" + defines="$defines $1" + shift 1 + ;; + -I) + p=$(cygpath -m $2) + args="$args -I$p" + includes="$includes -I$p" + shift 2 + ;; + -I*) + p=$(cygpath -m ${1#-I}) + args="$args -I$p" + includes="$includes -I$p" + shift 1 + ;; + -L) + p=$(cygpath -m $2) + linkargs="$linkargs -LIBPATH:$p" + shift 2 + ;; + -L*) + p=$(cygpath -m ${1#-L}) + linkargs="$linkargs -LIBPATH:$p" + shift 1 + ;; + -link) + # add next argument verbatim to linker args + linkargs="$linkargs $2" + shift 2 + ;; + -l*) + case $1 + in + -lffi) + linkargs="$linkargs lib${1#-l}-${libversion}.lib" + ;; + *) + # ignore other libraries like -lm, hope they are + # covered by MSVCRT + # linkargs="$linkargs ${1#-l}.lib" + ;; + esac + shift 1 + ;; + -W|-Wextra) + # TODO map extra warnings + shift 1 + ;; + -Wall) + # -Wall on MSVC is overzealous, and we already build with -W3. Nothing + # to do here. + shift 1 + ;; + -pedantic) + # libffi tests -pedantic with -Wall, so drop it also. + shift 1 + ;; + -warn) + # ignore -warn all from libtool as well. + if test "$2" = "all"; then + shift 2 + else + args="$args -warn" + shift 1 + fi + ;; + -Werror) + args="$args -WX" + shift 1 + ;; + -W*) + # TODO map specific warnings + shift 1 + ;; + -S) + args="$args -FAs" + shift 1 + ;; + -o) + outdir="$(dirname $2)" + base="$(basename $2|sed 's/\.[^.]*//g')" + if [ -n "$single" ]; then + output="-Fo$2" + else + output="-Fe$2" + fi + armasm_output="-o $2" + if [ -n "$assembly" ]; then + args="$args $output" + else + args="$args $output -Fd$outdir/$base -Fp$outdir/$base -Fa$outdir/$base" + fi + shift 2 + ;; + *.S) + src=$1 + assembly="true" + shift 1 + ;; + *.c) + args="$args $1" + shift 1 + ;; + *) + # Assume it's an MSVC argument, and pass it through. + args="$args $1" + shift 1 + ;; + esac +done + +if [ -n "$linkargs" ]; then + + # If -Zi is specified, certain optimizations are implicitly disabled + # by MSVC. Add back those optimizations if this is an optimized build. + # NOTE: These arguments must come after all others. + if [ -n "$opt" ]; then + linkargs="$linkargs -OPT:REF -OPT:ICF -INCREMENTAL:NO" + fi + + args="$args -link $linkargs" +fi + +if [ -n "$static_crt" ]; then + md=-MT +else + md=-MD +fi + +if [ -n "$debug_crt" ]; then + md="${md}d" +fi + +if [ -n "$assembly" ]; then + if [ -z "$outdir" ]; then + outdir="." + fi + ppsrc="$outdir/$(basename $src|sed 's/.S$/.asm/g')" + + if [ $ml = "armasm" ]; then + defines="$defines -D_M_ARM" + fi + + if [ $ml = "armasm64" ]; then + defines="$defines -D_M_ARM64" + fi + + if test -n "$verbose"; then + echo "$cl -nologo -EP $includes $defines $src > $ppsrc" + fi + + "$cl" -nologo -EP $includes $defines $src > $ppsrc || exit $? + output="$(echo $output | sed 's%/F[dpa][^ ]*%%g')" + if [ $ml = "armasm" ]; then + args="-nologo -g -oldit $armasm_output $ppsrc -errorReport:prompt" + elif [ $ml = "armasm64" ]; then + args="-nologo -g $armasm_output $ppsrc -errorReport:prompt" + else + args="-nologo $safeseh $single $output $ppsrc" + fi + + if test -n "$verbose"; then + echo "$ml $args" + fi + + eval "\"$ml\" $args" + result=$? + + # required to fix ml64 broken output? 
+ #mv *.obj $outdir +else + args="$md $args" + + if test -n "$verbose"; then + echo "$cl $args" + fi + # Return an error code of 1 if an invalid command line parameter is passed + # instead of just ignoring it. Any output that is not a warning or an + # error is filtered so this command behaves more like gcc. cl.exe prints + # the name of the compiled file otherwise, which breaks the dejagnu checks + # for excess warnings and errors. + eval "(\"$cl\" $args 2>&1 1>&3 | \ + awk '{print \$0} /D9002/ {error=1} END{exit error}' >&2) 3>&1 | \ + awk '/warning|error/'" + result=$? +fi + +exit $result + +# vim: noai:ts=4:sw=4 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc.sh b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc.sh new file mode 100755 index 0000000..97facd6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc.sh @@ -0,0 +1,353 @@ +#!/bin/sh + +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the MSVC wrappificator. +# +# The Initial Developer of the Original Code is +# Timothy Wall . +# Portions created by the Initial Developer are Copyright (C) 2009 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Daniel Witte +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. +# +# ***** END LICENSE BLOCK ***** + +# +# GCC-compatible wrapper for cl.exe and ml.exe. Arguments are given in GCC +# format and translated into something sensible for cl or ml. +# + +args_orig=$@ +args="-nologo -W3" +linkargs= +static_crt= +debug_crt= +cl="cl" +ml="ml" +safeseh="-safeseh" +output= +libpaths= +libversion=7 +verbose= + +while [ $# -gt 0 ] +do + case $1 + in + --verbose) + verbose=1 + shift 1 + ;; + --version) + args="-help" + shift 1 + ;; + -fexceptions) + # Don't enable exceptions for now. 
+ #args="$args -EHac" + shift 1 + ;; + -m32) + shift 1 + ;; + -m64) + ml="ml64" # "$MSVC/x86_amd64/ml64" + safeseh= + shift 1 + ;; + -marm) + ml='armasm' + safeseh= + shift 1 + ;; + -marm64) + ml='armasm64' + safeseh= + shift 1 + ;; + -clang-cl) + cl="clang-cl" + shift 1 + ;; + -O0) + args="$args -Od" + shift 1 + ;; + -O*) + # Runtime error checks (enabled by setting -RTC1 in the -DFFI_DEBUG + # case below) are not compatible with optimization flags and will + # cause the build to fail. Therefore, drop the optimization flag if + # -DFFI_DEBUG is also set. + case $args_orig in + *-DFFI_DEBUG*) + args="$args" + ;; + *) + # The ax_cc_maxopt.m4 macro from the upstream autoconf-archive + # project doesn't support MSVC and therefore ends up trying to + # use -O3. Use the equivalent "max optimization" flag for MSVC + # instead of erroring out. + case $1 in + -O3) + args="$args -O2" + ;; + *) + args="$args $1" + ;; + esac + opt="true" + ;; + esac + shift 1 + ;; + -g) + # Enable debug symbol generation. + args="$args -Zi" + shift 1 + ;; + -DFFI_DEBUG) + # Enable runtime error checks. + args="$args -RTC1" + defines="$defines $1" + shift 1 + ;; + -DUSE_STATIC_RTL) + # Link against static CRT. + static_crt=1 + shift 1 + ;; + -DUSE_DEBUG_RTL) + # Link against debug CRT. + debug_crt=1 + shift 1 + ;; + -c) + args="$args -c" + args="$(echo $args | sed 's%/Fe%/Fo%g')" + single="-c" + shift 1 + ;; + -D*=*) + name="$(echo $1|sed 's/-D\([^=][^=]*\)=.*/\1/g')" + value="$(echo $1|sed 's/-D[^=][^=]*=//g')" + args="$args -D${name}='$value'" + defines="$defines -D${name}='$value'" + shift 1 + ;; + -D*) + args="$args $1" + defines="$defines $1" + shift 1 + ;; + -I) + p=$(cygpath -m $2) + args="$args -I$p" + includes="$includes -I$p" + shift 2 + ;; + -I*) + p=$(cygpath -m ${1#-I}) + args="$args -I$p" + includes="$includes -I$p" + shift 1 + ;; + -L) + p=$(cygpath -m $2) + linkargs="$linkargs -LIBPATH:$p" + shift 2 + ;; + -L*) + p=$(cygpath -m ${1#-L}) + linkargs="$linkargs -LIBPATH:$p" + shift 1 + ;; + -link) + # add next argument verbatim to linker args + linkargs="$linkargs $2" + shift 2 + ;; + -l*) + case $1 + in + -lffi) + linkargs="$linkargs lib${1#-l}-${libversion}.lib" + ;; + *) + # ignore other libraries like -lm, hope they are + # covered by MSVCRT + # linkargs="$linkargs ${1#-l}.lib" + ;; + esac + shift 1 + ;; + -W|-Wextra) + # TODO map extra warnings + shift 1 + ;; + -Wall) + # -Wall on MSVC is overzealous, and we already build with -W3. Nothing + # to do here. + shift 1 + ;; + -pedantic) + # libffi tests -pedantic with -Wall, so drop it also. + shift 1 + ;; + -warn) + # ignore -warn all from libtool as well. + if test "$2" = "all"; then + shift 2 + else + args="$args -warn" + shift 1 + fi + ;; + -Werror) + args="$args -WX" + shift 1 + ;; + -W*) + # TODO map specific warnings + shift 1 + ;; + -S) + args="$args -FAs" + shift 1 + ;; + -o) + outdir="$(dirname $2)" + base="$(basename $2|sed 's/\.[^.]*//g')" + if [ -n "$single" ]; then + output="-Fo$2" + else + output="-Fe$2" + fi + armasm_output="-o $2" + if [ -n "$assembly" ]; then + args="$args $output" + else + args="$args $output -Fd$outdir/$base -Fp$outdir/$base -Fa$outdir/$base" + fi + shift 2 + ;; + *.S) + src=$1 + assembly="true" + shift 1 + ;; + *.c) + args="$args $1" + shift 1 + ;; + *) + # Assume it's an MSVC argument, and pass it through. + args="$args $1" + shift 1 + ;; + esac +done + +if [ -n "$linkargs" ]; then + + # If -Zi is specified, certain optimizations are implicitly disabled + # by MSVC. 
Add back those optimizations if this is an optimized build. + # NOTE: These arguments must come after all others. + if [ -n "$opt" ]; then + linkargs="$linkargs -OPT:REF -OPT:ICF -INCREMENTAL:NO" + fi + + args="$args -link $linkargs" +fi + +if [ -n "$static_crt" ]; then + md=-MT +else + md=-MD +fi + +if [ -n "$debug_crt" ]; then + md="${md}d" +fi + +if [ -n "$assembly" ]; then + if [ -z "$outdir" ]; then + outdir="." + fi + ppsrc="$outdir/$(basename $src|sed 's/.S$/.asm/g')" + + if [ $ml = "armasm" ]; then + defines="$defines -D_M_ARM" + fi + + if [ $ml = "armasm64" ]; then + defines="$defines -D_M_ARM64" + fi + + if test -n "$verbose"; then + echo "$cl -nologo -EP $includes $defines $src > $ppsrc" + fi + + "$cl" -nologo -EP $includes $defines $src > $ppsrc || exit $? + output="$(echo $output | sed 's%/F[dpa][^ ]*%%g')" + if [ $ml = "armasm" ]; then + args="-nologo -g -oldit $armasm_output $ppsrc -errorReport:prompt" + elif [ $ml = "armasm64" ]; then + args="-nologo -g $armasm_output $ppsrc -errorReport:prompt" + else + args="-nologo $safeseh $single $output $ppsrc" + fi + + if test -n "$verbose"; then + echo "$ml $args" + fi + + eval "\"$ml\" $args" + result=$? + + # required to fix ml64 broken output? + #mv *.obj $outdir +else + args="$md $args" + + if test -n "$verbose"; then + echo "$cl $args" + fi + # Return an error code of 1 if an invalid command line parameter is passed + # instead of just ignoring it. Any output that is not a warning or an + # error is filtered so this command behaves more like gcc. cl.exe prints + # the name of the compiled file otherwise, which breaks the dejagnu checks + # for excess warnings and errors. + eval "(\"$cl\" $args 2>&1 1>&3 | \ + awk '{print \$0} /D9002/ {error=1} END{exit error}' >&2) 3>&1 | \ + awk '/warning|error/'" + result=$? +fi + +exit $result + +# vim: noai:ts=4:sw=4 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi 2.c new file mode 100644 index 0000000..4cc5925 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi 2.c @@ -0,0 +1,1015 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +#if defined(__aarch64__) || defined(__arm64__)|| defined (_M_ARM64) +#include +#include +#include +#include +#include +#include +#include "internal.h" +#ifdef _M_ARM64 +#include /* FlushInstructionCache */ +#endif + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +union _d +{ + UINT64 d; + UINT32 s[2]; +}; + +struct _v +{ + union _d d[2] __attribute__((aligned(16))); +}; + +struct call_context +{ + struct _v v[N_V_ARG_REG]; + UINT64 x[N_X_ARG_REG]; +}; + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#endif + +#else + +#if defined (__clang__) && defined (__APPLE__) +extern void sys_icache_invalidate (void *start, size_t len); +#endif + +static inline void +ffi_clear_cache (void *start, void *end) +{ +#if defined (__clang__) && defined (__APPLE__) + sys_icache_invalidate (start, (char *)end - (char *)start); +#elif defined (__GNUC__) + __builtin___clear_cache (start, end); +#elif defined (_M_ARM64) + FlushInstructionCache(GetCurrentProcess(), start, (char*)end - (char*)start); +#else +#error "Missing builtin to flush instruction cache" +#endif +} + +#endif + +/* A subroutine of is_vfp_type. Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of is_vfp_type. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY may be allocated to the FP registers. This is both an + fp scalar type as well as an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is an fp scalar. + + Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_* + constant for the type. */ + +static int +is_vfp_type (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (candidate) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + switch (candidate) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 2; + goto done; + } + return 0; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. 
*/ + size = ty->size; + if (size < 4 || size > 64) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + case FFI_TYPE_LONGDOUBLE: + ele_count = size / sizeof(long double); + if (size != ele_count * sizeof(long double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return candidate * 4 + (4 - (int)ele_count); +} + +/* Representation of the procedure call argument marshalling + state. + + The terse state variable names match the names used in the AARCH64 + PCS. */ + +struct arg_state +{ + unsigned ngrn; /* Next general-purpose register number. */ + unsigned nsrn; /* Next vector register number. */ + size_t nsaa; /* Next stack offset. */ + +#if defined (__APPLE__) + unsigned allocating_variadic; +#endif +}; + +/* Initialize a procedure call argument marshalling state. */ +static void +arg_init (struct arg_state *state) +{ + state->ngrn = 0; + state->nsrn = 0; + state->nsaa = 0; +#if defined (__APPLE__) + state->allocating_variadic = 0; +#endif +} + +/* Allocate an aligned slot on the stack and return a pointer to it. */ +static void * +allocate_to_stack (struct arg_state *state, void *stack, + size_t alignment, size_t size) +{ + size_t nsaa = state->nsaa; + + /* Round up the NSAA to the larger of 8 or the natural + alignment of the argument's type. 
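/*
 * Tying is_vfp_type above back to the public API: an aggregate counts as an
 * HFA (and is therefore candidate material for the V registers) only when
 * every leaf element is the same floating-point scalar and there are at
 * most four of them. A caller would describe such a struct to libffi as
 * below; size and alignment are left 0 and are computed by ffi_prep_cif.
 * The type and variable names here are illustrative only:
 */
typedef struct { float x, y; } point2f;          /* HFA: two floats         */

static ffi_type *point2f_elements[] = {
  &ffi_type_float, &ffi_type_float, NULL         /* NULL-terminated list    */
};
static ffi_type ffi_type_point2f = {
  0, 0, FFI_TYPE_STRUCT, point2f_elements        /* size = 0, alignment = 0 */
};
/* struct { float x; double y; } would not qualify: mixed element types.    */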
*/ +#if defined (__APPLE__) + if (state->allocating_variadic && alignment < 8) + alignment = 8; +#else + if (alignment < 8) + alignment = 8; +#endif + + nsaa = FFI_ALIGN (nsaa, alignment); + state->nsaa = nsaa + size; + + return (char *)stack + nsaa; +} + +static ffi_arg +extend_integer_type (void *source, int type) +{ + switch (type) + { + case FFI_TYPE_UINT8: + return *(UINT8 *) source; + case FFI_TYPE_SINT8: + return *(SINT8 *) source; + case FFI_TYPE_UINT16: + return *(UINT16 *) source; + case FFI_TYPE_SINT16: + return *(SINT16 *) source; + case FFI_TYPE_UINT32: + return *(UINT32 *) source; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + return *(SINT32 *) source; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + return *(UINT64 *) source; + break; + case FFI_TYPE_POINTER: + return *(uintptr_t *) source; + default: + abort(); + } +} + +#if defined(_MSC_VER) +void extend_hfa_type (void *dest, void *src, int h); +#else +static void +extend_hfa_type (void *dest, void *src, int h) +{ + ssize_t f = h - AARCH64_RET_S4; + void *x0; + + asm volatile ( + "adr %0, 0f\n" +" add %0, %0, %1\n" +" br %0\n" +"0: ldp s16, s17, [%3]\n" /* S4 */ +" ldp s18, s19, [%3, #8]\n" +" b 4f\n" +" ldp s16, s17, [%3]\n" /* S3 */ +" ldr s18, [%3, #8]\n" +" b 3f\n" +" ldp s16, s17, [%3]\n" /* S2 */ +" b 2f\n" +" nop\n" +" ldr s16, [%3]\n" /* S1 */ +" b 1f\n" +" nop\n" +" ldp d16, d17, [%3]\n" /* D4 */ +" ldp d18, d19, [%3, #16]\n" +" b 4f\n" +" ldp d16, d17, [%3]\n" /* D3 */ +" ldr d18, [%3, #16]\n" +" b 3f\n" +" ldp d16, d17, [%3]\n" /* D2 */ +" b 2f\n" +" nop\n" +" ldr d16, [%3]\n" /* D1 */ +" b 1f\n" +" nop\n" +" ldp q16, q17, [%3]\n" /* Q4 */ +" ldp q18, q19, [%3, #32]\n" +" b 4f\n" +" ldp q16, q17, [%3]\n" /* Q3 */ +" ldr q18, [%3, #32]\n" +" b 3f\n" +" ldp q16, q17, [%3]\n" /* Q2 */ +" b 2f\n" +" nop\n" +" ldr q16, [%3]\n" /* Q1 */ +" b 1f\n" +"4: str q19, [%2, #48]\n" +"3: str q18, [%2, #32]\n" +"2: str q17, [%2, #16]\n" +"1: str q16, [%2]" + : "=&r"(x0) + : "r"(f * 12), "r"(dest), "r"(src) + : "memory", "v16", "v17", "v18", "v19"); +} +#endif + +#if defined(_MSC_VER) +void* compress_hfa_type (void *dest, void *src, int h); +#else +static void * +compress_hfa_type (void *dest, void *reg, int h) +{ + switch (h) + { + case AARCH64_RET_S1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 12; +#endif + } + else + *(float *)dest = *(float *)reg; + break; + case AARCH64_RET_S2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.s, v17.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_S3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.s, v17.s, v18.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_S4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + case AARCH64_RET_D1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 8; +#endif + } + else + *(double *)dest = *(double *)reg; + break; + case AARCH64_RET_D2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.d, v17.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_D3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.d, v17.d, v18.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_D4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.d, v17.d, v18.d, v19.d }[0], 
[%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + default: + if (dest != reg) + return memcpy (dest, reg, 16 * (4 - (h & 3))); + break; + } + return dest; +} +#endif + +/* Either allocate an appropriate register for the argument type, or if + none are available, allocate a stack slot and return a pointer + to the allocated space. */ + +static void * +allocate_int_to_reg_or_stack (struct call_context *context, + struct arg_state *state, + void *stack, size_t size) +{ + if (state->ngrn < N_X_ARG_REG) + return &context->x[state->ngrn++]; + + state->ngrn = N_X_ARG_REG; + return allocate_to_stack (state, stack, size, size); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + size_t bytes = cif->bytes; + int flags, i, n; + + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = AARCH64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = AARCH64_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = AARCH64_RET_UINT16; + break; + case FFI_TYPE_UINT32: + flags = AARCH64_RET_UINT32; + break; + case FFI_TYPE_SINT8: + flags = AARCH64_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = AARCH64_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = AARCH64_RET_SINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = AARCH64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + flags = is_vfp_type (rtype); + if (flags == 0) + { + size_t s = rtype->size; + if (s > 16) + { + flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM; + bytes += 8; + } + else if (s == 16) + flags = AARCH64_RET_INT128; + else if (s == 8) + flags = AARCH64_RET_INT64; + else + flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY; + } + break; + + default: + abort(); + } + + for (i = 0, n = cif->nargs; i < n; i++) + if (is_vfp_type (cif->arg_types[i])) + { + flags |= AARCH64_FLAG_ARG_V; + break; + } + + /* Round the stack up to a multiple of the stack alignment requirement. */ + cif->bytes = (unsigned) FFI_ALIGN(bytes, 16); + cif->flags = flags; +#if defined (__APPLE__) + cif->aarch64_nfixedargs = 0; +#endif + + return FFI_OK; +} + +#if defined (__APPLE__) +/* Perform Apple-specific cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, + unsigned int ntotalargs) +{ + ffi_status status = ffi_prep_cif_machdep (cif); + cif->aarch64_nfixedargs = nfixedargs; + return status; +} +#endif /* __APPLE__ */ + +extern void ffi_call_SYSV (struct call_context *context, void *frame, + void (*fn)(void), void *rvalue, int flags, + void *closure) FFI_HIDDEN; + +/* Call a function with the provided arguments and capture the return + value. */ +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue, + void **avalue, void *closure) +{ + struct call_context *context; + void *stack, *frame, *rvalue; + struct arg_state state; + size_t stack_bytes, rtype_size, rsize; + int i, nargs, flags; + ffi_type *rtype; + + flags = cif->flags; + rtype = cif->rtype; + rtype_size = rtype->size; + stack_bytes = cif->bytes; + + /* If the target function returns a structure via hidden pointer, + then we cannot allow a null rvalue. Otherwise, mash a null + rvalue to void return type. 
*/ + rsize = 0; + if (flags & AARCH64_RET_IN_MEM) + { + if (orig_rvalue == NULL) + rsize = rtype_size; + } + else if (orig_rvalue == NULL) + flags &= AARCH64_FLAG_ARG_V; + else if (flags & AARCH64_RET_NEED_COPY) + rsize = 16; + + /* Allocate consectutive stack for everything we'll need. */ + context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize); + stack = context + 1; + frame = (void*)((uintptr_t)stack + (uintptr_t)stack_bytes); + rvalue = (rsize ? (void*)((uintptr_t)frame + 32) : orig_rvalue); + + arg_init (&state); + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + size_t s = ty->size; + void *a = avalue[i]; + int h, t; + + t = ty->type; + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + /* If the argument is a basic type the argument is allocated to an + appropriate register, or if none are available, to the stack. */ + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_pointer: + { + ffi_arg ext = extend_integer_type (a, t); + if (state.ngrn < N_X_ARG_REG) + context->x[state.ngrn++] = ext; + else + { + void *d = allocate_to_stack (&state, stack, ty->alignment, s); + state.ngrn = N_X_ARG_REG; + /* Note that the default abi extends each argument + to a full 64-bit slot, while the iOS abi allocates + only enough space. */ +#ifdef __APPLE__ + memcpy(d, a, s); +#else + *(ffi_arg *)d = ext; +#endif + } + } + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + { + void *dest; + + h = is_vfp_type (ty); + if (h) + { + int elems = 4 - (h & 3); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + elems <= N_X_ARG_REG) + { + dest = &context->x[state.ngrn]; + state.ngrn += elems; + extend_hfa_type(dest, a, h); + break; + } + state.nsrn = N_X_ARG_REG; + dest = allocate_to_stack(&state, stack, ty->alignment, s); + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + elems <= N_V_ARG_REG) + { + dest = &context->v[state.nsrn]; + state.nsrn += elems; + extend_hfa_type (dest, a, h); + break; + } + state.nsrn = N_V_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* If the argument is a composite type that is larger than 16 + bytes, then the argument has been copied to memory, and + the argument is replaced by a pointer to the copy. */ + a = &avalue[i]; + t = FFI_TYPE_POINTER; + s = sizeof (void *); + goto do_pointer; + } + else + { + size_t n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + /* If the argument is a composite type and the size in + double-words is not more than the number of available + X registers, then the argument is copied into + consecutive X registers. */ + dest = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + /* Otherwise, there are insufficient X registers. Further + X register allocations are prevented, the NSAA is + adjusted and the argument is copied to memory at the + adjusted NSAA. 
*/ + state.ngrn = N_X_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + } + memcpy (dest, a, s); + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + ffi_call_SYSV (context, frame, fn, rvalue, flags, closure); + + if (flags & AARCH64_RET_NEED_COPY) + memcpy (orig_rvalue, rvalue, rtype_size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif /* FFI_GO_CLOSURES */ + +/* Build a trampoline. */ + +extern void ffi_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + void (*start)(void); + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_closure_SYSV_V; + else + start = ffi_closure_SYSV; + +#if FFI_EXEC_TRAMPOLINE_TABLE +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH + codeloc = ptrauth_strip (codeloc, ptrauth_key_asia); +#endif + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = start; +#endif +#else + static const unsigned char trampoline[16] = { + 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */ + 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */ + 0x00, 0x02, 0x1f, 0xd6 /* br x16 */ + }; + char *tramp = closure->tramp; + + memcpy (tramp, trampoline, sizeof(trampoline)); + + *(UINT64 *)(tramp + 16) = (uintptr_t)start; + + ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE); + + /* Also flush the cache for code mapping. */ +#ifdef _M_ARM64 + // Not using dlmalloc.c for Windows ARM64 builds + // so calling ffi_data_to_code_pointer() isn't necessary + unsigned char *tramp_code = tramp; + #else + unsigned char *tramp_code = ffi_data_to_code_pointer (tramp); + #endif + ffi_clear_cache (tramp_code, tramp_code + FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +extern void ffi_go_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*start)(void); + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_go_closure_SYSV_V; + else + start = ffi_go_closure_SYSV; + + closure->tramp = start; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif /* FFI_GO_CLOSURES */ + +/* Primary handler to setup and invoke a function within a closure. + + A closure when invoked enters via the assembler wrapper + ffi_closure_SYSV(). The wrapper allocates a call context on the + stack, saves the interesting registers (from the perspective of + the calling convention) into the context then passes control to + ffi_closure_SYSV_inner() passing the saved context and a pointer to + the stack at the point ffi_closure_SYSV() was invoked. + + On the return path the assembler wrapper will reload call context + registers. 
+ + ffi_closure_SYSV_inner() marshalls the call context into ffi value + descriptors, invokes the wrapped function, then marshalls the return + value back into the call context. */ + +int FFI_HIDDEN +ffi_closure_SYSV_inner (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + struct call_context *context, + void *stack, void *rvalue, void *struct_rvalue) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + int i, h, nargs, flags; + struct arg_state state; + + arg_init (&state); + + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + int t = ty->type; + size_t n, s = ty->size; + + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + h = is_vfp_type (ty); + if (h) + { + n = 4 - (h & 3); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + n <= N_X_ARG_REG) + { + void *reg = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + + /* Eeek! We need a pointer to the structure, however the + homogeneous float elements are being passed in individual + registers, therefore for float and double the structure + is not represented as a contiguous sequence of bytes in + our saved register context. We don't need the original + contents of the register storage, so we reformat the + structure into the same memory. */ + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + n <= N_V_ARG_REG) + { + void *reg = &context->v[state.nsrn]; + state.nsrn += (unsigned int)n; + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } +#ifdef _M_ARM64 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* Replace Composite type of size greater than 16 with a + pointer. 
*/ + avalue[i] = *(void **) + allocate_int_to_reg_or_stack (context, &state, stack, + sizeof (void *)); + } + else + { + n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + avalue[i] = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + state.ngrn = N_X_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + flags = cif->flags; + if (flags & AARCH64_RET_IN_MEM) + rvalue = struct_rvalue; + + fun (cif, rvalue, avalue, user_data); + + return flags; +} + +#endif /* (__aarch64__) || defined(__arm64__)|| defined (_M_ARM64)*/ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi.c new file mode 100644 index 0000000..4cc5925 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi.c @@ -0,0 +1,1015 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__)|| defined (_M_ARM64) +#include +#include +#include +#include +#include +#include +#include "internal.h" +#ifdef _M_ARM64 +#include /* FlushInstructionCache */ +#endif + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. 
*/ +#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +union _d +{ + UINT64 d; + UINT32 s[2]; +}; + +struct _v +{ + union _d d[2] __attribute__((aligned(16))); +}; + +struct call_context +{ + struct _v v[N_V_ARG_REG]; + UINT64 x[N_X_ARG_REG]; +}; + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#endif + +#else + +#if defined (__clang__) && defined (__APPLE__) +extern void sys_icache_invalidate (void *start, size_t len); +#endif + +static inline void +ffi_clear_cache (void *start, void *end) +{ +#if defined (__clang__) && defined (__APPLE__) + sys_icache_invalidate (start, (char *)end - (char *)start); +#elif defined (__GNUC__) + __builtin___clear_cache (start, end); +#elif defined (_M_ARM64) + FlushInstructionCache(GetCurrentProcess(), start, (char*)end - (char*)start); +#else +#error "Missing builtin to flush instruction cache" +#endif +} + +#endif + +/* A subroutine of is_vfp_type. Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of is_vfp_type. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY may be allocated to the FP registers. This is both an + fp scalar type as well as an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is an fp scalar. + + Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_* + constant for the type. */ + +static int +is_vfp_type (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (candidate) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + switch (candidate) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 2; + goto done; + } + return 0; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. */ + size = ty->size; + if (size < 4 || size > 64) + return 0; + + /* Find the type of the first non-structure member. 
*/ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + case FFI_TYPE_LONGDOUBLE: + ele_count = size / sizeof(long double); + if (size != ele_count * sizeof(long double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return candidate * 4 + (4 - (int)ele_count); +} + +/* Representation of the procedure call argument marshalling + state. + + The terse state variable names match the names used in the AARCH64 + PCS. */ + +struct arg_state +{ + unsigned ngrn; /* Next general-purpose register number. */ + unsigned nsrn; /* Next vector register number. */ + size_t nsaa; /* Next stack offset. */ + +#if defined (__APPLE__) + unsigned allocating_variadic; +#endif +}; + +/* Initialize a procedure call argument marshalling state. */ +static void +arg_init (struct arg_state *state) +{ + state->ngrn = 0; + state->nsrn = 0; + state->nsaa = 0; +#if defined (__APPLE__) + state->allocating_variadic = 0; +#endif +} + +/* Allocate an aligned slot on the stack and return a pointer to it. */ +static void * +allocate_to_stack (struct arg_state *state, void *stack, + size_t alignment, size_t size) +{ + size_t nsaa = state->nsaa; + + /* Round up the NSAA to the larger of 8 or the natural + alignment of the argument's type. 
*/ +#if defined (__APPLE__) + if (state->allocating_variadic && alignment < 8) + alignment = 8; +#else + if (alignment < 8) + alignment = 8; +#endif + + nsaa = FFI_ALIGN (nsaa, alignment); + state->nsaa = nsaa + size; + + return (char *)stack + nsaa; +} + +static ffi_arg +extend_integer_type (void *source, int type) +{ + switch (type) + { + case FFI_TYPE_UINT8: + return *(UINT8 *) source; + case FFI_TYPE_SINT8: + return *(SINT8 *) source; + case FFI_TYPE_UINT16: + return *(UINT16 *) source; + case FFI_TYPE_SINT16: + return *(SINT16 *) source; + case FFI_TYPE_UINT32: + return *(UINT32 *) source; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + return *(SINT32 *) source; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + return *(UINT64 *) source; + break; + case FFI_TYPE_POINTER: + return *(uintptr_t *) source; + default: + abort(); + } +} + +#if defined(_MSC_VER) +void extend_hfa_type (void *dest, void *src, int h); +#else +static void +extend_hfa_type (void *dest, void *src, int h) +{ + ssize_t f = h - AARCH64_RET_S4; + void *x0; + + asm volatile ( + "adr %0, 0f\n" +" add %0, %0, %1\n" +" br %0\n" +"0: ldp s16, s17, [%3]\n" /* S4 */ +" ldp s18, s19, [%3, #8]\n" +" b 4f\n" +" ldp s16, s17, [%3]\n" /* S3 */ +" ldr s18, [%3, #8]\n" +" b 3f\n" +" ldp s16, s17, [%3]\n" /* S2 */ +" b 2f\n" +" nop\n" +" ldr s16, [%3]\n" /* S1 */ +" b 1f\n" +" nop\n" +" ldp d16, d17, [%3]\n" /* D4 */ +" ldp d18, d19, [%3, #16]\n" +" b 4f\n" +" ldp d16, d17, [%3]\n" /* D3 */ +" ldr d18, [%3, #16]\n" +" b 3f\n" +" ldp d16, d17, [%3]\n" /* D2 */ +" b 2f\n" +" nop\n" +" ldr d16, [%3]\n" /* D1 */ +" b 1f\n" +" nop\n" +" ldp q16, q17, [%3]\n" /* Q4 */ +" ldp q18, q19, [%3, #32]\n" +" b 4f\n" +" ldp q16, q17, [%3]\n" /* Q3 */ +" ldr q18, [%3, #32]\n" +" b 3f\n" +" ldp q16, q17, [%3]\n" /* Q2 */ +" b 2f\n" +" nop\n" +" ldr q16, [%3]\n" /* Q1 */ +" b 1f\n" +"4: str q19, [%2, #48]\n" +"3: str q18, [%2, #32]\n" +"2: str q17, [%2, #16]\n" +"1: str q16, [%2]" + : "=&r"(x0) + : "r"(f * 12), "r"(dest), "r"(src) + : "memory", "v16", "v17", "v18", "v19"); +} +#endif + +#if defined(_MSC_VER) +void* compress_hfa_type (void *dest, void *src, int h); +#else +static void * +compress_hfa_type (void *dest, void *reg, int h) +{ + switch (h) + { + case AARCH64_RET_S1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 12; +#endif + } + else + *(float *)dest = *(float *)reg; + break; + case AARCH64_RET_S2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.s, v17.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_S3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.s, v17.s, v18.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_S4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + case AARCH64_RET_D1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 8; +#endif + } + else + *(double *)dest = *(double *)reg; + break; + case AARCH64_RET_D2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.d, v17.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_D3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.d, v17.d, v18.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_D4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.d, v17.d, v18.d, v19.d }[0], 
[%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + default: + if (dest != reg) + return memcpy (dest, reg, 16 * (4 - (h & 3))); + break; + } + return dest; +} +#endif + +/* Either allocate an appropriate register for the argument type, or if + none are available, allocate a stack slot and return a pointer + to the allocated space. */ + +static void * +allocate_int_to_reg_or_stack (struct call_context *context, + struct arg_state *state, + void *stack, size_t size) +{ + if (state->ngrn < N_X_ARG_REG) + return &context->x[state->ngrn++]; + + state->ngrn = N_X_ARG_REG; + return allocate_to_stack (state, stack, size, size); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + size_t bytes = cif->bytes; + int flags, i, n; + + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = AARCH64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = AARCH64_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = AARCH64_RET_UINT16; + break; + case FFI_TYPE_UINT32: + flags = AARCH64_RET_UINT32; + break; + case FFI_TYPE_SINT8: + flags = AARCH64_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = AARCH64_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = AARCH64_RET_SINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = AARCH64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + flags = is_vfp_type (rtype); + if (flags == 0) + { + size_t s = rtype->size; + if (s > 16) + { + flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM; + bytes += 8; + } + else if (s == 16) + flags = AARCH64_RET_INT128; + else if (s == 8) + flags = AARCH64_RET_INT64; + else + flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY; + } + break; + + default: + abort(); + } + + for (i = 0, n = cif->nargs; i < n; i++) + if (is_vfp_type (cif->arg_types[i])) + { + flags |= AARCH64_FLAG_ARG_V; + break; + } + + /* Round the stack up to a multiple of the stack alignment requirement. */ + cif->bytes = (unsigned) FFI_ALIGN(bytes, 16); + cif->flags = flags; +#if defined (__APPLE__) + cif->aarch64_nfixedargs = 0; +#endif + + return FFI_OK; +} + +#if defined (__APPLE__) +/* Perform Apple-specific cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, + unsigned int ntotalargs) +{ + ffi_status status = ffi_prep_cif_machdep (cif); + cif->aarch64_nfixedargs = nfixedargs; + return status; +} +#endif /* __APPLE__ */ + +extern void ffi_call_SYSV (struct call_context *context, void *frame, + void (*fn)(void), void *rvalue, int flags, + void *closure) FFI_HIDDEN; + +/* Call a function with the provided arguments and capture the return + value. */ +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue, + void **avalue, void *closure) +{ + struct call_context *context; + void *stack, *frame, *rvalue; + struct arg_state state; + size_t stack_bytes, rtype_size, rsize; + int i, nargs, flags; + ffi_type *rtype; + + flags = cif->flags; + rtype = cif->rtype; + rtype_size = rtype->size; + stack_bytes = cif->bytes; + + /* If the target function returns a structure via hidden pointer, + then we cannot allow a null rvalue. Otherwise, mash a null + rvalue to void return type. 
*/ + rsize = 0; + if (flags & AARCH64_RET_IN_MEM) + { + if (orig_rvalue == NULL) + rsize = rtype_size; + } + else if (orig_rvalue == NULL) + flags &= AARCH64_FLAG_ARG_V; + else if (flags & AARCH64_RET_NEED_COPY) + rsize = 16; + + /* Allocate consectutive stack for everything we'll need. */ + context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize); + stack = context + 1; + frame = (void*)((uintptr_t)stack + (uintptr_t)stack_bytes); + rvalue = (rsize ? (void*)((uintptr_t)frame + 32) : orig_rvalue); + + arg_init (&state); + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + size_t s = ty->size; + void *a = avalue[i]; + int h, t; + + t = ty->type; + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + /* If the argument is a basic type the argument is allocated to an + appropriate register, or if none are available, to the stack. */ + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_pointer: + { + ffi_arg ext = extend_integer_type (a, t); + if (state.ngrn < N_X_ARG_REG) + context->x[state.ngrn++] = ext; + else + { + void *d = allocate_to_stack (&state, stack, ty->alignment, s); + state.ngrn = N_X_ARG_REG; + /* Note that the default abi extends each argument + to a full 64-bit slot, while the iOS abi allocates + only enough space. */ +#ifdef __APPLE__ + memcpy(d, a, s); +#else + *(ffi_arg *)d = ext; +#endif + } + } + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + { + void *dest; + + h = is_vfp_type (ty); + if (h) + { + int elems = 4 - (h & 3); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + elems <= N_X_ARG_REG) + { + dest = &context->x[state.ngrn]; + state.ngrn += elems; + extend_hfa_type(dest, a, h); + break; + } + state.nsrn = N_X_ARG_REG; + dest = allocate_to_stack(&state, stack, ty->alignment, s); + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + elems <= N_V_ARG_REG) + { + dest = &context->v[state.nsrn]; + state.nsrn += elems; + extend_hfa_type (dest, a, h); + break; + } + state.nsrn = N_V_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* If the argument is a composite type that is larger than 16 + bytes, then the argument has been copied to memory, and + the argument is replaced by a pointer to the copy. */ + a = &avalue[i]; + t = FFI_TYPE_POINTER; + s = sizeof (void *); + goto do_pointer; + } + else + { + size_t n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + /* If the argument is a composite type and the size in + double-words is not more than the number of available + X registers, then the argument is copied into + consecutive X registers. */ + dest = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + /* Otherwise, there are insufficient X registers. Further + X register allocations are prevented, the NSAA is + adjusted and the argument is copied to memory at the + adjusted NSAA. 
*/ + state.ngrn = N_X_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + } + memcpy (dest, a, s); + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + ffi_call_SYSV (context, frame, fn, rvalue, flags, closure); + + if (flags & AARCH64_RET_NEED_COPY) + memcpy (orig_rvalue, rvalue, rtype_size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif /* FFI_GO_CLOSURES */ + +/* Build a trampoline. */ + +extern void ffi_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + void (*start)(void); + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_closure_SYSV_V; + else + start = ffi_closure_SYSV; + +#if FFI_EXEC_TRAMPOLINE_TABLE +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH + codeloc = ptrauth_strip (codeloc, ptrauth_key_asia); +#endif + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = start; +#endif +#else + static const unsigned char trampoline[16] = { + 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */ + 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */ + 0x00, 0x02, 0x1f, 0xd6 /* br x16 */ + }; + char *tramp = closure->tramp; + + memcpy (tramp, trampoline, sizeof(trampoline)); + + *(UINT64 *)(tramp + 16) = (uintptr_t)start; + + ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE); + + /* Also flush the cache for code mapping. */ +#ifdef _M_ARM64 + // Not using dlmalloc.c for Windows ARM64 builds + // so calling ffi_data_to_code_pointer() isn't necessary + unsigned char *tramp_code = tramp; + #else + unsigned char *tramp_code = ffi_data_to_code_pointer (tramp); + #endif + ffi_clear_cache (tramp_code, tramp_code + FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +extern void ffi_go_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*start)(void); + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_go_closure_SYSV_V; + else + start = ffi_go_closure_SYSV; + + closure->tramp = start; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif /* FFI_GO_CLOSURES */ + +/* Primary handler to setup and invoke a function within a closure. + + A closure when invoked enters via the assembler wrapper + ffi_closure_SYSV(). The wrapper allocates a call context on the + stack, saves the interesting registers (from the perspective of + the calling convention) into the context then passes control to + ffi_closure_SYSV_inner() passing the saved context and a pointer to + the stack at the point ffi_closure_SYSV() was invoked. + + On the return path the assembler wrapper will reload call context + registers. 
+ + ffi_closure_SYSV_inner() marshalls the call context into ffi value + descriptors, invokes the wrapped function, then marshalls the return + value back into the call context. */ + +int FFI_HIDDEN +ffi_closure_SYSV_inner (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + struct call_context *context, + void *stack, void *rvalue, void *struct_rvalue) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + int i, h, nargs, flags; + struct arg_state state; + + arg_init (&state); + + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + int t = ty->type; + size_t n, s = ty->size; + + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + h = is_vfp_type (ty); + if (h) + { + n = 4 - (h & 3); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + n <= N_X_ARG_REG) + { + void *reg = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + + /* Eeek! We need a pointer to the structure, however the + homogeneous float elements are being passed in individual + registers, therefore for float and double the structure + is not represented as a contiguous sequence of bytes in + our saved register context. We don't need the original + contents of the register storage, so we reformat the + structure into the same memory. */ + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + n <= N_V_ARG_REG) + { + void *reg = &context->v[state.nsrn]; + state.nsrn += (unsigned int)n; + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } +#ifdef _M_ARM64 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* Replace Composite type of size greater than 16 with a + pointer. 
*/ + avalue[i] = *(void **) + allocate_int_to_reg_or_stack (context, &state, stack, + sizeof (void *)); + } + else + { + n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + avalue[i] = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + state.ngrn = N_X_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + flags = cif->flags; + if (flags & AARCH64_RET_IN_MEM) + rvalue = struct_rvalue; + + fun (cif, rvalue, avalue, user_data); + + return flags; +} + +#endif /* (__aarch64__) || defined(__arm64__)|| defined (_M_ARM64)*/ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget 2.h new file mode 100644 index 0000000..ecb6d2d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget 2.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +#ifdef __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#elif defined(_M_ARM64) +#define FFI_SIZEOF_ARG 8 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#ifdef _M_ARM64 +#define FFI_EXTRA_CIF_FIELDS unsigned is_variadic +#endif + +/* ---- Internal ---- */ + +#if defined (__APPLE__) +#define FFI_TARGET_SPECIFIC_VARIADIC +#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs +#elif !defined(_M_ARM64) +/* iOS and Windows reserve x18 for the system. Disable Go closures until + a new static chain is chosen. */ +#define FFI_GO_CLOSURES 1 +#endif + +#ifndef _M_ARM64 +/* No complex type on Windows */ +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget.h new file mode 100644 index 0000000..ecb6d2d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +#ifdef __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#elif defined(_M_ARM64) +#define FFI_SIZEOF_ARG 8 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#ifdef _M_ARM64 +#define FFI_EXTRA_CIF_FIELDS unsigned is_variadic +#endif + +/* ---- Internal ---- */ + +#if defined (__APPLE__) +#define FFI_TARGET_SPECIFIC_VARIADIC +#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs +#elif !defined(_M_ARM64) +/* iOS and Windows reserve x18 for the system. Disable Go closures until + a new static chain is chosen. */ +#define FFI_GO_CLOSURES 1 +#endif + +#ifndef _M_ARM64 +/* No complex type on Windows */ +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal 2.h new file mode 100644 index 0000000..9c3e077 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal 2.h @@ -0,0 +1,67 @@ +/* +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define AARCH64_RET_VOID 0 +#define AARCH64_RET_INT64 1 +#define AARCH64_RET_INT128 2 + +#define AARCH64_RET_UNUSED3 3 +#define AARCH64_RET_UNUSED4 4 +#define AARCH64_RET_UNUSED5 5 +#define AARCH64_RET_UNUSED6 6 +#define AARCH64_RET_UNUSED7 7 + +/* Note that FFI_TYPE_FLOAT == 2, _DOUBLE == 3, _LONGDOUBLE == 4, + so _S4 through _Q1 are layed out as (TYPE * 4) + (4 - COUNT). 
*/ +#define AARCH64_RET_S4 8 +#define AARCH64_RET_S3 9 +#define AARCH64_RET_S2 10 +#define AARCH64_RET_S1 11 + +#define AARCH64_RET_D4 12 +#define AARCH64_RET_D3 13 +#define AARCH64_RET_D2 14 +#define AARCH64_RET_D1 15 + +#define AARCH64_RET_Q4 16 +#define AARCH64_RET_Q3 17 +#define AARCH64_RET_Q2 18 +#define AARCH64_RET_Q1 19 + +/* Note that each of the sub-64-bit integers gets two entries. */ +#define AARCH64_RET_UINT8 20 +#define AARCH64_RET_UINT16 22 +#define AARCH64_RET_UINT32 24 + +#define AARCH64_RET_SINT8 26 +#define AARCH64_RET_SINT16 28 +#define AARCH64_RET_SINT32 30 + +#define AARCH64_RET_MASK 31 + +#define AARCH64_RET_IN_MEM (1 << 5) +#define AARCH64_RET_NEED_COPY (1 << 6) + +#define AARCH64_FLAG_ARG_V_BIT 7 +#define AARCH64_FLAG_ARG_V (1 << AARCH64_FLAG_ARG_V_BIT) + +#define N_X_ARG_REG 8 +#define N_V_ARG_REG 8 +#define CALL_CONTEXT_SIZE (N_V_ARG_REG * 16 + N_X_ARG_REG * 8) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal.h new file mode 100644 index 0000000..9c3e077 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal.h @@ -0,0 +1,67 @@ +/* +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define AARCH64_RET_VOID 0 +#define AARCH64_RET_INT64 1 +#define AARCH64_RET_INT128 2 + +#define AARCH64_RET_UNUSED3 3 +#define AARCH64_RET_UNUSED4 4 +#define AARCH64_RET_UNUSED5 5 +#define AARCH64_RET_UNUSED6 6 +#define AARCH64_RET_UNUSED7 7 + +/* Note that FFI_TYPE_FLOAT == 2, _DOUBLE == 3, _LONGDOUBLE == 4, + so _S4 through _Q1 are layed out as (TYPE * 4) + (4 - COUNT). */ +#define AARCH64_RET_S4 8 +#define AARCH64_RET_S3 9 +#define AARCH64_RET_S2 10 +#define AARCH64_RET_S1 11 + +#define AARCH64_RET_D4 12 +#define AARCH64_RET_D3 13 +#define AARCH64_RET_D2 14 +#define AARCH64_RET_D1 15 + +#define AARCH64_RET_Q4 16 +#define AARCH64_RET_Q3 17 +#define AARCH64_RET_Q2 18 +#define AARCH64_RET_Q1 19 + +/* Note that each of the sub-64-bit integers gets two entries. 
*/ +#define AARCH64_RET_UINT8 20 +#define AARCH64_RET_UINT16 22 +#define AARCH64_RET_UINT32 24 + +#define AARCH64_RET_SINT8 26 +#define AARCH64_RET_SINT16 28 +#define AARCH64_RET_SINT32 30 + +#define AARCH64_RET_MASK 31 + +#define AARCH64_RET_IN_MEM (1 << 5) +#define AARCH64_RET_NEED_COPY (1 << 6) + +#define AARCH64_FLAG_ARG_V_BIT 7 +#define AARCH64_FLAG_ARG_V (1 << AARCH64_FLAG_ARG_V_BIT) + +#define N_X_ARG_REG 8 +#define N_V_ARG_REG 8 +#define CALL_CONTEXT_SIZE (N_V_ARG_REG * 16 + N_X_ARG_REG * 8) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv 2.S new file mode 100644 index 0000000..a345439 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv 2.S @@ -0,0 +1,451 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__) +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#endif + +#ifdef __AARCH64EB__ +# define BE(X) X +#else +# define BE(X) 0 +#endif + +#ifdef __ILP32__ +#define PTR_REG(n) w##n +#else +#define PTR_REG(n) x##n +#endif + +#ifdef __ILP32__ +#define PTR_SIZE 4 +#else +#define PTR_SIZE 8 +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) +# define BR(r) braaz r +# define BLR(r) blraaz r +#else +# define BR(r) br r +# define BLR(r) blr r +#endif + + .text + .align 4 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + + Therefore on entry we have: + + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + cfi_startproc +CNAME(ffi_call_SYSV): + /* Use a stack frame allocated by our caller. */ + cfi_def_cfa(x1, 32); + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + cfi_def_cfa_register(x29) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + mov x18, x5 /* install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. 
*/ + tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] +1: + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + BLR(x9) /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + cfi_def_cfa_register (sp) + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, 0f + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. */ + .align 4 +0: ret /* VOID */ + nop +1: str x0, [x3] /* INT64 */ + ret +2: stp x0, x1, [x3] /* INT128 */ + ret +3: brk #1000 /* UNUSED */ + ret +4: brk #1000 /* UNUSED */ + ret +5: brk #1000 /* UNUSED */ + ret +6: brk #1000 /* UNUSED */ + ret +7: brk #1000 /* UNUSED */ + ret +8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret +9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret +10: stp s0, s1, [x3] /* S2 */ + ret +11: str s0, [x3] /* S1 */ + ret +12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret +13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret +14: stp d0, d1, [x3] /* D2 */ + ret +15: str d0, [x3] /* D1 */ + ret +16: str q3, [x3, #48] /* Q4 */ + nop +17: str q2, [x3, #32] /* Q3 */ + nop +18: stp q0, q1, [x3] /* Q2 */ + ret +19: str q0, [x3] /* Q1 */ + ret +20: uxtb w0, w0 /* UINT8 */ + str x0, [x3] +21: ret /* reserved */ + nop +22: uxth w0, w0 /* UINT16 */ + str x0, [x3] +23: ret /* reserved */ + nop +24: mov w0, w0 /* UINT32 */ + str x0, [x3] +25: ret /* reserved */ + nop +26: sxtb x0, w0 /* SINT8 */ + str x0, [x3] +27: ret /* reserved */ + nop +28: sxth x0, w0 /* SINT16 */ + str x0, [x3] +29: ret /* reserved */ + nop +30: sxtw x0, w0 /* SINT32 */ + str x0, [x3] +31: ret /* reserved */ + nop + + cfi_endproc + + .globl CNAME(ffi_call_SYSV) + FFI_HIDDEN(CNAME(ffi_call_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_call_SYSV), #function + .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV) +#endif + +/* ffi_closure_SYSV + + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + .align 4 +CNAME(ffi_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. 
*/ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV_V), #function + .size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ +.Ldo_closure: + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + bl CNAME(ffi_closure_SYSV_inner) + + /* Load the return value as directed. */ +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) + autiza x1 +#endif + adr x1, 0f + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. */ + .align 4 +0: b 99f /* VOID */ + nop +1: ldr x0, [x3] /* INT64 */ + b 99f +2: ldp x0, x1, [x3] /* INT128 */ + b 99f +3: brk #1000 /* UNUSED */ + nop +4: brk #1000 /* UNUSED */ + nop +5: brk #1000 /* UNUSED */ + nop +6: brk #1000 /* UNUSED */ + nop +7: brk #1000 /* UNUSED */ + nop +8: ldr s3, [x3, #12] /* S4 */ + nop +9: ldr s2, [x3, #8] /* S3 */ + nop +10: ldp s0, s1, [x3] /* S2 */ + b 99f +11: ldr s0, [x3] /* S1 */ + b 99f +12: ldr d3, [x3, #24] /* D4 */ + nop +13: ldr d2, [x3, #16] /* D3 */ + nop +14: ldp d0, d1, [x3] /* D2 */ + b 99f +15: ldr d0, [x3] /* D1 */ + b 99f +16: ldr q3, [x3, #48] /* Q4 */ + nop +17: ldr q2, [x3, #32] /* Q3 */ + nop +18: ldp q0, q1, [x3] /* Q2 */ + b 99f +19: ldr q0, [x3] /* Q1 */ + b 99f +20: ldrb w0, [x3, #BE(7)] /* UINT8 */ + b 99f +21: brk #1000 /* reserved */ + nop +22: ldrh w0, [x3, #BE(6)] /* UINT16 */ + b 99f +23: brk #1000 /* reserved */ + nop +24: ldr w0, [x3, #BE(4)] /* UINT32 */ + b 99f +25: brk #1000 /* reserved */ + nop +26: ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b 99f +27: brk #1000 /* reserved */ + nop +28: ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b 99f +29: brk #1000 /* reserved */ + nop +30: ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop +31: /* reserved */ +99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS + cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS) + cfi_restore (x29) + cfi_restore (x30) + ret + cfi_endproc + + .globl CNAME(ffi_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV), #function + .size CNAME(ffi_closure_SYSV), . 
- CNAME(ffi_closure_SYSV) +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + .align PAGE_MAX_SHIFT +CNAME(ffi_closure_trampoline_table_page): + .rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr x16, -PAGE_MAX_SIZE + ldp x17, x16, [x16] + BR(x16) + nop /* each entry in the trampoline config page is 2*sizeof(void*) so the trampoline itself cannot be smaller that 16 bytes */ + .endr + + .globl CNAME(ffi_closure_trampoline_table_page) + FFI_HIDDEN(CNAME(ffi_closure_trampoline_table_page)) + #ifdef __ELF__ + .type CNAME(ffi_closure_trampoline_table_page), #function + .size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page) + #endif +#endif + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ + +#ifdef FFI_GO_CLOSURES + .align 4 +CNAME(ffi_go_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV_V), #function + .size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_go_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b .Ldo_closure + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV), #function + .size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV) +#endif +#endif /* FFI_GO_CLOSURES */ +#endif /* __arm64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv.S new file mode 100644 index 0000000..a345439 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv.S @@ -0,0 +1,451 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__) +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#endif + +#ifdef __AARCH64EB__ +# define BE(X) X +#else +# define BE(X) 0 +#endif + +#ifdef __ILP32__ +#define PTR_REG(n) w##n +#else +#define PTR_REG(n) x##n +#endif + +#ifdef __ILP32__ +#define PTR_SIZE 4 +#else +#define PTR_SIZE 8 +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) +# define BR(r) braaz r +# define BLR(r) blraaz r +#else +# define BR(r) br r +# define BLR(r) blr r +#endif + + .text + .align 4 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + + Therefore on entry we have: + + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + cfi_startproc +CNAME(ffi_call_SYSV): + /* Use a stack frame allocated by our caller. */ + cfi_def_cfa(x1, 32); + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + cfi_def_cfa_register(x29) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + mov x18, x5 /* install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] +1: + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + BLR(x9) /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + cfi_def_cfa_register (sp) + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, 0f + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. 
*/ + .align 4 +0: ret /* VOID */ + nop +1: str x0, [x3] /* INT64 */ + ret +2: stp x0, x1, [x3] /* INT128 */ + ret +3: brk #1000 /* UNUSED */ + ret +4: brk #1000 /* UNUSED */ + ret +5: brk #1000 /* UNUSED */ + ret +6: brk #1000 /* UNUSED */ + ret +7: brk #1000 /* UNUSED */ + ret +8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret +9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret +10: stp s0, s1, [x3] /* S2 */ + ret +11: str s0, [x3] /* S1 */ + ret +12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret +13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret +14: stp d0, d1, [x3] /* D2 */ + ret +15: str d0, [x3] /* D1 */ + ret +16: str q3, [x3, #48] /* Q4 */ + nop +17: str q2, [x3, #32] /* Q3 */ + nop +18: stp q0, q1, [x3] /* Q2 */ + ret +19: str q0, [x3] /* Q1 */ + ret +20: uxtb w0, w0 /* UINT8 */ + str x0, [x3] +21: ret /* reserved */ + nop +22: uxth w0, w0 /* UINT16 */ + str x0, [x3] +23: ret /* reserved */ + nop +24: mov w0, w0 /* UINT32 */ + str x0, [x3] +25: ret /* reserved */ + nop +26: sxtb x0, w0 /* SINT8 */ + str x0, [x3] +27: ret /* reserved */ + nop +28: sxth x0, w0 /* SINT16 */ + str x0, [x3] +29: ret /* reserved */ + nop +30: sxtw x0, w0 /* SINT32 */ + str x0, [x3] +31: ret /* reserved */ + nop + + cfi_endproc + + .globl CNAME(ffi_call_SYSV) + FFI_HIDDEN(CNAME(ffi_call_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_call_SYSV), #function + .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV) +#endif + +/* ffi_closure_SYSV + + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + .align 4 +CNAME(ffi_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV_V), #function + .size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ +.Ldo_closure: + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + bl CNAME(ffi_closure_SYSV_inner) + + /* Load the return value as directed. 
*/ +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) + autiza x1 +#endif + adr x1, 0f + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. */ + .align 4 +0: b 99f /* VOID */ + nop +1: ldr x0, [x3] /* INT64 */ + b 99f +2: ldp x0, x1, [x3] /* INT128 */ + b 99f +3: brk #1000 /* UNUSED */ + nop +4: brk #1000 /* UNUSED */ + nop +5: brk #1000 /* UNUSED */ + nop +6: brk #1000 /* UNUSED */ + nop +7: brk #1000 /* UNUSED */ + nop +8: ldr s3, [x3, #12] /* S4 */ + nop +9: ldr s2, [x3, #8] /* S3 */ + nop +10: ldp s0, s1, [x3] /* S2 */ + b 99f +11: ldr s0, [x3] /* S1 */ + b 99f +12: ldr d3, [x3, #24] /* D4 */ + nop +13: ldr d2, [x3, #16] /* D3 */ + nop +14: ldp d0, d1, [x3] /* D2 */ + b 99f +15: ldr d0, [x3] /* D1 */ + b 99f +16: ldr q3, [x3, #48] /* Q4 */ + nop +17: ldr q2, [x3, #32] /* Q3 */ + nop +18: ldp q0, q1, [x3] /* Q2 */ + b 99f +19: ldr q0, [x3] /* Q1 */ + b 99f +20: ldrb w0, [x3, #BE(7)] /* UINT8 */ + b 99f +21: brk #1000 /* reserved */ + nop +22: ldrh w0, [x3, #BE(6)] /* UINT16 */ + b 99f +23: brk #1000 /* reserved */ + nop +24: ldr w0, [x3, #BE(4)] /* UINT32 */ + b 99f +25: brk #1000 /* reserved */ + nop +26: ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b 99f +27: brk #1000 /* reserved */ + nop +28: ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b 99f +29: brk #1000 /* reserved */ + nop +30: ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop +31: /* reserved */ +99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS + cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS) + cfi_restore (x29) + cfi_restore (x30) + ret + cfi_endproc + + .globl CNAME(ffi_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV), #function + .size CNAME(ffi_closure_SYSV), . - CNAME(ffi_closure_SYSV) +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + .align PAGE_MAX_SHIFT +CNAME(ffi_closure_trampoline_table_page): + .rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr x16, -PAGE_MAX_SIZE + ldp x17, x16, [x16] + BR(x16) + nop /* each entry in the trampoline config page is 2*sizeof(void*) so the trampoline itself cannot be smaller that 16 bytes */ + .endr + + .globl CNAME(ffi_closure_trampoline_table_page) + FFI_HIDDEN(CNAME(ffi_closure_trampoline_table_page)) + #ifdef __ELF__ + .type CNAME(ffi_closure_trampoline_table_page), #function + .size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page) + #endif +#endif + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ + +#ifdef FFI_GO_CLOSURES + .align 4 +CNAME(ffi_go_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV_V), #function + .size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_go_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. 
*/ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b .Ldo_closure + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV), #function + .size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV) +#endif +#endif /* FFI_GO_CLOSURES */ +#endif /* __arm64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm 2.S new file mode 100644 index 0000000..a79f8a8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm 2.S @@ -0,0 +1,506 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + OPT 2 /*disable listing */ +/* For some macros to add unwind information */ +#include "ksarm64.h" + OPT 1 /*re-enable listing */ + +#define BE(X) 0 +#define PTR_REG(n) x##n +#define PTR_SIZE 8 + + IMPORT ffi_closure_SYSV_inner + EXPORT ffi_call_SYSV + EXPORT ffi_closure_SYSV_V + EXPORT ffi_closure_SYSV + EXPORT extend_hfa_type + EXPORT compress_hfa_type +#ifdef FFI_GO_CLOSURES + EXPORT ffi_go_closure_SYSV_V + EXPORT ffi_go_closure_SYSV +#endif + + TEXTAREA, ALLIGN=8 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + Therefore on entry we have: + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + NESTED_ENTRY ffi_call_SYSV_fake + + /* For unwind information, Windows has to store fp and lr */ + PROLOG_SAVE_REG_PAIR x29, x30, #-32! + + ALTERNATE_ENTRY ffi_call_SYSV + /* Use a stack frame allocated by our caller. */ + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + /*mov x18, x5 install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. 
*/ + tbz x4, #AARCH64_FLAG_ARG_V_BIT, ffi_call_SYSV_L1 + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] + +ffi_call_SYSV_L1 + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + blr x9 /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, ffi_call_SYSV_return + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. */ + ALIGN 4 +ffi_call_SYSV_return + ret /* VOID */ + nop + str x0, [x3] /* INT64 */ + ret + stp x0, x1, [x3] /* INT128 */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret + st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret + stp s0, s1, [x3] /* S2 */ + ret + str s0, [x3] /* S1 */ + ret + st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret + st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret + stp d0, d1, [x3] /* D2 */ + ret + str d0, [x3] /* D1 */ + ret + str q3, [x3, #48] /* Q4 */ + nop + str q2, [x3, #32] /* Q3 */ + nop + stp q0, q1, [x3] /* Q2 */ + ret + str q0, [x3] /* Q1 */ + ret + uxtb w0, w0 /* UINT8 */ + str x0, [x3] + ret /* reserved */ + nop + uxth w0, w0 /* UINT16 */ + str x0, [x3] + ret /* reserved */ + nop + mov w0, w0 /* UINT32 */ + str x0, [x3] + ret /* reserved */ + nop + sxtb x0, w0 /* SINT8 */ + str x0, [x3] + ret /* reserved */ + nop + sxth x0, w0 /* SINT16 */ + str x0, [x3] + ret /* reserved */ + nop + sxtw x0, w0 /* SINT32 */ + str x0, [x3] + ret /* reserved */ + nop + + + NESTED_END ffi_call_SYSV_fake + + +/* ffi_closure_SYSV + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + NESTED_ENTRY ffi_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + + b ffi_closure_SYSV_save_argument + NESTED_END ffi_closure_SYSV_V + + NESTED_ENTRY ffi_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_closure_SYSV_save_argument + /* Save the argument passing core registers. 
*/ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ + +do_closure + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + + bl ffi_closure_SYSV_inner + + /* Load the return value as directed. */ + adr x1, ffi_closure_SYSV_return_base + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. */ + ALIGN 8 +ffi_closure_SYSV_return_base + b ffi_closure_SYSV_epilog /* VOID */ + nop + ldr x0, [x3] /* INT64 */ + b ffi_closure_SYSV_epilog + ldp x0, x1, [x3] /* INT128 */ + b ffi_closure_SYSV_epilog + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + ldr s3, [x3, #12] /* S4 */ + nop + ldr s2, [x3, #8] /* S3 */ + nop + ldp s0, s1, [x3] /* S2 */ + b ffi_closure_SYSV_epilog + ldr s0, [x3] /* S1 */ + b ffi_closure_SYSV_epilog + ldr d3, [x3, #24] /* D4 */ + nop + ldr d2, [x3, #16] /* D3 */ + nop + ldp d0, d1, [x3] /* D2 */ + b ffi_closure_SYSV_epilog + ldr d0, [x3] /* D1 */ + b ffi_closure_SYSV_epilog + ldr q3, [x3, #48] /* Q4 */ + nop + ldr q2, [x3, #32] /* Q3 */ + nop + ldp q0, q1, [x3] /* Q2 */ + b ffi_closure_SYSV_epilog + ldr q0, [x3] /* Q1 */ + b ffi_closure_SYSV_epilog + ldrb w0, [x3, #BE(7)] /* UINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrh w0, [x3, #BE(6)] /* UINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldr w0, [x3, #BE(4)] /* UINT32 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop + /* reserved */ + +ffi_closure_SYSV_epilog + EPILOG_RESTORE_REG_PAIR x29, x30, #ffi_closure_SYSV_FS! + EPILOG_RETURN + NESTED_END ffi_closure_SYSV + + +#ifdef FFI_GO_CLOSURES + NESTED_ENTRY ffi_go_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b ffi_go_closure_SYSV_save_argument + NESTED_END ffi_go_closure_SYSV_V + + NESTED_ENTRY ffi_go_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_go_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. 
*/ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b do_closure + NESTED_END ffi_go_closure_SYSV + +#endif /* FFI_GO_CLOSURES */ + + +/* void extend_hfa_type (void *dest, void *src, int h) */ + + LEAF_ENTRY extend_hfa_type + + adr x3, extend_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +extend_hfa_type_jump_base + ldp s16, s17, [x1] /* S4 */ + ldp s18, s19, [x1, #8] + b extend_hfa_type_store_4 + nop + + ldp s16, s17, [x1] /* S3 */ + ldr s18, [x1, #8] + b extend_hfa_type_store_3 + nop + + ldp s16, s17, [x1] /* S2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr s16, [x1] /* S1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp d16, d17, [x1] /* D4 */ + ldp d18, d19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp d16, d17, [x1] /* D3 */ + ldr d18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp d16, d17, [x1] /* D2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr d16, [x1] /* D1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp q16, q17, [x1] /* Q2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr q16, [x1] /* Q1 */ + b extend_hfa_type_store_1 + +extend_hfa_type_store_4 + str q19, [x0, #48] +extend_hfa_type_store_3 + str q18, [x0, #32] +extend_hfa_type_store_2 + str q17, [x0, #16] +extend_hfa_type_store_1 + str q16, [x0] + ret + + LEAF_END extend_hfa_type + + +/* void compress_hfa_type (void *dest, void *reg, int h) */ + + LEAF_ENTRY compress_hfa_type + + adr x3, compress_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +compress_hfa_type_jump_base + ldp q16, q17, [x1] /* S4 */ + ldp q18, q19, [x1, #32] + st4 { v16.s, v17.s, v18.s, v19.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S3 */ + ldr q18, [x1, #32] + st3 { v16.s, v17.s, v18.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S2 */ + st2 { v16.s, v17.s }[0], [x0] + ret + nop + + ldr q16, [x1] /* S1 */ + st1 { v16.s }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* D4 */ + ldp q18, q19, [x1, #32] + st4 { v16.d, v17.d, v18.d, v19.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D3 */ + ldr q18, [x1, #32] + st3 { v16.d, v17.d, v18.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D2 */ + st2 { v16.d, v17.d }[0], [x0] + ret + nop + + ldr q16, [x1] /* D1 */ + st1 { v16.d }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #32] + b compress_hfa_type_store_q4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #32] + b compress_hfa_type_store_q3 + nop + + ldp q16, q17, [x1] /* Q2 */ + stp q16, q17, [x0] + ret + nop + + ldr q16, [x1] /* Q1 */ + str q16, [x0] + ret + +compress_hfa_type_store_q4 + str q19, [x0, #48] +compress_hfa_type_store_q3 + str q18, [x0, #32] + stp q16, q17, [x0] + ret + + LEAF_END compress_hfa_type + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm.S new file mode 100644 index 0000000..a79f8a8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm.S @@ -0,0 +1,506 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. 
+Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + OPT 2 /*disable listing */ +/* For some macros to add unwind information */ +#include "ksarm64.h" + OPT 1 /*re-enable listing */ + +#define BE(X) 0 +#define PTR_REG(n) x##n +#define PTR_SIZE 8 + + IMPORT ffi_closure_SYSV_inner + EXPORT ffi_call_SYSV + EXPORT ffi_closure_SYSV_V + EXPORT ffi_closure_SYSV + EXPORT extend_hfa_type + EXPORT compress_hfa_type +#ifdef FFI_GO_CLOSURES + EXPORT ffi_go_closure_SYSV_V + EXPORT ffi_go_closure_SYSV +#endif + + TEXTAREA, ALLIGN=8 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + Therefore on entry we have: + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + NESTED_ENTRY ffi_call_SYSV_fake + + /* For unwind information, Windows has to store fp and lr */ + PROLOG_SAVE_REG_PAIR x29, x30, #-32! + + ALTERNATE_ENTRY ffi_call_SYSV + /* Use a stack frame allocated by our caller. */ + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + /*mov x18, x5 install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz x4, #AARCH64_FLAG_ARG_V_BIT, ffi_call_SYSV_L1 + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] + +ffi_call_SYSV_L1 + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + blr x9 /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, ffi_call_SYSV_return + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. 
*/ + ALIGN 4 +ffi_call_SYSV_return + ret /* VOID */ + nop + str x0, [x3] /* INT64 */ + ret + stp x0, x1, [x3] /* INT128 */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret + st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret + stp s0, s1, [x3] /* S2 */ + ret + str s0, [x3] /* S1 */ + ret + st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret + st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret + stp d0, d1, [x3] /* D2 */ + ret + str d0, [x3] /* D1 */ + ret + str q3, [x3, #48] /* Q4 */ + nop + str q2, [x3, #32] /* Q3 */ + nop + stp q0, q1, [x3] /* Q2 */ + ret + str q0, [x3] /* Q1 */ + ret + uxtb w0, w0 /* UINT8 */ + str x0, [x3] + ret /* reserved */ + nop + uxth w0, w0 /* UINT16 */ + str x0, [x3] + ret /* reserved */ + nop + mov w0, w0 /* UINT32 */ + str x0, [x3] + ret /* reserved */ + nop + sxtb x0, w0 /* SINT8 */ + str x0, [x3] + ret /* reserved */ + nop + sxth x0, w0 /* SINT16 */ + str x0, [x3] + ret /* reserved */ + nop + sxtw x0, w0 /* SINT32 */ + str x0, [x3] + ret /* reserved */ + nop + + + NESTED_END ffi_call_SYSV_fake + + +/* ffi_closure_SYSV + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + NESTED_ENTRY ffi_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + + b ffi_closure_SYSV_save_argument + NESTED_END ffi_closure_SYSV_V + + NESTED_ENTRY ffi_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ + +do_closure + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + + bl ffi_closure_SYSV_inner + + /* Load the return value as directed. */ + adr x1, ffi_closure_SYSV_return_base + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. 
*/ + ALIGN 8 +ffi_closure_SYSV_return_base + b ffi_closure_SYSV_epilog /* VOID */ + nop + ldr x0, [x3] /* INT64 */ + b ffi_closure_SYSV_epilog + ldp x0, x1, [x3] /* INT128 */ + b ffi_closure_SYSV_epilog + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + ldr s3, [x3, #12] /* S4 */ + nop + ldr s2, [x3, #8] /* S3 */ + nop + ldp s0, s1, [x3] /* S2 */ + b ffi_closure_SYSV_epilog + ldr s0, [x3] /* S1 */ + b ffi_closure_SYSV_epilog + ldr d3, [x3, #24] /* D4 */ + nop + ldr d2, [x3, #16] /* D3 */ + nop + ldp d0, d1, [x3] /* D2 */ + b ffi_closure_SYSV_epilog + ldr d0, [x3] /* D1 */ + b ffi_closure_SYSV_epilog + ldr q3, [x3, #48] /* Q4 */ + nop + ldr q2, [x3, #32] /* Q3 */ + nop + ldp q0, q1, [x3] /* Q2 */ + b ffi_closure_SYSV_epilog + ldr q0, [x3] /* Q1 */ + b ffi_closure_SYSV_epilog + ldrb w0, [x3, #BE(7)] /* UINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrh w0, [x3, #BE(6)] /* UINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldr w0, [x3, #BE(4)] /* UINT32 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop + /* reserved */ + +ffi_closure_SYSV_epilog + EPILOG_RESTORE_REG_PAIR x29, x30, #ffi_closure_SYSV_FS! + EPILOG_RETURN + NESTED_END ffi_closure_SYSV + + +#ifdef FFI_GO_CLOSURES + NESTED_ENTRY ffi_go_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b ffi_go_closure_SYSV_save_argument + NESTED_END ffi_go_closure_SYSV_V + + NESTED_ENTRY ffi_go_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_go_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. 
*/ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b do_closure + NESTED_END ffi_go_closure_SYSV + +#endif /* FFI_GO_CLOSURES */ + + +/* void extend_hfa_type (void *dest, void *src, int h) */ + + LEAF_ENTRY extend_hfa_type + + adr x3, extend_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +extend_hfa_type_jump_base + ldp s16, s17, [x1] /* S4 */ + ldp s18, s19, [x1, #8] + b extend_hfa_type_store_4 + nop + + ldp s16, s17, [x1] /* S3 */ + ldr s18, [x1, #8] + b extend_hfa_type_store_3 + nop + + ldp s16, s17, [x1] /* S2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr s16, [x1] /* S1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp d16, d17, [x1] /* D4 */ + ldp d18, d19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp d16, d17, [x1] /* D3 */ + ldr d18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp d16, d17, [x1] /* D2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr d16, [x1] /* D1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp q16, q17, [x1] /* Q2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr q16, [x1] /* Q1 */ + b extend_hfa_type_store_1 + +extend_hfa_type_store_4 + str q19, [x0, #48] +extend_hfa_type_store_3 + str q18, [x0, #32] +extend_hfa_type_store_2 + str q17, [x0, #16] +extend_hfa_type_store_1 + str q16, [x0] + ret + + LEAF_END extend_hfa_type + + +/* void compress_hfa_type (void *dest, void *reg, int h) */ + + LEAF_ENTRY compress_hfa_type + + adr x3, compress_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +compress_hfa_type_jump_base + ldp q16, q17, [x1] /* S4 */ + ldp q18, q19, [x1, #32] + st4 { v16.s, v17.s, v18.s, v19.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S3 */ + ldr q18, [x1, #32] + st3 { v16.s, v17.s, v18.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S2 */ + st2 { v16.s, v17.s }[0], [x0] + ret + nop + + ldr q16, [x1] /* S1 */ + st1 { v16.s }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* D4 */ + ldp q18, q19, [x1, #32] + st4 { v16.d, v17.d, v18.d, v19.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D3 */ + ldr q18, [x1, #32] + st3 { v16.d, v17.d, v18.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D2 */ + st2 { v16.d, v17.d }[0], [x0] + ret + nop + + ldr q16, [x1] /* D1 */ + st1 { v16.d }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #32] + b compress_hfa_type_store_q4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #32] + b compress_hfa_type_store_q3 + nop + + ldp q16, q17, [x1] /* Q2 */ + stp q16, q17, [x0] + ret + nop + + ldr q16, [x1] /* Q1 */ + str q16, [x0] + ret + +compress_hfa_type_store_q4 + str q19, [x0, #48] +compress_hfa_type_store_q3 + str q18, [x0, #32] + stp q16, q17, [x0] + ret + + LEAF_END compress_hfa_type + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi 2.c new file mode 100644 index 0000000..7a95e97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi 2.c @@ -0,0 +1,521 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Anthony Green + Copyright (c) 1998, 2001, 2007, 2008 Red 
Hat, Inc. + + Alpha Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if defined(__LONG_DOUBLE_128__) +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +extern void ffi_call_osf(void *stack, void *frame, unsigned flags, + void *raddr, void (*fn)(void), void *closure) + FFI_HIDDEN; +extern void ffi_closure_osf(void) FFI_HIDDEN; +extern void ffi_go_closure_osf(void) FFI_HIDDEN; + +/* Promote a float value to its in-register double representation. + Unlike actually casting to double, this does not trap on NaN. */ +static inline UINT64 lds(void *ptr) +{ + UINT64 ret; + asm("lds %0,%1" : "=f"(ret) : "m"(*(UINT32 *)ptr)); + return ret; +} + +/* And the reverse. */ +static inline void sts(void *ptr, UINT64 val) +{ + asm("sts %1,%0" : "=m"(*(UINT32 *)ptr) : "f"(val)); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int flags, i, avn; + ffi_type *rtype, *itype; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + /* Compute the size of the argument area. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + itype = cif->arg_types[i]; + switch (itype->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + /* All take one 8 byte slot. */ + bytes += 8; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + /* Passed by value in N slots. */ + bytes += FFI_ALIGN(itype->size, FFI_SIZEOF_ARG); + break; + + case FFI_TYPE_COMPLEX: + /* _Complex long double passed by reference; others in 2 slots. 
*/ + if (itype->elements[0]->type == FFI_TYPE_LONGDOUBLE) + bytes += 8; + else + bytes += 16; + break; + + default: + abort(); + } + } + + /* Set the return type flag */ + rtype = cif->rtype; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = ALPHA_FLAGS(ALPHA_ST_VOID, ALPHA_LD_VOID); + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT32); + break; + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_FLOAT, ALPHA_LD_FLOAT); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_DOUBLE, ALPHA_LD_DOUBLE); + break; + case FFI_TYPE_UINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT8); + break; + case FFI_TYPE_SINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT8); + break; + case FFI_TYPE_UINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT16); + break; + case FFI_TYPE_SINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT16); + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + /* Passed in memory, with a hidden pointer. */ + flags = ALPHA_RET_IN_MEM; + break; + case FFI_TYPE_COMPLEX: + itype = rtype->elements[0]; + switch (itype->type) + { + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXF, ALPHA_LD_CPLXF); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXD, ALPHA_LD_CPLXD); + break; + default: + if (rtype->size <= 8) + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + else + flags = ALPHA_RET_IN_MEM; + break; + } + break; + default: + abort(); + } + cif->flags = flags; + + /* Include the hidden structure pointer in args requirement. */ + if (flags == ALPHA_RET_IN_MEM) + bytes += 8; + /* Minimum size is 6 slots, so that ffi_call_osf can pop them. */ + if (bytes < 6*8) + bytes = 6*8; + cif->bytes = bytes; + + return FFI_OK; +} + +static unsigned long +extend_basic_type(void *valp, int type, int argn) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)valp; + case FFI_TYPE_UINT8: + return *(UINT8 *)valp; + case FFI_TYPE_SINT16: + return *(SINT16 *)valp; + case FFI_TYPE_UINT16: + return *(UINT16 *)valp; + + case FFI_TYPE_FLOAT: + if (argn < 6) + return lds(valp); + /* FALLTHRU */ + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Note that unsigned 32-bit quantities are sign extended. */ + return *(SINT32 *)valp; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + return *(UINT64 *)valp; + + default: + abort(); + } +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + unsigned long *argp; + long i, avn, argn, flags = cif->flags; + ffi_type **arg_types; + void *frame; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + if (rvalue == NULL && flags == ALPHA_RET_IN_MEM) + rvalue = alloca(cif->rtype->size); + + /* Allocate the space for the arguments, plus 4 words of temp + space for ffi_call_osf. 
*/ + argp = frame = alloca(cif->bytes + 4*FFI_SIZEOF_ARG); + frame += cif->bytes; + + argn = 0; + if (flags == ALPHA_RET_IN_MEM) + argp[argn++] = (unsigned long)rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + int type = ty->type; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + argp[argn] = extend_basic_type(valp, type, argn); + argn++; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Note that 128-bit long double is passed by reference. */ + argp[argn++] = (unsigned long)valp; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + memcpy(argp + argn, valp, size); + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + if (type == FFI_TYPE_LONGDOUBLE) + goto by_reference; + + /* Most complex types passed as two separate arguments. */ + size = ty->elements[0]->size; + argp[argn] = extend_basic_type(valp, type, argn); + argp[argn + 1] = extend_basic_type(valp + size, type, argn + 1); + argn += 2; + break; + + default: + abort(); + } + } + + flags = (flags >> ALPHA_ST_SHIFT) & 0xff; + ffi_call_osf(argp, frame, flags, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x47fb0401; /* mov $27,$1 */ + tramp[1] = 0xa77b0010; /* ldq $27,16($27) */ + tramp[2] = 0x6bfb0000; /* jmp $31,($27),0 */ + tramp[3] = 0x47ff041f; /* nop */ + *(void **) &tramp[4] = ffi_closure_osf; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the Icache. + + Tru64 UNIX as doesn't understand the imb mnemonic, so use call_pal + instead, since both Compaq as and gas can handle it. + + 0x86 is PAL_imb in Tru64 UNIX . */ + asm volatile ("call_pal 0x86" : : : "memory"); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + closure->tramp = (void *)ffi_go_closure_osf; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +long FFI_HIDDEN +ffi_closure_osf_inner (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, unsigned long *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn, argn, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + flags = cif->flags; + argn = 0; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. 
*/ + if (flags == ALPHA_RET_IN_MEM) + { + rvalue = (void *) argp[0]; + argn = 1; + } + + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + int type = ty->type; + void *valp = &argp[argn]; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + argn += 1; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_FLOAT: + /* Floats coming from registers need conversion from double + back to float format. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + argn += 1; + break; + + case FFI_TYPE_DOUBLE: + if (argn < 6) + valp = &argp[argn - 6]; + argn += 1; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* 128-bit long double is passed by reference. */ + valp = (void *)argp[argn]; + argn += 1; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + switch (type) + { + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passed as separate arguments, but they wind up sequential. */ + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Passed as separate arguments. Disjoint, but there's room + enough in one slot to hold the pair. */ + size = ty->elements[0]->size; + memcpy(valp + size, valp + 8, size); + break; + + case FFI_TYPE_FLOAT: + /* Passed as separate arguments. Disjoint, and each piece + may need conversion back to float. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + if (argn + 1 < 6) + sts(valp + 4, argp[argn + 1 - 6]); + else + *(UINT32 *)(valp + 4) = argp[argn + 1]; + break; + + case FFI_TYPE_DOUBLE: + /* Passed as separate arguments. Only disjoint if one part + is in fp regs and the other is on the stack. */ + if (argn < 5) + valp = &argp[argn - 6]; + else if (argn == 5) + { + valp = alloca(16); + ((UINT64 *)valp)[0] = argp[5 - 6]; + ((UINT64 *)valp)[1] = argp[6]; + } + break; + + case FFI_TYPE_LONGDOUBLE: + goto by_reference; + + default: + abort(); + } + argn += 2; + break; + + default: + abort (); + } + + avalue[i] = valp; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_osf how to perform return type promotions. */ + return (flags >> ALPHA_LD_SHIFT) & 0xff; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi.c new file mode 100644 index 0000000..7a95e97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi.c @@ -0,0 +1,521 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Anthony Green + Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc. 
+ + Alpha Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if defined(__LONG_DOUBLE_128__) +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +extern void ffi_call_osf(void *stack, void *frame, unsigned flags, + void *raddr, void (*fn)(void), void *closure) + FFI_HIDDEN; +extern void ffi_closure_osf(void) FFI_HIDDEN; +extern void ffi_go_closure_osf(void) FFI_HIDDEN; + +/* Promote a float value to its in-register double representation. + Unlike actually casting to double, this does not trap on NaN. */ +static inline UINT64 lds(void *ptr) +{ + UINT64 ret; + asm("lds %0,%1" : "=f"(ret) : "m"(*(UINT32 *)ptr)); + return ret; +} + +/* And the reverse. */ +static inline void sts(void *ptr, UINT64 val) +{ + asm("sts %1,%0" : "=m"(*(UINT32 *)ptr) : "f"(val)); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int flags, i, avn; + ffi_type *rtype, *itype; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + /* Compute the size of the argument area. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + itype = cif->arg_types[i]; + switch (itype->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + /* All take one 8 byte slot. */ + bytes += 8; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + /* Passed by value in N slots. */ + bytes += FFI_ALIGN(itype->size, FFI_SIZEOF_ARG); + break; + + case FFI_TYPE_COMPLEX: + /* _Complex long double passed by reference; others in 2 slots. 
*/ + if (itype->elements[0]->type == FFI_TYPE_LONGDOUBLE) + bytes += 8; + else + bytes += 16; + break; + + default: + abort(); + } + } + + /* Set the return type flag */ + rtype = cif->rtype; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = ALPHA_FLAGS(ALPHA_ST_VOID, ALPHA_LD_VOID); + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT32); + break; + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_FLOAT, ALPHA_LD_FLOAT); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_DOUBLE, ALPHA_LD_DOUBLE); + break; + case FFI_TYPE_UINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT8); + break; + case FFI_TYPE_SINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT8); + break; + case FFI_TYPE_UINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT16); + break; + case FFI_TYPE_SINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT16); + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + /* Passed in memory, with a hidden pointer. */ + flags = ALPHA_RET_IN_MEM; + break; + case FFI_TYPE_COMPLEX: + itype = rtype->elements[0]; + switch (itype->type) + { + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXF, ALPHA_LD_CPLXF); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXD, ALPHA_LD_CPLXD); + break; + default: + if (rtype->size <= 8) + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + else + flags = ALPHA_RET_IN_MEM; + break; + } + break; + default: + abort(); + } + cif->flags = flags; + + /* Include the hidden structure pointer in args requirement. */ + if (flags == ALPHA_RET_IN_MEM) + bytes += 8; + /* Minimum size is 6 slots, so that ffi_call_osf can pop them. */ + if (bytes < 6*8) + bytes = 6*8; + cif->bytes = bytes; + + return FFI_OK; +} + +static unsigned long +extend_basic_type(void *valp, int type, int argn) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)valp; + case FFI_TYPE_UINT8: + return *(UINT8 *)valp; + case FFI_TYPE_SINT16: + return *(SINT16 *)valp; + case FFI_TYPE_UINT16: + return *(UINT16 *)valp; + + case FFI_TYPE_FLOAT: + if (argn < 6) + return lds(valp); + /* FALLTHRU */ + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Note that unsigned 32-bit quantities are sign extended. */ + return *(SINT32 *)valp; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + return *(UINT64 *)valp; + + default: + abort(); + } +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + unsigned long *argp; + long i, avn, argn, flags = cif->flags; + ffi_type **arg_types; + void *frame; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + if (rvalue == NULL && flags == ALPHA_RET_IN_MEM) + rvalue = alloca(cif->rtype->size); + + /* Allocate the space for the arguments, plus 4 words of temp + space for ffi_call_osf. 
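cif->flags packs two independent one-byte codes: a store code telling ffi_call_osf how to store the native return value into *rvalue, and a load code telling the closure path how to promote its result, with ALPHA_RET_IN_MEM kept outside both fields as a sentinel for memory returns. A minimal sketch of that packing, using the ALPHA_* values from internal.h later in this diff; st_code and ld_code are illustrative helpers:

/* Same packing as ALPHA_FLAGS in internal.h. */
#define ALPHA_ST_SHIFT 0
#define ALPHA_LD_SHIFT 8
#define ALPHA_FLAGS(S, L) (((L) << ALPHA_LD_SHIFT) | (S))

static int st_code (unsigned flags) { return (flags >> ALPHA_ST_SHIFT) & 0xff; }
static int ld_code (unsigned flags) { return (flags >> ALPHA_LD_SHIFT) & 0xff; }

/* A float return, ALPHA_FLAGS(ALPHA_ST_FLOAT, ALPHA_LD_FLOAT), gives
   st_code == 2 and ld_code == 7; those numbers index the jump tables in
   osf.S.  ALPHA_RET_IN_MEM (0x10000) lies outside both byte fields, so
   the comparison flags == ALPHA_RET_IN_MEM stays unambiguous. */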
*/ + argp = frame = alloca(cif->bytes + 4*FFI_SIZEOF_ARG); + frame += cif->bytes; + + argn = 0; + if (flags == ALPHA_RET_IN_MEM) + argp[argn++] = (unsigned long)rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + int type = ty->type; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + argp[argn] = extend_basic_type(valp, type, argn); + argn++; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Note that 128-bit long double is passed by reference. */ + argp[argn++] = (unsigned long)valp; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + memcpy(argp + argn, valp, size); + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + if (type == FFI_TYPE_LONGDOUBLE) + goto by_reference; + + /* Most complex types passed as two separate arguments. */ + size = ty->elements[0]->size; + argp[argn] = extend_basic_type(valp, type, argn); + argp[argn + 1] = extend_basic_type(valp + size, type, argn + 1); + argn += 2; + break; + + default: + abort(); + } + } + + flags = (flags >> ALPHA_ST_SHIFT) & 0xff; + ffi_call_osf(argp, frame, flags, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x47fb0401; /* mov $27,$1 */ + tramp[1] = 0xa77b0010; /* ldq $27,16($27) */ + tramp[2] = 0x6bfb0000; /* jmp $31,($27),0 */ + tramp[3] = 0x47ff041f; /* nop */ + *(void **) &tramp[4] = ffi_closure_osf; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the Icache. + + Tru64 UNIX as doesn't understand the imb mnemonic, so use call_pal + instead, since both Compaq as and gas can handle it. + + 0x86 is PAL_imb in Tru64 UNIX . */ + asm volatile ("call_pal 0x86" : : : "memory"); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + closure->tramp = (void *)ffi_go_closure_osf; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +long FFI_HIDDEN +ffi_closure_osf_inner (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, unsigned long *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn, argn, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + flags = cif->flags; + argn = 0; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. 
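ffi_prep_closure_loc above plants a four-instruction trampoline that loads the closure address and jumps to ffi_closure_osf, then flushes the Icache. It is the target-specific half of libffi's public closure API; a hedged usage sketch follows, where the adder callback and make_adder driver are invented for illustration:

#include <ffi.h>
#include <stdio.h>

/* Closure handler: libffi expects integral results narrower than
   ffi_arg to be written out as a full ffi_arg. */
static void adder (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif; (void) user_data;
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) args[1];
}

static int make_adder (void)
{
  ffi_cif cif;
  ffi_type *argt[2] = { &ffi_type_sint32, &ffi_type_sint32 };
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
  int (*fn) (int, int);

  if (closure == NULL)
    return -1;
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint32, argt) != FFI_OK
      || ffi_prep_closure_loc (closure, &cif, adder, NULL, code) != FFI_OK)
    {
      ffi_closure_free (closure);
      return -1;
    }

  fn = (int (*)(int, int)) code;
  printf ("%d\n", fn (2, 3));   /* prints 5 */
  ffi_closure_free (closure);
  return 0;
}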
*/ + if (flags == ALPHA_RET_IN_MEM) + { + rvalue = (void *) argp[0]; + argn = 1; + } + + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + int type = ty->type; + void *valp = &argp[argn]; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + argn += 1; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_FLOAT: + /* Floats coming from registers need conversion from double + back to float format. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + argn += 1; + break; + + case FFI_TYPE_DOUBLE: + if (argn < 6) + valp = &argp[argn - 6]; + argn += 1; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* 128-bit long double is passed by reference. */ + valp = (void *)argp[argn]; + argn += 1; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + switch (type) + { + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passed as separate arguments, but they wind up sequential. */ + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Passed as separate arguments. Disjoint, but there's room + enough in one slot to hold the pair. */ + size = ty->elements[0]->size; + memcpy(valp + size, valp + 8, size); + break; + + case FFI_TYPE_FLOAT: + /* Passed as separate arguments. Disjoint, and each piece + may need conversion back to float. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + if (argn + 1 < 6) + sts(valp + 4, argp[argn + 1 - 6]); + else + *(UINT32 *)(valp + 4) = argp[argn + 1]; + break; + + case FFI_TYPE_DOUBLE: + /* Passed as separate arguments. Only disjoint if one part + is in fp regs and the other is on the stack. */ + if (argn < 5) + valp = &argp[argn - 6]; + else if (argn == 5) + { + valp = alloca(16); + ((UINT64 *)valp)[0] = argp[5 - 6]; + ((UINT64 *)valp)[1] = argp[6]; + } + break; + + case FFI_TYPE_LONGDOUBLE: + goto by_reference; + + default: + abort(); + } + argn += 2; + break; + + default: + abort (); + } + + avalue[i] = valp; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_osf how to perform return type promotions. */ + return (flags >> ALPHA_LD_SHIFT) & 0xff; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget 2.h new file mode 100644 index 0000000..a02dbd0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget 2.h @@ -0,0 +1,57 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Alpha. 
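The value ffi_closure_osf_inner hands back, (flags >> ALPHA_LD_SHIFT) & 0xff, is one of the ALPHA_LD_* codes; ffi_closure_osf uses it to pick the right widening of the raw 64-bit result register. The same promotions written out in C purely as a reading aid; promote_result is not a libffi function:

#include <stdint.h>

static uint64_t promote_result (int ld_code, uint64_t raw)
{
  switch (ld_code)
    {
    case 2:  /* ALPHA_LD_INT32  */ return (uint64_t) (int64_t) (int32_t) raw;
    case 3:  /* ALPHA_LD_UINT16 */ return raw & 0xffff;
    case 4:  /* ALPHA_LD_SINT16 */ return (uint64_t) (int64_t) (int16_t) raw;
    case 5:  /* ALPHA_LD_UINT8  */ return raw & 0xff;
    case 6:  /* ALPHA_LD_SINT8  */ return (uint64_t) (int64_t) (int8_t) raw;
    default: /* void, 64-bit and FP codes reload registers instead */
             return raw;
    }
}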
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OSF, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_OSF +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget.h new file mode 100644 index 0000000..a02dbd0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget.h @@ -0,0 +1,57 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Alpha. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OSF, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_OSF +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal 2.h new file mode 100644 index 0000000..44da192 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal 2.h @@ -0,0 +1,23 @@ +#define ALPHA_ST_VOID 0 +#define ALPHA_ST_INT 1 +#define ALPHA_ST_FLOAT 2 +#define ALPHA_ST_DOUBLE 3 +#define ALPHA_ST_CPLXF 4 +#define ALPHA_ST_CPLXD 5 + +#define ALPHA_LD_VOID 0 +#define ALPHA_LD_INT64 1 +#define ALPHA_LD_INT32 2 +#define ALPHA_LD_UINT16 3 +#define ALPHA_LD_SINT16 4 +#define ALPHA_LD_UINT8 5 +#define ALPHA_LD_SINT8 6 +#define ALPHA_LD_FLOAT 7 +#define ALPHA_LD_DOUBLE 8 +#define ALPHA_LD_CPLXF 9 +#define ALPHA_LD_CPLXD 10 + +#define ALPHA_ST_SHIFT 0 +#define ALPHA_LD_SHIFT 8 +#define ALPHA_RET_IN_MEM 0x10000 +#define ALPHA_FLAGS(S, L) (((L) << ALPHA_LD_SHIFT) | (S)) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal.h new file mode 100644 index 0000000..44da192 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal.h @@ -0,0 +1,23 @@ +#define ALPHA_ST_VOID 0 +#define ALPHA_ST_INT 1 +#define ALPHA_ST_FLOAT 2 +#define ALPHA_ST_DOUBLE 3 +#define ALPHA_ST_CPLXF 4 +#define ALPHA_ST_CPLXD 5 + +#define ALPHA_LD_VOID 0 +#define ALPHA_LD_INT64 1 +#define ALPHA_LD_INT32 2 +#define ALPHA_LD_UINT16 3 +#define ALPHA_LD_SINT16 4 +#define ALPHA_LD_UINT8 5 +#define ALPHA_LD_SINT8 6 +#define ALPHA_LD_FLOAT 7 +#define ALPHA_LD_DOUBLE 8 +#define ALPHA_LD_CPLXF 9 +#define ALPHA_LD_CPLXD 10 + +#define ALPHA_ST_SHIFT 0 +#define ALPHA_LD_SHIFT 8 +#define ALPHA_RET_IN_MEM 0x10000 +#define ALPHA_FLAGS(S, L) (((L) << ALPHA_LD_SHIFT) | (S)) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf 2.S new file mode 100644 index 0000000..b031828 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf 2.S @@ -0,0 +1,282 @@ +/* ----------------------------------------------------------------------- + osf.S - Copyright (c) 1998, 2001, 2007, 2008, 2011, 2014 Red Hat + + Alpha/OSF Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the 
following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + .arch ev6 + .text + +/* Aid in building a direct addressed jump table, 4 insns per entry. */ +.macro E index + .align 4 + .org 99b + \index * 16 +.endm + +/* ffi_call_osf (void *stack, void *frame, unsigned flags, + void *raddr, void (*fnaddr)(void), void *closure) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 4 + .globl ffi_call_osf + .ent ffi_call_osf + FFI_HIDDEN(ffi_call_osf) + +ffi_call_osf: + cfi_startproc + cfi_def_cfa($17, 32) + mov $16, $30 + stq $26, 0($17) + stq $15, 8($17) + mov $17, $15 + .prologue 0 + cfi_def_cfa_register($15) + cfi_rel_offset($26, 0) + cfi_rel_offset($15, 8) + + stq $18, 16($17) # save flags into frame + stq $19, 24($17) # save rvalue into frame + mov $20, $27 # fn into place for call + mov $21, $1 # closure into static chain + + # Load up all of the (potential) argument registers. + ldq $16, 0($30) + ldt $f16, 0($30) + ldt $f17, 8($30) + ldq $17, 8($30) + ldt $f18, 16($30) + ldq $18, 16($30) + ldt $f19, 24($30) + ldq $19, 24($30) + ldt $f20, 32($30) + ldq $20, 32($30) + ldt $f21, 40($30) + ldq $21, 40($30) + + # Deallocate the register argument area. + lda $30, 48($30) + + jsr $26, ($27), 0 +0: + ldah $29, 0($26) !gpdisp!1 + ldq $2, 24($15) # reload rvalue + lda $29, 0($29) !gpdisp!1 + ldq $3, 16($15) # reload flags + lda $1, 99f-0b($26) + ldq $26, 0($15) + ldq $15, 8($15) + cfi_restore($26) + cfi_restore($15) + cfi_def_cfa($sp, 0) + cmoveq $2, ALPHA_ST_VOID, $3 # mash null rvalue to void + addq $3, $3, $3 + s8addq $3, $1, $1 # 99f + stcode * 16 + jmp $31, ($1), $st_int + + .align 4 +99: +E ALPHA_ST_VOID + ret +E ALPHA_ST_INT +$st_int: + stq $0, 0($2) + ret +E ALPHA_ST_FLOAT + sts $f0, 0($2) + ret +E ALPHA_ST_DOUBLE + stt $f0, 0($2) + ret +E ALPHA_ST_CPLXF + sts $f0, 0($2) + sts $f1, 4($2) + ret +E ALPHA_ST_CPLXD + stt $f0, 0($2) + stt $f1, 8($2) + ret + + cfi_endproc + .end ffi_call_osf + +/* ffi_closure_osf(...) + + Receives the closure argument in $1. 
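The tail of ffi_call_osf above is a computed jump: each E entry in the 99: table is 16 bytes (four instructions), so the addq/s8addq pair turns the store code into 99f + st_code * 16, after a null rvalue has been mashed to ALPHA_ST_VOID. The same dispatch modelled with a C function-pointer table; dispatch_store and store_fn are illustrative names:

typedef void (*store_fn) (void *rvalue);

/* table[] holds one handler per ALPHA_ST_* code, in order. */
static void dispatch_store (store_fn table[], unsigned flags, void *rvalue)
{
  unsigned st = flags & 0xff;          /* ALPHA_ST_SHIFT is 0 */
  if (rvalue == NULL)
    st = 0;                            /* ALPHA_ST_VOID: nothing to store */
  table[st] (rvalue);
}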
*/ + +#define CLOSURE_FS (16*8) + + .align 4 + .globl ffi_go_closure_osf + .ent ffi_go_closure_osf + FFI_HIDDEN(ffi_go_closure_osf) + +ffi_go_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 8($1) # load cif + ldq $17, 16($1) # load fun + mov $1, $18 # closure is user_data + br $do_closure + + cfi_endproc + .end ffi_go_closure_osf + + .align 4 + .globl ffi_closure_osf + .ent ffi_closure_osf + FFI_HIDDEN(ffi_closure_osf) + +ffi_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + # Store all of the potential argument registers in va_list format. + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 24($1) # load cif + ldq $17, 32($1) # load fun + ldq $18, 40($1) # load user_data + +$do_closure: + stq $19, 13*8($30) + stq $20, 14*8($30) + stq $21, 15*8($30) + stt $f16, 4*8($30) + stt $f17, 5*8($30) + stt $f18, 6*8($30) + stt $f19, 7*8($30) + stt $f20, 8*8($30) + stt $f21, 9*8($30) + + # Call ffi_closure_osf_inner to do the bulk of the work. + lda $19, 2*8($30) + lda $20, 10*8($30) + jsr $26, ffi_closure_osf_inner +0: + ldah $29, 0($26) !gpdisp!2 + lda $2, 99f-0b($26) + s4addq $0, 0, $1 # ldcode * 4 + ldq $0, 16($30) # preload return value + s4addq $1, $2, $1 # 99f + ldcode * 16 + lda $29, 0($29) !gpdisp!2 + ldq $26, 0($30) + cfi_restore($26) + jmp $31, ($1), $load_32 + +.macro epilogue + addq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(-CLOSURE_FS) + ret + .align 4 + cfi_adjust_cfa_offset(CLOSURE_FS) +.endm + + .align 4 +99: +E ALPHA_LD_VOID + epilogue + +E ALPHA_LD_INT64 + epilogue + +E ALPHA_LD_INT32 +$load_32: + sextl $0, $0 + epilogue + +E ALPHA_LD_UINT16 + zapnot $0, 3, $0 + epilogue + +E ALPHA_LD_SINT16 +#ifdef __alpha_bwx__ + sextw $0, $0 +#else + sll $0, 48, $0 + sra $0, 48, $0 +#endif + epilogue + +E ALPHA_LD_UINT8 + and $0, 0xff, $0 + epilogue + +E ALPHA_LD_SINT8 +#ifdef __alpha_bwx__ + sextb $0, $0 +#else + sll $0, 56, $0 + sra $0, 56, $0 +#endif + epilogue + +E ALPHA_LD_FLOAT + lds $f0, 16($sp) + epilogue + +E ALPHA_LD_DOUBLE + ldt $f0, 16($sp) + epilogue + +E ALPHA_LD_CPLXF + lds $f0, 16($sp) + lds $f1, 20($sp) + epilogue + +E ALPHA_LD_CPLXD + ldt $f0, 16($sp) + ldt $f1, 24($sp) + epilogue + + cfi_endproc + .end ffi_closure_osf + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf.S new file mode 100644 index 0000000..b031828 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf.S @@ -0,0 +1,282 @@ +/* ----------------------------------------------------------------------- + osf.S - Copyright (c) 1998, 2001, 2007, 2008, 2011, 2014 Red Hat + + Alpha/OSF Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following 
conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + .arch ev6 + .text + +/* Aid in building a direct addressed jump table, 4 insns per entry. */ +.macro E index + .align 4 + .org 99b + \index * 16 +.endm + +/* ffi_call_osf (void *stack, void *frame, unsigned flags, + void *raddr, void (*fnaddr)(void), void *closure) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 4 + .globl ffi_call_osf + .ent ffi_call_osf + FFI_HIDDEN(ffi_call_osf) + +ffi_call_osf: + cfi_startproc + cfi_def_cfa($17, 32) + mov $16, $30 + stq $26, 0($17) + stq $15, 8($17) + mov $17, $15 + .prologue 0 + cfi_def_cfa_register($15) + cfi_rel_offset($26, 0) + cfi_rel_offset($15, 8) + + stq $18, 16($17) # save flags into frame + stq $19, 24($17) # save rvalue into frame + mov $20, $27 # fn into place for call + mov $21, $1 # closure into static chain + + # Load up all of the (potential) argument registers. + ldq $16, 0($30) + ldt $f16, 0($30) + ldt $f17, 8($30) + ldq $17, 8($30) + ldt $f18, 16($30) + ldq $18, 16($30) + ldt $f19, 24($30) + ldq $19, 24($30) + ldt $f20, 32($30) + ldq $20, 32($30) + ldt $f21, 40($30) + ldq $21, 40($30) + + # Deallocate the register argument area. + lda $30, 48($30) + + jsr $26, ($27), 0 +0: + ldah $29, 0($26) !gpdisp!1 + ldq $2, 24($15) # reload rvalue + lda $29, 0($29) !gpdisp!1 + ldq $3, 16($15) # reload flags + lda $1, 99f-0b($26) + ldq $26, 0($15) + ldq $15, 8($15) + cfi_restore($26) + cfi_restore($15) + cfi_def_cfa($sp, 0) + cmoveq $2, ALPHA_ST_VOID, $3 # mash null rvalue to void + addq $3, $3, $3 + s8addq $3, $1, $1 # 99f + stcode * 16 + jmp $31, ($1), $st_int + + .align 4 +99: +E ALPHA_ST_VOID + ret +E ALPHA_ST_INT +$st_int: + stq $0, 0($2) + ret +E ALPHA_ST_FLOAT + sts $f0, 0($2) + ret +E ALPHA_ST_DOUBLE + stt $f0, 0($2) + ret +E ALPHA_ST_CPLXF + sts $f0, 0($2) + sts $f1, 4($2) + ret +E ALPHA_ST_CPLXD + stt $f0, 0($2) + stt $f1, 8($2) + ret + + cfi_endproc + .end ffi_call_osf + +/* ffi_closure_osf(...) + + Receives the closure argument in $1. 
*/ + +#define CLOSURE_FS (16*8) + + .align 4 + .globl ffi_go_closure_osf + .ent ffi_go_closure_osf + FFI_HIDDEN(ffi_go_closure_osf) + +ffi_go_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 8($1) # load cif + ldq $17, 16($1) # load fun + mov $1, $18 # closure is user_data + br $do_closure + + cfi_endproc + .end ffi_go_closure_osf + + .align 4 + .globl ffi_closure_osf + .ent ffi_closure_osf + FFI_HIDDEN(ffi_closure_osf) + +ffi_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + # Store all of the potential argument registers in va_list format. + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 24($1) # load cif + ldq $17, 32($1) # load fun + ldq $18, 40($1) # load user_data + +$do_closure: + stq $19, 13*8($30) + stq $20, 14*8($30) + stq $21, 15*8($30) + stt $f16, 4*8($30) + stt $f17, 5*8($30) + stt $f18, 6*8($30) + stt $f19, 7*8($30) + stt $f20, 8*8($30) + stt $f21, 9*8($30) + + # Call ffi_closure_osf_inner to do the bulk of the work. + lda $19, 2*8($30) + lda $20, 10*8($30) + jsr $26, ffi_closure_osf_inner +0: + ldah $29, 0($26) !gpdisp!2 + lda $2, 99f-0b($26) + s4addq $0, 0, $1 # ldcode * 4 + ldq $0, 16($30) # preload return value + s4addq $1, $2, $1 # 99f + ldcode * 16 + lda $29, 0($29) !gpdisp!2 + ldq $26, 0($30) + cfi_restore($26) + jmp $31, ($1), $load_32 + +.macro epilogue + addq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(-CLOSURE_FS) + ret + .align 4 + cfi_adjust_cfa_offset(CLOSURE_FS) +.endm + + .align 4 +99: +E ALPHA_LD_VOID + epilogue + +E ALPHA_LD_INT64 + epilogue + +E ALPHA_LD_INT32 +$load_32: + sextl $0, $0 + epilogue + +E ALPHA_LD_UINT16 + zapnot $0, 3, $0 + epilogue + +E ALPHA_LD_SINT16 +#ifdef __alpha_bwx__ + sextw $0, $0 +#else + sll $0, 48, $0 + sra $0, 48, $0 +#endif + epilogue + +E ALPHA_LD_UINT8 + and $0, 0xff, $0 + epilogue + +E ALPHA_LD_SINT8 +#ifdef __alpha_bwx__ + sextb $0, $0 +#else + sll $0, 56, $0 + sra $0, 56, $0 +#endif + epilogue + +E ALPHA_LD_FLOAT + lds $f0, 16($sp) + epilogue + +E ALPHA_LD_DOUBLE + ldt $f0, 16($sp) + epilogue + +E ALPHA_LD_CPLXF + lds $f0, 16($sp) + lds $f1, 20($sp) + epilogue + +E ALPHA_LD_CPLXD + ldt $f0, 16($sp) + ldt $f1, 24($sp) + epilogue + + cfi_endproc + .end ffi_closure_osf + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact 2.S new file mode 100644 index 0000000..03715fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact 2.S @@ -0,0 +1,135 @@ +/* ----------------------------------------------------------------------- + arcompact.S - Copyright (c) 2013 Synposys, Inc. 
(www.synopsys.com) + + ARCompact Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)` .type CNAME(x),%function` CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* R4: ecif.rvalue */ + /* R5: fn */ +ENTRY(ffi_call_ARCompact) + /* Save registers. */ + st.a fp, [sp, -4] /* fp + 20, fp */ + push_s blink /* fp + 16, blink */ + st.a r4, [sp, -4] /* fp + 12, ecif.rvalue */ + push_s r3 /* fp + 8, fig->flags */ + st.a r5, [sp, -4] /* fp + 4, fn */ + push_s r2 /* fp + 0, cif->bytes */ + mov fp, sp + + /* Make room for all of the new args. */ + sub sp, sp, r2 + + /* Place all of the ffi_prep_args in position. */ + /* ffi_prep_args(char *stack, extended_cif *ecif) */ + /* R1 already set. */ + + /* And call. */ + jl_s.d [r0] + mov_s r0, sp + + ld.ab r12, [fp, 4] /* cif->bytes */ + ld.ab r11, [fp, 4] /* fn */ + + /* Move first 8 parameters in registers... */ + ld_s r0, [sp] + ld_s r1, [sp, 4] + ld_s r2, [sp, 8] + ld_s r3, [sp, 12] + ld r4, [sp, 16] + ld r5, [sp, 20] + ld r6, [sp, 24] + ld r7, [sp, 28] + + /* ...and adjust the stack. */ + min r12, r12, 32 + + /* Call the function. */ + jl.d [r11] + add sp, sp, r12 + + mov sp, fp + pop_s r3 /* fig->flags, return type */ + pop_s r2 /* ecif.rvalue, pointer for return value */ + + /* If the return value pointer is NULL, assume no return value. */ + breq.d r2, 0, epilogue + pop_s blink + + /* Return INT. 
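ffi_call_ARCompact above loads the first eight 32-bit slots of the prepared argument block into r0-r7 and pops at most 32 bytes of it (min r12, r12, 32); anything beyond that stays on the stack for the callee. A rough C model of that split, with split_args as an invented helper:

#include <string.h>
#include <stddef.h>

enum { ARC_REG_BYTES = 8 * 4 };        /* r0..r7, one 32-bit slot each */

static size_t split_args (const char *block, size_t bytes,
                          unsigned regs[8], const char **stack_rest)
{
  size_t in_regs = bytes < ARC_REG_BYTES ? bytes : ARC_REG_BYTES;
  memset (regs, 0, 8 * sizeof regs[0]);
  memcpy (regs, block, in_regs);       /* these words land in r0..r7 */
  *stack_rest = block + in_regs;       /* the callee reads the rest here */
  return bytes - in_regs;              /* bytes left on the stack */
}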
*/ + brne r3, FFI_TYPE_INT, return_double + b.d epilogue + st_s r0, [r2] + +return_double: + brne r3, FFI_TYPE_DOUBLE, epilogue + st_s r0, [r2] + st_s r1, [r2,4] + +epilogue: + j_s.d [blink] + ld.ab fp, [sp, 4] + +ENTRY(ffi_closure_ARCompact) + st.a r0, [sp, -32] + st_s r1, [sp, 4] + st_s r2, [sp, 8] + st_s r3, [sp, 12] + st r4, [sp, 16] + st r5, [sp, 20] + st r6, [sp, 24] + st r7, [sp, 28] + + /* pointer to arguments */ + mov_s r2, sp + + /* return value goes here */ + sub sp, sp, 8 + mov_s r1, sp + + push_s blink + + bl.d ffi_closure_inner_ARCompact + mov_s r0, r8 /* codeloc, set by trampoline */ + + pop_s blink + + /* set return value to r1:r0 */ + pop_s r0 + pop_s r1 + j_s.d [blink] + add_s sp, sp, 32 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact.S new file mode 100644 index 0000000..03715fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact.S @@ -0,0 +1,135 @@ +/* ----------------------------------------------------------------------- + arcompact.S - Copyright (c) 2013 Synposys, Inc. (www.synopsys.com) + + ARCompact Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)` .type CNAME(x),%function` CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* R4: ecif.rvalue */ + /* R5: fn */ +ENTRY(ffi_call_ARCompact) + /* Save registers. */ + st.a fp, [sp, -4] /* fp + 20, fp */ + push_s blink /* fp + 16, blink */ + st.a r4, [sp, -4] /* fp + 12, ecif.rvalue */ + push_s r3 /* fp + 8, fig->flags */ + st.a r5, [sp, -4] /* fp + 4, fn */ + push_s r2 /* fp + 0, cif->bytes */ + mov fp, sp + + /* Make room for all of the new args. */ + sub sp, sp, r2 + + /* Place all of the ffi_prep_args in position. */ + /* ffi_prep_args(char *stack, extended_cif *ecif) */ + /* R1 already set. */ + + /* And call. */ + jl_s.d [r0] + mov_s r0, sp + + ld.ab r12, [fp, 4] /* cif->bytes */ + ld.ab r11, [fp, 4] /* fn */ + + /* Move first 8 parameters in registers... */ + ld_s r0, [sp] + ld_s r1, [sp, 4] + ld_s r2, [sp, 8] + ld_s r3, [sp, 12] + ld r4, [sp, 16] + ld r5, [sp, 20] + ld r6, [sp, 24] + ld r7, [sp, 28] + + /* ...and adjust the stack. 
*/ + min r12, r12, 32 + + /* Call the function. */ + jl.d [r11] + add sp, sp, r12 + + mov sp, fp + pop_s r3 /* fig->flags, return type */ + pop_s r2 /* ecif.rvalue, pointer for return value */ + + /* If the return value pointer is NULL, assume no return value. */ + breq.d r2, 0, epilogue + pop_s blink + + /* Return INT. */ + brne r3, FFI_TYPE_INT, return_double + b.d epilogue + st_s r0, [r2] + +return_double: + brne r3, FFI_TYPE_DOUBLE, epilogue + st_s r0, [r2] + st_s r1, [r2,4] + +epilogue: + j_s.d [blink] + ld.ab fp, [sp, 4] + +ENTRY(ffi_closure_ARCompact) + st.a r0, [sp, -32] + st_s r1, [sp, 4] + st_s r2, [sp, 8] + st_s r3, [sp, 12] + st r4, [sp, 16] + st r5, [sp, 20] + st r6, [sp, 24] + st r7, [sp, 28] + + /* pointer to arguments */ + mov_s r2, sp + + /* return value goes here */ + sub sp, sp, 8 + mov_s r1, sp + + push_s blink + + bl.d ffi_closure_inner_ARCompact + mov_s r0, r8 /* codeloc, set by trampoline */ + + pop_s blink + + /* set return value to r1:r0 */ + pop_s r0 + pop_s r1 + j_s.d [blink] + add_s sp, sp, 32 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi 2.c new file mode 100644 index 0000000..4d10b21 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi 2.c @@ -0,0 +1,266 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + + ARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#include + +/* for little endian ARC, the code is in fact stored as mixed endian for + performance reasons */ +#if __BIG_ENDIAN__ +#define CODE_ENDIAN(x) (x) +#else +#define CODE_ENDIAN(x) ( (((uint32_t) (x)) << 16) | (((uint32_t) (x)) >> 16)) +#endif + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. 
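ffi_call on ARC works in two hops: it packs the cif, the argument pointers and the return slot into an extended_cif, and the assembly stub allocates cif->bytes of stack and calls back into ffi_prep_args to lay the values out before loading registers. A compressed sketch of that flow; call_model, its fixed buffer and the local struct are stand-ins for the real pieces:

#include <ffi.h>

/* Mirrors the internal extended_cif: cif, argument pointers, return slot. */
struct ecif_model { ffi_cif *cif; void **avalue; void *rvalue; };

void call_model (ffi_cif *cif, void **avalue, void *rvalue,
                 void (*prep) (char *, struct ecif_model *))
{
  struct ecif_model ecif = { cif, avalue, rvalue };
  char block[256];                     /* the asm uses cif->bytes of stack;
                                          assume it fits for this sketch */
  prep (block, &ecif);                 /* writes the args, plus the hidden
                                          struct-return pointer if any */
  /* ...r0-r7 are then loaded from block and fn is invoked... */
}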
*/ + +void +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_arg)->alignment - 1) | 3) + 1; + + /* Align if necessary. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) (*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, (*p_arg)->size); + break; + + default: + FFI_ASSERT (0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + memcpy (argp, *p_argv, z); + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_ARCompact (void (*)(char *, extended_cif *), + extended_cif *, unsigned, unsigned, + unsigned *, void (*fn) (void)); + +void +ffi_call (ffi_cif * cif, void (*fn) (void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ARCOMPACT: + ffi_call_ARCompact (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +int +ffi_closure_inner_ARCompact (ffi_closure * closure, void *rvalue, + ffi_arg * args) +{ + void **arg_area, **p_argv; + ffi_cif *cif = closure->cif; + char *argp = (char *) args; + ffi_type **p_argt; + int i; + + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + /* handle hidden argument */ + if (cif->flags == FFI_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + + p_argv = arg_area; + + for (i = 0, p_argt = cif->arg_types; i < cif->nargs; + i++, p_argt++, p_argv++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_argt)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
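Two rounding steps recur in this file: the type's alignment is first raised to at least 4 with ((alignment - 1) | 3) + 1, and argp is then rounded up to that boundary with FFI_ALIGN, which libffi defines as (((x) - 1) | ((a) - 1)) + 1 for power-of-two alignments. Spelled out, with min4 and align_up as illustrative names:

#include <stdint.h>
#include <stddef.h>

static size_t min4 (size_t alignment)
{
  return ((alignment - 1) | 3) + 1;    /* 1, 2, 4 -> 4;  8 -> 8 */
}

static uintptr_t align_up (uintptr_t p, size_t a)
{
  return ((p - 1) | (a - 1)) + 1;      /* same shape as FFI_ALIGN */
}

/* e.g. a 64-bit argument following a char at offset 1:
   align_up (1, min4 (8)) == 8, so it is stored naturally aligned. */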
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_argt)->size; + *p_argv = (void *) argp; + argp += z; + } + + (closure->fun) (cif, rvalue, arg_area, closure->user_data); + + return cif->flags; +} + +extern void ffi_closure_ARCompact (void); + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) & (closure->tramp[0]); + + switch (cif->abi) + { + case FFI_ARCOMPACT: + FFI_ASSERT (tramp == codeloc); + tramp[0] = CODE_ENDIAN (0x200a1fc0); /* mov r8, pcl */ + tramp[1] = CODE_ENDIAN (0x20200f80); /* j [long imm] */ + tramp[2] = CODE_ENDIAN (ffi_closure_ARCompact); + break; + + default: + return FFI_BAD_ABI; + } + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + cacheflush (codeloc, FFI_TRAMPOLINE_SIZE, BCACHE); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi.c new file mode 100644 index 0000000..4d10b21 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi.c @@ -0,0 +1,266 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + + ARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#include + +/* for little endian ARC, the code is in fact stored as mixed endian for + performance reasons */ +#if __BIG_ENDIAN__ +#define CODE_ENDIAN(x) (x) +#else +#define CODE_ENDIAN(x) ( (((uint32_t) (x)) << 16) | (((uint32_t) (x)) >> 16)) +#endif + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. */ + +void +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_arg)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
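The trampoline written by ffi_prep_closure_loc above ("mov r8, pcl" followed by a jump through a long immediate) goes through CODE_ENDIAN because little-endian ARC stores instructions in a middle-endian layout: each 32-bit opcode has its two 16-bit halves swapped in memory. A standalone rendition of that swap; code_endian_le is an illustrative name:

#include <stdint.h>

static uint32_t code_endian_le (uint32_t insn)
{
  return (insn << 16) | (insn >> 16);  /* swap the two 16-bit halves */
}

/* e.g. the "mov r8, pcl" opcode 0x200a1fc0 is written to the closure's
   code buffer as 0x1fc0200a on little-endian ARC; on big-endian ARC
   CODE_ENDIAN is the identity. */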
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) (*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, (*p_arg)->size); + break; + + default: + FFI_ASSERT (0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + memcpy (argp, *p_argv, z); + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_ARCompact (void (*)(char *, extended_cif *), + extended_cif *, unsigned, unsigned, + unsigned *, void (*fn) (void)); + +void +ffi_call (ffi_cif * cif, void (*fn) (void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ARCOMPACT: + ffi_call_ARCompact (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +int +ffi_closure_inner_ARCompact (ffi_closure * closure, void *rvalue, + ffi_arg * args) +{ + void **arg_area, **p_argv; + ffi_cif *cif = closure->cif; + char *argp = (char *) args; + ffi_type **p_argt; + int i; + + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + /* handle hidden argument */ + if (cif->flags == FFI_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + + p_argv = arg_area; + + for (i = 0, p_argt = cif->arg_types; i < cif->nargs; + i++, p_argt++, p_argv++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_argt)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_argt)->size; + *p_argv = (void *) argp; + argp += z; + } + + (closure->fun) (cif, rvalue, arg_area, closure->user_data); + + return cif->flags; +} + +extern void ffi_closure_ARCompact (void); + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) & (closure->tramp[0]); + + switch (cif->abi) + { + case FFI_ARCOMPACT: + FFI_ASSERT (tramp == codeloc); + tramp[0] = CODE_ENDIAN (0x200a1fc0); /* mov r8, pcl */ + tramp[1] = CODE_ENDIAN (0x20200f80); /* j [long imm] */ + tramp[2] = CODE_ENDIAN (ffi_closure_ARCompact); + break; + + default: + return FFI_BAD_ABI; + } + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + cacheflush (codeloc, FFI_TRAMPOLINE_SIZE, BCACHE); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget 2.h new file mode 100644 index 0000000..bf8311b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget 2.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + Target configuration macros for ARC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi +{ + FFI_FIRST_ABI = 0, + FFI_ARCOMPACT, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_ARCOMPACT +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget.h new file mode 100644 index 0000000..bf8311b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + Target configuration macros for ARC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi +{ + FFI_FIRST_ABI = 0, + FFI_ARCOMPACT, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_ARCOMPACT +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi 2.c new file mode 100644 index 0000000..4e27071 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi 2.c @@ -0,0 +1,854 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Timothy Wall + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2011 Anthony Green + Copyright (c) 2011 Free Software Foundation + Copyright (c) 1998, 2008, 2011 Red Hat, Inc. 
+ + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__arm__) || defined(_M_ARM) +#include +#include +#include +#include +#include +#include "internal.h" + +#if defined(_MSC_VER) && defined(_M_ARM) +#define WIN32_LEAN_AND_MEAN +#include +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include +#endif + +#else +#ifndef _M_ARM +extern unsigned int ffi_arm_trampoline[2] FFI_HIDDEN; +#else +extern unsigned int ffi_arm_trampoline[3] FFI_HIDDEN; +#endif +#endif + +/* Forward declares. */ +static int vfp_type_p (const ffi_type *); +static void layout_vfp_args (ffi_cif *); + +static void * +ffi_align (ffi_type *ty, void *p) +{ + /* Align if necessary */ + size_t alignment; +#ifdef _WIN32_WCE + alignment = 4; +#else + alignment = ty->alignment; + if (alignment < 4) + alignment = 4; +#endif + return (void *) FFI_ALIGN (p, alignment); +} + +static size_t +ffi_put_arg (ffi_type *ty, void *src, void *dst) +{ + size_t z = ty->size; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *(UINT32 *)dst = *(SINT8 *)src; + break; + case FFI_TYPE_UINT8: + *(UINT32 *)dst = *(UINT8 *)src; + break; + case FFI_TYPE_SINT16: + *(UINT32 *)dst = *(SINT16 *)src; + break; + case FFI_TYPE_UINT16: + *(UINT32 *)dst = *(UINT16 *)src; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: +#ifndef _MSC_VER + case FFI_TYPE_FLOAT: +#endif + *(UINT32 *)dst = *(UINT32 *)src; + break; + +#ifdef _MSC_VER + // casting a float* to a UINT32* doesn't work on Windows + case FFI_TYPE_FLOAT: + *(uintptr_t *)dst = 0; + *(float *)dst = *(float *)src; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + *(UINT64 *)dst = *(UINT64 *)src; + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + memcpy (dst, src, z); + break; + + default: + abort(); + } + + return FFI_ALIGN (z, 4); +} + +/* ffi_prep_args is called once stack space has been allocated + for the function's arguments. + + The vfp_space parameter is the load area for VFP regs, the return + value is cif->vfp_used (word bitset of VFP regs used for passing + arguments). These are only used for the VFP hard-float ABI. 
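ffi_put_arg above normalises every argument into whole 32-bit slots: sub-word integers are sign- or zero-extended to a full word, 64-bit scalars take two words, and aggregates are copied byte-wise, which matches the AAPCS register and stack slot layout. A trimmed model of the integer cases; put_int_arg is an invented helper:

#include <stdint.h>
#include <stddef.h>

static size_t put_int_arg (int is_signed, size_t size,
                           const void *src, uint32_t *slot)
{
  int64_t v;
  if (is_signed)
    v = size == 1 ? *(const int8_t *) src
      : size == 2 ? *(const int16_t *) src
      :             *(const int32_t *) src;
  else
    v = size == 1 ? *(const uint8_t *) src
      : size == 2 ? *(const uint16_t *) src
      : (int64_t)   *(const uint32_t *) src;
  *slot = (uint32_t) v;                /* extension already applied above */
  return 4;                            /* one AAPCS slot */
}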
+*/ +static void +ffi_prep_args_SYSV (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *argp) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (flags == ARM_TYPE_STRUCT) + { + *(void **) argp = rvalue; + argp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, avalue[i], argp); + } +} + +static void +ffi_prep_args_VFP (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *stack, char *vfp_space) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char stack_used = 0; + char done_with_regs = 0; + + /* The first 4 words on the stack are used for values + passed in core registers. */ + regp = stack; + eo_regp = argp = regp + 16; + + /* If the function returns an FFI_TYPE_STRUCT in memory, + that address is passed in r0 to the function. */ + if (flags == ARM_TYPE_STRUCT) + { + *(void **) regp = rvalue; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *a = avalue[i]; + int is_vfp_type = vfp_type_p (ty); + + /* Allocated in VFP registers. */ + if (vi < cif->vfp_nargs && is_vfp_type) + { + char *vfp_slot = vfp_space + cif->vfp_args[vi++] * 4; + ffi_put_arg (ty, a, vfp_slot); + continue; + } + /* Try allocating in core registers. */ + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + size_t size = ty->size; + size = (size < 4) ? 4 : size; // pad + /* Check if there is space left in the aligned register + area to place the argument. */ + if (tregp + size <= eo_regp) + { + regp = tregp + ffi_put_arg (ty, a, tregp); + done_with_regs = (regp == argp); + // ensure we did not write into the stack area + FFI_ASSERT (regp <= argp); + continue; + } + /* In case there are no arguments in the stack area yet, + the argument is passed in the remaining core registers + and on the stack. */ + else if (!stack_used) + { + stack_used = 1; + done_with_regs = 1; + argp = tregp + ffi_put_arg (ty, a, tregp); + FFI_ASSERT (eo_regp < argp); + continue; + } + } + /* Base case, arguments are passed on the stack */ + stack_used = 1; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, a, argp); + } +} + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int flags = 0, cabi = cif->abi; + size_t bytes = cif->bytes; + + /* Map out the register placements of VFP register args. The VFP + hard-float calling conventions are slightly more sophisticated + than the base calling conventions, so we do it here instead of + in ffi_prep_args(). */ + if (cabi == FFI_VFP) + layout_vfp_args (cif); + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = ARM_TYPE_VOID; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + flags = ARM_TYPE_INT; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = ARM_TYPE_INT64; + break; + + case FFI_TYPE_FLOAT: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_S : ARM_TYPE_INT); + break; + case FFI_TYPE_DOUBLE: + flags = (cabi == FFI_VFP ? 
ARM_TYPE_VFP_D : ARM_TYPE_INT64); + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + if (cabi == FFI_VFP) + { + int h = vfp_type_p (cif->rtype); + + flags = ARM_TYPE_VFP_N; + if (h == 0x100 + FFI_TYPE_FLOAT) + flags = ARM_TYPE_VFP_S; + if (h == 0x100 + FFI_TYPE_DOUBLE) + flags = ARM_TYPE_VFP_D; + if (h != 0) + break; + } + + /* A Composite Type not larger than 4 bytes is returned in r0. + A Composite Type larger than 4 bytes, or whose size cannot + be determined statically ... is stored in memory at an + address passed [in r0]. */ + if (cif->rtype->size <= 4) + flags = ARM_TYPE_INT; + else + { + flags = ARM_TYPE_STRUCT; + bytes += 4; + } + break; + + default: + abort(); + } + + /* Round the stack up to a multiple of 8 bytes. This isn't needed + everywhere, but it is on some platforms, and it doesn't harm anything + when it isn't needed. */ + bytes = FFI_ALIGN (bytes, 8); + + /* Minimum stack space is the 4 register arguments that we pop. */ + if (bytes < 4*4) + bytes = 4*4; + + cif->bytes = bytes; + cif->flags = flags; + + return FFI_OK; +} + +/* Perform machine dependent cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif * cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + /* VFP variadic calls actually use the SYSV ABI */ + if (cif->abi == FFI_VFP) + cif->abi = FFI_SYSV; + + return ffi_prep_cif_machdep (cif); +} + +/* Prototypes for assembly functions, in sysv.S. */ + +struct call_frame +{ + void *fp; + void *lr; + void *rvalue; + int flags; + void *closure; +}; + +extern void ffi_call_SYSV (void *stack, struct call_frame *, + void (*fn) (void)) FFI_HIDDEN; +extern void ffi_call_VFP (void *vfp_space, struct call_frame *, + void (*fn) (void), unsigned vfp_used) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif * cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + int flags = cif->flags; + ffi_type *rtype = cif->rtype; + size_t bytes, rsize, vfp_size; + char *stack, *vfp_space, *new_rvalue; + struct call_frame *frame; + + rsize = 0; + if (rvalue == NULL) + { + /* If the return value is a struct and we don't have a return + value address then we need to make one. Otherwise the return + value is in registers and we can ignore them. */ + if (flags == ARM_TYPE_STRUCT) + rsize = rtype->size; + else + flags = ARM_TYPE_VOID; + } + else if (flags == ARM_TYPE_VFP_N) + { + /* Largest case is double x 4. */ + rsize = 32; + } + else if (flags == ARM_TYPE_INT && rtype->type == FFI_TYPE_STRUCT) + rsize = 4; + + /* Largest case. */ + vfp_size = (cif->abi == FFI_VFP && cif->vfp_used ? 
8*8: 0); + + bytes = cif->bytes; + stack = alloca (vfp_size + bytes + sizeof(struct call_frame) + rsize); + + vfp_space = NULL; + if (vfp_size) + { + vfp_space = stack; + stack += vfp_size; + } + + frame = (struct call_frame *)(stack + bytes); + + new_rvalue = rvalue; + if (rsize) + new_rvalue = (void *)(frame + 1); + + frame->rvalue = new_rvalue; + frame->flags = flags; + frame->closure = closure; + + if (vfp_space) + { + ffi_prep_args_VFP (cif, flags, new_rvalue, avalue, stack, vfp_space); + ffi_call_VFP (vfp_space, frame, fn, cif->vfp_used); + } + else + { + ffi_prep_args_SYSV (cif, flags, new_rvalue, avalue, stack); + ffi_call_SYSV (stack, frame, fn); + } + + if (rvalue && rvalue != new_rvalue) + memcpy (rvalue, new_rvalue, rtype->size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +static void * +ffi_prep_incoming_args_SYSV (ffi_cif *cif, void *rvalue, + char *argp, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + else + { + if (cif->rtype->size && cif->rtype->size < 4) + *(uint32_t *) rvalue = 0; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +static void * +ffi_prep_incoming_args_VFP (ffi_cif *cif, void *rvalue, char *stack, + char *vfp_space, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char done_with_regs = 0; + char stack_used = 0; + + regp = stack; + eo_regp = argp = regp + 16; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) regp; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + int is_vfp_type = vfp_type_p (ty); + size_t z = ty->size; + + if (vi < cif->vfp_nargs && is_vfp_type) + { + avalue[i] = vfp_space + cif->vfp_args[vi++] * 4; + continue; + } + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + + z = (z < 4) ? 4 : z; // pad + + /* If the arguments either fits into the registers or uses registers + and stack, while we haven't read other things from the stack */ + if (tregp + z <= eo_regp || !stack_used) + { + /* Because we're little endian, this is what it turns into. */ + avalue[i] = (void *) tregp; + regp = tregp + z; + + /* If we read past the last core register, make sure we + have not read from the stack before and continue + reading after regp. 
*/ + if (regp > eo_regp) + { + FFI_ASSERT (!stack_used); + argp = regp; + } + if (regp >= eo_regp) + { + done_with_regs = 1; + stack_used = 1; + } + continue; + } + } + + stack_used = 1; + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +struct closure_frame +{ + char vfp_space[8*8] __attribute__((aligned(8))); + char result[8*4]; + char argp[]; +}; + +int FFI_HIDDEN +ffi_closure_inner_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_SYSV (cif, frame->result, + frame->argp, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +int FFI_HIDDEN +ffi_closure_inner_VFP (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_VFP (cif, frame->result, frame->argp, + frame->vfp_space, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +void ffi_closure_SYSV (void) FFI_HIDDEN; +void ffi_closure_VFP (void) FFI_HIDDEN; +void ffi_go_closure_SYSV (void) FFI_HIDDEN; +void ffi_go_closure_VFP (void) FFI_HIDDEN; + +/* the cif must already be prep'ed */ + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, + ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + void (*closure_func) (void) = ffi_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + +#if FFI_EXEC_TRAMPOLINE_TABLE + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = closure_func; +#else + +#ifndef _M_ARM + memcpy(closure->tramp, ffi_arm_trampoline, 8); +#else + // cast away function type so MSVC doesn't set the lower bit of the function pointer + memcpy(closure->tramp, (void*)((uintptr_t)ffi_arm_trampoline & 0xFFFFFFFE), FFI_TRAMPOLINE_CLOSURE_OFFSET); +#endif + +#if defined (__QNX__) + msync(closure->tramp, 8, 0x1000000); /* clear data map */ + msync(codeloc, 8, 0x1000000); /* clear insn map */ +#elif defined(_MSC_VER) + FlushInstructionCache(GetCurrentProcess(), closure->tramp, FFI_TRAMPOLINE_SIZE); +#else + __clear_cache(closure->tramp, closure->tramp + 8); /* clear data map */ + __clear_cache(codeloc, codeloc + 8); /* clear insn map */ +#endif +#ifdef _M_ARM + *(void(**)(void))(closure->tramp + FFI_TRAMPOLINE_CLOSURE_FUNCTION) = closure_func; +#else + *(void (**)(void))(closure->tramp + 8) = closure_func; +#endif +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + void (*closure_func) (void) = ffi_go_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_go_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = closure_func; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Below are routines for VFP hard-float support. */ + +/* A subroutine of vfp_type_p. 
Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of vfp_type_p. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY is an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is a floating point scalar. + + Returns non-zero iff TY is an HFA. The result is an encoded value where + bits 0-7 contain the type code, and bits 8-10 contain the element count. */ + +static int +vfp_type_p (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (ty->type) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + if (candidate != FFI_TYPE_FLOAT && candidate != FFI_TYPE_DOUBLE) + return 0; + ele_count = 2; + goto done; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 32 bytes. */ + size = ty->size; + if (size < 4 || size > 32) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return (ele_count << 8) | candidate; +} + +static int +place_vfp_arg (ffi_cif *cif, int h) +{ + unsigned short reg = cif->vfp_reg_free; + int align = 1, nregs = h >> 8; + + if ((h & 0xff) == FFI_TYPE_DOUBLE) + align = 2, nregs *= 2; + + /* Align register number. 
*/ + if ((reg & 1) && align == 2) + reg++; + + while (reg + nregs <= 16) + { + int s, new_used = 0; + for (s = reg; s < reg + nregs; s++) + { + new_used |= (1 << s); + if (cif->vfp_used & (1 << s)) + { + reg += align; + goto next_reg; + } + } + /* Found regs to allocate. */ + cif->vfp_used |= new_used; + cif->vfp_args[cif->vfp_nargs++] = (signed char)reg; + + /* Update vfp_reg_free. */ + if (cif->vfp_used & (1 << cif->vfp_reg_free)) + { + reg += nregs; + while (cif->vfp_used & (1 << reg)) + reg += 1; + cif->vfp_reg_free = reg; + } + return 0; + next_reg:; + } + // done, mark all regs as used + cif->vfp_reg_free = 16; + cif->vfp_used = 0xFFFF; + return 1; +} + +static void +layout_vfp_args (ffi_cif * cif) +{ + unsigned int i; + /* Init VFP fields */ + cif->vfp_used = 0; + cif->vfp_nargs = 0; + cif->vfp_reg_free = 0; + memset (cif->vfp_args, -1, 16); /* Init to -1. */ + + for (i = 0; i < cif->nargs; i++) + { + int h = vfp_type_p (cif->arg_types[i]); + if (h && place_vfp_arg (cif, h) == 1) + break; + } +} + +#endif /* __arm__ or _M_ARM */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c new file mode 100644 index 0000000..4e27071 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c @@ -0,0 +1,854 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Timothy Wall + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2011 Anthony Green + Copyright (c) 2011 Free Software Foundation + Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__arm__) || defined(_M_ARM) +#include +#include +#include +#include +#include +#include "internal.h" + +#if defined(_MSC_VER) && defined(_M_ARM) +#define WIN32_LEAN_AND_MEAN +#include +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include +#endif + +#else +#ifndef _M_ARM +extern unsigned int ffi_arm_trampoline[2] FFI_HIDDEN; +#else +extern unsigned int ffi_arm_trampoline[3] FFI_HIDDEN; +#endif +#endif + +/* Forward declares. 
*/ +static int vfp_type_p (const ffi_type *); +static void layout_vfp_args (ffi_cif *); + +static void * +ffi_align (ffi_type *ty, void *p) +{ + /* Align if necessary */ + size_t alignment; +#ifdef _WIN32_WCE + alignment = 4; +#else + alignment = ty->alignment; + if (alignment < 4) + alignment = 4; +#endif + return (void *) FFI_ALIGN (p, alignment); +} + +static size_t +ffi_put_arg (ffi_type *ty, void *src, void *dst) +{ + size_t z = ty->size; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *(UINT32 *)dst = *(SINT8 *)src; + break; + case FFI_TYPE_UINT8: + *(UINT32 *)dst = *(UINT8 *)src; + break; + case FFI_TYPE_SINT16: + *(UINT32 *)dst = *(SINT16 *)src; + break; + case FFI_TYPE_UINT16: + *(UINT32 *)dst = *(UINT16 *)src; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: +#ifndef _MSC_VER + case FFI_TYPE_FLOAT: +#endif + *(UINT32 *)dst = *(UINT32 *)src; + break; + +#ifdef _MSC_VER + // casting a float* to a UINT32* doesn't work on Windows + case FFI_TYPE_FLOAT: + *(uintptr_t *)dst = 0; + *(float *)dst = *(float *)src; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + *(UINT64 *)dst = *(UINT64 *)src; + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + memcpy (dst, src, z); + break; + + default: + abort(); + } + + return FFI_ALIGN (z, 4); +} + +/* ffi_prep_args is called once stack space has been allocated + for the function's arguments. + + The vfp_space parameter is the load area for VFP regs, the return + value is cif->vfp_used (word bitset of VFP regs used for passing + arguments). These are only used for the VFP hard-float ABI. +*/ +static void +ffi_prep_args_SYSV (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *argp) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (flags == ARM_TYPE_STRUCT) + { + *(void **) argp = rvalue; + argp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, avalue[i], argp); + } +} + +static void +ffi_prep_args_VFP (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *stack, char *vfp_space) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char stack_used = 0; + char done_with_regs = 0; + + /* The first 4 words on the stack are used for values + passed in core registers. */ + regp = stack; + eo_regp = argp = regp + 16; + + /* If the function returns an FFI_TYPE_STRUCT in memory, + that address is passed in r0 to the function. */ + if (flags == ARM_TYPE_STRUCT) + { + *(void **) regp = rvalue; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *a = avalue[i]; + int is_vfp_type = vfp_type_p (ty); + + /* Allocated in VFP registers. */ + if (vi < cif->vfp_nargs && is_vfp_type) + { + char *vfp_slot = vfp_space + cif->vfp_args[vi++] * 4; + ffi_put_arg (ty, a, vfp_slot); + continue; + } + /* Try allocating in core registers. */ + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + size_t size = ty->size; + size = (size < 4) ? 4 : size; // pad + /* Check if there is space left in the aligned register + area to place the argument. 
*/ + if (tregp + size <= eo_regp) + { + regp = tregp + ffi_put_arg (ty, a, tregp); + done_with_regs = (regp == argp); + // ensure we did not write into the stack area + FFI_ASSERT (regp <= argp); + continue; + } + /* In case there are no arguments in the stack area yet, + the argument is passed in the remaining core registers + and on the stack. */ + else if (!stack_used) + { + stack_used = 1; + done_with_regs = 1; + argp = tregp + ffi_put_arg (ty, a, tregp); + FFI_ASSERT (eo_regp < argp); + continue; + } + } + /* Base case, arguments are passed on the stack */ + stack_used = 1; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, a, argp); + } +} + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int flags = 0, cabi = cif->abi; + size_t bytes = cif->bytes; + + /* Map out the register placements of VFP register args. The VFP + hard-float calling conventions are slightly more sophisticated + than the base calling conventions, so we do it here instead of + in ffi_prep_args(). */ + if (cabi == FFI_VFP) + layout_vfp_args (cif); + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = ARM_TYPE_VOID; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + flags = ARM_TYPE_INT; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = ARM_TYPE_INT64; + break; + + case FFI_TYPE_FLOAT: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_S : ARM_TYPE_INT); + break; + case FFI_TYPE_DOUBLE: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_D : ARM_TYPE_INT64); + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + if (cabi == FFI_VFP) + { + int h = vfp_type_p (cif->rtype); + + flags = ARM_TYPE_VFP_N; + if (h == 0x100 + FFI_TYPE_FLOAT) + flags = ARM_TYPE_VFP_S; + if (h == 0x100 + FFI_TYPE_DOUBLE) + flags = ARM_TYPE_VFP_D; + if (h != 0) + break; + } + + /* A Composite Type not larger than 4 bytes is returned in r0. + A Composite Type larger than 4 bytes, or whose size cannot + be determined statically ... is stored in memory at an + address passed [in r0]. */ + if (cif->rtype->size <= 4) + flags = ARM_TYPE_INT; + else + { + flags = ARM_TYPE_STRUCT; + bytes += 4; + } + break; + + default: + abort(); + } + + /* Round the stack up to a multiple of 8 bytes. This isn't needed + everywhere, but it is on some platforms, and it doesn't harm anything + when it isn't needed. */ + bytes = FFI_ALIGN (bytes, 8); + + /* Minimum stack space is the 4 register arguments that we pop. */ + if (bytes < 4*4) + bytes = 4*4; + + cif->bytes = bytes; + cif->flags = flags; + + return FFI_OK; +} + +/* Perform machine dependent cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif * cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + /* VFP variadic calls actually use the SYSV ABI */ + if (cif->abi == FFI_VFP) + cif->abi = FFI_SYSV; + + return ffi_prep_cif_machdep (cif); +} + +/* Prototypes for assembly functions, in sysv.S. 
*/ + +struct call_frame +{ + void *fp; + void *lr; + void *rvalue; + int flags; + void *closure; +}; + +extern void ffi_call_SYSV (void *stack, struct call_frame *, + void (*fn) (void)) FFI_HIDDEN; +extern void ffi_call_VFP (void *vfp_space, struct call_frame *, + void (*fn) (void), unsigned vfp_used) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif * cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + int flags = cif->flags; + ffi_type *rtype = cif->rtype; + size_t bytes, rsize, vfp_size; + char *stack, *vfp_space, *new_rvalue; + struct call_frame *frame; + + rsize = 0; + if (rvalue == NULL) + { + /* If the return value is a struct and we don't have a return + value address then we need to make one. Otherwise the return + value is in registers and we can ignore them. */ + if (flags == ARM_TYPE_STRUCT) + rsize = rtype->size; + else + flags = ARM_TYPE_VOID; + } + else if (flags == ARM_TYPE_VFP_N) + { + /* Largest case is double x 4. */ + rsize = 32; + } + else if (flags == ARM_TYPE_INT && rtype->type == FFI_TYPE_STRUCT) + rsize = 4; + + /* Largest case. */ + vfp_size = (cif->abi == FFI_VFP && cif->vfp_used ? 8*8: 0); + + bytes = cif->bytes; + stack = alloca (vfp_size + bytes + sizeof(struct call_frame) + rsize); + + vfp_space = NULL; + if (vfp_size) + { + vfp_space = stack; + stack += vfp_size; + } + + frame = (struct call_frame *)(stack + bytes); + + new_rvalue = rvalue; + if (rsize) + new_rvalue = (void *)(frame + 1); + + frame->rvalue = new_rvalue; + frame->flags = flags; + frame->closure = closure; + + if (vfp_space) + { + ffi_prep_args_VFP (cif, flags, new_rvalue, avalue, stack, vfp_space); + ffi_call_VFP (vfp_space, frame, fn, cif->vfp_used); + } + else + { + ffi_prep_args_SYSV (cif, flags, new_rvalue, avalue, stack); + ffi_call_SYSV (stack, frame, fn); + } + + if (rvalue && rvalue != new_rvalue) + memcpy (rvalue, new_rvalue, rtype->size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +static void * +ffi_prep_incoming_args_SYSV (ffi_cif *cif, void *rvalue, + char *argp, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + else + { + if (cif->rtype->size && cif->rtype->size < 4) + *(uint32_t *) rvalue = 0; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +static void * +ffi_prep_incoming_args_VFP (ffi_cif *cif, void *rvalue, char *stack, + char *vfp_space, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char done_with_regs = 0; + char stack_used = 0; + + regp = stack; + eo_regp = argp = regp + 16; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) regp; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + int is_vfp_type = vfp_type_p (ty); + size_t z = ty->size; + + if (vi < cif->vfp_nargs && is_vfp_type) + { + avalue[i] = vfp_space + cif->vfp_args[vi++] * 4; + continue; + } + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + + z = (z < 4) ? 
4 : z; // pad + + /* If the arguments either fits into the registers or uses registers + and stack, while we haven't read other things from the stack */ + if (tregp + z <= eo_regp || !stack_used) + { + /* Because we're little endian, this is what it turns into. */ + avalue[i] = (void *) tregp; + regp = tregp + z; + + /* If we read past the last core register, make sure we + have not read from the stack before and continue + reading after regp. */ + if (regp > eo_regp) + { + FFI_ASSERT (!stack_used); + argp = regp; + } + if (regp >= eo_regp) + { + done_with_regs = 1; + stack_used = 1; + } + continue; + } + } + + stack_used = 1; + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +struct closure_frame +{ + char vfp_space[8*8] __attribute__((aligned(8))); + char result[8*4]; + char argp[]; +}; + +int FFI_HIDDEN +ffi_closure_inner_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_SYSV (cif, frame->result, + frame->argp, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +int FFI_HIDDEN +ffi_closure_inner_VFP (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_VFP (cif, frame->result, frame->argp, + frame->vfp_space, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +void ffi_closure_SYSV (void) FFI_HIDDEN; +void ffi_closure_VFP (void) FFI_HIDDEN; +void ffi_go_closure_SYSV (void) FFI_HIDDEN; +void ffi_go_closure_VFP (void) FFI_HIDDEN; + +/* the cif must already be prep'ed */ + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, + ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + void (*closure_func) (void) = ffi_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. 
*/ + if (cif->vfp_used) + closure_func = ffi_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + +#if FFI_EXEC_TRAMPOLINE_TABLE + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = closure_func; +#else + +#ifndef _M_ARM + memcpy(closure->tramp, ffi_arm_trampoline, 8); +#else + // cast away function type so MSVC doesn't set the lower bit of the function pointer + memcpy(closure->tramp, (void*)((uintptr_t)ffi_arm_trampoline & 0xFFFFFFFE), FFI_TRAMPOLINE_CLOSURE_OFFSET); +#endif + +#if defined (__QNX__) + msync(closure->tramp, 8, 0x1000000); /* clear data map */ + msync(codeloc, 8, 0x1000000); /* clear insn map */ +#elif defined(_MSC_VER) + FlushInstructionCache(GetCurrentProcess(), closure->tramp, FFI_TRAMPOLINE_SIZE); +#else + __clear_cache(closure->tramp, closure->tramp + 8); /* clear data map */ + __clear_cache(codeloc, codeloc + 8); /* clear insn map */ +#endif +#ifdef _M_ARM + *(void(**)(void))(closure->tramp + FFI_TRAMPOLINE_CLOSURE_FUNCTION) = closure_func; +#else + *(void (**)(void))(closure->tramp + 8) = closure_func; +#endif +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + void (*closure_func) (void) = ffi_go_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_go_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = closure_func; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Below are routines for VFP hard-float support. */ + +/* A subroutine of vfp_type_p. Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of vfp_type_p. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY is an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is a floating point scalar. + + Returns non-zero iff TY is an HFA. The result is an encoded value where + bits 0-7 contain the type code, and bits 8-10 contain the element count. */ + +static int +vfp_type_p (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. 
*/ + candidate = ty->type; + switch (ty->type) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + if (candidate != FFI_TYPE_FLOAT && candidate != FFI_TYPE_DOUBLE) + return 0; + ele_count = 2; + goto done; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 32 bytes. */ + size = ty->size; + if (size < 4 || size > 32) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return (ele_count << 8) | candidate; +} + +static int +place_vfp_arg (ffi_cif *cif, int h) +{ + unsigned short reg = cif->vfp_reg_free; + int align = 1, nregs = h >> 8; + + if ((h & 0xff) == FFI_TYPE_DOUBLE) + align = 2, nregs *= 2; + + /* Align register number. */ + if ((reg & 1) && align == 2) + reg++; + + while (reg + nregs <= 16) + { + int s, new_used = 0; + for (s = reg; s < reg + nregs; s++) + { + new_used |= (1 << s); + if (cif->vfp_used & (1 << s)) + { + reg += align; + goto next_reg; + } + } + /* Found regs to allocate. */ + cif->vfp_used |= new_used; + cif->vfp_args[cif->vfp_nargs++] = (signed char)reg; + + /* Update vfp_reg_free. */ + if (cif->vfp_used & (1 << cif->vfp_reg_free)) + { + reg += nregs; + while (cif->vfp_used & (1 << reg)) + reg += 1; + cif->vfp_reg_free = reg; + } + return 0; + next_reg:; + } + // done, mark all regs as used + cif->vfp_reg_free = 16; + cif->vfp_used = 0xFFFF; + return 1; +} + +static void +layout_vfp_args (ffi_cif * cif) +{ + unsigned int i; + /* Init VFP fields */ + cif->vfp_used = 0; + cif->vfp_nargs = 0; + cif->vfp_reg_free = 0; + memset (cif->vfp_args, -1, 16); /* Init to -1. */ + + for (i = 0; i < cif->nargs; i++) + { + int h = vfp_type_p (cif->arg_types[i]); + if (h && place_vfp_arg (cif, h) == 1) + break; + } +} + +#endif /* __arm__ or _M_ARM */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget 2.h new file mode 100644 index 0000000..cb57b84 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget 2.h @@ -0,0 +1,89 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#if defined(__ARM_PCS_VFP) || defined(_M_ARM) + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#ifndef _M_ARM +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#ifdef _MSC_VER +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_FUNCTION 12 +#else +#define FFI_TRAMPOLINE_SIZE 12 +#endif +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget.h new file mode 100644 index 0000000..cb57b84 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget.h @@ -0,0 +1,89 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#if defined(__ARM_PCS_VFP) || defined(_M_ARM) + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#ifndef _M_ARM +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#ifdef _MSC_VER +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_FUNCTION 12 +#else +#define FFI_TRAMPOLINE_SIZE 12 +#endif +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal 2.h new file mode 100644 index 0000000..6cf0b2a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal 2.h @@ -0,0 +1,7 @@ +#define ARM_TYPE_VFP_S 0 +#define ARM_TYPE_VFP_D 1 +#define ARM_TYPE_VFP_N 2 +#define ARM_TYPE_INT64 3 +#define ARM_TYPE_INT 4 +#define ARM_TYPE_VOID 5 +#define ARM_TYPE_STRUCT 6 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal.h new file mode 100644 index 0000000..6cf0b2a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal.h @@ -0,0 +1,7 @@ +#define ARM_TYPE_VFP_S 0 +#define ARM_TYPE_VFP_D 1 +#define ARM_TYPE_VFP_N 2 +#define ARM_TYPE_INT64 3 +#define ARM_TYPE_INT 4 +#define ARM_TYPE_VOID 5 +#define ARM_TYPE_STRUCT 6 diff --git 
a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv 2.S new file mode 100644 index 0000000..63180a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv 2.S @@ -0,0 +1,385 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __arm__ +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. */ +#ifndef __ARM_ARCH +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ + || defined(__ARM_ARCH_6M__) +# define __ARM_ARCH 6 +# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \ + || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# else +# define __ARM_ARCH 4 +# endif +#endif + +/* Conditionally compile unwinder directives. */ +#ifdef __ARM_EABI__ +# define UNWIND(...) __VA_ARGS__ +#else +# define UNWIND(...) +#endif + +#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__) + .cfi_sections .debug_frame +#endif + +#define CONCAT(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +#ifdef __USER_LABEL_PREFIX__ +# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X) +#else +# define CNAME(X) X +#endif +#ifdef __ELF__ +# define SIZE(X) .size CNAME(X), . - CNAME(X) +# define TYPE(X, Y) .type CNAME(X), Y +#else +# define SIZE(X) +# define TYPE(X, Y) +#endif + +#define ARM_FUNC_START_LOCAL(name) \ + .align 3; \ + TYPE(CNAME(name), %function); \ + CNAME(name): + +#define ARM_FUNC_START(name) \ + .globl CNAME(name); \ + FFI_HIDDEN(CNAME(name)); \ + ARM_FUNC_START_LOCAL(name) + +#define ARM_FUNC_END(name) \ + SIZE(name) + +/* Aid in defining a jump table with 8 bytes between entries. */ +/* ??? The clang assembler doesn't handle .if with symbolic expressions. */ +#ifdef __clang__ +# define E(index) +#else +# define E(index) \ + .if . 
- 0b - 8*index; \ + .error "type table out of sync"; \ + .endif +#endif + + .text + .syntax unified + .arm + +#ifndef __clang__ + /* We require interworking on LDM, which implies ARMv5T, + which implies the existance of BLX. */ + .arch armv5t +#endif + + /* Note that we use STC and LDC to encode VFP instructions, + so that we do not need ".fpu vfp", nor get that added to + the object file attributes. These will not be executed + unless the FFI_VFP abi is used. */ + + @ r0: stack + @ r1: frame + @ r2: fn + @ r3: vfp_used + +ARM_FUNC_START(ffi_call_VFP) + UNWIND(.fnstart) + cfi_startproc + + cmp r3, #3 @ load only d0 if possible +#ifdef __clang__ + vldrle d0, [sp] + vldmgt sp, {d0-d7} +#else + ldcle p11, cr0, [r0] @ vldrle d0, [sp] + ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7} +#endif + add r0, r0, #64 @ discard the vfp register args + /* FALLTHRU */ +ARM_FUNC_END(ffi_call_VFP) + +ARM_FUNC_START(ffi_call_SYSV) + stm r1, {fp, lr} + mov fp, r1 + + @ This is a bit of a lie wrt the origin of the unwind info, but + @ now we've got the usual frame pointer and two saved registers. + UNWIND(.save {fp,lr}) + UNWIND(.setfp fp, sp) + cfi_def_cfa(fp, 8) + cfi_rel_offset(fp, 0) + cfi_rel_offset(lr, 4) + + mov sp, r0 @ install the stack pointer + mov lr, r2 @ move the fn pointer out of the way + ldr ip, [fp, #16] @ install the static chain + ldmia sp!, {r0-r3} @ move first 4 parameters in registers. + blx lr @ call fn + + @ Load r2 with the pointer to storage for the return value + @ Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + @ Deallocate the stack with the arguments. + mov sp, fp + cfi_def_cfa_register(sp) + + @ Store values stored in registers. + .align 3 + add pc, pc, r3, lsl #3 + nop +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vstr s0, [r2] +#else + stc p10, cr0, [r2] @ vstr s0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vstr d0, [r2] +#else + stc p11, cr0, [r2] @ vstr d0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vstm r2, {d0-d3} +#else + stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3} +#endif + pop {fp,pc} +E(ARM_TYPE_INT64) + str r1, [r2, #4] + nop +E(ARM_TYPE_INT) + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID) + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT) + pop {fp,pc} + + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_call_SYSV) + + +/* + int ffi_closure_inner_* (cif, fun, user_data, frame) +*/ + +ARM_FUNC_START(ffi_go_closure_SYSV) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_SYSV) + +ARM_FUNC_START(ffi_closure_SYSV) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 @ compute entry sp + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) + stmdb sp!, {ip,lr} + + /* Remember that EABI unwind info only applies at call sites. + We need do nothing except note the save of the stack pointer + and the link registers. 
*/ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_SYSV) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_SYSV) + +ARM_FUNC_START(ffi_go_closure_VFP) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_VFP) + +ARM_FUNC_START(ffi_closure_VFP) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) +#ifdef __clang__ + vstm sp, {d0-d7} +#else + stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7} +#endif + stmdb sp!, {ip,lr} + + /* See above. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_VFP) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_VFP) + +/* Load values returned in registers for both closure entry points. + Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. 
*/ + +ARM_FUNC_START_LOCAL(ffi_closure_ret) + cfi_startproc + cfi_rel_offset(sp, 0) + cfi_rel_offset(lr, 4) +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vldr s0, [r2] +#else + ldc p10, cr0, [r2] @ vldr s0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vldr d0, [r2] +#else + ldc p11, cr0, [r2] @ vldr d0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vldm r2, {d0-d3} +#else + ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3} +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_INT64) + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT) + ldr r0, [r2] + ldm sp, {sp,pc} +E(ARM_TYPE_VOID) + ldm sp, {sp,pc} + nop +E(ARM_TYPE_STRUCT) + ldm sp, {sp,pc} + cfi_endproc +ARM_FUNC_END(ffi_closure_ret) + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + +.align PAGE_MAX_SHIFT +ARM_FUNC_START(ffi_closure_trampoline_table_page) +.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr ip, #-PAGE_MAX_SIZE @ the config page is PAGE_MAX_SIZE behind the trampoline page + sub ip, #8 @ account for pc bias + ldr pc, [ip, #4] @ jump to ffi_closure_SYSV or ffi_closure_VFP +.endr +ARM_FUNC_END(ffi_closure_trampoline_table_page) +#endif + +#else + +ARM_FUNC_START(ffi_arm_trampoline) +0: adr ip, 0b + ldr pc, 1f +1: .long 0 +ARM_FUNC_END(ffi_arm_trampoline) + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ +#endif /* __arm__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S new file mode 100644 index 0000000..63180a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S @@ -0,0 +1,385 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __arm__ +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. 
*/ +#ifndef __ARM_ARCH +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ + || defined(__ARM_ARCH_6M__) +# define __ARM_ARCH 6 +# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \ + || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# else +# define __ARM_ARCH 4 +# endif +#endif + +/* Conditionally compile unwinder directives. */ +#ifdef __ARM_EABI__ +# define UNWIND(...) __VA_ARGS__ +#else +# define UNWIND(...) +#endif + +#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__) + .cfi_sections .debug_frame +#endif + +#define CONCAT(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +#ifdef __USER_LABEL_PREFIX__ +# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X) +#else +# define CNAME(X) X +#endif +#ifdef __ELF__ +# define SIZE(X) .size CNAME(X), . - CNAME(X) +# define TYPE(X, Y) .type CNAME(X), Y +#else +# define SIZE(X) +# define TYPE(X, Y) +#endif + +#define ARM_FUNC_START_LOCAL(name) \ + .align 3; \ + TYPE(CNAME(name), %function); \ + CNAME(name): + +#define ARM_FUNC_START(name) \ + .globl CNAME(name); \ + FFI_HIDDEN(CNAME(name)); \ + ARM_FUNC_START_LOCAL(name) + +#define ARM_FUNC_END(name) \ + SIZE(name) + +/* Aid in defining a jump table with 8 bytes between entries. */ +/* ??? The clang assembler doesn't handle .if with symbolic expressions. */ +#ifdef __clang__ +# define E(index) +#else +# define E(index) \ + .if . - 0b - 8*index; \ + .error "type table out of sync"; \ + .endif +#endif + + .text + .syntax unified + .arm + +#ifndef __clang__ + /* We require interworking on LDM, which implies ARMv5T, + which implies the existance of BLX. */ + .arch armv5t +#endif + + /* Note that we use STC and LDC to encode VFP instructions, + so that we do not need ".fpu vfp", nor get that added to + the object file attributes. These will not be executed + unless the FFI_VFP abi is used. */ + + @ r0: stack + @ r1: frame + @ r2: fn + @ r3: vfp_used + +ARM_FUNC_START(ffi_call_VFP) + UNWIND(.fnstart) + cfi_startproc + + cmp r3, #3 @ load only d0 if possible +#ifdef __clang__ + vldrle d0, [sp] + vldmgt sp, {d0-d7} +#else + ldcle p11, cr0, [r0] @ vldrle d0, [sp] + ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7} +#endif + add r0, r0, #64 @ discard the vfp register args + /* FALLTHRU */ +ARM_FUNC_END(ffi_call_VFP) + +ARM_FUNC_START(ffi_call_SYSV) + stm r1, {fp, lr} + mov fp, r1 + + @ This is a bit of a lie wrt the origin of the unwind info, but + @ now we've got the usual frame pointer and two saved registers. + UNWIND(.save {fp,lr}) + UNWIND(.setfp fp, sp) + cfi_def_cfa(fp, 8) + cfi_rel_offset(fp, 0) + cfi_rel_offset(lr, 4) + + mov sp, r0 @ install the stack pointer + mov lr, r2 @ move the fn pointer out of the way + ldr ip, [fp, #16] @ install the static chain + ldmia sp!, {r0-r3} @ move first 4 parameters in registers. + blx lr @ call fn + + @ Load r2 with the pointer to storage for the return value + @ Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + @ Deallocate the stack with the arguments. + mov sp, fp + cfi_def_cfa_register(sp) + + @ Store values stored in registers. 
+ .align 3 + add pc, pc, r3, lsl #3 + nop +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vstr s0, [r2] +#else + stc p10, cr0, [r2] @ vstr s0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vstr d0, [r2] +#else + stc p11, cr0, [r2] @ vstr d0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vstm r2, {d0-d3} +#else + stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3} +#endif + pop {fp,pc} +E(ARM_TYPE_INT64) + str r1, [r2, #4] + nop +E(ARM_TYPE_INT) + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID) + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT) + pop {fp,pc} + + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_call_SYSV) + + +/* + int ffi_closure_inner_* (cif, fun, user_data, frame) +*/ + +ARM_FUNC_START(ffi_go_closure_SYSV) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_SYSV) + +ARM_FUNC_START(ffi_closure_SYSV) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 @ compute entry sp + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) + stmdb sp!, {ip,lr} + + /* Remember that EABI unwind info only applies at call sites. + We need do nothing except note the save of the stack pointer + and the link registers. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_SYSV) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_SYSV) + +ARM_FUNC_START(ffi_go_closure_VFP) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_VFP) + +ARM_FUNC_START(ffi_closure_VFP) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) +#ifdef __clang__ + vstm sp, {d0-d7} +#else + stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7} +#endif + stmdb sp!, {ip,lr} + + /* See above. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_VFP) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_VFP) + +/* Load values returned in registers for both closure entry points. + Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. 
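These closure entry points are reached through the trampolines that ffi_prep_closure_loc installs; from user code the whole path is exercised through libffi's documented closure API. A minimal usage sketch (it assumes only the public ffi.h API; puts_binding is an arbitrary example handler, not something defined in this file):

#include <ffi.h>
#include <stdio.h>

/* Handler invoked by ffi_closure_inner_* with the marshalled arguments. */
static void puts_binding(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void)cif; (void)user_data;
    *(ffi_arg *)ret = puts(*(char **)args[0]);
}

int main(void)
{
    ffi_cif cif;
    ffi_type *argt[1] = { &ffi_type_pointer };
    void *code;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

    if (closure == NULL)
        return 1;
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argt) == FFI_OK &&
        ffi_prep_closure_loc(closure, &cif, puts_binding, NULL, code) == FFI_OK) {
        int (*fn)(char *) = (int (*)(char *))code;   /* call through the trampoline */
        fn("closure called via ffi_closure_SYSV/VFP");
    }
    ffi_closure_free(closure);
    return 0;
}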
*/ + +ARM_FUNC_START_LOCAL(ffi_closure_ret) + cfi_startproc + cfi_rel_offset(sp, 0) + cfi_rel_offset(lr, 4) +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vldr s0, [r2] +#else + ldc p10, cr0, [r2] @ vldr s0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vldr d0, [r2] +#else + ldc p11, cr0, [r2] @ vldr d0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vldm r2, {d0-d3} +#else + ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3} +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_INT64) + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT) + ldr r0, [r2] + ldm sp, {sp,pc} +E(ARM_TYPE_VOID) + ldm sp, {sp,pc} + nop +E(ARM_TYPE_STRUCT) + ldm sp, {sp,pc} + cfi_endproc +ARM_FUNC_END(ffi_closure_ret) + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + +.align PAGE_MAX_SHIFT +ARM_FUNC_START(ffi_closure_trampoline_table_page) +.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr ip, #-PAGE_MAX_SIZE @ the config page is PAGE_MAX_SIZE behind the trampoline page + sub ip, #8 @ account for pc bias + ldr pc, [ip, #4] @ jump to ffi_closure_SYSV or ffi_closure_VFP +.endr +ARM_FUNC_END(ffi_closure_trampoline_table_page) +#endif + +#else + +ARM_FUNC_START(ffi_arm_trampoline) +0: adr ip, 0b + ldr pc, 1f +1: .long 0 +ARM_FUNC_END(ffi_arm_trampoline) + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ +#endif /* __arm__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32 2.S new file mode 100644 index 0000000..5c99d02 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32 2.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2019 Microsoft Corporation. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" +#include "ksarm.h" + + + ; 8 byte aligned AREA to support 8 byte aligned jump tables + MACRO + NESTED_ENTRY_FFI $FuncName, $AreaName, $ExceptHandler + + ; compute the function's labels + __DeriveFunctionLabels $FuncName + + ; determine the area we will put the function into +__FuncArea SETS "|.text|" + IF "$AreaName" != "" +__FuncArea SETS "$AreaName" + ENDIF + + ; set up the exception handler itself +__FuncExceptionHandler SETS "" + IF "$ExceptHandler" != "" +__FuncExceptionHandler SETS "|$ExceptHandler|" + ENDIF + + ; switch to the specified area, jump tables require 8 byte alignment + AREA $__FuncArea,CODE,CODEALIGN,ALIGN=3,READONLY + + ; export the function name + __ExportProc $FuncName + + ; flush any pending literal pool stuff + ROUT + + ; reset the state of the unwind code tracking + __ResetUnwindState + + MEND + +; MACRO +; TABLE_ENTRY $Type, $Table +;$Type_$Table +; MEND + +#define E(index,table) return_##index##_##table + + ; r0: stack + ; r1: frame + ; r2: fn + ; r3: vfp_used + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_VFP + cmp r3, #3 ; load only d0 if possible + vldrle d0, [r0] + vldmgt r0, {d0-d7} + add r0, r0, #64 ; discard the vfp register args + b ffi_call_SYSV + NESTED_END ffi_call_VFP_fake + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_SYSV + stm r1, {fp, lr} + mov fp, r1 + + mov sp, r0 ; install the stack pointer + mov lr, r2 ; move the fn pointer out of the way + ldr ip, [fp, #16] ; install the static chain + ldmia sp!, {r0-r3} ; move first 4 parameters in registers. + blx lr ; call fn + + ; Load r2 with the pointer to storage for the return value + ; Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + ; Deallocate the stack with the arguments. + mov sp, fp + + ; Store values stored in registers. 
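Unlike the GNU build, which checks the 8-byte spacing of each entry with an assembler .if, the armasm build's E() macro simply pastes a distinct label per (type, table) pair. A small preprocessor illustration of the expansion (the labels themselves are only jump-table targets):

#define E(index, table) return_##index##_##table

/* E(ARM_TYPE_INT,   ffi_call)     pastes the label  return_ARM_TYPE_INT_ffi_call     */
/* E(ARM_TYPE_INT64, ffi_closure)  pastes the label  return_ARM_TYPE_INT64_ffi_closure */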
+ ALIGN 8 + lsl r3, #3 + add r3, r3, pc + add r3, #8 + mov pc, r3 + + +E(ARM_TYPE_VFP_S, ffi_call) + ALIGN 8 + vstr s0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_D, ffi_call) + ALIGN 8 + vstr d0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_N, ffi_call) + ALIGN 8 + vstm r2, {d0-d3} + pop {fp,pc} +E(ARM_TYPE_INT64, ffi_call) + ALIGN 8 + str r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_call) + ALIGN 8 + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID, ffi_call) + ALIGN 8 + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT, ffi_call) + ALIGN 8 + cmp r3, #ARM_TYPE_STRUCT + pop {fp,pc} + NESTED_END ffi_call_SYSV_fake + + IMPORT |ffi_closure_inner_SYSV| + /* + int ffi_closure_inner_SYSV + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_SYSV + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_SYSV_0 + NESTED_END ffi_go_closure_SYSV + + ; r3: ffi_closure + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_closure_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + ALTERNATE_ENTRY ffi_closure_SYSV + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; ffi_closure->cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; ffi_closure->fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; ffi_closure->user_data + + ALTERNATE_ENTRY ffi_go_closure_SYSV_0 + add ip, sp, #16 ; compute entry sp + + sub sp, sp, #64+32 ; allocate frame parameter (sizeof(vfp_space) = 64, sizeof(result) = 32) + mov r3, sp ; set frame parameter + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_SYSV ; call the Python closure + + ; Load values returned in registers. + add r2, sp, #64+8 ; address of closure_frame->result + bl ffi_closure_ret ; move result to correct register or memory for type + + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_SYSV_fake + + IMPORT |ffi_closure_inner_VFP| + /* + int ffi_closure_inner_VFP + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_VFP + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_VFP_0 + NESTED_END ffi_go_closure_VFP + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + ; r3: closure + NESTED_ENTRY_FFI ffi_closure_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_closure_VFP + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; load user_data + + ALTERNATE_ENTRY ffi_go_closure_VFP_0 + add ip, sp, #16 ; compute entry sp + sub sp, sp, #32 ; save space for closure_frame->result + vstmdb sp!, {d0-d7} ; push closure_frame->vfp_space + + mov r3, sp ; save closure_frame + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_VFP + + ; Load values returned in registers. + add r2, sp, #64+8 ; load result + bl ffi_closure_ret + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_VFP_fake + +/* Load values returned in registers for both closure entry points. 
+ Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. */ + + NESTED_ENTRY_FFI ffi_closure_ret + stmdb sp!, {fp,lr} + + ALIGN 8 + lsl r0, #3 + add r0, r0, pc + add r0, #8 + mov pc, r0 + +E(ARM_TYPE_VFP_S, ffi_closure) + ALIGN 8 + vldr s0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_D, ffi_closure) + ALIGN 8 + vldr d0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_N, ffi_closure) + ALIGN 8 + vldm r2, {d0-d3} + b call_epilogue +E(ARM_TYPE_INT64, ffi_closure) + ALIGN 8 + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_closure) + ALIGN 8 + ldr r0, [r2] + b call_epilogue +E(ARM_TYPE_VOID, ffi_closure) + ALIGN 8 + b call_epilogue + nop +E(ARM_TYPE_STRUCT, ffi_closure) + ALIGN 8 + b call_epilogue +call_epilogue + ldmfd sp!, {fp,pc} + NESTED_END ffi_closure_ret + + AREA |.trampoline|, DATA, THUMB, READONLY + EXPORT |ffi_arm_trampoline| +|ffi_arm_trampoline| DATA +thisproc adr ip, thisproc + stmdb sp!, {ip, r0} + ldr pc, [pc, #0] + DCD 0 + ;ENDP + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S new file mode 100644 index 0000000..5c99d02 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2019 Microsoft Corporation. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" +#include "ksarm.h" + + + ; 8 byte aligned AREA to support 8 byte aligned jump tables + MACRO + NESTED_ENTRY_FFI $FuncName, $AreaName, $ExceptHandler + + ; compute the function's labels + __DeriveFunctionLabels $FuncName + + ; determine the area we will put the function into +__FuncArea SETS "|.text|" + IF "$AreaName" != "" +__FuncArea SETS "$AreaName" + ENDIF + + ; set up the exception handler itself +__FuncExceptionHandler SETS "" + IF "$ExceptHandler" != "" +__FuncExceptionHandler SETS "|$ExceptHandler|" + ENDIF + + ; switch to the specified area, jump tables require 8 byte alignment + AREA $__FuncArea,CODE,CODEALIGN,ALIGN=3,READONLY + + ; export the function name + __ExportProc $FuncName + + ; flush any pending literal pool stuff + ROUT + + ; reset the state of the unwind code tracking + __ResetUnwindState + + MEND + +; MACRO +; TABLE_ENTRY $Type, $Table +;$Type_$Table +; MEND + +#define E(index,table) return_##index##_##table + + ; r0: stack + ; r1: frame + ; r2: fn + ; r3: vfp_used + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_VFP + cmp r3, #3 ; load only d0 if possible + vldrle d0, [r0] + vldmgt r0, {d0-d7} + add r0, r0, #64 ; discard the vfp register args + b ffi_call_SYSV + NESTED_END ffi_call_VFP_fake + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_SYSV + stm r1, {fp, lr} + mov fp, r1 + + mov sp, r0 ; install the stack pointer + mov lr, r2 ; move the fn pointer out of the way + ldr ip, [fp, #16] ; install the static chain + ldmia sp!, {r0-r3} ; move first 4 parameters in registers. + blx lr ; call fn + + ; Load r2 with the pointer to storage for the return value + ; Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + ; Deallocate the stack with the arguments. + mov sp, fp + + ; Store values stored in registers. 
+ ALIGN 8 + lsl r3, #3 + add r3, r3, pc + add r3, #8 + mov pc, r3 + + +E(ARM_TYPE_VFP_S, ffi_call) + ALIGN 8 + vstr s0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_D, ffi_call) + ALIGN 8 + vstr d0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_N, ffi_call) + ALIGN 8 + vstm r2, {d0-d3} + pop {fp,pc} +E(ARM_TYPE_INT64, ffi_call) + ALIGN 8 + str r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_call) + ALIGN 8 + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID, ffi_call) + ALIGN 8 + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT, ffi_call) + ALIGN 8 + cmp r3, #ARM_TYPE_STRUCT + pop {fp,pc} + NESTED_END ffi_call_SYSV_fake + + IMPORT |ffi_closure_inner_SYSV| + /* + int ffi_closure_inner_SYSV + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_SYSV + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_SYSV_0 + NESTED_END ffi_go_closure_SYSV + + ; r3: ffi_closure + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_closure_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + ALTERNATE_ENTRY ffi_closure_SYSV + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; ffi_closure->cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; ffi_closure->fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; ffi_closure->user_data + + ALTERNATE_ENTRY ffi_go_closure_SYSV_0 + add ip, sp, #16 ; compute entry sp + + sub sp, sp, #64+32 ; allocate frame parameter (sizeof(vfp_space) = 64, sizeof(result) = 32) + mov r3, sp ; set frame parameter + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_SYSV ; call the Python closure + + ; Load values returned in registers. + add r2, sp, #64+8 ; address of closure_frame->result + bl ffi_closure_ret ; move result to correct register or memory for type + + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_SYSV_fake + + IMPORT |ffi_closure_inner_VFP| + /* + int ffi_closure_inner_VFP + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_VFP + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_VFP_0 + NESTED_END ffi_go_closure_VFP + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + ; r3: closure + NESTED_ENTRY_FFI ffi_closure_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_closure_VFP + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; load user_data + + ALTERNATE_ENTRY ffi_go_closure_VFP_0 + add ip, sp, #16 ; compute entry sp + sub sp, sp, #32 ; save space for closure_frame->result + vstmdb sp!, {d0-d7} ; push closure_frame->vfp_space + + mov r3, sp ; save closure_frame + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_VFP + + ; Load values returned in registers. + add r2, sp, #64+8 ; load result + bl ffi_closure_ret + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_VFP_fake + +/* Load values returned in registers for both closure entry points. 
+ Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. */ + + NESTED_ENTRY_FFI ffi_closure_ret + stmdb sp!, {fp,lr} + + ALIGN 8 + lsl r0, #3 + add r0, r0, pc + add r0, #8 + mov pc, r0 + +E(ARM_TYPE_VFP_S, ffi_closure) + ALIGN 8 + vldr s0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_D, ffi_closure) + ALIGN 8 + vldr d0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_N, ffi_closure) + ALIGN 8 + vldm r2, {d0-d3} + b call_epilogue +E(ARM_TYPE_INT64, ffi_closure) + ALIGN 8 + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_closure) + ALIGN 8 + ldr r0, [r2] + b call_epilogue +E(ARM_TYPE_VOID, ffi_closure) + ALIGN 8 + b call_epilogue + nop +E(ARM_TYPE_STRUCT, ffi_closure) + ALIGN 8 + b call_epilogue +call_epilogue + ldmfd sp!, {fp,pc} + NESTED_END ffi_closure_ret + + AREA |.trampoline|, DATA, THUMB, READONLY + EXPORT |ffi_arm_trampoline| +|ffi_arm_trampoline| DATA +thisproc adr ip, thisproc + stmdb sp!, {ip, r0} + ldr pc, [pc, #0] + DCD 0 + ;ENDP + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi 2.c new file mode 100644 index 0000000..3d43397 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi 2.c @@ -0,0 +1,423 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include + +/* #define DEBUG */ + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned int, unsigned int, unsigned int*, unsigned int, + void (*fn)(void)); +extern void ffi_closure_SYSV (ffi_closure *); + +unsigned int pass_struct_on_stack(ffi_type *type) +{ + if(type->type != FFI_TYPE_STRUCT) + return 0; + + if(type->alignment < type->size && + !(type->size == 4 || type->size == 8) && + !(type->size == 8 && type->alignment >= 4)) + return 1; + + if(type->size == 3 || type->size == 5 || type->size == 6 || + type->size == 7) + return 1; + + return 0; +} + +/* ffi_prep_args is called by the assembly routine once stack space + * has been allocated for the function's arguments + * + * This is annoyingly complex since we need to keep track of used + * registers. 
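A sketch of the register-tracking idea (an assumed helper, not code from the vendored file): AVR32 passes the first argument words in r12 down to r8, so a five-bit mask records which slots are already taken, each word-sized argument takes the lowest free slot, and anything that does not fit is spilled to the stack area.

/* Hypothetical helper mirroring the reg_mask scan used in ffi_prep_args. */
int alloc_reg_slot(unsigned int *reg_mask)
{
    int index = 0;
    while ((*reg_mask >> index) & 1)   /* find the first free slot */
        index++;
    if (index >= 5)
        return -1;                     /* all five registers used: spill to stack */
    *reg_mask |= 1u << index;
    return index;                      /* slot n holds what ends up in r12 - n */
}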
+ */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + char *reg_base = stack; + char *stack_base = stack + 20; + unsigned int stack_offset = 0; + unsigned int reg_mask = 0; + + p_argv = ecif->avalue; + + /* If cif->flags is struct then we know it's not passed in registers */ + if(ecif->cif->flags == FFI_TYPE_STRUCT) + { + *(void**)reg_base = ecif->rvalue; + reg_mask |= 1; + } + + for(i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + int type = (*p_arg)->type; + char *addr = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + else if(z == sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + addr = reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + addr = reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + addr = reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!addr) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + + if(type == FFI_TYPE_STRUCT && (*p_arg)->elements[1] == NULL) + type = (*p_arg)->elements[0]->type; + + switch(type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8 *)(*p_argv); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8 *)(*p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16 *)(*p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16 *)(*p_argv); + break; + default: + memcpy(addr, *p_argv, z); + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < 5; i++) + { + if((reg_mask & (1 << i)) == 0) + printf("r%d: (unused)\n", 12 - i); + else + printf("r%d: 0x%08x\n", 12 - i, ((unsigned int*)reg_base)[i]); + } + + for(i = 0; i < stack_offset / 4; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack_base)[i]); + } +#endif +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Round the stack up to a multiple of 8 bytes. This isn't needed + * everywhere, but it is on some platforms, and it doesn't harm + * anything when it isn't needed. 
*/ + cif->bytes = (cif->bytes + 7) & ~7; + + /* Flag to indicate that he return value is in fact a struct */ + cif->rstruct_flag = 0; + + /* Set the return type flag */ + switch(cif->rtype->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + cif->flags = (unsigned)FFI_TYPE_UINT8; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = (unsigned)FFI_TYPE_UINT16; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + cif->flags = (unsigned)FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned)FFI_TYPE_UINT64; + break; + case FFI_TYPE_STRUCT: + cif->rstruct_flag = 1; + if(!pass_struct_on_stack(cif->rtype)) + { + if(cif->rtype->size <= 1) + cif->flags = (unsigned)FFI_TYPE_UINT8; + else if(cif->rtype->size <= 2) + cif->flags = (unsigned)FFI_TYPE_UINT16; + else if(cif->rtype->size <= 4) + cif->flags = (unsigned)FFI_TYPE_UINT32; + else if(cif->rtype->size <= 8) + cif->flags = (unsigned)FFI_TYPE_UINT64; + else + cif->flags = (unsigned)cif->rtype->type; + } + else + cif->flags = (unsigned)cif->rtype->type; + break; + default: + cif->flags = (unsigned)cif->rtype->type; + break; + } + + return FFI_OK; +} + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + unsigned int size = 0, i = 0; + ffi_type **p_arg; + + ecif.cif = cif; + ecif.avalue = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + /* If the return value is a struct and we don't have a return value + * address then we need to make one */ + + /* If cif->flags is struct then it's not suitable for registers */ + if((rvalue == NULL) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch(cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, size, cif->flags, + ecif.rvalue, cif->rstruct_flag, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif) +{ + register unsigned int i, reg_mask = 0; + register void **p_argv; + register ffi_type **p_arg; + register char *reg_base = stack; + register char *stack_base = stack + 20; + register unsigned int stack_offset = 0; + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs + 7; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack)[i]); + } +#endif + + /* If cif->flags is struct then we know it's not passed in registers */ + if(cif->flags == FFI_TYPE_STRUCT) + { + *rvalue = *(void **)reg_base; + reg_mask |= 1; + } + + p_argv = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + + *p_argv = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + *p_argv = (void*)stack_base + stack_offset; + stack_offset += z; + } + else if(z <= sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + *p_argv = (void*)reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + *p_argv = (void*)reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + *p_argv = (void*)reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!*p_argv) + { + *p_argv = (void*)stack_base + stack_offset; + 
stack_offset += z; + } + + if((*p_arg)->type != FFI_TYPE_STRUCT || + (*p_arg)->elements[1] == NULL) + { + if(alignment == 1) + **(unsigned int**)p_argv <<= 24; + else if(alignment == 2) + **(unsigned int**)p_argv <<= 16; + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs; i++) + { + printf("sp+%d: 0x%08x\n", i*4, *(((unsigned int**)avalue)[i])); + } +#endif +} + +/* This function is jumped to by the trampoline */ + +unsigned int ffi_closure_SYSV_inner(ffi_closure *closure, void **respp, + void *args) +{ + ffi_cif *cif; + void **arg_area; + unsigned int i, size = 0; + ffi_type **p_arg; + + cif = closure->cif; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + arg_area = (void **)alloca(size); + + /* this call will initialize ARG_AREA, such that each element in that + * array points to the corresponding value on the stack; and if the + * function returns a structure, it will re-set RESP to point to the + * structure return address. */ + + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif); + + (closure->fun)(cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status ffi_prep_closure_loc(ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + unsigned char *__tramp = (unsigned char*)(&closure->tramp[0]); + unsigned int __fun = (unsigned int)(&ffi_closure_SYSV); + unsigned int __ctx = (unsigned int)(codeloc); + unsigned int __rstruct_flag = (unsigned int)(cif->rstruct_flag); + unsigned int __inner = (unsigned int)(&ffi_closure_SYSV_inner); + *(unsigned int*) &__tramp[0] = 0xebcd1f00; /* pushm r8-r12 */ + *(unsigned int*) &__tramp[4] = 0xfefc0010; /* ld.w r12, pc[16] */ + *(unsigned int*) &__tramp[8] = 0xfefb0010; /* ld.w r11, pc[16] */ + *(unsigned int*) &__tramp[12] = 0xfefa0010; /* ld.w r10, pc[16] */ + *(unsigned int*) &__tramp[16] = 0xfeff0010; /* ld.w pc, pc[16] */ + *(unsigned int*) &__tramp[20] = __ctx; + *(unsigned int*) &__tramp[24] = __rstruct_flag; + *(unsigned int*) &__tramp[28] = __inner; + *(unsigned int*) &__tramp[32] = __fun; + syscall(__NR_cacheflush, 0, (&__tramp[0]), 36); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi.c new file mode 100644 index 0000000..3d43397 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi.c @@ -0,0 +1,423 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
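For reference, the AVR32 ffi_call and ffi_prep_cif_machdep above are driven through libffi's documented public API; a minimal caller-side sketch (not part of the vendored sources):

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *argt[1] = { &ffi_type_pointer };
    char *s = "Hello from ffi_call";
    void *argv[1] = { &s };
    ffi_arg rc;

    /* ffi_prep_cif invokes the per-port ffi_prep_cif_machdep seen above to
       fill in cif->flags (and, on AVR32, cif->rstruct_flag). */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argt) == FFI_OK)
        ffi_call(&cif, FFI_FN(puts), &rc, argv);   /* rc receives puts' int result */

    return 0;
}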
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include + +/* #define DEBUG */ + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned int, unsigned int, unsigned int*, unsigned int, + void (*fn)(void)); +extern void ffi_closure_SYSV (ffi_closure *); + +unsigned int pass_struct_on_stack(ffi_type *type) +{ + if(type->type != FFI_TYPE_STRUCT) + return 0; + + if(type->alignment < type->size && + !(type->size == 4 || type->size == 8) && + !(type->size == 8 && type->alignment >= 4)) + return 1; + + if(type->size == 3 || type->size == 5 || type->size == 6 || + type->size == 7) + return 1; + + return 0; +} + +/* ffi_prep_args is called by the assembly routine once stack space + * has been allocated for the function's arguments + * + * This is annoyingly complex since we need to keep track of used + * registers. + */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + char *reg_base = stack; + char *stack_base = stack + 20; + unsigned int stack_offset = 0; + unsigned int reg_mask = 0; + + p_argv = ecif->avalue; + + /* If cif->flags is struct then we know it's not passed in registers */ + if(ecif->cif->flags == FFI_TYPE_STRUCT) + { + *(void**)reg_base = ecif->rvalue; + reg_mask |= 1; + } + + for(i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + int type = (*p_arg)->type; + char *addr = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + else if(z == sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + addr = reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + addr = reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + addr = reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!addr) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + + if(type == FFI_TYPE_STRUCT && (*p_arg)->elements[1] == NULL) + type = (*p_arg)->elements[0]->type; + + switch(type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8 *)(*p_argv); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8 *)(*p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16 *)(*p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16 *)(*p_argv); + break; + default: + memcpy(addr, *p_argv, z); + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < 5; i++) + { + if((reg_mask & (1 << i)) == 0) + printf("r%d: (unused)\n", 12 - i); + else + printf("r%d: 0x%08x\n", 12 - i, ((unsigned int*)reg_base)[i]); + } + + for(i = 0; i < stack_offset / 4; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned 
int*)stack_base)[i]); + } +#endif +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Round the stack up to a multiple of 8 bytes. This isn't needed + * everywhere, but it is on some platforms, and it doesn't harm + * anything when it isn't needed. */ + cif->bytes = (cif->bytes + 7) & ~7; + + /* Flag to indicate that he return value is in fact a struct */ + cif->rstruct_flag = 0; + + /* Set the return type flag */ + switch(cif->rtype->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + cif->flags = (unsigned)FFI_TYPE_UINT8; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = (unsigned)FFI_TYPE_UINT16; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + cif->flags = (unsigned)FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned)FFI_TYPE_UINT64; + break; + case FFI_TYPE_STRUCT: + cif->rstruct_flag = 1; + if(!pass_struct_on_stack(cif->rtype)) + { + if(cif->rtype->size <= 1) + cif->flags = (unsigned)FFI_TYPE_UINT8; + else if(cif->rtype->size <= 2) + cif->flags = (unsigned)FFI_TYPE_UINT16; + else if(cif->rtype->size <= 4) + cif->flags = (unsigned)FFI_TYPE_UINT32; + else if(cif->rtype->size <= 8) + cif->flags = (unsigned)FFI_TYPE_UINT64; + else + cif->flags = (unsigned)cif->rtype->type; + } + else + cif->flags = (unsigned)cif->rtype->type; + break; + default: + cif->flags = (unsigned)cif->rtype->type; + break; + } + + return FFI_OK; +} + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + unsigned int size = 0, i = 0; + ffi_type **p_arg; + + ecif.cif = cif; + ecif.avalue = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + /* If the return value is a struct and we don't have a return value + * address then we need to make one */ + + /* If cif->flags is struct then it's not suitable for registers */ + if((rvalue == NULL) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch(cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, size, cif->flags, + ecif.rvalue, cif->rstruct_flag, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif) +{ + register unsigned int i, reg_mask = 0; + register void **p_argv; + register ffi_type **p_arg; + register char *reg_base = stack; + register char *stack_base = stack + 20; + register unsigned int stack_offset = 0; + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs + 7; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack)[i]); + } +#endif + + /* If cif->flags is struct then we know it's not passed in registers */ + if(cif->flags == FFI_TYPE_STRUCT) + { + *rvalue = *(void **)reg_base; + reg_mask |= 1; + } + + p_argv = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + + *p_argv = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + *p_argv = (void*)stack_base + stack_offset; + stack_offset += z; + } + else if(z <= sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + *p_argv = (void*)reg_base + (index * 4); + reg_mask |= (1 << index); + } + 
else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + *p_argv = (void*)reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + *p_argv = (void*)reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!*p_argv) + { + *p_argv = (void*)stack_base + stack_offset; + stack_offset += z; + } + + if((*p_arg)->type != FFI_TYPE_STRUCT || + (*p_arg)->elements[1] == NULL) + { + if(alignment == 1) + **(unsigned int**)p_argv <<= 24; + else if(alignment == 2) + **(unsigned int**)p_argv <<= 16; + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs; i++) + { + printf("sp+%d: 0x%08x\n", i*4, *(((unsigned int**)avalue)[i])); + } +#endif +} + +/* This function is jumped to by the trampoline */ + +unsigned int ffi_closure_SYSV_inner(ffi_closure *closure, void **respp, + void *args) +{ + ffi_cif *cif; + void **arg_area; + unsigned int i, size = 0; + ffi_type **p_arg; + + cif = closure->cif; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + arg_area = (void **)alloca(size); + + /* this call will initialize ARG_AREA, such that each element in that + * array points to the corresponding value on the stack; and if the + * function returns a structure, it will re-set RESP to point to the + * structure return address. */ + + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif); + + (closure->fun)(cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status ffi_prep_closure_loc(ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + unsigned char *__tramp = (unsigned char*)(&closure->tramp[0]); + unsigned int __fun = (unsigned int)(&ffi_closure_SYSV); + unsigned int __ctx = (unsigned int)(codeloc); + unsigned int __rstruct_flag = (unsigned int)(cif->rstruct_flag); + unsigned int __inner = (unsigned int)(&ffi_closure_SYSV_inner); + *(unsigned int*) &__tramp[0] = 0xebcd1f00; /* pushm r8-r12 */ + *(unsigned int*) &__tramp[4] = 0xfefc0010; /* ld.w r12, pc[16] */ + *(unsigned int*) &__tramp[8] = 0xfefb0010; /* ld.w r11, pc[16] */ + *(unsigned int*) &__tramp[12] = 0xfefa0010; /* ld.w r10, pc[16] */ + *(unsigned int*) &__tramp[16] = 0xfeff0010; /* ld.w pc, pc[16] */ + *(unsigned int*) &__tramp[20] = __ctx; + *(unsigned int*) &__tramp[24] = __rstruct_flag; + *(unsigned int*) &__tramp[28] = __inner; + *(unsigned int*) &__tramp[32] = __fun; + syscall(__NR_cacheflush, 0, (&__tramp[0]), 36); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget 2.h new file mode 100644 index 0000000..d0c7586 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget 2.h @@ -0,0 +1,55 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2009 Bradley Smith + Target configuration macros for AVR32. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS unsigned int rstruct_flag + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 36 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget.h new file mode 100644 index 0000000..d0c7586 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget.h @@ -0,0 +1,55 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2009 Bradley Smith + Target configuration macros for AVR32. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. 
Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS unsigned int rstruct_flag + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 36 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv 2.S new file mode 100644 index 0000000..a984b3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv 2.S @@ -0,0 +1,208 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + --------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* r12: ffi_prep_args + * r11: &ecif + * r10: size + * r9: cif->flags + * r8: ecif.rvalue + * sp+0: cif->rstruct_flag + * sp+4: fn */ + + .text + .align 1 + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + stm --sp, r0,r1,lr + stm --sp, r8-r12 + mov r0, sp + + /* Make room for all of the new args. */ + sub sp, r10 + /* Pad to make way for potential skipped registers */ + sub sp, 20 + + /* Call ffi_prep_args(stack, &ecif). */ + /* r11 already set */ + mov r1, r12 + mov r12, sp + icall r1 + + /* Save new argument size */ + mov r1, r12 + + /* Move first 5 parameters in registers. */ + ldm sp++, r8-r12 + + /* call (fn) (...). */ + ld.w r1, r0[36] + icall r1 + + /* Remove the space we pushed for the args. */ + mov sp, r0 + + /* Load r1 with the rstruct flag. */ + ld.w r1, sp[32] + + /* Load r9 with the return type code. */ + ld.w r9, sp[12] + + /* Load r8 with the return value pointer. */ + ld.w r8, sp[16] + + /* If the return value pointer is NULL, assume no return value. 
*/ + cp.w r8, 0 + breq .Lend + + /* Check if return type is actually a struct */ + cp.w r1, 0 + breq 1f + + /* Return 8bit */ + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore8 + + /* Return 16bit */ + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore16 + +1: + /* Return 32bit */ + cp.w r9, FFI_TYPE_UINT32 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore32 + + /* Return 64bit */ + cp.w r9, FFI_TYPE_UINT64 + breq .Lstore64 + + /* Didn't match anything */ + bral .Lend + +.Lstore64: + st.w r8[0], r11 + st.w r8[4], r10 + bral .Lend + +.Lstore32: + st.w r8[0], r12 + bral .Lend + +.Lstore16: + st.h r8[0], r12 + bral .Lend + +.Lstore8: + st.b r8[0], r12 + bral .Lend + +.Lend: + sub sp, -20 + ldm sp++, r0,r1,pc + + .size ffi_call_SYSV, . - ffi_call_SYSV + + + /* r12: __ctx + * r11: __rstruct_flag + * r10: __inner */ + + .align 1 + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + stm --sp, r0,lr + mov r0, r11 + mov r8, r10 + sub r10, sp, -8 + sub sp, 12 + st.w sp[8], sp + sub r11, sp, -8 + icall r8 + + /* Check if return type is actually a struct */ + cp.w r0, 0 + breq 1f + + /* Return 8bit */ + cp.w r12, FFI_TYPE_UINT8 + breq .Lget8 + + /* Return 16bit */ + cp.w r12, FFI_TYPE_UINT16 + breq .Lget16 + +1: + /* Return 32bit */ + cp.w r12, FFI_TYPE_UINT32 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT16 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT8 + breq .Lget32 + + /* Return 64bit */ + cp.w r12, FFI_TYPE_UINT64 + breq .Lget64 + + /* Didn't match anything */ + bral .Lclend + +.Lget64: + ld.w r11, sp[0] + ld.w r10, sp[4] + bral .Lclend + +.Lget32: + ld.w r12, sp[0] + bral .Lclend + +.Lget16: + ld.uh r12, sp[0] + bral .Lclend + +.Lget8: + ld.ub r12, sp[0] + bral .Lclend + +.Lclend: + sub sp, -12 + ldm sp++, r0,lr + sub sp, -20 + mov pc, lr + + .size ffi_closure_SYSV, . - ffi_closure_SYSV + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv.S new file mode 100644 index 0000000..a984b3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv.S @@ -0,0 +1,208 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ --------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* r12: ffi_prep_args + * r11: &ecif + * r10: size + * r9: cif->flags + * r8: ecif.rvalue + * sp+0: cif->rstruct_flag + * sp+4: fn */ + + .text + .align 1 + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + stm --sp, r0,r1,lr + stm --sp, r8-r12 + mov r0, sp + + /* Make room for all of the new args. */ + sub sp, r10 + /* Pad to make way for potential skipped registers */ + sub sp, 20 + + /* Call ffi_prep_args(stack, &ecif). */ + /* r11 already set */ + mov r1, r12 + mov r12, sp + icall r1 + + /* Save new argument size */ + mov r1, r12 + + /* Move first 5 parameters in registers. */ + ldm sp++, r8-r12 + + /* call (fn) (...). */ + ld.w r1, r0[36] + icall r1 + + /* Remove the space we pushed for the args. */ + mov sp, r0 + + /* Load r1 with the rstruct flag. */ + ld.w r1, sp[32] + + /* Load r9 with the return type code. */ + ld.w r9, sp[12] + + /* Load r8 with the return value pointer. */ + ld.w r8, sp[16] + + /* If the return value pointer is NULL, assume no return value. */ + cp.w r8, 0 + breq .Lend + + /* Check if return type is actually a struct */ + cp.w r1, 0 + breq 1f + + /* Return 8bit */ + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore8 + + /* Return 16bit */ + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore16 + +1: + /* Return 32bit */ + cp.w r9, FFI_TYPE_UINT32 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore32 + + /* Return 64bit */ + cp.w r9, FFI_TYPE_UINT64 + breq .Lstore64 + + /* Didn't match anything */ + bral .Lend + +.Lstore64: + st.w r8[0], r11 + st.w r8[4], r10 + bral .Lend + +.Lstore32: + st.w r8[0], r12 + bral .Lend + +.Lstore16: + st.h r8[0], r12 + bral .Lend + +.Lstore8: + st.b r8[0], r12 + bral .Lend + +.Lend: + sub sp, -20 + ldm sp++, r0,r1,pc + + .size ffi_call_SYSV, . - ffi_call_SYSV + + + /* r12: __ctx + * r11: __rstruct_flag + * r10: __inner */ + + .align 1 + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + stm --sp, r0,lr + mov r0, r11 + mov r8, r10 + sub r10, sp, -8 + sub sp, 12 + st.w sp[8], sp + sub r11, sp, -8 + icall r8 + + /* Check if return type is actually a struct */ + cp.w r0, 0 + breq 1f + + /* Return 8bit */ + cp.w r12, FFI_TYPE_UINT8 + breq .Lget8 + + /* Return 16bit */ + cp.w r12, FFI_TYPE_UINT16 + breq .Lget16 + +1: + /* Return 32bit */ + cp.w r12, FFI_TYPE_UINT32 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT16 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT8 + breq .Lget32 + + /* Return 64bit */ + cp.w r12, FFI_TYPE_UINT64 + breq .Lget64 + + /* Didn't match anything */ + bral .Lclend + +.Lget64: + ld.w r11, sp[0] + ld.w r10, sp[4] + bral .Lclend + +.Lget32: + ld.w r12, sp[0] + bral .Lclend + +.Lget16: + ld.uh r12, sp[0] + bral .Lclend + +.Lget8: + ld.ub r12, sp[0] + bral .Lclend + +.Lclend: + sub sp, -12 + ldm sp++, r0,lr + sub sp, -20 + mov pc, lr + + .size ffi_closure_SYSV, . - ffi_closure_SYSV + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi 2.c new file mode 100644 index 0000000..22a2acd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi 2.c @@ -0,0 +1,196 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Alexandre K. I. 
de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +#include +#include + +#include +#include + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 3 + +/* + * Return types + */ +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 + +/*====================================================================*/ +/* PROTOTYPE * + /*====================================================================*/ +void ffi_prep_args(unsigned char *, extended_cif *); + +/*====================================================================*/ +/* Externals */ +/* (Assembly) */ +/*====================================================================*/ + +extern void ffi_call_SYSV(unsigned, extended_cif *, void(*)(unsigned char *, extended_cif *), unsigned, void *, void(*fn)(void)); + +/*====================================================================*/ +/* Implementation */ +/* */ +/*====================================================================*/ + + +/* + * This function calculates the return type (size) based on type. 
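+ *
+ * A minimal sketch of how this hook is normally reached (assumed front-end
+ * usage, not something defined in this port): the public ffi_prep_cif()
+ * entry point calls it, so preparing an interface that returns a double
+ * ends up with the FFIBFIN_RET_INT64 flag chosen by the switch below:
+ *
+ *     ffi_cif cif;
+ *     ffi_type *args[1] = { &ffi_type_sint32 };
+ *     ffi_status st = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+ *                                  &ffi_type_double, args);
+ *     assuming st == FFI_OK, cif.flags is now FFIBFIN_RET_INT64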
+ */ + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* --------------------------------------* + * Return handling * + * --------------------------------------*/ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + cif->flags = FFIBFIN_RET_VOID; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + cif->flags = FFIBFIN_RET_HALFWORD; + break; + case FFI_TYPE_UINT8: + cif->flags = FFIBFIN_RET_BYTE; + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_SINT8: + cif->flags = FFIBFIN_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFIBFIN_RET_INT64; + break; + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4){ + cif->flags = FFIBFIN_RET_INT32; + }else if (cif->rtype->size == 8){ + cif->flags = FFIBFIN_RET_INT64; + }else{ + //it will return via a hidden pointer in P0 + cif->flags = FFIBFIN_RET_VOID; + } + break; + default: + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* + * This will prepare the arguments and will call the assembly routine + * cif = the call interface + * fn = the function to be called + * rvalue = the return value + * avalue = the arguments + */ +void ffi_call(ffi_cif *cif, void(*fn)(void), void *rvalue, void **avalue) +{ + int ret_type = cif->flags; + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(cif->bytes, &ecif, ffi_prep_args, ret_type, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +/* +* This function prepares the parameters (copies them from the ecif to the stack) +* to call the function (ffi_prep_args is called by the assembly routine in file +* sysv.S, which also calls the actual function) +*/ +void ffi_prep_args(unsigned char *stack, extended_cif *ecif) +{ + register unsigned int i = 0; + void **p_argv; + unsigned char *argp; + ffi_type **p_arg; + argp = stack; + p_argv = ecif->avalue; + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) { + size_t z; + z = (*p_arg)->size; + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) { + case FFI_TYPE_SINT8: { + signed char v = *(SINT8 *)(* p_argv); + signed int t = v; + *(signed int *) argp = t; + } + break; + case FFI_TYPE_UINT8: { + unsigned char v = *(UINT8 *)(* p_argv); + unsigned int t = v; + *(unsigned int *) argp = t; + } + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) * (SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) * (UINT16 *)(* p_argv); + break; + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + break; + } + } else if (z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int) * (UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + } +} + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi.c new file mode 100644 index 0000000..22a2acd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi.c @@ -0,0 +1,196 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Alexandre K. I. 
de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +#include +#include + +#include +#include + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 3 + +/* + * Return types + */ +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 + +/*====================================================================*/ +/* PROTOTYPE * + /*====================================================================*/ +void ffi_prep_args(unsigned char *, extended_cif *); + +/*====================================================================*/ +/* Externals */ +/* (Assembly) */ +/*====================================================================*/ + +extern void ffi_call_SYSV(unsigned, extended_cif *, void(*)(unsigned char *, extended_cif *), unsigned, void *, void(*fn)(void)); + +/*====================================================================*/ +/* Implementation */ +/* */ +/*====================================================================*/ + + +/* + * This function calculates the return type (size) based on type. 
+ */ + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* --------------------------------------* + * Return handling * + * --------------------------------------*/ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + cif->flags = FFIBFIN_RET_VOID; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + cif->flags = FFIBFIN_RET_HALFWORD; + break; + case FFI_TYPE_UINT8: + cif->flags = FFIBFIN_RET_BYTE; + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_SINT8: + cif->flags = FFIBFIN_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFIBFIN_RET_INT64; + break; + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4){ + cif->flags = FFIBFIN_RET_INT32; + }else if (cif->rtype->size == 8){ + cif->flags = FFIBFIN_RET_INT64; + }else{ + //it will return via a hidden pointer in P0 + cif->flags = FFIBFIN_RET_VOID; + } + break; + default: + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* + * This will prepare the arguments and will call the assembly routine + * cif = the call interface + * fn = the function to be called + * rvalue = the return value + * avalue = the arguments + */ +void ffi_call(ffi_cif *cif, void(*fn)(void), void *rvalue, void **avalue) +{ + int ret_type = cif->flags; + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(cif->bytes, &ecif, ffi_prep_args, ret_type, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +/* +* This function prepares the parameters (copies them from the ecif to the stack) +* to call the function (ffi_prep_args is called by the assembly routine in file +* sysv.S, which also calls the actual function) +*/ +void ffi_prep_args(unsigned char *stack, extended_cif *ecif) +{ + register unsigned int i = 0; + void **p_argv; + unsigned char *argp; + ffi_type **p_arg; + argp = stack; + p_argv = ecif->avalue; + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) { + size_t z; + z = (*p_arg)->size; + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) { + case FFI_TYPE_SINT8: { + signed char v = *(SINT8 *)(* p_argv); + signed int t = v; + *(signed int *) argp = t; + } + break; + case FFI_TYPE_UINT8: { + unsigned char v = *(UINT8 *)(* p_argv); + unsigned int t = v; + *(unsigned int *) argp = t; + } + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) * (SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) * (UINT16 *)(* p_argv); + break; + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + break; + } + } else if (z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int) * (UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + } +} + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget 2.h new file mode 100644 index 0000000..2175c01 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget 2.h @@ -0,0 +1,43 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Alexandre K. I. 
de Mendonca + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget.h new file mode 100644 index 0000000..2175c01 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget.h @@ -0,0 +1,43 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Alexandre K. I. de Mendonca + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv 2.S new file mode 100644 index 0000000..f4278be --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv 2.S @@ -0,0 +1,179 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012 Alexandre K. I. de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text +.align 4 + + /* + There is a "feature" in the bfin toolchain that it puts a _ before function names + that's why the function here it's called _ffi_call_SYSV and not ffi_call_SYSV + */ + .global _ffi_call_SYSV; + .type _ffi_call_SYSV, STT_FUNC; + .func ffi_call_SYSV + + /* + cif->bytes = R0 (fp+8) + &ecif = R1 (fp+12) + ffi_prep_args = R2 (fp+16) + ret_type = stack (fp+20) + ecif.rvalue = stack (fp+24) + fn = stack (fp+28) + got (fp+32) + + There is room for improvement here (we can use temporary registers + instead of saving the values in the memory) + REGS: + P5 => Stack pointer (function arguments) + R5 => cif->bytes + R4 => ret->type + + FP-20 = P3 + FP-16 = SP (parameters area) + FP-12 = SP (temp) + FP-08 = function return part 1 [R0] + FP-04 = function return part 2 [R1] + */ + +_ffi_call_SYSV: +.prologue: + LINK 20; + [FP-20] = P3; + [FP+8] = R0; + [FP+12] = R1; + [FP+16] = R2; + +.allocate_stack: + //alocate cif->bytes into the stack + R1 = [FP+8]; + R0 = SP; + R0 = R0 - R1; + R1 = 4; + R0 = R0 - R1; + [FP-12] = SP; + SP = R0; + [FP-16] = SP; + +.call_prep_args: + //get the addr of prep_args + P0 = [P3 + _ffi_prep_args@FUNCDESC_GOT17M4]; + P1 = [P0]; + P3 = [P0+4]; + R0 = [FP-16];//SP (parameter area) + R1 = [FP+12];//ecif + call (P1); + +.call_user_function: + //ajust SP so as to allow the user function access the parameters on the stack + SP = [FP-16]; //point to function parameters + R0 = [SP]; + R1 = [SP+4]; + R2 = [SP+8]; + //load user function address + P0 = FP; + P0 +=28; + P1 = [P0]; + P1 = [P1]; + P3 = [P0+4]; + /* + For functions returning aggregate values (struct) occupying more than 8 bytes, + the caller allocates the return value object on the stack and the address + of this object is passed to the callee as a hidden argument in register P0. + */ + P0 = [FP+24]; + + call (P1); + SP = [FP-12]; +.compute_return: + P2 = [FP-20]; + [FP-8] = R0; + [FP-4] = R1; + + R0 = [FP+20]; + R1 = R0 << 2; + + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R1 = [P2]; + + P2 = [FP+-20]; + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R0 = [FP-8]; + R1 = [FP-4]; + jump (P2); + +/* +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 +*/ +.align 4 +.align 4 +.rettable: + .dd .epilogue - .rettable + .dd .rbyte - .rettable; + .dd .rhalfword - .rettable; + .dd .rint64 - .rettable; + .dd .rint32 - .rettable; + +.rbyte: + P0 = [FP+24]; + R0 = R0.B (Z); + [P0] = R0; + JUMP .epilogue +.rhalfword: + P0 = [FP+24]; + R0 = R0.L; + [P0] = R0; + JUMP .epilogue +.rint64: + P0 = [FP+24];// &rvalue + [P0] = R0; + [P0+4] = R1; + JUMP .epilogue +.rint32: + P0 = [FP+24]; + [P0] = R0; +.epilogue: + R0 = [FP+8]; + R1 = [FP+12]; + R2 = [FP+16]; + P3 = [FP-20]; + UNLINK; + RTS; + +.size _ffi_call_SYSV,.-_ffi_call_SYSV; +.endfunc diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv.S new file mode 100644 index 0000000..f4278be --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv.S @@ -0,0 +1,179 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012 Alexandre K. I. 
de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text +.align 4 + + /* + There is a "feature" in the bfin toolchain that it puts a _ before function names + that's why the function here it's called _ffi_call_SYSV and not ffi_call_SYSV + */ + .global _ffi_call_SYSV; + .type _ffi_call_SYSV, STT_FUNC; + .func ffi_call_SYSV + + /* + cif->bytes = R0 (fp+8) + &ecif = R1 (fp+12) + ffi_prep_args = R2 (fp+16) + ret_type = stack (fp+20) + ecif.rvalue = stack (fp+24) + fn = stack (fp+28) + got (fp+32) + + There is room for improvement here (we can use temporary registers + instead of saving the values in the memory) + REGS: + P5 => Stack pointer (function arguments) + R5 => cif->bytes + R4 => ret->type + + FP-20 = P3 + FP-16 = SP (parameters area) + FP-12 = SP (temp) + FP-08 = function return part 1 [R0] + FP-04 = function return part 2 [R1] + */ + +_ffi_call_SYSV: +.prologue: + LINK 20; + [FP-20] = P3; + [FP+8] = R0; + [FP+12] = R1; + [FP+16] = R2; + +.allocate_stack: + //alocate cif->bytes into the stack + R1 = [FP+8]; + R0 = SP; + R0 = R0 - R1; + R1 = 4; + R0 = R0 - R1; + [FP-12] = SP; + SP = R0; + [FP-16] = SP; + +.call_prep_args: + //get the addr of prep_args + P0 = [P3 + _ffi_prep_args@FUNCDESC_GOT17M4]; + P1 = [P0]; + P3 = [P0+4]; + R0 = [FP-16];//SP (parameter area) + R1 = [FP+12];//ecif + call (P1); + +.call_user_function: + //ajust SP so as to allow the user function access the parameters on the stack + SP = [FP-16]; //point to function parameters + R0 = [SP]; + R1 = [SP+4]; + R2 = [SP+8]; + //load user function address + P0 = FP; + P0 +=28; + P1 = [P0]; + P1 = [P1]; + P3 = [P0+4]; + /* + For functions returning aggregate values (struct) occupying more than 8 bytes, + the caller allocates the return value object on the stack and the address + of this object is passed to the callee as a hidden argument in register P0. 
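+
+ A minimal sketch of a caller that takes this path (assumed API-level usage,
+ not part of this file): for a struct wider than 8 bytes the generic
+ ffi_call() passes the user's rvalue pointer down as ecif.rvalue, which is
+ the value this stub loads from FP+24 into P0:
+
+     ffi_type *fields[] = { &ffi_type_double, &ffi_type_double, NULL };
+     ffi_type big = { 0, 0, FFI_TYPE_STRUCT, fields };  16 bytes, so > 8
+     ffi_cif cif; double out[2];
+     ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &big, NULL);
+     ffi_call(&cif, fn, out, NULL);  fn is a hypothetical void (*)(void) callee
+
+ Here out is the storage whose address ends up in P0 below.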
+ */ + P0 = [FP+24]; + + call (P1); + SP = [FP-12]; +.compute_return: + P2 = [FP-20]; + [FP-8] = R0; + [FP-4] = R1; + + R0 = [FP+20]; + R1 = R0 << 2; + + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R1 = [P2]; + + P2 = [FP+-20]; + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R0 = [FP-8]; + R1 = [FP-4]; + jump (P2); + +/* +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 +*/ +.align 4 +.align 4 +.rettable: + .dd .epilogue - .rettable + .dd .rbyte - .rettable; + .dd .rhalfword - .rettable; + .dd .rint64 - .rettable; + .dd .rint32 - .rettable; + +.rbyte: + P0 = [FP+24]; + R0 = R0.B (Z); + [P0] = R0; + JUMP .epilogue +.rhalfword: + P0 = [FP+24]; + R0 = R0.L; + [P0] = R0; + JUMP .epilogue +.rint64: + P0 = [FP+24];// &rvalue + [P0] = R0; + [P0+4] = R1; + JUMP .epilogue +.rint32: + P0 = [FP+24]; + [P0] = R0; +.epilogue: + R0 = [FP+8]; + R1 = [FP+12]; + R2 = [FP+16]; + P3 = [FP-20]; + UNLINK; + RTS; + +.size _ffi_call_SYSV,.-_ffi_call_SYSV; +.endfunc diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures 2.c new file mode 100644 index 0000000..b5eb2bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures 2.c @@ -0,0 +1,1004 @@ +/* ----------------------------------------------------------------------- + closures.c - Copyright (c) 2019 Anthony Green + Copyright (c) 2007, 2009, 2010 Red Hat, Inc. + Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + Code to allocate and deallocate memory for closures. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined __linux__ && !defined _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif + +#include +#include +#include + +#ifdef __NetBSD__ +#include +#endif + +#if __NetBSD_Version__ - 0 >= 799007200 +/* NetBSD with PROT_MPROTECT */ +#include + +#include +#include + +static const size_t overhead = + (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ? 
+ sizeof(max_align_t) + : sizeof(void *) + sizeof(size_t); + +#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d))) + +void * +ffi_closure_alloc (size_t size, void **code) +{ + static size_t page_size; + size_t rounded_size; + void *codeseg, *dataseg; + int prot; + + /* Expect that PAX mprotect is active and a separate code mapping is necessary. */ + if (!code) + return NULL; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */ + rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1); + + /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */ + prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC); + dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0); + if (dataseg == MAP_FAILED) + return NULL; + + /* Create secondary mapping and switch it to RX. */ + codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP); + if (codeseg == MAP_FAILED) { + munmap(dataseg, rounded_size); + return NULL; + } + if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) { + munmap(codeseg, rounded_size); + munmap(dataseg, rounded_size); + return NULL; + } + + /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */ + memcpy(dataseg, &rounded_size, sizeof(rounded_size)); + memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *)); + *code = ADD_TO_POINTER(codeseg, overhead); + return ADD_TO_POINTER(dataseg, overhead); +} + +void +ffi_closure_free (void *ptr) +{ + void *codeseg, *dataseg; + size_t rounded_size; + + dataseg = ADD_TO_POINTER(ptr, -overhead); + memcpy(&rounded_size, dataseg, sizeof(rounded_size)); + memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *)); + munmap(dataseg, rounded_size); + munmap(codeseg, rounded_size); +} +#else /* !NetBSD with PROT_MPROTECT */ + +#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE +# if __linux__ && !defined(__ANDROID__) +/* This macro indicates it may be forbidden to map anonymous memory + with both write and execute permission. Code compiled when this + option is defined will attempt to map such pages once, but if it + fails, it falls back to creating a temporary file in a writable and + executable filesystem and mapping pages from it into separate + locations in the virtual memory space, one location writable and + another executable. */ +# define FFI_MMAP_EXEC_WRIT 1 +# define HAVE_MNTENT 1 +# endif +# if defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__) +/* Windows systems may have Data Execution Protection (DEP) enabled, + which requires the use of VirtualMalloc/VirtualFree to alloc/free + executable memory. */ +# define FFI_MMAP_EXEC_WRIT 1 +# endif +#endif + +#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX +# if defined(__linux__) && !defined(__ANDROID__) +/* When defined to 1 check for SELinux and if SELinux is active, + don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that + might cause audit messages. 
*/ +# define FFI_MMAP_EXEC_SELINUX 1 +# endif +#endif + +#if FFI_CLOSURES + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ + +#include +#include +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#include + +extern void *ffi_closure_trampoline_table_page; + +typedef struct ffi_trampoline_table ffi_trampoline_table; +typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry; + +struct ffi_trampoline_table +{ + /* contiguous writable and executable pages */ + vm_address_t config_page; + vm_address_t trampoline_page; + + /* free list tracking */ + uint16_t free_count; + ffi_trampoline_table_entry *free_list; + ffi_trampoline_table_entry *free_list_pool; + + ffi_trampoline_table *prev; + ffi_trampoline_table *next; +}; + +struct ffi_trampoline_table_entry +{ + void *(*trampoline) (void); + ffi_trampoline_table_entry *next; +}; + +/* Total number of trampolines that fit in one trampoline table */ +#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE) + +static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER; +static ffi_trampoline_table *ffi_trampoline_tables = NULL; + +static ffi_trampoline_table * +ffi_trampoline_table_alloc (void) +{ + ffi_trampoline_table *table; + vm_address_t config_page; + vm_address_t trampoline_page; + vm_address_t trampoline_page_template; + vm_prot_t cur_prot; + vm_prot_t max_prot; + kern_return_t kt; + uint16_t i; + + /* Allocate two pages -- a config page and a placeholder page */ + config_page = 0x0; + kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2, + VM_FLAGS_ANYWHERE); + if (kt != KERN_SUCCESS) + return NULL; + + /* Remap the trampoline table on top of the placeholder page */ + trampoline_page = config_page + PAGE_MAX_SIZE; + trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page; +#ifdef __arm__ + /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */ + trampoline_page_template &= ~1UL; +#endif + kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, + VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template, + FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE); + if (kt != KERN_SUCCESS) + { + vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2); + return NULL; + } + + /* We have valid trampoline and config pages */ + table = calloc (1, sizeof (ffi_trampoline_table)); + table->free_count = FFI_TRAMPOLINE_COUNT; + table->config_page = config_page; + table->trampoline_page = trampoline_page; + + /* Create and initialize the free list */ + table->free_list_pool = + calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry)); + + for (i = 0; i < table->free_count; i++) + { + ffi_trampoline_table_entry *entry = &table->free_list_pool[i]; + entry->trampoline = + (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE)); + + if (i < table->free_count - 1) + entry->next = &table->free_list_pool[i + 1]; + } + + table->free_list = table->free_list_pool; + + return table; +} + +static void +ffi_trampoline_table_free (ffi_trampoline_table *table) +{ + /* Remove from the list */ + if (table->prev != NULL) + table->prev->next = table->next; + + if (table->next != NULL) + table->next->prev = table->prev; + + /* Deallocate pages */ + vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2); + + /* Deallocate free list */ + free (table->free_list_pool); + free (table); +} + +void * +ffi_closure_alloc (size_t size, void **code) +{ + /* Create the closure */ + ffi_closure *closure = malloc (size); + if (closure == 
NULL) + return NULL; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Check for an active trampoline table with available entries. */ + ffi_trampoline_table *table = ffi_trampoline_tables; + if (table == NULL || table->free_list == NULL) + { + table = ffi_trampoline_table_alloc (); + if (table == NULL) + { + pthread_mutex_unlock (&ffi_trampoline_lock); + free (closure); + return NULL; + } + + /* Insert the new table at the top of the list */ + table->next = ffi_trampoline_tables; + if (table->next != NULL) + table->next->prev = table; + + ffi_trampoline_tables = table; + } + + /* Claim the free entry */ + ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list; + ffi_trampoline_tables->free_list = entry->next; + ffi_trampoline_tables->free_count--; + entry->next = NULL; + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Initialize the return values */ + *code = entry->trampoline; +#ifdef HAVE_PTRAUTH + *code = ptrauth_sign_unauthenticated (*code, ptrauth_key_asia, 0); +#endif + closure->trampoline_table = table; + closure->trampoline_table_entry = entry; + + return closure; +} + +void +ffi_closure_free (void *ptr) +{ + ffi_closure *closure = ptr; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Fetch the table and entry references */ + ffi_trampoline_table *table = closure->trampoline_table; + ffi_trampoline_table_entry *entry = closure->trampoline_table_entry; + + /* Return the entry to the free list */ + entry->next = table->free_list; + table->free_list = entry; + table->free_count++; + + /* If all trampolines within this table are free, and at least one other table exists, deallocate + * the table */ + if (table->free_count == FFI_TRAMPOLINE_COUNT + && ffi_trampoline_tables != table) + { + ffi_trampoline_table_free (table); + } + else if (ffi_trampoline_tables != table) + { + /* Otherwise, bump this table to the top of the list */ + table->prev = NULL; + table->next = ffi_trampoline_tables; + if (ffi_trampoline_tables != NULL) + ffi_trampoline_tables->prev = table; + + ffi_trampoline_tables = table; + } + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Free the closure */ + free (closure); +} + +#endif + +// Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations. + +#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */ + +#define USE_LOCKS 1 +#define USE_DL_PREFIX 1 +#ifdef __GNUC__ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 1 +#endif +#endif + +/* We need to use mmap, not sbrk. */ +#define HAVE_MORECORE 0 + +/* We could, in theory, support mremap, but it wouldn't buy us anything. */ +#define HAVE_MREMAP 0 + +/* We have no use for this, so save some code and data. */ +#define NO_MALLINFO 1 + +/* We need all allocations to be in regular segments, otherwise we + lose track of the corresponding code address. */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + +/* Don't allocate more than a page unless needed. */ +#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize) + +#include +#include +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +#if !defined(X86_WIN32) && !defined(X86_WIN64) && !defined(_M_ARM64) +#ifdef HAVE_MNTENT +#include +#endif /* HAVE_MNTENT */ +#include +#include + +/* We don't want sys/mman.h to be included after we redefine mmap and + dlmunmap. 
*/ +#include +#define LACKS_SYS_MMAN_H 1 + +#if FFI_MMAP_EXEC_SELINUX +#include +#include + +static int selinux_enabled = -1; + +static int +selinux_enabled_check (void) +{ + struct statfs sfs; + FILE *f; + char *buf = NULL; + size_t len = 0; + + if (statfs ("/selinux", &sfs) >= 0 + && (unsigned int) sfs.f_type == 0xf97cff8cU) + return 1; + f = fopen ("/proc/mounts", "r"); + if (f == NULL) + return 0; + while (getline (&buf, &len, f) >= 0) + { + char *p = strchr (buf, ' '); + if (p == NULL) + break; + p = strchr (p + 1, ' '); + if (p == NULL) + break; + if (strncmp (p + 1, "selinuxfs ", 10) == 0) + { + free (buf); + fclose (f); + return 1; + } + } + free (buf); + fclose (f); + return 0; +} + +#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \ + : (selinux_enabled = selinux_enabled_check ())) + +#else + +#define is_selinux_enabled() 0 + +#endif /* !FFI_MMAP_EXEC_SELINUX */ + +/* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */ +#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX +#include + +static int emutramp_enabled = -1; + +static int +emutramp_enabled_check (void) +{ + char *buf = NULL; + size_t len = 0; + FILE *f; + int ret; + f = fopen ("/proc/self/status", "r"); + if (f == NULL) + return 0; + ret = 0; + + while (getline (&buf, &len, f) != -1) + if (!strncmp (buf, "PaX:", 4)) + { + char emutramp; + if (sscanf (buf, "%*s %*c%c", &emutramp) == 1) + ret = (emutramp == 'E'); + break; + } + free (buf); + fclose (f); + return ret; +} + +#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \ + : (emutramp_enabled = emutramp_enabled_check ())) +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +#elif defined (__CYGWIN__) || defined(__INTERIX) + +#include + +/* Cygwin is Linux-like, but not quite that Linux-like. */ +#define is_selinux_enabled() 0 + +#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */ + +#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX +#define is_emutramp_enabled() 0 +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Declare all functions defined in dlmalloc.c as static. */ +static void *dlmalloc(size_t); +static void dlfree(void*); +static void *dlcalloc(size_t, size_t) MAYBE_UNUSED; +static void *dlrealloc(void *, size_t) MAYBE_UNUSED; +static void *dlmemalign(size_t, size_t) MAYBE_UNUSED; +static void *dlvalloc(size_t) MAYBE_UNUSED; +static int dlmallopt(int, int) MAYBE_UNUSED; +static size_t dlmalloc_footprint(void) MAYBE_UNUSED; +static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED; +static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED; +static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED; +static void *dlpvalloc(size_t) MAYBE_UNUSED; +static int dlmalloc_trim(size_t) MAYBE_UNUSED; +static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED; +static void dlmalloc_stats(void) MAYBE_UNUSED; + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) +/* Use these for mmap and munmap within dlmalloc.c. 
*/ +static void *dlmmap(void *, size_t, int, int, int, off_t); +static int dlmunmap(void *, size_t); +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +#define mmap dlmmap +#define munmap dlmunmap + +#include "dlmalloc.c" + +#undef mmap +#undef munmap + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) + +/* A mutex used to synchronize access to *exec* variables in this file. */ +static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* A file descriptor of a temporary file from which we'll map + executable pages. */ +static int execfd = -1; + +/* The amount of space already allocated from the temporary file. */ +static size_t execsize = 0; + +/* Open a temporary file name, and immediately unlink it. */ +static int +open_temp_exec_file_name (char *name, int flags) +{ + int fd; + +#ifdef HAVE_MKOSTEMP + fd = mkostemp (name, flags); +#else + fd = mkstemp (name); +#endif + + if (fd != -1) + unlink (name); + + return fd; +} + +/* Open a temporary file in the named directory. */ +static int +open_temp_exec_file_dir (const char *dir) +{ + static const char suffix[] = "/ffiXXXXXX"; + int lendir, flags; + char *tempname; +#ifdef O_TMPFILE + int fd; +#endif + +#ifdef O_CLOEXEC + flags = O_CLOEXEC; +#else + flags = 0; +#endif + +#ifdef O_TMPFILE + fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700); + /* If the running system does not support the O_TMPFILE flag then retry without it. */ + if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) { + return fd; + } else { + errno = 0; + } +#endif + + lendir = (int) strlen (dir); + tempname = __builtin_alloca (lendir + sizeof (suffix)); + + if (!tempname) + return -1; + + memcpy (tempname, dir, lendir); + memcpy (tempname + lendir, suffix, sizeof (suffix)); + + return open_temp_exec_file_name (tempname, flags); +} + +/* Open a temporary file in the directory in the named environment + variable. */ +static int +open_temp_exec_file_env (const char *envvar) +{ + const char *value = getenv (envvar); + + if (!value) + return -1; + + return open_temp_exec_file_dir (value); +} + +#ifdef HAVE_MNTENT +/* Open a temporary file in an executable and writable mount point + listed in the mounts file. Subsequent calls with the same mounts + keep searching for mount points in the same file. Providing NULL + as the mounts file closes the file. */ +static int +open_temp_exec_file_mnt (const char *mounts) +{ + static const char *last_mounts; + static FILE *last_mntent; + + if (mounts != last_mounts) + { + if (last_mntent) + endmntent (last_mntent); + + last_mounts = mounts; + + if (mounts) + last_mntent = setmntent (mounts, "r"); + else + last_mntent = NULL; + } + + if (!last_mntent) + return -1; + + for (;;) + { + int fd; + struct mntent mnt; + char buf[MAXPATHLEN * 3]; + + if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL) + return -1; + + if (hasmntopt (&mnt, "ro") + || hasmntopt (&mnt, "noexec") + || access (mnt.mnt_dir, W_OK)) + continue; + + fd = open_temp_exec_file_dir (mnt.mnt_dir); + + if (fd != -1) + return fd; + } +} +#endif /* HAVE_MNTENT */ + +/* Instructions to look for a location to hold a temporary file that + can be mapped in for execution. 
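+
+   Illustrative note (an assumption about embedder-side usage, nothing in
+   this file requires it): because the first entry below consults TMPDIR, a
+   process that cannot obtain writable+executable anonymous mappings can
+   steer where the backing file is created before its first closure
+   allocation, e.g.
+
+       setenv ("TMPDIR", "/path/on/exec/mount", 1);  hypothetical exec-mounted path
+       void *code;
+       void *writable = ffi_closure_alloc (sizeof (ffi_closure), &code);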
*/ +static struct +{ + int (*func)(const char *); + const char *arg; + int repeat; +} open_temp_exec_file_opts[] = { + { open_temp_exec_file_env, "TMPDIR", 0 }, + { open_temp_exec_file_dir, "/tmp", 0 }, + { open_temp_exec_file_dir, "/var/tmp", 0 }, + { open_temp_exec_file_dir, "/dev/shm", 0 }, + { open_temp_exec_file_env, "HOME", 0 }, +#ifdef HAVE_MNTENT + { open_temp_exec_file_mnt, "/etc/mtab", 1 }, + { open_temp_exec_file_mnt, "/proc/mounts", 1 }, +#endif /* HAVE_MNTENT */ +}; + +/* Current index into open_temp_exec_file_opts. */ +static int open_temp_exec_file_opts_idx = 0; + +/* Reset a current multi-call func, then advances to the next entry. + If we're at the last, go back to the first and return nonzero, + otherwise return zero. */ +static int +open_temp_exec_file_opts_next (void) +{ + if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL); + + open_temp_exec_file_opts_idx++; + if (open_temp_exec_file_opts_idx + == (sizeof (open_temp_exec_file_opts) + / sizeof (*open_temp_exec_file_opts))) + { + open_temp_exec_file_opts_idx = 0; + return 1; + } + + return 0; +} + +/* Return a file descriptor of a temporary zero-sized file in a + writable and executable filesystem. */ +static int +open_temp_exec_file (void) +{ + int fd; + + do + { + fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func + (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg); + + if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat + || fd == -1) + { + if (open_temp_exec_file_opts_next ()) + break; + } + } + while (fd == -1); + + return fd; +} + +/* We need to allocate space in a file that will be backing a writable + mapping. Several problems exist with the usual approaches: + - fallocate() is Linux-only + - posix_fallocate() is not available on all platforms + - ftruncate() does not allocate space on filesystems with sparse files + Failure to allocate the space will cause SIGBUS to be thrown when + the mapping is subsequently written to. */ +static int +allocate_space (int fd, off_t offset, off_t len) +{ + static size_t page_size; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + unsigned char buf[page_size]; + memset (buf, 0, page_size); + + while (len > 0) + { + off_t to_write = (len < page_size) ? len : page_size; + if (write (fd, buf, to_write) < to_write) + return -1; + len -= to_write; + } + + return 0; +} + +/* Map in a chunk of memory from the temporary exec file into separate + locations in the virtual memory address space, one writable and one + executable. Returns the address of the writable portion, after + storing an offset to the corresponding executable portion at the + last word of the requested chunk. */ +static void * +dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset) +{ + void *ptr; + + if (execfd == -1) + { + open_temp_exec_file_opts_idx = 0; + retry_open: + execfd = open_temp_exec_file (); + if (execfd == -1) + return MFAIL; + } + + offset = execsize; + + if (allocate_space (execfd, offset, length)) + return MFAIL; + + flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS); + flags |= MAP_SHARED; + + ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC, + flags, execfd, offset); + if (ptr == MFAIL) + { + if (!offset) + { + close (execfd); + goto retry_open; + } + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. 
Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + + return MFAIL; + } + else if (!offset + && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts_next (); + + start = mmap (start, length, prot, flags, execfd, offset); + + if (start == MFAIL) + { + munmap (ptr, length); + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + return start; + } + + mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start; + + execsize += length; + + return start; +} + +/* Map in a writable and executable chunk of memory if possible. + Failing that, fall back to dlmmap_locked. */ +static void * +dlmmap (void *start, size_t length, int prot, + int flags, int fd, off_t offset) +{ + void *ptr; + + assert (start == NULL && length % malloc_getpagesize == 0 + && prot == (PROT_READ | PROT_WRITE) + && flags == (MAP_PRIVATE | MAP_ANONYMOUS) + && fd == -1 && offset == 0); + + if (execfd == -1 && is_emutramp_enabled ()) + { + ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset); + return ptr; + } + + if (execfd == -1 && !is_selinux_enabled ()) + { + ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset); + + if (ptr != MFAIL || (errno != EPERM && errno != EACCES)) + /* Cool, no need to mess with separate segments. */ + return ptr; + + /* If MREMAP_DUP is ever introduced and implemented, try mmap + with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with + MREMAP_DUP and prot at this point. */ + } + + if (execsize == 0 || execfd == -1) + { + pthread_mutex_lock (&open_temp_exec_file_mutex); + ptr = dlmmap_locked (start, length, prot, flags, offset); + pthread_mutex_unlock (&open_temp_exec_file_mutex); + + return ptr; + } + + return dlmmap_locked (start, length, prot, flags, offset); +} + +/* Release memory at the given address, as well as the corresponding + executable page if it's separate. */ +static int +dlmunmap (void *start, size_t length) +{ + /* We don't bother decreasing execsize or truncating the file, since + we can't quite tell whether we're unmapping the end of the file. + We don't expect frequent deallocation anyway. If we did, we + could locate pages in the file by writing to the pages being + deallocated and checking that the file contents change. + Yuck. */ + msegmentptr seg = segment_holding (gm, start); + void *code; + + if (seg && (code = add_segment_exec_offset (start, seg)) != start) + { + int ret = munmap (code, length); + if (ret) + return ret; + } + + return munmap (start, length); +} + +#if FFI_CLOSURE_FREE_CODE +/* Return segment holding given code address. */ +static msegmentptr +segment_holding_code (mstate m, char* addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= add_segment_exec_offset (sp->base, sp) + && addr < add_segment_exec_offset (sp->base, sp) + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} +#endif + +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +/* Allocate a chunk of memory with the given size. Returns a pointer + to the writable address, and sets *CODE to the executable + corresponding virtual address. 
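+
+   A minimal usage sketch (assumed caller code, not part of this file): the
+   writable pointer is what gets prepared and later freed, while *CODE is the
+   address callers actually invoke. Here cif, handler and user_data are
+   assumed to have been set up elsewhere for a void(void) signature:
+
+       void *code;
+       ffi_closure *cl = ffi_closure_alloc (sizeof (ffi_closure), &code);
+       if (cl != NULL)
+         {
+           if (ffi_prep_closure_loc (cl, &cif, handler, user_data, code) == FFI_OK)
+             ((void (*)(void)) code) ();
+           ffi_closure_free (cl);
+         }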
*/ +void * +ffi_closure_alloc (size_t size, void **code) +{ + void *ptr; + + if (!code) + return NULL; + + ptr = FFI_CLOSURE_PTR (dlmalloc (size)); + + if (ptr) + { + msegmentptr seg = segment_holding (gm, ptr); + + *code = add_segment_exec_offset (ptr, seg); + } + + return ptr; +} + +void * +ffi_data_to_code_pointer (void *data) +{ + msegmentptr seg = segment_holding (gm, data); + /* We expect closures to be allocated with ffi_closure_alloc(), in + which case seg will be non-NULL. However, some users take on the + burden of managing this memory themselves, in which case this + we'll just return data. */ + if (seg) + return add_segment_exec_offset (data, seg); + else + return data; +} + +/* Release a chunk of memory allocated with ffi_closure_alloc. If + FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the + writable or the executable address given. Otherwise, only the + writable address can be provided here. */ +void +ffi_closure_free (void *ptr) +{ +#if FFI_CLOSURE_FREE_CODE + msegmentptr seg = segment_holding_code (gm, ptr); + + if (seg) + ptr = sub_segment_exec_offset (ptr, seg); +#endif + + dlfree (FFI_RESTORE_PTR (ptr)); +} + +# else /* ! FFI_MMAP_EXEC_WRIT */ + +/* On many systems, memory returned by malloc is writable and + executable, so just use it. */ + +#include + +void * +ffi_closure_alloc (size_t size, void **code) +{ + if (!code) + return NULL; + + return *code = FFI_CLOSURE_PTR (malloc (size)); +} + +void +ffi_closure_free (void *ptr) +{ + free (FFI_RESTORE_PTR (ptr)); +} + +void * +ffi_data_to_code_pointer (void *data) +{ + return data; +} + +# endif /* ! FFI_MMAP_EXEC_WRIT */ +#endif /* FFI_CLOSURES */ + +#endif /* NetBSD with PROT_MPROTECT */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c new file mode 100644 index 0000000..b5eb2bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c @@ -0,0 +1,1004 @@ +/* ----------------------------------------------------------------------- + closures.c - Copyright (c) 2019 Anthony Green + Copyright (c) 2007, 2009, 2010 Red Hat, Inc. + Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + Code to allocate and deallocate memory for closures. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#if defined __linux__ && !defined _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif + +#include +#include +#include + +#ifdef __NetBSD__ +#include +#endif + +#if __NetBSD_Version__ - 0 >= 799007200 +/* NetBSD with PROT_MPROTECT */ +#include + +#include +#include + +static const size_t overhead = + (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ? + sizeof(max_align_t) + : sizeof(void *) + sizeof(size_t); + +#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d))) + +void * +ffi_closure_alloc (size_t size, void **code) +{ + static size_t page_size; + size_t rounded_size; + void *codeseg, *dataseg; + int prot; + + /* Expect that PAX mprotect is active and a separate code mapping is necessary. */ + if (!code) + return NULL; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */ + rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1); + + /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */ + prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC); + dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0); + if (dataseg == MAP_FAILED) + return NULL; + + /* Create secondary mapping and switch it to RX. */ + codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP); + if (codeseg == MAP_FAILED) { + munmap(dataseg, rounded_size); + return NULL; + } + if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) { + munmap(codeseg, rounded_size); + munmap(dataseg, rounded_size); + return NULL; + } + + /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */ + memcpy(dataseg, &rounded_size, sizeof(rounded_size)); + memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *)); + *code = ADD_TO_POINTER(codeseg, overhead); + return ADD_TO_POINTER(dataseg, overhead); +} + +void +ffi_closure_free (void *ptr) +{ + void *codeseg, *dataseg; + size_t rounded_size; + + dataseg = ADD_TO_POINTER(ptr, -overhead); + memcpy(&rounded_size, dataseg, sizeof(rounded_size)); + memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *)); + munmap(dataseg, rounded_size); + munmap(codeseg, rounded_size); +} +#else /* !NetBSD with PROT_MPROTECT */ + +#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE +# if __linux__ && !defined(__ANDROID__) +/* This macro indicates it may be forbidden to map anonymous memory + with both write and execute permission. Code compiled when this + option is defined will attempt to map such pages once, but if it + fails, it falls back to creating a temporary file in a writable and + executable filesystem and mapping pages from it into separate + locations in the virtual memory space, one location writable and + another executable. */ +# define FFI_MMAP_EXEC_WRIT 1 +# define HAVE_MNTENT 1 +# endif +# if defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__) +/* Windows systems may have Data Execution Protection (DEP) enabled, + which requires the use of VirtualMalloc/VirtualFree to alloc/free + executable memory. 
*/ +# define FFI_MMAP_EXEC_WRIT 1 +# endif +#endif + +#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX +# if defined(__linux__) && !defined(__ANDROID__) +/* When defined to 1 check for SELinux and if SELinux is active, + don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that + might cause audit messages. */ +# define FFI_MMAP_EXEC_SELINUX 1 +# endif +#endif + +#if FFI_CLOSURES + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ + +#include +#include +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#include + +extern void *ffi_closure_trampoline_table_page; + +typedef struct ffi_trampoline_table ffi_trampoline_table; +typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry; + +struct ffi_trampoline_table +{ + /* contiguous writable and executable pages */ + vm_address_t config_page; + vm_address_t trampoline_page; + + /* free list tracking */ + uint16_t free_count; + ffi_trampoline_table_entry *free_list; + ffi_trampoline_table_entry *free_list_pool; + + ffi_trampoline_table *prev; + ffi_trampoline_table *next; +}; + +struct ffi_trampoline_table_entry +{ + void *(*trampoline) (void); + ffi_trampoline_table_entry *next; +}; + +/* Total number of trampolines that fit in one trampoline table */ +#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE) + +static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER; +static ffi_trampoline_table *ffi_trampoline_tables = NULL; + +static ffi_trampoline_table * +ffi_trampoline_table_alloc (void) +{ + ffi_trampoline_table *table; + vm_address_t config_page; + vm_address_t trampoline_page; + vm_address_t trampoline_page_template; + vm_prot_t cur_prot; + vm_prot_t max_prot; + kern_return_t kt; + uint16_t i; + + /* Allocate two pages -- a config page and a placeholder page */ + config_page = 0x0; + kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2, + VM_FLAGS_ANYWHERE); + if (kt != KERN_SUCCESS) + return NULL; + + /* Remap the trampoline table on top of the placeholder page */ + trampoline_page = config_page + PAGE_MAX_SIZE; + trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page; +#ifdef __arm__ + /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */ + trampoline_page_template &= ~1UL; +#endif + kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, + VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template, + FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE); + if (kt != KERN_SUCCESS) + { + vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2); + return NULL; + } + + /* We have valid trampoline and config pages */ + table = calloc (1, sizeof (ffi_trampoline_table)); + table->free_count = FFI_TRAMPOLINE_COUNT; + table->config_page = config_page; + table->trampoline_page = trampoline_page; + + /* Create and initialize the free list */ + table->free_list_pool = + calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry)); + + for (i = 0; i < table->free_count; i++) + { + ffi_trampoline_table_entry *entry = &table->free_list_pool[i]; + entry->trampoline = + (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE)); + + if (i < table->free_count - 1) + entry->next = &table->free_list_pool[i + 1]; + } + + table->free_list = table->free_list_pool; + + return table; +} + +static void +ffi_trampoline_table_free (ffi_trampoline_table *table) +{ + /* Remove from the list */ + if (table->prev != NULL) + table->prev->next = table->next; + + if (table->next != NULL) + table->next->prev = table->prev; + + /* 
Deallocate pages */ + vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2); + + /* Deallocate free list */ + free (table->free_list_pool); + free (table); +} + +void * +ffi_closure_alloc (size_t size, void **code) +{ + /* Create the closure */ + ffi_closure *closure = malloc (size); + if (closure == NULL) + return NULL; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Check for an active trampoline table with available entries. */ + ffi_trampoline_table *table = ffi_trampoline_tables; + if (table == NULL || table->free_list == NULL) + { + table = ffi_trampoline_table_alloc (); + if (table == NULL) + { + pthread_mutex_unlock (&ffi_trampoline_lock); + free (closure); + return NULL; + } + + /* Insert the new table at the top of the list */ + table->next = ffi_trampoline_tables; + if (table->next != NULL) + table->next->prev = table; + + ffi_trampoline_tables = table; + } + + /* Claim the free entry */ + ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list; + ffi_trampoline_tables->free_list = entry->next; + ffi_trampoline_tables->free_count--; + entry->next = NULL; + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Initialize the return values */ + *code = entry->trampoline; +#ifdef HAVE_PTRAUTH + *code = ptrauth_sign_unauthenticated (*code, ptrauth_key_asia, 0); +#endif + closure->trampoline_table = table; + closure->trampoline_table_entry = entry; + + return closure; +} + +void +ffi_closure_free (void *ptr) +{ + ffi_closure *closure = ptr; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Fetch the table and entry references */ + ffi_trampoline_table *table = closure->trampoline_table; + ffi_trampoline_table_entry *entry = closure->trampoline_table_entry; + + /* Return the entry to the free list */ + entry->next = table->free_list; + table->free_list = entry; + table->free_count++; + + /* If all trampolines within this table are free, and at least one other table exists, deallocate + * the table */ + if (table->free_count == FFI_TRAMPOLINE_COUNT + && ffi_trampoline_tables != table) + { + ffi_trampoline_table_free (table); + } + else if (ffi_trampoline_tables != table) + { + /* Otherwise, bump this table to the top of the list */ + table->prev = NULL; + table->next = ffi_trampoline_tables; + if (ffi_trampoline_tables != NULL) + ffi_trampoline_tables->prev = table; + + ffi_trampoline_tables = table; + } + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Free the closure */ + free (closure); +} + +#endif + +// Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations. + +#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */ + +#define USE_LOCKS 1 +#define USE_DL_PREFIX 1 +#ifdef __GNUC__ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 1 +#endif +#endif + +/* We need to use mmap, not sbrk. */ +#define HAVE_MORECORE 0 + +/* We could, in theory, support mremap, but it wouldn't buy us anything. */ +#define HAVE_MREMAP 0 + +/* We have no use for this, so save some code and data. */ +#define NO_MALLINFO 1 + +/* We need all allocations to be in regular segments, otherwise we + lose track of the corresponding code address. */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + +/* Don't allocate more than a page unless needed. 
*/ +#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize) + +#include +#include +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +#if !defined(X86_WIN32) && !defined(X86_WIN64) && !defined(_M_ARM64) +#ifdef HAVE_MNTENT +#include +#endif /* HAVE_MNTENT */ +#include +#include + +/* We don't want sys/mman.h to be included after we redefine mmap and + dlmunmap. */ +#include +#define LACKS_SYS_MMAN_H 1 + +#if FFI_MMAP_EXEC_SELINUX +#include +#include + +static int selinux_enabled = -1; + +static int +selinux_enabled_check (void) +{ + struct statfs sfs; + FILE *f; + char *buf = NULL; + size_t len = 0; + + if (statfs ("/selinux", &sfs) >= 0 + && (unsigned int) sfs.f_type == 0xf97cff8cU) + return 1; + f = fopen ("/proc/mounts", "r"); + if (f == NULL) + return 0; + while (getline (&buf, &len, f) >= 0) + { + char *p = strchr (buf, ' '); + if (p == NULL) + break; + p = strchr (p + 1, ' '); + if (p == NULL) + break; + if (strncmp (p + 1, "selinuxfs ", 10) == 0) + { + free (buf); + fclose (f); + return 1; + } + } + free (buf); + fclose (f); + return 0; +} + +#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \ + : (selinux_enabled = selinux_enabled_check ())) + +#else + +#define is_selinux_enabled() 0 + +#endif /* !FFI_MMAP_EXEC_SELINUX */ + +/* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */ +#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX +#include + +static int emutramp_enabled = -1; + +static int +emutramp_enabled_check (void) +{ + char *buf = NULL; + size_t len = 0; + FILE *f; + int ret; + f = fopen ("/proc/self/status", "r"); + if (f == NULL) + return 0; + ret = 0; + + while (getline (&buf, &len, f) != -1) + if (!strncmp (buf, "PaX:", 4)) + { + char emutramp; + if (sscanf (buf, "%*s %*c%c", &emutramp) == 1) + ret = (emutramp == 'E'); + break; + } + free (buf); + fclose (f); + return ret; +} + +#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \ + : (emutramp_enabled = emutramp_enabled_check ())) +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +#elif defined (__CYGWIN__) || defined(__INTERIX) + +#include + +/* Cygwin is Linux-like, but not quite that Linux-like. */ +#define is_selinux_enabled() 0 + +#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */ + +#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX +#define is_emutramp_enabled() 0 +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Declare all functions defined in dlmalloc.c as static. */ +static void *dlmalloc(size_t); +static void dlfree(void*); +static void *dlcalloc(size_t, size_t) MAYBE_UNUSED; +static void *dlrealloc(void *, size_t) MAYBE_UNUSED; +static void *dlmemalign(size_t, size_t) MAYBE_UNUSED; +static void *dlvalloc(size_t) MAYBE_UNUSED; +static int dlmallopt(int, int) MAYBE_UNUSED; +static size_t dlmalloc_footprint(void) MAYBE_UNUSED; +static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED; +static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED; +static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED; +static void *dlpvalloc(size_t) MAYBE_UNUSED; +static int dlmalloc_trim(size_t) MAYBE_UNUSED; +static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED; +static void dlmalloc_stats(void) MAYBE_UNUSED; + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) +/* Use these for mmap and munmap within dlmalloc.c. 
*/ +static void *dlmmap(void *, size_t, int, int, int, off_t); +static int dlmunmap(void *, size_t); +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +#define mmap dlmmap +#define munmap dlmunmap + +#include "dlmalloc.c" + +#undef mmap +#undef munmap + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) + +/* A mutex used to synchronize access to *exec* variables in this file. */ +static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* A file descriptor of a temporary file from which we'll map + executable pages. */ +static int execfd = -1; + +/* The amount of space already allocated from the temporary file. */ +static size_t execsize = 0; + +/* Open a temporary file name, and immediately unlink it. */ +static int +open_temp_exec_file_name (char *name, int flags) +{ + int fd; + +#ifdef HAVE_MKOSTEMP + fd = mkostemp (name, flags); +#else + fd = mkstemp (name); +#endif + + if (fd != -1) + unlink (name); + + return fd; +} + +/* Open a temporary file in the named directory. */ +static int +open_temp_exec_file_dir (const char *dir) +{ + static const char suffix[] = "/ffiXXXXXX"; + int lendir, flags; + char *tempname; +#ifdef O_TMPFILE + int fd; +#endif + +#ifdef O_CLOEXEC + flags = O_CLOEXEC; +#else + flags = 0; +#endif + +#ifdef O_TMPFILE + fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700); + /* If the running system does not support the O_TMPFILE flag then retry without it. */ + if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) { + return fd; + } else { + errno = 0; + } +#endif + + lendir = (int) strlen (dir); + tempname = __builtin_alloca (lendir + sizeof (suffix)); + + if (!tempname) + return -1; + + memcpy (tempname, dir, lendir); + memcpy (tempname + lendir, suffix, sizeof (suffix)); + + return open_temp_exec_file_name (tempname, flags); +} + +/* Open a temporary file in the directory in the named environment + variable. */ +static int +open_temp_exec_file_env (const char *envvar) +{ + const char *value = getenv (envvar); + + if (!value) + return -1; + + return open_temp_exec_file_dir (value); +} + +#ifdef HAVE_MNTENT +/* Open a temporary file in an executable and writable mount point + listed in the mounts file. Subsequent calls with the same mounts + keep searching for mount points in the same file. Providing NULL + as the mounts file closes the file. */ +static int +open_temp_exec_file_mnt (const char *mounts) +{ + static const char *last_mounts; + static FILE *last_mntent; + + if (mounts != last_mounts) + { + if (last_mntent) + endmntent (last_mntent); + + last_mounts = mounts; + + if (mounts) + last_mntent = setmntent (mounts, "r"); + else + last_mntent = NULL; + } + + if (!last_mntent) + return -1; + + for (;;) + { + int fd; + struct mntent mnt; + char buf[MAXPATHLEN * 3]; + + if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL) + return -1; + + if (hasmntopt (&mnt, "ro") + || hasmntopt (&mnt, "noexec") + || access (mnt.mnt_dir, W_OK)) + continue; + + fd = open_temp_exec_file_dir (mnt.mnt_dir); + + if (fd != -1) + return fd; + } +} +#endif /* HAVE_MNTENT */ + +/* Instructions to look for a location to hold a temporary file that + can be mapped in for execution. 
*/ +static struct +{ + int (*func)(const char *); + const char *arg; + int repeat; +} open_temp_exec_file_opts[] = { + { open_temp_exec_file_env, "TMPDIR", 0 }, + { open_temp_exec_file_dir, "/tmp", 0 }, + { open_temp_exec_file_dir, "/var/tmp", 0 }, + { open_temp_exec_file_dir, "/dev/shm", 0 }, + { open_temp_exec_file_env, "HOME", 0 }, +#ifdef HAVE_MNTENT + { open_temp_exec_file_mnt, "/etc/mtab", 1 }, + { open_temp_exec_file_mnt, "/proc/mounts", 1 }, +#endif /* HAVE_MNTENT */ +}; + +/* Current index into open_temp_exec_file_opts. */ +static int open_temp_exec_file_opts_idx = 0; + +/* Reset a current multi-call func, then advances to the next entry. + If we're at the last, go back to the first and return nonzero, + otherwise return zero. */ +static int +open_temp_exec_file_opts_next (void) +{ + if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL); + + open_temp_exec_file_opts_idx++; + if (open_temp_exec_file_opts_idx + == (sizeof (open_temp_exec_file_opts) + / sizeof (*open_temp_exec_file_opts))) + { + open_temp_exec_file_opts_idx = 0; + return 1; + } + + return 0; +} + +/* Return a file descriptor of a temporary zero-sized file in a + writable and executable filesystem. */ +static int +open_temp_exec_file (void) +{ + int fd; + + do + { + fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func + (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg); + + if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat + || fd == -1) + { + if (open_temp_exec_file_opts_next ()) + break; + } + } + while (fd == -1); + + return fd; +} + +/* We need to allocate space in a file that will be backing a writable + mapping. Several problems exist with the usual approaches: + - fallocate() is Linux-only + - posix_fallocate() is not available on all platforms + - ftruncate() does not allocate space on filesystems with sparse files + Failure to allocate the space will cause SIGBUS to be thrown when + the mapping is subsequently written to. */ +static int +allocate_space (int fd, off_t offset, off_t len) +{ + static size_t page_size; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + unsigned char buf[page_size]; + memset (buf, 0, page_size); + + while (len > 0) + { + off_t to_write = (len < page_size) ? len : page_size; + if (write (fd, buf, to_write) < to_write) + return -1; + len -= to_write; + } + + return 0; +} + +/* Map in a chunk of memory from the temporary exec file into separate + locations in the virtual memory address space, one writable and one + executable. Returns the address of the writable portion, after + storing an offset to the corresponding executable portion at the + last word of the requested chunk. */ +static void * +dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset) +{ + void *ptr; + + if (execfd == -1) + { + open_temp_exec_file_opts_idx = 0; + retry_open: + execfd = open_temp_exec_file (); + if (execfd == -1) + return MFAIL; + } + + offset = execsize; + + if (allocate_space (execfd, offset, length)) + return MFAIL; + + flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS); + flags |= MAP_SHARED; + + ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC, + flags, execfd, offset); + if (ptr == MFAIL) + { + if (!offset) + { + close (execfd); + goto retry_open; + } + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. 
Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + + return MFAIL; + } + else if (!offset + && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts_next (); + + start = mmap (start, length, prot, flags, execfd, offset); + + if (start == MFAIL) + { + munmap (ptr, length); + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + return start; + } + + mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start; + + execsize += length; + + return start; +} + +/* Map in a writable and executable chunk of memory if possible. + Failing that, fall back to dlmmap_locked. */ +static void * +dlmmap (void *start, size_t length, int prot, + int flags, int fd, off_t offset) +{ + void *ptr; + + assert (start == NULL && length % malloc_getpagesize == 0 + && prot == (PROT_READ | PROT_WRITE) + && flags == (MAP_PRIVATE | MAP_ANONYMOUS) + && fd == -1 && offset == 0); + + if (execfd == -1 && is_emutramp_enabled ()) + { + ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset); + return ptr; + } + + if (execfd == -1 && !is_selinux_enabled ()) + { + ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset); + + if (ptr != MFAIL || (errno != EPERM && errno != EACCES)) + /* Cool, no need to mess with separate segments. */ + return ptr; + + /* If MREMAP_DUP is ever introduced and implemented, try mmap + with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with + MREMAP_DUP and prot at this point. */ + } + + if (execsize == 0 || execfd == -1) + { + pthread_mutex_lock (&open_temp_exec_file_mutex); + ptr = dlmmap_locked (start, length, prot, flags, offset); + pthread_mutex_unlock (&open_temp_exec_file_mutex); + + return ptr; + } + + return dlmmap_locked (start, length, prot, flags, offset); +} + +/* Release memory at the given address, as well as the corresponding + executable page if it's separate. */ +static int +dlmunmap (void *start, size_t length) +{ + /* We don't bother decreasing execsize or truncating the file, since + we can't quite tell whether we're unmapping the end of the file. + We don't expect frequent deallocation anyway. If we did, we + could locate pages in the file by writing to the pages being + deallocated and checking that the file contents change. + Yuck. */ + msegmentptr seg = segment_holding (gm, start); + void *code; + + if (seg && (code = add_segment_exec_offset (start, seg)) != start) + { + int ret = munmap (code, length); + if (ret) + return ret; + } + + return munmap (start, length); +} + +#if FFI_CLOSURE_FREE_CODE +/* Return segment holding given code address. */ +static msegmentptr +segment_holding_code (mstate m, char* addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= add_segment_exec_offset (sp->base, sp) + && addr < add_segment_exec_offset (sp->base, sp) + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} +#endif + +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +/* Allocate a chunk of memory with the given size. Returns a pointer + to the writable address, and sets *CODE to the executable + corresponding virtual address. 
*/ +void * +ffi_closure_alloc (size_t size, void **code) +{ + void *ptr; + + if (!code) + return NULL; + + ptr = FFI_CLOSURE_PTR (dlmalloc (size)); + + if (ptr) + { + msegmentptr seg = segment_holding (gm, ptr); + + *code = add_segment_exec_offset (ptr, seg); + } + + return ptr; +} + +void * +ffi_data_to_code_pointer (void *data) +{ + msegmentptr seg = segment_holding (gm, data); + /* We expect closures to be allocated with ffi_closure_alloc(), in + which case seg will be non-NULL. However, some users take on the + burden of managing this memory themselves, in which case this + we'll just return data. */ + if (seg) + return add_segment_exec_offset (data, seg); + else + return data; +} + +/* Release a chunk of memory allocated with ffi_closure_alloc. If + FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the + writable or the executable address given. Otherwise, only the + writable address can be provided here. */ +void +ffi_closure_free (void *ptr) +{ +#if FFI_CLOSURE_FREE_CODE + msegmentptr seg = segment_holding_code (gm, ptr); + + if (seg) + ptr = sub_segment_exec_offset (ptr, seg); +#endif + + dlfree (FFI_RESTORE_PTR (ptr)); +} + +# else /* ! FFI_MMAP_EXEC_WRIT */ + +/* On many systems, memory returned by malloc is writable and + executable, so just use it. */ + +#include + +void * +ffi_closure_alloc (size_t size, void **code) +{ + if (!code) + return NULL; + + return *code = FFI_CLOSURE_PTR (malloc (size)); +} + +void +ffi_closure_free (void *ptr) +{ + free (FFI_RESTORE_PTR (ptr)); +} + +void * +ffi_data_to_code_pointer (void *data) +{ + return data; +} + +# endif /* ! FFI_MMAP_EXEC_WRIT */ +#endif /* FFI_CLOSURES */ + +#endif /* NetBSD with PROT_MPROTECT */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi 2.c new file mode 100644 index 0000000..9011fde --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi 2.c @@ -0,0 +1,386 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998 Cygnus Solutions + Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + Copyright (C) 2007 Free Software Foundation, Inc. + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +static ffi_status +initialize_aggregate_packed_struct (ffi_type * arg) +{ + ffi_type **ptr; + + FFI_ASSERT (arg != NULL); + + FFI_ASSERT (arg->elements != NULL); + FFI_ASSERT (arg->size == 0); + FFI_ASSERT (arg->alignment == 0); + + ptr = &(arg->elements[0]); + + while ((*ptr) != NULL) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT (ffi_type_test ((*ptr))); + + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +int +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + unsigned int struct_count = 0; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + + switch ((*p_arg)->type) + { + case FFI_TYPE_STRUCT: + { + z = (*p_arg)->size; + if (z <= 4) + { + memcpy (argp, *p_argv, z); + z = 4; + } + else if (z <= 8) + { + memcpy (argp, *p_argv, z); + z = 8; + } + else + { + unsigned int uiLocOnStack; + z = sizeof (void *); + uiLocOnStack = 4 * ecif->cif->nargs + struct_count; + struct_count = struct_count + (*p_arg)->size; + *(unsigned int *) argp = + (unsigned int) (UINT32 *) (stack + uiLocOnStack); + memcpy ((stack + uiLocOnStack), *p_argv, (*p_arg)->size); + } + break; + } + default: + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = + (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = + (unsigned int) *(UINT16 *) (*p_argv); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else if (z == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + else + memcpy (argp, *p_argv, z); + break; + } + p_argv++; + argp += z; + } + + return (struct_count); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_core (ffi_cif * cif, + ffi_abi abi, unsigned int isvariadic, + unsigned int nfixedargs, unsigned int ntotalargs, + ffi_type * rtype, ffi_type ** atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT (cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + FFI_ASSERT (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI); + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; + + if ((cif->rtype->size == 0) + && (initialize_aggregate_packed_struct (cif->rtype) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (cif->rtype); + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (*ptr); + + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN (bytes, (*ptr)->alignment); + if ((*ptr)->type == FFI_TYPE_STRUCT) + { + if ((*ptr)->size > 8) + { + bytes += (*ptr)->size; + bytes += sizeof (void *); + } 
+ else + { + if ((*ptr)->size > 4) + bytes += 8; + else + bytes += 4; + } + } + else + bytes += STACK_ARG_SIZE ((*ptr)->size); + } + + cif->bytes = bytes; + + return ffi_prep_cif_machdep (cif); +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV (int (*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, unsigned *, void (*fn) ()) + __attribute__ ((__visibility__ ("hidden"))); + +void +ffi_call (ffi_cif * cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT (0); + break; + } +} + +/* Because the following variables are not exported outside libffi, we + mark them hidden. */ + +/* Assembly code for the jump stub. */ +extern const char ffi_cris_trampoline_template[] + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + ffi_prep_closure_inner function. */ +extern const int ffi_cris_trampoline_fn_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + closure data. */ +extern const int ffi_cris_trampoline_closure_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* This function is sibling-called (jumped to) by the closure + trampoline. We get R10..R13 at PARAMS[0..3] and a copy of [SP] at + PARAMS[4] to simplify handling of a straddling parameter. A copy + of R9 is at PARAMS[5] and SP at PARAMS[6]. These parameters are + put at the appropriate place in CLOSURE which is then executed and + the return value is passed back to the caller. */ + +static unsigned long long +ffi_prep_closure_inner (void **params, ffi_closure* closure) +{ + char *register_args = (char *) params; + void *struct_ret = params[5]; + char *stack_args = params[6]; + char *ptr = register_args; + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + + /* Max room needed is number of arguments as 64-bit values. */ + void **avalue = alloca (closure->cif->nargs * sizeof(void *)); + int i; + int doing_regs; + long long llret = 0; + + /* Find the address of each argument. */ + for (i = 0, doing_regs = 1; i < cif->nargs; i++) + { + /* Types up to and including 8 bytes go by-value. */ + if (arg_types[i]->size <= 4) + { + avalue[i] = ptr; + ptr += 4; + } + else if (arg_types[i]->size <= 8) + { + avalue[i] = ptr; + ptr += 8; + } + else + { + FFI_ASSERT (arg_types[i]->type == FFI_TYPE_STRUCT); + + /* Passed by-reference, so copy the pointer. */ + avalue[i] = *(void **) ptr; + ptr += 4; + } + + /* If we've handled more arguments than fit in registers, start + looking at the those passed on the stack. Step over the + first one if we had a straddling parameter. */ + if (doing_regs && ptr >= register_args + 4*4) + { + ptr = stack_args + ((ptr > register_args + 4*4) ? 4 : 0); + doing_regs = 0; + } + } + + /* Invoke the closure. 
*/ + (closure->fun) (cif, + + cif->rtype->type == FFI_TYPE_STRUCT + /* The caller allocated space for the return + structure, and passed a pointer to this space in + R9. */ + ? struct_ret + + /* We take advantage of being able to ignore that + the high part isn't set if the return value is + not in R10:R11, but in R10 only. */ + : (void *) &llret, + + avalue, closure->user_data); + + return llret; +} + +/* API function: Prepare the trampoline. */ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif *, void *, void **, void*), + void *user_data, + void *codeloc) +{ + void *innerfn = ffi_prep_closure_inner; + FFI_ASSERT (cif->abi == FFI_SYSV); + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + memcpy (closure->tramp, ffi_cris_trampoline_template, + FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE); + memcpy (closure->tramp + ffi_cris_trampoline_fn_offset, + &innerfn, sizeof (void *)); + memcpy (closure->tramp + ffi_cris_trampoline_closure_offset, + &codeloc, sizeof (void *)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi.c new file mode 100644 index 0000000..9011fde --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi.c @@ -0,0 +1,386 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998 Cygnus Solutions + Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + Copyright (C) 2007 Free Software Foundation, Inc. + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +static ffi_status +initialize_aggregate_packed_struct (ffi_type * arg) +{ + ffi_type **ptr; + + FFI_ASSERT (arg != NULL); + + FFI_ASSERT (arg->elements != NULL); + FFI_ASSERT (arg->size == 0); + FFI_ASSERT (arg->alignment == 0); + + ptr = &(arg->elements[0]); + + while ((*ptr) != NULL) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT (ffi_type_test ((*ptr))); + + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? 
+ arg->alignment : (*ptr)->alignment; + + ptr++; + } + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +int +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + unsigned int struct_count = 0; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + + switch ((*p_arg)->type) + { + case FFI_TYPE_STRUCT: + { + z = (*p_arg)->size; + if (z <= 4) + { + memcpy (argp, *p_argv, z); + z = 4; + } + else if (z <= 8) + { + memcpy (argp, *p_argv, z); + z = 8; + } + else + { + unsigned int uiLocOnStack; + z = sizeof (void *); + uiLocOnStack = 4 * ecif->cif->nargs + struct_count; + struct_count = struct_count + (*p_arg)->size; + *(unsigned int *) argp = + (unsigned int) (UINT32 *) (stack + uiLocOnStack); + memcpy ((stack + uiLocOnStack), *p_argv, (*p_arg)->size); + } + break; + } + default: + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = + (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = + (unsigned int) *(UINT16 *) (*p_argv); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else if (z == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + else + memcpy (argp, *p_argv, z); + break; + } + p_argv++; + argp += z; + } + + return (struct_count); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_core (ffi_cif * cif, + ffi_abi abi, unsigned int isvariadic, + unsigned int nfixedargs, unsigned int ntotalargs, + ffi_type * rtype, ffi_type ** atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT (cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + FFI_ASSERT (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI); + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; + + if ((cif->rtype->size == 0) + && (initialize_aggregate_packed_struct (cif->rtype) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (cif->rtype); + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (*ptr); + + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN (bytes, (*ptr)->alignment); + if ((*ptr)->type == FFI_TYPE_STRUCT) + { + if ((*ptr)->size > 8) + { + bytes += (*ptr)->size; + bytes += sizeof (void *); + } + else + { + if ((*ptr)->size > 4) + bytes += 8; + else + bytes += 4; + } + } + else + bytes += STACK_ARG_SIZE ((*ptr)->size); + } + + cif->bytes = bytes; + + return ffi_prep_cif_machdep (cif); +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV (int (*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, unsigned *, void (*fn) ()) + 
__attribute__ ((__visibility__ ("hidden"))); + +void +ffi_call (ffi_cif * cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT (0); + break; + } +} + +/* Because the following variables are not exported outside libffi, we + mark them hidden. */ + +/* Assembly code for the jump stub. */ +extern const char ffi_cris_trampoline_template[] + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + ffi_prep_closure_inner function. */ +extern const int ffi_cris_trampoline_fn_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + closure data. */ +extern const int ffi_cris_trampoline_closure_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* This function is sibling-called (jumped to) by the closure + trampoline. We get R10..R13 at PARAMS[0..3] and a copy of [SP] at + PARAMS[4] to simplify handling of a straddling parameter. A copy + of R9 is at PARAMS[5] and SP at PARAMS[6]. These parameters are + put at the appropriate place in CLOSURE which is then executed and + the return value is passed back to the caller. */ + +static unsigned long long +ffi_prep_closure_inner (void **params, ffi_closure* closure) +{ + char *register_args = (char *) params; + void *struct_ret = params[5]; + char *stack_args = params[6]; + char *ptr = register_args; + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + + /* Max room needed is number of arguments as 64-bit values. */ + void **avalue = alloca (closure->cif->nargs * sizeof(void *)); + int i; + int doing_regs; + long long llret = 0; + + /* Find the address of each argument. */ + for (i = 0, doing_regs = 1; i < cif->nargs; i++) + { + /* Types up to and including 8 bytes go by-value. */ + if (arg_types[i]->size <= 4) + { + avalue[i] = ptr; + ptr += 4; + } + else if (arg_types[i]->size <= 8) + { + avalue[i] = ptr; + ptr += 8; + } + else + { + FFI_ASSERT (arg_types[i]->type == FFI_TYPE_STRUCT); + + /* Passed by-reference, so copy the pointer. */ + avalue[i] = *(void **) ptr; + ptr += 4; + } + + /* If we've handled more arguments than fit in registers, start + looking at the those passed on the stack. Step over the + first one if we had a straddling parameter. */ + if (doing_regs && ptr >= register_args + 4*4) + { + ptr = stack_args + ((ptr > register_args + 4*4) ? 4 : 0); + doing_regs = 0; + } + } + + /* Invoke the closure. */ + (closure->fun) (cif, + + cif->rtype->type == FFI_TYPE_STRUCT + /* The caller allocated space for the return + structure, and passed a pointer to this space in + R9. */ + ? struct_ret + + /* We take advantage of being able to ignore that + the high part isn't set if the return value is + not in R10:R11, but in R10 only. */ + : (void *) &llret, + + avalue, closure->user_data); + + return llret; +} + +/* API function: Prepare the trampoline. 
*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif *, void *, void **, void*), + void *user_data, + void *codeloc) +{ + void *innerfn = ffi_prep_closure_inner; + FFI_ASSERT (cif->abi == FFI_SYSV); + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + memcpy (closure->tramp, ffi_cris_trampoline_template, + FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE); + memcpy (closure->tramp + ffi_cris_trampoline_fn_offset, + &innerfn, sizeof (void *)); + memcpy (closure->tramp + ffi_cris_trampoline_closure_offset, + &codeloc, sizeof (void *)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget 2.h new file mode 100644 index 0000000..b837e97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget 2.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for CRIS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE 36 +#define FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE (7*4) +#define FFI_TRAMPOLINE_SIZE \ + (FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE + FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE) +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget.h new file mode 100644 index 0000000..b837e97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. 
+ Target configuration macros for CRIS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE 36 +#define FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE (7*4) +#define FFI_TRAMPOLINE_SIZE \ + (FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE + FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE) +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv 2.S new file mode 100644 index 0000000..79abaee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv 2.S @@ -0,0 +1,215 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#define CONCAT(x,y) x ## y +#define XCONCAT(x,y) CONCAT (x, y) +#define L(x) XCONCAT (__USER_LABEL_PREFIX__, x) + + .text + + ;; OK, when we get called we should have this (according to + ;; AXIS ETRAX 100LX Programmer's Manual chapter 6.3). + ;; + ;; R10: ffi_prep_args (func. pointer) + ;; R11: &ecif + ;; R12: cif->bytes + ;; R13: fig->flags + ;; sp+0: ecif.rvalue + ;; sp+4: fn (function pointer to the function that we need to call) + + .globl L(ffi_call_SYSV) + .type L(ffi_call_SYSV),@function + .hidden L(ffi_call_SYSV) + +L(ffi_call_SYSV): + ;; Save the regs to the stack. + push $srp + ;; Used for stack pointer saving. + push $r6 + ;; Used for function address pointer. + push $r7 + ;; Used for stack pointer saving. + push $r8 + ;; We save fig->flags to stack we will need them after we + ;; call The Function. + push $r13 + + ;; Saving current stack pointer. + move.d $sp,$r8 + move.d $sp,$r6 + + ;; Move address of ffi_prep_args to r13. + move.d $r10,$r13 + + ;; Make room on the stack for the args of fn. + sub.d $r12,$sp + + ;; Function void ffi_prep_args(char *stack, extended_cif *ecif) parameters are: + ;; r10 <-- stack pointer + ;; r11 <-- &ecif (already there) + move.d $sp,$r10 + + ;; Call the function. + jsr $r13 + + ;; Save the size of the structures which are passed on stack. + move.d $r10,$r7 + + ;; Move first four args in to r10..r13. + move.d [$sp+0],$r10 + move.d [$sp+4],$r11 + move.d [$sp+8],$r12 + move.d [$sp+12],$r13 + + ;; Adjust the stack and check if any parameters are given on stack. + addq 16,$sp + sub.d $r7,$r6 + cmp.d $sp,$r6 + + bpl go_on + nop + +go_on_no_params_on_stack: + move.d $r6,$sp + +go_on: + ;; Discover if we need to put rval address in to r9. + move.d [$r8+0],$r7 + cmpq FFI_TYPE_STRUCT,$r7 + bne call_now + nop + + ;; Move rval address to $r9. + move.d [$r8+20],$r9 + +call_now: + ;; Move address of The Function in to r7. + move.d [$r8+24],$r7 + + ;; Call The Function. + jsr $r7 + + ;; Reset stack. + move.d $r8,$sp + + ;; Load rval type (fig->flags) in to r13. + pop $r13 + + ;; Detect rval type. + cmpq FFI_TYPE_VOID,$r13 + beq epilogue + + cmpq FFI_TYPE_STRUCT,$r13 + beq epilogue + + cmpq FFI_TYPE_DOUBLE,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_UINT64,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_SINT64,$r13 + beq return_double_or_longlong + nop + + ;; Just return the 32 bit value. + ba return + nop + +return_double_or_longlong: + ;; Load half of the rval to r10 and the other half to r11. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + addq 4,$r13 + move.d $r11,[$r13] + ba epilogue + nop + +return: + ;; Load the rval to r10. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + +epilogue: + pop $r8 + pop $r7 + pop $r6 + Jump [$sp+] + + .size ffi_call_SYSV,.-ffi_call_SYSV + +/* Save R10..R13 into an array, somewhat like varargs. Copy the next + argument too, to simplify handling of any straddling parameter. + Save R9 and SP after those. Jump to function handling the rest. + Since this is a template, copied and the main function filled in by + the user. */ + + .globl L(ffi_cris_trampoline_template) + .type L(ffi_cris_trampoline_template),@function + .hidden L(ffi_cris_trampoline_template) + +L(ffi_cris_trampoline_template): +0: + /* The value we get for "PC" is right after the prefix instruction, + two bytes from the beginning, i.e. 0b+2. 
*/ + move.d $r10,[$pc+2f-(0b+2)] + move.d $pc,$r10 +1: + addq 2f-1b+4,$r10 + move.d $r11,[$r10+] + move.d $r12,[$r10+] + move.d $r13,[$r10+] + move.d [$sp],$r11 + move.d $r11,[$r10+] + move.d $r9,[$r10+] + move.d $sp,[$r10+] + subq FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE,$r10 + move.d 0,$r11 +3: + jump 0 +2: + .size ffi_cris_trampoline_template,.-0b + +/* This macro create a constant usable as "extern const int \name" in + C from within libffi, when \name has no prefix decoration. */ + + .macro const name,value + .globl \name + .type \name,@object + .hidden \name +\name: + .dword \value + .size \name,4 + .endm + +/* Constants for offsets within the trampoline. We could do this with + just symbols, avoiding memory contents and memory accesses, but the + C usage code would look a bit stranger. */ + + const L(ffi_cris_trampoline_fn_offset),2b-4-0b + const L(ffi_cris_trampoline_closure_offset),3b-4-0b diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv.S new file mode 100644 index 0000000..79abaee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv.S @@ -0,0 +1,215 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#define CONCAT(x,y) x ## y +#define XCONCAT(x,y) CONCAT (x, y) +#define L(x) XCONCAT (__USER_LABEL_PREFIX__, x) + + .text + + ;; OK, when we get called we should have this (according to + ;; AXIS ETRAX 100LX Programmer's Manual chapter 6.3). + ;; + ;; R10: ffi_prep_args (func. pointer) + ;; R11: &ecif + ;; R12: cif->bytes + ;; R13: fig->flags + ;; sp+0: ecif.rvalue + ;; sp+4: fn (function pointer to the function that we need to call) + + .globl L(ffi_call_SYSV) + .type L(ffi_call_SYSV),@function + .hidden L(ffi_call_SYSV) + +L(ffi_call_SYSV): + ;; Save the regs to the stack. + push $srp + ;; Used for stack pointer saving. + push $r6 + ;; Used for function address pointer. + push $r7 + ;; Used for stack pointer saving. + push $r8 + ;; We save fig->flags to stack we will need them after we + ;; call The Function. + push $r13 + + ;; Saving current stack pointer. + move.d $sp,$r8 + move.d $sp,$r6 + + ;; Move address of ffi_prep_args to r13. 
+ move.d $r10,$r13 + + ;; Make room on the stack for the args of fn. + sub.d $r12,$sp + + ;; Function void ffi_prep_args(char *stack, extended_cif *ecif) parameters are: + ;; r10 <-- stack pointer + ;; r11 <-- &ecif (already there) + move.d $sp,$r10 + + ;; Call the function. + jsr $r13 + + ;; Save the size of the structures which are passed on stack. + move.d $r10,$r7 + + ;; Move first four args in to r10..r13. + move.d [$sp+0],$r10 + move.d [$sp+4],$r11 + move.d [$sp+8],$r12 + move.d [$sp+12],$r13 + + ;; Adjust the stack and check if any parameters are given on stack. + addq 16,$sp + sub.d $r7,$r6 + cmp.d $sp,$r6 + + bpl go_on + nop + +go_on_no_params_on_stack: + move.d $r6,$sp + +go_on: + ;; Discover if we need to put rval address in to r9. + move.d [$r8+0],$r7 + cmpq FFI_TYPE_STRUCT,$r7 + bne call_now + nop + + ;; Move rval address to $r9. + move.d [$r8+20],$r9 + +call_now: + ;; Move address of The Function in to r7. + move.d [$r8+24],$r7 + + ;; Call The Function. + jsr $r7 + + ;; Reset stack. + move.d $r8,$sp + + ;; Load rval type (fig->flags) in to r13. + pop $r13 + + ;; Detect rval type. + cmpq FFI_TYPE_VOID,$r13 + beq epilogue + + cmpq FFI_TYPE_STRUCT,$r13 + beq epilogue + + cmpq FFI_TYPE_DOUBLE,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_UINT64,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_SINT64,$r13 + beq return_double_or_longlong + nop + + ;; Just return the 32 bit value. + ba return + nop + +return_double_or_longlong: + ;; Load half of the rval to r10 and the other half to r11. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + addq 4,$r13 + move.d $r11,[$r13] + ba epilogue + nop + +return: + ;; Load the rval to r10. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + +epilogue: + pop $r8 + pop $r7 + pop $r6 + Jump [$sp+] + + .size ffi_call_SYSV,.-ffi_call_SYSV + +/* Save R10..R13 into an array, somewhat like varargs. Copy the next + argument too, to simplify handling of any straddling parameter. + Save R9 and SP after those. Jump to function handling the rest. + Since this is a template, copied and the main function filled in by + the user. */ + + .globl L(ffi_cris_trampoline_template) + .type L(ffi_cris_trampoline_template),@function + .hidden L(ffi_cris_trampoline_template) + +L(ffi_cris_trampoline_template): +0: + /* The value we get for "PC" is right after the prefix instruction, + two bytes from the beginning, i.e. 0b+2. */ + move.d $r10,[$pc+2f-(0b+2)] + move.d $pc,$r10 +1: + addq 2f-1b+4,$r10 + move.d $r11,[$r10+] + move.d $r12,[$r10+] + move.d $r13,[$r10+] + move.d [$sp],$r11 + move.d $r11,[$r10+] + move.d $r9,[$r10+] + move.d $sp,[$r10+] + subq FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE,$r10 + move.d 0,$r11 +3: + jump 0 +2: + .size ffi_cris_trampoline_template,.-0b + +/* This macro create a constant usable as "extern const int \name" in + C from within libffi, when \name has no prefix decoration. */ + + .macro const name,value + .globl \name + .type \name,@object + .hidden \name +\name: + .dword \value + .size \name,4 + .endm + +/* Constants for offsets within the trampoline. We could do this with + just symbols, avoiding memory contents and memory accesses, but the + C usage code would look a bit stranger. 
*/ + + const L(ffi_cris_trampoline_fn_offset),2b-4-0b + const L(ffi_cris_trampoline_closure_offset),3b-4-0b diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug 2.c new file mode 100644 index 0000000..f3172b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug 2.c @@ -0,0 +1,64 @@ +/* ----------------------------------------------------------------------- + debug.c - Copyright (c) 1996 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include + +/* General debugging routines */ + +void ffi_stop_here(void) +{ + /* This function is only useful for debugging purposes. + Place a breakpoint on ffi_stop_here to be notified of + significant events. */ +} + +/* This function should only be called via the FFI_ASSERT() macro */ + +void ffi_assert(char *expr, char *file, int line) +{ + fprintf(stderr, "ASSERTION FAILURE: %s at %s:%d\n", expr, file, line); + ffi_stop_here(); + abort(); +} + +/* Perform a sanity check on an ffi_type structure */ + +void ffi_type_test(ffi_type *a, char *file, int line) +{ + FFI_ASSERT_AT(a != NULL, file, line); + + FFI_ASSERT_AT(a->type <= FFI_TYPE_LAST, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->size > 0, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->alignment > 0, file, line); + FFI_ASSERT_AT((a->type != FFI_TYPE_STRUCT && a->type != FFI_TYPE_COMPLEX) + || a->elements != NULL, file, line); + FFI_ASSERT_AT(a->type != FFI_TYPE_COMPLEX + || (a->elements != NULL + && a->elements[0] != NULL && a->elements[1] == NULL), + file, line); + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug.c new file mode 100644 index 0000000..f3172b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug.c @@ -0,0 +1,64 @@ +/* ----------------------------------------------------------------------- + debug.c - Copyright (c) 1996 Red Hat, Inc. 
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/* General debugging routines */
+
+void ffi_stop_here(void)
+{
+ /* This function is only useful for debugging purposes.
+ Place a breakpoint on ffi_stop_here to be notified of
+ significant events. */
+}
+
+/* This function should only be called via the FFI_ASSERT() macro */
+
+void ffi_assert(char *expr, char *file, int line)
+{
+ fprintf(stderr, "ASSERTION FAILURE: %s at %s:%d\n", expr, file, line);
+ ffi_stop_here();
+ abort();
+}
+
+/* Perform a sanity check on an ffi_type structure */
+
+void ffi_type_test(ffi_type *a, char *file, int line)
+{
+ FFI_ASSERT_AT(a != NULL, file, line);
+
+ FFI_ASSERT_AT(a->type <= FFI_TYPE_LAST, file, line);
+ FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->size > 0, file, line);
+ FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->alignment > 0, file, line);
+ FFI_ASSERT_AT((a->type != FFI_TYPE_STRUCT && a->type != FFI_TYPE_COMPLEX)
+ || a->elements != NULL, file, line);
+ FFI_ASSERT_AT(a->type != FFI_TYPE_COMPLEX
+ || (a->elements != NULL
+ && a->elements[0] != NULL && a->elements[1] == NULL),
+ file, line);
+
+}
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc 2.c
new file mode 100644
index 0000000..d63dd36
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc 2.c
@@ -0,0 +1,5166 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain, as explained at
+ http://creativecommons.org/licenses/publicdomain. Send questions,
+ comments, complaints, performance data, etc to dl@cs.oswego.edu
+
+* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+* Quickstart
+
+ This library is all in one file to simplify the most common usage:
+ ftp it, compile it (-O3), and link it into another program. All of
+ the compile-time options default to reasonable values for use on
+ most platforms. You might later want to step through various
+ compile-time and dynamic tuning options.
+ + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. 
(Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with either a pthread mutex or a win32 + spinlock (depending on WIN32). This is not especially fast, and + can be a major bottleneck. It is designed only to provide + minimal protection in concurrent environments, and to provide a + basis for extensions. If you are using malloc in a concurrent + program, consider instead using ptmalloc, which is derived from + a version of this malloc. (See http://www.malloc.de). + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) 
+ + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. + + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. + +MALLOC_ALIGNMENT default: (size_t)8 + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. 
(If set true, this can be + overridden on a per-mspace basis for mspace versions.) + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. + +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. + However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. + +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). 
It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. See + near the end of this file for guidelines for creating a custom + version of MORECORE. + +MORECORE_CONTIGUOUS default: 1 (true) + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. + +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 on unix + True if mmap clears memory so calloc doesn't need to. This is true + for standard unix mmap using /dev/zero. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. (On most x86s, the asm version is only + slightly faster than the C version.) + +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). 
+ +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. 
Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +*/ + +#if defined __linux__ && !defined _GNU_SOURCE +/* mremap() on Linux requires this via sys/mman.h */ +#define _GNU_SOURCE 1 +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define MALLOC_FAILURE_ACTION +#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ +#endif /* WIN32 */ + +#ifdef __OS2__ +#define INCL_DOS +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_SYS_MMAN_H +#endif /* __OS2__ */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)8U) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#ifndef MORECORE +#define MORECORE sbrk +#endif /* MORECORE */ +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if MORECORE_CONTIGUOUS +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define 
DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. +*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. 
+*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ + +/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO + +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; + +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. 
realloc with a size + argument of zero (re)allocates a minimum-sized chunk. + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ + +void* dlrealloc(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +void* dlmemalign(size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. 
("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use regular calloc and assign pointers into this + space to represent elements. (In this case though, you cannot + independently free elements.) + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. 
If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use a single regular malloc, and assign pointers at + particular offsets in the aggregate space. (In this case though, you + cannot independently free elements.) + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. 
malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. 
+*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. 
+ ======================================================================== +*/ + +/* #include "malloc.h" */ + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef _MSC_VER +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ + +#include /* for printing in malloc_stats */ + +#ifndef LACKS_ERRNO_H +#include /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ +#if FOOTERS +#include /* for magic initialization */ +#endif /* FOOTERS */ +#ifndef LACKS_STDLIB_H +#include /* for abort() */ +#endif /* LACKS_STDLIB_H */ +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#define assert(x) +#endif /* DEBUG */ +#ifndef LACKS_STRING_H +#include /* for memset etc */ +#endif /* LACKS_STRING_H */ +#if USE_BUILTIN_FFS +#ifndef LACKS_STRINGS_H +#include /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ +#if HAVE_MMAP +#ifndef LACKS_SYS_MMAN_H +#include /* for mmap */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ +#if HAVE_MORECORE +#ifndef LACKS_UNISTD_H +#include /* for sbrk */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ +#endif /* HAVE_MMAP */ + +#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 
0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if !HAVE_MMAP +#define IS_MMAPPED_BIT (SIZE_T_ZERO) +#define USE_MMAP_BIT (SIZE_T_ZERO) +#define CALL_MMAP(s) MFAIL +#define CALL_MUNMAP(a, s) (-1) +#define DIRECT_MMAP(s) MFAIL + +#else /* HAVE_MMAP */ +#define IS_MMAPPED_BIT (SIZE_T_ONE) +#define USE_MMAP_BIT (SIZE_T_ONE) + +#if !defined(WIN32) && !defined (__OS2__) +#define CALL_MUNMAP(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP(s) CALL_MMAP(s) + +#elif defined(__OS2__) + +/* OS/2 MMAP via DosAllocMem */ +static void* os2mmap(size_t size) { + void* ptr; + if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) && + DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE)) + return MFAIL; + return ptr; +} + +#define os2direct_mmap(n) os2mmap(n) + +/* This function supports releasing coalesed segments */ +static int os2munmap(void* ptr, size_t size) { + while (size) { + ULONG ulSize = size; + ULONG ulFlags = 0; + if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0) + return -1; + if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 || + ulSize > size) + return -1; + if (DosFreeMem(ptr) != 0) + return -1; + ptr = ( void * ) ( ( char * ) ptr + ulSize ); + size -= ulSize; + } + return 0; +} + +#define CALL_MMAP(s) os2mmap(s) +#define CALL_MUNMAP(a, s) os2munmap((a), (s)) +#define DIRECT_MMAP(s) os2direct_mmap(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_EXECUTE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL; +} + +/* This function supports releasing coalesed segments */ +static int win32munmap(void* ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + return 0; +} + +#define CALL_MMAP(s) win32mmap(s) +#define CALL_MUNMAP(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ + +#if HAVE_MMAP && HAVE_MREMAP +#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) +#else /* HAVE_MMAP && HAVE_MREMAP */ +#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ + +#if HAVE_MORECORE +#define CALL_MORECORE(S) MORECORE(S) +#else /* HAVE_MORECORE */ +#define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE */ + +/* mstate bit set if contiguous morecore disabled or failed */ +#define USE_NONCONTIGUOUS_BIT (4U) + +/* segment bit set in create_mspace_with_base */ +#define EXTERN_BIT (8U) + + +/* --------------------------- Lock preliminaries ------------------------ */ + +#if USE_LOCKS + +/* + When locks are defined, there are up to two global locks: + + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to + MORECORE. In many cases sys_alloc requires two calls, that should + not be interleaved with calls by other threads. This does not + protect against direct calls to MORECORE by other threads not + using this lock, so there is still code to cope the best we can on + interference. + + * magic_init_mutex ensures that mparams.magic and other + unique mparams values are initialized only once. +*/ + +#if !defined(WIN32) && !defined(__OS2__) +/* By default use posix locks */ +#include +#define MLOCK_T pthread_mutex_t +#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL) +#define ACQUIRE_LOCK(l) pthread_mutex_lock(l) +#define RELEASE_LOCK(l) pthread_mutex_unlock(l) + +#if HAVE_MORECORE +static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif /* HAVE_MORECORE */ + +static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER; + +#elif defined(__OS2__) +#define MLOCK_T HMTX +#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE) +#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT) +#define RELEASE_LOCK(l) DosReleaseMutexSem(*l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; + +#else /* WIN32 */ +/* + Because lock-protected regions have bounded times, and there + are no recursive lock calls, we can use simple spinlocks. 
+*/ + +#define MLOCK_T long +static int win32_acquire_lock (MLOCK_T *sl) { + for (;;) { +#ifdef InterlockedCompareExchangePointer + if (!InterlockedCompareExchange(sl, 1, 0)) + return 0; +#else /* Use older void* version */ + if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0)) + return 0; +#endif /* InterlockedCompareExchangePointer */ + Sleep (0); + } +} + +static void win32_release_lock (MLOCK_T *sl) { + InterlockedExchange (sl, 0); +} + +#define INITIAL_LOCK(l) *(l)=0 +#define ACQUIRE_LOCK(l) win32_acquire_lock(l) +#define RELEASE_LOCK(l) win32_release_lock(l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; +#endif /* WIN32 */ + +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS && HAVE_MORECORE +#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex); +#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex); +#else /* USE_LOCKS && HAVE_MORECORE */ +#define ACQUIRE_MORECORE_LOCK() +#define RELEASE_MORECORE_LOCK() +#endif /* USE_LOCKS && HAVE_MORECORE */ + +#if USE_LOCKS +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MAGIC_INIT_LOCK() +#define RELEASE_MAGIC_INIT_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse. This redundancy enables usage checks within free and realloc, + and reduces indirection when freeing and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. 
The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, which have the lowest-order bit + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set + PINUSE_BIT in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* MMapped chunks need a second word of overhead ... */ +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use. If the chunk was obtained with mmap, the prev_foot field has + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the + mmapped region to the base of the chunk. 
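+
+  Worked example (illustrative): with PINUSE_BIT == 1 and CINUSE_BIT == 2,
+  a 48-byte chunk that is in use and whose lower neighbor is also in use
+  carries head == 48 | 2 | 1 == 51; chunksize() recovers 48 by masking
+  off the two low bits, while cinuse() and pinuse() test them directly.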
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
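+
+  Worked example (illustrative, TREEBIN_SHIFT == 8): compute_tree_index
+  places a 0x160-byte chunk in treebin 0 (sizes 0x100-0x17F) and a
+  0x190-byte chunk in treebin 1 (0x180-0x1FF). A 0x900-byte chunk lands
+  in treebin 6 (0x800-0xBFF): the highest set bit of 0x900>>8 is bit 3,
+  and the size bit just below the chunk's highest (bit 10 of 0x900) is
+  clear, selecting the lower of the two bins for that power of two.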
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
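+
+  Illustrative sketch (mirrors segment_holding(), defined further below):
+  finding the segment that owns an address is a linear walk starting at
+  the embedded record:
+
+      msegmentptr sp = &m->seg;
+      while (sp != 0 && !(addr >= sp->base && addr < sp->base + sp->size))
+        sp = sp->next;
+      .. sp now points at the owning segment, or is 0 if none ..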
+*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ +#if FFI_MMAP_EXEC_WRIT + /* The mmap magic is supposed to store the address of the executable + segment at the very end of the requested block. */ + +# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t))) + + /* We can only merge segments if their corresponding executable + segments are at identical offsets. */ +# define check_segment_merge(S,b,s) \ + (mmap_exec_offset((b),(s)) == (S)->exec_offset) + +# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset) +# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset) + + /* The removal of sflags only works with HAVE_MORECORE == 0. */ + +# define get_segment_flags(S) (IS_MMAPPED_BIT) +# define set_segment_flags(S,v) \ + (((v) != IS_MMAPPED_BIT) ? (ABORT, (v)) : \ + (((S)->exec_offset = \ + mmap_exec_offset((S)->base, (S)->size)), \ + (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \ + (S)->exec_offset) ? (ABORT, (v)) : \ + (mmap_exec_offset((S)->base, (S)->size) = 0), (v))) + + /* We use an offset here, instead of a pointer, because then, when + base changes, we don't have to modify this. On architectures + with segmented addresses, this might not work. */ + ptrdiff_t exec_offset; +#else + +# define get_segment_flags(S) ((S)->sflags) +# define set_segment_flags(S,v) ((S)->sflags = (v)) +# define check_segment_merge(S,b,s) (1) + + flag_t sflags; /* mmap and extern flag */ +#endif +}; + +#define is_mmapped_segment(S) (get_segment_flags(S) & IS_MMAPPED_BIT) +#define is_extern_segment(S) (get_segment_flags(S) & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). 
Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. 
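+
+  For example (illustrative): mallopt(M_GRANULARITY, 131072) is handled
+  by change_mparam() below, which accepts the value only if it is a
+  power of two at least as large as mparams.page_size, and then stores
+  it in mparams.granularity.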
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +/* Ensure locks are initialized */ +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) + +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. 
+ USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. 
See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#if defined(__GNUC__) && defined(__i386__) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* index corresponding to given bit */ + +#if defined(__GNUC__) && defined(__i386__) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ + I = (bindex_t)J;\ +} + +#else /* GNUC */ +#if USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = ffs(X)-1 + +#else /* USE_BUILTIN_FFS */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* USE_BUILTIN_FFS */ +#endif /* GNUC */ + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). 
In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probablistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. +*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has its cinuse bit on */ +#define ok_cinuse(p) cinuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_cinuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = 
(s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +/* Initialize mparams */ +static int init_mparams(void) { + if (mparams.page_size == 0) { + size_t s; + + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + s = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ + s = (size_t)(time(0) ^ (size_t)0x55555555U); + + s |= (size_t)8U; /* ensure nonzero */ + s &= ~(size_t)7U; /* improve chances of fault for bad values */ + + } +#else /* (FOOTERS && !INSECURE) */ + s = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + ACQUIRE_MAGIC_INIT_LOCK(); + if (mparams.magic == 0) { + mparams.magic = s; + /* Set up lock for main malloc area */ + INITIAL_LOCK(&gm->mutex); + gm->mflags = mparams.default_mflags; + } + RELEASE_MAGIC_INIT_LOCK(); + +#if !defined(WIN32) && !defined(__OS2__) + mparams.page_size = malloc_getpagesize; + mparams.granularity = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : mparams.page_size); +#elif defined (__OS2__) + /* if low-memory is used, os2munmap() would break + if it were anything other than 64k */ + mparams.page_size = 4096u; + mparams.granularity = 65536u; +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + mparams.page_size = system_info.dwPageSize; + mparams.granularity = system_info.dwAllocationGranularity; + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. 
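+       (For instance, a hypothetical MALLOC_ALIGNMENT of 12 would pass the
+       ">= 8" test but fail the power-of-two test, since 12 & 11 != 0, and
+       trigger ABORT; 8, 16 or 32 are accepted.)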
+ */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) + ABORT; + } + return 0; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (size_t)value; + init_mparams(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = chunksize(p); + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!next_pinuse(p)); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, 
p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. */ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + assert(m->topsize == chunksize(m->top)); + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while 
(s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. 
If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendants + correspond properly to bit masks. We use the rightmost descendant + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + (m == gm)? dlmalloc(b) : mspace_malloc(m, b) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. 
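+
+  Rough layout of one such chunk as set up by mmap_alloc() below
+  (illustrative, not to scale):
+
+      mmapped region:  [ alignment offset | chunk: head + payload | fake next chunk ]
+      (the offset, or'ed with IS_MMAPPED_BIT, is kept in the chunk's prev_foot)
+
+  so the original mapping base and length can be recomputed later as
+  (char*)p - offset and chunksize + offset + MMAP_FOOT_PAD, the same
+  arithmetic visible in mmap_resize() below.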
+*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. 
*/ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmapped); + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + init_mparams(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. 
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + size_t asize = 0; + ACQUIRE_MORECORE_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + asize += (page_align((size_t)base) - (size_t)base); + /* Can't call MORECORE if size is negative when treated as signed */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == base) { + tbase = base; + tsize = asize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Use mem here only if it did continuously extend old space */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { + tbase = br; + tsize = asize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (asize < HALF_MAX_SIZE_T && + asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { + size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + asize += esize; + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-asize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = asize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MORECORE_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (rsize > nb) { /* Fail if wraps around zero */ + char* mp = (char*)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + mmap_flag = IS_MMAPPED_BIT; + } + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MORECORE_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MORECORE_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + m->seg.base = m->least_addr = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmap_flag); + m->magic = mparams.magic; 
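+      /* Note: the global space keeps its malloc_state in static storage,
+         whereas a user mspace embeds its malloc_state at the start of its
+         first segment, so in the non-global case below the top chunk is
+         offset past that embedded header. */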
+ init_bins(m); + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if 
(HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MORECORE_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MORECORE_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if 
(rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. 
+ */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? + br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. + + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
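+
+    In outline: a direct-mmapped chunk is munmapped immediately; otherwise a
+    free preceding chunk is merged first (or, if it is dv and the following
+    chunk is fully in use, dv simply grows), and then a free following chunk
+    is merged into top, into dv, or into an ordinary free chunk that is put
+    back into a small or tree bin. Growing top past trim_check triggers a
+    sys_trim attempt.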
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->mflags = mparams.default_mflags; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + set_segment_flags(&m->seg, IS_MMAPPED_BIT); + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + set_segment_flags(&m->seg, EXTERN_BIT); + set_lock(m, locked); + } + return (mspace)m; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = get_segment_flags(sp); + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != 
elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. 
+ * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. + + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. + * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. + + V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) + * Fix improper #endif nesting in C++ + * Add explicit casts needed for C++ + + V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) + * Use trees for large bins + * Support mspaces + * Use segments to unify sbrk-based and mmap-based system allocation, + removing need for emulation on most platforms without sbrk. + * Default safety checks + * Optional footer checks. Thanks to William Robertson for the idea. + * Internal code refactoring + * Incorporate suggestions and platform-specific changes. + Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, + Aaron Bachmann, Emery Berger, and others. + * Speed up non-fastbin processing enough to remove fastbins. + * Remove useless cfree() to avoid conflicts with other apps. + * Remove internal memcpy, memset. 
Compilers handle builtins better. + * Remove some options that no one ever used and rename others. + + V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + * Fix malloc_state bitmap array misdeclaration + + V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) + * Allow tuning of FIRST_SORTED_BIN_SIZE + * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. + * Better detection and support for non-contiguousness of MORECORE. + Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger + * Bypass most of malloc if no frees. Thanks To Emery Berger. + * Fix freeing of old top non-contiguous chunk im sysmalloc. + * Raised default trim and map thresholds to 256K. + * Fix mmap-related #defines. Thanks to Lubos Lunak. + * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. + * Branch-free bin calculation + * Default trim and mmap thresholds now 256K. + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. 
+ * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occurring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) 
+ +*/ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc.c new file mode 100644 index 0000000..d63dd36 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc.c @@ -0,0 +1,5166 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. 
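+
+  As an illustrative sketch of the points above, the minimum allocated size
+  and the per-chunk overhead can be observed through the usable size reported
+  for a tiny request. The dl-prefixed names are the ones defined in this file;
+  depending on compile-time naming options they may stand for the plain
+  malloc/free names:
+
+    void*  p = dlmalloc(1);              /* gets the minimum allocatable size */
+    size_t u = dlmalloc_usable_size(p);  /* >= 1: min chunk size minus overhead */
+    dlfree(p);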
+ + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. (Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with either a pthread mutex or a win32 + spinlock (depending on WIN32). This is not especially fast, and + can be a major bottleneck. It is designed only to provide + minimal protection in concurrent environments, and to provide a + basis for extensions. If you are using malloc in a concurrent + program, consider instead using ptmalloc, which is derived from + a version of this malloc. (See http://www.malloc.de). + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. 
However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. + + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. 
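To make the mspace workflow described above concrete, the following self-contained example (an illustrative sketch, not part of the vendored file) assumes the allocator is compiled with MSPACES enabled and linked in; the prototypes repeat the declarations that appear later in this file, and the helper handle_request and its parameter are hypothetical:

#include <stddef.h>

typedef void* mspace;  /* matches the typedef declared later in this file */
mspace  create_mspace(size_t capacity, int locked);
size_t  destroy_mspace(mspace msp);
void*   mspace_malloc(mspace msp, size_t bytes);
void    mspace_free(mspace msp, void* mem);

/* Hypothetical helper: build per-request scratch data in its own arena. */
void handle_request(size_t scratch_bytes) {
  mspace arena = create_mspace(0, 0);   /* default capacity, no locking */
  if (arena == 0) return;
  void* scratch = mspace_malloc(arena, scratch_bytes);
  /* ... further mspace_malloc calls draw from the same arena ... */
  mspace_free(arena, scratch);          /* individual frees are optional */
  destroy_mspace(arena);                /* releases the whole arena at once */
}

Destroying the arena returns its memory in one step, which is the main convenience of using an mspace as a throwaway allocation region.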
+ + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. + +MALLOC_ALIGNMENT default: (size_t)8 + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. (If set true, this can be + overridden on a per-mspace basis for mspace versions.) + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. + +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. + However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. 
Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. + +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. See + near the end of this file for guidelines for creating a custom + version of MORECORE. + +MORECORE_CONTIGUOUS default: 1 (true) + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. + +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 on unix + True if mmap clears memory so calloc doesn't need to. This is true + for standard unix mmap using /dev/zero. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. (On most x86s, the asm version is only + slightly faster than the C version.) 
+ +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). + +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. 
Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +*/ + +#if defined __linux__ && !defined _GNU_SOURCE +/* mremap() on Linux requires this via sys/mman.h */ +#define _GNU_SOURCE 1 +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define MALLOC_FAILURE_ACTION +#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ +#endif /* WIN32 */ + +#ifdef __OS2__ +#define INCL_DOS +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_SYS_MMAN_H +#endif /* __OS2__ */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)8U) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* 
PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#ifndef MORECORE +#define MORECORE sbrk +#endif /* MORECORE */ +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if MORECORE_CONTIGUOUS +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. +*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. 
If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. +*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ + +/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO + +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; + +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. 
+ + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. realloc with a size + argument of zero (re)allocates a minimum-sized chunk. + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ + +void* dlrealloc(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +void* dlmemalign(size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. 
+ fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use regular calloc and assign pointers into this + space to represent elements. (In this case though, you cannot + independently free elements.) + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. 
Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use a single regular malloc, and assign pointers at + particular offsets in the aggregate space. (In this case though, you + cannot independently free elements.) + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. 
malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. 
+*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. 
+ ======================================================================== +*/ + +/* #include "malloc.h" */ + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef _MSC_VER +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ + +#include /* for printing in malloc_stats */ + +#ifndef LACKS_ERRNO_H +#include /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ +#if FOOTERS +#include /* for magic initialization */ +#endif /* FOOTERS */ +#ifndef LACKS_STDLIB_H +#include /* for abort() */ +#endif /* LACKS_STDLIB_H */ +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#define assert(x) +#endif /* DEBUG */ +#ifndef LACKS_STRING_H +#include /* for memset etc */ +#endif /* LACKS_STRING_H */ +#if USE_BUILTIN_FFS +#ifndef LACKS_STRINGS_H +#include /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ +#if HAVE_MMAP +#ifndef LACKS_SYS_MMAN_H +#include /* for mmap */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ +#if HAVE_MORECORE +#ifndef LACKS_UNISTD_H +#include /* for sbrk */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ +#endif /* HAVE_MMAP */ + +#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 
0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if !HAVE_MMAP +#define IS_MMAPPED_BIT (SIZE_T_ZERO) +#define USE_MMAP_BIT (SIZE_T_ZERO) +#define CALL_MMAP(s) MFAIL +#define CALL_MUNMAP(a, s) (-1) +#define DIRECT_MMAP(s) MFAIL + +#else /* HAVE_MMAP */ +#define IS_MMAPPED_BIT (SIZE_T_ONE) +#define USE_MMAP_BIT (SIZE_T_ONE) + +#if !defined(WIN32) && !defined (__OS2__) +#define CALL_MUNMAP(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP(s) CALL_MMAP(s) + +#elif defined(__OS2__) + +/* OS/2 MMAP via DosAllocMem */ +static void* os2mmap(size_t size) { + void* ptr; + if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) && + DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE)) + return MFAIL; + return ptr; +} + +#define os2direct_mmap(n) os2mmap(n) + +/* This function supports releasing coalesed segments */ +static int os2munmap(void* ptr, size_t size) { + while (size) { + ULONG ulSize = size; + ULONG ulFlags = 0; + if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0) + return -1; + if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 || + ulSize > size) + return -1; + if (DosFreeMem(ptr) != 0) + return -1; + ptr = ( void * ) ( ( char * ) ptr + ulSize ); + size -= ulSize; + } + return 0; +} + +#define CALL_MMAP(s) os2mmap(s) +#define CALL_MUNMAP(a, s) os2munmap((a), (s)) +#define DIRECT_MMAP(s) os2direct_mmap(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_EXECUTE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL; +} + +/* This function supports releasing coalesed segments */ +static int win32munmap(void* ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + return 0; +} + +#define CALL_MMAP(s) win32mmap(s) +#define CALL_MUNMAP(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ + +#if HAVE_MMAP && HAVE_MREMAP +#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) +#else /* HAVE_MMAP && HAVE_MREMAP */ +#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ + +#if HAVE_MORECORE +#define CALL_MORECORE(S) MORECORE(S) +#else /* HAVE_MORECORE */ +#define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE */ + +/* mstate bit set if contiguous morecore disabled or failed */ +#define USE_NONCONTIGUOUS_BIT (4U) + +/* segment bit set in create_mspace_with_base */ +#define EXTERN_BIT (8U) + + +/* --------------------------- Lock preliminaries ------------------------ */ + +#if USE_LOCKS + +/* + When locks are defined, there are up to two global locks: + + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to + MORECORE. In many cases sys_alloc requires two calls, that should + not be interleaved with calls by other threads. This does not + protect against direct calls to MORECORE by other threads not + using this lock, so there is still code to cope the best we can on + interference. + + * magic_init_mutex ensures that mparams.magic and other + unique mparams values are initialized only once. +*/ + +#if !defined(WIN32) && !defined(__OS2__) +/* By default use posix locks */ +#include +#define MLOCK_T pthread_mutex_t +#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL) +#define ACQUIRE_LOCK(l) pthread_mutex_lock(l) +#define RELEASE_LOCK(l) pthread_mutex_unlock(l) + +#if HAVE_MORECORE +static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif /* HAVE_MORECORE */ + +static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER; + +#elif defined(__OS2__) +#define MLOCK_T HMTX +#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE) +#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT) +#define RELEASE_LOCK(l) DosReleaseMutexSem(*l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; + +#else /* WIN32 */ +/* + Because lock-protected regions have bounded times, and there + are no recursive lock calls, we can use simple spinlocks. 
+*/ + +#define MLOCK_T long +static int win32_acquire_lock (MLOCK_T *sl) { + for (;;) { +#ifdef InterlockedCompareExchangePointer + if (!InterlockedCompareExchange(sl, 1, 0)) + return 0; +#else /* Use older void* version */ + if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0)) + return 0; +#endif /* InterlockedCompareExchangePointer */ + Sleep (0); + } +} + +static void win32_release_lock (MLOCK_T *sl) { + InterlockedExchange (sl, 0); +} + +#define INITIAL_LOCK(l) *(l)=0 +#define ACQUIRE_LOCK(l) win32_acquire_lock(l) +#define RELEASE_LOCK(l) win32_release_lock(l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; +#endif /* WIN32 */ + +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS && HAVE_MORECORE +#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex); +#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex); +#else /* USE_LOCKS && HAVE_MORECORE */ +#define ACQUIRE_MORECORE_LOCK() +#define RELEASE_MORECORE_LOCK() +#endif /* USE_LOCKS && HAVE_MORECORE */ + +#if USE_LOCKS +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MAGIC_INIT_LOCK() +#define RELEASE_MAGIC_INIT_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse. This redundancy enables usage checks within free and realloc, + and reduces indirection when freeing and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. 
The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, which have the lowest-order bit + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set + PINUSE_BIT in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* MMapped chunks need a second word of overhead ... */ +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use. If the chunk was obtained with mmap, the prev_foot field has + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the + mmapped region to the base of the chunk. 
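+
+  A purely illustrative worked example (not tied to any particular
+  platform): since PINUSE_BIT == 1 and CINUSE_BIT == 2, an in-use
+  32-byte chunk whose lower neighbor is also in use carries
+  head == 32 | CINUSE_BIT | PINUSE_BIT == 0x23, and chunksize()
+  recovers 32 by masking off the two low bits.  If that same chunk
+  were instead free, its head would be 32 | PINUSE_BIT == 0x21 and the
+  word at its foot (the next chunk's prev_foot) would hold 32.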
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
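+
+  To make the ranges concrete (illustration only): each pair of
+  treebins covers one power-of-two span of sizes, split in half.
+  Bins 0 and 1 hold [0x100, 0x180) and [0x180, 0x200), bins 2 and 3
+  hold [0x200, 0x300) and [0x300, 0x400), bins 4 and 5 hold
+  [0x400, 0x600) and [0x600, 0x800), and so on, with the final bin
+  catching everything beyond the largest such range.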
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
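+
+  To make those policies concrete: the initial segment handed to
+  create_mspace_with_base carries EXTERN_BIT and is never unmapped by
+  this code; a segment obtained in sys_alloc via CALL_MMAP carries
+  IS_MMAPPED_BIT and may later be merged with neighboring mmapped
+  segments or released with munmap; and a MORECORE-extended segment
+  carries neither bit.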
+*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ +#if FFI_MMAP_EXEC_WRIT + /* The mmap magic is supposed to store the address of the executable + segment at the very end of the requested block. */ + +# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t))) + + /* We can only merge segments if their corresponding executable + segments are at identical offsets. */ +# define check_segment_merge(S,b,s) \ + (mmap_exec_offset((b),(s)) == (S)->exec_offset) + +# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset) +# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset) + + /* The removal of sflags only works with HAVE_MORECORE == 0. */ + +# define get_segment_flags(S) (IS_MMAPPED_BIT) +# define set_segment_flags(S,v) \ + (((v) != IS_MMAPPED_BIT) ? (ABORT, (v)) : \ + (((S)->exec_offset = \ + mmap_exec_offset((S)->base, (S)->size)), \ + (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \ + (S)->exec_offset) ? (ABORT, (v)) : \ + (mmap_exec_offset((S)->base, (S)->size) = 0), (v))) + + /* We use an offset here, instead of a pointer, because then, when + base changes, we don't have to modify this. On architectures + with segmented addresses, this might not work. */ + ptrdiff_t exec_offset; +#else + +# define get_segment_flags(S) ((S)->sflags) +# define set_segment_flags(S,v) ((S)->sflags = (v)) +# define check_segment_merge(S,b,s) (1) + + flag_t sflags; /* mmap and extern flag */ +#endif +}; + +#define is_mmapped_segment(S) (get_segment_flags(S) & IS_MMAPPED_BIT) +#define is_extern_segment(S) (get_segment_flags(S) & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). 
Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. 
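+
+  For example (values illustrative only): mallopt(M_GRANULARITY, 131072)
+  takes effect only if 131072 is a power of two no smaller than the
+  page size, and mallopt(M_TRIM_THRESHOLD, -1) effectively disables
+  trimming; both are handled by change_mparam below.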
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +/* Ensure locks are initialized */ +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) + +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. 
+ USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. 
See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#if defined(__GNUC__) && defined(__i386__) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* index corresponding to given bit */ + +#if defined(__GNUC__) && defined(__i386__) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ + I = (bindex_t)J;\ +} + +#else /* GNUC */ +#if USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = ffs(X)-1 + +#else /* USE_BUILTIN_FFS */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* USE_BUILTIN_FFS */ +#endif /* GNUC */ + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). 
In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probablistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. +*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has its cinuse bit on */ +#define ok_cinuse(p) cinuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_cinuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = 
(s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +/* Initialize mparams */ +static int init_mparams(void) { + if (mparams.page_size == 0) { + size_t s; + + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + s = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ + s = (size_t)(time(0) ^ (size_t)0x55555555U); + + s |= (size_t)8U; /* ensure nonzero */ + s &= ~(size_t)7U; /* improve chances of fault for bad values */ + + } +#else /* (FOOTERS && !INSECURE) */ + s = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + ACQUIRE_MAGIC_INIT_LOCK(); + if (mparams.magic == 0) { + mparams.magic = s; + /* Set up lock for main malloc area */ + INITIAL_LOCK(&gm->mutex); + gm->mflags = mparams.default_mflags; + } + RELEASE_MAGIC_INIT_LOCK(); + +#if !defined(WIN32) && !defined(__OS2__) + mparams.page_size = malloc_getpagesize; + mparams.granularity = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : mparams.page_size); +#elif defined (__OS2__) + /* if low-memory is used, os2munmap() would break + if it were anything other than 64k */ + mparams.page_size = 4096u; + mparams.granularity = 65536u; +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + mparams.page_size = system_info.dwPageSize; + mparams.granularity = system_info.dwAllocationGranularity; + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. 
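+       (For example, a build where size_t is narrower than a pointer,
+       or a platform reporting a non-power-of-two page size, fails
+       these checks and ABORTs rather than running with a broken
+       layout.)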
+ */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) + ABORT; + } + return 0; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (size_t)value; + init_mparams(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = chunksize(p); + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!next_pinuse(p)); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, 
p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. */ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + assert(m->topsize == chunksize(m->top)); + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while 
(s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. 
If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendants + correspond properly to bit masks. We use the rightmost descendant + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + (m == gm)? dlmalloc(b) : mspace_malloc(m, b) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. 
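+
+  Sizing illustration (assuming an 8-byte size_t and a 64 KB allocation
+  granularity; other configurations scale accordingly): a padded
+  request nb of 100000 bytes is mapped as
+  granularity_align(100000 + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK),
+  i.e. 131072 bytes here; the extra space covers alignment slop, the
+  offset kept in prev_foot, and the fake trailing chunk noted above.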
+*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. 
*/ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmapped); + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + init_mparams(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. 
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + size_t asize = 0; + ACQUIRE_MORECORE_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + asize += (page_align((size_t)base) - (size_t)base); + /* Can't call MORECORE if size is negative when treated as signed */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == base) { + tbase = base; + tsize = asize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Use mem here only if it did continuously extend old space */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { + tbase = br; + tsize = asize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (asize < HALF_MAX_SIZE_T && + asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { + size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + asize += esize; + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-asize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = asize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MORECORE_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (rsize > nb) { /* Fail if wraps around zero */ + char* mp = (char*)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + mmap_flag = IS_MMAPPED_BIT; + } + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MORECORE_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MORECORE_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + m->seg.base = m->least_addr = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmap_flag); + m->magic = mparams.magic; 
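+      /* First-time path: the new (tbase, tsize) block has just been recorded
+         as the initial segment; now set up the empty smallbin rings and carve
+         the initial top chunk, offset past the embedded malloc_state when
+         this is a user mspace rather than the global state. */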
+ init_bins(m); + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if 
(HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MORECORE_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MORECORE_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if 
(rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. 
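+        A worked illustration (figures are made up, not from a real trace):
+        with alignment = 256, suppose chunk2mem(p) lands 16 bytes past a
+        256-byte boundary.  Rounding up via
+        (mem + alignment - SIZE_T_ONE) & -alignment gives the next boundary,
+        240 bytes ahead, and mem2chunk of that address is the candidate br.
+        Had the leading gap (br - p) come out smaller than MIN_CHUNK_SIZE,
+        the code below would step one further alignment unit forward, which
+        the extra alignment + MIN_CHUNK_SIZE bytes requested above always
+        allow.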
+ */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? + br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. + + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
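+
+    As an illustrative sketch only (hypothetical sizes, no error handling),
+    the public wrappers defined later reach this routine as follows:
+
+      size_t sizes[3] = { 24, 4096, 64 };
+      void** elem = dlindependent_comalloc(3, sizes, 0); // ialloc(gm, 3, sizes, 0, 0)
+      if (elem != 0) {
+        // elem[0..2] are independently freeable chunks carved from one block
+        dlfree(elem[0]); dlfree(elem[1]); dlfree(elem[2]);
+        dlfree(elem);  // the pointer array itself, allocated here since chunks == 0
+      }
+
+    dlindependent_calloc(n, size, chunks) takes the same path with opts == 3,
+    so all elements share one size and are cleared.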
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
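+           Testing two bits of smallmap at once lets this path use either the
+           exact bin or its immediate neighbor; in both cases the surplus over
+           the request is too small to form a separate chunk, so the chunk is
+           handed out whole instead of being split: the "remainderless" case
+           described in the algorithm summary above.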
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
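+
+    Order of the checks below: a chunk that came from a direct mmap is
+    returned straight to the system with CALL_MUNMAP; otherwise the chunk is
+    first merged backward (via prev_foot) when its predecessor is free, then
+    merged forward into top, dv, or an ordinary free neighbor, and finally
+    inserted into a bin unless it became the new top or dv chunk.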
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->mflags = mparams.default_mflags; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + set_segment_flags(&m->seg, IS_MMAPPED_BIT); + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + set_segment_flags(&m->seg, EXTERN_BIT); + set_lock(m, locked); + } + return (mspace)m; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = get_segment_flags(sp); + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != 
elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. 
+ * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. + + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. + * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. + + V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) + * Fix improper #endif nesting in C++ + * Add explicit casts needed for C++ + + V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) + * Use trees for large bins + * Support mspaces + * Use segments to unify sbrk-based and mmap-based system allocation, + removing need for emulation on most platforms without sbrk. + * Default safety checks + * Optional footer checks. Thanks to William Robertson for the idea. + * Internal code refactoring + * Incorporate suggestions and platform-specific changes. + Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, + Aaron Bachmann, Emery Berger, and others. + * Speed up non-fastbin processing enough to remove fastbins. + * Remove useless cfree() to avoid conflicts with other apps. + * Remove internal memcpy, memset. 
Compilers handle builtins better. + * Remove some options that no one ever used and rename others. + + V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + * Fix malloc_state bitmap array misdeclaration + + V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) + * Allow tuning of FIRST_SORTED_BIN_SIZE + * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. + * Better detection and support for non-contiguousness of MORECORE. + Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger + * Bypass most of malloc if no frees. Thanks To Emery Berger. + * Fix freeing of old top non-contiguous chunk im sysmalloc. + * Raised default trim and map thresholds to 256K. + * Fix mmap-related #defines. Thanks to Lubos Lunak. + * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. + * Branch-free bin calculation + * Default trim and mmap thresholds now 256K. + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. 
+ * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occurring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) 
+ +*/ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi 2.S new file mode 100644 index 0000000..379ea4b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi 2.S @@ -0,0 +1,128 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2004 Anthony Green + + FR-V Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # gr8 : ffi_prep_args + # gr9 : &ecif + # gr10: cif->bytes + # gr11: fig->flags + # gr12: ecif.rvalue + # gr13: fn + +ffi_call_EABI: + addi sp, #-80, sp + sti fp, @(sp, #24) + addi sp, #24, fp + movsg lr, gr5 + + /* Make room for the new arguments. */ + /* subi sp, fp, gr10 */ + + /* Store return address and incoming args on stack. */ + sti gr5, @(fp, #8) + sti gr8, @(fp, #-4) + sti gr9, @(fp, #-8) + sti gr10, @(fp, #-12) + sti gr11, @(fp, #-16) + sti gr12, @(fp, #-20) + sti gr13, @(fp, #-24) + + sub sp, gr10, sp + + /* Call ffi_prep_args. */ + ldi @(fp, #-4), gr4 + addi sp, #0, gr8 + ldi @(fp, #-8), gr9 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* ffi_prep_args returns the new stack pointer. */ + mov gr8, gr4 + + ldi @(sp, #0), gr8 + ldi @(sp, #4), gr9 + ldi @(sp, #8), gr10 + ldi @(sp, #12), gr11 + ldi @(sp, #16), gr12 + ldi @(sp, #20), gr13 + + /* Always copy the return value pointer into the hidden + parameter register. This is only strictly necessary + when we're returning an aggregate type, but it doesn't + hurt to do this all the time, and it saves a branch. */ + ldi @(fp, #-20), gr3 + + /* Use the ffi_prep_args return value for the new sp. */ + mov gr4, sp + + /* Call the target function. */ + ldi @(fp, -24), gr4 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* Store the result. */ + ldi @(fp, #-16), gr10 /* fig->flags */ + ldi @(fp, #-20), gr4 /* ecif.rvalue */ + + /* Is the return value stored in two registers? */ + cmpi gr10, #8, icc0 + bne icc0, 0, .L2 + /* Yes, save them. */ + sti gr8, @(gr4, #0) + sti gr9, @(gr4, #4) + bra .L3 +.L2: + /* Is the return value a structure? 
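+       (ffi_prep_cif_machdep stores -1 in cif->flags for FFI_TYPE_STRUCT
+       returns, so gr10 == -1 means the callee already wrote the result
+       through the hidden pointer in gr3 and nothing is stored here.)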
*/ + cmpi gr10, #-1, icc0 + beq icc0, 0, .L3 + /* No, save a 4 byte return value. */ + sti gr8, @(gr4, #0) +.L3: + + /* Restore the stack, and return. */ + ldi @(fp, 8), gr5 + ld @(fp, gr0), fp + addi sp,#80,sp + jmpl @(gr5,gr0) + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi.S new file mode 100644 index 0000000..379ea4b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi.S @@ -0,0 +1,128 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2004 Anthony Green + + FR-V Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # gr8 : ffi_prep_args + # gr9 : &ecif + # gr10: cif->bytes + # gr11: fig->flags + # gr12: ecif.rvalue + # gr13: fn + +ffi_call_EABI: + addi sp, #-80, sp + sti fp, @(sp, #24) + addi sp, #24, fp + movsg lr, gr5 + + /* Make room for the new arguments. */ + /* subi sp, fp, gr10 */ + + /* Store return address and incoming args on stack. */ + sti gr5, @(fp, #8) + sti gr8, @(fp, #-4) + sti gr9, @(fp, #-8) + sti gr10, @(fp, #-12) + sti gr11, @(fp, #-16) + sti gr12, @(fp, #-20) + sti gr13, @(fp, #-24) + + sub sp, gr10, sp + + /* Call ffi_prep_args. */ + ldi @(fp, #-4), gr4 + addi sp, #0, gr8 + ldi @(fp, #-8), gr9 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* ffi_prep_args returns the new stack pointer. */ + mov gr8, gr4 + + ldi @(sp, #0), gr8 + ldi @(sp, #4), gr9 + ldi @(sp, #8), gr10 + ldi @(sp, #12), gr11 + ldi @(sp, #16), gr12 + ldi @(sp, #20), gr13 + + /* Always copy the return value pointer into the hidden + parameter register. This is only strictly necessary + when we're returning an aggregate type, but it doesn't + hurt to do this all the time, and it saves a branch. */ + ldi @(fp, #-20), gr3 + + /* Use the ffi_prep_args return value for the new sp. */ + mov gr4, sp + + /* Call the target function. */ + ldi @(fp, -24), gr4 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* Store the result. 
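+       gr10 carries fig->flags: 8 means the value comes back in gr8/gr9,
+       -1 means a structure was already written through the hidden pointer
+       in gr3, and anything else is returned as a 4-byte value in gr8.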
*/ + ldi @(fp, #-16), gr10 /* fig->flags */ + ldi @(fp, #-20), gr4 /* ecif.rvalue */ + + /* Is the return value stored in two registers? */ + cmpi gr10, #8, icc0 + bne icc0, 0, .L2 + /* Yes, save them. */ + sti gr8, @(gr4, #0) + sti gr9, @(gr4, #4) + bra .L3 +.L2: + /* Is the return value a structure? */ + cmpi gr10, #-1, icc0 + beq icc0, 0, .L3 + /* No, save a 4 byte return value. */ + sti gr8, @(gr4, #0) +.L3: + + /* Restore the stack, and return. */ + ldi @(fp, 8), gr5 + ld @(fp, gr0), fp + addi sp,#80,sp + jmpl @(gr5,gr0) + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi 2.c new file mode 100644 index 0000000..ed1c65a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi 2.c @@ -0,0 +1,292 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2004 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2008 Red Hat, Inc. + + FR-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + /* if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (count > 24) + { + // This is going on the stack. Turn it into a double. 
+ *(double *) argp = (double) *(float*)(* p_argv); + z = sizeof(double); + } + else + *(void **) argp = *(void **)(* p_argv); + } */ + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in gr7. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("gr7"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("fp"); + char *stack_args = frame_pointer + 16; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + avalue[i] = ptr; + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. 
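Editorial aside, not part of this diff or of the vendored gem: the FR-V ffi.c above is only ever reached through libffi's public entry points. A minimal caller-side sketch, assuming a standard libffi installation, is shown below; ffi_prep_cif() runs the machine-dependent ffi_prep_cif_machdep() seen above, and ffi_call() hands the marshalled arguments to the target glue (ffi_call_EABI on FR-V).

#include <ffi.h>
#include <stdio.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
  ffi_cif cif;
  ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
  int a = 2, b = 3;
  void *argvalues[2] = { &a, &b };
  ffi_arg result;   /* integer results narrower than a word must be received in an ffi_arg */

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(add), &result, argvalues);
      printf("%d\n", (int) result);   /* prints 5 */
    }
  return 0;
}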
*/ + if (ptr == ((char *)register_args + (6*4))) + ptr = stack_args; + } + + /* Invoke the closure. */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + /* The caller allocates space for the return structure, and + passes a pointer to this space in gr3. Use this value directly + as the return value. */ + register void *return_struct_ptr __asm__("gr3"); + (closure->fun) (cif, return_struct_ptr, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + + /* Functions return 4-byte or smaller results in gr8. 8-byte + values also use gr9. We fill the both, even for small return + values, just to avoid a branch. */ + asm ("ldi @(%0, #0), gr8" : : "r" (&rvalue)); + asm ("ldi @(%0, #0), gr9" : : "r" (&((int *) &rvalue)[1])); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; +#ifdef __FRV_FDPIC__ + register void *got __asm__("gr15"); +#endif + int i; + + fn = (unsigned long) ffi_closure_eabi; + +#ifdef __FRV_FDPIC__ + tramp[0] = &((unsigned int *)codeloc)[2]; + tramp[1] = got; + tramp[2] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[3] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[4] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[5] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[6] = 0x9cc86000; /* ldi @(gr6, #0), gr14 */ + tramp[7] = 0x8030e000; /* jmpl @(gr14, gr0) */ +#else + tramp[0] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[1] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[2] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[3] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[4] = 0x80300006; /* jmpl @(gr0, gr6) */ +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Cache flushing. */ + for (i = 0; i < FFI_TRAMPOLINE_SIZE; i++) + __asm__ volatile ("dcf @(%0,%1)\n\tici @(%2,%1)" :: "r" (tramp), "r" (i), + "r" (codeloc)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi.c new file mode 100644 index 0000000..ed1c65a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi.c @@ -0,0 +1,292 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2004 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2008 Red Hat, Inc. + + FR-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
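Editorial aside, not part of this diff: the closure path above (ffi_closure_eabi plus the trampoline words written by ffi_prep_closure_loc) is normally driven through the public ffi_closure_alloc()/ffi_prep_closure_loc() pair. A hedged sketch, assuming a libffi build with closures enabled:

#include <ffi.h>
#include <stdio.h>

static void handler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif; (void) user_data;
  *(ffi_arg *) ret = *(int *) args[0] * 2;   /* return twice the argument */
}

int main(void)
{
  ffi_cif cif;
  ffi_type *argtypes[1] = { &ffi_type_sint };
  void *code = NULL;
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

  if (closure
      && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) == FFI_OK
      && ffi_prep_closure_loc(closure, &cif, handler, NULL, code) == FFI_OK)
    {
      int (*fn)(int) = (int (*)(int)) code;
      printf("%d\n", fn(21));   /* prints 42; the call enters through the generated trampoline */
    }
  if (closure)
    ffi_closure_free(closure);
  return 0;
}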
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + /* if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (count > 24) + { + // This is going on the stack. Turn it into a double. + *(double *) argp = (double) *(float*)(* p_argv); + z = sizeof(double); + } + else + *(void **) argp = *(void **)(* p_argv); + } */ + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in gr7. We must save this + pointer in a place that will persist while we do our work. 
*/ + register ffi_closure *creg __asm__ ("gr7"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("fp"); + char *stack_args = frame_pointer + 16; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + avalue[i] = ptr; + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == ((char *)register_args + (6*4))) + ptr = stack_args; + } + + /* Invoke the closure. */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + /* The caller allocates space for the return structure, and + passes a pointer to this space in gr3. Use this value directly + as the return value. */ + register void *return_struct_ptr __asm__("gr3"); + (closure->fun) (cif, return_struct_ptr, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + + /* Functions return 4-byte or smaller results in gr8. 8-byte + values also use gr9. We fill the both, even for small return + values, just to avoid a branch. */ + asm ("ldi @(%0, #0), gr8" : : "r" (&rvalue)); + asm ("ldi @(%0, #0), gr9" : : "r" (&((int *) &rvalue)[1])); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; +#ifdef __FRV_FDPIC__ + register void *got __asm__("gr15"); +#endif + int i; + + fn = (unsigned long) ffi_closure_eabi; + +#ifdef __FRV_FDPIC__ + tramp[0] = &((unsigned int *)codeloc)[2]; + tramp[1] = got; + tramp[2] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[3] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[4] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[5] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[6] = 0x9cc86000; /* ldi @(gr6, #0), gr14 */ + tramp[7] = 0x8030e000; /* jmpl @(gr14, gr0) */ +#else + tramp[0] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[1] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[2] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[3] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[4] = 0x80300006; /* jmpl @(gr0, gr6) */ +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Cache flushing. 
*/ + for (i = 0; i < FFI_TRAMPOLINE_SIZE; i++) + __asm__ volatile ("dcf @(%0,%1)\n\tici @(%2,%1)" :: "r" (tramp), "r" (i), + "r" (codeloc)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget 2.h new file mode 100644 index 0000000..d42540e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget 2.h @@ -0,0 +1,62 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2004 Red Hat, Inc. + Target configuration macros for FR-V + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_EABI +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef __FRV_FDPIC__ +/* Trampolines are 8 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (8*4) +#else +/* Trampolines are 5 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (5*4) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget.h new file mode 100644 index 0000000..d42540e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget.h @@ -0,0 +1,62 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2004 Red Hat, Inc. 
+ Target configuration macros for FR-V + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_EABI +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef __FRV_FDPIC__ +/* Trampolines are 8 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (8*4) +#else +/* Trampolines are 5 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (5*4) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi 2.c new file mode 100644 index 0000000..b1d04c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi 2.c @@ -0,0 +1,604 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + Copyright (c) 2011 Anthony Green + + IA64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include + +#include "ia64_flags.h" + +/* A 64-bit pointer value. In LP64 mode, this is effectively a plain + pointer. In ILP32 mode, it's a pointer that's been extended to + 64 bits by "addp4". */ +typedef void *PTR64 __attribute__((mode(DI))); + +/* Memory image of fp register contents. This is the implementation + specific format used by ldf.fill/stf.spill. All we care about is + that it wants a 16 byte aligned slot. */ +typedef struct +{ + UINT64 x[2] __attribute__((aligned(16))); +} fpreg; + + +/* The stack layout given to ffi_call_unix and ffi_closure_unix_inner. */ + +struct ia64_args +{ + fpreg fp_regs[8]; /* Contents of 8 fp arg registers. */ + UINT64 gp_regs[8]; /* Contents of 8 gp arg registers. */ + UINT64 other_args[]; /* Arguments passed on stack, variable size. */ +}; + + +/* Adjust ADDR, a pointer to an 8 byte slot, to point to the low LEN bytes. */ + +static inline void * +endian_adjust (void *addr, size_t len) +{ +#ifdef __BIG_ENDIAN__ + return addr + (8 - len); +#else + return addr; +#endif +} + +/* Store VALUE to ADDR in the current cpu implementation's fp spill format. + This is a macro instead of a function, so that it works for all 3 floating + point types without type conversions. Type conversion to long double breaks + the denorm support. */ + +#define stf_spill(addr, value) \ + asm ("stf.spill %0 = %1%P0" : "=m" (*addr) : "f"(value)); + +/* Load a value from ADDR, which is in the current cpu implementation's + fp spill format. As above, this must also be a macro. */ + +#define ldf_fill(result, addr) \ + asm ("ldf.fill %0 = %1%P1" : "=f"(result) : "m"(*addr)); + +/* Return the size of the C type associated with with TYPE. Which will + be one of the FFI_IA64_TYPE_HFA_* values. */ + +static size_t +hfa_type_size (int type) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + return sizeof(float); + case FFI_IA64_TYPE_HFA_DOUBLE: + return sizeof(double); + case FFI_IA64_TYPE_HFA_LDOUBLE: + return sizeof(__float80); + default: + abort (); + } +} + +/* Load from ADDR a value indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. */ + +static void +hfa_type_load (fpreg *fpaddr, int type, void *addr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + stf_spill (fpaddr, *(float *) addr); + return; + case FFI_IA64_TYPE_HFA_DOUBLE: + stf_spill (fpaddr, *(double *) addr); + return; + case FFI_IA64_TYPE_HFA_LDOUBLE: + stf_spill (fpaddr, *(__float80 *) addr); + return; + default: + abort (); + } +} + +/* Load VALUE into ADDR as indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. 
*/ + +static void +hfa_type_store (int type, void *addr, fpreg *fpaddr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + { + float result; + ldf_fill (result, fpaddr); + *(float *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_DOUBLE: + { + double result; + ldf_fill (result, fpaddr); + *(double *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_LDOUBLE: + { + __float80 result; + ldf_fill (result, fpaddr); + *(__float80 *) addr = result; + break; + } + default: + abort (); + } +} + +/* Is TYPE a struct containing floats, doubles, or extended doubles, + all of the same fp type? If so, return the element type. Return + FFI_TYPE_VOID if not. */ + +static int +hfa_element_type (ffi_type *type, int nested) +{ + int element = FFI_TYPE_VOID; + + switch (type->type) + { + case FFI_TYPE_FLOAT: + /* We want to return VOID for raw floating-point types, but the + synthetic HFA type if we're nested within an aggregate. */ + if (nested) + element = FFI_IA64_TYPE_HFA_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + /* Similarly. */ + if (nested) + element = FFI_IA64_TYPE_HFA_DOUBLE; + break; + + case FFI_TYPE_LONGDOUBLE: + /* Similarly, except that that HFA is true for double extended, + but not quad precision. Both have sizeof == 16, so tell the + difference based on the precision. */ + if (LDBL_MANT_DIG == 64 && nested) + element = FFI_IA64_TYPE_HFA_LDOUBLE; + break; + + case FFI_TYPE_STRUCT: + { + ffi_type **ptr = &type->elements[0]; + + for (ptr = &type->elements[0]; *ptr ; ptr++) + { + int sub_element = hfa_element_type (*ptr, 1); + if (sub_element == FFI_TYPE_VOID) + return FFI_TYPE_VOID; + + if (element == FFI_TYPE_VOID) + element = sub_element; + else if (element != sub_element) + return FFI_TYPE_VOID; + } + } + break; + + default: + return FFI_TYPE_VOID; + } + + return element; +} + + +/* Perform machine dependent cif processing. */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + int flags; + + /* Adjust cif->bytes to include space for the bits of the ia64_args frame + that precedes the integer register portion. The estimate that the + generic bits did for the argument space required is good enough for the + integer component. */ + cif->bytes += offsetof(struct ia64_args, gp_regs[0]); + if (cif->bytes < sizeof(struct ia64_args)) + cif->bytes = sizeof(struct ia64_args); + + /* Set the return type flag. */ + flags = cif->rtype->type; + switch (cif->rtype->type) + { + case FFI_TYPE_LONGDOUBLE: + /* Leave FFI_TYPE_LONGDOUBLE as meaning double extended precision, + and encode quad precision as a two-word integer structure. 
*/ + if (LDBL_MANT_DIG != 64) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (16 << 8); + break; + + case FFI_TYPE_STRUCT: + { + size_t size = cif->rtype->size; + int hfa_type = hfa_element_type (cif->rtype, 0); + + if (hfa_type != FFI_TYPE_VOID) + { + size_t nelts = size / hfa_type_size (hfa_type); + if (nelts <= 8) + flags = hfa_type | (size << 8); + } + else + { + if (size <= 32) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (size << 8); + } + } + break; + + default: + break; + } + cif->flags = flags; + + return FFI_OK; +} + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status +ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern int ffi_call_unix (struct ia64_args *, PTR64, void (*)(void), UINT64); + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + struct ia64_args *stack; + long i, avn, gpcount, fpcount; + ffi_type **p_arg; + + FFI_ASSERT (cif->abi == FFI_UNIX); + + /* If we have no spot for a return value, make one. */ + if (rvalue == NULL && cif->rtype->type != FFI_TYPE_VOID) + rvalue = alloca (cif->rtype->size); + + /* Allocate the stack frame. */ + stack = alloca (cif->bytes); + + gpcount = fpcount = 0; + avn = cif->nargs; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + stack->gp_regs[gpcount++] = *(SINT8 *)avalue[i]; + break; + case FFI_TYPE_UINT8: + stack->gp_regs[gpcount++] = *(UINT8 *)avalue[i]; + break; + case FFI_TYPE_SINT16: + stack->gp_regs[gpcount++] = *(SINT16 *)avalue[i]; + break; + case FFI_TYPE_UINT16: + stack->gp_regs[gpcount++] = *(UINT16 *)avalue[i]; + break; + case FFI_TYPE_SINT32: + stack->gp_regs[gpcount++] = *(SINT32 *)avalue[i]; + break; + case FFI_TYPE_UINT32: + stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i]; + break; + + case FFI_TYPE_POINTER: + stack->gp_regs[gpcount++] = (UINT64)(PTR64) *(void **)avalue[i]; + break; + + case FFI_TYPE_FLOAT: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]); + { + UINT32 tmp; + memcpy (&tmp, avalue[i], sizeof (UINT32)); + stack->gp_regs[gpcount++] = tmp; + } + break; + + case FFI_TYPE_DOUBLE: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]); + memcpy (&stack->gp_regs[gpcount++], avalue[i], sizeof (UINT64)); + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(__float80 *)avalue[i]); + memcpy (&stack->gp_regs[gpcount], avalue[i], 16); + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_load (&stack->fp_regs[fpcount], hfa_type, + avalue[i] + offset); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + } + + memcpy (&stack->gp_regs[gpcount], avalue[i], size); + 
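Editorial aside, not part of this diff: the HFA classification above only comes into play once a caller has described an aggregate type to libffi. A small sketch of that description using the public API; the struct layout here is illustrative only:

#include <ffi.h>

int main(void)
{
  /* Describe "struct { float x, y; }" to libffi.  On IA-64 the
     hfa_element_type() logic above classifies it as a homogeneous float
     aggregate, so it is returned in FP registers rather than in memory. */
  ffi_type *elements[3] = { &ffi_type_float, &ffi_type_float, NULL };
  ffi_type point_type;
  ffi_cif cif;

  point_type.size      = 0;               /* computed by ffi_prep_cif */
  point_type.alignment = 0;               /* computed by ffi_prep_cif */
  point_type.type      = FFI_TYPE_STRUCT;
  point_type.elements  = elements;

  /* Preparing the cif runs ffi_prep_cif_machdep(), which sets cif->flags
     to the HFA code plus the aggregate size, as shown above. */
  return ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0,
                      &point_type, NULL) == FFI_OK ? 0 : 1;
}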
gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + ffi_call_unix (stack, rvalue, fn, cif->flags); +} + +/* Closures represent a pair consisting of a function pointer, and + some user data. A closure is invoked by reinterpreting the closure + as a function pointer, and branching to it. Thus we can make an + interpreted function callable as a C function: We turn the + interpreter itself, together with a pointer specifying the + interpreted procedure, into a closure. + + For IA64, function pointer are already pairs consisting of a code + pointer, and a gp pointer. The latter is needed to access global + variables. Here we set up such a pair as the first two words of + the closure (in the "trampoline" area), but we replace the gp + pointer with a pointer to the closure itself. We also add the real + gp pointer to the closure. This allows the function entry code to + both retrieve the user data, and to restore the correct gp pointer. */ + +extern void ffi_closure_unix (); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + /* The layout of a function descriptor. A C function pointer really + points to one of these. */ + struct ia64_fd + { + UINT64 code_pointer; + UINT64 gp; + }; + + struct ffi_ia64_trampoline_struct + { + UINT64 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT64 fake_gp; /* Pointer to closure, installed as gp. */ + UINT64 real_gp; /* Real gp value. */ + }; + + struct ffi_ia64_trampoline_struct *tramp; + struct ia64_fd *fd; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp; + fd = (struct ia64_fd *)(void *)ffi_closure_unix; + + tramp->code_pointer = fd->code_pointer; + tramp->real_gp = fd->gp; + tramp->fake_gp = (UINT64)(PTR64)codeloc; + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +UINT64 +ffi_closure_unix_inner (ffi_closure *closure, struct ia64_args *stack, + void *rvalue, void *r8) +{ + ffi_cif *cif; + void **avalue; + ffi_type **p_arg; + long i, avn, gpcount, fpcount, nfixedargs; + + cif = closure->cif; + avn = cif->nargs; + nfixedargs = cif->nfixedargs; + avalue = alloca (avn * sizeof (void *)); + + /* If the structure return value is passed in memory get that location + from r8 so as to pass the value directly back to the caller. 
*/ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = r8; + + gpcount = fpcount = 0; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + int named = i < nfixedargs; + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 1); + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 2); + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 4); + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + avalue[i] = &stack->gp_regs[gpcount++]; + break; + case FFI_TYPE_POINTER: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], sizeof(void*)); + break; + + case FFI_TYPE_FLOAT: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + float result; + avalue[i] = addr; + ldf_fill (result, addr); + *(float *)addr = result; + } + else + avalue[i] = endian_adjust(&stack->gp_regs[gpcount], 4); + gpcount++; + break; + + case FFI_TYPE_DOUBLE: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + double result; + avalue[i] = addr; + ldf_fill (result, addr); + *(double *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount++; + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + __float80 result; + avalue[i] = addr; + ldf_fill (result, addr); + *(__float80 *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + void *addr = alloca (size); + + avalue[i] = addr; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_store (hfa_type, addr + offset, + &stack->fp_regs[fpcount]); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + + if (offset < size) + memcpy (addr + offset, (char *)stack->gp_regs + gp_offset, + size - offset); + } + else + avalue[i] = &stack->gp_regs[gpcount]; + + gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + closure->fun (cif, rvalue, avalue, closure->user_data); + + return cif->flags; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi.c new file mode 100644 index 0000000..b1d04c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi.c @@ -0,0 +1,604 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc. 
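Editorial aside, not part of this diff: the nfixedargs bookkeeping above (ffi_prep_cif_machdep_var, and the named test in ffi_closure_unix_inner) exists because anonymous floating-point arguments travel in GP registers on IA-64. From the caller's side this is reached through ffi_prep_cif_var(); a hedged sketch calling printf:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *argtypes[2] = { &ffi_type_pointer, &ffi_type_double };
  const char *fmt = "pi ~ %f\n";
  double pi = 3.14159;
  void *argvalues[2] = { &fmt, &pi };
  ffi_arg rc;

  /* One fixed argument (the format string), two arguments in total. */
  if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
                       &ffi_type_sint, argtypes) == FFI_OK)
    ffi_call(&cif, FFI_FN(printf), &rc, argvalues);
  return 0;
}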
+ Copyright (c) 2000 Hewlett Packard Company + Copyright (c) 2011 Anthony Green + + IA64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include + +#include "ia64_flags.h" + +/* A 64-bit pointer value. In LP64 mode, this is effectively a plain + pointer. In ILP32 mode, it's a pointer that's been extended to + 64 bits by "addp4". */ +typedef void *PTR64 __attribute__((mode(DI))); + +/* Memory image of fp register contents. This is the implementation + specific format used by ldf.fill/stf.spill. All we care about is + that it wants a 16 byte aligned slot. */ +typedef struct +{ + UINT64 x[2] __attribute__((aligned(16))); +} fpreg; + + +/* The stack layout given to ffi_call_unix and ffi_closure_unix_inner. */ + +struct ia64_args +{ + fpreg fp_regs[8]; /* Contents of 8 fp arg registers. */ + UINT64 gp_regs[8]; /* Contents of 8 gp arg registers. */ + UINT64 other_args[]; /* Arguments passed on stack, variable size. */ +}; + + +/* Adjust ADDR, a pointer to an 8 byte slot, to point to the low LEN bytes. */ + +static inline void * +endian_adjust (void *addr, size_t len) +{ +#ifdef __BIG_ENDIAN__ + return addr + (8 - len); +#else + return addr; +#endif +} + +/* Store VALUE to ADDR in the current cpu implementation's fp spill format. + This is a macro instead of a function, so that it works for all 3 floating + point types without type conversions. Type conversion to long double breaks + the denorm support. */ + +#define stf_spill(addr, value) \ + asm ("stf.spill %0 = %1%P0" : "=m" (*addr) : "f"(value)); + +/* Load a value from ADDR, which is in the current cpu implementation's + fp spill format. As above, this must also be a macro. */ + +#define ldf_fill(result, addr) \ + asm ("ldf.fill %0 = %1%P1" : "=f"(result) : "m"(*addr)); + +/* Return the size of the C type associated with with TYPE. Which will + be one of the FFI_IA64_TYPE_HFA_* values. */ + +static size_t +hfa_type_size (int type) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + return sizeof(float); + case FFI_IA64_TYPE_HFA_DOUBLE: + return sizeof(double); + case FFI_IA64_TYPE_HFA_LDOUBLE: + return sizeof(__float80); + default: + abort (); + } +} + +/* Load from ADDR a value indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. 
*/ + +static void +hfa_type_load (fpreg *fpaddr, int type, void *addr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + stf_spill (fpaddr, *(float *) addr); + return; + case FFI_IA64_TYPE_HFA_DOUBLE: + stf_spill (fpaddr, *(double *) addr); + return; + case FFI_IA64_TYPE_HFA_LDOUBLE: + stf_spill (fpaddr, *(__float80 *) addr); + return; + default: + abort (); + } +} + +/* Load VALUE into ADDR as indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. */ + +static void +hfa_type_store (int type, void *addr, fpreg *fpaddr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + { + float result; + ldf_fill (result, fpaddr); + *(float *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_DOUBLE: + { + double result; + ldf_fill (result, fpaddr); + *(double *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_LDOUBLE: + { + __float80 result; + ldf_fill (result, fpaddr); + *(__float80 *) addr = result; + break; + } + default: + abort (); + } +} + +/* Is TYPE a struct containing floats, doubles, or extended doubles, + all of the same fp type? If so, return the element type. Return + FFI_TYPE_VOID if not. */ + +static int +hfa_element_type (ffi_type *type, int nested) +{ + int element = FFI_TYPE_VOID; + + switch (type->type) + { + case FFI_TYPE_FLOAT: + /* We want to return VOID for raw floating-point types, but the + synthetic HFA type if we're nested within an aggregate. */ + if (nested) + element = FFI_IA64_TYPE_HFA_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + /* Similarly. */ + if (nested) + element = FFI_IA64_TYPE_HFA_DOUBLE; + break; + + case FFI_TYPE_LONGDOUBLE: + /* Similarly, except that that HFA is true for double extended, + but not quad precision. Both have sizeof == 16, so tell the + difference based on the precision. */ + if (LDBL_MANT_DIG == 64 && nested) + element = FFI_IA64_TYPE_HFA_LDOUBLE; + break; + + case FFI_TYPE_STRUCT: + { + ffi_type **ptr = &type->elements[0]; + + for (ptr = &type->elements[0]; *ptr ; ptr++) + { + int sub_element = hfa_element_type (*ptr, 1); + if (sub_element == FFI_TYPE_VOID) + return FFI_TYPE_VOID; + + if (element == FFI_TYPE_VOID) + element = sub_element; + else if (element != sub_element) + return FFI_TYPE_VOID; + } + } + break; + + default: + return FFI_TYPE_VOID; + } + + return element; +} + + +/* Perform machine dependent cif processing. */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + int flags; + + /* Adjust cif->bytes to include space for the bits of the ia64_args frame + that precedes the integer register portion. The estimate that the + generic bits did for the argument space required is good enough for the + integer component. */ + cif->bytes += offsetof(struct ia64_args, gp_regs[0]); + if (cif->bytes < sizeof(struct ia64_args)) + cif->bytes = sizeof(struct ia64_args); + + /* Set the return type flag. */ + flags = cif->rtype->type; + switch (cif->rtype->type) + { + case FFI_TYPE_LONGDOUBLE: + /* Leave FFI_TYPE_LONGDOUBLE as meaning double extended precision, + and encode quad precision as a two-word integer structure. 
*/ + if (LDBL_MANT_DIG != 64) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (16 << 8); + break; + + case FFI_TYPE_STRUCT: + { + size_t size = cif->rtype->size; + int hfa_type = hfa_element_type (cif->rtype, 0); + + if (hfa_type != FFI_TYPE_VOID) + { + size_t nelts = size / hfa_type_size (hfa_type); + if (nelts <= 8) + flags = hfa_type | (size << 8); + } + else + { + if (size <= 32) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (size << 8); + } + } + break; + + default: + break; + } + cif->flags = flags; + + return FFI_OK; +} + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status +ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern int ffi_call_unix (struct ia64_args *, PTR64, void (*)(void), UINT64); + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + struct ia64_args *stack; + long i, avn, gpcount, fpcount; + ffi_type **p_arg; + + FFI_ASSERT (cif->abi == FFI_UNIX); + + /* If we have no spot for a return value, make one. */ + if (rvalue == NULL && cif->rtype->type != FFI_TYPE_VOID) + rvalue = alloca (cif->rtype->size); + + /* Allocate the stack frame. */ + stack = alloca (cif->bytes); + + gpcount = fpcount = 0; + avn = cif->nargs; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + stack->gp_regs[gpcount++] = *(SINT8 *)avalue[i]; + break; + case FFI_TYPE_UINT8: + stack->gp_regs[gpcount++] = *(UINT8 *)avalue[i]; + break; + case FFI_TYPE_SINT16: + stack->gp_regs[gpcount++] = *(SINT16 *)avalue[i]; + break; + case FFI_TYPE_UINT16: + stack->gp_regs[gpcount++] = *(UINT16 *)avalue[i]; + break; + case FFI_TYPE_SINT32: + stack->gp_regs[gpcount++] = *(SINT32 *)avalue[i]; + break; + case FFI_TYPE_UINT32: + stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i]; + break; + + case FFI_TYPE_POINTER: + stack->gp_regs[gpcount++] = (UINT64)(PTR64) *(void **)avalue[i]; + break; + + case FFI_TYPE_FLOAT: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]); + { + UINT32 tmp; + memcpy (&tmp, avalue[i], sizeof (UINT32)); + stack->gp_regs[gpcount++] = tmp; + } + break; + + case FFI_TYPE_DOUBLE: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]); + memcpy (&stack->gp_regs[gpcount++], avalue[i], sizeof (UINT64)); + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(__float80 *)avalue[i]); + memcpy (&stack->gp_regs[gpcount], avalue[i], 16); + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_load (&stack->fp_regs[fpcount], hfa_type, + avalue[i] + offset); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + } + + memcpy (&stack->gp_regs[gpcount], avalue[i], size); + 
gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + ffi_call_unix (stack, rvalue, fn, cif->flags); +} + +/* Closures represent a pair consisting of a function pointer, and + some user data. A closure is invoked by reinterpreting the closure + as a function pointer, and branching to it. Thus we can make an + interpreted function callable as a C function: We turn the + interpreter itself, together with a pointer specifying the + interpreted procedure, into a closure. + + For IA64, function pointer are already pairs consisting of a code + pointer, and a gp pointer. The latter is needed to access global + variables. Here we set up such a pair as the first two words of + the closure (in the "trampoline" area), but we replace the gp + pointer with a pointer to the closure itself. We also add the real + gp pointer to the closure. This allows the function entry code to + both retrieve the user data, and to restore the correct gp pointer. */ + +extern void ffi_closure_unix (); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + /* The layout of a function descriptor. A C function pointer really + points to one of these. */ + struct ia64_fd + { + UINT64 code_pointer; + UINT64 gp; + }; + + struct ffi_ia64_trampoline_struct + { + UINT64 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT64 fake_gp; /* Pointer to closure, installed as gp. */ + UINT64 real_gp; /* Real gp value. */ + }; + + struct ffi_ia64_trampoline_struct *tramp; + struct ia64_fd *fd; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp; + fd = (struct ia64_fd *)(void *)ffi_closure_unix; + + tramp->code_pointer = fd->code_pointer; + tramp->real_gp = fd->gp; + tramp->fake_gp = (UINT64)(PTR64)codeloc; + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +UINT64 +ffi_closure_unix_inner (ffi_closure *closure, struct ia64_args *stack, + void *rvalue, void *r8) +{ + ffi_cif *cif; + void **avalue; + ffi_type **p_arg; + long i, avn, gpcount, fpcount, nfixedargs; + + cif = closure->cif; + avn = cif->nargs; + nfixedargs = cif->nfixedargs; + avalue = alloca (avn * sizeof (void *)); + + /* If the structure return value is passed in memory get that location + from r8 so as to pass the value directly back to the caller. 
*/ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = r8; + + gpcount = fpcount = 0; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + int named = i < nfixedargs; + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 1); + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 2); + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 4); + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + avalue[i] = &stack->gp_regs[gpcount++]; + break; + case FFI_TYPE_POINTER: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], sizeof(void*)); + break; + + case FFI_TYPE_FLOAT: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + float result; + avalue[i] = addr; + ldf_fill (result, addr); + *(float *)addr = result; + } + else + avalue[i] = endian_adjust(&stack->gp_regs[gpcount], 4); + gpcount++; + break; + + case FFI_TYPE_DOUBLE: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + double result; + avalue[i] = addr; + ldf_fill (result, addr); + *(double *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount++; + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + __float80 result; + avalue[i] = addr; + ldf_fill (result, addr); + *(__float80 *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + void *addr = alloca (size); + + avalue[i] = addr; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_store (hfa_type, addr + offset, + &stack->fp_regs[fpcount]); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + + if (offset < size) + memcpy (addr + offset, (char *)stack->gp_regs + gp_offset, + size - offset); + } + else + avalue[i] = &stack->gp_regs[gpcount]; + + gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + closure->fun (cif, rvalue, avalue, closure->user_data); + + return cif->flags; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget 2.h new file mode 100644 index 0000000..fd5b9a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget 2.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for IA-64. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, /* Linux and all Unix variants use the same conventions */ + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 /* Really the following struct, which */ + /* can be interpreted as a C function */ + /* descriptor: */ +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget.h new file mode 100644 index 0000000..fd5b9a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for IA-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, /* Linux and all Unix variants use the same conventions */ + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 /* Really the following struct, which */ + /* can be interpreted as a C function */ + /* descriptor: */ +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags 2.h new file mode 100644 index 0000000..9d652ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags 2.h @@ -0,0 +1,40 @@ +/* ----------------------------------------------------------------------- + ia64_flags.h - Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Original author: Hans Boehm, HP Labs + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* "Type" codes used between assembly and C. When used as a part of + a cfi->flags value, the low byte will be these extra type codes, + and bits 8-31 will be the actual size of the type. */ + +/* Small structures containing N words in integer registers. */ +#define FFI_IA64_TYPE_SMALL_STRUCT (FFI_TYPE_LAST + 1) + +/* Homogeneous Floating Point Aggregates (HFAs) which are returned + in FP registers. 
*/ +#define FFI_IA64_TYPE_HFA_FLOAT (FFI_TYPE_LAST + 2) +#define FFI_IA64_TYPE_HFA_DOUBLE (FFI_TYPE_LAST + 3) +#define FFI_IA64_TYPE_HFA_LDOUBLE (FFI_TYPE_LAST + 4) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags.h new file mode 100644 index 0000000..9d652ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags.h @@ -0,0 +1,40 @@ +/* ----------------------------------------------------------------------- + ia64_flags.h - Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Original author: Hans Boehm, HP Labs + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* "Type" codes used between assembly and C. When used as a part of + a cfi->flags value, the low byte will be these extra type codes, + and bits 8-31 will be the actual size of the type. */ + +/* Small structures containing N words in integer registers. */ +#define FFI_IA64_TYPE_SMALL_STRUCT (FFI_TYPE_LAST + 1) + +/* Homogeneous Floating Point Aggregates (HFAs) which are returned + in FP registers. */ +#define FFI_IA64_TYPE_HFA_FLOAT (FFI_TYPE_LAST + 2) +#define FFI_IA64_TYPE_HFA_DOUBLE (FFI_TYPE_LAST + 3) +#define FFI_IA64_TYPE_HFA_LDOUBLE (FFI_TYPE_LAST + 4) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix 2.S new file mode 100644 index 0000000..e2547e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix 2.S @@ -0,0 +1,567 @@ +/* ----------------------------------------------------------------------- + unix.S - Copyright (c) 1998, 2008 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Primary author: Hans Boehm, HP Labs + + Loosely modeled on Cygnus code for other platforms. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "ia64_flags.h" + + .pred.safe_across_calls p1-p5,p16-p63 +.text + +/* int ffi_call_unix (struct ia64_args *stack, PTR64 rvalue, + void (*fn)(void), int flags); + */ + + .align 16 + .global ffi_call_unix + .proc ffi_call_unix +ffi_call_unix: + .prologue + /* Bit o trickiness. We actually share a stack frame with ffi_call. + Rely on the fact that ffi_call uses a vframe and don't bother + tracking one here at all. */ + .fframe 0 + .save ar.pfs, r36 // loc0 + alloc loc0 = ar.pfs, 4, 3, 8, 0 + .save rp, loc1 + mov loc1 = b0 + .body + add r16 = 16, in0 + mov loc2 = gp + mov r8 = in1 + ;; + + /* Load up all of the argument registers. */ + ldf.fill f8 = [in0], 32 + ldf.fill f9 = [r16], 32 + ;; + ldf.fill f10 = [in0], 32 + ldf.fill f11 = [r16], 32 + ;; + ldf.fill f12 = [in0], 32 + ldf.fill f13 = [r16], 32 + ;; + ldf.fill f14 = [in0], 32 + ldf.fill f15 = [r16], 24 + ;; + ld8 out0 = [in0], 16 + ld8 out1 = [r16], 16 + ;; + ld8 out2 = [in0], 16 + ld8 out3 = [r16], 16 + ;; + ld8 out4 = [in0], 16 + ld8 out5 = [r16], 16 + ;; + ld8 out6 = [in0] + ld8 out7 = [r16] + ;; + + /* Deallocate the register save area from the stack frame. */ + mov sp = in0 + + /* Call the target function. */ + ld8 r16 = [in2], 8 + ;; + ld8 gp = [in2] + mov b6 = r16 + br.call.sptk.many b0 = b6 + ;; + + /* Dispatch to handle return value. 
*/ + mov gp = loc2 + zxt1 r16 = in3 + ;; + mov ar.pfs = loc0 + addl r18 = @ltoffx(.Lst_table), gp + ;; + ld8.mov r18 = [r18], .Lst_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + ;; + ld8 r17 = [r18] + shr in3 = in3, 8 + ;; + add r17 = r17, r18 + ;; + mov b6 = r17 + br b6 + ;; + +.Lst_void: + br.ret.sptk.many b0 + ;; +.Lst_uint8: + zxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint8: + sxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint16: + zxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint16: + sxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint32: + zxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint32: + sxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_int64: + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_float: + stfs [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_double: + stfd [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_ldouble: + stfe [in1] = f8 + br.ret.sptk.many b0 + ;; + +.Lst_small_struct: + cmp.lt p6, p0 = 8, in3 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; + add r16 = 8, sp + add r17 = 16, sp + add r18 = 24, sp + ;; + st8 [sp] = r8 +(p6) st8 [r16] = r9 + mov out0 = in1 +(p7) st8 [r17] = r10 +(p8) st8 [r18] = r11 + mov out1 = sp + mov out2 = in3 + ;; + // ia64 software calling convention requires + // top 16 bytes of stack to be scratch space + // PLT resolver uses that scratch space at + // 'memcpy' symbol reolution time + add sp = -16, sp + br.call.sptk.many b0 = memcpy# + ;; + mov ar.pfs = loc0 + mov b0 = loc1 + mov gp = loc2 + br.ret.sptk.many b0 + +.Lst_hfa_float: + add r16 = 4, in1 + cmp.lt p6, p0 = 4, in3 + ;; + stfs [in1] = f8, 8 +(p6) stfs [r16] = f9, 8 + cmp.lt p7, p0 = 8, in3 + cmp.lt p8, p0 = 12, in3 + ;; +(p7) stfs [in1] = f10, 8 +(p8) stfs [r16] = f11, 8 + cmp.lt p9, p0 = 16, in3 + cmp.lt p10, p0 = 20, in3 + ;; +(p9) stfs [in1] = f12, 8 +(p10) stfs [r16] = f13, 8 + cmp.lt p6, p0 = 24, in3 + cmp.lt p7, p0 = 28, in3 + ;; +(p6) stfs [in1] = f14 +(p7) stfs [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_double: + add r16 = 8, in1 + cmp.lt p6, p0 = 8, in3 + ;; + stfd [in1] = f8, 16 +(p6) stfd [r16] = f9, 16 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; +(p7) stfd [in1] = f10, 16 +(p8) stfd [r16] = f11, 16 + cmp.lt p9, p0 = 32, in3 + cmp.lt p10, p0 = 40, in3 + ;; +(p9) stfd [in1] = f12, 16 +(p10) stfd [r16] = f13, 16 + cmp.lt p6, p0 = 48, in3 + cmp.lt p7, p0 = 56, in3 + ;; +(p6) stfd [in1] = f14 +(p7) stfd [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_ldouble: + add r16 = 16, in1 + cmp.lt p6, p0 = 16, in3 + ;; + stfe [in1] = f8, 32 +(p6) stfe [r16] = f9, 32 + cmp.lt p7, p0 = 32, in3 + cmp.lt p8, p0 = 48, in3 + ;; +(p7) stfe [in1] = f10, 32 +(p8) stfe [r16] = f11, 32 + cmp.lt p9, p0 = 64, in3 + cmp.lt p10, p0 = 80, in3 + ;; +(p9) stfe [in1] = f12, 32 +(p10) stfe [r16] = f13, 32 + cmp.lt p6, p0 = 96, in3 + cmp.lt p7, p0 = 112, in3 + ;; +(p6) stfe [in1] = f14 +(p7) stfe [r16] = f15 + br.ret.sptk.many b0 + ;; + + .endp ffi_call_unix + + .align 16 + .global ffi_closure_unix + .proc ffi_closure_unix + +#define FRAME_SIZE (8*16 + 8*8 + 8*16) + +ffi_closure_unix: + .prologue + .save ar.pfs, r40 // loc0 + alloc loc0 = ar.pfs, 8, 4, 4, 0 + .fframe FRAME_SIZE + add r12 = -FRAME_SIZE, r12 + .save rp, loc1 + mov loc1 = b0 + .save ar.unat, loc2 + mov loc2 = ar.unat + .body + + /* Retrieve closure pointer and real gp. 
*/ +#ifdef _ILP32 + addp4 out0 = 0, gp + addp4 gp = 16, gp +#else + mov out0 = gp + add gp = 16, gp +#endif + ;; + ld8 gp = [gp] + + /* Spill all of the possible argument registers. */ + add r16 = 16 + 8*16, sp + add r17 = 16 + 8*16 + 16, sp + ;; + stf.spill [r16] = f8, 32 + stf.spill [r17] = f9, 32 + mov loc3 = gp + ;; + stf.spill [r16] = f10, 32 + stf.spill [r17] = f11, 32 + ;; + stf.spill [r16] = f12, 32 + stf.spill [r17] = f13, 32 + ;; + stf.spill [r16] = f14, 32 + stf.spill [r17] = f15, 24 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in0, 16 + .mem.offset 8, 0 + st8.spill [r17] = in1, 16 + add out1 = 16 + 8*16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in2, 16 + .mem.offset 8, 0 + st8.spill [r17] = in3, 16 + add out2 = 16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in4, 16 + .mem.offset 8, 0 + st8.spill [r17] = in5, 16 + mov out3 = r8 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in6 + .mem.offset 8, 0 + st8.spill [r17] = in7 + + /* Invoke ffi_closure_unix_inner for the hard work. */ + br.call.sptk.many b0 = ffi_closure_unix_inner + ;; + + /* Dispatch to handle return value. */ + mov gp = loc3 + zxt1 r16 = r8 + ;; + addl r18 = @ltoffx(.Lld_table), gp + mov ar.pfs = loc0 + ;; + ld8.mov r18 = [r18], .Lld_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + mov ar.unat = loc2 + ;; + ld8 r17 = [r18] + shr r8 = r8, 8 + ;; + add r17 = r17, r18 + add r16 = 16, sp + ;; + mov b6 = r17 + br b6 + ;; + .label_state 1 + +.Lld_void: + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_int: + .body + .copy_state 1 + ld8 r8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_float: + .body + .copy_state 1 + ldfs f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_double: + .body + .copy_state 1 + ldfd f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_ldouble: + .body + .copy_state 1 + ldfe f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_small_struct: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; + ld8 r8 = [r16], 16 +(p6) ld8 r9 = [r17], 16 + ;; +(p7) ld8 r10 = [r16] +(p8) ld8 r11 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_float: + .body + .copy_state 1 + add r17 = 4, r16 + cmp.lt p6, p0 = 4, r8 + ;; + ldfs f8 = [r16], 8 +(p6) ldfs f9 = [r17], 8 + cmp.lt p7, p0 = 8, r8 + cmp.lt p8, p0 = 12, r8 + ;; +(p7) ldfs f10 = [r16], 8 +(p8) ldfs f11 = [r17], 8 + cmp.lt p9, p0 = 16, r8 + cmp.lt p10, p0 = 20, r8 + ;; +(p9) ldfs f12 = [r16], 8 +(p10) ldfs f13 = [r17], 8 + cmp.lt p6, p0 = 24, r8 + cmp.lt p7, p0 = 28, r8 + ;; +(p6) ldfs f14 = [r16] +(p7) ldfs f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_double: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + ;; + ldfd f8 = [r16], 16 +(p6) ldfd f9 = [r17], 16 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; +(p7) ldfd f10 = [r16], 16 +(p8) ldfd f11 = [r17], 16 + cmp.lt p9, p0 = 32, r8 + cmp.lt p10, p0 = 40, r8 + ;; +(p9) ldfd f12 = [r16], 16 +(p10) ldfd f13 = [r17], 16 + cmp.lt p6, p0 = 48, r8 + cmp.lt p7, p0 = 56, r8 + ;; +(p6) ldfd f14 = [r16] +(p7) ldfd f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_ldouble: + .body + .copy_state 1 + add r17 = 16, r16 + cmp.lt p6, p0 = 16, r8 + ;; + ldfe f8 = [r16], 32 +(p6) ldfe f9 = [r17], 32 + cmp.lt p7, p0 = 32, r8 + cmp.lt p8, p0 = 
48, r8 + ;; +(p7) ldfe f10 = [r16], 32 +(p8) ldfe f11 = [r17], 32 + cmp.lt p9, p0 = 64, r8 + cmp.lt p10, p0 = 80, r8 + ;; +(p9) ldfe f12 = [r16], 32 +(p10) ldfe f13 = [r17], 32 + cmp.lt p6, p0 = 96, r8 + cmp.lt p7, p0 = 112, r8 + ;; +(p6) ldfe f14 = [r16] +(p7) ldfe f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + + .endp ffi_closure_unix + + .section .rodata + .align 8 +.Lst_table: + data8 @pcrel(.Lst_void) // FFI_TYPE_VOID + data8 @pcrel(.Lst_sint32) // FFI_TYPE_INT + data8 @pcrel(.Lst_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lst_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lst_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lst_uint8) // FFI_TYPE_UINT8 + data8 @pcrel(.Lst_sint8) // FFI_TYPE_SINT8 + data8 @pcrel(.Lst_uint16) // FFI_TYPE_UINT16 + data8 @pcrel(.Lst_sint16) // FFI_TYPE_SINT16 + data8 @pcrel(.Lst_uint32) // FFI_TYPE_UINT32 + data8 @pcrel(.Lst_sint32) // FFI_TYPE_SINT32 + data8 @pcrel(.Lst_int64) // FFI_TYPE_UINT64 + data8 @pcrel(.Lst_int64) // FFI_TYPE_SINT64 + data8 @pcrel(.Lst_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lst_int64) // FFI_TYPE_POINTER + data8 @pcrel(.Lst_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lst_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lst_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lst_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lst_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +.Lld_table: + data8 @pcrel(.Lld_void) // FFI_TYPE_VOID + data8 @pcrel(.Lld_int) // FFI_TYPE_INT + data8 @pcrel(.Lld_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lld_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lld_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT64 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT64 + data8 @pcrel(.Lld_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lld_int) // FFI_TYPE_POINTER + data8 @pcrel(.Lld_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lld_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lld_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lld_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lld_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix.S new file mode 100644 index 0000000..e2547e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix.S @@ -0,0 +1,567 @@ +/* ----------------------------------------------------------------------- + unix.S - Copyright (c) 1998, 2008 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Primary author: Hans Boehm, HP Labs + + Loosely modeled on Cygnus code for other platforms. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "ia64_flags.h" + + .pred.safe_across_calls p1-p5,p16-p63 +.text + +/* int ffi_call_unix (struct ia64_args *stack, PTR64 rvalue, + void (*fn)(void), int flags); + */ + + .align 16 + .global ffi_call_unix + .proc ffi_call_unix +ffi_call_unix: + .prologue + /* Bit o trickiness. We actually share a stack frame with ffi_call. + Rely on the fact that ffi_call uses a vframe and don't bother + tracking one here at all. */ + .fframe 0 + .save ar.pfs, r36 // loc0 + alloc loc0 = ar.pfs, 4, 3, 8, 0 + .save rp, loc1 + mov loc1 = b0 + .body + add r16 = 16, in0 + mov loc2 = gp + mov r8 = in1 + ;; + + /* Load up all of the argument registers. */ + ldf.fill f8 = [in0], 32 + ldf.fill f9 = [r16], 32 + ;; + ldf.fill f10 = [in0], 32 + ldf.fill f11 = [r16], 32 + ;; + ldf.fill f12 = [in0], 32 + ldf.fill f13 = [r16], 32 + ;; + ldf.fill f14 = [in0], 32 + ldf.fill f15 = [r16], 24 + ;; + ld8 out0 = [in0], 16 + ld8 out1 = [r16], 16 + ;; + ld8 out2 = [in0], 16 + ld8 out3 = [r16], 16 + ;; + ld8 out4 = [in0], 16 + ld8 out5 = [r16], 16 + ;; + ld8 out6 = [in0] + ld8 out7 = [r16] + ;; + + /* Deallocate the register save area from the stack frame. */ + mov sp = in0 + + /* Call the target function. */ + ld8 r16 = [in2], 8 + ;; + ld8 gp = [in2] + mov b6 = r16 + br.call.sptk.many b0 = b6 + ;; + + /* Dispatch to handle return value. 
*/ + mov gp = loc2 + zxt1 r16 = in3 + ;; + mov ar.pfs = loc0 + addl r18 = @ltoffx(.Lst_table), gp + ;; + ld8.mov r18 = [r18], .Lst_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + ;; + ld8 r17 = [r18] + shr in3 = in3, 8 + ;; + add r17 = r17, r18 + ;; + mov b6 = r17 + br b6 + ;; + +.Lst_void: + br.ret.sptk.many b0 + ;; +.Lst_uint8: + zxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint8: + sxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint16: + zxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint16: + sxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint32: + zxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint32: + sxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_int64: + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_float: + stfs [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_double: + stfd [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_ldouble: + stfe [in1] = f8 + br.ret.sptk.many b0 + ;; + +.Lst_small_struct: + cmp.lt p6, p0 = 8, in3 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; + add r16 = 8, sp + add r17 = 16, sp + add r18 = 24, sp + ;; + st8 [sp] = r8 +(p6) st8 [r16] = r9 + mov out0 = in1 +(p7) st8 [r17] = r10 +(p8) st8 [r18] = r11 + mov out1 = sp + mov out2 = in3 + ;; + // ia64 software calling convention requires + // top 16 bytes of stack to be scratch space + // PLT resolver uses that scratch space at + // 'memcpy' symbol reolution time + add sp = -16, sp + br.call.sptk.many b0 = memcpy# + ;; + mov ar.pfs = loc0 + mov b0 = loc1 + mov gp = loc2 + br.ret.sptk.many b0 + +.Lst_hfa_float: + add r16 = 4, in1 + cmp.lt p6, p0 = 4, in3 + ;; + stfs [in1] = f8, 8 +(p6) stfs [r16] = f9, 8 + cmp.lt p7, p0 = 8, in3 + cmp.lt p8, p0 = 12, in3 + ;; +(p7) stfs [in1] = f10, 8 +(p8) stfs [r16] = f11, 8 + cmp.lt p9, p0 = 16, in3 + cmp.lt p10, p0 = 20, in3 + ;; +(p9) stfs [in1] = f12, 8 +(p10) stfs [r16] = f13, 8 + cmp.lt p6, p0 = 24, in3 + cmp.lt p7, p0 = 28, in3 + ;; +(p6) stfs [in1] = f14 +(p7) stfs [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_double: + add r16 = 8, in1 + cmp.lt p6, p0 = 8, in3 + ;; + stfd [in1] = f8, 16 +(p6) stfd [r16] = f9, 16 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; +(p7) stfd [in1] = f10, 16 +(p8) stfd [r16] = f11, 16 + cmp.lt p9, p0 = 32, in3 + cmp.lt p10, p0 = 40, in3 + ;; +(p9) stfd [in1] = f12, 16 +(p10) stfd [r16] = f13, 16 + cmp.lt p6, p0 = 48, in3 + cmp.lt p7, p0 = 56, in3 + ;; +(p6) stfd [in1] = f14 +(p7) stfd [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_ldouble: + add r16 = 16, in1 + cmp.lt p6, p0 = 16, in3 + ;; + stfe [in1] = f8, 32 +(p6) stfe [r16] = f9, 32 + cmp.lt p7, p0 = 32, in3 + cmp.lt p8, p0 = 48, in3 + ;; +(p7) stfe [in1] = f10, 32 +(p8) stfe [r16] = f11, 32 + cmp.lt p9, p0 = 64, in3 + cmp.lt p10, p0 = 80, in3 + ;; +(p9) stfe [in1] = f12, 32 +(p10) stfe [r16] = f13, 32 + cmp.lt p6, p0 = 96, in3 + cmp.lt p7, p0 = 112, in3 + ;; +(p6) stfe [in1] = f14 +(p7) stfe [r16] = f15 + br.ret.sptk.many b0 + ;; + + .endp ffi_call_unix + + .align 16 + .global ffi_closure_unix + .proc ffi_closure_unix + +#define FRAME_SIZE (8*16 + 8*8 + 8*16) + +ffi_closure_unix: + .prologue + .save ar.pfs, r40 // loc0 + alloc loc0 = ar.pfs, 8, 4, 4, 0 + .fframe FRAME_SIZE + add r12 = -FRAME_SIZE, r12 + .save rp, loc1 + mov loc1 = b0 + .save ar.unat, loc2 + mov loc2 = ar.unat + .body + + /* Retrieve closure pointer and real gp. 
*/ +#ifdef _ILP32 + addp4 out0 = 0, gp + addp4 gp = 16, gp +#else + mov out0 = gp + add gp = 16, gp +#endif + ;; + ld8 gp = [gp] + + /* Spill all of the possible argument registers. */ + add r16 = 16 + 8*16, sp + add r17 = 16 + 8*16 + 16, sp + ;; + stf.spill [r16] = f8, 32 + stf.spill [r17] = f9, 32 + mov loc3 = gp + ;; + stf.spill [r16] = f10, 32 + stf.spill [r17] = f11, 32 + ;; + stf.spill [r16] = f12, 32 + stf.spill [r17] = f13, 32 + ;; + stf.spill [r16] = f14, 32 + stf.spill [r17] = f15, 24 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in0, 16 + .mem.offset 8, 0 + st8.spill [r17] = in1, 16 + add out1 = 16 + 8*16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in2, 16 + .mem.offset 8, 0 + st8.spill [r17] = in3, 16 + add out2 = 16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in4, 16 + .mem.offset 8, 0 + st8.spill [r17] = in5, 16 + mov out3 = r8 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in6 + .mem.offset 8, 0 + st8.spill [r17] = in7 + + /* Invoke ffi_closure_unix_inner for the hard work. */ + br.call.sptk.many b0 = ffi_closure_unix_inner + ;; + + /* Dispatch to handle return value. */ + mov gp = loc3 + zxt1 r16 = r8 + ;; + addl r18 = @ltoffx(.Lld_table), gp + mov ar.pfs = loc0 + ;; + ld8.mov r18 = [r18], .Lld_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + mov ar.unat = loc2 + ;; + ld8 r17 = [r18] + shr r8 = r8, 8 + ;; + add r17 = r17, r18 + add r16 = 16, sp + ;; + mov b6 = r17 + br b6 + ;; + .label_state 1 + +.Lld_void: + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_int: + .body + .copy_state 1 + ld8 r8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_float: + .body + .copy_state 1 + ldfs f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_double: + .body + .copy_state 1 + ldfd f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_ldouble: + .body + .copy_state 1 + ldfe f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_small_struct: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; + ld8 r8 = [r16], 16 +(p6) ld8 r9 = [r17], 16 + ;; +(p7) ld8 r10 = [r16] +(p8) ld8 r11 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_float: + .body + .copy_state 1 + add r17 = 4, r16 + cmp.lt p6, p0 = 4, r8 + ;; + ldfs f8 = [r16], 8 +(p6) ldfs f9 = [r17], 8 + cmp.lt p7, p0 = 8, r8 + cmp.lt p8, p0 = 12, r8 + ;; +(p7) ldfs f10 = [r16], 8 +(p8) ldfs f11 = [r17], 8 + cmp.lt p9, p0 = 16, r8 + cmp.lt p10, p0 = 20, r8 + ;; +(p9) ldfs f12 = [r16], 8 +(p10) ldfs f13 = [r17], 8 + cmp.lt p6, p0 = 24, r8 + cmp.lt p7, p0 = 28, r8 + ;; +(p6) ldfs f14 = [r16] +(p7) ldfs f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_double: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + ;; + ldfd f8 = [r16], 16 +(p6) ldfd f9 = [r17], 16 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; +(p7) ldfd f10 = [r16], 16 +(p8) ldfd f11 = [r17], 16 + cmp.lt p9, p0 = 32, r8 + cmp.lt p10, p0 = 40, r8 + ;; +(p9) ldfd f12 = [r16], 16 +(p10) ldfd f13 = [r17], 16 + cmp.lt p6, p0 = 48, r8 + cmp.lt p7, p0 = 56, r8 + ;; +(p6) ldfd f14 = [r16] +(p7) ldfd f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_ldouble: + .body + .copy_state 1 + add r17 = 16, r16 + cmp.lt p6, p0 = 16, r8 + ;; + ldfe f8 = [r16], 32 +(p6) ldfe f9 = [r17], 32 + cmp.lt p7, p0 = 32, r8 + cmp.lt p8, p0 = 
48, r8 + ;; +(p7) ldfe f10 = [r16], 32 +(p8) ldfe f11 = [r17], 32 + cmp.lt p9, p0 = 64, r8 + cmp.lt p10, p0 = 80, r8 + ;; +(p9) ldfe f12 = [r16], 32 +(p10) ldfe f13 = [r17], 32 + cmp.lt p6, p0 = 96, r8 + cmp.lt p7, p0 = 112, r8 + ;; +(p6) ldfe f14 = [r16] +(p7) ldfe f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + + .endp ffi_closure_unix + + .section .rodata + .align 8 +.Lst_table: + data8 @pcrel(.Lst_void) // FFI_TYPE_VOID + data8 @pcrel(.Lst_sint32) // FFI_TYPE_INT + data8 @pcrel(.Lst_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lst_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lst_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lst_uint8) // FFI_TYPE_UINT8 + data8 @pcrel(.Lst_sint8) // FFI_TYPE_SINT8 + data8 @pcrel(.Lst_uint16) // FFI_TYPE_UINT16 + data8 @pcrel(.Lst_sint16) // FFI_TYPE_SINT16 + data8 @pcrel(.Lst_uint32) // FFI_TYPE_UINT32 + data8 @pcrel(.Lst_sint32) // FFI_TYPE_SINT32 + data8 @pcrel(.Lst_int64) // FFI_TYPE_UINT64 + data8 @pcrel(.Lst_int64) // FFI_TYPE_SINT64 + data8 @pcrel(.Lst_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lst_int64) // FFI_TYPE_POINTER + data8 @pcrel(.Lst_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lst_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lst_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lst_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lst_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +.Lld_table: + data8 @pcrel(.Lld_void) // FFI_TYPE_VOID + data8 @pcrel(.Lld_int) // FFI_TYPE_INT + data8 @pcrel(.Lld_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lld_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lld_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT64 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT64 + data8 @pcrel(.Lld_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lld_int) // FFI_TYPE_POINTER + data8 @pcrel(.Lld_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lld_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lld_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lld_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lld_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api 2.c new file mode 100644 index 0000000..114d3e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api 2.c @@ -0,0 +1,374 @@ +/* ----------------------------------------------------------------------- + java_raw_api.c - Copyright (c) 1999, 2007, 2008 Red Hat, Inc. + + Cloned from raw_api.c + + Raw_api.c author: Kresten Krab Thorup + Java_raw_api.c author: Hans-J. 
Boehm + + $Id $ + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This defines a Java- and 64-bit specific variant of the raw API. */ +/* It assumes that "raw" argument blocks look like Java stacks on a */ +/* 64-bit machine. Arguments that can be stored in a single stack */ +/* stack slots (longs, doubles) occupy 128 bits, but only the first */ +/* 64 bits are actually used. */ + +#include +#include +#include + +#if !defined(NO_JAVA_RAW_API) + +size_t +ffi_java_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { + switch((*at) -> type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + result += 2 * FFI_SIZEOF_JAVA_RAW; + break; + case FFI_TYPE_STRUCT: + /* No structure parameters in Java. */ + abort(); + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + result += FFI_SIZEOF_JAVA_RAW; + } + } + + return result; +} + + +void +ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + 3); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + 2); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void *)raw; + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + *args = raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if FFI_SIZEOF_JAVA_RAW == 8 + switch((*tp)->type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void*) raw; + raw += 2; + break; + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + *args = (void*) raw++; + } +#else /* FFI_SIZEOF_JAVA_RAW != 8 */ + *args = (void*) raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif /* FFI_SIZEOF_JAVA_RAW == 8 */ + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT8*) (*args); +#else + (raw++)->uint = *(UINT8*) (*args); +#endif + break; + + case FFI_TYPE_SINT8: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT8*) (*args); +#else + (raw++)->sint = *(SINT8*) (*args); +#endif + break; + + case FFI_TYPE_UINT16: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT16*) (*args); +#else + (raw++)->uint = *(UINT16*) (*args); +#endif + break; + + case FFI_TYPE_SINT16: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT16*) (*args); +#else + (raw++)->sint = *(SINT16*) (*args); +#endif + break; + + case FFI_TYPE_UINT32: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT32*) (*args); +#else + (raw++)->uint = *(UINT32*) (*args); +#endif + break; + + case FFI_TYPE_SINT32: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT32*) (*args); +#else + (raw++)->sint = *(SINT32*) (*args); +#endif + break; + + case FFI_TYPE_FLOAT: + (raw++)->flt = *(FLOAT32*) (*args); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + raw->uint = *(UINT64*) (*args); + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: +#if FFI_SIZEOF_JAVA_RAW == 8 + FFI_ASSERT(0); /* Should have covered all cases */ +#else + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif + } + } +} + +#if !FFI_NATIVE_RAW_API + +static void +ffi_java_rvalue_to_raw (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: +#if FFI_SIZEOF_JAVA_RAW == 4 + case FFI_TYPE_POINTER: +#endif + *(SINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +static void +ffi_java_raw_to_rvalue (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: + *(SINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_java_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, + ffi_java_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_java_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); + ffi_java_rvalue_to_raw (cif, rvalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_java_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_java_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data); + ffi_java_raw_to_rvalue (cif, rvalue); +} + +ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_java_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) +{ + return ffi_prep_java_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ +#endif /* !NO_JAVA_RAW_API */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c new file mode 100644 index 0000000..114d3e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c @@ -0,0 +1,374 @@ +/* ----------------------------------------------------------------------- + java_raw_api.c - Copyright (c) 1999, 2007, 2008 Red Hat, Inc. + + Cloned from raw_api.c + + Raw_api.c author: Kresten Krab Thorup + Java_raw_api.c author: Hans-J. Boehm + + $Id $ + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This defines a Java- and 64-bit specific variant of the raw API. */ +/* It assumes that "raw" argument blocks look like Java stacks on a */ +/* 64-bit machine. 
Arguments that can be stored in a single stack */ +/* stack slots (longs, doubles) occupy 128 bits, but only the first */ +/* 64 bits are actually used. */ + +#include +#include +#include + +#if !defined(NO_JAVA_RAW_API) + +size_t +ffi_java_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { + switch((*at) -> type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + result += 2 * FFI_SIZEOF_JAVA_RAW; + break; + case FFI_TYPE_STRUCT: + /* No structure parameters in Java. */ + abort(); + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + result += FFI_SIZEOF_JAVA_RAW; + } + } + + return result; +} + + +void +ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + 3); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + 2); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void *)raw; + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + *args = raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if FFI_SIZEOF_JAVA_RAW == 8 + switch((*tp)->type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void*) raw; + raw += 2; + break; + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + *args = (void*) raw++; + } +#else /* FFI_SIZEOF_JAVA_RAW != 8 */ + *args = (void*) raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif /* FFI_SIZEOF_JAVA_RAW == 8 */ + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT8*) (*args); +#else + (raw++)->uint = *(UINT8*) (*args); +#endif + break; + + case FFI_TYPE_SINT8: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT8*) (*args); +#else + (raw++)->sint = *(SINT8*) (*args); +#endif + break; + + case FFI_TYPE_UINT16: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT16*) (*args); +#else + (raw++)->uint = *(UINT16*) (*args); +#endif + break; + + case FFI_TYPE_SINT16: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT16*) (*args); +#else + (raw++)->sint = *(SINT16*) (*args); +#endif + break; + + case FFI_TYPE_UINT32: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT32*) (*args); +#else + (raw++)->uint = *(UINT32*) (*args); +#endif + break; + + case FFI_TYPE_SINT32: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT32*) (*args); +#else + (raw++)->sint = *(SINT32*) (*args); +#endif + break; + + case FFI_TYPE_FLOAT: + (raw++)->flt = *(FLOAT32*) (*args); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + raw->uint = *(UINT64*) (*args); + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: +#if FFI_SIZEOF_JAVA_RAW == 8 + FFI_ASSERT(0); /* Should have covered all cases */ +#else + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif + } + } +} + +#if !FFI_NATIVE_RAW_API + +static void +ffi_java_rvalue_to_raw (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: +#if FFI_SIZEOF_JAVA_RAW == 4 + case FFI_TYPE_POINTER: +#endif + *(SINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +static void +ffi_java_raw_to_rvalue (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: + *(SINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_java_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, + ffi_java_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_java_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); + ffi_java_rvalue_to_raw (cif, rvalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_java_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_java_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data); + ffi_java_raw_to_rvalue (cif, rvalue); +} + +ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_java_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) +{ + return ffi_prep_java_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ +#endif /* !NO_JAVA_RAW_API */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi 2.c new file mode 100644 index 0000000..ab8fc4e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi 2.c @@ -0,0 +1,232 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2004 Renesas Technology + Copyright (c) 2008 Red Hat, Inc. + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. 
*/ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + int tmp; + unsigned int avn; + void **p_argv; + char *argp; + ffi_type **p_arg; + + tmp = 0; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 8) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0) && (avn != 0); + i--, p_arg++) + { + size_t z; + + /* Align if necessary. */ + if (((*p_arg)->alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, (*p_arg)->alignment); + + if (avn != 0) + { + avn--; + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + z = (*p_arg)->size; + if ((*p_arg)->alignment != 1) + memcpy (argp, *p_argv, z); + else + memcpy (argp + 4 - z, *p_argv, z); + z = sizeof (int); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + if (z > 8) + { + *(unsigned int *) argp = (unsigned int)(void *)(* p_argv); + z = sizeof(void *); + } + else + { + memcpy(argp, *p_argv, z); + z = 8; + } + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_DOUBLE; + + else + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. 
*/ + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + int size = cif->rtype->size; + int align = cif->rtype->alignment; + + if (size < 4) + { + if (align == 1) + *(unsigned long *)(ecif.rvalue) <<= (4 - size) * 8; + } + else if (4 < size && size < 8) + { + if (align == 1) + { + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + else if (align == 2) + { + if (size & 1) + size += 1; + + if (size != 8) + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + } + } + break; + + default: + FFI_ASSERT(0); + break; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi.c new file mode 100644 index 0000000..ab8fc4e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi.c @@ -0,0 +1,232 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2004 Renesas Technology + Copyright (c) 2008 Red Hat, Inc. + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + int tmp; + unsigned int avn; + void **p_argv; + char *argp; + ffi_type **p_arg; + + tmp = 0; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 8) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0) && (avn != 0); + i--, p_arg++) + { + size_t z; + + /* Align if necessary. 
*/ + if (((*p_arg)->alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, (*p_arg)->alignment); + + if (avn != 0) + { + avn--; + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + z = (*p_arg)->size; + if ((*p_arg)->alignment != 1) + memcpy (argp, *p_argv, z); + else + memcpy (argp + 4 - z, *p_argv, z); + z = sizeof (int); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + if (z > 8) + { + *(unsigned int *) argp = (unsigned int)(void *)(* p_argv); + z = sizeof(void *); + } + else + { + memcpy(argp, *p_argv, z); + z = 8; + } + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_DOUBLE; + + else + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. 
*/ + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + int size = cif->rtype->size; + int align = cif->rtype->alignment; + + if (size < 4) + { + if (align == 1) + *(unsigned long *)(ecif.rvalue) <<= (4 - size) * 8; + } + else if (4 < size && size < 8) + { + if (align == 1) + { + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + else if (align == 2) + { + if (size & 1) + size += 1; + + if (size != 8) + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + } + } + break; + + default: + FFI_ASSERT(0); + break; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget 2.h new file mode 100644 index 0000000..6c34801 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget 2.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2004 Renesas Technology. + Target configuration macros for M32R. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +#define FFI_CLOSURES 0 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget.h new file mode 100644 index 0000000..6c34801 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2004 Renesas Technology. 
+ Target configuration macros for M32R. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +#define FFI_CLOSURES 0 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv 2.S new file mode 100644 index 0000000..06b75c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv 2.S @@ -0,0 +1,121 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Renesas Technology + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)! 
.type CNAME(x),%function! CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* sp+0: ecif.rvalue */ + /* sp+4: fn */ + + /* This assumes we are using gas. */ +ENTRY(ffi_call_SYSV) + /* Save registers. */ + push fp + push lr + push r3 + push r2 + push r1 + push r0 + mv fp, sp + + /* Make room for all of the new args. */ + sub sp, r2 + + /* Place all of the ffi_prep_args in position. */ + mv lr, r0 + mv r0, sp + /* R1 already set. */ + + /* And call. */ + jl lr + + /* Move first 4 parameters in registers... */ + ld r0, @(0,sp) + ld r1, @(4,sp) + ld r2, @(8,sp) + ld r3, @(12,sp) + + /* ...and adjust the stack. */ + ld lr, @(8,fp) + cmpi lr, #16 + bc adjust_stack + ldi lr, #16 +adjust_stack: + add sp, lr + + /* Call the function. */ + ld lr, @(28,fp) + jl lr + + /* Remove the space we pushed for the args. */ + mv sp, fp + + /* Load R2 with the pointer to storage for the return value. */ + ld r2, @(24,sp) + + /* Load R3 with the return type code. */ + ld r3, @(12,sp) + + /* If the return value pointer is NULL, assume no return value. */ + beqz r2, epilogue + + /* Return INT. */ + ldi r4, #FFI_TYPE_INT + bne r3, r4, return_double + st r0, @r2 + bra epilogue + +return_double: + /* Return DOUBLE or LONGDOUBLE. */ + ldi r4, #FFI_TYPE_DOUBLE + bne r3, r4, epilogue + st r0, @r2 + st r1, @(4,r2) + +epilogue: + pop r0 + pop r1 + pop r2 + pop r3 + pop lr + pop fp + jmp lr + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv.S new file mode 100644 index 0000000..06b75c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv.S @@ -0,0 +1,121 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Renesas Technology + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)! .type CNAME(x),%function! 
CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* sp+0: ecif.rvalue */ + /* sp+4: fn */ + + /* This assumes we are using gas. */ +ENTRY(ffi_call_SYSV) + /* Save registers. */ + push fp + push lr + push r3 + push r2 + push r1 + push r0 + mv fp, sp + + /* Make room for all of the new args. */ + sub sp, r2 + + /* Place all of the ffi_prep_args in position. */ + mv lr, r0 + mv r0, sp + /* R1 already set. */ + + /* And call. */ + jl lr + + /* Move first 4 parameters in registers... */ + ld r0, @(0,sp) + ld r1, @(4,sp) + ld r2, @(8,sp) + ld r3, @(12,sp) + + /* ...and adjust the stack. */ + ld lr, @(8,fp) + cmpi lr, #16 + bc adjust_stack + ldi lr, #16 +adjust_stack: + add sp, lr + + /* Call the function. */ + ld lr, @(28,fp) + jl lr + + /* Remove the space we pushed for the args. */ + mv sp, fp + + /* Load R2 with the pointer to storage for the return value. */ + ld r2, @(24,sp) + + /* Load R3 with the return type code. */ + ld r3, @(12,sp) + + /* If the return value pointer is NULL, assume no return value. */ + beqz r2, epilogue + + /* Return INT. */ + ldi r4, #FFI_TYPE_INT + bne r3, r4, return_double + st r0, @r2 + bra epilogue + +return_double: + /* Return DOUBLE or LONGDOUBLE. */ + ldi r4, #FFI_TYPE_DOUBLE + bne r3, r4, epilogue + st r0, @r2 + st r1, @(4,r2) + +epilogue: + pop r0 + pop r1 + pop r2 + pop r3 + pop lr + pop fp + jmp lr + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi 2.c new file mode 100644 index 0000000..0330184 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi 2.c @@ -0,0 +1,362 @@ +/* ----------------------------------------------------------------------- + ffi.c + + m68k Foreign Function Interface + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#ifdef __rtems__ +void rtems_cache_flush_multiple_data_lines( const void *, size_t ); +#else +#include +#ifdef __MINT__ +#include +#include +#else +#include +#endif +#endif + +void ffi_call_SYSV (extended_cif *, + unsigned, unsigned, + void *, void (*fn) ()); +void *ffi_prep_args (void *stack, extended_cif *ecif); +void ffi_closure_SYSV (ffi_closure *); +void ffi_closure_struct_SYSV (ffi_closure *); +unsigned int ffi_closure_SYSV_inner (ffi_closure *closure, + void *resp, void *args); + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. 
*/ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if ( +#ifdef __MINT__ + (ecif->cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((ecif->cif->rtype->type == FFI_TYPE_STRUCT) + && !ecif->cif->flags))) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z = (*p_arg)->size; + int type = (*p_arg)->type; + + if (z < sizeof (int)) + { + switch (type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: +#ifdef __MINT__ + if (z == 1 || z == 2) + memcpy (argp + 2, *p_argv, z); + else + memcpy (argp, *p_argv, z); +#else + memcpy (argp + sizeof (int) - z, *p_argv, z); +#endif + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +#define CIF_FLAGS_INT 1 +#define CIF_FLAGS_DINT 2 +#define CIF_FLAGS_FLOAT 4 +#define CIF_FLAGS_DOUBLE 8 +#define CIF_FLAGS_LDOUBLE 16 +#define CIF_FLAGS_POINTER 32 +#define CIF_FLAGS_STRUCT1 64 +#define CIF_FLAGS_STRUCT2 128 +#define CIF_FLAGS_SINT8 256 +#define CIF_FLAGS_SINT16 512 + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + switch (cif->rtype->size) + { + case 1: +#ifdef __MINT__ + cif->flags = CIF_FLAGS_STRUCT2; +#else + cif->flags = CIF_FLAGS_STRUCT1; +#endif + break; + case 2: + cif->flags = CIF_FLAGS_STRUCT2; + break; +#ifdef __MINT__ + case 3: +#endif + case 4: + cif->flags = CIF_FLAGS_INT; + break; +#ifdef __MINT__ + case 7: +#endif + case 8: + cif->flags = CIF_FLAGS_DINT; + break; + default: + cif->flags = 0; + break; + } + break; + + case FFI_TYPE_FLOAT: + cif->flags = CIF_FLAGS_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = CIF_FLAGS_DOUBLE; + break; + +#if (FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE) + case FFI_TYPE_LONGDOUBLE: +#ifdef __MINT__ + cif->flags = 0; +#else + cif->flags = CIF_FLAGS_LDOUBLE; +#endif + break; +#endif + + case FFI_TYPE_POINTER: + cif->flags = CIF_FLAGS_POINTER; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + case FFI_TYPE_SINT16: + cif->flags = CIF_FLAGS_SINT16; + break; + + case FFI_TYPE_SINT8: + cif->flags = CIF_FLAGS_SINT8; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. 
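[Editor's note] Per the size switch in ffi_prep_cif_machdep just above, an aggregate of exactly 8 bytes is tagged CIF_FLAGS_DINT and comes back in d0/d1 rather than through a hidden struct pointer. A hedged sketch of exercising that path from the public API; the pair_t type and make_pair function are assumptions for illustration only.

#include <ffi.h>
#include <string.h>
#include <stdio.h>

/* Illustrative 8-byte aggregate; per the switch above it maps to
   CIF_FLAGS_DINT on this port. */
typedef struct { int x; int y; } pair_t;

static pair_t make_pair(int x, int y) { pair_t p = { x, y }; return p; }

int main(void)
{
  ffi_cif cif;
  ffi_type pair_type;
  ffi_type *pair_elems[3] = { &ffi_type_sint, &ffi_type_sint, NULL };
  ffi_type *arg_types[2]  = { &ffi_type_sint, &ffi_type_sint };
  int x = 1, y = 2;
  void *args[2] = { &x, &y };
  pair_t out;

  /* size/alignment are left zero; ffi_prep_cif computes them from the
     element list before handing the cif to ffi_prep_cif_machdep. */
  memset(&pair_type, 0, sizeof pair_type);
  pair_type.type = FFI_TYPE_STRUCT;
  pair_type.elements = pair_elems;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &pair_type, arg_types) != FFI_OK)
    return 1;

  ffi_call(&cif, FFI_FN(make_pair), &out, args);
  printf("%d %d\n", out.x, out.y);
  return 0;
}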
*/ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (&ecif, cif->bytes, cif->flags, + ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +static void +ffi_prep_incoming_args_SYSV (char *stack, void **avalue, ffi_cif *cif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; +#ifdef __MINT__ + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 1 || z == 2)) + { + *p_argv = (void *) (argp + 2); + + z = 4; + } + else + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 3 || z == 4)) + { + *p_argv = (void *) (argp); + + z = 4; + } + else +#endif + if (z <= 4) + { + *p_argv = (void *) (argp + 4 - z); + + z = 4; + } + else + { + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } +} + +unsigned int +ffi_closure_SYSV_inner (ffi_closure *closure, void *resp, void *args) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_incoming_args_SYSV(args, arg_area, cif); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + *(unsigned short *)closure->tramp = 0x207c; + *(void **)(closure->tramp + 2) = codeloc; + *(unsigned short *)(closure->tramp + 6) = 0x4ef9; + + if ( +#ifdef __MINT__ + (cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((cif->rtype->type == FFI_TYPE_STRUCT) + && !cif->flags))) + *(void **)(closure->tramp + 8) = ffi_closure_struct_SYSV; + else + *(void **)(closure->tramp + 8) = ffi_closure_SYSV; + +#ifdef __rtems__ + rtems_cache_flush_multiple_data_lines( codeloc, FFI_TRAMPOLINE_SIZE ); +#elif defined(__MINT__) + Ssystem(S_FLUSHCACHE, codeloc, FFI_TRAMPOLINE_SIZE); +#else + syscall(SYS_cacheflush, codeloc, FLUSH_SCOPE_LINE, + FLUSH_CACHE_BOTH, FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi.c new file mode 100644 index 0000000..0330184 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi.c @@ -0,0 +1,362 @@ +/* ----------------------------------------------------------------------- + ffi.c + + m68k Foreign Function Interface + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#ifdef __rtems__ +void rtems_cache_flush_multiple_data_lines( const void *, size_t ); +#else +#include +#ifdef __MINT__ +#include +#include +#else +#include +#endif +#endif + +void ffi_call_SYSV (extended_cif *, + unsigned, unsigned, + void *, void (*fn) ()); +void *ffi_prep_args (void *stack, extended_cif *ecif); +void ffi_closure_SYSV (ffi_closure *); +void ffi_closure_struct_SYSV (ffi_closure *); +unsigned int ffi_closure_SYSV_inner (ffi_closure 
*closure, + void *resp, void *args); + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if ( +#ifdef __MINT__ + (ecif->cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((ecif->cif->rtype->type == FFI_TYPE_STRUCT) + && !ecif->cif->flags))) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z = (*p_arg)->size; + int type = (*p_arg)->type; + + if (z < sizeof (int)) + { + switch (type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: +#ifdef __MINT__ + if (z == 1 || z == 2) + memcpy (argp + 2, *p_argv, z); + else + memcpy (argp, *p_argv, z); +#else + memcpy (argp + sizeof (int) - z, *p_argv, z); +#endif + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +#define CIF_FLAGS_INT 1 +#define CIF_FLAGS_DINT 2 +#define CIF_FLAGS_FLOAT 4 +#define CIF_FLAGS_DOUBLE 8 +#define CIF_FLAGS_LDOUBLE 16 +#define CIF_FLAGS_POINTER 32 +#define CIF_FLAGS_STRUCT1 64 +#define CIF_FLAGS_STRUCT2 128 +#define CIF_FLAGS_SINT8 256 +#define CIF_FLAGS_SINT16 512 + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + switch (cif->rtype->size) + { + case 1: +#ifdef __MINT__ + cif->flags = CIF_FLAGS_STRUCT2; +#else + cif->flags = CIF_FLAGS_STRUCT1; +#endif + break; + case 2: + cif->flags = CIF_FLAGS_STRUCT2; + break; +#ifdef __MINT__ + case 3: +#endif + case 4: + cif->flags = CIF_FLAGS_INT; + break; +#ifdef __MINT__ + case 7: +#endif + case 8: + cif->flags = CIF_FLAGS_DINT; + break; + default: + cif->flags = 0; + break; + } + break; + + case FFI_TYPE_FLOAT: + cif->flags = CIF_FLAGS_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = CIF_FLAGS_DOUBLE; + break; + +#if (FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE) + case FFI_TYPE_LONGDOUBLE: +#ifdef __MINT__ + cif->flags = 0; +#else + cif->flags = CIF_FLAGS_LDOUBLE; +#endif + break; +#endif + + case FFI_TYPE_POINTER: + cif->flags = CIF_FLAGS_POINTER; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + case FFI_TYPE_SINT16: + cif->flags = CIF_FLAGS_SINT16; + break; + + case FFI_TYPE_SINT8: + cif->flags = CIF_FLAGS_SINT8; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the 
return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (&ecif, cif->bytes, cif->flags, + ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +static void +ffi_prep_incoming_args_SYSV (char *stack, void **avalue, ffi_cif *cif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; +#ifdef __MINT__ + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 1 || z == 2)) + { + *p_argv = (void *) (argp + 2); + + z = 4; + } + else + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 3 || z == 4)) + { + *p_argv = (void *) (argp); + + z = 4; + } + else +#endif + if (z <= 4) + { + *p_argv = (void *) (argp + 4 - z); + + z = 4; + } + else + { + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } +} + +unsigned int +ffi_closure_SYSV_inner (ffi_closure *closure, void *resp, void *args) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_incoming_args_SYSV(args, arg_area, cif); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + *(unsigned short *)closure->tramp = 0x207c; + *(void **)(closure->tramp + 2) = codeloc; + *(unsigned short *)(closure->tramp + 6) = 0x4ef9; + + if ( +#ifdef __MINT__ + (cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((cif->rtype->type == FFI_TYPE_STRUCT) + && !cif->flags))) + *(void **)(closure->tramp + 8) = ffi_closure_struct_SYSV; + else + *(void **)(closure->tramp + 8) = ffi_closure_SYSV; + +#ifdef __rtems__ + rtems_cache_flush_multiple_data_lines( codeloc, FFI_TRAMPOLINE_SIZE ); +#elif defined(__MINT__) + Ssystem(S_FLUSHCACHE, codeloc, FFI_TRAMPOLINE_SIZE); +#else + syscall(SYS_cacheflush, codeloc, FLUSH_SCOPE_LINE, + FLUSH_CACHE_BOTH, FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget 2.h new file mode 100644 index 0000000..e81dde2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget 2.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Motorola 68K. 
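[Editor's note] ffi_prep_closure_loc above only writes the 16-byte trampoline (a move.l immediate into %a0 followed by an absolute jmp to ffi_closure_SYSV or ffi_closure_struct_SYSV), flushes it from the caches, and records the callback. Allocating the closure and calling through the generated code happen via the generic closure API, sketched below; the adder_handler callback and its captured bias value are illustrative assumptions.

#include <ffi.h>
#include <stdio.h>

/* Illustrative handler invoked by ffi_closure_SYSV_inner: reads the
   marshalled arguments and writes the return value through ret. */
static void adder_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
  int bias = *(int *) user_data;
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) args[1] + bias;
}

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
  void *code = NULL;
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
  int bias = 10;
  int (*fn)(int, int);

  if (closure == NULL)
    return 1;
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK
      || ffi_prep_closure_loc(closure, &cif, adder_handler, &bias, code) != FFI_OK)
    return 1;

  /* The code pointer now refers to a callable trampoline. */
  fn = (int (*)(int, int)) code;
  printf("%d\n", fn(2, 3));   /* prints 15 */

  ffi_closure_free(closure);
  return 0;
}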
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget.h new file mode 100644 index 0000000..e81dde2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Motorola 68K. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. 
Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv 2.S new file mode 100644 index 0000000..ea40f11 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv 2.S @@ -0,0 +1,357 @@ +/* ----------------------------------------------------------------------- + + sysv.S - Copyright (c) 2012 Alan Hourihane + Copyright (c) 1998, 2012 Andreas Schwab + Copyright (c) 2008 Red Hat, Inc. + Copyright (c) 2012, 2016 Thorsten Glaser + + m68k Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef HAVE_AS_CFI_PSEUDO_OP +#define CFI_STARTPROC() .cfi_startproc +#define CFI_OFFSET(reg,off) .cfi_offset reg,off +#define CFI_DEF_CFA(reg,off) .cfi_def_cfa reg,off +#define CFI_ENDPROC() .cfi_endproc +#else +#define CFI_STARTPROC() +#define CFI_OFFSET(reg,off) +#define CFI_DEF_CFA(reg,off) +#define CFI_ENDPROC() +#endif + +#ifdef __MINT__ +#define CALLFUNC(funcname) _ ## funcname +#else +#define CALLFUNC(funcname) funcname +#endif + + .text + + .globl CALLFUNC(ffi_call_SYSV) + .type CALLFUNC(ffi_call_SYSV),@function + .align 4 + +CALLFUNC(ffi_call_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %d2,-(%sp) + CFI_OFFSET(2,-12) + + | Make room for all of the new args. 
+ sub.l 12(%fp),%sp + + | Call ffi_prep_args + move.l 8(%fp),-(%sp) + pea 4(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_prep_args) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_prep_args@PLTPC) +#endif + addq.l #8,%sp + + | Pass pointer to struct value, if any +#ifdef __MINT__ + move.l %d0,%a1 +#else + move.l %a0,%a1 +#endif + + | Call the function + move.l 24(%fp),%a0 + jsr (%a0) + + | Remove the space we pushed for the args + add.l 12(%fp),%sp + + | Load the pointer to storage for the return value + move.l 20(%fp),%a1 + + | Load the return type code + move.l 16(%fp),%d2 + + | If the return value pointer is NULL, assume no return value. + | NOTE: On the mc68000, tst on an address register is not supported. +#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + cmp.w #0, %a1 +#else + tst.l %a1 +#endif + jbeq noretval + + btst #0,%d2 + jbeq retlongint + move.l %d0,(%a1) + jbra epilogue + +retlongint: + btst #1,%d2 + jbeq retfloat + move.l %d0,(%a1) + move.l %d1,4(%a1) + jbra epilogue + +retfloat: + btst #2,%d2 + jbeq retdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s %fp0,(%a1) +#else + move.l %d0,(%a1) +#endif + jbra epilogue + +retdouble: + btst #3,%d2 + jbeq retlongdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1) +#endif + jbra epilogue + +retlongdouble: + btst #4,%d2 + jbeq retpointer +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1)+ + move.l %d2,(%a1) +#endif + jbra epilogue + +retpointer: + btst #5,%d2 + jbeq retstruct1 +#ifdef __MINT__ + move.l %d0,(%a1) +#else + move.l %a0,(%a1) +#endif + jbra epilogue + +retstruct1: + btst #6,%d2 + jbeq retstruct2 + move.b %d0,(%a1) + jbra epilogue + +retstruct2: + btst #7,%d2 + jbeq retsint8 + move.w %d0,(%a1) + jbra epilogue + +retsint8: + btst #8,%d2 + jbeq retsint16 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
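[Editor's note] The NOTE above (and the paired #if that follows) reflects that the plain mc68000 lacks extb.l, so a byte is widened in two steps: ext.w then ext.l. A small sketch showing the two-step widening yields the same value as a direct 8-to-32-bit sign extension; the concrete value -5 is illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  int8_t  b   = -5;           /* 0xFB in the low byte of d0 */
  int16_t w   = (int16_t) b;  /* ext.w %d0  -> 0xFFFB */
  int32_t two = (int32_t) w;  /* ext.l %d0  -> 0xFFFFFFFB */
  int32_t one = (int32_t) b;  /* what extb.l does in a single step */

  printf("%d %d %d\n", two, one, two == one);  /* -5 -5 1 */
  return 0;
}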
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + move.l %d0,(%a1) + jbra epilogue + +retsint16: + btst #9,%d2 + jbeq noretval + ext.l %d0 + move.l %d0,(%a1) + +noretval: +epilogue: + move.l (%sp)+,%d2 + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_call_SYSV),.-CALLFUNC(ffi_call_SYSV) + + .globl CALLFUNC(ffi_closure_SYSV) + .type CALLFUNC(ffi_closure_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_SYSV): + CFI_STARTPROC() + link %fp,#-12 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + pea -12(%fp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + + lsr.l #1,%d0 + jne 1f + jcc .Lcls_epilogue + | CIF_FLAGS_INT + move.l -12(%fp),%d0 +.Lcls_epilogue: + | no CIF_FLAGS_* + unlk %fp + rts +1: + lea -12(%fp),%a0 + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_float + | CIF_FLAGS_DINT + move.l (%a0)+,%d0 + move.l (%a0),%d1 + jra .Lcls_epilogue +.Lcls_ret_float: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s (%a0),%fp0 +#else + move.l (%a0),%d0 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_ldouble + | CIF_FLAGS_DOUBLE +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0),%d1 +#endif + jra .Lcls_epilogue +.Lcls_ret_ldouble: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0)+,%d1 + move.l (%a0),%d2 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_struct1 + | CIF_FLAGS_POINTER + move.l (%a0),%a0 + move.l %a0,%d0 + jra .Lcls_epilogue +.Lcls_ret_struct1: + move.b (%a0),%d0 + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_sint8 + | CIF_FLAGS_STRUCT2 + move.w (%a0),%d0 + jra .Lcls_epilogue +.Lcls_ret_sint8: + move.l (%a0),%d0 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + jra .Lcls_epilogue +1: + | CIF_FLAGS_SINT16 + move.l (%a0),%d0 + ext.l %d0 + jra .Lcls_epilogue + CFI_ENDPROC() + + .size CALLFUNC(ffi_closure_SYSV),.-CALLFUNC(ffi_closure_SYSV) + + .globl CALLFUNC(ffi_closure_struct_SYSV) + .type CALLFUNC(ffi_closure_struct_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_struct_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + move.l %a1,-(%sp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_closure_struct_SYSV),.-CALLFUNC(ffi_closure_struct_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv.S new file mode 100644 index 0000000..ea40f11 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv.S @@ -0,0 +1,357 @@ +/* ----------------------------------------------------------------------- + + sysv.S - Copyright (c) 2012 Alan Hourihane + Copyright (c) 1998, 2012 Andreas Schwab + Copyright (c) 2008 Red Hat, Inc. + Copyright (c) 2012, 2016 Thorsten Glaser + + m68k Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef HAVE_AS_CFI_PSEUDO_OP +#define CFI_STARTPROC() .cfi_startproc +#define CFI_OFFSET(reg,off) .cfi_offset reg,off +#define CFI_DEF_CFA(reg,off) .cfi_def_cfa reg,off +#define CFI_ENDPROC() .cfi_endproc +#else +#define CFI_STARTPROC() +#define CFI_OFFSET(reg,off) +#define CFI_DEF_CFA(reg,off) +#define CFI_ENDPROC() +#endif + +#ifdef __MINT__ +#define CALLFUNC(funcname) _ ## funcname +#else +#define CALLFUNC(funcname) funcname +#endif + + .text + + .globl CALLFUNC(ffi_call_SYSV) + .type CALLFUNC(ffi_call_SYSV),@function + .align 4 + +CALLFUNC(ffi_call_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %d2,-(%sp) + CFI_OFFSET(2,-12) + + | Make room for all of the new args. + sub.l 12(%fp),%sp + + | Call ffi_prep_args + move.l 8(%fp),-(%sp) + pea 4(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_prep_args) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_prep_args@PLTPC) +#endif + addq.l #8,%sp + + | Pass pointer to struct value, if any +#ifdef __MINT__ + move.l %d0,%a1 +#else + move.l %a0,%a1 +#endif + + | Call the function + move.l 24(%fp),%a0 + jsr (%a0) + + | Remove the space we pushed for the args + add.l 12(%fp),%sp + + | Load the pointer to storage for the return value + move.l 20(%fp),%a1 + + | Load the return type code + move.l 16(%fp),%d2 + + | If the return value pointer is NULL, assume no return value. + | NOTE: On the mc68000, tst on an address register is not supported. +#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + cmp.w #0, %a1 +#else + tst.l %a1 +#endif + jbeq noretval + + btst #0,%d2 + jbeq retlongint + move.l %d0,(%a1) + jbra epilogue + +retlongint: + btst #1,%d2 + jbeq retfloat + move.l %d0,(%a1) + move.l %d1,4(%a1) + jbra epilogue + +retfloat: + btst #2,%d2 + jbeq retdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s %fp0,(%a1) +#else + move.l %d0,(%a1) +#endif + jbra epilogue + +retdouble: + btst #3,%d2 + jbeq retlongdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1) +#endif + jbra epilogue + +retlongdouble: + btst #4,%d2 + jbeq retpointer +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1)+ + move.l %d2,(%a1) +#endif + jbra epilogue + +retpointer: + btst #5,%d2 + jbeq retstruct1 +#ifdef __MINT__ + move.l %d0,(%a1) +#else + move.l %a0,(%a1) +#endif + jbra epilogue + +retstruct1: + btst #6,%d2 + jbeq retstruct2 + move.b %d0,(%a1) + jbra epilogue + +retstruct2: + btst #7,%d2 + jbeq retsint8 + move.w %d0,(%a1) + jbra epilogue + +retsint8: + btst #8,%d2 + jbeq retsint16 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + move.l %d0,(%a1) + jbra epilogue + +retsint16: + btst #9,%d2 + jbeq noretval + ext.l %d0 + move.l %d0,(%a1) + +noretval: +epilogue: + move.l (%sp)+,%d2 + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_call_SYSV),.-CALLFUNC(ffi_call_SYSV) + + .globl CALLFUNC(ffi_closure_SYSV) + .type CALLFUNC(ffi_closure_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_SYSV): + CFI_STARTPROC() + link %fp,#-12 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + pea -12(%fp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + + lsr.l #1,%d0 + jne 1f + jcc .Lcls_epilogue + | CIF_FLAGS_INT + move.l -12(%fp),%d0 +.Lcls_epilogue: + | no CIF_FLAGS_* + unlk %fp + rts +1: + lea -12(%fp),%a0 + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_float + | CIF_FLAGS_DINT + move.l (%a0)+,%d0 + move.l (%a0),%d1 + jra .Lcls_epilogue +.Lcls_ret_float: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s (%a0),%fp0 +#else + move.l (%a0),%d0 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_ldouble + | CIF_FLAGS_DOUBLE +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0),%d1 +#endif + jra .Lcls_epilogue +.Lcls_ret_ldouble: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0)+,%d1 + move.l (%a0),%d2 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_struct1 + | CIF_FLAGS_POINTER + move.l (%a0),%a0 + move.l %a0,%d0 + jra .Lcls_epilogue +.Lcls_ret_struct1: + move.b (%a0),%d0 + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_sint8 + | CIF_FLAGS_STRUCT2 + move.w (%a0),%d0 + jra .Lcls_epilogue +.Lcls_ret_sint8: + move.l (%a0),%d0 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + jra .Lcls_epilogue +1: + | CIF_FLAGS_SINT16 + move.l (%a0),%d0 + ext.l %d0 + jra .Lcls_epilogue + CFI_ENDPROC() + + .size CALLFUNC(ffi_closure_SYSV),.-CALLFUNC(ffi_closure_SYSV) + + .globl CALLFUNC(ffi_closure_struct_SYSV) + .type CALLFUNC(ffi_closure_struct_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_struct_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + move.l %a1,-(%sp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_closure_struct_SYSV),.-CALLFUNC(ffi_closure_struct_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi 2.c new file mode 100644 index 0000000..57b344f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi 2.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. + * + * Only OpenBSD/m88k is currently supported; other platforms (such as + * Motorola's SysV/m88k) could be supported with the following tweaks: + * + * - non-OpenBSD systems use an `outgoing parameter area' as part of the + * 88BCS calling convention, which is not supported under OpenBSD from + * release 3.6 onwards. Supporting it should be as easy as taking it + * into account when adjusting the stack, in the assembly code. 
+ * + * - the logic deciding whether a function argument gets passed through + * registers, or on the stack, has changed several times in OpenBSD in + * edge cases (especially for structs larger than 32 bytes being passed + * by value). The code below attemps to match the logic used by the + * system compiler of OpenBSD 5.3, i.e. gcc 3.3.6 with many m88k backend + * fixes. + */ + +#include +#include + +#include +#include + +void ffi_call_OBSD (unsigned int, extended_cif *, unsigned int, void *, + void (*fn) ()); +void *ffi_prep_args (void *, extended_cif *); +void ffi_closure_OBSD (ffi_closure *); +void ffi_closure_struct_OBSD (ffi_closure *); +unsigned int ffi_closure_OBSD_inner (ffi_closure *, void *, unsigned int *, + char *); +void ffi_cacheflush_OBSD (unsigned int, unsigned int); + +#define CIF_FLAGS_INT (1 << 0) +#define CIF_FLAGS_DINT (1 << 1) + +/* + * Foreign Function Interface API + */ + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp, *stackp; + unsigned int *regp; + unsigned int regused; + ffi_type **p_arg; + void *struct_value_ptr; + + regp = (unsigned int *)stack; + stackp = (char *)(regp + 8); + regused = 0; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument can be passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + switch (t) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *(unsigned int *) argp = *(unsigned int *) *p_argv; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + + /* Align if necessary. */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are filled, and about to continue + on stack, regp == stackp. Therefore the check for regused as well. 
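[Editor's note] The comments in ffi_prep_args above spell out the m88k rule: the eight argument registers take simple types of at most 64 bits, plus structs that are exactly register-sized and register-aligned; everything else, and anything after the registers run out, lands on the stack. A hedged restatement as a pure function; classify() is illustrative only, and the real code additionally pads 64-bit values to an even register pair and lets the final pair straddle into the stack area (the "regp == stackp" caution above).

#include <stddef.h>
#include <stdio.h>

enum where { IN_REGISTERS, ON_STACK };

static enum where classify(size_t size, size_t align, int is_struct,
                           unsigned regused)
{
  /* Structs ride in a register only when they are exactly the size and
     alignment of one register. */
  if (is_struct)
    return (size == sizeof(int) && align == sizeof(int) && regused < 8)
           ? IN_REGISTERS : ON_STACK;

  /* Simple types use registers while any of the eight remain. */
  return (regused < 8) ? IN_REGISTERS : ON_STACK;
}

int main(void)
{
  printf("%d\n", classify(4, 4, 1, 3) == IN_REGISTERS);  /* word-sized struct */
  printf("%d\n", classify(16, 4, 1, 0) == ON_STACK);     /* larger struct */
  printf("%d\n", classify(4, 4, 0, 8) == ON_STACK);      /* registers exhausted */
  return 0;
}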
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } + + return struct_value_ptr; +} + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size == sizeof (int) && + cif->rtype->alignment == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = 0; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && (cif->rtype->size != sizeof (int) + || cif->rtype->alignment != sizeof (int))) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_OBSD: + ffi_call_OBSD (cif->bytes, &ecif, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +static void +ffi_prep_closure_args_OBSD (ffi_cif *cif, void **avalue, unsigned int *regp, + char *stackp) +{ + unsigned int i; + void **p_argv; + char *argp; + unsigned int regused; + ffi_type **p_arg; + + regused = 0; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument has been passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + if (z < sizeof (int) && t != FFI_TYPE_STRUCT) + *p_argv = (void *) (argp + sizeof (int) - z); + else + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are exhausted, and about to fetch from + stack, regp == stackp. Therefore the check for regused as well. 
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } +} + +unsigned int +ffi_closure_OBSD_inner (ffi_closure *closure, void *resp, unsigned int *regp, + char *stackp) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_args_OBSD(cif, arg_area, regp, stackp); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, void *codeloc) +{ + unsigned int *tramp = (unsigned int *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_OBSD); + + if (cif->rtype->type == FFI_TYPE_STRUCT && !cif->flags) + fn = &ffi_closure_struct_OBSD; + else + fn = &ffi_closure_OBSD; + + /* or.u %r10, %r0, %hi16(fn) */ + tramp[0] = 0x5d400000 | (((unsigned int)fn) >> 16); + /* or.u %r13, %r0, %hi16(closure) */ + tramp[1] = 0x5da00000 | ((unsigned int)closure >> 16); + /* or %r10, %r10, %lo16(fn) */ + tramp[2] = 0x594a0000 | (((unsigned int)fn) & 0xffff); + /* jmp.n %r10 */ + tramp[3] = 0xf400c40a; + /* or %r13, %r13, %lo16(closure) */ + tramp[4] = 0x59ad0000 | ((unsigned int)closure & 0xffff); + + ffi_cacheflush_OBSD((unsigned int)codeloc, FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi.c new file mode 100644 index 0000000..57b344f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. + * + * Only OpenBSD/m88k is currently supported; other platforms (such as + * Motorola's SysV/m88k) could be supported with the following tweaks: + * + * - non-OpenBSD systems use an `outgoing parameter area' as part of the + * 88BCS calling convention, which is not supported under OpenBSD from + * release 3.6 onwards. Supporting it should be as easy as taking it + * into account when adjusting the stack, in the assembly code. 
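[Editor's note] ffi_prep_closure_loc above builds the m88k trampoline by folding two 32-bit pointers into or.u/or instruction templates (upper and lower 16 bits) around a jmp.n. A hedged sketch of just that encoding step, with the template constants copied from the code above and the two addresses made up for illustration. The .n form executes the following instruction in the delay slot, which is why the low half of the closure pointer can still be patched into %r13 after the jump.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t fn      = 0x12345678u;   /* would be &ffi_closure_OBSD */
  uint32_t closure = 0x9abcdef0u;   /* would be the closure object address */
  uint32_t tramp[5];

  tramp[0] = 0x5d400000u | (fn >> 16);          /* or.u  %r10,%r0,%hi16(fn) */
  tramp[1] = 0x5da00000u | (closure >> 16);     /* or.u  %r13,%r0,%hi16(closure) */
  tramp[2] = 0x594a0000u | (fn & 0xffffu);      /* or    %r10,%r10,%lo16(fn) */
  tramp[3] = 0xf400c40au;                       /* jmp.n %r10 */
  tramp[4] = 0x59ad0000u | (closure & 0xffffu); /* or    %r13,%r13,%lo16(closure) */

  for (int i = 0; i < 5; i++)
    printf("tramp[%d] = 0x%08x\n", i, tramp[i]);
  return 0;
}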
+ * + * - the logic deciding whether a function argument gets passed through + * registers, or on the stack, has changed several times in OpenBSD in + * edge cases (especially for structs larger than 32 bytes being passed + * by value). The code below attemps to match the logic used by the + * system compiler of OpenBSD 5.3, i.e. gcc 3.3.6 with many m88k backend + * fixes. + */ + +#include +#include + +#include +#include + +void ffi_call_OBSD (unsigned int, extended_cif *, unsigned int, void *, + void (*fn) ()); +void *ffi_prep_args (void *, extended_cif *); +void ffi_closure_OBSD (ffi_closure *); +void ffi_closure_struct_OBSD (ffi_closure *); +unsigned int ffi_closure_OBSD_inner (ffi_closure *, void *, unsigned int *, + char *); +void ffi_cacheflush_OBSD (unsigned int, unsigned int); + +#define CIF_FLAGS_INT (1 << 0) +#define CIF_FLAGS_DINT (1 << 1) + +/* + * Foreign Function Interface API + */ + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp, *stackp; + unsigned int *regp; + unsigned int regused; + ffi_type **p_arg; + void *struct_value_ptr; + + regp = (unsigned int *)stack; + stackp = (char *)(regp + 8); + regused = 0; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument can be passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + switch (t) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *(unsigned int *) argp = *(unsigned int *) *p_argv; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + + /* Align if necessary. */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are filled, and about to continue + on stack, regp == stackp. Therefore the check for regused as well. 
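ffi_prep_args above marshals an avalue array that callers build through libffi's portable API before the port-specific assembly runs. As a reference point, the sketch below shows that caller side; ffi_prep_cif, ffi_call and FFI_FN are the public libffi entry points, while the add() target and its argument values are invented for the example.

    #include <ffi.h>
    #include <stdio.h>

    /* Hypothetical native function used only for this illustration. */
    static double add(int a, double b) { return a + b; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[] = { &ffi_type_sint, &ffi_type_double };
        int a = 2;
        double b = 0.5, result;
        void *arg_values[] = { &a, &b };

        /* FFI_DEFAULT_ABI resolves to FFI_OBSD on m88k (see ffitarget.h below). */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, arg_types) == FFI_OK) {
            ffi_call(&cif, FFI_FN(add), &result, arg_values);
            printf("%f\n", result);  /* 2.500000 */
        }
        return 0;
    }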
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } + + return struct_value_ptr; +} + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size == sizeof (int) && + cif->rtype->alignment == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = 0; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && (cif->rtype->size != sizeof (int) + || cif->rtype->alignment != sizeof (int))) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_OBSD: + ffi_call_OBSD (cif->bytes, &ecif, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +static void +ffi_prep_closure_args_OBSD (ffi_cif *cif, void **avalue, unsigned int *regp, + char *stackp) +{ + unsigned int i; + void **p_argv; + char *argp; + unsigned int regused; + ffi_type **p_arg; + + regused = 0; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument has been passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + if (z < sizeof (int) && t != FFI_TYPE_STRUCT) + *p_argv = (void *) (argp + sizeof (int) - z); + else + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are exhausted, and about to fetch from + stack, regp == stackp. Therefore the check for regused as well. 
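ffi_prep_closure_args_OBSD above rebuilds the argument pointers that a user-supplied handler receives once ffi_closure_OBSD_inner dispatches to it. The sketch below shows the corresponding caller side of libffi's closure API; ffi_closure_alloc, ffi_prep_closure_loc and ffi_closure_free are the public entry points, and handler() with its adding behaviour is invented for the example.

    #include <ffi.h>
    #include <stdio.h>

    /* Hypothetical handler: args[] points at the values extracted above. */
    static void handler(ffi_cif *cif, void *ret, void **args, void *userdata)
    {
        (void)cif; (void)userdata;
        *(ffi_arg *)ret = *(int *)args[0] + *(int *)args[1];
    }

    int main(void)
    {
        ffi_cif cif;
        void *codeloc;
        ffi_type *arg_types[] = { &ffi_type_sint, &ffi_type_sint };
        ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);
        int (*fn)(int, int);

        if (closure
            && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) == FFI_OK
            && ffi_prep_closure_loc(closure, &cif, handler, NULL, codeloc) == FFI_OK) {
            fn = (int (*)(int, int))codeloc;
            printf("%d\n", fn(20, 22));  /* 42 */
        }
        ffi_closure_free(closure);
        return 0;
    }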
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } +} + +unsigned int +ffi_closure_OBSD_inner (ffi_closure *closure, void *resp, unsigned int *regp, + char *stackp) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_args_OBSD(cif, arg_area, regp, stackp); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, void *codeloc) +{ + unsigned int *tramp = (unsigned int *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_OBSD); + + if (cif->rtype->type == FFI_TYPE_STRUCT && !cif->flags) + fn = &ffi_closure_struct_OBSD; + else + fn = &ffi_closure_OBSD; + + /* or.u %r10, %r0, %hi16(fn) */ + tramp[0] = 0x5d400000 | (((unsigned int)fn) >> 16); + /* or.u %r13, %r0, %hi16(closure) */ + tramp[1] = 0x5da00000 | ((unsigned int)closure >> 16); + /* or %r10, %r10, %lo16(fn) */ + tramp[2] = 0x594a0000 | (((unsigned int)fn) & 0xffff); + /* jmp.n %r10 */ + tramp[3] = 0xf400c40a; + /* or %r13, %r13, %lo16(closure) */ + tramp[4] = 0x59ad0000 | ((unsigned int)closure & 0xffff); + + ffi_cacheflush_OBSD((unsigned int)codeloc, FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget 2.h new file mode 100644 index 0000000..e52bf9f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget 2.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
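ffi_prep_closure_loc above patches a five-word m88k trampoline: two or.u/or pairs build the 32-bit addresses of the assembly entry point and the closure in %r10 and %r13, and jmp.n transfers control while its delay slot finishes loading %r13. The host-side sketch below rebuilds the same five words from a pair of sample addresses using only the opcode constants already present in the source; emit_trampoline and the sample values are illustrative and not part of libffi.

    #include <stdint.h>
    #include <stdio.h>

    static void emit_trampoline(uint32_t tramp[5], uint32_t fn, uint32_t closure)
    {
        tramp[0] = 0x5d400000 | (fn >> 16);          /* or.u %r10, %r0, hi16(fn)       */
        tramp[1] = 0x5da00000 | (closure >> 16);     /* or.u %r13, %r0, hi16(closure)  */
        tramp[2] = 0x594a0000 | (fn & 0xffff);       /* or   %r10, %r10, lo16(fn)      */
        tramp[3] = 0xf400c40a;                       /* jmp.n %r10                     */
        tramp[4] = 0x59ad0000 | (closure & 0xffff);  /* or   %r13, %r13, lo16(closure) */
    }

    int main(void)
    {
        uint32_t words[5];
        emit_trampoline(words, 0x12345678u, 0x9abcdef0u);  /* sample addresses */
        for (int i = 0; i < 5; i++)
            printf("0x%08x\n", (unsigned)words[i]);
        return 0;
    }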
+ */ + +/* + * m88k Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OBSD, + FFI_DEFAULT_ABI = FFI_OBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 0x14 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget.h new file mode 100644 index 0000000..e52bf9f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OBSD, + FFI_DEFAULT_ABI = FFI_OBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 0x14 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd 2.S new file mode 100644 index 0000000..1944a23 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd 2.S @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * ffi_cacheflush_OBSD(unsigned int addr, %r2 + * unsigned int size); %r3 + */ + .align 4 + .globl ffi_cacheflush_OBSD + .type ffi_cacheflush_OBSD,@function +ffi_cacheflush_OBSD: + tb0 0, %r0, 451 + or %r0, %r0, %r0 + jmp %r1 + .size ffi_cacheflush_OBSD, . - ffi_cacheflush_OBSD + +/* + * ffi_call_OBSD(unsigned bytes, %r2 + * extended_cif *ecif, %r3 + * unsigned flags, %r4 + * void *rvalue, %r5 + * void (*fn)()); %r6 + */ + .align 4 + .globl ffi_call_OBSD + .type ffi_call_OBSD,@function +ffi_call_OBSD: + subu %r31, %r31, 32 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 32 + + | Save the few arguments we'll need after ffi_prep_args() + st.d %r4, %r31, 8 + st %r6, %r31, 16 + + | Allocate room for the image of r2-r9, and the stack space for + | the args (rounded to a 16-byte boundary) + addu %r2, %r2, (8 * 4) + 15 + clr %r2, %r2, 4<0> + subu %r31, %r31, %r2 + + | Fill register and stack image + or %r2, %r31, %r0 +#ifdef PIC + bsr ffi_prep_args#plt +#else + bsr ffi_prep_args +#endif + + | Save pointer to return struct address, if any + or %r12, %r2, %r0 + + | Get function pointer + subu %r4, %r30, 32 + ld %r1, %r4, 16 + + | Fetch the register arguments + ld.d %r2, %r31, (0 * 4) + ld.d %r4, %r31, (2 * 4) + ld.d %r6, %r31, (4 * 4) + ld.d %r8, %r31, (6 * 4) + addu %r31, %r31, (8 * 4) + + | Invoke the function + jsr %r1 + + | Restore stack now that we don't need the args anymore + subu %r31, %r30, 32 + + | Figure out what to return as the function's return value + ld %r5, %r31, 12 | rvalue + ld %r4, %r31, 8 | flags + + bcnd eq0, %r5, 9f + + bb0 0, %r4, 1f | CIF_FLAGS_INT + st %r2, %r5, 0 + br 9f + +1: + bb0 1, %r4, 1f | CIF_FLAGS_DINT + st.d %r2, %r5, 0 + br 9f + +1: +9: + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 32 + .size ffi_call_OBSD, . 
- ffi_call_OBSD + +/* + * ffi_closure_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_OBSD + .type ffi_closure_OBSD, @function +ffi_closure_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments and return + | value + subu %r31, %r31, (8 * 4) + (2 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + addu %r3, %r31, (8 * 4) | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + | Figure out what to return as the function's return value + bb0 0, %r2, 1f | CIF_FLAGS_INT + ld %r2, %r31, (8 * 4) + br 9f + +1: + bb0 1, %r2, 1f | CIF_FLAGS_DINT + ld.d %r2, %r31, (8 * 4) + br 9f + +1: +9: + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_OBSD,.-ffi_closure_OBSD + +/* + * ffi_closure_struct_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_struct_OBSD + .type ffi_closure_struct_OBSD, @function +ffi_closure_struct_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments + subu %r31, %r31, (8 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + or %r3, %r12, 0 | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_struct_OBSD,.-ffi_closure_struct_OBSD diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd.S new file mode 100644 index 0000000..1944a23 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd.S @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
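The bb0 tests in ffi_call_OBSD and ffi_closure_OBSD above branch on single bits of the flags word (cif->flags, also the return value of ffi_closure_OBSD_inner): bit 0 selects a one-word result in %r2, bit 1 a doubleword in %r2:%r3, and no bit set means nothing to load (void, or a struct returned through the hidden pointer). The C sketch below merely restates that dispatch with the same CIF_FLAGS_* constants; load_return and its parameters are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CIF_FLAGS_INT  (1 << 0)  /* one word in %r2       */
    #define CIF_FLAGS_DINT (1 << 1)  /* doubleword in %r2:%r3 */

    static void load_return(unsigned int flags, const void *retarea,
                            uint32_t *r2, uint32_t *r3)
    {
        if (flags & CIF_FLAGS_INT)
            memcpy(r2, retarea, 4);                       /* ld   %r2, ... */
        else if (flags & CIF_FLAGS_DINT) {
            memcpy(r2, retarea, 4);                       /* ld.d %r2, ... */
            memcpy(r3, (const char *)retarea + 4, 4);
        }
    }

    int main(void)
    {
        uint64_t sample = 42;
        uint32_t r2 = 0, r3 = 0;
        load_return(CIF_FLAGS_DINT, &sample, &r2, &r3);
        printf("%u %u\n", (unsigned)r2, (unsigned)r3);
        return 0;
    }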
+ */ + +/* + * m88k Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * ffi_cacheflush_OBSD(unsigned int addr, %r2 + * unsigned int size); %r3 + */ + .align 4 + .globl ffi_cacheflush_OBSD + .type ffi_cacheflush_OBSD,@function +ffi_cacheflush_OBSD: + tb0 0, %r0, 451 + or %r0, %r0, %r0 + jmp %r1 + .size ffi_cacheflush_OBSD, . - ffi_cacheflush_OBSD + +/* + * ffi_call_OBSD(unsigned bytes, %r2 + * extended_cif *ecif, %r3 + * unsigned flags, %r4 + * void *rvalue, %r5 + * void (*fn)()); %r6 + */ + .align 4 + .globl ffi_call_OBSD + .type ffi_call_OBSD,@function +ffi_call_OBSD: + subu %r31, %r31, 32 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 32 + + | Save the few arguments we'll need after ffi_prep_args() + st.d %r4, %r31, 8 + st %r6, %r31, 16 + + | Allocate room for the image of r2-r9, and the stack space for + | the args (rounded to a 16-byte boundary) + addu %r2, %r2, (8 * 4) + 15 + clr %r2, %r2, 4<0> + subu %r31, %r31, %r2 + + | Fill register and stack image + or %r2, %r31, %r0 +#ifdef PIC + bsr ffi_prep_args#plt +#else + bsr ffi_prep_args +#endif + + | Save pointer to return struct address, if any + or %r12, %r2, %r0 + + | Get function pointer + subu %r4, %r30, 32 + ld %r1, %r4, 16 + + | Fetch the register arguments + ld.d %r2, %r31, (0 * 4) + ld.d %r4, %r31, (2 * 4) + ld.d %r6, %r31, (4 * 4) + ld.d %r8, %r31, (6 * 4) + addu %r31, %r31, (8 * 4) + + | Invoke the function + jsr %r1 + + | Restore stack now that we don't need the args anymore + subu %r31, %r30, 32 + + | Figure out what to return as the function's return value + ld %r5, %r31, 12 | rvalue + ld %r4, %r31, 8 | flags + + bcnd eq0, %r5, 9f + + bb0 0, %r4, 1f | CIF_FLAGS_INT + st %r2, %r5, 0 + br 9f + +1: + bb0 1, %r4, 1f | CIF_FLAGS_DINT + st.d %r2, %r5, 0 + br 9f + +1: +9: + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 32 + .size ffi_call_OBSD, . 
- ffi_call_OBSD + +/* + * ffi_closure_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_OBSD + .type ffi_closure_OBSD, @function +ffi_closure_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments and return + | value + subu %r31, %r31, (8 * 4) + (2 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + addu %r3, %r31, (8 * 4) | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + | Figure out what to return as the function's return value + bb0 0, %r2, 1f | CIF_FLAGS_INT + ld %r2, %r31, (8 * 4) + br 9f + +1: + bb0 1, %r2, 1f | CIF_FLAGS_DINT + ld.d %r2, %r31, (8 * 4) + br 9f + +1: +9: + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_OBSD,.-ffi_closure_OBSD + +/* + * ffi_closure_struct_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_struct_OBSD + .type ffi_closure_struct_OBSD, @function +ffi_closure_struct_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments + subu %r31, %r31, (8 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + or %r3, %r12, 0 | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_struct_OBSD,.-ffi_closure_struct_OBSD diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi 2.c new file mode 100644 index 0000000..3aecb0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi 2.c @@ -0,0 +1,330 @@ +/* ---------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Imagination Technologies + + Meta Foreign Function Interface + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + `Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED `AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) + +/* + * ffi_prep_args is called by the assembly routine once stack space has been + * allocated for the function's arguments + */ + +unsigned int ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + /* Store return value */ + if ( ecif->cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *(void **) argp = ecif->rvalue; + } + + p_argv = ecif->avalue; + + /* point to next location */ + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; (i != 0); i--, p_arg++, p_argv++) + { + size_t z; + + /* Move argp to address of argument */ + z = (*p_arg)->size; + argp -= z; + + /* Align if necessary */ + argp = (char *) FFI_ALIGN_DOWN(FFI_ALIGN_DOWN(argp, (*p_arg)->alignment), 4); + + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + } + } else if ( z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + } + + /* return the size of the arguments to be passed in registers, + padded to an 8 byte boundary to preserve stack alignment */ + return FFI_ALIGN(MIN(stack - argp, 6*4), 8); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type **ptr; + unsigned i, bytes = 0; + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) { + if ((*ptr)->size == 0) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type, do this + check after the initialization. 
*/ + FFI_ASSERT_VALID_TYPE(*ptr); + + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN(bytes, (*ptr)->alignment); + + bytes += FFI_ALIGN((*ptr)->size, 4); + } + + /* Ensure arg space is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT) { + bytes += sizeof(void*); + + /* Ensure stack is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + } + + cif->bytes = bytes; + + /* Set the return type flag */ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) FFI_TYPE_SINT64; + break; + case FFI_TYPE_STRUCT: + /* Meta can store return values which are <= 64 bits */ + if (cif->rtype->size <= 4) + /* Returned to D0Re0 as 32-bit value */ + cif->flags = (unsigned)FFI_TYPE_INT; + else if ((cif->rtype->size > 4) && (cif->rtype->size <= 8)) + /* Returned valued is stored to D1Re0|R0Re0 */ + cif->flags = (unsigned)FFI_TYPE_DOUBLE; + else + /* value stored in memory */ + cif->flags = (unsigned)FFI_TYPE_STRUCT; + break; + default: + cif->flags = (unsigned)FFI_TYPE_INT; + break; + } + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*fn)(void), extended_cif *, unsigned, unsigned, double *); + +/* + * Exported in API. Entry point + * cif -> ffi_cif object + * fn -> function pointer + * rvalue -> pointer to return value + * avalue -> vector of void * pointers pointing to memory locations holding the + * arguments + */ +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + int small_struct = (((cif->flags == FFI_TYPE_INT) || (cif->flags == FFI_TYPE_DOUBLE)) && (cif->rtype->type == FFI_TYPE_STRUCT)); + ecif.cif = cif; + ecif.avalue = avalue; + + double temp; + + /* + * If the return value is a struct and we don't have a return value address + * then we need to make one + */ + + if ((rvalue == NULL ) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else if (small_struct) + ecif.rvalue = &temp; + else + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(fn, &ecif, cif->bytes, cif->flags, ecif.rvalue); + break; + default: + FFI_ASSERT(0); + break; + } + + if (small_struct) + memcpy (rvalue, &temp, cif->rtype->size); +} + +/* private members */ + +static void ffi_prep_incoming_args_SYSV (char *, void **, void **, + ffi_cif*, float *); + +void ffi_closure_SYSV (ffi_closure *); + +/* Do NOT change that without changing the FFI_TRAMPOLINE_SIZE */ +extern unsigned int ffi_metag_trampoline[10]; /* 10 instructions */ + +/* end of private members */ + +/* + * __tramp: trampoline memory location + * __fun: assembly routine + * __ctx: memory location for wrapper + * + * At this point, tramp[0] == __ctx ! 
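The Meta port's ffi_prep_cif_machdep above accumulates cif->bytes by padding to each argument's alignment, reserving each size rounded up to a 4-byte slot, and finally rounding the whole area to 8 bytes. The sketch below walks that arithmetic for a sample (sint8, double, sint32) signature; the hard-coded sizes and alignments are the common 32-bit values and are assumptions made only for this example.

    #include <stddef.h>
    #include <stdio.h>

    #define ALIGN_UP(v, a) (((size_t)(v) + ((size_t)(a) - 1)) & ~((size_t)(a) - 1))

    int main(void)
    {
        /* sint8, double, sint32 with assumed 32-bit sizes/alignments */
        struct { size_t size, align; } args[] = { { 1, 1 }, { 8, 8 }, { 4, 4 } };
        size_t bytes = 0;

        for (size_t i = 0; i < sizeof args / sizeof args[0]; i++) {
            if ((args[i].align - 1) & bytes)           /* pad to the argument's alignment  */
                bytes = ALIGN_UP(bytes, args[i].align);
            bytes += ALIGN_UP(args[i].size, 4);        /* each argument takes whole slots  */
        }
        bytes = ALIGN_UP(bytes, 8);                    /* keep the arg area 8-byte aligned */
        printf("%zu\n", bytes);  /* 4, pad to 8, +8, +4 = 20, rounded up to 24 */
        return 0;
    }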
+ */ +void ffi_init_trampoline(unsigned char *__tramp, unsigned int __fun, unsigned int __ctx) { + memcpy (__tramp, ffi_metag_trampoline, sizeof(ffi_metag_trampoline)); + *(unsigned int*) &__tramp[40] = __ctx; + *(unsigned int*) &__tramp[44] = __fun; + /* This will flush the instruction cache */ + __builtin_meta2_cachewd(&__tramp[0], 1); + __builtin_meta2_cachewd(&__tramp[47], 1); +} + + + +/* the cif must already be prepared */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + void (*closure_func)(ffi_closure*) = NULL; + + if (cif->abi == FFI_SYSV) + closure_func = &ffi_closure_SYSV; + else + return FFI_BAD_ABI; + + ffi_init_trampoline( + (unsigned char*)&closure->tramp[0], + (unsigned int)closure_func, + (unsigned int)codeloc); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +/* This function is jumped to by the trampoline */ +unsigned int ffi_closure_SYSV_inner (closure, respp, args, vfp_args) + ffi_closure *closure; + void **respp; + void *args; + void *vfp_args; +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void*)); + + /* + * This call will initialize ARG_AREA, such that each + * element in that array points to the corresponding + * value on the stack; and if the function returns + * a structure, it will re-set RESP to point to the + * structure return address. + */ + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif, vfp_args); + + (closure->fun) ( cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif, + float *vfp_stack) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + /* stack points to original arguments */ + argp = stack; + + /* Store return value */ + if ( cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *rvalue = *(void **) argp; + } + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) { + size_t z; + size_t alignment; + + alignment = (*p_arg)->alignment; + if (alignment < 4) + alignment = 4; + if ((alignment - 1) & (unsigned)argp) + argp = (char *) FFI_ALIGN(argp, alignment); + + z = (*p_arg)->size; + *p_argv = (void*) argp; + p_argv++; + argp -= z; + } + return; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi.c new file mode 100644 index 0000000..3aecb0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi.c @@ -0,0 +1,330 @@ +/* ---------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Imagination Technologies + + Meta Foreign Function Interface + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + `Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED `AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. +----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) + +/* + * ffi_prep_args is called by the assembly routine once stack space has been + * allocated for the function's arguments + */ + +unsigned int ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + /* Store return value */ + if ( ecif->cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *(void **) argp = ecif->rvalue; + } + + p_argv = ecif->avalue; + + /* point to next location */ + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; (i != 0); i--, p_arg++, p_argv++) + { + size_t z; + + /* Move argp to address of argument */ + z = (*p_arg)->size; + argp -= z; + + /* Align if necessary */ + argp = (char *) FFI_ALIGN_DOWN(FFI_ALIGN_DOWN(argp, (*p_arg)->alignment), 4); + + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + } + } else if ( z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + } + + /* return the size of the arguments to be passed in registers, + padded to an 8 byte boundary to preserve stack alignment */ + return FFI_ALIGN(MIN(stack - argp, 6*4), 8); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type **ptr; + unsigned i, bytes = 0; + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) { + if ((*ptr)->size == 0) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type, do this + check after the initialization. 
*/ + FFI_ASSERT_VALID_TYPE(*ptr); + + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN(bytes, (*ptr)->alignment); + + bytes += FFI_ALIGN((*ptr)->size, 4); + } + + /* Ensure arg space is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT) { + bytes += sizeof(void*); + + /* Ensure stack is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + } + + cif->bytes = bytes; + + /* Set the return type flag */ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) FFI_TYPE_SINT64; + break; + case FFI_TYPE_STRUCT: + /* Meta can store return values which are <= 64 bits */ + if (cif->rtype->size <= 4) + /* Returned to D0Re0 as 32-bit value */ + cif->flags = (unsigned)FFI_TYPE_INT; + else if ((cif->rtype->size > 4) && (cif->rtype->size <= 8)) + /* Returned valued is stored to D1Re0|R0Re0 */ + cif->flags = (unsigned)FFI_TYPE_DOUBLE; + else + /* value stored in memory */ + cif->flags = (unsigned)FFI_TYPE_STRUCT; + break; + default: + cif->flags = (unsigned)FFI_TYPE_INT; + break; + } + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*fn)(void), extended_cif *, unsigned, unsigned, double *); + +/* + * Exported in API. Entry point + * cif -> ffi_cif object + * fn -> function pointer + * rvalue -> pointer to return value + * avalue -> vector of void * pointers pointing to memory locations holding the + * arguments + */ +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + int small_struct = (((cif->flags == FFI_TYPE_INT) || (cif->flags == FFI_TYPE_DOUBLE)) && (cif->rtype->type == FFI_TYPE_STRUCT)); + ecif.cif = cif; + ecif.avalue = avalue; + + double temp; + + /* + * If the return value is a struct and we don't have a return value address + * then we need to make one + */ + + if ((rvalue == NULL ) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else if (small_struct) + ecif.rvalue = &temp; + else + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(fn, &ecif, cif->bytes, cif->flags, ecif.rvalue); + break; + default: + FFI_ASSERT(0); + break; + } + + if (small_struct) + memcpy (rvalue, &temp, cif->rtype->size); +} + +/* private members */ + +static void ffi_prep_incoming_args_SYSV (char *, void **, void **, + ffi_cif*, float *); + +void ffi_closure_SYSV (ffi_closure *); + +/* Do NOT change that without changing the FFI_TRAMPOLINE_SIZE */ +extern unsigned int ffi_metag_trampoline[10]; /* 10 instructions */ + +/* end of private members */ + +/* + * __tramp: trampoline memory location + * __fun: assembly routine + * __ctx: memory location for wrapper + * + * At this point, tramp[0] == __ctx ! 
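Per the classification above, the Meta port hands back aggregates of up to eight bytes in D0Re0/D1Re0 and ffi_call copies them out of a temporary into the caller's buffer. The sketch below exercises that path with libffi's standard aggregate-type setup; the pair struct and make_pair function are invented for the example.

    #include <ffi.h>
    #include <stdio.h>

    typedef struct { short x, y; } pair;               /* 4 bytes: returned in D0Re0 */

    static pair make_pair(short x, short y) { pair p = { x, y }; return p; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type pair_type;
        ffi_type *pair_elements[] = { &ffi_type_sshort, &ffi_type_sshort, NULL };
        ffi_type *arg_types[] = { &ffi_type_sshort, &ffi_type_sshort };
        short x = 1, y = 2;
        void *args[] = { &x, &y };
        pair result;

        pair_type.size = pair_type.alignment = 0;      /* filled in by ffi_prep_cif */
        pair_type.type = FFI_TYPE_STRUCT;
        pair_type.elements = pair_elements;

        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &pair_type, arg_types) == FFI_OK) {
            ffi_call(&cif, FFI_FN(make_pair), &result, args);
            printf("%d %d\n", result.x, result.y);     /* 1 2 */
        }
        return 0;
    }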
+ */ +void ffi_init_trampoline(unsigned char *__tramp, unsigned int __fun, unsigned int __ctx) { + memcpy (__tramp, ffi_metag_trampoline, sizeof(ffi_metag_trampoline)); + *(unsigned int*) &__tramp[40] = __ctx; + *(unsigned int*) &__tramp[44] = __fun; + /* This will flush the instruction cache */ + __builtin_meta2_cachewd(&__tramp[0], 1); + __builtin_meta2_cachewd(&__tramp[47], 1); +} + + + +/* the cif must already be prepared */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + void (*closure_func)(ffi_closure*) = NULL; + + if (cif->abi == FFI_SYSV) + closure_func = &ffi_closure_SYSV; + else + return FFI_BAD_ABI; + + ffi_init_trampoline( + (unsigned char*)&closure->tramp[0], + (unsigned int)closure_func, + (unsigned int)codeloc); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +/* This function is jumped to by the trampoline */ +unsigned int ffi_closure_SYSV_inner (closure, respp, args, vfp_args) + ffi_closure *closure; + void **respp; + void *args; + void *vfp_args; +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void*)); + + /* + * This call will initialize ARG_AREA, such that each + * element in that array points to the corresponding + * value on the stack; and if the function returns + * a structure, it will re-set RESP to point to the + * structure return address. + */ + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif, vfp_args); + + (closure->fun) ( cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif, + float *vfp_stack) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + /* stack points to original arguments */ + argp = stack; + + /* Store return value */ + if ( cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *rvalue = *(void **) argp; + } + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) { + size_t z; + size_t alignment; + + alignment = (*p_arg)->alignment; + if (alignment < 4) + alignment = 4; + if ((alignment - 1) & (unsigned)argp) + argp = (char *) FFI_ALIGN(argp, alignment); + + z = (*p_arg)->size; + *p_argv = (void*) argp; + p_argv++; + argp -= z; + } + return; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget 2.h new file mode 100644 index 0000000..7b9dbeb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget 2.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Imagination Technologies Ltd. 
+ Target configuration macros for Meta + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_DEFAULT_ABI = FFI_SYSV, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1, +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 48 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget.h new file mode 100644 index 0000000..7b9dbeb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Imagination Technologies Ltd. + Target configuration macros for Meta + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
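FFI_TRAMPOLINE_SIZE above (48) matches the layout that ffi_init_trampoline patches: ten 4-byte instructions copied from ffi_metag_trampoline, followed by the closure context at byte 40 and the entry address at byte 44. The compile-time sketch below records that layout; the struct name is illustrative and not part of libffi.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct metag_trampoline_layout {
        uint32_t insns[10];  /* copied from ffi_metag_trampoline      */
        uint32_t ctx;        /* patched at __tramp[40]: codeloc/__ctx */
        uint32_t fun;        /* patched at __tramp[44]: closure entry */
    };

    static_assert(offsetof(struct metag_trampoline_layout, ctx) == 40,
                  "context word sits at byte 40");
    static_assert(offsetof(struct metag_trampoline_layout, fun) == 44,
                  "entry address sits at byte 44");
    static_assert(sizeof(struct metag_trampoline_layout) == 48,
                  "48 bytes, matching FFI_TRAMPOLINE_SIZE");

    int main(void) { return 0; }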
+ + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_DEFAULT_ABI = FFI_SYSV, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1, +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 48 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv 2.S new file mode 100644 index 0000000..b4b2a3b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv 2.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Imagination Technologies Ltd. + + Meta Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#define ENTRY(x) .globl CNAME(x); .type CNAME(x), %function; CNAME(x): +#endif + +#ifdef __ELF__ +#define LSYM(x) .x +#else +#define LSYM(x) x +#endif + +.macro call_reg x= + .text + .balign 4 + mov D1RtP, \x + swap D1RtP, PC +.endm + +! Save register arguments +.macro SAVE_ARGS + .text + .balign 4 + setl [A0StP++], D0Ar6, D1Ar5 + setl [A0StP++], D0Ar4, D1Ar3 + setl [A0StP++], D0Ar2, D1Ar1 +.endm + +! Save retrun, frame pointer and other regs +.macro SAVE_REGS regs= + .text + .balign 4 + setl [A0StP++], D0FrT, D1RtP + ! Needs to be a pair of regs + .ifnc "\regs","" + setl [A0StP++], \regs + .endif +.endm + +! Declare a global function +.macro METAG_FUNC_START name + .text + .balign 4 + ENTRY(\name) +.endm + +! Return registers from the stack. Reverse SAVE_REGS operation +.macro RET_REGS regs=, cond= + .ifnc "\regs", "" + getl \regs, [--A0StP] + .endif + getl D0FrT, D1RtP, [--A0StP] +.endm + +! 
Return arguments +.macro RET_ARGS + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] +.endm + + + ! D1Ar1: fn + ! D0Ar2: &ecif + ! D1Ar3: cif->bytes + ! D0Ar4: fig->flags + ! D1Ar5: ecif.rvalue + + ! This assumes we are using GNU as +METAG_FUNC_START ffi_call_SYSV + ! Save argument registers + + SAVE_ARGS + + ! new frame + mov D0FrT, A0FrP + add A0FrP, A0StP, #0 + + ! Preserve the old frame pointer + SAVE_REGS "D1.5, D0.5" + + ! Make room for new args. cifs->bytes is the total space for input + ! and return arguments + + add A0StP, A0StP, D1Ar3 + + ! Preserve cifs->bytes & fn + mov D0.5, D1Ar3 + mov D1.5, D1Ar1 + + ! Place all of the ffi_prep_args in position + mov D1Ar1, A0StP + + ! Call ffi_prep_args(stack, &ecif) +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_prep_args@PLT) +#else + callr D1RtP, CNAME(ffi_prep_args) +#endif + + ! Restore fn pointer + + ! The foreign stack should look like this + ! XXXXX XXXXXX <--- stack pointer + ! FnArgN rvalue + ! FnArgN+2 FnArgN+1 + ! FnArgN+4 FnArgN+3 + ! .... + ! + + ! A0StP now points to the first (or return) argument + 4 + + ! Preserve cif->bytes + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] + + ! Place A0StP to the first argument again + add A0StP, A0StP, #24 ! That's because we loaded 6 regs x 4 byte each + + ! A0FrP points to the initial stack without the reserved space for the + ! cifs->bytes, whilst A0StP points to the stack after the space allocation + + ! fn was the first argument of ffi_call_SYSV. + ! The stack at this point looks like this: + ! + ! A0StP(on entry to _SYSV) -> Arg6 Arg5 | low + ! Arg4 Arg3 | + ! Arg2 Arg1 | + ! A0FrP ----> D0FrtP D1RtP | + ! D1.5 D0.5 | + ! A0StP(bf prep_args) -> FnArgn FnArgn-1 | + ! FnArgn-2FnArgn-3 | + ! ................ | <= cifs->bytes + ! FnArg4 FnArg3 | + ! A0StP (prv_A0StP+cifs->bytes) FnArg2 FnArg1 | high + ! + ! fn was in Arg1 so it's located in in A0FrP+#-0xC + ! + + ! D0Re0 contains the size of arguments stored in registers + sub A0StP, A0StP, D0Re0 + + ! Arg1 is the function pointer for the foreign call. This has been + ! preserved in D1.5 + + ! Time to call (fn). Arguments should be like this: + ! Arg1-Arg6 are loaded to regs + ! The rest of the arguments are stored in stack pointed by A0StP + + call_reg D1.5 + + ! Reset stack. + + mov A0StP, A0FrP + + ! Load Arg1 with the pointer to storage for the return type + ! This was stored in Arg5 + + getd D1Ar1, [A0FrP+#-20] + + ! Load D0Ar2 with the return type code. This was stored in Arg4 (flags) + + getd D0Ar2, [A0FrP+#-16] + + ! We are ready to start processing the return value + ! D0Re0 (and D1Re0) hold the return value + + ! If the return value is NULL, assume no return value + cmp D1Ar1, #0 + beq LSYM(Lepilogue) + + ! return INT + cmp D0Ar2, #FFI_TYPE_INT + ! Sadly, there is no setd{cc} instruction so we need to workaround that + bne .INT64 + setd [D1Ar1], D0Re0 + b LSYM(Lepilogue) + + ! return INT64 +.INT64: + cmp D0Ar2, #FFI_TYPE_SINT64 + setleq [D1Ar1], D0Re0, D1Re0 + + ! return DOUBLE + cmp D0Ar2, #FFI_TYPE_DOUBLE + setl [D1AR1++], D0Re0, D1Re0 + +LSYM(Lepilogue): + ! At this point, the stack pointer points right after the argument + ! saved area. We need to restore 4 regs, therefore we need to move + ! 16 bytes ahead. 
+ add A0StP, A0StP, #16 + RET_REGS "D1.5, D0.5" + RET_ARGS + getd D0Re0, [A0StP] + mov A0FrP, D0FrT + swap D1RtP, PC + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + +/* + (called by ffi_metag_trampoline) + void ffi_closure_SYSV (ffi_closure*) + + (called by ffi_closure_SYSV) + unsigned int FFI_HIDDEN + ffi_closure_SYSV_inner (closure,respp, args) + ffi_closure *closure; + void **respp; + void *args; +*/ + +METAG_FUNC_START ffi_closure_SYSV + ! We assume that D1Ar1 holds the address of the + ! ffi_closure struct. We will use that to fetch the + ! arguments. The stack pointer points to an empty space + ! and it is ready to store more data. + + ! D1Ar1 is ready + ! Allocate stack space for return value + add A0StP, A0StP, #8 + ! Store it to D0Ar2 + sub D0Ar2, A0StP, #8 + + sub D1Ar3, A0FrP, #4 + + ! D1Ar3 contains the address of the original D1Ar1 argument + ! We need to subtract #4 later on + + ! Preverve D0Ar2 + mov D0.5, D0Ar2 + +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_closure_SYSV_inner@PLT) +#else + callr D1RtP, CNAME(ffi_closure_SYSV_inner) +#endif + + ! Check the return value and store it to D0.5 + cmp D0Re0, #FFI_TYPE_INT + beq .Lretint + cmp D0Re0, #FFI_TYPE_DOUBLE + beq .Lretdouble +.Lclosure_epilogue: + sub A0StP, A0StP, #8 + RET_REGS "D1.5, D0.5" + RET_ARGS + swap D1RtP, PC + +.Lretint: + setd [D0.5], D0Re0 + b .Lclosure_epilogue +.Lretdouble: + setl [D0.5++], D0Re0, D1Re0 + b .Lclosure_epilogue +.ffi_closure_SYSV_end: +.size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + + +ENTRY(ffi_metag_trampoline) + SAVE_ARGS + ! New frame + mov A0FrP, A0StP + SAVE_REGS "D1.5, D0.5" + mov D0.5, PC + ! Load D1Ar1 the value of ffi_metag_trampoline + getd D1Ar1, [D0.5 + #8] + ! Jump to ffi_closure_SYSV + getd PC, [D0.5 + #12] diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv.S new file mode 100644 index 0000000..b4b2a3b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Imagination Technologies Ltd. + + Meta Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#define ENTRY(x) .globl CNAME(x); .type CNAME(x), %function; CNAME(x): +#endif + +#ifdef __ELF__ +#define LSYM(x) .x +#else +#define LSYM(x) x +#endif + +.macro call_reg x= + .text + .balign 4 + mov D1RtP, \x + swap D1RtP, PC +.endm + +! Save register arguments +.macro SAVE_ARGS + .text + .balign 4 + setl [A0StP++], D0Ar6, D1Ar5 + setl [A0StP++], D0Ar4, D1Ar3 + setl [A0StP++], D0Ar2, D1Ar1 +.endm + +! Save retrun, frame pointer and other regs +.macro SAVE_REGS regs= + .text + .balign 4 + setl [A0StP++], D0FrT, D1RtP + ! Needs to be a pair of regs + .ifnc "\regs","" + setl [A0StP++], \regs + .endif +.endm + +! Declare a global function +.macro METAG_FUNC_START name + .text + .balign 4 + ENTRY(\name) +.endm + +! Return registers from the stack. Reverse SAVE_REGS operation +.macro RET_REGS regs=, cond= + .ifnc "\regs", "" + getl \regs, [--A0StP] + .endif + getl D0FrT, D1RtP, [--A0StP] +.endm + +! Return arguments +.macro RET_ARGS + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] +.endm + + + ! D1Ar1: fn + ! D0Ar2: &ecif + ! D1Ar3: cif->bytes + ! D0Ar4: fig->flags + ! D1Ar5: ecif.rvalue + + ! This assumes we are using GNU as +METAG_FUNC_START ffi_call_SYSV + ! Save argument registers + + SAVE_ARGS + + ! new frame + mov D0FrT, A0FrP + add A0FrP, A0StP, #0 + + ! Preserve the old frame pointer + SAVE_REGS "D1.5, D0.5" + + ! Make room for new args. cifs->bytes is the total space for input + ! and return arguments + + add A0StP, A0StP, D1Ar3 + + ! Preserve cifs->bytes & fn + mov D0.5, D1Ar3 + mov D1.5, D1Ar1 + + ! Place all of the ffi_prep_args in position + mov D1Ar1, A0StP + + ! Call ffi_prep_args(stack, &ecif) +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_prep_args@PLT) +#else + callr D1RtP, CNAME(ffi_prep_args) +#endif + + ! Restore fn pointer + + ! The foreign stack should look like this + ! XXXXX XXXXXX <--- stack pointer + ! FnArgN rvalue + ! FnArgN+2 FnArgN+1 + ! FnArgN+4 FnArgN+3 + ! .... + ! + + ! A0StP now points to the first (or return) argument + 4 + + ! Preserve cif->bytes + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] + + ! Place A0StP to the first argument again + add A0StP, A0StP, #24 ! That's because we loaded 6 regs x 4 byte each + + ! A0FrP points to the initial stack without the reserved space for the + ! cifs->bytes, whilst A0StP points to the stack after the space allocation + + ! fn was the first argument of ffi_call_SYSV. + ! The stack at this point looks like this: + ! + ! A0StP(on entry to _SYSV) -> Arg6 Arg5 | low + ! Arg4 Arg3 | + ! Arg2 Arg1 | + ! A0FrP ----> D0FrtP D1RtP | + ! D1.5 D0.5 | + ! A0StP(bf prep_args) -> FnArgn FnArgn-1 | + ! FnArgn-2FnArgn-3 | + ! ................ | <= cifs->bytes + ! FnArg4 FnArg3 | + ! A0StP (prv_A0StP+cifs->bytes) FnArg2 FnArg1 | high + ! + ! fn was in Arg1 so it's located in in A0FrP+#-0xC + ! + + ! D0Re0 contains the size of arguments stored in registers + sub A0StP, A0StP, D0Re0 + + ! Arg1 is the function pointer for the foreign call. This has been + ! preserved in D1.5 + + ! Time to call (fn). Arguments should be like this: + ! Arg1-Arg6 are loaded to regs + ! 
The rest of the arguments are stored in stack pointed by A0StP + + call_reg D1.5 + + ! Reset stack. + + mov A0StP, A0FrP + + ! Load Arg1 with the pointer to storage for the return type + ! This was stored in Arg5 + + getd D1Ar1, [A0FrP+#-20] + + ! Load D0Ar2 with the return type code. This was stored in Arg4 (flags) + + getd D0Ar2, [A0FrP+#-16] + + ! We are ready to start processing the return value + ! D0Re0 (and D1Re0) hold the return value + + ! If the return value is NULL, assume no return value + cmp D1Ar1, #0 + beq LSYM(Lepilogue) + + ! return INT + cmp D0Ar2, #FFI_TYPE_INT + ! Sadly, there is no setd{cc} instruction so we need to workaround that + bne .INT64 + setd [D1Ar1], D0Re0 + b LSYM(Lepilogue) + + ! return INT64 +.INT64: + cmp D0Ar2, #FFI_TYPE_SINT64 + setleq [D1Ar1], D0Re0, D1Re0 + + ! return DOUBLE + cmp D0Ar2, #FFI_TYPE_DOUBLE + setl [D1AR1++], D0Re0, D1Re0 + +LSYM(Lepilogue): + ! At this point, the stack pointer points right after the argument + ! saved area. We need to restore 4 regs, therefore we need to move + ! 16 bytes ahead. + add A0StP, A0StP, #16 + RET_REGS "D1.5, D0.5" + RET_ARGS + getd D0Re0, [A0StP] + mov A0FrP, D0FrT + swap D1RtP, PC + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + +/* + (called by ffi_metag_trampoline) + void ffi_closure_SYSV (ffi_closure*) + + (called by ffi_closure_SYSV) + unsigned int FFI_HIDDEN + ffi_closure_SYSV_inner (closure,respp, args) + ffi_closure *closure; + void **respp; + void *args; +*/ + +METAG_FUNC_START ffi_closure_SYSV + ! We assume that D1Ar1 holds the address of the + ! ffi_closure struct. We will use that to fetch the + ! arguments. The stack pointer points to an empty space + ! and it is ready to store more data. + + ! D1Ar1 is ready + ! Allocate stack space for return value + add A0StP, A0StP, #8 + ! Store it to D0Ar2 + sub D0Ar2, A0StP, #8 + + sub D1Ar3, A0FrP, #4 + + ! D1Ar3 contains the address of the original D1Ar1 argument + ! We need to subtract #4 later on + + ! Preverve D0Ar2 + mov D0.5, D0Ar2 + +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_closure_SYSV_inner@PLT) +#else + callr D1RtP, CNAME(ffi_closure_SYSV_inner) +#endif + + ! Check the return value and store it to D0.5 + cmp D0Re0, #FFI_TYPE_INT + beq .Lretint + cmp D0Re0, #FFI_TYPE_DOUBLE + beq .Lretdouble +.Lclosure_epilogue: + sub A0StP, A0StP, #8 + RET_REGS "D1.5, D0.5" + RET_ARGS + swap D1RtP, PC + +.Lretint: + setd [D0.5], D0Re0 + b .Lclosure_epilogue +.Lretdouble: + setl [D0.5++], D0Re0, D1Re0 + b .Lclosure_epilogue +.ffi_closure_SYSV_end: +.size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + + +ENTRY(ffi_metag_trampoline) + SAVE_ARGS + ! New frame + mov A0FrP, A0StP + SAVE_REGS "D1.5, D0.5" + mov D0.5, PC + ! Load D1Ar1 the value of ffi_metag_trampoline + getd D1Ar1, [D0.5 + #8] + ! 
Jump to ffi_closure_SYSV + getd PC, [D0.5 + #12] diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi 2.c new file mode 100644 index 0000000..df6e33c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi 2.c @@ -0,0 +1,321 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +extern void ffi_call_SYSV(void (*)(void*, extended_cif*), extended_cif*, + unsigned int, unsigned int, unsigned int*, void (*fn)(void), + unsigned int, unsigned int); + +extern void ffi_closure_SYSV(void); + +#define WORD_SIZE sizeof(unsigned int) +#define ARGS_REGISTER_SIZE (WORD_SIZE * 6) +#define WORD_FFI_ALIGN(x) FFI_ALIGN(x, WORD_SIZE) + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ +void ffi_prep_args(void* stack, extended_cif* ecif) +{ + unsigned int i; + ffi_type** p_arg; + void** p_argv; + void* stack_args_p = stack; + + if (ecif == NULL || ecif->cif == NULL) { + return; /* no description to prepare */ + } + + p_argv = ecif->avalue; + + if ((ecif->cif->rtype != NULL) && + (ecif->cif->rtype->type == FFI_TYPE_STRUCT)) + { + /* if return type is a struct which is referenced on the stack/reg5, + * by a pointer. Stored the return value pointer in r5. 
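+ * In other words, this appears to be the usual hidden-pointer convention
+ * for struct returns: the first outgoing argument word (which the assembly
+ * stub loads into r5) is set to &rvalue, and the callee writes the struct
+ * back through that pointer instead of returning it in registers.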
+ */ + char* addr = stack_args_p; + memcpy(addr, &(ecif->rvalue), WORD_SIZE); + stack_args_p += WORD_SIZE; + } + + if (ecif->avalue == NULL) { + return; /* no arguments to prepare */ + } + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t size = (*p_arg)->size; + int type = (*p_arg)->type; + void* value = p_argv[i]; + char* addr = stack_args_p; + int aligned_size = WORD_FFI_ALIGN(size); + + /* force word alignment on the stack */ + stack_args_p += aligned_size; + + switch (type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8*)(value); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8*)(value); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16*)(value); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16*)(value); + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * MicroBlaze toolchain appears to emit: + * bsrli r5, r5, 8 (caller) + * ... + * + * ... + * bslli r5, r5, 8 (callee) + * + * For structs like "struct a { uint8_t a[3]; };", when passed + * by value. + * + * Structs like "struct b { uint16_t a; };" are also expected + * to be packed strangely in registers. + * + * This appears to be because the microblaze toolchain expects + * "struct b == uint16_t", which is only any issue for big + * endian. + * + * The following is a work around for big-endian only, for the + * above mentioned case, it will re-align the contents of a + * <= 3-byte struct value. + */ + if (size < WORD_SIZE) + { + memcpy (addr + (WORD_SIZE - size), value, size); + break; + } +#endif + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + default: + memcpy(addr, value, aligned_size); + } + } +} + +ffi_status ffi_prep_cif_machdep(ffi_cif* cif) +{ + /* check ABI */ + switch (cif->abi) + { + case FFI_SYSV: + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} + +void ffi_call(ffi_cif* cif, void (*fn)(void), void* rvalue, void** avalue) +{ + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ecif.rvalue = alloca(cif->rtype->size); + } else { + ecif.rvalue = rvalue; + } + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, + ecif.rvalue, fn, cif->rtype->type, cif->rtype->size); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_call_SYSV(void* register_args, void* stack_args, + ffi_closure* closure, void* rvalue, + unsigned int* rtype, unsigned int* rsize) +{ + /* prepare arguments for closure call */ + ffi_cif* cif = closure->cif; + ffi_type** arg_types = cif->arg_types; + + /* re-allocate data for the args. This needs to be done in order to keep + * multi-word objects (e.g. structs) in contiguous memory. Callers are not + * required to store the value of args in the lower 6 words in the stack + * (although they are allocated in the stack). 
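+ * The re-assembly below therefore copies the spilled register arguments
+ * (r5-r10, at most ARGS_REGISTER_SIZE bytes) into the front of the clone
+ * and appends any remaining stack-passed bytes, so every argument can be
+ * addressed as if it lived in one flat, cif->bytes sized buffer.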
+ */ + char* stackclone = alloca(cif->bytes); + void** avalue = alloca(cif->nargs * sizeof(void*)); + void* struct_rvalue = NULL; + char* ptr = stackclone; + int i; + + /* copy registers into stack clone */ + int registers_used = cif->bytes; + if (registers_used > ARGS_REGISTER_SIZE) { + registers_used = ARGS_REGISTER_SIZE; + } + memcpy(stackclone, register_args, registers_used); + + /* copy stack allocated args into stack clone */ + if (cif->bytes > ARGS_REGISTER_SIZE) { + int stack_used = cif->bytes - ARGS_REGISTER_SIZE; + memcpy(stackclone + ARGS_REGISTER_SIZE, stack_args, stack_used); + } + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + struct_rvalue = *((void**)ptr); + ptr += WORD_SIZE; + } + + /* populate arg pointer list */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 3; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 2; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * Work around strange ABI behaviour. + * (see info in ffi_prep_args) + */ + if (arg_types[i]->size < WORD_SIZE) + { + memcpy (ptr, ptr + (WORD_SIZE - arg_types[i]->size), arg_types[i]->size); + } +#endif + avalue[i] = (void*)ptr; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + avalue[i] = ptr; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + default: + /* default 4-byte argument */ + avalue[i] = ptr; + break; + } + ptr += WORD_FFI_ALIGN(arg_types[i]->size); + } + + /* set the return type info passed back to the wrapper */ + *rsize = cif->rtype->size; + *rtype = cif->rtype->type; + if (struct_rvalue != NULL) { + closure->fun(cif, struct_rvalue, avalue, closure->user_data); + /* copy struct return pointer value into function return value */ + *((void**)rvalue) = struct_rvalue; + } else { + closure->fun(cif, rvalue, avalue, closure->user_data); + } +} + +ffi_status ffi_prep_closure_loc( + ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void* user_data, void* codeloc) +{ + unsigned long* tramp = (unsigned long*)&(closure->tramp[0]); + unsigned long cls = (unsigned long)codeloc; + unsigned long fn = 0; + unsigned long fn_closure_call_sysv = (unsigned long)ffi_closure_call_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + switch (cif->abi) + { + case FFI_SYSV: + fn = (unsigned long)ffi_closure_SYSV; + + /* load r11 (temp) with fn */ + /* imm fn(upper) */ + tramp[0] = 0xb0000000 | ((fn >> 16) & 0xffff); + /* addik r11, r0, fn(lower) */ + tramp[1] = 0x31600000 | (fn & 0xffff); + + /* load r12 (temp) with cls */ + /* imm cls(upper) */ + tramp[2] = 0xb0000000 | ((cls >> 16) & 0xffff); + /* addik r12, r0, cls(lower) */ + tramp[3] = 0x31800000 | (cls & 0xffff); + + /* load r3 (temp) with ffi_closure_call_SYSV */ + /* imm fn_closure_call_sysv(upper) */ + tramp[4] = 0xb0000000 | ((fn_closure_call_sysv >> 16) & 0xffff); + /* addik r3, r0, fn_closure_call_sysv(lower) */ + tramp[5] = 0x30600000 | (fn_closure_call_sysv & 0xffff); + /* branch/jump to address stored in r11 (fn) */ + tramp[6] = 0x98085800; /* bra r11 */ + + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi.c 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi.c new file mode 100644 index 0000000..df6e33c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi.c @@ -0,0 +1,321 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +extern void ffi_call_SYSV(void (*)(void*, extended_cif*), extended_cif*, + unsigned int, unsigned int, unsigned int*, void (*fn)(void), + unsigned int, unsigned int); + +extern void ffi_closure_SYSV(void); + +#define WORD_SIZE sizeof(unsigned int) +#define ARGS_REGISTER_SIZE (WORD_SIZE * 6) +#define WORD_FFI_ALIGN(x) FFI_ALIGN(x, WORD_SIZE) + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ +void ffi_prep_args(void* stack, extended_cif* ecif) +{ + unsigned int i; + ffi_type** p_arg; + void** p_argv; + void* stack_args_p = stack; + + if (ecif == NULL || ecif->cif == NULL) { + return; /* no description to prepare */ + } + + p_argv = ecif->avalue; + + if ((ecif->cif->rtype != NULL) && + (ecif->cif->rtype->type == FFI_TYPE_STRUCT)) + { + /* if return type is a struct which is referenced on the stack/reg5, + * by a pointer. Stored the return value pointer in r5. + */ + char* addr = stack_args_p; + memcpy(addr, &(ecif->rvalue), WORD_SIZE); + stack_args_p += WORD_SIZE; + } + + if (ecif->avalue == NULL) { + return; /* no arguments to prepare */ + } + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t size = (*p_arg)->size; + int type = (*p_arg)->type; + void* value = p_argv[i]; + char* addr = stack_args_p; + int aligned_size = WORD_FFI_ALIGN(size); + + /* force word alignment on the stack */ + stack_args_p += aligned_size; + + switch (type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8*)(value); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8*)(value); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16*)(value); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16*)(value); + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * MicroBlaze toolchain appears to emit: + * bsrli r5, r5, 8 (caller) + * ... + * + * ... 
+ * bslli r5, r5, 8 (callee) + * + * For structs like "struct a { uint8_t a[3]; };", when passed + * by value. + * + * Structs like "struct b { uint16_t a; };" are also expected + * to be packed strangely in registers. + * + * This appears to be because the microblaze toolchain expects + * "struct b == uint16_t", which is only any issue for big + * endian. + * + * The following is a work around for big-endian only, for the + * above mentioned case, it will re-align the contents of a + * <= 3-byte struct value. + */ + if (size < WORD_SIZE) + { + memcpy (addr + (WORD_SIZE - size), value, size); + break; + } +#endif + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + default: + memcpy(addr, value, aligned_size); + } + } +} + +ffi_status ffi_prep_cif_machdep(ffi_cif* cif) +{ + /* check ABI */ + switch (cif->abi) + { + case FFI_SYSV: + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} + +void ffi_call(ffi_cif* cif, void (*fn)(void), void* rvalue, void** avalue) +{ + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ecif.rvalue = alloca(cif->rtype->size); + } else { + ecif.rvalue = rvalue; + } + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, + ecif.rvalue, fn, cif->rtype->type, cif->rtype->size); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_call_SYSV(void* register_args, void* stack_args, + ffi_closure* closure, void* rvalue, + unsigned int* rtype, unsigned int* rsize) +{ + /* prepare arguments for closure call */ + ffi_cif* cif = closure->cif; + ffi_type** arg_types = cif->arg_types; + + /* re-allocate data for the args. This needs to be done in order to keep + * multi-word objects (e.g. structs) in contiguous memory. Callers are not + * required to store the value of args in the lower 6 words in the stack + * (although they are allocated in the stack). + */ + char* stackclone = alloca(cif->bytes); + void** avalue = alloca(cif->nargs * sizeof(void*)); + void* struct_rvalue = NULL; + char* ptr = stackclone; + int i; + + /* copy registers into stack clone */ + int registers_used = cif->bytes; + if (registers_used > ARGS_REGISTER_SIZE) { + registers_used = ARGS_REGISTER_SIZE; + } + memcpy(stackclone, register_args, registers_used); + + /* copy stack allocated args into stack clone */ + if (cif->bytes > ARGS_REGISTER_SIZE) { + int stack_used = cif->bytes - ARGS_REGISTER_SIZE; + memcpy(stackclone + ARGS_REGISTER_SIZE, stack_args, stack_used); + } + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + struct_rvalue = *((void**)ptr); + ptr += WORD_SIZE; + } + + /* populate arg pointer list */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 3; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 2; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * Work around strange ABI behaviour. 
+ * (see info in ffi_prep_args) + */ + if (arg_types[i]->size < WORD_SIZE) + { + memcpy (ptr, ptr + (WORD_SIZE - arg_types[i]->size), arg_types[i]->size); + } +#endif + avalue[i] = (void*)ptr; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + avalue[i] = ptr; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + default: + /* default 4-byte argument */ + avalue[i] = ptr; + break; + } + ptr += WORD_FFI_ALIGN(arg_types[i]->size); + } + + /* set the return type info passed back to the wrapper */ + *rsize = cif->rtype->size; + *rtype = cif->rtype->type; + if (struct_rvalue != NULL) { + closure->fun(cif, struct_rvalue, avalue, closure->user_data); + /* copy struct return pointer value into function return value */ + *((void**)rvalue) = struct_rvalue; + } else { + closure->fun(cif, rvalue, avalue, closure->user_data); + } +} + +ffi_status ffi_prep_closure_loc( + ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void* user_data, void* codeloc) +{ + unsigned long* tramp = (unsigned long*)&(closure->tramp[0]); + unsigned long cls = (unsigned long)codeloc; + unsigned long fn = 0; + unsigned long fn_closure_call_sysv = (unsigned long)ffi_closure_call_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + switch (cif->abi) + { + case FFI_SYSV: + fn = (unsigned long)ffi_closure_SYSV; + + /* load r11 (temp) with fn */ + /* imm fn(upper) */ + tramp[0] = 0xb0000000 | ((fn >> 16) & 0xffff); + /* addik r11, r0, fn(lower) */ + tramp[1] = 0x31600000 | (fn & 0xffff); + + /* load r12 (temp) with cls */ + /* imm cls(upper) */ + tramp[2] = 0xb0000000 | ((cls >> 16) & 0xffff); + /* addik r12, r0, cls(lower) */ + tramp[3] = 0x31800000 | (cls & 0xffff); + + /* load r3 (temp) with ffi_closure_call_SYSV */ + /* imm fn_closure_call_sysv(upper) */ + tramp[4] = 0xb0000000 | ((fn_closure_call_sysv >> 16) & 0xffff); + /* addik r3, r0, fn_closure_call_sysv(lower) */ + tramp[5] = 0x30600000 | (fn_closure_call_sysv & 0xffff); + /* branch/jump to address stored in r11 (fn) */ + tramp[6] = 0x98085800; /* bra r11 */ + + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget 2.h new file mode 100644 index 0000000..c6fa5a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget 2.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012, 2013 Xilinx, Inc + + Target configuration macros for MicroBlaze. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#define FFI_TRAMPOLINE_SIZE (4*8) + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget.h new file mode 100644 index 0000000..c6fa5a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012, 2013 Xilinx, Inc + + Target configuration macros for MicroBlaze. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#define FFI_TRAMPOLINE_SIZE (4*8) + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv 2.S new file mode 100644 index 0000000..ea43e9d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv 2.S @@ -0,0 +1,302 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* + * arg[0] (r5) = ffi_prep_args, + * arg[1] (r6) = &ecif, + * arg[2] (r7) = cif->bytes, + * arg[3] (r8) = cif->flags, + * arg[4] (r9) = ecif.rvalue, + * arg[5] (r10) = fn + * arg[6] (sp[0]) = cif->rtype->type + * arg[7] (sp[4]) = cif->rtype->size + */ + .text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + /* push callee saves */ + addik r1, r1, -20 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + swi r22, r1, 12 /* save for locals */ + swi r23, r1, 16 /* save for locals */ + + /* save the r5-r10 registers in the stack */ + addik r1, r1, -24 /* increment sp to store 6x 32-bit words */ + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* save function pointer */ + addik r3, r5, 0 /* copy ffi_prep_args into r3 */ + addik r22, r1, 0 /* save sp for unallocated args into r22 (callee-saved) */ + addik r23, r10, 0 /* save function address into r23 (callee-saved) */ + + /* prepare stack with allocation for n (bytes = r7) args */ + rsub r1, r7, r1 /* subtract bytes from sp */ + + /* prep args for ffi_prep_args call */ + addik r5, r1, 0 /* store stack pointer into arg[0] */ + /* r6 still holds ecif for arg[1] */ + + /* Call ffi_prep_args(stack, &ecif). 
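+	   (brald is a branch-absolute-and-link with a delay slot, hence the
+	   nop that follows; it leaves the return address in r15, which is why
+	   r15 is spilled to the frame around each call in this file.)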
*/ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + /* returns calling stack pointer location */ + + /* prepare args for fn call, prep_args populates them onto the stack */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + + /* call (fn) (...). */ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r23 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + + /* Remove the space we pushed for the args. */ + addik r1, r22, 0 /* restore old SP */ + + /* restore this functions parameters */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + addik r1, r1, 24 /* decrement sp to de-allocate 6x 32-bit words */ + + /* If the return value pointer is NULL, assume no return value. */ + beqi r9, ffi_call_SYSV_end + + lwi r22, r1, 48 /* get return type (20 for locals + 28 for arg[6]) */ + lwi r23, r1, 52 /* get return size (20 for locals + 32 for arg[7]) */ + + /* Check if return type is actually a struct, do nothing */ + rsubi r11, r22, FFI_TYPE_STRUCT + beqi r11, ffi_call_SYSV_end + + /* Return 8bit */ + rsubi r11, r23, 1 + beqi r11, ffi_call_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r23, 2 + beqi r11, ffi_call_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r23, 4 + beqi r11, ffi_call_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r23, 8 + beqi r11, ffi_call_SYSV_store64 + + /* Didn't match anything */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store64: + swi r3, r9, 0 /* store word r3 into return value */ + swi r4, r9, 4 /* store word r4 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store32: + swi r3, r9, 0 /* store word r3 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store16: +#ifdef __BIG_ENDIAN__ + shi r3, r9, 2 /* store half-word r3 into return value */ +#else + shi r3, r9, 0 /* store half-word r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_store8: +#ifdef __BIG_ENDIAN__ + sbi r3, r9, 3 /* store byte r3 into return value */ +#else + sbi r3, r9, 0 /* store byte r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_end: + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + lwi r22, r1, 12 + lwi r23, r1, 16 + addik r1, r1, 20 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_call_SYSV, . - ffi_call_SYSV + +/* ------------------------------------------------------------------------- */ + + /* + * args passed into this function, are passed down to the callee. + * this function is the target of the closure trampoline, as such r12 is + * a pointer to the closure object. 
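+	 * The trampoline emitted by ffi_prep_closure_loc (see ffi.c in this
+	 * directory) loads r12 with the closure's code location and r3 with
+	 * the address of ffi_closure_call_SYSV before branching here, which
+	 * is why r3 is used as the call target for the wrap_call below.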
+ */ + .text + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + /* push callee saves */ + addik r11, r1, 28 /* save stack args start location (excluding regs/link) */ + addik r1, r1, -12 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + + /* store register args on stack */ + addik r1, r1, -24 + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* setup args */ + addik r5, r1, 0 /* register_args */ + addik r6, r11, 0 /* stack_args */ + addik r7, r12, 0 /* closure object */ + addik r1, r1, -8 /* allocate return value */ + addik r8, r1, 0 /* void* rvalue */ + addik r1, r1, -8 /* allocate for return type/size values */ + addik r9, r1, 0 /* void* rtype */ + addik r10, r1, 4 /* void* rsize */ + + /* call the wrap_call function */ + addik r1, r1, -28 /* allocate args + link reg */ + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 28 /* restore the link register from the frame */ + +ffi_closure_SYSV_prepare_return: + lwi r9, r1, 0 /* rtype */ + lwi r10, r1, 4 /* rsize */ + addik r1, r1, 8 /* de-allocate return info values */ + + /* Check if return type is actually a struct, store 4 bytes */ + rsubi r11, r9, FFI_TYPE_STRUCT + beqi r11, ffi_closure_SYSV_store32 + + /* Return 8bit */ + rsubi r11, r10, 1 + beqi r11, ffi_closure_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r10, 2 + beqi r11, ffi_closure_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r10, 4 + beqi r11, ffi_closure_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r10, 8 + beqi r11, ffi_closure_SYSV_store64 + + /* Didn't match anything */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store64: + lwi r3, r1, 0 /* store word r3 into return value */ + lwi r4, r1, 4 /* store word r4 into return value */ + /* 64 bits == 2 words, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store32: + lwi r3, r1, 0 /* store word r3 into return value */ + /* 32 bits == 1 word, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store16: +#ifdef __BIG_ENDIAN__ + lhui r3, r1, 2 /* store half-word r3 into return value */ +#else + lhui r3, r1, 0 /* store half-word r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT16 + bnei r11, ffi_closure_SYSV_end + sext16 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store8: +#ifdef __BIG_ENDIAN__ + lbui r3, r1, 3 /* store byte r3 into return value */ +#else + lbui r3, r1, 0 /* store byte r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT8 + bnei r11, ffi_closure_SYSV_end + sext8 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_end: + addik r1, r1, 8 /* de-allocate return value */ + + /* de-allocate stored args */ + addik r1, r1, 24 + + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + addik r1, r1, 12 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_closure_SYSV, . 
- ffi_closure_SYSV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv.S new file mode 100644 index 0000000..ea43e9d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv.S @@ -0,0 +1,302 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* + * arg[0] (r5) = ffi_prep_args, + * arg[1] (r6) = &ecif, + * arg[2] (r7) = cif->bytes, + * arg[3] (r8) = cif->flags, + * arg[4] (r9) = ecif.rvalue, + * arg[5] (r10) = fn + * arg[6] (sp[0]) = cif->rtype->type + * arg[7] (sp[4]) = cif->rtype->size + */ + .text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + /* push callee saves */ + addik r1, r1, -20 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + swi r22, r1, 12 /* save for locals */ + swi r23, r1, 16 /* save for locals */ + + /* save the r5-r10 registers in the stack */ + addik r1, r1, -24 /* increment sp to store 6x 32-bit words */ + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* save function pointer */ + addik r3, r5, 0 /* copy ffi_prep_args into r3 */ + addik r22, r1, 0 /* save sp for unallocated args into r22 (callee-saved) */ + addik r23, r10, 0 /* save function address into r23 (callee-saved) */ + + /* prepare stack with allocation for n (bytes = r7) args */ + rsub r1, r7, r1 /* subtract bytes from sp */ + + /* prep args for ffi_prep_args call */ + addik r5, r1, 0 /* store stack pointer into arg[0] */ + /* r6 still holds ecif for arg[1] */ + + /* Call ffi_prep_args(stack, &ecif). */ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + /* returns calling stack pointer location */ + + /* prepare args for fn call, prep_args populates them onto the stack */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + + /* call (fn) (...). 
*/ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r23 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + + /* Remove the space we pushed for the args. */ + addik r1, r22, 0 /* restore old SP */ + + /* restore this functions parameters */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + addik r1, r1, 24 /* decrement sp to de-allocate 6x 32-bit words */ + + /* If the return value pointer is NULL, assume no return value. */ + beqi r9, ffi_call_SYSV_end + + lwi r22, r1, 48 /* get return type (20 for locals + 28 for arg[6]) */ + lwi r23, r1, 52 /* get return size (20 for locals + 32 for arg[7]) */ + + /* Check if return type is actually a struct, do nothing */ + rsubi r11, r22, FFI_TYPE_STRUCT + beqi r11, ffi_call_SYSV_end + + /* Return 8bit */ + rsubi r11, r23, 1 + beqi r11, ffi_call_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r23, 2 + beqi r11, ffi_call_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r23, 4 + beqi r11, ffi_call_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r23, 8 + beqi r11, ffi_call_SYSV_store64 + + /* Didn't match anything */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store64: + swi r3, r9, 0 /* store word r3 into return value */ + swi r4, r9, 4 /* store word r4 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store32: + swi r3, r9, 0 /* store word r3 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store16: +#ifdef __BIG_ENDIAN__ + shi r3, r9, 2 /* store half-word r3 into return value */ +#else + shi r3, r9, 0 /* store half-word r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_store8: +#ifdef __BIG_ENDIAN__ + sbi r3, r9, 3 /* store byte r3 into return value */ +#else + sbi r3, r9, 0 /* store byte r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_end: + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + lwi r22, r1, 12 + lwi r23, r1, 16 + addik r1, r1, 20 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_call_SYSV, . - ffi_call_SYSV + +/* ------------------------------------------------------------------------- */ + + /* + * args passed into this function, are passed down to the callee. + * this function is the target of the closure trampoline, as such r12 is + * a pointer to the closure object. 
+ */ + .text + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + /* push callee saves */ + addik r11, r1, 28 /* save stack args start location (excluding regs/link) */ + addik r1, r1, -12 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + + /* store register args on stack */ + addik r1, r1, -24 + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* setup args */ + addik r5, r1, 0 /* register_args */ + addik r6, r11, 0 /* stack_args */ + addik r7, r12, 0 /* closure object */ + addik r1, r1, -8 /* allocate return value */ + addik r8, r1, 0 /* void* rvalue */ + addik r1, r1, -8 /* allocate for return type/size values */ + addik r9, r1, 0 /* void* rtype */ + addik r10, r1, 4 /* void* rsize */ + + /* call the wrap_call function */ + addik r1, r1, -28 /* allocate args + link reg */ + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 28 /* restore the link register from the frame */ + +ffi_closure_SYSV_prepare_return: + lwi r9, r1, 0 /* rtype */ + lwi r10, r1, 4 /* rsize */ + addik r1, r1, 8 /* de-allocate return info values */ + + /* Check if return type is actually a struct, store 4 bytes */ + rsubi r11, r9, FFI_TYPE_STRUCT + beqi r11, ffi_closure_SYSV_store32 + + /* Return 8bit */ + rsubi r11, r10, 1 + beqi r11, ffi_closure_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r10, 2 + beqi r11, ffi_closure_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r10, 4 + beqi r11, ffi_closure_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r10, 8 + beqi r11, ffi_closure_SYSV_store64 + + /* Didn't match anything */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store64: + lwi r3, r1, 0 /* store word r3 into return value */ + lwi r4, r1, 4 /* store word r4 into return value */ + /* 64 bits == 2 words, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store32: + lwi r3, r1, 0 /* store word r3 into return value */ + /* 32 bits == 1 word, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store16: +#ifdef __BIG_ENDIAN__ + lhui r3, r1, 2 /* store half-word r3 into return value */ +#else + lhui r3, r1, 0 /* store half-word r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT16 + bnei r11, ffi_closure_SYSV_end + sext16 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store8: +#ifdef __BIG_ENDIAN__ + lbui r3, r1, 3 /* store byte r3 into return value */ +#else + lbui r3, r1, 0 /* store byte r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT8 + bnei r11, ffi_closure_SYSV_end + sext8 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_end: + addik r1, r1, 8 /* de-allocate return value */ + + /* de-allocate stored args */ + addik r1, r1, 24 + + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + addik r1, r1, 12 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_closure_SYSV, . 
- ffi_closure_SYSV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi 2.c new file mode 100644 index 0000000..057b046 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi 2.c @@ -0,0 +1,1130 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2008 David Daney + Copyright (c) 1996, 2007, 2008, 2011 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#ifdef __GNUC__ +# if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) +# define USE__BUILTIN___CLEAR_CACHE 1 +# endif +#endif + +#ifndef USE__BUILTIN___CLEAR_CACHE +# if defined(__OpenBSD__) +# include +# else +# include +# endif +#endif + +#ifdef FFI_DEBUG +# define FFI_MIPS_STOP_HERE() ffi_stop_here() +#else +# define FFI_MIPS_STOP_HERE() do {} while(0) +#endif + +#ifdef FFI_MIPS_N32 +#define FIX_ARGP \ +FFI_ASSERT(argp <= &stack[bytes]); \ +if (argp == &stack[bytes]) \ +{ \ + argp = stack; \ + FFI_MIPS_STOP_HERE(); \ +} +#else +#define FIX_ARGP +#endif + + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +static void ffi_prep_args(char *stack, + extended_cif *ecif, + int bytes, + int flags) +{ + int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + +#ifdef FFI_MIPS_N32 + /* If more than 8 double words are used, the remainder go + on the stack. We reorder stuff on the stack here to + support this easily. */ + if (bytes > 8 * sizeof(ffi_arg)) + argp = &stack[bytes - (8 * sizeof(ffi_arg))]; + else + argp = stack; +#else + argp = stack; +#endif + + memset(stack, 0, bytes); + +#ifdef FFI_MIPS_N32 + if ( ecif->cif->rstruct_flag != 0 ) +#else + if ( ecif->cif->rtype->type == FFI_TYPE_STRUCT ) +#endif + { + *(ffi_arg *) argp = (ffi_arg) ecif->rvalue; + argp += sizeof(ffi_arg); + FIX_ARGP; + } + + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; i++, p_arg++) + { + size_t z; + unsigned int a; + + /* Align if necessary. 
*/ + a = (*p_arg)->alignment; + if (a < sizeof(ffi_arg)) + a = sizeof(ffi_arg); + + if ((a - 1) & (unsigned long) argp) + { + argp = (char *) FFI_ALIGN(argp, a); + FIX_ARGP; + } + + z = (*p_arg)->size; + if (z <= sizeof(ffi_arg)) + { + int type = (*p_arg)->type; + z = sizeof(ffi_arg); + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (ecif->cif->abi == FFI_N64 + || ecif->cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (i < 8 && (ecif->cif->abi == FFI_N32_SOFT_FLOAT + || ecif->cif->abi == FFI_N64_SOFT_FLOAT)) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_SINT8: + *(ffi_arg *)argp = *(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(ffi_arg *)argp = *(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(ffi_arg *)argp = *(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(ffi_arg *)argp = *(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_SINT32: + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); + break; + + case FFI_TYPE_UINT32: +#ifdef FFI_MIPS_N32 + /* The N32 ABI requires that 32-bit integers + be sign-extended to 64-bits, regardless of + whether they are signed or unsigned. */ + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); +#else + *(ffi_arg *)argp = *(UINT32 *)(* p_argv); +#endif + break; + + /* This can only happen with 64bit slots. */ + case FFI_TYPE_FLOAT: + *(float *) argp = *(float *)(* p_argv); + break; + + /* Handle structures. */ + default: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + } + } + else + { +#ifdef FFI_MIPS_O32 + memcpy(argp, *p_argv, z); +#else + { + unsigned long end = (unsigned long) argp + z; + unsigned long cap = (unsigned long) stack + bytes; + + /* Check if the data will fit within the register space. + Handle it if it doesn't. */ + + if (end <= cap) + memcpy(argp, *p_argv, z); + else + { + unsigned long portion = cap - (unsigned long)argp; + + memcpy(argp, *p_argv, portion); + argp = stack; + z -= portion; + memcpy(argp, (void*)((unsigned long)(*p_argv) + portion), + z); + } + } +#endif + } + p_argv++; + argp += z; + FIX_ARGP; + } +} + +#ifdef FFI_MIPS_N32 + +/* The n32 spec says that if "a chunk consists solely of a double + float field (but not a double, which is part of a union), it + is passed in a floating point register. Any other chunk is + passed in an integer register". This code traverses structure + definitions and generates the appropriate flags. */ + +static unsigned +calc_n32_struct_flags(int soft_float, ffi_type *arg, + unsigned *loc, unsigned *arg_reg) +{ + unsigned flags = 0; + unsigned index = 0; + + ffi_type *e; + + if (soft_float) + return 0; + + while ((e = arg->elements[index])) + { + /* Align this object. */ + *loc = FFI_ALIGN(*loc, e->alignment); + if (e->type == FFI_TYPE_DOUBLE) + { + /* Already aligned to FFI_SIZEOF_ARG. */ + *arg_reg = *loc / FFI_SIZEOF_ARG; + if (*arg_reg > 7) + break; + flags += (FFI_TYPE_DOUBLE << (*arg_reg * FFI_FLAG_BITS)); + *loc += e->size; + } + else + *loc += e->size; + index++; + } + /* Next Argument register at alignment of FFI_SIZEOF_ARG. */ + *arg_reg = FFI_ALIGN(*loc, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + + return flags; +} + +static unsigned +calc_n32_return_struct_flags(int soft_float, ffi_type *arg) +{ + unsigned flags = 0; + unsigned small = FFI_TYPE_SMALLSTRUCT; + ffi_type *e; + + /* Returning structures under n32 is a tricky thing. 
+ A struct with only one or two floating point fields + is returned in $f0 (and $f2 if necessary). Any other + struct results at most 128 bits are returned in $2 + (the first 64 bits) and $3 (remainder, if necessary). + Larger structs are handled normally. */ + + if (arg->size > 16) + return 0; + + if (arg->size > 8) + small = FFI_TYPE_SMALLSTRUCT2; + + e = arg->elements[0]; + + if (e->type == FFI_TYPE_DOUBLE) + flags = FFI_TYPE_DOUBLE; + else if (e->type == FFI_TYPE_FLOAT) + flags = FFI_TYPE_FLOAT; + + if (flags && (e = arg->elements[1])) + { + if (e->type == FFI_TYPE_DOUBLE) + flags += FFI_TYPE_DOUBLE << FFI_FLAG_BITS; + else if (e->type == FFI_TYPE_FLOAT) + flags += FFI_TYPE_FLOAT << FFI_FLAG_BITS; + else + return small; + + if (flags && (arg->elements[2])) + { + /* There are three arguments and the first two are + floats! This must be passed the old way. */ + return small; + } + if (soft_float) + flags += FFI_TYPE_STRUCT_SOFT; + } + else + if (!flags) + return small; + + return flags; +} + +#endif + +/* Perform machine dependent cif processing */ +static ffi_status ffi_prep_cif_machdep_int(ffi_cif *cif, unsigned nfixedargs) +{ + cif->flags = 0; + cif->mips_nfixedargs = nfixedargs; + +#ifdef FFI_MIPS_O32 + /* Set the flags necessary for O32 processing. FFI_O32_SOFT_FLOAT + * does not have special handling for floating point args. + */ + + if (cif->rtype->type != FFI_TYPE_STRUCT && cif->abi == FFI_O32) + { + if (cif->nargs > 0 && cif->nargs == nfixedargs) + { + switch ((cif->arg_types)[0]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[0]->type; + break; + + default: + break; + } + + if (cif->nargs > 1) + { + /* Only handle the second argument if the first + is a float or double. */ + if (cif->flags) + { + switch ((cif->arg_types)[1]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[1]->type << FFI_FLAG_BITS; + break; + + default: + break; + } + } + } + } + } + + /* Set the return type flag */ + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } + else + { + /* FFI_O32 */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } +#endif + +#ifdef FFI_MIPS_N32 + /* Set the flags necessary for N32 processing */ + { + int type; + unsigned arg_reg = 0; + unsigned loc = 0; + unsigned count = (cif->nargs < 8) ? cif->nargs : 8; + unsigned index = 0; + + unsigned struct_flags = 0; + int soft_float = (cif->abi == FFI_N32_SOFT_FLOAT + || cif->abi == FFI_N64_SOFT_FLOAT); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + struct_flags = calc_n32_return_struct_flags(soft_float, cif->rtype); + + if (struct_flags == 0) + { + /* This means that the structure is being passed as + a hidden argument */ + + arg_reg = 1; + count = (cif->nargs < 7) ? 
cif->nargs : 7; + + cif->rstruct_flag = !0; + } + else + cif->rstruct_flag = 0; + } + else + cif->rstruct_flag = 0; + + while (count-- > 0 && arg_reg < 8) + { + type = (cif->arg_types)[index]->type; + + // Pass variadic arguments in integer registers even if they're floats + if (soft_float || index >= nfixedargs) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += + ((cif->arg_types)[index]->type << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + break; + case FFI_TYPE_LONGDOUBLE: + /* Align it. */ + arg_reg = FFI_ALIGN(arg_reg, 2); + /* Treat it as two adjacent doubles. */ + if (soft_float || index >= nfixedargs) + { + arg_reg += 2; + } + else + { + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + } + break; + + case FFI_TYPE_STRUCT: + loc = arg_reg * FFI_SIZEOF_ARG; + cif->flags += calc_n32_struct_flags(soft_float || index >= nfixedargs, + (cif->arg_types)[index], + &loc, &arg_reg); + break; + + default: + arg_reg++; + break; + } + + index++; + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + { + if (struct_flags == 0) + { + /* The structure is returned through a hidden + first argument. Do nothing, 'cause FFI_TYPE_VOID + is 0 */ + } + else + { + /* The structure is returned via some tricky + mechanism */ + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += struct_flags << (4 + (FFI_FLAG_BITS * 8)); + } + break; + } + + case FFI_TYPE_VOID: + /* Do nothing, 'cause FFI_TYPE_VOID is 0 */ + break; + + case FFI_TYPE_POINTER: + if (cif->abi == FFI_N32_SOFT_FLOAT || cif->abi == FFI_N32) + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + else + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_FLOAT: + if (soft_float) + { + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + break; + } + /* else fall through */ + case FFI_TYPE_DOUBLE: + if (soft_float) + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + else + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_LONGDOUBLE: + /* Long double is returned as if it were a struct containing + two doubles. 
*/ + if (soft_float) + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += FFI_TYPE_SMALLSTRUCT2 << (4 + (FFI_FLAG_BITS * 8)); + } + else + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += (FFI_TYPE_DOUBLE + + (FFI_TYPE_DOUBLE << FFI_FLAG_BITS)) + << (4 + (FFI_FLAG_BITS * 8)); + } + break; + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + } + } +#endif + + return FFI_OK; +} + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + return ffi_prep_cif_machdep_int(cif, cif->nargs); +} + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned nfixedargs, + unsigned ntotalargs MAYBE_UNUSED) +{ + return ffi_prep_cif_machdep_int(cif, nfixedargs); +} + +/* Low level routine for calling O32 functions */ +extern int ffi_call_O32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, unsigned *, void (*)(void), void *closure); + +/* Low level routine for calling N32 functions */ +extern int ffi_call_N32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, void *, void (*)(void), void *closure); + +void ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { +#ifdef FFI_MIPS_O32 + case FFI_O32: + case FFI_O32_SOFT_FLOAT: + ffi_call_O32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn, closure); + break; +#endif + +#ifdef FFI_MIPS_N32 + case FFI_N32: + case FFI_N32_SOFT_FLOAT: + case FFI_N64: + case FFI_N64_SOFT_FLOAT: + { + int copy_rvalue = 0; + int copy_offset = 0; + char *rvalue_copy = ecif.rvalue; + if (cif->rtype->type == FFI_TYPE_STRUCT && cif->rtype->size < 16) + { + /* For structures smaller than 16 bytes we clobber memory + in 8 byte increments. Make a copy so we don't clobber + the callers memory outside of the struct bounds. 
*/ + rvalue_copy = alloca(16); + copy_rvalue = 1; + } + else if (cif->rtype->type == FFI_TYPE_FLOAT + && (cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT)) + { + rvalue_copy = alloca (8); + copy_rvalue = 1; +#if defined(__MIPSEB__) || defined(_MIPSEB) + copy_offset = 4; +#endif + } + ffi_call_N32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, rvalue_copy, fn, closure); + if (copy_rvalue) + memcpy(ecif.rvalue, rvalue_copy + copy_offset, cif->rtype->size); + } + break; +#endif + + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +#if FFI_CLOSURES +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + void * fn; + char *clear_location = (char *) codeloc; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_closure_N32; +#endif /* FFI_MIPS_O32 */ + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned)fn >> 16); + /* ori $25,low(fn) */ + tramp[1] = 0x37390000 | ((unsigned)fn & 0xffff); + /* lui $12,high(codeloc) */ + tramp[2] = 0x3c0c0000 | ((unsigned)codeloc >> 16); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[3] = 0x03200008; +#else + tramp[3] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[4] = 0x358c0000 | ((unsigned)codeloc & 0xffff); +#else + /* N64 has a somewhat larger trampoline. 
*/ + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned long)fn >> 48); + /* lui $12,high(codeloc) */ + tramp[1] = 0x3c0c0000 | ((unsigned long)codeloc >> 48); + /* ori $25,mid-high(fn) */ + tramp[2] = 0x37390000 | (((unsigned long)fn >> 32 ) & 0xffff); + /* ori $12,mid-high(codeloc) */ + tramp[3] = 0x358c0000 | (((unsigned long)codeloc >> 32) & 0xffff); + /* dsll $25,$25,16 */ + tramp[4] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[5] = 0x000c6438; + /* ori $25,mid-low(fn) */ + tramp[6] = 0x37390000 | (((unsigned long)fn >> 16 ) & 0xffff); + /* ori $12,mid-low(codeloc) */ + tramp[7] = 0x358c0000 | (((unsigned long)codeloc >> 16) & 0xffff); + /* dsll $25,$25,16 */ + tramp[8] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[9] = 0x000c6438; + /* ori $25,low(fn) */ + tramp[10] = 0x37390000 | ((unsigned long)fn & 0xffff); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[11] = 0x03200008; +#else + tramp[11] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[12] = 0x358c0000 | ((unsigned long)codeloc & 0xffff); + +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#ifdef USE__BUILTIN___CLEAR_CACHE + __builtin___clear_cache(clear_location, clear_location + FFI_TRAMPOLINE_SIZE); +#else + cacheflush (clear_location, FFI_TRAMPOLINE_SIZE, ICACHE); +#endif + return FFI_OK; +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer arguments + * (and, depending upon the arguments, some floating-point arguments + * as well). FPR is a pointer to the area where floating point + * registers have been saved, if any. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return type. + * + * Based on the similar routine for sparc. 
+ */ +int +ffi_closure_mips_inner_O32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + double *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn, seen_int; + + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + seen_int = (cif->abi == FFI_O32_SOFT_FLOAT) || (cif->mips_nfixedargs != cif->nargs); + argn = 0; + + if ((cif->flags >> (FFI_FLAG_BITS * 2)) == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)ar[0]; + argn = 1; + seen_int = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->alignment == 8 && (argn & 0x1)) + argn++; + if (i < 2 && !seen_int && + (arg_types[i]->type == FFI_TYPE_FLOAT || + arg_types[i]->type == FFI_TYPE_DOUBLE || + arg_types[i]->type == FFI_TYPE_LONGDOUBLE)) + { +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT) + avaluep[i] = ((char *) &fpr[i]) + sizeof (float); + else +#endif + avaluep[i] = (char *) &fpr[i]; + } + else + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) ar[argn]; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) ar[argn]; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) ar[argn]; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) ar[argn]; + break; + + default: + avaluep[i] = (char *) &ar[argn]; + break; + } + seen_int = 1; + } + argn += FFI_ALIGN(arg_types[i]->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + i++; + } + + /* Invoke the closure. */ + fun(cif, rvalue, avaluep, user_data); + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_FLOAT: + return FFI_TYPE_INT; + case FFI_TYPE_DOUBLE: + return FFI_TYPE_UINT64; + default: + return cif->rtype->type; + } + } + else + { + return cif->rtype->type; + } +} + +#if defined(FFI_MIPS_N32) + +static void +copy_struct_N32(char *target, unsigned offset, ffi_abi abi, ffi_type *type, + int argn, unsigned arg_offset, ffi_arg *ar, + ffi_arg *fpr, int soft_float) +{ + ffi_type **elt_typep = type->elements; + while(*elt_typep) + { + ffi_type *elt_type = *elt_typep; + unsigned o; + char *tp; + char *argp; + char *fpp; + + o = FFI_ALIGN(offset, elt_type->alignment); + arg_offset += o - offset; + offset = o; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + + argp = (char *)(ar + argn); + fpp = (char *)(argn >= 8 ? ar + argn : fpr + argn); + + tp = target + offset; + + if (elt_type->type == FFI_TYPE_DOUBLE && !soft_float) + *(double *)tp = *(double *)fpp; + else + memcpy(tp, argp + arg_offset, elt_type->size); + + offset += elt_type->size; + arg_offset += elt_type->size; + elt_typep++; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + } +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer + * arguments. FPR is a pointer to the area where floating point + * registers have been saved. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return flags. 
+ * + */ +int +ffi_closure_mips_inner_N32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + ffi_arg *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn; + int soft_float; + ffi_arg *argp; + + soft_float = cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT; + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + argn = 0; + + if (cif->rstruct_flag) + { +#if _MIPS_SIM==_ABIN32 + rvalue = (void *)(UINT32)ar[0]; +#else /* N64 */ + rvalue = (void *)ar[0]; +#endif + argn = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->type == FFI_TYPE_FLOAT + || arg_types[i]->type == FFI_TYPE_DOUBLE + || arg_types[i]->type == FFI_TYPE_LONGDOUBLE) + { + argp = (argn >= 8 || i >= cif->mips_nfixedargs || soft_float) ? ar + argn : fpr + argn; + if ((arg_types[i]->type == FFI_TYPE_LONGDOUBLE) && ((uintptr_t)argp & (arg_types[i]->alignment-1))) + { + argp=(ffi_arg*)FFI_ALIGN(argp,arg_types[i]->alignment); + argn++; + } +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT && argn < 8) + avaluep[i] = ((char *) argp) + sizeof (float); + else +#endif + avaluep[i] = (char *) argp; + } + else + { + unsigned type = arg_types[i]->type; + + if (arg_types[i]->alignment > sizeof(ffi_arg)) + argn = FFI_ALIGN(argn, arg_types[i]->alignment / sizeof(ffi_arg)); + + argp = ar + argn; + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (cif->abi == FFI_N64 || cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (soft_float && type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + + switch (type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) *argp; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) *argp; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) *argp; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) *argp; + break; + + case FFI_TYPE_SINT32: + avaluep[i] = &avalue[i]; + *(SINT32 *) &avalue[i] = (SINT32) *argp; + break; + + case FFI_TYPE_UINT32: + avaluep[i] = &avalue[i]; + *(UINT32 *) &avalue[i] = (UINT32) *argp; + break; + + case FFI_TYPE_STRUCT: + if (argn < 8) + { + /* Allocate space for the struct as at least part of + it was passed in registers. */ + avaluep[i] = alloca(arg_types[i]->size); + copy_struct_N32(avaluep[i], 0, cif->abi, arg_types[i], + argn, 0, ar, fpr, i >= cif->mips_nfixedargs || soft_float); + + break; + } + /* Else fall through. */ + default: + avaluep[i] = (char *) argp; + break; + } + } + argn += FFI_ALIGN(arg_types[i]->size, sizeof(ffi_arg)) / sizeof(ffi_arg); + i++; + } + + /* Invoke the closure. 
*/ + fun (cif, rvalue, avaluep, user_data); + + return cif->flags >> (FFI_FLAG_BITS * 8); +} + +#endif /* FFI_MIPS_N32 */ + +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void * fn; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_go_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_go_closure_N32; +#endif /* FFI_MIPS_O32 */ + + closure->tramp = (void *)fn; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* FFI_CLOSURES */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi.c new file mode 100644 index 0000000..057b046 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi.c @@ -0,0 +1,1130 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2008 David Daney + Copyright (c) 1996, 2007, 2008, 2011 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#ifdef __GNUC__ +# if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) +# define USE__BUILTIN___CLEAR_CACHE 1 +# endif +#endif + +#ifndef USE__BUILTIN___CLEAR_CACHE +# if defined(__OpenBSD__) +# include +# else +# include +# endif +#endif + +#ifdef FFI_DEBUG +# define FFI_MIPS_STOP_HERE() ffi_stop_here() +#else +# define FFI_MIPS_STOP_HERE() do {} while(0) +#endif + +#ifdef FFI_MIPS_N32 +#define FIX_ARGP \ +FFI_ASSERT(argp <= &stack[bytes]); \ +if (argp == &stack[bytes]) \ +{ \ + argp = stack; \ + FFI_MIPS_STOP_HERE(); \ +} +#else +#define FIX_ARGP +#endif + + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +static void ffi_prep_args(char *stack, + extended_cif *ecif, + int bytes, + int flags) +{ + int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + +#ifdef FFI_MIPS_N32 + /* If more than 8 double words are used, the remainder go + on the stack. We reorder stuff on the stack here to + support this easily. */ + if (bytes > 8 * sizeof(ffi_arg)) + argp = &stack[bytes - (8 * sizeof(ffi_arg))]; + else + argp = stack; +#else + argp = stack; +#endif + + memset(stack, 0, bytes); + +#ifdef FFI_MIPS_N32 + if ( ecif->cif->rstruct_flag != 0 ) +#else + if ( ecif->cif->rtype->type == FFI_TYPE_STRUCT ) +#endif + { + *(ffi_arg *) argp = (ffi_arg) ecif->rvalue; + argp += sizeof(ffi_arg); + FIX_ARGP; + } + + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; i++, p_arg++) + { + size_t z; + unsigned int a; + + /* Align if necessary. */ + a = (*p_arg)->alignment; + if (a < sizeof(ffi_arg)) + a = sizeof(ffi_arg); + + if ((a - 1) & (unsigned long) argp) + { + argp = (char *) FFI_ALIGN(argp, a); + FIX_ARGP; + } + + z = (*p_arg)->size; + if (z <= sizeof(ffi_arg)) + { + int type = (*p_arg)->type; + z = sizeof(ffi_arg); + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (ecif->cif->abi == FFI_N64 + || ecif->cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (i < 8 && (ecif->cif->abi == FFI_N32_SOFT_FLOAT + || ecif->cif->abi == FFI_N64_SOFT_FLOAT)) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_SINT8: + *(ffi_arg *)argp = *(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(ffi_arg *)argp = *(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(ffi_arg *)argp = *(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(ffi_arg *)argp = *(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_SINT32: + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); + break; + + case FFI_TYPE_UINT32: +#ifdef FFI_MIPS_N32 + /* The N32 ABI requires that 32-bit integers + be sign-extended to 64-bits, regardless of + whether they are signed or unsigned. */ + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); +#else + *(ffi_arg *)argp = *(UINT32 *)(* p_argv); +#endif + break; + + /* This can only happen with 64bit slots. */ + case FFI_TYPE_FLOAT: + *(float *) argp = *(float *)(* p_argv); + break; + + /* Handle structures. 
*/ + default: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + } + } + else + { +#ifdef FFI_MIPS_O32 + memcpy(argp, *p_argv, z); +#else + { + unsigned long end = (unsigned long) argp + z; + unsigned long cap = (unsigned long) stack + bytes; + + /* Check if the data will fit within the register space. + Handle it if it doesn't. */ + + if (end <= cap) + memcpy(argp, *p_argv, z); + else + { + unsigned long portion = cap - (unsigned long)argp; + + memcpy(argp, *p_argv, portion); + argp = stack; + z -= portion; + memcpy(argp, (void*)((unsigned long)(*p_argv) + portion), + z); + } + } +#endif + } + p_argv++; + argp += z; + FIX_ARGP; + } +} + +#ifdef FFI_MIPS_N32 + +/* The n32 spec says that if "a chunk consists solely of a double + float field (but not a double, which is part of a union), it + is passed in a floating point register. Any other chunk is + passed in an integer register". This code traverses structure + definitions and generates the appropriate flags. */ + +static unsigned +calc_n32_struct_flags(int soft_float, ffi_type *arg, + unsigned *loc, unsigned *arg_reg) +{ + unsigned flags = 0; + unsigned index = 0; + + ffi_type *e; + + if (soft_float) + return 0; + + while ((e = arg->elements[index])) + { + /* Align this object. */ + *loc = FFI_ALIGN(*loc, e->alignment); + if (e->type == FFI_TYPE_DOUBLE) + { + /* Already aligned to FFI_SIZEOF_ARG. */ + *arg_reg = *loc / FFI_SIZEOF_ARG; + if (*arg_reg > 7) + break; + flags += (FFI_TYPE_DOUBLE << (*arg_reg * FFI_FLAG_BITS)); + *loc += e->size; + } + else + *loc += e->size; + index++; + } + /* Next Argument register at alignment of FFI_SIZEOF_ARG. */ + *arg_reg = FFI_ALIGN(*loc, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + + return flags; +} + +static unsigned +calc_n32_return_struct_flags(int soft_float, ffi_type *arg) +{ + unsigned flags = 0; + unsigned small = FFI_TYPE_SMALLSTRUCT; + ffi_type *e; + + /* Returning structures under n32 is a tricky thing. + A struct with only one or two floating point fields + is returned in $f0 (and $f2 if necessary). Any other + struct results at most 128 bits are returned in $2 + (the first 64 bits) and $3 (remainder, if necessary). + Larger structs are handled normally. */ + + if (arg->size > 16) + return 0; + + if (arg->size > 8) + small = FFI_TYPE_SMALLSTRUCT2; + + e = arg->elements[0]; + + if (e->type == FFI_TYPE_DOUBLE) + flags = FFI_TYPE_DOUBLE; + else if (e->type == FFI_TYPE_FLOAT) + flags = FFI_TYPE_FLOAT; + + if (flags && (e = arg->elements[1])) + { + if (e->type == FFI_TYPE_DOUBLE) + flags += FFI_TYPE_DOUBLE << FFI_FLAG_BITS; + else if (e->type == FFI_TYPE_FLOAT) + flags += FFI_TYPE_FLOAT << FFI_FLAG_BITS; + else + return small; + + if (flags && (arg->elements[2])) + { + /* There are three arguments and the first two are + floats! This must be passed the old way. */ + return small; + } + if (soft_float) + flags += FFI_TYPE_STRUCT_SOFT; + } + else + if (!flags) + return small; + + return flags; +} + +#endif + +/* Perform machine dependent cif processing */ +static ffi_status ffi_prep_cif_machdep_int(ffi_cif *cif, unsigned nfixedargs) +{ + cif->flags = 0; + cif->mips_nfixedargs = nfixedargs; + +#ifdef FFI_MIPS_O32 + /* Set the flags necessary for O32 processing. FFI_O32_SOFT_FLOAT + * does not have special handling for floating point args. 
+ */ + + if (cif->rtype->type != FFI_TYPE_STRUCT && cif->abi == FFI_O32) + { + if (cif->nargs > 0 && cif->nargs == nfixedargs) + { + switch ((cif->arg_types)[0]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[0]->type; + break; + + default: + break; + } + + if (cif->nargs > 1) + { + /* Only handle the second argument if the first + is a float or double. */ + if (cif->flags) + { + switch ((cif->arg_types)[1]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[1]->type << FFI_FLAG_BITS; + break; + + default: + break; + } + } + } + } + } + + /* Set the return type flag */ + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } + else + { + /* FFI_O32 */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } +#endif + +#ifdef FFI_MIPS_N32 + /* Set the flags necessary for N32 processing */ + { + int type; + unsigned arg_reg = 0; + unsigned loc = 0; + unsigned count = (cif->nargs < 8) ? cif->nargs : 8; + unsigned index = 0; + + unsigned struct_flags = 0; + int soft_float = (cif->abi == FFI_N32_SOFT_FLOAT + || cif->abi == FFI_N64_SOFT_FLOAT); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + struct_flags = calc_n32_return_struct_flags(soft_float, cif->rtype); + + if (struct_flags == 0) + { + /* This means that the structure is being passed as + a hidden argument */ + + arg_reg = 1; + count = (cif->nargs < 7) ? cif->nargs : 7; + + cif->rstruct_flag = !0; + } + else + cif->rstruct_flag = 0; + } + else + cif->rstruct_flag = 0; + + while (count-- > 0 && arg_reg < 8) + { + type = (cif->arg_types)[index]->type; + + // Pass variadic arguments in integer registers even if they're floats + if (soft_float || index >= nfixedargs) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += + ((cif->arg_types)[index]->type << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + break; + case FFI_TYPE_LONGDOUBLE: + /* Align it. */ + arg_reg = FFI_ALIGN(arg_reg, 2); + /* Treat it as two adjacent doubles. 
*/ + if (soft_float || index >= nfixedargs) + { + arg_reg += 2; + } + else + { + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + } + break; + + case FFI_TYPE_STRUCT: + loc = arg_reg * FFI_SIZEOF_ARG; + cif->flags += calc_n32_struct_flags(soft_float || index >= nfixedargs, + (cif->arg_types)[index], + &loc, &arg_reg); + break; + + default: + arg_reg++; + break; + } + + index++; + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + { + if (struct_flags == 0) + { + /* The structure is returned through a hidden + first argument. Do nothing, 'cause FFI_TYPE_VOID + is 0 */ + } + else + { + /* The structure is returned via some tricky + mechanism */ + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += struct_flags << (4 + (FFI_FLAG_BITS * 8)); + } + break; + } + + case FFI_TYPE_VOID: + /* Do nothing, 'cause FFI_TYPE_VOID is 0 */ + break; + + case FFI_TYPE_POINTER: + if (cif->abi == FFI_N32_SOFT_FLOAT || cif->abi == FFI_N32) + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + else + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_FLOAT: + if (soft_float) + { + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + break; + } + /* else fall through */ + case FFI_TYPE_DOUBLE: + if (soft_float) + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + else + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_LONGDOUBLE: + /* Long double is returned as if it were a struct containing + two doubles. */ + if (soft_float) + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += FFI_TYPE_SMALLSTRUCT2 << (4 + (FFI_FLAG_BITS * 8)); + } + else + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += (FFI_TYPE_DOUBLE + + (FFI_TYPE_DOUBLE << FFI_FLAG_BITS)) + << (4 + (FFI_FLAG_BITS * 8)); + } + break; + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + } + } +#endif + + return FFI_OK; +} + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + return ffi_prep_cif_machdep_int(cif, cif->nargs); +} + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned nfixedargs, + unsigned ntotalargs MAYBE_UNUSED) +{ + return ffi_prep_cif_machdep_int(cif, nfixedargs); +} + +/* Low level routine for calling O32 functions */ +extern int ffi_call_O32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, unsigned *, void (*)(void), void *closure); + +/* Low level routine for calling N32 functions */ +extern int ffi_call_N32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, void *, void (*)(void), void *closure); + +void ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { +#ifdef FFI_MIPS_O32 + case FFI_O32: + case FFI_O32_SOFT_FLOAT: + ffi_call_O32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn, closure); + break; +#endif + +#ifdef FFI_MIPS_N32 + case FFI_N32: + case FFI_N32_SOFT_FLOAT: + case FFI_N64: + case FFI_N64_SOFT_FLOAT: + { + int copy_rvalue = 0; + int 
copy_offset = 0; + char *rvalue_copy = ecif.rvalue; + if (cif->rtype->type == FFI_TYPE_STRUCT && cif->rtype->size < 16) + { + /* For structures smaller than 16 bytes we clobber memory + in 8 byte increments. Make a copy so we don't clobber + the callers memory outside of the struct bounds. */ + rvalue_copy = alloca(16); + copy_rvalue = 1; + } + else if (cif->rtype->type == FFI_TYPE_FLOAT + && (cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT)) + { + rvalue_copy = alloca (8); + copy_rvalue = 1; +#if defined(__MIPSEB__) || defined(_MIPSEB) + copy_offset = 4; +#endif + } + ffi_call_N32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, rvalue_copy, fn, closure); + if (copy_rvalue) + memcpy(ecif.rvalue, rvalue_copy + copy_offset, cif->rtype->size); + } + break; +#endif + + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +#if FFI_CLOSURES +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + void * fn; + char *clear_location = (char *) codeloc; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_closure_N32; +#endif /* FFI_MIPS_O32 */ + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned)fn >> 16); + /* ori $25,low(fn) */ + tramp[1] = 0x37390000 | ((unsigned)fn & 0xffff); + /* lui $12,high(codeloc) */ + tramp[2] = 0x3c0c0000 | ((unsigned)codeloc >> 16); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[3] = 0x03200008; +#else + tramp[3] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[4] = 0x358c0000 | ((unsigned)codeloc & 0xffff); +#else + /* N64 has a somewhat larger trampoline. 
*/ + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned long)fn >> 48); + /* lui $12,high(codeloc) */ + tramp[1] = 0x3c0c0000 | ((unsigned long)codeloc >> 48); + /* ori $25,mid-high(fn) */ + tramp[2] = 0x37390000 | (((unsigned long)fn >> 32 ) & 0xffff); + /* ori $12,mid-high(codeloc) */ + tramp[3] = 0x358c0000 | (((unsigned long)codeloc >> 32) & 0xffff); + /* dsll $25,$25,16 */ + tramp[4] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[5] = 0x000c6438; + /* ori $25,mid-low(fn) */ + tramp[6] = 0x37390000 | (((unsigned long)fn >> 16 ) & 0xffff); + /* ori $12,mid-low(codeloc) */ + tramp[7] = 0x358c0000 | (((unsigned long)codeloc >> 16) & 0xffff); + /* dsll $25,$25,16 */ + tramp[8] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[9] = 0x000c6438; + /* ori $25,low(fn) */ + tramp[10] = 0x37390000 | ((unsigned long)fn & 0xffff); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[11] = 0x03200008; +#else + tramp[11] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[12] = 0x358c0000 | ((unsigned long)codeloc & 0xffff); + +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#ifdef USE__BUILTIN___CLEAR_CACHE + __builtin___clear_cache(clear_location, clear_location + FFI_TRAMPOLINE_SIZE); +#else + cacheflush (clear_location, FFI_TRAMPOLINE_SIZE, ICACHE); +#endif + return FFI_OK; +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer arguments + * (and, depending upon the arguments, some floating-point arguments + * as well). FPR is a pointer to the area where floating point + * registers have been saved, if any. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return type. + * + * Based on the similar routine for sparc. 
+ */ +int +ffi_closure_mips_inner_O32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + double *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn, seen_int; + + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + seen_int = (cif->abi == FFI_O32_SOFT_FLOAT) || (cif->mips_nfixedargs != cif->nargs); + argn = 0; + + if ((cif->flags >> (FFI_FLAG_BITS * 2)) == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)ar[0]; + argn = 1; + seen_int = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->alignment == 8 && (argn & 0x1)) + argn++; + if (i < 2 && !seen_int && + (arg_types[i]->type == FFI_TYPE_FLOAT || + arg_types[i]->type == FFI_TYPE_DOUBLE || + arg_types[i]->type == FFI_TYPE_LONGDOUBLE)) + { +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT) + avaluep[i] = ((char *) &fpr[i]) + sizeof (float); + else +#endif + avaluep[i] = (char *) &fpr[i]; + } + else + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) ar[argn]; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) ar[argn]; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) ar[argn]; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) ar[argn]; + break; + + default: + avaluep[i] = (char *) &ar[argn]; + break; + } + seen_int = 1; + } + argn += FFI_ALIGN(arg_types[i]->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + i++; + } + + /* Invoke the closure. */ + fun(cif, rvalue, avaluep, user_data); + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_FLOAT: + return FFI_TYPE_INT; + case FFI_TYPE_DOUBLE: + return FFI_TYPE_UINT64; + default: + return cif->rtype->type; + } + } + else + { + return cif->rtype->type; + } +} + +#if defined(FFI_MIPS_N32) + +static void +copy_struct_N32(char *target, unsigned offset, ffi_abi abi, ffi_type *type, + int argn, unsigned arg_offset, ffi_arg *ar, + ffi_arg *fpr, int soft_float) +{ + ffi_type **elt_typep = type->elements; + while(*elt_typep) + { + ffi_type *elt_type = *elt_typep; + unsigned o; + char *tp; + char *argp; + char *fpp; + + o = FFI_ALIGN(offset, elt_type->alignment); + arg_offset += o - offset; + offset = o; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + + argp = (char *)(ar + argn); + fpp = (char *)(argn >= 8 ? ar + argn : fpr + argn); + + tp = target + offset; + + if (elt_type->type == FFI_TYPE_DOUBLE && !soft_float) + *(double *)tp = *(double *)fpp; + else + memcpy(tp, argp + arg_offset, elt_type->size); + + offset += elt_type->size; + arg_offset += elt_type->size; + elt_typep++; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + } +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer + * arguments. FPR is a pointer to the area where floating point + * registers have been saved. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return flags. 
+ * + */ +int +ffi_closure_mips_inner_N32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + ffi_arg *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn; + int soft_float; + ffi_arg *argp; + + soft_float = cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT; + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + argn = 0; + + if (cif->rstruct_flag) + { +#if _MIPS_SIM==_ABIN32 + rvalue = (void *)(UINT32)ar[0]; +#else /* N64 */ + rvalue = (void *)ar[0]; +#endif + argn = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->type == FFI_TYPE_FLOAT + || arg_types[i]->type == FFI_TYPE_DOUBLE + || arg_types[i]->type == FFI_TYPE_LONGDOUBLE) + { + argp = (argn >= 8 || i >= cif->mips_nfixedargs || soft_float) ? ar + argn : fpr + argn; + if ((arg_types[i]->type == FFI_TYPE_LONGDOUBLE) && ((uintptr_t)argp & (arg_types[i]->alignment-1))) + { + argp=(ffi_arg*)FFI_ALIGN(argp,arg_types[i]->alignment); + argn++; + } +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT && argn < 8) + avaluep[i] = ((char *) argp) + sizeof (float); + else +#endif + avaluep[i] = (char *) argp; + } + else + { + unsigned type = arg_types[i]->type; + + if (arg_types[i]->alignment > sizeof(ffi_arg)) + argn = FFI_ALIGN(argn, arg_types[i]->alignment / sizeof(ffi_arg)); + + argp = ar + argn; + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (cif->abi == FFI_N64 || cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (soft_float && type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + + switch (type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) *argp; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) *argp; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) *argp; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) *argp; + break; + + case FFI_TYPE_SINT32: + avaluep[i] = &avalue[i]; + *(SINT32 *) &avalue[i] = (SINT32) *argp; + break; + + case FFI_TYPE_UINT32: + avaluep[i] = &avalue[i]; + *(UINT32 *) &avalue[i] = (UINT32) *argp; + break; + + case FFI_TYPE_STRUCT: + if (argn < 8) + { + /* Allocate space for the struct as at least part of + it was passed in registers. */ + avaluep[i] = alloca(arg_types[i]->size); + copy_struct_N32(avaluep[i], 0, cif->abi, arg_types[i], + argn, 0, ar, fpr, i >= cif->mips_nfixedargs || soft_float); + + break; + } + /* Else fall through. */ + default: + avaluep[i] = (char *) argp; + break; + } + } + argn += FFI_ALIGN(arg_types[i]->size, sizeof(ffi_arg)) / sizeof(ffi_arg); + i++; + } + + /* Invoke the closure. 
*/ + fun (cif, rvalue, avaluep, user_data); + + return cif->flags >> (FFI_FLAG_BITS * 8); +} + +#endif /* FFI_MIPS_N32 */ + +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void * fn; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_go_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_go_closure_N32; +#endif /* FFI_MIPS_O32 */ + + closure->tramp = (void *)fn; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* FFI_CLOSURES */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget 2.h new file mode 100644 index 0000000..fffdb97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget 2.h @@ -0,0 +1,244 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for MIPS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifdef __linux__ +# include +#elif defined(__rtems__) +/* + * Subprogram calling convention - copied from sgidefs.h + */ +#define _MIPS_SIM_ABI32 1 +#define _MIPS_SIM_NABI32 2 +#define _MIPS_SIM_ABI64 3 +#elif !defined(__OpenBSD__) +# include +#endif + +# ifndef _ABIN32 +# define _ABIN32 _MIPS_SIM_NABI32 +# endif +# ifndef _ABI64 +# define _ABI64 _MIPS_SIM_ABI64 +# endif +# ifndef _ABIO32 +# define _ABIO32 _MIPS_SIM_ABI32 +# endif + +#if !defined(_MIPS_SIM) +# error -- something is very wrong -- +#else +# if (_MIPS_SIM==_ABIN32 && defined(_ABIN32)) || (_MIPS_SIM==_ABI64 && defined(_ABI64)) +# define FFI_MIPS_N32 +# else +# if (_MIPS_SIM==_ABIO32 && defined(_ABIO32)) +# define FFI_MIPS_O32 +# else +# error -- this is an unsupported platform -- +# endif +# endif +#endif + +#ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +# define FFI_SIZEOF_ARG 4 +#else +/* N32 and N64 frames have 64bit integer args */ +# define FFI_SIZEOF_ARG 8 +# if _MIPS_SIM == _ABIN32 +# define FFI_SIZEOF_JAVA_RAW 4 +# endif +#endif + +#define FFI_FLAG_BITS 2 + +/* SGI's strange assembler requires that we multiply by 4 rather + than shift left by FFI_FLAG_BITS */ + +#define FFI_ARGS_D FFI_TYPE_DOUBLE +#define FFI_ARGS_F FFI_TYPE_FLOAT +#define FFI_ARGS_DD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_DOUBLE +#define FFI_ARGS_FF FFI_TYPE_FLOAT * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_FD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_DF FFI_TYPE_FLOAT * 4 + FFI_TYPE_DOUBLE + +/* Needed for N32 structure returns */ +#define FFI_TYPE_SMALLSTRUCT FFI_TYPE_UINT8 +#define FFI_TYPE_SMALLSTRUCT2 FFI_TYPE_SINT8 + +#if 0 +/* The SGI assembler can't handle this.. */ +#define FFI_TYPE_STRUCT_DD (( FFI_ARGS_DD ) << 4) + FFI_TYPE_STRUCT +/* (and so on) */ +#else +/* ...so we calculate these by hand! 
*/ +#define FFI_TYPE_STRUCT_D 61 +#define FFI_TYPE_STRUCT_F 45 +#define FFI_TYPE_STRUCT_DD 253 +#define FFI_TYPE_STRUCT_FF 173 +#define FFI_TYPE_STRUCT_FD 237 +#define FFI_TYPE_STRUCT_DF 189 +#define FFI_TYPE_STRUCT_SMALL 93 +#define FFI_TYPE_STRUCT_SMALL2 109 + +/* and for n32 soft float, add 16 * 2^4 */ +#define FFI_TYPE_STRUCT_D_SOFT 317 +#define FFI_TYPE_STRUCT_F_SOFT 301 +#define FFI_TYPE_STRUCT_DD_SOFT 509 +#define FFI_TYPE_STRUCT_FF_SOFT 429 +#define FFI_TYPE_STRUCT_FD_SOFT 493 +#define FFI_TYPE_STRUCT_DF_SOFT 445 +#define FFI_TYPE_STRUCT_SOFT 16 +#endif + +#ifdef LIBFFI_ASM +#define v0 $2 +#define v1 $3 +#define a0 $4 +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define a4 $8 +#define a5 $9 +#define a6 $10 +#define a7 $11 +#define t0 $8 +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define t8 $24 +#define t9 $25 +#define ra $31 + +#ifdef FFI_MIPS_O32 +# define REG_L lw +# define REG_S sw +# define SUBU subu +# define ADDU addu +# define SRL srl +# define LI li +#else /* !FFI_MIPS_O32 */ +# define REG_L ld +# define REG_S sd +# define SUBU dsubu +# define ADDU daddu +# define SRL dsrl +# define LI dli +# if (_MIPS_SIM==_ABI64) +# define LA dla +# define EH_FRAME_ALIGN 3 +# define FDE_ADDR_BYTES .8byte +# else +# define LA la +# define EH_FRAME_ALIGN 2 +# define FDE_ADDR_BYTES .4byte +# endif /* _MIPS_SIM==_ABI64 */ +#endif /* !FFI_MIPS_O32 */ +#else /* !LIBFFI_ASM */ +# ifdef __GNUC__ +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__SI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__SI__))); +#else +/* N32 and N64 frames have 64bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__DI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__DI__))); +# endif +# else +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef __uint32_t ffi_arg; +typedef __int32_t ffi_sarg; +# else +/* N32 and N64 frames have 64bit integer args */ +typedef __uint64_t ffi_arg; +typedef __int64_t ffi_sarg; +# endif +# endif /* __GNUC__ */ + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_O32, + FFI_N32, + FFI_N64, + FFI_O32_SOFT_FLOAT, + FFI_N32_SOFT_FLOAT, + FFI_N64_SOFT_FLOAT, + FFI_LAST_ABI, + +#ifdef FFI_MIPS_O32 +#ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_O32_SOFT_FLOAT +#else + FFI_DEFAULT_ABI = FFI_O32 +#endif +#else +# if _MIPS_SIM==_ABI64 +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N64_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N64 +# endif +# else +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N32_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N32 +# endif +# endif +#endif +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS unsigned rstruct_flag; unsigned mips_nfixedargs +#define FFI_TARGET_SPECIFIC_VARIADIC +#endif /* !LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) +# define FFI_TRAMPOLINE_SIZE 20 +#else +# define FFI_TRAMPOLINE_SIZE 56 +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget.h new file mode 100644 index 0000000..fffdb97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget.h @@ -0,0 +1,244 @@ +/* 
-----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for MIPS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifdef __linux__ +# include +#elif defined(__rtems__) +/* + * Subprogram calling convention - copied from sgidefs.h + */ +#define _MIPS_SIM_ABI32 1 +#define _MIPS_SIM_NABI32 2 +#define _MIPS_SIM_ABI64 3 +#elif !defined(__OpenBSD__) +# include +#endif + +# ifndef _ABIN32 +# define _ABIN32 _MIPS_SIM_NABI32 +# endif +# ifndef _ABI64 +# define _ABI64 _MIPS_SIM_ABI64 +# endif +# ifndef _ABIO32 +# define _ABIO32 _MIPS_SIM_ABI32 +# endif + +#if !defined(_MIPS_SIM) +# error -- something is very wrong -- +#else +# if (_MIPS_SIM==_ABIN32 && defined(_ABIN32)) || (_MIPS_SIM==_ABI64 && defined(_ABI64)) +# define FFI_MIPS_N32 +# else +# if (_MIPS_SIM==_ABIO32 && defined(_ABIO32)) +# define FFI_MIPS_O32 +# else +# error -- this is an unsupported platform -- +# endif +# endif +#endif + +#ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +# define FFI_SIZEOF_ARG 4 +#else +/* N32 and N64 frames have 64bit integer args */ +# define FFI_SIZEOF_ARG 8 +# if _MIPS_SIM == _ABIN32 +# define FFI_SIZEOF_JAVA_RAW 4 +# endif +#endif + +#define FFI_FLAG_BITS 2 + +/* SGI's strange assembler requires that we multiply by 4 rather + than shift left by FFI_FLAG_BITS */ + +#define FFI_ARGS_D FFI_TYPE_DOUBLE +#define FFI_ARGS_F FFI_TYPE_FLOAT +#define FFI_ARGS_DD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_DOUBLE +#define FFI_ARGS_FF FFI_TYPE_FLOAT * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_FD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_DF FFI_TYPE_FLOAT * 4 + FFI_TYPE_DOUBLE + +/* Needed for N32 structure returns */ +#define FFI_TYPE_SMALLSTRUCT FFI_TYPE_UINT8 +#define FFI_TYPE_SMALLSTRUCT2 FFI_TYPE_SINT8 + +#if 0 +/* The SGI assembler can't handle this.. */ +#define FFI_TYPE_STRUCT_DD (( FFI_ARGS_DD ) << 4) + FFI_TYPE_STRUCT +/* (and so on) */ +#else +/* ...so we calculate these by hand! 
*/ +#define FFI_TYPE_STRUCT_D 61 +#define FFI_TYPE_STRUCT_F 45 +#define FFI_TYPE_STRUCT_DD 253 +#define FFI_TYPE_STRUCT_FF 173 +#define FFI_TYPE_STRUCT_FD 237 +#define FFI_TYPE_STRUCT_DF 189 +#define FFI_TYPE_STRUCT_SMALL 93 +#define FFI_TYPE_STRUCT_SMALL2 109 + +/* and for n32 soft float, add 16 * 2^4 */ +#define FFI_TYPE_STRUCT_D_SOFT 317 +#define FFI_TYPE_STRUCT_F_SOFT 301 +#define FFI_TYPE_STRUCT_DD_SOFT 509 +#define FFI_TYPE_STRUCT_FF_SOFT 429 +#define FFI_TYPE_STRUCT_FD_SOFT 493 +#define FFI_TYPE_STRUCT_DF_SOFT 445 +#define FFI_TYPE_STRUCT_SOFT 16 +#endif + +#ifdef LIBFFI_ASM +#define v0 $2 +#define v1 $3 +#define a0 $4 +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define a4 $8 +#define a5 $9 +#define a6 $10 +#define a7 $11 +#define t0 $8 +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define t8 $24 +#define t9 $25 +#define ra $31 + +#ifdef FFI_MIPS_O32 +# define REG_L lw +# define REG_S sw +# define SUBU subu +# define ADDU addu +# define SRL srl +# define LI li +#else /* !FFI_MIPS_O32 */ +# define REG_L ld +# define REG_S sd +# define SUBU dsubu +# define ADDU daddu +# define SRL dsrl +# define LI dli +# if (_MIPS_SIM==_ABI64) +# define LA dla +# define EH_FRAME_ALIGN 3 +# define FDE_ADDR_BYTES .8byte +# else +# define LA la +# define EH_FRAME_ALIGN 2 +# define FDE_ADDR_BYTES .4byte +# endif /* _MIPS_SIM==_ABI64 */ +#endif /* !FFI_MIPS_O32 */ +#else /* !LIBFFI_ASM */ +# ifdef __GNUC__ +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__SI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__SI__))); +#else +/* N32 and N64 frames have 64bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__DI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__DI__))); +# endif +# else +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef __uint32_t ffi_arg; +typedef __int32_t ffi_sarg; +# else +/* N32 and N64 frames have 64bit integer args */ +typedef __uint64_t ffi_arg; +typedef __int64_t ffi_sarg; +# endif +# endif /* __GNUC__ */ + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_O32, + FFI_N32, + FFI_N64, + FFI_O32_SOFT_FLOAT, + FFI_N32_SOFT_FLOAT, + FFI_N64_SOFT_FLOAT, + FFI_LAST_ABI, + +#ifdef FFI_MIPS_O32 +#ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_O32_SOFT_FLOAT +#else + FFI_DEFAULT_ABI = FFI_O32 +#endif +#else +# if _MIPS_SIM==_ABI64 +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N64_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N64 +# endif +# else +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N32_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N32 +# endif +# endif +#endif +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS unsigned rstruct_flag; unsigned mips_nfixedargs +#define FFI_TARGET_SPECIFIC_VARIADIC +#endif /* !LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) +# define FFI_TRAMPOLINE_SIZE 20 +#else +# define FFI_TRAMPOLINE_SIZE 56 +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/n32 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/n32 2.S new file mode 100644 index 0000000..1a940b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/n32 2.S @@ -0,0 +1,663 @@ +/* 
----------------------------------------------------------------------- + n32.S - Copyright (c) 1996, 1998, 2005, 2007, 2009, 2010 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Only build this code if we are compiling for n32 */ + +#if defined(FFI_MIPS_N32) + +#define callback a0 +#define bytes a2 +#define flags a3 +#define raddr a4 +#define fn a5 +#define closure a6 + +/* Note: to keep stack 16 byte aligned we need even number slots + used 9 slots here +*/ +#define SIZEOF_FRAME ( 10 * FFI_SIZEOF_ARG ) + +#ifdef __GNUC__ + .abicalls +#endif +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + .set mips4 +#endif + .text + .align 2 + .globl ffi_call_N32 + .ent ffi_call_N32 +ffi_call_N32: +.LFB0: + .frame $fp, SIZEOF_FRAME, ra + .mask 0xc0000000,-FFI_SIZEOF_ARG + .fmask 0x00000000,0 + + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +.LCFI00: + REG_S $fp, SIZEOF_FRAME - 2*FFI_SIZEOF_ARG($sp) # Save frame pointer + REG_S ra, SIZEOF_FRAME - 1*FFI_SIZEOF_ARG($sp) # Save return address +.LCFI01: + move $fp, $sp +.LCFI02: + move t9, callback # callback function pointer + REG_S bytes, 2*FFI_SIZEOF_ARG($fp) # bytes + REG_S flags, 3*FFI_SIZEOF_ARG($fp) # flags + REG_S raddr, 4*FFI_SIZEOF_ARG($fp) # raddr + REG_S fn, 5*FFI_SIZEOF_ARG($fp) # fn + REG_S closure, 6*FFI_SIZEOF_ARG($fp) # closure + + # Allocate at least 4 words in the argstack + move v0, bytes + bge bytes, 4 * FFI_SIZEOF_ARG, bigger + LI v0, 4 * FFI_SIZEOF_ARG + b sixteen + + bigger: + ADDU t4, v0, 2 * FFI_SIZEOF_ARG -1 # make sure it is aligned + and v0, t4, -2 * FFI_SIZEOF_ARG # to a proper boundry. + +sixteen: + SUBU $sp, $sp, v0 # move the stack pointer to reflect the + # arg space + + move a0, $sp # 4 * FFI_SIZEOF_ARG + ADDU a3, $fp, 3 * FFI_SIZEOF_ARG + + # Call ffi_prep_args + jal t9 + + # Copy the stack pointer to t9 + move t9, $sp + + # Fix the stack if there are more than 8 64bit slots worth + # of arguments. + + # Load the number of bytes + REG_L t6, 2*FFI_SIZEOF_ARG($fp) + + # Is it bigger than 8 * FFI_SIZEOF_ARG? + daddiu t8, t6, -(8 * FFI_SIZEOF_ARG) + bltz t8, loadregs + + ADDU t9, t9, t8 + +loadregs: + + REG_L t6, 3*FFI_SIZEOF_ARG($fp) # load the flags word into t6. 
+ +#ifdef __mips_soft_float + REG_L a0, 0*FFI_SIZEOF_ARG(t9) + REG_L a1, 1*FFI_SIZEOF_ARG(t9) + REG_L a2, 2*FFI_SIZEOF_ARG(t9) + REG_L a3, 3*FFI_SIZEOF_ARG(t9) + REG_L a4, 4*FFI_SIZEOF_ARG(t9) + REG_L a5, 5*FFI_SIZEOF_ARG(t9) + REG_L a6, 6*FFI_SIZEOF_ARG(t9) + REG_L a7, 7*FFI_SIZEOF_ARG(t9) +#else + and t4, t6, ((1< +#include + +/* Only build this code if we are compiling for n32 */ + +#if defined(FFI_MIPS_N32) + +#define callback a0 +#define bytes a2 +#define flags a3 +#define raddr a4 +#define fn a5 +#define closure a6 + +/* Note: to keep stack 16 byte aligned we need even number slots + used 9 slots here +*/ +#define SIZEOF_FRAME ( 10 * FFI_SIZEOF_ARG ) + +#ifdef __GNUC__ + .abicalls +#endif +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + .set mips4 +#endif + .text + .align 2 + .globl ffi_call_N32 + .ent ffi_call_N32 +ffi_call_N32: +.LFB0: + .frame $fp, SIZEOF_FRAME, ra + .mask 0xc0000000,-FFI_SIZEOF_ARG + .fmask 0x00000000,0 + + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +.LCFI00: + REG_S $fp, SIZEOF_FRAME - 2*FFI_SIZEOF_ARG($sp) # Save frame pointer + REG_S ra, SIZEOF_FRAME - 1*FFI_SIZEOF_ARG($sp) # Save return address +.LCFI01: + move $fp, $sp +.LCFI02: + move t9, callback # callback function pointer + REG_S bytes, 2*FFI_SIZEOF_ARG($fp) # bytes + REG_S flags, 3*FFI_SIZEOF_ARG($fp) # flags + REG_S raddr, 4*FFI_SIZEOF_ARG($fp) # raddr + REG_S fn, 5*FFI_SIZEOF_ARG($fp) # fn + REG_S closure, 6*FFI_SIZEOF_ARG($fp) # closure + + # Allocate at least 4 words in the argstack + move v0, bytes + bge bytes, 4 * FFI_SIZEOF_ARG, bigger + LI v0, 4 * FFI_SIZEOF_ARG + b sixteen + + bigger: + ADDU t4, v0, 2 * FFI_SIZEOF_ARG -1 # make sure it is aligned + and v0, t4, -2 * FFI_SIZEOF_ARG # to a proper boundry. + +sixteen: + SUBU $sp, $sp, v0 # move the stack pointer to reflect the + # arg space + + move a0, $sp # 4 * FFI_SIZEOF_ARG + ADDU a3, $fp, 3 * FFI_SIZEOF_ARG + + # Call ffi_prep_args + jal t9 + + # Copy the stack pointer to t9 + move t9, $sp + + # Fix the stack if there are more than 8 64bit slots worth + # of arguments. + + # Load the number of bytes + REG_L t6, 2*FFI_SIZEOF_ARG($fp) + + # Is it bigger than 8 * FFI_SIZEOF_ARG? + daddiu t8, t6, -(8 * FFI_SIZEOF_ARG) + bltz t8, loadregs + + ADDU t9, t9, t8 + +loadregs: + + REG_L t6, 3*FFI_SIZEOF_ARG($fp) # load the flags word into t6. 
+ +#ifdef __mips_soft_float + REG_L a0, 0*FFI_SIZEOF_ARG(t9) + REG_L a1, 1*FFI_SIZEOF_ARG(t9) + REG_L a2, 2*FFI_SIZEOF_ARG(t9) + REG_L a3, 3*FFI_SIZEOF_ARG(t9) + REG_L a4, 4*FFI_SIZEOF_ARG(t9) + REG_L a5, 5*FFI_SIZEOF_ARG(t9) + REG_L a6, 6*FFI_SIZEOF_ARG(t9) + REG_L a7, 7*FFI_SIZEOF_ARG(t9) +#else + and t4, t6, ((1< +#include + +/* Only build this code if we are compiling for o32 */ + +#if defined(FFI_MIPS_O32) + +#define callback a0 +#define bytes a2 +#define flags a3 + +#define SIZEOF_FRAME (4 * FFI_SIZEOF_ARG + 2 * FFI_SIZEOF_ARG) +#define A3_OFF (SIZEOF_FRAME + 3 * FFI_SIZEOF_ARG) +#define FP_OFF (SIZEOF_FRAME - 2 * FFI_SIZEOF_ARG) +#define RA_OFF (SIZEOF_FRAME - 1 * FFI_SIZEOF_ARG) + + .abicalls + .text + .align 2 + .globl ffi_call_O32 + .ent ffi_call_O32 +ffi_call_O32: +$LFB0: + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +$LCFI00: + REG_S $fp, FP_OFF($sp) # Save frame pointer +$LCFI01: + REG_S ra, RA_OFF($sp) # Save return address +$LCFI02: + move $fp, $sp + +$LCFI03: + move t9, callback # callback function pointer + REG_S flags, A3_OFF($fp) # flags + + # Allocate at least 4 words in the argstack + LI v0, 4 * FFI_SIZEOF_ARG + blt bytes, v0, sixteen + + ADDU v0, bytes, 7 # make sure it is aligned + and v0, -8 # to an 8 byte boundry + +sixteen: + SUBU $sp, v0 # move the stack pointer to reflect the + # arg space + + ADDU a0, $sp, 4 * FFI_SIZEOF_ARG + + jalr t9 + + REG_L t0, A3_OFF($fp) # load the flags word + SRL t2, t0, 4 # shift our arg info + and t0, ((1<<4)-1) # mask out the return type + + ADDU $sp, 4 * FFI_SIZEOF_ARG # adjust $sp to new args + +#ifndef __mips_soft_float + bnez t0, pass_d # make it quick for int +#endif + REG_L a0, 0*FFI_SIZEOF_ARG($sp) # just go ahead and load the + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # four regs. + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +#ifndef __mips_soft_float +pass_d: + bne t0, FFI_ARGS_D, pass_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a2, 2*FFI_SIZEOF_ARG($sp) # passing a double + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f: + bne t0, FFI_ARGS_F, pass_d_d + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # passing a float + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_d: + bne t0, FFI_ARGS_DD, pass_f_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing two doubles + b call_it + +pass_f_f: + bne t0, FFI_ARGS_FF, pass_d_f + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 1*FFI_SIZEOF_ARG($sp) # passing two floats + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_f: + bne t0, FFI_ARGS_DF, pass_f_d + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f_d: + # assume that the only other combination must be float then double + # bne t0, FFI_ARGS_F_D, call_it + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float +#endif + +call_it: + # Load the static chain pointer + REG_L t7, SIZEOF_FRAME + 6*FFI_SIZEOF_ARG($fp) + + # Load the function pointer + REG_L t9, SIZEOF_FRAME + 5*FFI_SIZEOF_ARG($fp) + + # If the return value pointer is NULL, assume no return value. 
+ REG_L t1, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + beqz t1, noretval + + bne t2, FFI_TYPE_INT, retlonglong + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v0, 0(t0) + b epilogue + +retlonglong: + # Really any 64-bit int, signed or not. + bne t2, FFI_TYPE_UINT64, retfloat + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v1, 4(t0) + REG_S v0, 0(t0) + b epilogue + +retfloat: + bne t2, FFI_TYPE_FLOAT, retdouble + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.s $f0, 0(t0) +#else + REG_S v0, 0(t0) +#endif + b epilogue + +retdouble: + bne t2, FFI_TYPE_DOUBLE, noretval + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.d $f0, 0(t0) +#else + REG_S v1, 4(t0) + REG_S v0, 0(t0) +#endif + b epilogue + +noretval: + jalr t9 + + # Epilogue +epilogue: + move $sp, $fp + REG_L $fp, FP_OFF($sp) # Restore frame pointer + REG_L ra, RA_OFF($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME # Fix stack pointer + j ra + +$LFE0: + .end ffi_call_O32 + + +/* ffi_closure_O32. Expects address of the passed-in ffi_closure + in t4 ($12). Stores any arguments passed in registers onto the + stack, then calls ffi_closure_mips_inner_O32, which + then decodes them. + + Stack layout: + + 3 - a3 save + 2 - a2 save + 1 - a1 save + 0 - a0 save, original sp + -1 - ra save + -2 - fp save + -3 - $16 (s0) save + -4 - cprestore + -5 - return value high (v1) + -6 - return value low (v0) + -7 - f14 (le high, be low) + -8 - f14 (le low, be high) + -9 - f12 (le high, be low) + -10 - f12 (le low, be high) + -11 - Called function a5 save + -12 - Called function a4 save + -13 - Called function a3 save + -14 - Called function a2 save + -15 - Called function a1 save + -16 - Called function a0 save, our sp and fp point here + */ + +#define SIZEOF_FRAME2 (16 * FFI_SIZEOF_ARG) +#define A3_OFF2 (SIZEOF_FRAME2 + 3 * FFI_SIZEOF_ARG) +#define A2_OFF2 (SIZEOF_FRAME2 + 2 * FFI_SIZEOF_ARG) +#define A1_OFF2 (SIZEOF_FRAME2 + 1 * FFI_SIZEOF_ARG) +#define A0_OFF2 (SIZEOF_FRAME2 + 0 * FFI_SIZEOF_ARG) +#define RA_OFF2 (SIZEOF_FRAME2 - 1 * FFI_SIZEOF_ARG) +#define FP_OFF2 (SIZEOF_FRAME2 - 2 * FFI_SIZEOF_ARG) +#define S0_OFF2 (SIZEOF_FRAME2 - 3 * FFI_SIZEOF_ARG) +#define GP_OFF2 (SIZEOF_FRAME2 - 4 * FFI_SIZEOF_ARG) +#define V1_OFF2 (SIZEOF_FRAME2 - 5 * FFI_SIZEOF_ARG) +#define V0_OFF2 (SIZEOF_FRAME2 - 6 * FFI_SIZEOF_ARG) +#define FA_1_1_OFF2 (SIZEOF_FRAME2 - 7 * FFI_SIZEOF_ARG) +#define FA_1_0_OFF2 (SIZEOF_FRAME2 - 8 * FFI_SIZEOF_ARG) +#define FA_0_1_OFF2 (SIZEOF_FRAME2 - 9 * FFI_SIZEOF_ARG) +#define FA_0_0_OFF2 (SIZEOF_FRAME2 - 10 * FFI_SIZEOF_ARG) +#define CALLED_A5_OFF2 (SIZEOF_FRAME2 - 11 * FFI_SIZEOF_ARG) +#define CALLED_A4_OFF2 (SIZEOF_FRAME2 - 12 * FFI_SIZEOF_ARG) + + .text + + .align 2 + .globl ffi_go_closure_O32 + .ent ffi_go_closure_O32 +ffi_go_closure_O32: +$LFB1: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI10: + + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI11: + + move $fp, $sp +$LCFI12: + + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 4($15) # cif + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. 
+ s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 4($15) # cif + REG_L a1, 8($15) # fun + move a2, $15 # user_data = go closure + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + + b $do_closure + +$LFE1: + .end ffi_go_closure_O32 + + .align 2 + .globl ffi_closure_O32 + .ent ffi_closure_O32 +ffi_closure_O32: +$LFB2: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI20: + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI21: + move $fp, $sp + +$LCFI22: + # Store all possible argument registers. If there are more than + # four arguments, then they are stored above where we put a3. + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 20($12) # cif pointer follows tramp. + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. + s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 20($12) # cif pointer follows tramp. + REG_L a1, 24($12) # fun + REG_L a2, 28($12) # user_data + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + +$do_closure: + la t9, ffi_closure_mips_inner_O32 + # Call ffi_closure_mips_inner_O32 to do the work. + jalr t9 + + # Load the return value into the appropriate register. + move $8, $2 + li $9, FFI_TYPE_VOID + beq $8, $9, closure_done + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp restore if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + li $9, FFI_TYPE_FLOAT + l.s $f0, V0_OFF2($fp) + beq $8, $9, closure_done + + li $9, FFI_TYPE_DOUBLE + l.d $f0, V0_OFF2($fp) + beq $8, $9, closure_done +#endif +1: + REG_L $3, V1_OFF2($fp) + REG_L $2, V0_OFF2($fp) + +closure_done: + # Epilogue + move $sp, $fp + REG_L $16, S0_OFF2($sp) # Restore s0 + REG_L $fp, FP_OFF2($sp) # Restore frame pointer + REG_L ra, RA_OFF2($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME2 + j ra +$LFE2: + .end ffi_closure_O32 + +/* DWARF-2 unwind info. 
*/ + + .section .eh_frame,"a",@progbits +$Lframe0: + .4byte $LECIE0-$LSCIE0 # Length of Common Information Entry +$LSCIE0: + .4byte 0x0 # CIE Identifier Tag + .byte 0x1 # CIE Version + .ascii "zR\0" # CIE Augmentation + .uleb128 0x1 # CIE Code Alignment Factor + .sleb128 4 # CIE Data Alignment Factor + .byte 0x1f # CIE RA Column + .uleb128 0x1 # Augmentation size + .byte 0x00 # FDE Encoding (absptr) + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1d + .uleb128 0x0 + .align 2 +$LECIE0: + +$LSFDE0: + .4byte $LEFDE0-$LASFDE0 # FDE Length +$LASFDE0: + .4byte $LASFDE0-$Lframe0 # FDE CIE offset + .4byte $LFB0 # FDE initial location + .4byte $LFE0-$LFB0 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI00-$LFB0 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 0x18 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI01-$LCFI00 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI02-$LCFI01 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x18 + .align 2 +$LEFDE0: + +$LSFDE1: + .4byte $LEFDE1-$LASFDE1 # FDE Length +$LASFDE1: + .4byte $LASFDE1-$Lframe0 # FDE CIE offset + .4byte $LFB1 # FDE initial location + .4byte $LFE1-$LFB1 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI10-$LFB1 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI11-$LCFI10 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI12-$LCFI11 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE1: + +$LSFDE2: + .4byte $LEFDE2-$LASFDE2 # FDE Length +$LASFDE2: + .4byte $LASFDE2-$Lframe0 # FDE CIE offset + .4byte $LFB2 # FDE initial location + .4byte $LFE2-$LFB2 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI20-$LFB2 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI21-$LCFI20 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI22-$LCFI21 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE2: + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/o32.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/o32.S new file mode 100644 index 0000000..799139b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/o32.S @@ -0,0 +1,504 @@ +/* ----------------------------------------------------------------------- + o32.S - Copyright (c) 1996, 1998, 2005 Red Hat, Inc. 
+ + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Only build this code if we are compiling for o32 */ + +#if defined(FFI_MIPS_O32) + +#define callback a0 +#define bytes a2 +#define flags a3 + +#define SIZEOF_FRAME (4 * FFI_SIZEOF_ARG + 2 * FFI_SIZEOF_ARG) +#define A3_OFF (SIZEOF_FRAME + 3 * FFI_SIZEOF_ARG) +#define FP_OFF (SIZEOF_FRAME - 2 * FFI_SIZEOF_ARG) +#define RA_OFF (SIZEOF_FRAME - 1 * FFI_SIZEOF_ARG) + + .abicalls + .text + .align 2 + .globl ffi_call_O32 + .ent ffi_call_O32 +ffi_call_O32: +$LFB0: + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +$LCFI00: + REG_S $fp, FP_OFF($sp) # Save frame pointer +$LCFI01: + REG_S ra, RA_OFF($sp) # Save return address +$LCFI02: + move $fp, $sp + +$LCFI03: + move t9, callback # callback function pointer + REG_S flags, A3_OFF($fp) # flags + + # Allocate at least 4 words in the argstack + LI v0, 4 * FFI_SIZEOF_ARG + blt bytes, v0, sixteen + + ADDU v0, bytes, 7 # make sure it is aligned + and v0, -8 # to an 8 byte boundry + +sixteen: + SUBU $sp, v0 # move the stack pointer to reflect the + # arg space + + ADDU a0, $sp, 4 * FFI_SIZEOF_ARG + + jalr t9 + + REG_L t0, A3_OFF($fp) # load the flags word + SRL t2, t0, 4 # shift our arg info + and t0, ((1<<4)-1) # mask out the return type + + ADDU $sp, 4 * FFI_SIZEOF_ARG # adjust $sp to new args + +#ifndef __mips_soft_float + bnez t0, pass_d # make it quick for int +#endif + REG_L a0, 0*FFI_SIZEOF_ARG($sp) # just go ahead and load the + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # four regs. 
+ REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +#ifndef __mips_soft_float +pass_d: + bne t0, FFI_ARGS_D, pass_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a2, 2*FFI_SIZEOF_ARG($sp) # passing a double + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f: + bne t0, FFI_ARGS_F, pass_d_d + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # passing a float + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_d: + bne t0, FFI_ARGS_DD, pass_f_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing two doubles + b call_it + +pass_f_f: + bne t0, FFI_ARGS_FF, pass_d_f + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 1*FFI_SIZEOF_ARG($sp) # passing two floats + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_f: + bne t0, FFI_ARGS_DF, pass_f_d + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f_d: + # assume that the only other combination must be float then double + # bne t0, FFI_ARGS_F_D, call_it + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float +#endif + +call_it: + # Load the static chain pointer + REG_L t7, SIZEOF_FRAME + 6*FFI_SIZEOF_ARG($fp) + + # Load the function pointer + REG_L t9, SIZEOF_FRAME + 5*FFI_SIZEOF_ARG($fp) + + # If the return value pointer is NULL, assume no return value. + REG_L t1, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + beqz t1, noretval + + bne t2, FFI_TYPE_INT, retlonglong + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v0, 0(t0) + b epilogue + +retlonglong: + # Really any 64-bit int, signed or not. + bne t2, FFI_TYPE_UINT64, retfloat + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v1, 4(t0) + REG_S v0, 0(t0) + b epilogue + +retfloat: + bne t2, FFI_TYPE_FLOAT, retdouble + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.s $f0, 0(t0) +#else + REG_S v0, 0(t0) +#endif + b epilogue + +retdouble: + bne t2, FFI_TYPE_DOUBLE, noretval + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.d $f0, 0(t0) +#else + REG_S v1, 4(t0) + REG_S v0, 0(t0) +#endif + b epilogue + +noretval: + jalr t9 + + # Epilogue +epilogue: + move $sp, $fp + REG_L $fp, FP_OFF($sp) # Restore frame pointer + REG_L ra, RA_OFF($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME # Fix stack pointer + j ra + +$LFE0: + .end ffi_call_O32 + + +/* ffi_closure_O32. Expects address of the passed-in ffi_closure + in t4 ($12). Stores any arguments passed in registers onto the + stack, then calls ffi_closure_mips_inner_O32, which + then decodes them. 
+ + Stack layout: + + 3 - a3 save + 2 - a2 save + 1 - a1 save + 0 - a0 save, original sp + -1 - ra save + -2 - fp save + -3 - $16 (s0) save + -4 - cprestore + -5 - return value high (v1) + -6 - return value low (v0) + -7 - f14 (le high, be low) + -8 - f14 (le low, be high) + -9 - f12 (le high, be low) + -10 - f12 (le low, be high) + -11 - Called function a5 save + -12 - Called function a4 save + -13 - Called function a3 save + -14 - Called function a2 save + -15 - Called function a1 save + -16 - Called function a0 save, our sp and fp point here + */ + +#define SIZEOF_FRAME2 (16 * FFI_SIZEOF_ARG) +#define A3_OFF2 (SIZEOF_FRAME2 + 3 * FFI_SIZEOF_ARG) +#define A2_OFF2 (SIZEOF_FRAME2 + 2 * FFI_SIZEOF_ARG) +#define A1_OFF2 (SIZEOF_FRAME2 + 1 * FFI_SIZEOF_ARG) +#define A0_OFF2 (SIZEOF_FRAME2 + 0 * FFI_SIZEOF_ARG) +#define RA_OFF2 (SIZEOF_FRAME2 - 1 * FFI_SIZEOF_ARG) +#define FP_OFF2 (SIZEOF_FRAME2 - 2 * FFI_SIZEOF_ARG) +#define S0_OFF2 (SIZEOF_FRAME2 - 3 * FFI_SIZEOF_ARG) +#define GP_OFF2 (SIZEOF_FRAME2 - 4 * FFI_SIZEOF_ARG) +#define V1_OFF2 (SIZEOF_FRAME2 - 5 * FFI_SIZEOF_ARG) +#define V0_OFF2 (SIZEOF_FRAME2 - 6 * FFI_SIZEOF_ARG) +#define FA_1_1_OFF2 (SIZEOF_FRAME2 - 7 * FFI_SIZEOF_ARG) +#define FA_1_0_OFF2 (SIZEOF_FRAME2 - 8 * FFI_SIZEOF_ARG) +#define FA_0_1_OFF2 (SIZEOF_FRAME2 - 9 * FFI_SIZEOF_ARG) +#define FA_0_0_OFF2 (SIZEOF_FRAME2 - 10 * FFI_SIZEOF_ARG) +#define CALLED_A5_OFF2 (SIZEOF_FRAME2 - 11 * FFI_SIZEOF_ARG) +#define CALLED_A4_OFF2 (SIZEOF_FRAME2 - 12 * FFI_SIZEOF_ARG) + + .text + + .align 2 + .globl ffi_go_closure_O32 + .ent ffi_go_closure_O32 +ffi_go_closure_O32: +$LFB1: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI10: + + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI11: + + move $fp, $sp +$LCFI12: + + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 4($15) # cif + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. + s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 4($15) # cif + REG_L a1, 8($15) # fun + move a2, $15 # user_data = go closure + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + + b $do_closure + +$LFE1: + .end ffi_go_closure_O32 + + .align 2 + .globl ffi_closure_O32 + .ent ffi_closure_O32 +ffi_closure_O32: +$LFB2: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI20: + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI21: + move $fp, $sp + +$LCFI22: + # Store all possible argument registers. If there are more than + # four arguments, then they are stored above where we put a3. + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 20($12) # cif pointer follows tramp. + REG_L $16, 0($16) # abi is first member. 
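[Editor's illustrative sketch - not part of the vendored libffi sources.] In the closure stub that follows, the li $13, 1 / bne comparison checks the cif->abi value just loaded into s0 against FFI_O32 to decide whether the hard-float registers were in play, and after ffi_closure_mips_inner_O32 returns, the branches dispatch on the type code it leaves in $2 (v0) to move the result from the V0_OFF2/V1_OFF2 buffer into the real return registers. The self-contained C sketch below models that return-register dispatch; the type-code constants are stand-ins, not the vendored definitions.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-ins for the FFI_TYPE_* codes returned by the inner function. */
enum { T_VOID = 0, T_INT = 1, T_FLOAT = 2, T_DOUBLE = 3 };

struct o32_result_regs {
  uint32_t v0, v1;  /* integer return registers */
  double   f0;      /* FP return register on hard-float O32 */
};

/* rvalue is the 8-byte buffer (V0_OFF2/V1_OFF2) filled by the inner
   function; type_code is what ffi_closure_mips_inner_O32 returned in v0. */
static void load_return_regs(int type_code, int soft_float,
                             const void *rvalue, struct o32_result_regs *out)
{
  if (type_code == T_VOID)
    return;                                    /* beq $8, $9, closure_done */
  if (!soft_float && type_code == T_FLOAT) {   /* l.s $f0, V0_OFF2($fp) */
    float f;
    memcpy(&f, rvalue, sizeof f);
    out->f0 = f;
    return;
  }
  if (!soft_float && type_code == T_DOUBLE) {  /* l.d $f0, V0_OFF2($fp) */
    memcpy(&out->f0, rvalue, sizeof out->f0);
    return;
  }
  /* everything else comes back in v0/v1 (REG_L $2/$3 from V0_OFF2/V1_OFF2) */
  memcpy(&out->v0, rvalue, 4);
  memcpy(&out->v1, (const char *)rvalue + 4, 4);
}

int main(void)
{
  struct o32_result_regs regs = { 0, 0, 0.0 };
  uint32_t buf[2] = { 42, 0 };
  load_return_regs(T_INT, /*soft_float=*/0, buf, &regs);
  printf("v0=%u v1=%u\n", regs.v0, regs.v1);
  return 0;
}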
+ + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. + s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 20($12) # cif pointer follows tramp. + REG_L a1, 24($12) # fun + REG_L a2, 28($12) # user_data + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + +$do_closure: + la t9, ffi_closure_mips_inner_O32 + # Call ffi_closure_mips_inner_O32 to do the work. + jalr t9 + + # Load the return value into the appropriate register. + move $8, $2 + li $9, FFI_TYPE_VOID + beq $8, $9, closure_done + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp restore if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + li $9, FFI_TYPE_FLOAT + l.s $f0, V0_OFF2($fp) + beq $8, $9, closure_done + + li $9, FFI_TYPE_DOUBLE + l.d $f0, V0_OFF2($fp) + beq $8, $9, closure_done +#endif +1: + REG_L $3, V1_OFF2($fp) + REG_L $2, V0_OFF2($fp) + +closure_done: + # Epilogue + move $sp, $fp + REG_L $16, S0_OFF2($sp) # Restore s0 + REG_L $fp, FP_OFF2($sp) # Restore frame pointer + REG_L ra, RA_OFF2($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME2 + j ra +$LFE2: + .end ffi_closure_O32 + +/* DWARF-2 unwind info. */ + + .section .eh_frame,"a",@progbits +$Lframe0: + .4byte $LECIE0-$LSCIE0 # Length of Common Information Entry +$LSCIE0: + .4byte 0x0 # CIE Identifier Tag + .byte 0x1 # CIE Version + .ascii "zR\0" # CIE Augmentation + .uleb128 0x1 # CIE Code Alignment Factor + .sleb128 4 # CIE Data Alignment Factor + .byte 0x1f # CIE RA Column + .uleb128 0x1 # Augmentation size + .byte 0x00 # FDE Encoding (absptr) + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1d + .uleb128 0x0 + .align 2 +$LECIE0: + +$LSFDE0: + .4byte $LEFDE0-$LASFDE0 # FDE Length +$LASFDE0: + .4byte $LASFDE0-$Lframe0 # FDE CIE offset + .4byte $LFB0 # FDE initial location + .4byte $LFE0-$LFB0 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI00-$LFB0 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 0x18 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI01-$LCFI00 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI02-$LCFI01 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x18 + .align 2 +$LEFDE0: + +$LSFDE1: + .4byte $LEFDE1-$LASFDE1 # FDE Length +$LASFDE1: + .4byte $LASFDE1-$Lframe0 # FDE CIE offset + .4byte $LFB1 # FDE initial location + .4byte $LFE1-$LFB1 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI10-$LFB1 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI11-$LCFI10 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI12-$LCFI11 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE1: + +$LSFDE2: + .4byte 
$LEFDE2-$LASFDE2 # FDE Length +$LASFDE2: + .4byte $LASFDE2-$Lframe0 # FDE CIE offset + .4byte $LFB2 # FDE initial location + .4byte $LFE2-$LFB2 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI20-$LFB2 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI21-$LCFI20 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI22-$LCFI21 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE2: + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi 2.S new file mode 100644 index 0000000..10cfb04 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi 2.S @@ -0,0 +1,101 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2012, 2013 Anthony Green + + Moxie Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # $r0 : ffi_prep_args + # $r1 : &ecif + # $r2 : cif->bytes + # $r3 : fig->flags + # $r4 : ecif.rvalue + # $r5 : fn + +ffi_call_EABI: + push $sp, $r6 + push $sp, $r7 + push $sp, $r8 + dec $sp, 24 + + /* Store incoming args on stack. */ + sto.l 0($sp), $r0 /* ffi_prep_args */ + sto.l 4($sp), $r1 /* ecif */ + sto.l 8($sp), $r2 /* bytes */ + sto.l 12($sp), $r3 /* flags */ + sto.l 16($sp), $r4 /* &rvalue */ + sto.l 20($sp), $r5 /* fn */ + + /* Call ffi_prep_args. */ + mov $r6, $r4 /* Save result buffer */ + mov $r7, $r5 /* Save the target fn */ + mov $r8, $r3 /* Save the flags */ + sub $sp, $r2 /* Allocate stack space */ + mov $r0, $sp /* We can stomp over $r0 */ + /* $r1 is already set up */ + jsra ffi_prep_args + + /* Load register arguments. */ + ldo.l $r0, 0($sp) + ldo.l $r1, 4($sp) + ldo.l $r2, 8($sp) + ldo.l $r3, 12($sp) + ldo.l $r4, 16($sp) + ldo.l $r5, 20($sp) + + /* Call the target function. 
*/ + jsr $r7 + + ldi.l $r7, 0xffffffff + cmp $r8, $r7 + beq retstruct + + ldi.l $r7, 4 + cmp $r8, $r7 + bgt ret2reg + + st.l ($r6), $r0 + jmpa retdone + +ret2reg: + st.l ($r6), $r0 + sto.l 4($r6), $r1 + +retstruct: +retdone: + /* Return. */ + ldo.l $r6, -4($fp) + ldo.l $r7, -8($fp) + ldo.l $r8, -12($fp) + ret + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi.S new file mode 100644 index 0000000..10cfb04 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi.S @@ -0,0 +1,101 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2012, 2013 Anthony Green + + Moxie Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # $r0 : ffi_prep_args + # $r1 : &ecif + # $r2 : cif->bytes + # $r3 : fig->flags + # $r4 : ecif.rvalue + # $r5 : fn + +ffi_call_EABI: + push $sp, $r6 + push $sp, $r7 + push $sp, $r8 + dec $sp, 24 + + /* Store incoming args on stack. */ + sto.l 0($sp), $r0 /* ffi_prep_args */ + sto.l 4($sp), $r1 /* ecif */ + sto.l 8($sp), $r2 /* bytes */ + sto.l 12($sp), $r3 /* flags */ + sto.l 16($sp), $r4 /* &rvalue */ + sto.l 20($sp), $r5 /* fn */ + + /* Call ffi_prep_args. */ + mov $r6, $r4 /* Save result buffer */ + mov $r7, $r5 /* Save the target fn */ + mov $r8, $r3 /* Save the flags */ + sub $sp, $r2 /* Allocate stack space */ + mov $r0, $sp /* We can stomp over $r0 */ + /* $r1 is already set up */ + jsra ffi_prep_args + + /* Load register arguments. */ + ldo.l $r0, 0($sp) + ldo.l $r1, 4($sp) + ldo.l $r2, 8($sp) + ldo.l $r3, 12($sp) + ldo.l $r4, 16($sp) + ldo.l $r5, 20($sp) + + /* Call the target function. */ + jsr $r7 + + ldi.l $r7, 0xffffffff + cmp $r8, $r7 + beq retstruct + + ldi.l $r7, 4 + cmp $r8, $r7 + bgt ret2reg + + st.l ($r6), $r0 + jmpa retdone + +ret2reg: + st.l ($r6), $r0 + sto.l 4($r6), $r1 + +retstruct: +retdone: + /* Return. 
*/ + ldo.l $r6, -4($fp) + ldo.l $r7, -8($fp) + ldo.l $r8, -12($fp) + ret + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi 2.c new file mode 100644 index 0000000..16d2bb3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi 2.c @@ -0,0 +1,285 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2012, 2013, 2018 Anthony Green + + Moxie Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 
24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in $r12. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("$r12"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("$fp"); + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) arg1; + + /* 6 words reserved for register args + 3 words from jsr */ + char *stack_args = frame_pointer + 9*4; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + char *register_args_ptr = (char *) register_args; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ptr += 4; + register_args_ptr = (char *)®ister_args[1]; + } + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + if (ptr == (char *) ®ister_args[5]) + { + /* The value is split across two locations */ + unsigned *ip = alloca(8); + avalue[i] = ip; + ip[0] = *(unsigned *) ptr; + ip[1] = *(unsigned *) stack_args; + } + else + { + avalue[i] = ptr; + } + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == (char *) ®ister_args[6]) + ptr = stack_args; + else if (ptr == (char *) ®ister_args[7]) + ptr = stack_args + 4; + } + + /* Invoke the closure. 
*/ + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + asm ("mov $r12, %0\n ld.l $r0, ($r12)\n ldo.l $r1, 4($r12)" : : "r" (&rvalue)); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; + + if (cif->abi != FFI_EABI) + return FFI_BAD_ABI; + + fn = (unsigned long) ffi_closure_eabi; + + tramp[0] = 0x01e0; /* ldi.l $r12, .... */ + tramp[1] = cls >> 16; + tramp[2] = cls & 0xffff; + tramp[3] = 0x1a00; /* jmpa .... */ + tramp[4] = fn >> 16; + tramp[5] = fn & 0xffff; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi.c new file mode 100644 index 0000000..16d2bb3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi.c @@ -0,0 +1,285 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2012, 2013, 2018 Anthony Green + + Moxie Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in $r12. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("$r12"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("$fp"); + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) arg1; + + /* 6 words reserved for register args + 3 words from jsr */ + char *stack_args = frame_pointer + 9*4; + + /* Lay the register arguments down in a continuous chunk of memory. 
*/ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + char *register_args_ptr = (char *) register_args; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ptr += 4; + register_args_ptr = (char *)®ister_args[1]; + } + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + if (ptr == (char *) ®ister_args[5]) + { + /* The value is split across two locations */ + unsigned *ip = alloca(8); + avalue[i] = ip; + ip[0] = *(unsigned *) ptr; + ip[1] = *(unsigned *) stack_args; + } + else + { + avalue[i] = ptr; + } + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == (char *) ®ister_args[6]) + ptr = stack_args; + else if (ptr == (char *) ®ister_args[7]) + ptr = stack_args + 4; + } + + /* Invoke the closure. */ + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + asm ("mov $r12, %0\n ld.l $r0, ($r12)\n ldo.l $r1, 4($r12)" : : "r" (&rvalue)); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; + + if (cif->abi != FFI_EABI) + return FFI_BAD_ABI; + + fn = (unsigned long) ffi_closure_eabi; + + tramp[0] = 0x01e0; /* ldi.l $r12, .... */ + tramp[1] = cls >> 16; + tramp[2] = cls & 0xffff; + tramp[3] = 0x1a00; /* jmpa .... 
*/ + tramp[4] = fn >> 16; + tramp[5] = fn & 0xffff; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget 2.h new file mode 100644 index 0000000..623e3ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget 2.h @@ -0,0 +1,52 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2013 Anthony Green + Target configuration macros for Moxie + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_DEFAULT_ABI = FFI_EABI, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +/* Trampolines are 12-bytes long. See ffi_prep_closure_loc. */ +#define FFI_TRAMPOLINE_SIZE (12) + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget.h new file mode 100644 index 0000000..623e3ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget.h @@ -0,0 +1,52 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2013 Anthony Green + Target configuration macros for Moxie + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_DEFAULT_ABI = FFI_EABI, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +/* Trampolines are 12-bytes long. See ffi_prep_closure_loc. */ +#define FFI_TRAMPOLINE_SIZE (12) + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi 2.c new file mode 100644 index 0000000..721080d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi 2.c @@ -0,0 +1,304 @@ +/* libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#include +#include + +#include + +/* The Nios II Processor Reference Handbook defines the procedure call + ABI as follows. + + Arguments are passed as if a structure containing the types of + the arguments were constructed. The first 16 bytes are passed in r4 + through r7, the remainder on the stack. The first 16 bytes of a function + taking variable arguments are passed in r4-r7 in the same way. + + Return values of types up to 8 bytes are returned in r2 and r3. For + return values greater than 8 bytes, the caller must allocate memory for + the result and pass the address as if it were argument 0. + + While this isn't specified explicitly in the ABI documentation, GCC + promotes integral arguments smaller than int size to 32 bits. + + Also of note, the ABI specifies that all structure objects are + aligned to 32 bits even if all their fields have a smaller natural + alignment. 
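[Editor's illustrative sketch - not part of the vendored libffi sources.] To make the Nios II calling-convention description above concrete: ffi_prep_args below lays each argument out as if it were a struct member (aligned to its natural alignment, with sub-int integers promoted to 32 bits), the first 16 bytes of that block travel in r4-r7, and the remainder spills to the stack; ffi_prep_cif_machdep keeps the block at least 16 bytes and rounds it to a multiple of 4. The small C program below walks a hypothetical argument list through that layout.

#include <stdio.h>
#include <stddef.h>

struct arg_desc { const char *name; size_t size; size_t align; };

int main(void)
{
  /* e.g. g(short, int, int, int, int): the short is promoted to 32 bits */
  struct arg_desc args[] = {
    { "short (promoted to int)", 4, 4 },
    { "int", 4, 4 },
    { "int", 4, 4 },
    { "int", 4, 4 },
    { "int", 4, 4 },
  };
  size_t offset = 0;
  for (size_t i = 0; i < sizeof args / sizeof args[0]; i++) {
    /* align like a struct member, then place the argument */
    offset = (offset + args[i].align - 1) & ~(args[i].align - 1);
    printf("%-25s at offset %2zu -> %s\n", args[i].name, offset,
           offset < 16 ? "r4-r7" : "stack");
    offset += args[i].size;
  }
  /* at least 16 bytes, rounded up to a multiple of 4, as in
     ffi_prep_cif_machdep below */
  size_t block = offset < 16 ? 16 : ((offset + 3) & ~(size_t)3);
  printf("parameter block size: %zu bytes\n", block);
  return 0;
}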
See FFI_AGGREGATE_ALIGNMENT. */ + + +/* Declare the assembly language hooks. */ + +extern UINT64 ffi_call_sysv (void (*) (char *, extended_cif *), + extended_cif *, + unsigned, + void (*fn) (void)); +extern void ffi_closure_sysv (void); + +/* Perform machine-dependent cif processing. */ + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* We always want at least 16 bytes in the parameter block since it + simplifies the low-level call function. Also round the parameter + block size up to a multiple of 4 bytes to preserve + 32-bit alignment of the stack pointer. */ + if (cif->bytes < 16) + cif->bytes = 16; + else + cif->bytes = (cif->bytes + 3) & ~3; + + return FFI_OK; +} + + +/* ffi_prep_args is called by the assembly routine to transfer arguments + to the stack using the pointers in the ecif array. + Note that the stack buffer is big enough to fit all the arguments, + but the first 16 bytes will be copied to registers for the actual + call. */ + +void ffi_prep_args (char *stack, extended_cif *ecif) +{ + char *argp = stack; + unsigned int i; + + /* The implicit return value pointer is passed as if it were a hidden + first argument. */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && ecif->cif->rtype->size > 8) + { + (*(void **) argp) = ecif->rvalue; + argp += 4; + } + + for (i = 0; i < ecif->cif->nargs; i++) + { + void *avalue = ecif->avalue[i]; + ffi_type *atype = ecif->cif->arg_types[i]; + size_t size = atype->size; + size_t alignment = atype->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Copy the argument, promoting integral types smaller than a + word to word size. */ + if (size < sizeof (int)) + { + size = sizeof (int); + switch (atype->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) avalue; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) avalue; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) avalue; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) avalue; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, avalue, atype->size); + break; + + default: + FFI_ASSERT(0); + } + } + else if (size == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) avalue; + else + memcpy (argp, avalue, size); + argp += size; + } +} + + +/* Call FN using the prepared CIF. RVALUE points to space allocated by + the caller for the return value, and AVALUE is an array of argument + pointers. */ + +void ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + + extended_cif ecif; + UINT64 result; + + /* If bigret is true, this is the case where a return value of larger + than 8 bytes is handled by being passed by reference as an implicit + argument. */ + int bigret = (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8); + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Allocate space for return value if this is the pass-by-reference case + and the caller did not provide a buffer. */ + if (rvalue == NULL && bigret) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + result = ffi_call_sysv (ffi_prep_args, &ecif, cif->bytes, fn); + + /* Now result contains the 64 bit contents returned from fn in + r2 and r3. Copy the value of the appropriate size to the user-provided + rvalue buffer. 
*/ + if (rvalue && !bigret) + switch (cif->rtype->size) + { + case 1: + *(UINT8 *)rvalue = (UINT8) result; + break; + case 2: + *(UINT16 *)rvalue = (UINT16) result; + break; + case 4: + *(UINT32 *)rvalue = (UINT32) result; + break; + case 8: + *(UINT64 *)rvalue = (UINT64) result; + break; + default: + memcpy (rvalue, (void *)&result, cif->rtype->size); + break; + } +} + +/* This function is invoked from the closure trampoline to invoke + CLOSURE with argument block ARGS. Parse ARGS according to + CLOSURE->cfi and invoke CLOSURE->fun. */ + +static UINT64 +ffi_closure_helper (unsigned char *args, + ffi_closure *closure) +{ + ffi_cif *cif = closure->cif; + unsigned char *argp = args; + void **parsed_args = alloca (cif->nargs * sizeof (void *)); + UINT64 result; + void *retptr; + unsigned int i; + + /* First figure out what to do about the return type. If this is the + big-structure-return case, the first arg is the hidden return buffer + allocated by the caller. */ + if (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + { + retptr = *((void **) argp); + argp += 4; + } + else + retptr = (void *) &result; + + /* Fill in the array of argument pointers. */ + for (i = 0; i < cif->nargs; i++) + { + size_t size = cif->arg_types[i]->size; + size_t alignment = cif->arg_types[i]->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Arguments smaller than an int are promoted to int. */ + if (size < sizeof (int)) + size = sizeof (int); + + /* Store the pointer. */ + parsed_args[i] = argp; + argp += size; + } + + /* Call the user-supplied function. */ + (closure->fun) (cif, retptr, parsed_args, closure->user_data); + return result; +} + + +/* Initialize CLOSURE with a trampoline to call FUN with + CIF and USER_DATA. */ +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun) (ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + int i; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + /* The trampoline looks like: + movhi r8, %hi(ffi_closure_sysv) + ori r8, r8, %lo(ffi_closure_sysv) + movhi r9, %hi(ffi_closure_helper) + ori r0, r9, %lo(ffi_closure_helper) + movhi r10, %hi(closure) + ori r10, r10, %lo(closure) + jmp r8 + and then ffi_closure_sysv retrieves the closure pointer out of r10 + in addition to the arguments passed in the normal way for the call, + and invokes ffi_closure_helper. We encode the pointer to + ffi_closure_helper in the trampoline because making a PIC call + to it in ffi_closure_sysv would be messy (it would have to indirect + through the GOT). */ + +#define HI(x) ((((unsigned int) (x)) >> 16) & 0xffff) +#define LO(x) (((unsigned int) (x)) & 0xffff) + tramp[0] = (0 << 27) | (8 << 22) | (HI (ffi_closure_sysv) << 6) | 0x34; + tramp[1] = (8 << 27) | (8 << 22) | (LO (ffi_closure_sysv) << 6) | 0x14; + tramp[2] = (0 << 27) | (9 << 22) | (HI (ffi_closure_helper) << 6) | 0x34; + tramp[3] = (9 << 27) | (9 << 22) | (LO (ffi_closure_helper) << 6) | 0x14; + tramp[4] = (0 << 27) | (10 << 22) | (HI (closure) << 6) | 0x34; + tramp[5] = (10 << 27) | (10 << 22) | (LO (closure) << 6) | 0x14; + tramp[6] = (8 << 27) | (0x0d << 11) | 0x3a; +#undef HI +#undef LO + + /* Flush the caches. + See Example 9-4 in the Nios II Software Developer's Handbook. 
*/ + for (i = 0; i < 7; i++) + asm volatile ("flushd 0(%0); flushi %0" :: "r"(tramp + i) : "memory"); + asm volatile ("flushp" ::: "memory"); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi.c new file mode 100644 index 0000000..721080d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi.c @@ -0,0 +1,304 @@ +/* libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#include +#include + +#include + +/* The Nios II Processor Reference Handbook defines the procedure call + ABI as follows. + + Arguments are passed as if a structure containing the types of + the arguments were constructed. The first 16 bytes are passed in r4 + through r7, the remainder on the stack. The first 16 bytes of a function + taking variable arguments are passed in r4-r7 in the same way. + + Return values of types up to 8 bytes are returned in r2 and r3. For + return values greater than 8 bytes, the caller must allocate memory for + the result and pass the address as if it were argument 0. + + While this isn't specified explicitly in the ABI documentation, GCC + promotes integral arguments smaller than int size to 32 bits. + + Also of note, the ABI specifies that all structure objects are + aligned to 32 bits even if all their fields have a smaller natural + alignment. See FFI_AGGREGATE_ALIGNMENT. */ + + +/* Declare the assembly language hooks. */ + +extern UINT64 ffi_call_sysv (void (*) (char *, extended_cif *), + extended_cif *, + unsigned, + void (*fn) (void)); +extern void ffi_closure_sysv (void); + +/* Perform machine-dependent cif processing. */ + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* We always want at least 16 bytes in the parameter block since it + simplifies the low-level call function. Also round the parameter + block size up to a multiple of 4 bytes to preserve + 32-bit alignment of the stack pointer. */ + if (cif->bytes < 16) + cif->bytes = 16; + else + cif->bytes = (cif->bytes + 3) & ~3; + + return FFI_OK; +} + + +/* ffi_prep_args is called by the assembly routine to transfer arguments + to the stack using the pointers in the ecif array. 
+ Note that the stack buffer is big enough to fit all the arguments, + but the first 16 bytes will be copied to registers for the actual + call. */ + +void ffi_prep_args (char *stack, extended_cif *ecif) +{ + char *argp = stack; + unsigned int i; + + /* The implicit return value pointer is passed as if it were a hidden + first argument. */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && ecif->cif->rtype->size > 8) + { + (*(void **) argp) = ecif->rvalue; + argp += 4; + } + + for (i = 0; i < ecif->cif->nargs; i++) + { + void *avalue = ecif->avalue[i]; + ffi_type *atype = ecif->cif->arg_types[i]; + size_t size = atype->size; + size_t alignment = atype->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Copy the argument, promoting integral types smaller than a + word to word size. */ + if (size < sizeof (int)) + { + size = sizeof (int); + switch (atype->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) avalue; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) avalue; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) avalue; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) avalue; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, avalue, atype->size); + break; + + default: + FFI_ASSERT(0); + } + } + else if (size == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) avalue; + else + memcpy (argp, avalue, size); + argp += size; + } +} + + +/* Call FN using the prepared CIF. RVALUE points to space allocated by + the caller for the return value, and AVALUE is an array of argument + pointers. */ + +void ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + + extended_cif ecif; + UINT64 result; + + /* If bigret is true, this is the case where a return value of larger + than 8 bytes is handled by being passed by reference as an implicit + argument. */ + int bigret = (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8); + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Allocate space for return value if this is the pass-by-reference case + and the caller did not provide a buffer. */ + if (rvalue == NULL && bigret) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + result = ffi_call_sysv (ffi_prep_args, &ecif, cif->bytes, fn); + + /* Now result contains the 64 bit contents returned from fn in + r2 and r3. Copy the value of the appropriate size to the user-provided + rvalue buffer. */ + if (rvalue && !bigret) + switch (cif->rtype->size) + { + case 1: + *(UINT8 *)rvalue = (UINT8) result; + break; + case 2: + *(UINT16 *)rvalue = (UINT16) result; + break; + case 4: + *(UINT32 *)rvalue = (UINT32) result; + break; + case 8: + *(UINT64 *)rvalue = (UINT64) result; + break; + default: + memcpy (rvalue, (void *)&result, cif->rtype->size); + break; + } +} + +/* This function is invoked from the closure trampoline to invoke + CLOSURE with argument block ARGS. Parse ARGS according to + CLOSURE->cfi and invoke CLOSURE->fun. */ + +static UINT64 +ffi_closure_helper (unsigned char *args, + ffi_closure *closure) +{ + ffi_cif *cif = closure->cif; + unsigned char *argp = args; + void **parsed_args = alloca (cif->nargs * sizeof (void *)); + UINT64 result; + void *retptr; + unsigned int i; + + /* First figure out what to do about the return type. 
If this is the + big-structure-return case, the first arg is the hidden return buffer + allocated by the caller. */ + if (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + { + retptr = *((void **) argp); + argp += 4; + } + else + retptr = (void *) &result; + + /* Fill in the array of argument pointers. */ + for (i = 0; i < cif->nargs; i++) + { + size_t size = cif->arg_types[i]->size; + size_t alignment = cif->arg_types[i]->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Arguments smaller than an int are promoted to int. */ + if (size < sizeof (int)) + size = sizeof (int); + + /* Store the pointer. */ + parsed_args[i] = argp; + argp += size; + } + + /* Call the user-supplied function. */ + (closure->fun) (cif, retptr, parsed_args, closure->user_data); + return result; +} + + +/* Initialize CLOSURE with a trampoline to call FUN with + CIF and USER_DATA. */ +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun) (ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + int i; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + /* The trampoline looks like: + movhi r8, %hi(ffi_closure_sysv) + ori r8, r8, %lo(ffi_closure_sysv) + movhi r9, %hi(ffi_closure_helper) + ori r0, r9, %lo(ffi_closure_helper) + movhi r10, %hi(closure) + ori r10, r10, %lo(closure) + jmp r8 + and then ffi_closure_sysv retrieves the closure pointer out of r10 + in addition to the arguments passed in the normal way for the call, + and invokes ffi_closure_helper. We encode the pointer to + ffi_closure_helper in the trampoline because making a PIC call + to it in ffi_closure_sysv would be messy (it would have to indirect + through the GOT). */ + +#define HI(x) ((((unsigned int) (x)) >> 16) & 0xffff) +#define LO(x) (((unsigned int) (x)) & 0xffff) + tramp[0] = (0 << 27) | (8 << 22) | (HI (ffi_closure_sysv) << 6) | 0x34; + tramp[1] = (8 << 27) | (8 << 22) | (LO (ffi_closure_sysv) << 6) | 0x14; + tramp[2] = (0 << 27) | (9 << 22) | (HI (ffi_closure_helper) << 6) | 0x34; + tramp[3] = (9 << 27) | (9 << 22) | (LO (ffi_closure_helper) << 6) | 0x14; + tramp[4] = (0 << 27) | (10 << 22) | (HI (closure) << 6) | 0x34; + tramp[5] = (10 << 27) | (10 << 22) | (LO (closure) << 6) | 0x14; + tramp[6] = (8 << 27) | (0x0d << 11) | 0x3a; +#undef HI +#undef LO + + /* Flush the caches. + See Example 9-4 in the Nios II Software Developer's Handbook. */ + for (i = 0; i < 7; i++) + asm volatile ("flushd 0(%0); flushi %0" :: "r"(tramp + i) : "memory"); + asm volatile ("flushp" ::: "memory"); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget 2.h new file mode 100644 index 0000000..134d118 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget 2.h @@ -0,0 +1,52 @@ +/* libffi target includes for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Structures have a 4-byte alignment even if all the fields have lesser + alignment requirements. */ +#define FFI_AGGREGATE_ALIGNMENT 4 + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 28 /* 7 instructions */ +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget.h new file mode 100644 index 0000000..134d118 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget.h @@ -0,0 +1,52 @@ +/* libffi target includes for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Structures have a 4-byte alignment even if all the fields have lesser + alignment requirements. */ +#define FFI_AGGREGATE_ALIGNMENT 4 + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 28 /* 7 instructions */ +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv 2.S new file mode 100644 index 0000000..75f442b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv 2.S @@ -0,0 +1,136 @@ +/* Low-level libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +/* This function is declared on the C side as + + extern UINT64 ffi_call_sysv (void (*arghook) (char *, extended_cif *), + extended_cif *ecif, + unsigned nbytes, + void (*fn) (void)); + + On input, the arguments appear as + r4 = arghook + r5 = ecif + r6 = nbytes + r7 = fn +*/ + + .section .text + .align 2 + .global ffi_call_sysv + .type ffi_call_sysv, @function + +ffi_call_sysv: + .cfi_startproc + + /* Create the stack frame, saving r16 so we can use it locally. */ + addi sp, sp, -12 + .cfi_def_cfa_offset 12 + stw ra, 8(sp) + stw fp, 4(sp) + stw r16, 0(sp) + .cfi_offset 31, -4 + .cfi_offset 28, -8 + .cfi_offset 16, -12 + mov fp, sp + .cfi_def_cfa_register 28 + mov r16, r7 + + /* Adjust the stack pointer to create the argument buffer + nbytes long. */ + sub sp, sp, r6 + + /* Call the arghook function. */ + mov r2, r4 /* fn */ + mov r4, sp /* argbuffer */ + callr r2 /* r5 already contains ecif */ + + /* Pop off the first 16 bytes of the argument buffer on the stack, + transferring the contents to the argument registers. */ + ldw r4, 0(sp) + ldw r5, 4(sp) + ldw r6, 8(sp) + ldw r7, 12(sp) + addi sp, sp, 16 + + /* Call the user function, which leaves its result in r2 and r3. */ + callr r16 + + /* Pop off the stack frame. */ + mov sp, fp + ldw ra, 8(sp) + ldw fp, 4(sp) + ldw r16, 0(sp) + addi sp, sp, 12 + ret + .cfi_endproc + .size ffi_call_sysv, .-ffi_call_sysv + + +/* Closure trampolines jump here after putting the C helper address + in r9 and the closure pointer in r10. The user-supplied arguments + to the closure are in the normal places, in r4-r7 and on the + stack. 
Push the register arguments on the stack too and then call the + C helper function to deal with them. */ + + .section .text + .align 2 + .global ffi_closure_sysv + .type ffi_closure_sysv, @function + +ffi_closure_sysv: + .cfi_startproc + + /* Create the stack frame, pushing the register args on the stack + just below the stack args. This is the same trick illustrated + in Figure 7-3 in the Nios II Processor Reference Handbook, used + for variable arguments and structures passed by value. */ + addi sp, sp, -20 + .cfi_def_cfa_offset 20 + stw ra, 0(sp) + .cfi_offset 31, -20 + stw r4, 4(sp) + .cfi_offset 4, -16 + stw r5, 8(sp) + .cfi_offset 5, -12 + stw r6, 12(sp) + .cfi_offset 6, -8 + stw r7, 16(sp) + .cfi_offset 7, -4 + + /* Call the helper. + r4 = pointer to arguments on stack + r5 = closure pointer (loaded in r10 by the trampoline) + r9 = address of helper function (loaded by trampoline) */ + addi r4, sp, 4 + mov r5, r10 + callr r9 + + /* Pop the stack and return. */ + ldw ra, 0(sp) + addi sp, sp, 20 + .cfi_def_cfa_offset -20 + ret + .cfi_endproc + .size ffi_closure_sysv, .-ffi_closure_sysv + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv.S new file mode 100644 index 0000000..75f442b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv.S @@ -0,0 +1,136 @@ +/* Low-level libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +/* This function is declared on the C side as + + extern UINT64 ffi_call_sysv (void (*arghook) (char *, extended_cif *), + extended_cif *ecif, + unsigned nbytes, + void (*fn) (void)); + + On input, the arguments appear as + r4 = arghook + r5 = ecif + r6 = nbytes + r7 = fn +*/ + + .section .text + .align 2 + .global ffi_call_sysv + .type ffi_call_sysv, @function + +ffi_call_sysv: + .cfi_startproc + + /* Create the stack frame, saving r16 so we can use it locally. */ + addi sp, sp, -12 + .cfi_def_cfa_offset 12 + stw ra, 8(sp) + stw fp, 4(sp) + stw r16, 0(sp) + .cfi_offset 31, -4 + .cfi_offset 28, -8 + .cfi_offset 16, -12 + mov fp, sp + .cfi_def_cfa_register 28 + mov r16, r7 + + /* Adjust the stack pointer to create the argument buffer + nbytes long. */ + sub sp, sp, r6 + + /* Call the arghook function. 
*/ + mov r2, r4 /* fn */ + mov r4, sp /* argbuffer */ + callr r2 /* r5 already contains ecif */ + + /* Pop off the first 16 bytes of the argument buffer on the stack, + transferring the contents to the argument registers. */ + ldw r4, 0(sp) + ldw r5, 4(sp) + ldw r6, 8(sp) + ldw r7, 12(sp) + addi sp, sp, 16 + + /* Call the user function, which leaves its result in r2 and r3. */ + callr r16 + + /* Pop off the stack frame. */ + mov sp, fp + ldw ra, 8(sp) + ldw fp, 4(sp) + ldw r16, 0(sp) + addi sp, sp, 12 + ret + .cfi_endproc + .size ffi_call_sysv, .-ffi_call_sysv + + +/* Closure trampolines jump here after putting the C helper address + in r9 and the closure pointer in r10. The user-supplied arguments + to the closure are in the normal places, in r4-r7 and on the + stack. Push the register arguments on the stack too and then call the + C helper function to deal with them. */ + + .section .text + .align 2 + .global ffi_closure_sysv + .type ffi_closure_sysv, @function + +ffi_closure_sysv: + .cfi_startproc + + /* Create the stack frame, pushing the register args on the stack + just below the stack args. This is the same trick illustrated + in Figure 7-3 in the Nios II Processor Reference Handbook, used + for variable arguments and structures passed by value. */ + addi sp, sp, -20 + .cfi_def_cfa_offset 20 + stw ra, 0(sp) + .cfi_offset 31, -20 + stw r4, 4(sp) + .cfi_offset 4, -16 + stw r5, 8(sp) + .cfi_offset 5, -12 + stw r6, 12(sp) + .cfi_offset 6, -8 + stw r7, 16(sp) + .cfi_offset 7, -4 + + /* Call the helper. + r4 = pointer to arguments on stack + r5 = closure pointer (loaded in r10 by the trampoline) + r9 = address of helper function (loaded by trampoline) */ + addi r4, sp, 4 + mov r5, r10 + callr r9 + + /* Pop the stack and return. */ + ldw ra, 0(sp) + addi sp, sp, 20 + .cfi_def_cfa_offset -20 + ret + .cfi_endproc + .size ffi_closure_sysv, .-ffi_closure_sysv + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi 2.c new file mode 100644 index 0000000..2bad938 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi 2.c @@ -0,0 +1,328 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include "ffi_common.h" + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void* ffi_prep_args(char *stack, extended_cif *ecif) +{ + char *stacktemp = stack; + int i, s; + ffi_type **arg; + int count = 0; + int nfixedargs; + + nfixedargs = ecif->cif->nfixedargs; + arg = ecif->cif->arg_types; + void **argv = ecif->avalue; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) stack = ecif->rvalue; + stack += 4; + count = 4; + } + for(i=0; icif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + count = 24; + stack = stacktemp + 24; + } + nfixedargs--; + + s = 4; + switch((*arg)->type) + { + case FFI_TYPE_STRUCT: + *(void **)stack = *argv; + break; + + case FFI_TYPE_SINT8: + *(signed int *) stack = (signed int)*(SINT8 *)(* argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) stack = (unsigned int)*(UINT8 *)(* argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) stack = (signed int)*(SINT16 *)(* argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) stack = (unsigned int)*(UINT16 *)(* argv); + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + *(int *)stack = *(int*)(*argv); + break; + + default: /* 8 byte types */ + if (count == 20) /* never split arguments */ + { + stack += 4; + count += 4; + } + s = (*arg)->size; + memcpy(stack, *argv, s); + break; + } + + stack += s; + count += s; + argv++; + arg++; + } + return stacktemp + ((count>24)?24:0); +} + +extern void ffi_call_SYSV(unsigned, + extended_cif *, + void *(*)(int *, extended_cif *), + unsigned *, + void (*fn)(void), + unsigned); + + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + int i; + int size; + ffi_type **arg; + + /* Calculate size to allocate on stack */ + + for(i = 0, arg = cif->arg_types, size=0; i < cif->nargs; i++, arg++) + { + if ((*arg)->type == FFI_TYPE_STRUCT) + size += 4; + else + if ((*arg)->size <= 4) + size += 4; + else + size += 8; + } + + /* for variadic functions more space is needed on the stack */ + if (cif->nargs != cif->nfixedargs) + size += 24; + + if (cif->rtype->type == FFI_TYPE_STRUCT) + size += 4; + + + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(size, &ecif, ffi_prep_args, rvalue, fn, cif->flags); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +void ffi_closure_SYSV(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, unsigned long r8) +{ + register int *sp __asm__ ("r17"); + register int *r13 __asm__ ("r13"); + + ffi_closure* closure = (ffi_closure*) r13; + char *stack_args = sp; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { r3, r4, r5, r6, r7, r8 }; + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) r3; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int count = 0; + int nfixedargs = cif->nfixedargs; + int i; + + /* preserve struct type return pointer passing */ + + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ptr += 4; + count = 4; + } + + /* Find the address of each argument. 
*/ + for (i = 0; i < cif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + ptr = stack_args; + count = 24; + } + nfixedargs--; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + + default: + /* 8-byte values */ + + /* arguments are never splitted */ + if (ptr == ®ister_args[5]) + ptr = stack_args; + avalue[i] = ptr; + ptr += 4; + count += 4; + break; + } + ptr += 4; + count += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + + if (count == 24) + ptr = stack_args; + } + + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } else + { + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + if (cif->rtype) + asm ("l.ori r12, %0, 0x0\n l.lwz r11, 0(r12)\n l.lwz r12, 4(r12)" : : "r" (&rvalue)); + } +} + + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) closure->tramp; + unsigned long fn = (unsigned long) ffi_closure_SYSV; + unsigned long cls = (unsigned long) codeloc; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + /* write pointers to temporary registers */ + tramp[0] = (0x6 << 10) | (13 << 5); /* l.movhi r13, ... */ + tramp[1] = cls >> 16; + tramp[2] = (0x2a << 10) | (13 << 5) | 13; /* l.ori r13, r13, ... */ + tramp[3] = cls & 0xFFFF; + + tramp[4] = (0x6 << 10) | (15 << 5); /* l.movhi r15, ... */ + tramp[5] = fn >> 16; + tramp[6] = (0x2a << 10) | (15 << 5) | 15; /* l.ori r15, r15 ... */ + tramp[7] = fn & 0xFFFF; + + tramp[8] = (0x11 << 10); /* l.jr r15 */ + tramp[9] = 15 << 11; + + tramp[10] = (0x2a << 10) | (17 << 5) | 1; /* l.ori r17, r1, ... 
*/ + tramp[11] = 0x0; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + cif->flags = 0; + + /* structures are returned as pointers */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = FFI_TYPE_STRUCT; + else + if (cif->rtype->size > 4) + cif->flags = FFI_TYPE_UINT64; + + cif->nfixedargs = cif->nargs; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + ffi_status status; + + status = ffi_prep_cif_machdep (cif); + cif->nfixedargs = nfixedargs; + return status; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi.c new file mode 100644 index 0000000..2bad938 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi.c @@ -0,0 +1,328 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include "ffi_common.h" + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void* ffi_prep_args(char *stack, extended_cif *ecif) +{ + char *stacktemp = stack; + int i, s; + ffi_type **arg; + int count = 0; + int nfixedargs; + + nfixedargs = ecif->cif->nfixedargs; + arg = ecif->cif->arg_types; + void **argv = ecif->avalue; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) stack = ecif->rvalue; + stack += 4; + count = 4; + } + for(i=0; icif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + count = 24; + stack = stacktemp + 24; + } + nfixedargs--; + + s = 4; + switch((*arg)->type) + { + case FFI_TYPE_STRUCT: + *(void **)stack = *argv; + break; + + case FFI_TYPE_SINT8: + *(signed int *) stack = (signed int)*(SINT8 *)(* argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) stack = (unsigned int)*(UINT8 *)(* argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) stack = (signed int)*(SINT16 *)(* argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) stack = (unsigned int)*(UINT16 *)(* argv); + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + *(int *)stack = *(int*)(*argv); + break; + + default: /* 8 byte types */ + if (count == 20) /* never split arguments */ + { + stack += 4; + count += 4; + } + s = (*arg)->size; + memcpy(stack, *argv, s); + break; + } + + stack += s; + count += s; + argv++; + arg++; + } + return stacktemp + ((count>24)?24:0); +} + +extern void ffi_call_SYSV(unsigned, + extended_cif *, + void *(*)(int *, extended_cif *), + unsigned *, + void (*fn)(void), + unsigned); + + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + int i; + int size; + ffi_type **arg; + + /* Calculate size to allocate on stack */ + + for(i = 0, arg = cif->arg_types, size=0; i < cif->nargs; i++, arg++) + { + if ((*arg)->type == FFI_TYPE_STRUCT) + size += 4; + else + if ((*arg)->size <= 4) + size += 4; + else + size += 8; + } + + /* for variadic functions more space is needed on the stack */ + if (cif->nargs != cif->nfixedargs) + size += 24; + + if (cif->rtype->type == FFI_TYPE_STRUCT) + size += 4; + + + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(size, &ecif, ffi_prep_args, rvalue, fn, cif->flags); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +void ffi_closure_SYSV(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, unsigned long r8) +{ + register int *sp __asm__ ("r17"); + register int *r13 __asm__ ("r13"); + + ffi_closure* closure = (ffi_closure*) r13; + char *stack_args = sp; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { r3, r4, r5, r6, r7, r8 }; + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) r3; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int count = 0; + int nfixedargs = cif->nfixedargs; + int i; + + /* preserve struct type return pointer passing */ + + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ptr += 4; + count = 4; + } + + /* Find the address of each argument. 
*/ + for (i = 0; i < cif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + ptr = stack_args; + count = 24; + } + nfixedargs--; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + + default: + /* 8-byte values */ + + /* arguments are never splitted */ + if (ptr == ®ister_args[5]) + ptr = stack_args; + avalue[i] = ptr; + ptr += 4; + count += 4; + break; + } + ptr += 4; + count += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + + if (count == 24) + ptr = stack_args; + } + + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } else + { + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + if (cif->rtype) + asm ("l.ori r12, %0, 0x0\n l.lwz r11, 0(r12)\n l.lwz r12, 4(r12)" : : "r" (&rvalue)); + } +} + + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) closure->tramp; + unsigned long fn = (unsigned long) ffi_closure_SYSV; + unsigned long cls = (unsigned long) codeloc; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + /* write pointers to temporary registers */ + tramp[0] = (0x6 << 10) | (13 << 5); /* l.movhi r13, ... */ + tramp[1] = cls >> 16; + tramp[2] = (0x2a << 10) | (13 << 5) | 13; /* l.ori r13, r13, ... */ + tramp[3] = cls & 0xFFFF; + + tramp[4] = (0x6 << 10) | (15 << 5); /* l.movhi r15, ... */ + tramp[5] = fn >> 16; + tramp[6] = (0x2a << 10) | (15 << 5) | 15; /* l.ori r15, r15 ... */ + tramp[7] = fn & 0xFFFF; + + tramp[8] = (0x11 << 10); /* l.jr r15 */ + tramp[9] = 15 << 11; + + tramp[10] = (0x2a << 10) | (17 << 5) | 1; /* l.ori r17, r1, ... 
*/ + tramp[11] = 0x0; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + cif->flags = 0; + + /* structures are returned as pointers */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = FFI_TYPE_STRUCT; + else + if (cif->rtype->size > 4) + cif->flags = FFI_TYPE_UINT64; + + cif->nfixedargs = cif->nargs; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + ffi_status status; + + status = ffi_prep_cif_machdep (cif); + cif->nfixedargs = nfixedargs; + return status; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget 2.h new file mode 100644 index 0000000..e55da28 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget 2.h @@ -0,0 +1,58 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2014 Sebastian Macke + + OpenRISC Target configuration macros + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE (24) + +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs; + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget.h new file mode 100644 index 0000000..e55da28 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget.h @@ -0,0 +1,58 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2014 Sebastian Macke + + OpenRISC Target configuration macros + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE (24) + +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs; + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv 2.S new file mode 100644 index 0000000..df6570b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv 2.S @@ -0,0 +1,107 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +/* + r3: size to allocate on stack + r4: extended cif structure + r5: function pointer ffi_prep_args + r6: ret address + r7: function to call + r8: flag for return type +*/ + +ffi_call_SYSV: + /* Store registers used on stack */ + l.sw -4(r1), r9 /* return address */ + l.sw -8(r1), r1 /* stack address */ + l.sw -12(r1), r14 /* callee saved registers */ + l.sw -16(r1), r16 + l.sw -20(r1), r18 + l.sw -24(r1), r20 + + l.ori r14, r1, 0x0 /* save stack pointer */ + l.addi r1, r1, -24 + + l.ori r16, r7, 0x0 /* save function address */ + l.ori r18, r6, 0x0 /* save ret address */ + l.ori r20, r8, 0x0 /* save flag */ + + l.sub r1, r1, r3 /* reserve space on stack */ + + /* Call ffi_prep_args */ + l.ori r3, r1, 0x0 /* first argument stack address, second already ecif */ + l.jalr r5 + l.nop + + /* Load register arguments and call*/ + + l.lwz r3, 0(r1) + l.lwz r4, 4(r1) + l.lwz r5, 8(r1) + l.lwz r6, 12(r1) + l.lwz r7, 16(r1) + l.lwz r8, 20(r1) + l.ori r1, r11, 0x0 /* new stack pointer */ + l.jalr r16 + l.nop + + /* handle return values */ + + l.sfeqi r20, FFI_TYPE_STRUCT + l.bf ret /* structs don't return an rvalue */ + l.nop + + /* copy ret address */ + + l.sfeqi r20, FFI_TYPE_UINT64 + l.bnf four_byte_ret /* 8 byte value is returned */ + l.nop + + l.sw 4(r18), r12 + +four_byte_ret: + l.sw 0(r18), r11 + +ret: + /* return */ + l.ori r1, r14, 0x0 /* reset stack pointer */ + l.lwz r9, -4(r1) + l.lwz r1, -8(r1) + l.lwz r14, -12(r1) + l.lwz r16, -16(r1) + l.lwz r18, -20(r1) + l.lwz r20, -24(r1) + l.jr r9 + l.nop + +.size ffi_call_SYSV, .-ffi_call_SYSV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv.S new file mode 100644 index 0000000..df6570b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv.S @@ -0,0 +1,107 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +/* + r3: size to allocate on stack + r4: extended cif structure + r5: function pointer ffi_prep_args + r6: ret address + r7: function to call + r8: flag for return type +*/ + +ffi_call_SYSV: + /* Store registers used on stack */ + l.sw -4(r1), r9 /* return address */ + l.sw -8(r1), r1 /* stack address */ + l.sw -12(r1), r14 /* callee saved registers */ + l.sw -16(r1), r16 + l.sw -20(r1), r18 + l.sw -24(r1), r20 + + l.ori r14, r1, 0x0 /* save stack pointer */ + l.addi r1, r1, -24 + + l.ori r16, r7, 0x0 /* save function address */ + l.ori r18, r6, 0x0 /* save ret address */ + l.ori r20, r8, 0x0 /* save flag */ + + l.sub r1, r1, r3 /* reserve space on stack */ + + /* Call ffi_prep_args */ + l.ori r3, r1, 0x0 /* first argument stack address, second already ecif */ + l.jalr r5 + l.nop + + /* Load register arguments and call*/ + + l.lwz r3, 0(r1) + l.lwz r4, 4(r1) + l.lwz r5, 8(r1) + l.lwz r6, 12(r1) + l.lwz r7, 16(r1) + l.lwz r8, 20(r1) + l.ori r1, r11, 0x0 /* new stack pointer */ + l.jalr r16 + l.nop + + /* handle return values */ + + l.sfeqi r20, FFI_TYPE_STRUCT + l.bf ret /* structs don't return an rvalue */ + l.nop + + /* copy ret address */ + + l.sfeqi r20, FFI_TYPE_UINT64 + l.bnf four_byte_ret /* 8 byte value is returned */ + l.nop + + l.sw 4(r18), r12 + +four_byte_ret: + l.sw 0(r18), r11 + +ret: + /* return */ + l.ori r1, r14, 0x0 /* reset stack pointer */ + l.lwz r9, -4(r1) + l.lwz r1, -8(r1) + l.lwz r14, -12(r1) + l.lwz r16, -16(r1) + l.lwz r18, -20(r1) + l.lwz r20, -24(r1) + l.jr r9 + l.nop + +.size ffi_call_SYSV, .-ffi_call_SYSV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi 2.c new file mode 100644 index 0000000..95e6694 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi 2.c @@ -0,0 +1,674 @@ +/* ----------------------------------------------------------------------- + ffi.c - (c) 2011 Anthony Green + (c) 2008 Red Hat, Inc. + (c) 2006 Free Software Foundation, Inc. + (c) 2003-2004 Randolph Chung + + HPPA Foreign Function Interface + HP-UX PA ABI support + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#define ROUND_UP(v, a) (((size_t)(v) + (a) - 1) & ~((a) - 1)) + +#define MIN_STACK_SIZE 64 +#define FIRST_ARG_SLOT 9 +#define DEBUG_LEVEL 0 + +#define fldw(addr, fpreg) \ + __asm__ volatile ("fldw 0(%0), %%" #fpreg "L" : : "r"(addr) : #fpreg) +#define fstw(fpreg, addr) \ + __asm__ volatile ("fstw %%" #fpreg "L, 0(%0)" : : "r"(addr)) +#define fldd(addr, fpreg) \ + __asm__ volatile ("fldd 0(%0), %%" #fpreg : : "r"(addr) : #fpreg) +#define fstd(fpreg, addr) \ + __asm__ volatile ("fstd %%" #fpreg "L, 0(%0)" : : "r"(addr)) + +#define debug(lvl, x...) do { if (lvl <= DEBUG_LEVEL) { printf(x); } } while (0) + +static inline int ffi_struct_type(ffi_type *t) +{ + size_t sz = t->size; + + /* Small structure results are passed in registers, + larger ones are passed by pointer. Note that + small structures of size 2, 4 and 8 differ from + the corresponding integer types in that they have + different alignment requirements. */ + + if (sz <= 1) + return FFI_TYPE_UINT8; + else if (sz == 2) + return FFI_TYPE_SMALL_STRUCT2; + else if (sz == 3) + return FFI_TYPE_SMALL_STRUCT3; + else if (sz == 4) + return FFI_TYPE_SMALL_STRUCT4; + else if (sz == 5) + return FFI_TYPE_SMALL_STRUCT5; + else if (sz == 6) + return FFI_TYPE_SMALL_STRUCT6; + else if (sz == 7) + return FFI_TYPE_SMALL_STRUCT7; + else if (sz <= 8) + return FFI_TYPE_SMALL_STRUCT8; + else + return FFI_TYPE_STRUCT; /* else, we pass it by pointer. */ +} + +/* PA has a downward growing stack, which looks like this: + + Offset + [ Variable args ] + SP = (4*(n+9)) arg word N + ... + SP-52 arg word 4 + [ Fixed args ] + SP-48 arg word 3 + SP-44 arg word 2 + SP-40 arg word 1 + SP-36 arg word 0 + [ Frame marker ] + ... + SP-20 RP + SP-4 previous SP + + The first four argument words on the stack are reserved for use by + the callee. Instead, the general and floating registers replace + the first four argument slots. Non FP arguments are passed solely + in the general registers. FP arguments are passed in both general + and floating registers when using libffi. + + Non-FP 32-bit args are passed in gr26, gr25, gr24 and gr23. + Non-FP 64-bit args are passed in register pairs, starting + on an odd numbered register (i.e. r25+r26 and r23+r24). + FP 32-bit arguments are passed in fr4L, fr5L, fr6L and fr7L. + FP 64-bit arguments are passed in fr5 and fr7. + + The registers are allocated in the same manner as stack slots. + This allows the callee to save its arguments on the stack if + necessary: + + arg word 3 -> gr23 or fr7L + arg word 2 -> gr24 or fr6L or fr7R + arg word 1 -> gr25 or fr5L + arg word 0 -> gr26 or fr4L or fr5R + + Note that fr4R and fr6R are never used for arguments (i.e., + doubles are not passed in fr4 or fr6). + + The rest of the arguments are passed on the stack starting at SP-52, + but 64-bit arguments need to be aligned to an 8-byte boundary + + This means we can have holes either in the register allocation, + or in the stack. */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments + + The following code will put everything into the stack frame + (which was allocated by the asm routine), and on return + the asm routine will load the arguments that should be + passed by register into the appropriate registers + + NOTE: We load floating point args in this function... that means we + assume gcc will not mess with fp regs in here. 
*/ + +void ffi_prep_args_pa32(UINT32 *stack, extended_cif *ecif, unsigned bytes) +{ + register unsigned int i; + register ffi_type **p_arg; + register void **p_argv; + unsigned int slot = FIRST_ARG_SLOT; + char *dest_cpy; + size_t len; + + debug(1, "%s: stack = %p, ecif = %p, bytes = %u\n", __FUNCTION__, stack, + ecif, bytes); + + p_arg = ecif->cif->arg_types; + p_argv = ecif->avalue; + + for (i = 0; i < ecif->cif->nargs; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + *(SINT32 *)(stack - slot) = *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT32 *)(stack - slot) = *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT32 *)(stack - slot) = *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT32 *)(stack - slot) = *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + debug(3, "Storing UINT32 %u in slot %u\n", *(UINT32 *)(*p_argv), + slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + break; + + case FFI_TYPE_FLOAT: + /* First 4 args go in fr4L - fr7L. */ + debug(3, "Storing UINT32(float) in slot %u\n", slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 4 args go in fr4L - fr7L. */ + case 0: fldw(stack - slot, fr4); break; + case 1: fldw(stack - slot, fr5); break; + case 2: fldw(stack - slot, fr6); break; + case 3: fldw(stack - slot, fr7); break; + } + break; + + case FFI_TYPE_DOUBLE: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + debug(3, "Storing UINT64(double) at slot %u\n", slot); + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 2 args go in fr5, fr7. */ + case 1: fldd(stack - slot, fr5); break; + case 3: fldd(stack - slot, fr7); break; + } + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are passed in the same manner as structures + larger than 8 bytes. */ + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; +#endif + + case FFI_TYPE_STRUCT: + + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + + len = (*p_arg)->size; + if (len <= 4) + { + dest_cpy = (char *)(stack - slot) + 4 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else if (len <= 8) + { + slot += (slot & 1) ? 1 : 2; + dest_cpy = (char *)(stack - slot) + 8 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + p_argv++; + } + + /* Make sure we didn't mess up and scribble on the stack. 
*/ + { + unsigned int n; + + debug(5, "Stack setup:\n"); + for (n = 0; n < (bytes + 3) / 4; n++) + { + if ((n%4) == 0) { debug(5, "\n%08x: ", (unsigned int)(stack - n)); } + debug(5, "%08x ", *(stack - n)); + } + debug(5, "\n"); + } + + FFI_ASSERT(slot * 4 <= bytes); + + return; +} + +static void ffi_size_stack_pa32(ffi_cif *cif) +{ + ffi_type **ptr; + int i; + int z = 0; /* # stack slots */ + + for (ptr = cif->arg_types, i = 0; i < cif->nargs; ptr++, i++) + { + int type = (*ptr)->type; + + switch (type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + z += 2 + (z & 1); /* must start on even regs, so we may waste one */ + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: +#endif + case FFI_TYPE_STRUCT: + z += 1; /* pass by ptr, callee will copy */ + break; + + default: /* <= 32-bit values */ + z++; + } + } + + /* We can fit up to 6 args in the default 64-byte stack frame, + if we need more, we need more stack. */ + if (z <= 6) + cif->bytes = MIN_STACK_SIZE; /* min stack size */ + else + cif->bytes = 64 + ROUND_UP((z - 6) * sizeof(UINT32), MIN_STACK_SIZE); + + debug(3, "Calculated stack size is %u bytes\n", cif->bytes); +} + +/* Perform machine dependent cif processing. */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a structure. */ + cif->flags = FFI_TYPE_STRUCT; + break; +#endif + + case FFI_TYPE_STRUCT: + /* For the return type we have to check the size of the structures. + If the size is smaller or equal 4 bytes, the result is given back + in one register. If the size is smaller or equal 8 bytes than we + return the result in two registers. But if the size is bigger than + 8 bytes, we work with pointers. */ + cif->flags = ffi_struct_type(cif->rtype); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + /* Lucky us, because of the unique PA ABI we get to do our + own stack sizing. */ + switch (cif->abi) + { + case FFI_PA32: + ffi_size_stack_pa32(cif); + break; + + default: + FFI_ASSERT(0); + break; + } + + return FFI_OK; +} + +extern void ffi_call_pa32(void (*)(UINT32 *, extended_cif *, unsigned), + extended_cif *, unsigned, unsigned, unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if (rvalue == NULL +#ifdef PA_HPUX + && (cif->rtype->type == FFI_TYPE_STRUCT + || cif->rtype->type == FFI_TYPE_LONGDOUBLE)) +#else + && cif->rtype->type == FFI_TYPE_STRUCT) +#endif + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_PA32: + debug(3, "Calling ffi_call_pa32: ecif=%p, bytes=%u, flags=%u, rvalue=%p, fn=%p\n", &ecif, cif->bytes, cif->flags, ecif.rvalue, (void *)fn); + ffi_call_pa32(ffi_prep_args_pa32, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT(0); + break; + } +} + +#if FFI_CLOSURES +/* This is more-or-less an inverse of ffi_call -- we have arguments on + the stack, and we need to fill them into a cif structure and invoke + the user function. 
This really ought to be in asm to make sure + the compiler doesn't do things we don't expect. */ +ffi_status ffi_closure_inner_pa32(ffi_closure *closure, UINT32 *stack) +{ + ffi_cif *cif; + void **avalue; + void *rvalue; + /* Functions can return up to 64-bits in registers. Return address + must be double word aligned. */ + union { double rd; UINT32 ret[2]; } u; + ffi_type **p_arg; + char *tmp; + int i, avn; + unsigned int slot = FIRST_ARG_SLOT; + register UINT32 r28 asm("r28"); + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + cif = closure->cif; + + /* If returning via structure, callee will write to our pointer. */ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = (void *)r28; + else + rvalue = &u; + + avalue = (void **)alloca(cif->nargs * FFI_SIZEOF_ARG); + avn = cif->nargs; + p_arg = cif->arg_types; + + for (i = 0; i < avn; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + avalue[i] = (char *)(stack - slot) + sizeof(UINT32) - (*p_arg)->size; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_FLOAT: +#ifdef PA_LINUX + /* The closure call is indirect. In Linux, floating point + arguments in indirect calls with a prototype are passed + in the floating point registers instead of the general + registers. So, we need to replace what was previously + stored in the current slot with the value in the + corresponding floating point register. */ + switch (slot - FIRST_ARG_SLOT) + { + case 0: fstw(fr4, (void *)(stack - slot)); break; + case 1: fstw(fr5, (void *)(stack - slot)); break; + case 2: fstw(fr6, (void *)(stack - slot)); break; + case 3: fstw(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_DOUBLE: + slot += (slot & 1) ? 1 : 2; +#ifdef PA_LINUX + /* See previous comment for FFI_TYPE_FLOAT. */ + switch (slot - FIRST_ARG_SLOT) + { + case 1: fstd(fr5, (void *)(stack - slot)); break; + case 3: fstd(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a big structure. */ + avalue[i] = (void *) *(stack - slot); + break; +#endif + + case FFI_TYPE_STRUCT: + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + if((*p_arg)->size <= 4) + { + avalue[i] = (void *)(stack - slot) + sizeof(UINT32) - + (*p_arg)->size; + } + else if ((*p_arg)->size <= 8) + { + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot) + sizeof(UINT64) - + (*p_arg)->size; + } + else + avalue[i] = (void *) *(stack - slot); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + } + + /* Invoke the closure. */ + (c->fun) (cif, rvalue, avalue, c->user_data); + + debug(3, "after calling function, ret[0] = %08x, ret[1] = %08x\n", u.ret[0], + u.ret[1]); + + /* Store the result using the lower 2 bytes of the flags. 
*/ + switch (cif->flags) + { + case FFI_TYPE_UINT8: + *(stack - FIRST_ARG_SLOT) = (UINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_SINT8: + *(stack - FIRST_ARG_SLOT) = (SINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_UINT16: + *(stack - FIRST_ARG_SLOT) = (UINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_SINT16: + *(stack - FIRST_ARG_SLOT) = (SINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + *(stack - FIRST_ARG_SLOT - 1) = u.ret[1]; + break; + + case FFI_TYPE_DOUBLE: + fldd(rvalue, fr4); + break; + + case FFI_TYPE_FLOAT: + fldw(rvalue, fr4); + break; + + case FFI_TYPE_STRUCT: + /* Don't need a return value, done by caller. */ + break; + + case FFI_TYPE_SMALL_STRUCT2: + case FFI_TYPE_SMALL_STRUCT3: + case FFI_TYPE_SMALL_STRUCT4: + tmp = (void*)(stack - FIRST_ARG_SLOT); + tmp += 4 - cif->rtype->size; + memcpy((void*)tmp, &u, cif->rtype->size); + break; + + case FFI_TYPE_SMALL_STRUCT5: + case FFI_TYPE_SMALL_STRUCT6: + case FFI_TYPE_SMALL_STRUCT7: + case FFI_TYPE_SMALL_STRUCT8: + { + unsigned int ret2[2]; + int off; + + /* Right justify ret[0] and ret[1] */ + switch (cif->flags) + { + case FFI_TYPE_SMALL_STRUCT5: off = 3; break; + case FFI_TYPE_SMALL_STRUCT6: off = 2; break; + case FFI_TYPE_SMALL_STRUCT7: off = 1; break; + default: off = 0; break; + } + + memset (ret2, 0, sizeof (ret2)); + memcpy ((char *)ret2 + off, &u, 8 - off); + + *(stack - FIRST_ARG_SLOT) = ret2[0]; + *(stack - FIRST_ARG_SLOT - 1) = ret2[1]; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_VOID: + break; + + default: + debug(0, "assert with cif->flags: %d\n",cif->flags); + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* Fill in a closure to refer to the specified fun and user_data. + cif specifies the argument and result types for fun. + The cif must already be prep'ed. */ + +extern void ffi_closure_pa32(void); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + /* The layout of a function descriptor. A function pointer with the PLABEL + bit set points to a function descriptor. */ + struct pa32_fd + { + UINT32 code_pointer; + UINT32 gp; + }; + + struct ffi_pa32_trampoline_struct + { + UINT32 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT32 fake_gp; /* Pointer to closure, installed as gp. */ + UINT32 real_gp; /* Real gp value. */ + }; + + struct ffi_pa32_trampoline_struct *tramp; + struct pa32_fd *fd; + + if (cif->abi != FFI_PA32) + return FFI_BAD_ABI; + + /* Get function descriptor address for ffi_closure_pa32. */ + fd = (struct pa32_fd *)((UINT32)ffi_closure_pa32 & ~3); + + /* Setup trampoline. 
*/ + tramp = (struct ffi_pa32_trampoline_struct *)c->tramp; + tramp->code_pointer = fd->code_pointer; + tramp->fake_gp = (UINT32)codeloc & ~3; + tramp->real_gp = fd->gp; + + c->cif = cif; + c->user_data = user_data; + c->fun = fun; + + return FFI_OK; +} +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi.c new file mode 100644 index 0000000..95e6694 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi.c @@ -0,0 +1,674 @@ +/* ----------------------------------------------------------------------- + ffi.c - (c) 2011 Anthony Green + (c) 2008 Red Hat, Inc. + (c) 2006 Free Software Foundation, Inc. + (c) 2003-2004 Randolph Chung + + HPPA Foreign Function Interface + HP-UX PA ABI support + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#define ROUND_UP(v, a) (((size_t)(v) + (a) - 1) & ~((a) - 1)) + +#define MIN_STACK_SIZE 64 +#define FIRST_ARG_SLOT 9 +#define DEBUG_LEVEL 0 + +#define fldw(addr, fpreg) \ + __asm__ volatile ("fldw 0(%0), %%" #fpreg "L" : : "r"(addr) : #fpreg) +#define fstw(fpreg, addr) \ + __asm__ volatile ("fstw %%" #fpreg "L, 0(%0)" : : "r"(addr)) +#define fldd(addr, fpreg) \ + __asm__ volatile ("fldd 0(%0), %%" #fpreg : : "r"(addr) : #fpreg) +#define fstd(fpreg, addr) \ + __asm__ volatile ("fstd %%" #fpreg "L, 0(%0)" : : "r"(addr)) + +#define debug(lvl, x...) do { if (lvl <= DEBUG_LEVEL) { printf(x); } } while (0) + +static inline int ffi_struct_type(ffi_type *t) +{ + size_t sz = t->size; + + /* Small structure results are passed in registers, + larger ones are passed by pointer. Note that + small structures of size 2, 4 and 8 differ from + the corresponding integer types in that they have + different alignment requirements. */ + + if (sz <= 1) + return FFI_TYPE_UINT8; + else if (sz == 2) + return FFI_TYPE_SMALL_STRUCT2; + else if (sz == 3) + return FFI_TYPE_SMALL_STRUCT3; + else if (sz == 4) + return FFI_TYPE_SMALL_STRUCT4; + else if (sz == 5) + return FFI_TYPE_SMALL_STRUCT5; + else if (sz == 6) + return FFI_TYPE_SMALL_STRUCT6; + else if (sz == 7) + return FFI_TYPE_SMALL_STRUCT7; + else if (sz <= 8) + return FFI_TYPE_SMALL_STRUCT8; + else + return FFI_TYPE_STRUCT; /* else, we pass it by pointer. 
*/ +} + +/* PA has a downward growing stack, which looks like this: + + Offset + [ Variable args ] + SP = (4*(n+9)) arg word N + ... + SP-52 arg word 4 + [ Fixed args ] + SP-48 arg word 3 + SP-44 arg word 2 + SP-40 arg word 1 + SP-36 arg word 0 + [ Frame marker ] + ... + SP-20 RP + SP-4 previous SP + + The first four argument words on the stack are reserved for use by + the callee. Instead, the general and floating registers replace + the first four argument slots. Non FP arguments are passed solely + in the general registers. FP arguments are passed in both general + and floating registers when using libffi. + + Non-FP 32-bit args are passed in gr26, gr25, gr24 and gr23. + Non-FP 64-bit args are passed in register pairs, starting + on an odd numbered register (i.e. r25+r26 and r23+r24). + FP 32-bit arguments are passed in fr4L, fr5L, fr6L and fr7L. + FP 64-bit arguments are passed in fr5 and fr7. + + The registers are allocated in the same manner as stack slots. + This allows the callee to save its arguments on the stack if + necessary: + + arg word 3 -> gr23 or fr7L + arg word 2 -> gr24 or fr6L or fr7R + arg word 1 -> gr25 or fr5L + arg word 0 -> gr26 or fr4L or fr5R + + Note that fr4R and fr6R are never used for arguments (i.e., + doubles are not passed in fr4 or fr6). + + The rest of the arguments are passed on the stack starting at SP-52, + but 64-bit arguments need to be aligned to an 8-byte boundary + + This means we can have holes either in the register allocation, + or in the stack. */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments + + The following code will put everything into the stack frame + (which was allocated by the asm routine), and on return + the asm routine will load the arguments that should be + passed by register into the appropriate registers + + NOTE: We load floating point args in this function... that means we + assume gcc will not mess with fp regs in here. */ + +void ffi_prep_args_pa32(UINT32 *stack, extended_cif *ecif, unsigned bytes) +{ + register unsigned int i; + register ffi_type **p_arg; + register void **p_argv; + unsigned int slot = FIRST_ARG_SLOT; + char *dest_cpy; + size_t len; + + debug(1, "%s: stack = %p, ecif = %p, bytes = %u\n", __FUNCTION__, stack, + ecif, bytes); + + p_arg = ecif->cif->arg_types; + p_argv = ecif->avalue; + + for (i = 0; i < ecif->cif->nargs; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + *(SINT32 *)(stack - slot) = *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT32 *)(stack - slot) = *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT32 *)(stack - slot) = *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT32 *)(stack - slot) = *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + debug(3, "Storing UINT32 %u in slot %u\n", *(UINT32 *)(*p_argv), + slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + break; + + case FFI_TYPE_FLOAT: + /* First 4 args go in fr4L - fr7L. */ + debug(3, "Storing UINT32(float) in slot %u\n", slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 4 args go in fr4L - fr7L. 
*/ + case 0: fldw(stack - slot, fr4); break; + case 1: fldw(stack - slot, fr5); break; + case 2: fldw(stack - slot, fr6); break; + case 3: fldw(stack - slot, fr7); break; + } + break; + + case FFI_TYPE_DOUBLE: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + debug(3, "Storing UINT64(double) at slot %u\n", slot); + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 2 args go in fr5, fr7. */ + case 1: fldd(stack - slot, fr5); break; + case 3: fldd(stack - slot, fr7); break; + } + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are passed in the same manner as structures + larger than 8 bytes. */ + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; +#endif + + case FFI_TYPE_STRUCT: + + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + + len = (*p_arg)->size; + if (len <= 4) + { + dest_cpy = (char *)(stack - slot) + 4 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else if (len <= 8) + { + slot += (slot & 1) ? 1 : 2; + dest_cpy = (char *)(stack - slot) + 8 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + p_argv++; + } + + /* Make sure we didn't mess up and scribble on the stack. */ + { + unsigned int n; + + debug(5, "Stack setup:\n"); + for (n = 0; n < (bytes + 3) / 4; n++) + { + if ((n%4) == 0) { debug(5, "\n%08x: ", (unsigned int)(stack - n)); } + debug(5, "%08x ", *(stack - n)); + } + debug(5, "\n"); + } + + FFI_ASSERT(slot * 4 <= bytes); + + return; +} + +static void ffi_size_stack_pa32(ffi_cif *cif) +{ + ffi_type **ptr; + int i; + int z = 0; /* # stack slots */ + + for (ptr = cif->arg_types, i = 0; i < cif->nargs; ptr++, i++) + { + int type = (*ptr)->type; + + switch (type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + z += 2 + (z & 1); /* must start on even regs, so we may waste one */ + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: +#endif + case FFI_TYPE_STRUCT: + z += 1; /* pass by ptr, callee will copy */ + break; + + default: /* <= 32-bit values */ + z++; + } + } + + /* We can fit up to 6 args in the default 64-byte stack frame, + if we need more, we need more stack. */ + if (z <= 6) + cif->bytes = MIN_STACK_SIZE; /* min stack size */ + else + cif->bytes = 64 + ROUND_UP((z - 6) * sizeof(UINT32), MIN_STACK_SIZE); + + debug(3, "Calculated stack size is %u bytes\n", cif->bytes); +} + +/* Perform machine dependent cif processing. */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a structure. */ + cif->flags = FFI_TYPE_STRUCT; + break; +#endif + + case FFI_TYPE_STRUCT: + /* For the return type we have to check the size of the structures. + If the size is smaller or equal 4 bytes, the result is given back + in one register. If the size is smaller or equal 8 bytes than we + return the result in two registers. But if the size is bigger than + 8 bytes, we work with pointers. 
*/ + cif->flags = ffi_struct_type(cif->rtype); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + /* Lucky us, because of the unique PA ABI we get to do our + own stack sizing. */ + switch (cif->abi) + { + case FFI_PA32: + ffi_size_stack_pa32(cif); + break; + + default: + FFI_ASSERT(0); + break; + } + + return FFI_OK; +} + +extern void ffi_call_pa32(void (*)(UINT32 *, extended_cif *, unsigned), + extended_cif *, unsigned, unsigned, unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if (rvalue == NULL +#ifdef PA_HPUX + && (cif->rtype->type == FFI_TYPE_STRUCT + || cif->rtype->type == FFI_TYPE_LONGDOUBLE)) +#else + && cif->rtype->type == FFI_TYPE_STRUCT) +#endif + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_PA32: + debug(3, "Calling ffi_call_pa32: ecif=%p, bytes=%u, flags=%u, rvalue=%p, fn=%p\n", &ecif, cif->bytes, cif->flags, ecif.rvalue, (void *)fn); + ffi_call_pa32(ffi_prep_args_pa32, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT(0); + break; + } +} + +#if FFI_CLOSURES +/* This is more-or-less an inverse of ffi_call -- we have arguments on + the stack, and we need to fill them into a cif structure and invoke + the user function. This really ought to be in asm to make sure + the compiler doesn't do things we don't expect. */ +ffi_status ffi_closure_inner_pa32(ffi_closure *closure, UINT32 *stack) +{ + ffi_cif *cif; + void **avalue; + void *rvalue; + /* Functions can return up to 64-bits in registers. Return address + must be double word aligned. */ + union { double rd; UINT32 ret[2]; } u; + ffi_type **p_arg; + char *tmp; + int i, avn; + unsigned int slot = FIRST_ARG_SLOT; + register UINT32 r28 asm("r28"); + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + cif = closure->cif; + + /* If returning via structure, callee will write to our pointer. */ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = (void *)r28; + else + rvalue = &u; + + avalue = (void **)alloca(cif->nargs * FFI_SIZEOF_ARG); + avn = cif->nargs; + p_arg = cif->arg_types; + + for (i = 0; i < avn; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + avalue[i] = (char *)(stack - slot) + sizeof(UINT32) - (*p_arg)->size; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_FLOAT: +#ifdef PA_LINUX + /* The closure call is indirect. In Linux, floating point + arguments in indirect calls with a prototype are passed + in the floating point registers instead of the general + registers. So, we need to replace what was previously + stored in the current slot with the value in the + corresponding floating point register. 
*/ + switch (slot - FIRST_ARG_SLOT) + { + case 0: fstw(fr4, (void *)(stack - slot)); break; + case 1: fstw(fr5, (void *)(stack - slot)); break; + case 2: fstw(fr6, (void *)(stack - slot)); break; + case 3: fstw(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_DOUBLE: + slot += (slot & 1) ? 1 : 2; +#ifdef PA_LINUX + /* See previous comment for FFI_TYPE_FLOAT. */ + switch (slot - FIRST_ARG_SLOT) + { + case 1: fstd(fr5, (void *)(stack - slot)); break; + case 3: fstd(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a big structure. */ + avalue[i] = (void *) *(stack - slot); + break; +#endif + + case FFI_TYPE_STRUCT: + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + if((*p_arg)->size <= 4) + { + avalue[i] = (void *)(stack - slot) + sizeof(UINT32) - + (*p_arg)->size; + } + else if ((*p_arg)->size <= 8) + { + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot) + sizeof(UINT64) - + (*p_arg)->size; + } + else + avalue[i] = (void *) *(stack - slot); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + } + + /* Invoke the closure. */ + (c->fun) (cif, rvalue, avalue, c->user_data); + + debug(3, "after calling function, ret[0] = %08x, ret[1] = %08x\n", u.ret[0], + u.ret[1]); + + /* Store the result using the lower 2 bytes of the flags. */ + switch (cif->flags) + { + case FFI_TYPE_UINT8: + *(stack - FIRST_ARG_SLOT) = (UINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_SINT8: + *(stack - FIRST_ARG_SLOT) = (SINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_UINT16: + *(stack - FIRST_ARG_SLOT) = (UINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_SINT16: + *(stack - FIRST_ARG_SLOT) = (SINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + *(stack - FIRST_ARG_SLOT - 1) = u.ret[1]; + break; + + case FFI_TYPE_DOUBLE: + fldd(rvalue, fr4); + break; + + case FFI_TYPE_FLOAT: + fldw(rvalue, fr4); + break; + + case FFI_TYPE_STRUCT: + /* Don't need a return value, done by caller. */ + break; + + case FFI_TYPE_SMALL_STRUCT2: + case FFI_TYPE_SMALL_STRUCT3: + case FFI_TYPE_SMALL_STRUCT4: + tmp = (void*)(stack - FIRST_ARG_SLOT); + tmp += 4 - cif->rtype->size; + memcpy((void*)tmp, &u, cif->rtype->size); + break; + + case FFI_TYPE_SMALL_STRUCT5: + case FFI_TYPE_SMALL_STRUCT6: + case FFI_TYPE_SMALL_STRUCT7: + case FFI_TYPE_SMALL_STRUCT8: + { + unsigned int ret2[2]; + int off; + + /* Right justify ret[0] and ret[1] */ + switch (cif->flags) + { + case FFI_TYPE_SMALL_STRUCT5: off = 3; break; + case FFI_TYPE_SMALL_STRUCT6: off = 2; break; + case FFI_TYPE_SMALL_STRUCT7: off = 1; break; + default: off = 0; break; + } + + memset (ret2, 0, sizeof (ret2)); + memcpy ((char *)ret2 + off, &u, 8 - off); + + *(stack - FIRST_ARG_SLOT) = ret2[0]; + *(stack - FIRST_ARG_SLOT - 1) = ret2[1]; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_VOID: + break; + + default: + debug(0, "assert with cif->flags: %d\n",cif->flags); + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* Fill in a closure to refer to the specified fun and user_data. + cif specifies the argument and result types for fun. 
+ The cif must already be prep'ed. */ + +extern void ffi_closure_pa32(void); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + /* The layout of a function descriptor. A function pointer with the PLABEL + bit set points to a function descriptor. */ + struct pa32_fd + { + UINT32 code_pointer; + UINT32 gp; + }; + + struct ffi_pa32_trampoline_struct + { + UINT32 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT32 fake_gp; /* Pointer to closure, installed as gp. */ + UINT32 real_gp; /* Real gp value. */ + }; + + struct ffi_pa32_trampoline_struct *tramp; + struct pa32_fd *fd; + + if (cif->abi != FFI_PA32) + return FFI_BAD_ABI; + + /* Get function descriptor address for ffi_closure_pa32. */ + fd = (struct pa32_fd *)((UINT32)ffi_closure_pa32 & ~3); + + /* Setup trampoline. */ + tramp = (struct ffi_pa32_trampoline_struct *)c->tramp; + tramp->code_pointer = fd->code_pointer; + tramp->fake_gp = (UINT32)codeloc & ~3; + tramp->real_gp = fd->gp; + + c->cif = cif; + c->user_data = user_data; + c->fun = fun; + + return FFI_OK; +} +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget 2.h new file mode 100644 index 0000000..df1209e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget 2.h @@ -0,0 +1,80 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for hppa. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#ifdef PA_LINUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA_HPUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA64_HPUX +#error "PA64_HPUX FFI is not yet implemented" + FFI_PA64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA64 +#endif +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 12 + +#define FFI_TYPE_SMALL_STRUCT2 -1 +#define FFI_TYPE_SMALL_STRUCT3 -2 +#define FFI_TYPE_SMALL_STRUCT4 -3 +#define FFI_TYPE_SMALL_STRUCT5 -4 +#define FFI_TYPE_SMALL_STRUCT6 -5 +#define FFI_TYPE_SMALL_STRUCT7 -6 +#define FFI_TYPE_SMALL_STRUCT8 -7 +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget.h new file mode 100644 index 0000000..df1209e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget.h @@ -0,0 +1,80 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for hppa. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#ifdef PA_LINUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA_HPUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA64_HPUX +#error "PA64_HPUX FFI is not yet implemented" + FFI_PA64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA64 +#endif +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 12 + +#define FFI_TYPE_SMALL_STRUCT2 -1 +#define FFI_TYPE_SMALL_STRUCT3 -2 +#define FFI_TYPE_SMALL_STRUCT4 -3 +#define FFI_TYPE_SMALL_STRUCT5 -4 +#define FFI_TYPE_SMALL_STRUCT6 -5 +#define FFI_TYPE_SMALL_STRUCT7 -6 +#define FFI_TYPE_SMALL_STRUCT8 -7 +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32 2.S new file mode 100644 index 0000000..d0e5f69 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32 2.S @@ -0,0 +1,370 @@ +/* ----------------------------------------------------------------------- + hpux32.S - Copyright (c) 2006 Free Software Foundation, Inc. + (c) 2008 Red Hat, Inc. + based on src/pa/linux.S + + HP-UX PA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .LEVEL 1.1 + .SPACE $PRIVATE$ + .IMPORT $global$,DATA + .IMPORT $$dyncall,MILLICODE + .SUBSPA $DATA$ + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,ENTRY,PRIV_LEV=3 + .import ffi_prep_args_pa32,CODE + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .align 4 + +L$FB1 +ffi_call_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI11 + copy %sp, %r3 +L$CFI12 + + /* Setup the stack for calling prep_args... 
+ We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +L$CFI13 + copy %sp, %r4 + + addl %arg2, %r4, %arg0 ; arg stack + stw %arg3, -48(%r3) ; save flags we need it later + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args are loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 ; %ret0 <- rvalue + ldw -56(%r3), %r22 ; %r22 <- function to call + bl $$dyncall, %r31 ; Call the user function + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 ; r21 <- flags + ldw -52(%r3), %r20 ; r20 <- rvalue + + /* Store the result according to the return type. The most + likely types should come first. */ + +L$checkint + comib,<>,n FFI_TYPE_INT, %r21, L$checkint8 + b L$done + stw %ret0, 0(%r20) + +L$checkint8 + comib,<>,n FFI_TYPE_UINT8, %r21, L$checkint16 + b L$done + stb %ret0, 0(%r20) + +L$checkint16 + comib,<>,n FFI_TYPE_UINT16, %r21, L$checkdbl + b L$done + sth %ret0, 0(%r20) + +L$checkdbl + comib,<>,n FFI_TYPE_DOUBLE, %r21, L$checkfloat + b L$done + fstd %fr4,0(%r20) + +L$checkfloat + comib,<>,n FFI_TYPE_FLOAT, %r21, L$checkll + b L$done + fstw %fr4L,0(%r20) + +L$checkll + comib,<>,n FFI_TYPE_UINT64, %r21, L$checksmst2 + stw %ret0, 0(%r20) + b L$done + stw %ret1, 4(%r20) + +L$checksmst2 + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, L$checksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst3 + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, L$checksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst4 + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, L$checksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst5 + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, L$checksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst6 + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, L$checksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst7 + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, L$checksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst8 + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, L$done + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +L$done + /* all done, return */ + copy %r4, %sp ; pop arg stack + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 ; .. and pop stack + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +L$FE1 + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .export ffi_closure_pa32,ENTRY,PRIV_LEV=3,RTNVAL=GR + .import ffi_closure_inner_pa32,CODE + .align 4 +L$FB2 +ffi_closure_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI21 + copy %sp, %r3 +L$CFI22 + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. 
*/ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%rp) + ldw -40(%sp), %ret1 + .exit + .procend +L$FE2: + + .SPACE $PRIVATE$ + .SUBSPA $DATA$ + + .align 4 + .EXPORT _GLOBAL__F_ffi_call_pa32,DATA +_GLOBAL__F_ffi_call_pa32 +L$frame1: + .word L$ECIE1-L$SCIE1 ;# Length of Common Information Entry +L$SCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version + .ascii "\0" ;# CIE Augmentation + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +L$ECIE1: +L$SFDE1: + .word L$EFDE1-L$ASFDE1 ;# FDE Length +L$ASFDE1: + .word L$ASFDE1-L$frame1 ;# FDE CIE offset + .word L$FB1 ;# FDE initial location + .word L$FE1-L$FB1 ;# FDE address range + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI11-L$FB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI12-L$CFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI13-L$CFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +L$EFDE1: + +L$SFDE2: + .word L$EFDE2-L$ASFDE2 ;# FDE Length +L$ASFDE2: + .word L$ASFDE2-L$frame1 ;# FDE CIE offset + .word L$FB2 ;# FDE initial location + .word L$FE2-L$FB2 ;# FDE address range + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI21-L$FB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI22-L$CFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +L$EFDE2: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32.S new file mode 100644 index 0000000..d0e5f69 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32.S @@ -0,0 +1,370 @@ +/* ----------------------------------------------------------------------- + hpux32.S - Copyright (c) 2006 Free Software Foundation, Inc. + (c) 2008 Red Hat, Inc. + based on src/pa/linux.S + + HP-UX PA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .LEVEL 1.1 + .SPACE $PRIVATE$ + .IMPORT $global$,DATA + .IMPORT $$dyncall,MILLICODE + .SUBSPA $DATA$ + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,ENTRY,PRIV_LEV=3 + .import ffi_prep_args_pa32,CODE + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .align 4 + +L$FB1 +ffi_call_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI11 + copy %sp, %r3 +L$CFI12 + + /* Setup the stack for calling prep_args... + We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +L$CFI13 + copy %sp, %r4 + + addl %arg2, %r4, %arg0 ; arg stack + stw %arg3, -48(%r3) ; save flags we need it later + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args are loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 ; %ret0 <- rvalue + ldw -56(%r3), %r22 ; %r22 <- function to call + bl $$dyncall, %r31 ; Call the user function + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 ; r21 <- flags + ldw -52(%r3), %r20 ; r20 <- rvalue + + /* Store the result according to the return type. The most + likely types should come first. */ + +L$checkint + comib,<>,n FFI_TYPE_INT, %r21, L$checkint8 + b L$done + stw %ret0, 0(%r20) + +L$checkint8 + comib,<>,n FFI_TYPE_UINT8, %r21, L$checkint16 + b L$done + stb %ret0, 0(%r20) + +L$checkint16 + comib,<>,n FFI_TYPE_UINT16, %r21, L$checkdbl + b L$done + sth %ret0, 0(%r20) + +L$checkdbl + comib,<>,n FFI_TYPE_DOUBLE, %r21, L$checkfloat + b L$done + fstd %fr4,0(%r20) + +L$checkfloat + comib,<>,n FFI_TYPE_FLOAT, %r21, L$checkll + b L$done + fstw %fr4L,0(%r20) + +L$checkll + comib,<>,n FFI_TYPE_UINT64, %r21, L$checksmst2 + stw %ret0, 0(%r20) + b L$done + stw %ret1, 4(%r20) + +L$checksmst2 + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, L$checksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst3 + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, L$checksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst4 + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, L$checksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst5 + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, L$checksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst6 + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, L$checksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst7 + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, L$checksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst8 + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, L$done + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +L$done + /* all done, return */ + copy %r4, %sp ; pop arg stack + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 ; .. and pop stack + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +L$FE1 + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .export ffi_closure_pa32,ENTRY,PRIV_LEV=3,RTNVAL=GR + .import ffi_closure_inner_pa32,CODE + .align 4 +L$FB2 +ffi_closure_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI21 + copy %sp, %r3 +L$CFI22 + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. 
*/ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%rp) + ldw -40(%sp), %ret1 + .exit + .procend +L$FE2: + + .SPACE $PRIVATE$ + .SUBSPA $DATA$ + + .align 4 + .EXPORT _GLOBAL__F_ffi_call_pa32,DATA +_GLOBAL__F_ffi_call_pa32 +L$frame1: + .word L$ECIE1-L$SCIE1 ;# Length of Common Information Entry +L$SCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version + .ascii "\0" ;# CIE Augmentation + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +L$ECIE1: +L$SFDE1: + .word L$EFDE1-L$ASFDE1 ;# FDE Length +L$ASFDE1: + .word L$ASFDE1-L$frame1 ;# FDE CIE offset + .word L$FB1 ;# FDE initial location + .word L$FE1-L$FB1 ;# FDE address range + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI11-L$FB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI12-L$CFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI13-L$CFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +L$EFDE1: + +L$SFDE2: + .word L$EFDE2-L$ASFDE2 ;# FDE Length +L$ASFDE2: + .word L$ASFDE2-L$frame1 ;# FDE CIE offset + .word L$FB2 ;# FDE initial location + .word L$FE2-L$FB2 ;# FDE address range + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI21-L$FB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI22-L$CFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +L$EFDE2: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux 2.S new file mode 100644 index 0000000..33ef0b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux 2.S @@ -0,0 +1,380 @@ +/* ----------------------------------------------------------------------- + linux.S - (c) 2003-2004 Randolph Chung + (c) 2008 Red Hat, Inc. + + HPPA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + .level 1.1 + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,code + .import ffi_prep_args_pa32,code + + .type ffi_call_pa32, @function +.LFB1: +ffi_call_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +.LCFI11: + + copy %sp, %r3 +.LCFI12: + + /* Setup the stack for calling prep_args... + We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +.LCFI13: + copy %sp, %r4 + + addl %arg2, %r4, %arg0 /* arg stack */ + stw %arg3, -48(%r3) /* save flags; we need it later */ + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args were loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 /* %ret0 <- rvalue */ + ldw -56(%r3), %r22 /* %r22 <- function to call */ + bl $$dyncall, %r31 /* Call the user function */ + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 /* r21 <- flags */ + ldw -52(%r3), %r20 /* r20 <- rvalue */ + + /* Store the result according to the return type. */ + +.Lcheckint: + comib,<>,n FFI_TYPE_INT, %r21, .Lcheckint8 + b .Ldone + stw %ret0, 0(%r20) + +.Lcheckint8: + comib,<>,n FFI_TYPE_UINT8, %r21, .Lcheckint16 + b .Ldone + stb %ret0, 0(%r20) + +.Lcheckint16: + comib,<>,n FFI_TYPE_UINT16, %r21, .Lcheckdbl + b .Ldone + sth %ret0, 0(%r20) + +.Lcheckdbl: + comib,<>,n FFI_TYPE_DOUBLE, %r21, .Lcheckfloat + b .Ldone + fstd %fr4,0(%r20) + +.Lcheckfloat: + comib,<>,n FFI_TYPE_FLOAT, %r21, .Lcheckll + b .Ldone + fstw %fr4L,0(%r20) + +.Lcheckll: + comib,<>,n FFI_TYPE_UINT64, %r21, .Lchecksmst2 + stw %ret0, 0(%r20) + b .Ldone + stw %ret1, 4(%r20) + +.Lchecksmst2: + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, .Lchecksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst3: + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, .Lchecksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst4: + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, .Lchecksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst5: + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, .Lchecksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst6: + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, .Lchecksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst7: + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, .Lchecksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst8: + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, .Ldone + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +.Ldone: + /* all done, return */ + copy %r4, %sp /* pop arg stack */ + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 /* .. and pop stack */ + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +.LFE1: + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + .export ffi_closure_pa32,code + .import ffi_closure_inner_pa32,code + + .type ffi_closure_pa32, @function +.LFB2: +ffi_closure_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) +.LCFI20: + copy %r3, %r1 +.LCFI21: + copy %sp, %r3 +.LCFI22: + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. */ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%r2) + ldw -40(%sp), %ret1 + + .exit + .procend +.LFE2: + + .section ".eh_frame",EH_FRAME_FLAGS,@progbits +.Lframe1: + .word .LECIE1-.LSCIE1 ;# Length of Common Information Entry +.LSCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version +#ifdef __PIC__ + .ascii "zR\0" ;# CIE Augmentation: 'z' - data, 'R' - DW_EH_PE_... 
data +#else + .ascii "\0" ;# CIE Augmentation +#endif + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column +#ifdef __PIC__ + .uleb128 0x1 ;# Augmentation size + .byte 0x1b ;# FDE Encoding (DW_EH_PE_pcrel|DW_EH_PE_sdata4) +#endif + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +.LECIE1: +.LSFDE1: + .word .LEFDE1-.LASFDE1 ;# FDE Length +.LASFDE1: + .word .LASFDE1-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB1-. ;# FDE initial location +#else + .word .LFB1 ;# FDE initial location +#endif + .word .LFE1-.LFB1 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI11-.LFB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI12-.LCFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI13-.LCFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +.LEFDE1: + +.LSFDE2: + .word .LEFDE2-.LASFDE2 ;# FDE Length +.LASFDE2: + .word .LASFDE2-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB2-. ;# FDE initial location +#else + .word .LFB2 ;# FDE initial location +#endif + .word .LFE2-.LFB2 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI21-.LFB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI22-.LCFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +.LEFDE2: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux.S new file mode 100644 index 0000000..33ef0b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux.S @@ -0,0 +1,380 @@ +/* ----------------------------------------------------------------------- + linux.S - (c) 2003-2004 Randolph Chung + (c) 2008 Red Hat, Inc. + + HPPA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + .level 1.1 + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,code + .import ffi_prep_args_pa32,code + + .type ffi_call_pa32, @function +.LFB1: +ffi_call_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +.LCFI11: + + copy %sp, %r3 +.LCFI12: + + /* Setup the stack for calling prep_args... + We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +.LCFI13: + copy %sp, %r4 + + addl %arg2, %r4, %arg0 /* arg stack */ + stw %arg3, -48(%r3) /* save flags; we need it later */ + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args were loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 /* %ret0 <- rvalue */ + ldw -56(%r3), %r22 /* %r22 <- function to call */ + bl $$dyncall, %r31 /* Call the user function */ + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 /* r21 <- flags */ + ldw -52(%r3), %r20 /* r20 <- rvalue */ + + /* Store the result according to the return type. */ + +.Lcheckint: + comib,<>,n FFI_TYPE_INT, %r21, .Lcheckint8 + b .Ldone + stw %ret0, 0(%r20) + +.Lcheckint8: + comib,<>,n FFI_TYPE_UINT8, %r21, .Lcheckint16 + b .Ldone + stb %ret0, 0(%r20) + +.Lcheckint16: + comib,<>,n FFI_TYPE_UINT16, %r21, .Lcheckdbl + b .Ldone + sth %ret0, 0(%r20) + +.Lcheckdbl: + comib,<>,n FFI_TYPE_DOUBLE, %r21, .Lcheckfloat + b .Ldone + fstd %fr4,0(%r20) + +.Lcheckfloat: + comib,<>,n FFI_TYPE_FLOAT, %r21, .Lcheckll + b .Ldone + fstw %fr4L,0(%r20) + +.Lcheckll: + comib,<>,n FFI_TYPE_UINT64, %r21, .Lchecksmst2 + stw %ret0, 0(%r20) + b .Ldone + stw %ret1, 4(%r20) + +.Lchecksmst2: + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, .Lchecksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst3: + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, .Lchecksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst4: + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, .Lchecksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst5: + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, .Lchecksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst6: + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, .Lchecksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst7: + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, .Lchecksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst8: + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, .Ldone + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +.Ldone: + /* all done, return */ + copy %r4, %sp /* pop arg stack */ + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 /* .. and pop stack */ + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +.LFE1: + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + .export ffi_closure_pa32,code + .import ffi_closure_inner_pa32,code + + .type ffi_closure_pa32, @function +.LFB2: +ffi_closure_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) +.LCFI20: + copy %r3, %r1 +.LCFI21: + copy %sp, %r3 +.LCFI22: + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. */ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%r2) + ldw -40(%sp), %ret1 + + .exit + .procend +.LFE2: + + .section ".eh_frame",EH_FRAME_FLAGS,@progbits +.Lframe1: + .word .LECIE1-.LSCIE1 ;# Length of Common Information Entry +.LSCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version +#ifdef __PIC__ + .ascii "zR\0" ;# CIE Augmentation: 'z' - data, 'R' - DW_EH_PE_... 
data +#else + .ascii "\0" ;# CIE Augmentation +#endif + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column +#ifdef __PIC__ + .uleb128 0x1 ;# Augmentation size + .byte 0x1b ;# FDE Encoding (DW_EH_PE_pcrel|DW_EH_PE_sdata4) +#endif + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +.LECIE1: +.LSFDE1: + .word .LEFDE1-.LASFDE1 ;# FDE Length +.LASFDE1: + .word .LASFDE1-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB1-. ;# FDE initial location +#else + .word .LFB1 ;# FDE initial location +#endif + .word .LFE1-.LFB1 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI11-.LFB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI12-.LCFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI13-.LCFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +.LEFDE1: + +.LSFDE2: + .word .LEFDE2-.LASFDE2 ;# FDE Length +.LASFDE2: + .word .LASFDE2-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB2-. ;# FDE initial location +#else + .word .LFB2 ;# FDE initial location +#endif + .word .LFE2-.LFB2 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI21-.LFB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI22-.LCFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +.LEFDE2: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix 2.S new file mode 100644 index 0000000..7ba5415 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix 2.S @@ -0,0 +1,566 @@ +/* ----------------------------------------------------------------------- + aix.S - Copyright (c) 2002, 2009 Free Software Foundation, Inc. + based on darwin.S by John Hornkvist + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_prep_args + +#define LIBFFI_ASM +#include +#include +#define JUMPTARGET(name) name +#define L(x) x + .file "aix.S" + .toc + + /* void ffi_call_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const)); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_AIX + .globl .ffi_call_AIX +.csect ffi_call_AIX[DS] +ffi_call_AIX: +#ifdef __64BIT__ + .llong .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r0, 16(r1) +LCFI..0: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address. */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 16(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-32-(13*8)(r28) + lfd f2,-32-(12*8)(r28) + lfd f3,-32-(11*8)(r28) + lfd f4,-32-(10*8)(r28) + nop + lfd f5,-32-(9*8)(r28) + lfd f6,-32-(8*8)(r28) + lfd f7,-32-(7*8)(r28) + lfd f8,-32-(6*8)(r28) + nop + lfd f9,-32-(5*8)(r28) + lfd f10,-32-(4*8)(r28) + lfd f11,-32-(3*8)(r28) + lfd f12,-32-(2*8)(r28) + nop + lfd f13,-32-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + ld r2, 40(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + std r3, 0(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + ld r0, 16(r28) + ld r28, -32(r1) + mtlr r0 + ld r29, -24(r1) + ld r30, -16(r1) + ld r31, -8(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + bf 31, L(done_return_value) + stfd f2, 8(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#else /* ! 
__64BIT__ */ + + .long .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r0, 8(r1) +LCFI..0: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 8(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-16-(13*8)(r28) + lfd f2,-16-(12*8)(r28) + lfd f3,-16-(11*8)(r28) + lfd f4,-16-(10*8)(r28) + nop + lfd f5,-16-(9*8)(r28) + lfd f6,-16-(8*8)(r28) + lfd f7,-16-(7*8)(r28) + lfd f8,-16-(6*8)(r28) + nop + lfd f9,-16-(5*8)(r28) + lfd f10,-16-(4*8)(r28) + lfd f11,-16-(3*8)(r28) + lfd f12,-16-(2*8)(r28) + nop + lfd f13,-16-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + lwz r2, 20(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + stw r3, 0(r30) + bf 28, L(done_return_value) + stw r4, 4(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + lwz r0, 8(r28) + lwz r28,-16(r1) + mtlr r0 + lwz r29,-12(r1) + lwz r30, -8(r1) + lwz r31, -4(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_AIX) */ + + /* void ffi_call_go_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const), + * void *closure); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args, r9=closure + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_go_AIX + .globl .ffi_call_go_AIX +.csect ffi_call_go_AIX[DS] +ffi_call_go_AIX: +#ifdef __64BIT__ + .llong .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r9, 8(r1) /* closure, saved in cr field. */ + std r0, 16(r1) +LCFI..2: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 8(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. 
*/ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + + b L1 +LFE..1: +#else /* ! __64BIT__ */ + + .long .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 + /* Save registers we use. */ +LFB..1: + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r9, 4(r1) /* closure, saved in cr field. */ + stw r0, 8(r1) +LCFI..2: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 4(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + + b L1 +LFE..1: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_go_AIX) */ + +.csect .text[PR] + .align 2 + .globl ffi_call_DARWIN + .globl .ffi_call_DARWIN +.csect ffi_call_DARWIN[DS] +ffi_call_DARWIN: +#ifdef __64BIT__ + .llong .ffi_call_DARWIN, TOC[tc0], 0 +#else + .long .ffi_call_DARWIN, TOC[tc0], 0 +#endif + .csect .text[PR] +.ffi_call_DARWIN: + blr + .long 0 + .byte 0,0,0,0,0,0,0,0 +/* END(ffi_call_DARWIN) */ + +/* EH frame stuff. 
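The L(fp_return_value) paths above take a floating-point result from f1 (and f2 for the two-register long double case) and store it into *rvalue; from C this is invisible, since the caller simply declares a double return. A sketch using only the portable API -- the routing through ffi_call_AIX is a target detail the caller never sees (compile with -lm):

#include <ffi.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[1] = { &ffi_type_double };
    double x = 2.0, result = 0.0;
    void *arg_values[1] = { &x };

    /* One double in, one double out; the fp result lands directly in
       'result' (no ffi_arg widening for floating-point returns). */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_double, arg_types) != FFI_OK)
        return 1;

    ffi_call(&cif, FFI_FN(sqrt), &result, arg_values);
    printf("sqrt(2.0) = %f\n", result);   /* 1.414214 */
    return 0;
}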
*/ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix +_GLOBAL__F_libffi_src_powerpc_aix: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte 0x41 /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix.S 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix.S new file mode 100644 index 0000000..7ba5415 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix.S @@ -0,0 +1,566 @@ +/* ----------------------------------------------------------------------- + aix.S - Copyright (c) 2002, 2009 Free Software Foundation, Inc. + based on darwin.S by John Hornkvist + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_prep_args + +#define LIBFFI_ASM +#include +#include +#define JUMPTARGET(name) name +#define L(x) x + .file "aix.S" + .toc + + /* void ffi_call_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const)); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_AIX + .globl .ffi_call_AIX +.csect ffi_call_AIX[DS] +ffi_call_AIX: +#ifdef __64BIT__ + .llong .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r0, 16(r1) +LCFI..0: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address. */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 16(r29) + /* Set up cr1 with bits 4-7 of the flags. 
*/ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-32-(13*8)(r28) + lfd f2,-32-(12*8)(r28) + lfd f3,-32-(11*8)(r28) + lfd f4,-32-(10*8)(r28) + nop + lfd f5,-32-(9*8)(r28) + lfd f6,-32-(8*8)(r28) + lfd f7,-32-(7*8)(r28) + lfd f8,-32-(6*8)(r28) + nop + lfd f9,-32-(5*8)(r28) + lfd f10,-32-(4*8)(r28) + lfd f11,-32-(3*8)(r28) + lfd f12,-32-(2*8)(r28) + nop + lfd f13,-32-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + ld r2, 40(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + std r3, 0(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + ld r0, 16(r28) + ld r28, -32(r1) + mtlr r0 + ld r29, -24(r1) + ld r30, -16(r1) + ld r31, -8(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + bf 31, L(done_return_value) + stfd f2, 8(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#else /* ! __64BIT__ */ + + .long .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r0, 8(r1) +LCFI..0: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 8(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-16-(13*8)(r28) + lfd f2,-16-(12*8)(r28) + lfd f3,-16-(11*8)(r28) + lfd f4,-16-(10*8)(r28) + nop + lfd f5,-16-(9*8)(r28) + lfd f6,-16-(8*8)(r28) + lfd f7,-16-(7*8)(r28) + lfd f8,-16-(6*8)(r28) + nop + lfd f9,-16-(5*8)(r28) + lfd f10,-16-(4*8)(r28) + lfd f11,-16-(3*8)(r28) + lfd f12,-16-(2*8)(r28) + nop + lfd f13,-16-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + lwz r2, 20(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + stw r3, 0(r30) + bf 28, L(done_return_value) + stw r4, 4(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. 
*/ + mr r1, r28 + lwz r0, 8(r28) + lwz r28,-16(r1) + mtlr r0 + lwz r29,-12(r1) + lwz r30, -8(r1) + lwz r31, -4(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_AIX) */ + + /* void ffi_call_go_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const), + * void *closure); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args, r9=closure + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_go_AIX + .globl .ffi_call_go_AIX +.csect ffi_call_go_AIX[DS] +ffi_call_go_AIX: +#ifdef __64BIT__ + .llong .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r9, 8(r1) /* closure, saved in cr field. */ + std r0, 16(r1) +LCFI..2: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 8(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + + b L1 +LFE..1: +#else /* ! __64BIT__ */ + + .long .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 + /* Save registers we use. */ +LFB..1: + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r9, 4(r1) /* closure, saved in cr field. */ + stw r0, 8(r1) +LCFI..2: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 4(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + + b L1 +LFE..1: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_go_AIX) */ + +.csect .text[PR] + .align 2 + .globl ffi_call_DARWIN + .globl .ffi_call_DARWIN +.csect ffi_call_DARWIN[DS] +ffi_call_DARWIN: +#ifdef __64BIT__ + .llong .ffi_call_DARWIN, TOC[tc0], 0 +#else + .long .ffi_call_DARWIN, TOC[tc0], 0 +#endif + .csect .text[PR] +.ffi_call_DARWIN: + blr + .long 0 + .byte 0,0,0,0,0,0,0,0 +/* END(ffi_call_DARWIN) */ + +/* EH frame stuff. 
*/ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix +_GLOBAL__F_libffi_src_powerpc_aix: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte 0x41 /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure 2.S 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure 2.S new file mode 100644 index 0000000..132c785 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure 2.S @@ -0,0 +1,694 @@ +/* ----------------------------------------------------------------------- + aix_closure.S - Copyright (c) 2002, 2003, 2009 Free Software Foundation, Inc. + based on darwin_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_closure_helper_DARWIN + .extern .ffi_go_closure_helper_DARWIN + +#define LIBFFI_ASM +#define JUMPTARGET(name) name +#define L(x) x + .file "aix_closure.S" + .toc +LC..60: + .tc L..60[TC],L..60 + .csect .text[PR] + .align 2 + +.csect .text[PR] + .align 2 + .globl ffi_closure_ASM + .globl .ffi_closure_ASM +.csect ffi_closure_ASM[DS] +ffi_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..0: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned 
to 16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 10(r3) /* load type from return type */ + ld r4, LC..60(2) /* get address of jump table */ + sldi r3, r3, 4 /* now multiply return type by 16 */ + ld r0, 240+16(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. */ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_INT */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + lfd f2, 112+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 112+6(r1) + mtlr r0 +L..finish: + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 112+6(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT32 */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_POINTER */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr +LFE..0: + +#else /* ! 
__64BIT__ */ + + .long .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..0: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 6(r3) /* load type from return type */ + lwz r4, LC..60(2) /* get address of jump table */ + slwi r3, r3, 4 /* now multiply return type by 16 */ + lwz r0, 176+8(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. 
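ffi_closure_ASM above is the trampoline target: it spills the incoming GPRs/FPRs, calls ffi_closure_helper_DARWIN, and then uses the L..60 jump table to move the written-back result into the ABI return registers. User code reaches it only through the portable closure API; a minimal sketch using the standard libffi calls (nothing here is specific to this vendored copy):

#include <ffi.h>
#include <stdio.h>

/* Handler invoked by libffi whenever the closure's code pointer is called. */
static void add_handler(ffi_cif *cif, void *ret, void **args, void *userdata)
{
    (void)cif;
    int a = *(int *)args[0];
    int b = *(int *)args[1];
    *(ffi_arg *)ret = a + b + *(int *)userdata;
}

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
    void *code = NULL;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
    int bias = 10;

    if (closure
        && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) == FFI_OK
        && ffi_prep_closure_loc(closure, &cif, add_handler, &bias, code) == FFI_OK) {
        /* 'code' is now an ordinary function pointer backed by the trampoline. */
        int (*fn)(int, int) = (int (*)(int, int))code;
        printf("closure(2, 3) = %d\n", fn(2, 3));   /* 15 */
    }
    ffi_closure_free(closure);
    return 0;
}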
*/ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_INT */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + lfd f2, 56+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_SINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_POINTER */ + lwz r3, 56+0(r1) + mtlr r0 +L..finish: + addi r1, r1, 176 + blr +LFE..0: +#endif + .ef __LINE__ +/* END(ffi_closure_ASM) */ + + +.csect .text[PR] + .align 2 + .globl ffi_go_closure_ASM + .globl .ffi_go_closure_ASM +.csect ffi_go_closure_ASM[DS] +ffi_go_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..2: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, r11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: + +#else /* ! 
__64BIT__ */ + + .long .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..2: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, 11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: +#endif + .ef __LINE__ +/* END(ffi_go_closure_ASM) */ + +/* EH frame stuff. */ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define CFA_OFFSET 0xf0,0x01 /* LEB128 240 */ +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define CFA_OFFSET 0xb0,0x01 /* LEB128 176 */ +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix_closure +_GLOBAL__F_libffi_src_powerpc_aix_closure: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte LR_REGNO /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; 
Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix_closure /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure.S new file mode 100644 index 0000000..132c785 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure.S @@ -0,0 +1,694 @@ +/* ----------------------------------------------------------------------- + aix_closure.S - Copyright (c) 2002, 2003, 2009 Free Software Foundation, Inc. + based on darwin_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_closure_helper_DARWIN + .extern .ffi_go_closure_helper_DARWIN + +#define LIBFFI_ASM +#define JUMPTARGET(name) name +#define L(x) x + .file "aix_closure.S" + .toc +LC..60: + .tc L..60[TC],L..60 + .csect .text[PR] + .align 2 + +.csect .text[PR] + .align 2 + .globl ffi_closure_ASM + .globl .ffi_closure_ASM +.csect ffi_closure_ASM[DS] +ffi_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..0: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 10(r3) /* load type from return type */ + ld r4, LC..60(2) /* get address of jump table */ + sldi r3, r3, 4 /* now multiply return type by 16 */ + ld r0, 240+16(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. 
*/ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_INT */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + lfd f2, 112+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 112+6(r1) + mtlr r0 +L..finish: + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 112+6(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT32 */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_POINTER */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr +LFE..0: + +#else /* ! __64BIT__ */ + + .long .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..0: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 6(r3) /* load type from return type */ + lwz r4, LC..60(2) /* get address of jump table */ + slwi r3, r3, 4 /* now multiply return type by 16 */ + lwz r0, 176+8(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). 
+ Align to 16 byte boundary for cache and dispatch efficiency. */ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_INT */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + lfd f2, 56+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_SINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_POINTER */ + lwz r3, 56+0(r1) + mtlr r0 +L..finish: + addi r1, r1, 176 + blr +LFE..0: +#endif + .ef __LINE__ +/* END(ffi_closure_ASM) */ + + +.csect .text[PR] + .align 2 + .globl ffi_go_closure_ASM + .globl .ffi_go_closure_ASM +.csect ffi_go_closure_ASM[DS] +ffi_go_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..2: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, r11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: + +#else /* ! 
__64BIT__ */ + + .long .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..2: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, 11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: +#endif + .ef __LINE__ +/* END(ffi_go_closure_ASM) */ + +/* EH frame stuff. */ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define CFA_OFFSET 0xf0,0x01 /* LEB128 240 */ +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define CFA_OFFSET 0xb0,0x01 /* LEB128 176 */ +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix_closure +_GLOBAL__F_libffi_src_powerpc_aix_closure: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte LR_REGNO /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; 
Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix_closure /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm 2.h new file mode 100644 index 0000000..27b22f6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm 2.h @@ -0,0 +1,125 @@ +/* ----------------------------------------------------------------------- + asm.h - Copyright (c) 1998 Geoffrey Keating + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define ASM_GLOBAL_DIRECTIVE .globl + + +#define C_SYMBOL_NAME(name) name +/* Macro for a label. */ +#ifdef __STDC__ +#define C_LABEL(name) name##: +#else +#define C_LABEL(name) name/**/: +#endif + +/* This seems to always be the case on PPC. */ +#define ALIGNARG(log2) log2 +/* For ELF we need the `.type' directive to make shared libs work right. */ +#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg; +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name + +/* If compiled for profiling, call `_mcount' at the start of each function. */ +#ifdef PROF +/* The mcount code relies on the return address being on the stack + to locate our caller and so it can restore it; so store one just + for its benefit. 
*/ +#ifdef PIC +#define CALL_MCOUNT \ + .pushsection; \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + stw %r0,4(%r1); \ + bl _GLOBAL_OFFSET_TABLE_@local-4; \ + mflr %r11; \ + lwz %r0,0b@got(%r11); \ + bl JUMPTARGET(_mcount); +#else /* PIC */ +#define CALL_MCOUNT \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + lis %r11,0b@ha; \ + stw %r0,4(%r1); \ + addi %r0,%r11,0b@l; \ + bl JUMPTARGET(_mcount); +#endif /* PIC */ +#else /* PROF */ +#define CALL_MCOUNT /* Do nothing. */ +#endif /* PROF */ + +#define ENTRY(name) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT + +#define EALIGN_W_0 /* No words to insert. */ +#define EALIGN_W_1 nop +#define EALIGN_W_2 nop;nop +#define EALIGN_W_3 nop;nop;nop +#define EALIGN_W_4 EALIGN_W_3;nop +#define EALIGN_W_5 EALIGN_W_4;nop +#define EALIGN_W_6 EALIGN_W_5;nop +#define EALIGN_W_7 EALIGN_W_6;nop + +/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes + past a 2^align boundary. */ +#ifdef PROF +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT \ + b 0f; \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + 0: +#else /* PROF */ +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + C_LABEL(name) +#endif + +#define END(name) \ + ASM_SIZE_DIRECTIVE(name) + +#ifdef PIC +#define JUMPTARGET(name) name##@plt +#else +#define JUMPTARGET(name) name +#endif + +/* Local labels stripped out by the linker. */ +#define L(x) .L##x diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm.h new file mode 100644 index 0000000..27b22f6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm.h @@ -0,0 +1,125 @@ +/* ----------------------------------------------------------------------- + asm.h - Copyright (c) 1998 Geoffrey Keating + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define ASM_GLOBAL_DIRECTIVE .globl + + +#define C_SYMBOL_NAME(name) name +/* Macro for a label. */ +#ifdef __STDC__ +#define C_LABEL(name) name##: +#else +#define C_LABEL(name) name/**/: +#endif + +/* This seems to always be the case on PPC. */ +#define ALIGNARG(log2) log2 +/* For ELF we need the `.type' directive to make shared libs work right. */ +#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg; +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name + +/* If compiled for profiling, call `_mcount' at the start of each function. */ +#ifdef PROF +/* The mcount code relies on the return address being on the stack + to locate our caller and so it can restore it; so store one just + for its benefit. */ +#ifdef PIC +#define CALL_MCOUNT \ + .pushsection; \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + stw %r0,4(%r1); \ + bl _GLOBAL_OFFSET_TABLE_@local-4; \ + mflr %r11; \ + lwz %r0,0b@got(%r11); \ + bl JUMPTARGET(_mcount); +#else /* PIC */ +#define CALL_MCOUNT \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + lis %r11,0b@ha; \ + stw %r0,4(%r1); \ + addi %r0,%r11,0b@l; \ + bl JUMPTARGET(_mcount); +#endif /* PIC */ +#else /* PROF */ +#define CALL_MCOUNT /* Do nothing. */ +#endif /* PROF */ + +#define ENTRY(name) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT + +#define EALIGN_W_0 /* No words to insert. */ +#define EALIGN_W_1 nop +#define EALIGN_W_2 nop;nop +#define EALIGN_W_3 nop;nop;nop +#define EALIGN_W_4 EALIGN_W_3;nop +#define EALIGN_W_5 EALIGN_W_4;nop +#define EALIGN_W_6 EALIGN_W_5;nop +#define EALIGN_W_7 EALIGN_W_6;nop + +/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes + past a 2^align boundary. */ +#ifdef PROF +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT \ + b 0f; \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + 0: +#else /* PROF */ +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + C_LABEL(name) +#endif + +#define END(name) \ + ASM_SIZE_DIRECTIVE(name) + +#ifdef PIC +#define JUMPTARGET(name) name##@plt +#else +#define JUMPTARGET(name) name +#endif + +/* Local labels stripped out by the linker. */ +#define L(x) .L##x diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin 2.S new file mode 100644 index 0000000..066eb82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin 2.S @@ -0,0 +1,378 @@ +/* ----------------------------------------------------------------------- + darwin.S - Copyright (c) 2000 John Hornkvist + Copyright (c) 2004, 2010 Free Software Foundation, Inc. + + PowerPC Assembly glue. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) +#define sgux MODE_CHOICE(stwux,stdux) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. +#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* If there is any FP stuff we make space for all of the regs. */ +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +#define RESULT_BYTES 16 + +/* This should be kept in step with the same value in ffi_darwin.c. */ +#define ASM_NEEDS_REGISTERS 4 +#define SAVE_REGS_SIZE (ASM_NEEDS_REGISTERS * GPR_BYTES) + +#include +#include + +#define JUMPTARGET(name) name +#define L(x) x + + .text + .align 2 + .globl _ffi_prep_args + + .align 2 + .globl _ffi_call_DARWIN + + /* We arrive here with: + r3 = ptr to extended cif. + r4 = -bytes. + r5 = cif flags. + r6 = ptr to return value. + r7 = fn pointer (user func). + r8 = fn pointer (ffi_prep_args). + r9 = ffi_type* for the ret val. */ + +_ffi_call_DARWIN: +Lstartcode: + mr r12,r8 /* We only need r12 until the call, + so it does not have to be saved. */ +LFB1: + /* Save the old stack pointer as AP. */ + mr r8,r1 +LCFI0: + + /* Save the retval type in parents frame. */ + sg r9,(LINKAGE_SIZE+6*GPR_BYTES)(r8) + + /* Allocate the stack space we need. */ + sgux r1,r1,r4 + + /* Save registers we use. */ + mflr r9 + sg r9,SAVED_LR_OFFSET(r8) + + sg r28,-(4 * GPR_BYTES)(r8) + sg r29,-(3 * GPR_BYTES)(r8) + sg r30,-(2 * GPR_BYTES)(r8) + sg r31,-( GPR_BYTES)(r8) + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + sg r2,(5 * GPR_BYTES)(r1) +#endif + +LCFI1: + + /* Save arguments over call. */ + mr r31,r5 /* flags, */ + mr r30,r6 /* rvalue, */ + mr r29,r7 /* function address, */ + mr r28,r8 /* our AP. */ +LCFI2: + /* Call ffi_prep_args. 
r3 = extended cif, r4 = stack ptr copy. */ + mr r4,r1 + li r9,0 + + mtctr r12 /* r12 holds address of _ffi_prep_args. */ + bctrl + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + lg r2,(5 * GPR_BYTES)(r1) +#endif + /* Now do the call. + Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,r31 + /* Get the address to call into CTR. */ + mtctr r29 + /* Load all those argument registers. + We have set up a nice stack frame, just load it into registers. */ + lg r3, (LINKAGE_SIZE )(r1) + lg r4, (LINKAGE_SIZE + GPR_BYTES)(r1) + lg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r1) + lg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r1) + nop + lg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r1) + lg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r1) + lg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r1) + lg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r1) + +L1: + /* ... Load all the FP registers. */ + bf 6,L2 /* No floats to load. */ + lfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + lfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + lfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + lfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + lfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + lfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + lfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + lfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + lfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + lfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + lfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + lfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + lfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + +L2: + mr r12,r29 /* Put the target address in r12 as specified. */ + mtctr r12 + nop + nop + + /* Make the call. */ + bctrl + + /* Now, deal with the return value. */ + + /* m64 structure returns can occupy the same set of registers as + would be used to pass such a structure as arg0 - so take care + not to step on any possibly hot regs. */ + + /* Get the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + ; FLAG_RETURNS_NOTHING also covers struct ret-by-ref. + bt 30,L(done_return_value) ; FLAG_RETURNS_NOTHING + bf 27,L(scalar_return_value) ; not FLAG_RETURNS_STRUCT + + /* OK, so we have a struct. */ +#if defined(__ppc64__) + bt 31,L(maybe_return_128) ; FLAG_RETURNS_128BITS, special case + + /* OK, we have to map the return back to a mem struct. + We are about to trample the parents param area, so recover the + return type. r29 is free, since the call is done. */ + lg r29,(LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + + sg r3, (LINKAGE_SIZE )(r28) + sg r4, (LINKAGE_SIZE + GPR_BYTES)(r28) + sg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r28) + sg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r28) + nop + sg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r28) + sg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r28) + sg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + sg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + /* OK, so do the block move - we trust that memcpy will not trample + the fprs... */ + mr r3,r30 ; dest + addi r4,r28,LINKAGE_SIZE ; source + /* The size is a size_t, should be long. */ + lg r5,0(r29) + /* Figure out small structs */ + cmpi 0,r5,4 + bgt L3 ; 1, 2 and 4 bytes have special rules. + cmpi 0,r5,3 + beq L3 ; not 3 + addi r4,r4,8 + subf r4,r5,r4 +L3: + bl _memcpy + + /* ... do we need the FP registers? - recover the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + bf 29,L(done_return_value) /* No floats in the struct. 
*/ + stfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + stfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + stfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + stfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + stfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + stfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + stfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + stfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + stfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + stfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + stfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + stfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + stfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + + mr r3,r29 ; ffi_type * + mr r4,r30 ; dest + addi r5,r28,-SAVE_REGS_SIZE-(13*FPR_SIZE) ; fprs + xor r6,r6,r6 + sg r6,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + addi r6,r28,(LINKAGE_SIZE + 7 * GPR_BYTES) ; point to a zeroed counter. + bl _darwin64_struct_floats_to_mem + + b L(done_return_value) +#else + stw r3,0(r30) ; m32 the only struct return in reg is 4 bytes. +#endif + b L(done_return_value) + +L(fp_return_value): + /* Do we have long double to store? */ + bf 31,L(fd_return_value) ; FLAG_RETURNS_128BITS + stfd f1,0(r30) + stfd f2,FPR_SIZE(r30) + b L(done_return_value) + +L(fd_return_value): + /* Do we have double to store? */ + bf 28,L(float_return_value) + stfd f1,0(r30) + b L(done_return_value) + +L(float_return_value): + /* We only have a float to store. */ + stfs f1,0(r30) + b L(done_return_value) + +L(scalar_return_value): + bt 29,L(fp_return_value) ; FLAG_RETURNS_FP + ; ffi_arg is defined as unsigned long. + sg r3,0(r30) ; Save the reg. + bf 28,L(done_return_value) ; not FLAG_RETURNS_64BITS + +#if defined(__ppc64__) +L(maybe_return_128): + std r3,0(r30) + bf 31,L(done_return_value) ; not FLAG_RETURNS_128BITS + std r4,8(r30) +#else + stw r4,4(r30) +#endif + + /* Fall through. */ + /* We want this at the end to simplify eh epilog computation. */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lg r29,SAVED_LR_OFFSET(r28) + ; epilog + lg r31,-(1 * GPR_BYTES)(r28) + mtlr r29 + lg r30,-(2 * GPR_BYTES)(r28) + lg r29,-(3 * GPR_BYTES)(r28) + lg r28,-(4 * GPR_BYTES)(r28) + lg r1,0(r1) + blr +LFE1: + .align 1 +/* END(_ffi_call_DARWIN) */ + +/* Provide a null definition of _ffi_call_AIX. */ + .text + .globl _ffi_call_AIX + .align 2 +_ffi_call_AIX: + blr +/* END(_ffi_call_AIX) */ + +/* EH stuff. */ + +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + + .globl _ffi_call_DARWIN.eh +_ffi_call_DARWIN.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$3,LFE1-Lstartcode + .g_long L$set$3 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x08 ; uleb128 0x08 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$5,LCFI1-LCFI0 + .long L$set$5 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .byte 0x9f ; DW_CFA_offset, column 0x1f + .byte 0x1 ; uleb128 0x1 + .byte 0x9e ; DW_CFA_offset, column 0x1e + .byte 0x2 ; uleb128 0x2 + .byte 0x9d ; DW_CFA_offset, column 0x1d + .byte 0x3 ; uleb128 0x3 + .byte 0x9c ; DW_CFA_offset, column 0x1c + .byte 0x4 ; uleb128 0x4 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$6,LCFI2-LCFI1 + .long L$set$6 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x1c ; uleb128 0x1c + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin.S new file mode 100644 index 0000000..066eb82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin.S @@ -0,0 +1,378 @@ +/* ----------------------------------------------------------------------- + darwin.S - Copyright (c) 2000 John Hornkvist + Copyright (c) 2004, 2010 Free Software Foundation, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) +#define sgux MODE_CHOICE(stwux,stdux) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. +#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* If there is any FP stuff we make space for all of the regs. 
*/ +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +#define RESULT_BYTES 16 + +/* This should be kept in step with the same value in ffi_darwin.c. */ +#define ASM_NEEDS_REGISTERS 4 +#define SAVE_REGS_SIZE (ASM_NEEDS_REGISTERS * GPR_BYTES) + +#include +#include + +#define JUMPTARGET(name) name +#define L(x) x + + .text + .align 2 + .globl _ffi_prep_args + + .align 2 + .globl _ffi_call_DARWIN + + /* We arrive here with: + r3 = ptr to extended cif. + r4 = -bytes. + r5 = cif flags. + r6 = ptr to return value. + r7 = fn pointer (user func). + r8 = fn pointer (ffi_prep_args). + r9 = ffi_type* for the ret val. */ + +_ffi_call_DARWIN: +Lstartcode: + mr r12,r8 /* We only need r12 until the call, + so it does not have to be saved. */ +LFB1: + /* Save the old stack pointer as AP. */ + mr r8,r1 +LCFI0: + + /* Save the retval type in parents frame. */ + sg r9,(LINKAGE_SIZE+6*GPR_BYTES)(r8) + + /* Allocate the stack space we need. */ + sgux r1,r1,r4 + + /* Save registers we use. */ + mflr r9 + sg r9,SAVED_LR_OFFSET(r8) + + sg r28,-(4 * GPR_BYTES)(r8) + sg r29,-(3 * GPR_BYTES)(r8) + sg r30,-(2 * GPR_BYTES)(r8) + sg r31,-( GPR_BYTES)(r8) + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + sg r2,(5 * GPR_BYTES)(r1) +#endif + +LCFI1: + + /* Save arguments over call. */ + mr r31,r5 /* flags, */ + mr r30,r6 /* rvalue, */ + mr r29,r7 /* function address, */ + mr r28,r8 /* our AP. */ +LCFI2: + /* Call ffi_prep_args. r3 = extended cif, r4 = stack ptr copy. */ + mr r4,r1 + li r9,0 + + mtctr r12 /* r12 holds address of _ffi_prep_args. */ + bctrl + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + lg r2,(5 * GPR_BYTES)(r1) +#endif + /* Now do the call. + Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,r31 + /* Get the address to call into CTR. */ + mtctr r29 + /* Load all those argument registers. + We have set up a nice stack frame, just load it into registers. */ + lg r3, (LINKAGE_SIZE )(r1) + lg r4, (LINKAGE_SIZE + GPR_BYTES)(r1) + lg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r1) + lg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r1) + nop + lg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r1) + lg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r1) + lg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r1) + lg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r1) + +L1: + /* ... Load all the FP registers. */ + bf 6,L2 /* No floats to load. */ + lfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + lfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + lfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + lfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + lfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + lfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + lfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + lfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + lfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + lfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + lfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + lfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + lfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + +L2: + mr r12,r29 /* Put the target address in r12 as specified. */ + mtctr r12 + nop + nop + + /* Make the call. */ + bctrl + + /* Now, deal with the return value. */ + + /* m64 structure returns can occupy the same set of registers as + would be used to pass such a structure as arg0 - so take care + not to step on any possibly hot regs. */ + + /* Get the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + ; FLAG_RETURNS_NOTHING also covers struct ret-by-ref. 
+ bt 30,L(done_return_value) ; FLAG_RETURNS_NOTHING + bf 27,L(scalar_return_value) ; not FLAG_RETURNS_STRUCT + + /* OK, so we have a struct. */ +#if defined(__ppc64__) + bt 31,L(maybe_return_128) ; FLAG_RETURNS_128BITS, special case + + /* OK, we have to map the return back to a mem struct. + We are about to trample the parents param area, so recover the + return type. r29 is free, since the call is done. */ + lg r29,(LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + + sg r3, (LINKAGE_SIZE )(r28) + sg r4, (LINKAGE_SIZE + GPR_BYTES)(r28) + sg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r28) + sg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r28) + nop + sg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r28) + sg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r28) + sg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + sg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + /* OK, so do the block move - we trust that memcpy will not trample + the fprs... */ + mr r3,r30 ; dest + addi r4,r28,LINKAGE_SIZE ; source + /* The size is a size_t, should be long. */ + lg r5,0(r29) + /* Figure out small structs */ + cmpi 0,r5,4 + bgt L3 ; 1, 2 and 4 bytes have special rules. + cmpi 0,r5,3 + beq L3 ; not 3 + addi r4,r4,8 + subf r4,r5,r4 +L3: + bl _memcpy + + /* ... do we need the FP registers? - recover the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + bf 29,L(done_return_value) /* No floats in the struct. */ + stfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + stfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + stfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + stfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + stfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + stfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + stfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + stfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + stfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + stfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + stfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + stfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + stfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + + mr r3,r29 ; ffi_type * + mr r4,r30 ; dest + addi r5,r28,-SAVE_REGS_SIZE-(13*FPR_SIZE) ; fprs + xor r6,r6,r6 + sg r6,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + addi r6,r28,(LINKAGE_SIZE + 7 * GPR_BYTES) ; point to a zeroed counter. + bl _darwin64_struct_floats_to_mem + + b L(done_return_value) +#else + stw r3,0(r30) ; m32 the only struct return in reg is 4 bytes. +#endif + b L(done_return_value) + +L(fp_return_value): + /* Do we have long double to store? */ + bf 31,L(fd_return_value) ; FLAG_RETURNS_128BITS + stfd f1,0(r30) + stfd f2,FPR_SIZE(r30) + b L(done_return_value) + +L(fd_return_value): + /* Do we have double to store? */ + bf 28,L(float_return_value) + stfd f1,0(r30) + b L(done_return_value) + +L(float_return_value): + /* We only have a float to store. */ + stfs f1,0(r30) + b L(done_return_value) + +L(scalar_return_value): + bt 29,L(fp_return_value) ; FLAG_RETURNS_FP + ; ffi_arg is defined as unsigned long. + sg r3,0(r30) ; Save the reg. + bf 28,L(done_return_value) ; not FLAG_RETURNS_64BITS + +#if defined(__ppc64__) +L(maybe_return_128): + std r3,0(r30) + bf 31,L(done_return_value) ; not FLAG_RETURNS_128BITS + std r4,8(r30) +#else + stw r4,4(r30) +#endif + + /* Fall through. */ + /* We want this at the end to simplify eh epilog computation. */ + +L(done_return_value): + /* Restore the registers we used and return. 
*/ + lg r29,SAVED_LR_OFFSET(r28) + ; epilog + lg r31,-(1 * GPR_BYTES)(r28) + mtlr r29 + lg r30,-(2 * GPR_BYTES)(r28) + lg r29,-(3 * GPR_BYTES)(r28) + lg r28,-(4 * GPR_BYTES)(r28) + lg r1,0(r1) + blr +LFE1: + .align 1 +/* END(_ffi_call_DARWIN) */ + +/* Provide a null definition of _ffi_call_AIX. */ + .text + .globl _ffi_call_AIX + .align 2 +_ffi_call_AIX: + blr +/* END(_ffi_call_AIX) */ + +/* EH stuff. */ + +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + + .globl _ffi_call_DARWIN.eh +_ffi_call_DARWIN.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. ; FDE initial location + .set L$set$3,LFE1-Lstartcode + .g_long L$set$3 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x08 ; uleb128 0x08 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$5,LCFI1-LCFI0 + .long L$set$5 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .byte 0x9f ; DW_CFA_offset, column 0x1f + .byte 0x1 ; uleb128 0x1 + .byte 0x9e ; DW_CFA_offset, column 0x1e + .byte 0x2 ; uleb128 0x2 + .byte 0x9d ; DW_CFA_offset, column 0x1d + .byte 0x3 ; uleb128 0x3 + .byte 0x9c ; DW_CFA_offset, column 0x1c + .byte 0x4 ; uleb128 0x4 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$6,LCFI2-LCFI1 + .long L$set$6 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x1c ; uleb128 0x1c + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure 2.S new file mode 100644 index 0000000..3121e6a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure 2.S @@ -0,0 +1,571 @@ +/* ----------------------------------------------------------------------- + darwin_closure.S - Copyright (c) 2002, 2003, 2004, 2010, + Free Software Foundation, Inc. + based on ppc_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#define L(x) x + +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. +#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) + +#define SAVED_CR_OFFSET MODE_CHOICE(4,8) /* save position for CR */ +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* WARNING: if ffi_type is changed... here be monsters. + Offsets of items within the result type. */ +#define FFI_TYPE_TYPE MODE_CHOICE(6,10) +#define FFI_TYPE_ELEM MODE_CHOICE(8,16) + +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +/* biggest m64 struct ret is 8GPRS + 13FPRS = 168 bytes - rounded to 16bytes = 176. */ +#define RESULT_BYTES MODE_CHOICE(16,176) + +; The whole stack frame **MUST** be 16byte-aligned. +#define SAVE_SIZE (((LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)+15) & -16LL) +#define PAD_SIZE (SAVE_SIZE-(LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)) + +#define PARENT_PARM_BASE (SAVE_SIZE+LINKAGE_SIZE) +#define FP_SAVE_BASE (LINKAGE_SIZE+PARAM_AREA) + +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050 +; We no longer need the pic symbol stub for Darwin >= 9. +#define BLCLS_HELP _ffi_closure_helper_DARWIN +#define STRUCT_RETVALUE_P _darwin64_struct_ret_by_value_p +#define PASS_STR_FLOATS _darwin64_pass_struct_floats +#undef WANT_STUB +#else +#define BLCLS_HELP L_ffi_closure_helper_DARWIN$stub +#define STRUCT_RETVALUE_P L_darwin64_struct_ret_by_value_p$stub +#define PASS_STR_FLOATS L_darwin64_pass_struct_floats$stub +#define WANT_STUB +#endif + +/* m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee`s LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent`s frame. 
+ |--------------------------------------------| <+ <<< on entry to + | Result Bytes 16/176 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callees LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< call. + +*/ + + .file "darwin_closure.S" + + .machine machine_choice + + .text + .globl _ffi_closure_ASM + .align LOG2_GPR_BYTES +_ffi_closure_ASM: +LFB1: +Lstartcode: + mflr r0 /* extract return address */ + sg r0,SAVED_LR_OFFSET(r1) /* save the return address */ +LCFI0: + sgu r1,-SAVE_SIZE(r1) /* skip over caller save area + keep stack aligned to 16. */ +LCFI1: + /* We want to build up an area for the parameters passed + in registers. (both floating point and integer) */ + + /* Put gpr 3 to gpr 10 in the parents outgoing area... + ... the remainder of any params that overflowed the regs will + follow here. */ + sg r3, (PARENT_PARM_BASE )(r1) + sg r4, (PARENT_PARM_BASE + GPR_BYTES )(r1) + sg r5, (PARENT_PARM_BASE + GPR_BYTES * 2)(r1) + sg r6, (PARENT_PARM_BASE + GPR_BYTES * 3)(r1) + sg r7, (PARENT_PARM_BASE + GPR_BYTES * 4)(r1) + sg r8, (PARENT_PARM_BASE + GPR_BYTES * 5)(r1) + sg r9, (PARENT_PARM_BASE + GPR_BYTES * 6)(r1) + sg r10,(PARENT_PARM_BASE + GPR_BYTES * 7)(r1) + + /* We save fpr 1 to fpr 14 in our own save frame. */ + stfd f1, (FP_SAVE_BASE )(r1) + stfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + stfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + stfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + stfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + stfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + stfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + stfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + stfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + stfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + stfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + stfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + stfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* Set up registers for the routine that actually does the work + get the context pointer from the trampoline. */ + mr r3,r11 + + /* Now load up the pointer to the result storage. */ + addi r4,r1,(SAVE_SIZE-RESULT_BYTES) + + /* Now load up the pointer to the saved gpr registers. */ + addi r5,r1,PARENT_PARM_BASE + + /* Now load up the pointer to the saved fpr registers. */ + addi r6,r1,FP_SAVE_BASE + + /* Make the call. */ + bl BLCLS_HELP + + /* r3 contains the rtype pointer... save it since we will need + it later. */ + sg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type + lg r0,0(r3) ; size => r0 + lhz r3,FFI_TYPE_TYPE(r3) ; type => r3 + + /* The helper will have intercepted structure returns and inserted + the caller`s destination address for structs returned by ref. */ + + /* r3 contains the return type so use it to look up in a table + so we know how to deal with each type. 
*/ + + addi r5,r1,(SAVE_SIZE-RESULT_BYTES) /* Otherwise, our return is here. */ + bl Lget_ret_type0_addr /* Get pointer to Lret_type0 into LR. */ + mflr r4 /* Move to r4. */ + slwi r3,r3,4 /* Now multiply return type by 16. */ + add r3,r3,r4 /* Add contents of table to table address. */ + mtctr r3 + bctr /* Jump to it. */ +LFE1: +/* Each of the ret_typeX code fragments has to be exactly 16 bytes long + (4 instructions). For cache effectiveness we align to a 16 byte boundary + first. */ + + .align 4 + + nop + nop + nop +Lget_ret_type0_addr: + blrl + +/* case FFI_TYPE_VOID */ +Lret_type0: + b Lfinish + nop + nop + nop + +/* case FFI_TYPE_INT */ +Lret_type1: + lg r3,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_FLOAT */ +Lret_type2: + lfs f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_DOUBLE */ +Lret_type3: + lfd f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_LONGDOUBLE */ +Lret_type4: + lfd f1,0(r5) + lfd f2,8(r5) + b Lfinish + nop + +/* case FFI_TYPE_UINT8 */ +Lret_type5: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT8 */ +Lret_type6: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + extsb r3,r3 + b Lfinish + nop + +/* case FFI_TYPE_UINT16 */ +Lret_type7: +#if defined(__ppc64__) + lhz r3,6(r5) +#else + lhz r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT16 */ +Lret_type8: +#if defined(__ppc64__) + lha r3,6(r5) +#else + lha r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT32 */ +Lret_type9: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT32 */ +Lret_type10: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT64 */ +Lret_type11: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_SINT64 */ +Lret_type12: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_STRUCT */ +Lret_type13: +#if defined(__ppc64__) + lg r3,0(r5) ; we need at least this... + cmpi 0,r0,4 + bgt Lstructend ; not a special small case + b Lsmallstruct ; see if we need more. +#else + cmpwi 0,r0,4 + bgt Lfinish ; not by value + lg r3,0(r5) + b Lfinish +#endif +/* case FFI_TYPE_POINTER */ +Lret_type14: + lg r3,0(r5) + b Lfinish + nop + nop + +#if defined(__ppc64__) +Lsmallstruct: + beq Lfour ; continuation of Lret13. + cmpi 0,r0,3 + beq Lfinish ; don`t adjust this - can`t be any floats here... + srdi r3,r3,48 + cmpi 0,r0,2 + beq Lfinish ; .. or here .. + srdi r3,r3,8 + b Lfinish ; .. or here. + +Lfour: + lg r6,LINKAGE_SIZE(r1) ; get the result type + lg r6,FFI_TYPE_ELEM(r6) ; elements array pointer + lg r6,0(r6) ; first element + lhz r0,FFI_TYPE_TYPE(r6) ; OK go the type + cmpi 0,r0,2 ; FFI_TYPE_FLOAT + bne Lfourint + lfs f1,0(r5) ; just one float in the struct. + b Lfinish + +Lfourint: + srdi r3,r3,32 ; four bytes. + b Lfinish + +Lstructend: + lg r3,LINKAGE_SIZE(r1) ; get the result type + bl STRUCT_RETVALUE_P + cmpi 0,r3,0 + beq Lfinish ; nope. + /* Recover a pointer to the results. */ + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we need at least this... + lg r4,8(r11) + cmpi 0,r0,16 + beq Lfinish ; special case 16 bytes we don't consider floats. + + /* OK, frustratingly, the process of saving the struct to mem might have + messed with the FPRs, so we have to re-load them :(. 
+ We`ll use our FPRs space again - calling: + void darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) + We`ll temporarily pinch the first two slots of the param area for local + vars used by the routine. */ + xor r6,r6,r6 + addi r5,r1,PARENT_PARM_BASE ; some space + sg r6,0(r5) ; *nfpr zeroed. + addi r6,r5,8 ; **fprs + addi r3,r1,FP_SAVE_BASE ; pointer to FPRs space + sg r3,0(r6) + mr r4,r11 ; the struct is here... + lg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type. + bl PASS_STR_FLOATS ; get struct floats into FPR save space. + /* See if we used any floats */ + lwz r0,(SAVE_SIZE-RESULT_BYTES)(r1) + cmpi 0,r0,0 + beq Lstructints ; nope. + /* OK load `em up... */ + lfd f1, (FP_SAVE_BASE )(r1) + lfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + lfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + lfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + lfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + lfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + lfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + lfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + lfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + lfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + lfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + lfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + lfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* point back at our saved struct. */ +Lstructints: + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we end up picking the + lg r4,8(r11) ; first two again. + lg r5,16(r11) + lg r6,24(r11) + lg r7,32(r11) + lg r8,40(r11) + lg r9,48(r11) + lg r10,56(r11) +#endif + +/* case done */ +Lfinish: + addi r1,r1,SAVE_SIZE /* Restore stack pointer. */ + lg r0,SAVED_LR_OFFSET(r1) /* Get return address. */ + mtlr r0 /* Reset link register. */ + blr +Lendcode: + .align 1 + +/* END(ffi_closure_ASM) */ + +/* EH frame stuff. */ +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) +/* 176, 400 */ +#define EH_FRAME_OFFSETA MODE_CHOICE(176,0x90) +#define EH_FRAME_OFFSETB MODE_CHOICE(1,3) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + .globl _ffi_closure_ASM.eh +_ffi_closure_ASM.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length + +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$2,LFE1-Lstartcode + .g_long L$set$2 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$3,LCFI1-LCFI0 + .long L$set$3 + .byte 0xe ; DW_CFA_def_cfa_offset + .byte EH_FRAME_OFFSETA,EH_FRAME_OFFSETB ; uleb128 176,1/190,3 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + +#ifdef WANT_STUB + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_ffi_closure_helper_DARWIN$stub: + .indirect_symbol _ffi_closure_helper_DARWIN + mflr r0 + bcl 20,31,"L1$spb" +"L1$spb": + mflr r11 + addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb") + mtlr r0 + lwzu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_ffi_closure_helper_DARWIN$lazy_ptr: + .indirect_symbol _ffi_closure_helper_DARWIN + .g_long dyld_stub_binding_helper + +#if defined(__ppc64__) + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_struct_ret_by_value_p$stub: + .indirect_symbol _darwin64_struct_ret_by_value_p + mflr r0 + bcl 20,31,"L2$spb" +"L2$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_struct_ret_by_value_p$lazy_ptr: + .indirect_symbol _darwin64_struct_ret_by_value_p + .g_long dyld_stub_binding_helper + + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_pass_struct_floats$stub: + .indirect_symbol _darwin64_pass_struct_floats + mflr r0 + bcl 20,31,"L3$spb" +"L3$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_pass_struct_floats$lazy_ptr: + .indirect_symbol _darwin64_pass_struct_floats + .g_long dyld_stub_binding_helper +# endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure.S new file mode 100644 index 0000000..3121e6a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure.S @@ -0,0 +1,571 @@ +/* ----------------------------------------------------------------------- + darwin_closure.S - Copyright (c) 2002, 2003, 2004, 2010, + Free Software Foundation, Inc. + based on ppc_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#define L(x) x + +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. +#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) + +#define SAVED_CR_OFFSET MODE_CHOICE(4,8) /* save position for CR */ +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* WARNING: if ffi_type is changed... here be monsters. + Offsets of items within the result type. */ +#define FFI_TYPE_TYPE MODE_CHOICE(6,10) +#define FFI_TYPE_ELEM MODE_CHOICE(8,16) + +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +/* biggest m64 struct ret is 8GPRS + 13FPRS = 168 bytes - rounded to 16bytes = 176. */ +#define RESULT_BYTES MODE_CHOICE(16,176) + +; The whole stack frame **MUST** be 16byte-aligned. +#define SAVE_SIZE (((LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)+15) & -16LL) +#define PAD_SIZE (SAVE_SIZE-(LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)) + +#define PARENT_PARM_BASE (SAVE_SIZE+LINKAGE_SIZE) +#define FP_SAVE_BASE (LINKAGE_SIZE+PARAM_AREA) + +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050 +; We no longer need the pic symbol stub for Darwin >= 9. +#define BLCLS_HELP _ffi_closure_helper_DARWIN +#define STRUCT_RETVALUE_P _darwin64_struct_ret_by_value_p +#define PASS_STR_FLOATS _darwin64_pass_struct_floats +#undef WANT_STUB +#else +#define BLCLS_HELP L_ffi_closure_helper_DARWIN$stub +#define STRUCT_RETVALUE_P L_darwin64_struct_ret_by_value_p$stub +#define PASS_STR_FLOATS L_darwin64_pass_struct_floats$stub +#define WANT_STUB +#endif + +/* m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee`s LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent`s frame. 
+ |--------------------------------------------| <+ <<< on entry to + | Result Bytes 16/176 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callees LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< call. + +*/ + + .file "darwin_closure.S" + + .machine machine_choice + + .text + .globl _ffi_closure_ASM + .align LOG2_GPR_BYTES +_ffi_closure_ASM: +LFB1: +Lstartcode: + mflr r0 /* extract return address */ + sg r0,SAVED_LR_OFFSET(r1) /* save the return address */ +LCFI0: + sgu r1,-SAVE_SIZE(r1) /* skip over caller save area + keep stack aligned to 16. */ +LCFI1: + /* We want to build up an area for the parameters passed + in registers. (both floating point and integer) */ + + /* Put gpr 3 to gpr 10 in the parents outgoing area... + ... the remainder of any params that overflowed the regs will + follow here. */ + sg r3, (PARENT_PARM_BASE )(r1) + sg r4, (PARENT_PARM_BASE + GPR_BYTES )(r1) + sg r5, (PARENT_PARM_BASE + GPR_BYTES * 2)(r1) + sg r6, (PARENT_PARM_BASE + GPR_BYTES * 3)(r1) + sg r7, (PARENT_PARM_BASE + GPR_BYTES * 4)(r1) + sg r8, (PARENT_PARM_BASE + GPR_BYTES * 5)(r1) + sg r9, (PARENT_PARM_BASE + GPR_BYTES * 6)(r1) + sg r10,(PARENT_PARM_BASE + GPR_BYTES * 7)(r1) + + /* We save fpr 1 to fpr 14 in our own save frame. */ + stfd f1, (FP_SAVE_BASE )(r1) + stfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + stfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + stfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + stfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + stfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + stfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + stfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + stfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + stfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + stfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + stfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + stfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* Set up registers for the routine that actually does the work + get the context pointer from the trampoline. */ + mr r3,r11 + + /* Now load up the pointer to the result storage. */ + addi r4,r1,(SAVE_SIZE-RESULT_BYTES) + + /* Now load up the pointer to the saved gpr registers. */ + addi r5,r1,PARENT_PARM_BASE + + /* Now load up the pointer to the saved fpr registers. */ + addi r6,r1,FP_SAVE_BASE + + /* Make the call. */ + bl BLCLS_HELP + + /* r3 contains the rtype pointer... save it since we will need + it later. */ + sg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type + lg r0,0(r3) ; size => r0 + lhz r3,FFI_TYPE_TYPE(r3) ; type => r3 + + /* The helper will have intercepted structure returns and inserted + the caller`s destination address for structs returned by ref. */ + + /* r3 contains the return type so use it to look up in a table + so we know how to deal with each type. 
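+
+      (Illustrative aside, not part of the upstream comment: the lookup is a
+      computed jump into a table of fixed-size fragments.  Since each
+      Lret_typeN case below is padded to exactly four instructions (16 bytes),
+      the slwi/add/mtctr/bctr sequence amounts to the following C-style
+      sketch, with Lret_type0 standing in for the table base:
+
+        handler = (void (*)(void)) ((char *) Lret_type0 + (type << 4));
+        handler ();
+
+      hence the shift-left by 4.)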
*/ + + addi r5,r1,(SAVE_SIZE-RESULT_BYTES) /* Otherwise, our return is here. */ + bl Lget_ret_type0_addr /* Get pointer to Lret_type0 into LR. */ + mflr r4 /* Move to r4. */ + slwi r3,r3,4 /* Now multiply return type by 16. */ + add r3,r3,r4 /* Add contents of table to table address. */ + mtctr r3 + bctr /* Jump to it. */ +LFE1: +/* Each of the ret_typeX code fragments has to be exactly 16 bytes long + (4 instructions). For cache effectiveness we align to a 16 byte boundary + first. */ + + .align 4 + + nop + nop + nop +Lget_ret_type0_addr: + blrl + +/* case FFI_TYPE_VOID */ +Lret_type0: + b Lfinish + nop + nop + nop + +/* case FFI_TYPE_INT */ +Lret_type1: + lg r3,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_FLOAT */ +Lret_type2: + lfs f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_DOUBLE */ +Lret_type3: + lfd f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_LONGDOUBLE */ +Lret_type4: + lfd f1,0(r5) + lfd f2,8(r5) + b Lfinish + nop + +/* case FFI_TYPE_UINT8 */ +Lret_type5: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT8 */ +Lret_type6: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + extsb r3,r3 + b Lfinish + nop + +/* case FFI_TYPE_UINT16 */ +Lret_type7: +#if defined(__ppc64__) + lhz r3,6(r5) +#else + lhz r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT16 */ +Lret_type8: +#if defined(__ppc64__) + lha r3,6(r5) +#else + lha r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT32 */ +Lret_type9: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT32 */ +Lret_type10: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT64 */ +Lret_type11: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_SINT64 */ +Lret_type12: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_STRUCT */ +Lret_type13: +#if defined(__ppc64__) + lg r3,0(r5) ; we need at least this... + cmpi 0,r0,4 + bgt Lstructend ; not a special small case + b Lsmallstruct ; see if we need more. +#else + cmpwi 0,r0,4 + bgt Lfinish ; not by value + lg r3,0(r5) + b Lfinish +#endif +/* case FFI_TYPE_POINTER */ +Lret_type14: + lg r3,0(r5) + b Lfinish + nop + nop + +#if defined(__ppc64__) +Lsmallstruct: + beq Lfour ; continuation of Lret13. + cmpi 0,r0,3 + beq Lfinish ; don`t adjust this - can`t be any floats here... + srdi r3,r3,48 + cmpi 0,r0,2 + beq Lfinish ; .. or here .. + srdi r3,r3,8 + b Lfinish ; .. or here. + +Lfour: + lg r6,LINKAGE_SIZE(r1) ; get the result type + lg r6,FFI_TYPE_ELEM(r6) ; elements array pointer + lg r6,0(r6) ; first element + lhz r0,FFI_TYPE_TYPE(r6) ; OK go the type + cmpi 0,r0,2 ; FFI_TYPE_FLOAT + bne Lfourint + lfs f1,0(r5) ; just one float in the struct. + b Lfinish + +Lfourint: + srdi r3,r3,32 ; four bytes. + b Lfinish + +Lstructend: + lg r3,LINKAGE_SIZE(r1) ; get the result type + bl STRUCT_RETVALUE_P + cmpi 0,r3,0 + beq Lfinish ; nope. + /* Recover a pointer to the results. */ + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we need at least this... + lg r4,8(r11) + cmpi 0,r0,16 + beq Lfinish ; special case 16 bytes we don't consider floats. + + /* OK, frustratingly, the process of saving the struct to mem might have + messed with the FPRs, so we have to re-load them :(. 
+ We`ll use our FPRs space again - calling: + void darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) + We`ll temporarily pinch the first two slots of the param area for local + vars used by the routine. */ + xor r6,r6,r6 + addi r5,r1,PARENT_PARM_BASE ; some space + sg r6,0(r5) ; *nfpr zeroed. + addi r6,r5,8 ; **fprs + addi r3,r1,FP_SAVE_BASE ; pointer to FPRs space + sg r3,0(r6) + mr r4,r11 ; the struct is here... + lg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type. + bl PASS_STR_FLOATS ; get struct floats into FPR save space. + /* See if we used any floats */ + lwz r0,(SAVE_SIZE-RESULT_BYTES)(r1) + cmpi 0,r0,0 + beq Lstructints ; nope. + /* OK load `em up... */ + lfd f1, (FP_SAVE_BASE )(r1) + lfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + lfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + lfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + lfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + lfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + lfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + lfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + lfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + lfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + lfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + lfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + lfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* point back at our saved struct. */ +Lstructints: + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we end up picking the + lg r4,8(r11) ; first two again. + lg r5,16(r11) + lg r6,24(r11) + lg r7,32(r11) + lg r8,40(r11) + lg r9,48(r11) + lg r10,56(r11) +#endif + +/* case done */ +Lfinish: + addi r1,r1,SAVE_SIZE /* Restore stack pointer. */ + lg r0,SAVED_LR_OFFSET(r1) /* Get return address. */ + mtlr r0 /* Reset link register. */ + blr +Lendcode: + .align 1 + +/* END(ffi_closure_ASM) */ + +/* EH frame stuff. */ +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) +/* 176, 400 */ +#define EH_FRAME_OFFSETA MODE_CHOICE(176,0x90) +#define EH_FRAME_OFFSETB MODE_CHOICE(1,3) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + .globl _ffi_closure_ASM.eh +_ffi_closure_ASM.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length + +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$2,LFE1-Lstartcode + .g_long L$set$2 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$3,LCFI1-LCFI0 + .long L$set$3 + .byte 0xe ; DW_CFA_def_cfa_offset + .byte EH_FRAME_OFFSETA,EH_FRAME_OFFSETB ; uleb128 176,1/190,3 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + +#ifdef WANT_STUB + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_ffi_closure_helper_DARWIN$stub: + .indirect_symbol _ffi_closure_helper_DARWIN + mflr r0 + bcl 20,31,"L1$spb" +"L1$spb": + mflr r11 + addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb") + mtlr r0 + lwzu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_ffi_closure_helper_DARWIN$lazy_ptr: + .indirect_symbol _ffi_closure_helper_DARWIN + .g_long dyld_stub_binding_helper + +#if defined(__ppc64__) + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_struct_ret_by_value_p$stub: + .indirect_symbol _darwin64_struct_ret_by_value_p + mflr r0 + bcl 20,31,"L2$spb" +"L2$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_struct_ret_by_value_p$lazy_ptr: + .indirect_symbol _darwin64_struct_ret_by_value_p + .g_long dyld_stub_binding_helper + + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_pass_struct_floats$stub: + .indirect_symbol _darwin64_pass_struct_floats + mflr r0 + bcl 20,31,"L3$spb" +"L3$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_pass_struct_floats$lazy_ptr: + .indirect_symbol _darwin64_pass_struct_floats + .g_long dyld_stub_binding_helper +# endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi 2.c new file mode 100644 index 0000000..a19bcbb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi 2.c @@ -0,0 +1,175 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" +#include "ffi_common.h" +#include "ffi_powerpc.h" + +#if HAVE_LONG_DOUBLE_VARIANT +/* Adjust ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types (ffi_abi abi) +{ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# ifdef POWERPC64 + ffi_prep_types_linux64 (abi); +# else + ffi_prep_types_sysv (abi); +# endif +# endif +} +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64 (cif); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif *cif, + unsigned int nfixedargs MAYBE_UNUSED, + unsigned int ntotalargs MAYBE_UNUSED) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64_var (cif, nfixedargs, ntotalargs); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +static void +ffi_call_int (ffi_cif *cif, + void (*fn) (void), + void *rvalue, + void **avalue, + void *closure) +{ + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead returns + them in memory. + + We bounce-buffer SYSV small struct return values so that sysv.S + can write r3 and r4 to memory without worrying about struct size. + + For ELFv2 ABI, use a bounce buffer for homogeneous structs too, + for similar reasons. This bounce buffer must be aligned to 16 + bytes for use with homogeneous structs of vectors (float128). */ + float128 smst_buffer[8]; + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + ecif.rvalue = rvalue; + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + ecif.rvalue = smst_buffer; + /* Ensure that we have a valid struct return value. + FIXME: Isn't this just papering over a user problem? */ + else if (!rvalue && cif->rtype->type == FFI_TYPE_STRUCT) + ecif.rvalue = alloca (cif->rtype->size); + +#ifdef POWERPC64 + ffi_call_LINUX64 (&ecif, fn, ecif.rvalue, cif->flags, closure, + -(long) cif->bytes); +#else + ffi_call_SYSV (&ecif, fn, ecif.rvalue, cif->flags, closure, -cif->bytes); +#endif + + /* Check for a bounce-buffered return value */ + if (rvalue && ecif.rvalue == smst_buffer) + { + unsigned int rsize = cif->rtype->size; +#ifndef __LITTLE_ENDIAN__ + /* The SYSV ABI returns a structure of up to 4 bytes in size + left-padded in r3. */ +# ifndef POWERPC64 + if (rsize <= 4) + memcpy (rvalue, (char *) smst_buffer + 4 - rsize, rsize); + else +# endif + /* The SYSV ABI returns a structure of up to 8 bytes in size + left-padded in r3/r4, and the ELFv2 ABI similarly returns a + structure of up to 8 bytes in size left-padded in r3. But + note that a structure of a single float is not paddded. 
*/ + if (rsize <= 8 && (cif->flags & FLAG_RETURNS_FP) == 0) + memcpy (rvalue, (char *) smst_buffer + 8 - rsize, rsize); + else +#endif + memcpy (rvalue, smst_buffer, rsize); + } +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#ifdef POWERPC64 + return ffi_prep_closure_loc_linux64 (closure, cif, fun, user_data, codeloc); +#else + return ffi_prep_closure_loc_sysv (closure, cif, fun, user_data, codeloc); +#endif +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ +#ifdef POWERPC64 + closure->tramp = ffi_go_closure_linux64; +#else + closure->tramp = ffi_go_closure_sysv; +#endif + closure->cif = cif; + closure->fun = fun; + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi.c new file mode 100644 index 0000000..a19bcbb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi.c @@ -0,0 +1,175 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" +#include "ffi_common.h" +#include "ffi_powerpc.h" + +#if HAVE_LONG_DOUBLE_VARIANT +/* Adjust ffi_type_longdouble. 
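+   (Illustrative aside, not part of the upstream comment: when long double is
+   a distinct type, this simply delegates to ffi_prep_types_linux64 or
+   ffi_prep_types_sysv below so the ABI-specific code can adjust the
+   long double description for the selected ABI.)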
*/ +void FFI_HIDDEN +ffi_prep_types (ffi_abi abi) +{ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# ifdef POWERPC64 + ffi_prep_types_linux64 (abi); +# else + ffi_prep_types_sysv (abi); +# endif +# endif +} +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64 (cif); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif *cif, + unsigned int nfixedargs MAYBE_UNUSED, + unsigned int ntotalargs MAYBE_UNUSED) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64_var (cif, nfixedargs, ntotalargs); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +static void +ffi_call_int (ffi_cif *cif, + void (*fn) (void), + void *rvalue, + void **avalue, + void *closure) +{ + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead returns + them in memory. + + We bounce-buffer SYSV small struct return values so that sysv.S + can write r3 and r4 to memory without worrying about struct size. + + For ELFv2 ABI, use a bounce buffer for homogeneous structs too, + for similar reasons. This bounce buffer must be aligned to 16 + bytes for use with homogeneous structs of vectors (float128). */ + float128 smst_buffer[8]; + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + ecif.rvalue = rvalue; + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + ecif.rvalue = smst_buffer; + /* Ensure that we have a valid struct return value. + FIXME: Isn't this just papering over a user problem? */ + else if (!rvalue && cif->rtype->type == FFI_TYPE_STRUCT) + ecif.rvalue = alloca (cif->rtype->size); + +#ifdef POWERPC64 + ffi_call_LINUX64 (&ecif, fn, ecif.rvalue, cif->flags, closure, + -(long) cif->bytes); +#else + ffi_call_SYSV (&ecif, fn, ecif.rvalue, cif->flags, closure, -cif->bytes); +#endif + + /* Check for a bounce-buffered return value */ + if (rvalue && ecif.rvalue == smst_buffer) + { + unsigned int rsize = cif->rtype->size; +#ifndef __LITTLE_ENDIAN__ + /* The SYSV ABI returns a structure of up to 4 bytes in size + left-padded in r3. */ +# ifndef POWERPC64 + if (rsize <= 4) + memcpy (rvalue, (char *) smst_buffer + 4 - rsize, rsize); + else +# endif + /* The SYSV ABI returns a structure of up to 8 bytes in size + left-padded in r3/r4, and the ELFv2 ABI similarly returns a + structure of up to 8 bytes in size left-padded in r3. But + note that a structure of a single float is not paddded. 
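+
+     (Worked example added for clarity, under the big-endian layout this block
+     is guarded by: a 3-byte struct comes back right-justified in the return
+     register, so when the return registers are stored to the bounce buffer
+     the useful bytes land at the tail of the word; hence the copies start at
+     smst_buffer + 4 - rsize, or + 8 - rsize below, rather than at offset 0.)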
*/ + if (rsize <= 8 && (cif->flags & FLAG_RETURNS_FP) == 0) + memcpy (rvalue, (char *) smst_buffer + 8 - rsize, rsize); + else +#endif + memcpy (rvalue, smst_buffer, rsize); + } +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#ifdef POWERPC64 + return ffi_prep_closure_loc_linux64 (closure, cif, fun, user_data, codeloc); +#else + return ffi_prep_closure_loc_sysv (closure, cif, fun, user_data, codeloc); +#endif +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ +#ifdef POWERPC64 + closure->tramp = ffi_go_closure_linux64; +#else + closure->tramp = ffi_go_closure_sysv; +#endif + closure->cif = cif; + closure->fun = fun; + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin 2.c new file mode 100644 index 0000000..61a18c4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin 2.c @@ -0,0 +1,1440 @@ +/* ----------------------------------------------------------------------- + ffi_darwin.c + + Copyright (C) 1998 Geoffrey Keating + Copyright (C) 2001 John Hornkvist + Copyright (C) 2002, 2006, 2007, 2009, 2010 Free Software Foundation, Inc. + + FFI support for Darwin and AIX. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +extern void ffi_closure_ASM (void); +extern void ffi_go_closure_ASM (void); + +enum { + /* The assembly depends on these exact flags. + For Darwin64 (when FLAG_RETURNS_STRUCT is set): + FLAG_RETURNS_FP indicates that the structure embeds FP data. + FLAG_RETURNS_128BITS signals a special struct size that is not + expanded for float content. 
*/ + FLAG_RETURNS_128BITS = 1 << (31-31), /* These go in cr7 */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_64BITS = 1 << (31-28), + + FLAG_RETURNS_STRUCT = 1 << (31-27), /* This goes in cr6 */ + + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4) +}; + +/* About the DARWIN ABI. */ +enum { + NUM_GPR_ARG_REGISTERS = 8, + NUM_FPR_ARG_REGISTERS = 13, + LINKAGE_AREA_GPRS = 6 +}; + +enum { ASM_NEEDS_REGISTERS = 4 }; /* r28-r31 */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments. + + m32/m64 + + The stack layout we want looks like this: + + | Return address from ffi_call_DARWIN | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4/8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | ASM_NEEDS_REGISTERS=r28-r31 4*(4/8) | | ffi_call_DARWIN + |--------------------------------------------| | + | When we have any FP activity... the | | + | FPRs occupy NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 from high to low addr. | | + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_call_DARWIN + + */ + +#if defined(POWERPC_DARWIN64) +static void +darwin64_pass_struct_by_value + (ffi_type *, char *, unsigned, unsigned *, double **, unsigned long **); +#endif + +/* This depends on GPR_SIZE = sizeof (unsigned long) */ + +void +ffi_prep_args (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + const unsigned nargs = ecif->cif->nargs; +#if !defined(POWERPC_DARWIN64) + const ffi_abi abi = ecif->cif->abi; +#endif + + /* 'stacktop' points at the previous backchain pointer. */ + unsigned long *const stacktop = stack + (bytes / sizeof(unsigned long)); + + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + double *fpr_base = (double *) (stacktop - ASM_NEEDS_REGISTERS) - NUM_FPR_ARG_REGISTERS; + int gp_count = 0, fparg_count = 0; + + /* 'next_arg' grows up as we put parameters in it. */ + unsigned long *next_arg = stack + LINKAGE_AREA_GPRS; /* 6 reserved positions. */ + + int i; + double double_tmp; + void **p_argv = ecif->avalue; + unsigned long gprvalue; + ffi_type** ptr = ecif->cif->arg_types; +#if !defined(POWERPC_DARWIN64) + char *dest_cpy; +#endif + unsigned size_al = 0; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT(((unsigned) (char *) stack & 0xF) == 0); + FFI_ASSERT(((unsigned) (char *) stacktop & 0xF) == 0); + FFI_ASSERT((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. + Rule: + Return values are referenced by r3, so r4 is the first parameter. 
*/ + + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + for (i = nargs; i > 0; i--, ptr++, p_argv++) + { + switch ((*ptr)->type) + { + /* If a floating-point parameter appears before all of the general- + purpose registers are filled, the corresponding GPRs that match + the size of the floating-point parameter are skipped. */ + case FFI_TYPE_FLOAT: + double_tmp = *(float *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; +#if defined(POWERPC_DARWIN) + *(float *)next_arg = *(float *) *p_argv; +#else + *(double *)next_arg = double_tmp; +#endif + next_arg++; + gp_count++; + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_DOUBLE: + double_tmp = *(double *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *)next_arg = double_tmp; +#ifdef POWERPC64 + next_arg++; + gp_count++; +#else + next_arg += 2; + gp_count += 2; +#endif + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +# if defined(POWERPC64) && !defined(POWERPC_DARWIN64) + /* ??? This will exceed the regs count when the value starts at fp13 + and it will not put the extra bit on the stack. */ + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *(long double *) fpr_base++ = *(long double *) *p_argv; + else + *(long double *) next_arg = *(long double *) *p_argv; + next_arg += 2; + fparg_count += 2; +# else + double_tmp = ((double *) *p_argv)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; + double_tmp = ((double *) *p_argv)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; +# endif + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; +#endif + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + gprvalue = *(long long *) *p_argv; + goto putgpr; +#else + *(long long *) next_arg = *(long long *) *p_argv; + next_arg += 2; + gp_count += 2; +#endif + break; + case FFI_TYPE_POINTER: + gprvalue = *(unsigned long *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT8: + gprvalue = *(unsigned char *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = *(signed char *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = *(unsigned short *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = *(signed short *) *p_argv; + goto putgpr; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + next_arg = (unsigned long *)FFI_ALIGN((char *)next_arg, (*ptr)->alignment); + darwin64_pass_struct_by_value (*ptr, (char *) *p_argv, + (unsigned) size_al, + (unsigned int *) &fparg_count, + &fpr_base, &next_arg); +#else + dest_cpy = (char *) next_arg; + + /* If the first member of the struct is a double, then include enough + padding in the struct size to align it to double-word. 
*/ + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); + +# if defined(POWERPC64) + FFI_ASSERT (abi != FFI_DARWIN); + memcpy ((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. + Structures with 3 byte in size are padded upwards. */ + if (size_al < 3 && abi == FFI_DARWIN) + dest_cpy += 4 - size_al; + + memcpy((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = *(signed int *) *p_argv; + goto putgpr; + + case FFI_TYPE_UINT32: + gprvalue = *(unsigned int *) *p_argv; + putgpr: + *next_arg++ = gprvalue; + gp_count++; + break; + default: + break; + } + } + + /* Check that we didn't overrun the stack... */ + /* FFI_ASSERT(gpr_base <= stacktop - ASM_NEEDS_REGISTERS); + FFI_ASSERT((unsigned *)fpr_base + <= stacktop - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS); + FFI_ASSERT(flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4); */ +} + +#if defined(POWERPC_DARWIN64) + +/* See if we can put some of the struct into fprs. + This should not be called for structures of size 16 bytes, since these are not + broken out this way. */ +static void +darwin64_scan_struct_for_floats (ffi_type *s, unsigned *nfpr) +{ + int i; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p = s->elements[i]; + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_scan_struct_for_floats (p, nfpr); + break; + case FFI_TYPE_LONGDOUBLE: + (*nfpr) += 2; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + (*nfpr) += 1; + break; + default: + break; + } + } +} + +static int +darwin64_struct_size_exceeds_gprs_p (ffi_type *s, char *src, unsigned *nfpr) +{ + unsigned struct_offset=0, i; + + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + if (darwin64_struct_size_exceeds_gprs_p (p, item_base, nfpr)) + return 1; + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + default: + /* If we try and place any item, that is non-float, once we've + exceeded the 8 GPR mark, then we can't fit the struct. */ + if ((unsigned long)item_base >= 8*8) + return 1; + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return 0; +} + +/* Can this struct be returned by value? */ +int +darwin64_struct_ret_by_value_p (ffi_type *s) +{ + unsigned nfp = 0; + + FFI_ASSERT (s && s->type == FFI_TYPE_STRUCT); + + /* The largest structure we can return is 8long + 13 doubles. */ + if (s->size > 168) + return 0; + + /* We can't pass more than 13 floats. */ + darwin64_scan_struct_for_floats (s, &nfp); + if (nfp > 13) + return 0; + + /* If there are not too many floats, and the struct is + small enough to accommodate in the GPRs, then it must be OK. */ + if (s->size <= 64) + return 1; + + /* Well, we have to look harder. 
*/ + nfp = 0; + if (darwin64_struct_size_exceeds_gprs_p (s, NULL, &nfp)) + return 0; + + return 1; +} + +void +darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) +{ + int i; + double *fpr_base = *fprs; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_pass_struct_floats (p, item_base, nfpr, + &fpr_base); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = (double) *(float *)item_base; + (*nfpr) += 1; + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + /* Update the scores. */ + *fprs = fpr_base; +} + +/* Darwin64 special rules. + Break out a struct into params and float registers. */ +static void +darwin64_pass_struct_by_value (ffi_type *s, char *src, unsigned size, + unsigned *nfpr, double **fprs, unsigned long **arg) +{ + unsigned long *next_arg = *arg; + char *dest_cpy = (char *)next_arg; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + if (!size) + return; + + /* First... special cases. */ + if (size < 3 + || (size == 4 + && s->elements[0] + && s->elements[0]->type != FFI_TYPE_FLOAT)) + { + /* Must be at least one GPR, padding is unspecified in value, + let's make it zero. */ + *next_arg = 0UL; + dest_cpy += 8 - size; + memcpy ((char *) dest_cpy, src, size); + next_arg++; + } + else if (size == 16) + { + memcpy ((char *) dest_cpy, src, size); + next_arg += 2; + } + else + { + /* now the general case, we consider embedded floats. */ + memcpy ((char *) dest_cpy, src, size); + darwin64_pass_struct_floats (s, src, nfpr, fprs); + next_arg += (size+7)/8; + } + + *arg = next_arg; +} + +double * +darwin64_struct_floats_to_mem (ffi_type *s, char *dest, double *fprs, unsigned *nf) +{ + int i; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = dest + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + fprs = darwin64_struct_floats_to_mem (p, item_base, fprs, nf); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + break; + case FFI_TYPE_FLOAT: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(float *)item_base = (float) *fprs++ ; + (*nf) += 1; + } + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return fprs; +} + +#endif + +/* Adjust the size of S to be correct for Darwin. 
+ On Darwin m32, the first field of a structure has natural alignment. + On Darwin m64, all fields have natural alignment. */ + +static void +darwin_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + if (p->type == FFI_TYPE_STRUCT) + darwin_adjust_aggregate_sizes (p); +#if defined(POWERPC_DARWIN64) + /* Natural alignment for all items. */ + align = p->alignment; +#else + /* Natural alignment for the first item... */ + if (i == 0) + align = p->alignment; + else if (p->alignment == 16 || p->alignment < 4) + /* .. subsequent items with vector or align < 4 have natural align. */ + align = p->alignment; + else + /* .. or align is 4. */ + align = 4; +#endif + /* Pad, if necessary, before adding the current item. */ + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + /* This should not be necessary on m64, but harmless. */ + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Adjust the size of S to be correct for AIX. + Word-align double unless it is the first member of a structure. */ + +static void +aix_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + aix_adjust_aggregate_sizes (p); + align = p->alignment; + if (i != 0 && p->type == FFI_TYPE_DOUBLE) + align = 4; + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* All this is for the DARWIN ABI. */ + unsigned i; + ffi_type **ptr; + unsigned bytes; + unsigned fparg_count = 0, intarg_count = 0; + unsigned flags = 0; + unsigned size_al = 0; + + /* All the machine-independent calculation of cif->bytes will be wrong. + All the calculation of structure sizes will also be wrong. + Redo the calculation for DARWIN. */ + + if (cif->abi == FFI_DARWIN) + { + darwin_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + darwin_adjust_aggregate_sizes (cif->arg_types[i]); + } + + if (cif->abi == FFI_AIX) + { + aix_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + aix_adjust_aggregate_sizes (cif->arg_types[i]); + } + + /* Space for the frame pointer, callee's LR, CR, etc, and for + the asm's temp regs. */ + + bytes = (LINKAGE_AREA_GPRS + ASM_NEEDS_REGISTERS) * sizeof(unsigned long); + + /* Return value handling. + The rules m32 are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values [??? 
and structures between 5 and 8 bytes] are + returned in gpr3 and gpr4; + - Single/double FP values are returned in fpr1; + - Long double FP (if not equivalent to double) values are returned in + fpr1 and fpr2; + m64: + - 64-bit or smaller integral values are returned in GPR3 + - Single/double FP values are returned in fpr1; + - Long double FP values are returned in fpr1 and fpr2; + m64 Structures: + - If the structure could be accommodated in registers were it to be the + first argument to a routine, then it is returned in those registers. + m32/m64 structures otherwise: + - Larger structures values are allocated space and a pointer is passed + as the first argument. */ + switch (cif->rtype->type) + { + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + flags |= FLAG_RETURNS_FP; + break; +#endif + + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + case FFI_TYPE_POINTER: +#endif + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if defined(POWERPC_DARWIN64) + { + /* Can we fit the struct into regs? */ + if (darwin64_struct_ret_by_value_p (cif->rtype)) + { + unsigned nfpr = 0; + flags |= FLAG_RETURNS_STRUCT; + if (cif->rtype->size != 16) + darwin64_scan_struct_for_floats (cif->rtype, &nfpr) ; + else + flags |= FLAG_RETURNS_128BITS; + /* Will be 0 for 16byte struct. */ + if (nfpr) + flags |= FLAG_RETURNS_FP; + } + else /* By ref. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size <= 4) + flags |= FLAG_RETURNS_STRUCT; + else /* else by reference. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } +#else /* assume we pass by ref. */ + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; +#endif + break; + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. + ??? Structures are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. + For m64 the count is effectively of half-GPRs. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned align_words; + switch ((*ptr)->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + fparg_count++; +#if !defined(POWERPC_DARWIN64) + /* If this FP arg is going on the stack, it must be + 8-byte-aligned. */ + if (fparg_count > NUM_FPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0) + intarg_count++; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + fparg_count += 2; + /* If this FP arg is going on the stack, it must be + 16-byte-aligned. */ + if (fparg_count >= NUM_FPR_ARG_REGISTERS) +#if defined (POWERPC64) + intarg_count = FFI_ALIGN(intarg_count, 2); +#else + intarg_count = FFI_ALIGN(intarg_count, 4); +#endif + break; +#endif + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#if defined(POWERPC64) + intarg_count++; +#else + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. 
If they go on the stack, they must + be 8-byte-aligned. */ + if (intarg_count == NUM_GPR_ARG_REGISTERS-1 + || (intarg_count >= NUM_GPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0)) + intarg_count++; + intarg_count += 2; +#endif + break; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + align_words = (*ptr)->alignment >> 3; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* Base size of the struct. */ + intarg_count += (size_al + 7) / 8; + /* If 16 bytes then don't worry about floats. */ + if (size_al != 16) + /* Scan through for floats to be placed in regs. */ + darwin64_scan_struct_for_floats (*ptr, &fparg_count) ; +#else + align_words = (*ptr)->alignment >> 2; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* If the first member of the struct is a double, then align + the struct to double-word. + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); */ +# ifdef POWERPC64 + intarg_count += (size_al + 7) / 8; +# else + intarg_count += (size_al + 3) / 4; +# endif +#endif + break; + + default: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + break; + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + +#if defined(POWERPC_DARWIN64) + /* Space to image the FPR registers, if needed - which includes when they might be + used in a struct return. */ + if (fparg_count != 0 + || ((flags & FLAG_RETURNS_STRUCT) + && (flags & FLAG_RETURNS_FP))) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#else + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#endif + + /* Stack space. */ +#ifdef POWERPC64 + if ((intarg_count + fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + fparg_count) * sizeof(long); +#else + if ((intarg_count + 2 * fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + 2 * fparg_count) * sizeof(long); +#endif + else + bytes += NUM_GPR_ARG_REGISTERS * sizeof(long); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = FFI_ALIGN(bytes, 16) ; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void)); + +extern void ffi_call_go_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), void *closure); + +extern void ffi_call_DARWIN(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), ffi_type*); + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. 
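   As a caller-side sketch only (assuming <ffi.h> and <math.h> are available,
   with error handling elided), the path into this routine looks like:

     #include <ffi.h>
     #include <math.h>   // hypot(3.0, 4.0) == 5.0, used as a convenient target

     int main (void)
     {
       ffi_cif cif;
       ffi_type *argt[2] = { &ffi_type_double, &ffi_type_double };
       double a = 3.0, b = 4.0, result = 0.0;
       void *argv[2] = { &a, &b };

       // FFI_DEFAULT_ABI is expected to resolve to FFI_DARWIN or FFI_AIX on
       // these targets, so ffi_prep_cif_machdep above has already fixed up
       // cif->bytes and cif->flags by the time ffi_call runs.
       if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, argt) == FFI_OK)
         ffi_call (&cif, FFI_FN (hypot), &result, argv);   // result == 5.0
       return 0;
     }

   Had the return type been a struct and result been passed as NULL, the
   alloca below would supply the scratch return buffer.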
*/ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args)); + break; + case FFI_DARWIN: + ffi_call_DARWIN(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), cif->rtype); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_go_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), closure); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void flush_icache(char *); +static void flush_range(char *, int); + +/* The layout of a function descriptor. A C function pointer really + points to one of these. */ + +typedef struct aix_fd_struct { + void *code_pointer; + void *toc; +} aix_fd; + +/* here I'd like to add the stack frame layout we use in darwin_closure.S + and aix_closure.S + + m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee's LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent's frame. + |--------------------------------------------| <+ <<< on entry to ffi_closure_ASM + | Result Bytes 16 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_closure_ASM. 
+ +*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + struct ffi_aix_trampoline_struct *tramp_aix; + aix_fd *fd; + + switch (cif->abi) + { + case FFI_DARWIN: + + FFI_ASSERT (cif->abi == FFI_DARWIN); + + tramp = (unsigned int *) &closure->tramp[0]; +#if defined(POWERPC_DARWIN64) + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0015; /* bcl- 20,4*cr7+so, +0x18 (L1) */ + /* We put the addresses here. */ + tramp[6] = 0x7d6802a6; /*L1: mflr r11 */ + tramp[7] = 0xe98b0000; /* ld r12,0(r11) function address */ + tramp[8] = 0x7c0803a6; /* mtlr r0 */ + tramp[9] = 0x7d8903a6; /* mtctr r12 */ + tramp[10] = 0xe96b0008; /* lwz r11,8(r11) static chain */ + tramp[11] = 0x4e800420; /* bctr */ + + *((unsigned long *)&tramp[2]) = (unsigned long) ffi_closure_ASM; /* function */ + *((unsigned long *)&tramp[4]) = (unsigned long) codeloc; /* context */ +#else + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f000d; /* bcl- 20,4*cr7+so,0x10 */ + tramp[4] = 0x7d6802a6; /* mflr r11 */ + tramp[5] = 0x818b0000; /* lwz r12,0(r11) function address */ + tramp[6] = 0x7c0803a6; /* mtlr r0 */ + tramp[7] = 0x7d8903a6; /* mtctr r12 */ + tramp[8] = 0x816b0004; /* lwz r11,4(r11) static chain */ + tramp[9] = 0x4e800420; /* bctr */ + tramp[2] = (unsigned long) ffi_closure_ASM; /* function */ + tramp[3] = (unsigned long) codeloc; /* context */ +#endif + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. Only necessary on Darwin. */ + flush_range(codeloc, FFI_TRAMPOLINE_SIZE); + + break; + + case FFI_AIX: + + tramp_aix = (struct ffi_aix_trampoline_struct *) (closure->tramp); + fd = (aix_fd *)(void *)ffi_closure_ASM; + + FFI_ASSERT (cif->abi == FFI_AIX); + + tramp_aix->code_pointer = fd->code_pointer; + tramp_aix->toc = fd->toc; + tramp_aix->static_chain = codeloc; + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + break; + + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_AIX: + + FFI_ASSERT (cif->abi == FFI_AIX); + + closure->tramp = (void *)ffi_go_closure_ASM; + closure->cif = cif; + closure->fun = fun; + return FFI_OK; + + // For now, ffi_prep_go_closure is only implemented for AIX, not for Darwin + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +static void +flush_icache(char *addr) +{ +#ifndef _AIX + __asm__ volatile ( + "dcbf 0,%0\n" + "\tsync\n" + "\ticbi 0,%0\n" + "\tsync\n" + "\tisync" + : : "r"(addr) : "memory"); +#endif +} + +static void +flush_range(char * addr1, int size) +{ +#define MIN_LINE_SIZE 32 + int i; + for (i = 0; i < size; i += MIN_LINE_SIZE) + flush_icache(addr1+i); + flush_icache(addr1+size-1); +} + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *, void *, + unsigned long *, ffi_dblfl *); + +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure*, void *, + unsigned long *, ffi_dblfl *); + +/* Basically the trampoline invokes ffi_closure_ASM, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_ASM invokes the + following helper function to do most of the work. 
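   For reference, a minimal sketch of how user code reaches this path through
   the public closure API (assuming a cif already prepared for a
   double (double, double) signature, as in the earlier ffi_call example;
   error handling elided):

     // Handler with the signature expected by ffi_prep_closure_loc below;
     // the closure helper unpacks the saved registers into args before
     // dispatching to it.
     void handler (ffi_cif *cif, void *ret, void **args, void *user_data)
     {
       *(double *) ret = *(double *) args[0] + *(double *) args[1];
     }

     void *code;
     ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
     if (closure
         && ffi_prep_closure_loc (closure, &cif, handler, NULL, code) == FFI_OK)
       {
         double (*fn) (double, double) = (double (*)(double, double)) code;
         double sum = fn (1.5, 2.5);   // enters via the trampoline and ffi_closure_ASM; sum == 4.0
       }
     ffi_closure_free (closure);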
*/ + +static ffi_type * +ffi_closure_helper_common (ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + /* rvalue is the pointer to space for return value in closure assembly + pgr is the pointer to where r3-r10 are stored in ffi_closure_ASM + pfr is the pointer to where f1-f13 are stored in ffi_closure_ASM. */ + + typedef double ldbits[2]; + + union ldu + { + ldbits lb; + long double ld; + }; + + void ** avalue; + ffi_type ** arg_types; + long i, avn; + ffi_dblfl * end_pfr = pfr + NUM_FPR_ARG_REGISTERS; + unsigned size_al; +#if defined(POWERPC_DARWIN64) + unsigned fpsused = 0; +#endif + + avalue = alloca (cif->nargs * sizeof(void *)); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { +#if defined(POWERPC_DARWIN64) + if (!darwin64_struct_ret_by_value_p (cif->rtype)) + { + /* Won't fit into the regs - return by ref. */ + rvalue = (void *) *pgr; + pgr++; + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size > 4) + { + rvalue = (void *) *pgr; + pgr++; + } +#else /* assume we return by ref. */ + rvalue = (void *) *pgr; + pgr++; +#endif + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 7; +#else + avalue[i] = (char *) pgr + 3; +#endif + pgr++; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 6; +#else + avalue[i] = (char *) pgr + 2; +#endif + pgr++; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 4; +#else + case FFI_TYPE_POINTER: + avalue[i] = pgr; +#endif + pgr++; + break; + + case FFI_TYPE_STRUCT: + size_al = arg_types[i]->size; +#if defined(POWERPC_DARWIN64) + pgr = (unsigned long *)FFI_ALIGN((char *)pgr, arg_types[i]->alignment); + if (size_al < 3 || size_al == 4) + { + avalue[i] = ((char *)pgr)+8-size_al; + if (arg_types[i]->elements[0]->type == FFI_TYPE_FLOAT + && fpsused < NUM_FPR_ARG_REGISTERS) + { + *(float *)pgr = (float) *(double *)pfr; + pfr++; + fpsused++; + } + } + else + { + if (size_al != 16) + pfr = (ffi_dblfl *) + darwin64_struct_floats_to_mem (arg_types[i], (char *)pgr, + (double *)pfr, &fpsused); + avalue[i] = pgr; + } + pgr += (size_al + 7) / 8; +#else + /* If the first member of the struct is a double, then align + the struct to double-word. */ + if (arg_types[i]->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN(arg_types[i]->size, 8); +# if defined(POWERPC64) + FFI_ASSERT (cif->abi != FFI_DARWIN); + avalue[i] = pgr; + pgr += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. */ + if (size_al < 3 && cif->abi == FFI_DARWIN) + avalue[i] = (char*) pgr + 4 - size_al; + else + avalue[i] = pgr; + pgr += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: +#if defined(POWERPC64) + case FFI_TYPE_POINTER: + avalue[i] = pgr; + pgr++; + break; +#else + /* Long long ints are passed in two gpr's. */ + avalue[i] = pgr; + pgr += 2; + break; +#endif + + case FFI_TYPE_FLOAT: + /* A float value consumes a GPR. + There are 13 64bit floating point registers. 
*/ + if (pfr < end_pfr) + { + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr++; + break; + + case FFI_TYPE_DOUBLE: + /* A double value consumes two GPRs. + There are 13 64bit floating point registers. */ + if (pfr < end_pfr) + { + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } +#ifdef POWERPC64 + pgr++; +#else + pgr += 2; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +#ifdef POWERPC64 + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr) + { + *pgr = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pgr; + } + pgr += 2; +#else /* POWERPC64 */ + /* A long double value consumes four GPRs and two FPRs. + There are 13 64bit floating point registers. */ + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + /* Here we have the situation where one part of the long double + is stored in fpr13 and the other part is already on the stack. + We use a union to pass the long double to avalue[i]. */ + else if (pfr + 1 == end_pfr) + { + union ldu temp_ld; + memcpy (&temp_ld.lb[0], pfr, sizeof(ldbits)); + memcpy (&temp_ld.lb[1], pgr + 2, sizeof(ldbits)); + avalue[i] = &temp_ld.ld; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr += 4; +#endif /* POWERPC64 */ + break; +#endif + default: + FFI_ASSERT(0); + } + i++; + } + + (fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_ASM to perform return type promotions. */ + return cif->rtype; +} + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure->user_data, rvalue, pgr, pfr); +} + +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure, rvalue, pgr, pfr); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c new file mode 100644 index 0000000..61a18c4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c @@ -0,0 +1,1440 @@ +/* ----------------------------------------------------------------------- + ffi_darwin.c + + Copyright (C) 1998 Geoffrey Keating + Copyright (C) 2001 John Hornkvist + Copyright (C) 2002, 2006, 2007, 2009, 2010 Free Software Foundation, Inc. + + FFI support for Darwin and AIX. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +extern void ffi_closure_ASM (void); +extern void ffi_go_closure_ASM (void); + +enum { + /* The assembly depends on these exact flags. + For Darwin64 (when FLAG_RETURNS_STRUCT is set): + FLAG_RETURNS_FP indicates that the structure embeds FP data. + FLAG_RETURNS_128BITS signals a special struct size that is not + expanded for float content. */ + FLAG_RETURNS_128BITS = 1 << (31-31), /* These go in cr7 */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_64BITS = 1 << (31-28), + + FLAG_RETURNS_STRUCT = 1 << (31-27), /* This goes in cr6 */ + + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4) +}; + +/* About the DARWIN ABI. */ +enum { + NUM_GPR_ARG_REGISTERS = 8, + NUM_FPR_ARG_REGISTERS = 13, + LINKAGE_AREA_GPRS = 6 +}; + +enum { ASM_NEEDS_REGISTERS = 4 }; /* r28-r31 */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments. + + m32/m64 + + The stack layout we want looks like this: + + | Return address from ffi_call_DARWIN | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4/8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | ASM_NEEDS_REGISTERS=r28-r31 4*(4/8) | | ffi_call_DARWIN + |--------------------------------------------| | + | When we have any FP activity... the | | + | FPRs occupy NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 from high to low addr. | | + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_call_DARWIN + + */ + +#if defined(POWERPC_DARWIN64) +static void +darwin64_pass_struct_by_value + (ffi_type *, char *, unsigned, unsigned *, double **, unsigned long **); +#endif + +/* This depends on GPR_SIZE = sizeof (unsigned long) */ + +void +ffi_prep_args (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + const unsigned nargs = ecif->cif->nargs; +#if !defined(POWERPC_DARWIN64) + const ffi_abi abi = ecif->cif->abi; +#endif + + /* 'stacktop' points at the previous backchain pointer. */ + unsigned long *const stacktop = stack + (bytes / sizeof(unsigned long)); + + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + double *fpr_base = (double *) (stacktop - ASM_NEEDS_REGISTERS) - NUM_FPR_ARG_REGISTERS; + int gp_count = 0, fparg_count = 0; + + /* 'next_arg' grows up as we put parameters in it. 
*/ + unsigned long *next_arg = stack + LINKAGE_AREA_GPRS; /* 6 reserved positions. */ + + int i; + double double_tmp; + void **p_argv = ecif->avalue; + unsigned long gprvalue; + ffi_type** ptr = ecif->cif->arg_types; +#if !defined(POWERPC_DARWIN64) + char *dest_cpy; +#endif + unsigned size_al = 0; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT(((unsigned) (char *) stack & 0xF) == 0); + FFI_ASSERT(((unsigned) (char *) stacktop & 0xF) == 0); + FFI_ASSERT((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. + Rule: + Return values are referenced by r3, so r4 is the first parameter. */ + + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + for (i = nargs; i > 0; i--, ptr++, p_argv++) + { + switch ((*ptr)->type) + { + /* If a floating-point parameter appears before all of the general- + purpose registers are filled, the corresponding GPRs that match + the size of the floating-point parameter are skipped. */ + case FFI_TYPE_FLOAT: + double_tmp = *(float *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; +#if defined(POWERPC_DARWIN) + *(float *)next_arg = *(float *) *p_argv; +#else + *(double *)next_arg = double_tmp; +#endif + next_arg++; + gp_count++; + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_DOUBLE: + double_tmp = *(double *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *)next_arg = double_tmp; +#ifdef POWERPC64 + next_arg++; + gp_count++; +#else + next_arg += 2; + gp_count += 2; +#endif + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +# if defined(POWERPC64) && !defined(POWERPC_DARWIN64) + /* ??? This will exceed the regs count when the value starts at fp13 + and it will not put the extra bit on the stack. 
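   (That is: the guard below only checks fparg_count < NUM_FPR_ARG_REGISTERS
   before doing a full 16-byte store, so a long double that begins in the
   last FPR slot overruns the register image, and the else branch that would
   have copied it into the parameter words is skipped.)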
*/ + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *(long double *) fpr_base++ = *(long double *) *p_argv; + else + *(long double *) next_arg = *(long double *) *p_argv; + next_arg += 2; + fparg_count += 2; +# else + double_tmp = ((double *) *p_argv)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; + double_tmp = ((double *) *p_argv)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; +# endif + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; +#endif + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + gprvalue = *(long long *) *p_argv; + goto putgpr; +#else + *(long long *) next_arg = *(long long *) *p_argv; + next_arg += 2; + gp_count += 2; +#endif + break; + case FFI_TYPE_POINTER: + gprvalue = *(unsigned long *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT8: + gprvalue = *(unsigned char *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = *(signed char *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = *(unsigned short *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = *(signed short *) *p_argv; + goto putgpr; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + next_arg = (unsigned long *)FFI_ALIGN((char *)next_arg, (*ptr)->alignment); + darwin64_pass_struct_by_value (*ptr, (char *) *p_argv, + (unsigned) size_al, + (unsigned int *) &fparg_count, + &fpr_base, &next_arg); +#else + dest_cpy = (char *) next_arg; + + /* If the first member of the struct is a double, then include enough + padding in the struct size to align it to double-word. */ + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); + +# if defined(POWERPC64) + FFI_ASSERT (abi != FFI_DARWIN); + memcpy ((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. + Structures with 3 byte in size are padded upwards. */ + if (size_al < 3 && abi == FFI_DARWIN) + dest_cpy += 4 - size_al; + + memcpy((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = *(signed int *) *p_argv; + goto putgpr; + + case FFI_TYPE_UINT32: + gprvalue = *(unsigned int *) *p_argv; + putgpr: + *next_arg++ = gprvalue; + gp_count++; + break; + default: + break; + } + } + + /* Check that we didn't overrun the stack... */ + /* FFI_ASSERT(gpr_base <= stacktop - ASM_NEEDS_REGISTERS); + FFI_ASSERT((unsigned *)fpr_base + <= stacktop - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS); + FFI_ASSERT(flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4); */ +} + +#if defined(POWERPC_DARWIN64) + +/* See if we can put some of the struct into fprs. + This should not be called for structures of size 16 bytes, since these are not + broken out this way. 
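   For example, given

     struct s { double d; float f; struct { double x; } inner; };

   the scan counts one FPR for d, one for f and, recursing into inner, one
   for x, leaving *nfpr incremented by 3; a long double member would add 2.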
*/ +static void +darwin64_scan_struct_for_floats (ffi_type *s, unsigned *nfpr) +{ + int i; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p = s->elements[i]; + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_scan_struct_for_floats (p, nfpr); + break; + case FFI_TYPE_LONGDOUBLE: + (*nfpr) += 2; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + (*nfpr) += 1; + break; + default: + break; + } + } +} + +static int +darwin64_struct_size_exceeds_gprs_p (ffi_type *s, char *src, unsigned *nfpr) +{ + unsigned struct_offset=0, i; + + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + if (darwin64_struct_size_exceeds_gprs_p (p, item_base, nfpr)) + return 1; + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + default: + /* If we try and place any item, that is non-float, once we've + exceeded the 8 GPR mark, then we can't fit the struct. */ + if ((unsigned long)item_base >= 8*8) + return 1; + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return 0; +} + +/* Can this struct be returned by value? */ +int +darwin64_struct_ret_by_value_p (ffi_type *s) +{ + unsigned nfp = 0; + + FFI_ASSERT (s && s->type == FFI_TYPE_STRUCT); + + /* The largest structure we can return is 8long + 13 doubles. */ + if (s->size > 168) + return 0; + + /* We can't pass more than 13 floats. */ + darwin64_scan_struct_for_floats (s, &nfp); + if (nfp > 13) + return 0; + + /* If there are not too many floats, and the struct is + small enough to accommodate in the GPRs, then it must be OK. */ + if (s->size <= 64) + return 1; + + /* Well, we have to look harder. */ + nfp = 0; + if (darwin64_struct_size_exceeds_gprs_p (s, NULL, &nfp)) + return 0; + + return 1; +} + +void +darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) +{ + int i; + double *fpr_base = *fprs; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_pass_struct_floats (p, item_base, nfpr, + &fpr_base); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = (double) *(float *)item_base; + (*nfpr) += 1; + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + /* Update the scores. */ + *fprs = fpr_base; +} + +/* Darwin64 special rules. 
+ Break out a struct into params and float registers. */ +static void +darwin64_pass_struct_by_value (ffi_type *s, char *src, unsigned size, + unsigned *nfpr, double **fprs, unsigned long **arg) +{ + unsigned long *next_arg = *arg; + char *dest_cpy = (char *)next_arg; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + if (!size) + return; + + /* First... special cases. */ + if (size < 3 + || (size == 4 + && s->elements[0] + && s->elements[0]->type != FFI_TYPE_FLOAT)) + { + /* Must be at least one GPR, padding is unspecified in value, + let's make it zero. */ + *next_arg = 0UL; + dest_cpy += 8 - size; + memcpy ((char *) dest_cpy, src, size); + next_arg++; + } + else if (size == 16) + { + memcpy ((char *) dest_cpy, src, size); + next_arg += 2; + } + else + { + /* now the general case, we consider embedded floats. */ + memcpy ((char *) dest_cpy, src, size); + darwin64_pass_struct_floats (s, src, nfpr, fprs); + next_arg += (size+7)/8; + } + + *arg = next_arg; +} + +double * +darwin64_struct_floats_to_mem (ffi_type *s, char *dest, double *fprs, unsigned *nf) +{ + int i; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = dest + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + fprs = darwin64_struct_floats_to_mem (p, item_base, fprs, nf); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + break; + case FFI_TYPE_FLOAT: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(float *)item_base = (float) *fprs++ ; + (*nf) += 1; + } + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return fprs; +} + +#endif + +/* Adjust the size of S to be correct for Darwin. + On Darwin m32, the first field of a structure has natural alignment. + On Darwin m64, all fields have natural alignment. */ + +static void +darwin_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + if (p->type == FFI_TYPE_STRUCT) + darwin_adjust_aggregate_sizes (p); +#if defined(POWERPC_DARWIN64) + /* Natural alignment for all items. */ + align = p->alignment; +#else + /* Natural alignment for the first item... */ + if (i == 0) + align = p->alignment; + else if (p->alignment == 16 || p->alignment < 4) + /* .. subsequent items with vector or align < 4 have natural align. */ + align = p->alignment; + else + /* .. or align is 4. */ + align = 4; +#endif + /* Pad, if necessary, before adding the current item. */ + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + /* This should not be necessary on m64, but harmless. */ + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Adjust the size of S to be correct for AIX. 
+ Word-align double unless it is the first member of a structure. */ + +static void +aix_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + aix_adjust_aggregate_sizes (p); + align = p->alignment; + if (i != 0 && p->type == FFI_TYPE_DOUBLE) + align = 4; + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* All this is for the DARWIN ABI. */ + unsigned i; + ffi_type **ptr; + unsigned bytes; + unsigned fparg_count = 0, intarg_count = 0; + unsigned flags = 0; + unsigned size_al = 0; + + /* All the machine-independent calculation of cif->bytes will be wrong. + All the calculation of structure sizes will also be wrong. + Redo the calculation for DARWIN. */ + + if (cif->abi == FFI_DARWIN) + { + darwin_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + darwin_adjust_aggregate_sizes (cif->arg_types[i]); + } + + if (cif->abi == FFI_AIX) + { + aix_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + aix_adjust_aggregate_sizes (cif->arg_types[i]); + } + + /* Space for the frame pointer, callee's LR, CR, etc, and for + the asm's temp regs. */ + + bytes = (LINKAGE_AREA_GPRS + ASM_NEEDS_REGISTERS) * sizeof(unsigned long); + + /* Return value handling. + The rules m32 are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values [??? and structures between 5 and 8 bytes] are + returned in gpr3 and gpr4; + - Single/double FP values are returned in fpr1; + - Long double FP (if not equivalent to double) values are returned in + fpr1 and fpr2; + m64: + - 64-bit or smaller integral values are returned in GPR3 + - Single/double FP values are returned in fpr1; + - Long double FP values are returned in fpr1 and fpr2; + m64 Structures: + - If the structure could be accommodated in registers were it to be the + first argument to a routine, then it is returned in those registers. + m32/m64 structures otherwise: + - Larger structures values are allocated space and a pointer is passed + as the first argument. */ + switch (cif->rtype->type) + { + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + flags |= FLAG_RETURNS_FP; + break; +#endif + + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + case FFI_TYPE_POINTER: +#endif + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if defined(POWERPC_DARWIN64) + { + /* Can we fit the struct into regs? */ + if (darwin64_struct_ret_by_value_p (cif->rtype)) + { + unsigned nfpr = 0; + flags |= FLAG_RETURNS_STRUCT; + if (cif->rtype->size != 16) + darwin64_scan_struct_for_floats (cif->rtype, &nfpr) ; + else + flags |= FLAG_RETURNS_128BITS; + /* Will be 0 for 16byte struct. */ + if (nfpr) + flags |= FLAG_RETURNS_FP; + } + else /* By ref. 
*/ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size <= 4) + flags |= FLAG_RETURNS_STRUCT; + else /* else by reference. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } +#else /* assume we pass by ref. */ + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; +#endif + break; + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. + ??? Structures are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. + For m64 the count is effectively of half-GPRs. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned align_words; + switch ((*ptr)->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + fparg_count++; +#if !defined(POWERPC_DARWIN64) + /* If this FP arg is going on the stack, it must be + 8-byte-aligned. */ + if (fparg_count > NUM_FPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0) + intarg_count++; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + fparg_count += 2; + /* If this FP arg is going on the stack, it must be + 16-byte-aligned. */ + if (fparg_count >= NUM_FPR_ARG_REGISTERS) +#if defined (POWERPC64) + intarg_count = FFI_ALIGN(intarg_count, 2); +#else + intarg_count = FFI_ALIGN(intarg_count, 4); +#endif + break; +#endif + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#if defined(POWERPC64) + intarg_count++; +#else + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. If they go on the stack, they must + be 8-byte-aligned. */ + if (intarg_count == NUM_GPR_ARG_REGISTERS-1 + || (intarg_count >= NUM_GPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0)) + intarg_count++; + intarg_count += 2; +#endif + break; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + align_words = (*ptr)->alignment >> 3; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* Base size of the struct. */ + intarg_count += (size_al + 7) / 8; + /* If 16 bytes then don't worry about floats. */ + if (size_al != 16) + /* Scan through for floats to be placed in regs. */ + darwin64_scan_struct_for_floats (*ptr, &fparg_count) ; +#else + align_words = (*ptr)->alignment >> 2; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* If the first member of the struct is a double, then align + the struct to double-word. + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); */ +# ifdef POWERPC64 + intarg_count += (size_al + 7) / 8; +# else + intarg_count += (size_al + 3) / 4; +# endif +#endif + break; + + default: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + break; + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + +#if defined(POWERPC_DARWIN64) + /* Space to image the FPR registers, if needed - which includes when they might be + used in a struct return. 
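   As a rough illustration (m64, where sizeof (unsigned long) == 8): a call
   taking two double arguments ends up with fparg_count == 2 and
   intarg_count == 0, so bytes becomes (6 + 4) * 8 for the linkage area and
   temporaries, plus 13 * 8 for the FPR image, plus 8 * 8 for the minimum
   GPR parameter area = 248, which the final FFI_ALIGN (bytes, 16) rounds
   up to 256.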
*/ + if (fparg_count != 0 + || ((flags & FLAG_RETURNS_STRUCT) + && (flags & FLAG_RETURNS_FP))) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#else + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#endif + + /* Stack space. */ +#ifdef POWERPC64 + if ((intarg_count + fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + fparg_count) * sizeof(long); +#else + if ((intarg_count + 2 * fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + 2 * fparg_count) * sizeof(long); +#endif + else + bytes += NUM_GPR_ARG_REGISTERS * sizeof(long); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = FFI_ALIGN(bytes, 16) ; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void)); + +extern void ffi_call_go_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), void *closure); + +extern void ffi_call_DARWIN(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), ffi_type*); + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args)); + break; + case FFI_DARWIN: + ffi_call_DARWIN(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), cif->rtype); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_go_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), closure); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void flush_icache(char *); +static void flush_range(char *, int); + +/* The layout of a function descriptor. A C function pointer really + points to one of these. */ + +typedef struct aix_fd_struct { + void *code_pointer; + void *toc; +} aix_fd; + +/* here I'd like to add the stack frame layout we use in darwin_closure.S + and aix_closure.S + + m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee's LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent's frame. 
+ |--------------------------------------------| <+ <<< on entry to ffi_closure_ASM + | Result Bytes 16 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_closure_ASM. + +*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + struct ffi_aix_trampoline_struct *tramp_aix; + aix_fd *fd; + + switch (cif->abi) + { + case FFI_DARWIN: + + FFI_ASSERT (cif->abi == FFI_DARWIN); + + tramp = (unsigned int *) &closure->tramp[0]; +#if defined(POWERPC_DARWIN64) + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0015; /* bcl- 20,4*cr7+so, +0x18 (L1) */ + /* We put the addresses here. */ + tramp[6] = 0x7d6802a6; /*L1: mflr r11 */ + tramp[7] = 0xe98b0000; /* ld r12,0(r11) function address */ + tramp[8] = 0x7c0803a6; /* mtlr r0 */ + tramp[9] = 0x7d8903a6; /* mtctr r12 */ + tramp[10] = 0xe96b0008; /* lwz r11,8(r11) static chain */ + tramp[11] = 0x4e800420; /* bctr */ + + *((unsigned long *)&tramp[2]) = (unsigned long) ffi_closure_ASM; /* function */ + *((unsigned long *)&tramp[4]) = (unsigned long) codeloc; /* context */ +#else + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f000d; /* bcl- 20,4*cr7+so,0x10 */ + tramp[4] = 0x7d6802a6; /* mflr r11 */ + tramp[5] = 0x818b0000; /* lwz r12,0(r11) function address */ + tramp[6] = 0x7c0803a6; /* mtlr r0 */ + tramp[7] = 0x7d8903a6; /* mtctr r12 */ + tramp[8] = 0x816b0004; /* lwz r11,4(r11) static chain */ + tramp[9] = 0x4e800420; /* bctr */ + tramp[2] = (unsigned long) ffi_closure_ASM; /* function */ + tramp[3] = (unsigned long) codeloc; /* context */ +#endif + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. Only necessary on Darwin. 
*/ + flush_range(codeloc, FFI_TRAMPOLINE_SIZE); + + break; + + case FFI_AIX: + + tramp_aix = (struct ffi_aix_trampoline_struct *) (closure->tramp); + fd = (aix_fd *)(void *)ffi_closure_ASM; + + FFI_ASSERT (cif->abi == FFI_AIX); + + tramp_aix->code_pointer = fd->code_pointer; + tramp_aix->toc = fd->toc; + tramp_aix->static_chain = codeloc; + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + break; + + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_AIX: + + FFI_ASSERT (cif->abi == FFI_AIX); + + closure->tramp = (void *)ffi_go_closure_ASM; + closure->cif = cif; + closure->fun = fun; + return FFI_OK; + + // For now, ffi_prep_go_closure is only implemented for AIX, not for Darwin + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +static void +flush_icache(char *addr) +{ +#ifndef _AIX + __asm__ volatile ( + "dcbf 0,%0\n" + "\tsync\n" + "\ticbi 0,%0\n" + "\tsync\n" + "\tisync" + : : "r"(addr) : "memory"); +#endif +} + +static void +flush_range(char * addr1, int size) +{ +#define MIN_LINE_SIZE 32 + int i; + for (i = 0; i < size; i += MIN_LINE_SIZE) + flush_icache(addr1+i); + flush_icache(addr1+size-1); +} + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *, void *, + unsigned long *, ffi_dblfl *); + +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure*, void *, + unsigned long *, ffi_dblfl *); + +/* Basically the trampoline invokes ffi_closure_ASM, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_ASM invokes the + following helper function to do most of the work. */ + +static ffi_type * +ffi_closure_helper_common (ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + /* rvalue is the pointer to space for return value in closure assembly + pgr is the pointer to where r3-r10 are stored in ffi_closure_ASM + pfr is the pointer to where f1-f13 are stored in ffi_closure_ASM. */ + + typedef double ldbits[2]; + + union ldu + { + ldbits lb; + long double ld; + }; + + void ** avalue; + ffi_type ** arg_types; + long i, avn; + ffi_dblfl * end_pfr = pfr + NUM_FPR_ARG_REGISTERS; + unsigned size_al; +#if defined(POWERPC_DARWIN64) + unsigned fpsused = 0; +#endif + + avalue = alloca (cif->nargs * sizeof(void *)); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { +#if defined(POWERPC_DARWIN64) + if (!darwin64_struct_ret_by_value_p (cif->rtype)) + { + /* Won't fit into the regs - return by ref. */ + rvalue = (void *) *pgr; + pgr++; + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size > 4) + { + rvalue = (void *) *pgr; + pgr++; + } +#else /* assume we return by ref. */ + rvalue = (void *) *pgr; + pgr++; +#endif + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. 
*/ + while (i < avn) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 7; +#else + avalue[i] = (char *) pgr + 3; +#endif + pgr++; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 6; +#else + avalue[i] = (char *) pgr + 2; +#endif + pgr++; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 4; +#else + case FFI_TYPE_POINTER: + avalue[i] = pgr; +#endif + pgr++; + break; + + case FFI_TYPE_STRUCT: + size_al = arg_types[i]->size; +#if defined(POWERPC_DARWIN64) + pgr = (unsigned long *)FFI_ALIGN((char *)pgr, arg_types[i]->alignment); + if (size_al < 3 || size_al == 4) + { + avalue[i] = ((char *)pgr)+8-size_al; + if (arg_types[i]->elements[0]->type == FFI_TYPE_FLOAT + && fpsused < NUM_FPR_ARG_REGISTERS) + { + *(float *)pgr = (float) *(double *)pfr; + pfr++; + fpsused++; + } + } + else + { + if (size_al != 16) + pfr = (ffi_dblfl *) + darwin64_struct_floats_to_mem (arg_types[i], (char *)pgr, + (double *)pfr, &fpsused); + avalue[i] = pgr; + } + pgr += (size_al + 7) / 8; +#else + /* If the first member of the struct is a double, then align + the struct to double-word. */ + if (arg_types[i]->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN(arg_types[i]->size, 8); +# if defined(POWERPC64) + FFI_ASSERT (cif->abi != FFI_DARWIN); + avalue[i] = pgr; + pgr += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. */ + if (size_al < 3 && cif->abi == FFI_DARWIN) + avalue[i] = (char*) pgr + 4 - size_al; + else + avalue[i] = pgr; + pgr += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: +#if defined(POWERPC64) + case FFI_TYPE_POINTER: + avalue[i] = pgr; + pgr++; + break; +#else + /* Long long ints are passed in two gpr's. */ + avalue[i] = pgr; + pgr += 2; + break; +#endif + + case FFI_TYPE_FLOAT: + /* A float value consumes a GPR. + There are 13 64bit floating point registers. */ + if (pfr < end_pfr) + { + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr++; + break; + + case FFI_TYPE_DOUBLE: + /* A double value consumes two GPRs. + There are 13 64bit floating point registers. */ + if (pfr < end_pfr) + { + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } +#ifdef POWERPC64 + pgr++; +#else + pgr += 2; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +#ifdef POWERPC64 + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr) + { + *pgr = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pgr; + } + pgr += 2; +#else /* POWERPC64 */ + /* A long double value consumes four GPRs and two FPRs. + There are 13 64bit floating point registers. */ + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + /* Here we have the situation where one part of the long double + is stored in fpr13 and the other part is already on the stack. + We use a union to pass the long double to avalue[i]. 
*/ + else if (pfr + 1 == end_pfr) + { + union ldu temp_ld; + memcpy (&temp_ld.lb[0], pfr, sizeof(ldbits)); + memcpy (&temp_ld.lb[1], pgr + 2, sizeof(ldbits)); + avalue[i] = &temp_ld.ld; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr += 4; +#endif /* POWERPC64 */ + break; +#endif + default: + FFI_ASSERT(0); + } + i++; + } + + (fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_ASM to perform return type promotions. */ + return cif->rtype; +} + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure->user_data, rvalue, pgr, pfr); +} + +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure, rvalue, pgr, pfr); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64 2.c new file mode 100644 index 0000000..4d50878 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64 2.c @@ -0,0 +1,1153 @@ +/* ----------------------------------------------------------------------- + ffi_linux64.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifdef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the LINUX64 ABI. */ +enum { + NUM_GPR_ARG_REGISTERS64 = 8, + NUM_FPR_ARG_REGISTERS64 = 13, + NUM_VEC_ARG_REGISTERS64 = 12, +}; +enum { ASM_NEEDS_REGISTERS64 = 4 }; + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. 
*/ +void FFI_HIDDEN +ffi_prep_types_linux64 (ffi_abi abi) +{ + if ((abi & (FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128)) == FFI_LINUX) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + + +static unsigned int +discover_homogeneous_aggregate (ffi_abi abi, + const ffi_type *t, + unsigned int *elnum) +{ + switch (t->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 64-bit long doubles are equivalent to doubles. */ + if ((abi & FFI_LINUX_LONG_DOUBLE_128) == 0) + { + *elnum = 1; + return FFI_TYPE_DOUBLE; + } + /* IBM extended precision values use unaligned pairs + of FPRs, but according to the ABI must be considered + distinct from doubles. They are also limited to a + maximum of four members in a homogeneous aggregate. */ + else if ((abi & FFI_LINUX_LONG_DOUBLE_IEEE128) == 0) + { + *elnum = 2; + return FFI_TYPE_LONGDOUBLE; + } + /* Fall through. */ +#endif + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + *elnum = 1; + return (int) t->type; + + case FFI_TYPE_STRUCT:; + { + unsigned int base_elt = 0, total_elnum = 0; + ffi_type **el = t->elements; + while (*el) + { + unsigned int el_elt, el_elnum = 0; + el_elt = discover_homogeneous_aggregate (abi, *el, &el_elnum); + if (el_elt == 0 + || (base_elt && base_elt != el_elt)) + return 0; + base_elt = el_elt; + total_elnum += el_elnum; +#if _CALL_ELF == 2 + if (total_elnum > 8) + return 0; +#else + if (total_elnum > 1) + return 0; +#endif + el++; + } + *elnum = total_elnum; + return base_elt; + } + + default: + return 0; + } +} + + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_linux64_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fparg_count = 0, intarg_count = 0, vecarg_count = 0; + unsigned flags = cif->flags; + unsigned elt, elnum, rtype; + +#if FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE + /* If compiled without long double support... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0 || + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#elif !defined(__VEC__) + /* If compiled without vector register support (used by assembly)... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#else + /* If the IEEE128 flag is set, but long double is only 64 bits wide... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) == 0 && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#endif + + /* The machine-independent calculation of cif->bytes doesn't work + for us. Redo the calculation. */ +#if _CALL_ELF == 2 + /* Space for backchain, CR, LR, TOC and the asm's temp regs. */ + bytes = (4 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the general registers. */ + bytes += NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#else + /* Space for backchain, CR, LR, cc/ld doubleword, TOC and the asm's temp + regs. */ + bytes = (6 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the mandatory parm save area and general registers. */ + bytes += 2 * NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#endif + + /* Return value handling. 
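
For context, a brief illustration of how a caller would hand a homogeneous aggregate like the ones classified by discover_homogeneous_aggregate above to libffi through its public API. This is only a sketch, not part of the vendored file; the struct name point2f and the helper describe_point2f are made up.

#include <ffi.h>
#include <stdio.h>

typedef struct { float x, y; } point2f;   /* hypothetical example type */

static void describe_point2f (void)
{
  /* NULL-terminated element list: two floats, i.e. a two-element
     homogeneous float aggregate under the ELFv2 rules above. */
  static ffi_type *elements[] = { &ffi_type_float, &ffi_type_float, NULL };
  ffi_type point_type = { .size = 0, .alignment = 0,
                          .type = FFI_TYPE_STRUCT, .elements = elements };
  ffi_cif cif;
  ffi_type *args[] = { &point_type };

  /* ffi_prep_cif fills in size/alignment and runs the target's
     machine-dependent cif processing (ffi_prep_cif_linux64 here). */
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, args) == FFI_OK)
    printf ("size=%zu align=%u\n", point_type.size,
            (unsigned) point_type.alignment);
}
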
*/ + rtype = cif->rtype->type; +#if _CALL_ELF == 2 +homogeneous: +#endif + switch (rtype) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + flags |= FLAG_RETURNS_VEC; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if _CALL_ELF == 2 + elt = discover_homogeneous_aggregate (cif->abi, cif->rtype, &elnum); + if (elt) + { + flags |= FLAG_RETURNS_SMST; + rtype = elt; + goto homogeneous; + } + if (cif->rtype->size <= 16) + { + flags |= FLAG_RETURNS_SMST; + break; + } +#endif + intarg_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned int align; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count++; + /* Align to 16 bytes, plus the 16-byte argument. */ + intarg_count = (intarg_count + 3) & ~0x1; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + fparg_count++; + intarg_count++; + } + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + fparg_count++; + intarg_count++; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + align = align / 8; + if (align > 1) + intarg_count = FFI_ALIGN (intarg_count, align); + } + intarg_count += ((*ptr)->size + 7) / 8; + elt = discover_homogeneous_aggregate (cif->abi, *ptr, &elnum); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count += elnum; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + else +#endif + if (elt) + { + fparg_count += elnum; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + else + { + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 8-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + default: + FFI_ASSERT (0); + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (intarg_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (vecarg_count != 0) + flags |= FLAG_VEC_ARGUMENTS; + + /* Space for the FPR registers, if needed. 
*/ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS64 * sizeof (double); + /* Space for the vector registers, if needed, aligned to 16 bytes. */ + if (vecarg_count != 0) { + bytes = (bytes + 15) & ~0xF; + bytes += NUM_VEC_ARG_REGISTERS64 * sizeof (float128); + } + + /* Stack space. */ +#if _CALL_ELF == 2 + if ((flags & FLAG_ARG_NEEDS_PSAVE) != 0) + bytes += intarg_count * sizeof (long); +#else + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + bytes += (intarg_count - NUM_GPR_ARG_REGISTERS64) * sizeof (long); +#endif + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64 (ffi_cif *cif) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = cif->nargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. */ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; + return ffi_prep_cif_linux64_core (cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64_var (ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = nfixedargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. */ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; +#if _CALL_ELF == 2 + cif->flags |= FLAG_ARG_NEEDS_PSAVE; +#endif + return ffi_prep_cif_linux64_core (cif); +} + + +/* ffi_prep_args64 is called by the assembly routine once stack space + has been allocated for the function's arguments. 
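
The frame-size computation above ends with bytes = (bytes + 15) & ~0xF, the usual round-up-to-a-multiple-of-16 idiom required because the stack must stay 16-byte aligned. A quick stand-alone sketch (round_up_16 and check_rounding are made-up names, not vendored code):

#include <assert.h>
#include <stddef.h>

static size_t round_up_16 (size_t bytes)
{
  return (bytes + 15) & ~(size_t) 0xF;   /* same expression as in the cif code */
}

static void check_rounding (void)
{
  assert (round_up_16 (0)  == 0);
  assert (round_up_16 (1)  == 16);
  assert (round_up_16 (16) == 16);
  assert (round_up_16 (17) == 32);
}
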
+ + The stack layout we want looks like this: + + | Ret addr from ffi_call_LINUX64 8bytes | higher addresses + |--------------------------------------------| + | CR save area 8bytes | + |--------------------------------------------| + | Previous backchain pointer 8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*8 | | ffi_call_LINUX64 + |--------------------------------------------| | + | GPR registers r3-r10 8*8 | | + |--------------------------------------------| | + | FPR registers f1-f13 (optional) 13*8 | | + |--------------------------------------------| | + | VEC registers v2-v13 (optional) 12*16 | | + |--------------------------------------------| | + | Parameter save area | | + |--------------------------------------------| | + | TOC save area 8 | | + |--------------------------------------------| | stack | + | Linker doubleword 8 | | grows | + |--------------------------------------------| | down V + | Compiler doubleword 8 | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 8 | | + |--------------------------------------------| | + | CR save area 8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 8 |-/ during + |--------------------------------------------| <<< ffi_call_LINUX64 + +*/ + +void FFI_HIDDEN +ffi_prep_args64 (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned long bytes = ecif->cif->bytes; + const unsigned long flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'next_arg' points at the space for gpr3, and grows upwards as + we use GPR registers, then continues at rest. */ + valp gpr_base; + valp gpr_end; + valp rest; + valp next_arg; + + /* 'fpr_base' points at the space for f1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + unsigned int fparg_count; + + /* 'vec_base' points at the space for v2, and grows upwards as + we use vector registers. */ + valp vec_base; + unsigned int vecarg_count; + + unsigned int i, words, nargs, nfixedargs; + ffi_type **ptr; + double double_tmp; + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + signed int **si; + unsigned int **ui; + unsigned long **ul; + float **f; + double **d; + float128 **f128; + } p_argv; + unsigned long gprvalue; + unsigned long align; + + stacktop.c = (char *) stack + bytes; + gpr_base.ul = stacktop.ul - ASM_NEEDS_REGISTERS64 - NUM_GPR_ARG_REGISTERS64; + gpr_end.ul = gpr_base.ul + NUM_GPR_ARG_REGISTERS64; +#if _CALL_ELF == 2 + rest.ul = stack + 4 + NUM_GPR_ARG_REGISTERS64; +#else + rest.ul = stack + 6 + NUM_GPR_ARG_REGISTERS64; +#endif + fpr_base.d = gpr_base.d - NUM_FPR_ARG_REGISTERS64; + fparg_count = 0; + /* Place the vector args below the FPRs, if used, else the GPRs. */ + if (ecif->cif->flags & FLAG_FP_ARGUMENTS) + vec_base.p = fpr_base.p & ~0xF; + else + vec_base.p = gpr_base.p; + vec_base.f128 -= NUM_VEC_ARG_REGISTERS64; + vecarg_count = 0; + next_arg.ul = gpr_base.ul; + + /* Check that everything starts aligned properly. 
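
The valp union above lets ffi_prep_args64 walk one raw stack frame with a single cursor while storing values of different widths through differently typed pointers. A minimal sketch of the same pattern, with made-up names (cursor, demo_valp) and a toy four-slot frame:

#include <assert.h>

typedef union { char *c; unsigned long *ul; double *d; } cursor;

static void demo_valp (void)
{
  unsigned long frame[4];
  cursor next;

  next.ul = frame;
  *next.ul++ = 42;     /* a GPR-sized integer argument */
  *next.d++  = 3.5;    /* a double written through the same cursor */

  /* Both stores advanced the cursor by one 8-byte slot each. */
  assert (next.c == (char *) frame + 16);
}
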
*/ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_base.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_end.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) vec_base.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg.ul++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + p_argv.v = ecif->avalue; + nargs = ecif->cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = ecif->cif->nfixedargs; + for (ptr = ecif->cif->arg_types, i = 0; + i < nargs; + i++, ptr++, p_argv.v++) + { + unsigned int elt, elnum; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + next_arg.p = FFI_ALIGN (next_arg.p, 16); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 && i < nfixedargs) + memcpy (vec_base.f128++, *p_argv.f128, sizeof (float128)); + else + memcpy (next_arg.f128, *p_argv.f128, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 113); + FFI_ASSERT (flags & FLAG_VEC_ARGUMENTS); + break; + } + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + double_tmp = (*p_argv.d)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + double_tmp = (*p_argv.d)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 106); + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + } + /* Fall through. 
*/ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + double_tmp = **p_argv.d; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +#endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + double_tmp = **p_argv.f; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } +#endif + } + else + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_STRUCT: + if ((ecif->cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + if (align > 1) + { + next_arg.p = FFI_ALIGN (next_arg.p, align); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + elt = discover_homogeneous_aggregate (ecif->cif->abi, *ptr, &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + float *f; + double *d; + float128 *f128; + } arg; + + arg.v = *p_argv.v; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 + && i < nfixedargs) + memcpy (vec_base.f128++, arg.f128, sizeof (float128)); + else + memcpy (next_arg.f128, arg.f128++, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + double_tmp = *arg.f++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 + && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.f = (float) double_tmp; + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + fparg_count++; + } + while (--elnum != 0); + if ((next_arg.p & 7) != 0) + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + } + else + do + { + double_tmp = *arg.d++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.d = double_tmp; + if (++next_arg.d == gpr_end.d) + next_arg.d = rest.d; + fparg_count++; + } + while (--elnum != 0); +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { + words = ((*ptr)->size + 7) / 8; + if (next_arg.ul >= gpr_base.ul && next_arg.ul + words > gpr_end.ul) + { + size_t first = gpr_end.c - next_arg.c; + memcpy (next_arg.c, *p_argv.c, first); + memcpy (rest.c, *p_argv.c + first, (*ptr)->size - first); + next_arg.c = rest.c + words * 8 - first; + } + else + { + char *where = next_arg.c; + +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
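
The comment above, together with the placement code that follows it, describes the big-endian rule for structures smaller than a doubleword: they are copied to the high-address end of their 8-byte slot. A small stand-alone sketch of that placement arithmetic, assuming a hypothetical helper name (place_small_struct_be):

#include <string.h>

static void place_small_struct_be (unsigned long *slot,
                                   const void *value, size_t size)
{
  char *where = (char *) slot;

  if (size < 8)
    where += 8 - size;   /* left padding, so data ends at the slot's high end */
  memcpy (where, value, size);
}
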
*/ + if ((*ptr)->size < 8) + where += 8 - (*ptr)->size; +#endif + memcpy (where, *p_argv.c, (*ptr)->size); + next_arg.ul += words; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + break; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + case FFI_TYPE_UINT32: + gprvalue = **p_argv.ui; + goto putgpr; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = **p_argv.si; + goto putgpr; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + gprvalue = **p_argv.ul; + putgpr: + *next_arg.ul++ = gprvalue; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + break; + } + } + + FFI_ASSERT (flags & FLAG_4_GPR_ARGUMENTS + || (next_arg.ul >= gpr_base.ul + && next_arg.ul <= gpr_base.ul + 4)); +} + + +#if _CALL_ELF == 2 +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} +#endif + + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_linux64 (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#if _CALL_ELF == 2 + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp[0] = 0xe96c0018; /* 0: ld 11,2f-0b(12) */ + tramp[1] = 0xe98c0010; /* ld 12,1f-0b(12) */ + tramp[2] = 0x7d8903a6; /* mtctr 12 */ + tramp[3] = 0x4e800420; /* bctr */ + /* 1: .quad function_addr */ + /* 2: .quad context */ + *(void **) &tramp[4] = (void *) ffi_closure_LINUX64; + *(void **) &tramp[6] = codeloc; + flush_icache ((char *) tramp, (char *) codeloc, 4 * 4); +#else + void **tramp = (void **) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* Copy function address and TOC from ffi_closure_LINUX64 OPD. */ + memcpy (&tramp[0], (void **) ffi_closure_LINUX64, sizeof (void *)); + tramp[1] = codeloc; + memcpy (&tramp[2], (void **) ffi_closure_LINUX64 + 1, sizeof (void *)); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + + +int FFI_HIDDEN +ffi_closure_helper_LINUX64 (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pst, + ffi_dblfl *pfr, + float128 *pvec) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pst is the pointer to parameter save area + (r3-r10 are stored into its first 8 slots by ffi_closure_LINUX64) */ + /* pfr is the pointer to where f1-f13 are stored in ffi_closure_LINUX64 */ + /* pvec is the pointer to where v2-v13 are stored in ffi_closure_LINUX64 */ + + void **avalue; + ffi_type **arg_types; + unsigned long i, avn, nfixedargs; + ffi_dblfl *end_pfr = pfr + NUM_FPR_ARG_REGISTERS64; + float128 *end_pvec = pvec + NUM_VEC_ARG_REGISTERS64; + unsigned long align; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the + closure returns the data directly to the caller. 
*/ + if (cif->rtype->type == FFI_TYPE_STRUCT + && (cif->flags & FLAG_RETURNS_SMST) == 0) + { + rvalue = (void *) *pst; + pst++; + } + + i = 0; + avn = cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((cif->flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = cif->nfixedargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + unsigned int elt, elnum; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 7; + pst++; + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 6; + pst++; + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; + pst++; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = arg_types[i]->alignment; + if (align > 16) + align = 16; + if (align > 1) + pst = (unsigned long *) FFI_ALIGN ((size_t) pst, align); + } + elt = discover_homogeneous_aggregate (cif->abi, arg_types[i], &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } to, from; + + /* Repackage the aggregate from its parts. The + aggregate size is not greater than the space taken by + the registers so store back to the register/parameter + save arrays. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (pvec + elnum <= end_pvec) + to.v = pvec; + else + to.v = pst; + } + else +#endif + if (pfr + elnum <= end_pfr) + to.v = pfr; + else + to.v = pst; + + avalue[i] = to.v; + from.ul = pst; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (pvec < end_pvec && i < nfixedargs) + memcpy (to.f128, pvec++, sizeof (float128)); + else + memcpy (to.f128, from.f128, sizeof (float128)); + to.f128++; + from.f128++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.f = (float) pfr->d; + pfr++; + } + else + *to.f = *from.f; + to.f++; + from.f++; + } + while (--elnum != 0); + } + else + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.d = pfr->d; + pfr++; + } + else + *to.d = *from.d; + to.d++; + from.d++; + } + while (--elnum != 0); + } +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
*/ + if (arg_types[i]->size < 8) + avalue[i] = (char *) pst + 8 - arg_types[i]->size; + else +#endif + avalue[i] = pst; + } + pst += (arg_types[i]->size + 7) / 8; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (((unsigned long) pst & 0xF) != 0) + ++pst; + if (pvec < end_pvec && i < nfixedargs) + avalue[i] = pvec++; + else + avalue[i] = pst; + pst += 2; + break; + } + else if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + if (pfr + 1 < end_pfr && i + 1 < nfixedargs) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr && i < nfixedargs) + { + /* Passed partly in f13 and partly on the stack. + Move it all to the stack. */ + *pst = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pst; + } + pst += 2; + break; + } + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + /* On the outgoing stack all values are aligned to 8 */ + /* there are 13 64bit floating point registers */ + + if (pfr < end_pfr && i < nfixedargs) + { + avalue[i] = pfr; + pfr++; + } + else + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + if (pfr < end_pfr && i < nfixedargs) + { + /* Float values are stored as doubles in the + ffi_closure_LINUX64 code. Fix them here. */ + pfr->f = (float) pfr->d; + avalue[i] = pfr; + pfr++; + } + else + { +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; +#else + avalue[i] = pst; +#endif + } + pst++; + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_LINUX64 how to perform return type promotions. */ + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + { + if ((cif->flags & (FLAG_RETURNS_FP | FLAG_RETURNS_VEC)) == 0) + return FFI_V2_TYPE_SMALL_STRUCT + cif->rtype->size - 1; + else if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR_HOMOG; + else if ((cif->flags & FLAG_RETURNS_64BITS) != 0) + return FFI_V2_TYPE_DOUBLE_HOMOG; + else + return FFI_V2_TYPE_FLOAT_HOMOG; + } + if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR; + return cif->rtype->type; +} +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c new file mode 100644 index 0000000..4d50878 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c @@ -0,0 +1,1153 @@ +/* ----------------------------------------------------------------------- + ffi_linux64.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifdef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the LINUX64 ABI. */ +enum { + NUM_GPR_ARG_REGISTERS64 = 8, + NUM_FPR_ARG_REGISTERS64 = 13, + NUM_VEC_ARG_REGISTERS64 = 12, +}; +enum { ASM_NEEDS_REGISTERS64 = 4 }; + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_linux64 (ffi_abi abi) +{ + if ((abi & (FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128)) == FFI_LINUX) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + + +static unsigned int +discover_homogeneous_aggregate (ffi_abi abi, + const ffi_type *t, + unsigned int *elnum) +{ + switch (t->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 64-bit long doubles are equivalent to doubles. */ + if ((abi & FFI_LINUX_LONG_DOUBLE_128) == 0) + { + *elnum = 1; + return FFI_TYPE_DOUBLE; + } + /* IBM extended precision values use unaligned pairs + of FPRs, but according to the ABI must be considered + distinct from doubles. They are also limited to a + maximum of four members in a homogeneous aggregate. */ + else if ((abi & FFI_LINUX_LONG_DOUBLE_IEEE128) == 0) + { + *elnum = 2; + return FFI_TYPE_LONGDOUBLE; + } + /* Fall through. */ +#endif + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + *elnum = 1; + return (int) t->type; + + case FFI_TYPE_STRUCT:; + { + unsigned int base_elt = 0, total_elnum = 0; + ffi_type **el = t->elements; + while (*el) + { + unsigned int el_elt, el_elnum = 0; + el_elt = discover_homogeneous_aggregate (abi, *el, &el_elnum); + if (el_elt == 0 + || (base_elt && base_elt != el_elt)) + return 0; + base_elt = el_elt; + total_elnum += el_elnum; +#if _CALL_ELF == 2 + if (total_elnum > 8) + return 0; +#else + if (total_elnum > 1) + return 0; +#endif + el++; + } + *elnum = total_elnum; + return base_elt; + } + + default: + return 0; + } +} + + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_linux64_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fparg_count = 0, intarg_count = 0, vecarg_count = 0; + unsigned flags = cif->flags; + unsigned elt, elnum, rtype; + +#if FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE + /* If compiled without long double support... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0 || + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#elif !defined(__VEC__) + /* If compiled without vector register support (used by assembly)... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#else + /* If the IEEE128 flag is set, but long double is only 64 bits wide... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) == 0 && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#endif + + /* The machine-independent calculation of cif->bytes doesn't work + for us. 
Redo the calculation. */ +#if _CALL_ELF == 2 + /* Space for backchain, CR, LR, TOC and the asm's temp regs. */ + bytes = (4 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the general registers. */ + bytes += NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#else + /* Space for backchain, CR, LR, cc/ld doubleword, TOC and the asm's temp + regs. */ + bytes = (6 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the mandatory parm save area and general registers. */ + bytes += 2 * NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#endif + + /* Return value handling. */ + rtype = cif->rtype->type; +#if _CALL_ELF == 2 +homogeneous: +#endif + switch (rtype) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + flags |= FLAG_RETURNS_VEC; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if _CALL_ELF == 2 + elt = discover_homogeneous_aggregate (cif->abi, cif->rtype, &elnum); + if (elt) + { + flags |= FLAG_RETURNS_SMST; + rtype = elt; + goto homogeneous; + } + if (cif->rtype->size <= 16) + { + flags |= FLAG_RETURNS_SMST; + break; + } +#endif + intarg_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned int align; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count++; + /* Align to 16 bytes, plus the 16-byte argument. */ + intarg_count = (intarg_count + 3) & ~0x1; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + fparg_count++; + intarg_count++; + } + /* Fall through. 
*/ +#endif + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + fparg_count++; + intarg_count++; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + align = align / 8; + if (align > 1) + intarg_count = FFI_ALIGN (intarg_count, align); + } + intarg_count += ((*ptr)->size + 7) / 8; + elt = discover_homogeneous_aggregate (cif->abi, *ptr, &elnum); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count += elnum; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + else +#endif + if (elt) + { + fparg_count += elnum; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + else + { + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 8-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + default: + FFI_ASSERT (0); + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (intarg_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (vecarg_count != 0) + flags |= FLAG_VEC_ARGUMENTS; + + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS64 * sizeof (double); + /* Space for the vector registers, if needed, aligned to 16 bytes. */ + if (vecarg_count != 0) { + bytes = (bytes + 15) & ~0xF; + bytes += NUM_VEC_ARG_REGISTERS64 * sizeof (float128); + } + + /* Stack space. */ +#if _CALL_ELF == 2 + if ((flags & FLAG_ARG_NEEDS_PSAVE) != 0) + bytes += intarg_count * sizeof (long); +#else + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + bytes += (intarg_count - NUM_GPR_ARG_REGISTERS64) * sizeof (long); +#endif + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64 (ffi_cif *cif) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = cif->nargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. */ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; + return ffi_prep_cif_linux64_core (cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64_var (ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = nfixedargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. 
*/ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; +#if _CALL_ELF == 2 + cif->flags |= FLAG_ARG_NEEDS_PSAVE; +#endif + return ffi_prep_cif_linux64_core (cif); +} + + +/* ffi_prep_args64 is called by the assembly routine once stack space + has been allocated for the function's arguments. + + The stack layout we want looks like this: + + | Ret addr from ffi_call_LINUX64 8bytes | higher addresses + |--------------------------------------------| + | CR save area 8bytes | + |--------------------------------------------| + | Previous backchain pointer 8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*8 | | ffi_call_LINUX64 + |--------------------------------------------| | + | GPR registers r3-r10 8*8 | | + |--------------------------------------------| | + | FPR registers f1-f13 (optional) 13*8 | | + |--------------------------------------------| | + | VEC registers v2-v13 (optional) 12*16 | | + |--------------------------------------------| | + | Parameter save area | | + |--------------------------------------------| | + | TOC save area 8 | | + |--------------------------------------------| | stack | + | Linker doubleword 8 | | grows | + |--------------------------------------------| | down V + | Compiler doubleword 8 | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 8 | | + |--------------------------------------------| | + | CR save area 8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 8 |-/ during + |--------------------------------------------| <<< ffi_call_LINUX64 + +*/ + +void FFI_HIDDEN +ffi_prep_args64 (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned long bytes = ecif->cif->bytes; + const unsigned long flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'next_arg' points at the space for gpr3, and grows upwards as + we use GPR registers, then continues at rest. */ + valp gpr_base; + valp gpr_end; + valp rest; + valp next_arg; + + /* 'fpr_base' points at the space for f1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + unsigned int fparg_count; + + /* 'vec_base' points at the space for v2, and grows upwards as + we use vector registers. */ + valp vec_base; + unsigned int vecarg_count; + + unsigned int i, words, nargs, nfixedargs; + ffi_type **ptr; + double double_tmp; + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + signed int **si; + unsigned int **ui; + unsigned long **ul; + float **f; + double **d; + float128 **f128; + } p_argv; + unsigned long gprvalue; + unsigned long align; + + stacktop.c = (char *) stack + bytes; + gpr_base.ul = stacktop.ul - ASM_NEEDS_REGISTERS64 - NUM_GPR_ARG_REGISTERS64; + gpr_end.ul = gpr_base.ul + NUM_GPR_ARG_REGISTERS64; +#if _CALL_ELF == 2 + rest.ul = stack + 4 + NUM_GPR_ARG_REGISTERS64; +#else + rest.ul = stack + 6 + NUM_GPR_ARG_REGISTERS64; +#endif + fpr_base.d = gpr_base.d - NUM_FPR_ARG_REGISTERS64; + fparg_count = 0; + /* Place the vector args below the FPRs, if used, else the GPRs. 
*/ + if (ecif->cif->flags & FLAG_FP_ARGUMENTS) + vec_base.p = fpr_base.p & ~0xF; + else + vec_base.p = gpr_base.p; + vec_base.f128 -= NUM_VEC_ARG_REGISTERS64; + vecarg_count = 0; + next_arg.ul = gpr_base.ul; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_base.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_end.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) vec_base.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg.ul++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + p_argv.v = ecif->avalue; + nargs = ecif->cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = ecif->cif->nfixedargs; + for (ptr = ecif->cif->arg_types, i = 0; + i < nargs; + i++, ptr++, p_argv.v++) + { + unsigned int elt, elnum; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + next_arg.p = FFI_ALIGN (next_arg.p, 16); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 && i < nfixedargs) + memcpy (vec_base.f128++, *p_argv.f128, sizeof (float128)); + else + memcpy (next_arg.f128, *p_argv.f128, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 113); + FFI_ASSERT (flags & FLAG_VEC_ARGUMENTS); + break; + } + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + double_tmp = (*p_argv.d)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + double_tmp = (*p_argv.d)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 106); + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + } + /* Fall through. 
*/ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + double_tmp = **p_argv.d; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +#endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + double_tmp = **p_argv.f; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } +#endif + } + else + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_STRUCT: + if ((ecif->cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + if (align > 1) + { + next_arg.p = FFI_ALIGN (next_arg.p, align); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + elt = discover_homogeneous_aggregate (ecif->cif->abi, *ptr, &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + float *f; + double *d; + float128 *f128; + } arg; + + arg.v = *p_argv.v; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 + && i < nfixedargs) + memcpy (vec_base.f128++, arg.f128, sizeof (float128)); + else + memcpy (next_arg.f128, arg.f128++, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + double_tmp = *arg.f++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 + && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.f = (float) double_tmp; + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + fparg_count++; + } + while (--elnum != 0); + if ((next_arg.p & 7) != 0) + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + } + else + do + { + double_tmp = *arg.d++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.d = double_tmp; + if (++next_arg.d == gpr_end.d) + next_arg.d = rest.d; + fparg_count++; + } + while (--elnum != 0); +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { + words = ((*ptr)->size + 7) / 8; + if (next_arg.ul >= gpr_base.ul && next_arg.ul + words > gpr_end.ul) + { + size_t first = gpr_end.c - next_arg.c; + memcpy (next_arg.c, *p_argv.c, first); + memcpy (rest.c, *p_argv.c + first, (*ptr)->size - first); + next_arg.c = rest.c + words * 8 - first; + } + else + { + char *where = next_arg.c; + +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
*/ + if ((*ptr)->size < 8) + where += 8 - (*ptr)->size; +#endif + memcpy (where, *p_argv.c, (*ptr)->size); + next_arg.ul += words; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + break; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + case FFI_TYPE_UINT32: + gprvalue = **p_argv.ui; + goto putgpr; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = **p_argv.si; + goto putgpr; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + gprvalue = **p_argv.ul; + putgpr: + *next_arg.ul++ = gprvalue; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + break; + } + } + + FFI_ASSERT (flags & FLAG_4_GPR_ARGUMENTS + || (next_arg.ul >= gpr_base.ul + && next_arg.ul <= gpr_base.ul + 4)); +} + + +#if _CALL_ELF == 2 +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} +#endif + + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_linux64 (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#if _CALL_ELF == 2 + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp[0] = 0xe96c0018; /* 0: ld 11,2f-0b(12) */ + tramp[1] = 0xe98c0010; /* ld 12,1f-0b(12) */ + tramp[2] = 0x7d8903a6; /* mtctr 12 */ + tramp[3] = 0x4e800420; /* bctr */ + /* 1: .quad function_addr */ + /* 2: .quad context */ + *(void **) &tramp[4] = (void *) ffi_closure_LINUX64; + *(void **) &tramp[6] = codeloc; + flush_icache ((char *) tramp, (char *) codeloc, 4 * 4); +#else + void **tramp = (void **) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* Copy function address and TOC from ffi_closure_LINUX64 OPD. */ + memcpy (&tramp[0], (void **) ffi_closure_LINUX64, sizeof (void *)); + tramp[1] = codeloc; + memcpy (&tramp[2], (void **) ffi_closure_LINUX64 + 1, sizeof (void *)); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + + +int FFI_HIDDEN +ffi_closure_helper_LINUX64 (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pst, + ffi_dblfl *pfr, + float128 *pvec) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pst is the pointer to parameter save area + (r3-r10 are stored into its first 8 slots by ffi_closure_LINUX64) */ + /* pfr is the pointer to where f1-f13 are stored in ffi_closure_LINUX64 */ + /* pvec is the pointer to where v2-v13 are stored in ffi_closure_LINUX64 */ + + void **avalue; + ffi_type **arg_types; + unsigned long i, avn, nfixedargs; + ffi_dblfl *end_pfr = pfr + NUM_FPR_ARG_REGISTERS64; + float128 *end_pvec = pvec + NUM_VEC_ARG_REGISTERS64; + unsigned long align; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the + closure returns the data directly to the caller. 
*/ + if (cif->rtype->type == FFI_TYPE_STRUCT + && (cif->flags & FLAG_RETURNS_SMST) == 0) + { + rvalue = (void *) *pst; + pst++; + } + + i = 0; + avn = cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((cif->flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = cif->nfixedargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + unsigned int elt, elnum; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 7; + pst++; + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 6; + pst++; + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; + pst++; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = arg_types[i]->alignment; + if (align > 16) + align = 16; + if (align > 1) + pst = (unsigned long *) FFI_ALIGN ((size_t) pst, align); + } + elt = discover_homogeneous_aggregate (cif->abi, arg_types[i], &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } to, from; + + /* Repackage the aggregate from its parts. The + aggregate size is not greater than the space taken by + the registers so store back to the register/parameter + save arrays. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (pvec + elnum <= end_pvec) + to.v = pvec; + else + to.v = pst; + } + else +#endif + if (pfr + elnum <= end_pfr) + to.v = pfr; + else + to.v = pst; + + avalue[i] = to.v; + from.ul = pst; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (pvec < end_pvec && i < nfixedargs) + memcpy (to.f128, pvec++, sizeof (float128)); + else + memcpy (to.f128, from.f128, sizeof (float128)); + to.f128++; + from.f128++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.f = (float) pfr->d; + pfr++; + } + else + *to.f = *from.f; + to.f++; + from.f++; + } + while (--elnum != 0); + } + else + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.d = pfr->d; + pfr++; + } + else + *to.d = *from.d; + to.d++; + from.d++; + } + while (--elnum != 0); + } +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
*/ + if (arg_types[i]->size < 8) + avalue[i] = (char *) pst + 8 - arg_types[i]->size; + else +#endif + avalue[i] = pst; + } + pst += (arg_types[i]->size + 7) / 8; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (((unsigned long) pst & 0xF) != 0) + ++pst; + if (pvec < end_pvec && i < nfixedargs) + avalue[i] = pvec++; + else + avalue[i] = pst; + pst += 2; + break; + } + else if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + if (pfr + 1 < end_pfr && i + 1 < nfixedargs) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr && i < nfixedargs) + { + /* Passed partly in f13 and partly on the stack. + Move it all to the stack. */ + *pst = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pst; + } + pst += 2; + break; + } + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + /* On the outgoing stack all values are aligned to 8 */ + /* there are 13 64bit floating point registers */ + + if (pfr < end_pfr && i < nfixedargs) + { + avalue[i] = pfr; + pfr++; + } + else + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + if (pfr < end_pfr && i < nfixedargs) + { + /* Float values are stored as doubles in the + ffi_closure_LINUX64 code. Fix them here. */ + pfr->f = (float) pfr->d; + avalue[i] = pfr; + pfr++; + } + else + { +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; +#else + avalue[i] = pst; +#endif + } + pst++; + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_LINUX64 how to perform return type promotions. */ + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + { + if ((cif->flags & (FLAG_RETURNS_FP | FLAG_RETURNS_VEC)) == 0) + return FFI_V2_TYPE_SMALL_STRUCT + cif->rtype->size - 1; + else if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR_HOMOG; + else if ((cif->flags & FLAG_RETURNS_64BITS) != 0) + return FFI_V2_TYPE_DOUBLE_HOMOG; + else + return FFI_V2_TYPE_FLOAT_HOMOG; + } + if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR; + return cif->rtype->type; +} +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc 2.h new file mode 100644 index 0000000..8e2f2f0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc 2.h @@ -0,0 +1,105 @@ +/* ----------------------------------------------------------------------- + ffi_powerpc.h - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +enum { + /* The assembly depends on these exact flags. */ + /* These go in cr7 */ + FLAG_RETURNS_SMST = 1 << (31-31), /* Used for FFI_SYSV small structs. */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_VEC = 1 << (31-28), + + /* These go in cr6 */ + FLAG_RETURNS_64BITS = 1 << (31-27), + FLAG_RETURNS_128BITS = 1 << (31-26), + + FLAG_COMPAT = 1 << (31- 8), /* Not used by assembly */ + + /* These go in cr1 */ + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), /* Used by sysv code */ + FLAG_ARG_NEEDS_PSAVE = FLAG_ARG_NEEDS_COPY, /* Used by linux64 code */ + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4), + FLAG_VEC_ARGUMENTS = 1 << (31- 3), +}; + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +#if defined(__FLOAT128_TYPE__) +typedef _Float128 float128; +#elif defined(__FLOAT128__) +typedef __float128 float128; +#else +typedef char float128[16] __attribute__((aligned(16))); +#endif + +void FFI_HIDDEN ffi_closure_SYSV (void); +void FFI_HIDDEN ffi_go_closure_sysv (void); +void FFI_HIDDEN ffi_call_SYSV(extended_cif *, void (*)(void), void *, + unsigned, void *, int); + +void FFI_HIDDEN ffi_prep_types_sysv (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_sysv (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_sysv (ffi_closure *, + ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_SYSV (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, unsigned long *, + ffi_dblfl *, unsigned long *); + +void FFI_HIDDEN ffi_call_LINUX64(extended_cif *, void (*) (void), void *, + unsigned long, void *, long); +void FFI_HIDDEN ffi_closure_LINUX64 (void); +void FFI_HIDDEN ffi_go_closure_linux64 (void); + +void FFI_HIDDEN ffi_prep_types_linux64 (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64 (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64_var (ffi_cif *, unsigned int, + unsigned int); +void FFI_HIDDEN ffi_prep_args64 (extended_cif *, unsigned long *const); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_linux64 (ffi_closure *, ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_LINUX64 (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, + unsigned long *, ffi_dblfl *, + float128 *); diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h new file mode 100644 index 0000000..8e2f2f0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h @@ -0,0 +1,105 @@ +/* ----------------------------------------------------------------------- + ffi_powerpc.h - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 
2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +enum { + /* The assembly depends on these exact flags. */ + /* These go in cr7 */ + FLAG_RETURNS_SMST = 1 << (31-31), /* Used for FFI_SYSV small structs. */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_VEC = 1 << (31-28), + + /* These go in cr6 */ + FLAG_RETURNS_64BITS = 1 << (31-27), + FLAG_RETURNS_128BITS = 1 << (31-26), + + FLAG_COMPAT = 1 << (31- 8), /* Not used by assembly */ + + /* These go in cr1 */ + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), /* Used by sysv code */ + FLAG_ARG_NEEDS_PSAVE = FLAG_ARG_NEEDS_COPY, /* Used by linux64 code */ + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4), + FLAG_VEC_ARGUMENTS = 1 << (31- 3), +}; + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +#if defined(__FLOAT128_TYPE__) +typedef _Float128 float128; +#elif defined(__FLOAT128__) +typedef __float128 float128; +#else +typedef char float128[16] __attribute__((aligned(16))); +#endif + +void FFI_HIDDEN ffi_closure_SYSV (void); +void FFI_HIDDEN ffi_go_closure_sysv (void); +void FFI_HIDDEN ffi_call_SYSV(extended_cif *, void (*)(void), void *, + unsigned, void *, int); + +void FFI_HIDDEN ffi_prep_types_sysv (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_sysv (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_sysv (ffi_closure *, + ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_SYSV (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, unsigned long *, + ffi_dblfl *, unsigned long *); + +void FFI_HIDDEN ffi_call_LINUX64(extended_cif *, void (*) (void), void *, + unsigned long, void *, long); +void FFI_HIDDEN ffi_closure_LINUX64 (void); +void FFI_HIDDEN ffi_go_closure_linux64 (void); + +void FFI_HIDDEN ffi_prep_types_linux64 (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64 (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64_var (ffi_cif *, unsigned int, + unsigned int); +void FFI_HIDDEN ffi_prep_args64 (extended_cif *, unsigned long *const); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_linux64 (ffi_closure *, ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_LINUX64 (ffi_cif *, 
+ void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, + unsigned long *, ffi_dblfl *, + float128 *); diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv 2.c new file mode 100644 index 0000000..4078e75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv 2.c @@ -0,0 +1,923 @@ +/* ----------------------------------------------------------------------- + ffi_sysv.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifndef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the SYSV ABI. */ +#define ASM_NEEDS_REGISTERS 6 +#define NUM_GPR_ARG_REGISTERS 8 +#define NUM_FPR_ARG_REGISTERS 8 + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_sysv (ffi_abi abi) +{ + if ((abi & (FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128)) == FFI_SYSV) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + +/* Transform long double, double and float to other types as per abi. 
*/ +static int +translate_float (int abi, int type) +{ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (type == FFI_TYPE_LONGDOUBLE + && (abi & FFI_SYSV_LONG_DOUBLE_128) == 0) + type = FFI_TYPE_DOUBLE; +#endif + if ((abi & FFI_SYSV_SOFT_FLOAT) != 0) + { + if (type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + else if (type == FFI_TYPE_DOUBLE) + type = FFI_TYPE_UINT64; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + else if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_UINT128; + } + else if ((abi & FFI_SYSV_IBM_LONG_DOUBLE) == 0) + { + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + } + return type; +} + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_sysv_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fpr_count = 0, gpr_count = 0, stack_count = 0; + unsigned flags = cif->flags; + unsigned struct_copy_size = 0; + unsigned type = cif->rtype->type; + unsigned size = cif->rtype->size; + + /* The machine-independent calculation of cif->bytes doesn't work + for us. Redo the calculation. */ + + /* Space for the frame pointer, callee's LR, and the asm's temp regs. */ + bytes = (2 + ASM_NEEDS_REGISTERS) * sizeof (int); + + /* Space for the GPR registers. */ + bytes += NUM_GPR_ARG_REGISTERS * sizeof (int); + + /* Return value handling. The rules for SYSV are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - Structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values and structures between 5 and 8 bytes are returned + in gpr3 and gpr4; + - Larger structures are allocated space and a pointer is passed as + the first argument. + - Single/double FP values are returned in fpr1; + - long doubles (if not equivalent to double) are returned in + fpr1,fpr2 for Linux and as for large structs for SysV. */ + + type = translate_float (cif->abi, type); + + switch (type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead + returns them in memory. */ + if ((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + { + flags |= FLAG_RETURNS_SMST; + break; + } + gpr_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. Structures and long doubles (if not equivalent + to double) are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. 
*/ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned short typenum = (*ptr)->type; + + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS - 1) + { + fpr_count = NUM_FPR_ARG_REGISTERS; + /* 8-byte align long doubles. */ + stack_count += stack_count & 1; + stack_count += 4; + } + else + fpr_count += 2; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; +#endif + + case FFI_TYPE_DOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + { + /* 8-byte align doubles. */ + stack_count += stack_count & 1; + stack_count += 2; + } + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_FLOAT: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + /* Yes, we don't follow the ABI, but neither does gcc. */ + stack_count += 1; + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + /* A long double in FFI_LINUX_SOFT_FLOAT can use only a set + of four consecutive gprs. If we do not have enough, we + have to adjust the gpr_count value. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS - 3) + gpr_count = NUM_GPR_ARG_REGISTERS; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 4; + else + gpr_count += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. If they go on the stack, they must + be 8-byte-aligned. + + Also, only certain register pairs can be used for + passing long long int -- specifically (r3,r4), (r5,r6), + (r7,r8), (r9,r10). */ + gpr_count += gpr_count & 1; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + { + stack_count += stack_count & 1; + stack_count += 2; + } + else + gpr_count += 2; + break; + + case FFI_TYPE_STRUCT: + /* We must allocate space for a copy of these to enforce + pass-by-value. Pad the space up to a multiple of 16 + bytes (the maximum alignment required for anything under + the SYSV ABI). */ + struct_copy_size += ((*ptr)->size + 15) & ~0xF; + /* Fall through (allocate space for the pointer). */ + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 1; + else + gpr_count += 1; + break; + + default: + FFI_ASSERT (0); + } + } + + if (fpr_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (gpr_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (struct_copy_size != 0) + flags |= FLAG_ARG_NEEDS_COPY; + + /* Space for the FPR registers, if needed. */ + if (fpr_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof (double); + + /* Stack space. */ + bytes += stack_count * sizeof (int); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + /* Add in the space for the copied structures. */ + bytes += struct_copy_size; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_sysv (ffi_cif *cif) +{ + if ((cif->abi & FFI_SYSV) == 0) + { + /* This call is from old code. Translate to new ABI values. 
*/ + cif->flags |= FLAG_COMPAT; + switch (cif->abi) + { + default: + return FFI_BAD_ABI; + + case FFI_COMPAT_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_STRUCT_RET | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_GCC_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_LINUX: + cif->abi = (FFI_SYSV | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + + case FFI_COMPAT_LINUX_SOFT_FLOAT: + cif->abi = (FFI_SYSV | FFI_SYSV_SOFT_FLOAT | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + } + } + return ffi_prep_cif_sysv_core (cif); +} + +/* ffi_prep_args_SYSV is called by the assembly routine once stack space + has been allocated for the function's arguments. + + The stack layout we want looks like this: + + | Return address from ffi_call_SYSV 4bytes | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*4 | | ffi_call_SYSV + |--------------------------------------------| | + | GPR registers r3-r10 8*4 | | ffi_call_SYSV + |--------------------------------------------| | + | FPR registers f1-f8 (optional) 8*8 | | + |--------------------------------------------| | stack | + | Space for copied structures | | grows | + |--------------------------------------------| | down V + | Parameters that didn't fit in registers | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 4 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4 |-/ during + |--------------------------------------------| <<< ffi_call_SYSV + +*/ + +void FFI_HIDDEN +ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned *u; + long long *ll; + float *f; + double *d; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'gpr_base' points at the space for gpr3, and grows upwards as + we use GPR registers. */ + valp gpr_base; + valp gpr_end; + +#ifndef __NO_FPRS__ + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + valp fpr_end; +#endif + + /* 'copy_space' grows down as we put structures in it. It should + stay 16-byte aligned. */ + valp copy_space; + + /* 'next_arg' grows up as we put parameters in it. */ + valp next_arg; + + int i; + ffi_type **ptr; +#ifndef __NO_FPRS__ + double double_tmp; +#endif + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **ui; + long long **ll; + float **f; + double **d; + } p_argv; + size_t struct_copy_size; + unsigned gprvalue; + + stacktop.c = (char *) stack + bytes; + gpr_end.u = stacktop.u - ASM_NEEDS_REGISTERS; + gpr_base.u = gpr_end.u - NUM_GPR_ARG_REGISTERS; +#ifndef __NO_FPRS__ + fpr_end.d = gpr_base.d; + fpr_base.d = fpr_end.d - NUM_FPR_ARG_REGISTERS; + copy_space.c = ((flags & FLAG_FP_ARGUMENTS) ? fpr_base.c : gpr_base.c); +#else + copy_space.c = gpr_base.c; +#endif + next_arg.u = stack + 2; + + /* Check that everything starts aligned properly. 
*/ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) copy_space.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + FFI_ASSERT (copy_space.c >= next_arg.c); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *gpr_base.u++ = (unsigned) (char *) ecif->rvalue; + + /* Now for the arguments. */ + p_argv.v = ecif->avalue; + for (ptr = ecif->cif->arg_types, i = ecif->cif->nargs; + i > 0; + i--, ptr++, p_argv.v++) + { + unsigned int typenum = (*ptr)->type; + + typenum = translate_float (ecif->cif->abi, typenum); + + /* Now test the translated value */ + switch (typenum) + { +#ifndef __NO_FPRS__ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + double_tmp = (*p_argv.d)[0]; + + if (fpr_base.d >= fpr_end.d - 1) + { + fpr_base.d = fpr_end.d; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + double_tmp = (*p_argv.d)[1]; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + { + *fpr_base.d++ = double_tmp; + double_tmp = (*p_argv.d)[1]; + *fpr_base.d++ = double_tmp; + } + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +# endif + case FFI_TYPE_DOUBLE: + double_tmp = **p_argv.d; + + if (fpr_base.d >= fpr_end.d) + { + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: + double_tmp = **p_argv.f; + if (fpr_base.d >= fpr_end.d) + { + *next_arg.f = (float) double_tmp; + next_arg.u += 1; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +#endif /* have FPRs */ + + case FFI_TYPE_UINT128: + /* The soft float ABI for long doubles works like this, a long double + is passed in four consecutive GPRs if available. A maximum of 2 + long doubles can be passed in gprs. If we do not have 4 GPRs + left, the long double is passed on the stack, 4-byte aligned. */ + if (gpr_base.u >= gpr_end.u - 3) + { + unsigned int ii; + gpr_base.u = gpr_end.u; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *next_arg.u++ = int_tmp; + } + } + else + { + unsigned int ii; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *gpr_base.u++ = int_tmp; + } + } + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (gpr_base.u >= gpr_end.u - 1) + { + gpr_base.u = gpr_end.u; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u++; + *next_arg.ll = **p_argv.ll; + next_arg.u += 2; + } + else + { + /* The abi states only certain register pairs can be + used for passing long long int specifically (r3,r4), + (r5,r6), (r7,r8), (r9,r10). If next arg is long long + but not correct starting register of pair then skip + until the proper starting register. 
*/ + if (((gpr_end.u - gpr_base.u) & 1) != 0) + gpr_base.u++; + *gpr_base.ll++ = **p_argv.ll; + } + break; + + case FFI_TYPE_STRUCT: + struct_copy_size = ((*ptr)->size + 15) & ~0xF; + copy_space.c -= struct_copy_size; + memcpy (copy_space.c, *p_argv.c, (*ptr)->size); + + gprvalue = (unsigned long) copy_space.c; + + FFI_ASSERT (copy_space.c > next_arg.c); + FFI_ASSERT (flags & FLAG_ARG_NEEDS_COPY); + goto putgpr; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + + gprvalue = **p_argv.ui; + + putgpr: + if (gpr_base.u >= gpr_end.u) + *next_arg.u++ = gprvalue; + else + *gpr_base.u++ = gprvalue; + break; + } + } + + /* Check that we didn't overrun the stack... */ + FFI_ASSERT (copy_space.c >= next_arg.c); + FFI_ASSERT (gpr_base.u <= gpr_end.u); +#ifndef __NO_FPRS__ + FFI_ASSERT (fpr_base.u <= fpr_end.u); +#endif + FFI_ASSERT (((flags & FLAG_4_GPR_ARGUMENTS) != 0) + == (gpr_end.u - gpr_base.u < 4)); +} + +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_sysv (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi < FFI_SYSV || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0005; /* bcl 20,31,.+4 */ + tramp[2] = 0x7d6802a6; /* mflr r11 */ + tramp[3] = 0x7c0803a6; /* mtlr r0 */ + tramp[4] = 0x800b0018; /* lwz r0,24(r11) */ + tramp[5] = 0x816b001c; /* lwz r11,28(r11) */ + tramp[6] = 0x7c0903a6; /* mtctr r0 */ + tramp[7] = 0x4e800420; /* bctr */ + *(void **) &tramp[8] = (void *) ffi_closure_SYSV; /* function */ + *(void **) &tramp[9] = codeloc; /* context */ + + /* Flush the icache. */ + flush_icache ((char *)tramp, (char *)codeloc, 8 * 4); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_SYSV invokes the + following helper function to do most of the work. 
*/ + +int +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pgr, + ffi_dblfl *pfr, + unsigned long *pst) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pgr is the pointer to where r3-r10 are stored in ffi_closure_SYSV */ + /* pfr is the pointer to where f1-f8 are stored in ffi_closure_SYSV */ + /* pst is the pointer to outgoing parameter stack in original caller */ + + void ** avalue; + ffi_type ** arg_types; + long i, avn; +#ifndef __NO_FPRS__ + long nf = 0; /* number of floating registers already used */ +#endif + long ng = 0; /* number of general registers already used */ + + unsigned size = cif->rtype->size; + unsigned short rtypenum = cif->rtype->type; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* First translate for softfloat/nonlinux */ + rtypenum = translate_float (cif->abi, rtypenum); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. + For FFI_SYSV the result is passed in r3/r4 if the struct size is less + or equal 8 bytes. */ + if (rtypenum == FFI_TYPE_STRUCT + && !((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8)) + { + rvalue = (void *) *pgr; + ng++; + pgr++; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) { + unsigned short typenum = arg_types[i]->type; + + /* We may need to handle some values depending on ABI. */ + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#ifndef __NO_FPRS__ + case FFI_TYPE_FLOAT: + /* Unfortunately float values are stored as doubles + in the ffi_closure_SYSV code (since we don't check + the type in that routine). */ + if (nf < NUM_FPR_ARG_REGISTERS) + { + /* FIXME? here we are really changing the values + stored in the original calling routines outgoing + parameter stack. This is probably a really + naughty thing to do but... */ + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + avalue[i] = pst; + pst += 1; + } + break; + + case FFI_TYPE_DOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS) + { + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + } + break; + +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS - 1) + { + avalue[i] = pfr; + pfr += 2; + nf += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 4; + nf = 8; + } + break; +# endif +#endif + + case FFI_TYPE_UINT128: + /* Test if for the whole long double, 4 gprs are available. + otherwise the stuff ends up on the stack. 
*/ + if (ng < NUM_GPR_ARG_REGISTERS - 3) + { + avalue[i] = pgr; + pgr += 4; + ng += 4; + } + else + { + avalue[i] = pst; + pst += 4; + ng = 8+4; + } + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 3; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 3; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 2; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 2; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = pgr; + ng++; + pgr++; + } + else + { + avalue[i] = pst; + pst++; + } + break; + + case FFI_TYPE_STRUCT: + /* Structs are passed by reference. The address will appear in a + gpr if it is one of the first 8 arguments. */ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (void *) *pgr; + ng++; + pgr++; + } + else + { + avalue[i] = (void *) *pst; + pst++; + } + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passing long long ints are complex, they must + be passed in suitable register pairs such as + (r3,r4) or (r5,r6) or (r6,r7), or (r7,r8) or (r9,r10) + and if the entire pair aren't available then the outgoing + parameter stack is used for both but an alignment of 8 + must will be kept. So we must either look in pgr + or pst to find the correct address for this type + of parameter. */ + if (ng < NUM_GPR_ARG_REGISTERS - 1) + { + if (ng & 1) + { + /* skip r4, r6, r8 as starting points */ + ng++; + pgr++; + } + avalue[i] = pgr; + ng += 2; + pgr += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + ng = NUM_GPR_ARG_REGISTERS; + } + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. + Because the FFI_SYSV ABI returns the structures <= 8 bytes in + r3/r4 we have to tell ffi_closure_SYSV how to treat them. We + combine the base type FFI_SYSV_TYPE_SMALL_STRUCT with the size of + the struct less one. We never have a struct with size zero. + See the comment in ffitarget.h about ordering. 
*/ + if (rtypenum == FFI_TYPE_STRUCT + && (cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + return FFI_SYSV_TYPE_SMALL_STRUCT - 1 + size; + return rtypenum; +} +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c new file mode 100644 index 0000000..4078e75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c @@ -0,0 +1,923 @@ +/* ----------------------------------------------------------------------- + ffi_sysv.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifndef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the SYSV ABI. */ +#define ASM_NEEDS_REGISTERS 6 +#define NUM_GPR_ARG_REGISTERS 8 +#define NUM_FPR_ARG_REGISTERS 8 + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_sysv (ffi_abi abi) +{ + if ((abi & (FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128)) == FFI_SYSV) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + +/* Transform long double, double and float to other types as per abi. 
*/ +static int +translate_float (int abi, int type) +{ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (type == FFI_TYPE_LONGDOUBLE + && (abi & FFI_SYSV_LONG_DOUBLE_128) == 0) + type = FFI_TYPE_DOUBLE; +#endif + if ((abi & FFI_SYSV_SOFT_FLOAT) != 0) + { + if (type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + else if (type == FFI_TYPE_DOUBLE) + type = FFI_TYPE_UINT64; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + else if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_UINT128; + } + else if ((abi & FFI_SYSV_IBM_LONG_DOUBLE) == 0) + { + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + } + return type; +} + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_sysv_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fpr_count = 0, gpr_count = 0, stack_count = 0; + unsigned flags = cif->flags; + unsigned struct_copy_size = 0; + unsigned type = cif->rtype->type; + unsigned size = cif->rtype->size; + + /* The machine-independent calculation of cif->bytes doesn't work + for us. Redo the calculation. */ + + /* Space for the frame pointer, callee's LR, and the asm's temp regs. */ + bytes = (2 + ASM_NEEDS_REGISTERS) * sizeof (int); + + /* Space for the GPR registers. */ + bytes += NUM_GPR_ARG_REGISTERS * sizeof (int); + + /* Return value handling. The rules for SYSV are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - Structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values and structures between 5 and 8 bytes are returned + in gpr3 and gpr4; + - Larger structures are allocated space and a pointer is passed as + the first argument. + - Single/double FP values are returned in fpr1; + - long doubles (if not equivalent to double) are returned in + fpr1,fpr2 for Linux and as for large structs for SysV. */ + + type = translate_float (cif->abi, type); + + switch (type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead + returns them in memory. */ + if ((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + { + flags |= FLAG_RETURNS_SMST; + break; + } + gpr_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. Structures and long doubles (if not equivalent + to double) are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. 
*/ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned short typenum = (*ptr)->type; + + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS - 1) + { + fpr_count = NUM_FPR_ARG_REGISTERS; + /* 8-byte align long doubles. */ + stack_count += stack_count & 1; + stack_count += 4; + } + else + fpr_count += 2; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; +#endif + + case FFI_TYPE_DOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + { + /* 8-byte align doubles. */ + stack_count += stack_count & 1; + stack_count += 2; + } + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_FLOAT: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + /* Yes, we don't follow the ABI, but neither does gcc. */ + stack_count += 1; + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + /* A long double in FFI_LINUX_SOFT_FLOAT can use only a set + of four consecutive gprs. If we do not have enough, we + have to adjust the gpr_count value. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS - 3) + gpr_count = NUM_GPR_ARG_REGISTERS; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 4; + else + gpr_count += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. If they go on the stack, they must + be 8-byte-aligned. + + Also, only certain register pairs can be used for + passing long long int -- specifically (r3,r4), (r5,r6), + (r7,r8), (r9,r10). */ + gpr_count += gpr_count & 1; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + { + stack_count += stack_count & 1; + stack_count += 2; + } + else + gpr_count += 2; + break; + + case FFI_TYPE_STRUCT: + /* We must allocate space for a copy of these to enforce + pass-by-value. Pad the space up to a multiple of 16 + bytes (the maximum alignment required for anything under + the SYSV ABI). */ + struct_copy_size += ((*ptr)->size + 15) & ~0xF; + /* Fall through (allocate space for the pointer). */ + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 1; + else + gpr_count += 1; + break; + + default: + FFI_ASSERT (0); + } + } + + if (fpr_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (gpr_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (struct_copy_size != 0) + flags |= FLAG_ARG_NEEDS_COPY; + + /* Space for the FPR registers, if needed. */ + if (fpr_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof (double); + + /* Stack space. */ + bytes += stack_count * sizeof (int); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + /* Add in the space for the copied structures. */ + bytes += struct_copy_size; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_sysv (ffi_cif *cif) +{ + if ((cif->abi & FFI_SYSV) == 0) + { + /* This call is from old code. Translate to new ABI values. 
*/ + cif->flags |= FLAG_COMPAT; + switch (cif->abi) + { + default: + return FFI_BAD_ABI; + + case FFI_COMPAT_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_STRUCT_RET | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_GCC_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_LINUX: + cif->abi = (FFI_SYSV | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + + case FFI_COMPAT_LINUX_SOFT_FLOAT: + cif->abi = (FFI_SYSV | FFI_SYSV_SOFT_FLOAT | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + } + } + return ffi_prep_cif_sysv_core (cif); +} + +/* ffi_prep_args_SYSV is called by the assembly routine once stack space + has been allocated for the function's arguments. + + The stack layout we want looks like this: + + | Return address from ffi_call_SYSV 4bytes | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*4 | | ffi_call_SYSV + |--------------------------------------------| | + | GPR registers r3-r10 8*4 | | ffi_call_SYSV + |--------------------------------------------| | + | FPR registers f1-f8 (optional) 8*8 | | + |--------------------------------------------| | stack | + | Space for copied structures | | grows | + |--------------------------------------------| | down V + | Parameters that didn't fit in registers | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 4 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4 |-/ during + |--------------------------------------------| <<< ffi_call_SYSV + +*/ + +void FFI_HIDDEN +ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned *u; + long long *ll; + float *f; + double *d; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'gpr_base' points at the space for gpr3, and grows upwards as + we use GPR registers. */ + valp gpr_base; + valp gpr_end; + +#ifndef __NO_FPRS__ + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + valp fpr_end; +#endif + + /* 'copy_space' grows down as we put structures in it. It should + stay 16-byte aligned. */ + valp copy_space; + + /* 'next_arg' grows up as we put parameters in it. */ + valp next_arg; + + int i; + ffi_type **ptr; +#ifndef __NO_FPRS__ + double double_tmp; +#endif + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **ui; + long long **ll; + float **f; + double **d; + } p_argv; + size_t struct_copy_size; + unsigned gprvalue; + + stacktop.c = (char *) stack + bytes; + gpr_end.u = stacktop.u - ASM_NEEDS_REGISTERS; + gpr_base.u = gpr_end.u - NUM_GPR_ARG_REGISTERS; +#ifndef __NO_FPRS__ + fpr_end.d = gpr_base.d; + fpr_base.d = fpr_end.d - NUM_FPR_ARG_REGISTERS; + copy_space.c = ((flags & FLAG_FP_ARGUMENTS) ? fpr_base.c : gpr_base.c); +#else + copy_space.c = gpr_base.c; +#endif + next_arg.u = stack + 2; + + /* Check that everything starts aligned properly. 
*/ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) copy_space.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + FFI_ASSERT (copy_space.c >= next_arg.c); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *gpr_base.u++ = (unsigned) (char *) ecif->rvalue; + + /* Now for the arguments. */ + p_argv.v = ecif->avalue; + for (ptr = ecif->cif->arg_types, i = ecif->cif->nargs; + i > 0; + i--, ptr++, p_argv.v++) + { + unsigned int typenum = (*ptr)->type; + + typenum = translate_float (ecif->cif->abi, typenum); + + /* Now test the translated value */ + switch (typenum) + { +#ifndef __NO_FPRS__ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + double_tmp = (*p_argv.d)[0]; + + if (fpr_base.d >= fpr_end.d - 1) + { + fpr_base.d = fpr_end.d; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + double_tmp = (*p_argv.d)[1]; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + { + *fpr_base.d++ = double_tmp; + double_tmp = (*p_argv.d)[1]; + *fpr_base.d++ = double_tmp; + } + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +# endif + case FFI_TYPE_DOUBLE: + double_tmp = **p_argv.d; + + if (fpr_base.d >= fpr_end.d) + { + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: + double_tmp = **p_argv.f; + if (fpr_base.d >= fpr_end.d) + { + *next_arg.f = (float) double_tmp; + next_arg.u += 1; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +#endif /* have FPRs */ + + case FFI_TYPE_UINT128: + /* The soft float ABI for long doubles works like this, a long double + is passed in four consecutive GPRs if available. A maximum of 2 + long doubles can be passed in gprs. If we do not have 4 GPRs + left, the long double is passed on the stack, 4-byte aligned. */ + if (gpr_base.u >= gpr_end.u - 3) + { + unsigned int ii; + gpr_base.u = gpr_end.u; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *next_arg.u++ = int_tmp; + } + } + else + { + unsigned int ii; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *gpr_base.u++ = int_tmp; + } + } + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (gpr_base.u >= gpr_end.u - 1) + { + gpr_base.u = gpr_end.u; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u++; + *next_arg.ll = **p_argv.ll; + next_arg.u += 2; + } + else + { + /* The abi states only certain register pairs can be + used for passing long long int specifically (r3,r4), + (r5,r6), (r7,r8), (r9,r10). If next arg is long long + but not correct starting register of pair then skip + until the proper starting register. 
*/ + if (((gpr_end.u - gpr_base.u) & 1) != 0) + gpr_base.u++; + *gpr_base.ll++ = **p_argv.ll; + } + break; + + case FFI_TYPE_STRUCT: + struct_copy_size = ((*ptr)->size + 15) & ~0xF; + copy_space.c -= struct_copy_size; + memcpy (copy_space.c, *p_argv.c, (*ptr)->size); + + gprvalue = (unsigned long) copy_space.c; + + FFI_ASSERT (copy_space.c > next_arg.c); + FFI_ASSERT (flags & FLAG_ARG_NEEDS_COPY); + goto putgpr; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + + gprvalue = **p_argv.ui; + + putgpr: + if (gpr_base.u >= gpr_end.u) + *next_arg.u++ = gprvalue; + else + *gpr_base.u++ = gprvalue; + break; + } + } + + /* Check that we didn't overrun the stack... */ + FFI_ASSERT (copy_space.c >= next_arg.c); + FFI_ASSERT (gpr_base.u <= gpr_end.u); +#ifndef __NO_FPRS__ + FFI_ASSERT (fpr_base.u <= fpr_end.u); +#endif + FFI_ASSERT (((flags & FLAG_4_GPR_ARGUMENTS) != 0) + == (gpr_end.u - gpr_base.u < 4)); +} + +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_sysv (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi < FFI_SYSV || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0005; /* bcl 20,31,.+4 */ + tramp[2] = 0x7d6802a6; /* mflr r11 */ + tramp[3] = 0x7c0803a6; /* mtlr r0 */ + tramp[4] = 0x800b0018; /* lwz r0,24(r11) */ + tramp[5] = 0x816b001c; /* lwz r11,28(r11) */ + tramp[6] = 0x7c0903a6; /* mtctr r0 */ + tramp[7] = 0x4e800420; /* bctr */ + *(void **) &tramp[8] = (void *) ffi_closure_SYSV; /* function */ + *(void **) &tramp[9] = codeloc; /* context */ + + /* Flush the icache. */ + flush_icache ((char *)tramp, (char *)codeloc, 8 * 4); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_SYSV invokes the + following helper function to do most of the work. 
*/ + +int +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pgr, + ffi_dblfl *pfr, + unsigned long *pst) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pgr is the pointer to where r3-r10 are stored in ffi_closure_SYSV */ + /* pfr is the pointer to where f1-f8 are stored in ffi_closure_SYSV */ + /* pst is the pointer to outgoing parameter stack in original caller */ + + void ** avalue; + ffi_type ** arg_types; + long i, avn; +#ifndef __NO_FPRS__ + long nf = 0; /* number of floating registers already used */ +#endif + long ng = 0; /* number of general registers already used */ + + unsigned size = cif->rtype->size; + unsigned short rtypenum = cif->rtype->type; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* First translate for softfloat/nonlinux */ + rtypenum = translate_float (cif->abi, rtypenum); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. + For FFI_SYSV the result is passed in r3/r4 if the struct size is less + or equal 8 bytes. */ + if (rtypenum == FFI_TYPE_STRUCT + && !((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8)) + { + rvalue = (void *) *pgr; + ng++; + pgr++; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) { + unsigned short typenum = arg_types[i]->type; + + /* We may need to handle some values depending on ABI. */ + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#ifndef __NO_FPRS__ + case FFI_TYPE_FLOAT: + /* Unfortunately float values are stored as doubles + in the ffi_closure_SYSV code (since we don't check + the type in that routine). */ + if (nf < NUM_FPR_ARG_REGISTERS) + { + /* FIXME? here we are really changing the values + stored in the original calling routines outgoing + parameter stack. This is probably a really + naughty thing to do but... */ + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + avalue[i] = pst; + pst += 1; + } + break; + + case FFI_TYPE_DOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS) + { + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + } + break; + +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS - 1) + { + avalue[i] = pfr; + pfr += 2; + nf += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 4; + nf = 8; + } + break; +# endif +#endif + + case FFI_TYPE_UINT128: + /* Test if for the whole long double, 4 gprs are available. + otherwise the stuff ends up on the stack. 
*/ + if (ng < NUM_GPR_ARG_REGISTERS - 3) + { + avalue[i] = pgr; + pgr += 4; + ng += 4; + } + else + { + avalue[i] = pst; + pst += 4; + ng = 8+4; + } + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 3; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 3; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 2; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 2; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = pgr; + ng++; + pgr++; + } + else + { + avalue[i] = pst; + pst++; + } + break; + + case FFI_TYPE_STRUCT: + /* Structs are passed by reference. The address will appear in a + gpr if it is one of the first 8 arguments. */ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (void *) *pgr; + ng++; + pgr++; + } + else + { + avalue[i] = (void *) *pst; + pst++; + } + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passing long long ints are complex, they must + be passed in suitable register pairs such as + (r3,r4) or (r5,r6) or (r6,r7), or (r7,r8) or (r9,r10) + and if the entire pair aren't available then the outgoing + parameter stack is used for both but an alignment of 8 + must will be kept. So we must either look in pgr + or pst to find the correct address for this type + of parameter. */ + if (ng < NUM_GPR_ARG_REGISTERS - 1) + { + if (ng & 1) + { + /* skip r4, r6, r8 as starting points */ + ng++; + pgr++; + } + avalue[i] = pgr; + ng += 2; + pgr += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + ng = NUM_GPR_ARG_REGISTERS; + } + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. + Because the FFI_SYSV ABI returns the structures <= 8 bytes in + r3/r4 we have to tell ffi_closure_SYSV how to treat them. We + combine the base type FFI_SYSV_TYPE_SMALL_STRUCT with the size of + the struct less one. We never have a struct with size zero. + See the comment in ffitarget.h about ordering. */ + if (rtypenum == FFI_TYPE_STRUCT + && (cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + return FFI_SYSV_TYPE_SMALL_STRUCT - 1 + size; + return rtypenum; +} +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget 2.h new file mode 100644 index 0000000..7fb9a93 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget 2.h @@ -0,0 +1,204 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (C) 2007, 2008, 2010 Free Software Foundation, Inc + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for PowerPC. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined (POWERPC) && defined (__powerpc64__) /* linux64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#elif defined (POWERPC_DARWIN) && defined (__ppc64__) /* Darwin64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#ifndef POWERPC_DARWIN64 +#define POWERPC_DARWIN64 +#endif +#elif defined (POWERPC_AIX) && defined (__64BIT__) /* AIX64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#if defined (POWERPC_AIX) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_AIX, + FFI_LAST_ABI + +#elif defined (POWERPC_DARWIN) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_DARWIN, + FFI_LAST_ABI + +#else + /* The FFI_COMPAT values are used by old code. Since libffi may be + a shared library we have to support old values for backwards + compatibility. */ + FFI_COMPAT_SYSV, + FFI_COMPAT_GCC_SYSV, + FFI_COMPAT_LINUX64, + FFI_COMPAT_LINUX, + FFI_COMPAT_LINUX_SOFT_FLOAT, + +# if defined (POWERPC64) + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 64-bit linux. We + only need worry about FFI_COMPAT_LINUX64, but to be safe avoid + all old values. */ + FFI_LINUX = 8, + /* This and following bits can reuse FFI_COMPAT values. */ + FFI_LINUX_STRUCT_ALIGN = 1, + FFI_LINUX_LONG_DOUBLE_128 = 2, + FFI_LINUX_LONG_DOUBLE_IEEE128 = 4, + FFI_DEFAULT_ABI = (FFI_LINUX +# ifdef __STRUCT_PARM_ALIGN__ + | FFI_LINUX_STRUCT_ALIGN +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_LINUX_LONG_DOUBLE_128 +# ifdef __LONG_DOUBLE_IEEE128__ + | FFI_LINUX_LONG_DOUBLE_IEEE128 +# endif +# endif + ), + FFI_LAST_ABI = 16 + +# else + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 32-bit linux/sysv/bsd. */ + FFI_SYSV = 8, + /* This and following bits can reuse FFI_COMPAT values. 
*/ + FFI_SYSV_SOFT_FLOAT = 1, + FFI_SYSV_STRUCT_RET = 2, + FFI_SYSV_IBM_LONG_DOUBLE = 4, + FFI_SYSV_LONG_DOUBLE_128 = 16, + + FFI_DEFAULT_ABI = (FFI_SYSV +# ifdef __NO_FPRS__ + | FFI_SYSV_SOFT_FLOAT +# endif +# if (defined (__SVR4_STRUCT_RETURN) \ + || defined (POWERPC_FREEBSD) && !defined (__AIX_STRUCT_RETURN)) + | FFI_SYSV_STRUCT_RET +# endif +# if __LDBL_MANT_DIG__ == 106 + | FFI_SYSV_IBM_LONG_DOUBLE +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_SYSV_LONG_DOUBLE_128 +# endif + ), + FFI_LAST_ABI = 32 +# endif +#endif + +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#if defined (POWERPC) || defined (POWERPC_FREEBSD) +# define FFI_GO_CLOSURES 1 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs +#endif +#if defined (POWERPC_AIX) +# define FFI_GO_CLOSURES 1 +#endif + +/* ppc_closure.S and linux64_closure.S expect this. */ +#define FFI_PPC_TYPE_LAST FFI_TYPE_POINTER + +/* We define additional types below. If generic types are added that + must be supported by powerpc libffi then it is likely that + FFI_PPC_TYPE_LAST needs increasing *and* the jump tables in + ppc_closure.S and linux64_closure.S be extended. */ + +#if !(FFI_TYPE_LAST == FFI_PPC_TYPE_LAST \ + || (FFI_TYPE_LAST == FFI_TYPE_COMPLEX \ + && !defined FFI_TARGET_HAS_COMPLEX_TYPE)) +# error "You likely have a broken powerpc libffi" +#endif + +/* Needed for soft-float long-double-128 support. */ +#define FFI_TYPE_UINT128 (FFI_PPC_TYPE_LAST + 1) + +/* Needed for FFI_SYSV small structure returns. */ +#define FFI_SYSV_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 2) + +/* Used by ELFv2 for homogenous structure returns. */ +#define FFI_V2_TYPE_VECTOR (FFI_PPC_TYPE_LAST + 1) +#define FFI_V2_TYPE_VECTOR_HOMOG (FFI_PPC_TYPE_LAST + 2) +#define FFI_V2_TYPE_FLOAT_HOMOG (FFI_PPC_TYPE_LAST + 3) +#define FFI_V2_TYPE_DOUBLE_HOMOG (FFI_PPC_TYPE_LAST + 4) +#define FFI_V2_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 5) + +#if _CALL_ELF == 2 +# define FFI_TRAMPOLINE_SIZE 32 +#else +# if defined(POWERPC64) || defined(POWERPC_AIX) +# if defined(POWERPC_DARWIN64) +# define FFI_TRAMPOLINE_SIZE 48 +# else +# define FFI_TRAMPOLINE_SIZE 24 +# endif +# else /* POWERPC || POWERPC_AIX */ +# define FFI_TRAMPOLINE_SIZE 40 +# endif +#endif + +#ifndef LIBFFI_ASM +#if defined(POWERPC_DARWIN) || defined(POWERPC_AIX) +struct ffi_aix_trampoline_struct { + void * code_pointer; /* Pointer to ffi_closure_ASM */ + void * toc; /* TOC */ + void * static_chain; /* Pointer to closure */ +}; +#endif +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget.h new file mode 100644 index 0000000..7fb9a93 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget.h @@ -0,0 +1,204 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (C) 2007, 2008, 2010 Free Software Foundation, Inc + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for PowerPC. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined (POWERPC) && defined (__powerpc64__) /* linux64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#elif defined (POWERPC_DARWIN) && defined (__ppc64__) /* Darwin64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#ifndef POWERPC_DARWIN64 +#define POWERPC_DARWIN64 +#endif +#elif defined (POWERPC_AIX) && defined (__64BIT__) /* AIX64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#if defined (POWERPC_AIX) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_AIX, + FFI_LAST_ABI + +#elif defined (POWERPC_DARWIN) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_DARWIN, + FFI_LAST_ABI + +#else + /* The FFI_COMPAT values are used by old code. Since libffi may be + a shared library we have to support old values for backwards + compatibility. */ + FFI_COMPAT_SYSV, + FFI_COMPAT_GCC_SYSV, + FFI_COMPAT_LINUX64, + FFI_COMPAT_LINUX, + FFI_COMPAT_LINUX_SOFT_FLOAT, + +# if defined (POWERPC64) + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 64-bit linux. We + only need worry about FFI_COMPAT_LINUX64, but to be safe avoid + all old values. */ + FFI_LINUX = 8, + /* This and following bits can reuse FFI_COMPAT values. */ + FFI_LINUX_STRUCT_ALIGN = 1, + FFI_LINUX_LONG_DOUBLE_128 = 2, + FFI_LINUX_LONG_DOUBLE_IEEE128 = 4, + FFI_DEFAULT_ABI = (FFI_LINUX +# ifdef __STRUCT_PARM_ALIGN__ + | FFI_LINUX_STRUCT_ALIGN +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_LINUX_LONG_DOUBLE_128 +# ifdef __LONG_DOUBLE_IEEE128__ + | FFI_LINUX_LONG_DOUBLE_IEEE128 +# endif +# endif + ), + FFI_LAST_ABI = 16 + +# else + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 32-bit linux/sysv/bsd. */ + FFI_SYSV = 8, + /* This and following bits can reuse FFI_COMPAT values. 
*/ + FFI_SYSV_SOFT_FLOAT = 1, + FFI_SYSV_STRUCT_RET = 2, + FFI_SYSV_IBM_LONG_DOUBLE = 4, + FFI_SYSV_LONG_DOUBLE_128 = 16, + + FFI_DEFAULT_ABI = (FFI_SYSV +# ifdef __NO_FPRS__ + | FFI_SYSV_SOFT_FLOAT +# endif +# if (defined (__SVR4_STRUCT_RETURN) \ + || defined (POWERPC_FREEBSD) && !defined (__AIX_STRUCT_RETURN)) + | FFI_SYSV_STRUCT_RET +# endif +# if __LDBL_MANT_DIG__ == 106 + | FFI_SYSV_IBM_LONG_DOUBLE +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_SYSV_LONG_DOUBLE_128 +# endif + ), + FFI_LAST_ABI = 32 +# endif +#endif + +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#if defined (POWERPC) || defined (POWERPC_FREEBSD) +# define FFI_GO_CLOSURES 1 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs +#endif +#if defined (POWERPC_AIX) +# define FFI_GO_CLOSURES 1 +#endif + +/* ppc_closure.S and linux64_closure.S expect this. */ +#define FFI_PPC_TYPE_LAST FFI_TYPE_POINTER + +/* We define additional types below. If generic types are added that + must be supported by powerpc libffi then it is likely that + FFI_PPC_TYPE_LAST needs increasing *and* the jump tables in + ppc_closure.S and linux64_closure.S be extended. */ + +#if !(FFI_TYPE_LAST == FFI_PPC_TYPE_LAST \ + || (FFI_TYPE_LAST == FFI_TYPE_COMPLEX \ + && !defined FFI_TARGET_HAS_COMPLEX_TYPE)) +# error "You likely have a broken powerpc libffi" +#endif + +/* Needed for soft-float long-double-128 support. */ +#define FFI_TYPE_UINT128 (FFI_PPC_TYPE_LAST + 1) + +/* Needed for FFI_SYSV small structure returns. */ +#define FFI_SYSV_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 2) + +/* Used by ELFv2 for homogenous structure returns. */ +#define FFI_V2_TYPE_VECTOR (FFI_PPC_TYPE_LAST + 1) +#define FFI_V2_TYPE_VECTOR_HOMOG (FFI_PPC_TYPE_LAST + 2) +#define FFI_V2_TYPE_FLOAT_HOMOG (FFI_PPC_TYPE_LAST + 3) +#define FFI_V2_TYPE_DOUBLE_HOMOG (FFI_PPC_TYPE_LAST + 4) +#define FFI_V2_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 5) + +#if _CALL_ELF == 2 +# define FFI_TRAMPOLINE_SIZE 32 +#else +# if defined(POWERPC64) || defined(POWERPC_AIX) +# if defined(POWERPC_DARWIN64) +# define FFI_TRAMPOLINE_SIZE 48 +# else +# define FFI_TRAMPOLINE_SIZE 24 +# endif +# else /* POWERPC || POWERPC_AIX */ +# define FFI_TRAMPOLINE_SIZE 40 +# endif +#endif + +#ifndef LIBFFI_ASM +#if defined(POWERPC_DARWIN) || defined(POWERPC_AIX) +struct ffi_aix_trampoline_struct { + void * code_pointer; /* Pointer to ffi_closure_ASM */ + void * toc; /* TOC */ + void * static_chain; /* Pointer to closure */ +}; +#endif +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64 2.S new file mode 100644 index 0000000..c99889c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64 2.S @@ -0,0 +1,283 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef POWERPC64 + .hidden ffi_call_LINUX64 + .globl ffi_call_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_call_LINUX64: + addis %r2, %r12, .TOC.-ffi_call_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_call_LINUX64@l + .localentry ffi_call_LINUX64, . - ffi_call_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_call_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_call_LINUX64,.TOC.@tocbase,0 + .type ffi_call_LINUX64,@function + .text +.L.ffi_call_LINUX64: +# else + .hidden .ffi_call_LINUX64 + .globl .ffi_call_LINUX64 + .quad .ffi_call_LINUX64,.TOC.@tocbase,0 + .size ffi_call_LINUX64,24 + .type .ffi_call_LINUX64,@function + .text +.ffi_call_LINUX64: +# endif +# endif + mflr %r0 + std %r28, -32(%r1) + std %r29, -24(%r1) + std %r30, -16(%r1) + std %r31, -8(%r1) + std %r7, 8(%r1) /* closure, saved in cr field. */ + std %r0, 16(%r1) + + mr %r28, %r1 /* our AP. */ + .cfi_def_cfa_register 28 + .cfi_offset 65, 16 + .cfi_offset 31, -8 + .cfi_offset 30, -16 + .cfi_offset 29, -24 + .cfi_offset 28, -32 + + stdux %r1, %r1, %r8 + mr %r31, %r6 /* flags, */ + mr %r30, %r5 /* rvalue, */ + mr %r29, %r4 /* function address. */ +/* Save toc pointer, not for the ffi_prep_args64 call, but for the later + bctrl function call. */ +# if _CALL_ELF == 2 + std %r2, 24(%r1) +# else + std %r2, 40(%r1) +# endif + + /* Call ffi_prep_args64. */ + mr %r4, %r1 +# if defined _CALL_LINUX || _CALL_ELF == 2 + bl ffi_prep_args64 +# else + bl .ffi_prep_args64 +# endif + +# if _CALL_ELF == 2 + mr %r12, %r29 +# else + ld %r12, 0(%r29) + ld %r2, 8(%r29) +# endif + /* Now do the call. */ + /* Set up cr1 with bits 3-7 of the flags. */ + mtcrf 0xc0, %r31 + + /* Get the address to call into CTR. */ + mtctr %r12 + /* Load all those argument registers. */ + addi %r29, %r28, -32-(8*8) + ld %r3, (0*8)(%r29) + ld %r4, (1*8)(%r29) + ld %r5, (2*8)(%r29) + ld %r6, (3*8)(%r29) + bf- 5, 1f + ld %r7, (4*8)(%r29) + ld %r8, (5*8)(%r29) + ld %r9, (6*8)(%r29) + ld %r10, (7*8)(%r29) +1: + + /* Load all the FP registers. 
*/ + bf- 6, 2f + addi %r29, %r29, -(14*8) + lfd %f1, ( 1*8)(%r29) + lfd %f2, ( 2*8)(%r29) + lfd %f3, ( 3*8)(%r29) + lfd %f4, ( 4*8)(%r29) + lfd %f5, ( 5*8)(%r29) + lfd %f6, ( 6*8)(%r29) + lfd %f7, ( 7*8)(%r29) + lfd %f8, ( 8*8)(%r29) + lfd %f9, ( 9*8)(%r29) + lfd %f10, (10*8)(%r29) + lfd %f11, (11*8)(%r29) + lfd %f12, (12*8)(%r29) + lfd %f13, (13*8)(%r29) +2: + + /* Load all the vector registers. */ + bf- 3, 3f + addi %r29, %r29, -16 + lvx %v13, 0, %r29 + addi %r29, %r29, -16 + lvx %v12, 0, %r29 + addi %r29, %r29, -16 + lvx %v11, 0, %r29 + addi %r29, %r29, -16 + lvx %v10, 0, %r29 + addi %r29, %r29, -16 + lvx %v9, 0, %r29 + addi %r29, %r29, -16 + lvx %v8, 0, %r29 + addi %r29, %r29, -16 + lvx %v7, 0, %r29 + addi %r29, %r29, -16 + lvx %v6, 0, %r29 + addi %r29, %r29, -16 + lvx %v5, 0, %r29 + addi %r29, %r29, -16 + lvx %v4, 0, %r29 + addi %r29, %r29, -16 + lvx %v3, 0, %r29 + addi %r29, %r29, -16 + lvx %v2, 0, %r29 +3: + + /* Make the call. */ + ld %r11, 8(%r28) + bctrl + + /* This must follow the call immediately, the unwinder + uses this to find out if r2 has been saved or not. */ +# if _CALL_ELF == 2 + ld %r2, 24(%r1) +# else + ld %r2, 40(%r1) +# endif + + /* Now, deal with the return value. */ + mtcrf 0x01, %r31 + bt 31, .Lstruct_return_value + bt 30, .Ldone_return_value + bt 29, .Lfp_return_value + bt 28, .Lvec_return_value + std %r3, 0(%r30) + /* Fall through... */ + +.Ldone_return_value: + /* Restore the registers we used and return. */ + mr %r1, %r28 + .cfi_def_cfa_register 1 + ld %r0, 16(%r28) + ld %r28, -32(%r28) + mtlr %r0 + ld %r29, -24(%r1) + ld %r30, -16(%r1) + ld %r31, -8(%r1) + blr + +.Lvec_return_value: + stvx %v2, 0, %r30 + b .Ldone_return_value + +.Lfp_return_value: + .cfi_def_cfa_register 28 + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_return_value + stfd %f1, 0(%r30) + bf 26, .Ldone_return_value + stfd %f2, 8(%r30) + b .Ldone_return_value +.Lfloat_return_value: + stfs %f1, 0(%r30) + b .Ldone_return_value + +.Lstruct_return_value: + bf 29, .Lvec_homog_or_small_struct + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_homog_return_value + stfd %f1, 0(%r30) + stfd %f2, 8(%r30) + stfd %f3, 16(%r30) + stfd %f4, 24(%r30) + stfd %f5, 32(%r30) + stfd %f6, 40(%r30) + stfd %f7, 48(%r30) + stfd %f8, 56(%r30) + b .Ldone_return_value + +.Lfloat_homog_return_value: + stfs %f1, 0(%r30) + stfs %f2, 4(%r30) + stfs %f3, 8(%r30) + stfs %f4, 12(%r30) + stfs %f5, 16(%r30) + stfs %f6, 20(%r30) + stfs %f7, 24(%r30) + stfs %f8, 28(%r30) + b .Ldone_return_value + +.Lvec_homog_or_small_struct: + bf 28, .Lsmall_struct + stvx %v2, 0, %r30 + addi %r30, %r30, 16 + stvx %v3, 0, %r30 + addi %r30, %r30, 16 + stvx %v4, 0, %r30 + addi %r30, %r30, 16 + stvx %v5, 0, %r30 + addi %r30, %r30, 16 + stvx %v6, 0, %r30 + addi %r30, %r30, 16 + stvx %v7, 0, %r30 + addi %r30, %r30, 16 + stvx %v8, 0, %r30 + addi %r30, %r30, 16 + stvx %v9, 0, %r30 + b .Ldone_return_value + +.Lsmall_struct: + std %r3, 0(%r30) + std %r4, 8(%r30) + b .Ldone_return_value + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_call_LINUX64,.-ffi_call_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_call_LINUX64,.-.L.ffi_call_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,4,0,0 + .size .ffi_call_LINUX64,.-.ffi_call_LINUX64 +# endif +# endif + +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64.S new file mode 100644 
index 0000000..c99889c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64.S @@ -0,0 +1,283 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef POWERPC64 + .hidden ffi_call_LINUX64 + .globl ffi_call_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_call_LINUX64: + addis %r2, %r12, .TOC.-ffi_call_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_call_LINUX64@l + .localentry ffi_call_LINUX64, . - ffi_call_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_call_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_call_LINUX64,.TOC.@tocbase,0 + .type ffi_call_LINUX64,@function + .text +.L.ffi_call_LINUX64: +# else + .hidden .ffi_call_LINUX64 + .globl .ffi_call_LINUX64 + .quad .ffi_call_LINUX64,.TOC.@tocbase,0 + .size ffi_call_LINUX64,24 + .type .ffi_call_LINUX64,@function + .text +.ffi_call_LINUX64: +# endif +# endif + mflr %r0 + std %r28, -32(%r1) + std %r29, -24(%r1) + std %r30, -16(%r1) + std %r31, -8(%r1) + std %r7, 8(%r1) /* closure, saved in cr field. */ + std %r0, 16(%r1) + + mr %r28, %r1 /* our AP. */ + .cfi_def_cfa_register 28 + .cfi_offset 65, 16 + .cfi_offset 31, -8 + .cfi_offset 30, -16 + .cfi_offset 29, -24 + .cfi_offset 28, -32 + + stdux %r1, %r1, %r8 + mr %r31, %r6 /* flags, */ + mr %r30, %r5 /* rvalue, */ + mr %r29, %r4 /* function address. */ +/* Save toc pointer, not for the ffi_prep_args64 call, but for the later + bctrl function call. */ +# if _CALL_ELF == 2 + std %r2, 24(%r1) +# else + std %r2, 40(%r1) +# endif + + /* Call ffi_prep_args64. */ + mr %r4, %r1 +# if defined _CALL_LINUX || _CALL_ELF == 2 + bl ffi_prep_args64 +# else + bl .ffi_prep_args64 +# endif + +# if _CALL_ELF == 2 + mr %r12, %r29 +# else + ld %r12, 0(%r29) + ld %r2, 8(%r29) +# endif + /* Now do the call. */ + /* Set up cr1 with bits 3-7 of the flags. */ + mtcrf 0xc0, %r31 + + /* Get the address to call into CTR. */ + mtctr %r12 + /* Load all those argument registers. */ + addi %r29, %r28, -32-(8*8) + ld %r3, (0*8)(%r29) + ld %r4, (1*8)(%r29) + ld %r5, (2*8)(%r29) + ld %r6, (3*8)(%r29) + bf- 5, 1f + ld %r7, (4*8)(%r29) + ld %r8, (5*8)(%r29) + ld %r9, (6*8)(%r29) + ld %r10, (7*8)(%r29) +1: + + /* Load all the FP registers. 
*/ + bf- 6, 2f + addi %r29, %r29, -(14*8) + lfd %f1, ( 1*8)(%r29) + lfd %f2, ( 2*8)(%r29) + lfd %f3, ( 3*8)(%r29) + lfd %f4, ( 4*8)(%r29) + lfd %f5, ( 5*8)(%r29) + lfd %f6, ( 6*8)(%r29) + lfd %f7, ( 7*8)(%r29) + lfd %f8, ( 8*8)(%r29) + lfd %f9, ( 9*8)(%r29) + lfd %f10, (10*8)(%r29) + lfd %f11, (11*8)(%r29) + lfd %f12, (12*8)(%r29) + lfd %f13, (13*8)(%r29) +2: + + /* Load all the vector registers. */ + bf- 3, 3f + addi %r29, %r29, -16 + lvx %v13, 0, %r29 + addi %r29, %r29, -16 + lvx %v12, 0, %r29 + addi %r29, %r29, -16 + lvx %v11, 0, %r29 + addi %r29, %r29, -16 + lvx %v10, 0, %r29 + addi %r29, %r29, -16 + lvx %v9, 0, %r29 + addi %r29, %r29, -16 + lvx %v8, 0, %r29 + addi %r29, %r29, -16 + lvx %v7, 0, %r29 + addi %r29, %r29, -16 + lvx %v6, 0, %r29 + addi %r29, %r29, -16 + lvx %v5, 0, %r29 + addi %r29, %r29, -16 + lvx %v4, 0, %r29 + addi %r29, %r29, -16 + lvx %v3, 0, %r29 + addi %r29, %r29, -16 + lvx %v2, 0, %r29 +3: + + /* Make the call. */ + ld %r11, 8(%r28) + bctrl + + /* This must follow the call immediately, the unwinder + uses this to find out if r2 has been saved or not. */ +# if _CALL_ELF == 2 + ld %r2, 24(%r1) +# else + ld %r2, 40(%r1) +# endif + + /* Now, deal with the return value. */ + mtcrf 0x01, %r31 + bt 31, .Lstruct_return_value + bt 30, .Ldone_return_value + bt 29, .Lfp_return_value + bt 28, .Lvec_return_value + std %r3, 0(%r30) + /* Fall through... */ + +.Ldone_return_value: + /* Restore the registers we used and return. */ + mr %r1, %r28 + .cfi_def_cfa_register 1 + ld %r0, 16(%r28) + ld %r28, -32(%r28) + mtlr %r0 + ld %r29, -24(%r1) + ld %r30, -16(%r1) + ld %r31, -8(%r1) + blr + +.Lvec_return_value: + stvx %v2, 0, %r30 + b .Ldone_return_value + +.Lfp_return_value: + .cfi_def_cfa_register 28 + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_return_value + stfd %f1, 0(%r30) + bf 26, .Ldone_return_value + stfd %f2, 8(%r30) + b .Ldone_return_value +.Lfloat_return_value: + stfs %f1, 0(%r30) + b .Ldone_return_value + +.Lstruct_return_value: + bf 29, .Lvec_homog_or_small_struct + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_homog_return_value + stfd %f1, 0(%r30) + stfd %f2, 8(%r30) + stfd %f3, 16(%r30) + stfd %f4, 24(%r30) + stfd %f5, 32(%r30) + stfd %f6, 40(%r30) + stfd %f7, 48(%r30) + stfd %f8, 56(%r30) + b .Ldone_return_value + +.Lfloat_homog_return_value: + stfs %f1, 0(%r30) + stfs %f2, 4(%r30) + stfs %f3, 8(%r30) + stfs %f4, 12(%r30) + stfs %f5, 16(%r30) + stfs %f6, 20(%r30) + stfs %f7, 24(%r30) + stfs %f8, 28(%r30) + b .Ldone_return_value + +.Lvec_homog_or_small_struct: + bf 28, .Lsmall_struct + stvx %v2, 0, %r30 + addi %r30, %r30, 16 + stvx %v3, 0, %r30 + addi %r30, %r30, 16 + stvx %v4, 0, %r30 + addi %r30, %r30, 16 + stvx %v5, 0, %r30 + addi %r30, %r30, 16 + stvx %v6, 0, %r30 + addi %r30, %r30, 16 + stvx %v7, 0, %r30 + addi %r30, %r30, 16 + stvx %v8, 0, %r30 + addi %r30, %r30, 16 + stvx %v9, 0, %r30 + b .Ldone_return_value + +.Lsmall_struct: + std %r3, 0(%r30) + std %r4, 8(%r30) + b .Ldone_return_value + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_call_LINUX64,.-ffi_call_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_call_LINUX64,.-.L.ffi_call_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,4,0,0 + .size .ffi_call_LINUX64,.-.ffi_call_LINUX64 +# endif +# endif + +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure 2.S new 
file mode 100644 index 0000000..d67e4bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure 2.S @@ -0,0 +1,552 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include + + .file "linux64_closure.S" + +#ifdef POWERPC64 + FFI_HIDDEN (ffi_closure_LINUX64) + .globl ffi_closure_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_closure_LINUX64: + addis %r2, %r12, .TOC.-ffi_closure_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_closure_LINUX64@l + .localentry ffi_closure_LINUX64, . - ffi_closure_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_closure_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_closure_LINUX64,.TOC.@tocbase,0 + .type ffi_closure_LINUX64,@function + .text +.L.ffi_closure_LINUX64: +# else + FFI_HIDDEN (.ffi_closure_LINUX64) + .globl .ffi_closure_LINUX64 + .quad .ffi_closure_LINUX64,.TOC.@tocbase,0 + .size ffi_closure_LINUX64,24 + .type .ffi_closure_LINUX64,@function + .text +.ffi_closure_LINUX64: +# endif +# endif + +# if _CALL_ELF == 2 +# ifdef __VEC__ +# 32 byte special reg save area + 64 byte parm save area +# + 128 byte retval area + 13*8 fpr save area + 12*16 vec save area + round to 16 +# define STACKFRAME 528 +# else +# 32 byte special reg save area + 64 byte parm save area +# + 64 byte retval area + 13*8 fpr save area + round to 16 +# define STACKFRAME 272 +# endif +# define PARMSAVE 32 +# define RETVAL PARMSAVE+64 +# else +# 48 bytes special reg save area + 64 bytes parm save area +# + 16 bytes retval area + 13*8 bytes fpr save area + round to 16 +# define STACKFRAME 240 +# define PARMSAVE 48 +# define RETVAL PARMSAVE+64 +# endif + +# if _CALL_ELF == 2 + ld %r12, FFI_TRAMPOLINE_SIZE(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. 
+ addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + # copy r2 to r11 and load TOC into r2 + mr %r11, %r2 + ld %r2, 16(%r2) + + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. + std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + ld %r4, FFI_TRAMPOLINE_SIZE+8(%r11) + # closure->user_data + ld %r5, FFI_TRAMPOLINE_SIZE+16(%r11) + +.Ldoclosure: + # next save fpr 1 to fpr 13 + stfd %f1, -104+(0*8)(%r1) + stfd %f2, -104+(1*8)(%r1) + stfd %f3, -104+(2*8)(%r1) + stfd %f4, -104+(3*8)(%r1) + stfd %f5, -104+(4*8)(%r1) + stfd %f6, -104+(5*8)(%r1) + stfd %f7, -104+(6*8)(%r1) + stfd %f8, -104+(7*8)(%r1) + stfd %f9, -104+(8*8)(%r1) + stfd %f10, -104+(9*8)(%r1) + stfd %f11, -104+(10*8)(%r1) + stfd %f12, -104+(11*8)(%r1) + stfd %f13, -104+(12*8)(%r1) + + # load up the pointer to the saved fpr registers + addi %r8, %r1, -104 + +# ifdef __VEC__ + # load up the pointer to the saved vector registers + # 8 bytes padding for 16-byte alignment at -112(%r1) + addi %r9, %r8, -24 + stvx %v13, 0, %r9 + addi %r9, %r9, -16 + stvx %v12, 0, %r9 + addi %r9, %r9, -16 + stvx %v11, 0, %r9 + addi %r9, %r9, -16 + stvx %v10, 0, %r9 + addi %r9, %r9, -16 + stvx %v9, 0, %r9 + addi %r9, %r9, -16 + stvx %v8, 0, %r9 + addi %r9, %r9, -16 + stvx %v7, 0, %r9 + addi %r9, %r9, -16 + stvx %v6, 0, %r9 + addi %r9, %r9, -16 + stvx %v5, 0, %r9 + addi %r9, %r9, -16 + stvx %v4, 0, %r9 + addi %r9, %r9, -16 + stvx %v3, 0, %r9 + addi %r9, %r9, -16 + stvx %v2, 0, %r9 +# endif + + # load up the pointer to the result storage + addi %r6, %r1, -STACKFRAME+RETVAL + + stdu %r1, -STACKFRAME(%r1) + .cfi_def_cfa_offset STACKFRAME + .cfi_offset 65, 16 + + # make the call +# if defined _CALL_LINUX || _CALL_ELF == 2 + bl ffi_closure_helper_LINUX64 +# else + bl .ffi_closure_helper_LINUX64 +# endif +.Lret: + + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + ld %r0, STACKFRAME+16(%r1) + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + bge .Lsmall + mflr %r4 # move address of .Lret to r4 + sldi %r3, %r3, 4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + add %r3, %r3, %r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
+ .align 4 + +.Lret_type0: +# case FFI_TYPE_VOID + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_INT +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_FLOAT + lfs %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_DOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_LONGDOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + lfd %f2, RETVAL+8(%r1) + b .Lfinish +# case FFI_TYPE_UINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish +# case FFI_TYPE_UINT16 +# ifdef __LITTLE_ENDIAN__ + lhz %r3, RETVAL+0(%r1) +# else + lhz %r3, RETVAL+6(%r1) +# endif + mtlr %r0 +.Lfinish: + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT16 +# ifdef __LITTLE_ENDIAN__ + lha %r3, RETVAL+0(%r1) +# else + lha %r3, RETVAL+6(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT32 +# ifdef __LITTLE_ENDIAN__ + lwz %r3, RETVAL+0(%r1) +# else + lwz %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT32 +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_POINTER + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_V2_TYPE_VECTOR + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + mtlr %r0 + b .Lfinish +# case FFI_V2_TYPE_VECTOR_HOMOG + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + addi %r3, %r3, 16 + b .Lmorevector +# case FFI_V2_TYPE_FLOAT_HOMOG + lfs %f1, RETVAL+0(%r1) + lfs %f2, RETVAL+4(%r1) + lfs %f3, RETVAL+8(%r1) + b .Lmorefloat +# case FFI_V2_TYPE_DOUBLE_HOMOG + lfd %f1, RETVAL+0(%r1) + lfd %f2, RETVAL+8(%r1) + lfd %f3, RETVAL+16(%r1) + lfd %f4, RETVAL+24(%r1) + mtlr %r0 + lfd %f5, RETVAL+32(%r1) + lfd %f6, RETVAL+40(%r1) + lfd %f7, RETVAL+48(%r1) + lfd %f8, RETVAL+56(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorevector: + lvx %v3, 0, %r3 + addi %r3, %r3, 16 + lvx %v4, 0, %r3 + addi %r3, %r3, 16 + lvx %v5, 0, %r3 + mtlr %r0 + addi %r3, %r3, 16 + lvx %v6, 0, %r3 + addi %r3, %r3, 16 + lvx %v7, 0, %r3 + addi %r3, %r3, 16 + lvx %v8, 0, %r3 + addi %r3, %r3, 16 + 
lvx %v9, 0, %r3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorefloat: + lfs %f4, RETVAL+12(%r1) + mtlr %r0 + lfs %f5, RETVAL+16(%r1) + lfs %f6, RETVAL+20(%r1) + lfs %f7, RETVAL+24(%r1) + lfs %f8, RETVAL+28(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmall: +# ifdef __LITTLE_ENDIAN__ + ld %r3,RETVAL+0(%r1) + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr +# else + # A struct smaller than a dword is returned in the low bits of r3 + # ie. right justified. Larger structs are passed left justified + # in r3 and r4. The return value area on the stack will have + # the structs as they are usually stored in memory. + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + 7 # size 8 bytes? + neg %r5, %r3 + ld %r3,RETVAL+0(%r1) + blt .Lsmalldown + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmalldown: + addi %r5, %r5, FFI_V2_TYPE_SMALL_STRUCT + 7 + mtlr %r0 + sldi %r5, %r5, 3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + srd %r3, %r3, %r5 + blr +# endif + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_closure_LINUX64,.-ffi_closure_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_closure_LINUX64,.-.L.ffi_closure_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_closure_LINUX64,.-.ffi_closure_LINUX64 +# endif +# endif + + + FFI_HIDDEN (ffi_go_closure_linux64) + .globl ffi_go_closure_linux64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_go_closure_linux64: + addis %r2, %r12, .TOC.-ffi_go_closure_linux64@ha + addi %r2, %r2, .TOC.-ffi_go_closure_linux64@l + .localentry ffi_go_closure_linux64, . - ffi_go_closure_linux64 +# else + .section ".opd","aw" + .align 3 +ffi_go_closure_linux64: +# ifdef _CALL_LINUX + .quad .L.ffi_go_closure_linux64,.TOC.@tocbase,0 + .type ffi_go_closure_linux64,@function + .text +.L.ffi_go_closure_linux64: +# else + FFI_HIDDEN (.ffi_go_closure_linux64) + .globl .ffi_go_closure_linux64 + .quad .ffi_go_closure_linux64,.TOC.@tocbase,0 + .size ffi_go_closure_linux64,24 + .type .ffi_go_closure_linux64,@function + .text +.ffi_go_closure_linux64: +# endif +# endif + +# if _CALL_ELF == 2 + ld %r12, 8(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, 8(%r11) + # closure->fun + ld %r4, 16(%r11) + # user_data + mr %r5, %r11 + b .Ldoclosure + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_go_closure_linux64,.-ffi_go_closure_linux64 +# else +# ifdef _CALL_LINUX + .size ffi_go_closure_linux64,.-.L.ffi_go_closure_linux64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_go_closure_linux64,.-.ffi_go_closure_linux64 +# endif +# endif +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure.S new file mode 100644 index 0000000..d67e4bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure.S @@ -0,0 +1,552 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include + + .file "linux64_closure.S" + +#ifdef POWERPC64 + FFI_HIDDEN (ffi_closure_LINUX64) + .globl ffi_closure_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_closure_LINUX64: + addis %r2, %r12, .TOC.-ffi_closure_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_closure_LINUX64@l + .localentry ffi_closure_LINUX64, . 
- ffi_closure_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_closure_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_closure_LINUX64,.TOC.@tocbase,0 + .type ffi_closure_LINUX64,@function + .text +.L.ffi_closure_LINUX64: +# else + FFI_HIDDEN (.ffi_closure_LINUX64) + .globl .ffi_closure_LINUX64 + .quad .ffi_closure_LINUX64,.TOC.@tocbase,0 + .size ffi_closure_LINUX64,24 + .type .ffi_closure_LINUX64,@function + .text +.ffi_closure_LINUX64: +# endif +# endif + +# if _CALL_ELF == 2 +# ifdef __VEC__ +# 32 byte special reg save area + 64 byte parm save area +# + 128 byte retval area + 13*8 fpr save area + 12*16 vec save area + round to 16 +# define STACKFRAME 528 +# else +# 32 byte special reg save area + 64 byte parm save area +# + 64 byte retval area + 13*8 fpr save area + round to 16 +# define STACKFRAME 272 +# endif +# define PARMSAVE 32 +# define RETVAL PARMSAVE+64 +# else +# 48 bytes special reg save area + 64 bytes parm save area +# + 16 bytes retval area + 13*8 bytes fpr save area + round to 16 +# define STACKFRAME 240 +# define PARMSAVE 48 +# define RETVAL PARMSAVE+64 +# endif + +# if _CALL_ELF == 2 + ld %r12, FFI_TRAMPOLINE_SIZE(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + # copy r2 to r11 and load TOC into r2 + mr %r11, %r2 + ld %r2, 16(%r2) + + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + ld %r4, FFI_TRAMPOLINE_SIZE+8(%r11) + # closure->user_data + ld %r5, FFI_TRAMPOLINE_SIZE+16(%r11) + +.Ldoclosure: + # next save fpr 1 to fpr 13 + stfd %f1, -104+(0*8)(%r1) + stfd %f2, -104+(1*8)(%r1) + stfd %f3, -104+(2*8)(%r1) + stfd %f4, -104+(3*8)(%r1) + stfd %f5, -104+(4*8)(%r1) + stfd %f6, -104+(5*8)(%r1) + stfd %f7, -104+(6*8)(%r1) + stfd %f8, -104+(7*8)(%r1) + stfd %f9, -104+(8*8)(%r1) + stfd %f10, -104+(9*8)(%r1) + stfd %f11, -104+(10*8)(%r1) + stfd %f12, -104+(11*8)(%r1) + stfd %f13, -104+(12*8)(%r1) + + # load up the pointer to the saved fpr registers + addi %r8, %r1, -104 + +# ifdef __VEC__ + # load up the pointer to the saved vector registers + # 8 bytes padding for 16-byte alignment at -112(%r1) + addi %r9, %r8, -24 + stvx %v13, 0, %r9 + addi %r9, %r9, -16 + stvx %v12, 0, %r9 + addi %r9, %r9, -16 + stvx %v11, 0, %r9 + addi %r9, %r9, -16 + stvx %v10, 0, %r9 + addi %r9, %r9, -16 + stvx %v9, 0, %r9 + addi %r9, %r9, -16 + stvx %v8, 0, %r9 + addi %r9, %r9, -16 + stvx %v7, 0, %r9 + addi %r9, %r9, -16 + stvx %v6, 0, %r9 + addi %r9, %r9, -16 + stvx %v5, 0, %r9 + addi %r9, %r9, -16 + stvx %v4, 0, %r9 + addi %r9, %r9, -16 + stvx %v3, 0, %r9 + addi %r9, %r9, -16 + stvx %v2, 0, %r9 +# endif + + # load up the pointer to the result storage + addi %r6, %r1, -STACKFRAME+RETVAL + + stdu %r1, -STACKFRAME(%r1) + .cfi_def_cfa_offset STACKFRAME + .cfi_offset 65, 16 + + # make the call +# if defined _CALL_LINUX || _CALL_ELF == 2 + bl ffi_closure_helper_LINUX64 +# else + bl .ffi_closure_helper_LINUX64 +# endif +.Lret: + + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + ld %r0, STACKFRAME+16(%r1) + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + bge .Lsmall + mflr %r4 # move address of .Lret to r4 + sldi %r3, %r3, 4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + add %r3, %r3, %r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
+ .align 4 + +.Lret_type0: +# case FFI_TYPE_VOID + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_INT +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_FLOAT + lfs %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_DOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_LONGDOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + lfd %f2, RETVAL+8(%r1) + b .Lfinish +# case FFI_TYPE_UINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish +# case FFI_TYPE_UINT16 +# ifdef __LITTLE_ENDIAN__ + lhz %r3, RETVAL+0(%r1) +# else + lhz %r3, RETVAL+6(%r1) +# endif + mtlr %r0 +.Lfinish: + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT16 +# ifdef __LITTLE_ENDIAN__ + lha %r3, RETVAL+0(%r1) +# else + lha %r3, RETVAL+6(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT32 +# ifdef __LITTLE_ENDIAN__ + lwz %r3, RETVAL+0(%r1) +# else + lwz %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT32 +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_POINTER + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_V2_TYPE_VECTOR + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + mtlr %r0 + b .Lfinish +# case FFI_V2_TYPE_VECTOR_HOMOG + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + addi %r3, %r3, 16 + b .Lmorevector +# case FFI_V2_TYPE_FLOAT_HOMOG + lfs %f1, RETVAL+0(%r1) + lfs %f2, RETVAL+4(%r1) + lfs %f3, RETVAL+8(%r1) + b .Lmorefloat +# case FFI_V2_TYPE_DOUBLE_HOMOG + lfd %f1, RETVAL+0(%r1) + lfd %f2, RETVAL+8(%r1) + lfd %f3, RETVAL+16(%r1) + lfd %f4, RETVAL+24(%r1) + mtlr %r0 + lfd %f5, RETVAL+32(%r1) + lfd %f6, RETVAL+40(%r1) + lfd %f7, RETVAL+48(%r1) + lfd %f8, RETVAL+56(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorevector: + lvx %v3, 0, %r3 + addi %r3, %r3, 16 + lvx %v4, 0, %r3 + addi %r3, %r3, 16 + lvx %v5, 0, %r3 + mtlr %r0 + addi %r3, %r3, 16 + lvx %v6, 0, %r3 + addi %r3, %r3, 16 + lvx %v7, 0, %r3 + addi %r3, %r3, 16 + lvx %v8, 0, %r3 + addi %r3, %r3, 16 + 
lvx %v9, 0, %r3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorefloat: + lfs %f4, RETVAL+12(%r1) + mtlr %r0 + lfs %f5, RETVAL+16(%r1) + lfs %f6, RETVAL+20(%r1) + lfs %f7, RETVAL+24(%r1) + lfs %f8, RETVAL+28(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmall: +# ifdef __LITTLE_ENDIAN__ + ld %r3,RETVAL+0(%r1) + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr +# else + # A struct smaller than a dword is returned in the low bits of r3 + # ie. right justified. Larger structs are passed left justified + # in r3 and r4. The return value area on the stack will have + # the structs as they are usually stored in memory. + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + 7 # size 8 bytes? + neg %r5, %r3 + ld %r3,RETVAL+0(%r1) + blt .Lsmalldown + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmalldown: + addi %r5, %r5, FFI_V2_TYPE_SMALL_STRUCT + 7 + mtlr %r0 + sldi %r5, %r5, 3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + srd %r3, %r3, %r5 + blr +# endif + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_closure_LINUX64,.-ffi_closure_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_closure_LINUX64,.-.L.ffi_closure_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_closure_LINUX64,.-.ffi_closure_LINUX64 +# endif +# endif + + + FFI_HIDDEN (ffi_go_closure_linux64) + .globl ffi_go_closure_linux64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_go_closure_linux64: + addis %r2, %r12, .TOC.-ffi_go_closure_linux64@ha + addi %r2, %r2, .TOC.-ffi_go_closure_linux64@l + .localentry ffi_go_closure_linux64, . - ffi_go_closure_linux64 +# else + .section ".opd","aw" + .align 3 +ffi_go_closure_linux64: +# ifdef _CALL_LINUX + .quad .L.ffi_go_closure_linux64,.TOC.@tocbase,0 + .type ffi_go_closure_linux64,@function + .text +.L.ffi_go_closure_linux64: +# else + FFI_HIDDEN (.ffi_go_closure_linux64) + .globl .ffi_go_closure_linux64 + .quad .ffi_go_closure_linux64,.TOC.@tocbase,0 + .size ffi_go_closure_linux64,24 + .type .ffi_go_closure_linux64,@function + .text +.ffi_go_closure_linux64: +# endif +# endif + +# if _CALL_ELF == 2 + ld %r12, 8(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, 8(%r11) + # closure->fun + ld %r4, 16(%r11) + # user_data + mr %r5, %r11 + b .Ldoclosure + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_go_closure_linux64,.-ffi_go_closure_linux64 +# else +# ifdef _CALL_LINUX + .size ffi_go_closure_linux64,.-.L.ffi_go_closure_linux64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_go_closure_linux64,.-.ffi_go_closure_linux64 +# endif +# endif +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure 2.S new file mode 100644 index 0000000..b6d209d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure 2.S @@ -0,0 +1,397 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include +#include + + .file "ppc_closure.S" + +#ifndef POWERPC64 + +FFI_HIDDEN(ffi_closure_SYSV) +ENTRY(ffi_closure_SYSV) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + +# we want to build up an areas for the parameters passed +# in registers (both floating point and integer) + + # so first save gpr 3 to gpr 10 (aligned to 4) + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # set up registers for the routine that does the work + + # closure->cif + lwz %r3,FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + lwz %r4,FFI_TRAMPOLINE_SIZE+4(%r11) + # closure->user_data + lwz %r5,FFI_TRAMPOLINE_SIZE+8(%r11) + +.Ldoclosure: + stw %r6, 28(%r1) + stw %r7, 32(%r1) + stw %r8, 36(%r1) + stw %r9, 40(%r1) + stw %r10,44(%r1) + +#ifndef __NO_FPRS__ + # next save fpr 1 to fpr 8 (aligned to 8) + stfd %f1, 48(%r1) + stfd %f2, 56(%r1) + stfd %f3, 64(%r1) + stfd %f4, 72(%r1) + stfd %f5, 80(%r1) + stfd %f6, 88(%r1) + stfd %f7, 96(%r1) + stfd %f8, 104(%r1) +#endif + + # pointer to the result storage + addi %r6,%r1,112 + + # pointer to the saved gpr registers + addi %r7,%r1,16 + + # pointer to the saved fpr registers + addi %r8,%r1,48 + + # pointer to the outgoing parameter save area in the previous frame + # i.e. the previous frame pointer + 8 + addi %r9,%r1,152 + + # make the call + bl ffi_closure_helper_SYSV@local +.Lret: + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + + mflr %r4 # move address of .Lret to r4 + slwi %r3,%r3,4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + lwz %r0,148(%r1) + add %r3,%r3,%r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
+ .align 4 +# case FFI_TYPE_VOID +.Lret_type0: + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_INT + lwz %r3,112+0(%r1) + mtlr %r0 +.Lfinish: + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_FLOAT +#ifndef __NO_FPRS__ + lfs %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_DOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_LONGDOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) + lfd %f2,112+8(%r1) + mtlr %r0 + b .Lfinish +#else + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop +#endif + +# case FFI_TYPE_UINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_UINT16 +#ifdef __LITTLE_ENDIAN__ + lhz %r3,112+0(%r1) +#else + lhz %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT16 +#ifdef __LITTLE_ENDIAN__ + lha %r3,112+0(%r1) +#else + lha %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_SINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_POINTER + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT128 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + lwz %r5,112+8(%r1) + b .Luint128 + +# The return types below are only used when the ABI type is FFI_SYSV. +# case FFI_SYSV_TYPE_SMALL_STRUCT + 1. One byte struct. + lbz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 2. Two byte struct. + lhz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 3. Three byte struct. + lwz %r3,112+0(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#else + srwi %r3,%r3,8 + mtlr %r0 + b .Lfinish +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 4. Four byte struct. + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 5. Five byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,24 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 6. Six byte struct. 
+ lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,16 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 7. Seven byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,8 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 8. Eight byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +#ifndef __LITTLE_ENDIAN__ +.Lstruct567: + subfic %r6,%r5,32 + srw %r4,%r4,%r5 + slw %r6,%r3,%r6 + srw %r3,%r3,%r5 + or %r4,%r6,%r4 + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#endif + +.Luint128: + lwz %r6,112+12(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_endproc +END(ffi_closure_SYSV) + + +FFI_HIDDEN(ffi_go_closure_sysv) +ENTRY(ffi_go_closure_sysv) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # closure->cif + lwz %r3,4(%r11) + # closure->fun + lwz %r4,8(%r11) + # user_data + mr %r5,%r11 + b .Ldoclosure + .cfi_endproc +END(ffi_go_closure_sysv) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure.S new file mode 100644 index 0000000..b6d209d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure.S @@ -0,0 +1,397 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include +#include + + .file "ppc_closure.S" + +#ifndef POWERPC64 + +FFI_HIDDEN(ffi_closure_SYSV) +ENTRY(ffi_closure_SYSV) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + +# we want to build up an areas for the parameters passed +# in registers (both floating point and integer) + + # so first save gpr 3 to gpr 10 (aligned to 4) + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # set up registers for the routine that does the work + + # closure->cif + lwz %r3,FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + lwz %r4,FFI_TRAMPOLINE_SIZE+4(%r11) + # closure->user_data + lwz %r5,FFI_TRAMPOLINE_SIZE+8(%r11) + +.Ldoclosure: + stw %r6, 28(%r1) + stw %r7, 32(%r1) + stw %r8, 36(%r1) + stw %r9, 40(%r1) + stw %r10,44(%r1) + +#ifndef __NO_FPRS__ + # next save fpr 1 to fpr 8 (aligned to 8) + stfd %f1, 48(%r1) + stfd %f2, 56(%r1) + stfd %f3, 64(%r1) + stfd %f4, 72(%r1) + stfd %f5, 80(%r1) + stfd %f6, 88(%r1) + stfd %f7, 96(%r1) + stfd %f8, 104(%r1) +#endif + + # pointer to the result storage + addi %r6,%r1,112 + + # pointer to the saved gpr registers + addi %r7,%r1,16 + + # pointer to the saved fpr registers + addi %r8,%r1,48 + + # pointer to the outgoing parameter save area in the previous frame + # i.e. the previous frame pointer + 8 + addi %r9,%r1,152 + + # make the call + bl ffi_closure_helper_SYSV@local +.Lret: + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + + mflr %r4 # move address of .Lret to r4 + slwi %r3,%r3,4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + lwz %r0,148(%r1) + add %r3,%r3,%r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
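The dispatch that follows is a fixed-stride jump table: the return-type code that ffi_closure_helper_SYSV hands back in r3 is shifted left by 4 and added to .Lret_type0, so every case below must occupy exactly 16 bytes. A rough C analogue of the same idea (purely illustrative — the handlers and indices here are made up and do not follow the real FFI_TYPE_* numbering):

```c
#include <stdint.h>
#include <stdio.h>

/* Each entry stands in for one 16-byte fragment after .Lret_type0. */
typedef void (*ret_handler)(const void *result_area);

static void ret_void(const void *r) { (void)r; }
static void ret_int(const void *r)  { printf("%d\n", *(const int32_t *)r); }

int main(void)
{
    ret_handler table[] = { ret_void, ret_int };
    int32_t result = -7;
    unsigned type_code = 1;       /* the value the helper would return */
    table[type_code](&result);    /* asm: mtctr (.Lret_type0 + 16*code); bctr */
    return 0;
}
```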
+ .align 4 +# case FFI_TYPE_VOID +.Lret_type0: + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_INT + lwz %r3,112+0(%r1) + mtlr %r0 +.Lfinish: + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_FLOAT +#ifndef __NO_FPRS__ + lfs %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_DOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_LONGDOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) + lfd %f2,112+8(%r1) + mtlr %r0 + b .Lfinish +#else + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop +#endif + +# case FFI_TYPE_UINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_UINT16 +#ifdef __LITTLE_ENDIAN__ + lhz %r3,112+0(%r1) +#else + lhz %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT16 +#ifdef __LITTLE_ENDIAN__ + lha %r3,112+0(%r1) +#else + lha %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_SINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_POINTER + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT128 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + lwz %r5,112+8(%r1) + b .Luint128 + +# The return types below are only used when the ABI type is FFI_SYSV. +# case FFI_SYSV_TYPE_SMALL_STRUCT + 1. One byte struct. + lbz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 2. Two byte struct. + lhz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 3. Three byte struct. + lwz %r3,112+0(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#else + srwi %r3,%r3,8 + mtlr %r0 + b .Lfinish +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 4. Four byte struct. + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 5. Five byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,24 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 6. Six byte struct. 
+ lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,16 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 7. Seven byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,8 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 8. Eight byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +#ifndef __LITTLE_ENDIAN__ +.Lstruct567: + subfic %r6,%r5,32 + srw %r4,%r4,%r5 + slw %r6,%r3,%r6 + srw %r3,%r3,%r5 + or %r4,%r6,%r4 + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#endif + +.Luint128: + lwz %r6,112+12(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_endproc +END(ffi_closure_SYSV) + + +FFI_HIDDEN(ffi_go_closure_sysv) +ENTRY(ffi_go_closure_sysv) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # closure->cif + lwz %r3,4(%r11) + # closure->fun + lwz %r4,8(%r11) + # user_data + mr %r5,%r11 + b .Ldoclosure + .cfi_endproc +END(ffi_go_closure_sysv) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv 2.S new file mode 100644 index 0000000..df97734 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv 2.S @@ -0,0 +1,173 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998 Geoffrey Keating + Copyright (C) 2007 Free Software Foundation, Inc + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include + +#ifndef POWERPC64 +FFI_HIDDEN(ffi_call_SYSV) +ENTRY(ffi_call_SYSV) + .cfi_startproc + /* Save the old stack pointer as AP. */ + mr %r10,%r1 + .cfi_def_cfa_register 10 + + /* Allocate the stack space we need. */ + stwux %r1,%r1,%r8 + /* Save registers we use. 
*/ + mflr %r9 + stw %r28,-16(%r10) + stw %r29,-12(%r10) + stw %r30, -8(%r10) + stw %r31, -4(%r10) + stw %r9, 4(%r10) + .cfi_offset 65, 4 + .cfi_offset 31, -4 + .cfi_offset 30, -8 + .cfi_offset 29, -12 + .cfi_offset 28, -16 + + /* Save arguments over call... */ + stw %r7, -20(%r10) /* closure, */ + mr %r31,%r6 /* flags, */ + mr %r30,%r5 /* rvalue, */ + mr %r29,%r4 /* function address, */ + mr %r28,%r10 /* our AP. */ + .cfi_def_cfa_register 28 + + /* Call ffi_prep_args_SYSV. */ + mr %r4,%r1 + bl ffi_prep_args_SYSV@local + + /* Now do the call. */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,%r31 + /* Get the address to call into CTR. */ + mtctr %r29 + /* Load all those argument registers. */ + lwz %r3,-24-(8*4)(%r28) + lwz %r4,-24-(7*4)(%r28) + lwz %r5,-24-(6*4)(%r28) + lwz %r6,-24-(5*4)(%r28) + bf- 5,1f + nop + lwz %r7,-24-(4*4)(%r28) + lwz %r8,-24-(3*4)(%r28) + lwz %r9,-24-(2*4)(%r28) + lwz %r10,-24-(1*4)(%r28) + nop +1: + +#ifndef __NO_FPRS__ + /* Load all the FP registers. */ + bf- 6,2f + lfd %f1,-24-(8*4)-(8*8)(%r28) + lfd %f2,-24-(8*4)-(7*8)(%r28) + lfd %f3,-24-(8*4)-(6*8)(%r28) + lfd %f4,-24-(8*4)-(5*8)(%r28) + nop + lfd %f5,-24-(8*4)-(4*8)(%r28) + lfd %f6,-24-(8*4)-(3*8)(%r28) + lfd %f7,-24-(8*4)-(2*8)(%r28) + lfd %f8,-24-(8*4)-(1*8)(%r28) +#endif +2: + + /* Make the call. */ + lwz %r11, -20(%r28) + bctrl + + /* Now, deal with the return value. */ + mtcrf 0x03,%r31 /* cr6-cr7 */ + bt- 31,L(small_struct_return_value) + bt- 30,L(done_return_value) +#ifndef __NO_FPRS__ + bt- 29,L(fp_return_value) +#endif + stw %r3,0(%r30) + bf+ 27,L(done_return_value) + stw %r4,4(%r30) + bf 26,L(done_return_value) + stw %r5,8(%r30) + stw %r6,12(%r30) + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lwz %r9, 4(%r28) + lwz %r31, -4(%r28) + mtlr %r9 + lwz %r30, -8(%r28) + lwz %r29,-12(%r28) + lwz %r28,-16(%r28) + .cfi_remember_state + /* At this point we don't have a cfa register. Say all our + saved regs have been restored. */ + .cfi_same_value 65 + .cfi_same_value 31 + .cfi_same_value 30 + .cfi_same_value 29 + .cfi_same_value 28 + /* Hopefully this works.. */ + .cfi_def_cfa_register 1 + .cfi_offset 1, 0 + lwz %r1,0(%r1) + .cfi_same_value 1 + blr + +#ifndef __NO_FPRS__ +L(fp_return_value): + .cfi_restore_state + bf 27,L(float_return_value) + stfd %f1,0(%r30) + bf 26,L(done_return_value) + stfd %f2,8(%r30) + b L(done_return_value) +L(float_return_value): + stfs %f1,0(%r30) + b L(done_return_value) +#endif + +L(small_struct_return_value): + /* + * The C code always allocates a properly-aligned 8-byte bounce + * buffer to make this assembly code very simple. Just write out + * r3 and r4 to the buffer to allow the C code to handle the rest. + */ + stw %r3, 0(%r30) + stw %r4, 4(%r30) + b L(done_return_value) + .cfi_endproc + +END(ffi_call_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv.S new file mode 100644 index 0000000..df97734 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv.S @@ -0,0 +1,173 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998 Geoffrey Keating + Copyright (C) 2007 Free Software Foundation, Inc + + PowerPC Assembly glue. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include + +#ifndef POWERPC64 +FFI_HIDDEN(ffi_call_SYSV) +ENTRY(ffi_call_SYSV) + .cfi_startproc + /* Save the old stack pointer as AP. */ + mr %r10,%r1 + .cfi_def_cfa_register 10 + + /* Allocate the stack space we need. */ + stwux %r1,%r1,%r8 + /* Save registers we use. */ + mflr %r9 + stw %r28,-16(%r10) + stw %r29,-12(%r10) + stw %r30, -8(%r10) + stw %r31, -4(%r10) + stw %r9, 4(%r10) + .cfi_offset 65, 4 + .cfi_offset 31, -4 + .cfi_offset 30, -8 + .cfi_offset 29, -12 + .cfi_offset 28, -16 + + /* Save arguments over call... */ + stw %r7, -20(%r10) /* closure, */ + mr %r31,%r6 /* flags, */ + mr %r30,%r5 /* rvalue, */ + mr %r29,%r4 /* function address, */ + mr %r28,%r10 /* our AP. */ + .cfi_def_cfa_register 28 + + /* Call ffi_prep_args_SYSV. */ + mr %r4,%r1 + bl ffi_prep_args_SYSV@local + + /* Now do the call. */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,%r31 + /* Get the address to call into CTR. */ + mtctr %r29 + /* Load all those argument registers. */ + lwz %r3,-24-(8*4)(%r28) + lwz %r4,-24-(7*4)(%r28) + lwz %r5,-24-(6*4)(%r28) + lwz %r6,-24-(5*4)(%r28) + bf- 5,1f + nop + lwz %r7,-24-(4*4)(%r28) + lwz %r8,-24-(3*4)(%r28) + lwz %r9,-24-(2*4)(%r28) + lwz %r10,-24-(1*4)(%r28) + nop +1: + +#ifndef __NO_FPRS__ + /* Load all the FP registers. */ + bf- 6,2f + lfd %f1,-24-(8*4)-(8*8)(%r28) + lfd %f2,-24-(8*4)-(7*8)(%r28) + lfd %f3,-24-(8*4)-(6*8)(%r28) + lfd %f4,-24-(8*4)-(5*8)(%r28) + nop + lfd %f5,-24-(8*4)-(4*8)(%r28) + lfd %f6,-24-(8*4)-(3*8)(%r28) + lfd %f7,-24-(8*4)-(2*8)(%r28) + lfd %f8,-24-(8*4)-(1*8)(%r28) +#endif +2: + + /* Make the call. */ + lwz %r11, -20(%r28) + bctrl + + /* Now, deal with the return value. */ + mtcrf 0x03,%r31 /* cr6-cr7 */ + bt- 31,L(small_struct_return_value) + bt- 30,L(done_return_value) +#ifndef __NO_FPRS__ + bt- 29,L(fp_return_value) +#endif + stw %r3,0(%r30) + bf+ 27,L(done_return_value) + stw %r4,4(%r30) + bf 26,L(done_return_value) + stw %r5,8(%r30) + stw %r6,12(%r30) + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lwz %r9, 4(%r28) + lwz %r31, -4(%r28) + mtlr %r9 + lwz %r30, -8(%r28) + lwz %r29,-12(%r28) + lwz %r28,-16(%r28) + .cfi_remember_state + /* At this point we don't have a cfa register. Say all our + saved regs have been restored. 
*/ + .cfi_same_value 65 + .cfi_same_value 31 + .cfi_same_value 30 + .cfi_same_value 29 + .cfi_same_value 28 + /* Hopefully this works.. */ + .cfi_def_cfa_register 1 + .cfi_offset 1, 0 + lwz %r1,0(%r1) + .cfi_same_value 1 + blr + +#ifndef __NO_FPRS__ +L(fp_return_value): + .cfi_restore_state + bf 27,L(float_return_value) + stfd %f1,0(%r30) + bf 26,L(done_return_value) + stfd %f2,8(%r30) + b L(done_return_value) +L(float_return_value): + stfs %f1,0(%r30) + b L(done_return_value) +#endif + +L(small_struct_return_value): + /* + * The C code always allocates a properly-aligned 8-byte bounce + * buffer to make this assembly code very simple. Just write out + * r3 and r4 to the buffer to allow the C code to handle the rest. + */ + stw %r3, 0(%r30) + stw %r4, 4(%r30) + b L(done_return_value) + .cfi_endproc + +END(ffi_call_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif 2.c new file mode 100644 index 0000000..06c6544 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif 2.c @@ -0,0 +1,263 @@ +/* ----------------------------------------------------------------------- + prep_cif.c - Copyright (c) 2011, 2012 Anthony Green + Copyright (c) 1996, 1998, 2007 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include + +/* Round up to FFI_SIZEOF_ARG. */ + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +/* Perform machine independent initialization of aggregate type + specifications. */ + +static ffi_status initialize_aggregate(ffi_type *arg, size_t *offsets) +{ + ffi_type **ptr; + + if (UNLIKELY(arg == NULL || arg->elements == NULL)) + return FFI_BAD_TYPEDEF; + + arg->size = 0; + arg->alignment = 0; + + ptr = &(arg->elements[0]); + + if (UNLIKELY(ptr == 0)) + return FFI_BAD_TYPEDEF; + + while ((*ptr) != NULL) + { + if (UNLIKELY(((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK))) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type */ + FFI_ASSERT_VALID_TYPE(*ptr); + + arg->size = FFI_ALIGN(arg->size, (*ptr)->alignment); + if (offsets) + *offsets++ = arg->size; + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? 
+ arg->alignment : (*ptr)->alignment; + + ptr++; + } + + /* Structure size includes tail padding. This is important for + structures that fit in one register on ABIs like the PowerPC64 + Linux ABI that right justify small structs in a register. + It's also needed for nested structure layout, for example + struct A { long a; char b; }; struct B { struct A x; char y; }; + should find y at an offset of 2*sizeof(long) and result in a + total size of 3*sizeof(long). */ + arg->size = FFI_ALIGN (arg->size, arg->alignment); + + /* On some targets, the ABI defines that structures have an additional + alignment beyond the "natural" one based on their elements. */ +#ifdef FFI_AGGREGATE_ALIGNMENT + if (FFI_AGGREGATE_ALIGNMENT > arg->alignment) + arg->alignment = FFI_AGGREGATE_ALIGNMENT; +#endif + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +#ifndef __CRIS__ +/* The CRIS ABI specifies structure elements to have byte + alignment only, so it completely overrides this functions, + which assumes "natural" alignment and padding. */ + +/* Perform machine independent ffi_cif preparation, then call + machine dependent routine. */ + +/* For non variadic functions isvariadic should be 0 and + nfixedargs==ntotalargs. + + For variadic calls, isvariadic should be 1 and nfixedargs + and ntotalargs set as appropriate. nfixedargs must always be >=1 */ + + +ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, ffi_type **atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT(cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; +#ifdef _M_ARM64 + cif->is_variadic = isvariadic; +#endif +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + /* Initialize the return type if necessary */ + if ((cif->rtype->size == 0) + && (initialize_aggregate(cif->rtype, NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if (rtype->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the return type */ + FFI_ASSERT_VALID_TYPE(cif->rtype); + + /* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. */ +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT +#ifdef TILE + && (cif->rtype->size > 10 * FFI_SIZEOF_ARG) +#endif +#ifdef XTENSA + && (cif->rtype->size > 16) +#endif +#ifdef NIOS2 + && (cif->rtype->size > 8) +#endif + ) + bytes = STACK_ARG_SIZE(sizeof(void*)); +#endif + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + + /* Initialize any uninitialized aggregate type definitions */ + if (((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if ((*ptr)->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the argument type, do this + check after the initialization. 
*/ + FFI_ASSERT_VALID_TYPE(*ptr); + +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + { + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = (unsigned)FFI_ALIGN(bytes, (*ptr)->alignment); + +#ifdef TILE + if (bytes < 10 * FFI_SIZEOF_ARG && + bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG) + { + /* An argument is never split between the 10 parameter + registers and the stack. */ + bytes = 10 * FFI_SIZEOF_ARG; + } +#endif +#ifdef XTENSA + if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4) + bytes = 6*4; +#endif + + bytes += (unsigned int)STACK_ARG_SIZE((*ptr)->size); + } +#endif + } + + cif->bytes = bytes; + + /* Perform machine dependent cif processing */ +#ifdef FFI_TARGET_SPECIFIC_VARIADIC + if (isvariadic) + return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs); +#endif + + return ffi_prep_cif_machdep(cif); +} +#endif /* not __CRIS__ */ + +ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs, + ffi_type *rtype, ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes); +} + +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes); +} + +#if FFI_CLOSURES + +ffi_status +ffi_prep_closure (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +{ + return ffi_prep_closure_loc (closure, cif, fun, user_data, closure); +} + +#endif + +ffi_status +ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, size_t *offsets) +{ + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + if (struct_type->type != FFI_TYPE_STRUCT) + return FFI_BAD_TYPEDEF; + +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + return initialize_aggregate(struct_type, offsets); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c new file mode 100644 index 0000000..06c6544 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c @@ -0,0 +1,263 @@ +/* ----------------------------------------------------------------------- + prep_cif.c - Copyright (c) 2011, 2012 Anthony Green + Copyright (c) 1996, 1998, 2007 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
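For reference, the public entry points defined above (ffi_prep_cif followed by ffi_call) are typically driven like this; a minimal sketch, not part of the vendored sources:

```c
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *args[1] = { &ffi_type_pointer };
    char *s = "Hello from libffi";
    void *values[1] = { &s };
    ffi_arg rc;                       /* holds puts()'s int return value */

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                     &ffi_type_sint, args) == FFI_OK)
        ffi_call(&cif, FFI_FN(puts), &rc, values);
    return 0;
}
```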
+ ----------------------------------------------------------------------- */ + +#include +#include +#include + +/* Round up to FFI_SIZEOF_ARG. */ + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +/* Perform machine independent initialization of aggregate type + specifications. */ + +static ffi_status initialize_aggregate(ffi_type *arg, size_t *offsets) +{ + ffi_type **ptr; + + if (UNLIKELY(arg == NULL || arg->elements == NULL)) + return FFI_BAD_TYPEDEF; + + arg->size = 0; + arg->alignment = 0; + + ptr = &(arg->elements[0]); + + if (UNLIKELY(ptr == 0)) + return FFI_BAD_TYPEDEF; + + while ((*ptr) != NULL) + { + if (UNLIKELY(((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK))) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type */ + FFI_ASSERT_VALID_TYPE(*ptr); + + arg->size = FFI_ALIGN(arg->size, (*ptr)->alignment); + if (offsets) + *offsets++ = arg->size; + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + /* Structure size includes tail padding. This is important for + structures that fit in one register on ABIs like the PowerPC64 + Linux ABI that right justify small structs in a register. + It's also needed for nested structure layout, for example + struct A { long a; char b; }; struct B { struct A x; char y; }; + should find y at an offset of 2*sizeof(long) and result in a + total size of 3*sizeof(long). */ + arg->size = FFI_ALIGN (arg->size, arg->alignment); + + /* On some targets, the ABI defines that structures have an additional + alignment beyond the "natural" one based on their elements. */ +#ifdef FFI_AGGREGATE_ALIGNMENT + if (FFI_AGGREGATE_ALIGNMENT > arg->alignment) + arg->alignment = FFI_AGGREGATE_ALIGNMENT; +#endif + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +#ifndef __CRIS__ +/* The CRIS ABI specifies structure elements to have byte + alignment only, so it completely overrides this functions, + which assumes "natural" alignment and padding. */ + +/* Perform machine independent ffi_cif preparation, then call + machine dependent routine. */ + +/* For non variadic functions isvariadic should be 0 and + nfixedargs==ntotalargs. + + For variadic calls, isvariadic should be 1 and nfixedargs + and ntotalargs set as appropriate. nfixedargs must always be >=1 */ + + +ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, ffi_type **atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT(cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; +#ifdef _M_ARM64 + cif->is_variadic = isvariadic; +#endif +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + /* Initialize the return type if necessary */ + if ((cif->rtype->size == 0) + && (initialize_aggregate(cif->rtype, NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if (rtype->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the return type */ + FFI_ASSERT_VALID_TYPE(cif->rtype); + + /* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. 
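The aggregate layout computed by initialize_aggregate (including the tail padding discussed above) is also exposed through ffi_get_struct_offsets; a small sketch using the struct A { long a; char b; } from the comment:

```c
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    /* struct A { long a; char b; } described as a libffi aggregate */
    ffi_type *elements[] = { &ffi_type_slong, &ffi_type_schar, NULL };
    ffi_type a_type = { 0, 0, FFI_TYPE_STRUCT, elements };  /* size/alignment filled in by libffi */
    size_t offsets[2];

    if (ffi_get_struct_offsets(FFI_DEFAULT_ABI, &a_type, offsets) == FFI_OK)
        printf("size=%zu align=%u offsets=%zu,%zu\n",   /* 16 8 0,8 on LP64 */
               a_type.size, (unsigned)a_type.alignment,
               offsets[0], offsets[1]);
    return 0;
}
```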
*/ +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT +#ifdef TILE + && (cif->rtype->size > 10 * FFI_SIZEOF_ARG) +#endif +#ifdef XTENSA + && (cif->rtype->size > 16) +#endif +#ifdef NIOS2 + && (cif->rtype->size > 8) +#endif + ) + bytes = STACK_ARG_SIZE(sizeof(void*)); +#endif + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + + /* Initialize any uninitialized aggregate type definitions */ + if (((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if ((*ptr)->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the argument type, do this + check after the initialization. */ + FFI_ASSERT_VALID_TYPE(*ptr); + +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + { + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = (unsigned)FFI_ALIGN(bytes, (*ptr)->alignment); + +#ifdef TILE + if (bytes < 10 * FFI_SIZEOF_ARG && + bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG) + { + /* An argument is never split between the 10 parameter + registers and the stack. */ + bytes = 10 * FFI_SIZEOF_ARG; + } +#endif +#ifdef XTENSA + if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4) + bytes = 6*4; +#endif + + bytes += (unsigned int)STACK_ARG_SIZE((*ptr)->size); + } +#endif + } + + cif->bytes = bytes; + + /* Perform machine dependent cif processing */ +#ifdef FFI_TARGET_SPECIFIC_VARIADIC + if (isvariadic) + return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs); +#endif + + return ffi_prep_cif_machdep(cif); +} +#endif /* not __CRIS__ */ + +ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs, + ffi_type *rtype, ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes); +} + +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes); +} + +#if FFI_CLOSURES + +ffi_status +ffi_prep_closure (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +{ + return ffi_prep_closure_loc (closure, cif, fun, user_data, closure); +} + +#endif + +ffi_status +ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, size_t *offsets) +{ + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + if (struct_type->type != FFI_TYPE_STRUCT) + return FFI_BAD_TYPEDEF; + +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + return initialize_aggregate(struct_type, offsets); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api 2.c new file mode 100644 index 0000000..be15611 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api 2.c @@ -0,0 +1,267 @@ +/* ----------------------------------------------------------------------- + raw_api.c - Copyright (c) 1999, 2008 Red Hat, Inc. 
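ffi_prep_cif_var is the variadic counterpart: nfixedargs covers only the named parameters, and the usual C default promotions still apply to the variadic ones. A hedged sketch calling printf with one fixed and two variadic arguments:

```c
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *argt[3] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_sint };
    char *fmt  = "%s was called with %d arguments\n";
    char *name = "printf";
    int   n    = 3;
    void *values[3] = { &fmt, &name, &n };
    ffi_arg rc;

    /* 1 fixed argument (the format string), 3 arguments in total */
    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3,
                         &ffi_type_sint, argt) == FFI_OK)
        ffi_call(&cif, FFI_FN(printf), &rc, values);
    return 0;
}
```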
+ + Author: Kresten Krab Thorup + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This file defines generic functions for use with the raw api. */ + +#include +#include + +#if !FFI_NO_RAW_API + +size_t +ffi_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { +#if !FFI_NO_STRUCTS + if ((*at)->type == FFI_TYPE_STRUCT) + result += FFI_ALIGN (sizeof (void*), FFI_SIZEOF_ARG); + else +#endif + result += FFI_ALIGN ((*at)->size, FFI_SIZEOF_ARG); + } + + return result; +} + + +void +ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 1); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 2); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 4); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + *args = (raw++)->ptr; + break; +#endif + + case FFI_TYPE_COMPLEX: + *args = (raw++)->ptr; + break; + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + default: + *args = raw; + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if !FFI_NO_STRUCTS + if ((*tp)->type == FFI_TYPE_STRUCT) + { + *args = (raw++)->ptr; + } + else +#endif + if ((*tp)->type == FFI_TYPE_COMPLEX) + { + *args = (raw++)->ptr; + } + else + { + *args = (void*) raw; + raw += FFI_ALIGN ((*tp)->size, sizeof (void*)) / sizeof (void*); + } + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + (raw++)->uint = *(UINT8*) (*args); + break; + + case FFI_TYPE_SINT8: + (raw++)->sint = *(SINT8*) (*args); + break; + + case FFI_TYPE_UINT16: + (raw++)->uint = *(UINT16*) (*args); + break; + + case FFI_TYPE_SINT16: + (raw++)->sint = *(SINT16*) (*args); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + (raw++)->uint = *(UINT32*) (*args); + break; + + case FFI_TYPE_SINT32: + (raw++)->sint = *(SINT32*) (*args); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + (raw++)->ptr = *args; + break; +#endif + + case FFI_TYPE_COMPLEX: + (raw++)->ptr = *args; + break; + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } +} + +#if !FFI_NATIVE_RAW_API + + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. */ + +void ffi_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_raw *raw = (ffi_raw*)alloca (ffi_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, raw, cl->user_data); +} + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ + +#if FFI_CLOSURES + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_raw_closure (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data) +{ + return ffi_prep_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ + +#endif /* !FFI_NO_RAW_API */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c new file mode 100644 index 0000000..be15611 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c @@ -0,0 +1,267 @@ +/* ----------------------------------------------------------------------- + raw_api.c - Copyright (c) 1999, 2008 Red Hat, Inc. 
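On targets where the raw API is compiled in (FFI_NO_RAW_API unset), the helpers above let callers keep arguments in the flat ffi_raw layout instead of a pointer array; a sketch that round-trips through ffi_ptrarray_to_raw and ffi_raw_call:

```c
#include <ffi.h>
#include <stdio.h>
#include <stdlib.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
    ffi_cif cif;
    ffi_type *argt[2] = { &ffi_type_sint, &ffi_type_sint };
    int a = 2, b = 3;
    void *values[2] = { &a, &b };
    ffi_arg rc;

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argt) != FFI_OK)
        return 1;

    /* pack the pointer-array form into the flat raw layout, then call */
    ffi_raw *raw = malloc(ffi_raw_size(&cif));
    ffi_ptrarray_to_raw(&cif, values, raw);
    ffi_raw_call(&cif, FFI_FN(add), &rc, raw);
    printf("%ld\n", (long)rc);    /* 5 */
    free(raw);
    return 0;
}
```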
+ + Author: Kresten Krab Thorup + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This file defines generic functions for use with the raw api. */ + +#include +#include + +#if !FFI_NO_RAW_API + +size_t +ffi_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { +#if !FFI_NO_STRUCTS + if ((*at)->type == FFI_TYPE_STRUCT) + result += FFI_ALIGN (sizeof (void*), FFI_SIZEOF_ARG); + else +#endif + result += FFI_ALIGN ((*at)->size, FFI_SIZEOF_ARG); + } + + return result; +} + + +void +ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 1); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 2); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 4); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + *args = (raw++)->ptr; + break; +#endif + + case FFI_TYPE_COMPLEX: + *args = (raw++)->ptr; + break; + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + default: + *args = raw; + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if !FFI_NO_STRUCTS + if ((*tp)->type == FFI_TYPE_STRUCT) + { + *args = (raw++)->ptr; + } + else +#endif + if ((*tp)->type == FFI_TYPE_COMPLEX) + { + *args = (raw++)->ptr; + } + else + { + *args = (void*) raw; + raw += FFI_ALIGN ((*tp)->size, sizeof (void*)) / sizeof (void*); + } + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + (raw++)->uint = *(UINT8*) (*args); + break; + + case FFI_TYPE_SINT8: + (raw++)->sint = *(SINT8*) (*args); + break; + + case FFI_TYPE_UINT16: + (raw++)->uint = *(UINT16*) (*args); + break; + + case FFI_TYPE_SINT16: + (raw++)->sint = *(SINT16*) (*args); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + (raw++)->uint = *(UINT32*) (*args); + break; + + case FFI_TYPE_SINT32: + (raw++)->sint = *(SINT32*) (*args); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + (raw++)->ptr = *args; + break; +#endif + + case FFI_TYPE_COMPLEX: + (raw++)->ptr = *args; + break; + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } +} + +#if !FFI_NATIVE_RAW_API + + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. */ + +void ffi_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_raw *raw = (ffi_raw*)alloca (ffi_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, raw, cl->user_data); +} + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ + +#if FFI_CLOSURES + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_raw_closure (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data) +{ + return ffi_prep_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ + +#endif /* !FFI_NO_RAW_API */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi 2.c new file mode 100644 index 0000000..c910858 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi 2.c @@ -0,0 +1,481 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + Based on MIPS N32/64 port + + RISC-V 
Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#if __riscv_float_abi_double +#define ABI_FLEN 64 +#define ABI_FLOAT double +#elif __riscv_float_abi_single +#define ABI_FLEN 32 +#define ABI_FLOAT float +#endif + +#define NARGREG 8 +#define STKALIGN 16 +#define MAXCOPYARG (2 * sizeof(double)) + +typedef struct call_context +{ +#if ABI_FLEN + ABI_FLOAT fa[8]; +#endif + size_t a[8]; + /* used by the assembly code to in-place construct its own stack frame */ + char frame[16]; +} call_context; + +typedef struct call_builder +{ + call_context *aregs; + int used_integer; + int used_float; + size_t *used_stack; +} call_builder; + +/* integer (not pointer) less than ABI XLEN */ +/* FFI_TYPE_INT does not appear to be used */ +#if __SIZEOF_POINTER__ == 8 +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64) +#else +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32) +#endif + +#if ABI_FLEN +typedef struct { + char as_elements, type1, offset2, type2; +} float_struct_info; + +#if ABI_FLEN >= 64 +#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE) +#else +#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT) +#endif + +static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) { + int i; + if (out == out_end) return out; + if (in->type != FFI_TYPE_STRUCT) { + *(out++) = in; + } else { + for (i = 0; in->elements[i]; i++) + out = flatten_struct(in->elements[i], out, out_end); + } + return out; +} + +/* Structs with at most two fields after flattening, one of which is of + floating point type, are passed in multiple registers if sufficient + registers are available. 
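As a concrete illustration of the rule just described: under the RV64 double-float ABI a two-field struct such as { double; int } is flattened and passed in fa0 plus a0 when enough registers are free. The sketch below passes such a struct through ffi_call; it assumes nothing RISC-V specific, so on other targets the same code simply follows that target's convention:

```c
#include <ffi.h>
#include <stdio.h>

typedef struct { double scale; int count; } pair_t;

static double weigh(pair_t p) { return p.scale * p.count; }

int main(void)
{
    /* describe pair_t: one float field and one integer field, two fields total */
    ffi_type *fields[] = { &ffi_type_double, &ffi_type_sint, NULL };
    ffi_type pair_type = { 0, 0, FFI_TYPE_STRUCT, fields };

    ffi_cif cif;
    ffi_type *argt[1] = { &pair_type };
    pair_t p = { 2.5, 4 };
    void *values[1] = { &p };
    double result;

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_double, argt) == FFI_OK) {
        ffi_call(&cif, FFI_FN(weigh), &result, values);
        printf("%g\n", result);   /* 10 */
    }
    return 0;
}
```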
*/ +static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) { + float_struct_info ret = {0, 0, 0, 0}; + ffi_type *fields[3]; + int num_floats, num_ints; + int num_fields = flatten_struct(top, fields, fields + 3) - fields; + + if (num_fields == 1) { + if (IS_FLOAT(fields[0]->type)) { + ret.as_elements = 1; + ret.type1 = fields[0]->type; + } + } else if (num_fields == 2) { + num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type); + num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type); + if (num_floats == 0 || num_floats + num_ints != 2) + return ret; + if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG) + return ret; + if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type)) + return ret; + + ret.type1 = fields[0]->type; + ret.type2 = fields[1]->type; + ret.offset2 = FFI_ALIGN(fields[0]->size, fields[1]->alignment); + ret.as_elements = 1; + } + + return ret; +} +#endif + +/* allocates a single register, float register, or XLEN-sized stack slot to a datum */ +static void marshal_atom(call_builder *cb, int type, void *data) { + size_t value = 0; + switch (type) { + case FFI_TYPE_UINT8: value = *(uint8_t *)data; break; + case FFI_TYPE_SINT8: value = *(int8_t *)data; break; + case FFI_TYPE_UINT16: value = *(uint16_t *)data; break; + case FFI_TYPE_SINT16: value = *(int16_t *)data; break; + /* 32-bit quantities are always sign-extended in the ABI */ + case FFI_TYPE_UINT32: value = *(int32_t *)data; break; + case FFI_TYPE_SINT32: value = *(int32_t *)data; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: value = *(uint64_t *)data; break; + case FFI_TYPE_SINT64: value = *(int64_t *)data; break; +#endif + case FFI_TYPE_POINTER: value = *(size_t *)data; break; + + /* float values may be recoded in an implementation-defined way + by hardware conforming to 2.1 or earlier, so use asm to + reinterpret floats as doubles */ +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data)); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data)); + return; +#endif + default: FFI_ASSERT(0); break; + } + + if (cb->used_integer == NARGREG) { + *cb->used_stack++ = value; + } else { + cb->aregs->a[cb->used_integer++] = value; + } +} + +static void unmarshal_atom(call_builder *cb, int type, void *data) { + size_t value; + switch (type) { +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif + } + + if (cb->used_integer == NARGREG) { + value = *cb->used_stack++; + } else { + value = cb->aregs->a[cb->used_integer++]; + } + + switch (type) { + case FFI_TYPE_UINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_SINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_UINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_SINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_UINT32: *(uint32_t *)data = value; break; + case FFI_TYPE_SINT32: *(uint32_t *)data = value; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: *(uint64_t *)data = value; break; + case FFI_TYPE_SINT64: *(uint64_t *)data = value; break; +#endif + case FFI_TYPE_POINTER: *(size_t *)data = value; break; + default: FFI_ASSERT(0); break; + } +} + +/* adds an argument to a call, or a not by reference 
return value */ +static void marshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + marshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + marshal_atom(cb, type->type, data); + return; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + marshal_atom(cb, FFI_TYPE_POINTER, &data); + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + marshal_atom(cb, type->type, data); + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + memcpy(realign, data, type->size); + if (type->size > 0) + marshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + marshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + } +} + +/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */ +static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + void *pointer; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + unmarshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return data; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + unmarshal_atom(cb, type->type, data); + return data; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer); + return pointer; + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + unmarshal_atom(cb, type->type, data); + return data; + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + if (type->size > 0) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + memcpy(data, realign, type->size); + return data; + } +} + +static int passed_by_ref(call_builder *cb, ffi_type *type, int var) { +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) return 0; + } +#endif + + return type->size > 2 * __SIZEOF_POINTER__; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) { + cif->riscv_nfixedargs = cif->nargs; + return FFI_OK; +} + +/* Perform machine dependent cif processing when we have a variadic function */ + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) { + cif->riscv_nfixedargs 
= nfixedargs; + return FFI_OK; +} + +/* Low level routine for calling functions */ +extern void ffi_call_asm (void *stack, struct call_context *regs, + void (*fn) (void), void *closure) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + /* this is a conservative estimate, assuming a complex return value and + that all remaining arguments are long long / __int128 */ + size_t arg_bytes = cif->nargs <= 3 ? 0 : + FFI_ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN); + size_t rval_bytes = 0; + if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__) + rval_bytes = FFI_ALIGN(cif->rtype->size, STKALIGN); + size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context); + + /* the assembly code will deallocate all stack data at lower addresses + than the argument region, so we need to allocate the frame and the + return value after the arguments in a single allocation */ + size_t alloc_base; + /* Argument region must be 16-byte aligned */ + if (_Alignof(max_align_t) >= STKALIGN) { + /* since sizeof long double is normally 16, the compiler will + guarantee alloca alignment to at least that much */ + alloc_base = (size_t)alloca(alloc_size); + } else { + alloc_base = FFI_ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN); + } + + if (rval_bytes) + rvalue = (void*)(alloc_base + arg_bytes); + + call_builder cb; + cb.used_float = cb.used_integer = 0; + cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes); + cb.used_stack = (void*)alloc_base; + + int return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + marshal(&cb, &ffi_type_pointer, 0, &rvalue); + + int i; + for (i = 0; i < cif->nargs; i++) + marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]); + + ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure); + + cb.used_float = cb.used_integer = 0; + if (!return_by_ref && rvalue) + unmarshal(&cb, cif->rtype, 0, rvalue); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +extern void ffi_closure_asm(void) FFI_HIDDEN; + +ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) &closure->tramp[0]; + uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm; + + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* we will call ffi_closure_inner with codeloc, not closure, but as long + as the memory is readable it should work */ + + tramp[0] = 0x00000317; /* auipc t1, 0 (i.e. 
t0 <- codeloc) */ +#if __SIZEOF_POINTER__ == 8 + tramp[1] = 0x01033383; /* ld t2, 16(t1) */ +#else + tramp[1] = 0x01032383; /* lw t2, 16(t1) */ +#endif + tramp[2] = 0x00038067; /* jr t2 */ + tramp[3] = 0x00000013; /* nop */ + tramp[4] = fn; + tramp[5] = fn >> 32; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + __builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE); + + return FFI_OK; +} + +extern void ffi_go_closure_asm (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + closure->tramp = (void *) ffi_go_closure_asm; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Called by the assembly code with aregs pointing to saved argument registers + and stack pointing to the stacked arguments. Return values passed in + registers will be reloaded from aregs. */ +void FFI_HIDDEN +ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stack, call_context *aregs) +{ + void **avalue = alloca(cif->nargs * sizeof(void*)); + /* storage for arguments which will be copied by unmarshal(). We could + theoretically avoid the copies in many cases and use at most 128 bytes + of memory, but allocating disjoint storage for each argument is + simpler. */ + char *astorage = alloca(cif->nargs * MAXCOPYARG); + void *rvalue; + call_builder cb; + int return_by_ref; + int i; + + cb.aregs = aregs; + cb.used_integer = cb.used_float = 0; + cb.used_stack = stack; + + return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + unmarshal(&cb, &ffi_type_pointer, 0, &rvalue); + else + rvalue = alloca(cif->rtype->size); + + for (i = 0; i < cif->nargs; i++) + avalue[i] = unmarshal(&cb, cif->arg_types[i], + i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG); + + fun (cif, rvalue, avalue, user_data); + + if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) { + cb.used_integer = cb.used_float = 0; + marshal(&cb, cif->rtype, 0, rvalue); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi.c new file mode 100644 index 0000000..c910858 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi.c @@ -0,0 +1,481 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + Based on MIPS N32/64 port + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#if __riscv_float_abi_double +#define ABI_FLEN 64 +#define ABI_FLOAT double +#elif __riscv_float_abi_single +#define ABI_FLEN 32 +#define ABI_FLOAT float +#endif + +#define NARGREG 8 +#define STKALIGN 16 +#define MAXCOPYARG (2 * sizeof(double)) + +typedef struct call_context +{ +#if ABI_FLEN + ABI_FLOAT fa[8]; +#endif + size_t a[8]; + /* used by the assembly code to in-place construct its own stack frame */ + char frame[16]; +} call_context; + +typedef struct call_builder +{ + call_context *aregs; + int used_integer; + int used_float; + size_t *used_stack; +} call_builder; + +/* integer (not pointer) less than ABI XLEN */ +/* FFI_TYPE_INT does not appear to be used */ +#if __SIZEOF_POINTER__ == 8 +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64) +#else +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32) +#endif + +#if ABI_FLEN +typedef struct { + char as_elements, type1, offset2, type2; +} float_struct_info; + +#if ABI_FLEN >= 64 +#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE) +#else +#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT) +#endif + +static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) { + int i; + if (out == out_end) return out; + if (in->type != FFI_TYPE_STRUCT) { + *(out++) = in; + } else { + for (i = 0; in->elements[i]; i++) + out = flatten_struct(in->elements[i], out, out_end); + } + return out; +} + +/* Structs with at most two fields after flattening, one of which is of + floating point type, are passed in multiple registers if sufficient + registers are available. 
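The two-field rule described here is easiest to see on concrete types. A compilable sketch of how it classifies a few invented structs under the lp64d ABI (ABI_FLEN == 64, XLEN == 64); none of these structs are part of libffi:

  /* Illustrative only -- struct names invented for the example. */
  struct f1  { float f; };             /* 1 flattened field, float            -> one FPR (fa)             */
  struct fi  { float f; int i; };      /* float + int, offset2 == 4           -> one FPR + one GPR        */
  struct dd  { double a; double b; };  /* two floating fields                 -> two FPRs                 */
  struct fii { float f; int i, j; };   /* 3 fields after flattening           -> ordinary integer ABI     */
  struct big { double a, b, c; };      /* 24 bytes > 2 * __SIZEOF_POINTER__   -> passed by reference      */
  int main(void) { return 0; }

If the needed FPRs/GPRs are exhausted, or the type flattens to more than two fields, the struct simply falls back to the integer convention handled at the end of marshal().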
*/ +static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) { + float_struct_info ret = {0, 0, 0, 0}; + ffi_type *fields[3]; + int num_floats, num_ints; + int num_fields = flatten_struct(top, fields, fields + 3) - fields; + + if (num_fields == 1) { + if (IS_FLOAT(fields[0]->type)) { + ret.as_elements = 1; + ret.type1 = fields[0]->type; + } + } else if (num_fields == 2) { + num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type); + num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type); + if (num_floats == 0 || num_floats + num_ints != 2) + return ret; + if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG) + return ret; + if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type)) + return ret; + + ret.type1 = fields[0]->type; + ret.type2 = fields[1]->type; + ret.offset2 = FFI_ALIGN(fields[0]->size, fields[1]->alignment); + ret.as_elements = 1; + } + + return ret; +} +#endif + +/* allocates a single register, float register, or XLEN-sized stack slot to a datum */ +static void marshal_atom(call_builder *cb, int type, void *data) { + size_t value = 0; + switch (type) { + case FFI_TYPE_UINT8: value = *(uint8_t *)data; break; + case FFI_TYPE_SINT8: value = *(int8_t *)data; break; + case FFI_TYPE_UINT16: value = *(uint16_t *)data; break; + case FFI_TYPE_SINT16: value = *(int16_t *)data; break; + /* 32-bit quantities are always sign-extended in the ABI */ + case FFI_TYPE_UINT32: value = *(int32_t *)data; break; + case FFI_TYPE_SINT32: value = *(int32_t *)data; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: value = *(uint64_t *)data; break; + case FFI_TYPE_SINT64: value = *(int64_t *)data; break; +#endif + case FFI_TYPE_POINTER: value = *(size_t *)data; break; + + /* float values may be recoded in an implementation-defined way + by hardware conforming to 2.1 or earlier, so use asm to + reinterpret floats as doubles */ +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data)); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data)); + return; +#endif + default: FFI_ASSERT(0); break; + } + + if (cb->used_integer == NARGREG) { + *cb->used_stack++ = value; + } else { + cb->aregs->a[cb->used_integer++] = value; + } +} + +static void unmarshal_atom(call_builder *cb, int type, void *data) { + size_t value; + switch (type) { +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif + } + + if (cb->used_integer == NARGREG) { + value = *cb->used_stack++; + } else { + value = cb->aregs->a[cb->used_integer++]; + } + + switch (type) { + case FFI_TYPE_UINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_SINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_UINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_SINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_UINT32: *(uint32_t *)data = value; break; + case FFI_TYPE_SINT32: *(uint32_t *)data = value; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: *(uint64_t *)data = value; break; + case FFI_TYPE_SINT64: *(uint64_t *)data = value; break; +#endif + case FFI_TYPE_POINTER: *(size_t *)data = value; break; + default: FFI_ASSERT(0); break; + } +} + +/* adds an argument to a call, or a not by reference 
return value */ +static void marshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + marshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + marshal_atom(cb, type->type, data); + return; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + marshal_atom(cb, FFI_TYPE_POINTER, &data); + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + marshal_atom(cb, type->type, data); + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + memcpy(realign, data, type->size); + if (type->size > 0) + marshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + marshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + } +} + +/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */ +static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + void *pointer; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + unmarshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return data; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + unmarshal_atom(cb, type->type, data); + return data; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer); + return pointer; + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + unmarshal_atom(cb, type->type, data); + return data; + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + if (type->size > 0) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + memcpy(data, realign, type->size); + return data; + } +} + +static int passed_by_ref(call_builder *cb, ffi_type *type, int var) { +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) return 0; + } +#endif + + return type->size > 2 * __SIZEOF_POINTER__; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) { + cif->riscv_nfixedargs = cif->nargs; + return FFI_OK; +} + +/* Perform machine dependent cif processing when we have a variadic function */ + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) { + cif->riscv_nfixedargs 
= nfixedargs; + return FFI_OK; +} + +/* Low level routine for calling functions */ +extern void ffi_call_asm (void *stack, struct call_context *regs, + void (*fn) (void), void *closure) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + /* this is a conservative estimate, assuming a complex return value and + that all remaining arguments are long long / __int128 */ + size_t arg_bytes = cif->nargs <= 3 ? 0 : + FFI_ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN); + size_t rval_bytes = 0; + if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__) + rval_bytes = FFI_ALIGN(cif->rtype->size, STKALIGN); + size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context); + + /* the assembly code will deallocate all stack data at lower addresses + than the argument region, so we need to allocate the frame and the + return value after the arguments in a single allocation */ + size_t alloc_base; + /* Argument region must be 16-byte aligned */ + if (_Alignof(max_align_t) >= STKALIGN) { + /* since sizeof long double is normally 16, the compiler will + guarantee alloca alignment to at least that much */ + alloc_base = (size_t)alloca(alloc_size); + } else { + alloc_base = FFI_ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN); + } + + if (rval_bytes) + rvalue = (void*)(alloc_base + arg_bytes); + + call_builder cb; + cb.used_float = cb.used_integer = 0; + cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes); + cb.used_stack = (void*)alloc_base; + + int return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + marshal(&cb, &ffi_type_pointer, 0, &rvalue); + + int i; + for (i = 0; i < cif->nargs; i++) + marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]); + + ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure); + + cb.used_float = cb.used_integer = 0; + if (!return_by_ref && rvalue) + unmarshal(&cb, cif->rtype, 0, rvalue); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +extern void ffi_closure_asm(void) FFI_HIDDEN; + +ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) &closure->tramp[0]; + uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm; + + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* we will call ffi_closure_inner with codeloc, not closure, but as long + as the memory is readable it should work */ + + tramp[0] = 0x00000317; /* auipc t1, 0 (i.e. 
t0 <- codeloc) */ +#if __SIZEOF_POINTER__ == 8 + tramp[1] = 0x01033383; /* ld t2, 16(t1) */ +#else + tramp[1] = 0x01032383; /* lw t2, 16(t1) */ +#endif + tramp[2] = 0x00038067; /* jr t2 */ + tramp[3] = 0x00000013; /* nop */ + tramp[4] = fn; + tramp[5] = fn >> 32; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + __builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE); + + return FFI_OK; +} + +extern void ffi_go_closure_asm (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + closure->tramp = (void *) ffi_go_closure_asm; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Called by the assembly code with aregs pointing to saved argument registers + and stack pointing to the stacked arguments. Return values passed in + registers will be reloaded from aregs. */ +void FFI_HIDDEN +ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stack, call_context *aregs) +{ + void **avalue = alloca(cif->nargs * sizeof(void*)); + /* storage for arguments which will be copied by unmarshal(). We could + theoretically avoid the copies in many cases and use at most 128 bytes + of memory, but allocating disjoint storage for each argument is + simpler. */ + char *astorage = alloca(cif->nargs * MAXCOPYARG); + void *rvalue; + call_builder cb; + int return_by_ref; + int i; + + cb.aregs = aregs; + cb.used_integer = cb.used_float = 0; + cb.used_stack = stack; + + return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + unmarshal(&cb, &ffi_type_pointer, 0, &rvalue); + else + rvalue = alloca(cif->rtype->size); + + for (i = 0; i < cif->nargs; i++) + avalue[i] = unmarshal(&cb, cif->arg_types[i], + i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG); + + fun (cif, rvalue, avalue, user_data); + + if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) { + cb.used_integer = cb.used_float = 0; + marshal(&cb, cif->rtype, 0, rvalue); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget 2.h new file mode 100644 index 0000000..75e6462 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget 2.h @@ -0,0 +1,69 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - 2014 Michael Knyszek + + Target configuration macros for RISC-V. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef __riscv +#error "libffi was configured for a RISC-V target but this does not appear to be a RISC-V compiler." +#endif + +#ifndef LIBFFI_ASM + +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +/* FFI_UNUSED_NN and riscv_unused are to maintain ABI compatibility with a + distributed Berkeley patch from 2014, and can be removed at SONAME bump */ +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_UNUSED_1, + FFI_UNUSED_2, + FFI_UNUSED_3, + FFI_LAST_ABI, + + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#endif /* LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 +#define FFI_EXTRA_CIF_FIELDS unsigned riscv_nfixedargs; unsigned riscv_unused; +#define FFI_TARGET_SPECIFIC_VARIADIC + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget.h new file mode 100644 index 0000000..75e6462 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget.h @@ -0,0 +1,69 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - 2014 Michael Knyszek + + Target configuration macros for RISC-V. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef __riscv +#error "libffi was configured for a RISC-V target but this does not appear to be a RISC-V compiler." 
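ffi_prep_closure_loc above only fills in the trampoline and the closure fields; callers normally reach it through libffi's public closure API. A minimal usage sketch, assuming the standard ffi_closure_alloc/ffi_closure_free allocator that ships with libffi (the callback name and its arithmetic are invented):

  #include <ffi.h>
  #include <stdio.h>

  /* closure body: receives boxed arguments, writes the boxed return value */
  static void add_impl(ffi_cif *cif, void *ret, void **args, void *user_data)
  {
    (void)cif; (void)user_data;
    *(ffi_arg *)ret = *(int *)args[0] + *(int *)args[1];
  }

  int main(void)
  {
    ffi_cif cif;
    ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
    void *codeloc;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);

    if (closure
        && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) == FFI_OK
        && ffi_prep_closure_loc(closure, &cif, add_impl, NULL, codeloc) == FFI_OK)
      {
        int (*fn)(int, int) = (int (*)(int, int))codeloc;
        printf("%d\n", fn(2, 3));   /* prints 5 */
      }

    ffi_closure_free(closure);
    return 0;
  }

On RISC-V, calling through codeloc executes the 24-byte trampoline written above, which lands in ffi_closure_asm and then ffi_closure_inner before invoking add_impl.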
+#endif + +#ifndef LIBFFI_ASM + +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +/* FFI_UNUSED_NN and riscv_unused are to maintain ABI compatibility with a + distributed Berkeley patch from 2014, and can be removed at SONAME bump */ +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_UNUSED_1, + FFI_UNUSED_2, + FFI_UNUSED_3, + FFI_LAST_ABI, + + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#endif /* LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 +#define FFI_EXTRA_CIF_FIELDS unsigned riscv_nfixedargs; unsigned riscv_unused; +#define FFI_TARGET_SPECIFIC_VARIADIC + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv 2.S new file mode 100644 index 0000000..522d0b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv 2.S @@ -0,0 +1,293 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Define aliases so that we can handle all ABIs uniformly */ + +#if __SIZEOF_POINTER__ == 8 +#define PTRS 8 +#define LARG ld +#define SARG sd +#else +#define PTRS 4 +#define LARG lw +#define SARG sw +#endif + +#if __riscv_float_abi_double +#define FLTS 8 +#define FLARG fld +#define FSARG fsd +#elif __riscv_float_abi_single +#define FLTS 4 +#define FLARG flw +#define FSARG fsw +#else +#define FLTS 0 +#endif + +#define fp s0 + + .text + .globl ffi_call_asm + .type ffi_call_asm, @function + .hidden ffi_call_asm +/* + struct call_context { + floatreg fa[8]; + intreg a[8]; + intreg pad[rv32 ? 2 : 0]; + intreg save_fp, save_ra; + } + void ffi_call_asm (size_t *stackargs, struct call_context *regargs, + void (*fn) (void), void *closure); +*/ + +#define FRAME_LEN (8 * FLTS + 8 * PTRS + 16) + +ffi_call_asm: + .cfi_startproc + + /* + We are NOT going to set up an ordinary stack frame. In order to pass + the stacked args to the called function, we adjust our stack pointer to + a0, which is in the _caller's_ alloca area. 
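FRAME_LEN has to stay in sync with sizeof(call_context) on the C side. A throwaway compile-time check, assuming rv64 with the lp64d ABI (FLTS == PTRS == 8) and a 64-bit size_t:

  #include <stddef.h>

  typedef struct call_context      /* copied from the C side above */
  {
    double fa[8];                  /* ABI_FLOAT fa[8] with ABI_FLEN == 64 */
    size_t a[8];
    char   frame[16];              /* scratch the assembly uses for fp/ra */
  } call_context;

  /* 8*FLTS + 8*PTRS + 16 == 144 on rv64/lp64d */
  _Static_assert(sizeof(call_context) == 8 * 8 + 8 * 8 + 16,
                 "FRAME_LEN in sysv.S must equal sizeof(call_context)");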
We establish our own stack + frame at the end of the call_context. + + Anything below the arguments will be freed at this point, although we + preserve the call_context so that it can be read back in the caller. + */ + + .cfi_def_cfa 11, FRAME_LEN # interim CFA based on a1 + SARG fp, FRAME_LEN - 2*PTRS(a1) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(a1) + .cfi_offset 1, -1*PTRS + + addi fp, a1, FRAME_LEN + mv sp, a0 + .cfi_def_cfa 8, 0 # our frame is fully set up + + # Load arguments + mv t1, a2 + mv t2, a3 + +#if FLTS + FLARG fa0, -FRAME_LEN+0*FLTS(fp) + FLARG fa1, -FRAME_LEN+1*FLTS(fp) + FLARG fa2, -FRAME_LEN+2*FLTS(fp) + FLARG fa3, -FRAME_LEN+3*FLTS(fp) + FLARG fa4, -FRAME_LEN+4*FLTS(fp) + FLARG fa5, -FRAME_LEN+5*FLTS(fp) + FLARG fa6, -FRAME_LEN+6*FLTS(fp) + FLARG fa7, -FRAME_LEN+7*FLTS(fp) +#endif + + LARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + LARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + LARG a2, -FRAME_LEN+8*FLTS+2*PTRS(fp) + LARG a3, -FRAME_LEN+8*FLTS+3*PTRS(fp) + LARG a4, -FRAME_LEN+8*FLTS+4*PTRS(fp) + LARG a5, -FRAME_LEN+8*FLTS+5*PTRS(fp) + LARG a6, -FRAME_LEN+8*FLTS+6*PTRS(fp) + LARG a7, -FRAME_LEN+8*FLTS+7*PTRS(fp) + + /* Call */ + jalr t1 + + /* Save return values - only a0/a1 (fa0/fa1) are used */ +#if FLTS + FSARG fa0, -FRAME_LEN+0*FLTS(fp) + FSARG fa1, -FRAME_LEN+1*FLTS(fp) +#endif + + SARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + SARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + + /* Restore and return */ + addi sp, fp, -FRAME_LEN + .cfi_def_cfa 2, FRAME_LEN + LARG ra, -1*PTRS(fp) + .cfi_restore 1 + LARG fp, -2*PTRS(fp) + .cfi_restore 8 + ret + .cfi_endproc + .size ffi_call_asm, .-ffi_call_asm + + +/* + ffi_closure_asm. Expects address of the passed-in ffi_closure in t1. + void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_closure_asm + .hidden ffi_closure_asm + .type ffi_closure_asm, @function +ffi_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, FFI_TRAMPOLINE_SIZE+0*PTRS(t1) + LARG a1, FFI_TRAMPOLINE_SIZE+1*PTRS(t1) + LARG a2, FFI_TRAMPOLINE_SIZE+2*PTRS(t1) + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_closure_asm, .-ffi_closure_asm + +/* + ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2. 
+ void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_go_closure_asm + .hidden ffi_go_closure_asm + .type ffi_go_closure_asm, @function +ffi_go_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, 1*PTRS(t2) + LARG a1, 2*PTRS(t2) + mv a2, t2 + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_go_closure_asm, .-ffi_go_closure_asm diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv.S new file mode 100644 index 0000000..522d0b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv.S @@ -0,0 +1,293 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Define aliases so that we can handle all ABIs uniformly */ + +#if __SIZEOF_POINTER__ == 8 +#define PTRS 8 +#define LARG ld +#define SARG sd +#else +#define PTRS 4 +#define LARG lw +#define SARG sw +#endif + +#if __riscv_float_abi_double +#define FLTS 8 +#define FLARG fld +#define FSARG fsd +#elif __riscv_float_abi_single +#define FLTS 4 +#define FLARG flw +#define FSARG fsw +#else +#define FLTS 0 +#endif + +#define fp s0 + + .text + .globl ffi_call_asm + .type ffi_call_asm, @function + .hidden ffi_call_asm +/* + struct call_context { + floatreg fa[8]; + intreg a[8]; + intreg pad[rv32 ? 2 : 0]; + intreg save_fp, save_ra; + } + void ffi_call_asm (size_t *stackargs, struct call_context *regargs, + void (*fn) (void), void *closure); +*/ + +#define FRAME_LEN (8 * FLTS + 8 * PTRS + 16) + +ffi_call_asm: + .cfi_startproc + + /* + We are NOT going to set up an ordinary stack frame. In order to pass + the stacked args to the called function, we adjust our stack pointer to + a0, which is in the _caller's_ alloca area. We establish our own stack + frame at the end of the call_context. + + Anything below the arguments will be freed at this point, although we + preserve the call_context so that it can be read back in the caller. + */ + + .cfi_def_cfa 11, FRAME_LEN # interim CFA based on a1 + SARG fp, FRAME_LEN - 2*PTRS(a1) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(a1) + .cfi_offset 1, -1*PTRS + + addi fp, a1, FRAME_LEN + mv sp, a0 + .cfi_def_cfa 8, 0 # our frame is fully set up + + # Load arguments + mv t1, a2 + mv t2, a3 + +#if FLTS + FLARG fa0, -FRAME_LEN+0*FLTS(fp) + FLARG fa1, -FRAME_LEN+1*FLTS(fp) + FLARG fa2, -FRAME_LEN+2*FLTS(fp) + FLARG fa3, -FRAME_LEN+3*FLTS(fp) + FLARG fa4, -FRAME_LEN+4*FLTS(fp) + FLARG fa5, -FRAME_LEN+5*FLTS(fp) + FLARG fa6, -FRAME_LEN+6*FLTS(fp) + FLARG fa7, -FRAME_LEN+7*FLTS(fp) +#endif + + LARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + LARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + LARG a2, -FRAME_LEN+8*FLTS+2*PTRS(fp) + LARG a3, -FRAME_LEN+8*FLTS+3*PTRS(fp) + LARG a4, -FRAME_LEN+8*FLTS+4*PTRS(fp) + LARG a5, -FRAME_LEN+8*FLTS+5*PTRS(fp) + LARG a6, -FRAME_LEN+8*FLTS+6*PTRS(fp) + LARG a7, -FRAME_LEN+8*FLTS+7*PTRS(fp) + + /* Call */ + jalr t1 + + /* Save return values - only a0/a1 (fa0/fa1) are used */ +#if FLTS + FSARG fa0, -FRAME_LEN+0*FLTS(fp) + FSARG fa1, -FRAME_LEN+1*FLTS(fp) +#endif + + SARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + SARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + + /* Restore and return */ + addi sp, fp, -FRAME_LEN + .cfi_def_cfa 2, FRAME_LEN + LARG ra, -1*PTRS(fp) + .cfi_restore 1 + LARG fp, -2*PTRS(fp) + .cfi_restore 8 + ret + .cfi_endproc + .size ffi_call_asm, .-ffi_call_asm + + +/* + ffi_closure_asm. Expects address of the passed-in ffi_closure in t1. 
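t1 holds the closure address because the trampoline built by ffi_prep_closure_loc begins with auipc t1, 0 (t1 <- codeloc); the 64-bit address of ffi_closure_asm is stored right after the four instructions, which is where the 24-byte FFI_TRAMPOLINE_SIZE comes from, and the cif/fun/user_data fields are then read at FFI_TRAMPOLINE_SIZE + i*PTRS from t1. A small layout sketch for the 64-bit case (illustrative constants only):

  enum {
    TRAMP_INSN_BYTES   = 4 * 4,  /* auipc t1 / ld t2,16(t1) / jr t2 / nop */
    TRAMP_TARGET_BYTES = 8       /* address of ffi_closure_asm, loaded by the ld */
  };
  _Static_assert(TRAMP_INSN_BYTES + TRAMP_TARGET_BYTES == 24,
                 "matches FFI_TRAMPOLINE_SIZE in ffitarget.h");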
+ void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_closure_asm + .hidden ffi_closure_asm + .type ffi_closure_asm, @function +ffi_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, FFI_TRAMPOLINE_SIZE+0*PTRS(t1) + LARG a1, FFI_TRAMPOLINE_SIZE+1*PTRS(t1) + LARG a2, FFI_TRAMPOLINE_SIZE+2*PTRS(t1) + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_closure_asm, .-ffi_closure_asm + +/* + ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2. + void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_go_closure_asm + .hidden ffi_go_closure_asm + .type ffi_go_closure_asm, @function +ffi_go_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, 1*PTRS(t2) + LARG a1, 2*PTRS(t2) + mv a2, t2 + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_go_closure_asm, .-ffi_go_closure_asm diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi 2.c new file mode 100644 index 0000000..4035b6e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi 2.c @@ -0,0 +1,756 @@ +/* 
----------------------------------------------------------------------- + ffi.c - Copyright (c) 2000, 2007 Software AG + Copyright (c) 2008 Red Hat, Inc + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +/*====================================================================*/ +/* Includes */ +/* -------- */ +/*====================================================================*/ + +#include +#include +#include +#include "internal.h" + +/*====================== End of Includes =============================*/ + +/*====================================================================*/ +/* Defines */ +/* ------- */ +/*====================================================================*/ + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 5 + +/* Maximum number of FPRs available for argument passing. */ +#ifdef __s390x__ +#define MAX_FPRARGS 4 +#else +#define MAX_FPRARGS 2 +#endif + +/* Round to multiple of 16. */ +#define ROUND_SIZE(size) (((size) + 15) & ~15) + +/*===================== End of Defines ===============================*/ + +/*====================================================================*/ +/* Externals */ +/* --------- */ +/*====================================================================*/ + +struct call_frame +{ + void *back_chain; + void *eos; + unsigned long gpr_args[5]; + unsigned long gpr_save[9]; + unsigned long long fpr_args[4]; +}; + +extern void FFI_HIDDEN ffi_call_SYSV(struct call_frame *, unsigned, void *, + void (*fn)(void), void *); + +extern void ffi_closure_SYSV(void); +extern void ffi_go_closure_SYSV(void); + +/*====================== End of Externals ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_check_struct_type. */ +/* */ +/* Function - Determine if a structure can be passed within a */ +/* general purpose or floating point register. */ +/* */ +/*====================================================================*/ + +static int +ffi_check_struct_type (ffi_type *arg) +{ + size_t size = arg->size; + + /* If the struct has just one element, look at that element + to find out whether to consider the struct as floating point. */ + while (arg->type == FFI_TYPE_STRUCT + && arg->elements[0] && !arg->elements[1]) + arg = arg->elements[0]; + + /* Structs of size 1, 2, 4, and 8 are passed in registers, + just like the corresponding int/float types. 
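Restated as code, the size rule amounts to the following sketch (a hypothetical helper, not part of libffi; has_single_float/has_single_double stand in for "the struct unwraps to exactly one float/double element"):

  #include <ffi.h>
  #include <stddef.h>

  /* returns the FFI type the struct is passed as */
  static int s390_struct_passing(size_t size, int has_single_float,
                                 int has_single_double)
  {
    switch (size)
      {
      case 1:  return FFI_TYPE_UINT8;
      case 2:  return FFI_TYPE_UINT16;
      case 4:  return has_single_float  ? FFI_TYPE_FLOAT  : FFI_TYPE_UINT32;
      case 8:  return has_single_double ? FFI_TYPE_DOUBLE : FFI_TYPE_UINT64;
      default: return FFI_TYPE_POINTER;   /* copied, then passed by pointer */
      }
  }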
*/ + switch (size) + { + case 1: + return FFI_TYPE_UINT8; + + case 2: + return FFI_TYPE_UINT16; + + case 4: + if (arg->type == FFI_TYPE_FLOAT) + return FFI_TYPE_FLOAT; + else + return FFI_TYPE_UINT32; + + case 8: + if (arg->type == FFI_TYPE_DOUBLE) + return FFI_TYPE_DOUBLE; + else + return FFI_TYPE_UINT64; + + default: + break; + } + + /* Other structs are passed via a pointer to the data. */ + return FFI_TYPE_POINTER; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_cif_machdep. */ +/* */ +/* Function - Perform machine dependent CIF processing. */ +/* */ +/*====================================================================*/ + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t struct_size = 0; + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Determine return value handling. */ + + switch (cif->rtype->type) + { + /* Void is easy. */ + case FFI_TYPE_VOID: + cif->flags = FFI390_RET_VOID; + break; + + /* Structures and complex are returned via a hidden pointer. */ + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; /* We need one GPR to pass the pointer. */ + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + cif->flags = FFI390_RET_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = FFI390_RET_DOUBLE; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; + break; +#endif + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI390_RET_INT64; + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* These are to be extended to word size. */ +#ifdef __s390x__ + cif->flags = FFI390_RET_INT64; +#else + cif->flags = FFI390_RET_INT32; +#endif + break; + + default: + FFI_ASSERT (0); + break; + } + + /* Now for the arguments. */ + + for (ptr = cif->arg_types, i = cif->nargs; + i > 0; + i--, ptr++) + { + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. */ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, we must reserve space + to copy its data for proper call-by-value semantics. */ + if (type == FFI_TYPE_POINTER) + struct_size += ROUND_SIZE ((*ptr)->size); + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + /* The first MAX_FPRARGS floating point arguments + go in FPRs, the rest overflow to the stack. */ + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov++; + break; + + /* On 31-bit machines, 64-bit integers are passed in GPR pairs, + if one is still available, or else on the stack. If only one + register is free, skip the register (it won't be used for any + subsequent argument either). 
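As a worked example of that pairing rule (an invented 31-bit signature; r2..r6 are the five argument GPRs), f(int, long long, int, long long, int) allocates: the first int in r2, the first long long in r3/r4, the next int in r5; only r6 then remains, so it is skipped and the second long long plus the trailing int overflow to the stack. The same walk, simulated:

  #include <stdio.h>

  int main(void)
  {
    enum { MAX_GPRARGS = 5 };          /* r2..r6 */
    int slots[] = { 1, 2, 1, 2, 1 };   /* 1 = word-sized arg, 2 = 64-bit pair */
    int n_gpr = 0, n_ov = 0, i;

    for (i = 0; i < 5; i++)
      {
        if (slots[i] == 2 && n_gpr == MAX_GPRARGS - 1)
          n_gpr = MAX_GPRARGS;         /* skip the lone free register */
        if (n_gpr + slots[i] <= MAX_GPRARGS)
          {
            printf("arg %d -> r%d%s\n", i, 2 + n_gpr,
                   slots[i] == 2 ? " (pair)" : "");
            n_gpr += slots[i];
          }
        else
          {
            printf("arg %d -> stack\n", i);
            n_ov += slots[i];
          }
      }
    printf("overflow words: %d\n", n_ov);
    return 0;
  }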
*/ + +#ifndef __s390x__ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + n_gpr += 2; + else + n_ov += 2; + break; +#endif + + /* Everything else is passed in GPRs (until MAX_GPRARGS + have been used) or overflows to the stack. */ + + default: + if (n_gpr < MAX_GPRARGS) + n_gpr++; + else + n_ov++; + break; + } + } + + /* Total stack space as required for overflow arguments + and temporary structure copies. */ + + cif->bytes = ROUND_SIZE (n_ov * sizeof (long)) + struct_size; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_call. */ +/* */ +/* Function - Call the FFI routine. */ +/* */ +/*====================================================================*/ + +static void +ffi_call_int(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue, + void *closure) +{ + int ret_type = cif->flags; + size_t rsize = 0, bytes = cif->bytes; + unsigned char *stack, *p_struct; + struct call_frame *frame; + unsigned long *p_ov, *p_gpr; + unsigned long long *p_fpr; + int n_fpr, n_gpr, n_ov, i, n; + ffi_type **arg_types; + + FFI_ASSERT (cif->abi == FFI_SYSV); + + /* If we don't have a return value, we need to fake one. */ + if (rvalue == NULL) + { + if (ret_type & FFI390_RET_IN_MEM) + rsize = cif->rtype->size; + else + ret_type = FFI390_RET_VOID; + } + + /* The stack space will be filled with those areas: + + dummy structure return (highest addresses) + FPR argument register save area + GPR argument register save area + stack frame for ffi_call_SYSV + temporary struct copies + overflow argument area (lowest addresses) + + We set up the following pointers: + + p_fpr: bottom of the FPR area (growing upwards) + p_gpr: bottom of the GPR area (growing upwards) + p_ov: bottom of the overflow area (growing upwards) + p_struct: top of the struct copy area (growing downwards) + + All areas are kept aligned to twice the word size. + + Note that we're going to create the stack frame for both + ffi_call_SYSV _and_ the target function right here. This + works because we don't make any function calls with more + than 5 arguments (indeed only memcpy and ffi_call_SYSV), + and thus we don't have any stacked outgoing parameters. */ + + stack = alloca (bytes + sizeof(struct call_frame) + rsize); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + /* Link the new frame back to the one from this function. */ + frame->back_chain = __builtin_frame_address (0); + + /* Fill in all of the argument stuff. */ + p_ov = (unsigned long *)stack; + p_struct = (unsigned char *)frame; + p_gpr = frame->gpr_args; + p_fpr = frame->fpr_args; + n_fpr = n_gpr = n_ov = 0; + + /* If we returning a structure then we set the first parameter register + to the address of where we are returning this structure. */ + if (cif->flags & FFI390_RET_IN_MEM) + p_gpr[n_gpr++] = (uintptr_t) rvalue; + + /* Now for the arguments. 
*/ + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + void *arg = avalue[i]; + int type = ty->type; + ffi_arg val; + + restart: + switch (type) + { + case FFI_TYPE_SINT8: + val = *(SINT8 *)arg; + goto do_int; + case FFI_TYPE_UINT8: + val = *(UINT8 *)arg; + goto do_int; + case FFI_TYPE_SINT16: + val = *(SINT16 *)arg; + goto do_int; + case FFI_TYPE_UINT16: + val = *(UINT16 *)arg; + goto do_int; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + val = *(SINT32 *)arg; + goto do_int; + case FFI_TYPE_UINT32: + val = *(UINT32 *)arg; + goto do_int; + case FFI_TYPE_POINTER: + val = *(uintptr_t *)arg; + do_int: + *(n_gpr < MAX_GPRARGS ? p_gpr + n_gpr++ : p_ov + n_ov++) = val; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + val = *(UINT64 *)arg; + goto do_int; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + p_gpr[n_gpr++] = ((UINT32 *) arg)[0], + p_gpr[n_gpr++] = ((UINT32 *) arg)[1]; + else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + break; + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = *(UINT64 *) arg; + else + { +#ifdef __s390x__ + p_ov[n_ov++] = *(UINT64 *) arg; +#else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + } + break; + + case FFI_TYPE_FLOAT: + val = *(UINT32 *)arg; + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = (UINT64)val << 32; + else + p_ov[n_ov++] = val; + break; + + case FFI_TYPE_STRUCT: + /* Check how a structure type is passed. */ + type = ffi_check_struct_type (ty); + /* Some structures are passed via a type they contain. */ + if (type != FFI_TYPE_POINTER) + goto restart; + /* ... otherwise, passed by reference. fallthru. */ + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 16-byte long double is passed via reference. */ +#endif + case FFI_TYPE_COMPLEX: + /* Complex types are passed via reference. */ + p_struct -= ROUND_SIZE (ty->size); + memcpy (p_struct, arg, ty->size); + val = (uintptr_t)p_struct; + goto do_int; + + default: + FFI_ASSERT (0); + break; + } + } + + ffi_call_SYSV (frame, ret_type & FFI360_RET_MASK, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_closure_helper_SYSV. */ +/* */ +/* Function - Call a FFI closure target function. */ +/* */ +/*====================================================================*/ + +void FFI_HIDDEN +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + unsigned long *p_gpr, + unsigned long long *p_fpr, + unsigned long *p_ov) +{ + unsigned long long ret_buffer; + + void *rvalue = &ret_buffer; + void **avalue; + void **p_arg; + + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Allocate buffer for argument list pointers. */ + p_arg = avalue = alloca (cif->nargs * sizeof (void *)); + + /* If we returning a structure, pass the structure address + directly to the target function. Otherwise, have the target + function store the return value to the GPR save area. 
*/ + if (cif->flags & FFI390_RET_IN_MEM) + rvalue = (void *) p_gpr[n_gpr++]; + + /* Now for the arguments. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, p_arg++, ptr++) + { + int deref_struct_pointer = 0; + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. */ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, remember to + retrieve the pointer later. */ + if (type == FFI_TYPE_POINTER) + deref_struct_pointer = 1; + } + + /* Pointers are passed like UINTs of the same size. */ + if (type == FFI_TYPE_POINTER) + { +#ifdef __s390x__ + type = FFI_TYPE_UINT64; +#else + type = FFI_TYPE_UINT32; +#endif + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = &p_ov[n_ov], + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr++]; + else + *p_arg = &p_ov[n_ov++]; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr], n_gpr += 2; + else + *p_arg = &p_ov[n_ov], n_ov += 2; +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 4; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 2; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 2; + break; + + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 1; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 1; + break; + + default: + FFI_ASSERT (0); + break; + } + + /* If this is a struct passed via pointer, we need to + actually retrieve that pointer. */ + if (deref_struct_pointer) + *p_arg = *(void **)*p_arg; + } + + + /* Call the target function. */ + (fun) (cif, rvalue, avalue, user_data); + + /* Convert the return value. */ + switch (cif->rtype->type) + { + /* Void is easy, and so is struct. */ + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: +#endif + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + p_fpr[0] = (long long) *(unsigned int *) rvalue << 32; + break; + + case FFI_TYPE_DOUBLE: + p_fpr[0] = *(unsigned long long *) rvalue; + break; + + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). 
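This full-word copy out of the return buffer is why closure bodies must write *(ffi_arg *)ret even when the logical result is only 8, 16, or 32 bits wide: the helper reloads a whole word into gpr 2, so the value has to be extended already. An illustrative callback (hypothetical, not from libffi):

  #include <ffi.h>

  static void ret_u8(ffi_cif *cif, void *ret, void **args, void *user_data)
  {
    (void)cif; (void)args; (void)user_data;
    *(ffi_arg *)ret = (unsigned char)0xff;   /* correct: full-word store        */
    /* *(unsigned char *)ret = 0xff;            wrong: leaves the high bits stale */
  }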
*/ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + p_gpr[0] = *(unsigned long *) rvalue; +#else + p_gpr[0] = ((unsigned long *) rvalue)[0], + p_gpr[1] = ((unsigned long *) rvalue)[1]; +#endif + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT8: + p_gpr[0] = *(unsigned long *) rvalue; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT8: + p_gpr[0] = *(signed long *) rvalue; + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_closure_loc. */ +/* */ +/* Function - Prepare a FFI closure. */ +/* */ +/*====================================================================*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + static unsigned short const template[] = { + 0x0d10, /* basr %r1,0 */ +#ifndef __s390x__ + 0x9801, 0x1006, /* lm %r0,%r1,6(%r1) */ +#else + 0xeb01, 0x100e, 0x0004, /* lmg %r0,%r1,14(%r1) */ +#endif + 0x07f1 /* br %r1 */ + }; + + unsigned long *tramp = (unsigned long *)&closure->tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + memcpy (tramp, template, sizeof(template)); + tramp[2] = (unsigned long)codeloc; + tramp[3] = (unsigned long)&ffi_closure_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/* Build a Go language closure. */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_SYSV; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi.c new file mode 100644 index 0000000..4035b6e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi.c @@ -0,0 +1,756 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2000, 2007 Software AG + Copyright (c) 2008 Red Hat, Inc + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +/*====================================================================*/ +/* Includes */ +/* -------- */ +/*====================================================================*/ + +#include +#include +#include +#include "internal.h" + +/*====================== End of Includes =============================*/ + +/*====================================================================*/ +/* Defines */ +/* ------- */ +/*====================================================================*/ + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 5 + +/* Maximum number of FPRs available for argument passing. */ +#ifdef __s390x__ +#define MAX_FPRARGS 4 +#else +#define MAX_FPRARGS 2 +#endif + +/* Round to multiple of 16. */ +#define ROUND_SIZE(size) (((size) + 15) & ~15) + +/*===================== End of Defines ===============================*/ + +/*====================================================================*/ +/* Externals */ +/* --------- */ +/*====================================================================*/ + +struct call_frame +{ + void *back_chain; + void *eos; + unsigned long gpr_args[5]; + unsigned long gpr_save[9]; + unsigned long long fpr_args[4]; +}; + +extern void FFI_HIDDEN ffi_call_SYSV(struct call_frame *, unsigned, void *, + void (*fn)(void), void *); + +extern void ffi_closure_SYSV(void); +extern void ffi_go_closure_SYSV(void); + +/*====================== End of Externals ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_check_struct_type. */ +/* */ +/* Function - Determine if a structure can be passed within a */ +/* general purpose or floating point register. */ +/* */ +/*====================================================================*/ + +static int +ffi_check_struct_type (ffi_type *arg) +{ + size_t size = arg->size; + + /* If the struct has just one element, look at that element + to find out whether to consider the struct as floating point. */ + while (arg->type == FFI_TYPE_STRUCT + && arg->elements[0] && !arg->elements[1]) + arg = arg->elements[0]; + + /* Structs of size 1, 2, 4, and 8 are passed in registers, + just like the corresponding int/float types. */ + switch (size) + { + case 1: + return FFI_TYPE_UINT8; + + case 2: + return FFI_TYPE_UINT16; + + case 4: + if (arg->type == FFI_TYPE_FLOAT) + return FFI_TYPE_FLOAT; + else + return FFI_TYPE_UINT32; + + case 8: + if (arg->type == FFI_TYPE_DOUBLE) + return FFI_TYPE_DOUBLE; + else + return FFI_TYPE_UINT64; + + default: + break; + } + + /* Other structs are passed via a pointer to the data. */ + return FFI_TYPE_POINTER; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_cif_machdep. */ +/* */ +/* Function - Perform machine dependent CIF processing. */ +/* */ +/*====================================================================*/ + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t struct_size = 0; + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Determine return value handling. 
*/ + + switch (cif->rtype->type) + { + /* Void is easy. */ + case FFI_TYPE_VOID: + cif->flags = FFI390_RET_VOID; + break; + + /* Structures and complex are returned via a hidden pointer. */ + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; /* We need one GPR to pass the pointer. */ + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + cif->flags = FFI390_RET_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = FFI390_RET_DOUBLE; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; + break; +#endif + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI390_RET_INT64; + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* These are to be extended to word size. */ +#ifdef __s390x__ + cif->flags = FFI390_RET_INT64; +#else + cif->flags = FFI390_RET_INT32; +#endif + break; + + default: + FFI_ASSERT (0); + break; + } + + /* Now for the arguments. */ + + for (ptr = cif->arg_types, i = cif->nargs; + i > 0; + i--, ptr++) + { + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. */ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, we must reserve space + to copy its data for proper call-by-value semantics. */ + if (type == FFI_TYPE_POINTER) + struct_size += ROUND_SIZE ((*ptr)->size); + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + /* The first MAX_FPRARGS floating point arguments + go in FPRs, the rest overflow to the stack. */ + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov++; + break; + + /* On 31-bit machines, 64-bit integers are passed in GPR pairs, + if one is still available, or else on the stack. If only one + register is free, skip the register (it won't be used for any + subsequent argument either). */ + +#ifndef __s390x__ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + n_gpr += 2; + else + n_ov += 2; + break; +#endif + + /* Everything else is passed in GPRs (until MAX_GPRARGS + have been used) or overflows to the stack. */ + + default: + if (n_gpr < MAX_GPRARGS) + n_gpr++; + else + n_ov++; + break; + } + } + + /* Total stack space as required for overflow arguments + and temporary structure copies. */ + + cif->bytes = ROUND_SIZE (n_ov * sizeof (long)) + struct_size; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_call. */ +/* */ +/* Function - Call the FFI routine. 
*/ +/* */ +/*====================================================================*/ + +static void +ffi_call_int(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue, + void *closure) +{ + int ret_type = cif->flags; + size_t rsize = 0, bytes = cif->bytes; + unsigned char *stack, *p_struct; + struct call_frame *frame; + unsigned long *p_ov, *p_gpr; + unsigned long long *p_fpr; + int n_fpr, n_gpr, n_ov, i, n; + ffi_type **arg_types; + + FFI_ASSERT (cif->abi == FFI_SYSV); + + /* If we don't have a return value, we need to fake one. */ + if (rvalue == NULL) + { + if (ret_type & FFI390_RET_IN_MEM) + rsize = cif->rtype->size; + else + ret_type = FFI390_RET_VOID; + } + + /* The stack space will be filled with those areas: + + dummy structure return (highest addresses) + FPR argument register save area + GPR argument register save area + stack frame for ffi_call_SYSV + temporary struct copies + overflow argument area (lowest addresses) + + We set up the following pointers: + + p_fpr: bottom of the FPR area (growing upwards) + p_gpr: bottom of the GPR area (growing upwards) + p_ov: bottom of the overflow area (growing upwards) + p_struct: top of the struct copy area (growing downwards) + + All areas are kept aligned to twice the word size. + + Note that we're going to create the stack frame for both + ffi_call_SYSV _and_ the target function right here. This + works because we don't make any function calls with more + than 5 arguments (indeed only memcpy and ffi_call_SYSV), + and thus we don't have any stacked outgoing parameters. */ + + stack = alloca (bytes + sizeof(struct call_frame) + rsize); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + /* Link the new frame back to the one from this function. */ + frame->back_chain = __builtin_frame_address (0); + + /* Fill in all of the argument stuff. */ + p_ov = (unsigned long *)stack; + p_struct = (unsigned char *)frame; + p_gpr = frame->gpr_args; + p_fpr = frame->fpr_args; + n_fpr = n_gpr = n_ov = 0; + + /* If we returning a structure then we set the first parameter register + to the address of where we are returning this structure. */ + if (cif->flags & FFI390_RET_IN_MEM) + p_gpr[n_gpr++] = (uintptr_t) rvalue; + + /* Now for the arguments. */ + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + void *arg = avalue[i]; + int type = ty->type; + ffi_arg val; + + restart: + switch (type) + { + case FFI_TYPE_SINT8: + val = *(SINT8 *)arg; + goto do_int; + case FFI_TYPE_UINT8: + val = *(UINT8 *)arg; + goto do_int; + case FFI_TYPE_SINT16: + val = *(SINT16 *)arg; + goto do_int; + case FFI_TYPE_UINT16: + val = *(UINT16 *)arg; + goto do_int; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + val = *(SINT32 *)arg; + goto do_int; + case FFI_TYPE_UINT32: + val = *(UINT32 *)arg; + goto do_int; + case FFI_TYPE_POINTER: + val = *(uintptr_t *)arg; + do_int: + *(n_gpr < MAX_GPRARGS ? 
p_gpr + n_gpr++ : p_ov + n_ov++) = val; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + val = *(UINT64 *)arg; + goto do_int; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + p_gpr[n_gpr++] = ((UINT32 *) arg)[0], + p_gpr[n_gpr++] = ((UINT32 *) arg)[1]; + else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + break; + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = *(UINT64 *) arg; + else + { +#ifdef __s390x__ + p_ov[n_ov++] = *(UINT64 *) arg; +#else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + } + break; + + case FFI_TYPE_FLOAT: + val = *(UINT32 *)arg; + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = (UINT64)val << 32; + else + p_ov[n_ov++] = val; + break; + + case FFI_TYPE_STRUCT: + /* Check how a structure type is passed. */ + type = ffi_check_struct_type (ty); + /* Some structures are passed via a type they contain. */ + if (type != FFI_TYPE_POINTER) + goto restart; + /* ... otherwise, passed by reference. fallthru. */ + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 16-byte long double is passed via reference. */ +#endif + case FFI_TYPE_COMPLEX: + /* Complex types are passed via reference. */ + p_struct -= ROUND_SIZE (ty->size); + memcpy (p_struct, arg, ty->size); + val = (uintptr_t)p_struct; + goto do_int; + + default: + FFI_ASSERT (0); + break; + } + } + + ffi_call_SYSV (frame, ret_type & FFI360_RET_MASK, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_closure_helper_SYSV. */ +/* */ +/* Function - Call a FFI closure target function. */ +/* */ +/*====================================================================*/ + +void FFI_HIDDEN +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + unsigned long *p_gpr, + unsigned long long *p_fpr, + unsigned long *p_ov) +{ + unsigned long long ret_buffer; + + void *rvalue = &ret_buffer; + void **avalue; + void **p_arg; + + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Allocate buffer for argument list pointers. */ + p_arg = avalue = alloca (cif->nargs * sizeof (void *)); + + /* If we returning a structure, pass the structure address + directly to the target function. Otherwise, have the target + function store the return value to the GPR save area. */ + if (cif->flags & FFI390_RET_IN_MEM) + rvalue = (void *) p_gpr[n_gpr++]; + + /* Now for the arguments. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, p_arg++, ptr++) + { + int deref_struct_pointer = 0; + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. 
*/ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, remember to + retrieve the pointer later. */ + if (type == FFI_TYPE_POINTER) + deref_struct_pointer = 1; + } + + /* Pointers are passed like UINTs of the same size. */ + if (type == FFI_TYPE_POINTER) + { +#ifdef __s390x__ + type = FFI_TYPE_UINT64; +#else + type = FFI_TYPE_UINT32; +#endif + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = &p_ov[n_ov], + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr++]; + else + *p_arg = &p_ov[n_ov++]; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr], n_gpr += 2; + else + *p_arg = &p_ov[n_ov], n_ov += 2; +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 4; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 2; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 2; + break; + + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 1; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 1; + break; + + default: + FFI_ASSERT (0); + break; + } + + /* If this is a struct passed via pointer, we need to + actually retrieve that pointer. */ + if (deref_struct_pointer) + *p_arg = *(void **)*p_arg; + } + + + /* Call the target function. */ + (fun) (cif, rvalue, avalue, user_data); + + /* Convert the return value. */ + switch (cif->rtype->type) + { + /* Void is easy, and so is struct. */ + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: +#endif + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + p_fpr[0] = (long long) *(unsigned int *) rvalue << 32; + break; + + case FFI_TYPE_DOUBLE: + p_fpr[0] = *(unsigned long long *) rvalue; + break; + + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + p_gpr[0] = *(unsigned long *) rvalue; +#else + p_gpr[0] = ((unsigned long *) rvalue)[0], + p_gpr[1] = ((unsigned long *) rvalue)[1]; +#endif + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT8: + p_gpr[0] = *(unsigned long *) rvalue; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT8: + p_gpr[0] = *(signed long *) rvalue; + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_closure_loc. */ +/* */ +/* Function - Prepare a FFI closure. 
*/ +/* */ +/*====================================================================*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + static unsigned short const template[] = { + 0x0d10, /* basr %r1,0 */ +#ifndef __s390x__ + 0x9801, 0x1006, /* lm %r0,%r1,6(%r1) */ +#else + 0xeb01, 0x100e, 0x0004, /* lmg %r0,%r1,14(%r1) */ +#endif + 0x07f1 /* br %r1 */ + }; + + unsigned long *tramp = (unsigned long *)&closure->tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + memcpy (tramp, template, sizeof(template)); + tramp[2] = (unsigned long)codeloc; + tramp[3] = (unsigned long)&ffi_closure_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/* Build a Go language closure. */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_SYSV; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget 2.h new file mode 100644 index 0000000..d8a4ee4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget 2.h @@ -0,0 +1,70 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for S390. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
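
The closure machinery above is driven entirely through libffi's public API: ffi_prep_cif builds the call descriptor, ffi_closure_alloc pairs a writable ffi_closure with an executable trampoline of FFI_TRAMPOLINE_SIZE bytes, and ffi_prep_closure_loc (defined earlier in this file for s390) fills that trampoline so it eventually lands in ffi_closure_helper_SYSV. The sketch below is illustrative only — it is not part of the vendored sources, and the callback name add_binding is made up — but it shows how these pieces are normally used together.

#include <ffi.h>
#include <stdio.h>

/* Hypothetical closure target: adds two ints.
   Signature matches the fun pointer taken by ffi_prep_closure_loc. */
static void add_binding(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    int a = *(int *)args[0];
    int b = *(int *)args[1];
    *(ffi_arg *)ret = a + b;   /* small integer returns are widened to ffi_arg */
}

int main(void)
{
    ffi_cif cif;
    ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
    void *code;                /* executable trampoline address */
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
    int (*fn)(int, int);

    if (closure == NULL)
        return 1;
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) != FFI_OK)
        return 1;
    /* Installs the per-target trampoline template shown above. */
    if (ffi_prep_closure_loc(closure, &cif, add_binding, NULL, code) != FFI_OK)
        return 1;

    fn = (int (*)(int, int))code;
    printf("%d\n", fn(2, 3));  /* prints 5 */

    ffi_closure_free(closure);
    return 0;
}
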
+#endif + +#if defined (__s390x__) +#ifndef S390X +#define S390X +#endif +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#ifdef S390X +#define FFI_TRAMPOLINE_SIZE 32 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget.h new file mode 100644 index 0000000..d8a4ee4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget.h @@ -0,0 +1,70 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for S390. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#if defined (__s390x__) +#ifndef S390X +#define S390X +#endif +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#ifdef S390X +#define FFI_TRAMPOLINE_SIZE 32 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal 2.h new file mode 100644 index 0000000..b875578 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal 2.h @@ -0,0 +1,11 @@ +/* If these values change, sysv.S must be adapted! */ +#define FFI390_RET_DOUBLE 0 +#define FFI390_RET_FLOAT 1 +#define FFI390_RET_INT64 2 +#define FFI390_RET_INT32 3 +#define FFI390_RET_VOID 4 + +#define FFI360_RET_MASK 7 +#define FFI390_RET_IN_MEM 8 + +#define FFI390_RET_STRUCT (FFI390_RET_VOID | FFI390_RET_IN_MEM) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal.h new file mode 100644 index 0000000..b875578 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal.h @@ -0,0 +1,11 @@ +/* If these values change, sysv.S must be adapted! */ +#define FFI390_RET_DOUBLE 0 +#define FFI390_RET_FLOAT 1 +#define FFI390_RET_INT64 2 +#define FFI390_RET_INT32 3 +#define FFI390_RET_VOID 4 + +#define FFI360_RET_MASK 7 +#define FFI390_RET_IN_MEM 8 + +#define FFI390_RET_STRUCT (FFI390_RET_VOID | FFI390_RET_IN_MEM) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv 2.S new file mode 100644 index 0000000..c4b5006 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv 2.S @@ -0,0 +1,325 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2000 Software AG + Copyright (c) 2008 Red Hat, Inc. + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + +#ifndef __s390x__ + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. + .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + st %r6,44(%r2) # Save registers + stm %r12,%r14,48(%r2) + lr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 44 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_def_cfa_register r13 + st %r2,0(%r15) # Set up back chain + sla %r3,3 # ret_type *= 8 + lr %r12,%r4 # Save ret_addr + lr %r1,%r5 # Save fun + lr %r0,%r6 # Install static chain + + # Set return address, so that there is only one indirect jump. +#ifdef HAVE_AS_S390_ZARCH + larl %r14,.Ltable + ar %r14,%r3 +#else + basr %r14,0 +0: la %r14,.Ltable-0b(%r14,%r3) +#endif + + lm %r2,%r6,8(%r13) # Load arguments + ld %f0,64(%r13) + ld %f2,72(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_FLOAT + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + st %r3,4(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_INT32 + st %r2,0(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_VOID +.Ldone: + l %r14,56(%r13) + l %r12,48(%r13) + l %r6,44(%r13) + l %r13,52(%r13) + .cfi_restore 14 + .cfi_restore 13 + .cfi_restore 12 + .cfi_restore 6 + .cfi_def_cfa r15, 96 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Load closure -> user_data + l %r2,4(%r4) # ->cif + l %r3,8(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Closure + l %r2,16(%r4) # ->cif + l %r3,20(%r4) # ->fun + l %r4,24(%r4) # ->user_data +.Ldoclosure: + stm %r12,%r15,48(%r15) # Save registers + lr %r12,%r15 + .cfi_def_cfa_register r12 + .cfi_rel_offset r6, 24 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_rel_offset r15, 60 +#ifndef HAVE_AS_S390_ZARCH + basr %r13,0 # Set up base register +.Lcbase: + l %r1,.Lchelper-.Lcbase(%r13) # Get helper function +#endif + ahi %r15,-96-8 # Set up stack frame + st %r12,0(%r15) # Set up back chain + + std %f0,64(%r12) # Save fp arguments + std %f2,72(%r12) + + la %r5,96(%r12) # Overflow + st %r5,96(%r15) + la %r6,64(%r12) # FPRs + la %r5,8(%r12) # GPRs +#ifdef HAVE_AS_S390_ZARCH + brasl %r14,ffi_closure_helper_SYSV +#else + bas %r14,0(%r1,%r13) # Call helper +#endif + + lr %r15,%r12 + .cfi_def_cfa_register r15 + lm %r12,%r14,48(%r12) # Restore saved registers + l %r6,24(%r15) + ld %f0,64(%r15) # Load return registers + lm %r2,%r3,8(%r15) + br %r14 + .cfi_endproc + +#ifndef HAVE_AS_S390_ZARCH + .align 4 +.Lchelper: + .long ffi_closure_helper_SYSV-.Lcbase +#endif + + .size ffi_closure_SYSV,.-ffi_closure_SYSV + +#else 
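
        # 64-bit (s390x) variant.  The caller (ffi_call_int) has already
        # built the argument frame and passes its address in r2.  The code
        # below saves the callee-saved registers it touches into that frame,
        # loads the argument GPRs/FPRs from it, and computes its own return
        # address as .Ltable + ret_type*8, so each 8-byte slot of the table
        # stores the return value for one FFI390_RET_* code and then reaches
        # .Ldone.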
+ + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. + .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + stg %r6,88(%r2) # Save registers + stmg %r12,%r14,96(%r2) + lgr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 88 + .cfi_rel_offset r12, 96 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_def_cfa_register r13 + stg %r2,0(%r15) # Set up back chain + larl %r14,.Ltable # Set up return address + slag %r3,%r3,3 # ret_type *= 8 + lgr %r12,%r4 # Save ret_addr + lgr %r1,%r5 # Save fun + lgr %r0,%r6 # Install static chain + agr %r14,%r3 + lmg %r2,%r6,16(%r13) # Load arguments + ld %f0,128(%r13) + ld %f2,136(%r13) + ld %f4,144(%r13) + ld %f6,152(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_DOUBLE + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + stg %r2,0(%r12) + + .balign 8 +# FFI390_RET_INT32 + # Never used, as we always store type ffi_arg. + # But the stg above is 6 bytes and we cannot + # jump around this case, so fall through. + nop + nop + + .balign 8 +# FFI390_RET_VOID +.Ldone: + lg %r14,112(%r13) + lg %r12,96(%r13) + lg %r6,88(%r13) + lg %r13,104(%r13) + .cfi_restore r14 + .cfi_restore r13 + .cfi_restore r12 + .cfi_restore r6 + .cfi_def_cfa r15, 160 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure -> user_data + lg %r2,8(%r4) # ->cif + lg %r3,16(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + .size ffi_go_closure_SYSV,.-ffi_go_closure_SYSV + + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure + lg %r2,32(%r4) # ->cif + lg %r3,40(%r4) # ->fun + lg %r4,48(%r4) # ->user_data +.Ldoclosure: + stmg %r13,%r15,104(%r15) # Save registers + lgr %r13,%r15 + .cfi_def_cfa_register r13 + .cfi_rel_offset r6, 48 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_rel_offset r15, 120 + aghi %r15,-160-16 # Set up stack frame + stg %r13,0(%r15) # Set up back chain + + std %f0,128(%r13) # Save fp arguments + std %f2,136(%r13) + std %f4,144(%r13) + std %f6,152(%r13) + la %r5,160(%r13) # Overflow + stg %r5,160(%r15) + la %r6,128(%r13) # FPRs + la %r5,16(%r13) # GPRs + brasl %r14,ffi_closure_helper_SYSV # Call helper + + lgr %r15,%r13 + .cfi_def_cfa_register r15 + lmg %r13,%r14,104(%r13) # Restore saved registers + lg %r6,48(%r15) + ld %f0,128(%r15) # Load return registers + lg %r2,16(%r15) + br %r14 + .cfi_endproc + .size ffi_closure_SYSV,.-ffi_closure_SYSV +#endif /* !s390x */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv.S new file mode 100644 index 0000000..c4b5006 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv.S @@ -0,0 +1,325 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2000 Software AG + Copyright (c) 2008 Red Hat, Inc. 
+ + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + +#ifndef __s390x__ + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. + .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + st %r6,44(%r2) # Save registers + stm %r12,%r14,48(%r2) + lr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 44 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_def_cfa_register r13 + st %r2,0(%r15) # Set up back chain + sla %r3,3 # ret_type *= 8 + lr %r12,%r4 # Save ret_addr + lr %r1,%r5 # Save fun + lr %r0,%r6 # Install static chain + + # Set return address, so that there is only one indirect jump. +#ifdef HAVE_AS_S390_ZARCH + larl %r14,.Ltable + ar %r14,%r3 +#else + basr %r14,0 +0: la %r14,.Ltable-0b(%r14,%r3) +#endif + + lm %r2,%r6,8(%r13) # Load arguments + ld %f0,64(%r13) + ld %f2,72(%r13) + br %r1 # ... 
and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_FLOAT + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + st %r3,4(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_INT32 + st %r2,0(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_VOID +.Ldone: + l %r14,56(%r13) + l %r12,48(%r13) + l %r6,44(%r13) + l %r13,52(%r13) + .cfi_restore 14 + .cfi_restore 13 + .cfi_restore 12 + .cfi_restore 6 + .cfi_def_cfa r15, 96 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Load closure -> user_data + l %r2,4(%r4) # ->cif + l %r3,8(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Closure + l %r2,16(%r4) # ->cif + l %r3,20(%r4) # ->fun + l %r4,24(%r4) # ->user_data +.Ldoclosure: + stm %r12,%r15,48(%r15) # Save registers + lr %r12,%r15 + .cfi_def_cfa_register r12 + .cfi_rel_offset r6, 24 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_rel_offset r15, 60 +#ifndef HAVE_AS_S390_ZARCH + basr %r13,0 # Set up base register +.Lcbase: + l %r1,.Lchelper-.Lcbase(%r13) # Get helper function +#endif + ahi %r15,-96-8 # Set up stack frame + st %r12,0(%r15) # Set up back chain + + std %f0,64(%r12) # Save fp arguments + std %f2,72(%r12) + + la %r5,96(%r12) # Overflow + st %r5,96(%r15) + la %r6,64(%r12) # FPRs + la %r5,8(%r12) # GPRs +#ifdef HAVE_AS_S390_ZARCH + brasl %r14,ffi_closure_helper_SYSV +#else + bas %r14,0(%r1,%r13) # Call helper +#endif + + lr %r15,%r12 + .cfi_def_cfa_register r15 + lm %r12,%r14,48(%r12) # Restore saved registers + l %r6,24(%r15) + ld %f0,64(%r15) # Load return registers + lm %r2,%r3,8(%r15) + br %r14 + .cfi_endproc + +#ifndef HAVE_AS_S390_ZARCH + .align 4 +.Lchelper: + .long ffi_closure_helper_SYSV-.Lcbase +#endif + + .size ffi_closure_SYSV,.-ffi_closure_SYSV + +#else + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. + .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + stg %r6,88(%r2) # Save registers + stmg %r12,%r14,96(%r2) + lgr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 88 + .cfi_rel_offset r12, 96 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_def_cfa_register r13 + stg %r2,0(%r15) # Set up back chain + larl %r14,.Ltable # Set up return address + slag %r3,%r3,3 # ret_type *= 8 + lgr %r12,%r4 # Save ret_addr + lgr %r1,%r5 # Save fun + lgr %r0,%r6 # Install static chain + agr %r14,%r3 + lmg %r2,%r6,16(%r13) # Load arguments + ld %f0,128(%r13) + ld %f2,136(%r13) + ld %f4,144(%r13) + ld %f6,152(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_DOUBLE + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + stg %r2,0(%r12) + + .balign 8 +# FFI390_RET_INT32 + # Never used, as we always store type ffi_arg. + # But the stg above is 6 bytes and we cannot + # jump around this case, so fall through. 
+ nop + nop + + .balign 8 +# FFI390_RET_VOID +.Ldone: + lg %r14,112(%r13) + lg %r12,96(%r13) + lg %r6,88(%r13) + lg %r13,104(%r13) + .cfi_restore r14 + .cfi_restore r13 + .cfi_restore r12 + .cfi_restore r6 + .cfi_def_cfa r15, 160 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure -> user_data + lg %r2,8(%r4) # ->cif + lg %r3,16(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + .size ffi_go_closure_SYSV,.-ffi_go_closure_SYSV + + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure + lg %r2,32(%r4) # ->cif + lg %r3,40(%r4) # ->fun + lg %r4,48(%r4) # ->user_data +.Ldoclosure: + stmg %r13,%r15,104(%r15) # Save registers + lgr %r13,%r15 + .cfi_def_cfa_register r13 + .cfi_rel_offset r6, 48 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_rel_offset r15, 120 + aghi %r15,-160-16 # Set up stack frame + stg %r13,0(%r15) # Set up back chain + + std %f0,128(%r13) # Save fp arguments + std %f2,136(%r13) + std %f4,144(%r13) + std %f6,152(%r13) + la %r5,160(%r13) # Overflow + stg %r5,160(%r15) + la %r6,128(%r13) # FPRs + la %r5,16(%r13) # GPRs + brasl %r14,ffi_closure_helper_SYSV # Call helper + + lgr %r15,%r13 + .cfi_def_cfa_register r15 + lmg %r13,%r14,104(%r13) # Restore saved registers + lg %r6,48(%r15) + ld %f0,128(%r15) # Load return registers + lg %r2,16(%r15) + br %r14 + .cfi_endproc + .size ffi_closure_SYSV,.-ffi_closure_SYSV +#endif /* !s390x */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi 2.c new file mode 100644 index 0000000..9ec86bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi 2.c @@ -0,0 +1,717 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2002-2008, 2012 Kaz Kojima + Copyright (c) 2008 Red Hat, Inc. + + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 4 +#if defined(__SH4__) +#define NFREGARG 8 +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +/* If the structure has essentially an unique element, return its type. */ +static int +simple_type (ffi_type *arg) +{ + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + else if (arg->elements[1]) + return FFI_TYPE_STRUCT; + + return simple_type (arg->elements[0]); +} + +static int +return_type (ffi_type *arg) +{ + unsigned short type; + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + type = simple_type (arg->elements[0]); + if (! arg->elements[1]) + { + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + return FFI_TYPE_INT; + + default: + return type; + } + } + + /* gcc uses r0/r1 pair for some kind of structures. */ + if (arg->size <= 2 * sizeof (int)) + { + int i = 0; + ffi_type *e; + + while ((e = arg->elements[i++])) + { + type = simple_type (e); + switch (type) + { + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + return FFI_TYPE_UINT64; + + default: + break; + } + } + } + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register int tmp; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + int greg, ireg; +#if defined(__SH4__) + int freg = 0; +#endif + + tmp = 0; + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + ireg = STRUCT_VALUE_ADDRESS_WITH_ARG ? 1 : 0; + } + else + ireg = 0; + + /* Set arguments for registers. 
*/ + greg = ireg; + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + /* Set arguments on stack. */ + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! 
defined(__SH4__)) + else if (greg < NGREGARG) + { + greg = NGREGARG; + continue; + } +#endif + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; +#if defined(__SH4__) + int freg = 0; +#endif + + cif->flags = 0; + + greg = ((return_type (cif->rtype) == FFI_TYPE_STRUCT) && + STRUCT_VALUE_ADDRESS_WITH_ARG) ? 1 : 0; + +#if defined(__SH4__) + for (i = j = 0; i < cif->nargs && j < 12; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + if (freg >= NFREGARG) + continue; + freg++; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + case FFI_TYPE_DOUBLE: + if ((freg + 1) >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + default: + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 >= NGREGARG) + continue; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + break; + } + } +#else + for (i = j = 0; i < cif->nargs && j < 4; i++) + { + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + n = NGREGARG - greg; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + } +#endif + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags += (unsigned) (return_type (cif->rtype)) << 24; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += (unsigned) cif->rtype->type << 24; + break; + + default: + cif->flags += FFI_TYPE_INT << 24; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, + fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +#if defined(__SH4__) +extern void __ic_invalidate (void *line); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + unsigned int insn; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Set T bit if the function returns a struct pointed with R2. */ + insn = (return_type (cif->rtype) == FFI_TYPE_STRUCT + ? 
0x0018 /* sett */ + : 0x0008 /* clrt */); + +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0xd301d102; + tramp[1] = 0x0000412b | (insn << 16); +#else + tramp[0] = 0xd102d301; + tramp[1] = 0x412b0000 | insn; +#endif + *(void **) &tramp[2] = (void *)codeloc; /* ctx */ + *(void **) &tramp[3] = (void *)ffi_closure_SYSV; /* funaddr */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#if defined(__SH4__) + /* Flush the icache. */ + __ic_invalidate(codeloc); +#endif + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +#ifdef __LITTLE_ENDIAN__ +#define OFS_INT8 0 +#define OFS_INT16 0 +#else +#define OFS_INT8 3 +#define OFS_INT16 2 +#endif + +int +ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue, + unsigned long *pgr, unsigned long *pfr, + unsigned long *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int ireg, greg = 0; +#if defined(__SH4__) + int freg = 0; +#endif + ffi_cif *cif; + + cif = closure->cif; + avalue = alloca(cif->nargs * sizeof(void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (cif->rtype->type == FFI_TYPE_STRUCT && STRUCT_VALUE_ADDRESS_WITH_ARG) + { + rvalue = (void *) *pgr++; + ireg = 1; + } + else + ireg = 0; + + cif = closure->cif; + greg = ireg; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pgr) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pgr) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pgr; + break; + + default: + FFI_ASSERT(0); + } + pgr++; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + avalue[i] = pfr; + pfr++; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + avalue[i] = pgr; + pgr++; + } + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + if (freg & 1) + pfr++; + freg = (freg + 1) & ~1; + freg += 2; + avalue[i] = pfr; + pfr += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + avalue[i] = pgr; + pgr += n; + } + } + + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pst) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pst) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pst; + break; + + default: + FFI_ASSERT(0); + } + pst++; + } + else if (z == 
sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + avalue[i] = pst; + pst++; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + avalue[i] = pst; + pst += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! defined(__SH4__)) + else if (greg < NGREGARG) + { + greg += n; + pst += greg - NGREGARG; + continue; + } +#endif + avalue[i] = pst; + pst += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. */ + return return_type (cif->rtype); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi.c new file mode 100644 index 0000000..9ec86bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi.c @@ -0,0 +1,717 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2002-2008, 2012 Kaz Kojima + Copyright (c) 2008 Red Hat, Inc. + + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 4 +#if defined(__SH4__) +#define NFREGARG 8 +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +/* If the structure has essentially an unique element, return its type. */ +static int +simple_type (ffi_type *arg) +{ + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + else if (arg->elements[1]) + return FFI_TYPE_STRUCT; + + return simple_type (arg->elements[0]); +} + +static int +return_type (ffi_type *arg) +{ + unsigned short type; + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + type = simple_type (arg->elements[0]); + if (! arg->elements[1]) + { + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + return FFI_TYPE_INT; + + default: + return type; + } + } + + /* gcc uses r0/r1 pair for some kind of structures. 
*/ + if (arg->size <= 2 * sizeof (int)) + { + int i = 0; + ffi_type *e; + + while ((e = arg->elements[i++])) + { + type = simple_type (e); + switch (type) + { + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + return FFI_TYPE_UINT64; + + default: + break; + } + } + } + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register int tmp; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + int greg, ireg; +#if defined(__SH4__) + int freg = 0; +#endif + + tmp = 0; + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + ireg = STRUCT_VALUE_ADDRESS_WITH_ARG ? 1 : 0; + } + else + ireg = 0; + + /* Set arguments for registers. */ + greg = ireg; + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + /* Set arguments on stack. 
*/ + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! defined(__SH4__)) + else if (greg < NGREGARG) + { + greg = NGREGARG; + continue; + } +#endif + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; +#if defined(__SH4__) + int freg = 0; +#endif + + cif->flags = 0; + + greg = ((return_type (cif->rtype) == FFI_TYPE_STRUCT) && + STRUCT_VALUE_ADDRESS_WITH_ARG) ? 
1 : 0; + +#if defined(__SH4__) + for (i = j = 0; i < cif->nargs && j < 12; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + if (freg >= NFREGARG) + continue; + freg++; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + case FFI_TYPE_DOUBLE: + if ((freg + 1) >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + default: + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 >= NGREGARG) + continue; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + break; + } + } +#else + for (i = j = 0; i < cif->nargs && j < 4; i++) + { + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + n = NGREGARG - greg; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + } +#endif + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags += (unsigned) (return_type (cif->rtype)) << 24; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += (unsigned) cif->rtype->type << 24; + break; + + default: + cif->flags += FFI_TYPE_INT << 24; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, + fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +#if defined(__SH4__) +extern void __ic_invalidate (void *line); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + unsigned int insn; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Set T bit if the function returns a struct pointed with R2. */ + insn = (return_type (cif->rtype) == FFI_TYPE_STRUCT + ? 0x0018 /* sett */ + : 0x0008 /* clrt */); + +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0xd301d102; + tramp[1] = 0x0000412b | (insn << 16); +#else + tramp[0] = 0xd102d301; + tramp[1] = 0x412b0000 | insn; +#endif + *(void **) &tramp[2] = (void *)codeloc; /* ctx */ + *(void **) &tramp[3] = (void *)ffi_closure_SYSV; /* funaddr */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#if defined(__SH4__) + /* Flush the icache. 
*/ + __ic_invalidate(codeloc); +#endif + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +#ifdef __LITTLE_ENDIAN__ +#define OFS_INT8 0 +#define OFS_INT16 0 +#else +#define OFS_INT8 3 +#define OFS_INT16 2 +#endif + +int +ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue, + unsigned long *pgr, unsigned long *pfr, + unsigned long *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int ireg, greg = 0; +#if defined(__SH4__) + int freg = 0; +#endif + ffi_cif *cif; + + cif = closure->cif; + avalue = alloca(cif->nargs * sizeof(void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (cif->rtype->type == FFI_TYPE_STRUCT && STRUCT_VALUE_ADDRESS_WITH_ARG) + { + rvalue = (void *) *pgr++; + ireg = 1; + } + else + ireg = 0; + + cif = closure->cif; + greg = ireg; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pgr) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pgr) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pgr; + break; + + default: + FFI_ASSERT(0); + } + pgr++; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + avalue[i] = pfr; + pfr++; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + avalue[i] = pgr; + pgr++; + } + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + if (freg & 1) + pfr++; + freg = (freg + 1) & ~1; + freg += 2; + avalue[i] = pfr; + pfr += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + avalue[i] = pgr; + pgr += n; + } + } + + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pst) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pst) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pst; + break; + + default: + FFI_ASSERT(0); + } + pst++; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + avalue[i] = pst; + pst++; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + avalue[i] = pst; + pst += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 
1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! defined(__SH4__)) + else if (greg < NGREGARG) + { + greg += n; + pst += greg - NGREGARG; + continue; + } +#endif + avalue[i] = pst; + pst += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. */ + return return_type (cif->rtype); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget 2.h new file mode 100644 index 0000000..a36bf42 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget 2.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget.h new file mode 100644 index 0000000..a36bf42 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv 2.S new file mode 100644 index 0000000..5be7516 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv 2.S @@ -0,0 +1,850 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2002, 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. 
*/ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +.text + + # r4: ffi_prep_args + # r5: &ecif + # r6: bytes + # r7: flags + # sp+0: rvalue + # sp+4: fn + + # This assumes we are using gas. +ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + mov.l r8,@-r15 +.LCFI0: + mov.l r9,@-r15 +.LCFI1: + mov.l r10,@-r15 +.LCFI2: + mov.l r12,@-r15 +.LCFI3: + mov.l r14,@-r15 +.LCFI4: + sts.l pr,@-r15 +.LCFI5: + mov r15,r14 +.LCFI6: +#if defined(__SH4__) + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r1 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + mov #4,r3 + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r1,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_f + + mov r3,r0 + and #1,r0 + tst r0,r0 + bt 1f + add #1,r3 +1: + mov #12,r0 + cmp/hs r0,r3 + bt/s 3f + shlr2 r1 + bsr L_pop_d + nop +3: + add #2,r3 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r3,r0 + add r0,r0 + add r3,r0 + add #-12,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_f: + cmp/eq #FFI_TYPE_FLOAT,r0 + bf L_pass_i + + mov #12,r0 + cmp/hs r0,r3 + bt/s 2f + shlr2 r1 + bsr L_pop_f + nop +2: + add #1,r3 + bra L_pass + add #-4,r8 + +L_pop_f: + mov r3,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r1 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr1,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr0,@r1 +#else + fmov.s fr0,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr1,@r1 +#endif + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_f + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_f: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bf L_ret_i + + mov.l @(24,r14),r1 + bra L_epilogue + fmov.s fr0,@r1 + +L_ret_i: + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue + + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#else + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r3 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r3,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_i + + mov r15,r0 + and #7,r0 + tst r0,r0 + bt 1f + add #4,r15 +1: + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_d + nop +2: + add #2,r2 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r2,r0 + add r0,r0 + add r2,r0 + add #-12,r0 + add r0,r0 + braf r0 + nop + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + rts + mov.l @r15+,r7 + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_i + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_i: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bt 1f + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue +1: + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#endif +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + +.globl ffi_closure_helper_SYSV + +ENTRY(ffi_closure_SYSV) +.LFB2: + mov.l r7,@-r15 +.LCFI7: + mov.l r6,@-r15 +.LCFI8: + mov.l r5,@-r15 +.LCFI9: + mov.l r4,@-r15 +.LCFIA: + mov.l r14,@-r15 +.LCFIB: + sts.l pr,@-r15 + + /* Stack layout: + xx bytes (on stack parameters) + 16 bytes (register parameters) + 4 bytes (saved frame pointer) + 4 bytes (saved return address) + 32 bytes (floating register parameters, SH-4 only) + 8 bytes (result) + 4 bytes (pad) + 4 bytes (5th arg) + <- new stack pointer + */ +.LCFIC: +#if defined(__SH4__) + add #-48,r15 +#else + add #-16,r15 +#endif +.LCFID: + mov r15,r14 +.LCFIE: + +#if defined(__SH4__) + mov r14,r1 + add #48,r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr10,@-r1 + fmov.s fr11,@-r1 + fmov.s fr8,@-r1 + fmov.s fr9,@-r1 + fmov.s fr6,@-r1 + fmov.s fr7,@-r1 + fmov.s fr4,@-r1 + fmov.s fr5,@-r1 +#else + fmov.s fr11,@-r1 + fmov.s fr10,@-r1 + fmov.s fr9,@-r1 + fmov.s fr8,@-r1 + fmov.s fr7,@-r1 + fmov.s fr6,@-r1 + fmov.s fr5,@-r1 + fmov.s fr4,@-r1 +#endif + mov r1,r7 + mov r14,r6 + add #56,r6 +#else + mov r14,r6 + add #24,r6 +#endif + + bt/s 10f + mov r2, r5 + mov r14,r1 + add #8,r1 + mov r1,r5 +10: + + mov r14,r1 +#if defined(__SH4__) + add #72,r1 +#else + add #40,r1 +#endif + mov.l r1,@r14 + +#ifdef PIC + mov.l L_got,r1 + mova L_got,r0 + add r0,r1 + mov.l L_helper,r0 + add r1,r0 +#else + mov.l L_helper,r0 +#endif + jsr @r0 + mov r3,r4 + + shll r0 + mov r0,r1 + mova L_table,r0 + add r1,r0 + mov.w @r0,r0 + mov r14,r2 + braf r0 + add #8,r2 +0: + .align 2 +#ifdef PIC +L_got: + .long _GLOBAL_OFFSET_TABLE_ +L_helper: + .long ffi_closure_helper_SYSV@GOTOFF +#else +L_helper: + .long ffi_closure_helper_SYSV +#endif +L_table: + .short L_case_v - 0b /* FFI_TYPE_VOID */ + .short L_case_i - 0b /* FFI_TYPE_INT */ +#if defined(__SH4__) + .short L_case_f - 0b /* FFI_TYPE_FLOAT */ + .short L_case_d - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_d - 0b /* FFI_TYPE_LONGDOUBLE */ +#else + .short L_case_i - 0b /* FFI_TYPE_FLOAT */ + .short L_case_ll - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_ll - 0b /* FFI_TYPE_LONGDOUBLE */ +#endif + .short L_case_uq - 0b /* FFI_TYPE_UINT8 */ + .short L_case_q - 0b /* FFI_TYPE_SINT8 */ + .short L_case_uh - 0b /* FFI_TYPE_UINT16 */ + .short L_case_h - 0b /* FFI_TYPE_SINT16 */ + .short L_case_i - 0b /* FFI_TYPE_UINT32 */ + .short L_case_i - 0b /* FFI_TYPE_SINT32 */ + .short L_case_ll - 0b /* FFI_TYPE_UINT64 */ + .short L_case_ll - 0b /* FFI_TYPE_SINT64 */ + .short L_case_v - 0b /* FFI_TYPE_STRUCT */ + .short L_case_i - 0b /* FFI_TYPE_POINTER */ + +#if defined(__SH4__) +L_case_d: +#ifdef __LITTLE_ENDIAN__ + fmov.s @r2+,fr1 + bra L_case_v + fmov.s @r2,fr0 +#else + fmov.s @r2+,fr0 + 
bra L_case_v + fmov.s @r2,fr1 +#endif + +L_case_f: + bra L_case_v + fmov.s @r2,fr0 +#endif + +L_case_ll: + mov.l @r2+,r0 + bra L_case_v + mov.l @r2,r1 + +L_case_i: + bra L_case_v + mov.l @r2,r0 + +L_case_q: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + bra L_case_v + mov.b @r2,r0 + +L_case_uq: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + mov.b @r2,r0 + bra L_case_v + extu.b r0,r0 + +L_case_h: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + bra L_case_v + mov.w @r2,r0 + +L_case_uh: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + mov.w @r2,r0 + extu.w r0,r0 + /* fall through */ + +L_case_v: +#if defined(__SH4__) + add #48,r15 +#else + add #16,r15 +#endif + lds.l @r15+,pr + mov.l @r15+,r14 + rts + add #16,r15 +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte 0x7c /* sleb128 -4; CIE Data Alignment Factor */ + .byte 0x11 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0xf /* uleb128 0xf */ + .byte 0x0 /* uleb128 0x0 */ + .align 2 +.LECIE1: +.LSFDE1: + .4byte .LEFDE1-.LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte .LASFDE1-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte .LFE1-.LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI0-.LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI1-.LCFI0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI2-.LCFI1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI3-.LCFI2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI4-.LCFI3 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI5-.LCFI4 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x8a /* DW_CFA_offset, column 0xa */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x89 /* DW_CFA_offset, column 0x9 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x88 /* DW_CFA_offset, column 0x8 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI6-.LCFI5 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte .LEFDE3-.LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte .LASFDE3-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB2-. 
/* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte .LFE2-.LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI7-.LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI8-.LCFI7 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI9-.LCFI8 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIA-.LCFI9 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIB-.LCFIA + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIC-.LCFIB + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFID-.LCFIC + .byte 0xe /* DW_CFA_def_cfa_offset */ +#if defined(__SH4__) + .byte 24+48 /* uleb128 24+48 */ +#else + .byte 24+16 /* uleb128 24+16 */ +#endif + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x84 /* DW_CFA_offset, column 0x4 */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x85 /* DW_CFA_offset, column 0x5 */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x86 /* DW_CFA_offset, column 0x6 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x87 /* DW_CFA_offset, column 0x7 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIE-.LCFID + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE3: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv.S new file mode 100644 index 0000000..5be7516 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv.S @@ -0,0 +1,850 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2002, 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +.text + + # r4: ffi_prep_args + # r5: &ecif + # r6: bytes + # r7: flags + # sp+0: rvalue + # sp+4: fn + + # This assumes we are using gas. +ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + mov.l r8,@-r15 +.LCFI0: + mov.l r9,@-r15 +.LCFI1: + mov.l r10,@-r15 +.LCFI2: + mov.l r12,@-r15 +.LCFI3: + mov.l r14,@-r15 +.LCFI4: + sts.l pr,@-r15 +.LCFI5: + mov r15,r14 +.LCFI6: +#if defined(__SH4__) + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r1 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + mov #4,r3 + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r1,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_f + + mov r3,r0 + and #1,r0 + tst r0,r0 + bt 1f + add #1,r3 +1: + mov #12,r0 + cmp/hs r0,r3 + bt/s 3f + shlr2 r1 + bsr L_pop_d + nop +3: + add #2,r3 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r3,r0 + add r0,r0 + add r3,r0 + add #-12,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_f: + cmp/eq #FFI_TYPE_FLOAT,r0 + bf L_pass_i + + mov #12,r0 + cmp/hs r0,r3 + bt/s 2f + shlr2 r1 + bsr L_pop_f + nop +2: + add #1,r3 + bra L_pass + add #-4,r8 + +L_pop_f: + mov r3,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r1 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr1,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr0,@r1 +#else + fmov.s fr0,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr1,@r1 +#endif + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_f + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_f: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bf L_ret_i + + mov.l @(24,r14),r1 + bra L_epilogue + fmov.s fr0,@r1 + +L_ret_i: + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue + + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#else + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r3 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r3,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_i + + mov r15,r0 + and #7,r0 + tst r0,r0 + bt 1f + add #4,r15 +1: + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_d + nop +2: + add #2,r2 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r2,r0 + add r0,r0 + add r2,r0 + add #-12,r0 + add r0,r0 + braf r0 + nop + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + rts + mov.l @r15+,r7 + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_i + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_i: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bt 1f + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue +1: + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#endif +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + +.globl ffi_closure_helper_SYSV + +ENTRY(ffi_closure_SYSV) +.LFB2: + mov.l r7,@-r15 +.LCFI7: + mov.l r6,@-r15 +.LCFI8: + mov.l r5,@-r15 +.LCFI9: + mov.l r4,@-r15 +.LCFIA: + mov.l r14,@-r15 +.LCFIB: + sts.l pr,@-r15 + + /* Stack layout: + xx bytes (on stack parameters) + 16 bytes (register parameters) + 4 bytes (saved frame pointer) + 4 bytes (saved return address) + 32 bytes (floating register parameters, SH-4 only) + 8 bytes (result) + 4 bytes (pad) + 4 bytes (5th arg) + <- new stack pointer + */ +.LCFIC: +#if defined(__SH4__) + add #-48,r15 +#else + add #-16,r15 +#endif +.LCFID: + mov r15,r14 +.LCFIE: + +#if defined(__SH4__) + mov r14,r1 + add #48,r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr10,@-r1 + fmov.s fr11,@-r1 + fmov.s fr8,@-r1 + fmov.s fr9,@-r1 + fmov.s fr6,@-r1 + fmov.s fr7,@-r1 + fmov.s fr4,@-r1 + fmov.s fr5,@-r1 +#else + fmov.s fr11,@-r1 + fmov.s fr10,@-r1 + fmov.s fr9,@-r1 + fmov.s fr8,@-r1 + fmov.s fr7,@-r1 + fmov.s fr6,@-r1 + fmov.s fr5,@-r1 + fmov.s fr4,@-r1 +#endif + mov r1,r7 + mov r14,r6 + add #56,r6 +#else + mov r14,r6 + add #24,r6 +#endif + + bt/s 10f + mov r2, r5 + mov r14,r1 + add #8,r1 + mov r1,r5 +10: + + mov r14,r1 +#if defined(__SH4__) + add #72,r1 +#else + add #40,r1 +#endif + mov.l r1,@r14 + +#ifdef PIC + mov.l L_got,r1 + mova L_got,r0 + add r0,r1 + mov.l L_helper,r0 + add r1,r0 +#else + mov.l L_helper,r0 +#endif + jsr @r0 + mov r3,r4 + + shll r0 + mov r0,r1 + mova L_table,r0 + add r1,r0 + mov.w @r0,r0 + mov r14,r2 + braf r0 + add #8,r2 +0: + .align 2 +#ifdef PIC +L_got: + .long _GLOBAL_OFFSET_TABLE_ +L_helper: + .long ffi_closure_helper_SYSV@GOTOFF +#else +L_helper: + .long ffi_closure_helper_SYSV +#endif +L_table: + .short L_case_v - 0b /* FFI_TYPE_VOID */ + .short L_case_i - 0b /* FFI_TYPE_INT */ +#if defined(__SH4__) + .short L_case_f - 0b /* FFI_TYPE_FLOAT */ + .short L_case_d - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_d - 0b /* FFI_TYPE_LONGDOUBLE */ +#else + .short L_case_i - 0b /* FFI_TYPE_FLOAT */ + .short L_case_ll - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_ll - 0b /* FFI_TYPE_LONGDOUBLE */ +#endif + .short L_case_uq - 0b /* FFI_TYPE_UINT8 */ + .short L_case_q - 0b /* FFI_TYPE_SINT8 */ + .short L_case_uh - 0b /* FFI_TYPE_UINT16 */ + .short L_case_h - 0b /* FFI_TYPE_SINT16 */ + .short L_case_i - 0b /* FFI_TYPE_UINT32 */ + .short L_case_i - 0b /* FFI_TYPE_SINT32 */ + .short L_case_ll - 0b /* FFI_TYPE_UINT64 */ + .short L_case_ll - 0b /* FFI_TYPE_SINT64 */ + .short L_case_v - 0b /* FFI_TYPE_STRUCT */ + .short L_case_i - 0b /* FFI_TYPE_POINTER */ + +#if defined(__SH4__) +L_case_d: +#ifdef __LITTLE_ENDIAN__ + fmov.s @r2+,fr1 + bra L_case_v + fmov.s @r2,fr0 +#else + fmov.s @r2+,fr0 + 
bra L_case_v + fmov.s @r2,fr1 +#endif + +L_case_f: + bra L_case_v + fmov.s @r2,fr0 +#endif + +L_case_ll: + mov.l @r2+,r0 + bra L_case_v + mov.l @r2,r1 + +L_case_i: + bra L_case_v + mov.l @r2,r0 + +L_case_q: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + bra L_case_v + mov.b @r2,r0 + +L_case_uq: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + mov.b @r2,r0 + bra L_case_v + extu.b r0,r0 + +L_case_h: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + bra L_case_v + mov.w @r2,r0 + +L_case_uh: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + mov.w @r2,r0 + extu.w r0,r0 + /* fall through */ + +L_case_v: +#if defined(__SH4__) + add #48,r15 +#else + add #16,r15 +#endif + lds.l @r15+,pr + mov.l @r15+,r14 + rts + add #16,r15 +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte 0x7c /* sleb128 -4; CIE Data Alignment Factor */ + .byte 0x11 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0xf /* uleb128 0xf */ + .byte 0x0 /* uleb128 0x0 */ + .align 2 +.LECIE1: +.LSFDE1: + .4byte .LEFDE1-.LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte .LASFDE1-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte .LFE1-.LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI0-.LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI1-.LCFI0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI2-.LCFI1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI3-.LCFI2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI4-.LCFI3 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI5-.LCFI4 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x8a /* DW_CFA_offset, column 0xa */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x89 /* DW_CFA_offset, column 0x9 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x88 /* DW_CFA_offset, column 0x8 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI6-.LCFI5 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte .LEFDE3-.LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte .LASFDE3-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB2-. 
/* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte .LFE2-.LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI7-.LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI8-.LCFI7 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI9-.LCFI8 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIA-.LCFI9 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIB-.LCFIA + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIC-.LCFIB + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFID-.LCFIC + .byte 0xe /* DW_CFA_def_cfa_offset */ +#if defined(__SH4__) + .byte 24+48 /* uleb128 24+48 */ +#else + .byte 24+16 /* uleb128 24+16 */ +#endif + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x84 /* DW_CFA_offset, column 0x4 */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x85 /* DW_CFA_offset, column 0x5 */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x86 /* DW_CFA_offset, column 0x6 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x87 /* DW_CFA_offset, column 0x7 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIE-.LCFID + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE3: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi 2.c new file mode 100644 index 0000000..123b87a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi 2.c @@ -0,0 +1,469 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2003, 2004, 2006, 2007, 2012 Kaz Kojima + Copyright (c) 2008 Anthony Green + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 8 +#define NFREGARG 12 + +static int +return_type (ffi_type *arg) +{ + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + /* gcc uses r2 if the result can be packed in on register. */ + if (arg->size <= sizeof (UINT8)) + return FFI_TYPE_UINT8; + else if (arg->size <= sizeof (UINT16)) + return FFI_TYPE_UINT16; + else if (arg->size <= sizeof (UINT32)) + return FFI_TYPE_UINT32; + else if (arg->size <= sizeof (UINT64)) + return FFI_TYPE_UINT64; + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += sizeof (UINT64); + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + int align; + + z = (*p_arg)->size; + align = (*p_arg)->alignment; + if (z < sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(SINT64 *) argp = (SINT64) *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT64 *) argp = (UINT64) *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT64 *) argp = (SINT64) *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT64 *) argp = (UINT64) *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT(0); + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT32) && align == sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *(SINT64 *) argp = (SINT64) *(SINT32 *) (*p_argv); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_STRUCT: + *(UINT64 *) argp = (UINT64) *(UINT32 *) (*p_argv); + break; + + default: + FFI_ASSERT(0); + break; + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT64) + && align == sizeof (UINT64) + && ((int) *p_argv & (sizeof (UINT64) - 1)) == 0) + { + *(UINT64 *) argp = *(UINT64 *) (*p_argv); + argp += sizeof (UINT64); + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + memcpy (argp, *p_argv, z); + argp += n * sizeof (UINT64); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; + int freg; + int fpair = -1; + + greg = (return_type (cif->rtype) == FFI_TYPE_STRUCT ? 
1 : 0); + freg = 0; + cif->flags2 = 0; + + for (i = j = 0; i < cif->nargs; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + greg++; + cif->bytes += sizeof (UINT64) - sizeof (float); + if (freg >= NFREGARG - 1) + continue; + if (fpair < 0) + { + fpair = freg; + freg += 2; + } + else + fpair = -1; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + break; + + case FFI_TYPE_DOUBLE: + if (greg++ >= NGREGARG && (freg + 1) >= NFREGARG) + continue; + if ((freg + 1) < NFREGARG) + { + freg += 2; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + } + else + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + + default: + size = (cif->arg_types)[i]->size; + if (size < sizeof (UINT64)) + cif->bytes += sizeof (UINT64) - size; + n = (size + sizeof (UINT64) - 1) / sizeof (UINT64); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + greg = NGREGARG; + else + greg += n; + for (m = 0; m < n; m++) + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + } + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags = return_type (cif->rtype); + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +/*@-declundef@*/ +/*@-exportheader@*/ +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), + /*@out@*/ extended_cif *, + unsigned, unsigned, long long, + /*@out@*/ unsigned *, + void (*fn)(void)); +/*@=declundef@*/ +/*@=exportheader@*/ + +void ffi_call(/*@dependent@*/ ffi_cif *cif, + void (*fn)(void), + /*@out@*/ void *rvalue, + /*@dependent@*/ void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, cif->flags2, + ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +extern void __ic_invalidate (void *line); + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Since ffi_closure is an aligned object, the ffi trampoline is + called as an SHcompact code. Sigh. 
+ SHcompact part: + mova @(1,pc),r0; add #1,r0; jmp @r0; nop; + SHmedia part: + movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0 + movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */ +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0x7001c701; + tramp[1] = 0x0009402b; +#else + tramp[0] = 0xc7017001; + tramp[1] = 0x402b0009; +#endif + tramp[2] = 0xcc000010 | (((UINT32) ffi_closure_SYSV) >> 16) << 10; + tramp[3] = 0xc8000010 | (((UINT32) ffi_closure_SYSV) & 0xffff) << 10; + tramp[4] = 0x6bf10600; + tramp[5] = 0xcc000010 | (((UINT32) codeloc) >> 16) << 10; + tramp[6] = 0xc8000010 | (((UINT32) codeloc) & 0xffff) << 10; + tramp[7] = 0x4401fff0; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. */ + asm volatile ("ocbwb %0,0; synco; icbi %1,0; synci" : : "r" (tramp), + "r"(codeloc)); + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +int +ffi_closure_helper_SYSV (ffi_closure *closure, UINT64 *rvalue, + UINT64 *pgr, UINT64 *pfr, UINT64 *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int greg, freg; + ffi_cif *cif; + int fpair = -1; + + cif = closure->cif; + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (return_type (cif->rtype) == FFI_TYPE_STRUCT) + { + rvalue = (UINT64 *) *pgr; + greg = 1; + } + else + greg = 0; + + freg = 0; + cif = closure->cif; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + void *p; + + z = (*p_arg)->size; + if (z < sizeof (UINT32)) + { + p = pgr + greg++; + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_STRUCT: +#ifdef __LITTLE_ENDIAN__ + avalue[i] = p; +#else + avalue[i] = ((char *) p) + sizeof (UINT32) - z; +#endif + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (UINT32)) + { + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg < NFREGARG - 1) + { + if (fpair >= 0) + { + avalue[i] = (UINT32 *) pfr + fpair; + fpair = -1; + } + else + { +#ifdef __LITTLE_ENDIAN__ + fpair = freg; + avalue[i] = (UINT32 *) pfr + (1 ^ freg); +#else + fpair = 1 ^ freg; + avalue[i] = (UINT32 *) pfr + freg; +#endif + freg += 2; + } + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + greg++; + } + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + avalue[i] = pgr + greg; + else + { + avalue[i] = pfr + (freg >> 1); + freg += 2; + } + greg++; + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + avalue[i] = pgr + greg; + greg += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. 
*/ + return return_type (cif->rtype); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi.c new file mode 100644 index 0000000..123b87a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi.c @@ -0,0 +1,469 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2003, 2004, 2006, 2007, 2012 Kaz Kojima + Copyright (c) 2008 Anthony Green + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 8 +#define NFREGARG 12 + +static int +return_type (ffi_type *arg) +{ + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + /* gcc uses r2 if the result can be packed in on register. 
*/ + if (arg->size <= sizeof (UINT8)) + return FFI_TYPE_UINT8; + else if (arg->size <= sizeof (UINT16)) + return FFI_TYPE_UINT16; + else if (arg->size <= sizeof (UINT32)) + return FFI_TYPE_UINT32; + else if (arg->size <= sizeof (UINT64)) + return FFI_TYPE_UINT64; + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += sizeof (UINT64); + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + int align; + + z = (*p_arg)->size; + align = (*p_arg)->alignment; + if (z < sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(SINT64 *) argp = (SINT64) *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT64 *) argp = (UINT64) *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT64 *) argp = (SINT64) *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT64 *) argp = (UINT64) *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT(0); + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT32) && align == sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *(SINT64 *) argp = (SINT64) *(SINT32 *) (*p_argv); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_STRUCT: + *(UINT64 *) argp = (UINT64) *(UINT32 *) (*p_argv); + break; + + default: + FFI_ASSERT(0); + break; + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT64) + && align == sizeof (UINT64) + && ((int) *p_argv & (sizeof (UINT64) - 1)) == 0) + { + *(UINT64 *) argp = *(UINT64 *) (*p_argv); + argp += sizeof (UINT64); + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + memcpy (argp, *p_argv, z); + argp += n * sizeof (UINT64); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; + int freg; + int fpair = -1; + + greg = (return_type (cif->rtype) == FFI_TYPE_STRUCT ? 
1 : 0); + freg = 0; + cif->flags2 = 0; + + for (i = j = 0; i < cif->nargs; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + greg++; + cif->bytes += sizeof (UINT64) - sizeof (float); + if (freg >= NFREGARG - 1) + continue; + if (fpair < 0) + { + fpair = freg; + freg += 2; + } + else + fpair = -1; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + break; + + case FFI_TYPE_DOUBLE: + if (greg++ >= NGREGARG && (freg + 1) >= NFREGARG) + continue; + if ((freg + 1) < NFREGARG) + { + freg += 2; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + } + else + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + + default: + size = (cif->arg_types)[i]->size; + if (size < sizeof (UINT64)) + cif->bytes += sizeof (UINT64) - size; + n = (size + sizeof (UINT64) - 1) / sizeof (UINT64); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + greg = NGREGARG; + else + greg += n; + for (m = 0; m < n; m++) + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + } + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags = return_type (cif->rtype); + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +/*@-declundef@*/ +/*@-exportheader@*/ +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), + /*@out@*/ extended_cif *, + unsigned, unsigned, long long, + /*@out@*/ unsigned *, + void (*fn)(void)); +/*@=declundef@*/ +/*@=exportheader@*/ + +void ffi_call(/*@dependent@*/ ffi_cif *cif, + void (*fn)(void), + /*@out@*/ void *rvalue, + /*@dependent@*/ void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, cif->flags2, + ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +extern void __ic_invalidate (void *line); + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Since ffi_closure is an aligned object, the ffi trampoline is + called as an SHcompact code. Sigh. 
+ SHcompact part: + mova @(1,pc),r0; add #1,r0; jmp @r0; nop; + SHmedia part: + movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0 + movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */ +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0x7001c701; + tramp[1] = 0x0009402b; +#else + tramp[0] = 0xc7017001; + tramp[1] = 0x402b0009; +#endif + tramp[2] = 0xcc000010 | (((UINT32) ffi_closure_SYSV) >> 16) << 10; + tramp[3] = 0xc8000010 | (((UINT32) ffi_closure_SYSV) & 0xffff) << 10; + tramp[4] = 0x6bf10600; + tramp[5] = 0xcc000010 | (((UINT32) codeloc) >> 16) << 10; + tramp[6] = 0xc8000010 | (((UINT32) codeloc) & 0xffff) << 10; + tramp[7] = 0x4401fff0; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. */ + asm volatile ("ocbwb %0,0; synco; icbi %1,0; synci" : : "r" (tramp), + "r"(codeloc)); + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +int +ffi_closure_helper_SYSV (ffi_closure *closure, UINT64 *rvalue, + UINT64 *pgr, UINT64 *pfr, UINT64 *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int greg, freg; + ffi_cif *cif; + int fpair = -1; + + cif = closure->cif; + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (return_type (cif->rtype) == FFI_TYPE_STRUCT) + { + rvalue = (UINT64 *) *pgr; + greg = 1; + } + else + greg = 0; + + freg = 0; + cif = closure->cif; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + void *p; + + z = (*p_arg)->size; + if (z < sizeof (UINT32)) + { + p = pgr + greg++; + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_STRUCT: +#ifdef __LITTLE_ENDIAN__ + avalue[i] = p; +#else + avalue[i] = ((char *) p) + sizeof (UINT32) - z; +#endif + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (UINT32)) + { + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg < NFREGARG - 1) + { + if (fpair >= 0) + { + avalue[i] = (UINT32 *) pfr + fpair; + fpair = -1; + } + else + { +#ifdef __LITTLE_ENDIAN__ + fpair = freg; + avalue[i] = (UINT32 *) pfr + (1 ^ freg); +#else + fpair = 1 ^ freg; + avalue[i] = (UINT32 *) pfr + freg; +#endif + freg += 2; + } + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + greg++; + } + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + avalue[i] = pgr + greg; + else + { + avalue[i] = pfr + (freg >> 1); + freg += 2; + } + greg++; + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + avalue[i] = pgr + greg; + greg += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. 
*/ + return return_type (cif->rtype); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffitarget.h new file mode 100644 index 0000000..08a6fe9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffitarget.h @@ -0,0 +1,58 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH - SHmedia. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS long long flags2 +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 32 +#define FFI_NATIVE_RAW_API 0 + +#endif + 
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/sysv.S new file mode 100644 index 0000000..c4587d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/sysv.S @@ -0,0 +1,539 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#ifdef __LITTLE_ENDIAN__ +#define OFS_FLT 0 +#else +#define OFS_FLT 4 +#endif + + .section .text..SHmedia32,"ax" + + # r2: ffi_prep_args + # r3: &ecif + # r4: bytes + # r5: flags + # r6: flags2 + # r7: rvalue + # r8: fn + + # This assumes we are using gas. 
+ .align 5 +ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + addi.l r15, -48, r15 +.LCFI0: + st.q r15, 40, r32 + st.q r15, 32, r31 + st.q r15, 24, r30 + st.q r15, 16, r29 + st.q r15, 8, r28 + st.l r15, 4, r18 + st.l r15, 0, r14 +.LCFI1: + add.l r15, r63, r14 +.LCFI2: +# add r4, r63, r28 + add r5, r63, r29 + add r6, r63, r30 + add r7, r63, r31 + add r8, r63, r32 + + addi r4, (64 + 7), r4 + andi r4, ~7, r4 + sub.l r15, r4, r15 + + ptabs/l r2, tr0 + add r15, r63, r2 + blink tr0, r18 + + addi r15, 64, r22 + movi 0, r0 + movi 0, r1 + movi -1, r23 + + pt/l 1f, tr1 + bnei/l r29, FFI_TYPE_STRUCT, tr1 + ld.l r15, 0, r19 + addi r15, 8, r15 + addi r0, 1, r0 +1: + +.L_pass: + andi r30, 3, r20 + shlri r30, 2, r30 + + pt/l .L_call_it, tr0 + pt/l .L_pass_i, tr1 + pt/l .L_pass_f, tr2 + + beqi/l r20, FFI_TYPE_VOID, tr0 + beqi/l r20, FFI_TYPE_INT, tr1 + beqi/l r20, FFI_TYPE_FLOAT, tr2 + +.L_pass_d: + addi r0, 1, r0 + pt/l 3f, tr0 + movi 12, r20 + bge/l r1, r20, tr0 + + pt/l .L_pop_d, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + addi r1, 2, r1 + blink tr0, r63 + +.L_pop_d: + pt/l .L_pop_d_tbl, tr1 + gettr tr1, r20 + shlli r1, 2, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_d_tbl: + fld.d r15, 0, dr0 + blink tr0, r63 + fld.d r15, 0, dr2 + blink tr0, r63 + fld.d r15, 0, dr4 + blink tr0, r63 + fld.d r15, 0, dr6 + blink tr0, r63 + fld.d r15, 0, dr8 + blink tr0, r63 + fld.d r15, 0, dr10 + blink tr0, r63 + +.L_pass_f: + addi r0, 1, r0 + pt/l 3f, tr0 + movi 12, r20 + bge/l r1, r20, tr0 + + pt/l .L_pop_f, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + blink tr0, r63 + +.L_pop_f: + pt/l .L_pop_f_tbl, tr1 + pt/l 5f, tr2 + gettr tr1, r20 + bge/l r23, r63, tr2 + add r1, r63, r23 + shlli r1, 3, r21 + addi r1, 2, r1 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 +5: + addi r23, 1, r21 + movi -1, r23 + shlli r21, 3, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_f_tbl: + fld.s r15, OFS_FLT, fr0 + blink tr0, r63 + fld.s r15, OFS_FLT, fr1 + blink tr0, r63 + fld.s r15, OFS_FLT, fr2 + blink tr0, r63 + fld.s r15, OFS_FLT, fr3 + blink tr0, r63 + fld.s r15, OFS_FLT, fr4 + blink tr0, r63 + fld.s r15, OFS_FLT, fr5 + blink tr0, r63 + fld.s r15, OFS_FLT, fr6 + blink tr0, r63 + fld.s r15, OFS_FLT, fr7 + blink tr0, r63 + fld.s r15, OFS_FLT, fr8 + blink tr0, r63 + fld.s r15, OFS_FLT, fr9 + blink tr0, r63 + fld.s r15, OFS_FLT, fr10 + blink tr0, r63 + fld.s r15, OFS_FLT, fr11 + blink tr0, r63 + +.L_pass_i: + pt/l 3f, tr0 + movi 8, r20 + bge/l r0, r20, tr0 + + pt/l .L_pop_i, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + addi r0, 1, r0 + blink tr0, r63 + +.L_pop_i: + pt/l .L_pop_i_tbl, tr1 + gettr tr1, r20 + shlli r0, 3, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_i_tbl: + ld.q r15, 0, r2 + blink tr0, r63 + ld.q r15, 0, r3 + blink tr0, r63 + ld.q r15, 0, r4 + blink tr0, r63 + ld.q r15, 0, r5 + blink tr0, r63 + ld.q r15, 0, r6 + blink tr0, r63 + ld.q r15, 0, r7 + blink tr0, r63 + ld.q r15, 0, r8 + blink tr0, r63 + ld.q r15, 0, r9 + blink tr0, r63 + +.L_call_it: + # call function + pt/l 1f, tr1 + bnei/l r29, FFI_TYPE_STRUCT, tr1 + add r19, r63, r2 +1: + add r22, r63, r15 + ptabs/l r32, tr0 + blink tr0, r18 + + pt/l .L_ret_i, tr0 + pt/l .L_ret_ll, tr1 + pt/l .L_ret_d, tr2 + pt/l .L_ret_f, tr3 + pt/l .L_epilogue, tr4 + + beqi/l r29, FFI_TYPE_INT, tr0 + beqi/l r29, FFI_TYPE_UINT32, tr0 + beqi/l r29, FFI_TYPE_SINT64, tr1 + beqi/l r29, 
FFI_TYPE_UINT64, tr1 + beqi/l r29, FFI_TYPE_DOUBLE, tr2 + beqi/l r29, FFI_TYPE_FLOAT, tr3 + + pt/l .L_ret_q, tr0 + pt/l .L_ret_h, tr1 + + beqi/l r29, FFI_TYPE_UINT8, tr0 + beqi/l r29, FFI_TYPE_UINT16, tr1 + blink tr4, r63 + +.L_ret_d: + fst.d r31, 0, dr0 + blink tr4, r63 + +.L_ret_ll: + st.q r31, 0, r2 + blink tr4, r63 + +.L_ret_f: + fst.s r31, OFS_FLT, fr0 + blink tr4, r63 + +.L_ret_q: + st.b r31, 0, r2 + blink tr4, r63 + +.L_ret_h: + st.w r31, 0, r2 + blink tr4, r63 + +.L_ret_i: + st.l r31, 0, r2 + # Fall + +.L_epilogue: + # Remove the space we pushed for the args + add r14, r63, r15 + + ld.l r15, 0, r14 + ld.l r15, 4, r18 + ld.q r15, 8, r28 + ld.q r15, 16, r29 + ld.q r15, 24, r30 + ld.q r15, 32, r31 + ld.q r15, 40, r32 + addi.l r15, 48, r15 + ptabs r18, tr0 + blink tr0, r63 + +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + .align 5 +ENTRY(ffi_closure_SYSV) +.LFB2: + addi.l r15, -136, r15 +.LCFI3: + st.l r15, 12, r18 + st.l r15, 8, r14 + st.l r15, 4, r12 +.LCFI4: + add r15, r63, r14 +.LCFI5: + /* Stack layout: + ... + 64 bytes (register parameters) + 48 bytes (floating register parameters) + 8 bytes (result) + 4 bytes (r18) + 4 bytes (r14) + 4 bytes (r12) + 4 bytes (for align) + <- new stack pointer + */ + fst.d r14, 24, dr0 + fst.d r14, 32, dr2 + fst.d r14, 40, dr4 + fst.d r14, 48, dr6 + fst.d r14, 56, dr8 + fst.d r14, 64, dr10 + st.q r14, 72, r2 + st.q r14, 80, r3 + st.q r14, 88, r4 + st.q r14, 96, r5 + st.q r14, 104, r6 + st.q r14, 112, r7 + st.q r14, 120, r8 + st.q r14, 128, r9 + + add r1, r63, r2 + addi r14, 16, r3 + addi r14, 72, r4 + addi r14, 24, r5 + addi r14, 136, r6 +#ifdef PIC + movi (((datalabel _GLOBAL_OFFSET_TABLE_-(.LPCS0-.)) >> 16) & 65535), r12 + shori ((datalabel _GLOBAL_OFFSET_TABLE_-(.LPCS0-.)) & 65535), r12 +.LPCS0: ptrel/u r12, tr0 + movi ((ffi_closure_helper_SYSV@GOTPLT) & 65535), r1 + gettr tr0, r12 + ldx.l r1, r12, r1 + ptabs r1, tr0 +#else + pt/l ffi_closure_helper_SYSV, tr0 +#endif + blink tr0, r18 + + shlli r2, 1, r1 + movi (((datalabel .L_table) >> 16) & 65535), r2 + shori ((datalabel .L_table) & 65535), r2 + ldx.w r2, r1, r1 + add r1, r2, r1 + pt/l .L_case_v, tr1 + ptabs r1, tr0 + blink tr0, r63 + + .align 2 +.L_table: + .word .L_case_v - datalabel .L_table /* FFI_TYPE_VOID */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_INT */ + .word .L_case_f - datalabel .L_table /* FFI_TYPE_FLOAT */ + .word .L_case_d - datalabel .L_table /* FFI_TYPE_DOUBLE */ + .word .L_case_d - datalabel .L_table /* FFI_TYPE_LONGDOUBLE */ + .word .L_case_uq - datalabel .L_table /* FFI_TYPE_UINT8 */ + .word .L_case_q - datalabel .L_table /* FFI_TYPE_SINT8 */ + .word .L_case_uh - datalabel .L_table /* FFI_TYPE_UINT16 */ + .word .L_case_h - datalabel .L_table /* FFI_TYPE_SINT16 */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_UINT32 */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_SINT32 */ + .word .L_case_ll - datalabel .L_table /* FFI_TYPE_UINT64 */ + .word .L_case_ll - datalabel .L_table /* FFI_TYPE_SINT64 */ + .word .L_case_v - datalabel .L_table /* FFI_TYPE_STRUCT */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_POINTER */ + + .align 2 +.L_case_d: + fld.d r14, 16, dr0 + blink tr1, r63 +.L_case_f: + fld.s r14, 16, fr0 + blink tr1, r63 +.L_case_ll: + ld.q r14, 16, r2 + blink tr1, r63 +.L_case_i: + ld.l r14, 16, r2 + blink tr1, r63 +.L_case_q: + ld.b r14, 16, r2 + blink tr1, r63 +.L_case_uq: + ld.ub r14, 16, r2 + blink tr1, r63 +.L_case_h: + ld.w r14, 16, r2 + blink tr1, r63 +.L_case_uh: + ld.uw r14, 16, r2 + 
blink tr1, r63 +.L_case_v: + add.l r14, r63, r15 + ld.l r15, 4, r12 + ld.l r15, 8, r14 + ld.l r15, 12, r18 + addi.l r15, 136, r15 + ptabs r18, tr0 + blink tr0, r63 + +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .uleb128 0x1 /* CIE Code Alignment Factor */ + .sleb128 -4 /* CIE Data Alignment Factor */ + .byte 0x12 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .uleb128 0xf + .uleb128 0x0 + .align 2 +.LECIE1: +.LSFDE1: + .4byte datalabel .LEFDE1-datalabel .LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte datalabel .LASFDE1-datalabel __FRAME_BEGIN__ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte datalabel .LFE1-datalabel .LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI0-datalabel .LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .uleb128 0x30 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI1-datalabel .LCFI0 + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .uleb128 0xc + .byte 0x92 /* DW_CFA_offset, column 0x12 */ + .uleb128 0xb + .byte 0x9c /* DW_CFA_offset, column 0x1c */ + .uleb128 0xa + .byte 0x9d /* DW_CFA_offset, column 0x1d */ + .uleb128 0x8 + .byte 0x9e /* DW_CFA_offset, column 0x1e */ + .uleb128 0x6 + .byte 0x9f /* DW_CFA_offset, column 0x1f */ + .uleb128 0x4 + .byte 0xa0 /* DW_CFA_offset, column 0x20 */ + .uleb128 0x2 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI2-datalabel .LCFI1 + .byte 0xd /* DW_CFA_def_cfa_register */ + .uleb128 0xe + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte datalabel .LEFDE3-datalabel .LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte datalabel .LASFDE3-datalabel __FRAME_BEGIN__ +#ifdef PIC + .4byte .LFB2-. 
/* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte datalabel .LFE2-datalabel .LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI3-datalabel .LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .uleb128 0x88 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI4-datalabel .LCFI3 + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .uleb128 0x21 + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .uleb128 0x20 + .byte 0x92 /* DW_CFA_offset, column 0x12 */ + .uleb128 0x1f + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI5-datalabel .LCFI4 + .byte 0xd /* DW_CFA_def_cfa_register */ + .uleb128 0xe + .align 2 +.LEFDE3: 
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi.c new file mode 100644 index 0000000..9e406d0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi.c @@ -0,0 +1,468 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. 
*/ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + flags = (rtype->size & 0xfff) << SPARC_SIZEMASK_SHIFT; + flags |= SPARC_RET_STRUCT; + break; + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = SPARC_RET_INT64; + break; + case FFI_TYPE_COMPLEX: + rtt = rtype->elements[0]->type; + switch (rtt) + { + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_4; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_8; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = SPARC_RET_INT128; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = SPARC_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = SP_V8_RET_CPLX16; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = SP_V8_RET_CPLX8; + break; + default: + abort(); + } + break; + default: + abort(); + } + cif->flags = flags; + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + int tt = ty->type; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Passed by reference. */ + z = 4; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + /* FALLTHRU */ + + default: + z = FFI_ALIGN(z, 4); + } + bytes += z; + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 4) + bytes = 6 * 4; + + /* The ABI always requires space for the struct return pointer. */ + bytes += 4; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 2 * 4); + + /* Include the call frame to prep_args. */ + bytes += 4*16 + 4*8; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_v8(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +int FFI_HIDDEN +ffi_prep_args_v8(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if ((flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. 
*/ + flags = SPARC_RET_VOID; + } + } + + /* This could only really be done when we are returning a structure. + However, the space is reserved so we can do it unconditionally. */ + *argp++ = (unsigned long)rvalue; + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. */ + memset(argp, 0, 6*4); +#endif + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + int tt = ty->type; + size_t z; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + *argp++ = (unsigned long)a; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + memcpy(argp, a, 8); + argp += 2; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *argp++ = *(unsigned *)a; + break; + + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + z = ty->size; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + if (z < 4) + { + memcpy((char *)argp + 4 - z, a, z); + argp++; + } + else + { + memcpy(argp, a, z); + argp += z / 4; + } + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V8); + + /* If we've not got a return value, we need to create one if we've + got to pass the return value to the callee. Otherwise ignore it. 
*/ + if (rvalue == NULL + && (cif->flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + bytes += FFI_ALIGN (cif->rtype->size, 8); + + ffi_call_v8(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + /* SPARC v8 requires 5 instructions for flush to be visible */ + asm volatile ("iflush %0; iflush %0+8; nop; nop; nop; nop; nop" + : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v8(void) FFI_HIDDEN; +extern void ffi_go_closure_v8(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long ctx = (unsigned long) closure; + unsigned long fn = (unsigned long) ffi_closure_v8; + + if (cif->abi != FFI_V8) + return FFI_BAD_ABI; + + tramp[0] = 0x03000000 | fn >> 10; /* sethi %hi(fn), %g1 */ + tramp[1] = 0x05000000 | ctx >> 10; /* sethi %hi(ctx), %g2 */ + tramp[2] = 0x81c06000 | (fn & 0x3ff); /* jmp %g1+%lo(fn) */ + tramp[3] = 0x8410a000 | (ctx & 0x3ff);/* or %g2, %lo(ctx) */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V8) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v8; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v8(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *argp) +{ + ffi_type **arg_types; + void **avalue; + int i, nargs, flags; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. Also install it so we + can return the address in %o0. */ + if ((flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + { + void *new_rvalue = (void *)*argp; + *(void **)rvalue = new_rvalue; + rvalue = new_rvalue; + } + + /* Always skip the structure return address. */ + argp++; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++) + { + ffi_type *ty = arg_types[i]; + int tt = ty->type; + void *a = argp; + size_t z; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Straight copy of invisible reference. */ + a = (void *)*argp; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + if ((unsigned long)a & 7) + { + /* Align on a 8-byte boundary. 
*/ + UINT64 *tmp = alloca(8); + *tmp = ((UINT64)argp[0] << 32) | argp[1]; + a = tmp; + } + argp++; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 2; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 3; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + z = ty->size; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + if (z < 4) + a += 4 - z; + else if (z > 4) + argp++; + break; + + default: + abort(); + } + argp++; + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* !SPARC64 */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64 2.c new file mode 100644 index 0000000..9e04061 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64 2.c @@ -0,0 +1,608 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#ifdef SPARC64 + +/* Flatten the contents of a structure to the parts that are passed in + floating point registers. The return is a bit mask wherein bit N + set means bytes [4*n, 4*n+3] are passed in %fN. + + We encode both the (running) size (maximum 32) and mask (maxumum 255) + into one integer. The size is placed in the low byte, so that align + and addition work correctly. The mask is placed in the second byte. 
*/ + +static int +ffi_struct_float_mask (ffi_type *outer_type, int size_mask) +{ + ffi_type **elts; + ffi_type *t; + + if (outer_type->type == FFI_TYPE_COMPLEX) + { + int m = 0, tt = outer_type->elements[0]->type; + size_t z = outer_type->size; + + if (tt == FFI_TYPE_FLOAT + || tt == FFI_TYPE_DOUBLE + || tt == FFI_TYPE_LONGDOUBLE) + m = (1 << (z / 4)) - 1; + return (m << 8) | z; + } + FFI_ASSERT (outer_type->type == FFI_TYPE_STRUCT); + + for (elts = outer_type->elements; (t = *elts) != NULL; elts++) + { + size_t z = t->size; + int o, m, tt; + + size_mask = FFI_ALIGN(size_mask, t->alignment); + switch (t->type) + { + case FFI_TYPE_STRUCT: + size_mask = ffi_struct_float_mask (t, size_mask); + continue; + case FFI_TYPE_COMPLEX: + tt = t->elements[0]->type; + if (tt != FFI_TYPE_FLOAT + && tt != FFI_TYPE_DOUBLE + && tt != FFI_TYPE_LONGDOUBLE) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + m = (1 << (z / 4)) - 1; /* compute mask for type */ + o = (size_mask >> 2) & 0x3f; /* extract word offset */ + size_mask |= m << (o + 8); /* insert mask into place */ + break; + } + size_mask += z; + } + + size_mask = FFI_ALIGN(size_mask, outer_type->alignment); + FFI_ASSERT ((size_mask & 0xff) == outer_type->size); + + return size_mask; +} + +/* Merge floating point data into integer data. If the structure is + entirely floating point, simply return a pointer to the fp data. */ + +static void * +ffi_struct_float_merge (int size_mask, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + return vi; + else if (mask == (1 << n) - 1) + return vf; + else + { + unsigned int *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + if ((mask >> i) & 1) + wi[i] = wf[i]; + + return vi; + } +} + +/* Similar, but place the data into VD in the end. */ + +void FFI_HIDDEN +ffi_struct_float_copy (int size_mask, void *vd, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + ; + else if (mask == (1 << n) - 1) + vi = vf; + else + { + unsigned int *wd = vd, *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + wd[i] = ((mask >> i) & 1 ? wf : wi)[i]; + return; + } + memcpy (vd, vi, size); +} + +/* Perform machine dependent cif processing */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes = 0; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_4; + break; + + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + if (rtype->size > 32) + { + flags = SPARC_RET_VOID | SPARC_FLAG_RET_IN_MEM; + bytes = 8; + } + else + { + int size_mask = ffi_struct_float_mask (rtype, 0); + int word_size = (size_mask >> 2) & 0x3f; + int all_mask = (1 << word_size) - 1; + int fp_mask = size_mask >> 8; + + flags = (size_mask << SPARC_SIZEMASK_SHIFT) | SPARC_RET_STRUCT; + + /* For special cases of all-int or all-fp, we can return + the value directly without popping through a struct copy. 
*/ + if (fp_mask == 0) + { + if (rtype->alignment >= 8) + { + if (rtype->size == 8) + flags = SPARC_RET_INT64; + else if (rtype->size == 16) + flags = SPARC_RET_INT128; + } + } + else if (fp_mask == all_mask) + switch (word_size) + { + case 1: flags = SPARC_RET_F_1; break; + case 2: flags = SPARC_RET_F_2; break; + case 3: flags = SP_V9_RET_F_3; break; + case 4: flags = SPARC_RET_F_4; break; + /* 5 word structures skipped; handled via RET_STRUCT. */ + case 6: flags = SPARC_RET_F_6; break; + /* 7 word structures skipped; handled via RET_STRUCT. */ + case 8: flags = SPARC_RET_F_8; break; + } + } + break; + + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = SP_V9_RET_SINT32; + break; + case FFI_TYPE_UINT32: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + flags = SPARC_RET_INT64; + break; + + default: + abort(); + } + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + size_t a = ty->alignment; + + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + /* Large structs passed by reference. */ + if (z > 16) + { + a = z = 8; + break; + } + /* Small structs may be passed in integer or fp regs or both. */ + if (bytes >= 16*8) + break; + if ((ffi_struct_float_mask (ty, 0) & 0xff00) == 0) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + flags |= SPARC_FLAG_FP_ARGS; + break; + } + bytes = FFI_ALIGN(bytes, a); + bytes += FFI_ALIGN(z, 8); + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 8) + bytes = 6 * 8; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 16); + + /* Include the call frame to prep_args. */ + bytes += 8*16 + 8*8; + + cif->bytes = bytes; + cif->flags = flags; + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned nfixedargs, unsigned ntotalargs) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern void ffi_call_v9(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +int FFI_HIDDEN +ffi_prep_args_v9(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if (flags & SPARC_FLAG_RET_IN_MEM) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. */ + flags = SPARC_RET_VOID; + } + } + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. 
*/ + memset(argp, 0, 6*8); +#endif + + if (flags & SPARC_FLAG_RET_IN_MEM) + *argp++ = (unsigned long)rvalue; + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + size_t z; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *argp++ = *(SINT32 *)a; + break; + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + *argp++ = *(UINT32 *)a; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + *argp++ = *(UINT64 *)a; + break; + + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + { + /* For structures larger than 16 bytes we pass reference. */ + *argp++ = (unsigned long)a; + break; + } + if (((unsigned long)argp & 15) && ty->alignment > 8) + argp++; + memcpy(argp, a, z); + argp += FFI_ALIGN(z, 8) / 8; + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V9); + + if (rvalue == NULL && (cif->flags & SPARC_FLAG_RET_IN_MEM)) + bytes += FFI_ALIGN (cif->rtype->size, 16); + + ffi_call_v9(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + asm volatile ("flush %0; flush %0+8" : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v9(void) FFI_HIDDEN; +extern void ffi_go_closure_v9(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn; + + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + /* Trampoline address is equal to the closure address. We take advantage + of that to reduce the trampoline size by 8 bytes. 
*/ + fn = (unsigned long) ffi_closure_v9; + tramp[0] = 0x83414000; /* rd %pc, %g1 */ + tramp[1] = 0xca586010; /* ldx [%g1+16], %g5 */ + tramp[2] = 0x81c14000; /* jmp %g5 */ + tramp[3] = 0x01000000; /* nop */ + *((unsigned long *) &tramp[4]) = fn; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v9; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v9(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *gpr, unsigned long *fpr) +{ + ffi_type **arg_types; + void **avalue; + int i, argn, argx, nargs, flags, nfixedargs; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + nfixedargs = cif->nfixedargs; + + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. */ + if (flags & SPARC_FLAG_RET_IN_MEM) + { + rvalue = (void *) gpr[0]; + /* Skip the structure return address. */ + argn = 1; + } + else + argn = 0; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++, argn = argx) + { + int named = i < nfixedargs; + ffi_type *ty = arg_types[i]; + void *a = &gpr[argn]; + size_t z; + + argx = argn + 1; + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + a = *(void **)a; + else + { + argx = argn + FFI_ALIGN (z, 8) / 8; + if (named && argn < 16) + { + int size_mask = ffi_struct_float_mask (ty, 0); + int argn_mask = (0xffff00 >> argn) & 0xff00; + + /* Eliminate fp registers off the end. */ + size_mask = (size_mask & 0xff) | (size_mask & argn_mask); + a = ffi_struct_float_merge (size_mask, gpr+argn, fpr+argn); + } + } + break; + + case FFI_TYPE_LONGDOUBLE: + argn = FFI_ALIGN (argn, 2); + a = (named && argn < 16 ? fpr : gpr) + argn; + argx = argn + 2; + break; + case FFI_TYPE_DOUBLE: + if (named && argn < 16) + a = fpr + argn; + break; + case FFI_TYPE_FLOAT: + if (named && argn < 16) + a = fpr + argn; + a += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + a += 4; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 6; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 7; + break; + + default: + abort(); + } + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* SPARC64 */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64.c new file mode 100644 index 0000000..9e04061 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64.c @@ -0,0 +1,608 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. 
+ + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#ifdef SPARC64 + +/* Flatten the contents of a structure to the parts that are passed in + floating point registers. The return is a bit mask wherein bit N + set means bytes [4*n, 4*n+3] are passed in %fN. + + We encode both the (running) size (maximum 32) and mask (maxumum 255) + into one integer. The size is placed in the low byte, so that align + and addition work correctly. The mask is placed in the second byte. */ + +static int +ffi_struct_float_mask (ffi_type *outer_type, int size_mask) +{ + ffi_type **elts; + ffi_type *t; + + if (outer_type->type == FFI_TYPE_COMPLEX) + { + int m = 0, tt = outer_type->elements[0]->type; + size_t z = outer_type->size; + + if (tt == FFI_TYPE_FLOAT + || tt == FFI_TYPE_DOUBLE + || tt == FFI_TYPE_LONGDOUBLE) + m = (1 << (z / 4)) - 1; + return (m << 8) | z; + } + FFI_ASSERT (outer_type->type == FFI_TYPE_STRUCT); + + for (elts = outer_type->elements; (t = *elts) != NULL; elts++) + { + size_t z = t->size; + int o, m, tt; + + size_mask = FFI_ALIGN(size_mask, t->alignment); + switch (t->type) + { + case FFI_TYPE_STRUCT: + size_mask = ffi_struct_float_mask (t, size_mask); + continue; + case FFI_TYPE_COMPLEX: + tt = t->elements[0]->type; + if (tt != FFI_TYPE_FLOAT + && tt != FFI_TYPE_DOUBLE + && tt != FFI_TYPE_LONGDOUBLE) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + m = (1 << (z / 4)) - 1; /* compute mask for type */ + o = (size_mask >> 2) & 0x3f; /* extract word offset */ + size_mask |= m << (o + 8); /* insert mask into place */ + break; + } + size_mask += z; + } + + size_mask = FFI_ALIGN(size_mask, outer_type->alignment); + FFI_ASSERT ((size_mask & 0xff) == outer_type->size); + + return size_mask; +} + +/* Merge floating point data into integer data. If the structure is + entirely floating point, simply return a pointer to the fp data. 
*/ + +static void * +ffi_struct_float_merge (int size_mask, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + return vi; + else if (mask == (1 << n) - 1) + return vf; + else + { + unsigned int *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + if ((mask >> i) & 1) + wi[i] = wf[i]; + + return vi; + } +} + +/* Similar, but place the data into VD in the end. */ + +void FFI_HIDDEN +ffi_struct_float_copy (int size_mask, void *vd, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + ; + else if (mask == (1 << n) - 1) + vi = vf; + else + { + unsigned int *wd = vd, *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + wd[i] = ((mask >> i) & 1 ? wf : wi)[i]; + return; + } + memcpy (vd, vi, size); +} + +/* Perform machine dependent cif processing */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes = 0; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_4; + break; + + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + if (rtype->size > 32) + { + flags = SPARC_RET_VOID | SPARC_FLAG_RET_IN_MEM; + bytes = 8; + } + else + { + int size_mask = ffi_struct_float_mask (rtype, 0); + int word_size = (size_mask >> 2) & 0x3f; + int all_mask = (1 << word_size) - 1; + int fp_mask = size_mask >> 8; + + flags = (size_mask << SPARC_SIZEMASK_SHIFT) | SPARC_RET_STRUCT; + + /* For special cases of all-int or all-fp, we can return + the value directly without popping through a struct copy. */ + if (fp_mask == 0) + { + if (rtype->alignment >= 8) + { + if (rtype->size == 8) + flags = SPARC_RET_INT64; + else if (rtype->size == 16) + flags = SPARC_RET_INT128; + } + } + else if (fp_mask == all_mask) + switch (word_size) + { + case 1: flags = SPARC_RET_F_1; break; + case 2: flags = SPARC_RET_F_2; break; + case 3: flags = SP_V9_RET_F_3; break; + case 4: flags = SPARC_RET_F_4; break; + /* 5 word structures skipped; handled via RET_STRUCT. */ + case 6: flags = SPARC_RET_F_6; break; + /* 7 word structures skipped; handled via RET_STRUCT. */ + case 8: flags = SPARC_RET_F_8; break; + } + } + break; + + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = SP_V9_RET_SINT32; + break; + case FFI_TYPE_UINT32: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + flags = SPARC_RET_INT64; + break; + + default: + abort(); + } + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + size_t a = ty->alignment; + + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + /* Large structs passed by reference. */ + if (z > 16) + { + a = z = 8; + break; + } + /* Small structs may be passed in integer or fp regs or both. 
*/ + if (bytes >= 16*8) + break; + if ((ffi_struct_float_mask (ty, 0) & 0xff00) == 0) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + flags |= SPARC_FLAG_FP_ARGS; + break; + } + bytes = FFI_ALIGN(bytes, a); + bytes += FFI_ALIGN(z, 8); + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 8) + bytes = 6 * 8; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 16); + + /* Include the call frame to prep_args. */ + bytes += 8*16 + 8*8; + + cif->bytes = bytes; + cif->flags = flags; + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned nfixedargs, unsigned ntotalargs) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern void ffi_call_v9(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +int FFI_HIDDEN +ffi_prep_args_v9(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if (flags & SPARC_FLAG_RET_IN_MEM) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. */ + flags = SPARC_RET_VOID; + } + } + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. */ + memset(argp, 0, 6*8); +#endif + + if (flags & SPARC_FLAG_RET_IN_MEM) + *argp++ = (unsigned long)rvalue; + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + size_t z; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *argp++ = *(SINT32 *)a; + break; + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + *argp++ = *(UINT32 *)a; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + *argp++ = *(UINT64 *)a; + break; + + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + { + /* For structures larger than 16 bytes we pass reference. 
*/ + *argp++ = (unsigned long)a; + break; + } + if (((unsigned long)argp & 15) && ty->alignment > 8) + argp++; + memcpy(argp, a, z); + argp += FFI_ALIGN(z, 8) / 8; + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V9); + + if (rvalue == NULL && (cif->flags & SPARC_FLAG_RET_IN_MEM)) + bytes += FFI_ALIGN (cif->rtype->size, 16); + + ffi_call_v9(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + asm volatile ("flush %0; flush %0+8" : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v9(void) FFI_HIDDEN; +extern void ffi_go_closure_v9(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn; + + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + /* Trampoline address is equal to the closure address. We take advantage + of that to reduce the trampoline size by 8 bytes. */ + fn = (unsigned long) ffi_closure_v9; + tramp[0] = 0x83414000; /* rd %pc, %g1 */ + tramp[1] = 0xca586010; /* ldx [%g1+16], %g5 */ + tramp[2] = 0x81c14000; /* jmp %g5 */ + tramp[3] = 0x01000000; /* nop */ + *((unsigned long *) &tramp[4]) = fn; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v9; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v9(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *gpr, unsigned long *fpr) +{ + ffi_type **arg_types; + void **avalue; + int i, argn, argx, nargs, flags, nfixedargs; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + nfixedargs = cif->nfixedargs; + + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. */ + if (flags & SPARC_FLAG_RET_IN_MEM) + { + rvalue = (void *) gpr[0]; + /* Skip the structure return address. */ + argn = 1; + } + else + argn = 0; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++, argn = argx) + { + int named = i < nfixedargs; + ffi_type *ty = arg_types[i]; + void *a = &gpr[argn]; + size_t z; + + argx = argn + 1; + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + a = *(void **)a; + else + { + argx = argn + FFI_ALIGN (z, 8) / 8; + if (named && argn < 16) + { + int size_mask = ffi_struct_float_mask (ty, 0); + int argn_mask = (0xffff00 >> argn) & 0xff00; + + /* Eliminate fp registers off the end. 
*/ + size_mask = (size_mask & 0xff) | (size_mask & argn_mask); + a = ffi_struct_float_merge (size_mask, gpr+argn, fpr+argn); + } + } + break; + + case FFI_TYPE_LONGDOUBLE: + argn = FFI_ALIGN (argn, 2); + a = (named && argn < 16 ? fpr : gpr) + argn; + argx = argn + 2; + break; + case FFI_TYPE_DOUBLE: + if (named && argn < 16) + a = fpr + argn; + break; + case FFI_TYPE_FLOAT: + if (named && argn < 16) + a = fpr + argn; + a += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + a += 4; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 6; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 7; + break; + + default: + abort(); + } + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* SPARC64 */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget 2.h new file mode 100644 index 0000000..2f4cd9a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget 2.h @@ -0,0 +1,81 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SPARC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined(__arch64__) || defined(__sparcv9) +#ifndef SPARC64 +#define SPARC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, +#ifdef SPARC64 + FFI_V9, + FFI_DEFAULT_ABI = FFI_V9, +#else + FFI_V8, + FFI_DEFAULT_ABI = FFI_V8, +#endif + FFI_LAST_ABI +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION 1 +#define FFI_TARGET_HAS_COMPLEX_TYPE 1 + +#ifdef SPARC64 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned int nfixedargs +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef SPARC64 +#define FFI_TRAMPOLINE_SIZE 24 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget.h new file mode 100644 index 0000000..2f4cd9a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget.h @@ -0,0 +1,81 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SPARC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined(__arch64__) || defined(__sparcv9) +#ifndef SPARC64 +#define SPARC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, +#ifdef SPARC64 + FFI_V9, + FFI_DEFAULT_ABI = FFI_V9, +#else + FFI_V8, + FFI_DEFAULT_ABI = FFI_V8, +#endif + FFI_LAST_ABI +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION 1 +#define FFI_TARGET_HAS_COMPLEX_TYPE 1 + +#ifdef SPARC64 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned int nfixedargs +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef SPARC64 +#define FFI_TRAMPOLINE_SIZE 24 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal 2.h new file mode 100644 index 0000000..0a66472 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal 2.h @@ -0,0 +1,26 @@ +#define SPARC_RET_VOID 0 +#define SPARC_RET_STRUCT 1 +#define SPARC_RET_UINT8 2 +#define SPARC_RET_SINT8 3 +#define SPARC_RET_UINT16 4 +#define SPARC_RET_SINT16 5 +#define SPARC_RET_UINT32 6 +#define SP_V9_RET_SINT32 7 /* v9 only */ +#define SP_V8_RET_CPLX16 7 /* v8 only */ +#define SPARC_RET_INT64 8 +#define SPARC_RET_INT128 9 + +/* Note that F_7 is missing, and is handled by SPARC_RET_STRUCT. */ +#define SPARC_RET_F_8 10 +#define SPARC_RET_F_6 11 +#define SPARC_RET_F_4 12 +#define SPARC_RET_F_2 13 +#define SP_V9_RET_F_3 14 /* v9 only */ +#define SP_V8_RET_CPLX8 14 /* v8 only */ +#define SPARC_RET_F_1 15 + +#define SPARC_FLAG_RET_MASK 15 +#define SPARC_FLAG_RET_IN_MEM 32 +#define SPARC_FLAG_FP_ARGS 64 + +#define SPARC_SIZEMASK_SHIFT 8 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal.h new file mode 100644 index 0000000..0a66472 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal.h @@ -0,0 +1,26 @@ +#define SPARC_RET_VOID 0 +#define SPARC_RET_STRUCT 1 +#define SPARC_RET_UINT8 2 +#define SPARC_RET_SINT8 3 +#define SPARC_RET_UINT16 4 +#define SPARC_RET_SINT16 5 +#define SPARC_RET_UINT32 6 +#define SP_V9_RET_SINT32 7 /* v9 only */ +#define SP_V8_RET_CPLX16 7 /* v8 only */ +#define SPARC_RET_INT64 8 +#define SPARC_RET_INT128 9 + +/* Note that F_7 is missing, and is handled by SPARC_RET_STRUCT. 
*/ +#define SPARC_RET_F_8 10 +#define SPARC_RET_F_6 11 +#define SPARC_RET_F_4 12 +#define SPARC_RET_F_2 13 +#define SP_V9_RET_F_3 14 /* v9 only */ +#define SP_V8_RET_CPLX8 14 /* v8 only */ +#define SPARC_RET_F_1 15 + +#define SPARC_FLAG_RET_MASK 15 +#define SPARC_FLAG_RET_IN_MEM 32 +#define SPARC_FLAG_FP_ARGS 64 + +#define SPARC_SIZEMASK_SHIFT 8 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8 2.S new file mode 100644 index 0000000..a2e4908 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8 2.S @@ -0,0 +1,443 @@ +/* ----------------------------------------------------------------------- + v8.S - Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 1996, 1997, 2003, 2004, 2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + + .text + +#ifndef __GNUC__ + .align 8 + .globl C(ffi_flush_icache) + .type C(ffi_flush_icache),#function + FFI_HIDDEN(C(ffi_flush_icache)) + +C(ffi_flush_icache): +1: iflush %o0 + iflush %o+8 + nop + nop + nop + nop + nop + retl + nop + .size C(ffi_flush_icache), . - C(ffi_flush_icache) +#endif + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + + .align 8 + .globl C(ffi_call_v8) + .type C(ffi_call_v8),#function + FFI_HIDDEN(C(ffi_call_v8)) + +C(ffi_call_v8): +.LUW0: + ! Allocate a stack frame sized by ffi_call. + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, 64+32, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v8) + mov %i3, %o3 ! copy avalue + + add %sp, 32, %sp ! deallocate prep frame + and %o0, SPARC_FLAG_RET_MASK, %l0 ! save return type + srl %o0, SPARC_SIZEMASK_SHIFT, %l1 ! save return size + ld [%sp+64+4], %o0 ! load all argument registers + ld [%sp+64+8], %o1 + ld [%sp+64+12], %o2 + ld [%sp+64+16], %o3 + cmp %l0, SPARC_RET_STRUCT ! struct return needs an unimp 4 + ld [%sp+64+20], %o4 + be 8f + ld [%sp+64+24], %o5 + + ! Call foreign function + call %i1 + mov %i5, %g2 ! 
load static chain + +0: call 1f ! load pc in %o7 + sll %l0, 4, %l0 +1: add %o7, %l0, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + unimp +E(SPARC_RET_UINT8) + and %o0, 0xff, %o0 + st %o0, [%i2] + ret + restore +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + b 7f + sra %o0, 24, %o0 +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + b 7f + srl %o0, 16, %o0 +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + b 7f + sra %o0, 16, %o0 +E(SPARC_RET_UINT32) +7: st %o0, [%i2] + ret + restore +E(SP_V8_RET_CPLX16) + sth %o0, [%i2+2] + b 9f + srl %o0, 16, %o0 +E(SPARC_RET_INT64) + st %o0, [%i2] + st %o1, [%i2+4] + ret + restore +E(SPARC_RET_INT128) + std %o0, [%i2] + std %o2, [%i2+8] + ret + restore +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + st %f3, [%i2+3*4] + nop + st %f2, [%i2+2*4] + nop +E(SPARC_RET_F_2) + st %f1, [%i2+4] + st %f0, [%i2] + ret + restore +E(SP_V8_RET_CPLX8) + stb %o0, [%i2+1] + b 0f + srl %o0, 8, %o0 +E(SPARC_RET_F_1) + st %f0, [%i2] + ret + restore + + .align 8 +9: sth %o0, [%i2] + ret + restore + .align 8 +0: stb %o0, [%i2] + ret + restore + + ! Struct returning functions expect and skip the unimp here. + ! To make it worse, conforming callees examine the unimp and + ! make sure the low 12 bits of the unimp match the size of + ! the struct being returned. + .align 8 +8: call 1f ! load pc in %o7 + sll %l1, 2, %l0 ! size * 4 +1: sll %l1, 4, %l1 ! size * 16 + add %l0, %l1, %l0 ! size * 20 + add %o7, %l0, %o7 ! o7 = 8b + size*20 + jmp %o7+(2f-8b) + mov %i5, %g2 ! load static chain +2: + +/* The Sun assembler doesn't understand .rept 0x1000. */ +#define rept1 \ + call %i1; \ + nop; \ + unimp (. - 2b) / 20; \ + ret; \ + restore + +#define rept16 \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1 + +#define rept256 \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16 + + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + +.LUW2: + .size C(ffi_call_v8),. - C(ffi_call_v8) + + +/* 16*4 register window + 1*4 struct return + 6*4 args backing store + + 8*4 return storage + 1*4 alignment. */ +#define STACKFRAME (16*4 + 4 + 6*4 + 8*4 + 4) + +/* ffi_closure_v8(...) + + Receives the closure argument in %g2. */ + +#ifdef HAVE_AS_REGISTER_PSEUDO_OP + .register %g2, #scratch +#endif + + .align 8 + .globl C(ffi_go_closure_v8) + .type C(ffi_go_closure_v8),#function + FFI_HIDDEN(C(ffi_go_closure_v8)) + +C(ffi_go_closure_v8): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ld [%g2+4], %o0 ! load cif + ld [%g2+8], %o1 ! load fun + b 0f + mov %g2, %o2 ! load user_data +.LUW5: + .size C(ffi_go_closure_v8), . - C(ffi_go_closure_v8) + + .align 8 + .globl C(ffi_closure_v8) + .type C(ffi_closure_v8),#function + FFI_HIDDEN(C(ffi_closure_v8)) + +C(ffi_closure_v8): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ld [%g2+FFI_TRAMPOLINE_SIZE], %o0 ! load cif + ld [%g2+FFI_TRAMPOLINE_SIZE+4], %o1 ! load fun + ld [%g2+FFI_TRAMPOLINE_SIZE+8], %o2 ! load user_data +0: + ! Store all of the potential argument registers in va_list format. 
+ st %i0, [%fp+68+0] + st %i1, [%fp+68+4] + st %i2, [%fp+68+8] + st %i3, [%fp+68+12] + st %i4, [%fp+68+16] + st %i5, [%fp+68+20] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. + add %fp, -8*4, %o3 + call ffi_closure_sparc_inner_v8 + add %fp, 64, %o4 + +0: call 1f + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o0 = o0 * 16 + add %o7, %o0, %o7 ! o7 = 0b + o0*16 + jmp %o7+(2f-0b) + add %fp, -8*4, %i2 + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + ld [%i2], %i0 + jmp %i7+12 + restore +E(SPARC_RET_UINT8) + ldub [%i2+3], %i0 + ret + restore +E(SPARC_RET_SINT8) + ldsb [%i2+3], %i0 + ret + restore +E(SPARC_RET_UINT16) + lduh [%i2+2], %i0 + ret + restore +E(SPARC_RET_SINT16) + ldsh [%i2+2], %i0 + ret + restore +E(SPARC_RET_UINT32) + ld [%i2], %i0 + ret + restore +E(SP_V8_RET_CPLX16) + ld [%i2], %i0 + ret + restore +E(SPARC_RET_INT64) + ldd [%i2], %i0 + ret + restore +E(SPARC_RET_INT128) + ldd [%i2], %i0 + ldd [%i2+8], %i2 + ret + restore +E(SPARC_RET_F_8) + ld [%i2+7*4], %f7 + nop + ld [%i2+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [%i2+5*4], %f5 + nop + ld [%i2+4*4], %f4 + nop +E(SPARC_RET_F_4) + ld [%i2+3*4], %f3 + nop + ld [%i2+2*4], %f2 + nop +E(SPARC_RET_F_2) + ldd [%i2], %f0 + ret + restore +E(SP_V8_RET_CPLX8) + lduh [%i2], %i0 + ret + restore +E(SPARC_RET_F_1) + ld [%i2], %f0 + ret + restore + +.LUW8: + .size C(ffi_closure_v8), . - C(ffi_closure_v8) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_ADDR(X) %r_disp32(X) +#else +# define FDE_ADDR(X) X +#endif + + .align 4 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x7c ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0 ! DW_CFA_def_cfa, %o6, offset 0 + .align 4 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW0) ! Initial location + .long .LUW2 - .LUW0 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW3) ! Initial location + .long .LUW5 - .LUW3 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW6) ! Initial location + .long .LUW8 - .LUW6 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE3: + +#endif /* !SPARC64 */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8.S new file mode 100644 index 0000000..a2e4908 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8.S @@ -0,0 +1,443 @@ +/* ----------------------------------------------------------------------- + v8.S - Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 1996, 1997, 2003, 2004, 2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + + .text + +#ifndef __GNUC__ + .align 8 + .globl C(ffi_flush_icache) + .type C(ffi_flush_icache),#function + FFI_HIDDEN(C(ffi_flush_icache)) + +C(ffi_flush_icache): +1: iflush %o0 + iflush %o+8 + nop + nop + nop + nop + nop + retl + nop + .size C(ffi_flush_icache), . - C(ffi_flush_icache) +#endif + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + + .align 8 + .globl C(ffi_call_v8) + .type C(ffi_call_v8),#function + FFI_HIDDEN(C(ffi_call_v8)) + +C(ffi_call_v8): +.LUW0: + ! Allocate a stack frame sized by ffi_call. + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, 64+32, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v8) + mov %i3, %o3 ! copy avalue + + add %sp, 32, %sp ! deallocate prep frame + and %o0, SPARC_FLAG_RET_MASK, %l0 ! save return type + srl %o0, SPARC_SIZEMASK_SHIFT, %l1 ! save return size + ld [%sp+64+4], %o0 ! load all argument registers + ld [%sp+64+8], %o1 + ld [%sp+64+12], %o2 + ld [%sp+64+16], %o3 + cmp %l0, SPARC_RET_STRUCT ! struct return needs an unimp 4 + ld [%sp+64+20], %o4 + be 8f + ld [%sp+64+24], %o5 + + ! Call foreign function + call %i1 + mov %i5, %g2 ! load static chain + +0: call 1f ! load pc in %o7 + sll %l0, 4, %l0 +1: add %o7, %l0, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + ! Note that each entry is 4 insns, enforced by the E macro. 
+ .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + unimp +E(SPARC_RET_UINT8) + and %o0, 0xff, %o0 + st %o0, [%i2] + ret + restore +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + b 7f + sra %o0, 24, %o0 +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + b 7f + srl %o0, 16, %o0 +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + b 7f + sra %o0, 16, %o0 +E(SPARC_RET_UINT32) +7: st %o0, [%i2] + ret + restore +E(SP_V8_RET_CPLX16) + sth %o0, [%i2+2] + b 9f + srl %o0, 16, %o0 +E(SPARC_RET_INT64) + st %o0, [%i2] + st %o1, [%i2+4] + ret + restore +E(SPARC_RET_INT128) + std %o0, [%i2] + std %o2, [%i2+8] + ret + restore +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + st %f3, [%i2+3*4] + nop + st %f2, [%i2+2*4] + nop +E(SPARC_RET_F_2) + st %f1, [%i2+4] + st %f0, [%i2] + ret + restore +E(SP_V8_RET_CPLX8) + stb %o0, [%i2+1] + b 0f + srl %o0, 8, %o0 +E(SPARC_RET_F_1) + st %f0, [%i2] + ret + restore + + .align 8 +9: sth %o0, [%i2] + ret + restore + .align 8 +0: stb %o0, [%i2] + ret + restore + + ! Struct returning functions expect and skip the unimp here. + ! To make it worse, conforming callees examine the unimp and + ! make sure the low 12 bits of the unimp match the size of + ! the struct being returned. + .align 8 +8: call 1f ! load pc in %o7 + sll %l1, 2, %l0 ! size * 4 +1: sll %l1, 4, %l1 ! size * 16 + add %l0, %l1, %l0 ! size * 20 + add %o7, %l0, %o7 ! o7 = 8b + size*20 + jmp %o7+(2f-8b) + mov %i5, %g2 ! load static chain +2: + +/* The Sun assembler doesn't understand .rept 0x1000. */ +#define rept1 \ + call %i1; \ + nop; \ + unimp (. - 2b) / 20; \ + ret; \ + restore + +#define rept16 \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1 + +#define rept256 \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16 + + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + +.LUW2: + .size C(ffi_call_v8),. - C(ffi_call_v8) + + +/* 16*4 register window + 1*4 struct return + 6*4 args backing store + + 8*4 return storage + 1*4 alignment. */ +#define STACKFRAME (16*4 + 4 + 6*4 + 8*4 + 4) + +/* ffi_closure_v8(...) + + Receives the closure argument in %g2. */ + +#ifdef HAVE_AS_REGISTER_PSEUDO_OP + .register %g2, #scratch +#endif + + .align 8 + .globl C(ffi_go_closure_v8) + .type C(ffi_go_closure_v8),#function + FFI_HIDDEN(C(ffi_go_closure_v8)) + +C(ffi_go_closure_v8): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ld [%g2+4], %o0 ! load cif + ld [%g2+8], %o1 ! load fun + b 0f + mov %g2, %o2 ! load user_data +.LUW5: + .size C(ffi_go_closure_v8), . - C(ffi_go_closure_v8) + + .align 8 + .globl C(ffi_closure_v8) + .type C(ffi_closure_v8),#function + FFI_HIDDEN(C(ffi_closure_v8)) + +C(ffi_closure_v8): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ld [%g2+FFI_TRAMPOLINE_SIZE], %o0 ! load cif + ld [%g2+FFI_TRAMPOLINE_SIZE+4], %o1 ! load fun + ld [%g2+FFI_TRAMPOLINE_SIZE+8], %o2 ! load user_data +0: + ! Store all of the potential argument registers in va_list format. + st %i0, [%fp+68+0] + st %i1, [%fp+68+4] + st %i2, [%fp+68+8] + st %i3, [%fp+68+12] + st %i4, [%fp+68+16] + st %i5, [%fp+68+20] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. 
+ add %fp, -8*4, %o3 + call ffi_closure_sparc_inner_v8 + add %fp, 64, %o4 + +0: call 1f + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o0 = o0 * 16 + add %o7, %o0, %o7 ! o7 = 0b + o0*16 + jmp %o7+(2f-0b) + add %fp, -8*4, %i2 + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + ld [%i2], %i0 + jmp %i7+12 + restore +E(SPARC_RET_UINT8) + ldub [%i2+3], %i0 + ret + restore +E(SPARC_RET_SINT8) + ldsb [%i2+3], %i0 + ret + restore +E(SPARC_RET_UINT16) + lduh [%i2+2], %i0 + ret + restore +E(SPARC_RET_SINT16) + ldsh [%i2+2], %i0 + ret + restore +E(SPARC_RET_UINT32) + ld [%i2], %i0 + ret + restore +E(SP_V8_RET_CPLX16) + ld [%i2], %i0 + ret + restore +E(SPARC_RET_INT64) + ldd [%i2], %i0 + ret + restore +E(SPARC_RET_INT128) + ldd [%i2], %i0 + ldd [%i2+8], %i2 + ret + restore +E(SPARC_RET_F_8) + ld [%i2+7*4], %f7 + nop + ld [%i2+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [%i2+5*4], %f5 + nop + ld [%i2+4*4], %f4 + nop +E(SPARC_RET_F_4) + ld [%i2+3*4], %f3 + nop + ld [%i2+2*4], %f2 + nop +E(SPARC_RET_F_2) + ldd [%i2], %f0 + ret + restore +E(SP_V8_RET_CPLX8) + lduh [%i2], %i0 + ret + restore +E(SPARC_RET_F_1) + ld [%i2], %f0 + ret + restore + +.LUW8: + .size C(ffi_closure_v8), . - C(ffi_closure_v8) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_ADDR(X) %r_disp32(X) +#else +# define FDE_ADDR(X) X +#endif + + .align 4 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x7c ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0 ! DW_CFA_def_cfa, %o6, offset 0 + .align 4 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW0) ! Initial location + .long .LUW2 - .LUW0 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW3) ! Initial location + .long .LUW5 - .LUW3 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW6) ! Initial location + .long .LUW8 - .LUW6 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE3: + +#endif /* !SPARC64 */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9 2.S new file mode 100644 index 0000000..55f8f43 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9 2.S @@ -0,0 +1,440 @@ +/* ----------------------------------------------------------------------- + v9.S - Copyright (c) 2000, 2003, 2004, 2008 Red Hat, Inc. + + SPARC 64-bit Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifdef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + +#define STACK_BIAS 2047 + + .text + .align 8 + .globl C(ffi_call_v9) + .type C(ffi_call_v9),#function + FFI_HIDDEN(C(ffi_call_v9)) + +C(ffi_call_v9): +.LUW0: + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, STACK_BIAS+128+48, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v9) + mov %i3, %o3 ! copy avalue + + andcc %o0, SPARC_FLAG_FP_ARGS, %g0 ! need fp regs? + add %sp, 48, %sp ! deallocate prep frame + be,pt %xcc, 1f + mov %o0, %l0 ! save flags + + ldd [%sp+STACK_BIAS+128], %f0 ! load all fp arg regs + ldd [%sp+STACK_BIAS+128+8], %f2 + ldd [%sp+STACK_BIAS+128+16], %f4 + ldd [%sp+STACK_BIAS+128+24], %f6 + ldd [%sp+STACK_BIAS+128+32], %f8 + ldd [%sp+STACK_BIAS+128+40], %f10 + ldd [%sp+STACK_BIAS+128+48], %f12 + ldd [%sp+STACK_BIAS+128+56], %f14 + ldd [%sp+STACK_BIAS+128+64], %f16 + ldd [%sp+STACK_BIAS+128+72], %f18 + ldd [%sp+STACK_BIAS+128+80], %f20 + ldd [%sp+STACK_BIAS+128+88], %f22 + ldd [%sp+STACK_BIAS+128+96], %f24 + ldd [%sp+STACK_BIAS+128+104], %f26 + ldd [%sp+STACK_BIAS+128+112], %f28 + ldd [%sp+STACK_BIAS+128+120], %f30 + +1: ldx [%sp+STACK_BIAS+128], %o0 ! 
load all int arg regs + ldx [%sp+STACK_BIAS+128+8], %o1 + ldx [%sp+STACK_BIAS+128+16], %o2 + ldx [%sp+STACK_BIAS+128+24], %o3 + ldx [%sp+STACK_BIAS+128+32], %o4 + ldx [%sp+STACK_BIAS+128+40], %o5 + call %i1 + mov %i5, %g5 ! load static chain + +0: call 1f ! load pc in %o7 + and %l0, SPARC_FLAG_RET_MASK, %l1 +1: sll %l1, 4, %l1 + add %o7, %l1, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + add %sp, STACK_BIAS-64+128+48, %l2 + sub %sp, 64, %sp + b 8f + stx %o0, [%l2] +E(SPARC_RET_UINT8) + and %o0, 0xff, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + sra %o0, 24, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + srl %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + sra %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT32) + srl %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SP_V9_RET_SINT32) + sra %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_INT64) + stx %o0, [%i2] + return %i7+8 + nop +E(SPARC_RET_INT128) + stx %o0, [%i2] + stx %o1, [%i2+8] + return %i7+8 + nop +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + std %f2, [%i2+2*4] + return %i7+8 + std %f0, [%o2] +E(SPARC_RET_F_2) + return %i7+8 + std %f0, [%o2] +E(SP_V9_RET_F_3) + st %f2, [%i2+2*4] + nop + st %f1, [%i2+1*4] + nop +E(SPARC_RET_F_1) + return %i7+8 + st %f0, [%o2] + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: stx %o1, [%l2+8] + stx %o2, [%l2+16] + stx %o3, [%l2+24] + std %f0, [%l2+32] + std %f2, [%l2+40] + std %f4, [%l2+48] + std %f6, [%l2+56] + + ! Copy the structure into place. + srl %l0, SPARC_SIZEMASK_SHIFT, %o0 ! load size_mask + mov %i2, %o1 ! load dst + mov %l2, %o2 ! load src_gp + call C(ffi_struct_float_copy) + add %l2, 32, %o3 ! load src_fp + + return %i7+8 + nop + +.LUW2: + .size C(ffi_call_v9), . - C(ffi_call_v9) + + +#undef STACKFRAME +#define STACKFRAME 336 /* 16*8 register window + + 6*8 args backing store + + 20*8 locals */ +#define FP %fp+STACK_BIAS + +/* ffi_closure_v9(...) + + Receives the closure argument in %g1. */ + + .align 8 + .globl C(ffi_go_closure_v9) + .type C(ffi_go_closure_v9),#function + FFI_HIDDEN(C(ffi_go_closure_v9)) + +C(ffi_go_closure_v9): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ldx [%g5+8], %o0 + ldx [%g5+16], %o1 + b 0f + mov %g5, %o2 + +.LUW5: + .size C(ffi_go_closure_v9), . - C(ffi_go_closure_v9) + + .align 8 + .globl C(ffi_closure_v9) + .type C(ffi_closure_v9),#function + FFI_HIDDEN(C(ffi_closure_v9)) + +C(ffi_closure_v9): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ldx [%g1+FFI_TRAMPOLINE_SIZE], %o0 + ldx [%g1+FFI_TRAMPOLINE_SIZE+8], %o1 + ldx [%g1+FFI_TRAMPOLINE_SIZE+16], %o2 +0: + ! Store all of the potential argument registers in va_list format. + stx %i0, [FP+128+0] + stx %i1, [FP+128+8] + stx %i2, [FP+128+16] + stx %i3, [FP+128+24] + stx %i4, [FP+128+32] + stx %i5, [FP+128+40] + + ! Store possible floating point argument registers too. + std %f0, [FP-128] + std %f2, [FP-120] + std %f4, [FP-112] + std %f6, [FP-104] + std %f8, [FP-96] + std %f10, [FP-88] + std %f12, [FP-80] + std %f14, [FP-72] + std %f16, [FP-64] + std %f18, [FP-56] + std %f20, [FP-48] + std %f22, [FP-40] + std %f24, [FP-32] + std %f26, [FP-24] + std %f28, [FP-16] + std %f30, [FP-8] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. 
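On SPARC V9, floating-point arguments and results travel in the %f register bank, which is why ffi_call_v9 conditionally loads the sixteen double registers %f0-%f30 before the call and why the closure stub above spills the same bank before the call just below into ffi_closure_sparc_inner_v9. A small sketch that exercises this path through the portable API — calling pow() from libm, an all-double signature, so the value comes back in the floating-point registers rather than %o0 (compile with -lffi -lm; pow is an arbitrary example target):

#include <ffi.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *argt[2] = { &ffi_type_double, &ffi_type_double };
  double base = 2.0, exponent = 10.0, result = 0.0;
  void *argv[2] = { &base, &exponent };

  /* double(double, double): both arguments and the result use the FP bank. */
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, argt) == FFI_OK)
    ffi_call(&cif, FFI_FN(pow), &result, argv);

  printf("%f\n", result);   /* 1024.000000 */
  return 0;
}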
+ add %fp, STACK_BIAS-160, %o3 + add %fp, STACK_BIAS+128, %o4 + call C(ffi_closure_sparc_inner_v9) + add %fp, STACK_BIAS-128, %o5 + +0: call 1f ! load pc in %o7 + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o2 = i2 * 16 + add %o7, %o0, %o7 ! o7 = 0b + i2*16 + jmp %o7+(2f-0b) + nop + + ! Note that we cannot load the data in the delay slot of + ! the return insn because the data is in the stack frame + ! that is deallocated by the return. + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + ldx [FP-160], %i0 + ldd [FP-160], %f0 + b 8f + ldx [FP-152], %i1 +E(SPARC_RET_UINT8) + ldub [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT8) + ldsb [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT16) + lduh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT16) + ldsh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT32) + lduw [FP-160+4], %i0 + return %i7+8 + nop +E(SP_V9_RET_SINT32) + ldsw [FP-160+4], %i0 + return %i7+8 + nop +E(SPARC_RET_INT64) + ldx [FP-160], %i0 + return %i7+8 + nop +E(SPARC_RET_INT128) + ldx [FP-160], %i0 + ldx [FP-160+8], %i1 + return %i7+8 + nop +E(SPARC_RET_F_8) + ld [FP-160+7*4], %f7 + nop + ld [FP-160+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [FP-160+5*4], %f5 + nop + ld [FP-160+4*4], %f4 + nop +E(SPARC_RET_F_4) + ldd [FP-160], %f0 + ldd [FP-160+8], %f2 + return %i7+8 + nop +E(SPARC_RET_F_2) + ldd [FP-160], %f0 + return %i7+8 + nop +E(SP_V9_RET_F_3) + ld [FP-160+2*4], %f2 + nop + ld [FP-160+1*4], %f1 + nop +E(SPARC_RET_F_1) + ld [FP-160], %f0 + return %i7+8 + nop + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: ldd [FP-152], %f2 + ldx [FP-144], %i2 + ldd [FP-144], %f4 + ldx [FP-136], %i3 + ldd [FP-136], %f6 + return %i7+8 + nop + +.LUW8: + .size C(ffi_closure_v9), . - C(ffi_closure_v9) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_RANGE(B, E) .long %r_disp32(B), E - B +#else +# define FDE_RANGE(B, E) .align 8; .xword B, E - B +#endif + + .align 8 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x78 ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0xff, 0xf ! DW_CFA_def_cfa, %o6, offset 0x7ff + .align 8 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW0, .LUW2) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW3, .LUW5) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW6, .LUW8) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE3: + +#endif /* SPARC64 */ +#ifdef __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9.S new file mode 100644 index 0000000..55f8f43 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9.S @@ -0,0 +1,440 @@ +/* ----------------------------------------------------------------------- + v9.S - Copyright (c) 2000, 2003, 2004, 2008 Red Hat, Inc. + + SPARC 64-bit Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifdef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + +#define STACK_BIAS 2047 + + .text + .align 8 + .globl C(ffi_call_v9) + .type C(ffi_call_v9),#function + FFI_HIDDEN(C(ffi_call_v9)) + +C(ffi_call_v9): +.LUW0: + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, STACK_BIAS+128+48, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v9) + mov %i3, %o3 ! copy avalue + + andcc %o0, SPARC_FLAG_FP_ARGS, %g0 ! need fp regs? + add %sp, 48, %sp ! deallocate prep frame + be,pt %xcc, 1f + mov %o0, %l0 ! save flags + + ldd [%sp+STACK_BIAS+128], %f0 ! load all fp arg regs + ldd [%sp+STACK_BIAS+128+8], %f2 + ldd [%sp+STACK_BIAS+128+16], %f4 + ldd [%sp+STACK_BIAS+128+24], %f6 + ldd [%sp+STACK_BIAS+128+32], %f8 + ldd [%sp+STACK_BIAS+128+40], %f10 + ldd [%sp+STACK_BIAS+128+48], %f12 + ldd [%sp+STACK_BIAS+128+56], %f14 + ldd [%sp+STACK_BIAS+128+64], %f16 + ldd [%sp+STACK_BIAS+128+72], %f18 + ldd [%sp+STACK_BIAS+128+80], %f20 + ldd [%sp+STACK_BIAS+128+88], %f22 + ldd [%sp+STACK_BIAS+128+96], %f24 + ldd [%sp+STACK_BIAS+128+104], %f26 + ldd [%sp+STACK_BIAS+128+112], %f28 + ldd [%sp+STACK_BIAS+128+120], %f30 + +1: ldx [%sp+STACK_BIAS+128], %o0 ! load all int arg regs + ldx [%sp+STACK_BIAS+128+8], %o1 + ldx [%sp+STACK_BIAS+128+16], %o2 + ldx [%sp+STACK_BIAS+128+24], %o3 + ldx [%sp+STACK_BIAS+128+32], %o4 + ldx [%sp+STACK_BIAS+128+40], %o5 + call %i1 + mov %i5, %g5 ! 
load static chain + +0: call 1f ! load pc in %o7 + and %l0, SPARC_FLAG_RET_MASK, %l1 +1: sll %l1, 4, %l1 + add %o7, %l1, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + add %sp, STACK_BIAS-64+128+48, %l2 + sub %sp, 64, %sp + b 8f + stx %o0, [%l2] +E(SPARC_RET_UINT8) + and %o0, 0xff, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + sra %o0, 24, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + srl %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + sra %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT32) + srl %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SP_V9_RET_SINT32) + sra %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_INT64) + stx %o0, [%i2] + return %i7+8 + nop +E(SPARC_RET_INT128) + stx %o0, [%i2] + stx %o1, [%i2+8] + return %i7+8 + nop +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + std %f2, [%i2+2*4] + return %i7+8 + std %f0, [%o2] +E(SPARC_RET_F_2) + return %i7+8 + std %f0, [%o2] +E(SP_V9_RET_F_3) + st %f2, [%i2+2*4] + nop + st %f1, [%i2+1*4] + nop +E(SPARC_RET_F_1) + return %i7+8 + st %f0, [%o2] + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: stx %o1, [%l2+8] + stx %o2, [%l2+16] + stx %o3, [%l2+24] + std %f0, [%l2+32] + std %f2, [%l2+40] + std %f4, [%l2+48] + std %f6, [%l2+56] + + ! Copy the structure into place. + srl %l0, SPARC_SIZEMASK_SHIFT, %o0 ! load size_mask + mov %i2, %o1 ! load dst + mov %l2, %o2 ! load src_gp + call C(ffi_struct_float_copy) + add %l2, 32, %o3 ! load src_fp + + return %i7+8 + nop + +.LUW2: + .size C(ffi_call_v9), . - C(ffi_call_v9) + + +#undef STACKFRAME +#define STACKFRAME 336 /* 16*8 register window + + 6*8 args backing store + + 20*8 locals */ +#define FP %fp+STACK_BIAS + +/* ffi_closure_v9(...) + + Receives the closure argument in %g1. */ + + .align 8 + .globl C(ffi_go_closure_v9) + .type C(ffi_go_closure_v9),#function + FFI_HIDDEN(C(ffi_go_closure_v9)) + +C(ffi_go_closure_v9): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ldx [%g5+8], %o0 + ldx [%g5+16], %o1 + b 0f + mov %g5, %o2 + +.LUW5: + .size C(ffi_go_closure_v9), . - C(ffi_go_closure_v9) + + .align 8 + .globl C(ffi_closure_v9) + .type C(ffi_closure_v9),#function + FFI_HIDDEN(C(ffi_closure_v9)) + +C(ffi_closure_v9): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ldx [%g1+FFI_TRAMPOLINE_SIZE], %o0 + ldx [%g1+FFI_TRAMPOLINE_SIZE+8], %o1 + ldx [%g1+FFI_TRAMPOLINE_SIZE+16], %o2 +0: + ! Store all of the potential argument registers in va_list format. + stx %i0, [FP+128+0] + stx %i1, [FP+128+8] + stx %i2, [FP+128+16] + stx %i3, [FP+128+24] + stx %i4, [FP+128+32] + stx %i5, [FP+128+40] + + ! Store possible floating point argument registers too. + std %f0, [FP-128] + std %f2, [FP-120] + std %f4, [FP-112] + std %f6, [FP-104] + std %f8, [FP-96] + std %f10, [FP-88] + std %f12, [FP-80] + std %f14, [FP-72] + std %f16, [FP-64] + std %f18, [FP-56] + std %f20, [FP-48] + std %f22, [FP-40] + std %f24, [FP-32] + std %f26, [FP-24] + std %f28, [FP-16] + std %f30, [FP-8] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. + add %fp, STACK_BIAS-160, %o3 + add %fp, STACK_BIAS+128, %o4 + call C(ffi_closure_sparc_inner_v9) + add %fp, STACK_BIAS-128, %o5 + +0: call 1f ! load pc in %o7 + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o2 = i2 * 16 + add %o7, %o0, %o7 ! 
o7 = 0b + i2*16 + jmp %o7+(2f-0b) + nop + + ! Note that we cannot load the data in the delay slot of + ! the return insn because the data is in the stack frame + ! that is deallocated by the return. + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + ldx [FP-160], %i0 + ldd [FP-160], %f0 + b 8f + ldx [FP-152], %i1 +E(SPARC_RET_UINT8) + ldub [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT8) + ldsb [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT16) + lduh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT16) + ldsh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT32) + lduw [FP-160+4], %i0 + return %i7+8 + nop +E(SP_V9_RET_SINT32) + ldsw [FP-160+4], %i0 + return %i7+8 + nop +E(SPARC_RET_INT64) + ldx [FP-160], %i0 + return %i7+8 + nop +E(SPARC_RET_INT128) + ldx [FP-160], %i0 + ldx [FP-160+8], %i1 + return %i7+8 + nop +E(SPARC_RET_F_8) + ld [FP-160+7*4], %f7 + nop + ld [FP-160+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [FP-160+5*4], %f5 + nop + ld [FP-160+4*4], %f4 + nop +E(SPARC_RET_F_4) + ldd [FP-160], %f0 + ldd [FP-160+8], %f2 + return %i7+8 + nop +E(SPARC_RET_F_2) + ldd [FP-160], %f0 + return %i7+8 + nop +E(SP_V9_RET_F_3) + ld [FP-160+2*4], %f2 + nop + ld [FP-160+1*4], %f1 + nop +E(SPARC_RET_F_1) + ld [FP-160], %f0 + return %i7+8 + nop + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: ldd [FP-152], %f2 + ldx [FP-144], %i2 + ldd [FP-144], %f4 + ldx [FP-136], %i3 + ldd [FP-136], %f6 + return %i7+8 + nop + +.LUW8: + .size C(ffi_closure_v9), . - C(ffi_closure_v9) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_RANGE(B, E) .long %r_disp32(B), E - B +#else +# define FDE_RANGE(B, E) .align 8; .xword B, E - B +#endif + + .align 8 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x78 ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0xff, 0xf ! DW_CFA_def_cfa, %o6, offset 0x7ff + .align 8 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW0, .LUW2) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW3, .LUW5) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW6, .LUW8) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE3: + +#endif /* SPARC64 */ +#ifdef __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi 2.c new file mode 100644 index 0000000..3a94469 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi 2.c @@ -0,0 +1,355 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Tilera Corp. + + TILE Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* The first 10 registers are used to pass arguments and return values. */ +#define NUM_ARG_REGS 10 + +/* Performs a raw function call with the given NUM_ARG_REGS register arguments + and the specified additional stack arguments (if any). */ +extern void ffi_call_tile(ffi_sarg reg_args[NUM_ARG_REGS], + const ffi_sarg *stack_args, + size_t stack_args_bytes, + void (*fnaddr)(void)) + FFI_HIDDEN; + +/* This handles the raw call from the closure stub, cleaning up the + parameters and delegating to ffi_closure_tile_inner. */ +extern void ffi_closure_tile(void) FFI_HIDDEN; + + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* We always allocate room for all registers. Even if we don't + use them as parameters, they get returned in the same array + as struct return values so we need to make room. */ + if (cif->bytes < NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->bytes = NUM_ARG_REGS * FFI_SIZEOF_ARG; + + if (cif->rtype->size > NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->flags = FFI_TYPE_STRUCT; + else + cif->flags = FFI_TYPE_INT; + + /* Nothing to do. */ + return FFI_OK; +} + + +static long +assign_to_ffi_arg(ffi_sarg *out, void *in, const ffi_type *type, + int write_to_reg) +{ + switch (type->type) + { + case FFI_TYPE_SINT8: + *out = *(SINT8 *)in; + return 1; + + case FFI_TYPE_UINT8: + *out = *(UINT8 *)in; + return 1; + + case FFI_TYPE_SINT16: + *out = *(SINT16 *)in; + return 1; + + case FFI_TYPE_UINT16: + *out = *(UINT16 *)in; + return 1; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LP64__ + case FFI_TYPE_POINTER: +#endif + /* Note that even unsigned 32-bit quantities are sign extended + on tilegx when stored in a register. 
*/ + *out = *(SINT32 *)in; + return 1; + + case FFI_TYPE_FLOAT: +#ifdef __tilegx__ + if (write_to_reg) + { + /* Properly sign extend the value. */ + union { float f; SINT32 s32; } val; + val.f = *(float *)in; + *out = val.s32; + } + else +#endif + { + *(float *)out = *(float *)in; + } + return 1; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: +#ifdef __LP64__ + case FFI_TYPE_POINTER: +#endif + *(UINT64 *)out = *(UINT64 *)in; + return sizeof(UINT64) / FFI_SIZEOF_ARG; + + case FFI_TYPE_STRUCT: + memcpy(out, in, type->size); + return (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + case FFI_TYPE_VOID: + /* Must be a return type. Nothing to do. */ + return 0; + + default: + FFI_ASSERT(0); + return -1; + } +} + + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_sarg * const arg_mem = alloca(cif->bytes); + ffi_sarg * const reg_args = arg_mem; + ffi_sarg * const stack_args = ®_args[NUM_ARG_REGS]; + ffi_sarg *argp = arg_mem; + ffi_type ** const arg_types = cif->arg_types; + const long num_args = cif->nargs; + long i; + + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Pass a hidden pointer to the return value. We make sure there + is scratch space for the callee to store the return value even if + our caller doesn't care about it. */ + *argp++ = (intptr_t)(rvalue ? rvalue : alloca(cif->rtype->size)); + + /* No more work needed to return anything. */ + rvalue = NULL; + } + + for (i = 0; i < num_args; i++) + { + ffi_type *type = arg_types[i]; + void * const arg_in = avalue[i]; + ptrdiff_t arg_word = argp - arg_mem; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (type->type == FFI_TYPE_STRUCT) + { + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + if (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS) + { + /* Args are not allowed to span registers and the stack. */ + argp = stack_args; + } + + memcpy(argp, arg_in, type->size); + argp += arg_size_in_words; + } + else + { + argp += assign_to_ffi_arg(argp, arg_in, arg_types[i], 1); + } + } + + /* Actually do the call. */ + ffi_call_tile(reg_args, stack_args, + cif->bytes - (NUM_ARG_REGS * FFI_SIZEOF_ARG), fn); + + if (rvalue != NULL) + assign_to_ffi_arg(rvalue, reg_args, cif->rtype, 0); +} + + +/* Template code for closure. */ +extern const UINT64 ffi_template_tramp_tile[] FFI_HIDDEN; + + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ +#ifdef __tilegx__ + /* TILE-Gx */ + SINT64 c; + SINT64 h; + int s; + UINT64 *out; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + + c = (intptr_t)closure; + h = (intptr_t)ffi_closure_tile; + s = 0; + + /* Find the smallest shift count that doesn't lose information + (i.e. no need to explicitly insert high bits of the address that + are just the sign extension of the low bits). */ + while ((c >> s) != (SINT16)(c >> s) || (h >> s) != (SINT16)(h >> s)) + s += 16; + +#define OPS(a, b, shift) \ + (create_Imm16_X0((a) >> (shift)) | create_Imm16_X1((b) >> (shift))) + + /* Emit the moveli. 
*/ + *out++ = ffi_template_tramp_tile[0] | OPS(c, h, s); + for (s -= 16; s >= 0; s -= 16) + *out++ = ffi_template_tramp_tile[1] | OPS(c, h, s); + +#undef OPS + + *out++ = ffi_template_tramp_tile[2]; + +#else + /* TILEPro */ + UINT64 *out; + intptr_t delta; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + delta = (intptr_t)ffi_closure_tile - (intptr_t)codeloc; + + *out++ = ffi_template_tramp_tile[0] | create_JOffLong_X1(delta >> 3); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + invalidate_icache(closure->tramp, (char *)out - closure->tramp, + getpagesize()); + + return FFI_OK; +} + + +/* This is called by the assembly wrapper for closures. This does + all of the work. On entry reg_args[0] holds the values the registers + had when the closure was invoked. On return reg_args[1] holds the register + values to be returned to the caller (many of which may be garbage). */ +void FFI_HIDDEN +ffi_closure_tile_inner(ffi_closure *closure, + ffi_sarg reg_args[2][NUM_ARG_REGS], + ffi_sarg *stack_args) +{ + ffi_cif * const cif = closure->cif; + void ** const avalue = alloca(cif->nargs * sizeof(void *)); + void *rvalue; + ffi_type ** const arg_types = cif->arg_types; + ffi_sarg * const reg_args_in = reg_args[0]; + ffi_sarg * const reg_args_out = reg_args[1]; + ffi_sarg * argp; + long i, arg_word, nargs = cif->nargs; + /* Use a union to guarantee proper alignment for double. */ + union { ffi_sarg arg[NUM_ARG_REGS]; double d; UINT64 u64; } closure_ret; + + /* Start out reading register arguments. */ + argp = reg_args_in; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. */ + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Return by reference via hidden pointer. */ + rvalue = (void *)(intptr_t)*argp++; + arg_word = 1; + } + else + { + /* Return the value in registers. */ + rvalue = &closure_ret; + arg_word = 0; + } + + /* Grab the addresses of the arguments. */ + for (i = 0; i < nargs; i++) + { + ffi_type * const type = arg_types[i]; + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (arg_word == NUM_ARG_REGS || + (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS)) + { + /* Switch to reading arguments from the stack. */ + argp = stack_args; + arg_word = NUM_ARG_REGS; + } + + avalue[i] = argp; + argp += arg_size_in_words; + arg_word += arg_size_in_words; + } + + /* Invoke the closure. */ + closure->fun(cif, rvalue, avalue, closure->user_data); + + if (cif->flags != FFI_TYPE_STRUCT) + { + /* Canonicalize for register representation. */ + assign_to_ffi_arg(reg_args_out, &closure_ret, cif->rtype, 1); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi.c new file mode 100644 index 0000000..3a94469 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi.c @@ -0,0 +1,355 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Tilera Corp. 
+ + TILE Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* The first 10 registers are used to pass arguments and return values. */ +#define NUM_ARG_REGS 10 + +/* Performs a raw function call with the given NUM_ARG_REGS register arguments + and the specified additional stack arguments (if any). */ +extern void ffi_call_tile(ffi_sarg reg_args[NUM_ARG_REGS], + const ffi_sarg *stack_args, + size_t stack_args_bytes, + void (*fnaddr)(void)) + FFI_HIDDEN; + +/* This handles the raw call from the closure stub, cleaning up the + parameters and delegating to ffi_closure_tile_inner. */ +extern void ffi_closure_tile(void) FFI_HIDDEN; + + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* We always allocate room for all registers. Even if we don't + use them as parameters, they get returned in the same array + as struct return values so we need to make room. */ + if (cif->bytes < NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->bytes = NUM_ARG_REGS * FFI_SIZEOF_ARG; + + if (cif->rtype->size > NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->flags = FFI_TYPE_STRUCT; + else + cif->flags = FFI_TYPE_INT; + + /* Nothing to do. */ + return FFI_OK; +} + + +static long +assign_to_ffi_arg(ffi_sarg *out, void *in, const ffi_type *type, + int write_to_reg) +{ + switch (type->type) + { + case FFI_TYPE_SINT8: + *out = *(SINT8 *)in; + return 1; + + case FFI_TYPE_UINT8: + *out = *(UINT8 *)in; + return 1; + + case FFI_TYPE_SINT16: + *out = *(SINT16 *)in; + return 1; + + case FFI_TYPE_UINT16: + *out = *(UINT16 *)in; + return 1; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LP64__ + case FFI_TYPE_POINTER: +#endif + /* Note that even unsigned 32-bit quantities are sign extended + on tilegx when stored in a register. */ + *out = *(SINT32 *)in; + return 1; + + case FFI_TYPE_FLOAT: +#ifdef __tilegx__ + if (write_to_reg) + { + /* Properly sign extend the value. 
*/ + union { float f; SINT32 s32; } val; + val.f = *(float *)in; + *out = val.s32; + } + else +#endif + { + *(float *)out = *(float *)in; + } + return 1; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: +#ifdef __LP64__ + case FFI_TYPE_POINTER: +#endif + *(UINT64 *)out = *(UINT64 *)in; + return sizeof(UINT64) / FFI_SIZEOF_ARG; + + case FFI_TYPE_STRUCT: + memcpy(out, in, type->size); + return (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + case FFI_TYPE_VOID: + /* Must be a return type. Nothing to do. */ + return 0; + + default: + FFI_ASSERT(0); + return -1; + } +} + + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_sarg * const arg_mem = alloca(cif->bytes); + ffi_sarg * const reg_args = arg_mem; + ffi_sarg * const stack_args = ®_args[NUM_ARG_REGS]; + ffi_sarg *argp = arg_mem; + ffi_type ** const arg_types = cif->arg_types; + const long num_args = cif->nargs; + long i; + + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Pass a hidden pointer to the return value. We make sure there + is scratch space for the callee to store the return value even if + our caller doesn't care about it. */ + *argp++ = (intptr_t)(rvalue ? rvalue : alloca(cif->rtype->size)); + + /* No more work needed to return anything. */ + rvalue = NULL; + } + + for (i = 0; i < num_args; i++) + { + ffi_type *type = arg_types[i]; + void * const arg_in = avalue[i]; + ptrdiff_t arg_word = argp - arg_mem; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (type->type == FFI_TYPE_STRUCT) + { + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + if (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS) + { + /* Args are not allowed to span registers and the stack. */ + argp = stack_args; + } + + memcpy(argp, arg_in, type->size); + argp += arg_size_in_words; + } + else + { + argp += assign_to_ffi_arg(argp, arg_in, arg_types[i], 1); + } + } + + /* Actually do the call. */ + ffi_call_tile(reg_args, stack_args, + cif->bytes - (NUM_ARG_REGS * FFI_SIZEOF_ARG), fn); + + if (rvalue != NULL) + assign_to_ffi_arg(rvalue, reg_args, cif->rtype, 0); +} + + +/* Template code for closure. */ +extern const UINT64 ffi_template_tramp_tile[] FFI_HIDDEN; + + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ +#ifdef __tilegx__ + /* TILE-Gx */ + SINT64 c; + SINT64 h; + int s; + UINT64 *out; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + + c = (intptr_t)closure; + h = (intptr_t)ffi_closure_tile; + s = 0; + + /* Find the smallest shift count that doesn't lose information + (i.e. no need to explicitly insert high bits of the address that + are just the sign extension of the low bits). */ + while ((c >> s) != (SINT16)(c >> s) || (h >> s) != (SINT16)(h >> s)) + s += 16; + +#define OPS(a, b, shift) \ + (create_Imm16_X0((a) >> (shift)) | create_Imm16_X1((b) >> (shift))) + + /* Emit the moveli. 
*/ + *out++ = ffi_template_tramp_tile[0] | OPS(c, h, s); + for (s -= 16; s >= 0; s -= 16) + *out++ = ffi_template_tramp_tile[1] | OPS(c, h, s); + +#undef OPS + + *out++ = ffi_template_tramp_tile[2]; + +#else + /* TILEPro */ + UINT64 *out; + intptr_t delta; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + delta = (intptr_t)ffi_closure_tile - (intptr_t)codeloc; + + *out++ = ffi_template_tramp_tile[0] | create_JOffLong_X1(delta >> 3); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + invalidate_icache(closure->tramp, (char *)out - closure->tramp, + getpagesize()); + + return FFI_OK; +} + + +/* This is called by the assembly wrapper for closures. This does + all of the work. On entry reg_args[0] holds the values the registers + had when the closure was invoked. On return reg_args[1] holds the register + values to be returned to the caller (many of which may be garbage). */ +void FFI_HIDDEN +ffi_closure_tile_inner(ffi_closure *closure, + ffi_sarg reg_args[2][NUM_ARG_REGS], + ffi_sarg *stack_args) +{ + ffi_cif * const cif = closure->cif; + void ** const avalue = alloca(cif->nargs * sizeof(void *)); + void *rvalue; + ffi_type ** const arg_types = cif->arg_types; + ffi_sarg * const reg_args_in = reg_args[0]; + ffi_sarg * const reg_args_out = reg_args[1]; + ffi_sarg * argp; + long i, arg_word, nargs = cif->nargs; + /* Use a union to guarantee proper alignment for double. */ + union { ffi_sarg arg[NUM_ARG_REGS]; double d; UINT64 u64; } closure_ret; + + /* Start out reading register arguments. */ + argp = reg_args_in; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. */ + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Return by reference via hidden pointer. */ + rvalue = (void *)(intptr_t)*argp++; + arg_word = 1; + } + else + { + /* Return the value in registers. */ + rvalue = &closure_ret; + arg_word = 0; + } + + /* Grab the addresses of the arguments. */ + for (i = 0; i < nargs; i++) + { + ffi_type * const type = arg_types[i]; + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (arg_word == NUM_ARG_REGS || + (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS)) + { + /* Switch to reading arguments from the stack. */ + argp = stack_args; + arg_word = NUM_ARG_REGS; + } + + avalue[i] = argp; + argp += arg_size_in_words; + arg_word += arg_size_in_words; + } + + /* Invoke the closure. */ + closure->fun(cif, rvalue, avalue, closure->user_data); + + if (cif->flags != FFI_TYPE_STRUCT) + { + /* Canonicalize for register representation. */ + assign_to_ffi_arg(reg_args_out, &closure_ret, cif->rtype, 1); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget 2.h new file mode 100644 index 0000000..679fb5d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget 2.h @@ -0,0 +1,65 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Tilera Corp. + Target configuration macros for TILE. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM + +#include + +typedef uint_reg_t ffi_arg; +typedef int_reg_t ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ +#define FFI_CLOSURES 1 + +#ifdef __tilegx__ +/* We always pass 8-byte values, even in -m32 mode. */ +# define FFI_SIZEOF_ARG 8 +# ifdef __LP64__ +# define FFI_TRAMPOLINE_SIZE (8 * 5) /* 5 bundles */ +# else +# define FFI_TRAMPOLINE_SIZE (8 * 3) /* 3 bundles */ +# endif +#else +# define FFI_SIZEOF_ARG 4 +# define FFI_TRAMPOLINE_SIZE 8 /* 1 bundle */ +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget.h new file mode 100644 index 0000000..679fb5d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget.h @@ -0,0 +1,65 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Tilera Corp. + Target configuration macros for TILE. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM + +#include + +typedef uint_reg_t ffi_arg; +typedef int_reg_t ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ +#define FFI_CLOSURES 1 + +#ifdef __tilegx__ +/* We always pass 8-byte values, even in -m32 mode. */ +# define FFI_SIZEOF_ARG 8 +# ifdef __LP64__ +# define FFI_TRAMPOLINE_SIZE (8 * 5) /* 5 bundles */ +# else +# define FFI_TRAMPOLINE_SIZE (8 * 3) /* 3 bundles */ +# endif +#else +# define FFI_SIZEOF_ARG 4 +# define FFI_TRAMPOLINE_SIZE 8 /* 1 bundle */ +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile 2.S new file mode 100644 index 0000000..d1f82cb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile 2.S @@ -0,0 +1,360 @@ +/* ----------------------------------------------------------------------- + tile.S - Copyright (c) 2011 Tilera Corp. + + Tilera TILEPro and TILE-Gx Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Number of bytes in a register. */ +#define REG_SIZE FFI_SIZEOF_ARG + +/* Number of bytes in stack linkage area for backtracing. + + A note about the ABI: on entry to a procedure, sp points to a stack + slot where it must spill the return address if it's not a leaf. + REG_SIZE bytes beyond that is a slot owned by the caller which + contains the sp value that the caller had when it was originally + entered (i.e. the caller's frame pointer). */ +#define LINKAGE_SIZE (2 * REG_SIZE) + +/* The first 10 registers are used to pass arguments and return values. 
*/ +#define NUM_ARG_REGS 10 + +#ifdef __tilegx__ +#define SW st +#define LW ld +#define BGZT bgtzt +#else +#define SW sw +#define LW lw +#define BGZT bgzt +#endif + + +/* void ffi_call_tile (int_reg_t reg_args[NUM_ARG_REGS], + const int_reg_t *stack_args, + unsigned long stack_args_bytes, + void (*fnaddr)(void)); + + On entry, REG_ARGS contain the outgoing register values, + and STACK_ARGS contains STACK_ARG_BYTES of additional values + to be passed on the stack. If STACK_ARG_BYTES is zero, then + STACK_ARGS is ignored. + + When the invoked function returns, the values of r0-r9 are + blindly stored back into REG_ARGS for the caller to examine. */ + + .section .text.ffi_call_tile, "ax", @progbits + .align 8 + .globl ffi_call_tile + FFI_HIDDEN(ffi_call_tile) +ffi_call_tile: + +/* Incoming arguments. */ +#define REG_ARGS r0 +#define INCOMING_STACK_ARGS r1 +#define STACK_ARG_BYTES r2 +#define ORIG_FNADDR r3 + +/* Temporary values. */ +#define FRAME_SIZE r10 +#define TMP r11 +#define TMP2 r12 +#define OUTGOING_STACK_ARGS r13 +#define REG_ADDR_PTR r14 +#define RETURN_REG_ADDR r15 +#define FNADDR r16 + + .cfi_startproc + { + /* Save return address. */ + SW sp, lr + .cfi_offset lr, 0 + /* Prepare to spill incoming r52. */ + addi TMP, sp, -REG_SIZE + /* Increase frame size to have room to spill r52 and REG_ARGS. + The +7 is to round up mod 8. */ + addi FRAME_SIZE, STACK_ARG_BYTES, \ + REG_SIZE + REG_SIZE + LINKAGE_SIZE + 7 + } + { + /* Round stack frame size to a multiple of 8 to satisfy ABI. */ + andi FRAME_SIZE, FRAME_SIZE, -8 + /* Compute where to spill REG_ARGS value. */ + addi TMP2, sp, -(REG_SIZE * 2) + } + { + /* Spill incoming r52. */ + SW TMP, r52 + .cfi_offset r52, -REG_SIZE + /* Set up our frame pointer. */ + move r52, sp + .cfi_def_cfa_register r52 + /* Push stack frame. */ + sub sp, sp, FRAME_SIZE + } + { + /* Prepare to set up stack linkage. */ + addi TMP, sp, REG_SIZE + /* Prepare to memcpy stack args. */ + addi OUTGOING_STACK_ARGS, sp, LINKAGE_SIZE + /* Save REG_ARGS which we will need after we call the subroutine. */ + SW TMP2, REG_ARGS + } + { + /* Set up linkage info to hold incoming stack pointer. */ + SW TMP, r52 + } + { + /* Skip stack args memcpy if we don't have any stack args (common). */ + blezt STACK_ARG_BYTES, .Ldone_stack_args_memcpy + } + +.Lmemcpy_stack_args: + { + /* Load incoming argument from stack_args. */ + LW TMP, INCOMING_STACK_ARGS + addi INCOMING_STACK_ARGS, INCOMING_STACK_ARGS, REG_SIZE + } + { + /* Store stack argument into outgoing stack argument area. */ + SW OUTGOING_STACK_ARGS, TMP + addi OUTGOING_STACK_ARGS, OUTGOING_STACK_ARGS, REG_SIZE + addi STACK_ARG_BYTES, STACK_ARG_BYTES, -REG_SIZE + } + { + BGZT STACK_ARG_BYTES, .Lmemcpy_stack_args + } +.Ldone_stack_args_memcpy: + + { + /* Copy aside ORIG_FNADDR so we can overwrite its register. */ + move FNADDR, ORIG_FNADDR + /* Prepare to load argument registers. */ + addi REG_ADDR_PTR, r0, REG_SIZE + /* Load outgoing r0. */ + LW r0, r0 + } + + /* Load up argument registers from the REG_ARGS array. */ +#define LOAD_REG(REG, PTR) \ + { \ + LW REG, PTR ; \ + addi PTR, PTR, REG_SIZE \ + } + + LOAD_REG(r1, REG_ADDR_PTR) + LOAD_REG(r2, REG_ADDR_PTR) + LOAD_REG(r3, REG_ADDR_PTR) + LOAD_REG(r4, REG_ADDR_PTR) + LOAD_REG(r5, REG_ADDR_PTR) + LOAD_REG(r6, REG_ADDR_PTR) + LOAD_REG(r7, REG_ADDR_PTR) + LOAD_REG(r8, REG_ADDR_PTR) + LOAD_REG(r9, REG_ADDR_PTR) + + { + /* Call the subroutine. */ + jalr FNADDR + } + + { + /* Restore original lr. */ + LW lr, r52 + /* Prepare to recover ARGS, which we spilled earlier. 
*/ + addi TMP, r52, -(2 * REG_SIZE) + } + { + /* Restore ARGS, so we can fill it in with the return regs r0-r9. */ + LW RETURN_REG_ADDR, TMP + /* Prepare to restore original r52. */ + addi TMP, r52, -REG_SIZE + } + + { + /* Pop stack frame. */ + move sp, r52 + /* Restore original r52. */ + LW r52, TMP + } + +#define STORE_REG(REG, PTR) \ + { \ + SW PTR, REG ; \ + addi PTR, PTR, REG_SIZE \ + } + + /* Return all register values by reference. */ + STORE_REG(r0, RETURN_REG_ADDR) + STORE_REG(r1, RETURN_REG_ADDR) + STORE_REG(r2, RETURN_REG_ADDR) + STORE_REG(r3, RETURN_REG_ADDR) + STORE_REG(r4, RETURN_REG_ADDR) + STORE_REG(r5, RETURN_REG_ADDR) + STORE_REG(r6, RETURN_REG_ADDR) + STORE_REG(r7, RETURN_REG_ADDR) + STORE_REG(r8, RETURN_REG_ADDR) + STORE_REG(r9, RETURN_REG_ADDR) + + { + jrp lr + } + + .cfi_endproc + .size ffi_call_tile, .-ffi_call_tile + +/* ffi_closure_tile(...) + + On entry, lr points to the closure plus 8 bytes, and r10 + contains the actual return address. + + This function simply dumps all register parameters into a stack array + and passes the closure, the registers array, and the stack arguments + to C code that does all of the actual closure processing. */ + + .section .text.ffi_closure_tile, "ax", @progbits + .align 8 + .globl ffi_closure_tile + FFI_HIDDEN(ffi_closure_tile) + + .cfi_startproc +/* Room to spill all NUM_ARG_REGS incoming registers, plus frame linkage. */ +#define CLOSURE_FRAME_SIZE (((NUM_ARG_REGS * REG_SIZE * 2 + LINKAGE_SIZE) + 7) & -8) +ffi_closure_tile: + { +#ifdef __tilegx__ + st sp, lr + .cfi_offset lr, 0 +#else + /* Save return address (in r10 due to closure stub wrapper). */ + SW sp, r10 + .cfi_return_column r10 + .cfi_offset r10, 0 +#endif + /* Compute address for stack frame linkage. */ + addli r10, sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + } + { + /* Save incoming stack pointer in linkage area. */ + SW r10, sp + .cfi_offset sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + /* Push a new stack frame. */ + addli sp, sp, -CLOSURE_FRAME_SIZE + .cfi_adjust_cfa_offset CLOSURE_FRAME_SIZE + } + + { + /* Create pointer to where to start spilling registers. */ + addi r10, sp, LINKAGE_SIZE + } + + /* Spill all the incoming registers. */ + STORE_REG(r0, r10) + STORE_REG(r1, r10) + STORE_REG(r2, r10) + STORE_REG(r3, r10) + STORE_REG(r4, r10) + STORE_REG(r5, r10) + STORE_REG(r6, r10) + STORE_REG(r7, r10) + STORE_REG(r8, r10) + { + /* Save r9. */ + SW r10, r9 +#ifdef __tilegx__ + /* Pointer to closure is passed in r11. */ + move r0, r11 +#else + /* Compute pointer to the closure object. Because the closure + starts with a "jal ffi_closure_tile", we can just take the + value of lr (a phony return address pointing into the closure) + and subtract 8. */ + addi r0, lr, -8 +#endif + /* Compute a pointer to the register arguments we just spilled. */ + addi r1, sp, LINKAGE_SIZE + } + { + /* Compute a pointer to the extra stack arguments (if any). */ + addli r2, sp, CLOSURE_FRAME_SIZE + LINKAGE_SIZE + /* Call C code to deal with all of the grotty details. */ + jal ffi_closure_tile_inner + } + { + addli r10, sp, CLOSURE_FRAME_SIZE + } + { + /* Restore the return address. */ + LW lr, r10 + /* Compute pointer to registers array. */ + addli r10, sp, LINKAGE_SIZE + (NUM_ARG_REGS * REG_SIZE) + } + /* Return all the register values, which C code may have set. */ + LOAD_REG(r0, r10) + LOAD_REG(r1, r10) + LOAD_REG(r2, r10) + LOAD_REG(r3, r10) + LOAD_REG(r4, r10) + LOAD_REG(r5, r10) + LOAD_REG(r6, r10) + LOAD_REG(r7, r10) + LOAD_REG(r8, r10) + LOAD_REG(r9, r10) + { + /* Pop the frame. 
*/ + addli sp, sp, CLOSURE_FRAME_SIZE + jrp lr + } + + .cfi_endproc + .size ffi_closure_tile, . - ffi_closure_tile + + +/* What follows are code template instructions that get copied to the + closure trampoline by ffi_prep_closure_loc. The zeroed operands + get replaced by their proper values at runtime. */ + + .section .text.ffi_template_tramp_tile, "ax", @progbits + .align 8 + .globl ffi_template_tramp_tile + FFI_HIDDEN(ffi_template_tramp_tile) +ffi_template_tramp_tile: +#ifdef __tilegx__ + { + moveli r11, 0 /* backpatched to address of containing closure. */ + moveli r10, 0 /* backpatched to ffi_closure_tile. */ + } + /* Note: the following bundle gets generated multiple times + depending on the pointer value (esp. useful for -m32 mode). */ + { shl16insli r11, r11, 0 ; shl16insli r10, r10, 0 } + { info 2+8 /* for backtracer: -> pc in lr, frame size 0 */ ; jr r10 } +#else + /* 'jal .' yields a PC-relative offset of zero so we can OR in the + right offset at runtime. */ + { move r10, lr ; jal . /* ffi_closure_tile */ } +#endif + + .size ffi_template_tramp_tile, . - ffi_template_tramp_tile diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile.S new file mode 100644 index 0000000..d1f82cb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile.S @@ -0,0 +1,360 @@ +/* ----------------------------------------------------------------------- + tile.S - Copyright (c) 2011 Tilera Corp. + + Tilera TILEPro and TILE-Gx Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Number of bytes in a register. */ +#define REG_SIZE FFI_SIZEOF_ARG + +/* Number of bytes in stack linkage area for backtracing. + + A note about the ABI: on entry to a procedure, sp points to a stack + slot where it must spill the return address if it's not a leaf. + REG_SIZE bytes beyond that is a slot owned by the caller which + contains the sp value that the caller had when it was originally + entered (i.e. the caller's frame pointer). */ +#define LINKAGE_SIZE (2 * REG_SIZE) + +/* The first 10 registers are used to pass arguments and return values. 
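For orientation: ffi_call_tile, defined below, is the architecture-specific tail of libffi's portable ffi_call(). The generic layer packs the first NUM_ARG_REGS arguments into reg_args and any overflow into stack_args; this stub then materializes them in registers and on the stack and jumps to the target. Applications never see any of this; a minimal caller-side sketch using only the documented public API (puts() is just an arbitrary example target, variable names are illustrative):

    #include <ffi.h>
    #include <stdio.h>

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[1] = { &ffi_type_pointer };
      const char *msg = "hello from libffi";
      void *arg_values[1] = { &msg };
      ffi_arg rc;   /* small integer returns are widened into an ffi_arg */

      /* Describe the call once, then let the port-specific code
         (ffi_call_tile on this target) build the register/stack image. */
      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK)
        ffi_call(&cif, FFI_FN(puts), &rc, arg_values);
      return 0;
    }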
*/ +#define NUM_ARG_REGS 10 + +#ifdef __tilegx__ +#define SW st +#define LW ld +#define BGZT bgtzt +#else +#define SW sw +#define LW lw +#define BGZT bgzt +#endif + + +/* void ffi_call_tile (int_reg_t reg_args[NUM_ARG_REGS], + const int_reg_t *stack_args, + unsigned long stack_args_bytes, + void (*fnaddr)(void)); + + On entry, REG_ARGS contain the outgoing register values, + and STACK_ARGS contains STACK_ARG_BYTES of additional values + to be passed on the stack. If STACK_ARG_BYTES is zero, then + STACK_ARGS is ignored. + + When the invoked function returns, the values of r0-r9 are + blindly stored back into REG_ARGS for the caller to examine. */ + + .section .text.ffi_call_tile, "ax", @progbits + .align 8 + .globl ffi_call_tile + FFI_HIDDEN(ffi_call_tile) +ffi_call_tile: + +/* Incoming arguments. */ +#define REG_ARGS r0 +#define INCOMING_STACK_ARGS r1 +#define STACK_ARG_BYTES r2 +#define ORIG_FNADDR r3 + +/* Temporary values. */ +#define FRAME_SIZE r10 +#define TMP r11 +#define TMP2 r12 +#define OUTGOING_STACK_ARGS r13 +#define REG_ADDR_PTR r14 +#define RETURN_REG_ADDR r15 +#define FNADDR r16 + + .cfi_startproc + { + /* Save return address. */ + SW sp, lr + .cfi_offset lr, 0 + /* Prepare to spill incoming r52. */ + addi TMP, sp, -REG_SIZE + /* Increase frame size to have room to spill r52 and REG_ARGS. + The +7 is to round up mod 8. */ + addi FRAME_SIZE, STACK_ARG_BYTES, \ + REG_SIZE + REG_SIZE + LINKAGE_SIZE + 7 + } + { + /* Round stack frame size to a multiple of 8 to satisfy ABI. */ + andi FRAME_SIZE, FRAME_SIZE, -8 + /* Compute where to spill REG_ARGS value. */ + addi TMP2, sp, -(REG_SIZE * 2) + } + { + /* Spill incoming r52. */ + SW TMP, r52 + .cfi_offset r52, -REG_SIZE + /* Set up our frame pointer. */ + move r52, sp + .cfi_def_cfa_register r52 + /* Push stack frame. */ + sub sp, sp, FRAME_SIZE + } + { + /* Prepare to set up stack linkage. */ + addi TMP, sp, REG_SIZE + /* Prepare to memcpy stack args. */ + addi OUTGOING_STACK_ARGS, sp, LINKAGE_SIZE + /* Save REG_ARGS which we will need after we call the subroutine. */ + SW TMP2, REG_ARGS + } + { + /* Set up linkage info to hold incoming stack pointer. */ + SW TMP, r52 + } + { + /* Skip stack args memcpy if we don't have any stack args (common). */ + blezt STACK_ARG_BYTES, .Ldone_stack_args_memcpy + } + +.Lmemcpy_stack_args: + { + /* Load incoming argument from stack_args. */ + LW TMP, INCOMING_STACK_ARGS + addi INCOMING_STACK_ARGS, INCOMING_STACK_ARGS, REG_SIZE + } + { + /* Store stack argument into outgoing stack argument area. */ + SW OUTGOING_STACK_ARGS, TMP + addi OUTGOING_STACK_ARGS, OUTGOING_STACK_ARGS, REG_SIZE + addi STACK_ARG_BYTES, STACK_ARG_BYTES, -REG_SIZE + } + { + BGZT STACK_ARG_BYTES, .Lmemcpy_stack_args + } +.Ldone_stack_args_memcpy: + + { + /* Copy aside ORIG_FNADDR so we can overwrite its register. */ + move FNADDR, ORIG_FNADDR + /* Prepare to load argument registers. */ + addi REG_ADDR_PTR, r0, REG_SIZE + /* Load outgoing r0. */ + LW r0, r0 + } + + /* Load up argument registers from the REG_ARGS array. */ +#define LOAD_REG(REG, PTR) \ + { \ + LW REG, PTR ; \ + addi PTR, PTR, REG_SIZE \ + } + + LOAD_REG(r1, REG_ADDR_PTR) + LOAD_REG(r2, REG_ADDR_PTR) + LOAD_REG(r3, REG_ADDR_PTR) + LOAD_REG(r4, REG_ADDR_PTR) + LOAD_REG(r5, REG_ADDR_PTR) + LOAD_REG(r6, REG_ADDR_PTR) + LOAD_REG(r7, REG_ADDR_PTR) + LOAD_REG(r8, REG_ADDR_PTR) + LOAD_REG(r9, REG_ADDR_PTR) + + { + /* Call the subroutine. */ + jalr FNADDR + } + + { + /* Restore original lr. */ + LW lr, r52 + /* Prepare to recover ARGS, which we spilled earlier. 
*/ + addi TMP, r52, -(2 * REG_SIZE) + } + { + /* Restore ARGS, so we can fill it in with the return regs r0-r9. */ + LW RETURN_REG_ADDR, TMP + /* Prepare to restore original r52. */ + addi TMP, r52, -REG_SIZE + } + + { + /* Pop stack frame. */ + move sp, r52 + /* Restore original r52. */ + LW r52, TMP + } + +#define STORE_REG(REG, PTR) \ + { \ + SW PTR, REG ; \ + addi PTR, PTR, REG_SIZE \ + } + + /* Return all register values by reference. */ + STORE_REG(r0, RETURN_REG_ADDR) + STORE_REG(r1, RETURN_REG_ADDR) + STORE_REG(r2, RETURN_REG_ADDR) + STORE_REG(r3, RETURN_REG_ADDR) + STORE_REG(r4, RETURN_REG_ADDR) + STORE_REG(r5, RETURN_REG_ADDR) + STORE_REG(r6, RETURN_REG_ADDR) + STORE_REG(r7, RETURN_REG_ADDR) + STORE_REG(r8, RETURN_REG_ADDR) + STORE_REG(r9, RETURN_REG_ADDR) + + { + jrp lr + } + + .cfi_endproc + .size ffi_call_tile, .-ffi_call_tile + +/* ffi_closure_tile(...) + + On entry, lr points to the closure plus 8 bytes, and r10 + contains the actual return address. + + This function simply dumps all register parameters into a stack array + and passes the closure, the registers array, and the stack arguments + to C code that does all of the actual closure processing. */ + + .section .text.ffi_closure_tile, "ax", @progbits + .align 8 + .globl ffi_closure_tile + FFI_HIDDEN(ffi_closure_tile) + + .cfi_startproc +/* Room to spill all NUM_ARG_REGS incoming registers, plus frame linkage. */ +#define CLOSURE_FRAME_SIZE (((NUM_ARG_REGS * REG_SIZE * 2 + LINKAGE_SIZE) + 7) & -8) +ffi_closure_tile: + { +#ifdef __tilegx__ + st sp, lr + .cfi_offset lr, 0 +#else + /* Save return address (in r10 due to closure stub wrapper). */ + SW sp, r10 + .cfi_return_column r10 + .cfi_offset r10, 0 +#endif + /* Compute address for stack frame linkage. */ + addli r10, sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + } + { + /* Save incoming stack pointer in linkage area. */ + SW r10, sp + .cfi_offset sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + /* Push a new stack frame. */ + addli sp, sp, -CLOSURE_FRAME_SIZE + .cfi_adjust_cfa_offset CLOSURE_FRAME_SIZE + } + + { + /* Create pointer to where to start spilling registers. */ + addi r10, sp, LINKAGE_SIZE + } + + /* Spill all the incoming registers. */ + STORE_REG(r0, r10) + STORE_REG(r1, r10) + STORE_REG(r2, r10) + STORE_REG(r3, r10) + STORE_REG(r4, r10) + STORE_REG(r5, r10) + STORE_REG(r6, r10) + STORE_REG(r7, r10) + STORE_REG(r8, r10) + { + /* Save r9. */ + SW r10, r9 +#ifdef __tilegx__ + /* Pointer to closure is passed in r11. */ + move r0, r11 +#else + /* Compute pointer to the closure object. Because the closure + starts with a "jal ffi_closure_tile", we can just take the + value of lr (a phony return address pointing into the closure) + and subtract 8. */ + addi r0, lr, -8 +#endif + /* Compute a pointer to the register arguments we just spilled. */ + addi r1, sp, LINKAGE_SIZE + } + { + /* Compute a pointer to the extra stack arguments (if any). */ + addli r2, sp, CLOSURE_FRAME_SIZE + LINKAGE_SIZE + /* Call C code to deal with all of the grotty details. */ + jal ffi_closure_tile_inner + } + { + addli r10, sp, CLOSURE_FRAME_SIZE + } + { + /* Restore the return address. */ + LW lr, r10 + /* Compute pointer to registers array. */ + addli r10, sp, LINKAGE_SIZE + (NUM_ARG_REGS * REG_SIZE) + } + /* Return all the register values, which C code may have set. */ + LOAD_REG(r0, r10) + LOAD_REG(r1, r10) + LOAD_REG(r2, r10) + LOAD_REG(r3, r10) + LOAD_REG(r4, r10) + LOAD_REG(r5, r10) + LOAD_REG(r6, r10) + LOAD_REG(r7, r10) + LOAD_REG(r8, r10) + LOAD_REG(r9, r10) + { + /* Pop the frame. 
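The closure path above, together with the trampoline template that follows, lets a heap-allocated ffi_closure be invoked through a plain C function pointer; applications drive it entirely through the portable closure API. A minimal sketch assuming nothing beyond that public interface (add_handler is a made-up name):

    #include <ffi.h>
    #include <stdio.h>

    /* Handler invoked by the port-specific inner routine
       (ffi_closure_tile_inner on this target) with marshalled arguments. */
    static void add_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
    {
      (void)cif; (void)user_data;
      *(ffi_arg *)ret = *(int *)args[0] + *(int *)args[1];
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
      void *code;
      ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
      int (*fn)(int, int);

      if (closure == NULL)
        return 1;
      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) == FFI_OK
          && ffi_prep_closure_loc(closure, &cif, add_handler, NULL, code) == FFI_OK)
        {
          fn = (int (*)(int, int))code;   /* enters through the trampoline */
          printf("%d\n", fn(2, 3));       /* prints 5 */
        }
      ffi_closure_free(closure);
      return 0;
    }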
*/ + addli sp, sp, CLOSURE_FRAME_SIZE + jrp lr + } + + .cfi_endproc + .size ffi_closure_tile, . - ffi_closure_tile + + +/* What follows are code template instructions that get copied to the + closure trampoline by ffi_prep_closure_loc. The zeroed operands + get replaced by their proper values at runtime. */ + + .section .text.ffi_template_tramp_tile, "ax", @progbits + .align 8 + .globl ffi_template_tramp_tile + FFI_HIDDEN(ffi_template_tramp_tile) +ffi_template_tramp_tile: +#ifdef __tilegx__ + { + moveli r11, 0 /* backpatched to address of containing closure. */ + moveli r10, 0 /* backpatched to ffi_closure_tile. */ + } + /* Note: the following bundle gets generated multiple times + depending on the pointer value (esp. useful for -m32 mode). */ + { shl16insli r11, r11, 0 ; shl16insli r10, r10, 0 } + { info 2+8 /* for backtracer: -> pc in lr, frame size 0 */ ; jr r10 } +#else + /* 'jal .' yields a PC-relative offset of zero so we can OR in the + right offset at runtime. */ + { move r10, lr ; jal . /* ffi_closure_tile */ } +#endif + + .size ffi_template_tramp_tile, . - ffi_template_tramp_tile diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types 2.c new file mode 100644 index 0000000..9ec27f6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types 2.c @@ -0,0 +1,108 @@ +/* ----------------------------------------------------------------------- + types.c - Copyright (c) 1996, 1998 Red Hat, Inc. + + Predefined ffi_types needed by libffi. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* Hide the basic type definitions from the header file, so that we + can redefine them here as "const". 
*/ +#define LIBFFI_HIDE_BASIC_TYPES + +#include +#include + +/* Type definitions */ + +#define FFI_TYPEDEF(name, type, id, maybe_const)\ +struct struct_align_##name { \ + char c; \ + type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_##name = { \ + sizeof(type), \ + offsetof(struct struct_align_##name, x), \ + id, NULL \ +} + +#define FFI_COMPLEX_TYPEDEF(name, type, maybe_const) \ +static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffi_type_##name), NULL \ +}; \ +struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ +} + +/* Size and alignment are fake here. They must not be 0. */ +FFI_EXTERN const ffi_type ffi_type_void = { + 1, 1, FFI_TYPE_VOID, NULL +}; + +FFI_TYPEDEF(uint8, UINT8, FFI_TYPE_UINT8, const); +FFI_TYPEDEF(sint8, SINT8, FFI_TYPE_SINT8, const); +FFI_TYPEDEF(uint16, UINT16, FFI_TYPE_UINT16, const); +FFI_TYPEDEF(sint16, SINT16, FFI_TYPE_SINT16, const); +FFI_TYPEDEF(uint32, UINT32, FFI_TYPE_UINT32, const); +FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const); +FFI_TYPEDEF(uint64, UINT64, FFI_TYPE_UINT64, const); +FFI_TYPEDEF(sint64, SINT64, FFI_TYPE_SINT64, const); + +FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const); + +FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const); +FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const); + +#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ +#define FFI_LDBL_CONST const +#else +#define FFI_LDBL_CONST +#endif + +#ifdef __alpha__ +/* Even if we're not configured to default to 128-bit long double, + maintain binary compatibility, as -mlong-double-128 can be used + at any time. */ +/* Validate the hard-coded number below. */ +# if defined(__LONG_DOUBLE_128__) && FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +const ffi_type ffi_type_longdouble = { 16, 16, 4, NULL }; +#elif FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_TYPEDEF(longdouble, long double, FFI_TYPE_LONGDOUBLE, FFI_LDBL_CONST); +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_COMPLEX_TYPEDEF(float, float, const); +FFI_COMPLEX_TYPEDEF(double, double, const); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_COMPLEX_TYPEDEF(longdouble, long double, FFI_LDBL_CONST); +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c new file mode 100644 index 0000000..9ec27f6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c @@ -0,0 +1,108 @@ +/* ----------------------------------------------------------------------- + types.c - Copyright (c) 1996, 1998 Red Hat, Inc. + + Predefined ffi_types needed by libffi. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
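The FFI_TYPEDEF macro above uses the classic struct-offset idiom: placing a T immediately after a lone char and taking offsetof() of the T member yields the ABI alignment of T, so each ffi_type gets its size and alignment filled in portably at compile time. As a sketch, the preprocessor output inside types.c for FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const) is essentially (FFI_EXTERN elided for brevity):

    struct struct_align_double { char c; double x; };
    const ffi_type ffi_type_double = {
      sizeof (double),                            /* size                     */
      offsetof (struct struct_align_double, x),   /* alignment, via the idiom */
      FFI_TYPE_DOUBLE,                            /* type id                  */
      NULL                                        /* no element list          */
    };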
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* Hide the basic type definitions from the header file, so that we + can redefine them here as "const". */ +#define LIBFFI_HIDE_BASIC_TYPES + +#include +#include + +/* Type definitions */ + +#define FFI_TYPEDEF(name, type, id, maybe_const)\ +struct struct_align_##name { \ + char c; \ + type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_##name = { \ + sizeof(type), \ + offsetof(struct struct_align_##name, x), \ + id, NULL \ +} + +#define FFI_COMPLEX_TYPEDEF(name, type, maybe_const) \ +static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffi_type_##name), NULL \ +}; \ +struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ +} + +/* Size and alignment are fake here. They must not be 0. */ +FFI_EXTERN const ffi_type ffi_type_void = { + 1, 1, FFI_TYPE_VOID, NULL +}; + +FFI_TYPEDEF(uint8, UINT8, FFI_TYPE_UINT8, const); +FFI_TYPEDEF(sint8, SINT8, FFI_TYPE_SINT8, const); +FFI_TYPEDEF(uint16, UINT16, FFI_TYPE_UINT16, const); +FFI_TYPEDEF(sint16, SINT16, FFI_TYPE_SINT16, const); +FFI_TYPEDEF(uint32, UINT32, FFI_TYPE_UINT32, const); +FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const); +FFI_TYPEDEF(uint64, UINT64, FFI_TYPE_UINT64, const); +FFI_TYPEDEF(sint64, SINT64, FFI_TYPE_SINT64, const); + +FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const); + +FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const); +FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const); + +#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ +#define FFI_LDBL_CONST const +#else +#define FFI_LDBL_CONST +#endif + +#ifdef __alpha__ +/* Even if we're not configured to default to 128-bit long double, + maintain binary compatibility, as -mlong-double-128 can be used + at any time. */ +/* Validate the hard-coded number below. */ +# if defined(__LONG_DOUBLE_128__) && FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +const ffi_type ffi_type_longdouble = { 16, 16, 4, NULL }; +#elif FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_TYPEDEF(longdouble, long double, FFI_TYPE_LONGDOUBLE, FFI_LDBL_CONST); +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_COMPLEX_TYPEDEF(float, float, const); +FFI_COMPLEX_TYPEDEF(double, double, const); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_COMPLEX_TYPEDEF(longdouble, long double, FFI_LDBL_CONST); +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd 2.S new file mode 100644 index 0000000..01ca313 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd 2.S @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * void * %r0 + * ffi_call_elfbsd(extended_cif *ecif, 4(%ap) + * unsigned bytes, 8(%ap) + * unsigned flags, 12(%ap) + * void *rvalue, 16(%ap) + * void (*fn)()); 20(%ap) + */ + .globl ffi_call_elfbsd + .type ffi_call_elfbsd,@function + .align 2 +ffi_call_elfbsd: + .word 0x00c # save R2 and R3 + + # Allocate stack space for the args + subl2 8(%ap), %sp + + # Call ffi_prep_args + pushl %sp + pushl 4(%ap) + calls $2, ffi_prep_args + + # Get function pointer + movl 20(%ap), %r1 + + # Build a CALLS frame + ashl $-2, 8(%ap), %r0 + pushl %r0 # argument stack usage + movl %sp, %r0 # future %ap + # saved registers + bbc $11, 0(%r1), 1f + pushl %r11 +1: bbc $10, 0(%r1), 1f + pushl %r10 +1: bbc $9, 0(%r1), 1f + pushl %r9 +1: bbc $8, 0(%r1), 1f + pushl %r8 +1: bbc $7, 0(%r1), 1f + pushl %r7 +1: bbc $6, 0(%r1), 1f + pushl %r6 +1: bbc $5, 0(%r1), 1f + pushl %r5 +1: bbc $4, 0(%r1), 1f + pushl %r4 +1: bbc $3, 0(%r1), 1f + pushl %r3 +1: bbc $2, 0(%r1), 1f + pushl %r2 +1: + pushal 9f + pushl %fp + pushl %ap + movl 16(%ap), %r3 # struct return address, if needed + movl %r0, %ap + movzwl 4(%fp), %r0 # previous PSW, without the saved registers mask + bisl2 $0x20000000, %r0 # calls frame + movzwl 0(%r1), %r2 + bicw2 $0xf003, %r2 # only keep R11-R2 + ashl $16, %r2, %r2 + bisl2 %r2, %r0 # saved register mask of the called function + pushl %r0 + pushl $0 + movl %sp, %fp + + # Invoke the function + pushal 2(%r1) # skip procedure entry mask + movl %r3, %r1 + bicpsw $0x000f + rsb + +9: + # Copy return value if necessary + tstl 16(%ap) + jeql 9f + movl 16(%ap), %r2 + + bbc $0, 12(%ap), 1f # CIF_FLAGS_CHAR + movb %r0, 0(%r2) + brb 9f +1: + bbc $1, 12(%ap), 1f # CIF_FLAGS_SHORT + movw %r0, 0(%r2) + brb 9f +1: + bbc $2, 12(%ap), 1f # CIF_FLAGS_INT + movl %r0, 0(%r2) + brb 9f +1: + bbc $3, 12(%ap), 1f # CIF_FLAGS_DINT + movq %r0, 0(%r2) + brb 9f +1: + movl %r1, %r0 # might have been a struct + #brb 9f + +9: + ret + +/* + * ffi_closure_elfbsd(void); + * invoked with %r0: ffi_closure *closure + */ + .globl ffi_closure_elfbsd + .type ffi_closure_elfbsd, @function + .align 2 +ffi_closure_elfbsd: + .word 0 + + # Allocate room on stack for return value + subl2 $8, %sp + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushal 4(%sp) # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + # Copy return 
value if necessary + bitb $1, %r0 # CIF_FLAGS_CHAR + beql 1f + movb 0(%sp), %r0 + brb 9f +1: + bitb $2, %r0 # CIF_FLAGS_SHORT + beql 1f + movw 0(%sp), %r0 + brb 9f +1: + bitb $4, %r0 # CIF_FLAGS_INT + beql 1f + movl 0(%sp), %r0 + brb 9f +1: + bitb $8, %r0 # CIF_FLAGS_DINT + beql 1f + movq 0(%sp), %r0 + #brb 9f +1: + +9: + ret + +/* + * ffi_closure_struct_elfbsd(void); + * invoked with %r0: ffi_closure *closure + * %r1: struct return address + */ + .globl ffi_closure_struct_elfbsd + .type ffi_closure_struct_elfbsd, @function + .align 2 +ffi_closure_struct_elfbsd: + .word 0 + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushl %r1 # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + ret diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd.S new file mode 100644 index 0000000..01ca313 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd.S @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
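The bitb tests above key off the value returned by ffi_closure_elfbsd_inner, which is simply cif->flags, and each flag selects how many bytes of the on-stack return temporary get copied back into %r0/%r1. Restated in plain C for clarity (this helper does not exist in the port; the flag values are the CIF_FLAGS_* constants from the companion ffi.c):

    #include <stddef.h>

    /* Illustrative restatement of the bitb/movb/movw/movl/movq dispatch:
       how many bytes of the return temporary the stub copies into r0/r1. */
    static size_t return_copy_width(unsigned int flags)
    {
      if (flags & 1)  return 1;   /* CIF_FLAGS_CHAR  -> movb */
      if (flags & 2)  return 2;   /* CIF_FLAGS_SHORT -> movw */
      if (flags & 4)  return 4;   /* CIF_FLAGS_INT   -> movl */
      if (flags & 8)  return 8;   /* CIF_FLAGS_DINT  -> movq */
      return 0;                   /* void, or struct handled by
                                     ffi_closure_struct_elfbsd */
    }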
+ */ + +/* + * vax Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * void * %r0 + * ffi_call_elfbsd(extended_cif *ecif, 4(%ap) + * unsigned bytes, 8(%ap) + * unsigned flags, 12(%ap) + * void *rvalue, 16(%ap) + * void (*fn)()); 20(%ap) + */ + .globl ffi_call_elfbsd + .type ffi_call_elfbsd,@function + .align 2 +ffi_call_elfbsd: + .word 0x00c # save R2 and R3 + + # Allocate stack space for the args + subl2 8(%ap), %sp + + # Call ffi_prep_args + pushl %sp + pushl 4(%ap) + calls $2, ffi_prep_args + + # Get function pointer + movl 20(%ap), %r1 + + # Build a CALLS frame + ashl $-2, 8(%ap), %r0 + pushl %r0 # argument stack usage + movl %sp, %r0 # future %ap + # saved registers + bbc $11, 0(%r1), 1f + pushl %r11 +1: bbc $10, 0(%r1), 1f + pushl %r10 +1: bbc $9, 0(%r1), 1f + pushl %r9 +1: bbc $8, 0(%r1), 1f + pushl %r8 +1: bbc $7, 0(%r1), 1f + pushl %r7 +1: bbc $6, 0(%r1), 1f + pushl %r6 +1: bbc $5, 0(%r1), 1f + pushl %r5 +1: bbc $4, 0(%r1), 1f + pushl %r4 +1: bbc $3, 0(%r1), 1f + pushl %r3 +1: bbc $2, 0(%r1), 1f + pushl %r2 +1: + pushal 9f + pushl %fp + pushl %ap + movl 16(%ap), %r3 # struct return address, if needed + movl %r0, %ap + movzwl 4(%fp), %r0 # previous PSW, without the saved registers mask + bisl2 $0x20000000, %r0 # calls frame + movzwl 0(%r1), %r2 + bicw2 $0xf003, %r2 # only keep R11-R2 + ashl $16, %r2, %r2 + bisl2 %r2, %r0 # saved register mask of the called function + pushl %r0 + pushl $0 + movl %sp, %fp + + # Invoke the function + pushal 2(%r1) # skip procedure entry mask + movl %r3, %r1 + bicpsw $0x000f + rsb + +9: + # Copy return value if necessary + tstl 16(%ap) + jeql 9f + movl 16(%ap), %r2 + + bbc $0, 12(%ap), 1f # CIF_FLAGS_CHAR + movb %r0, 0(%r2) + brb 9f +1: + bbc $1, 12(%ap), 1f # CIF_FLAGS_SHORT + movw %r0, 0(%r2) + brb 9f +1: + bbc $2, 12(%ap), 1f # CIF_FLAGS_INT + movl %r0, 0(%r2) + brb 9f +1: + bbc $3, 12(%ap), 1f # CIF_FLAGS_DINT + movq %r0, 0(%r2) + brb 9f +1: + movl %r1, %r0 # might have been a struct + #brb 9f + +9: + ret + +/* + * ffi_closure_elfbsd(void); + * invoked with %r0: ffi_closure *closure + */ + .globl ffi_closure_elfbsd + .type ffi_closure_elfbsd, @function + .align 2 +ffi_closure_elfbsd: + .word 0 + + # Allocate room on stack for return value + subl2 $8, %sp + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushal 4(%sp) # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + # Copy return value if necessary + bitb $1, %r0 # CIF_FLAGS_CHAR + beql 1f + movb 0(%sp), %r0 + brb 9f +1: + bitb $2, %r0 # CIF_FLAGS_SHORT + beql 1f + movw 0(%sp), %r0 + brb 9f +1: + bitb $4, %r0 # CIF_FLAGS_INT + beql 1f + movl 0(%sp), %r0 + brb 9f +1: + bitb $8, %r0 # CIF_FLAGS_DINT + beql 1f + movq 0(%sp), %r0 + #brb 9f +1: + +9: + ret + +/* + * ffi_closure_struct_elfbsd(void); + * invoked with %r0: ffi_closure *closure + * %r1: struct return address + */ + .globl ffi_closure_struct_elfbsd + .type ffi_closure_struct_elfbsd, @function + .align 2 +ffi_closure_struct_elfbsd: + .word 0 + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushl %r1 # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + ret diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi 2.c new file mode 100644 index 0000000..e52caec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi 2.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. + */ + +#include +#include + +#include +#include + +#define CIF_FLAGS_CHAR 1 /* for struct only */ +#define CIF_FLAGS_SHORT 2 /* for struct only */ +#define CIF_FLAGS_INT 4 +#define CIF_FLAGS_DINT 8 + +/* + * Foreign Function Interface API + */ + +void ffi_call_elfbsd (extended_cif *, unsigned, unsigned, void *, + void (*) ()); +void *ffi_prep_args (extended_cif *ecif, void *stack); + +void * +ffi_prep_args (extended_cif *ecif, void *stack) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. 
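In ffi_prep_args above, any scalar narrower than int is widened to a full int slot, because the VAX CALLS argument list is longword-granular; wider arguments are copied verbatim and, as the code just below does, rounded up to the next longword. A compact restatement of the slot-size rule (the helper name is illustrative only):

    #include <stddef.h>

    /* Bytes one argument occupies in the longword-granular argument list
       that ffi_prep_args builds (sketch, not part of the port). */
    static size_t vax_arg_slot_bytes(size_t size)
    {
      if (size < sizeof (int))
        return sizeof (int);                                   /* widened    */
      return (size + sizeof (int) - 1) & ~(sizeof (int) - 1);  /* rounded up */
    }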
*/ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + if (cif->rtype->size == sizeof (char)) + cif->flags = CIF_FLAGS_CHAR; + else if (cif->rtype->size == sizeof (short)) + cif->flags = CIF_FLAGS_SHORT; + else if (cif->rtype->size == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else if (cif->rtype->size == 2 * sizeof (int)) + cif->flags = CIF_FLAGS_DINT; + else + cif->flags = 0; + break; + + default: + if (cif->rtype->size <= sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = CIF_FLAGS_DINT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->flags == 0) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ELFBSD: + ffi_call_elfbsd (&ecif, cif->bytes, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +void ffi_closure_elfbsd (void); +void ffi_closure_struct_elfbsd (void); +unsigned int ffi_closure_elfbsd_inner (ffi_closure *, void *, char *); + +static void +ffi_prep_closure_elfbsd (ffi_cif *cif, void **avalue, char *stackp) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + *p_argv = stackp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + stackp += z; + } +} + +unsigned int +ffi_closure_elfbsd_inner (ffi_closure *closure, void *resp, char *stack) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_elfbsd (cif, arg_area, stack); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + char *tramp = (char *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_ELFBSD); + + /* entry mask */ + *(unsigned short *)(tramp + 0) = 0x0000; + /* movl #closure, r0 */ + tramp[2] = 0xd0; + tramp[3] = 0x8f; + *(unsigned int *)(tramp + 4) = (unsigned int) closure; + tramp[8] = 0x50; + + if (cif->rtype->type == FFI_TYPE_STRUCT + && !cif->flags) + fn = &ffi_closure_struct_elfbsd; + else + fn = &ffi_closure_elfbsd; + + /* jmpl #fn */ + tramp[9] = 0x17; + tramp[10] = 0xef; + *(unsigned int *)(tramp + 11) = (unsigned int)fn + 2 - + (unsigned int)tramp - 9 - 6; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi.c new file mode 100644 index 0000000..e52caec --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. + */ + +#include +#include + +#include +#include + +#define CIF_FLAGS_CHAR 1 /* for struct only */ +#define CIF_FLAGS_SHORT 2 /* for struct only */ +#define CIF_FLAGS_INT 4 +#define CIF_FLAGS_DINT 8 + +/* + * Foreign Function Interface API + */ + +void ffi_call_elfbsd (extended_cif *, unsigned, unsigned, void *, + void (*) ()); +void *ffi_prep_args (extended_cif *ecif, void *stack); + +void * +ffi_prep_args (extended_cif *ecif, void *stack) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. 
*/ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + if (cif->rtype->size == sizeof (char)) + cif->flags = CIF_FLAGS_CHAR; + else if (cif->rtype->size == sizeof (short)) + cif->flags = CIF_FLAGS_SHORT; + else if (cif->rtype->size == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else if (cif->rtype->size == 2 * sizeof (int)) + cif->flags = CIF_FLAGS_DINT; + else + cif->flags = 0; + break; + + default: + if (cif->rtype->size <= sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = CIF_FLAGS_DINT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->flags == 0) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ELFBSD: + ffi_call_elfbsd (&ecif, cif->bytes, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +void ffi_closure_elfbsd (void); +void ffi_closure_struct_elfbsd (void); +unsigned int ffi_closure_elfbsd_inner (ffi_closure *, void *, char *); + +static void +ffi_prep_closure_elfbsd (ffi_cif *cif, void **avalue, char *stackp) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + *p_argv = stackp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + stackp += z; + } +} + +unsigned int +ffi_closure_elfbsd_inner (ffi_closure *closure, void *resp, char *stack) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_elfbsd (cif, arg_area, stack); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + char *tramp = (char *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_ELFBSD); + + /* entry mask */ + *(unsigned short *)(tramp + 0) = 0x0000; + /* movl #closure, r0 */ + tramp[2] = 0xd0; + tramp[3] = 0x8f; + *(unsigned int *)(tramp + 4) = (unsigned int) closure; + tramp[8] = 0x50; + + if (cif->rtype->type == FFI_TYPE_STRUCT + && !cif->flags) + fn = &ffi_closure_struct_elfbsd; + else + fn = &ffi_closure_elfbsd; + + /* jmpl #fn */ + tramp[9] = 0x17; + tramp[10] = 0xef; + *(unsigned int *)(tramp + 11) = (unsigned int)fn + 2 - + (unsigned int)tramp - 9 - 6; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget 2.h new file mode 100644 index 0000000..2fc9488 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget 2.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_ELFBSD, + FFI_DEFAULT_ABI = FFI_ELFBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 15 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget.h new file mode 100644 index 0000000..2fc9488 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
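FFI_TRAMPOLINE_SIZE is 15 here because that is exactly what the VAX ffi_prep_closure_loc earlier in this diff writes: a 2-byte entry mask, a 7-byte movl of the closure pointer into %r0, and a 6-byte PC-relative jmp to the assembly stub. A byte-map sketch (not code from the port; the two 32-bit fields are patched at closure-creation time):

    static const unsigned char vax_tramp_layout[15] = {
      0x00, 0x00,            /* entry mask: save no registers             */
      0xd0, 0x8f,            /* movl, immediate operand follows           */
      0,    0,    0,    0,   /* 32-bit closure pointer (patched)          */
      0x50,                  /* destination operand: %r0                  */
      0x17, 0xef,            /* jmp, longword PC-relative operand         */
      0,    0,    0,    0    /* 32-bit displacement to ffi_closure_elfbsd
                                or ffi_closure_struct_elfbsd (patched)    */
    };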
+ */ + +/* + * vax Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_ELFBSD, + FFI_DEFAULT_ABI = FFI_ELFBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 15 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames 2.h new file mode 100644 index 0000000..7551021 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames 2.h @@ -0,0 +1,30 @@ +#ifndef ASMNAMES_H +#define ASMNAMES_H + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef __APPLE__ +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#if defined(__ELF__) && defined(__PIC__) +# define PLT(X) X@PLT +#else +# define PLT(X) X +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +#endif /* ASMNAMES_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames.h new file mode 100644 index 0000000..7551021 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames.h @@ -0,0 +1,30 @@ +#ifndef ASMNAMES_H +#define ASMNAMES_H + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef __APPLE__ +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#if defined(__ELF__) && defined(__PIC__) +# define PLT(X) X@PLT +#else +# define PLT(X) X +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +#endif /* ASMNAMES_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi 2.c new file mode 100644 index 0000000..346e784 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi 2.c @@ -0,0 +1,764 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2017 Anthony Green + Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc. + Copyright (c) 2002 Ranjit Mathew + Copyright (c) 2002 Bo Thorsen + Copyright (c) 2002 Roger Sayle + Copyright (C) 2008, 2010 Free Software Foundation, Inc. + + x86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
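asmnames.h above collects the symbol-name differences between object formats in one place: C() applies __USER_LABEL_PREFIX__ (an underscore on Mach-O), L() picks the local-label prefix, PLT() routes a call through the PLT only on ELF PIC builds, and ENDF() emits .type/.size only where ELF supports them. Worked expansions under two representative configurations (illustrative, not taken from this diff):

    /* With __APPLE__ and __USER_LABEL_PREFIX__ defined to _ (Mach-O):
         C(ffi_closure_inner)   -> _ffi_closure_inner
         L(do_struct)           -> Ldo_struct
         PLT(C(ffi_call_i386))  -> _ffi_call_i386
         ENDF(C(ffi_call_i386)) -> (nothing)
       With __ELF__, __PIC__ and an empty label prefix:
         C(ffi_closure_inner)   -> ffi_closure_inner
         L(do_struct)           -> .Ldo_struct
         PLT(C(ffi_call_i386))  -> ffi_call_i386@PLT
         ENDF(ffi_call_i386)    -> .type ffi_call_i386,@function;
                                   .size ffi_call_i386, . - ffi_call_i386 */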
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__i386__) || defined(_M_IX86) +#include +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 80-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#if defined(__GNUC__) && !defined(__declspec) +# define __declspec(x) __attribute__((x)) +#endif + +#if defined(_MSC_VER) && defined(_M_IX86) +/* Stack is not 16-byte aligned on Windows. */ +#define STACK_ALIGN(bytes) (bytes) +#else +#define STACK_ALIGN(bytes) FFI_ALIGN (bytes, 16) +#endif + +/* Perform machine dependent cif processing. */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int i, n, flags, cabi = cif->abi; + + switch (cabi) + { + case FFI_SYSV: + case FFI_STDCALL: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + case FFI_PASCAL: + case FFI_REGISTER: + break; + default: + return FFI_BAD_ABI; + } + + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = X86_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = X86_RET_FLOAT; + break; + case FFI_TYPE_DOUBLE: + flags = X86_RET_DOUBLE; + break; + case FFI_TYPE_LONGDOUBLE: + flags = X86_RET_LDOUBLE; + break; + case FFI_TYPE_UINT8: + flags = X86_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = X86_RET_UINT16; + break; + case FFI_TYPE_SINT8: + flags = X86_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = X86_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = X86_RET_INT64; + break; + case FFI_TYPE_STRUCT: +#ifndef X86 + /* ??? This should be a different ABI rather than an ifdef. */ + if (cif->rtype->size == 1) + flags = X86_RET_STRUCT_1B; + else if (cif->rtype->size == 2) + flags = X86_RET_STRUCT_2B; + else if (cif->rtype->size == 4) + flags = X86_RET_INT32; + else if (cif->rtype->size == 8) + flags = X86_RET_INT64; + else +#endif + { + do_struct: + switch (cabi) + { + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_STDCALL: + case FFI_MS_CDECL: + flags = X86_RET_STRUCTARG; + break; + default: + flags = X86_RET_STRUCTPOP; + break; + } + /* Allocate space for return value pointer. 
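Besides choosing an X86_RET_* flag, ffi_prep_cif_machdep must size the outgoing argument area: the loop that follows aligns the running offset to each argument's alignment and then adds its size rounded up to FFI_SIZEOF_ARG (4 bytes on i386). A stand-alone restatement of that accumulation (helper and macro names are illustrative; FFI_SIZEOF_ARG assumed to be 4):

    #include <stddef.h>

    #define SIZEOF_ARG 4                                   /* FFI_SIZEOF_ARG on i386 */
    #define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((size_t)(a) - 1))

    /* Mirrors the cif->bytes loop: one (size, alignment) pair per argument. */
    static size_t x86_arg_bytes(const size_t *sizes, const size_t *aligns, int n)
    {
      size_t bytes = 0;
      int i;
      for (i = 0; i < n; i++)
        {
          bytes = ALIGN_UP(bytes, aligns[i]);      /* t->alignment              */
          bytes += ALIGN_UP(sizes[i], SIZEOF_ARG); /* t->size, rounded to slots */
        }
      return bytes;
    }
    /* e.g. (int, char, void *) occupies 4 + 4 + 4 = 12 bytes of arguments. */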
*/ + bytes += FFI_ALIGN (sizeof(void*), FFI_SIZEOF_ARG); + } + break; + case FFI_TYPE_COMPLEX: + switch (cif->rtype->elements[0]->type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + goto do_struct; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = X86_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = X86_RET_STRUCT_2B; + break; + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + cif->flags = flags; + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *t = cif->arg_types[i]; + + bytes = FFI_ALIGN (bytes, t->alignment); + bytes += FFI_ALIGN (t->size, FFI_SIZEOF_ARG); + } + cif->bytes = bytes; + + return FFI_OK; +} + +static ffi_arg +extend_basic_type(void *arg, int type) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)arg; + case FFI_TYPE_UINT8: + return *(UINT8 *)arg; + case FFI_TYPE_SINT16: + return *(SINT16 *)arg; + case FFI_TYPE_UINT16: + return *(UINT16 *)arg; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + return *(UINT32 *)arg; + + default: + abort(); + } +} + +struct call_frame +{ + void *ebp; /* 0 */ + void *retaddr; /* 4 */ + void (*fn)(void); /* 8 */ + int flags; /* 12 */ + void *rvalue; /* 16 */ + unsigned regs[3]; /* 20-28 */ +}; + +struct abi_params +{ + int dir; /* parameter growth direction */ + int static_chain; /* the static chain register used by gcc */ + int nregs; /* number of register parameters */ + int regs[3]; +}; + +static const struct abi_params abi_params[FFI_LAST_ABI] = { + [FFI_SYSV] = { 1, R_ECX, 0 }, + [FFI_THISCALL] = { 1, R_EAX, 1, { R_ECX } }, + [FFI_FASTCALL] = { 1, R_EAX, 2, { R_ECX, R_EDX } }, + [FFI_STDCALL] = { 1, R_ECX, 0 }, + [FFI_PASCAL] = { -1, R_ECX, 0 }, + /* ??? No defined static chain; gcc does not support REGISTER. */ + [FFI_REGISTER] = { -1, R_ECX, 3, { R_EAX, R_EDX, R_ECX } }, + [FFI_MS_CDECL] = { 1, R_ECX, 0 } +}; + +#ifdef HAVE_FASTCALL + #ifdef _MSC_VER + #define FFI_DECLARE_FASTCALL __fastcall + #else + #define FFI_DECLARE_FASTCALL __declspec(fastcall) + #endif +#else + #define FFI_DECLARE_FASTCALL +#endif + +extern void FFI_DECLARE_FASTCALL ffi_call_i386(struct call_frame *, char *) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, dir, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + dir = pabi->dir; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + stack = alloca(bytes + sizeof(*frame) + rsize); + argp = (dir < 0 ? 
stack + bytes : stack); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + frame->regs[pabi->static_chain] = (unsigned)closure; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. */ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + ffi_arg val = extend_basic_type (valp, t); + + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + frame->regs[pabi->regs[narg_reg++]] = val; + else if (dir < 0) + { + argp -= 4; + *(ffi_arg *)argp = val; + } + else + { + *(ffi_arg *)argp = val; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer paramters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + /* Alignment rules for arguments are quite complex. Vectors and + structures with 16 byte alignment get it. Note that long double + on Darwin does have 16 byte alignment, and does not get this + alignment if passed directly; a structure with a long double + inside, however, would get 16 byte alignment. Since libffi does + not support vectors, we need non concern ourselves with other + cases. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. */ + argp -= za; + memcpy (argp, valp, z); + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + memcpy (argp, valp, z); + argp += za; + } + } + } + FFI_ASSERT (dir > 0 || argp == stack); + + ffi_call_i386 (frame, stack); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +/** private members **/ + +void FFI_HIDDEN ffi_closure_i386(void); +void FFI_HIDDEN ffi_closure_STDCALL(void); +void FFI_HIDDEN ffi_closure_REGISTER(void); + +struct closure_frame +{ + unsigned rettemp[4]; /* 0 */ + unsigned regs[3]; /* 16-24 */ + ffi_cif *cif; /* 28 */ + void (*fun)(ffi_cif*,void*,void**,void*); /* 32 */ + void *user_data; /* 36 */ +}; + +int FFI_HIDDEN FFI_DECLARE_FASTCALL +ffi_closure_inner (struct closure_frame *frame, char *stack) +{ + ffi_cif *cif = frame->cif; + int cabi, i, n, flags, dir, narg_reg; + const struct abi_params *pabi; + ffi_type **arg_types; + char *argp; + void *rvalue; + void **avalue; + + cabi = cif->abi; + flags = cif->flags; + narg_reg = 0; + rvalue = frame->rettemp; + pabi = &abi_params[cabi]; + dir = pabi->dir; + argp = (dir < 0 ? 
stack + STACK_ALIGN (cif->bytes) : stack); + + switch (flags) + { + case X86_RET_STRUCTARG: + if (pabi->nregs > 0) + { + rvalue = (void *)frame->regs[pabi->regs[0]]; + narg_reg = 1; + frame->rettemp[0] = (unsigned)rvalue; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + rvalue = *(void **)argp; + argp += sizeof(void *); + frame->rettemp[0] = (unsigned)rvalue; + break; + } + + n = cif->nargs; + avalue = alloca(sizeof(void *) * n); + + arg_types = cif->arg_types; + for (i = 0; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + void *valp; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + valp = &frame->regs[pabi->regs[narg_reg++]]; + else if (dir < 0) + { + argp -= 4; + valp = argp; + } + else + { + valp = argp; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* See the comment in ffi_call_int. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer paramters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. */ + argp -= za; + valp = argp; + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + valp = argp; + argp += za; + } + } + + avalue[i] = valp; + } + + frame->fun (cif, rvalue, avalue, frame->user_data); + + if (cabi == FFI_STDCALL) + return flags + (cif->bytes << X86_RET_POP_SHIFT); + else + return flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int op = 0xb8; /* movl imm, %eax */ + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + dest = ffi_closure_i386; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_closure_STDCALL; + break; + case FFI_REGISTER: + dest = ffi_closure_REGISTER; + op = 0x68; /* pushl imm */ + break; + default: + return FFI_BAD_ABI; + } + + /* endbr32. */ + *(UINT32 *) tramp = 0xfb1e0ff3; + + /* movl or pushl immediate. 
*/ + tramp[4] = op; + *(void **)(tramp + 5) = codeloc; + + /* jmp dest */ + tramp[9] = 0xe9; + *(unsigned *)(tramp + 10) = (unsigned)dest - ((unsigned)codeloc + 14); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void FFI_HIDDEN ffi_go_closure_EAX(void); +void FFI_HIDDEN ffi_go_closure_ECX(void); +void FFI_HIDDEN ffi_go_closure_STDCALL(void); + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*dest)(void); + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_MS_CDECL: + dest = ffi_go_closure_ECX; + break; + case FFI_THISCALL: + case FFI_FASTCALL: + dest = ffi_go_closure_EAX; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_go_closure_STDCALL; + break; + case FFI_REGISTER: + default: + return FFI_BAD_ABI; + } + + closure->tramp = dest; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* ------- Native raw API support -------------------------------- */ + +#if !FFI_NO_RAW_API + +void FFI_HIDDEN ffi_closure_raw_SYSV(void); +void FFI_HIDDEN ffi_closure_raw_THISCALL(void); + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int i; + + /* We currently don't support certain kinds of arguments for raw + closures. This should be implemented by a separate assembly + language routine, since it would require argument processing, + something we don't do now for performance. */ + for (i = cif->nargs-1; i >= 0; i--) + switch (cif->arg_types[i]->type) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + return FFI_BAD_TYPEDEF; + } + + switch (cif->abi) + { + case FFI_THISCALL: + dest = ffi_closure_raw_THISCALL; + break; + case FFI_SYSV: + dest = ffi_closure_raw_SYSV; + break; + default: + return FFI_BAD_ABI; + } + + /* movl imm, %eax. */ + tramp[0] = 0xb8; + *(void **)(tramp + 1) = codeloc; + + /* jmp dest */ + tramp[5] = 0xe9; + *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void +ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *avalue) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + argp = stack = + (void *)((uintptr_t)alloca(bytes + sizeof(*frame) + rsize + 15) & ~16); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. 
*/ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + bytes -= sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; narg_reg < pabi->nregs && i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT && t != FFI_TYPE_FLOAT) + { + ffi_arg val = extend_basic_type (avalue, t); + frame->regs[pabi->regs[narg_reg++]] = val; + z = FFI_SIZEOF_ARG; + } + else + { + memcpy (argp, avalue, z); + z = FFI_ALIGN (z, FFI_SIZEOF_ARG); + argp += z; + } + avalue += z; + bytes -= z; + } + if (i < n) + memcpy (argp, avalue, bytes); + + ffi_call_i386 (frame, stack); +} +#endif /* !FFI_NO_RAW_API */ +#endif /* __i386__ */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi.c new file mode 100644 index 0000000..346e784 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi.c @@ -0,0 +1,764 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2017 Anthony Green + Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc. + Copyright (c) 2002 Ranjit Mathew + Copyright (c) 2002 Bo Thorsen + Copyright (c) 2002 Roger Sayle + Copyright (C) 2008, 2010 Free Software Foundation, Inc. + + x86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__i386__) || defined(_M_IX86) +#include +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 80-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#if defined(__GNUC__) && !defined(__declspec) +# define __declspec(x) __attribute__((x)) +#endif + +#if defined(_MSC_VER) && defined(_M_IX86) +/* Stack is not 16-byte aligned on Windows. */ +#define STACK_ALIGN(bytes) (bytes) +#else +#define STACK_ALIGN(bytes) FFI_ALIGN (bytes, 16) +#endif + +/* Perform machine dependent cif processing. 
*/ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int i, n, flags, cabi = cif->abi; + + switch (cabi) + { + case FFI_SYSV: + case FFI_STDCALL: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + case FFI_PASCAL: + case FFI_REGISTER: + break; + default: + return FFI_BAD_ABI; + } + + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = X86_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = X86_RET_FLOAT; + break; + case FFI_TYPE_DOUBLE: + flags = X86_RET_DOUBLE; + break; + case FFI_TYPE_LONGDOUBLE: + flags = X86_RET_LDOUBLE; + break; + case FFI_TYPE_UINT8: + flags = X86_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = X86_RET_UINT16; + break; + case FFI_TYPE_SINT8: + flags = X86_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = X86_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = X86_RET_INT64; + break; + case FFI_TYPE_STRUCT: +#ifndef X86 + /* ??? This should be a different ABI rather than an ifdef. */ + if (cif->rtype->size == 1) + flags = X86_RET_STRUCT_1B; + else if (cif->rtype->size == 2) + flags = X86_RET_STRUCT_2B; + else if (cif->rtype->size == 4) + flags = X86_RET_INT32; + else if (cif->rtype->size == 8) + flags = X86_RET_INT64; + else +#endif + { + do_struct: + switch (cabi) + { + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_STDCALL: + case FFI_MS_CDECL: + flags = X86_RET_STRUCTARG; + break; + default: + flags = X86_RET_STRUCTPOP; + break; + } + /* Allocate space for return value pointer. */ + bytes += FFI_ALIGN (sizeof(void*), FFI_SIZEOF_ARG); + } + break; + case FFI_TYPE_COMPLEX: + switch (cif->rtype->elements[0]->type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + goto do_struct; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = X86_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = X86_RET_STRUCT_2B; + break; + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + cif->flags = flags; + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *t = cif->arg_types[i]; + + bytes = FFI_ALIGN (bytes, t->alignment); + bytes += FFI_ALIGN (t->size, FFI_SIZEOF_ARG); + } + cif->bytes = bytes; + + return FFI_OK; +} + +static ffi_arg +extend_basic_type(void *arg, int type) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)arg; + case FFI_TYPE_UINT8: + return *(UINT8 *)arg; + case FFI_TYPE_SINT16: + return *(SINT16 *)arg; + case FFI_TYPE_UINT16: + return *(UINT16 *)arg; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + return *(UINT32 *)arg; + + default: + abort(); + } +} + +struct call_frame +{ + void *ebp; /* 0 */ + void *retaddr; /* 4 */ + void (*fn)(void); /* 8 */ + int flags; /* 12 */ + void *rvalue; /* 16 */ + unsigned regs[3]; /* 20-28 */ +}; + +struct abi_params +{ + int dir; /* parameter growth direction */ + int static_chain; /* the static chain register used by gcc */ + int nregs; /* number of register parameters */ + int regs[3]; +}; + +static const struct abi_params abi_params[FFI_LAST_ABI] = { + [FFI_SYSV] = { 1, R_ECX, 0 }, + [FFI_THISCALL] = { 1, R_EAX, 1, { R_ECX } }, + [FFI_FASTCALL] = { 1, R_EAX, 2, { R_ECX, R_EDX } }, + 
[FFI_STDCALL] = { 1, R_ECX, 0 }, + [FFI_PASCAL] = { -1, R_ECX, 0 }, + /* ??? No defined static chain; gcc does not support REGISTER. */ + [FFI_REGISTER] = { -1, R_ECX, 3, { R_EAX, R_EDX, R_ECX } }, + [FFI_MS_CDECL] = { 1, R_ECX, 0 } +}; + +#ifdef HAVE_FASTCALL + #ifdef _MSC_VER + #define FFI_DECLARE_FASTCALL __fastcall + #else + #define FFI_DECLARE_FASTCALL __declspec(fastcall) + #endif +#else + #define FFI_DECLARE_FASTCALL +#endif + +extern void FFI_DECLARE_FASTCALL ffi_call_i386(struct call_frame *, char *) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, dir, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + dir = pabi->dir; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + stack = alloca(bytes + sizeof(*frame) + rsize); + argp = (dir < 0 ? stack + bytes : stack); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + frame->regs[pabi->static_chain] = (unsigned)closure; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. */ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + ffi_arg val = extend_basic_type (valp, t); + + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + frame->regs[pabi->regs[narg_reg++]] = val; + else if (dir < 0) + { + argp -= 4; + *(ffi_arg *)argp = val; + } + else + { + *(ffi_arg *)argp = val; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer paramters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + /* Alignment rules for arguments are quite complex. Vectors and + structures with 16 byte alignment get it. Note that long double + on Darwin does have 16 byte alignment, and does not get this + alignment if passed directly; a structure with a long double + inside, however, would get 16 byte alignment. Since libffi does + not support vectors, we need non concern ourselves with other + cases. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. 
*/ + argp -= za; + memcpy (argp, valp, z); + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + memcpy (argp, valp, z); + argp += za; + } + } + } + FFI_ASSERT (dir > 0 || argp == stack); + + ffi_call_i386 (frame, stack); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +/** private members **/ + +void FFI_HIDDEN ffi_closure_i386(void); +void FFI_HIDDEN ffi_closure_STDCALL(void); +void FFI_HIDDEN ffi_closure_REGISTER(void); + +struct closure_frame +{ + unsigned rettemp[4]; /* 0 */ + unsigned regs[3]; /* 16-24 */ + ffi_cif *cif; /* 28 */ + void (*fun)(ffi_cif*,void*,void**,void*); /* 32 */ + void *user_data; /* 36 */ +}; + +int FFI_HIDDEN FFI_DECLARE_FASTCALL +ffi_closure_inner (struct closure_frame *frame, char *stack) +{ + ffi_cif *cif = frame->cif; + int cabi, i, n, flags, dir, narg_reg; + const struct abi_params *pabi; + ffi_type **arg_types; + char *argp; + void *rvalue; + void **avalue; + + cabi = cif->abi; + flags = cif->flags; + narg_reg = 0; + rvalue = frame->rettemp; + pabi = &abi_params[cabi]; + dir = pabi->dir; + argp = (dir < 0 ? stack + STACK_ALIGN (cif->bytes) : stack); + + switch (flags) + { + case X86_RET_STRUCTARG: + if (pabi->nregs > 0) + { + rvalue = (void *)frame->regs[pabi->regs[0]]; + narg_reg = 1; + frame->rettemp[0] = (unsigned)rvalue; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + rvalue = *(void **)argp; + argp += sizeof(void *); + frame->rettemp[0] = (unsigned)rvalue; + break; + } + + n = cif->nargs; + avalue = alloca(sizeof(void *) * n); + + arg_types = cif->arg_types; + for (i = 0; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + void *valp; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + valp = &frame->regs[pabi->regs[narg_reg++]]; + else if (dir < 0) + { + argp -= 4; + valp = argp; + } + else + { + valp = argp; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* See the comment in ffi_call_int. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer paramters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. 
*/ + argp -= za; + valp = argp; + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + valp = argp; + argp += za; + } + } + + avalue[i] = valp; + } + + frame->fun (cif, rvalue, avalue, frame->user_data); + + if (cabi == FFI_STDCALL) + return flags + (cif->bytes << X86_RET_POP_SHIFT); + else + return flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int op = 0xb8; /* movl imm, %eax */ + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + dest = ffi_closure_i386; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_closure_STDCALL; + break; + case FFI_REGISTER: + dest = ffi_closure_REGISTER; + op = 0x68; /* pushl imm */ + break; + default: + return FFI_BAD_ABI; + } + + /* endbr32. */ + *(UINT32 *) tramp = 0xfb1e0ff3; + + /* movl or pushl immediate. */ + tramp[4] = op; + *(void **)(tramp + 5) = codeloc; + + /* jmp dest */ + tramp[9] = 0xe9; + *(unsigned *)(tramp + 10) = (unsigned)dest - ((unsigned)codeloc + 14); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void FFI_HIDDEN ffi_go_closure_EAX(void); +void FFI_HIDDEN ffi_go_closure_ECX(void); +void FFI_HIDDEN ffi_go_closure_STDCALL(void); + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*dest)(void); + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_MS_CDECL: + dest = ffi_go_closure_ECX; + break; + case FFI_THISCALL: + case FFI_FASTCALL: + dest = ffi_go_closure_EAX; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_go_closure_STDCALL; + break; + case FFI_REGISTER: + default: + return FFI_BAD_ABI; + } + + closure->tramp = dest; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* ------- Native raw API support -------------------------------- */ + +#if !FFI_NO_RAW_API + +void FFI_HIDDEN ffi_closure_raw_SYSV(void); +void FFI_HIDDEN ffi_closure_raw_THISCALL(void); + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int i; + + /* We currently don't support certain kinds of arguments for raw + closures. This should be implemented by a separate assembly + language routine, since it would require argument processing, + something we don't do now for performance. */ + for (i = cif->nargs-1; i >= 0; i--) + switch (cif->arg_types[i]->type) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + return FFI_BAD_TYPEDEF; + } + + switch (cif->abi) + { + case FFI_THISCALL: + dest = ffi_closure_raw_THISCALL; + break; + case FFI_SYSV: + dest = ffi_closure_raw_SYSV; + break; + default: + return FFI_BAD_ABI; + } + + /* movl imm, %eax. 
*/ + tramp[0] = 0xb8; + *(void **)(tramp + 1) = codeloc; + + /* jmp dest */ + tramp[5] = 0xe9; + *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void +ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *avalue) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + argp = stack = + (void *)((uintptr_t)alloca(bytes + sizeof(*frame) + rsize + 15) & ~16); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. */ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + bytes -= sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; narg_reg < pabi->nregs && i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT && t != FFI_TYPE_FLOAT) + { + ffi_arg val = extend_basic_type (avalue, t); + frame->regs[pabi->regs[narg_reg++]] = val; + z = FFI_SIZEOF_ARG; + } + else + { + memcpy (argp, avalue, z); + z = FFI_ALIGN (z, FFI_SIZEOF_ARG); + argp += z; + } + avalue += z; + bytes -= z; + } + if (i < n) + memcpy (argp, avalue, bytes); + + ffi_call_i386 (frame, stack); +} +#endif /* !FFI_NO_RAW_API */ +#endif /* __i386__ */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64 2.c new file mode 100644 index 0000000..ed82e23 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64 2.c @@ -0,0 +1,888 @@ +/* ----------------------------------------------------------------------- + ffi64.c - Copyright (c) 2011, 2018 Anthony Green + Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 2008, 2010 Red Hat, Inc. + Copyright (c) 2002, 2007 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include "internal64.h" + +#ifdef __x86_64__ + +#define MAX_GPR_REGS 6 +#define MAX_SSE_REGS 8 + +#if defined(__INTEL_COMPILER) +#include "xmmintrin.h" +#define UINT128 __m128 +#else +#if defined(__SUNPRO_C) +#include +#define UINT128 __m128i +#else +#define UINT128 __int128_t +#endif +#endif + +union big_int_union +{ + UINT32 i32; + UINT64 i64; + UINT128 i128; +}; + +struct register_args +{ + /* Registers for argument passing. */ + UINT64 gpr[MAX_GPR_REGS]; + union big_int_union sse[MAX_SSE_REGS]; + UINT64 rax; /* ssecount */ + UINT64 r10; /* static chain */ +}; + +extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)) FFI_HIDDEN; + +/* All reference to register classes here is identical to the code in + gcc/config/i386/i386.c. Do *not* change one without the other. */ + +/* Register class used for passing given 64bit part of the argument. + These represent classes as documented by the PS ABI, with the + exception of SSESF, SSEDF classes, that are basically SSE class, + just gcc will use SF or DFmode move instead of DImode to avoid + reformatting penalties. + + Similary we play games with INTEGERSI_CLASS to use cheaper SImode moves + whenever possible (upper half does contain padding). */ +enum x86_64_reg_class + { + X86_64_NO_CLASS, + X86_64_INTEGER_CLASS, + X86_64_INTEGERSI_CLASS, + X86_64_SSE_CLASS, + X86_64_SSESF_CLASS, + X86_64_SSEDF_CLASS, + X86_64_SSEUP_CLASS, + X86_64_X87_CLASS, + X86_64_X87UP_CLASS, + X86_64_COMPLEX_X87_CLASS, + X86_64_MEMORY_CLASS + }; + +#define MAX_CLASSES 4 + +#define SSE_CLASS_P(X) ((X) >= X86_64_SSE_CLASS && X <= X86_64_SSEUP_CLASS) + +/* x86-64 register passing implementation. See x86-64 ABI for details. Goal + of this code is to classify each 8bytes of incoming argument by the register + class and assign registers accordingly. */ + +/* Return the union class of CLASS1 and CLASS2. + See the x86-64 PS ABI for details. */ + +static enum x86_64_reg_class +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) +{ + /* Rule #1: If both classes are equal, this is the resulting class. */ + if (class1 == class2) + return class1; + + /* Rule #2: If one of the classes is NO_CLASS, the resulting class is + the other class. */ + if (class1 == X86_64_NO_CLASS) + return class2; + if (class2 == X86_64_NO_CLASS) + return class1; + + /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ + if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. 
*/ + if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) + || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) + return X86_64_INTEGERSI_CLASS; + if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS + || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) + return X86_64_INTEGER_CLASS; + + /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, + MEMORY is used. */ + if (class1 == X86_64_X87_CLASS + || class1 == X86_64_X87UP_CLASS + || class1 == X86_64_COMPLEX_X87_CLASS + || class2 == X86_64_X87_CLASS + || class2 == X86_64_X87UP_CLASS + || class2 == X86_64_COMPLEX_X87_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #6: Otherwise class SSE is used. */ + return X86_64_SSE_CLASS; +} + +/* Classify the argument of type TYPE and mode MODE. + CLASSES will be filled by the register class used to pass each word + of the operand. The number of words is returned. In case the parameter + should be passed in memory, 0 is returned. As a special case for zero + sized containers, classes[0] will be NO_CLASS and 1 is returned. + + See the x86-64 PS ABI for details. +*/ +static size_t +classify_argument (ffi_type *type, enum x86_64_reg_class classes[], + size_t byte_offset) +{ + switch (type->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_integer: + { + size_t size = byte_offset + type->size; + + if (size <= 4) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (size <= 8) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (size <= 12) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else if (size <= 16) + { + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + else + FFI_ASSERT (0); + } + case FFI_TYPE_FLOAT: + if (!(byte_offset % 8)) + classes[0] = X86_64_SSESF_CLASS; + else + classes[0] = X86_64_SSE_CLASS; + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = X86_64_SSEDF_CLASS; + return 1; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_X87_CLASS; + classes[1] = X86_64_X87UP_CLASS; + return 2; +#endif + case FFI_TYPE_STRUCT: + { + const size_t UNITS_PER_WORD = 8; + size_t words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + ffi_type **ptr; + unsigned int i; + enum x86_64_reg_class subclasses[MAX_CLASSES]; + + /* If the struct is larger than 32 bytes, pass it on the stack. */ + if (type->size > 32) + return 0; + + for (i = 0; i < words; i++) + classes[i] = X86_64_NO_CLASS; + + /* Zero sized arrays or structures are NO_CLASS. We return 0 to + signalize memory class, so handle it as special case. */ + if (!words) + { + case FFI_TYPE_VOID: + classes[0] = X86_64_NO_CLASS; + return 1; + } + + /* Merge the fields of structure. 
*/ + for (ptr = type->elements; *ptr != NULL; ptr++) + { + size_t num; + + byte_offset = FFI_ALIGN (byte_offset, (*ptr)->alignment); + + num = classify_argument (*ptr, subclasses, byte_offset % 8); + if (num == 0) + return 0; + for (i = 0; i < num; i++) + { + size_t pos = byte_offset / 8; + classes[i + pos] = + merge_classes (subclasses[i], classes[i + pos]); + } + + byte_offset += (*ptr)->size; + } + + if (words > 2) + { + /* When size > 16 bytes, if the first one isn't + X86_64_SSE_CLASS or any other ones aren't + X86_64_SSEUP_CLASS, everything should be passed in + memory. */ + if (classes[0] != X86_64_SSE_CLASS) + return 0; + + for (i = 1; i < words; i++) + if (classes[i] != X86_64_SSEUP_CLASS) + return 0; + } + + /* Final merger cleanup. */ + for (i = 0; i < words; i++) + { + /* If one class is MEMORY, everything should be passed in + memory. */ + if (classes[i] == X86_64_MEMORY_CLASS) + return 0; + + /* The X86_64_SSEUP_CLASS should be always preceded by + X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */ + if (i > 1 && classes[i] == X86_64_SSEUP_CLASS + && classes[i - 1] != X86_64_SSE_CLASS + && classes[i - 1] != X86_64_SSEUP_CLASS) + { + /* The first one should never be X86_64_SSEUP_CLASS. */ + FFI_ASSERT (i != 0); + classes[i] = X86_64_SSE_CLASS; + } + + /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS, + everything should be passed in memory. */ + if (i > 1 && classes[i] == X86_64_X87UP_CLASS + && (classes[i - 1] != X86_64_X87_CLASS)) + { + /* The first one should never be X86_64_X87UP_CLASS. */ + FFI_ASSERT (i != 0); + return 0; + } + } + return words; + } + case FFI_TYPE_COMPLEX: + { + ffi_type *inner = type->elements[0]; + switch (inner->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + goto do_integer; + + case FFI_TYPE_FLOAT: + classes[0] = X86_64_SSE_CLASS; + if (byte_offset % 8) + { + classes[1] = X86_64_SSESF_CLASS; + return 2; + } + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = classes[1] = X86_64_SSEDF_CLASS; + return 2; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_COMPLEX_X87_CLASS; + return 1; +#endif + } + } + } + abort(); +} + +/* Examine the argument and return set number of register required in each + class. Return zero iff parameter should be passed in memory, otherwise + the number of registers. */ + +static size_t +examine_argument (ffi_type *type, enum x86_64_reg_class classes[MAX_CLASSES], + _Bool in_return, int *pngpr, int *pnsse) +{ + size_t n; + unsigned int i; + int ngpr, nsse; + + n = classify_argument (type, classes, 0); + if (n == 0) + return 0; + + ngpr = nsse = 0; + for (i = 0; i < n; ++i) + switch (classes[i]) + { + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + ngpr++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSESF_CLASS: + case X86_64_SSEDF_CLASS: + nsse++; + break; + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_X87_CLASS: + case X86_64_X87UP_CLASS: + case X86_64_COMPLEX_X87_CLASS: + return in_return != 0; + default: + abort (); + } + + *pngpr = ngpr; + *pnsse = nsse; + + return n; +} + +/* Perform machine dependent cif processing. 
*/ + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_cif_machdep_efi64(ffi_cif *cif); +#endif + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int gprcount, ssecount, i, avn, ngpr, nsse; + unsigned flags; + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t bytes, n, rtype_size; + ffi_type *rtype; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_cif_machdep_efi64(cif); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + gprcount = ssecount = 0; + + rtype = cif->rtype; + rtype_size = rtype->size; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = UNIX64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = UNIX64_RET_UINT8; + break; + case FFI_TYPE_SINT8: + flags = UNIX64_RET_SINT8; + break; + case FFI_TYPE_UINT16: + flags = UNIX64_RET_UINT16; + break; + case FFI_TYPE_SINT16: + flags = UNIX64_RET_SINT16; + break; + case FFI_TYPE_UINT32: + flags = UNIX64_RET_UINT32; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = UNIX64_RET_SINT32; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM32; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_XMM64; + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87; + break; +#endif + case FFI_TYPE_STRUCT: + n = examine_argument (cif->rtype, classes, 1, &ngpr, &nsse); + if (n == 0) + { + /* The return value is passed in memory. A pointer to that + memory is the first argument. Allocate a register for it. */ + gprcount++; + /* We don't have to do anything in asm for the return. */ + flags = UNIX64_RET_VOID | UNIX64_FLAG_RET_IN_MEM; + } + else + { + _Bool sse0 = SSE_CLASS_P (classes[0]); + + if (rtype_size == 4 && sse0) + flags = UNIX64_RET_XMM32; + else if (rtype_size == 8) + flags = sse0 ? UNIX64_RET_XMM64 : UNIX64_RET_INT64; + else + { + _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]); + if (sse0 && sse1) + flags = UNIX64_RET_ST_XMM0_XMM1; + else if (sse0) + flags = UNIX64_RET_ST_XMM0_RAX; + else if (sse1) + flags = UNIX64_RET_ST_RAX_XMM0; + else + flags = UNIX64_RET_ST_RAX_RDX; + flags |= rtype_size << UNIX64_SIZE_SHIFT; + } + } + break; + case FFI_TYPE_COMPLEX: + switch (rtype->elements[0]->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_ST_RAX_RDX | ((unsigned) rtype_size << UNIX64_SIZE_SHIFT); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM64; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_ST_XMM0_XMM1 | (16 << UNIX64_SIZE_SHIFT); + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87_2; + break; +#endif + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + + /* Go over all arguments and determine the way they should be passed. + If it's in a register and there is space for it, let that be so. If + not, add it's size to the stack byte count. 
*/ + for (bytes = 0, i = 0, avn = cif->nargs; i < avn; i++) + { + if (examine_argument (cif->arg_types[i], classes, 0, &ngpr, &nsse) == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = cif->arg_types[i]->alignment; + + if (align < 8) + align = 8; + + bytes = FFI_ALIGN (bytes, align); + bytes += cif->arg_types[i]->size; + } + else + { + gprcount += ngpr; + ssecount += nsse; + } + } + if (ssecount) + flags |= UNIX64_FLAG_XMM_ARGS; + + cif->flags = flags; + cif->bytes = (unsigned) FFI_ALIGN (bytes, 8); + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + enum x86_64_reg_class classes[MAX_CLASSES]; + char *stack, *argp; + ffi_type **arg_types; + int gprcount, ssecount, ngpr, nsse, i, avn, flags; + struct register_args *reg_args; + + /* Can't call 32-bit mode from 64-bit mode. */ + FFI_ASSERT (cif->abi == FFI_UNIX64); + + /* If the return value is a struct and we don't have a return value + address then we need to make one. Otherwise we can ignore it. */ + flags = cif->flags; + if (rvalue == NULL) + { + if (flags & UNIX64_FLAG_RET_IN_MEM) + rvalue = alloca (cif->rtype->size); + else + flags = UNIX64_RET_VOID; + } + + /* Allocate the space for the arguments, plus 4 words of temp space. */ + stack = alloca (sizeof (struct register_args) + cif->bytes + 4*8); + reg_args = (struct register_args *) stack; + argp = stack + sizeof (struct register_args); + + reg_args->r10 = (uintptr_t) closure; + + gprcount = ssecount = 0; + + /* If the return value is passed in memory, add the pointer as the + first integer argument. */ + if (flags & UNIX64_FLAG_RET_IN_MEM) + reg_args->gpr[gprcount++] = (unsigned long) rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0; i < avn; ++i) + { + size_t n, size = arg_types[i]->size; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + memcpy (argp, avalue[i], size); + argp += size; + } + else + { + /* The argument is passed entirely in registers. */ + char *a = (char *) avalue[i]; + unsigned int j; + + for (j = 0; j < n; j++, a += 8, size -= 8) + { + switch (classes[j]) + { + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + /* Sign-extend integer arguments passed in general + purpose registers, to cope with the fact that + LLVM incorrectly assumes that this will be done + (the x86-64 PS ABI does not specify this). 
*/ + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a); + break; + case FFI_TYPE_SINT16: + reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a); + break; + case FFI_TYPE_SINT32: + reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a); + break; + default: + reg_args->gpr[gprcount] = 0; + memcpy (®_args->gpr[gprcount], a, size); + } + gprcount++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSEDF_CLASS: + memcpy (®_args->sse[ssecount++].i64, a, sizeof(UINT64)); + break; + case X86_64_SSESF_CLASS: + memcpy (®_args->sse[ssecount++].i32, a, sizeof(UINT32)); + break; + default: + abort(); + } + } + } + } + reg_args->rax = ssecount; + + ffi_call_unix64 (stack, cif->bytes + sizeof (struct register_args), + flags, rvalue, fn); +} + +#ifndef __ILP32__ +extern void +ffi_call_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue); +#endif + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_efi64(cif, fn, rvalue, avalue); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifndef __ILP32__ +extern void +ffi_call_go_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); +#endif + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_go_efi64(cif, fn, rvalue, avalue, closure); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_unix64(void) FFI_HIDDEN; +extern void ffi_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_closure_loc_efi64(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[24] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + void (*dest)(void); + char *tramp = closure->tramp; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_closure_loc_efi64(closure, cif, fun, user_data, codeloc); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + if (cif->flags & UNIX64_FLAG_XMM_ARGS) + dest = ffi_closure_unix64_sse; + else + dest = ffi_closure_unix64; + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)dest; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_unix64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, + struct register_args *reg_args, + char *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn; + int gprcount, ssecount, ngpr, nsse; + int flags; + + avn = cif->nargs; + flags = cif->flags; + avalue = alloca(avn * sizeof(void *)); + gprcount = ssecount = 0; + + if (flags & UNIX64_FLAG_RET_IN_MEM) + { + /* On return, %rax will contain the address that was passed + by the caller in %rdi. 
*/ + void *r = (void *)(uintptr_t)reg_args->gpr[gprcount++]; + *(void **)rvalue = r; + rvalue = r; + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + } + + arg_types = cif->arg_types; + for (i = 0; i < avn; ++i) + { + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t n; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + avalue[i] = argp; + argp += arg_types[i]->size; + } + /* If the argument is in a single register, or two consecutive + integer registers, then we can use that address directly. */ + else if (n == 1 + || (n == 2 && !(SSE_CLASS_P (classes[0]) + || SSE_CLASS_P (classes[1])))) + { + /* The argument is in a single register. */ + if (SSE_CLASS_P (classes[0])) + { + avalue[i] = ®_args->sse[ssecount]; + ssecount += n; + } + else + { + avalue[i] = ®_args->gpr[gprcount]; + gprcount += n; + } + } + /* Otherwise, allocate space to make them consecutive. */ + else + { + char *a = alloca (16); + unsigned int j; + + avalue[i] = a; + for (j = 0; j < n; j++, a += 8) + { + if (SSE_CLASS_P (classes[j])) + memcpy (a, ®_args->sse[ssecount++], 8); + else + memcpy (a, ®_args->gpr[gprcount++], 8); + } + } + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell assembly how to perform return type promotions. */ + return flags; +} + +extern void ffi_go_closure_unix64(void) FFI_HIDDEN; +extern void ffi_go_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_go_closure_efi64(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)); +#endif + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_go_closure_efi64(closure, cif, fun); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + closure->tramp = (cif->flags & UNIX64_FLAG_XMM_ARGS + ? ffi_go_closure_unix64_sse + : ffi_go_closure_unix64); + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64.c new file mode 100644 index 0000000..ed82e23 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64.c @@ -0,0 +1,888 @@ +/* ----------------------------------------------------------------------- + ffi64.c - Copyright (c) 2011, 2018 Anthony Green + Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 2008, 2010 Red Hat, Inc. 
+ Copyright (c) 2002, 2007 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include "internal64.h" + +#ifdef __x86_64__ + +#define MAX_GPR_REGS 6 +#define MAX_SSE_REGS 8 + +#if defined(__INTEL_COMPILER) +#include "xmmintrin.h" +#define UINT128 __m128 +#else +#if defined(__SUNPRO_C) +#include +#define UINT128 __m128i +#else +#define UINT128 __int128_t +#endif +#endif + +union big_int_union +{ + UINT32 i32; + UINT64 i64; + UINT128 i128; +}; + +struct register_args +{ + /* Registers for argument passing. */ + UINT64 gpr[MAX_GPR_REGS]; + union big_int_union sse[MAX_SSE_REGS]; + UINT64 rax; /* ssecount */ + UINT64 r10; /* static chain */ +}; + +extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)) FFI_HIDDEN; + +/* All reference to register classes here is identical to the code in + gcc/config/i386/i386.c. Do *not* change one without the other. */ + +/* Register class used for passing given 64bit part of the argument. + These represent classes as documented by the PS ABI, with the + exception of SSESF, SSEDF classes, that are basically SSE class, + just gcc will use SF or DFmode move instead of DImode to avoid + reformatting penalties. + + Similary we play games with INTEGERSI_CLASS to use cheaper SImode moves + whenever possible (upper half does contain padding). */ +enum x86_64_reg_class + { + X86_64_NO_CLASS, + X86_64_INTEGER_CLASS, + X86_64_INTEGERSI_CLASS, + X86_64_SSE_CLASS, + X86_64_SSESF_CLASS, + X86_64_SSEDF_CLASS, + X86_64_SSEUP_CLASS, + X86_64_X87_CLASS, + X86_64_X87UP_CLASS, + X86_64_COMPLEX_X87_CLASS, + X86_64_MEMORY_CLASS + }; + +#define MAX_CLASSES 4 + +#define SSE_CLASS_P(X) ((X) >= X86_64_SSE_CLASS && X <= X86_64_SSEUP_CLASS) + +/* x86-64 register passing implementation. See x86-64 ABI for details. Goal + of this code is to classify each 8bytes of incoming argument by the register + class and assign registers accordingly. */ + +/* Return the union class of CLASS1 and CLASS2. + See the x86-64 PS ABI for details. */ + +static enum x86_64_reg_class +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) +{ + /* Rule #1: If both classes are equal, this is the resulting class. */ + if (class1 == class2) + return class1; + + /* Rule #2: If one of the classes is NO_CLASS, the resulting class is + the other class. 
*/ + if (class1 == X86_64_NO_CLASS) + return class2; + if (class2 == X86_64_NO_CLASS) + return class1; + + /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ + if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */ + if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) + || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) + return X86_64_INTEGERSI_CLASS; + if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS + || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) + return X86_64_INTEGER_CLASS; + + /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, + MEMORY is used. */ + if (class1 == X86_64_X87_CLASS + || class1 == X86_64_X87UP_CLASS + || class1 == X86_64_COMPLEX_X87_CLASS + || class2 == X86_64_X87_CLASS + || class2 == X86_64_X87UP_CLASS + || class2 == X86_64_COMPLEX_X87_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #6: Otherwise class SSE is used. */ + return X86_64_SSE_CLASS; +} + +/* Classify the argument of type TYPE and mode MODE. + CLASSES will be filled by the register class used to pass each word + of the operand. The number of words is returned. In case the parameter + should be passed in memory, 0 is returned. As a special case for zero + sized containers, classes[0] will be NO_CLASS and 1 is returned. + + See the x86-64 PS ABI for details. +*/ +static size_t +classify_argument (ffi_type *type, enum x86_64_reg_class classes[], + size_t byte_offset) +{ + switch (type->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_integer: + { + size_t size = byte_offset + type->size; + + if (size <= 4) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (size <= 8) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (size <= 12) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else if (size <= 16) + { + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + else + FFI_ASSERT (0); + } + case FFI_TYPE_FLOAT: + if (!(byte_offset % 8)) + classes[0] = X86_64_SSESF_CLASS; + else + classes[0] = X86_64_SSE_CLASS; + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = X86_64_SSEDF_CLASS; + return 1; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_X87_CLASS; + classes[1] = X86_64_X87UP_CLASS; + return 2; +#endif + case FFI_TYPE_STRUCT: + { + const size_t UNITS_PER_WORD = 8; + size_t words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + ffi_type **ptr; + unsigned int i; + enum x86_64_reg_class subclasses[MAX_CLASSES]; + + /* If the struct is larger than 32 bytes, pass it on the stack. */ + if (type->size > 32) + return 0; + + for (i = 0; i < words; i++) + classes[i] = X86_64_NO_CLASS; + + /* Zero sized arrays or structures are NO_CLASS. We return 0 to + signalize memory class, so handle it as special case. */ + if (!words) + { + case FFI_TYPE_VOID: + classes[0] = X86_64_NO_CLASS; + return 1; + } + + /* Merge the fields of structure. 
*/ + for (ptr = type->elements; *ptr != NULL; ptr++) + { + size_t num; + + byte_offset = FFI_ALIGN (byte_offset, (*ptr)->alignment); + + num = classify_argument (*ptr, subclasses, byte_offset % 8); + if (num == 0) + return 0; + for (i = 0; i < num; i++) + { + size_t pos = byte_offset / 8; + classes[i + pos] = + merge_classes (subclasses[i], classes[i + pos]); + } + + byte_offset += (*ptr)->size; + } + + if (words > 2) + { + /* When size > 16 bytes, if the first one isn't + X86_64_SSE_CLASS or any other ones aren't + X86_64_SSEUP_CLASS, everything should be passed in + memory. */ + if (classes[0] != X86_64_SSE_CLASS) + return 0; + + for (i = 1; i < words; i++) + if (classes[i] != X86_64_SSEUP_CLASS) + return 0; + } + + /* Final merger cleanup. */ + for (i = 0; i < words; i++) + { + /* If one class is MEMORY, everything should be passed in + memory. */ + if (classes[i] == X86_64_MEMORY_CLASS) + return 0; + + /* The X86_64_SSEUP_CLASS should be always preceded by + X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */ + if (i > 1 && classes[i] == X86_64_SSEUP_CLASS + && classes[i - 1] != X86_64_SSE_CLASS + && classes[i - 1] != X86_64_SSEUP_CLASS) + { + /* The first one should never be X86_64_SSEUP_CLASS. */ + FFI_ASSERT (i != 0); + classes[i] = X86_64_SSE_CLASS; + } + + /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS, + everything should be passed in memory. */ + if (i > 1 && classes[i] == X86_64_X87UP_CLASS + && (classes[i - 1] != X86_64_X87_CLASS)) + { + /* The first one should never be X86_64_X87UP_CLASS. */ + FFI_ASSERT (i != 0); + return 0; + } + } + return words; + } + case FFI_TYPE_COMPLEX: + { + ffi_type *inner = type->elements[0]; + switch (inner->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + goto do_integer; + + case FFI_TYPE_FLOAT: + classes[0] = X86_64_SSE_CLASS; + if (byte_offset % 8) + { + classes[1] = X86_64_SSESF_CLASS; + return 2; + } + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = classes[1] = X86_64_SSEDF_CLASS; + return 2; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_COMPLEX_X87_CLASS; + return 1; +#endif + } + } + } + abort(); +} + +/* Examine the argument and return set number of register required in each + class. Return zero iff parameter should be passed in memory, otherwise + the number of registers. */ + +static size_t +examine_argument (ffi_type *type, enum x86_64_reg_class classes[MAX_CLASSES], + _Bool in_return, int *pngpr, int *pnsse) +{ + size_t n; + unsigned int i; + int ngpr, nsse; + + n = classify_argument (type, classes, 0); + if (n == 0) + return 0; + + ngpr = nsse = 0; + for (i = 0; i < n; ++i) + switch (classes[i]) + { + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + ngpr++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSESF_CLASS: + case X86_64_SSEDF_CLASS: + nsse++; + break; + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_X87_CLASS: + case X86_64_X87UP_CLASS: + case X86_64_COMPLEX_X87_CLASS: + return in_return != 0; + default: + abort (); + } + + *pngpr = ngpr; + *pnsse = nsse; + + return n; +} + +/* Perform machine dependent cif processing. 
*/ + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_cif_machdep_efi64(ffi_cif *cif); +#endif + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int gprcount, ssecount, i, avn, ngpr, nsse; + unsigned flags; + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t bytes, n, rtype_size; + ffi_type *rtype; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_cif_machdep_efi64(cif); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + gprcount = ssecount = 0; + + rtype = cif->rtype; + rtype_size = rtype->size; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = UNIX64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = UNIX64_RET_UINT8; + break; + case FFI_TYPE_SINT8: + flags = UNIX64_RET_SINT8; + break; + case FFI_TYPE_UINT16: + flags = UNIX64_RET_UINT16; + break; + case FFI_TYPE_SINT16: + flags = UNIX64_RET_SINT16; + break; + case FFI_TYPE_UINT32: + flags = UNIX64_RET_UINT32; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = UNIX64_RET_SINT32; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM32; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_XMM64; + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87; + break; +#endif + case FFI_TYPE_STRUCT: + n = examine_argument (cif->rtype, classes, 1, &ngpr, &nsse); + if (n == 0) + { + /* The return value is passed in memory. A pointer to that + memory is the first argument. Allocate a register for it. */ + gprcount++; + /* We don't have to do anything in asm for the return. */ + flags = UNIX64_RET_VOID | UNIX64_FLAG_RET_IN_MEM; + } + else + { + _Bool sse0 = SSE_CLASS_P (classes[0]); + + if (rtype_size == 4 && sse0) + flags = UNIX64_RET_XMM32; + else if (rtype_size == 8) + flags = sse0 ? UNIX64_RET_XMM64 : UNIX64_RET_INT64; + else + { + _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]); + if (sse0 && sse1) + flags = UNIX64_RET_ST_XMM0_XMM1; + else if (sse0) + flags = UNIX64_RET_ST_XMM0_RAX; + else if (sse1) + flags = UNIX64_RET_ST_RAX_XMM0; + else + flags = UNIX64_RET_ST_RAX_RDX; + flags |= rtype_size << UNIX64_SIZE_SHIFT; + } + } + break; + case FFI_TYPE_COMPLEX: + switch (rtype->elements[0]->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_ST_RAX_RDX | ((unsigned) rtype_size << UNIX64_SIZE_SHIFT); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM64; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_ST_XMM0_XMM1 | (16 << UNIX64_SIZE_SHIFT); + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87_2; + break; +#endif + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + + /* Go over all arguments and determine the way they should be passed. + If it's in a register and there is space for it, let that be so. If + not, add it's size to the stack byte count. 
*/ + for (bytes = 0, i = 0, avn = cif->nargs; i < avn; i++) + { + if (examine_argument (cif->arg_types[i], classes, 0, &ngpr, &nsse) == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = cif->arg_types[i]->alignment; + + if (align < 8) + align = 8; + + bytes = FFI_ALIGN (bytes, align); + bytes += cif->arg_types[i]->size; + } + else + { + gprcount += ngpr; + ssecount += nsse; + } + } + if (ssecount) + flags |= UNIX64_FLAG_XMM_ARGS; + + cif->flags = flags; + cif->bytes = (unsigned) FFI_ALIGN (bytes, 8); + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + enum x86_64_reg_class classes[MAX_CLASSES]; + char *stack, *argp; + ffi_type **arg_types; + int gprcount, ssecount, ngpr, nsse, i, avn, flags; + struct register_args *reg_args; + + /* Can't call 32-bit mode from 64-bit mode. */ + FFI_ASSERT (cif->abi == FFI_UNIX64); + + /* If the return value is a struct and we don't have a return value + address then we need to make one. Otherwise we can ignore it. */ + flags = cif->flags; + if (rvalue == NULL) + { + if (flags & UNIX64_FLAG_RET_IN_MEM) + rvalue = alloca (cif->rtype->size); + else + flags = UNIX64_RET_VOID; + } + + /* Allocate the space for the arguments, plus 4 words of temp space. */ + stack = alloca (sizeof (struct register_args) + cif->bytes + 4*8); + reg_args = (struct register_args *) stack; + argp = stack + sizeof (struct register_args); + + reg_args->r10 = (uintptr_t) closure; + + gprcount = ssecount = 0; + + /* If the return value is passed in memory, add the pointer as the + first integer argument. */ + if (flags & UNIX64_FLAG_RET_IN_MEM) + reg_args->gpr[gprcount++] = (unsigned long) rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0; i < avn; ++i) + { + size_t n, size = arg_types[i]->size; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + memcpy (argp, avalue[i], size); + argp += size; + } + else + { + /* The argument is passed entirely in registers. */ + char *a = (char *) avalue[i]; + unsigned int j; + + for (j = 0; j < n; j++, a += 8, size -= 8) + { + switch (classes[j]) + { + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + /* Sign-extend integer arguments passed in general + purpose registers, to cope with the fact that + LLVM incorrectly assumes that this will be done + (the x86-64 PS ABI does not specify this). 
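/* A minimal sketch (not from the vendored libffi sources) of the call path
   prepared above: two double arguments classify as SSE, so
   ffi_prep_cif_machdep sets UNIX64_FLAG_XMM_ARGS and marks the return as
   UNIX64_RET_XMM64, and ffi_call_int hands both values to ffi_call_unix64 in
   XMM registers. Calling pow() here is an illustrative choice; build with
   -lffi -lm. */
#include <ffi.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  ffi_cif cif;
  ffi_type *argtypes[] = { &ffi_type_double, &ffi_type_double };

  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, argtypes)
      != FFI_OK)
    return 1;

  double base = 2.0, exponent = 10.0, result;
  void *argvalues[] = { &base, &exponent };
  ffi_call (&cif, FFI_FN (pow), &result, argvalues);
  printf ("%f\n", result);                    /* 1024.000000 */
  return 0;
}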
*/ + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a); + break; + case FFI_TYPE_SINT16: + reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a); + break; + case FFI_TYPE_SINT32: + reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a); + break; + default: + reg_args->gpr[gprcount] = 0; + memcpy (&reg_args->gpr[gprcount], a, size); + } + gprcount++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSEDF_CLASS: + memcpy (&reg_args->sse[ssecount++].i64, a, sizeof(UINT64)); + break; + case X86_64_SSESF_CLASS: + memcpy (&reg_args->sse[ssecount++].i32, a, sizeof(UINT32)); + break; + default: + abort(); + } + } + } + reg_args->rax = ssecount; + + ffi_call_unix64 (stack, cif->bytes + sizeof (struct register_args), + flags, rvalue, fn); +} + +#ifndef __ILP32__ +extern void +ffi_call_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue); +#endif + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_efi64(cif, fn, rvalue, avalue); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifndef __ILP32__ +extern void +ffi_call_go_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); +#endif + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_go_efi64(cif, fn, rvalue, avalue, closure); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_unix64(void) FFI_HIDDEN; +extern void ffi_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_closure_loc_efi64(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[24] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + void (*dest)(void); + char *tramp = closure->tramp; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_closure_loc_efi64(closure, cif, fun, user_data, codeloc); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + if (cif->flags & UNIX64_FLAG_XMM_ARGS) + dest = ffi_closure_unix64_sse; + else + dest = ffi_closure_unix64; + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)dest; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_unix64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, + struct register_args *reg_args, + char *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn; + int gprcount, ssecount, ngpr, nsse; + int flags; + + avn = cif->nargs; + flags = cif->flags; + avalue = alloca(avn * sizeof(void *)); + gprcount = ssecount = 0; + + if (flags & UNIX64_FLAG_RET_IN_MEM) + { + /* On return, %rax will contain the address that was passed + by the caller in %rdi.
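/* A minimal sketch (not from the vendored libffi sources) of the closure
   machinery above: ffi_prep_closure_loc writes the endbr64/lea/jmp
   trampoline into closure->tramp and appends the ffi_closure_unix64 (or
   _sse) entry address, so `code` becomes a callable function pointer. The
   handler and variable names are illustrative assumptions; build with
   -lffi. */
#include <ffi.h>
#include <stdio.h>

static void
add_handler (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif;
  int a = *(int *) args[0];
  int b = *(int *) args[1];
  /* Integer returns narrower than ffi_arg are written as a full ffi_arg. */
  *(ffi_arg *) ret = a + b + *(int *) user_data;
}

int
main (void)
{
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
  ffi_cif cif;
  ffi_type *argtypes[] = { &ffi_type_sint, &ffi_type_sint };
  int bias = 100;

  if (closure == NULL
      || ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes)
         != FFI_OK
      || ffi_prep_closure_loc (closure, &cif, add_handler, &bias, code)
         != FFI_OK)
    return 1;

  int (*fn) (int, int) = (int (*)(int, int)) code;
  printf ("%d\n", fn (2, 3));                 /* 105 */

  ffi_closure_free (closure);
  return 0;
}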
*/ + void *r = (void *)(uintptr_t)reg_args->gpr[gprcount++]; + *(void **)rvalue = r; + rvalue = r; + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + } + + arg_types = cif->arg_types; + for (i = 0; i < avn; ++i) + { + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t n; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + avalue[i] = argp; + argp += arg_types[i]->size; + } + /* If the argument is in a single register, or two consecutive + integer registers, then we can use that address directly. */ + else if (n == 1 + || (n == 2 && !(SSE_CLASS_P (classes[0]) + || SSE_CLASS_P (classes[1])))) + { + /* The argument is in a single register. */ + if (SSE_CLASS_P (classes[0])) + { + avalue[i] = &reg_args->sse[ssecount]; + ssecount += n; + } + else + { + avalue[i] = &reg_args->gpr[gprcount]; + gprcount += n; + } + } + /* Otherwise, allocate space to make them consecutive. */ + else + { + char *a = alloca (16); + unsigned int j; + + avalue[i] = a; + for (j = 0; j < n; j++, a += 8) + { + if (SSE_CLASS_P (classes[j])) + memcpy (a, &reg_args->sse[ssecount++], 8); + else + memcpy (a, &reg_args->gpr[gprcount++], 8); + } + } + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell assembly how to perform return type promotions. */ + return flags; +} + +extern void ffi_go_closure_unix64(void) FFI_HIDDEN; +extern void ffi_go_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_go_closure_efi64(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)); +#endif + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_go_closure_efi64(closure, cif, fun); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + closure->tramp = (cif->flags & UNIX64_FLAG_XMM_ARGS + ? ffi_go_closure_unix64_sse + : ffi_go_closure_unix64); + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget 2.h new file mode 100644 index 0000000..a34f3e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget 2.h @@ -0,0 +1,160 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2014, 2018 Anthony Green + Copyright (c) 1996-2003, 2010 Red Hat, Inc. + Copyright (C) 2008 Free Software Foundation, Inc. + + Target configuration macros for x86 and x86-64.
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +/* For code common to all platforms on x86 and x86_64. */ +#define X86_ANY + +#if defined (X86_64) && defined (__i386__) +#undef X86_64 +#define X86 +#endif + +#ifdef X86_WIN64 +#define FFI_SIZEOF_ARG 8 +#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */ +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#ifndef _MSC_VER +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +#ifdef X86_WIN64 +#ifdef _MSC_VER +typedef unsigned __int64 ffi_arg; +typedef __int64 ffi_sarg; +#else +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#endif +#else +#if defined __x86_64__ && defined __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif +#endif + +typedef enum ffi_abi { +#if defined(X86_WIN64) + FFI_FIRST_ABI = 0, + FFI_WIN64, /* sizeof(long double) == 8 - microsoft compilers */ + FFI_GNUW64, /* sizeof(long double) == 16 - GNU compilers */ + FFI_LAST_ABI, +#ifdef __GNUC__ + FFI_DEFAULT_ABI = FFI_GNUW64 +#else + FFI_DEFAULT_ABI = FFI_WIN64 +#endif + +#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) + FFI_FIRST_ABI = 1, + FFI_UNIX64, + FFI_WIN64, + FFI_EFI64 = FFI_WIN64, + FFI_GNUW64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX64 + +#elif defined(X86_WIN32) + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_STDCALL = 2, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_MS_CDECL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_MS_CDECL +#else + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_STDCALL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_MS_CDECL = 8, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +#endif +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 + +#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1) +#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST 
+ 2) +#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3) +#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4) + +#if defined (X86_64) || defined(X86_WIN64) \ + || (defined (__x86_64__) && defined (X86_DARWIN)) +/* 4 bytes of ENDBR64 + 7 bytes of LEA + 6 bytes of JMP + 7 bytes of NOP + + 8 bytes of pointer. */ +# define FFI_TRAMPOLINE_SIZE 32 +# define FFI_NATIVE_RAW_API 0 +#else +/* 4 bytes of ENDBR32 + 5 bytes of MOV + 5 bytes of JMP + 2 unused + bytes. */ +# define FFI_TRAMPOLINE_SIZE 16 +# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */ +#endif + +#if !defined(GENERATE_LIBFFI_MAP) && defined(__ASSEMBLER__) \ + && defined(__CET__) +# include +# define _CET_NOTRACK notrack +#else +# define _CET_ENDBR +# define _CET_NOTRACK +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget.h new file mode 100644 index 0000000..a34f3e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget.h @@ -0,0 +1,160 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2014, 2018 Anthony Green + Copyright (c) 1996-2003, 2010 Red Hat, Inc. + Copyright (C) 2008 Free Software Foundation, Inc. + + Target configuration macros for x86 and x86-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +/* For code common to all platforms on x86 and x86_64. 
*/ +#define X86_ANY + +#if defined (X86_64) && defined (__i386__) +#undef X86_64 +#define X86 +#endif + +#ifdef X86_WIN64 +#define FFI_SIZEOF_ARG 8 +#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */ +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#ifndef _MSC_VER +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +#ifdef X86_WIN64 +#ifdef _MSC_VER +typedef unsigned __int64 ffi_arg; +typedef __int64 ffi_sarg; +#else +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#endif +#else +#if defined __x86_64__ && defined __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif +#endif + +typedef enum ffi_abi { +#if defined(X86_WIN64) + FFI_FIRST_ABI = 0, + FFI_WIN64, /* sizeof(long double) == 8 - microsoft compilers */ + FFI_GNUW64, /* sizeof(long double) == 16 - GNU compilers */ + FFI_LAST_ABI, +#ifdef __GNUC__ + FFI_DEFAULT_ABI = FFI_GNUW64 +#else + FFI_DEFAULT_ABI = FFI_WIN64 +#endif + +#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) + FFI_FIRST_ABI = 1, + FFI_UNIX64, + FFI_WIN64, + FFI_EFI64 = FFI_WIN64, + FFI_GNUW64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX64 + +#elif defined(X86_WIN32) + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_STDCALL = 2, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_MS_CDECL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_MS_CDECL +#else + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_STDCALL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_MS_CDECL = 8, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +#endif +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 + +#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1) +#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2) +#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3) +#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4) + +#if defined (X86_64) || defined(X86_WIN64) \ + || (defined (__x86_64__) && defined (X86_DARWIN)) +/* 4 bytes of ENDBR64 + 7 bytes of LEA + 6 bytes of JMP + 7 bytes of NOP + + 8 bytes of pointer. */ +# define FFI_TRAMPOLINE_SIZE 32 +# define FFI_NATIVE_RAW_API 0 +#else +/* 4 bytes of ENDBR32 + 5 bytes of MOV + 5 bytes of JMP + 2 unused + bytes. */ +# define FFI_TRAMPOLINE_SIZE 16 +# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */ +#endif + +#if !defined(GENERATE_LIBFFI_MAP) && defined(__ASSEMBLER__) \ + && defined(__CET__) +# include +# define _CET_NOTRACK notrack +#else +# define _CET_ENDBR +# define _CET_NOTRACK +#endif + +#endif + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64 2.c new file mode 100644 index 0000000..034dffd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64 2.c @@ -0,0 +1,313 @@ +/* ----------------------------------------------------------------------- + ffiw64.c - Copyright (c) 2018 Anthony Green + Copyright (c) 2014 Red Hat, Inc. 
+ + x86 win64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__x86_64__) || defined(_M_AMD64) +#include +#include +#include +#include + +#ifdef X86_WIN64 +#define EFI64(name) name +#else +#define EFI64(name) FFI_HIDDEN name##_efi64 +#endif + +struct win64_call_frame +{ + UINT64 rbp; /* 0 */ + UINT64 retaddr; /* 8 */ + UINT64 fn; /* 16 */ + UINT64 flags; /* 24 */ + UINT64 rvalue; /* 32 */ +}; + +extern void ffi_call_win64 (void *stack, struct win64_call_frame *, + void *closure) FFI_HIDDEN; + +ffi_status FFI_HIDDEN +EFI64(ffi_prep_cif_machdep)(ffi_cif *cif) +{ + int flags, n; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + flags = cif->rtype->type; + switch (flags) + { + default: + break; + case FFI_TYPE_LONGDOUBLE: + /* GCC returns long double values by reference, like a struct */ + if (cif->abi == FFI_GNUW64) + flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_COMPLEX: + flags = FFI_TYPE_STRUCT; + /* FALLTHRU */ + case FFI_TYPE_STRUCT: + switch (cif->rtype->size) + { + case 8: + flags = FFI_TYPE_UINT64; + break; + case 4: + flags = FFI_TYPE_SMALL_STRUCT_4B; + break; + case 2: + flags = FFI_TYPE_SMALL_STRUCT_2B; + break; + case 1: + flags = FFI_TYPE_SMALL_STRUCT_1B; + break; + } + break; + } + cif->flags = flags; + + /* Each argument either fits in a register, an 8 byte slot, or is + passed by reference with the pointer in the 8 byte slot. */ + n = cif->nargs; + n += (flags == FFI_TYPE_STRUCT); + if (n < 4) + n = 4; + cif->bytes = n * 8; + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + int i, j, n, flags; + UINT64 *stack; + size_t rsize; + struct win64_call_frame *frame; + + FFI_ASSERT(cif->abi == FFI_GNUW64 || cif->abi == FFI_WIN64); + + flags = cif->flags; + rsize = 0; + + /* If we have no return value for a structure, we need to create one. + Otherwise we can ignore the return type entirely. 
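/* A minimal sketch (not from the vendored libffi sources) of the stack-slot
   arithmetic above: on Win64 every argument occupies one 8-byte slot (larger
   values travel by reference), at least four slots are always reserved as
   the register home area, and returning a large aggregate adds one hidden
   pointer slot. The example argument counts are illustrative assumptions. */
#include <stdio.h>

static unsigned
win64_stack_bytes (unsigned nargs, int returns_big_struct)
{
  unsigned n = nargs + (returns_big_struct ? 1u : 0u);
  if (n < 4)
    n = 4;                    /* home slots for rcx/rdx/r8/r9 */
  return n * 8;               /* mirrors the cif->bytes computation above */
}

int
main (void)
{
  printf ("%u\n", win64_stack_bytes (2, 0));  /* 32: minimum of four slots */
  printf ("%u\n", win64_stack_bytes (5, 0));  /* 40 */
  printf ("%u\n", win64_stack_bytes (5, 1));  /* 48: plus the return pointer */
  return 0;
}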
*/ + if (rvalue == NULL) + { + if (flags == FFI_TYPE_STRUCT) + rsize = cif->rtype->size; + else + flags = FFI_TYPE_VOID; + } + + stack = alloca(cif->bytes + sizeof(struct win64_call_frame) + rsize); + frame = (struct win64_call_frame *)((char *)stack + cif->bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = (uintptr_t)fn; + frame->flags = flags; + frame->rvalue = (uintptr_t)rvalue; + + j = 0; + if (flags == FFI_TYPE_STRUCT) + { + stack[0] = (uintptr_t)rvalue; + j = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++j) + { + switch (cif->arg_types[i]->size) + { + case 8: + stack[j] = *(UINT64 *)avalue[i]; + break; + case 4: + stack[j] = *(UINT32 *)avalue[i]; + break; + case 2: + stack[j] = *(UINT16 *)avalue[i]; + break; + case 1: + stack[j] = *(UINT8 *)avalue[i]; + break; + default: + stack[j] = (uintptr_t)avalue[i]; + break; + } + } + + ffi_call_win64 (stack, frame, closure); +} + +void +EFI64(ffi_call)(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +EFI64(ffi_call_go)(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_win64(void) FFI_HIDDEN; +extern void ffi_go_closure_win64(void) FFI_HIDDEN; + +ffi_status +EFI64(ffi_prep_closure_loc)(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[FFI_TRAMPOLINE_SIZE - 8] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + char *tramp = closure->tramp; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)ffi_closure_win64; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +EFI64(ffi_prep_go_closure)(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + closure->tramp = ffi_go_closure_win64; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +struct win64_closure_frame +{ + UINT64 rvalue[2]; + UINT64 fargs[4]; + UINT64 retaddr; + UINT64 args[]; +}; + +/* Force the inner function to use the MS ABI. When compiling on win64 + this is a nop. When compiling on unix, this simplifies the assembly, + and places the burden of saving the extra call-saved registers on + the compiler. */ +int FFI_HIDDEN __attribute__((ms_abi)) +ffi_closure_win64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + struct win64_closure_frame *frame) +{ + void **avalue; + void *rvalue; + int i, n, nreg, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + rvalue = frame->rvalue; + nreg = 0; + + /* When returning a structure, the address is in the first argument. + We must also be prepared to return the same address in eax, so + install that address in the frame and pretend we return a pointer. 
*/ + flags = cif->flags; + if (flags == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)frame->args[0]; + frame->rvalue[0] = frame->args[0]; + nreg = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++nreg) + { + size_t size = cif->arg_types[i]->size; + size_t type = cif->arg_types[i]->type; + void *a; + + if (type == FFI_TYPE_DOUBLE || type == FFI_TYPE_FLOAT) + { + if (nreg < 4) + a = &frame->fargs[nreg]; + else + a = &frame->args[nreg]; + } + else if (size == 1 || size == 2 || size == 4 || size == 8) + a = &frame->args[nreg]; + else + a = (void *)(uintptr_t)frame->args[nreg]; + + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + return flags; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64.c new file mode 100644 index 0000000..034dffd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64.c @@ -0,0 +1,313 @@ +/* ----------------------------------------------------------------------- + ffiw64.c - Copyright (c) 2018 Anthony Green + Copyright (c) 2014 Red Hat, Inc. + + x86 win64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
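/* A minimal sketch (not from the vendored libffi sources) of how
   ffi_closure_win64_inner above locates each argument: the first four
   parameters have both a GPR home slot in args[] and an XMM home slot in
   fargs[], so float/double parameters among the first four are read from
   fargs[], other register-sized values from args[], and any size that is not
   1, 2, 4 or 8 bytes arrives as a pointer stored in the slot. struct
   frame_view mirrors only the two relevant members of win64_closure_frame. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct frame_view { uint64_t fargs[4]; uint64_t args[4]; };

static const void *
arg_slot (const struct frame_view *f, unsigned index, int is_fp, size_t size)
{
  if (is_fp && index < 4)
    return &f->fargs[index];                        /* XMM home slot */
  if (size == 1 || size == 2 || size == 4 || size == 8)
    return &f->args[index];                         /* value lives in the slot */
  return (const void *) (uintptr_t) f->args[index]; /* passed by reference */
}

int
main (void)
{
  struct frame_view f = { { 0 }, { 0 } };
  double d = 2.5;
  memcpy (&f.fargs[1], &d, sizeof d);               /* second parameter */
  const double *p = arg_slot (&f, 1, 1, sizeof d);
  return (*p == 2.5) ? 0 : 1;
}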
+ ----------------------------------------------------------------------- */ + +#if defined(__x86_64__) || defined(_M_AMD64) +#include +#include +#include +#include + +#ifdef X86_WIN64 +#define EFI64(name) name +#else +#define EFI64(name) FFI_HIDDEN name##_efi64 +#endif + +struct win64_call_frame +{ + UINT64 rbp; /* 0 */ + UINT64 retaddr; /* 8 */ + UINT64 fn; /* 16 */ + UINT64 flags; /* 24 */ + UINT64 rvalue; /* 32 */ +}; + +extern void ffi_call_win64 (void *stack, struct win64_call_frame *, + void *closure) FFI_HIDDEN; + +ffi_status FFI_HIDDEN +EFI64(ffi_prep_cif_machdep)(ffi_cif *cif) +{ + int flags, n; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + flags = cif->rtype->type; + switch (flags) + { + default: + break; + case FFI_TYPE_LONGDOUBLE: + /* GCC returns long double values by reference, like a struct */ + if (cif->abi == FFI_GNUW64) + flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_COMPLEX: + flags = FFI_TYPE_STRUCT; + /* FALLTHRU */ + case FFI_TYPE_STRUCT: + switch (cif->rtype->size) + { + case 8: + flags = FFI_TYPE_UINT64; + break; + case 4: + flags = FFI_TYPE_SMALL_STRUCT_4B; + break; + case 2: + flags = FFI_TYPE_SMALL_STRUCT_2B; + break; + case 1: + flags = FFI_TYPE_SMALL_STRUCT_1B; + break; + } + break; + } + cif->flags = flags; + + /* Each argument either fits in a register, an 8 byte slot, or is + passed by reference with the pointer in the 8 byte slot. */ + n = cif->nargs; + n += (flags == FFI_TYPE_STRUCT); + if (n < 4) + n = 4; + cif->bytes = n * 8; + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + int i, j, n, flags; + UINT64 *stack; + size_t rsize; + struct win64_call_frame *frame; + + FFI_ASSERT(cif->abi == FFI_GNUW64 || cif->abi == FFI_WIN64); + + flags = cif->flags; + rsize = 0; + + /* If we have no return value for a structure, we need to create one. + Otherwise we can ignore the return type entirely. 
*/ + if (rvalue == NULL) + { + if (flags == FFI_TYPE_STRUCT) + rsize = cif->rtype->size; + else + flags = FFI_TYPE_VOID; + } + + stack = alloca(cif->bytes + sizeof(struct win64_call_frame) + rsize); + frame = (struct win64_call_frame *)((char *)stack + cif->bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = (uintptr_t)fn; + frame->flags = flags; + frame->rvalue = (uintptr_t)rvalue; + + j = 0; + if (flags == FFI_TYPE_STRUCT) + { + stack[0] = (uintptr_t)rvalue; + j = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++j) + { + switch (cif->arg_types[i]->size) + { + case 8: + stack[j] = *(UINT64 *)avalue[i]; + break; + case 4: + stack[j] = *(UINT32 *)avalue[i]; + break; + case 2: + stack[j] = *(UINT16 *)avalue[i]; + break; + case 1: + stack[j] = *(UINT8 *)avalue[i]; + break; + default: + stack[j] = (uintptr_t)avalue[i]; + break; + } + } + + ffi_call_win64 (stack, frame, closure); +} + +void +EFI64(ffi_call)(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +EFI64(ffi_call_go)(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_win64(void) FFI_HIDDEN; +extern void ffi_go_closure_win64(void) FFI_HIDDEN; + +ffi_status +EFI64(ffi_prep_closure_loc)(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[FFI_TRAMPOLINE_SIZE - 8] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + char *tramp = closure->tramp; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)ffi_closure_win64; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +EFI64(ffi_prep_go_closure)(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + closure->tramp = ffi_go_closure_win64; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +struct win64_closure_frame +{ + UINT64 rvalue[2]; + UINT64 fargs[4]; + UINT64 retaddr; + UINT64 args[]; +}; + +/* Force the inner function to use the MS ABI. When compiling on win64 + this is a nop. When compiling on unix, this simplifies the assembly, + and places the burden of saving the extra call-saved registers on + the compiler. */ +int FFI_HIDDEN __attribute__((ms_abi)) +ffi_closure_win64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + struct win64_closure_frame *frame) +{ + void **avalue; + void *rvalue; + int i, n, nreg, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + rvalue = frame->rvalue; + nreg = 0; + + /* When returning a structure, the address is in the first argument. + We must also be prepared to return the same address in eax, so + install that address in the frame and pretend we return a pointer. 
*/ + flags = cif->flags; + if (flags == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)frame->args[0]; + frame->rvalue[0] = frame->args[0]; + nreg = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++nreg) + { + size_t size = cif->arg_types[i]->size; + size_t type = cif->arg_types[i]->type; + void *a; + + if (type == FFI_TYPE_DOUBLE || type == FFI_TYPE_FLOAT) + { + if (nreg < 4) + a = &frame->fargs[nreg]; + else + a = &frame->args[nreg]; + } + else if (size == 1 || size == 2 || size == 4 || size == 8) + a = &frame->args[nreg]; + else + a = (void *)(uintptr_t)frame->args[nreg]; + + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + return flags; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal 2.h new file mode 100644 index 0000000..09771ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal 2.h @@ -0,0 +1,29 @@ +#define X86_RET_FLOAT 0 +#define X86_RET_DOUBLE 1 +#define X86_RET_LDOUBLE 2 +#define X86_RET_SINT8 3 +#define X86_RET_SINT16 4 +#define X86_RET_UINT8 5 +#define X86_RET_UINT16 6 +#define X86_RET_INT64 7 +#define X86_RET_INT32 8 +#define X86_RET_VOID 9 +#define X86_RET_STRUCTPOP 10 +#define X86_RET_STRUCTARG 11 +#define X86_RET_STRUCT_1B 12 +#define X86_RET_STRUCT_2B 13 +#define X86_RET_UNUSED14 14 +#define X86_RET_UNUSED15 15 + +#define X86_RET_TYPE_MASK 15 +#define X86_RET_POP_SHIFT 4 + +#define R_EAX 0 +#define R_EDX 1 +#define R_ECX 2 + +#ifdef __PCC__ +# define HAVE_FASTCALL 0 +#else +# define HAVE_FASTCALL 1 +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal.h new file mode 100644 index 0000000..09771ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal.h @@ -0,0 +1,29 @@ +#define X86_RET_FLOAT 0 +#define X86_RET_DOUBLE 1 +#define X86_RET_LDOUBLE 2 +#define X86_RET_SINT8 3 +#define X86_RET_SINT16 4 +#define X86_RET_UINT8 5 +#define X86_RET_UINT16 6 +#define X86_RET_INT64 7 +#define X86_RET_INT32 8 +#define X86_RET_VOID 9 +#define X86_RET_STRUCTPOP 10 +#define X86_RET_STRUCTARG 11 +#define X86_RET_STRUCT_1B 12 +#define X86_RET_STRUCT_2B 13 +#define X86_RET_UNUSED14 14 +#define X86_RET_UNUSED15 15 + +#define X86_RET_TYPE_MASK 15 +#define X86_RET_POP_SHIFT 4 + +#define R_EAX 0 +#define R_EDX 1 +#define R_ECX 2 + +#ifdef __PCC__ +# define HAVE_FASTCALL 0 +#else +# define HAVE_FASTCALL 1 +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64 2.h new file mode 100644 index 0000000..512e955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64 2.h @@ -0,0 +1,22 @@ +#define UNIX64_RET_VOID 0 +#define UNIX64_RET_UINT8 1 +#define UNIX64_RET_UINT16 2 +#define UNIX64_RET_UINT32 3 +#define UNIX64_RET_SINT8 4 +#define UNIX64_RET_SINT16 5 +#define UNIX64_RET_SINT32 6 +#define UNIX64_RET_INT64 7 +#define UNIX64_RET_XMM32 8 +#define UNIX64_RET_XMM64 9 +#define UNIX64_RET_X87 10 +#define UNIX64_RET_X87_2 11 +#define UNIX64_RET_ST_XMM0_RAX 12 +#define UNIX64_RET_ST_RAX_XMM0 13 +#define UNIX64_RET_ST_XMM0_XMM1 14 +#define UNIX64_RET_ST_RAX_RDX 15 + +#define UNIX64_RET_LAST 15 + +#define UNIX64_FLAG_RET_IN_MEM (1 << 10) +#define 
UNIX64_FLAG_XMM_ARGS (1 << 11) +#define UNIX64_SIZE_SHIFT 12 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64.h new file mode 100644 index 0000000..512e955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64.h @@ -0,0 +1,22 @@ +#define UNIX64_RET_VOID 0 +#define UNIX64_RET_UINT8 1 +#define UNIX64_RET_UINT16 2 +#define UNIX64_RET_UINT32 3 +#define UNIX64_RET_SINT8 4 +#define UNIX64_RET_SINT16 5 +#define UNIX64_RET_SINT32 6 +#define UNIX64_RET_INT64 7 +#define UNIX64_RET_XMM32 8 +#define UNIX64_RET_XMM64 9 +#define UNIX64_RET_X87 10 +#define UNIX64_RET_X87_2 11 +#define UNIX64_RET_ST_XMM0_RAX 12 +#define UNIX64_RET_ST_RAX_XMM0 13 +#define UNIX64_RET_ST_XMM0_XMM1 14 +#define UNIX64_RET_ST_RAX_RDX 15 + +#define UNIX64_RET_LAST 15 + +#define UNIX64_FLAG_RET_IN_MEM (1 << 10) +#define UNIX64_FLAG_XMM_ARGS (1 << 11) +#define UNIX64_SIZE_SHIFT 12 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv 2.S new file mode 100644 index 0000000..6d56483 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv 2.S @@ -0,0 +1,1138 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. + + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __i386__ +#ifndef _MSC_VER + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef X86_DARWIN +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +/* Handle win32 fastcall name mangling. */ +#ifdef X86_WIN32 +# define ffi_call_i386 @ffi_call_i386@8 +# define ffi_closure_inner @ffi_closure_inner@8 +#else +# define ffi_call_i386 C(ffi_call_i386) +# define ffi_closure_inner C(ffi_closure_inner) +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. 
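/* A minimal sketch (not from the vendored libffi sources) of how ffi64.c
   packs cif->flags with the UNIX64_* constants above: the return kind sits
   in the low bits, bits 10 and 11 carry the return-in-memory and XMM-args
   flags, and for two-register struct returns the struct size is shifted in
   at UNIX64_SIZE_SHIFT. The struct { double; long } example is an
   illustrative assumption. */
#include <stdio.h>

#define UNIX64_RET_ST_XMM0_RAX 12
#define UNIX64_FLAG_RET_IN_MEM (1 << 10)
#define UNIX64_FLAG_XMM_ARGS   (1 << 11)
#define UNIX64_SIZE_SHIFT      12

int
main (void)
{
  /* Returning struct { double d; long l; } (16 bytes, XMM0 + RAX), with at
     least one SSE argument in the call. */
  unsigned flags = UNIX64_RET_ST_XMM0_RAX | (16u << UNIX64_SIZE_SHIFT);
  flags |= UNIX64_FLAG_XMM_ARGS;

  printf ("kind=%u size=%u xmm=%d mem=%d\n",
          flags & 15, flags >> UNIX64_SIZE_SHIFT,
          !!(flags & UNIX64_FLAG_XMM_ARGS), !!(flags & UNIX64_FLAG_RET_IN_MEM));
  /* prints: kind=12 size=16 xmm=1 mem=0 */
  return 0;
}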
The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + .balign 16 + .globl ffi_call_i386 + FFI_HIDDEN(ffi_call_i386) + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ffi_call_i386: +L(UW0): + # cfi_startproc + _CET_ENDBR +#if !HAVE_FASTCALL + movl 4(%esp), %ecx + movl 8(%esp), %edx +#endif + movl (%esp), %eax /* move the return address */ + movl %ebp, (%ecx) /* store %ebp into local frame */ + movl %eax, 4(%ecx) /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + movl %ecx, %ebp +L(UW1): + # cfi_def_cfa(%ebp, 8) + # cfi_rel_offset(%ebp, 0) + + movl %edx, %esp /* set outgoing argument stack */ + movl 20+R_EAX*4(%ebp), %eax /* set register arguments */ + movl 20+R_EDX*4(%ebp), %edx + movl 20+R_ECX*4(%ebp), %ecx + + call *8(%ebp) + + movl 12(%ebp), %ecx /* load return type code */ + movl %ebx, 8(%ebp) /* preserve %ebx */ +L(UW2): + # cfi_rel_offset(%ebx, 8) + + andl $X86_RET_TYPE_MASK, %ecx +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc1): + leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx +#else + leal L(store_table)(,%ecx, 8), %ebx +#endif + movl 16(%ebp), %ecx /* load result address */ + _CET_NOTRACK jmp *%ebx + + .balign 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstps (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstpl (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstpt (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movswl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzwl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_INT64) + movl %edx, 4(%ecx) + /* fallthru */ +E(L(store_table), X86_RET_INT32) + movl %eax, (%ecx) + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + movl 8(%ebp), %ebx + movl %ebp, %esp + popl %ebp +L(UW3): + # cfi_remember_state + # cfi_def_cfa(%esp, 4) + # cfi_restore(%ebx) + # cfi_restore(%ebp) + ret +L(UW4): + # cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + movb %al, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + movw %ax, (%ecx) + jmp L(e1) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(store_table), X86_RET_UNUSED14) + ud2 +E(L(store_table), X86_RET_UNUSED15) + ud2 + +L(UW5): + # cfi_endproc +ENDF(ffi_call_i386) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. */ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +#define FFI_CLOSURE_SAVE_REGS \ + movl %eax, closure_CF+16+R_EAX*4(%esp); \ + movl %edx, closure_CF+16+R_EDX*4(%esp); \ + movl %ecx, closure_CF+16+R_ECX*4(%esp) + +#define FFI_CLOSURE_COPY_TRAMP_DATA \ + movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \ + movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \ + movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \ + movl %edx, closure_CF+28(%esp); \ + movl %ecx, closure_CF+32(%esp); \ + movl %eax, closure_CF+36(%esp) + +#if HAVE_FASTCALL +# define FFI_CLOSURE_PREP_CALL \ + movl %esp, %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ +#else +# define FFI_CLOSURE_PREP_CALL \ + leal closure_CF(%esp), %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ \ + movl %ecx, (%esp); \ + movl %edx, 4(%esp) +#endif + +#define FFI_CLOSURE_CALL_INNER(UWN) \ + call ffi_closure_inner + +#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))(, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx + +#ifdef __PIC__ +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + call C(__x86.get_pc_thunk.dx); \ +L(C1(pc,N)): \ + leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# else +# define FFI_CLOSURE_CALL_INNER_SAVE_EBX +# undef FFI_CLOSURE_CALL_INNER +# define FFI_CLOSURE_CALL_INNER(UWN) \ + movl %ebx, 40(%esp); /* save ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_rel_offset(%ebx, 40); */ \ + call C(__x86.get_pc_thunk.bx); /* load got register */ \ + addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \ + call ffi_closure_inner@PLT +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \ + movl 40(%esp), %ebx; /* restore ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_restore(%ebx); */ \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + + .balign 16 + .globl C(ffi_go_closure_EAX) + FFI_HIDDEN(C(ffi_go_closure_EAX)) +C(ffi_go_closure_EAX): +L(UW6): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW7): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%eax), %edx /* copy cif */ + movl 8(%eax), %ecx /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %ecx, closure_CF+32(%esp) + movl %eax, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + # cfi_endproc +ENDF(C(ffi_go_closure_EAX)) + + .balign 16 + .globl C(ffi_go_closure_ECX) + FFI_HIDDEN(C(ffi_go_closure_ECX)) +C(ffi_go_closure_ECX): +L(UW9): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW10): + # 
cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + # cfi_endproc +ENDF(C(ffi_go_closure_ECX)) + +/* The closure entry points are reached from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + + .balign 16 + .globl C(ffi_closure_i386) + FFI_HIDDEN(C(ffi_closure_i386)) + +C(ffi_closure_i386): +L(UW12): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW13): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. */ +L(do_closure_i386): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP(2, 15) + + .balign 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + flds closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fldl closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + addl $closure_FS, %esp +L(UW16): + # cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + addl $closure_FS, %esp +L(UW18): + # cfi_adjust_cfa_offset(-closure_FS) + ret $4 +L(UW19): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + ud2 +E(L(load_table2), X86_RET_UNUSED15) + ud2 + +L(UW20): + # cfi_endproc +ENDF(C(ffi_closure_i386)) + + .balign 16 + .globl C(ffi_go_closure_STDCALL) + FFI_HIDDEN(C(ffi_go_closure_STDCALL)) +C(ffi_go_closure_STDCALL): +L(UW21): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW22): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + # cfi_endproc +ENDF(C(ffi_go_closure_STDCALL)) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. 
*/ + + .balign 16 + .globl C(ffi_closure_REGISTER) + FFI_HIDDEN(C(ffi_closure_REGISTER)) +C(ffi_closure_REGISTER): +L(UW24): + # cfi_startproc + # cfi_def_cfa(%esp, 8) + # cfi_offset(%eip, -8) + _CET_ENDBR + subl $closure_FS-4, %esp +L(UW25): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl closure_FS-4(%esp), %ecx /* load retaddr */ + movl closure_FS(%esp), %eax /* load closure */ + movl %ecx, closure_FS(%esp) /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + # cfi_endproc +ENDF(C(ffi_closure_REGISTER)) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + + .balign 16 + .globl C(ffi_closure_STDCALL) + FFI_HIDDEN(C(ffi_closure_STDCALL)) +C(ffi_closure_STDCALL): +L(UW27): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW28): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER): + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + movl %eax, %ecx + shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */ + leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */ + movl closure_FS(%esp), %edx /* move return address */ + movl %edx, (%ecx) + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. + There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP(3, 30) + + .balign 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + flds closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_DOUBLE) + fldl closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT8) + movsbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT16) + movswl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT8) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT16) + movzwl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT32) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_VOID) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTPOP) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTARG) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzwl %ax, %eax + movl %ecx, %esp + ret + + /* Fill out the table so that bad values are predictable. 
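/* A minimal sketch (not from the vendored libffi sources) of the return word
   decoded above: ffi_closure_inner hands back a value whose low bits select
   an X86_RET_* jump-table entry (masked with X86_RET_TYPE_MASK) and whose
   upper bits, isolated by the `shrl $X86_RET_POP_SHIFT` above, give the
   number of argument bytes a callee-pop convention such as stdcall must
   remove. The 12-byte example is an illustrative assumption. */
#include <stdio.h>

#define X86_RET_INT32      8
#define X86_RET_TYPE_MASK  15
#define X86_RET_POP_SHIFT  4

int
main (void)
{
  /* An stdcall callee returning int and popping 12 bytes of arguments. */
  unsigned flags = X86_RET_INT32 | (12u << X86_RET_POP_SHIFT);
  printf ("ret=%u pop=%u\n",
          flags & X86_RET_TYPE_MASK, flags >> X86_RET_POP_SHIFT);
  /* prints: ret=8 pop=12 */
  return 0;
}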
*/ +E(L(load_table3), X86_RET_UNUSED14) + ud2 +E(L(load_table3), X86_RET_UNUSED15) + ud2 + +L(UW31): + # cfi_endproc +ENDF(C(ffi_closure_STDCALL)) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + + .balign 16 + .globl C(ffi_closure_raw_SYSV) + FFI_HIDDEN(C(ffi_closure_raw_SYSV)) +C(ffi_closure_raw_SYSV): +L(UW32): + # cfi_startproc + _CET_ENDBR + subl $raw_closure_S_FS, %esp +L(UW33): + # cfi_def_cfa_offset(raw_closure_S_FS + 4) + movl %ebx, raw_closure_S_FS-4(%esp) +L(UW34): + # cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc4): + leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +#else + leal L(load_table4)(,%eax, 8), %ecx +#endif + movl raw_closure_S_FS-4(%esp), %ebx +L(UW35): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e4) +E(L(load_table4), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + addl $raw_closure_S_FS, %esp +L(UW36): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + addl $raw_closure_S_FS, %esp +L(UW38): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret $4 +L(UW39): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + ud2 +E(L(load_table4), X86_RET_UNUSED15) + ud2 + +L(UW40): + # cfi_endproc +ENDF(C(ffi_closure_raw_SYSV)) + +#define raw_closure_T_FS (16+16+8) + + .balign 16 + .globl C(ffi_closure_raw_THISCALL) + FFI_HIDDEN(C(ffi_closure_raw_THISCALL)) +C(ffi_closure_raw_THISCALL): +L(UW41): + # cfi_startproc + _CET_ENDBR + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. 
*/ + popl %edx +L(UW42): + # cfi_def_cfa_offset(0) + # cfi_register(%eip, %edx) + pushl %ecx +L(UW43): + # cfi_adjust_cfa_offset(4) + pushl %edx +L(UW44): + # cfi_adjust_cfa_offset(4) + # cfi_rel_offset(%eip, 0) + subl $raw_closure_T_FS, %esp +L(UW45): + # cfi_adjust_cfa_offset(raw_closure_T_FS) + movl %ebx, raw_closure_T_FS-4(%esp) +L(UW46): + # cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc5): + leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +#else + leal L(load_table5)(,%eax, 8), %ecx +#endif + movl raw_closure_T_FS-4(%esp), %ebx +L(UW47): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e5) +E(L(load_table5), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + addl $raw_closure_T_FS, %esp +L(UW48): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret $4 +L(UW49): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + addl $raw_closure_T_FS, %esp +L(UW50): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret $8 +L(UW51): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table5), X86_RET_UNUSED14) + ud2 +E(L(load_table5), X86_RET_UNUSED15) + ud2 + +L(UW52): + # cfi_endproc +ENDF(C(ffi_closure_raw_THISCALL)) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + .globl X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +#if defined(__PIC__) + COMDAT(C(__x86.get_pc_thunk.bx)) +C(__x86.get_pc_thunk.bx): + movl (%esp), %ebx + ret +ENDF(C(__x86.get_pc_thunk.bx)) +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE + COMDAT(C(__x86.get_pc_thunk.dx)) +C(__x86.get_pc_thunk.dx): + movl (%esp), %edx + ret +ENDF(C(__x86.get_pc_thunk.dx)) +#endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. 
*/ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* 
DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + .globl @feat.00 +@feat.00 = 1 +#endif + +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_i386 */ + .long C(ffi_call_i386) + .set L1,L(UW5)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_EAX */ + .long C(ffi_go_closure_EAX) + .set L2,L(UW8)-L(UW6) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_ECX */ + .long C(ffi_go_closure_ECX) + .set L3,L(UW11)-L(UW9) + .long L3 + .long 0x04000000 /* use dwarf unwind 
info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_i386 */
+ .long C(ffi_closure_i386)
+ .set L4,L(UW20)-L(UW12)
+ .long L4
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_go_closure_STDCALL */
+ .long C(ffi_go_closure_STDCALL)
+ .set L5,L(UW23)-L(UW21)
+ .long L5
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_REGISTER */
+ .long C(ffi_closure_REGISTER)
+ .set L6,L(UW26)-L(UW24)
+ .long L6
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_STDCALL */
+ .long C(ffi_closure_STDCALL)
+ .set L7,L(UW31)-L(UW27)
+ .long L7
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_raw_SYSV */
+ .long C(ffi_closure_raw_SYSV)
+ .set L8,L(UW40)-L(UW32)
+ .long L8
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_raw_THISCALL */
+ .long C(ffi_closure_raw_THISCALL)
+ .set L9,L(UW52)-L(UW41)
+ .long L9
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+#endif /* __APPLE__ */
+
+#endif /* ifndef _MSC_VER */
+#endif /* ifdef __i386__ */
+
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",@progbits
+#endif
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv.S
new file mode 100644
index 0000000..6d56483
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv.S
@@ -0,0 +1,1138 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2017 Anthony Green
+ - Copyright (c) 2013 The Written Word, Inc.
+ - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc.
+
+ X86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifdef __i386__
+#ifndef _MSC_VER
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include "internal.h"
+
+#define C2(X, Y) X ## Y
+#define C1(X, Y) C2(X, Y)
+#ifdef __USER_LABEL_PREFIX__
+# define C(X) C1(__USER_LABEL_PREFIX__, X)
+#else
+# define C(X) X
+#endif
+
+#ifdef X86_DARWIN
+# define L(X) C1(L, X)
+#else
+# define L(X) C1(.L, X)
+#endif
+
+#ifdef __ELF__
+# define ENDF(X) .type X,@function; .size X, . - X
+#else
+# define ENDF(X)
+#endif
+
+/* Handle win32 fastcall name mangling. 
*/ +#ifdef X86_WIN32 +# define ffi_call_i386 @ffi_call_i386@8 +# define ffi_closure_inner @ffi_closure_inner@8 +#else +# define ffi_call_i386 C(ffi_call_i386) +# define ffi_closure_inner C(ffi_closure_inner) +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + .balign 16 + .globl ffi_call_i386 + FFI_HIDDEN(ffi_call_i386) + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ffi_call_i386: +L(UW0): + # cfi_startproc + _CET_ENDBR +#if !HAVE_FASTCALL + movl 4(%esp), %ecx + movl 8(%esp), %edx +#endif + movl (%esp), %eax /* move the return address */ + movl %ebp, (%ecx) /* store %ebp into local frame */ + movl %eax, 4(%ecx) /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + movl %ecx, %ebp +L(UW1): + # cfi_def_cfa(%ebp, 8) + # cfi_rel_offset(%ebp, 0) + + movl %edx, %esp /* set outgoing argument stack */ + movl 20+R_EAX*4(%ebp), %eax /* set register arguments */ + movl 20+R_EDX*4(%ebp), %edx + movl 20+R_ECX*4(%ebp), %ecx + + call *8(%ebp) + + movl 12(%ebp), %ecx /* load return type code */ + movl %ebx, 8(%ebp) /* preserve %ebx */ +L(UW2): + # cfi_rel_offset(%ebx, 8) + + andl $X86_RET_TYPE_MASK, %ecx +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc1): + leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx +#else + leal L(store_table)(,%ecx, 8), %ebx +#endif + movl 16(%ebp), %ecx /* load result address */ + _CET_NOTRACK jmp *%ebx + + .balign 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstps (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstpl (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstpt (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movswl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzwl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_INT64) + movl %edx, 4(%ecx) + /* fallthru */ +E(L(store_table), X86_RET_INT32) + movl %eax, (%ecx) + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + movl 8(%ebp), %ebx + movl %ebp, %esp + popl %ebp +L(UW3): + # cfi_remember_state + # cfi_def_cfa(%esp, 4) + # cfi_restore(%ebx) + # cfi_restore(%ebp) + ret +L(UW4): + # cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + movb %al, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + movw %ax, (%ecx) + jmp L(e1) + + /* Fill out the table so that bad values are 
predictable. */ +E(L(store_table), X86_RET_UNUSED14) + ud2 +E(L(store_table), X86_RET_UNUSED15) + ud2 + +L(UW5): + # cfi_endproc +ENDF(ffi_call_i386) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. */ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +#define FFI_CLOSURE_SAVE_REGS \ + movl %eax, closure_CF+16+R_EAX*4(%esp); \ + movl %edx, closure_CF+16+R_EDX*4(%esp); \ + movl %ecx, closure_CF+16+R_ECX*4(%esp) + +#define FFI_CLOSURE_COPY_TRAMP_DATA \ + movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \ + movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \ + movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \ + movl %edx, closure_CF+28(%esp); \ + movl %ecx, closure_CF+32(%esp); \ + movl %eax, closure_CF+36(%esp) + +#if HAVE_FASTCALL +# define FFI_CLOSURE_PREP_CALL \ + movl %esp, %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ +#else +# define FFI_CLOSURE_PREP_CALL \ + leal closure_CF(%esp), %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ \ + movl %ecx, (%esp); \ + movl %edx, 4(%esp) +#endif + +#define FFI_CLOSURE_CALL_INNER(UWN) \ + call ffi_closure_inner + +#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))(, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx + +#ifdef __PIC__ +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + call C(__x86.get_pc_thunk.dx); \ +L(C1(pc,N)): \ + leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# else +# define FFI_CLOSURE_CALL_INNER_SAVE_EBX +# undef FFI_CLOSURE_CALL_INNER +# define FFI_CLOSURE_CALL_INNER(UWN) \ + movl %ebx, 40(%esp); /* save ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_rel_offset(%ebx, 40); */ \ + call C(__x86.get_pc_thunk.bx); /* load got register */ \ + addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \ + call ffi_closure_inner@PLT +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \ + movl 40(%esp), %ebx; /* restore ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_restore(%ebx); */ \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + + .balign 16 + .globl C(ffi_go_closure_EAX) + FFI_HIDDEN(C(ffi_go_closure_EAX)) +C(ffi_go_closure_EAX): +L(UW6): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW7): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%eax), %edx /* copy cif */ + movl 8(%eax), %ecx /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %ecx, closure_CF+32(%esp) + movl %eax, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + # cfi_endproc +ENDF(C(ffi_go_closure_EAX)) + + .balign 16 + .globl C(ffi_go_closure_ECX) + FFI_HIDDEN(C(ffi_go_closure_ECX)) +C(ffi_go_closure_ECX): +L(UW9): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW10): + # 
cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + # cfi_endproc +ENDF(C(ffi_go_closure_ECX)) + +/* The closure entry points are reached from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + + .balign 16 + .globl C(ffi_closure_i386) + FFI_HIDDEN(C(ffi_closure_i386)) + +C(ffi_closure_i386): +L(UW12): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW13): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. */ +L(do_closure_i386): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP(2, 15) + + .balign 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + flds closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fldl closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + addl $closure_FS, %esp +L(UW16): + # cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + addl $closure_FS, %esp +L(UW18): + # cfi_adjust_cfa_offset(-closure_FS) + ret $4 +L(UW19): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + ud2 +E(L(load_table2), X86_RET_UNUSED15) + ud2 + +L(UW20): + # cfi_endproc +ENDF(C(ffi_closure_i386)) + + .balign 16 + .globl C(ffi_go_closure_STDCALL) + FFI_HIDDEN(C(ffi_go_closure_STDCALL)) +C(ffi_go_closure_STDCALL): +L(UW21): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW22): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + # cfi_endproc +ENDF(C(ffi_go_closure_STDCALL)) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. 
*/ + + .balign 16 + .globl C(ffi_closure_REGISTER) + FFI_HIDDEN(C(ffi_closure_REGISTER)) +C(ffi_closure_REGISTER): +L(UW24): + # cfi_startproc + # cfi_def_cfa(%esp, 8) + # cfi_offset(%eip, -8) + _CET_ENDBR + subl $closure_FS-4, %esp +L(UW25): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl closure_FS-4(%esp), %ecx /* load retaddr */ + movl closure_FS(%esp), %eax /* load closure */ + movl %ecx, closure_FS(%esp) /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + # cfi_endproc +ENDF(C(ffi_closure_REGISTER)) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + + .balign 16 + .globl C(ffi_closure_STDCALL) + FFI_HIDDEN(C(ffi_closure_STDCALL)) +C(ffi_closure_STDCALL): +L(UW27): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW28): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER): + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + movl %eax, %ecx + shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */ + leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */ + movl closure_FS(%esp), %edx /* move return address */ + movl %edx, (%ecx) + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. + There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP(3, 30) + + .balign 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + flds closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_DOUBLE) + fldl closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT8) + movsbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT16) + movswl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT8) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT16) + movzwl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT32) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_VOID) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTPOP) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTARG) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzwl %ax, %eax + movl %ecx, %esp + ret + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table3), X86_RET_UNUSED14) + ud2 +E(L(load_table3), X86_RET_UNUSED15) + ud2 + +L(UW31): + # cfi_endproc +ENDF(C(ffi_closure_STDCALL)) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + + .balign 16 + .globl C(ffi_closure_raw_SYSV) + FFI_HIDDEN(C(ffi_closure_raw_SYSV)) +C(ffi_closure_raw_SYSV): +L(UW32): + # cfi_startproc + _CET_ENDBR + subl $raw_closure_S_FS, %esp +L(UW33): + # cfi_def_cfa_offset(raw_closure_S_FS + 4) + movl %ebx, raw_closure_S_FS-4(%esp) +L(UW34): + # cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc4): + leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +#else + leal L(load_table4)(,%eax, 8), %ecx +#endif + movl raw_closure_S_FS-4(%esp), %ebx +L(UW35): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e4) +E(L(load_table4), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + addl $raw_closure_S_FS, %esp +L(UW36): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + addl $raw_closure_S_FS, %esp +L(UW38): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret $4 +L(UW39): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + ud2 +E(L(load_table4), X86_RET_UNUSED15) + ud2 + +L(UW40): + # cfi_endproc +ENDF(C(ffi_closure_raw_SYSV)) + +#define raw_closure_T_FS (16+16+8) + + .balign 16 + .globl C(ffi_closure_raw_THISCALL) + FFI_HIDDEN(C(ffi_closure_raw_THISCALL)) +C(ffi_closure_raw_THISCALL): +L(UW41): + # cfi_startproc + _CET_ENDBR + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. 
*/ + popl %edx +L(UW42): + # cfi_def_cfa_offset(0) + # cfi_register(%eip, %edx) + pushl %ecx +L(UW43): + # cfi_adjust_cfa_offset(4) + pushl %edx +L(UW44): + # cfi_adjust_cfa_offset(4) + # cfi_rel_offset(%eip, 0) + subl $raw_closure_T_FS, %esp +L(UW45): + # cfi_adjust_cfa_offset(raw_closure_T_FS) + movl %ebx, raw_closure_T_FS-4(%esp) +L(UW46): + # cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc5): + leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +#else + leal L(load_table5)(,%eax, 8), %ecx +#endif + movl raw_closure_T_FS-4(%esp), %ebx +L(UW47): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e5) +E(L(load_table5), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + addl $raw_closure_T_FS, %esp +L(UW48): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret $4 +L(UW49): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + addl $raw_closure_T_FS, %esp +L(UW50): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret $8 +L(UW51): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table5), X86_RET_UNUSED14) + ud2 +E(L(load_table5), X86_RET_UNUSED15) + ud2 + +L(UW52): + # cfi_endproc +ENDF(C(ffi_closure_raw_THISCALL)) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + .globl X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +#if defined(__PIC__) + COMDAT(C(__x86.get_pc_thunk.bx)) +C(__x86.get_pc_thunk.bx): + movl (%esp), %ebx + ret +ENDF(C(__x86.get_pc_thunk.bx)) +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE + COMDAT(C(__x86.get_pc_thunk.dx)) +C(__x86.get_pc_thunk.dx): + movl (%esp), %edx + ret +ENDF(C(__x86.get_pc_thunk.dx)) +#endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. 
*/ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* 
DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + .globl @feat.00 +@feat.00 = 1 +#endif + +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_i386 */ + .long C(ffi_call_i386) + .set L1,L(UW5)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_EAX */ + .long C(ffi_go_closure_EAX) + .set L2,L(UW8)-L(UW6) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_ECX */ + .long C(ffi_go_closure_ECX) + .set L3,L(UW11)-L(UW9) + .long L3 + .long 0x04000000 /* use dwarf unwind 
info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_i386 */
+ .long C(ffi_closure_i386)
+ .set L4,L(UW20)-L(UW12)
+ .long L4
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_go_closure_STDCALL */
+ .long C(ffi_go_closure_STDCALL)
+ .set L5,L(UW23)-L(UW21)
+ .long L5
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_REGISTER */
+ .long C(ffi_closure_REGISTER)
+ .set L6,L(UW26)-L(UW24)
+ .long L6
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_STDCALL */
+ .long C(ffi_closure_STDCALL)
+ .set L7,L(UW31)-L(UW27)
+ .long L7
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_raw_SYSV */
+ .long C(ffi_closure_raw_SYSV)
+ .set L8,L(UW40)-L(UW32)
+ .long L8
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+
+ /* compact unwind for ffi_closure_raw_THISCALL */
+ .long C(ffi_closure_raw_THISCALL)
+ .set L9,L(UW52)-L(UW41)
+ .long L9
+ .long 0x04000000 /* use dwarf unwind info */
+ .long 0
+ .long 0
+#endif /* __APPLE__ */
+
+#endif /* ifndef _MSC_VER */
+#endif /* ifdef __i386__ */
+
+#if defined __ELF__ && defined __linux__
+ .section .note.GNU-stack,"",@progbits
+#endif
diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel 2.S
new file mode 100644
index 0000000..3cafd71
--- /dev/null
+++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel 2.S
@@ -0,0 +1,995 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2017 Anthony Green
+ - Copyright (c) 2013 The Written Word, Inc.
+ - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc.
+
+ X86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef __x86_64__
+#ifdef _MSC_VER
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include <ffi_cfi.h>
+#include "internal.h"
+
+#define C2(X, Y) X ## Y
+#define C1(X, Y) C2(X, Y)
+#define L(X) C1(L, X)
+# define ENDF(X) X ENDP
+
+/* This macro allows the safe creation of jump tables without an
+ actual table. The entry points into the table are all 8 bytes.
+ The use of ORG asserts that we're at the correct location. */
+/* ??? 
The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .686P + .MODEL FLAT + +EXTRN @ffi_closure_inner@8:PROC +_TEXT SEGMENT + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ALIGN 16 +PUBLIC @ffi_call_i386@8 +@ffi_call_i386@8 PROC +L(UW0): + cfi_startproc + #if !HAVE_FASTCALL + mov ecx, [esp+4] + mov edx, [esp+8] + #endif + mov eax, [esp] /* move the return address */ + mov [ecx], ebp /* store ebp into local frame */ + mov [ecx+4], eax /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + mov ebp, ecx +L(UW1): + // cfi_def_cfa(%ebp, 8) + // cfi_rel_offset(%ebp, 0) + + mov esp, edx /* set outgoing argument stack */ + mov eax, [20+R_EAX*4+ebp] /* set register arguments */ + mov edx, [20+R_EDX*4+ebp] + mov ecx, [20+R_ECX*4+ebp] + + call dword ptr [ebp+8] + + mov ecx, [12+ebp] /* load return type code */ + mov [ebp+8], ebx /* preserve %ebx */ +L(UW2): + // cfi_rel_offset(%ebx, 8) + + and ecx, X86_RET_TYPE_MASK + lea ebx, [L(store_table) + ecx * 8] + mov ecx, [ebp+16] /* load result address */ + jmp ebx + + ALIGN 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstp DWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movsx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_INT64) + mov [ecx+4], edx + /* fallthru */ +E(L(store_table), X86_RET_int 32) + mov [ecx], eax + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + mov ebx, [ebp+8] + mov esp, ebp + pop ebp +L(UW3): + // cfi_remember_state + // cfi_def_cfa(%esp, 4) + // cfi_restore(%ebx) + // cfi_restore(%ebp) + ret +L(UW4): + // cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + mov [ecx], al + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + mov [ecx], ax + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + int 3 +E(L(store_table), X86_RET_UNUSED15) + int 3 + +L(UW5): + // cfi_endproc +ENDF(@ffi_call_i386@8) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. 
*/ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +FFI_CLOSURE_SAVE_REGS MACRO + mov [esp + closure_CF+16+R_EAX*4], eax + mov [esp + closure_CF+16+R_EDX*4], edx + mov [esp + closure_CF+16+R_ECX*4], ecx +ENDM + +FFI_CLOSURE_COPY_TRAMP_DATA MACRO + mov edx, [eax+FFI_TRAMPOLINE_SIZE] /* copy cif */ + mov ecx, [eax+FFI_TRAMPOLINE_SIZE+4] /* copy fun */ + mov eax, [eax+FFI_TRAMPOLINE_SIZE+8]; /* copy user_data */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax +ENDM + +#if HAVE_FASTCALL +FFI_CLOSURE_PREP_CALL MACRO + mov ecx, esp /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ +ENDM +#else +FFI_CLOSURE_PREP_CALL MACRO + lea ecx, [esp+closure_CF] /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ + mov [esp], ecx + mov [esp+4], edx +ENDM +#endif + +FFI_CLOSURE_CALL_INNER MACRO UWN + call @ffi_closure_inner@8 +ENDM + +FFI_CLOSURE_MASK_AND_JUMP MACRO LABEL + and eax, X86_RET_TYPE_MASK + lea edx, [LABEL+eax*8] + mov eax, [esp+closure_CF] /* optimiztic load */ + jmp edx +ENDM + +ALIGN 16 +PUBLIC ffi_go_closure_EAX +ffi_go_closure_EAX PROC C +L(UW6): + // cfi_startproc + sub esp, closure_FS +L(UW7): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [eax+4] /* copy cif */ + mov ecx, [eax +8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + // cfi_endproc +ENDF(ffi_go_closure_EAX) + +ALIGN 16 +PUBLIC ffi_go_closure_ECX +ffi_go_closure_ECX PROC C +L(UW9): + // cfi_startproc + sub esp, closure_FS +L(UW10): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + // cfi_endproc +ENDF(ffi_go_closure_ECX) + +/* The closure entry points are reached from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + +ALIGN 16 +PUBLIC ffi_closure_i386 +ffi_closure_i386 PROC C +L(UW12): + // cfi_startproc + sub esp, closure_FS +L(UW13): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. 
*/ +L(do_closure_i386):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,2)) + + ALIGN 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + fld dword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movsx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + add esp, closure_FS +L(UW16): + // cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + add esp, closure_FS +L(UW18): + // cfi_adjust_cfa_offset(-closure_FS) + ret 4 +L(UW19): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + int 3 +E(L(load_table2), X86_RET_UNUSED15) + int 3 + +L(UW20): + // cfi_endproc +ENDF(ffi_closure_i386) + +ALIGN 16 +PUBLIC ffi_go_closure_STDCALL +ffi_go_closure_STDCALL PROC C +L(UW21): + // cfi_startproc + sub esp, closure_FS +L(UW22): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + // cfi_endproc +ENDF(ffi_go_closure_STDCALL) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + +ALIGN 16 +PUBLIC ffi_closure_REGISTER +ffi_closure_REGISTER PROC C +L(UW24): + // cfi_startproc + // cfi_def_cfa(%esp, 8) + // cfi_offset(%eip, -8) + sub esp, closure_FS-4 +L(UW25): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov ecx, [esp+closure_FS-4] /* load retaddr */ + mov eax, [esp+closure_FS] /* load closure */ + mov [esp+closure_FS], ecx /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + // cfi_endproc +ENDF(ffi_closure_REGISTER) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + +ALIGN 16 +PUBLIC ffi_closure_STDCALL +ffi_closure_STDCALL PROC C +L(UW27): + // cfi_startproc + sub esp, closure_FS +L(UW28): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER):: + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + mov ecx, eax + shr ecx, X86_RET_POP_SHIFT /* isolate pop count */ + lea ecx, [esp+closure_FS+ecx] /* compute popped esp */ + mov edx, [esp+closure_FS] /* move return address */ + mov [ecx], edx + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. 
+ There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,3)) + + ALIGN 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + fld DWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_DOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_LDOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT8) + movsx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT16) + movsx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT8) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT16) + movzx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + mov esp, ecx + ret +E(L(load_table3), X86_RET_int 32) + mov esp, ecx + ret +E(L(load_table3), X86_RET_VOID) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTPOP) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTARG) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzx eax, ax + mov esp, ecx + ret + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table3), X86_RET_UNUSED14) + int 3 +E(L(load_table3), X86_RET_UNUSED15) + int 3 + +L(UW31): + // cfi_endproc +ENDF(ffi_closure_STDCALL) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + +ALIGN 16 +PUBLIC ffi_closure_raw_SYSV +ffi_closure_raw_SYSV PROC C +L(UW32): + // cfi_startproc + sub esp, raw_closure_S_FS +L(UW33): + // cfi_def_cfa_offset(raw_closure_S_FS + 4) + mov [esp+raw_closure_S_FS-4], ebx +L(UW34): + // cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_S_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc4): +// lea ecx, L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table4)+eax+8] +// #endif + mov ebx, [esp+raw_closure_S_FS-4] +L(UW35): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp dword ptr [ecx] + + ALIGN 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movsx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e4) +E(L(load_table4), X86_RET_int 32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + add esp, raw_closure_S_FS +L(UW36): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + // cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + add esp, raw_closure_S_FS +L(UW38): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret 4 +L(UW39): + // cfi_adjust_cfa_offset(raw_closure_S_FS) 
+E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + int 3 +E(L(load_table4), X86_RET_UNUSED15) + int 3 + +L(UW40): + // cfi_endproc +ENDF(ffi_closure_raw_SYSV) + +#define raw_closure_T_FS (16+16+8) + +ALIGN 16 +PUBLIC ffi_closure_raw_THISCALL +ffi_closure_raw_THISCALL PROC C +L(UW41): + // cfi_startproc + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. */ + pop edx +L(UW42): + // cfi_def_cfa_offset(0) + // cfi_register(%eip, %edx) + push ecx +L(UW43): + // cfi_adjust_cfa_offset(4) + push edx +L(UW44): + // cfi_adjust_cfa_offset(4) + // cfi_rel_offset(%eip, 0) + sub esp, raw_closure_T_FS +L(UW45): + // cfi_adjust_cfa_offset(raw_closure_T_FS) + mov [esp+raw_closure_T_FS-4], ebx +L(UW46): + // cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_T_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc5): +// leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table5)+eax*8] +//#endif + mov ebx, [esp+raw_closure_T_FS-4] +L(UW47): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp DWORD PTR [ecx] + + AlIGN 4 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fld QWORD PTR [esp+16] + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movsx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e5) +E(L(load_table5), X86_RET_int 32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + add esp, raw_closure_T_FS +L(UW48): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret 4 +L(UW49): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + add esp, raw_closure_T_FS +L(UW50): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret 8 +L(UW51): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table5), X86_RET_UNUSED14) + int 3 +E(L(load_table5), X86_RET_UNUSED15) + int 3 + +L(UW52): + // cfi_endproc +ENDF(ffi_closure_raw_THISCALL) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + PUBLIC X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +// #if defined(__PIC__) +// COMDAT(C(__x86.get_pc_thunk.bx)) +// C(__x86.get_pc_thunk.bx): +// movl (%esp), %ebx +// ret +// ENDF(C(__x86.get_pc_thunk.bx)) +// # if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +// COMDAT(C(__x86.get_pc_thunk.dx)) +// C(__x86.get_pc_thunk.dx): +// movl (%esp), %edx +// ret +// ENDF(C(__x86.get_pc_thunk.dx)) +// #endif /* DARWIN || HIDDEN */ +// #endif /* __PIC__ */ + +#if 0 +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE 
CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 
0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + PUBLIC @feat.00 +@feat.00 = 1 +#endif + +#endif /* ifndef _MSC_VER */ +#endif /* ifndef __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif + +END \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel.S new file mode 100644 index 0000000..3cafd71 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel.S @@ -0,0 +1,995 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. + + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef __x86_64__ +#ifdef _MSC_VER + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#define L(X) C1(L, X) +# define ENDF(X) X ENDP + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. 
*/ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .686P + .MODEL FLAT + +EXTRN @ffi_closure_inner@8:PROC +_TEXT SEGMENT + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ALIGN 16 +PUBLIC @ffi_call_i386@8 +@ffi_call_i386@8 PROC +L(UW0): + cfi_startproc + #if !HAVE_FASTCALL + mov ecx, [esp+4] + mov edx, [esp+8] + #endif + mov eax, [esp] /* move the return address */ + mov [ecx], ebp /* store ebp into local frame */ + mov [ecx+4], eax /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + mov ebp, ecx +L(UW1): + // cfi_def_cfa(%ebp, 8) + // cfi_rel_offset(%ebp, 0) + + mov esp, edx /* set outgoing argument stack */ + mov eax, [20+R_EAX*4+ebp] /* set register arguments */ + mov edx, [20+R_EDX*4+ebp] + mov ecx, [20+R_ECX*4+ebp] + + call dword ptr [ebp+8] + + mov ecx, [12+ebp] /* load return type code */ + mov [ebp+8], ebx /* preserve %ebx */ +L(UW2): + // cfi_rel_offset(%ebx, 8) + + and ecx, X86_RET_TYPE_MASK + lea ebx, [L(store_table) + ecx * 8] + mov ecx, [ebp+16] /* load result address */ + jmp ebx + + ALIGN 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstp DWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movsx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_INT64) + mov [ecx+4], edx + /* fallthru */ +E(L(store_table), X86_RET_int 32) + mov [ecx], eax + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + mov ebx, [ebp+8] + mov esp, ebp + pop ebp +L(UW3): + // cfi_remember_state + // cfi_def_cfa(%esp, 4) + // cfi_restore(%ebx) + // cfi_restore(%ebp) + ret +L(UW4): + // cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + mov [ecx], al + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + mov [ecx], ax + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + int 3 +E(L(store_table), X86_RET_UNUSED15) + int 3 + +L(UW5): + // cfi_endproc +ENDF(@ffi_call_i386@8) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. 
*/ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +FFI_CLOSURE_SAVE_REGS MACRO + mov [esp + closure_CF+16+R_EAX*4], eax + mov [esp + closure_CF+16+R_EDX*4], edx + mov [esp + closure_CF+16+R_ECX*4], ecx +ENDM + +FFI_CLOSURE_COPY_TRAMP_DATA MACRO + mov edx, [eax+FFI_TRAMPOLINE_SIZE] /* copy cif */ + mov ecx, [eax+FFI_TRAMPOLINE_SIZE+4] /* copy fun */ + mov eax, [eax+FFI_TRAMPOLINE_SIZE+8]; /* copy user_data */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax +ENDM + +#if HAVE_FASTCALL +FFI_CLOSURE_PREP_CALL MACRO + mov ecx, esp /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ +ENDM +#else +FFI_CLOSURE_PREP_CALL MACRO + lea ecx, [esp+closure_CF] /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ + mov [esp], ecx + mov [esp+4], edx +ENDM +#endif + +FFI_CLOSURE_CALL_INNER MACRO UWN + call @ffi_closure_inner@8 +ENDM + +FFI_CLOSURE_MASK_AND_JUMP MACRO LABEL + and eax, X86_RET_TYPE_MASK + lea edx, [LABEL+eax*8] + mov eax, [esp+closure_CF] /* optimiztic load */ + jmp edx +ENDM + +ALIGN 16 +PUBLIC ffi_go_closure_EAX +ffi_go_closure_EAX PROC C +L(UW6): + // cfi_startproc + sub esp, closure_FS +L(UW7): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [eax+4] /* copy cif */ + mov ecx, [eax +8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + // cfi_endproc +ENDF(ffi_go_closure_EAX) + +ALIGN 16 +PUBLIC ffi_go_closure_ECX +ffi_go_closure_ECX PROC C +L(UW9): + // cfi_startproc + sub esp, closure_FS +L(UW10): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + // cfi_endproc +ENDF(ffi_go_closure_ECX) + +/* The closure entry points are reached from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + +ALIGN 16 +PUBLIC ffi_closure_i386 +ffi_closure_i386 PROC C +L(UW12): + // cfi_startproc + sub esp, closure_FS +L(UW13): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. 
*/ +L(do_closure_i386):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,2)) + + ALIGN 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + fld dword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movsx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + add esp, closure_FS +L(UW16): + // cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + add esp, closure_FS +L(UW18): + // cfi_adjust_cfa_offset(-closure_FS) + ret 4 +L(UW19): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + int 3 +E(L(load_table2), X86_RET_UNUSED15) + int 3 + +L(UW20): + // cfi_endproc +ENDF(ffi_closure_i386) + +ALIGN 16 +PUBLIC ffi_go_closure_STDCALL +ffi_go_closure_STDCALL PROC C +L(UW21): + // cfi_startproc + sub esp, closure_FS +L(UW22): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + // cfi_endproc +ENDF(ffi_go_closure_STDCALL) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + +ALIGN 16 +PUBLIC ffi_closure_REGISTER +ffi_closure_REGISTER PROC C +L(UW24): + // cfi_startproc + // cfi_def_cfa(%esp, 8) + // cfi_offset(%eip, -8) + sub esp, closure_FS-4 +L(UW25): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov ecx, [esp+closure_FS-4] /* load retaddr */ + mov eax, [esp+closure_FS] /* load closure */ + mov [esp+closure_FS], ecx /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + // cfi_endproc +ENDF(ffi_closure_REGISTER) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + +ALIGN 16 +PUBLIC ffi_closure_STDCALL +ffi_closure_STDCALL PROC C +L(UW27): + // cfi_startproc + sub esp, closure_FS +L(UW28): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER):: + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + mov ecx, eax + shr ecx, X86_RET_POP_SHIFT /* isolate pop count */ + lea ecx, [esp+closure_FS+ecx] /* compute popped esp */ + mov edx, [esp+closure_FS] /* move return address */ + mov [ecx], edx + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. 
+ There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,3)) + + ALIGN 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + fld DWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_DOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_LDOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT8) + movsx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT16) + movsx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT8) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT16) + movzx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + mov esp, ecx + ret +E(L(load_table3), X86_RET_int 32) + mov esp, ecx + ret +E(L(load_table3), X86_RET_VOID) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTPOP) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTARG) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzx eax, ax + mov esp, ecx + ret + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table3), X86_RET_UNUSED14) + int 3 +E(L(load_table3), X86_RET_UNUSED15) + int 3 + +L(UW31): + // cfi_endproc +ENDF(ffi_closure_STDCALL) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + +ALIGN 16 +PUBLIC ffi_closure_raw_SYSV +ffi_closure_raw_SYSV PROC C +L(UW32): + // cfi_startproc + sub esp, raw_closure_S_FS +L(UW33): + // cfi_def_cfa_offset(raw_closure_S_FS + 4) + mov [esp+raw_closure_S_FS-4], ebx +L(UW34): + // cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_S_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc4): +// lea ecx, L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table4)+eax+8] +// #endif + mov ebx, [esp+raw_closure_S_FS-4] +L(UW35): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp dword ptr [ecx] + + ALIGN 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movsx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e4) +E(L(load_table4), X86_RET_int 32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + add esp, raw_closure_S_FS +L(UW36): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + // cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + add esp, raw_closure_S_FS +L(UW38): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret 4 +L(UW39): + // cfi_adjust_cfa_offset(raw_closure_S_FS) 
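For orientation, the closure entry points assembled above are normally reached through trampolines set up by libffi's public closure API. A minimal user-side sketch follows; it is illustrative only (the callback `add_bias`, the bias value, and the int-adding behaviour are hypothetical, and it is not part of the vendored file):

#include <ffi.h>
#include <stdio.h>

/* Hypothetical callback: adds *user_data to the first argument. */
static void add_bias(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void)cif;
    int a = *(int *)args[0];
    int bias = *(int *)user_data;
    *(ffi_arg *)ret = a + bias;   /* int returns narrower than ffi_arg are widened */
}

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[1] = { &ffi_type_sint };
    void *code = NULL;
    int bias = 41;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

    if (closure == NULL)
        return 1;
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK
        && ffi_prep_closure_loc(closure, &cif, add_bias, &bias, code) == FFI_OK) {
        int (*fn)(int) = (int (*)(int))code;  /* calls bounce through the generated trampoline */
        printf("%d\n", fn(1));                /* prints 42 */
    }
    ffi_closure_free(closure);
    return 0;
}

Calling `fn` enters the ABI-specific trampoline, which in turn lands in entry points like the ones defined above before dispatching to ffi_closure_inner.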
+E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + int 3 +E(L(load_table4), X86_RET_UNUSED15) + int 3 + +L(UW40): + // cfi_endproc +ENDF(ffi_closure_raw_SYSV) + +#define raw_closure_T_FS (16+16+8) + +ALIGN 16 +PUBLIC ffi_closure_raw_THISCALL +ffi_closure_raw_THISCALL PROC C +L(UW41): + // cfi_startproc + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. */ + pop edx +L(UW42): + // cfi_def_cfa_offset(0) + // cfi_register(%eip, %edx) + push ecx +L(UW43): + // cfi_adjust_cfa_offset(4) + push edx +L(UW44): + // cfi_adjust_cfa_offset(4) + // cfi_rel_offset(%eip, 0) + sub esp, raw_closure_T_FS +L(UW45): + // cfi_adjust_cfa_offset(raw_closure_T_FS) + mov [esp+raw_closure_T_FS-4], ebx +L(UW46): + // cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_T_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc5): +// leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table5)+eax*8] +//#endif + mov ebx, [esp+raw_closure_T_FS-4] +L(UW47): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp DWORD PTR [ecx] + + AlIGN 4 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fld QWORD PTR [esp+16] + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movsx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e5) +E(L(load_table5), X86_RET_int 32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + add esp, raw_closure_T_FS +L(UW48): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret 4 +L(UW49): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + add esp, raw_closure_T_FS +L(UW50): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret 8 +L(UW51): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table5), X86_RET_UNUSED14) + int 3 +E(L(load_table5), X86_RET_UNUSED15) + int 3 + +L(UW52): + // cfi_endproc +ENDF(ffi_closure_raw_THISCALL) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + PUBLIC X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +// #if defined(__PIC__) +// COMDAT(C(__x86.get_pc_thunk.bx)) +// C(__x86.get_pc_thunk.bx): +// movl (%esp), %ebx +// ret +// ENDF(C(__x86.get_pc_thunk.bx)) +// # if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +// COMDAT(C(__x86.get_pc_thunk.dx)) +// C(__x86.get_pc_thunk.dx): +// movl (%esp), %edx +// ret +// ENDF(C(__x86.get_pc_thunk.dx)) +// #endif /* DARWIN || HIDDEN */ +// #endif /* __PIC__ */ + +#if 0 +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE 
CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 
0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + PUBLIC @feat.00 +@feat.00 = 1 +#endif + +#endif /* ifndef _MSC_VER */ +#endif /* ifndef __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif + +END \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64 2.S new file mode 100644 index 0000000..ee3c04f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64 2.S @@ -0,0 +1,622 @@ +/* ----------------------------------------------------------------------- + unix64.S - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 2008 Red Hat, Inc + - Copyright (c) 2002 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include "internal64.h" +#include "asmnames.h" + + .text + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# ifdef __CET__ +# define E(BASE, X) .balign 8; .org BASE + X * 16 +# else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +# endif +#endif + +/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)); + + Bit o trickiness here -- ARGS+BYTES is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .balign 8 + .globl C(ffi_call_unix64) + FFI_HIDDEN(C(ffi_call_unix64)) + +C(ffi_call_unix64): +L(UW0): + _CET_ENDBR + movq (%rsp), %r10 /* Load return address. */ + leaq (%rdi, %rsi), %rax /* Find local stack base. */ + movq %rdx, (%rax) /* Save flags. 
*/ + movq %rcx, 8(%rax) /* Save raddr. */ + movq %rbp, 16(%rax) /* Save old frame pointer. */ + movq %r10, 24(%rax) /* Relocate return address. */ + movq %rax, %rbp /* Finalize local stack frame. */ + + /* New stack frame based off rbp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-8, so from the + perspective of the unwind info, it hasn't moved. */ +L(UW1): + /* cfi_def_cfa(%rbp, 32) */ + /* cfi_rel_offset(%rbp, 16) */ + + movq %rdi, %r10 /* Save a copy of the register area. */ + movq %r8, %r11 /* Save a copy of the target fn. */ + movl %r9d, %eax /* Set number of SSE registers. */ + + /* Load up all argument registers. */ + movq (%r10), %rdi + movq 0x08(%r10), %rsi + movq 0x10(%r10), %rdx + movq 0x18(%r10), %rcx + movq 0x20(%r10), %r8 + movq 0x28(%r10), %r9 + movl 0xb0(%r10), %eax + testl %eax, %eax + jnz L(load_sse) +L(ret_from_load_sse): + + /* Deallocate the reg arg area, except for r10, then load via pop. */ + leaq 0xb8(%r10), %rsp + popq %r10 + + /* Call the user function. */ + call *%r11 + + /* Deallocate stack arg area; local stack frame in redzone. */ + leaq 24(%rbp), %rsp + + movq 0(%rbp), %rcx /* Reload flags. */ + movq 8(%rbp), %rdi /* Reload raddr. */ + movq 16(%rbp), %rbp /* Reload old frame pointer. */ +L(UW2): + /* cfi_remember_state */ + /* cfi_def_cfa(%rsp, 8) */ + /* cfi_restore(%rbp) */ + + /* The first byte of the flags contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %cl + movzbl %cl, %r10d + leaq L(store_table)(%rip), %r11 + ja L(sa) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. */ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + + /* Prep for the structure cases: scratch area in redzone. 
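For orientation, ffi_call_unix64 (whose C prototype is given in the comment above) is normally reached through libffi's public call API: ffi_prep_cif classifies the arguments and ffi_call marshals them into the register/stack block this routine consumes. A minimal caller-side sketch, illustrative only (the choice of pow and the sample values are assumptions, not part of the vendored file):

#include <ffi.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_double, &ffi_type_double };
    double base = 2.0, exponent = 10.0, result = 0.0;
    void *arg_values[2] = { &base, &exponent };

    /* Describe double pow(double, double), then call it through libffi. */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, arg_types) == FFI_OK) {
        ffi_call(&cif, FFI_FN(pow), &result, arg_values);
        printf("%f\n", result);   /* 1024.000000 */
    }
    return 0;
}

Built with -lffi -lm; on x86-64 SysV targets the ffi_call here is what ultimately arrives at ffi_call_unix64 with the flags/raddr slots laid out as in the prologue above.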
*/ + leaq -20(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(store_table): +E(L(store_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(store_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl %al, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl %ax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl %eax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbq %al, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswq %ax, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT32) + _CET_ENDBR + cltq + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_INT64) + _CET_ENDBR + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_X87) + _CET_ENDBR + fstpt (%rdi) + ret +E(L(store_table), UNIX64_RET_X87_2) + _CET_ENDBR + fstpt (%rdi) + fstpt 16(%rdi) + ret +E(L(store_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq %rax, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq %xmm0, 8(%rsi) + jmp L(s2) +E(L(store_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq %xmm1, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq %rdx, 8(%rsi) +L(s2): + movq %rax, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + .balign 8 +L(s3): + movq %xmm0, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + +L(sa): call PLT(C(abort)) + + /* Many times we can avoid loading any SSE registers at all. + It's not worth an indirect jump to load the exact set of + SSE registers needed; zero or all is a good compromise. */ + .balign 2 +L(UW3): + /* cfi_restore_state */ +L(load_sse): + movdqa 0x30(%r10), %xmm0 + movdqa 0x40(%r10), %xmm1 + movdqa 0x50(%r10), %xmm2 + movdqa 0x60(%r10), %xmm3 + movdqa 0x70(%r10), %xmm4 + movdqa 0x80(%r10), %xmm5 + movdqa 0x90(%r10), %xmm6 + movdqa 0xa0(%r10), %xmm7 + jmp L(ret_from_load_sse) + +L(UW4): +ENDF(C(ffi_call_unix64)) + +/* 6 general registers, 8 vector registers, + 32 bytes of rvalue, 8 bytes of alignment. */ +#define ffi_closure_OFS_G 0 +#define ffi_closure_OFS_V (6*8) +#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16) +#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8) + +/* The location of rvalue within the red zone after deallocating the frame. 
*/ +#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS) + + .balign 2 + .globl C(ffi_closure_unix64_sse) + FFI_HIDDEN(C(ffi_closure_unix64_sse)) + +C(ffi_closure_unix64_sse): +L(UW5): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW6): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry1) + +L(UW7): +ENDF(C(ffi_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_closure_unix64) + FFI_HIDDEN(C(ffi_closure_unix64)) + +C(ffi_closure_unix64): +L(UW8): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW9): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry1): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */ + movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */ + movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */ +#else + movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */ +#endif +L(do_closure): + leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */ + movq %rsp, %r8 /* Load reg_args */ + leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */ + call PLT(C(ffi_closure_unix64_inner)) + + /* Deallocate stack frame early; return value is now in redzone. */ + addq $ffi_closure_FS, %rsp +L(UW10): + /* cfi_adjust_cfa_offset(-ffi_closure_FS) */ + + /* The first byte of the return value contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %al + movzbl %al, %r10d + leaq L(load_table)(%rip), %r11 + ja L(la) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. 
*/ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + leaq ffi_closure_RED_RVALUE(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(load_table): +E(L(load_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(load_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_INT64) + _CET_ENDBR + movq (%rsi), %rax + ret +E(L(load_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_X87) + _CET_ENDBR + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_X87_2) + _CET_ENDBR + fldt 16(%rsi) + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq 8(%rsi), %rax + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq 8(%rsi), %xmm0 + jmp L(l2) +E(L(load_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq 8(%rsi), %xmm1 + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq 8(%rsi), %rdx +L(l2): + movq (%rsi), %rax + ret + .balign 8 +L(l3): + movq (%rsi), %xmm0 + ret + +L(la): call PLT(C(abort)) + +L(UW11): +ENDF(C(ffi_closure_unix64)) + + .balign 2 + .globl C(ffi_go_closure_unix64_sse) + FFI_HIDDEN(C(ffi_go_closure_unix64_sse)) + +C(ffi_go_closure_unix64_sse): +L(UW12): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW13): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry2) + +L(UW14): +ENDF(C(ffi_go_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_go_closure_unix64) + FFI_HIDDEN(C(ffi_go_closure_unix64)) + +C(ffi_go_closure_unix64): +L(UW15): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW16): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry2): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl 4(%r10), %edi /* Load cif */ + movl 8(%r10), %esi /* Load fun */ + movl %r10d, %edx /* Load closure (user_data) */ +#else + movq 8(%r10), %rdi /* Load cif */ + movq 16(%r10), %rsi /* Load fun */ + movq %r10, %rdx /* Load closure (user_data) */ +#endif + jmp L(do_closure) + +L(UW17): +ENDF(C(ffi_go_closure_unix64)) + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,"a",@unwind +#else +.section .eh_frame,"a",@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. 
*/ +#ifdef __CET__ +/* Use DW_CFA_advance_loc2 when IBT is enabled. */ +# define ADV(N, P) .byte 3; .2byte L(N)-L(P) +#else +# define ADV(N, P) .byte 2, L(N)-L(P) +#endif + + .balign 8 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x78 /* CIE Data Alignment Factor */ + .byte 0x10 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */ + .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */ + .balign 8 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW4)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */ + .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */ + ADV(UW2, UW1) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */ + .byte 0xc0+6 /* DW_CFA_restore, %rbp */ + ADV(UW3, UW2) + .byte 0xb /* DW_CFA_restore_state */ + .balign 8 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW5)) /* Initial location */ + .long L(UW7)-L(UW5) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW6, UW5) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW8)) /* Initial location */ + .long L(UW11)-L(UW8) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW9, UW8) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + ADV(UW10, UW9) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */ +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW14)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW15)) /* Initial location */ + .long L(UW17)-L(UW15) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW16, UW15) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE5): +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_unix64 */ + .quad C(ffi_call_unix64) + .set L1,L(UW4)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64_sse */ + .quad C(ffi_closure_unix64_sse) + .set L2,L(UW7)-L(UW5) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64 */ + .quad C(ffi_closure_unix64) + .set L3,L(UW11)-L(UW8) + .long L3 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + 
.quad 0 + + /* compact unwind for ffi_go_closure_unix64_sse */ + .quad C(ffi_go_closure_unix64_sse) + .set L4,L(UW14)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_go_closure_unix64 */ + .quad C(ffi_go_closure_unix64) + .set L5,L(UW17)-L(UW15) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 +#endif + +#endif /* __x86_64__ */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64.S new file mode 100644 index 0000000..ee3c04f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64.S @@ -0,0 +1,622 @@ +/* ----------------------------------------------------------------------- + unix64.S - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 2008 Red Hat, Inc + - Copyright (c) 2002 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include "internal64.h" +#include "asmnames.h" + + .text + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# ifdef __CET__ +# define E(BASE, X) .balign 8; .org BASE + X * 16 +# else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +# endif +#endif + +/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)); + + Bit o trickiness here -- ARGS+BYTES is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .balign 8 + .globl C(ffi_call_unix64) + FFI_HIDDEN(C(ffi_call_unix64)) + +C(ffi_call_unix64): +L(UW0): + _CET_ENDBR + movq (%rsp), %r10 /* Load return address. */ + leaq (%rdi, %rsi), %rax /* Find local stack base. */ + movq %rdx, (%rax) /* Save flags. */ + movq %rcx, 8(%rax) /* Save raddr. */ + movq %rbp, 16(%rax) /* Save old frame pointer. 
*/ + movq %r10, 24(%rax) /* Relocate return address. */ + movq %rax, %rbp /* Finalize local stack frame. */ + + /* New stack frame based off rbp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-8, so from the + perspective of the unwind info, it hasn't moved. */ +L(UW1): + /* cfi_def_cfa(%rbp, 32) */ + /* cfi_rel_offset(%rbp, 16) */ + + movq %rdi, %r10 /* Save a copy of the register area. */ + movq %r8, %r11 /* Save a copy of the target fn. */ + movl %r9d, %eax /* Set number of SSE registers. */ + + /* Load up all argument registers. */ + movq (%r10), %rdi + movq 0x08(%r10), %rsi + movq 0x10(%r10), %rdx + movq 0x18(%r10), %rcx + movq 0x20(%r10), %r8 + movq 0x28(%r10), %r9 + movl 0xb0(%r10), %eax + testl %eax, %eax + jnz L(load_sse) +L(ret_from_load_sse): + + /* Deallocate the reg arg area, except for r10, then load via pop. */ + leaq 0xb8(%r10), %rsp + popq %r10 + + /* Call the user function. */ + call *%r11 + + /* Deallocate stack arg area; local stack frame in redzone. */ + leaq 24(%rbp), %rsp + + movq 0(%rbp), %rcx /* Reload flags. */ + movq 8(%rbp), %rdi /* Reload raddr. */ + movq 16(%rbp), %rbp /* Reload old frame pointer. */ +L(UW2): + /* cfi_remember_state */ + /* cfi_def_cfa(%rsp, 8) */ + /* cfi_restore(%rbp) */ + + /* The first byte of the flags contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %cl + movzbl %cl, %r10d + leaq L(store_table)(%rip), %r11 + ja L(sa) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. */ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + + /* Prep for the structure cases: scratch area in redzone. 
*/ + leaq -20(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(store_table): +E(L(store_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(store_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl %al, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl %ax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl %eax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbq %al, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswq %ax, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT32) + _CET_ENDBR + cltq + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_INT64) + _CET_ENDBR + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_X87) + _CET_ENDBR + fstpt (%rdi) + ret +E(L(store_table), UNIX64_RET_X87_2) + _CET_ENDBR + fstpt (%rdi) + fstpt 16(%rdi) + ret +E(L(store_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq %rax, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq %xmm0, 8(%rsi) + jmp L(s2) +E(L(store_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq %xmm1, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq %rdx, 8(%rsi) +L(s2): + movq %rax, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + .balign 8 +L(s3): + movq %xmm0, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + +L(sa): call PLT(C(abort)) + + /* Many times we can avoid loading any SSE registers at all. + It's not worth an indirect jump to load the exact set of + SSE registers needed; zero or all is a good compromise. */ + .balign 2 +L(UW3): + /* cfi_restore_state */ +L(load_sse): + movdqa 0x30(%r10), %xmm0 + movdqa 0x40(%r10), %xmm1 + movdqa 0x50(%r10), %xmm2 + movdqa 0x60(%r10), %xmm3 + movdqa 0x70(%r10), %xmm4 + movdqa 0x80(%r10), %xmm5 + movdqa 0x90(%r10), %xmm6 + movdqa 0xa0(%r10), %xmm7 + jmp L(ret_from_load_sse) + +L(UW4): +ENDF(C(ffi_call_unix64)) + +/* 6 general registers, 8 vector registers, + 32 bytes of rvalue, 8 bytes of alignment. */ +#define ffi_closure_OFS_G 0 +#define ffi_closure_OFS_V (6*8) +#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16) +#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8) + +/* The location of rvalue within the red zone after deallocating the frame. 
*/ +#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS) + + .balign 2 + .globl C(ffi_closure_unix64_sse) + FFI_HIDDEN(C(ffi_closure_unix64_sse)) + +C(ffi_closure_unix64_sse): +L(UW5): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW6): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry1) + +L(UW7): +ENDF(C(ffi_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_closure_unix64) + FFI_HIDDEN(C(ffi_closure_unix64)) + +C(ffi_closure_unix64): +L(UW8): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW9): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry1): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */ + movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */ + movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */ +#else + movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */ +#endif +L(do_closure): + leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */ + movq %rsp, %r8 /* Load reg_args */ + leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */ + call PLT(C(ffi_closure_unix64_inner)) + + /* Deallocate stack frame early; return value is now in redzone. */ + addq $ffi_closure_FS, %rsp +L(UW10): + /* cfi_adjust_cfa_offset(-ffi_closure_FS) */ + + /* The first byte of the return value contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %al + movzbl %al, %r10d + leaq L(load_table)(%rip), %r11 + ja L(la) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. 
*/ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + leaq ffi_closure_RED_RVALUE(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(load_table): +E(L(load_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(load_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_INT64) + _CET_ENDBR + movq (%rsi), %rax + ret +E(L(load_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_X87) + _CET_ENDBR + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_X87_2) + _CET_ENDBR + fldt 16(%rsi) + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq 8(%rsi), %rax + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq 8(%rsi), %xmm0 + jmp L(l2) +E(L(load_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq 8(%rsi), %xmm1 + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq 8(%rsi), %rdx +L(l2): + movq (%rsi), %rax + ret + .balign 8 +L(l3): + movq (%rsi), %xmm0 + ret + +L(la): call PLT(C(abort)) + +L(UW11): +ENDF(C(ffi_closure_unix64)) + + .balign 2 + .globl C(ffi_go_closure_unix64_sse) + FFI_HIDDEN(C(ffi_go_closure_unix64_sse)) + +C(ffi_go_closure_unix64_sse): +L(UW12): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW13): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry2) + +L(UW14): +ENDF(C(ffi_go_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_go_closure_unix64) + FFI_HIDDEN(C(ffi_go_closure_unix64)) + +C(ffi_go_closure_unix64): +L(UW15): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW16): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry2): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl 4(%r10), %edi /* Load cif */ + movl 8(%r10), %esi /* Load fun */ + movl %r10d, %edx /* Load closure (user_data) */ +#else + movq 8(%r10), %rdi /* Load cif */ + movq 16(%r10), %rsi /* Load fun */ + movq %r10, %rdx /* Load closure (user_data) */ +#endif + jmp L(do_closure) + +L(UW17): +ENDF(C(ffi_go_closure_unix64)) + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,"a",@unwind +#else +.section .eh_frame,"a",@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. 
*/ +#ifdef __CET__ +/* Use DW_CFA_advance_loc2 when IBT is enabled. */ +# define ADV(N, P) .byte 3; .2byte L(N)-L(P) +#else +# define ADV(N, P) .byte 2, L(N)-L(P) +#endif + + .balign 8 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x78 /* CIE Data Alignment Factor */ + .byte 0x10 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */ + .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */ + .balign 8 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW4)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */ + .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */ + ADV(UW2, UW1) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */ + .byte 0xc0+6 /* DW_CFA_restore, %rbp */ + ADV(UW3, UW2) + .byte 0xb /* DW_CFA_restore_state */ + .balign 8 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW5)) /* Initial location */ + .long L(UW7)-L(UW5) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW6, UW5) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW8)) /* Initial location */ + .long L(UW11)-L(UW8) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW9, UW8) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + ADV(UW10, UW9) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */ +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW14)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW15)) /* Initial location */ + .long L(UW17)-L(UW15) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW16, UW15) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE5): +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_unix64 */ + .quad C(ffi_call_unix64) + .set L1,L(UW4)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64_sse */ + .quad C(ffi_closure_unix64_sse) + .set L2,L(UW7)-L(UW5) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64 */ + .quad C(ffi_closure_unix64) + .set L3,L(UW11)-L(UW8) + .long L3 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + 
.quad 0 + + /* compact unwind for ffi_go_closure_unix64_sse */ + .quad C(ffi_go_closure_unix64_sse) + .set L4,L(UW14)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_go_closure_unix64 */ + .quad C(ffi_go_closure_unix64) + .set L5,L(UW17)-L(UW15) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 +#endif + +#endif /* __x86_64__ */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64 2.S new file mode 100644 index 0000000..57c0e65 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64 2.S @@ -0,0 +1,240 @@ +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 %rcx +#define arg1 %rdx +#define arg2 %r8 +#define arg3 %r9 +#else +#define SEH(...) +#define arg0 %rdi +#define arg1 %rsi +#define arg2 %rdx +#define arg3 %rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 8 + .globl C(ffi_call_win64) + FFI_HIDDEN(C(ffi_call_win64)) + + SEH(.seh_proc ffi_call_win64) +C(ffi_call_win64): + cfi_startproc + _CET_ENDBR + /* Set up the local stack frame and install it in rbp/rsp. */ + movq (%rsp), %rax + movq %rbp, (arg1) + movq %rax, 8(arg1) + movq arg1, %rbp + cfi_def_cfa(%rbp, 16) + cfi_rel_offset(%rbp, 0) + SEH(.seh_pushreg %rbp) + SEH(.seh_setframe %rbp, 0) + SEH(.seh_endprologue) + movq arg0, %rsp + + movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + movq (%rsp), %rcx + movsd (%rsp), %xmm0 + movq 8(%rsp), %rdx + movsd 8(%rsp), %xmm1 + movq 16(%rsp), %r8 + movsd 16(%rsp), %xmm2 + movq 24(%rsp), %r9 + movsd 24(%rsp), %xmm3 + + call *16(%rbp) + + movl 24(%rbp), %ecx + movq 32(%rbp), %r8 + leaq 0f(%rip), %r10 + cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + leaq (%r10, %rcx, 8), %r10 + ja 99f + _CET_NOTRACK jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). 
*/ +.macro epilogue + leaveq + cfi_remember_state + cfi_def_cfa(%rsp, 8) + cfi_restore(%rbp) + ret + cfi_restore_state +.endm + + .align 8 +0: +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzbl %al, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsbq %al, %rax + jmp 98f +E(0b, FFI_TYPE_UINT16) + movzwl %ax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movswq %ax, %rax + jmp 98f +E(0b, FFI_TYPE_UINT32) + movl %eax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +98: movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + movl %eax, (%r8) + epilogue + + .align 8 +99: call PLT(C(abort)) + + epilogue + + cfi_endproc + SEH(.seh_endproc) + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + .align 8 + .globl C(ffi_go_closure_win64) + FFI_HIDDEN(C(ffi_go_closure_win64)) + + SEH(.seh_proc ffi_go_closure_win64) +C(ffi_go_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq 8(%r10), %rcx /* load cif */ + movq 16(%r10), %rdx /* load fun */ + movq %r10, %r8 /* closure is user_data */ + jmp 0f + cfi_endproc + SEH(.seh_endproc) + + .align 8 + .globl C(ffi_closure_win64) + FFI_HIDDEN(C(ffi_closure_win64)) + + SEH(.seh_proc ffi_closure_win64) +C(ffi_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +0: + subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.seh_stackalloc ffi_clo_FS) + SEH(.seh_endprologue) + + /* Save all sse arguments into the stack frame. */ + movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + leaq ffi_clo_OFF_R(%rsp), %r9 + call PLT(C(ffi_closure_win64_inner)) + + /* Load the result into both possible result registers. 
*/ + movq ffi_clo_OFF_R(%rsp), %rax + movsd ffi_clo_OFF_R(%rsp), %xmm0 + + addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + SEH(.seh_endproc) +#endif /* __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64.S new file mode 100644 index 0000000..57c0e65 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64.S @@ -0,0 +1,240 @@ +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 %rcx +#define arg1 %rdx +#define arg2 %r8 +#define arg3 %r9 +#else +#define SEH(...) +#define arg0 %rdi +#define arg1 %rsi +#define arg2 %rdx +#define arg3 %rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 8 + .globl C(ffi_call_win64) + FFI_HIDDEN(C(ffi_call_win64)) + + SEH(.seh_proc ffi_call_win64) +C(ffi_call_win64): + cfi_startproc + _CET_ENDBR + /* Set up the local stack frame and install it in rbp/rsp. */ + movq (%rsp), %rax + movq %rbp, (arg1) + movq %rax, 8(arg1) + movq arg1, %rbp + cfi_def_cfa(%rbp, 16) + cfi_rel_offset(%rbp, 0) + SEH(.seh_pushreg %rbp) + SEH(.seh_setframe %rbp, 0) + SEH(.seh_endprologue) + movq arg0, %rsp + + movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + movq (%rsp), %rcx + movsd (%rsp), %xmm0 + movq 8(%rsp), %rdx + movsd 8(%rsp), %xmm1 + movq 16(%rsp), %r8 + movsd 16(%rsp), %xmm2 + movq 24(%rsp), %r9 + movsd 24(%rsp), %xmm3 + + call *16(%rbp) + + movl 24(%rbp), %ecx + movq 32(%rbp), %r8 + leaq 0f(%rip), %r10 + cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + leaq (%r10, %rcx, 8), %r10 + ja 99f + _CET_NOTRACK jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). 
*/ +.macro epilogue + leaveq + cfi_remember_state + cfi_def_cfa(%rsp, 8) + cfi_restore(%rbp) + ret + cfi_restore_state +.endm + + .align 8 +0: +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzbl %al, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsbq %al, %rax + jmp 98f +E(0b, FFI_TYPE_UINT16) + movzwl %ax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movswq %ax, %rax + jmp 98f +E(0b, FFI_TYPE_UINT32) + movl %eax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +98: movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + movl %eax, (%r8) + epilogue + + .align 8 +99: call PLT(C(abort)) + + epilogue + + cfi_endproc + SEH(.seh_endproc) + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + .align 8 + .globl C(ffi_go_closure_win64) + FFI_HIDDEN(C(ffi_go_closure_win64)) + + SEH(.seh_proc ffi_go_closure_win64) +C(ffi_go_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq 8(%r10), %rcx /* load cif */ + movq 16(%r10), %rdx /* load fun */ + movq %r10, %r8 /* closure is user_data */ + jmp 0f + cfi_endproc + SEH(.seh_endproc) + + .align 8 + .globl C(ffi_closure_win64) + FFI_HIDDEN(C(ffi_closure_win64)) + + SEH(.seh_proc ffi_closure_win64) +C(ffi_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +0: + subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.seh_stackalloc ffi_clo_FS) + SEH(.seh_endprologue) + + /* Save all sse arguments into the stack frame. */ + movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + leaq ffi_clo_OFF_R(%rsp), %r9 + call PLT(C(ffi_closure_win64_inner)) + + /* Load the result into both possible result registers. 
*/ + movq ffi_clo_OFF_R(%rsp), %rax + movsd ffi_clo_OFF_R(%rsp), %xmm0 + + addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + SEH(.seh_endproc) +#endif /* __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel 2.S new file mode 100644 index 0000000..7df78b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel 2.S @@ -0,0 +1,237 @@ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 rcx +#define arg1 rdx +#define arg2 r8 +#define arg3 r9 +#else +#define SEH(...) +#define arg0 rdi +#define arg1 rsi +#define arg2 rdx +#define arg3 rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .CODE + extern PLT(C(abort)):near + extern C(ffi_closure_win64_inner):near + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + ALIGN 8 + PUBLIC C(ffi_call_win64) + + ; SEH(.safesh ffi_call_win64) +C(ffi_call_win64) proc SEH(frame) + cfi_startproc + /* Set up the local stack frame and install it in rbp/rsp. */ + mov RAX, [RSP] ; movq (%rsp), %rax + mov [arg1], RBP ; movq %rbp, (arg1) + mov [arg1 + 8], RAX; movq %rax, 8(arg1) + mov RBP, arg1; movq arg1, %rbp + cfi_def_cfa(rbp, 16) + cfi_rel_offset(rbp, 0) + SEH(.pushreg rbp) + SEH(.setframe rbp, 0) + SEH(.endprolog) + mov RSP, arg0 ; movq arg0, %rsp + + mov R10, arg2 ; movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + mov RCX, [RSP] ; movq (%rsp), %rcx + movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0 + mov RDX, [RSP + 8] ;movq 8(%rsp), %rdx + movsd XMM1, qword ptr [RSP + 8]; movsd 8(%rsp), %xmm1 + mov R8, [RSP + 16] ; movq 16(%rsp), %r8 + movsd XMM2, qword ptr [RSP + 16] ; movsd 16(%rsp), %xmm2 + mov R9, [RSP + 24] ; movq 24(%rsp), %r9 + movsd XMM3, qword ptr [RSP + 24] ;movsd 24(%rsp), %xmm3 + + CALL qword ptr [RBP + 16] ; call *16(%rbp) + + mov ECX, [RBP + 24] ; movl 24(%rbp), %ecx + mov R8, [RBP + 32] ; movq 32(%rbp), %r8 + LEA R10, ffi_call_win64_tab ; leaq 0f(%rip), %r10 + CMP ECX, FFI_TYPE_SMALL_STRUCT_4B ; cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + LEA R10, [R10 + RCX*8] ; leaq (%r10, %rcx, 8), %r10 + JA L99 ; ja 99f + JMP R10 ; jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). 
*/ +epilogue macro + LEAVE + cfi_remember_state + cfi_def_cfa(rsp, 8) + cfi_restore(rbp) + RET + cfi_restore_state +endm + + ALIGN 8 +ffi_call_win64_tab LABEL NEAR +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movsxd rax, eax ; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss dword ptr [r8], xmm0 ; movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd qword ptr[r8], xmm0; movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzx eax, al ;movzbl %al, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsx rax, al ; movsbq %al, %rax + jmp L98 +E(0b, FFI_TYPE_UINT16) + movzx eax, ax ; movzwl %ax, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movsx rax, ax; movswq %ax, %rax + jmp L98 +E(0b, FFI_TYPE_UINT32) + mov eax, eax; movl %eax, %eax + mov qword ptr[r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movsxd rax, eax; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +L98 LABEL near + mov qword ptr [r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + mov qword ptr [r8], rax;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + mov qword ptr [r8], rax ;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + mov byte ptr [r8], al ; movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + mov word ptr [r8], ax ; movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + mov dword ptr [r8], eax ; movl %eax, (%r8) + epilogue + + align 8 +L99 LABEL near + call PLT(C(abort)) + + epilogue + + cfi_endproc + C(ffi_call_win64) endp + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + align 8 + PUBLIC C(ffi_go_closure_win64) + +C(ffi_go_closure_win64) proc + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9 ;movq %r9, 32(%rsp) + + mov rcx, qword ptr [r10 + 8]; movq 8(%r10), %rcx /* load cif */ + mov rdx, qword ptr [r10 + 16]; movq 16(%r10), %rdx /* load fun */ + mov r8, r10 ; movq %r10, %r8 /* closure is user_data */ + jmp ffi_closure_win64_2 + cfi_endproc + C(ffi_go_closure_win64) endp + + align 8 + +PUBLIC C(ffi_closure_win64) +C(ffi_closure_win64) PROC FRAME + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. 
*/ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9; movq %r9, 32(%rsp) + + mov rcx, qword ptr [FFI_TRAMPOLINE_SIZE + r10] ;movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + mov rdx, qword ptr [FFI_TRAMPOLINE_SIZE + 8 + r10] ; movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + mov r8, qword ptr [FFI_TRAMPOLINE_SIZE+16+r10] ;movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +ffi_closure_win64_2 LABEL near + sub rsp, ffi_clo_FS ;subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.allocstack ffi_clo_FS) + SEH(.endprolog) + + /* Save all sse arguments into the stack frame. */ + movsd qword ptr [ffi_clo_OFF_X + rsp], xmm0 ; movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd qword ptr [ffi_clo_OFF_X+8+rsp], xmm1 ; movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd qword ptr [ffi_clo_OFF_X+16+rsp], xmm2 ; movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd qword ptr [ffi_clo_OFF_X+24+rsp], xmm3 ; movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + lea r9, [ffi_clo_OFF_R + rsp] ; leaq ffi_clo_OFF_R(%rsp), %r9 + call C(ffi_closure_win64_inner) + + /* Load the result into both possible result registers. */ + + mov rax, qword ptr [ffi_clo_OFF_R + rsp] ;movq ffi_clo_OFF_R(%rsp), %rax + movsd xmm0, qword ptr [rsp + ffi_clo_OFF_R] ;movsd ffi_clo_OFF_R(%rsp), %xmm0 + + add rsp, ffi_clo_FS ;addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + C(ffi_closure_win64) endp + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +_text ends +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel.S new file mode 100644 index 0000000..7df78b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel.S @@ -0,0 +1,237 @@ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 rcx +#define arg1 rdx +#define arg2 r8 +#define arg3 r9 +#else +#define SEH(...) +#define arg0 rdi +#define arg1 rsi +#define arg2 rdx +#define arg3 rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .CODE + extern PLT(C(abort)):near + extern C(ffi_closure_win64_inner):near + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + ALIGN 8 + PUBLIC C(ffi_call_win64) + + ; SEH(.safesh ffi_call_win64) +C(ffi_call_win64) proc SEH(frame) + cfi_startproc + /* Set up the local stack frame and install it in rbp/rsp. 
*/ + mov RAX, [RSP] ; movq (%rsp), %rax + mov [arg1], RBP ; movq %rbp, (arg1) + mov [arg1 + 8], RAX; movq %rax, 8(arg1) + mov RBP, arg1; movq arg1, %rbp + cfi_def_cfa(rbp, 16) + cfi_rel_offset(rbp, 0) + SEH(.pushreg rbp) + SEH(.setframe rbp, 0) + SEH(.endprolog) + mov RSP, arg0 ; movq arg0, %rsp + + mov R10, arg2 ; movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + mov RCX, [RSP] ; movq (%rsp), %rcx + movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0 + mov RDX, [RSP + 8] ;movq 8(%rsp), %rdx + movsd XMM1, qword ptr [RSP + 8]; movsd 8(%rsp), %xmm1 + mov R8, [RSP + 16] ; movq 16(%rsp), %r8 + movsd XMM2, qword ptr [RSP + 16] ; movsd 16(%rsp), %xmm2 + mov R9, [RSP + 24] ; movq 24(%rsp), %r9 + movsd XMM3, qword ptr [RSP + 24] ;movsd 24(%rsp), %xmm3 + + CALL qword ptr [RBP + 16] ; call *16(%rbp) + + mov ECX, [RBP + 24] ; movl 24(%rbp), %ecx + mov R8, [RBP + 32] ; movq 32(%rbp), %r8 + LEA R10, ffi_call_win64_tab ; leaq 0f(%rip), %r10 + CMP ECX, FFI_TYPE_SMALL_STRUCT_4B ; cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + LEA R10, [R10 + RCX*8] ; leaq (%r10, %rcx, 8), %r10 + JA L99 ; ja 99f + JMP R10 ; jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */ +epilogue macro + LEAVE + cfi_remember_state + cfi_def_cfa(rsp, 8) + cfi_restore(rbp) + RET + cfi_restore_state +endm + + ALIGN 8 +ffi_call_win64_tab LABEL NEAR +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movsxd rax, eax ; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss dword ptr [r8], xmm0 ; movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd qword ptr[r8], xmm0; movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzx eax, al ;movzbl %al, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsx rax, al ; movsbq %al, %rax + jmp L98 +E(0b, FFI_TYPE_UINT16) + movzx eax, ax ; movzwl %ax, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movsx rax, ax; movswq %ax, %rax + jmp L98 +E(0b, FFI_TYPE_UINT32) + mov eax, eax; movl %eax, %eax + mov qword ptr[r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movsxd rax, eax; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +L98 LABEL near + mov qword ptr [r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + mov qword ptr [r8], rax;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + mov qword ptr [r8], rax ;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + mov byte ptr [r8], al ; movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + mov word ptr [r8], ax ; movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + mov dword ptr [r8], eax ; movl %eax, (%r8) + epilogue + + align 8 +L99 LABEL near + call PLT(C(abort)) + + epilogue + + cfi_endproc + C(ffi_call_win64) endp + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + align 8 + PUBLIC C(ffi_go_closure_win64) + +C(ffi_go_closure_win64) proc + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. 
*/ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9 ;movq %r9, 32(%rsp) + + mov rcx, qword ptr [r10 + 8]; movq 8(%r10), %rcx /* load cif */ + mov rdx, qword ptr [r10 + 16]; movq 16(%r10), %rdx /* load fun */ + mov r8, r10 ; movq %r10, %r8 /* closure is user_data */ + jmp ffi_closure_win64_2 + cfi_endproc + C(ffi_go_closure_win64) endp + + align 8 + +PUBLIC C(ffi_closure_win64) +C(ffi_closure_win64) PROC FRAME + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9; movq %r9, 32(%rsp) + + mov rcx, qword ptr [FFI_TRAMPOLINE_SIZE + r10] ;movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + mov rdx, qword ptr [FFI_TRAMPOLINE_SIZE + 8 + r10] ; movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + mov r8, qword ptr [FFI_TRAMPOLINE_SIZE+16+r10] ;movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +ffi_closure_win64_2 LABEL near + sub rsp, ffi_clo_FS ;subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.allocstack ffi_clo_FS) + SEH(.endprolog) + + /* Save all sse arguments into the stack frame. */ + movsd qword ptr [ffi_clo_OFF_X + rsp], xmm0 ; movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd qword ptr [ffi_clo_OFF_X+8+rsp], xmm1 ; movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd qword ptr [ffi_clo_OFF_X+16+rsp], xmm2 ; movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd qword ptr [ffi_clo_OFF_X+24+rsp], xmm3 ; movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + lea r9, [ffi_clo_OFF_R + rsp] ; leaq ffi_clo_OFF_R(%rsp), %r9 + call C(ffi_closure_win64_inner) + + /* Load the result into both possible result registers. */ + + mov rax, qword ptr [ffi_clo_OFF_R + rsp] ;movq ffi_clo_OFF_R(%rsp), %rax + movsd xmm0, qword ptr [rsp + ffi_clo_OFF_R] ;movsd ffi_clo_OFF_R(%rsp), %xmm0 + + add rsp, ffi_clo_FS ;addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + C(ffi_closure_win64) endp + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +_text ends +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi 2.c new file mode 100644 index 0000000..9a0575f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi 2.c @@ -0,0 +1,298 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +/* + |----------------------------------------| + | | + on entry to ffi_call ----> |----------------------------------------| + | caller stack frame for registers a0-a3 | + |----------------------------------------| + | | + | additional arguments | + entry of the function ---> |----------------------------------------| + | copy of function arguments a2-a7 | + | - - - - - - - - - - - - - | + | | + + The area below the entry line becomes the new stack frame for the function. + +*/ + + +#define FFI_TYPE_STRUCT_REGS FFI_TYPE_LAST + + +extern void ffi_call_SYSV(void *rvalue, unsigned rsize, unsigned flags, + void(*fn)(void), unsigned nbytes, extended_cif*); +extern void ffi_closure_SYSV(void) FFI_HIDDEN; + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + switch(cif->rtype->type) { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = cif->rtype->type; + break; + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + cif->flags = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; // cif->rtype->type; + break; + case FFI_TYPE_STRUCT: + cif->flags = FFI_TYPE_STRUCT; //_REGS; + /* Up to 16 bytes are returned in registers */ + if (cif->rtype->size > 4 * 4) { + /* returned structure is referenced by a register; use 8 bytes + (including 4 bytes for potential additional alignment) */ + cif->flags = FFI_TYPE_STRUCT; + cif->bytes += 8; + } + break; + + default: + cif->flags = FFI_TYPE_UINT32; + break; + } + + /* Round the stack up to a full 4 register frame, just in case + (we use this size in movsp). This way, it's also a multiple of + 8 bytes for 64-bit arguments. 
*/ + cif->bytes = FFI_ALIGN(cif->bytes, 16); + + return FFI_OK; +} + +void ffi_prep_args(extended_cif *ecif, unsigned char* stack) +{ + unsigned int i; + unsigned long *addr; + ffi_type **ptr; + + union { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **i; + long long **ll; + float **f; + double **d; + } p_argv; + + /* Verify that everything is aligned up properly */ + FFI_ASSERT (((unsigned long) stack & 0x7) == 0); + + p_argv.v = ecif->avalue; + addr = (unsigned long*)stack; + + /* structures with a size greater than 16 bytes are passed in memory */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 16) + { + *addr++ = (unsigned long)ecif->rvalue; + } + + for (i = ecif->cif->nargs, ptr = ecif->cif->arg_types; + i > 0; + i--, ptr++, p_argv.v++) + { + switch ((*ptr)->type) + { + case FFI_TYPE_SINT8: + *addr++ = **p_argv.sc; + break; + case FFI_TYPE_UINT8: + *addr++ = **p_argv.uc; + break; + case FFI_TYPE_SINT16: + *addr++ = **p_argv.ss; + break; + case FFI_TYPE_UINT16: + *addr++ = **p_argv.us; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *addr++ = **p_argv.i; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (((unsigned long)addr & 4) != 0) + addr++; + *(unsigned long long*)addr = **p_argv.ll; + addr += sizeof(unsigned long long) / sizeof (addr); + break; + + case FFI_TYPE_STRUCT: + { + unsigned long offs; + unsigned long size; + + if (((unsigned long)addr & 4) != 0 && (*ptr)->alignment > 4) + addr++; + + offs = (unsigned long) addr - (unsigned long) stack; + size = (*ptr)->size; + + /* Entire structure must fit the argument registers or referenced */ + if (offs < FFI_REGISTER_NARGS * 4 + && offs + size > FFI_REGISTER_NARGS * 4) + addr = (unsigned long*) (stack + FFI_REGISTER_NARGS * 4); + + memcpy((char*) addr, *p_argv.c, size); + addr += (size + 3) / 4; + break; + } + + default: + FFI_ASSERT(0); + } + } +} + + +void ffi_call(ffi_cif* cif, void(*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + unsigned long rsize = cif->rtype->size; + int flags = cif->flags; + void *alloc = NULL; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Note that for structures that are returned in registers (size <= 16 bytes) + we allocate a temporary buffer and use memcpy to copy it to the final + destination. The reason is that the target address might be misaligned or + the length not a multiple of 4 bytes. Handling all those cases would be + very complex. */ + + if (flags == FFI_TYPE_STRUCT && (rsize <= 16 || rvalue == NULL)) + { + alloc = alloca(FFI_ALIGN(rsize, 4)); + ecif.rvalue = alloc; + } + else + { + ecif.rvalue = rvalue; + } + + if (cif->abi != FFI_SYSV) + FFI_ASSERT(0); + + ffi_call_SYSV (ecif.rvalue, rsize, cif->flags, fn, cif->bytes, &ecif); + + if (alloc != NULL && rvalue != NULL) + memcpy(rvalue, alloc, rsize); +} + +extern void ffi_trampoline(); +extern void ffi_cacheflush(void* start, void* end); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + /* copye trampoline to stack and patch 'ffi_closure_SYSV' pointer */ + memcpy(closure->tramp, ffi_trampoline, FFI_TRAMPOLINE_SIZE); + *(unsigned int*)(&closure->tramp[8]) = (unsigned int)ffi_closure_SYSV; + + // Do we have this function? 
+ // __builtin___clear_cache(closer->tramp, closer->tramp + FFI_TRAMPOLINE_SIZE) + ffi_cacheflush(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + return FFI_OK; +} + + +long FFI_HIDDEN +ffi_closure_SYSV_inner(ffi_closure *closure, void **values, void *rvalue) +{ + ffi_cif *cif; + ffi_type **arg_types; + void **avalue; + int i, areg; + + cif = closure->cif; + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + areg = 0; + + int rtype = cif->rtype->type; + if (rtype == FFI_TYPE_STRUCT && cif->rtype->size > 4 * 4) + { + rvalue = *values; + areg++; + } + + cif = closure->cif; + arg_types = cif->arg_types; + avalue = alloca(cif->nargs * sizeof(void *)); + + for (i = 0; i < cif->nargs; i++) + { + if (arg_types[i]->alignment == 8 && (areg & 1) != 0) + areg++; + + // skip the entry 16,a1 framework, add 16 bytes (4 registers) + if (areg == FFI_REGISTER_NARGS) + areg += 4; + + if (arg_types[i]->type == FFI_TYPE_STRUCT) + { + int numregs = ((arg_types[i]->size + 3) & ~3) / 4; + if (areg < FFI_REGISTER_NARGS && areg + numregs > FFI_REGISTER_NARGS) + areg = FFI_REGISTER_NARGS + 4; + } + + avalue[i] = &values[areg]; + areg += (arg_types[i]->size + 3) / 4; + } + + (closure->fun)(cif, rvalue, avalue, closure->user_data); + + return rtype; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi.c new file mode 100644 index 0000000..9a0575f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi.c @@ -0,0 +1,298 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +/* + |----------------------------------------| + | | + on entry to ffi_call ----> |----------------------------------------| + | caller stack frame for registers a0-a3 | + |----------------------------------------| + | | + | additional arguments | + entry of the function ---> |----------------------------------------| + | copy of function arguments a2-a7 | + | - - - - - - - - - - - - - | + | | + + The area below the entry line becomes the new stack frame for the function. 
+ +*/ + + +#define FFI_TYPE_STRUCT_REGS FFI_TYPE_LAST + + +extern void ffi_call_SYSV(void *rvalue, unsigned rsize, unsigned flags, + void(*fn)(void), unsigned nbytes, extended_cif*); +extern void ffi_closure_SYSV(void) FFI_HIDDEN; + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + switch(cif->rtype->type) { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = cif->rtype->type; + break; + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + cif->flags = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; // cif->rtype->type; + break; + case FFI_TYPE_STRUCT: + cif->flags = FFI_TYPE_STRUCT; //_REGS; + /* Up to 16 bytes are returned in registers */ + if (cif->rtype->size > 4 * 4) { + /* returned structure is referenced by a register; use 8 bytes + (including 4 bytes for potential additional alignment) */ + cif->flags = FFI_TYPE_STRUCT; + cif->bytes += 8; + } + break; + + default: + cif->flags = FFI_TYPE_UINT32; + break; + } + + /* Round the stack up to a full 4 register frame, just in case + (we use this size in movsp). This way, it's also a multiple of + 8 bytes for 64-bit arguments. */ + cif->bytes = FFI_ALIGN(cif->bytes, 16); + + return FFI_OK; +} + +void ffi_prep_args(extended_cif *ecif, unsigned char* stack) +{ + unsigned int i; + unsigned long *addr; + ffi_type **ptr; + + union { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **i; + long long **ll; + float **f; + double **d; + } p_argv; + + /* Verify that everything is aligned up properly */ + FFI_ASSERT (((unsigned long) stack & 0x7) == 0); + + p_argv.v = ecif->avalue; + addr = (unsigned long*)stack; + + /* structures with a size greater than 16 bytes are passed in memory */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 16) + { + *addr++ = (unsigned long)ecif->rvalue; + } + + for (i = ecif->cif->nargs, ptr = ecif->cif->arg_types; + i > 0; + i--, ptr++, p_argv.v++) + { + switch ((*ptr)->type) + { + case FFI_TYPE_SINT8: + *addr++ = **p_argv.sc; + break; + case FFI_TYPE_UINT8: + *addr++ = **p_argv.uc; + break; + case FFI_TYPE_SINT16: + *addr++ = **p_argv.ss; + break; + case FFI_TYPE_UINT16: + *addr++ = **p_argv.us; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *addr++ = **p_argv.i; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (((unsigned long)addr & 4) != 0) + addr++; + *(unsigned long long*)addr = **p_argv.ll; + addr += sizeof(unsigned long long) / sizeof (addr); + break; + + case FFI_TYPE_STRUCT: + { + unsigned long offs; + unsigned long size; + + if (((unsigned long)addr & 4) != 0 && (*ptr)->alignment > 4) + addr++; + + offs = (unsigned long) addr - (unsigned long) stack; + size = (*ptr)->size; + + /* Entire structure must fit the argument registers or referenced */ + if (offs < FFI_REGISTER_NARGS * 4 + && offs + size > FFI_REGISTER_NARGS * 4) + addr = (unsigned long*) (stack + FFI_REGISTER_NARGS * 4); + + memcpy((char*) addr, *p_argv.c, size); + addr += (size + 3) / 4; + break; + } + + default: + FFI_ASSERT(0); + } + } +} + + +void ffi_call(ffi_cif* cif, void(*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + unsigned long rsize = cif->rtype->size; + int flags = cif->flags; + void *alloc = NULL; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Note that for 
structures that are returned in registers (size <= 16 bytes) + we allocate a temporary buffer and use memcpy to copy it to the final + destination. The reason is that the target address might be misaligned or + the length not a multiple of 4 bytes. Handling all those cases would be + very complex. */ + + if (flags == FFI_TYPE_STRUCT && (rsize <= 16 || rvalue == NULL)) + { + alloc = alloca(FFI_ALIGN(rsize, 4)); + ecif.rvalue = alloc; + } + else + { + ecif.rvalue = rvalue; + } + + if (cif->abi != FFI_SYSV) + FFI_ASSERT(0); + + ffi_call_SYSV (ecif.rvalue, rsize, cif->flags, fn, cif->bytes, &ecif); + + if (alloc != NULL && rvalue != NULL) + memcpy(rvalue, alloc, rsize); +} + +extern void ffi_trampoline(); +extern void ffi_cacheflush(void* start, void* end); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + /* copye trampoline to stack and patch 'ffi_closure_SYSV' pointer */ + memcpy(closure->tramp, ffi_trampoline, FFI_TRAMPOLINE_SIZE); + *(unsigned int*)(&closure->tramp[8]) = (unsigned int)ffi_closure_SYSV; + + // Do we have this function? + // __builtin___clear_cache(closer->tramp, closer->tramp + FFI_TRAMPOLINE_SIZE) + ffi_cacheflush(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + return FFI_OK; +} + + +long FFI_HIDDEN +ffi_closure_SYSV_inner(ffi_closure *closure, void **values, void *rvalue) +{ + ffi_cif *cif; + ffi_type **arg_types; + void **avalue; + int i, areg; + + cif = closure->cif; + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + areg = 0; + + int rtype = cif->rtype->type; + if (rtype == FFI_TYPE_STRUCT && cif->rtype->size > 4 * 4) + { + rvalue = *values; + areg++; + } + + cif = closure->cif; + arg_types = cif->arg_types; + avalue = alloca(cif->nargs * sizeof(void *)); + + for (i = 0; i < cif->nargs; i++) + { + if (arg_types[i]->alignment == 8 && (areg & 1) != 0) + areg++; + + // skip the entry 16,a1 framework, add 16 bytes (4 registers) + if (areg == FFI_REGISTER_NARGS) + areg += 4; + + if (arg_types[i]->type == FFI_TYPE_STRUCT) + { + int numregs = ((arg_types[i]->size + 3) & ~3) / 4; + if (areg < FFI_REGISTER_NARGS && areg + numregs > FFI_REGISTER_NARGS) + areg = FFI_REGISTER_NARGS + 4; + } + + avalue[i] = &values[areg]; + areg += (arg_types[i]->size + 3) / 4; + } + + (closure->fun)(cif, rvalue, avalue, closure->user_data); + + return rtype; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget 2.h new file mode 100644 index 0000000..0ba728b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget 2.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Tensilica, Inc. + Target configuration macros for XTENSA. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_REGISTER_NARGS 6 + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 24 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget.h new file mode 100644 index 0000000..0ba728b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Tensilica, Inc. + Target configuration macros for XTENSA. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_REGISTER_NARGS 6 + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 24 + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv 2.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv 2.S new file mode 100644 index 0000000..e942179 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv 2.S @@ -0,0 +1,258 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#define ENTRY(name) .text; .globl name; .type name,@function; .align 4; name: +#define END(name) .size name , . - name + +/* Assert that the table below is in sync with ffi.h. 
*/ + +#if FFI_TYPE_UINT8 != 5 \ + || FFI_TYPE_SINT8 != 6 \ + || FFI_TYPE_UINT16 != 7 \ + || FFI_TYPE_SINT16 != 8 \ + || FFI_TYPE_UINT32 != 9 \ + || FFI_TYPE_SINT32 != 10 \ + || FFI_TYPE_UINT64 != 11 +#error "xtensa/sysv.S out of sync with ffi.h" +#endif + + +/* ffi_call_SYSV (rvalue, rbytes, flags, (*fnaddr)(), bytes, ecif) + void *rvalue; a2 + unsigned long rbytes; a3 + unsigned flags; a4 + void (*fnaddr)(); a5 + unsigned long bytes; a6 + extended_cif* ecif) a7 +*/ + +ENTRY(ffi_call_SYSV) + + entry a1, 32 # 32 byte frame for using call8 below + + mov a10, a7 # a10(->arg0): ecif + sub a11, a1, a6 # a11(->arg1): stack pointer + mov a7, a1 # fp + movsp a1, a11 # set new sp = old_sp - bytes + + movi a8, ffi_prep_args + callx8 a8 # ffi_prep_args(ecif, stack) + + # prepare to move stack pointer back up to 6 arguments + # note that 'bytes' is already aligned + + movi a10, 6*4 + sub a11, a6, a10 + movgez a6, a10, a11 + add a6, a1, a6 + + + # we can pass up to 6 arguments in registers + # for simplicity, just load 6 arguments + # (the stack size is at least 32 bytes, so no risk to cross boundaries) + + l32i a10, a1, 0 + l32i a11, a1, 4 + l32i a12, a1, 8 + l32i a13, a1, 12 + l32i a14, a1, 16 + l32i a15, a1, 20 + + # move stack pointer + + movsp a1, a6 + + callx8 a5 # (*fn)(args...) + + # Handle return value(s) + + beqz a2, .Lexit + + movi a5, FFI_TYPE_STRUCT + bne a4, a5, .Lstore + movi a5, 16 + blt a5, a3, .Lexit + + s32i a10, a2, 0 + blti a3, 5, .Lexit + addi a3, a3, -1 + s32i a11, a2, 4 + blti a3, 8, .Lexit + s32i a12, a2, 8 + blti a3, 12, .Lexit + s32i a13, a2, 12 + +.Lexit: retw + +.Lstore: + addi a4, a4, -FFI_TYPE_UINT8 + bgei a4, 7, .Lexit # should never happen + movi a6, store_calls + add a4, a4, a4 + addx4 a6, a4, a6 # store_table + idx * 8 + jx a6 + + .align 8 +store_calls: + # UINT8 + s8i a10, a2, 0 + retw + + # SINT8 + .align 8 + s8i a10, a2, 0 + retw + + # UINT16 + .align 8 + s16i a10, a2, 0 + retw + + # SINT16 + .align 8 + s16i a10, a2, 0 + retw + + # UINT32 + .align 8 + s32i a10, a2, 0 + retw + + # SINT32 + .align 8 + s32i a10, a2, 0 + retw + + # UINT64 + .align 8 + s32i a10, a2, 0 + s32i a11, a2, 4 + retw + +END(ffi_call_SYSV) + + +/* + * void ffi_cacheflush (unsigned long start, unsigned long end) + */ + +#define EXTRA_ARGS_SIZE 24 + +ENTRY(ffi_cacheflush) + + entry a1, 16 + +1: +#if XCHAL_DCACHE_SIZE + dhwbi a2, 0 +#endif +#if XCHAL_ICACHE_SIZE + ihi a2, 0 +#endif + addi a2, a2, 4 + blt a2, a3, 1b + + retw + +END(ffi_cacheflush) + +/* ffi_trampoline is copied to the stack */ + +ENTRY(ffi_trampoline) + + entry a1, 16 + (FFI_REGISTER_NARGS * 4) + (4 * 4) # [ 0] + j 2f # [ 3] + .align 4 # [ 6] +1: .long 0 # [ 8] +2: l32r a15, 1b # [12] + _mov a14, a0 # [15] + callx0 a15 # [18] + # [21] +END(ffi_trampoline) + +/* + * ffi_closure() + * + * a0: closure + 21 + * a14: return address (a0) + */ + +ENTRY(ffi_closure_SYSV) + + /* intentionally omitting entry here */ + + # restore return address (a0) and move pointer to closure to a10 + addi a10, a0, -21 + mov a0, a14 + + # allow up to 4 arguments as return values + addi a11, a1, 4 * 4 + + # save up to 6 arguments to stack (allocated by entry below) + s32i a2, a11, 0 + s32i a3, a11, 4 + s32i a4, a11, 8 + s32i a5, a11, 12 + s32i a6, a11, 16 + s32i a7, a11, 20 + + movi a8, ffi_closure_SYSV_inner + mov a12, a1 + callx8 a8 # .._inner(*closure, **avalue, *rvalue) + + # load up to four return arguments + l32i a2, a1, 0 + l32i a3, a1, 4 + l32i a4, a1, 8 + l32i a5, a1, 12 + + # (sign-)extend return value + movi a11, FFI_TYPE_UINT8 + bne a10, a11, 1f + 
extui a2, a2, 0, 8 + retw + +1: movi a11, FFI_TYPE_SINT8 + bne a10, a11, 1f + sext a2, a2, 7 + retw + +1: movi a11, FFI_TYPE_UINT16 + bne a10, a11, 1f + extui a2, a2, 0, 16 + retw + +1: movi a11, FFI_TYPE_SINT16 + bne a10, a11, 1f + sext a2, a2, 15 + +1: retw + +END(ffi_closure_SYSV) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv.S b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv.S new file mode 100644 index 0000000..e942179 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv.S @@ -0,0 +1,258 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#define ENTRY(name) .text; .globl name; .type name,@function; .align 4; name: +#define END(name) .size name , . - name + +/* Assert that the table below is in sync with ffi.h. */ + +#if FFI_TYPE_UINT8 != 5 \ + || FFI_TYPE_SINT8 != 6 \ + || FFI_TYPE_UINT16 != 7 \ + || FFI_TYPE_SINT16 != 8 \ + || FFI_TYPE_UINT32 != 9 \ + || FFI_TYPE_SINT32 != 10 \ + || FFI_TYPE_UINT64 != 11 +#error "xtensa/sysv.S out of sync with ffi.h" +#endif + + +/* ffi_call_SYSV (rvalue, rbytes, flags, (*fnaddr)(), bytes, ecif) + void *rvalue; a2 + unsigned long rbytes; a3 + unsigned flags; a4 + void (*fnaddr)(); a5 + unsigned long bytes; a6 + extended_cif* ecif) a7 +*/ + +ENTRY(ffi_call_SYSV) + + entry a1, 32 # 32 byte frame for using call8 below + + mov a10, a7 # a10(->arg0): ecif + sub a11, a1, a6 # a11(->arg1): stack pointer + mov a7, a1 # fp + movsp a1, a11 # set new sp = old_sp - bytes + + movi a8, ffi_prep_args + callx8 a8 # ffi_prep_args(ecif, stack) + + # prepare to move stack pointer back up to 6 arguments + # note that 'bytes' is already aligned + + movi a10, 6*4 + sub a11, a6, a10 + movgez a6, a10, a11 + add a6, a1, a6 + + + # we can pass up to 6 arguments in registers + # for simplicity, just load 6 arguments + # (the stack size is at least 32 bytes, so no risk to cross boundaries) + + l32i a10, a1, 0 + l32i a11, a1, 4 + l32i a12, a1, 8 + l32i a13, a1, 12 + l32i a14, a1, 16 + l32i a15, a1, 20 + + # move stack pointer + + movsp a1, a6 + + callx8 a5 # (*fn)(args...) 
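+ # Result dispatch for the call just made: a NULL rvalue in a2 skips all
+ # stores; FFI_TYPE_STRUCT results of at most 16 bytes arrive in a10..a13
+ # and are copied out word by word under the size held in a3, while larger
+ # structures were already written through the hidden pointer set up by
+ # ffi_prep_args; every other return type branches to .Lstore, which jumps
+ # into the 8-byte-aligned store_calls table indexed by (flags - FFI_TYPE_UINT8).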
+ + # Handle return value(s) + + beqz a2, .Lexit + + movi a5, FFI_TYPE_STRUCT + bne a4, a5, .Lstore + movi a5, 16 + blt a5, a3, .Lexit + + s32i a10, a2, 0 + blti a3, 5, .Lexit + addi a3, a3, -1 + s32i a11, a2, 4 + blti a3, 8, .Lexit + s32i a12, a2, 8 + blti a3, 12, .Lexit + s32i a13, a2, 12 + +.Lexit: retw + +.Lstore: + addi a4, a4, -FFI_TYPE_UINT8 + bgei a4, 7, .Lexit # should never happen + movi a6, store_calls + add a4, a4, a4 + addx4 a6, a4, a6 # store_table + idx * 8 + jx a6 + + .align 8 +store_calls: + # UINT8 + s8i a10, a2, 0 + retw + + # SINT8 + .align 8 + s8i a10, a2, 0 + retw + + # UINT16 + .align 8 + s16i a10, a2, 0 + retw + + # SINT16 + .align 8 + s16i a10, a2, 0 + retw + + # UINT32 + .align 8 + s32i a10, a2, 0 + retw + + # SINT32 + .align 8 + s32i a10, a2, 0 + retw + + # UINT64 + .align 8 + s32i a10, a2, 0 + s32i a11, a2, 4 + retw + +END(ffi_call_SYSV) + + +/* + * void ffi_cacheflush (unsigned long start, unsigned long end) + */ + +#define EXTRA_ARGS_SIZE 24 + +ENTRY(ffi_cacheflush) + + entry a1, 16 + +1: +#if XCHAL_DCACHE_SIZE + dhwbi a2, 0 +#endif +#if XCHAL_ICACHE_SIZE + ihi a2, 0 +#endif + addi a2, a2, 4 + blt a2, a3, 1b + + retw + +END(ffi_cacheflush) + +/* ffi_trampoline is copied to the stack */ + +ENTRY(ffi_trampoline) + + entry a1, 16 + (FFI_REGISTER_NARGS * 4) + (4 * 4) # [ 0] + j 2f # [ 3] + .align 4 # [ 6] +1: .long 0 # [ 8] +2: l32r a15, 1b # [12] + _mov a14, a0 # [15] + callx0 a15 # [18] + # [21] +END(ffi_trampoline) + +/* + * ffi_closure() + * + * a0: closure + 21 + * a14: return address (a0) + */ + +ENTRY(ffi_closure_SYSV) + + /* intentionally omitting entry here */ + + # restore return address (a0) and move pointer to closure to a10 + addi a10, a0, -21 + mov a0, a14 + + # allow up to 4 arguments as return values + addi a11, a1, 4 * 4 + + # save up to 6 arguments to stack (allocated by entry below) + s32i a2, a11, 0 + s32i a3, a11, 4 + s32i a4, a11, 8 + s32i a5, a11, 12 + s32i a6, a11, 16 + s32i a7, a11, 20 + + movi a8, ffi_closure_SYSV_inner + mov a12, a1 + callx8 a8 # .._inner(*closure, **avalue, *rvalue) + + # load up to four return arguments + l32i a2, a1, 0 + l32i a3, a1, 4 + l32i a4, a1, 8 + l32i a5, a1, 12 + + # (sign-)extend return value + movi a11, FFI_TYPE_UINT8 + bne a10, a11, 1f + extui a2, a2, 0, 8 + retw + +1: movi a11, FFI_TYPE_SINT8 + bne a10, a11, 1f + sext a2, a2, 7 + retw + +1: movi a11, FFI_TYPE_UINT16 + bne a10, a11, 1f + extui a2, a2, 0, 16 + retw + +1: movi a11, FFI_TYPE_SINT16 + bne a10, a11, 1f + sext a2, a2, 15 + +1: retw + +END(ffi_closure_SYSV) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h 2.in new file mode 100644 index 0000000..9788f70 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h 2.in @@ -0,0 +1 @@ +timestamp diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h.in new file mode 100644 index 0000000..9788f70 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h.in @@ -0,0 +1 @@ +timestamp diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile 2.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile 2.am new file mode 100644 index 0000000..bcfea57 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile 2.am @@ -0,0 +1,122 @@ +## Process this file with automake 
to produce Makefile.in. + +AUTOMAKE_OPTIONS = foreign dejagnu + +EXTRA_DEJAGNU_SITE_CONFIG=../local.exp + +CLEANFILES = *.exe core* *.log *.sum + +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ +libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ 
+libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ +libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile 2.in b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile 2.in new file mode 100644 index 0000000..4060fa1 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile 2.in @@ -0,0 +1,645 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = testsuite +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac 
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DEJATOOL = $(PACKAGE) +RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir +EXPECT = expect +RUNTEST = runtest +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +ALLOCA = @ALLOCA@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AM_LTLDFLAGS = @AM_LTLDFLAGS@ +AM_RUNTESTFLAGS = @AM_RUNTESTFLAGS@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@ +HAVE_LONG_DOUBLE_VARIANT = @HAVE_LONG_DOUBLE_VARIANT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPT_LDFLAGS = @OPT_LDFLAGS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PRTDIAG = @PRTDIAG@ +RANLIB = @RANLIB@ +SECTION_LDFLAGS = @SECTION_LDFLAGS@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +TARGET = @TARGET@ +TARGETDIR = @TARGETDIR@ +TARGET_OBJ = @TARGET_OBJ@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +ax_enable_builddir_sed = @ax_enable_builddir_sed@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ 
+build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sys_symbol_underscore = @sys_symbol_underscore@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +toolexecdir = @toolexecdir@ +toolexeclibdir = @toolexeclibdir@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign dejagnu +EXTRA_DEJAGNU_SITE_CONFIG = ../local.exp +CLEANFILES = *.exe core* *.log *.sum +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ +libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ 
+libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ +libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ 
+libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign testsuite/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign testsuite/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +check-DEJAGNU: site.exp + srcdir='$(srcdir)'; export srcdir; \ + EXPECT=$(EXPECT); export EXPECT; \ + if $(SHELL) -c "$(RUNTEST) --version" > /dev/null 2>&1; then \ + exit_status=0; l='$(DEJATOOL)'; for tool in $$l; do \ + if $(RUNTEST) $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \ + then :; else exit_status=1; fi; \ + done; \ + else echo "WARNING: could not find '$(RUNTEST)'" 1>&2; :;\ + fi; \ + exit $$exit_status +site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG) + @echo 'Making a new site.exp file ...' + @echo '## these variables are automatically generated by make ##' >site.tmp + @echo '# Do not edit here. If you wish to override these values' >>site.tmp + @echo '# edit the last section' >>site.tmp + @echo 'set srcdir "$(srcdir)"' >>site.tmp + @echo "set objdir `pwd`" >>site.tmp + @echo 'set build_alias "$(build_alias)"' >>site.tmp + @echo 'set build_triplet $(build_triplet)' >>site.tmp + @echo 'set host_alias "$(host_alias)"' >>site.tmp + @echo 'set host_triplet $(host_triplet)' >>site.tmp + @echo 'set target_alias "$(target_alias)"' >>site.tmp + @echo 'set target_triplet $(target_triplet)' >>site.tmp + @list='$(EXTRA_DEJAGNU_SITE_CONFIG)'; for f in $$list; do \ + echo "## Begin content included from file $$f. Do not modify. 
##" \ + && cat `test -f "$$f" || echo '$(srcdir)/'`$$f \ + && echo "## End content included from file $$f. ##" \ + || exit 1; \ + done >> site.tmp + @echo "## End of auto-generated content; you can edit from here. ##" >> site.tmp + @if test -f site.exp; then \ + sed -e '1,/^## End of auto-generated content.*##/d' site.exp >> site.tmp; \ + fi + @-rm -f site.bak + @test ! -f site.exp || mv site.exp site.bak + @mv site.tmp site.exp + +distclean-DEJAGNU: + -rm -f site.exp site.bak + -l='$(DEJATOOL)'; for tool in $$l; do \ + rm -f $$tool.sum $$tool.log; \ + done + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-DEJAGNU +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-DEJAGNU distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: all all-am check check-DEJAGNU check-am clean clean-generic \ + clean-libtool cscopelist-am ctags-am distclean \ + distclean-DEJAGNU distclean-generic distclean-libtool distdir \ + dvi dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile.am b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile.am new file mode 100644 index 0000000..bcfea57 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile.am @@ -0,0 +1,122 @@ +## Process this file with automake to produce Makefile.in. 
+ +AUTOMAKE_OPTIONS = foreign dejagnu + +EXTRA_DEJAGNU_SITE_CONFIG=../local.exp + +CLEANFILES = *.exe core* *.log *.sum + +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ +libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c 
\ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ +libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default 2.exp new file mode 100644 index 0000000..90967cc --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default 2.exp @@ -0,0 +1 @@ +load_lib "standard.exp" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default.exp new file mode 100644 index 0000000..90967cc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default.exp @@ -0,0 +1 @@ +load_lib "standard.exp" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi 2.exp new file mode 100644 index 0000000..c02adf9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi 2.exp @@ -0,0 +1,640 @@ +# Copyright (C) 2003, 2005, 2008, 2009, 2010, 2011, 2014, 2019 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +proc load_gcc_lib { filename } { + global srcdir + load_file $srcdir/lib/$filename +} + +load_lib dg.exp +load_lib libgloss.exp +load_gcc_lib target-libpath.exp +load_gcc_lib wrapper.exp + +# Return 1 if the target matches the effective target 'arg', 0 otherwise. +# This can be used with any check_* proc that takes no argument and +# returns only 1 or 0. It could be used with check_* procs that take +# arguments with keywords that pass particular arguments. + +proc is-effective-target { arg } { + global et_index + set selected 0 + if { ![info exists et_index] } { + # Initialize the effective target index that is used in some + # check_effective_target_* procs. + set et_index 0 + } + if { [info procs check_effective_target_${arg}] != [list] } { + set selected [check_effective_target_${arg}] + } else { + error "unknown effective target keyword `$arg'" + } + verbose "is-effective-target: $arg $selected" 2 + return $selected +} + +proc is-effective-target-keyword { arg } { + if { [info procs check_effective_target_${arg}] != [list] } { + return 1 + } else { + return 0 + } +} + +# Intercept the call to the DejaGnu version of dg-process-target to +# support use of an effective-target keyword in place of a list of +# target triplets to xfail or skip a test. +# +# The argument to dg-process-target is the keyword "target" or "xfail" +# followed by a selector: +# target-triplet-1 ... +# effective-target-keyword +# selector-expression +# +# For a target list the result is "S" if the target is selected, "N" otherwise. +# For an xfail list the result is "F" if the target is affected, "P" otherwise. + +# In contexts that allow either "target" or "xfail" the argument can be +# target selector1 xfail selector2 +# which returns "N" if selector1 is not selected, otherwise the result of +# "xfail selector2". +# +# A selector expression appears within curly braces and uses a single logical +# operator: !, &&, or ||. 
An operand is another selector expression, an +# effective-target keyword, or a list of target triplets within quotes or +# curly braces. + +if { [info procs saved-dg-process-target] == [list] } { + rename dg-process-target saved-dg-process-target + + # Evaluate an operand within a selector expression. + proc selector_opd { op } { + set selector "target" + lappend selector $op + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_opd: `$op' $answer" 2 + return $answer + } + + # Evaluate a target triplet list within a selector expression. + # Unlike other operands, this needs to be expanded from a list to + # the same string as "target". + proc selector_list { op } { + set selector "target [join $op]" + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_list: `$op' $answer" 2 + return $answer + } + + # Evaluate a selector expression. + proc selector_expression { exp } { + if { [llength $exp] == 2 } { + if [string match "!" [lindex $exp 0]] { + set op1 [lindex $exp 1] + set answer [expr { ! [selector_opd $op1] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } elseif { [llength $exp] == 3 } { + set op1 [lindex $exp 0] + set opr [lindex $exp 1] + set op2 [lindex $exp 2] + if [string match "&&" $opr] { + set answer [expr { [selector_opd $op1] && [selector_opd $op2] }] + } elseif [string match "||" $opr] { + set answer [expr { [selector_opd $op1] || [selector_opd $op2] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + + verbose "selector_expression: `$exp' $answer" 2 + return $answer + } + + # Evaluate "target selector" or "xfail selector". + + proc dg-process-target-1 { args } { + verbose "dg-process-target-1: `$args'" 2 + + # Extract the 'what' keyword from the argument list. + set selector [string trim [lindex $args 0]] + if [regexp "^xfail " $selector] { + set what "xfail" + } elseif [regexp "^target " $selector] { + set what "target" + } else { + error "syntax error in target selector \"$selector\"" + } + + # Extract the rest of the list, which might be a keyword. + regsub "^${what}" $selector "" rest + set rest [string trim $rest] + + if [is-effective-target-keyword $rest] { + # The selector is an effective target keyword. + if [is-effective-target $rest] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + if [string match "{*}" $rest] { + if [selector_expression [lindex $rest 0]] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + # The selector is not an effective-target keyword, so process + # the list of target triplets. + return [saved-dg-process-target $selector] + } + + # Intercept calls to the DejaGnu function. In addition to + # processing "target selector" or "xfail selector", handle + # "target selector1 xfail selector2". + + proc dg-process-target { args } { + verbose "replacement dg-process-target: `$args'" 2 + + set selector [string trim [lindex $args 0]] + + # If the argument list contains both 'target' and 'xfail', + # process 'target' and, if that succeeds, process 'xfail'. 
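+ # For example, a selector written as
+ #     target { x86_64-*-* } xfail { *-*-darwin* }
+ # yields "N" here unless the current target matches the target list, and
+ # otherwise returns the result of processing the xfail part ("F" on the
+ # listed targets, "P" elsewhere).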
+ if [regexp "^target .* xfail .*" $selector] { + set xfail_index [string first "xfail" $selector] + set xfail_selector [string range $selector $xfail_index end] + set target_selector [string range $selector 0 [expr $xfail_index-1]] + set target_selector [string trim $target_selector] + if { [dg-process-target-1 $target_selector] == "N" } { + return "N" + } + return [dg-process-target-1 $xfail_selector] + + } + return [dg-process-target-1 $selector] + } +} + +# Define libffi callbacks for dg.exp. + +proc libffi-dg-test-1 { target_compile prog do_what extra_tool_flags } { + + # To get all \n in dg-output test strings to match printf output + # in a system that outputs it as \015\012 (i.e. not just \012), we + # need to change all \n into \r?\n. As there is no dejagnu flag + # or hook to do that, we simply change the text being tested. + # Unfortunately, we have to know that the variable is called + # dg-output-text and lives in the caller of libffi-dg-test, which + # is two calls up. Overriding proc dg-output would be longer and + # would necessarily have the same assumption. + upvar 2 dg-output-text output_match + + if { [llength $output_match] > 1 } { + regsub -all "\n" [lindex $output_match 1] "\r?\n" x + set output_match [lreplace $output_match 1 1 $x] + } + + # Set up the compiler flags, based on what we're going to do. + + set options [list] + switch $do_what { + "compile" { + set compile_type "assembly" + set output_file "[file rootname [file tail $prog]].s" + } + "link" { + set compile_type "executable" + set output_file "[file rootname [file tail $prog]].exe" + # The following line is needed for targets like the i960 where + # the default output file is b.out. Sigh. + } + "run" { + set compile_type "executable" + # FIXME: "./" is to cope with "." not being in $PATH. + # Should this be handled elsewhere? + # YES. + set output_file "./[file rootname [file tail $prog]].exe" + # This is the only place where we care if an executable was + # created or not. If it was, dg.exp will try to run it. + remote_file build delete $output_file; + } + default { + perror "$do_what: not a valid dg-do keyword" + return "" + } + } + + if { $extra_tool_flags != "" } { + lappend options "additional_flags=$extra_tool_flags" + } + + set comp_output [libffi_target_compile "$prog" "$output_file" "$compile_type" $options]; + + + return [list $comp_output $output_file] +} + + +proc libffi-dg-test { prog do_what extra_tool_flags } { + return [libffi-dg-test-1 target_compile $prog $do_what $extra_tool_flags] +} + +proc libffi-dg-prune { target_triplet text } { + # We get this with some qemu emulated systems (eg. ppc64le-linux-gnu) + regsub -all "(^|\n)\[^\n\]*unable to perform all requested operations" $text "" text + return $text +} + +proc libffi-init { args } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global objdir + global TOOL_OPTIONS + global tool + global libffi_include + global libffi_link_flags + global tool_root_dir + global ld_library_path + global compiler_vendor + + if ![info exists blddirffi] { + set blddirffi [pwd]/.. + } + + verbose "libffi $blddirffi" + + # Which compiler are we building with? + set tmp [grep "$blddirffi/config.log" "^ax_cv_c_compiler_vendor.*$"] + regexp -- {^[^=]*=(.*)$} $tmp nil compiler_vendor + + if { [string match $compiler_vendor "gnu"] } { + set gccdir [lookfor_file $tool_root_dir gcc/libgcc.a] + if {$gccdir != ""} { + set gccdir [file dirname $gccdir] + } + verbose "gccdir $gccdir" + + set ld_library_path "." 
+ append ld_library_path ":${gccdir}" + + set compiler "${gccdir}/xgcc" + if { [is_remote host] == 0 && [which $compiler] != 0 } { + foreach i "[exec $compiler --print-multi-lib]" { + set mldir "" + regexp -- "\[a-z0-9=_/\.-\]*;" $i mldir + set mldir [string trimright $mldir "\;@"] + if { "$mldir" == "." } { + continue + } + if { [llength [glob -nocomplain ${gccdir}/${mldir}/libgcc_s*.so.*]] >= 1 } { + append ld_library_path ":${gccdir}/${mldir}" + } + } + } + } + + # add the library path for libffi. + append ld_library_path ":${blddirffi}/.libs" + + verbose "ld_library_path: $ld_library_path" + + # Point to the Libffi headers in libffi. + set libffi_include "${blddirffi}/include" + verbose "libffi_include $libffi_include" + + set libffi_dir "${blddirffi}/.libs" + verbose "libffi_dir $libffi_dir" + if { $libffi_dir != "" } { + set libffi_dir [file dirname ${libffi_dir}] + set libffi_link_flags "-L${libffi_dir}/.libs" + } + + set_ld_library_path_env_vars + libffi_maybe_build_wrapper "${objdir}/testglue.o" +} + +proc libffi_exit { } { + global gluefile; + + if [info exists gluefile] { + file_on_build delete $gluefile; + unset gluefile; + } +} + +proc libffi_target_compile { source dest type options } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global TOOL_OPTIONS + global libffi_link_flags + global libffi_include + global target_triplet + global compiler_vendor + + if { [target_info needs_status_wrapper]!="" && [info exists gluefile] } { + lappend options "libs=${gluefile}" + lappend options "ldflags=$wrap_flags" + } + + # TOOL_OPTIONS must come first, so that it doesn't override testcase + # specific options. + if [info exists TOOL_OPTIONS] { + lappend options "additional_flags=$TOOL_OPTIONS" + } + + # search for ffi_mips.h in srcdir, too + lappend options "additional_flags=-I${libffi_include} -I${srcdir}/../include -I${libffi_include}/.." + lappend options "additional_flags=${libffi_link_flags}" + + # Darwin needs a stack execution allowed flag. + + if { [istarget "*-*-darwin9*"] || [istarget "*-*-darwin1*"] + || [istarget "*-*-darwin2*"] } { + lappend options "additional_flags=-Wl,-allow_stack_execute" + } + + # If you're building the compiler with --prefix set to a place + # where it's not yet installed, then the linker won't be able to + # find the libgcc used by libffi.dylib. We could pass the + # -dylib_file option, but that's complicated, and it's much easier + # to just make the linker find libgcc using -L options. + if { [string match "*-*-darwin*" $target_triplet] } { + lappend options "libs= -shared-libgcc" + } + + if { [string match "*-*-openbsd*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + lappend options "libs= -lffi" + + if { [string match "aarch64*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + # this may be required for g++, but just confused clang. + if { [string match "*.cc" $source] } { + lappend options "c++" + } + + if { [string match "arc*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + verbose "options: $options" + return [target_compile $source $dest $type $options] +} + +# TEST should be a preprocessor condition. Returns true if it holds. 
+proc libffi_feature_test { test } { + set src "ffitest[pid].c" + + set f [open $src "w"] + puts $f "#include " + puts $f $test + puts $f "/* OK */" + puts $f "#else" + puts $f "# error Failed $test" + puts $f "#endif" + close $f + + set lines [libffi_target_compile $src /dev/null assembly ""] + file delete $src + + return [string match "" $lines] +} + +# Utility routines. + +# +# search_for -- looks for a string match in a file +# +proc search_for { file pattern } { + set fd [open $file r] + while { [gets $fd cur_line]>=0 } { + if [string match "*$pattern*" $cur_line] then { + close $fd + return 1 + } + } + close $fd + return 0 +} + +# Modified dg-runtest that can cycle through a list of optimization options +# as c-torture does. +proc libffi-dg-runtest { testcases default-extra-flags } { + global runtests + + foreach test $testcases { + # If we're only testing specific files and this isn't one of + # them, skip it. + if ![runtest_file_p $runtests $test] { + continue + } + + # Look for a loop within the source code - if we don't find one, + # don't pass -funroll[-all]-loops. + global torture_with_loops torture_without_loops + if [expr [search_for $test "for*("]+[search_for $test "while*("]] { + set option_list $torture_with_loops + } else { + set option_list $torture_without_loops + } + + set nshort [file tail [file dirname $test]]/[file tail $test] + + foreach flags $option_list { + verbose "Testing $nshort, $flags" 1 + dg-test $test $flags ${default-extra-flags} + } + } +} + +proc run-many-tests { testcases extra_flags } { + global compiler_vendor + global env + switch $compiler_vendor { + "clang" { + set common "-W -Wall" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + "gnu" { + set common "-W -Wall -Wno-psabi" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + default { + # Assume we are using the vendor compiler. + set common "" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "" } + } + } + } + + info exists env(LD_LIBRARY_PATH) + + set targetabis { "" } + if [string match $compiler_vendor "gnu"] { + if [libffi_feature_test "#ifdef __i386__"] { + set targetabis { + "" + "-DABI_NUM=FFI_STDCALL -DABI_ATTR=__STDCALL__" + "-DABI_NUM=FFI_THISCALL -DABI_ATTR=__THISCALL__" + "-DABI_NUM=FFI_FASTCALL -DABI_ATTR=__FASTCALL__" + } + } elseif { [istarget "x86_64-*-*"] \ + && [libffi_feature_test "#if !defined __ILP32__ \ + && !defined __i386__"] } { + set targetabis { + "" + "-DABI_NUM=FFI_GNUW64 -DABI_ATTR=__MSABI__" + } + } + } + + set common [ concat $common $extra_flags ] + foreach test $testcases { + set testname [file tail $test] + if [search_for $test "ABI_NUM"] { + set abis $targetabis + } else { + set abis { "" } + } + foreach opt $optimizations { + foreach abi $abis { + set options [concat $common $opt $abi] + verbose "Testing $testname, $options" 1 + dg-test $test $options "" + } + } + } +} + +# Like check_conditional_xfail, but callable from a dg test. 
+ +proc dg-xfail-if { args } { + set args [lreplace $args 0 0] + set selector "target [join [lindex $args 1]]" + if { [dg-process-target $selector] == "S" } { + global compiler_conditional_xfail_data + set compiler_conditional_xfail_data $args + } +} + +proc check-flags { args } { + + # The args are within another list; pull them out. + set args [lindex $args 0] + + # The next two arguments are optional. If they were not specified, + # use the defaults. + if { [llength $args] == 2 } { + lappend $args [list "*"] + } + if { [llength $args] == 3 } { + lappend $args [list ""] + } + + # If the option strings are the defaults, or the same as the + # defaults, there is no need to call check_conditional_xfail to + # compare them to the actual options. + if { [string compare [lindex $args 2] "*"] == 0 + && [string compare [lindex $args 3] "" ] == 0 } { + set result 1 + } else { + # The target list might be an effective-target keyword, so replace + # the original list with "*-*-*", since we already know it matches. + set result [check_conditional_xfail [lreplace $args 1 1 "*-*-*"]] + } + + return $result +} + +proc dg-skip-if { args } { + # Verify the number of arguments. The last two are optional. + set args [lreplace $args 0 0] + if { [llength $args] < 2 || [llength $args] > 4 } { + error "dg-skip-if 2: need 2, 3, or 4 arguments" + } + + # Don't bother if we're already skipping the test. + upvar dg-do-what dg-do-what + if { [lindex ${dg-do-what} 1] == "N" } { + return + } + + set selector [list target [lindex $args 1]] + if { [dg-process-target $selector] == "S" } { + if [check-flags $args] { + upvar dg-do-what dg-do-what + set dg-do-what [list [lindex ${dg-do-what} 0] "N" "P"] + } + } +} + +# We need to make sure that additional_files and additional_sources +# are both cleared out after every test. It is not enough to clear +# them out *before* the next test run because gcc-target-compile gets +# run directly from some .exp files (outside of any test). (Those +# uses should eventually be eliminated.) + +# Because the DG framework doesn't provide a hook that is run at the +# end of a test, we must replace dg-test with a wrapper. + +if { [info procs saved-dg-test] == [list] } { + rename dg-test saved-dg-test + + proc dg-test { args } { + global additional_files + global additional_sources + global errorInfo + + if { [ catch { eval saved-dg-test $args } errmsg ] } { + set saved_info $errorInfo + set additional_files "" + set additional_sources "" + error $errmsg $saved_info + } + set additional_files "" + set additional_sources "" + } +} + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi.exp new file mode 100644 index 0000000..c02adf9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi.exp @@ -0,0 +1,640 @@ +# Copyright (C) 2003, 2005, 2008, 2009, 2010, 2011, 2014, 2019 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +proc load_gcc_lib { filename } { + global srcdir + load_file $srcdir/lib/$filename +} + +load_lib dg.exp +load_lib libgloss.exp +load_gcc_lib target-libpath.exp +load_gcc_lib wrapper.exp + +# Return 1 if the target matches the effective target 'arg', 0 otherwise. +# This can be used with any check_* proc that takes no argument and +# returns only 1 or 0. It could be used with check_* procs that take +# arguments with keywords that pass particular arguments. + +proc is-effective-target { arg } { + global et_index + set selected 0 + if { ![info exists et_index] } { + # Initialize the effective target index that is used in some + # check_effective_target_* procs. + set et_index 0 + } + if { [info procs check_effective_target_${arg}] != [list] } { + set selected [check_effective_target_${arg}] + } else { + error "unknown effective target keyword `$arg'" + } + verbose "is-effective-target: $arg $selected" 2 + return $selected +} + +proc is-effective-target-keyword { arg } { + if { [info procs check_effective_target_${arg}] != [list] } { + return 1 + } else { + return 0 + } +} + +# Intercept the call to the DejaGnu version of dg-process-target to +# support use of an effective-target keyword in place of a list of +# target triplets to xfail or skip a test. +# +# The argument to dg-process-target is the keyword "target" or "xfail" +# followed by a selector: +# target-triplet-1 ... +# effective-target-keyword +# selector-expression +# +# For a target list the result is "S" if the target is selected, "N" otherwise. +# For an xfail list the result is "F" if the target is affected, "P" otherwise. + +# In contexts that allow either "target" or "xfail" the argument can be +# target selector1 xfail selector2 +# which returns "N" if selector1 is not selected, otherwise the result of +# "xfail selector2". +# +# A selector expression appears within curly braces and uses a single logical +# operator: !, &&, or ||. An operand is another selector expression, an +# effective-target keyword, or a list of target triplets within quotes or +# curly braces. + +if { [info procs saved-dg-process-target] == [list] } { + rename dg-process-target saved-dg-process-target + + # Evaluate an operand within a selector expression. + proc selector_opd { op } { + set selector "target" + lappend selector $op + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_opd: `$op' $answer" 2 + return $answer + } + + # Evaluate a target triplet list within a selector expression. + # Unlike other operands, this needs to be expanded from a list to + # the same string as "target". + proc selector_list { op } { + set selector "target [join $op]" + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_list: `$op' $answer" 2 + return $answer + } + + # Evaluate a selector expression. + proc selector_expression { exp } { + if { [llength $exp] == 2 } { + if [string match "!" [lindex $exp 0]] { + set op1 [lindex $exp 1] + set answer [expr { ! [selector_opd $op1] }] + } else { + # Assume it's a list of target triplets. 
+ set answer [selector_list $exp] + } + } elseif { [llength $exp] == 3 } { + set op1 [lindex $exp 0] + set opr [lindex $exp 1] + set op2 [lindex $exp 2] + if [string match "&&" $opr] { + set answer [expr { [selector_opd $op1] && [selector_opd $op2] }] + } elseif [string match "||" $opr] { + set answer [expr { [selector_opd $op1] || [selector_opd $op2] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + + verbose "selector_expression: `$exp' $answer" 2 + return $answer + } + + # Evaluate "target selector" or "xfail selector". + + proc dg-process-target-1 { args } { + verbose "dg-process-target-1: `$args'" 2 + + # Extract the 'what' keyword from the argument list. + set selector [string trim [lindex $args 0]] + if [regexp "^xfail " $selector] { + set what "xfail" + } elseif [regexp "^target " $selector] { + set what "target" + } else { + error "syntax error in target selector \"$selector\"" + } + + # Extract the rest of the list, which might be a keyword. + regsub "^${what}" $selector "" rest + set rest [string trim $rest] + + if [is-effective-target-keyword $rest] { + # The selector is an effective target keyword. + if [is-effective-target $rest] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + if [string match "{*}" $rest] { + if [selector_expression [lindex $rest 0]] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + # The selector is not an effective-target keyword, so process + # the list of target triplets. + return [saved-dg-process-target $selector] + } + + # Intercept calls to the DejaGnu function. In addition to + # processing "target selector" or "xfail selector", handle + # "target selector1 xfail selector2". + + proc dg-process-target { args } { + verbose "replacement dg-process-target: `$args'" 2 + + set selector [string trim [lindex $args 0]] + + # If the argument list contains both 'target' and 'xfail', + # process 'target' and, if that succeeds, process 'xfail'. + if [regexp "^target .* xfail .*" $selector] { + set xfail_index [string first "xfail" $selector] + set xfail_selector [string range $selector $xfail_index end] + set target_selector [string range $selector 0 [expr $xfail_index-1]] + set target_selector [string trim $target_selector] + if { [dg-process-target-1 $target_selector] == "N" } { + return "N" + } + return [dg-process-target-1 $xfail_selector] + + } + return [dg-process-target-1 $selector] + } +} + +# Define libffi callbacks for dg.exp. + +proc libffi-dg-test-1 { target_compile prog do_what extra_tool_flags } { + + # To get all \n in dg-output test strings to match printf output + # in a system that outputs it as \015\012 (i.e. not just \012), we + # need to change all \n into \r?\n. As there is no dejagnu flag + # or hook to do that, we simply change the text being tested. + # Unfortunately, we have to know that the variable is called + # dg-output-text and lives in the caller of libffi-dg-test, which + # is two calls up. Overriding proc dg-output would be longer and + # would necessarily have the same assumption. + upvar 2 dg-output-text output_match + + if { [llength $output_match] > 1 } { + regsub -all "\n" [lindex $output_match 1] "\r?\n" x + set output_match [lreplace $output_match 1 1 $x] + } + + # Set up the compiler flags, based on what we're going to do. 
+ + set options [list] + switch $do_what { + "compile" { + set compile_type "assembly" + set output_file "[file rootname [file tail $prog]].s" + } + "link" { + set compile_type "executable" + set output_file "[file rootname [file tail $prog]].exe" + # The following line is needed for targets like the i960 where + # the default output file is b.out. Sigh. + } + "run" { + set compile_type "executable" + # FIXME: "./" is to cope with "." not being in $PATH. + # Should this be handled elsewhere? + # YES. + set output_file "./[file rootname [file tail $prog]].exe" + # This is the only place where we care if an executable was + # created or not. If it was, dg.exp will try to run it. + remote_file build delete $output_file; + } + default { + perror "$do_what: not a valid dg-do keyword" + return "" + } + } + + if { $extra_tool_flags != "" } { + lappend options "additional_flags=$extra_tool_flags" + } + + set comp_output [libffi_target_compile "$prog" "$output_file" "$compile_type" $options]; + + + return [list $comp_output $output_file] +} + + +proc libffi-dg-test { prog do_what extra_tool_flags } { + return [libffi-dg-test-1 target_compile $prog $do_what $extra_tool_flags] +} + +proc libffi-dg-prune { target_triplet text } { + # We get this with some qemu emulated systems (eg. ppc64le-linux-gnu) + regsub -all "(^|\n)\[^\n\]*unable to perform all requested operations" $text "" text + return $text +} + +proc libffi-init { args } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global objdir + global TOOL_OPTIONS + global tool + global libffi_include + global libffi_link_flags + global tool_root_dir + global ld_library_path + global compiler_vendor + + if ![info exists blddirffi] { + set blddirffi [pwd]/.. + } + + verbose "libffi $blddirffi" + + # Which compiler are we building with? + set tmp [grep "$blddirffi/config.log" "^ax_cv_c_compiler_vendor.*$"] + regexp -- {^[^=]*=(.*)$} $tmp nil compiler_vendor + + if { [string match $compiler_vendor "gnu"] } { + set gccdir [lookfor_file $tool_root_dir gcc/libgcc.a] + if {$gccdir != ""} { + set gccdir [file dirname $gccdir] + } + verbose "gccdir $gccdir" + + set ld_library_path "." + append ld_library_path ":${gccdir}" + + set compiler "${gccdir}/xgcc" + if { [is_remote host] == 0 && [which $compiler] != 0 } { + foreach i "[exec $compiler --print-multi-lib]" { + set mldir "" + regexp -- "\[a-z0-9=_/\.-\]*;" $i mldir + set mldir [string trimright $mldir "\;@"] + if { "$mldir" == "." } { + continue + } + if { [llength [glob -nocomplain ${gccdir}/${mldir}/libgcc_s*.so.*]] >= 1 } { + append ld_library_path ":${gccdir}/${mldir}" + } + } + } + } + + # add the library path for libffi. + append ld_library_path ":${blddirffi}/.libs" + + verbose "ld_library_path: $ld_library_path" + + # Point to the Libffi headers in libffi. 
+ set libffi_include "${blddirffi}/include" + verbose "libffi_include $libffi_include" + + set libffi_dir "${blddirffi}/.libs" + verbose "libffi_dir $libffi_dir" + if { $libffi_dir != "" } { + set libffi_dir [file dirname ${libffi_dir}] + set libffi_link_flags "-L${libffi_dir}/.libs" + } + + set_ld_library_path_env_vars + libffi_maybe_build_wrapper "${objdir}/testglue.o" +} + +proc libffi_exit { } { + global gluefile; + + if [info exists gluefile] { + file_on_build delete $gluefile; + unset gluefile; + } +} + +proc libffi_target_compile { source dest type options } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global TOOL_OPTIONS + global libffi_link_flags + global libffi_include + global target_triplet + global compiler_vendor + + if { [target_info needs_status_wrapper]!="" && [info exists gluefile] } { + lappend options "libs=${gluefile}" + lappend options "ldflags=$wrap_flags" + } + + # TOOL_OPTIONS must come first, so that it doesn't override testcase + # specific options. + if [info exists TOOL_OPTIONS] { + lappend options "additional_flags=$TOOL_OPTIONS" + } + + # search for ffi_mips.h in srcdir, too + lappend options "additional_flags=-I${libffi_include} -I${srcdir}/../include -I${libffi_include}/.." + lappend options "additional_flags=${libffi_link_flags}" + + # Darwin needs a stack execution allowed flag. + + if { [istarget "*-*-darwin9*"] || [istarget "*-*-darwin1*"] + || [istarget "*-*-darwin2*"] } { + lappend options "additional_flags=-Wl,-allow_stack_execute" + } + + # If you're building the compiler with --prefix set to a place + # where it's not yet installed, then the linker won't be able to + # find the libgcc used by libffi.dylib. We could pass the + # -dylib_file option, but that's complicated, and it's much easier + # to just make the linker find libgcc using -L options. + if { [string match "*-*-darwin*" $target_triplet] } { + lappend options "libs= -shared-libgcc" + } + + if { [string match "*-*-openbsd*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + lappend options "libs= -lffi" + + if { [string match "aarch64*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + # this may be required for g++, but just confused clang. + if { [string match "*.cc" $source] } { + lappend options "c++" + } + + if { [string match "arc*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + verbose "options: $options" + return [target_compile $source $dest $type $options] +} + +# TEST should be a preprocessor condition. Returns true if it holds. +proc libffi_feature_test { test } { + set src "ffitest[pid].c" + + set f [open $src "w"] + puts $f "#include " + puts $f $test + puts $f "/* OK */" + puts $f "#else" + puts $f "# error Failed $test" + puts $f "#endif" + close $f + + set lines [libffi_target_compile $src /dev/null assembly ""] + file delete $src + + return [string match "" $lines] +} + +# Utility routines. + +# +# search_for -- looks for a string match in a file +# +proc search_for { file pattern } { + set fd [open $file r] + while { [gets $fd cur_line]>=0 } { + if [string match "*$pattern*" $cur_line] then { + close $fd + return 1 + } + } + close $fd + return 0 +} + +# Modified dg-runtest that can cycle through a list of optimization options +# as c-torture does. +proc libffi-dg-runtest { testcases default-extra-flags } { + global runtests + + foreach test $testcases { + # If we're only testing specific files and this isn't one of + # them, skip it. 
+ if ![runtest_file_p $runtests $test] { + continue + } + + # Look for a loop within the source code - if we don't find one, + # don't pass -funroll[-all]-loops. + global torture_with_loops torture_without_loops + if [expr [search_for $test "for*("]+[search_for $test "while*("]] { + set option_list $torture_with_loops + } else { + set option_list $torture_without_loops + } + + set nshort [file tail [file dirname $test]]/[file tail $test] + + foreach flags $option_list { + verbose "Testing $nshort, $flags" 1 + dg-test $test $flags ${default-extra-flags} + } + } +} + +proc run-many-tests { testcases extra_flags } { + global compiler_vendor + global env + switch $compiler_vendor { + "clang" { + set common "-W -Wall" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + "gnu" { + set common "-W -Wall -Wno-psabi" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + default { + # Assume we are using the vendor compiler. + set common "" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "" } + } + } + } + + info exists env(LD_LIBRARY_PATH) + + set targetabis { "" } + if [string match $compiler_vendor "gnu"] { + if [libffi_feature_test "#ifdef __i386__"] { + set targetabis { + "" + "-DABI_NUM=FFI_STDCALL -DABI_ATTR=__STDCALL__" + "-DABI_NUM=FFI_THISCALL -DABI_ATTR=__THISCALL__" + "-DABI_NUM=FFI_FASTCALL -DABI_ATTR=__FASTCALL__" + } + } elseif { [istarget "x86_64-*-*"] \ + && [libffi_feature_test "#if !defined __ILP32__ \ + && !defined __i386__"] } { + set targetabis { + "" + "-DABI_NUM=FFI_GNUW64 -DABI_ATTR=__MSABI__" + } + } + } + + set common [ concat $common $extra_flags ] + foreach test $testcases { + set testname [file tail $test] + if [search_for $test "ABI_NUM"] { + set abis $targetabis + } else { + set abis { "" } + } + foreach opt $optimizations { + foreach abi $abis { + set options [concat $common $opt $abi] + verbose "Testing $testname, $options" 1 + dg-test $test $options "" + } + } + } +} + +# Like check_conditional_xfail, but callable from a dg test. + +proc dg-xfail-if { args } { + set args [lreplace $args 0 0] + set selector "target [join [lindex $args 1]]" + if { [dg-process-target $selector] == "S" } { + global compiler_conditional_xfail_data + set compiler_conditional_xfail_data $args + } +} + +proc check-flags { args } { + + # The args are within another list; pull them out. + set args [lindex $args 0] + + # The next two arguments are optional. If they were not specified, + # use the defaults. + if { [llength $args] == 2 } { + lappend $args [list "*"] + } + if { [llength $args] == 3 } { + lappend $args [list ""] + } + + # If the option strings are the defaults, or the same as the + # defaults, there is no need to call check_conditional_xfail to + # compare them to the actual options. + if { [string compare [lindex $args 2] "*"] == 0 + && [string compare [lindex $args 3] "" ] == 0 } { + set result 1 + } else { + # The target list might be an effective-target keyword, so replace + # the original list with "*-*-*", since we already know it matches. + set result [check_conditional_xfail [lreplace $args 1 1 "*-*-*"]] + } + + return $result +} + +proc dg-skip-if { args } { + # Verify the number of arguments. The last two are optional. 
+ set args [lreplace $args 0 0] + if { [llength $args] < 2 || [llength $args] > 4 } { + error "dg-skip-if 2: need 2, 3, or 4 arguments" + } + + # Don't bother if we're already skipping the test. + upvar dg-do-what dg-do-what + if { [lindex ${dg-do-what} 1] == "N" } { + return + } + + set selector [list target [lindex $args 1]] + if { [dg-process-target $selector] == "S" } { + if [check-flags $args] { + upvar dg-do-what dg-do-what + set dg-do-what [list [lindex ${dg-do-what} 0] "N" "P"] + } + } +} + +# We need to make sure that additional_files and additional_sources +# are both cleared out after every test. It is not enough to clear +# them out *before* the next test run because gcc-target-compile gets +# run directly from some .exp files (outside of any test). (Those +# uses should eventually be eliminated.) + +# Because the DG framework doesn't provide a hook that is run at the +# end of a test, we must replace dg-test with a wrapper. + +if { [info procs saved-dg-test] == [list] } { + rename dg-test saved-dg-test + + proc dg-test { args } { + global additional_files + global additional_sources + global errorInfo + + if { [ catch { eval saved-dg-test $args } errmsg ] } { + set saved_info $errorInfo + set additional_files "" + set additional_sources "" + error $errmsg $saved_info + } + set additional_files "" + set additional_sources "" + } +} + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath 2.exp new file mode 100644 index 0000000..6b7beba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath 2.exp @@ -0,0 +1,283 @@ +# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . 
+ +# This file was contributed by John David Anglin (dave.anglin@nrc-cnrc.gc.ca) + +set orig_environment_saved 0 +set orig_ld_library_path_saved 0 +set orig_ld_run_path_saved 0 +set orig_shlib_path_saved 0 +set orig_ld_libraryn32_path_saved 0 +set orig_ld_library64_path_saved 0 +set orig_ld_library_path_32_saved 0 +set orig_ld_library_path_64_saved 0 +set orig_dyld_library_path_saved 0 +set orig_path_saved 0 + +####################################### +# proc set_ld_library_path_env_vars { } +####################################### + +proc set_ld_library_path_env_vars { } { + global ld_library_path + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + global GCC_EXEC_PREFIX + + # Set the relocated compiler prefix, but only if the user hasn't specified one. + if { [info exists GCC_EXEC_PREFIX] && ![info exists env(GCC_EXEC_PREFIX)] } { + setenv GCC_EXEC_PREFIX "$GCC_EXEC_PREFIX" + } + + # Setting the ld library path causes trouble when testing cross-compilers. + if { [is_remote target] } { + return + } + + if { $orig_environment_saved == 0 } { + global env + + set orig_environment_saved 1 + + # Save the original environment. + if [info exists env(LD_LIBRARY_PATH)] { + set orig_ld_library_path "$env(LD_LIBRARY_PATH)" + set orig_ld_library_path_saved 1 + } + if [info exists env(LD_RUN_PATH)] { + set orig_ld_run_path "$env(LD_RUN_PATH)" + set orig_ld_run_path_saved 1 + } + if [info exists env(SHLIB_PATH)] { + set orig_shlib_path "$env(SHLIB_PATH)" + set orig_shlib_path_saved 1 + } + if [info exists env(LD_LIBRARYN32_PATH)] { + set orig_ld_libraryn32_path "$env(LD_LIBRARYN32_PATH)" + set orig_ld_libraryn32_path_saved 1 + } + if [info exists env(LD_LIBRARY64_PATH)] { + set orig_ld_library64_path "$env(LD_LIBRARY64_PATH)" + set orig_ld_library64_path_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_32)] { + set orig_ld_library_path_32 "$env(LD_LIBRARY_PATH_32)" + set orig_ld_library_path_32_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_64)] { + set orig_ld_library_path_64 "$env(LD_LIBRARY_PATH_64)" + set orig_ld_library_path_64_saved 1 + } + if [info exists env(DYLD_LIBRARY_PATH)] { + set orig_dyld_library_path "$env(DYLD_LIBRARY_PATH)" + set orig_dyld_library_path_saved 1 + } + if [info exists env(PATH)] { + set orig_path "$env(PATH)" + set orig_path_saved 1 + } + } + + # We need to set ld library path in the environment. Currently, + # unix.exp doesn't set the environment correctly for all systems. + # It only sets SHLIB_PATH and LD_LIBRARY_PATH when it executes a + # program. We also need the environment set for compilations, etc. + # + # On IRIX 6, we have to set variables akin to LD_LIBRARY_PATH, but + # called LD_LIBRARYN32_PATH (for the N32 ABI) and LD_LIBRARY64_PATH + # (for the 64-bit ABI). The same applies to Darwin (DYLD_LIBRARY_PATH), + # Solaris 32 bit (LD_LIBRARY_PATH_32), Solaris 64 bit (LD_LIBRARY_PATH_64), + # and HP-UX (SHLIB_PATH). 
In some cases, the variables are independent + # of LD_LIBRARY_PATH, and in other cases LD_LIBRARY_PATH is used if the + # variable is not defined. + # + # Doing this is somewhat of a hack as ld_library_path gets repeated in + # SHLIB_PATH and LD_LIBRARY_PATH when unix_load sets these variables. + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH "$ld_library_path" + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$ld_library_path:$orig_ld_run_path" + } else { + setenv LD_RUN_PATH "$ld_library_path" + } + # The default shared library dynamic path search for 64-bit + # HP-UX executables searches LD_LIBRARY_PATH before SHLIB_PATH. + # LD_LIBRARY_PATH isn't used for 32-bit executables. Thus, we + # set LD_LIBRARY_PATH and SHLIB_PATH as if they were independent. + if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$ld_library_path:$orig_shlib_path" + } else { + setenv SHLIB_PATH "$ld_library_path" + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_libraryn32_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARYN32_PATH "$ld_library_path" + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library64_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY64_PATH "$ld_library_path" + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path_32" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_32 "$ld_library_path" + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path_64" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_64 "$ld_library_path" + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$ld_library_path:$orig_dyld_library_path" + } else { + setenv DYLD_LIBRARY_PATH "$ld_library_path" + } + if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } { + if { $orig_path_saved } { + setenv PATH "$ld_library_path:$orig_path" + } else { + setenv PATH "$ld_library_path" + } + } + + verbose -log "set_ld_library_path_env_vars: ld_library_path=$ld_library_path" +} + +####################################### +# proc restore_ld_library_path_env_vars { } +####################################### + +proc restore_ld_library_path_env_vars { } { + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + + if { $orig_environment_saved == 0 } { + return + } + + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$orig_ld_library_path" + } elseif 
[info exists env(LD_LIBRARY_PATH)] { + unsetenv LD_LIBRARY_PATH + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$orig_ld_run_path" + } elseif [info exists env(LD_RUN_PATH)] { + unsetenv LD_RUN_PATH + } + if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$orig_shlib_path" + } elseif [info exists env(SHLIB_PATH)] { + unsetenv SHLIB_PATH + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$orig_ld_libraryn32_path" + } elseif [info exists env(LD_LIBRARYN32_PATH)] { + unsetenv LD_LIBRARYN32_PATH + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$orig_ld_library64_path" + } elseif [info exists env(LD_LIBRARY64_PATH)] { + unsetenv LD_LIBRARY64_PATH + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$orig_ld_library_path_32" + } elseif [info exists env(LD_LIBRARY_PATH_32)] { + unsetenv LD_LIBRARY_PATH_32 + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$orig_ld_library_path_64" + } elseif [info exists env(LD_LIBRARY_PATH_64)] { + unsetenv LD_LIBRARY_PATH_64 + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$orig_dyld_library_path" + } elseif [info exists env(DYLD_LIBRARY_PATH)] { + unsetenv DYLD_LIBRARY_PATH + } + if { $orig_path_saved } { + setenv PATH "$orig_path" + } elseif [info exists env(PATH)] { + unsetenv PATH + } +} + +####################################### +# proc get_shlib_extension { } +####################################### + +proc get_shlib_extension { } { + global shlib_ext + + if { [ istarget *-*-darwin* ] } { + set shlib_ext "dylib" + } elseif { [ istarget *-*-cygwin* ] || [ istarget *-*-mingw* ] } { + set shlib_ext "dll" + } elseif { [ istarget hppa*-*-hpux* ] } { + set shlib_ext "sl" + } else { + set shlib_ext "so" + } + return $shlib_ext +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp new file mode 100644 index 0000000..6b7beba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp @@ -0,0 +1,283 @@ +# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . 
+ +# This file was contributed by John David Anglin (dave.anglin@nrc-cnrc.gc.ca) + +set orig_environment_saved 0 +set orig_ld_library_path_saved 0 +set orig_ld_run_path_saved 0 +set orig_shlib_path_saved 0 +set orig_ld_libraryn32_path_saved 0 +set orig_ld_library64_path_saved 0 +set orig_ld_library_path_32_saved 0 +set orig_ld_library_path_64_saved 0 +set orig_dyld_library_path_saved 0 +set orig_path_saved 0 + +####################################### +# proc set_ld_library_path_env_vars { } +####################################### + +proc set_ld_library_path_env_vars { } { + global ld_library_path + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + global GCC_EXEC_PREFIX + + # Set the relocated compiler prefix, but only if the user hasn't specified one. + if { [info exists GCC_EXEC_PREFIX] && ![info exists env(GCC_EXEC_PREFIX)] } { + setenv GCC_EXEC_PREFIX "$GCC_EXEC_PREFIX" + } + + # Setting the ld library path causes trouble when testing cross-compilers. + if { [is_remote target] } { + return + } + + if { $orig_environment_saved == 0 } { + global env + + set orig_environment_saved 1 + + # Save the original environment. + if [info exists env(LD_LIBRARY_PATH)] { + set orig_ld_library_path "$env(LD_LIBRARY_PATH)" + set orig_ld_library_path_saved 1 + } + if [info exists env(LD_RUN_PATH)] { + set orig_ld_run_path "$env(LD_RUN_PATH)" + set orig_ld_run_path_saved 1 + } + if [info exists env(SHLIB_PATH)] { + set orig_shlib_path "$env(SHLIB_PATH)" + set orig_shlib_path_saved 1 + } + if [info exists env(LD_LIBRARYN32_PATH)] { + set orig_ld_libraryn32_path "$env(LD_LIBRARYN32_PATH)" + set orig_ld_libraryn32_path_saved 1 + } + if [info exists env(LD_LIBRARY64_PATH)] { + set orig_ld_library64_path "$env(LD_LIBRARY64_PATH)" + set orig_ld_library64_path_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_32)] { + set orig_ld_library_path_32 "$env(LD_LIBRARY_PATH_32)" + set orig_ld_library_path_32_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_64)] { + set orig_ld_library_path_64 "$env(LD_LIBRARY_PATH_64)" + set orig_ld_library_path_64_saved 1 + } + if [info exists env(DYLD_LIBRARY_PATH)] { + set orig_dyld_library_path "$env(DYLD_LIBRARY_PATH)" + set orig_dyld_library_path_saved 1 + } + if [info exists env(PATH)] { + set orig_path "$env(PATH)" + set orig_path_saved 1 + } + } + + # We need to set ld library path in the environment. Currently, + # unix.exp doesn't set the environment correctly for all systems. + # It only sets SHLIB_PATH and LD_LIBRARY_PATH when it executes a + # program. We also need the environment set for compilations, etc. + # + # On IRIX 6, we have to set variables akin to LD_LIBRARY_PATH, but + # called LD_LIBRARYN32_PATH (for the N32 ABI) and LD_LIBRARY64_PATH + # (for the 64-bit ABI). The same applies to Darwin (DYLD_LIBRARY_PATH), + # Solaris 32 bit (LD_LIBRARY_PATH_32), Solaris 64 bit (LD_LIBRARY_PATH_64), + # and HP-UX (SHLIB_PATH). 
In some cases, the variables are independent + # of LD_LIBRARY_PATH, and in other cases LD_LIBRARY_PATH is used if the + # variable is not defined. + # + # Doing this is somewhat of a hack as ld_library_path gets repeated in + # SHLIB_PATH and LD_LIBRARY_PATH when unix_load sets these variables. + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH "$ld_library_path" + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$ld_library_path:$orig_ld_run_path" + } else { + setenv LD_RUN_PATH "$ld_library_path" + } + # The default shared library dynamic path search for 64-bit + # HP-UX executables searches LD_LIBRARY_PATH before SHLIB_PATH. + # LD_LIBRARY_PATH isn't used for 32-bit executables. Thus, we + # set LD_LIBRARY_PATH and SHLIB_PATH as if they were independent. + if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$ld_library_path:$orig_shlib_path" + } else { + setenv SHLIB_PATH "$ld_library_path" + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_libraryn32_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARYN32_PATH "$ld_library_path" + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library64_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY64_PATH "$ld_library_path" + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path_32" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_32 "$ld_library_path" + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path_64" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_64 "$ld_library_path" + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$ld_library_path:$orig_dyld_library_path" + } else { + setenv DYLD_LIBRARY_PATH "$ld_library_path" + } + if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } { + if { $orig_path_saved } { + setenv PATH "$ld_library_path:$orig_path" + } else { + setenv PATH "$ld_library_path" + } + } + + verbose -log "set_ld_library_path_env_vars: ld_library_path=$ld_library_path" +} + +####################################### +# proc restore_ld_library_path_env_vars { } +####################################### + +proc restore_ld_library_path_env_vars { } { + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + + if { $orig_environment_saved == 0 } { + return + } + + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$orig_ld_library_path" + } elseif 
[info exists env(LD_LIBRARY_PATH)] { + unsetenv LD_LIBRARY_PATH + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$orig_ld_run_path" + } elseif [info exists env(LD_RUN_PATH)] { + unsetenv LD_RUN_PATH + } + if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$orig_shlib_path" + } elseif [info exists env(SHLIB_PATH)] { + unsetenv SHLIB_PATH + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$orig_ld_libraryn32_path" + } elseif [info exists env(LD_LIBRARYN32_PATH)] { + unsetenv LD_LIBRARYN32_PATH + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$orig_ld_library64_path" + } elseif [info exists env(LD_LIBRARY64_PATH)] { + unsetenv LD_LIBRARY64_PATH + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$orig_ld_library_path_32" + } elseif [info exists env(LD_LIBRARY_PATH_32)] { + unsetenv LD_LIBRARY_PATH_32 + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$orig_ld_library_path_64" + } elseif [info exists env(LD_LIBRARY_PATH_64)] { + unsetenv LD_LIBRARY_PATH_64 + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$orig_dyld_library_path" + } elseif [info exists env(DYLD_LIBRARY_PATH)] { + unsetenv DYLD_LIBRARY_PATH + } + if { $orig_path_saved } { + setenv PATH "$orig_path" + } elseif [info exists env(PATH)] { + unsetenv PATH + } +} + +####################################### +# proc get_shlib_extension { } +####################################### + +proc get_shlib_extension { } { + global shlib_ext + + if { [ istarget *-*-darwin* ] } { + set shlib_ext "dylib" + } elseif { [ istarget *-*-cygwin* ] || [ istarget *-*-mingw* ] } { + set shlib_ext "dll" + } elseif { [ istarget hppa*-*-hpux* ] } { + set shlib_ext "sl" + } else { + set shlib_ext "so" + } + return $shlib_ext +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper 2.exp new file mode 100644 index 0000000..4e5ae43 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper 2.exp @@ -0,0 +1,45 @@ +# Copyright (C) 2004, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# This file contains GCC-specifics for status wrappers for test programs. + +# ${tool}_maybe_build_wrapper -- Build wrapper object if the target +# needs it. FILENAME is the path to the wrapper file. If there are +# additional arguments, they are command-line options to provide to +# the compiler when compiling FILENAME. 
+ +proc ${tool}_maybe_build_wrapper { filename args } { + global gluefile wrap_flags + + if { [target_info needs_status_wrapper] != "" \ + && [target_info needs_status_wrapper] != "0" \ + && ![info exists gluefile] } { + set saved_wrap_compile_flags [target_info wrap_compile_flags] + set flags [join $args " "] + # The wrapper code may contain code that gcc objects on. This + # became true for dejagnu-1.4.4. The set of warnings and code + # that gcc objects on may change, so just make sure -w is always + # passed to turn off all warnings. + set_currtarget_info wrap_compile_flags \ + "$saved_wrap_compile_flags -w $flags" + set result [build_wrapper $filename] + set_currtarget_info wrap_compile_flags "$saved_wrap_compile_flags" + if { $result != "" } { + set gluefile [lindex $result 0] + set wrap_flags [lindex $result 1] + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper.exp new file mode 100644 index 0000000..4e5ae43 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper.exp @@ -0,0 +1,45 @@ +# Copyright (C) 2004, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# This file contains GCC-specifics for status wrappers for test programs. + +# ${tool}_maybe_build_wrapper -- Build wrapper object if the target +# needs it. FILENAME is the path to the wrapper file. If there are +# additional arguments, they are command-line options to provide to +# the compiler when compiling FILENAME. + +proc ${tool}_maybe_build_wrapper { filename args } { + global gluefile wrap_flags + + if { [target_info needs_status_wrapper] != "" \ + && [target_info needs_status_wrapper] != "0" \ + && ![info exists gluefile] } { + set saved_wrap_compile_flags [target_info wrap_compile_flags] + set flags [join $args " "] + # The wrapper code may contain code that gcc objects on. This + # became true for dejagnu-1.4.4. The set of warnings and code + # that gcc objects on may change, so just make sure -w is always + # passed to turn off all warnings. 
+ set_currtarget_info wrap_compile_flags \ + "$saved_wrap_compile_flags -w $flags" + set result [build_wrapper $filename] + set_currtarget_info wrap_compile_flags "$saved_wrap_compile_flags" + if { $result != "" } { + set gluefile [lindex $result 0] + set wrap_flags [lindex $result 1] + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile new file mode 100644 index 0000000..3322de9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile @@ -0,0 +1,28 @@ +CC = gcc +CFLAGS = -O2 -Wall +prefix = +includedir = $(prefix)/include +libdir = $(prefix)/lib +CPPFLAGS = -I$(includedir) +LDFLAGS = -L$(libdir) -Wl,-rpath,$(libdir) + +all: check-call check-callback + +test-call: test-call.c testcases.c + $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-call test-call.c -lffi + +test-callback: test-callback.c testcases.c + $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-callback test-callback.c -lffi + +check-call: test-call + ./test-call > test-call.out + LC_ALL=C uniq -u < test-call.out > failed-call + test '!' -s failed-call + +check-callback: test-callback + ./test-callback > test-callback.out + LC_ALL=C uniq -u < test-callback.out > failed-callback + test '!' -s failed-callback + +clean: + rm -f test-call test-callback test-call.out test-callback.out failed-call failed-callback diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile 2 new file mode 100644 index 0000000..3322de9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile 2 @@ -0,0 +1,28 @@ +CC = gcc +CFLAGS = -O2 -Wall +prefix = +includedir = $(prefix)/include +libdir = $(prefix)/lib +CPPFLAGS = -I$(includedir) +LDFLAGS = -L$(libdir) -Wl,-rpath,$(libdir) + +all: check-call check-callback + +test-call: test-call.c testcases.c + $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-call test-call.c -lffi + +test-callback: test-callback.c testcases.c + $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-callback test-callback.c -lffi + +check-call: test-call + ./test-call > test-call.out + LC_ALL=C uniq -u < test-call.out > failed-call + test '!' -s failed-call + +check-callback: test-callback + ./test-callback > test-callback.out + LC_ALL=C uniq -u < test-callback.out > failed-callback + test '!' -s failed-callback + +clean: + rm -f test-call test-callback test-call.out test-callback.out failed-call failed-callback diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README new file mode 100644 index 0000000..be8540b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README @@ -0,0 +1,78 @@ +This package contains a test suite for libffi. + +This test suite can be compiled with a C compiler. No need for 'expect' +or some other package that is often not installed. + +The test suite consists of 81 C functions, each with a different signature. +* test-call verifies that calling each function directly produces the same + results as calling the function indirectly through 'ffi_call'. 
+* test-callback verifies that calling each function directly produces the same + results as calling a function that is a callback (object build by + 'ffi_prep_closure_loc') and simulates the original function. + +Each direct or indirect invocation should produce one line of output to +stdout. A correct output consists of paired lines, such as + +void f(void): +void f(void): +int f(void):->99 +int f(void):->99 +int f(int):(1)->2 +int f(int):(1)->2 +int f(2*int):(1,2)->3 +int f(2*int):(1,2)->3 +... + +The Makefile then creates two files: +* failed-call, which consists of the non-paired lines of output of + 'test-call', +* failed-callback, which consists of the non-paired lines of output of + 'test-callback'. + +The test suite passes if both failed-call and failed-callback come out +as empty. + + +How to use the test suite +------------------------- + +1. Modify the Makefile's variables + prefix = the directory in which libffi was installed + CC = the C compiler, often with options such as "-m32" or "-m64" + that enforce a certain ABI, + CFLAGS = optimization options (need to change them only for non-GCC + compilers) +2. Run "make". If it fails already in "test-call", run also + "make check-callback". +3. If this failed, inspect the output files. + + +How to interpret the results +---------------------------- + +The failed-call and failed-callback files consist of paired lines: +The first line is the result of the direct invocation. +The second line is the result of invocation through libffi. + +For example, this output + +uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->255 +uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->0 + +indicates that the arguments were passed correctly, but the return +value came out wrong. + +And this output + +float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,6,7,8,561,1105,1729,2465,2821,6601)->15319.1 +float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,-140443648,10,268042216,-72537980,-140443648,-140443648,-140443648,-140443648,-140443648)->-6.47158e+08 + +indicates that integer arguments that come after 17 floating-point arguments +were not passed correctly. + + +Credits +------- + +The test suite is based on the one of GNU libffcall-2.0. +Authors: Bill Triggs, Bruno Haible diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README 2 b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README 2 new file mode 100644 index 0000000..be8540b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README 2 @@ -0,0 +1,78 @@ +This package contains a test suite for libffi. + +This test suite can be compiled with a C compiler. No need for 'expect' +or some other package that is often not installed. + +The test suite consists of 81 C functions, each with a different signature. +* test-call verifies that calling each function directly produces the same + results as calling the function indirectly through 'ffi_call'. +* test-callback verifies that calling each function directly produces the same + results as calling a function that is a callback (object build by + 'ffi_prep_closure_loc') and simulates the original function. + +Each direct or indirect invocation should produce one line of output to +stdout. 
A correct output consists of paired lines, such as + +void f(void): +void f(void): +int f(void):->99 +int f(void):->99 +int f(int):(1)->2 +int f(int):(1)->2 +int f(2*int):(1,2)->3 +int f(2*int):(1,2)->3 +... + +The Makefile then creates two files: +* failed-call, which consists of the non-paired lines of output of + 'test-call', +* failed-callback, which consists of the non-paired lines of output of + 'test-callback'. + +The test suite passes if both failed-call and failed-callback come out +as empty. + + +How to use the test suite +------------------------- + +1. Modify the Makefile's variables + prefix = the directory in which libffi was installed + CC = the C compiler, often with options such as "-m32" or "-m64" + that enforce a certain ABI, + CFLAGS = optimization options (need to change them only for non-GCC + compilers) +2. Run "make". If it fails already in "test-call", run also + "make check-callback". +3. If this failed, inspect the output files. + + +How to interpret the results +---------------------------- + +The failed-call and failed-callback files consist of paired lines: +The first line is the result of the direct invocation. +The second line is the result of invocation through libffi. + +For example, this output + +uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->255 +uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->0 + +indicates that the arguments were passed correctly, but the return +value came out wrong. + +And this output + +float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,6,7,8,561,1105,1729,2465,2821,6601)->15319.1 +float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,-140443648,10,268042216,-72537980,-140443648,-140443648,-140443648,-140443648,-140443648)->-6.47158e+08 + +indicates that integer arguments that come after 17 floating-point arguments +were not passed correctly. + + +Credits +------- + +The test suite is based on the one of GNU libffcall-2.0. +Authors: Bill Triggs, Bruno Haible diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof 2.h new file mode 100644 index 0000000..00604a5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof 2.h @@ -0,0 +1,50 @@ +/* Determine alignment of types. + Copyright (C) 2003-2004, 2006, 2009-2017 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see . */ + +#ifndef _ALIGNOF_H +#define _ALIGNOF_H + +#include + +/* alignof_slot (TYPE) + Determine the alignment of a structure slot (field) of a given type, + at compile time. Note that the result depends on the ABI. + This is the same as alignof (TYPE) and _Alignof (TYPE), defined in + if __alignof_is_defined is 1. + Note: The result cannot be used as a value for an 'enum' constant, + due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. 
*/ +#if defined __cplusplus + template struct alignof_helper { char __slot1; type __slot2; }; +# define alignof_slot(type) offsetof (alignof_helper, __slot2) +#else +# define alignof_slot(type) offsetof (struct { char __slot1; type __slot2; }, __slot2) +#endif + +/* alignof_type (TYPE) + Determine the good alignment of an object of the given type at compile time. + Note that this is not necessarily the same as alignof_slot(type). + For example, with GNU C on x86 platforms: alignof_type(double) = 8, but + - when -malign-double is not specified: alignof_slot(double) = 4, + - when -malign-double is specified: alignof_slot(double) = 8. + Note: The result cannot be used as a value for an 'enum' constant, + due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. */ +#if defined __GNUC__ || defined __IBM__ALIGNOF__ +# define alignof_type __alignof__ +#else +# define alignof_type alignof_slot +#endif + +#endif /* _ALIGNOF_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h new file mode 100644 index 0000000..00604a5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h @@ -0,0 +1,50 @@ +/* Determine alignment of types. + Copyright (C) 2003-2004, 2006, 2009-2017 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see . */ + +#ifndef _ALIGNOF_H +#define _ALIGNOF_H + +#include + +/* alignof_slot (TYPE) + Determine the alignment of a structure slot (field) of a given type, + at compile time. Note that the result depends on the ABI. + This is the same as alignof (TYPE) and _Alignof (TYPE), defined in + if __alignof_is_defined is 1. + Note: The result cannot be used as a value for an 'enum' constant, + due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. */ +#if defined __cplusplus + template struct alignof_helper { char __slot1; type __slot2; }; +# define alignof_slot(type) offsetof (alignof_helper, __slot2) +#else +# define alignof_slot(type) offsetof (struct { char __slot1; type __slot2; }, __slot2) +#endif + +/* alignof_type (TYPE) + Determine the good alignment of an object of the given type at compile time. + Note that this is not necessarily the same as alignof_slot(type). + For example, with GNU C on x86 platforms: alignof_type(double) = 8, but + - when -malign-double is not specified: alignof_slot(double) = 4, + - when -malign-double is specified: alignof_slot(double) = 8. + Note: The result cannot be used as a value for an 'enum' constant, + due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. 
*/ +#if defined __GNUC__ || defined __IBM__ALIGNOF__ +# define alignof_type __alignof__ +#else +# define alignof_type alignof_slot +#endif + +#endif /* _ALIGNOF_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible 2.exp new file mode 100644 index 0000000..44aebc5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible 2.exp @@ -0,0 +1,63 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2018 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir +global compiler_vendor + +# The conversion of this testsuite into a dejagnu compatible testsuite +# was done in a pretty lazy fashion, and requires the use of compiler +# flags to disable warnings for now. +if { [string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-unused-but-set-variable -Wno-uninitialized"; +} +if { [string match $compiler_vendor "microsoft"] } { + # -wd4996 suggest use of vsprintf_s instead of vsprintf + # -wd4116 unnamed type definition + # -wd4101 unreferenced local variable + # -wd4244 warning about implicit double to float conversion + set warning_options "-wd4996 -wd4116 -wd4101 -wd4244"; +} +if { ![string match $compiler_vendor "microsoft"] && ![string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-uninitialized"; +} + + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-call.c]] + +for {set i 1} {$i < 82} {incr i} { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-callback.c]] + +for {set i 1} {$i < 81} {incr i} { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] + } else { + foreach test $tlist { + unsupported [format "%s -DDGTEST=%d %s" $test $i $warning_options] + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp new file mode 100644 index 0000000..44aebc5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp @@ -0,0 +1,63 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2018 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir +global compiler_vendor + +# The conversion of this testsuite into a dejagnu compatible testsuite +# was done in a pretty lazy fashion, and requires the use of compiler +# flags to disable warnings for now. +if { [string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-unused-but-set-variable -Wno-uninitialized"; +} +if { [string match $compiler_vendor "microsoft"] } { + # -wd4996 suggest use of vsprintf_s instead of vsprintf + # -wd4116 unnamed type definition + # -wd4101 unreferenced local variable + # -wd4244 warning about implicit double to float conversion + set warning_options "-wd4996 -wd4116 -wd4101 -wd4244"; +} +if { ![string match $compiler_vendor "microsoft"] && ![string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-uninitialized"; +} + + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-call.c]] + +for {set i 1} {$i < 82} {incr i} { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-callback.c]] + +for {set i 1} {$i < 81} {incr i} { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] + } else { + foreach test $tlist { + unsupported [format "%s -DDGTEST=%d %s" $test $i $warning_options] + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call 2.c new file mode 100644 index 0000000..01a8a21 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call 2.c @@ -0,0 +1,1745 @@ +/** + Copyright 1993 Bill Triggs + Copyright 1995-2017 Bruno Haible + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +**/ + +/* { dg-do run } */ + +#include +#include +#include +#include +#include "alignof.h" +#include + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) 
+{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() +#define FFI_CALL(cif,fn,args,retaddr) \ + ffi_call(&(cif),(void(*)(void))(fn),retaddr,args) + +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +void + void_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 1 + v_v(); + clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + { + FFI_CALL(cif,v_v,NULL,NULL); + } + } +#endif + return; +} +void + int_tests (void) +{ + int ir; + ffi_arg retvalue; +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + { + FFI_CALL(cif,i_v,NULL,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1 }; + FFI_CALL(cif,i_i,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2 }; + FFI_CALL(cif,i_i2,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + 
fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4 }; + FFI_CALL(cif,i_i4,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8 }; + FFI_CALL(cif,i_i8,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8, &i9, &i10, &i11, &i12, &i13, &i14, &i15, &i16 }; + FFI_CALL(cif,i_i16,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + + return; +} +void + float_tests (void) +{ + float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1 }; + FFI_CALL(cif,f_f,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2 }; + FFI_CALL(cif,f_f2,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4 }; + FFI_CALL(cif,f_f4,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8 }; + FFI_CALL(cif,f_f8,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST 
== 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16 }; + FFI_CALL(cif,f_f16,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &f18, &f19, &f20, &f21, &f22, &f23, &f24 }; + FFI_CALL(cif,f_f24,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +} +void + double_tests (void) +{ + double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1 }; + FFI_CALL(cif,d_d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2 }; + FFI_CALL(cif,d_d2,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4 }; + FFI_CALL(cif,d_d4,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8 }; + FFI_CALL(cif,d_d8,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || 
DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16 }; + FFI_CALL(cif,d_d16,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + pointer_tests (void) +{ + void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + { + void* puc1 = &uc1; + void* pd2 = &d2; + void* pstr3 = str3; + void* pI4 = &I4; + /*const*/ void* args[] = { &puc1, &pd2, &pstr3, &pI4 }; + FFI_CALL(cif,vp_vpdpcpsp,args,&vpr); + } + } + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + return; +} +void + mixed_number_tests (void) +{ + uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + + /* Unsigned types. + */ +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1, us2, ui3, ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + { + ffi_arg r; + /*const*/ void* args[] = { &uc1, &us2, &ui3, &ul4 }; + FFI_CALL(cif,uc_ucsil,args,&r); + ucr = (uchar) r; + } + } + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + /* Mixed int & float types. 
+ */ + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &d3, &d4 }; + FFI_CALL(cif,d_iidd,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &d4, &i5 }; + FFI_CALL(cif,d_iiidi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &d2, &i3, &d4 }; + FFI_CALL(cif,d_idid,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &f1, &d2, &i3 }; + FFI_CALL(cif,d_fdi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + { + ffi_arg rint; + /*const*/ void* args[] = { &c1, &d2, &c3, &d4 }; + FFI_CALL(cif,us_cdcd,args,&rint); + usr = (ushort) rint; + } + } + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + /* Long long types. 
+ */ + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &ll1, &i13 }; + FFI_CALL(cif,ll_iiilli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &f13, &ll1, &i13 }; + FFI_CALL(cif,ll_flli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &i9 }; + FFI_CALL(cif,f_fi,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &i9 }; + FFI_CALL(cif,f_f2i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &i9 }; + FFI_CALL(cif,f_f3i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &i9 }; + FFI_CALL(cif,f_f4i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &i9 }; + FFI_CALL(cif,f_f7i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &i9 }; + FFI_CALL(cif,f_f8i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f12i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &i9 }; + FFI_CALL(cif,f_f12i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &i9 }; + FFI_CALL(cif,f_f13i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &i9 }; + FFI_CALL(cif,d_di,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &i9 }; + FFI_CALL(cif,d_d2i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &i9 }; + FFI_CALL(cif,d_d3i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &i9 }; + FFI_CALL(cif,d_d4i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = 
d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &i9 }; + FFI_CALL(cif,d_d7i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &i9 }; + FFI_CALL(cif,d_d8i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &i9 }; + FFI_CALL(cif,d_d12i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 43 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &i9 }; + FFI_CALL(cif,d_d13i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + small_structure_return_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + { + FFI_CALL(cif,S1_v,NULL,&r); + } + } + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + 
ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + { + FFI_CALL(cif,S2_v,NULL,&r); + } + } + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size3 r = S3_v(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + { + FFI_CALL(cif,S3_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + { + FFI_CALL(cif,S4_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + { + FFI_CALL(cif,S7_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + { + FFI_CALL(cif,S8_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, 
&ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + { + FFI_CALL(cif,S12_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + { + FFI_CALL(cif,S15_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 52 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + { + FFI_CALL(cif,S16_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif +} +void + structure_tests (void) +{ + Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + { + /*const*/ void* args[] = { &I1, &I2, &I3 }; + FFI_CALL(cif,I_III,args,&Ir); + } + } + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 54 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + 
fflush(out); + Cr.x = '\0'; clear_traces(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + { + /*const*/ void* args[] = { &C1, &d2, &C3 }; + FFI_CALL(cif,C_CdC,args,&Cr); + } + } + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 55 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + { + /*const*/ void* args[] = { &F1, &f2, &d3 }; + FFI_CALL(cif,F_Ffd,args,&Fr); + } + } + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &f1, &D2, &d3 }; + FFI_CALL(cif,D_fDd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 57 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &D1, &f2, &d3 }; + FFI_CALL(cif,D_Dfd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 58 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + { + /*const*/ void* args[] = { &J1, &i2, &J2 }; + FFI_CALL(cif,J_JiJ,args,&Jr); + } + } + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 59 + Tr = T_TcT(T1,' ',T2); + 
fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + { + char space = ' '; + /*const*/ void* args[] = { &T1, &space, &T2 }; + FFI_CALL(cif,T_TcT,args,&Tr); + } + } + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 60 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + { + /*const*/ void* args[] = { &B1, &c2, &d3, &B2 }; + FFI_CALL(cif,X_BcdB,args,&Xr); + } + } + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif + + return; +} + +void + gpargs_boundary_tests (void) +{ + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &K1, &l9 }; + FFI_CALL(cif,l_l0K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &K1, &l9 }; + FFI_CALL(cif,l_l1K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &K1, &l9 }; + FFI_CALL(cif,l_l2K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { 
&ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &K1, &l9 }; + FFI_CALL(cif,l_l3K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &K1, &l9 }; + FFI_CALL(cif,l_l4K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &K1, &l9 }; + FFI_CALL(cif,l_l5K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 67 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &K1, &l9 }; + FFI_CALL(cif,l_l6K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 68 + fr = f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &l6, &l7, &l8, &L1 }; + FFI_CALL(cif,f_f17l3L,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 69 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16, &d17, &l6, &l7, &l8, &L1 }; + 
FFI_CALL(cif,d_d17l3L,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &ll1, &l9 }; + FFI_CALL(cif,ll_l2ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &ll1, &l9 }; + FFI_CALL(cif,ll_l3ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &ll1, &l9 }; + FFI_CALL(cif,ll_l4ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &ll1, &l9 }; + FFI_CALL(cif,ll_l5ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &ll1, &l9 }; + FFI_CALL(cif,ll_l6ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 75 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ 
void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &ll1, &l9 }; + FFI_CALL(cif,ll_l7ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l2d(l1,l2,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &d2, &l9 }; + FFI_CALL(cif,d_l2d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l3d(l1,l2,l3,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &d2, &l9 }; + FFI_CALL(cif,d_l3d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l4d(l1,l2,l3,l4,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &d2, &l9 }; + FFI_CALL(cif,d_l4d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l5d(l1,l2,l3,l4,l5,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &d2, &l9 }; + FFI_CALL(cif,d_l5d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l6d(l1,l2,l3,l4,l5,l6,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &d2, &l9 }; + FFI_CALL(cif,d_l6d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 81 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &d2, &l9 }; + FFI_CALL(cif,d_l7d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} + +int + main (void) +{ + ffi_type_char = (char)(-1) < 0 ? 
ffi_type_schar : ffi_type_uchar; + out = stdout; + + void_tests(); + int_tests(); + float_tests(); + double_tests(); + pointer_tests(); + mixed_number_tests(); + small_structure_return_tests(); + structure_tests(); + gpargs_boundary_tests(); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c new file mode 100644 index 0000000..01a8a21 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c @@ -0,0 +1,1745 @@ +/** + Copyright 1993 Bill Triggs + Copyright 1995-2017 Bruno Haible + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. +**/ + +/* { dg-do run } */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include "alignof.h" +#include <ffi.h> + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. 
*/ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() +#define FFI_CALL(cif,fn,args,retaddr) \ + ffi_call(&(cif),(void(*)(void))(fn),retaddr,args) + +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +void + void_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 1 + v_v(); + clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + { + FFI_CALL(cif,v_v,NULL,NULL); + } + } +#endif + return; +} +void + int_tests (void) +{ + int ir; + ffi_arg retvalue; +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + { + FFI_CALL(cif,i_v,NULL,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1 }; + FFI_CALL(cif,i_i,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2 }; + FFI_CALL(cif,i_i2,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4 }; + FFI_CALL(cif,i_i4,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, 
&i7, &i8 }; + FFI_CALL(cif,i_i8,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8, &i9, &i10, &i11, &i12, &i13, &i14, &i15, &i16 }; + FFI_CALL(cif,i_i16,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + + return; +} +void + float_tests (void) +{ + float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1 }; + FFI_CALL(cif,f_f,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2 }; + FFI_CALL(cif,f_f2,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4 }; + FFI_CALL(cif,f_f4,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8 }; + FFI_CALL(cif,f_f8,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16 }; + FFI_CALL(cif,f_f16,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = 
f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &f18, &f19, &f20, &f21, &f22, &f23, &f24 }; + FFI_CALL(cif,f_f24,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +} +void + double_tests (void) +{ + double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1 }; + FFI_CALL(cif,d_d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2 }; + FFI_CALL(cif,d_d2,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4 }; + FFI_CALL(cif,d_d4,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8 }; + FFI_CALL(cif,d_d8,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16 }; + FFI_CALL(cif,d_d16,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + 
pointer_tests (void) +{ + void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + { + void* puc1 = &uc1; + void* pd2 = &d2; + void* pstr3 = str3; + void* pI4 = &I4; + /*const*/ void* args[] = { &puc1, &pd2, &pstr3, &pI4 }; + FFI_CALL(cif,vp_vpdpcpsp,args,&vpr); + } + } + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + return; +} +void + mixed_number_tests (void) +{ + uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + + /* Unsigned types. + */ +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1, us2, ui3, ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + { + ffi_arg r; + /*const*/ void* args[] = { &uc1, &us2, &ui3, &ul4 }; + FFI_CALL(cif,uc_ucsil,args,&r); + ucr = (uchar) r; + } + } + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + /* Mixed int & float types. + */ + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &d3, &d4 }; + FFI_CALL(cif,d_iidd,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &d4, &i5 }; + FFI_CALL(cif,d_iiidi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &d2, &i3, &d4 }; + FFI_CALL(cif,d_idid,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &f1, &d2, &i3 }; + FFI_CALL(cif,d_fdi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + { + ffi_arg rint; + /*const*/ void* args[] = { &c1, &d2, &c3, &d4 }; + FFI_CALL(cif,us_cdcd,args,&rint); + usr = (ushort) rint; + } + } + fprintf(out,"->%u\n",usr); + 
fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + /* Long long types. + */ + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &ll1, &i13 }; + FFI_CALL(cif,ll_iiilli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &f13, &ll1, &i13 }; + FFI_CALL(cif,ll_flli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &i9 }; + FFI_CALL(cif,f_fi,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &i9 }; + FFI_CALL(cif,f_f2i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &i9 }; + FFI_CALL(cif,f_f3i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &i9 }; + FFI_CALL(cif,f_f4i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &i9 }; + FFI_CALL(cif,f_f7i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + 
fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &i9 }; + FFI_CALL(cif,f_f8i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f12i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &i9 }; + FFI_CALL(cif,f_f12i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &i9 }; + FFI_CALL(cif,f_f13i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &i9 }; + FFI_CALL(cif,d_di,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &i9 }; + FFI_CALL(cif,d_d2i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &i9 }; + FFI_CALL(cif,d_d3i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &i9 }; + FFI_CALL(cif,d_d4i,args,&dr); + } + } + 
fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &i9 }; + FFI_CALL(cif,d_d7i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &i9 }; + FFI_CALL(cif,d_d8i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &i9 }; + FFI_CALL(cif,d_d12i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 43 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &i9 }; + FFI_CALL(cif,d_d13i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + small_structure_return_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + { + FFI_CALL(cif,S1_v,NULL,&r); + } + } + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = 
FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + { + FFI_CALL(cif,S2_v,NULL,&r); + } + } + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size3 r = S3_v(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + { + FFI_CALL(cif,S3_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + { + FFI_CALL(cif,S4_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + { + FFI_CALL(cif,S7_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + { + FFI_CALL(cif,S8_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, 
&ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + { + FFI_CALL(cif,S12_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + { + FFI_CALL(cif,S15_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 52 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + { + FFI_CALL(cif,S16_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif +} +void + structure_tests (void) +{ + Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + { + /*const*/ void* args[] = { &I1, &I2, &I3 }; + FFI_CALL(cif,I_III,args,&Ir); + } + } + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); 
+#endif +#if (!defined(DGTEST)) || DGTEST == 54 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); + Cr.x = '\0'; clear_traces(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + { + /*const*/ void* args[] = { &C1, &d2, &C3 }; + FFI_CALL(cif,C_CdC,args,&Cr); + } + } + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 55 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + { + /*const*/ void* args[] = { &F1, &f2, &d3 }; + FFI_CALL(cif,F_Ffd,args,&Fr); + } + } + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &f1, &D2, &d3 }; + FFI_CALL(cif,D_fDd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 57 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &D1, &f2, &d3 }; + FFI_CALL(cif,D_Dfd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 58 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + { + /*const*/ void* args[] = { &J1, &i2, &J2 }; + FFI_CALL(cif,J_JiJ,args,&Jr); + } + } + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); 
+#endif +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 59 + Tr = T_TcT(T1,' ',T2); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + { + char space = ' '; + /*const*/ void* args[] = { &T1, &space, &T2 }; + FFI_CALL(cif,T_TcT,args,&Tr); + } + } + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 60 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + { + /*const*/ void* args[] = { &B1, &c2, &d3, &B2 }; + FFI_CALL(cif,X_BcdB,args,&Xr); + } + } + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif + + return; +} + +void + gpargs_boundary_tests (void) +{ + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &K1, &l9 }; + FFI_CALL(cif,l_l0K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &K1, &l9 }; + FFI_CALL(cif,l_l1K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &K1, &l9 }; + FFI_CALL(cif,l_l2K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l3K(l1,l2,l3,K1,l9); + 
fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &K1, &l9 }; + FFI_CALL(cif,l_l3K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &K1, &l9 }; + FFI_CALL(cif,l_l4K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &K1, &l9 }; + FFI_CALL(cif,l_l5K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 67 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &K1, &l9 }; + FFI_CALL(cif,l_l6K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 68 + fr = f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &l6, &l7, &l8, &L1 }; + FFI_CALL(cif,f_f17l3L,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 69 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, 
&d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16, &d17, &l6, &l7, &l8, &L1 }; + FFI_CALL(cif,d_d17l3L,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &ll1, &l9 }; + FFI_CALL(cif,ll_l2ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &ll1, &l9 }; + FFI_CALL(cif,ll_l3ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &ll1, &l9 }; + FFI_CALL(cif,ll_l4ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &ll1, &l9 }; + FFI_CALL(cif,ll_l5ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &ll1, &l9 }; + FFI_CALL(cif,ll_l6ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 75 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, 
&ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &ll1, &l9 }; + FFI_CALL(cif,ll_l7ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l2d(l1,l2,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &d2, &l9 }; + FFI_CALL(cif,d_l2d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l3d(l1,l2,l3,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &d2, &l9 }; + FFI_CALL(cif,d_l3d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l4d(l1,l2,l3,l4,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &d2, &l9 }; + FFI_CALL(cif,d_l4d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l5d(l1,l2,l3,l4,l5,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &d2, &l9 }; + FFI_CALL(cif,d_l5d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l6d(l1,l2,l3,l4,l5,l6,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &d2, &l9 }; + FFI_CALL(cif,d_l6d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 81 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &d2, &l9 }; + FFI_CALL(cif,d_l7d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} + +int + main (void) +{ + ffi_type_char = (char)(-1) < 0 ? 
ffi_type_schar : ffi_type_uchar; + out = stdout; + + void_tests(); + int_tests(); + float_tests(); + double_tests(); + pointer_tests(); + mixed_number_tests(); + small_structure_return_tests(); + structure_tests(); + gpargs_boundary_tests(); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback 2.c new file mode 100644 index 0000000..c2633c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback 2.c @@ -0,0 +1,2885 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* { dg-do run } */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include "alignof.h" +#include <ffi.h> + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() + +#if defined(__sparc__) && defined(__sun) && defined(__SUNPRO_C) /* SUNWspro cc */ +/* SunPRO cc miscompiles the simulator function for X_BcdB: d.i[1] is + * temporarily stored in %l2 and put onto the stack from %l2, but in between + * the copy of X has used %l2 as a counter without saving and restoring its + * value. + */ +#define SKIP_X +#endif +#if defined(__mipsn32__) && !defined(__GNUC__) +/* The X test crashes for an unknown reason. */ +#define SKIP_X +#endif + + +/* These functions simulate the behaviour of the functions defined in testcases.c.
*/ + +/* void tests */ +void v_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&v_v) { fprintf(out,"wrong data for v_v\n"); exit(1); } + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +void i_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_v) { fprintf(out,"wrong data for i_v\n"); exit(1); } + {int r=99; + fprintf(out,"int f(void):"); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i) { fprintf(out,"wrong data for i_i\n"); exit(1); } + int a = *(int*)(*args++); + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + *(ffi_arg*)retp = r; +} +void i_i2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i2) { fprintf(out,"wrong data for i_i2\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i4) { fprintf(out,"wrong data for i_i4\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i8) { fprintf(out,"wrong data for i_i8\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i16) { fprintf(out,"wrong data for i_i16\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int i = *(int*)(*args++); + int j = *(int*)(*args++); + int k = *(int*)(*args++); + int l = *(int*)(*args++); + int m = *(int*)(*args++); + int n = *(int*)(*args++); + int o = *(int*)(*args++); + int p = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(ffi_arg*)retp = r; +}} + +/* float tests */ +void f_f_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f) { fprintf(out,"wrong data for f_f\n"); exit(1); } + {float a = *(float*)(*args++); + float r=a+1.0; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + *(float*)retp = r; +}} +void f_f2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2) { fprintf(out,"wrong data for f_f2\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + 
*(float*)retp = r; +}} +void f_f4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4) { fprintf(out,"wrong data for f_f4\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(float*)retp = r; +}} +void f_f8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8) { fprintf(out,"wrong data for f_f8\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(float*)retp = r; +}} +void f_f16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f16) { fprintf(out,"wrong data for f_f16\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(float*)retp = r; +}} +void f_f24_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f24) { fprintf(out,"wrong data for f_f24\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + float s = *(float*)(*args++); + float t = *(float*)(*args++); + float u = *(float*)(*args++); + float v = *(float*)(*args++); + float w = *(float*)(*args++); + float x = *(float*)(*args++); + float y = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + *(float*)retp = r; +}} + +/* double tests */ +void d_d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d) { fprintf(out,"wrong data for d_d\n"); exit(1); } + {double a = *(double*)(*args++); + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + *(double*)retp = r; +}} +void d_d2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != 
(void*)&d_d2) { fprintf(out,"wrong data for d_d2\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + *(double*)retp = r; +}} +void d_d4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4) { fprintf(out,"wrong data for d_d4\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_d8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8) { fprintf(out,"wrong data for d_d8\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(double*)retp = r; +}} +void d_d16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d16) { fprintf(out,"wrong data for d_d16\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(double*)retp = r; +}} + +/* pointer tests */ +void vp_vpdpcpsp_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&vp_vpdpcpsp) { fprintf(out,"wrong data for vp_vpdpcpsp\n"); exit(1); } + {void* a = *(void* *)(*args++); + double* b = *(double* *)(*args++); + char* c = *(char* *)(*args++); + Int* d = *(Int* *)(*args++); + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + *(void* *)retp = ret; +}} + +/* mixed number tests */ +void uc_ucsil_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&uc_ucsil) { fprintf(out,"wrong data for uc_ucsil\n"); exit(1); } + {uchar a = *(unsigned char *)(*args++); + ushort b = *(unsigned short *)(*args++); + uint c = *(unsigned int *)(*args++); + ulong d = *(unsigned long *)(*args++); + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void d_iidd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iidd) { fprintf(out,"wrong data for d_iidd\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + 
fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_iiidi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iiidi) { fprintf(out,"wrong data for d_iiidi\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + int e = *(int*)(*args++); + double r=a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + *(double*)retp = r; +}} +void d_idid_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_idid) { fprintf(out,"wrong data for d_idid\n"); exit(1); } + {int a = *(int*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_fdi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_fdi) { fprintf(out,"wrong data for d_fdi\n"); exit(1); } + {float a = *(float*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double r=a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + *(double*)retp = r; +}} +void us_cdcd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&us_cdcd) { fprintf(out,"wrong data for us_cdcd\n"); exit(1); } + {char a = *(char*)(*args++); + double b = *(double*)(*args++); + char c = *(char*)(*args++); + double d = *(double*)(*args++); + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void ll_iiilli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_iiilli) { fprintf(out,"wrong data for ll_iiilli\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + long long d = *(long long *)(*args++); + int e = *(int*)(*args++); + long long r = (long long)(int)a + (long long)(int)b + (long long)(int)c + d + (long long)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + *(long long *)retp = r; +}} +void ll_flli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_flli) { fprintf(out,"wrong data for ll_flli\n"); exit(1); } + {float a = *(float*)(*args++); + long long b = *(long long *)(*args++); + int c = *(int*)(*args++); + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + *(long long *)retp = r; +}} +void f_fi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_fi) { fprintf(out,"wrong data for f_fi\n"); exit(1); } + {float a = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + *(float*)retp = r; +}} +void f_f2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2i) { fprintf(out,"wrong data for f_f2i\n"); exit(1); } + {float a = *(float*)(*args++); + 
float b = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(float*)retp = r; +}} +void f_f3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f3i) { fprintf(out,"wrong data for f_f3i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(float*)retp = r; +}} +void f_f4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4i) { fprintf(out,"wrong data for f_f4i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(float*)retp = r; +}} +void f_f7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f7i) { fprintf(out,"wrong data for f_f7i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(float*)retp = r; +}} +void f_f8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8i) { fprintf(out,"wrong data for f_f8i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(float*)retp = r; +}} +void f_f12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f12i) { fprintf(out,"wrong data for f_f12i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(float*)retp = r; +}} +void f_f13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f13i) { fprintf(out,"wrong data for f_f13i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = 
*(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(float*)retp = r; +}} +void d_di_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_di) { fprintf(out,"wrong data for d_di\n"); exit(1); } + {double a = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + *(double*)retp = r; +}} +void d_d2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d2i) { fprintf(out,"wrong data for d_d2i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(double*)retp = r; +}} +void d_d3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d3i) { fprintf(out,"wrong data for d_d3i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(double*)retp = r; +}} +void d_d4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4i) { fprintf(out,"wrong data for d_d4i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(double*)retp = r; +}} +void d_d7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d7i) { fprintf(out,"wrong data for d_d7i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(double*)retp = r; +}} +void d_d8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8i) { fprintf(out,"wrong data for d_d8i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(double*)retp = r; +}} +void d_d12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d12i) { fprintf(out,"wrong data for d_d12i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = 
*(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(double*)retp = r; +}} +void d_d13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d13i) { fprintf(out,"wrong data for d_d13i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(double*)retp = r; +}} + +/* small structure return tests */ +void S1_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S1_v) { fprintf(out,"wrong data for S1_v\n"); exit(1); } + {Size1 r = Size1_1; + fprintf(out,"Size1 f(void):"); + fflush(out); + *(Size1*)retp = r; +}} +void S2_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S2_v) { fprintf(out,"wrong data for S2_v\n"); exit(1); } + {Size2 r = Size2_1; + fprintf(out,"Size2 f(void):"); + fflush(out); + *(Size2*)retp = r; +}} +void S3_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S3_v) { fprintf(out,"wrong data for S3_v\n"); exit(1); } + {Size3 r = Size3_1; + fprintf(out,"Size3 f(void):"); + fflush(out); + *(Size3*)retp = r; +}} +void S4_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S4_v) { fprintf(out,"wrong data for S4_v\n"); exit(1); } + {Size4 r = Size4_1; + fprintf(out,"Size4 f(void):"); + fflush(out); + *(Size4*)retp = r; +}} +void S7_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S7_v) { fprintf(out,"wrong data for S7_v\n"); exit(1); } + {Size7 r = Size7_1; + fprintf(out,"Size7 f(void):"); + fflush(out); + *(Size7*)retp = r; +}} +void S8_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S8_v) { fprintf(out,"wrong data for S8_v\n"); exit(1); } + {Size8 r = Size8_1; + fprintf(out,"Size8 f(void):"); + fflush(out); + *(Size8*)retp = r; +}} +void S12_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S12_v) { fprintf(out,"wrong data for S12_v\n"); exit(1); } + {Size12 r = Size12_1; + fprintf(out,"Size12 f(void):"); + fflush(out); + *(Size12*)retp = r; +}} +void S15_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S15_v) { fprintf(out,"wrong data for S15_v\n"); exit(1); } + {Size15 r = Size15_1; + fprintf(out,"Size15 f(void):"); + fflush(out); + *(Size15*)retp = r; +}} +void S16_v_simulator (ffi_cif* cif, void* retp, 
/*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S16_v) { fprintf(out,"wrong data for S16_v\n"); exit(1); } + {Size16 r = Size16_1; + fprintf(out,"Size16 f(void):"); + fflush(out); + *(Size16*)retp = r; +}} + +/* structure tests */ +void I_III_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&I_III) { fprintf(out,"wrong data for I_III\n"); exit(1); } + {Int a = *(Int*)(*args++); + Int b = *(Int*)(*args++); + Int c = *(Int*)(*args++); + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + *(Int*)retp = r; +}} +void C_CdC_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&C_CdC) { fprintf(out,"wrong data for C_CdC\n"); exit(1); } + {Char a = *(Char*)(*args++); + double b = *(double*)(*args++); + Char c = *(Char*)(*args++); + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + *(Char*)retp = r; +}} +void F_Ffd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&F_Ffd) { fprintf(out,"wrong data for F_Ffd\n"); exit(1); } + {Float a = *(Float*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Float r; + r.x = a.x + b + c; + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Float*)retp = r; +}} +void D_fDd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_fDd) { fprintf(out,"wrong data for D_fDd\n"); exit(1); } + {float a = *(float*)(*args++); + Double b = *(Double*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + *(Double*)retp = r; +}} +void D_Dfd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_Dfd) { fprintf(out,"wrong data for D_Dfd\n"); exit(1); } + {Double a = *(Double*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Double*)retp = r; +}} +void J_JiJ_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&J_JiJ) { fprintf(out,"wrong data for J_JiJ\n"); exit(1); } + {J a = *(J*)(*args++); + int b= *(int*)(*args++); + J c = *(J*)(*args++); + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + *(J*)retp = r; +}} +#ifndef SKIP_EXTRA_STRUCTS +void T_TcT_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&T_TcT) { fprintf(out,"wrong data for T_TcT\n"); exit(1); } + {T a = *(T*)(*args++); + char b = *(char*)(*args++); + T c = *(T*)(*args++); + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + *(T*)retp = r; +}} +void X_BcdB_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&X_BcdB) { fprintf(out,"wrong data for X_BcdB\n"); exit(1); } + {B a = *(B*)(*args++); + char b = *(char*)(*args++); + double c = *(double*)(*args++); + B d = *(B*)(*args++); + static X xr={"return val",'R'}; 
+ X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + *(X*)retp = r; +}} +#endif + +/* gpargs boundary tests */ +void l_l0K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l0K) { fprintf(out,"wrong data for l_l0K\n"); exit(1); } + {K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l1K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l1K) { fprintf(out,"wrong data for l_l1K\n"); exit(1); } + {long a1 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l2K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l2K) { fprintf(out,"wrong data for l_l2K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l3K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l3K) { fprintf(out,"wrong data for l_l3K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l4K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l4K) { fprintf(out,"wrong data for l_l4K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l5K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l5K) { fprintf(out,"wrong data for l_l5K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l6K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l6K) { fprintf(out,"wrong data for l_l6K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = 
*(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void f_f17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f17l3L) { fprintf(out,"wrong data for f_f17l3L\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(float*)retp = r; +}} +void d_d17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d17l3L) { fprintf(out,"wrong data for d_d17l3L\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double q = *(double*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(double*)retp = r; +}} +void ll_l2ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l2ll) { fprintf(out,"wrong data for ll_l2ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l3ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l3ll) { fprintf(out,"wrong data for ll_l3ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = 
*(long*)(*args++); + long a3 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l4ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l4ll) { fprintf(out,"wrong data for ll_l4ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l5ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l5ll) { fprintf(out,"wrong data for ll_l5ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l6ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l6ll) { fprintf(out,"wrong data for ll_l6ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l7ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l7ll) { fprintf(out,"wrong data for ll_l7ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void d_l2d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l2d) { fprintf(out,"wrong data for d_l2d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l3d_simulator 
(ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l3d) { fprintf(out,"wrong data for d_l3d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l4d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l4d) { fprintf(out,"wrong data for d_l4d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l5d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l5d) { fprintf(out,"wrong data for d_l5d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l6d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l6d) { fprintf(out,"wrong data for d_l6d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l7d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l7d) { fprintf(out,"wrong data for d_l7d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + *(double*)retp = r; +}} + + +/* + * The way we run these tests - first call the function directly, then + * through vacall() - there is the danger that arguments or results seem + * to be passed correctly, but what we are seeing are in fact the vestiges + * (traces) or the previous call. This may seriously fake the test. + * Avoid this by clearing the registers between the first and the second call. 
+ */ +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +int main (void) +{ + void* callback_code; + void* callback_writable; +#define ALLOC_CALLBACK() \ + callback_writable = ffi_closure_alloc(sizeof(ffi_closure),&callback_code); \ + if (!callback_writable) abort() +#define PREP_CALLBACK(cif,simulator,data) \ + if (ffi_prep_closure_loc(callback_writable,&(cif),simulator,data,callback_code) != FFI_OK) abort() +#define FREE_CALLBACK() \ + ffi_closure_free(callback_writable) + + ffi_type_char = (char)(-1) < 0 ? ffi_type_schar : ffi_type_uchar; + out = stdout; + +#if (!defined(DGTEST)) || DGTEST == 1 + /* void tests */ + v_v(); + clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + PREP_CALLBACK(cif,v_v_simulator,(void*)&v_v); + ((void (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); +#endif + + /* int tests */ + { int ir; + +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + PREP_CALLBACK(cif,i_v_simulator,(void*)&i_v); + ir = ((int (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i_simulator,(void*)&i_i); + ir = ((int (ABI_ATTR *) (int)) callback_code) (i1); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i2_simulator,(void*)&i_i2); + ir = ((int (ABI_ATTR *) (int,int)) callback_code) (i1,i2); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i4_simulator,(void*)&i_i4); + ir = ((int (ABI_ATTR *) (int,int,int,int)) callback_code) (i1,i2,i3,i4); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + 
ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i8_simulator,(void*)&i_i8); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i16_simulator,(void*)&i_i16); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int,int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + } + + /* float tests */ + { float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f_simulator,(void*)&f_f); + fr = ((float (ABI_ATTR *) (float)) callback_code) (f1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2_simulator,(void*)&f_f2); + fr = ((float (ABI_ATTR *) (float,float)) callback_code) (f1,f2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4_simulator,(void*)&f_f4); + fr = ((float (ABI_ATTR *) (float,float,float,float)) callback_code) (f1,f2,f3,f4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8_simulator,(void*)&f_f8); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f16_simulator,(void*)&f_f16); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f24_simulator,(void*)&f_f24); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + + } + + /* double tests */ + { double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d_simulator,(void*)&d_d); + dr = ((double (ABI_ATTR *) (double)) callback_code) (d1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2_simulator,(void*)&d_d2); + dr = ((double (ABI_ATTR *) (double,double)) callback_code) (d1,d2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4_simulator,(void*)&d_d4); + dr = ((double (ABI_ATTR *) (double,double,double,double)) callback_code) (d1,d2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8_simulator,(void*)&d_d8); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d16_simulator,(void*)&d_d16); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* pointer tests */ + { void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + PREP_CALLBACK(cif,vp_vpdpcpsp_simulator,(void*)&vp_vpdpcpsp); + vpr = ((void* (ABI_ATTR *) (void*,double*,char*,Int*)) callback_code) (&uc1,&d2,str3,&I4); + } + FREE_CALLBACK(); + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + } + + /* mixed number tests */ + { uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1,us2,ui3,ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + PREP_CALLBACK(cif,uc_ucsil_simulator,(void*)&uc_ucsil); + ucr = ((uchar (ABI_ATTR *) (uchar,ushort,uint,ulong)) callback_code) (uc1,us2,ui3,ul4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iidd_simulator,(void*)&d_iidd); + dr = ((double (ABI_ATTR *) (int,int,double,double)) callback_code) (i1,i2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + 
ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iiidi_simulator,(void*)&d_iiidi); + dr = ((double (ABI_ATTR *) (int,int,int,double,int)) callback_code) (i1,i2,i3,d4,i5); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_idid_simulator,(void*)&d_idid); + dr = ((double (ABI_ATTR *) (int,double,int,double)) callback_code) (i1,d2,i3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_fdi_simulator,(void*)&d_fdi); + dr = ((double (ABI_ATTR *) (float,double,int)) callback_code) (f1,d2,i3); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + PREP_CALLBACK(cif,us_cdcd_simulator,(void*)&us_cdcd); + usr = ((ushort (ABI_ATTR *) (char,double,char,double)) callback_code) (c1,d2,c3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_iiilli_simulator,(void*)&ll_iiilli); + llr = ((long long (ABI_ATTR *) (int,int,int,long long,int)) callback_code) (i1,i2,i3,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_flli_simulator,(void*)&ll_flli); + llr = ((long long (ABI_ATTR *) (float,long long,int)) callback_code) (f13,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_fi_simulator,(void*)&f_fi); + fr = ((float (ABI_ATTR *) (float,int)) callback_code) (f1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2i_simulator,(void*)&f_f2i); + fr = ((float (ABI_ATTR *) (float,float,int)) callback_code) (f1,f2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f3i_simulator,(void*)&f_f3i); + fr = ((float (ABI_ATTR *) (float,float,float,int)) callback_code) (f1,f2,f3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4i_simulator,(void*)&f_f4i); + fr = ((float (ABI_ATTR *) (float,float,float,float,int)) callback_code) (f1,f2,f3,f4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f7i_simulator,(void*)&f_f7i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8i_simulator,(void*)&f_f8i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f13i_simulator,(void*)&f_f13i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_di_simulator,(void*)&d_di); + dr = ((double (ABI_ATTR *) (double,int)) callback_code) (d1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2i_simulator,(void*)&d_d2i); + dr = ((double (ABI_ATTR *) (double,double,int)) callback_code) (d1,d2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d3i_simulator,(void*)&d_d3i); + dr = ((double (ABI_ATTR *) (double,double,double,int)) callback_code) (d1,d2,d3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4i_simulator,(void*)&d_d4i); + dr = ((double (ABI_ATTR *) (double,double,double,double,int)) callback_code) (d1,d2,d3,d4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d7i_simulator,(void*)&d_d7i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8i_simulator,(void*)&d_d8i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d12i_simulator,(void*)&d_d12i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d13i_simulator,(void*)&d_d13i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* small structure return tests */ +#if (!defined(DGTEST)) || DGTEST == 43 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + PREP_CALLBACK(cif,S1_v_simulator,(void*)&S1_v); + r = ((Size1 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + PREP_CALLBACK(cif,S2_v_simulator,(void*)&S2_v); + r = ((Size2 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size3 r = S3_v(); + 
fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + PREP_CALLBACK(cif,S3_v_simulator,(void*)&S3_v); + r = ((Size3 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + PREP_CALLBACK(cif,S4_v_simulator,(void*)&S4_v); + r = ((Size4 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + PREP_CALLBACK(cif,S7_v_simulator,(void*)&S7_v); + r = ((Size7 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + PREP_CALLBACK(cif,S8_v_simulator,(void*)&S8_v); + r = ((Size8 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* 
ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + PREP_CALLBACK(cif,S12_v_simulator,(void*)&S12_v); + r = ((Size12 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + PREP_CALLBACK(cif,S15_v_simulator,(void*)&S15_v); + r = ((Size15 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + PREP_CALLBACK(cif,S16_v_simulator,(void*)&S16_v); + r = ((Size16 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif + + + /* structure tests */ + { Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 52 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = 
sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + PREP_CALLBACK(cif,I_III_simulator,(void*)&I_III); + Ir = ((Int (ABI_ATTR *) (Int,Int,Int)) callback_code) (I1,I2,I3); + } + FREE_CALLBACK(); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); + Cr.x = '\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + PREP_CALLBACK(cif,C_CdC_simulator,(void*)&C_CdC); + Cr = ((Char (ABI_ATTR *) (Char,double,Char)) callback_code) (C1,d2,C3); + } + FREE_CALLBACK(); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 54 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + PREP_CALLBACK(cif,F_Ffd_simulator,(void*)&F_Ffd); + Fr = ((Float (ABI_ATTR *) (Float,float,double)) callback_code) (F1,f2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 55 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_fDd_simulator,(void*)&D_fDd); + Dr = ((Double (ABI_ATTR *) (float,Double,double)) callback_code) (f1,D2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_Dfd_simulator,(void*)&D_Dfd); + Dr = ((Double (ABI_ATTR *) (Double,float,double)) callback_code) (D1,f2,d3); + } + 
FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 57 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + PREP_CALLBACK(cif,J_JiJ_simulator,(void*)&J_JiJ); + Jr = ((J (ABI_ATTR *) (J,int,J)) callback_code) (J1,i2,J2); + } + FREE_CALLBACK(); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif + +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 58 + Tr = T_TcT(T1,' ',T2); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + PREP_CALLBACK(cif,T_TcT_simulator,(void*)&T_TcT); + Tr = ((T (ABI_ATTR *) (T,char,T)) callback_code) (T1,' ',T2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif + +#ifndef SKIP_X +#if (!defined(DGTEST)) || DGTEST == 59 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + PREP_CALLBACK(cif,X_BcdB_simulator,(void*)&X_BcdB); + Xr = ((X (ABI_ATTR *) (B,char,double,B)) callback_code) (B1,c2,d3,B2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif +#endif + } + + + /* gpargs boundary tests */ + { + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 60 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l0K_simulator,(void*)l_l0K); + lr = ((long (ABI_ATTR 
*) (K,long)) callback_code) (K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l1K_simulator,(void*)l_l1K); + lr = ((long (ABI_ATTR *) (long,K,long)) callback_code) (l1,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l2K_simulator,(void*)l_l2K); + lr = ((long (ABI_ATTR *) (long,long,K,long)) callback_code) (l1,l2,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l3K_simulator,(void*)l_l3K); + lr = ((long (ABI_ATTR *) (long,long,long,K,long)) callback_code) (l1,l2,l3,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l4K_simulator,(void*)l_l4K); + lr = ((long (ABI_ATTR *) (long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l5K_simulator,(void*)l_l5K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l6K_simulator,(void*)l_l6K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,l6,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 67 + fr = 
f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f17l3L_simulator,(void*)&f_f17l3L); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,long,long,long,L)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 68 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d17l3L_simulator,(void*)&d_d17l3L); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,long,long,long,L)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 69 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l2ll_simulator,(void*)ll_l2ll); + llr = ((long long (ABI_ATTR *) (long,long,long long,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l3ll_simulator,(void*)ll_l3ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long long,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; 
clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l4ll_simulator,(void*)ll_l4ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l5ll_simulator,(void*)ll_l5ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l6ll_simulator,(void*)ll_l6ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l7ll_simulator,(void*)ll_l7ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 75 + dr = d_l2d(l1,l2,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l2d_simulator,(void*)d_l2d); + dr = ((double (ABI_ATTR *) (long,long,double,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l3d(l1,l2,l3,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, 
&ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l3d_simulator,(void*)d_l3d); + dr = ((double (ABI_ATTR *) (long,long,long,double,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l4d(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l4d_simulator,(void*)d_l4d); + dr = ((double (ABI_ATTR *) (long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l5d(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l5d_simulator,(void*)d_l5d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l6d(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l6d_simulator,(void*)d_l6d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l7d_simulator,(void*)d_l7d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + + } + + exit(0); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c new file mode 100644 index 0000000..c2633c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c @@ -0,0 +1,2885 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 
of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* { dg-do run } */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include "alignof.h" +#include <ffi.h> + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() + +#if defined(__sparc__) && defined(__sun) && defined(__SUNPRO_C) /* SUNWspro cc */ +/* SunPRO cc miscompiles the simulator function for X_BcdB: d.i[1] is + * temporarily stored in %l2 and put onto the stack from %l2, but in between + * the copy of X has used %l2 as a counter without saving and restoring its + * value. + */ +#define SKIP_X +#endif +#if defined(__mipsn32__) && !defined(__GNUC__) +/* The X test crashes for an unknown reason. */ +#define SKIP_X +#endif + + +/* These functions simulate the behaviour of the functions defined in testcases.c.
*/ + +/* void tests */ +void v_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&v_v) { fprintf(out,"wrong data for v_v\n"); exit(1); } + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +void i_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_v) { fprintf(out,"wrong data for i_v\n"); exit(1); } + {int r=99; + fprintf(out,"int f(void):"); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i) { fprintf(out,"wrong data for i_i\n"); exit(1); } + int a = *(int*)(*args++); + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + *(ffi_arg*)retp = r; +} +void i_i2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i2) { fprintf(out,"wrong data for i_i2\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i4) { fprintf(out,"wrong data for i_i4\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i8) { fprintf(out,"wrong data for i_i8\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i16) { fprintf(out,"wrong data for i_i16\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int i = *(int*)(*args++); + int j = *(int*)(*args++); + int k = *(int*)(*args++); + int l = *(int*)(*args++); + int m = *(int*)(*args++); + int n = *(int*)(*args++); + int o = *(int*)(*args++); + int p = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(ffi_arg*)retp = r; +}} + +/* float tests */ +void f_f_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f) { fprintf(out,"wrong data for f_f\n"); exit(1); } + {float a = *(float*)(*args++); + float r=a+1.0; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + *(float*)retp = r; +}} +void f_f2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2) { fprintf(out,"wrong data for f_f2\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + 
*(float*)retp = r; +}} +void f_f4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4) { fprintf(out,"wrong data for f_f4\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(float*)retp = r; +}} +void f_f8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8) { fprintf(out,"wrong data for f_f8\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(float*)retp = r; +}} +void f_f16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f16) { fprintf(out,"wrong data for f_f16\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(float*)retp = r; +}} +void f_f24_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f24) { fprintf(out,"wrong data for f_f24\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + float s = *(float*)(*args++); + float t = *(float*)(*args++); + float u = *(float*)(*args++); + float v = *(float*)(*args++); + float w = *(float*)(*args++); + float x = *(float*)(*args++); + float y = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + *(float*)retp = r; +}} + +/* double tests */ +void d_d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d) { fprintf(out,"wrong data for d_d\n"); exit(1); } + {double a = *(double*)(*args++); + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + *(double*)retp = r; +}} +void d_d2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != 
(void*)&d_d2) { fprintf(out,"wrong data for d_d2\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + *(double*)retp = r; +}} +void d_d4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4) { fprintf(out,"wrong data for d_d4\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_d8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8) { fprintf(out,"wrong data for d_d8\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(double*)retp = r; +}} +void d_d16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d16) { fprintf(out,"wrong data for d_d16\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(double*)retp = r; +}} + +/* pointer tests */ +void vp_vpdpcpsp_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&vp_vpdpcpsp) { fprintf(out,"wrong data for vp_vpdpcpsp\n"); exit(1); } + {void* a = *(void* *)(*args++); + double* b = *(double* *)(*args++); + char* c = *(char* *)(*args++); + Int* d = *(Int* *)(*args++); + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + *(void* *)retp = ret; +}} + +/* mixed number tests */ +void uc_ucsil_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&uc_ucsil) { fprintf(out,"wrong data for uc_ucsil\n"); exit(1); } + {uchar a = *(unsigned char *)(*args++); + ushort b = *(unsigned short *)(*args++); + uint c = *(unsigned int *)(*args++); + ulong d = *(unsigned long *)(*args++); + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void d_iidd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iidd) { fprintf(out,"wrong data for d_iidd\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + 
fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_iiidi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iiidi) { fprintf(out,"wrong data for d_iiidi\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + int e = *(int*)(*args++); + double r=a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + *(double*)retp = r; +}} +void d_idid_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_idid) { fprintf(out,"wrong data for d_idid\n"); exit(1); } + {int a = *(int*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_fdi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_fdi) { fprintf(out,"wrong data for d_fdi\n"); exit(1); } + {float a = *(float*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double r=a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + *(double*)retp = r; +}} +void us_cdcd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&us_cdcd) { fprintf(out,"wrong data for us_cdcd\n"); exit(1); } + {char a = *(char*)(*args++); + double b = *(double*)(*args++); + char c = *(char*)(*args++); + double d = *(double*)(*args++); + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void ll_iiilli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_iiilli) { fprintf(out,"wrong data for ll_iiilli\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + long long d = *(long long *)(*args++); + int e = *(int*)(*args++); + long long r = (long long)(int)a + (long long)(int)b + (long long)(int)c + d + (long long)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + *(long long *)retp = r; +}} +void ll_flli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_flli) { fprintf(out,"wrong data for ll_flli\n"); exit(1); } + {float a = *(float*)(*args++); + long long b = *(long long *)(*args++); + int c = *(int*)(*args++); + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + *(long long *)retp = r; +}} +void f_fi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_fi) { fprintf(out,"wrong data for f_fi\n"); exit(1); } + {float a = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + *(float*)retp = r; +}} +void f_f2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2i) { fprintf(out,"wrong data for f_f2i\n"); exit(1); } + {float a = *(float*)(*args++); + 
float b = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(float*)retp = r; +}} +void f_f3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f3i) { fprintf(out,"wrong data for f_f3i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(float*)retp = r; +}} +void f_f4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4i) { fprintf(out,"wrong data for f_f4i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(float*)retp = r; +}} +void f_f7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f7i) { fprintf(out,"wrong data for f_f7i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(float*)retp = r; +}} +void f_f8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8i) { fprintf(out,"wrong data for f_f8i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(float*)retp = r; +}} +void f_f12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f12i) { fprintf(out,"wrong data for f_f12i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(float*)retp = r; +}} +void f_f13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f13i) { fprintf(out,"wrong data for f_f13i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = 
*(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(float*)retp = r; +}} +void d_di_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_di) { fprintf(out,"wrong data for d_di\n"); exit(1); } + {double a = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + *(double*)retp = r; +}} +void d_d2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d2i) { fprintf(out,"wrong data for d_d2i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(double*)retp = r; +}} +void d_d3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d3i) { fprintf(out,"wrong data for d_d3i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(double*)retp = r; +}} +void d_d4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4i) { fprintf(out,"wrong data for d_d4i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(double*)retp = r; +}} +void d_d7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d7i) { fprintf(out,"wrong data for d_d7i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(double*)retp = r; +}} +void d_d8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8i) { fprintf(out,"wrong data for d_d8i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(double*)retp = r; +}} +void d_d12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d12i) { fprintf(out,"wrong data for d_d12i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = 
*(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(double*)retp = r; +}} +void d_d13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d13i) { fprintf(out,"wrong data for d_d13i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(double*)retp = r; +}} + +/* small structure return tests */ +void S1_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S1_v) { fprintf(out,"wrong data for S1_v\n"); exit(1); } + {Size1 r = Size1_1; + fprintf(out,"Size1 f(void):"); + fflush(out); + *(Size1*)retp = r; +}} +void S2_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S2_v) { fprintf(out,"wrong data for S2_v\n"); exit(1); } + {Size2 r = Size2_1; + fprintf(out,"Size2 f(void):"); + fflush(out); + *(Size2*)retp = r; +}} +void S3_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S3_v) { fprintf(out,"wrong data for S3_v\n"); exit(1); } + {Size3 r = Size3_1; + fprintf(out,"Size3 f(void):"); + fflush(out); + *(Size3*)retp = r; +}} +void S4_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S4_v) { fprintf(out,"wrong data for S4_v\n"); exit(1); } + {Size4 r = Size4_1; + fprintf(out,"Size4 f(void):"); + fflush(out); + *(Size4*)retp = r; +}} +void S7_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S7_v) { fprintf(out,"wrong data for S7_v\n"); exit(1); } + {Size7 r = Size7_1; + fprintf(out,"Size7 f(void):"); + fflush(out); + *(Size7*)retp = r; +}} +void S8_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S8_v) { fprintf(out,"wrong data for S8_v\n"); exit(1); } + {Size8 r = Size8_1; + fprintf(out,"Size8 f(void):"); + fflush(out); + *(Size8*)retp = r; +}} +void S12_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S12_v) { fprintf(out,"wrong data for S12_v\n"); exit(1); } + {Size12 r = Size12_1; + fprintf(out,"Size12 f(void):"); + fflush(out); + *(Size12*)retp = r; +}} +void S15_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S15_v) { fprintf(out,"wrong data for S15_v\n"); exit(1); } + {Size15 r = Size15_1; + fprintf(out,"Size15 f(void):"); + fflush(out); + *(Size15*)retp = r; +}} +void S16_v_simulator (ffi_cif* cif, void* retp, 
/*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S16_v) { fprintf(out,"wrong data for S16_v\n"); exit(1); } + {Size16 r = Size16_1; + fprintf(out,"Size16 f(void):"); + fflush(out); + *(Size16*)retp = r; +}} + +/* structure tests */ +void I_III_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&I_III) { fprintf(out,"wrong data for I_III\n"); exit(1); } + {Int a = *(Int*)(*args++); + Int b = *(Int*)(*args++); + Int c = *(Int*)(*args++); + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + *(Int*)retp = r; +}} +void C_CdC_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&C_CdC) { fprintf(out,"wrong data for C_CdC\n"); exit(1); } + {Char a = *(Char*)(*args++); + double b = *(double*)(*args++); + Char c = *(Char*)(*args++); + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + *(Char*)retp = r; +}} +void F_Ffd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&F_Ffd) { fprintf(out,"wrong data for F_Ffd\n"); exit(1); } + {Float a = *(Float*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Float r; + r.x = a.x + b + c; + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Float*)retp = r; +}} +void D_fDd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_fDd) { fprintf(out,"wrong data for D_fDd\n"); exit(1); } + {float a = *(float*)(*args++); + Double b = *(Double*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + *(Double*)retp = r; +}} +void D_Dfd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_Dfd) { fprintf(out,"wrong data for D_Dfd\n"); exit(1); } + {Double a = *(Double*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Double*)retp = r; +}} +void J_JiJ_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&J_JiJ) { fprintf(out,"wrong data for J_JiJ\n"); exit(1); } + {J a = *(J*)(*args++); + int b= *(int*)(*args++); + J c = *(J*)(*args++); + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + *(J*)retp = r; +}} +#ifndef SKIP_EXTRA_STRUCTS +void T_TcT_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&T_TcT) { fprintf(out,"wrong data for T_TcT\n"); exit(1); } + {T a = *(T*)(*args++); + char b = *(char*)(*args++); + T c = *(T*)(*args++); + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + *(T*)retp = r; +}} +void X_BcdB_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&X_BcdB) { fprintf(out,"wrong data for X_BcdB\n"); exit(1); } + {B a = *(B*)(*args++); + char b = *(char*)(*args++); + double c = *(double*)(*args++); + B d = *(B*)(*args++); + static X xr={"return val",'R'}; 
+ X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + *(X*)retp = r; +}} +#endif + +/* gpargs boundary tests */ +void l_l0K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l0K) { fprintf(out,"wrong data for l_l0K\n"); exit(1); } + {K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l1K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l1K) { fprintf(out,"wrong data for l_l1K\n"); exit(1); } + {long a1 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l2K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l2K) { fprintf(out,"wrong data for l_l2K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l3K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l3K) { fprintf(out,"wrong data for l_l3K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l4K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l4K) { fprintf(out,"wrong data for l_l4K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l5K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l5K) { fprintf(out,"wrong data for l_l5K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l6K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l6K) { fprintf(out,"wrong data for l_l6K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = 
*(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void f_f17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f17l3L) { fprintf(out,"wrong data for f_f17l3L\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(float*)retp = r; +}} +void d_d17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d17l3L) { fprintf(out,"wrong data for d_d17l3L\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double q = *(double*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(double*)retp = r; +}} +void ll_l2ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l2ll) { fprintf(out,"wrong data for ll_l2ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l3ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l3ll) { fprintf(out,"wrong data for ll_l3ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = 
*(long*)(*args++); + long a3 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l4ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l4ll) { fprintf(out,"wrong data for ll_l4ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l5ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l5ll) { fprintf(out,"wrong data for ll_l5ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l6ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l6ll) { fprintf(out,"wrong data for ll_l6ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l7ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l7ll) { fprintf(out,"wrong data for ll_l7ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void d_l2d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l2d) { fprintf(out,"wrong data for d_l2d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l3d_simulator 
(ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l3d) { fprintf(out,"wrong data for d_l3d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l4d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l4d) { fprintf(out,"wrong data for d_l4d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l5d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l5d) { fprintf(out,"wrong data for d_l5d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l6d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l6d) { fprintf(out,"wrong data for d_l6d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l7d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l7d) { fprintf(out,"wrong data for d_l7d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + *(double*)retp = r; +}} + + +/* + * The way we run these tests - first call the function directly, then + * through vacall() - there is the danger that arguments or results seem + * to be passed correctly, but what we are seeing are in fact the vestiges + * (traces) of the previous call. This may seriously fake the test. + * Avoid this by clearing the registers between the first and the second call.
+ */ +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +int main (void) +{ + void* callback_code; + void* callback_writable; +#define ALLOC_CALLBACK() \ + callback_writable = ffi_closure_alloc(sizeof(ffi_closure),&callback_code); \ + if (!callback_writable) abort() +#define PREP_CALLBACK(cif,simulator,data) \ + if (ffi_prep_closure_loc(callback_writable,&(cif),simulator,data,callback_code) != FFI_OK) abort() +#define FREE_CALLBACK() \ + ffi_closure_free(callback_writable) + + ffi_type_char = (char)(-1) < 0 ? ffi_type_schar : ffi_type_uchar; + out = stdout; + +#if (!defined(DGTEST)) || DGTEST == 1 + /* void tests */ + v_v(); + clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + PREP_CALLBACK(cif,v_v_simulator,(void*)&v_v); + ((void (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); +#endif + + /* int tests */ + { int ir; + +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + PREP_CALLBACK(cif,i_v_simulator,(void*)&i_v); + ir = ((int (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i_simulator,(void*)&i_i); + ir = ((int (ABI_ATTR *) (int)) callback_code) (i1); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i2_simulator,(void*)&i_i2); + ir = ((int (ABI_ATTR *) (int,int)) callback_code) (i1,i2); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i4_simulator,(void*)&i_i4); + ir = ((int (ABI_ATTR *) (int,int,int,int)) callback_code) (i1,i2,i3,i4); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + 
ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i8_simulator,(void*)&i_i8); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i16_simulator,(void*)&i_i16); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int,int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + } + + /* float tests */ + { float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f_simulator,(void*)&f_f); + fr = ((float (ABI_ATTR *) (float)) callback_code) (f1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2_simulator,(void*)&f_f2); + fr = ((float (ABI_ATTR *) (float,float)) callback_code) (f1,f2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4_simulator,(void*)&f_f4); + fr = ((float (ABI_ATTR *) (float,float,float,float)) callback_code) (f1,f2,f3,f4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8_simulator,(void*)&f_f8); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f16_simulator,(void*)&f_f16); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f24_simulator,(void*)&f_f24); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + + } + + /* double tests */ + { double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d_simulator,(void*)&d_d); + dr = ((double (ABI_ATTR *) (double)) callback_code) (d1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2_simulator,(void*)&d_d2); + dr = ((double (ABI_ATTR *) (double,double)) callback_code) (d1,d2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4_simulator,(void*)&d_d4); + dr = ((double (ABI_ATTR *) (double,double,double,double)) callback_code) (d1,d2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8_simulator,(void*)&d_d8); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d16_simulator,(void*)&d_d16); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* pointer tests */ + { void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + PREP_CALLBACK(cif,vp_vpdpcpsp_simulator,(void*)&vp_vpdpcpsp); + vpr = ((void* (ABI_ATTR *) (void*,double*,char*,Int*)) callback_code) (&uc1,&d2,str3,&I4); + } + FREE_CALLBACK(); + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + } + + /* mixed number tests */ + { uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1,us2,ui3,ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + PREP_CALLBACK(cif,uc_ucsil_simulator,(void*)&uc_ucsil); + ucr = ((uchar (ABI_ATTR *) (uchar,ushort,uint,ulong)) callback_code) (uc1,us2,ui3,ul4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iidd_simulator,(void*)&d_iidd); + dr = ((double (ABI_ATTR *) (int,int,double,double)) callback_code) (i1,i2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + 
ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iiidi_simulator,(void*)&d_iiidi); + dr = ((double (ABI_ATTR *) (int,int,int,double,int)) callback_code) (i1,i2,i3,d4,i5); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_idid_simulator,(void*)&d_idid); + dr = ((double (ABI_ATTR *) (int,double,int,double)) callback_code) (i1,d2,i3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_fdi_simulator,(void*)&d_fdi); + dr = ((double (ABI_ATTR *) (float,double,int)) callback_code) (f1,d2,i3); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + PREP_CALLBACK(cif,us_cdcd_simulator,(void*)&us_cdcd); + usr = ((ushort (ABI_ATTR *) (char,double,char,double)) callback_code) (c1,d2,c3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_iiilli_simulator,(void*)&ll_iiilli); + llr = ((long long (ABI_ATTR *) (int,int,int,long long,int)) callback_code) (i1,i2,i3,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_flli_simulator,(void*)&ll_flli); + llr = ((long long (ABI_ATTR *) (float,long long,int)) callback_code) (f13,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_fi_simulator,(void*)&f_fi); + fr = ((float (ABI_ATTR *) (float,int)) callback_code) (f1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2i_simulator,(void*)&f_f2i); + fr = ((float (ABI_ATTR *) (float,float,int)) callback_code) (f1,f2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f3i_simulator,(void*)&f_f3i); + fr = ((float (ABI_ATTR *) (float,float,float,int)) callback_code) (f1,f2,f3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4i_simulator,(void*)&f_f4i); + fr = ((float (ABI_ATTR *) (float,float,float,float,int)) callback_code) (f1,f2,f3,f4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f7i_simulator,(void*)&f_f7i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8i_simulator,(void*)&f_f8i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f13i_simulator,(void*)&f_f13i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_di_simulator,(void*)&d_di); + dr = ((double (ABI_ATTR *) (double,int)) callback_code) (d1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2i_simulator,(void*)&d_d2i); + dr = ((double (ABI_ATTR *) (double,double,int)) callback_code) (d1,d2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d3i_simulator,(void*)&d_d3i); + dr = ((double (ABI_ATTR *) (double,double,double,int)) callback_code) (d1,d2,d3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4i_simulator,(void*)&d_d4i); + dr = ((double (ABI_ATTR *) (double,double,double,double,int)) callback_code) (d1,d2,d3,d4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d7i_simulator,(void*)&d_d7i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8i_simulator,(void*)&d_d8i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d12i_simulator,(void*)&d_d12i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d13i_simulator,(void*)&d_d13i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* small structure return tests */ +#if (!defined(DGTEST)) || DGTEST == 43 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + PREP_CALLBACK(cif,S1_v_simulator,(void*)&S1_v); + r = ((Size1 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + PREP_CALLBACK(cif,S2_v_simulator,(void*)&S2_v); + r = ((Size2 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size3 r = S3_v(); + 
fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + PREP_CALLBACK(cif,S3_v_simulator,(void*)&S3_v); + r = ((Size3 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + PREP_CALLBACK(cif,S4_v_simulator,(void*)&S4_v); + r = ((Size4 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + PREP_CALLBACK(cif,S7_v_simulator,(void*)&S7_v); + r = ((Size7 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + PREP_CALLBACK(cif,S8_v_simulator,(void*)&S8_v); + r = ((Size8 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* 
ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + PREP_CALLBACK(cif,S12_v_simulator,(void*)&S12_v); + r = ((Size12 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + PREP_CALLBACK(cif,S15_v_simulator,(void*)&S15_v); + r = ((Size15 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + PREP_CALLBACK(cif,S16_v_simulator,(void*)&S16_v); + r = ((Size16 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif + + + /* structure tests */ + { Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 52 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = 
sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + PREP_CALLBACK(cif,I_III_simulator,(void*)&I_III); + Ir = ((Int (ABI_ATTR *) (Int,Int,Int)) callback_code) (I1,I2,I3); + } + FREE_CALLBACK(); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); + Cr.x = '\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + PREP_CALLBACK(cif,C_CdC_simulator,(void*)&C_CdC); + Cr = ((Char (ABI_ATTR *) (Char,double,Char)) callback_code) (C1,d2,C3); + } + FREE_CALLBACK(); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 54 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + PREP_CALLBACK(cif,F_Ffd_simulator,(void*)&F_Ffd); + Fr = ((Float (ABI_ATTR *) (Float,float,double)) callback_code) (F1,f2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 55 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_fDd_simulator,(void*)&D_fDd); + Dr = ((Double (ABI_ATTR *) (float,Double,double)) callback_code) (f1,D2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_Dfd_simulator,(void*)&D_Dfd); + Dr = ((Double (ABI_ATTR *) (Double,float,double)) callback_code) (D1,f2,d3); + } + 
FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 57 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + PREP_CALLBACK(cif,J_JiJ_simulator,(void*)&J_JiJ); + Jr = ((J (ABI_ATTR *) (J,int,J)) callback_code) (J1,i2,J2); + } + FREE_CALLBACK(); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif + +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 58 + Tr = T_TcT(T1,' ',T2); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + PREP_CALLBACK(cif,T_TcT_simulator,(void*)&T_TcT); + Tr = ((T (ABI_ATTR *) (T,char,T)) callback_code) (T1,' ',T2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif + +#ifndef SKIP_X +#if (!defined(DGTEST)) || DGTEST == 59 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + PREP_CALLBACK(cif,X_BcdB_simulator,(void*)&X_BcdB); + Xr = ((X (ABI_ATTR *) (B,char,double,B)) callback_code) (B1,c2,d3,B2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif +#endif + } + + + /* gpargs boundary tests */ + { + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 60 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l0K_simulator,(void*)l_l0K); + lr = ((long (ABI_ATTR 
*) (K,long)) callback_code) (K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l1K_simulator,(void*)l_l1K); + lr = ((long (ABI_ATTR *) (long,K,long)) callback_code) (l1,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l2K_simulator,(void*)l_l2K); + lr = ((long (ABI_ATTR *) (long,long,K,long)) callback_code) (l1,l2,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l3K_simulator,(void*)l_l3K); + lr = ((long (ABI_ATTR *) (long,long,long,K,long)) callback_code) (l1,l2,l3,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l4K_simulator,(void*)l_l4K); + lr = ((long (ABI_ATTR *) (long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l5K_simulator,(void*)l_l5K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l6K_simulator,(void*)l_l6K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,l6,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 67 + fr = 
f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f17l3L_simulator,(void*)&f_f17l3L); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,long,long,long,L)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 68 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d17l3L_simulator,(void*)&d_d17l3L); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,long,long,long,L)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 69 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l2ll_simulator,(void*)ll_l2ll); + llr = ((long long (ABI_ATTR *) (long,long,long long,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l3ll_simulator,(void*)ll_l3ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long long,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; 
clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l4ll_simulator,(void*)ll_l4ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l5ll_simulator,(void*)ll_l5ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l6ll_simulator,(void*)ll_l6ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l7ll_simulator,(void*)ll_l7ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 75 + dr = d_l2d(l1,l2,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l2d_simulator,(void*)d_l2d); + dr = ((double (ABI_ATTR *) (long,long,double,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l3d(l1,l2,l3,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, 
&ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l3d_simulator,(void*)d_l3d); + dr = ((double (ABI_ATTR *) (long,long,long,double,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l4d(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l4d_simulator,(void*)d_l4d); + dr = ((double (ABI_ATTR *) (long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l5d(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l5d_simulator,(void*)d_l5d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l6d(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l6d_simulator,(void*)d_l6d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l7d_simulator,(void*)d_l7d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + + } + + exit(0); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases 2.c new file mode 100644 index 0000000..d25ebf4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases 2.c @@ -0,0 +1,743 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the 
License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* This file defines test functions of selected signatures, that exercise + dark corners of the various ABIs. */ + +#include + +FILE* out; + +#define uchar unsigned char +#define ushort unsigned short +#define uint unsigned int +#define ulong unsigned long + +typedef struct { char x; } Char; +typedef struct { short x; } Short; +typedef struct { int x; } Int; +typedef struct { long x; } Long; +typedef struct { float x; } Float; +typedef struct { double x; } Double; +typedef struct { char c; float f; } A; +typedef struct { double d; int i[3]; } B; +typedef struct { long l1; long l2; } J; +typedef struct { long l1; long l2; long l3; long l4; } K; +typedef struct { long l1; long l2; long l3; long l4; long l5; long l6; } L; +typedef struct { char x1; } Size1; +typedef struct { char x1; char x2; } Size2; +typedef struct { char x1; char x2; char x3; } Size3; +typedef struct { char x1; char x2; char x3; char x4; } Size4; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; +} Size7; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; +} Size8; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; +} Size12; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; +} Size15; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; char x16; +} Size16; +typedef struct { char c[3]; } T; +typedef struct { char c[33],c1; } X; + +char c1='a', c2=127, c3=(char)128, c4=(char)255, c5=-1; +short s1=32767, s2=(short)32768, s3=3, s4=4, s5=5, s6=6, s7=7, s8=8, s9=9; +int i1=1, i2=2, i3=3, i4=4, i5=5, i6=6, i7=7, i8=8, i9=9, + i10=11, i11=12, i12=13, i13=14, i14=15, i15=16, i16=17; +long l1=1, l2=2, l3=3, l4=4, l5=5, l6=6, l7=7, l8=8, l9=9; +long long ll1 = 3875056143130689530LL; +float f1=0.1f, f2=0.2f, f3=0.3f, f4=0.4f, f5=0.5f, f6=0.6f, f7=0.7f, f8=0.8f, f9=0.9f, + f10=1.1f, f11=1.2f, f12=1.3f, f13=1.4f, f14=1.5f, f15=1.6f, f16=1.7f, f17=1.8f, + f18=1.9f, f19=2.1f, f20=2.2f, f21=2.3f, f22=2.4f, f23=2.5f, f24=2.6f; +double d1=0.1, d2=0.2, d3=0.3, d4=0.4, d5=0.5, d6=0.6, d7=0.7, d8=0.8, d9=0.9, + d10=1.1, d11=1.2, d12=1.3, d13=1.4, d14=1.5, d15=1.6, d16=1.7, d17=1.8; + +uchar uc1='a', uc2=127, uc3=128, uc4=255, uc5=(uchar)-1; +ushort us1=1, us2=2, us3=3, us4=4, us5=5, us6=6, us7=7, us8=8, us9=9; +uint ui1=1, ui2=2, ui3=3, ui4=4, ui5=5, ui6=6, ui7=7, ui8=8, ui9=9; +ulong ul1=1, ul2=2, ul3=3, ul4=4, ul5=5, ul6=6, ul7=7, ul8=8, ul9=9; + +char *str1="hello",str2[]="goodbye",*str3="still here?"; +Char C1={'A'}, C2={'B'}, C3={'C'}, C4={'\377'}, C5={(char)(-1)}; +Short S1={1}, S2={2}, S3={3}, S4={4}, S5={5}, S6={6}, S7={7}, S8={8}, S9={9}; +Int I1={1}, I2={2}, I3={3}, I4={4}, I5={5}, I6={6}, I7={7}, I8={8}, I9={9}; +Float F1={0.1f}, F2={0.2f}, F3={0.3f}, F4={0.4f}, F5={0.5f}, F6={0.6f}, F7={0.7f}, F8={0.8f}, F9={0.9f}; +Double D1={0.1}, 
D2={0.2}, D3={0.3}, D4={0.4}, D5={0.5}, D6={0.6}, D7={0.7}, D8={0.8}, D9={0.9}; + +A A1={'a',0.1f},A2={'b',0.2f},A3={'\377',0.3f}; +B B1={0.1,{1,2,3}},B2={0.2,{5,4,3}}; +J J1={47,11},J2={73,55}; +K K1={19,69,12,28}; +L L1={561,1105,1729,2465,2821,6601}; /* A002997 */ +Size1 Size1_1={'a'}; +Size2 Size2_1={'a','b'}; +Size3 Size3_1={'a','b','c'}; +Size4 Size4_1={'a','b','c','d'}; +Size7 Size7_1={'a','b','c','d','e','f','g'}; +Size8 Size8_1={'a','b','c','d','e','f','g','h'}; +Size12 Size12_1={'a','b','c','d','e','f','g','h','i','j','k','l'}; +Size15 Size15_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o'}; +Size16 Size16_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p'}; +T T1={{'t','h','e'}},T2={{'f','o','x'}}; +X X1={"abcdefghijklmnopqrstuvwxyzABCDEF",'G'}, X2={"123",'9'}, X3={"return-return-return",'R'}; + +#if defined(__GNUC__) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_ATTR +#define ABI_ATTR +#endif + +/* void tests */ +void ABI_ATTR v_v (void) +{ + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +int ABI_ATTR i_v (void) +{ + int r=99; + fprintf(out,"int f(void):"); + fflush(out); + return r; +} +int ABI_ATTR i_i (int a) +{ + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + return r; +} +int ABI_ATTR i_i2 (int a, int b) +{ + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + return r; +} +int ABI_ATTR i_i4 (int a, int b, int c, int d) +{ + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + return r; +} +int ABI_ATTR i_i8 (int a, int b, int c, int d, int e, int f, int g, int h) +{ + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +int ABI_ATTR i_i16 (int a, int b, int c, int d, int e, int f, int g, int h, + int i, int j, int k, int l, int m, int n, int o, int p) +{ + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* float tests */ +float ABI_ATTR f_f (float a) +{ + float r=a+1.0f; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + return r; +} +float ABI_ATTR f_f2 (float a, float b) +{ + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + return r; +} +float ABI_ATTR f_f4 (float a, float b, float c, float d) +{ + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +float ABI_ATTR f_f8 (float a, float b, float c, float d, float e, float f, + float g, float h) +{ + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +float ABI_ATTR f_f16 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, float p) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} +float ABI_ATTR f_f24 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, 
float p, + float q, float s, float t, float u, float v, float w, float x, float y) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + return r; +} + +/* double tests */ +double ABI_ATTR d_d (double a) +{ + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + return r; +} +double ABI_ATTR d_d2 (double a, double b) +{ + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + return r; +} +double ABI_ATTR d_d4 (double a, double b, double c, double d) +{ + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_d8 (double a, double b, double c, double d, double e, double f, + double g, double h) +{ + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +double ABI_ATTR d_d16 (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p) +{ + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* pointer tests */ +void* ABI_ATTR vp_vpdpcpsp (void* a, double* b, char* c, Int* d) +{ + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + return ret; +} + +/* mixed number tests */ +uchar ABI_ATTR uc_ucsil (uchar a, ushort b, uint c, ulong d) +{ + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iidd (int a, int b, double c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iiidi (int a, int b, int c, double d, int e) +{ + double r = a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + return r; +} +double ABI_ATTR d_idid (int a, double b, int c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_fdi (float a, double b, int c) +{ + double r = a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + return r; +} +ushort ABI_ATTR us_cdcd (char a, double b, char c, double d) +{ + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + return r; +} + +long long ABI_ATTR ll_iiilli (int a, int b, int c, long long d, int e) +{ + long long r = (long long)(int)a+(long long)(int)b+(long long)(int)c+d+(long long)(int)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + return r; +} +long long ABI_ATTR ll_flli (float a, long long b, int c) +{ + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + return r; +} + +float ABI_ATTR f_fi (float a, int z) +{ + float r = a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + return r; +} 
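/* Editorial sketch, not part of the vendored file: the fixed-signature
   functions above are the targets the testsuite drives through libffi.
   A minimal, self-contained way to call one of them (here f_fi, i.e.
   float f(float,int)) using only the public <ffi.h> API (ffi_prep_cif,
   ffi_call, FFI_FN) would look roughly like this; the helper name and
   the sample argument values are illustrative, not taken from the
   testsuite. */
#include <ffi.h>

static float call_f_fi_via_libffi(void)
{
  ffi_cif cif;
  ffi_type *argtypes[] = { &ffi_type_float, &ffi_type_sint };
  float a = 0.1f;                      /* sample arguments */
  int z = 9;
  void *argvalues[] = { &a, &z };
  float result = 0.0f;

  /* Describe the signature, then call through the generic entry point. */
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_float, argtypes) == FFI_OK)
    ffi_call(&cif, FFI_FN(f_fi), &result, argvalues);
  return result;
}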
+float ABI_ATTR f_f2i (float a, float b, int z) +{ + float r = a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +float ABI_ATTR f_f3i (float a, float b, float c, int z) +{ + float r = a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +float ABI_ATTR f_f4i (float a, float b, float c, float d, int z) +{ + float r = a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +float ABI_ATTR f_f7i (float a, float b, float c, float d, float e, float f, float g, + int z) +{ + float r = a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +float ABI_ATTR f_f8i (float a, float b, float c, float d, float e, float f, float g, + float h, int z) +{ + float r = a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +float ABI_ATTR f_f12i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +float ABI_ATTR f_f13i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + +double ABI_ATTR d_di (double a, int z) +{ + double r = a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + return r; +} +double ABI_ATTR d_d2i (double a, double b, int z) +{ + double r = a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +double ABI_ATTR d_d3i (double a, double b, double c, int z) +{ + double r = a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +double ABI_ATTR d_d4i (double a, double b, double c, double d, int z) +{ + double r = a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +double ABI_ATTR d_d7i (double a, double b, double c, double d, double e, double f, + double g, int z) +{ + double r = a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +double ABI_ATTR d_d8i (double a, double b, double c, double d, double e, double f, + double g, double h, int z) +{ + double r = a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +double ABI_ATTR d_d12i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +double ABI_ATTR d_d13i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + 
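/* Editorial sketch, not part of the vendored file: the small-structure
   tests that follow return aggregates by value. The callback driver
   earlier in this diff describes each such struct to libffi by building
   an FFI_TYPE_STRUCT ffi_type with a NULL-terminated elements array and
   filling size/alignment itself from sizeof/alignof_slot. With the
   public <ffi.h> API alone, size and alignment can be left at 0 so that
   ffi_prep_cif computes them; a minimal version of that pattern for
   Size2 is shown below, with illustrative identifiers. */
#include <ffi.h>

static int describe_Size2_return(ffi_cif *cif)
{
  /* One element per char field, terminated by NULL. */
  static ffi_type *size2_elements[] = { &ffi_type_schar, &ffi_type_schar, NULL };
  /* size = 0 and alignment = 0 ask libffi to compute the layout. */
  static ffi_type ffi_type_Size2_sketch = { 0, 0, FFI_TYPE_STRUCT, size2_elements };

  /* Size2 f(void): no arguments, the struct is the return type. */
  return ffi_prep_cif(cif, FFI_DEFAULT_ABI, 0, &ffi_type_Size2_sketch, NULL) == FFI_OK;
}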
+/* small structure return tests */ +Size1 ABI_ATTR S1_v (void) +{ + fprintf(out,"Size1 f(void):"); + fflush(out); + return Size1_1; +} +Size2 ABI_ATTR S2_v (void) +{ + fprintf(out,"Size2 f(void):"); + fflush(out); + return Size2_1; +} +Size3 ABI_ATTR S3_v (void) +{ + fprintf(out,"Size3 f(void):"); + fflush(out); + return Size3_1; +} +Size4 ABI_ATTR S4_v (void) +{ + fprintf(out,"Size4 f(void):"); + fflush(out); + return Size4_1; +} +Size7 ABI_ATTR S7_v (void) +{ + fprintf(out,"Size7 f(void):"); + fflush(out); + return Size7_1; +} +Size8 ABI_ATTR S8_v (void) +{ + fprintf(out,"Size8 f(void):"); + fflush(out); + return Size8_1; +} +Size12 ABI_ATTR S12_v (void) +{ + fprintf(out,"Size12 f(void):"); + fflush(out); + return Size12_1; +} +Size15 ABI_ATTR S15_v (void) +{ + fprintf(out,"Size15 f(void):"); + fflush(out); + return Size15_1; +} +Size16 ABI_ATTR S16_v (void) +{ + fprintf(out,"Size16 f(void):"); + fflush(out); + return Size16_1; +} + +/* structure tests */ +Int ABI_ATTR I_III (Int a, Int b, Int c) +{ + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + return r; +} +Char ABI_ATTR C_CdC (Char a, double b, Char c) +{ + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + return r; +} +Float ABI_ATTR F_Ffd (Float a, float b, double c) +{ + Float r; + r.x = (float) (a.x + b + c); + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +Double ABI_ATTR D_fDd (float a, Double b, double c) +{ + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + return r; +} +Double ABI_ATTR D_Dfd (Double a, float b, double c) +{ + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +J ABI_ATTR J_JiJ (J a, int b, J c) +{ + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + return r; +} +T ABI_ATTR T_TcT (T a, char b, T c) +{ + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + return r; +} +X ABI_ATTR X_BcdB (B a, char b, double c, B d) +{ + static X xr={"return val",'R'}; + X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + return r; +} + +/* Test for cases where some argument (especially structure, 'long long', or + 'double') may be passed partially in general-purpose argument registers + and partially on the stack. Different ABIs pass between 4 and 8 arguments + (or none) in general-purpose argument registers. 
*/ + +long ABI_ATTR l_l0K (K b, long c) +{ + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l1K (long a1, K b, long c) +{ + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l2K (long a1, long a2, K b, long c) +{ + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l3K (long a1, long a2, long a3, K b, long c) +{ + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l4K (long a1, long a2, long a3, long a4, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l5K (long a1, long a2, long a3, long a4, long a5, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l6K (long a1, long a2, long a3, long a4, long a5, long a6, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +/* These tests is crafted on the knowledge that for all known ABIs: + * 17 > number of floating-point argument registers, + * 3 < number of general-purpose argument registers < 3 + 6. 
*/ +float ABI_ATTR f_f17l3L (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p, float q, + long s, long t, long u, L z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} +double ABI_ATTR d_d17l3L (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p, double q, + long s, long t, long u, L z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} + +long long ABI_ATTR ll_l2ll (long a1, long a2, long long b, long c) +{ + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l3ll (long a1, long a2, long a3, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l4ll (long a1, long a2, long a3, long a4, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l5ll (long a1, long a2, long a3, long a4, long a5, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l6ll (long a1, long a2, long a3, long a4, long a5, long a6, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l7ll (long a1, long a2, long a3, long a4, long a5, long a6, long a7, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} + +double ABI_ATTR d_l2d (long a1, long a2, double b, long c) +{ + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l3d (long a1, long a2, long a3, double b, long c) +{ + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l4d (long a1, long a2, long a3, long a4, double b, long c) +{ + double r = 
(double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l5d (long a1, long a2, long a3, long a4, long a5, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l6d (long a1, long a2, long a3, long a4, long a5, long a6, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l7d (long a1, long a2, long a3, long a4, long a5, long a6, long a7, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + return r; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c new file mode 100644 index 0000000..d25ebf4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c @@ -0,0 +1,743 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* This file defines test functions of selected signatures, that exercise + dark corners of the various ABIs. 
*/ + +#include + +FILE* out; + +#define uchar unsigned char +#define ushort unsigned short +#define uint unsigned int +#define ulong unsigned long + +typedef struct { char x; } Char; +typedef struct { short x; } Short; +typedef struct { int x; } Int; +typedef struct { long x; } Long; +typedef struct { float x; } Float; +typedef struct { double x; } Double; +typedef struct { char c; float f; } A; +typedef struct { double d; int i[3]; } B; +typedef struct { long l1; long l2; } J; +typedef struct { long l1; long l2; long l3; long l4; } K; +typedef struct { long l1; long l2; long l3; long l4; long l5; long l6; } L; +typedef struct { char x1; } Size1; +typedef struct { char x1; char x2; } Size2; +typedef struct { char x1; char x2; char x3; } Size3; +typedef struct { char x1; char x2; char x3; char x4; } Size4; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; +} Size7; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; +} Size8; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; +} Size12; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; +} Size15; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; char x16; +} Size16; +typedef struct { char c[3]; } T; +typedef struct { char c[33],c1; } X; + +char c1='a', c2=127, c3=(char)128, c4=(char)255, c5=-1; +short s1=32767, s2=(short)32768, s3=3, s4=4, s5=5, s6=6, s7=7, s8=8, s9=9; +int i1=1, i2=2, i3=3, i4=4, i5=5, i6=6, i7=7, i8=8, i9=9, + i10=11, i11=12, i12=13, i13=14, i14=15, i15=16, i16=17; +long l1=1, l2=2, l3=3, l4=4, l5=5, l6=6, l7=7, l8=8, l9=9; +long long ll1 = 3875056143130689530LL; +float f1=0.1f, f2=0.2f, f3=0.3f, f4=0.4f, f5=0.5f, f6=0.6f, f7=0.7f, f8=0.8f, f9=0.9f, + f10=1.1f, f11=1.2f, f12=1.3f, f13=1.4f, f14=1.5f, f15=1.6f, f16=1.7f, f17=1.8f, + f18=1.9f, f19=2.1f, f20=2.2f, f21=2.3f, f22=2.4f, f23=2.5f, f24=2.6f; +double d1=0.1, d2=0.2, d3=0.3, d4=0.4, d5=0.5, d6=0.6, d7=0.7, d8=0.8, d9=0.9, + d10=1.1, d11=1.2, d12=1.3, d13=1.4, d14=1.5, d15=1.6, d16=1.7, d17=1.8; + +uchar uc1='a', uc2=127, uc3=128, uc4=255, uc5=(uchar)-1; +ushort us1=1, us2=2, us3=3, us4=4, us5=5, us6=6, us7=7, us8=8, us9=9; +uint ui1=1, ui2=2, ui3=3, ui4=4, ui5=5, ui6=6, ui7=7, ui8=8, ui9=9; +ulong ul1=1, ul2=2, ul3=3, ul4=4, ul5=5, ul6=6, ul7=7, ul8=8, ul9=9; + +char *str1="hello",str2[]="goodbye",*str3="still here?"; +Char C1={'A'}, C2={'B'}, C3={'C'}, C4={'\377'}, C5={(char)(-1)}; +Short S1={1}, S2={2}, S3={3}, S4={4}, S5={5}, S6={6}, S7={7}, S8={8}, S9={9}; +Int I1={1}, I2={2}, I3={3}, I4={4}, I5={5}, I6={6}, I7={7}, I8={8}, I9={9}; +Float F1={0.1f}, F2={0.2f}, F3={0.3f}, F4={0.4f}, F5={0.5f}, F6={0.6f}, F7={0.7f}, F8={0.8f}, F9={0.9f}; +Double D1={0.1}, D2={0.2}, D3={0.3}, D4={0.4}, D5={0.5}, D6={0.6}, D7={0.7}, D8={0.8}, D9={0.9}; + +A A1={'a',0.1f},A2={'b',0.2f},A3={'\377',0.3f}; +B B1={0.1,{1,2,3}},B2={0.2,{5,4,3}}; +J J1={47,11},J2={73,55}; +K K1={19,69,12,28}; +L L1={561,1105,1729,2465,2821,6601}; /* A002997 */ +Size1 Size1_1={'a'}; +Size2 Size2_1={'a','b'}; +Size3 Size3_1={'a','b','c'}; +Size4 Size4_1={'a','b','c','d'}; +Size7 Size7_1={'a','b','c','d','e','f','g'}; +Size8 Size8_1={'a','b','c','d','e','f','g','h'}; +Size12 Size12_1={'a','b','c','d','e','f','g','h','i','j','k','l'}; 
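/* Editorial sketch, not part of the vendored file: the callback driver
   earlier in this diff wraps each test function in a libffi closure via
   its ALLOC_CALLBACK / PREP_CALLBACK macros and then calls the generated
   code pointer. The same round trip for a signature like
   double f(double,int), written against only the public <ffi.h> closure
   API (ffi_closure_alloc, ffi_prep_closure_loc, ffi_closure_free), is
   sketched below; the handler and helper names are illustrative. */
#include <ffi.h>

static void d_di_style_handler(ffi_cif *cif, void *ret, void **args, void *userdata)
{
  (void)cif; (void)userdata;
  double a = *(double *)args[0];   /* each args[i] points at the received value */
  int z = *(int *)args[1];
  *(double *)ret = a + z;          /* floating-point results are stored at their own type */
}

static double call_through_closure(double a, int z)
{
  ffi_cif cif;
  ffi_type *argtypes[] = { &ffi_type_double, &ffi_type_sint };
  void *code = NULL;
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
  double r = 0.0;

  if (closure
      && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, argtypes) == FFI_OK
      && ffi_prep_closure_loc(closure, &cif, d_di_style_handler, NULL, code) == FFI_OK)
    r = ((double (*)(double, int)) code)(a, z);   /* invoke the trampoline libffi generated */

  if (closure)
    ffi_closure_free(closure);
  return r;
}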
+Size15 Size15_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o'}; +Size16 Size16_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p'}; +T T1={{'t','h','e'}},T2={{'f','o','x'}}; +X X1={"abcdefghijklmnopqrstuvwxyzABCDEF",'G'}, X2={"123",'9'}, X3={"return-return-return",'R'}; + +#if defined(__GNUC__) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_ATTR +#define ABI_ATTR +#endif + +/* void tests */ +void ABI_ATTR v_v (void) +{ + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +int ABI_ATTR i_v (void) +{ + int r=99; + fprintf(out,"int f(void):"); + fflush(out); + return r; +} +int ABI_ATTR i_i (int a) +{ + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + return r; +} +int ABI_ATTR i_i2 (int a, int b) +{ + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + return r; +} +int ABI_ATTR i_i4 (int a, int b, int c, int d) +{ + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + return r; +} +int ABI_ATTR i_i8 (int a, int b, int c, int d, int e, int f, int g, int h) +{ + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +int ABI_ATTR i_i16 (int a, int b, int c, int d, int e, int f, int g, int h, + int i, int j, int k, int l, int m, int n, int o, int p) +{ + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* float tests */ +float ABI_ATTR f_f (float a) +{ + float r=a+1.0f; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + return r; +} +float ABI_ATTR f_f2 (float a, float b) +{ + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + return r; +} +float ABI_ATTR f_f4 (float a, float b, float c, float d) +{ + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +float ABI_ATTR f_f8 (float a, float b, float c, float d, float e, float f, + float g, float h) +{ + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +float ABI_ATTR f_f16 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, float p) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} +float ABI_ATTR f_f24 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, float p, + float q, float s, float t, float u, float v, float w, float x, float y) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + return r; +} + +/* double tests */ +double ABI_ATTR d_d (double a) +{ + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + return r; +} +double ABI_ATTR d_d2 (double a, double b) +{ + double r=a+b; + 
fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + return r; +} +double ABI_ATTR d_d4 (double a, double b, double c, double d) +{ + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_d8 (double a, double b, double c, double d, double e, double f, + double g, double h) +{ + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +double ABI_ATTR d_d16 (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p) +{ + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* pointer tests */ +void* ABI_ATTR vp_vpdpcpsp (void* a, double* b, char* c, Int* d) +{ + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + return ret; +} + +/* mixed number tests */ +uchar ABI_ATTR uc_ucsil (uchar a, ushort b, uint c, ulong d) +{ + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iidd (int a, int b, double c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iiidi (int a, int b, int c, double d, int e) +{ + double r = a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + return r; +} +double ABI_ATTR d_idid (int a, double b, int c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_fdi (float a, double b, int c) +{ + double r = a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + return r; +} +ushort ABI_ATTR us_cdcd (char a, double b, char c, double d) +{ + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + return r; +} + +long long ABI_ATTR ll_iiilli (int a, int b, int c, long long d, int e) +{ + long long r = (long long)(int)a+(long long)(int)b+(long long)(int)c+d+(long long)(int)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + return r; +} +long long ABI_ATTR ll_flli (float a, long long b, int c) +{ + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + return r; +} + +float ABI_ATTR f_fi (float a, int z) +{ + float r = a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + return r; +} +float ABI_ATTR f_f2i (float a, float b, int z) +{ + float r = a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +float ABI_ATTR f_f3i (float a, float b, float c, int z) +{ + float r = a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +float ABI_ATTR f_f4i (float a, float b, float c, float d, int z) +{ + float r = a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +float ABI_ATTR f_f7i (float 
a, float b, float c, float d, float e, float f, float g, + int z) +{ + float r = a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +float ABI_ATTR f_f8i (float a, float b, float c, float d, float e, float f, float g, + float h, int z) +{ + float r = a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +float ABI_ATTR f_f12i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +float ABI_ATTR f_f13i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + +double ABI_ATTR d_di (double a, int z) +{ + double r = a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + return r; +} +double ABI_ATTR d_d2i (double a, double b, int z) +{ + double r = a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +double ABI_ATTR d_d3i (double a, double b, double c, int z) +{ + double r = a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +double ABI_ATTR d_d4i (double a, double b, double c, double d, int z) +{ + double r = a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +double ABI_ATTR d_d7i (double a, double b, double c, double d, double e, double f, + double g, int z) +{ + double r = a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +double ABI_ATTR d_d8i (double a, double b, double c, double d, double e, double f, + double g, double h, int z) +{ + double r = a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +double ABI_ATTR d_d12i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +double ABI_ATTR d_d13i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + +/* small structure return tests */ +Size1 ABI_ATTR S1_v (void) +{ + fprintf(out,"Size1 f(void):"); + fflush(out); + return Size1_1; +} +Size2 ABI_ATTR S2_v (void) +{ + fprintf(out,"Size2 f(void):"); + fflush(out); + return Size2_1; +} +Size3 ABI_ATTR S3_v (void) +{ + fprintf(out,"Size3 f(void):"); + fflush(out); + return Size3_1; +} +Size4 ABI_ATTR S4_v (void) +{ + fprintf(out,"Size4 f(void):"); + fflush(out); + return Size4_1; +} +Size7 ABI_ATTR S7_v (void) +{ + fprintf(out,"Size7 f(void):"); + fflush(out); + return Size7_1; +} +Size8 
ABI_ATTR S8_v (void) +{ + fprintf(out,"Size8 f(void):"); + fflush(out); + return Size8_1; +} +Size12 ABI_ATTR S12_v (void) +{ + fprintf(out,"Size12 f(void):"); + fflush(out); + return Size12_1; +} +Size15 ABI_ATTR S15_v (void) +{ + fprintf(out,"Size15 f(void):"); + fflush(out); + return Size15_1; +} +Size16 ABI_ATTR S16_v (void) +{ + fprintf(out,"Size16 f(void):"); + fflush(out); + return Size16_1; +} + +/* structure tests */ +Int ABI_ATTR I_III (Int a, Int b, Int c) +{ + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + return r; +} +Char ABI_ATTR C_CdC (Char a, double b, Char c) +{ + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + return r; +} +Float ABI_ATTR F_Ffd (Float a, float b, double c) +{ + Float r; + r.x = (float) (a.x + b + c); + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +Double ABI_ATTR D_fDd (float a, Double b, double c) +{ + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + return r; +} +Double ABI_ATTR D_Dfd (Double a, float b, double c) +{ + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +J ABI_ATTR J_JiJ (J a, int b, J c) +{ + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + return r; +} +T ABI_ATTR T_TcT (T a, char b, T c) +{ + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + return r; +} +X ABI_ATTR X_BcdB (B a, char b, double c, B d) +{ + static X xr={"return val",'R'}; + X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + return r; +} + +/* Test for cases where some argument (especially structure, 'long long', or + 'double') may be passed partially in general-purpose argument registers + and partially on the stack. Different ABIs pass between 4 and 8 arguments + (or none) in general-purpose argument registers. 
*/ + +long ABI_ATTR l_l0K (K b, long c) +{ + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l1K (long a1, K b, long c) +{ + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l2K (long a1, long a2, K b, long c) +{ + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l3K (long a1, long a2, long a3, K b, long c) +{ + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l4K (long a1, long a2, long a3, long a4, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l5K (long a1, long a2, long a3, long a4, long a5, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l6K (long a1, long a2, long a3, long a4, long a5, long a6, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +/* These tests is crafted on the knowledge that for all known ABIs: + * 17 > number of floating-point argument registers, + * 3 < number of general-purpose argument registers < 3 + 6. 
*/ +float ABI_ATTR f_f17l3L (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p, float q, + long s, long t, long u, L z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} +double ABI_ATTR d_d17l3L (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p, double q, + long s, long t, long u, L z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} + +long long ABI_ATTR ll_l2ll (long a1, long a2, long long b, long c) +{ + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l3ll (long a1, long a2, long a3, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l4ll (long a1, long a2, long a3, long a4, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l5ll (long a1, long a2, long a3, long a4, long a5, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l6ll (long a1, long a2, long a3, long a4, long a5, long a6, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l7ll (long a1, long a2, long a3, long a4, long a5, long a6, long a7, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} + +double ABI_ATTR d_l2d (long a1, long a2, double b, long c) +{ + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l3d (long a1, long a2, long a3, double b, long c) +{ + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l4d (long a1, long a2, long a3, long a4, double b, long c) +{ + double r = 
(double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l5d (long a1, long a2, long a3, long a4, long a5, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l6d (long a1, long a2, long a3, long a4, long a5, long a6, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l7d (long a1, long a2, long a3, long a4, long a5, long a6, long a7, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + return r; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed 2.c new file mode 100644 index 0000000..5d4959c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed 2.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. + Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]); + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c new file mode 100644 index 0000000..5d4959c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. 
+ Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]); + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall 2.c new file mode 100644 index 0000000..5e5cb86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall 2.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. + Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]);; + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c new file mode 100644 index 0000000..5e5cb86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. 
+ Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]);; + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call 2.exp new file mode 100644 index 0000000..13ba2bd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call 2.exp @@ -0,0 +1,54 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +run-many-tests $tlist $additional_options + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + run-many-tests $tlist $additional_options +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call.exp new file mode 100644 index 0000000..13ba2bd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call.exp @@ -0,0 +1,54 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +run-many-tests $tlist $additional_options + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + run-many-tests $tlist $additional_options +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef 2.c new file mode 100644 index 0000000..bf60161 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef 2.c @@ -0,0 +1,26 @@ +/* Area: ffi_prep_cif + Purpose: Test error return for bad typedefs. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +int main (void) +{ + ffi_cif cif; + ffi_type* arg_types[1]; + + ffi_type badType = ffi_type_void; + + arg_types[0] = NULL; + + badType.size = 0; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &badType, + arg_types) == FFI_BAD_TYPEDEF); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c new file mode 100644 index 0000000..bf60161 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c @@ -0,0 +1,26 @@ +/* Area: ffi_prep_cif + Purpose: Test error return for bad typedefs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +int main (void) +{ + ffi_cif cif; + ffi_type* arg_types[1]; + + ffi_type badType = ffi_type_void; + + arg_types[0] = NULL; + + badType.size = 0; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &badType, + arg_types) == FFI_BAD_TYPEDEF); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest 2.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest 2.h @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include +#endif + +#if defined HAVE_INTTYPES_H +#include +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match , which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in . */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match , which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. */ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. 
*/ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include +#endif + +#if defined HAVE_INTTYPES_H +#include +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match , which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in . */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match , which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. */ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. 
*/ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float 2.c new file mode 100644 index 0000000..fbc272d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float 2.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int floating(int a, float b, double c, long double d) +{ + int i; + + i = (int) ((float)a/b + ((float)c/(float)d)); + + return i; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + float f; + signed int si1; + double d; + long double ld; + + args[0] = &ffi_type_sint; + values[0] = &si1; + args[1] = &ffi_type_float; + values[1] = &f; + args[2] = &ffi_type_double; + values[2] = &d; + args[3] = &ffi_type_longdouble; + values[3] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + si1 = 6; + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating (si1, f, d, ld); + + ffi_call(&cif, FFI_FN(floating), &rint, values); + + printf ("%d vs %d\n", (int)rint, floating (si1, f, d, ld)); + + CHECK((int)rint == floating(si1, f, d, ld)); + + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float.c new file mode 100644 index 0000000..fbc272d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int floating(int a, float b, double c, long double d) +{ + int i; + + i = (int) ((float)a/b + ((float)c/(float)d)); + + return i; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + float f; + signed int si1; + double d; + long double ld; + + args[0] = &ffi_type_sint; + values[0] = &si1; + args[1] = &ffi_type_float; + values[1] = &f; + args[2] = &ffi_type_double; + values[2] = &d; + args[3] = &ffi_type_longdouble; + values[3] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + si1 = 6; + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating (si1, f, d, ld); + + ffi_call(&cif, FFI_FN(floating), &rint, values); + + printf ("%d vs %d\n", (int)rint, floating (si1, f, d, ld)); + + CHECK((int)rint == floating(si1, f, d, ld)); + + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1 2.c new file mode 100644 index 0000000..c48493c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1 2.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +#include "float.h" + +#include + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(float f) +{ + return f/3.0; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* These are not always the same!! Check for a reasonable delta */ + + CHECK(fabs(result[0].d - dblit(f)) < DBL_EPSILON); + + /* Check the canary. */ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1.c new file mode 100644 index 0000000..c48493c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +#include "float.h" + +#include + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(float f) +{ + return f/3.0; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* These are not always the same!! Check for a reasonable delta */ + + CHECK(fabs(result[0].d - dblit(f)) < DBL_EPSILON); + + /* Check the canary. */ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2 2.c new file mode 100644 index 0000000..57cd9e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2 2.c @@ -0,0 +1,61 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static long double ldblit(float f) +{ + return (long double) (((long double) f)/ (long double) 3.0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + long double ld; + long double original; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_longdouble, args) == FFI_OK); + + f = 3.14159; + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf\n", ldblit(f)); +#endif + + ld = 666; + ffi_call(&cif, FFI_FN(ldblit), &ld, values); + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf, %Lf, %Lf, %Lf\n", ld, ldblit(f), ld - ldblit(f), LDBL_EPSILON); +#endif + + /* These are not always the same!! Check for a reasonable delta */ + original = ldblit(f); + if (((ld > original) ? (ld - original) : (original - ld)) < LDBL_EPSILON) + puts("long double return value tests ok!"); + else + CHECK(0); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2.c new file mode 100644 index 0000000..57cd9e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2.c @@ -0,0 +1,61 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static long double ldblit(float f) +{ + return (long double) (((long double) f)/ (long double) 3.0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + long double ld; + long double original; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_longdouble, args) == FFI_OK); + + f = 3.14159; + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf\n", ldblit(f)); +#endif + + ld = 666; + ffi_call(&cif, FFI_FN(ldblit), &ld, values); + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf, %Lf, %Lf, %Lf\n", ld, ldblit(f), ld - ldblit(f), LDBL_EPSILON); +#endif + + /* These are not always the same!! Check for a reasonable delta */ + original = ldblit(f); + if (((ld > original) ? (ld - original) : (original - ld)) < LDBL_EPSILON) + puts("long double return value tests ok!"); + else + CHECK(0); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3 2.c new file mode 100644 index 0000000..bab3206 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3 2.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check float arguments with different orders. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static double floating_1(float a, double b, long double c) +{ + return (double) a + b + (double) c; +} + +static double floating_2(long double a, double b, float c) +{ + return (double) a + b + (double) c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double rd; + + float f; + double d; + long double ld; + + args[0] = &ffi_type_float; + values[0] = &f; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_longdouble; + values[2] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating_1 (f, d, ld); + + ffi_call(&cif, FFI_FN(floating_1), &rd, values); + + CHECK(fabs(rd - floating_1(f, d, ld)) < DBL_EPSILON); + + args[0] = &ffi_type_longdouble; + values[0] = &ld; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_float; + values[2] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + floating_2 (ld, d, f); + + ffi_call(&cif, FFI_FN(floating_2), &rd, values); + + CHECK(fabs(rd - floating_2(ld, d, f)) < DBL_EPSILON); + + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3.c new file mode 100644 index 0000000..bab3206 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check float arguments with different orders. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static double floating_1(float a, double b, long double c) +{ + return (double) a + b + (double) c; +} + +static double floating_2(long double a, double b, float c) +{ + return (double) a + b + (double) c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double rd; + + float f; + double d; + long double ld; + + args[0] = &ffi_type_float; + values[0] = &f; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_longdouble; + values[2] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating_1 (f, d, ld); + + ffi_call(&cif, FFI_FN(floating_1), &rd, values); + + CHECK(fabs(rd - floating_1(f, d, ld)) < DBL_EPSILON); + + args[0] = &ffi_type_longdouble; + values[0] = &ld; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_float; + values[2] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + floating_2 (ld, d, f); + + ffi_call(&cif, FFI_FN(floating_2), &rd, values); + + CHECK(fabs(rd - floating_2(ld, d, f)) < DBL_EPSILON); + + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4 2.c new file mode 100644 index 0000000..0dd6d85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4 2.c @@ -0,0 +1,62 @@ +/* Area: ffi_call + Purpose: Check denorm double value. + Limitations: none. + PR: PR26483. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +/* { dg-options "-mieee" { target alpha*-*-* } } */ + +#include "ffitest.h" +#include "float.h" + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(double d) +{ + return d; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double d; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_double; + values[0] = &d; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + d = DBL_MIN / 2; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* The standard delta check doesn't work for denorms. Since we didn't do + any arithmetic, we should get the original result back, and hence an + exact check should be OK here. */ + + CHECK(result[0].d == dblit(d)); + + /* Check the canary. */ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4.c new file mode 100644 index 0000000..0dd6d85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4.c @@ -0,0 +1,62 @@ +/* Area: ffi_call + Purpose: Check denorm double value. + Limitations: none. + PR: PR26483. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +/* { dg-options "-mieee" { target alpha*-*-* } } */ + +#include "ffitest.h" +#include "float.h" + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(double d) +{ + return d; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double d; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_double; + values[0] = &d; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + d = DBL_MIN / 2; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* The standard delta check doesn't work for denorms. Since we didn't do + any arithmetic, we should get the original result back, and hence an + exact check should be OK here. */ + + CHECK(result[0].d == dblit(d)); + + /* Check the canary. */ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float_va 2.c new file mode 100644 index 0000000..5acff91 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float_va 2.c @@ -0,0 +1,107 @@ +/* Area: fp and variadics + Purpose: check fp inputs and returns work on variadics, even the fixed params + Limitations: None + PR: none + Originator: 2011-01-25 + + Intended to stress the difference in ABI on ARM vfp +*/ + +/* { dg-do run } */ + +#include + +#include "ffitest.h" + +/* prints out all the parameters, and returns the sum of them all. + * 'x' is the number of variadic parameters all of which are double in this test + */ +double float_va_fn(unsigned int x, double y,...) +{ + double total=0.0; + va_list ap; + unsigned int i; + + total+=(double)x; + total+=y; + + printf("%u: %.1f :", x, y); + + va_start(ap, y); + for(i=0;i 2011-01-25 + + Intended to stress the difference in ABI on ARM vfp +*/ + +/* { dg-do run } */ + +#include + +#include "ffitest.h" + +/* prints out all the parameters, and returns the sum of them all. + * 'x' is the number of variadic parameters all of which are double in this test + */ +double float_va_fn(unsigned int x, double y,...) 
+{ + double total=0.0; + va_list ap; + unsigned int i; + + total+=(double)x; + total+=y; + + printf("%u: %.1f :", x, y); + + va_start(ap, y); + for(i=0;i +#include +#include + +static float ABI_ATTR many(float f1, float f2, float f3, float f4, float f5, float f6, float f7, float f8, float f9, float f10, float f11, float f12, float f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + float fa[13]; + float f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_float; + values[i] = &fa[i]; + fa[i] = (float) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 13, + &ffi_type_float, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many.c new file mode 100644 index 0000000..336968c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Check return value float, with many arguments + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static float ABI_ATTR many(float f1, float f2, float f3, float f4, float f5, float f6, float f7, float f8, float f9, float f10, float f11, float f12, float f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + float fa[13]; + float f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_float; + values[i] = &fa[i]; + fa[i] = (float) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 13, + &ffi_type_float, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2 2.c new file mode 100644 index 0000000..1c85746 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2 2.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check uint8_t arguments. + Limitations: none. + PR: PR45677. 
+ Originator: Dan Witte 20100916 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +#define NARGS 7 + +typedef unsigned char u8; + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +uint8_t +foo (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return a + b + c + d + e + f + g; +} + +uint8_t ABI_ATTR +bar (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return foo (a, b, c, d, e, f, g); +} + +int +main (void) +{ + ffi_type *ffitypes[NARGS]; + int i; + ffi_cif cif; + ffi_arg result = 0; + uint8_t args[NARGS]; + void *argptrs[NARGS]; + + for (i = 0; i < NARGS; ++i) + ffitypes[i] = &ffi_type_uint8; + + CHECK (ffi_prep_cif (&cif, ABI_NUM, NARGS, + &ffi_type_uint8, ffitypes) == FFI_OK); + + for (i = 0; i < NARGS; ++i) + { + args[i] = i; + argptrs[i] = &args[i]; + } + ffi_call (&cif, FFI_FN (bar), &result, argptrs); + + CHECK (result == 21); + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2.c new file mode 100644 index 0000000..1c85746 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check uint8_t arguments. + Limitations: none. + PR: PR45677. + Originator: Dan Witte 20100916 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +#define NARGS 7 + +typedef unsigned char u8; + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +uint8_t +foo (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return a + b + c + d + e + f + g; +} + +uint8_t ABI_ATTR +bar (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return foo (a, b, c, d, e, f, g); +} + +int +main (void) +{ + ffi_type *ffitypes[NARGS]; + int i; + ffi_cif cif; + ffi_arg result = 0; + uint8_t args[NARGS]; + void *argptrs[NARGS]; + + for (i = 0; i < NARGS; ++i) + ffitypes[i] = &ffi_type_uint8; + + CHECK (ffi_prep_cif (&cif, ABI_NUM, NARGS, + &ffi_type_uint8, ffitypes) == FFI_OK); + + for (i = 0; i < NARGS; ++i) + { + args[i] = i; + argptrs[i] = &args[i]; + } + ffi_call (&cif, FFI_FN (bar), &result, argptrs); + + CHECK (result == 21); + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double 2.c new file mode 100644 index 0000000..4ef8c8a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double 2.c @@ -0,0 +1,70 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + double f3, + double f4, + double f5, + double f6, + double f7, + double f8, + double f9, + double f10, + double f11, + double f12, + double f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return ((f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + double fa[13]; + double f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_double; + values[i] = &fa[i]; + fa[i] = (double) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c new file mode 100644 index 0000000..4ef8c8a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c @@ -0,0 +1,70 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + double f3, + double f4, + double f5, + double f6, + double f7, + double f8, + double f9, + double f10, + double f11, + double f12, + double f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return ((f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + double fa[13]; + double f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_double; + values[i] = &fa[i]; + fa[i] = (double) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed 2.c new file mode 100644 index 0000000..85ec36e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed 2.c @@ -0,0 +1,78 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + long int i1, + double f3, + double f4, + long int i2, + double f5, + double f6, + long int i3, + double f7, + double f8, + long int i4, + double f9, + double f10, + long int i5, + double f11, + double f12, + long int i6, + double f13) +{ + return ((double) (i1 + i2 + i3 + i4 + i5 + i6) + (f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[19]; + void *values[19]; + double fa[19]; + long int la[19]; + double f, ff; + int i; + + for (i = 0; i < 19; i++) + { + if( (i - 2) % 3 == 0) { + args[i] = &ffi_type_slong; + la[i] = (long int) i; + values[i] = &la[i]; + } + else { + args[i] = &ffi_type_double; + fa[i] = (double) i; + values[i] = &fa[i]; + } + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 19, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], la[2], + fa[3], fa[4], la[5], + fa[6], fa[7], la[8], + fa[9], fa[10], la[11], + fa[12], fa[13], la[14], + fa[15], fa[16], la[17], + fa[18]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c new file mode 100644 index 0000000..85ec36e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c @@ -0,0 +1,78 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + long int i1, + double f3, + double f4, + long int i2, + double f5, + double f6, + long int i3, + double f7, + double f8, + long int i4, + double f9, + double f10, + long int i5, + double f11, + double f12, + long int i6, + double f13) +{ + return ((double) (i1 + i2 + i3 + i4 + i5 + i6) + (f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[19]; + void *values[19]; + double fa[19]; + long int la[19]; + double f, ff; + int i; + + for (i = 0; i < 19; i++) + { + if( (i - 2) % 3 == 0) { + args[i] = &ffi_type_slong; + la[i] = (long int) i; + values[i] = &la[i]; + } + else { + args[i] = &ffi_type_double; + fa[i] = (double) i; + values[i] = &fa[i]; + } + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 19, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], la[2], + fa[3], fa[4], la[5], + fa[6], fa[7], la[8], + fa[9], fa[10], la[11], + fa[12], fa[13], la[14], + fa[15], fa[16], la[17], + fa[18]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint 2.c new file mode 100644 index 0000000..6e2f26f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint 2.c @@ -0,0 +1,52 @@ +/* Area: ffi_call + Purpose: Check that negative integers are passed correctly. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int checking(int a, short b, signed char c) +{ + + return (a < 0 && b < 0 && c < 0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = &sc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + + checking (si, ss, sc); + + ffi_call(&cif, FFI_FN(checking), &rint, values); + + printf ("%d vs %d\n", (int)rint, checking (si, ss, sc)); + + CHECK(rint != 0); + + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint.c new file mode 100644 index 0000000..6e2f26f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint.c @@ -0,0 +1,52 @@ +/* Area: ffi_call + Purpose: Check that negative integers are passed correctly. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int checking(int a, short b, signed char c) +{ + + return (a < 0 && b < 0 && c < 0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = &sc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + + checking (si, ss, sc); + + ffi_call(&cif, FFI_FN(checking), &rint, values); + + printf ("%d vs %d\n", (int)rint, checking (si, ss, sc)); + + CHECK(rint != 0); + + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets 2.c new file mode 100644 index 0000000..23d88b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets 2.c @@ -0,0 +1,46 @@ +/* Area: Struct layout + Purpose: Test ffi_get_struct_offsets + Limitations: none. + PR: none. + Originator: Tom Tromey.
*/ + +/* { dg-do run } */ +#include "ffitest.h" +#include <stddef.h> + +struct test_1 +{ + char c; + float f; + char c2; + int i; +}; + +int +main (void) +{ + ffi_type test_1_type; + ffi_type *test_1_elements[5]; + size_t test_1_offsets[4]; + + test_1_elements[0] = &ffi_type_schar; + test_1_elements[1] = &ffi_type_float; + test_1_elements[2] = &ffi_type_schar; + test_1_elements[3] = &ffi_type_sint; + test_1_elements[4] = NULL; + + test_1_type.size = 0; + test_1_type.alignment = 0; + test_1_type.type = FFI_TYPE_STRUCT; + test_1_type.elements = test_1_elements; + + CHECK (ffi_get_struct_offsets (FFI_DEFAULT_ABI, &test_1_type, test_1_offsets) + == FFI_OK); + CHECK (test_1_type.size == sizeof (struct test_1)); + CHECK (offsetof (struct test_1, c) == test_1_offsets[0]); + CHECK (offsetof (struct test_1, f) == test_1_offsets[1]); + CHECK (offsetof (struct test_1, c2) == test_1_offsets[2]); + CHECK (offsetof (struct test_1, i) == test_1_offsets[3]); + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c new file mode 100644 index 0000000..23d88b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c @@ -0,0 +1,46 @@ +/* Area: Struct layout + Purpose: Test ffi_get_struct_offsets + Limitations: none. + PR: none. + Originator: Tom Tromey. */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <stddef.h> + +struct test_1 +{ + char c; + float f; + char c2; + int i; +}; + +int +main (void) +{ + ffi_type test_1_type; + ffi_type *test_1_elements[5]; + size_t test_1_offsets[4]; + + test_1_elements[0] = &ffi_type_schar; + test_1_elements[1] = &ffi_type_float; + test_1_elements[2] = &ffi_type_schar; + test_1_elements[3] = &ffi_type_sint; + test_1_elements[4] = NULL; + + test_1_type.size = 0; + test_1_type.alignment = 0; + test_1_type.type = FFI_TYPE_STRUCT; + test_1_type.elements = test_1_elements; + + CHECK (ffi_get_struct_offsets (FFI_DEFAULT_ABI, &test_1_type, test_1_offsets) + == FFI_OK); + CHECK (test_1_type.size == sizeof (struct test_1)); + CHECK (offsetof (struct test_1, c) == test_1_offsets[0]); + CHECK (offsetof (struct test_1, f) == test_1_offsets[1]); + CHECK (offsetof (struct test_1, c2) == test_1_offsets[2]); + CHECK (offsetof (struct test_1, i) == test_1_offsets[3]); + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638 2.c new file mode 100644 index 0000000..7da1621 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638 2.c @@ -0,0 +1,127 @@ +/* Area: ffi_call + Purpose: Reproduce bug found in python ctypes + Limitations: none.
+ PR: Fedora 1174037 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct { + long x; + long y; +} POINT; + +typedef struct { + long left; + long top; + long right; + long bottom; +} RECT; + +static RECT ABI_ATTR pr_test(int i __UNUSED__, RECT ar __UNUSED__, + RECT* br __UNUSED__, POINT cp __UNUSED__, + RECT dr __UNUSED__, RECT *er __UNUSED__, + POINT fp, RECT gr __UNUSED__) +{ + RECT result; + + result.left = fp.x; + result.right = fp.y; + result.top = fp.x; + result.bottom = fp.y; + + return result; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type point_type, rect_type; + ffi_type *point_type_elements[3]; + ffi_type *rect_type_elements[5]; + + int i; + POINT cp, fp; + RECT ar, br, dr, er, gr; + RECT *p1, *p2; + + /* This is a hack to get a properly aligned result buffer */ + RECT *rect_result = + (RECT *) malloc (sizeof(RECT)); + + point_type.size = 0; + point_type.alignment = 0; + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = point_type_elements; + point_type_elements[0] = &ffi_type_slong; + point_type_elements[1] = &ffi_type_slong; + point_type_elements[2] = NULL; + + rect_type.size = 0; + rect_type.alignment = 0; + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = rect_type_elements; + rect_type_elements[0] = &ffi_type_slong; + rect_type_elements[1] = &ffi_type_slong; + rect_type_elements[2] = &ffi_type_slong; + rect_type_elements[3] = &ffi_type_slong; + rect_type_elements[4] = NULL; + + args[0] = &ffi_type_sint; + args[1] = &rect_type; + args[2] = &ffi_type_pointer; + args[3] = &point_type; + args[4] = &rect_type; + args[5] = &ffi_type_pointer; + args[6] = &point_type; + args[7] = &rect_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 8, &rect_type, args) == FFI_OK); + + i = 1; + ar.left = 2; + ar.right = 3; + ar.top = 4; + ar.bottom = 5; + br.left = 6; + br.right = 7; + br.top = 8; + br.bottom = 9; + cp.x = 10; + cp.y = 11; + dr.left = 12; + dr.right = 13; + dr.top = 14; + dr.bottom = 15; + er.left = 16; + er.right = 17; + er.top = 18; + er.bottom = 19; + fp.x = 20; + fp.y = 21; + gr.left = 22; + gr.right = 23; + gr.top = 24; + gr.bottom = 25; + + values[0] = &i; + values[1] = &ar; + p1 = &br; + values[2] = &p1; + values[3] = &cp; + values[4] = &dr; + p2 = &er; + values[5] = &p2; + values[6] = &fp; + values[7] = &gr; + + ffi_call (&cif, FFI_FN(pr_test), rect_result, values); + + CHECK(rect_result->top == 20); + + free (rect_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c new file mode 100644 index 0000000..7da1621 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c @@ -0,0 +1,127 @@ +/* Area: ffi_call + Purpose: Reproduce bug found in python ctypes + Limitations: none. 
+ PR: Fedora 1174037 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct { + long x; + long y; +} POINT; + +typedef struct { + long left; + long top; + long right; + long bottom; +} RECT; + +static RECT ABI_ATTR pr_test(int i __UNUSED__, RECT ar __UNUSED__, + RECT* br __UNUSED__, POINT cp __UNUSED__, + RECT dr __UNUSED__, RECT *er __UNUSED__, + POINT fp, RECT gr __UNUSED__) +{ + RECT result; + + result.left = fp.x; + result.right = fp.y; + result.top = fp.x; + result.bottom = fp.y; + + return result; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type point_type, rect_type; + ffi_type *point_type_elements[3]; + ffi_type *rect_type_elements[5]; + + int i; + POINT cp, fp; + RECT ar, br, dr, er, gr; + RECT *p1, *p2; + + /* This is a hack to get a properly aligned result buffer */ + RECT *rect_result = + (RECT *) malloc (sizeof(RECT)); + + point_type.size = 0; + point_type.alignment = 0; + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = point_type_elements; + point_type_elements[0] = &ffi_type_slong; + point_type_elements[1] = &ffi_type_slong; + point_type_elements[2] = NULL; + + rect_type.size = 0; + rect_type.alignment = 0; + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = rect_type_elements; + rect_type_elements[0] = &ffi_type_slong; + rect_type_elements[1] = &ffi_type_slong; + rect_type_elements[2] = &ffi_type_slong; + rect_type_elements[3] = &ffi_type_slong; + rect_type_elements[4] = NULL; + + args[0] = &ffi_type_sint; + args[1] = &rect_type; + args[2] = &ffi_type_pointer; + args[3] = &point_type; + args[4] = &rect_type; + args[5] = &ffi_type_pointer; + args[6] = &point_type; + args[7] = &rect_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 8, &rect_type, args) == FFI_OK); + + i = 1; + ar.left = 2; + ar.right = 3; + ar.top = 4; + ar.bottom = 5; + br.left = 6; + br.right = 7; + br.top = 8; + br.bottom = 9; + cp.x = 10; + cp.y = 11; + dr.left = 12; + dr.right = 13; + dr.top = 14; + dr.bottom = 15; + er.left = 16; + er.right = 17; + er.top = 18; + er.bottom = 19; + fp.x = 20; + fp.y = 21; + gr.left = 22; + gr.right = 23; + gr.top = 24; + gr.bottom = 25; + + values[0] = &i; + values[1] = &ar; + p1 = &br; + values[2] = &p1; + values[3] = &cp; + values[4] = &dr; + p2 = &er; + values[5] = &p2; + values[6] = &fp; + values[7] = &gr; + + ffi_call (&cif, FFI_FN(pr_test), rect_result, values); + + CHECK(rect_result->top == 20); + + free (rect_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion 2.c new file mode 100644 index 0000000..4456161 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion 2.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Promotion test. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +static int promotion(signed char sc, signed short ss, + unsigned char uc, unsigned short us) +{ + int r = (int) sc + (int) ss + (int) uc + (int) us; + + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + signed char sc; + unsigned char uc; + signed short ss; + unsigned short us; + unsigned long ul; + + args[0] = &ffi_type_schar; + args[1] = &ffi_type_sshort; + args[2] = &ffi_type_uchar; + args[3] = &ffi_type_ushort; + values[0] = ≻ + values[1] = &ss; + values[2] = &uc; + values[3] = &us; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + us = 0; + ul = 0; + + for (sc = (signed char) -127; + sc <= (signed char) 120; sc += 1) + for (ss = -30000; ss <= 30000; ss += 10000) + for (uc = (unsigned char) 0; + uc <= (unsigned char) 200; uc += 20) + for (us = 0; us <= 60000; us += 10000) + { + ul++; + ffi_call(&cif, FFI_FN(promotion), &rint, values); + CHECK((int)rint == (signed char) sc + (signed short) ss + + (unsigned char) uc + (unsigned short) us); + } + printf("%lu promotion tests run\n", ul); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c new file mode 100644 index 0000000..4456161 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Promotion test. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +static int promotion(signed char sc, signed short ss, + unsigned char uc, unsigned short us) +{ + int r = (int) sc + (int) ss + (int) uc + (int) us; + + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + signed char sc; + unsigned char uc; + signed short ss; + unsigned short us; + unsigned long ul; + + args[0] = &ffi_type_schar; + args[1] = &ffi_type_sshort; + args[2] = &ffi_type_uchar; + args[3] = &ffi_type_ushort; + values[0] = ≻ + values[1] = &ss; + values[2] = &uc; + values[3] = &us; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + us = 0; + ul = 0; + + for (sc = (signed char) -127; + sc <= (signed char) 120; sc += 1) + for (ss = -30000; ss <= 30000; ss += 10000) + for (uc = (unsigned char) 0; + uc <= (unsigned char) 200; uc += 20) + for (us = 0; us <= 60000; us += 10000) + { + ul++; + ffi_call(&cif, FFI_FN(promotion), &rint, values); + CHECK((int)rint == (signed char) sc + (signed short) ss + + (unsigned char) uc + (unsigned short) us); + } + printf("%lu promotion tests run\n", ul); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc 2.c new file mode 100644 index 0000000..e29bd6c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc 2.c @@ -0,0 +1,114 @@ +/* Area: ffi_call + Purpose: Check different structures. + Limitations: none. + PR: none. 
+ Originator: Ronald Oussoren 20030824 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct Point { + float x; + float y; +} Point; + +typedef struct Size { + float h; + float w; +} Size; + +typedef struct Rect { + Point o; + Size s; +} Rect; + +int doit(int o, char* s, Point p, Rect r, int last) +{ + printf("CALLED WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, s, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, last); + return 42; +} + + +int main(void) +{ + ffi_type point_type; + ffi_type size_type; + ffi_type rect_type; + ffi_cif cif; + ffi_type* arglist[6]; + void* values[6]; + int r; + + /* + * First set up FFI types for the 3 struct types + */ + + point_type.size = 0; /*sizeof(Point);*/ + point_type.alignment = 0; /*__alignof__(Point);*/ + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = malloc(3 * sizeof(ffi_type*)); + point_type.elements[0] = &ffi_type_float; + point_type.elements[1] = &ffi_type_float; + point_type.elements[2] = NULL; + + size_type.size = 0;/* sizeof(Size);*/ + size_type.alignment = 0;/* __alignof__(Size);*/ + size_type.type = FFI_TYPE_STRUCT; + size_type.elements = malloc(3 * sizeof(ffi_type*)); + size_type.elements[0] = &ffi_type_float; + size_type.elements[1] = &ffi_type_float; + size_type.elements[2] = NULL; + + rect_type.size = 0;/*sizeof(Rect);*/ + rect_type.alignment =0;/* __alignof__(Rect);*/ + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = malloc(3 * sizeof(ffi_type*)); + rect_type.elements[0] = &point_type; + rect_type.elements[1] = &size_type; + rect_type.elements[2] = NULL; + + /* + * Create a CIF + */ + arglist[0] = &ffi_type_sint; + arglist[1] = &ffi_type_pointer; + arglist[2] = &point_type; + arglist[3] = &rect_type; + arglist[4] = &ffi_type_sint; + arglist[5] = NULL; + + r = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 5, &ffi_type_sint, arglist); + if (r != FFI_OK) { + abort(); + } + + + /* And call the function through the CIF */ + + { + Point p = { 1.0, 2.0 }; + Rect r = { { 9.0, 10.0}, { -1.0, -2.0 } }; + int o = 0; + int l = 42; + char* m = "myMethod"; + ffi_arg result; + + values[0] = &o; + values[1] = &m; + values[2] = &p; + values[3] = &r; + values[4] = &l; + values[5] = NULL; + + printf("CALLING WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, m, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, l); + + ffi_call(&cif, FFI_FN(doit), &result, values); + + printf ("The result is %d\n", (int)result); + + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c new file mode 100644 index 0000000..e29bd6c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c @@ -0,0 +1,114 @@ +/* Area: ffi_call + Purpose: Check different structures. + Limitations: none. + PR: none. 
+ Originator: Ronald Oussoren 20030824 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct Point { + float x; + float y; +} Point; + +typedef struct Size { + float h; + float w; +} Size; + +typedef struct Rect { + Point o; + Size s; +} Rect; + +int doit(int o, char* s, Point p, Rect r, int last) +{ + printf("CALLED WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, s, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, last); + return 42; +} + + +int main(void) +{ + ffi_type point_type; + ffi_type size_type; + ffi_type rect_type; + ffi_cif cif; + ffi_type* arglist[6]; + void* values[6]; + int r; + + /* + * First set up FFI types for the 3 struct types + */ + + point_type.size = 0; /*sizeof(Point);*/ + point_type.alignment = 0; /*__alignof__(Point);*/ + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = malloc(3 * sizeof(ffi_type*)); + point_type.elements[0] = &ffi_type_float; + point_type.elements[1] = &ffi_type_float; + point_type.elements[2] = NULL; + + size_type.size = 0;/* sizeof(Size);*/ + size_type.alignment = 0;/* __alignof__(Size);*/ + size_type.type = FFI_TYPE_STRUCT; + size_type.elements = malloc(3 * sizeof(ffi_type*)); + size_type.elements[0] = &ffi_type_float; + size_type.elements[1] = &ffi_type_float; + size_type.elements[2] = NULL; + + rect_type.size = 0;/*sizeof(Rect);*/ + rect_type.alignment =0;/* __alignof__(Rect);*/ + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = malloc(3 * sizeof(ffi_type*)); + rect_type.elements[0] = &point_type; + rect_type.elements[1] = &size_type; + rect_type.elements[2] = NULL; + + /* + * Create a CIF + */ + arglist[0] = &ffi_type_sint; + arglist[1] = &ffi_type_pointer; + arglist[2] = &point_type; + arglist[3] = &rect_type; + arglist[4] = &ffi_type_sint; + arglist[5] = NULL; + + r = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 5, &ffi_type_sint, arglist); + if (r != FFI_OK) { + abort(); + } + + + /* And call the function through the CIF */ + + { + Point p = { 1.0, 2.0 }; + Rect r = { { 9.0, 10.0}, { -1.0, -2.0 } }; + int o = 0; + int l = 42; + char* m = "myMethod"; + ffi_arg result; + + values[0] = &o; + values[1] = &m; + values[2] = &p; + values[3] = &r; + values[4] = &l; + values[5] = NULL; + + printf("CALLING WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, m, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, l); + + ffi_call(&cif, FFI_FN(doit), &result, values); + + printf ("The result is %d\n", (int)result); + + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl 2.c new file mode 100644 index 0000000..fd07e50 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl 2.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl) +{ + printf ("%f\n", dbl); + return 2 * dbl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl, rdbl; + + args[0] = &ffi_type_double; + values[0] = &dbl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + for (dbl = -127.3; dbl < 127; dbl++) + { + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl)); + CHECK(rdbl == 2 * dbl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c new file mode 100644 index 0000000..fd07e50 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl) +{ + printf ("%f\n", dbl); + return 2 * dbl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl, rdbl; + + args[0] = &ffi_type_double; + values[0] = &dbl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + for (dbl = -127.3; dbl < 127; dbl++) + { + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl)); + CHECK(rdbl == 2 * dbl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1 2.c new file mode 100644 index 0000000..0ea5d50 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1 2.c @@ -0,0 +1,43 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, float fl2, unsigned int in3, double dbl4) +{ + return dbl1 + fl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl4, rdbl; + float fl2; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + fl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, fl2, in3, dbl4)); + CHECK(rdbl == dbl1 + fl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c new file mode 100644 index 0000000..0ea5d50 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c @@ -0,0 +1,43 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, float fl2, unsigned int in3, double dbl4) +{ + return dbl1 + fl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl4, rdbl; + float fl2; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + fl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, fl2, in3, dbl4)); + CHECK(rdbl == dbl1 + fl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2 2.c new file mode 100644 index 0000000..b3818f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2 2.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, double dbl2, unsigned int in3, double dbl4) +{ + return dbl1 + dbl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl2, dbl4, rdbl; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_double; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &dbl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + dbl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4)); + CHECK(rdbl == dbl1 + dbl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c new file mode 100644 index 0000000..b3818f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, double dbl2, unsigned int in3, double dbl4) +{ + return dbl1 + dbl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl2, dbl4, rdbl; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_double; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &dbl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + dbl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4)); + CHECK(rdbl == dbl1 + dbl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl 2.c new file mode 100644 index 0000000..fb8a09e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl 2.c @@ -0,0 +1,35 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl) +{ + return 2 * fl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl, rfl; + + args[0] = &ffi_type_float; + values[0] = &fl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, args) == FFI_OK); + + for (fl = -127.0; fl < 127; fl++) + { + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl)); + CHECK(rfl == 2 * fl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c new file mode 100644 index 0000000..fb8a09e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c @@ -0,0 +1,35 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl) +{ + return 2 * fl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl, rfl; + + args[0] = &ffi_type_float; + values[0] = &fl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, args) == FFI_OK); + + for (fl = -127.0; fl < 127; fl++) + { + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl)); + CHECK(rfl == 2 * fl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1 2.c new file mode 100644 index 0000000..c3d92c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1 2.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2) +{ + return fl1 + fl2; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, rfl; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2)); + CHECK(rfl == fl1 + fl2); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c new file mode 100644 index 0000000..c3d92c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2) +{ + return fl1 + fl2; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, rfl; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2)); + CHECK(rfl == fl1 + fl2); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2 2.c new file mode 100644 index 0000000..ddb976c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2 2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +/* Use volatile float to avoid false negative on ix86. See PR target/323. 
*/ +static float return_fl(float fl1, float fl2, float fl3, float fl4) +{ + volatile float sum; + + sum = fl1 + fl2 + fl3 + fl4; + return sum; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl3, fl4, rfl; + volatile float sum; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_float; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &fl3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + fl3 = 255.1; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, fl3, fl4)); + + sum = fl1 + fl2 + fl3 + fl4; + CHECK(rfl == sum); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c new file mode 100644 index 0000000..ddb976c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +/* Use volatile float to avoid false negative on ix86. See PR target/323. */ +static float return_fl(float fl1, float fl2, float fl3, float fl4) +{ + volatile float sum; + + sum = fl1 + fl2 + fl3 + fl4; + return sum; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl3, fl4, rfl; + volatile float sum; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_float; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &fl3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + fl3 = 255.1; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, fl3, fl4)); + + sum = fl1 + fl2 + fl3 + fl4; + CHECK(rfl == sum); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3 2.c new file mode 100644 index 0000000..c37877b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3 2.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2, unsigned int in3, float fl4) +{ + return fl1 + fl2 + in3 + fl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl4, rfl; + unsigned int in3; + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + in3 = 255; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, in3, fl4)); + CHECK(rfl == fl1 + fl2 + in3 + fl4); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c new file mode 100644 index 0000000..c37877b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2, unsigned int in3, float fl4) +{ + return fl1 + fl2 + in3 + fl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl4, rfl; + unsigned int in3; + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + in3 = 255; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, in3, fl4)); + CHECK(rfl == fl1 + fl2 + in3 + fl4); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl 2.c new file mode 100644 index 0000000..52a92fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl 2.c @@ -0,0 +1,34 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. 
+ Originator: 20071113 */ +/* { dg-do run } */ + +#include "ffitest.h" + +static long double return_ldl(long double ldl) +{ + return 2*ldl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long double ldl, rldl; + + args[0] = &ffi_type_longdouble; + values[0] = &ldl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_longdouble, args) == FFI_OK); + + for (ldl = -127.0; ldl < 127.0; ldl++) + { + ffi_call(&cif, FFI_FN(return_ldl), &rldl, values); + CHECK(rldl == 2 * ldl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c new file mode 100644 index 0000000..52a92fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c @@ -0,0 +1,34 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. + Originator: 20071113 */ +/* { dg-do run } */ + +#include "ffitest.h" + +static long double return_ldl(long double ldl) +{ + return 2*ldl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long double ldl, rldl; + + args[0] = &ffi_type_longdouble; + values[0] = &ldl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_longdouble, args) == FFI_OK); + + for (ldl = -127.0; ldl < 127.0; ldl++) + { + ffi_call(&cif, FFI_FN(return_ldl), &rldl, values); + CHECK(rldl == 2 * ldl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll 2.c new file mode 100644 index 0000000..ea4a1e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll 2.c @@ -0,0 +1,41 @@ +/* Area: ffi_call + Purpose: Check return value long long. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +static long long return_ll(long long ll) +{ + return ll; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long long rlonglong; + long long ll; + + args[0] = &ffi_type_sint64; + values[0] = ≪ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sint64, args) == FFI_OK); + + for (ll = 0LL; ll < 100LL; ll++) + { + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + CHECK(rlonglong == ll); + } + + for (ll = 55555555555000LL; ll < 55555555555100LL; ll++) + { + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + CHECK(rlonglong == ll); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c new file mode 100644 index 0000000..ea4a1e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c @@ -0,0 +1,41 @@ +/* Area: ffi_call + Purpose: Check return value long long. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +static long long return_ll(long long ll) +{ + return ll; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long long rlonglong; + long long ll; + + args[0] = &ffi_type_sint64; + values[0] = ≪ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sint64, args) == FFI_OK); + + for (ll = 0LL; ll < 100LL; ll++) + { + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + CHECK(rlonglong == ll); + } + + for (ll = 55555555555000LL; ll < 55555555555100LL; ll++) + { + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + CHECK(rlonglong == ll); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1 2.c new file mode 100644 index 0000000..593e8a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1 2.c @@ -0,0 +1,43 @@ +/* Area: ffi_call + Purpose: Check if long long are passed in the corresponding regs on ppc. + Limitations: none. + PR: 20104. + Originator: 20050222 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" +static long long return_ll(int ll0, long long ll1, int ll2) +{ + return ll0 + ll1 + ll2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long long rlonglong; + long long ll1; + unsigned ll0, ll2; + + args[0] = &ffi_type_sint; + args[1] = &ffi_type_sint64; + args[2] = &ffi_type_sint; + values[0] = &ll0; + values[1] = &ll1; + values[2] = &ll2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint64, args) == FFI_OK); + + ll0 = 11111111; + ll1 = 11111111111000LL; + ll2 = 11111111; + + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + printf("res: %" PRIdLL ", %" PRIdLL "\n", rlonglong, ll0 + ll1 + ll2); + /* { dg-output "res: 11111133333222, 11111133333222" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c new file mode 100644 index 0000000..593e8a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c @@ -0,0 +1,43 @@ +/* Area: ffi_call + Purpose: Check if long long are passed in the corresponding regs on ppc. + Limitations: none. + PR: 20104. 
+ Originator: 20050222 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" +static long long return_ll(int ll0, long long ll1, int ll2) +{ + return ll0 + ll1 + ll2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long long rlonglong; + long long ll1; + unsigned ll0, ll2; + + args[0] = &ffi_type_sint; + args[1] = &ffi_type_sint64; + args[2] = &ffi_type_sint; + values[0] = &ll0; + values[1] = &ll1; + values[2] = &ll2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint64, args) == FFI_OK); + + ll0 = 11111111; + ll1 = 11111111111000LL; + ll2 = 11111111; + + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + printf("res: %" PRIdLL ", %" PRIdLL "\n", rlonglong, ll0 + ll1 + ll2); + /* { dg-output "res: 11111133333222, 11111133333222" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc 2.c new file mode 100644 index 0000000..a36cf3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc 2.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value signed char. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static signed char return_sc(signed char sc) +{ + return sc; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + signed char sc; + + args[0] = &ffi_type_schar; + values[0] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, args) == FFI_OK); + + for (sc = (signed char) -127; + sc < (signed char) 127; sc++) + { + ffi_call(&cif, FFI_FN(return_sc), &rint, values); + CHECK((signed char)rint == sc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c new file mode 100644 index 0000000..a36cf3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value signed char. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static signed char return_sc(signed char sc) +{ + return sc; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + signed char sc; + + args[0] = &ffi_type_schar; + values[0] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, args) == FFI_OK); + + for (sc = (signed char) -127; + sc < (signed char) 127; sc++) + { + ffi_call(&cif, FFI_FN(return_sc), &rint, values); + CHECK((signed char)rint == sc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl 2.c new file mode 100644 index 0000000..f0fd345 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl 2.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if long as return type is handled correctly. 
+ Limitations: none. + PR: none. + */ + +/* { dg-do run } */ +#include "ffitest.h" +static long return_sl(long l1, long l2) +{ + return l1 - l2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long l1, l2; + + args[0] = &ffi_type_slong; + args[1] = &ffi_type_slong; + values[0] = &l1; + values[1] = &l2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_slong, args) == FFI_OK); + + l1 = 1073741823L; + l2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_sl), &res, values); + printf("res: %ld, %ld\n", (long)res, l1 - l2); + /* { dg-output "res: -1, -1" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c new file mode 100644 index 0000000..f0fd345 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if long as return type is handled correctly. + Limitations: none. + PR: none. + */ + +/* { dg-do run } */ +#include "ffitest.h" +static long return_sl(long l1, long l2) +{ + return l1 - l2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long l1, l2; + + args[0] = &ffi_type_slong; + args[1] = &ffi_type_slong; + values[0] = &l1; + values[1] = &l2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_slong, args) == FFI_OK); + + l1 = 1073741823L; + l2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_sl), &res, values); + printf("res: %ld, %ld\n", (long)res, l1 - l2); + /* { dg-output "res: -1, -1" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc 2.c new file mode 100644 index 0000000..6fe5546 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc 2.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check return value unsigned char. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static unsigned char return_uc(unsigned char uc) +{ + return uc; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + unsigned char uc; + + args[0] = &ffi_type_uchar; + values[0] = &uc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, args) == FFI_OK); + + for (uc = (unsigned char) '\x00'; + uc < (unsigned char) '\xff'; uc++) + { + ffi_call(&cif, FFI_FN(return_uc), &rint, values); + CHECK((unsigned char)rint == uc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c new file mode 100644 index 0000000..6fe5546 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check return value unsigned char. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static unsigned char return_uc(unsigned char uc) +{ + return uc; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + unsigned char uc; + + args[0] = &ffi_type_uchar; + values[0] = &uc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, args) == FFI_OK); + + for (uc = (unsigned char) '\x00'; + uc < (unsigned char) '\xff'; uc++) + { + ffi_call(&cif, FFI_FN(return_uc), &rint, values); + CHECK((unsigned char)rint == uc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul 2.c new file mode 100644 index 0000000..12b266f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul 2.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if unsigned long as return type is handled correctly. + Limitations: none. + PR: none. + Originator: 20060724 */ + +/* { dg-do run } */ +#include "ffitest.h" +static unsigned long return_ul(unsigned long ul1, unsigned long ul2) +{ + return ul1 + ul2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long ul1, ul2; + + args[0] = &ffi_type_ulong; + args[1] = &ffi_type_ulong; + values[0] = &ul1; + values[1] = &ul2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ulong, args) == FFI_OK); + + ul1 = 1073741823L; + ul2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_ul), &res, values); + printf("res: %lu, %lu\n", (unsigned long)res, ul1 + ul2); + /* { dg-output "res: 2147483647, 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c new file mode 100644 index 0000000..12b266f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if unsigned long as return type is handled correctly. + Limitations: none. + PR: none. + Originator: 20060724 */ + +/* { dg-do run } */ +#include "ffitest.h" +static unsigned long return_ul(unsigned long ul1, unsigned long ul2) +{ + return ul1 + ul2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long ul1, ul2; + + args[0] = &ffi_type_ulong; + args[1] = &ffi_type_ulong; + values[0] = &ul1; + values[1] = &ul2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ulong, args) == FFI_OK); + + ul1 = 1073741823L; + ul2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_ul), &res, values); + printf("res: %lu, %lu\n", (unsigned long)res, ul1 + ul2); + /* { dg-output "res: 2147483647, 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen 2.c new file mode 100644 index 0000000..35b70ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen 2.c @@ -0,0 +1,44 @@ +/* Area: ffi_call + Purpose: Check strlen function call. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static size_t ABI_ATTR my_strlen(char *s) +{ + return (strlen(s)); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + + args[0] = &ffi_type_pointer; + values[0] = (void*) &s; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 7); + + s = "1234567890123456789012345"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 25); + + exit (0); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c new file mode 100644 index 0000000..35b70ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c @@ -0,0 +1,44 @@ +/* Area: ffi_call + Purpose: Check strlen function call. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static size_t ABI_ATTR my_strlen(char *s) +{ + return (strlen(s)); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + + args[0] = &ffi_type_pointer; + values[0] = (void*) &s; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 7); + + s = "1234567890123456789012345"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 25); + + exit (0); +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2 2.c new file mode 100644 index 0000000..96282bc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2 2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(char *s, float a) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[0] = &ffi_type_pointer; + args[1] = &ffi_type_float; + values[0] = (void*) &s; + values[1] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c new file mode 100644 index 0000000..96282bc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(char *s, float a) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[0] = &ffi_type_pointer; + args[1] = &ffi_type_float; + values[0] = (void*) &s; + values[1] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3 2.c new file mode 100644 index 0000000..beba86e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3 2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c new file mode 100644 index 0000000..beba86e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4 2.c new file mode 100644 index 0000000..d5d42b4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4 2.c @@ -0,0 +1,55 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s, int i) +{ + return (size_t) ((int) strlen(s) + (int) a + i); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + int v1; + float v2; + args[2] = &ffi_type_sint; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[2] = (void*) &v1; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 3, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v1 = 1; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 2); + + s = "1234567"; + v2 = -1.0; + v1 = -2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 4); + + s = "1234567890123456789012345"; + v2 = 1.0; + v1 = 2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 28); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c new file mode 100644 index 0000000..d5d42b4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c @@ -0,0 +1,55 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s, int i) +{ + return (size_t) ((int) strlen(s) + (int) a + i); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + int v1; + float v2; + args[2] = &ffi_type_sint; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[2] = (void*) &v1; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 3, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v1 = 1; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 2); + + s = "1234567"; + v2 = -1.0; + v1 = -2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 4); + + s = "1234567890123456789012345"; + v2 = 1.0; + v1 = 2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 28); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1 2.c new file mode 100644 index 0000000..c13e23f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1 2.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 ABI_ATTR struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + test_structure_1 ts1_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c new file mode 100644 index 0000000..c13e23f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 ABI_ATTR struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + test_structure_1 ts1_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10 2.c new file mode 100644 index 0000000..17b1377 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10 2.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: Sergei Trofimovich + + The test originally discovered in ruby's bindings + for ffi in https://bugs.gentoo.org/634190 */ + +/* { dg-do run } */ +#include "ffitest.h" + +struct s { + int s32; + float f32; + signed char s8; +}; + +struct s make_s(void) { + struct s r; + r.s32 = 0x1234; + r.f32 = 7.0; + r.s8 = 0x78; + return r; +} + +int main() { + ffi_cif cif; + struct s r; + ffi_type rtype; + ffi_type* s_fields[] = { + &ffi_type_sint, + &ffi_type_float, + &ffi_type_schar, + NULL, + }; + + rtype.size = 0; + rtype.alignment = 0, + rtype.type = FFI_TYPE_STRUCT, + rtype.elements = s_fields, + + r.s32 = 0xbad; + r.f32 = 999.999; + r.s8 = 0x51; + + // Here we emulate the following call: + //r = make_s(); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &rtype, NULL) == FFI_OK); + ffi_call(&cif, FFI_FN(make_s), &r, NULL); + + CHECK(r.s32 == 0x1234); + CHECK(r.f32 == 7.0); + CHECK(r.s8 == 0x78); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c new file mode 100644 index 0000000..17b1377 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: Sergei Trofimovich + + The test originally discovered in ruby's bindings + for ffi in https://bugs.gentoo.org/634190 */ + +/* { dg-do run } */ +#include "ffitest.h" + +struct s { + int s32; + float f32; + signed char s8; +}; + +struct s make_s(void) { + struct s r; + r.s32 = 0x1234; + r.f32 = 7.0; + r.s8 = 0x78; + return r; +} + +int main() { + ffi_cif cif; + struct s r; + ffi_type rtype; + ffi_type* s_fields[] = { + &ffi_type_sint, + &ffi_type_float, + &ffi_type_schar, + NULL, + }; + + rtype.size = 0; + rtype.alignment = 0, + rtype.type = FFI_TYPE_STRUCT, + rtype.elements = s_fields, + + r.s32 = 0xbad; + r.f32 = 999.999; + r.s8 = 0x51; + + // Here we emulate the following call: + //r = make_s(); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &rtype, NULL) == FFI_OK); + ffi_call(&cif, FFI_FN(make_s), &r, NULL); + + CHECK(r.s32 == 0x1234); + CHECK(r.f32 == 7.0); + CHECK(r.s8 == 0x78); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2 2.c new file mode 100644 index 0000000..5077a5e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2 2.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + double d1; + double d2; +} test_structure_2; + +static test_structure_2 ABI_ATTR struct2(test_structure_2 ts) +{ + ts.d1--; + ts.d2--; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + test_structure_2 ts2_arg; + ffi_type ts2_type; + ffi_type *ts2_type_elements[3]; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_2 *ts2_result = + (test_structure_2 *) malloc (sizeof(test_structure_2)); + + ts2_type.size = 0; + ts2_type.alignment = 0; + ts2_type.type = FFI_TYPE_STRUCT; + ts2_type.elements = ts2_type_elements; + ts2_type_elements[0] = &ffi_type_double; + ts2_type_elements[1] = &ffi_type_double; + ts2_type_elements[2] = NULL; + + args[0] = &ts2_type; + values[0] = &ts2_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts2_type, args) == FFI_OK); + + ts2_arg.d1 = 5.55; + ts2_arg.d2 = 6.66; + + printf ("%g\n", ts2_arg.d1); + printf ("%g\n", ts2_arg.d2); + + ffi_call(&cif, FFI_FN(struct2), ts2_result, values); + + printf ("%g\n", ts2_result->d1); + printf ("%g\n", ts2_result->d2); + + CHECK(ts2_result->d1 == 5.55 - 1); + CHECK(ts2_result->d2 == 6.66 - 1); + + free (ts2_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c new file mode 100644 index 0000000..5077a5e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + double d1; + double d2; +} test_structure_2; + +static test_structure_2 ABI_ATTR struct2(test_structure_2 ts) +{ + ts.d1--; + ts.d2--; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + test_structure_2 ts2_arg; + ffi_type ts2_type; + ffi_type *ts2_type_elements[3]; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_2 *ts2_result = + (test_structure_2 *) malloc (sizeof(test_structure_2)); + + ts2_type.size = 0; + ts2_type.alignment = 0; + ts2_type.type = FFI_TYPE_STRUCT; + ts2_type.elements = ts2_type_elements; + ts2_type_elements[0] = &ffi_type_double; + ts2_type_elements[1] = &ffi_type_double; + ts2_type_elements[2] = NULL; + + args[0] = &ts2_type; + values[0] = &ts2_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts2_type, args) == FFI_OK); + + ts2_arg.d1 = 5.55; + ts2_arg.d2 = 6.66; + + printf ("%g\n", ts2_arg.d1); + printf ("%g\n", ts2_arg.d2); + + ffi_call(&cif, FFI_FN(struct2), ts2_result, values); + + printf ("%g\n", ts2_result->d1); + printf ("%g\n", ts2_result->d2); + + CHECK(ts2_result->d1 == 5.55 - 1); + CHECK(ts2_result->d2 == 6.66 - 1); + + free (ts2_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3 2.c new file mode 100644 index 0000000..7eba0ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3 2.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check structures. 
+ Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + int si; +} test_structure_3; + +static test_structure_3 ABI_ATTR struct3(test_structure_3 ts) +{ + ts.si = -(ts.si*2); + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + int compare_value; + ffi_type ts3_type; + ffi_type *ts3_type_elements[2]; + + test_structure_3 ts3_arg; + test_structure_3 *ts3_result = + (test_structure_3 *) malloc (sizeof(test_structure_3)); + + ts3_type.size = 0; + ts3_type.alignment = 0; + ts3_type.type = FFI_TYPE_STRUCT; + ts3_type.elements = ts3_type_elements; + ts3_type_elements[0] = &ffi_type_sint; + ts3_type_elements[1] = NULL; + + args[0] = &ts3_type; + values[0] = &ts3_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts3_type, args) == FFI_OK); + + ts3_arg.si = -123; + compare_value = ts3_arg.si; + + ffi_call(&cif, FFI_FN(struct3), ts3_result, values); + + printf ("%d %d\n", ts3_result->si, -(compare_value*2)); + + CHECK(ts3_result->si == -(compare_value*2)); + + free (ts3_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c new file mode 100644 index 0000000..7eba0ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + int si; +} test_structure_3; + +static test_structure_3 ABI_ATTR struct3(test_structure_3 ts) +{ + ts.si = -(ts.si*2); + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + int compare_value; + ffi_type ts3_type; + ffi_type *ts3_type_elements[2]; + + test_structure_3 ts3_arg; + test_structure_3 *ts3_result = + (test_structure_3 *) malloc (sizeof(test_structure_3)); + + ts3_type.size = 0; + ts3_type.alignment = 0; + ts3_type.type = FFI_TYPE_STRUCT; + ts3_type.elements = ts3_type_elements; + ts3_type_elements[0] = &ffi_type_sint; + ts3_type_elements[1] = NULL; + + args[0] = &ts3_type; + values[0] = &ts3_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts3_type, args) == FFI_OK); + + ts3_arg.si = -123; + compare_value = ts3_arg.si; + + ffi_call(&cif, FFI_FN(struct3), ts3_result, values); + + printf ("%d %d\n", ts3_result->si, -(compare_value*2)); + + CHECK(ts3_result->si == -(compare_value*2)); + + free (ts3_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4 2.c new file mode 100644 index 0000000..66a9551 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4 2.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned ui1; + unsigned ui2; + unsigned ui3; +} test_structure_4; + +static test_structure_4 ABI_ATTR struct4(test_structure_4 ts) +{ + ts.ui3 = ts.ui1 * ts.ui2 * ts.ui3; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts4_type; + ffi_type *ts4_type_elements[4]; + + test_structure_4 ts4_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_4 *ts4_result = + (test_structure_4 *) malloc (sizeof(test_structure_4)); + + ts4_type.size = 0; + ts4_type.alignment = 0; + ts4_type.type = FFI_TYPE_STRUCT; + ts4_type.elements = ts4_type_elements; + ts4_type_elements[0] = &ffi_type_uint; + ts4_type_elements[1] = &ffi_type_uint; + ts4_type_elements[2] = &ffi_type_uint; + ts4_type_elements[3] = NULL; + + args[0] = &ts4_type; + values[0] = &ts4_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts4_type, args) == FFI_OK); + + ts4_arg.ui1 = 2; + ts4_arg.ui2 = 3; + ts4_arg.ui3 = 4; + + ffi_call (&cif, FFI_FN(struct4), ts4_result, values); + + CHECK(ts4_result->ui3 == 2U * 3U * 4U); + + + free (ts4_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c new file mode 100644 index 0000000..66a9551 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned ui1; + unsigned ui2; + unsigned ui3; +} test_structure_4; + +static test_structure_4 ABI_ATTR struct4(test_structure_4 ts) +{ + ts.ui3 = ts.ui1 * ts.ui2 * ts.ui3; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts4_type; + ffi_type *ts4_type_elements[4]; + + test_structure_4 ts4_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_4 *ts4_result = + (test_structure_4 *) malloc (sizeof(test_structure_4)); + + ts4_type.size = 0; + ts4_type.alignment = 0; + ts4_type.type = FFI_TYPE_STRUCT; + ts4_type.elements = ts4_type_elements; + ts4_type_elements[0] = &ffi_type_uint; + ts4_type_elements[1] = &ffi_type_uint; + ts4_type_elements[2] = &ffi_type_uint; + ts4_type_elements[3] = NULL; + + args[0] = &ts4_type; + values[0] = &ts4_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts4_type, args) == FFI_OK); + + ts4_arg.ui1 = 2; + ts4_arg.ui2 = 3; + ts4_arg.ui3 = 4; + + ffi_call (&cif, FFI_FN(struct4), ts4_result, values); + + CHECK(ts4_result->ui3 == 2U * 3U * 4U); + + + free (ts4_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5 2.c new file mode 100644 index 0000000..23e2a3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5 2.c @@ -0,0 +1,66 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + char c1; + char c2; +} test_structure_5; + +static test_structure_5 ABI_ATTR struct5(test_structure_5 ts1, test_structure_5 ts2) +{ + ts1.c1 += ts2.c1; + ts1.c2 -= ts2.c2; + + return ts1; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts5_type; + ffi_type *ts5_type_elements[3]; + + test_structure_5 ts5_arg1, ts5_arg2; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_5 *ts5_result = + (test_structure_5 *) malloc (sizeof(test_structure_5)); + + ts5_type.size = 0; + ts5_type.alignment = 0; + ts5_type.type = FFI_TYPE_STRUCT; + ts5_type.elements = ts5_type_elements; + ts5_type_elements[0] = &ffi_type_schar; + ts5_type_elements[1] = &ffi_type_schar; + ts5_type_elements[2] = NULL; + + args[0] = &ts5_type; + args[1] = &ts5_type; + values[0] = &ts5_arg1; + values[1] = &ts5_arg2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, &ts5_type, args) == FFI_OK); + + ts5_arg1.c1 = 2; + ts5_arg1.c2 = 6; + ts5_arg2.c1 = 5; + ts5_arg2.c2 = 3; + + ffi_call (&cif, FFI_FN(struct5), ts5_result, values); + + CHECK(ts5_result->c1 == 7); + CHECK(ts5_result->c2 == 3); + + + free (ts5_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c new file mode 100644 index 0000000..23e2a3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c @@ -0,0 +1,66 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + char c1; + char c2; +} test_structure_5; + +static test_structure_5 ABI_ATTR struct5(test_structure_5 ts1, test_structure_5 ts2) +{ + ts1.c1 += ts2.c1; + ts1.c2 -= ts2.c2; + + return ts1; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts5_type; + ffi_type *ts5_type_elements[3]; + + test_structure_5 ts5_arg1, ts5_arg2; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_5 *ts5_result = + (test_structure_5 *) malloc (sizeof(test_structure_5)); + + ts5_type.size = 0; + ts5_type.alignment = 0; + ts5_type.type = FFI_TYPE_STRUCT; + ts5_type.elements = ts5_type_elements; + ts5_type_elements[0] = &ffi_type_schar; + ts5_type_elements[1] = &ffi_type_schar; + ts5_type_elements[2] = NULL; + + args[0] = &ts5_type; + args[1] = &ts5_type; + values[0] = &ts5_arg1; + values[1] = &ts5_arg2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, &ts5_type, args) == FFI_OK); + + ts5_arg1.c1 = 2; + ts5_arg1.c2 = 6; + ts5_arg2.c1 = 5; + ts5_arg2.c2 = 3; + + ffi_call (&cif, FFI_FN(struct5), ts5_result, values); + + CHECK(ts5_result->c1 == 7); + CHECK(ts5_result->c2 == 3); + + + free (ts5_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6 2.c new file mode 100644 index 0000000..173c66e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6 2.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f; + double d; +} test_structure_6; + +static test_structure_6 ABI_ATTR struct6 (test_structure_6 ts) +{ + ts.f += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts6_type; + ffi_type *ts6_type_elements[3]; + + test_structure_6 ts6_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_6 *ts6_result = + (test_structure_6 *) malloc (sizeof(test_structure_6)); + + ts6_type.size = 0; + ts6_type.alignment = 0; + ts6_type.type = FFI_TYPE_STRUCT; + ts6_type.elements = ts6_type_elements; + ts6_type_elements[0] = &ffi_type_float; + ts6_type_elements[1] = &ffi_type_double; + ts6_type_elements[2] = NULL; + + args[0] = &ts6_type; + values[0] = &ts6_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts6_type, args) == FFI_OK); + + ts6_arg.f = 5.55f; + ts6_arg.d = 6.66; + + printf ("%g\n", ts6_arg.f); + printf ("%g\n", ts6_arg.d); + + ffi_call(&cif, FFI_FN(struct6), ts6_result, values); + + CHECK(ts6_result->f == 5.55f + 1); + CHECK(ts6_result->d == 6.66 + 1); + + free (ts6_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c new file mode 100644 index 0000000..173c66e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f; + double d; +} test_structure_6; + +static test_structure_6 ABI_ATTR struct6 (test_structure_6 ts) +{ + ts.f += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts6_type; + ffi_type *ts6_type_elements[3]; + + test_structure_6 ts6_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_6 *ts6_result = + (test_structure_6 *) malloc (sizeof(test_structure_6)); + + ts6_type.size = 0; + ts6_type.alignment = 0; + ts6_type.type = FFI_TYPE_STRUCT; + ts6_type.elements = ts6_type_elements; + ts6_type_elements[0] = &ffi_type_float; + ts6_type_elements[1] = &ffi_type_double; + ts6_type_elements[2] = NULL; + + args[0] = &ts6_type; + values[0] = &ts6_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts6_type, args) == FFI_OK); + + ts6_arg.f = 5.55f; + ts6_arg.d = 6.66; + + printf ("%g\n", ts6_arg.f); + printf ("%g\n", ts6_arg.d); + + ffi_call(&cif, FFI_FN(struct6), ts6_result, values); + + CHECK(ts6_result->f == 5.55f + 1); + CHECK(ts6_result->d == 6.66 + 1); + + free (ts6_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7 2.c new file mode 100644 index 0000000..badc7e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7 2.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + double d; +} test_structure_7; + +static test_structure_7 ABI_ATTR struct7 (test_structure_7 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts7_type; + ffi_type *ts7_type_elements[4]; + + test_structure_7 ts7_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_7 *ts7_result = + (test_structure_7 *) malloc (sizeof(test_structure_7)); + + ts7_type.size = 0; + ts7_type.alignment = 0; + ts7_type.type = FFI_TYPE_STRUCT; + ts7_type.elements = ts7_type_elements; + ts7_type_elements[0] = &ffi_type_float; + ts7_type_elements[1] = &ffi_type_float; + ts7_type_elements[2] = &ffi_type_double; + ts7_type_elements[3] = NULL; + + args[0] = &ts7_type; + values[0] = &ts7_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts7_type, args) == FFI_OK); + + ts7_arg.f1 = 5.55f; + ts7_arg.f2 = 55.5f; + ts7_arg.d = 6.66; + + printf ("%g\n", ts7_arg.f1); + printf ("%g\n", ts7_arg.f2); + printf ("%g\n", ts7_arg.d); + + ffi_call(&cif, FFI_FN(struct7), ts7_result, values); + + printf ("%g\n", ts7_result->f1); + printf ("%g\n", ts7_result->f2); + printf ("%g\n", ts7_result->d); + + CHECK(ts7_result->f1 == 5.55f + 1); + CHECK(ts7_result->f2 == 55.5f + 1); + CHECK(ts7_result->d == 6.66 + 1); + + free (ts7_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c new file mode 100644 index 0000000..badc7e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + double d; +} test_structure_7; + +static test_structure_7 ABI_ATTR struct7 (test_structure_7 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts7_type; + ffi_type *ts7_type_elements[4]; + + test_structure_7 ts7_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_7 *ts7_result = + (test_structure_7 *) malloc (sizeof(test_structure_7)); + + ts7_type.size = 0; + ts7_type.alignment = 0; + ts7_type.type = FFI_TYPE_STRUCT; + ts7_type.elements = ts7_type_elements; + ts7_type_elements[0] = &ffi_type_float; + ts7_type_elements[1] = &ffi_type_float; + ts7_type_elements[2] = &ffi_type_double; + ts7_type_elements[3] = NULL; + + args[0] = &ts7_type; + values[0] = &ts7_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts7_type, args) == FFI_OK); + + ts7_arg.f1 = 5.55f; + ts7_arg.f2 = 55.5f; + ts7_arg.d = 6.66; + + printf ("%g\n", ts7_arg.f1); + printf ("%g\n", ts7_arg.f2); + printf ("%g\n", ts7_arg.d); + + ffi_call(&cif, FFI_FN(struct7), ts7_result, values); + + printf ("%g\n", ts7_result->f1); + printf ("%g\n", ts7_result->f2); + printf ("%g\n", ts7_result->d); + + CHECK(ts7_result->f1 == 5.55f + 1); + CHECK(ts7_result->f2 == 55.5f + 1); + CHECK(ts7_result->d == 6.66 + 1); + + free (ts7_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8 2.c new file mode 100644 index 0000000..ef204ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8 2.c @@ -0,0 +1,81 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + float f3; + float f4; +} test_structure_8; + +static test_structure_8 ABI_ATTR struct8 (test_structure_8 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.f3 += 1; + ts.f4 += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts8_type; + ffi_type *ts8_type_elements[5]; + + test_structure_8 ts8_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_8 *ts8_result = + (test_structure_8 *) malloc (sizeof(test_structure_8)); + + ts8_type.size = 0; + ts8_type.alignment = 0; + ts8_type.type = FFI_TYPE_STRUCT; + ts8_type.elements = ts8_type_elements; + ts8_type_elements[0] = &ffi_type_float; + ts8_type_elements[1] = &ffi_type_float; + ts8_type_elements[2] = &ffi_type_float; + ts8_type_elements[3] = &ffi_type_float; + ts8_type_elements[4] = NULL; + + args[0] = &ts8_type; + values[0] = &ts8_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts8_type, args) == FFI_OK); + + ts8_arg.f1 = 5.55f; + ts8_arg.f2 = 55.5f; + ts8_arg.f3 = -5.55f; + ts8_arg.f4 = -55.5f; + + printf ("%g\n", ts8_arg.f1); + printf ("%g\n", ts8_arg.f2); + printf ("%g\n", ts8_arg.f3); + printf ("%g\n", ts8_arg.f4); + + ffi_call(&cif, FFI_FN(struct8), ts8_result, values); + + printf ("%g\n", ts8_result->f1); + printf ("%g\n", ts8_result->f2); + printf ("%g\n", ts8_result->f3); + printf ("%g\n", ts8_result->f4); + + CHECK(ts8_result->f1 == 5.55f + 1); + CHECK(ts8_result->f2 == 55.5f + 1); + CHECK(ts8_result->f3 == -5.55f + 1); + CHECK(ts8_result->f4 == -55.5f + 1); + + free (ts8_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c new file mode 100644 index 0000000..ef204ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c @@ -0,0 +1,81 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + float f3; + float f4; +} test_structure_8; + +static test_structure_8 ABI_ATTR struct8 (test_structure_8 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.f3 += 1; + ts.f4 += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts8_type; + ffi_type *ts8_type_elements[5]; + + test_structure_8 ts8_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_8 *ts8_result = + (test_structure_8 *) malloc (sizeof(test_structure_8)); + + ts8_type.size = 0; + ts8_type.alignment = 0; + ts8_type.type = FFI_TYPE_STRUCT; + ts8_type.elements = ts8_type_elements; + ts8_type_elements[0] = &ffi_type_float; + ts8_type_elements[1] = &ffi_type_float; + ts8_type_elements[2] = &ffi_type_float; + ts8_type_elements[3] = &ffi_type_float; + ts8_type_elements[4] = NULL; + + args[0] = &ts8_type; + values[0] = &ts8_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts8_type, args) == FFI_OK); + + ts8_arg.f1 = 5.55f; + ts8_arg.f2 = 55.5f; + ts8_arg.f3 = -5.55f; + ts8_arg.f4 = -55.5f; + + printf ("%g\n", ts8_arg.f1); + printf ("%g\n", ts8_arg.f2); + printf ("%g\n", ts8_arg.f3); + printf ("%g\n", ts8_arg.f4); + + ffi_call(&cif, FFI_FN(struct8), ts8_result, values); + + printf ("%g\n", ts8_result->f1); + printf ("%g\n", ts8_result->f2); + printf ("%g\n", ts8_result->f3); + printf ("%g\n", ts8_result->f4); + + CHECK(ts8_result->f1 == 5.55f + 1); + CHECK(ts8_result->f2 == 55.5f + 1); + CHECK(ts8_result->f3 == -5.55f + 1); + CHECK(ts8_result->f4 == -55.5f + 1); + + free (ts8_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9 2.c new file mode 100644 index 0000000..4a13b81 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9 2.c @@ -0,0 +1,68 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + float f; + int i; +} test_structure_9; + +static test_structure_9 ABI_ATTR struct9 (test_structure_9 ts) +{ + ts.f += 1; + ts.i += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts9_type; + ffi_type *ts9_type_elements[3]; + + test_structure_9 ts9_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_9 *ts9_result = + (test_structure_9 *) malloc (sizeof(test_structure_9)); + + ts9_type.size = 0; + ts9_type.alignment = 0; + ts9_type.type = FFI_TYPE_STRUCT; + ts9_type.elements = ts9_type_elements; + ts9_type_elements[0] = &ffi_type_float; + ts9_type_elements[1] = &ffi_type_sint; + ts9_type_elements[2] = NULL; + + args[0] = &ts9_type; + values[0] = &ts9_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts9_type, args) == FFI_OK); + + ts9_arg.f = 5.55f; + ts9_arg.i = 5; + + printf ("%g\n", ts9_arg.f); + printf ("%d\n", ts9_arg.i); + + ffi_call(&cif, FFI_FN(struct9), ts9_result, values); + + printf ("%g\n", ts9_result->f); + printf ("%d\n", ts9_result->i); + + CHECK(ts9_result->f == 5.55f + 1); + CHECK(ts9_result->i == 5 + 1); + + free (ts9_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c new file mode 100644 index 0000000..4a13b81 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c @@ -0,0 +1,68 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + float f; + int i; +} test_structure_9; + +static test_structure_9 ABI_ATTR struct9 (test_structure_9 ts) +{ + ts.f += 1; + ts.i += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts9_type; + ffi_type *ts9_type_elements[3]; + + test_structure_9 ts9_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_9 *ts9_result = + (test_structure_9 *) malloc (sizeof(test_structure_9)); + + ts9_type.size = 0; + ts9_type.alignment = 0; + ts9_type.type = FFI_TYPE_STRUCT; + ts9_type.elements = ts9_type_elements; + ts9_type_elements[0] = &ffi_type_float; + ts9_type_elements[1] = &ffi_type_sint; + ts9_type_elements[2] = NULL; + + args[0] = &ts9_type; + values[0] = &ts9_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts9_type, args) == FFI_OK); + + ts9_arg.f = 5.55f; + ts9_arg.i = 5; + + printf ("%g\n", ts9_arg.f); + printf ("%d\n", ts9_arg.i); + + ffi_call(&cif, FFI_FN(struct9), ts9_result, values); + + printf ("%g\n", ts9_result->f); + printf ("%d\n", ts9_result->i); + + CHECK(ts9_result->f == 5.55f + 1); + CHECK(ts9_result->i == 5 + 1); + + free (ts9_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized 2.c new file mode 100644 index 0000000..f00d830 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized 2.c @@ -0,0 +1,61 @@ +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned 
char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + memset(&cif, 1, sizeof(cif)); + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + test_structure_1 ts1_arg; + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c new file mode 100644 index 0000000..f00d830 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c @@ -0,0 +1,61 @@ +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + memset(&cif, 1, sizeof(cif)); + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + test_structure_1 ts1_arg; + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1 2.c new file mode 100644 index 0000000..59d085c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1 2.c @@ -0,0 +1,196 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* m68k-*-* alpha-*-* } } */ + +#include "ffitest.h" +#include <stdarg.h> + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + float f; + double d; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + + uc = va_arg (ap, unsigned); + sc = va_arg (ap, signed); + + us = va_arg (ap, unsigned); + ss = va_arg (ap, signed); + + ui = va_arg (ap, unsigned int); + si = va_arg (ap, signed int); + + ul = va_arg (ap, unsigned long); + sl = va_arg (ap, signed long); + + f = va_arg (ap, double); /* C standard promotes float->double + when anonymous */ + d = va_arg (ap, double); + + printf ("%u %u %u %u %u %u %u %u %u uc=%u sc=%d %u %d %u %d %lu %ld %f %f\n", + s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b, + uc, sc, + us, ss, + ui, si, + ul, sl, + f, d); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[15]; + ffi_type* arg_types[15]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + double d1; + double f1; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = &ffi_type_uchar; + arg_types[5] = &ffi_type_schar; + arg_types[6] = &ffi_type_ushort; + arg_types[7] = &ffi_type_sshort; + arg_types[8] = &ffi_type_uint; + arg_types[9] = &ffi_type_sint; + arg_types[10] = &ffi_type_ulong; + arg_types[11] = &ffi_type_slong; + arg_types[12] = &ffi_type_double; + arg_types[13] = &ffi_type_double; + arg_types[14] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 14, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + uc = 9; + sc = 10; + us = 11; + ss = 12; + ui = 13; + si = 14; + ul = 15; + sl = 16; + f1 = 2.12; + d1 = 3.13; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = &uc; + args[5] = &sc; + args[6] = &us; + args[7] = &ss; + args[8] = &ui; + args[9] = &si; + args[10] = &ul; + args[11] = &sl; + args[12] = &f1; + args[13] = &d1; + args[14] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8 uc=9 sc=10 11 12 13 14 15 16 2.120000 
3.130000" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c new file mode 100644 index 0000000..59d085c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c @@ -0,0 +1,196 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* m68k-*-* alpha-*-* } } */ + +#include "ffitest.h" +#include <stdarg.h> + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int
test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + float f; + double d; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + + uc = va_arg (ap, unsigned); + sc = va_arg (ap, signed); + + us = va_arg (ap, unsigned); + ss = va_arg (ap, signed); + + ui = va_arg (ap, unsigned int); + si = va_arg (ap, signed int); + + ul = va_arg (ap, unsigned long); + sl = va_arg (ap, signed long); + + f = va_arg (ap, double); /* C standard promotes float->double + when anonymous */ + d = va_arg (ap, double); + + printf ("%u %u %u %u %u %u %u %u %u uc=%u sc=%d %u %d %u %d %lu %ld %f %f\n", + s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b, + uc, sc, + us, ss, + ui, si, + ul, sl, + f, d); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[15]; + ffi_type* arg_types[15]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + double d1; + double f1; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = &ffi_type_uchar; + arg_types[5] = &ffi_type_schar; + arg_types[6] = &ffi_type_ushort; + arg_types[7] = &ffi_type_sshort; + arg_types[8] = &ffi_type_uint; + arg_types[9] = &ffi_type_sint; + arg_types[10] = &ffi_type_ulong; + arg_types[11] = &ffi_type_slong; + arg_types[12] = &ffi_type_double; + arg_types[13] = &ffi_type_double; + arg_types[14] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 14, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + 
l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + uc = 9; + sc = 10; + us = 11; + ss = 12; + ui = 13; + si = 14; + ul = 15; + sl = 16; + f1 = 2.12; + d1 = 3.13; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = &uc; + args[5] = ≻ + args[6] = &us; + args[7] = &ss; + args[8] = &ui; + args[9] = &si; + args[10] = &ul; + args[11] = &sl; + args[12] = &f1; + args[13] = &d1; + args[14] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8 uc=9 sc=10 11 12 13 14 15 16 2.120000 3.130000" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1 2.c new file mode 100644 index 0000000..e645206 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1 2.c @@ -0,0 +1,121 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c new file mode 
100644 index 0000000..e645206 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c @@ -0,0 +1,121 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2 2.c new file mode 100644 index 0000000..56f5b9c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2 2.c @@ -0,0 +1,123 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct small_tag +test_fn (int n, ...) 
+{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + s1.a += s2.a; + s1.b += s2.b; + return s1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct small_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &s_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d\n", res.a, res.b); + /* { dg-output "\nres: 12 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c new file mode 100644 index 0000000..56f5b9c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c @@ -0,0 +1,123 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct small_tag +test_fn (int n, ...) 
+{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + s1.a += s2.a; + s1.b += s2.b; + return s1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct small_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &s_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d\n", res.a, res.b); + /* { dg-output "\nres: 12 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3 2.c new file mode 100644 index 0000000..9a27e7f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3 2.c @@ -0,0 +1,125 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct large_tag +test_fn (int n, ...) 
+{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + l.a += s1.a; + l.b += s1.b; + l.c += s2.a; + l.d += s2.b; + return l; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct large_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &l_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d %d %d %d\n", res.a, res.b, res.c, res.d, res.e); + /* { dg-output "\nres: 15 17 19 21 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c new file mode 100644 index 0000000..9a27e7f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c @@ -0,0 +1,125 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct large_tag +test_fn (int n, ...) 
+{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + l.a += s1.a; + l.b += s1.b; + l.c += s2.a; + l.d += s2.b; + return l; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct large_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &l_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d %d %d %d\n", res.a, res.b, res.c, res.d, res.e); + /* { dg-output "\nres: 15 17 19 21 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure 2.exp new file mode 100644 index 0000000..ed4145c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure 2.exp @@ -0,0 +1,67 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2019 Free Software Foundation, Inc. +# Copyright (C) 2019 Anthony Green + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
+ +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist $additional_options + } else { + foreach test $tlist { + unsupported "$test" + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp new file mode 100644 index 0000000..ed4145c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp @@ -0,0 +1,67 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2019 Free Software Foundation, Inc. +# Copyright (C) 2019 Anthony Green + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
+ +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist $additional_options + } else { + foreach test $tlist { + unsupported "$test" + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0 2.c new file mode 100644 index 0000000..a579ff6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0 2.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + void * code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + 
cl_arg_types[5] = &ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c new file mode 100644 index 0000000..a579ff6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + void * code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; 
+ cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1 2.c new file mode 100644 index 0000000..9123173 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1 2.c @@ -0,0 +1,81 @@ +/* Area: closure_call. + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + + +static void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void 
*) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c new file mode 100644 index 0000000..9123173 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c @@ -0,0 +1,81 @@ +/* Area: closure_call. + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + + +static void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git 
a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2 2.c new file mode 100644 index 0000000..08ff9d9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2 2.c @@ -0,0 +1,81 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn2(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(double *)args[0] +(int)(*(double *)args[1]) + + (int)(*(double *)args[2]) + (int)*(double *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(double *)args[0], (int)(*(double *)args[1]), + (int)(*(double *)args[2]), (int)*(double *)args[3], + (int)(*(signed short *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(int *)args[7]), + (int)(*(double*)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type2)(double, double, double, double, signed short, + double, double, int, double, int, int, float, + int, float, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = &ffi_type_double; + cl_arg_types[2] = &ffi_type_double; + cl_arg_types[3] = &ffi_type_double; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn2, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type2)code)) + (1, 2, 3, 4, 127, 5, 6, 8, 9, 10, 11, 12.0, 13, + 19.0, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c new file mode 100644 index 0000000..08ff9d9 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c @@ -0,0 +1,81 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn2(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(double *)args[0] +(int)(*(double *)args[1]) + + (int)(*(double *)args[2]) + (int)*(double *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(double *)args[0], (int)(*(double *)args[1]), + (int)(*(double *)args[2]), (int)*(double *)args[3], + (int)(*(signed short *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(int *)args[7]), + (int)(*(double*)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type2)(double, double, double, double, signed short, + double, double, int, double, int, int, float, + int, float, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = &ffi_type_double; + cl_arg_types[2] = &ffi_type_double; + cl_arg_types[3] = &ffi_type_double; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn2, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type2)code)) + (1, 2, 3, 4, 127, 5, 6, 8, 9, 10, 11, 12.0, 13, + 19.0, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3 2.c new file mode 100644 index 0000000..9b54d80 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3 2.c @@ -0,0 +1,82 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn3(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(float *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(float *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(float *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(float *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(float *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(float *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); + + } + +typedef int (*closure_test_type3)(float, float, float, float, float, float, + float, float, double, int, float, float, int, + float, float, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_float; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_float; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_float; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn3, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type3)code)) + (1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9, 10, 11.11, 12.0, 13, + 19.19, 21.21, 1); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 19 21 1 3: 135" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 135" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c new file mode 100644 index 0000000..9b54d80 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c @@ -0,0 +1,82 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn3(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(float *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(float *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(float *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(float *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(float *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(float *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); + + } + +typedef int (*closure_test_type3)(float, float, float, float, float, float, + float, float, double, int, float, float, int, + float, float, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_float; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_float; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_float; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn3, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type3)code)) + (1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9, 10, 11.11, 12.0, 13, + 19.19, 21.21, 1); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 19 21 1 3: 135" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 135" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4 2.c new file mode 100644 index 0000000..d4a1530 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4 2.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20031026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(unsigned long long *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(unsigned long long *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11LL, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c new file mode 100644 index 0000000..d4a1530 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20031026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(unsigned long long *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(unsigned long long *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11LL, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5 2.c new file mode 100644 index 0000000..9907442 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5 2.c @@ -0,0 +1,92 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Exceed the limit of gpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20031026 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn5(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(int *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(int *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + int, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 10; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[10] = &ffi_type_sint; + for (i = 11; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn5, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c new file mode 100644 index 0000000..9907442 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c @@ -0,0 +1,92 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Exceed the limit of gpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20031026 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn5(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(int *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(int *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + int, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 10; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[10] = &ffi_type_sint; + for (i = 11; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn5, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6 2.c new file mode 100644 index 0000000..73c54fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6 2.c @@ -0,0 +1,90 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC. + Limitations: none. 
+ PR: PR23404 + Originator: 20050830 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + + (int)(*(unsigned long long *)args[1]) + + (int)(*(unsigned long long *)args[2]) + + (int)*(unsigned long long *)args[3] + + (int)(*(int *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(double *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(double *)args[14]) + (int)*(double *)args[15] + + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)(*(unsigned long long *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(unsigned long long *)args[3], + (int)(*(int *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(double *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(double *)args[14]), (int)(*(double *)args[15]), + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + int, double, double, float, double, double, + int, float, int, int, double, double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_uint64; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_uint64; + cl_arg_types[4] = &ffi_type_sint; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_double; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_double; + cl_arg_types[15] = &ffi_type_double; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1, 2, 3, 4, 127, 429., 7., 8., 9.5, 10., 11, 12., 13, + 19, 21., 1.); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c new file mode 100644 index 0000000..73c54fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c @@ -0,0 +1,90 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC. + Limitations: none. 
+ PR: PR23404 + Originator: 20050830 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + + (int)(*(unsigned long long *)args[1]) + + (int)(*(unsigned long long *)args[2]) + + (int)*(unsigned long long *)args[3] + + (int)(*(int *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(double *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(double *)args[14]) + (int)*(double *)args[15] + + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)(*(unsigned long long *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(unsigned long long *)args[3], + (int)(*(int *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(double *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(double *)args[14]), (int)(*(double *)args[15]), + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + int, double, double, float, double, double, + int, float, int, int, double, double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_uint64; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_uint64; + cl_arg_types[4] = &ffi_type_sint; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_double; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_double; + cl_arg_types[15] = &ffi_type_double; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1, 2, 3, 4, 127, 429., 7., 8., 9.5, 10., 11, 12., 13, + 19, 21., 1.); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0 2.c new file mode 100644 index 0000000..b3afa0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0 2.c @@ -0,0 +1,95 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_loc_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_loc_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + ffi_closure *pcl; + ffi_type * cl_arg_types[17]; + int res; + void *codeloc; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + pcl = ffi_closure_alloc(sizeof(ffi_closure), &codeloc); + CHECK(pcl != NULL); + CHECK(codeloc != NULL); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_loc_test_fn0, + (void *) 3 /* userdata */, codeloc) == FFI_OK); + + CHECK(memcmp(pcl, codeloc, sizeof(*pcl)) == 0); + + res = (*((closure_loc_test_type0)codeloc)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c new file mode 100644 index 0000000..b3afa0b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c @@ -0,0 +1,95 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_loc_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_loc_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + ffi_closure *pcl; + ffi_type * cl_arg_types[17]; + int res; + void *codeloc; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + pcl = ffi_closure_alloc(sizeof(ffi_closure), &codeloc); + CHECK(pcl != NULL); + CHECK(codeloc != NULL); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_loc_test_fn0, + (void *) 3 /* userdata */, codeloc) == FFI_OK); + + CHECK(memcmp(pcl, codeloc, sizeof(*pcl)) == 0); + + res = (*((closure_loc_test_type0)codeloc)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple 2.c new file mode 100644 index 0000000..5a4e728 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple 2.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check simple closure handling with all ABIs + Limitations: none. + PR: none. 
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata) +{ + *(ffi_arg*)resp = + (int)*(int *)args[0] + (int)(*(int *)args[1]) + + (int)(*(int *)args[2]) + (int)(*(int *)args[3]) + + (int)(intptr_t)userdata; + + printf("%d %d %d %d: %d\n", + (int)*(int *)args[0], (int)(*(int *)args[1]), + (int)(*(int *)args[2]), (int)(*(int *)args[3]), + (int)*(ffi_arg *)resp); + +} + +typedef int (ABI_ATTR *closure_test_type0)(int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = &ffi_type_uint; + cl_arg_types[3] = &ffi_type_uint; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*(closure_test_type0)code)(0, 1, 2, 3); + /* { dg-output "0 1 2 3: 9" } */ + + printf("res: %d\n",res); + /* { dg-output "\nres: 9" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c new file mode 100644 index 0000000..5a4e728 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check simple closure handling with all ABIs + Limitations: none. + PR: none. + Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata) +{ + *(ffi_arg*)resp = + (int)*(int *)args[0] + (int)(*(int *)args[1]) + + (int)(*(int *)args[2]) + (int)(*(int *)args[3]) + + (int)(intptr_t)userdata; + + printf("%d %d %d %d: %d\n", + (int)*(int *)args[0], (int)(*(int *)args[1]), + (int)(*(int *)args[2]), (int)(*(int *)args[3]), + (int)*(ffi_arg *)resp); + +} + +typedef int (ABI_ATTR *closure_test_type0)(int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = &ffi_type_uint; + cl_arg_types[3] = &ffi_type_uint; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*(closure_test_type0)code)(0, 1, 2, 3); + /* { dg-output "0 1 2 3: 9" } */ + + printf("res: %d\n",res); + /* { dg-output "\nres: 9" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte 2.c new file mode 100644 index 0000000..ea0825d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte 2.c @@ -0,0 +1,94 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_12byte { + int a; + int b; + int c; +} cls_struct_12byte; + +cls_struct_12byte cls_struct_12byte_fn(struct cls_struct_12byte b1, + struct cls_struct_12byte b2) +{ + struct cls_struct_12byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_12byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args , void* userdata __UNUSED__) +{ + struct cls_struct_12byte b1, b2; + + b1 = *(struct cls_struct_12byte*)(args[0]); + b2 = *(struct cls_struct_12byte*)(args[1]); + + *(cls_struct_12byte*)resp = cls_struct_12byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_12byte h_dbl = { 7, 4, 9 }; + struct cls_struct_12byte j_dbl = { 1, 5, 3 }; + struct cls_struct_12byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_12byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_12byte_gn, NULL, code) == FFI_OK); + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + res_dbl = ((cls_struct_12byte(*)(cls_struct_12byte, cls_struct_12byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c new file mode 100644 index 0000000..ea0825d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c @@ -0,0 +1,94 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_12byte { + int a; + int b; + int c; +} cls_struct_12byte; + +cls_struct_12byte cls_struct_12byte_fn(struct cls_struct_12byte b1, + struct cls_struct_12byte b2) +{ + struct cls_struct_12byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_12byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args , void* userdata __UNUSED__) +{ + struct cls_struct_12byte b1, b2; + + b1 = *(struct cls_struct_12byte*)(args[0]); + b2 = *(struct cls_struct_12byte*)(args[1]); + + *(cls_struct_12byte*)resp = cls_struct_12byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_12byte h_dbl = { 7, 4, 9 }; + struct cls_struct_12byte j_dbl = { 1, 5, 3 }; + struct cls_struct_12byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_12byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_12byte_gn, NULL, code) == FFI_OK); + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + res_dbl = ((cls_struct_12byte(*)(cls_struct_12byte, cls_struct_12byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte 2.c new file mode 100644 index 0000000..89a08a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte 2.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte { + int a; + double b; + int c; +} cls_struct_16byte; + +cls_struct_16byte cls_struct_16byte_fn(struct cls_struct_16byte b1, + struct cls_struct_16byte b2) +{ + struct cls_struct_16byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_16byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_16byte b1, b2; + + b1 = *(struct cls_struct_16byte*)(args[0]); + b2 = *(struct cls_struct_16byte*)(args[1]); + + *(cls_struct_16byte*)resp = cls_struct_16byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte h_dbl = { 7, 8.0, 9 }; + struct cls_struct_16byte j_dbl = { 1, 9.0, 3 }; + struct cls_struct_16byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_16byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0.0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_16byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_16byte(*)(cls_struct_16byte, cls_struct_16byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c new file mode 100644 index 0000000..89a08a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte { + int a; + double b; + int c; +} cls_struct_16byte; + +cls_struct_16byte cls_struct_16byte_fn(struct cls_struct_16byte b1, + struct cls_struct_16byte b2) +{ + struct cls_struct_16byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_16byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_16byte b1, b2; + + b1 = *(struct cls_struct_16byte*)(args[0]); + b2 = *(struct cls_struct_16byte*)(args[1]); + + *(cls_struct_16byte*)resp = cls_struct_16byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte h_dbl = { 7, 8.0, 9 }; + struct cls_struct_16byte j_dbl = { 1, 9.0, 3 }; + struct cls_struct_16byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_16byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0.0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_16byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_16byte(*)(cls_struct_16byte, cls_struct_16byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte 2.c new file mode 100644 index 0000000..9f75da8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte 2.c @@ -0,0 +1,96 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. 
+ Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_18byte { + double a; + unsigned char b; + unsigned char c; + double d; +} cls_struct_18byte; + +cls_struct_18byte cls_struct_18byte_fn(struct cls_struct_18byte a1, + struct cls_struct_18byte a2) +{ + struct cls_struct_18byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + + printf("%g %d %d %g %g %d %d %g: %g %d %d %g\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + return result; +} + +static void +cls_struct_18byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_18byte a1, a2; + + a1 = *(struct cls_struct_18byte*)(args[0]); + a2 = *(struct cls_struct_18byte*)(args[1]); + + *(cls_struct_18byte*)resp = cls_struct_18byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_18byte g_dbl = { 1.0, 127, 126, 3.0 }; + struct cls_struct_18byte f_dbl = { 4.0, 125, 124, 5.0 }; + struct cls_struct_18byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_18byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_18byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_18byte(*)(cls_struct_18byte, cls_struct_18byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c new file mode 100644 index 0000000..9f75da8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c @@ -0,0 +1,96 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. 
+ Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_18byte { + double a; + unsigned char b; + unsigned char c; + double d; +} cls_struct_18byte; + +cls_struct_18byte cls_struct_18byte_fn(struct cls_struct_18byte a1, + struct cls_struct_18byte a2) +{ + struct cls_struct_18byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + + printf("%g %d %d %g %g %d %d %g: %g %d %d %g\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + return result; +} + +static void +cls_struct_18byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_18byte a1, a2; + + a1 = *(struct cls_struct_18byte*)(args[0]); + a2 = *(struct cls_struct_18byte*)(args[1]); + + *(cls_struct_18byte*)resp = cls_struct_18byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_18byte g_dbl = { 1.0, 127, 126, 3.0 }; + struct cls_struct_18byte f_dbl = { 4.0, 125, 124, 5.0 }; + struct cls_struct_18byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_18byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_18byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_18byte(*)(cls_struct_18byte, cls_struct_18byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte 2.c new file mode 100644 index 0000000..278794b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte 2.c @@ -0,0 +1,102 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. 
+ Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_19byte { + double a; + unsigned char b; + unsigned char c; + double d; + unsigned char e; +} cls_struct_19byte; + +cls_struct_19byte cls_struct_19byte_fn(struct cls_struct_19byte a1, + struct cls_struct_19byte a2) +{ + struct cls_struct_19byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + + printf("%g %d %d %g %d %g %d %d %g %d: %g %d %d %g %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + return result; +} + +static void +cls_struct_19byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_19byte a1, a2; + + a1 = *(struct cls_struct_19byte*)(args[0]); + a2 = *(struct cls_struct_19byte*)(args[1]); + + *(cls_struct_19byte*)resp = cls_struct_19byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_19byte g_dbl = { 1.0, 127, 126, 3.0, 120 }; + struct cls_struct_19byte f_dbl = { 4.0, 125, 124, 5.0, 119 }; + struct cls_struct_19byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_19byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_19byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_19byte(*)(cls_struct_19byte, cls_struct_19byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c new file mode 100644 index 0000000..278794b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c @@ -0,0 +1,102 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. 
+ Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_19byte { + double a; + unsigned char b; + unsigned char c; + double d; + unsigned char e; +} cls_struct_19byte; + +cls_struct_19byte cls_struct_19byte_fn(struct cls_struct_19byte a1, + struct cls_struct_19byte a2) +{ + struct cls_struct_19byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + + printf("%g %d %d %g %d %g %d %d %g %d: %g %d %d %g %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + return result; +} + +static void +cls_struct_19byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_19byte a1, a2; + + a1 = *(struct cls_struct_19byte*)(args[0]); + a2 = *(struct cls_struct_19byte*)(args[1]); + + *(cls_struct_19byte*)resp = cls_struct_19byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_19byte g_dbl = { 1.0, 127, 126, 3.0, 120 }; + struct cls_struct_19byte f_dbl = { 4.0, 125, 124, 5.0, 119 }; + struct cls_struct_19byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_19byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_19byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_19byte(*)(cls_struct_19byte, cls_struct_19byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte 2.c new file mode 100644 index 0000000..82492c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte 2.c @@ -0,0 +1,89 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_1_1byte { + unsigned char a; +} cls_struct_1_1byte; + +cls_struct_1_1byte cls_struct_1_1byte_fn(struct cls_struct_1_1byte a1, + struct cls_struct_1_1byte a2) +{ + struct cls_struct_1_1byte result; + + result.a = a1.a + a2.a; + + printf("%d %d: %d\n", a1.a, a2.a, result.a); + + return result; +} + +static void +cls_struct_1_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_1_1byte a1, a2; + + a1 = *(struct cls_struct_1_1byte*)(args[0]); + a2 = *(struct cls_struct_1_1byte*)(args[1]); + + *(cls_struct_1_1byte*)resp = cls_struct_1_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[2]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_1_1byte g_dbl = { 12 }; + struct cls_struct_1_1byte f_dbl = { 178 }; + struct cls_struct_1_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_1_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_1_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_1_1byte(*)(cls_struct_1_1byte, cls_struct_1_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c new file mode 100644 index 0000000..82492c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c @@ -0,0 +1,89 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_1_1byte { + unsigned char a; +} cls_struct_1_1byte; + +cls_struct_1_1byte cls_struct_1_1byte_fn(struct cls_struct_1_1byte a1, + struct cls_struct_1_1byte a2) +{ + struct cls_struct_1_1byte result; + + result.a = a1.a + a2.a; + + printf("%d %d: %d\n", a1.a, a2.a, result.a); + + return result; +} + +static void +cls_struct_1_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_1_1byte a1, a2; + + a1 = *(struct cls_struct_1_1byte*)(args[0]); + a2 = *(struct cls_struct_1_1byte*)(args[1]); + + *(cls_struct_1_1byte*)resp = cls_struct_1_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[2]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_1_1byte g_dbl = { 12 }; + struct cls_struct_1_1byte f_dbl = { 178 }; + struct cls_struct_1_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_1_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_1_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_1_1byte(*)(cls_struct_1_1byte, cls_struct_1_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte 2.c new file mode 100644 index 0000000..3f8bb28 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + double a; + double b; + int c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%g %g %d %g %g %d: %g %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_20byte g_dbl = { 1.0, 2.0, 3 }; + struct cls_struct_20byte f_dbl = { 4.0, 5.0, 7 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c new file mode 100644 index 0000000..3f8bb28 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + double a; + double b; + int c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%g %g %d %g %g %d: %g %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_20byte g_dbl = { 1.0, 2.0, 3 }; + struct cls_struct_20byte f_dbl = { 4.0, 5.0, 7 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1 2.c new file mode 100644 index 0000000..6562727 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1 2.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + int a; + double b; + double c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %g %d %g %g: %d %g %g\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_20byte g_dbl = { 1, 2.0, 3.0 }; + struct cls_struct_20byte f_dbl = { 4, 5.0, 7.0 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c new file mode 100644 index 0000000..6562727 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + int a; + double b; + double c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %g %d %g %g: %d %g %g\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_20byte g_dbl = { 1, 2.0, 3.0 }; + struct cls_struct_20byte f_dbl = { 4, 5.0, 7.0 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte 2.c new file mode 100644 index 0000000..1d82f6e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte 2.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_24byte { + double a; + double b; + int c; + float d; +} cls_struct_24byte; + +cls_struct_24byte cls_struct_24byte_fn(struct cls_struct_24byte b0, + struct cls_struct_24byte b1, + struct cls_struct_24byte b2, + struct cls_struct_24byte b3) +{ + struct cls_struct_24byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + + printf("%g %g %d %g %g %g %d %g %g %g %d %g %g %g %d %g: %g %g %d %g\n", + b0.a, b0.b, b0.c, b0.d, + b1.a, b1.b, b1.c, b1.d, + b2.a, b2.b, b2.c, b2.d, + b3.a, b3.b, b3.c, b2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_24byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_24byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_24byte*)(args[0]); + b1 = *(struct cls_struct_24byte*)(args[1]); + b2 = *(struct cls_struct_24byte*)(args[2]); + b3 = *(struct cls_struct_24byte*)(args[3]); + + *(cls_struct_24byte*)resp = cls_struct_24byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_24byte e_dbl = { 9.0, 2.0, 6, 5.0 }; + struct cls_struct_24byte f_dbl = { 1.0, 2.0, 3, 7.0 }; + struct cls_struct_24byte g_dbl = { 4.0, 5.0, 7, 9.0 }; + struct cls_struct_24byte h_dbl = { 8.0, 6.0, 1, 4.0 }; + struct cls_struct_24byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = &ffi_type_float; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_24byte_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_24byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_24byte(*)(cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c new file mode 100644 index 0000000..1d82f6e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c @@ -0,0 
+1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_24byte { + double a; + double b; + int c; + float d; +} cls_struct_24byte; + +cls_struct_24byte cls_struct_24byte_fn(struct cls_struct_24byte b0, + struct cls_struct_24byte b1, + struct cls_struct_24byte b2, + struct cls_struct_24byte b3) +{ + struct cls_struct_24byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + + printf("%g %g %d %g %g %g %d %g %g %g %d %g %g %g %d %g: %g %g %d %g\n", + b0.a, b0.b, b0.c, b0.d, + b1.a, b1.b, b1.c, b1.d, + b2.a, b2.b, b2.c, b2.d, + b3.a, b3.b, b3.c, b2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_24byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_24byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_24byte*)(args[0]); + b1 = *(struct cls_struct_24byte*)(args[1]); + b2 = *(struct cls_struct_24byte*)(args[2]); + b3 = *(struct cls_struct_24byte*)(args[3]); + + *(cls_struct_24byte*)resp = cls_struct_24byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_24byte e_dbl = { 9.0, 2.0, 6, 5.0 }; + struct cls_struct_24byte f_dbl = { 1.0, 2.0, 3, 7.0 }; + struct cls_struct_24byte g_dbl = { 4.0, 5.0, 7, 9.0 }; + struct cls_struct_24byte h_dbl = { 8.0, 6.0, 1, 4.0 }; + struct cls_struct_24byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = &ffi_type_float; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_24byte_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_24byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_24byte(*)(cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte 2.c 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte 2.c new file mode 100644 index 0000000..81bb0a6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte 2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_2byte { + unsigned char a; + unsigned char b; +} cls_struct_2byte; + +cls_struct_2byte cls_struct_2byte_fn(struct cls_struct_2byte a1, + struct cls_struct_2byte a2) +{ + struct cls_struct_2byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_2byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_2byte a1, a2; + + a1 = *(struct cls_struct_2byte*)(args[0]); + a2 = *(struct cls_struct_2byte*)(args[1]); + + *(cls_struct_2byte*)resp = cls_struct_2byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_2byte g_dbl = { 12, 127 }; + struct cls_struct_2byte f_dbl = { 1, 13 }; + struct cls_struct_2byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_2byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_2byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_2byte(*)(cls_struct_2byte, cls_struct_2byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c new file mode 100644 index 0000000..81bb0a6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_2byte { + unsigned char a; + unsigned char b; +} cls_struct_2byte; + +cls_struct_2byte cls_struct_2byte_fn(struct cls_struct_2byte a1, + struct cls_struct_2byte a2) +{ + struct cls_struct_2byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_2byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_2byte a1, a2; + + a1 = *(struct cls_struct_2byte*)(args[0]); + a2 = *(struct cls_struct_2byte*)(args[1]); + + *(cls_struct_2byte*)resp = cls_struct_2byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_2byte g_dbl = { 12, 127 }; + struct cls_struct_2byte f_dbl = { 1, 13 }; + struct cls_struct_2byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_2byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_2byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_2byte(*)(cls_struct_2byte, cls_struct_2byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte 2.c new file mode 100644 index 0000000..b782746 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte 2.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3_1byte { + unsigned char a; + unsigned char b; + unsigned char c; +} cls_struct_3_1byte; + +cls_struct_3_1byte cls_struct_3_1byte_fn(struct cls_struct_3_1byte a1, + struct cls_struct_3_1byte a2) +{ + struct cls_struct_3_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_3_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3_1byte a1, a2; + + a1 = *(struct cls_struct_3_1byte*)(args[0]); + a2 = *(struct cls_struct_3_1byte*)(args[1]); + + *(cls_struct_3_1byte*)resp = cls_struct_3_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3_1byte g_dbl = { 12, 13, 14 }; + struct cls_struct_3_1byte f_dbl = { 178, 179, 180 }; + struct cls_struct_3_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3_1byte(*)(cls_struct_3_1byte, cls_struct_3_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c new file mode 100644 index 0000000..b782746 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3_1byte { + unsigned char a; + unsigned char b; + unsigned char c; +} cls_struct_3_1byte; + +cls_struct_3_1byte cls_struct_3_1byte_fn(struct cls_struct_3_1byte a1, + struct cls_struct_3_1byte a2) +{ + struct cls_struct_3_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_3_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3_1byte a1, a2; + + a1 = *(struct cls_struct_3_1byte*)(args[0]); + a2 = *(struct cls_struct_3_1byte*)(args[1]); + + *(cls_struct_3_1byte*)resp = cls_struct_3_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3_1byte g_dbl = { 12, 13, 14 }; + struct cls_struct_3_1byte f_dbl = { 178, 179, 180 }; + struct cls_struct_3_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3_1byte(*)(cls_struct_3_1byte, cls_struct_3_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1 2.c new file mode 100644 index 0000000..a02c463 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1 2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte { + unsigned short a; + unsigned char b; +} cls_struct_3byte; + +cls_struct_3byte cls_struct_3byte_fn(struct cls_struct_3byte a1, + struct cls_struct_3byte a2) +{ + struct cls_struct_3byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte a1, a2; + + a1 = *(struct cls_struct_3byte*)(args[0]); + a2 = *(struct cls_struct_3byte*)(args[1]); + + *(cls_struct_3byte*)resp = cls_struct_3byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte g_dbl = { 12, 119 }; + struct cls_struct_3byte f_dbl = { 1, 15 }; + struct cls_struct_3byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte(*)(cls_struct_3byte, cls_struct_3byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c new file mode 100644 index 0000000..a02c463 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte { + unsigned short a; + unsigned char b; +} cls_struct_3byte; + +cls_struct_3byte cls_struct_3byte_fn(struct cls_struct_3byte a1, + struct cls_struct_3byte a2) +{ + struct cls_struct_3byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte a1, a2; + + a1 = *(struct cls_struct_3byte*)(args[0]); + a2 = *(struct cls_struct_3byte*)(args[1]); + + *(cls_struct_3byte*)resp = cls_struct_3byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte g_dbl = { 12, 119 }; + struct cls_struct_3byte f_dbl = { 1, 15 }; + struct cls_struct_3byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte(*)(cls_struct_3byte, cls_struct_3byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2 2.c new file mode 100644 index 0000000..c7251ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2 2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte_1 { + unsigned char a; + unsigned short b; +} cls_struct_3byte_1; + +cls_struct_3byte_1 cls_struct_3byte_fn1(struct cls_struct_3byte_1 a1, + struct cls_struct_3byte_1 a2) +{ + struct cls_struct_3byte_1 result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte_1 a1, a2; + + a1 = *(struct cls_struct_3byte_1*)(args[0]); + a2 = *(struct cls_struct_3byte_1*)(args[1]); + + *(cls_struct_3byte_1*)resp = cls_struct_3byte_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte_1 g_dbl = { 15, 125 }; + struct cls_struct_3byte_1 f_dbl = { 9, 19 }; + struct cls_struct_3byte_1 res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn1), &res_dbl, args_dbl); + /* { dg-output "15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn1, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte_1(*)(cls_struct_3byte_1, cls_struct_3byte_1))(code))(g_dbl, f_dbl); + /* { dg-output "\n15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c new file mode 100644 index 0000000..c7251ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte_1 { + unsigned char a; + unsigned short b; +} cls_struct_3byte_1; + +cls_struct_3byte_1 cls_struct_3byte_fn1(struct cls_struct_3byte_1 a1, + struct cls_struct_3byte_1 a2) +{ + struct cls_struct_3byte_1 result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte_1 a1, a2; + + a1 = *(struct cls_struct_3byte_1*)(args[0]); + a2 = *(struct cls_struct_3byte_1*)(args[1]); + + *(cls_struct_3byte_1*)resp = cls_struct_3byte_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte_1 g_dbl = { 15, 125 }; + struct cls_struct_3byte_1 f_dbl = { 9, 19 }; + struct cls_struct_3byte_1 res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn1), &res_dbl, args_dbl); + /* { dg-output "15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn1, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte_1(*)(cls_struct_3byte_1, cls_struct_3byte_1))(code))(g_dbl, f_dbl); + /* { dg-output "\n15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float 2.c new file mode 100644 index 0000000..48888f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float 2.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations:>none. + PR: none. 
+ Originator: 20171026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_3float { + float f; + float g; + float h; +} cls_struct_3float; + +cls_struct_3float cls_struct_3float_fn(struct cls_struct_3float a1, + struct cls_struct_3float a2) +{ + struct cls_struct_3float result; + + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + result.h = a1.h + a2.h; + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.f, a1.g, a1.h, + a2.f, a2.g, a2.h, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_3float_gn(ffi_cif *cif __UNUSED__, void* resp, void **args, + void* userdata __UNUSED__) +{ + struct cls_struct_3float a1, a2; + + a1 = *(struct cls_struct_3float*)(args[0]); + a2 = *(struct cls_struct_3float*)(args[1]); + + *(cls_struct_3float*)resp = cls_struct_3float_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void *args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_3float g_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float f_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float res_dbl; + + cls_struct_fields[0] = &ffi_type_float; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_float; + cls_struct_fields[3] = NULL; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3float_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3float_gn, NULL, code) == + FFI_OK); + + res_dbl = ((cls_struct_3float(*)(cls_struct_3float, + cls_struct_3float))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c new file mode 100644 index 0000000..48888f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations:>none. + PR: none. 
+ Originator: 20171026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_3float { + float f; + float g; + float h; +} cls_struct_3float; + +cls_struct_3float cls_struct_3float_fn(struct cls_struct_3float a1, + struct cls_struct_3float a2) +{ + struct cls_struct_3float result; + + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + result.h = a1.h + a2.h; + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.f, a1.g, a1.h, + a2.f, a2.g, a2.h, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_3float_gn(ffi_cif *cif __UNUSED__, void* resp, void **args, + void* userdata __UNUSED__) +{ + struct cls_struct_3float a1, a2; + + a1 = *(struct cls_struct_3float*)(args[0]); + a2 = *(struct cls_struct_3float*)(args[1]); + + *(cls_struct_3float*)resp = cls_struct_3float_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void *args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_3float g_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float f_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float res_dbl; + + cls_struct_fields[0] = &ffi_type_float; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_float; + cls_struct_fields[3] = NULL; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3float_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3float_gn, NULL, code) == + FFI_OK); + + res_dbl = ((cls_struct_3float(*)(cls_struct_3float, + cls_struct_3float))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte 2.c new file mode 100644 index 0000000..2d6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte 2.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_4_1byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; +} cls_struct_4_1byte; + +cls_struct_4_1byte cls_struct_4_1byte_fn(struct cls_struct_4_1byte a1, + struct cls_struct_4_1byte a2) +{ + struct cls_struct_4_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_4_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4_1byte a1, a2; + + a1 = *(struct cls_struct_4_1byte*)(args[0]); + a2 = *(struct cls_struct_4_1byte*)(args[1]); + + *(cls_struct_4_1byte*)resp = cls_struct_4_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4_1byte g_dbl = { 12, 13, 14, 15 }; + struct cls_struct_4_1byte f_dbl = { 178, 179, 180, 181 }; + struct cls_struct_4_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4_1byte(*)(cls_struct_4_1byte, cls_struct_4_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c new file mode 100644 index 0000000..2d6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_4_1byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; +} cls_struct_4_1byte; + +cls_struct_4_1byte cls_struct_4_1byte_fn(struct cls_struct_4_1byte a1, + struct cls_struct_4_1byte a2) +{ + struct cls_struct_4_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_4_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4_1byte a1, a2; + + a1 = *(struct cls_struct_4_1byte*)(args[0]); + a2 = *(struct cls_struct_4_1byte*)(args[1]); + + *(cls_struct_4_1byte*)resp = cls_struct_4_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4_1byte g_dbl = { 12, 13, 14, 15 }; + struct cls_struct_4_1byte f_dbl = { 178, 179, 180, 181 }; + struct cls_struct_4_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4_1byte(*)(cls_struct_4_1byte, cls_struct_4_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte 2.c new file mode 100644 index 0000000..4ac3787 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte 2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_4byte { + unsigned short a; + unsigned short b; +} cls_struct_4byte; + +cls_struct_4byte cls_struct_4byte_fn(struct cls_struct_4byte a1, + struct cls_struct_4byte a2) +{ + struct cls_struct_4byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_4byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4byte a1, a2; + + a1 = *(struct cls_struct_4byte*)(args[0]); + a2 = *(struct cls_struct_4byte*)(args[1]); + + *(cls_struct_4byte*)resp = cls_struct_4byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4byte g_dbl = { 127, 120 }; + struct cls_struct_4byte f_dbl = { 12, 128 }; + struct cls_struct_4byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4byte(*)(cls_struct_4byte, cls_struct_4byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c new file mode 100644 index 0000000..4ac3787 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_4byte { + unsigned short a; + unsigned short b; +} cls_struct_4byte; + +cls_struct_4byte cls_struct_4byte_fn(struct cls_struct_4byte a1, + struct cls_struct_4byte a2) +{ + struct cls_struct_4byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_4byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4byte a1, a2; + + a1 = *(struct cls_struct_4byte*)(args[0]); + a2 = *(struct cls_struct_4byte*)(args[1]); + + *(cls_struct_4byte*)resp = cls_struct_4byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4byte g_dbl = { 127, 120 }; + struct cls_struct_4byte f_dbl = { 12, 128 }; + struct cls_struct_4byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4byte(*)(cls_struct_4byte, cls_struct_4byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte 2.c new file mode 100644 index 0000000..ad9d51c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte 2.c @@ -0,0 +1,109 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + printf("%d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1, 3, 4 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9, 3, 4 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c new file mode 100644 index 0000000..ad9d51c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c @@ -0,0 +1,109 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + printf("%d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1, 3, 4 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9, 3, 4 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte 2.c new file mode 100644 index 0000000..4e0c000 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte 2.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned short a; + unsigned short b; + unsigned char c; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c new file mode 100644 index 0000000..4e0c000 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned short a; + unsigned short b; + unsigned char c; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte 2.c new file mode 100644 index 0000000..a55edc2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte 2.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_64byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; +} cls_struct_64byte; + +cls_struct_64byte cls_struct_64byte_fn(struct cls_struct_64byte b0, + struct cls_struct_64byte b1, + struct cls_struct_64byte b2, + struct cls_struct_64byte b3) +{ + struct cls_struct_64byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + + printf("%g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_64byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_64byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_64byte*)(args[0]); + b1 = *(struct cls_struct_64byte*)(args[1]); + b2 = *(struct cls_struct_64byte*)(args[2]); + b3 = *(struct cls_struct_64byte*)(args[3]); + + *(cls_struct_64byte*)resp = cls_struct_64byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[9]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_64byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0 }; + struct cls_struct_64byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0 }; + struct cls_struct_64byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0 }; + struct cls_struct_64byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0 }; + struct cls_struct_64byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_64byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_64byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_64byte(*)(cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, 
res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c new file mode 100644 index 0000000..a55edc2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_64byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; +} cls_struct_64byte; + +cls_struct_64byte cls_struct_64byte_fn(struct cls_struct_64byte b0, + struct cls_struct_64byte b1, + struct cls_struct_64byte b2, + struct cls_struct_64byte b3) +{ + struct cls_struct_64byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + + printf("%g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_64byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_64byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_64byte*)(args[0]); + b1 = *(struct cls_struct_64byte*)(args[1]); + b2 = *(struct cls_struct_64byte*)(args[2]); + b3 = *(struct cls_struct_64byte*)(args[3]); + + *(cls_struct_64byte*)resp = cls_struct_64byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[9]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_64byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0 }; + struct cls_struct_64byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0 }; + struct cls_struct_64byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0 }; + struct cls_struct_64byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0 }; + struct cls_struct_64byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = 
NULL; + + ffi_call(&cif, FFI_FN(cls_struct_64byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_64byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_64byte(*)(cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte 2.c new file mode 100644 index 0000000..b4dcdba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte 2.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, + result.a, result.b, result.c, result.d, result.e, result.f); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[7]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 3, 4, 5 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 3, 4, 5 }; + struct cls_struct_6byte res_dbl = { 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] 
= &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c new file mode 100644 index 0000000..b4dcdba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, + result.a, result.b, result.c, result.d, result.e, result.f); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[7]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 3, 4, 5 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 3, 4, 5 }; + struct cls_struct_6byte res_dbl = { 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == 
FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte 2.c new file mode 100644 index 0000000..7406780 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte 2.c @@ -0,0 +1,99 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned char d; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 128 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 127 }; + struct cls_struct_6byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 128 12 128 9 127: 139 248 10 255" } */ + 
printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c new file mode 100644 index 0000000..7406780 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c @@ -0,0 +1,99 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned char d; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 128 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 127 }; + struct cls_struct_6byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", 
res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte 2.c new file mode 100644 index 0000000..14a7e96 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte 2.c @@ -0,0 +1,117 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; + unsigned char g; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + result.a, result.b, result.c, result.d, result.e, result.f, result.g); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 3, 4, 5, 6 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 3, 4, 5, 6 }; + struct cls_struct_7byte res_dbl = { 0, 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = &ffi_type_uchar; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + res_dbl.g = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl 
= ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c new file mode 100644 index 0000000..14a7e96 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c @@ -0,0 +1,117 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; + unsigned char g; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + result.a, result.b, result.c, result.d, result.e, result.f, result.g); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 3, 4, 5, 6 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 3, 4, 5, 6 }; + struct cls_struct_7byte res_dbl = { 0, 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = &ffi_type_uchar; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 
10 6 8 10 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + res_dbl.g = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte 2.c new file mode 100644 index 0000000..1645cc6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte 2.c @@ -0,0 +1,97 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned short d; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 254 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 255 }; + struct cls_struct_7byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_ushort; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output 
"\n127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c new file mode 100644 index 0000000..1645cc6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c @@ -0,0 +1,97 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned short d; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 254 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 255 }; + struct cls_struct_7byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_ushort; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte 2.c new file 
mode 100644 index 0000000..f6c1ea5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte 2.c @@ -0,0 +1,88 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_8byte { + int a; + float b; +} cls_struct_8byte; + +cls_struct_8byte cls_struct_8byte_fn(struct cls_struct_8byte a1, + struct cls_struct_8byte a2) +{ + struct cls_struct_8byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %g %d %g: %d %g\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_8byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_8byte a1, a2; + + a1 = *(struct cls_struct_8byte*)(args[0]); + a2 = *(struct cls_struct_8byte*)(args[1]); + + *(cls_struct_8byte*)resp = cls_struct_8byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_8byte g_dbl = { 1, 2.0 }; + struct cls_struct_8byte f_dbl = { 4, 5.0 }; + struct cls_struct_8byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_8byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_8byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_8byte(*)(cls_struct_8byte, cls_struct_8byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c new file mode 100644 index 0000000..f6c1ea5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c @@ -0,0 +1,88 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_8byte { + int a; + float b; +} cls_struct_8byte; + +cls_struct_8byte cls_struct_8byte_fn(struct cls_struct_8byte a1, + struct cls_struct_8byte a2) +{ + struct cls_struct_8byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %g %d %g: %d %g\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_8byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_8byte a1, a2; + + a1 = *(struct cls_struct_8byte*)(args[0]); + a2 = *(struct cls_struct_8byte*)(args[1]); + + *(cls_struct_8byte*)resp = cls_struct_8byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_8byte g_dbl = { 1, 2.0 }; + struct cls_struct_8byte f_dbl = { 4, 5.0 }; + struct cls_struct_8byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_8byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_8byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_8byte(*)(cls_struct_8byte, cls_struct_8byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1 2.c new file mode 100644 index 0000000..0b85722 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1 2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does not here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + int a; + double b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%d %g %d %g: %d %g\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7, 8.0}; + struct cls_struct_9byte j_dbl = { 1, 9.0}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c new file mode 100644 index 0000000..0b85722 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does not here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + int a; + double b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%d %g %d %g: %d %g\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7, 8.0}; + struct cls_struct_9byte j_dbl = { 1, 9.0}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2 2.c new file mode 100644 index 0000000..edf991d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + double a; + int b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%g %d %g %d: %g %d\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7.0, 8}; + struct cls_struct_9byte j_dbl = { 1.0, 9}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c new file mode 100644 index 0000000..edf991d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + double a; + int b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%g %d %g %d: %g %d\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7.0, 8}; + struct cls_struct_9byte j_dbl = { 1.0, 9}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double 2.c new file mode 100644 index 0000000..aad5f3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double 2.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c new file mode 100644 index 0000000..aad5f3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float 2.c new file mode 100644 index 0000000..37e0855 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of float. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + float b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c new file mode 100644 index 0000000..37e0855 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of float. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + float b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble 2.c new file mode 100644 index 0000000..b3322d8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble 2.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + long double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c new file mode 100644 index 0000000..b3322d8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + long double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split 2.c new file mode 100644 index 0000000..cc1c43b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split 2.c @@ -0,0 +1,132 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + long double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +cls_struct_align cls_struct_align_fn2( + cls_struct_align a1) +{ + struct cls_struct_align r; + + r.a = a1.a + 1; + r.b = a1.b + 1; + r.c = a1.c + 1; + r.d = a1.d + 1; + r.e = a1.e + 1; + r.f = a1.f + 1; + r.g = a1.g + 1; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_longdouble; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} diff 
--git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c new file mode 100644 index 0000000..cc1c43b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c @@ -0,0 +1,132 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + long double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +cls_struct_align cls_struct_align_fn2( + cls_struct_align a1) +{ + struct cls_struct_align r; + + r.a = a1.a + 1; + r.b = a1.b + 1; + r.c = a1.c + 1; + r.d = a1.d + 1; + r.e = a1.e + 1; + r.f = a1.f + 1; + r.g = a1.g + 1; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_longdouble; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, 
res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2 2.c new file mode 100644 index 0000000..5d3bec0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2 2.c @@ -0,0 +1,115 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %g %Lg %Lg %Lg %Lg %Lg %Lg %g %Lg: " + "%Lg %Lg %Lg %Lg %Lg %g %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { 
dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c new file mode 100644 index 0000000..5d3bec0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c @@ -0,0 +1,115 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %g %Lg %Lg %Lg %Lg %Lg %Lg %g %Lg: " + "%Lg %Lg %Lg %Lg %Lg %g %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + 
args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} + + + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer 2.c new file mode 100644 index 0000000..8fbf36a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer 2.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of pointer. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + void *b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = (void *)((uintptr_t)a1.b + (uintptr_t)a2.b); + result.c = a1.c + a2.c; + + printf("%d %" PRIuPTR " %d %d %" PRIuPTR " %d: %d %" PRIuPTR " %d\n", + a1.a, (uintptr_t)a1.b, a1.c, + a2.a, (uintptr_t)a2.b, a2.c, + result.a, (uintptr_t)result.b, + result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, (void *)4951, 127 }; + struct cls_struct_align f_dbl = { 1, (void *)9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_pointer; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ 
+ + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c new file mode 100644 index 0000000..8fbf36a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of pointer. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + void *b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = (void *)((uintptr_t)a1.b + (uintptr_t)a2.b); + result.c = a1.c + a2.c; + + printf("%d %" PRIuPTR " %d %d %" PRIuPTR " %d: %d %" PRIuPTR " %d\n", + a1.a, (uintptr_t)a1.b, a1.c, + a2.a, (uintptr_t)a2.b, a2.c, + result.a, (uintptr_t)result.b, + result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, (void *)4951, 127 }; + struct cls_struct_align f_dbl = { 1, (void *)9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_pointer; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git 
a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16 2.c new file mode 100644 index 0000000..039b874 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint16. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sshort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c new file mode 100644 index 0000000..039b874 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint16. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sshort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32 2.c new file mode 100644 index 0000000..c96c6d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c new file mode 100644 index 0000000..c96c6d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64 2.c new file mode 100644 index 0000000..9aa7bdd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64 2.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c new file mode 100644 index 0000000..9aa7bdd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16 2.c new file mode 100644 index 0000000..97620b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint16. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c new file mode 100644 index 0000000..97620b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint16. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32 2.c new file mode 100644 index 0000000..5766fad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c new file mode 100644 index 0000000..5766fad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64 2.c new file mode 100644 index 0000000..a52cb89 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64 2.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c new file mode 100644 index 0000000..a52cb89 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct 2.c new file mode 100644 index 0000000..e451dea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct 2.c @@ -0,0 +1,66 @@ +/* Area: ffi_call, closure_call + Purpose: Check double arguments in structs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/23/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct Dbls { + double x; + double y; +} Dbls; + +void +closure_test_fn(Dbls p) +{ + printf("%.1f %.1f\n", p.x, p.y); +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Dbls*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Dbls arg = { 1.0, 2.0 }; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &ffi_type_double; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + ((void*(*)(Dbls))(code))(arg); + /* { dg-output "1.0 2.0" } */ + + closure_test_fn(arg); + /* { dg-output "\n1.0 2.0" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c new file mode 100644 index 0000000..e451dea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c @@ -0,0 +1,66 @@ +/* Area: ffi_call, closure_call + Purpose: Check double arguments in structs. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/23/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct Dbls { + double x; + double y; +} Dbls; + +void +closure_test_fn(Dbls p) +{ + printf("%.1f %.1f\n", p.x, p.y); +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Dbls*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Dbls arg = { 1.0, 2.0 }; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &ffi_type_double; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + ((void*(*)(Dbls))(code))(arg); + /* { dg-output "1.0 2.0" } */ + + closure_test_fn(arg); + /* { dg-output "\n1.0 2.0" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double 2.c new file mode 100644 index 0000000..84ad4cb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double 2.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(double *)resp = *(double *)args[0]; + + printf("%f: %f\n",*(double *)args[0], + *(double *)resp); + } +typedef double (*cls_ret_double)(double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + double res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_double)code))(21474.789); + /* { dg-output "21474.789000: 21474.789000" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: 21474.789000" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c new file mode 100644 index 0000000..84ad4cb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(double *)resp = *(double *)args[0]; + + printf("%f: %f\n",*(double *)args[0], + *(double *)resp); + } +typedef double (*cls_ret_double)(double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + double res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_double)code))(21474.789); + /* { dg-output "21474.789000: 21474.789000" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: 21474.789000" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va 2.c new file mode 100644 index 0000000..e077f92 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va 2.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_double_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + double doubleValue = *(double*)args[1]; + + *(ffi_arg*)resp = printf(format, doubleValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1f\n"; + double doubleArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_double; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &doubleArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_double_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, doubleArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c new file mode 100644 index 0000000..e077f92 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_double_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + double doubleValue = *(double*)args[1]; + + *(ffi_arg*)resp = printf(format, doubleValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1f\n"; + double doubleArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_double; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &doubleArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_double_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, doubleArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float 2.c new file mode 100644 index 0000000..0090fed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float 2.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_float_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(float *)resp = *(float *)args[0]; + + printf("%g: %g\n",*(float *)args[0], + *(float *)resp); + } + +typedef float (*cls_ret_float)(float); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + float res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_float_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_float)code)(-2122.12))); + /* { dg-output "\\-2122.12: \\-2122.12" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: \-2122.120117" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c new file mode 100644 index 0000000..0090fed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_float_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(float *)resp = *(float *)args[0]; + + printf("%g: %g\n",*(float *)args[0], + *(float *)resp); + } + +typedef float (*cls_ret_float)(float); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + float res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_float_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_float)code)(-2122.12))); + /* { dg-output "\\-2122.12: \\-2122.12" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: \-2122.120117" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble 2.c new file mode 100644 index 0000000..d24e72e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble 2.c @@ -0,0 +1,105 @@ +/* Area: ffi_call, closure_call + Purpose: Check long double arguments. + Limitations: none. + PR: none. + Originator: Blake Chaffin */ + +/* This test is known to PASS on armv7l-unknown-linux-gnueabihf, so I have + remove the xfail for arm*-*-* below, until we know more. */ +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +long double cls_ldouble_fn( + long double a1, + long double a2, + long double a3, + long double a4, + long double a5, + long double a6, + long double a7, + long double a8) +{ + long double r = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: %Lg\n", + a1, a2, a3, a4, a5, a6, a7, a8, r); + + return r; +} + +static void +cls_ldouble_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + long double a1 = *(long double*)args[0]; + long double a2 = *(long double*)args[1]; + long double a3 = *(long double*)args[2]; + long double a4 = *(long double*)args[3]; + long double a5 = *(long double*)args[4]; + long double a6 = *(long double*)args[5]; + long double a7 = *(long double*)args[6]; + long double a8 = *(long double*)args[7]; + + *(long double*)resp = cls_ldouble_fn( + a1, a2, a3, a4, a5, a6, a7, a8); +} + +int main(void) +{ + ffi_cif cif; + void* code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[9]; + ffi_type* arg_types[9]; + long double res = 0; + + long double arg1 = 1; + long double arg2 = 2; + long double arg3 = 3; + long double arg4 = 4; + long double arg5 = 5; + long double arg6 = 6; + long double arg7 = 7; + long double arg8 = 8; + + arg_types[0] = &ffi_type_longdouble; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = &ffi_type_longdouble; + arg_types[3] = &ffi_type_longdouble; + arg_types[4] = &ffi_type_longdouble; + arg_types[5] = &ffi_type_longdouble; + arg_types[6] = &ffi_type_longdouble; + arg_types[7] = &ffi_type_longdouble; + arg_types[8] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 8, &ffi_type_longdouble, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = &arg3; + args[3] = &arg4; + 
args[4] = &arg5; + args[5] = &arg6; + args[6] = &arg7; + args[7] = &arg8; + args[8] = NULL; + + ffi_call(&cif, FFI_FN(cls_ldouble_fn), &res, args); + /* { dg-output "1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ldouble_gn, NULL, code) == FFI_OK); + + res = ((long double(*)(long double, long double, long double, long double, + long double, long double, long double, long double))(code))(arg1, arg2, + arg3, arg4, arg5, arg6, arg7, arg8); + /* { dg-output "\n1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c new file mode 100644 index 0000000..d24e72e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c @@ -0,0 +1,105 @@ +/* Area: ffi_call, closure_call + Purpose: Check long double arguments. + Limitations: none. + PR: none. + Originator: Blake Chaffin */ + +/* This test is known to PASS on armv7l-unknown-linux-gnueabihf, so I have + remove the xfail for arm*-*-* below, until we know more. */ +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +long double cls_ldouble_fn( + long double a1, + long double a2, + long double a3, + long double a4, + long double a5, + long double a6, + long double a7, + long double a8) +{ + long double r = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: %Lg\n", + a1, a2, a3, a4, a5, a6, a7, a8, r); + + return r; +} + +static void +cls_ldouble_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + long double a1 = *(long double*)args[0]; + long double a2 = *(long double*)args[1]; + long double a3 = *(long double*)args[2]; + long double a4 = *(long double*)args[3]; + long double a5 = *(long double*)args[4]; + long double a6 = *(long double*)args[5]; + long double a7 = *(long double*)args[6]; + long double a8 = *(long double*)args[7]; + + *(long double*)resp = cls_ldouble_fn( + a1, a2, a3, a4, a5, a6, a7, a8); +} + +int main(void) +{ + ffi_cif cif; + void* code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[9]; + ffi_type* arg_types[9]; + long double res = 0; + + long double arg1 = 1; + long double arg2 = 2; + long double arg3 = 3; + long double arg4 = 4; + long double arg5 = 5; + long double arg6 = 6; + long double arg7 = 7; + long double arg8 = 8; + + arg_types[0] = &ffi_type_longdouble; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = &ffi_type_longdouble; + arg_types[3] = &ffi_type_longdouble; + arg_types[4] = &ffi_type_longdouble; + arg_types[5] = &ffi_type_longdouble; + arg_types[6] = &ffi_type_longdouble; + arg_types[7] = &ffi_type_longdouble; + arg_types[8] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 8, &ffi_type_longdouble, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = &arg3; + args[3] = &arg4; + args[4] = &arg5; + args[5] = &arg6; + args[6] = &arg7; + args[7] = &arg8; + args[8] = NULL; + + ffi_call(&cif, FFI_FN(cls_ldouble_fn), &res, args); + /* { dg-output "1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, 
cls_ldouble_gn, NULL, code) == FFI_OK); + + res = ((long double(*)(long double, long double, long double, long double, + long double, long double, long double, long double))(code))(arg1, arg2, + arg3, arg4, arg5, arg6, arg7, arg8); + /* { dg-output "\n1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va 2.c new file mode 100644 index 0000000..39b438b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va 2.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test long doubles passed in variable argument lists. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* x86_64-*-mingw* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_longdouble_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + long double ldValue = *(long double*)args[1]; + + *(ffi_arg*)resp = printf(format, ldValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1Lf\n"; + long double ldArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &ldArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_longdouble_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, ldArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c new file mode 100644 index 0000000..39b438b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test long doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* x86_64-*-mingw* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_longdouble_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + long double ldValue = *(long double*)args[1]; + + *(ffi_arg*)resp = printf(format, ldValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1Lf\n"; + long double ldArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &ldArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_longdouble_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, ldArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args 2.c new file mode 100644 index 0000000..7fd6c82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args 2.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check closures called with many args of mixed types + Limitations: none. + PR: none. 
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + int i; + double r = 0; + double t; + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + { + t = *(long int *)args[i]; + CHECK(t == i+1); + } + else + { + t = *(double *)args[i]; + CHECK(fabs(t - ((i+1) * 0.1)) < FLT_EPSILON); + } + r += t; + } + *(double *)resp = r; +} +typedef double (*cls_ret_double)(double, double, double, double, long int, +double, double, double, double, long int, double, long int, double, long int, +double, long int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[NARGS]; + double res; + int i; + double expected = 64.9; + + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + cl_arg_types[i] = &ffi_type_slong; + else + cl_arg_types[i] = &ffi_type_double; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NARGS, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (((cls_ret_double)code))(0.1, 0.2, 0.3, 0.4, 5, 0.6, 0.7, 0.8, 0.9, 10, + 1.1, 12, 1.3, 14, 1.5, 16); + if (fabs(res - expected) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c new file mode 100644 index 0000000..7fd6c82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check closures called with many args of mixed types + Limitations: none. + PR: none.
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + int i; + double r = 0; + double t; + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + { + t = *(long int *)args[i]; + CHECK(t == i+1); + } + else + { + t = *(double *)args[i]; + CHECK(fabs(t - ((i+1) * 0.1)) < FLT_EPSILON); + } + r += t; + } + *(double *)resp = r; +} +typedef double (*cls_ret_double)(double, double, double, double, long int, +double, double, double, double, long int, double, long int, double, long int, +double, long int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[NARGS]; + double res; + int i; + double expected = 64.9; + + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + cl_arg_types[i] = &ffi_type_slong; + else + cl_arg_types[i] = &ffi_type_double; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NARGS, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (((cls_ret_double)code))(0.1, 0.2, 0.3, 0.4, 5, 0.6, 0.7, 0.8, 0.9, 10, + 1.1, 12, 1.3, 14, 1.5, 16); + if (fabs(res - expected) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double 2.c new file mode 100644 index 0000000..62b0697 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double 2.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check register allocation for closure calls with many float and double arguments + Limitations: none. + PR: none.
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_mixed_float_double_fn(ffi_cif* cif , void* ret, void** args, + void* userdata __UNUSED__) +{ + double r = 0; + unsigned int i; + double t; + for(i=0; i < cif->nargs; i++) + { + if(cif->arg_types[i] == &ffi_type_double) { + t = *(((double**)(args))[i]); + } else { + t = *(((float**)(args))[i]); + } + r += t; + } + *((double*)ret) = r; +} +typedef double (*cls_mixed)(double, float, double, double, double, double, double, float, float, double, float, float); + +int main (void) +{ + ffi_cif cif; + ffi_closure *closure; + void* code; + ffi_type *argtypes[12] = {&ffi_type_double, &ffi_type_float, &ffi_type_double, + &ffi_type_double, &ffi_type_double, &ffi_type_double, + &ffi_type_double, &ffi_type_float, &ffi_type_float, + &ffi_type_double, &ffi_type_float, &ffi_type_float}; + + + closure = ffi_closure_alloc(sizeof(ffi_closure), (void**)&code); + if(closure ==NULL) + abort(); + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 12, &ffi_type_double, argtypes) == FFI_OK); + CHECK(ffi_prep_closure_loc(closure, &cif, cls_mixed_float_double_fn, NULL, code) == FFI_OK); + double ret = ((cls_mixed)code)(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2); + ffi_closure_free(closure); + if(fabs(ret - 7.8) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c new file mode 100644 index 0000000..62b0697 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check register allocation for closure calls with many float and double arguments + Limitations: none. + PR: none.
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_mixed_float_double_fn(ffi_cif* cif , void* ret, void** args, + void* userdata __UNUSED__) +{ + double r = 0; + unsigned int i; + double t; + for(i=0; i < cif->nargs; i++) + { + if(cif->arg_types[i] == &ffi_type_double) { + t = *(((double**)(args))[i]); + } else { + t = *(((float**)(args))[i]); + } + r += t; + } + *((double*)ret) = r; +} +typedef double (*cls_mixed)(double, float, double, double, double, double, double, float, float, double, float, float); + +int main (void) +{ + ffi_cif cif; + ffi_closure *closure; + void* code; + ffi_type *argtypes[12] = {&ffi_type_double, &ffi_type_float, &ffi_type_double, + &ffi_type_double, &ffi_type_double, &ffi_type_double, + &ffi_type_double, &ffi_type_float, &ffi_type_float, + &ffi_type_double, &ffi_type_float, &ffi_type_float}; + + + closure = ffi_closure_alloc(sizeof(ffi_closure), (void**)&code); + if(closure ==NULL) + abort(); + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 12, &ffi_type_double, argtypes) == FFI_OK); + CHECK(ffi_prep_closure_loc(closure, &cif, cls_mixed_float_double_fn, NULL, code) == FFI_OK); + double ret = ((cls_mixed)code)(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2); + ffi_closure_free(closure); + if(fabs(ret - 7.8) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar 2.c new file mode 100644 index 0000000..71df7b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar 2.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed char values. + Limitations: none. + PR: PR13221.
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed char test_func_fn(signed char a1, signed char a2) +{ + signed char result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a2; + + a1 = *(signed char *)avals[0]; + a2 = *(signed char *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed char (*test_type)(signed char, signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + signed char a, b, res_closure; + + a = 2; + b = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_schar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 125: 127" } */ + printf("res: %d\n", (signed char)res_call); + /* { dg-output "\nres: 127" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 125); + /* { dg-output "\n2 125: 127" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c new file mode 100644 index 0000000..71df7b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed char test_func_fn(signed char a1, signed char a2) +{ + signed char result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a2; + + a1 = *(signed char *)avals[0]; + a2 = *(signed char *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed char (*test_type)(signed char, signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + signed char a, b, res_closure; + + a = 2; + b = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_schar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 125: 127" } */ + printf("res: %d\n", (signed char)res_call); + /* { dg-output "\nres: 127" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 125); + /* { dg-output "\n2 125: 127" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort 2.c new file mode 100644 index 0000000..4c39153 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort 2.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed short a1, signed short a2) +{ + signed short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed short a1, a2; + + a1 = *(signed short *)avals[0]; + a2 = *(signed short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed short (*test_type)(signed short, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c new file mode 100644 index 0000000..4c39153 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed short a1, signed short a2) +{ + signed short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed short a1, a2; + + a1 = *(signed short *)avals[0]; + a2 = *(signed short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed short (*test_type)(signed short, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar 2.c new file mode 100644 index 0000000..1c3aeb5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar 2.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed char a1, signed short a2, + signed char a3, signed short a4) +{ + signed short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a3; + signed short a2, a4; + + a1 = *(signed char *)avals[0]; + a2 = *(signed short *)avals[1]; + a3 = *(signed char *)avals[2]; + a4 = *(signed short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef signed short (*test_type)(signed char, signed short, + signed char, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + signed char a, c; + signed short b, d, res_closure; + + a = 1; + b = 32765; + c = 127; + d = -128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = &ffi_type_schar; + cl_arg_types[3] = &ffi_type_sshort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 32765 127 -128: 32765" } */ + printf("res: %d\n", (signed short)res_call); + /* { dg-output "\nres: 32765" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 32765, 127, -128); + /* { dg-output "\n1 32765 127 -128: 32765" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32765" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c new file mode 100644 index 0000000..1c3aeb5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed char a1, signed short a2, + signed char a3, signed short a4) +{ + signed short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a3; + signed short a2, a4; + + a1 = *(signed char *)avals[0]; + a2 = *(signed short *)avals[1]; + a3 = *(signed char *)avals[2]; + a4 = *(signed short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef signed short (*test_type)(signed char, signed short, + signed char, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + signed char a, c; + signed short b, d, res_closure; + + a = 1; + b = 32765; + c = 127; + d = -128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = &ffi_type_schar; + cl_arg_types[3] = &ffi_type_sshort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 32765 127 -128: 32765" } */ + printf("res: %d\n", (signed short)res_call); + /* { dg-output "\nres: 32765" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 32765, 127, -128); + /* { dg-output "\n1 32765 127 -128: 32765" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32765" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar 2.c new file mode 100644 index 0000000..009c02c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar 2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned char test_func_fn(unsigned char a1, unsigned char a2, + unsigned char a3, unsigned char a4) +{ + unsigned char result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a2, a3, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned char *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned char *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned char (*test_type)(unsigned char, unsigned char, + unsigned char, unsigned char); + +void test_func(ffi_cif *cif __UNUSED__, void *rval __UNUSED__, void **avals, + void *data __UNUSED__) +{ + printf("%d %d %d %d\n", *(unsigned char *)avals[0], + *(unsigned char *)avals[1], *(unsigned char *)avals[2], + *(unsigned char *)avals[3]); +} +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, b, c, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_uchar; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 125: 255" } */ + printf("res: %d\n", (unsigned char)res_call); + /* { dg-output "\nres: 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 125); + /* { dg-output "\n1 2 127 125: 255" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c new file mode 100644 index 0000000..009c02c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned char test_func_fn(unsigned char a1, unsigned char a2, + unsigned char a3, unsigned char a4) +{ + unsigned char result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a2, a3, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned char *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned char *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned char (*test_type)(unsigned char, unsigned char, + unsigned char, unsigned char); + +void test_func(ffi_cif *cif __UNUSED__, void *rval __UNUSED__, void **avals, + void *data __UNUSED__) +{ + printf("%d %d %d %d\n", *(unsigned char *)avals[0], + *(unsigned char *)avals[1], *(unsigned char *)avals[2], + *(unsigned char *)avals[3]); +} +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, b, c, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_uchar; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 125: 255" } */ + printf("res: %d\n", (unsigned char)res_call); + /* { dg-output "\nres: 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 125); + /* { dg-output "\n1 2 127 125: 255" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort 2.c new file mode 100644 index 0000000..dd10ca7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort 2.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned short a1, unsigned short a2) +{ + unsigned short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned short a1, a2; + + a1 = *(unsigned short *)avals[0]; + a2 = *(unsigned short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef unsigned short (*test_type)(unsigned short, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c new file mode 100644 index 0000000..dd10ca7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned short a1, unsigned short a2) +{ + unsigned short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned short a1, a2; + + a1 = *(unsigned short *)avals[0]; + a2 = *(unsigned short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef unsigned short (*test_type)(unsigned short, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar 2.c new file mode 100644 index 0000000..2588e97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar 2.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned char a1, unsigned short a2, + unsigned char a3, unsigned short a4) +{ + unsigned short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a3; + unsigned short a2, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned short *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned short (*test_type)(unsigned char, unsigned short, + unsigned char, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, c; + unsigned short b, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_ushort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 128: 258" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 258" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 128); + /* { dg-output "\n1 2 127 128: 258" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 258" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c new file mode 100644 index 0000000..2588e97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned char a1, unsigned short a2, + unsigned char a3, unsigned short a4) +{ + unsigned short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a3; + unsigned short a2, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned short *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned short (*test_type)(unsigned char, unsigned short, + unsigned char, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, c; + unsigned short b, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_ushort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 128: 258" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 258" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 128); + /* { dg-output "\n1 2 127 128: 258" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 258" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer 2.c new file mode 100644 index 0000000..d82a87a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer 2.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +void* cls_pointer_fn(void* a1, void* a2) +{ + void* result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + *(void**)resp = cls_pointer_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x12345678; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_pointer_fn), &res, args); + /* { dg-output "0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + /* { dg-output "\n0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c new file mode 100644 index 0000000..d82a87a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +void* cls_pointer_fn(void* a1, void* a2) +{ + void* result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + *(void**)resp = cls_pointer_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x12345678; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_pointer_fn), &res, args); + /* { dg-output "0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + /* { dg-output "\n0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack 2.c new file mode 100644 index 0000000..1f1d915 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack 2.c @@ -0,0 +1,142 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments across multiple hideous stack frames. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/7/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +static long dummyVar; + +long dummy_func( + long double a1, char b1, + long double a2, char b2, + long double a3, char b3, + long double a4, char b4) +{ + return a1 + b1 + a2 + b2 + a3 + b3 + a4 + b4; +} + +void* cls_pointer_fn2(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +void* cls_pointer_fn1(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(intptr_t) a1, + (unsigned int)(intptr_t) a2, + (unsigned int)(intptr_t) result); + + result = cls_pointer_fn2(result, a1); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + *(void**)resp = cls_pointer_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x01234567; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + printf("\n"); + ffi_call(&cif, FFI_FN(cls_pointer_fn1), &res, args); + + printf("res: 0x%08x\n", (unsigned 
int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c new file mode 100644 index 0000000..1f1d915 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c @@ -0,0 +1,142 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments across multiple hideous stack frames. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/7/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +static long dummyVar; + +long dummy_func( + long double a1, char b1, + long double a2, char b2, + long double a3, char b3, + long double a4, char b4) +{ + return a1 + b1 + a2 + b2 + a3 + b3 + a4 + b4; +} + +void* cls_pointer_fn2(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +void* cls_pointer_fn1(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(intptr_t) a1, + (unsigned int)(intptr_t) a2, + (unsigned int)(intptr_t) result); + + result = cls_pointer_fn2(result, a1); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char 
trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + *(void**)resp = cls_pointer_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x01234567; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + printf("\n"); + ffi_call(&cif, FFI_FN(cls_pointer_fn1), &res, args); + + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar 2.c new file mode 100644 index 0000000..82986b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar 2.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Check return value schar. + Limitations: none. + PR: none. + Originator: 20031108 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_schar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed char *)args[0]; + printf("%d: %d\n",*(signed char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed char (*cls_ret_schar)(signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed char res; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_schar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_schar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c new file mode 100644 index 0000000..82986b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Check return value schar. 
+ Limitations: none. + PR: none. + Originator: 20031108 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_schar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed char *)args[0]; + printf("%d: %d\n",*(signed char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed char (*cls_ret_schar)(signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed char res; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_schar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_schar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint 2.c new file mode 100644 index 0000000..c7e13b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint 2.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sint32. + Limitations: none. + PR: none. + Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed int *)args[0]; + printf("%d: %d\n",*(signed int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed int (*cls_ret_sint)(signed int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed int res; + + cl_arg_types[0] = &ffi_type_sint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sint)code))(65534); + /* { dg-output "65534: 65534" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65534" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c new file mode 100644 index 0000000..c7e13b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sint32. + Limitations: none. + PR: none. 
+ Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed int *)args[0]; + printf("%d: %d\n",*(signed int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed int (*cls_ret_sint)(signed int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed int res; + + cl_arg_types[0] = &ffi_type_sint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sint)code))(65534); + /* { dg-output "65534: 65534" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65534" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort 2.c new file mode 100644 index 0000000..846d57e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort 2.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sshort. + Limitations: none. + PR: none. + Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sshort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed short *)args[0]; + printf("%d: %d\n",*(signed short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed short (*cls_ret_sshort)(signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed short res; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sshort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sshort)code))(255); + /* { dg-output "255: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c new file mode 100644 index 0000000..846d57e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sshort. + Limitations: none. + PR: none. 
+ Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sshort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed short *)args[0]; + printf("%d: %d\n",*(signed short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed short (*cls_ret_sshort)(signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed short res; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sshort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sshort)code))(255); + /* { dg-output "255: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1 2.c new file mode 100644 index 0000000..6d1fdae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1 2.c @@ -0,0 +1,114 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +#include "ffitest.h" + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static void +test_fn (ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + int n = *(int*)args[0]; + struct small_tag s1 = * (struct small_tag *) args[1]; + struct large_tag l1 = * (struct large_tag *) args[2]; + struct small_tag s2 = * (struct small_tag *) args[3]; + + printf ("%d %d %d %d %d %d %d %d %d %d\n", n, s1.a, s1.b, + l1.a, l1.b, l1.c, l1.d, l1.e, + s2.a, s2.b); + * (ffi_arg*) resp = 42; +} + +int +main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc (sizeof (ffi_closure), &code); + ffi_type* arg_types[5]; + + ffi_arg res = 0; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int si; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, + arg_types) == FFI_OK); + + si = 4; + s1.a = 5; + s1.b = 6; + + s2.a = 20; + s2.b = 21; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + CHECK(ffi_prep_closure_loc(pcl, &cif, 
test_fn, NULL, code) == FFI_OK); + + res = ((int (*)(int, ...))(code))(si, s1, l1, s2); + /* { dg-output "4 5 6 10 11 12 13 14 20 21" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c new file mode 100644 index 0000000..6d1fdae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c @@ -0,0 +1,114 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +#include "ffitest.h" + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static void +test_fn (ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + int n = *(int*)args[0]; + struct small_tag s1 = * (struct small_tag *) args[1]; + struct large_tag l1 = * (struct large_tag *) args[2]; + struct small_tag s2 = * (struct small_tag *) args[3]; + + printf ("%d %d %d %d %d %d %d %d %d %d\n", n, s1.a, s1.b, + l1.a, l1.b, l1.c, l1.d, l1.e, + s2.a, s2.b); + * (ffi_arg*) resp = 42; +} + +int +main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc (sizeof (ffi_closure), &code); + ffi_type* arg_types[5]; + + ffi_arg res = 0; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int si; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, + arg_types) == FFI_OK); + + si = 4; + s1.a = 5; + s1.b = 6; + + s2.a = 20; + s2.b = 21; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_fn, NULL, code) == FFI_OK); + + res = ((int (*)(int, ...))(code))(si, s1, l1, s2); + /* { dg-output "4 5 6 10 11 12 13 14 20 21" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar 2.c new file mode 100644 index 0000000..c1317e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar 2.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value uchar. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uchar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned char *)args[0]; + printf("%d: %d\n",*(unsigned char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned char (*cls_ret_uchar)(unsigned char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned char res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uchar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uchar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c new file mode 100644 index 0000000..c1317e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value uchar. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uchar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned char *)args[0]; + printf("%d: %d\n",*(unsigned char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned char (*cls_ret_uchar)(unsigned char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned char res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uchar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uchar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va 2.c new file mode 100644 index 0000000..6491c5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va 2.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned char argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned char T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c new file mode 100644 index 0000000..6491c5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned char argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned char T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint 2.c new file mode 100644 index 0000000..885cff5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint 2.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value uint. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg *)resp = *(unsigned int *)args[0]; + + printf("%d: %d\n",*(unsigned int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned int (*cls_ret_uint)(unsigned int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uint)code))(2147483647); + /* { dg-output "2147483647: 2147483647" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c new file mode 100644 index 0000000..885cff5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value uint. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg *)resp = *(unsigned int *)args[0]; + + printf("%d: %d\n",*(unsigned int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned int (*cls_ret_uint)(unsigned int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uint)code))(2147483647); + /* { dg-output "2147483647: 2147483647" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va 2.c new file mode 100644 index 0000000..b04cfd1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va 2.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned int argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned int T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)*(ffi_arg *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c new file mode 100644 index 0000000..b04cfd1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned int argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned int T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)*(ffi_arg *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va 2.c new file mode 100644 index 0000000..0315082 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va 2.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned long argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned long T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(T *)resp = *(T *)args[0]; + + printf("%ld: %ld %ld\n", *(T *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ulong; + cl_arg_types[1] = &ffi_type_ulong; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ulong, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %ld\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c new file mode 100644 index 0000000..0315082 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned long argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned long T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(T *)resp = *(T *)args[0]; + + printf("%ld: %ld %ld\n", *(T *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ulong; + cl_arg_types[1] = &ffi_type_ulong; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ulong, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %ld\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong 2.c new file mode 100644 index 0000000..62f2cae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong 2.c @@ -0,0 +1,47 @@ +/* Area: closure_call + Purpose: Check return value long long. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +static void cls_ret_ulonglong_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + *(unsigned long long *)resp= 0xfffffffffffffffLL ^ *(unsigned long long *)args[0]; + + printf("%" PRIuLL ": %" PRIuLL "\n",*(unsigned long long *)args[0], + *(unsigned long long *)(resp)); +} +typedef unsigned long long (*cls_ret_ulonglong)(unsigned long long); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned long long res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint64, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ulonglong_fn, NULL, code) == FFI_OK); + res = (*((cls_ret_ulonglong)code))(214LL); + /* { dg-output "214: 1152921504606846761" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 1152921504606846761" } */ + + res = (*((cls_ret_ulonglong)code))(9223372035854775808LL); + /* { dg-output "\n9223372035854775808: 8070450533247928831" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 8070450533247928831" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c new file mode 100644 index 0000000..62f2cae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c @@ -0,0 +1,47 @@ +/* Area: closure_call + Purpose: Check return value long long. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +static void cls_ret_ulonglong_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + *(unsigned long long *)resp= 0xfffffffffffffffLL ^ *(unsigned long long *)args[0]; + + printf("%" PRIuLL ": %" PRIuLL "\n",*(unsigned long long *)args[0], + *(unsigned long long *)(resp)); +} +typedef unsigned long long (*cls_ret_ulonglong)(unsigned long long); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned long long res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint64, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ulonglong_fn, NULL, code) == FFI_OK); + res = (*((cls_ret_ulonglong)code))(214LL); + /* { dg-output "214: 1152921504606846761" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 1152921504606846761" } */ + + res = (*((cls_ret_ulonglong)code))(9223372035854775808LL); + /* { dg-output "\n9223372035854775808: 8070450533247928831" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 8070450533247928831" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort 2.c new file mode 100644 index 0000000..a00100e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort 2.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value ushort. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_ushort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned short *)args[0]; + + printf("%d: %d\n",*(unsigned short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned short (*cls_ret_ushort)(unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned short res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ushort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_ushort)code))(65535); + /* { dg-output "65535: 65535" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65535" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c new file mode 100644 index 0000000..a00100e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value ushort. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_ushort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned short *)args[0]; + + printf("%d: %d\n",*(unsigned short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned short (*cls_ret_ushort)(unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned short res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ushort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_ushort)code))(65535); + /* { dg-output "65535: 65535" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65535" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va 2.c new file mode 100644 index 0000000..37aa106 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va 2.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned short argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned short T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c new file mode 100644 index 0000000..37aa106 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned short argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned short T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi 2.c new file mode 100644 index 0000000..f5a7317 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi 2.c @@ -0,0 +1,36 @@ +/* Area: ffi_prep_cif, ffi_prep_closure + Purpose: Test error return for bad ABIs. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +dummy_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* arg_types[1]; + + arg_types[0] = NULL; + + CHECK(ffi_prep_cif(&cif, 255, 0, &ffi_type_void, + arg_types) == FFI_BAD_ABI); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_void, + arg_types) == FFI_OK); + + cif.abi= 255; + + CHECK(ffi_prep_closure_loc(pcl, &cif, dummy_fn, NULL, code) == FFI_BAD_ABI); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c new file mode 100644 index 0000000..f5a7317 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c @@ -0,0 +1,36 @@ +/* Area: ffi_prep_cif, ffi_prep_closure + Purpose: Test error return for bad ABIs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +dummy_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* arg_types[1]; + + arg_types[0] = NULL; + + CHECK(ffi_prep_cif(&cif, 255, 0, &ffi_type_void, + arg_types) == FFI_BAD_ABI); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_void, + arg_types) == FFI_OK); + + cif.abi= 255; + + CHECK(ffi_prep_closure_loc(pcl, &cif, dummy_fn, NULL, code) == FFI_BAD_ABI); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest 2.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest 2.h @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include +#endif + +#if defined HAVE_INTTYPES_H +#include +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match , which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in . */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match , which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. 
*/ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. */ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include +#endif + +#if defined HAVE_INTTYPES_H +#include +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match , which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in . */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match , which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. */ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. 
*/ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. */ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct 2.c new file mode 100644 index 0000000..1915c3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct 2.c @@ -0,0 +1,341 @@ +/* Area: ffi_call, closure_call + Purpose: Check large structure returns. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ +/* { dg-options -Wformat=0 { target moxie*-*-elf or1k-*-* } } */ + +#include "ffitest.h" + +typedef struct BigStruct{ + uint8_t a; + int8_t b; + uint16_t c; + int16_t d; + uint32_t e; + int32_t f; + uint64_t g; + int64_t h; + float i; + double j; + long double k; + char* l; + uint8_t m; + int8_t n; + uint16_t o; + int16_t p; + uint32_t q; + int32_t r; + uint64_t s; + int64_t t; + float u; + double v; + long double w; + char* x; + uint8_t y; + int8_t z; + uint16_t aa; + int16_t bb; + uint32_t cc; + int32_t dd; + uint64_t ee; + int64_t ff; + float gg; + double hh; + long double ii; + char* jj; + uint8_t kk; + int8_t ll; + uint16_t mm; + int16_t nn; + uint32_t oo; + int32_t pp; + uint64_t qq; + int64_t rr; + float ss; + double tt; + long double uu; + char* vv; + uint8_t ww; + int8_t xx; +} BigStruct; + +BigStruct +test_large_fn( + uint8_t ui8_1, + int8_t si8_1, + uint16_t ui16_1, + int16_t si16_1, + uint32_t ui32_1, + int32_t si32_1, + uint64_t ui64_1, + int64_t si64_1, + float f_1, + double d_1, + long double ld_1, + char* p_1, + uint8_t ui8_2, + int8_t si8_2, + uint16_t ui16_2, + int16_t si16_2, + uint32_t ui32_2, + int32_t si32_2, + uint64_t ui64_2, + int64_t si64_2, + float f_2, + double d_2, + long double ld_2, + char* p_2, + uint8_t ui8_3, + int8_t si8_3, + uint16_t ui16_3, + int16_t si16_3, + uint32_t ui32_3, + int32_t si32_3, + uint64_t ui64_3, + int64_t si64_3, + float f_3, + double d_3, + long double ld_3, + char* p_3, + uint8_t ui8_4, + int8_t si8_4, + uint16_t ui16_4, + int16_t si16_4, + uint32_t ui32_4, + int32_t si32_4, + uint64_t ui64_4, + int64_t si64_4, + float f_4, + double d_4, + long double ld_4, + char* p_4, + uint8_t ui8_5, + int8_t si8_5) +{ + BigStruct retVal = { + ui8_1 + 1, si8_1 + 1, ui16_1 + 1, si16_1 + 1, ui32_1 + 1, si32_1 + 1, + ui64_1 + 1, si64_1 + 1, f_1 + 1, d_1 + 1, ld_1 + 1, (char*)((intptr_t)p_1 + 1), + ui8_2 + 2, si8_2 + 2, ui16_2 + 2, si16_2 + 2, ui32_2 + 2, si32_2 + 2, + ui64_2 + 2, si64_2 + 2, f_2 + 2, d_2 + 2, ld_2 + 2, (char*)((intptr_t)p_2 + 2), + ui8_3 + 3, si8_3 + 3, ui16_3 + 3, si16_3 + 3, ui32_3 + 3, si32_3 + 3, + ui64_3 + 3, si64_3 + 3, f_3 + 3, d_3 + 3, ld_3 + 3, (char*)((intptr_t)p_3 + 3), + ui8_4 + 4, si8_4 + 4, ui16_4 + 4, si16_4 + 4, ui32_4 + 4, si32_4 + 4, + ui64_4 + 4, si64_4 + 4, f_4 + 4, d_4 + 4, ld_4 + 4, (char*)((intptr_t)p_4 + 4), + ui8_5 + 5, si8_5 + 5}; + + printf("%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" 
PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 ": " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, (unsigned long)p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, (unsigned long)p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, (unsigned long)p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, (unsigned long)p_4, ui8_5, si8_5, + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + + return retVal; +} + +static void +cls_large_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + uint8_t ui8_1 = *(uint8_t*)args[0]; + int8_t si8_1 = *(int8_t*)args[1]; + uint16_t ui16_1 = *(uint16_t*)args[2]; + int16_t si16_1 = *(int16_t*)args[3]; + uint32_t ui32_1 = *(uint32_t*)args[4]; + int32_t si32_1 = *(int32_t*)args[5]; + uint64_t ui64_1 = *(uint64_t*)args[6]; + int64_t si64_1 = *(int64_t*)args[7]; + float f_1 = *(float*)args[8]; + double d_1 = *(double*)args[9]; + long double ld_1 = *(long double*)args[10]; + char* p_1 = *(char**)args[11]; + uint8_t ui8_2 = *(uint8_t*)args[12]; + int8_t si8_2 = *(int8_t*)args[13]; + uint16_t ui16_2 = *(uint16_t*)args[14]; + int16_t si16_2 = *(int16_t*)args[15]; + uint32_t ui32_2 = *(uint32_t*)args[16]; + int32_t si32_2 = *(int32_t*)args[17]; + uint64_t ui64_2 = *(uint64_t*)args[18]; + int64_t si64_2 = *(int64_t*)args[19]; + float f_2 = *(float*)args[20]; + double d_2 = *(double*)args[21]; + long double ld_2 = *(long double*)args[22]; + char* p_2 = *(char**)args[23]; + uint8_t ui8_3 = *(uint8_t*)args[24]; + int8_t si8_3 = *(int8_t*)args[25]; + uint16_t ui16_3 = *(uint16_t*)args[26]; + int16_t si16_3 = *(int16_t*)args[27]; + uint32_t ui32_3 = *(uint32_t*)args[28]; + int32_t si32_3 = *(int32_t*)args[29]; + uint64_t ui64_3 = *(uint64_t*)args[30]; + int64_t si64_3 = *(int64_t*)args[31]; + float f_3 = *(float*)args[32]; + double d_3 = *(double*)args[33]; + long double ld_3 = *(long double*)args[34]; + char* p_3 = *(char**)args[35]; + uint8_t ui8_4 = *(uint8_t*)args[36]; + int8_t si8_4 = *(int8_t*)args[37]; + uint16_t ui16_4 = *(uint16_t*)args[38]; + int16_t si16_4 = *(int16_t*)args[39]; + uint32_t ui32_4 = *(uint32_t*)args[40]; + int32_t si32_4 = *(int32_t*)args[41]; + uint64_t ui64_4 = *(uint64_t*)args[42]; + int64_t si64_4 = *(int64_t*)args[43]; + float 
f_4 = *(float*)args[44]; + double d_4 = *(double*)args[45]; + long double ld_4 = *(long double*)args[46]; + char* p_4 = *(char**)args[47]; + uint8_t ui8_5 = *(uint8_t*)args[48]; + int8_t si8_5 = *(int8_t*)args[49]; + + *(BigStruct*)resp = test_large_fn( + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, p_4, + ui8_5, si8_5); +} + +int +main(int argc __UNUSED__, const char** argv __UNUSED__) +{ + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + + ffi_cif cif; + ffi_type* argTypes[51]; + void* argValues[51]; + + ffi_type ret_struct_type; + ffi_type* st_fields[51]; + BigStruct retVal; + + uint8_t ui8 = 1; + int8_t si8 = 2; + uint16_t ui16 = 3; + int16_t si16 = 4; + uint32_t ui32 = 5; + int32_t si32 = 6; + uint64_t ui64 = 7; + int64_t si64 = 8; + float f = 9; + double d = 10; + long double ld = 11; + char* p = (char*)0x12345678; + + memset (&retVal, 0, sizeof(retVal)); + + ret_struct_type.size = 0; + ret_struct_type.alignment = 0; + ret_struct_type.type = FFI_TYPE_STRUCT; + ret_struct_type.elements = st_fields; + + st_fields[0] = st_fields[12] = st_fields[24] = st_fields[36] = st_fields[48] = &ffi_type_uint8; + st_fields[1] = st_fields[13] = st_fields[25] = st_fields[37] = st_fields[49] = &ffi_type_sint8; + st_fields[2] = st_fields[14] = st_fields[26] = st_fields[38] = &ffi_type_uint16; + st_fields[3] = st_fields[15] = st_fields[27] = st_fields[39] = &ffi_type_sint16; + st_fields[4] = st_fields[16] = st_fields[28] = st_fields[40] = &ffi_type_uint32; + st_fields[5] = st_fields[17] = st_fields[29] = st_fields[41] = &ffi_type_sint32; + st_fields[6] = st_fields[18] = st_fields[30] = st_fields[42] = &ffi_type_uint64; + st_fields[7] = st_fields[19] = st_fields[31] = st_fields[43] = &ffi_type_sint64; + st_fields[8] = st_fields[20] = st_fields[32] = st_fields[44] = &ffi_type_float; + st_fields[9] = st_fields[21] = st_fields[33] = st_fields[45] = &ffi_type_double; + st_fields[10] = st_fields[22] = st_fields[34] = st_fields[46] = &ffi_type_longdouble; + st_fields[11] = st_fields[23] = st_fields[35] = st_fields[47] = &ffi_type_pointer; + + st_fields[50] = NULL; + + argTypes[0] = argTypes[12] = argTypes[24] = argTypes[36] = argTypes[48] = &ffi_type_uint8; + argValues[0] = argValues[12] = argValues[24] = argValues[36] = argValues[48] = &ui8; + argTypes[1] = argTypes[13] = argTypes[25] = argTypes[37] = argTypes[49] = &ffi_type_sint8; + argValues[1] = argValues[13] = argValues[25] = argValues[37] = argValues[49] = &si8; + argTypes[2] = argTypes[14] = argTypes[26] = argTypes[38] = &ffi_type_uint16; + argValues[2] = argValues[14] = argValues[26] = argValues[38] = &ui16; + argTypes[3] = argTypes[15] = argTypes[27] = argTypes[39] = &ffi_type_sint16; + argValues[3] = argValues[15] = argValues[27] = argValues[39] = &si16; + argTypes[4] = argTypes[16] = argTypes[28] = argTypes[40] = &ffi_type_uint32; + argValues[4] = argValues[16] = argValues[28] = argValues[40] = &ui32; + argTypes[5] = argTypes[17] = argTypes[29] = argTypes[41] = &ffi_type_sint32; + argValues[5] = argValues[17] = argValues[29] = argValues[41] = &si32; + argTypes[6] = argTypes[18] = argTypes[30] = argTypes[42] = &ffi_type_uint64; + argValues[6] = argValues[18] = argValues[30] = argValues[42] = &ui64; + argTypes[7] = argTypes[19] = 
argTypes[31] = argTypes[43] = &ffi_type_sint64; + argValues[7] = argValues[19] = argValues[31] = argValues[43] = &si64; + argTypes[8] = argTypes[20] = argTypes[32] = argTypes[44] = &ffi_type_float; + argValues[8] = argValues[20] = argValues[32] = argValues[44] = &f; + argTypes[9] = argTypes[21] = argTypes[33] = argTypes[45] = &ffi_type_double; + argValues[9] = argValues[21] = argValues[33] = argValues[45] = &d; + argTypes[10] = argTypes[22] = argTypes[34] = argTypes[46] = &ffi_type_longdouble; + argValues[10] = argValues[22] = argValues[34] = argValues[46] = &ld; + argTypes[11] = argTypes[23] = argTypes[35] = argTypes[47] = &ffi_type_pointer; + argValues[11] = argValues[23] = argValues[35] = argValues[47] = &p; + + argTypes[50] = NULL; + argValues[50] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 50, &ret_struct_type, argTypes) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_large_fn), &retVal, argValues); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_large_fn, NULL, code) == FFI_OK); + + retVal = ((BigStruct(*)( + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t))(code))( + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 
0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c new file mode 100644 index 0000000..1915c3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c @@ -0,0 +1,341 @@ +/* Area: ffi_call, closure_call + Purpose: Check large structure returns. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ +/* { dg-options -Wformat=0 { target moxie*-*-elf or1k-*-* } } */ + +#include "ffitest.h" + +typedef struct BigStruct{ + uint8_t a; + int8_t b; + uint16_t c; + int16_t d; + uint32_t e; + int32_t f; + uint64_t g; + int64_t h; + float i; + double j; + long double k; + char* l; + uint8_t m; + int8_t n; + uint16_t o; + int16_t p; + uint32_t q; + int32_t r; + uint64_t s; + int64_t t; + float u; + double v; + long double w; + char* x; + uint8_t y; + int8_t z; + uint16_t aa; + int16_t bb; + uint32_t cc; + int32_t dd; + uint64_t ee; + int64_t ff; + float gg; + double hh; + long double ii; + char* jj; + uint8_t kk; + int8_t ll; + uint16_t mm; + int16_t nn; + uint32_t oo; + int32_t pp; + uint64_t qq; + int64_t rr; + float ss; + double tt; + long double uu; + char* vv; + uint8_t ww; + int8_t xx; +} BigStruct; + +BigStruct +test_large_fn( + uint8_t ui8_1, + int8_t si8_1, + uint16_t ui16_1, + int16_t si16_1, + uint32_t ui32_1, + int32_t si32_1, + uint64_t ui64_1, + int64_t si64_1, + float f_1, + double d_1, + long double ld_1, + char* p_1, + uint8_t ui8_2, + int8_t si8_2, + uint16_t ui16_2, + int16_t si16_2, + uint32_t ui32_2, + int32_t si32_2, + uint64_t ui64_2, + int64_t si64_2, + float f_2, + double d_2, + long double ld_2, + char* p_2, + uint8_t ui8_3, + int8_t si8_3, + uint16_t ui16_3, + int16_t si16_3, + uint32_t ui32_3, + int32_t si32_3, + uint64_t ui64_3, + int64_t si64_3, + float f_3, + double d_3, + long double ld_3, + char* p_3, + uint8_t ui8_4, + int8_t si8_4, + uint16_t ui16_4, + int16_t si16_4, + uint32_t ui32_4, + int32_t si32_4, + uint64_t ui64_4, + int64_t si64_4, + float f_4, + double d_4, + long double ld_4, + char* p_4, + uint8_t ui8_5, + 
int8_t si8_5) +{ + BigStruct retVal = { + ui8_1 + 1, si8_1 + 1, ui16_1 + 1, si16_1 + 1, ui32_1 + 1, si32_1 + 1, + ui64_1 + 1, si64_1 + 1, f_1 + 1, d_1 + 1, ld_1 + 1, (char*)((intptr_t)p_1 + 1), + ui8_2 + 2, si8_2 + 2, ui16_2 + 2, si16_2 + 2, ui32_2 + 2, si32_2 + 2, + ui64_2 + 2, si64_2 + 2, f_2 + 2, d_2 + 2, ld_2 + 2, (char*)((intptr_t)p_2 + 2), + ui8_3 + 3, si8_3 + 3, ui16_3 + 3, si16_3 + 3, ui32_3 + 3, si32_3 + 3, + ui64_3 + 3, si64_3 + 3, f_3 + 3, d_3 + 3, ld_3 + 3, (char*)((intptr_t)p_3 + 3), + ui8_4 + 4, si8_4 + 4, ui16_4 + 4, si16_4 + 4, ui32_4 + 4, si32_4 + 4, + ui64_4 + 4, si64_4 + 4, f_4 + 4, d_4 + 4, ld_4 + 4, (char*)((intptr_t)p_4 + 4), + ui8_5 + 5, si8_5 + 5}; + + printf("%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 ": " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, (unsigned long)p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, (unsigned long)p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, (unsigned long)p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, (unsigned long)p_4, ui8_5, si8_5, + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + + return retVal; +} + +static void +cls_large_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + uint8_t ui8_1 = *(uint8_t*)args[0]; + int8_t si8_1 = *(int8_t*)args[1]; + uint16_t ui16_1 = *(uint16_t*)args[2]; + int16_t si16_1 = *(int16_t*)args[3]; + uint32_t ui32_1 = *(uint32_t*)args[4]; + int32_t si32_1 = *(int32_t*)args[5]; + uint64_t ui64_1 = *(uint64_t*)args[6]; + int64_t si64_1 = *(int64_t*)args[7]; + float f_1 = *(float*)args[8]; + double d_1 = *(double*)args[9]; + long double ld_1 = *(long double*)args[10]; + char* p_1 = *(char**)args[11]; + uint8_t ui8_2 = *(uint8_t*)args[12]; + int8_t si8_2 = *(int8_t*)args[13]; + uint16_t ui16_2 = *(uint16_t*)args[14]; + int16_t si16_2 = *(int16_t*)args[15]; + uint32_t ui32_2 = *(uint32_t*)args[16]; + int32_t si32_2 = *(int32_t*)args[17]; + uint64_t ui64_2 = *(uint64_t*)args[18]; + int64_t si64_2 = *(int64_t*)args[19]; + float f_2 = *(float*)args[20]; + double d_2 = *(double*)args[21]; + long double ld_2 = *(long double*)args[22]; + char* p_2 = *(char**)args[23]; + uint8_t ui8_3 = *(uint8_t*)args[24]; + int8_t si8_3 = 
*(int8_t*)args[25]; + uint16_t ui16_3 = *(uint16_t*)args[26]; + int16_t si16_3 = *(int16_t*)args[27]; + uint32_t ui32_3 = *(uint32_t*)args[28]; + int32_t si32_3 = *(int32_t*)args[29]; + uint64_t ui64_3 = *(uint64_t*)args[30]; + int64_t si64_3 = *(int64_t*)args[31]; + float f_3 = *(float*)args[32]; + double d_3 = *(double*)args[33]; + long double ld_3 = *(long double*)args[34]; + char* p_3 = *(char**)args[35]; + uint8_t ui8_4 = *(uint8_t*)args[36]; + int8_t si8_4 = *(int8_t*)args[37]; + uint16_t ui16_4 = *(uint16_t*)args[38]; + int16_t si16_4 = *(int16_t*)args[39]; + uint32_t ui32_4 = *(uint32_t*)args[40]; + int32_t si32_4 = *(int32_t*)args[41]; + uint64_t ui64_4 = *(uint64_t*)args[42]; + int64_t si64_4 = *(int64_t*)args[43]; + float f_4 = *(float*)args[44]; + double d_4 = *(double*)args[45]; + long double ld_4 = *(long double*)args[46]; + char* p_4 = *(char**)args[47]; + uint8_t ui8_5 = *(uint8_t*)args[48]; + int8_t si8_5 = *(int8_t*)args[49]; + + *(BigStruct*)resp = test_large_fn( + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, p_4, + ui8_5, si8_5); +} + +int +main(int argc __UNUSED__, const char** argv __UNUSED__) +{ + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + + ffi_cif cif; + ffi_type* argTypes[51]; + void* argValues[51]; + + ffi_type ret_struct_type; + ffi_type* st_fields[51]; + BigStruct retVal; + + uint8_t ui8 = 1; + int8_t si8 = 2; + uint16_t ui16 = 3; + int16_t si16 = 4; + uint32_t ui32 = 5; + int32_t si32 = 6; + uint64_t ui64 = 7; + int64_t si64 = 8; + float f = 9; + double d = 10; + long double ld = 11; + char* p = (char*)0x12345678; + + memset (&retVal, 0, sizeof(retVal)); + + ret_struct_type.size = 0; + ret_struct_type.alignment = 0; + ret_struct_type.type = FFI_TYPE_STRUCT; + ret_struct_type.elements = st_fields; + + st_fields[0] = st_fields[12] = st_fields[24] = st_fields[36] = st_fields[48] = &ffi_type_uint8; + st_fields[1] = st_fields[13] = st_fields[25] = st_fields[37] = st_fields[49] = &ffi_type_sint8; + st_fields[2] = st_fields[14] = st_fields[26] = st_fields[38] = &ffi_type_uint16; + st_fields[3] = st_fields[15] = st_fields[27] = st_fields[39] = &ffi_type_sint16; + st_fields[4] = st_fields[16] = st_fields[28] = st_fields[40] = &ffi_type_uint32; + st_fields[5] = st_fields[17] = st_fields[29] = st_fields[41] = &ffi_type_sint32; + st_fields[6] = st_fields[18] = st_fields[30] = st_fields[42] = &ffi_type_uint64; + st_fields[7] = st_fields[19] = st_fields[31] = st_fields[43] = &ffi_type_sint64; + st_fields[8] = st_fields[20] = st_fields[32] = st_fields[44] = &ffi_type_float; + st_fields[9] = st_fields[21] = st_fields[33] = st_fields[45] = &ffi_type_double; + st_fields[10] = st_fields[22] = st_fields[34] = st_fields[46] = &ffi_type_longdouble; + st_fields[11] = st_fields[23] = st_fields[35] = st_fields[47] = &ffi_type_pointer; + + st_fields[50] = NULL; + + argTypes[0] = argTypes[12] = argTypes[24] = argTypes[36] = argTypes[48] = &ffi_type_uint8; + argValues[0] = argValues[12] = argValues[24] = argValues[36] = argValues[48] = &ui8; + argTypes[1] = argTypes[13] = argTypes[25] = argTypes[37] = argTypes[49] = &ffi_type_sint8; + argValues[1] = argValues[13] = argValues[25] = argValues[37] = argValues[49] = &si8; + argTypes[2] = argTypes[14] = argTypes[26] = 
argTypes[38] = &ffi_type_uint16; + argValues[2] = argValues[14] = argValues[26] = argValues[38] = &ui16; + argTypes[3] = argTypes[15] = argTypes[27] = argTypes[39] = &ffi_type_sint16; + argValues[3] = argValues[15] = argValues[27] = argValues[39] = &si16; + argTypes[4] = argTypes[16] = argTypes[28] = argTypes[40] = &ffi_type_uint32; + argValues[4] = argValues[16] = argValues[28] = argValues[40] = &ui32; + argTypes[5] = argTypes[17] = argTypes[29] = argTypes[41] = &ffi_type_sint32; + argValues[5] = argValues[17] = argValues[29] = argValues[41] = &si32; + argTypes[6] = argTypes[18] = argTypes[30] = argTypes[42] = &ffi_type_uint64; + argValues[6] = argValues[18] = argValues[30] = argValues[42] = &ui64; + argTypes[7] = argTypes[19] = argTypes[31] = argTypes[43] = &ffi_type_sint64; + argValues[7] = argValues[19] = argValues[31] = argValues[43] = &si64; + argTypes[8] = argTypes[20] = argTypes[32] = argTypes[44] = &ffi_type_float; + argValues[8] = argValues[20] = argValues[32] = argValues[44] = &f; + argTypes[9] = argTypes[21] = argTypes[33] = argTypes[45] = &ffi_type_double; + argValues[9] = argValues[21] = argValues[33] = argValues[45] = &d; + argTypes[10] = argTypes[22] = argTypes[34] = argTypes[46] = &ffi_type_longdouble; + argValues[10] = argValues[22] = argValues[34] = argValues[46] = &ld; + argTypes[11] = argTypes[23] = argTypes[35] = argTypes[47] = &ffi_type_pointer; + argValues[11] = argValues[23] = argValues[35] = argValues[47] = &p; + + argTypes[50] = NULL; + argValues[50] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 50, &ret_struct_type, argTypes) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_large_fn), &retVal, argValues); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_large_fn, NULL, code) == FFI_OK); + + retVal = ((BigStruct(*)( + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, 
int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t))(code))( + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct 2.c new file mode 100644 index 0000000..c15e3a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct 2.c @@ -0,0 +1,152 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + 
g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined)) + (code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c new file mode 100644 index 0000000..c15e3a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c @@ -0,0 +1,152 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_combined 
res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined)) + (code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1 2.c new file mode 100644 index 0000000..477a6b9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1 2.c @@ -0,0 +1,161 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2, + struct cls_struct_16byte1 b3) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g %g %g %d: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + b3.a, b3.b, b3.c, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + struct cls_struct_16byte1 b3; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + b3 = *(struct cls_struct_16byte1*)(args[3]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_16byte1 h_dbl = { 3.0, 2.0, 4}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + 
args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined, + cls_struct_16byte1)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + /* CHECK( 1 == 0); */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c new file mode 100644 index 0000000..477a6b9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c @@ -0,0 +1,161 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2, + struct cls_struct_16byte1 b3) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g %g %g %d: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + b3.a, b3.b, b3.c, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + struct cls_struct_16byte1 b3; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + b3 = *(struct cls_struct_16byte1*)(args[3]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_16byte1 h_dbl = { 3.0, 2.0, 4}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + 
args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined, + cls_struct_16byte1)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + /* CHECK( 1 == 0); */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10 2.c new file mode 100644 index 0000000..3cf2b44 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10 2.c @@ -0,0 +1,134 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + unsigned char y; + struct A x; + unsigned int z; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b3.z + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + result.z = 0; + + printf("%d %d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, b3.z, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[4]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = { 99, {12LL , 127}, 255}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &ffi_type_uchar; + cls_struct_fields1[1] = &cls_struct_type; + cls_struct_fields1[2] = &ffi_type_uint; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c new file mode 100644 index 0000000..3cf2b44 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c @@ -0,0 +1,134 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + unsigned char y; + struct A x; + unsigned int z; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b3.z + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + result.z = 0; + + printf("%d %d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, b3.z, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[4]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = { 99, {12LL , 127}, 255}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &ffi_type_uchar; + cls_struct_fields1[1] = &cls_struct_type; + cls_struct_fields1[2] = &ffi_type_uint; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == 
FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11 2.c new file mode 100644 index 0000000..3510493 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11 2.c @@ -0,0 +1,121 @@ +/* Area: ffi_call, closure_call + Purpose: Check parameter passing with nested structs + of a single type. This tests the special cases + for homogeneous floating-point aggregates in the + AArch64 PCS. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + float a_x; + float a_y; +} A; + +typedef struct B { + float b_x; + float b_y; +} B; + +typedef struct C { + A a; + B b; +} C; + +static C C_fn (int x, int y, int z, C source, int i, int j, int k) +{ + C result; + result.a.a_x = source.a.a_x; + result.a.a_y = source.a.a_y; + result.b.b_x = source.b.b_x; + result.b.b_y = source.b.b_y; + + printf ("%d, %d, %d, %d, %d, %d\n", x, y, z, i, j, k); + + printf ("%.1f, %.1f, %.1f, %.1f, " + "%.1f, %.1f, %.1f, %.1f\n", + source.a.a_x, source.a.a_y, + source.b.b_x, source.b.b_y, + result.a.a_x, result.a.a_y, + result.b.b_x, result.b.b_y); + + return result; +} + +int main (void) +{ + ffi_cif cif; + + ffi_type* struct_fields_source_a[3]; + ffi_type* struct_fields_source_b[3]; + ffi_type* struct_fields_source_c[3]; + ffi_type* arg_types[8]; + + ffi_type struct_type_a, struct_type_b, struct_type_c; + + struct A source_fld_a = {1.0, 2.0}; + struct B source_fld_b = {4.0, 8.0}; + int k = 1; + + struct C result; + struct C source = {source_fld_a, source_fld_b}; + + struct_type_a.size = 0; + struct_type_a.alignment = 0; + struct_type_a.type = FFI_TYPE_STRUCT; + struct_type_a.elements = struct_fields_source_a; + + struct_type_b.size = 0; + struct_type_b.alignment = 0; + struct_type_b.type = FFI_TYPE_STRUCT; + struct_type_b.elements = struct_fields_source_b; + + struct_type_c.size = 0; + struct_type_c.alignment = 0; + struct_type_c.type = FFI_TYPE_STRUCT; + struct_type_c.elements = struct_fields_source_c; + + struct_fields_source_a[0] = &ffi_type_float; + struct_fields_source_a[1] = &ffi_type_float; + struct_fields_source_a[2] = NULL; + + struct_fields_source_b[0] = &ffi_type_float; + struct_fields_source_b[1] = &ffi_type_float; + struct_fields_source_b[2] = NULL; + + struct_fields_source_c[0] = &struct_type_a; + struct_fields_source_c[1] = &struct_type_b; + struct_fields_source_c[2] = NULL; + + arg_types[0] = &ffi_type_sint32; + arg_types[1] = &ffi_type_sint32; + arg_types[2] = &ffi_type_sint32; + arg_types[3] = &struct_type_c; + arg_types[4] = &ffi_type_sint32; + arg_types[5] = &ffi_type_sint32; + arg_types[6] = &ffi_type_sint32; + arg_types[7] = NULL; + + void *args[7]; + args[0] = &k; + args[1] = &k; + args[2] = &k; + args[3] = &source; + args[4] = &k; + args[5] = &k; + args[6] = &k; + CHECK (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 7, &struct_type_c, + arg_types) == FFI_OK); + + ffi_call (&cif, FFI_FN (C_fn), &result, args); + /* { dg-output "1, 1, 1, 1, 1, 1\n" } */ + /* { dg-output "1.0, 2.0, 4.0, 
8.0, 1.0, 2.0, 4.0, 8.0" } */ + CHECK (result.a.a_x == source.a.a_x); + CHECK (result.a.a_y == source.a.a_y); + CHECK (result.b.b_x == source.b.b_x); + CHECK (result.b.b_y == source.b.b_y); + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c new file mode 100644 index 0000000..3510493 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c @@ -0,0 +1,121 @@ +/* Area: ffi_call, closure_call + Purpose: Check parameter passing with nested structs + of a single type. This tests the special cases + for homogeneous floating-point aggregates in the + AArch64 PCS. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + float a_x; + float a_y; +} A; + +typedef struct B { + float b_x; + float b_y; +} B; + +typedef struct C { + A a; + B b; +} C; + +static C C_fn (int x, int y, int z, C source, int i, int j, int k) +{ + C result; + result.a.a_x = source.a.a_x; + result.a.a_y = source.a.a_y; + result.b.b_x = source.b.b_x; + result.b.b_y = source.b.b_y; + + printf ("%d, %d, %d, %d, %d, %d\n", x, y, z, i, j, k); + + printf ("%.1f, %.1f, %.1f, %.1f, " + "%.1f, %.1f, %.1f, %.1f\n", + source.a.a_x, source.a.a_y, + source.b.b_x, source.b.b_y, + result.a.a_x, result.a.a_y, + result.b.b_x, result.b.b_y); + + return result; +} + +int main (void) +{ + ffi_cif cif; + + ffi_type* struct_fields_source_a[3]; + ffi_type* struct_fields_source_b[3]; + ffi_type* struct_fields_source_c[3]; + ffi_type* arg_types[8]; + + ffi_type struct_type_a, struct_type_b, struct_type_c; + + struct A source_fld_a = {1.0, 2.0}; + struct B source_fld_b = {4.0, 8.0}; + int k = 1; + + struct C result; + struct C source = {source_fld_a, source_fld_b}; + + struct_type_a.size = 0; + struct_type_a.alignment = 0; + struct_type_a.type = FFI_TYPE_STRUCT; + struct_type_a.elements = struct_fields_source_a; + + struct_type_b.size = 0; + struct_type_b.alignment = 0; + struct_type_b.type = FFI_TYPE_STRUCT; + struct_type_b.elements = struct_fields_source_b; + + struct_type_c.size = 0; + struct_type_c.alignment = 0; + struct_type_c.type = FFI_TYPE_STRUCT; + struct_type_c.elements = struct_fields_source_c; + + struct_fields_source_a[0] = &ffi_type_float; + struct_fields_source_a[1] = &ffi_type_float; + struct_fields_source_a[2] = NULL; + + struct_fields_source_b[0] = &ffi_type_float; + struct_fields_source_b[1] = &ffi_type_float; + struct_fields_source_b[2] = NULL; + + struct_fields_source_c[0] = &struct_type_a; + struct_fields_source_c[1] = &struct_type_b; + struct_fields_source_c[2] = NULL; + + arg_types[0] = &ffi_type_sint32; + arg_types[1] = &ffi_type_sint32; + arg_types[2] = &ffi_type_sint32; + arg_types[3] = &struct_type_c; + arg_types[4] = &ffi_type_sint32; + arg_types[5] = &ffi_type_sint32; + arg_types[6] = &ffi_type_sint32; + arg_types[7] = NULL; + + void *args[7]; + args[0] = &k; + args[1] = &k; + args[2] = &k; + args[3] = &source; + args[4] = &k; + args[5] = &k; + args[6] = &k; + CHECK (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 7, &struct_type_c, + arg_types) == FFI_OK); + + ffi_call (&cif, FFI_FN (C_fn), &result, args); + /* { dg-output "1, 1, 1, 1, 1, 1\n" } */ + /* { dg-output "1.0, 2.0, 4.0, 8.0, 1.0, 2.0, 4.0, 8.0" } */ + CHECK (result.a.a_x == source.a.a_x); + CHECK (result.a.a_y == source.a.a_y); + CHECK (result.b.b_x == 
source.b.b_x); + CHECK (result.b.b_y == source.b.b_y); + exit (0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2 2.c new file mode 100644 index 0000000..69268cd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2 2.c @@ -0,0 +1,110 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%lu %d %lu %d %d: %lu %d %d\n", b0.a, b0.b, b1.x.a, b1.x.b, b1.y, + result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1, 7}; + struct B f_dbl = {{12 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_ulong; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c new file mode 100644 index 0000000..69268cd --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c @@ -0,0 +1,110 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%lu %d %lu %d %d: %lu %d %d\n", b0.a, b0.b, b1.x.a, b1.x.b, b1.y, + result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1, 7}; + struct B f_dbl = {{12 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_ulong; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3 2.c new file mode 100644 index 0000000..ab18cad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3 2.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b0.a, b0.b, + (int)b1.x.a, b1.x.b, b1.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c new file mode 100644 index 0000000..ab18cad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b0.a, b0.b, + (int)b1.x.a, b1.x.b, b1.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4 2.c new file mode 100644 index 0000000..2ffb4d6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4 2.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c new file mode 100644 index 0000000..2ffb4d6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5 2.c new file mode 100644 index 0000000..6c79845 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5 2.c @@ -0,0 +1,112 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + long double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c new file mode 100644 index 0000000..6c79845 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c @@ -0,0 +1,112 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + long double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6 2.c new file mode 100644 index 0000000..59d3579 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6 2.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_slong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c new file mode 100644 index 0000000..59d3579 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_slong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + 
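+  /* Both invocation paths have now been exercised: the direct ffi_call
+     above, and the closure entry point returned by ffi_closure_alloc and
+     bound with ffi_prep_closure_loc, which dispatches through B_gn back
+     into B_fn.  The CHECKs confirm the nested-struct result matches the
+     expected sums either way. */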
+ exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7 2.c new file mode 100644 index 0000000..27595e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7 2.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c new file mode 100644 index 0000000..27595e6 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8 2.c new file mode 100644 index 0000000..0e6c682 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8 2.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. 
+ PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c new file mode 100644 index 0000000..0e6c682 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + 
f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9 2.c new file mode 100644 index 0000000..5f7ac67 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9 2.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned char a; + unsigned long long b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", b2.a, (int)b2.b, + b3.x.a, (int)b3.x.b, b3.y, (int)b4.d, b4.e, + result.x.a, (int)result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1, 7LL}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_ulong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + 
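+  /* The direct call is verified; the same cif is now bound to a closure.
+     pcl is the writable ffi_closure object and `code` is the executable
+     entry point that ffi_closure_alloc returned alongside it. */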
CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c new file mode 100644 index 0000000..5f7ac67 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned char a; + unsigned long long b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", b2.a, (int)b2.b, + b3.x.a, (int)b3.x.b, b3.y, (int)b4.d, b4.e, + result.x.a, (int)result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1, 7LL}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_ulong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + 
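+  /* ffi_call receives the argument values as an array of pointers, one per
+     entry in dbl_arg_types; the trailing NULL below is only a convention of
+     these tests, since ffi_call takes the argument count from the cif. */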
args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1 2.c new file mode 100644 index 0000000..6a91555 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1 2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct my_ffi_struct { + double a; + double b; + double c; +} my_ffi_struct; + +my_ffi_struct callee(struct my_ffi_struct a1, struct my_ffi_struct a2) +{ + struct my_ffi_struct result; + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +void stub(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct my_ffi_struct a1; + struct my_ffi_struct a2; + + a1 = *(struct my_ffi_struct*)(args[0]); + a2 = *(struct my_ffi_struct*)(args[1]); + + *(my_ffi_struct *)resp = callee(a1, a2); +} + + +int main(void) +{ + ffi_type* my_ffi_struct_fields[4]; + ffi_type my_ffi_struct_type; + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[3]; + + struct my_ffi_struct g = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct f = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct res; + + my_ffi_struct_type.size = 0; + my_ffi_struct_type.alignment = 0; + my_ffi_struct_type.type = FFI_TYPE_STRUCT; + my_ffi_struct_type.elements = my_ffi_struct_fields; + + my_ffi_struct_fields[0] = &ffi_type_double; + my_ffi_struct_fields[1] = &ffi_type_double; + my_ffi_struct_fields[2] = &ffi_type_double; + my_ffi_struct_fields[3] = NULL; + + arg_types[0] = &my_ffi_struct_type; + arg_types[1] = &my_ffi_struct_type; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &my_ffi_struct_type, + arg_types) == FFI_OK); + + args[0] = &g; + args[1] = &f; + args[2] = NULL; + ffi_call(&cif, FFI_FN(callee), &res, args); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, stub, NULL, code) == FFI_OK); + + res = ((my_ffi_struct(*)(struct my_ffi_struct, struct my_ffi_struct))(code))(g, f); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0);; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c new file mode 100644 index 0000000..6a91555 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct my_ffi_struct { + double a; + double b; + double c; +} my_ffi_struct; + +my_ffi_struct callee(struct my_ffi_struct a1, struct my_ffi_struct a2) +{ + struct my_ffi_struct result; + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +void stub(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct my_ffi_struct a1; + struct my_ffi_struct a2; + + a1 = *(struct my_ffi_struct*)(args[0]); + a2 = *(struct my_ffi_struct*)(args[1]); + + *(my_ffi_struct *)resp = callee(a1, a2); +} + + +int main(void) +{ + ffi_type* my_ffi_struct_fields[4]; + ffi_type my_ffi_struct_type; + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[3]; + + struct my_ffi_struct g = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct f = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct res; + + my_ffi_struct_type.size = 0; + my_ffi_struct_type.alignment = 0; + my_ffi_struct_type.type = FFI_TYPE_STRUCT; + my_ffi_struct_type.elements = my_ffi_struct_fields; + + my_ffi_struct_fields[0] = &ffi_type_double; + my_ffi_struct_fields[1] = &ffi_type_double; + my_ffi_struct_fields[2] = &ffi_type_double; + my_ffi_struct_fields[3] = NULL; + + arg_types[0] = &my_ffi_struct_type; + arg_types[1] = &my_ffi_struct_type; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &my_ffi_struct_type, + arg_types) == FFI_OK); + + args[0] = &g; + args[1] = &f; + args[2] = NULL; + ffi_call(&cif, FFI_FN(callee), &res, args); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, stub, NULL, code) == FFI_OK); + + res = ((my_ffi_struct(*)(struct my_ffi_struct, struct my_ffi_struct))(code))(g, f); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0);; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large 2.c new file mode 100644 index 0000000..71c2469 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large 2.c @@ -0,0 +1,145 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_108byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + int n; +} struct_108byte; + +struct_108byte cls_struct_108byte_fn( + struct_108byte b0, + struct_108byte b1, + struct_108byte b2, + struct_108byte b3) +{ + struct_108byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n); + + return result; +} + +static void +cls_struct_108byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_108byte b0, b1, b2, b3; + + b0 = *(struct_108byte*)(args[0]); + b1 = *(struct_108byte*)(args[1]); + b2 = *(struct_108byte*)(args[2]); + b3 = *(struct_108byte*)(args[3]); + + *(struct_108byte*)resp = cls_struct_108byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[15]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_108byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 7 }; + struct_108byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 4 }; + struct_108byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 3 }; + struct_108byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 2 }; + struct_108byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_sint32; + cls_struct_fields[14] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + 
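+  /* res_dbl receives the 108-byte aggregate return value.  On ABIs that
+     return a struct this large through memory, libffi supplies the hidden
+     return pointer itself; the test targets structs big enough to spill
+     past the GP/FP register file. */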
ffi_call(&cif, FFI_FN(cls_struct_108byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_108byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_108byte(*)(struct_108byte, struct_108byte, + struct_108byte, struct_108byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c new file mode 100644 index 0000000..71c2469 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c @@ -0,0 +1,145 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_108byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + int n; +} struct_108byte; + +struct_108byte cls_struct_108byte_fn( + struct_108byte b0, + struct_108byte b1, + struct_108byte b2, + struct_108byte b3) +{ + struct_108byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n); + + return result; +} + +static void +cls_struct_108byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_108byte b0, b1, b2, b3; + + b0 = *(struct_108byte*)(args[0]); + b1 = *(struct_108byte*)(args[1]); + b2 = *(struct_108byte*)(args[2]); + b3 = *(struct_108byte*)(args[3]); + + *(struct_108byte*)resp = cls_struct_108byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[15]; + ffi_type 
cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_108byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 7 }; + struct_108byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 4 }; + struct_108byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 3 }; + struct_108byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 2 }; + struct_108byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_sint32; + cls_struct_fields[14] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_108byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_108byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_108byte(*)(struct_108byte, struct_108byte, + struct_108byte, struct_108byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2 2.c new file mode 100644 index 0000000..d9c750e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2 2.c @@ -0,0 +1,148 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_116byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + double n; + int o; +} struct_116byte; + +struct_116byte cls_struct_116byte_fn( + struct_116byte b0, + struct_116byte b1, + struct_116byte b2, + struct_116byte b3) +{ + struct_116byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + result.o = b0.o + b1.o + b2.o + b3.o; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n, result.o); + + return result; +} + +static void +cls_struct_116byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_116byte b0, b1, b2, b3; + + b0 = *(struct_116byte*)(args[0]); + b1 = *(struct_116byte*)(args[1]); + b2 = *(struct_116byte*)(args[2]); + b3 = *(struct_116byte*)(args[3]); + + *(struct_116byte*)resp = cls_struct_116byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[16]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_116byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 7 }; + struct_116byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 6.0, 4 }; + struct_116byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 7.0, 3 }; + struct_116byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 8.0, 2 }; + struct_116byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_double; + cls_struct_fields[14] = &ffi_type_sint32; + cls_struct_fields[15] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == 
FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_116byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_116byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_116byte(*)(struct_116byte, struct_116byte, + struct_116byte, struct_116byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c new file mode 100644 index 0000000..d9c750e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c @@ -0,0 +1,148 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_116byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + double n; + int o; +} struct_116byte; + +struct_116byte cls_struct_116byte_fn( + struct_116byte b0, + struct_116byte b1, + struct_116byte b2, + struct_116byte b3) +{ + struct_116byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + result.o = b0.o + b1.o + b2.o + b3.o; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n, result.o); + + return result; +} + +static void +cls_struct_116byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_116byte b0, b1, b2, b3; + + b0 = *(struct_116byte*)(args[0]); + b1 = *(struct_116byte*)(args[1]); + b2 = *(struct_116byte*)(args[2]); + b3 = *(struct_116byte*)(args[3]); + + *(struct_116byte*)resp = cls_struct_116byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[16]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_116byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 7 }; + struct_116byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 6.0, 4 }; + struct_116byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 7.0, 3 }; + struct_116byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 8.0, 2 }; + struct_116byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_double; + cls_struct_fields[14] = &ffi_type_sint32; + cls_struct_fields[15] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == 
FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_116byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_116byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_116byte(*)(struct_116byte, struct_116byte, + struct_116byte, struct_116byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium 2.c new file mode 100644 index 0000000..973ee02 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium 2.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7.0 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3.0 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2.0 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, 
res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c new file mode 100644 index 0000000..973ee02 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7.0 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3.0 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2.0 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = 
&h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2 2.c new file mode 100644 index 0000000..84323d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2 2.c @@ -0,0 +1,125 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + long long i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %" PRIdLL "\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + 
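The struct_72byte tests above exercise ffi_call with by-value struct arguments and a large struct return: the aggregate is described by an ffi_type whose elements array is NULL-terminated, and ffi_prep_cif fills in the size and alignment that were initialized to zero. The following standalone sketch shows the same pattern reduced to a three-double struct; it is illustrative only, not part of the vendored files, and assumes a libffi installation (build with something like cc demo.c -lffi).

#include <ffi.h>
#include <stdio.h>

typedef struct { double x, y, z; } triple;

/* Native function to be reached through libffi. */
static triple add_triples(triple a, triple b)
{
  triple r = { a.x + b.x, a.y + b.y, a.z + b.z };
  return r;
}

int main(void)
{
  ffi_cif cif;
  ffi_type *fields[4] = { &ffi_type_double, &ffi_type_double,
                          &ffi_type_double, NULL };
  /* size/alignment start at 0; ffi_prep_cif computes them. */
  ffi_type triple_type = { 0, 0, FFI_TYPE_STRUCT, fields };
  ffi_type *arg_types[2] = { &triple_type, &triple_type };

  triple a = { 1, 2, 3 }, b = { 4, 5, 6 }, res;
  void *arg_values[2] = { &a, &b };

  /* Two struct arguments passed by value, struct return. */
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &triple_type, arg_types) != FFI_OK)
    return 1;

  ffi_call(&cif, FFI_FN(add_triples), &res, arg_values);
  printf("%g %g %g\n", res.x, res.y, res.z);   /* prints: 5 7 9 */
  return 0;
}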
cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_sint64; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c new file mode 100644 index 0000000..84323d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c @@ -0,0 +1,125 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + long long i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %" PRIdLL "\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_sint64; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" 
PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure 2.c new file mode 100644 index 0000000..ca31056 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure 2.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: 41908. + Originator: 20091102 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_combined { + float a; + float b; + float c; + float d; +} cls_struct_combined; + +void cls_struct_combined_fn(struct cls_struct_combined arg) +{ + printf("%g %g %g %g\n", + arg.a, arg.b, + arg.c, arg.d); + fflush(stdout); +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_combined a0; + + a0 = *(struct cls_struct_combined*)(args[0]); + + cls_struct_combined_fn(a0); +} + + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cls_struct_fields0[5]; + ffi_type cls_struct_type0; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_combined g_dbl = {4.0, 5.0, 1.0, 8.0}; + + cls_struct_type0.size = 0; + cls_struct_type0.alignment = 0; + cls_struct_type0.type = FFI_TYPE_STRUCT; + cls_struct_type0.elements = cls_struct_fields0; + + cls_struct_fields0[0] = &ffi_type_float; + cls_struct_fields0[1] = &ffi_type_float; + cls_struct_fields0[2] = &ffi_type_float; + cls_struct_fields0[3] = &ffi_type_float; + cls_struct_fields0[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type0; + dbl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, + dbl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + ((void(*)(cls_struct_combined)) (code))(g_dbl); + /* { dg-output "4 5 1 8" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c new file mode 100644 index 0000000..ca31056 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: 41908. 
+ Originator: 20091102 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_combined { + float a; + float b; + float c; + float d; +} cls_struct_combined; + +void cls_struct_combined_fn(struct cls_struct_combined arg) +{ + printf("%g %g %g %g\n", + arg.a, arg.b, + arg.c, arg.d); + fflush(stdout); +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_combined a0; + + a0 = *(struct cls_struct_combined*)(args[0]); + + cls_struct_combined_fn(a0); +} + + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cls_struct_fields0[5]; + ffi_type cls_struct_type0; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_combined g_dbl = {4.0, 5.0, 1.0, 8.0}; + + cls_struct_type0.size = 0; + cls_struct_type0.alignment = 0; + cls_struct_type0.type = FFI_TYPE_STRUCT; + cls_struct_type0.elements = cls_struct_fields0; + + cls_struct_fields0[0] = &ffi_type_float; + cls_struct_fields0[1] = &ffi_type_float; + cls_struct_fields0[2] = &ffi_type_float; + cls_struct_fields0[3] = &ffi_type_float; + cls_struct_fields0[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type0; + dbl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, + dbl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + ((void(*)(cls_struct_combined)) (code))(g_dbl); + /* { dg-output "4 5 1 8" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest 2.cc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest 2.cc new file mode 100644 index 0000000..e114565 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest 2.cc @@ -0,0 +1,117 @@ +/* Area: ffi_closure, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. 
+ Originator: Jeff Sturm */ + +/* { dg-do run { xfail x86_64-apple-darwin* moxie*-*-* } } */ + +#include "ffitest.h" + +void ABI_ATTR +closure_test_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{ + throw 9; +} + +typedef void (*closure_test_type)(); + +void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (int)(intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg*)resp); + + throw (int)*(ffi_arg*)resp; +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = (ffi_closure *)ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + + { + cl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, + &ffi_type_void, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn, NULL, code) == FFI_OK); + + try + { + (*((closure_test_type)(code)))(); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + + { + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_uint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_uint; + cl_arg_types[10] = &ffi_type_uint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_uint; + cl_arg_types[13] = &ffi_type_uint; + cl_arg_types[14] = &ffi_type_uint; + cl_arg_types[15] = &ffi_type_uint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + try + { + (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "\n1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + } catch (int exception_code) + { + CHECK(exception_code == 255); + } + printf("part two OK\n"); + /* { dg-output "\npart two OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc new file mode 100644 index 0000000..e114565 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc @@ -0,0 +1,117 @@ +/* Area: ffi_closure, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. + Originator: Jeff Sturm */ + +/* { dg-do run { xfail x86_64-apple-darwin* moxie*-*-* } } */ + +#include "ffitest.h" + +void ABI_ATTR +closure_test_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{ + throw 9; +} + +typedef void (*closure_test_type)(); + +void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (int)(intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg*)resp); + + throw (int)*(ffi_arg*)resp; +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = (ffi_closure *)ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + + { + cl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, + &ffi_type_void, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn, NULL, code) == FFI_OK); + + try + { + (*((closure_test_type)(code)))(); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + + { + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_uint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_uint; + cl_arg_types[10] = &ffi_type_uint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_uint; + cl_arg_types[13] = &ffi_type_uint; + cl_arg_types[14] = &ffi_type_uint; + cl_arg_types[15] = &ffi_type_uint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + try + { + (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + 
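The closure tests (testclosure, unwindtest) all follow the same libffi recipe: ffi_closure_alloc returns a writable closure object together with an executable code address, ffi_prep_closure_loc binds that closure to a prepared cif, a handler, and a userdata pointer, and the code address is then invoked through an ordinary function pointer. A plain-C sketch of that recipe, without the C++ exception machinery, follows; it is illustrative only, not part of the vendored files, and assumes the target supports closures (FFI_CLOSURES).

#include <ffi.h>
#include <stdio.h>
#include <stdint.h>

/* Generic handler: libffi passes boxed arguments and a result slot. */
static void add_handler(ffi_cif *cif, void *ret, void **args, void *userdata)
{
  (void)cif;
  int a = *(int *)args[0];
  int b = *(int *)args[1];
  /* Integral results narrower than a word are widened to ffi_arg. */
  *(ffi_arg *)ret = a + b + (int)(intptr_t)userdata;
}

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
  void *code;                                        /* callable address */
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
  int (*fn)(int, int);

  if (closure == NULL)
    return 1;
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
    return 1;
  if (ffi_prep_closure_loc(closure, &cif, add_handler,
                           (void *)(intptr_t)100 /* userdata */, code) != FFI_OK)
    return 1;

  fn = (int (*)(int, int))code;
  printf("%d\n", fn(3, 4));                          /* prints: 107 */

  ffi_closure_free(closure);
  return 0;
}

The vendored tests call exit() without freeing; long-lived embedders should pair each ffi_closure_alloc with ffi_closure_free.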
/* { dg-output "\n1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + } catch (int exception_code) + { + CHECK(exception_code == 255); + } + printf("part two OK\n"); + /* { dg-output "\npart two OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call 2.cc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call 2.cc new file mode 100644 index 0000000..153d240 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call 2.cc @@ -0,0 +1,54 @@ +/* Area: ffi_call, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. + Originator: Andreas Tobler 20061213 */ + +/* { dg-do run { xfail moxie*-*-* } } */ + +#include "ffitest.h" + +static int checking(int a __UNUSED__, short b __UNUSED__, + signed char c __UNUSED__) +{ + throw 9; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + { + try + { + ffi_call(&cif, FFI_FN(checking), &rint, values); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc new file mode 100644 index 0000000..153d240 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc @@ -0,0 +1,54 @@ +/* Area: ffi_call, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. 
+ Originator: Andreas Tobler 20061213 */ + +/* { dg-do run { xfail moxie*-*-* } } */ + +#include "ffitest.h" + +static int checking(int a __UNUSED__, short b __UNUSED__, + signed char c __UNUSED__) +{ + throw 9; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + { + try + { + ffi_call(&cif, FFI_FN(checking), &rint, values); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex 2.inc new file mode 100644 index 0000000..4a812ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex 2.inc @@ -0,0 +1,91 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +typedef struct cls_struct_align { + unsigned char a; + _Complex T_C_TYPE b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + struct cls_struct_align a1, struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %f,%fi %d %d %f,%fi %d: %d %f,%fi %d\n", + a1.a, T_CONV creal (a1.b), T_CONV cimag (a1.b), a1.c, + a2.a, T_CONV creal (a2.b), T_CONV cimag (a2.b), a2.c, + result.a, T_CONV creal (result.b), T_CONV cimag (result.b), result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_c[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* c_arg_types[5]; + + struct cls_struct_align g_c = { 12, 4951 + 7 * I, 127 }; + struct cls_struct_align f_c = { 1, 9320 + 1 * I, 13 }; + struct cls_struct_align res_c; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &T_FFI_TYPE; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + c_arg_types[0] = &cls_struct_type; + c_arg_types[1] = &cls_struct_type; + c_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + c_arg_types) == FFI_OK); + + args_c[0] = &g_c; + args_c[1] = &f_c; + args_c[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_c, args_c); + /* { dg-output "12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, 
code) == FFI_OK); + + res_c = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_c, f_c); + /* { dg-output "\n12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc new file mode 100644 index 0000000..4a812ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc @@ -0,0 +1,91 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +typedef struct cls_struct_align { + unsigned char a; + _Complex T_C_TYPE b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + struct cls_struct_align a1, struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %f,%fi %d %d %f,%fi %d: %d %f,%fi %d\n", + a1.a, T_CONV creal (a1.b), T_CONV cimag (a1.b), a1.c, + a2.a, T_CONV creal (a2.b), T_CONV cimag (a2.b), a2.c, + result.a, T_CONV creal (result.b), T_CONV cimag (result.b), result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_c[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* c_arg_types[5]; + + struct cls_struct_align g_c = { 12, 4951 + 7 * I, 127 }; + struct cls_struct_align f_c = { 1, 9320 + 1 * I, 13 }; + struct cls_struct_align res_c; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &T_FFI_TYPE; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + c_arg_types[0] = &cls_struct_type; + c_arg_types[1] = &cls_struct_type; + c_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + c_arg_types) == FFI_OK); + + args_c[0] = &g_c; + args_c[1] = &f_c; + args_c[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_c, args_c); + /* { dg-output "12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_c = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_c, f_c); + /* { dg-output "\n12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double 2.c 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double 2.c new file mode 100644 index 0000000..0dff23a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c new file mode 100644 index 0000000..0dff23a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float 2.c new file mode 100644 index 0000000..0affbd0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c new file mode 100644 index 0000000..0affbd0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble 2.c new file mode 100644 index 0000000..7889ba8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c new file mode 100644 index 0000000..7889ba8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex 2.inc new file mode 100644 index 0000000..f937404 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex 2.inc @@ -0,0 +1,42 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static void cls_ret_complex_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + _Complex T_C_TYPE *pa; + _Complex T_C_TYPE *pr; + pa = (_Complex T_C_TYPE *)args[0]; + pr = (_Complex T_C_TYPE *)resp; + *pr = *pa; + + printf("%.6f,%.6fi: %.6f,%.6fi\n", + T_CONV creal (*pa), T_CONV cimag (*pa), + T_CONV creal (*pr), T_CONV cimag (*pr)); + } +typedef _Complex T_C_TYPE (*cls_ret_complex)(_Complex T_C_TYPE); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + _Complex T_C_TYPE res; + + cl_arg_types[0] = &T_FFI_TYPE; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_complex_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_complex)code))(0.125 + 128.0 * I); + printf("res: %.6f,%.6fi\n", T_CONV creal (res), T_CONV cimag (res)); + CHECK (res == (0.125 + 128.0 * I)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc new file mode 100644 index 0000000..f937404 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc @@ -0,0 +1,42 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static void cls_ret_complex_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + _Complex T_C_TYPE *pa; + _Complex T_C_TYPE *pr; + pa = (_Complex T_C_TYPE *)args[0]; + pr = (_Complex T_C_TYPE *)resp; + *pr = *pa; + + printf("%.6f,%.6fi: %.6f,%.6fi\n", + T_CONV creal (*pa), T_CONV cimag (*pa), + T_CONV creal (*pr), T_CONV cimag (*pr)); + } +typedef _Complex T_C_TYPE (*cls_ret_complex)(_Complex T_C_TYPE); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + _Complex T_C_TYPE res; + + cl_arg_types[0] = &T_FFI_TYPE; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, cl_arg_types) == FFI_OK); + + 
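The libffi.complex tests rely on optional complex support: when the target defines FFI_TARGET_HAS_COMPLEX_TYPE, ffi.h exposes ffi_type_complex_float, ffi_type_complex_double, and ffi_type_complex_longdouble, and _Complex values can be passed and returned like any other type (the complex.exp driver probes the same macro before running these files). A standalone sketch guarded by that macro follows; it is illustrative only, not part of the vendored files (build with something like cc demo.c -lffi -lm).

#include <ffi.h>
#include <complex.h>
#include <stdio.h>

/* Native function taking and returning _Complex double. */
double complex conjugate_z(double complex z)
{
  return creal(z) - cimag(z) * I;
}

int main(void)
{
#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
  ffi_cif cif;
  ffi_type *arg_types[1] = { &ffi_type_complex_double };
  double complex arg = 3.0 + 4.0 * I;
  double complex res = 0;
  void *arg_values[1] = { &arg };

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                   &ffi_type_complex_double, arg_types) != FFI_OK)
    return 1;

  ffi_call(&cif, FFI_FN(conjugate_z), &res, arg_values);
  printf("%g%+gi\n", creal(res), cimag(res));   /* prints: 3-4i */
#endif
  return 0;
}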
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_complex_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_complex)code))(0.125 + 128.0 * I); + printf("res: %.6f,%.6fi\n", T_CONV creal (res), T_CONV cimag (res)); + CHECK (res == (0.125 + 128.0 * I)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double 2.c new file mode 100644 index 0000000..05e3534 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double 2.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c new file mode 100644 index 0000000..05e3534 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float 2.c new file mode 100644 index 0000000..5df7849 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float 2.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c new file mode 100644 index 0000000..5df7849 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble 2.c new file mode 100644 index 0000000..2b1c320 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c new file mode 100644 index 0000000..2b1c320 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct 2.inc new file mode 100644 index 0000000..df8708d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct 2.inc @@ -0,0 +1,71 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +typedef struct Cs { + _Complex T_C_TYPE x; + _Complex T_C_TYPE y; +} Cs; + +Cs gc; + +void +closure_test_fn(Cs p) +{ + printf("%.1f,%.1fi %.1f,%.1fi\n", + T_CONV creal (p.x), T_CONV cimag (p.x), + T_CONV creal (p.y), T_CONV cimag (p.y)); + gc = p; +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Cs*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type *cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Cs arg = { 1.0 + 11.0 * I, 2.0 + 22.0 * I}; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &T_FFI_TYPE; + ts1_type_elements[1] = &T_FFI_TYPE; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + ((void*(*)(Cs))(code))(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + closure_test_fn(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc new file mode 100644 index 0000000..df8708d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc @@ -0,0 +1,71 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +typedef struct Cs { + _Complex T_C_TYPE x; + _Complex T_C_TYPE y; +} Cs; + +Cs gc; + +void +closure_test_fn(Cs p) +{ + printf("%.1f,%.1fi %.1f,%.1fi\n", + T_CONV creal (p.x), T_CONV cimag (p.x), + T_CONV creal (p.y), T_CONV cimag (p.y)); + gc = p; +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + 
closure_test_fn(*(Cs*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type *cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Cs arg = { 1.0 + 11.0 * I, 2.0 + 22.0 * I}; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &T_FFI_TYPE; + ts1_type_elements[1] = &T_FFI_TYPE; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + ((void*(*)(Cs))(code))(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + closure_test_fn(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + return 0; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double 2.c new file mode 100644 index 0000000..ec71346 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c new file mode 100644 index 0000000..ec71346 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float 2.c new file mode 100644 index 0000000..96fdf75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c new file mode 100644 index 0000000..96fdf75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble 2.c new file mode 100644 index 0000000..005b467 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c new file mode 100644 index 0000000..005b467 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va 2.inc new file mode 100644 index 0000000..8a3e15f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va 2.inc @@ -0,0 +1,80 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include +#include +#include +#include + +static _Complex T_C_TYPE gComplexValue1 = 1 + 2 * I; +static _Complex T_C_TYPE gComplexValue2 = 3 + 4 * I; + +static int cls_variadic(const char *format, ...) 
+{ + va_list ap; + _Complex T_C_TYPE p1, p2; + + va_start (ap, format); + p1 = va_arg (ap, _Complex T_C_TYPE); + p2 = va_arg (ap, _Complex T_C_TYPE); + va_end (ap); + + return printf(format, T_CONV creal (p1), T_CONV cimag (p1), + T_CONV creal (p2), T_CONV cimag (p2)); +} + +static void +cls_complex_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + gComplexValue1 = *(_Complex T_C_TYPE*)args[1]; + gComplexValue2 = *(_Complex T_C_TYPE*)args[2]; + + *(ffi_arg*)resp = + printf(format, + T_CONV creal (gComplexValue1), T_CONV cimag (gComplexValue1), + T_CONV creal (gComplexValue2), T_CONV cimag (gComplexValue2)); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[4]; + char *format = "%.1f,%.1fi %.1f,%.1fi\n"; + + _Complex T_C_TYPE complexArg1 = 1.0 + 22.0 *I; + _Complex T_C_TYPE complexArg2 = 333.0 + 4444.0 *I; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &T_FFI_TYPE; + arg_types[2] = &T_FFI_TYPE; + arg_types[3] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &complexArg1; + args[2] = &complexArg2; + args[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_variadic), &res, args); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_complex_va_fn, NULL, code) + == FFI_OK); + + res = ((int(*)(char *, ...))(code))(format, complexArg1, complexArg2); + CHECK (gComplexValue1 == complexArg1); + CHECK (gComplexValue2 == complexArg2); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc new file mode 100644 index 0000000..8a3e15f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc @@ -0,0 +1,80 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include +#include +#include +#include + +static _Complex T_C_TYPE gComplexValue1 = 1 + 2 * I; +static _Complex T_C_TYPE gComplexValue2 = 3 + 4 * I; + +static int cls_variadic(const char *format, ...) 
+{ + va_list ap; + _Complex T_C_TYPE p1, p2; + + va_start (ap, format); + p1 = va_arg (ap, _Complex T_C_TYPE); + p2 = va_arg (ap, _Complex T_C_TYPE); + va_end (ap); + + return printf(format, T_CONV creal (p1), T_CONV cimag (p1), + T_CONV creal (p2), T_CONV cimag (p2)); +} + +static void +cls_complex_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + gComplexValue1 = *(_Complex T_C_TYPE*)args[1]; + gComplexValue2 = *(_Complex T_C_TYPE*)args[2]; + + *(ffi_arg*)resp = + printf(format, + T_CONV creal (gComplexValue1), T_CONV cimag (gComplexValue1), + T_CONV creal (gComplexValue2), T_CONV cimag (gComplexValue2)); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[4]; + char *format = "%.1f,%.1fi %.1f,%.1fi\n"; + + _Complex T_C_TYPE complexArg1 = 1.0 + 22.0 *I; + _Complex T_C_TYPE complexArg2 = 333.0 + 4444.0 *I; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &T_FFI_TYPE; + arg_types[2] = &T_FFI_TYPE; + arg_types[3] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &complexArg1; + args[2] = &complexArg2; + args[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_variadic), &res, args); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_complex_va_fn, NULL, code) + == FFI_OK); + + res = ((int(*)(char *, ...))(code))(format, complexArg1, complexArg2); + CHECK (gComplexValue1 == complexArg1); + CHECK (gComplexValue2 == complexArg2); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double 2.c new file mode 100644 index 0000000..879ccf3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c new file mode 100644 index 0000000..879ccf3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . 
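The cls_complex_va files above exercise variadic calls and closures; the key entry point is ffi_prep_cif_var, which is given both the number of fixed arguments and the total argument count so the ABI can lay out the trailing arguments as varargs (the caller must already apply the default promotions, e.g. float promotes to double). A sketch of the same call pattern using plain doubles, so it does not depend on complex support, follows; it is illustrative only and not part of the vendored files.

#include <ffi.h>
#include <stdio.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[3] = { &ffi_type_pointer, &ffi_type_double,
                             &ffi_type_double };
  char *format = "%.1f %.1f\n";
  double a = 1.5, b = 2.5;
  void *arg_values[3] = { &format, &a, &b };
  ffi_arg written;

  /* One fixed argument (the format string), three arguments in total. */
  if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3,
                       &ffi_type_sint, arg_types) != FFI_OK)
    return 1;

  ffi_call(&cif, FFI_FN(printf), &written, arg_values);
  printf("printf returned %d\n", (int)written);   /* 8 characters written */
  return 0;
}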
*/ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float 2.c new file mode 100644 index 0000000..2b17826 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float 2.c @@ -0,0 +1,16 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +/* Alpha splits _Complex into two arguments. It's illegal to pass + float through varargs, so _Complex float goes badly. In sort of + gets passed as _Complex double, but the compiler doesn't agree + with itself on this issue. */ +/* { dg-do run { xfail alpha*-*-* } } */ + +#include "complex_defs_float.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c new file mode 100644 index 0000000..2b17826 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c @@ -0,0 +1,16 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +/* Alpha splits _Complex into two arguments. It's illegal to pass + float through varargs, so _Complex float goes badly. In sort of + gets passed as _Complex double, but the compiler doesn't agree + with itself on this issue. */ +/* { dg-do run { xfail alpha*-*-* } } */ + +#include "complex_defs_float.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble 2.c new file mode 100644 index 0000000..6eca965 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c new file mode 100644 index 0000000..6eca965 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex 2.exp new file mode 100644 index 0000000..4631db2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex 2.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_TARGET_HAS_COMPLEX_TYPE"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex 2.inc new file mode 100644 index 0000000..515ae3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex 2.inc @@ -0,0 +1,51 @@ +/* -*-c-*-*/ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE f_complex(_Complex T_C_TYPE c, int x, int *py) +{ + c = -(2 * creal (c)) + (cimag (c) + 1)* I; + *py += x; + + return c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex T_C_TYPE tc_arg; + _Complex T_C_TYPE tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &T_FFI_TYPE, args) == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%f,%fi %f,%fi, x %d 1234, y %d 11110\n", + T_CONV creal (tc_result), T_CONV cimag (tc_result), + T_CONV creal (2.0), T_CONV creal (8.0), tc_int_arg_x, tc_y); + + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp new file mode 100644 index 0000000..4631db2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. 
+ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_TARGET_HAS_COMPLEX_TYPE"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc new file mode 100644 index 0000000..515ae3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc @@ -0,0 +1,51 @@ +/* -*-c-*-*/ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE f_complex(_Complex T_C_TYPE c, int x, int *py) +{ + c = -(2 * creal (c)) + (cimag (c) + 1)* I; + *py += x; + + return c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex T_C_TYPE tc_arg; + _Complex T_C_TYPE tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &T_FFI_TYPE, args) == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%f,%fi %f,%fi, x %d 1234, y %d 11110\n", + T_CONV creal (tc_result), T_CONV cimag (tc_result), + T_CONV creal (2.0), T_CONV creal (8.0), tc_int_arg_x, tc_y); + + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double 2.inc new file mode 100644 index 0000000..3583e16 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double 2.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_double +/* C type corresponding to the base type. */ +#define T_C_TYPE double +/* C cast for a value of type T_C_TYPE that is passed to printf. 
*/ +#define T_CONV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc new file mode 100644 index 0000000..3583e16 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_double +/* C type corresponding to the base type. */ +#define T_C_TYPE double +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float 2.inc new file mode 100644 index 0000000..bbd9375 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float 2.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_float +/* C type corresponding to the base type. */ +#define T_C_TYPE float +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV (double) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc new file mode 100644 index 0000000..bbd9375 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_float +/* C type corresponding to the base type. */ +#define T_C_TYPE float +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV (double) diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble 2.inc new file mode 100644 index 0000000..14b9f24 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble 2.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_longdouble +/* C type corresponding to the base type. */ +#define T_C_TYPE long double +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc new file mode 100644 index 0000000..14b9f24 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_longdouble +/* C type corresponding to the base type. */ +#define T_C_TYPE long double +/* C cast for a value of type T_C_TYPE that is passed to printf. 
*/ +#define T_CONV diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double 2.c new file mode 100644 index 0000000..8a3297b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c new file mode 100644 index 0000000..8a3297b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float 2.c new file mode 100644 index 0000000..5044ebb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c new file mode 100644 index 0000000..5044ebb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int 2.c new file mode 100644 index 0000000..bac3190 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int 2.c @@ -0,0 +1,86 @@ +/* Area: ffi_call + Purpose: Check non-standard complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "ffi.h" +#include + +_Complex int f_complex(_Complex int c, int x, int *py) +{ + __real__ c = -2 * __real__ c; + __imag__ c = __imag__ c + 1; + *py += x; + return c; +} + +/* + * This macro can be used to define new complex type descriptors + * in a platform independent way. + * + * name: Name of the new descriptor is ffi_type_complex_. + * type: The C base type of the complex type. 
+ */ +#define FFI_COMPLEX_TYPEDEF(name, type, ffitype) \ + static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffitype), NULL \ + }; \ + struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ + }; \ + ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ + } + +/* Define new complex type descriptors using the macro: */ +/* ffi_type_complex_sint */ +FFI_COMPLEX_TYPEDEF(sint, int, ffi_type_sint); +/* ffi_type_complex_uchar */ +FFI_COMPLEX_TYPEDEF(uchar, unsigned char, ffi_type_uint8); + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex int tc_arg; + _Complex int tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &ffi_type_complex_sint; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &ffi_type_complex_sint, args) + == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%d,%di %d,%di, x %d 1234, y %d 11110\n", + (int)tc_result, (int)(tc_result * -I), 2, 8, tc_int_arg_x, tc_y); + /* dg-output "-2,8i 2,8i, x 1234 1234, y 11110 11110" */ + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c new file mode 100644 index 0000000..bac3190 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c @@ -0,0 +1,86 @@ +/* Area: ffi_call + Purpose: Check non-standard complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "ffi.h" +#include + +_Complex int f_complex(_Complex int c, int x, int *py) +{ + __real__ c = -2 * __real__ c; + __imag__ c = __imag__ c + 1; + *py += x; + return c; +} + +/* + * This macro can be used to define new complex type descriptors + * in a platform independent way. + * + * name: Name of the new descriptor is ffi_type_complex_. + * type: The C base type of the complex type. 
+ */ +#define FFI_COMPLEX_TYPEDEF(name, type, ffitype) \ + static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffitype), NULL \ + }; \ + struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ + }; \ + ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ + } + +/* Define new complex type descriptors using the macro: */ +/* ffi_type_complex_sint */ +FFI_COMPLEX_TYPEDEF(sint, int, ffi_type_sint); +/* ffi_type_complex_uchar */ +FFI_COMPLEX_TYPEDEF(uchar, unsigned char, ffi_type_uint8); + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex int tc_arg; + _Complex int tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &ffi_type_complex_sint; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &ffi_type_complex_sint, args) + == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%d,%di %d,%di, x %d 1234, y %d 11110\n", + (int)tc_result, (int)(tc_result * -I), 2, 8, tc_int_arg_x, tc_y); + /* dg-output "-2,8i 2,8i, x 1234 1234, y 11110 11110" */ + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble 2.c new file mode 100644 index 0000000..7e78366 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c new file mode 100644 index 0000000..7e78366 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest 2.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest 2.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex 2.inc new file mode 100644 index 0000000..e37a774 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex 2.inc @@ -0,0 +1,78 @@ +/* -*-c-*- */ +#include "ffitest.h" + +#include +#include + +static _Complex T_C_TYPE many(_Complex T_C_TYPE c1, + _Complex T_C_TYPE c2, + _Complex T_C_TYPE c3, + _Complex T_C_TYPE c4, + _Complex T_C_TYPE c5, + _Complex T_C_TYPE c6, + _Complex T_C_TYPE c7, + _Complex T_C_TYPE c8, + _Complex T_C_TYPE c9, + _Complex T_C_TYPE c10, + _Complex T_C_TYPE c11, + _Complex T_C_TYPE c12, + _Complex T_C_TYPE c13) +{ + printf("0 :%f,%fi\n" + "1 :%f,%fi\n" + "2 :%f,%fi\n" + "3 :%f,%fi\n" + "4 :%f,%fi\n" + "5 :%f,%fi\n" + "6 :%f,%fi\n" + "7 :%f,%fi\n" + "8 :%f,%fi\n" + "9 :%f,%fi\n" + "10:%f,%fi\n" + "11:%f,%fi\n" + "12:%f,%fi\n", + T_CONV creal (c1), T_CONV cimag (c1), + T_CONV creal (c2), T_CONV cimag (c2), + T_CONV creal (c3), T_CONV cimag (c3), + T_CONV creal (c4), T_CONV cimag (c4), + T_CONV creal (c5), T_CONV cimag (c5), + T_CONV creal (c6), T_CONV cimag (c6), + T_CONV creal (c7), T_CONV cimag (c7), + T_CONV creal (c8), T_CONV cimag (c8), + T_CONV creal (c9), T_CONV cimag (c9), + T_CONV creal (c10), T_CONV cimag (c10), + T_CONV creal (c11), T_CONV cimag (c11), + T_CONV creal (c12), T_CONV cimag (c12), + T_CONV creal (c13), T_CONV cimag (c13)); + + return (c1+c2-c3-c4+c5+c6+c7-c8-c9-c10-c11+c12+c13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + _Complex T_C_TYPE ca[13]; + _Complex T_C_TYPE c, cc; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &T_FFI_TYPE; + values[i] = &ca[i]; + ca[i] = i + (-20 - i) * I; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, &T_FFI_TYPE, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &c, values); + + cc = many(ca[0], ca[1], ca[2], ca[3], ca[4], ca[5], ca[6], ca[7], ca[8], + ca[9], ca[10], ca[11], ca[12]); + CHECK(creal (cc) == creal (c)); + CHECK(cimag (cc) == cimag (c)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc new file mode 100644 index 0000000..e37a774 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc @@ -0,0 +1,78 @@ +/* -*-c-*- */ 
+#include "ffitest.h" + +#include +#include + +static _Complex T_C_TYPE many(_Complex T_C_TYPE c1, + _Complex T_C_TYPE c2, + _Complex T_C_TYPE c3, + _Complex T_C_TYPE c4, + _Complex T_C_TYPE c5, + _Complex T_C_TYPE c6, + _Complex T_C_TYPE c7, + _Complex T_C_TYPE c8, + _Complex T_C_TYPE c9, + _Complex T_C_TYPE c10, + _Complex T_C_TYPE c11, + _Complex T_C_TYPE c12, + _Complex T_C_TYPE c13) +{ + printf("0 :%f,%fi\n" + "1 :%f,%fi\n" + "2 :%f,%fi\n" + "3 :%f,%fi\n" + "4 :%f,%fi\n" + "5 :%f,%fi\n" + "6 :%f,%fi\n" + "7 :%f,%fi\n" + "8 :%f,%fi\n" + "9 :%f,%fi\n" + "10:%f,%fi\n" + "11:%f,%fi\n" + "12:%f,%fi\n", + T_CONV creal (c1), T_CONV cimag (c1), + T_CONV creal (c2), T_CONV cimag (c2), + T_CONV creal (c3), T_CONV cimag (c3), + T_CONV creal (c4), T_CONV cimag (c4), + T_CONV creal (c5), T_CONV cimag (c5), + T_CONV creal (c6), T_CONV cimag (c6), + T_CONV creal (c7), T_CONV cimag (c7), + T_CONV creal (c8), T_CONV cimag (c8), + T_CONV creal (c9), T_CONV cimag (c9), + T_CONV creal (c10), T_CONV cimag (c10), + T_CONV creal (c11), T_CONV cimag (c11), + T_CONV creal (c12), T_CONV cimag (c12), + T_CONV creal (c13), T_CONV cimag (c13)); + + return (c1+c2-c3-c4+c5+c6+c7-c8-c9-c10-c11+c12+c13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + _Complex T_C_TYPE ca[13]; + _Complex T_C_TYPE c, cc; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &T_FFI_TYPE; + values[i] = &ca[i]; + ca[i] = i + (-20 - i) * I; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, &T_FFI_TYPE, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &c, values); + + cc = many(ca[0], ca[1], ca[2], ca[3], ca[4], ca[5], ca[6], ca[7], ca[8], + ca[9], ca[10], ca[11], ca[12]); + CHECK(creal (cc) == creal (c)); + CHECK(cimag (cc) == cimag (c)); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double 2.c new file mode 100644 index 0000000..3fd53c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c new file mode 100644 index 0000000..3fd53c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float 2.c new file mode 100644 index 0000000..c43d21c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c new file mode 100644 index 0000000..c43d21c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble 2.c new file mode 100644 index 0000000..dbab723 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c new file mode 100644 index 0000000..dbab723 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex 2.inc new file mode 100644 index 0000000..8bf0c1f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex 2.inc @@ -0,0 +1,37 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c) +{ + printf ("%f,%fi\n", T_CONV creal (c), T_CONV cimag (c)); + return 2 * c; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c, rc, rc2; + T_C_TYPE cr, ci; + + args[0] = &T_FFI_TYPE; + values[0] = &c; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, args) == FFI_OK); + + for (cr = -127.0; cr < 127; cr++) + { + ci = 1000.0 - cr; + c = cr + ci * I; + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == 2 * c); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc new file mode 100644 index 0000000..8bf0c1f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc @@ -0,0 +1,37 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c) +{ + printf ("%f,%fi\n", T_CONV creal (c), T_CONV cimag (c)); + return 2 * c; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c, rc, rc2; + T_C_TYPE cr, ci; + + args[0] = &T_FFI_TYPE; + values[0] = &c; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, args) == FFI_OK); + + for (cr = -127.0; cr < 127; cr++) + { + ci = 1000.0 - cr; + c = cr + ci * I; + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == 2 * c); + } + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1 2.inc new file mode 100644 index 0000000..7cecc0f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1 2.inc @@ -0,0 +1,41 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c1, float fl2, unsigned int in3, _Complex T_C_TYPE c4) +{ + return c1 + fl2 + in3 + c4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c4, rc, rc2; + float fl2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 
255.0 * I; + fl2 = 128.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, fl2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc new file mode 100644 index 0000000..7cecc0f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc @@ -0,0 +1,41 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c1, float fl2, unsigned int in3, _Complex T_C_TYPE c4) +{ + return c1 + fl2 + in3 + c4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c4, rc, rc2; + float fl2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + fl2 = 128.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, fl2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double 2.c new file mode 100644 index 0000000..727410d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c new file mode 100644 index 0000000..727410d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float 2.c new file mode 100644 index 0000000..a2aeada --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c new file mode 100644 index 0000000..a2aeada --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble 2.c new file mode 100644 index 0000000..103504b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c new file mode 100644 index 0000000..103504b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2 2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2 2.inc new file mode 100644 index 0000000..265170b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2 2.inc @@ -0,0 +1,44 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +_Complex T_C_TYPE +return_c(_Complex T_C_TYPE c1, _Complex T_C_TYPE c2, + unsigned int in3, _Complex T_C_TYPE c4) +{ + volatile _Complex T_C_TYPE r = c1 + c2 + in3 + c4; + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c2, c4, rc, rc2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &T_FFI_TYPE; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &c2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + c2 = 128.0 + 256.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, c2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc new file mode 100644 index 0000000..265170b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc @@ -0,0 +1,44 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +_Complex T_C_TYPE +return_c(_Complex T_C_TYPE c1, _Complex T_C_TYPE c2, + unsigned int in3, _Complex T_C_TYPE c4) +{ + volatile _Complex T_C_TYPE r = c1 + c2 + in3 + c4; + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c2, c4, rc, rc2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &T_FFI_TYPE; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &c2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + c2 = 128.0 + 256.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, c2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double 2.c new file mode 100644 index 0000000..ab9efac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c new file mode 100644 index 0000000..ab9efac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float 2.c new file mode 100644 index 0000000..d7f22c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c new file mode 100644 index 0000000..d7f22c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble 2.c new file mode 100644 index 0000000..3edea62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c new file mode 100644 index 0000000..3edea62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double 2.c new file mode 100644 index 0000000..e2497cc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c new file mode 100644 index 0000000..e2497cc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float 2.c new file mode 100644 index 0000000..a35528f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c new file mode 100644 index 0000000..a35528f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble 2.c new file mode 100644 index 0000000..142d7be --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble 2.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c new file mode 100644 index 0000000..142d7be --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct 2.c new file mode 100644 index 0000000..b00c404 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct 2.c @@ -0,0 +1,34 @@ +/* { dg-do run } */ + +#include "static-chain.h" + +#if defined(__GNUC__) && !defined(__clang__) && defined(STATIC_CHAIN_REG) + +#include "ffitest.h" + +/* Blatent assumption here that the prologue doesn't clobber the + static chain for trivial functions. If this is not true, don't + define STATIC_CHAIN_REG, and we'll test what we can via other tests. */ +void *doit(void) +{ + register void *chain __asm__(STATIC_CHAIN_REG); + return chain; +} + +int main() +{ + ffi_cif cif; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(doit), &result, NULL, &result); + + CHECK(result == &result); + + return 0; +} + +#else /* UNSUPPORTED */ +int main() { return 0; } +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c new file mode 100644 index 0000000..b00c404 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c @@ -0,0 +1,34 @@ +/* { dg-do run } */ + +#include "static-chain.h" + +#if defined(__GNUC__) && !defined(__clang__) && defined(STATIC_CHAIN_REG) + +#include "ffitest.h" + +/* Blatent assumption here that the prologue doesn't clobber the + static chain for trivial functions. If this is not true, don't + define STATIC_CHAIN_REG, and we'll test what we can via other tests. 
*/ +void *doit(void) +{ + register void *chain __asm__(STATIC_CHAIN_REG); + return chain; +} + +int main() +{ + ffi_cif cif; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(doit), &result, NULL, &result); + + CHECK(result == &result); + + return 0; +} + +#else /* UNSUPPORTED */ +int main() { return 0; } +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1 2.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1 2.c new file mode 100644 index 0000000..7b34afc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1 2.c @@ -0,0 +1,28 @@ +/* { dg-do run } */ + +#include "ffitest.h" + +void doit(ffi_cif *cif, void *rvalue, void **avalue, void *closure) +{ + (void)cif; + (void)avalue; + *(void **)rvalue = closure; +} + +typedef void * (*FN)(void); + +int main() +{ + ffi_cif cif; + ffi_go_closure cl; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + CHECK(ffi_prep_go_closure(&cl, &cif, doit) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(*(FN *)&cl), &result, NULL, &cl); + + CHECK(result == &cl); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c new file mode 100644 index 0000000..7b34afc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c @@ -0,0 +1,28 @@ +/* { dg-do run } */ + +#include "ffitest.h" + +void doit(ffi_cif *cif, void *rvalue, void **avalue, void *closure) +{ + (void)cif; + (void)avalue; + *(void **)rvalue = closure; +} + +typedef void * (*FN)(void); + +int main() +{ + ffi_cif cif; + ffi_go_closure cl; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + CHECK(ffi_prep_go_closure(&cl, &cif, doit) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(*(FN *)&cl), &result, NULL, &cl); + + CHECK(result == &cl); + + exit(0); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest 2.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest 2.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go 2.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go 2.exp new file mode 100644 index 0000000..100c5e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go 2.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. 
+ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_GO_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go.exp b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go.exp new file mode 100644 index 0000000..100c5e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
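The libffi.go tests gated by this harness exercise the Go-closure entry points: ffi_prep_go_closure binds a cif and a handler function to an ffi_go_closure, and ffi_call_go performs the call while passing that closure through the platform's static-chain register (see static-chain.h below), so the handler receives it back as its final argument. Condensed from closure1.c above, a minimal sketch of that round trip, assuming the build defines FFI_GO_CLOSURES (identifiers are illustrative):

#include <ffi.h>

static void handler(ffi_cif *cif, void *rvalue, void **avalue, void *closure)
{
  (void)cif; (void)avalue;
  *(void **)rvalue = closure;   /* hand the closure pointer back to the caller */
}

typedef void *(*FN)(void);

int main(void)
{
  ffi_cif cif;
  ffi_go_closure go;
  void *result = NULL;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_pointer, NULL) != FFI_OK
      || ffi_prep_go_closure(&go, &cif, handler) != FFI_OK)
    return 1;

  /* The last argument is delivered to the handler as its closure pointer. */
  ffi_call_go(&cif, FFI_FN(*(FN *)&go), &result, NULL, &go);
  return result == &go ? 0 : 1;
}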
+ +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_GO_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain 2.h new file mode 100644 index 0000000..3675b40 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain 2.h @@ -0,0 +1,19 @@ +#ifdef __aarch64__ +# define STATIC_CHAIN_REG "x18" +#elif defined(__alpha__) +# define STATIC_CHAIN_REG "$1" +#elif defined(__arm__) +# define STATIC_CHAIN_REG "ip" +#elif defined(__sparc__) +# if defined(__arch64__) || defined(__sparcv9) +# define STATIC_CHAIN_REG "g5" +# else +# define STATIC_CHAIN_REG "g2" +# endif +#elif defined(__x86_64__) +# define STATIC_CHAIN_REG "r10" +#elif defined(__i386__) +# ifndef ABI_NUM +# define STATIC_CHAIN_REG "ecx" /* FFI_DEFAULT_ABI only */ +# endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h new file mode 100644 index 0000000..3675b40 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h @@ -0,0 +1,19 @@ +#ifdef __aarch64__ +# define STATIC_CHAIN_REG "x18" +#elif defined(__alpha__) +# define STATIC_CHAIN_REG "$1" +#elif defined(__arm__) +# define STATIC_CHAIN_REG "ip" +#elif defined(__sparc__) +# if defined(__arch64__) || defined(__sparcv9) +# define STATIC_CHAIN_REG "g5" +# else +# define STATIC_CHAIN_REG "g2" +# endif +#elif defined(__x86_64__) +# define STATIC_CHAIN_REG "r10" +#elif defined(__i386__) +# ifndef ABI_NUM +# define STATIC_CHAIN_REG "ecx" /* FFI_DEFAULT_ABI only */ +# endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi 2.h new file mode 100644 index 0000000..89b3e32 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi 2.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_RBFFI_H +#define RBFFI_RBFFI_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_PARAMETERS (32) + +extern VALUE rbffi_FFIModule; + +extern void rbffi_Type_Init(VALUE ffiModule); +extern void rbffi_Buffer_Init(VALUE ffiModule); +extern void rbffi_Invoker_Init(VALUE ffiModule); +extern void rbffi_Variadic_Init(VALUE ffiModule); +extern VALUE rbffi_AbstractMemoryClass, rbffi_InvokerClass; +extern int rbffi_type_size(VALUE type); +extern void rbffi_Thread_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_RBFFI_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi.h new file mode 100644 index 0000000..89b3e32 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
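The rbffi.h header added above declares the C extension's entry points (rbffi_Type_Init, rbffi_Invoker_Init, and friends) and defines MAX_PARAMETERS (32) for call signatures. On the Ruby side those invokers are reached through FFI::Library; a minimal sketch against libc (the strlen binding is purely illustrative):

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC

      # attach_function builds an invoker in the C extension whose
      # entry points are declared in rbffi.h
      attach_function :strlen, [:string], :size_t
    end

    puts LibC.strlen("hello")   # => 5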
+ */ + +#ifndef RBFFI_RBFFI_H +#define RBFFI_RBFFI_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_PARAMETERS (32) + +extern VALUE rbffi_FFIModule; + +extern void rbffi_Type_Init(VALUE ffiModule); +extern void rbffi_Buffer_Init(VALUE ffiModule); +extern void rbffi_Invoker_Init(VALUE ffiModule); +extern void rbffi_Variadic_Init(VALUE ffiModule); +extern VALUE rbffi_AbstractMemoryClass, rbffi_InvokerClass; +extern int rbffi_type_size(VALUE type); +extern void rbffi_Thread_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_RBFFI_H */ diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian 2.h new file mode 100644 index 0000000..ebb8420 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian 2.h @@ -0,0 +1,59 @@ +#ifndef JFFI_ENDIAN_H +#define JFFI_ENDIAN_H + +#ifndef _MSC_VER +#include +#endif + +#include + +#if defined(__linux__) || defined(__CYGWIN__) || defined(__GNU__) || defined(__GLIBC__) || defined(__HAIKU__) +# include +# if !defined(LITTLE_ENDIAN) && defined(__LITTLE_ENDIAN) +# define LITTLE_ENDIAN __LITTLE_ENDIAN +# endif +# if !defined(BIG_ENDIAN) && defined(__BIG_ENDIAN) +# define BIG_ENDIAN __BIG_ENDIAN +# endif +# if !defined(BYTE_ORDER) && defined(__BYTE_ORDER) +# define BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#ifdef __sun +# include +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(_BIG_ENDIAN) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(_LITTLE_ENDIAN) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_AIX) && !defined(BYTE_ORDER) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(__BIG_ENDIAN__) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(__LITTLE_ENDIAN__) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_WIN32) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# define BYTE_ORDER LITTLE_ENDIAN +#endif + +#if !defined(BYTE_ORDER) || !defined(LITTLE_ENDIAN) || !defined(BIG_ENDIAN) +# error "Cannot determine the endian-ness of this platform" +#endif + +#endif /* JFFI_ENDIAN_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian.h new file mode 100644 index 0000000..ebb8420 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian.h @@ -0,0 +1,59 @@ +#ifndef JFFI_ENDIAN_H +#define JFFI_ENDIAN_H + +#ifndef _MSC_VER +#include +#endif + +#include + +#if defined(__linux__) || defined(__CYGWIN__) || defined(__GNU__) || defined(__GLIBC__) || defined(__HAIKU__) +# include +# if !defined(LITTLE_ENDIAN) && defined(__LITTLE_ENDIAN) +# define LITTLE_ENDIAN __LITTLE_ENDIAN +# endif +# if !defined(BIG_ENDIAN) && defined(__BIG_ENDIAN) +# define BIG_ENDIAN __BIG_ENDIAN +# endif +# if !defined(BYTE_ORDER) && defined(__BYTE_ORDER) +# define BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#ifdef __sun +# include +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(_BIG_ENDIAN) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(_LITTLE_ENDIAN) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_AIX) && !defined(BYTE_ORDER) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(__BIG_ENDIAN__) +# define BYTE_ORDER BIG_ENDIAN +# elif 
defined(__LITTLE_ENDIAN__) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_WIN32) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# define BYTE_ORDER LITTLE_ENDIAN +#endif + +#if !defined(BYTE_ORDER) || !defined(LITTLE_ENDIAN) || !defined(BIG_ENDIAN) +# error "Cannot determine the endian-ness of this platform" +#endif + +#endif /* JFFI_ENDIAN_H */ + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool 2.h new file mode 100644 index 0000000..9130a8b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool 2.h @@ -0,0 +1,8 @@ +#ifndef FFI_STDBOOL_H +#define FFI_STDBOOL_H + +typedef int bool; +#define true 1 +#define false 0 + +#endif /* FFI_STDBOOL_H */ \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool.h new file mode 100644 index 0000000..9130a8b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool.h @@ -0,0 +1,8 @@ +#ifndef FFI_STDBOOL_H +#define FFI_STDBOOL_H + +typedef int bool; +#define true 1 +#define false 0 + +#endif /* FFI_STDBOOL_H */ \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint 2.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint 2.h new file mode 100644 index 0000000..6ce7457 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint 2.h @@ -0,0 +1,201 @@ +/* stdint.h standard header */ +#if !defined(_MSC_VER) && !defined(INT8_MIN) +#pragma once +#ifndef _STDINT +#define _STDINT +#ifndef RC_INVOKED +#include + +/* NB: assumes + byte has 8 bits + long is 32 bits + pointer can convert to and from long long + long long is longest type + */ + +_C_STD_BEGIN + /* TYPE DEFINITIONS */ +typedef signed char int8_t; +typedef short int16_t; +typedef int int32_t; + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; + +typedef signed char int_least8_t; +typedef short int_least16_t; +typedef int int_least32_t; + +typedef unsigned char uint_least8_t; +typedef unsigned short uint_least16_t; +typedef unsigned int uint_least32_t; + +typedef char int_fast8_t; +typedef int int_fast16_t; +typedef int int_fast32_t; + +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef unsigned int uint_fast32_t; + +#ifndef _INTPTR_T_DEFINED + #define _INTPTR_T_DEFINED + #ifdef _WIN64 +typedef __int64 intptr_t; + #else /* _WIN64 */ +typedef _W64 int intptr_t; + #endif /* _WIN64 */ +#endif /* _INTPTR_T_DEFINED */ + +#ifndef _UINTPTR_T_DEFINED + #define _UINTPTR_T_DEFINED + #ifdef _WIN64 +typedef unsigned __int64 uintptr_t; + #else /* _WIN64 */ +typedef _W64 unsigned int uintptr_t; + #endif /* _WIN64 */ +#endif /* _UINTPTR_T_DEFINED */ + +typedef _Longlong int64_t; +typedef _ULonglong uint64_t; + +typedef _Longlong int_least64_t; +typedef _ULonglong uint_least64_t; + +typedef _Longlong int_fast64_t; +typedef _ULonglong uint_fast64_t; + +typedef _Longlong intmax_t; +typedef _ULonglong uintmax_t; + + /* LIMIT MACROS */ +#define INT8_MIN (-0x7f - _C2) +#define INT16_MIN (-0x7fff - _C2) +#define INT32_MIN (-0x7fffffff - _C2) + +#define INT8_MAX 0x7f +#define INT16_MAX 0x7fff +#define INT32_MAX 0x7fffffff +#define UINT8_MAX 0xff +#define UINT16_MAX 0xffff +#define UINT32_MAX 0xffffffff + +#define 
INT_LEAST8_MIN (-0x7f - _C2) +#define INT_LEAST16_MIN (-0x7fff - _C2) +#define INT_LEAST32_MIN (-0x7fffffff - _C2) + +#define INT_LEAST8_MAX 0x7f +#define INT_LEAST16_MAX 0x7fff +#define INT_LEAST32_MAX 0x7fffffff +#define UINT_LEAST8_MAX 0xff +#define UINT_LEAST16_MAX 0xffff +#define UINT_LEAST32_MAX 0xffffffff + +#define INT_FAST8_MIN (-0x7f - _C2) +#define INT_FAST16_MIN (-0x7fff - _C2) +#define INT_FAST32_MIN (-0x7fffffff - _C2) + +#define INT_FAST8_MAX 0x7f +#define INT_FAST16_MAX 0x7fff +#define INT_FAST32_MAX 0x7fffffff +#define UINT_FAST8_MAX 0xff +#define UINT_FAST16_MAX 0xffff +#define UINT_FAST32_MAX 0xffffffff + + #if _INTPTR == 0 || _INTPTR == 1 +#define INTPTR_MAX 0x7fffffff +#define INTPTR_MIN (-INTPTR_MAX - _C2) +#define UINTPTR_MAX 0xffffffff + + #else /* _INTPTR == 2 */ +#define INTPTR_MIN (-_LLONG_MAX - _C2) +#define INTPTR_MAX _LLONG_MAX +#define UINTPTR_MAX _ULLONG_MAX +#endif /* _INTPTR */ + +#define INT8_C(x) (x) +#define INT16_C(x) (x) +#define INT32_C(x) ((x) + (INT32_MAX - INT32_MAX)) + +#define UINT8_C(x) (x) +#define UINT16_C(x) (x) +#define UINT32_C(x) ((x) + (UINT32_MAX - UINT32_MAX)) + +#ifdef _WIN64 + #define PTRDIFF_MIN INT64_MIN + #define PTRDIFF_MAX INT64_MAX +#else /* _WIN64 */ + #define PTRDIFF_MIN INT32_MIN + #define PTRDIFF_MAX INT32_MAX +#endif /* _WIN64 */ + +#define SIG_ATOMIC_MIN INT32_MIN +#define SIG_ATOMIC_MAX INT32_MAX + +#ifndef SIZE_MAX + #ifdef _WIN64 + #define SIZE_MAX UINT64_MAX + #else /* _WIN64 */ + #define SIZE_MAX UINT32_MAX + #endif /* _WIN64 */ +#endif /* SIZE_MAX */ + +#define WCHAR_MIN 0x0000 +#define WCHAR_MAX 0xffff + +#define WINT_MIN 0x0000 +#define WINT_MAX 0xffff + + #define INT64_MIN (-0x7fffffffffffffff - _C2) + #define INT64_MAX 0x7fffffffffffffff + #define UINT64_MAX 0xffffffffffffffffU + + #define INT_LEAST64_MIN (-0x7fffffffffffffff - _C2) + #define INT_LEAST64_MAX 0x7fffffffffffffff + #define UINT_LEAST64_MAX 0xffffffffffffffffU + + #define INT_FAST64_MIN (-0x7fffffffffffffff - _C2) + #define INT_FAST64_MAX 0x7fffffffffffffff + #define UINT_FAST64_MAX 0xffffffffffffffffU + + #define INTMAX_MIN (-0x7fffffffffffffff - _C2) + #define INTMAX_MAX 0x7fffffffffffffff + #define UINTMAX_MAX 0xffffffffffffffffU + +#define INT64_C(x) ((x) + (INT64_MAX - INT64_MAX)) +#define UINT64_C(x) ((x) + (UINT64_MAX - UINT64_MAX)) +#define INTMAX_C(x) INT64_C(x) +#define UINTMAX_C(x) UINT64_C(x) +_C_STD_END +#endif /* RC_INVOKED */ +#endif /* _STDINT */ + + #if defined(_STD_USING) +using _CSTD int8_t; using _CSTD int16_t; +using _CSTD int32_t; using _CSTD int64_t; + +using _CSTD uint8_t; using _CSTD uint16_t; +using _CSTD uint32_t; using _CSTD uint64_t; + +using _CSTD int_least8_t; using _CSTD int_least16_t; +using _CSTD int_least32_t; using _CSTD int_least64_t; +using _CSTD uint_least8_t; using _CSTD uint_least16_t; +using _CSTD uint_least32_t; using _CSTD uint_least64_t; + +using _CSTD intmax_t; using _CSTD uintmax_t; + +using _CSTD uintptr_t; +using _CSTD intptr_t; + +using _CSTD int_fast8_t; using _CSTD int_fast16_t; +using _CSTD int_fast32_t; using _CSTD int_fast64_t; +using _CSTD uint_fast8_t; using _CSTD uint_fast16_t; +using _CSTD uint_fast32_t; using _CSTD uint_fast64_t; + #endif /* defined(_STD_USING) */ + +/* + * Copyright (c) 1992-2009 by P.J. Plauger. ALL RIGHTS RESERVED. + * Consult your license regarding permissions and restrictions. 
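This bundled win32 stdint.h shim supplies the fixed-width typedefs and limit macros for pre-C99 MSVC builds of the extension. The same widths surface in Ruby as FFI type names; a small sketch, with arbitrary values:

    require 'ffi'

    # Two 32-bit slots; the put/get accessors mirror the uint32_t typedef above.
    ptr = FFI::MemoryPointer.new(:uint32, 2)
    ptr.put_uint32(0, 0xffffffff)          # UINT32_MAX
    ptr.put_uint32(4, 42)
    p ptr.read_array_of_uint32(2)          # => [4294967295, 42]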
+V5.20:0009 */ +#endif /* !defined(_MSC_VER) && !defined(INT8_MIN) */ \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint.h b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint.h new file mode 100644 index 0000000..6ce7457 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint.h @@ -0,0 +1,201 @@ +/* stdint.h standard header */ +#if !defined(_MSC_VER) && !defined(INT8_MIN) +#pragma once +#ifndef _STDINT +#define _STDINT +#ifndef RC_INVOKED +#include + +/* NB: assumes + byte has 8 bits + long is 32 bits + pointer can convert to and from long long + long long is longest type + */ + +_C_STD_BEGIN + /* TYPE DEFINITIONS */ +typedef signed char int8_t; +typedef short int16_t; +typedef int int32_t; + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; + +typedef signed char int_least8_t; +typedef short int_least16_t; +typedef int int_least32_t; + +typedef unsigned char uint_least8_t; +typedef unsigned short uint_least16_t; +typedef unsigned int uint_least32_t; + +typedef char int_fast8_t; +typedef int int_fast16_t; +typedef int int_fast32_t; + +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef unsigned int uint_fast32_t; + +#ifndef _INTPTR_T_DEFINED + #define _INTPTR_T_DEFINED + #ifdef _WIN64 +typedef __int64 intptr_t; + #else /* _WIN64 */ +typedef _W64 int intptr_t; + #endif /* _WIN64 */ +#endif /* _INTPTR_T_DEFINED */ + +#ifndef _UINTPTR_T_DEFINED + #define _UINTPTR_T_DEFINED + #ifdef _WIN64 +typedef unsigned __int64 uintptr_t; + #else /* _WIN64 */ +typedef _W64 unsigned int uintptr_t; + #endif /* _WIN64 */ +#endif /* _UINTPTR_T_DEFINED */ + +typedef _Longlong int64_t; +typedef _ULonglong uint64_t; + +typedef _Longlong int_least64_t; +typedef _ULonglong uint_least64_t; + +typedef _Longlong int_fast64_t; +typedef _ULonglong uint_fast64_t; + +typedef _Longlong intmax_t; +typedef _ULonglong uintmax_t; + + /* LIMIT MACROS */ +#define INT8_MIN (-0x7f - _C2) +#define INT16_MIN (-0x7fff - _C2) +#define INT32_MIN (-0x7fffffff - _C2) + +#define INT8_MAX 0x7f +#define INT16_MAX 0x7fff +#define INT32_MAX 0x7fffffff +#define UINT8_MAX 0xff +#define UINT16_MAX 0xffff +#define UINT32_MAX 0xffffffff + +#define INT_LEAST8_MIN (-0x7f - _C2) +#define INT_LEAST16_MIN (-0x7fff - _C2) +#define INT_LEAST32_MIN (-0x7fffffff - _C2) + +#define INT_LEAST8_MAX 0x7f +#define INT_LEAST16_MAX 0x7fff +#define INT_LEAST32_MAX 0x7fffffff +#define UINT_LEAST8_MAX 0xff +#define UINT_LEAST16_MAX 0xffff +#define UINT_LEAST32_MAX 0xffffffff + +#define INT_FAST8_MIN (-0x7f - _C2) +#define INT_FAST16_MIN (-0x7fff - _C2) +#define INT_FAST32_MIN (-0x7fffffff - _C2) + +#define INT_FAST8_MAX 0x7f +#define INT_FAST16_MAX 0x7fff +#define INT_FAST32_MAX 0x7fffffff +#define UINT_FAST8_MAX 0xff +#define UINT_FAST16_MAX 0xffff +#define UINT_FAST32_MAX 0xffffffff + + #if _INTPTR == 0 || _INTPTR == 1 +#define INTPTR_MAX 0x7fffffff +#define INTPTR_MIN (-INTPTR_MAX - _C2) +#define UINTPTR_MAX 0xffffffff + + #else /* _INTPTR == 2 */ +#define INTPTR_MIN (-_LLONG_MAX - _C2) +#define INTPTR_MAX _LLONG_MAX +#define UINTPTR_MAX _ULLONG_MAX +#endif /* _INTPTR */ + +#define INT8_C(x) (x) +#define INT16_C(x) (x) +#define INT32_C(x) ((x) + (INT32_MAX - INT32_MAX)) + +#define UINT8_C(x) (x) +#define UINT16_C(x) (x) +#define UINT32_C(x) ((x) + (UINT32_MAX - UINT32_MAX)) + +#ifdef _WIN64 + #define PTRDIFF_MIN INT64_MIN + #define PTRDIFF_MAX INT64_MAX +#else /* _WIN64 */ + #define 
PTRDIFF_MIN INT32_MIN + #define PTRDIFF_MAX INT32_MAX +#endif /* _WIN64 */ + +#define SIG_ATOMIC_MIN INT32_MIN +#define SIG_ATOMIC_MAX INT32_MAX + +#ifndef SIZE_MAX + #ifdef _WIN64 + #define SIZE_MAX UINT64_MAX + #else /* _WIN64 */ + #define SIZE_MAX UINT32_MAX + #endif /* _WIN64 */ +#endif /* SIZE_MAX */ + +#define WCHAR_MIN 0x0000 +#define WCHAR_MAX 0xffff + +#define WINT_MIN 0x0000 +#define WINT_MAX 0xffff + + #define INT64_MIN (-0x7fffffffffffffff - _C2) + #define INT64_MAX 0x7fffffffffffffff + #define UINT64_MAX 0xffffffffffffffffU + + #define INT_LEAST64_MIN (-0x7fffffffffffffff - _C2) + #define INT_LEAST64_MAX 0x7fffffffffffffff + #define UINT_LEAST64_MAX 0xffffffffffffffffU + + #define INT_FAST64_MIN (-0x7fffffffffffffff - _C2) + #define INT_FAST64_MAX 0x7fffffffffffffff + #define UINT_FAST64_MAX 0xffffffffffffffffU + + #define INTMAX_MIN (-0x7fffffffffffffff - _C2) + #define INTMAX_MAX 0x7fffffffffffffff + #define UINTMAX_MAX 0xffffffffffffffffU + +#define INT64_C(x) ((x) + (INT64_MAX - INT64_MAX)) +#define UINT64_C(x) ((x) + (UINT64_MAX - UINT64_MAX)) +#define INTMAX_C(x) INT64_C(x) +#define UINTMAX_C(x) UINT64_C(x) +_C_STD_END +#endif /* RC_INVOKED */ +#endif /* _STDINT */ + + #if defined(_STD_USING) +using _CSTD int8_t; using _CSTD int16_t; +using _CSTD int32_t; using _CSTD int64_t; + +using _CSTD uint8_t; using _CSTD uint16_t; +using _CSTD uint32_t; using _CSTD uint64_t; + +using _CSTD int_least8_t; using _CSTD int_least16_t; +using _CSTD int_least32_t; using _CSTD int_least64_t; +using _CSTD uint_least8_t; using _CSTD uint_least16_t; +using _CSTD uint_least32_t; using _CSTD uint_least64_t; + +using _CSTD intmax_t; using _CSTD uintmax_t; + +using _CSTD uintptr_t; +using _CSTD intptr_t; + +using _CSTD int_fast8_t; using _CSTD int_fast16_t; +using _CSTD int_fast32_t; using _CSTD int_fast64_t; +using _CSTD uint_fast8_t; using _CSTD uint_fast16_t; +using _CSTD uint_fast32_t; using _CSTD uint_fast64_t; + #endif /* defined(_STD_USING) */ + +/* + * Copyright (c) 1992-2009 by P.J. Plauger. ALL RIGHTS RESERVED. + * Consult your license regarding permissions and restrictions. 
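The fixed-width types also drive struct layout from Ruby; a brief sketch (the Header struct and its fields are hypothetical):

    require 'ffi'

    class Header < FFI::Struct
      layout :magic,   :uint32,
             :version, :uint16,
             :flags,   :uint16,
             :length,  :int64
    end

    p Header.size          # 16 on typical platforms (4 + 2 + 2 + 8)
    h = Header.new
    h[:magic] = 0xCAFEBABE
    p h[:magic]            # => 3405691582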
+V5.20:0009 */ +#endif /* !defined(_MSC_VER) && !defined(INT8_MIN) */ \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ffi 2.gemspec b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ffi 2.gemspec new file mode 100644 index 0000000..48964f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ffi 2.gemspec @@ -0,0 +1,43 @@ +require File.expand_path("../lib/#{File.basename(__FILE__, '.gemspec')}/version", __FILE__) + +Gem::Specification.new do |s| + s.name = 'ffi' + s.version = FFI::VERSION + s.author = 'Wayne Meissner' + s.email = 'wmeissner@gmail.com' + s.homepage = 'https://github.com/ffi/ffi/wiki' + s.summary = 'Ruby FFI' + s.description = 'Ruby FFI library' + if s.respond_to?(:metadata) + s.metadata['bug_tracker_uri'] = 'https://github.com/ffi/ffi/issues' + s.metadata['changelog_uri'] = 'https://github.com/ffi/ffi/blob/master/CHANGELOG.md' + s.metadata['documentation_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['wiki_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['source_code_uri'] = 'https://github.com/ffi/ffi/' + s.metadata['mailing_list_uri'] = 'http://groups.google.com/group/ruby-ffi' + end + s.files = `git ls-files -z`.split("\x0").reject do |f| + f =~ /^(bench|gen|libtest|nbproject|spec)/ + end + + # Add libffi git files + lfs = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + # Add autoconf generated files of libffi + lfs += %w[ configure config.guess config.sub install-sh ltmain.sh missing fficonfig.h.in ] + # Add automake generated files of libffi + lfs += `git --git-dir ext/ffi_c/libffi/.git ls-files -z *.am */*.am`.gsub(".am\0", ".in\0").split("\x0") + s.files += lfs.map do |f| + File.join("ext/ffi_c/libffi", f) + end + + s.extensions << 'ext/ffi_c/extconf.rb' + s.rdoc_options = %w[--exclude=ext/ffi_c/.*\.o$ --exclude=ffi_c\.(bundle|so)$] + s.license = 'BSD-3-Clause' + s.require_paths << 'ext/ffi_c' + s.required_ruby_version = '>= 2.3' + s.add_development_dependency 'rake', '~> 13.0' + s.add_development_dependency 'rake-compiler', '~> 1.0' + s.add_development_dependency 'rake-compiler-dock', '~> 1.0' + s.add_development_dependency 'rspec', '~> 2.14.1' + s.add_development_dependency 'rubygems-tasks', "~> 0.2.4" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ffi.gemspec b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ffi.gemspec new file mode 100644 index 0000000..48964f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/ffi.gemspec @@ -0,0 +1,43 @@ +require File.expand_path("../lib/#{File.basename(__FILE__, '.gemspec')}/version", __FILE__) + +Gem::Specification.new do |s| + s.name = 'ffi' + s.version = FFI::VERSION + s.author = 'Wayne Meissner' + s.email = 'wmeissner@gmail.com' + s.homepage = 'https://github.com/ffi/ffi/wiki' + s.summary = 'Ruby FFI' + s.description = 'Ruby FFI library' + if s.respond_to?(:metadata) + s.metadata['bug_tracker_uri'] = 'https://github.com/ffi/ffi/issues' + s.metadata['changelog_uri'] = 'https://github.com/ffi/ffi/blob/master/CHANGELOG.md' + s.metadata['documentation_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['wiki_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['source_code_uri'] = 'https://github.com/ffi/ffi/' + s.metadata['mailing_list_uri'] = 'http://groups.google.com/group/ruby-ffi' + end + s.files = `git ls-files -z`.split("\x0").reject do |f| + f =~ /^(bench|gen|libtest|nbproject|spec)/ + end + + # Add libffi git files + lfs = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + # Add autoconf generated files of libffi + lfs 
+= %w[ configure config.guess config.sub install-sh ltmain.sh missing fficonfig.h.in ] + # Add automake generated files of libffi + lfs += `git --git-dir ext/ffi_c/libffi/.git ls-files -z *.am */*.am`.gsub(".am\0", ".in\0").split("\x0") + s.files += lfs.map do |f| + File.join("ext/ffi_c/libffi", f) + end + + s.extensions << 'ext/ffi_c/extconf.rb' + s.rdoc_options = %w[--exclude=ext/ffi_c/.*\.o$ --exclude=ffi_c\.(bundle|so)$] + s.license = 'BSD-3-Clause' + s.require_paths << 'ext/ffi_c' + s.required_ruby_version = '>= 2.3' + s.add_development_dependency 'rake', '~> 13.0' + s.add_development_dependency 'rake-compiler', '~> 1.0' + s.add_development_dependency 'rake-compiler-dock', '~> 1.0' + s.add_development_dependency 'rspec', '~> 2.14.1' + s.add_development_dependency 'rubygems-tasks', "~> 0.2.4" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi 2.rb new file mode 100644 index 0000000..e6c75a7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi 2.rb @@ -0,0 +1,28 @@ +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + Object.send(:remove_const, :FFI) if defined?(::FFI) + begin + require RUBY_VERSION.split('.')[0, 2].join('.') + '/ffi_c' + rescue Exception + require 'ffi_c' + end + + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'jruby' && Gem::Version.new(RUBY_ENGINE_VERSION) >= Gem::Version.new("9.3.pre") + JRuby::Util.load_ext("org.jruby.ext.ffi.FFIService") + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'truffleruby' && Gem::Version.new(RUBY_ENGINE_VERSION) >= Gem::Version.new("20.1.0-dev-a") + require 'truffleruby/ffi_backend' + require 'ffi/ffi' + +else + # Remove the ffi gem dir from the load path, then reload the internal ffi implementation + $LOAD_PATH.delete(File.dirname(__FILE__)) + $LOAD_PATH.delete(File.join(File.dirname(__FILE__), 'ffi')) + unless $LOADED_FEATURES.nil? + $LOADED_FEATURES.delete(__FILE__) + $LOADED_FEATURES.delete('ffi.rb') + end + require 'ffi.rb' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi.rb new file mode 100644 index 0000000..e6c75a7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi.rb @@ -0,0 +1,28 @@ +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + Object.send(:remove_const, :FFI) if defined?(::FFI) + begin + require RUBY_VERSION.split('.')[0, 2].join('.') + '/ffi_c' + rescue Exception + require 'ffi_c' + end + + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'jruby' && Gem::Version.new(RUBY_ENGINE_VERSION) >= Gem::Version.new("9.3.pre") + JRuby::Util.load_ext("org.jruby.ext.ffi.FFIService") + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'truffleruby' && Gem::Version.new(RUBY_ENGINE_VERSION) >= Gem::Version.new("20.1.0-dev-a") + require 'truffleruby/ffi_backend' + require 'ffi/ffi' + +else + # Remove the ffi gem dir from the load path, then reload the internal ffi implementation + $LOAD_PATH.delete(File.dirname(__FILE__)) + $LOAD_PATH.delete(File.join(File.dirname(__FILE__), 'ffi')) + unless $LOADED_FEATURES.nil? 
+ $LOADED_FEATURES.delete(__FILE__) + $LOADED_FEATURES.delete('ffi.rb') + end + require 'ffi.rb' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/autopointer 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/autopointer 2.rb new file mode 100644 index 0000000..889a3e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/autopointer 2.rb @@ -0,0 +1,203 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + class AutoPointer < Pointer + extend DataConverter + + # @overload initialize(pointer, method) + # @param pointer [Pointer] + # @param method [Method] + # @return [self] + # The passed Method will be invoked at GC time. + # @overload initialize(pointer, proc) + # @param pointer [Pointer] + # @return [self] + # The passed Proc will be invoked at GC time (SEE WARNING BELOW!) + # @note WARNING: passing a proc _may_ cause your pointer to never be + # GC'd, unless you're careful to avoid trapping a reference to the + # pointer in the proc. See the test specs for examples. + # @overload initialize(pointer) { |p| ... } + # @param pointer [Pointer] + # @yieldparam [Pointer] p +pointer+ passed to the block + # @return [self] + # The passed block will be invoked at GC time. + # @note + # WARNING: passing a block will cause your pointer to never be GC'd. + # This is bad. + # @overload initialize(pointer) + # @param pointer [Pointer] + # @return [self] + # The pointer's release() class method will be invoked at GC time. + # + # @note The safest, and therefore preferred, calling + # idiom is to pass a Method as the second parameter. Example usage: + # + # class PointerHelper + # def self.release(pointer) + # ... + # end + # end + # + # p = AutoPointer.new(other_pointer, PointerHelper.method(:release)) + # + # The above code will cause PointerHelper#release to be invoked at GC time. 
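The @note just above recommends passing a Method as the releaser; a compact sketch of that idiom (LibFoo, foo_new and foo_free are hypothetical names, not part of this gem):

    require 'ffi'

    module LibFoo
      extend FFI::Library
      ffi_lib 'foo'
      attach_function :foo_new,  [], :pointer
      attach_function :foo_free, [:pointer], :void
    end

    class FooHelper
      def self.release(ptr)
        LibFoo.foo_free(ptr)   # run by the finalizer at GC time
      end
    end

    handle = FFI::AutoPointer.new(LibFoo.foo_new, FooHelper.method(:release))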
+ # + # @note + # The last calling idiom (only one parameter) is generally only + # going to be useful if you subclass {AutoPointer}, and override + # #release, which by default does nothing. + def initialize(ptr, proc=nil, &block) + super(ptr.type_size, ptr) + raise TypeError, "Invalid pointer" if ptr.nil? || !ptr.kind_of?(Pointer) \ + || ptr.kind_of?(MemoryPointer) || ptr.kind_of?(AutoPointer) + + @releaser = if proc + if not proc.respond_to?(:call) + raise RuntimeError.new("proc must be callable") + end + CallableReleaser.new(ptr, proc) + + else + if not self.class.respond_to?(:release) + raise RuntimeError.new("no release method defined") + end + DefaultReleaser.new(ptr, self.class) + end + + ObjectSpace.define_finalizer(self, @releaser) + self + end + + # @return [nil] + # Free the pointer. + def free + @releaser.free + end + + # @param [Boolean] autorelease + # @return [Boolean] +autorelease+ + # Set +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease=(autorelease) + @releaser.autorelease=(autorelease) + end + + # @return [Boolean] +autorelease+ + # Get +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease? + @releaser.autorelease + end + + # @abstract Base class for {AutoPointer}'s releasers. + # + # All subclasses of Releaser should define a +#release(ptr)+ method. + # A releaser is an object in charge of release an {AutoPointer}. + class Releaser + attr_accessor :autorelease + + # @param [Pointer] ptr + # @param [#call] proc + # @return [nil] + # A new instance of Releaser. + def initialize(ptr, proc) + @ptr = ptr + @proc = proc + @autorelease = true + end + + # @return [nil] + # Free pointer. + def free + if @ptr + release(@ptr) + @autorelease = false + @ptr = nil + @proc = nil + end + end + + # @param args + # Release pointer if +autorelease+ is set. + def call(*args) + release(@ptr) if @autorelease && @ptr + end + end + + # DefaultReleaser is a {Releaser} used when an {AutoPointer} is defined + # without Proc or Method. In this case, the pointer to release must be of + # a class derived from AutoPointer with a {release} class method. + class DefaultReleaser < Releaser + # @param [Pointer] ptr + # @return [nil] + # Release +ptr+ using the {release} class method of its class. + def release(ptr) + @proc.release(ptr) + end + end + + # CallableReleaser is a {Releaser} used when an {AutoPointer} is defined with a + # Proc or a Method. + class CallableReleaser < Releaser + # Release +ptr+ by using Proc or Method defined at +ptr+ + # {AutoPointer#initialize initialization}. + # + # @param [Pointer] ptr + # @return [nil] + def release(ptr) + @proc.call(ptr) + end + end + + # Return native type of AutoPointer. + # + # Override {DataConverter#native_type}. + # @return [Type::POINTER] + # @raise {RuntimeError} if class does not implement a +#release+ method + def self.native_type + if not self.respond_to?(:release) + raise RuntimeError.new("no release method defined for #{self.inspect}") + end + Type::POINTER + end + + # Create a new AutoPointer. + # + # Override {DataConverter#from_native}. + # @overload self.from_native(ptr, ctx) + # @param [Pointer] ptr + # @param ctx not used. Please set +nil+. 
+ # @return [AutoPointer] + def self.from_native(val, ctx) + self.new(val) + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/autopointer.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/autopointer.rb new file mode 100644 index 0000000..889a3e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/autopointer.rb @@ -0,0 +1,203 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + class AutoPointer < Pointer + extend DataConverter + + # @overload initialize(pointer, method) + # @param pointer [Pointer] + # @param method [Method] + # @return [self] + # The passed Method will be invoked at GC time. + # @overload initialize(pointer, proc) + # @param pointer [Pointer] + # @return [self] + # The passed Proc will be invoked at GC time (SEE WARNING BELOW!) + # @note WARNING: passing a proc _may_ cause your pointer to never be + # GC'd, unless you're careful to avoid trapping a reference to the + # pointer in the proc. See the test specs for examples. + # @overload initialize(pointer) { |p| ... } + # @param pointer [Pointer] + # @yieldparam [Pointer] p +pointer+ passed to the block + # @return [self] + # The passed block will be invoked at GC time. + # @note + # WARNING: passing a block will cause your pointer to never be GC'd. + # This is bad. + # @overload initialize(pointer) + # @param pointer [Pointer] + # @return [self] + # The pointer's release() class method will be invoked at GC time. + # + # @note The safest, and therefore preferred, calling + # idiom is to pass a Method as the second parameter. Example usage: + # + # class PointerHelper + # def self.release(pointer) + # ... + # end + # end + # + # p = AutoPointer.new(other_pointer, PointerHelper.method(:release)) + # + # The above code will cause PointerHelper#release to be invoked at GC time. 
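The one-parameter idiom noted just below relies on DefaultReleaser calling a release class method on an AutoPointer subclass; a hedged sketch (LibFoo, foo_new and foo_free are again hypothetical):

    require 'ffi'

    module LibFoo                       # hypothetical binding
      extend FFI::Library
      ffi_lib 'foo'
      attach_function :foo_new,  [], :pointer
      attach_function :foo_free, [:pointer], :void
    end

    class FooPointer < FFI::AutoPointer
      # DefaultReleaser invokes this class method when the object is GC'd.
      def self.release(ptr)
        LibFoo.foo_free(ptr)
      end
    end

    ptr = FooPointer.new(LibFoo.foo_new)
    ptr.autorelease?   # => true; foo_free runs at GC time
    ptr.free           # or release the native resource explicitly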
+ # + # @note + # The last calling idiom (only one parameter) is generally only + # going to be useful if you subclass {AutoPointer}, and override + # #release, which by default does nothing. + def initialize(ptr, proc=nil, &block) + super(ptr.type_size, ptr) + raise TypeError, "Invalid pointer" if ptr.nil? || !ptr.kind_of?(Pointer) \ + || ptr.kind_of?(MemoryPointer) || ptr.kind_of?(AutoPointer) + + @releaser = if proc + if not proc.respond_to?(:call) + raise RuntimeError.new("proc must be callable") + end + CallableReleaser.new(ptr, proc) + + else + if not self.class.respond_to?(:release) + raise RuntimeError.new("no release method defined") + end + DefaultReleaser.new(ptr, self.class) + end + + ObjectSpace.define_finalizer(self, @releaser) + self + end + + # @return [nil] + # Free the pointer. + def free + @releaser.free + end + + # @param [Boolean] autorelease + # @return [Boolean] +autorelease+ + # Set +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease=(autorelease) + @releaser.autorelease=(autorelease) + end + + # @return [Boolean] +autorelease+ + # Get +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease? + @releaser.autorelease + end + + # @abstract Base class for {AutoPointer}'s releasers. + # + # All subclasses of Releaser should define a +#release(ptr)+ method. + # A releaser is an object in charge of release an {AutoPointer}. + class Releaser + attr_accessor :autorelease + + # @param [Pointer] ptr + # @param [#call] proc + # @return [nil] + # A new instance of Releaser. + def initialize(ptr, proc) + @ptr = ptr + @proc = proc + @autorelease = true + end + + # @return [nil] + # Free pointer. + def free + if @ptr + release(@ptr) + @autorelease = false + @ptr = nil + @proc = nil + end + end + + # @param args + # Release pointer if +autorelease+ is set. + def call(*args) + release(@ptr) if @autorelease && @ptr + end + end + + # DefaultReleaser is a {Releaser} used when an {AutoPointer} is defined + # without Proc or Method. In this case, the pointer to release must be of + # a class derived from AutoPointer with a {release} class method. + class DefaultReleaser < Releaser + # @param [Pointer] ptr + # @return [nil] + # Release +ptr+ using the {release} class method of its class. + def release(ptr) + @proc.release(ptr) + end + end + + # CallableReleaser is a {Releaser} used when an {AutoPointer} is defined with a + # Proc or a Method. + class CallableReleaser < Releaser + # Release +ptr+ by using Proc or Method defined at +ptr+ + # {AutoPointer#initialize initialization}. + # + # @param [Pointer] ptr + # @return [nil] + def release(ptr) + @proc.call(ptr) + end + end + + # Return native type of AutoPointer. + # + # Override {DataConverter#native_type}. + # @return [Type::POINTER] + # @raise {RuntimeError} if class does not implement a +#release+ method + def self.native_type + if not self.respond_to?(:release) + raise RuntimeError.new("no release method defined for #{self.inspect}") + end + Type::POINTER + end + + # Create a new AutoPointer. + # + # Override {DataConverter#from_native}. + # @overload self.from_native(ptr, ctx) + # @param [Pointer] ptr + # @param ctx not used. Please set +nil+. 
+ # @return [AutoPointer] + def self.from_native(val, ctx) + self.new(val) + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/buffer 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/buffer 2.rb new file mode 100644 index 0000000..449e45b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/buffer 2.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/buffer' in user code +# diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/buffer.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/buffer.rb new file mode 100644 index 0000000..449e45b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/buffer.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/buffer' in user code +# diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/callback 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/callback 2.rb new file mode 100644 index 0000000..32d52f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/callback 2.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/callback' in user code +# diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/callback.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/callback.rb new file mode 100644 index 0000000..32d52f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/callback.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/callback' in user code +# diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/data_converter 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/data_converter 2.rb new file mode 100644 index 0000000..1527588 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/data_converter 2.rb @@ -0,0 +1,67 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This module is used to extend somes classes and give then a common API. + # + # Most of methods defined here must be overriden. + module DataConverter + # Get native type. + # + # @overload native_type(type) + # @param [String, Symbol, Type] type + # @return [Type] + # Get native type from +type+. + # + # @overload native_type + # @raise {NotImplementedError} This method must be overriden. + def native_type(type = nil) + if type + @native_type = FFI.find_type(type) + else + native_type = @native_type + unless native_type + raise NotImplementedError, 'native_type method not overridden and no native_type set' + end + native_type + end + end + + # Convert to a native type. + def to_native(value, ctx) + value + end + + # Convert from a native type. + def from_native(value, ctx) + value + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/data_converter.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/data_converter.rb new file mode 100644 index 0000000..1527588 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/data_converter.rb @@ -0,0 +1,67 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This module is used to extend somes classes and give then a common API. + # + # Most of methods defined here must be overriden. + module DataConverter + # Get native type. 
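DataConverter's hooks (native_type above, to_native and from_native below) are what custom typedefs implement; a hedged sketch that maps epoch seconds to Time (TimeConverter is an illustrative name):

    require 'ffi'

    module TimeConverter
      extend FFI::DataConverter
      native_type :int64                 # stored as a 64-bit integer

      def self.to_native(value, ctx)
        value.to_i                       # Time -> seconds since epoch
      end

      def self.from_native(value, ctx)
        Time.at(value)                   # seconds since epoch -> Time
      end
    end

    p TimeConverter.to_native(Time.at(0), nil)   # => 0
    p TimeConverter.from_native(0, nil)          # => 1970-01-01 ...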
+ # + # @overload native_type(type) + # @param [String, Symbol, Type] type + # @return [Type] + # Get native type from +type+. + # + # @overload native_type + # @raise {NotImplementedError} This method must be overriden. + def native_type(type = nil) + if type + @native_type = FFI.find_type(type) + else + native_type = @native_type + unless native_type + raise NotImplementedError, 'native_type method not overridden and no native_type set' + end + native_type + end + end + + # Convert to a native type. + def to_native(value, ctx) + value + end + + # Convert from a native type. + def from_native(value, ctx) + value + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/enum 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/enum 2.rb new file mode 100644 index 0000000..8fcb498 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/enum 2.rb @@ -0,0 +1,296 @@ +# +# Copyright (C) 2009, 2010 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # An instance of this class permits to manage {Enum}s. In fact, Enums is a collection of {Enum}s. + class Enums + + # @return [nil] + def initialize + @all_enums = Array.new + @tagged_enums = Hash.new + @symbol_map = Hash.new + end + + # @param [Enum] enum + # Add an {Enum} to the collection. + def <<(enum) + @all_enums << enum + @tagged_enums[enum.tag] = enum unless enum.tag.nil? + @symbol_map.merge!(enum.symbol_map) + end + + # @param query enum tag or part of an enum name + # @return [Enum] + # Find a {Enum} in collection. + def find(query) + if @tagged_enums.has_key?(query) + @tagged_enums[query] + else + @all_enums.detect { |enum| enum.symbols.include?(query) } + end + end + + # @param symbol a symbol to find in merge symbol maps of all enums. + # @return a symbol + def __map_symbol(symbol) + @symbol_map[symbol] + end + + end + + # Represents a C enum. 
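A usage sketch for Enum, constructing it directly (library bindings would normally declare it with FFI::Library#enum); one explicit integer is added to show how counting resumes:

    require 'ffi'

    fruits = FFI::Enum.new([:apple, :banana, :orange, 42, :pineapple], :fruits)

    p fruits[:banana]      # => 1   (symbol -> value)
    p fruits[42]           # => :orange   (value -> symbol)
    p fruits[:pineapple]   # => 43  (counting resumes after the explicit 42)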
+ # + # For a C enum: + # enum fruits { + # apple, + # banana, + # orange, + # pineapple + # }; + # are defined this vocabulary: + # * a _symbol_ is a word from the enumeration (ie. _apple_, by example); + # * a _value_ is the value of a symbol in the enumeration (by example, apple has value _0_ and banana _1_). + class Enum + include DataConverter + + attr_reader :tag + attr_reader :native_type + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info + # @param [nil, Symbol] tag enum tag + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Enum + # @param [nil, Enumerable] info symbols and values for new Enum + # @param [nil, Symbol] tag name of new Enum + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? + last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate enum key" if @kv_map.has_key?(i) + @kv_map[i] = value + last_cst = i + value += 1 + when Integer + @kv_map[last_cst] = i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # @return [Array] enum symbol names + def symbols + @kv_map.keys + end + + # Get a symbol or a value from the enum. + # @overload [](query) + # Get enum value from symbol. + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get enum symbol from value. + # @param [Integer] query + # @return [Symbol] + def [](query) + case query + when Symbol + @kv_map[query] + when Integer + @vk_map[query] + end + end + alias find [] + + # Get the symbol map. + # @return [Hash] + def symbol_map + @kv_map + end + + alias to_h symbol_map + alias to_hash symbol_map + + # @param [Symbol, Integer, #to_int] val + # @param ctx unused + # @return [Integer] value of a enum symbol + def to_native(val, ctx) + @kv_map[val] || if val.is_a?(Integer) + val + elsif val.respond_to?(:to_int) + val.to_int + else + raise ArgumentError, "invalid enum value, #{val.inspect}" + end + end + + # @param val + # @return symbol name if it exists for +val+. + def from_native(val, ctx) + @vk_map[val] || val + end + end + + # Represents a C enum whose values are power of 2 + # + # @example + # enum { + # red = (1<<0), + # green = (1<<1), + # blue = (1<<2) + # } + # + # Contrary to classical enums, bitmask values are usually combined + # when used. + class Bitmask < Enum + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Bitmask + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? 
+ last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate bitmask key" if @kv_map.has_key?(i) + @kv_map[i] = 1 << value + last_cst = i + value += 1 + when Integer + raise ArgumentError, "bitmask index should be positive" if i<0 + @kv_map[last_cst] = 1 << i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # Get a symbol list or a value from the bitmask + # @overload [](*query) + # Get bitmask value from symbol list + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get bitmaks value from symbol array + # @param [Array] query + # @return [Integer] + # @overload [](*query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Integer] query + # @return [Array] + # @overload [](query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Array] query + # @return [Array] + def [](*query) + flat_query = query.flatten + raise ArgumentError, "query should be homogeneous, #{query.inspect}" unless flat_query.all? { |o| o.is_a?(Symbol) } || flat_query.all? { |o| o.is_a?(Integer) || o.respond_to?(:to_int) } + case flat_query[0] + when Symbol + flat_query.inject(0) do |val, o| + v = @kv_map[o] + if v then val |= v else val end + end + when Integer, ->(o) { o.respond_to?(:to_int) } + val = flat_query.inject(0) { |mask, o| mask |= o.to_int } + @kv_map.select { |_, v| v & val != 0 }.keys + end + end + + # Get the native value of a bitmask + # @overload to_native(query, ctx) + # @param [Symbol, Integer, #to_int] query + # @param ctx unused + # @return [Integer] value of a bitmask + # @overload to_native(query, ctx) + # @param [Array] query + # @param ctx unused + # @return [Integer] value of a bitmask + def to_native(query, ctx) + return 0 if query.nil? + flat_query = [query].flatten + flat_query.inject(0) do |val, o| + case o + when Symbol + v = @kv_map[o] + raise ArgumentError, "invalid bitmask value, #{o.inspect}" unless v + val |= v + when Integer + val |= o + when ->(obj) { obj.respond_to?(:to_int) } + val |= o.to_int + else + raise ArgumentError, "invalid bitmask value, #{o.inspect}" + end + end + end + + # @param [Integer] val + # @param ctx unused + # @return [Array] list of symbol names corresponding to val, plus an optional remainder if some bits don't match any constant + def from_native(val, ctx) + list = @kv_map.select { |_, v| v & val != 0 }.keys + # If there are unmatch flags, + # return them in an integer, + # else information can be lost. + # Similar to Enum behavior. + remainder = val ^ list.inject(0) do |tmp, o| + v = @kv_map[o] + if v then tmp |= v else tmp end + end + list.push remainder unless remainder == 0 + return list + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/enum.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/enum.rb new file mode 100644 index 0000000..8fcb498 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/enum.rb @@ -0,0 +1,296 @@ +# +# Copyright (C) 2009, 2010 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # An instance of this class permits to manage {Enum}s. In fact, Enums is a collection of {Enum}s. + class Enums + + # @return [nil] + def initialize + @all_enums = Array.new + @tagged_enums = Hash.new + @symbol_map = Hash.new + end + + # @param [Enum] enum + # Add an {Enum} to the collection. + def <<(enum) + @all_enums << enum + @tagged_enums[enum.tag] = enum unless enum.tag.nil? + @symbol_map.merge!(enum.symbol_map) + end + + # @param query enum tag or part of an enum name + # @return [Enum] + # Find a {Enum} in collection. + def find(query) + if @tagged_enums.has_key?(query) + @tagged_enums[query] + else + @all_enums.detect { |enum| enum.symbols.include?(query) } + end + end + + # @param symbol a symbol to find in merge symbol maps of all enums. + # @return a symbol + def __map_symbol(symbol) + @symbol_map[symbol] + end + + end + + # Represents a C enum. + # + # For a C enum: + # enum fruits { + # apple, + # banana, + # orange, + # pineapple + # }; + # are defined this vocabulary: + # * a _symbol_ is a word from the enumeration (ie. _apple_, by example); + # * a _value_ is the value of a symbol in the enumeration (by example, apple has value _0_ and banana _1_). + class Enum + include DataConverter + + attr_reader :tag + attr_reader :native_type + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info + # @param [nil, Symbol] tag enum tag + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Enum + # @param [nil, Enumerable] info symbols and values for new Enum + # @param [nil, Symbol] tag name of new Enum + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? + last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate enum key" if @kv_map.has_key?(i) + @kv_map[i] = value + last_cst = i + value += 1 + when Integer + @kv_map[last_cst] = i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # @return [Array] enum symbol names + def symbols + @kv_map.keys + end + + # Get a symbol or a value from the enum. + # @overload [](query) + # Get enum value from symbol. 
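The Bitmask subclass further below treats each symbol as a bit flag combined with OR; a sketch mirroring the red/green/blue doc example, built directly rather than via FFI::Library#bitmask:

    require 'ffi'

    colors = FFI::Bitmask.new([:red, :green, :blue], :colors)

    p colors[:red, :blue]               # => 5   (1<<0 | 1<<2)
    p colors.to_native([:green], nil)   # => 2
    p colors.from_native(6, nil)        # => [:green, :blue]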
+ # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get enum symbol from value. + # @param [Integer] query + # @return [Symbol] + def [](query) + case query + when Symbol + @kv_map[query] + when Integer + @vk_map[query] + end + end + alias find [] + + # Get the symbol map. + # @return [Hash] + def symbol_map + @kv_map + end + + alias to_h symbol_map + alias to_hash symbol_map + + # @param [Symbol, Integer, #to_int] val + # @param ctx unused + # @return [Integer] value of a enum symbol + def to_native(val, ctx) + @kv_map[val] || if val.is_a?(Integer) + val + elsif val.respond_to?(:to_int) + val.to_int + else + raise ArgumentError, "invalid enum value, #{val.inspect}" + end + end + + # @param val + # @return symbol name if it exists for +val+. + def from_native(val, ctx) + @vk_map[val] || val + end + end + + # Represents a C enum whose values are power of 2 + # + # @example + # enum { + # red = (1<<0), + # green = (1<<1), + # blue = (1<<2) + # } + # + # Contrary to classical enums, bitmask values are usually combined + # when used. + class Bitmask < Enum + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Bitmask + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? + last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate bitmask key" if @kv_map.has_key?(i) + @kv_map[i] = 1 << value + last_cst = i + value += 1 + when Integer + raise ArgumentError, "bitmask index should be positive" if i<0 + @kv_map[last_cst] = 1 << i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # Get a symbol list or a value from the bitmask + # @overload [](*query) + # Get bitmask value from symbol list + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get bitmaks value from symbol array + # @param [Array] query + # @return [Integer] + # @overload [](*query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Integer] query + # @return [Array] + # @overload [](query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Array] query + # @return [Array] + def [](*query) + flat_query = query.flatten + raise ArgumentError, "query should be homogeneous, #{query.inspect}" unless flat_query.all? { |o| o.is_a?(Symbol) } || flat_query.all? { |o| o.is_a?(Integer) || o.respond_to?(:to_int) } + case flat_query[0] + when Symbol + flat_query.inject(0) do |val, o| + v = @kv_map[o] + if v then val |= v else val end + end + when Integer, ->(o) { o.respond_to?(:to_int) } + val = flat_query.inject(0) { |mask, o| mask |= o.to_int } + @kv_map.select { |_, v| v & val != 0 }.keys + end + end + + # Get the native value of a bitmask + # @overload to_native(query, ctx) + # @param [Symbol, Integer, #to_int] query + # @param ctx unused + # @return [Integer] value of a bitmask + # @overload to_native(query, ctx) + # @param [Array] query + # @param ctx unused + # @return [Integer] value of a bitmask + def to_native(query, ctx) + return 0 if query.nil? 
+ flat_query = [query].flatten + flat_query.inject(0) do |val, o| + case o + when Symbol + v = @kv_map[o] + raise ArgumentError, "invalid bitmask value, #{o.inspect}" unless v + val |= v + when Integer + val |= o + when ->(obj) { obj.respond_to?(:to_int) } + val |= o.to_int + else + raise ArgumentError, "invalid bitmask value, #{o.inspect}" + end + end + end + + # @param [Integer] val + # @param ctx unused + # @return [Array] list of symbol names corresponding to val, plus an optional remainder if some bits don't match any constant + def from_native(val, ctx) + list = @kv_map.select { |_, v| v & val != 0 }.keys + # If there are unmatch flags, + # return them in an integer, + # else information can be lost. + # Similar to Enum behavior. + remainder = val ^ list.inject(0) do |tmp, o| + v = @kv_map[o] + if v then tmp |= v else tmp end + end + list.push remainder unless remainder == 0 + return list + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/errno 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/errno 2.rb new file mode 100644 index 0000000..de82d89 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/errno 2.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # @return (see FFI::LastError.error) + # @see FFI::LastError.error + def self.errno + FFI::LastError.error + end + # @param error (see FFI::LastError.error=) + # @return (see FFI::LastError.error=) + # @see FFI::LastError.error= + def self.errno=(error) + FFI::LastError.error = error + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/errno.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/errno.rb new file mode 100644 index 0000000..de82d89 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/errno.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. 
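# Illustrative usage sketch (not from the vendored ffi sources above): the FFI::Enum
# class defined in enum.rb maps symbols to consecutive values unless an explicit
# integer resets the counter, and lookups work in both directions.
require 'ffi'

fruits = FFI::Enum.new([:apple, :banana, :orange, 10, :pineapple])

fruits[:banana]                    # => 1
fruits[10]                         # => :orange
fruits.to_native(:pineapple, nil)  # => 11
fruits.from_native(99, nil)        # => 99  (unknown values pass through unchanged)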
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # @return (see FFI::LastError.error) + # @see FFI::LastError.error + def self.errno + FFI::LastError.error + end + # @param error (see FFI::LastError.error=) + # @return (see FFI::LastError.error=) + # @see FFI::LastError.error= + def self.errno=(error) + FFI::LastError.error = error + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/ffi 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/ffi 2.rb new file mode 100644 index 0000000..5201dae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/ffi 2.rb @@ -0,0 +1,46 @@ +# +# Copyright (C) 2008-2010 JRuby project +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
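# Illustrative usage sketch (not from the vendored ffi sources above): FFI.errno and
# FFI.errno= simply delegate to FFI::LastError, the per-thread slot ffi uses to expose
# the C errno observed around attached-function calls. The concrete value 22 assumes a
# typical Linux/macOS errno table.
require 'ffi'

FFI.errno = Errno::EINVAL::Errno   # seed the thread-local value by hand
FFI.errno                          # => 22 on typical Linux/macOS systems
# After an attached C function fails, FFI.errno reports the errno it left behind.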
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +require 'ffi/platform' +require 'ffi/data_converter' +require 'ffi/types' +require 'ffi/library' +require 'ffi/errno' +require 'ffi/pointer' +require 'ffi/memorypointer' +require 'ffi/struct' +require 'ffi/union' +require 'ffi/managedstruct' +require 'ffi/callback' +require 'ffi/io' +require 'ffi/autopointer' +require 'ffi/variadic' +require 'ffi/enum' +require 'ffi/version' diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/ffi.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/ffi.rb new file mode 100644 index 0000000..5201dae --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/ffi.rb @@ -0,0 +1,46 @@ +# +# Copyright (C) 2008-2010 JRuby project +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +require 'ffi/platform' +require 'ffi/data_converter' +require 'ffi/types' +require 'ffi/library' +require 'ffi/errno' +require 'ffi/pointer' +require 'ffi/memorypointer' +require 'ffi/struct' +require 'ffi/union' +require 'ffi/managedstruct' +require 'ffi/callback' +require 'ffi/io' +require 'ffi/autopointer' +require 'ffi/variadic' +require 'ffi/enum' +require 'ffi/version' diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/io 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/io 2.rb new file mode 100644 index 0000000..7fa1cf7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/io 2.rb @@ -0,0 +1,62 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + + # This module implements a couple of class methods to play with IO. + module IO + # @param [Integer] fd file decriptor + # @param [String] mode mode string + # @return [::IO] + # Synonym for IO::for_fd. + def self.for_fd(fd, mode = "r") + ::IO.for_fd(fd, mode) + end + + # @param [#read] io io to read from + # @param [AbstractMemory] buf destination for data read from +io+ + # @param [nil, Numeric] len maximul number of bytes to read from +io+. If +nil+, + # read until end of file. + # @return [Numeric] length really read, in bytes + # + # A version of IO#read that reads data from an IO and put then into a native buffer. + # + # This will be optimized at some future time to eliminate the double copy. + # + def self.native_read(io, buf, len) + tmp = io.read(len) + return -1 unless tmp + buf.put_bytes(0, tmp) + tmp.length + end + + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/io.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/io.rb new file mode 100644 index 0000000..7fa1cf7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/io.rb @@ -0,0 +1,62 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
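# Illustrative usage sketch (not from the vendored ffi sources above): FFI::IO.native_read
# only requires an object responding to #read, so a StringIO gives a self-contained demo
# of copying bytes into a native buffer.
require 'ffi'
require 'stringio'

buf = FFI::MemoryPointer.new(:char, 16)
io  = StringIO.new("hello world")

n = FFI::IO.native_read(io, buf, 5)   # => 5 (bytes copied into the native buffer)
buf.get_bytes(0, n)                   # => "hello"
FFI::IO.native_read(io, buf, nil)     # => 6, reads the rest (" world"); returns -1 at EOF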
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + + # This module implements a couple of class methods to play with IO. + module IO + # @param [Integer] fd file decriptor + # @param [String] mode mode string + # @return [::IO] + # Synonym for IO::for_fd. + def self.for_fd(fd, mode = "r") + ::IO.for_fd(fd, mode) + end + + # @param [#read] io io to read from + # @param [AbstractMemory] buf destination for data read from +io+ + # @param [nil, Numeric] len maximul number of bytes to read from +io+. If +nil+, + # read until end of file. + # @return [Numeric] length really read, in bytes + # + # A version of IO#read that reads data from an IO and put then into a native buffer. + # + # This will be optimized at some future time to eliminate the double copy. + # + def self.native_read(io, buf, len) + tmp = io.read(len) + return -1 unless tmp + buf.put_bytes(0, tmp) + tmp.length + end + + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/library 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/library 2.rb new file mode 100644 index 0000000..1a96c8e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/library 2.rb @@ -0,0 +1,592 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + CURRENT_PROCESS = USE_THIS_PROCESS_AS_LIBRARY = Object.new + + # @param [#to_s] lib library name + # @return [String] library name formatted for current platform + # Transform a generic library name to a platform library name + # @example + # # Linux + # FFI.map_library_name 'c' # -> "libc.so.6" + # FFI.map_library_name 'jpeg' # -> "libjpeg.so" + # # Windows + # FFI.map_library_name 'c' # -> "msvcrt.dll" + # FFI.map_library_name 'jpeg' # -> "jpeg.dll" + def self.map_library_name(lib) + # Mangle the library name to reflect the native library naming conventions + lib = Library::LIBC if lib == 'c' + + if lib && File.basename(lib) == lib + lib = Platform::LIBPREFIX + lib unless lib =~ /^#{Platform::LIBPREFIX}/ + r = Platform::IS_WINDOWS || Platform::IS_MAC ? "\\.#{Platform::LIBSUFFIX}$" : "\\.so($|\\.[1234567890]+)" + lib += ".#{Platform::LIBSUFFIX}" unless lib =~ /#{r}/ + end + + lib + end + + # Exception raised when a function is not found in libraries + class NotFoundError < LoadError + def initialize(function, *libraries) + super("Function '#{function}' not found in [#{libraries[0].nil? ? 'current process' : libraries.join(", ")}]") + end + end + + # This module is the base to use native functions. + # + # A basic usage may be: + # require 'ffi' + # + # module Hello + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function 'puts', [ :string ], :int + # end + # + # Hello.puts("Hello, World") + # + # + module Library + CURRENT_PROCESS = FFI::CURRENT_PROCESS + LIBC = FFI::Platform::LIBC + + # @param mod extended object + # @return [nil] + # @raise {RuntimeError} if +mod+ is not a Module + # Test if extended object is a Module. If not, raise RuntimeError. + def self.extended(mod) + raise RuntimeError.new("must only be extended by module") unless mod.kind_of?(Module) + end + + + # @param [Array] names names of libraries to load + # @return [Array] + # @raise {LoadError} if a library cannot be opened + # Load native libraries. + def ffi_lib(*names) + raise LoadError.new("library names list must not be empty") if names.empty? + + lib_flags = defined?(@ffi_lib_flags) ? @ffi_lib_flags : FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL + ffi_libs = names.map do |name| + + if name == FFI::CURRENT_PROCESS + FFI::DynamicLibrary.open(nil, FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL) + + else + libnames = (name.is_a?(::Array) ? 
name : [ name ]).map(&:to_s).map { |n| [ n, FFI.map_library_name(n) ].uniq }.flatten.compact + lib = nil + errors = {} + + libnames.each do |libname| + begin + orig = libname + lib = FFI::DynamicLibrary.open(libname, lib_flags) + break if lib + + rescue Exception => ex + ldscript = false + if ex.message =~ /(([^ \t()])+\.so([^ \t:()])*):([ \t])*(invalid ELF header|file too short|invalid file format)/ + if File.read($1) =~ /(?:GROUP|INPUT) *\( *([^ \)]+)/ + libname = $1 + ldscript = true + end + end + + if ldscript + retry + else + # TODO better library lookup logic + unless libname.start_with?("/") || FFI::Platform.windows? + path = ['/usr/lib/','/usr/local/lib/','/opt/local/lib/'].find do |pth| + File.exist?(pth + libname) + end + if path + libname = path + libname + retry + end + end + + libr = (orig == libname ? orig : "#{orig} #{libname}") + errors[libr] = ex + end + end + end + + if lib.nil? + raise LoadError.new(errors.values.join(".\n")) + end + + # return the found lib + lib + end + end + + @ffi_libs = ffi_libs + end + + # Set the calling convention for {#attach_function} and {#callback} + # + # @see http://en.wikipedia.org/wiki/Stdcall#stdcall + # @note +:stdcall+ is typically used for attaching Windows API functions + # + # @param [Symbol] convention one of +:default+, +:stdcall+ + # @return [Symbol] the new calling convention + def ffi_convention(convention = nil) + @ffi_convention ||= :default + @ffi_convention = convention if convention + @ffi_convention + end + + # @see #ffi_lib + # @return [Array] array of currently loaded FFI libraries + # @raise [LoadError] if no libraries have been loaded (using {#ffi_lib}) + # Get FFI libraries loaded using {#ffi_lib}. + def ffi_libraries + raise LoadError.new("no library specified") if !defined?(@ffi_libs) || @ffi_libs.empty? + @ffi_libs + end + + # Flags used in {#ffi_lib}. + # + # This map allows you to supply symbols to {#ffi_lib_flags} instead of + # the actual constants. + FlagsMap = { + :global => DynamicLibrary::RTLD_GLOBAL, + :local => DynamicLibrary::RTLD_LOCAL, + :lazy => DynamicLibrary::RTLD_LAZY, + :now => DynamicLibrary::RTLD_NOW + } + + # Sets library flags for {#ffi_lib}. + # + # @example + # ffi_lib_flags(:lazy, :local) # => 5 + # + # @param [Symbol, …] flags (see {FlagsMap}) + # @return [Fixnum] the new value + def ffi_lib_flags(*flags) + @ffi_lib_flags = flags.inject(0) { |result, f| result | FlagsMap[f] } + end + + + ## + # @overload attach_function(func, args, returns, options = {}) + # @example attach function without an explicit name + # module Foo + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :malloc, [:size_t], :pointer + # end + # # now callable via Foo.malloc + # @overload attach_function(name, func, args, returns, options = {}) + # @example attach function with an explicit name + # module Bar + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :c_malloc, :malloc, [:size_t], :pointer + # end + # # now callable via Bar.c_malloc + # + # Attach C function +func+ to this module. 
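# Illustrative usage sketch (not from the vendored ffi sources above), tying together
# ffi_lib, ffi_lib_flags and the "explicit name" form of attach_function documented in
# this file. 'sqlite3' is only a stand-in library name; substitute one present on your
# system.
require 'ffi'

module SQLite
  extend FFI::Library
  ffi_lib_flags :lazy, :global              # OR of RTLD_LAZY | RTLD_GLOBAL via FlagsMap
  ffi_lib ['sqlite3', 'libsqlite3.so.0']    # an Array lists alternative names; the first that opens wins
  attach_function :libversion, :sqlite3_libversion, [], :string
end

puts SQLite.libversion    # Ruby method :libversion bound to the C function sqlite3_libversion()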
+ # + # + # @param [#to_s] name name of ruby method to attach as + # @param [#to_s] func name of C function to attach + # @param [Array] args an array of types + # @param [Symbol] returns type of return value + # @option options [Boolean] :blocking (@blocking) set to true if the C function is a blocking call + # @option options [Symbol] :convention (:default) calling convention (see {#ffi_convention}) + # @option options [FFI::Enums] :enums + # @option options [Hash] :type_map + # + # @return [FFI::VariadicInvoker] + # + # @raise [FFI::NotFoundError] if +func+ cannot be found in the attached libraries (see {#ffi_lib}) + def attach_function(name, func, args, returns = nil, options = nil) + mname, a2, a3, a4, a5 = name, func, args, returns, options + cname, arg_types, ret_type, opts = (a4 && (a2.is_a?(String) || a2.is_a?(Symbol))) ? [ a2, a3, a4, a5 ] : [ mname.to_s, a2, a3, a4 ] + + # Convert :foo to the native type + arg_types = arg_types.map { |e| find_type(e) } + options = { + :convention => ffi_convention, + :type_map => defined?(@ffi_typedefs) ? @ffi_typedefs : nil, + :blocking => defined?(@blocking) && @blocking, + :enums => defined?(@ffi_enums) ? @ffi_enums : nil, + } + + @blocking = false + options.merge!(opts) if opts && opts.is_a?(Hash) + + # Try to locate the function in any of the libraries + invokers = [] + ffi_libraries.each do |lib| + if invokers.empty? + begin + function = nil + function_names(cname, arg_types).find do |fname| + function = lib.find_function(fname) + end + raise LoadError unless function + + invokers << if arg_types.length > 0 && arg_types[arg_types.length - 1] == FFI::NativeType::VARARGS + VariadicInvoker.new(function, arg_types, find_type(ret_type), options) + + else + Function.new(find_type(ret_type), arg_types, function, options) + end + + rescue LoadError + end + end + end + invoker = invokers.compact.shift + raise FFI::NotFoundError.new(cname.to_s, ffi_libraries.map { |lib| lib.name }) unless invoker + + invoker.attach(self, mname.to_s) + invoker + end + + # @param [#to_s] name function name + # @param [Array] arg_types function's argument types + # @return [Array] + # This function returns a list of possible names to lookup. + # @note Function names on windows may be decorated if they are using stdcall. See + # * http://en.wikipedia.org/wiki/Name_mangling#C_name_decoration_in_Microsoft_Windows + # * http://msdn.microsoft.com/en-us/library/zxk0tw93%28v=VS.100%29.aspx + # * http://en.wikibooks.org/wiki/X86_Disassembly/Calling_Conventions#STDCALL + # Note that decorated names can be overridden via def files. Also note that the + # windows api, although using, doesn't have decorated names. 
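# Illustrative usage sketch (not from the vendored ffi sources above), Windows-only:
# with the :stdcall convention, function_names also tries decorated candidates, so
# MessageBeep (one :uint parameter, size 4) is looked up as "MessageBeep",
# "_MessageBeep@4" and "MessageBeep@4".
require 'ffi'

module Win32
  extend FFI::Library
  ffi_convention :stdcall
  ffi_lib 'user32'
  attach_function :message_beep, :MessageBeep, [:uint], :bool
end

Win32.message_beep(0)   # plays the default system sound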
+ def function_names(name, arg_types) + result = [name.to_s] + if ffi_convention == :stdcall + # Get the size of each parameter + size = arg_types.inject(0) do |mem, arg| + size = arg.size + # The size must be a multiple of 4 + size += (4 - size) % 4 + mem + size + end + + result << "_#{name.to_s}@#{size}" # win32 + result << "#{name.to_s}@#{size}" # win64 + end + result + end + + # @overload attach_variable(mname, cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [#to_s] cname name of C variable to attach + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :c_myvar, :myvar, :long + # end + # # now callable via Bar.c_myvar + # @overload attach_variable(cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :myvar, :long + # end + # # now callable via Bar.myvar + # @return [DynamicLibrary::Symbol] + # @raise {FFI::NotFoundError} if +cname+ cannot be found in libraries + # + # Attach C variable +cname+ to this module. + def attach_variable(mname, a1, a2 = nil) + cname, type = a2 ? [ a1, a2 ] : [ mname.to_s, a1 ] + address = nil + ffi_libraries.each do |lib| + begin + address = lib.find_variable(cname.to_s) + break unless address.nil? + rescue LoadError + end + end + + raise FFI::NotFoundError.new(cname, ffi_libraries) if address.nil? || address.null? + if type.is_a?(Class) && type < FFI::Struct + # If it is a global struct, just attach directly to the pointer + s = s = type.new(address) # Assigning twice to suppress unused variable warning + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname} + end + code + + else + sc = Class.new(FFI::Struct) + sc.layout :gvar, find_type(type) + s = sc.new(address) + # + # Attach to this module as mname/mname= + # + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname}[:gvar] + end + def self.#{mname}=(value) + @@ffi_gvar_#{mname}[:gvar] = value + end + code + + end + + address + end + + + # @overload callback(name, params, ret) + # @param name callback name to add to type map + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @overload callback(params, ret) + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @return [FFI::CallbackInfo] + def callback(*args) + raise ArgumentError, "wrong number of arguments" if args.length < 2 || args.length > 3 + name, params, ret = if args.length == 3 + args + else + [ nil, args[0], args[1] ] + end + + native_params = params.map { |e| find_type(e) } + raise ArgumentError, "callbacks cannot have variadic parameters" if native_params.include?(FFI::Type::VARARGS) + options = Hash.new + options[:convention] = ffi_convention + options[:enums] = @ffi_enums if defined?(@ffi_enums) + ret_type = find_type(ret) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + cb = FFI::CallbackInfo.new(ret_type, native_params, options) + + # Add to the symbol -> type map (unless there was no name) + unless name.nil? 
+ typedef cb, name + end + + cb + end + + # Register or get an already registered type definition. + # + # To register a new type definition, +old+ should be a {FFI::Type}. +add+ + # is in this case the type definition. + # + # If +old+ is a {DataConverter}, a {Type::Mapped} is returned. + # + # If +old+ is +:enum+ + # * and +add+ is an +Array+, a call to {#enum} is made with +add+ as single parameter; + # * in others cases, +info+ is used to create a named enum. + # + # If +old+ is a key for type map, #typedef get +old+ type definition. + # + # @param [DataConverter, Symbol, Type] old + # @param [Symbol] add + # @param [Symbol] info + # @return [FFI::Enum, FFI::Type] + def typedef(old, add, info=nil) + @ffi_typedefs = Hash.new unless defined?(@ffi_typedefs) + + @ffi_typedefs[add] = if old.kind_of?(FFI::Type) + old + + elsif @ffi_typedefs.has_key?(old) + @ffi_typedefs[old] + + elsif old.is_a?(DataConverter) + FFI::Type::Mapped.new(old) + + elsif old == :enum + if add.kind_of?(Array) + self.enum(add) + else + self.enum(info, add) + end + + else + FFI.find_type(old) + end + end + + private + # Generic enum builder + # @param [Class] klass can be one of FFI::Enum or FFI::Bitmask + # @param args (see #enum or #bitmask) + def generic_enum(klass, *args) + native_type = args.first.kind_of?(FFI::Type) ? args.shift : nil + name, values = if args[0].kind_of?(Symbol) && args[1].kind_of?(Array) + [ args[0], args[1] ] + elsif args[0].kind_of?(Array) + [ nil, args[0] ] + else + [ nil, args ] + end + @ffi_enums = FFI::Enums.new unless defined?(@ffi_enums) + @ffi_enums << (e = native_type ? klass.new(native_type, values, name) : klass.new(values, name)) + + # If called with a name, add a typedef alias + typedef(e, name) if name + e + end + + public + # @overload enum(name, values) + # Create a named enum. + # @example + # enum :foo, [:zero, :one, :two] # named enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(*args) + # Create an unnamed enum. + # @example + # enum :zero, :one, :two # unnamed enum + # @param args values for enum + # @overload enum(values) + # Create an unnamed enum. + # @example + # enum [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [Array] values values for enum + # @overload enum(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :foo, [:zero, :one, :two] # named enum + # @param [FFI::Type] native_type native type for new enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(native_type, *args) + # Create an unnamed enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :zero, :one, :two # unnamed enum + # @param [FFI::Type] native_type native type for new enum + # @param args values for enum + # @overload enum(native_type, values) + # Create an unnamed enum and specify the native type. + # @example + # enum Type::UINT64, [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [FFI::Type] native_type native type for new enum + # @param [Array] values values for enum + # @return [FFI::Enum] + # Create a new {FFI::Enum}. 
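# Illustrative usage sketch (not from the vendored ffi sources above): the #callback
# helper defined in this file registers a typedef that attach_function can use for
# function-pointer parameters; a Ruby proc is then passed where C expects the callback.
require 'ffi'

module C
  extend FFI::Library
  ffi_lib FFI::Library::LIBC
  callback :compare, [:pointer, :pointer], :int
  attach_function :qsort, [:pointer, :size_t, :size_t, :compare], :void
end

ptr = FFI::MemoryPointer.new(:int, 3)
ptr.write_array_of_int([3, 1, 2])
C.qsort(ptr, 3, ptr.type_size, proc { |a, b| a.read_int <=> b.read_int })
ptr.read_array_of_int(3)   # => [1, 2, 3]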
+ def enum(*args) + generic_enum(FFI::Enum, *args) + end + + # @overload bitmask(name, values) + # Create a named bitmask + # @example + # bitmask :foo, [:red, :green, :blue] # bits 0,1,2 are used + # bitmask :foo, [:red, :green, 5, :blue] # bits 0,5,6 are used + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(*args) + # Create an unamed bitmask + # @example + # bm = bitmask :red, :green, :blue # bits 0,1,2 are used + # bm = bitmask :red, :green, 5, blue # bits 0,5,6 are used + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(values) + # Create an unamed bitmask + # @example + # bm = bitmask [:red, :green, :blue] # bits 0,1,2 are used + # bm = bitmask [:red, :green, 5, blue] # bits 0,5,6 are used + # @param [Array] values for new bitmask + # @overload bitmask(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, :foo, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(native_type, *args) + # @example + # bitmask FFI::Type::UINT64, :red, :green, :blue + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(native_type, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Array] values for new bitmask + # @return [FFI::Bitmask] + # Create a new FFI::Bitmask + def bitmask(*args) + generic_enum(FFI::Bitmask, *args) + end + + # @param name + # @return [FFI::Enum] + # Find an enum by name. + def enum_type(name) + @ffi_enums.find(name) if defined?(@ffi_enums) + end + + # @param symbol + # @return [FFI::Enum] + # Find an enum by a symbol it contains. + def enum_value(symbol) + @ffi_enums.__map_symbol(symbol) + end + + # @param [DataConverter, Type, Struct, Symbol] t type to find + # @return [Type] + # Find a type definition. + def find_type(t) + if t.kind_of?(Type) + t + + elsif defined?(@ffi_typedefs) && @ffi_typedefs.has_key?(t) + @ffi_typedefs[t] + + elsif t.is_a?(Class) && t < Struct + Type::POINTER + + elsif t.is_a?(DataConverter) + # Add a typedef so next time the converter is used, it hits the cache + typedef Type::Mapped.new(t), t + + end || FFI.find_type(t) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/library.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/library.rb new file mode 100644 index 0000000..1a96c8e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/library.rb @@ -0,0 +1,592 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + CURRENT_PROCESS = USE_THIS_PROCESS_AS_LIBRARY = Object.new + + # @param [#to_s] lib library name + # @return [String] library name formatted for current platform + # Transform a generic library name to a platform library name + # @example + # # Linux + # FFI.map_library_name 'c' # -> "libc.so.6" + # FFI.map_library_name 'jpeg' # -> "libjpeg.so" + # # Windows + # FFI.map_library_name 'c' # -> "msvcrt.dll" + # FFI.map_library_name 'jpeg' # -> "jpeg.dll" + def self.map_library_name(lib) + # Mangle the library name to reflect the native library naming conventions + lib = Library::LIBC if lib == 'c' + + if lib && File.basename(lib) == lib + lib = Platform::LIBPREFIX + lib unless lib =~ /^#{Platform::LIBPREFIX}/ + r = Platform::IS_WINDOWS || Platform::IS_MAC ? "\\.#{Platform::LIBSUFFIX}$" : "\\.so($|\\.[1234567890]+)" + lib += ".#{Platform::LIBSUFFIX}" unless lib =~ /#{r}/ + end + + lib + end + + # Exception raised when a function is not found in libraries + class NotFoundError < LoadError + def initialize(function, *libraries) + super("Function '#{function}' not found in [#{libraries[0].nil? ? 'current process' : libraries.join(", ")}]") + end + end + + # This module is the base to use native functions. + # + # A basic usage may be: + # require 'ffi' + # + # module Hello + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function 'puts', [ :string ], :int + # end + # + # Hello.puts("Hello, World") + # + # + module Library + CURRENT_PROCESS = FFI::CURRENT_PROCESS + LIBC = FFI::Platform::LIBC + + # @param mod extended object + # @return [nil] + # @raise {RuntimeError} if +mod+ is not a Module + # Test if extended object is a Module. If not, raise RuntimeError. + def self.extended(mod) + raise RuntimeError.new("must only be extended by module") unless mod.kind_of?(Module) + end + + + # @param [Array] names names of libraries to load + # @return [Array] + # @raise {LoadError} if a library cannot be opened + # Load native libraries. + def ffi_lib(*names) + raise LoadError.new("library names list must not be empty") if names.empty? + + lib_flags = defined?(@ffi_lib_flags) ? @ffi_lib_flags : FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL + ffi_libs = names.map do |name| + + if name == FFI::CURRENT_PROCESS + FFI::DynamicLibrary.open(nil, FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL) + + else + libnames = (name.is_a?(::Array) ? 
name : [ name ]).map(&:to_s).map { |n| [ n, FFI.map_library_name(n) ].uniq }.flatten.compact + lib = nil + errors = {} + + libnames.each do |libname| + begin + orig = libname + lib = FFI::DynamicLibrary.open(libname, lib_flags) + break if lib + + rescue Exception => ex + ldscript = false + if ex.message =~ /(([^ \t()])+\.so([^ \t:()])*):([ \t])*(invalid ELF header|file too short|invalid file format)/ + if File.read($1) =~ /(?:GROUP|INPUT) *\( *([^ \)]+)/ + libname = $1 + ldscript = true + end + end + + if ldscript + retry + else + # TODO better library lookup logic + unless libname.start_with?("/") || FFI::Platform.windows? + path = ['/usr/lib/','/usr/local/lib/','/opt/local/lib/'].find do |pth| + File.exist?(pth + libname) + end + if path + libname = path + libname + retry + end + end + + libr = (orig == libname ? orig : "#{orig} #{libname}") + errors[libr] = ex + end + end + end + + if lib.nil? + raise LoadError.new(errors.values.join(".\n")) + end + + # return the found lib + lib + end + end + + @ffi_libs = ffi_libs + end + + # Set the calling convention for {#attach_function} and {#callback} + # + # @see http://en.wikipedia.org/wiki/Stdcall#stdcall + # @note +:stdcall+ is typically used for attaching Windows API functions + # + # @param [Symbol] convention one of +:default+, +:stdcall+ + # @return [Symbol] the new calling convention + def ffi_convention(convention = nil) + @ffi_convention ||= :default + @ffi_convention = convention if convention + @ffi_convention + end + + # @see #ffi_lib + # @return [Array] array of currently loaded FFI libraries + # @raise [LoadError] if no libraries have been loaded (using {#ffi_lib}) + # Get FFI libraries loaded using {#ffi_lib}. + def ffi_libraries + raise LoadError.new("no library specified") if !defined?(@ffi_libs) || @ffi_libs.empty? + @ffi_libs + end + + # Flags used in {#ffi_lib}. + # + # This map allows you to supply symbols to {#ffi_lib_flags} instead of + # the actual constants. + FlagsMap = { + :global => DynamicLibrary::RTLD_GLOBAL, + :local => DynamicLibrary::RTLD_LOCAL, + :lazy => DynamicLibrary::RTLD_LAZY, + :now => DynamicLibrary::RTLD_NOW + } + + # Sets library flags for {#ffi_lib}. + # + # @example + # ffi_lib_flags(:lazy, :local) # => 5 + # + # @param [Symbol, …] flags (see {FlagsMap}) + # @return [Fixnum] the new value + def ffi_lib_flags(*flags) + @ffi_lib_flags = flags.inject(0) { |result, f| result | FlagsMap[f] } + end + + + ## + # @overload attach_function(func, args, returns, options = {}) + # @example attach function without an explicit name + # module Foo + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :malloc, [:size_t], :pointer + # end + # # now callable via Foo.malloc + # @overload attach_function(name, func, args, returns, options = {}) + # @example attach function with an explicit name + # module Bar + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :c_malloc, :malloc, [:size_t], :pointer + # end + # # now callable via Bar.c_malloc + # + # Attach C function +func+ to this module. 
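# Illustrative usage sketch (not from the vendored ffi sources above): passing
# FFI::CURRENT_PROCESS to ffi_lib (handled specially in the code above) resolves
# symbols already loaded into the running process instead of opening a new library.
# Assumes a typical Linux/macOS build where libc symbols are visible to that handle.
require 'ffi'

module CurrentProcess
  extend FFI::Library
  ffi_lib FFI::CURRENT_PROCESS
  attach_function :getpid, [], :int    # libc is already loaded into the Ruby process
end

CurrentProcess.getpid == Process.pid   # => true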
+ # + # + # @param [#to_s] name name of ruby method to attach as + # @param [#to_s] func name of C function to attach + # @param [Array] args an array of types + # @param [Symbol] returns type of return value + # @option options [Boolean] :blocking (@blocking) set to true if the C function is a blocking call + # @option options [Symbol] :convention (:default) calling convention (see {#ffi_convention}) + # @option options [FFI::Enums] :enums + # @option options [Hash] :type_map + # + # @return [FFI::VariadicInvoker] + # + # @raise [FFI::NotFoundError] if +func+ cannot be found in the attached libraries (see {#ffi_lib}) + def attach_function(name, func, args, returns = nil, options = nil) + mname, a2, a3, a4, a5 = name, func, args, returns, options + cname, arg_types, ret_type, opts = (a4 && (a2.is_a?(String) || a2.is_a?(Symbol))) ? [ a2, a3, a4, a5 ] : [ mname.to_s, a2, a3, a4 ] + + # Convert :foo to the native type + arg_types = arg_types.map { |e| find_type(e) } + options = { + :convention => ffi_convention, + :type_map => defined?(@ffi_typedefs) ? @ffi_typedefs : nil, + :blocking => defined?(@blocking) && @blocking, + :enums => defined?(@ffi_enums) ? @ffi_enums : nil, + } + + @blocking = false + options.merge!(opts) if opts && opts.is_a?(Hash) + + # Try to locate the function in any of the libraries + invokers = [] + ffi_libraries.each do |lib| + if invokers.empty? + begin + function = nil + function_names(cname, arg_types).find do |fname| + function = lib.find_function(fname) + end + raise LoadError unless function + + invokers << if arg_types.length > 0 && arg_types[arg_types.length - 1] == FFI::NativeType::VARARGS + VariadicInvoker.new(function, arg_types, find_type(ret_type), options) + + else + Function.new(find_type(ret_type), arg_types, function, options) + end + + rescue LoadError + end + end + end + invoker = invokers.compact.shift + raise FFI::NotFoundError.new(cname.to_s, ffi_libraries.map { |lib| lib.name }) unless invoker + + invoker.attach(self, mname.to_s) + invoker + end + + # @param [#to_s] name function name + # @param [Array] arg_types function's argument types + # @return [Array] + # This function returns a list of possible names to lookup. + # @note Function names on windows may be decorated if they are using stdcall. See + # * http://en.wikipedia.org/wiki/Name_mangling#C_name_decoration_in_Microsoft_Windows + # * http://msdn.microsoft.com/en-us/library/zxk0tw93%28v=VS.100%29.aspx + # * http://en.wikibooks.org/wiki/X86_Disassembly/Calling_Conventions#STDCALL + # Note that decorated names can be overridden via def files. Also note that the + # windows api, although using, doesn't have decorated names. 
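# Illustrative usage sketch (not from the vendored ffi sources above): as documented
# for attach_function, an FFI::NotFoundError is raised when no loaded library exports
# the requested symbol. The symbol name below is deliberately bogus.
require 'ffi'

module Missing
  extend FFI::Library
  ffi_lib FFI::Library::LIBC
  begin
    attach_function :definitely_not_a_real_symbol, [], :void
  rescue FFI::NotFoundError => e
    warn e.message   # "Function 'definitely_not_a_real_symbol' not found in [...]"
  end
end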
+ def function_names(name, arg_types) + result = [name.to_s] + if ffi_convention == :stdcall + # Get the size of each parameter + size = arg_types.inject(0) do |mem, arg| + size = arg.size + # The size must be a multiple of 4 + size += (4 - size) % 4 + mem + size + end + + result << "_#{name.to_s}@#{size}" # win32 + result << "#{name.to_s}@#{size}" # win64 + end + result + end + + # @overload attach_variable(mname, cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [#to_s] cname name of C variable to attach + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :c_myvar, :myvar, :long + # end + # # now callable via Bar.c_myvar + # @overload attach_variable(cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :myvar, :long + # end + # # now callable via Bar.myvar + # @return [DynamicLibrary::Symbol] + # @raise {FFI::NotFoundError} if +cname+ cannot be found in libraries + # + # Attach C variable +cname+ to this module. + def attach_variable(mname, a1, a2 = nil) + cname, type = a2 ? [ a1, a2 ] : [ mname.to_s, a1 ] + address = nil + ffi_libraries.each do |lib| + begin + address = lib.find_variable(cname.to_s) + break unless address.nil? + rescue LoadError + end + end + + raise FFI::NotFoundError.new(cname, ffi_libraries) if address.nil? || address.null? + if type.is_a?(Class) && type < FFI::Struct + # If it is a global struct, just attach directly to the pointer + s = s = type.new(address) # Assigning twice to suppress unused variable warning + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname} + end + code + + else + sc = Class.new(FFI::Struct) + sc.layout :gvar, find_type(type) + s = sc.new(address) + # + # Attach to this module as mname/mname= + # + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname}[:gvar] + end + def self.#{mname}=(value) + @@ffi_gvar_#{mname}[:gvar] = value + end + code + + end + + address + end + + + # @overload callback(name, params, ret) + # @param name callback name to add to type map + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @overload callback(params, ret) + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @return [FFI::CallbackInfo] + def callback(*args) + raise ArgumentError, "wrong number of arguments" if args.length < 2 || args.length > 3 + name, params, ret = if args.length == 3 + args + else + [ nil, args[0], args[1] ] + end + + native_params = params.map { |e| find_type(e) } + raise ArgumentError, "callbacks cannot have variadic parameters" if native_params.include?(FFI::Type::VARARGS) + options = Hash.new + options[:convention] = ffi_convention + options[:enums] = @ffi_enums if defined?(@ffi_enums) + ret_type = find_type(ret) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + cb = FFI::CallbackInfo.new(ret_type, native_params, options) + + # Add to the symbol -> type map (unless there was no name) + unless name.nil? 
+ typedef cb, name + end + + cb + end + + # Register or get an already registered type definition. + # + # To register a new type definition, +old+ should be a {FFI::Type}. +add+ + # is in this case the type definition. + # + # If +old+ is a {DataConverter}, a {Type::Mapped} is returned. + # + # If +old+ is +:enum+ + # * and +add+ is an +Array+, a call to {#enum} is made with +add+ as single parameter; + # * in others cases, +info+ is used to create a named enum. + # + # If +old+ is a key for type map, #typedef get +old+ type definition. + # + # @param [DataConverter, Symbol, Type] old + # @param [Symbol] add + # @param [Symbol] info + # @return [FFI::Enum, FFI::Type] + def typedef(old, add, info=nil) + @ffi_typedefs = Hash.new unless defined?(@ffi_typedefs) + + @ffi_typedefs[add] = if old.kind_of?(FFI::Type) + old + + elsif @ffi_typedefs.has_key?(old) + @ffi_typedefs[old] + + elsif old.is_a?(DataConverter) + FFI::Type::Mapped.new(old) + + elsif old == :enum + if add.kind_of?(Array) + self.enum(add) + else + self.enum(info, add) + end + + else + FFI.find_type(old) + end + end + + private + # Generic enum builder + # @param [Class] klass can be one of FFI::Enum or FFI::Bitmask + # @param args (see #enum or #bitmask) + def generic_enum(klass, *args) + native_type = args.first.kind_of?(FFI::Type) ? args.shift : nil + name, values = if args[0].kind_of?(Symbol) && args[1].kind_of?(Array) + [ args[0], args[1] ] + elsif args[0].kind_of?(Array) + [ nil, args[0] ] + else + [ nil, args ] + end + @ffi_enums = FFI::Enums.new unless defined?(@ffi_enums) + @ffi_enums << (e = native_type ? klass.new(native_type, values, name) : klass.new(values, name)) + + # If called with a name, add a typedef alias + typedef(e, name) if name + e + end + + public + # @overload enum(name, values) + # Create a named enum. + # @example + # enum :foo, [:zero, :one, :two] # named enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(*args) + # Create an unnamed enum. + # @example + # enum :zero, :one, :two # unnamed enum + # @param args values for enum + # @overload enum(values) + # Create an unnamed enum. + # @example + # enum [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [Array] values values for enum + # @overload enum(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :foo, [:zero, :one, :two] # named enum + # @param [FFI::Type] native_type native type for new enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(native_type, *args) + # Create an unnamed enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :zero, :one, :two # unnamed enum + # @param [FFI::Type] native_type native type for new enum + # @param args values for enum + # @overload enum(native_type, values) + # Create an unnamed enum and specify the native type. + # @example + # enum Type::UINT64, [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [FFI::Type] native_type native type for new enum + # @param [Array] values values for enum + # @return [FFI::Enum] + # Create a new {FFI::Enum}. 
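# Illustrative usage sketch (not from the vendored ffi sources above): the module-level
# enum helper builds an FFI::Enum, registers it under its tag via FFI::Enums, and
# typedefs the tag so it can be used directly in attach_function signatures.
require 'ffi'

module Fruits
  extend FFI::Library
  enum :fruit, [:apple, :banana, 5, :orange]   # apple=0, banana=5, orange=6
end

Fruits.enum_type(:fruit)[:orange]   # => 6
Fruits.enum_value(:banana)          # => 5
# :fruit can now appear as a parameter or return type in attach_function calls.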
+ def enum(*args) + generic_enum(FFI::Enum, *args) + end + + # @overload bitmask(name, values) + # Create a named bitmask + # @example + # bitmask :foo, [:red, :green, :blue] # bits 0,1,2 are used + # bitmask :foo, [:red, :green, 5, :blue] # bits 0,5,6 are used + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(*args) + # Create an unamed bitmask + # @example + # bm = bitmask :red, :green, :blue # bits 0,1,2 are used + # bm = bitmask :red, :green, 5, blue # bits 0,5,6 are used + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(values) + # Create an unamed bitmask + # @example + # bm = bitmask [:red, :green, :blue] # bits 0,1,2 are used + # bm = bitmask [:red, :green, 5, blue] # bits 0,5,6 are used + # @param [Array] values for new bitmask + # @overload bitmask(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, :foo, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(native_type, *args) + # @example + # bitmask FFI::Type::UINT64, :red, :green, :blue + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(native_type, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Array] values for new bitmask + # @return [FFI::Bitmask] + # Create a new FFI::Bitmask + def bitmask(*args) + generic_enum(FFI::Bitmask, *args) + end + + # @param name + # @return [FFI::Enum] + # Find an enum by name. + def enum_type(name) + @ffi_enums.find(name) if defined?(@ffi_enums) + end + + # @param symbol + # @return [FFI::Enum] + # Find an enum by a symbol it contains. + def enum_value(symbol) + @ffi_enums.__map_symbol(symbol) + end + + # @param [DataConverter, Type, Struct, Symbol] t type to find + # @return [Type] + # Find a type definition. + def find_type(t) + if t.kind_of?(Type) + t + + elsif defined?(@ffi_typedefs) && @ffi_typedefs.has_key?(t) + @ffi_typedefs[t] + + elsif t.is_a?(Class) && t < Struct + Type::POINTER + + elsif t.is_a?(DataConverter) + # Add a typedef so next time the converter is used, it hits the cache + typedef Type::Mapped.new(t), t + + end || FFI.find_type(t) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/managedstruct 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/managedstruct 2.rb new file mode 100644 index 0000000..0536280 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/managedstruct 2.rb @@ -0,0 +1,84 @@ +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + # + # FFI::ManagedStruct allows custom garbage-collection of your FFI::Structs. + # + # The typical use case would be when interacting with a library + # that has a nontrivial memory management design, such as a linked + # list or a binary tree. + # + # When the {Struct} instance is garbage collected, FFI::ManagedStruct will + # invoke the class's release() method during object finalization. + # + # @example Example usage: + # module MyLibrary + # ffi_lib "libmylibrary" + # attach_function :new_dlist, [], :pointer + # attach_function :destroy_dlist, [:pointer], :void + # end + # + # class DoublyLinkedList < FFI::ManagedStruct + # @@@ + # struct do |s| + # s.name 'struct dlist' + # s.include 'dlist.h' + # s.field :head, :pointer + # s.field :tail, :pointer + # end + # @@@ + # + # def self.release ptr + # MyLibrary.destroy_dlist(ptr) + # end + # end + # + # begin + # ptr = DoublyLinkedList.new(MyLibrary.new_dlist) + # # do something with the list + # end + # # struct is out of scope, and will be GC'd using DoublyLinkedList#release + # + # + class ManagedStruct < FFI::Struct + + # @overload initialize(pointer) + # @param [Pointer] pointer + # Create a new ManagedStruct which will invoke the class method #release on + # @overload initialize + # A new instance of FFI::ManagedStruct. + def initialize(pointer=nil) + raise NoMethodError, "release() not implemented for class #{self}" unless self.class.respond_to? :release + raise ArgumentError, "Must supply a pointer to memory for the Struct" unless pointer + super AutoPointer.new(pointer, self.class.method(:release)) + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/managedstruct.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/managedstruct.rb new file mode 100644 index 0000000..0536280 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/managedstruct.rb @@ -0,0 +1,84 @@ +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + # + # FFI::ManagedStruct allows custom garbage-collection of your FFI::Structs. + # + # The typical use case would be when interacting with a library + # that has a nontrivial memory management design, such as a linked + # list or a binary tree. + # + # When the {Struct} instance is garbage collected, FFI::ManagedStruct will + # invoke the class's release() method during object finalization. + # + # @example Example usage: + # module MyLibrary + # ffi_lib "libmylibrary" + # attach_function :new_dlist, [], :pointer + # attach_function :destroy_dlist, [:pointer], :void + # end + # + # class DoublyLinkedList < FFI::ManagedStruct + # @@@ + # struct do |s| + # s.name 'struct dlist' + # s.include 'dlist.h' + # s.field :head, :pointer + # s.field :tail, :pointer + # end + # @@@ + # + # def self.release ptr + # MyLibrary.destroy_dlist(ptr) + # end + # end + # + # begin + # ptr = DoublyLinkedList.new(MyLibrary.new_dlist) + # # do something with the list + # end + # # struct is out of scope, and will be GC'd using DoublyLinkedList#release + # + # + class ManagedStruct < FFI::Struct + + # @overload initialize(pointer) + # @param [Pointer] pointer + # Create a new ManagedStruct which will invoke the class method #release on + # @overload initialize + # A new instance of FFI::ManagedStruct. + def initialize(pointer=nil) + raise NoMethodError, "release() not implemented for class #{self}" unless self.class.respond_to? 
:release + raise ArgumentError, "Must supply a pointer to memory for the Struct" unless pointer + super AutoPointer.new(pointer, self.class.method(:release)) + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/memorypointer 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/memorypointer 2.rb new file mode 100644 index 0000000..9f07bc6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/memorypointer 2.rb @@ -0,0 +1 @@ +# This class is now implemented in C diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/memorypointer.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/memorypointer.rb new file mode 100644 index 0000000..9f07bc6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/memorypointer.rb @@ -0,0 +1 @@ +# This class is now implemented in C diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform 2.rb new file mode 100644 index 0000000..ebc9d8b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform 2.rb @@ -0,0 +1,179 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +require 'rbconfig' +module FFI + class PlatformError < LoadError; end + + # This module defines different constants and class methods to play with + # various platforms. 
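# ---------------------------------------------------------------------------
# Editor's note: a minimal sketch of the FFI::ManagedStruct pattern documented
# above, using an explicit #layout rather than the generated-struct (@@@) form
# shown in the vendored docs. The library name and the new_node/free_node
# functions are hypothetical.
# ---------------------------------------------------------------------------
require 'ffi'

module NodeLib
  extend FFI::Library
  ffi_lib 'nodelib'                        # hypothetical native library
  attach_function :new_node,  [], :pointer
  attach_function :free_node, [:pointer], :void
end

class Node < FFI::ManagedStruct
  layout :value, :int,
         :next,  :pointer

  # Invoked when the wrapping AutoPointer is finalized (i.e. when the Ruby
  # object is garbage collected), so the native memory is released for us.
  def self.release(ptr)
    NodeLib.free_node(ptr)
  end
end

node = Node.new(NodeLib.new_node)   # freed automatically via Node.release on GC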
+ module Platform + OS = case RbConfig::CONFIG['host_os'].downcase + when /linux/ + "linux" + when /darwin/ + "darwin" + when /freebsd/ + "freebsd" + when /netbsd/ + "netbsd" + when /openbsd/ + "openbsd" + when /dragonfly/ + "dragonflybsd" + when /sunos|solaris/ + "solaris" + when /mingw|mswin/ + "windows" + else + RbConfig::CONFIG['host_os'].downcase + end + + OSVERSION = RbConfig::CONFIG['host_os'].gsub(/[^\d]/, '').to_i + + CPU = RbConfig::CONFIG['host_cpu'] + + ARCH = case CPU.downcase + when /amd64|x86_64|x64/ + "x86_64" + when /i?86|x86|i86pc/ + "i386" + when /ppc64|powerpc64/ + "powerpc64" + when /ppc|powerpc/ + "powerpc" + when /sparcv9|sparc64/ + "sparcv9" + else + case RbConfig::CONFIG['host_cpu'] + when /^arm/ + "arm" + else + RbConfig::CONFIG['host_cpu'] + end + end + + private + # @param [String) os + # @return [Boolean] + # Test if current OS is +os+. + def self.is_os(os) + OS == os + end + + IS_GNU = defined?(GNU_LIBC) + IS_LINUX = is_os("linux") + IS_MAC = is_os("darwin") + IS_FREEBSD = is_os("freebsd") + IS_NETBSD = is_os("netbsd") + IS_OPENBSD = is_os("openbsd") + IS_DRAGONFLYBSD = is_os("dragonfly") + IS_SOLARIS = is_os("solaris") + IS_WINDOWS = is_os("windows") + IS_BSD = IS_MAC || IS_FREEBSD || IS_NETBSD || IS_OPENBSD || IS_DRAGONFLYBSD + + # Add the version for known ABI breaks + name_version = "12" if IS_FREEBSD && OSVERSION >= 12 # 64-bit inodes + + NAME = "#{ARCH}-#{OS}#{name_version}" + CONF_DIR = File.join(File.dirname(__FILE__), 'platform', NAME) + + public + + LIBPREFIX = case OS + when /windows|msys/ + '' + when /cygwin/ + 'cyg' + else + 'lib' + end + + LIBSUFFIX = case OS + when /darwin/ + 'dylib' + when /linux|bsd|solaris/ + 'so' + when /windows|cygwin|msys/ + 'dll' + else + # Punt and just assume a sane unix (i.e. anything but AIX) + 'so' + end + + LIBC = if IS_WINDOWS + if RbConfig::CONFIG['host_os'] =~ /mingw/i + RbConfig::CONFIG['RUBY_SO_NAME'].split('-')[-2] + '.dll' + else + "ucrtbase.dll" + end + elsif IS_GNU + GNU_LIBC + elsif OS == 'cygwin' + "cygwin1.dll" + elsif OS == 'msys' + # Not sure how msys 1.0 behaves, tested on MSYS2. + "msys-2.0.dll" + else + "#{LIBPREFIX}c.#{LIBSUFFIX}" + end + + # Test if current OS is a *BSD (include MAC) + # @return [Boolean] + def self.bsd? + IS_BSD + end + + # Test if current OS is Windows + # @return [Boolean] + def self.windows? + IS_WINDOWS + end + + # Test if current OS is Mac OS + # @return [Boolean] + def self.mac? + IS_MAC + end + + # Test if current OS is Solaris (Sun OS) + # @return [Boolean] + def self.solaris? + IS_SOLARIS + end + + # Test if current OS is a unix OS + # @return [Boolean] + def self.unix? + !IS_WINDOWS + end + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform.rb new file mode 100644 index 0000000..ebc9d8b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform.rb @@ -0,0 +1,179 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +require 'rbconfig' +module FFI + class PlatformError < LoadError; end + + # This module defines different constants and class methods to play with + # various platforms. + module Platform + OS = case RbConfig::CONFIG['host_os'].downcase + when /linux/ + "linux" + when /darwin/ + "darwin" + when /freebsd/ + "freebsd" + when /netbsd/ + "netbsd" + when /openbsd/ + "openbsd" + when /dragonfly/ + "dragonflybsd" + when /sunos|solaris/ + "solaris" + when /mingw|mswin/ + "windows" + else + RbConfig::CONFIG['host_os'].downcase + end + + OSVERSION = RbConfig::CONFIG['host_os'].gsub(/[^\d]/, '').to_i + + CPU = RbConfig::CONFIG['host_cpu'] + + ARCH = case CPU.downcase + when /amd64|x86_64|x64/ + "x86_64" + when /i?86|x86|i86pc/ + "i386" + when /ppc64|powerpc64/ + "powerpc64" + when /ppc|powerpc/ + "powerpc" + when /sparcv9|sparc64/ + "sparcv9" + else + case RbConfig::CONFIG['host_cpu'] + when /^arm/ + "arm" + else + RbConfig::CONFIG['host_cpu'] + end + end + + private + # @param [String) os + # @return [Boolean] + # Test if current OS is +os+. + def self.is_os(os) + OS == os + end + + IS_GNU = defined?(GNU_LIBC) + IS_LINUX = is_os("linux") + IS_MAC = is_os("darwin") + IS_FREEBSD = is_os("freebsd") + IS_NETBSD = is_os("netbsd") + IS_OPENBSD = is_os("openbsd") + IS_DRAGONFLYBSD = is_os("dragonfly") + IS_SOLARIS = is_os("solaris") + IS_WINDOWS = is_os("windows") + IS_BSD = IS_MAC || IS_FREEBSD || IS_NETBSD || IS_OPENBSD || IS_DRAGONFLYBSD + + # Add the version for known ABI breaks + name_version = "12" if IS_FREEBSD && OSVERSION >= 12 # 64-bit inodes + + NAME = "#{ARCH}-#{OS}#{name_version}" + CONF_DIR = File.join(File.dirname(__FILE__), 'platform', NAME) + + public + + LIBPREFIX = case OS + when /windows|msys/ + '' + when /cygwin/ + 'cyg' + else + 'lib' + end + + LIBSUFFIX = case OS + when /darwin/ + 'dylib' + when /linux|bsd|solaris/ + 'so' + when /windows|cygwin|msys/ + 'dll' + else + # Punt and just assume a sane unix (i.e. anything but AIX) + 'so' + end + + LIBC = if IS_WINDOWS + if RbConfig::CONFIG['host_os'] =~ /mingw/i + RbConfig::CONFIG['RUBY_SO_NAME'].split('-')[-2] + '.dll' + else + "ucrtbase.dll" + end + elsif IS_GNU + GNU_LIBC + elsif OS == 'cygwin' + "cygwin1.dll" + elsif OS == 'msys' + # Not sure how msys 1.0 behaves, tested on MSYS2. 
+ "msys-2.0.dll" + else + "#{LIBPREFIX}c.#{LIBSUFFIX}" + end + + # Test if current OS is a *BSD (include MAC) + # @return [Boolean] + def self.bsd? + IS_BSD + end + + # Test if current OS is Windows + # @return [Boolean] + def self.windows? + IS_WINDOWS + end + + # Test if current OS is Mac OS + # @return [Boolean] + def self.mac? + IS_MAC + end + + # Test if current OS is Solaris (Sun OS) + # @return [Boolean] + def self.solaris? + IS_SOLARIS + end + + # Test if current OS is a unix OS + # @return [Boolean] + def self.unix? + !IS_WINDOWS + end + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types 2.conf new file mode 100644 index 0000000..8255343 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong 
+rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types.conf new file mode 100644 index 0000000..8255343 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = 
long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int 
+rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types 2.conf new file mode 100644 index 0000000..c296932 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint 
+rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types.conf new file mode 100644 index 0000000..c296932 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint 
+rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t 
= long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types 2.conf new file mode 100644 index 0000000..4cd2438 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong 
+rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types.conf new file mode 100644 index 0000000..4cd2438 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = 
uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types 2.conf new file mode 100644 index 0000000..cfbb90f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types 2.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = 
ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int 
+rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types.conf new file mode 100644 index 0000000..cfbb90f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long 
+rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int 
+rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types 2.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types 2.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int 
+rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar 
+rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = 
int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint 
+rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types 2.conf new file mode 100644 index 0000000..a070d39 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types 2.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long 
+rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types.conf new file mode 100644 index 0000000..a070d39 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long 
+rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long 
+rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types 2.conf new file mode 100644 index 0000000..93f6b86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types 2.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types.conf new file mode 100644 index 0000000..93f6b86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types 2.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types 2.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int 
+rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types.conf new file mode 100644 
index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long 
+rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types 2.conf new file mode 100644 index 0000000..6c882d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types 2.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint 
+rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort 
+rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types.conf new file mode 100644 index 0000000..6c882d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint 
+rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types 2.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types 2.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer 
+rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string 
+rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint 
+rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint 
+rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types 2.conf new file mode 100644 index 0000000..fa2fa8c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types 2.conf @@ -0,0 +1,107 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsid_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long 
+rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__pthread_key = int +rbx.platform.typedef.__pthread_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__sigset_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.fsid_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.pthread_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sigset_t = ulong +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types.conf new file mode 100644 index 0000000..fa2fa8c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types.conf @@ -0,0 +1,107 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long 
+rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsid_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__pthread_key = int +rbx.platform.typedef.__pthread_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__sigset_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.fsid_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.pthread_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sigset_t = ulong +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int 
+rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types 2.conf new file mode 100644 index 0000000..feb6bc4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types 2.conf @@ -0,0 +1,103 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint 
+rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types.conf new file mode 100644 index 0000000..feb6bc4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types.conf @@ -0,0 +1,103 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long 
+rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types 2.conf new file mode 100644 index 0000000..a5aba89 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types 2.conf @@ -0,0 +1,126 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long 
+rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar 
+rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types.conf new file mode 100644 index 0000000..a5aba89 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types.conf @@ -0,0 +1,126 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = 
ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types 2.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t 
= int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = 
int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort 
+rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types 2.conf new file mode 100644 index 0000000..22a2414 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types 2.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long 
+rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint 
+rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types.conf new file mode 100644 index 0000000..22a2414 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong 
+rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types 2.conf new file mode 100644 index 0000000..a5d0b05 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types 2.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = int +rbx.platform.typedef._sigset_t = ulong +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int 
+rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rsize_t = uint +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types.conf new file mode 100644 index 0000000..a5d0b05 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = int +rbx.platform.typedef._sigset_t = ulong +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rsize_t = uint +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint 
+rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types 2.conf new file mode 100644 index 0000000..e0eeecc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short 
+rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types.conf new file mode 100644 index 0000000..e0eeecc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint 
+rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types 2.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = 
long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long 
+rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint 
+rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types 2.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = 
int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint 
+rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types 2.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int 
+rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long 
+rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint 
+rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types 2.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int 
+rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong 
+rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types 2.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int 
+rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long 
+rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar 
+rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types 2.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long 
+rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer 
+rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types 2.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = 
long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types.conf @@ -0,0 
+1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long 
+rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types 2.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint 
+rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong 
+rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types 2.conf new file mode 100644 index 0000000..cbd20e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types 2.conf @@ -0,0 +1,180 @@ +rbx.platform.typedef.UTF32Char = uint +rbx.platform.typedef.UniChar = ushort +rbx.platform.typedef.__cptr32 = string +rbx.platform.typedef.__cptr64 = ulong_long +rbx.platform.typedef.__long32_t = long +rbx.platform.typedef.__long64_t = int +rbx.platform.typedef.__ptr32 = pointer +rbx.platform.typedef.__ptr64 = ulong_long +rbx.platform.typedef.__ulong32_t = ulong +rbx.platform.typedef.__ulong64_t = uint +rbx.platform.typedef.aptx_t = ushort +rbx.platform.typedef.blkcnt32_t = int +rbx.platform.typedef.blkcnt64_t = ulong_long +rbx.platform.typedef.blkcnt_t = int +rbx.platform.typedef.blksize32_t = int +rbx.platform.typedef.blksize64_t = ulong_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.boolean_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.chan_t = int +rbx.platform.typedef.class_id_t = uint 
+rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = long_long +rbx.platform.typedef.cnt64_t = long_long +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.crid_t = int +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev32_t = uint +rbx.platform.typedef.dev64_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.esid_t = uint +rbx.platform.typedef.ext_t = int +rbx.platform.typedef.fpos64_t = long_long +rbx.platform.typedef.fpos_t = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino32_t = uint +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16 = short +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32 = int +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int32long64_t = int +rbx.platform.typedef.int64 = long_long +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8 = char +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intfast_t = int +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.krpn_t = int +rbx.platform.typedef.kvmhandle_t = ulong +rbx.platform.typedef.kvmid_t = long +rbx.platform.typedef.kvpn_t = int +rbx.platform.typedef.level_t = int +rbx.platform.typedef.liobn_t = uint +rbx.platform.typedef.long32int64_t = long +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.mid_t = pointer +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.mtyp_t = long +rbx.platform.typedef.nlink_t = short +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.paddr_t = long +rbx.platform.typedef.pdtx_t = int +rbx.platform.typedef.pid32_t = int +rbx.platform.typedef.pid64_t = ulong_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pshift_t = ushort +rbx.platform.typedef.psize_t = long_long +rbx.platform.typedef.psx_t = short +rbx.platform.typedef.ptex_t = ulong +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.rpn64_t = long_long +rbx.platform.typedef.rpn_t = int +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.signal_t = int +rbx.platform.typedef.size64_t = ulong_long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.slab_t[12] = char +rbx.platform.typedef.snidx_t = int +rbx.platform.typedef.socklen_t = ulong +rbx.platform.typedef.soff_t = int +rbx.platform.typedef.sshift_t = ushort +rbx.platform.typedef.ssize64_t = long_long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.swhatx_t = ulong +rbx.platform.typedef.tid32_t = int +rbx.platform.typedef.tid64_t = ulong_long +rbx.platform.typedef.tid_t = int 
+rbx.platform.typedef.time32_t = int +rbx.platform.typedef.time64_t = long_long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer32_t = int +rbx.platform.typedef.timer64_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16 = ushort +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32 = uint +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64 = ulong_long +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8 = uchar +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar = uchar +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint32long64_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintfast_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong32int64_t = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unidx_t = int +rbx.platform.typedef.unit_addr_t = ulong_long +rbx.platform.typedef.ureg_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.va_list = string +rbx.platform.typedef.vmfkey_t = uint +rbx.platform.typedef.vmhandle32_t = uint +rbx.platform.typedef.vmhandle_t = ulong +rbx.platform.typedef.vmhwkey_t = int +rbx.platform.typedef.vmid32_t = int +rbx.platform.typedef.vmid64_t = long_long +rbx.platform.typedef.vmid_t = long +rbx.platform.typedef.vmidx_t = int +rbx.platform.typedef.vmkey_t = int +rbx.platform.typedef.vmlpghandle_t = ulong +rbx.platform.typedef.vmm_lock_t = int +rbx.platform.typedef.vmnodeidx_t = int +rbx.platform.typedef.vmprkey_t = uint +rbx.platform.typedef.vmsize_t = int +rbx.platform.typedef.vpn_t = int +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = uint +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types.conf new file mode 100644 index 0000000..cbd20e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types.conf @@ -0,0 +1,180 @@ +rbx.platform.typedef.UTF32Char = uint +rbx.platform.typedef.UniChar = ushort +rbx.platform.typedef.__cptr32 = string +rbx.platform.typedef.__cptr64 = ulong_long +rbx.platform.typedef.__long32_t = long +rbx.platform.typedef.__long64_t = int +rbx.platform.typedef.__ptr32 = pointer +rbx.platform.typedef.__ptr64 = ulong_long +rbx.platform.typedef.__ulong32_t = ulong +rbx.platform.typedef.__ulong64_t = uint +rbx.platform.typedef.aptx_t = ushort +rbx.platform.typedef.blkcnt32_t = int 
+rbx.platform.typedef.blkcnt64_t = ulong_long +rbx.platform.typedef.blkcnt_t = int +rbx.platform.typedef.blksize32_t = int +rbx.platform.typedef.blksize64_t = ulong_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.boolean_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.chan_t = int +rbx.platform.typedef.class_id_t = uint +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = long_long +rbx.platform.typedef.cnt64_t = long_long +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.crid_t = int +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev32_t = uint +rbx.platform.typedef.dev64_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.esid_t = uint +rbx.platform.typedef.ext_t = int +rbx.platform.typedef.fpos64_t = long_long +rbx.platform.typedef.fpos_t = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino32_t = uint +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16 = short +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32 = int +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int32long64_t = int +rbx.platform.typedef.int64 = long_long +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8 = char +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intfast_t = int +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.krpn_t = int +rbx.platform.typedef.kvmhandle_t = ulong +rbx.platform.typedef.kvmid_t = long +rbx.platform.typedef.kvpn_t = int +rbx.platform.typedef.level_t = int +rbx.platform.typedef.liobn_t = uint +rbx.platform.typedef.long32int64_t = long +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.mid_t = pointer +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.mtyp_t = long +rbx.platform.typedef.nlink_t = short +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.paddr_t = long +rbx.platform.typedef.pdtx_t = int +rbx.platform.typedef.pid32_t = int +rbx.platform.typedef.pid64_t = ulong_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pshift_t = ushort +rbx.platform.typedef.psize_t = long_long +rbx.platform.typedef.psx_t = short +rbx.platform.typedef.ptex_t = ulong +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.rpn64_t = long_long +rbx.platform.typedef.rpn_t = int +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.signal_t = int +rbx.platform.typedef.size64_t = ulong_long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.slab_t[12] = char +rbx.platform.typedef.snidx_t = int +rbx.platform.typedef.socklen_t = ulong +rbx.platform.typedef.soff_t = int 
+rbx.platform.typedef.sshift_t = ushort +rbx.platform.typedef.ssize64_t = long_long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.swhatx_t = ulong +rbx.platform.typedef.tid32_t = int +rbx.platform.typedef.tid64_t = ulong_long +rbx.platform.typedef.tid_t = int +rbx.platform.typedef.time32_t = int +rbx.platform.typedef.time64_t = long_long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer32_t = int +rbx.platform.typedef.timer64_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16 = ushort +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32 = uint +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64 = ulong_long +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8 = uchar +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar = uchar +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint32long64_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintfast_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong32int64_t = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unidx_t = int +rbx.platform.typedef.unit_addr_t = ulong_long +rbx.platform.typedef.ureg_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.va_list = string +rbx.platform.typedef.vmfkey_t = uint +rbx.platform.typedef.vmhandle32_t = uint +rbx.platform.typedef.vmhandle_t = ulong +rbx.platform.typedef.vmhwkey_t = int +rbx.platform.typedef.vmid32_t = int +rbx.platform.typedef.vmid64_t = long_long +rbx.platform.typedef.vmid_t = long +rbx.platform.typedef.vmidx_t = int +rbx.platform.typedef.vmkey_t = int +rbx.platform.typedef.vmlpghandle_t = ulong +rbx.platform.typedef.vmm_lock_t = int +rbx.platform.typedef.vmnodeidx_t = int +rbx.platform.typedef.vmprkey_t = uint +rbx.platform.typedef.vmsize_t = int +rbx.platform.typedef.vpn_t = int +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = uint +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types 2.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types 2.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = 
ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong 
+rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long 
+rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types 2.conf new file mode 100644 index 0000000..42eec13 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types 2.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint 
+rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long 
+rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types.conf new file mode 100644 index 0000000..42eec13 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int 
+rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types 2.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types 2.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long 
+rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int 
+rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long 
+rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong 
+rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types 2.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types 2.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t 
= long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = 
ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types 2.conf new file mode 100644 index 0000000..291b032 --- 
/dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = 
uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types.conf new file mode 100644 index 0000000..291b032 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long 
+rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types 2.conf new file mode 100644 index 0000000..32a7feb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long 
+rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types.conf new file mode 100644 index 0000000..32a7feb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = 
char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types 2.conf new file mode 100644 index 0000000..aea09d3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long 
+rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar 
+rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types.conf new file mode 100644 index 0000000..aea09d3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short 
+rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types 2.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long 
+rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = 
int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types 2.conf new file mode 100644 index 0000000..7626bfc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types 2.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint 
+rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types.conf new file mode 100644 index 0000000..7626bfc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong 
+rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types 2.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types 2.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t 
= ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint 
+rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = 
int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types 2.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long 
+rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = 
ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong 
+rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types 2.conf new file mode 100644 index 0000000..5bef6d7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types 2.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int64 +rbx.platform.typedef.size_t = uint64 +rbx.platform.typedef.ssize_t = int64 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types.conf new file mode 100644 index 0000000..5bef6d7 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int64 +rbx.platform.typedef.size_t = uint64 +rbx.platform.typedef.ssize_t = int64 diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types 2.conf new file mode 100644 index 0000000..68841bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types 2.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = long +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_string_t[37] = char +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int 
+rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.rsize_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sae_associd_t = uint +rbx.platform.typedef.sae_connid_t = uint +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_off_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types.conf new file mode 100644 index 0000000..68841bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint 
+rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = long +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_string_t[37] = char +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.rsize_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sae_associd_t = uint +rbx.platform.typedef.sae_connid_t = uint +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort 
+rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_off_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types 2.conf new file mode 100644 index 0000000..889f6a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types 2.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intlp_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar 
+rbx.platform.typedef.__uintlp_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_daddr_t = uint +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.v_caddr_t = pointer +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types.conf new file 
mode 100644 index 0000000..889f6a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intlp_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintlp_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int 
+rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_daddr_t = uint +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.v_caddr_t = pointer +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types 2.conf new file mode 100644 index 0000000..8dbe370 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long 
+rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong 
+rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types.conf new file mode 100644 index 0000000..8dbe370 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = 
long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types 2.conf new file mode 100644 index 0000000..31b1073 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types 2.conf @@ -0,0 +1,158 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__char16_t = ushort +rbx.platform.typedef.__char32_t = uint +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = long +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char 
+rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__rman_res_t = ulong +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = long +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = ulong +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = ulong +rbx.platform.typedef.__vm_paddr_t = ulong +rbx.platform.typedef.__vm_size_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.cap_ioctl_t = ulong +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.ksize_t = ulong +rbx.platform.typedef.kvaddr_t = ulong +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort 
+rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.rman_res_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sbintime_t = long +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_register_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = ulong +rbx.platform.typedef.vm_ooffset_t = long +rbx.platform.typedef.vm_paddr_t = ulong +rbx.platform.typedef.vm_pindex_t = ulong +rbx.platform.typedef.vm_size_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types.conf new file mode 100644 index 0000000..31b1073 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types.conf @@ -0,0 +1,158 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__char16_t = ushort +rbx.platform.typedef.__char32_t = uint +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = long +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t 
= long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__rman_res_t = ulong +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = long +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = ulong +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = ulong +rbx.platform.typedef.__vm_paddr_t = ulong +rbx.platform.typedef.__vm_size_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.cap_ioctl_t = ulong +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.ksize_t = ulong +rbx.platform.typedef.kvaddr_t = ulong +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.rman_res_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sbintime_t = long 
+rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_register_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = ulong +rbx.platform.typedef.vm_ooffset_t = long +rbx.platform.typedef.vm_paddr_t = ulong +rbx.platform.typedef.vm_pindex_t = ulong +rbx.platform.typedef.vm_size_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types 2.conf new file mode 100644 index 0000000..060f161 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types 2.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong 
+rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = long +rbx.platform.typedef.int_fast32_t = long +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ulong +rbx.platform.typedef.uint_fast32_t = ulong +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types.conf new file mode 100644 index 0000000..060f161 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = long +rbx.platform.typedef.int_fast32_t = long +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint 
+rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ulong +rbx.platform.typedef.uint_fast32_t = ulong +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types 2.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types 2.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = 
ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar 
+rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int 
+rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types 2.conf new file mode 100644 index 0000000..6abc9c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types 2.conf @@ -0,0 +1,134 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char 
+rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint 
+rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types.conf new file mode 100644 index 0000000..6abc9c0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types.conf @@ -0,0 +1,134 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort 
+rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types 2.conf new file mode 100644 index 0000000..a7890d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types 2.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int 
+rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = int +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt64_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = int +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = int +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = uint +rbx.platform.typedef.minor_t = uint +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.poolid_t = int +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = int +rbx.platform.typedef.t_uscalar_t = uint +rbx.platform.typedef.taskid_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint 
+rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types.conf new file mode 100644 index 0000000..a7890d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = int +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt64_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = int +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = int +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = uint +rbx.platform.typedef.minor_t = uint +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.o_dev_t = short 
+rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.poolid_t = int +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = int +rbx.platform.typedef.t_uscalar_t = uint +rbx.platform.typedef.taskid_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = int diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types 2.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types 2.conf new file mode 100644 index 0000000..5bdbbde --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types 2.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = long_long +rbx.platform.typedef._sigset_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = 
char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long_long +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = long_long +rbx.platform.typedef.ptrdiff_t = long_long +rbx.platform.typedef.rsize_t = ulong_long +rbx.platform.typedef.size_t = ulong_long +rbx.platform.typedef.ssize_t = long_long +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong_long +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types.conf b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types.conf new file mode 100644 index 0000000..5bdbbde --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = long_long +rbx.platform.typedef._sigset_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long_long +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = long_long +rbx.platform.typedef.ptrdiff_t = long_long +rbx.platform.typedef.rsize_t = ulong_long +rbx.platform.typedef.size_t = ulong_long +rbx.platform.typedef.ssize_t = long_long +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t 
= uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong_long +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/pointer 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/pointer 2.rb new file mode 100644 index 0000000..a5ee655 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/pointer 2.rb @@ -0,0 +1,167 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' + +# NOTE: all method definitions in this file are conditional on +# whether they are not already defined. This is needed because +# some Ruby implementations (e.g., TruffleRuby) might already +# provide these methods due to using FFI internally, and we +# should not override them to avoid warnings. + +module FFI + class Pointer + + # Pointer size + SIZE = Platform::ADDRESS_SIZE / 8 unless const_defined?(:SIZE) + + # Return the size of a pointer on the current platform, in bytes + # @return [Numeric] + def self.size + SIZE + end unless respond_to?(:size) + + # @param [nil,Numeric] len length of string to return + # @return [String] + # Read pointer's contents as a string, or the first +len+ bytes of the + # equivalent string if +len+ is not +nil+. + def read_string(len=nil) + if len + return ''.b if len == 0 + get_bytes(0, len) + else + get_string(0) + end + end unless method_defined?(:read_string) + + # @param [Numeric] len length of string to return + # @return [String] + # Read the first +len+ bytes of pointer's contents as a string. 
+ # + # Same as: + # ptr.read_string(len) # with len not nil + def read_string_length(len) + get_bytes(0, len) + end unless method_defined?(:read_string_length) + + # @return [String] + # Read pointer's contents as a string. + # + # Same as: + # ptr.read_string # with no len + def read_string_to_null + get_string(0) + end unless method_defined?(:read_string_to_null) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +len+ first bytes of +str+ in pointer's contents. + # + # Same as: + # ptr.write_string(str, len) # with len not nil + def write_string_length(str, len) + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string_length) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +str+ in pointer's contents, or first +len+ bytes if + # +len+ is not +nil+. + def write_string(str, len=nil) + len = str.bytesize unless len + # Write the string data without NUL termination + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string) + + # @param [Type] type type of data to read from pointer's contents + # @param [Symbol] reader method to send to +self+ to read +type+ + # @param [Numeric] length + # @return [Array] + # Read an array of +type+ of length +length+. + # @example + # ptr.read_array_of_type(TYPE_UINT8, :read_uint8, 4) # -> [1, 2, 3, 4] + def read_array_of_type(type, reader, length) + ary = [] + size = FFI.type_size(type) + tmp = self + length.times { |j| + ary << tmp.send(reader) + tmp += size unless j == length-1 # avoid OOB + } + ary + end unless method_defined?(:read_array_of_type) + + # @param [Type] type type of data to write to pointer's contents + # @param [Symbol] writer method to send to +self+ to write +type+ + # @param [Array] ary + # @return [self] + # Write +ary+ in pointer's contents as +type+. + # @example + # ptr.write_array_of_type(TYPE_UINT8, :put_uint8, [1, 2, 3 ,4]) + def write_array_of_type(type, writer, ary) + size = FFI.type_size(type) + ary.each_with_index { |val, i| + break unless i < self.size + self.send(writer, i * size, val) + } + self + end unless method_defined?(:write_array_of_type) + + # @return [self] + def to_ptr + self + end unless method_defined?(:to_ptr) + + # @param [Symbol,Type] type of data to read + # @return [Object] + # Read pointer's contents as +type+ + # + # Same as: + # ptr.get(type, 0) + def read(type) + get(type, 0) + end unless method_defined?(:read) + + # @param [Symbol,Type] type of data to read + # @param [Object] value to write + # @return [nil] + # Write +value+ of type +type+ to pointer's content + # + # Same as: + # ptr.put(type, 0) + def write(type, value) + put(type, 0, value) + end unless method_defined?(:write) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/pointer.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/pointer.rb new file mode 100644 index 0000000..a5ee655 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/pointer.rb @@ -0,0 +1,167 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' + +# NOTE: all method definitions in this file are conditional on +# whether they are not already defined. This is needed because +# some Ruby implementations (e.g., TruffleRuby) might already +# provide these methods due to using FFI internally, and we +# should not override them to avoid warnings. + +module FFI + class Pointer + + # Pointer size + SIZE = Platform::ADDRESS_SIZE / 8 unless const_defined?(:SIZE) + + # Return the size of a pointer on the current platform, in bytes + # @return [Numeric] + def self.size + SIZE + end unless respond_to?(:size) + + # @param [nil,Numeric] len length of string to return + # @return [String] + # Read pointer's contents as a string, or the first +len+ bytes of the + # equivalent string if +len+ is not +nil+. + def read_string(len=nil) + if len + return ''.b if len == 0 + get_bytes(0, len) + else + get_string(0) + end + end unless method_defined?(:read_string) + + # @param [Numeric] len length of string to return + # @return [String] + # Read the first +len+ bytes of pointer's contents as a string. + # + # Same as: + # ptr.read_string(len) # with len not nil + def read_string_length(len) + get_bytes(0, len) + end unless method_defined?(:read_string_length) + + # @return [String] + # Read pointer's contents as a string. + # + # Same as: + # ptr.read_string # with no len + def read_string_to_null + get_string(0) + end unless method_defined?(:read_string_to_null) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +len+ first bytes of +str+ in pointer's contents. + # + # Same as: + # ptr.write_string(str, len) # with len not nil + def write_string_length(str, len) + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string_length) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +str+ in pointer's contents, or first +len+ bytes if + # +len+ is not +nil+. 
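A minimal usage sketch of the Pointer string and typed read/write helpers defined in this vendored file, assuming only that the ffi gem is loadable; the buffer size and values are illustrative and not taken from this repository:

    require 'ffi'

    buf = FFI::MemoryPointer.new(:uint8, 16)  # MemoryPointer inherits these Pointer helpers
    buf.write_string("hello")                 # writes the bytes without a NUL terminator
    buf.read_string(5)                        # => "hello"
    buf.write(:int32, 42)                     # same as buf.put(:int32, 0, 42)
    buf.read(:int32)                          # => 42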
+ def write_string(str, len=nil) + len = str.bytesize unless len + # Write the string data without NUL termination + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string) + + # @param [Type] type type of data to read from pointer's contents + # @param [Symbol] reader method to send to +self+ to read +type+ + # @param [Numeric] length + # @return [Array] + # Read an array of +type+ of length +length+. + # @example + # ptr.read_array_of_type(TYPE_UINT8, :read_uint8, 4) # -> [1, 2, 3, 4] + def read_array_of_type(type, reader, length) + ary = [] + size = FFI.type_size(type) + tmp = self + length.times { |j| + ary << tmp.send(reader) + tmp += size unless j == length-1 # avoid OOB + } + ary + end unless method_defined?(:read_array_of_type) + + # @param [Type] type type of data to write to pointer's contents + # @param [Symbol] writer method to send to +self+ to write +type+ + # @param [Array] ary + # @return [self] + # Write +ary+ in pointer's contents as +type+. + # @example + # ptr.write_array_of_type(TYPE_UINT8, :put_uint8, [1, 2, 3 ,4]) + def write_array_of_type(type, writer, ary) + size = FFI.type_size(type) + ary.each_with_index { |val, i| + break unless i < self.size + self.send(writer, i * size, val) + } + self + end unless method_defined?(:write_array_of_type) + + # @return [self] + def to_ptr + self + end unless method_defined?(:to_ptr) + + # @param [Symbol,Type] type of data to read + # @return [Object] + # Read pointer's contents as +type+ + # + # Same as: + # ptr.get(type, 0) + def read(type) + get(type, 0) + end unless method_defined?(:read) + + # @param [Symbol,Type] type of data to read + # @param [Object] value to write + # @return [nil] + # Write +value+ of type +type+ to pointer's content + # + # Same as: + # ptr.put(type, 0) + def write(type, value) + put(type, 0, value) + end unless method_defined?(:write) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct 2.rb new file mode 100644 index 0000000..7028258 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct 2.rb @@ -0,0 +1,316 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' +require 'ffi/struct_layout' +require 'ffi/struct_layout_builder' +require 'ffi/struct_by_reference' + +module FFI + + class Struct + + # Get struct size + # @return [Numeric] + def size + self.class.size + end + + # @return [Fixnum] Struct alignment + def alignment + self.class.alignment + end + alias_method :align, :alignment + + # (see FFI::StructLayout#offset_of) + def offset_of(name) + self.class.offset_of(name) + end + + # (see FFI::StructLayout#members) + def members + self.class.members + end + + # @return [Array] + # Get array of values from Struct fields. + def values + members.map { |m| self[m] } + end + + # (see FFI::StructLayout#offsets) + def offsets + self.class.offsets + end + + # Clear the struct content. + # @return [self] + def clear + pointer.clear + self + end + + # Get {Pointer} to struct content. + # @return [AbstractMemory] + def to_ptr + pointer + end + + # Get struct size + # @return [Numeric] + def self.size + defined?(@layout) ? @layout.size : defined?(@size) ? @size : 0 + end + + # set struct size + # @param [Numeric] size + # @return [size] + def self.size=(size) + raise ArgumentError, "Size already set" if defined?(@size) || defined?(@layout) + @size = size + end + + # @return (see Struct#alignment) + def self.alignment + @layout.alignment + end + + # (see FFI::Type#members) + def self.members + @layout.members + end + + # (see FFI::StructLayout#offsets) + def self.offsets + @layout.offsets + end + + # (see FFI::StructLayout#offset_of) + def self.offset_of(name) + @layout.offset_of(name) + end + + def self.in + ptr(:in) + end + + def self.out + ptr(:out) + end + + def self.ptr(flags = :inout) + @ref_data_type ||= Type::Mapped.new(StructByReference.new(self)) + end + + def self.val + @val_data_type ||= StructByValue.new(self) + end + + def self.by_value + self.val + end + + def self.by_ref(flags = :inout) + self.ptr(flags) + end + + class ManagedStructConverter < StructByReference + + # @param [Struct] struct_class + def initialize(struct_class) + super(struct_class) + + raise NoMethodError, "release() not implemented for class #{struct_class}" unless struct_class.respond_to? :release + @method = struct_class.method(:release) + end + + # @param [Pointer] ptr + # @param [nil] ctx + # @return [Struct] + def from_native(ptr, ctx) + struct_class.new(AutoPointer.new(ptr, @method)) + end + end + + def self.auto_ptr + @managed_type ||= Type::Mapped.new(ManagedStructConverter.new(self)) + end + + + class << self + public + + # @return [StructLayout] + # @overload layout + # @return [StructLayout] + # Get struct layout. + # @overload layout(*spec) + # @param [Array,Array(Hash)] spec + # @return [StructLayout] + # Create struct layout from +spec+. 
+ # @example Creating a layout from an array +spec+ + # class MyStruct < Struct + # layout :field1, :int, + # :field2, :pointer, + # :field3, :string + # end + # @example Creating a layout from an array +spec+ with offset + # class MyStructWithOffset < Struct + # layout :field1, :int, + # :field2, :pointer, 6, # set offset to 6 for this field + # :field3, :string + # end + # @example Creating a layout from a hash +spec+ + # class MyStructFromHash < Struct + # layout :field1 => :int, + # :field2 => :pointer, + # :field3 => :string + # end + # @example Creating a layout with pointers to functions + # class MyFunctionTable < Struct + # layout :function1, callback([:int, :int], :int), + # :function2, callback([:pointer], :void), + # :field3, :string + # end + def layout(*spec) + warn "[DEPRECATION] Struct layout is already defined for class #{self.inspect}. Redefinition as in #{caller[0]} will be disallowed in ffi-2.0." if defined?(@layout) + return @layout if spec.size == 0 + + builder = StructLayoutBuilder.new + builder.union = self < Union + builder.packed = @packed if defined?(@packed) + builder.alignment = @min_alignment if defined?(@min_alignment) + + if spec[0].kind_of?(Hash) + hash_layout(builder, spec) + else + array_layout(builder, spec) + end + builder.size = @size if defined?(@size) && @size > builder.size + cspec = builder.build + @layout = cspec unless self == Struct + @size = cspec.size + return cspec + end + + + protected + + def callback(params, ret) + mod = enclosing_module + ret_type = find_type(ret, mod) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + FFI::CallbackInfo.new(ret_type, params.map { |e| find_type(e, mod) }) + end + + def packed(packed = 1) + @packed = packed + end + alias :pack :packed + + def aligned(alignment = 1) + @min_alignment = alignment + end + alias :align :aligned + + def enclosing_module + begin + mod = self.name.split("::")[0..-2].inject(Object) { |obj, c| obj.const_get(c) } + if mod.respond_to?(:find_type) && (mod.is_a?(FFI::Library) || mod < FFI::Struct) + mod + end + rescue Exception + nil + end + end + + + def find_field_type(type, mod = enclosing_module) + if type.kind_of?(Class) && type < Struct + FFI::Type::Struct.new(type) + + elsif type.kind_of?(Class) && type < FFI::StructLayout::Field + type + + elsif type.kind_of?(::Array) + FFI::Type::Array.new(find_field_type(type[0]), type[1]) + + else + find_type(type, mod) + end + end + + def find_type(type, mod = enclosing_module) + if mod + mod.find_type(type) + end || FFI.find_type(type) + end + + private + + # @param [StructLayoutBuilder] builder + # @param [Hash] spec + # @return [builder] + # Add hash +spec+ to +builder+. + def hash_layout(builder, spec) + spec[0].each do |name, type| + builder.add name, find_field_type(type), nil + end + end + + # @param [StructLayoutBuilder] builder + # @param [Array] spec + # @return [builder] + # Add array +spec+ to +builder+. 
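The layout examples above only show how a struct is declared; as a hedged sketch of how such a struct is then instantiated and accessed (assuming the ffi gem is loadable, with an illustrative Point class that is not part of this repository):

    require 'ffi'

    class Point < FFI::Struct
      layout :x, :int,
             :y, :int
    end

    pt = Point.new     # backed by freshly allocated, zero-filled memory
    pt[:x] = 3         # field accessors come from the layout
    pt[:y] = 4
    pt.members         # => [:x, :y]
    pt.values          # => [3, 4]
    Point.size         # => 8 on typical platforms (two 32-bit ints)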
+ def array_layout(builder, spec) + i = 0 + while i < spec.size + name, type = spec[i, 2] + i += 2 + + # If the next param is a Integer, it specifies the offset + if spec[i].kind_of?(Integer) + offset = spec[i] + i += 1 + else + offset = nil + end + + builder.add name, find_field_type(type), offset + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct.rb new file mode 100644 index 0000000..7028258 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct.rb @@ -0,0 +1,316 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' +require 'ffi/struct_layout' +require 'ffi/struct_layout_builder' +require 'ffi/struct_by_reference' + +module FFI + + class Struct + + # Get struct size + # @return [Numeric] + def size + self.class.size + end + + # @return [Fixnum] Struct alignment + def alignment + self.class.alignment + end + alias_method :align, :alignment + + # (see FFI::StructLayout#offset_of) + def offset_of(name) + self.class.offset_of(name) + end + + # (see FFI::StructLayout#members) + def members + self.class.members + end + + # @return [Array] + # Get array of values from Struct fields. + def values + members.map { |m| self[m] } + end + + # (see FFI::StructLayout#offsets) + def offsets + self.class.offsets + end + + # Clear the struct content. + # @return [self] + def clear + pointer.clear + self + end + + # Get {Pointer} to struct content. + # @return [AbstractMemory] + def to_ptr + pointer + end + + # Get struct size + # @return [Numeric] + def self.size + defined?(@layout) ? @layout.size : defined?(@size) ? 
@size : 0 + end + + # set struct size + # @param [Numeric] size + # @return [size] + def self.size=(size) + raise ArgumentError, "Size already set" if defined?(@size) || defined?(@layout) + @size = size + end + + # @return (see Struct#alignment) + def self.alignment + @layout.alignment + end + + # (see FFI::Type#members) + def self.members + @layout.members + end + + # (see FFI::StructLayout#offsets) + def self.offsets + @layout.offsets + end + + # (see FFI::StructLayout#offset_of) + def self.offset_of(name) + @layout.offset_of(name) + end + + def self.in + ptr(:in) + end + + def self.out + ptr(:out) + end + + def self.ptr(flags = :inout) + @ref_data_type ||= Type::Mapped.new(StructByReference.new(self)) + end + + def self.val + @val_data_type ||= StructByValue.new(self) + end + + def self.by_value + self.val + end + + def self.by_ref(flags = :inout) + self.ptr(flags) + end + + class ManagedStructConverter < StructByReference + + # @param [Struct] struct_class + def initialize(struct_class) + super(struct_class) + + raise NoMethodError, "release() not implemented for class #{struct_class}" unless struct_class.respond_to? :release + @method = struct_class.method(:release) + end + + # @param [Pointer] ptr + # @param [nil] ctx + # @return [Struct] + def from_native(ptr, ctx) + struct_class.new(AutoPointer.new(ptr, @method)) + end + end + + def self.auto_ptr + @managed_type ||= Type::Mapped.new(ManagedStructConverter.new(self)) + end + + + class << self + public + + # @return [StructLayout] + # @overload layout + # @return [StructLayout] + # Get struct layout. + # @overload layout(*spec) + # @param [Array,Array(Hash)] spec + # @return [StructLayout] + # Create struct layout from +spec+. + # @example Creating a layout from an array +spec+ + # class MyStruct < Struct + # layout :field1, :int, + # :field2, :pointer, + # :field3, :string + # end + # @example Creating a layout from an array +spec+ with offset + # class MyStructWithOffset < Struct + # layout :field1, :int, + # :field2, :pointer, 6, # set offset to 6 for this field + # :field3, :string + # end + # @example Creating a layout from a hash +spec+ + # class MyStructFromHash < Struct + # layout :field1 => :int, + # :field2 => :pointer, + # :field3 => :string + # end + # @example Creating a layout with pointers to functions + # class MyFunctionTable < Struct + # layout :function1, callback([:int, :int], :int), + # :function2, callback([:pointer], :void), + # :field3, :string + # end + def layout(*spec) + warn "[DEPRECATION] Struct layout is already defined for class #{self.inspect}. Redefinition as in #{caller[0]} will be disallowed in ffi-2.0." 
if defined?(@layout) + return @layout if spec.size == 0 + + builder = StructLayoutBuilder.new + builder.union = self < Union + builder.packed = @packed if defined?(@packed) + builder.alignment = @min_alignment if defined?(@min_alignment) + + if spec[0].kind_of?(Hash) + hash_layout(builder, spec) + else + array_layout(builder, spec) + end + builder.size = @size if defined?(@size) && @size > builder.size + cspec = builder.build + @layout = cspec unless self == Struct + @size = cspec.size + return cspec + end + + + protected + + def callback(params, ret) + mod = enclosing_module + ret_type = find_type(ret, mod) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + FFI::CallbackInfo.new(ret_type, params.map { |e| find_type(e, mod) }) + end + + def packed(packed = 1) + @packed = packed + end + alias :pack :packed + + def aligned(alignment = 1) + @min_alignment = alignment + end + alias :align :aligned + + def enclosing_module + begin + mod = self.name.split("::")[0..-2].inject(Object) { |obj, c| obj.const_get(c) } + if mod.respond_to?(:find_type) && (mod.is_a?(FFI::Library) || mod < FFI::Struct) + mod + end + rescue Exception + nil + end + end + + + def find_field_type(type, mod = enclosing_module) + if type.kind_of?(Class) && type < Struct + FFI::Type::Struct.new(type) + + elsif type.kind_of?(Class) && type < FFI::StructLayout::Field + type + + elsif type.kind_of?(::Array) + FFI::Type::Array.new(find_field_type(type[0]), type[1]) + + else + find_type(type, mod) + end + end + + def find_type(type, mod = enclosing_module) + if mod + mod.find_type(type) + end || FFI.find_type(type) + end + + private + + # @param [StructLayoutBuilder] builder + # @param [Hash] spec + # @return [builder] + # Add hash +spec+ to +builder+. + def hash_layout(builder, spec) + spec[0].each do |name, type| + builder.add name, find_field_type(type), nil + end + end + + # @param [StructLayoutBuilder] builder + # @param [Array] spec + # @return [builder] + # Add array +spec+ to +builder+. + def array_layout(builder, spec) + i = 0 + while i < spec.size + name, type = spec[i, 2] + i += 2 + + # If the next param is a Integer, it specifies the offset + if spec[i].kind_of?(Integer) + offset = spec[i] + i += 1 + else + offset = nil + end + + builder.add name, find_field_type(type), offset + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference 2.rb new file mode 100644 index 0000000..27f25ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference 2.rb @@ -0,0 +1,72 @@ +# +# Copyright (C) 2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This class includes the {FFI::DataConverter} module. + class StructByReference + include DataConverter + + attr_reader :struct_class + + # @param [Struct] struct_class + def initialize(struct_class) + unless Class === struct_class and struct_class < FFI::Struct + raise TypeError, 'wrong type (expected subclass of FFI::Struct)' + end + @struct_class = struct_class + end + + # Always get {FFI::Type}::POINTER. + def native_type + FFI::Type::POINTER + end + + # @param [nil, Struct] value + # @param [nil] ctx + # @return [AbstractMemory] Pointer on +value+. + def to_native(value, ctx) + return Pointer::NULL if value.nil? + + unless @struct_class === value + raise TypeError, "wrong argument type #{value.class} (expected #{@struct_class})" + end + + value.pointer + end + + # @param [AbstractMemory] value + # @param [nil] ctx + # @return [Struct] + # Create a struct from content of memory +value+. + def from_native(value, ctx) + @struct_class.new(value) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference.rb new file mode 100644 index 0000000..27f25ec --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference.rb @@ -0,0 +1,72 @@ +# +# Copyright (C) 2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This class includes the {FFI::DataConverter} module. + class StructByReference + include DataConverter + + attr_reader :struct_class + + # @param [Struct] struct_class + def initialize(struct_class) + unless Class === struct_class and struct_class < FFI::Struct + raise TypeError, 'wrong type (expected subclass of FFI::Struct)' + end + @struct_class = struct_class + end + + # Always get {FFI::Type}::POINTER. + def native_type + FFI::Type::POINTER + end + + # @param [nil, Struct] value + # @param [nil] ctx + # @return [AbstractMemory] Pointer on +value+. + def to_native(value, ctx) + return Pointer::NULL if value.nil? + + unless @struct_class === value + raise TypeError, "wrong argument type #{value.class} (expected #{@struct_class})" + end + + value.pointer + end + + # @param [AbstractMemory] value + # @param [nil] ctx + # @return [Struct] + # Create a struct from content of memory +value+. + def from_native(value, ctx) + @struct_class.new(value) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout 2.rb new file mode 100644 index 0000000..d5a78a7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout 2.rb @@ -0,0 +1,96 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
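StructByReference itself is only a converter; in practice it appears when a struct is passed by pointer in an attach_function signature via Struct.by_ref. A hedged, Unix-only sketch (gettimeofday and the TimeVal field widths below reflect common 64-bit platforms and are illustrative, not part of this repository):

    require 'ffi'

    class TimeVal < FFI::Struct
      layout :tv_sec, :long, :tv_usec, :long
    end

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # int gettimeofday(struct timeval *tv, struct timezone *tz)
      attach_function :gettimeofday, [ TimeVal.by_ref, :pointer ], :int
    end

    tv = TimeVal.new
    LibC.gettimeofday(tv, nil)  # the struct instance is converted to its pointer
    tv[:tv_sec]                 # => seconds since the Unix epoch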
+# + +module FFI + + class StructLayout + + # @return [Array + # Get an array of tuples (field name, offset of the field). + def offsets + members.map { |m| [ m, self[m].offset ] } + end + + # @return [Numeric] + # Get the offset of a field. + def offset_of(field_name) + self[field_name].offset + end + + # An enum {Field} in a {StructLayout}. + class Enum < Field + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @return [Object] + # Get an object of type {#type} from memory pointed by +ptr+. + def get(ptr) + type.find(ptr.get_int(offset)) + end + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @param value + # @return [nil] + # Set +value+ into memory pointed by +ptr+. + def put(ptr, value) + ptr.put_int(offset, type.find(value)) + end + + end + + class InnerStruct < Field + def get(ptr) + type.struct_class.new(ptr.slice(self.offset, self.size)) + end + + def put(ptr, value) + raise TypeError, "wrong value type (expected #{type.struct_class})" unless value.is_a?(type.struct_class) + ptr.slice(self.offset, self.size).__copy_from__(value.pointer, self.size) + end + end + + class Mapped < Field + def initialize(name, offset, type, orig_field) + super(name, offset, type) + @orig_field = orig_field + end + + def get(ptr) + type.from_native(@orig_field.get(ptr), nil) + end + + def put(ptr, value) + @orig_field.put(ptr, type.to_native(value, nil)) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout.rb new file mode 100644 index 0000000..d5a78a7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout.rb @@ -0,0 +1,96 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + class StructLayout + + # @return [Array + # Get an array of tuples (field name, offset of the field). 
+ def offsets + members.map { |m| [ m, self[m].offset ] } + end + + # @return [Numeric] + # Get the offset of a field. + def offset_of(field_name) + self[field_name].offset + end + + # An enum {Field} in a {StructLayout}. + class Enum < Field + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @return [Object] + # Get an object of type {#type} from memory pointed by +ptr+. + def get(ptr) + type.find(ptr.get_int(offset)) + end + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @param value + # @return [nil] + # Set +value+ into memory pointed by +ptr+. + def put(ptr, value) + ptr.put_int(offset, type.find(value)) + end + + end + + class InnerStruct < Field + def get(ptr) + type.struct_class.new(ptr.slice(self.offset, self.size)) + end + + def put(ptr, value) + raise TypeError, "wrong value type (expected #{type.struct_class})" unless value.is_a?(type.struct_class) + ptr.slice(self.offset, self.size).__copy_from__(value.pointer, self.size) + end + end + + class Mapped < Field + def initialize(name, offset, type, orig_field) + super(name, offset, type) + @orig_field = orig_field + end + + def get(ptr) + type.from_native(@orig_field.get(ptr), nil) + end + + def put(ptr, value) + @orig_field.put(ptr, type.to_native(value, nil)) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder 2.rb new file mode 100644 index 0000000..4d6a464 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder 2.rb @@ -0,0 +1,227 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # Build a {StructLayout struct layout}. 
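The offsets computed by the builder below, and exposed through StructLayout#offsets and #offset_of above, can be inspected directly. A small sketch, with an illustrative Packet layout and x86_64 natural alignment assumed:

    require 'ffi'

    class Packet < FFI::Struct
      layout :kind,    :uint8,
             :length,  :uint16,
             :payload, :pointer
    end

    Packet.offsets             # => [[:kind, 0], [:length, 2], [:payload, 8]]
    Packet.offset_of(:payload) # => 8  (alignment inserts padding before the field)
    Packet.size                # => 16 (tail padding rounds up to the struct alignment)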
+ class StructLayoutBuilder + attr_reader :size + attr_reader :alignment + + def initialize + @size = 0 + @alignment = 1 + @min_alignment = 1 + @packed = false + @union = false + @fields = Array.new + end + + # Set size attribute with +size+ only if +size+ is greater than attribute value. + # @param [Numeric] size + def size=(size) + @size = size if size > @size + end + + # Set alignment attribute with +align+ only if it is greater than attribute value. + # @param [Numeric] align + def alignment=(align) + @alignment = align if align > @alignment + @min_alignment = align + end + + # Set union attribute. + # Set to +true+ to build a {Union} instead of a {Struct}. + # @param [Boolean] is_union + # @return [is_union] + def union=(is_union) + @union = is_union + end + + # Building a {Union} or a {Struct} ? + # + # @return [Boolean] + # + def union? + @union + end + + # Set packed attribute + # @overload packed=(packed) Set alignment and packed attributes to + # +packed+. + # + # @param [Fixnum] packed + # + # @return [packed] + # @overload packed=(packed) Set packed attribute. + # @param packed + # + # @return [0,1] + # + def packed=(packed) + if packed.is_a?(0.class) + @alignment = packed + @packed = packed + else + @packed = packed ? 1 : 0 + end + end + + + # List of number types + NUMBER_TYPES = [ + Type::INT8, + Type::UINT8, + Type::INT16, + Type::UINT16, + Type::INT32, + Type::UINT32, + Type::LONG, + Type::ULONG, + Type::INT64, + Type::UINT64, + Type::FLOAT32, + Type::FLOAT64, + Type::LONGDOUBLE, + Type::BOOL, + ] + + # @param [String, Symbol] name name of the field + # @param [Array, DataConverter, Struct, StructLayout::Field, Symbol, Type] type type of the field + # @param [Numeric, nil] offset + # @return [self] + # Add a field to the builder. + # @note Setting +offset+ to +nil+ or +-1+ is equivalent to +0+. + def add(name, type, offset = nil) + + if offset.nil? || offset == -1 + offset = @union ? 0 : align(@size, @packed ? [ @packed, type.alignment ].min : [ @min_alignment, type.alignment ].max) + end + + # + # If a FFI::Type type was passed in as the field arg, try and convert to a StructLayout::Field instance + # + field = type.is_a?(StructLayout::Field) ? type : field_for_type(name, offset, type) + @fields << field + @alignment = [ @alignment, field.alignment ].max unless @packed + @size = [ @size, field.size + (@union ? 0 : field.offset) ].max + + return self + end + + # @param (see #add) + # @return (see #add) + # Same as {#add}. + # @see #add + def add_field(name, type, offset = nil) + add(name, type, offset) + end + + # @param (see #add) + # @return (see #add) + # Add a struct as a field to the builder. + def add_struct(name, type, offset = nil) + add(name, Type::Struct.new(type), offset) + end + + # @param name (see #add) + # @param type (see #add) + # @param [Numeric] count array length + # @param offset (see #add) + # @return (see #add) + # Add an array as a field to the builder. + def add_array(name, type, count, offset = nil) + add(name, Type::Array.new(type, count), offset) + end + + # @return [StructLayout] + # Build and return the struct layout. + def build + # Add tail padding if the struct is not packed + size = @packed ? @size : align(@size, @alignment) + + layout = StructLayout.new(@fields, size, @alignment) + layout.__union! 
if @union + layout + end + + private + + # @param [Numeric] offset + # @param [Numeric] align + # @return [Numeric] + def align(offset, align) + align + ((offset - 1) & ~(align - 1)); + end + + # @param (see #add) + # @return [StructLayout::Field] + def field_for_type(name, offset, type) + field_class = case + when type.is_a?(Type::Function) + StructLayout::Function + + when type.is_a?(Type::Struct) + StructLayout::InnerStruct + + when type.is_a?(Type::Array) + StructLayout::Array + + when type.is_a?(FFI::Enum) + StructLayout::Enum + + when NUMBER_TYPES.include?(type) + StructLayout::Number + + when type == Type::POINTER + StructLayout::Pointer + + when type == Type::STRING + StructLayout::String + + when type.is_a?(Class) && type < StructLayout::Field + type + + when type.is_a?(DataConverter) + return StructLayout::Mapped.new(name, offset, Type::Mapped.new(type), field_for_type(name, offset, type.native_type)) + + when type.is_a?(Type::Mapped) + return StructLayout::Mapped.new(name, offset, type, field_for_type(name, offset, type.native_type)) + + else + raise TypeError, "invalid struct field type #{type.inspect}" + end + + field_class.new(name, offset, type) + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder.rb new file mode 100644 index 0000000..4d6a464 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder.rb @@ -0,0 +1,227 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # Build a {StructLayout struct layout}. + class StructLayoutBuilder + attr_reader :size + attr_reader :alignment + + def initialize + @size = 0 + @alignment = 1 + @min_alignment = 1 + @packed = false + @union = false + @fields = Array.new + end + + # Set size attribute with +size+ only if +size+ is greater than attribute value. 
+ # @param [Numeric] size + def size=(size) + @size = size if size > @size + end + + # Set alignment attribute with +align+ only if it is greater than attribute value. + # @param [Numeric] align + def alignment=(align) + @alignment = align if align > @alignment + @min_alignment = align + end + + # Set union attribute. + # Set to +true+ to build a {Union} instead of a {Struct}. + # @param [Boolean] is_union + # @return [is_union] + def union=(is_union) + @union = is_union + end + + # Building a {Union} or a {Struct} ? + # + # @return [Boolean] + # + def union? + @union + end + + # Set packed attribute + # @overload packed=(packed) Set alignment and packed attributes to + # +packed+. + # + # @param [Fixnum] packed + # + # @return [packed] + # @overload packed=(packed) Set packed attribute. + # @param packed + # + # @return [0,1] + # + def packed=(packed) + if packed.is_a?(0.class) + @alignment = packed + @packed = packed + else + @packed = packed ? 1 : 0 + end + end + + + # List of number types + NUMBER_TYPES = [ + Type::INT8, + Type::UINT8, + Type::INT16, + Type::UINT16, + Type::INT32, + Type::UINT32, + Type::LONG, + Type::ULONG, + Type::INT64, + Type::UINT64, + Type::FLOAT32, + Type::FLOAT64, + Type::LONGDOUBLE, + Type::BOOL, + ] + + # @param [String, Symbol] name name of the field + # @param [Array, DataConverter, Struct, StructLayout::Field, Symbol, Type] type type of the field + # @param [Numeric, nil] offset + # @return [self] + # Add a field to the builder. + # @note Setting +offset+ to +nil+ or +-1+ is equivalent to +0+. + def add(name, type, offset = nil) + + if offset.nil? || offset == -1 + offset = @union ? 0 : align(@size, @packed ? [ @packed, type.alignment ].min : [ @min_alignment, type.alignment ].max) + end + + # + # If a FFI::Type type was passed in as the field arg, try and convert to a StructLayout::Field instance + # + field = type.is_a?(StructLayout::Field) ? type : field_for_type(name, offset, type) + @fields << field + @alignment = [ @alignment, field.alignment ].max unless @packed + @size = [ @size, field.size + (@union ? 0 : field.offset) ].max + + return self + end + + # @param (see #add) + # @return (see #add) + # Same as {#add}. + # @see #add + def add_field(name, type, offset = nil) + add(name, type, offset) + end + + # @param (see #add) + # @return (see #add) + # Add a struct as a field to the builder. + def add_struct(name, type, offset = nil) + add(name, Type::Struct.new(type), offset) + end + + # @param name (see #add) + # @param type (see #add) + # @param [Numeric] count array length + # @param offset (see #add) + # @return (see #add) + # Add an array as a field to the builder. + def add_array(name, type, count, offset = nil) + add(name, Type::Array.new(type, count), offset) + end + + # @return [StructLayout] + # Build and return the struct layout. + def build + # Add tail padding if the struct is not packed + size = @packed ? @size : align(@size, @alignment) + + layout = StructLayout.new(@fields, size, @alignment) + layout.__union! 
if @union + layout + end + + private + + # @param [Numeric] offset + # @param [Numeric] align + # @return [Numeric] + def align(offset, align) + align + ((offset - 1) & ~(align - 1)); + end + + # @param (see #add) + # @return [StructLayout::Field] + def field_for_type(name, offset, type) + field_class = case + when type.is_a?(Type::Function) + StructLayout::Function + + when type.is_a?(Type::Struct) + StructLayout::InnerStruct + + when type.is_a?(Type::Array) + StructLayout::Array + + when type.is_a?(FFI::Enum) + StructLayout::Enum + + when NUMBER_TYPES.include?(type) + StructLayout::Number + + when type == Type::POINTER + StructLayout::Pointer + + when type == Type::STRING + StructLayout::String + + when type.is_a?(Class) && type < StructLayout::Field + type + + when type.is_a?(DataConverter) + return StructLayout::Mapped.new(name, offset, Type::Mapped.new(type), field_for_type(name, offset, type.native_type)) + + when type.is_a?(Type::Mapped) + return StructLayout::Mapped.new(name, offset, type, field_for_type(name, offset, type.native_type)) + + else + raise TypeError, "invalid struct field type #{type.inspect}" + end + + field_class.new(name, offset, type) + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator 2.rb new file mode 100644 index 0000000..fb34d94 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator 2.rb @@ -0,0 +1,230 @@ +require 'tempfile' +require 'open3' + +module FFI + + # ConstGenerator turns C constants into ruby values. + # + # @example a simple example for stdio + # require 'ffi/tools/const_generator' + # cg = FFI::ConstGenerator.new('stdio') do |gen| + # gen.const(:SEEK_SET) + # gen.const('SEEK_CUR') + # gen.const('seek_end') # this constant does not exist + # end # #calculate called automatically at the end of the block + # + # cg['SEEK_SET'] # => 0 + # cg['SEEK_CUR'] # => 1 + # cg['seek_end'] # => nil + # cg.to_ruby # => "SEEK_SET = 0\nSEEK_CUR = 1\n# seek_end not available" + class ConstGenerator + @options = {} + attr_reader :constants + + # Creates a new constant generator that uses +prefix+ as a name, and an + # options hash. + # + # The only option is +:required+, which if set to +true+ raises an error if a + # constant you have requested was not found. + # + # @param [#to_s] prefix + # @param [Hash] options + # @return + # @option options [Boolean] :required + # @overload initialize(prefix, options) + # @overload initialize(prefix, options) { |gen| ... } + # @yieldparam [ConstGenerator] gen new generator is passed to the block + # When passed a block, {#calculate} is automatically called at the end of + # the block, otherwise you must call it yourself. + def initialize(prefix = nil, options = {}) + @includes = ['stdio.h', 'stddef.h'] + @constants = {} + @prefix = prefix + + @required = options[:required] + @options = options + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + # Set class options + # These options are merged with {#initialize} options when it is called with a block. + # @param [Hash] options + # @return [Hash] class options + def self.options=(options) + @options = options + end + # Get class options. + # @return [Hash] class options + def self.options + @options + end + # @param [String] name + # @return constant value (converted if a +converter+ was defined). + # Access a constant by name. 
+ def [](name) + @constants[name].converted_value + end + + # Request the value for C constant +name+. + # + # @param [#to_s] name C constant name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # + # @overload const(name, format=nil, cast='', ruby_name=nil, converter=nil) + # +converter+ is a Method or a Proc. + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + # @overload const(name, format=nil, cast='', ruby_name=nil) { |value| ... } + # Use a converter block. This block convert the value from a string to the + # appropriate type for {#to_ruby}. + # @yieldparam value constant value + def const(name, format = nil, cast = '', ruby_name = nil, converter = nil, + &converter_proc) + format ||= '%d' + cast ||= '' + + if converter_proc and converter then + raise ArgumentError, "Supply only converter or converter block" + end + + converter = converter_proc if converter.nil? + + const = Constant.new name, format, cast, ruby_name, converter + @constants[name.to_s] = const + return const + end + + # Calculate constants values. + # @param [Hash] options + # @option options [String] :cppflags flags for C compiler + # @return [nil] + # @raise if a constant is missing and +:required+ was set to +true+ (see {#initialize}) + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_const_gen_bin_#{Process.pid}" + + Tempfile.open("#{@prefix}.const_generator") do |f| + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + f.puts "\nint main(int argc, char **argv)\n{" + + @constants.each_value do |const| + f.puts <<-EOF + #ifdef #{const.name} + printf("#{const.name} #{const.format}\\n", #{const.cast}#{const.name}); + #endif + EOF + end + + f.puts "\n\treturn 0;\n}" + f.flush + + output = `gcc #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating constants #{@prefix}:\n#{output}" + end + end + + output = `#{binary}` + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + output.each_line do |line| + line =~ /^(\S+)\s(.*)$/ + const = @constants[$1] + const.value = $2 + end + + missing_constants = @constants.select do |name, constant| + constant.value.nil? + end.map { |name,| name } + + if @required and not missing_constants.empty? then + raise "Missing required constants for #{@prefix}: #{missing_constants.join ', '}" + end + end + + # Dump constants to +io+. + # @param [#puts] io + # @return [nil] + def dump_constants(io) + @constants.each do |name, constant| + name = [@prefix, name].join '.' if @prefix + io.puts "#{name} = #{constant.converted_value}" + end + end + + # Outputs values for discovered constants. If the constant's value was + # not discovered it is not omitted. + # @return [String] + def to_ruby + @constants.sort_by { |name,| name }.map do |name, constant| + if constant.value.nil? then + "# #{name} not available" + else + constant.to_ruby + end + end.join "\n" + end + + # Add additional C include file(s) to calculate constants from. 
+ # @note +stdio.h+ and +stddef.h+ automatically included + # @param [List, Array] i include file(s) + # @return [Array] array of include files + def include(*i) + @includes |= i.flatten + end + + end + + # This class hold constants for {ConstGenerator} + class ConstGenerator::Constant + + attr_reader :name, :format, :cast + attr_accessor :value + + # @param [#to_s] name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + def initialize(name, format, cast, ruby_name = nil, converter=nil) + @name = name + @format = format + @cast = cast + @ruby_name = ruby_name + @converter = converter + @value = nil + end + + # Return constant value (converted if a +converter+ was defined). + # @return constant value. + def converted_value + if @converter + @converter.call(@value) + else + @value + end + end + + # get constant ruby name + # @return [String] + def ruby_name + @ruby_name || @name + end + + # Get an evaluable string from constant. + # @return [String] + def to_ruby + "#{ruby_name} = #{converted_value}" + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator.rb new file mode 100644 index 0000000..fb34d94 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator.rb @@ -0,0 +1,230 @@ +require 'tempfile' +require 'open3' + +module FFI + + # ConstGenerator turns C constants into ruby values. + # + # @example a simple example for stdio + # require 'ffi/tools/const_generator' + # cg = FFI::ConstGenerator.new('stdio') do |gen| + # gen.const(:SEEK_SET) + # gen.const('SEEK_CUR') + # gen.const('seek_end') # this constant does not exist + # end # #calculate called automatically at the end of the block + # + # cg['SEEK_SET'] # => 0 + # cg['SEEK_CUR'] # => 1 + # cg['seek_end'] # => nil + # cg.to_ruby # => "SEEK_SET = 0\nSEEK_CUR = 1\n# seek_end not available" + class ConstGenerator + @options = {} + attr_reader :constants + + # Creates a new constant generator that uses +prefix+ as a name, and an + # options hash. + # + # The only option is +:required+, which if set to +true+ raises an error if a + # constant you have requested was not found. + # + # @param [#to_s] prefix + # @param [Hash] options + # @return + # @option options [Boolean] :required + # @overload initialize(prefix, options) + # @overload initialize(prefix, options) { |gen| ... } + # @yieldparam [ConstGenerator] gen new generator is passed to the block + # When passed a block, {#calculate} is automatically called at the end of + # the block, otherwise you must call it yourself. + def initialize(prefix = nil, options = {}) + @includes = ['stdio.h', 'stddef.h'] + @constants = {} + @prefix = prefix + + @required = options[:required] + @options = options + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + # Set class options + # These options are merged with {#initialize} options when it is called with a block. + # @param [Hash] options + # @return [Hash] class options + def self.options=(options) + @options = options + end + # Get class options. + # @return [Hash] class options + def self.options + @options + end + # @param [String] name + # @return constant value (converted if a +converter+ was defined). 
+ # Access a constant by name. + def [](name) + @constants[name].converted_value + end + + # Request the value for C constant +name+. + # + # @param [#to_s] name C constant name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # + # @overload const(name, format=nil, cast='', ruby_name=nil, converter=nil) + # +converter+ is a Method or a Proc. + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + # @overload const(name, format=nil, cast='', ruby_name=nil) { |value| ... } + # Use a converter block. This block convert the value from a string to the + # appropriate type for {#to_ruby}. + # @yieldparam value constant value + def const(name, format = nil, cast = '', ruby_name = nil, converter = nil, + &converter_proc) + format ||= '%d' + cast ||= '' + + if converter_proc and converter then + raise ArgumentError, "Supply only converter or converter block" + end + + converter = converter_proc if converter.nil? + + const = Constant.new name, format, cast, ruby_name, converter + @constants[name.to_s] = const + return const + end + + # Calculate constants values. + # @param [Hash] options + # @option options [String] :cppflags flags for C compiler + # @return [nil] + # @raise if a constant is missing and +:required+ was set to +true+ (see {#initialize}) + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_const_gen_bin_#{Process.pid}" + + Tempfile.open("#{@prefix}.const_generator") do |f| + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + f.puts "\nint main(int argc, char **argv)\n{" + + @constants.each_value do |const| + f.puts <<-EOF + #ifdef #{const.name} + printf("#{const.name} #{const.format}\\n", #{const.cast}#{const.name}); + #endif + EOF + end + + f.puts "\n\treturn 0;\n}" + f.flush + + output = `gcc #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating constants #{@prefix}:\n#{output}" + end + end + + output = `#{binary}` + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + output.each_line do |line| + line =~ /^(\S+)\s(.*)$/ + const = @constants[$1] + const.value = $2 + end + + missing_constants = @constants.select do |name, constant| + constant.value.nil? + end.map { |name,| name } + + if @required and not missing_constants.empty? then + raise "Missing required constants for #{@prefix}: #{missing_constants.join ', '}" + end + end + + # Dump constants to +io+. + # @param [#puts] io + # @return [nil] + def dump_constants(io) + @constants.each do |name, constant| + name = [@prefix, name].join '.' if @prefix + io.puts "#{name} = #{constant.converted_value}" + end + end + + # Outputs values for discovered constants. If the constant's value was + # not discovered it is not omitted. + # @return [String] + def to_ruby + @constants.sort_by { |name,| name }.map do |name, constant| + if constant.value.nil? then + "# #{name} not available" + else + constant.to_ruby + end + end.join "\n" + end + + # Add additional C include file(s) to calculate constants from. 
+ # @note +stdio.h+ and +stddef.h+ automatically included + # @param [List, Array] i include file(s) + # @return [Array] array of include files + def include(*i) + @includes |= i.flatten + end + + end + + # This class hold constants for {ConstGenerator} + class ConstGenerator::Constant + + attr_reader :name, :format, :cast + attr_accessor :value + + # @param [#to_s] name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + def initialize(name, format, cast, ruby_name = nil, converter=nil) + @name = name + @format = format + @cast = cast + @ruby_name = ruby_name + @converter = converter + @value = nil + end + + # Return constant value (converted if a +converter+ was defined). + # @return constant value. + def converted_value + if @converter + @converter.call(@value) + else + @value + end + end + + # get constant ruby name + # @return [String] + def ruby_name + @ruby_name || @name + end + + # Get an evaluable string from constant. + # @return [String] + def to_ruby + "#{ruby_name} = #{converted_value}" + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator 2.rb new file mode 100644 index 0000000..5552ea5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator 2.rb @@ -0,0 +1,105 @@ +require 'ffi/tools/struct_generator' +require 'ffi/tools/const_generator' + +module FFI + + ## + # Generate files with C structs for FFI::Struct and C constants. + # + # == A simple example + # + # In file +zlib.rb.ffi+: + # module Zlib + # @@@ + # constants do |c| + # c.include "zlib.h" + # c.const :ZLIB_VERNUM + # end + # @@@ + # + # class ZStream < FFI::Struct + # + # struct do |s| + # s.name "struct z_stream_s" + # s.include "zlib.h" + # + # s.field :next_in, :pointer + # s.field :avail_in, :uint + # s.field :total_in, :ulong + # end + # @@@ + # end + # end + # + # Translate the file: + # require "ffi/tools/generator" + # FFI::Generator.new "zlib.rb.ffi", "zlib.rb" + # + # Generates the file +zlib.rb+ with constant values and offsets: + # module Zlib + # ZLIB_VERNUM = 4784 + # + # class ZStream < FFI::Struct + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 8, + # :total_in, :ulong, 16 + # end + # + # @see FFI::Generator::Task for easy integration in a Rakefile + class Generator + + def initialize(ffi_name, rb_name, options = {}) + @ffi_name = ffi_name + @rb_name = rb_name + @options = options + @name = File.basename rb_name, '.rb' + + file = File.read @ffi_name + + new_file = file.gsub(/^( *)@@@(.*?)@@@/m) do + @constants = [] + @structs = [] + + indent = $1 + original_lines = $2.count "\n" + + instance_eval $2, @ffi_name, $`.count("\n") + + new_lines = [] + @constants.each { |c| new_lines << c.to_ruby } + @structs.each { |s| new_lines << s.generate_layout } + + new_lines = new_lines.join("\n").split "\n" # expand multiline blocks + new_lines = new_lines.map { |line| indent + line } + + padding = original_lines - new_lines.length + new_lines += [nil] * padding if padding >= 0 + + new_lines.join "\n" + end + + open @rb_name, 'w' do |f| + f.puts "# This file is generated from `#{@ffi_name}'. Do not edit." 
+ f.puts + f.puts new_file + end + end + + def constants(options = {}, &block) + @constants << FFI::ConstGenerator.new(@name, @options.merge(options), &block) + end + + def struct(options = {}, &block) + @structs << FFI::StructGenerator.new(@name, @options.merge(options), &block) + end + + ## + # Utility converter for constants + + def to_s + proc { |obj| obj.to_s.inspect } + end + + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator.rb new file mode 100644 index 0000000..5552ea5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator.rb @@ -0,0 +1,105 @@ +require 'ffi/tools/struct_generator' +require 'ffi/tools/const_generator' + +module FFI + + ## + # Generate files with C structs for FFI::Struct and C constants. + # + # == A simple example + # + # In file +zlib.rb.ffi+: + # module Zlib + # @@@ + # constants do |c| + # c.include "zlib.h" + # c.const :ZLIB_VERNUM + # end + # @@@ + # + # class ZStream < FFI::Struct + # + # struct do |s| + # s.name "struct z_stream_s" + # s.include "zlib.h" + # + # s.field :next_in, :pointer + # s.field :avail_in, :uint + # s.field :total_in, :ulong + # end + # @@@ + # end + # end + # + # Translate the file: + # require "ffi/tools/generator" + # FFI::Generator.new "zlib.rb.ffi", "zlib.rb" + # + # Generates the file +zlib.rb+ with constant values and offsets: + # module Zlib + # ZLIB_VERNUM = 4784 + # + # class ZStream < FFI::Struct + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 8, + # :total_in, :ulong, 16 + # end + # + # @see FFI::Generator::Task for easy integration in a Rakefile + class Generator + + def initialize(ffi_name, rb_name, options = {}) + @ffi_name = ffi_name + @rb_name = rb_name + @options = options + @name = File.basename rb_name, '.rb' + + file = File.read @ffi_name + + new_file = file.gsub(/^( *)@@@(.*?)@@@/m) do + @constants = [] + @structs = [] + + indent = $1 + original_lines = $2.count "\n" + + instance_eval $2, @ffi_name, $`.count("\n") + + new_lines = [] + @constants.each { |c| new_lines << c.to_ruby } + @structs.each { |s| new_lines << s.generate_layout } + + new_lines = new_lines.join("\n").split "\n" # expand multiline blocks + new_lines = new_lines.map { |line| indent + line } + + padding = original_lines - new_lines.length + new_lines += [nil] * padding if padding >= 0 + + new_lines.join "\n" + end + + open @rb_name, 'w' do |f| + f.puts "# This file is generated from `#{@ffi_name}'. Do not edit." + f.puts + f.puts new_file + end + end + + def constants(options = {}, &block) + @constants << FFI::ConstGenerator.new(@name, @options.merge(options), &block) + end + + def struct(options = {}, &block) + @structs << FFI::StructGenerator.new(@name, @options.merge(options), &block) + end + + ## + # Utility converter for constants + + def to_s + proc { |obj| obj.to_s.inspect } + end + + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task 2.rb new file mode 100644 index 0000000..da72968 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task 2.rb @@ -0,0 +1,32 @@ +require 'ffi/tools/generator' +require 'rake' +require 'rake/tasklib' + +## +# Add Rake tasks that generate files with C structs for FFI::Struct and C constants. 
+# +# @example a simple example for your Rakefile +# require "ffi/tools/generator_task" +# # Add a task to generate my_object.rb out of my_object.rb.ffi +# FFI::Generator::Task.new ["my_object.rb"], cflags: "-I/usr/local/mylibrary" +# +# The generated files are also added to the 'clear' task. +# +# @see FFI::Generator for a description of the file content +class FFI::Generator::Task < Rake::TaskLib + + def initialize(rb_names, options={}) + task :clean do rm_f rb_names end + + rb_names.each do |rb_name| + ffi_name = "#{rb_name}.ffi" + + file rb_name => ffi_name do |t| + puts "Generating #{rb_name}..." if Rake.application.options.trace + + FFI::Generator.new ffi_name, rb_name, options + end + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task.rb new file mode 100644 index 0000000..da72968 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task.rb @@ -0,0 +1,32 @@ +require 'ffi/tools/generator' +require 'rake' +require 'rake/tasklib' + +## +# Add Rake tasks that generate files with C structs for FFI::Struct and C constants. +# +# @example a simple example for your Rakefile +# require "ffi/tools/generator_task" +# # Add a task to generate my_object.rb out of my_object.rb.ffi +# FFI::Generator::Task.new ["my_object.rb"], cflags: "-I/usr/local/mylibrary" +# +# The generated files are also added to the 'clear' task. +# +# @see FFI::Generator for a description of the file content +class FFI::Generator::Task < Rake::TaskLib + + def initialize(rb_names, options={}) + task :clean do rm_f rb_names end + + rb_names.each do |rb_name| + ffi_name = "#{rb_name}.ffi" + + file rb_name => ffi_name do |t| + puts "Generating #{rb_name}..." if Rake.application.options.trace + + FFI::Generator.new ffi_name, rb_name, options + end + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator 2.rb new file mode 100644 index 0000000..7d2a6e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator 2.rb @@ -0,0 +1,194 @@ +require 'tempfile' + +module FFI + + ## + # Generates an FFI Struct layout. + # + # Given the @@@ portion in: + # + # class Zlib::ZStream < FFI::Struct + # @@@ + # name "struct z_stream_s" + # include "zlib.h" + # + # field :next_in, :pointer + # field :avail_in, :uint + # field :total_in, :ulong + # + # # ... + # @@@ + # end + # + # StructGenerator will create the layout: + # + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 4, + # :total_in, :ulong, 8, + # # ... + # + # StructGenerator does its best to pad the layout it produces to preserve + # line numbers. Place the struct definition as close to the top of the file + # for best results. + + class StructGenerator + @options = {} + attr_accessor :size + attr_reader :fields + + def initialize(name, options = {}) + @name = name + @struct_name = nil + @includes = [] + @fields = [] + @found = false + @size = nil + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + def self.options=(options) + @options = options + end + def self.options + @options + end + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_struct_gen_bin_#{Process.pid}" + + raise "struct name not set" if @struct_name.nil? 
+ + Tempfile.open("#{@name}.struct_generator") do |f| + f.puts "#include " + + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + + f.puts "#include \n\n" + f.puts "int main(int argc, char **argv)\n{" + f.puts " #{@struct_name} s;" + f.puts %[ printf("sizeof(#{@struct_name}) %u\\n", (unsigned int) sizeof(#{@struct_name}));] + + @fields.each do |field| + f.puts <<-EOF + printf("#{field.name} %u %u\\n", (unsigned int) offsetof(#{@struct_name}, #{field.name}), + (unsigned int) sizeof(s.#{field.name})); + EOF + end + + f.puts "\n return 0;\n}" + f.flush + + output = `gcc #{options[:cppflags]} #{options[:cflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + @found = false + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating struct #{@name} (#{@struct_name}):\n#{output}" + end + end + + output = `#{binary}`.split "\n" + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + sizeof = output.shift + unless @size + m = /\s*sizeof\([^)]+\) (\d+)/.match sizeof + @size = m[1] + end + + line_no = 0 + output.each do |line| + md = line.match(/.+ (\d+) (\d+)/) + @fields[line_no].offset = md[1].to_i + @fields[line_no].size = md[2].to_i + + line_no += 1 + end + + @found = true + end + + def field(name, type=nil) + field = Field.new(name, type) + @fields << field + return field + end + + def found? + @found + end + + def dump_config(io) + io.puts "rbx.platform.#{@name}.sizeof = #{@size}" + + @fields.each { |field| io.puts field.to_config(@name) } + end + + def generate_layout + buf = "" + + @fields.each_with_index do |field, i| + if buf.empty? + buf << "layout :#{field.name}, :#{field.type}, #{field.offset}" + else + buf << " :#{field.name}, :#{field.type}, #{field.offset}" + end + + if i < @fields.length - 1 + buf << ",\n" + end + end + + buf + end + + def get_field(name) + @fields.find { |f| name == f.name } + end + + def include(i) + @includes << i + end + + def name(n) + @struct_name = n + end + + end + + ## + # A field in a Struct. + + class StructGenerator::Field + + attr_reader :name + attr_reader :type + attr_reader :offset + attr_accessor :size + + def initialize(name, type) + @name = name + @type = type + @offset = nil + @size = nil + end + + def offset=(o) + @offset = o + end + + def to_config(name) + buf = [] + buf << "rbx.platform.#{name}.#{@name}.offset = #{@offset}" + buf << "rbx.platform.#{name}.#{@name}.size = #{@size}" + buf << "rbx.platform.#{name}.#{@name}.type = #{@type}" if @type + buf + end + + end + +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator.rb new file mode 100644 index 0000000..7d2a6e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator.rb @@ -0,0 +1,194 @@ +require 'tempfile' + +module FFI + + ## + # Generates an FFI Struct layout. + # + # Given the @@@ portion in: + # + # class Zlib::ZStream < FFI::Struct + # @@@ + # name "struct z_stream_s" + # include "zlib.h" + # + # field :next_in, :pointer + # field :avail_in, :uint + # field :total_in, :ulong + # + # # ... + # @@@ + # end + # + # StructGenerator will create the layout: + # + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 4, + # :total_in, :ulong, 8, + # # ... + # + # StructGenerator does its best to pad the layout it produces to preserve + # line numbers. 
Place the struct definition as close to the top of the file + # for best results. + + class StructGenerator + @options = {} + attr_accessor :size + attr_reader :fields + + def initialize(name, options = {}) + @name = name + @struct_name = nil + @includes = [] + @fields = [] + @found = false + @size = nil + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + def self.options=(options) + @options = options + end + def self.options + @options + end + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_struct_gen_bin_#{Process.pid}" + + raise "struct name not set" if @struct_name.nil? + + Tempfile.open("#{@name}.struct_generator") do |f| + f.puts "#include " + + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + + f.puts "#include \n\n" + f.puts "int main(int argc, char **argv)\n{" + f.puts " #{@struct_name} s;" + f.puts %[ printf("sizeof(#{@struct_name}) %u\\n", (unsigned int) sizeof(#{@struct_name}));] + + @fields.each do |field| + f.puts <<-EOF + printf("#{field.name} %u %u\\n", (unsigned int) offsetof(#{@struct_name}, #{field.name}), + (unsigned int) sizeof(s.#{field.name})); + EOF + end + + f.puts "\n return 0;\n}" + f.flush + + output = `gcc #{options[:cppflags]} #{options[:cflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + @found = false + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating struct #{@name} (#{@struct_name}):\n#{output}" + end + end + + output = `#{binary}`.split "\n" + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + sizeof = output.shift + unless @size + m = /\s*sizeof\([^)]+\) (\d+)/.match sizeof + @size = m[1] + end + + line_no = 0 + output.each do |line| + md = line.match(/.+ (\d+) (\d+)/) + @fields[line_no].offset = md[1].to_i + @fields[line_no].size = md[2].to_i + + line_no += 1 + end + + @found = true + end + + def field(name, type=nil) + field = Field.new(name, type) + @fields << field + return field + end + + def found? + @found + end + + def dump_config(io) + io.puts "rbx.platform.#{@name}.sizeof = #{@size}" + + @fields.each { |field| io.puts field.to_config(@name) } + end + + def generate_layout + buf = "" + + @fields.each_with_index do |field, i| + if buf.empty? + buf << "layout :#{field.name}, :#{field.type}, #{field.offset}" + else + buf << " :#{field.name}, :#{field.type}, #{field.offset}" + end + + if i < @fields.length - 1 + buf << ",\n" + end + end + + buf + end + + def get_field(name) + @fields.find { |f| name == f.name } + end + + def include(i) + @includes << i + end + + def name(n) + @struct_name = n + end + + end + + ## + # A field in a Struct. 
+ + class StructGenerator::Field + + attr_reader :name + attr_reader :type + attr_reader :offset + attr_accessor :size + + def initialize(name, type) + @name = name + @type = type + @offset = nil + @size = nil + end + + def offset=(o) + @offset = o + end + + def to_config(name) + buf = [] + buf << "rbx.platform.#{name}.#{@name}.offset = #{@offset}" + buf << "rbx.platform.#{name}.#{@name}.size = #{@size}" + buf << "rbx.platform.#{name}.#{@name}.type = #{@type}" if @type + buf + end + + end + +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator 2.rb new file mode 100644 index 0000000..ba2d8c5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator 2.rb @@ -0,0 +1,137 @@ +require 'tempfile' + +module FFI + + # @private + class TypesGenerator + + ## + # Maps different C types to the C type representations we use + + TYPE_MAP = { + "char" => :char, + "signed char" => :char, + "__signed char" => :char, + "unsigned char" => :uchar, + + "short" => :short, + "signed short" => :short, + "signed short int" => :short, + "unsigned short" => :ushort, + "unsigned short int" => :ushort, + + "int" => :int, + "signed int" => :int, + "unsigned int" => :uint, + + "long" => :long, + "long int" => :long, + "signed long" => :long, + "signed long int" => :long, + "unsigned long" => :ulong, + "unsigned long int" => :ulong, + "long unsigned int" => :ulong, + + "long long" => :long_long, + "long long int" => :long_long, + "signed long long" => :long_long, + "signed long long int" => :long_long, + "unsigned long long" => :ulong_long, + "unsigned long long int" => :ulong_long, + + "char *" => :string, + "void *" => :pointer, + } + + def self.generate(options = {}) + typedefs = nil + Tempfile.open 'ffi_types_generator' do |io| + io.puts <<-C +#include +#include +#include +#if !(defined(WIN32)) +#include +#include +#include +#endif + C + + io.close + cc = ENV['CC'] || 'gcc' + cmd = "#{cc} -E -x c #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -c" + if options[:input] + typedefs = File.read(options[:input]) + elsif options[:remote] + typedefs = `ssh #{options[:remote]} #{cmd} - < #{io.path}` + else + typedefs = `#{cmd} #{io.path}` + end + end + + code = [] + + typedefs.each_line do |type| + # We only care about single line typedef + next unless type =~ /typedef/ + # Ignore unions or structs + next if type =~ /union|struct/ + + # strip off the starting typedef and ending ; + type.gsub!(/^(.*typedef\s*)/, "") + type.gsub!(/\s*;\s*$/, "") + + parts = type.split(/\s+/) + def_type = parts.join(" ") + + # GCC does mapping with __attribute__ stuf, also see + # http://hal.cs.berkeley.edu/cil/cil016.html section 16.2.7. Problem + # with this is that the __attribute__ stuff can either occur before or + # after the new type that is defined... 
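StructGenerator, added in struct_generator.rb above, is normally driven through FFI::Generator and a .rb.ffi template, but it can also be used directly. A hedged sketch of that direct use, assuming a host C compiler and the usual struct timeval from sys/time.h; the offsets shown are only indicative:

    require 'ffi/tools/struct_generator'

    sg = FFI::StructGenerator.new('timeval') do |s|
      s.name 'struct timeval'
      s.include 'sys/time.h'
      s.field :tv_sec,  :long
      s.field :tv_usec, :long
    end                        # #calculate compiles a probe program and fills in offsets

    puts sg.generate_layout
    # layout :tv_sec, :long, 0,
    #        :tv_usec, :long, 8   (offsets and sizes are platform dependent)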
+ if type =~ /__attribute__/ + if parts.last =~ /__QI__|__HI__|__SI__|__DI__|__word__/ + + # In this case, the new type is BEFORE __attribute__ we need to + # find the final_type as the type before the part that starts with + # __attribute__ + final_type = "" + parts.each do |p| + break if p =~ /__attribute__/ + final_type = p + end + else + final_type = parts.pop + end + + def_type = case type + when /__QI__/ then "char" + when /__HI__/ then "short" + when /__SI__/ then "int" + when /__DI__/ then "long long" + when /__word__/ then "long" + else "int" + end + + def_type = "unsigned #{def_type}" if type =~ /unsigned/ + else + final_type = parts.pop + def_type = parts.join(" ") + end + + if type = TYPE_MAP[def_type] + code << "rbx.platform.typedef.#{final_type} = #{type}" + TYPE_MAP[final_type] = TYPE_MAP[def_type] + else + # Fallback to an ordinary pointer if we don't know the type + if def_type =~ /\*/ + code << "rbx.platform.typedef.#{final_type} = pointer" + TYPE_MAP[final_type] = :pointer + end + end + end + + code.sort.join("\n") + end + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator.rb new file mode 100644 index 0000000..ba2d8c5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator.rb @@ -0,0 +1,137 @@ +require 'tempfile' + +module FFI + + # @private + class TypesGenerator + + ## + # Maps different C types to the C type representations we use + + TYPE_MAP = { + "char" => :char, + "signed char" => :char, + "__signed char" => :char, + "unsigned char" => :uchar, + + "short" => :short, + "signed short" => :short, + "signed short int" => :short, + "unsigned short" => :ushort, + "unsigned short int" => :ushort, + + "int" => :int, + "signed int" => :int, + "unsigned int" => :uint, + + "long" => :long, + "long int" => :long, + "signed long" => :long, + "signed long int" => :long, + "unsigned long" => :ulong, + "unsigned long int" => :ulong, + "long unsigned int" => :ulong, + + "long long" => :long_long, + "long long int" => :long_long, + "signed long long" => :long_long, + "signed long long int" => :long_long, + "unsigned long long" => :ulong_long, + "unsigned long long int" => :ulong_long, + + "char *" => :string, + "void *" => :pointer, + } + + def self.generate(options = {}) + typedefs = nil + Tempfile.open 'ffi_types_generator' do |io| + io.puts <<-C +#include +#include +#include +#if !(defined(WIN32)) +#include +#include +#include +#endif + C + + io.close + cc = ENV['CC'] || 'gcc' + cmd = "#{cc} -E -x c #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -c" + if options[:input] + typedefs = File.read(options[:input]) + elsif options[:remote] + typedefs = `ssh #{options[:remote]} #{cmd} - < #{io.path}` + else + typedefs = `#{cmd} #{io.path}` + end + end + + code = [] + + typedefs.each_line do |type| + # We only care about single line typedef + next unless type =~ /typedef/ + # Ignore unions or structs + next if type =~ /union|struct/ + + # strip off the starting typedef and ending ; + type.gsub!(/^(.*typedef\s*)/, "") + type.gsub!(/\s*;\s*$/, "") + + parts = type.split(/\s+/) + def_type = parts.join(" ") + + # GCC does mapping with __attribute__ stuf, also see + # http://hal.cs.berkeley.edu/cil/cil016.html section 16.2.7. Problem + # with this is that the __attribute__ stuff can either occur before or + # after the new type that is defined... 
+ if type =~ /__attribute__/ + if parts.last =~ /__QI__|__HI__|__SI__|__DI__|__word__/ + + # In this case, the new type is BEFORE __attribute__ we need to + # find the final_type as the type before the part that starts with + # __attribute__ + final_type = "" + parts.each do |p| + break if p =~ /__attribute__/ + final_type = p + end + else + final_type = parts.pop + end + + def_type = case type + when /__QI__/ then "char" + when /__HI__/ then "short" + when /__SI__/ then "int" + when /__DI__/ then "long long" + when /__word__/ then "long" + else "int" + end + + def_type = "unsigned #{def_type}" if type =~ /unsigned/ + else + final_type = parts.pop + def_type = parts.join(" ") + end + + if type = TYPE_MAP[def_type] + code << "rbx.platform.typedef.#{final_type} = #{type}" + TYPE_MAP[final_type] = TYPE_MAP[def_type] + else + # Fallback to an ordinary pointer if we don't know the type + if def_type =~ /\*/ + code << "rbx.platform.typedef.#{final_type} = pointer" + TYPE_MAP[final_type] = :pointer + end + end + end + + code.sort.join("\n") + end + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/types 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/types 2.rb new file mode 100644 index 0000000..90f50c1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/types 2.rb @@ -0,0 +1,194 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# see {file:README} +module FFI + + # @param [Type, DataConverter, Symbol] old type definition used by {FFI.find_type} + # @param [Symbol] add new type definition's name to add + # @return [Type] + # Add a definition type to type definitions. 
+ def self.typedef(old, add) + TypeDefs[add] = self.find_type(old) + end + + # (see FFI.typedef) + def self.add_typedef(old, add) + typedef old, add + end + + + # @param [Type, DataConverter, Symbol] name + # @param [Hash] type_map if nil, {FFI::TypeDefs} is used + # @return [Type] + # Find a type in +type_map+ ({FFI::TypeDefs}, by default) from + # a type objet, a type name (symbol). If +name+ is a {DataConverter}, + # a new {Type::Mapped} is created. + def self.find_type(name, type_map = nil) + if name.is_a?(Type) + name + + elsif type_map && type_map.has_key?(name) + type_map[name] + + elsif TypeDefs.has_key?(name) + TypeDefs[name] + + elsif name.is_a?(DataConverter) + (type_map || TypeDefs)[name] = Type::Mapped.new(name) + else + raise TypeError, "unable to resolve type '#{name}'" + end + end + + # List of type definitions + TypeDefs.merge!({ + # The C void type; only useful for function return types + :void => Type::VOID, + + # C boolean type + :bool => Type::BOOL, + + # C nul-terminated string + :string => Type::STRING, + + # C signed char + :char => Type::CHAR, + # C unsigned char + :uchar => Type::UCHAR, + + # C signed short + :short => Type::SHORT, + # C unsigned short + :ushort => Type::USHORT, + + # C signed int + :int => Type::INT, + # C unsigned int + :uint => Type::UINT, + + # C signed long + :long => Type::LONG, + + # C unsigned long + :ulong => Type::ULONG, + + # C signed long long integer + :long_long => Type::LONG_LONG, + + # C unsigned long long integer + :ulong_long => Type::ULONG_LONG, + + # C single precision float + :float => Type::FLOAT, + + # C double precision float + :double => Type::DOUBLE, + + # C long double + :long_double => Type::LONGDOUBLE, + + # Native memory address + :pointer => Type::POINTER, + + # 8 bit signed integer + :int8 => Type::INT8, + # 8 bit unsigned integer + :uint8 => Type::UINT8, + + # 16 bit signed integer + :int16 => Type::INT16, + # 16 bit unsigned integer + :uint16 => Type::UINT16, + + # 32 bit signed integer + :int32 => Type::INT32, + # 32 bit unsigned integer + :uint32 => Type::UINT32, + + # 64 bit signed integer + :int64 => Type::INT64, + # 64 bit unsigned integer + :uint64 => Type::UINT64, + + :buffer_in => Type::BUFFER_IN, + :buffer_out => Type::BUFFER_OUT, + :buffer_inout => Type::BUFFER_INOUT, + + # Used in function prototypes to indicate the arguments are variadic + :varargs => Type::VARARGS, + }) + + # This will convert a pointer to a Ruby string (just like `:string`), but + # also allow to work with the pointer itself. This is useful when you want + # a Ruby string already containing a copy of the data, but also the pointer + # to the data for you to do something with it, like freeing it, in case the + # library handed the memory off to the caller (Ruby-FFI). + # + # It's {typedef}'d as +:strptr+. + class StrPtrConverter + extend DataConverter + native_type Type::POINTER + + # @param [Pointer] val + # @param ctx not used + # @return [Array(String, Pointer)] + # Returns a [ String, Pointer ] tuple so the C memory for the string can be freed + def self.from_native(val, ctx) + [ val.null? ? nil : val.get_string(0), val ] + end + end + + typedef(StrPtrConverter, :strptr) + + # @param type +type+ is an instance of class accepted by {FFI.find_type} + # @return [Numeric] + # Get +type+ size, in bytes. + def self.type_size(type) + find_type(type).size + end + + # Load all the platform dependent types + begin + File.open(File.join(Platform::CONF_DIR, 'types.conf'), "r") do |f| + prefix = "rbx.platform.typedef." 
+ f.each_line { |line| + if line.index(prefix) == 0 + new_type, orig_type = line.chomp.slice(prefix.length..-1).split(/\s*=\s*/) + typedef(orig_type.to_sym, new_type.to_sym) + end + } + end + typedef :pointer, :caddr_t + rescue Errno::ENOENT + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/types.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/types.rb new file mode 100644 index 0000000..90f50c1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/types.rb @@ -0,0 +1,194 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# see {file:README} +module FFI + + # @param [Type, DataConverter, Symbol] old type definition used by {FFI.find_type} + # @param [Symbol] add new type definition's name to add + # @return [Type] + # Add a definition type to type definitions. + def self.typedef(old, add) + TypeDefs[add] = self.find_type(old) + end + + # (see FFI.typedef) + def self.add_typedef(old, add) + typedef old, add + end + + + # @param [Type, DataConverter, Symbol] name + # @param [Hash] type_map if nil, {FFI::TypeDefs} is used + # @return [Type] + # Find a type in +type_map+ ({FFI::TypeDefs}, by default) from + # a type objet, a type name (symbol). If +name+ is a {DataConverter}, + # a new {Type::Mapped} is created. 
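FFI.typedef, added in types.rb above, registers an alias that attach_function can then resolve like any builtin type. A minimal sketch against libc's htonl; the :in_addr_t alias is just an example name:

    require 'ffi'

    # :uint32 is an existing builtin, :in_addr_t is the new alias.
    FFI.typedef :uint32, :in_addr_t

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :htonl, [ :in_addr_t ], :in_addr_t
    end

    p LibC.htonl(1)   # => 16777216 on little-endian hosts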
+ def self.find_type(name, type_map = nil) + if name.is_a?(Type) + name + + elsif type_map && type_map.has_key?(name) + type_map[name] + + elsif TypeDefs.has_key?(name) + TypeDefs[name] + + elsif name.is_a?(DataConverter) + (type_map || TypeDefs)[name] = Type::Mapped.new(name) + else + raise TypeError, "unable to resolve type '#{name}'" + end + end + + # List of type definitions + TypeDefs.merge!({ + # The C void type; only useful for function return types + :void => Type::VOID, + + # C boolean type + :bool => Type::BOOL, + + # C nul-terminated string + :string => Type::STRING, + + # C signed char + :char => Type::CHAR, + # C unsigned char + :uchar => Type::UCHAR, + + # C signed short + :short => Type::SHORT, + # C unsigned short + :ushort => Type::USHORT, + + # C signed int + :int => Type::INT, + # C unsigned int + :uint => Type::UINT, + + # C signed long + :long => Type::LONG, + + # C unsigned long + :ulong => Type::ULONG, + + # C signed long long integer + :long_long => Type::LONG_LONG, + + # C unsigned long long integer + :ulong_long => Type::ULONG_LONG, + + # C single precision float + :float => Type::FLOAT, + + # C double precision float + :double => Type::DOUBLE, + + # C long double + :long_double => Type::LONGDOUBLE, + + # Native memory address + :pointer => Type::POINTER, + + # 8 bit signed integer + :int8 => Type::INT8, + # 8 bit unsigned integer + :uint8 => Type::UINT8, + + # 16 bit signed integer + :int16 => Type::INT16, + # 16 bit unsigned integer + :uint16 => Type::UINT16, + + # 32 bit signed integer + :int32 => Type::INT32, + # 32 bit unsigned integer + :uint32 => Type::UINT32, + + # 64 bit signed integer + :int64 => Type::INT64, + # 64 bit unsigned integer + :uint64 => Type::UINT64, + + :buffer_in => Type::BUFFER_IN, + :buffer_out => Type::BUFFER_OUT, + :buffer_inout => Type::BUFFER_INOUT, + + # Used in function prototypes to indicate the arguments are variadic + :varargs => Type::VARARGS, + }) + + # This will convert a pointer to a Ruby string (just like `:string`), but + # also allow to work with the pointer itself. This is useful when you want + # a Ruby string already containing a copy of the data, but also the pointer + # to the data for you to do something with it, like freeing it, in case the + # library handed the memory off to the caller (Ruby-FFI). + # + # It's {typedef}'d as +:strptr+. + class StrPtrConverter + extend DataConverter + native_type Type::POINTER + + # @param [Pointer] val + # @param ctx not used + # @return [Array(String, Pointer)] + # Returns a [ String, Pointer ] tuple so the C memory for the string can be freed + def self.from_native(val, ctx) + [ val.null? ? nil : val.get_string(0), val ] + end + end + + typedef(StrPtrConverter, :strptr) + + # @param type +type+ is an instance of class accepted by {FFI.find_type} + # @return [Numeric] + # Get +type+ size, in bytes. + def self.type_size(type) + find_type(type).size + end + + # Load all the platform dependent types + begin + File.open(File.join(Platform::CONF_DIR, 'types.conf'), "r") do |f| + prefix = "rbx.platform.typedef." 
+ f.each_line { |line| + if line.index(prefix) == 0 + new_type, orig_type = line.chomp.slice(prefix.length..-1).split(/\s*=\s*/) + typedef(orig_type.to_sym, new_type.to_sym) + end + } + end + typedef :pointer, :caddr_t + rescue Errno::ENOENT + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/union 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/union 2.rb new file mode 100644 index 0000000..38414ab --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/union 2.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2009 Andrea Fazzi +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/struct' + +module FFI + + class Union < FFI::Struct + def self.builder + b = StructLayoutBuilder.new + b.union = true + b + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/union.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/union.rb new file mode 100644 index 0000000..38414ab --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/union.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2009 Andrea Fazzi +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
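The :strptr return type defined by StrPtrConverter in types.rb above yields both the Ruby String and the underlying Pointer, which matters when the caller must free the C memory. A small sketch against libc's strdup and free:

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :strdup, [ :string ], :strptr   # returns malloc'ed memory
      attach_function :free,   [ :pointer ], :void
    end

    str, ptr = LibC.strdup("hello")   # :strptr => [ String, Pointer ]
    puts str                          # => "hello"
    LibC.free(ptr)                    # caller releases the C copy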
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/struct' + +module FFI + + class Union < FFI::Struct + def self.builder + b = StructLayoutBuilder.new + b.union = true + b + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/variadic 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/variadic 2.rb new file mode 100644 index 0000000..2414055 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/variadic 2.rb @@ -0,0 +1,78 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
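FFI::Union, added in union.rb above, takes the same layout DSL as FFI::Struct but overlays every member at offset 0. A minimal sketch reinterpreting the same 8 bytes; the value is chosen so the bit pattern is the IEEE-754 encoding of 1.0:

    require 'ffi'

    class Value < FFI::Union
      layout :as_double, :double,
             :as_u64,    :uint64
    end

    v = Value.new
    v[:as_u64] = 0x3FF0000000000000   # IEEE-754 bit pattern of 1.0
    puts v[:as_double]                # => 1.0 (same bytes, reinterpreted)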
+# + +module FFI + class VariadicInvoker + def init(arg_types, type_map) + @fixed = Array.new + @type_map = type_map + arg_types.each_with_index do |type, i| + @fixed << type unless type == Type::VARARGS + end + end + + + def call(*args, &block) + param_types = Array.new(@fixed) + param_values = Array.new + @fixed.each_with_index do |t, i| + param_values << args[i] + end + i = @fixed.length + while i < args.length + param_types << FFI.find_type(args[i], @type_map) + param_values << args[i + 1] + i += 2 + end + invoke(param_types, param_values, &block) + end + + # + # Attach the invoker to module +mod+ as +mname+ + # + def attach(mod, mname) + invoker = self + params = "*args" + call = "call" + mod.module_eval <<-code + @@#{mname} = invoker + def self.#{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + def #{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + code + invoker + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/variadic.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/variadic.rb new file mode 100644 index 0000000..2414055 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/variadic.rb @@ -0,0 +1,78 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
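VariadicInvoker#call above consumes everything after the fixed arguments as (type, value) pairs. That is the calling convention a binding sees once it attaches a variadic C function with :varargs; a small sketch against libc's printf:

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # Fixed format string, then typed varargs.
      attach_function :printf, [ :string, :varargs ], :int
    end

    # Each vararg is passed as a (type, value) pair, two array slots at a time.
    LibC.printf("%s scored %d points\n", :string, "alice", :int, 42)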
+# + +module FFI + class VariadicInvoker + def init(arg_types, type_map) + @fixed = Array.new + @type_map = type_map + arg_types.each_with_index do |type, i| + @fixed << type unless type == Type::VARARGS + end + end + + + def call(*args, &block) + param_types = Array.new(@fixed) + param_values = Array.new + @fixed.each_with_index do |t, i| + param_values << args[i] + end + i = @fixed.length + while i < args.length + param_types << FFI.find_type(args[i], @type_map) + param_values << args[i + 1] + i += 2 + end + invoke(param_types, param_values, &block) + end + + # + # Attach the invoker to module +mod+ as +mname+ + # + def attach(mod, mname) + invoker = self + params = "*args" + call = "call" + mod.module_eval <<-code + @@#{mname} = invoker + def self.#{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + def #{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + code + invoker + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/version 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/version 2.rb new file mode 100644 index 0000000..c0b65cf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/version 2.rb @@ -0,0 +1,3 @@ +module FFI + VERSION = '1.13.1' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/version.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/version.rb new file mode 100644 index 0000000..c0b65cf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/lib/ffi/version.rb @@ -0,0 +1,3 @@ +module FFI + VERSION = '1.13.1' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getlogin 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getlogin 2.rb new file mode 100644 index 0000000..6713021 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getlogin 2.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getlogin, [ ], :string +end +puts "getlogin=#{Foo.getlogin}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getlogin.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getlogin.rb new file mode 100644 index 0000000..6713021 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getlogin.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getlogin, [ ], :string +end +puts "getlogin=#{Foo.getlogin}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getpid 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getpid 2.rb new file mode 100644 index 0000000..1720635 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getpid 2.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getpid, [ ], :int +end +puts "My pid=#{Foo.getpid}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getpid.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getpid.rb new file mode 100644 index 0000000..1720635 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/getpid.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getpid, [ ], :int +end +puts "My pid=#{Foo.getpid}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/gettimeofday 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/gettimeofday 2.rb new file mode 100644 index 0000000..864bbb6 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/gettimeofday 2.rb @@ -0,0 +1,18 @@ +require 'ffi' +require 'rbconfig' + +class Timeval < FFI::Struct + layout tv_sec: :ulong, tv_usec: :ulong +end +module LibC + extend FFI::Library + if FFI::Platform.windows? + ffi_lib RbConfig::CONFIG["LIBRUBY_SO"] + else + ffi_lib FFI::Library::LIBC + end + attach_function :gettimeofday, [ :pointer, :pointer ], :int +end +t = Timeval.new +LibC.gettimeofday(t.pointer, nil) +puts "t.tv_sec=#{t[:tv_sec]} t.tv_usec=#{t[:tv_usec]}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/gettimeofday.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/gettimeofday.rb new file mode 100644 index 0000000..864bbb6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/gettimeofday.rb @@ -0,0 +1,18 @@ +require 'ffi' +require 'rbconfig' + +class Timeval < FFI::Struct + layout tv_sec: :ulong, tv_usec: :ulong +end +module LibC + extend FFI::Library + if FFI::Platform.windows? + ffi_lib RbConfig::CONFIG["LIBRUBY_SO"] + else + ffi_lib FFI::Library::LIBC + end + attach_function :gettimeofday, [ :pointer, :pointer ], :int +end +t = Timeval.new +LibC.gettimeofday(t.pointer, nil) +puts "t.tv_sec=#{t[:tv_sec]} t.tv_usec=#{t[:tv_usec]}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/hello 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/hello 2.rb new file mode 100644 index 0000000..f2ccf37 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/hello 2.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function("cputs", "puts", [ :string ], :int) +end +Foo.cputs("Hello, World via libc puts using FFI on MRI ruby") diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/hello.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/hello.rb new file mode 100644 index 0000000..f2ccf37 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/hello.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function("cputs", "puts", [ :string ], :int) +end +Foo.cputs("Hello, World via libc puts using FFI on MRI ruby") diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/inotify 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/inotify 2.rb new file mode 100644 index 0000000..018d78c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/inotify 2.rb @@ -0,0 +1,60 @@ +require 'ffi' + +module Inotify + extend FFI::Library + ffi_lib FFI::Library::LIBC + class Event < FFI::Struct + layout \ + :wd, :int, + :mask, :uint, + :cookie, :uint, + :len, :uint + end + attach_function :init, :inotify_init, [ ], :int + attach_function :add_watch, :inotify_add_watch, [ :int, :string, :uint ], :int + attach_function :rm_watch, :inotify_rm_watch, [ :int, :uint ], :int + attach_function :read, [ :int, :buffer_out, :uint ], :int + IN_ACCESS=0x00000001 + IN_MODIFY=0x00000002 + IN_ATTRIB=0x00000004 + IN_CLOSE_WRITE=0x00000008 + IN_CLOSE_NOWRITE=0x00000010 + IN_CLOSE=(IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_OPEN=0x00000020 + IN_MOVED_FROM=0x00000040 + IN_MOVED_TO=0x00000080 + IN_MOVE= (IN_MOVED_FROM | IN_MOVED_TO) + IN_CREATE=0x00000100 + IN_DELETE=0x00000200 + IN_DELETE_SELF=0x00000400 + IN_MOVE_SELF=0x00000800 + # Events sent by the kernel. 
+ IN_UNMOUNT=0x00002000 + IN_Q_OVERFLOW=0x00004000 + IN_IGNORED=0x00008000 + IN_ONLYDIR=0x01000000 + IN_DONT_FOLLOW=0x02000000 + IN_MASK_ADD=0x20000000 + IN_ISDIR=0x40000000 + IN_ONESHOT=0x80000000 + IN_ALL_EVENTS=(IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE \ + | IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM \ + | IN_MOVED_TO | IN_CREATE | IN_DELETE \ + | IN_DELETE_SELF | IN_MOVE_SELF) + +end +if $0 == __FILE__ + fd = Inotify.init + puts "fd=#{fd}" + wd = Inotify.add_watch(fd, "/tmp/", Inotify::IN_ALL_EVENTS) + fp = FFI::IO.for_fd(fd) + puts "wfp=#{fp}" + while true + buf = FFI::Buffer.alloc_out(Inotify::Event.size + 4096, 1, false) + ev = Inotify::Event.new buf + ready = IO.select([ fp ], nil, nil, nil) + n = Inotify.read(fd, buf, buf.total) + puts "Read #{n} bytes from inotify fd" + puts "event.wd=#{ev[:wd]} mask=#{ev[:mask]} len=#{ev[:len]} name=#{ev[:len] > 0 ? buf.get_string(16) : 'unknown'}" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/inotify.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/inotify.rb new file mode 100644 index 0000000..018d78c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/inotify.rb @@ -0,0 +1,60 @@ +require 'ffi' + +module Inotify + extend FFI::Library + ffi_lib FFI::Library::LIBC + class Event < FFI::Struct + layout \ + :wd, :int, + :mask, :uint, + :cookie, :uint, + :len, :uint + end + attach_function :init, :inotify_init, [ ], :int + attach_function :add_watch, :inotify_add_watch, [ :int, :string, :uint ], :int + attach_function :rm_watch, :inotify_rm_watch, [ :int, :uint ], :int + attach_function :read, [ :int, :buffer_out, :uint ], :int + IN_ACCESS=0x00000001 + IN_MODIFY=0x00000002 + IN_ATTRIB=0x00000004 + IN_CLOSE_WRITE=0x00000008 + IN_CLOSE_NOWRITE=0x00000010 + IN_CLOSE=(IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_OPEN=0x00000020 + IN_MOVED_FROM=0x00000040 + IN_MOVED_TO=0x00000080 + IN_MOVE= (IN_MOVED_FROM | IN_MOVED_TO) + IN_CREATE=0x00000100 + IN_DELETE=0x00000200 + IN_DELETE_SELF=0x00000400 + IN_MOVE_SELF=0x00000800 + # Events sent by the kernel. + IN_UNMOUNT=0x00002000 + IN_Q_OVERFLOW=0x00004000 + IN_IGNORED=0x00008000 + IN_ONLYDIR=0x01000000 + IN_DONT_FOLLOW=0x02000000 + IN_MASK_ADD=0x20000000 + IN_ISDIR=0x40000000 + IN_ONESHOT=0x80000000 + IN_ALL_EVENTS=(IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE \ + | IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM \ + | IN_MOVED_TO | IN_CREATE | IN_DELETE \ + | IN_DELETE_SELF | IN_MOVE_SELF) + +end +if $0 == __FILE__ + fd = Inotify.init + puts "fd=#{fd}" + wd = Inotify.add_watch(fd, "/tmp/", Inotify::IN_ALL_EVENTS) + fp = FFI::IO.for_fd(fd) + puts "wfp=#{fp}" + while true + buf = FFI::Buffer.alloc_out(Inotify::Event.size + 4096, 1, false) + ev = Inotify::Event.new buf + ready = IO.select([ fp ], nil, nil, nil) + n = Inotify.read(fd, buf, buf.total) + puts "Read #{n} bytes from inotify fd" + puts "event.wd=#{ev[:wd]} mask=#{ev[:mask]} len=#{ev[:len]} name=#{ev[:len] > 0 ? 
buf.get_string(16) : 'unknown'}" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/pty 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/pty 2.rb new file mode 100644 index 0000000..8b6b885 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/pty 2.rb @@ -0,0 +1,75 @@ +require 'ffi' + +module PTY + private + module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :forkpty, [ :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :openpty, [ :buffer_out, :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :login_tty, [ :int ], :int + attach_function :close, [ :int ], :int + attach_function :strerror, [ :int ], :string + attach_function :fork, [], :int + attach_function :execv, [ :string, :buffer_in ], :int + attach_function :execvp, [ :string, :buffer_in ], :int + attach_function :dup2, [ :int, :int ], :int + attach_function :dup, [ :int ], :int + end + Buffer = FFI::Buffer + def self.build_args(args) + cmd = args.shift + cmd_args = args.map do |arg| + MemoryPointer.from_string(arg) + end + exec_args = MemoryPointer.new(:pointer, 1 + cmd_args.length + 1) + exec_cmd = MemoryPointer.from_string(cmd) + exec_args[0].put_pointer(0, exec_cmd) + cmd_args.each_with_index do |arg, i| + exec_args[i + 1].put_pointer(0, arg) + end + [ cmd, exec_args ] + end + public + def self.getpty(*args) + mfdp = Buffer.new :int + name = Buffer.new 1024 + # + # All the execv setup is done in the parent, since doing anything other than + # execv in the child after fork is really flakey + # + exec_cmd, exec_args = build_args(args) + pid = LibC.forkpty(mfdp, name, nil, nil) + raise "forkpty failed: #{LibC.strerror(FFI.errno)}" if pid < 0 + if pid == 0 + LibC.execvp(exec_cmd, exec_args) + exit 1 + end + masterfd = mfdp.get_int(0) + rfp = FFI::IO.for_fd(masterfd, "r") + wfp = FFI::IO.for_fd(LibC.dup(masterfd), "w") + if block_given? + yield rfp, wfp, pid + rfp.close unless rfp.closed? + wfp.close unless wfp.closed? + else + [ rfp, wfp, pid ] + end + end + def self.spawn(*args, &block) + self.getpty("/bin/sh", "-c", args[0], &block) + end +end +module LibC + extend FFI::Library + attach_function :close, [ :int ], :int + attach_function :write, [ :int, :buffer_in, :ulong ], :long + attach_function :read, [ :int, :buffer_out, :ulong ], :long +end +PTY.getpty("/bin/ls", "-alR", "/") { |rfd, wfd, pid| +#PTY.spawn("ls -laR /") { |rfd, wfd, pid| + puts "child pid=#{pid}" + while !rfd.eof? 
&& (buf = rfd.gets) + puts "child: '#{buf.strip}'" + end +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/pty.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/pty.rb new file mode 100644 index 0000000..8b6b885 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/pty.rb @@ -0,0 +1,75 @@ +require 'ffi' + +module PTY + private + module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :forkpty, [ :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :openpty, [ :buffer_out, :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :login_tty, [ :int ], :int + attach_function :close, [ :int ], :int + attach_function :strerror, [ :int ], :string + attach_function :fork, [], :int + attach_function :execv, [ :string, :buffer_in ], :int + attach_function :execvp, [ :string, :buffer_in ], :int + attach_function :dup2, [ :int, :int ], :int + attach_function :dup, [ :int ], :int + end + Buffer = FFI::Buffer + def self.build_args(args) + cmd = args.shift + cmd_args = args.map do |arg| + MemoryPointer.from_string(arg) + end + exec_args = MemoryPointer.new(:pointer, 1 + cmd_args.length + 1) + exec_cmd = MemoryPointer.from_string(cmd) + exec_args[0].put_pointer(0, exec_cmd) + cmd_args.each_with_index do |arg, i| + exec_args[i + 1].put_pointer(0, arg) + end + [ cmd, exec_args ] + end + public + def self.getpty(*args) + mfdp = Buffer.new :int + name = Buffer.new 1024 + # + # All the execv setup is done in the parent, since doing anything other than + # execv in the child after fork is really flakey + # + exec_cmd, exec_args = build_args(args) + pid = LibC.forkpty(mfdp, name, nil, nil) + raise "forkpty failed: #{LibC.strerror(FFI.errno)}" if pid < 0 + if pid == 0 + LibC.execvp(exec_cmd, exec_args) + exit 1 + end + masterfd = mfdp.get_int(0) + rfp = FFI::IO.for_fd(masterfd, "r") + wfp = FFI::IO.for_fd(LibC.dup(masterfd), "w") + if block_given? + yield rfp, wfp, pid + rfp.close unless rfp.closed? + wfp.close unless wfp.closed? + else + [ rfp, wfp, pid ] + end + end + def self.spawn(*args, &block) + self.getpty("/bin/sh", "-c", args[0], &block) + end +end +module LibC + extend FFI::Library + attach_function :close, [ :int ], :int + attach_function :write, [ :int, :buffer_in, :ulong ], :long + attach_function :read, [ :int, :buffer_out, :ulong ], :long +end +PTY.getpty("/bin/ls", "-alR", "/") { |rfd, wfd, pid| +#PTY.spawn("ls -laR /") { |rfd, wfd, pid| + puts "child pid=#{pid}" + while !rfd.eof? && (buf = rfd.gets) + puts "child: '#{buf.strip}'" + end +} diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/qsort 2.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/qsort 2.rb new file mode 100644 index 0000000..58622c1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/qsort 2.rb @@ -0,0 +1,20 @@ +require 'ffi' + +module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + callback :qsort_cmp, [ :pointer, :pointer ], :int + attach_function :qsort, [ :pointer, :ulong, :ulong, :qsort_cmp ], :int +end + +p = FFI::MemoryPointer.new(:int, 2) +p.put_array_of_int32(0, [ 2, 1 ]) +puts "ptr=#{p.inspect}" +puts "Before qsort #{p.get_array_of_int32(0, 2).join(', ')}" +LibC.qsort(p, 2, 4) do |p1, p2| + i1 = p1.get_int32(0) + i2 = p2.get_int32(0) + puts "In block: comparing #{i1} and #{i2}" + i1 < i2 ? -1 : i1 > i2 ? 
1 : 0 +end +puts "After qsort #{p.get_array_of_int32(0, 2).join(', ')}" diff --git a/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/qsort.rb b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/qsort.rb new file mode 100644 index 0000000..58622c1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/ffi-1.13.1/samples/qsort.rb @@ -0,0 +1,20 @@ +require 'ffi' + +module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + callback :qsort_cmp, [ :pointer, :pointer ], :int + attach_function :qsort, [ :pointer, :ulong, :ulong, :qsort_cmp ], :int +end + +p = FFI::MemoryPointer.new(:int, 2) +p.put_array_of_int32(0, [ 2, 1 ]) +puts "ptr=#{p.inspect}" +puts "Before qsort #{p.get_array_of_int32(0, 2).join(', ')}" +LibC.qsort(p, 2, 4) do |p1, p2| + i1 = p1.get_int32(0) + i2 = p2.get_int32(0) + puts "In block: comparing #{i1} and #{i2}" + i1 < i2 ? -1 : i1 > i2 ? 1 : 0 +end +puts "After qsort #{p.get_array_of_int32(0, 2).join(', ')}" diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/.gitignore b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/.gitignore new file mode 100644 index 0000000..ee73f85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/.gitignore @@ -0,0 +1,24 @@ +## MAC OS +.DS_Store + +## TEXTMATE +*.tmproj +tmtags + +## EMACS +*~ +\#* +.\#* + +## VIM +*.swp + +## PROJECT::GENERAL +coverage +doc +.yardoc +pkg + +## PROJECT::SPECIFIC +Gemfile.lock +*.gem diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/.rspec b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/.rspec new file mode 100644 index 0000000..5f16476 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/.rspec @@ -0,0 +1,2 @@ +--color +--format progress diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/CHANGELOG b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/CHANGELOG new file mode 100644 index 0000000..a1d6c74 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/CHANGELOG @@ -0,0 +1,93 @@ +2.0.4 / 2013-09-19 + +* Bug fixes + + * Restore 1.8 compatibility per @zhaocai in #6 + +2.0.3 / 2013-08-11 + +* Enhancements + + * If you pass a non-String to #find, the :read will be automatically applied + +2.0.2 / 2013-08-11 + +* Enhancements + + * You can pass identity rules as procs that take record1 and record2 in their original forms (note they should return nil if not sure) + +2.0.1 / 2013-06-06 + +* Bug fixes + + * Fix Ruby 1.8 compatibility - thanks @trishume @fuzzy76 ! https://github.com/seamusabshere/fuzzy_match/issues/5 + +2.0.0 / 2013-05-22 + +* Breaking changes + + * normalizers removed - use groupings instead + * first_grouping_decides removed + * FuzzyMatch#free gone + +* Enhancements + + * chained groupings! + * faster and simpler structure + * FuzzyMatch#find_with_score returns [record, dice_score, lev_score] + +1.5.0 / 2013-04-03 + +* Breaking changes + + * No longer automatically calls to_regexp on rules - you must pass Regexps to normalizers, groupings, etc. + +* Enhancements + + * FuzzyMatch#find_best returns all top results with the same score - thanks @ihough ! + * Doesn't require to_regexp gem for you - you can still use it if you want to convert strings into regexps safely, if you want, tho + +1.4.1 / 2013-01-17 + +* Bug fixes + + * Don't die when you're comparing a string of length 1 and another string of length less than three (thanks @ihough !) + +* Enhancements + + * '2A' is allowed to match '2 A'... 
funky stuff with pair distance and short strings + * FuzzyMatch#find_all_with_score returns a sorted array of records with their scores - thanks @brycesenz! (https://github.com/seamusabshere/fuzzy_match/issues/3) + +1.4.0 / 2012-09-07 + +* Breaking changes + + * Option keys are no longer symbolized automatically - make sure you do that if there's any chance they'll be strings + * active_record_inline_schema is no longer a runtime dependency - add it to your Gemfile if you use FuzzyMatch::CachedResult + +* Enhancements + + * Tiny bit better #explain(needle) + * Remove dependency on ActiveSupport + +1.3.3 / 2012-04-13 + +* Enhancements + + * Now you must require 'fuzzy_match/cached_result' if you want to use it. + * Use active_record_inline_schema to create the FuzzyMatch::CachedResult table + * Test against CohortAnalysis, the replacement for CohortScope + * Fix some other random deprecations (like set_primary_key) + +1.3.2 / 2012-02-24 + +* Enhancements + + * Start keeping a changelog! + * renamed blockings to groupings + * cleaned up tests + +* Bug fixes + + * better handling for one-letter similiarities like 'X foo' vs 'X bar' which couldn't be detected by pair distance + * take deprecated option :tighteners as :normalizers diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/Gemfile b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/Gemfile new file mode 100644 index 0000000..e3e0762 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/Gemfile @@ -0,0 +1,11 @@ +source :rubygems + +gemspec + +# bin/fuzzy_match development +gem 'activesupport' +gem 'remote_table' +gem 'thor' +gem 'to_regexp' +gem 'perftools.rb' +gem 'pry' diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/LICENSE b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/LICENSE new file mode 100644 index 0000000..968ab96 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/LICENSE @@ -0,0 +1,20 @@ +Copyright 2011 Brighter Planet, Inc. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/README.markdown b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/README.markdown new file mode 100644 index 0000000..03d29c3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/README.markdown @@ -0,0 +1,191 @@ +## Top 3 reasons you should use FuzzyMatch + +1. *intelligent defaults*: it uses a combination of Pair Distance (2-gram) and Levenshtein Edit Distance to effectively match many examples with no configuration +2. 
*all-vs-all*: it takes care of finding the optimal match by comparing everything against everything else (when that's necessary) +3. *refinable*: you might get to 90% with no configuration, but if you need to go beyond you can use regexps, grouping, and stop words + +It solves many mid-range matching problems — if your haystack is ~10k records — if you can winnow down the initial possibilities at the database level and only bring good contenders into app memory — why not give it a shot? + +# FuzzyMatch + +Find a needle in a haystack based on string similarity and regular expression rules. + +Replaces [`loose_tight_dictionary`](https://github.com/seamusabshere/loose_tight_dictionary) because that was a confusing name. + +Warning! `normalizers` are gone in version 2 and above! See the CHANGELOG and check out enhanced (and hopefully more intuitive) `groupings`. + +![diagram of matching process](https://raw.github.com/seamusabshere/fuzzy_match/master/highlevel.png) + +## Quickstart + + >> require 'fuzzy_match' + => true + >> FuzzyMatch.new(['seamus', 'andy', 'ben']).find('Shamus') + => "seamus" + +See also the blog post [Fuzzy match in Ruby](http://numbers.brighterplanet.com/2012/01/18/fuzzy-match-in-ruby/). + +## Default matching (string similarity) + +At the core, and even if you configure nothing else, string similarity (calculated by "pair distance" aka Dice's Coefficient) is used to compare records. + +You can tell `FuzzyMatch` what field or method to use via the `:read` option... for example, let's say you want to match a `Country` object like `#` + + >> fz = FuzzyMatch.new(Country.all, :read => :name) + => # + >> fz.find('youruguay') + => # + +## Optional rules (regular expressions) + +You can improve the default matchings with rules. There are 3 different kinds of rules. Each rule is a regular expression. + +We suggest that you **first try without any rules** and only define them to improve matching, prevent false positives, etc. + +### Groupings + +Group records together. The two laws of groupings: + +1. If a needle matches a grouping, only compare it with straws in the same grouping; (the "buddies vs buddies" rule) +2. If a needle doesn't match any grouping, only compare it with straws that also don't match ANY grouping (the "misfits vs misfits" rule) + +The two laws of chained groupings: (new in v2.0 and rather important) + +1. Sub-groupings (e.g., `/plaza/i` below) only match if their primary (e.g., `/ramada/i`) does +2. In final grouping decisions, sub-groupings win over primaries (so "Ramada Inn" is NOT grouped with "Ramada Plaza", but if you removed `/plaza/i` sub-grouping, then they would be grouped together) + +Hopefully they are rather intuitive once you start using them. + +[![screenshot of spreadsheet of groupings](https://raw.github.com/seamusabshere/fuzzy_match/master/groupings-screenshot.png)](https://docs.google.com/spreadsheet/pub?key=0AkCJNpm9Ks6JdG4xSWhfWFlOV1RsZ2NCeU9seGx6cnc&single=true&gid=0&output=html) + +That will... + +* separate "Orient Express Hotel" and "Ramada Conference Center Mandarin" from real Mandarin Oriental hotels +* keep "Trump Hotel Collection" away from "Luxury Collection" (another real hotel brand) without messing with the word "Luxury" +* make sure that "Ramada Plaza" are always grouped with other RPs—and not with plain old Ramadas—and vice versa +* splits out Hyatts into their different brands +* and more + +You specify chained groupings as arrays of regexps: + + groupings = [ + /mandarin/i, + /trump/i, + [ /ramada/i, /plaza/i ], + ... 
+ ] + fz = FuzzyMatch.new(haystack, groupings: groupings) + +This way of specifying groupings is meant to be easy to load from a CSV, like `bin/fuzzy_match` does. + +Formerly called "blockings," but that was jargon that confused people. + +### Identities + +Prevent impossible matches. Can be very confusing—see if you can make things work with groupings first. + +Adding an identity like `/(f)-?(\d50)/i` ensures that "Ford F-150" and "Ford F-250" never match. + +Note that identities do not establish certainty. They just say whether two records **could** be identical... then string similarity takes over. + +### Stop words + +Ignore common and/or meaningless words when doing string similarity. + +Adding a stop word like `THE` ensures that it is not taken into account when comparing "THE CAT", "THE DAT", and "THE CATT" + +Stop words are NOT removed when checking `:must_match_at_least_one_word` and when doing identities and groupings. + +## Find options + +* `read`: how to interpret each record in the 'haystack', either a Proc or a symbol +* `must_match_grouping`: don't return a match unless the needle fits into one of the groupings you specified +* `must_match_at_least_one_word`: don't return a match unless the needle shares at least one word with the match. Note that "Foo's" is treated like one word (so that it won't match "'s") and "Bolivia," is treated as just "bolivia" +* `gather_last_result`: enable `last_result` + +## Case sensitivity + +String similarity is case-insensitive. Everything is downcased before scoring. This is a change from previous versions. + +Be careful with uppercase letters in your rules; in general, things are downcased before comparing. + +## String similarity algorithm + +The algorithm is [Dice's Coefficient](http://en.wikipedia.org/wiki/Dice's_coefficient) (aka Pair Distance) because it seemed to work better than Longest Substring, Hamming, Jaro Winkler, Levenshtein (although see edge case below) etc. + +Here's a great explanation copied from [the wikipedia entry](http://en.wikipedia.org/wiki/Dice%27s_coefficient): + + to calculate the similarity between: + + night + nacht + + We would find the set of bigrams in each word: + + {ni,ig,gh,ht} + {na,ac,ch,ht} + + Each set has four elements, and the intersection of these two sets has only one element: ht. + + Inserting these numbers into the formula, we calculate, s = (2 · 1) / (4 + 4) = 0.25. + +### Edge case: when Dice's fails, use Levenshtein + +In edge cases where Dice's finds that two strings are equally similar to a third string, then Levenshtein distance is used. For example, pair distance considers "RATZ" and "CATZ" to be equally similar to "RITZ" so we invoke Levenshtein. + + >> 'RITZ'.pair_distance_similar 'RATZ' + => 0.3333333333333333 + >> 'RITZ'.pair_distance_similar 'CATZ' + => 0.3333333333333333 # pair distance can't tell the difference, so we fall back to levenshtein... + >> 'RITZ'.levenshtein_similar 'RATZ' + => 0.75 + >> 'RITZ'.levenshtein_similar 'CATZ' + => 0.5 # which properly shows that RATZ should win + +## Cached results + +Make sure you add active\_record\_inline\_schema to your gemfile. + +TODO write documentation. For now, please see how [we manually cache matches between aircraft and flight segments](https://github.com/brighterplanet/earth/blob/master/lib/earth/air/aircraft.rb). 
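The identities, stop words, and find options described in the vendored README above appear only as isolated rules there. A minimal usage sketch in Ruby, assuming the constructor accepts `:identities` and `:stop_words` keys as the README's wording suggests (the haystack and the exact rules below are invented purely for illustration):

    require 'fuzzy_match'

    # Illustrative haystack only -- not taken from the gem's docs.
    haystack = ['Ford F-150', 'Ford F-250', 'The Ritz Hotel']

    fz = FuzzyMatch.new(
      haystack,
      identities: [/(f)-?(\d50)/i],  # per the README, keeps "F-150" and "F-250" from ever matching
      stop_words: [/\bthe\b/i]       # per the README, ignore a common word when scoring similarity
    )

    fz.find('ford f150')             # expected to return "Ford F-150", never "Ford F-250"

Treat this as a sketch against the documented option names rather than an authoritative API reference; the gem's own specs remain the source of truth.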
+ +## Glossary + +The admittedly imperfect metaphor is "look for a needle in a haystack" + +* needle: the search term +* haystack: the records you are searching (your result will be an object from here) + +## Using amatch to make it faster + +You can optionally use [`amatch`](http://flori.github.com/amatch/) by [Florian Frank](https://github.com/flori) (thanks Flori!) to make string similarity calculations in a C extension. + + require 'fuzzy_match' + require 'amatch' # note that you have to require this... fuzzy_match won't require it for you + FuzzyMatch.engine = :amatch + +Otherwise, pure ruby versions of the string similarity algorithms derived from the [answer to a StackOverflow question](http://stackoverflow.com/questions/653157/a-better-similarity-ranking-algorithm-for-variable-length-strings) and [the text gem](https://github.com/threedaymonk/text/blob/master/lib/text/levenshtein.rb) are used. Thanks [marzagao](http://stackoverflow.com/users/10997/marzagao) and [threedaymonk](https://github.com/threedaymonk)! + +## Real-world usage + +

+[Brighter Planet logo]

+ +We use `fuzzy_match` for [data science at Brighter Planet](http://brighterplanet.com/research) and in production at + +* [Brighter Planet's impact estimate web service](http://impact.brighterplanet.com) +* [Brighter Planet's reference data web service](http://data.brighterplanet.com) + +We often combine it with [`remote_table`](https://github.com/seamusabshere/remote_table) and [`errata`](https://github.com/seamusabshere/errata): + +- download table with `remote_table` +- correct serious or repeated errors with `errata` +- `fuzzy_match` the rest + +## Authors + +* Seamus Abshere +* Ian Hough +* Andy Rossmeissl + +## Copyright + +Copyright 2013 Seamus Abshere diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/Rakefile b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/Rakefile new file mode 100644 index 0000000..7ed4815 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/Rakefile @@ -0,0 +1,5 @@ +require 'bundler' +Bundler::GemHelper.install_tasks + +require 'yard' +YARD::Rake::YardocTask.new diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/THANKS-WILLIAM-JAMES.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/THANKS-WILLIAM-JAMES.rb new file mode 100644 index 0000000..57ac8c2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/THANKS-WILLIAM-JAMES.rb @@ -0,0 +1,37 @@ +#!/usr/bin/env ruby + +# Thanks William James! +# http://www.ruby-forum.com/topic/95519#200484 +def cart_prod(*args) + args.inject([[]]){|old,lst| + new = [] + lst.each{|e| new += old.map{|c| c.dup << e }} + new + } +end + +require 'benchmark' + +a = [1,2,3] +b = [4,5] +Benchmark.bmbm do |x| + x.report("native") do + 500_000.times { a.product(b) } + end + x.report("william-james") do |x| + 500_000.times { cart_prod(a, b) } + end +end + +# results: +# $ ruby foo.rb +# Rehearsal ------------------------------------------------- +# native 0.720000 0.000000 0.720000 ( 0.729319) +# william-james 3.620000 0.010000 3.630000 ( 3.629198) +# ---------------------------------------- total: 4.350000sec +# +# user system total real +# native 0.710000 0.000000 0.710000 ( 0.708620) +# william-james 3.800000 0.000000 3.800000 ( 3.792538) + +# thanks for all the fish! 
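The THANKS-WILLIAM-JAMES.rb benchmark above times a hand-rolled Cartesian product against Ruby's built-in `Array#product`. As a side note in plain Ruby (not part of the gem), the two produce the same set of pairs, only in a different order; a quick check under that assumption:

    # Equivalent to the cart_prod in the script above, rewritten with flat_map (Ruby >= 1.9).
    def cart_prod(*args)
      args.inject([[]]) { |old, lst| lst.flat_map { |e| old.map { |c| c.dup << e } } }
    end

    a = [1, 2, 3]
    b = [4, 5]

    a.product(b).sort == cart_prod(a, b).sort  # => true (same pairs, different ordering)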
diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before-with-free.txt b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before-with-free.txt new file mode 100644 index 0000000..b5729bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before-with-free.txt @@ -0,0 +1,283 @@ + 1962 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/object/blank.rb:68:String + 1957 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1632:String + 342 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:String + 326 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Array + 325 benchmark/memory.rb:21:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/hasher.rb:20:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:22:ActiveSupport::OrderedHash + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:65:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/ordered_hash.rb:39:Array + 325 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:FuzzyMatch::Similarity + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:57:Array + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:25:FuzzyMatch::Score + 325 ./benchmark/../lib/fuzzy_match/score.rb:13:Float + 325 ./benchmark/../lib/fuzzy_match.rb:35:FuzzyMatch::Wrapper + 320 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:28:String + 303 ./benchmark/../lib/fuzzy_match/similarity.rb:21:Float + 201 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:String + 184 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:String + 140 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:__node__ + 41 ./benchmark/../lib/fuzzy_match/similarity.rb:49:__node__ + 31 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:Regexp + 28 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:19:__node__ + 22 ./benchmark/../lib/fuzzy_match/similarity.rb:57:__node__ + 22 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:__node__ + 21 ./benchmark/../lib/fuzzy_match.rb:199:FuzzyMatch::Grouping + 17 ./benchmark/../lib/fuzzy_match/similarity.rb:21:__node__ + 16 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:Class + 14 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:__node__ + 14 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__node__ + 13 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:__node__ + 13 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:__node__ + 11 ./benchmark/../lib/fuzzy_match/identity.rb:18:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:55:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:39:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__node__ + 10 ./benchmark/../lib/fuzzy_match.rb:193:FuzzyMatch::Identity + 9 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:String + 9 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:49:String + 9 
./benchmark/../lib/fuzzy_match/similarity.rb:42:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:41:__node__ + 8 ./benchmark/../lib/fuzzy_match/wrapper.rb:31:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:__node__ + 8 ./benchmark/../lib/fuzzy_match/similarity.rb:38:__node__ + 8 ./benchmark/../lib/fuzzy_match/score.rb:13:__node__ + 8 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:__node__ + 8 ./benchmark/../lib/fuzzy_match/grouping.rb:24:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:7:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:6:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:5:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:9:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:__node__ + 7 ./benchmark/../lib/fuzzy_match/similarity.rb:45:__node__ + 7 ./benchmark/../lib/fuzzy_match/score.rb:17:__node__ + 7 ./benchmark/../lib/fuzzy_match/identity.rb:19:__node__ + 7 ./benchmark/../lib/fuzzy_match/grouping.rb:27:__node__ + 7 ./benchmark/../lib/fuzzy_match.rb:209:String + 6 ./benchmark/../lib/fuzzy_match/wrapper.rb:8:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:44:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:13:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:25:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:21:__node__ + 6 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:__node__ + 6 ./benchmark/../lib/fuzzy_match/grouping.rb:22:__node__ + 5 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1640:String + 5 ./benchmark/../lib/fuzzy_match/wrapper.rb:34:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:8:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:33:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__node__ + 5 ./benchmark/../lib/fuzzy_match/score.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/result.rb:16:__node__ + 5 ./benchmark/../lib/fuzzy_match/identity.rb:10:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:26:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:25:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:15:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:33:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:30:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:59:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:54:__node__ + 4 ./benchmark/../lib/fuzzy_match/score.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:9:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:7:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:6:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:4:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:3:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:13:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:12:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:11:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:10:__node__ + 4 
./benchmark/../lib/fuzzy_match/extract_regexp.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:22:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:21:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:String + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:11:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:8:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:26:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:13:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:6:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:58:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:56:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:48:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:36:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:32:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:11:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:16:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:12:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:21:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:19:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:15:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:9:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:17:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:16:String + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:33:__node__ + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match.rb:77:Array + 2 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:387:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:String + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:35:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:20:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:16:__node__ + 2 
./benchmark/../lib/fuzzy_match/wrapper.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:28:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:String + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:24:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:15:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:10:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:60:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:50:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:46:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/score.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:18:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:17:String + 2 ./benchmark/../lib/fuzzy_match/score.rb:14:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:13:String + 2 ./benchmark/../lib/fuzzy_match/result.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/result.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/identity.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:21:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:String + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:9:Class + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:29:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match.rb:86:Array + 1 benchmark/memory.rb:50:String + 1 benchmark/memory.rb:49:FuzzyMatch + 1 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:492:URI::Generic + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:19:Process::Status + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:10:Bignum + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:63:Array + 1 
/Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:121:RemoteTable::Transformer + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:116:RemoteTable::Format::Delimited + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:111:RemoteTable::Properties + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:106:RemoteTable::LocalFile + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Regexp + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__node__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:String + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:Module + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/hash/keys.rb:18:Hash + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:1:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:13:Array + 1 ./benchmark/../lib/fuzzy_match/score.rb:10:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/result.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:Module + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:28:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:Regexp + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:17:Hash + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:String + 1 
./benchmark/../lib/fuzzy_match/grouping.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:10:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match.rb:62:FuzzyMatch::Wrapper + 1 ./benchmark/../lib/fuzzy_match.rb:39:String + 1 ./benchmark/../lib/fuzzy_match.rb:39:FuzzyMatch::Result + 1 ./benchmark/../lib/fuzzy_match.rb:35:String + 1 ./benchmark/../lib/fuzzy_match.rb:209:Array + 1 ./benchmark/../lib/fuzzy_match.rb:199:String + 1 ./benchmark/../lib/fuzzy_match.rb:198:Array + 1 ./benchmark/../lib/fuzzy_match.rb:193:String + 1 ./benchmark/../lib/fuzzy_match.rb:192:Array + 1 ./benchmark/../lib/fuzzy_match.rb:187:String + 1 ./benchmark/../lib/fuzzy_match.rb:186:Array + 1 ./benchmark/../lib/fuzzy_match.rb:101:Array diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before-without-last-result.txt b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before-without-last-result.txt new file mode 100644 index 0000000..d6a7bb8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before-without-last-result.txt @@ -0,0 +1,257 @@ + 8393 ./benchmark/../lib/fuzzy_match/score.rb:13:String + 3349 ./benchmark/../lib/fuzzy_match/score.rb:13:Array + 3276 ./benchmark/../lib/fuzzy_match.rb:187:Array + 3042 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__varmap__ + 2752 ./benchmark/../lib/fuzzy_match/similarity.rb:57:Array + 1962 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/object/blank.rb:68:String + 1957 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1632:String + 1883 ./benchmark/../lib/fuzzy_match/score.rb:13:Float + 1674 ./benchmark/../lib/fuzzy_match/score.rb:13:Amatch::PairDistance + 1017 ./benchmark/../lib/fuzzy_match/similarity.rb:37:Array + 779 ./benchmark/../lib/fuzzy_match/similarity.rb:42:FuzzyMatch::Score + 779 ./benchmark/../lib/fuzzy_match/similarity.rb:41:FuzzyMatch::Score + 676 benchmark/memory.rb:21:String + 607 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:String + 444 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Array + 342 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/hasher.rb:20:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:22:ActiveSupport::OrderedHash + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:65:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/ordered_hash.rb:39:Array + 325 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:FuzzyMatch::Similarity + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:25:FuzzyMatch::Score + 325 ./benchmark/../lib/fuzzy_match.rb:35:FuzzyMatch::Wrapper + 320 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:28:String + 303 ./benchmark/../lib/fuzzy_match/similarity.rb:21:Float + 234 ./benchmark/../lib/fuzzy_match/similarity.rb:56:Array + 234 ./benchmark/../lib/fuzzy_match/similarity.rb:55:Array + 184 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:String + 140 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:__node__ + 129 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__node__ + 127 ./benchmark/../lib/fuzzy_match/similarity.rb:55:__node__ + 119 ./benchmark/../lib/fuzzy_match.rb:101:__scope__ + 118 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Hash + 118 
./benchmark/../lib/fuzzy_match/similarity.rb:37:__scope__ + 118 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__scope__ + 118 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__scope__ + 118 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__scope__ + 117 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__scope__ + 117 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:Array + 102 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:Array + 102 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:MatchData + 101 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:MatchData + 41 ./benchmark/../lib/fuzzy_match/similarity.rb:49:__node__ + 36 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:Regexp + 28 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:19:__node__ + 26 ./benchmark/../lib/fuzzy_match.rb:187:FuzzyMatch::Normalizer + 22 ./benchmark/../lib/fuzzy_match/similarity.rb:57:__node__ + 22 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:__node__ + 17 ./benchmark/../lib/fuzzy_match/similarity.rb:21:__node__ + 16 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:Class + 14 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__node__ + 13 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:__node__ + 13 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:__node__ + 11 ./benchmark/../lib/fuzzy_match/identity.rb:18:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:__node__ + 10 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:String + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:39:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__node__ + 10 ./benchmark/../lib/fuzzy_match.rb:193:FuzzyMatch::Identity + 9 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:49:String + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:42:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:41:__node__ + 8 ./benchmark/../lib/fuzzy_match/wrapper.rb:31:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:6:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:__node__ + 8 ./benchmark/../lib/fuzzy_match/similarity.rb:38:__node__ + 8 ./benchmark/../lib/fuzzy_match/score.rb:13:__node__ + 8 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:9:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:__node__ + 7 ./benchmark/../lib/fuzzy_match/similarity.rb:45:__node__ + 7 ./benchmark/../lib/fuzzy_match/score.rb:17:__node__ + 7 ./benchmark/../lib/fuzzy_match/identity.rb:19:__node__ + 6 ./benchmark/../lib/fuzzy_match/wrapper.rb:8:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:28:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:24:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:10:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:44:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:13:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:25:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:21:__node__ + 6 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:__node__ + 5 
/Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1640:String + 5 ./benchmark/../lib/fuzzy_match/wrapper.rb:34:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:8:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:33:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__node__ + 5 ./benchmark/../lib/fuzzy_match/score.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/identity.rb:10:__node__ + 4 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:7:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:33:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:30:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:59:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:54:__node__ + 4 ./benchmark/../lib/fuzzy_match/score.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:22:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:21:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:String + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:11:__node__ + 4 ./benchmark/../lib/fuzzy_match.rb:77:Array + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:30:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:29:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:26:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:13:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:6:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:58:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:56:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:48:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:36:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:32:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:11:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:16:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:12:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:9:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:17:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:16:String + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:15:String + 3 
./benchmark/../lib/fuzzy_match.rb:86:Array + 2 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:387:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:String + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:35:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:20:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:String + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:1:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:60:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:50:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:46:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/score.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:18:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:17:String + 2 ./benchmark/../lib/fuzzy_match/score.rb:14:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/identity.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:21:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:String + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match.rb:101:Array + 1 benchmark/memory.rb:50:__scope__ + 1 benchmark/memory.rb:50:String + 1 benchmark/memory.rb:49:FuzzyMatch + 1 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:492:URI::Generic + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:19:Process::Status + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:10:Bignum + 1 
/Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:63:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:121:RemoteTable::Transformer + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:116:RemoteTable::Format::Delimited + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:111:RemoteTable::Properties + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:106:RemoteTable::LocalFile + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__scope__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__node__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:String + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:Module + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/hash/keys.rb:18:Hash + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:1:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:10:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:Module + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:28:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:Regexp + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:17:Hash + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:String + 1 ./benchmark/../lib/fuzzy_match.rb:62:FuzzyMatch::Wrapper + 1 ./benchmark/../lib/fuzzy_match.rb:62:Hash + 1 ./benchmark/../lib/fuzzy_match.rb:35:String + 1 ./benchmark/../lib/fuzzy_match.rb:35:Array + 1 ./benchmark/../lib/fuzzy_match.rb:193:String + 1 ./benchmark/../lib/fuzzy_match.rb:192:Array + 1 ./benchmark/../lib/fuzzy_match.rb:187:String + 1 
./benchmark/../lib/fuzzy_match.rb:186:Array diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before.txt b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before.txt new file mode 100644 index 0000000..85a142b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/before.txt @@ -0,0 +1,304 @@ + 8642 ./benchmark/../lib/fuzzy_match/score.rb:13:String + 3465 ./benchmark/../lib/fuzzy_match/score.rb:13:Array + 3388 ./benchmark/../lib/fuzzy_match/similarity.rb:25:Array + 3146 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__varmap__ + 2842 ./benchmark/../lib/fuzzy_match/similarity.rb:57:Array + 1962 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/object/blank.rb:68:String + 1957 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1632:String + 1936 ./benchmark/../lib/fuzzy_match/score.rb:13:Float + 1732 ./benchmark/../lib/fuzzy_match/score.rb:13:Amatch::PairDistance + 1054 ./benchmark/../lib/fuzzy_match/similarity.rb:37:Array + 806 ./benchmark/../lib/fuzzy_match/similarity.rb:41:FuzzyMatch::Score + 805 ./benchmark/../lib/fuzzy_match/similarity.rb:42:FuzzyMatch::Score + 688 benchmark/memory.rb:21:String + 639 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:String + 448 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Array + 342 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/hasher.rb:20:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:22:ActiveSupport::OrderedHash + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:65:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/ordered_hash.rb:39:Array + 325 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:FuzzyMatch::Similarity + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:25:FuzzyMatch::Score + 325 ./benchmark/../lib/fuzzy_match.rb:35:FuzzyMatch::Wrapper + 320 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:28:String + 303 ./benchmark/../lib/fuzzy_match/similarity.rb:21:Float + 242 ./benchmark/../lib/fuzzy_match/similarity.rb:56:Array + 242 ./benchmark/../lib/fuzzy_match/similarity.rb:55:Array + 184 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:String + 140 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:__node__ + 133 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__node__ + 131 ./benchmark/../lib/fuzzy_match/similarity.rb:55:__node__ + 123 ./benchmark/../lib/fuzzy_match.rb:101:__scope__ + 122 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Hash + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__scope__ + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__scope__ + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__scope__ + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__scope__ + 121 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__scope__ + 121 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:Array + 110 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:Array + 110 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:MatchData + 109 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:MatchData + 57 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:Regexp + 41 ./benchmark/../lib/fuzzy_match/similarity.rb:49:__node__ + 28 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:19:__node__ + 26 
./benchmark/../lib/fuzzy_match.rb:187:FuzzyMatch::Normalizer + 22 ./benchmark/../lib/fuzzy_match/similarity.rb:57:__node__ + 22 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:__node__ + 21 ./benchmark/../lib/fuzzy_match.rb:199:FuzzyMatch::Grouping + 17 ./benchmark/../lib/fuzzy_match/similarity.rb:21:__node__ + 16 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:Class + 14 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:__node__ + 14 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__node__ + 13 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:__node__ + 13 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:__node__ + 11 ./benchmark/../lib/fuzzy_match/identity.rb:18:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:__node__ + 10 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:String + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:39:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__node__ + 10 ./benchmark/../lib/fuzzy_match.rb:193:FuzzyMatch::Identity + 9 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:49:String + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:42:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:41:__node__ + 8 ./benchmark/../lib/fuzzy_match/wrapper.rb:31:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:__node__ + 8 ./benchmark/../lib/fuzzy_match/similarity.rb:38:__node__ + 8 ./benchmark/../lib/fuzzy_match/score.rb:13:__node__ + 8 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:__node__ + 8 ./benchmark/../lib/fuzzy_match/grouping.rb:24:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:7:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:6:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:5:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:9:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:__node__ + 7 ./benchmark/../lib/fuzzy_match/similarity.rb:45:__node__ + 7 ./benchmark/../lib/fuzzy_match/score.rb:17:__node__ + 7 ./benchmark/../lib/fuzzy_match/identity.rb:19:__node__ + 7 ./benchmark/../lib/fuzzy_match/grouping.rb:27:__node__ + 6 ./benchmark/../lib/fuzzy_match/wrapper.rb:8:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:44:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:13:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:25:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:21:__node__ + 6 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:__node__ + 6 ./benchmark/../lib/fuzzy_match/grouping.rb:22:__node__ + 5 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1640:String + 5 ./benchmark/../lib/fuzzy_match/wrapper.rb:34:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:8:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:33:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__node__ + 5 
./benchmark/../lib/fuzzy_match/similarity.rb:12:__node__ + 5 ./benchmark/../lib/fuzzy_match/score.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/result.rb:16:__node__ + 5 ./benchmark/../lib/fuzzy_match/identity.rb:10:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:26:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:25:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:15:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:33:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:30:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:59:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:54:__node__ + 4 ./benchmark/../lib/fuzzy_match/score.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:9:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:7:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:6:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:4:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:3:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:13:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:12:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:11:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:10:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:22:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:21:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:String + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:11:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:8:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:26:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:13:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:6:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:58:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:56:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:48:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:36:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:32:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:11:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:16:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:12:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:21:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:19:__node__ + 3 
./benchmark/../lib/fuzzy_match/result.rb:15:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:9:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:17:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:16:String + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:33:__node__ + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match.rb:86:Array + 3 ./benchmark/../lib/fuzzy_match.rb:77:Array + 2 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:387:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:35:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:20:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:28:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:String + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:24:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:15:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:10:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:60:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:50:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:46:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/score.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:18:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:17:String + 2 ./benchmark/../lib/fuzzy_match/score.rb:14:__node__ + 2 ./benchmark/../lib/fuzzy_match/result.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/result.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/identity.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:22:__node__ + 2 
./benchmark/../lib/fuzzy_match/identity.rb:21:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:String + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:9:Class + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:29:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match.rb:101:Array + 1 benchmark/memory.rb:50:__scope__ + 1 benchmark/memory.rb:50:String + 1 benchmark/memory.rb:49:FuzzyMatch + 1 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:492:URI::Generic + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:19:Process::Status + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:10:Bignum + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:63:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:121:RemoteTable::Transformer + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:116:RemoteTable::Format::Delimited + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:111:RemoteTable::Properties + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:106:RemoteTable::LocalFile + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Regexp + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__scope__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:String + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:Module + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/hash/keys.rb:18:Hash + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:String + 1 
./benchmark/../lib/fuzzy_match/score.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:1:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:10:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/result.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:Module + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:28:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:Regexp + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:17:Hash + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:String + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:10:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match.rb:62:FuzzyMatch::Wrapper + 1 ./benchmark/../lib/fuzzy_match.rb:39:String + 1 ./benchmark/../lib/fuzzy_match.rb:39:FuzzyMatch::Result + 1 ./benchmark/../lib/fuzzy_match.rb:35:String + 1 ./benchmark/../lib/fuzzy_match.rb:35:Array + 1 ./benchmark/../lib/fuzzy_match.rb:199:String + 1 ./benchmark/../lib/fuzzy_match.rb:198:Array + 1 ./benchmark/../lib/fuzzy_match.rb:193:String + 1 ./benchmark/../lib/fuzzy_match.rb:192:Array + 1 ./benchmark/../lib/fuzzy_match.rb:187:String + 1 ./benchmark/../lib/fuzzy_match.rb:186:Array diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/memory.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/memory.rb new file mode 100644 index 0000000..4514d9a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/benchmark/memory.rb @@ -0,0 +1,53 @@ +#!/usr/bin/env ruby + +require 'rubygems' +require 'memprof' +require 'bundler' +Bundler.setup +require 'remote_table' +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) +require 'fuzzy_match' + +# messily stolen from the bts example + +# The records that your dictionary will return. +# (Example) A table of aircraft as defined by the U.S. Bureau of Transportation Statistics +HAYSTACK = RemoteTable.new :url => "file://#{File.expand_path('../../examples/bts_aircraft/number_260.csv', __FILE__)}", :select => lambda { |record| record['Aircraft Type'].to_i.between?(1, 998) and record['Manufacturer'].present? } + +# A reader used to convert every record (which could be an object of any type) into a string that will be used for similarity. +# (Example) Combine the make and model into something like "boeing 747" +# Note the downcase! 
+HAYSTACK_READER = lambda { |record| "#{record['Manufacturer']} #{record['Long Name']}".downcase } + +# Whether to even bother trying to find a match for something without an explicit group +# (Example) False, which is the default, which means we have more work to do +MUST_MATCH_GROUPING = false + +# Groupings +# (Example) We made these by trial and error +GROUPINGS = RemoteTable.new(:url => "file://#{File.expand_path("../../examples/bts_aircraft/groupings.csv", __FILE__)}", :headers => :first_row).map { |row| row['regexp'] } + +# Normalizers +# (Example) We made these by trial and error +NORMALIZERS = RemoteTable.new(:url => "file://#{File.expand_path("../../examples/bts_aircraft/normalizers.csv", __FILE__)}", :headers => :first_row).map { |row| row['regexp'] } + +# Identities +# (Example) We made these by trial and error +IDENTITIES = RemoteTable.new(:url => "file://#{File.expand_path("../../examples/bts_aircraft/identities.csv", __FILE__)}", :headers => :first_row).map { |row| row['regexp'] } + +FINAL_OPTIONS = { + :read => HAYSTACK_READER, + :must_match_grouping => MUST_MATCH_GROUPING, + :normalizers => NORMALIZERS, + :identities => IDENTITIES, + :groupings => GROUPINGS +} + +Memprof.start + +d = FuzzyMatch.new HAYSTACK, FINAL_OPTIONS +record = d.find('boeing 707(100)', :gather_last_result => false) + +Memprof.stats +Memprof.stop diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/bin/fuzzy_match b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/bin/fuzzy_match new file mode 100755 index 0000000..b6a6b34 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/bin/fuzzy_match @@ -0,0 +1,106 @@ +#!/usr/bin/env ruby + +if File.exist?('Gemfile') + require 'bundler/setup' +end +if ENV['PROFILE'] == 'true' + require 'perftools' +end +# PerfTools::CpuProfiler.start("profile_data") do +require 'fuzzy_match' +require 'fuzzy_match/version' + +require 'active_support/core_ext' +require 'remote_table' +require 'thor' +require 'to_regexp' + +class FuzzyMatch + class Cli < ::Thor + desc :match, "Print out matches between A and B, where A is haystack and B is a bunch of needles." + method_option :csv, :default => false, :type => :boolean, :desc => "CSV output" + method_option :a_col, :default => 0, :type => :string, :desc => "Column name in A. Defaults to first column." + method_option :b_col, :default => 0, :type => :string, :desc => "Column name in B. Defaults to first column." + method_option :downcase, :default => true, :type => :boolean, :desc => "Whether to downcase everything (except regexes, where you have to do /foo/i)" + method_option :groupings, :default => nil, :type => :string, :desc => "Spreadsheet with groupings - no headers, multi-part groupings on the same row" + method_option :rules, :default => nil, :type => :string, :desc => "Spreadsheet with headers: stop_words, identities, find_options. Listing a find_option like must_match_grouping makes it true." + method_option :explain, :default => false, :type => :boolean + method_option :grep, :default => nil, :type => :string + method_option :limit, :default => 1.0/0, :type => :numeric + def match(a_url, b_url) + puts "Checking matches using fuzzy_match version #{FuzzyMatch::VERSION}..." 
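The memory benchmark above feeds RemoteTable spreadsheets into FuzzyMatch; as a minimal sketch of the same call pattern with inline data (the aircraft hashes, regexps, and needle below are invented for illustration, and only options that the 2.0.4 initializer actually consumes are passed):

  require 'fuzzy_match'   # the vendored gem; assumes its lib/ is on the load path

  haystack = [
    { :manufacturer => 'Boeing', :model => '747-200' },
    { :manufacturer => 'Boeing', :model => '737-100' },
    { :manufacturer => 'Airbus', :model => 'A320' }
  ]

  fz = FuzzyMatch.new haystack,
    :read       => lambda { |rec| "#{rec[:manufacturer]} #{rec[:model]}".downcase },
    :groupings  => [/boeing/i, /airbus/i],   # needle and candidate must land in the same group
    :identities => [/(\d{3})/],              # captured model digits must agree exactly
    :must_match_grouping => false

  fz.find('boeing 747 200')
  # => the Boeing 747-200 hash; the 737 is rejected by the identity, the Airbus by the grouping
  fz.explain('boeing 747 200')   # prints a step-by-step account of the same search

The :read lambda plays the role of HAYSTACK_READER above; the inline :groupings and :identities arrays stand in for the regexps the benchmark loads from CSV.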
+ fz = mkfz a_url + b = load_b b_url + if ENV['PROFILE'] == 'true' + require 'perftools' + PerfTools::CpuProfiler.start("profile.bin") { report(fz, b) } + system "pprof.rb --text profile.bin" + `pprof.rb --gif profile.bin > profile.gif` + else + report fz, b + end + end + + private + + def report(fz, b) + b.each do |b_val| + if options.explain + fz.explain + else + a_val = fz.find b_val + if options.csv + # puts [ b_val.ljust(50), a_val ].join('-> ') + puts [ b_val, a_val ].to_csv + else + puts %{\nB: #{b_val}\nA: #{a_val}} + end + end + end + end + + def load_b(b_url) + b_options = options.b_col.is_a?(String) ? { headers: :first_row } : { headers: false } + if options[:grep] + regexp = options[:grep].to_regexp(detect: true) + b_options[:select] = lambda { |row| regexp =~ row[options.b_col] } + end + b = RemoteTable.new(b_url, b_options).to_a + limit = [options.limit, b.length].min + b.first(limit).map do |row| + b_val = row[options.b_col] + b_val.downcase! if options.downcase + b_val + end + end + + def mkfz(a_url) + a_options = options.a_col.is_a?(String) ? { headers: :first_row } : { headers: false } + a = RemoteTable.new(a_url, a_options).map { |row| row[options.a_col] } + a.map!(&:downcase) if options.downcase + FuzzyMatch.new a, fz_options + end + + def fz_options + memo = {} + if options.groupings + memo[:groupings] = RemoteTable.new(options.groupings, :headers => false).map do |row| + row.to_a.select(&:present?).map { |v| v.to_regexp(detect: true) } + end + end + if options.rules + t = RemoteTable.new(options.rules, :headers => :first_row) + find_options = t.rows.map { |row| row['find_options'] } + memo.merge!( + identities: t.rows.map { |row| row['identities'] }.select(&:present?).map { |v| v.to_regexp(detect: true) }, + stop_words: t.rows.map { |row| row['stop_words'] }.select(&:present?).map { |v| v.to_regexp(detect: true) }, + must_match_grouping: find_options.include?('must_match_grouping'), + must_match_at_least_one_word: find_options.include?('must_match_at_least_one_word'), + ) + end + memo + end + end +end + +FuzzyMatch::Cli.start diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/fuzzy_match.gemspec b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/fuzzy_match.gemspec new file mode 100644 index 0000000..5f782d6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/fuzzy_match.gemspec @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +require File.expand_path("../lib/fuzzy_match/version", __FILE__) + +Gem::Specification.new do |s| + s.name = "fuzzy_match" + s.version = FuzzyMatch::VERSION + s.authors = ["Seamus Abshere"] + s.email = ["seamus@abshere.net"] + s.homepage = "https://github.com/seamusabshere/fuzzy_match" + s.summary = %Q{Find a needle in a haystack using string similarity and (optionally) regexp rules. Replaces loose_tight_dictionary.} + s.description = %Q{Find a needle in a haystack using string similarity and (optionally) regexp rules. 
Replaces loose_tight_dictionary.} + + s.rubyforge_project = "fuzzy_match" + + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- test/*`.split("\n") + s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } + s.require_paths = ["lib"] + + # needed if you use FuzzyMatch::CachedResult + s.add_development_dependency 'active_record_inline_schema', '>=0.4.0' + + # development dependencies + s.add_development_dependency 'pry' + s.add_development_dependency 'rspec-core' + s.add_development_dependency 'rspec-expectations' + s.add_development_dependency 'rspec-mocks' + s.add_development_dependency 'activerecord', '>=3' + s.add_development_dependency 'mysql2' + s.add_development_dependency 'cohort_analysis' + s.add_development_dependency 'weighted_average' + s.add_development_dependency 'yard' + s.add_development_dependency 'amatch' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/groupings-screenshot.png b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/groupings-screenshot.png new file mode 100644 index 0000000..23e68bd Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/groupings-screenshot.png differ diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/highlevel.graffle b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/highlevel.graffle new file mode 100644 index 0000000..6f974a5 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/highlevel.graffle differ diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/highlevel.png b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/highlevel.png new file mode 100644 index 0000000..b39970e Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/highlevel.png differ diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match.rb new file mode 100644 index 0000000..0f87c22 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match.rb @@ -0,0 +1,335 @@ +require 'fuzzy_match/rule' +require 'fuzzy_match/rule/grouping' +require 'fuzzy_match/rule/identity' +require 'fuzzy_match/result' +require 'fuzzy_match/record' +require 'fuzzy_match/similarity' +require 'fuzzy_match/score' + +# See the README for more information. +class FuzzyMatch + class << self + def engine + @engine + end + + def engine=(alt_engine) + @engine = alt_engine + end + + def score_class + case engine + when :pure_ruby + Score::PureRuby + when :amatch + Score::Amatch + else + raise ::ArgumentError, "[fuzzy_match] #{engine.inspect} is not a recognized engine." 
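The engine accessor above is what score_class switches on; a short sketch of how a caller might opt into the amatch-backed scorer while keeping the pure-Ruby default as a fallback (whether the native gem is installed is an assumption about the environment):

  require 'fuzzy_match'            # engine defaults to :pure_ruby (DEFAULT_ENGINE)

  begin
    require 'amatch'               # optional C extension used by Score::Amatch
    FuzzyMatch.engine = :amatch
  rescue LoadError
    # keep the default; Score::PureRuby needs nothing beyond the standard library
  end

  FuzzyMatch.score_class           # => FuzzyMatch::Score::Amatch or FuzzyMatch::Score::PureRuby

Any other engine value makes score_class raise the ArgumentError shown above.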
+ end + end + end + + DEFAULT_ENGINE = :pure_ruby + + #TODO refactor at least all the :find_X things + DEFAULT_OPTIONS = { + :must_match_grouping => false, + :must_match_at_least_one_word => false, + :gather_last_result => false, + :find_all => false, + :find_all_with_score => false, + :threshold => 0, + :find_best => false, + :find_with_score => false, + } + + self.engine = DEFAULT_ENGINE + + attr_reader :haystack + attr_reader :groupings + attr_reader :identities + attr_reader :stop_words + attr_accessor :read + attr_reader :default_options + + # haystack - a bunch of records that will compete to see who best matches the needle + # + # Rules (can only be specified at initialization or by using a setter) + # * :identities - regexps + # * :groupings - regexps + # * :stop_words - regexps + # * :read - how to interpret each record in the 'haystack', either a Proc or a symbol + # + # Options (can be specified at initialization or when calling #find) + # * :must_match_grouping - don't return a match unless the needle fits into one of the groupings you specified + # * :must_match_at_least_one_word - don't return a match unless the needle shares at least one word with the match + # * :gather_last_result - enable last_result + # * :threshold - set a score threshold below which not to return results (not generally recommended - please test the results of setting a threshold thoroughly - one set of results and their scores probably won't be enough to determine the appropriate number). Only checked against the Pair Distance score and ignored when one string or the other is of length 1. + def initialize(haystack, options_and_rules = {}) + o = options_and_rules.dup + + # rules + @read = o.delete(:read) || o.delete(:haystack_reader) + @groupings = (o.delete(:groupings) || o.delete(:blockings) || []).map { |regexp| Rule::Grouping.make(regexp) }.flatten + @identities = (o.delete(:identities) || []).map { |regexp| Rule::Identity.new(regexp) } + @stop_words = o.delete(:stop_words) || [] + + # options + if deprecated = o.delete(:must_match_blocking) + o[:must_match_grouping] = deprecated + end + @default_options = DEFAULT_OPTIONS.merge(o).freeze + + @haystack = haystack.map { |original| Record.new original, :stop_words => @stop_words, :read => @read } + end + + def last_result + @last_result or raise("You can't access the last result until you've run a find with :gather_last_result => true") + end + + # Return everything in sorted order + def find_all(needle, options = {}) + options = options.merge(:find_all => true) + find needle, options + end + + # Return the top results with the same score + def find_best(needle, options = {}) + options = options.merge(:find_best => true) + find needle, options + end + + # Return everything in sorted order with score + def find_all_with_score(needle, options = {}) + options = options.merge(:find_all_with_score => true) + find needle, options + end + + # Return one with score + def find_with_score(needle, options = {}) + options = options.merge(:find_with_score => true) + find needle, options + end + + def find(needle, options = {}) + options = default_options.merge options + + threshold = options[:threshold] + gather_last_result = options[:gather_last_result] + is_find_all_with_score = options[:find_all_with_score] + is_find_with_score = options[:find_with_score] + is_find_best = options[:find_best] + is_find_all = options[:find_all] || is_find_all_with_score || is_find_best + must_match_grouping = options[:must_match_grouping] + must_match_at_least_one_word = 
options[:must_match_at_least_one_word] + + if gather_last_result + @last_result = Result.new + last_result.read = read + last_result.haystack = haystack + last_result.options = options + end + + if gather_last_result + last_result.identities = identities + last_result.groupings = groupings + last_result.stop_words = stop_words + end + + needle = case needle + when String + Record.new needle + else + Record.new needle, :read => read + end + + if gather_last_result + last_result.needle = needle + end + + if groupings.any? + first_grouping = groupings.detect { |grouping| grouping.xmatch? needle } + if gather_last_result + if first_grouping + last_result.timeline << "Grouping: #{first_grouping.inspect}" + else + last_result.timeline << "No grouping." + end + end + end + + if must_match_grouping and not first_grouping + if gather_last_result + last_result.timeline << <<-EOS +The needle didn't match any of the #{groupings.length} groupings, which was a requirement. +\t#{groupings.map(&:inspect).join("\n\t")} +EOS + end + if is_find_all + return [] + else + return nil + end + end + + if groupings.any? and not first_grouping + passed_grouping_requirement = haystack.reject do |straw| + groupings.any? { |grouping| grouping.xmatch? straw } + end + else + passed_grouping_requirement = haystack + end + + if must_match_at_least_one_word + passed_word_requirement = passed_grouping_requirement.select do |straw| + (needle.words & straw.words).any? + end + if gather_last_result + last_result.timeline << <<-EOS +Since :must_match_at_least_one_word => true, the competition was reduced to records sharing at least one word with the needle. +\tNeedle words: #{needle.words.map(&:inspect).join(', ')} +\tPassed (first 3): #{passed_word_requirement[0,3].map(&:inspect).join(', ')} +\tFailed (first 3): #{(passed_grouping_requirement-passed_word_requirement)[0,3].map(&:inspect).join(', ')} +EOS + end + else + passed_word_requirement = passed_grouping_requirement + end + + if first_grouping + joint = passed_word_requirement.select do |straw| + first_grouping.xjoin? needle, straw + end + # binding.pry + if gather_last_result + last_result.timeline << <<-EOS +Since there were groupings, the competition was reduced to #{joint.length} records in the same group as the needle. +\t#{joint.map(&:inspect).join("\n\t")} +EOS + end + else + joint = passed_word_requirement.dup + end + + if joint.none? + if must_match_grouping + if gather_last_result + last_result.timeline << <<-EOS +Since :must_match_at_least_one_word => true and none of the competition was in the same group as the needle, the search stopped. +EOS + end + if is_find_all + return [] + else + return nil + end + else + joint = passed_word_requirement.dup + end + end + + if identities.any? + possibly_identical = joint.select do |straw| + identities.all? do |identity| + answer = identity.identical? needle, straw + answer.nil? 
or answer == true + end + end + if gather_last_result + last_result.timeline << <<-EOS +Since there were identities, the competition was reduced to records that might be identical to the needle (in other words, are not certainly different) +\tIdentities (first 10 of #{identities.length}): #{identities[0,9].map(&:inspect).join(', ')} +\tPassed (first 10 of #{possibly_identical.length}): #{possibly_identical[0,9].map(&:inspect).join(', ')} +\tFailed (first 10 of #{(joint-possibly_identical).length}): #{(joint-possibly_identical)[0,9].map(&:inspect).join(', ')} +EOS + end + else + possibly_identical = joint.dup + end + + similarities = possibly_identical.map { |straw| needle.similarity straw }.sort.reverse + + if gather_last_result + last_result.timeline << <<-EOS +The competition was sorted in order of similarity to the needle. +\t#{similarities[0,9].map { |s| "#{s.record2.similarity(needle).inspect}" }.join("\n\t")} +EOS + end + + if is_find_all_with_score + memo = [] + similarities.each do |similarity| + if similarity.satisfy?(needle, threshold) + bs = similarity.best_score + memo << [similarity.record2.original, bs.dices_coefficient_similar, bs.levenshtein_similar] + end + end + return memo + end + + if is_find_best + memo = [] + best_bs = nil + similarities.each do |similarity| + if similarity.satisfy?(needle, threshold) + bs = similarity.best_score + best_bs ||= bs + if bs >= best_bs + memo << similarity.record2.original + else + break + end + end + end + return memo + end + + if is_find_all + memo = [] + similarities.each do |similarity| + if similarity.satisfy?(needle, threshold) + memo << similarity.record2.original + end + end + return memo + end + + best_similarity = similarities.first + winner = nil + + if best_similarity and best_similarity.satisfy?(needle, threshold) + winner = best_similarity.record2.original + if gather_last_result + last_result.winner = winner + last_result.score = best_similarity.best_score.dices_coefficient_similar + last_result.timeline << <<-EOS +A winner was determined because the Dice's Coefficient similarity (#{'%0.5f' % best_similarity.best_score.dices_coefficient_similar}) is greater than zero or because it shared a word with the needle. +EOS + end + if is_find_with_score + bs = best_similarity.best_score + return [winner, bs.dices_coefficient_similar, bs.levenshtein_similar] + else + return winner + end + elsif gather_last_result + best_similarity_record = if best_similarity and best_similarity.record2 + best_similarity.record2.original + end + last_result.timeline << <<-EOS +No winner assigned because the score of the best similarity (#{best_similarity_record.inspect}) was zero and it didn't match any words with the needle (#{needle.inspect}). +EOS + end + + nil # ugly + end + + # Explain is like mysql's EXPLAIN command. You give it a needle and it tells you about how it was located (successfully or not) in the haystack. 
+ # + # d = FuzzyMatch.new ['737', '747', '757' ] + # d.explain 'boeing 737-100' + def explain(needle, options = {}) + find needle, options.merge(:gather_last_result => true) + last_result.explain + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/cached_result.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/cached_result.rb new file mode 100644 index 0000000..e9d46bc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/cached_result.rb @@ -0,0 +1,79 @@ +require 'active_record_inline_schema' + +class FuzzyMatch + class CachedResult < ::ActiveRecord::Base + if ::ActiveRecord::VERSION::STRING >= '3.2' + self.table_name = :fuzzy_match_cached_results + else + set_table_name :fuzzy_match_cached_results + end + + class << self + def setup(from_scratch = false) + if from_scratch + connection.drop_table :fuzzy_match_cached_results rescue nil + end + auto_upgrade! + end + end + + col :a_class + col :a + col :b_class + col :b + add_index [:a_class, :b_class, :a], :name => 'aba' + add_index [:a_class, :b_class, :b], :name => 'abb' + add_index [:a_class, :b_class, :a, :b], :name => 'abab' + + module ActiveRecordBaseExtension + # required options: + # :primary_key - what to call on this class + # :foreign_key - what to call on the other class + def cache_fuzzy_match_with(other_active_record_class, options) + other = other_active_record_class.to_s.singularize.camelcase + me = name + if me < other + a = me + b = other + primary_key = :a + foreign_key = :b + else + a = other + b = me + primary_key = :b + foreign_key = :a + end + + # def aircraft + define_method other.underscore.pluralize do + other.constantize.where options[:foreign_key] => send("#{other.underscore.pluralize}_foreign_keys") + end + + # def flight_segments_foreign_keys + define_method "#{other.underscore.pluralize}_foreign_keys" do + fz = ::FuzzyMatch::CachedResult.arel_table + sql = fz.project(fz[foreign_key]).where(fz["#{primary_key}_class".to_sym].eq(self.class.name).and(fz["#{foreign_key}_class".to_sym].eq(other)).and(fz[primary_key].eq(send(options[:primary_key])))).to_sql + connection.select_values sql + end + + # def cache_aircraft! + define_method "cache_#{other.underscore.pluralize}!" do + other_class = other.constantize + primary_key_value = send options[:primary_key] + other_class.fuzzy_match.find_all(primary_key_value).each do |other_instance| + attrs = {} + attrs[primary_key] = primary_key_value + attrs["#{primary_key}_class"] = self.class.name + attrs[foreign_key] = other_instance.send options[:foreign_key] + attrs["#{foreign_key}_class"] = other + unless ::FuzzyMatch::CachedResult.exists? attrs + ::FuzzyMatch::CachedResult.create! attrs + end + end + end + end + end + end +end + +::ActiveRecord::Base.extend ::FuzzyMatch::CachedResult::ActiveRecordBaseExtension diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/record.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/record.rb new file mode 100644 index 0000000..db13b12 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/record.rb @@ -0,0 +1,58 @@ +class FuzzyMatch + # Records are the tokens that are passed around when doing scoring and optimizing. 
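The ActiveRecordBaseExtension above generates a finder, a foreign-keys reader, and a cache_…! writer for each paired model. Condensed from the cache_spec.rb that appears later in this diff, and assuming an established ActiveRecord connection plus the aircraft / flight_segments tables (and seed rows) that the spec creates, usage looks roughly like:

  require 'active_record'
  require 'fuzzy_match'
  require 'fuzzy_match/cached_result'   # extends ActiveRecord::Base with cache_fuzzy_match_with

  # The spec marks 'aircraft' as uncountable so the generated method reads cache_aircraft!.
  ActiveSupport::Inflector.inflections { |inflect| inflect.uncountable 'aircraft' }

  class Aircraft < ActiveRecord::Base
    self.primary_key = 'icao_code'
    cache_fuzzy_match_with :flight_segments,
      :primary_key => :aircraft_description, :foreign_key => :aircraft_description

    def aircraft_description
      [manufacturer_name, model_name].compact.join(' ')
    end

    # FlightSegment#cache_aircraft! calls this finder on the other side of the pairing.
    def self.fuzzy_match
      @fuzzy_match ||= FuzzyMatch.new(all, :read => lambda { |a| a.aircraft_description })
    end
  end

  class FlightSegment < ActiveRecord::Base
    self.primary_key = 'row_hash'
    cache_fuzzy_match_with :aircraft,
      :primary_key => :aircraft_description, :foreign_key => :aircraft_description
  end

  FuzzyMatch::CachedResult.setup              # auto-migrates fuzzy_match_cached_results
  FlightSegment.find_each(&:cache_aircraft!)  # one CachedResult row per fuzzy link, skipping duplicates
  Aircraft.find('B742').flight_segments       # a plain relation resolved through the cached keys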
+ class Record #:nodoc: all + # "Foo's" is one word + # "North-west" is just one word + # "Bolivia," is just Bolivia + WORD_BOUNDARY = %r{\W*(?:\s+|$)} + EMPTY = [].freeze + BLANK = ''.freeze + + attr_reader :original + attr_reader :read + attr_reader :stop_words + + def initialize(original, options = {}) + @original = original + @read = options[:read] + @stop_words = options.fetch(:stop_words, EMPTY) + end + + def inspect + "w(#{clean.inspect})" + end + + def clean + @clean ||= begin + memo = whole.dup + stop_words.each do |stop_word| + memo.gsub! stop_word, BLANK + end + memo.strip.freeze + end + end + + def words + @words ||= clean.downcase.split(WORD_BOUNDARY).freeze + end + + def similarity(other) + Similarity.new self, other + end + + def whole + @whole ||= case read + when ::NilClass + original + when ::Numeric, ::String + original[read] + when ::Proc + read.call original + when ::Symbol + original.respond_to?(read) ? original.send(read) : original[read] + else + raise "Expected nil, a proc, or a symbol, got #{read.inspect}" + end.to_s.strip.freeze + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/result.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/result.rb new file mode 100644 index 0000000..bfaded9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/result.rb @@ -0,0 +1,45 @@ +# encoding: utf-8 +require 'erb' +require 'pp' + +class FuzzyMatch + class Result #:nodoc: all + EXPLANATION = <<-ERB +##################################################### +# SUMMARY +##################################################### + +Needle: <%= needle.inspect %> +Match: <%= winner.inspect %> + +##################################################### +# OPTIONS +##################################################### + +<%= PP.pp(options, '') %> + +<% timeline.each_with_index do |event, index| %> +(<%= index+1 %>) <%= event %> +<% end %> +ERB + + attr_accessor :needle + attr_accessor :read + attr_accessor :haystack + attr_accessor :options + attr_accessor :groupings + attr_accessor :identities + attr_accessor :stop_words + attr_accessor :winner + attr_accessor :score + attr_reader :timeline + + def initialize + @timeline = [] + end + + def explain + $stdout.puts ::ERB.new(EXPLANATION, 0, '%<').result(binding) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule.rb new file mode 100644 index 0000000..400c35c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule.rb @@ -0,0 +1,17 @@ +class FuzzyMatch + # A rule characterized by a regexp. Abstract. + class Rule + attr_reader :regexp + + def initialize(regexp) + unless regexp.is_a?(::Regexp) + raise ArgumentError, "[FuzzyMatch] Rules must be set with Regexp objects, but got #{regexp.inspect} (#{regexp.class.name})" + end + @regexp = regexp + end + + def ==(other) + other.class == self.class and regexp == other.regexp + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/grouping.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/grouping.rb new file mode 100644 index 0000000..2bcc4aa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/grouping.rb @@ -0,0 +1,90 @@ +# require 'pry' +class FuzzyMatch + class Rule + # "Record linkage typically involves two main steps: grouping and scoring..." 
+ # http://en.wikipedia.org/wiki/Record_linkage + # + # Groupings effectively divide up the haystack into groups that match a pattern + # + # A grouping (formerly known as a blocking) comes into effect when a str matches. + # Then the needle must also match the grouping's regexp. + class Grouping < Rule + class << self + def make(regexps) + case regexps + when ::Regexp + new regexps + when ::Array + chain = regexps.flatten.map { |regexp| new regexp } + if chain.length == 1 + chain[0] # not really a chain after all + else + chain.each { |grouping| grouping.chain = chain } + chain + end + else + raise ArgumentError, "[fuzzy_match] Groupings should be specified as single regexps or an array of regexps (got #{regexps.inspect})" + end + end + end + + attr_accessor :chain + + def inspect + memo = [] + memo << "#{regexp.inspect}" + if chain + memo << "(#{chain.find_index(self)} of #{chain.length})" + end + memo.join ' ' + end + + def xmatch?(record) + if primary? + match?(record) and subs.none? { |sub| sub.match?(record) } + else + match?(record) and primary.match?(record) + end + end + + def xjoin?(needle, straw) + if primary? + join?(needle, straw) and subs.none? { |sub| sub.match?(straw) } # maybe xmatch here? + else + join?(needle, straw) and primary.match?(straw) + end + end + + protected + + def primary? + chain ? (primary == self) : true + # not chain or primary == self + end + + def primary + chain ? chain[0] : self + end + + def subs + chain ? chain[1..-1] : [] + end + + def match?(record) + !!(regexp.match(record.whole)) + end + + def join?(needle, straw) + if straw_match_data = regexp.match(straw.whole) + if needle_match_data = regexp.match(needle.whole) + straw_match_data.captures.join.downcase == needle_match_data.captures.join.downcase + else + false + end + else + nil + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/identity.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/identity.rb new file mode 100644 index 0000000..cf08e61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/identity.rb @@ -0,0 +1,40 @@ +class FuzzyMatch + class Rule + # Identities take effect when needle and haystack both match a regexp + # Then the captured part of the regexp has to match exactly + class Identity < Rule + attr_reader :proc + + def initialize(regexp_or_proc) + case regexp_or_proc + when Regexp + @regexp = regexp_or_proc + when Proc + @proc = regexp_or_proc + else + raise ArgumentError, "[FuzzyMatch] Identity must be set with either Regexp objects or Procs, but got #{regexp_or_proc.inspect} (#{regexp_or_proc.class.name})" + end + end + + def ==(other) + other.class == self.class and (regexp ? regexp == other.regexp : proc == other.proc) + end + + # Two strings are "identical" if they both match this identity and the captures are equal. + # + # Only returns true/false if both strings match the regexp. + # Otherwise returns nil. 
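Both rule classes here operate on Record wrappers rather than raw strings; a small standalone sketch with invented aircraft names shows how each answers on its own:

  require 'fuzzy_match'

  needle = FuzzyMatch::Record.new('boeing 737-100')
  straw  = FuzzyMatch::Record.new('BOEING 737-900')
  other  = FuzzyMatch::Record.new('airbus a320')

  # A grouping: needle and candidate must both fall inside the /737/ group.
  group_737 = FuzzyMatch::Rule::Grouping.make(/737/)
  group_737.xmatch? needle          # => true   (the needle belongs to this group)
  group_737.xjoin?  needle, straw   # => true   (both sides match; there are no captures to disagree)
  group_737.xjoin?  needle, other   # => nil    (the candidate is outside the group)

  # An identity: the captured three-digit model number must agree exactly.
  model_id = FuzzyMatch::Rule::Identity.new(/(\d{3})/)
  model_id.identical? needle, straw                                     # => true  ("737" == "737")
  model_id.identical? needle, FuzzyMatch::Record.new('boeing 747-400')  # => false

Inside FuzzyMatch#find these same checks are what build the joint and possibly_identical candidate lists shown earlier.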
+ def identical?(record1, record2) + if regexp + if (str1_match_data = regexp.match(record1.whole)) and (str2_match_data = regexp.match(record2.whole)) + str1_match_data.captures.join.downcase == str2_match_data.captures.join.downcase + else + nil + end + else + proc.call record1.original, record2.original + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score.rb new file mode 100644 index 0000000..e982464 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score.rb @@ -0,0 +1,30 @@ +require 'fuzzy_match/score/pure_ruby' +require 'fuzzy_match/score/amatch' + +class FuzzyMatch + class Score + include Comparable + + attr_reader :str1 + attr_reader :str2 + + def initialize(str1, str2) + @str1 = str1.downcase + @str2 = str2.downcase + end + + def inspect + %{(dice=#{"%0.5f" % dices_coefficient_similar},lev=#{"%0.5f" % levenshtein_similar})} + end + + def <=>(other) + a = dices_coefficient_similar + b = other.dices_coefficient_similar + if a.nan? or b.nan? or (by_dices_coefficient = (a <=> b)) == 0 + levenshtein_similar <=> other.levenshtein_similar + else + by_dices_coefficient + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/amatch.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/amatch.rb new file mode 100644 index 0000000..25180b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/amatch.rb @@ -0,0 +1,21 @@ +class FuzzyMatch + class Score + # be sure to `require 'amatch'` before you use this class + class Amatch < Score + + def dices_coefficient_similar + @dices_coefficient_similar ||= if str1 == str2 + 1.0 + elsif str1.length == 1 and str2.length == 1 + 0.0 + else + str1.pair_distance_similar str2 + end + end + + def levenshtein_similar + @levenshtein_similar ||= str1.levenshtein_similar str2 + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/pure_ruby.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/pure_ruby.rb new file mode 100644 index 0000000..55963da --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/pure_ruby.rb @@ -0,0 +1,94 @@ +class FuzzyMatch + class Score + class PureRuby < Score + + SPACE = ' ' + + # http://stackoverflow.com/questions/653157/a-better-similarity-ranking-algorithm-for-variable-length-strings + def dices_coefficient_similar + @dices_coefficient_similar ||= begin + if str1 == str2 + 1.0 + elsif str1.length == 1 and str2.length == 1 + 0.0 + else + pairs1 = (0..str1.length-2).map do |i| + str1[i,2] + end.reject do |pair| + pair.include? SPACE + end + pairs2 = (0..str2.length-2).map do |i| + str2[i,2] + end.reject do |pair| + pair.include? SPACE + end + union = pairs1.size + pairs2.size + intersection = 0 + pairs1.each do |p1| + 0.upto(pairs2.size-1) do |i| + if p1 == pairs2[i] + intersection += 1 + pairs2.slice!(i) + break + end + end + end + (2.0 * intersection) / union + end + end + end + + # extracted/adapted from the text gem version 1.0.2 + # normalization added for utf-8 strings + # lib/text/levenshtein.rb + def levenshtein_similar + @levenshtein_similar ||= begin + if utf8? 
+ unpack_rule = 'U*' + else + unpack_rule = 'C*' + end + s = str1.unpack(unpack_rule) + t = str2.unpack(unpack_rule) + n = s.length + m = t.length + + if n == 0 or m == 0 + 0.0 + else + d = (0..m).to_a + x = nil + (0...n).each do |i| + e = i+1 + (0...m).each do |j| + cost = (s[i] == t[j]) ? 0 : 1 + x = [ + d[j+1] + 1, # insertion + e + 1, # deletion + d[j] + cost # substitution + ].min + d[j] = e + e = x + end + d[m] = x + end + # normalization logic from https://github.com/flori/amatch/blob/master/ext/amatch_ext.c#L301 + # if (b_len > a_len) { + # result = rb_float_new(1.0 - ((double) v[p][b_len]) / b_len); + # } else { + # result = rb_float_new(1.0 - ((double) v[p][b_len]) / a_len); + # } + 1.0 - x.to_f / [n, m].max + end + end + end + + private + + def utf8? + return @utf8_query if defined?(@utf8_query) + @utf8_query = (defined?(::Encoding) ? str1.encoding.to_s : $KCODE).downcase.start_with?('u') + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/similarity.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/similarity.rb new file mode 100644 index 0000000..1692ad6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/similarity.rb @@ -0,0 +1,39 @@ +class FuzzyMatch + class Similarity + attr_reader :record1 + attr_reader :record2 + + def initialize(record1, record2) + @record1 = record1 + @record2 = record2 + end + + def <=>(other) + by_score = best_score <=> other.best_score + if by_score == 0 + original_weight <=> other.original_weight + else + by_score + end + end + + def best_score + @best_score ||= FuzzyMatch.score_class.new(record1.clean, record2.clean) + end + + def satisfy?(needle, threshold) + best_score.dices_coefficient_similar > threshold or + ((record2.clean.length < 3 or needle.clean.length < 3) and best_score.levenshtein_similar > 0) or + (needle.words & record2.words).any? 
+ end + + def inspect + %{#{record2.clean.inspect} ~ #{record1.clean.inspect} => #{best_score.inspect}} + end + + # Weight things towards short original strings + def original_weight + @original_weight ||= (1.0 / (record1.clean.length * record2.clean.length)) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/version.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/version.rb new file mode 100644 index 0000000..3882a3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/version.rb @@ -0,0 +1,3 @@ +class FuzzyMatch + VERSION = '2.0.4' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/amatch_spec.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/amatch_spec.rb new file mode 100644 index 0000000..19cd465 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/amatch_spec.rb @@ -0,0 +1,17 @@ +unless RUBY_PLATFORM == 'java' + require 'spec_helper' + require 'amatch' + + describe FuzzyMatch do + describe %{when using the :amatch string similarity engine} do + before do + $testing_amatch = true + FuzzyMatch.engine = :amatch + end + after do + $testing_amatch = false + FuzzyMatch.engine = nil + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/cache_spec.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/cache_spec.rb new file mode 100644 index 0000000..dfa4c25 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/cache_spec.rb @@ -0,0 +1,132 @@ +require 'spec_helper' + +require 'active_record' +require 'cohort_analysis' +require 'weighted_average' + +ActiveRecord::Base.establish_connection( + 'adapter' => 'mysql2', + 'database' => 'fuzzy_match_test', + 'username' => 'root', + 'password' => 'password' +) + +# require 'logger' +# ActiveRecord::Base.logger = Logger.new $stderr +# ActiveRecord::Base.logger.level = Logger::DEBUG + +ActiveSupport::Inflector.inflections do |inflect| + inflect.uncountable 'aircraft' +end + +require 'fuzzy_match/cached_result' + +::FuzzyMatch::CachedResult.setup(true) + +class Aircraft < ActiveRecord::Base + MUTEX = ::Mutex.new + self.primary_key = 'icao_code' + + cache_fuzzy_match_with :flight_segments, :primary_key => :aircraft_description, :foreign_key => :aircraft_description + + def aircraft_description + [manufacturer_name, model_name].compact.join(' ') + end + + def self.fuzzy_match + @fuzzy_match || MUTEX.synchronize do + @fuzzy_match||= FuzzyMatch.new(all, :read => ::Proc.new { |straw| straw.aircraft_description }) + end + end + + def self.create_table + connection.drop_table(:aircraft) rescue nil + connection.execute %{ +CREATE TABLE `aircraft` ( + `icao_code` varchar(255) DEFAULT NULL, + `manufacturer_name` varchar(255) DEFAULT NULL, + `model_name` varchar(255) DEFAULT NULL, + PRIMARY KEY (`icao_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + } + reset_column_information + end +end + +class FlightSegment < ActiveRecord::Base + self.primary_key = 'row_hash' + + cache_fuzzy_match_with :aircraft, :primary_key => :aircraft_description, :foreign_key => :aircraft_description + + def self.create_table + connection.drop_table(:flight_segments) rescue nil + connection.execute %{ +CREATE TABLE `flight_segments` ( + `row_hash` varchar(255) NOT NULL DEFAULT '', + `aircraft_description` varchar(255) DEFAULT NULL, + `passengers` int(11) DEFAULT NULL, + `seats` int(11) DEFAULT NULL, + PRIMARY KEY (`row_hash`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + } + end +end + 
+FlightSegment.create_table +Aircraft.create_table + +a = Aircraft.new +a.icao_code = 'B742' +a.manufacturer_name = 'Boeing' +a.model_name = '747-200' +a.save! + +fs = FlightSegment.new +fs.row_hash = 'madison to chicago' +fs.aircraft_description = 'BORING 747200' +fs.passengers = 10 +fs.seats = 10 +fs.save! + +fs = FlightSegment.new +fs.row_hash = 'madison to minneapolis' +fs.aircraft_description = 'bing 747' +fs.passengers = 100 +fs.seats = 5 +fs.save! + +FlightSegment.all.each do |fs| + fs.cache_aircraft! +end + +describe FuzzyMatch::CachedResult do + it %{joins aircraft to flight segments} do + aircraft = Aircraft.find('B742') + aircraft.flight_segments.count.should == 2 + end + + it %{allow simple SQL operations} do + aircraft = Aircraft.find('B742') + aircraft.flight_segments.sum(:passengers).should == 110 + end + + it %{works with weighted_average} do + aircraft = Aircraft.find('B742') + aircraft.flight_segments.weighted_average(:seats, :weighted_by => :passengers).should == 5.45455 + end + + it %{works with cohort_scope (albeit rather clumsily)} do + aircraft = Aircraft.find('B742') + cohort = FlightSegment.cohort({:aircraft_description => aircraft.flight_segments_foreign_keys}, :minimum_size => 2) + FlightSegment.connection.select_value(cohort.project('COUNT(*)').to_sql).should == 2 + # FlightSegment.cohort(:aircraft_description => aircraft.flight_segments_foreign_keys).should == [] + end + + # def test_006_you_can_get_aircraft_from_flight_segments + # fs = FlightSegment.first + # # you need to add an aircraft_description column + # lambda do + # fs.aircraft.count.should == 2 + # end.must_raise ActiveRecord::StatementInvalid + # end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/foo.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/foo.rb new file mode 100644 index 0000000..b948eda --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/foo.rb @@ -0,0 +1,9 @@ +require 'fileutils' +Dir['test*.rb'].each do |f| + n = File.basename(f, '.rb') + n.sub! 
'test_', '' + n += '_spec.rb' + puts f + puts n + FileUtils.cp f, n +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/fuzzy_match_spec.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/fuzzy_match_spec.rb new file mode 100644 index 0000000..9339c87 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/fuzzy_match_spec.rb @@ -0,0 +1,367 @@ +# -*- encoding: utf-8 -*- +require 'spec_helper' + +describe FuzzyMatch do + describe '#find' do + it %{finds the best match using string similarity} do + d = FuzzyMatch.new %w{ RATZ CATZ } + d.find('RITZ').should == 'RATZ' + end + + it %{doesn't mind crazy characters} do + d = FuzzyMatch.new %w{ RATZ CATZ } + d.find('RíTZ').should == 'RATZ' + end + + it %{not return any result if the maximum score is zero} do + FuzzyMatch.new(['a']).find('b').should be_nil + end + + it %{finds exact matches} do + d = FuzzyMatch.new [ 'X' ] + d.find('X').should == 'X' + end + end + + describe '#find_all' do + it %{return all records in sorted order} do + d = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_all('X').should == ['X', 'X22' ] + d.find_all('A').should == [] + end + end + + describe '#find_best' do + it %{returns one or more records with the best score} do + d = FuzzyMatch.new [ 'X', 'X', 'X22', 'Y', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_best('X').should == ['X', 'X' ] + d.find_best('A').should == [] + end + end + + describe '#find_all_with_score' do + it %{return records with 2 scores} do + d = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_all_with_score('X').should == [ ['X', 1, 1], ['X22', 0, 0.33333333333333337] ] + d.find_all_with_score('A').should == [] + end + end + + describe '#find_with_score' do + it %{return record with dice's and lev's scores} do + d = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_with_score('X').should == ['X', 1, 1] + d.find_with_score('A').should be_nil + end + end + + describe '#explain' do + before do + require 'stringio' + @capture = StringIO.new + @old_stdout = $stdout + $stdout = @capture + end + after do + $stdout = @old_stdout + end + + it %{print a basic explanation to stdout} do + d = FuzzyMatch.new %w{ RATZ CATZ } + d.explain('RITZ') + @capture.rewind + @capture.read.should include('CATZ') + end + + it %{explains match failures} do + FuzzyMatch.new(['aaa']).explain('bbb') + @capture.rewind + @capture.read.should =~ %r{No winner assigned.*aaa.*bbb} + end + end + + describe "groupings replacings normalizers" do + it %{sometimes gets false results without them} do + d = FuzzyMatch.new ['BOEING 737-100/200', 'BOEING 737-900'] + d.find('BOEING 737100 number 900').should == 'BOEING 737-900' + end + + it %{can be used to improve results} do + d = FuzzyMatch.new ['BOEING 737-100/200', 'BOEING 737-900'], :groupings => [ [/boeing/i, /7(\d\d)-?(\d\d\d)?/]] + d.find('BOEING 737100 number 900').should == 'BOEING 737-100/200' + end + end + + describe "identities" do + it %{sometimes gets false results without them} do + # false positive without identity + d = FuzzyMatch.new %w{ foo bar } + d.find('bar').should == 'bar' + d.find('bare').should == 'bar' + d.find('baz').should == 'bar' + end + + it %{can be used to improve results} do + d = FuzzyMatch.new %w{ foo bar }, :identities => [ /ba(.)/ ] + d.find('bar').should == 'bar' + d.find('bare').should == 'bar' + 
d.find('baz').should be_nil + d.find('baze').should be_nil + end + + it %{is sort of like backreferences} do + one = '1 sauk ONEONEONEONEONE' + two = '2 sauk TWOTWOTWOTWO' + d = FuzzyMatch.new([one, two]) + d.find('1 sauk TWOTWOTWOTWO').should == two # wrong + d = FuzzyMatch.new([one, two], :identities => [/\A(\d+)\s+(\w+)/]) + d.find('1 sauk TWOTWOTWOTWO').should == one # correct + end + + it %{has a proc form} do + d = FuzzyMatch.new %w{ foo bar }, :identities => [ lambda { |a, b| (a.start_with?('ba') and b.start_with?('ba') ? a[2] == b[2] : nil) } ] + d.find('bar').should == 'bar' + d.find('bare').should == 'bar' + d.find('baz').should be_nil + d.find('baze').should be_nil + end + end + + describe 'groupings' do + it %{sometimes gets false results without them} do + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ] + d.find('Barack Bush').should == 'Barack Obama' # luke i am your father + d.find('George Obama').should == 'George Bush' # nooooooooooooooooooo + end + + it %{can be used to improve results} do + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ], :groupings => [ /Obama/, /Bush/ ] + d.find('Barack Bush').should == 'George Bush' + d.find('George Obama').should == 'Barack Obama' + end + + it %{stays within the group} do + d = FuzzyMatch.new [ 'AB', 'CD' ] + d.find('ABCDCD').should == 'CD' + d = FuzzyMatch.new [ 'AB', 'CD' ], :groupings => [/A/] + d.find('ABCDCD').should == 'AB' + end + + describe 'with chains' do + describe 'hotel example' do + before do + @grandh = 'Grand Hyatt' + @h = 'Hyatt' + @hgarden = 'Hyatt Garden' + @grandhotel = 'Grand Hotel' + @fz = FuzzyMatch.new([@grandh, @h, @hgarden, @grandhotel], :groupings => [ [ /hyatt/i, /garden/i, /grand/i ] ], :stop_words => [ /hotel/i ]) + end + + it %{works as expected} do + @fz.find('Grand Hyatt').should == @grandh + @fz.find('Grand Hyatt Foobar').should == @grandh + @fz.find('Hyatt Garden').should == @hgarden + @fz.find('Hyatt Garden Foobar').should == @hgarden + end + + it %{enforces some stuff} do + # nope + @fz.find('Grund Hyatt').should == @h + @fz.find('Grund Hyatt Foobar').should == @h + @fz.find('Hyatt Gurden').should == @h + @fz.find('Hyatt Gurden Foobar').should == @h + # hmm - hyatt misspelled, so totally prevented from matching hyatt + @fz.find('Grund Hyutt').should == @grandhotel + @fz.find('Grund Hyutt Foobar').should == @grandhotel + # precedence + @fz.find('Grand Hyatt Garden').should == @hgarden + @fz.find('Grand Hyatt Garden Foobar').should == @hgarden + # sanity + @fz.find('Grund Hyatt Garden').should == @hgarden + @fz.find('Grund Hyatt Garden Foobar').should == @hgarden + @fz.find('Grand Hyatt Gurden').should == @grandh + @fz.find('Grand Hyatt Gurden Foobar').should == @grandh + end + + it %{is sticky} do + @fz.find('Grand Hotel').should == @grandhotel + @fz.find('Hotel Garden').should be_nil + @fz.find('Grand Hotel Garden').should == @grandhotel + end + end + + it "helps with subgroups" do + d = FuzzyMatch.new [ 'Boeing 747', 'Boeing 747SR', 'Boeing ER6' ], :groupings => [ [/boeing/i, /(7\d{2})/] ] + d.find_all('Boeing 747').should == [ 'Boeing 747', 'Boeing 747SR' ] + + d = FuzzyMatch.new [ 'Boeing 747', 'Boeing 747SR', 'Boeing ER6' ], :groupings => [ [/boeing/i, /(7\d{2})/] ] + d.find_all('Boeing ER6').should == ["Boeing ER6"] + + d = FuzzyMatch.new [ 'Boeing 747', 'Boeing 747SR', 'Boeing ER6' ], :groupings => [ [/boeing/i, /(7|E\d{2})/i] ] + d.find_all('Boeing ER6').should == [ 'Boeing ER6' ] + d.find_all('Boeing 747').should == [ 'Boeing 747', 'Boeing 747SR' ] + end + end + end + + describe 
"the :must_match_grouping option" do + it %{optionally only attempt matches with records that fit into a grouping} do + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ], :groupings => [ /Obama/, /Bush/ ], :must_match_grouping => true + d.find('George Clinton').should be_nil + + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ], :groupings => [ /Obama/, /Bush/ ] + d.find('George Clinton', :must_match_grouping => true).should be_nil + end + end + + describe "the :read option" do + it %{interpret a Numeric as an array index} do + ab = ['a', 'b'] + ba = ['b', 'a'] + haystack = [ab, ba] + by_first = FuzzyMatch.new haystack, :read => 0 + by_last = FuzzyMatch.new haystack, :read => 1 + by_first.find('a').should == ab + by_last.find('b').should == ab + by_first.find('b').should == ba + by_last.find('a').should == ba + end + + it %{interpret a Symbol, etc. as hash key} do + ab = { :one => 'a', :two => 'b' } + ba = { :one => 'b', :two => 'a' } + haystack = [ab, ba] + by_first = FuzzyMatch.new haystack, :read => :one + by_last = FuzzyMatch.new haystack, :read => :two + by_first.find('a').should == ab + by_last.find('b').should == ab + by_first.find('b').should == ba + by_last.find('a').should == ba + end + + MyStruct = Struct.new(:one, :two) + it %{interpret a Symbol as a method id (if the object responds to it)} do + ab = MyStruct.new('a', 'b') + ba = MyStruct.new('b', 'a') + haystack = [ab, ba] + by_first = FuzzyMatch.new haystack, :read => :one + by_last = FuzzyMatch.new haystack, :read => :two + by_first.read.should == :one + by_last.read.should == :two + by_first.find('a').should == ab + by_last.find('b').should == ab + by_first.find('b').should == ba + by_last.find('a').should == ba + end + end + + describe 'the :must_match_at_least_one_word option' do + it %{optionally require that the matching record share at least one word with the needle} do + d = FuzzyMatch.new %w{ RATZ CATZ }, :must_match_at_least_one_word => true + d.find('RITZ').should be_nil + + d = FuzzyMatch.new ["Foo's Bar"], :must_match_at_least_one_word => true + d.find("Foo's").should == "Foo's Bar" + d.find("'s").should be_nil + d.find("Foo").should be_nil + + d = FuzzyMatch.new ["Bolivia, Plurinational State of"], :must_match_at_least_one_word => true + d.find("Bolivia").should == "Bolivia, Plurinational State of" + end + + it %{use STOP WORDS} do + d = FuzzyMatch.new [ 'A HOTEL', 'B HTL' ] + d.find('A HTL', :must_match_at_least_one_word => true).should == 'B HTL' + + d = FuzzyMatch.new [ 'A HOTEL', 'B HTL' ], :must_match_at_least_one_word => true + d.find('A HTL').should == 'B HTL' + + d = FuzzyMatch.new [ 'A HOTEL', 'B HTL' ], :must_match_at_least_one_word => true, :stop_words => [ %r{HO?TE?L} ] + d.find('A HTL').should == 'A HOTEL' + end + + it %{not be fooled by substrings (but rather compare whole words to whole words)} do + d = FuzzyMatch.new [ 'PENINSULA HOTELS' ], :must_match_at_least_one_word => true + d.find('DOLCE LA HULPE BXL FI').should be_nil + end + + it %{not be case-sensitive when checking for sharing of words} do + d = FuzzyMatch.new [ 'A', 'B' ] + d.find('a', :must_match_at_least_one_word => true).should == 'A' + end + end + + describe "the :gather_last_result option" do + it %{not gather metadata about the last result by default} do + d = FuzzyMatch.new %w{ NISSAN HONDA } + d.find('MISSAM') + lambda do + d.last_result + end.should raise_error(::RuntimeError, /gather_last_result/) + end + + it %{optionally gather metadata about the last result} do + d = FuzzyMatch.new %w{ NISSAN HONDA } + d.find 
'MISSAM', :gather_last_result => true + d.last_result.score.should == 0.6 + d.last_result.winner.should == 'NISSAN' + end + end + + describe 'quirks' do + it %{should not return false negatives because of one-letter similarities} do + # dices coefficient doesn't think these two are similar at all because it looks at pairs + FuzzyMatch.score_class.new('X foo', 'X bar').dices_coefficient_similar.should == 0 + # so we must compensate for that somewhere + d = FuzzyMatch.new ['X foo', 'randomness'] + d.find('X bar').should == 'X foo' + # without making false positives + d.find('Y bar').should be_nil + end + + it %{finds possible matches even when pair distance fails} do + d = FuzzyMatch.new ['XX', '2 A'] + d.find('2A').should == '2 A' + d = FuzzyMatch.new ['XX', '2A'] + d.find('2 A').should == '2A' + end + + it %{weird blow ups} do + d = FuzzyMatch.new ['XX', '2 A'] + d.find('A').should == '2 A' + d = FuzzyMatch.new ['XX', 'A'] + d.find('2 A').should == 'A' + end + + it %{from the wild 1} do + d = FuzzyMatch.new ["Doyle Collection", "Trump Collection", "Luxury Collection", "Autograph Collection"] + d.find("Algonquin Autograph Collection").should == "Autograph Collection" + end + + end + + describe 'deprecations' do + it %{takes :must_match_blocking as :must_match_grouping} do + d = FuzzyMatch.new [], :must_match_blocking => :a + d.default_options[:must_match_grouping].should == :a + end + + it %{takes :haystack_reader as :read} do + d = FuzzyMatch.new [], :haystack_reader => :c + d.read.should == :c + end + + it %{takes :blockings as :groupings} do + d = FuzzyMatch.new [], :blockings => [ /X/, /Y/ ] + d.groupings.should == [ FuzzyMatch::Rule::Grouping.new(/X/), FuzzyMatch::Rule::Grouping.new(/Y/) ] + end + end + + it %{defaults to a pure-ruby engine, but also has amatch} do + if defined?($testing_amatch) and $testing_amatch + FuzzyMatch.engine.should == :amatch + else + FuzzyMatch.engine.should == :pure_ruby + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/grouping_spec.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/grouping_spec.rb new file mode 100644 index 0000000..76b4165 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/grouping_spec.rb @@ -0,0 +1,60 @@ +require 'spec_helper' + +describe FuzzyMatch::Rule::Grouping do + it %{matches a single string argument} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xmatch?(r('2 apples')).should == true + end + + it %{embraces case insensitivity} do + b = FuzzyMatch::Rule::Grouping.new %r{apple}i + b.xmatch?(r('2 Apples')).should == true + end + + it %{xjoins two string arguments} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xjoin?(r('apple'), r('2 apples')).should == true + end + + it %{fails to xjoin two string arguments} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xjoin?(r('orange'), r('2 apples')).should == false + end + + it %{returns nil instead of false when it has no information} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xjoin?(r('orange'), r('orange')).should be_nil + end + + it %{has chains} do + h, gr, ga = FuzzyMatch::Rule::Grouping.make([/hyatt/, /grand/, /garden/]) + h.xjoin?(r('hyatt'), r('hyatt')).should == true + + h.xjoin?(r('grund hyatt'), r('grand hyatt')).should == true + gr.xjoin?(r('grund hyatt'), r('grand hyatt')).should == false + ga.xjoin?(r('grund hyatt'), r('grand hyatt')).should be_nil + + h.xjoin?(r('hyatt gurden'), r('hyatt garden')).should == true + gr.xjoin?(r('hyatt gurden'), r('hyatt garden')).should be_nil + 
ga.xjoin?(r('hyatt gurden'), r('hyatt garden')).should == false + + h.xjoin?(r('grand hyatt'), r('grand hyatt')).should == false # sacrificing itself + gr.xjoin?(r('grand hyatt'), r('grand hyatt')).should == true + ga.xjoin?(r('grand hyatt'), r('grand hyatt')).should be_nil + + h.xjoin?(r('hyatt garden'), r('hyatt garden')).should == false # sacrificing itself + gr.xjoin?(r('hyatt garden'), r('hyatt garden')).should be_nil + ga.xjoin?(r('hyatt garden'), r('hyatt garden')).should == true + + h.xjoin?(r('grand hyatt garden'), r('grand hyatt garden')).should == false # sacrificing itself + gr.xjoin?(r('grand hyatt garden'), r('grand hyatt garden')).should == true + ga.xjoin?(r('grand hyatt garden'), r('grand hyatt garden')).should == true # NOT sacrificing itself? + end + + private + + def r(str) + FuzzyMatch::Record.new str + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/identity_spec.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/identity_spec.rb new file mode 100644 index 0000000..d1e5f2f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/identity_spec.rb @@ -0,0 +1,29 @@ +require 'spec_helper' + +describe FuzzyMatch::Rule::Identity do + it %{determines whether two records COULD be identical} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)} + i.identical?(r('A1'), r('A 1foobar')).should == true + end + + it %{determines that two records MUST NOT be identical} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)} + i.identical?(r('A1'), r('A 2foobar')).should == false + end + + it %{returns nil indicating no information} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)} + i.identical?(r('B1'), r('A 2foobar')).should == nil + end + + it %{embraces case insensitivity} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)}i + i.identical?(r('A1'), r('a 1foobar')).should == true + end + + private + + def r(str) + FuzzyMatch::Record.new str + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/record_spec.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/record_spec.rb new file mode 100644 index 0000000..0958ea3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/record_spec.rb @@ -0,0 +1,25 @@ +require 'spec_helper' + +describe FuzzyMatch::Record do + it %{does not treat "'s" as a word} do + assert_split ["foo's", "bar"], "Foo's Bar" + end + + it %{treats "bolivia," as just "bolivia"} do + assert_split ["bolivia", "plurinational", "state"], "Bolivia, Plurinational State" + end + + it %{does not split up hyphenated words} do + assert_split ['north-west'], "north-west" + end + + it %{splits up words as expected} do + assert_split ['the', 'quick', "fox's", 'mouth', 'is', 'always', 'full'], "the quick fox's mouth -- is always full." + end + + private + + def assert_split(ary, str) + FuzzyMatch::Record.new(str).words.should == ary + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/spec_helper.rb b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/spec_helper.rb new file mode 100644 index 0000000..847a213 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/fuzzy_match-2.0.4/spec/spec_helper.rb @@ -0,0 +1,21 @@ +# This file was generated by the `rspec --init` command. Conventionally, all +# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`. +# Require this file using `require "spec_helper"` to ensure that it is only +# loaded once. 
+# +# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration +RSpec.configure do |config| + config.treat_symbols_as_metadata_keys_with_true_values = true + config.run_all_when_everything_filtered = true + config.filter_run :focus + + # Run specs in random order to surface order dependencies. If you find an + # order dependency and want to debug it, you can fix the order by providing + # the seed, which is printed after each run. + # --seed 1234 + config.order = 'random' +end + +require 'pry' + +require 'fuzzy_match' diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/README.md b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/README.md new file mode 100644 index 0000000..0296264 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/README.md @@ -0,0 +1,85 @@ +httpclient - HTTP accessing library. [![Gem Version](https://badge.fury.io/rb/httpclient.svg)](http://badge.fury.io/rb/httpclient) + +Copyright (C) 2000-2015 NAKAMURA, Hiroshi . + +'httpclient' gives something like the functionality of libwww-perl (LWP) in +Ruby. 'httpclient' formerly known as 'http-access2'. + +See [HTTPClient](http://www.rubydoc.info/gems/httpclient/frames) for documentation. + + +## Features + +* methods like GET/HEAD/POST/* via HTTP/1.1. +* HTTPS(SSL), Cookies, proxy, authentication(Digest, NTLM, Basic), etc. +* asynchronous HTTP request, streaming HTTP request. +* debug mode CLI. +* by contrast with net/http in standard distribution; + * Cookies support + * MT-safe + * streaming POST (POST with File/IO) + * Digest auth + * Negotiate/NTLM auth for WWW-Authenticate (requires net/ntlm module; rubyntlm gem) + * NTLM auth for Proxy-Authenticate (requires 'win32/sspi' module; rubysspi gem) + * extensible with filter interface + * you don't have to care HTTP/1.1 persistent connection + (httpclient cares instead of you) +* Not supported now + * Cache + * Rather advanced HTTP/1.1 usage such as Range, deflate, etc. + (of course you can set it in header by yourself) + +## httpclient command + +Usage: 1) `httpclient get https://www.google.co.jp/?q=ruby` +Usage: 2) `httpclient` + +For 1) it issues a GET request to the given URI and shows the wiredump and +the parsed result. For 2) it invokes irb shell with the binding that has a +HTTPClient as 'self'. You can call HTTPClient instance methods like; + +```ruby +get "https://www.google.co.jp/", :q => :ruby +``` + +## Author + + * Name:: Hiroshi Nakamura + * E-mail:: nahi@ruby-lang.org + * Project web site:: http://github.com/nahi/httpclient + + +## License + +This program is copyrighted free software by NAKAMURA, Hiroshi. You can +redistribute it and/or modify it under the same terms of Ruby's license; +either the dual license version in 2003, or any later version. + +httpclient/session.rb is based on http-access.rb in http-access/0.0.4. Some +part of it is copyrighted by Maebashi-san who made and published +http-access/0.0.4. http-access/0.0.4 did not include license notice but when +I asked Maebashi-san he agreed that I can redistribute it under the same terms +of Ruby. Many thanks to Maebashi-san. + + +## Install + +You can install httpclient via rubygems: `gem install httpclient` + + +## Usage + +See [HTTPClient](http://www.rubydoc.info/gems/httpclient/frames) for documentation. +You can also check sample/howto.rb how to use APIs. + +## Bug report or Feature request + +Please file a ticket at the project web site. + +1. find a similar ticket from https://github.com/nahi/httpclient/issues +2. create a new ticket by clicking 'Create Issue' button. 
+3. you can use github features such as pull-request if you like. + +## Changes + +See [ChangeLog](https://github.com/nahi/httpclient/blob/master/CHANGELOG.md) diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/bin/httpclient b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/bin/httpclient new file mode 100755 index 0000000..57d3f3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/bin/httpclient @@ -0,0 +1,77 @@ +#!/usr/bin/env ruby + +# httpclient shell command. +# +# Usage: 1) % httpclient get https://www.google.co.jp/ q=ruby +# Usage: 2) % httpclient +# +# For 1) it issues a GET request to the given URI and shows the wiredump and +# the parsed result. For 2) it invokes irb shell with the binding that has a +# HTTPClient as 'self'. You can call HTTPClient instance methods like; +# > get "https://www.google.co.jp/", :q => :ruby +require 'httpclient' + +method = ARGV.shift +if method == 'version' + puts HTTPClient::VERSION + exit +end + +url = ARGV.shift +if method && url + client = HTTPClient.new + client.strict_response_size_check = true + if method == 'download' + print client.get_content(url) + else + client.debug_dev = STDERR + $DEBUG = true + require 'pp' + pp client.send(method, url, *ARGV) + end + exit +end + +require 'irb' +require 'irb/completion' + +class Runner + def initialize + @httpclient = HTTPClient.new + @httpclient.strict_response_size_check = true + end + + def method_missing(msg, *a, &b) + debug, $DEBUG = $DEBUG, true + begin + @httpclient.send(msg, *a, &b) + ensure + $DEBUG = debug + end + end + + def run + IRB.setup(nil) + ws = IRB::WorkSpace.new(binding) + irb = IRB::Irb.new(ws) + IRB.conf[:MAIN_CONTEXT] = irb.context + + trap("SIGINT") do + irb.signal_handle + end + + begin + catch(:IRB_EXIT) do + irb.eval_input + end + ensure + IRB.irb_at_exit + end + end + + def to_s + 'HTTPClient' + end +end + +Runner.new.run diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/bin/jsonclient b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/bin/jsonclient new file mode 100755 index 0000000..8f9af3a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/bin/jsonclient @@ -0,0 +1,85 @@ +#!/usr/bin/env ruby + +# jsonclient shell command. +# +# Usage: 1) % jsonclient post https://www.example.com/ content.json +# Usage: 2) % jsonclient +# +# For 1) it issues a GET request to the given URI and shows the wiredump and +# the parsed result. For 2) it invokes irb shell with the binding that has a +# JSONClient as 'self'. You can call JSONClient instance methods like; +# > post "https://www.example.com/resource", {'hello' => 'world'} +require 'jsonclient' + +method = ARGV.shift +url = ARGV.shift +body = [] +if ['post', 'put'].include?(method) + if ARGV.size == 1 && File.exist?(ARGV[0]) + body << File.read(ARGV[0]) + else + body << ARGF.read + end +end +if method && url + require 'pp' + client = JSONClient.new + client.debug_dev = STDERR if $DEBUG + res = client.send(method, url, *body) + STDERR.puts('RESPONSE HEADER: ') + PP.pp(res.headers, STDERR) + if res.ok? 
+ begin + puts JSON.pretty_generate(res.content) + rescue JSON::GeneratorError + puts res.content + end + exit 0 + else + STDERR.puts res.content + exit 1 + end +end + +require 'irb' +require 'irb/completion' + +class Runner + def initialize + @httpclient = JSONClient.new + end + + def method_missing(msg, *a, &b) + debug, $DEBUG = $DEBUG, true + begin + @httpclient.send(msg, *a, &b) + ensure + $DEBUG = debug + end + end + + def run + IRB.setup(nil) + ws = IRB::WorkSpace.new(binding) + irb = IRB::Irb.new(ws) + IRB.conf[:MAIN_CONTEXT] = irb.context + + trap("SIGINT") do + irb.signal_handle + end + + begin + catch(:IRB_EXIT) do + irb.eval_input + end + ensure + IRB.irb_at_exit + end + end + + def to_s + 'JSONClient' + end +end + +Runner.new.run diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/hexdump.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/hexdump.rb new file mode 100644 index 0000000..4cd006f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/hexdump.rb @@ -0,0 +1,50 @@ +# encoding: binary + +# This was written by Arai-san and published at +# http://blade.nagaokaut.ac.jp/cgi-bin/scat.rb/ruby/ruby-list/31987 + + +module HexDump + # str must be in BINARY encoding in 1.9 + def encode(str) + offset = 0 + result = [] + while raw = str.slice(offset, 16) and raw.length > 0 + # data field + data = '' + for v in raw.unpack('N* a*') + if v.kind_of? Integer + data << sprintf("%08x ", v) + else + v.each_byte {|c| data << sprintf("%02x", c) } + end + end + # text field + text = raw.tr("\000-\037\177-\377", ".") + result << sprintf("%08x %-36s %s", offset, data, text) + offset += 16 + # omit duplicate line + if /^(#{regex_quote_n(raw)})+/n =~ str[offset .. -1] + result << sprintf("%08x ...", offset) + offset += $&.length + # should print at the end + if offset == str.length + result << sprintf("%08x %-36s %s", offset-16, data, text) + end + end + end + result + end + module_function :encode + + if RUBY_VERSION >= "1.9" + # raw must be in BINARY encoding in 1.9 + def self.regex_quote_n(raw) + Regexp.quote(raw) + end + else + def self.regex_quote_n(raw) + Regexp.quote(raw, 'n') + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2.rb new file mode 100644 index 0000000..4588011 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2.rb @@ -0,0 +1,55 @@ +# HTTPAccess2 - HTTP accessing library. +# Copyright (C) 2000-2007 NAKAMURA, Hiroshi . + +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + +# http-access2.rb is based on http-access.rb in http-access/0.0.4. Some +# part of it is copyrighted by Maebashi-san who made and published +# http-access/0.0.4. http-access/0.0.4 did not include license notice but +# when I asked Maebashi-san he agreed that I can redistribute it under the +# same terms of Ruby. Many thanks to Maebashi-san. 
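+# A minimal sketch of what the compatibility module below enables, assuming
+# legacy code written against the old http-access2 API (the URL is only a
+# placeholder taken from the HTTPClient documentation):
+#
+#   clnt = HTTPAccess2::Client.new      # actually a ::HTTPClient subclass
+#   p clnt.get('http://dev.ctor.org/').status
+#
+# Because the constants defined here are aliases for their HTTPClient
+# counterparts, such code runs against the current implementation unchanged.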
+ + +require 'httpclient' + + +module HTTPAccess2 + VERSION = ::HTTPClient::VERSION + RUBY_VERSION_STRING = ::HTTPClient::RUBY_VERSION_STRING + SSLEnabled = ::HTTPClient::SSLEnabled + SSPIEnabled = ::HTTPClient::SSPIEnabled + DEBUG_SSL = true + + Util = ::HTTPClient::Util + + class Client < ::HTTPClient + class RetryableResponse < StandardError + end + end + + SSLConfig = ::HTTPClient::SSLConfig + BasicAuth = ::HTTPClient::BasicAuth + DigestAuth = ::HTTPClient::DigestAuth + NegotiateAuth = ::HTTPClient::NegotiateAuth + AuthFilterBase = ::HTTPClient::AuthFilterBase + WWWAuth = ::HTTPClient::WWWAuth + ProxyAuth = ::HTTPClient::ProxyAuth + Site = ::HTTPClient::Site + Connection = ::HTTPClient::Connection + SessionManager = ::HTTPClient::SessionManager + SSLSocketWrap = ::HTTPClient::SSLSocket + DebugSocket = ::HTTPClient::DebugSocket + + class Session < ::HTTPClient::Session + class Error < StandardError + end + class InvalidState < Error + end + class BadResponse < Error + end + class KeepAliveDisconnected < Error + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2/cookie.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2/cookie.rb new file mode 100644 index 0000000..56f7884 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2/cookie.rb @@ -0,0 +1 @@ +require 'httpclient/cookie' diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2/http.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2/http.rb new file mode 100644 index 0000000..fc9b23c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/http-access2/http.rb @@ -0,0 +1 @@ +require 'httpclient/http' diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient.rb new file mode 100644 index 0000000..4f4b429 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient.rb @@ -0,0 +1,1327 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'stringio' +require 'digest/sha1' + +# Extra library +require 'httpclient/version' +require 'httpclient/util' +require 'httpclient/ssl_config' +require 'httpclient/connection' +require 'httpclient/session' +require 'httpclient/http' +require 'httpclient/auth' +require 'httpclient/cookie' + +# :main:HTTPClient +# The HTTPClient class provides several methods for accessing Web resources +# via HTTP. +# +# HTTPClient instance is designed to be MT-safe. You can call a HTTPClient +# instance from several threads without synchronization after setting up an +# instance. +# +# clnt = HTTPClient.new +# clnt.set_cookie_store('/home/nahi/cookie.dat') +# urls.each do |url| +# Thread.new(url) do |u| +# p clnt.head(u).status +# end +# end +# +# == How to use +# +# At first, how to create your client. See initialize for more detail. +# +# 1. Create simple client. +# +# clnt = HTTPClient.new +# +# 2. Accessing resources through HTTP proxy. You can use environment +# variable 'http_proxy' or 'HTTP_PROXY' instead. +# +# clnt = HTTPClient.new('http://myproxy:8080') +# +# === How to retrieve web resources +# +# See get and get_content. +# +# 1. Get content of specified URL. 
It returns HTTP::Message object and +# calling 'body' method of it returns a content String. +# +# puts clnt.get('http://dev.ctor.org/').body +# +# 2. For getting content directly, use get_content. It follows redirect +# response and returns a String of whole result. +# +# puts clnt.get_content('http://dev.ctor.org/') +# +# 3. You can pass :follow_redirect option to follow redirect response in get. +# +# puts clnt.get('http://dev.ctor.org/', :follow_redirect => true) +# +# 4. Get content as chunks of String. It yields chunks of String. +# +# clnt.get_content('http://dev.ctor.org/') do |chunk| +# puts chunk +# end +# +# === Invoking other HTTP methods +# +# See head, get, post, put, delete, options, propfind, proppatch and trace. +# It returns a HTTP::Message instance as a response. +# +# 1. Do HEAD request. +# +# res = clnt.head(uri) +# p res.header['Last-Modified'][0] +# +# 2. Do GET request with query. +# +# query = { 'keyword' => 'ruby', 'lang' => 'en' } +# res = clnt.get(uri, query) +# p res.status +# p res.contenttype +# p res.header['X-Custom'] +# puts res.body +# +# You can also use keyword argument style. +# +# res = clnt.get(uri, :query => { :keyword => 'ruby', :lang => 'en' }) +# +# === How to POST +# +# See post. +# +# 1. Do POST a form data. +# +# body = { 'keyword' => 'ruby', 'lang' => 'en' } +# res = clnt.post(uri, body) +# +# Keyword argument style. +# +# res = clnt.post(uri, :body => ...) +# +# 2. Do multipart file upload with POST. No need to set extra header by +# yourself from httpclient/2.1.4. +# +# File.open('/tmp/post_data') do |file| +# body = { 'upload' => file, 'user' => 'nahi' } +# res = clnt.post(uri, body) +# end +# +# 3. Do multipart with custom body. +# +# File.open('/tmp/post_data') do |file| +# body = [{ 'Content-Type' => 'application/atom+xml; charset=UTF-8', +# :content => '...' }, +# { 'Content-Type' => 'video/mp4', +# 'Content-Transfer-Encoding' => 'binary', +# :content => file }] +# res = clnt.post(uri, body) +# end +# +# === Accessing via SSL +# +# Ruby needs to be compiled with OpenSSL. +# +# 1. Get content of specified URL via SSL. +# Just pass an URL which starts with 'https://'. +# +# https_url = 'https://www.rsa.com' +# clnt.get(https_url) +# +# 2. Getting peer certificate from response. +# +# res = clnt.get(https_url) +# p res.peer_cert #=> returns OpenSSL::X509::Certificate +# +# 3. Configuring OpenSSL options. See HTTPClient::SSLConfig for more details. +# +# user_cert_file = 'cert.pem' +# user_key_file = 'privkey.pem' +# clnt.ssl_config.set_client_cert_file(user_cert_file, user_key_file) +# clnt.get(https_url) +# +# 4. Revocation check. On JRuby you can set following options to let +# HTTPClient to perform revocation check with CRL and OCSP: +# +# -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true +# ex. jruby -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true app.rb +# Revoked cert example: https://test-sspev.verisign.com:2443/test-SSPEV-revoked-verisign.html +# +# On other platform you can download CRL by yourself and set it via +# SSLConfig#add_crl. +# +# === Handling Cookies +# +# 1. Using volatile Cookies. Nothing to do. HTTPClient handles Cookies. +# +# clnt = HTTPClient.new +# res = clnt.get(url1) # receives Cookies. +# res = clnt.get(url2) # sends Cookies if needed. +# p res.cookies +# +# 2. Saving non volatile Cookies to a specified file. Need to set a file at +# first and invoke save method at last. 
+# +# clnt = HTTPClient.new +# clnt.set_cookie_store('/home/nahi/cookie.dat') +# clnt.get(url) +# ... +# clnt.save_cookie_store +# +# 3. Disabling Cookies. +# +# clnt = HTTPClient.new +# clnt.cookie_manager = nil +# +# === Configuring authentication credentials +# +# 1. Authentication with Web server. Supports BasicAuth, DigestAuth, and +# Negotiate/NTLM (requires ruby/ntlm module). +# +# clnt = HTTPClient.new +# domain = 'http://dev.ctor.org/http-access2/' +# user = 'user' +# password = 'user' +# clnt.set_auth(domain, user, password) +# p clnt.get('http://dev.ctor.org/http-access2/login').status +# +# 2. Authentication with Proxy server. Supports BasicAuth and NTLM +# (requires win32/sspi) +# +# clnt = HTTPClient.new(proxy) +# user = 'proxy' +# password = 'proxy' +# clnt.set_proxy_auth(user, password) +# p clnt.get(url) +# +# === Invoking HTTP methods with custom header +# +# Pass a Hash or an Array for header argument. +# +# header = { 'Accept' => 'text/html' } +# clnt.get(uri, query, header) +# +# header = [['Accept', 'image/jpeg'], ['Accept', 'image/png']] +# clnt.get_content(uri, query, header) +# +# === Invoking HTTP methods asynchronously +# +# See head_async, get_async, post_async, put_async, delete_async, +# options_async, propfind_async, proppatch_async, and trace_async. +# It immediately returns a HTTPClient::Connection instance as a returning value. +# +# connection = clnt.post_async(url, body) +# print 'posting.' +# while true +# break if connection.finished? +# print '.' +# sleep 1 +# end +# puts '.' +# res = connection.pop +# p res.status +# p res.body.read # res.body is an IO for the res of async method. +# +# === Shortcut methods +# +# You can invoke get_content, get, etc. without creating HTTPClient instance. +# +# ruby -rhttpclient -e 'puts HTTPClient.get_content(ARGV.shift)' http://dev.ctor.org/ +# ruby -rhttpclient -e 'p HTTPClient.head(ARGV.shift).header["last-modified"]' http://dev.ctor.org/ +# +class HTTPClient + RUBY_VERSION_STRING = "ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE})" + LIB_NAME = "(#{VERSION}, #{RUBY_VERSION_STRING})" + + include Util + + # Raised for indicating running environment configuration error for example + # accessing via SSL under the ruby which is not compiled with OpenSSL. + class ConfigurationError < StandardError + end + + # Raised for indicating HTTP response error. + class BadResponseError < RuntimeError + # HTTP::Message:: a response + attr_reader :res + + def initialize(msg, res = nil) # :nodoc: + super(msg) + @res = res + end + end + + # Raised for indicating a timeout error. + class TimeoutError < RuntimeError + end + + # Raised for indicating a connection timeout error. + # You can configure connection timeout via HTTPClient#connect_timeout=. + class ConnectTimeoutError < TimeoutError + end + + # Raised for indicating a request sending timeout error. + # You can configure request sending timeout via HTTPClient#send_timeout=. + class SendTimeoutError < TimeoutError + end + + # Raised for indicating a response receiving timeout error. + # You can configure response receiving timeout via + # HTTPClient#receive_timeout=. + class ReceiveTimeoutError < TimeoutError + end + + # Deprecated. 
just for backward compatibility + class Session + BadResponse = ::HTTPClient::BadResponseError + end + + class << self + %w(get_content post_content head get post put delete options propfind proppatch trace).each do |name| + eval <<-EOD + def #{name}(*arg, &block) + clnt = new + begin + clnt.#{name}(*arg, &block) + ensure + clnt.reset_all + end + end + EOD + end + + private + + def attr_proxy(symbol, assignable = false) + name = symbol.to_s + define_method(name) { + @session_manager.__send__(name) + } + if assignable + aname = name + '=' + define_method(aname) { |rhs| + @session_manager.__send__(aname, rhs) + } + end + end + end + + # HTTPClient::SSLConfig:: SSL configurator. + attr_reader :ssl_config + # HTTPClient::CookieManager:: Cookies configurator. + attr_accessor :cookie_manager + # An array of response HTTP message body String which is used for loop-back + # test. See test/* to see how to use it. If you want to do loop-back test + # of HTTP header, use test_loopback_http_response instead. + attr_reader :test_loopback_response + # An array of request filter which can trap HTTP request/response. + # See HTTPClient::WWWAuth to see how to use it. + attr_reader :request_filter + # HTTPClient::ProxyAuth:: Proxy authentication handler. + attr_reader :proxy_auth + # HTTPClient::WWWAuth:: WWW authentication handler. + attr_reader :www_auth + # How many times get_content and post_content follows HTTP redirect. + # 10 by default. + attr_accessor :follow_redirect_count + # Base url of resources. + attr_accessor :base_url + # Default request header. + attr_accessor :default_header + + # Set HTTP version as a String:: 'HTTP/1.0' or 'HTTP/1.1' + attr_proxy(:protocol_version, true) + # Connect timeout in sec. + attr_proxy(:connect_timeout, true) + # Request sending timeout in sec. + attr_proxy(:send_timeout, true) + # Response receiving timeout in sec. + attr_proxy(:receive_timeout, true) + # Reuse the same connection within this timeout in sec. from last used. + attr_proxy(:keep_alive_timeout, true) + # Size of reading block for non-chunked response. + attr_proxy(:read_block_size, true) + # Negotiation retry count for authentication. 5 by default. + attr_proxy(:protocol_retry_count, true) + # if your ruby is older than 2005-09-06, do not set socket_sync = false to + # avoid an SSL socket blocking bug in openssl/buffering.rb. + attr_proxy(:socket_sync, true) + # Enables TCP keepalive; no timing settings exist at present + attr_proxy(:tcp_keepalive, true) + # User-Agent header in HTTP request. + attr_proxy(:agent_name, true) + # From header in HTTP request. + attr_proxy(:from, true) + # An array of response HTTP String (not a HTTP message body) which is used + # for loopback test. See test/* to see how to use it. + attr_proxy(:test_loopback_http_response) + # Decompress a compressed (with gzip or deflate) content body transparently. false by default. + attr_proxy(:transparent_gzip_decompression, true) + # Raise BadResponseError if response size does not match with Content-Length header in response. false by default. + # TODO: enable by default + attr_proxy(:strict_response_size_check, true) + # Local socket address. Set HTTPClient#socket_local.host and HTTPClient#socket_local.port to specify local binding hostname and port of TCP socket. + attr_proxy(:socket_local, true) + + # Default header for PROPFIND request. 
+ PROPFIND_DEFAULT_EXTHEADER = { 'Depth' => '0' } + + # Default User-Agent header + DEFAULT_AGENT_NAME = 'HTTPClient/1.0' + + # Creates a HTTPClient instance which manages sessions, cookies, etc. + # + # HTTPClient.new takes optional arguments as a Hash. + # * :proxy - proxy url string + # * :agent_name - User-Agent String + # * :from - from header String + # * :base_url - base URL of resources + # * :default_header - header Hash all HTTP requests should have + # * :force_basic_auth - flag for sending Authorization header w/o gettin 401 first + # User-Agent and From are embedded in HTTP request Header if given. + # From header is not set without setting it explicitly. + # + # proxy = 'http://myproxy:8080' + # agent_name = 'MyAgent/0.1' + # from = 'from@example.com' + # HTTPClient.new(proxy, agent_name, from) + # + # After you set :base_url, all resources you pass to get, post and other + # methods are recognized to be prefixed with base_url. Say base_url is + # 'https://api.example.com/v1/, get('users') is the same as + # get('https://api.example.com/v1/users') internally. You can also pass + # full URL from 'http://' even after setting base_url. + # + # The expected base_url and path behavior is the following. Please take + # care of '/' in base_url and path. + # + # The last '/' is important for base_url: + # 1. http://foo/bar/baz/ + path -> http://foo/bar/baz/path + # 2. http://foo/bar/baz + path -> http://foo/bar/path + # Relative path handling: + # 3. http://foo/bar/baz/ + ../path -> http://foo/bar/path + # 4. http://foo/bar/baz + ../path -> http://foo/path + # 5. http://foo/bar/baz/ + ./path -> http://foo/bar/baz/path + # 6. http://foo/bar/baz + ./path -> http://foo/bar/path + # The leading '/' of path means absolute path: + # 7. http://foo/bar/baz/ + /path -> http://foo/path + # 8. http://foo/bar/baz + /path -> http://foo/path + # + # :default_header is for providing default headers Hash that all HTTP + # requests should have, such as custom 'Authorization' header in API. + # You can override :default_header with :header Hash parameter in HTTP + # request methods. + # + # :force_basic_auth turns on/off the BasicAuth force flag. Generally + # HTTP client must send Authorization header after it gets 401 error + # from server from security reason. But in some situation (e.g. API + # client) you might want to send Authorization from the beginning. + def initialize(*args, &block) + proxy, agent_name, from, base_url, default_header, force_basic_auth = + keyword_argument(args, :proxy, :agent_name, :from, :base_url, :default_header, :force_basic_auth) + @proxy = nil # assigned later. + @no_proxy = nil + @no_proxy_regexps = [] + @base_url = base_url + @default_header = default_header || {} + @www_auth = WWWAuth.new + @proxy_auth = ProxyAuth.new + @www_auth.basic_auth.force_auth = @proxy_auth.basic_auth.force_auth = force_basic_auth + @request_filter = [@proxy_auth, @www_auth] + @debug_dev = nil + @redirect_uri_callback = method(:default_redirect_uri_callback) + @test_loopback_response = [] + @session_manager = SessionManager.new(self) + @session_manager.agent_name = agent_name || DEFAULT_AGENT_NAME + @session_manager.from = from + @session_manager.ssl_config = @ssl_config = SSLConfig.new(self) + @cookie_manager = CookieManager.new + @follow_redirect_count = 10 + load_environment + self.proxy = proxy if proxy + keep_webmock_compat + instance_eval(&block) if block + end + + # webmock 1.6.2 depends on HTTP::Message#body.content to work. + # let's keep it work iif webmock is loaded for a while. 
+ def keep_webmock_compat + if respond_to?(:do_get_block_with_webmock) + ::HTTP::Message.module_eval do + def body + def (o = self.content).content + self + end + o + end + end + end + end + + # Returns debug device if exists. See debug_dev=. + def debug_dev + @debug_dev + end + + # Sets debug device. Once debug device is set, all HTTP requests and + # responses are dumped to given device. dev must respond to << for dump. + # + # Calling this method resets all existing sessions. + def debug_dev=(dev) + @debug_dev = dev + reset_all + @session_manager.debug_dev = dev + end + + # Returns URI object of HTTP proxy if exists. + def proxy + @proxy + end + + # Sets HTTP proxy used for HTTP connection. Given proxy can be an URI, + # a String or nil. You can set user/password for proxy authentication like + # HTTPClient#proxy = 'http://user:passwd@myproxy:8080' + # + # You can use environment variable 'http_proxy' or 'HTTP_PROXY' for it. + # You need to use 'cgi_http_proxy' or 'CGI_HTTP_PROXY' instead if you run + # HTTPClient from CGI environment from security reason. (HTTPClient checks + # 'REQUEST_METHOD' environment variable whether it's CGI or not) + # + # Calling this method resets all existing sessions. + def proxy=(proxy) + if proxy.nil? || proxy.to_s.empty? + @proxy = nil + @proxy_auth.reset_challenge + else + @proxy = urify(proxy) + if @proxy.scheme == nil or @proxy.scheme.downcase != 'http' or + @proxy.host == nil or @proxy.port == nil + raise ArgumentError.new("unsupported proxy #{proxy}") + end + @proxy_auth.reset_challenge + if @proxy.user || @proxy.password + @proxy_auth.set_auth(@proxy.user, @proxy.password) + end + end + reset_all + @session_manager.proxy = @proxy + @proxy + end + + # Returns NO_PROXY setting String if given. + def no_proxy + @no_proxy + end + + # Sets NO_PROXY setting String. no_proxy must be a comma separated String. + # Each entry must be 'host' or 'host:port' such as; + # HTTPClient#no_proxy = 'example.com,example.co.jp:443' + # + # 'localhost' is treated as a no_proxy site regardless of explicitly listed. + # HTTPClient checks given URI objects before accessing it. + # 'host' is tail string match. No IP-addr conversion. + # + # You can use environment variable 'no_proxy' or 'NO_PROXY' for it. + # + # Calling this method resets all existing sessions. + def no_proxy=(no_proxy) + @no_proxy = no_proxy + @no_proxy_regexps.clear + if @no_proxy + @no_proxy.scan(/([^\s:,]+)(?::(\d+))?/) do |host, port| + if host[0] == ?. + regexp = /#{Regexp.quote(host)}\z/i + else + regexp = /(\A|\.)#{Regexp.quote(host)}\z/i + end + @no_proxy_regexps << [regexp, port] + end + end + reset_all + end + + # Sets credential for Web server authentication. + # domain:: a String or an URI to specify where HTTPClient should use this + # credential. If you set uri to nil, HTTPClient uses this credential + # wherever a server requires it. + # user:: username String. + # passwd:: password String. + # + # You can set multiple credentials for each uri. + # + # clnt.set_auth('http://www.example.com/foo/', 'foo_user', 'passwd') + # clnt.set_auth('http://www.example.com/bar/', 'bar_user', 'passwd') + # + # Calling this method resets all existing sessions. + def set_auth(domain, user, passwd) + uri = to_resource_url(domain) + @www_auth.set_auth(uri, user, passwd) + reset_all + end + + # Deprecated. Use set_auth instead. 
+ def set_basic_auth(domain, user, passwd) + uri = to_resource_url(domain) + @www_auth.basic_auth.set(uri, user, passwd) + reset_all + end + + # Sets credential for Proxy authentication. + # user:: username String. + # passwd:: password String. + # + # Calling this method resets all existing sessions. + def set_proxy_auth(user, passwd) + @proxy_auth.set_auth(user, passwd) + reset_all + end + + # Turn on/off the BasicAuth force flag. Generally HTTP client must + # send Authorization header after it gets 401 error from server from + # security reason. But in some situation (e.g. API client) you might + # want to send Authorization from the beginning. + def force_basic_auth=(force_basic_auth) + @www_auth.basic_auth.force_auth = @proxy_auth.basic_auth.force_auth = force_basic_auth + end + + # Sets the filename where non-volatile Cookies be saved by calling + # save_cookie_store. + # This method tries to load and managing Cookies from the specified file. + # + # Calling this method resets all existing sessions. + def set_cookie_store(filename) + @cookie_manager.cookies_file = filename + @cookie_manager.load_cookies if filename + reset_all + end + + # Try to save Cookies to the file specified in set_cookie_store. Unexpected + # error will be raised if you don't call set_cookie_store first. + def save_cookie_store + @cookie_manager.save_cookies + end + + # Returns stored cookies. + def cookies + if @cookie_manager + @cookie_manager.cookies + end + end + + # Sets callback proc when HTTP redirect status is returned for get_content + # and post_content. default_redirect_uri_callback is used by default. + # + # If you need strict implementation which does not allow relative URI + # redirection, set strict_redirect_uri_callback instead. + # + # clnt.redirect_uri_callback = clnt.method(:strict_redirect_uri_callback) + # + def redirect_uri_callback=(redirect_uri_callback) + @redirect_uri_callback = redirect_uri_callback + end + + # Retrieves a web resource. + # + # uri:: a String or an URI object which represents an URL of web resource. + # query:: a Hash or an Array of query part of URL. + # e.g. { "a" => "b" } => 'http://host/part?a=b'. + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'http://host/part?a=b&a=c'. + # header:: a Hash or an Array of extra headers. e.g. + # { 'Accept' => 'text/html' } or + # [['Accept', 'image/jpeg'], ['Accept', 'image/png']]. + # &block:: Give a block to get chunked message-body of response like + # get_content(uri) { |chunked_body| ... }. + # Size of each chunk may not be the same. + # + # get_content follows HTTP redirect status (see HTTP::Status.redirect?) + # internally and try to retrieve content from redirected URL. See + # redirect_uri_callback= how HTTP redirection is handled. + # + # If you need to get full HTTP response including HTTP status and headers, + # use get method. get returns HTTP::Message as a response and you need to + # follow HTTP redirect by yourself if you need. + def get_content(uri, *args, &block) + query, header = keyword_argument(args, :query, :header) + success_content(follow_redirect(:get, uri, query, nil, header || {}, &block)) + end + + # Posts a content. + # + # uri:: a String or an URI object which represents an URL of web resource. + # body:: a Hash or an Array of body part. e.g. + # { "a" => "b" } => 'a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'a=b&a=c' + # When you pass a File as a value, it will be posted as a + # multipart/form-data. e.g. 
+ # { 'upload' => file } + # You can also send custom multipart by passing an array of hashes. + # Each part must have a :content attribute which can be a file, all + # other keys will become headers. + # [{ 'Content-Type' => 'text/plain', :content => "some text" }, + # { 'Content-Type' => 'video/mp4', :content => File.new('video.mp4') }] + # => + # header:: a Hash or an Array of extra headers. e.g. + # { 'Accept' => 'text/html' } + # or + # [['Accept', 'image/jpeg'], ['Accept', 'image/png']]. + # &block:: Give a block to get chunked message-body of response like + # post_content(uri) { |chunked_body| ... }. + # Size of each chunk may not be the same. + # + # post_content follows HTTP redirect status (see HTTP::Status.redirect?) + # internally and try to post the content to redirected URL. See + # redirect_uri_callback= how HTTP redirection is handled. + # Bear in mind that you should not depend on post_content because it sends + # the same POST method to the new location which is prohibited in HTTP spec. + # + # If you need to get full HTTP response including HTTP status and headers, + # use post method. + def post_content(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + query, body, header = keyword_argument(args, :query, :body, :header) + else + query = nil + body, header = keyword_argument(args, :body, :header) + end + success_content(follow_redirect(:post, uri, query, body, header || {}, &block)) + end + + # A method for redirect uri callback. How to use: + # clnt.redirect_uri_callback = clnt.method(:strict_redirect_uri_callback) + # This callback does not allow relative redirect such as + # Location: ../foo/ + # in HTTP header. (raises BadResponseError instead) + def strict_redirect_uri_callback(uri, res) + newuri = urify(res.header['location'][0]) + if https?(uri) && !https?(newuri) + raise BadResponseError.new("redirecting to non-https resource") + end + if !http?(newuri) && !https?(newuri) + raise BadResponseError.new("unexpected location: #{newuri}", res) + end + puts "redirect to: #{newuri}" if $DEBUG + newuri + end + + # A default method for redirect uri callback. This method is used by + # HTTPClient instance by default. + # This callback allows relative redirect such as + # Location: ../foo/ + # in HTTP header. + def default_redirect_uri_callback(uri, res) + newuri = urify(res.header['location'][0]) + if !http?(newuri) && !https?(newuri) + warn("#{newuri}: a relative URI in location header which is not recommended") + warn("'The field value consists of a single absolute URI' in HTTP spec") + newuri = uri + newuri + end + if https?(uri) && !https?(newuri) + raise BadResponseError.new("redirecting to non-https resource") + end + puts "redirect to: #{newuri}" if $DEBUG + newuri + end + + # Sends HEAD request to the specified URL. See request for arguments. + def head(uri, *args) + request(:head, uri, argument_to_hash(args, :query, :header, :follow_redirect)) + end + + # Sends GET request to the specified URL. See request for arguments. + def get(uri, *args, &block) + request(:get, uri, argument_to_hash(args, :query, :header, :follow_redirect), &block) + end + + # Sends PATCH request to the specified URL. See request for arguments. + def patch(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request(:patch, uri, new_args, &block) + end + + # Sends POST request to the specified URL. See request for arguments. 
+ # You should not depend on :follow_redirect => true for POST method. It + # sends the same POST method to the new location which is prohibited in HTTP spec. + def post(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header, :follow_redirect) + end + request(:post, uri, new_args, &block) + end + + # Sends PUT request to the specified URL. See request for arguments. + def put(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request(:put, uri, new_args, &block) + end + + # Sends DELETE request to the specified URL. See request for arguments. + def delete(uri, *args, &block) + request(:delete, uri, argument_to_hash(args, :body, :header, :query), &block) + end + + # Sends OPTIONS request to the specified URL. See request for arguments. + def options(uri, *args, &block) + new_args = argument_to_hash(args, :header, :query, :body) + request(:options, uri, new_args, &block) + end + + # Sends PROPFIND request to the specified URL. See request for arguments. + def propfind(uri, *args, &block) + request(:propfind, uri, argument_to_hash(args, :header), &block) + end + + # Sends PROPPATCH request to the specified URL. See request for arguments. + def proppatch(uri, *args, &block) + request(:proppatch, uri, argument_to_hash(args, :body, :header), &block) + end + + # Sends TRACE request to the specified URL. See request for arguments. + def trace(uri, *args, &block) + request('TRACE', uri, argument_to_hash(args, :query, :header), &block) + end + + # Sends a request to the specified URL. + # + # method:: HTTP method to be sent. method.to_s.upcase is used. + # uri:: a String or an URI object which represents an URL of web resource. + # query:: a Hash or an Array of query part of URL. + # e.g. { "a" => "b" } => 'http://host/part?a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'http://host/part?a=b&a=c' + # body:: a Hash or an Array of body part. e.g. + # { "a" => "b" } + # => 'a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] + # => 'a=b&a=c'. + # When the given method is 'POST' and the given body contains a file + # as a value, it will be posted as a multipart/form-data. e.g. + # { 'upload' => file } + # You can also send custom multipart by passing an array of hashes. + # Each part must have a :content attribute which can be a file, all + # other keys will become headers. + # [{ 'Content-Type' => 'text/plain', :content => "some text" }, + # { 'Content-Type' => 'video/mp4', :content => File.new('video.mp4') }] + # => + # See HTTP::Message.file? for actual condition of 'a file'. + # header:: a Hash or an Array of extra headers. e.g. + # { 'Accept' => 'text/html' } or + # [['Accept', 'image/jpeg'], ['Accept', 'image/png']]. + # &block:: Give a block to get chunked message-body of response like + # get(uri) { |chunked_body| ... }. + # Size of each chunk may not be the same. + # + # You can also pass a String as a body. HTTPClient just sends a String as + # a HTTP request message body. + # + # When you pass an IO as a body, HTTPClient sends it as a HTTP request with + # chunked encoding (Transfer-Encoding: chunked in HTTP header) if IO does not + # respond to :size. Bear in mind that some server application does not support + # chunked request. At least cgi.rb does not support it. 
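+ # A minimal usage sketch (illustrative, not part of the upstream documentation;
+ # the host and payload below are placeholders):
+ #
+ #   clnt = HTTPClient.new
+ #   res = clnt.request(:post, 'https://www.example.com/api',
+ #                      :body => { 'name' => 'value' },
+ #                      :header => { 'Accept' => 'application/json' })
+ #   res.status   # => HTTP status code (Integer)
+ #   res.content  # => response body (String)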
+ def request(method, uri, *args, &block) + query, body, header, follow_redirect = keyword_argument(args, :query, :body, :header, :follow_redirect) + if method == :propfind + header ||= PROPFIND_DEFAULT_EXTHEADER + else + header ||= {} + end + uri = to_resource_url(uri) + if block + filtered_block = adapt_block(&block) + end + if follow_redirect + follow_redirect(method, uri, query, body, header, &block) + else + do_request(method, uri, query, body, header, &filtered_block) + end + end + + # Sends HEAD request in async style. See request_async for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def head_async(uri, *args) + request_async2(:head, uri, argument_to_hash(args, :query, :header)) + end + + # Sends GET request in async style. See request_async for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def get_async(uri, *args) + request_async2(:get, uri, argument_to_hash(args, :query, :header)) + end + + # Sends PATCH request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def patch_async(uri, *args) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request_async2(:patch, uri, new_args) + end + + # Sends POST request in async style. See request_async for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def post_async(uri, *args) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request_async2(:post, uri, new_args) + end + + # Sends PUT request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def put_async(uri, *args) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request_async2(:put, uri, new_args) + end + + # Sends DELETE request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def delete_async(uri, *args) + request_async2(:delete, uri, argument_to_hash(args, :body, :header, :query)) + end + + # Sends OPTIONS request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def options_async(uri, *args) + request_async2(:options, uri, argument_to_hash(args, :header, :query, :body)) + end + + # Sends PROPFIND request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def propfind_async(uri, *args) + request_async2(:propfind, uri, argument_to_hash(args, :body, :header)) + end + + # Sends PROPPATCH request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def proppatch_async(uri, *args) + request_async2(:proppatch, uri, argument_to_hash(args, :body, :header)) + end + + # Sends TRACE request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def trace_async(uri, *args) + request_async2(:trace, uri, argument_to_hash(args, :query, :header)) + end + + # Sends a request in async style. request method creates new Thread for + # HTTP connection and returns a HTTPClient::Connection instance immediately. 
+ # + # Arguments definition is the same as request. + def request_async(method, uri, query = nil, body = nil, header = {}) + uri = to_resource_url(uri) + do_request_async(method, uri, query, body, header) + end + + # new method that has same signature as 'request' + def request_async2(method, uri, *args) + query, body, header = keyword_argument(args, :query, :body, :header) + if [:post, :put].include?(method) + body ||= '' + end + if method == :propfind + header ||= PROPFIND_DEFAULT_EXTHEADER + else + header ||= {} + end + uri = to_resource_url(uri) + do_request_async(method, uri, query, body, header) + end + + # Resets internal session for the given URL. Keep-alive connection for the + # site (host-port pair) is disconnected if exists. + def reset(uri) + uri = to_resource_url(uri) + @session_manager.reset(uri) + end + + # Resets all of internal sessions. Keep-alive connections are disconnected. + def reset_all + @session_manager.reset_all + end + +private + + class RetryableResponse < StandardError # :nodoc: + attr_reader :res + + def initialize(res = nil) + @res = res + end + end + + class KeepAliveDisconnected < StandardError # :nodoc: + attr_reader :sess + attr_reader :cause + def initialize(sess = nil, cause = nil) + super("#{self.class.name}: #{cause ? cause.message : nil}") + @sess = sess + @cause = cause + end + end + + def hashy_argument_has_keys(args, *key) + # if the given arg is a single Hash and... + args.size == 1 and Hash === args[0] and + # it has any one of the key + key.all? { |e| args[0].key?(e) } + end + + def do_request(method, uri, query, body, header, &block) + res = nil + if HTTP::Message.file?(body) + pos = body.pos rescue nil + end + retry_count = @session_manager.protocol_retry_count + proxy = no_proxy?(uri) ? nil : @proxy + previous_request = previous_response = nil + while retry_count > 0 + body.pos = pos if pos + req = create_request(method, uri, query, body, header) + if previous_request + # to remember IO positions to read + req.http_body.positions = previous_request.http_body.positions + end + begin + protect_keep_alive_disconnected do + # TODO: remove Connection.new + # We want to delete Connection usage in do_get_block but Newrelic gem depends on it. + # https://github.com/newrelic/rpm/blob/master/lib/new_relic/agent/instrumentation/httpclient.rb#L34-L36 + conn = Connection.new + res = do_get_block(req, proxy, conn, &block) + # Webmock's do_get_block returns ConditionVariable + if !res.respond_to?(:previous) + res = conn.pop + end + end + res.previous = previous_response + break + rescue RetryableResponse => e + previous_request = req + previous_response = res = e.res + retry_count -= 1 + end + end + res + end + + def do_request_async(method, uri, query, body, header) + conn = Connection.new + t = Thread.new(conn) { |tconn| + begin + if HTTP::Message.file?(body) + pos = body.pos rescue nil + end + retry_count = @session_manager.protocol_retry_count + proxy = no_proxy?(uri) ? nil : @proxy + while retry_count > 0 + body.pos = pos if pos + req = create_request(method, uri, query, body, header) + begin + protect_keep_alive_disconnected do + do_get_stream(req, proxy, tconn) + end + break + rescue RetryableResponse + retry_count -= 1 + end + end + rescue Exception => e + conn.push e + end + } + conn.async_thread = t + conn + end + + def load_environment + # http_proxy + if getenv('REQUEST_METHOD') + # HTTP_PROXY conflicts with the environment variable usage in CGI where + # HTTP_* is used for HTTP header information. 
Unlike open-uri, we + # simply ignore http_proxy in CGI env and use cgi_http_proxy instead. + self.proxy = getenv('cgi_http_proxy') + else + self.proxy = getenv('http_proxy') + end + # no_proxy + self.no_proxy = getenv('no_proxy') + end + + def getenv(name) + ENV[name.downcase] || ENV[name.upcase] + end + + def adapt_block(&block) + return block if block.arity == 2 + proc { |r, str| block.call(str) } + end + + def follow_redirect(method, uri, query, body, header, &block) + uri = to_resource_url(uri) + if block + b = adapt_block(&block) + filtered_block = proc { |r, str| + b.call(r, str) if r.ok? + } + end + if HTTP::Message.file?(body) + pos = body.pos rescue nil + end + retry_number = 0 + previous = nil + request_query = query + while retry_number < @follow_redirect_count + body.pos = pos if pos + res = do_request(method, uri, request_query, body, header, &filtered_block) + res.previous = previous + if res.redirect? + if res.header['location'].empty? + raise BadResponseError.new("Missing Location header for redirect", res) + end + method = :get if res.see_other? # See RFC2616 10.3.4 + uri = urify(@redirect_uri_callback.call(uri, res)) + # To avoid duped query parameter. 'location' must include query part. + request_query = nil + previous = res + retry_number += 1 + else + return res + end + end + raise BadResponseError.new("retry count exceeded", res) + end + + def success_content(res) + if res.ok? + return res.content + else + raise BadResponseError.new("unexpected response: #{res.header.inspect}", res) + end + end + + def protect_keep_alive_disconnected + begin + yield + rescue KeepAliveDisconnected + # Force to create new connection + Thread.current[:HTTPClient_AcquireNewConnection] = true + begin + yield + ensure + Thread.current[:HTTPClient_AcquireNewConnection] = false + end + end + end + + def create_request(method, uri, query, body, header) + method = method.to_s.upcase + if header.is_a?(Hash) + header = @default_header.merge(header).to_a + else + header = @default_header.to_a + header.dup + end + boundary = nil + if body + _, content_type = header.find { |key, value| + key.to_s.downcase == 'content-type' + } + if content_type + if /\Amultipart/ =~ content_type + if content_type =~ /boundary=(.+)\z/ + boundary = $1 + else + boundary = create_boundary + content_type = "#{content_type}; boundary=#{boundary}" + header = override_header(header, 'content-type', content_type) + end + end + else + if file_in_form_data?(body) + boundary = create_boundary + content_type = "multipart/form-data; boundary=#{boundary}" + else + content_type = 'application/x-www-form-urlencoded' + end + header << ['Content-Type', content_type] + end + end + req = HTTP::Message.new_request(method, uri, query, body, boundary) + header.each do |key, value| + req.header.add(key.to_s, value) + end + if @cookie_manager + cookie_value = @cookie_manager.cookie_value(uri) + if cookie_value + req.header.add('Cookie', cookie_value) + end + end + req + end + + def create_boundary + Digest::SHA1.hexdigest(Time.now.to_s) + end + + def file_in_form_data?(body) + HTTP::Message.multiparam_query?(body) && + body.any? 
{ |k, v| HTTP::Message.file?(v) } + end + + def override_header(header, key, value) + result = [] + header.each do |k, v| + if k.to_s.downcase == key + result << [key, value] + else + result << [k, v] + end + end + result + end + + NO_PROXY_HOSTS = ['localhost'] + + def no_proxy?(uri) + if !@proxy or NO_PROXY_HOSTS.include?(uri.host) + return true + end + @no_proxy_regexps.each do |regexp, port| + if !port || uri.port == port.to_i + if regexp =~ uri.host + return true + end + end + end + false + end + + # !! CAUTION !! + # Method 'do_get*' runs under MT conditon. Be careful to change. + def do_get_block(req, proxy, conn, &block) + @request_filter.each do |filter| + filter.filter_request(req) + end + if str = @test_loopback_response.shift + dump_dummy_request_response(req.http_body.dump, str) if @debug_dev + res = HTTP::Message.new_response(str, req.header) + conn.push(res) + return res + end + content = block ? nil : '' + res = HTTP::Message.new_response(content, req.header) + @debug_dev << "= Request\n\n" if @debug_dev + sess = @session_manager.query(req, proxy) + res.peer_cert = sess.ssl_peer_cert + @debug_dev << "\n\n= Response\n\n" if @debug_dev + do_get_header(req, res, sess) + conn.push(res) + sess.get_body do |part| + set_encoding(part, res.body_encoding) + if block + block.call(res, part.dup) + else + content << part + end + end + # there could be a race condition but it's OK to cache unreusable + # connection because we do retry for that case. + @session_manager.keep(sess) unless sess.closed? + commands = @request_filter.collect { |filter| + filter.filter_response(req, res) + } + if commands.find { |command| command == :retry } + raise RetryableResponse.new(res) + end + res + end + + def do_get_stream(req, proxy, conn) + @request_filter.each do |filter| + filter.filter_request(req) + end + if str = @test_loopback_response.shift + dump_dummy_request_response(req.http_body.dump, str) if @debug_dev + conn.push(HTTP::Message.new_response(StringIO.new(str), req.header)) + return + end + piper, pipew = IO.pipe + pipew.binmode + res = HTTP::Message.new_response(piper, req.header) + @debug_dev << "= Request\n\n" if @debug_dev + sess = @session_manager.query(req, proxy) + res.peer_cert = sess.ssl_peer_cert + @debug_dev << "\n\n= Response\n\n" if @debug_dev + do_get_header(req, res, sess) + conn.push(res) + sess.get_body do |part| + set_encoding(part, res.body_encoding) + pipew.write(part) + end + pipew.close + @session_manager.keep(sess) unless sess.closed? + _ = @request_filter.collect { |filter| + filter.filter_response(req, res) + } + # ignore commands (not retryable in async mode) + res + end + + def do_get_header(req, res, sess) + res.http_version, res.status, res.reason, headers = sess.get_header + res.header.set_headers(headers) + if @cookie_manager + res.header['set-cookie'].each do |cookie| + @cookie_manager.parse(cookie, req.header.request_uri) + end + end + end + + def dump_dummy_request_response(req, res) + @debug_dev << "= Dummy Request\n\n" + @debug_dev << req + @debug_dev << "\n\n= Dummy Response\n\n" + @debug_dev << res + end + + def set_encoding(str, encoding) + str.force_encoding(encoding) if encoding + end + + def to_resource_url(uri) + u = urify(uri) + if @base_url && u.scheme.nil? && u.host.nil? 
+ urify(@base_url) + uri + else + u + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/auth.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/auth.rb new file mode 100644 index 0000000..6b7b8b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/auth.rb @@ -0,0 +1,924 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'digest/md5' +require 'httpclient/session' +require 'mutex_m' + + +class HTTPClient + + NTLMEnabled = false + SSPIEnabled = false + GSSAPIEnabled = false + + # Common abstract class for authentication filter. + # + # There are 2 authentication filters. + # WWWAuth:: Authentication filter for handling authentication negotiation + # between Web server. Parses 'WWW-Authentication' header in + # response and generates 'Authorization' header in request. + # ProxyAuth:: Authentication filter for handling authentication negotiation + # between Proxy server. Parses 'Proxy-Authentication' header in + # response and generates 'Proxy-Authorization' header in request. + class AuthFilterBase + private + + def parse_authentication_header(res, tag) + challenge = res.header[tag] + return nil unless challenge + challenge.collect { |c| parse_challenge_header(c) }.compact + end + + def parse_challenge_header(challenge) + scheme, param_str = challenge.scan(/\A(\S+)(?:\s+(.*))?\z/)[0] + return nil if scheme.nil? + return scheme, param_str + end + end + + + # Authentication filter for handling authentication negotiation between + # Web server. Parses 'WWW-Authentication' header in response and + # generates 'Authorization' header in request. + # + # Authentication filter is implemented using request filter of HTTPClient. + # It traps HTTP response header and maintains authentication state, and + # traps HTTP request header for inserting necessary authentication header. + # + # WWWAuth has sub filters (BasicAuth, DigestAuth, NegotiateAuth and + # SSPINegotiateAuth) and delegates some operations to it. + # NegotiateAuth requires 'ruby/ntlm' module (rubyntlm gem). + # SSPINegotiateAuth requires 'win32/sspi' module (rubysspi gem). + class WWWAuth < AuthFilterBase + attr_reader :basic_auth + attr_reader :digest_auth + attr_reader :negotiate_auth + attr_reader :sspi_negotiate_auth + attr_reader :oauth + + # Creates new WWWAuth. + def initialize + @basic_auth = BasicAuth.new + @digest_auth = DigestAuth.new + @negotiate_auth = NegotiateAuth.new + @ntlm_auth = NegotiateAuth.new('NTLM') + @sspi_negotiate_auth = SSPINegotiateAuth.new + @oauth = OAuth.new + # sort authenticators by priority + @authenticator = [@oauth, @negotiate_auth, @ntlm_auth, @sspi_negotiate_auth, @digest_auth, @basic_auth] + end + + # Resets challenge state. See sub filters for more details. + def reset_challenge + @authenticator.each do |auth| + auth.reset_challenge + end + end + + # Set authentication credential. See sub filters for more details. + def set_auth(uri, user, passwd) + @authenticator.each do |auth| + auth.set(uri, user, passwd) + end + reset_challenge + end + + # Filter API implementation. Traps HTTP request and insert + # 'Authorization' header if needed. + def filter_request(req) + @authenticator.each do |auth| + next unless auth.set? 
# hasn't be set, don't use it + if cred = auth.get(req) + if cred == :skip + # some authenticator (NTLM and Negotiate) does not + # need to send extra header after authorization. In such case + # it should block other authenticators to respond and :skip is + # the marker for such case. + return + end + req.header.set('Authorization', auth.scheme + " " + cred) + return + end + end + end + + # Filter API implementation. Traps HTTP response and parses + # 'WWW-Authenticate' header. + # + # This remembers the challenges for all authentication methods + # available to the client. On the subsequent retry of the request, + # filter_request will select the strongest method. + def filter_response(req, res) + command = nil + if res.status == HTTP::Status::UNAUTHORIZED + if challenge = parse_authentication_header(res, 'www-authenticate') + uri = req.header.request_uri + challenge.each do |scheme, param_str| + @authenticator.each do |auth| + next unless auth.set? # hasn't be set, don't use it + if scheme.downcase == auth.scheme.downcase + challengeable = auth.challenge(uri, param_str) + command = :retry if challengeable + end + end + end + # ignore unknown authentication scheme + end + end + command + end + end + + + # Authentication filter for handling authentication negotiation between + # Proxy server. Parses 'Proxy-Authentication' header in response and + # generates 'Proxy-Authorization' header in request. + # + # Authentication filter is implemented using request filter of HTTPClient. + # It traps HTTP response header and maintains authentication state, and + # traps HTTP request header for inserting necessary authentication header. + # + # ProxyAuth has sub filters (BasicAuth, NegotiateAuth, and SSPINegotiateAuth) + # and delegates some operations to it. + # NegotiateAuth requires 'ruby/ntlm' module. + # SSPINegotiateAuth requires 'win32/sspi' module. + class ProxyAuth < AuthFilterBase + attr_reader :basic_auth + attr_reader :digest_auth + attr_reader :negotiate_auth + attr_reader :sspi_negotiate_auth + + # Creates new ProxyAuth. + def initialize + @basic_auth = ProxyBasicAuth.new + @negotiate_auth = NegotiateAuth.new + @ntlm_auth = NegotiateAuth.new('NTLM') + @sspi_negotiate_auth = SSPINegotiateAuth.new + @digest_auth = ProxyDigestAuth.new + # sort authenticators by priority + @authenticator = [@negotiate_auth, @ntlm_auth, @sspi_negotiate_auth, @digest_auth, @basic_auth] + end + + # Resets challenge state. See sub filters for more details. + def reset_challenge + @authenticator.each do |auth| + auth.reset_challenge + end + end + + # Set authentication credential. See sub filters for more details. + def set_auth(user, passwd) + @authenticator.each do |auth| + auth.set(nil, user, passwd) + end + reset_challenge + end + + # Filter API implementation. Traps HTTP request and insert + # 'Proxy-Authorization' header if needed. + def filter_request(req) + @authenticator.each do |auth| + next unless auth.set? # hasn't be set, don't use it + if cred = auth.get(req) + if cred == :skip + # some authenticator (NTLM and Negotiate) does not + # need to send extra header after authorization. In such case + # it should block other authenticators to respond and :skip is + # the marker for such case. + return + end + req.header.set('Proxy-Authorization', auth.scheme + " " + cred) + return + end + end + end + + # Filter API implementation. Traps HTTP response and parses + # 'Proxy-Authenticate' header. 
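+ # (Illustrative note, not part of the upstream documentation: after a 407
+ # challenge is recorded here, filter_request emits the matching
+ # Proxy-Authorization header using credentials typically supplied via, e.g.,
+ #   clnt.proxy = 'http://proxy.example.com:8080'
+ #   clnt.set_proxy_auth('user', 'passwd')
+ # where the proxy host is a placeholder.)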
+ def filter_response(req, res) + command = nil + if res.status == HTTP::Status::PROXY_AUTHENTICATE_REQUIRED + if challenge = parse_authentication_header(res, 'proxy-authenticate') + uri = req.header.request_uri + challenge.each do |scheme, param_str| + @authenticator.each do |auth| + next unless auth.set? # hasn't be set, don't use it + if scheme.downcase == auth.scheme.downcase + challengeable = auth.challenge(uri, param_str) + command = :retry if challengeable + end + end + end + # ignore unknown authentication scheme + end + end + command + end + end + + # Authentication filter base class. + class AuthBase + include HTTPClient::Util + + # Authentication scheme. + attr_reader :scheme + + def initialize(scheme) + @scheme = scheme + @challenge = {} + end + + # Resets challenge state. Do not send '*Authorization' header until the + # server sends '*Authentication' again. + def reset_challenge + synchronize do + @challenge.clear + end + end + end + + # Authentication filter for handling BasicAuth negotiation. + # Used in WWWAuth and ProxyAuth. + class BasicAuth < AuthBase + include Mutex_m + + # Send Authorization Header without receiving 401 + attr_accessor :force_auth + + # Creates new BasicAuth filter. + def initialize + super('Basic') + @cred = nil + @auth = {} + @force_auth = false + end + + # Set authentication credential. + # uri == nil for generic purpose (allow to use user/password for any URL). + def set(uri, user, passwd) + synchronize do + if uri.nil? + @cred = ["#{user}:#{passwd}"].pack('m').tr("\n", '') + else + uri = Util.uri_dirname(uri) + @auth[uri] = ["#{user}:#{passwd}"].pack('m').tr("\n", '') + end + end + end + + # have we marked this as set - ie that it's valid to use in this context? + def set? + @cred || @auth.any? + end + + # Response handler: returns credential. + # It sends cred only when a given uri is; + # * child page of challengeable(got *Authenticate before) uri and, + # * child page of defined credential + def get(req) + target_uri = req.header.request_uri + synchronize { + return nil if !@force_auth and !@challenge.any? { |uri, ok| + Util.uri_part_of(target_uri, uri) and ok + } + return @cred if @cred + Util.hash_find_value(@auth) { |uri, cred| + Util.uri_part_of(target_uri, uri) + } + } + end + + # Challenge handler: remember URL for response. + def challenge(uri, param_str = nil) + synchronize { + @challenge[urify(uri)] = true + true + } + end + end + + class ProxyBasicAuth < BasicAuth + def set(uri, user, passwd) + synchronize do + @cred = ["#{user}:#{passwd}"].pack('m').tr("\n", '') + end + end + + def get(req) + synchronize { + return nil if !@force_auth and !@challenge['challenged'] + @cred + } + end + + # Challenge handler: remember URL for response. + def challenge(uri, param_str = nil) + synchronize { + @challenge['challenged'] = true + true + } + end + end + + # Authentication filter for handling DigestAuth negotiation. + # Used in WWWAuth. + class DigestAuth < AuthBase + include Mutex_m + + # Creates new DigestAuth filter. + def initialize + super('Digest') + @auth = {} + @nonce_count = 0 + end + + # Set authentication credential. + # uri == nil is ignored. + def set(uri, user, passwd) + synchronize do + if uri + uri = Util.uri_dirname(uri) + @auth[uri] = [user, passwd] + end + end + end + + # have we marked this as set - ie that it's valid to use in this context? + def set? + @auth.any? + end + + # Response handler: returns credential. 
+ # It sends cred only when a given uri is; + # * child page of challengeable(got *Authenticate before) uri and, + # * child page of defined credential + def get(req) + target_uri = req.header.request_uri + synchronize { + param = Util.hash_find_value(@challenge) { |uri, v| + Util.uri_part_of(target_uri, uri) + } + return nil unless param + user, passwd = Util.hash_find_value(@auth) { |uri, auth_data| + Util.uri_part_of(target_uri, uri) + } + return nil unless user + calc_cred(req, user, passwd, param) + } + end + + # Challenge handler: remember URL and challenge token for response. + def challenge(uri, param_str) + synchronize { + @challenge[uri] = parse_challenge_param(param_str) + true + } + end + + private + + # this method is implemented by sromano and posted to + # http://tools.assembla.com/breakout/wiki/DigestForSoap + # Thanks! + # supported algorithms: MD5, MD5-sess + def calc_cred(req, user, passwd, param) + method = req.header.request_method + path = req.header.create_query_uri + a_1 = "#{user}:#{param['realm']}:#{passwd}" + a_2 = "#{method}:#{path}" + qop = param['qop'] + nonce = param['nonce'] + cnonce = nil + if qop || param['algorithm'] =~ /MD5-sess/ + cnonce = generate_cnonce() + end + a_1_md5sum = Digest::MD5.hexdigest(a_1) + if param['algorithm'] =~ /MD5-sess/ + a_1_md5sum = Digest::MD5.hexdigest("#{a_1_md5sum}:#{nonce}:#{cnonce}") + algorithm = "MD5-sess" + else + algorithm = "MD5" + end + message_digest = [] + message_digest << a_1_md5sum + message_digest << nonce + if qop + @nonce_count += 1 + message_digest << ('%08x' % @nonce_count) + message_digest << cnonce + message_digest << param['qop'] + end + message_digest << Digest::MD5.hexdigest(a_2) + header = [] + header << "username=\"#{user}\"" + header << "realm=\"#{param['realm']}\"" + header << "nonce=\"#{nonce}\"" + header << "uri=\"#{path}\"" + if cnonce + header << "cnonce=\"#{cnonce}\"" + end + if qop + header << "nc=#{'%08x' % @nonce_count}" + header << "qop=#{param['qop']}" + end + header << "response=\"#{Digest::MD5.hexdigest(message_digest.join(":"))}\"" + header << "algorithm=#{algorithm}" + header << "opaque=\"#{param['opaque']}\"" if param.key?('opaque') + header.join(", ") + end + + # cf. WEBrick::HTTPAuth::DigestAuth#generate_next_nonce(aTime) + def generate_cnonce + now = "%012d" % Time.now.to_i + pk = Digest::MD5.hexdigest([now, self.__id__, Process.pid, rand(65535)].join)[0, 32] + [now + ':' + pk].pack('m*').chop + end + + def parse_challenge_param(param_str) + param = {} + param_str.scan(/\s*([^\,]+(?:\\.[^\,]*)*)/).each do |str| + key, value = str[0].scan(/\A([^=]+)=(.*)\z/)[0] + if /\A"(.*)"\z/ =~ value + value = $1.gsub(/\\(.)/, '\1') + end + param[key] = value + end + param + end + end + + + # Authentication filter for handling DigestAuth negotiation. + # Ignores uri argument. Used in ProxyAuth. + class ProxyDigestAuth < DigestAuth + + # overrides DigestAuth#set. sets default user name and password. uri is not used. + def set(uri, user, passwd) + synchronize do + @auth = [user, passwd] + end + end + + # overrides DigestAuth#get. 
Uses default user name and password + # regardless of target uri if the proxy has required authentication + # before + def get(req) + synchronize { + param = @challenge + return nil unless param + user, passwd = @auth + return nil unless user + calc_cred(req, user, passwd, param) + } + end + + def reset_challenge + synchronize do + @challenge = nil + end + end + + def challenge(uri, param_str) + synchronize { + @challenge = parse_challenge_param(param_str) + true + } + end + end + + # Authentication filter for handling Negotiate/NTLM negotiation. + # Used in WWWAuth and ProxyAuth. + # + # NegotiateAuth depends on 'ruby/ntlm' module. + class NegotiateAuth < AuthBase + include Mutex_m + + # NTLM opt for ruby/ntlm. {:ntlmv2 => true} by default. + attr_reader :ntlm_opt + + # Creates new NegotiateAuth filter. + def initialize(scheme = "Negotiate") + super(scheme) + @auth = {} + @auth_default = nil + @ntlm_opt = { + :ntlmv2 => true + } + end + + # Set authentication credential. + # uri == nil for generic purpose (allow to use user/password for any URL). + def set(uri, user, passwd) + synchronize do + if uri + uri = Util.uri_dirname(uri) + @auth[uri] = [user, passwd] + else + @auth_default = [user, passwd] + end + end + end + + # have we marked this as set - ie that it's valid to use in this context? + def set? + @auth_default || @auth.any? + end + + # Response handler: returns credential. + # See ruby/ntlm for negotiation state transition. + def get(req) + target_uri = req.header.request_uri + synchronize { + _domain_uri, param = @challenge.find { |uri, v| + Util.uri_part_of(target_uri, uri) + } + return nil unless param + user, passwd = Util.hash_find_value(@auth) { |uri, auth_data| + Util.uri_part_of(target_uri, uri) + } + unless user + user, passwd = @auth_default + end + return nil unless user + Util.try_require('net/ntlm') || return + domain = nil + domain, user = user.split("\\") if user.index("\\") + state = param[:state] + authphrase = param[:authphrase] + case state + when :init + t1 = Net::NTLM::Message::Type1.new + t1.domain = domain if domain + t1.encode64 + when :response + t2 = Net::NTLM::Message.decode64(authphrase) + param = {:user => user, :password => passwd} + param[:domain] = domain if domain + t3 = t2.response(param, @ntlm_opt.dup) + @challenge[target_uri][:state] = :done + t3.encode64 + when :done + :skip + else + nil + end + } + end + + # Challenge handler: remember URL and challenge token for response. + def challenge(uri, param_str) + synchronize { + if param_str.nil? or @challenge[uri].nil? + c = @challenge[uri] = {} + c[:state] = :init + c[:authphrase] = "" + else + c = @challenge[uri] + c[:state] = :response + c[:authphrase] = param_str + end + true + } + end + end + + + # Authentication filter for handling Negotiate/NTLM negotiation. + # Used in ProxyAuth. + # + # SSPINegotiateAuth depends on 'win32/sspi' module. + class SSPINegotiateAuth < AuthBase + include Mutex_m + + # Creates new SSPINegotiateAuth filter. + def initialize + super('Negotiate') + end + + # Set authentication credential. + # NOT SUPPORTED: username and necessary data is retrieved by win32/sspi. + # See win32/sspi for more details. + def set(*args) + # not supported + end + + # Check always (not effective but it works) + def set? + !@challenge.empty? + end + + # Response handler: returns credential. + # See win32/sspi for negotiation state transition. 
+ def get(req) + target_uri = req.header.request_uri + synchronize { + domain_uri, param = @challenge.find { |uri, v| + Util.uri_part_of(target_uri, uri) + } + return nil unless param + Util.try_require('win32/sspi') || Util.try_require('gssapi') || return + state = param[:state] + authenticator = param[:authenticator] + authphrase = param[:authphrase] + case state + when :init + if defined?(Win32::SSPI) + authenticator = param[:authenticator] = Win32::SSPI::NegotiateAuth.new + authenticator.get_initial_token(@scheme) + else # use GSSAPI + authenticator = param[:authenticator] = GSSAPI::Simple.new(domain_uri.host, 'HTTP') + # Base64 encode the context token + [authenticator.init_context].pack('m').gsub(/\n/,'') + end + when :response + @challenge[target_uri][:state] = :done + if defined?(Win32::SSPI) + authenticator.complete_authentication(authphrase) + else # use GSSAPI + authenticator.init_context(authphrase.unpack('m').pop) + end + when :done + :skip + else + nil + end + } + end + + # Challenge handler: remember URL and challenge token for response. + def challenge(uri, param_str) + synchronize { + if param_str.nil? or @challenge[uri].nil? + c = @challenge[uri] = {} + c[:state] = :init + c[:authenticator] = nil + c[:authphrase] = "" + else + c = @challenge[uri] + c[:state] = :response + c[:authphrase] = param_str + end + true + } + end + end + + # Authentication filter for handling OAuth negotiation. + # Used in WWWAuth. + # + # CAUTION: This impl only support '#7 Accessing Protected Resources' in OAuth + # Core 1.0 spec for now. You need to obtain Access token and Access secret by + # yourself. + # + # CAUTION: This impl does NOT support OAuth Request Body Hash spec for now. + # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html + # + class OAuth < AuthBase + include Mutex_m + + class Config + include HTTPClient::Util + + attr_accessor :http_method + attr_accessor :realm + attr_accessor :consumer_key + attr_accessor :consumer_secret + attr_accessor :token + attr_accessor :secret + attr_accessor :signature_method + attr_accessor :version + attr_accessor :callback + attr_accessor :verifier + + # for OAuth Session 1.0 (draft) + attr_accessor :session_handle + + attr_reader :signature_handler + + attr_accessor :debug_timestamp + attr_accessor :debug_nonce + + def initialize(*args) + @http_method, + @realm, + @consumer_key, + @consumer_secret, + @token, + @secret, + @signature_method, + @version, + @callback, + @verifier = + keyword_argument(args, + :http_method, + :realm, + :consumer_key, + :consumer_secret, + :token, + :secret, + :signature_method, + :version, + :callback, + :verifier + ) + @http_method ||= :post + @session_handle = nil + @signature_handler = {} + end + end + + def self.escape(str) # :nodoc: + if str.respond_to?(:force_encoding) + str.dup.force_encoding('BINARY').gsub(/([^a-zA-Z0-9_.~-]+)/) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + } + else + str.gsub(/([^a-zA-Z0-9_.~-]+)/n) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + } + end + end + + def escape(str) + self.class.escape(str) + end + + # Creates new DigestAuth filter. + def initialize + super('OAuth') + @config = nil # common config + @auth = {} # configs for each site + @nonce_count = 0 + @signature_handler = { + 'HMAC-SHA1' => method(:sign_hmac_sha1) + } + end + + # Set authentication credential. + # You cannot set OAuth config via WWWAuth#set_auth. Use OAuth#config= + def set(*args) + # not supported + end + + # Check always (not effective but it works) + def set? 
+ !@challenge.empty? + end + + # Set authentication credential. + def set_config(uri, config) + synchronize do + if uri.nil? + @config = config + else + uri = Util.uri_dirname(urify(uri)) + @auth[uri] = config + end + end + end + + # Get authentication credential. + def get_config(uri = nil) + synchronize { + do_get_config(uri) + } + end + + # Response handler: returns credential. + # It sends cred only when a given uri is; + # * child page of challengeable(got *Authenticate before) uri and, + # * child page of defined credential + def get(req) + target_uri = req.header.request_uri + synchronize { + return nil unless @challenge[nil] or @challenge.find { |uri, ok| + Util.uri_part_of(target_uri, uri) and ok + } + config = do_get_config(target_uri) || @config + return nil unless config + calc_cred(req, config) + } + end + + # Challenge handler: remember URL for response. + # + # challenge() in OAuth handler always returns false to avoid connection + # retry which should not work in OAuth authentication context. This + # method just remember URL (nil means 'any') for the next connection. + # Normally OAuthClient handles this correctly but see how it uses when + # you need to use this class directly. + def challenge(uri, param_str = nil) + synchronize { + if uri.nil? + @challenge[nil] = true + else + @challenge[urify(uri)] = true + end + false + } + end + + private + + def do_get_config(uri = nil) + if uri.nil? + @config + else + uri = urify(uri) + Util.hash_find_value(@auth) { |cand_uri, cred| + Util.uri_part_of(uri, cand_uri) + } + end + end + + def calc_cred(req, config) + header = {} + header['oauth_consumer_key'] = config.consumer_key + header['oauth_signature_method'] = config.signature_method + header['oauth_timestamp'] = config.debug_timestamp || Time.now.to_i.to_s + header['oauth_nonce'] = config.debug_nonce || generate_nonce() + header['oauth_token'] = config.token if config.token + header['oauth_version'] = config.version if config.version + header['oauth_callback'] = config.callback if config.callback + header['oauth_verifier'] = config.verifier if config.verifier + header['oauth_session_handle'] = config.session_handle if config.session_handle + signature = sign(config, header, req) + header['oauth_signature'] = signature + # no need to do but we should sort for easier to test. 
+ str = header.sort_by { |k, v| k }.map { |k, v| encode_header(k, v) }.join(', ') + if config.realm + str = %Q(realm="#{config.realm}", ) + str + end + str + end + + def generate_nonce + @nonce_count += 1 + now = "%012d" % Time.now.to_i + pk = Digest::MD5.hexdigest([@nonce_count.to_s, now, self.__id__, Process.pid, rand(65535)].join)[0, 32] + [now + ':' + pk].pack('m*').chop + end + + def encode_header(k, v) + %Q(#{escape(k.to_s)}="#{escape(v.to_s)}") + end + + def encode_param(params) + params.map { |k, v| + [v].flatten.map { |vv| + %Q(#{escape(k.to_s)}=#{escape(vv.to_s)}) + } + }.flatten + end + + def sign(config, header, req) + base_string = create_base_string(config, header, req) + if handler = config.signature_handler[config.signature_method] || @signature_handler[config.signature_method.to_s] + handler.call(config, base_string) + else + raise ConfigurationError.new("Unknown OAuth signature method: #{config.signature_method}") + end + end + + def create_base_string(config, header, req) + params = encode_param(header) + query = req.header.request_query + if query and HTTP::Message.multiparam_query?(query) + params += encode_param(query) + end + # captures HTTP Message body only for 'application/x-www-form-urlencoded' + if req.header.contenttype == 'application/x-www-form-urlencoded' and req.http_body.size + params += encode_param(HTTP::Message.parse(req.http_body.content)) + end + uri = req.header.request_uri + if uri.query + params += encode_param(HTTP::Message.parse(uri.query)) + end + if uri.port == uri.default_port + request_url = "#{uri.scheme.downcase}://#{uri.host}#{uri.path}" + else + request_url = "#{uri.scheme.downcase}://#{uri.host}:#{uri.port}#{uri.path}" + end + [req.header.request_method.upcase, request_url, params.sort.join('&')].map { |e| + escape(e) + }.join('&') + end + + def sign_hmac_sha1(config, base_string) + unless SSLEnabled + raise ConfigurationError.new("openssl required for OAuth implementation") + end + key = [escape(config.consumer_secret.to_s), escape(config.secret.to_s)].join('&') + digester = OpenSSL::Digest::SHA1.new + [OpenSSL::HMAC.digest(digester, key, base_string)].pack('m*').chomp + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cacert.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cacert.pem new file mode 100644 index 0000000..742dc6e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cacert.pem @@ -0,0 +1,3952 @@ +## +## Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Wed Oct 28 04:12:04 2015 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## +## Conversion done with mk-ca-bundle.pl version 1.25. 
+## SHA1: 6d7d2f0a4fae587e7431be191a081ac1257d300a +## + + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i 
+YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm 
+fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw 
+CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG 
+Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw 
+DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j 
+LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 
+j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P 
+NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL 
+DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 
+tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM 
+Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT 
+XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA 
+============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN 
CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh 
+bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ 
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 
+LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC 
+IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 
+dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD 
+P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN 
+MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT 
+BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE 
+BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy 
+dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI 
+rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg 
+Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO 
+/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A 
+dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI 
+KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH 
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 
+0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD 
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE 
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ 
+KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC 
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil 
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC 
++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ 
+Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o 
+lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f 
+Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj 
+h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm 
+aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol 
+TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly 
+bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK 
+BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b 
+WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V 
+cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD 
+VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM 
+BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu 
+ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L 
+nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH 
+sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB 
+DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N +YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ 
+4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh +zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO 
+EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy +YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn +FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 
+0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab +mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ +82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa 
+m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa +YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp 
+c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 +utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 
+dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH 
+oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority +=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== 
+-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf +vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +QuoVadis Root CA 1 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE +PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm +PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6 +Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN 
+ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l +g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV +7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX +9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f +iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg +t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI +hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3 +GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct +Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP ++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh +3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa +wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6 +O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0 +FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV +hMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +QuoVadis Root CA 2 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh +ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY +NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t +oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o +MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l +V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo +L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ +sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD +6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh +lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI +hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K +pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9 +x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz +dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X +U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw +mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD +zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN +JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr +O3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +QuoVadis Root CA 3 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv 
+b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286 +IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL +Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe +6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3 +I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U +VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7 +5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi +Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM +dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt +rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI +hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS +t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ +TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du +DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib +Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD +hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX +0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW +dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2 +PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +DigiCert Assured ID Root G2 +=========================== +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw +MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH +35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq +bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw +VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP +YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn +lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO +w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv +0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz +d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW +hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M +jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +DigiCert Assured ID Root G3 +=========================== +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD +VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb +RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs +KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF +UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy +YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy +1vUhZscv6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +DigiCert Global Root G2 +======================= +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx +MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ +kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO +3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV +BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM +UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB +o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu +5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr +F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U +WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH +QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/ +iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +DigiCert Global Root G3 +======================= +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD +VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw +MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C +AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O +YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp +Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y +3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34 +VOKa5Vt8sycX +-----END CERTIFICATE----- + +DigiCert Trusted Root G4 +======================== +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw +HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp +pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o +k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa 
+vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY +QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6 +MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm +mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7 +f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH +dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8 +oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY +ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr +yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy +7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah +ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN +5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb +/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa +5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK +G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP +82Z+ +-----END CERTIFICATE----- + +WoSign +====== +-----BEGIN CERTIFICATE----- +MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQG +EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNVBAMTIUNlcnRpZmljYXRpb24g +QXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJ +BgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA +vcqNrLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1UfcIiePyO +CbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcSccf+Hb0v1naMQFXQoOXXDX +2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2ZjC1vt7tj/id07sBMOby8w7gLJKA84X5 +KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4Mx1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR ++ScPewavVIMYe+HdVHpRaG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ez +EC8wQjchzDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDaruHqk +lWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221KmYo0SLwX3OSACCK2 +8jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvASh0JWzko/amrzgD5LkhLJuYwTKVY +yrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWvHYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0C +AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R +8bNLtwYgFP6HEtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 +LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJMuYhOZO9sxXq +T2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2eJXLOC62qx1ViC777Y7NhRCOj +y+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VNg64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC +2nz4SNAzqfkHx5Xh9T71XXG68pWpdIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes +5cVAWubXbHssw1abR80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/ +EaEQPkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGcexGATVdVh +mVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+J7x6v+Db9NpSvd4MVHAx +kUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMlOtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGi +kpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWTee5Ehr7XHuQe+w== +-----END CERTIFICATE----- + +WoSign China +============ +-----BEGIN CERTIFICATE----- +MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBGMQswCQYDVQQG 
+EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMMEkNBIOayg+mAmuagueiv +geS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYD +VQQKExFXb1NpZ24gQ0EgTGltaXRlZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k +8H/rD195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld19AXbbQs5 +uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExfv5RxadmWPgxDT74wwJ85 +dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnkUkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5 +Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+LNVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFy +b7Ao65vh4YOhn0pdr8yb+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc +76DbT52VqyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6KyX2m ++Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0GAbQOXDBGVWCvOGU6 +yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaKJ/kR8slC/k7e3x9cxKSGhxYzoacX +GKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUA +A4ICAQBqinA4WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 +yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj/feTZU7n85iY +r83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6jBAyvd0zaziGfjk9DgNyp115 +j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0A +kLppRQjbbpCBhqcqBT/mhDn4t/lXX0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97 +qA4bLJyuQHCH2u2nFoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Y +jj4Du9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10lO1Hm13ZB +ONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Leie2uPAmvylezkolwQOQv +T8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR12KvxAmLBsX5VYc8T1yaw15zLKYs4SgsO +kI26oQ== +-----END CERTIFICATE----- + +COMODO RSA Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn +dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ +FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+ +5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG +x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX +2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL +OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3 +sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C +GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5 +WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt +rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+ +nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg 
+tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW +sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp +pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA +zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq +ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52 +7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I +LaZRfyHBNVOFBkpdn627G190 +-----END CERTIFICATE----- + +USERTrust RSA Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz +0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j +Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn +RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O ++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq +/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE +Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM +lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8 +yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+ +eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW +FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ +7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ +Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM +8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi +FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi +yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c +J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw +sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx +Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +USERTrust ECC Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2 +0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez +nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV 
+HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB +HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu +9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl +OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV +MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF +JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R5 +=========================== +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6 +SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS +h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx +uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 +yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G3 +================================== +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y +olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t +x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy +EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K +Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur +mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5 +1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp +07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo +FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE +41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu +yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq +KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1 +v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA +8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b 
+8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r +mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq +1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI +JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV +tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk= +-----END CERTIFICATE----- + +Staat der Nederlanden EV Root CA +================================ +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M +MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl +cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk +SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW +O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r +0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8 +Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV +XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr +08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV +0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd +74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx +fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa +ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu +c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq +5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN +b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN +f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi +5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4 +WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK +DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy +eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg== +-----END CERTIFICATE----- + +IdenTrust Commercial Root CA 1 +============================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS +b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES +MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB +IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld +hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/ +mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi +1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C +XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl +3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy +NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV +WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg +xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix 
+uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI +hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg +ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt +ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV +YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX +feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro +kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe +2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz +Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R +cGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +IdenTrust Public Sector Root CA 1 +================================= +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv +ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV +UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS +b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy +P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6 +Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI +rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf +qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS +mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn +ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh +LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v +iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL +4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw +DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A +mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt +GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt +m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx +NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4 +Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI +ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC +ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ +3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy +bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug +b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw +HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT +DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx 
+OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP +/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz +HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU +s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y +TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx +AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6 +0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z +iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi +nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+ +vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO +e4pIb4tF9g== +-----END CERTIFICATE----- + +Entrust Root Certification Authority - EC1 +========================================== +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx +FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn +YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw +FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs +LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg +dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy +AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef +9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h +vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8 +kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +CFCA EV ROOT +============ +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE +CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB +IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw +MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD +DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV +BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD +7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN +uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW +ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7 +xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f +py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K +gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol +hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ +tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf +BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB 
+/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q +ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua +4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG +E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX +BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn +aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy +PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX +kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C +ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 +========================================================= +-----BEGIN CERTIFICATE----- +MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1Qg +RWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAw +ODA3MDFaFw0yMzA0MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0w +SwYDVQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnE +n2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBFbGVrdHJvbmlrIFNlcnRp +ZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEApCUZ4WWe60ghUEoI5RHwWrom/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537 +jVJp45wnEFPzpALFp/kRGml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1m +ep5Fimh34khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z5UNP +9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0hO8EuPbJbKoCPrZV +4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QIDAQABo0IwQDAdBgNVHQ4EFgQUVpkH +HtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAJ5FdnsXSDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPo +BP5yCccLqh0lVX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq +URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nfpeYVhDfwwvJl +lpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CFYv4HAqGEVka+lgqaE9chTLd8 +B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW+qtB4Uu2NQvAmxU= +-----END CERTIFICATE----- + +TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H6 +========================================================= +-----BEGIN CERTIFICATE----- +MIIEJjCCAw6gAwIBAgIGfaHyZeyKMA0GCSqGSIb3DQEBCwUAMIGxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMU0wSwYDVQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg2MB4XDTEzMTIxODA5 +MDQxMFoXDTIzMTIxNjA5MDQxMFowgbExCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExTTBL +BgNVBAoMRFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBCaWxpxZ9pbSBHw7x2ZW5sacSf +aSBIaXptZXRsZXJpIEEuxZ4uMUIwQAYDVQQDDDlUw5xSS1RSVVNUIEVsZWt0cm9uaWsgU2VydGlm +aWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLEgSDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCdsGjW6L0UlqMACprx9MfMkU1xeHe59yEmFXNRFpQJRwXiM/VomjX/3EsvMsew7eKC5W/a +2uqsxgbPJQ1BgfbBOCK9+bGlprMBvD9QFyv26WZV1DOzXPhDIHiTVRZwGTLmiddk671IUP320EED +wnS3/faAz1vFq6TWlRKb55cTMgPp1KtDWxbtMyJkKbbSk60vbNg9tvYdDjTu0n2pVQ8g9P0pu5Fb +HH3GQjhtQiht1AH7zYiXSX6484P4tZgvsycLSF5W506jM7NE1qXyGJTtHB6plVxiSvgNZ1GpryHV 
++DKdeboaX+UEVU0TRv/yz3THGmNtwx8XEsMeED5gCLMxAgMBAAGjQjBAMB0GA1UdDgQWBBTdVRcT +9qzoSCHK77Wv0QAy7Z6MtTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG +9w0BAQsFAAOCAQEAb1gNl0OqFlQ+v6nfkkU/hQu7VtMMUszIv3ZnXuaqs6fvuay0EBQNdH49ba3R +fdCaqaXKGDsCQC4qnFAUi/5XfldcEQlLNkVS9z2sFP1E34uXI9TDwe7UU5X+LEr+DXCqu4svLcsy +o4LyVN/Y8t3XSHLuSqMplsNEzm61kod2pLv0kmzOLBQJZo6NrRa1xxsJYTvjIKIDgI6tflEATseW +hvtDmHd9KMeP2Cpu54Rvl0EpABZeTeIT6lnAY2c6RPuY/ATTMHKm9ocJV612ph1jmv3XZch4gyt1 +O6VbuA1df74jrlZVlFjvH4GMKrLN5ptjnhi85WsGtAuYSyher4hYyw== +-----END CERTIFICATE----- + +Certinomis - Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAbBgNVBAMTFENlcnRpbm9taXMg +LSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMzMTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIx +EzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRD +ZXJ0aW5vbWlzIC0gUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQos +P5L2fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJflLieY6pOo +d5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQVWZUKxkd8aRi5pwP5ynap +z8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDFTKWrteoB4owuZH9kb/2jJZOLyKIOSY00 +8B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09x +RLWtwHkziOC/7aOgFLScCbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE +6OXWk6RiwsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJwx3t +FvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SGm/lg0h9tkQPTYKbV +PZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4F2iw4lNVYC2vPsKD2NkJK/DAZNuH +i5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZngWVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGj +YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I +6tNxIqSSaHh02TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/0KGRHCwPT5iV +WVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWwF6YSjNRieOpWauwK0kDDPAUw +Pk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZSg081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAX +lCOotQqSD7J6wWAsOMwaplv/8gzjqh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJ +y29SWwNyhlCVCNSNh4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9 +Iff/ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8Vbtaw5Bng +DwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwjY/M50n92Uaf0yKHxDHYi +I0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nM +cyrDflOR1m749fPH0FFNjkulW+YZFzvWgQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVr +hkIGuUE= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cacert1024.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cacert1024.pem new file mode 100644 index 0000000..9794dfb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cacert1024.pem @@ -0,0 +1,3866 @@ +## +## ca-bundle.crt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Tue Apr 22 08:29:31 2014 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). 
This file can be found in the mozilla source tree: +## http://mxr.mozilla.org/mozilla-release/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1 +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## + + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE +AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm 
+SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT +1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD 
+Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA 
+A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC 
+ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l 
+dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa 
+Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY 
+eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML 
+QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 
+AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN 
CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq 
+A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg 
+JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV 
+BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 
+juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk 
+lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 
+Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P 
+jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx +NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 
+BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD 
+VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC 
+Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD 
+KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG 
+A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d 
+eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 
+Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy 
+oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA 
+=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu 
+Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 
+Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X 
+CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD 
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf 
+mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr 
+IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix 
+GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- 
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV 
+MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. 
+====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 +MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU 
+cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy 
+vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW 
+0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q 
+aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia 
+AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD 
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE 
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ 
+KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC 
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil 
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC 
++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ 
+Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv 
+c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 
+xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT 
+OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH 
+3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT 
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 
+h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy 
+OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB 
+Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n 
+dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC 
+oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 
+Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt 
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ 
+Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU 
+QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz 
+6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP 
+MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N +YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh 
+zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO +EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy 
+YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn +FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 +0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab 
+mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ +82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa +m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp 
+ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa +YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 
+utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg 
+Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority 
+=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf 
+vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/connection.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/connection.rb new file mode 100644 index 0000000..b702c92 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/connection.rb @@ -0,0 +1,88 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +class HTTPClient + + + # Represents a HTTP response to an asynchronous request. Async methods of + # HTTPClient such as get_async, post_async, etc. returns an instance of + # Connection. + # + # == How to use + # + # 1. Invoke HTTP method asynchronously and check if it's been finished + # periodically. + # + # connection = clnt.post_async(url, body) + # print 'posting.' + # while true + # break if connection.finished? + # print '.' + # sleep 1 + # end + # puts '.' + # res = connection.pop + # p res.status + # + # 2. Read the response as an IO. + # + # connection = clnt.get_async('http://dev.ctor.org/') + # io = connection.pop.content + # while str = io.read(40) + # p str + # end + class Connection + attr_accessor :async_thread + + def initialize(header_queue = [], body_queue = []) # :nodoc: + @headers = header_queue + @body = body_queue + @async_thread = nil + @queue = Queue.new + end + + # Checks if the asynchronous invocation has been finished or not. + def finished? + if !@async_thread + # Not in async mode. + true + elsif @async_thread.alive? + # Working... + false + else + # Async thread have been finished. + join + true + end + end + + # Retrieves a HTTP::Message instance of HTTP response. Do not invoke this + # method twice for now. The second invocation will be blocked. 
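+    #
+    # Editorial sketch, not upstream documentation: since #pop re-raises any
+    # exception stored by the background thread, callers may want to rescue
+    # around it. Assumes `clnt` is an HTTPClient instance and `url` is any
+    # reachable URL:
+    #
+    #   connection = clnt.get_async(url)
+    #   begin
+    #     res = connection.pop
+    #     p res.status
+    #   rescue => e
+    #     warn "async request failed: #{e.message}"
+    #   end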
+ def pop + response_or_exception = @queue.pop + if response_or_exception.is_a? Exception + raise response_or_exception + end + response_or_exception + end + + def push(result) # :nodoc: + @queue.push(result) + end + + # Waits the completion of the asynchronous invocation. + def join + if @async_thread + @async_thread.join + end + nil + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cookie.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cookie.rb new file mode 100644 index 0000000..65fc6ca --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/cookie.rb @@ -0,0 +1,220 @@ +# do not override if httpclient/webagent-cookie is loaded already +unless defined?(HTTPClient::CookieManager) +begin # for catching LoadError and load webagent-cookie instead + +require 'http-cookie' +require 'httpclient/util' + +class HTTPClient + class CookieManager + include HTTPClient::Util + + attr_reader :format, :jar + attr_accessor :cookies_file + + def initialize(cookies_file = nil, format = WebAgentSaver, jar = HTTP::CookieJar.new) + @cookies_file = cookies_file + @format = format + @jar = jar + load_cookies if @cookies_file + end + + def load_cookies + check_cookies_file + @jar.clear + @jar.load(@cookies_file, :format => @format) + end + + def save_cookies(session = false) + check_cookies_file + @jar.save(@cookies_file, :format => @format, :session => session) + end + + def cookies(uri = nil) + cookies = @jar.cookies(uri) + # TODO: return HTTP::Cookie in the future + cookies.map { |cookie| + WebAgent::Cookie.new( + :name => cookie.name, + :value => cookie.value, + :domain => cookie.domain, + :path => cookie.path, + :origin => cookie.origin, + :for_domain => cookie.for_domain, + :expires => cookie.expires, + :httponly => cookie.httponly, + :secure => cookie.secure + ) + } + end + + def cookie_value(uri) + cookies = self.cookies(uri) + unless cookies.empty? + HTTP::Cookie.cookie_value(cookies) + end + end + + def parse(value, uri) + @jar.parse(value, uri) + end + + def cookies=(cookies) + @jar.clear + cookies.each do |cookie| + add(cookie) + end + end + + def add(cookie) + @jar.add(cookie) + end + + def find(uri) + warning('CookieManager#find is deprecated and will be removed in near future. Use HTTP::Cookie.cookie_value(CookieManager#cookies) instead') + if cookie = cookies(uri) + HTTP::Cookie.cookie_value(cookie) + end + end + + private + + def check_cookies_file + unless @cookies_file + raise ArgumentError.new('Cookies file not specified') + end + end + end + + class WebAgentSaver < HTTP::CookieJar::AbstractSaver + # no option + def default_options + {} + end + + # same as HTTP::CookieJar::CookiestxtSaver + def save(io, jar) + jar.each { |cookie| + next if !@session && cookie.session? + io.print cookie_to_record(cookie) + } + end + + # same as HTTP::CookieJar::CookiestxtSaver + def load(io, jar) + io.each_line { |line| + cookie = parse_record(line) and jar.add(cookie) + } + end + + private + + def cookie_to_record(cookie) + [ + cookie.origin, + cookie.name, + cookie.value, + cookie.expires.to_i, + cookie.dot_domain, + cookie.path, + self.class.flag(cookie) + ].join("\t") + "\n" + end + + def parse_record(line) + return nil if /\A#/ =~ line + col = line.chomp.split(/\t/) + + origin = col[0] + name = col[1] + value = col[2] + value.chomp! + if col[3].empty? 
or col[3] == '0' + expires = nil + else + expires = Time.at(col[3].to_i) + return nil if expires < Time.now + end + domain = col[4] + path = col[5] + + cookie = WebAgent::Cookie.new(name, value, + :origin => origin, + :domain => domain, + :path => path, + :expires => expires + ) + self.class.set_flag(cookie, col[6].to_i) + cookie + end + + USE = 1 + SECURE = 2 + DOMAIN = 4 + PATH = 8 + HTTP_ONLY = 64 + + def self.flag(cookie) + flg = 0 + flg += USE # not used + flg += SECURE if cookie.secure? + flg += DOMAIN if cookie.for_domain? + flg += HTTP_ONLY if cookie.httponly? + flg += PATH if cookie.path # not used + flg + end + + def self.set_flag(cookie, flag) + cookie.secure = true if flag & SECURE > 0 + cookie.for_domain = true if flag & DOMAIN > 0 + cookie.httponly = true if flag & HTTP_ONLY > 0 + end + end +end + +# for backward compatibility +class WebAgent + CookieManager = ::HTTPClient::CookieManager + + class Cookie < HTTP::Cookie + include HTTPClient::Util + + def url + deprecated('url', 'origin') + self.origin + end + + def url=(url) + deprecated('url=', 'origin=') + self.origin = url + end + + def http_only? + deprecated('http_only?', 'httponly?') + self.httponly? + end + + alias original_domain domain + + def domain + warning('Cookie#domain returns dot-less domain name now. Use Cookie#dot_domain if you need "." at the beginning.') + self.original_domain + end + + def flag + deprecated('flag', 'secure, for_domain, etc.') + HTTPClient::WebAgentSaver.flag(self) + end + + private + + def deprecated(old, new) + warning("WebAgent::Cookie is deprecated and will be replaced with HTTP::Cookie in the near future. Please use Cookie##{new} instead of Cookie##{old} for the replacement.") + end + end +end + +rescue LoadError + require 'httpclient/webagent-cookie' +end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/http.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/http.rb new file mode 100644 index 0000000..330b3db --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/http.rb @@ -0,0 +1,1082 @@ +# -*- encoding: utf-8 -*- + +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'time' +if defined?(Encoding::ASCII_8BIT) + require 'open-uri' # for encoding +end +require 'httpclient/util' + + +# A namespace module for HTTP Message definitions used by HTTPClient. +module HTTP + + + # Represents HTTP response status code. Defines constants for HTTP response + # and some conditional methods. + module Status + OK = 200 + CREATED = 201 + ACCEPTED = 202 + NON_AUTHORITATIVE_INFORMATION = 203 + NO_CONTENT = 204 + RESET_CONTENT = 205 + PARTIAL_CONTENT = 206 + MOVED_PERMANENTLY = 301 + FOUND = 302 + SEE_OTHER = 303 + TEMPORARY_REDIRECT = MOVED_TEMPORARILY = 307 + BAD_REQUEST = 400 + UNAUTHORIZED = 401 + PROXY_AUTHENTICATE_REQUIRED = 407 + INTERNAL = 500 + + # Status codes for successful HTTP response. + SUCCESSFUL_STATUS = [ + OK, CREATED, ACCEPTED, + NON_AUTHORITATIVE_INFORMATION, NO_CONTENT, + RESET_CONTENT, PARTIAL_CONTENT + ] + + # Status codes which is a redirect. + REDIRECT_STATUS = [ + MOVED_PERMANENTLY, FOUND, SEE_OTHER, + TEMPORARY_REDIRECT, MOVED_TEMPORARILY + ] + + # Returns true if the given status represents successful HTTP response. 
+ # See also SUCCESSFUL_STATUS. + def self.successful?(status) + SUCCESSFUL_STATUS.include?(status) + end + + # Returns true if the given status is thought to be redirect. + # See also REDIRECT_STATUS. + def self.redirect?(status) + REDIRECT_STATUS.include?(status) + end + end + + + # Represents a HTTP message. A message is for a request or a response. + # + # Request message is generated from given parameters internally so users + # don't need to care about it. Response message is the instance that + # methods of HTTPClient returns so users need to know how to extract + # HTTP response data from Message. + # + # Some attributes are only for a request or a response, not both. + # + # == How to use HTTP response message + # + # 1. Gets response message body. + # + # res = clnt.get(url) + # p res.content #=> String + # + # 2. Gets response status code. + # + # res = clnt.get(url) + # p res.status #=> 200, 501, etc. (Integer) + # + # 3. Gets response header. + # + # res = clnt.get(url) + # res.header['set-cookie'].each do |value| + # p value + # end + # assert_equal(1, res.header['last-modified'].size) + # p res.header['last-modified'].first + # + class Message + include HTTPClient::Util + + CRLF = "\r\n" + + # Represents HTTP message header. + class Headers + # HTTP version in a HTTP header. String. + attr_accessor :http_version + # Size of body. nil when size is unknown (e.g. chunked response). + attr_reader :body_size + # Request/Response is chunked or not. + attr_accessor :chunked + + # Request only. Requested method. + attr_reader :request_method + # Request only. Requested URI. + attr_accessor :request_uri + # Request only. Requested query. + attr_accessor :request_query + # Request only. Requested via proxy or not. + attr_accessor :request_absolute_uri + + # Response only. HTTP status + attr_reader :status_code + # Response only. HTTP status reason phrase. + attr_accessor :reason_phrase + + # Used for dumping response. + attr_accessor :body_type # :nodoc: + # Used for dumping response. + attr_accessor :body_charset # :nodoc: + # Used for dumping response. + attr_accessor :body_date # :nodoc: + # Used for keeping content encoding. + attr_reader :body_encoding # :nodoc: + + # HTTP response status code to reason phrase mapping definition. + STATUS_CODE_MAP = { + Status::OK => 'OK', + Status::CREATED => "Created", + Status::NON_AUTHORITATIVE_INFORMATION => "Non-Authoritative Information", + Status::NO_CONTENT => "No Content", + Status::RESET_CONTENT => "Reset Content", + Status::PARTIAL_CONTENT => "Partial Content", + Status::MOVED_PERMANENTLY => 'Moved Permanently', + Status::FOUND => 'Found', + Status::SEE_OTHER => 'See Other', + Status::TEMPORARY_REDIRECT => 'Temporary Redirect', + Status::MOVED_TEMPORARILY => 'Temporary Redirect', + Status::BAD_REQUEST => 'Bad Request', + Status::INTERNAL => 'Internal Server Error', + } + + # $KCODE to charset mapping definition. + CHARSET_MAP = { + 'NONE' => 'us-ascii', + 'EUC' => 'euc-jp', + 'SJIS' => 'shift_jis', + 'UTF8' => 'utf-8', + } + + # Creates a Message::Headers. Use init_request, init_response, or + # init_connect_request for acutual initialize. 
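+      #
+      # Editorial note (a sketch, not upstream documentation): Headers is
+      # normally built indirectly through Message.new_request or
+      # Message.new_response rather than constructed by hand, e.g.:
+      #
+      #   uri = HTTPClient::Util.urify('http://example.com/')
+      #   req = HTTP::Message.new_request('GET', uri)
+      #   req.http_header.request_method   #=> "GET"
+      #   req.http_header.http_version     #=> "1.1"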
+ def initialize + @http_version = '1.1' + @body_size = nil + @chunked = false + + @request_method = nil + @request_uri = nil + @request_query = nil + @request_absolute_uri = nil + + @status_code = nil + @reason_phrase = nil + + @body_type = nil + @body_charset = nil + @body_date = nil + @body_encoding = nil + + @is_request = nil + @header_item = [] + @dumped = false + end + + # Initialize this instance as a CONNECT request. + def init_connect_request(uri) + @is_request = true + @request_method = 'CONNECT' + @request_uri = uri + @request_query = nil + @http_version = '1.0' + end + + # Placeholder URI object for nil uri. + NIL_URI = HTTPClient::Util.urify('http://nil-uri-given/') + # Initialize this instance as a general request. + def init_request(method, uri, query = nil) + @is_request = true + @request_method = method + @request_uri = uri || NIL_URI + @request_query = query + @request_absolute_uri = false + self + end + + # Initialize this instance as a response. + def init_response(status_code, req = nil) + @is_request = false + self.status_code = status_code + if req + @request_method = req.request_method + @request_uri = req.request_uri + @request_query = req.request_query + end + self + end + + # Sets status code and reason phrase. + def status_code=(status_code) + @status_code = status_code + @reason_phrase = STATUS_CODE_MAP[@status_code] + end + + # Returns 'Content-Type' header value. + def content_type + self['Content-Type'][0] + end + + # Sets 'Content-Type' header value. Overrides if already exists. + def content_type=(content_type) + delete('Content-Type') + self['Content-Type'] = content_type + end + + alias contenttype content_type + alias contenttype= content_type= + + if defined?(Encoding::ASCII_8BIT) + def set_body_encoding + if type = self.content_type + OpenURI::Meta.init(o = '') + o.meta_add_field('content-type', type) + @body_encoding = o.encoding + end + end + else + def set_body_encoding + @body_encoding = nil + end + end + + # Sets byte size of message body. + # body_size == nil means that the body is_a? IO + def body_size=(body_size) + @body_size = body_size + end + + # Dumps message header part and returns a dumped String. + def dump + set_header + str = nil + if @is_request + str = request_line + else + str = response_status_line + end + str + @header_item.collect { |key, value| + "#{ key }: #{ value }#{ CRLF }" + }.join + end + + # Set Date header + def set_date_header + set('Date', Time.now.httpdate) + end + + # Adds a header. Addition order is preserved. + def add(key, value) + if value.is_a?(Array) + value.each do |v| + @header_item.push([key, v]) + end + else + @header_item.push([key, value]) + end + end + + # Sets a header. + def set(key, value) + delete(key) + add(key, value) + end + + # Returns an Array of headers for the given key. Each element is a pair + # of key and value. It returns an single element Array even if the only + # one header exists. If nil key given, it returns all headers. + def get(key = nil) + if key.nil? + all + else + key = key.upcase + @header_item.find_all { |k, v| k.upcase == key } + end + end + + # Returns an Array of all headers. + def all + @header_item + end + + # Deletes headers of the given key. + def delete(key) + key = key.upcase + @header_item.delete_if { |k, v| k.upcase == key } + end + + # Adds a header. See set. + def []=(key, value) + set(key, value) + end + + # Returns an Array of header values for the given key. 
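+      #
+      # Illustrative example (editorial addition, values are hypothetical):
+      # lookup is case-insensitive and always yields an Array, even when only
+      # one header of that name exists:
+      #
+      #   res.header['set-cookie']    #=> ["a=1; path=/", "b=2; path=/"]
+      #   res.header['content-type']  #=> ["text/html; charset=utf-8"]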
+ def [](key) + get(key).collect { |item| item[1] } + end + + def set_headers(headers) + headers.each do |key, value| + add(key, value) + end + set_body_encoding + end + + def create_query_uri() + if @request_method == 'CONNECT' + return "#{@request_uri.host}:#{@request_uri.port}" + end + path = @request_uri.path + path = '/' if path.nil? or path.empty? + if query_str = create_query_part() + path += "?#{query_str}" + end + path + end + + def create_query_part() + query_str = nil + if @request_uri.query + query_str = @request_uri.query + end + if @request_query + if query_str + query_str += "&#{Message.create_query_part_str(@request_query)}" + else + query_str = Message.create_query_part_str(@request_query) + end + end + query_str + end + + private + + def request_line + path = create_query_uri() + if @request_absolute_uri + path = "#{ @request_uri.scheme }://#{ @request_uri.host }:#{ @request_uri.port }#{ path }" + end + "#{ @request_method } #{ path } HTTP/#{ @http_version }#{ CRLF }" + end + + def response_status_line + if defined?(Apache) + "HTTP/#{ @http_version } #{ @status_code } #{ @reason_phrase }#{ CRLF }" + else + "Status: #{ @status_code } #{ @reason_phrase }#{ CRLF }" + end + end + + def set_header + if @is_request + set_request_header + else + set_response_header + end + end + + def set_request_header + return if @dumped + @dumped = true + keep_alive = Message.keep_alive_enabled?(@http_version) + if !keep_alive and @request_method != 'CONNECT' + set('Connection', 'close') + end + if @chunked + set('Transfer-Encoding', 'chunked') + elsif @body_size and (keep_alive or @body_size != 0) + set('Content-Length', @body_size.to_s) + end + if @http_version >= '1.1' and get('Host').empty? + if @request_uri.port == @request_uri.default_port + # GFE/1.3 dislikes default port number (returns 404) + set('Host', "#{@request_uri.hostname}") + else + set('Host', "#{@request_uri.hostname}:#{@request_uri.port}") + end + end + end + + def set_response_header + return if @dumped + @dumped = true + if defined?(Apache) && self['Date'].empty? + set_date_header + end + keep_alive = Message.keep_alive_enabled?(@http_version) + if @chunked + set('Transfer-Encoding', 'chunked') + else + if keep_alive or @body_size != 0 + set('Content-Length', @body_size.to_s) + end + end + if @body_date + set('Last-Modified', @body_date.httpdate) + end + if self['Content-Type'].empty? + set('Content-Type', "#{ @body_type || 'text/html' }; charset=#{ charset_label }") + end + end + + def charset_label + # TODO: should handle response encoding for 1.9 correctly. + if RUBY_VERSION > "1.9" + CHARSET_MAP[@body_charset] || 'us-ascii' + else + CHARSET_MAP[@body_charset || $KCODE] || 'us-ascii' + end + end + end + + + # Represents HTTP message body. + class Body + # Size of body. nil when size is unknown (e.g. chunked response). + attr_reader :size + # maxbytes of IO#read for streaming request. See DEFAULT_CHUNK_SIZE. + attr_accessor :chunk_size + # Hash that keeps IO positions + attr_accessor :positions + + # Default value for chunk_size + DEFAULT_CHUNK_SIZE = 1024 * 16 + + # Creates a Message::Body. Use init_request or init_response + # for acutual initialize. + def initialize + @body = nil + @size = nil + @positions = nil + @chunk_size = nil + end + + # Initialize this instance as a request. + def init_request(body = nil, boundary = nil) + @boundary = boundary + @positions = {} + set_content(body, boundary) + @chunk_size = DEFAULT_CHUNK_SIZE + self + end + + # Initialize this instance as a response. 
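+      #
+      # Editorial sketch (assumption, not upstream documentation): String
+      # bodies report their byte size, so for example:
+      #
+      #   HTTP::Message.new_response("hello").header.body_size  #=> 5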
+ def init_response(body = nil) + @body = body + if @body.respond_to?(:bytesize) + @size = @body.bytesize + elsif @body.respond_to?(:size) + @size = @body.size + else + @size = nil + end + self + end + + # Dumps message body to given dev. + # dev needs to respond to <<. + # + # Message header must be given as the first argument for performance + # reason. (header is dumped to dev, too) + # If no dev (the second argument) given, this method returns a dumped + # String. + # + # assert: @size is not nil + def dump(header = '', dev = '') + if @body.is_a?(Parts) + dev << header + @body.parts.each do |part| + if Message.file?(part) + reset_pos(part) + dump_file(part, dev, @body.sizes[part]) + else + dev << part + end + end + elsif Message.file?(@body) + dev << header + reset_pos(@body) + dump_file(@body, dev, @size) + elsif @body + dev << header + @body + else + dev << header + end + dev + end + + # Dumps message body with chunked encoding to given dev. + # dev needs to respond to <<. + # + # Message header must be given as the first argument for performance + # reason. (header is dumped to dev, too) + # If no dev (the second argument) given, this method returns a dumped + # String. + def dump_chunked(header = '', dev = '') + dev << header + if @body.is_a?(Parts) + @body.parts.each do |part| + if Message.file?(part) + reset_pos(part) + dump_chunks(part, dev) + else + dev << dump_chunk(part) + end + end + dev << (dump_last_chunk + CRLF) + elsif @body + reset_pos(@body) + dump_chunks(@body, dev) + dev << (dump_last_chunk + CRLF) + end + dev + end + + # Returns a message body itself. + def content + @body + end + + private + + def set_content(body, boundary = nil) + if Message.file?(body) + # uses Transfer-Encoding: chunked if body does not respond to :size. + # bear in mind that server may not support it. at least ruby's CGI doesn't. + @body = body + remember_pos(@body) + @size = body.respond_to?(:size) ? body.size - body.pos : nil + elsif boundary and Message.multiparam_query?(body) + @body = build_query_multipart_str(body, boundary) + @size = @body.size + else + @body = Message.create_query_part_str(body) + @size = @body.bytesize + end + end + + def remember_pos(io) + # IO may not support it (ex. IO.pipe) + @positions[io] = io.pos if io.respond_to?(:pos) + end + + def reset_pos(io) + io.pos = @positions[io] if @positions.key?(io) + end + + def dump_file(io, dev, sz) + buf = '' + rest = sz + while rest > 0 + n = io.read([rest, @chunk_size].min, buf) + raise ArgumentError.new("Illegal size value: #size returns #{sz} but cannot read") if n.nil? + dev << buf + rest -= n.bytesize + end + end + + def dump_chunks(io, dev) + buf = '' + while !io.read(@chunk_size, buf).nil? 
+ dev << dump_chunk(buf) + end + end + + def dump_chunk(str) + dump_chunk_size(str.bytesize) + (str + CRLF) + end + + def dump_last_chunk + dump_chunk_size(0) + end + + def dump_chunk_size(size) + sprintf("%x", size) + CRLF + end + + class Parts + attr_reader :size + attr_reader :sizes + + def initialize + @body = [] + @sizes = {} + @size = 0 # total + @as_stream = false + end + + def add(part) + if Message.file?(part) + @as_stream = true + @body << part + if part.respond_to?(:lstat) + sz = part.lstat.size + add_size(part, sz) + elsif part.respond_to?(:size) + if sz = part.size + add_size(part, sz) + else + @sizes.clear + @size = nil + end + else + # use chunked upload + @sizes.clear + @size = nil + end + elsif @body[-1].is_a?(String) + @body[-1] += part.to_s + @size += part.to_s.bytesize if @size + else + @body << part.to_s + @size += part.to_s.bytesize if @size + end + end + + def parts + if @as_stream + @body + else + [@body.join] + end + end + + private + + def add_size(part, sz) + if @size + @sizes[part] = sz + @size += sz + end + end + end + + def build_query_multipart_str(query, boundary) + parts = Parts.new + query.each do |attr, value| + headers = ["--#{boundary}"] + if Message.file?(value) + remember_pos(value) + param_str = params_from_file(value).collect { |k, v| + "#{k}=\"#{v}\"" + }.join("; ") + if value.respond_to?(:mime_type) + content_type = value.mime_type + elsif value.respond_to?(:content_type) + content_type = value.content_type + else + path = value.respond_to?(:path) ? value.path : nil + content_type = Message.mime_type(path) + end + headers << %{Content-Disposition: form-data; name="#{attr}"; #{param_str}} + headers << %{Content-Type: #{content_type}} + elsif attr.is_a?(Hash) + h = attr + value = h[:content] + h.each do |h_key, h_val| + headers << %{#{h_key}: #{h_val}} if h_key != :content + end + remember_pos(value) if Message.file?(value) + else + headers << %{Content-Disposition: form-data; name="#{attr}"} + value = value.to_s + end + parts.add(headers.join(CRLF) + CRLF + CRLF) + parts.add(value) + parts.add(CRLF) + end + parts.add("--#{boundary}--" + CRLF + CRLF) # empty epilogue + parts + end + + def params_from_file(value) + params = {} + original_filename = value.respond_to?(:original_filename) ? value.original_filename : nil + path = value.respond_to?(:path) ? value.path : nil + params['filename'] = original_filename || File.basename(path || '') + # Creation time is not available from File::Stat + if value.respond_to?(:mtime) + params['modification-date'] = value.mtime.rfc822 + end + if value.respond_to?(:atime) + params['read-date'] = value.atime.rfc822 + end + params + end + end + + + class << self + private :new + + # Creates a Message instance of 'CONNECT' request. + # 'CONNECT' request does not have Body. + # uri:: an URI that need to connect. Only uri.host and uri.port are used. + def new_connect_request(uri) + m = new + m.http_header.init_connect_request(uri) + m.http_header.body_size = nil + m + end + + # Creates a Message instance of general request. + # method:: HTTP method String. + # uri:: an URI object which represents an URL of web resource. + # query:: a Hash or an Array of query part of URL. + # e.g. { "a" => "b" } => 'http://host/part?a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'http://host/part?a=b&a=c' + # body:: a Hash or an Array of body part. + # e.g. { "a" => "b" } => 'a=b'. + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'a=b&a=c'. 
+ # boundary:: When the boundary given, it is sent as + # a multipart/form-data using this boundary String. + def new_request(method, uri, query = nil, body = nil, boundary = nil) + m = new + m.http_header.init_request(method, uri, query) + m.http_body = Body.new + m.http_body.init_request(body || '', boundary) + if body + m.http_header.body_size = m.http_body.size + m.http_header.chunked = true if m.http_body.size.nil? + else + m.http_header.body_size = nil + end + m + end + + # Creates a Message instance of response. + # body:: a String or an IO of response message body. + def new_response(body, req = nil) + m = new + m.http_header.init_response(Status::OK, req) + m.http_body = Body.new + m.http_body.init_response(body) + m.http_header.body_size = m.http_body.size || 0 + m + end + + @@mime_type_handler = nil + + # Sets MIME type handler. + # + # handler must respond to :call with a single argument :path and returns + # a MIME type String e.g. 'text/html'. + # When the handler returns nil or an empty String, + # 'application/octet-stream' is used. + # + # When you set nil to the handler, internal_mime_type is used instead. + # The handler is nil by default. + def mime_type_handler=(handler) + @@mime_type_handler = handler + end + + # Returns MIME type handler. + def mime_type_handler + @@mime_type_handler + end + + # For backward compatibility. + alias set_mime_type_func mime_type_handler= + alias get_mime_type_func mime_type_handler + + def mime_type(path) # :nodoc: + if @@mime_type_handler + res = @@mime_type_handler.call(path) + if !res || res.to_s == '' + return 'application/octet-stream' + else + return res + end + else + internal_mime_type(path) + end + end + + # Default MIME type handler. + # See mime_type_handler=. + def internal_mime_type(path) + case path + when /\.txt$/i + 'text/plain' + when /\.xml$/i + 'text/xml' + when /\.(htm|html)$/i + 'text/html' + when /\.doc$/i + 'application/msword' + when /\.png$/i + 'image/png' + when /\.gif$/i + 'image/gif' + when /\.(jpg|jpeg)$/i + 'image/jpeg' + else + 'application/octet-stream' + end + end + + # Returns true if the given HTTP version allows keep alive connection. + # version:: String + def keep_alive_enabled?(version) + version >= '1.1' + end + + # Returns true if the given query (or body) has a multiple parameter. + def multiparam_query?(query) + query.is_a?(Array) or query.is_a?(Hash) + end + + # Returns true if the given object is a File. In HTTPClient, a file is; + # * must respond to :read for retrieving String chunks. + # * must respond to :pos and :pos= to rewind for reading. + # Rewinding is only needed for following HTTP redirect. Some IO impl + # defines :pos= but raises an Exception for pos= such as StringIO + # but there's no problem as far as using it for non-following methods + # (get/post/etc.) 
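+      #
+      # Editorial sketch (not upstream documentation): under this duck-typing
+      # rule an open File or a StringIO qualifies, while a plain String does
+      # not:
+      #
+      #   require 'stringio'
+      #   HTTP::Message.file?(StringIO.new('body'))  #=> true
+      #   HTTP::Message.file?('body')                #=> false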
+ def file?(obj) + obj.respond_to?(:read) and obj.respond_to?(:pos) and + obj.respond_to?(:pos=) + end + + def create_query_part_str(query) # :nodoc: + if multiparam_query?(query) + escape_query(query) + elsif query.respond_to?(:read) + query = query.read + else + query.to_s + end + end + + def Array.try_convert(value) + return value if value.instance_of?(Array) + return nil if !value.respond_to?(:to_ary) + converted = value.to_ary + return converted if converted.instance_of?(Array) + + cname = value.class.name + raise TypeError, "can't convert %s to %s (%s#%s gives %s)" % + [cname, Array.name, cname, :to_ary, converted.class.name] + end unless Array.respond_to?(:try_convert) + + def escape_query(query) # :nodoc: + pairs = [] + query.each { |attr, value| + left = escape(attr.to_s) << '=' + if values = Array.try_convert(value) + values.each { |v| + if v.respond_to?(:read) + v = v.read + end + pairs.push(left + escape(v.to_s)) + } + else + if value.respond_to?(:read) + value = value.read + end + pairs.push(left << escape(value.to_s)) + end + } + pairs.join('&') + end + + # from CGI.escape + if defined?(Encoding::ASCII_8BIT) + def escape(str) # :nodoc: + str.dup.force_encoding(Encoding::ASCII_8BIT).gsub(/([^ a-zA-Z0-9_.-]+)/) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + }.tr(' ', '+') + end + else + def escape(str) # :nodoc: + str.gsub(/([^ a-zA-Z0-9_.-]+)/n) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + }.tr(' ', '+') + end + end + + # from CGI.parse + def parse(query) + params = Hash.new([].freeze) + query.split(/[&;]/n).each do |pairs| + key, value = pairs.split('=',2).collect{|v| unescape(v) } + if params.has_key?(key) + params[key].push(value) + else + params[key] = [value] + end + end + params + end + + # from CGI.unescape + def unescape(string) + string.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n) do + [$1.delete('%')].pack('H*') + end + end + end + + + # HTTP::Message::Headers:: message header. + attr_accessor :http_header + + # HTTP::Message::Body:: message body. + attr_reader :http_body + + # OpenSSL::X509::Certificate:: response only. server certificate which is + # used for retrieving the response. + attr_accessor :peer_cert + + # The other Message object when this Message is generated instead of + # the Message because of redirection, negotiation, or format conversion. + attr_accessor :previous + + # Creates a Message. This method should be used internally. + # Use Message.new_connect_request, Message.new_request or + # Message.new_response instead. + def initialize # :nodoc: + @http_header = Headers.new + @http_body = @peer_cert = nil + @previous = nil + end + + # Dumps message (header and body) to given dev. + # dev needs to respond to <<. + def dump(dev = '') + str = @http_header.dump + CRLF + if @http_header.chunked + dev = @http_body.dump_chunked(str, dev) + elsif @http_body + dev = @http_body.dump(str, dev) + else + dev << str + end + dev + end + + # Sets a new body. header.body_size is updated with new body.size. + def http_body=(body) + @http_body = body + @http_header.body_size = @http_body.size if @http_header + end + alias body= http_body= + + # Returns HTTP version in a HTTP header. String. + def http_version + @http_header.http_version + end + + # Sets HTTP version in a HTTP header. String. + def http_version=(http_version) + @http_header.http_version = http_version + end + + VERSION_WARNING = 'Message#version (Float) is deprecated. Use Message#http_version (String) instead.' 
+ def version + warning(VERSION_WARNING) + @http_header.http_version.to_f + end + + def version=(version) + warning(VERSION_WARNING) + @http_header.http_version = version + end + + # Returns HTTP status code in response. Integer. + def status + @http_header.status_code + end + + alias code status + alias status_code status + + # Sets HTTP status code of response. Integer. + # Reason phrase is updated, too. + def status=(status) + @http_header.status_code = status + end + + # Returns HTTP status reason phrase in response. String. + def reason + @http_header.reason_phrase + end + + # Sets HTTP status reason phrase of response. String. + def reason=(reason) + @http_header.reason_phrase = reason + end + + # Returns 'Content-Type' header value. + def content_type + @http_header.content_type + end + + # Sets 'Content-Type' header value. Overrides if already exists. + def content_type=(content_type) + @http_header.content_type = content_type + end + alias contenttype content_type + alias contenttype= content_type= + + # Returns content encoding + def body_encoding + @http_header.body_encoding + end + + # Returns a content of message body. A String or an IO. + def content + @http_body.content + end + + alias header http_header + alias body content + + # Returns Hash of header. key and value are both String. Each key has a + # single value so you can't extract exact value when a message has multiple + # headers like 'Set-Cookie'. Use header['Set-Cookie'] for that purpose. + # (It returns an Array always) + def headers + Hash[*http_header.all.flatten] + end + + # Extracts cookies from 'Set-Cookie' header. + # Supports 'Set-Cookie' in response header only. + # Do we need 'Cookie' support in request header? + def cookies + set_cookies = http_header['set-cookie'] + unless set_cookies.empty? + uri = http_header.request_uri + set_cookies.map { |str| + WebAgent::Cookie.parse(str, uri) + }.flatten + end + end + + # Convenience method to return boolean of whether we had a successful request + def ok? + HTTP::Status.successful?(status) + end + + def redirect? + HTTP::Status.redirect?(status) + end + + # SEE_OTHER is a redirect, but it should sent as GET + def see_other? + status == HTTP::Status::SEE_OTHER + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/include_client.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/include_client.rb new file mode 100644 index 0000000..e6aaf47 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/include_client.rb @@ -0,0 +1,85 @@ +# It is useful to re-use a HTTPClient instance for multiple requests, to +# re-use HTTP 1.1 persistent connections. +# +# To do that, you sometimes want to store an HTTPClient instance in a global/ +# class variable location, so it can be accessed and re-used. +# +# This mix-in makes it easy to create class-level access to one or more +# HTTPClient instances. The HTTPClient instances are lazily initialized +# on first use (to, for instance, avoid interfering with WebMock/VCR), +# and are initialized in a thread-safe manner. Note that a +# HTTPClient, once initialized, is safe for use in multiple threads. +# +# Note that you `extend` HTTPClient::IncludeClient, not `include. 
+# +# require 'httpclient/include_client' +# class Widget +# extend HTTPClient::IncludeClient +# +# include_http_client +# # and/or, specify more stuff +# include_http_client('http://myproxy:8080', :method_name => :my_client) do |client| +# # any init you want +# client.set_cookie_store nil +# client. +# end +# end +# +# That creates two HTTPClient instances available at the class level. +# The first will be available from Widget.http_client (default method +# name for `include_http_client`), with default initialization. +# +# The second will be available at Widget.my_client, with the init arguments +# provided, further initialized by the block provided. +# +# In addition to a class-level method, for convenience instance-level methods +# are also provided. Widget.http_client is identical to Widget.new.http_client +# +# +require 'httpclient' + +class HTTPClient + module IncludeClient + + + def include_http_client(*args, &block) + # We're going to dynamically define a class + # to hold our state, namespaced, as well as possibly dynamic + # name of cover method. + method_name = (args.last.delete(:method_name) if args.last.kind_of? Hash) || :http_client + args.pop if args.last == {} # if last arg was named methods now empty, remove it. + + # By the amazingness of closures, we can create these things + # in local vars here and use em in our method, we don't even + # need iVars for state. + client_instance = nil + client_mutex = Mutex.new + client_args = args + client_block = block + + # to define a _class method_ on the specific class that's currently + # `self`, we have to use this bit of metaprogramming, sorry. + (class << self; self ; end).instance_eval do + define_method(method_name) do + # implementation copied from ruby stdlib singleton + # to create this global obj thread-safely. + return client_instance if client_instance + client_mutex.synchronize do + return client_instance if client_instance + # init HTTPClient with specified args/block + client_instance = HTTPClient.new(*client_args) + client_block.call(client_instance) if client_block + end + return client_instance + end + end + + # And for convenience, an _instance method_ on the class that just + # delegates to the class method. + define_method(method_name) do + self.class.send(method_name) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/jruby_ssl_socket.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/jruby_ssl_socket.rb new file mode 100644 index 0000000..046b498 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/jruby_ssl_socket.rb @@ -0,0 +1,588 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. 
+ + +require 'java' +require 'httpclient/ssl_config' + + +class HTTPClient + +unless defined?(SSLSocket) + + class JavaSocketWrap + java_import 'java.net.InetSocketAddress' + java_import 'java.io.BufferedInputStream' + + BUF_SIZE = 1024 * 16 + + def self.connect(socket, site, opts = {}) + socket_addr = InetSocketAddress.new(site.host, site.port) + if opts[:connect_timeout] + socket.connect(socket_addr, opts[:connect_timeout]) + else + socket.connect(socket_addr) + end + socket.setSoTimeout(opts[:so_timeout]) if opts[:so_timeout] + socket.setKeepAlive(true) if opts[:tcp_keepalive] + socket + end + + def initialize(socket, debug_dev = nil) + @socket = socket + @debug_dev = debug_dev + @outstr = @socket.getOutputStream + @instr = BufferedInputStream.new(@socket.getInputStream) + @buf = (' ' * BUF_SIZE).to_java_bytes + @bufstr = '' + end + + def close + @socket.close + end + + def closed? + @socket.isClosed + end + + def eof? + @socket.isClosed + end + + def gets(rs) + while (size = @bufstr.index(rs)).nil? + if fill() == -1 + size = @bufstr.size + break + end + end + str = @bufstr.slice!(0, size + rs.size) + debug(str) + str + end + + def read(size, buf = nil) + while @bufstr.size < size + if fill() == -1 + break + end + end + str = @bufstr.slice!(0, size) + debug(str) + if buf + buf.replace(str) + else + str + end + end + + def readpartial(size, buf = nil) + while @bufstr.size == 0 + if fill() == -1 + raise EOFError.new('end of file reached') + end + end + str = @bufstr.slice!(0, size) + debug(str) + if buf + buf.replace(str) + else + str + end + end + + def <<(str) + rv = @outstr.write(str.to_java_bytes) + debug(str) + rv + end + + def flush + @socket.flush + end + + def sync + true + end + + def sync=(sync) + unless sync + raise "sync = false is not supported. This option was introduced for backward compatibility just in case." 
+ end + end + + private + + def fill + begin + size = @instr.read(@buf) + if size > 0 + @bufstr << String.from_java_bytes(@buf, Encoding::BINARY)[0, size] + end + size + rescue java.io.IOException => e + raise OpenSSL::SSL::SSLError.new("#{e.class}: #{e.getMessage}") + end + end + + def debug(str) + @debug_dev << str if @debug_dev && str + end + end + + class JRubySSLSocket < JavaSocketWrap + java_import 'java.io.ByteArrayInputStream' + java_import 'java.io.InputStreamReader' + java_import 'java.net.Socket' + java_import 'java.security.KeyStore' + java_import 'java.security.cert.Certificate' + java_import 'java.security.cert.CertificateFactory' + java_import 'javax.net.ssl.KeyManagerFactory' + java_import 'javax.net.ssl.SSLContext' + java_import 'javax.net.ssl.SSLSocketFactory' + java_import 'javax.net.ssl.TrustManager' + java_import 'javax.net.ssl.TrustManagerFactory' + java_import 'javax.net.ssl.X509TrustManager' + java_import 'org.jruby.ext.openssl.x509store.PEMInputOutput' + + class JavaCertificate + attr_reader :cert + + def initialize(cert) + @cert = cert + end + + def subject + @cert.getSubjectDN + end + + def to_text + @cert.toString + end + + def to_pem + '(not in PEM format)' + end + end + + class SSLStoreContext + attr_reader :current_cert, :chain, :error_depth, :error, :error_string + + def initialize(current_cert, chain, error_depth, error, error_string) + @current_cert, @chain, @error_depth, @error, @error_string = + current_cert, chain, error_depth, error, error_string + end + end + + class JSSEVerifyCallback + def initialize(verify_callback) + @verify_callback = verify_callback + end + + def call(is_ok, chain, error_depth = -1, error = -1, error_string = '(unknown)') + if @verify_callback + ruby_chain = chain.map { |cert| + JavaCertificate.new(cert) + }.reverse + # NOTE: The order depends on provider implementation + ruby_chain.each do |cert| + is_ok = @verify_callback.call( + is_ok, + SSLStoreContext.new(cert, ruby_chain, error_depth, error, error_string) + ) + end + end + is_ok + end + end + + class VerifyNoneTrustManagerFactory + class VerifyNoneTrustManager + include X509TrustManager + + def initialize(verify_callback) + @verify_callback = JSSEVerifyCallback.new(verify_callback) + end + + def checkServerTrusted(chain, authType) + @verify_callback.call(true, chain) + end + + def checkClientTrusted(chain, authType); end + def getAcceptedIssuers; end + end + + def initialize(verify_callback = nil) + @verify_callback = verify_callback + end + + def init(trustStore) + @managers = [VerifyNoneTrustManager.new(@verify_callback)].to_java(X509TrustManager) + end + + def getTrustManagers + @managers + end + end + + class SystemTrustManagerFactory + class SystemTrustManager + include X509TrustManager + + def initialize(original, verify_callback) + @original = original + @verify_callback = JSSEVerifyCallback.new(verify_callback) + end + + def checkServerTrusted(chain, authType) + is_ok = false + excn = nil + # TODO can we detect the depth from excn? 
+ error_depth = -1 + error = nil + error_message = nil + begin + @original.checkServerTrusted(chain, authType) + is_ok = true + rescue java.security.cert.CertificateException => excn + is_ok = false + error = excn.class.name + error_message = excn.getMessage + end + is_ok = @verify_callback.call(is_ok, chain, error_depth, error, error_message) + unless is_ok + excn ||= OpenSSL::SSL::SSLError.new('verifycallback failed') + raise excn + end + end + + def checkClientTrusted(chain, authType); end + def getAcceptedIssuers; end + end + + def initialize(verify_callback = nil) + @verify_callback = verify_callback + end + + def init(trust_store) + tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + tmf.java_method(:init, [KeyStore]).call(trust_store) + @original = tmf.getTrustManagers.find { |tm| + tm.is_a?(X509TrustManager) + } + @managers = [SystemTrustManager.new(@original, @verify_callback)].to_java(X509TrustManager) + end + + def getTrustManagers + @managers + end + end + + module PEMUtils + def self.read_certificate(pem) + cert = pem.sub(/.*?-----BEGIN CERTIFICATE-----/m, '').sub(/-----END CERTIFICATE-----.*?/m, '') + der = cert.unpack('m*').first + cf = CertificateFactory.getInstance('X.509') + cf.generateCertificate(ByteArrayInputStream.new(der.to_java_bytes)) + end + + def self.read_private_key(pem, password) + if password + password = password.unpack('C*').to_java(:char) + end + PEMInputOutput.read_private_key(InputStreamReader.new(ByteArrayInputStream.new(pem.to_java_bytes)), password) + end + end + + class KeyStoreLoader + PASSWORD = 16.times.map { rand(256) }.to_java(:char) + + def initialize + @keystore = KeyStore.getInstance('JKS') + @keystore.load(nil) + end + + def add(cert_source, key_source, password) + cert_str = cert_source.respond_to?(:to_pem) ? cert_source.to_pem : File.read(cert_source.to_s) + cert = PEMUtils.read_certificate(cert_str) + @keystore.setCertificateEntry('client_cert', cert) + key_str = key_source.respond_to?(:to_pem) ? key_source.to_pem : File.read(key_source.to_s) + key_pair = PEMUtils.read_private_key(key_str, password) + @keystore.setKeyEntry('client_key', key_pair.getPrivate, PASSWORD, [cert].to_java(Certificate)) + end + + def keystore + @keystore + end + end + + class TrustStoreLoader + attr_reader :size + + def initialize + @trust_store = KeyStore.getInstance('JKS') + @trust_store.load(nil) + @size = 0 + end + + def add(cert_source) + return if cert_source == :default + if cert_source.respond_to?(:to_pem) + pem = cert_source.to_pem + load_pem(pem) + elsif File.directory?(cert_source) + warn("#{cert_source}: directory not yet supported") + return + else + pem = nil + File.read(cert_source).each_line do |line| + case line + when /-----BEGIN CERTIFICATE-----/ + pem = '' + when /-----END CERTIFICATE-----/ + load_pem(pem) + # keep parsing in case where multiple certificates in a file + else + if pem + pem << line + end + end + end + end + end + + def trust_store + if @size == 0 + nil + else + @trust_store + end + end + + private + + def load_pem(pem) + cert = PEMUtils.read_certificate(pem) + @size += 1 + @trust_store.setCertificateEntry("cert_#{@size}", cert) + end + end + + # Ported from commons-httpclient 'BrowserCompatHostnameVerifier' + class BrowserCompatHostnameVerifier + BAD_COUNTRY_2LDS = %w(ac co com ed edu go gouv gov info lg ne net or org).sort + require 'ipaddr' + + def extract_sans(cert, subject_type) + sans = cert.getSubjectAlternativeNames rescue nil + if sans.nil? 
+ return nil + end + sans.find_all { |san| + san.first.to_i == subject_type + }.map { |san| + san[1] + } + end + + def extract_cn(cert) + subject = cert.getSubjectX500Principal() + if subject + subject_dn = javax.naming.ldap.LdapName.new(subject.toString) + subject_dn.getRdns.to_a.reverse.each do |rdn| + attributes = rdn.toAttributes + cn = attributes.get('cn') + if cn + if value = cn.get + return value.to_s + end + end + end + end + end + + def ipaddr?(addr) + !(IPAddr.new(addr) rescue nil).nil? + end + + def verify(hostname, cert) + is_ipaddr = ipaddr?(hostname) + sans = extract_sans(cert, is_ipaddr ? 7 : 2) + cn = extract_cn(cert) + if sans + sans.each do |san| + return true if match_identify(hostname, san) + end + raise OpenSSL::SSL::SSLError.new("Certificate for <#{hostname}> doesn't match any of the subject alternative names: #{sans}") + elsif cn + return true if match_identify(hostname, cn) + raise OpenSSL::SSL::SSLError.new("Certificate for <#{hostname}> doesn't match common name of the certificate subject: #{cn}") + end + raise OpenSSL::SSL::SSLError.new("Certificate subject for for <#{hostname}> doesn't contain a common name and does not have alternative names") + end + + def match_identify(hostname, identity) + if hostname.nil? + return false + end + hostname = hostname.downcase + identity = identity.downcase + parts = identity.split('.') + if parts.length >= 3 && parts.first.end_with?('*') && valid_country_wildcard(parts) + create_wildcard_regexp(identity) =~ hostname + else + hostname == identity + end + end + + def create_wildcard_regexp(value) + # Escape first then search '\*' for meta-char interpolation + labels = value.split('.').map { |e| Regexp.escape(e) } + # Handle '*'s only at the left-most label, exclude A-label and U-label + labels[0].gsub!(/\\\*/, '[^.]+') if !labels[0].start_with?('xn\-\-') and labels[0].ascii_only? + /\A#{labels.join('\.')}\z/i + end + + def valid_country_wildcard(parts) + if parts.length != 3 || parts[2].length != 2 + true + else + !BAD_COUNTRY_2LDS.include?(parts[1]) + end + end + end + + def self.create_socket(session) + opts = { + :connect_timeout => session.connect_timeout * 1000, + # send_timeout is ignored in JRuby + :so_timeout => session.receive_timeout * 1000, + :tcp_keepalive => session.tcp_keepalive, + :debug_dev => session.debug_dev + } + socket = nil + begin + if session.proxy + site = session.proxy || session.dest + socket = JavaSocketWrap.connect(Socket.new, site, opts) + session.connect_ssl_proxy(JavaSocketWrap.new(socket), Util.urify(session.dest.to_s)) + end + new(socket, session.dest, session.ssl_config, opts) + rescue + socket.close if socket + raise + end + end + + DEFAULT_SSL_PROTOCOL = (java.lang.System.getProperty('java.specification.version') == '1.7') ? 
'TLSv1.2' : 'TLS' + def initialize(socket, dest, config, opts = {}) + @config = config + begin + @ssl_socket = create_ssl_socket(socket, dest, config, opts) + ssl_version = java_ssl_version(config) + @ssl_socket.setEnabledProtocols([ssl_version].to_java(java.lang.String)) if ssl_version != DEFAULT_SSL_PROTOCOL + if config.ciphers != SSLConfig::CIPHERS_DEFAULT + @ssl_socket.setEnabledCipherSuites(config.ciphers.to_java(java.lang.String)) + end + ssl_connect(dest.host) + rescue java.security.GeneralSecurityException => e + raise OpenSSL::SSL::SSLError.new(e.getMessage) + rescue java.io.IOException => e + raise OpenSSL::SSL::SSLError.new("#{e.class}: #{e.getMessage}") + end + + super(@ssl_socket, opts[:debug_dev]) + end + + def java_ssl_version(config) + if config.ssl_version == :auto + DEFAULT_SSL_PROTOCOL + else + config.ssl_version.to_s.tr('_', '.') + end + end + + def create_ssl_context(config) + unless config.cert_store_crl_items.empty? + raise NotImplementedError.new('Manual CRL configuration is not yet supported') + end + + km = nil + if config.client_cert && config.client_key + loader = KeyStoreLoader.new + loader.add(config.client_cert, config.client_key, config.client_key_pass) + kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) + kmf.init(loader.keystore, KeyStoreLoader::PASSWORD) + km = kmf.getKeyManagers + end + + trust_store = nil + verify_callback = config.verify_callback || config.method(:default_verify_callback) + if !config.verify? + tmf = VerifyNoneTrustManagerFactory.new(verify_callback) + else + tmf = SystemTrustManagerFactory.new(verify_callback) + loader = TrustStoreLoader.new + config.cert_store_items.each do |item| + loader.add(item) + end + trust_store = loader.trust_store + end + tmf.init(trust_store) + tm = tmf.getTrustManagers + + ctx = SSLContext.getInstance(java_ssl_version(config)) + ctx.init(km, tm, nil) + if config.timeout + ctx.getClientSessionContext.setSessionTimeout(config.timeout) + end + ctx + end + + def create_ssl_socket(socket, dest, config, opts) + ctx = create_ssl_context(config) + factory = ctx.getSocketFactory + if socket + ssl_socket = factory.createSocket(socket, dest.host, dest.port, true) + else + ssl_socket = factory.createSocket + JavaSocketWrap.connect(ssl_socket, dest, opts) + end + ssl_socket + end + + def peer_cert + @peer_cert + end + + private + + def ssl_connect(hostname) + @ssl_socket.startHandshake + ssl_session = @ssl_socket.getSession + @peer_cert = JavaCertificate.new(ssl_session.getPeerCertificates.first) + if $DEBUG + warn("Protocol version: #{ssl_session.getProtocol}") + warn("Cipher: #{@ssl_socket.getSession.getCipherSuite}") + end + post_connection_check(hostname) + end + + def post_connection_check(hostname) + if !@config.verify? + return + else + BrowserCompatHostnameVerifier.new.verify(hostname, @peer_cert.cert) + end + end + end + + SSLSocket = JRubySSLSocket + +end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/session.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/session.rb new file mode 100644 index 0000000..b3dedd5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/session.rb @@ -0,0 +1,960 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. 
+# +# httpclient/session.rb is based on http-access.rb in http-access/0.0.4. Some +# part of it is copyrighted by Maebashi-san who made and published +# http-access/0.0.4. http-access/0.0.4 did not include license notice but when +# I asked Maebashi-san he agreed that I can redistribute it under the same terms +# of Ruby. Many thanks to Maebashi-san. + + +require 'socket' +require 'thread' +require 'timeout' +require 'stringio' +require 'zlib' + +require 'httpclient/timeout' # TODO: remove this once we drop 1.8 support +require 'httpclient/ssl_config' +require 'httpclient/http' +if RUBY_ENGINE == 'jruby' + require 'httpclient/jruby_ssl_socket' +else + require 'httpclient/ssl_socket' +end + + +class HTTPClient + + + # Represents a Site: protocol scheme, host String and port Number. + class Site + # Protocol scheme. + attr_accessor :scheme + # Host String. + attr_accessor :host + alias hostname host + # Port number. + attr_accessor :port + + # Creates a new Site based on the given URI. + def initialize(uri = nil) + if uri + @scheme = uri.scheme || 'tcp' + @host = uri.hostname || '0.0.0.0' + @port = uri.port.to_i + else + @scheme = 'tcp' + @host = '0.0.0.0' + @port = 0 + end + end + + # Returns address String. + def addr + "#{@scheme}://#{@host}:#{@port.to_s}" + end + + # Returns true is scheme, host and port are '==' + def ==(rhs) + (@scheme == rhs.scheme) and (@host == rhs.host) and (@port == rhs.port) + end + + # Same as ==. + def eql?(rhs) + self == rhs + end + + def hash # :nodoc: + [@scheme, @host, @port].hash + end + + def to_s # :nodoc: + addr + end + + # Returns true if scheme, host and port of the given URI matches with this. + def match(uri) + (@scheme == uri.scheme) and (@host == uri.host) and (@port == uri.port.to_i) + end + + def inspect # :nodoc: + sprintf("#<%s:0x%x %s>", self.class.name, __id__, addr) + end + + EMPTY = Site.new.freeze + end + + + # Manages sessions for a HTTPClient instance. + class SessionManager + # Name of this client. Used for 'User-Agent' header in HTTP request. + attr_accessor :agent_name + # Owner of this client. Used for 'From' header in HTTP request. + attr_accessor :from + + # Requested protocol version + attr_accessor :protocol_version + # Chunk size for chunked request + attr_accessor :chunk_size + # Device for dumping log for debugging + attr_accessor :debug_dev + # Boolean value for Socket#sync + attr_accessor :socket_sync + # Boolean value to send TCP keepalive packets; no timing settings exist at present + attr_accessor :tcp_keepalive + + attr_accessor :connect_timeout + # Maximum retry count. 0 for infinite. + attr_accessor :connect_retry + attr_accessor :send_timeout + attr_accessor :receive_timeout + attr_accessor :keep_alive_timeout + attr_accessor :read_block_size + attr_accessor :protocol_retry_count + + # Raise BadResponseError if response size does not match with Content-Length header in response. 
+ attr_accessor :strict_response_size_check + + # Local address to bind local side of the socket to + attr_accessor :socket_local + + attr_accessor :ssl_config + + attr_reader :test_loopback_http_response + + attr_accessor :transparent_gzip_decompression + + def initialize(client) + @client = client + @proxy = client.proxy + + @agent_name = nil + @from = nil + + @protocol_version = nil + @debug_dev = client.debug_dev + @socket_sync = true + @tcp_keepalive = false + @chunk_size = ::HTTP::Message::Body::DEFAULT_CHUNK_SIZE + + @connect_timeout = 60 + @connect_retry = 1 + @send_timeout = 120 + @receive_timeout = 60 # For each read_block_size bytes + @keep_alive_timeout = 15 # '15' is from Apache 2 default + @read_block_size = 1024 * 16 # follows net/http change in 1.8.7 + @protocol_retry_count = 5 + + @ssl_config = nil + @test_loopback_http_response = [] + + @transparent_gzip_decompression = false + @strict_response_size_check = false + @socket_local = Site.new + + @sess_pool = {} + @sess_pool_mutex = Mutex.new + @sess_pool_last_checked = Time.now + end + + def proxy=(proxy) + if proxy.nil? + @proxy = nil + else + @proxy = Site.new(proxy) + end + end + + def query(req, via_proxy) + req.http_body.chunk_size = @chunk_size if req.http_body + sess = get_session(req, via_proxy) + begin + sess.query(req) + rescue + sess.close + raise + end + sess + end + + def reset(uri) + site = Site.new(uri) + close(site) + end + + def reset_all + close_all + end + + # assert: sess.last_used must not be nil + def keep(sess) + add_cached_session(sess) + end + + private + + # TODO: create PR for webmock's httpclient adapter to use get_session + # instead of open so that we can remove duplicated Site creation for + # each session. + def get_session(req, via_proxy = false) + uri = req.header.request_uri + if uri.scheme.nil? + raise ArgumentError.new("Request URI must have schema. Possibly add 'http://' to the request URI?") + end + site = Site.new(uri) + if cached = get_cached_session(site) + cached + else + open(uri, via_proxy) + end + end + + def open(uri, via_proxy = false) + site = Site.new(uri) + sess = Session.new(@client, site, @agent_name, @from) + sess.proxy = via_proxy ? @proxy : nil + sess.socket_sync = @socket_sync + sess.tcp_keepalive = @tcp_keepalive + sess.requested_version = @protocol_version if @protocol_version + sess.connect_timeout = @connect_timeout + sess.connect_retry = @connect_retry + sess.send_timeout = @send_timeout + sess.receive_timeout = @receive_timeout + sess.read_block_size = @read_block_size + sess.protocol_retry_count = @protocol_retry_count + sess.ssl_config = @ssl_config + sess.debug_dev = @debug_dev + sess.strict_response_size_check = @strict_response_size_check + sess.socket_local = @socket_local + sess.test_loopback_http_response = @test_loopback_http_response + sess.transparent_gzip_decompression = @transparent_gzip_decompression + sess + end + + def close_all + @sess_pool_mutex.synchronize do + @sess_pool.each do |site, pool| + pool.each do |sess| + sess.close + end + end + end + @sess_pool.clear + end + + # This method might not work as you expected... 
+ def close(dest) + if cached = get_cached_session(Site.new(dest)) + cached.close + true + else + false + end + end + + def get_cached_session(site) + if Thread.current[:HTTPClient_AcquireNewConnection] + return nil + end + @sess_pool_mutex.synchronize do + now = Time.now + if now > @sess_pool_last_checked + @keep_alive_timeout + scrub_cached_session(now) + @sess_pool_last_checked = now + end + if pool = @sess_pool[site] + pool.each_with_index do |sess, idx| + if valid_session?(sess, now) + return pool.slice!(idx) + end + end + end + end + nil + end + + def scrub_cached_session(now) + @sess_pool.each do |site, pool| + pool.replace(pool.select { |sess| + if valid_session?(sess, now) + true + else + sess.close # close & remove from the pool + false + end + }) + end + end + + def valid_session?(sess, now) + (now <= sess.last_used + @keep_alive_timeout) + end + + def add_cached_session(sess) + @sess_pool_mutex.synchronize do + (@sess_pool[sess.dest] ||= []).unshift(sess) + end + end + end + + + # Wraps up a Socket for method interception. + module SocketWrap + def initialize(socket, *args) + super(*args) + @socket = socket + end + + def close + @socket.close + end + + def closed? + @socket.closed? + end + + def eof? + @socket.eof? + end + + def gets(rs) + @socket.gets(rs) + end + + def read(size, buf = nil) + @socket.read(size, buf) + end + + def readpartial(size, buf = nil) + # StringIO doesn't support :readpartial + if @socket.respond_to?(:readpartial) + @socket.readpartial(size, buf) + else + @socket.read(size, buf) + end + end + + def <<(str) + @socket << str + end + + def flush + @socket.flush + end + + def sync + @socket.sync + end + + def sync=(sync) + @socket.sync = sync + end + end + + + # Module for intercepting Socket methods and dumps in/out to given debugging + # device. debug_dev must respond to <<. + module DebugSocket + extend SocketWrap + + def debug_dev=(debug_dev) + @debug_dev = debug_dev + end + + def close + super + debug("! CONNECTION CLOSED\n") + end + + def gets(rs) + str = super + debug(str) + str + end + + def read(size, buf = nil) + str = super + debug(str) + str + end + + def readpartial(size, buf = nil) + str = super + debug(str) + str + end + + def <<(str) + super + debug(str) + end + + private + + def debug(str) + if str && @debug_dev + if str.index("\0") + require 'hexdump' + str.force_encoding('BINARY') if str.respond_to?(:force_encoding) + @debug_dev << HexDump.encode(str).join("\n") + else + @debug_dev << str + end + end + end + end + + + # Dummy Socket for emulating loopback test. + class LoopBackSocket + include SocketWrap + + def initialize(host, port, response) + super(response.is_a?(StringIO) ? response : StringIO.new(response)) + @host = host + @port = port + end + + def <<(str) + # ignored + end + + def peer_cert + nil + end + end + + + # Manages a HTTP session with a Site. 
+ class Session + include HTTPClient::Timeout + include Util + + # Destination site + attr_reader :dest + # Proxy site + attr_accessor :proxy + # Boolean value for Socket#sync + attr_accessor :socket_sync + # Boolean value to send TCP keepalive packets; no timing settings exist at present + attr_accessor :tcp_keepalive + # Requested protocol version + attr_accessor :requested_version + # Device for dumping log for debugging + attr_accessor :debug_dev + + attr_accessor :connect_timeout + attr_accessor :connect_retry + attr_accessor :send_timeout + attr_accessor :receive_timeout + attr_accessor :read_block_size + attr_accessor :protocol_retry_count + + attr_accessor :strict_response_size_check + attr_accessor :socket_local + + attr_accessor :ssl_config + attr_reader :ssl_peer_cert + attr_accessor :test_loopback_http_response + + attr_accessor :transparent_gzip_decompression + attr_reader :last_used + + def initialize(client, dest, agent_name, from) + @client = client + @dest = dest + @proxy = nil + @socket_sync = true + @tcp_keepalive = false + @requested_version = nil + + @debug_dev = nil + + @connect_timeout = nil + @connect_retry = 1 + @send_timeout = nil + @receive_timeout = nil + @read_block_size = nil + @protocol_retry_count = 5 + + @ssl_config = nil + @ssl_peer_cert = nil + + @test_loopback_http_response = nil + @strict_response_size_check = false + @socket_local = Site::EMPTY + + @agent_name = agent_name + @from = from + @state = :INIT + + @requests = [] + + @status = nil + @reason = nil + @headers = [] + + @socket = nil + @readbuf = nil + + @transparent_gzip_decompression = false + @last_used = nil + end + + # Send a request to the server + def query(req) + connect if @state == :INIT + # Use absolute URI (not absolute path) iif via proxy AND not HTTPS. + req.header.request_absolute_uri = !@proxy.nil? && !https?(@dest) + begin + ::Timeout.timeout(@send_timeout, SendTimeoutError) do + set_header(req) + req.dump(@socket) + # flush the IO stream as IO::sync mode is false + @socket.flush unless @socket_sync + end + rescue Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EPIPE, IOError + # JRuby can raise IOError instead of ECONNRESET for now + close + raise KeepAliveDisconnected.new(self, $!) + rescue HTTPClient::TimeoutError + close + raise + rescue => e + close + if SSLEnabled and e.is_a?(OpenSSL::SSL::SSLError) + raise KeepAliveDisconnected.new(self, e) + else + raise + end + end + + @state = :META if @state == :WAIT + @next_connection = nil + @requests.push(req) + @last_used = Time.now + end + + def close + if !@socket.nil? and !@socket.closed? + # @socket.flush may block when it the socket is already closed by + # foreign host and the client runs under MT-condition. + @socket.close + end + @state = :INIT + end + + def closed? + @state == :INIT + end + + def get_header + begin + if @state != :META + raise RuntimeError.new("get_status must be called at the beginning of a session") + end + read_header + rescue + close + raise + end + [@version, @status, @reason, @headers] + end + + def eof? + if !@content_length.nil? + @content_length == 0 + else + @socket.closed? or @socket.eof? + end + end + + def get_body(&block) + begin + read_header if @state == :META + return nil if @state != :DATA + if @transparent_gzip_decompression + block = content_inflater_block(@content_encoding, block) + end + if @chunked + read_body_chunked(&block) + elsif @content_length + read_body_length(&block) + else + read_body_rest(&block) + end + rescue + close + raise + end + if eof? 
+ if @next_connection + @state = :WAIT + else + close + end + end + nil + end + + def create_socket(host, port) + socket = nil + begin + @debug_dev << "! CONNECT TO #{host}:#{port}\n" if @debug_dev + clean_host = host.delete("[]") + if @socket_local == Site::EMPTY + socket = TCPSocket.new(clean_host, port) + else + clean_local = @socket_local.host.delete("[]") + socket = TCPSocket.new(clean_host, port, clean_local, @socket_local.port) + end + socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if @tcp_keepalive + if @debug_dev + @debug_dev << "! CONNECTION ESTABLISHED\n" + socket.extend(DebugSocket) + socket.debug_dev = @debug_dev + end + rescue SystemCallError => e + raise e.class, e.message + " (#{host}:#{port})" + rescue SocketError => e + raise e.class, e.message + " (#{host}:#{port})" + end + socket + end + + def create_loopback_socket(host, port, str) + @debug_dev << "! CONNECT TO #{host}:#{port}\n" if @debug_dev + socket = LoopBackSocket.new(host, port, str) + if @debug_dev + @debug_dev << "! CONNECTION ESTABLISHED\n" + socket.extend(DebugSocket) + socket.debug_dev = @debug_dev + end + if https?(@dest) && @proxy + connect_ssl_proxy(socket, Util.urify(@dest.to_s)) + end + socket + end + + def connect_ssl_proxy(socket, uri) + req = HTTP::Message.new_connect_request(uri) + @client.request_filter.each do |filter| + filter.filter_request(req) + end + set_header(req) + req.dump(socket) + socket.flush unless @socket_sync + res = HTTP::Message.new_response('') + parse_header(socket) + res.http_version, res.status, res.reason = @version, @status, @reason + @headers.each do |key, value| + res.header.set(key.to_s, value) + end + commands = @client.request_filter.collect { |filter| + filter.filter_response(req, res) + } + if commands.find { |command| command == :retry } + raise RetryableResponse.new(res) + end + unless @status == 200 + raise BadResponseError.new("connect to ssl proxy failed with status #{@status} #{@reason}", res) + end + end + + private + + # This inflater allows deflate compression with/without zlib header + class LenientInflater + def initialize + @inflater = Zlib::Inflate.new(Zlib::MAX_WBITS) + @first = true + end + + def inflate(body) + if @first + first_inflate(body) + else + @inflater.inflate(body) + end + end + + private + + def first_inflate(body) + @first = false + begin + @inflater.inflate(body) + rescue Zlib::DataError + # fallback to deflate without zlib header + @inflater = Zlib::Inflate.new(-Zlib::MAX_WBITS) + @inflater.inflate(body) + end + end + end + + def content_inflater_block(content_encoding, block) + case content_encoding + when 'gzip', 'x-gzip' + # zlib itself has a functionality to decompress gzip stream. + # - zlib 1.2.5 Manual + # http://www.zlib.net/manual.html#Advanced + # > windowBits can also be greater than 15 for optional gzip decoding. Add 32 to + # > windowBits to enable zlib and gzip decoding with automatic header detection, + # > or add 16 to decode only the gzip format + inflate_stream = Zlib::Inflate.new(Zlib::MAX_WBITS + 32) + when 'deflate' + inflate_stream = LenientInflater.new + else + return block + end + Proc.new { |buf| + block.call(inflate_stream.inflate(buf)) + } + end + + def set_header(req) + if @requested_version + if /^(?:HTTP\/|)(\d+.\d+)$/ =~ @requested_version + req.http_version = $1 + end + end + if @agent_name && req.header.get('User-Agent').empty? + req.header.set('User-Agent', "#{@agent_name} #{LIB_NAME}") + end + if @from && req.header.get('From').empty? 
+ req.header.set('From', @from) + end + if req.header.get('Accept').empty? + req.header.set('Accept', '*/*') + end + if @transparent_gzip_decompression + req.header.set('Accept-Encoding', 'gzip,deflate') + end + if req.header.get('Date').empty? + req.header.set_date_header + end + end + + # Connect to the server + def connect + site = @proxy || @dest + retry_number = 0 + begin + ::Timeout.timeout(@connect_timeout, ConnectTimeoutError) do + if str = @test_loopback_http_response.shift + @socket = create_loopback_socket(site.host, site.port, str) + elsif https?(@dest) + @socket = SSLSocket.create_socket(self) + @ssl_peer_cert = @socket.peer_cert + else + @socket = create_socket(site.host, site.port) + end + @socket.sync = @socket_sync + end + rescue RetryableResponse + retry_number += 1 + if retry_number < @protocol_retry_count + retry + end + raise BadResponseError.new("connect to the server failed with status #{@status} #{@reason}") + rescue TimeoutError + if @connect_retry == 0 + retry + else + retry_number += 1 + retry if retry_number < @connect_retry + end + close + raise + end + @state = :WAIT + end + + # Read status block. + def read_header + @content_length = nil + @chunked = false + @content_encoding = nil + @chunk_length = 0 + parse_header(@socket) + # Header of the request has been parsed. + @state = :DATA + req = @requests.shift + if req.header.request_method == 'HEAD' or no_message_body?(@status) + @content_length = 0 + if @next_connection + @state = :WAIT + else + close + end + end + @next_connection = false if !@content_length and !@chunked + end + + StatusParseRegexp = %r(\AHTTP/(\d+\.\d+)\s+(\d\d\d)\s*([^\r\n]+)?\r?\n\z) + def parse_header(socket) + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + initial_line = nil + begin + begin + initial_line = socket.gets("\n") + if initial_line.nil? + close + raise KeepAliveDisconnected.new(self) + end + rescue Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EPIPE, IOError + # JRuby can raise IOError instead of ECONNRESET for now + close + raise KeepAliveDisconnected.new(self, $!) + end + if StatusParseRegexp !~ initial_line + @version = '0.9' + @status = nil + @reason = nil + @next_connection = false + @content_length = nil + @readbuf = initial_line + break + end + @version, @status, @reason = $1, $2.to_i, $3 + @next_connection = HTTP::Message.keep_alive_enabled?(@version) + @headers = [] + while true + line = socket.gets("\n") + unless line + raise BadResponseError.new('unexpected EOF') + end + line.chomp! + break if line.empty? + if line[0] == ?\ or line[0] == ?\t + last = @headers.last[1] + last << ' ' unless last.empty? + last << line.strip + else + key, value = line.strip.split(/\s*:\s*/, 2) + parse_content_header(key, value) + @headers << [key, value] + end + end + end while (@version == '1.1' && @status == 100) + end + end + + def no_message_body?(status) + !status.nil? 
&& # HTTP/0.9 + ((status >= 100 && status < 200) || status == 204 || status == 304) + end + + def parse_content_header(key, value) + key = key.downcase + case key + when 'content-length' + @content_length = value.to_i + when 'content-encoding' + @content_encoding = value.downcase + when 'transfer-encoding' + if value.downcase == 'chunked' + @chunked = true + @chunk_length = 0 + @content_length = nil + end + when 'connection', 'proxy-connection' + if value.downcase == 'keep-alive' + @next_connection = true + else + @next_connection = false + end + end + end + + def read_body_length(&block) + return nil if @content_length == 0 + while true + buf = empty_bin_str + maxbytes = @read_block_size + maxbytes = @content_length if maxbytes > @content_length && @content_length > 0 + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + begin + @socket.readpartial(maxbytes, buf) + rescue EOFError + close + buf = nil + if @strict_response_size_check + raise BadResponseError.new("EOF while reading rest #{@content_length} bytes") + end + end + end + if buf && buf.bytesize > 0 + @content_length -= buf.bytesize + yield buf + else + @content_length = 0 + end + return if @content_length == 0 + end + end + + RS = "\r\n" + def read_body_chunked(&block) + buf = empty_bin_str + while true + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + len = @socket.gets(RS) + if len.nil? # EOF + close + return + end + @chunk_length = len.hex + if @chunk_length == 0 + @content_length = 0 + @socket.gets(RS) + return + end + @socket.read(@chunk_length, buf) + @socket.read(2) + end + unless buf.empty? + yield buf + end + end + end + + def read_body_rest + if @readbuf and @readbuf.bytesize > 0 + yield @readbuf + @readbuf = nil + end + while true + buf = empty_bin_str + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + begin + @socket.readpartial(@read_block_size, buf) + rescue EOFError + buf = nil + if @strict_response_size_check + raise BadResponseError.new("EOF while reading chunked response") + end + end + end + if buf && buf.bytesize > 0 + yield buf + else + return + end + end + end + + def empty_bin_str + str = '' + str.force_encoding('BINARY') if str.respond_to?(:force_encoding) + str + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/ssl_config.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/ssl_config.rb new file mode 100644 index 0000000..f6e7ce9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/ssl_config.rb @@ -0,0 +1,424 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +class HTTPClient + + begin + require 'openssl' + SSLEnabled = true + rescue LoadError + SSLEnabled = false + end + + # Represents SSL configuration for HTTPClient instance. + # The implementation depends on OpenSSL. + # + # == Trust Anchor Control + # + # SSLConfig loads 'httpclient/cacert.pem' as a trust anchor + # (trusted certificate(s)) with add_trust_ca in initialization time. + # This means that HTTPClient instance trusts some CA certificates by default, + # like Web browsers. 'httpclient/cacert.pem' is downloaded from curl web + # site by the author and included in released package. 
+ # + # On JRuby, HTTPClient uses Java runtime's trusted CA certificates, not + # cacert.pem by default. You can load cacert.pem by calling + # SSLConfig#load_trust_ca manually like: + # + # HTTPClient.new { self.ssl_config.load_trust_ca }.get("https://...") + # + # You may want to change trust anchor by yourself. Call clear_cert_store + # then add_trust_ca for that purpose. + class SSLConfig + include HTTPClient::Util + if SSLEnabled + include OpenSSL + + module ::OpenSSL + module X509 + class Store + attr_reader :_httpclient_cert_store_items + + # TODO: use prepend instead when we drop JRuby + 1.9.x support + wrapped = {} + + wrapped[:initialize] = instance_method(:initialize) + define_method(:initialize) do |*args| + wrapped[:initialize].bind(self).call(*args) + @_httpclient_cert_store_items = [ENV['SSL_CERT_FILE'] || :default] + end + + [:add_cert, :add_file, :add_path].each do |m| + wrapped[m] = instance_method(m) + define_method(m) do |cert| + res = wrapped[m].bind(self).call(cert) + @_httpclient_cert_store_items << cert + res + end + end + end + end + end + end + + class << self + private + def attr_config(symbol) + name = symbol.to_s + ivar_name = "@#{name}" + define_method(name) { + instance_variable_get(ivar_name) + } + define_method("#{name}=") { |rhs| + if instance_variable_get(ivar_name) != rhs + instance_variable_set(ivar_name, rhs) + change_notify + end + } + symbol + end + end + + + CIPHERS_DEFAULT = "ALL:!aNULL:!eNULL:!SSLv2" # OpenSSL >1.0.0 default + + # Which TLS protocol version (also called method) will be used. Defaults + # to :auto which means that OpenSSL decides (In my tests this resulted + # with always the highest available protocol being used). + # String name of OpenSSL's SSL version method name: TLSv1_2, TLSv1_1, TLSv1, + # SSLv2, SSLv23, SSLv3 or :auto (and nil) to allow version negotiation (default). + # See {OpenSSL::SSL::SSLContext::METHODS} for a list of available versions + # in your specific Ruby environment. + attr_config :ssl_version + # OpenSSL::X509::Certificate:: certificate for SSL client authentication. + # nil by default. (no client authentication) + attr_config :client_cert + # OpenSSL::PKey::PKey:: private key for SSL client authentication. + # nil by default. (no client authentication) + attr_config :client_key + # OpenSSL::PKey::PKey:: private key pass phrase for client_key. + # nil by default. (no pass phrase) + attr_config :client_key_pass + + # A number which represents OpenSSL's verify mode. Default value is + # OpenSSL::SSL::VERIFY_PEER | OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT. + attr_config :verify_mode + # A number of verify depth. Certification path which length is longer than + # this depth is not allowed. + # CAUTION: this is OpenSSL specific option and ignored on JRuby. + attr_config :verify_depth + # A callback handler for custom certificate verification. nil by default. + # If the handler is set, handler.call is invoked just after general + # OpenSSL's verification. handler.call is invoked with 2 arguments, + # ok and ctx; ok is a result of general OpenSSL's verification. ctx is a + # OpenSSL::X509::StoreContext. + attr_config :verify_callback + # SSL timeout in sec. nil by default. + attr_config :timeout + # A number of OpenSSL's SSL options. Default value is + # OpenSSL::SSL::OP_ALL | OpenSSL::SSL::OP_NO_SSLv2 + # CAUTION: this is OpenSSL specific option and ignored on JRuby. + # Use ssl_version to specify the TLS version you want to use. + attr_config :options + # A String of OpenSSL's cipher configuration. 
Default value is + # ALL:!ADH:!LOW:!EXP:!MD5:+SSLv2:@STRENGTH + # See ciphers(1) man in OpenSSL for more detail. + attr_config :ciphers + + # OpenSSL::X509::X509::Store used for verification. You can reset the + # store with clear_cert_store and set the new store with cert_store=. + attr_reader :cert_store # don't use if you don't know what it is. + + # For server side configuration. Ignore this. + attr_config :client_ca # :nodoc: + + # These array keeps original files/dirs that was added to @cert_store + def cert_store_items; @cert_store._httpclient_cert_store_items; end + attr_reader :cert_store_crl_items + + # Creates a SSLConfig. + def initialize(client) + return unless SSLEnabled + @client = client + @cert_store = X509::Store.new + @cert_store_crl_items = [] + @client_cert = @client_key = @client_key_pass = @client_ca = nil + @verify_mode = SSL::VERIFY_PEER | SSL::VERIFY_FAIL_IF_NO_PEER_CERT + @verify_depth = nil + @verify_callback = nil + @dest = nil + @timeout = nil + @ssl_version = :auto + # Follow ruby-ossl's definition + @options = OpenSSL::SSL::OP_ALL + @options &= ~OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS if defined?(OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS) + @options |= OpenSSL::SSL::OP_NO_COMPRESSION if defined?(OpenSSL::SSL::OP_NO_COMPRESSION) + @options |= OpenSSL::SSL::OP_NO_SSLv2 if defined?(OpenSSL::SSL::OP_NO_SSLv2) + @options |= OpenSSL::SSL::OP_NO_SSLv3 if defined?(OpenSSL::SSL::OP_NO_SSLv3) + # OpenSSL 0.9.8 default: "ALL:!ADH:!LOW:!EXP:!MD5:+SSLv2:@STRENGTH" + @ciphers = CIPHERS_DEFAULT + @cacerts_loaded = false + end + + # Sets certificate and private key for SSL client authentication. + # cert_file:: must be a filename of PEM/DER formatted file. + # key_file:: must be a filename of PEM/DER formatted file. Key must be an + # RSA key. If you want to use other PKey algorithm, + # use client_key=. + # + # Calling this method resets all existing sessions if value is changed. + def set_client_cert_file(cert_file, key_file, pass = nil) + if (@client_cert != cert_file) || (@client_key != key_file) || (@client_key_pass != pass) + @client_cert, @client_key, @client_key_pass = cert_file, key_file, pass + change_notify + end + end + + # Sets OpenSSL's default trusted CA certificates. Generally, OpenSSL is + # configured to use OS's trusted CA certificates located at + # /etc/pki/certs or /etc/ssl/certs. Unfortunately OpenSSL's Windows build + # does not work with Windows Certificate Storage. + # + # On Windows or when you build OpenSSL manually, you can set the + # CA certificates directory by SSL_CERT_DIR env variable at runtime. + # + # SSL_CERT_DIR=/etc/ssl/certs ruby -rhttpclient -e "..." + # + # Calling this method resets all existing sessions. + def set_default_paths + @cacerts_loaded = true # avoid lazy override + @cert_store = X509::Store.new + @cert_store.set_default_paths + change_notify + end + + # Drops current certificate store (OpenSSL::X509::Store) for SSL and create + # new one for the next session. + # + # Calling this method resets all existing sessions. + def clear_cert_store + @cacerts_loaded = true # avoid lazy override + @cert_store = X509::Store.new + @cert_store._httpclient_cert_store_items.clear + change_notify + end + + # Sets new certificate store (OpenSSL::X509::Store). + # don't use if you don't know what it is. + # + # Calling this method resets all existing sessions. 
+ def cert_store=(cert_store) + # This is object equality check, since OpenSSL::X509::Store doesn't overload == + if !@cacerts_loaded || (@cert_store != cert_store) + @cacerts_loaded = true # avoid lazy override + @cert_store = cert_store + change_notify + end + end + + # Sets trust anchor certificate(s) for verification. + # trust_ca_file_or_hashed_dir:: a filename of a PEM/DER formatted + # OpenSSL::X509::Certificate or + # a 'c-rehash'eddirectory name which stores + # trusted certificate files. + # + # Calling this method resets all existing sessions. + def add_trust_ca(trust_ca_file_or_hashed_dir) + unless File.exist?(trust_ca_file_or_hashed_dir) + trust_ca_file_or_hashed_dir = File.join(File.dirname(__FILE__), trust_ca_file_or_hashed_dir) + end + @cacerts_loaded = true # avoid lazy override + add_trust_ca_to_store(@cert_store, trust_ca_file_or_hashed_dir) + change_notify + end + alias set_trust_ca add_trust_ca + + def add_trust_ca_to_store(cert_store, trust_ca_file_or_hashed_dir) + if FileTest.directory?(trust_ca_file_or_hashed_dir) + cert_store.add_path(trust_ca_file_or_hashed_dir) + else + cert_store.add_file(trust_ca_file_or_hashed_dir) + end + end + + # Loads default trust anchors. + # Calling this method resets all existing sessions. + def load_trust_ca + load_cacerts(@cert_store) + change_notify + end + + # Adds CRL for verification. + # crl:: a OpenSSL::X509::CRL or a filename of a PEM/DER formatted + # OpenSSL::X509::CRL. + # + # On JRuby, instead of setting CRL by yourself you can set following + # options to let HTTPClient to perform revocation check with CRL and OCSP: + # -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true + # ex. jruby -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true app.rb + # + # Revoked cert example: https://test-sspev.verisign.com:2443/test-SSPEV-revoked-verisign.html + # + # Calling this method resets all existing sessions. + def add_crl(crl) + unless crl.is_a?(X509::CRL) + crl = X509::CRL.new(File.open(crl) { |f| f.read }) + end + @cert_store.add_crl(crl) + @cert_store_crl_items << crl + @cert_store.flags = X509::V_FLAG_CRL_CHECK | X509::V_FLAG_CRL_CHECK_ALL + change_notify + end + alias set_crl add_crl + + def verify? + @verify_mode && (@verify_mode & OpenSSL::SSL::VERIFY_PEER != 0) + end + + # interfaces for SSLSocket. + def set_context(ctx) # :nodoc: + load_trust_ca unless @cacerts_loaded + @cacerts_loaded = true + # Verification: Use Store#verify_callback instead of SSLContext#verify*? + ctx.cert_store = @cert_store + ctx.verify_mode = @verify_mode + ctx.verify_depth = @verify_depth if @verify_depth + ctx.verify_callback = @verify_callback || method(:default_verify_callback) + # SSL config + if @client_cert + ctx.cert = @client_cert.is_a?(X509::Certificate) ? @client_cert : + X509::Certificate.new(File.open(@client_cert) { |f| f.read }) + end + if @client_key + ctx.key = @client_key.is_a?(PKey::PKey) ? @client_key : + PKey::RSA.new(File.open(@client_key) { |f| f.read }, @client_key_pass) + end + ctx.client_ca = @client_ca + ctx.timeout = @timeout + ctx.options = @options + ctx.ciphers = @ciphers + ctx.ssl_version = @ssl_version unless @ssl_version == :auto + end + + # post connection check proc for ruby < 1.8.5. 
+ # this definition must match with the one in ext/openssl/lib/openssl/ssl.rb + def post_connection_check(peer_cert, hostname) # :nodoc: + check_common_name = true + cert = peer_cert + cert.extensions.each{|ext| + next if ext.oid != "subjectAltName" + ext.value.split(/,\s+/).each{|general_name| + if /\ADNS:(.*)/ =~ general_name + check_common_name = false + reg = Regexp.escape($1).gsub(/\\\*/, "[^.]+") + return true if /\A#{reg}\z/i =~ hostname + elsif /\AIP Address:(.*)/ =~ general_name + check_common_name = false + return true if $1 == hostname + end + } + } + if check_common_name + cert.subject.to_a.each{|oid, value| + if oid == "CN" + reg = Regexp.escape(value).gsub(/\\\*/, "[^.]+") + return true if /\A#{reg}\z/i =~ hostname + end + } + end + raise SSL::SSLError, "hostname was not match with the server certificate" + end + + # Default callback for verification: only dumps error. + def default_verify_callback(is_ok, ctx) + if $DEBUG + if is_ok + warn("ok: #{ctx.current_cert.subject.to_s.dump}") + else + warn("ng: #{ctx.current_cert.subject.to_s.dump} at depth #{ctx.error_depth} - #{ctx.error}: #{ctx.error_string} in #{ctx.chain.inspect}") + end + warn(ctx.current_cert.to_text) + warn(ctx.current_cert.to_pem) + end + if !is_ok + depth = ctx.error_depth + code = ctx.error + msg = ctx.error_string + warn("at depth #{depth} - #{code}: #{msg}") if $DEBUG + end + is_ok + end + + # Sample callback method: CAUTION: does not check CRL/ARL. + def sample_verify_callback(is_ok, ctx) + unless is_ok + depth = ctx.error_depth + code = ctx.error + msg = ctx.error_string + warn("at depth #{depth} - #{code}: #{msg}") if $DEBUG + return false + end + + cert = ctx.current_cert + self_signed = false + ca = false + pathlen = nil + server_auth = true + self_signed = (cert.subject.cmp(cert.issuer) == 0) + + # Check extensions whatever its criticality is. (sample) + cert.extensions.each do |ex| + case ex.oid + when 'basicConstraints' + /CA:(TRUE|FALSE), pathlen:(\d+)/ =~ ex.value + ca = ($1 == 'TRUE') + pathlen = $2.to_i + when 'keyUsage' + usage = ex.value.split(/\s*,\s*/) + ca = usage.include?('Certificate Sign') + server_auth = usage.include?('Key Encipherment') + when 'extendedKeyUsage' + usage = ex.value.split(/\s*,\s*/) + server_auth = usage.include?('Netscape Server Gated Crypto') + when 'nsCertType' + usage = ex.value.split(/\s*,\s*/) + ca = usage.include?('SSL CA') + server_auth = usage.include?('SSL Server') + end + end + + if self_signed + warn('self signing CA') if $DEBUG + return true + elsif ca + warn('middle level CA') if $DEBUG + return true + elsif server_auth + warn('for server authentication') if $DEBUG + return true + end + + return false + end + + private + + def change_notify + @client.reset_all + nil + end + + # Use 2048 bit certs trust anchor + def load_cacerts(cert_store) + file = File.join(File.dirname(__FILE__), 'cacert.pem') + add_trust_ca_to_store(cert_store, file) + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/ssl_socket.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/ssl_socket.rb new file mode 100644 index 0000000..1480142 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/ssl_socket.rb @@ -0,0 +1,150 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. 
You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'httpclient/ssl_config' + + +class HTTPClient + + # Wraps up OpenSSL::SSL::SSLSocket and offers debugging features. + class SSLSocket + def self.create_socket(session) + opts = { + :debug_dev => session.debug_dev + } + site = session.proxy || session.dest + socket = session.create_socket(site.host, site.port) + begin + if session.proxy + session.connect_ssl_proxy(socket, Util.urify(session.dest.to_s)) + end + new(socket, session.dest, session.ssl_config, opts) + rescue + socket.close + raise + end + end + + def initialize(socket, dest, config, opts = {}) + unless SSLEnabled + raise ConfigurationError.new('Ruby/OpenSSL module is required') + end + @socket = socket + @config = config + @ssl_socket = create_openssl_socket(@socket) + @debug_dev = opts[:debug_dev] + ssl_connect(dest.host) + end + + def peer_cert + @ssl_socket.peer_cert + end + + def close + @ssl_socket.close + @socket.close + end + + def closed? + @socket.closed? + end + + def eof? + @ssl_socket.eof? + end + + def gets(rs) + str = @ssl_socket.gets(rs) + debug(str) + str + end + + def read(size, buf = nil) + str = @ssl_socket.read(size, buf) + debug(str) + str + end + + def readpartial(size, buf = nil) + str = @ssl_socket.readpartial(size, buf) + debug(str) + str + end + + def <<(str) + rv = @ssl_socket.write(str) + debug(str) + rv + end + + def flush + @ssl_socket.flush + end + + def sync + @ssl_socket.sync + end + + def sync=(sync) + @ssl_socket.sync = sync + end + + private + + def ssl_connect(hostname = nil) + if hostname && @ssl_socket.respond_to?(:hostname=) + @ssl_socket.hostname = hostname + end + @ssl_socket.connect + if $DEBUG + if @ssl_socket.respond_to?(:ssl_version) + warn("Protocol version: #{@ssl_socket.ssl_version}") + end + warn("Cipher: #{@ssl_socket.cipher.inspect}") + end + post_connection_check(hostname) + end + + def post_connection_check(hostname) + verify_mode = @config.verify_mode || OpenSSL::SSL::VERIFY_NONE + if verify_mode == OpenSSL::SSL::VERIFY_NONE + return + elsif @ssl_socket.peer_cert.nil? and + check_mask(verify_mode, OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT) + raise OpenSSL::SSL::SSLError.new('no peer cert') + end + if @ssl_socket.respond_to?(:post_connection_check) and RUBY_VERSION > "1.8.4" + @ssl_socket.post_connection_check(hostname) + else + @config.post_connection_check(@ssl_socket.peer_cert, hostname) + end + end + + def check_mask(value, mask) + value & mask == mask + end + + def create_openssl_socket(socket) + ssl_socket = nil + if OpenSSL::SSL.const_defined?("SSLContext") + ctx = OpenSSL::SSL::SSLContext.new + @config.set_context(ctx) + ssl_socket = OpenSSL::SSL::SSLSocket.new(socket, ctx) + else + ssl_socket = OpenSSL::SSL::SSLSocket.new(socket) + @config.set_context(ssl_socket) + end + ssl_socket + end + + def debug(str) + @debug_dev << str if @debug_dev && str + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/timeout.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/timeout.rb new file mode 100644 index 0000000..c5bbcac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/timeout.rb @@ -0,0 +1,140 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. 
You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'timeout' +require 'thread' + + +class HTTPClient + + + # Replaces timeout.rb to avoid Thread creation and scheduling overhead. + # + # You should check another timeout replace in WEBrick. + # See lib/webrick/utils.rb in ruby/1.9. + # + # About this implementation: + # * Do not create Thread for each timeout() call. Just create 1 Thread for + # timeout scheduler. + # * Do not wakeup the scheduler thread so often. Let scheduler thread sleep + # until the nearest period. +if !defined?(JRUBY_VERSION) and RUBY_VERSION < '1.9' + class TimeoutScheduler + + # Represents timeout period. + class Period + attr_reader :thread, :time + + # Creates new Period. + def initialize(thread, time, ex) + @thread, @time, @ex = thread, time, ex + @lock = Mutex.new + end + + # Raises if thread exists and alive. + def raise(message) + @lock.synchronize do + if @thread and @thread.alive? + @thread.raise(@ex, message) + end + end + end + + # Cancel this Period. Mutex is needed to avoid too-late exception. + def cancel + @lock.synchronize do + @thread = nil + end + end + end + + # Creates new TimeoutScheduler. + def initialize + @pool = {} + @next = nil + @thread = start_timer_thread + end + + # Registers new timeout period. + def register(thread, sec, ex) + period = Period.new(thread, Time.now + sec, ex || ::Timeout::Error) + @pool[period] = true + if @next.nil? or period.time < @next + begin + @thread.wakeup + rescue ThreadError + # Thread may be dead by fork. + @thread = start_timer_thread + end + end + period + end + + # Cancels the given period. + def cancel(period) + @pool.delete(period) + period.cancel + end + + private + + def start_timer_thread + thread = Thread.new { + while true + if @pool.empty? + @next = nil + sleep + else + min, = @pool.min { |a, b| a[0].time <=> b[0].time } + @next = min.time + sec = @next - Time.now + if sec > 0 + sleep(sec) + end + end + now = Time.now + @pool.keys.each do |period| + if period.time < now + period.raise('execution expired') + cancel(period) + end + end + end + } + Thread.pass while thread.status != 'sleep' + thread + end + end + + class << self + # CAUTION: caller must aware of race condition. + def timeout_scheduler + @timeout_scheduler ||= TimeoutScheduler.new + end + end + timeout_scheduler # initialize at first time. +end + + module Timeout + if !defined?(JRUBY_VERSION) and RUBY_VERSION < '1.9' + def timeout(sec, ex = nil, &block) + return yield if sec == nil or sec.zero? + scheduler = nil + begin + scheduler = HTTPClient.timeout_scheduler + period = scheduler.register(Thread.current, sec, ex) + yield(sec) + ensure + scheduler.cancel(period) if scheduler and period + end + end + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/util.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/util.rb new file mode 100644 index 0000000..6ff3801 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/util.rb @@ -0,0 +1,222 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. 
+ + +unless ''.respond_to?(:bytesize) + class String + alias bytesize size + end +end + +if RUBY_VERSION < "1.9.3" + require 'uri' + module URI + class Generic + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + end + end +end + +# With recent JRuby 1.7 + jruby-openssl, X509CRL#extentions_to_text causes +# StringIndexOOBException when we try to dump SSL Server Certificate. +# when one of extensions has "" as value. +if defined?(JRUBY_VERSION) + require 'openssl' + require 'java' + module OpenSSL + module X509 + class Certificate + java_import 'java.security.cert.Certificate' + java_import 'java.security.cert.CertificateFactory' + java_import 'java.io.ByteArrayInputStream' + def to_text + cf = CertificateFactory.getInstance('X.509') + cf.generateCertificate(ByteArrayInputStream.new(self.to_der.to_java_bytes)).toString + end + end + end + end +end + + +class HTTPClient + + + # A module for common function. + module Util + + # URI abstraction; Addressable::URI or URI + require 'uri' + begin + require 'addressable/uri' + # Older versions doesn't have #default_port + unless Addressable::URI.instance_methods.include?(:default_port) # 1.9 only + raise LoadError + end + class AddressableURI < Addressable::URI + # Overwrites the original definition just for one line... + def authority + self.host && @authority ||= (begin + authority = "" + if self.userinfo != nil + authority << "#{self.userinfo}@" + end + authority << self.host + if self.port != self.default_port # ...HERE! Compares with default_port because self.port is not nil in this wrapper. + authority << ":#{self.port}" + end + authority + end) + end + + # HTTPClient expects urify("http://foo/").port to be not nil but 80 like URI. + def port + super || default_port + end + + # Captured from uri/generic.rb + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + end + AddressableEnabled = true + rescue LoadError + AddressableEnabled = false + end + + # Keyword argument helper. + # args:: given arguments. + # *field:: a list of arguments to be extracted. + # + # You can extract 3 arguments (a, b, c) with: + # + # include Util + # def my_method(*args) + # a, b, c = keyword_argument(args, :a, :b, :c) + # ... + # end + # my_method(1, 2, 3) + # my_method(:b => 2, :a = 1) + # + # instead of; + # + # def my_method(a, b, c) + # ... + # end + # + def keyword_argument(args, *field) + if args.size == 1 and Hash === args[0] + h = args[0] + if field.any? { |f| h.key?(f) } + return h.values_at(*field) + end + end + args + end + + # Keyword argument to hash helper. + # args:: given arguments. + # *field:: a list of arguments to be extracted. + # + # Returns hash which has defined keys. When a Hash given, returns it + # including undefined keys. When an Array given, returns a Hash which only + # includes defined keys. + def argument_to_hash(args, *field) + return nil if args.empty? + if args.size == 1 and Hash === args[0] + h = args[0] + if field.any? { |f| h.key?(f) } + return h + end + end + h = {} + field.each_with_index do |e, idx| + h[e] = args[idx] + end + h + end + + # Gets an URI instance. + def urify(uri) + if uri.nil? + nil + elsif uri.is_a?(URI) + uri + elsif AddressableEnabled + AddressableURI.parse(uri.to_s) + else + URI.parse(uri.to_s) + end + end + module_function :urify + + # Returns true if the given 2 URIs have a part_of relationship. + # * the same scheme + # * the same host String (no host resolution or IP-addr conversion) + # * the same port number + # * target URI's path starts with base URI's path. 
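+ #
+ # Illustrative example (added comment; not part of the original gem source,
+ # host and paths below are hypothetical):
+ #
+ #   base = HTTPClient::Util.urify('http://example.com/app/')
+ #   page = HTTPClient::Util.urify('http://example.com/app/items/1')
+ #   HTTPClient::Util.uri_part_of(page, base)  #=> true
+ #   HTTPClient::Util.uri_part_of(page, HTTPClient::Util.urify('http://example.com/admin/'))  #=> false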
+ def uri_part_of(uri, part) + ((uri.scheme == part.scheme) and + (uri.host == part.host) and + (uri.port == part.port) and + uri.path.upcase.index(part.path.upcase) == 0) + end + module_function :uri_part_of + + # Returns parent directory URI of the given URI. + def uri_dirname(uri) + uri = uri.clone + uri.path = uri.path.sub(/\/[^\/]*\z/, '/') + uri + end + module_function :uri_dirname + + # Finds a value of a Hash. + def hash_find_value(hash, &block) + v = hash.find(&block) + v ? v[1] : nil + end + module_function :hash_find_value + + # Try to require a feature and returns true/false if loaded + # + # It returns 'true' for the second require in contrast of the standard + # require returns false if the feature is already loaded. + def try_require(feature) + require feature + true + rescue LoadError + false + end + module_function :try_require + + # show one warning message only once by caching message + # + # it cached all messages in memory so be careful not to show many kinds of warning message. + @@__warned = {} + def warning(message) + return if @@__warned.key?(message) + warn(message) + @@__warned[message] = true + end + + # Checks if the given URI is https. + def https?(uri) + uri.scheme && uri.scheme.downcase == 'https' + end + + def http?(uri) + uri.scheme && uri.scheme.downcase == 'http' + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/version.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/version.rb new file mode 100644 index 0000000..d1ab8e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/version.rb @@ -0,0 +1,3 @@ +class HTTPClient + VERSION = '2.8.3' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/webagent-cookie.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/webagent-cookie.rb new file mode 100644 index 0000000..74dbfef --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/httpclient/webagent-cookie.rb @@ -0,0 +1,459 @@ +# cookie.rb is redistributed file which is originally included in Webagent +# version 0.6.2 by TAKAHASHI `Maki' Masayoshi. And it contains some bug fixes. +# You can download the entire package of Webagent from +# http://www.rubycolor.org/arc/. + + +# Cookie class +# +# I refered to w3m's source to make these classes. Some comments +# are quoted from it. I'm thanksful for author(s) of it. +# +# w3m homepage: http://ei5nazha.yz.yamagata-u.ac.jp/~aito/w3m/eng/ + +require 'time' +require 'monitor' +require 'httpclient/util' + +class WebAgent + + module CookieUtils + + def head_match?(str1, str2) + str1 == str2[0, str1.length] + end + + def tail_match?(str1, str2) + if str1.length > 0 + str1 == str2[-str1.length..-1].to_s + else + true + end + end + + def domain_match(host, domain) + return false if domain.nil? + domainname = domain.sub(/\.\z/, '').downcase + hostname = host.sub(/\.\z/, '').downcase + case domain + when /\d+\.\d+\.\d+\.\d+/ + return (hostname == domainname) + when '.' + return true + when /^\./ + # allows; host == rubyforge.org, domain == .rubyforge.org + return tail_match?(domainname, '.' 
+ hostname) + else + return (hostname == domainname) + end + end + end + + class Cookie + include CookieUtils + + attr_accessor :name, :value + attr_accessor :domain, :path + attr_accessor :expires ## for Netscape Cookie + attr_accessor :url + attr_writer :use, :secure, :http_only, :discard, :domain_orig, :path_orig, :override + + USE = 1 + SECURE = 2 + DOMAIN = 4 + PATH = 8 + DISCARD = 16 + OVERRIDE = 32 + OVERRIDE_OK = 32 + HTTP_ONLY = 64 + + def self.parse(str, url) + cookie = new + cookie.parse(str, url) + cookie + end + + def initialize + @name = @value = @domain = @path = nil + @expires = nil + @url = nil + @use = @secure = @http_only = @discard = @domain_orig = @path_orig = @override = nil + end + + def discard? + @discard + end + + def use? + @use + end + + def secure? + @secure + end + + def http_only? + @http_only + end + + def domain_orig? + @domain_orig + end + + def path_orig? + @path_orig + end + + def override? + @override + end + + def flag + flg = 0 + flg += USE if @use + flg += SECURE if @secure + flg += HTTP_ONLY if @http_only + flg += DOMAIN if @domain_orig + flg += PATH if @path_orig + flg += DISCARD if @discard + flg += OVERRIDE if @override + flg + end + + def set_flag(flag) + flag = flag.to_i + @use = true if flag & USE > 0 + @secure = true if flag & SECURE > 0 + @http_only = true if flag & HTTP_ONLY > 0 + @domain_orig = true if flag & DOMAIN > 0 + @path_orig = true if flag & PATH > 0 + @discard = true if flag & DISCARD > 0 + @override = true if flag & OVERRIDE > 0 + end + + def match?(url) + domainname = url.host + if (!domainname || + !domain_match(domainname, @domain) || + (@path && !head_match?(@path, url.path.empty? ? '/' : url.path)) || + (@secure && (url.scheme != 'https')) ) + return false + else + return true + end + end + + def join_quotedstr(array, sep) + ret = Array.new + old_elem = nil + array.each{|elem| + if (elem.scan(/"/).length % 2) == 0 + if old_elem + old_elem << sep << elem + else + ret << elem + old_elem = nil + end + else + if old_elem + old_elem << sep << elem + ret << old_elem + old_elem = nil + else + old_elem = elem.dup + end + end + } + ret + end + + def parse(str, url) + @url = url + # TODO: should not depend on join_quotedstr. scan with escape like CSV. + cookie_elem = str.split(/;/) + cookie_elem = join_quotedstr(cookie_elem, ';') + cookie_elem -= [""] # del empty elements, a cookie might included ";;" + first_elem = cookie_elem.shift + if first_elem !~ /([^=]*)(\=(.*))?/ + return + ## raise ArgumentError 'invalid cookie value' + end + @name = $1.strip + @value = normalize_cookie_value($3) + cookie_elem.each{|pair| + key, value = pair.split(/=/, 2) ## value may nil + key.strip! + value = normalize_cookie_value(value) + case key.downcase + when 'domain' + @domain = value + when 'expires' + @expires = nil + begin + @expires = Time.parse(value).gmtime if value + rescue ArgumentError + end + when 'path' + @path = value + when 'secure' + @secure = true ## value may nil, but must 'true'. + when 'httponly' + @http_only = true ## value may nil, but must 'true'. + else + warn("Unknown key: #{key} = #{value}") + end + } + end + + private + + def normalize_cookie_value(value) + if value + value = value.strip.sub(/\A"(.*)"\z/) { $1 } + value = nil if value.empty? + end + value + end + end + + ## + # An Array class that already includes the MonitorMixin module. 
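+ # (Added note: MonitorMixin supplies #synchronize, so CookieManager below can wrap
+ # compound updates such as save_all_cookies, add and load_cookies in
+ # @cookies.synchronize { ... } without managing a separate lock object.)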
+ # + class SynchronizedArray < Array + include MonitorMixin + end + + class CookieManager + include CookieUtils + + ### errors + class Error < StandardError; end + class ErrorOverrideOK < Error; end + class SpecialError < Error; end + + attr_reader :cookies + attr_accessor :cookies_file + attr_accessor :accept_domains, :reject_domains + + def initialize(file=nil) + @cookies = SynchronizedArray.new + @cookies_file = file + @is_saved = true + @reject_domains = Array.new + @accept_domains = Array.new + @netscape_rule = false + end + + def cookies=(cookies) + if cookies.is_a?(SynchronizedArray) + @cookies = cookies + else + @cookies = SynchronizedArray.new(cookies) + end + end + + def save_all_cookies(force = nil, save_unused = true, save_discarded = true) + @cookies.synchronize do + check_expired_cookies + if @is_saved and !force + return + end + File.open(@cookies_file, 'w') do |f| + @cookies.each do |cookie| + if (cookie.use? or save_unused) and + (!cookie.discard? or save_discarded) + f.print(cookie.url.to_s,"\t", + cookie.name,"\t", + cookie.value,"\t", + cookie.expires.to_i,"\t", + cookie.domain,"\t", + cookie.path,"\t", + cookie.flag,"\n") + end + end + end + end + @is_saved = true + end + + def save_cookies(force = nil) + save_all_cookies(force, false, false) + end + + def check_expired_cookies + @cookies.reject!{|cookie| + is_expired = (cookie.expires && (cookie.expires < Time.now.gmtime)) + if is_expired && !cookie.discard? + @is_saved = false + end + is_expired + } + end + + def parse(str, url) + cookie = WebAgent::Cookie.new + cookie.parse(str, url) + add(cookie) + end + + def find(url) + return nil if @cookies.empty? + + cookie_list = Array.new + @cookies.each{|cookie| + is_expired = (cookie.expires && (cookie.expires < Time.now.gmtime)) + if cookie.use? && !is_expired && cookie.match?(url) + if cookie_list.select{|c1| c1.name == cookie.name}.empty? + cookie_list << cookie + end + end + } + return make_cookie_str(cookie_list) + end + alias cookie_value find + + def add(given) + check_domain(given.domain, given.url.host, given.override?) + + domain = given.domain || given.url.host + path = given.path || given.url.path.sub(%r|/[^/]*\z|, '') + + cookie = nil + @cookies.synchronize do + check_expired_cookies + cookie = @cookies.find { |c| + c.domain == domain && c.path == path && c.name == given.name + } + if !cookie + cookie = WebAgent::Cookie.new + cookie.use = true + @cookies << cookie + end + end + + cookie.domain = domain + cookie.path = path + cookie.url = given.url + cookie.name = given.name + cookie.value = given.value + cookie.expires = given.expires + cookie.secure = given.secure? + cookie.http_only = given.http_only? + cookie.domain_orig = given.domain + cookie.path_orig = given.path + + if cookie.discard? || cookie.expires.nil? + cookie.discard = true + else + cookie.discard = false + @is_saved = false + end + end + + def load_cookies + return if !File.readable?(@cookies_file) + @cookies.synchronize do + @cookies.clear + File.open(@cookies_file,'r'){|f| + while line = f.gets + cookie = WebAgent::Cookie.new + @cookies << cookie + col = line.chomp.split(/\t/) + cookie.url = HTTPClient::Util.urify(col[0]) + cookie.name = col[1] + cookie.value = col[2] + if col[3].empty? or col[3] == '0' + cookie.expires = nil + else + cookie.expires = Time.at(col[3].to_i).gmtime + end + cookie.domain = col[4] + cookie.path = col[5] + cookie.set_flag(col[6]) + end + } + end + end + + # Who use it? 
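+ # (Added note: accept rules are evaluated before reject rules, so a domain matching
+ # both @accept_domains and @reject_domains is accepted; a nil domain is rejected.)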
+ def check_cookie_accept_domain(domain) + unless domain + return false + end + @accept_domains.each{|dom| + if domain_match(domain, dom) + return true + end + } + @reject_domains.each{|dom| + if domain_match(domain, dom) + return false + end + } + return true + end + + private + + def make_cookie_str(cookie_list) + if cookie_list.empty? + return nil + end + + ret = '' + c = cookie_list.shift + ret += "#{c.name}=#{c.value}" + cookie_list.each{|cookie| + ret += "; #{cookie.name}=#{cookie.value}" + } + return ret + end + + # for conformance to http://wp.netscape.com/newsref/std/cookie_spec.html + attr_accessor :netscape_rule + SPECIAL_DOMAIN = [".com",".edu",".gov",".mil",".net",".org",".int"] + + def check_domain(domain, hostname, override) + return unless domain + + # [DRAFT 12] s. 4.2.2 (does not apply in the case that + # host name is the same as domain attribute for version 0 + # cookie) + # I think that this rule has almost the same effect as the + # tail match of [NETSCAPE]. + if domain !~ /^\./ && hostname != domain + domain = '.'+domain + end + # [NETSCAPE] rule + if @netscape_rule + n = domain.scan(/\./).length + if n < 2 + cookie_error(SpecialError.new, override) + elsif n == 2 + ## [NETSCAPE] rule + ok = SPECIAL_DOMAIN.select{|sdomain| + sdomain == domain[-(sdomain.length)..-1] + } + if ok.empty? + cookie_error(SpecialError.new, override) + end + end + end + # this implementation does not check RFC2109 4.3.2 case 2; + # the portion of host not in domain does not contain a dot. + # according to nsCookieService.cpp in Firefox 3.0.4, Firefox 3.0.4 + # and IE does not check, too. + end + + # not tested well; used only netscape_rule = true. + def cookie_error(err, override) + if !err.kind_of?(ErrorOverrideOK) || !override + raise err + end + end + end +end + +class HTTPClient + CookieManager = WebAgent::CookieManager +end unless defined?(HTTPClient::CookieManager) diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/jsonclient.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/jsonclient.rb new file mode 100755 index 0000000..fa099b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/jsonclient.rb @@ -0,0 +1,63 @@ +require 'httpclient' +require 'json' + +# JSONClient auto-converts Hash <-> JSON in request and response. +# * For POST or PUT request, convert Hash body to JSON String with 'application/json; charset=utf-8' header. 
+# * For response, convert JSON String to Hash when content-type is '(application|text)/(x-)?json' +class JSONClient < HTTPClient + CONTENT_TYPE_JSON_REGEX = /(application|text)\/(x-)?json/i + CONTENT_TYPE_JSON = 'application/json; charset=utf-8' + + attr_reader :content_type_json_request + attr_reader :content_type_json_response_regex + + def initialize(*args) + super + @content_type_json_request = CONTENT_TYPE_JSON + @content_type_json_response_regex = CONTENT_TYPE_JSON_REGEX + end + + def post(uri, *args, &block) + request(:post, uri, argument_to_hash_for_json(args), &block) + end + + def put(uri, *args, &block) + request(:put, uri, argument_to_hash_for_json(args), &block) + end + + def request(method, uri, *args, &block) + res = super + if @content_type_json_response_regex =~ res.content_type + res = wrap_json_response(res) + end + res + end + +private + + def argument_to_hash_for_json(args) + hash = argument_to_hash(args, :body, :header, :follow_redirect) + if hash[:body].is_a?(Hash) + hash[:header] = json_header(hash[:header]) + hash[:body] = JSON.generate(hash[:body]) + end + hash + end + + def json_header(header) + header ||= {} + if header.is_a?(Hash) + header['Content-Type'] = @content_type_json_request + else + header << ['Content-Type', @content_type_json_request] + end + header + end + + def wrap_json_response(original) + res = ::HTTP::Message.new_response(JSON.parse(original.content)) + res.http_header = original.http_header + res.previous = original + res + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/oauthclient.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/oauthclient.rb new file mode 100644 index 0000000..f9bb9d0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/lib/oauthclient.rb @@ -0,0 +1,111 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'httpclient' + +module HTTP + class Message + attr_accessor :oauth_params + end +end + + +# OAuthClient provides OAuth related methods in addition to HTTPClient. +# +# See sample/ dir how to use OAuthClient. There are sample clients for Twitter, +# FriendFeed and Google Buzz. +class OAuthClient < HTTPClient + + # HTTPClient::OAuth::Config:: OAuth configurator. + attr_accessor :oauth_config + + # Creates a OAuthClient instance which provides OAuth related methods in + # addition to HTTPClient. + # + # Method signature is as same as HTTPClient. See HTTPClient.new + def initialize(*arg) + super + @oauth_config = HTTPClient::OAuth::Config.new + self.www_auth.oauth.set_config(nil, @oauth_config) + self.www_auth.oauth.challenge(nil) + self.strict_response_size_check = true + end + + # Get request token. + # uri:: URI for request token. + # callback:: callback String. This can be nil for OAuth 1.0a + # param:: Additional query parameter Hash. + # + # It returns a HTTP::Message instance as a response. When the request is made + # successfully, you can retrieve a pair of request token and secret like + # following; + # res = client.get_request_token(...) 
+ # token = res.oauth_params['oauth_token'] + # secret = res.oauth_params['oauth_token_secret'] + def get_request_token(uri, callback = nil, param = nil) + oauth_config.token = nil + oauth_config.secret = nil + oauth_config.callback = callback + oauth_config.verifier = nil + res = request(oauth_config.http_method, uri, param) + filter_response(res) + res + end + + # Get access token. + # uri:: URI for request token. + # request_token:: a request token String. See get_access_token. + # request_token_secret:: a request secret String. See get_access_token. + # verifier:: a verifier tag String. + # + # When the request succeeds and the server returns a pair of access token + # and secret, oauth_config.token and oauth_token.secret are updated with + # the access token. Then you can call OAuthClient#get, #post, #delete, etc. + # All requests are signed. + def get_access_token(uri, request_token, request_token_secret, verifier = nil) + oauth_config.token = request_token + oauth_config.secret = request_token_secret + oauth_config.callback = nil + oauth_config.verifier = verifier + res = request(oauth_config.http_method, uri) + filter_response(res) + oauth_config.verifier = nil + res + end + + # Parse response and returns a Hash. + def get_oauth_response(res) + enc = res.header['content-encoding'] + body = nil + if enc and enc[0] and enc[0].downcase == 'gzip' + body = Zlib::GzipReader.wrap(StringIO.new(res.content)) { |gz| gz.read } + else + body = res.content + end + body.split('&').inject({}) { |r, e| + key, value = e.split('=', 2) + r[unescape(key)] = unescape(value) + r + } + end + +private + + def unescape(escaped) + escaped ? ::HTTP::Message.unescape(escaped) : nil + end + + def filter_response(res) + if res.status == 200 + if res.oauth_params = get_oauth_response(res) + oauth_config.token = res.oauth_params['oauth_token'] + oauth_config.secret = res.oauth_params['oauth_token_secret'] + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/async.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/async.rb new file mode 100644 index 0000000..283ddc8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/async.rb @@ -0,0 +1,8 @@ +require 'httpclient' + +c = HTTPClient.new +conn = c.get_async("http://dev.ctor.org/") +io = conn.pop.content +while str = io.read(40) + p str +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/auth.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/auth.rb new file mode 100644 index 0000000..7ec9ab8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/auth.rb @@ -0,0 +1,11 @@ +require 'httpclient' + +c = HTTPClient.new +c.debug_dev = STDOUT + +# for Proxy authentication: supports Basic, Negotiate and NTLM. +#c.set_proxy_auth("admin", "admin") + +# for WWW authentication: supports Basic, Digest and Negotiate. 
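+# (Added note: the first argument to set_auth is the protection-space URI; the stored
+# credentials are offered for requests whose URL falls under that URI, or for any URL
+# when nil is given.)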
+c.set_auth("http://dev.ctor.org/http-access2/", "user", "user") +p c.get("http://dev.ctor.org/http-access2/login") diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/cookie.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/cookie.rb new file mode 100644 index 0000000..606bfd3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/cookie.rb @@ -0,0 +1,18 @@ +#!/usr/bin/env ruby + +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +proxy = ENV['HTTP_PROXY'] +clnt = HTTPClient.new(proxy) +clnt.set_cookie_store("cookie.dat") +clnt.debug_dev = STDOUT if $DEBUG + +while urlstr = ARGV.shift + response = clnt.get(urlstr){ |data| + print data + } + p response.contenttype +end + +clnt.save_cookie_store diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/dav.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/dav.rb new file mode 100644 index 0000000..61862cf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/dav.rb @@ -0,0 +1,103 @@ +require 'uri' +require 'httpclient' + +class DAV + attr_reader :headers + + def initialize(uri = nil) + @uri = nil + @headers = {} + open(uri) if uri + proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] || nil + @client = HTTPClient.new(proxy) + end + + def open(uri) + @uri = if uri.is_a?(URI) + uri + else + URI.parse(uri) + end + end + + def set_basic_auth(user_id, passwd) + @client.set_basic_auth(@uri, user_id, passwd) + end + + # TODO: propget/propset support + + def propfind(target) + target_uri = @uri + target + res = @client.propfind(target_uri) + res.body.content + end + + def get(target, local = nil) + local ||= target + target_uri = @uri + target + if FileTest.exist?(local) + raise RuntimeError.new("File #{ local } exists.") + end + f = File.open(local, "wb") + res = @client.get(target_uri, nil, @headers) do |data| + f << data + end + f.close + STDOUT.puts("#{ res.header['content-length'][0] } bytes saved to file #{ target }.") + end + + def debug_dev=(dev) + @client.debug_dev = dev + end + + def get_content(target) + target_uri = @uri + target + @client.get_content(target_uri, nil, @headers) + end + + def put_content(target, content) + target_uri = @uri + target + res = @client.put(target_uri, content, @headers) + if res.status < 200 or res.status >= 300 + raise "HTTP PUT failed: #{res.inspect}" + end + end + + class Mock + attr_reader :headers + + def initialize(uri = nil) + @uri = nil + @headers = {} + open(uri) if uri + + @cache = {} + end + + def open(uri) + @uri = uri.is_a?(URI) ? 
uri : URI.parse(uri) + end + + def set_basic_auth(user_id, passwd) + # ignore + end + + def propfind(target) + # not found + nil + end + + def get(target, local = nil) + # ignore + end + + def get_content(target) + @cache[target] + end + + def put_content(target, content) + @cache[target] = content + nil + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/howto.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/howto.rb new file mode 100644 index 0000000..c5e24ee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/howto.rb @@ -0,0 +1,49 @@ +#!/usr/bin/env ruby + +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +proxy = ENV['HTTP_PROXY'] +clnt = HTTPClient.new(proxy) +clnt.set_cookie_store("cookie.dat") +target = ARGV.shift || "http://localhost/foo.cgi" + +puts +puts '= GET content directly' +puts clnt.get_content(target) + +puts '= GET result object' +result = clnt.get(target) +puts '== Header object' +p result.header +puts "== Content-type" +p result.contenttype +puts '== Body object' +p result.body +puts '== Content' +print result.content +puts '== GET with Block' +clnt.get(target) do |str| + puts str +end + +puts +puts '= GET with query' +puts clnt.get(target, { "foo" => "bar", "baz" => "quz" }).content + +puts +puts '= GET with query 2' +puts clnt.get(target, [["foo", "bar1"], ["foo", "bar2"]]).content + +clnt.debug_dev = STDERR +puts +puts '= GET with extra header' +puts clnt.get(target, nil, { "SOAPAction" => "HelloWorld" }).content + +puts +puts '= GET with extra header 2' +puts clnt.get(target, nil, [["Accept", "text/plain"], ["Accept", "text/html"]]).content + +clnt.debug_dev = nil + +clnt.save_cookie_store diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/jsonclient.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/jsonclient.rb new file mode 100644 index 0000000..4a12f3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/jsonclient.rb @@ -0,0 +1,67 @@ +require 'httpclient' +require 'json' + +module HTTP + class Message + # Returns JSON object of message body + alias original_content content + def content + if JSONClient::CONTENT_TYPE_JSON_REGEX =~ content_type + JSON.parse(original_content) + else + original_content + end + end + end +end + + +# JSONClient provides JSON related methods in addition to HTTPClient. 
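+#
+# Illustrative usage of this sample class (added; the endpoint URL is hypothetical and
+# assumed to return an application/json response, which the HTTP::Message patch above parses):
+#
+#   client = JSONClient.new
+#   res = client.post('http://example.com/items', :body => { 'name' => 'ruby' })
+#   puts res.content['name'] if res.ok?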
+class JSONClient < HTTPClient + CONTENT_TYPE_JSON_REGEX = /(application|text)\/(x-)?json/i + + attr_accessor :content_type_json + + class JSONRequestHeaderFilter + attr_accessor :replace + + def initialize(client) + @client = client + @replace = false + end + + def filter_request(req) + req.header['content-type'] = @client.content_type_json if @replace + end + + def filter_response(req, res) + @replace = false + end + end + + def initialize(*args) + super + @header_filter = JSONRequestHeaderFilter.new(self) + @request_filter << @header_filter + @content_type_json = 'application/json; charset=utf-8' + end + + def post(uri, *args, &block) + @header_filter.replace = true + request(:post, uri, jsonify(argument_to_hash(args, :body, :header, :follow_redirect)), &block) + end + + def put(uri, *args, &block) + @header_filter.replace = true + request(:put, uri, jsonify(argument_to_hash(args, :body, :header)), &block) + end + +private + + def jsonify(hash) + if hash[:body] && hash[:body].is_a?(Hash) + hash[:body] = JSON.generate(hash[:body]) + end + hash + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_buzz.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_buzz.rb new file mode 100644 index 0000000..15205e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_buzz.rb @@ -0,0 +1,57 @@ +require 'oauthclient' +require 'zlib' +require 'stringio' + +# Get your own consumer token from http://code.google.com/apis/accounts/docs/RegistrationForWebAppsAuto.html +consumer_key = nil +consumer_secret = nil + +callback = 'http://localhost/' # should point somewhere else... +scope = 'https://www.googleapis.com/auth/buzz' +request_token_url = 'https://www.google.com/accounts/OAuthGetRequestToken' +access_token_url = 'https://www.google.com/accounts/OAuthGetAccessToken' + +STDOUT.sync = true + +# create OAuth client. +client = OAuthClient.new +client.oauth_config.consumer_key = consumer_key +client.oauth_config.consumer_secret = consumer_secret +client.oauth_config.signature_method = 'HMAC-SHA1' +client.oauth_config.http_method = :get # Twitter does not allow :post +client.debug_dev = STDERR if $DEBUG + +# Get request token. +res = client.get_request_token(request_token_url, callback, :scope => scope) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +token = res.oauth_params['oauth_token'] +secret = res.oauth_params['oauth_token_secret'] +raise if token.nil? or secret.nil? + +# You need to confirm authorization out of band. +puts +puts "Go here and do confirm: https://www.google.com/buzz/api/auth/OAuthAuthorizeToken?oauth_token=#{token}&domain=#{consumer_key}&scope=#{scope}" +puts "Type oauth_verifier (if given) and hit [enter] to go" +require 'cgi' +verifier = CGI.unescape(gets.chomp) +verifier = nil if verifier.empty? + +# Get access token. +res = client.get_access_token(access_token_url, token, secret, verifier) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +id = res.oauth_params['user_id'] + +puts +puts "Access token usage example" +puts "Hit [enter] to go" +gets + +# Access to a protected resource. 
+# @consumption requires Buzz API +puts client.get_content("https://www.googleapis.com/buzz/v1/activities/@me/@consumption", :alt => :json, :prettyprint => true) diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_friendfeed.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_friendfeed.rb new file mode 100644 index 0000000..02d3496 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_friendfeed.rb @@ -0,0 +1,59 @@ +require 'oauthclient' + +# Get your own consumer token from http://friendfeed.com/api/applications +consumer_key = 'EDIT HERE' +consumer_secret = 'EDIT HERE' + +request_token_url = 'https://friendfeed.com/account/oauth/request_token' +oob_authorize_url = 'https://friendfeed.com/account/oauth/authorize' +access_token_url = 'https://friendfeed.com/account/oauth/access_token' + +STDOUT.sync = true + +# create OAuth client. +client = OAuthClient.new +client.oauth_config.consumer_key = consumer_key +client.oauth_config.consumer_secret = consumer_secret +client.oauth_config.signature_method = 'HMAC-SHA1' +client.oauth_config.http_method = :get # FriendFeed does not allow :post +client.debug_dev = STDERR if $DEBUG + +# Get request token. +res = client.get_request_token(request_token_url) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +token = res.oauth_params['oauth_token'] +secret = res.oauth_params['oauth_token_secret'] +raise if token.nil? or secret.nil? + +# You need to confirm authorization out of band. +puts +puts "Go here and do confirm: #{oob_authorize_url}?oauth_token=#{token}" +puts "Hit [enter] to go" +gets + +# Get access token. +# FYI: You may need to re-construct OAuthClient instance here. +# In normal web app flow, getting access token and getting request token +# must be done in different HTTP requests. +# client = OAuthClient.new +# client.oauth_config.consumer_key = consumer_key +# client.oauth_config.consumer_secret = consumer_secret +# client.oauth_config.signature_method = 'HMAC-SHA1' +# client.oauth_config.http_method = :get # Twitter does not allow :post +res = client.get_access_token(access_token_url, token, secret) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +username = res.oauth_params['username'] + +puts +puts "Access token usage example" +puts "Hit [enter] to go" +gets + +# Access to a protected resource. (user profile) +puts client.get("http://friendfeed-api.com/v2/feedinfo/#{username}?format=json") diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_twitter.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_twitter.rb new file mode 100644 index 0000000..731645b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/oauth_twitter.rb @@ -0,0 +1,61 @@ +require 'oauthclient' + +# Get your own consumer token from http://twitter.com/apps +consumer_key = 'EDIT HERE' +consumer_secret = 'EDIT HERE' + +callback = ARGV.shift # can be nil for OAuth 1.0. (not 1.0a) +request_token_url = 'https://api.twitter.com/oauth/request_token' +oob_authorize_url = 'https://api.twitter.com/oauth/authorize' +access_token_url = 'https://api.twitter.com/oauth/access_token' + +STDOUT.sync = true + +# create OAuth client. 
+client = OAuthClient.new +client.oauth_config.consumer_key = consumer_key +client.oauth_config.consumer_secret = consumer_secret +client.oauth_config.signature_method = 'HMAC-SHA1' +client.oauth_config.http_method = :get # Twitter does not allow :post +client.debug_dev = STDERR if $DEBUG + +# Get request token. +res = client.get_request_token(request_token_url, callback) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +token = res.oauth_params['oauth_token'] +secret = res.oauth_params['oauth_token_secret'] +raise if token.nil? or secret.nil? + +# You need to confirm authorization out of band. +puts +puts "Go here and do confirm: #{oob_authorize_url}?oauth_token=#{token}" +puts "Type oauth_verifier/PIN (if given) and hit [enter] to go" +verifier = gets.chomp +verifier = nil if verifier.empty? + +# Get access token. +# FYI: You may need to re-construct OAuthClient instance here. +# In normal web app flow, getting access token and getting request token +# must be done in different HTTP requests. +# client = OAuthClient.new +# client.oauth_config.consumer_key = consumer_key +# client.oauth_config.consumer_secret = consumer_secret +# client.oauth_config.signature_method = 'HMAC-SHA1' +# client.oauth_config.http_method = :get # Twitter does not allow :post +res = client.get_access_token(access_token_url, token, secret, verifier) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +id = res.oauth_params['user_id'] + +puts +puts "Access token usage example" +puts "Hit [enter] to go" +gets + +# Access to a protected resource. (DM) +puts client.get("https://api.twitter.com/1.1/direct_messages.json") diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/0cert.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/0cert.pem new file mode 100644 index 0000000..9dfce02 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/0cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjzCCAnegAwIBAgIBADANBgkqhkiG9w0BAQUFADAtMQswCQYDVQQGEwJDWjEN +MAsGA1UEChMEUnVieTEPMA0GA1UEAxMGUnVieUNBMB4XDTAzMDUzMTAyNDcyOFoX +DTA1MDUzMDAyNDcyOFowLTELMAkGA1UEBhMCQ1oxDTALBgNVBAoTBFJ1YnkxDzAN +BgNVBAMTBlJ1YnlDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANfw +JSR2OxME6WBRYekhvN1M7uZwJLC3qHhRtBm583x7MS3yzF/HwFNH1oAmOmzUcDSz +Y9OmTDVIMU9b0bSYHuu8KswrRWmx/iEhgU/hODS1MQKi+uHoMTtY/dVXwTLAfw5d +UMPQ9F5EhiV57sKWrS7vIVBtpxUNgfJW61FP+Ru3Lr8uhUgXPECHck8fjFu8w2Sw +JQBQ8ePrfKBiTHpOzKmDVXXWzVYzIQN0zQfpu/FSjOJ4xWV3wmfltN4FK+UkpALW +3RKsNFx+Pmji0fr2/PeEPhzKkhWk5b+pYrIlTNkagS7u8EoeLtY1y3LSZvopbcPI +l0QFQHCMtxS7ngC6wsECAwEAAaOBuTCBtjAPBgNVHRMECDAGAQH/AgEAMC0GCWCG +SAGG+EIBDQQgFh5HZW5lcmF0ZWQgYnkgT3BlblNTTCBmb3IgUnVieS4wHQYDVR0O +BBYEFA2IpXrgDnpJ9p6bfBmtM6j0IejmMFUGA1UdIwROMEyAFA2IpXrgDnpJ9p6b +fBmtM6j0IejmoTGkLzAtMQswCQYDVQQGEwJDWjENMAsGA1UEChMEUnVieTEPMA0G +A1UEAxMGUnVieUNBggEAMA0GCSqGSIb3DQEBBQUAA4IBAQB1b4iTezqgZj6FhcAc +0AtleAc8TpUn8YOX6zSlHG6I7tKcLfnWen9eFs4Jx73Bup5wHvcrHF6U+/nAdpb5 +R7lkNbjWFWwoi5cQC36mh4ym8GWhUZUf8362yPzimbdKMBIiDoIBY8NyIBBORU2m +BlXsHr0dqGpeTmNnFhgi1FCN+F/VitplOUP9E8NzsgLS/GY9HO160HbPXoZc4yp5 +KFweqmyhbxZvkrrD6LanvB23cJP+TeHzA5mLJtIBciFq+AJe+cDTlJYjtLcsxkAy +Evojd//5gePjgSz3gbzU5Zq0/tI2T0rQYnmySoSQ1u2+i5FrMvEjh/H6tZtQ6nO0 +ROTL +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/0key.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/0key.pem new file mode 100644 index 0000000..71b7fe6 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/0key.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,BC9A85421E11A052 + +dlFN8woboqu1mLWHSe6HnBCzrJUFw1NLv4sJM8MFPcSGkPEJoqceRQ5XFFlwnhwT +WStidVQLAaCqWxPlQbvKs/eW2yEZRX3JhNHlGkuZUbAuqC7FGcIPCFtXmshmR3/K +wSxM3DuEBA31kHoyH/DGPEOcp/L9M95fU2w/1Zz895nXgECdlYfMB7KEXJ7S0TVD +NvcH6dcNS2I56ikUfRd9MA+tTtJTDsolBii3HpNkI+i8GeAAznfuyhCa+RasbMnR +3SRHSgiZpK01JkepKEumG1iyGXBE4GMlF3dK9TL4UTfK7PjbW1BRGVdmGayA5L8X +vpn9uIMbpFUD7nU/cSs6+ECYy9Fdq/d8oE7YRVpqGA2mX+Vvjzc6ZByiW8eH8uYW +d1YCySAzttQUcDFx4YlpbQN+NSE67amGY/nkXn3CnQuB2uDiUm6818p1jjhEbIj6 +SQpNgpuhHPhyawMuC98BvxEXoSTFRmXx5h+Qgo8nfCBUpGUCA8STuJG+EebIXMHH +yNdZGYMdkkuzow1G+bcHaIV4pgD0hOX6D+AzhO8/11gsJNiZHWAkxRtXres0B7JQ +BTv6mrpmRsr1TbY7K55y5QePkvroiqI4BX2//RgAcUw+aBzPCSt87YU55pPxxDCv +KnoMQJapUIlXgYBs+2GBiFQlr3dAnzduXrXkZa/TuE0853QDDnKWN3aWapW0EieH +sDxYz6kZ6c/vjDJtNjjgysKvi2ZFnJMk92fi1sNd2MrH9w1vSmjHw6+b9REH+r6K +YCcMzCUnIV5Y5jgbnrY5jWlB5Jt5PlU+QDFTBNsdctyoES3h5yQh48hcpnJOy4YT +tn9jEmIAYM7QZtGZrY5yiZRZbM5tLL7VeXA0M7N0ivjZUVP4NBUV1IFhLpeus3Yo +yXB99Sib/M8fqlmnRlyKKaRseB9a80/LJdLJC7Q1aJG9JYTTpumydppDzvwwUFV/ +yHQibPzWhnHTElyXSGeWWQ/4gKJXJFkSyrBeKt/DcDggEnpsVw7kAeo0KJLWvBq2 +0CwnWxPsseIYSLdwE0wOZTnWvHC6viCCqxaFwROa+bHsgCtRR8xwOKVg0RQRexQi +SKsCEyq4hfKH3Kd7KkI5c4iCHrNDxQiYVAHGo0kuVWPNP9PH8XrRuP7f2Aq6ObEU +cGLN+OKlzChKJQZZzdthGTjO52cZERokS7etLs5yPNM27CUZIy2gUsWwANG700ov +DYi4S9j4y2fK9qVFGuNmZ7nNQ6juHIN8ZpZObmzGz/GEsVy8zYGV7jH2bC8fhCg1 +wiTn0CHyfI0AfJ5zQMQ48z/ATI5+/pP5DuJ4kPLYU90EdIlIQ/9zQDRxVPvUalas +kskX8qdsbILaLqKzqvPa6jkhncV7SVQPhxIrs6vU2RWrsU1Qw9wHhzaITu9yb1l1 +s8uialiJWnWOwkhIAUULOoGRnBB7U6sseFh7LcYHc2sZYJzQQO3g14ANvtsu/ILT +abUPXtGHSUlI4kUjI82NEkyQz/pDD9VkhTefiFHNymIkUaZBulmR5QdcEcQn6bku +J/P3M/T7CtsCPdBUrzDI71eAYqXPjikg+8unKWk9c/p+V7wXtvsdgJf3+xht/YUQ +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/1000cert.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/1000cert.pem new file mode 100644 index 0000000..d7eeea6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/1000cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwLTELMAkGA1UEBhMCQ1ox +DTALBgNVBAoTBFJ1YnkxDzANBgNVBAMTBlJ1YnlDQTAeFw0wMzA1MzEwMzUwNDFa +Fw0wNDA1MzAwMzUwNDFaMDAxCzAJBgNVBAYTAkNaMQ0wCwYDVQQKEwRSdWJ5MRIw +EAYDVQQDEwlsb2NhbGhvc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALhs +fh4i1c+K57vFG7SUDfdxuSlbPUqaV0sbiuvWb0f7B2T7bHicaIRsDYW7PQRLLwwR +Pd+aKg3KuwhWN47faRam19Z3yWCD7Tg+BhXDqlXnz6snnr4APpAxc22kJKjzuil6 +sp+QTkl/EFKI3+ocDur1UB+kSOmTzsDmepaWUZwTAgMBAAGjgbMwgbAwCQYDVR0T +BAIwADAtBglghkgBhvhCAQ0EIBYeR2VuZXJhdGVkIGJ5IE9wZW5TU0wgZm9yIFJ1 +YnkuMB0GA1UdDgQWBBQlYESgTYdkTOYy02+/jGSqa+OpzjBVBgNVHSMETjBMgBQN +iKV64A56Sfaem3wZrTOo9CHo5qExpC8wLTELMAkGA1UEBhMCQ1oxDTALBgNVBAoT +BFJ1YnkxDzANBgNVBAMTBlJ1YnlDQYIBADANBgkqhkiG9w0BAQUFAAOCAQEAJh9v +ehhUv69oilVWGvGB6xCr8LgErnO9QdAyqJE2xBhbNaB3crjWDdQTz4UNvCQoJG/4 +Oa9Vp10vM8E0ZMVHer87WM9tPEOg09r38U/1c7gSYBkPSGQfeWtZNjQ1YOm6RDx4 +JJp9sp9v/CdBlVXaBQQd+MQFny7E+EkMHRfiv89KTfOs0wYdirLrM1C90CZUEj0i +cMcBdHzH5XcNpWB1ag4cNiUn2ljsaWlUpEg48gLe2FLJVPBio+iZnOm/C6KIwBMO +BCVxkZ6oIR87JT4xbr8SxRS9d/irhVU9MtGYwMe4MPSztefASdmEyj59ZFCLKQHV ++ltGb7/b7DetoT1spA== +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/1000key.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/1000key.pem new file mode 100644 index 0000000..34261ad --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/1000key.pem @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,5E9A9AC8F0F62A4A + +3/VDlc8E13xKZr+MXt+HVZCiuWN1gsVx1bTZE5fN5FVoJ45Bgy2zAegnqZiP1NNy +j/76Vy9Ru/G9HPh5QPbIPl+md+tOyJV2M23a+5jESk1tLuWt5lRqmTtHN000844l +uCgZPPhRV7nhYPC5f6Gqw/xDl/EZsElqJM/hl2JbqiVKfT5/1i0STU8LNkoaLWrJ +kQhc7hOR5ihmDPeD8mA99bmGD+UyyqzzLTtKvRbBObyi9dy7cQ5Q+iptQWTUuEKI ++W7b8f8/Iiin4JJZGpFuhQSx0ARjT0fuYNXddmz1L3Gu3sjzN1GvT2T3GlpiF4/7 +ERS8Q43zjoCP8nC2MTSvdNRRoPMBg2SDS2ZIq4GSiKsKjeN+GnPCycAMeZSr5yG6 +VMBJLoAJ7XIcl3se8gF6hH1AfhCzDaK/2pKLP9jH/W4g6VvUBazKEQCNbZXSTpQ4 +8EfvJBPpplFs3Zid6iwC/WjKhFBuBBfycwNJjXG9x1fMPkBM8HeiZXgrXoUXJMEP +RF05Beo0HPPEOPIxcG6EVmnpDvs8uC+xIQ6UE6DrLGK5TnR6kdz3BDpeAehujSss +wfZiOvuJQZQl+oovOH54pcwAwhicgRcNdIX47kHrXNL1vQMYTXte+ZzDGyoOXd0W +qf1CZbrjULT9nfJFWMMicTnLM/6iQx+3bTkXXvk0qP0qAoIPqtY4rwt6yHgq937A +LubDxudMWV0hqcnH8cBCPHfWdE4HELw4RcVXmQH43yHs1gwShyG9rTS+PCKoRr8u +bpssW6J0xJmilf1KprbNWJyof9i0CtSVOlUt6ttoinwqj8Me01dHqQ== +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/htdocs/index.html b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/htdocs/index.html new file mode 100644 index 0000000..6e871d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/htdocs/index.html @@ -0,0 +1,10 @@ + + + SSL test + + +

+ Verification succeeded? +

+ + diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/ssl_client.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/ssl_client.rb new file mode 100644 index 0000000..0c47520 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/ssl_client.rb @@ -0,0 +1,22 @@ +#!/usr/bin/env ruby + +$:.unshift(File.join('..', '..', 'lib')) +require 'httpclient' + +url = ARGV.shift || 'https://localhost:8808/' +uri = URI.parse(url) + +#ca_file = "0cert.pem" +#crl_file = '0crl.pem' + +# create CA's cert in pem format and run 'c_rehash' in trust_certs dir. before +# using this. +ca_path = File.join(File.dirname(File.expand_path(__FILE__)), "trust_certs") + +proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] || nil +h = HTTPClient.new(proxy) +#h.ssl_config.add_trust_ca(ca_file) +#h.ssl_config.add_crl(crl_file) +h.ssl_config.add_trust_ca(ca_path) + +print h.get_content(url) diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/webrick_httpsd.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/webrick_httpsd.rb new file mode 100644 index 0000000..1994f58 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/ssl/webrick_httpsd.rb @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby + +require 'webrick/https' +require 'getopts' + +getopts nil, 'r:', 'p:8808' + +dir = File::dirname(File::expand_path(__FILE__)) + +# Pass phrase of '1000key.pem' is '1000'. +data = open(File::join(dir, "1000key.pem")){|io| io.read } +pkey = OpenSSL::PKey::RSA.new(data) +data = open(File::join(dir, "1000cert.pem")){|io| io.read } +cert = OpenSSL::X509::Certificate.new(data) + +s = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Port => $OPT_p.to_i, + :Logger => nil, + :DocumentRoot => $OPT_r || File::join(dir, "/htdocs"), + :SSLEnable => true, + :SSLVerifyClient => ::OpenSSL::SSL::VERIFY_NONE, + :SSLCertificate => cert, + :SSLPrivateKey => pkey, + :SSLCertName => nil, + :SSLCACertificateFile => "all.pem" +) +trap("INT"){ s.shutdown } +s.start diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/stream.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/stream.rb new file mode 100644 index 0000000..3d1478b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/stream.rb @@ -0,0 +1,21 @@ +$:.unshift(File.join('..', 'lib')) +require "httpclient" + +c = HTTPClient.new + +piper, pipew = IO.pipe +conn = c.post_async("http://localhost:8080/stream", piper) + +Thread.new do + res = conn.pop + while str = res.content.read(10) + p str + end +end + +p "type here" +while line = STDIN.gets + pipew << line +end +pipew.close +sleep 5 diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/thread.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/thread.rb new file mode 100644 index 0000000..594cc97 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/thread.rb @@ -0,0 +1,27 @@ +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +urlstr = ARGV.shift + +proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] +h = HTTPClient.new(proxy) + +count = 20 + +res = [] +g = [] +for i in 0..count + g << Thread.new { + res[i] = h.get(urlstr) + } +end + +g.each do |th| + th.join +end + +for i in 0..(count - 1) + raise unless (res[i].content == res[i + 1].content) +end + +puts 'ok' diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/wcat.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/wcat.rb new file mode 100644 index 0000000..0781b6e --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/sample/wcat.rb @@ -0,0 +1,21 @@ +#!/usr/bin/env ruby + +# wcat for http-access2 +# Copyright (C) 2001 TAKAHASHI Masayoshi + +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +if ENV['HTTP_PROXY'] + h = HTTPClient.new(ENV['HTTP_PROXY']) +else + h = HTTPClient.new() +end + +while urlstr = ARGV.shift + response = h.get(urlstr){ |data| + print data + } + p response.contenttype + p response.peer_cert if /^https/i =~ urlstr +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/ca-chain.pem b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/ca-chain.pem new file mode 100644 index 0000000..c642a41 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/ca-chain.pem @@ -0,0 +1,44 @@ +-----BEGIN CERTIFICATE----- +MIID0DCCArigAwIBAgIBADANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDIzMloXDTM2MDEyMjAwNDIzMlowPDELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQswCQYDVQQDDAJDQTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANbv0x42BTKFEQOE+KJ2XmiSdZpR +wjzQLAkPLRnLB98tlzs4xo+y4RyY/rd5TT9UzBJTIhP8CJi5GbS1oXEerQXB3P0d +L5oSSMwGGyuIzgZe5+vZ1kgzQxMEKMMKlzA73rbMd4Jx3u5+jdbP0EDrPYfXSvLY +bS04n2aX7zrN3x5KdDrNBfwBio2/qeaaj4+9OxnwRvYP3WOvqdW0h329eMfHw0pi +JI0drIVdsEqClUV4pebT/F+CPUPkEh/weySgo9wANockkYu5ujw2GbLFcO5LXxxm +dEfcVr3r6t6zOA4bJwL0W/e6LBcrwiG/qPDFErhwtgTLYf6Er67SzLyA66UCAwEA +AaOB3DCB2TAPBgNVHRMBAf8EBTADAQH/MDEGCWCGSAGG+EIBDQQkFiJSdWJ5L09w +ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBRJ7Xd380KzBV7f +USKIQ+O/vKbhDzAOBgNVHQ8BAf8EBAMCAQYwZAYDVR0jBF0wW4AUSe13d/NCswVe +31EiiEPjv7ym4Q+hQKQ+MDwxCzAJBgNVBAYMAkpQMRIwEAYDVQQKDAlKSU4uR1Iu +SlAxDDAKBgNVBAsMA1JSUjELMAkGA1UEAwwCQ0GCAQAwDQYJKoZIhvcNAQEFBQAD +ggEBAIu/mfiez5XN5tn2jScgShPgHEFJBR0BTJBZF6xCk0jyqNx/g9HMj2ELCuK+ +r/Y7KFW5c5M3AQ+xWW0ZSc4kvzyTcV7yTVIwj2jZ9ddYMN3nupZFgBK1GB4Y05GY +MJJFRkSu6d/Ph5ypzBVw2YMT/nsOo5VwMUGLgS7YVjU+u/HNWz80J3oO17mNZllj +PvORJcnjwlroDnS58KoJ7GDgejv3ESWADvX1OHLE4cRkiQGeLoEU4pxdCxXRqX0U +PbwIkZN9mXVcrmPHq8MWi4eC/V7hnbZETMHuWhUoiNdOEfsAXr3iP4KjyyRdwc7a +d/xgcK06UVQRL/HbEYGiQL056mc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDaDCCAlCgAwIBAgIBATANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDMyN1oXDTM1MDEyMjAwNDMyN1owPzELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQ4wDAYDVQQDDAVTdWJDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ0Ou7AyRcRXnB/kVHv/6kwe +ANzgg/DyJfsAUqW90m7Lu1nqyug8gK0RBd77yU0w5HOAMHTVSdpjZK0g2sgx4Mb1 +d/213eL9TTl5MRVEChTvQr8q5DVG/8fxPPE7fMI8eOAzd98/NOAChk+80r4Sx7fC +kGVEE1bKwY1MrUsUNjOY2d6t3M4HHV3HX1V8ShuKfsHxgCmLzdI8U+5CnQedFgkm +3e+8tr8IX5RR1wA1Ifw9VadF7OdI/bGMzog/Q8XCLf+WPFjnK7Gcx6JFtzF6Gi4x +4dp1Xl45JYiVvi9zQ132wu8A1pDHhiNgQviyzbP+UjcB/tsOpzBQF8abYzgEkWEC +AwEAAaNyMHAwDwYDVR0TAQH/BAUwAwEB/zAxBglghkgBhvhCAQ0EJBYiUnVieS9P +cGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUlCjXWLsReYzH +LzsxwVnCXmKoB/owCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCJ/OyN +rT8Cq2Y+G2yA/L1EMRvvxwFBqxavqaqHl/6rwsIBFlB3zbqGA/0oec6MAVnYynq4 +c4AcHTjx3bQ/S4r2sNTZq0DH4SYbQzIobx/YW8PjQUJt8KQdKMcwwi7arHP7A/Ha +LKu8eIC2nsUBnP4NhkYSGhbmpJK+PFD0FVtD0ZIRlY/wsnaZNjWWcnWF1/FNuQ4H +ySjIblqVQkPuzebv3Ror6ZnVDukn96Mg7kP4u6zgxOeqlJGRe1M949SS9Vudjl8X +SF4aZUUB9pQGhsqQJVqaz2OlhGOp9D0q54xko/rekjAIcuDjl1mdX4F2WRrzpUmZ +uY/bPeOBYiVsOYVe +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/ca.cert 
b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/ca.cert new file mode 100644 index 0000000..bcabbee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/ca.cert @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0DCCArigAwIBAgIBADANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDIzMloXDTM2MDEyMjAwNDIzMlowPDELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQswCQYDVQQDDAJDQTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANbv0x42BTKFEQOE+KJ2XmiSdZpR +wjzQLAkPLRnLB98tlzs4xo+y4RyY/rd5TT9UzBJTIhP8CJi5GbS1oXEerQXB3P0d +L5oSSMwGGyuIzgZe5+vZ1kgzQxMEKMMKlzA73rbMd4Jx3u5+jdbP0EDrPYfXSvLY +bS04n2aX7zrN3x5KdDrNBfwBio2/qeaaj4+9OxnwRvYP3WOvqdW0h329eMfHw0pi +JI0drIVdsEqClUV4pebT/F+CPUPkEh/weySgo9wANockkYu5ujw2GbLFcO5LXxxm +dEfcVr3r6t6zOA4bJwL0W/e6LBcrwiG/qPDFErhwtgTLYf6Er67SzLyA66UCAwEA +AaOB3DCB2TAPBgNVHRMBAf8EBTADAQH/MDEGCWCGSAGG+EIBDQQkFiJSdWJ5L09w +ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBRJ7Xd380KzBV7f +USKIQ+O/vKbhDzAOBgNVHQ8BAf8EBAMCAQYwZAYDVR0jBF0wW4AUSe13d/NCswVe +31EiiEPjv7ym4Q+hQKQ+MDwxCzAJBgNVBAYMAkpQMRIwEAYDVQQKDAlKSU4uR1Iu +SlAxDDAKBgNVBAsMA1JSUjELMAkGA1UEAwwCQ0GCAQAwDQYJKoZIhvcNAQEFBQAD +ggEBAIu/mfiez5XN5tn2jScgShPgHEFJBR0BTJBZF6xCk0jyqNx/g9HMj2ELCuK+ +r/Y7KFW5c5M3AQ+xWW0ZSc4kvzyTcV7yTVIwj2jZ9ddYMN3nupZFgBK1GB4Y05GY +MJJFRkSu6d/Ph5ypzBVw2YMT/nsOo5VwMUGLgS7YVjU+u/HNWz80J3oO17mNZllj +PvORJcnjwlroDnS58KoJ7GDgejv3ESWADvX1OHLE4cRkiQGeLoEU4pxdCxXRqX0U +PbwIkZN9mXVcrmPHq8MWi4eC/V7hnbZETMHuWhUoiNdOEfsAXr3iP4KjyyRdwc7a +d/xgcK06UVQRL/HbEYGiQL056mc= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client-pass.key b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client-pass.key new file mode 100644 index 0000000..c332d2a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client-pass.key @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,DE0F454B166A4941 + +Kub+uiaDkZAmUP2P1VKKB1tPcoJ/ZSs5sLckVv156XDfH+6OilEh+E4vXuKkJnW7 +KFVM/nKrKPxLtNmKha0yx2bqZeUfUdpwq1GqTve84v/oJDTOhBXPlKlkMvzzVhdC +IeJ61BgSt4ZVWSAcorae8yvDtUCtVoc0YonuiEno5bjEOWMuOu9iwviDIO+IePdY +mgIPkEyPQOY6/Ir3ImLdqmpPfVPnNxx5fIw9VXDfTqWfY3qHnGECx17ko4PCxhkN +IwnXU8E6r6XRpHV58t7JkM88eD0crpQpZ8Ki1zVPtHq8DfQLwQI+FGt6PBmeneVl +Dne6UPIaEDpd9f5X+Q7+2jZCBOsGntNh4+E7AwnG+G4IpleUG308DWsXZZpYhfLy +12WMzDlsaQ68qgO1a7rD+nOpIgUfIl7bdB242g7gWvXyVzZOGJIg/P3Fl6ydR7Al +afAQFH2L1YH7u9zJLIonMmVRz7VNUHwlVaPE18VGBbzwFOmZHj2THUUB3cOGfsC8 +FgQz0JVZT5t7fAS53KRXhH/mWEimcrKSvZJxOBwoknQDtHS517wMhyUco63UYEQq +2nkW6BD08Qc92xu14hWuWrActTtsJ3wyGSPMYbqo5QRvlnpaEzaQlMRXdBHYbSFJ +D5Eo2nXXqNPX7YbyIHh+cda80r9OwmH/gvXThQd79pMvNHPZ2TWnrlZF7YAdVxHH +etLrAVas2AxXs2LdhwFTI6dmxMv92gYz/WwMeZaNV7SJ4JIKHxGCmajv12cnGVh9 +qCxMIFcpISr3EMwEAnF0npfQ6Xp6rKFUXuEml036vE8= +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client.cert b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client.cert new file mode 100644 index 0000000..ad13c4b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDKDCCAhCgAwIBAgIBAjANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMTAzMTQ1OFoXDTM1MDEyMzAzMTQ1OFowZTELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMRAwDgYDVQQDDAdleGFtcGxl +MSIwIAYJKoZIhvcNAQkBDBNleGFtcGxlQGV4YW1wbGUub3JnMIGfMA0GCSqGSIb3 
+DQEBAQUAA4GNADCBiQKBgQDRWssrK8Gyr+500hpLjCGR3+AHL8/hEJM5zKi/MgLW +jTkvsgOwbYwXOiNtAbR9y4/ucDq7EY+cMUMHES4uFaPTcOaAV0aZRmk8AgslN1tQ +gNS6ew7/Luq3DcVeWkX8PYgR9VG0mD1MPfJ6+IFA5d3vKpdBkBgN4l46jjO0/2Xf +ewIDAQABo4GPMIGMMAwGA1UdEwEB/wQCMAAwMQYJYIZIAYb4QgENBCQWIlJ1Ynkv +T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFOFvay0H7lr2 +xUx6waYEV2bVDYQhMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYI +KwYBBQUHAwQwDQYJKoZIhvcNAQEFBQADggEBABd2dYWqbDIWf5sWFvslezxJv8gI +w64KCJBuyJAiDuf+oazr3016kMzAlt97KecLZDusGNagPrq02UX7YMoQFsWJBans +cDtHrkM0al5r6/WGexNMgtYbNTYzt/IwodISGBgZ6dsOuhznwms+IBsTNDAvWeLP +lt2tOqD8kEmjwMgn0GDRuKjs4EoboA3kMULb1p9akDV9ZESU3eOtpS5/G5J5msLI +9WXbYBjcjvkLuJH9VsJhb+R58Vl0ViemvAHhPilSl1SPWVunGhv6FcIkdBEi1k9F +e8BNMmsEjFiANiIRvpdLRbiGBt0KrKTndVfsmoKCvY48oCOvnzxtahFxfs8= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client.key b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client.key new file mode 100644 index 0000000..37bc62f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/client.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDRWssrK8Gyr+500hpLjCGR3+AHL8/hEJM5zKi/MgLWjTkvsgOw +bYwXOiNtAbR9y4/ucDq7EY+cMUMHES4uFaPTcOaAV0aZRmk8AgslN1tQgNS6ew7/ +Luq3DcVeWkX8PYgR9VG0mD1MPfJ6+IFA5d3vKpdBkBgN4l46jjO0/2XfewIDAQAB +AoGAZcz8llWErtsV3QB9gNb3S/PNADGjqBFjReva8n3jG2k4sZSibpwWTwUaTNtT +ZQgjSRKRvH1hk9XwffNAvXAQZNNkuj/16gO2oO45nyLj4dO365ujLptWnVIWDHOE +uN0GeiZO+VzcCisT0WCq4tvtLeH8svrxzA8cbXIEyOK7NiECQQDwo2zPFyKAZ/Cu +lDJ6zKT+RjfWwW7DgWzirAlTrt4ViMaW+IaDH29TmQpb4V4NuR3Xi+2Xl4oicu6S +36TW9+/FAkEA3rgfOQJuLlWSnw1RTGwvnC816a/W7iYYY7B+0U4cDbfWl7IoXT4y +M8nV/HESooviZLqBwzAYSoj3fFKYBKpGPwJAUO8GN5iWWA2dW3ooiDiv/X1sZmRk +dojfMFWgRW747tEzya8Ivq0h6kH8w+5GjeMG8Gn1nRiwsulo6Ckj7dEx6QJACyui +7UIQ8qP6GZ4aYMHgVW4Mvy7Bkeo5OO7GPYs0Xv/EdJFL8vlGnVBXOjUVoS9w6Gpu +TbLg1QQvnX2rADjmEwJANxZO2GUkaWGsEif8aGW0x5g/IdaMGG27pTWk5zqix7P3 +1UDrdo/JOXhptovhRi06EppIxAxYmbh9vd9VN8Itlw== +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/helper.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/helper.rb new file mode 100644 index 0000000..26bc4f9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/helper.rb @@ -0,0 +1,131 @@ +# -*- encoding: utf-8 -*- +begin + require 'simplecov' + require 'simplecov-rcov' + SimpleCov.formatter = SimpleCov::Formatter::RcovFormatter + SimpleCov.start +rescue LoadError +end +require 'test/unit' + +require 'httpclient' +require 'webrick' +require 'webrick/httpproxy.rb' +require 'logger' +require 'stringio' +require 'cgi' +require 'webrick/httputils' + + +module Helper + Port = 17171 + ProxyPort = 17172 + + def serverport + @serverport + end + + def proxyport + @proxyport + end + + def serverurl + "http://localhost:#{serverport}/" + end + + def proxyurl + "http://localhost:#{proxyport}/" + end + + def setup + @logger = Logger.new(STDERR) + @logger.level = Logger::Severity::FATAL + @proxyio = StringIO.new + @proxylogger = Logger.new(@proxyio) + @proxylogger.level = Logger::Severity::DEBUG + @server = @proxyserver = @client = nil + @server_thread = @proxyserver_thread = nil + @serverport = Port + @proxyport = ProxyPort + end + + def teardown + teardown_client if @client + teardown_proxyserver if @proxyserver + teardown_server if @server + end + + def setup_client + @client = HTTPClient.new + end + + def escape_noproxy + backup = HTTPClient::NO_PROXY_HOSTS.dup + HTTPClient::NO_PROXY_HOSTS.clear + yield + ensure + 
HTTPClient::NO_PROXY_HOSTS.replace(backup) + end + + def setup_proxyserver + @proxyserver = WEBrick::HTTPProxyServer.new( + :BindAddress => "localhost", + :Logger => @proxylogger, + :Port => 0, + :AccessLog => [] + ) + @proxyport = @proxyserver.config[:Port] + @proxyserver_thread = start_server_thread(@proxyserver) + end + + def teardown_client + @client.reset_all + end + + def teardown_server + @server.shutdown + #@server_thread.kill + end + + def teardown_proxyserver + @proxyserver.shutdown + #@proxyserver_thread.kill + end + + def start_server_thread(server) + t = Thread.new { + Thread.current.abort_on_exception = true + server.start + } + while server.status != :Running + Thread.pass + unless t.alive? + t.join + raise + end + end + t + end + + def params(str) + HTTP::Message.parse(str).inject({}) { |r, (k, v)| r[k] = v.first; r } + end + + def silent + begin + back, $VERBOSE = $VERBOSE, nil + yield + ensure + $VERBOSE = back + end + end + + def escape_env + env = {} + env.update(ENV) + yield + ensure + ENV.clear + ENV.update(env) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/htdigest b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/htdigest new file mode 100644 index 0000000..0a125d9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/htdigest @@ -0,0 +1 @@ +admin:auth:4302fe65caa32f27721949149ccd3083 diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/htpasswd b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/htpasswd new file mode 100644 index 0000000..70df50c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/htpasswd @@ -0,0 +1,2 @@ +admin:Qg266hq/YYKe2 +guest:gbPc4vPCH.h12 diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/jruby_ssl_socket/test_pemutils.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/jruby_ssl_socket/test_pemutils.rb new file mode 100644 index 0000000..560a1c6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/jruby_ssl_socket/test_pemutils.rb @@ -0,0 +1,32 @@ +require File.expand_path('helper', File.join(File.dirname(__FILE__), "..")) + + +class PEMUtilsTest < Test::Unit::TestCase + include Helper + + def setup + @raw_cert = "-----BEGIN CERTIFICATE-----\nMIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBCMRMwEQYKCZImiZPyLGQB\nGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJcnVieS1sYW5nMRAwDgYDVQQDDAdSdWJ5\nIENBMB4XDTE2MDgxMDE3MjEzNFoXDTE3MDgxMDE3MjEzNFowSzETMBEGCgmSJomT\n8ixkARkWA29yZzEZMBcGCgmSJomT8ixkARkWCXJ1YnktbGFuZzEZMBcGA1UEAwwQ\nUnVieSBjZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB\nAJCfsSXpSMpmZCVa+ZCM+QDgomnhDlvnrGDq6pasTaIspGTXgws+7r8Dt/cNe6EH\nHJpRH2cGRiO4yPcfcT9eS4X7k8OC4f33wHfACOmLu6LeoNE8ujmSk6L6WzLUI+sE\nnLZbFrXxoAo4XHsm8vEG9C+jEoXZ1p+47wrAGaDwDQTnzlMy4dT9pRQEJP2G/Rry\nUkuZn8SUWmh3/YS78iaSzsNF1cgE1ealHOrPPFDjiCGDaH/LHyUPYlbFSLZ/B7Qx\nLxi5sePLcywWq/EJrmWpgeVTDjtNijsdKv/A3qkY+fm/oD0pzt7XsfJaP9YKNyJO\nQFdxWZeiPcDF+Hwf+IwSr+kCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgeAMB0GA1Ud\nDgQWBBQNvzYzJyXemGhxbA8NMXLolDnPyjANBgkqhkiG9w0BAQsFAAOCAQEARIJV\noKejGlOTn71QutnNnu07UtTu0IHs6YqjYzzND+m4JXLN+wvYm72AFUG0b1L7dRg0\niK8XjQrlNQNVqP1Mc6tffchy20neOPOHeiO6qTdRU8P2S8D3Uwe+1qhgxjfE+cWc\nwZmWxYK4HA8c58PxWMqrkr2QqXDplG9KWLvOgrtPGiLLZcQSKhvvB63QzItHBDU6\nRayiJY3oPkK/HrIvFlySqFqzWmuyknkciOFywEHQMz/tcSFJ2QFpPj/tBz9VXohH\nZ8KscmfhZrTPBjo+ky1lz/WraWoz4LMiLnkC2ABczWLRSawu+v3Irx1NFJngt05e\npqwtqIUeg7j+JLiTaA==\n-----END CERTIFICATE-----" + end + + def test_read_certificate + assert_nothing_raised do + binary = 
HTTPClient::JRubySSLSocket::PEMUtils.read_certificate(@raw_cert) + end + end + + def test_read_certificate_works_with_random_ascii_text_outside_begin_end + raw_cert_with_ascii = "some text before begin\n" + @raw_cert + "\nsome text after end" + assert_nothing_raised do + binary = HTTPClient::JRubySSLSocket::PEMUtils.read_certificate(raw_cert_with_ascii) + end + end + + def test_read_certificate_uses_all_content_if_missing_begin_end + cert = @raw_cert.sub(/-----BEGIN CERTIFICATE-----/, '').sub(/-----END CERTIFICATE-----/, '') + assert_nothing_raised do + binary = HTTPClient::JRubySSLSocket::PEMUtils.read_certificate(cert) + end + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/runner.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/runner.rb new file mode 100644 index 0000000..a07c28e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/runner.rb @@ -0,0 +1,2 @@ +require 'test/unit' +exit Test::Unit::AutoRunner.run(true, File.dirname($0)) diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/server.cert b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/server.cert new file mode 100644 index 0000000..998ccc5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/server.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/zCCAeegAwIBAgIBATANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxDjAMBgNVBAMMBVN1YkNB +MB4XDTA0MDEzMTAzMTMxNloXDTMzMDEyMzAzMTMxNlowQzELMAkGA1UEBgwCSlAx +EjAQBgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMRIwEAYDVQQDDAlsb2Nh +bGhvc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANFJTxWqup3nV9dsJAku +p+WaXnPNIzcpAA3qMGZDJTJsfa8Du7ZxTP0XJK5mETttBrn711cJxAuP3KjqnW9S +vtZ9lY2sXJ6Zj62sN5LwG3VVe25dI28yR1EsbHjJ5Zjf9tmggMC6am52dxuHbt5/ +vHo4ngJuKE/U+eeGRivMn6gFAgMBAAGjgYUwgYIwDAYDVR0TAQH/BAIwADAxBglg +hkgBhvhCAQ0EJBYiUnVieS9PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAd +BgNVHQ4EFgQUpZIyygD9JxFYHHOTEuWOLbCKfckwCwYDVR0PBAQDAgWgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBBQUAA4IBAQBwAIj5SaBHaA5X31IP +CFCJiep96awfp7RANO0cuUj+ZpGoFn9d6FXY0g+Eg5wAkCNIzZU5NHN9xsdOpnUo +zIBbyTfQEPrge1CMWMvL6uGaoEXytq84VTitF/xBTky4KtTn6+es4/e7jrrzeUXQ +RC46gkHObmDT91RkOEGjHLyld2328jo3DIN/VTHIryDeVHDWjY5dENwpwdkhhm60 +DR9IrNBbXWEe9emtguNXeN0iu1ux0lG1Hc6pWGQxMlRKNvGh0yZB9u5EVe38tOV0 +jQaoNyL7qzcQoXD3Dmbi1p0iRmg/+HngISsz8K7k7MBNVsSclztwgCzTZOBiVtkM +rRlQ +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/server.key b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/server.key new file mode 100644 index 0000000..9ba2218 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/server.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDRSU8Vqrqd51fXbCQJLqflml5zzSM3KQAN6jBmQyUybH2vA7u2 +cUz9FySuZhE7bQa5+9dXCcQLj9yo6p1vUr7WfZWNrFyemY+trDeS8Bt1VXtuXSNv +MkdRLGx4yeWY3/bZoIDAumpudncbh27ef7x6OJ4CbihP1PnnhkYrzJ+oBQIDAQAB +AoGBAIf4CstW2ltQO7+XYGoex7Hh8s9lTSW/G2vu5Hbr1LTHy3fzAvdq8MvVR12O +rk9fa+lU9vhzPc0NMB0GIDZ9GcHuhW5hD1Wg9OSCbTOkZDoH3CAFqonjh4Qfwv5W +IPAFn9KHukdqGXkwEMdErsUaPTy9A1V/aROVEaAY+HJgq/eZAkEA/BP1QMV04WEZ +Oynzz7/lLizJGGxp2AOvEVtqMoycA/Qk+zdKP8ufE0wbmCE3Qd6GoynavsHb6aGK +gQobb8zDZwJBANSK6MrXlrZTtEaeZuyOB4mAmRzGzOUVkUyULUjEx2GDT93ujAma +qm/2d3E+wXAkNSeRpjUmlQXy/2oSqnGvYbMCQQDRM+cYyEcGPUVpWpnj0shrF/QU +9vSot/X1G775EMTyaw6+BtbyNxVgOIu2J+rqGbn3c+b85XqTXOPL0A2RLYkFAkAm +syhSDtE9X55aoWsCNZY/vi+i4rvaFoQ/WleogVQAeGVpdo7/DK9t9YWoFBIqth0L +mGSYFu9ZhvZkvQNV8eYrAkBJ+rOIaLDsmbrgkeDruH+B/9yrm4McDtQ/rgnOGYnH 
+LjLpLLOrgUxqpzLWe++EwSLwK2//dHO+SPsQJ4xsyQJy +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/sslsvr.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/sslsvr.rb new file mode 100644 index 0000000..b1f4614 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/sslsvr.rb @@ -0,0 +1,65 @@ +require 'webrick/https' +require 'logger' +require 'rbconfig' + +PORT = 17171 +DIR = File.dirname(File.expand_path(__FILE__)) + +def cert(filename) + OpenSSL::X509::Certificate.new(File.open(File.join(DIR, filename)) { |f| + f.read + }) +end + +def key(filename) + OpenSSL::PKey::RSA.new(File.open(File.join(DIR, filename)) { |f| + f.read + }) +end + +def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" +end + +logger = Logger.new(STDERR) +logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + +server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => PORT, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => File.join(DIR, 'ca.cert'), + :SSLCertificate => cert('server.cert'), + :SSLPrivateKey => key('server.key'), + :SSLVerifyClient => nil, #OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT|OpenSSL::SSL::VERIFY_PEER, + :SSLClientCA => cert('ca.cert'), + :SSLCertName => nil +) +trap(:INT) do + server.shutdown +end +[:hello].each do |sym| + server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) +end + +t = Thread.new { + Thread.current.abort_on_exception = true + server.start +} +while server.status != :Running + sleep 0.1 + unless t.alive? + t.join + raise + end +end +STDOUT.sync = true +puts $$ +t.join diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/subca.cert b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/subca.cert new file mode 100644 index 0000000..1e47185 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/subca.cert @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaDCCAlCgAwIBAgIBATANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDMyN1oXDTM1MDEyMjAwNDMyN1owPzELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQ4wDAYDVQQDDAVTdWJDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ0Ou7AyRcRXnB/kVHv/6kwe +ANzgg/DyJfsAUqW90m7Lu1nqyug8gK0RBd77yU0w5HOAMHTVSdpjZK0g2sgx4Mb1 +d/213eL9TTl5MRVEChTvQr8q5DVG/8fxPPE7fMI8eOAzd98/NOAChk+80r4Sx7fC +kGVEE1bKwY1MrUsUNjOY2d6t3M4HHV3HX1V8ShuKfsHxgCmLzdI8U+5CnQedFgkm +3e+8tr8IX5RR1wA1Ifw9VadF7OdI/bGMzog/Q8XCLf+WPFjnK7Gcx6JFtzF6Gi4x +4dp1Xl45JYiVvi9zQ132wu8A1pDHhiNgQviyzbP+UjcB/tsOpzBQF8abYzgEkWEC +AwEAAaNyMHAwDwYDVR0TAQH/BAUwAwEB/zAxBglghkgBhvhCAQ0EJBYiUnVieS9P +cGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUlCjXWLsReYzH +LzsxwVnCXmKoB/owCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCJ/OyN +rT8Cq2Y+G2yA/L1EMRvvxwFBqxavqaqHl/6rwsIBFlB3zbqGA/0oec6MAVnYynq4 +c4AcHTjx3bQ/S4r2sNTZq0DH4SYbQzIobx/YW8PjQUJt8KQdKMcwwi7arHP7A/Ha +LKu8eIC2nsUBnP4NhkYSGhbmpJK+PFD0FVtD0ZIRlY/wsnaZNjWWcnWF1/FNuQ4H +ySjIblqVQkPuzebv3Ror6ZnVDukn96Mg7kP4u6zgxOeqlJGRe1M949SS9Vudjl8X +SF4aZUUB9pQGhsqQJVqaz2OlhGOp9D0q54xko/rekjAIcuDjl1mdX4F2WRrzpUmZ +uY/bPeOBYiVsOYVe +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_auth.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_auth.rb new file mode 100644 index 0000000..ab5cd62 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_auth.rb @@ -0,0 +1,492 @@ +require File.expand_path('helper', File.dirname(__FILE__)) +require 'digest/md5' +require 'rack' +require 'rack/lint' +require 'rack-ntlm' + +class TestAuth < Test::Unit::TestCase + include Helper + + def setup + super + setup_server + end + + def teardown + super + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + @server.mount( + '/basic_auth', + WEBrick::HTTPServlet::ProcHandler.new(method(:do_basic_auth).to_proc) + ) + @server.mount( + '/digest_auth', + WEBrick::HTTPServlet::ProcHandler.new(method(:do_digest_auth).to_proc) + ) + @server.mount( + '/digest_sess_auth', + WEBrick::HTTPServlet::ProcHandler.new(method(:do_digest_sess_auth).to_proc) + ) + # NTLM endpoint + ntlm_handler = Rack::Handler::WEBrick.new(@server, + Rack::Builder.app do + use Rack::ShowExceptions + use Rack::ContentLength + use Rack::Ntlm, {:uri_pattern => /.*/, :auth => {:username => "admin", :password => "admin"}} + run lambda { |env| [200, { 'Content-Type' => 'text/html' }, ['ntlm_auth OK']] } + end + ) + @server.mount( + '/ntlm_auth', + WEBrick::HTTPServlet::ProcHandler.new(Proc.new do |req, res| + ntlm_handler.service(req, res) + end) + ) + # Htpasswd + htpasswd = File.join(File.dirname(__FILE__), 'htpasswd') + htpasswd_userdb = WEBrick::HTTPAuth::Htpasswd.new(htpasswd) + htdigest = File.join(File.dirname(__FILE__), 'htdigest') + htdigest_userdb = WEBrick::HTTPAuth::Htdigest.new(htdigest) + @basic_auth = WEBrick::HTTPAuth::BasicAuth.new( + :Logger => @logger, + :Realm => 'auth', + :UserDB => htpasswd_userdb + ) + @digest_auth = WEBrick::HTTPAuth::DigestAuth.new( + :Logger => @logger, + :Algorithm => 'MD5', + :Realm => 'auth', + :UserDB => htdigest_userdb + ) + @digest_sess_auth = WEBrick::HTTPAuth::DigestAuth.new( + :Logger => @logger, + :Algorithm => 'MD5-sess', + :Realm => 'auth', + :UserDB => htdigest_userdb + ) + @server_thread = start_server_thread(@server) + + @proxy_digest_auth = WEBrick::HTTPAuth::ProxyDigestAuth.new( + :Logger => @proxylogger, + :Algorithm => 'MD5', + :Realm => 'auth', + :UserDB => htdigest_userdb + ) + + @proxyserver = WEBrick::HTTPProxyServer.new( + :ProxyAuthProc => @proxy_digest_auth.method(:authenticate).to_proc, + :BindAddress => "localhost", + :Logger => @proxylogger, + :Port => 0, + :AccessLog => [] + ) + @proxyport = @proxyserver.config[:Port] + @proxyserver_thread = start_server_thread(@proxyserver) + end + + def do_basic_auth(req, res) + @basic_auth.authenticate(req, res) + res['content-type'] = 'text/plain' + res.body = 'basic_auth OK' + end + + def do_digest_auth(req, res) + @digest_auth.authenticate(req, res) + res['content-type'] = 'text/plain' + res['x-query'] = req.body + res.body = 'digest_auth OK' + req.query_string.to_s + end + + def do_digest_sess_auth(req, res) + @digest_sess_auth.authenticate(req, res) + res['content-type'] = 'text/plain' + res['x-query'] = req.body + res.body = 'digest_sess_auth OK' + req.query_string.to_s + end + + # TODO: monkey patching for rack-ntlm-test-services's incompat. 
+ module ::Net + module NTLM + # ruby-ntlm 0.3.0 -> 0.4.0 + def self.decode_utf16le(*arg) + EncodeUtil.decode_utf16le(*arg) + end + # Make it work if @value == nil + class SecurityBuffer < FieldSet + remove_method(:data_size) if method_defined?(:data_size) + def data_size + @active && @value ? @value.size : 0 + end + end + end + end + def test_ntlm_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/ntlm_auth", 'admin', 'admin') + assert_equal('ntlm_auth OK', c.get_content("http://localhost:#{serverport}/ntlm_auth")) + end + + def test_basic_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + end + + def test_basic_auth_compat + c = HTTPClient.new + c.set_basic_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + end + + def test_BASIC_auth + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + # WEBrick in ruby 1.8.7 uses 'BASIC' instead of 'Basic' + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + # + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + res = c.get("http://localhost:#{serverport}/basic_auth") + assert_equal('basic_auth OK', res.content) + assert_equal(200, res.status) + assert_equal(401, res.previous.status) + assert_equal(nil, res.previous.previous) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_BASIC_auth_force + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + # WEBrick in ruby 1.8.7 uses 'BASIC' instead of 'Basic' + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + # + c.force_basic_auth = true + c.debug_dev = str = '' + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + assert_equal('Authorization: Basic YWRtaW46YWRtaW4='.upcase, str.split(/\r?\n/)[5].upcase) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_BASIC_auth_async + # async methods don't issue a retry call, so for successful authentication you need to set the force_basic_auth flag + c = HTTPClient.new(:force_basic_auth => true) + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + # WEBrick in ruby 1.8.7 uses 'BASIC' instead of 'Basic' + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + # + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + conn = c.get_async("http://localhost:#{serverport}/basic_auth") + assert_equal('basic_auth OK', conn.pop.body.read) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_BASIC_auth_nil_uri + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + c.set_auth(nil, 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + # To make this test work consistently on CRuby you can add 'Thread.pass' 
in + # @challenge iteration at BasicAuth#get like; + # + # return nil unless @challenge.find { |uri, ok| + # Thread.pass + # Util.uri_part_of(target_uri, uri) and ok + # } + def test_BASIC_auth_multi_thread + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + + 100.times.map { |idx| + Thread.new(idx) { |idx2| + Thread.abort_on_exception = true + Thread.pass + c.get("http://localhost:#{serverport}/basic_auth?#{idx2}") + } + }.map { |t| + t.join + } + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_basic_auth_reuses_credentials + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth/")) + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content("http://localhost:#{serverport}/basic_auth/sub/dir/") + assert_match(/Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_digest_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_auth OK', c.get_content("http://localhost:#{serverport}/digest_auth")) + end + + def test_digest_auth_reuses_credentials + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_auth OK', c.get_content("http://localhost:#{serverport}/digest_auth/")) + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content("http://localhost:#{serverport}/digest_auth/sub/dir/") + assert_match(/Authorization: Digest/, str) + end + + def test_digest_auth_with_block + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + called = false + c.get_content("http://localhost:#{serverport}/digest_auth") do |str| + assert_equal('digest_auth OK', str) + called = true + end + assert(called) + # + called = false + c.get("http://localhost:#{serverport}/digest_auth") do |str| + assert_equal('digest_auth OK', str) + called = true + end + assert(called) + end + + def test_digest_auth_with_post_io + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + post_body = StringIO.new("1234567890") + assert_equal('1234567890', c.post("http://localhost:#{serverport}/digest_auth", post_body).header['x-query'][0]) + # + post_body = StringIO.new("1234567890") + post_body.read(5) + assert_equal('67890', c.post("http://localhost:#{serverport}/digest_auth", post_body).header['x-query'][0]) + end + + def test_digest_auth_with_querystring + c = HTTPClient.new + c.debug_dev = STDERR if $DEBUG + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_auth OKbar=baz', c.get_content("http://localhost:#{serverport}/digest_auth/foo?bar=baz")) + end + + def test_perfer_digest + c = HTTPClient.new + c.set_auth('http://example.com/', 'admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 401 Unauthorized\nWWW-Authenticate: Basic realm=\"foo\"\nWWW-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/^Authorization: 
Digest/, str) + end + + def test_digest_sess_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_sess_auth OK', c.get_content("http://localhost:#{serverport}/digest_sess_auth")) + end + + def test_proxy_auth + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Basic realm=\"foo\"\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_proxy_auth_force + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.force_basic_auth = true + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_proxy_auth_reuses_credentials + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Basic realm=\"foo\"\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.get_content('http://www1.example.com/') + c.debug_dev = str = '' + c.get_content('http://www2.example.com/') + assert_match(/Proxy-Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_digest_proxy_auth_loop + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + md5 = Digest::MD5.new + ha1 = md5.hexdigest("admin:foo:admin") + ha2 = md5.hexdigest("GET:/") + response = md5.hexdigest("#{ha1}:nonce:#{ha2}") + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Digest/, str) + assert_match(%r"response=\"#{response}\"", str) + end + + def test_digest_proxy_auth + c=HTTPClient.new("http://localhost:#{proxyport}/") + c.set_proxy_auth('admin', 'admin') + c.set_auth("http://127.0.0.1:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://127.0.0.1:#{serverport}/basic_auth")) + end + + def test_digest_proxy_invalid_auth + c=HTTPClient.new("http://localhost:#{proxyport}/") + c.set_proxy_auth('admin', 'wrong') + c.set_auth("http://127.0.0.1:#{serverport}/", 'admin', 'admin') + assert_raises(HTTPClient::BadResponseError) do + c.get_content("http://127.0.0.1:#{serverport}/basic_auth") + end + end + + def test_prefer_digest_to_basic_proxy_auth + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nProxy-Authenticate: Basic realm=\"bar\"\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + md5 = Digest::MD5.new + ha1 = md5.hexdigest("admin:foo:admin") + ha2 = md5.hexdigest("GET:/") + response = md5.hexdigest("#{ha1}:nonce:#{ha2}") + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Digest/, str) + assert_match(%r"response=\"#{response}\"", str) + end + + def 
test_digest_proxy_auth_reuses_credentials + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + md5 = Digest::MD5.new + ha1 = md5.hexdigest("admin:foo:admin") + ha2 = md5.hexdigest("GET:/") + response = md5.hexdigest("#{ha1}:nonce:#{ha2}") + c.get_content('http://www1.example.com/') + c.debug_dev = str = '' + c.get_content('http://www2.example.com/') + assert_match(/Proxy-Authorization: Digest/, str) + assert_match(%r"response=\"#{response}\"", str) + end + + def test_oauth + c = HTTPClient.new + config = HTTPClient::OAuth::Config.new( + :realm => 'http://photos.example.net/', + :consumer_key => 'dpf43f3p2l4k3l03', + :consumer_secret => 'kd94hf93k423kf44', + :token => 'nnch734d00sl2jdk', + :secret => 'pfkkdhi9sl3r4s00', + :version => '1.0', + :signature_method => 'HMAC-SHA1' + ) + config.debug_timestamp = '1191242096' + config.debug_nonce = 'kllo9940pd9333jh' + c.www_auth.oauth.set_config('http://photos.example.net/', config) + c.www_auth.oauth.challenge('http://photos.example.net/') + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://photos.example.net/photos', [[:file, 'vacation.jpg'], [:size, 'original']]) + assert(str.index(%q(GET /photos?file=vacation.jpg&size=original))) + assert(str.index(%q(Authorization: OAuth realm="http://photos.example.net/", oauth_consumer_key="dpf43f3p2l4k3l03", oauth_nonce="kllo9940pd9333jh", oauth_signature="tR3%2BTy81lMeYAr%2FFid0kMTYa%2FWM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d00sl2jdk", oauth_version="1.0"))) + # + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://photos.example.net/photos?file=vacation.jpg&size=original') + assert(str.index(%q(GET /photos?file=vacation.jpg&size=original))) + assert(str.index(%q(Authorization: OAuth realm="http://photos.example.net/", oauth_consumer_key="dpf43f3p2l4k3l03", oauth_nonce="kllo9940pd9333jh", oauth_signature="tR3%2BTy81lMeYAr%2FFid0kMTYa%2FWM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d00sl2jdk", oauth_version="1.0"))) + # + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.post_content('http://photos.example.net/photos', [[:file, 'vacation.jpg'], [:size, 'original']]) + assert(str.index(%q(POST /photos))) + assert(str.index(%q(Authorization: OAuth realm="http://photos.example.net/", oauth_consumer_key="dpf43f3p2l4k3l03", oauth_nonce="kllo9940pd9333jh", oauth_signature="wPkvxykrw%2BBTdCcGqKr%2B3I%2BPsiM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d00sl2jdk", oauth_version="1.0"))) + end + + def test_basic_auth_post_with_multipart + retry_times = 0 + begin + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + File.open(__FILE__) do |f| + # read 'f' twice for authorization negotiation + assert_equal('basic_auth OK', c.post("http://localhost:#{serverport}/basic_auth", :file => f).content) + end + rescue Errno::ECONNRESET, HTTPClient::KeepAliveDisconnected + # TODO: WEBrick server returns ECONNRESET/EPIPE before 
sending Unauthorized response to client? + raise if retry_times > 5 + retry_times += 1 + sleep 1 + retry + end + end + + def test_negotiate_and_basic + c = HTTPClient.new + c.test_loopback_http_response << %Q(HTTP/1.1 401 Unauthorized\r\nWWW-Authenticate: NTLM\r\nWWW-Authenticate: Basic realm="foo"\r\nConnection: Keep-Alive\r\nContent-Length: 0\r\n\r\n) + c.test_loopback_http_response << %Q(HTTP/1.1 401 Unauthorized\r\nWWW-Authenticate: NTLM TlRMTVNTUAACAAAAAAAAACgAAAABAAAAAAAAAAAAAAA=\r\nConnection: Keep-Alive\r\nContent-Length: 0\r\n\r\n) + c.test_loopback_http_response << %Q(HTTP/1.0 200 OK\r\nConnection: Keep-Alive\r\nContent-Length: 1\r\n\r\na) + c.test_loopback_http_response << %Q(HTTP/1.0 200 OK\r\nConnection: Keep-Alive\r\nContent-Length: 1\r\n\r\nb) + c.debug_dev = str = '' + c.set_auth('http://www.example.org/', 'admin', 'admin') + # Do NTLM negotiation + c.get('http://www.example.org/foo') + # BasicAuth authenticator should not respond to it because NTLM + # negotiation has been finished. + assert_match(%r(Authorization: NTLM), str) + assert_not_match(%r(Authorization: Basic), str) + # ditto for other resource that is protected with NTLM + c.debug_dev = str = '' + c.get('http://www.example.org/foo/subdir') + assert_not_match(%r(Authorization: NTLM), str) + assert_not_match(%r(Authorization: Basic), str) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_cookie.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_cookie.rb new file mode 100644 index 0000000..57be1e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_cookie.rb @@ -0,0 +1,309 @@ +require 'test/unit' +require 'uri' +require 'tempfile' + +require 'httpclient/util' +require 'httpclient/cookie' + +class TestCookie < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @c = WebAgent::Cookie.new('hoge', 'funi') + end + + def test_s_new() + assert_instance_of(WebAgent::Cookie, @c) + end +end + +class TestCookieManager < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @cm = WebAgent::CookieManager.new() + end + + def teardown() + end + + def test_parse() + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2999 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2999, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse2() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal("excite.co.jp", cookie.domain) + assert_equal(".excite.co.jp", cookie.dot_domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + end + + def test_parse3() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT;Secure;HTTPOnly" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal("excite.co.jp", cookie.domain) + assert_equal(".excite.co.jp", cookie.dot_domain) + 
assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + assert_equal(true, cookie.secure?) + assert_equal(true, cookie.http_only?) + end + + def test_parse_double_semicolon() + str = "xmen=off,0,0,1;; path=\"/;;\"; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/;;", cookie.path) + assert_equal("excite.co.jp", cookie.domain) + assert_equal(".excite.co.jp", cookie.dot_domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + end + +# def test_make_portlist() +# assert_equal([80,8080], @cm.instance_eval{make_portlist("80,8080")}) +# assert_equal([80], @cm.instance_eval{make_portlist("80")}) +# assert_equal([80,8080,10080], @cm.instance_eval{make_portlist(" 80, 8080, 10080 \n")}) +# end + + def test_check_expired_cookies() + format = "%a, %d-%b-%Y %H:%M:%S GMT" + c1 = WebAgent::Cookie.new('hoge1', 'funi', :domain => 'http://www.example.com/', :path => '/') + c2 = WebAgent::Cookie.new('hoge2', 'funi', :domain => 'http://www.example.com/', :path => '/') + c3 = WebAgent::Cookie.new('hoge3', 'funi', :domain => 'http://www.example.com/', :path => '/') + c4 = WebAgent::Cookie.new('hoge4', 'funi', :domain => 'http://www.example.com/', :path => '/') + c1.expires = (Time.now - 100).gmtime.strftime(format) + c2.expires = (Time.now + 100).gmtime.strftime(format) + c3.expires = (Time.now - 10).gmtime.strftime(format) + c4.expires = nil + cookies = [c1,c2,c3,c4] + @cm.cookies = cookies + assert_equal(c2.name, @cm.cookies[0].name) + assert_equal(c2.expires, @cm.cookies[0].expires) + assert_equal(c4.name, @cm.cookies[1].name) + assert_equal(c4.expires, @cm.cookies[1].expires) + end + + def test_parse_expires + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=\"\"" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse_after_expiration + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2999 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2999, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + + time = Time.at(Time.now.to_i + 60).utc + expires = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT") + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=#{expires}; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + 
assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(time, cookie.expires) + assert_equal("/", cookie.path) + end + + def test_find_cookie() + str = "xmen=off,0,0,1; path=/; domain=.excite2.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite2.co.jp/")) + + str = "xmen=off,0,0,2; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite.co.jp/")) + + url = urify('http://www.excite.co.jp/hoge/funi/') + cookie_str = @cm.find(url) + assert_equal("xmen=\"off,0,0,2\"", cookie_str) + end + + def test_load_cookies() + cookiefile = Tempfile.new('test_cookie') + File.open(cookiefile.path, 'w') do |f| + f.write < same as URL + c.url = urify("http://www.inac.co.jp/hoge/hoge2/hoge3") + @cm.add(c) + # + c1, c2 = @cm.cookies + assert_equal('/hoge/hoge2/', c1.path) + assert_equal('/', c2.path) + end + + def test_keep_escaped + uri = urify('http://www.example.org') + + @cm.parse("bar=2; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=\"2\"; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=; path=/", uri) + c = @cm.cookies.first + assert_equal('', c.value) + assert_equal('bar=', @cm.find(uri)) + + @cm.parse("bar=\"\"; path=/", uri) + c = @cm.cookies.first + assert_equal('', c.value) + assert_equal('bar=', @cm.find(uri)) + end + + def test_load_cookies_escaped + uri = urify('http://example.org/') + f = Tempfile.new('test_cookie') + File.open(f.path, 'w') do |out| + out.write <= "1.9.0" +# Rubinius 1.8 mode does not support Regexp.quote(raw, 'n') I don't want put +# a pressure on supporting it because 1.9 mode works fine. diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_http-access2.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_http-access2.rb new file mode 100644 index 0000000..732c044 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_http-access2.rb @@ -0,0 +1,508 @@ +# -*- encoding: utf-8 -*- +require 'http-access2' +require File.expand_path('helper', File.dirname(__FILE__)) +require 'tempfile' + + +module HTTPAccess2 + + +class TestClient < Test::Unit::TestCase + include Helper + include HTTPClient::Util + + def setup + super + setup_server + setup_client + end + + def teardown + super + end + + def test_initialize + setup_proxyserver + escape_noproxy do + @proxyio.string = "" + @client = HTTPAccess2::Client.new(proxyurl) + assert_equal(urify(proxyurl), @client.proxy) + assert_equal(200, @client.head(serverurl).status) + assert(!@proxyio.string.empty?) + end + end + + def test_agent_name + @client = HTTPAccess2::Client.new(nil, "agent_name_foo") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^User-Agent: agent_name_foo/, lines[4]) + end + + def test_from + @client = HTTPAccess2::Client.new(nil, nil, "from_bar") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^From: from_bar/, lines[5]) + end + + def test_debug_dev + str = "" + @client.debug_dev = str + assert(str.empty?) + @client.get(serverurl) + assert(!str.empty?) 
+ end + + def _test_protocol_version_http09 + @client.protocol_version = 'HTTP/0.9' + str = "" + @client.debug_dev = str + @client.get(serverurl + 'hello') + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/0.9", lines[3]) + assert_equal("Connection: close", lines[5]) + assert_equal("= Response", lines[6]) + assert_match(/^hello/, lines[7]) + end + + def test_protocol_version_http10 + @client.protocol_version = 'HTTP/1.0' + str = "" + @client.debug_dev = str + @client.get(serverurl + 'hello') + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/1.0", lines[3]) + assert_equal("Connection: close", lines[7]) + assert_equal("= Response", lines[8]) + end + + def test_protocol_version_http11 + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: localhost:#{serverport}", lines[7]) + @client.protocol_version = 'HTTP/1.1' + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + @client.protocol_version = 'HTTP/1.0' + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.0", lines[3]) + end + + def test_proxy + setup_proxyserver + escape_noproxy do + begin + @client.proxy = "http://あ" + rescue => e + assert_match(/InvalidURIError/, e.class.to_s) + end + @client.proxy = "" + assert_nil(@client.proxy) + @client.proxy = "http://foo:1234" + assert_equal(urify("http://foo:1234"), @client.proxy) + uri = urify("http://bar:2345") + @client.proxy = uri + assert_equal(uri, @client.proxy) + # + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(@proxyio.string.empty?) + # + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(!@proxyio.string.empty?) + end + end + + def test_noproxy_for_localhost + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(@proxyio.string.empty?) + end + + def test_no_proxy + setup_proxyserver + escape_noproxy do + # proxy is not set. 
+ @client.no_proxy = 'localhost' + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:baz' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:443' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = "foobar,localhost:443:localhost:#{serverport},baz" + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + end + end + + def test_get_content + assert_equal('hello', @client.get_content(serverurl + 'hello')) + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + assert_equal('hello', @client.get_content(serverurl + 'redirect2')) + assert_raises(HTTPClient::Session::BadResponse) do + @client.get_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::Session::BadResponse) do + @client.get_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.get_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_post_content + assert_equal('hello', @client.post_content(serverurl + 'hello')) + assert_equal('hello', @client.post_content(serverurl + 'redirect1')) + assert_equal('hello', @client.post_content(serverurl + 'redirect2')) + assert_raises(HTTPClient::Session::BadResponse) do + @client.post_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::Session::BadResponse) do + @client.post_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.post_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_head + assert_equal("head", @client.head(serverurl + 'servlet').header["x-head"][0]) + param = {'1'=>'2', '3'=>'4'} + res = @client.head(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get + assert_equal("get", @client.get(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_post + assert_equal("post", @client.post(serverurl + 'servlet', '').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_put + assert_equal("put", @client.put(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_delete + assert_equal("delete", @client.delete(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 
'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_options + assert_equal("options", @client.options(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_trace + assert_equal("trace", @client.trace(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get_query + assert_equal({'1'=>'2'}, check_query_get({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_get({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_get({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_get({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_get([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_get('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_get('12+3=45&+=+')) + assert_equal({}, check_query_get('')) + end + + def test_post_body + assert_equal({'1'=>'2'}, check_query_post({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_post({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_post({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_post({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_post([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_post('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_post('12+3=45&+=+')) + assert_equal({}, check_query_post('')) + # + post_body = StringIO.new("foo=bar&foo=baz") + assert_equal( + ["bar", "baz"], + check_query_post(post_body)["foo"].to_ary.sort + ) + end + + def test_extra_headers + str = "" + @client.debug_dev = str + @client.head(serverurl, nil, {"ABC" => "DEF"}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + # + str = "" + @client.debug_dev = str + @client.get(serverurl, nil, [["ABC", "DEF"], ["ABC", "DEF"]]) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + assert_match("ABC: DEF", lines[5]) + end + + def test_timeout + assert_equal(60, @client.connect_timeout) + assert_equal(120, @client.send_timeout) + assert_equal(60, @client.receive_timeout) + end + + def test_connect_timeout + # ToDo + end + + def test_send_timeout + # ToDo + end + + def test_receive_timeout + # this test takes 2 sec + assert_equal('hello', @client.get_content(serverurl + 'sleep?sec=2')) + @client.reset_all + @client.receive_timeout = 1 + assert_equal('hello', @client.get_content(serverurl + 'sleep?sec=0')) + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.get_content(serverurl + 'sleep?sec=2') + end + @client.reset_all + @client.receive_timeout = 3 + assert_equal('hello', @client.get_content(serverurl + 'sleep?sec=2')) + end + + def test_cookies + cookiefile = Tempfile.new('test_cookies_file') + # from [ruby-talk:164079] + File.open(cookiefile.path, "wb") do |f| + f << "http://rubyforge.org//account/login.php session_ser LjEwMy45Ni40Ni0q%2A-fa0537de8cc31 2131676286 .rubyforge.org / 13\n" + end + cm = WebAgent::CookieManager::new(cookiefile.path) + cm.load_cookies + assert_equal(1, cm.cookies.size) + end + +private + + def check_query_get(query) + WEBrick::HTTPUtils.parse_query( + @client.get(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def 
check_query_post(query) + WEBrick::HTTPUtils.parse_query( + @client.post(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + [:hello, :sleep, :redirect1, :redirect2, :redirect3, :redirect_self, :relative_redirect].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server.mount('/servlet', TestServlet.new(@server)) + @server_thread = start_server_thread(@server) + end + + def escape_noproxy + backup = HTTPAccess2::Client::NO_PROXY_HOSTS.dup + HTTPAccess2::Client::NO_PROXY_HOSTS.clear + yield + ensure + HTTPAccess2::Client::NO_PROXY_HOSTS.replace(backup) + end + + def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" + end + + def do_sleep(req, res) + sec = req.query['sec'].to_i + sleep sec + res['content-type'] = 'text/html' + res.body = "hello" + end + + def do_redirect1(req, res) + res.set_redirect(WEBrick::HTTPStatus::MovedPermanently, serverurl + "hello") + end + + def do_redirect2(req, res) + res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, serverurl + "redirect3") + end + + def do_redirect3(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "hello") + end + + def do_redirect_self(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "redirect_self") + end + + def do_relative_redirect(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, "hello") + end + + class TestServlet < WEBrick::HTTPServlet::AbstractServlet + def get_instance(*arg) + self + end + + def do_HEAD(req, res) + res["x-head"] = 'head' # use this for test purpose only. + res["x-query"] = query_response(req) + end + + def do_GET(req, res) + res.body = 'get' + res["x-query"] = query_response(req) + end + + def do_POST(req, res) + res.body = 'post' + res["x-query"] = body_response(req) + end + + def do_PUT(req, res) + res.body = 'put' + end + + def do_DELETE(req, res) + res.body = 'delete' + end + + def do_OPTIONS(req, res) + # check RFC for legal response. + res.body = 'options' + end + + def do_TRACE(req, res) + # client SHOULD reflect the message received back to the client as the + # entity-body of a 200 (OK) response. 
[RFC2616] + res.body = 'trace' + res["x-query"] = query_response(req) + end + + private + + def query_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.query_string)) + end + + def body_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.body)) + end + + def query_escape(query) + escaped = [] + query.collect do |k, v| + v.to_ary.each do |ve| + escaped << CGI.escape(k) + '=' + CGI.escape(ve) + end + end + escaped.join('&') + end + end +end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_httpclient.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_httpclient.rb new file mode 100644 index 0000000..c8e5330 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_httpclient.rb @@ -0,0 +1,2145 @@ +# -*- encoding: utf-8 -*- +require File.expand_path('helper', File.dirname(__FILE__)) +require 'tempfile' + + +class TestHTTPClient < Test::Unit::TestCase + include Helper + include HTTPClient::Util + + def setup + super + setup_server + setup_client + end + + def teardown + super + end + + def test_initialize + setup_proxyserver + escape_noproxy do + @proxyio.string = "" + @client = HTTPClient.new(proxyurl) + assert_equal(urify(proxyurl), @client.proxy) + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + end + end + + def test_agent_name + @client = HTTPClient.new(nil, "agent_name_foo") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^User-Agent: agent_name_foo \(#{HTTPClient::VERSION}/, lines[4]) + end + + def test_from + @client = HTTPClient.new(nil, nil, "from_bar") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^From: from_bar/, lines[5]) + end + + def test_debug_dev + str = "" + @client.debug_dev = str + assert_equal(str.object_id, @client.debug_dev.object_id) + assert(str.empty?) + @client.get(serverurl) + assert(!str.empty?) + end + + def test_debug_dev_stream + str = "" + @client.debug_dev = str + conn = @client.get_async(serverurl) + Thread.pass while !conn.finished? + assert(!str.empty?) + end + + def test_protocol_version_http09 + @client.protocol_version = 'HTTP/0.9' + @client.debug_dev = str = '' + @client.test_loopback_http_response << "hello\nworld\n" + res = @client.get(serverurl + 'hello') + assert_equal('0.9', res.http_version) + assert_equal(nil, res.status) + assert_equal(nil, res.reason) + assert_equal("hello\nworld\n", res.content) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/0.9", lines[3]) + assert_equal("Connection: close", lines[7]) + assert_equal("= Response", lines[8]) + assert_match(/^hello$/, lines[9]) + assert_match(/^world$/, lines[10]) + end + + def test_protocol_version_http10 + assert_equal(nil, @client.protocol_version) + @client.protocol_version = 'HTTP/1.0' + assert_equal('HTTP/1.0', @client.protocol_version) + str = "" + @client.debug_dev = str + @client.get(serverurl + 'hello') + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! 
CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/1.0", lines[3]) + assert_equal("Connection: close", lines[7]) + assert_equal("= Response", lines[8]) + end + + def test_header_accept_by_default + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("Accept: */*", lines[5]) + end + + def test_header_accept + str = "" + @client.debug_dev = str + @client.get(serverurl, :header => {:Accept => 'text/html'}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("Accept: text/html", lines[4]) + end + + def test_header_symbol + str = "" + @client.debug_dev = str + @client.post(serverurl + 'servlet', :header => {:'Content-Type' => 'application/json'}, :body => 'hello') + lines = str.split(/(?:\r?\n)+/).grep(/^Content-Type/) + assert_equal(2, lines.size) # 1 for both request and response + end + + def test_host_given + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: localhost:#{serverport}", lines[7]) + # + @client.reset_all + str = "" + @client.debug_dev = str + @client.get(serverurl, nil, {'Host' => 'foo'}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: foo", lines[4]) # use given param + end + + def test_redirect_returns_not_modified + assert_nothing_raised do + ::Timeout.timeout(2) do + @client.get(serverurl + 'status', {:status => 306}, {:follow_redirect => true}) + end + end + end + + class LocationRemoveFilter + def filter_request(req); end + def filter_response(req, res); res.header.delete('Location'); end + end + + def test_redirect_without_location_should_gracefully_fail + @client.request_filter << LocationRemoveFilter.new + assert_raises(HTTPClient::BadResponseError) do + @client.get(serverurl + 'redirect1', :follow_redirect => true) + end + end + + def test_protocol_version_http11 + assert_equal(nil, @client.protocol_version) + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: localhost:#{serverport}", lines[7]) + @client.protocol_version = 'HTTP/1.1' + assert_equal('HTTP/1.1', @client.protocol_version) + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + @client.protocol_version = 'HTTP/1.0' + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! 
CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.0", lines[3]) + end + + def test_proxy + setup_proxyserver + escape_noproxy do + begin + @client.proxy = "http://あ" + rescue => e + assert_match(/InvalidURIError/, e.class.to_s) + end + @client.proxy = "" + assert_nil(@client.proxy) + @client.proxy = "http://admin:admin@foo:1234" + assert_equal(urify("http://admin:admin@foo:1234"), @client.proxy) + uri = urify("http://bar:2345") + @client.proxy = uri + assert_equal(uri, @client.proxy) + # + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @proxyio.string = "" + @client.proxy = proxyurl + @client.debug_dev = str = "" + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + assert(/Host: localhost:#{serverport}/ =~ str) + end + end + + def test_host_header + @client.proxy = proxyurl + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + assert_equal(200, @client.head('http://www.example.com/foo').status) + # ensure no ':80' is added. some servers dislike that. + assert(/\r\nHost: www\.example\.com\r\n/ =~ str) + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + assert_equal(200, @client.head('http://www.example.com:12345/foo').status) + # ensure ':12345' exists. + assert(/\r\nHost: www\.example\.com:12345\r\n/ =~ str) + end + + def test_proxy_env + setup_proxyserver + escape_env do + ENV['http_proxy'] = "http://admin:admin@foo:1234" + ENV['NO_PROXY'] = "foobar" + client = HTTPClient.new + assert_equal(urify("http://admin:admin@foo:1234"), client.proxy) + assert_equal('foobar', client.no_proxy) + end + end + + def test_proxy_env_cgi + setup_proxyserver + escape_env do + ENV['REQUEST_METHOD'] = 'GET' # CGI environment emulation + ENV['http_proxy'] = "http://admin:admin@foo:1234" + ENV['no_proxy'] = "foobar" + client = HTTPClient.new + assert_equal(nil, client.proxy) + ENV['CGI_HTTP_PROXY'] = "http://admin:admin@foo:1234" + client = HTTPClient.new + assert_equal(urify("http://admin:admin@foo:1234"), client.proxy) + end + end + + def test_empty_proxy_env + setup_proxyserver + escape_env do + ENV['http_proxy'] = "" + client = HTTPClient.new + assert_equal(nil, client.proxy) + end + end + + def test_noproxy_for_localhost + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + end + + def test_no_proxy + setup_proxyserver + escape_noproxy do + # proxy is not set. 
+ assert_equal(nil, @client.no_proxy) + @client.no_proxy = 'localhost' + assert_equal('localhost', @client.no_proxy) + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:baz' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:443' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = "foobar,localhost:443:localhost:#{serverport},baz" + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + end + end + + def test_no_proxy_with_initial_dot + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = '' + @client.proxy = proxyurl + @client.head('http://www.foo.com') + assert(/CONNECT TO localhost/ =~ str, 'via proxy') + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = '.foo.com' + @client.proxy = proxyurl + @client.head('http://www.foo.com') + assert(/CONNECT TO www.foo.com/ =~ str, 'no proxy because .foo.com matches with www.foo.com') + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = '.foo.com' + @client.proxy = proxyurl + @client.head('http://foo.com') + assert(/CONNECT TO localhost/ =~ str, 'via proxy because .foo.com does not matche with foo.com') + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = 'foo.com' + @client.proxy = proxyurl + @client.head('http://foo.com') + assert(/CONNECT TO foo.com/ =~ str, 'no proxy because foo.com matches with foo.com') + end + + def test_cookie_update_while_authentication + escape_noproxy do + @client.test_loopback_http_response << < true).header.request_uri) + assert_equal(expected, @client.get(serverurl + 'redirect2', :follow_redirect => true).header.request_uri) + end + + def test_redirect_non_https + url = serverurl + 'redirect1' + https_url = urify(url) + https_url.scheme = 'https' + # + redirect_to_http = "HTTP/1.0 302 OK\nLocation: #{url}\n\n" + redirect_to_https = "HTTP/1.0 302 OK\nLocation: #{https_url}\n\n" + # + # https -> http is denied + @client.test_loopback_http_response << redirect_to_http + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(https_url) + end + # + # http -> http is OK + @client.reset_all + @client.test_loopback_http_response << redirect_to_http + assert_equal('hello', @client.get_content(url)) + # + # http -> https is OK + @client.reset_all + @client.test_loopback_http_response << redirect_to_https + assert_raises(OpenSSL::SSL::SSLError) do + # trying to normal endpoint with SSL -> SSL negotiation failure + @client.get_content(url) + end + # + # https -> https is OK + @client.reset_all + @client.test_loopback_http_response << redirect_to_https + 
assert_raises(OpenSSL::SSL::SSLError) do + # trying to normal endpoint with SSL -> SSL negotiation failure + @client.get_content(https_url) + end + # + # https -> http with strict_redirect_uri_callback + @client.redirect_uri_callback = @client.method(:strict_redirect_uri_callback) + @client.test_loopback_http_response << redirect_to_http + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(https_url) + end + end + + def test_redirect_see_other + assert_equal('hello', @client.post_content(serverurl + 'redirect_see_other')) + end + + def test_redirect_relative + @client.test_loopback_http_response << "HTTP/1.0 302 OK\nLocation: hello\n\n" + silent do + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + end + # + @client.reset_all + @client.redirect_uri_callback = @client.method(:strict_redirect_uri_callback) + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + @client.reset_all + @client.test_loopback_http_response << "HTTP/1.0 302 OK\nLocation: hello\n\n" + begin + @client.get_content(serverurl + 'redirect1') + assert(false) + rescue HTTPClient::BadResponseError => e + assert_equal(302, e.res.status) + end + end + + def test_redirect_https_relative + url = serverurl + 'redirect1' + https_url = urify(url) + https_url.scheme = 'https' + @client.test_loopback_http_response << "HTTP/1.0 302 OK\nLocation: /foo\n\n" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\n\nhello" + silent do + assert_equal('hello', @client.get_content(https_url)) + end + end + + def test_no_content + assert_nothing_raised do + ::Timeout.timeout(2) do + @client.get(serverurl + 'status', :status => 101) + @client.get(serverurl + 'status', :status => 204) + @client.get(serverurl + 'status', :status => 304) + end + end + end + + def test_get_content + assert_equal('hello', @client.get_content(serverurl + 'hello')) + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + assert_equal('hello', @client.get_content(serverurl + 'redirect2')) + url = serverurl.sub(/localhost/, '127.0.0.1') + assert_equal('hello', @client.get_content(url + 'hello')) + assert_equal('hello', @client.get_content(url + 'redirect1')) + assert_equal('hello', @client.get_content(url + 'redirect2')) + @client.reset(serverurl) + @client.reset(url) + @client.reset(serverurl) + @client.reset(url) + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.get_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_get_content_with_base_url + @client = HTTPClient.new(:base_url => serverurl) + assert_equal('hello', @client.get_content('/hello')) + assert_equal('hello', @client.get_content('/redirect1')) + assert_equal('hello', @client.get_content('/redirect2')) + @client.reset('/') + assert_raises(HTTPClient::BadResponseError) do + @client.get_content('/notfound') + end + assert_raises(HTTPClient::BadResponseError) do + @client.get_content('/redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.get_content('/relative_redirect')) + assert(called) + end + + GZIP_CONTENT = 
"\x1f\x8b\x08\x00\x1a\x96\xe0\x4c\x00\x03\xcb\x48\xcd\xc9\xc9\x07\x00\x86\xa6\x10\x36\x05\x00\x00\x00" + DEFLATE_CONTENT = "\x78\x9c\xcb\x48\xcd\xc9\xc9\x07\x00\x06\x2c\x02\x15" + DEFLATE_NOHEADER_CONTENT = "x\x9C\xCBH\xCD\xC9\xC9\a\x00\x06,\x02\x15" + [GZIP_CONTENT, DEFLATE_CONTENT, DEFLATE_NOHEADER_CONTENT].each do |content| + content.force_encoding('BINARY') if content.respond_to?(:force_encoding) + end + def test_get_gzipped_content + @client.transparent_gzip_decompression = false + content = @client.get_content(serverurl + 'compressed?enc=gzip') + assert_not_equal('hello', content) + assert_equal(GZIP_CONTENT, content) + @client.transparent_gzip_decompression = true + @client.reset_all + assert_equal('hello', @client.get_content(serverurl + 'compressed?enc=gzip')) + assert_equal('hello', @client.get_content(serverurl + 'compressed?enc=deflate')) + assert_equal('hello', @client.get_content(serverurl + 'compressed?enc=deflate_noheader')) + @client.transparent_gzip_decompression = false + @client.reset_all + end + + def test_get_content_with_block + @client.get_content(serverurl + 'hello') do |str| + assert_equal('hello', str) + end + @client.get_content(serverurl + 'redirect1') do |str| + assert_equal('hello', str) + end + @client.get_content(serverurl + 'redirect2') do |str| + assert_equal('hello', str) + end + end + + def test_post_content + assert_equal('hello', @client.post_content(serverurl + 'hello')) + assert_equal('hello', @client.post_content(serverurl + 'redirect1')) + assert_equal('hello', @client.post_content(serverurl + 'redirect2')) + assert_raises(HTTPClient::BadResponseError) do + @client.post_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::BadResponseError) do + @client.post_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.post_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_post_content_io + post_body = StringIO.new("1234567890") + assert_equal('post,1234567890', @client.post_content(serverurl + 'servlet', post_body)) + post_body = StringIO.new("1234567890") + assert_equal('post,1234567890', @client.post_content(serverurl + 'servlet_redirect', post_body)) + # + post_body = StringIO.new("1234567890") + post_body.read(5) + assert_equal('post,67890', @client.post_content(serverurl + 'servlet_redirect', post_body)) + end + + def test_head + assert_equal("head", @client.head(serverurl + 'servlet').header["x-head"][0]) + param = {'1'=>'2', '3'=>'4'} + res = @client.head(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_head_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.head_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get + assert_equal("get", @client.get(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_nil(res.contenttype) + # + url = serverurl.to_s + 'servlet?5=6&7=8' + res = @client.get(url, param) + assert_equal(param.merge("5"=>"6", "7"=>"8"), params(res.header["x-query"][0])) + assert_nil(res.contenttype) + end + + def test_get_with_base_url + @client = HTTPClient.new(:base_url => serverurl) + assert_equal("get", @client.get('/servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get('/servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_nil(res.contenttype) + # + @client.base_url = serverurl[0...-1] + '/servlet' + url = '?5=6&7=8' + res = @client.get(url, param) + assert_equal(param.merge("5"=>"6", "7"=>"8"), params(res.header["x-query"][0])) + assert_nil(res.contenttype) + end + + def test_get_with_default_header + @client = HTTPClient.new(:base_url => serverurl, :default_header => {'x-header' => 'custom'}) + assert_equal('custom', @client.get('/servlet').headers['X-Header']) + @client.default_header = {'x-header' => 'custom2'} + assert_equal('custom2', @client.get('/servlet').headers['X-Header']) + # passing Hash overrides + assert_equal('custom3', @client.get('/servlet', :header => {'x-header' => 'custom3'}).headers['X-Header']) + # passing Array does not override + assert_equal('custom2, custom4', @client.get('/servlet', :header => [['x-header', 'custom4']]).headers['X-Header']) + end + + def test_head_follow_redirect + expected = urify(serverurl + 'hello') + assert_equal(expected, @client.head(serverurl + 'hello', :follow_redirect => true).header.request_uri) + assert_equal(expected, @client.head(serverurl + 'redirect1', :follow_redirect => true).header.request_uri) + assert_equal(expected, @client.head(serverurl + 'redirect2', :follow_redirect => true).header.request_uri) + end + + def test_get_follow_redirect + assert_equal('hello', @client.get(serverurl + 'hello', :follow_redirect => true).body) + assert_equal('hello', @client.get(serverurl + 'redirect1', :follow_redirect => true).body) + + res = @client.get(serverurl + 'redirect2', :follow_redirect => true) + assert_equal('hello', res.body) + assert_equal("http://localhost:#{@serverport}/hello", res.header.request_uri.to_s) + assert_equal("http://localhost:#{@serverport}/redirect3", res.previous.header.request_uri.to_s) + assert_equal("http://localhost:#{@serverport}/redirect2", res.previous.previous.header.request_uri.to_s) + assert_equal(nil, res.previous.previous.previous) + end + + def test_get_follow_redirect_with_query + assert_equal('hello?1=2&3=4', @client.get(serverurl + 'redirect1', :query => {1 => 2, 3 => 4}, :follow_redirect => true).body) + end + + def test_get_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.get_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get_async_with_base_url + param = {'1'=>'2', '3'=>'4'} + @client = HTTPClient.new(:base_url => serverurl) + + # Use preconfigured :base_url + conn = @client.get_async('servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + # full URL still works + conn = @client.get_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get_async_for_largebody + conn = @client.get_async(serverurl + 'largebody') + res = conn.pop + assert_equal(1000*1000, res.content.read.length) + end + + if RUBY_VERSION > "1.9" + def test_post_async_with_default_internal + original_encoding = Encoding.default_internal + Encoding.default_internal = Encoding::UTF_8 + begin + post_body = StringIO.new("こんにちは") + conn = @client.post_async(serverurl + 'servlet', post_body) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal 'post,こんにちは', res.content.read + ensure + Encoding.default_internal = original_encoding + end + end + end + + def test_get_with_block + called = false + res = @client.get(serverurl + 'servlet') { |str| + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_arity_2 + called = false + res = @client.get(serverurl + 'servlet') { |blk_res, str| + assert_equal(200, blk_res.status) + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_and_redirects + called = false + res = @client.get(serverurl + 'servlet', :follow_redirect => true) { |str| + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_arity_2_and_redirects + called = false + res = @client.get(serverurl + 'servlet', :follow_redirect => true) { |blk_res, str| + assert_equal(200, blk_res.status) + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_string_recycle + @client.read_block_size = 2 + body = [] + _res = @client.get(serverurl + 'servlet') { |str| + body << str + } + assert_equal(2, body.size) + assert_equal("get", body.join) # Was "tt" by String object recycle... + end + + def test_get_with_block_chunked_string_recycle + server = TCPServer.open('localhost', 0) + server_thread = Thread.new { + Thread.abort_on_exception = true + sock = server.accept + create_keepalive_thread(1, sock) + } + url = "http://localhost:#{server.addr[1]}/" + body = [] + begin + _res = @client.get(url + 'chunked') { |str| + body << str + } + ensure + server.close + server_thread.join + end + assert_equal('abcdefghijklmnopqrstuvwxyz1234567890abcdef', body.join) + end + + def test_post + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4]) + param = {'1'=>'2', '3'=>'4'} + res = @client.post(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_post_empty + @client.debug_dev = str = '' + # nil body means 'no content' that is allowed but WEBrick cannot handle it. 
+ @client.post(serverurl + 'servlet', :body => nil) + # request does not have 'Content-Type' + assert_equal(1, str.scan(/content-type/i).size) + end + + def test_post_with_query + # this {:query => 'query'} recognized as body + res = @client.post(serverurl + 'servlet', :query => 'query') + assert_equal("post", res.content[0, 4]) + assert_equal("query=query", res.headers["X-Query"]) + assert_equal("", res.headers["X-Request-Query"]) + end + + def test_post_with_query_and_body + res = @client.post(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("post", res.content[0, 4]) + assert_equal("body=body", res.headers["X-Query"]) + assert_equal("query=query", res.headers["X-Request-Query"]) + end + + def test_post_follow_redirect + assert_equal('hello', @client.post(serverurl + 'hello', :follow_redirect => true).body) + assert_equal('hello', @client.post(serverurl + 'redirect1', :follow_redirect => true).body) + assert_equal('hello', @client.post(serverurl + 'redirect2', :follow_redirect => true).body) + end + + def test_post_with_content_type + param = [['1', '2'], ['3', '4']] + ext = {'content-type' => 'application/x-www-form-urlencoded', 'hello' => 'world'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_equal(Hash[param], params(res.header["x-query"][0])) + # + ext = [['content-type', 'multipart/form-data'], ['hello', 'world']] + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + # + ext = {'content-type' => 'multipart/form-data; boundary=hello'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + assert_equal("post,--hello\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n2\r\n--hello\r\nContent-Disposition: form-data; name=\"3\"\r\n\r\n4\r\n--hello--\r\n\r\n", res.content) + end + + def test_post_with_custom_multipart_and_boolean_params + param = [['boolean_true', true]] + ext = { 'content-type' => 'multipart/form-data' } + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="boolean_true"\r\n\r\ntrue\r\n/, res.content) + # + param = [['boolean_false', false]] + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="boolean_false"\r\n\r\nfalse\r\n/, res.content) + # + param = [['nil', nil]] + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="nil"\r\n\r\n\r\n/, res.content) + end + + def test_post_with_file + STDOUT.sync = true + File.open(__FILE__) do |file| + res = @client.post(serverurl + 'servlet', {1=>2, 3=>file}) + assert_match(/^Content-Disposition: form-data; name="1"\r\n/nm, res.content) + assert_match(/^Content-Disposition: form-data; name="3";/, res.content) + assert_match(/FIND_TAG_IN_THIS_FILE/, res.content) + end + end + + def test_post_with_file_without_size + STDOUT.sync = true + File.open(__FILE__) do |file| + def file.size + # 
Simulates some strange Windows behaviour + raise SystemCallError.new "Unknown Error (20047)" + end + assert_nothing_raised do + @client.post(serverurl + 'servlet', {1=>2, 3=>file}) + end + end + end + + def test_post_with_io # streaming, but not chunked + myio = StringIO.new("X" * (HTTP::Message::Body::DEFAULT_CHUNK_SIZE + 1)) + def myio.read(*args) + @called ||= 0 + @called += 1 + super + end + def myio.called + @called + end + @client.debug_dev = str = StringIO.new + res = @client.post(serverurl + 'servlet', {1=>2, 3=>myio}) + assert_match(/\r\nContent-Disposition: form-data; name="1"\r\n/m, res.content) + assert_match(/\r\n2\r\n/m, res.content) + assert_match(/\r\nContent-Disposition: form-data; name="3"; filename=""\r\n/m, res.content) + assert_match(/\r\nContent-Length:/m, str.string) + # HTTPClient reads from head to 'size'; CHUNK_SIZE bytes then 1 byte, that's all. + assert_equal(2, myio.called) + end + + def test_post_with_io_nosize # streaming + chunked post + myio = StringIO.new("4") + def myio.size + nil + end + @client.debug_dev = str = StringIO.new + res = @client.post(serverurl + 'servlet', {1=>2, 3=>myio}) + assert_match(/\r\nContent-Disposition: form-data; name="1"\r\n/m, res.content) + assert_match(/\r\n2\r\n/m, res.content) + assert_match(/\r\nContent-Disposition: form-data; name="3"; filename=""\r\n/m, res.content) + assert_match(/\r\n4\r\n/m, res.content) + assert_match(/\r\nTransfer-Encoding: chunked\r\n/m, str.string) + end + + def test_post_with_sized_io + myio = StringIO.new("45") + def myio.size + 1 + end + res = @client.post(serverurl + 'servlet', myio) + assert_equal('post,4', res.content) + end + + def test_post_with_sized_io_part + myio = StringIO.new("45") + def myio.size + 1 + end + @client.debug_dev = str = StringIO.new + _res = @client.post(serverurl + 'servlet', { :file => myio }) + assert_match(/\r\n4\r\n/, str.string, 'should send "4" not "45"') + end + + def test_post_with_unknown_sized_io_part + myio1 = StringIO.new("123") + myio2 = StringIO.new("45") + class << myio1 + undef :size + end + class << myio2 + # This does not work because other file is 'unknown sized' + def size + 1 + end + end + @client.debug_dev = str = StringIO.new + _res = @client.post(serverurl + 'servlet', { :file1 => myio1, :file2 => myio2 }) + assert_match(/\r\n45\r\n/, str.string) + end + + def test_post_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.post_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_post_with_block + called = false + res = @client.post(serverurl + 'servlet', '') { |str| + assert_equal('post,', str) + called = true + } + assert(called) + assert_nil(res.content) + # + called = false + param = [['1', '2'], ['3', '4']] + res = @client.post(serverurl + 'servlet', param) { |str| + assert_equal('post,1=2&3=4', str) + called = true + } + assert(called) + assert_equal('1=2&3=4', res.header["x-query"][0]) + assert_nil(res.content) + end + + def test_post_with_custom_multipart + ext = {'content-type' => 'multipart/form-data'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + body = [{ 'Content-Disposition' => 'form-data; name="1"', :content => "2"}, + { 'Content-Disposition' => 'form-data; name="3"', :content => "4"}] + res = @client.post(serverurl + 'servlet', body, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + # + ext = {'content-type' => 'multipart/form-data; boundary=hello'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', body, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + assert_equal("post,--hello\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n2\r\n--hello\r\nContent-Disposition: form-data; name=\"3\"\r\n\r\n4\r\n--hello--\r\n\r\n", res.content) + end + + def test_post_with_custom_multipart_and_file + STDOUT.sync = true + File.open(__FILE__) do |file| + def file.original_filename + 'file.txt' + end + + ext = { 'Content-Type' => 'multipart/alternative' } + body = [{ 'Content-Type' => 'text/plain', :content => "this is only a test" }, + { 'Content-Type' => 'application/x-ruby', :content => file }] + res = @client.post(serverurl + 'servlet', body, ext) + assert_match(/^Content-Type: text\/plain\r\n/m, res.content) + assert_match(/^this is only a test\r\n/m, res.content) + assert_match(/^Content-Type: application\/x-ruby\r\n/m, res.content) + assert_match(/Content-Disposition: form-data; name="3"; filename="file.txt"/, res.content) + assert_match(/FIND_TAG_IN_THIS_FILE/, res.content) + end + end + + def test_patch + assert_equal("patch", @client.patch(serverurl + 'servlet', '').content) + param = {'1'=>'2', '3'=>'4'} + @client.debug_dev = str = '' + res = @client.patch(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_equal('Content-Type: application/x-www-form-urlencoded', str.split(/\r?\n/)[5]) + end + + def test_patch_with_query_and_body + res = @client.patch(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("patch", res.content) + assert_equal("body=body", res.headers["X-Query"]) + assert_equal("query=query", res.headers["X-Request-Query"]) + end + + def test_patch_bytesize + res = @client.patch(serverurl + 'servlet', 'txt' => 'あいうえお') + assert_equal('txt=%E3%81%82%E3%81%84%E3%81%86%E3%81%88%E3%81%8A', res.header["x-query"][0]) + assert_equal('15', res.header["x-size"][0]) + end + + def test_patch_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.patch_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_put + assert_equal("put", @client.put(serverurl + 'servlet', '').content) + param = {'1'=>'2', '3'=>'4'} + @client.debug_dev = str = '' + res = @client.put(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_equal('Content-Type: application/x-www-form-urlencoded', str.split(/\r?\n/)[5]) + end + + def test_put_with_query_and_body + res = @client.put(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("put", res.content) + assert_equal("body=body", res.headers["X-Query"]) + assert_equal("query=query", res.headers["X-Request-Query"]) + end + + def test_put_bytesize + res = @client.put(serverurl + 'servlet', 'txt' => 'あいうえお') + assert_equal('txt=%E3%81%82%E3%81%84%E3%81%86%E3%81%88%E3%81%8A', res.header["x-query"][0]) + assert_equal('15', res.header["x-size"][0]) + end + + def test_put_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.put_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_delete + assert_equal("delete", @client.delete(serverurl + 'servlet').content) + end + + def test_delete_with_query + res = @client.delete(serverurl + 'servlet', :query => {:query => 'query'}) + assert_equal("delete", res.content) + assert_equal('query=query', res.headers['X-Request-Query']) + end + + def test_delete_with_query_and_body + res = @client.delete(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("delete", res.content) + assert_equal('query=query', res.headers['X-Request-Query']) + assert_equal('body=body', res.headers['X-Query']) + end + + # Not prohibited by spec, but normally it's ignored + def test_delete_with_body + param = {'1'=>'2', '3'=>'4'} + @client.debug_dev = str = '' + assert_equal("delete", @client.delete(serverurl + 'servlet', param).content) + assert_equal({'1' => ['2'], '3' => ['4']}, HTTP::Message.parse(str.split(/\r?\n\r?\n/)[2])) + end + + def test_delete_async + conn = @client.delete_async(serverurl + 'servlet') + Thread.pass while !conn.finished? + res = conn.pop + assert_equal('delete', res.content.read) + end + + def test_options + assert_equal('options', @client.options(serverurl + 'servlet').content) + end + + def test_options_with_header + res = @client.options(serverurl + 'servlet', {'x-header' => 'header'}) + assert_equal('header', res.headers['X-Header']) + end + + def test_options_with_body + res = @client.options(serverurl + 'servlet', :body => 'body') + assert_equal('body', res.headers['X-Body']) + end + + def test_options_with_body_and_header + res = @client.options(serverurl + 'servlet', :body => 'body', :header => {'x-header' => 'header'}) + assert_equal('header', res.headers['X-Header']) + assert_equal('body', res.headers['X-Body']) + end + + def test_options_async + conn = @client.options_async(serverurl + 'servlet') + Thread.pass while !conn.finished? + res = conn.pop + assert_equal('options', res.content.read) + end + + def test_propfind + assert_equal("propfind", @client.propfind(serverurl + 'servlet').content) + end + + def test_propfind_async + conn = @client.propfind_async(serverurl + 'servlet') + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal('propfind', res.content.read) + end + + def test_proppatch + assert_equal("proppatch", @client.proppatch(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.proppatch(serverurl + 'servlet', param) + assert_equal('proppatch', res.content) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_proppatch_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.proppatch_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal('proppatch', res.content.read) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_trace + assert_equal("trace", @client.trace(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.trace(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_trace_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.trace_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_chunked + assert_equal('chunked', @client.get_content(serverurl + 'chunked', { 'msg' => 'chunked' })) + assert_equal('あいうえお', @client.get_content(serverurl + 'chunked', { 'msg' => 'あいうえお' })) + end + + def test_chunked_empty + assert_equal('', @client.get_content(serverurl + 'chunked', { 'msg' => '' })) + end + + def test_get_query + assert_equal({'1'=>'2'}, check_query_get({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_get({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_get({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_get({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_get([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_get('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_get('12+3=45&+=+')) + assert_equal({}, check_query_get('')) + assert_equal({'1'=>'2'}, check_query_get({1=>StringIO.new('2')})) + assert_equal({'1'=>'2', '3'=>'4'}, check_query_get(StringIO.new('3=4&1=2'))) + + hash = check_query_get({"a"=>["A","a"], "B"=>"b"}) + assert_equal({'a'=>'A', 'B'=>'b'}, hash) + assert_equal(['A','a'], hash['a'].to_ary) + + hash = check_query_get({"a"=>WEBrick::HTTPUtils::FormData.new("A","a"), "B"=>"b"}) + assert_equal({'a'=>'A', 'B'=>'b'}, hash) + assert_equal(['A','a'], hash['a'].to_ary) + + hash = check_query_get({"a"=>[StringIO.new("A"),StringIO.new("a")], "B"=>StringIO.new("b")}) + assert_equal({'a'=>'A', 'B'=>'b'}, hash) + assert_equal(['A','a'], hash['a'].to_ary) + end + + def test_post_body + assert_equal({'1'=>'2'}, check_query_post({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_post({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_post({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_post({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_post([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_post('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_post('12+3=45&+=+')) + assert_equal({}, check_query_post('')) + # + post_body = StringIO.new("foo=bar&foo=baz") + assert_equal( + ["bar", "baz"], + check_query_post(post_body)["foo"].to_ary.sort + ) + end + + def test_extra_headers + str = "" + @client.debug_dev = str + @client.head(serverurl, nil, {"ABC" => "DEF"}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + # + str = "" + 
@client.debug_dev = str + @client.get(serverurl, nil, [["ABC", "DEF"], ["ABC", "DEF"]]) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + assert_match("ABC: DEF", lines[5]) + end + + def test_http_custom_date_header + @client.debug_dev = (str = "") + _res = @client.get(serverurl + 'hello', :header => {'Date' => 'foo'}) + lines = str.split(/(?:\r?\n)+/) + assert_equal('Date: foo', lines[4]) + end + + def test_timeout + assert_equal(60, @client.connect_timeout) + assert_equal(120, @client.send_timeout) + assert_equal(60, @client.receive_timeout) + # + @client.connect_timeout = 1 + @client.send_timeout = 2 + @client.receive_timeout = 3 + assert_equal(1, @client.connect_timeout) + assert_equal(2, @client.send_timeout) + assert_equal(3, @client.receive_timeout) + end + + def test_connect_timeout + # ToDo + end + + def test_send_timeout + # ToDo + end + + def test_receive_timeout + # this test takes 2 sec + assert_equal('hello?sec=2', @client.get_content(serverurl + 'sleep?sec=2')) + @client.receive_timeout = 1 + @client.reset_all + assert_equal('hello?sec=0', @client.get_content(serverurl + 'sleep?sec=0')) + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.get_content(serverurl + 'sleep?sec=2') + end + @client.receive_timeout = 3 + @client.reset_all + assert_equal('hello?sec=2', @client.get_content(serverurl + 'sleep?sec=2')) + end + + def test_receive_timeout_post + # this test takes 2 sec + assert_equal('hello', @client.post(serverurl + 'sleep', :sec => 2).content) + @client.receive_timeout = 1 + @client.reset_all + assert_equal('hello', @client.post(serverurl + 'sleep', :sec => 0).content) + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.post(serverurl + 'sleep', :sec => 2) + end + @client.receive_timeout = 3 + @client.reset_all + assert_equal('hello', @client.post(serverurl + 'sleep', :sec => 2).content) + end + + def test_reset + url = serverurl + 'servlet' + assert_nothing_raised do + 5.times do + @client.get(url) + @client.reset(url) + end + end + end + + def test_reset_all + assert_nothing_raised do + 5.times do + @client.get(serverurl + 'servlet') + @client.reset_all + end + end + end + + def test_cookies + cookiefile = Tempfile.new('test_cookies_file') + File.open(cookiefile.path, "wb") do |f| + f << "http://rubyforge.org/account/login.php\tsession_ser\tLjEwMy45Ni40Ni0q%2A-fa0537de8cc31\t2000000000\trubyforge.org\t/account/\t9\n" + end + @client.set_cookie_store(cookiefile.path) + # + @client.reset_all + @client.test_loopback_http_response << "HTTP/1.0 200 OK\nSet-Cookie: session_ser=bar; expires=#{Time.at(1924873200).gmtime.httpdate}\n\nOK" + @client.get_content('http://rubyforge.org/account/login.php') + @client.save_cookie_store + str = File.read(cookiefile.path) + assert_match(%r(http://rubyforge.org/account/login.php\tsession_ser\tbar\t1924873200\trubyforge.org\t/account/\t9), str) + end + + def test_eof_error_length + io = StringIO.new('') + def io.gets(*arg) + @buf ||= ["HTTP/1.0 200 OK\n", "content-length: 123\n", "\n"] + @buf.shift + end + def io.readpartial(size, buf) + @second ||= false + if !@second + @second = '1st' + buf << "abc" + buf + elsif @second == '1st' + @second = '2nd' + raise EOFError.new + else + raise Exception.new + end + end + def io.eof? 
+ true + end + @client.test_loopback_http_response << io + assert_nothing_raised do + @client.get('http://foo/bar') + end + end + + def test_eof_error_rest + io = StringIO.new('') + def io.gets(*arg) + @buf ||= ["HTTP/1.0 200 OK\n", "\n"] + @buf.shift + end + def io.readpartial(size, buf) + @second ||= false + if !@second + @second = '1st' + buf << "abc" + buf + elsif @second == '1st' + @second = '2nd' + raise EOFError.new + else + raise Exception.new + end + end + def io.eof? + true + end + @client.test_loopback_http_response << io + assert_nothing_raised do + @client.get('http://foo/bar') + end + end + + def test_urify + extend HTTPClient::Util + assert_nil(urify(nil)) + uri = 'http://foo' + assert_equal(urify(uri), urify(uri)) + assert_equal(urify(uri), urify(urify(uri))) + end + + def test_connection + c = HTTPClient::Connection.new + assert(c.finished?) + assert_nil(c.join) + end + + def test_site + site = HTTPClient::Site.new + assert_equal('tcp', site.scheme) + assert_equal('0.0.0.0', site.host) + assert_equal(0, site.port) + assert_equal('tcp://0.0.0.0:0', site.addr) + assert_equal('tcp://0.0.0.0:0', site.to_s) + assert_nothing_raised do + site.inspect + end + # + site = HTTPClient::Site.new(urify('http://localhost:12345/foo')) + assert_equal('http', site.scheme) + assert_equal('localhost', site.host) + assert_equal(12345, site.port) + assert_equal('http://localhost:12345', site.addr) + assert_equal('http://localhost:12345', site.to_s) + assert_nothing_raised do + site.inspect + end + # + site1 = HTTPClient::Site.new(urify('http://localhost:12341/')) + site2 = HTTPClient::Site.new(urify('http://localhost:12342/')) + site3 = HTTPClient::Site.new(urify('http://localhost:12342/')) + assert(!(site1 == site2)) + h = { site1 => 'site1', site2 => 'site2' } + h[site3] = 'site3' + assert_equal('site1', h[site1]) + assert_equal('site3', h[site2]) + end + + def test_http_header + res = @client.get(serverurl + 'hello') + assert_equal('text/html', res.contenttype) + assert_equal(5, res.header.get(nil).size) + # + res.header.delete('connection') + assert_equal(4, res.header.get(nil).size) + # + res.header['foo'] = 'bar' + assert_equal(['bar'], res.header['foo']) + # + assert_equal([['foo', 'bar']], res.header.get('foo')) + res.header['foo'] = ['bar', 'bar2'] + assert_equal([['foo', 'bar'], ['foo', 'bar2']], res.header.get('foo')) + end + + def test_mime_type + assert_equal('text/plain', HTTP::Message.mime_type('foo.txt')) + assert_equal('text/html', HTTP::Message.mime_type('foo.html')) + assert_equal('text/html', HTTP::Message.mime_type('foo.htm')) + assert_equal('text/xml', HTTP::Message.mime_type('foo.xml')) + assert_equal('application/msword', HTTP::Message.mime_type('foo.doc')) + assert_equal('image/png', HTTP::Message.mime_type('foo.png')) + assert_equal('image/gif', HTTP::Message.mime_type('foo.gif')) + assert_equal('image/jpeg', HTTP::Message.mime_type('foo.jpg')) + assert_equal('image/jpeg', HTTP::Message.mime_type('foo.jpeg')) + assert_equal('application/octet-stream', HTTP::Message.mime_type('foo.unknown')) + # + handler = lambda { |path| 'hello/world' } + assert_nil(HTTP::Message.mime_type_handler) + assert_nil(HTTP::Message.get_mime_type_func) + HTTP::Message.mime_type_handler = handler + assert_not_nil(HTTP::Message.mime_type_handler) + assert_not_nil(HTTP::Message.get_mime_type_func) + assert_equal('hello/world', HTTP::Message.mime_type('foo.txt')) + HTTP::Message.mime_type_handler = nil + assert_equal('text/plain', HTTP::Message.mime_type('foo.txt')) + 
HTTP::Message.set_mime_type_func(nil) + assert_equal('text/plain', HTTP::Message.mime_type('foo.txt')) + # + handler = lambda { |path| nil } + HTTP::Message.mime_type_handler = handler + assert_equal('application/octet-stream', HTTP::Message.mime_type('foo.txt')) + end + + def test_connect_request + req = HTTP::Message.new_connect_request(urify('https://foo/bar')) + assert_equal("CONNECT foo:443 HTTP/1.0\r\n\r\n", req.dump) + req = HTTP::Message.new_connect_request(urify('https://example.com/')) + assert_equal("CONNECT example.com:443 HTTP/1.0\r\n\r\n", req.dump) + end + + def test_response + res = HTTP::Message.new_response('response') + res.contenttype = 'text/plain' + res.header.body_date = Time.at(946652400) + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "Status: 200 OK", + "response" + ], + res.dump.split(/\r\n/).sort + ) + assert_equal(['8'], res.header['Content-Length']) + assert_equal('8', res.headers['Content-Length']) + res.header.set('foo', 'bar') + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "Status: 200 OK", + "foo: bar", + "response" + ], + res.dump.split(/\r\n/).sort + ) + # nil body + res = HTTP::Message.new_response(nil) + assert_equal( + [ + "Content-Length: 0", + "Content-Type: text/html; charset=us-ascii", + "Status: 200 OK" + ], + res.dump.split(/\r\n/).sort + ) + # for mod_ruby env + Object.const_set('Apache', nil) + begin + res = HTTP::Message.new_response('response') + assert(res.dump.split(/\r\n/).any? { |line| /^Date/ =~ line }) + # + res = HTTP::Message.new_response('response') + res.contenttype = 'text/plain' + res.header.body_date = Time.at(946652400) + res.header['Date'] = Time.at(946652400).httpdate + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Date: Fri, 31 Dec 1999 15:00:00 GMT", + "HTTP/1.1 200 OK", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "response" + ], + res.dump.split(/\r\n/).sort + ) + ensure + Object.instance_eval { remove_const('Apache') } + end + end + + def test_response_cookies + res = HTTP::Message.new_response('response') + res.contenttype = 'text/plain' + res.header.body_date = Time.at(946652400) + res.header.request_uri = 'http://www.example.com/' + assert_nil(res.cookies) + # + res.header['Set-Cookie'] = [ + 'CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT', + 'PART_NUMBER=ROCKET_LAUNCHER_0001; path=/' + ] + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT", + "Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/", + "Status: 200 OK", + "response" + ], + res.dump.split(/\r\n/).sort + ) + assert_equal(2, res.cookies.size) + assert_equal('CUSTOMER', res.cookies[0].name) + assert_equal('PART_NUMBER', res.cookies[1].name) + end + + def test_ok_response_success + res = HTTP::Message.new_response('response') + assert_equal(true, res.ok?) + res.status = 404 + assert_equal(false, res.ok?) + res.status = 500 + assert_equal(false, res.ok?) + res.status = 302 + assert_equal(false, res.ok?) 
+ end + + if !defined?(JRUBY_VERSION) and RUBY_VERSION < '1.9' + def test_timeout_scheduler + assert_equal('hello', @client.get_content(serverurl + 'hello')) + status = HTTPClient.timeout_scheduler.instance_eval { @thread.kill; @thread.join; @thread.status } + assert(!status) # dead + assert_equal('hello', @client.get_content(serverurl + 'hello')) + end + end + + def test_session_manager + mgr = HTTPClient::SessionManager.new(@client) + assert_nil(mgr.instance_eval { @proxy }) + assert_nil(mgr.debug_dev) + @client.debug_dev = Object.new + @client.proxy = 'http://myproxy:12345' + mgr = HTTPClient::SessionManager.new(@client) + assert_equal('http://myproxy:12345', mgr.instance_eval { @proxy }.to_s) + assert_equal(@client.debug_dev, mgr.debug_dev) + end + + def create_keepalive_disconnected_thread(idx, sock) + Thread.new { + # return "12345" for the first connection + sock.gets + sock.gets + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("12345") + # for the next connection, close while reading the request for emulating + # KeepAliveDisconnected + sock.gets + sock.close + } + end + + def test_keepalive_disconnected + client = HTTPClient.new + server = TCPServer.open('127.0.0.1', 0) + server.listen(30) # set enough backlogs + endpoint = "http://127.0.0.1:#{server.addr[1]}/" + queue = Queue.new + Thread.new(queue) { |qs| + Thread.abort_on_exception = true + # want 5 requests issued + 5.times { qs.pop } + # emulate 10 keep-alive connections + 10.times do |idx| + sock = server.accept + create_keepalive_disconnected_thread(idx, sock) + end + # return "23456" for the request which gets KeepAliveDisconnected + 5.times do + sock = server.accept + sock.gets + sock.gets + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("\r\n") + sock.write("23456") + sock.close + end + # return "34567" for the rest requests + while true + sock = server.accept + sock.gets + sock.gets + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Connection: close\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("34567") + sock.close + end + } + # try to allocate 10 keep-alive connections; it's a race so some + # threads can reuse the connection so actual number of keep-alive + # connections should be smaller than 10. + (0...10).to_a.map { + Thread.new(queue) { |qc| + Thread.abort_on_exception = true + conn = client.get_async(endpoint) + qc.push(true) + assert_equal("12345", conn.pop.content.read) + } + }.each { |th| th.join } + # send 5 requests, some of these should get KeepAliveDesconnected + # but should retry with new connection. + (0...5).to_a.map { + Thread.new { + Thread.abort_on_exception = true + assert_equal("23456", client.get(endpoint).content) + } + }.each { |th| th.join } + # rest requests won't get KeepAliveDisconnected + (0...10).to_a.map { + Thread.new { + Thread.abort_on_exception = true + assert_equal("34567", client.get(endpoint).content) + } + }.each { |th| th.join } + end + + def create_keepalive_thread(count, sock) + Thread.new { + Thread.abort_on_exception = true + count.times do + req = sock.gets + while line = sock.gets + break if line.chomp.empty? 
+ end + case req + when /chunked/ + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Transfer-Encoding: chunked\r\n") + sock.write("\r\n") + sock.write("1a\r\n") + sock.write("abcdefghijklmnopqrstuvwxyz\r\n") + sock.write("10\r\n") + sock.write("1234567890abcdef\r\n") + sock.write("0\r\n") + sock.write("\r\n") + else + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("12345") + end + end + sock.close + } + end + + def test_keepalive + server = TCPServer.open('localhost', 0) + server_thread = Thread.new { + Thread.abort_on_exception = true + sock = server.accept + create_keepalive_thread(10, sock) + } + url = "http://localhost:#{server.addr[1]}/" + begin + # content-length + 5.times do + assert_equal('12345', @client.get(url).body) + end + # chunked + 5.times do + assert_equal('abcdefghijklmnopqrstuvwxyz1234567890abcdef', @client.get(url + 'chunked').body) + end + ensure + server.close + server_thread.join + end + end + + def test_strict_response_size_check + @client.strict_response_size_check = false + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\nContent-Length: 12345\r\n\r\nhello world" + assert_equal('hello world', @client.get_content('http://dummy')) + + @client.reset_all + @client.strict_response_size_check = true + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\nContent-Length: 12345\r\n\r\nhello world" + assert_raise(HTTPClient::BadResponseError) do + @client.get_content('http://dummy') + end + end + + def test_socket_local + @client.socket_local.host = '127.0.0.1' + assert_equal('hello', @client.get_content(serverurl + 'hello')) + @client.reset_all + @client.socket_local.port = serverport + begin + @client.get_content(serverurl + 'hello') + rescue Errno::EADDRINUSE, SocketError + assert(true) + end + end + + def test_body_param_order + ary = ('b'..'d').map { |k| ['key2', k] } << ['key1', 'a'] << ['key3', 'z'] + assert_equal("key2=b&key2=c&key2=d&key1=a&key3=z", HTTP::Message.escape_query(ary)) + end + + if RUBY_VERSION > "1.9" + def test_charset + body = @client.get(serverurl + 'charset').body + assert_equal(Encoding::EUC_JP, body.encoding) + assert_equal('あいうえお'.encode(Encoding::EUC_JP), body) + end + end + + if RUBY_VERSION >= "1.9.3" + def test_continue + @client.debug_dev = str = '' + res = @client.get(serverurl + 'continue', :header => {:Expect => '100-continue'}) + assert_equal(200, res.status) + assert_equal('done!', res.body) + assert_match(/Expect: 100-continue/, str) + end + end + + def test_ipv6literaladdress_in_uri + server = TCPServer.open('::1', 0) rescue return # Skip if IPv6 is unavailable. + server_thread = Thread.new { + Thread.abort_on_exception = true + sock = server.accept + while line = sock.gets + break if line.chomp.empty? 
+ end + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("12345") + sock.close + } + uri = "http://[::1]:#{server.addr[1]}/" + begin + assert_equal('12345', @client.get(uri).body) + ensure + server.close + server_thread.kill + server_thread.join + end + end + + def test_uri_no_schema + assert_raise(ArgumentError) do + @client.get_content("www.example.com") + end + end + + def test_tcp_keepalive + @client.tcp_keepalive = true + @client.get(serverurl) + + # expecting HTTP keepalive caches the socket + session = @client.instance_variable_get(:@session_manager).send(:get_cached_session, HTTPClient::Site.new(URI.parse(serverurl))) + socket = session.instance_variable_get(:@socket) + + assert_true(session.tcp_keepalive) + assert_equal(Socket::SO_KEEPALIVE, socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE).optname) + end + +private + + def check_query_get(query) + WEBrick::HTTPUtils.parse_query( + @client.get(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def check_query_post(query) + WEBrick::HTTPUtils.parse_query( + @client.post(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + [ + :hello, :sleep, :servlet_redirect, :redirect1, :redirect2, :redirect3, + :redirect_self, :relative_redirect, :redirect_see_other, :chunked, + :largebody, :status, :compressed, :charset, :continue + ].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server.mount('/servlet', TestServlet.new(@server)) + @server_thread = start_server_thread(@server) + end + + def add_query_string(req) + if req.query_string + '?' 
+ req.query_string + else + '' + end + end + + def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" + add_query_string(req) + end + + def do_sleep(req, res) + sec = req.query['sec'].to_i + sleep sec + res['content-type'] = 'text/html' + res.body = "hello" + add_query_string(req) + end + + def do_servlet_redirect(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "servlet" + add_query_string(req)) + end + + def do_redirect1(req, res) + res.set_redirect(WEBrick::HTTPStatus::MovedPermanently, serverurl + "hello" + add_query_string(req)) + end + + def do_redirect2(req, res) + res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, serverurl + "redirect3" + add_query_string(req)) + end + + def do_redirect3(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "hello" + add_query_string(req)) + end + + def do_redirect_self(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "redirect_self" + add_query_string(req)) + end + + def do_relative_redirect(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, "hello" + add_query_string(req)) + end + + def do_redirect_see_other(req, res) + if req.request_method == 'POST' + res.set_redirect(WEBrick::HTTPStatus::SeeOther, serverurl + "redirect_see_other" + add_query_string(req)) # self + else + res.body = 'hello' + end + end + + def do_chunked(req, res) + res.chunked = true + res['content-type'] = 'text/plain; charset=UTF-8' + piper, pipew = IO.pipe + res.body = piper + pipew << req.query['msg'] + pipew.close + end + + def do_largebody(req, res) + res['content-type'] = 'text/html' + res.body = "a" * 1000 * 1000 + end + + def do_compressed(req, res) + res['content-type'] = 'application/octet-stream' + if req.query['enc'] == 'gzip' + res['content-encoding'] = 'gzip' + res.body = GZIP_CONTENT + elsif req.query['enc'] == 'deflate' + res['content-encoding'] = 'deflate' + res.body = DEFLATE_CONTENT + elsif req.query['enc'] == 'deflate_noheader' + res['content-encoding'] = 'deflate' + res.body = DEFLATE_NOHEADER_CONTENT + end + end + + def do_charset(req, res) + if RUBY_VERSION > "1.9" + res.body = 'あいうえお'.encode("euc-jp") + res['Content-Type'] = 'text/plain; charset=euc-jp' + else + res.body = 'this endpoint is for 1.9 or later' + end + end + + def do_status(req, res) + res.status = req.query['status'].to_i + end + + def do_continue(req, res) + req.continue + res.body = 'done!' + end + + class TestServlet < WEBrick::HTTPServlet::AbstractServlet + def get_instance(*arg) + self + end + + def do_HEAD(req, res) + res["x-head"] = 'head' # use this for test purpose only. 
+ res["x-query"] = query_response(req) + end + + def do_GET(req, res) + res.body = 'get' + res['x-header'] = req['X-Header'] + res["x-query"] = query_response(req) + end + + def do_POST(req, res) + res["content-type"] = "text/plain" # iso-8859-1, not US-ASCII + res.body = 'post,' + req.body.to_s + res["x-query"] = body_response(req) + res["x-request-query"] = req.query_string + end + + def do_PATCH(req, res) + res["x-query"] = body_response(req) + param = WEBrick::HTTPUtils.parse_query(req.body) || {} + res["x-size"] = (param['txt'] || '').size + res.body = param['txt'] || 'patch' + res["x-request-query"] = req.query_string + end + + def do_PUT(req, res) + res["x-query"] = body_response(req) + param = WEBrick::HTTPUtils.parse_query(req.body) || {} + res["x-size"] = (param['txt'] || '').size + res.body = param['txt'] || 'put' + res["x-request-query"] = req.query_string + end + + def do_DELETE(req, res) + res.body = 'delete' + res["x-query"] = body_response(req) + res["x-request-query"] = req.query_string + end + + def do_OPTIONS(req, res) + res.body = 'options' + res['x-header'] = req['X-Header'] + res['x-body'] = req.body + end + + def do_PROPFIND(req, res) + res.body = 'propfind' + end + + def do_PROPPATCH(req, res) + res.body = 'proppatch' + res["x-query"] = body_response(req) + end + + def do_TRACE(req, res) + # client SHOULD reflect the message received back to the client as the + # entity-body of a 200 (OK) response. [RFC2616] + res.body = 'trace' + res["x-query"] = query_response(req) + end + + private + + def query_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.query_string)) + end + + def body_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.body)) + end + + def query_escape(query) + escaped = [] + query.sort_by { |k, v| k }.collect do |k, v| + v.to_ary.each do |ve| + escaped << CGI.escape(k) + '=' + CGI.escape(ve) + end + end + escaped.join('&') + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_include_client.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_include_client.rb new file mode 100644 index 0000000..8cd05c4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_include_client.rb @@ -0,0 +1,52 @@ +# -*- encoding: utf-8 -*- +require File.expand_path('helper', File.dirname(__FILE__)) + +require 'httpclient/include_client' +class TestIncludeClient < Test::Unit::TestCase + class Widget + extend HTTPClient::IncludeClient + + include_http_client("http://example.com") do |client| + client.cookie_manager = nil + client.agent_name = "iMonkey 4k" + end + end + + class OtherWidget + extend HTTPClient::IncludeClient + + include_http_client + include_http_client(:method_name => :other_http_client) + end + + class UnrelatedBlankClass ; end + + def test_client_class_level_singleton + assert_equal Widget.http_client.object_id, Widget.http_client.object_id + + assert_equal Widget.http_client.object_id, Widget.new.http_client.object_id + + assert_not_equal Widget.http_client.object_id, OtherWidget.http_client.object_id + end + + def test_configured + assert_equal Widget.http_client.agent_name, "iMonkey 4k" + assert_nil Widget.http_client.cookie_manager + assert_equal Widget.http_client.proxy.to_s, "http://example.com" + end + + def test_two_includes + assert_not_equal OtherWidget.http_client.object_id, OtherWidget.other_http_client.object_id + + assert_equal OtherWidget.other_http_client.object_id, OtherWidget.new.other_http_client.object_id + end + + # meta-programming gone wrong 
sometimes accidentally + # adds the class method to _everyone_, a mistake we've made before. + def test_not_infected_class_hieararchy + assert ! Class.respond_to?(:http_client) + assert ! UnrelatedBlankClass.respond_to?(:http_client) + end + + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_jsonclient.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_jsonclient.rb new file mode 100644 index 0000000..839aa54 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_jsonclient.rb @@ -0,0 +1,80 @@ +# -*- encoding: utf-8 -*- +require File.expand_path('helper', File.dirname(__FILE__)) +require 'jsonclient' + + +class TestJSONClient < Test::Unit::TestCase + include Helper + + def setup + super + setup_server + @client = JSONClient.new + end + + def teardown + super + end + + def test_post + res = @client.post(serverurl + 'json', {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + # #previous contains the original response + assert_equal(1, JSON.parse(res.previous.content)['a']) + end + + def test_post_with_header + res = @client.post(serverurl + 'json', :header => {'X-foo' => 'bar'}, :body => {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + end + + def test_post_with_array_header + res = @client.post(serverurl + 'json', :header => [['X-foo', 'bar']], :body => {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + end + + def test_post_non_json_body + res = @client.post(serverurl + 'json', 'a=b&c=d') + assert_equal('a=b&c=d', res.content) + assert_equal('application/x-www-form-urlencoded', res.content_type) + end + + def test_put + res = @client.put(serverurl + 'json', {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + end + + def test_get_not_affected + res = @client.get(serverurl + 'json', {'a' => 1, 'b' => {'c' => 2}}) + assert_equal('', res.content) + assert_equal('', res.content_type) + end + + class JSONServlet < WEBrick::HTTPServlet::AbstractServlet + def get_instance(*arg) + self + end + + def service(req, res) + res['content-type'] = req['content-type'] + res.body = req.body + end + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + @server.mount('/json', JSONServlet.new(@server)) + @server_thread = start_server_thread(@server) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_ssl.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_ssl.rb new file mode 100644 index 0000000..2e634d7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_ssl.rb @@ -0,0 +1,559 @@ +require File.expand_path('helper', File.dirname(__FILE__)) +require 'webrick/https' + + +class TestSSL < Test::Unit::TestCase + include Helper + + DIR = File.dirname(File.expand_path(__FILE__)) + + def setup + super + @serverpid = @client = nil + @verify_callback_called = false + setup_server + setup_client + @url = "https://localhost:#{serverport}/hello" + end + + def teardown + super + end + + def path(filename) + File.expand_path(filename, DIR) + end + + 
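  # --- Editorial note (not part of the upstream test file) ---
  # A minimal, hedged sketch of the client-side setup the SSL test cases below
  # exercise: trust the test CA chain, then fetch the /hello endpoint over HTTPS.
  # Certificate paths and the port value are placeholders, not upstream fixtures.
  #
  #   client = HTTPClient.new
  #   client.ssl_config.add_trust_ca('ca.cert')     # root CA used by the test server
  #   client.ssl_config.add_trust_ca('subca.cert')  # intermediate CA
  #   client.get_content("https://localhost:#{port}/hello")  # => "hello"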
def test_proxy_ssl + setup_proxyserver + escape_noproxy do + @client.proxy = proxyurl + @client.ssl_config.set_client_cert_file(path('client.cert'), path('client.key')) + @client.ssl_config.add_trust_ca(path('ca.cert')) + @client.ssl_config.add_trust_ca(path('subca.cert')) + @client.debug_dev = str = "" + assert_equal(200, @client.get(@url).status) + assert(/accept/ =~ @proxyio.string, 'proxy is not used') + assert(/Host: localhost:#{serverport}/ =~ str) + end + end + + def test_options + cfg = @client.ssl_config + assert_nil(cfg.client_cert) + assert_nil(cfg.client_key) + assert_nil(cfg.client_ca) + assert_equal(OpenSSL::SSL::VERIFY_PEER | OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT, cfg.verify_mode) + assert_nil(cfg.verify_callback) + assert_nil(cfg.timeout) + expected_options = OpenSSL::SSL::OP_ALL | OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3 + expected_options &= ~OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS if defined?(OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS) + expected_options |= OpenSSL::SSL::OP_NO_COMPRESSION if defined?(OpenSSL::SSL::OP_NO_COMPRESSION) + assert_equal(expected_options, cfg.options) + assert_equal("ALL:!aNULL:!eNULL:!SSLv2", cfg.ciphers) + assert_instance_of(OpenSSL::X509::Store, cfg.cert_store) + end + +unless defined?(HTTPClient::JRubySSLSocket) + # JRubySSLSocket does not support sync mode. + def test_sync + cfg = @client.ssl_config + cfg.set_client_cert_file(path('client.cert'), path('client.key')) + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + assert_equal("hello", @client.get_content(@url)) + + @client.socket_sync = false + @client.reset_all + assert_equal("hello", @client.get_content(@url)) + end +end + + def test_debug_dev + str = @client.debug_dev = '' + cfg = @client.ssl_config + cfg.client_cert = path("client.cert") + cfg.client_key = path("client.key") + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + assert_equal("hello", @client.get_content(@url)) + assert(str.scan(/^hello$/)[0]) + end + + def test_verification_without_httpclient + raw_cert = "-----BEGIN CERTIFICATE-----\nMIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBCMRMwEQYKCZImiZPyLGQB\nGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJcnVieS1sYW5nMRAwDgYDVQQDDAdSdWJ5\nIENBMB4XDTE2MDgxMDE3MjEzNFoXDTE3MDgxMDE3MjEzNFowSzETMBEGCgmSJomT\n8ixkARkWA29yZzEZMBcGCgmSJomT8ixkARkWCXJ1YnktbGFuZzEZMBcGA1UEAwwQ\nUnVieSBjZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB\nAJCfsSXpSMpmZCVa+ZCM+QDgomnhDlvnrGDq6pasTaIspGTXgws+7r8Dt/cNe6EH\nHJpRH2cGRiO4yPcfcT9eS4X7k8OC4f33wHfACOmLu6LeoNE8ujmSk6L6WzLUI+sE\nnLZbFrXxoAo4XHsm8vEG9C+jEoXZ1p+47wrAGaDwDQTnzlMy4dT9pRQEJP2G/Rry\nUkuZn8SUWmh3/YS78iaSzsNF1cgE1ealHOrPPFDjiCGDaH/LHyUPYlbFSLZ/B7Qx\nLxi5sePLcywWq/EJrmWpgeVTDjtNijsdKv/A3qkY+fm/oD0pzt7XsfJaP9YKNyJO\nQFdxWZeiPcDF+Hwf+IwSr+kCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgeAMB0GA1Ud\nDgQWBBQNvzYzJyXemGhxbA8NMXLolDnPyjANBgkqhkiG9w0BAQsFAAOCAQEARIJV\noKejGlOTn71QutnNnu07UtTu0IHs6YqjYzzND+m4JXLN+wvYm72AFUG0b1L7dRg0\niK8XjQrlNQNVqP1Mc6tffchy20neOPOHeiO6qTdRU8P2S8D3Uwe+1qhgxjfE+cWc\nwZmWxYK4HA8c58PxWMqrkr2QqXDplG9KWLvOgrtPGiLLZcQSKhvvB63QzItHBDU6\nRayiJY3oPkK/HrIvFlySqFqzWmuyknkciOFywEHQMz/tcSFJ2QFpPj/tBz9VXohH\nZ8KscmfhZrTPBjo+ky1lz/WraWoz4LMiLnkC2ABczWLRSawu+v3Irx1NFJngt05e\npqwtqIUeg7j+JLiTaA==\n-----END CERTIFICATE-----" + raw_ca_cert = "-----BEGIN 
CERTIFICATE-----\nMIIDYjCCAkqgAwIBAgIBATANBgkqhkiG9w0BAQsFADBCMRMwEQYKCZImiZPyLGQB\nGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJcnVieS1sYW5nMRAwDgYDVQQDDAdSdWJ5\nIENBMB4XDTE2MDgxMDE3MjA1NFoXDTE4MDgxMDE3MjA1NFowQjETMBEGCgmSJomT\n8ixkARkWA29yZzEZMBcGCgmSJomT8ixkARkWCXJ1YnktbGFuZzEQMA4GA1UEAwwH\nUnVieSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALKGwyM3Ejtl\npo7CqaDlS71gDZn3gm6IwWpmRMLJofSI9LCwAbjijSC2HvO0xUWoYW40FbzjnnEi\ngszsWyPwuQIx9t0bhuAyllNIfImmkaQkrikXKBKzia4jPnbc4iXPnfjuThjESFWl\ntfbN6y1B5TjKhD1KelfakUO+iMu8WlIA9NKQZYfJ/F3QSpP5Iqb3KN/jVifFbDV8\nbAl3Ln4rT2kTCKrZZcl1jmWsJv8jBw6+P7hk0/Mu0JeHAITsjbNbpHd8UXpCfbVs\nsNGZrBU4uJdZ2YTG+Y27/t25jFNQwb+TWbvig7rfdX2sjssuxa00BBxarC08tIVj\nZprM37KcNn8CAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\nAQYwHQYDVR0OBBYEFA2/NjMnJd6YaHFsDw0xcuiUOc/KMB8GA1UdIwQYMBYEFA2/\nNjMnJd6YaHFsDw0xcuiUOc/KMA0GCSqGSIb3DQEBCwUAA4IBAQAJSOw49XqvUll0\n3vU9EAO6yUdeZSsQENIfYbRMQgapbnN1vTyrUjPZkGC5hIE1pVdoHtEoUEICxIwy\nr6BKxiSLBDLp+rvIuDdzMkXIWdUVvTZguVRyKtM2gfnpsPLpVnv+stBmAW2SMyxm\nkymhOpkjdv3He+45uorB3tdfBS9VVomDEUJdg38UE1b5eXRQ3D6gG0iCPFzKszXg\nLoAYhGxtjCJaKlbzduMK0YO6aelgW1+XnVIKcA7DJ9egk5d/dFZBPFfwumwr9hTH\nh7/fp3Fr87weI+CkfmFyJZrsEBlXJBVuvPesMVHTh3Whm5kmCdWcBJU0QmSq42ZL\n72U0PXLR\n-----END CERTIFICATE-----" + ca_cert = ::OpenSSL::X509::Certificate.new(raw_ca_cert) + cert = ::OpenSSL::X509::Certificate.new(raw_cert) + store = ::OpenSSL::X509::Store.new + store.add_cert(ca_cert) + assert(store.verify(cert)) + end + + def test_verification + cfg = @client.ssl_config + cfg.verify_callback = method(:verify_callback).to_proc + begin + @verify_callback_called = false + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.client_cert = path("client.cert") + cfg.client_key = path("client.key") + @verify_callback_called = false + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.add_trust_ca(path('ca.cert')) + @verify_callback_called = false + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.add_trust_ca(path('subca.cert')) + @verify_callback_called = false + assert_equal("hello", @client.get_content(@url)) + assert(@verify_callback_called) + # +if false + # JRubySSLSocket does not support depth. + # Also on travis environment, verify_depth seems to not work properly. + cfg.verify_depth = 1 # 2 required: root-sub + @verify_callback_called = false + begin + @client.get(@url) + assert(false, "verify_depth is not supported? 
#{OpenSSL::OPENSSL_VERSION}") + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.verify_depth = 2 # 2 required: root-sub + @verify_callback_called = false + @client.get(@url) + assert(@verify_callback_called) + # +end + cfg.verify_depth = nil + cfg.cert_store = OpenSSL::X509::Store.new + cfg.verify_mode = OpenSSL::SSL::VERIFY_PEER + begin + @client.get_content(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + end + # + cfg.verify_mode = nil + assert_equal("hello", @client.get_content(@url)) + cfg.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_equal("hello", @client.get_content(@url)) + end + + def test_cert_store + cfg = @client.ssl_config + cfg.cert_store.add_cert(cert('ca.cert')) + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + end + # + cfg.cert_store.add_cert(cert('subca.cert')) + assert_equal("hello", @client.get_content(@url)) + cfg.clear_cert_store + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + end + end + +if defined?(HTTPClient::JRubySSLSocket) + def test_ciphers + cfg = @client.ssl_config + cfg.set_client_cert_file(path('client.cert'), path('client-pass.key'), 'pass4key') + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + cfg.timeout = 123 + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = [] + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/No appropriate protocol/, ssle.message) + end + # + cfg.ciphers = %w(TLS_RSA_WITH_AES_128_CBC_SHA) + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = HTTPClient::SSLConfig::CIPHERS_DEFAULT + assert_equal("hello", @client.get_content(@url)) + end + +else + + def test_ciphers + cfg = @client.ssl_config + cfg.set_client_cert_file(path('client.cert'), path('client-pass.key'), 'pass4key') + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + cfg.timeout = 123 + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = "!ALL" + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/no cipher match/, ssle.message) + end + # + cfg.ciphers = "ALL" + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = "DEFAULT" + assert_equal("hello", @client.get_content(@url)) + end +end + + def test_set_default_paths + assert_raise(OpenSSL::SSL::SSLError) do + @client.get(@url) + end + escape_env do + ENV['SSL_CERT_FILE'] = File.join(DIR, 'ca-chain.pem') + @client.ssl_config.set_default_paths + @client.get(@url) + end + end + + def test_no_sslv3 + teardown_server + setup_server_with_ssl_version(:SSLv3) + assert_raise(OpenSSL::SSL::SSLError) do + @client.ssl_config.verify_mode = nil + @client.get("https://localhost:#{serverport}/hello") + end + end + + def test_allow_tlsv1 + teardown_server + setup_server_with_ssl_version(:TLSv1) + assert_nothing_raised do + @client.ssl_config.verify_mode = nil + @client.get("https://localhost:#{serverport}/hello") + end + end + 
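  # --- Editorial note (not part of the upstream test file) ---
  # The preceding cases pin down the protocol and verification policy: a
  # connection to an SSLv3-only server must fail, TLSv1 is still accepted, and
  # verification can be satisfied either via an explicit CA file or via
  # set_default_paths with SSL_CERT_FILE. A hedged sketch of the knobs these
  # tests rely on (file paths are placeholders):
  #
  #   cfg = client.ssl_config
  #   cfg.set_client_cert_file('client.cert', 'client.key')  # optional client auth
  #   cfg.add_trust_ca('ca-chain.pem')                        # or cfg.set_default_paths
  #   cfg.verify_mode = OpenSSL::SSL::VERIFY_PEER             # nil skips verification in these tests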
+ def test_use_higher_TLS + omit('TODO: it does not pass with Java 7 or old openssl ') + teardown_server + setup_server_with_ssl_version('TLSv1_2') + assert_nothing_raised do + @client.ssl_config.verify_mode = nil + @client.get("https://localhost:#{serverport}/hello") + # TODO: should check JRubySSLSocket.ssl_socket.getSession.getProtocol + # but it's not thread safe. How can I return protocol version to the caller? + end + end + + VERIFY_TEST_CERT_LOCALHOST = OpenSSL::X509::Certificate.new(<<-EOS) +-----BEGIN CERTIFICATE----- +MIIB9jCCAV+gAwIBAgIJAIH8Gsm4PcNKMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0xNjA4MTgxMDI2MDVaFw00NDAxMDMxMDI2MDVaMBQx +EjAQBgNVBAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +p7D8q0lcx5EZEV5+zPnQsxrbft5xyhH/MCStbH46DRATGPNSOaLRCG5r8gTKQzpD +4swGrQFYe2ienQ+7o4aEHErsXp4O/EmDKeiXWWrMqPr23r3HOBDebuynC/sCwy7N +epnX9u1VLB03eo+suj4d86OoOF+o11t9ZP+GA29Rsf8CAwEAAaNQME4wHQYDVR0O +BBYEFIxsJuPVvd5KKFcAvHGSeKSsWiUJMB8GA1UdIwQYMBaAFIxsJuPVvd5KKFcA +vHGSeKSsWiUJMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAMJaVCrrM +SM2I06Vr4BL+jtDFhZh3HmJFEDpwEFQ5Y9hduwdUGRBGCpkuea3fE2FKwWW9gLM1 +w7rFMzYFtCEqm78dJWIU79MRy0wjO4LgtYfoikgBh6JKWuV5ed/+L3sLyLG0ZTtv +lrD7lzDtXgwvj007PxDoYRp3JwYzKRmTbH8= +-----END CERTIFICATE----- + EOS + + VERIFY_TEST_CERT_FOO_DOMAIN = OpenSSL::X509::Certificate.new(<<-EOS) +-----BEGIN CERTIFICATE----- +MIIB8jCCAVugAwIBAgIJAL/od7Whx7VTMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV +BAMMB2Zvby5jb20wHhcNMTYwODE4MTAyMzUyWhcNNDQwMTAzMTAyMzUyWjASMRAw +DgYDVQQDDAdmb28uY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnsPyr +SVzHkRkRXn7M+dCzGtt+3nHKEf8wJK1sfjoNEBMY81I5otEIbmvyBMpDOkPizAat +AVh7aJ6dD7ujhoQcSuxeng78SYMp6JdZasyo+vbevcc4EN5u7KcL+wLDLs16mdf2 +7VUsHTd6j6y6Ph3zo6g4X6jXW31k/4YDb1Gx/wIDAQABo1AwTjAdBgNVHQ4EFgQU +jGwm49W93kooVwC8cZJ4pKxaJQkwHwYDVR0jBBgwFoAUjGwm49W93kooVwC8cZJ4 +pKxaJQkwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQCVKTvfxx+yezuR +5WpVKw1E9qabKOYFB5TqdHMHreRubMJTaoZC+YzhcCwtyLlAA9+axKINAiMM8T+z +jjfOHQSa2GS2TaaVDJWmXIgsAlEbjd2BEiQF0LZYGJRG9pyq0WbTV+CyFdrghjcO +xX/t7OG7NfOG9dhv3J+5SX10S5V5Dg== +-----END CERTIFICATE----- + EOS + + VERIFY_TEST_CERT_ALT_NAME = OpenSSL::X509::Certificate.new(<<-EOS) +-----BEGIN CERTIFICATE----- +MIICDDCCAXWgAwIBAgIJAOxXY4nOwxhGMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0xNjA4MTgxMDM0NTJaFw00NDAxMDMxMDM0NTJaMBQx +EjAQBgNVBAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +p7D8q0lcx5EZEV5+zPnQsxrbft5xyhH/MCStbH46DRATGPNSOaLRCG5r8gTKQzpD +4swGrQFYe2ienQ+7o4aEHErsXp4O/EmDKeiXWWrMqPr23r3HOBDebuynC/sCwy7N +epnX9u1VLB03eo+suj4d86OoOF+o11t9ZP+GA29Rsf8CAwEAAaNmMGQwFAYDVR0R +BA0wC4IJKi5mb28uY29tMB0GA1UdDgQWBBSMbCbj1b3eSihXALxxknikrFolCTAf +BgNVHSMEGDAWgBSMbCbj1b3eSihXALxxknikrFolCTAMBgNVHRMEBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4GBADJlKNFuOnsDIhHGW72HuQw4naN6lM3eZE9JJ+UF/XIF +ghGtgqw+00Yy5wMFc1K2Wm4p5NymmDfC/P1FOe34bpxt9/IWm6mEoIWoodC3N4Cm +PtnSS1/CRWzVIPGMglTGGDcUc70tfeAWgyTxgcNQd4vTFtnN0f0RDdaXa8kfKMTw +-----END CERTIFICATE----- + EOS + + VERIFY_TEST_PKEY = OpenSSL::PKey::RSA.new(<<-EOS) +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCnsPyrSVzHkRkRXn7M+dCzGtt+3nHKEf8wJK1sfjoNEBMY81I5 +otEIbmvyBMpDOkPizAatAVh7aJ6dD7ujhoQcSuxeng78SYMp6JdZasyo+vbevcc4 +EN5u7KcL+wLDLs16mdf27VUsHTd6j6y6Ph3zo6g4X6jXW31k/4YDb1Gx/wIDAQAB +AoGAe0RHx+WKtQx8/96VmTl951qzxMPho2etTYd4kAsNwzJwx2N9qu57eBYrdWF+ +CQMYievucFhP4Y+bINtC1Eb6btz9TCUwjCfeIxfGRoFf3cxVmxlsRJJmN1kSZlu1 +yYlcMVuP4noeFIMQBRrt5pyLCx2Z9A01NCQT4Y6VoREBIeECQQDWeNhsL6xkrmdB +M9+zl+SqHdNKhgKwMdp74+UNnAV9I8GB7bGlOWhc83aqMLgS+JBDFXcmNF/KawTR 
+zcnkod5xAkEAyClFgr3lZQSnwUwoA/AOcyW0+H63taaaXS/g8n3H8ENK6kL4ldUx +IgCk2ekbQ5Y3S2WScIGXNxMOza9MlsOvbwJAPUtoPvMZB+U4KVBT/JXKijvf6QqH +tidpU8L78XnHr84KPcHa5WeUxgvmvBkUYoebYzC9TrPlNIqFZBi2PJtuYQJBAMda +E5j7eJT75fhm2RPS6xFT5MH5sw6AOA3HucrJ63AoFVzsBpl0E9NBwO4ndLgDzF6T +cx4Kc4iuunewuB8QFpECQQCfvsHCjIJ/X4kiqeBzxDq2GR/oDgQkOzY+4H9U7Lwl +e61RBaxk5OHOA0bLtvJblV6NL72ZEZhX60wAWbrOPhpT +-----END RSA PRIVATE KEY----- + EOS + + def test_post_connection_check + teardown_server + setup_server_with_server_cert(nil, VERIFY_TEST_CERT_LOCALHOST, VERIFY_TEST_PKEY) + file = Tempfile.new('cert') + File.write(file.path, VERIFY_TEST_CERT_LOCALHOST.to_pem) + @client.ssl_config.add_trust_ca(file.path) + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_PEER + + teardown_server + setup_server_with_server_cert(nil, VERIFY_TEST_CERT_FOO_DOMAIN, VERIFY_TEST_PKEY) + File.write(file.path, VERIFY_TEST_CERT_FOO_DOMAIN.to_pem) + @client.ssl_config.add_trust_ca(file.path) + assert_raises(OpenSSL::SSL::SSLError) do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_PEER + + teardown_server + setup_server_with_server_cert(nil, VERIFY_TEST_CERT_ALT_NAME, VERIFY_TEST_PKEY) + File.write(file.path, VERIFY_TEST_CERT_ALT_NAME.to_pem) + @client.ssl_config.add_trust_ca(file.path) + assert_raises(OpenSSL::SSL::SSLError) do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + + def test_x509_store_add_cert_prepend + store = OpenSSL::X509::Store.new + assert_equal(store, store.add_cert(OpenSSL::X509::Certificate.new(VERIFY_TEST_CERT_LOCALHOST))) + end + + def test_tcp_keepalive + @client.tcp_keepalive = true + @client.ssl_config.add_trust_ca(path('ca-chain.pem')) + @client.get_content(@url) + + # expecting HTTP keepalive caches the socket + session = @client.instance_variable_get(:@session_manager).send(:get_cached_session, HTTPClient::Site.new(URI.parse(@url))) + socket = session.instance_variable_get(:@socket).instance_variable_get(:@socket) + + assert_true(session.tcp_keepalive) + if RUBY_ENGINE == 'jruby' + assert_true(socket.getKeepAlive()) + else + assert_equal(Socket::SO_KEEPALIVE, socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE).optname) + end + end + + def test_timeout + url = "https://localhost:#{serverport}/" + @client.ssl_config.add_trust_ca(path('ca-chain.pem')) + assert_equal('sleep', @client.get_content(url + 'sleep?sec=2')) + @client.receive_timeout = 1 + @client.reset_all + assert_equal('sleep', @client.get_content(url + 'sleep?sec=0')) + + start = Time.now + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.get_content(url + 'sleep?sec=5') + end + if Time.now - start > 3 + # before #342 it detected timeout when IO was freed + fail 'timeout does not work' + end + + @client.receive_timeout = 3 + @client.reset_all + assert_equal('sleep', @client.get_content(url + 'sleep?sec=2')) + end + +private + + def cert(filename) + 
OpenSSL::X509::Certificate.new(File.read(File.join(DIR, filename))) + end + + def key(filename) + OpenSSL::PKey::RSA.new(File.read(File.join(DIR, filename))) + end + + def q(str) + %Q["#{str}"] + end + + def setup_server + logger = Logger.new(STDERR) + logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => File.join(DIR, 'ca.cert'), + :SSLCertificate => cert('server.cert'), + :SSLPrivateKey => key('server.key'), + :SSLVerifyClient => nil, #OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT|OpenSSL::SSL::VERIFY_PEER, + :SSLClientCA => cert('ca.cert'), + :SSLCertName => nil + ) + @serverport = @server.config[:Port] + [:hello, :sleep].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server_thread = start_server_thread(@server) + end + + def setup_server_with_ssl_version(ssl_version) + # JRubyOpenSSL does not support "TLSv1_2" as an known version, and some JCE provides TLS v1.2 as "TLSv1.2" not "TLSv1_2" + if RUBY_ENGINE == 'jruby' && ['TLSv1_1', 'TLSv1_2'].include?(ssl_version) + ssl_version = ssl_version.tr('_', '.') + end + logger = Logger.new(STDERR) + logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => File.join(DIR, 'ca.cert'), + :SSLCertificate => cert('server.cert'), + :SSLPrivateKey => key('server.key') + ) + @server.ssl_context.ssl_version = ssl_version + @serverport = @server.config[:Port] + [:hello].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server_thread = start_server_thread(@server) + end + + def setup_server_with_server_cert(ca_cert, server_cert, server_key) + logger = Logger.new(STDERR) + logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => ca_cert, + :SSLCertificate => server_cert, + :SSLPrivateKey => server_key, + :SSLVerifyClient => nil, + :SSLClientCA => nil, + :SSLCertName => nil + ) + @serverport = @server.config[:Port] + [:hello].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server_thread = start_server_thread(@server) + end + + def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" + end + + def do_sleep(req, res) + sec = req.query['sec'].to_i + sleep sec + res['content-type'] = 'text/html' + res.body = "sleep" + end + + def start_server_thread(server) + t = Thread.new { + Thread.current.abort_on_exception = true + server.start + } + while server.status != :Running + sleep 0.1 + unless t.alive? 
+ t.join + raise + end + end + t + end + + def verify_callback(ok, cert) + @verify_callback_called = true + p ["client", ok, cert] if $DEBUG + ok + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_webagent-cookie.rb b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_webagent-cookie.rb new file mode 100644 index 0000000..29a415b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/httpclient-2.8.3/test/test_webagent-cookie.rb @@ -0,0 +1,465 @@ +require 'test/unit' +require 'uri' +require 'tempfile' + +# This testcase is located for reference, not for running. +if false + +require 'httpclient/webagent-cookie' + +class TestCookie < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @c = WebAgent::Cookie.new() + end + + def test_s_new() + assert_instance_of(WebAgent::Cookie, @c) + end + + def test_discard? + assert_equal(false, !!(@c.discard?)) + @c.discard = true + assert_equal(true, !!(@c.discard?)) + end + + def test_match() + url = urify('http://www.rubycolor.org/hoge/funi/#919191') + + @c.domain = 'www.rubycolor.org' + assert_equal(true, @c.match?(url)) + + @c.domain = '.rubycolor.org' + assert_equal(true, @c.match?(url)) + + @c.domain = 'aaa.www.rubycolor.org' + assert_equal(false, @c.match?(url)) + + @c.domain = 'aaa.www.rubycolor.org' + assert_equal(false, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/' + assert_equal(true, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + assert_equal(true, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge/hoge' + assert_equal(false, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + @c.secure = true + assert_equal(false, @c.match?(url)) + + url2 = urify('https://www.rubycolor.org/hoge/funi/#919191') + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + @c.secure = true + assert_equal(true, @c.match?(url2)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + @c.secure = nil + assert_equal(true, @c.match?(url2)) ## not false! 
+ + url.port = 80 + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' +# @c.port = [80,8080] + assert_equal(true, @c.match?(url)) + + url_nopath = URI.parse('http://www.rubycolor.org') + @c.domain = 'www.rubycolor.org' + @c.path = '/' + assert_equal(true, @c.match?(url_nopath)) + + end + + def test_head_match?() + assert_equal(true, @c.head_match?("","")) + assert_equal(false, @c.head_match?("a","")) + assert_equal(true, @c.head_match?("","a")) + assert_equal(true, @c.head_match?("abcde","abcde")) + assert_equal(true, @c.head_match?("abcde","abcdef")) + assert_equal(false, @c.head_match?("abcdef","abcde")) + assert_equal(false, @c.head_match?("abcde","bcde")) + assert_equal(false, @c.head_match?("bcde","abcde")) + end + + def test_tail_match?() + assert_equal(true, @c.tail_match?("","")) + assert_equal(false, @c.tail_match?("a","")) + assert_equal(true, @c.tail_match?("","a")) + assert_equal(true, @c.tail_match?("abcde","abcde")) + assert_equal(false, @c.tail_match?("abcde","abcdef")) + assert_equal(false, @c.tail_match?("abcdef","abcde")) + assert_equal(false, @c.tail_match?("abcde","bcde")) + assert_equal(true, @c.tail_match?("bcde","abcde")) + end + + + def test_domain_match() + extend WebAgent::CookieUtils + assert_equal(true, !!domain_match("hoge.co.jp",".")) +# assert_equal(true, !!domain_match("locahost",".local")) + assert_equal(true, !!domain_match("192.168.10.1","192.168.10.1")) + assert_equal(false, !!domain_match("192.168.10.1","192.168.10.2")) +# assert_equal(false, !!domain_match("hoge.co.jp",".hoge.co.jp")) + # allows; host == rubyforge.org, domain == .rubyforge.org + assert_equal(true, !!domain_match("hoge.co.jp",".hoge.co.jp")) + assert_equal(true, !!domain_match("www.hoge.co.jp", "www.hoge.co.jp")) + assert_equal(false, !!domain_match("www.hoge.co.jp", "www2.hoge.co.jp")) + assert_equal(true, !!domain_match("www.hoge.co.jp", ".hoge.co.jp")) + assert_equal(true, !!domain_match("www.aa.hoge.co.jp", ".hoge.co.jp")) + assert_equal(false, !!domain_match("www.hoge.co.jp", "hoge.co.jp")) + end + + def test_join_quotedstr() + arr1 = ['hoge=funi', 'hoge2=funi2'] + assert_equal(arr1, @c.instance_eval{join_quotedstr(arr1,';')}) + arr2 = ['hoge="fu', 'ni"', 'funi=funi'] + assert_equal(['hoge="fu;ni"','funi=funi'], + @c.instance_eval{join_quotedstr(arr2,';')}) + arr3 = ['hoge="funi";hoge2="fu','ni2";hoge3="hoge"', 'funi="funi"'] + assert_equal(['hoge="funi";hoge2="fu,ni2";hoge3="hoge"', 'funi="funi"'], + @c.instance_eval{join_quotedstr(arr3,',')}) + end + +end + +class TestCookieManager < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @cm = WebAgent::CookieManager.new() + end + + def teardown() + end + + def test_parse() + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2010 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2010, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse2() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal(".excite.co.jp", cookie.domain) + assert_equal(Time.gm(2037,12,31,12,0,0), 
cookie.expires) + end + + def test_parse3() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT;Secure;HTTPOnly" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal(".excite.co.jp", cookie.domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + assert_equal(true, cookie.secure?) + assert_equal(true, cookie.http_only?) + end + + def test_parse_double_semicolon() + str = "xmen=off,0,0,1;; path=\"/;;\"; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/;;", cookie.path) + assert_equal(".excite.co.jp", cookie.domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + end + +# def test_make_portlist() +# assert_equal([80,8080], @cm.instance_eval{make_portlist("80,8080")}) +# assert_equal([80], @cm.instance_eval{make_portlist("80")}) +# assert_equal([80,8080,10080], @cm.instance_eval{make_portlist(" 80, 8080, 10080 \n")}) +# end + + def test_check_expired_cookies() + c1 = WebAgent::Cookie.new() + c2 = c1.dup + c3 = c1.dup + c4 = c1.dup + c1.expires = Time.now - 100 + c2.expires = Time.now + 100 + c3.expires = Time.now - 10 + c4.expires = nil + cookies = [c1,c2,c3,c4] + @cm.cookies = cookies + @cm.check_expired_cookies() + # expires == nil cookies (session cookie) exists. + assert_equal([c2,c4], @cm.cookies) + end + + def test_parse_expires + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=\"\"" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse_after_expiration + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2010 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2010, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + + time = Time.at(Time.now.to_i + 60).utc + expires = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT") + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=#{expires}; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(time, cookie.expires) + assert_equal("/", cookie.path) + end + + def 
test_find_cookie() + str = "xmen=off,0,0,1; path=/; domain=.excite2.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite2.co.jp/")) + + str = "xmen=off,0,0,2; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite.co.jp/")) + + @cm.cookies[0].use = true + @cm.cookies[1].use = true + + url = urify('http://www.excite.co.jp/hoge/funi/') + cookie_str = @cm.find(url) + assert_equal("xmen=off,0,0,2", cookie_str) + end + + def test_load_cookies() + begin + File.open("tmp_test.tmp","w") {|f| + f.write < same as URL + c.url = urify("http://www.inac.co.jp/hoge/hoge2/hoge3") + @cm.add(c) + # + c1, c2 = @cm.cookies + assert_equal('', c1.path) + assert_equal('/hoge/hoge2', c2.path) + end + + def test_check_cookie_accept_domain() + @cm.accept_domains = [".example1.co.jp", "www1.example.jp"] + @cm.reject_domains = [".example2.co.jp", "www2.example.jp"] + check1 = @cm.check_cookie_accept_domain("www.example1.co.jp") + assert_equal(true, check1) + check2 = @cm.check_cookie_accept_domain("www.example2.co.jp") + assert_equal(false, check2) + check3 = @cm.check_cookie_accept_domain("www1.example.jp") + assert_equal(true, check3) + check4 = @cm.check_cookie_accept_domain("www2.example.jp") + assert_equal(false, check4) + check5 = @cm.check_cookie_accept_domain("aa.www2.example.jp") + assert_equal(true, check5) + check6 = @cm.check_cookie_accept_domain("aa.www2.example.jp") + assert_equal(true, check6) + assert_equal(false, @cm.check_cookie_accept_domain(nil)) + end + + def test_escaped + uri = urify('http://www.example.org') + + @cm.parse("bar=2; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=2; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=; path=/", uri) + c = @cm.cookies.first + assert_equal(nil, c.value) + assert_equal('bar=', @cm.find(uri)) + + @cm.parse("bar=; path=/", uri) + c = @cm.cookies.first + assert_equal(nil, c.value) + assert_equal('bar=', @cm.find(uri)) + end + + def test_load_cookies_escaped + uri = urify('http://example.org/') + f = Tempfile.new('test_cookie') + File.open(f.path, 'w') do |out| + out.write < "Dies ist ein Test" +``` + +## Features + +* Translation and localization +* Interpolation of values to translations +* Pluralization (CLDR compatible) +* Customizable transliteration to ASCII +* Flexible defaults +* Bulk lookup +* Lambdas as translation data +* Custom key/scope separator +* Custom exception handlers +* Extensible architecture with a swappable backend + +## Pluggable Features + +* Cache +* Pluralization: lambda pluralizers stored as translation data +* Locale fallbacks, RFC4647 compliant (optionally: RFC4646 locale validation) +* [Gettext support](https://github.com/ruby-i18n/i18n/wiki/Gettext) +* Translation metadata + +## Alternative Backend + +* Chain +* ActiveRecord (optionally: ActiveRecord::Missing and ActiveRecord::StoreProcs) +* KeyValue (uses active_support/json and cannot store procs) + +For more information and lots of resources see [the 'Resources' page on the wiki](https://github.com/ruby-i18n/i18n/wiki/Resources). + +## Tests + +You can run tests both with + +* `rake test` or just `rake` +* run any test file directly, e.g. 
`ruby -Ilib:test test/api/simple_test.rb` + +You can run all tests against all Gemfiles with + +* `ruby test/run_all.rb` + +The structure of the test suite is a bit unusual as it uses modules to reuse +particular tests in different test cases. + +The reason for this is that we need to enforce the I18n API across various +combinations of extensions. E.g. the Simple backend alone needs to support +the same API as any combination of feature and/or optimization modules included +to the Simple backend. We test this by reusing the same API definition (implemented +as test methods) in test cases with different setups. + +You can find the test cases that enforce the API in test/api. And you can find +the API definition test methods in test/api/tests. + +All other test cases (e.g. as defined in test/backend, test/core_ext) etc. +follow the usual test setup and should be easy to grok. + +## More Documentation + +Additional documentation can be found here: https://github.com/ruby-i18n/i18n/wiki + +## Contributors + +* @radar +* @carlosantoniodasilva +* @josevalim +* @knapo +* @tigrish +* [and many more](https://github.com/ruby-i18n/i18n/graphs/contributors) + +## License + +MIT License. See the included MIT-LICENSE file. diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n.rb new file mode 100644 index 0000000..ba29a1e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n.rb @@ -0,0 +1,414 @@ +# frozen_string_literal: true + +require 'concurrent/map' + +require 'i18n/version' +require 'i18n/exceptions' +require 'i18n/interpolate/ruby' + +module I18n + autoload :Backend, 'i18n/backend' + autoload :Config, 'i18n/config' + autoload :Gettext, 'i18n/gettext' + autoload :Locale, 'i18n/locale' + autoload :Tests, 'i18n/tests' + autoload :Middleware, 'i18n/middleware' + + RESERVED_KEYS = %i[ + cascade + deep_interpolation + default + exception_handler + fallback + fallback_in_progress + format + object + raise + resolve + scope + separator + throw + ].freeze + RESERVED_KEYS_PATTERN = /%\{(#{RESERVED_KEYS.join("|")})\}/ + EMPTY_HASH = {}.freeze + + def self.new_double_nested_cache # :nodoc: + Concurrent::Map.new { |h,k| h[k] = Concurrent::Map.new } + end + + module Base + # Gets I18n configuration object. + def config + Thread.current[:i18n_config] ||= I18n::Config.new + end + + # Sets I18n configuration object. + def config=(value) + Thread.current[:i18n_config] = value + end + + # Write methods which delegates to the configuration object + %w(locale backend default_locale available_locales default_separator + exception_handler load_path enforce_available_locales).each do |method| + module_eval <<-DELEGATORS, __FILE__, __LINE__ + 1 + def #{method} + config.#{method} + end + + def #{method}=(value) + config.#{method} = (value) + end + DELEGATORS + end + + # Tells the backend to reload translations. Used in situations like the + # Rails development environment. Backends can implement whatever strategy + # is useful. + def reload! + config.clear_available_locales_set + config.backend.reload! + end + + # Tells the backend to load translations now. Used in situations like the + # Rails production environment. Backends can implement whatever strategy + # is useful. + def eager_load! + config.backend.eager_load! + end + + # Translates, pluralizes and interpolates a given key using a given locale, + # scope, and default, as well as interpolation values. 
+ # + # *LOOKUP* + # + # Translation data is organized as a nested hash using the upper-level keys + # as namespaces. E.g., ActionView ships with the translation: + # :date => {:formats => {:short => "%b %d"}}. + # + # Translations can be looked up at any level of this hash using the key argument + # and the scope option. E.g., in this example I18n.t :date + # returns the whole translations hash {:formats => {:short => "%b %d"}}. + # + # Key can be either a single key or a dot-separated key (both Strings and Symbols + # work). E.g., the short format can be looked up using both: + # I18n.t 'date.formats.short' + # I18n.t :'date.formats.short' + # + # Scope can be either a single key, a dot-separated key or an array of keys + # or dot-separated keys. Keys and scopes can be combined freely. So these + # examples will all look up the same short date format: + # I18n.t 'date.formats.short' + # I18n.t 'formats.short', :scope => 'date' + # I18n.t 'short', :scope => 'date.formats' + # I18n.t 'short', :scope => %w(date formats) + # + # *INTERPOLATION* + # + # Translations can contain interpolation variables which will be replaced by + # values passed to #translate as part of the options hash, with the keys matching + # the interpolation variable names. + # + # E.g., with a translation :foo => "foo %{bar}" the option + # value for the key +bar+ will be interpolated into the translation: + # I18n.t :foo, :bar => 'baz' # => 'foo baz' + # + # *PLURALIZATION* + # + # Translation data can contain pluralized translations. Pluralized translations + # are arrays of singular/plural versions of translations like ['Foo', 'Foos']. + # + # Note that I18n::Backend::Simple only supports an algorithm for English + # pluralization rules. Other algorithms can be supported by custom backends. + # + # This returns the singular version of a pluralized translation: + # I18n.t :foo, :count => 1 # => 'Foo' + # + # These both return the plural version of a pluralized translation: + # I18n.t :foo, :count => 0 # => 'Foos' + # I18n.t :foo, :count => 2 # => 'Foos' + # + # The :count option can be used both for pluralization and interpolation. + # E.g., with the translation + # :foo => ['%{count} foo', '%{count} foos'], count will + # be interpolated to the pluralized translation: + # I18n.t :foo, :count => 1 # => '1 foo' + # + # *DEFAULTS* + # + # This returns the translation for :foo or default if no translation was found: + # I18n.t :foo, :default => 'default' + # + # This returns the translation for :foo or the translation for :bar if no + # translation for :foo was found: + # I18n.t :foo, :default => :bar + # + # Returns the translation for :foo or the translation for :bar + # or default if no translations for :foo and :bar were found. + # I18n.t :foo, :default => [:bar, 'default'] + # + # *BULK LOOKUP* + # + # This returns an array with the translations for :foo and :bar. + # I18n.t [:foo, :bar] + # + # Can be used with dot-separated nested keys: + # I18n.t [:'baz.foo', :'baz.bar'] + # + # Which is the same as using a scope option: + # I18n.t [:foo, :bar], :scope => :baz + # + # *LAMBDAS* + # + # Both translations and defaults can be given as Ruby lambdas. Lambdas will be + # called and passed the key and options. + # + # E.g. assuming the key :salutation resolves to: + # lambda { |key, options| options[:gender] == 'm' ? "Mr. #{options[:name]}" : "Mrs. #{options[:name]}" } + # + # Then I18n.t(:salutation, :gender => 'w', :name => 'Smith') will result in "Mrs. Smith". 
+ # + # Note that the string returned by lambda will go through string interpolation too, + # so the following lambda would give the same result: + # lambda { |key, options| options[:gender] == 'm' ? "Mr. %{name}" : "Mrs. %{name}" } + # + # It is recommended to use/implement lambdas in an "idempotent" way. E.g. when + # a cache layer is put in front of I18n.translate it will generate a cache key + # from the argument values passed to #translate. Therefore your lambdas should + # always return the same translations/values per unique combination of argument + # values. + # + # *Ruby 2.7+ keyword arguments warning* + # + # This method uses keyword arguments. + # There is a breaking change in ruby that produces warning with ruby 2.7 and won't work as expected with ruby 3.0 + # The "hash" parameter must be passed as keyword argument. + # + # Good: + # I18n.t(:salutation, :gender => 'w', :name => 'Smith') + # I18n.t(:salutation, **{ :gender => 'w', :name => 'Smith' }) + # I18n.t(:salutation, **any_hash) + # + # Bad: + # I18n.t(:salutation, { :gender => 'w', :name => 'Smith' }) + # I18n.t(:salutation, any_hash) + # + def translate(key = nil, *, throw: false, raise: false, locale: nil, **options) # TODO deprecate :raise + locale ||= config.locale + raise Disabled.new('t') if locale == false + enforce_available_locales!(locale) + + backend = config.backend + + result = catch(:exception) do + if key.is_a?(Array) + key.map { |k| backend.translate(locale, k, options) } + else + backend.translate(locale, key, options) + end + end + + if result.is_a?(MissingTranslation) + handle_exception((throw && :throw || raise && :raise), result, locale, key, options) + else + result + end + end + alias :t :translate + + # Wrapper for translate that adds :raise => true. With + # this option, if no translation is found, it will raise I18n::MissingTranslationData + def translate!(key, options = EMPTY_HASH) + translate(key, **options.merge(:raise => true)) + end + alias :t! :translate! + + # Returns true if a translation exists for a given key, otherwise returns false. + def exists?(key, _locale = nil, locale: _locale, **options) + locale ||= config.locale + raise Disabled.new('exists?') if locale == false + raise I18n::ArgumentError if key.is_a?(String) && key.empty? + config.backend.exists?(locale, key, options) + end + + # Transliterates UTF-8 characters to ASCII. By default this method will + # transliterate only Latin strings to an ASCII approximation: + # + # I18n.transliterate("Ærøskøbing") + # # => "AEroskobing" + # + # I18n.transliterate("日本語") + # # => "???" + # + # It's also possible to add support for per-locale transliterations. I18n + # expects transliteration rules to be stored at + # i18n.transliterate.rule. + # + # Transliteration rules can either be a Hash or a Proc. Procs must accept a + # single string argument. Hash rules inherit the default transliteration + # rules, while Procs do not. 
+ # + # *Examples* + # + # Setting a Hash in .yml: + # + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # Setting a Hash using Ruby: + # + # store_translations(:de, :i18n => { + # :transliterate => { + # :rule => { + # "ü" => "ue", + # "ö" => "oe" + # } + # } + # ) + # + # Setting a Proc: + # + # translit = lambda {|string| MyTransliterator.transliterate(string) } + # store_translations(:xx, :i18n => {:transliterate => {:rule => translit}) + # + # Transliterating strings: + # + # I18n.locale = :en + # I18n.transliterate("Jürgen") # => "Jurgen" + # I18n.locale = :de + # I18n.transliterate("Jürgen") # => "Juergen" + # I18n.transliterate("Jürgen", :locale => :en) # => "Jurgen" + # I18n.transliterate("Jürgen", :locale => :de) # => "Juergen" + def transliterate(key, *, throw: false, raise: false, locale: nil, replacement: nil, **options) + locale ||= config.locale + raise Disabled.new('transliterate') if locale == false + enforce_available_locales!(locale) + + config.backend.transliterate(locale, key, replacement) + rescue I18n::ArgumentError => exception + handle_exception((throw && :throw || raise && :raise), exception, locale, key, options) + end + + # Localizes certain objects, such as dates and numbers to local formatting. + def localize(object, locale: nil, format: nil, **options) + locale ||= config.locale + raise Disabled.new('l') if locale == false + enforce_available_locales!(locale) + + format ||= :default + config.backend.localize(locale, object, format, options) + end + alias :l :localize + + # Executes block with given I18n.locale set. + def with_locale(tmp_locale = nil) + if tmp_locale == nil + yield + else + current_locale = self.locale + self.locale = tmp_locale + begin + yield + ensure + self.locale = current_locale + end + end + end + + # Merges the given locale, key and scope into a single array of keys. + # Splits keys that contain dots into multiple keys. Makes sure all + # keys are Symbols. + def normalize_keys(locale, key, scope, separator = nil) + separator ||= I18n.default_separator + + keys = [] + keys.concat normalize_key(locale, separator) + keys.concat normalize_key(scope, separator) + keys.concat normalize_key(key, separator) + keys + end + + # Returns true when the passed locale, which can be either a String or a + # Symbol, is in the list of available locales. Returns false otherwise. + def locale_available?(locale) + I18n.config.available_locales_set.include?(locale) + end + + # Raises an InvalidLocale exception when the passed locale is not available. + def enforce_available_locales!(locale) + if locale != false && config.enforce_available_locales + raise I18n::InvalidLocale.new(locale) if !locale_available?(locale) + end + end + + def available_locales_initialized? + config.available_locales_initialized? + end + + private + + # Any exceptions thrown in translate will be sent to the @@exception_handler + # which can be a Symbol, a Proc or any other Object unless they're forced to + # be raised or thrown (MissingTranslation). + # + # If exception_handler is a Symbol then it will simply be sent to I18n as + # a method call. A Proc will simply be called. In any other case the + # method #call will be called on the exception_handler object. + # + # Examples: + # + # I18n.exception_handler = :custom_exception_handler # this is the default + # I18n.custom_exception_handler(exception, locale, key, options) # will be called like this + # + # I18n.exception_handler = lambda { |*args| ... 
} # a lambda + # I18n.exception_handler.call(exception, locale, key, options) # will be called like this + # + # I18n.exception_handler = I18nExceptionHandler.new # an object + # I18n.exception_handler.call(exception, locale, key, options) # will be called like this + def handle_exception(handling, exception, locale, key, options) + case handling + when :raise + raise exception.respond_to?(:to_exception) ? exception.to_exception : exception + when :throw + throw :exception, exception + else + case handler = options[:exception_handler] || config.exception_handler + when Symbol + send(handler, exception, locale, key, options) + else + handler.call(exception, locale, key, options) + end + end + end + + @@normalized_key_cache = I18n.new_double_nested_cache + + def normalize_key(key, separator) + @@normalized_key_cache[separator][key] ||= + case key + when Array + key.flat_map { |k| normalize_key(k, separator) } + else + keys = key.to_s.split(separator) + keys.delete('') + keys.map! do |k| + case k + when /\A[-+]?[1-9]\d*\z/ # integer + k.to_i + when 'true' + true + when 'false' + false + else + k.to_sym + end + end + keys + end + end + end + + extend Base +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend.rb new file mode 100644 index 0000000..453ed71 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module I18n + module Backend + autoload :Base, 'i18n/backend/base' + autoload :InterpolationCompiler, 'i18n/backend/interpolation_compiler' + autoload :Cache, 'i18n/backend/cache' + autoload :CacheFile, 'i18n/backend/cache_file' + autoload :Cascade, 'i18n/backend/cascade' + autoload :Chain, 'i18n/backend/chain' + autoload :Fallbacks, 'i18n/backend/fallbacks' + autoload :Flatten, 'i18n/backend/flatten' + autoload :Gettext, 'i18n/backend/gettext' + autoload :KeyValue, 'i18n/backend/key_value' + autoload :Memoize, 'i18n/backend/memoize' + autoload :Metadata, 'i18n/backend/metadata' + autoload :Pluralization, 'i18n/backend/pluralization' + autoload :Simple, 'i18n/backend/simple' + autoload :Transliterator, 'i18n/backend/transliterator' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/base.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/base.rb new file mode 100644 index 0000000..b04a259 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/base.rb @@ -0,0 +1,285 @@ +# frozen_string_literal: true + +require 'yaml' +require 'json' +require 'i18n/core_ext/hash' + +module I18n + module Backend + module Base + using I18n::HashRefinements + include I18n::Backend::Transliterator + + # Accepts a list of paths to translation files. Loads translations from + # plain Ruby (*.rb), YAML files (*.yml), or JSON files (*.json). See #load_rb, #load_yml, and #load_json + # for details. + def load_translations(*filenames) + filenames = I18n.load_path if filenames.empty? + filenames.flatten.each { |filename| load_file(filename) } + end + + # This method receives a locale, a data hash and options for storing translations. + # Should be implemented + def store_translations(locale, data, options = EMPTY_HASH) + raise NotImplementedError + end + + def translate(locale, key, options = EMPTY_HASH) + raise I18n::ArgumentError if (key.is_a?(String) || key.is_a?(Symbol)) && key.empty? + raise InvalidLocale.new(locale) unless locale + return nil if key.nil? 
&& !options.key?(:default) + + entry = lookup(locale, key, options[:scope], options) unless key.nil? + + if entry.nil? && options.key?(:default) + entry = default(locale, key, options[:default], options) + else + entry = resolve(locale, key, entry, options) + end + + count = options[:count] + + if entry.nil? && (subtrees? || !count) + if (options.key?(:default) && !options[:default].nil?) || !options.key?(:default) + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + end + + entry = entry.dup if entry.is_a?(String) + entry = pluralize(locale, entry, count) if count + + if entry.nil? && !subtrees? + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + deep_interpolation = options[:deep_interpolation] + values = options.except(*RESERVED_KEYS) + if values + entry = if deep_interpolation + deep_interpolate(locale, entry, values) + else + interpolate(locale, entry, values) + end + end + entry + end + + def exists?(locale, key, options = EMPTY_HASH) + lookup(locale, key) != nil + end + + # Acts the same as +strftime+, but uses a localized version of the + # format string. Takes a key from the date/time formats translations as + # a format argument (e.g., :short in :'date.formats'). + def localize(locale, object, format = :default, options = EMPTY_HASH) + if object.nil? && options.include?(:default) + return options[:default] + end + raise ArgumentError, "Object must be a Date, DateTime or Time object. #{object.inspect} given." unless object.respond_to?(:strftime) + + if Symbol === format + key = format + type = object.respond_to?(:sec) ? 'time' : 'date' + options = options.merge(:raise => true, :object => object, :locale => locale) + format = I18n.t(:"#{type}.formats.#{key}", **options) + end + + format = translate_localization_format(locale, object, format, options) + object.strftime(format) + end + + # Returns an array of locales for which translations are available + # ignoring the reserved translation meta data key :i18n. + def available_locales + raise NotImplementedError + end + + def reload! + eager_load! if eager_loaded? + end + + def eager_load! + @eager_loaded = true + end + + protected + + def eager_loaded? + @eager_loaded ||= false + end + + # The method which actually looks up for the translation in the store. + def lookup(locale, key, scope = [], options = EMPTY_HASH) + raise NotImplementedError + end + + def subtrees? + true + end + + # Evaluates defaults. + # If given subject is an Array, it walks the array and returns the + # first translation that can be resolved. Otherwise it tries to resolve + # the translation directly. + def default(locale, object, subject, options = EMPTY_HASH) + options = options.reject { |key, value| key == :default } + case subject + when Array + subject.each do |item| + result = resolve(locale, object, item, options) + return result unless result.nil? + end and nil + else + resolve(locale, object, subject, options) + end + end + + # Resolves a translation. + # If the given subject is a Symbol, it will be translated with the + # given options. If it is a Proc then it will be evaluated. All other + # subjects will be returned directly. 
+ def resolve(locale, object, subject, options = EMPTY_HASH) + return subject if options[:resolve] == false + result = catch(:exception) do + case subject + when Symbol + I18n.translate(subject, **options.merge(:locale => locale, :throw => true)) + when Proc + date_or_time = options.delete(:object) || object + resolve(locale, object, subject.call(date_or_time, **options)) + else + subject + end + end + result unless result.is_a?(MissingTranslation) + end + + # Picks a translation from a pluralized mnemonic subkey according to English + # pluralization rules : + # - It will pick the :one subkey if count is equal to 1. + # - It will pick the :other subkey otherwise. + # - It will pick the :zero subkey in the special case where count is + # equal to 0 and there is a :zero subkey present. This behaviour is + # not standard with regards to the CLDR pluralization rules. + # Other backends can implement more flexible or complex pluralization rules. + def pluralize(locale, entry, count) + entry = entry.reject { |k, _v| k == :attributes } if entry.is_a?(Hash) + return entry unless entry.is_a?(Hash) && count && entry.values.none? { |v| v.is_a?(Hash) } + + key = pluralization_key(entry, count) + raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) + entry[key] + end + + # Interpolates values into a given subject. + # + # if the given subject is a string then: + # method interpolates "file %{file} opened by %%{user}", :file => 'test.txt', :user => 'Mr. X' + # # => "file test.txt opened by %{user}" + # + # if the given subject is an array then: + # each element of the array is recursively interpolated (until it finds a string) + # method interpolates ["yes, %{user}", ["maybe no, %{user}, "no, %{user}"]], :user => "bartuz" + # # => "["yes, bartuz",["maybe no, bartuz", "no, bartuz"]]" + def interpolate(locale, subject, values = EMPTY_HASH) + return subject if values.empty? + + case subject + when ::String then I18n.interpolate(subject, values) + when ::Array then subject.map { |element| interpolate(locale, element, values) } + else + subject + end + end + + # Deep interpolation + # + # deep_interpolate { people: { ann: "Ann is %{ann}", john: "John is %{john}" } }, + # ann: 'good', john: 'big' + # #=> { people: { ann: "Ann is good", john: "John is big" } } + def deep_interpolate(locale, data, values = EMPTY_HASH) + return data if values.empty? + + case data + when ::String + I18n.interpolate(data, values) + when ::Hash + data.each_with_object({}) do |(k, v), result| + result[k] = deep_interpolate(locale, v, values) + end + when ::Array + data.map do |v| + deep_interpolate(locale, v, values) + end + else + data + end + end + + # Loads a single translations file by delegating to #load_rb or + # #load_yml depending on the file extension and directly merges the + # data to the existing translations. Raises I18n::UnknownFileType + # for all other file extensions. + def load_file(filename) + type = File.extname(filename).tr('.', '').downcase + raise UnknownFileType.new(type, filename) unless respond_to?(:"load_#{type}", true) + data = send(:"load_#{type}", filename) + unless data.is_a?(Hash) + raise InvalidLocaleData.new(filename, 'expects it to return a hash, but does not') + end + data.each { |locale, d| store_translations(locale, d || {}) } + end + + # Loads a plain Ruby translations file. eval'ing the file must yield + # a Hash containing translation data with locales as toplevel keys. 
+ def load_rb(filename) + eval(IO.read(filename), binding, filename) + end + + # Loads a YAML translations file. The data must have locales as + # toplevel keys. + def load_yml(filename) + begin + YAML.load_file(filename) + rescue TypeError, ScriptError, StandardError => e + raise InvalidLocaleData.new(filename, e.inspect) + end + end + alias_method :load_yaml, :load_yml + + # Loads a JSON translations file. The data must have locales as + # toplevel keys. + def load_json(filename) + begin + ::JSON.parse(File.read(filename)) + rescue TypeError, StandardError => e + raise InvalidLocaleData.new(filename, e.inspect) + end + end + + def translate_localization_format(locale, object, format, options) + format.to_s.gsub(/%(|\^)[aAbBpP]/) do |match| + case match + when '%a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday] + when '%^a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday].upcase + when '%A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday] + when '%^A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday].upcase + when '%b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon] + when '%^b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon].upcase + when '%B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon] + when '%^B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon].upcase + when '%p' then I18n.t!(:"time.#{object.hour < 12 ? :am : :pm}", :locale => locale, :format => format).upcase if object.respond_to? :hour + when '%P' then I18n.t!(:"time.#{object.hour < 12 ? :am : :pm}", :locale => locale, :format => format).downcase if object.respond_to? :hour + end + end + rescue MissingTranslationData => e + e.message + end + + def pluralization_key(entry, count) + key = :zero if count == 0 && entry.has_key?(:zero) + key ||= count == 1 ? :one : :other + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cache.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cache.rb new file mode 100644 index 0000000..41b58fc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cache.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +# This module allows you to easily cache all responses from the backend - thus +# speeding up the I18n aspects of your application quite a bit. +# +# To enable caching you can simply include the Cache module to the Simple +# backend - or whatever other backend you are using: +# +# I18n::Backend::Simple.send(:include, I18n::Backend::Cache) +# +# You will also need to set a cache store implementation that you want to use: +# +# I18n.cache_store = ActiveSupport::Cache.lookup_store(:memory_store) +# +# You can use any cache implementation you want that provides the same API as +# ActiveSupport::Cache (only the methods #fetch and #write are being used). +# +# The cache_key implementation by default assumes you pass values that return +# a valid key from #hash (see +# http://www.ruby-doc.org/core/classes/Object.html#M000337). 
However, you can +# configure your own digest method via which responds to #hexdigest (see +# http://ruby-doc.org/stdlib/libdoc/digest/rdoc/index.html): +# +# I18n.cache_key_digest = Digest::MD5.new +# +# If you use a lambda as a default value in your translation like this: +# +# I18n.t(:"date.order", :default => lambda {[:month, :day, :year]}) +# +# Then you will always have a cache miss, because each time this method +# is called the lambda will have a different hash value. If you know +# the result of the lambda is a constant as in the example above, then +# to cache this you can make the lambda a constant, like this: +# +# DEFAULT_DATE_ORDER = lambda {[:month, :day, :year]} +# ... +# I18n.t(:"date.order", :default => DEFAULT_DATE_ORDER) +# +# If the lambda may result in different values for each call then consider +# also using the Memoize backend. +# +module I18n + class << self + @@cache_store = nil + @@cache_namespace = nil + @@cache_key_digest = nil + + def cache_store + @@cache_store + end + + def cache_store=(store) + @@cache_store = store + end + + def cache_namespace + @@cache_namespace + end + + def cache_namespace=(namespace) + @@cache_namespace = namespace + end + + def cache_key_digest + @@cache_key_digest + end + + def cache_key_digest=(key_digest) + @@cache_key_digest = key_digest + end + + def perform_caching? + !cache_store.nil? + end + end + + module Backend + # TODO Should the cache be cleared if new translations are stored? + module Cache + def translate(locale, key, options = EMPTY_HASH) + I18n.perform_caching? ? fetch(cache_key(locale, key, options)) { super } : super + end + + protected + + def fetch(cache_key, &block) + result = _fetch(cache_key, &block) + throw(:exception, result) if result.is_a?(MissingTranslation) + result = result.dup if result.frozen? rescue result + result + end + + def _fetch(cache_key, &block) + result = I18n.cache_store.read(cache_key) + return result unless result.nil? + result = catch(:exception, &block) + I18n.cache_store.write(cache_key, result) unless result.is_a?(Proc) + result + end + + def cache_key(locale, key, options) + # This assumes that only simple, native Ruby values are passed to I18n.translate. + "i18n/#{I18n.cache_namespace}/#{locale}/#{digest_item(key)}/#{digest_item(options)}" + end + + private + + def digest_item(key) + I18n.cache_key_digest ? I18n.cache_key_digest.hexdigest(key.to_s) : key.to_s.hash + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cache_file.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cache_file.rb new file mode 100644 index 0000000..4dafb3e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cache_file.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require 'digest/sha2' + +module I18n + module Backend + # Overwrites the Base load_file method to cache loaded file contents. + module CacheFile + # Optionally provide path_roots array to normalize filename paths, + # to make the cached i18n data portable across environments. + attr_accessor :path_roots + + protected + + # Track loaded translation files in the `i18n.load_file` scope, + # and skip loading the file if its contents are still up-to-date. + def load_file(filename) + initialized = !respond_to?(:initialized?) || initialized? 
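+        # Fetch the previously stored [mtime, digest] pair for this file (only
+        # possible once the backend is initialized) and skip reloading when the
+        # file's mtime or its digest still matches.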
+ key = I18n::Backend::Flatten.escape_default_separator(normalized_path(filename)) + old_mtime, old_digest = initialized && lookup(:i18n, key, :load_file) + return if (mtime = File.mtime(filename).to_i) == old_mtime || + (digest = Digest::SHA2.file(filename).hexdigest) == old_digest + super + store_translations(:i18n, load_file: { key => [mtime, digest] }) + end + + # Translate absolute filename to relative path for i18n key. + def normalized_path(file) + return file unless path_roots + path = path_roots.find(&file.method(:start_with?)) || + raise(InvalidLocaleData.new(file, 'outside expected path roots')) + file.sub(path, path_roots.index(path).to_s) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cascade.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cascade.rb new file mode 100644 index 0000000..782b07b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/cascade.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +# The Cascade module adds the ability to do cascading lookups to backends that +# are compatible to the Simple backend. +# +# By cascading lookups we mean that for any key that can not be found the +# Cascade module strips one segment off the scope part of the key and then +# tries to look up the key in that scope. +# +# E.g. when a lookup for the key :"foo.bar.baz" does not yield a result then +# the segment :bar will be stripped off the scope part :"foo.bar" and the new +# scope :foo will be used to look up the key :baz. If that does not succeed +# then the remaining scope segment :foo will be omitted, too, and again the +# key :baz will be looked up (now with no scope). +# +# To enable a cascading lookup one passes the :cascade option: +# +# I18n.t(:'foo.bar.baz', :cascade => true) +# +# This will return the first translation found for :"foo.bar.baz", :"foo.baz" +# or :baz in this order. +# +# The cascading lookup takes precedence over resolving any given defaults. +# I.e. defaults will kick in after the cascading lookups haven't succeeded. +# +# This behavior is useful for libraries like ActiveRecord validations where +# the library wants to give users a bunch of more or less fine-grained options +# of scopes for a particular key. +# +# Thanks to Clemens Kofler for the initial idea and implementation! See +# http://github.com/clemens/i18n-cascading-backend + +module I18n + module Backend + module Cascade + def lookup(locale, key, scope = [], options = EMPTY_HASH) + return super unless cascade = options[:cascade] + + cascade = { :step => 1 } unless cascade.is_a?(Hash) + step = cascade[:step] || 1 + offset = cascade[:offset] || 1 + separator = options[:separator] || I18n.default_separator + skip_root = cascade.has_key?(:skip_root) ? cascade[:skip_root] : true + + scope = I18n.normalize_keys(nil, key, scope, separator) + key = (scope.slice!(-offset, offset) || []).join(separator) + + begin + result = super + return result unless result.nil? + scope = scope.dup + end while (!scope.empty? 
|| !skip_root) && scope.slice!(-step, step) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/chain.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/chain.rb new file mode 100644 index 0000000..9ab8575 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/chain.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +module I18n + module Backend + # Backend that chains multiple other backends and checks each of them when + # a translation needs to be looked up. This is useful when you want to use + # standard translations with a Simple backend but store custom application + # translations in a database or other backends. + # + # To use the Chain backend instantiate it and set it to the I18n module. + # You can add chained backends through the initializer or backends + # accessor: + # + # # preserves the existing Simple backend set to I18n.backend + # I18n.backend = I18n::Backend::Chain.new(I18n::Backend::ActiveRecord.new, I18n.backend) + # + # The implementation assumes that all backends added to the Chain implement + # a lookup method with the same API as Simple backend does. + class Chain + using I18n::HashRefinements + + module Implementation + include Base + + attr_accessor :backends + + def initialize(*backends) + self.backends = backends + end + + def initialized? + backends.all? do |backend| + backend.instance_eval do + return false unless initialized? + end + end + true + end + + def reload! + backends.each { |backend| backend.reload! } + end + + def eager_load! + backends.each { |backend| backend.eager_load! } + end + + def store_translations(locale, data, options = EMPTY_HASH) + backends.first.store_translations(locale, data, options) + end + + def available_locales + backends.map { |backend| backend.available_locales }.flatten.uniq + end + + def translate(locale, key, default_options = EMPTY_HASH) + namespace = nil + options = default_options.except(:default) + + backends.each do |backend| + catch(:exception) do + options = default_options if backend == backends.last + translation = backend.translate(locale, key, options) + if namespace_lookup?(translation, options) + namespace = _deep_merge(translation, namespace || {}) + elsif !translation.nil? || (options.key?(:default) && options[:default].nil?) + return translation + end + end + end + + return namespace if namespace + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + def exists?(locale, key, options = EMPTY_HASH) + backends.any? do |backend| + backend.exists?(locale, key, options) + end + end + + def localize(locale, object, format = :default, options = EMPTY_HASH) + backends.each do |backend| + catch(:exception) do + result = backend.localize(locale, object, format, options) and return result + end + end + throw(:exception, I18n::MissingTranslation.new(locale, format, options)) + end + + protected + def init_translations + backends.each do |backend| + backend.send(:init_translations) + end + end + + def translations + backends.reverse.each_with_object({}) do |backend, memo| + partial_translations = backend.instance_eval do + init_translations unless initialized? + translations + end + memo.deep_merge!(partial_translations) { |_, a, b| b || a } + end + end + + def namespace_lookup?(result, options) + result.is_a?(Hash) && !options.has_key?(:count) + end + + private + # This is approximately what gets used in ActiveSupport. 
+ # However since we are not guaranteed to run in an ActiveSupport context + # it is wise to have our own copy. We underscore it + # to not pollute the namespace of the including class. + def _deep_merge(hash, other_hash) + copy = hash.dup + other_hash.each_pair do |k,v| + value_from_other = hash[k] + copy[k] = value_from_other.is_a?(Hash) && v.is_a?(Hash) ? _deep_merge(value_from_other, v) : v + end + copy + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/fallbacks.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/fallbacks.rb new file mode 100644 index 0000000..ce8635b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/fallbacks.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +# I18n locale fallbacks are useful when you want your application to use +# translations from other locales when translations for the current locale are +# missing. E.g. you might want to use :en translations when translations in +# your applications main locale :de are missing. +# +# To enable locale fallbacks you can simply include the Fallbacks module to +# the Simple backend - or whatever other backend you are using: +# +# I18n::Backend::Simple.include(I18n::Backend::Fallbacks) +module I18n + @@fallbacks = nil + + class << self + # Returns the current fallbacks implementation. Defaults to +I18n::Locale::Fallbacks+. + def fallbacks + @@fallbacks ||= I18n::Locale::Fallbacks.new + end + + # Sets the current fallbacks implementation. Use this to set a different fallbacks implementation. + def fallbacks=(fallbacks) + @@fallbacks = fallbacks.is_a?(Array) ? I18n::Locale::Fallbacks.new(fallbacks) : fallbacks + end + end + + module Backend + module Fallbacks + # Overwrites the Base backend translate method so that it will try each + # locale given by I18n.fallbacks for the given locale. E.g. for the + # locale :"de-DE" it might try the locales :"de-DE", :de and :en + # (depends on the fallbacks implementation) until it finds a result with + # the given options. If it does not find any result for any of the + # locales it will then throw MissingTranslation as usual. + # + # The default option takes precedence over fallback locales only when + # it's a Symbol. When the default contains a String, Proc or Hash + # it is evaluated last after all the fallback locales have been tried. + def translate(locale, key, options = EMPTY_HASH) + return super unless options.fetch(:fallback, true) + return super if options[:fallback_in_progress] + default = extract_non_symbol_default!(options) if options[:default] + + fallback_options = options.merge(:fallback_in_progress => true) + I18n.fallbacks[locale].each do |fallback| + begin + catch(:exception) do + result = super(fallback, key, fallback_options) + unless result.nil? + on_fallback(locale, fallback, key, options) if locale != fallback + return result + end + end + rescue I18n::InvalidLocale + # we do nothing when the locale is invalid, as this is a fallback anyways. + end + end + + return if options.key?(:default) && options[:default].nil? 
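+        # A non-Symbol default extracted above (String, Proc or Hash) is only
+        # evaluated here, after all fallback locales have been tried.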
+ + return super(locale, nil, options.merge(:default => default)) if default + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + def extract_non_symbol_default!(options) + defaults = [options[:default]].flatten + first_non_symbol_default = defaults.detect{|default| !default.is_a?(Symbol)} + if first_non_symbol_default + options[:default] = defaults[0, defaults.index(first_non_symbol_default)] + end + return first_non_symbol_default + end + + def exists?(locale, key, options = EMPTY_HASH) + return super unless options.fetch(:fallback, true) + I18n.fallbacks[locale].each do |fallback| + begin + return true if super(fallback, key) + rescue I18n::InvalidLocale + # we do nothing when the locale is invalid, as this is a fallback anyways. + end + end + + false + end + + private + + # Overwrite on_fallback to add specified logic when the fallback succeeds. + def on_fallback(_original_locale, _fallback_locale, _key, _optoins) + nil + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/flatten.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/flatten.rb new file mode 100644 index 0000000..e9bd9d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/flatten.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +module I18n + module Backend + # This module contains several helpers to assist flattening translations. + # You may want to flatten translations for: + # + # 1) speed up lookups, as in the Memoize backend; + # 2) In case you want to store translations in a data store, as in ActiveRecord backend; + # + # You can check both backends above for some examples. + # This module also keeps all links in a hash so they can be properly resolved when flattened. + module Flatten + SEPARATOR_ESCAPE_CHAR = "\001" + FLATTEN_SEPARATOR = "." + + # normalize_keys the flatten way. This method is significantly faster + # and creates way less objects than the one at I18n.normalize_keys. + # It also handles escaping the translation keys. + def self.normalize_flat_keys(locale, key, scope, separator) + keys = [scope, key] + keys.flatten! + keys.compact! + + separator ||= I18n.default_separator + + if separator != FLATTEN_SEPARATOR + from_str = "#{FLATTEN_SEPARATOR}#{separator}" + to_str = "#{SEPARATOR_ESCAPE_CHAR}#{FLATTEN_SEPARATOR}" + + keys.map! { |k| k.to_s.tr from_str, to_str } + end + + keys.join(".") + end + + # Receives a string and escape the default separator. + def self.escape_default_separator(key) #:nodoc: + key.to_s.tr(FLATTEN_SEPARATOR, SEPARATOR_ESCAPE_CHAR) + end + + # Shortcut to I18n::Backend::Flatten.normalize_flat_keys + # and then resolve_links. + def normalize_flat_keys(locale, key, scope, separator) + key = I18n::Backend::Flatten.normalize_flat_keys(locale, key, scope, separator) + resolve_link(locale, key) + end + + # Store flattened links. 
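+      # For example (illustrative keys), storing the link
+      # :"activemodel.errors" => :"activerecord.errors" lets a lookup of
+      # "activemodel.errors.messages.blank" resolve to
+      # "activerecord.errors.messages.blank".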
+ def links + @links ||= I18n.new_double_nested_cache + end + + # Flatten keys for nested Hashes by chaining up keys: + # + # >> { "a" => { "b" => { "c" => "d", "e" => "f" }, "g" => "h" }, "i" => "j"}.wind + # => { "a.b.c" => "d", "a.b.e" => "f", "a.g" => "h", "i" => "j" } + # + def flatten_keys(hash, escape, prev_key=nil, &block) + hash.each_pair do |key, value| + key = escape_default_separator(key) if escape + curr_key = [prev_key, key].compact.join(FLATTEN_SEPARATOR).to_sym + yield curr_key, value + flatten_keys(value, escape, curr_key, &block) if value.is_a?(Hash) + end + end + + # Receives a hash of translations (where the key is a locale and + # the value is another hash) and return a hash with all + # translations flattened. + # + # Nested hashes are included in the flattened hash just if subtree + # is true and Symbols are automatically stored as links. + def flatten_translations(locale, data, escape, subtree) + hash = {} + flatten_keys(data, escape) do |key, value| + if value.is_a?(Hash) + hash[key] = value if subtree + else + store_link(locale, key, value) if value.is_a?(Symbol) + hash[key] = value + end + end + hash + end + + protected + + def store_link(locale, key, link) + links[locale.to_sym][key.to_s] = link.to_s + end + + def resolve_link(locale, key) + key, locale = key.to_s, locale.to_sym + links = self.links[locale] + + if links.key?(key) + links[key] + elsif link = find_link(locale, key) + store_link(locale, key, key.gsub(*link)) + else + key + end + end + + def find_link(locale, key) #:nodoc: + links[locale].each_pair do |from, to| + return [from, to] if key[0, from.length] == from + end && nil + end + + def escape_default_separator(key) #:nodoc: + I18n::Backend::Flatten.escape_default_separator(key) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/gettext.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/gettext.rb new file mode 100644 index 0000000..72d20f0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/gettext.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require 'i18n/gettext' +require 'i18n/gettext/po_parser' + +module I18n + module Backend + # Experimental support for using Gettext po files to store translations. + # + # To use this you can simply include the module to the Simple backend - or + # whatever other backend you are using. + # + # I18n::Backend::Simple.include(I18n::Backend::Gettext) + # + # Now you should be able to include your Gettext translation (*.po) files to + # the +I18n.load_path+ so they're loaded to the backend and you can use them as + # usual: + # + # I18n.load_path += Dir["path/to/locales/*.po"] + # + # Following the Gettext convention this implementation expects that your + # translation files are named by their locales. E.g. the file en.po would + # contain the translations for the English locale. + # + # To translate text you must use one of the translate methods provided by + # I18n::Gettext::Helpers. + # + # include I18n::Gettext::Helpers + # puts _("some string") + # + # Without it strings containing periods (".") will not be translated. 
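+    # Scoped lookups go through sgettext with a separator, e.g. (illustrative):
+    #
+    #   s_("Fruits|apple")   # looks up "apple" within the "Fruits" scope,
+    #                        # falling back to the msgid "apple" itself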
+ + module Gettext + using I18n::HashRefinements + + class PoData < Hash + def set_comment(msgid_or_sym, comment) + # ignore + end + end + + protected + def load_po(filename) + locale = ::File.basename(filename, '.po').to_sym + data = normalize(locale, parse(filename)) + { locale => data } + end + + def parse(filename) + GetText::PoParser.new.parse(::File.read(filename), PoData.new) + end + + def normalize(locale, data) + data.inject({}) do |result, (key, value)| + unless key.nil? || key.empty? + key = key.gsub(I18n::Gettext::CONTEXT_SEPARATOR, '|') + key, value = normalize_pluralization(locale, key, value) if key.index("\000") + + parts = key.split('|').reverse + normalized = parts.inject({}) do |_normalized, part| + { part => _normalized.empty? ? value : _normalized } + end + + result.deep_merge!(normalized) + end + result + end + end + + def normalize_pluralization(locale, key, value) + # FIXME po_parser includes \000 chars that can not be turned into Symbols + key = key.gsub("\000", I18n::Gettext::PLURAL_SEPARATOR).split(I18n::Gettext::PLURAL_SEPARATOR).first + + keys = I18n::Gettext.plural_keys(locale) + values = value.split("\000") + raise "invalid number of plurals: #{values.size}, keys: #{keys.inspect} on #{locale} locale for msgid #{key.inspect} with values #{values.inspect}" if values.size != keys.size + + result = {} + values.each_with_index { |_value, ix| result[keys[ix]] = _value } + [key, result] + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/interpolation_compiler.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/interpolation_compiler.rb new file mode 100644 index 0000000..8b52e7b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/interpolation_compiler.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +# The InterpolationCompiler module contains optimizations that can tremendously +# speed up the interpolation process on the Simple backend. +# +# It works by defining a pre-compiled method on stored translation Strings that +# already bring all the knowledge about contained interpolation variables etc. +# so that the actual recurring interpolation will be very fast. +# +# To enable pre-compiled interpolations you can simply include the +# InterpolationCompiler module to the Simple backend: +# +# I18n::Backend::Simple.include(I18n::Backend::InterpolationCompiler) +# +# Note that InterpolationCompiler does not yield meaningful results and consequently +# should not be used with Ruby 1.9 (YARV) but improves performance everywhere else +# (jRuby, Rubinius). +module I18n + module Backend + module InterpolationCompiler + module Compiler + extend self + + TOKENIZER = /(%%\{[^\}]+\}|%\{[^\}]+\})/ + INTERPOLATION_SYNTAX_PATTERN = /(%)?(%\{([^\}]+)\})/ + + def compile_if_an_interpolation(string) + if interpolated_str?(string) + string.instance_eval <<-RUBY_EVAL, __FILE__, __LINE__ + def i18n_interpolate(v = {}) + "#{compiled_interpolation_body(string)}" + end + RUBY_EVAL + end + + string + end + + def interpolated_str?(str) + str.kind_of?(::String) && str =~ INTERPOLATION_SYNTAX_PATTERN + end + + protected + # tokenize("foo %{bar} baz %%{buz}") # => ["foo ", "%{bar}", " baz ", "%%{buz}"] + def tokenize(str) + str.split(TOKENIZER) + end + + def compiled_interpolation_body(str) + tokenize(str).map do |token| + (matchdata = token.match(INTERPOLATION_SYNTAX_PATTERN)) ? 
handle_interpolation_token(token, matchdata) : escape_plain_str(token) + end.join + end + + def handle_interpolation_token(interpolation, matchdata) + escaped, pattern, key = matchdata.values_at(1, 2, 3) + escaped ? pattern : compile_interpolation_token(key.to_sym) + end + + def compile_interpolation_token(key) + "\#{#{interpolate_or_raise_missing(key)}}" + end + + def interpolate_or_raise_missing(key) + escaped_key = escape_key_sym(key) + RESERVED_KEYS.include?(key) ? reserved_key(escaped_key) : interpolate_key(escaped_key) + end + + def interpolate_key(key) + [direct_key(key), nil_key(key), missing_key(key)].join('||') + end + + def direct_key(key) + "((t = v[#{key}]) && t.respond_to?(:call) ? t.call : t)" + end + + def nil_key(key) + "(v.has_key?(#{key}) && '')" + end + + def missing_key(key) + "I18n.config.missing_interpolation_argument_handler.call(#{key}, v, self)" + end + + def reserved_key(key) + "raise(ReservedInterpolationKey.new(#{key}, self))" + end + + def escape_plain_str(str) + str.gsub(/"|\\|#/) {|x| "\\#{x}"} + end + + def escape_key_sym(key) + # rely on Ruby to do all the hard work :) + key.to_sym.inspect + end + end + + def interpolate(locale, string, values) + if string.respond_to?(:i18n_interpolate) + string.i18n_interpolate(values) + elsif values + super + else + string + end + end + + def store_translations(locale, data, options = EMPTY_HASH) + compile_all_strings_in(data) + super + end + + protected + def compile_all_strings_in(data) + data.each_value do |value| + Compiler.compile_if_an_interpolation(value) + compile_all_strings_in(value) if value.kind_of?(Hash) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/key_value.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/key_value.rb new file mode 100644 index 0000000..e391672 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/key_value.rb @@ -0,0 +1,206 @@ +# frozen_string_literal: true + +require 'i18n/backend/base' + +module I18n + + begin + require 'oj' + class JSON + class << self + def encode(value) + Oj::Rails.encode(value) + end + def decode(value) + Oj.load(value) + end + end + end + rescue LoadError + require 'active_support/json' + JSON = ActiveSupport::JSON + end + + module Backend + # This is a basic backend for key value stores. It receives on + # initialization the store, which should respond to three methods: + # + # * store#[](key) - Used to get a value + # * store#[]=(key, value) - Used to set a value + # * store#keys - Used to get all keys + # + # Since these stores only supports string, all values are converted + # to JSON before being stored, allowing it to also store booleans, + # hashes and arrays. However, this store does not support Procs. + # + # As the ActiveRecord backend, Symbols are just supported when loading + # translations from the filesystem or through explicit store translations. + # + # Also, avoid calling I18n.available_locales since it's a somehow + # expensive operation in most stores. + # + # == Example + # + # To setup I18n to use TokyoCabinet in memory is quite straightforward: + # + # require 'rufus/tokyo/cabinet' # gem install rufus-tokyo + # I18n.backend = I18n::Backend::KeyValue.new(Rufus::Tokyo::Cabinet.new('*')) + # + # == Performance + # + # You may make this backend even faster by including the Memoize module. + # However, notice that you should properly clear the cache if you change + # values directly in the key-store. 
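+    # A minimal sketch of that setup:
+    #
+    #   I18n::Backend::KeyValue.include(I18n::Backend::Memoize)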
+ # + # == Subtrees + # + # In most backends, you are allowed to retrieve part of a translation tree: + # + # I18n.backend.store_translations :en, :foo => { :bar => :baz } + # I18n.t "foo" #=> { :bar => :baz } + # + # This backend supports this feature by default, but it slows down the storage + # of new data considerably and makes hard to delete entries. That said, you are + # allowed to disable the storage of subtrees on initialization: + # + # I18n::Backend::KeyValue.new(@store, false) + # + # This is useful if you are using a KeyValue backend chained to a Simple backend. + class KeyValue + using I18n::HashRefinements + + module Implementation + attr_accessor :store + + include Base, Flatten + + def initialize(store, subtrees=true) + @store, @subtrees = store, subtrees + end + + def initialized? + !@store.nil? + end + + def store_translations(locale, data, options = EMPTY_HASH) + escape = options.fetch(:escape, true) + flatten_translations(locale, data, escape, @subtrees).each do |key, value| + key = "#{locale}.#{key}" + + case value + when Hash + if @subtrees && (old_value = @store[key]) + old_value = JSON.decode(old_value) + value = old_value.deep_symbolize_keys.deep_merge!(value) if old_value.is_a?(Hash) + end + when Proc + raise "Key-value stores cannot handle procs" + end + + @store[key] = JSON.encode(value) unless value.is_a?(Symbol) + end + end + + def available_locales + locales = @store.keys.map { |k| k =~ /\./; $` } + locales.uniq! + locales.compact! + locales.map! { |k| k.to_sym } + locales + end + + protected + + # Queries the translations from the key-value store and converts + # them into a hash such as the one returned from loading the + # haml files + def translations + @translations = @store.keys.clone.map do |main_key| + main_value = JSON.decode(@store[main_key]) + main_key.to_s.split(".").reverse.inject(main_value) do |value, key| + {key.to_sym => value} + end + end.inject{|hash, elem| hash.deep_merge!(elem)}.deep_symbolize_keys + end + + def init_translations + # NO OP + # This call made also inside Simple Backend and accessed by + # other plugins like I18n-js and babilu and + # to use it along with the Chain backend we need to + # provide a uniform API even for protected methods :S + end + + def subtrees? + @subtrees + end + + def lookup(locale, key, scope = [], options = EMPTY_HASH) + key = normalize_flat_keys(locale, key, scope, options[:separator]) + value = @store["#{locale}.#{key}"] + value = JSON.decode(value) if value + + if value.is_a?(Hash) + value.deep_symbolize_keys + elsif !value.nil? + value + elsif !@subtrees + SubtreeProxy.new("#{locale}.#{key}", @store) + end + end + + def pluralize(locale, entry, count) + if subtrees? + super + else + return entry unless entry.is_a?(Hash) + key = pluralization_key(entry, count) + entry[key] + end + end + end + + class SubtreeProxy + def initialize(master_key, store) + @master_key = master_key + @store = store + @subtree = nil + end + + def has_key?(key) + @subtree && @subtree.has_key?(key) || self[key] + end + + def [](key) + unless @subtree && value = @subtree[key] + value = @store["#{@master_key}.#{key}"] + if value + value = JSON.decode(value) + (@subtree ||= {})[key] = value + end + end + value + end + + def is_a?(klass) + Hash == klass || super + end + alias :kind_of? :is_a? + + def instance_of?(klass) + Hash == klass || super + end + + def nil? + @subtree.nil? 
+ end + + def inspect + @subtree.inspect + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/memoize.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/memoize.rb new file mode 100644 index 0000000..3293d2b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/memoize.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +# Memoize module simply memoizes the values returned by lookup using +# a flat hash and can tremendously speed up the lookup process in a backend. +# +# To enable it you can simply include the Memoize module to your backend: +# +# I18n::Backend::Simple.include(I18n::Backend::Memoize) +# +# Notice that it's the responsibility of the backend to define whenever the +# cache should be cleaned. +module I18n + module Backend + module Memoize + def available_locales + @memoized_locales ||= super + end + + def store_translations(locale, data, options = EMPTY_HASH) + reset_memoizations!(locale) + super + end + + def reload! + reset_memoizations! + super + end + + def eager_load! + memoized_lookup + available_locales + super + end + + protected + + def lookup(locale, key, scope = nil, options = EMPTY_HASH) + flat_key = I18n::Backend::Flatten.normalize_flat_keys(locale, + key, scope, options[:separator]).to_sym + flat_hash = memoized_lookup[locale.to_sym] + flat_hash.key?(flat_key) ? flat_hash[flat_key] : (flat_hash[flat_key] = super) + end + + def memoized_lookup + @memoized_lookup ||= I18n.new_double_nested_cache + end + + def reset_memoizations!(locale=nil) + @memoized_locales = nil + (locale ? memoized_lookup[locale.to_sym] : memoized_lookup).clear + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/metadata.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/metadata.rb new file mode 100644 index 0000000..51ea7a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/metadata.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +# I18n translation metadata is useful when you want to access information +# about how a translation was looked up, pluralized or interpolated in +# your application. +# +# msg = I18n.t(:message, :default => 'Hi!', :scope => :foo) +# msg.translation_metadata +# # => { :key => :message, :scope => :foo, :default => 'Hi!' } +# +# If a :count option was passed to #translate it will be set to the metadata. +# Likewise, if any interpolation variables were passed they will also be set. +# +# To enable translation metadata you can simply include the Metadata module +# into the Simple backend class - or whatever other backend you are using: +# +# I18n::Backend::Simple.include(I18n::Backend::Metadata) +# +module I18n + module Backend + module Metadata + class << self + def included(base) + Object.class_eval do + def translation_metadata + unless self.frozen? + @translation_metadata ||= {} + else + {} + end + end + + def translation_metadata=(translation_metadata) + @translation_metadata = translation_metadata unless self.frozen? 
+ end + end unless Object.method_defined?(:translation_metadata) + end + end + + def translate(locale, key, options = EMPTY_HASH) + metadata = { + :locale => locale, + :key => key, + :scope => options[:scope], + :default => options[:default], + :separator => options[:separator], + :values => options.reject { |name, _value| RESERVED_KEYS.include?(name) } + } + with_metadata(metadata) { super } + end + + def interpolate(locale, entry, values = EMPTY_HASH) + metadata = entry.translation_metadata.merge(:original => entry) + with_metadata(metadata) { super } + end + + def pluralize(locale, entry, count) + with_metadata(:count => count) { super } + end + + protected + + def with_metadata(metadata, &block) + result = yield + result.translation_metadata = result.translation_metadata.merge(metadata) if result + result + end + + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/pluralization.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/pluralization.rb new file mode 100644 index 0000000..b602657 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/pluralization.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +# I18n Pluralization are useful when you want your application to +# customize pluralization rules. +# +# To enable locale specific pluralizations you can simply include the +# Pluralization module to the Simple backend - or whatever other backend you +# are using. +# +# I18n::Backend::Simple.include(I18n::Backend::Pluralization) +# +# You also need to make sure to provide pluralization algorithms to the +# backend, i.e. include them to your I18n.load_path accordingly. +module I18n + module Backend + module Pluralization + # Overwrites the Base backend translate method so that it will check the + # translation meta data space (:i18n) for a locale specific pluralization + # rule and use it to pluralize the given entry. I.e. the library expects + # pluralization rules to be stored at I18n.t(:'i18n.plural.rule') + # + # Pluralization rules are expected to respond to #call(count) and + # return a pluralization key. Valid keys depend on the translation data + # hash (entry) but it is generally recommended to follow CLDR's style, + # i.e., return one of the keys :zero, :one, :few, :many, :other. + # + # The :zero key is always picked directly when count equals 0 AND the + # translation data has the key :zero. This way translators are free to + # either pick a special :zero translation even for languages where the + # pluralizer does not return a :zero key. + def pluralize(locale, entry, count) + return entry unless entry.is_a?(Hash) && count + + pluralizer = pluralizer(locale) + if pluralizer.respond_to?(:call) + key = count == 0 && entry.has_key?(:zero) ? 
:zero : pluralizer.call(count) + raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) + entry[key] + else + super + end + end + + protected + + def pluralizers + @pluralizers ||= {} + end + + def pluralizer(locale) + pluralizers[locale] ||= I18n.t(:'i18n.plural.rule', :locale => locale, :resolve => false) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/simple.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/simple.rb new file mode 100644 index 0000000..ed74146 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/simple.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require 'i18n/backend/base' + +module I18n + module Backend + # A simple backend that reads translations from YAML files and stores them in + # an in-memory hash. Relies on the Base backend. + # + # The implementation is provided by a Implementation module allowing to easily + # extend Simple backend's behavior by including modules. E.g.: + # + # module I18n::Backend::Pluralization + # def pluralize(*args) + # # extended pluralization logic + # super + # end + # end + # + # I18n::Backend::Simple.include(I18n::Backend::Pluralization) + class Simple + using I18n::HashRefinements + + module Implementation + include Base + + def initialized? + @initialized ||= false + end + + # Stores translations for the given locale in memory. + # This uses a deep merge for the translations hash, so existing + # translations will be overwritten by new ones only at the deepest + # level of the hash. + def store_translations(locale, data, options = EMPTY_HASH) + if I18n.enforce_available_locales && + I18n.available_locales_initialized? && + !I18n.available_locales.include?(locale.to_sym) && + !I18n.available_locales.include?(locale.to_s) + return data + end + locale = locale.to_sym + translations[locale] ||= {} + data = data.deep_symbolize_keys + translations[locale].deep_merge!(data) + end + + # Get available locales from the translations hash + def available_locales + init_translations unless initialized? + translations.inject([]) do |locales, (locale, data)| + locales << locale unless data.size <= 1 && (data.empty? || data.has_key?(:i18n)) + locales + end + end + + # Clean up translations hash and set initialized to false on reload! + def reload! + @initialized = false + @translations = nil + super + end + + def eager_load! + init_translations unless initialized? + super + end + + def translations(do_init: false) + # To avoid returning empty translations, + # call `init_translations` + init_translations if do_init && !initialized? + + @translations ||= {} + end + + protected + + def init_translations + load_translations + @initialized = true + end + + # Looks up a translation from the translations hash. Returns nil if + # either key is nil, or locale, scope or key do not exist as a key in the + # nested translations hash. Splits keys or scopes containing dots + # into multiple keys, i.e. currency.format is regarded the same as + # %w(currency format). + def lookup(locale, key, scope = [], options = EMPTY_HASH) + init_translations unless initialized? 
+ keys = I18n.normalize_keys(locale, key, scope, options[:separator]) + + keys.inject(translations) do |result, _key| + return nil unless result.is_a?(Hash) + unless result.has_key?(_key) + _key = _key.to_s.to_sym + return nil unless result.has_key?(_key) + end + result = result[_key] + result = resolve(locale, _key, result, options.merge(:scope => nil)) if result.is_a?(Symbol) + result + end + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/transliterator.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/transliterator.rb new file mode 100644 index 0000000..bb704ab --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/backend/transliterator.rb @@ -0,0 +1,108 @@ +# encoding: utf-8 +# frozen_string_literal: true + +module I18n + module Backend + module Transliterator + DEFAULT_REPLACEMENT_CHAR = "?" + + # Given a locale and a UTF-8 string, return the locale's ASCII + # approximation for the string. + def transliterate(locale, string, replacement = nil) + @transliterators ||= {} + @transliterators[locale] ||= Transliterator.get I18n.t(:'i18n.transliterate.rule', + :locale => locale, :resolve => false, :default => {}) + @transliterators[locale].transliterate(string, replacement) + end + + # Get a transliterator instance. + def self.get(rule = nil) + if !rule || rule.kind_of?(Hash) + HashTransliterator.new(rule) + elsif rule.kind_of? Proc + ProcTransliterator.new(rule) + else + raise I18n::ArgumentError, "Transliteration rule must be a proc or a hash." + end + end + + # A transliterator which accepts a Proc as its transliteration rule. + class ProcTransliterator + def initialize(rule) + @rule = rule + end + + def transliterate(string, replacement = nil) + @rule.call(string) + end + end + + # A transliterator which accepts a Hash of characters as its translation + # rule. 
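+      # An illustrative use with a hypothetical custom rule:
+      #
+      #   I18n::Backend::Transliterator.get("ü" => "ue").transliterate("über")
+      #   # => "ueber"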
+ class HashTransliterator + DEFAULT_APPROXIMATIONS = { + "À"=>"A", "Á"=>"A", "Â"=>"A", "Ã"=>"A", "Ä"=>"A", "Å"=>"A", "Æ"=>"AE", + "Ç"=>"C", "È"=>"E", "É"=>"E", "Ê"=>"E", "Ë"=>"E", "Ì"=>"I", "Í"=>"I", + "Î"=>"I", "Ï"=>"I", "Ð"=>"D", "Ñ"=>"N", "Ò"=>"O", "Ó"=>"O", "Ô"=>"O", + "Õ"=>"O", "Ö"=>"O", "×"=>"x", "Ø"=>"O", "Ù"=>"U", "Ú"=>"U", "Û"=>"U", + "Ü"=>"U", "Ý"=>"Y", "Þ"=>"Th", "ß"=>"ss", "à"=>"a", "á"=>"a", "â"=>"a", + "ã"=>"a", "ä"=>"a", "å"=>"a", "æ"=>"ae", "ç"=>"c", "è"=>"e", "é"=>"e", + "ê"=>"e", "ë"=>"e", "ì"=>"i", "í"=>"i", "î"=>"i", "ï"=>"i", "ð"=>"d", + "ñ"=>"n", "ò"=>"o", "ó"=>"o", "ô"=>"o", "õ"=>"o", "ö"=>"o", "ø"=>"o", + "ù"=>"u", "ú"=>"u", "û"=>"u", "ü"=>"u", "ý"=>"y", "þ"=>"th", "ÿ"=>"y", + "Ā"=>"A", "ā"=>"a", "Ă"=>"A", "ă"=>"a", "Ą"=>"A", "ą"=>"a", "Ć"=>"C", + "ć"=>"c", "Ĉ"=>"C", "ĉ"=>"c", "Ċ"=>"C", "ċ"=>"c", "Č"=>"C", "č"=>"c", + "Ď"=>"D", "ď"=>"d", "Đ"=>"D", "đ"=>"d", "Ē"=>"E", "ē"=>"e", "Ĕ"=>"E", + "ĕ"=>"e", "Ė"=>"E", "ė"=>"e", "Ę"=>"E", "ę"=>"e", "Ě"=>"E", "ě"=>"e", + "Ĝ"=>"G", "ĝ"=>"g", "Ğ"=>"G", "ğ"=>"g", "Ġ"=>"G", "ġ"=>"g", "Ģ"=>"G", + "ģ"=>"g", "Ĥ"=>"H", "ĥ"=>"h", "Ħ"=>"H", "ħ"=>"h", "Ĩ"=>"I", "ĩ"=>"i", + "Ī"=>"I", "ī"=>"i", "Ĭ"=>"I", "ĭ"=>"i", "Į"=>"I", "į"=>"i", "İ"=>"I", + "ı"=>"i", "IJ"=>"IJ", "ij"=>"ij", "Ĵ"=>"J", "ĵ"=>"j", "Ķ"=>"K", "ķ"=>"k", + "ĸ"=>"k", "Ĺ"=>"L", "ĺ"=>"l", "Ļ"=>"L", "ļ"=>"l", "Ľ"=>"L", "ľ"=>"l", + "Ŀ"=>"L", "ŀ"=>"l", "Ł"=>"L", "ł"=>"l", "Ń"=>"N", "ń"=>"n", "Ņ"=>"N", + "ņ"=>"n", "Ň"=>"N", "ň"=>"n", "ʼn"=>"'n", "Ŋ"=>"NG", "ŋ"=>"ng", + "Ō"=>"O", "ō"=>"o", "Ŏ"=>"O", "ŏ"=>"o", "Ő"=>"O", "ő"=>"o", "Œ"=>"OE", + "œ"=>"oe", "Ŕ"=>"R", "ŕ"=>"r", "Ŗ"=>"R", "ŗ"=>"r", "Ř"=>"R", "ř"=>"r", + "Ś"=>"S", "ś"=>"s", "Ŝ"=>"S", "ŝ"=>"s", "Ş"=>"S", "ş"=>"s", "Š"=>"S", + "š"=>"s", "Ţ"=>"T", "ţ"=>"t", "Ť"=>"T", "ť"=>"t", "Ŧ"=>"T", "ŧ"=>"t", + "Ũ"=>"U", "ũ"=>"u", "Ū"=>"U", "ū"=>"u", "Ŭ"=>"U", "ŭ"=>"u", "Ů"=>"U", + "ů"=>"u", "Ű"=>"U", "ű"=>"u", "Ų"=>"U", "ų"=>"u", "Ŵ"=>"W", "ŵ"=>"w", + "Ŷ"=>"Y", "ŷ"=>"y", "Ÿ"=>"Y", "Ź"=>"Z", "ź"=>"z", "Ż"=>"Z", "ż"=>"z", + "Ž"=>"Z", "ž"=>"z" + }.freeze + + def initialize(rule = nil) + @rule = rule + add_default_approximations + add rule if rule + end + + def transliterate(string, replacement = nil) + replacement ||= DEFAULT_REPLACEMENT_CHAR + string.gsub(/[^\x00-\x7f]/u) do |char| + approximations[char] || replacement + end + end + + private + + def approximations + @approximations ||= {} + end + + def add_default_approximations + DEFAULT_APPROXIMATIONS.each do |key, value| + approximations[key] = value + end + end + + # Add transliteration rules to the approximations hash. + def add(hash) + hash.each do |key, value| + approximations[key.to_s] = value.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/config.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/config.rb new file mode 100644 index 0000000..ea3dd1e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/config.rb @@ -0,0 +1,165 @@ +# frozen_string_literal: true + +require 'set' + +module I18n + class Config + # The only configuration value that is not global and scoped to thread is :locale. + # It defaults to the default_locale. + def locale + defined?(@locale) && @locale != nil ? @locale : default_locale + end + + # Sets the current locale pseudo-globally, i.e. in the Thread.current hash. + def locale=(locale) + I18n.enforce_available_locales!(locale) + @locale = locale && locale.to_sym + end + + # Returns the current backend. Defaults to +Backend::Simple+. 
+ def backend + @@backend ||= Backend::Simple.new + end + + # Sets the current backend. Used to set a custom backend. + def backend=(backend) + @@backend = backend + end + + # Returns the current default locale. Defaults to :'en' + def default_locale + @@default_locale ||= :en + end + + # Sets the current default locale. Used to set a custom default locale. + def default_locale=(locale) + I18n.enforce_available_locales!(locale) + @@default_locale = locale && locale.to_sym + end + + # Returns an array of locales for which translations are available. + # Unless you explicitely set these through I18n.available_locales= + # the call will be delegated to the backend. + def available_locales + @@available_locales ||= nil + @@available_locales || backend.available_locales + end + + # Caches the available locales list as both strings and symbols in a Set, so + # that we can have faster lookups to do the available locales enforce check. + def available_locales_set #:nodoc: + @@available_locales_set ||= available_locales.inject(Set.new) do |set, locale| + set << locale.to_s << locale.to_sym + end + end + + # Sets the available locales. + def available_locales=(locales) + @@available_locales = Array(locales).map { |locale| locale.to_sym } + @@available_locales = nil if @@available_locales.empty? + @@available_locales_set = nil + end + + # Returns true if the available_locales have been initialized + def available_locales_initialized? + ( !!defined?(@@available_locales) && !!@@available_locales ) + end + + # Clears the available locales set so it can be recomputed again after I18n + # gets reloaded. + def clear_available_locales_set #:nodoc: + @@available_locales_set = nil + end + + # Returns the current default scope separator. Defaults to '.' + def default_separator + @@default_separator ||= '.' + end + + # Sets the current default scope separator. + def default_separator=(separator) + @@default_separator = separator + end + + # Returns the current exception handler. Defaults to an instance of + # I18n::ExceptionHandler. + def exception_handler + @@exception_handler ||= ExceptionHandler.new + end + + # Sets the exception handler. + def exception_handler=(exception_handler) + @@exception_handler = exception_handler + end + + # Returns the current handler for situations when interpolation argument + # is missing. MissingInterpolationArgument will be raised by default. + def missing_interpolation_argument_handler + @@missing_interpolation_argument_handler ||= lambda do |missing_key, provided_hash, string| + raise MissingInterpolationArgument.new(missing_key, provided_hash, string) + end + end + + # Sets the missing interpolation argument handler. It can be any + # object that responds to #call. The arguments that will be passed to #call + # are the same as for MissingInterpolationArgument initializer. Use +Proc.new+ + # if you don't care about arity. + # + # == Example: + # You can supress raising an exception and return string instead: + # + # I18n.config.missing_interpolation_argument_handler = Proc.new do |key| + # "#{key} is missing" + # end + def missing_interpolation_argument_handler=(exception_handler) + @@missing_interpolation_argument_handler = exception_handler + end + + # Allow clients to register paths providing translation data sources. The + # backend defines acceptable sources. + # + # E.g. the provided SimpleBackend accepts a list of paths to translation + # files which are either named *.rb and contain plain Ruby Hashes or are + # named *.yml and contain YAML data. 
So for the SimpleBackend clients may + # register translation files like this: + # I18n.load_path << 'path/to/locale/en.yml' + def load_path + @@load_path ||= [] + end + + # Sets the load path instance. Custom implementations are expected to + # behave like a Ruby Array. + def load_path=(load_path) + @@load_path = load_path + @@available_locales_set = nil + backend.reload! + end + + # Whether or not to verify if locales are in the list of available locales. + # Defaults to true. + @@enforce_available_locales = true + def enforce_available_locales + @@enforce_available_locales + end + + def enforce_available_locales=(enforce_available_locales) + @@enforce_available_locales = enforce_available_locales + end + + # Returns the current interpolation patterns. Defaults to + # I18n::DEFAULT_INTERPOLATION_PATTERNS. + def interpolation_patterns + @@interpolation_patterns ||= I18n::DEFAULT_INTERPOLATION_PATTERNS.dup + end + + # Sets the current interpolation patterns. Used to set a interpolation + # patterns. + # + # E.g. using {{}} as a placeholder like "{{hello}}, world!": + # + # I18n.config.interpolation_patterns << /\{\{(\w+)\}\}/ + def interpolation_patterns=(interpolation_patterns) + @@interpolation_patterns = interpolation_patterns + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/core_ext/hash.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/core_ext/hash.rb new file mode 100644 index 0000000..775d490 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/core_ext/hash.rb @@ -0,0 +1,59 @@ +module I18n + module HashRefinements + refine Hash do + using I18n::HashRefinements + def except(*keys) + dup.except!(*keys) + end + + def except!(*keys) + keys.each { |key| delete(key) } + self + end + + def deep_symbolize_keys + each_with_object({}) do |(key, value), result| + result[symbolize_key(key)] = deep_symbolize_keys_in_object(value) + result + end + end + + # deep_merge from activesupport 5 + # Copyright (c) 2005-2019 David Heinemeier Hansson + def deep_merge(other_hash, &block) + dup.deep_merge!(other_hash, &block) + end + + # deep_merge! from activesupport 5 + # Copyright (c) 2005-2019 David Heinemeier Hansson + def deep_merge!(other_hash, &block) + merge!(other_hash) do |key, this_val, other_val| + if this_val.is_a?(Hash) && other_val.is_a?(Hash) + this_val.deep_merge(other_val, &block) + elsif block_given? + block.call(key, this_val, other_val) + else + other_val + end + end + end + + def symbolize_key(key) + key.respond_to?(:to_sym) ? key.to_sym : key + end + + private + + def deep_symbolize_keys_in_object(value) + case value + when Hash + value.deep_symbolize_keys + when Array + value.map { |e| deep_symbolize_keys_in_object(e) } + else + value + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/exceptions.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/exceptions.rb new file mode 100644 index 0000000..74b3d34 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/exceptions.rb @@ -0,0 +1,111 @@ +# frozen_string_literal: true + +require 'cgi' + +module I18n + class ExceptionHandler + def call(exception, _locale, _key, _options) + if exception.is_a?(MissingTranslation) + exception.message + else + raise exception + end + end + end + + class ArgumentError < ::ArgumentError; end + + class Disabled < ArgumentError + def initialize(method) + super(<<~MESSAGE) + I18n.#{method} is currently disabled, likely because your application is still in its loading phase. 
+ + This method is meant to display text in the user locale, so calling it before the user locale has + been set is likely to display text from the wrong locale to some users. + + If you have a legitimate reason to access i18n data outside of the user flow, you can do so by passing + the desired locale explictly with the `locale` argument, e.g. `I18n.#{method}(..., locale: :en)` + MESSAGE + end + end + + class InvalidLocale < ArgumentError + attr_reader :locale + def initialize(locale) + @locale = locale + super "#{locale.inspect} is not a valid locale" + end + end + + class InvalidLocaleData < ArgumentError + attr_reader :filename + def initialize(filename, exception_message) + @filename, @exception_message = filename, exception_message + super "can not load translations from #{filename}: #{exception_message}" + end + end + + class MissingTranslation < ArgumentError + module Base + attr_reader :locale, :key, :options + + def initialize(locale, key, options = EMPTY_HASH) + @key, @locale, @options = key, locale, options.dup + options.each { |k, v| self.options[k] = v.inspect if v.is_a?(Proc) } + end + + def keys + @keys ||= I18n.normalize_keys(locale, key, options[:scope]).tap do |keys| + keys << 'no key' if keys.size < 2 + end + end + + def message + "translation missing: #{keys.join('.')}" + end + alias :to_s :message + + def to_exception + MissingTranslationData.new(locale, key, options) + end + end + + include Base + end + + class MissingTranslationData < ArgumentError + include MissingTranslation::Base + end + + class InvalidPluralizationData < ArgumentError + attr_reader :entry, :count, :key + def initialize(entry, count, key) + @entry, @count, @key = entry, count, key + super "translation data #{entry.inspect} can not be used with :count => #{count}. key '#{key}' is missing." + end + end + + class MissingInterpolationArgument < ArgumentError + attr_reader :key, :values, :string + def initialize(key, values, string) + @key, @values, @string = key, values, string + super "missing interpolation argument #{key.inspect} in #{string.inspect} (#{values.inspect} given)" + end + end + + class ReservedInterpolationKey < ArgumentError + attr_reader :key, :string + def initialize(key, string) + @key, @string = key, string + super "reserved key #{key.inspect} used in #{string.inspect}" + end + end + + class UnknownFileType < ArgumentError + attr_reader :type, :filename + def initialize(type, filename) + @type, @filename = type, filename + super "can not load translations from #{filename}, the file type #{type} is not known" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext.rb new file mode 100644 index 0000000..858daff --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module I18n + module Gettext + PLURAL_SEPARATOR = "\001" + CONTEXT_SEPARATOR = "\004" + + autoload :Helpers, 'i18n/gettext/helpers' + + @@plural_keys = { :en => [:one, :other] } + + class << self + # returns an array of plural keys for the given locale or the whole hash + # of locale mappings to plural keys so that we can convert from gettext's + # integer-index based style + # TODO move this information to the pluralization module + def plural_keys(*args) + args.empty? ? 
@@plural_keys : @@plural_keys[args.first] || @@plural_keys[:en] + end + + def extract_scope(msgid, separator) + scope = msgid.to_s.split(separator) + msgid = scope.pop + [scope, msgid] + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext/helpers.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext/helpers.rb new file mode 100644 index 0000000..d077619 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext/helpers.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require 'i18n/gettext' + +module I18n + module Gettext + # Implements classical Gettext style accessors. To use this include the + # module to the global namespace or wherever you want to use it. + # + # include I18n::Gettext::Helpers + module Helpers + # Makes dynamic translation messages readable for the gettext parser. + # _(fruit) cannot be understood by the gettext parser. To help the parser find all your translations, + # you can add fruit = N_("Apple") which does not translate, but tells the parser: "Apple" needs translation. + # * msgid: the message id. + # * Returns: msgid. + def N_(msgsid) + msgsid + end + + def gettext(msgid, options = EMPTY_HASH) + I18n.t(msgid, **{:default => msgid, :separator => '|'}.merge(options)) + end + alias _ gettext + + def sgettext(msgid, separator = '|') + scope, msgid = I18n::Gettext.extract_scope(msgid, separator) + I18n.t(msgid, :scope => scope, :default => msgid, :separator => separator) + end + alias s_ sgettext + + def pgettext(msgctxt, msgid) + separator = I18n::Gettext::CONTEXT_SEPARATOR + sgettext([msgctxt, msgid].join(separator), separator) + end + alias p_ pgettext + + def ngettext(msgid, msgid_plural, n = 1) + nsgettext(msgid, msgid_plural, n) + end + alias n_ ngettext + + # Method signatures: + # nsgettext('Fruits|apple', 'apples', 2) + # nsgettext(['Fruits|apple', 'apples'], 2) + def nsgettext(msgid, msgid_plural, n = 1, separator = '|') + if msgid.is_a?(Array) + msgid, msgid_plural, n, separator = msgid[0], msgid[1], msgid_plural, n + separator = '|' unless separator.is_a?(::String) + end + + scope, msgid = I18n::Gettext.extract_scope(msgid, separator) + default = { :one => msgid, :other => msgid_plural } + I18n.t(msgid, :default => default, :count => n, :scope => scope, :separator => separator) + end + alias ns_ nsgettext + + # Method signatures: + # npgettext('Fruits', 'apple', 'apples', 2) + # npgettext('Fruits', ['apple', 'apples'], 2) + def npgettext(msgctxt, msgid, msgid_plural, n = 1) + separator = I18n::Gettext::CONTEXT_SEPARATOR + + if msgid.is_a?(Array) + msgid_plural, msgid, n = msgid[1], [msgctxt, msgid[0]].join(separator), msgid_plural + else + msgid = [msgctxt, msgid].join(separator) + end + + nsgettext(msgid, msgid_plural, n, separator) + end + alias np_ npgettext + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext/po_parser.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext/po_parser.rb new file mode 100644 index 0000000..a07fdc5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/gettext/po_parser.rb @@ -0,0 +1,329 @@ +=begin + poparser.rb - Generate a .mo + + Copyright (C) 2003-2009 Masao Mutoh + + You may redistribute it and/or modify it under the same + license terms as Ruby. 
+=end + +#MODIFIED +# removed include GetText etc +# added stub translation method _(x) +require 'racc/parser' + +module GetText + + class PoParser < Racc::Parser + + def _(x) + x + end + +module_eval <<'..end src/poparser.ry modeval..id7a99570e05', 'src/poparser.ry', 108 + def unescape(orig) + ret = orig.gsub(/\\n/, "\n") + ret.gsub!(/\\t/, "\t") + ret.gsub!(/\\r/, "\r") + ret.gsub!(/\\"/, "\"") + ret + end + + def parse(str, data, ignore_fuzzy = true) + @comments = [] + @data = data + @fuzzy = false + @msgctxt = "" + $ignore_fuzzy = ignore_fuzzy + + str.strip! + @q = [] + until str.empty? do + case str + when /\A\s+/ + str = $' + when /\Amsgctxt/ + @q.push [:MSGCTXT, $&] + str = $' + when /\Amsgid_plural/ + @q.push [:MSGID_PLURAL, $&] + str = $' + when /\Amsgid/ + @q.push [:MSGID, $&] + str = $' + when /\Amsgstr/ + @q.push [:MSGSTR, $&] + str = $' + when /\A\[(\d+)\]/ + @q.push [:PLURAL_NUM, $1] + str = $' + when /\A\#~(.*)/ + $stderr.print _("Warning: obsolete msgid exists.\n") + $stderr.print " #{$&}\n" + @q.push [:COMMENT, $&] + str = $' + when /\A\#(.*)/ + @q.push [:COMMENT, $&] + str = $' + when /\A\"(.*)\"/ + @q.push [:STRING, $1] + str = $' + else + #c = str[0,1] + #@q.push [:STRING, c] + str = str[1..-1] + end + end + @q.push [false, '$end'] + if $DEBUG + @q.each do |a,b| + puts "[#{a}, #{b}]" + end + end + @yydebug = true if $DEBUG + do_parse + + if @comments.size > 0 + @data.set_comment(:last, @comments.join("\n")) + end + @data + end + + def next_token + @q.shift + end + + def on_message(msgid, msgstr) + if msgstr.size > 0 + @data[msgid] = msgstr + @data.set_comment(msgid, @comments.join("\n")) + end + @comments.clear + @msgctxt = "" + end + + def on_comment(comment) + @fuzzy = true if (/fuzzy/ =~ comment) + @comments << comment + end + + +..end src/poparser.ry modeval..id7a99570e05 + +##### racc 1.4.5 generates ### + +racc_reduce_table = [ + 0, 0, :racc_error, + 0, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 12, :_reduce_5, + 1, 13, :_reduce_none, + 1, 13, :_reduce_none, + 4, 15, :_reduce_8, + 5, 16, :_reduce_9, + 2, 17, :_reduce_10, + 1, 17, :_reduce_none, + 3, 18, :_reduce_12, + 1, 11, :_reduce_13, + 2, 14, :_reduce_14, + 1, 14, :_reduce_15 ] + +racc_reduce_n = 16 + +racc_shift_n = 26 + +racc_action_table = [ + 3, 13, 5, 7, 9, 15, 16, 17, 20, 17, + 13, 17, 13, 13, 11, 17, 23, 20, 13, 17 ] + +racc_action_check = [ + 1, 16, 1, 1, 1, 12, 12, 12, 18, 18, + 7, 14, 15, 9, 3, 19, 20, 21, 23, 25 ] + +racc_action_pointer = [ + nil, 0, nil, 14, nil, nil, nil, 3, nil, 6, + nil, nil, 0, nil, 4, 5, -6, nil, 2, 8, + 8, 11, nil, 11, nil, 12 ] + +racc_action_default = [ + -1, -16, -2, -16, -3, -13, -4, -16, -6, -16, + -7, 26, -16, -15, -5, -16, -16, -14, -16, -8, + -16, -9, -11, -16, -10, -12 ] + +racc_goto_table = [ + 12, 22, 14, 4, 24, 6, 2, 8, 18, 19, + 10, 21, 1, nil, nil, nil, 25 ] + +racc_goto_check = [ + 5, 9, 5, 3, 9, 4, 2, 6, 5, 5, + 7, 8, 1, nil, nil, nil, 5 ] + +racc_goto_pointer = [ + nil, 12, 5, 2, 4, -7, 6, 9, -7, -17 ] + +racc_goto_default = [ + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil ] + +racc_token_table = { + false => 0, + Object.new => 1, + :COMMENT => 2, + :MSGID => 3, + :MSGCTXT => 4, + :MSGID_PLURAL => 5, + :MSGSTR => 6, + :STRING => 7, + :PLURAL_NUM => 8 } + +racc_use_result_var = true + +racc_nt_base = 9 + +Racc_arg = [ + racc_action_table, + racc_action_check, + racc_action_default, + racc_action_pointer, + racc_goto_table, + racc_goto_check, + racc_goto_default, + racc_goto_pointer, + racc_nt_base, + 
racc_reduce_table, + racc_token_table, + racc_shift_n, + racc_reduce_n, + racc_use_result_var ] + +Racc_token_to_s_table = [ +'$end', +'error', +'COMMENT', +'MSGID', +'MSGCTXT', +'MSGID_PLURAL', +'MSGSTR', +'STRING', +'PLURAL_NUM', +'$start', +'msgfmt', +'comment', +'msgctxt', +'message', +'string_list', +'single_message', +'plural_message', +'msgstr_plural', +'msgstr_plural_line'] + +Racc_debug_parser = true + +##### racc system variables end ##### + + # reduce 0 omitted + + # reduce 1 omitted + + # reduce 2 omitted + + # reduce 3 omitted + + # reduce 4 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 25 + def _reduce_5( val, _values, result ) + @msgctxt = unescape(val[1]) + "\004" + result + end +.,., + + # reduce 6 omitted + + # reduce 7 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 48 + def _reduce_8( val, _values, result ) + if @fuzzy and $ignore_fuzzy + if val[1] != "" + $stderr.print _("Warning: fuzzy message was ignored.\n") + $stderr.print " msgid '#{val[1]}'\n" + else + on_message('', unescape(val[3])) + end + @fuzzy = false + else + on_message(@msgctxt + unescape(val[1]), unescape(val[3])) + end + result = "" + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 65 + def _reduce_9( val, _values, result ) + if @fuzzy and $ignore_fuzzy + if val[1] != "" + $stderr.print _("Warning: fuzzy message was ignored.\n") + $stderr.print "msgid = '#{val[1]}\n" + else + on_message('', unescape(val[3])) + end + @fuzzy = false + else + on_message(@msgctxt + unescape(val[1]) + "\000" + unescape(val[3]), unescape(val[4])) + end + result = "" + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 76 + def _reduce_10( val, _values, result ) + if val[0].size > 0 + result = val[0] + "\000" + val[1] + else + result = "" + end + result + end +.,., + + # reduce 11 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 84 + def _reduce_12( val, _values, result ) + result = val[2] + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 91 + def _reduce_13( val, _values, result ) + on_comment(val[0]) + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 99 + def _reduce_14( val, _values, result ) + result = val.delete_if{|item| item == ""}.join + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 103 + def _reduce_15( val, _values, result ) + result = val[0] + result + end +.,., + + def _reduce_none( val, _values, result ) + result + end + + end # class PoParser + +end # module GetText diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/interpolate/ruby.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/interpolate/ruby.rb new file mode 100644 index 0000000..13171d3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/interpolate/ruby.rb @@ -0,0 +1,39 @@ +# heavily based on Masao Mutoh's gettext String interpolation extension +# http://github.com/mutoh/gettext/blob/f6566738b981fe0952548c421042ad1e0cdfb31e/lib/gettext/core_ext/string.rb + +module I18n + DEFAULT_INTERPOLATION_PATTERNS = [ + /%%/, + /%\{([\w|]+)\}/, # matches placeholders like "%{foo} or %{foo|word}" + /%<(\w+)>(.*?\d*\.?\d*[bBdiouxXeEfgGcps])/ # matches placeholders like "%.d" + ].freeze + INTERPOLATION_PATTERN = Regexp.union(DEFAULT_INTERPOLATION_PATTERNS) + deprecate_constant :INTERPOLATION_PATTERN + + class << self + # Return String or raises MissingInterpolationArgument exception. + # Missing argument's logic is handled by I18n.config.missing_interpolation_argument_handler. 
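A rough usage sketch (not part of the vendored interpolate/ruby.rb; the example strings and values are made up for illustration) of how the interpolate entry point defined just below behaves once the gem is loaded with require 'i18n':

    require 'i18n'

    # Plain placeholder substitution with the default %{} pattern.
    I18n.interpolate('Hi %{name}!', { name: 'David' })   # => "Hi David!"

    # '%%' escapes a literal percent sign instead of acting as a placeholder.
    I18n.interpolate('Progress: 100%%', {})              # => "Progress: 100%"

    # A placeholder with no matching key is handed to
    # I18n.config.missing_interpolation_argument_handler, which raises
    # I18n::MissingInterpolationArgument by default.
    I18n.interpolate('Hi %{name}!', { foo: 'bar' })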
+ def interpolate(string, values) + raise ReservedInterpolationKey.new($1.to_sym, string) if string =~ RESERVED_KEYS_PATTERN + raise ArgumentError.new('Interpolation values must be a Hash.') unless values.kind_of?(Hash) + interpolate_hash(string, values) + end + + def interpolate_hash(string, values) + string.gsub(Regexp.union(config.interpolation_patterns)) do |match| + if match == '%%' + '%' + else + key = ($1 || $2 || match.tr("%{}", "")).to_sym + value = if values.key?(key) + values[key] + else + config.missing_interpolation_argument_handler.call(key, values, string) + end + value = value.call(values) if value.respond_to?(:call) + $3 ? sprintf("%#{$3}", value) : value + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale.rb new file mode 100644 index 0000000..c4078e6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +module I18n + module Locale + autoload :Fallbacks, 'i18n/locale/fallbacks' + autoload :Tag, 'i18n/locale/tag' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/fallbacks.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/fallbacks.rb new file mode 100644 index 0000000..94909aa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/fallbacks.rb @@ -0,0 +1,99 @@ +# Locale Fallbacks +# +# Extends the I18n module to hold a fallbacks instance which is set to an +# instance of I18n::Locale::Fallbacks by default but can be swapped with a +# different implementation. +# +# Locale fallbacks will compute a number of fallback locales for a given locale. +# For example: +# +#
+# I18n.fallbacks[:"es-MX"] # => [:"es-MX", :es, :en] 
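A minimal runnable sketch of the fallback computation documented in this comment block (an editor-supplied illustration, not part of the vendored fallbacks.rb; the locale symbols are arbitrary), using the Fallbacks class defined further down in the file:

    require 'i18n'

    # One explicit default locale (:en) plus a :"de-AT" => :"de-DE" mapping.
    fallbacks = I18n::Locale::Fallbacks.new(:en, :"de-AT" => :"de-DE")

    # Parents of the requested locale come first, then mapped locales, then the defaults.
    fallbacks[:"de-AT"]  # => [:"de-AT", :de, :"de-DE", :en]
    fallbacks[:fr]       # => [:fr, :en]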
+# +# Locale fallbacks always fall back to +# +# * all parent locales of a given locale (e.g. :es for :"es-MX") first, +# * the current default locales and all of their parents second +# +# The default locales are set to [I18n.default_locale] by default but can be +# set to something else. +# +# One can additionally add any number of additional fallback locales manually. +# These will be added before the default locales to the fallback chain. For +# example: +# +# # using the default locale as default fallback locale +# +# I18n.default_locale = :"en-US" +# I18n.fallbacks = I18n::Locale::Fallbacks.new(:"de-AT" => :"de-DE") +# I18n.fallbacks[:"de-AT"] # => [:"de-AT", :de, :"de-DE"] +# +# # using a custom locale as default fallback locale +# +# I18n.fallbacks = I18n::Locale::Fallbacks.new(:"en-GB", :"de-AT" => :de, :"de-CH" => :de) +# I18n.fallbacks[:"de-AT"] # => [:"de-AT", :de, :"en-GB", :en] +# I18n.fallbacks[:"de-CH"] # => [:"de-CH", :de, :"en-GB", :en] +# +# # mapping fallbacks to an existing instance +# +# # people speaking Catalan also speak Spanish as spoken in Spain +# fallbacks = I18n.fallbacks +# fallbacks.map(:ca => :"es-ES") +# fallbacks[:ca] # => [:ca, :"es-ES", :es, :"en-US", :en] +# +# # people speaking Arabian as spoken in Palestine also speak Hebrew as spoken in Israel +# fallbacks.map(:"ar-PS" => :"he-IL") +# fallbacks[:"ar-PS"] # => [:"ar-PS", :ar, :"he-IL", :he, :"en-US", :en] +# fallbacks[:"ar-EG"] # => [:"ar-EG", :ar, :"en-US", :en] +# +# # people speaking Sami as spoken in Finnland also speak Swedish and Finnish as spoken in Finnland +# fallbacks.map(:sms => [:"se-FI", :"fi-FI"]) +# fallbacks[:sms] # => [:sms, :"se-FI", :se, :"fi-FI", :fi, :"en-US", :en] + +module I18n + module Locale + class Fallbacks < Hash + def initialize(*mappings) + @map = {} + map(mappings.pop) if mappings.last.is_a?(Hash) + self.defaults = mappings.empty? ? [] : mappings + end + + def defaults=(defaults) + @defaults = defaults.flat_map { |default| compute(default, false) } + end + attr_reader :defaults + + def [](locale) + raise InvalidLocale.new(locale) if locale.nil? + raise Disabled.new('fallback#[]') if locale == false + locale = locale.to_sym + super || store(locale, compute(locale)) + end + + def map(mappings) + mappings.each do |from, to| + from, to = from.to_sym, Array(to) + to.each do |_to| + @map[from] ||= [] + @map[from] << _to.to_sym + end + end + end + + protected + + def compute(tags, include_defaults = true, exclude = []) + result = Array(tags).flat_map do |tag| + tags = I18n::Locale::Tag.tag(tag).self_and_parents.map! { |t| t.to_sym } - exclude + tags.each { |_tag| tags += compute(@map[_tag], false, exclude + tags) if @map[_tag] } + tags + end + result.push(*defaults) if include_defaults + result.uniq! + result.compact! + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag.rb new file mode 100644 index 0000000..a640b44 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag.rb @@ -0,0 +1,28 @@ +# encoding: utf-8 + +module I18n + module Locale + module Tag + autoload :Parents, 'i18n/locale/tag/parents' + autoload :Rfc4646, 'i18n/locale/tag/rfc4646' + autoload :Simple, 'i18n/locale/tag/simple' + + class << self + # Returns the current locale tag implementation. Defaults to +I18n::Locale::Tag::Simple+. + def implementation + @@implementation ||= Simple + end + + # Sets the current locale tag implementation. 
Use this to set a different locale tag implementation. + def implementation=(implementation) + @@implementation = implementation + end + + # Factory method for locale tags. Delegates to the current locale tag implementation. + def tag(tag) + implementation.tag(tag) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/parents.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/parents.rb new file mode 100644 index 0000000..6283e66 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/parents.rb @@ -0,0 +1,24 @@ +module I18n + module Locale + module Tag + module Parents + def parent + @parent ||= + begin + segs = to_a + segs.compact! + segs.length > 1 ? self.class.tag(*segs[0..(segs.length - 2)].join('-')) : nil + end + end + + def self_and_parents + @self_and_parents ||= [self].concat parents + end + + def parents + @parents ||= parent ? [parent].concat(parent.parents) : [] + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/rfc4646.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/rfc4646.rb new file mode 100644 index 0000000..4ce4c75 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/rfc4646.rb @@ -0,0 +1,74 @@ +# RFC 4646/47 compliant Locale tag implementation that parses locale tags to +# subtags such as language, script, region, variant etc. +# +# For more information see by http://en.wikipedia.org/wiki/IETF_language_tag +# +# Rfc4646::Parser does not implement grandfathered tags. + +module I18n + module Locale + module Tag + RFC4646_SUBTAGS = [ :language, :script, :region, :variant, :extension, :privateuse, :grandfathered ] + RFC4646_FORMATS = { :language => :downcase, :script => :capitalize, :region => :upcase, :variant => :downcase } + + class Rfc4646 < Struct.new(*RFC4646_SUBTAGS) + class << self + # Parses the given tag and returns a Tag instance if it is valid. + # Returns false if the given tag is not valid according to RFC 4646. + def tag(tag) + matches = parser.match(tag) + new(*matches) if matches + end + + def parser + @@parser ||= Rfc4646::Parser + end + + def parser=(parser) + @@parser = parser + end + end + + include Parents + + RFC4646_FORMATS.each do |name, format| + define_method(name) { self[name].send(format) unless self[name].nil? } + end + + def to_sym + to_s.to_sym + end + + def to_s + @tag ||= to_a.compact.join("-") + end + + def to_a + members.collect { |attr| self.send(attr) } + end + + module Parser + PATTERN = %r{\A(?: + ([a-z]{2,3}(?:(?:-[a-z]{3}){0,3})?|[a-z]{4}|[a-z]{5,8}) # language + (?:-([a-z]{4}))? # script + (?:-([a-z]{2}|\d{3}))? # region + (?:-([0-9a-z]{5,8}|\d[0-9a-z]{3}))* # variant + (?:-([0-9a-wyz](?:-[0-9a-z]{2,8})+))* # extension + (?:-(x(?:-[0-9a-z]{1,8})+))?| # privateuse subtag + (x(?:-[0-9a-z]{1,8})+)| # privateuse tag + /* ([a-z]{1,3}(?:-[0-9a-z]{2,8}){1,2}) */ # grandfathered + )\z}xi + + class << self + def match(tag) + c = PATTERN.match(tag.to_s).captures + c[0..4] << (c[5].nil? ? c[6] : c[5]) << c[7] # TODO c[7] is grandfathered, throw a NotImplemented exception here? 
+ rescue + false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/simple.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/simple.rb new file mode 100644 index 0000000..6d9ab56 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/locale/tag/simple.rb @@ -0,0 +1,39 @@ +# Simple Locale tag implementation that computes subtags by simply splitting +# the locale tag at '-' occurences. +module I18n + module Locale + module Tag + class Simple + class << self + def tag(tag) + new(tag) + end + end + + include Parents + + attr_reader :tag + + def initialize(*tag) + @tag = tag.join('-').to_sym + end + + def subtags + @subtags = tag.to_s.split('-').map!(&:to_s) + end + + def to_sym + tag + end + + def to_s + tag.to_s + end + + def to_a + subtags + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/middleware.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/middleware.rb new file mode 100644 index 0000000..59b377e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/middleware.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module I18n + class Middleware + + def initialize(app) + @app = app + end + + def call(env) + @app.call(env) + ensure + Thread.current[:i18n_config] = I18n::Config.new + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests.rb new file mode 100644 index 0000000..30d0ed5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module I18n + module Tests + autoload :Basics, 'i18n/tests/basics' + autoload :Defaults, 'i18n/tests/defaults' + autoload :Interpolation, 'i18n/tests/interpolation' + autoload :Link, 'i18n/tests/link' + autoload :Localization, 'i18n/tests/localization' + autoload :Lookup, 'i18n/tests/lookup' + autoload :Pluralization, 'i18n/tests/pluralization' + autoload :Procs, 'i18n/tests/procs' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/basics.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/basics.rb new file mode 100644 index 0000000..951d7ab --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/basics.rb @@ -0,0 +1,60 @@ +module I18n + module Tests + module Basics + def teardown + I18n.available_locales = nil + end + + test "available_locales returns the locales stored to the backend by default" do + I18n.backend.store_translations('de', :foo => 'bar') + I18n.backend.store_translations('en', :foo => 'foo') + + assert I18n.available_locales.include?(:de) + assert I18n.available_locales.include?(:en) + end + + test "available_locales can be set to something else independently from the actual locale data" do + I18n.backend.store_translations('de', :foo => 'bar') + I18n.backend.store_translations('en', :foo => 'foo') + + I18n.available_locales = :foo + assert_equal [:foo], I18n.available_locales + + I18n.available_locales = [:foo, 'bar'] + assert_equal [:foo, :bar], I18n.available_locales + + I18n.available_locales = nil + assert I18n.available_locales.include?(:de) + assert I18n.available_locales.include?(:en) + end + + test "available_locales memoizes when set explicitely" do + I18n.backend.expects(:available_locales).never + I18n.available_locales = [:foo] + I18n.backend.store_translations('de', :bar => 'baz') + I18n.reload! 
+ assert_equal [:foo], I18n.available_locales + end + + test "available_locales delegates to the backend when not set explicitely" do + original_available_locales_value = I18n.backend.available_locales + I18n.backend.expects(:available_locales).returns(original_available_locales_value).twice + assert_equal I18n.backend.available_locales, I18n.available_locales + end + + test "exists? is implemented by the backend" do + I18n.backend.store_translations(:foo, :bar => 'baz') + assert I18n.exists?(:bar, :foo) + end + + test "storing a nil value as a translation removes it from the available locale data" do + I18n.backend.store_translations(:en, :to_be_deleted => 'bar') + assert_equal 'bar', I18n.t(:to_be_deleted, :default => 'baz') + + I18n.cache_store.clear if I18n.respond_to?(:cache_store) && I18n.cache_store + I18n.backend.store_translations(:en, :to_be_deleted => nil) + assert_equal 'baz', I18n.t(:to_be_deleted, :default => 'baz') + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/defaults.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/defaults.rb new file mode 100644 index 0000000..b46af61 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/defaults.rb @@ -0,0 +1,52 @@ +# encoding: utf-8 + +module I18n + module Tests + module Defaults + def setup + super + I18n.backend.store_translations(:en, :foo => { :bar => 'bar', :baz => 'baz' }) + end + + test "defaults: given nil as a key it returns the given default" do + assert_equal 'default', I18n.t(nil, :default => 'default') + end + + test "defaults: given a symbol as a default it translates the symbol" do + assert_equal 'bar', I18n.t(nil, :default => :'foo.bar') + end + + test "defaults: given a symbol as a default and a scope it stays inside the scope when looking up the symbol" do + assert_equal 'bar', I18n.t(:missing, :default => :bar, :scope => :foo) + end + + test "defaults: given an array as a default it returns the first match" do + assert_equal 'bar', I18n.t(:does_not_exist, :default => [:does_not_exist_2, :'foo.bar']) + end + + test "defaults: given an array as a default with false it returns false" do + assert_equal false, I18n.t(:does_not_exist, :default => [false]) + end + + test "defaults: given false it returns false" do + assert_equal false, I18n.t(:does_not_exist, :default => false) + end + + test "defaults: given nil it returns nil" do + assert_nil I18n.t(:does_not_exist, :default => nil) + end + + test "defaults: given an array of missing keys it raises a MissingTranslationData exception" do + assert_raise I18n::MissingTranslationData do + I18n.t(:does_not_exist, :default => [:does_not_exist_2, :does_not_exist_3], :raise => true) + end + end + + test "defaults: using a custom scope separator" do + # data must have been stored using the custom separator when using the ActiveRecord backend + I18n.backend.store_translations(:en, { :foo => { :bar => 'bar' } }, { :separator => '|' }) + assert_equal 'bar', I18n.t(nil, :default => :'foo|bar', :separator => '|') + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/interpolation.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/interpolation.rb new file mode 100644 index 0000000..fd78804 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/interpolation.rb @@ -0,0 +1,163 @@ +# encoding: utf-8 + +module I18n + module Tests + module Interpolation + # If no interpolation parameter is not given, I18n should not alter the string. 
+ # This behavior is due to three reasons: + # + # * Checking interpolation keys in all strings hits performance, badly; + # + # * This allows us to retrieve untouched values through I18n. For example + # I could have a middleware that returns I18n lookup results in JSON + # to be processed through Javascript. Leaving the keys untouched allows + # the interpolation to happen at the javascript level; + # + # * Security concerns: if I allow users to translate a web site, they can + # insert %{} in messages causing the I18n lookup to fail in every request. + # + test "interpolation: given no values it does not alter the string" do + assert_equal 'Hi %{name}!', interpolate(:default => 'Hi %{name}!') + end + + test "interpolation: given values it interpolates them into the string" do + assert_equal 'Hi David!', interpolate(:default => 'Hi %{name}!', :name => 'David') + end + + test "interpolation: works with a pipe" do + assert_equal 'Hi david!', interpolate(:default => 'Hi %{name|lowercase}!', :'name|lowercase' => 'david') + end + + test "interpolation: given a nil value it still interpolates it into the string" do + assert_equal 'Hi !', interpolate(:default => 'Hi %{name}!', :name => nil) + end + + test "interpolation: given a lambda as a value it calls it if the string contains the key" do + assert_equal 'Hi David!', interpolate(:default => 'Hi %{name}!', :name => lambda { |*args| 'David' }) + end + + test "interpolation: given a lambda as a value it does not call it if the string does not contain the key" do + assert_nothing_raised { interpolate(:default => 'Hi!', :name => lambda { |*args| raise 'fail' }) } + end + + test "interpolation: given values but missing a key it raises I18n::MissingInterpolationArgument" do + assert_raise(I18n::MissingInterpolationArgument) do + interpolate(:default => '%{foo}', :bar => 'bar') + end + end + + test "interpolation: it does not raise I18n::MissingInterpolationArgument for escaped variables" do + assert_nothing_raised do + assert_equal 'Barr %{foo}', interpolate(:default => '%{bar} %%{foo}', :bar => 'Barr') + end + end + + test "interpolation: it does not change the original, stored translation string" do + I18n.backend.store_translations(:en, :interpolate => 'Hi %{name}!') + assert_equal 'Hi David!', interpolate(:interpolate, :name => 'David') + assert_equal 'Hi Yehuda!', interpolate(:interpolate, :name => 'Yehuda') + end + + test "interpolation: given an array interpolates each element" do + I18n.backend.store_translations(:en, :array_interpolate => ['Hi', 'Mr. %{name}', 'or sir %{name}']) + assert_equal ['Hi', 'Mr. 
Bartuz', 'or sir Bartuz'], interpolate(:array_interpolate, :name => 'Bartuz') + end + + test "interpolation: given the translation is in utf-8 it still works" do + assert_equal 'Häi David!', interpolate(:default => 'Häi %{name}!', :name => 'David') + end + + test "interpolation: given the value is in utf-8 it still works" do + assert_equal 'Hi ゆきひろ!', interpolate(:default => 'Hi %{name}!', :name => 'ゆきひろ') + end + + test "interpolation: given the translation and the value are in utf-8 it still works" do + assert_equal 'こんにちは、ゆきひろさん!', interpolate(:default => 'こんにちは、%{name}さん!', :name => 'ゆきひろ') + end + + if Object.const_defined?(:Encoding) + test "interpolation: given a euc-jp translation and a utf-8 value it raises Encoding::CompatibilityError" do + assert_raise(Encoding::CompatibilityError) do + interpolate(:default => euc_jp('こんにちは、%{name}さん!'), :name => 'ゆきひろ') + end + end + + test "interpolation: given a utf-8 translation and a euc-jp value it raises Encoding::CompatibilityError" do + assert_raise(Encoding::CompatibilityError) do + interpolate(:default => 'こんにちは、%{name}さん!', :name => euc_jp('ゆきひろ')) + end + end + + test "interpolation: ASCII strings in the backend should be encoded to UTF8 if interpolation options are in UTF8" do + I18n.backend.store_translations 'en', 'encoding' => ('%{who} let me go'.force_encoding("ASCII")) + result = I18n.t 'encoding', :who => "måmmå miå" + assert_equal Encoding::UTF_8, result.encoding + end + + test "interpolation: UTF8 strings in the backend are still returned as UTF8 with ASCII interpolation" do + I18n.backend.store_translations 'en', 'encoding' => 'måmmå miå %{what}' + result = I18n.t 'encoding', :what => 'let me go'.force_encoding("ASCII") + assert_equal Encoding::UTF_8, result.encoding + end + + test "interpolation: UTF8 strings in the backend are still returned as UTF8 even with numbers interpolation" do + I18n.backend.store_translations 'en', 'encoding' => '%{count} times: måmmå miå' + result = I18n.t 'encoding', :count => 3 + assert_equal Encoding::UTF_8, result.encoding + end + end + + test "interpolation: given a translations containing a reserved key it raises I18n::ReservedInterpolationKey" do + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{exception_handler}') } + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{default}') } + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{separator}') } + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{scope}') } + end + + test "interpolation: deep interpolation for default string" do + assert_equal 'Hi %{name}!', interpolate(:default => 'Hi %{name}!', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for interpolated string" do + assert_equal 'Hi Ann!', interpolate(:default => 'Hi %{name}!', :name => 'Ann', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for Hash" do + people = { :people => { :ann => 'Ann is %{ann}', :john => 'John is %{john}' } } + interpolated_people = { :people => { :ann => 'Ann is good', :john => 'John is big' } } + assert_equal interpolated_people, interpolate(:default => people, :ann => 'good', :john => 'big', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for Array" do + people = { :people => ['Ann is %{ann}', 'John is %{john}'] } + interpolated_people = { :people => ['Ann is good', 'John is big'] } + assert_equal 
interpolated_people, interpolate(:default => people, :ann => 'good', :john => 'big', :deep_interpolation => true) + end + + protected + + def capture(stream) + begin + stream = stream.to_s + eval "$#{stream} = StringIO.new" + yield + result = eval("$#{stream}").string + ensure + eval("$#{stream} = #{stream.upcase}") + end + + result + end + + def euc_jp(string) + string.encode!(Encoding::EUC_JP) + end + + def interpolate(*args) + options = args.last.is_a?(Hash) ? args.pop : {} + key = args.pop + I18n.backend.translate('en', key, options) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/link.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/link.rb new file mode 100644 index 0000000..d2f20e8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/link.rb @@ -0,0 +1,66 @@ +# encoding: utf-8 + +module I18n + module Tests + module Link + test "linked lookup: if a key resolves to a symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :link => :linked, + :linked => 'linked' + } + assert_equal 'linked', I18n.backend.translate('en', :link) + end + + test "linked lookup: if a key resolves to a dot-separated symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :link => :"foo.linked", + :foo => { :linked => 'linked' } + } + assert_equal('linked', I18n.backend.translate('en', :link)) + end + + test "linked lookup: if a dot-separated key resolves to a symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :foo => { :link => :linked }, + :linked => 'linked' + } + assert_equal('linked', I18n.backend.translate('en', :'foo.link')) + end + + test "linked lookup: if a dot-separated key resolves to a dot-separated symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :foo => { :link => :"bar.linked" }, + :bar => { :linked => 'linked' } + } + assert_equal('linked', I18n.backend.translate('en', :'foo.link')) + end + + test "linked lookup: links always refer to the absolute key" do + I18n.backend.store_translations 'en', { + :foo => { :link => :linked, :linked => 'linked in foo' }, + :linked => 'linked absolutely' + } + assert_equal 'linked absolutely', I18n.backend.translate('en', :link, :scope => :foo) + end + + test "linked lookup: a link can resolve to a namespace in the middle of a dot-separated key" do + I18n.backend.store_translations 'en', { + :activemodel => { :errors => { :messages => { :blank => "can't be blank" } } }, + :activerecord => { :errors => { :messages => :"activemodel.errors.messages" } } + } + assert_equal "can't be blank", I18n.t(:"activerecord.errors.messages.blank") + assert_equal "can't be blank", I18n.t(:"activerecord.errors.messages.blank") + end + + test "linked lookup: a link can resolve with option :count" do + I18n.backend.store_translations 'en', { + :counter => :counted, + :counted => { :foo => { :one => "one", :other => "other" }, :bar => "bar" } + } + assert_equal "one", I18n.t(:'counter.foo', count: 1) + assert_equal "other", I18n.t(:'counter.foo', count: 2) + assert_equal "bar", I18n.t(:'counter.bar', count: 3) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization.rb new file mode 100644 index 0000000..53b1502 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization.rb @@ -0,0 +1,19 @@ +module I18n + module Tests + module Localization + autoload :Date, 
'i18n/tests/localization/date' + autoload :DateTime, 'i18n/tests/localization/date_time' + autoload :Time, 'i18n/tests/localization/time' + autoload :Procs, 'i18n/tests/localization/procs' + + def self.included(base) + base.class_eval do + include I18n::Tests::Localization::Date + include I18n::Tests::Localization::DateTime + include I18n::Tests::Localization::Procs + include I18n::Tests::Localization::Time + end + end + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date.rb new file mode 100644 index 0000000..34fd7a3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date.rb @@ -0,0 +1,117 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Date + def setup + super + setup_date_translations + @date = ::Date.new(2008, 3, 1) + end + + test "localize Date: given the short format it uses it" do + assert_equal '01. Mär', I18n.l(@date, :format => :short, :locale => :de) + end + + test "localize Date: given the long format it uses it" do + assert_equal '01. März 2008', I18n.l(@date, :format => :long, :locale => :de) + end + + test "localize Date: given the default format it uses it" do + assert_equal '01.03.2008', I18n.l(@date, :format => :default, :locale => :de) + end + + test "localize Date: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@date, :format => '%A', :locale => :de) + end + + test "localize Date: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@date, :format => '%^A', :locale => :de) + end + + test "localize Date: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@date, :format => '%a', :locale => :de) + end + + test "localize Date: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@date, :format => '%^a', :locale => :de) + end + + test "localize Date: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@date, :format => '%B', :locale => :de) + end + + test "localize Date: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@date, :format => '%^B', :locale => :de) + end + + test "localize Date: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@date, :format => '%b', :locale => :de) + end + + test "localize Date: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@date, :format => '%^b', :locale => :de) + end + + test "localize Date: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::Date.new(2008, 2, 1), :format => "%-d. 
%^B %Y", :locale => :de) + end + + test "localize Date: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@date, :format => '%b', :locale => :fr) + end + + test "localize Date: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@date, :format => '%x') } + end + + test "localize Date: does not modify the options hash" do + options = { :format => '%b', :locale => :de } + assert_equal 'Mär', I18n.l(@date, **options) + assert_equal({ :format => '%b', :locale => :de }, options) + assert_nothing_raised { I18n.l(@date, **options.freeze) } + end + + test "localize Date: given nil with default value it returns default" do + assert_equal 'default', I18n.l(nil, :default => 'default') + end + + test "localize Date: given nil it raises I18n::ArgumentError" do + assert_raise(I18n::ArgumentError) { I18n.l(nil) } + end + + test "localize Date: given a plain Object it raises I18n::ArgumentError" do + assert_raise(I18n::ArgumentError) { I18n.l(Object.new) } + end + + test "localize Date: given a format is missing it raises I18n::MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.l(@date, :format => :missing) } + end + + test "localize Date: it does not alter the format string" do + assert_equal '01. Februar 2009', I18n.l(::Date.parse('2009-02-01'), :format => :long, :locale => :de) + assert_equal '01. Oktober 2009', I18n.l(::Date.parse('2009-10-01'), :format => :long, :locale => :de) + end + + protected + + def setup_date_translations + I18n.backend.store_translations :de, { + :date => { + :formats => { + :default => "%d.%m.%Y", + :short => "%d. %b", + :long => "%d. %B %Y", + }, + :day_names => %w(Sonntag Montag Dienstag Mittwoch Donnerstag Freitag Samstag), + :abbr_day_names => %w(So Mo Di Mi Do Fr Sa), + :month_names => %w(Januar Februar März April Mai Juni Juli August September Oktober November Dezember).unshift(nil), + :abbr_month_names => %w(Jan Feb Mär Apr Mai Jun Jul Aug Sep Okt Nov Dez).unshift(nil) + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date_time.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date_time.rb new file mode 100644 index 0000000..a4ed9ee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date_time.rb @@ -0,0 +1,103 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module DateTime + def setup + super + setup_datetime_translations + @datetime = ::DateTime.new(2008, 3, 1, 6) + @other_datetime = ::DateTime.new(2008, 3, 1, 18) + end + + test "localize DateTime: given the short format it uses it" do + assert_equal '01. Mär 06:00', I18n.l(@datetime, :format => :short, :locale => :de) + end + + test "localize DateTime: given the long format it uses it" do + assert_equal '01. März 2008 06:00', I18n.l(@datetime, :format => :long, :locale => :de) + end + + test "localize DateTime: given the default format it uses it" do + assert_equal 'Sa, 01. 
Mär 2008 06:00:00 +0000', I18n.l(@datetime, :format => :default, :locale => :de) + end + + test "localize DateTime: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@datetime, :format => '%A', :locale => :de) + end + + test "localize DateTime: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@datetime, :format => '%^A', :locale => :de) + end + + test "localize DateTime: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@datetime, :format => '%a', :locale => :de) + end + + test "localize DateTime: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@datetime, :format => '%^a', :locale => :de) + end + + test "localize DateTime: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@datetime, :format => '%B', :locale => :de) + end + + test "localize DateTime: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@datetime, :format => '%^B', :locale => :de) + end + + test "localize DateTime: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@datetime, :format => '%b', :locale => :de) + end + + test "localize DateTime: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@datetime, :format => '%^b', :locale => :de) + end + + test "localize DateTime: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::DateTime.new(2008, 2, 1, 6), :format => "%-d. %^B %Y", :locale => :de) + end + + test "localize DateTime: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@datetime, :format => '%b', :locale => :fr) + end + + test "localize DateTime: given a meridian indicator format it returns the correct meridian indicator" do + assert_equal 'AM', I18n.l(@datetime, :format => '%p', :locale => :de) + assert_equal 'PM', I18n.l(@other_datetime, :format => '%p', :locale => :de) + end + + test "localize DateTime: given a meridian indicator format it returns the correct meridian indicator in downcase" do + assert_equal 'am', I18n.l(@datetime, :format => '%P', :locale => :de) + assert_equal 'pm', I18n.l(@other_datetime, :format => '%P', :locale => :de) + end + + test "localize DateTime: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@datetime, :format => '%x') } + end + + test "localize DateTime: given a format is missing it raises I18n::MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.l(@datetime, :format => :missing) } + end + + protected + + def setup_datetime_translations + # time translations might have been set up in Tests::Api::Localization::Time + I18n.backend.store_translations :de, { + :time => { + :formats => { + :default => "%a, %d. %b %Y %H:%M:%S %z", + :short => "%d. %b %H:%M", + :long => "%d. 
%B %Y %H:%M" + }, + :am => 'am', + :pm => 'pm' + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/procs.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/procs.rb new file mode 100644 index 0000000..5f3e9bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/procs.rb @@ -0,0 +1,117 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Procs + test "localize: using day names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/Суббота/, I18n.l(time, :format => "%A, %d %B", :locale => :ru)) + assert_match(/суббота/, I18n.l(time, :format => "%d %B (%A)", :locale => :ru)) + end + + test "localize: using month names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/марта/, I18n.l(time, :format => "%d %B %Y", :locale => :ru)) + assert_match(/Март /, I18n.l(time, :format => "%B %Y", :locale => :ru)) + end + + test "localize: using abbreviated day names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/марта/, I18n.l(time, :format => "%d %b %Y", :locale => :ru)) + assert_match(/март /, I18n.l(time, :format => "%b %Y", :locale => :ru)) + end + + test "localize Date: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + date = ::Date.new(2008, 3, 1) + assert_equal '[Sat, 01 Mar 2008, {}]', I18n.l(date, :format => :proc, :locale => :ru) + end + + test "localize Date: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + date = ::Date.new(2008, 3, 1) + assert_equal '[Sat, 01 Mar 2008, {:foo=>"foo"}]', I18n.l(date, :format => :proc, :foo => 'foo', :locale => :ru) + end + + test "localize DateTime: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + datetime = ::DateTime.new(2008, 3, 1, 6) + assert_equal '[Sat, 01 Mar 2008 06:00:00 +00:00, {}]', I18n.l(datetime, :format => :proc, :locale => :ru) + end + + test "localize DateTime: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + datetime = ::DateTime.new(2008, 3, 1, 6) + assert_equal '[Sat, 01 Mar 2008 06:00:00 +00:00, {:foo=>"foo"}]', I18n.l(datetime, :format => :proc, :foo => 'foo', :locale => :ru) + end + + test "localize Time: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_equal I18n::Tests::Localization::Procs.inspect_args([time], {}), I18n.l(time, :format => :proc, :locale => :ru) + end + + test "localize Time: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + options = { :foo => 'foo' } + assert_equal I18n::Tests::Localization::Procs.inspect_args([time], options), I18n.l(time, **options.merge(:format => :proc, :locale => :ru)) + end + + protected + + def self.inspect_args(args, kwargs) + args << kwargs + args = args.map do |arg| + case arg + when ::Time, ::DateTime + arg.strftime('%a, %d %b %Y %H:%M:%S %Z').sub('+0000', '+00:00') + when ::Date + arg.strftime('%a, %d %b %Y') + when Hash + arg.delete(:fallback_in_progress) + arg.inspect + else + arg.inspect + end 
+ end + "[#{args.join(', ')}]" + end + + def setup_time_proc_translations + I18n.backend.store_translations :ru, { + :time => { + :formats => { + :proc => lambda { |*args, **kwargs| I18n::Tests::Localization::Procs.inspect_args(args, kwargs) } + } + }, + :date => { + :formats => { + :proc => lambda { |*args, **kwargs| I18n::Tests::Localization::Procs.inspect_args(args, kwargs) } + }, + :'day_names' => lambda { |key, options| + (options[:format] =~ /^%A/) ? + %w(Воскресенье Понедельник Вторник Среда Четверг Пятница Суббота) : + %w(воскресенье понедельник вторник среда четверг пятница суббота) + }, + :'month_names' => lambda { |key, options| + (options[:format] =~ /(%d|%e)(\s*)?(%B)/) ? + %w(января февраля марта апреля мая июня июля августа сентября октября ноября декабря).unshift(nil) : + %w(Январь Февраль Март Апрель Май Июнь Июль Август Сентябрь Октябрь Ноябрь Декабрь).unshift(nil) + }, + :'abbr_month_names' => lambda { |key, options| + (options[:format] =~ /(%d|%e)(\s*)(%b)/) ? + %w(янв. февр. марта апр. мая июня июля авг. сент. окт. нояб. дек.).unshift(nil) : + %w(янв. февр. март апр. май июнь июль авг. сент. окт. нояб. дек.).unshift(nil) + }, + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/time.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/time.rb new file mode 100644 index 0000000..6928791 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/localization/time.rb @@ -0,0 +1,103 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Time + def setup + super + setup_time_translations + @time = ::Time.utc(2008, 3, 1, 6, 0) + @other_time = ::Time.utc(2008, 3, 1, 18, 0) + end + + test "localize Time: given the short format it uses it" do + assert_equal '01. Mär 06:00', I18n.l(@time, :format => :short, :locale => :de) + end + + test "localize Time: given the long format it uses it" do + assert_equal '01. März 2008 06:00', I18n.l(@time, :format => :long, :locale => :de) + end + + # TODO Seems to break on Windows because ENV['TZ'] is ignored. What's a better way to do this? + # def test_localize_given_the_default_format_it_uses_it + # assert_equal 'Sa, 01. 
Mar 2008 06:00:00 +0000', I18n.l(@time, :format => :default, :locale => :de) + # end + + test "localize Time: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@time, :format => '%A', :locale => :de) + end + + test "localize Time: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@time, :format => '%^A', :locale => :de) + end + + test "localize Time: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@time, :format => '%a', :locale => :de) + end + + test "localize Time: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@time, :format => '%^a', :locale => :de) + end + + test "localize Time: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@time, :format => '%B', :locale => :de) + end + + test "localize Time: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@time, :format => '%^B', :locale => :de) + end + + test "localize Time: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@time, :format => '%b', :locale => :de) + end + + test "localize Time: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@time, :format => '%^b', :locale => :de) + end + + test "localize Time: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::Time.utc(2008, 2, 1, 6, 0), :format => "%-d. %^B %Y", :locale => :de) + end + + test "localize Time: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@time, :format => '%b', :locale => :fr) + end + + test "localize Time: given a meridian indicator format it returns the correct meridian indicator" do + assert_equal 'AM', I18n.l(@time, :format => '%p', :locale => :de) + assert_equal 'PM', I18n.l(@other_time, :format => '%p', :locale => :de) + end + + test "localize Time: given a meridian indicator format it returns the correct meridian indicator in upcase" do + assert_equal 'am', I18n.l(@time, :format => '%P', :locale => :de) + assert_equal 'pm', I18n.l(@other_time, :format => '%P', :locale => :de) + end + + test "localize Time: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@time, :format => '%x') } + end + + test "localize Time: given a format is missing it raises I18n::MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.l(@time, :format => :missing) } + end + + protected + + def setup_time_translations + I18n.backend.store_translations :de, { + :time => { + :formats => { + :default => "%a, %d. %b %Y %H:%M:%S %z", + :short => "%d. %b %H:%M", + :long => "%d. 
%B %Y %H:%M", + }, + :am => 'am', + :pm => 'pm' + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/lookup.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/lookup.rb new file mode 100644 index 0000000..61e16cd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/lookup.rb @@ -0,0 +1,81 @@ +# encoding: utf-8 + +module I18n + module Tests + module Lookup + def setup + super + I18n.backend.store_translations(:en, :foo => { :bar => 'bar', :baz => 'baz' }, :falsy => false, :truthy => true, + :string => "a", :array => %w(a b c), :hash => { "a" => "b" }) + end + + test "lookup: it returns a string" do + assert_equal("a", I18n.t(:string)) + end + + test "lookup: it returns hash" do + assert_equal({ :a => "b" }, I18n.t(:hash)) + end + + test "lookup: it returns an array" do + assert_equal(%w(a b c), I18n.t(:array)) + end + + test "lookup: it returns a native true" do + assert I18n.t(:truthy) === true + end + + test "lookup: it returns a native false" do + assert I18n.t(:falsy) === false + end + + test "lookup: given a missing key, no default and no raise option it returns an error message" do + assert_equal "translation missing: en.missing", I18n.t(:missing) + end + + test "lookup: given a missing key, no default and the raise option it raises MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.t(:missing, :raise => true) } + end + + test "lookup: does not raise an exception if no translation data is present for the given locale" do + assert_nothing_raised { I18n.t(:foo, :locale => :xx) } + end + + test "lookup: does not modify the options hash" do + options = {} + assert_equal "a", I18n.t(:string, **options) + assert_equal({}, options) + assert_nothing_raised { I18n.t(:string, **options.freeze) } + end + + test "lookup: given an array of keys it translates all of them" do + assert_equal %w(bar baz), I18n.t([:bar, :baz], :scope => [:foo]) + end + + test "lookup: using a custom scope separator" do + # data must have been stored using the custom separator when using the ActiveRecord backend + I18n.backend.store_translations(:en, { :foo => { :bar => 'bar' } }, { :separator => '|' }) + assert_equal 'bar', I18n.t('foo|bar', :separator => '|') + end + + # In fact it probably *should* fail but Rails currently relies on using the default locale instead. + # So we'll stick to this for now until we get it fixed in Rails. + test "lookup: given nil as a locale it does not raise but use the default locale" do + # assert_raise(I18n::InvalidLocale) { I18n.t(:bar, :locale => nil) } + assert_nothing_raised { I18n.t(:bar, :locale => nil) } + end + + test "lookup: a resulting String is not frozen" do + assert !I18n.t(:string).frozen? + end + + test "lookup: a resulting Array is not frozen" do + assert !I18n.t(:array).frozen? + end + + test "lookup: a resulting Hash is not frozen" do + assert !I18n.t(:hash).frozen? 
+ end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/pluralization.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/pluralization.rb new file mode 100644 index 0000000..d3319dc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/pluralization.rb @@ -0,0 +1,35 @@ +# encoding: utf-8 + +module I18n + module Tests + module Pluralization + test "pluralization: given 0 it returns the :zero translation if it is defined" do + assert_equal 'zero', I18n.t(:default => { :zero => 'zero' }, :count => 0) + end + + test "pluralization: given 0 it returns the :other translation if :zero is not defined" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 0) + end + + test "pluralization: given 1 it returns the singular translation" do + assert_equal 'bar', I18n.t(:default => { :one => 'bar' }, :count => 1) + end + + test "pluralization: given 2 it returns the :other translation" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 2) + end + + test "pluralization: given 3 it returns the :other translation" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 3) + end + + test "pluralization: given nil it returns the whole entry" do + assert_equal({ :one => 'bar' }, I18n.t(:default => { :one => 'bar' }, :count => nil)) + end + + test "pluralization: given incomplete pluralization data it raises I18n::InvalidPluralizationData" do + assert_raise(I18n::InvalidPluralizationData) { I18n.t(:default => { :one => 'bar' }, :count => 2) } + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/procs.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/procs.rb new file mode 100644 index 0000000..dad76de --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/tests/procs.rb @@ -0,0 +1,60 @@ +# encoding: utf-8 + +module I18n + module Tests + module Procs + test "lookup: given a translation is a proc it calls the proc with the key and interpolation values" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(:a_lambda, :foo => 'foo') + end + + test "lookup: given a translation is a proc it passes the interpolation values as keyword arguments" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |key, foo:, **| I18n::Tests::Procs.filter_args(key, foo: foo) }) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(:a_lambda, :foo => 'foo') + end + + test "defaults: given a default is a Proc it calls it with the key and interpolation values" do + proc = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + assert_equal '[nil, {:foo=>"foo"}]', I18n.t(nil, :default => proc, :foo => 'foo') + end + + test "defaults: given a default is a key that resolves to a Proc it calls it with the key and interpolation values" do + the_lambda = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + I18n.backend.store_translations(:en, :a_lambda => the_lambda) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(nil, :default => :a_lambda, :foo => 'foo') + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(nil, :default => [nil, :a_lambda], :foo => 'foo') + end + + test "interpolation: given an interpolation value is a lambda it calls it with key and values before interpolating it" do + proc = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + assert_match %r(\[\{:foo=>#\}\]), I18n.t(nil, :default => 
'%{foo}', :foo => proc) + end + + test "interpolation: given a key resolves to a Proc that returns a string then interpolation still works" do + proc = lambda { |*args| "%{foo}: " + I18n::Tests::Procs.filter_args(*args) } + assert_equal 'foo: [nil, {:foo=>"foo"}]', I18n.t(nil, :default => proc, :foo => 'foo') + end + + test "pluralization: given a key resolves to a Proc that returns valid data then pluralization still works" do + proc = lambda { |*args| { :zero => 'zero', :one => 'one', :other => 'other' } } + assert_equal 'zero', I18n.t(:default => proc, :count => 0) + assert_equal 'one', I18n.t(:default => proc, :count => 1) + assert_equal 'other', I18n.t(:default => proc, :count => 2) + end + + test "lookup: given the option :resolve => false was passed it does not resolve proc translations" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }) + assert_equal Proc, I18n.t(:a_lambda, :resolve => false).class + end + + test "lookup: given the option :resolve => false was passed it does not resolve proc default" do + assert_equal Proc, I18n.t(nil, :default => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }, :resolve => false).class + end + + + def self.filter_args(*args) + args.map {|arg| arg.delete(:fallback_in_progress) if arg.is_a?(Hash) ; arg }.inspect + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/version.rb b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/version.rb new file mode 100644 index 0000000..3781c93 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/i18n-1.8.5/lib/i18n/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module I18n + VERSION = "1.8.5" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/.gitignore b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/.gitignore new file mode 100644 index 0000000..e8b1a52 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/.gitignore @@ -0,0 +1,18 @@ +.*.sw[pon] +*.bundle +coverage +tags +pkg +.nfs.* +.idea +java/Json.iml +Gemfile.lock +.rvmrc +*.rbc +.rbx +.AppleDouble +.DS_Store +*/**/Makefile +*/**/*.o +.byebug_history +*.log diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/.travis.yml new file mode 100644 index 0000000..920a4ac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/.travis.yml @@ -0,0 +1,26 @@ +# Passes arguments to bundle install (http://gembundler.com/man/bundle-install.1.html) +#bundler_args: --binstubs +language: ruby + +# Specify which ruby versions you wish to run your tests on, each version will be used +rvm: + - 2.0.0 + - 2.1 + - 2.2 + - 2.3 + - 2.4 + - 2.5 + - 2.6 + - 2.7.0-preview3 + - ruby-head + - jruby + - jruby-9.2.7.0 + - truffleruby +matrix: + allow_failures: + - rvm: ruby-head + - rvm: jruby + - rvm: jruby-9.2.7.0 + - rvm: truffleruby +script: "bundle exec rake" +sudo: false diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/CHANGES.md b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/CHANGES.md new file mode 100644 index 0000000..0d39f21 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/CHANGES.md @@ -0,0 +1,424 @@ +# Changes + +## 2020-06-30 (2.3.1) + +* Spelling and grammar fixes for comments. Pull request #191 by Josh + Kline. +* Enhance generic JSON and #generate docs. Pull request #347 by Victor + Shepelev. +* Add :nodoc: for GeneratorMethods. Pull request #349 by Victor Shepelev. +* Baseline changes to help (JRuby) development. Pull request #371 by Karol + Bucek. 
+* Add metadata for rubygems.org. Pull request #379 by Alexandre ZANNI. +* Remove invalid JSON.generate description from JSON module rdoc. Pull + request #384 by Jeremy Evans. +* Test with TruffleRuby in CI. Pull request #402 by Benoit Daloze. +* Rdoc enhancements. Pull request #413 by Burdette Lamar. +* Fixtures/ are not being tested... Pull request #416 by Marc-André + Lafortune. +* Use frozen string for hash key. Pull request #420 by Marc-André + Lafortune. +* Added :call-seq: to RDoc for some methods. Pull request #422 by Burdette + Lamar. +* Small typo fix. Pull request #423 by Marc-André Lafortune. + +## 2019-12-11 (2.3.0) + * Fix default of `create_additions` to always be `false` for `JSON(user_input)` + and `JSON.parse(user_input, nil)`. + Note that `JSON.load` remains with default `true` and is meant for internal + serialization of trusted data. [CVE-2020-10663] + * Fix passing args all #to_json in json/add/*. + * Fix encoding issues + * Fix issues of keyword vs positional parameter + * Fix JSON::Parser against bigdecimal updates + * Bug fixes to JRuby port + +## 2019-02-21 (2.2.0) + * Adds support for 2.6 BigDecimal and ruby standard library Set datetype. + +## 2017-04-18 (2.1.0) + * Allow passing of `decimal_class` option to specify a class as which to parse + JSON float numbers. +## 2017-03-23 (2.0.4) + * Raise exception for incomplete unicode surrogates/character escape + sequences. This problem was reported by Daniel Gollahon (dgollahon). + * Fix arbitrary heap exposure problem. This problem was reported by Ahmad + Sherif (ahmadsherif). + +## 2017-01-12 (2.0.3) + * Set `required_ruby_version` to 1.9 + * Some small fixes + +## 2016-07-26 (2.0.2) + * Specify `required_ruby_version` for json\_pure. + * Fix issue #295 failure when parsing frozen strings. + +## 2016-07-01 (2.0.1) + * Fix problem when requiring json\_pure and Parser constant was defined top + level. + * Add `RB_GC_GUARD` to avoid possible GC problem via Pete Johns. + * Store `current_nesting` on stack by Aaron Patterson. + +## 2015-09-11 (2.0.0) + * Now complies to newest JSON RFC 7159. + * Implements compatibiliy to ruby 2.4 integer unification. + * Drops support for old rubies whose life has ended, that is rubies < 2.0. + Also see https://www.ruby-lang.org/en/news/2014/07/01/eol-for-1-8-7-and-1-9-2/ + * There were still some mentions of dual GPL licensing in the source, but JSON + has just the Ruby license that itself includes an explicit dual-licensing + clause that allows covered software to be distributed under the terms of + the Simplified BSD License instead for all ruby versions >= 1.9.3. This is + however a GPL compatible license according to the Free Software Foundation. + I changed these mentions to be consistent with the Ruby license setting in + the gemspec files which were already correct now. + +## 2015-06-01 (1.8.3) + * Fix potential memory leak, thx to nobu. + +## 2015-01-08 (1.8.2) + * Some performance improvements by Vipul A M . + * Fix by Jason R. Clark to avoid mutation of + `JSON.dump_default_options`. + * More tests by Michael Mac-Vicar and fixing + `space_before` accessor in generator. + * Performance on Jruby improved by Ben Browning . + * Some fixes to be compatible with the new Ruby 2.2 by Zachary Scott + and SHIBATA Hiroshi . + +## 2013-05-13 (1.8.1) + * Remove Rubinius exception since transcoding should be working now. + +## 2013-05-13 (1.8.0) + * Fix https://github.com/flori/json/issues/162 reported by Marc-Andre + Lafortune . Thanks! 
+ * Applied patches by Yui NARUSE to suppress warning with + -Wchar-subscripts and better validate UTF-8 strings. + * Applied patch by ginriki@github to remove unnecessary if. + * Add load/dump interface to `JSON::GenericObject` to make + serialize :some_attribute, `JSON::GenericObject` + work in Rails active models for convenient `SomeModel#some_attribute.foo.bar` + access to serialised JSON data. + +## 2013-02-04 (1.7.7) + * Security fix for JSON create_additions default value and + `JSON::GenericObject`. It should not be possible to create additions unless + explicitely requested by setting the create_additions argument to true or + using the JSON.load/dump interface. If `JSON::GenericObject` is supposed to + be automatically deserialised, this has to be explicitely enabled by + setting + JSON::GenericObject.json_creatable = true + as well. + * Remove useless assert in fbuffer implementation. + * Apply patch attached to https://github.com/flori/json/issues#issue/155 + provided by John Shahid , Thx! + * Add license information to rubygems spec data, reported by Jordi Massaguer Pla . + * Improve documentation, thx to Zachary Scott . + +## 2012-11-29 (1.7.6) + * Add `GeneratorState#merge` alias for JRuby, fix state accessor methods. Thx to + jvshahid@github. + * Increase hash likeness of state objects. + +## 2012-08-17 (1.7.5) + * Fix compilation of extension on older rubies. + +## 2012-07-26 (1.7.4) + * Fix compilation problem on AIX, see https://github.com/flori/json/issues/142 + +## 2012-05-12 (1.7.3) + * Work around Rubinius encoding issues using iconv for conversion instead. + +## 2012-05-11 (1.7.2) + * Fix some encoding issues, that cause problems for the pure and the + extension variant in jruby 1.9 mode. + +## 2012-04-28 (1.7.1) + * Some small fixes for building + +## 2012-04-28 (1.7.0) + * Add `JSON::GenericObject` for method access to objects transmitted via JSON. + +## 2012-04-27 (1.6.7) + * Fix possible crash when trying to parse nil value. + +## 2012-02-11 (1.6.6) + * Propagate src encoding to values made from it (fixes 1.9 mode converting + everything to ascii-8bit; harmless for 1.8 mode too) (Thomas E. Enebo + ), should fix + https://github.com/flori/json/issues#issue/119. + * Fix https://github.com/flori/json/issues#issue/124 Thx to Jason Hutchens. + * Fix https://github.com/flori/json/issues#issue/117 + +## 2012-01-15 (1.6.5) + * Vit Ondruch reported a bug that shows up when using + optimisation under GCC 4.7. Thx to him, Bohuslav Kabrda + and Yui NARUSE for debugging and + developing a patch fix. + +## 2011-12-24 (1.6.4) + * Patches that improve speed on JRuby contributed by Charles Oliver Nutter + . + * Support `object_class`/`array_class` with duck typed hash/array. + +## 2011-12-01 (1.6.3) + * Let `JSON.load('')` return nil as well to make mysql text columns (default to + `''`) work better for serialization. + +## 2011-11-21 (1.6.2) + * Add support for OpenStruct and BigDecimal. + * Fix bug when parsing nil in `quirks_mode`. + * Make JSON.dump and JSON.load methods better cooperate with Rails' serialize + method. Just use: + serialize :value, JSON + * Fix bug with time serialization concerning nanoseconds. Thanks for the + patch go to Josh Partlow (jpartlow@github). + * Improve parsing speed for JSON numbers (integers and floats) in a similar way to + what Evan Phoenix suggested in: + https://github.com/flori/json/pull/103 + +## 2011-09-18 (1.6.1) + * Using -target 1.5 to force Java bits to compile with 1.5. 
+ +## 2011-09-12 (1.6.0) + * Extract utilities (prettifier and GUI-editor) in its own gem json-utils. + * Split json/add/core into different files for classes to be serialised. + +## 2011-08-31 (1.5.4) + * Fix memory leak when used from multiple JRuby. (Patch by + jfirebaugh@github). + * Apply patch by Eric Wong that fixes garbage collection problem + reported in https://github.com/flori/json/issues/46. + * Add :quirks_mode option to parser and generator. + * Add support for Rational and Complex number additions via json/add/complex + and json/add/rational requires. + +## 2011-06-20 (1.5.3) + * Alias State#configure method as State#merge to increase duck type synonymy with Hash. + * Add `as_json` methods in json/add/core, so rails can create its json objects + the new way. + +## 2011-05-11 (1.5.2) + * Apply documentation patch by Cory Monty . + * Add gemspecs for json and json\_pure. + * Fix bug in jruby pretty printing. + * Fix bug in `object_class` and `array_class` when inheriting from Hash or + Array. + +## 2011-01-24 (1.5.1) + * Made rake-compiler build a fat binary gem. This should fix issue + https://github.com/flori/json/issues#issue/54. + +## 2011-01-22 (1.5.0) + * Included Java source codes for the Jruby extension made by Daniel Luz + . + * Output full exception message of `deep_const_get` to aid debugging. + * Fixed an issue with ruby 1.9 `Module#const_defined?` method, that was + reported by Riley Goodside. + +## 2010-08-09 (1.4.6) + * Fixed oversight reported in http://github.com/flori/json/issues/closed#issue/23, + always create a new object from the state prototype. + * Made pure and ext api more similar again. + +## 2010-08-07 (1.4.5) + * Manage data structure nesting depth in state object during generation. This + should reduce problems with `to_json` method definіtions that only have one + argument. + * Some fixes in the state objects and additional tests. +## 2010-08-06 (1.4.4) + * Fixes build problem for rubinius under OS X, http://github.com/flori/json/issues/closed#issue/25 + * Fixes crashes described in http://github.com/flori/json/issues/closed#issue/21 and + http://github.com/flori/json/issues/closed#issue/23 +## 2010-05-05 (1.4.3) + * Fixed some test assertions, from Ruby r27587 and r27590, patch by nobu. + * Fixed issue http://github.com/flori/json/issues/#issue/20 reported by + electronicwhisper@github. Thx! + +## 2010-04-26 (1.4.2) + * Applied patch from naruse Yui NARUSE to make building with + Microsoft Visual C possible again. + * Applied patch from devrandom in order to allow building of + json_pure if extensiontask is not present. + * Thanks to Dustin Schneider , who reported a memory + leak, which is fixed in this release. + * Applied 993f261ccb8f911d2ae57e9db48ec7acd0187283 patch from josh@github. + +## 2010-04-25 (1.4.1) + * Fix for a bug reported by Dan DeLeo , caused by T_FIXNUM + being different on 32bit/64bit architectures. + +## 2010-04-23 (1.4.0) + * Major speed improvements and building with simplified + directory/file-structure. + * Extension should at least be comapatible with MRI, YARV and Rubinius. + +## 2010-04-07 (1.2.4) + * Triger const_missing callback to make Rails' dynamic class loading work. + +## 2010-03-11 (1.2.3) + * Added a `State#[]` method which returns an attribute's value in order to + increase duck type compatibility to Hash. + +## 2010-02-27 (1.2.2) + * Made some changes to make the building of the parser/generator compatible + to Rubinius. 
+ +## 2009-11-25 (1.2.1) + * Added `:symbolize_names` option to Parser, which returns symbols instead of + strings in object names/keys. + +## 2009-10-01 (1.2.0) + * `fast_generate` now raises an exeception for nan and infinite floats. + * On Ruby 1.8 json supports parsing of UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, + and UTF-32LE JSON documents now. Under Ruby 1.9 the M17n conversion + functions are used to convert from all supported encodings. ASCII-8BIT + encoded strings are handled like all strings under Ruby 1.8 were. + * Better documentation + +## 2009-08-23 (1.1.9) + * Added forgotten main doc file `extra_rdoc_files`. + +## 2009-08-23 (1.1.8) + * Applied a patch by OZAWA Sakuro to make json/pure + work in environments that don't provide iconv. + * Applied patch by okkez_ in order to fix Ruby Bug #1768: + http://redmine.ruby-lang.org/issues/show/1768. + * Finally got around to avoid the rather paranoid escaping of ?/ characters + in the generator's output. The parsers aren't affected by this change. + Thanks to Rich Apodaca for the suggestion. + +## 2009-06-29 (1.1.7) + * Security Fix for JSON::Pure::Parser. A specially designed string could + cause catastrophic backtracking in one of the parser's regular expressions + in earlier 1.1.x versions. JSON::Ext::Parser isn't affected by this issue. + Thanks to Bartosz Blimke for reporting this + problem. + * This release also uses a less strict ruby version requirement for the + creation of the mswin32 native gem. + +## 2009-05-10 (1.1.6) + * No changes. І tested native linux gems in the last release and they don't + play well with different ruby versions other than the one the gem was built + with. This release is just to bump the version number in order to skip the + native gem on rubyforge. + +## 2009-05-10 (1.1.5) + * Started to build gems with rake-compiler gem. + * Applied patch object/array class patch from Brian Candler + and fixes. + +## 2009-04-01 (1.1.4) + * Fixed a bug in the creation of serialized generic rails objects reported by + Friedrich Graeter . + * Deleted tests/runner.rb, we're using testrb instead. + * Editor supports Infinity in numbers now. + * Made some changes in order to get the library to compile/run under Ruby + 1.9. + * Improved speed of the code path for the fast_generate method in the pure + variant. + +## 2008-07-10 (1.1.3) + * Wesley Beary reported a bug in json/add/core's DateTime + handling: If the nominator and denominator of the offset were divisible by + each other Ruby's Rational#to_s returns them as an integer not a fraction + with '/'. This caused a ZeroDivisionError during parsing. + * Use Date#start and DateTime#start instead of sg method, while + remaining backwards compatible. + * Supports ragel >= 6.0 now. + * Corrected some tests. + * Some minor changes. + +## 2007-11-27 (1.1.2) + * Remember default dir (last used directory) in editor. + * JSON::Editor.edit method added, the editor can now receive json texts from + the clipboard via C-v. + * Load json texts from an URL pasted via middle button press. + * Added :create_additions option to Parser. This makes it possible to disable + the creation of additions by force, in order to treat json texts as data + while having additions loaded. + * Jacob Maine reported, that JSON(:foo) outputs a JSON + object if the rails addition is enabled, which is wrong. It now outputs a + JSON string "foo" instead, like suggested by Jacob Maine. + * Discovered a bug in the Ruby Bugs Tracker on rubyforge, that was reported + by John Evans lgastako@gmail.com. 
He could produce a crash in the JSON + generator by returning something other than a String instance from a + to_json method. I now guard against this by doing a rather crude type + check, which raises an exception instead of crashing. + +## 2007-07-06 (1.1.1) + * Yui NARUSE sent some patches to fix tests for Ruby + 1.9. I applied them and adapted some of them a bit to run both on 1.8 and + 1.9. + * Introduced a `JSON.parse!` method without depth checking for people who + like danger. + * Made generate and `pretty_generate` methods configurable by an options hash. + * Added :allow_nan option to parser and generator in order to handle NaN, + Infinity, and -Infinity correctly - if requested. Floats, which aren't numbers, + aren't valid JSON according to RFC4627, so by default an exception will be + raised if any of these symbols are encountered. Thanks to Andrea Censi + for his hint about this. + * Fixed some more tests for Ruby 1.9. + * Implemented dump/load interface of Marshal as suggested in ruby-core:11405 + by murphy . + * Implemented the `max_nesting` feature for generate methods, too. + * Added some implementations for ruby core's custom objects for + serialisation/deserialisation purposes. + +## 2007-05-21 (1.1.0) + * Implemented max_nesting feature for parser to avoid stack overflows for + data from untrusted sources. If you trust the source, you can disable it + with the option max_nesting => false. + * Piers Cawley reported a bug, that not every + character can be escaped by `\` as required by RFC4627. There's a + contradiction between David Crockford's JSON checker test vectors (in + tests/fixtures) and RFC4627, though. I decided to stick to the RFC, because + the JSON checker seems to be a bit older than the RFC. + * Extended license to Ruby License, which includes the GPL. + * Added keyboard shortcuts, and 'Open location' menu item to edit_json.rb. + +## 2007-05-09 (1.0.4) + * Applied a patch from Yui NARUSE to make JSON compile + under Ruby 1.9. Thank you very much for mailing it to me! + * Made binary variants of JSON fail early, instead of falling back to the + pure version. This should avoid overshadowing of eventual problems while + loading of the binary. + +## 2007-03-24 (1.0.3) + * Improved performance of pure variant a bit. + * The ext variant of this release supports the mswin32 platform. Ugh! + +## 2007-03-24 (1.0.2) + * Ext Parser didn't parse 0e0 correctly into 0.0: Fixed! + +## 2007-03-24 (1.0.1) + * Forgot some object files in the build dir. I really like that - not! + +## 2007-03-24 (1.0.0) + * Added C implementations for the JSON generator and a ragel based JSON + parser in C. + * Much more tests, especially fixtures from json.org. + * Further improved conformance to RFC4627. + +## 2007-02-09 (0.4.3) + * Conform more to RFC4627 for JSON: This means JSON strings + now always must contain exactly one object `"{ ... }"` or array `"[ ... ]"` in + order to be parsed without raising an exception. The definition of what + constitutes a whitespace is narrower in JSON than in Ruby ([ \t\r\n]), and + there are differences in floats and integers (no octals or hexadecimals) as + well. + * Added aliases generate and `pretty_generate` of unparse and `pretty_unparse`. + * Fixed a test case. + * Catch an `Iconv::InvalidEncoding` exception, that seems to occur on some Sun + boxes with SunOS 5.8, if iconv doesn't support utf16 conversions. This was + reported by Andrew R Jackson , thanks a bunch! 
+ +## 2006-08-25 (0.4.2) + * Fixed a bug in handling solidi (/-characters), that was reported by + Kevin Gilpin . + +## 2006-02-06 (0.4.1) + * Fixed a bug related to escaping with backslashes. Thanks for the report go + to Florian Munz . + +## 2005-09-23 (0.4.0) + * Initial Rubyforge Version diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/Gemfile b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/Gemfile new file mode 100644 index 0000000..8fe591e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/Gemfile @@ -0,0 +1,14 @@ +# vim: set ft=ruby: + +source 'https://rubygems.org' + +case ENV['JSON'] +when 'ext', nil + if ENV['RUBY_ENGINE'] == 'jruby' + gemspec :name => 'json-java' + else + gemspec :name => 'json' + end +when 'pure' + gemspec :name => 'json_pure' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/README-json-jruby.md b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/README-json-jruby.md new file mode 100644 index 0000000..1336837 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/README-json-jruby.md @@ -0,0 +1,33 @@ +JSON-JRuby +========== + +JSON-JRuby is a port of Florian Frank's native +[`json` library](http://json.rubyforge.org/) to JRuby. +It aims to be a perfect drop-in replacement for `json_pure`. + + +Development version +=================== + +The latest version is available from the +[Git repository](http://github.com/mernen/json-jruby/tree): + + git clone git://github.com/mernen/json-jruby.git + + +Compiling +========= + +You'll need JRuby version 1.2 or greater to build JSON-JRuby. +Its path must be set on the `jruby.dir` property of +`nbproject/project.properties` (defaults to `../jruby`). + +Additionally, you'll need [Ant](http://ant.apache.org/), and +[Ragel](http://www.cs.queensu.ca/~thurston/ragel/) 6.4 or greater. + +Then, from the folder where the sources are located, type: + + ant clean jar + +to clean any leftovers from previous builds and generate the `.jar` files. +To generate a RubyGem, specify the `gem` action rather than `jar`. diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/README.md b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/README.md new file mode 100644 index 0000000..682db2f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/README.md @@ -0,0 +1,425 @@ +# JSON implementation for Ruby + +[![Travis Widget](http://travis-ci.org/flori/json.svg?branch=master)](https://travis-ci.org/flori/json) + +## Description + +This is a implementation of the JSON specification according to RFC 7159 +http://www.ietf.org/rfc/rfc7159.txt . Starting from version 1.0.0 on there +will be two variants available: + +* A pure ruby variant, that relies on the iconv and the stringscan + extensions, which are both part of the ruby standard library. +* The quite a bit faster native extension variant, which is in parts + implemented in C or Java and comes with its own unicode conversion + functions and a parser generated by the ragel state machine compiler + http://www.complang.org/ragel/ . + +Both variants of the JSON generator generate UTF-8 character sequences by +default. If an :ascii\_only option with a true value is given, they escape all +non-ASCII and control characters with \uXXXX escape sequences, and support +UTF-16 surrogate pairs in order to be able to generate the whole range of +unicode code points. + +All strings, that are to be encoded as JSON strings, should be UTF-8 byte +sequences on the Ruby side. 
To encode raw binary strings, that aren't UTF-8 +encoded, please use the to\_json\_raw\_object method of String (which produces +an object, that contains a byte array) and decode the result on the receiving +endpoint. + +## Installation + +It's recommended to use the extension variant of JSON, because it's faster than +the pure ruby variant. If you cannot build it on your system, you can settle +for the latter. + +Just type into the command line as root: + +``` +# rake install +``` + +The above command will build the extensions and install them on your system. + +``` +# rake install_pure +``` + +or + +``` +# ruby install.rb +``` + +will just install the pure ruby implementation of JSON. + +If you use Rubygems you can type + +``` +# gem install json +``` + +instead, to install the newest JSON version. + +There is also a pure ruby json only variant of the gem, that can be installed +with: + +``` +# gem install json_pure +``` + +## Compiling the extensions yourself + +If you want to create the `parser.c` file from its `parser.rl` file or draw nice +graphviz images of the state machines, you need ragel from: +http://www.complang.org/ragel/ + +## Usage + +To use JSON you can + +```ruby +require 'json' +``` + +to load the installed variant (either the extension `'json'` or the pure +variant `'json_pure'`). If you have installed the extension variant, you can +pick either the extension variant or the pure variant by typing + +```ruby +require 'json/ext' +``` + +or + +```ruby +require 'json/pure' +``` + +Now you can parse a JSON document into a ruby data structure by calling + +```ruby +JSON.parse(document) +``` + +If you want to generate a JSON document from a ruby data structure call +```ruby +JSON.generate(data) +``` + +You can also use the `pretty_generate` method (which formats the output more +verbosely and nicely) or `fast_generate` (which doesn't do any of the security +checks generate performs, e. g. nesting deepness checks). + +There are also the JSON and JSON[] methods which use parse on a String or +generate a JSON document from an array or hash: + +```ruby +document = JSON 'test' => 23 # => "{\"test\":23}" +document = JSON['test' => 23] # => "{\"test\":23}" +``` + +and + +```ruby +data = JSON '{"test":23}' # => {"test"=>23} +data = JSON['{"test":23}'] # => {"test"=>23} +``` + +You can choose to load a set of common additions to ruby core's objects if +you + +```ruby +require 'json/add/core' +``` + +After requiring this you can, e. g., serialise/deserialise Ruby ranges: + +```ruby +JSON JSON(1..10) # => 1..10 +``` + +To find out how to add JSON support to other or your own classes, read the +section "More Examples" below. + +To get the best compatibility to rails' JSON implementation, you can + +```ruby +require 'json/add/rails' +``` + +Both of the additions attempt to require `'json'` (like above) first, if it has +not been required yet. + +## Serializing exceptions + +The JSON module doesn't extend `Exception` by default. If you convert an `Exception` +object to JSON, it will by default only include the exception message. 
+ +To include the full details, you must either load the `json/add/core` mentioned +above, or specifically load the exception addition: + +```ruby +require 'json/add/exception' +``` + +## More Examples + +To create a JSON document from a ruby data structure, you can call +`JSON.generate` like that: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,\"4..10\"]" +``` + +To get back a ruby data structure from a JSON document, you have to call +JSON.parse on it: + +```ruby +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, "4..10"] +``` + +Note, that the range from the original data structure is a simple +string now. The reason for this is, that JSON doesn't support ranges +or arbitrary classes. In this case the json library falls back to call +`Object#to_json`, which is the same as `#to_s.to_json`. + +It's possible to add JSON support serialization to arbitrary classes by +simply implementing a more specialized version of the `#to_json method`, that +should return a JSON object (a hash converted to JSON with `#to_json`) like +this (don't forget the `*a` for all the arguments): + +```ruby +class Range + def to_json(*a) + { + 'json_class' => self.class.name, # = 'Range' + 'data' => [ first, last, exclude_end? ] + }.to_json(*a) + end +end +``` + +The hash key `json_class` is the class, that will be asked to deserialise the +JSON representation later. In this case it's `Range`, but any namespace of +the form `A::B` or `::A::B` will do. All other keys are arbitrary and can be +used to store the necessary data to configure the object to be deserialised. + +If the key `json_class` is found in a JSON object, the JSON parser checks +if the given class responds to the `json_create` class method. If so, it is +called with the JSON object converted to a Ruby hash. So a range can +be deserialised by implementing `Range.json_create` like this: + +```ruby +class Range + def self.json_create(o) + new(*o['data']) + end +end +``` + +Now it possible to serialise/deserialise ranges as well: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json, :create_additions => true +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +``` + +`JSON.generate` always creates the shortest possible string representation of a +ruby data structure in one line. This is good for data storage or network +protocols, but not so good for humans to read. Fortunately there's also +`JSON.pretty_generate` (or `JSON.pretty_generate`) that creates a more readable +output: + +```ruby + puts JSON.pretty_generate([1, 2, {"a"=>3.141}, false, true, nil, 4..10]) + [ + 1, + 2, + { + "a": 3.141 + }, + false, + true, + null, + { + "json_class": "Range", + "data": [ + 4, + 10, + false + ] + } + ] +``` + +There are also the methods `Kernel#j` for generate, and `Kernel#jj` for +`pretty_generate` output to the console, that work analogous to Core Ruby's `p` and +the `pp` library's `pp` methods. + +The script `tools/server.rb` contains a small example if you want to test, how +receiving a JSON object from a webrick server in your browser with the +javasript prototype library http://www.prototypejs.org works. 
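
Putting the pieces from this section together, a minimal round-trip for one of your own classes might look like the following sketch (the `Point` class is only an illustration, not part of the library):

```ruby
require 'json'

class Point
  attr_reader :x, :y

  def initialize(x, y)
    @x, @y = x, y
  end

  # Serialise as a JSON object carrying the class name plus the data
  # needed to rebuild the instance later.
  def to_json(*a)
    { 'json_class' => self.class.name, 'data' => [x, y] }.to_json(*a)
  end

  # Called by the parser when :create_additions is enabled and a
  # matching "json_class" key is found in the JSON object.
  def self.json_create(object)
    new(*object['data'])
  end
end

json = JSON.generate(Point.new(1, 2))
# => "{\"json_class\":\"Point\",\"data\":[1,2]}"

JSON.parse(json)                             # => plain Hash
JSON.parse(json, :create_additions => true)  # => a Point instance again
```

Without `:create_additions => true` the parser simply hands back the hash, so untrusted input never instantiates arbitrary classes.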
+ +## Speed Comparisons + +I have created some benchmark results (see the benchmarks/data-p4-3Ghz +subdir of the package) for the JSON-parser to estimate the speed up in the C +extension: + +``` + Comparing times (call_time_mean): + 1 ParserBenchmarkExt#parser 900 repeats: + 553.922304770 ( real) -> 21.500x + 0.001805307 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 224.513358139 ( real) -> 8.714x + 0.004454078 + 3 ParserBenchmarkPure#parser 1000 repeats: + 26.755020642 ( real) -> 1.038x + 0.037376163 + 4 ParserBenchmarkRails#parser 1000 repeats: + 25.763381731 ( real) -> 1.000x + 0.038814780 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1 is `JSON::Ext::Parser`, 2 is `YAML.load` with YAML +compatbile JSON document, 3 is is `JSON::Pure::Parser`, and 4 is +`ActiveSupport::JSON.decode`. The ActiveSupport JSON-decoder converts the +input first to YAML and then uses the YAML-parser, the conversion seems to +slow it down so much that it is only as fast as the `JSON::Pure::Parser`! + +If you look at the benchmark data you can see that this is mostly caused by +the frequent high outliers - the median of the Rails-parser runs is still +overall smaller than the median of the `JSON::Pure::Parser` runs: + +``` + Comparing times (call_time_median): + 1 ParserBenchmarkExt#parser 900 repeats: + 800.592479481 ( real) -> 26.936x + 0.001249075 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 271.002390644 ( real) -> 9.118x + 0.003690004 + 3 ParserBenchmarkRails#parser 1000 repeats: + 30.227910865 ( real) -> 1.017x + 0.033082008 + 4 ParserBenchmarkPure#parser 1000 repeats: + 29.722384421 ( real) -> 1.000x + 0.033644676 + calls/sec ( time) -> speed covers + secs/call +``` + +I have benchmarked the `JSON-Generator` as well. This generated a few more +values, because there are different modes that also influence the achieved +speed: + +``` + Comparing times (call_time_mean): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 547.354332608 ( real) -> 15.090x + 0.001826970 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 443.968212317 ( real) -> 12.240x + 0.002252414 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 375.104545883 ( real) -> 10.341x + 0.002665923 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 49.978706968 ( real) -> 1.378x + 0.020008521 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 38.531868759 ( real) -> 1.062x + 0.025952543 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 36.927649925 ( real) -> 1.018x 7 (>=3859) + 0.027079979 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 36.272134441 ( real) -> 1.000x 6 (>=3859) + 0.027569373 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1-3 are `JSON::Ext::Generator` methods. 4, 6, and 7 are +`JSON::Pure::Generator` methods and 5 is the Rails JSON generator. It is now a +bit faster than the `generator_safe` and `generator_pretty` methods of the pure +variant but slower than the others. + +To achieve the fastest JSON document output, you can use the `fast_generate` +method. Beware, that this will disable the checking for circular Ruby data +structures, which may cause JSON to go into an infinite loop. 
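
As a rough sketch of that trade-off (the `data` hash below is only an illustration):

```ruby
require 'json'

data = { "name" => "example", "values" => [1, 2, 3] }

JSON.generate(data)       # performs the usual nesting checks
JSON.fast_generate(data)  # same JSON text here, but skips those checks

circular = []
circular << circular      # a self-referencing array
begin
  JSON.generate(circular) # the nesting check stops this
rescue JSON::NestingError => e
  puts "refused: #{e.message}"
end
# JSON.fast_generate(circular) would not be stopped by any such check
```

In other words, reach for `fast_generate` only when you control the input and know it contains no cycles.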
+ +Here are the median comparisons for completeness' sake: + +``` + Comparing times (call_time_median): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 708.258020939 ( real) -> 16.547x + 0.001411915 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 569.105020353 ( real) -> 13.296x + 0.001757145 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 482.825371244 ( real) -> 11.280x + 0.002071142 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 62.717626652 ( real) -> 1.465x + 0.015944481 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 43.965681162 ( real) -> 1.027x + 0.022745013 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 43.929073409 ( real) -> 1.026x 7 (>=3859) + 0.022763968 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 42.802514491 ( real) -> 1.000x 6 (>=3859) + 0.023363113 + calls/sec ( time) -> speed covers + secs/call +``` + +## Development + +### Release + +Update the json.gemspec and json-java.gemspec. + +``` +rbenv shell 2.6.5 +rake build +gem push pkg/json-2.3.0.gem + +rbenv shell jruby-9.2.9.0 +rake build +gem push pkg/json-2.3.0-java.gem +``` + +## Author + +Florian Frank + +## License + +Ruby License, see https://www.ruby-lang.org/en/about/license.txt. + +## Download + +The latest version of this library can be downloaded at + +* https://rubygems.org/gems/json + +Online Documentation should be located at + +* https://www.rubydoc.info/gems/json diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/Rakefile b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/Rakefile new file mode 100644 index 0000000..5b42292 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/Rakefile @@ -0,0 +1,334 @@ +begin + require 'rubygems/package_task' +rescue LoadError +end + +require 'rbconfig' +include\ + begin + RbConfig + rescue NameError + Config + end + +require 'rake/clean' +CLOBBER.include 'doc', 'Gemfile.lock' +CLEAN.include FileList['diagrams/*.*'], 'doc', 'coverage', 'tmp', + FileList["ext/**/{Makefile,mkmf.log}"], 'build', 'dist', FileList['**/*.rbc'], + FileList["{ext,lib}/**/*.{so,bundle,#{CONFIG['DLEXT']},o,obj,pdb,lib,manifest,exp,def,jar,class,dSYM}"], + FileList['java/src/**/*.class'] + +require 'rake/testtask' +class UndocumentedTestTask < Rake::TestTask + def desc(*) end +end + +which = lambda { |c| + w = `which #{c}` + break w.chomp unless w.empty? 
+} + +MAKE = ENV['MAKE'] || %w[gmake make].find(&which) +BUNDLE = ENV['BUNDLE'] || %w[bundle].find(&which) +PKG_NAME = 'json' +PKG_TITLE = 'JSON Implementation for Ruby' +PKG_VERSION = File.read('VERSION').chomp +PKG_FILES = FileList[`git ls-files`.split(/\n/)] + +EXT_ROOT_DIR = 'ext/json/ext' +EXT_PARSER_DIR = "#{EXT_ROOT_DIR}/parser" +EXT_PARSER_DL = "#{EXT_PARSER_DIR}/parser.#{CONFIG['DLEXT']}" +RAGEL_PATH = "#{EXT_PARSER_DIR}/parser.rl" +EXT_PARSER_SRC = "#{EXT_PARSER_DIR}/parser.c" +EXT_GENERATOR_DIR = "#{EXT_ROOT_DIR}/generator" +EXT_GENERATOR_DL = "#{EXT_GENERATOR_DIR}/generator.#{CONFIG['DLEXT']}" +EXT_GENERATOR_SRC = "#{EXT_GENERATOR_DIR}/generator.c" + +JAVA_DIR = "java/src/json/ext" +JAVA_RAGEL_PATH = "#{JAVA_DIR}/Parser.rl" +JAVA_PARSER_SRC = "#{JAVA_DIR}/Parser.java" +JAVA_SOURCES = FileList["#{JAVA_DIR}/*.java"] +JAVA_CLASSES = [] +JRUBY_PARSER_JAR = File.expand_path("lib/json/ext/parser.jar") +JRUBY_GENERATOR_JAR = File.expand_path("lib/json/ext/generator.jar") + +RAGEL_CODEGEN = %w[rlcodegen rlgen-cd ragel].find(&which) +RAGEL_DOTGEN = %w[rlgen-dot rlgen-cd ragel].find(&which) + +desc "Installing library (pure)" +task :install_pure => :version do + ruby 'install.rb' +end + +task :install_ext_really do + sitearchdir = CONFIG["sitearchdir"] + cd 'ext' do + for file in Dir["json/ext/*.#{CONFIG['DLEXT']}"] + d = File.join(sitearchdir, file) + mkdir_p File.dirname(d) + install(file, d) + end + warn " *** Installed EXT ruby library." + end +end + +desc "Installing library (extension)" +task :install_ext => [ :compile, :install_pure, :install_ext_really ] + +desc "Installing library (extension)" +task :install => :install_ext + +desc m = "Writing version information for #{PKG_VERSION}" +task :version do + puts m + File.open(File.join('lib', 'json', 'version.rb'), 'w') do |v| + v.puts < [ :set_env_pure, :check_env, :do_test_pure ] +task(:set_env_pure) { ENV['JSON'] = 'pure' } + +UndocumentedTestTask.new do |t| + t.name = 'do_test_pure' + t.libs << 'lib' << 'tests' + t.test_files = FileList['tests/*_test.rb'] + t.verbose = true + t.options = '-v' +end + +desc "Testing library (pure ruby and extension)" +task :test => [ :test_pure, :test_ext ] + +namespace :gems do + desc 'Install all development gems' + task :install do + sh "#{BUNDLE}" + end +end + +if defined?(RUBY_ENGINE) and RUBY_ENGINE == 'jruby' + ENV['JAVA_HOME'] ||= [ + '/usr/local/java/jdk', + '/usr/lib/jvm/java-6-openjdk', + '/Library/Java/Home', + ].find { |c| File.directory?(c) } + if ENV['JAVA_HOME'] + warn " *** JAVA_HOME is set to #{ENV['JAVA_HOME'].inspect}" + ENV['PATH'] = ENV['PATH'].split(/:/).unshift(java_path = "#{ENV['JAVA_HOME']}/bin") * ':' + warn " *** java binaries are assumed to be in #{java_path.inspect}" + else + warn " *** JAVA_HOME was not set or could not be guessed!" 
+ exit 1 + end + + file JAVA_PARSER_SRC => JAVA_RAGEL_PATH do + cd JAVA_DIR do + if RAGEL_CODEGEN == 'ragel' + sh "ragel Parser.rl -J -o Parser.java" + else + sh "ragel -x Parser.rl | #{RAGEL_CODEGEN} -J" + end + end + end + + desc "Generate parser for java with ragel" + task :ragel => JAVA_PARSER_SRC + + desc "Delete the ragel generated Java source" + task :ragel_clean do + rm_rf JAVA_PARSER_SRC + end + + JRUBY_JAR = File.join(CONFIG["libdir"], "jruby.jar") + if File.exist?(JRUBY_JAR) + JAVA_SOURCES.each do |src| + classpath = (Dir['java/lib/*.jar'] << 'java/src' << JRUBY_JAR) * ':' + obj = src.sub(/\.java\Z/, '.class') + file obj => src do + sh 'javac', '-classpath', classpath, '-source', '1.6', '-target', '1.6', src + end + JAVA_CLASSES << obj + end + else + warn "WARNING: Cannot find jruby in path => Cannot build jruby extension!" + end + + desc "Compiling jruby extension" + task :compile => JAVA_CLASSES + + desc "Package the jruby gem" + task :jruby_gem => :create_jar do + sh 'gem build json-java.gemspec' + mkdir_p 'pkg' + mv "json-#{PKG_VERSION}-java.gem", 'pkg' + end + + desc "Testing library (jruby)" + task :test_ext => [ :set_env_ext, :create_jar, :check_env, :do_test_ext ] + task(:set_env_ext) { ENV['JSON'] = 'ext' } + + UndocumentedTestTask.new do |t| + t.name = 'do_test_ext' + t.libs << 'lib' << 'tests' + t.test_files = FileList['tests/*_test.rb'] + t.verbose = true + t.options = '-v' + end + + file JRUBY_PARSER_JAR => :compile do + cd 'java/src' do + parser_classes = FileList[ + "json/ext/ByteListTranscoder*.class", + "json/ext/OptionsReader*.class", + "json/ext/Parser*.class", + "json/ext/RuntimeInfo*.class", + "json/ext/StringDecoder*.class", + "json/ext/Utils*.class" + ] + sh 'jar', 'cf', File.basename(JRUBY_PARSER_JAR), *parser_classes + mv File.basename(JRUBY_PARSER_JAR), File.dirname(JRUBY_PARSER_JAR) + end + end + + desc "Create parser jar" + task :create_parser_jar => JRUBY_PARSER_JAR + + file JRUBY_GENERATOR_JAR => :compile do + cd 'java/src' do + generator_classes = FileList[ + "json/ext/ByteListTranscoder*.class", + "json/ext/OptionsReader*.class", + "json/ext/Generator*.class", + "json/ext/RuntimeInfo*.class", + "json/ext/StringEncoder*.class", + "json/ext/Utils*.class" + ] + sh 'jar', 'cf', File.basename(JRUBY_GENERATOR_JAR), *generator_classes + mv File.basename(JRUBY_GENERATOR_JAR), File.dirname(JRUBY_GENERATOR_JAR) + end + end + + desc "Create generator jar" + task :create_generator_jar => JRUBY_GENERATOR_JAR + + desc "Create parser and generator jars" + task :create_jar => [ :create_parser_jar, :create_generator_jar ] + + desc "Build all gems and archives for a new release of the jruby extension." 
+ task :build => [ :clean, :version, :jruby_gem ] + + task :release => :build +else + desc "Compiling extension" + task :compile => [ EXT_PARSER_DL, EXT_GENERATOR_DL ] + + file EXT_PARSER_DL => EXT_PARSER_SRC do + cd EXT_PARSER_DIR do + ruby 'extconf.rb' + sh MAKE + end + cp "#{EXT_PARSER_DIR}/parser.#{CONFIG['DLEXT']}", EXT_ROOT_DIR + end + + file EXT_GENERATOR_DL => EXT_GENERATOR_SRC do + cd EXT_GENERATOR_DIR do + ruby 'extconf.rb' + sh MAKE + end + cp "#{EXT_GENERATOR_DIR}/generator.#{CONFIG['DLEXT']}", EXT_ROOT_DIR + end + + desc "Testing library (extension)" + task :test_ext => [ :check_env, :compile, :do_test_ext ] + + UndocumentedTestTask.new do |t| + t.name = 'do_test_ext' + t.libs << 'ext' << 'lib' << 'tests' + t.test_files = FileList['tests/*_test.rb'] + t.verbose = true + t.options = '-v' + end + + desc "Generate parser with ragel" + task :ragel => EXT_PARSER_SRC + + desc "Delete the ragel generated C source" + task :ragel_clean do + rm_rf EXT_PARSER_SRC + end + + desc "Update the tags file" + task :tags do + system 'ctags', *Dir['**/*.{rb,c,h,java}'] + end + + file EXT_PARSER_SRC => RAGEL_PATH do + cd EXT_PARSER_DIR do + if RAGEL_CODEGEN == 'ragel' + sh "ragel parser.rl -G2 -o parser.c" + else + sh "ragel -x parser.rl | #{RAGEL_CODEGEN} -G2" + end + src = File.read("parser.c").gsub(/[ \t]+$/, '') + src.gsub!(/^static const int (JSON_.*=.*);$/, 'enum {\1};') + src.gsub!(/0 <= \(\*p\) && \(\*p\) <= 31/, "0 <= (signed char)(*p) && (*p) <= 31") + src[0, 0] = "/* This file is automatically generated from parser.rl by using ragel */" + File.open("parser.c", "w") {|f| f.print src} + end + end + + desc "Generate diagrams of ragel parser (ps)" + task :ragel_dot_ps do + root = 'diagrams' + specs = [] + File.new(RAGEL_PATH).grep(/^\s*machine\s*(\S+);\s*$/) { specs << $1 } + for s in specs + if RAGEL_DOTGEN == 'ragel' + sh "ragel #{RAGEL_PATH} -S#{s} -p -V | dot -Tps -o#{root}/#{s}.ps" + else + sh "ragel -x #{RAGEL_PATH} -S#{s} | #{RAGEL_DOTGEN} -p|dot -Tps -o#{root}/#{s}.ps" + end + end + end + + desc "Generate diagrams of ragel parser (png)" + task :ragel_dot_png do + root = 'diagrams' + specs = [] + File.new(RAGEL_PATH).grep(/^\s*machine\s*(\S+);\s*$/) { specs << $1 } + for s in specs + if RAGEL_DOTGEN == 'ragel' + sh "ragel #{RAGEL_PATH} -S#{s} -p -V | dot -Tpng -o#{root}/#{s}.png" + else + sh "ragel -x #{RAGEL_PATH} -S#{s} | #{RAGEL_DOTGEN} -p|dot -Tpng -o#{root}/#{s}.png" + end + end + end + + desc "Generate diagrams of ragel parser" + task :ragel_dot => [ :ragel_dot_png, :ragel_dot_ps ] + + desc "Build all gems and archives for a new release of json and json_pure." 
+ task :build => [ :clean, :gemspec, :package ] + + task :release => :build +end + +desc "Compile in the the source directory" +task :default => [ :clean, :test ] diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/VERSION b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/VERSION new file mode 100644 index 0000000..2bf1c1c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/VERSION @@ -0,0 +1 @@ +2.3.1 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/diagrams/.keep b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/diagrams/.keep new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/diagrams/.keep 2 b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/diagrams/.keep 2 new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer 2.h b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer 2.h new file mode 100644 index 0000000..dc8f406 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer 2.h @@ -0,0 +1,187 @@ + +#ifndef _FBUFFER_H_ +#define _FBUFFER_H_ + +#include "ruby.h" + +#ifndef RHASH_SIZE +#define RHASH_SIZE(hsh) (RHASH(hsh)->tbl->num_entries) +#endif + +#ifndef RFLOAT_VALUE +#define RFLOAT_VALUE(val) (RFLOAT(val)->value) +#endif + +#ifndef RARRAY_LEN +#define RARRAY_LEN(ARRAY) RARRAY(ARRAY)->len +#endif +#ifndef RSTRING_PTR +#define RSTRING_PTR(string) RSTRING(string)->ptr +#endif +#ifndef RSTRING_LEN +#define RSTRING_LEN(string) RSTRING(string)->len +#endif + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +#ifdef HAVE_RUBY_ENCODING_H +#include "ruby/encoding.h" +#define FORCE_UTF8(obj) rb_enc_associate((obj), rb_utf8_encoding()) +#else +#define FORCE_UTF8(obj) +#endif + +/* We don't need to guard objects for rbx, so let's do nothing at all. 
*/ +#ifndef RB_GC_GUARD +#define RB_GC_GUARD(object) +#endif + +typedef struct FBufferStruct { + unsigned long initial_length; + char *ptr; + unsigned long len; + unsigned long capa; +} FBuffer; + +#define FBUFFER_INITIAL_LENGTH_DEFAULT 1024 + +#define FBUFFER_PTR(fb) (fb->ptr) +#define FBUFFER_LEN(fb) (fb->len) +#define FBUFFER_CAPA(fb) (fb->capa) +#define FBUFFER_PAIR(fb) FBUFFER_PTR(fb), FBUFFER_LEN(fb) + +static FBuffer *fbuffer_alloc(unsigned long initial_length); +static void fbuffer_free(FBuffer *fb); +static void fbuffer_clear(FBuffer *fb); +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len); +#ifdef JSON_GENERATOR +static void fbuffer_append_long(FBuffer *fb, long number); +#endif +static void fbuffer_append_char(FBuffer *fb, char newchr); +#ifdef JSON_GENERATOR +static FBuffer *fbuffer_dup(FBuffer *fb); +static VALUE fbuffer_to_s(FBuffer *fb); +#endif + +static FBuffer *fbuffer_alloc(unsigned long initial_length) +{ + FBuffer *fb; + if (initial_length <= 0) initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + fb = ALLOC(FBuffer); + memset((void *) fb, 0, sizeof(FBuffer)); + fb->initial_length = initial_length; + return fb; +} + +static void fbuffer_free(FBuffer *fb) +{ + if (fb->ptr) ruby_xfree(fb->ptr); + ruby_xfree(fb); +} + +static void fbuffer_clear(FBuffer *fb) +{ + fb->len = 0; +} + +static void fbuffer_inc_capa(FBuffer *fb, unsigned long requested) +{ + unsigned long required; + + if (!fb->ptr) { + fb->ptr = ALLOC_N(char, fb->initial_length); + fb->capa = fb->initial_length; + } + + for (required = fb->capa; requested > required - fb->len; required <<= 1); + + if (required > fb->capa) { + REALLOC_N(fb->ptr, char, required); + fb->capa = required; + } +} + +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len) +{ + if (len > 0) { + fbuffer_inc_capa(fb, len); + MEMCPY(fb->ptr + fb->len, newstr, char, len); + fb->len += len; + } +} + +#ifdef JSON_GENERATOR +static void fbuffer_append_str(FBuffer *fb, VALUE str) +{ + const char *newstr = StringValuePtr(str); + unsigned long len = RSTRING_LEN(str); + + RB_GC_GUARD(str); + + fbuffer_append(fb, newstr, len); +} +#endif + +static void fbuffer_append_char(FBuffer *fb, char newchr) +{ + fbuffer_inc_capa(fb, 1); + *(fb->ptr + fb->len) = newchr; + fb->len++; +} + +#ifdef JSON_GENERATOR +static void freverse(char *start, char *end) +{ + char c; + + while (end > start) { + c = *end, *end-- = *start, *start++ = c; + } +} + +static long fltoa(long number, char *buf) +{ + static char digits[] = "0123456789"; + long sign = number; + char* tmp = buf; + + if (sign < 0) number = -number; + do *tmp++ = digits[number % 10]; while (number /= 10); + if (sign < 0) *tmp++ = '-'; + freverse(buf, tmp - 1); + return tmp - buf; +} + +static void fbuffer_append_long(FBuffer *fb, long number) +{ + char buf[20]; + unsigned long len = fltoa(number, buf); + fbuffer_append(fb, buf, len); +} + +static FBuffer *fbuffer_dup(FBuffer *fb) +{ + unsigned long len = fb->len; + FBuffer *result; + + result = fbuffer_alloc(len); + fbuffer_append(result, FBUFFER_PAIR(fb)); + return result; +} + +static VALUE fbuffer_to_s(FBuffer *fb) +{ + VALUE result = rb_str_new(FBUFFER_PTR(fb), FBUFFER_LEN(fb)); + fbuffer_free(fb); + FORCE_UTF8(result); + return result; +} +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer.h b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer.h new file mode 100644 index 0000000..dc8f406 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer.h @@ -0,0 +1,187 @@ + +#ifndef _FBUFFER_H_ +#define _FBUFFER_H_ + +#include "ruby.h" + +#ifndef RHASH_SIZE +#define RHASH_SIZE(hsh) (RHASH(hsh)->tbl->num_entries) +#endif + +#ifndef RFLOAT_VALUE +#define RFLOAT_VALUE(val) (RFLOAT(val)->value) +#endif + +#ifndef RARRAY_LEN +#define RARRAY_LEN(ARRAY) RARRAY(ARRAY)->len +#endif +#ifndef RSTRING_PTR +#define RSTRING_PTR(string) RSTRING(string)->ptr +#endif +#ifndef RSTRING_LEN +#define RSTRING_LEN(string) RSTRING(string)->len +#endif + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +#ifdef HAVE_RUBY_ENCODING_H +#include "ruby/encoding.h" +#define FORCE_UTF8(obj) rb_enc_associate((obj), rb_utf8_encoding()) +#else +#define FORCE_UTF8(obj) +#endif + +/* We don't need to guard objects for rbx, so let's do nothing at all. */ +#ifndef RB_GC_GUARD +#define RB_GC_GUARD(object) +#endif + +typedef struct FBufferStruct { + unsigned long initial_length; + char *ptr; + unsigned long len; + unsigned long capa; +} FBuffer; + +#define FBUFFER_INITIAL_LENGTH_DEFAULT 1024 + +#define FBUFFER_PTR(fb) (fb->ptr) +#define FBUFFER_LEN(fb) (fb->len) +#define FBUFFER_CAPA(fb) (fb->capa) +#define FBUFFER_PAIR(fb) FBUFFER_PTR(fb), FBUFFER_LEN(fb) + +static FBuffer *fbuffer_alloc(unsigned long initial_length); +static void fbuffer_free(FBuffer *fb); +static void fbuffer_clear(FBuffer *fb); +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len); +#ifdef JSON_GENERATOR +static void fbuffer_append_long(FBuffer *fb, long number); +#endif +static void fbuffer_append_char(FBuffer *fb, char newchr); +#ifdef JSON_GENERATOR +static FBuffer *fbuffer_dup(FBuffer *fb); +static VALUE fbuffer_to_s(FBuffer *fb); +#endif + +static FBuffer *fbuffer_alloc(unsigned long initial_length) +{ + FBuffer *fb; + if (initial_length <= 0) initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + fb = ALLOC(FBuffer); + memset((void *) fb, 0, sizeof(FBuffer)); + fb->initial_length = initial_length; + return fb; +} + +static void fbuffer_free(FBuffer *fb) +{ + if (fb->ptr) ruby_xfree(fb->ptr); + ruby_xfree(fb); +} + +static void fbuffer_clear(FBuffer *fb) +{ + fb->len = 0; +} + +static void fbuffer_inc_capa(FBuffer *fb, unsigned long requested) +{ + unsigned long required; + + if (!fb->ptr) { + fb->ptr = ALLOC_N(char, fb->initial_length); + fb->capa = fb->initial_length; + } + + for (required = fb->capa; requested > required - fb->len; required <<= 1); + + if (required > fb->capa) { + REALLOC_N(fb->ptr, char, required); + fb->capa = required; + } +} + +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len) +{ + if (len > 0) { + fbuffer_inc_capa(fb, len); + MEMCPY(fb->ptr + fb->len, newstr, char, len); + fb->len += len; + } +} + +#ifdef JSON_GENERATOR +static void fbuffer_append_str(FBuffer *fb, VALUE str) +{ + const char *newstr = StringValuePtr(str); + unsigned long len = RSTRING_LEN(str); + + RB_GC_GUARD(str); + + fbuffer_append(fb, newstr, len); +} +#endif + +static void fbuffer_append_char(FBuffer *fb, char newchr) +{ + fbuffer_inc_capa(fb, 1); + *(fb->ptr + fb->len) = newchr; + fb->len++; +} + +#ifdef JSON_GENERATOR +static void freverse(char *start, char *end) +{ + char c; + + while (end > start) { + c = *end, *end-- = *start, *start++ = c; + } +} + +static long fltoa(long number, char 
*buf) +{ + static char digits[] = "0123456789"; + long sign = number; + char* tmp = buf; + + if (sign < 0) number = -number; + do *tmp++ = digits[number % 10]; while (number /= 10); + if (sign < 0) *tmp++ = '-'; + freverse(buf, tmp - 1); + return tmp - buf; +} + +static void fbuffer_append_long(FBuffer *fb, long number) +{ + char buf[20]; + unsigned long len = fltoa(number, buf); + fbuffer_append(fb, buf, len); +} + +static FBuffer *fbuffer_dup(FBuffer *fb) +{ + unsigned long len = fb->len; + FBuffer *result; + + result = fbuffer_alloc(len); + fbuffer_append(result, FBUFFER_PAIR(fb)); + return result; +} + +static VALUE fbuffer_to_s(FBuffer *fb) +{ + VALUE result = rb_str_new(FBUFFER_PTR(fb), FBUFFER_LEN(fb)); + fbuffer_free(fb); + FORCE_UTF8(result); + return result; +} +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/.sitearchdir.-.json.-.ext.time b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/.sitearchdir.-.json.-.ext.time new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/Makefile 2 b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/Makefile 2 new file mode 100644 index 0000000..e52d762 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/Makefile 2 @@ -0,0 +1,268 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 +hdrdir = $(topdir) +arch_hdrdir = /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20211106-75622-1enuftq +sitelibdir = $(DESTDIR)./.gem.20211106-75622-1enuftq +sitedir = $(DESTDIR)/Library/Ruby/Site +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(DESTDIR)/usr/share/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(DESTDIR)/usr/share/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(DESTDIR)/usr/include +includedir = 
$(DESTDIR)/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk$(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(DESTDIR)/Library/Ruby/Site +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = xcrun clang +CXX = xcrun clang++ +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = $(optflags) $(debugflags) $(warnflags) +optflags = +debugflags = -g +warnflags = +cppflags = +CCDLFLAGS = +CFLAGS = $(CCDLFLAGS) -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) +DEFS = +CPPFLAGS = -DJSON_GENERATOR -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) -g -Os -pipe $(ARCH_FLAG) +ldflags = -L. -L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib +dldflags = $(ARCH_FLAG) -undefined dynamic_lookup -multiply_defined suppress +ARCH_FLAG = +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = libtool -static +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.2.6 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = universal-darwin21 +sitearch = $(arch) +ruby_version = 2.6.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = mkdir -p +INSTALL = /usr/bin/install -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = /json/ext +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) +ORIG_SRCS = generator.c +SRCS = $(ORIG_SRCS) +OBJS = generator.o +HDRS = $(srcdir)/generator.h +LOCAL_HDRS = +TARGET = generator +TARGET_NAME = generator +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . 
+BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object json/ext/$(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +### +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/depend 
b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/depend new file mode 100644 index 0000000..1a042a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/depend @@ -0,0 +1 @@ +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/depend 2 b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/depend 2 new file mode 100644 index 0000000..1a042a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/depend 2 @@ -0,0 +1 @@ +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/extconf 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/extconf 2.rb new file mode 100644 index 0000000..8627c5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/extconf 2.rb @@ -0,0 +1,4 @@ +require 'mkmf' + +$defs << "-DJSON_GENERATOR" +create_makefile 'json/ext/generator' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/extconf.rb new file mode 100644 index 0000000..8627c5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/extconf.rb @@ -0,0 +1,4 @@ +require 'mkmf' + +$defs << "-DJSON_GENERATOR" +create_makefile 'json/ext/generator' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator 2.c b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator 2.c new file mode 100644 index 0000000..749efaf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator 2.c @@ -0,0 +1,1569 @@ +#include "../fbuffer/fbuffer.h" +#include "generator.h" + +#ifdef HAVE_RUBY_ENCODING_H +static VALUE CEncoding_UTF_8; +static ID i_encoding, i_encode; +#endif + +static VALUE mJSON, mExt, mGenerator, cState, mGeneratorMethods, mObject, + mHash, mArray, +#ifdef RUBY_INTEGER_UNIFICATION + mInteger, +#else + mFixnum, mBignum, +#endif + mFloat, mString, mString_Extend, + mTrueClass, mFalseClass, mNilClass, eGeneratorError, + eNestingError, + i_SAFE_STATE_PROTOTYPE; + +static ID i_to_s, i_to_json, i_new, i_indent, i_space, i_space_before, + i_object_nl, i_array_nl, i_max_nesting, i_allow_nan, i_ascii_only, + i_pack, i_unpack, i_create_id, i_extend, i_key_p, + i_aref, i_send, i_respond_to_p, i_match, i_keys, i_depth, + i_buffer_initial_length, i_dup; + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. 
+ */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is + * left as-is for anyone who may want to do such conversion, which was + * allowed in earlier algorithms. + */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* + * Magic values subtracted from a buffer value during UTF8 conversion. + * This table contains as many values as there might be trailing bytes + * in a UTF-8 sequence. + */ +static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * If not calling this from ConvertUTF8to*, then the length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns 0. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length) +{ + UTF8 a; + const UTF8 *srcptr = source+length; + switch (length) { + default: return 0; + /* Everything else falls through when "1"... */ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 2: if ((a = (*--srcptr)) > 0xBF) return 0; + + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return 0; break; + case 0xED: if (a > 0x9F) return 0; break; + case 0xF0: if (a < 0x90) return 0; break; + case 0xF4: if (a > 0x8F) return 0; break; + default: if (a < 0x80) return 0; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return 0; + } + if (*source > 0xF4) return 0; + return 1; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf. */ +static void unicode_escape(char *buf, UTF16 character) +{ + const char *digits = "0123456789abcdef"; + + buf[2] = digits[character >> 12]; + buf[3] = digits[(character >> 8) & 0xf]; + buf[4] = digits[(character >> 4) & 0xf]; + buf[5] = digits[character & 0xf]; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf, then + * the buffer buf is appended to the FBuffer buffer. */ +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 + character) +{ + unicode_escape(buf, character); + fbuffer_append(buffer, buf, 6); +} + +/* Converts string to a JSON string in FBuffer buffer, where all but the ASCII + * and control characters are JSON escaped. 
*/ +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string) +{ + const UTF8 *source = (UTF8 *) RSTRING_PTR(string); + const UTF8 *sourceEnd = source + RSTRING_LEN(string); + char buf[6] = { '\\', 'u' }; + + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8(source, extraBytesToRead+1)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* normal case */ + if (ch >= 0x20 && ch <= 0x7f) { + switch (ch) { + case '\\': + fbuffer_append(buffer, "\\\\", 2); + break; + case '"': + fbuffer_append(buffer, "\\\"", 2); + break; + default: + fbuffer_append_char(buffer, (char)ch); + break; + } + } else { + switch (ch) { + case '\n': + fbuffer_append(buffer, "\\n", 2); + break; + case '\r': + fbuffer_append(buffer, "\\r", 2); + break; + case '\t': + fbuffer_append(buffer, "\\t", 2); + break; + case '\f': + fbuffer_append(buffer, "\\f", 2); + break; + case '\b': + fbuffer_append(buffer, "\\b", 2); + break; + default: + unicode_escape_to_buffer(buffer, buf, (UTF16) ch); + break; + } + } + } + } else if (ch > UNI_MAX_UTF16) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the start */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. */ + ch -= halfBase; + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START)); + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch & halfMask) + UNI_SUR_LOW_START)); + } + } + RB_GC_GUARD(string); +} + +/* Converts string to a JSON string in FBuffer buffer, where only the + * characters required by the JSON standard are JSON escaped. The remaining + * characters (should be UTF8) are just passed through and appended to the + * result. 
*/ +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string) +{ + const char *ptr = RSTRING_PTR(string), *p; + unsigned long len = RSTRING_LEN(string), start = 0, end = 0; + const char *escape = NULL; + int escape_len; + unsigned char c; + char buf[6] = { '\\', 'u' }; + int ascii_only = rb_enc_str_asciionly_p(string); + + for (start = 0, end = 0; end < len;) { + p = ptr + end; + c = (unsigned char) *p; + if (c < 0x20) { + switch (c) { + case '\n': + escape = "\\n"; + escape_len = 2; + break; + case '\r': + escape = "\\r"; + escape_len = 2; + break; + case '\t': + escape = "\\t"; + escape_len = 2; + break; + case '\f': + escape = "\\f"; + escape_len = 2; + break; + case '\b': + escape = "\\b"; + escape_len = 2; + break; + default: + unicode_escape(buf, (UTF16) *p); + escape = buf; + escape_len = 6; + break; + } + } else { + switch (c) { + case '\\': + escape = "\\\\"; + escape_len = 2; + break; + case '"': + escape = "\\\""; + escape_len = 2; + break; + default: + { + unsigned short clen = 1; + if (!ascii_only) { + clen += trailingBytesForUTF8[c]; + if (end + clen > len) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8((UTF8 *) p, clen)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + } + end += clen; + } + continue; + break; + } + } + fbuffer_append(buffer, ptr + start, end - start); + fbuffer_append(buffer, escape, escape_len); + start = ++end; + escape = NULL; + } + fbuffer_append(buffer, ptr + start, end - start); +} + +static char *fstrndup(const char *ptr, unsigned long len) { + char *result; + if (len <= 0) return NULL; + result = ALLOC_N(char, len); + memcpy(result, ptr, len); + return result; +} + +/* + * Document-module: JSON::Ext::Generator + * + * This is the JSON generator implemented as a C extension. It can be + * configured to be used by setting + * + * JSON.generator = JSON::Ext::Generator + * + * with the method generator= in JSON. 
+ * + */ + +/* Explanation of the following: that's the only way to not pollute + * standard library's docs with GeneratorMethods:: which + * are uninformative and take a large place in a list of classes + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Array + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Bignum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::FalseClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Fixnum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Float + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Hash + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Integer + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::NilClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Object + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String::Extend + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::TrueClass + * :nodoc: + */ + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON object, that is generated from + * this Hash instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(object); +} + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON array, that is generated from + * this Array instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self) { + GENERATE_JSON(array); +} + +#ifdef RUBY_INTEGER_UNIFICATION +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(integer); +} + +#else +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(fixnum); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(bignum); +} +#endif + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Float number. + */ +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(float); +} + +/* + * call-seq: String.included(modul) + * + * Extends _modul_ with the String::Extend module. + */ +static VALUE mString_included_s(VALUE self, VALUE modul) { + VALUE result = rb_funcall(modul, i_extend, 1, mString_Extend); + return result; +} + +/* + * call-seq: to_json(*) + * + * This string should be encoded with UTF-8 A call to this method + * returns a JSON string encoded with UTF16 big endian characters as + * \u????. 
+ */ +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(string); +} + +/* + * call-seq: to_json_raw_object() + * + * This method creates a raw object hash, that can be nested into + * other data structures and will be generated as a raw string. This + * method should be used, if you want to convert raw strings to JSON + * instead of UTF-8 strings, e. g. binary data. + */ +static VALUE mString_to_json_raw_object(VALUE self) +{ + VALUE ary; + VALUE result = rb_hash_new(); + rb_hash_aset(result, rb_funcall(mJSON, i_create_id, 0), rb_class_name(rb_obj_class(self))); + ary = rb_funcall(self, i_unpack, 1, rb_str_new2("C*")); + rb_hash_aset(result, rb_str_new2("raw"), ary); + return result; +} + +/* + * call-seq: to_json_raw(*args) + * + * This method creates a JSON text from the result of a call to + * to_json_raw_object of this String. + */ +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self) +{ + VALUE obj = mString_to_json_raw_object(self); + Check_Type(obj, T_HASH); + return mHash_to_json(argc, argv, obj); +} + +/* + * call-seq: json_create(o) + * + * Raw Strings are JSON Objects (the raw bytes are stored in an array for the + * key "raw"). The Ruby String can be created by this module method. + */ +static VALUE mString_Extend_json_create(VALUE self, VALUE o) +{ + VALUE ary; + Check_Type(o, T_HASH); + ary = rb_hash_aref(o, rb_str_new2("raw")); + return rb_funcall(ary, i_pack, 1, rb_str_new2("C*")); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for true: 'true'. + */ +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(true); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for false: 'false'. + */ +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(false); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for nil: 'null'. + */ +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(null); +} + +/* + * call-seq: to_json(*) + * + * Converts this object to a string (calling #to_s), converts + * it to a JSON string, and returns the result. This is a fallback, if no + * special method #to_json was defined for some object. 
+ */ +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self) +{ + VALUE state; + VALUE string = rb_funcall(self, i_to_s, 0); + rb_scan_args(argc, argv, "01", &state); + Check_Type(string, T_STRING); + state = cState_from_state_s(cState, state); + return cState_partial_generate(state, string); +} + +static void State_free(void *ptr) +{ + JSON_Generator_State *state = ptr; + if (state->indent) ruby_xfree(state->indent); + if (state->space) ruby_xfree(state->space); + if (state->space_before) ruby_xfree(state->space_before); + if (state->object_nl) ruby_xfree(state->object_nl); + if (state->array_nl) ruby_xfree(state->array_nl); + if (state->array_delim) fbuffer_free(state->array_delim); + if (state->object_delim) fbuffer_free(state->object_delim); + if (state->object_delim2) fbuffer_free(state->object_delim2); + ruby_xfree(state); +} + +static size_t State_memsize(const void *ptr) +{ + const JSON_Generator_State *state = ptr; + size_t size = sizeof(*state); + if (state->indent) size += state->indent_len + 1; + if (state->space) size += state->space_len + 1; + if (state->space_before) size += state->space_before_len + 1; + if (state->object_nl) size += state->object_nl_len + 1; + if (state->array_nl) size += state->array_nl_len + 1; + if (state->array_delim) size += FBUFFER_CAPA(state->array_delim); + if (state->object_delim) size += FBUFFER_CAPA(state->object_delim); + if (state->object_delim2) size += FBUFFER_CAPA(state->object_delim2); + return size; +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Generator_State_type = { + "JSON/Generator/State", + {NULL, State_free, State_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cState_s_allocate(VALUE klass) +{ + JSON_Generator_State *state; + return TypedData_Make_Struct(klass, JSON_Generator_State, + &JSON_Generator_State_type, state); +} + +/* + * call-seq: configure(opts) + * + * Configure this State instance with the Hash _opts_, and return + * itself. 
+ */ +static VALUE cState_configure(VALUE self, VALUE opts) +{ + VALUE tmp; + GET_STATE(self); + tmp = rb_check_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(tmp)) tmp = rb_convert_type(opts, T_HASH, "Hash", "to_h"); + opts = tmp; + tmp = rb_hash_aref(opts, ID2SYM(i_indent)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->indent = fstrndup(RSTRING_PTR(tmp), len + 1); + state->indent_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space_before)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space_before = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_before_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_array_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->array_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->array_nl_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_object_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->object_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->object_nl_len = len; + } + tmp = ID2SYM(i_max_nesting); + state->max_nesting = 100; + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + state->max_nesting = FIX2LONG(max_nesting); + } else { + state->max_nesting = 0; + } + } + tmp = ID2SYM(i_depth); + state->depth = 0; + if (option_given_p(opts, tmp)) { + VALUE depth = rb_hash_aref(opts, tmp); + if (RTEST(depth)) { + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + } else { + state->depth = 0; + } + } + tmp = ID2SYM(i_buffer_initial_length); + if (option_given_p(opts, tmp)) { + VALUE buffer_initial_length = rb_hash_aref(opts, tmp); + if (RTEST(buffer_initial_length)) { + long initial_length; + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) state->buffer_initial_length = initial_length; + } + } + tmp = rb_hash_aref(opts, ID2SYM(i_allow_nan)); + state->allow_nan = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_ascii_only)); + state->ascii_only = RTEST(tmp); + return self; +} + +static void set_state_ivars(VALUE hash, VALUE state) +{ + VALUE ivars = rb_obj_instance_variables(state); + int i = 0; + for (i = 0; i < RARRAY_LEN(ivars); i++) { + VALUE key = rb_funcall(rb_ary_entry(ivars, i), i_to_s, 0); + long key_len = RSTRING_LEN(key); + VALUE value = rb_iv_get(state, StringValueCStr(key)); + rb_hash_aset(hash, rb_str_intern(rb_str_substr(key, 1, key_len - 1)), value); + } +} + +/* + * call-seq: to_h + * + * Returns the configuration instance variables as a hash, that can be + * passed to the configure method. 
+ */ +static VALUE cState_to_h(VALUE self) +{ + VALUE result = rb_hash_new(); + GET_STATE(self); + set_state_ivars(result, self); + rb_hash_aset(result, ID2SYM(i_indent), rb_str_new(state->indent, state->indent_len)); + rb_hash_aset(result, ID2SYM(i_space), rb_str_new(state->space, state->space_len)); + rb_hash_aset(result, ID2SYM(i_space_before), rb_str_new(state->space_before, state->space_before_len)); + rb_hash_aset(result, ID2SYM(i_object_nl), rb_str_new(state->object_nl, state->object_nl_len)); + rb_hash_aset(result, ID2SYM(i_array_nl), rb_str_new(state->array_nl, state->array_nl_len)); + rb_hash_aset(result, ID2SYM(i_allow_nan), state->allow_nan ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_ascii_only), state->ascii_only ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_max_nesting), LONG2FIX(state->max_nesting)); + rb_hash_aset(result, ID2SYM(i_depth), LONG2FIX(state->depth)); + rb_hash_aset(result, ID2SYM(i_buffer_initial_length), LONG2FIX(state->buffer_initial_length)); + return result; +} + +/* +* call-seq: [](name) +* +* Returns the value returned by method +name+. +*/ +static VALUE cState_aref(VALUE self, VALUE name) +{ + name = rb_funcall(name, i_to_s, 0); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name))) { + return rb_funcall(self, i_send, 1, name); + } else { + return rb_attr_get(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name))); + } +} + +/* +* call-seq: []=(name, value) +* +* Sets the attribute name to value. +*/ +static VALUE cState_aset(VALUE self, VALUE name, VALUE value) +{ + VALUE name_writer; + + name = rb_funcall(name, i_to_s, 0); + name_writer = rb_str_cat2(rb_str_dup(name), "="); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name_writer))) { + return rb_funcall(self, i_send, 2, name_writer, value); + } else { + rb_ivar_set(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name)), value); + } + return Qnil; +} + +struct hash_foreach_arg { + FBuffer *buffer; + JSON_Generator_State *state; + VALUE Vstate; + int iter; +}; + +static int +json_object_i(VALUE key, VALUE val, VALUE _arg) +{ + struct hash_foreach_arg *arg = (struct hash_foreach_arg *)_arg; + FBuffer *buffer = arg->buffer; + JSON_Generator_State *state = arg->state; + VALUE Vstate = arg->Vstate; + + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + char *delim = FBUFFER_PTR(state->object_delim); + long delim_len = FBUFFER_LEN(state->object_delim); + char *delim2 = FBUFFER_PTR(state->object_delim2); + long delim2_len = FBUFFER_LEN(state->object_delim2); + long depth = state->depth; + int j; + VALUE klass, key_to_s; + + if (arg->iter > 0) fbuffer_append(buffer, delim, delim_len); + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + } + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + + klass = CLASS_OF(key); + if (klass == rb_cString) { + key_to_s = key; + } else if (klass == rb_cSymbol) { + key_to_s = rb_id2str(SYM2ID(key)); + } else { + key_to_s = rb_funcall(key, i_to_s, 0); + } + Check_Type(key_to_s, T_STRING); + generate_json(buffer, Vstate, state, key_to_s); + fbuffer_append(buffer, delim2, delim2_len); + generate_json(buffer, Vstate, state, val); + + arg->iter++; + return ST_CONTINUE; +} + +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = 
state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + long depth = ++state->depth; + int j; + struct hash_foreach_arg arg; + + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '{'); + + arg.buffer = buffer; + arg.state = state; + arg.Vstate = Vstate; + arg.iter = 0; + rb_hash_foreach(obj, json_object_i, (VALUE)&arg); + + depth = --state->depth; + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, '}'); +} + +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *array_nl = state->array_nl; + long array_nl_len = state->array_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + char *delim = FBUFFER_PTR(state->array_delim); + long delim_len = FBUFFER_LEN(state->array_delim); + long depth = ++state->depth; + int i, j; + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '['); + if (array_nl) fbuffer_append(buffer, array_nl, array_nl_len); + for(i = 0; i < RARRAY_LEN(obj); i++) { + if (i > 0) fbuffer_append(buffer, delim, delim_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + generate_json(buffer, Vstate, state, rb_ary_entry(obj, i)); + } + state->depth = --depth; + if (array_nl) { + fbuffer_append(buffer, array_nl, array_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, ']'); +} + +#ifdef HAVE_RUBY_ENCODING_H +static int enc_utf8_compatible_p(rb_encoding *enc) +{ + if (enc == rb_usascii_encoding()) return 1; + if (enc == rb_utf8_encoding()) return 1; + return 0; +} +#endif + +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_char(buffer, '"'); +#ifdef HAVE_RUBY_ENCODING_H + if (!enc_utf8_compatible_p(rb_enc_get(obj))) { + obj = rb_str_encode(obj, CEncoding_UTF_8, 0, Qnil); + } +#endif + if (state->ascii_only) { + convert_UTF8_to_JSON_ASCII(buffer, obj); + } else { + convert_UTF8_to_JSON(buffer, obj); + } + fbuffer_append_char(buffer, '"'); +} + +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "null", 4); +} + +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "false", 5); +} + +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "true", 4); +} + +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_long(buffer, FIX2LONG(obj)); +} + +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp = rb_funcall(obj, i_to_s, 0); + fbuffer_append_str(buffer, tmp); +} + +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + if (FIXNUM_P(obj)) + 
generate_json_fixnum(buffer, Vstate, state, obj); + else + generate_json_bignum(buffer, Vstate, state, obj); +} +#endif +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + double value = RFLOAT_VALUE(obj); + char allow_nan = state->allow_nan; + VALUE tmp = rb_funcall(obj, i_to_s, 0); + if (!allow_nan) { + if (isinf(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } else if (isnan(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } + } + fbuffer_append_str(buffer, tmp); +} + +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp; + VALUE klass = CLASS_OF(obj); + if (klass == rb_cHash) { + generate_json_object(buffer, Vstate, state, obj); + } else if (klass == rb_cArray) { + generate_json_array(buffer, Vstate, state, obj); + } else if (klass == rb_cString) { + generate_json_string(buffer, Vstate, state, obj); + } else if (obj == Qnil) { + generate_json_null(buffer, Vstate, state, obj); + } else if (obj == Qfalse) { + generate_json_false(buffer, Vstate, state, obj); + } else if (obj == Qtrue) { + generate_json_true(buffer, Vstate, state, obj); + } else if (FIXNUM_P(obj)) { + generate_json_fixnum(buffer, Vstate, state, obj); + } else if (RB_TYPE_P(obj, T_BIGNUM)) { + generate_json_bignum(buffer, Vstate, state, obj); + } else if (klass == rb_cFloat) { + generate_json_float(buffer, Vstate, state, obj); + } else if (rb_respond_to(obj, i_to_json)) { + tmp = rb_funcall(obj, i_to_json, 1, Vstate); + Check_Type(tmp, T_STRING); + fbuffer_append_str(buffer, tmp); + } else { + tmp = rb_funcall(obj, i_to_s, 0); + Check_Type(tmp, T_STRING); + generate_json_string(buffer, Vstate, state, tmp); + } +} + +static FBuffer *cState_prepare_buffer(VALUE self) +{ + FBuffer *buffer; + GET_STATE(self); + buffer = fbuffer_alloc(state->buffer_initial_length); + + if (state->object_delim) { + fbuffer_clear(state->object_delim); + } else { + state->object_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->object_delim, ','); + if (state->object_delim2) { + fbuffer_clear(state->object_delim2); + } else { + state->object_delim2 = fbuffer_alloc(16); + } + if (state->space_before) fbuffer_append(state->object_delim2, state->space_before, state->space_before_len); + fbuffer_append_char(state->object_delim2, ':'); + if (state->space) fbuffer_append(state->object_delim2, state->space, state->space_len); + + if (state->array_delim) { + fbuffer_clear(state->array_delim); + } else { + state->array_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->array_delim, ','); + if (state->array_nl) fbuffer_append(state->array_delim, state->array_nl, state->array_nl_len); + return buffer; +} + +static VALUE cState_partial_generate(VALUE self, VALUE obj) +{ + FBuffer *buffer = cState_prepare_buffer(self); + GET_STATE(self); + generate_json(buffer, self, state, obj); + return fbuffer_to_s(buffer); +} + +/* + * call-seq: generate(obj) + * + * Generates a valid JSON document from object +obj+ and returns the + * result. If no valid JSON document can be created this method raises a + * GeneratorError exception. 
+ */ +static VALUE cState_generate(VALUE self, VALUE obj) +{ + VALUE result = cState_partial_generate(self, obj); + GET_STATE(self); + (void)state; + return result; +} + +/* + * call-seq: new(opts = {}) + * + * Instantiates a new State object, configured by _opts_. + * + * _opts_ can have the following keys: + * + * * *indent*: a string used to indent levels (default: ''), + * * *space*: a string that is put after, a : or , delimiter (default: ''), + * * *space_before*: a string that is put before a : pair delimiter (default: ''), + * * *object_nl*: a string that is put at the end of a JSON object (default: ''), + * * *array_nl*: a string that is put at the end of a JSON array (default: ''), + * * *allow_nan*: true if NaN, Infinity, and -Infinity should be + * generated, otherwise an exception is thrown, if these values are + * encountered. This options defaults to false. + * * *ascii_only*: true if only ASCII characters should be generated. This + * option defaults to false. + * * *buffer_initial_length*: sets the initial length of the generator's + * internal buffer. + */ +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE opts; + GET_STATE(self); + state->max_nesting = 100; + state->buffer_initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + rb_scan_args(argc, argv, "01", &opts); + if (!NIL_P(opts)) cState_configure(self, opts); + return self; +} + +/* + * call-seq: initialize_copy(orig) + * + * Initializes this object from orig if it can be duplicated/cloned and returns + * it. +*/ +static VALUE cState_init_copy(VALUE obj, VALUE orig) +{ + JSON_Generator_State *objState, *origState; + + if (obj == orig) return obj; + GET_STATE_TO(obj, objState); + GET_STATE_TO(orig, origState); + if (!objState) rb_raise(rb_eArgError, "unallocated JSON::State"); + + MEMCPY(objState, origState, JSON_Generator_State, 1); + objState->indent = fstrndup(origState->indent, origState->indent_len); + objState->space = fstrndup(origState->space, origState->space_len); + objState->space_before = fstrndup(origState->space_before, origState->space_before_len); + objState->object_nl = fstrndup(origState->object_nl, origState->object_nl_len); + objState->array_nl = fstrndup(origState->array_nl, origState->array_nl_len); + if (origState->array_delim) objState->array_delim = fbuffer_dup(origState->array_delim); + if (origState->object_delim) objState->object_delim = fbuffer_dup(origState->object_delim); + if (origState->object_delim2) objState->object_delim2 = fbuffer_dup(origState->object_delim2); + return obj; +} + +/* + * call-seq: from_state(opts) + * + * Creates a State object from _opts_, which ought to be Hash to create a + * new State instance configured by _opts_, something else to create an + * unconfigured instance. If _opts_ is a State object, it is just returned. + */ +static VALUE cState_from_state_s(VALUE self, VALUE opts) +{ + if (rb_obj_is_kind_of(opts, self)) { + return opts; + } else if (rb_obj_is_kind_of(opts, rb_cHash)) { + return rb_funcall(self, i_new, 1, opts); + } else { + VALUE prototype = rb_const_get(mJSON, i_SAFE_STATE_PROTOTYPE); + return rb_funcall(prototype, i_dup, 0); + } +} + +/* + * call-seq: indent() + * + * Returns the string that is used to indent levels in the JSON text. + */ +static VALUE cState_indent(VALUE self) +{ + GET_STATE(self); + return state->indent ? rb_str_new(state->indent, state->indent_len) : rb_str_new2(""); +} + +/* + * call-seq: indent=(indent) + * + * Sets the string that is used to indent levels in the JSON text. 
+ */ +static VALUE cState_indent_set(VALUE self, VALUE indent) +{ + unsigned long len; + GET_STATE(self); + Check_Type(indent, T_STRING); + len = RSTRING_LEN(indent); + if (len == 0) { + if (state->indent) { + ruby_xfree(state->indent); + state->indent = NULL; + state->indent_len = 0; + } + } else { + if (state->indent) ruby_xfree(state->indent); + state->indent = fstrndup(RSTRING_PTR(indent), len); + state->indent_len = len; + } + return Qnil; +} + +/* + * call-seq: space() + * + * Returns the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space(VALUE self) +{ + GET_STATE(self); + return state->space ? rb_str_new(state->space, state->space_len) : rb_str_new2(""); +} + +/* + * call-seq: space=(space) + * + * Sets _space_ to the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space_set(VALUE self, VALUE space) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space, T_STRING); + len = RSTRING_LEN(space); + if (len == 0) { + if (state->space) { + ruby_xfree(state->space); + state->space = NULL; + state->space_len = 0; + } + } else { + if (state->space) ruby_xfree(state->space); + state->space = fstrndup(RSTRING_PTR(space), len); + state->space_len = len; + } + return Qnil; +} + +/* + * call-seq: space_before() + * + * Returns the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before(VALUE self) +{ + GET_STATE(self); + return state->space_before ? rb_str_new(state->space_before, state->space_before_len) : rb_str_new2(""); +} + +/* + * call-seq: space_before=(space_before) + * + * Sets the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before_set(VALUE self, VALUE space_before) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space_before, T_STRING); + len = RSTRING_LEN(space_before); + if (len == 0) { + if (state->space_before) { + ruby_xfree(state->space_before); + state->space_before = NULL; + state->space_before_len = 0; + } + } else { + if (state->space_before) ruby_xfree(state->space_before); + state->space_before = fstrndup(RSTRING_PTR(space_before), len); + state->space_before_len = len; + } + return Qnil; +} + +/* + * call-seq: object_nl() + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl(VALUE self) +{ + GET_STATE(self); + return state->object_nl ? rb_str_new(state->object_nl, state->object_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: object_nl=(object_nl) + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(object_nl, T_STRING); + len = RSTRING_LEN(object_nl); + if (len == 0) { + if (state->object_nl) { + ruby_xfree(state->object_nl); + state->object_nl = NULL; + } + } else { + if (state->object_nl) ruby_xfree(state->object_nl); + state->object_nl = fstrndup(RSTRING_PTR(object_nl), len); + state->object_nl_len = len; + } + return Qnil; +} + +/* + * call-seq: array_nl() + * + * This string is put at the end of a line that holds a JSON array. + */ +static VALUE cState_array_nl(VALUE self) +{ + GET_STATE(self); + return state->array_nl ? rb_str_new(state->array_nl, state->array_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: array_nl=(array_nl) + * + * This string is put at the end of a line that holds a JSON array. 
+ */ +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(array_nl, T_STRING); + len = RSTRING_LEN(array_nl); + if (len == 0) { + if (state->array_nl) { + ruby_xfree(state->array_nl); + state->array_nl = NULL; + } + } else { + if (state->array_nl) ruby_xfree(state->array_nl); + state->array_nl = fstrndup(RSTRING_PTR(array_nl), len); + state->array_nl_len = len; + } + return Qnil; +} + + +/* +* call-seq: check_circular? +* +* Returns true, if circular data structures should be checked, +* otherwise returns false. +*/ +static VALUE cState_check_circular_p(VALUE self) +{ + GET_STATE(self); + return state->max_nesting ? Qtrue : Qfalse; +} + +/* + * call-seq: max_nesting + * + * This integer returns the maximum level of data structure nesting in + * the generated JSON, max_nesting = 0 if no maximum is checked. + */ +static VALUE cState_max_nesting(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->max_nesting); +} + +/* + * call-seq: max_nesting=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_max_nesting_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + return state->max_nesting = FIX2LONG(depth); +} + +/* + * call-seq: allow_nan? + * + * Returns true, if NaN, Infinity, and -Infinity should be generated, otherwise + * returns false. + */ +static VALUE cState_allow_nan_p(VALUE self) +{ + GET_STATE(self); + return state->allow_nan ? Qtrue : Qfalse; +} + +/* + * call-seq: ascii_only? + * + * Returns true, if only ASCII characters should be generated. Otherwise + * returns false. + */ +static VALUE cState_ascii_only_p(VALUE self) +{ + GET_STATE(self); + return state->ascii_only ? Qtrue : Qfalse; +} + +/* + * call-seq: depth + * + * This integer returns the current depth of data structure nesting. + */ +static VALUE cState_depth(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->depth); +} + +/* + * call-seq: depth=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_depth_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + return Qnil; +} + +/* + * call-seq: buffer_initial_length + * + * This integer returns the current initial length of the buffer. + */ +static VALUE cState_buffer_initial_length(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->buffer_initial_length); +} + +/* + * call-seq: buffer_initial_length=(length) + * + * This sets the initial length of the buffer to +length+, if +length+ > 0, + * otherwise its value isn't changed. 
+ */ +static VALUE cState_buffer_initial_length_set(VALUE self, VALUE buffer_initial_length) +{ + long initial_length; + GET_STATE(self); + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) { + state->buffer_initial_length = initial_length; + } + return Qnil; +} + +/* + * + */ +void Init_generator(void) +{ +#undef rb_intern + rb_require("json/common"); + + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + mGenerator = rb_define_module_under(mExt, "Generator"); + + eGeneratorError = rb_path2class("JSON::GeneratorError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eGeneratorError); + rb_gc_register_mark_object(eNestingError); + + cState = rb_define_class_under(mGenerator, "State", rb_cObject); + rb_define_alloc_func(cState, cState_s_allocate); + rb_define_singleton_method(cState, "from_state", cState_from_state_s, 1); + rb_define_method(cState, "initialize", cState_initialize, -1); + rb_define_method(cState, "initialize_copy", cState_init_copy, 1); + rb_define_method(cState, "indent", cState_indent, 0); + rb_define_method(cState, "indent=", cState_indent_set, 1); + rb_define_method(cState, "space", cState_space, 0); + rb_define_method(cState, "space=", cState_space_set, 1); + rb_define_method(cState, "space_before", cState_space_before, 0); + rb_define_method(cState, "space_before=", cState_space_before_set, 1); + rb_define_method(cState, "object_nl", cState_object_nl, 0); + rb_define_method(cState, "object_nl=", cState_object_nl_set, 1); + rb_define_method(cState, "array_nl", cState_array_nl, 0); + rb_define_method(cState, "array_nl=", cState_array_nl_set, 1); + rb_define_method(cState, "max_nesting", cState_max_nesting, 0); + rb_define_method(cState, "max_nesting=", cState_max_nesting_set, 1); + rb_define_method(cState, "check_circular?", cState_check_circular_p, 0); + rb_define_method(cState, "allow_nan?", cState_allow_nan_p, 0); + rb_define_method(cState, "ascii_only?", cState_ascii_only_p, 0); + rb_define_method(cState, "depth", cState_depth, 0); + rb_define_method(cState, "depth=", cState_depth_set, 1); + rb_define_method(cState, "buffer_initial_length", cState_buffer_initial_length, 0); + rb_define_method(cState, "buffer_initial_length=", cState_buffer_initial_length_set, 1); + rb_define_method(cState, "configure", cState_configure, 1); + rb_define_alias(cState, "merge", "configure"); + rb_define_method(cState, "to_h", cState_to_h, 0); + rb_define_alias(cState, "to_hash", "to_h"); + rb_define_method(cState, "[]", cState_aref, 1); + rb_define_method(cState, "[]=", cState_aset, 2); + rb_define_method(cState, "generate", cState_generate, 1); + + mGeneratorMethods = rb_define_module_under(mGenerator, "GeneratorMethods"); + mObject = rb_define_module_under(mGeneratorMethods, "Object"); + rb_define_method(mObject, "to_json", mObject_to_json, -1); + mHash = rb_define_module_under(mGeneratorMethods, "Hash"); + rb_define_method(mHash, "to_json", mHash_to_json, -1); + mArray = rb_define_module_under(mGeneratorMethods, "Array"); + rb_define_method(mArray, "to_json", mArray_to_json, -1); +#ifdef RUBY_INTEGER_UNIFICATION + mInteger = rb_define_module_under(mGeneratorMethods, "Integer"); + rb_define_method(mInteger, "to_json", mInteger_to_json, -1); +#else + mFixnum = rb_define_module_under(mGeneratorMethods, "Fixnum"); + rb_define_method(mFixnum, "to_json", mFixnum_to_json, -1); + mBignum = rb_define_module_under(mGeneratorMethods, "Bignum"); + 
rb_define_method(mBignum, "to_json", mBignum_to_json, -1); +#endif + mFloat = rb_define_module_under(mGeneratorMethods, "Float"); + rb_define_method(mFloat, "to_json", mFloat_to_json, -1); + mString = rb_define_module_under(mGeneratorMethods, "String"); + rb_define_singleton_method(mString, "included", mString_included_s, 1); + rb_define_method(mString, "to_json", mString_to_json, -1); + rb_define_method(mString, "to_json_raw", mString_to_json_raw, -1); + rb_define_method(mString, "to_json_raw_object", mString_to_json_raw_object, 0); + mString_Extend = rb_define_module_under(mString, "Extend"); + rb_define_method(mString_Extend, "json_create", mString_Extend_json_create, 1); + mTrueClass = rb_define_module_under(mGeneratorMethods, "TrueClass"); + rb_define_method(mTrueClass, "to_json", mTrueClass_to_json, -1); + mFalseClass = rb_define_module_under(mGeneratorMethods, "FalseClass"); + rb_define_method(mFalseClass, "to_json", mFalseClass_to_json, -1); + mNilClass = rb_define_module_under(mGeneratorMethods, "NilClass"); + rb_define_method(mNilClass, "to_json", mNilClass_to_json, -1); + + i_to_s = rb_intern("to_s"); + i_to_json = rb_intern("to_json"); + i_new = rb_intern("new"); + i_indent = rb_intern("indent"); + i_space = rb_intern("space"); + i_space_before = rb_intern("space_before"); + i_object_nl = rb_intern("object_nl"); + i_array_nl = rb_intern("array_nl"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_ascii_only = rb_intern("ascii_only"); + i_depth = rb_intern("depth"); + i_buffer_initial_length = rb_intern("buffer_initial_length"); + i_pack = rb_intern("pack"); + i_unpack = rb_intern("unpack"); + i_create_id = rb_intern("create_id"); + i_extend = rb_intern("extend"); + i_key_p = rb_intern("key?"); + i_aref = rb_intern("[]"); + i_send = rb_intern("__send__"); + i_respond_to_p = rb_intern("respond_to?"); + i_match = rb_intern("match"); + i_keys = rb_intern("keys"); + i_dup = rb_intern("dup"); +#ifdef HAVE_RUBY_ENCODING_H + CEncoding_UTF_8 = rb_funcall(rb_path2class("Encoding"), rb_intern("find"), 1, rb_str_new2("utf-8")); + i_encoding = rb_intern("encoding"); + i_encode = rb_intern("encode"); +#endif + i_SAFE_STATE_PROTOTYPE = rb_intern("SAFE_STATE_PROTOTYPE"); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator 2.h b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator 2.h new file mode 100644 index 0000000..c367a62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator 2.h @@ -0,0 +1,171 @@ +#ifndef _GENERATOR_H_ +#define _GENERATOR_H_ + +#include +#include + +#include "ruby.h" + +#ifdef HAVE_RUBY_RE_H +#include "ruby/re.h" +#else +#include "re.h" +#endif + +#ifndef rb_intern_str +#define rb_intern_str(string) SYM2ID(rb_str_intern(string)) +#endif + +#ifndef rb_obj_instance_variables +#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0) +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode definitions */ + +#define UNI_STRICT_CONVERSION 1 + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_MAX_BMP (UTF32)0x0000FFFF +#define UNI_MAX_UTF16 (UTF32)0x0010FFFF +#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF +#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF + +#define UNI_SUR_HIGH_START (UTF32)0xD800 
+#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +static const int halfShift = 10; /* used for shifting by 10 bits */ + +static const UTF32 halfBase = 0x0010000UL; +static const UTF32 halfMask = 0x3FFUL; + +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length); +static void unicode_escape(char *buf, UTF16 character); +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character); +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string); +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string); +static char *fstrndup(const char *ptr, unsigned long len); + +/* ruby api and some helpers */ + +typedef struct JSON_Generator_StateStruct { + char *indent; + long indent_len; + char *space; + long space_len; + char *space_before; + long space_before_len; + char *object_nl; + long object_nl_len; + char *array_nl; + long array_nl_len; + FBuffer *array_delim; + FBuffer *object_delim; + FBuffer *object_delim2; + long max_nesting; + char allow_nan; + char ascii_only; + long depth; + long buffer_initial_length; +} JSON_Generator_State; + +#define GET_STATE_TO(self, state) \ + TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state) + +#define GET_STATE(self) \ + JSON_Generator_State *state; \ + GET_STATE_TO(self, state) + +#define GENERATE_JSON(type) \ + FBuffer *buffer; \ + VALUE Vstate; \ + JSON_Generator_State *state; \ + \ + rb_scan_args(argc, argv, "01", &Vstate); \ + Vstate = cState_from_state_s(cState, Vstate); \ + TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \ + buffer = cState_prepare_buffer(Vstate); \ + generate_json_##type(buffer, Vstate, state, self); \ + return fbuffer_to_s(buffer) + +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self); +#ifdef RUBY_INTEGER_UNIFICATION +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self); +#else +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self); +#endif +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_included_s(VALUE self, VALUE modul); +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_to_json_raw_object(VALUE self); +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self); +static VALUE mString_Extend_json_create(VALUE self, VALUE o); +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self); +static void State_free(void *state); +static VALUE cState_s_allocate(VALUE klass); +static VALUE cState_configure(VALUE self, VALUE opts); +static VALUE cState_to_h(VALUE self); +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void 
generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#endif +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static VALUE cState_partial_generate(VALUE self, VALUE obj); +static VALUE cState_generate(VALUE self, VALUE obj); +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cState_from_state_s(VALUE self, VALUE opts); +static VALUE cState_indent(VALUE self); +static VALUE cState_indent_set(VALUE self, VALUE indent); +static VALUE cState_space(VALUE self); +static VALUE cState_space_set(VALUE self, VALUE space); +static VALUE cState_space_before(VALUE self); +static VALUE cState_space_before_set(VALUE self, VALUE space_before); +static VALUE cState_object_nl(VALUE self); +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl); +static VALUE cState_array_nl(VALUE self); +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl); +static VALUE cState_max_nesting(VALUE self); +static VALUE cState_max_nesting_set(VALUE self, VALUE depth); +static VALUE cState_allow_nan_p(VALUE self); +static VALUE cState_ascii_only_p(VALUE self); +static VALUE cState_depth(VALUE self); +static VALUE cState_depth_set(VALUE self, VALUE depth); +static FBuffer *cState_prepare_buffer(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Generator_State_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json) +#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator.c b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator.c new file mode 100644 index 0000000..749efaf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator.c @@ -0,0 +1,1569 @@ +#include "../fbuffer/fbuffer.h" +#include "generator.h" + +#ifdef HAVE_RUBY_ENCODING_H +static VALUE CEncoding_UTF_8; +static ID i_encoding, i_encode; +#endif + +static VALUE mJSON, mExt, mGenerator, cState, mGeneratorMethods, mObject, + mHash, mArray, +#ifdef RUBY_INTEGER_UNIFICATION + mInteger, +#else + mFixnum, mBignum, +#endif + mFloat, mString, mString_Extend, + mTrueClass, mFalseClass, mNilClass, eGeneratorError, + eNestingError, + i_SAFE_STATE_PROTOTYPE; + +static ID i_to_s, i_to_json, i_new, i_indent, i_space, i_space_before, + i_object_nl, i_array_nl, i_max_nesting, i_allow_nan, i_ascii_only, + i_pack, i_unpack, i_create_id, i_extend, i_key_p, + i_aref, i_send, i_respond_to_p, i_match, i_keys, i_depth, + i_buffer_initial_length, i_dup; + +/* + * Copyright 2001-2004 Unicode, Inc. 
+ * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is + * left as-is for anyone who may want to do such conversion, which was + * allowed in earlier algorithms. + */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* + * Magic values subtracted from a buffer value during UTF8 conversion. + * This table contains as many values as there might be trailing bytes + * in a UTF-8 sequence. + */ +static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * If not calling this from ConvertUTF8to*, then the length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns 0. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length) +{ + UTF8 a; + const UTF8 *srcptr = source+length; + switch (length) { + default: return 0; + /* Everything else falls through when "1"... */ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 2: if ((a = (*--srcptr)) > 0xBF) return 0; + + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return 0; break; + case 0xED: if (a > 0x9F) return 0; break; + case 0xF0: if (a < 0x90) return 0; break; + case 0xF4: if (a > 0x8F) return 0; break; + default: if (a < 0x80) return 0; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return 0; + } + if (*source > 0xF4) return 0; + return 1; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf. 
*/ +static void unicode_escape(char *buf, UTF16 character) +{ + const char *digits = "0123456789abcdef"; + + buf[2] = digits[character >> 12]; + buf[3] = digits[(character >> 8) & 0xf]; + buf[4] = digits[(character >> 4) & 0xf]; + buf[5] = digits[character & 0xf]; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf, then + * the buffer buf is appended to the FBuffer buffer. */ +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 + character) +{ + unicode_escape(buf, character); + fbuffer_append(buffer, buf, 6); +} + +/* Converts string to a JSON string in FBuffer buffer, where all but the ASCII + * and control characters are JSON escaped. */ +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string) +{ + const UTF8 *source = (UTF8 *) RSTRING_PTR(string); + const UTF8 *sourceEnd = source + RSTRING_LEN(string); + char buf[6] = { '\\', 'u' }; + + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8(source, extraBytesToRead+1)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* normal case */ + if (ch >= 0x20 && ch <= 0x7f) { + switch (ch) { + case '\\': + fbuffer_append(buffer, "\\\\", 2); + break; + case '"': + fbuffer_append(buffer, "\\\"", 2); + break; + default: + fbuffer_append_char(buffer, (char)ch); + break; + } + } else { + switch (ch) { + case '\n': + fbuffer_append(buffer, "\\n", 2); + break; + case '\r': + fbuffer_append(buffer, "\\r", 2); + break; + case '\t': + fbuffer_append(buffer, "\\t", 2); + break; + case '\f': + fbuffer_append(buffer, "\\f", 2); + break; + case '\b': + fbuffer_append(buffer, "\\b", 2); + break; + default: + unicode_escape_to_buffer(buffer, buf, (UTF16) ch); + break; + } + } + } + } else if (ch > UNI_MAX_UTF16) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the start */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. 
*/ + ch -= halfBase; + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START)); + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch & halfMask) + UNI_SUR_LOW_START)); + } + } + RB_GC_GUARD(string); +} + +/* Converts string to a JSON string in FBuffer buffer, where only the + * characters required by the JSON standard are JSON escaped. The remaining + * characters (should be UTF8) are just passed through and appended to the + * result. */ +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string) +{ + const char *ptr = RSTRING_PTR(string), *p; + unsigned long len = RSTRING_LEN(string), start = 0, end = 0; + const char *escape = NULL; + int escape_len; + unsigned char c; + char buf[6] = { '\\', 'u' }; + int ascii_only = rb_enc_str_asciionly_p(string); + + for (start = 0, end = 0; end < len;) { + p = ptr + end; + c = (unsigned char) *p; + if (c < 0x20) { + switch (c) { + case '\n': + escape = "\\n"; + escape_len = 2; + break; + case '\r': + escape = "\\r"; + escape_len = 2; + break; + case '\t': + escape = "\\t"; + escape_len = 2; + break; + case '\f': + escape = "\\f"; + escape_len = 2; + break; + case '\b': + escape = "\\b"; + escape_len = 2; + break; + default: + unicode_escape(buf, (UTF16) *p); + escape = buf; + escape_len = 6; + break; + } + } else { + switch (c) { + case '\\': + escape = "\\\\"; + escape_len = 2; + break; + case '"': + escape = "\\\""; + escape_len = 2; + break; + default: + { + unsigned short clen = 1; + if (!ascii_only) { + clen += trailingBytesForUTF8[c]; + if (end + clen > len) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8((UTF8 *) p, clen)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + } + end += clen; + } + continue; + break; + } + } + fbuffer_append(buffer, ptr + start, end - start); + fbuffer_append(buffer, escape, escape_len); + start = ++end; + escape = NULL; + } + fbuffer_append(buffer, ptr + start, end - start); +} + +static char *fstrndup(const char *ptr, unsigned long len) { + char *result; + if (len <= 0) return NULL; + result = ALLOC_N(char, len); + memcpy(result, ptr, len); + return result; +} + +/* + * Document-module: JSON::Ext::Generator + * + * This is the JSON generator implemented as a C extension. It can be + * configured to be used by setting + * + * JSON.generator = JSON::Ext::Generator + * + * with the method generator= in JSON. 
+ * + */ + +/* Explanation of the following: that's the only way to not pollute + * standard library's docs with GeneratorMethods:: which + * are uninformative and take a large place in a list of classes + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Array + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Bignum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::FalseClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Fixnum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Float + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Hash + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Integer + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::NilClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Object + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String::Extend + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::TrueClass + * :nodoc: + */ + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON object, that is generated from + * this Hash instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(object); +} + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON array, that is generated from + * this Array instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self) { + GENERATE_JSON(array); +} + +#ifdef RUBY_INTEGER_UNIFICATION +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(integer); +} + +#else +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(fixnum); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(bignum); +} +#endif + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Float number. + */ +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(float); +} + +/* + * call-seq: String.included(modul) + * + * Extends _modul_ with the String::Extend module. + */ +static VALUE mString_included_s(VALUE self, VALUE modul) { + VALUE result = rb_funcall(modul, i_extend, 1, mString_Extend); + return result; +} + +/* + * call-seq: to_json(*) + * + * This string should be encoded with UTF-8 A call to this method + * returns a JSON string encoded with UTF16 big endian characters as + * \u????. 
+ */ +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(string); +} + +/* + * call-seq: to_json_raw_object() + * + * This method creates a raw object hash, that can be nested into + * other data structures and will be generated as a raw string. This + * method should be used, if you want to convert raw strings to JSON + * instead of UTF-8 strings, e. g. binary data. + */ +static VALUE mString_to_json_raw_object(VALUE self) +{ + VALUE ary; + VALUE result = rb_hash_new(); + rb_hash_aset(result, rb_funcall(mJSON, i_create_id, 0), rb_class_name(rb_obj_class(self))); + ary = rb_funcall(self, i_unpack, 1, rb_str_new2("C*")); + rb_hash_aset(result, rb_str_new2("raw"), ary); + return result; +} + +/* + * call-seq: to_json_raw(*args) + * + * This method creates a JSON text from the result of a call to + * to_json_raw_object of this String. + */ +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self) +{ + VALUE obj = mString_to_json_raw_object(self); + Check_Type(obj, T_HASH); + return mHash_to_json(argc, argv, obj); +} + +/* + * call-seq: json_create(o) + * + * Raw Strings are JSON Objects (the raw bytes are stored in an array for the + * key "raw"). The Ruby String can be created by this module method. + */ +static VALUE mString_Extend_json_create(VALUE self, VALUE o) +{ + VALUE ary; + Check_Type(o, T_HASH); + ary = rb_hash_aref(o, rb_str_new2("raw")); + return rb_funcall(ary, i_pack, 1, rb_str_new2("C*")); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for true: 'true'. + */ +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(true); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for false: 'false'. + */ +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(false); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for nil: 'null'. + */ +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(null); +} + +/* + * call-seq: to_json(*) + * + * Converts this object to a string (calling #to_s), converts + * it to a JSON string, and returns the result. This is a fallback, if no + * special method #to_json was defined for some object. 
+ */ +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self) +{ + VALUE state; + VALUE string = rb_funcall(self, i_to_s, 0); + rb_scan_args(argc, argv, "01", &state); + Check_Type(string, T_STRING); + state = cState_from_state_s(cState, state); + return cState_partial_generate(state, string); +} + +static void State_free(void *ptr) +{ + JSON_Generator_State *state = ptr; + if (state->indent) ruby_xfree(state->indent); + if (state->space) ruby_xfree(state->space); + if (state->space_before) ruby_xfree(state->space_before); + if (state->object_nl) ruby_xfree(state->object_nl); + if (state->array_nl) ruby_xfree(state->array_nl); + if (state->array_delim) fbuffer_free(state->array_delim); + if (state->object_delim) fbuffer_free(state->object_delim); + if (state->object_delim2) fbuffer_free(state->object_delim2); + ruby_xfree(state); +} + +static size_t State_memsize(const void *ptr) +{ + const JSON_Generator_State *state = ptr; + size_t size = sizeof(*state); + if (state->indent) size += state->indent_len + 1; + if (state->space) size += state->space_len + 1; + if (state->space_before) size += state->space_before_len + 1; + if (state->object_nl) size += state->object_nl_len + 1; + if (state->array_nl) size += state->array_nl_len + 1; + if (state->array_delim) size += FBUFFER_CAPA(state->array_delim); + if (state->object_delim) size += FBUFFER_CAPA(state->object_delim); + if (state->object_delim2) size += FBUFFER_CAPA(state->object_delim2); + return size; +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Generator_State_type = { + "JSON/Generator/State", + {NULL, State_free, State_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cState_s_allocate(VALUE klass) +{ + JSON_Generator_State *state; + return TypedData_Make_Struct(klass, JSON_Generator_State, + &JSON_Generator_State_type, state); +} + +/* + * call-seq: configure(opts) + * + * Configure this State instance with the Hash _opts_, and return + * itself. 
+ */ +static VALUE cState_configure(VALUE self, VALUE opts) +{ + VALUE tmp; + GET_STATE(self); + tmp = rb_check_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(tmp)) tmp = rb_convert_type(opts, T_HASH, "Hash", "to_h"); + opts = tmp; + tmp = rb_hash_aref(opts, ID2SYM(i_indent)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->indent = fstrndup(RSTRING_PTR(tmp), len + 1); + state->indent_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space_before)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space_before = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_before_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_array_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->array_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->array_nl_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_object_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->object_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->object_nl_len = len; + } + tmp = ID2SYM(i_max_nesting); + state->max_nesting = 100; + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + state->max_nesting = FIX2LONG(max_nesting); + } else { + state->max_nesting = 0; + } + } + tmp = ID2SYM(i_depth); + state->depth = 0; + if (option_given_p(opts, tmp)) { + VALUE depth = rb_hash_aref(opts, tmp); + if (RTEST(depth)) { + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + } else { + state->depth = 0; + } + } + tmp = ID2SYM(i_buffer_initial_length); + if (option_given_p(opts, tmp)) { + VALUE buffer_initial_length = rb_hash_aref(opts, tmp); + if (RTEST(buffer_initial_length)) { + long initial_length; + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) state->buffer_initial_length = initial_length; + } + } + tmp = rb_hash_aref(opts, ID2SYM(i_allow_nan)); + state->allow_nan = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_ascii_only)); + state->ascii_only = RTEST(tmp); + return self; +} + +static void set_state_ivars(VALUE hash, VALUE state) +{ + VALUE ivars = rb_obj_instance_variables(state); + int i = 0; + for (i = 0; i < RARRAY_LEN(ivars); i++) { + VALUE key = rb_funcall(rb_ary_entry(ivars, i), i_to_s, 0); + long key_len = RSTRING_LEN(key); + VALUE value = rb_iv_get(state, StringValueCStr(key)); + rb_hash_aset(hash, rb_str_intern(rb_str_substr(key, 1, key_len - 1)), value); + } +} + +/* + * call-seq: to_h + * + * Returns the configuration instance variables as a hash, that can be + * passed to the configure method. 
+ */ +static VALUE cState_to_h(VALUE self) +{ + VALUE result = rb_hash_new(); + GET_STATE(self); + set_state_ivars(result, self); + rb_hash_aset(result, ID2SYM(i_indent), rb_str_new(state->indent, state->indent_len)); + rb_hash_aset(result, ID2SYM(i_space), rb_str_new(state->space, state->space_len)); + rb_hash_aset(result, ID2SYM(i_space_before), rb_str_new(state->space_before, state->space_before_len)); + rb_hash_aset(result, ID2SYM(i_object_nl), rb_str_new(state->object_nl, state->object_nl_len)); + rb_hash_aset(result, ID2SYM(i_array_nl), rb_str_new(state->array_nl, state->array_nl_len)); + rb_hash_aset(result, ID2SYM(i_allow_nan), state->allow_nan ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_ascii_only), state->ascii_only ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_max_nesting), LONG2FIX(state->max_nesting)); + rb_hash_aset(result, ID2SYM(i_depth), LONG2FIX(state->depth)); + rb_hash_aset(result, ID2SYM(i_buffer_initial_length), LONG2FIX(state->buffer_initial_length)); + return result; +} + +/* +* call-seq: [](name) +* +* Returns the value returned by method +name+. +*/ +static VALUE cState_aref(VALUE self, VALUE name) +{ + name = rb_funcall(name, i_to_s, 0); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name))) { + return rb_funcall(self, i_send, 1, name); + } else { + return rb_attr_get(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name))); + } +} + +/* +* call-seq: []=(name, value) +* +* Sets the attribute name to value. +*/ +static VALUE cState_aset(VALUE self, VALUE name, VALUE value) +{ + VALUE name_writer; + + name = rb_funcall(name, i_to_s, 0); + name_writer = rb_str_cat2(rb_str_dup(name), "="); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name_writer))) { + return rb_funcall(self, i_send, 2, name_writer, value); + } else { + rb_ivar_set(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name)), value); + } + return Qnil; +} + +struct hash_foreach_arg { + FBuffer *buffer; + JSON_Generator_State *state; + VALUE Vstate; + int iter; +}; + +static int +json_object_i(VALUE key, VALUE val, VALUE _arg) +{ + struct hash_foreach_arg *arg = (struct hash_foreach_arg *)_arg; + FBuffer *buffer = arg->buffer; + JSON_Generator_State *state = arg->state; + VALUE Vstate = arg->Vstate; + + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + char *delim = FBUFFER_PTR(state->object_delim); + long delim_len = FBUFFER_LEN(state->object_delim); + char *delim2 = FBUFFER_PTR(state->object_delim2); + long delim2_len = FBUFFER_LEN(state->object_delim2); + long depth = state->depth; + int j; + VALUE klass, key_to_s; + + if (arg->iter > 0) fbuffer_append(buffer, delim, delim_len); + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + } + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + + klass = CLASS_OF(key); + if (klass == rb_cString) { + key_to_s = key; + } else if (klass == rb_cSymbol) { + key_to_s = rb_id2str(SYM2ID(key)); + } else { + key_to_s = rb_funcall(key, i_to_s, 0); + } + Check_Type(key_to_s, T_STRING); + generate_json(buffer, Vstate, state, key_to_s); + fbuffer_append(buffer, delim2, delim2_len); + generate_json(buffer, Vstate, state, val); + + arg->iter++; + return ST_CONTINUE; +} + +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = 
state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + long depth = ++state->depth; + int j; + struct hash_foreach_arg arg; + + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '{'); + + arg.buffer = buffer; + arg.state = state; + arg.Vstate = Vstate; + arg.iter = 0; + rb_hash_foreach(obj, json_object_i, (VALUE)&arg); + + depth = --state->depth; + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, '}'); +} + +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *array_nl = state->array_nl; + long array_nl_len = state->array_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + char *delim = FBUFFER_PTR(state->array_delim); + long delim_len = FBUFFER_LEN(state->array_delim); + long depth = ++state->depth; + int i, j; + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '['); + if (array_nl) fbuffer_append(buffer, array_nl, array_nl_len); + for(i = 0; i < RARRAY_LEN(obj); i++) { + if (i > 0) fbuffer_append(buffer, delim, delim_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + generate_json(buffer, Vstate, state, rb_ary_entry(obj, i)); + } + state->depth = --depth; + if (array_nl) { + fbuffer_append(buffer, array_nl, array_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, ']'); +} + +#ifdef HAVE_RUBY_ENCODING_H +static int enc_utf8_compatible_p(rb_encoding *enc) +{ + if (enc == rb_usascii_encoding()) return 1; + if (enc == rb_utf8_encoding()) return 1; + return 0; +} +#endif + +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_char(buffer, '"'); +#ifdef HAVE_RUBY_ENCODING_H + if (!enc_utf8_compatible_p(rb_enc_get(obj))) { + obj = rb_str_encode(obj, CEncoding_UTF_8, 0, Qnil); + } +#endif + if (state->ascii_only) { + convert_UTF8_to_JSON_ASCII(buffer, obj); + } else { + convert_UTF8_to_JSON(buffer, obj); + } + fbuffer_append_char(buffer, '"'); +} + +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "null", 4); +} + +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "false", 5); +} + +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "true", 4); +} + +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_long(buffer, FIX2LONG(obj)); +} + +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp = rb_funcall(obj, i_to_s, 0); + fbuffer_append_str(buffer, tmp); +} + +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + if (FIXNUM_P(obj)) + 
generate_json_fixnum(buffer, Vstate, state, obj); + else + generate_json_bignum(buffer, Vstate, state, obj); +} +#endif +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + double value = RFLOAT_VALUE(obj); + char allow_nan = state->allow_nan; + VALUE tmp = rb_funcall(obj, i_to_s, 0); + if (!allow_nan) { + if (isinf(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } else if (isnan(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } + } + fbuffer_append_str(buffer, tmp); +} + +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp; + VALUE klass = CLASS_OF(obj); + if (klass == rb_cHash) { + generate_json_object(buffer, Vstate, state, obj); + } else if (klass == rb_cArray) { + generate_json_array(buffer, Vstate, state, obj); + } else if (klass == rb_cString) { + generate_json_string(buffer, Vstate, state, obj); + } else if (obj == Qnil) { + generate_json_null(buffer, Vstate, state, obj); + } else if (obj == Qfalse) { + generate_json_false(buffer, Vstate, state, obj); + } else if (obj == Qtrue) { + generate_json_true(buffer, Vstate, state, obj); + } else if (FIXNUM_P(obj)) { + generate_json_fixnum(buffer, Vstate, state, obj); + } else if (RB_TYPE_P(obj, T_BIGNUM)) { + generate_json_bignum(buffer, Vstate, state, obj); + } else if (klass == rb_cFloat) { + generate_json_float(buffer, Vstate, state, obj); + } else if (rb_respond_to(obj, i_to_json)) { + tmp = rb_funcall(obj, i_to_json, 1, Vstate); + Check_Type(tmp, T_STRING); + fbuffer_append_str(buffer, tmp); + } else { + tmp = rb_funcall(obj, i_to_s, 0); + Check_Type(tmp, T_STRING); + generate_json_string(buffer, Vstate, state, tmp); + } +} + +static FBuffer *cState_prepare_buffer(VALUE self) +{ + FBuffer *buffer; + GET_STATE(self); + buffer = fbuffer_alloc(state->buffer_initial_length); + + if (state->object_delim) { + fbuffer_clear(state->object_delim); + } else { + state->object_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->object_delim, ','); + if (state->object_delim2) { + fbuffer_clear(state->object_delim2); + } else { + state->object_delim2 = fbuffer_alloc(16); + } + if (state->space_before) fbuffer_append(state->object_delim2, state->space_before, state->space_before_len); + fbuffer_append_char(state->object_delim2, ':'); + if (state->space) fbuffer_append(state->object_delim2, state->space, state->space_len); + + if (state->array_delim) { + fbuffer_clear(state->array_delim); + } else { + state->array_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->array_delim, ','); + if (state->array_nl) fbuffer_append(state->array_delim, state->array_nl, state->array_nl_len); + return buffer; +} + +static VALUE cState_partial_generate(VALUE self, VALUE obj) +{ + FBuffer *buffer = cState_prepare_buffer(self); + GET_STATE(self); + generate_json(buffer, self, state, obj); + return fbuffer_to_s(buffer); +} + +/* + * call-seq: generate(obj) + * + * Generates a valid JSON document from object +obj+ and returns the + * result. If no valid JSON document can be created this method raises a + * GeneratorError exception. 
+ */ +static VALUE cState_generate(VALUE self, VALUE obj) +{ + VALUE result = cState_partial_generate(self, obj); + GET_STATE(self); + (void)state; + return result; +} + +/* + * call-seq: new(opts = {}) + * + * Instantiates a new State object, configured by _opts_. + * + * _opts_ can have the following keys: + * + * * *indent*: a string used to indent levels (default: ''), + * * *space*: a string that is put after, a : or , delimiter (default: ''), + * * *space_before*: a string that is put before a : pair delimiter (default: ''), + * * *object_nl*: a string that is put at the end of a JSON object (default: ''), + * * *array_nl*: a string that is put at the end of a JSON array (default: ''), + * * *allow_nan*: true if NaN, Infinity, and -Infinity should be + * generated, otherwise an exception is thrown, if these values are + * encountered. This options defaults to false. + * * *ascii_only*: true if only ASCII characters should be generated. This + * option defaults to false. + * * *buffer_initial_length*: sets the initial length of the generator's + * internal buffer. + */ +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE opts; + GET_STATE(self); + state->max_nesting = 100; + state->buffer_initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + rb_scan_args(argc, argv, "01", &opts); + if (!NIL_P(opts)) cState_configure(self, opts); + return self; +} + +/* + * call-seq: initialize_copy(orig) + * + * Initializes this object from orig if it can be duplicated/cloned and returns + * it. +*/ +static VALUE cState_init_copy(VALUE obj, VALUE orig) +{ + JSON_Generator_State *objState, *origState; + + if (obj == orig) return obj; + GET_STATE_TO(obj, objState); + GET_STATE_TO(orig, origState); + if (!objState) rb_raise(rb_eArgError, "unallocated JSON::State"); + + MEMCPY(objState, origState, JSON_Generator_State, 1); + objState->indent = fstrndup(origState->indent, origState->indent_len); + objState->space = fstrndup(origState->space, origState->space_len); + objState->space_before = fstrndup(origState->space_before, origState->space_before_len); + objState->object_nl = fstrndup(origState->object_nl, origState->object_nl_len); + objState->array_nl = fstrndup(origState->array_nl, origState->array_nl_len); + if (origState->array_delim) objState->array_delim = fbuffer_dup(origState->array_delim); + if (origState->object_delim) objState->object_delim = fbuffer_dup(origState->object_delim); + if (origState->object_delim2) objState->object_delim2 = fbuffer_dup(origState->object_delim2); + return obj; +} + +/* + * call-seq: from_state(opts) + * + * Creates a State object from _opts_, which ought to be Hash to create a + * new State instance configured by _opts_, something else to create an + * unconfigured instance. If _opts_ is a State object, it is just returned. + */ +static VALUE cState_from_state_s(VALUE self, VALUE opts) +{ + if (rb_obj_is_kind_of(opts, self)) { + return opts; + } else if (rb_obj_is_kind_of(opts, rb_cHash)) { + return rb_funcall(self, i_new, 1, opts); + } else { + VALUE prototype = rb_const_get(mJSON, i_SAFE_STATE_PROTOTYPE); + return rb_funcall(prototype, i_dup, 0); + } +} + +/* + * call-seq: indent() + * + * Returns the string that is used to indent levels in the JSON text. + */ +static VALUE cState_indent(VALUE self) +{ + GET_STATE(self); + return state->indent ? rb_str_new(state->indent, state->indent_len) : rb_str_new2(""); +} + +/* + * call-seq: indent=(indent) + * + * Sets the string that is used to indent levels in the JSON text. 
+ */ +static VALUE cState_indent_set(VALUE self, VALUE indent) +{ + unsigned long len; + GET_STATE(self); + Check_Type(indent, T_STRING); + len = RSTRING_LEN(indent); + if (len == 0) { + if (state->indent) { + ruby_xfree(state->indent); + state->indent = NULL; + state->indent_len = 0; + } + } else { + if (state->indent) ruby_xfree(state->indent); + state->indent = fstrndup(RSTRING_PTR(indent), len); + state->indent_len = len; + } + return Qnil; +} + +/* + * call-seq: space() + * + * Returns the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space(VALUE self) +{ + GET_STATE(self); + return state->space ? rb_str_new(state->space, state->space_len) : rb_str_new2(""); +} + +/* + * call-seq: space=(space) + * + * Sets _space_ to the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space_set(VALUE self, VALUE space) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space, T_STRING); + len = RSTRING_LEN(space); + if (len == 0) { + if (state->space) { + ruby_xfree(state->space); + state->space = NULL; + state->space_len = 0; + } + } else { + if (state->space) ruby_xfree(state->space); + state->space = fstrndup(RSTRING_PTR(space), len); + state->space_len = len; + } + return Qnil; +} + +/* + * call-seq: space_before() + * + * Returns the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before(VALUE self) +{ + GET_STATE(self); + return state->space_before ? rb_str_new(state->space_before, state->space_before_len) : rb_str_new2(""); +} + +/* + * call-seq: space_before=(space_before) + * + * Sets the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before_set(VALUE self, VALUE space_before) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space_before, T_STRING); + len = RSTRING_LEN(space_before); + if (len == 0) { + if (state->space_before) { + ruby_xfree(state->space_before); + state->space_before = NULL; + state->space_before_len = 0; + } + } else { + if (state->space_before) ruby_xfree(state->space_before); + state->space_before = fstrndup(RSTRING_PTR(space_before), len); + state->space_before_len = len; + } + return Qnil; +} + +/* + * call-seq: object_nl() + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl(VALUE self) +{ + GET_STATE(self); + return state->object_nl ? rb_str_new(state->object_nl, state->object_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: object_nl=(object_nl) + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(object_nl, T_STRING); + len = RSTRING_LEN(object_nl); + if (len == 0) { + if (state->object_nl) { + ruby_xfree(state->object_nl); + state->object_nl = NULL; + } + } else { + if (state->object_nl) ruby_xfree(state->object_nl); + state->object_nl = fstrndup(RSTRING_PTR(object_nl), len); + state->object_nl_len = len; + } + return Qnil; +} + +/* + * call-seq: array_nl() + * + * This string is put at the end of a line that holds a JSON array. + */ +static VALUE cState_array_nl(VALUE self) +{ + GET_STATE(self); + return state->array_nl ? rb_str_new(state->array_nl, state->array_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: array_nl=(array_nl) + * + * This string is put at the end of a line that holds a JSON array. 
+ */ +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(array_nl, T_STRING); + len = RSTRING_LEN(array_nl); + if (len == 0) { + if (state->array_nl) { + ruby_xfree(state->array_nl); + state->array_nl = NULL; + } + } else { + if (state->array_nl) ruby_xfree(state->array_nl); + state->array_nl = fstrndup(RSTRING_PTR(array_nl), len); + state->array_nl_len = len; + } + return Qnil; +} + + +/* +* call-seq: check_circular? +* +* Returns true, if circular data structures should be checked, +* otherwise returns false. +*/ +static VALUE cState_check_circular_p(VALUE self) +{ + GET_STATE(self); + return state->max_nesting ? Qtrue : Qfalse; +} + +/* + * call-seq: max_nesting + * + * This integer returns the maximum level of data structure nesting in + * the generated JSON, max_nesting = 0 if no maximum is checked. + */ +static VALUE cState_max_nesting(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->max_nesting); +} + +/* + * call-seq: max_nesting=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_max_nesting_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + return state->max_nesting = FIX2LONG(depth); +} + +/* + * call-seq: allow_nan? + * + * Returns true, if NaN, Infinity, and -Infinity should be generated, otherwise + * returns false. + */ +static VALUE cState_allow_nan_p(VALUE self) +{ + GET_STATE(self); + return state->allow_nan ? Qtrue : Qfalse; +} + +/* + * call-seq: ascii_only? + * + * Returns true, if only ASCII characters should be generated. Otherwise + * returns false. + */ +static VALUE cState_ascii_only_p(VALUE self) +{ + GET_STATE(self); + return state->ascii_only ? Qtrue : Qfalse; +} + +/* + * call-seq: depth + * + * This integer returns the current depth of data structure nesting. + */ +static VALUE cState_depth(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->depth); +} + +/* + * call-seq: depth=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_depth_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + return Qnil; +} + +/* + * call-seq: buffer_initial_length + * + * This integer returns the current initial length of the buffer. + */ +static VALUE cState_buffer_initial_length(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->buffer_initial_length); +} + +/* + * call-seq: buffer_initial_length=(length) + * + * This sets the initial length of the buffer to +length+, if +length+ > 0, + * otherwise its value isn't changed. 
+ */ +static VALUE cState_buffer_initial_length_set(VALUE self, VALUE buffer_initial_length) +{ + long initial_length; + GET_STATE(self); + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) { + state->buffer_initial_length = initial_length; + } + return Qnil; +} + +/* + * + */ +void Init_generator(void) +{ +#undef rb_intern + rb_require("json/common"); + + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + mGenerator = rb_define_module_under(mExt, "Generator"); + + eGeneratorError = rb_path2class("JSON::GeneratorError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eGeneratorError); + rb_gc_register_mark_object(eNestingError); + + cState = rb_define_class_under(mGenerator, "State", rb_cObject); + rb_define_alloc_func(cState, cState_s_allocate); + rb_define_singleton_method(cState, "from_state", cState_from_state_s, 1); + rb_define_method(cState, "initialize", cState_initialize, -1); + rb_define_method(cState, "initialize_copy", cState_init_copy, 1); + rb_define_method(cState, "indent", cState_indent, 0); + rb_define_method(cState, "indent=", cState_indent_set, 1); + rb_define_method(cState, "space", cState_space, 0); + rb_define_method(cState, "space=", cState_space_set, 1); + rb_define_method(cState, "space_before", cState_space_before, 0); + rb_define_method(cState, "space_before=", cState_space_before_set, 1); + rb_define_method(cState, "object_nl", cState_object_nl, 0); + rb_define_method(cState, "object_nl=", cState_object_nl_set, 1); + rb_define_method(cState, "array_nl", cState_array_nl, 0); + rb_define_method(cState, "array_nl=", cState_array_nl_set, 1); + rb_define_method(cState, "max_nesting", cState_max_nesting, 0); + rb_define_method(cState, "max_nesting=", cState_max_nesting_set, 1); + rb_define_method(cState, "check_circular?", cState_check_circular_p, 0); + rb_define_method(cState, "allow_nan?", cState_allow_nan_p, 0); + rb_define_method(cState, "ascii_only?", cState_ascii_only_p, 0); + rb_define_method(cState, "depth", cState_depth, 0); + rb_define_method(cState, "depth=", cState_depth_set, 1); + rb_define_method(cState, "buffer_initial_length", cState_buffer_initial_length, 0); + rb_define_method(cState, "buffer_initial_length=", cState_buffer_initial_length_set, 1); + rb_define_method(cState, "configure", cState_configure, 1); + rb_define_alias(cState, "merge", "configure"); + rb_define_method(cState, "to_h", cState_to_h, 0); + rb_define_alias(cState, "to_hash", "to_h"); + rb_define_method(cState, "[]", cState_aref, 1); + rb_define_method(cState, "[]=", cState_aset, 2); + rb_define_method(cState, "generate", cState_generate, 1); + + mGeneratorMethods = rb_define_module_under(mGenerator, "GeneratorMethods"); + mObject = rb_define_module_under(mGeneratorMethods, "Object"); + rb_define_method(mObject, "to_json", mObject_to_json, -1); + mHash = rb_define_module_under(mGeneratorMethods, "Hash"); + rb_define_method(mHash, "to_json", mHash_to_json, -1); + mArray = rb_define_module_under(mGeneratorMethods, "Array"); + rb_define_method(mArray, "to_json", mArray_to_json, -1); +#ifdef RUBY_INTEGER_UNIFICATION + mInteger = rb_define_module_under(mGeneratorMethods, "Integer"); + rb_define_method(mInteger, "to_json", mInteger_to_json, -1); +#else + mFixnum = rb_define_module_under(mGeneratorMethods, "Fixnum"); + rb_define_method(mFixnum, "to_json", mFixnum_to_json, -1); + mBignum = rb_define_module_under(mGeneratorMethods, "Bignum"); + 
rb_define_method(mBignum, "to_json", mBignum_to_json, -1); +#endif + mFloat = rb_define_module_under(mGeneratorMethods, "Float"); + rb_define_method(mFloat, "to_json", mFloat_to_json, -1); + mString = rb_define_module_under(mGeneratorMethods, "String"); + rb_define_singleton_method(mString, "included", mString_included_s, 1); + rb_define_method(mString, "to_json", mString_to_json, -1); + rb_define_method(mString, "to_json_raw", mString_to_json_raw, -1); + rb_define_method(mString, "to_json_raw_object", mString_to_json_raw_object, 0); + mString_Extend = rb_define_module_under(mString, "Extend"); + rb_define_method(mString_Extend, "json_create", mString_Extend_json_create, 1); + mTrueClass = rb_define_module_under(mGeneratorMethods, "TrueClass"); + rb_define_method(mTrueClass, "to_json", mTrueClass_to_json, -1); + mFalseClass = rb_define_module_under(mGeneratorMethods, "FalseClass"); + rb_define_method(mFalseClass, "to_json", mFalseClass_to_json, -1); + mNilClass = rb_define_module_under(mGeneratorMethods, "NilClass"); + rb_define_method(mNilClass, "to_json", mNilClass_to_json, -1); + + i_to_s = rb_intern("to_s"); + i_to_json = rb_intern("to_json"); + i_new = rb_intern("new"); + i_indent = rb_intern("indent"); + i_space = rb_intern("space"); + i_space_before = rb_intern("space_before"); + i_object_nl = rb_intern("object_nl"); + i_array_nl = rb_intern("array_nl"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_ascii_only = rb_intern("ascii_only"); + i_depth = rb_intern("depth"); + i_buffer_initial_length = rb_intern("buffer_initial_length"); + i_pack = rb_intern("pack"); + i_unpack = rb_intern("unpack"); + i_create_id = rb_intern("create_id"); + i_extend = rb_intern("extend"); + i_key_p = rb_intern("key?"); + i_aref = rb_intern("[]"); + i_send = rb_intern("__send__"); + i_respond_to_p = rb_intern("respond_to?"); + i_match = rb_intern("match"); + i_keys = rb_intern("keys"); + i_dup = rb_intern("dup"); +#ifdef HAVE_RUBY_ENCODING_H + CEncoding_UTF_8 = rb_funcall(rb_path2class("Encoding"), rb_intern("find"), 1, rb_str_new2("utf-8")); + i_encoding = rb_intern("encoding"); + i_encode = rb_intern("encode"); +#endif + i_SAFE_STATE_PROTOTYPE = rb_intern("SAFE_STATE_PROTOTYPE"); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator.h b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator.h new file mode 100644 index 0000000..c367a62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/generator/generator.h @@ -0,0 +1,171 @@ +#ifndef _GENERATOR_H_ +#define _GENERATOR_H_ + +#include +#include + +#include "ruby.h" + +#ifdef HAVE_RUBY_RE_H +#include "ruby/re.h" +#else +#include "re.h" +#endif + +#ifndef rb_intern_str +#define rb_intern_str(string) SYM2ID(rb_str_intern(string)) +#endif + +#ifndef rb_obj_instance_variables +#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0) +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode definitions */ + +#define UNI_STRICT_CONVERSION 1 + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_MAX_BMP (UTF32)0x0000FFFF +#define UNI_MAX_UTF16 (UTF32)0x0010FFFF +#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF +#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF + +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define 
UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +static const int halfShift = 10; /* used for shifting by 10 bits */ + +static const UTF32 halfBase = 0x0010000UL; +static const UTF32 halfMask = 0x3FFUL; + +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length); +static void unicode_escape(char *buf, UTF16 character); +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character); +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string); +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string); +static char *fstrndup(const char *ptr, unsigned long len); + +/* ruby api and some helpers */ + +typedef struct JSON_Generator_StateStruct { + char *indent; + long indent_len; + char *space; + long space_len; + char *space_before; + long space_before_len; + char *object_nl; + long object_nl_len; + char *array_nl; + long array_nl_len; + FBuffer *array_delim; + FBuffer *object_delim; + FBuffer *object_delim2; + long max_nesting; + char allow_nan; + char ascii_only; + long depth; + long buffer_initial_length; +} JSON_Generator_State; + +#define GET_STATE_TO(self, state) \ + TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state) + +#define GET_STATE(self) \ + JSON_Generator_State *state; \ + GET_STATE_TO(self, state) + +#define GENERATE_JSON(type) \ + FBuffer *buffer; \ + VALUE Vstate; \ + JSON_Generator_State *state; \ + \ + rb_scan_args(argc, argv, "01", &Vstate); \ + Vstate = cState_from_state_s(cState, Vstate); \ + TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \ + buffer = cState_prepare_buffer(Vstate); \ + generate_json_##type(buffer, Vstate, state, self); \ + return fbuffer_to_s(buffer) + +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self); +#ifdef RUBY_INTEGER_UNIFICATION +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self); +#else +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self); +#endif +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_included_s(VALUE self, VALUE modul); +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_to_json_raw_object(VALUE self); +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self); +static VALUE mString_Extend_json_create(VALUE self, VALUE o); +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self); +static void State_free(void *state); +static VALUE cState_s_allocate(VALUE klass); +static VALUE cState_configure(VALUE self, VALUE opts); +static VALUE cState_to_h(VALUE self); +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void 
generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#endif +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static VALUE cState_partial_generate(VALUE self, VALUE obj); +static VALUE cState_generate(VALUE self, VALUE obj); +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cState_from_state_s(VALUE self, VALUE opts); +static VALUE cState_indent(VALUE self); +static VALUE cState_indent_set(VALUE self, VALUE indent); +static VALUE cState_space(VALUE self); +static VALUE cState_space_set(VALUE self, VALUE space); +static VALUE cState_space_before(VALUE self); +static VALUE cState_space_before_set(VALUE self, VALUE space_before); +static VALUE cState_object_nl(VALUE self); +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl); +static VALUE cState_array_nl(VALUE self); +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl); +static VALUE cState_max_nesting(VALUE self); +static VALUE cState_max_nesting_set(VALUE self, VALUE depth); +static VALUE cState_allow_nan_p(VALUE self); +static VALUE cState_ascii_only_p(VALUE self); +static VALUE cState_depth(VALUE self); +static VALUE cState_depth_set(VALUE self, VALUE depth); +static FBuffer *cState_prepare_buffer(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Generator_State_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json) +#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/.sitearchdir.-.json.-.ext.time b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/.sitearchdir.-.json.-.ext.time new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/depend b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/depend new file mode 100644 index 0000000..498ffa9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/depend @@ -0,0 +1 @@ +parser.o: parser.c parser.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/depend 2 b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/depend 2 new file mode 100644 index 0000000..498ffa9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/depend 2 @@ -0,0 +1 @@ +parser.o: parser.c parser.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/extconf 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/extconf 2.rb new file mode 100644 index 0000000..f7360d4 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/extconf 2.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: false +require 'mkmf' + +have_func("rb_enc_raise", "ruby.h") + +create_makefile 'json/ext/parser' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/extconf.rb new file mode 100644 index 0000000..f7360d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/extconf.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: false +require 'mkmf' + +have_func("rb_enc_raise", "ruby.h") + +create_makefile 'json/ext/parser' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.c b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.c new file mode 100644 index 0000000..6f0e4ac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.c @@ -0,0 +1,2137 @@ +/* This file is automatically generated from parser.rl by using ragel */ +#line 1 "parser.rl" +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) +{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 
12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; +static VALUE cBigDecimal = Qundef; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_BigDecimal; + + +#line 126 "parser.rl" + + + +#line 108 "parser.c" +enum {JSON_object_start = 1}; +enum {JSON_object_first_final = 27}; +enum {JSON_object_error = 0}; + +enum {JSON_object_en_main = 1}; + + +#line 168 "parser.rl" + + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + +#line 132 "parser.c" + { + cs = JSON_object_start; + } + +#line 183 "parser.rl" + +#line 139 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 123 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 13: goto st2; + case 32: goto st2; + case 34: goto tr2; + case 47: goto st23; + case 125: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st2; + goto st0; +tr2: +#line 150 "parser.rl" + { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, p, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { p--; {p++; cs = 3; goto _out;} } else {p = (( np))-1;} + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 180 "parser.c" + switch( (*p) ) { + case 13: goto st3; + case 32: goto st3; + case 47: goto st4; + case 58: goto st8; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st3; + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st5; + case 47: goto st7; + } + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 42 ) + goto st6; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st6; + case 47: goto st3; + } + goto st5; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 10 ) + goto st3; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 13: goto st8; + case 32: goto st8; + case 34: goto tr11; + case 45: goto tr11; + case 47: goto st19; + case 73: goto tr11; + case 78: goto tr11; + case 91: goto tr11; + case 102: goto tr11; + case 110: goto tr11; + case 116: goto tr11; + case 123: goto tr11; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr11; + } else if ( (*p) >= 9 ) + goto st8; + goto st0; +tr11: +#line 134 "parser.rl" + { + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + p--; {p++; cs = 9; goto _out;} + } else { + if 
(NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + {p = (( np))-1;} + } + } + goto st9; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: +#line 268 "parser.c" + switch( (*p) ) { + case 13: goto st9; + case 32: goto st9; + case 44: goto st10; + case 47: goto st15; + case 125: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st9; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + switch( (*p) ) { + case 13: goto st10; + case 32: goto st10; + case 34: goto tr2; + case 47: goto st11; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st10; + goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + switch( (*p) ) { + case 42: goto st12; + case 47: goto st14; + } + goto st0; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 42 ) + goto st13; + goto st12; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + switch( (*p) ) { + case 42: goto st13; + case 47: goto st10; + } + goto st12; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 10 ) + goto st10; + goto st14; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 42: goto st16; + case 47: goto st18; + } + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 42 ) + goto st17; + goto st16; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: + switch( (*p) ) { + case 42: goto st17; + case 47: goto st9; + } + goto st16; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: + if ( (*p) == 10 ) + goto st9; + goto st18; +tr4: +#line 158 "parser.rl" + { p--; {p++; cs = 27; goto _out;} } + goto st27; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: +#line 364 "parser.c" + goto st0; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: + switch( (*p) ) { + case 42: goto st20; + case 47: goto st22; + } + goto st0; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: + if ( (*p) == 42 ) + goto st21; + goto st20; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: + switch( (*p) ) { + case 42: goto st21; + case 47: goto st8; + } + goto st20; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: + if ( (*p) == 10 ) + goto st8; + goto st22; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: + switch( (*p) ) { + case 42: goto st24; + case 47: goto st26; + } + goto st0; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: + if ( (*p) == 42 ) + goto st25; + goto st24; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: + switch( (*p) ) { + case 42: goto st25; + case 47: goto st2; + } + goto st24; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: + if ( (*p) == 10 ) + goto st2; + goto st26; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 
21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 184 "parser.rl" + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + + +#line 487 "parser.c" +enum {JSON_value_start = 1}; +enum {JSON_value_first_final = 29}; +enum {JSON_value_error = 0}; + +enum {JSON_value_en_main = 1}; + + +#line 284 "parser.rl" + + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + +#line 503 "parser.c" + { + cs = JSON_value_start; + } + +#line 291 "parser.rl" + +#line 510 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +st1: + if ( ++p == pe ) + goto _test_eof1; +case 1: + switch( (*p) ) { + case 13: goto st1; + case 32: goto st1; + case 34: goto tr2; + case 45: goto tr3; + case 47: goto st6; + case 73: goto st10; + case 78: goto st17; + case 91: goto tr7; + case 102: goto st19; + case 110: goto st23; + case 116: goto st26; + case 123: goto tr11; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr3; + } else if ( (*p) >= 9 ) + goto st1; + goto st0; +st0: +cs = 0; + goto _out; +tr2: +#line 236 "parser.rl" + { + char *np = JSON_parse_string(json, p, pe, result); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr3: +#line 241 "parser.rl" + { + char *np; + if(pe > p + 8 && !strncmp(MinusInfinity, p, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + {p = (( p + 10))-1;} + p--; {p++; cs = 29; goto _out;} + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + np = JSON_parse_integer(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + p--; {p++; cs = 29; goto _out;} + } + goto st29; +tr7: +#line 259 "parser.rl" + { + char *np; + np = JSON_parse_array(json, p, pe, result, current_nesting + 1); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr11: +#line 265 "parser.rl" + { + char *np; + np = JSON_parse_object(json, p, pe, result, current_nesting + 1); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr25: +#line 229 "parser.rl" + { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + goto st29; +tr27: +#line 222 "parser.rl" + { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + goto st29; +tr31: +#line 216 "parser.rl" + { + *result = Qfalse; + } + goto st29; +tr34: +#line 213 "parser.rl" + { + *result = Qnil; + } + goto st29; +tr37: +#line 219 "parser.rl" + { + *result = Qtrue; + } + goto st29; +st29: + if ( ++p == pe ) + goto _test_eof29; 
+case 29: +#line 271 "parser.rl" + { p--; {p++; cs = 29; goto _out;} } +#line 630 "parser.c" + switch( (*p) ) { + case 13: goto st29; + case 32: goto st29; + case 47: goto st2; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st29; + goto st0; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 42: goto st3; + case 47: goto st5; + } + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 42 ) + goto st4; + goto st3; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st4; + case 47: goto st29; + } + goto st3; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 10 ) + goto st29; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st9; + } + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 42 ) + goto st8; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 42: goto st8; + case 47: goto st1; + } + goto st7; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 10 ) + goto st1; + goto st9; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( (*p) == 110 ) + goto st11; + goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + if ( (*p) == 102 ) + goto st12; + goto st0; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 105 ) + goto st13; + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + if ( (*p) == 110 ) + goto st14; + goto st0; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 105 ) + goto st15; + goto st0; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + if ( (*p) == 116 ) + goto st16; + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 121 ) + goto tr25; + goto st0; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: + if ( (*p) == 97 ) + goto st18; + goto st0; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: + if ( (*p) == 78 ) + goto tr27; + goto st0; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: + if ( (*p) == 97 ) + goto st20; + goto st0; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: + if ( (*p) == 108 ) + goto st21; + goto st0; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: + if ( (*p) == 115 ) + goto st22; + goto st0; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: + if ( (*p) == 101 ) + goto tr31; + goto st0; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: + if ( (*p) == 117 ) + goto st24; + goto st0; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: + if ( (*p) == 108 ) + goto st25; + goto st0; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: + if ( (*p) == 108 ) + goto tr34; + goto st0; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: + if ( (*p) == 114 ) + goto st27; + goto st0; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: + if ( (*p) == 117 ) + goto st28; + goto st0; +st28: + if ( ++p == pe ) + goto _test_eof28; +case 28: + if ( (*p) == 101 ) + goto tr37; + goto st0; + } + _test_eof1: cs = 1; goto _test_eof; + _test_eof29: cs = 29; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + 
_test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof28: cs = 28; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 292 "parser.rl" + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + + +#line 881 "parser.c" +enum {JSON_integer_start = 1}; +enum {JSON_integer_first_final = 3}; +enum {JSON_integer_error = 0}; + +enum {JSON_integer_en_main = 1}; + + +#line 308 "parser.rl" + + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + +#line 897 "parser.c" + { + cs = JSON_integer_start; + } + +#line 315 "parser.rl" + json->memo = p; + +#line 905 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 45: goto st2; + case 48: goto st3; + } + if ( 49 <= (*p) && (*p) <= 57 ) + goto st5; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + if ( (*p) == 48 ) + goto st3; + if ( 49 <= (*p) && (*p) <= 57 ) + goto st5; + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st0; + goto tr4; +tr4: +#line 305 "parser.rl" + { p--; {p++; cs = 4; goto _out;} } + goto st4; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: +#line 946 "parser.c" + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st5; + goto tr4; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 317 "parser.rl" + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + + +#line 980 "parser.c" +enum {JSON_float_start = 1}; +enum {JSON_float_first_final = 8}; +enum {JSON_float_error = 0}; + +enum {JSON_float_en_main = 1}; + + +#line 342 "parser.rl" + + +static int is_bigdecimal_class(VALUE obj) +{ + if (cBigDecimal == Qundef) { + if (rb_const_defined(rb_cObject, i_BigDecimal)) { + cBigDecimal = rb_const_get_at(rb_cObject, i_BigDecimal); + } + else { + return 0; + } + } + return obj == cBigDecimal; +} + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + +#line 1009 "parser.c" + { + cs = JSON_float_start; + } + +#line 362 "parser.rl" + json->memo = p; + +#line 1017 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 45: goto st2; + case 48: goto st3; + } + if ( 49 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + if ( (*p) == 48 ) + goto st3; + if ( 49 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; 
+st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + switch( (*p) ) { + case 46: goto st4; + case 69: goto st5; + case 101: goto st5; + } + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st8; + goto st0; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 69: goto st5; + case 101: goto st5; + } + if ( (*p) > 46 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st8; + } else if ( (*p) >= 45 ) + goto st0; + goto tr9; +tr9: +#line 336 "parser.rl" + { p--; {p++; cs = 9; goto _out;} } + goto st9; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: +#line 1082 "parser.c" + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + switch( (*p) ) { + case 43: goto st6; + case 45: goto st6; + } + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + switch( (*p) ) { + case 69: goto st0; + case 101: goto st0; + } + if ( (*p) > 46 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + } else if ( (*p) >= 45 ) + goto st0; + goto tr9; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + switch( (*p) ) { + case 46: goto st4; + case 69: goto st5; + case 101: goto st5; + } + if ( 48 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 364 "parser.rl" + + if (cs >= JSON_float_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + if (NIL_P(json->decimal_class)) { + *result = rb_float_new(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } else { + VALUE text; + text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + if (is_bigdecimal_class(json->decimal_class)) { + *result = rb_funcall(Qnil, i_BigDecimal, 1, text); + } else { + *result = rb_funcall(json->decimal_class, i_new, 1, text); + } + } + return p + 1; + } else { + return NULL; + } +} + + + +#line 1169 "parser.c" +enum {JSON_array_start = 1}; +enum {JSON_array_first_final = 17}; +enum {JSON_array_error = 0}; + +enum {JSON_array_en_main = 1}; + + +#line 417 "parser.rl" + + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + +#line 1191 "parser.c" + { + cs = JSON_array_start; + } + +#line 430 "parser.rl" + +#line 1198 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 91 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 13: goto st2; + case 32: goto st2; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st13; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 93: goto tr4; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st2; + goto st0; +tr2: +#line 394 "parser.rl" + { + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + p--; {p++; cs = 3; goto _out;} + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + {p = (( np))-1;} + } + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 1257 "parser.c" + switch( (*p) ) { + case 13: goto st3; + case 32: goto st3; + case 44: goto st4; + case 47: goto st9; + case 93: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st3; + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 13: goto st4; + case 32: goto st4; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st5; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st4; + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + switch( (*p) ) { + case 42: goto st6; + case 47: goto st8; + } + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( (*p) == 42 ) + goto st7; + goto st6; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st4; + } + goto st6; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + if ( (*p) == 10 ) + goto st4; + goto st8; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + switch( (*p) ) { + case 42: goto st10; + case 47: goto st12; + } + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( (*p) == 42 ) + goto st11; + goto st10; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + switch( (*p) ) { + case 42: goto st11; + case 47: goto st3; + } + goto st10; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 10 ) + goto st3; + goto st12; +tr4: +#line 409 "parser.rl" + { p--; {p++; cs = 17; goto _out;} } + goto st17; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: +#line 1364 "parser.c" + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + switch( (*p) ) { + case 42: goto st14; + case 47: goto st16; + } + goto st0; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 42 ) + goto st15; + goto st14; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 42: goto st15; + case 47: goto st2; + } + goto st14; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 10 ) + goto st2; + goto st16; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + 
_test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 431 "parser.rl" + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd) +{ + char *p = string, *pe = string, *unescape; + int unescape_len; + char buf[4]; + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) rb_str_buf_cat(result, p, pe - p); + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + rb_str_buf_cat(result, unescape, unescape_len); + p = ++pe; + } else { + pe++; + } + } + rb_str_buf_cat(result, p, pe - p); + return result; +} + + +#line 1509 "parser.c" +enum {JSON_string_start = 1}; +enum {JSON_string_first_final = 8}; +enum {JSON_string_error = 0}; + +enum {JSON_string_en_main = 1}; + + +#line 538 "parser.rl" + + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + *result = rb_str_buf_new(0); + +#line 1539 "parser.c" + { + cs = JSON_string_start; + } + +#line 559 "parser.rl" + json->memo = p; + +#line 1547 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 34 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 34: goto tr2; + case 92: goto st3; + } + if ( 0 <= (signed char)(*p) && (*p) <= 31 ) + goto st0; + goto st2; +tr2: +#line 524 "parser.rl" + { + *result = json_string_unescape(*result, json->memo + 1, p); + if (NIL_P(*result)) { + p--; + 
{p++; cs = 8; goto _out;} + } else { + FORCE_UTF8(*result); + {p = (( p + 1))-1;} + } + } +#line 535 "parser.rl" + { p--; {p++; cs = 8; goto _out;} } + goto st8; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: +#line 1590 "parser.c" + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 117 ) + goto st4; + if ( 0 <= (signed char)(*p) && (*p) <= 31 ) + goto st0; + goto st2; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st5; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st5; + } else + goto st5; + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st6; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st6; + } else + goto st6; + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st7; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st7; + } else + goto st7; + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st2; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st2; + } else + goto st2; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 561 "parser.rl" + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (json->symbolize_names && json->parsing_name) { + *result = rb_str_intern(*result); + } else if (RB_TYPE_P(*result, T_STRING)) { + rb_str_resize(*result, RSTRING_LEN(*result)); + } + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. 
+ * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. + * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + + +#line 1853 "parser.c" +enum {JSON_start = 1}; +enum {JSON_first_final = 10}; +enum {JSON_error = 0}; + +enum {JSON_en_main = 1}; + + +#line 761 "parser.rl" + + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. + */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + +#line 1878 "parser.c" + { + cs = JSON_start; + } + +#line 777 "parser.rl" + p = json->source; + pe = p + json->len; + +#line 1887 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +st1: + if ( ++p == pe ) + goto _test_eof1; +case 1: + switch( (*p) ) { + case 13: goto st1; + case 32: goto st1; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st6; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st1; + goto st0; +st0: +cs = 0; + goto _out; +tr2: +#line 753 "parser.rl" + { + char *np = JSON_parse_value(json, p, pe, &result, 0); + if (np == NULL) { p--; {p++; cs = 10; goto _out;} } else {p = (( np))-1;} + } + goto st10; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: +#line 1931 "parser.c" + switch( (*p) ) { + case 13: goto st10; + case 32: goto st10; + case 47: goto st2; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st10; + goto st0; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 42: goto st3; + case 47: goto st5; + } + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 42 ) + goto st4; + goto st3; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st4; + case 47: goto st10; + } + goto st3; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 10 ) + goto st10; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st9; + } + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 42 ) + goto st8; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 42: goto st8; + case 47: goto st1; + } + goto st7; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 10 ) + goto st1; + goto st9; + } + _test_eof1: cs = 1; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 780 "parser.rl" + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", 
__LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. + */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_BigDecimal = rb_intern("BigDecimal"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.h b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.h new file mode 100644 index 0000000..e6cf779 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.h @@ -0,0 +1,91 @@ +#ifndef _PARSER_H_ +#define _PARSER_H_ + +#include "ruby.h" + +#ifndef HAVE_RUBY_RE_H 
+#include "re.h" +#endif + +#ifdef HAVE_RUBY_ST_H +#include "ruby/st.h" +#else +#include "st.h" +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode */ + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +typedef struct JSON_ParserStruct { + VALUE Vsource; + char *source; + long len; + char *memo; + VALUE create_id; + int max_nesting; + int allow_nan; + int parsing_name; + int symbolize_names; + VALUE object_class; + VALUE array_class; + VALUE decimal_class; + int create_additions; + VALUE match_string; + FBuffer *fbuffer; +} JSON_Parser; + +#define GET_PARSER \ + GET_PARSER_INIT; \ + if (!json->Vsource) rb_raise(rb_eTypeError, "uninitialized instance") +#define GET_PARSER_INIT \ + JSON_Parser *json; \ + TypedData_Get_Struct(self, JSON_Parser, &JSON_Parser_type, json) + +#define MinusInfinity "-Infinity" +#define EVIL 0x666 + +static UTF32 unescape_unicode(const unsigned char *p); +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch); +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd); +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result); +static VALUE convert_encoding(VALUE source); +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cParser_parse(VALUE self); +static void JSON_mark(void *json); +static void JSON_free(void *json); +static VALUE cJSON_parser_s_allocate(VALUE klass); +static VALUE cParser_source(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Parser_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, JSON_free, json) +#define TypedData_Get_Struct(self, JSON_Parser, ignore, json) Data_Get_Struct(self, JSON_Parser, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.rl b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.rl new file mode 100644 index 0000000..00a35bd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser 2.rl @@ -0,0 +1,897 @@ +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) 
+{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; +static VALUE cBigDecimal = Qundef; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_BigDecimal; + +%%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft\"\-\[\{NI] | digit; + begin_object = '{'; + 
end_object = '}'; + begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; +}%% + +%%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + fexec np; + } + } + + action parse_name { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, fpc, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + + pair = ignore* begin_name >parse_name ignore* name_separator ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object + (pair (next_pair)*)? ignore* + end_object + ) @exit; +}%% + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + %% write init; + %% write exec; + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + *result = Qnil; + } + action parse_false { + *result = Qfalse; + } + action parse_true { + *result = Qtrue; + } + action parse_nan { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + action parse_infinity { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + action parse_string { + char *np = JSON_parse_string(json, fpc, pe, result); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_number { + char *np; + if(pe > fpc + 8 && !strncmp(MinusInfinity, fpc, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + fexec p + 10; + fhold; fbreak; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, fpc, pe, result); + if (np != NULL) fexec np; + np = JSON_parse_integer(json, fpc, pe, result); + if (np != NULL) fexec np; + fhold; fbreak; + } + + action parse_array { + char *np; + np = JSON_parse_array(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_object { + char *np; + np = JSON_parse_object(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + +main 
:= ignore* ( + Vnull @parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) ignore* %*exit; +}%% + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + %% write init; + %% write exec; + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + +%%{ + machine JSON_integer; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ('0' | [1-9][0-9]*) (^[0-9]? @exit); +}%% + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + +%%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ( + (('0' | [1-9][0-9]*) '.' [0-9]+ ([Ee] [+\-]?[0-9]+)?) + | (('0' | [1-9][0-9]*) ([Ee] [+\-]?[0-9]+)) + ) (^[0-9Ee.\-]? @exit ); +}%% + +static int is_bigdecimal_class(VALUE obj) +{ + if (cBigDecimal == Qundef) { + if (rb_const_defined(rb_cObject, i_BigDecimal)) { + cBigDecimal = rb_const_get_at(rb_cObject, i_BigDecimal); + } + else { + return 0; + } + } + return obj == cBigDecimal; +} + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_float_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + if (NIL_P(json->decimal_class)) { + *result = rb_float_new(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } else { + VALUE text; + text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + if (is_bigdecimal_class(json->decimal_class)) { + *result = rb_funcall(Qnil, i_BigDecimal, 1, text); + } else { + *result = rb_funcall(json->decimal_class, i_new, 1, text); + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + fexec np; + } + } + + action exit { fhold; fbreak; } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array ignore* + ((begin_value >parse_value ignore*) + (ignore* next_element ignore*)*)? + end_array @exit; +}%% + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + %% write init; + %% write exec; + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd) +{ + char *p = string, *pe = string, *unescape; + int unescape_len; + char buf[4]; + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) rb_str_buf_cat(result, p, pe - p); + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + rb_str_buf_cat(result, unescape, unescape_len); + p = ++pe; + } else { + pe++; + } + } + rb_str_buf_cat(result, p, pe - p); + return result; +} + +%%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + *result = json_string_unescape(*result, json->memo + 1, p); + if (NIL_P(*result)) { + fhold; + fbreak; + } else { + FORCE_UTF8(*result); + fexec p + 1; + } + } + + action exit { fhold; fbreak; } + + main := '"' ((^([\"\\] | 0..0x1f) | '\\'[\"\\/bfnrt] | '\\u'[0-9a-fA-F]{4} | '\\'^([\"\\/bfnrtu]|0..0x1f))* %parse_string) '"' @exit; +}%% + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + *result = rb_str_buf_new(0); + %% write init; + json->memo = p; + %% write exec; + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (json->symbolize_names && json->parsing_name) { + *result = rb_str_intern(*result); + } else if (RB_TYPE_P(*result, T_STRING)) { + rb_str_resize(*result, RSTRING_LEN(*result)); + } + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. 
It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. + * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. + * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 
1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + +%%{ + machine JSON; + + write data; + + include JSON_common; + + action parse_value { + char *np = JSON_parse_value(json, fpc, pe, &result, 0); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + main := ignore* ( + begin_value >parse_value + ) ignore*; +}%% + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. 
+ */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + %% write init; + p = json->source; + pe = p + json->len; + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. + */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_BigDecimal = rb_intern("BigDecimal"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git 
a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.c b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.c new file mode 100644 index 0000000..6f0e4ac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.c @@ -0,0 +1,2137 @@ +/* This file is automatically generated from parser.rl by using ragel */ +#line 1 "parser.rl" +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) +{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; +static VALUE cBigDecimal = Qundef; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + 
i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_BigDecimal; + + +#line 126 "parser.rl" + + + +#line 108 "parser.c" +enum {JSON_object_start = 1}; +enum {JSON_object_first_final = 27}; +enum {JSON_object_error = 0}; + +enum {JSON_object_en_main = 1}; + + +#line 168 "parser.rl" + + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + +#line 132 "parser.c" + { + cs = JSON_object_start; + } + +#line 183 "parser.rl" + +#line 139 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 123 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 13: goto st2; + case 32: goto st2; + case 34: goto tr2; + case 47: goto st23; + case 125: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st2; + goto st0; +tr2: +#line 150 "parser.rl" + { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, p, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { p--; {p++; cs = 3; goto _out;} } else {p = (( np))-1;} + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 180 "parser.c" + switch( (*p) ) { + case 13: goto st3; + case 32: goto st3; + case 47: goto st4; + case 58: goto st8; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st3; + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st5; + case 47: goto st7; + } + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 42 ) + goto st6; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st6; + case 47: goto st3; + } + goto st5; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 10 ) + goto st3; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 13: goto st8; + case 32: goto st8; + case 34: goto tr11; + case 45: goto tr11; + case 47: goto st19; + case 73: goto tr11; + case 78: goto tr11; + case 91: goto tr11; + case 102: goto tr11; + case 110: goto tr11; + case 116: goto tr11; + case 123: goto tr11; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr11; + } else if ( (*p) >= 9 ) + goto st8; + goto st0; +tr11: +#line 134 "parser.rl" + { + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + p--; {p++; cs = 9; goto _out;} + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + {p = (( np))-1;} + } + } + goto st9; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: +#line 268 "parser.c" + switch( (*p) ) { + case 13: goto st9; + case 32: goto st9; + case 44: goto st10; + case 47: goto st15; + case 125: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st9; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + switch( (*p) ) { + case 13: goto st10; + case 32: goto st10; + case 34: goto tr2; + case 47: goto st11; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st10; + 
goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + switch( (*p) ) { + case 42: goto st12; + case 47: goto st14; + } + goto st0; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 42 ) + goto st13; + goto st12; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + switch( (*p) ) { + case 42: goto st13; + case 47: goto st10; + } + goto st12; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 10 ) + goto st10; + goto st14; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 42: goto st16; + case 47: goto st18; + } + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 42 ) + goto st17; + goto st16; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: + switch( (*p) ) { + case 42: goto st17; + case 47: goto st9; + } + goto st16; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: + if ( (*p) == 10 ) + goto st9; + goto st18; +tr4: +#line 158 "parser.rl" + { p--; {p++; cs = 27; goto _out;} } + goto st27; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: +#line 364 "parser.c" + goto st0; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: + switch( (*p) ) { + case 42: goto st20; + case 47: goto st22; + } + goto st0; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: + if ( (*p) == 42 ) + goto st21; + goto st20; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: + switch( (*p) ) { + case 42: goto st21; + case 47: goto st8; + } + goto st20; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: + if ( (*p) == 10 ) + goto st8; + goto st22; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: + switch( (*p) ) { + case 42: goto st24; + case 47: goto st26; + } + goto st0; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: + if ( (*p) == 42 ) + goto st25; + goto st24; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: + switch( (*p) ) { + case 42: goto st25; + case 47: goto st2; + } + goto st24; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: + if ( (*p) == 10 ) + goto st2; + goto st26; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 184 "parser.rl" + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, 
i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + + +#line 487 "parser.c" +enum {JSON_value_start = 1}; +enum {JSON_value_first_final = 29}; +enum {JSON_value_error = 0}; + +enum {JSON_value_en_main = 1}; + + +#line 284 "parser.rl" + + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + +#line 503 "parser.c" + { + cs = JSON_value_start; + } + +#line 291 "parser.rl" + +#line 510 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +st1: + if ( ++p == pe ) + goto _test_eof1; +case 1: + switch( (*p) ) { + case 13: goto st1; + case 32: goto st1; + case 34: goto tr2; + case 45: goto tr3; + case 47: goto st6; + case 73: goto st10; + case 78: goto st17; + case 91: goto tr7; + case 102: goto st19; + case 110: goto st23; + case 116: goto st26; + case 123: goto tr11; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr3; + } else if ( (*p) >= 9 ) + goto st1; + goto st0; +st0: +cs = 0; + goto _out; +tr2: +#line 236 "parser.rl" + { + char *np = JSON_parse_string(json, p, pe, result); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr3: +#line 241 "parser.rl" + { + char *np; + if(pe > p + 8 && !strncmp(MinusInfinity, p, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + {p = (( p + 10))-1;} + p--; {p++; cs = 29; goto _out;} + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + np = JSON_parse_integer(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + p--; {p++; cs = 29; goto _out;} + } + goto st29; +tr7: +#line 259 "parser.rl" + { + char *np; + np = JSON_parse_array(json, p, pe, result, current_nesting + 1); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr11: +#line 265 "parser.rl" + { + char *np; + np = JSON_parse_object(json, p, pe, result, current_nesting + 1); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr25: +#line 229 "parser.rl" + { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + goto st29; +tr27: +#line 222 "parser.rl" + { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + goto st29; +tr31: +#line 216 "parser.rl" + { + *result = Qfalse; + } + goto st29; +tr34: +#line 213 "parser.rl" + { + *result = Qnil; + } + goto st29; +tr37: +#line 219 "parser.rl" + { + *result = Qtrue; + } + goto st29; +st29: + if ( ++p == pe ) + goto _test_eof29; +case 29: +#line 271 "parser.rl" + { p--; {p++; cs = 29; goto _out;} } +#line 630 "parser.c" + switch( (*p) ) { + case 13: goto st29; + case 32: goto st29; + case 47: goto st2; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st29; + goto st0; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 42: goto st3; + case 47: goto st5; + } + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 42 ) + goto st4; + goto st3; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st4; + case 47: goto st29; + } + goto st3; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( 
(*p) == 10 ) + goto st29; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st9; + } + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 42 ) + goto st8; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 42: goto st8; + case 47: goto st1; + } + goto st7; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 10 ) + goto st1; + goto st9; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( (*p) == 110 ) + goto st11; + goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + if ( (*p) == 102 ) + goto st12; + goto st0; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 105 ) + goto st13; + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + if ( (*p) == 110 ) + goto st14; + goto st0; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 105 ) + goto st15; + goto st0; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + if ( (*p) == 116 ) + goto st16; + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 121 ) + goto tr25; + goto st0; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: + if ( (*p) == 97 ) + goto st18; + goto st0; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: + if ( (*p) == 78 ) + goto tr27; + goto st0; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: + if ( (*p) == 97 ) + goto st20; + goto st0; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: + if ( (*p) == 108 ) + goto st21; + goto st0; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: + if ( (*p) == 115 ) + goto st22; + goto st0; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: + if ( (*p) == 101 ) + goto tr31; + goto st0; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: + if ( (*p) == 117 ) + goto st24; + goto st0; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: + if ( (*p) == 108 ) + goto st25; + goto st0; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: + if ( (*p) == 108 ) + goto tr34; + goto st0; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: + if ( (*p) == 114 ) + goto st27; + goto st0; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: + if ( (*p) == 117 ) + goto st28; + goto st0; +st28: + if ( ++p == pe ) + goto _test_eof28; +case 28: + if ( (*p) == 101 ) + goto tr37; + goto st0; + } + _test_eof1: cs = 1; goto _test_eof; + _test_eof29: cs = 29; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof28: cs = 
28; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 292 "parser.rl" + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + + +#line 881 "parser.c" +enum {JSON_integer_start = 1}; +enum {JSON_integer_first_final = 3}; +enum {JSON_integer_error = 0}; + +enum {JSON_integer_en_main = 1}; + + +#line 308 "parser.rl" + + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + +#line 897 "parser.c" + { + cs = JSON_integer_start; + } + +#line 315 "parser.rl" + json->memo = p; + +#line 905 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 45: goto st2; + case 48: goto st3; + } + if ( 49 <= (*p) && (*p) <= 57 ) + goto st5; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + if ( (*p) == 48 ) + goto st3; + if ( 49 <= (*p) && (*p) <= 57 ) + goto st5; + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st0; + goto tr4; +tr4: +#line 305 "parser.rl" + { p--; {p++; cs = 4; goto _out;} } + goto st4; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: +#line 946 "parser.c" + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st5; + goto tr4; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 317 "parser.rl" + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + + +#line 980 "parser.c" +enum {JSON_float_start = 1}; +enum {JSON_float_first_final = 8}; +enum {JSON_float_error = 0}; + +enum {JSON_float_en_main = 1}; + + +#line 342 "parser.rl" + + +static int is_bigdecimal_class(VALUE obj) +{ + if (cBigDecimal == Qundef) { + if (rb_const_defined(rb_cObject, i_BigDecimal)) { + cBigDecimal = rb_const_get_at(rb_cObject, i_BigDecimal); + } + else { + return 0; + } + } + return obj == cBigDecimal; +} + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + +#line 1009 "parser.c" + { + cs = JSON_float_start; + } + +#line 362 "parser.rl" + json->memo = p; + +#line 1017 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 45: goto st2; + case 48: goto st3; + } + if ( 49 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + if ( (*p) == 48 ) + goto st3; + if ( 49 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + switch( (*p) ) { + case 46: goto st4; + case 69: goto st5; + case 101: goto st5; + } + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st8; + goto st0; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 69: goto st5; + case 101: goto st5; + } + if ( (*p) > 46 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st8; + } else if ( (*p) >= 45 ) + goto st0; + goto tr9; +tr9: +#line 336 "parser.rl" + { p--; {p++; cs = 9; goto _out;} } + goto st9; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: +#line 1082 "parser.c" + 
goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + switch( (*p) ) { + case 43: goto st6; + case 45: goto st6; + } + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + switch( (*p) ) { + case 69: goto st0; + case 101: goto st0; + } + if ( (*p) > 46 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + } else if ( (*p) >= 45 ) + goto st0; + goto tr9; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + switch( (*p) ) { + case 46: goto st4; + case 69: goto st5; + case 101: goto st5; + } + if ( 48 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 364 "parser.rl" + + if (cs >= JSON_float_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + if (NIL_P(json->decimal_class)) { + *result = rb_float_new(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } else { + VALUE text; + text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + if (is_bigdecimal_class(json->decimal_class)) { + *result = rb_funcall(Qnil, i_BigDecimal, 1, text); + } else { + *result = rb_funcall(json->decimal_class, i_new, 1, text); + } + } + return p + 1; + } else { + return NULL; + } +} + + + +#line 1169 "parser.c" +enum {JSON_array_start = 1}; +enum {JSON_array_first_final = 17}; +enum {JSON_array_error = 0}; + +enum {JSON_array_en_main = 1}; + + +#line 417 "parser.rl" + + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + +#line 1191 "parser.c" + { + cs = JSON_array_start; + } + +#line 430 "parser.rl" + +#line 1198 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 91 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 13: goto st2; + case 32: goto st2; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st13; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 93: goto tr4; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st2; + goto st0; +tr2: +#line 394 "parser.rl" + { + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + p--; {p++; cs = 3; goto _out;} + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + {p = (( np))-1;} + } + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 1257 "parser.c" + switch( (*p) ) { + case 13: goto st3; + case 32: goto st3; + case 44: goto st4; + case 47: goto st9; + case 93: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st3; + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 13: goto st4; + case 32: goto st4; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st5; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st4; + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + switch( (*p) ) { + case 42: goto st6; + case 47: goto st8; + } + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( (*p) == 42 ) + goto st7; + goto st6; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st4; + } + goto st6; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + if ( (*p) == 10 ) + goto st4; + goto st8; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + switch( (*p) ) { + case 42: goto st10; + case 47: goto st12; + } + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( (*p) == 42 ) + goto st11; + goto st10; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + switch( (*p) ) { + case 42: goto st11; + case 47: goto st3; + } + goto st10; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 10 ) + goto st3; + goto st12; +tr4: +#line 409 "parser.rl" + { p--; {p++; cs = 17; goto _out;} } + goto st17; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: +#line 1364 "parser.c" + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + switch( (*p) ) { + case 42: goto st14; + case 47: goto st16; + } + goto st0; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 42 ) + goto st15; + goto st14; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 42: goto st15; + case 47: goto st2; + } + goto st14; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 10 ) + goto st2; + goto st16; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + 
_test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 431 "parser.rl" + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd) +{ + char *p = string, *pe = string, *unescape; + int unescape_len; + char buf[4]; + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) rb_str_buf_cat(result, p, pe - p); + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + rb_str_buf_cat(result, unescape, unescape_len); + p = ++pe; + } else { + pe++; + } + } + rb_str_buf_cat(result, p, pe - p); + return result; +} + + +#line 1509 "parser.c" +enum {JSON_string_start = 1}; +enum {JSON_string_first_final = 8}; +enum {JSON_string_error = 0}; + +enum {JSON_string_en_main = 1}; + + +#line 538 "parser.rl" + + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + *result = rb_str_buf_new(0); + +#line 1539 "parser.c" + { + cs = JSON_string_start; + } + +#line 559 "parser.rl" + json->memo = p; + +#line 1547 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 34 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 34: goto tr2; + case 92: goto st3; + } + if ( 0 <= (signed char)(*p) && (*p) <= 31 ) + goto st0; + goto st2; +tr2: +#line 524 "parser.rl" + { + *result = json_string_unescape(*result, json->memo + 1, p); + if (NIL_P(*result)) { + p--; + 
{p++; cs = 8; goto _out;} + } else { + FORCE_UTF8(*result); + {p = (( p + 1))-1;} + } + } +#line 535 "parser.rl" + { p--; {p++; cs = 8; goto _out;} } + goto st8; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: +#line 1590 "parser.c" + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 117 ) + goto st4; + if ( 0 <= (signed char)(*p) && (*p) <= 31 ) + goto st0; + goto st2; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st5; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st5; + } else + goto st5; + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st6; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st6; + } else + goto st6; + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st7; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st7; + } else + goto st7; + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st2; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st2; + } else + goto st2; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 561 "parser.rl" + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (json->symbolize_names && json->parsing_name) { + *result = rb_str_intern(*result); + } else if (RB_TYPE_P(*result, T_STRING)) { + rb_str_resize(*result, RSTRING_LEN(*result)); + } + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. 
+ * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. + * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + + +#line 1853 "parser.c" +enum {JSON_start = 1}; +enum {JSON_first_final = 10}; +enum {JSON_error = 0}; + +enum {JSON_en_main = 1}; + + +#line 761 "parser.rl" + + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. + */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + +#line 1878 "parser.c" + { + cs = JSON_start; + } + +#line 777 "parser.rl" + p = json->source; + pe = p + json->len; + +#line 1887 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +st1: + if ( ++p == pe ) + goto _test_eof1; +case 1: + switch( (*p) ) { + case 13: goto st1; + case 32: goto st1; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st6; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st1; + goto st0; +st0: +cs = 0; + goto _out; +tr2: +#line 753 "parser.rl" + { + char *np = JSON_parse_value(json, p, pe, &result, 0); + if (np == NULL) { p--; {p++; cs = 10; goto _out;} } else {p = (( np))-1;} + } + goto st10; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: +#line 1931 "parser.c" + switch( (*p) ) { + case 13: goto st10; + case 32: goto st10; + case 47: goto st2; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st10; + goto st0; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 42: goto st3; + case 47: goto st5; + } + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 42 ) + goto st4; + goto st3; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st4; + case 47: goto st10; + } + goto st3; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 10 ) + goto st10; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st9; + } + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 42 ) + goto st8; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 42: goto st8; + case 47: goto st1; + } + goto st7; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 10 ) + goto st1; + goto st9; + } + _test_eof1: cs = 1; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 780 "parser.rl" + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", 
__LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. + */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_BigDecimal = rb_intern("BigDecimal"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.h b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.h new file mode 100644 index 0000000..e6cf779 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.h @@ -0,0 +1,91 @@ +#ifndef _PARSER_H_ +#define _PARSER_H_ + +#include "ruby.h" + +#ifndef HAVE_RUBY_RE_H 
+#include "re.h" +#endif + +#ifdef HAVE_RUBY_ST_H +#include "ruby/st.h" +#else +#include "st.h" +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode */ + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +typedef struct JSON_ParserStruct { + VALUE Vsource; + char *source; + long len; + char *memo; + VALUE create_id; + int max_nesting; + int allow_nan; + int parsing_name; + int symbolize_names; + VALUE object_class; + VALUE array_class; + VALUE decimal_class; + int create_additions; + VALUE match_string; + FBuffer *fbuffer; +} JSON_Parser; + +#define GET_PARSER \ + GET_PARSER_INIT; \ + if (!json->Vsource) rb_raise(rb_eTypeError, "uninitialized instance") +#define GET_PARSER_INIT \ + JSON_Parser *json; \ + TypedData_Get_Struct(self, JSON_Parser, &JSON_Parser_type, json) + +#define MinusInfinity "-Infinity" +#define EVIL 0x666 + +static UTF32 unescape_unicode(const unsigned char *p); +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch); +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd); +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result); +static VALUE convert_encoding(VALUE source); +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cParser_parse(VALUE self); +static void JSON_mark(void *json); +static void JSON_free(void *json); +static VALUE cJSON_parser_s_allocate(VALUE klass); +static VALUE cParser_source(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Parser_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, JSON_free, json) +#define TypedData_Get_Struct(self, JSON_Parser, ignore, json) Data_Get_Struct(self, JSON_Parser, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.rl b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.rl new file mode 100644 index 0000000..00a35bd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/ext/parser/parser.rl @@ -0,0 +1,897 @@ +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) 
+{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; +static VALUE cBigDecimal = Qundef; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_BigDecimal; + +%%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft\"\-\[\{NI] | digit; + begin_object = '{'; + 
end_object = '}'; + begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; +}%% + +%%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + fexec np; + } + } + + action parse_name { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, fpc, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + + pair = ignore* begin_name >parse_name ignore* name_separator ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object + (pair (next_pair)*)? ignore* + end_object + ) @exit; +}%% + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + %% write init; + %% write exec; + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + *result = Qnil; + } + action parse_false { + *result = Qfalse; + } + action parse_true { + *result = Qtrue; + } + action parse_nan { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + action parse_infinity { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + action parse_string { + char *np = JSON_parse_string(json, fpc, pe, result); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_number { + char *np; + if(pe > fpc + 8 && !strncmp(MinusInfinity, fpc, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + fexec p + 10; + fhold; fbreak; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, fpc, pe, result); + if (np != NULL) fexec np; + np = JSON_parse_integer(json, fpc, pe, result); + if (np != NULL) fexec np; + fhold; fbreak; + } + + action parse_array { + char *np; + np = JSON_parse_array(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_object { + char *np; + np = JSON_parse_object(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + +main 
:= ignore* ( + Vnull @parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) ignore* %*exit; +}%% + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + %% write init; + %% write exec; + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + +%%{ + machine JSON_integer; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ('0' | [1-9][0-9]*) (^[0-9]? @exit); +}%% + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + +%%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ( + (('0' | [1-9][0-9]*) '.' [0-9]+ ([Ee] [+\-]?[0-9]+)?) + | (('0' | [1-9][0-9]*) ([Ee] [+\-]?[0-9]+)) + ) (^[0-9Ee.\-]? @exit ); +}%% + +static int is_bigdecimal_class(VALUE obj) +{ + if (cBigDecimal == Qundef) { + if (rb_const_defined(rb_cObject, i_BigDecimal)) { + cBigDecimal = rb_const_get_at(rb_cObject, i_BigDecimal); + } + else { + return 0; + } + } + return obj == cBigDecimal; +} + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_float_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + if (NIL_P(json->decimal_class)) { + *result = rb_float_new(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } else { + VALUE text; + text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + if (is_bigdecimal_class(json->decimal_class)) { + *result = rb_funcall(Qnil, i_BigDecimal, 1, text); + } else { + *result = rb_funcall(json->decimal_class, i_new, 1, text); + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + fexec np; + } + } + + action exit { fhold; fbreak; } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array ignore* + ((begin_value >parse_value ignore*) + (ignore* next_element ignore*)*)? + end_array @exit; +}%% + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + %% write init; + %% write exec; + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd) +{ + char *p = string, *pe = string, *unescape; + int unescape_len; + char buf[4]; + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) rb_str_buf_cat(result, p, pe - p); + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + rb_str_buf_cat(result, unescape, unescape_len); + p = ++pe; + } else { + pe++; + } + } + rb_str_buf_cat(result, p, pe - p); + return result; +} + +%%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + *result = json_string_unescape(*result, json->memo + 1, p); + if (NIL_P(*result)) { + fhold; + fbreak; + } else { + FORCE_UTF8(*result); + fexec p + 1; + } + } + + action exit { fhold; fbreak; } + + main := '"' ((^([\"\\] | 0..0x1f) | '\\'[\"\\/bfnrt] | '\\u'[0-9a-fA-F]{4} | '\\'^([\"\\/bfnrtu]|0..0x1f))* %parse_string) '"' @exit; +}%% + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + *result = rb_str_buf_new(0); + %% write init; + json->memo = p; + %% write exec; + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (json->symbolize_names && json->parsing_name) { + *result = rb_str_intern(*result); + } else if (RB_TYPE_P(*result, T_STRING)) { + rb_str_resize(*result, RSTRING_LEN(*result)); + } + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. 
It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. + * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. + * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 
1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + +%%{ + machine JSON; + + write data; + + include JSON_common; + + action parse_value { + char *np = JSON_parse_value(json, fpc, pe, &result, 0); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + main := ignore* ( + begin_value >parse_value + ) ignore*; +}%% + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. 
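As a point of reference for the option keys documented in the call-seq above, here is a minimal Ruby usage sketch. It is not part of the vendored gem; it assumes the C extension is the active JSON backend (as it is on MRI), and the same option keys are accepted by JSON.parse, which delegates to the configured parser class.

    require 'json'

    source = '{"a": 1, "b": [2.5, null]}'
    parser = JSON::Ext::Parser.new(source,
                                   max_nesting:     10,    # false/nil/0 disables the depth check
                                   allow_nan:       false, # NaN/Infinity/-Infinity stay rejected
                                   symbolize_names: true)  # object keys come back as Symbols
    parser.parse   # => {:a=>1, :b=>[2.5, nil]}
    parser.source  # => a copy of the source string passed to new

    begin
      JSON.parse('[[[1]]]', max_nesting: 2)
    rescue JSON::NestingError => e
      e.message    # => "nesting of 3 is too deep"
    end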
+ */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + %% write init; + p = json->source; + pe = p + json->len; + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. + */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_BigDecimal = rb_intern("BigDecimal"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git 
a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/extconf 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/extconf 2.rb new file mode 100644 index 0000000..7595d58 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/extconf 2.rb @@ -0,0 +1,2 @@ +require 'mkmf' +create_makefile('json') diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/extconf.rb new file mode 100644 index 0000000..7595d58 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/ext/json/extconf.rb @@ -0,0 +1,2 @@ +require 'mkmf' +create_makefile('json') diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/install 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/install 2.rb new file mode 100755 index 0000000..9d3ae12 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/install 2.rb @@ -0,0 +1,23 @@ +#!/usr/bin/env ruby + +require 'fileutils' +include FileUtils::Verbose +require 'rbconfig' +include\ + begin + RbConfig + rescue NameError + Config + end + +sitelibdir = CONFIG["sitelibdir"] +cd 'lib' do + install('json.rb', sitelibdir) + mkdir_p File.join(sitelibdir, 'json') + for file in Dir['json/**/*}'] + d = File.join(sitelibdir, file) + mkdir_p File.dirname(d) + install(file, d) + end +end +warn " *** Installed PURE ruby library." diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/install.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/install.rb new file mode 100755 index 0000000..9d3ae12 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/install.rb @@ -0,0 +1,23 @@ +#!/usr/bin/env ruby + +require 'fileutils' +include FileUtils::Verbose +require 'rbconfig' +include\ + begin + RbConfig + rescue NameError + Config + end + +sitelibdir = CONFIG["sitelibdir"] +cd 'lib' do + install('json.rb', sitelibdir) + mkdir_p File.join(sitelibdir, 'json') + for file in Dir['json/**/*}'] + d = File.join(sitelibdir, file) + mkdir_p File.dirname(d) + install(file, d) + end +end +warn " *** Installed PURE ruby library." diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder 2.java new file mode 100644 index 0000000..6f6ab66 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder 2.java @@ -0,0 +1,166 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * A class specialized in transcoding a certain String format into another, + * using UTF-8 ByteLists as both input and output. + */ +abstract class ByteListTranscoder { + protected final ThreadContext context; + + protected ByteList src; + protected int srcEnd; + /** Position where the last read character started */ + protected int charStart; + /** Position of the next character to read */ + protected int pos; + + private ByteList out; + /** + * When a character that can be copied straight into the output is found, + * its index is stored on this variable, and copying is delayed until + * the sequence of characters that can be copied ends. + * + *
The variable stores -1 when not in a plain sequence. + */ + private int quoteStart = -1; + + protected ByteListTranscoder(ThreadContext context) { + this.context = context; + } + + protected void init(ByteList src, ByteList out) { + this.init(src, 0, src.length(), out); + } + + protected void init(ByteList src, int start, int end, ByteList out) { + this.src = src; + this.pos = start; + this.charStart = start; + this.srcEnd = end; + this.out = out; + } + + /** + * Returns whether there are any characters left to be read. + */ + protected boolean hasNext() { + return pos < srcEnd; + } + + /** + * Returns the next character in the buffer. + */ + private char next() { + return src.charAt(pos++); + } + + /** + * Reads an UTF-8 character from the input and returns its code point, + * while advancing the input position. + * + *
Raises an {@link #invalidUtf8()} exception if an invalid byte + * is found. + */ + protected int readUtf8Char() { + charStart = pos; + char head = next(); + if (head <= 0x7f) { // 0b0xxxxxxx (ASCII) + return head; + } + if (head <= 0xbf) { // 0b10xxxxxx + throw invalidUtf8(); // tail byte with no head + } + if (head <= 0xdf) { // 0b110xxxxx + ensureMin(1); + int cp = ((head & 0x1f) << 6) + | nextPart(); + if (cp < 0x0080) throw invalidUtf8(); + return cp; + } + if (head <= 0xef) { // 0b1110xxxx + ensureMin(2); + int cp = ((head & 0x0f) << 12) + | (nextPart() << 6) + | nextPart(); + if (cp < 0x0800) throw invalidUtf8(); + return cp; + } + if (head <= 0xf7) { // 0b11110xxx + ensureMin(3); + int cp = ((head & 0x07) << 18) + | (nextPart() << 12) + | (nextPart() << 6) + | nextPart(); + if (!Character.isValidCodePoint(cp)) throw invalidUtf8(); + return cp; + } + // 0b11111xxx? + throw invalidUtf8(); + } + + /** + * Throws a GeneratorError if the input list doesn't have at least this + * many bytes left. + */ + protected void ensureMin(int n) { + if (pos + n > srcEnd) throw incompleteUtf8(); + } + + /** + * Reads the next byte of a multi-byte UTF-8 character and returns its + * contents (lower 6 bits). + * + *
Throws a GeneratorError if the byte is not a valid tail. + */ + private int nextPart() { + char c = next(); + // tail bytes must be 0b10xxxxxx + if ((c & 0xc0) != 0x80) throw invalidUtf8(); + return c & 0x3f; + } + + + protected void quoteStart() { + if (quoteStart == -1) quoteStart = charStart; + } + + /** + * When in a sequence of characters that can be copied directly, + * interrupts the sequence and copies it to the output buffer. + * + * @param endPos The offset until which the direct character quoting should + * occur. You may pass {@link #pos} to quote until the most + * recently read character, or {@link #charStart} to quote + * until the character before it. + */ + protected void quoteStop(int endPos) { + if (quoteStart != -1) { + out.append(src, quoteStart, endPos - quoteStart); + quoteStart = -1; + } + } + + protected void append(int b) { + out.append(b); + } + + protected void append(byte[] origin, int start, int length) { + out.append(origin, start, length); + } + + + protected abstract RaiseException invalidUtf8(); + + protected RaiseException incompleteUtf8() { + return invalidUtf8(); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder.java new file mode 100644 index 0000000..6f6ab66 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder.java @@ -0,0 +1,166 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * A class specialized in transcoding a certain String format into another, + * using UTF-8 ByteLists as both input and output. + */ +abstract class ByteListTranscoder { + protected final ThreadContext context; + + protected ByteList src; + protected int srcEnd; + /** Position where the last read character started */ + protected int charStart; + /** Position of the next character to read */ + protected int pos; + + private ByteList out; + /** + * When a character that can be copied straight into the output is found, + * its index is stored on this variable, and copying is delayed until + * the sequence of characters that can be copied ends. + * + *
The variable stores -1 when not in a plain sequence. + */ + private int quoteStart = -1; + + protected ByteListTranscoder(ThreadContext context) { + this.context = context; + } + + protected void init(ByteList src, ByteList out) { + this.init(src, 0, src.length(), out); + } + + protected void init(ByteList src, int start, int end, ByteList out) { + this.src = src; + this.pos = start; + this.charStart = start; + this.srcEnd = end; + this.out = out; + } + + /** + * Returns whether there are any characters left to be read. + */ + protected boolean hasNext() { + return pos < srcEnd; + } + + /** + * Returns the next character in the buffer. + */ + private char next() { + return src.charAt(pos++); + } + + /** + * Reads an UTF-8 character from the input and returns its code point, + * while advancing the input position. + * + *
Raises an {@link #invalidUtf8()} exception if an invalid byte + * is found. + */ + protected int readUtf8Char() { + charStart = pos; + char head = next(); + if (head <= 0x7f) { // 0b0xxxxxxx (ASCII) + return head; + } + if (head <= 0xbf) { // 0b10xxxxxx + throw invalidUtf8(); // tail byte with no head + } + if (head <= 0xdf) { // 0b110xxxxx + ensureMin(1); + int cp = ((head & 0x1f) << 6) + | nextPart(); + if (cp < 0x0080) throw invalidUtf8(); + return cp; + } + if (head <= 0xef) { // 0b1110xxxx + ensureMin(2); + int cp = ((head & 0x0f) << 12) + | (nextPart() << 6) + | nextPart(); + if (cp < 0x0800) throw invalidUtf8(); + return cp; + } + if (head <= 0xf7) { // 0b11110xxx + ensureMin(3); + int cp = ((head & 0x07) << 18) + | (nextPart() << 12) + | (nextPart() << 6) + | nextPart(); + if (!Character.isValidCodePoint(cp)) throw invalidUtf8(); + return cp; + } + // 0b11111xxx? + throw invalidUtf8(); + } + + /** + * Throws a GeneratorError if the input list doesn't have at least this + * many bytes left. + */ + protected void ensureMin(int n) { + if (pos + n > srcEnd) throw incompleteUtf8(); + } + + /** + * Reads the next byte of a multi-byte UTF-8 character and returns its + * contents (lower 6 bits). + * + *
Throws a GeneratorError if the byte is not a valid tail. + */ + private int nextPart() { + char c = next(); + // tail bytes must be 0b10xxxxxx + if ((c & 0xc0) != 0x80) throw invalidUtf8(); + return c & 0x3f; + } + + + protected void quoteStart() { + if (quoteStart == -1) quoteStart = charStart; + } + + /** + * When in a sequence of characters that can be copied directly, + * interrupts the sequence and copies it to the output buffer. + * + * @param endPos The offset until which the direct character quoting should + * occur. You may pass {@link #pos} to quote until the most + * recently read character, or {@link #charStart} to quote + * until the character before it. + */ + protected void quoteStop(int endPos) { + if (quoteStart != -1) { + out.append(src, quoteStart, endPos - quoteStart); + quoteStart = -1; + } + } + + protected void append(int b) { + out.append(b); + } + + protected void append(byte[] origin, int start, int length) { + out.append(origin, start, length); + } + + + protected abstract RaiseException invalidUtf8(); + + protected RaiseException incompleteUtf8() { + return invalidUtf8(); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Generator 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Generator 2.java new file mode 100644 index 0000000..24bf049 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Generator 2.java @@ -0,0 +1,466 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyBasicObject; +import org.jruby.RubyBignum; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.runtime.ClassIndex; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +public final class Generator { + private Generator() { + throw new RuntimeException(); + } + + /** + * Encodes the given object as a JSON string, using the given handler. + */ + static RubyString + generateJson(ThreadContext context, T object, + Handler handler, IRubyObject[] args) { + Session session = new Session(context, args.length > 0 ? args[0] + : null); + return session.infect(handler.generateNew(session, object)); + } + + /** + * Encodes the given object as a JSON string, detecting the appropriate handler + * for the given object. + */ + static RubyString + generateJson(ThreadContext context, T object, IRubyObject[] args) { + Handler handler = getHandlerFor(context.getRuntime(), object); + return generateJson(context, object, handler, args); + } + + /** + * Encodes the given object as a JSON string, using the appropriate + * handler if one is found or calling #to_json if not. + */ + public static RubyString + generateJson(ThreadContext context, T object, + GeneratorState config) { + Session session = new Session(context, config); + Handler handler = getHandlerFor(context.getRuntime(), object); + return handler.generateNew(session, object); + } + + // NOTE: drop this once Ruby 1.9.3 support is gone! 
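A rough Ruby-level illustration of the dispatch described above: the getHandlerFor switch that follows matches on the exact class, so core classes use the built-in handlers while everything else, including subclasses of Hash, Array and String, is routed through the generic #to_json path (the C extension behaves analogously). The subclass name below is made up for illustration.

    require 'json'

    # Core classes are matched exactly and use the built-in handlers.
    JSON.generate([1, 2.5, "three", nil, true, { "k" => false }])
    # => "[1,2.5,\"three\",null,true,{\"k\":false}]"

    # A String subclass is not matched by the string handler, so it goes
    # through the generic #to_json path; the output is the same JSON string.
    class OrderId < String; end
    JSON.generate([OrderId.new("abc123")])  # => "[\"abc123\"]"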
+ private static final int FIXNUM = 1; + private static final int BIGNUM = 2; + private static final int ARRAY = 3; + private static final int STRING = 4; + private static final int NIL = 5; + private static final int TRUE = 6; + private static final int FALSE = 7; + private static final int HASH = 10; + private static final int FLOAT = 11; + // hard-coded due JRuby 1.7 compatibility + // https://github.com/jruby/jruby/blob/1.7.27/core/src/main/java/org/jruby/runtime/ClassIndex.java + + /** + * Returns the best serialization handler for the given object. + */ + // Java's generics can't handle this satisfactorily, so I'll just leave + // the best I could get and ignore the warnings + @SuppressWarnings("unchecked") + private static + Handler getHandlerFor(Ruby runtime, T object) { + switch (((RubyBasicObject) object).getNativeTypeIndex()) { + // can not use getNativeClassIndex due 1.7 compatibility + case NIL : return (Handler) NIL_HANDLER; + case TRUE : return (Handler) TRUE_HANDLER; + case FALSE : return (Handler) FALSE_HANDLER; + case FLOAT : return (Handler) FLOAT_HANDLER; + case FIXNUM : return (Handler) FIXNUM_HANDLER; + case BIGNUM : return (Handler) BIGNUM_HANDLER; + case STRING : + if (((RubyBasicObject) object).getMetaClass() != runtime.getString()) break; + return (Handler) STRING_HANDLER; + case ARRAY : + if (((RubyBasicObject) object).getMetaClass() != runtime.getArray()) break; + return (Handler) ARRAY_HANDLER; + case HASH : + if (((RubyBasicObject) object).getMetaClass() != runtime.getHash()) break; + return (Handler) HASH_HANDLER; + } + return GENERIC_HANDLER; + } + + + /* Generator context */ + + /** + * A class that concentrates all the information that is shared by + * generators working on a single session. + * + *
A session is defined as the process of serializing a single root + * object; any handler directly called by container handlers (arrays and + * hashes/objects) shares this object with its caller. + * + *
Note that anything called indirectly (via {@link GENERIC_HANDLER}) + * won't be part of the session. + */ + static class Session { + private final ThreadContext context; + private GeneratorState state; + private IRubyObject possibleState; + private RuntimeInfo info; + private StringEncoder stringEncoder; + + private boolean tainted = false; + private boolean untrusted = false; + + Session(ThreadContext context, GeneratorState state) { + this.context = context; + this.state = state; + } + + Session(ThreadContext context, IRubyObject possibleState) { + this.context = context; + this.possibleState = possibleState == null || possibleState.isNil() + ? null : possibleState; + } + + public ThreadContext getContext() { + return context; + } + + public Ruby getRuntime() { + return context.getRuntime(); + } + + public GeneratorState getState() { + if (state == null) { + state = GeneratorState.fromState(context, getInfo(), possibleState); + } + return state; + } + + public RuntimeInfo getInfo() { + if (info == null) info = RuntimeInfo.forRuntime(getRuntime()); + return info; + } + + public StringEncoder getStringEncoder() { + if (stringEncoder == null) { + stringEncoder = new StringEncoder(context, getState().asciiOnly()); + } + return stringEncoder; + } + + public void infectBy(IRubyObject object) { + if (object.isTaint()) tainted = true; + if (object.isUntrusted()) untrusted = true; + } + + public T infect(T object) { + if (tainted) object.setTaint(true); + if (untrusted) object.setUntrusted(true); + return object; + } + } + + + /* Handler base classes */ + + private static abstract class Handler { + /** + * Returns an estimative of how much space the serialization of the + * given object will take. Used for allocating enough buffer space + * before invoking other methods. + */ + int guessSize(Session session, T object) { + return 4; + } + + RubyString generateNew(Session session, T object) { + RubyString result; + ByteList buffer = new ByteList(guessSize(session, object)); + generate(session, object, buffer); + result = RubyString.newString(session.getRuntime(), buffer); + ThreadContext context = session.getContext(); + RuntimeInfo info = session.getInfo(); + result.force_encoding(context, info.utf8.get()); + return result; + } + + abstract void generate(Session session, T object, ByteList buffer); + } + + /** + * A handler that returns a fixed keyword regardless of the passed object. 
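The keyword handlers defined below cover the three JSON literals without inspecting the object at all. A quick Ruby-level check of what they produce (json 2.x accepts scalar roots):

    require 'json'

    JSON.generate(nil)          # => "null"
    JSON.generate(true)         # => "true"
    JSON.generate(false)        # => "false"
    [nil, true, false].to_json  # => "[null,true,false]"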
+ */ + private static class KeywordHandler + extends Handler { + private final ByteList keyword; + + private KeywordHandler(String keyword) { + this.keyword = new ByteList(ByteList.plain(keyword), false); + } + + @Override + int guessSize(Session session, T object) { + return keyword.length(); + } + + @Override + RubyString generateNew(Session session, T object) { + return RubyString.newStringShared(session.getRuntime(), keyword); + } + + @Override + void generate(Session session, T object, ByteList buffer) { + buffer.append(keyword); + } + } + + + /* Handlers */ + + static final Handler BIGNUM_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyBignum object, ByteList buffer) { + // JRUBY-4751: RubyBignum.to_s() returns generic object + // representation (fixed in 1.5, but we maintain backwards + // compatibility; call to_s(IRubyObject[]) then + buffer.append(((RubyString)object.to_s(IRubyObject.NULL_ARRAY)).getByteList()); + } + }; + + static final Handler FIXNUM_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyFixnum object, ByteList buffer) { + buffer.append(object.to_s().getByteList()); + } + }; + + static final Handler FLOAT_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyFloat object, ByteList buffer) { + double value = RubyFloat.num2dbl(object); + + if (Double.isInfinite(value) || Double.isNaN(value)) { + if (!session.getState().allowNaN()) { + throw Utils.newException(session.getContext(), + Utils.M_GENERATOR_ERROR, + object + " not allowed in JSON"); + } + } + buffer.append(((RubyString)object.to_s()).getByteList()); + } + }; + + static final Handler ARRAY_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyArray object) { + GeneratorState state = session.getState(); + int depth = state.getDepth(); + int perItem = + 4 // prealloc + + (depth + 1) * state.getIndent().length() // indent + + 1 + state.getArrayNl().length(); // ',' arrayNl + return 2 + object.size() * perItem; + } + + @Override + void generate(Session session, RubyArray object, ByteList buffer) { + ThreadContext context = session.getContext(); + Ruby runtime = context.getRuntime(); + GeneratorState state = session.getState(); + int depth = state.increaseDepth(); + + ByteList indentUnit = state.getIndent(); + byte[] shift = Utils.repeat(indentUnit, depth); + + ByteList arrayNl = state.getArrayNl(); + byte[] delim = new byte[1 + arrayNl.length()]; + delim[0] = ','; + System.arraycopy(arrayNl.unsafeBytes(), arrayNl.begin(), delim, 1, + arrayNl.length()); + + session.infectBy(object); + + buffer.append((byte)'['); + buffer.append(arrayNl); + boolean firstItem = true; + for (int i = 0, t = object.getLength(); i < t; i++) { + IRubyObject element = object.eltInternal(i); + session.infectBy(element); + if (firstItem) { + firstItem = false; + } else { + buffer.append(delim); + } + buffer.append(shift); + Handler handler = getHandlerFor(runtime, element); + handler.generate(session, element, buffer); + } + + state.decreaseDepth(); + if (arrayNl.length() != 0) { + buffer.append(arrayNl); + buffer.append(shift, 0, state.getDepth() * indentUnit.length()); + } + + buffer.append((byte)']'); + } + }; + + static final Handler HASH_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyHash object) { + GeneratorState state = session.getState(); + int perItem = + 12 // key, colon, comma + + (state.getDepth() + 1) * state.getIndent().length() + + state.getSpaceBefore().length() + + state.getSpace().length(); + 
return 2 + object.size() * perItem; + } + + @Override + void generate(final Session session, RubyHash object, + final ByteList buffer) { + ThreadContext context = session.getContext(); + final Ruby runtime = context.getRuntime(); + final GeneratorState state = session.getState(); + final int depth = state.increaseDepth(); + + final ByteList objectNl = state.getObjectNl(); + final byte[] indent = Utils.repeat(state.getIndent(), depth); + final ByteList spaceBefore = state.getSpaceBefore(); + final ByteList space = state.getSpace(); + + buffer.append((byte)'{'); + buffer.append(objectNl); + object.visitAll(new RubyHash.Visitor() { + private boolean firstPair = true; + + @Override + public void visit(IRubyObject key, IRubyObject value) { + if (firstPair) { + firstPair = false; + } else { + buffer.append((byte)','); + buffer.append(objectNl); + } + if (objectNl.length() != 0) buffer.append(indent); + + STRING_HANDLER.generate(session, key.asString(), buffer); + session.infectBy(key); + + buffer.append(spaceBefore); + buffer.append((byte)':'); + buffer.append(space); + + Handler valueHandler = getHandlerFor(runtime, value); + valueHandler.generate(session, value, buffer); + session.infectBy(value); + } + }); + state.decreaseDepth(); + if (objectNl.length() != 0) { + buffer.append(objectNl); + buffer.append(Utils.repeat(state.getIndent(), state.getDepth())); + } + buffer.append((byte)'}'); + } + }; + + static final Handler STRING_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyString object) { + // for most applications, most strings will be just a set of + // printable ASCII characters without any escaping, so let's + // just allocate enough space for that + the quotes + return 2 + object.getByteList().length(); + } + + @Override + void generate(Session session, RubyString object, ByteList buffer) { + RuntimeInfo info = session.getInfo(); + RubyString src; + + if (object.encoding(session.getContext()) != info.utf8.get()) { + src = (RubyString)object.encode(session.getContext(), + info.utf8.get()); + } else { + src = object; + } + + session.getStringEncoder().encode(src.getByteList(), buffer); + } + }; + + static final Handler TRUE_HANDLER = + new KeywordHandler("true"); + static final Handler FALSE_HANDLER = + new KeywordHandler("false"); + static final Handler NIL_HANDLER = + new KeywordHandler("null"); + + /** + * The default handler (Object#to_json): coerces the object + * to string using #to_s, and serializes that string. + */ + static final Handler OBJECT_HANDLER = + new Handler() { + @Override + RubyString generateNew(Session session, IRubyObject object) { + RubyString str = object.asString(); + return STRING_HANDLER.generateNew(session, str); + } + + @Override + void generate(Session session, IRubyObject object, ByteList buffer) { + RubyString str = object.asString(); + STRING_HANDLER.generate(session, str, buffer); + } + }; + + /** + * A handler that simply calls #to_json(state) on the + * given object. 
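A sketch of how an application object hooks into the generic handler just described: the to_json hook receives the current generator state and must return a String (the handler raises a TypeError otherwise). The Temperature class here is a made-up example, not part of the gem.

    require 'json'

    class Temperature
      def initialize(celsius)
        @celsius = celsius
      end

      # The generic handler passes the live generator state along; forwarding
      # it keeps nesting-depth checks and formatting settings consistent.
      def to_json(state = nil)
        { "celsius" => @celsius, "unit" => "C" }.to_json(state)
      end
    end

    JSON.generate(Temperature.new(21))  # => "{\"celsius\":21,\"unit\":\"C\"}"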
+ */ + static final Handler GENERIC_HANDLER = + new Handler() { + @Override + RubyString generateNew(Session session, IRubyObject object) { + if (object.respondsTo("to_json")) { + IRubyObject result = object.callMethod(session.getContext(), "to_json", + new IRubyObject[] {session.getState()}); + if (result instanceof RubyString) return (RubyString)result; + throw session.getRuntime().newTypeError("to_json must return a String"); + } else { + return OBJECT_HANDLER.generateNew(session, object); + } + } + + @Override + void generate(Session session, IRubyObject object, ByteList buffer) { + RubyString result = generateNew(session, object); + buffer.append(result.getByteList()); + } + }; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Generator.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Generator.java new file mode 100644 index 0000000..24bf049 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Generator.java @@ -0,0 +1,466 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyBasicObject; +import org.jruby.RubyBignum; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.runtime.ClassIndex; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +public final class Generator { + private Generator() { + throw new RuntimeException(); + } + + /** + * Encodes the given object as a JSON string, using the given handler. + */ + static RubyString + generateJson(ThreadContext context, T object, + Handler handler, IRubyObject[] args) { + Session session = new Session(context, args.length > 0 ? args[0] + : null); + return session.infect(handler.generateNew(session, object)); + } + + /** + * Encodes the given object as a JSON string, detecting the appropriate handler + * for the given object. + */ + static RubyString + generateJson(ThreadContext context, T object, IRubyObject[] args) { + Handler handler = getHandlerFor(context.getRuntime(), object); + return generateJson(context, object, handler, args); + } + + /** + * Encodes the given object as a JSON string, using the appropriate + * handler if one is found or calling #to_json if not. + */ + public static RubyString + generateJson(ThreadContext context, T object, + GeneratorState config) { + Session session = new Session(context, config); + Handler handler = getHandlerFor(context.getRuntime(), object); + return handler.generateNew(session, object); + } + + // NOTE: drop this once Ruby 1.9.3 support is gone! + private static final int FIXNUM = 1; + private static final int BIGNUM = 2; + private static final int ARRAY = 3; + private static final int STRING = 4; + private static final int NIL = 5; + private static final int TRUE = 6; + private static final int FALSE = 7; + private static final int HASH = 10; + private static final int FLOAT = 11; + // hard-coded due JRuby 1.7 compatibility + // https://github.com/jruby/jruby/blob/1.7.27/core/src/main/java/org/jruby/runtime/ClassIndex.java + + /** + * Returns the best serialization handler for the given object. 
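The array and hash handlers shown above build their output around the generator state's indent, space and newline strings. From Ruby these correspond to the usual State options; a small sketch, roughly what JSON.pretty_generate configures:

    require 'json'

    data = { "name" => "Zephyr", "tags" => ["defaults", "sync"] }

    JSON.generate(data)
    # => "{\"name\":\"Zephyr\",\"tags\":[\"defaults\",\"sync\"]}"

    # indent / space / object_nl / array_nl are the strings the handlers
    # append between tokens.
    JSON.generate(data, indent: "  ", space: " ", object_nl: "\n", array_nl: "\n")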
+ */ + // Java's generics can't handle this satisfactorily, so I'll just leave + // the best I could get and ignore the warnings + @SuppressWarnings("unchecked") + private static + Handler getHandlerFor(Ruby runtime, T object) { + switch (((RubyBasicObject) object).getNativeTypeIndex()) { + // can not use getNativeClassIndex due 1.7 compatibility + case NIL : return (Handler) NIL_HANDLER; + case TRUE : return (Handler) TRUE_HANDLER; + case FALSE : return (Handler) FALSE_HANDLER; + case FLOAT : return (Handler) FLOAT_HANDLER; + case FIXNUM : return (Handler) FIXNUM_HANDLER; + case BIGNUM : return (Handler) BIGNUM_HANDLER; + case STRING : + if (((RubyBasicObject) object).getMetaClass() != runtime.getString()) break; + return (Handler) STRING_HANDLER; + case ARRAY : + if (((RubyBasicObject) object).getMetaClass() != runtime.getArray()) break; + return (Handler) ARRAY_HANDLER; + case HASH : + if (((RubyBasicObject) object).getMetaClass() != runtime.getHash()) break; + return (Handler) HASH_HANDLER; + } + return GENERIC_HANDLER; + } + + + /* Generator context */ + + /** + * A class that concentrates all the information that is shared by + * generators working on a single session. + * + *
A session is defined as the process of serializing a single root + * object; any handler directly called by container handlers (arrays and + * hashes/objects) shares this object with its caller. + * + *
Note that anything called indirectly (via {@link GENERIC_HANDLER}) + * won't be part of the session. + */ + static class Session { + private final ThreadContext context; + private GeneratorState state; + private IRubyObject possibleState; + private RuntimeInfo info; + private StringEncoder stringEncoder; + + private boolean tainted = false; + private boolean untrusted = false; + + Session(ThreadContext context, GeneratorState state) { + this.context = context; + this.state = state; + } + + Session(ThreadContext context, IRubyObject possibleState) { + this.context = context; + this.possibleState = possibleState == null || possibleState.isNil() + ? null : possibleState; + } + + public ThreadContext getContext() { + return context; + } + + public Ruby getRuntime() { + return context.getRuntime(); + } + + public GeneratorState getState() { + if (state == null) { + state = GeneratorState.fromState(context, getInfo(), possibleState); + } + return state; + } + + public RuntimeInfo getInfo() { + if (info == null) info = RuntimeInfo.forRuntime(getRuntime()); + return info; + } + + public StringEncoder getStringEncoder() { + if (stringEncoder == null) { + stringEncoder = new StringEncoder(context, getState().asciiOnly()); + } + return stringEncoder; + } + + public void infectBy(IRubyObject object) { + if (object.isTaint()) tainted = true; + if (object.isUntrusted()) untrusted = true; + } + + public T infect(T object) { + if (tainted) object.setTaint(true); + if (untrusted) object.setUntrusted(true); + return object; + } + } + + + /* Handler base classes */ + + private static abstract class Handler { + /** + * Returns an estimative of how much space the serialization of the + * given object will take. Used for allocating enough buffer space + * before invoking other methods. + */ + int guessSize(Session session, T object) { + return 4; + } + + RubyString generateNew(Session session, T object) { + RubyString result; + ByteList buffer = new ByteList(guessSize(session, object)); + generate(session, object, buffer); + result = RubyString.newString(session.getRuntime(), buffer); + ThreadContext context = session.getContext(); + RuntimeInfo info = session.getInfo(); + result.force_encoding(context, info.utf8.get()); + return result; + } + + abstract void generate(Session session, T object, ByteList buffer); + } + + /** + * A handler that returns a fixed keyword regardless of the passed object. 
+ */ + private static class KeywordHandler + extends Handler { + private final ByteList keyword; + + private KeywordHandler(String keyword) { + this.keyword = new ByteList(ByteList.plain(keyword), false); + } + + @Override + int guessSize(Session session, T object) { + return keyword.length(); + } + + @Override + RubyString generateNew(Session session, T object) { + return RubyString.newStringShared(session.getRuntime(), keyword); + } + + @Override + void generate(Session session, T object, ByteList buffer) { + buffer.append(keyword); + } + } + + + /* Handlers */ + + static final Handler BIGNUM_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyBignum object, ByteList buffer) { + // JRUBY-4751: RubyBignum.to_s() returns generic object + // representation (fixed in 1.5, but we maintain backwards + // compatibility; call to_s(IRubyObject[]) then + buffer.append(((RubyString)object.to_s(IRubyObject.NULL_ARRAY)).getByteList()); + } + }; + + static final Handler FIXNUM_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyFixnum object, ByteList buffer) { + buffer.append(object.to_s().getByteList()); + } + }; + + static final Handler FLOAT_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyFloat object, ByteList buffer) { + double value = RubyFloat.num2dbl(object); + + if (Double.isInfinite(value) || Double.isNaN(value)) { + if (!session.getState().allowNaN()) { + throw Utils.newException(session.getContext(), + Utils.M_GENERATOR_ERROR, + object + " not allowed in JSON"); + } + } + buffer.append(((RubyString)object.to_s()).getByteList()); + } + }; + + static final Handler ARRAY_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyArray object) { + GeneratorState state = session.getState(); + int depth = state.getDepth(); + int perItem = + 4 // prealloc + + (depth + 1) * state.getIndent().length() // indent + + 1 + state.getArrayNl().length(); // ',' arrayNl + return 2 + object.size() * perItem; + } + + @Override + void generate(Session session, RubyArray object, ByteList buffer) { + ThreadContext context = session.getContext(); + Ruby runtime = context.getRuntime(); + GeneratorState state = session.getState(); + int depth = state.increaseDepth(); + + ByteList indentUnit = state.getIndent(); + byte[] shift = Utils.repeat(indentUnit, depth); + + ByteList arrayNl = state.getArrayNl(); + byte[] delim = new byte[1 + arrayNl.length()]; + delim[0] = ','; + System.arraycopy(arrayNl.unsafeBytes(), arrayNl.begin(), delim, 1, + arrayNl.length()); + + session.infectBy(object); + + buffer.append((byte)'['); + buffer.append(arrayNl); + boolean firstItem = true; + for (int i = 0, t = object.getLength(); i < t; i++) { + IRubyObject element = object.eltInternal(i); + session.infectBy(element); + if (firstItem) { + firstItem = false; + } else { + buffer.append(delim); + } + buffer.append(shift); + Handler handler = getHandlerFor(runtime, element); + handler.generate(session, element, buffer); + } + + state.decreaseDepth(); + if (arrayNl.length() != 0) { + buffer.append(arrayNl); + buffer.append(shift, 0, state.getDepth() * indentUnit.length()); + } + + buffer.append((byte)']'); + } + }; + + static final Handler HASH_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyHash object) { + GeneratorState state = session.getState(); + int perItem = + 12 // key, colon, comma + + (state.getDepth() + 1) * state.getIndent().length() + + state.getSpaceBefore().length() + + state.getSpace().length(); + 
return 2 + object.size() * perItem; + } + + @Override + void generate(final Session session, RubyHash object, + final ByteList buffer) { + ThreadContext context = session.getContext(); + final Ruby runtime = context.getRuntime(); + final GeneratorState state = session.getState(); + final int depth = state.increaseDepth(); + + final ByteList objectNl = state.getObjectNl(); + final byte[] indent = Utils.repeat(state.getIndent(), depth); + final ByteList spaceBefore = state.getSpaceBefore(); + final ByteList space = state.getSpace(); + + buffer.append((byte)'{'); + buffer.append(objectNl); + object.visitAll(new RubyHash.Visitor() { + private boolean firstPair = true; + + @Override + public void visit(IRubyObject key, IRubyObject value) { + if (firstPair) { + firstPair = false; + } else { + buffer.append((byte)','); + buffer.append(objectNl); + } + if (objectNl.length() != 0) buffer.append(indent); + + STRING_HANDLER.generate(session, key.asString(), buffer); + session.infectBy(key); + + buffer.append(spaceBefore); + buffer.append((byte)':'); + buffer.append(space); + + Handler valueHandler = getHandlerFor(runtime, value); + valueHandler.generate(session, value, buffer); + session.infectBy(value); + } + }); + state.decreaseDepth(); + if (objectNl.length() != 0) { + buffer.append(objectNl); + buffer.append(Utils.repeat(state.getIndent(), state.getDepth())); + } + buffer.append((byte)'}'); + } + }; + + static final Handler STRING_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyString object) { + // for most applications, most strings will be just a set of + // printable ASCII characters without any escaping, so let's + // just allocate enough space for that + the quotes + return 2 + object.getByteList().length(); + } + + @Override + void generate(Session session, RubyString object, ByteList buffer) { + RuntimeInfo info = session.getInfo(); + RubyString src; + + if (object.encoding(session.getContext()) != info.utf8.get()) { + src = (RubyString)object.encode(session.getContext(), + info.utf8.get()); + } else { + src = object; + } + + session.getStringEncoder().encode(src.getByteList(), buffer); + } + }; + + static final Handler TRUE_HANDLER = + new KeywordHandler("true"); + static final Handler FALSE_HANDLER = + new KeywordHandler("false"); + static final Handler NIL_HANDLER = + new KeywordHandler("null"); + + /** + * The default handler (Object#to_json): coerces the object + * to string using #to_s, and serializes that string. + */ + static final Handler OBJECT_HANDLER = + new Handler() { + @Override + RubyString generateNew(Session session, IRubyObject object) { + RubyString str = object.asString(); + return STRING_HANDLER.generateNew(session, str); + } + + @Override + void generate(Session session, IRubyObject object, ByteList buffer) { + RubyString str = object.asString(); + STRING_HANDLER.generate(session, str, buffer); + } + }; + + /** + * A handler that simply calls #to_json(state) on the + * given object. 
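The container handlers above emit only the whitespace carried by the State (indent, space, space_before, object_nl, array_nl), and FLOAT_HANDLER rejects non-finite values unless allow_nan is set. A short Ruby illustration of those knobs (outputs in comments are approximate):

```ruby
require 'json'

obj = { "name" => "Zephyr", "tags" => ["ios", "swift"] }

JSON.generate(obj)
# => {"name":"Zephyr","tags":["ios","swift"]}   (all separators default to "")

JSON.generate(obj, indent: "  ", space: " ", object_nl: "\n", array_nl: "\n")
# => {
#      "name": "Zephyr",
#      "tags": [
#        "ios",
#        "swift"
#      ]
#    }

JSON.generate(Float::NAN)                    # raises JSON::GeneratorError
JSON.generate(Float::NAN, allow_nan: true)   # => "NaN"
```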
+ */ + static final Handler GENERIC_HANDLER = + new Handler() { + @Override + RubyString generateNew(Session session, IRubyObject object) { + if (object.respondsTo("to_json")) { + IRubyObject result = object.callMethod(session.getContext(), "to_json", + new IRubyObject[] {session.getState()}); + if (result instanceof RubyString) return (RubyString)result; + throw session.getRuntime().newTypeError("to_json must return a String"); + } else { + return OBJECT_HANDLER.generateNew(session, object); + } + } + + @Override + void generate(Session session, IRubyObject object, ByteList buffer) { + RubyString result = generateNew(session, object); + buffer.append(result.getByteList()); + } + }; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods 2.java new file mode 100644 index 0000000..bde7a18 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods 2.java @@ -0,0 +1,231 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.lang.ref.WeakReference; +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyBoolean; +import org.jruby.RubyFixnum; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * A class that populates the + * Json::Ext::Generator::GeneratorMethods module. + * + * @author mernen + */ +class GeneratorMethods { + /** + * Populates the given module with all modules and their methods + * @param info + * @param generatorMethodsModule The module to populate + * (normally JSON::Generator::GeneratorMethods) + */ + static void populate(RuntimeInfo info, RubyModule module) { + defineMethods(module, "Array", RbArray.class); + defineMethods(module, "FalseClass", RbFalse.class); + defineMethods(module, "Float", RbFloat.class); + defineMethods(module, "Hash", RbHash.class); + defineMethods(module, "Integer", RbInteger.class); + defineMethods(module, "NilClass", RbNil.class); + defineMethods(module, "Object", RbObject.class); + defineMethods(module, "String", RbString.class); + defineMethods(module, "TrueClass", RbTrue.class); + + info.stringExtendModule = new WeakReference(module.defineModuleUnder("String") + .defineModuleUnder("Extend")); + info.stringExtendModule.get().defineAnnotatedMethods(StringExtend.class); + } + + /** + * Convenience method for defining methods on a submodule. 
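populate is what gives the core classes their #to_json once this extension is loaded; each submodule created by defineMethods is mixed into the class of the same name on the Ruby side (in json/common). A quick Ruby sketch of the resulting surface (JRuby; return values approximate):

```ruby
require 'json/ext'     # on JRuby this loads the extension shown in this diff

JSON.generator         # => JSON::Ext::Generator
1.to_json              # => "1"
nil.to_json            # => "null"
true.to_json           # => "true"
"héllo".to_json        # => "\"héllo\""
[1, [2, 3]].to_json    # => "[1,[2,3]]"
{ a: 1 }.to_json       # => "{\"a\":1}"
```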
+ * @param parentModule + * @param submoduleName + * @param klass + */ + private static void defineMethods(RubyModule parentModule, + String submoduleName, Class klass) { + RubyModule submodule = parentModule.defineModuleUnder(submoduleName); + submodule.defineAnnotatedMethods(klass); + } + + + public static class RbHash { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyHash)vSelf, + Generator.HASH_HANDLER, args); + } + } + + public static class RbArray { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyArray)vSelf, + Generator.ARRAY_HANDLER, args); + } + } + + public static class RbInteger { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, vSelf, args); + } + } + + public static class RbFloat { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyFloat)vSelf, + Generator.FLOAT_HANDLER, args); + } + } + + public static class RbString { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyString)vSelf, + Generator.STRING_HANDLER, args); + } + + /** + * {@link RubyString String}#to_json_raw(*) + * + *

This method creates a JSON text from the result of a call to + * {@link #to_json_raw_object} of this String. + */ + @JRubyMethod(rest=true) + public static IRubyObject to_json_raw(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + RubyHash obj = toJsonRawObject(context, Utils.ensureString(vSelf)); + return Generator.generateJson(context, obj, + Generator.HASH_HANDLER, args); + } + + /** + * {@link RubyString String}#to_json_raw_object(*) + * + *

This method creates a raw object Hash, that can be nested into + * other data structures and will be unparsed as a raw string. This + * method should be used if you want to convert raw strings to JSON + * instead of UTF-8 strings, e.g. binary data. + */ + @JRubyMethod(rest=true) + public static IRubyObject to_json_raw_object(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return toJsonRawObject(context, Utils.ensureString(vSelf)); + } + + private static RubyHash toJsonRawObject(ThreadContext context, + RubyString self) { + Ruby runtime = context.getRuntime(); + RubyHash result = RubyHash.newHash(runtime); + + IRubyObject createId = RuntimeInfo.forRuntime(runtime) + .jsonModule.get().callMethod(context, "create_id"); + result.op_aset(context, createId, self.getMetaClass().to_s()); + + ByteList bl = self.getByteList(); + byte[] uBytes = bl.unsafeBytes(); + RubyArray array = runtime.newArray(bl.length()); + for (int i = bl.begin(), t = bl.begin() + bl.length(); i < t; i++) { + array.store(i, runtime.newFixnum(uBytes[i] & 0xff)); + } + + result.op_aset(context, runtime.newString("raw"), array); + return result; + } + + @JRubyMethod(required=1, module=true) + public static IRubyObject included(ThreadContext context, + IRubyObject vSelf, IRubyObject module) { + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + return module.callMethod(context, "extend", info.stringExtendModule.get()); + } + } + + public static class StringExtend { + /** + * {@link RubyString String}#json_create(o) + * + *

Raw Strings are JSON Objects (the raw bytes are stored in an + * array for the key "raw"). The Ruby String can be created by this + * module method. + */ + @JRubyMethod(required=1) + public static IRubyObject json_create(ThreadContext context, + IRubyObject vSelf, IRubyObject vHash) { + Ruby runtime = context.getRuntime(); + RubyHash o = vHash.convertToHash(); + IRubyObject rawData = o.fastARef(runtime.newString("raw")); + if (rawData == null) { + throw runtime.newArgumentError("\"raw\" value not defined " + + "for encoded String"); + } + RubyArray ary = Utils.ensureArray(rawData); + byte[] bytes = new byte[ary.getLength()]; + for (int i = 0, t = ary.getLength(); i < t; i++) { + IRubyObject element = ary.eltInternal(i); + if (element instanceof RubyFixnum) { + bytes[i] = (byte)RubyNumeric.fix2long(element); + } else { + throw runtime.newTypeError(element, runtime.getFixnum()); + } + } + return runtime.newString(new ByteList(bytes, false)); + } + } + + public static class RbTrue { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyBoolean)vSelf, + Generator.TRUE_HANDLER, args); + } + } + + public static class RbFalse { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyBoolean)vSelf, + Generator.FALSE_HANDLER, args); + } + } + + public static class RbNil { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, vSelf, + Generator.NIL_HANDLER, args); + } + } + + public static class RbObject { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject self, IRubyObject[] args) { + return RbString.to_json(context, self.asString(), args); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods.java new file mode 100644 index 0000000..bde7a18 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods.java @@ -0,0 +1,231 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.lang.ref.WeakReference; +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyBoolean; +import org.jruby.RubyFixnum; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * A class that populates the + * Json::Ext::Generator::GeneratorMethods module. 
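to_json_raw_object and StringExtend#json_create together give binary strings a lossless round trip: the bytes go out as an array under the "raw" key plus the create_id, and the parser rebuilds them when additions are enabled. A hedged Ruby sketch (JSON.create_id defaults to "json_class", so the exact text may vary):

```ruby
require 'json'

raw  = "\xFF\xFE".b                 # arbitrary bytes, not valid UTF-8
json = raw.to_json_raw
# => "{\"json_class\":\"String\",\"raw\":[255,254]}"   (approximate)

restored = JSON.parse(json, create_additions: true)    # dispatches to String.json_create
restored.bytes == raw.bytes         # => true
```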
+ * + * @author mernen + */ +class GeneratorMethods { + /** + * Populates the given module with all modules and their methods + * @param info + * @param generatorMethodsModule The module to populate + * (normally JSON::Generator::GeneratorMethods) + */ + static void populate(RuntimeInfo info, RubyModule module) { + defineMethods(module, "Array", RbArray.class); + defineMethods(module, "FalseClass", RbFalse.class); + defineMethods(module, "Float", RbFloat.class); + defineMethods(module, "Hash", RbHash.class); + defineMethods(module, "Integer", RbInteger.class); + defineMethods(module, "NilClass", RbNil.class); + defineMethods(module, "Object", RbObject.class); + defineMethods(module, "String", RbString.class); + defineMethods(module, "TrueClass", RbTrue.class); + + info.stringExtendModule = new WeakReference(module.defineModuleUnder("String") + .defineModuleUnder("Extend")); + info.stringExtendModule.get().defineAnnotatedMethods(StringExtend.class); + } + + /** + * Convenience method for defining methods on a submodule. + * @param parentModule + * @param submoduleName + * @param klass + */ + private static void defineMethods(RubyModule parentModule, + String submoduleName, Class klass) { + RubyModule submodule = parentModule.defineModuleUnder(submoduleName); + submodule.defineAnnotatedMethods(klass); + } + + + public static class RbHash { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyHash)vSelf, + Generator.HASH_HANDLER, args); + } + } + + public static class RbArray { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyArray)vSelf, + Generator.ARRAY_HANDLER, args); + } + } + + public static class RbInteger { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, vSelf, args); + } + } + + public static class RbFloat { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyFloat)vSelf, + Generator.FLOAT_HANDLER, args); + } + } + + public static class RbString { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyString)vSelf, + Generator.STRING_HANDLER, args); + } + + /** + * {@link RubyString String}#to_json_raw(*) + * + *

This method creates a JSON text from the result of a call to + * {@link #to_json_raw_object} of this String. + */ + @JRubyMethod(rest=true) + public static IRubyObject to_json_raw(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + RubyHash obj = toJsonRawObject(context, Utils.ensureString(vSelf)); + return Generator.generateJson(context, obj, + Generator.HASH_HANDLER, args); + } + + /** + * {@link RubyString String}#to_json_raw_object(*) + * + *

This method creates a raw object Hash, that can be nested into + * other data structures and will be unparsed as a raw string. This + * method should be used if you want to convert raw strings to JSON + * instead of UTF-8 strings, e.g. binary data. + */ + @JRubyMethod(rest=true) + public static IRubyObject to_json_raw_object(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return toJsonRawObject(context, Utils.ensureString(vSelf)); + } + + private static RubyHash toJsonRawObject(ThreadContext context, + RubyString self) { + Ruby runtime = context.getRuntime(); + RubyHash result = RubyHash.newHash(runtime); + + IRubyObject createId = RuntimeInfo.forRuntime(runtime) + .jsonModule.get().callMethod(context, "create_id"); + result.op_aset(context, createId, self.getMetaClass().to_s()); + + ByteList bl = self.getByteList(); + byte[] uBytes = bl.unsafeBytes(); + RubyArray array = runtime.newArray(bl.length()); + for (int i = bl.begin(), t = bl.begin() + bl.length(); i < t; i++) { + array.store(i, runtime.newFixnum(uBytes[i] & 0xff)); + } + + result.op_aset(context, runtime.newString("raw"), array); + return result; + } + + @JRubyMethod(required=1, module=true) + public static IRubyObject included(ThreadContext context, + IRubyObject vSelf, IRubyObject module) { + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + return module.callMethod(context, "extend", info.stringExtendModule.get()); + } + } + + public static class StringExtend { + /** + * {@link RubyString String}#json_create(o) + * + *

Raw Strings are JSON Objects (the raw bytes are stored in an + * array for the key "raw"). The Ruby String can be created by this + * module method. + */ + @JRubyMethod(required=1) + public static IRubyObject json_create(ThreadContext context, + IRubyObject vSelf, IRubyObject vHash) { + Ruby runtime = context.getRuntime(); + RubyHash o = vHash.convertToHash(); + IRubyObject rawData = o.fastARef(runtime.newString("raw")); + if (rawData == null) { + throw runtime.newArgumentError("\"raw\" value not defined " + + "for encoded String"); + } + RubyArray ary = Utils.ensureArray(rawData); + byte[] bytes = new byte[ary.getLength()]; + for (int i = 0, t = ary.getLength(); i < t; i++) { + IRubyObject element = ary.eltInternal(i); + if (element instanceof RubyFixnum) { + bytes[i] = (byte)RubyNumeric.fix2long(element); + } else { + throw runtime.newTypeError(element, runtime.getFixnum()); + } + } + return runtime.newString(new ByteList(bytes, false)); + } + } + + public static class RbTrue { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyBoolean)vSelf, + Generator.TRUE_HANDLER, args); + } + } + + public static class RbFalse { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyBoolean)vSelf, + Generator.FALSE_HANDLER, args); + } + } + + public static class RbNil { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, vSelf, + Generator.NIL_HANDLER, args); + } + } + + public static class RbObject { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject self, IRubyObject[] args) { + return RbString.to_json(context, self.asString(), args); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorService 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorService 2.java new file mode 100644 index 0000000..e665ad1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorService 2.java @@ -0,0 +1,42 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.io.IOException; +import java.lang.ref.WeakReference; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.runtime.load.BasicLibraryService; + +/** + * The service invoked by JRuby's {@link org.jruby.runtime.load.LoadService LoadService}. + * Defines the JSON::Ext::Generator module. 
+ * @author mernen + */ +public class GeneratorService implements BasicLibraryService { + public boolean basicLoad(Ruby runtime) throws IOException { + runtime.getLoadService().require("json/common"); + RuntimeInfo info = RuntimeInfo.initRuntime(runtime); + + info.jsonModule = new WeakReference(runtime.defineModule("JSON")); + RubyModule jsonExtModule = info.jsonModule.get().defineModuleUnder("Ext"); + RubyModule generatorModule = jsonExtModule.defineModuleUnder("Generator"); + + RubyClass stateClass = + generatorModule.defineClassUnder("State", runtime.getObject(), + GeneratorState.ALLOCATOR); + stateClass.defineAnnotatedMethods(GeneratorState.class); + info.generatorStateClass = new WeakReference(stateClass); + + RubyModule generatorMethods = + generatorModule.defineModuleUnder("GeneratorMethods"); + GeneratorMethods.populate(info, generatorMethods); + + return true; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorService.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorService.java new file mode 100644 index 0000000..e665ad1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorService.java @@ -0,0 +1,42 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.io.IOException; +import java.lang.ref.WeakReference; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.runtime.load.BasicLibraryService; + +/** + * The service invoked by JRuby's {@link org.jruby.runtime.load.LoadService LoadService}. + * Defines the JSON::Ext::Generator module. + * @author mernen + */ +public class GeneratorService implements BasicLibraryService { + public boolean basicLoad(Ruby runtime) throws IOException { + runtime.getLoadService().require("json/common"); + RuntimeInfo info = RuntimeInfo.initRuntime(runtime); + + info.jsonModule = new WeakReference(runtime.defineModule("JSON")); + RubyModule jsonExtModule = info.jsonModule.get().defineModuleUnder("Ext"); + RubyModule generatorModule = jsonExtModule.defineModuleUnder("Generator"); + + RubyClass stateClass = + generatorModule.defineClassUnder("State", runtime.getObject(), + GeneratorState.ALLOCATOR); + stateClass.defineAnnotatedMethods(GeneratorState.class); + info.generatorStateClass = new WeakReference(stateClass); + + RubyModule generatorMethods = + generatorModule.defineModuleUnder("GeneratorMethods"); + GeneratorMethods.populate(info, generatorMethods); + + return true; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorState 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorState 2.java new file mode 100644 index 0000000..44a9311 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorState 2.java @@ -0,0 +1,490 @@ +/* + * This code is copyrighted work by Daniel Luz . 
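basicLoad runs when the extension library is required on JRuby; afterwards the constants it defines are reachable from Ruby. A brief check, as a sketch (constant list approximate):

```ruby
require 'json/ext'   # triggers GeneratorService#basicLoad under JRuby

JSON::Ext::Generator::State            # the class registered via stateClass above
JSON::Ext::Generator::GeneratorMethods.constants.sort
# => [:Array, :FalseClass, :Float, :Hash, :Integer,
#     :NilClass, :Object, :String, :TrueClass]        (approximate)
```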
+ * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * The JSON::Ext::Generator::State class. + * + *

This class is used to create State instances, that are use to hold data + * while generating a JSON text from a a Ruby data structure. + * + * @author mernen + */ +public class GeneratorState extends RubyObject { + /** + * The indenting unit string. Will be repeated several times for larger + * indenting levels. + */ + private ByteList indent = ByteList.EMPTY_BYTELIST; + /** + * The spacing to be added after a semicolon on a JSON object. + * @see #spaceBefore + */ + private ByteList space = ByteList.EMPTY_BYTELIST; + /** + * The spacing to be added before a semicolon on a JSON object. + * @see #space + */ + private ByteList spaceBefore = ByteList.EMPTY_BYTELIST; + /** + * Any suffix to be added after the comma for each element on a JSON object. + * It is assumed to be a newline, if set. + */ + private ByteList objectNl = ByteList.EMPTY_BYTELIST; + /** + * Any suffix to be added after the comma for each element on a JSON Array. + * It is assumed to be a newline, if set. + */ + private ByteList arrayNl = ByteList.EMPTY_BYTELIST; + + /** + * The maximum level of nesting of structures allowed. + * 0 means disabled. + */ + private int maxNesting = DEFAULT_MAX_NESTING; + static final int DEFAULT_MAX_NESTING = 100; + /** + * Whether special float values (NaN, Infinity, + * -Infinity) are accepted. + * If set to false, an exception will be thrown upon + * encountering one. + */ + private boolean allowNaN = DEFAULT_ALLOW_NAN; + static final boolean DEFAULT_ALLOW_NAN = false; + /** + * If set to true all JSON documents generated do not contain + * any other characters than ASCII characters. + */ + private boolean asciiOnly = DEFAULT_ASCII_ONLY; + static final boolean DEFAULT_ASCII_ONLY = false; + /** + * If set to true all JSON values generated might not be + * RFC-conform JSON documents. + */ + private boolean quirksMode = DEFAULT_QUIRKS_MODE; + static final boolean DEFAULT_QUIRKS_MODE = false; + /** + * The initial buffer length of this state. (This isn't really used on all + * non-C implementations.) + */ + private int bufferInitialLength = DEFAULT_BUFFER_INITIAL_LENGTH; + static final int DEFAULT_BUFFER_INITIAL_LENGTH = 1024; + + /** + * The current depth (inside a #to_json call) + */ + private int depth = 0; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new GeneratorState(runtime, klazz); + } + }; + + public GeneratorState(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + /** + * State.from_state(opts) + * + *

Creates a State object from opts, which ought to be + * {@link RubyHash Hash} to create a new State instance + * configured by opts, something else to create an + * unconfigured instance. If opts is a State + * object, it is just returned. + * @param clazzParam The receiver of the method call + * ({@link RubyClass} State) + * @param opts The object to use as a base for the new State + * @param block The block passed to the method + * @return A GeneratorState as determined above + */ + @JRubyMethod(meta=true) + public static IRubyObject from_state(ThreadContext context, + IRubyObject klass, IRubyObject opts) { + return fromState(context, opts); + } + + static GeneratorState fromState(ThreadContext context, IRubyObject opts) { + return fromState(context, RuntimeInfo.forRuntime(context.getRuntime()), opts); + } + + static GeneratorState fromState(ThreadContext context, RuntimeInfo info, + IRubyObject opts) { + RubyClass klass = info.generatorStateClass.get(); + if (opts != null) { + // if the given parameter is a Generator::State, return itself + if (klass.isInstance(opts)) return (GeneratorState)opts; + + // if the given parameter is a Hash, pass it to the instantiator + if (context.getRuntime().getHash().isInstance(opts)) { + return (GeneratorState)klass.newInstance(context, + new IRubyObject[] {opts}, Block.NULL_BLOCK); + } + } + + // for other values, return the safe prototype + return (GeneratorState)info.getSafeStatePrototype(context).dup(); + } + + /** + * State#initialize(opts = {}) + * + * Instantiates a new State object, configured by opts. + * + * opts can have the following keys: + * + *

+ * :indent
+ *   a {@link RubyString String} used to indent levels (default: "")
+ * :space
+ *   a String that is put after a ':' or ',' delimiter (default: "")
+ * :space_before
+ *   a String that is put before a ":" pair delimiter (default: "")
+ * :object_nl
+ *   a String that is put at the end of a JSON object (default: "")
+ * :array_nl
+ *   a String that is put at the end of a JSON array (default: "")
+ * :allow_nan
+ *
true if NaN, Infinity, and + * -Infinity should be generated, otherwise an exception is + * thrown if these values are encountered. + * This options defaults to false. + */ + @JRubyMethod(optional=1, visibility=Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + configure(context, args.length > 0 ? args[0] : null); + return this; + } + + @JRubyMethod + public IRubyObject initialize_copy(ThreadContext context, IRubyObject vOrig) { + Ruby runtime = context.getRuntime(); + if (!(vOrig instanceof GeneratorState)) { + throw runtime.newTypeError(vOrig, getType()); + } + GeneratorState orig = (GeneratorState)vOrig; + this.indent = orig.indent; + this.space = orig.space; + this.spaceBefore = orig.spaceBefore; + this.objectNl = orig.objectNl; + this.arrayNl = orig.arrayNl; + this.maxNesting = orig.maxNesting; + this.allowNaN = orig.allowNaN; + this.asciiOnly = orig.asciiOnly; + this.quirksMode = orig.quirksMode; + this.bufferInitialLength = orig.bufferInitialLength; + this.depth = orig.depth; + return this; + } + + /** + * Generates a valid JSON document from object obj and returns + * the result. If no valid JSON document can be created this method raises + * a GeneratorError exception. + */ + @JRubyMethod + public IRubyObject generate(ThreadContext context, IRubyObject obj) { + RubyString result = Generator.generateJson(context, obj, this); + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + result.force_encoding(context, info.utf8.get()); + return result; + } + + private static boolean matchClosingBrace(ByteList bl, int pos, int len, + int brace) { + for (int endPos = len - 1; endPos > pos; endPos--) { + int b = bl.get(endPos); + if (Character.isWhitespace(b)) continue; + return b == brace; + } + return false; + } + + @JRubyMethod(name="[]", required=1) + public IRubyObject op_aref(ThreadContext context, IRubyObject vName) { + String name = vName.asJavaString(); + if (getMetaClass().isMethodBound(name, true)) { + return send(context, vName, Block.NULL_BLOCK); + } else { + IRubyObject value = getInstanceVariables().getInstanceVariable("@" + name); + return value == null ? 
context.nil : value; + } + } + + @JRubyMethod(name="[]=", required=2) + public IRubyObject op_aset(ThreadContext context, IRubyObject vName, IRubyObject value) { + String name = vName.asJavaString(); + String nameWriter = name + "="; + if (getMetaClass().isMethodBound(nameWriter, true)) { + return send(context, context.getRuntime().newString(nameWriter), value, Block.NULL_BLOCK); + } else { + getInstanceVariables().setInstanceVariable("@" + name, value); + } + return context.getRuntime().getNil(); + } + + public ByteList getIndent() { + return indent; + } + + @JRubyMethod(name="indent") + public RubyString indent_get(ThreadContext context) { + return context.getRuntime().newString(indent); + } + + @JRubyMethod(name="indent=") + public IRubyObject indent_set(ThreadContext context, IRubyObject indent) { + this.indent = prepareByteList(context, indent); + return indent; + } + + public ByteList getSpace() { + return space; + } + + @JRubyMethod(name="space") + public RubyString space_get(ThreadContext context) { + return context.getRuntime().newString(space); + } + + @JRubyMethod(name="space=") + public IRubyObject space_set(ThreadContext context, IRubyObject space) { + this.space = prepareByteList(context, space); + return space; + } + + public ByteList getSpaceBefore() { + return spaceBefore; + } + + @JRubyMethod(name="space_before") + public RubyString space_before_get(ThreadContext context) { + return context.getRuntime().newString(spaceBefore); + } + + @JRubyMethod(name="space_before=") + public IRubyObject space_before_set(ThreadContext context, + IRubyObject spaceBefore) { + this.spaceBefore = prepareByteList(context, spaceBefore); + return spaceBefore; + } + + public ByteList getObjectNl() { + return objectNl; + } + + @JRubyMethod(name="object_nl") + public RubyString object_nl_get(ThreadContext context) { + return context.getRuntime().newString(objectNl); + } + + @JRubyMethod(name="object_nl=") + public IRubyObject object_nl_set(ThreadContext context, + IRubyObject objectNl) { + this.objectNl = prepareByteList(context, objectNl); + return objectNl; + } + + public ByteList getArrayNl() { + return arrayNl; + } + + @JRubyMethod(name="array_nl") + public RubyString array_nl_get(ThreadContext context) { + return context.getRuntime().newString(arrayNl); + } + + @JRubyMethod(name="array_nl=") + public IRubyObject array_nl_set(ThreadContext context, + IRubyObject arrayNl) { + this.arrayNl = prepareByteList(context, arrayNl); + return arrayNl; + } + + @JRubyMethod(name="check_circular?") + public RubyBoolean check_circular_p(ThreadContext context) { + return context.getRuntime().newBoolean(maxNesting != 0); + } + + /** + * Returns the maximum level of nesting configured for this state. 
+ */ + public int getMaxNesting() { + return maxNesting; + } + + @JRubyMethod(name="max_nesting") + public RubyInteger max_nesting_get(ThreadContext context) { + return context.getRuntime().newFixnum(maxNesting); + } + + @JRubyMethod(name="max_nesting=") + public IRubyObject max_nesting_set(IRubyObject max_nesting) { + maxNesting = RubyNumeric.fix2int(max_nesting); + return max_nesting; + } + + public boolean allowNaN() { + return allowNaN; + } + + @JRubyMethod(name="allow_nan?") + public RubyBoolean allow_nan_p(ThreadContext context) { + return context.getRuntime().newBoolean(allowNaN); + } + + public boolean asciiOnly() { + return asciiOnly; + } + + @JRubyMethod(name="ascii_only?") + public RubyBoolean ascii_only_p(ThreadContext context) { + return context.getRuntime().newBoolean(asciiOnly); + } + + @JRubyMethod(name="buffer_initial_length") + public RubyInteger buffer_initial_length_get(ThreadContext context) { + return context.getRuntime().newFixnum(bufferInitialLength); + } + + @JRubyMethod(name="buffer_initial_length=") + public IRubyObject buffer_initial_length_set(IRubyObject buffer_initial_length) { + int newLength = RubyNumeric.fix2int(buffer_initial_length); + if (newLength > 0) bufferInitialLength = newLength; + return buffer_initial_length; + } + + public int getDepth() { + return depth; + } + + @JRubyMethod(name="depth") + public RubyInteger depth_get(ThreadContext context) { + return context.getRuntime().newFixnum(depth); + } + + @JRubyMethod(name="depth=") + public IRubyObject depth_set(IRubyObject vDepth) { + depth = RubyNumeric.fix2int(vDepth); + return vDepth; + } + + private ByteList prepareByteList(ThreadContext context, IRubyObject value) { + RubyString str = value.convertToString(); + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + if (str.encoding(context) != info.utf8.get()) { + str = (RubyString)str.encode(context, info.utf8.get()); + } + return str.getByteList().dup(); + } + + /** + * State#configure(opts) + * + *

Configures this State instance with the {@link RubyHash Hash} + * opts, and returns itself. + * @param vOpts The options hash + * @return The receiver + */ + @JRubyMethod(alias = "merge") + public IRubyObject configure(ThreadContext context, IRubyObject vOpts) { + OptionsReader opts = new OptionsReader(context, vOpts); + + ByteList indent = opts.getString("indent"); + if (indent != null) this.indent = indent; + + ByteList space = opts.getString("space"); + if (space != null) this.space = space; + + ByteList spaceBefore = opts.getString("space_before"); + if (spaceBefore != null) this.spaceBefore = spaceBefore; + + ByteList arrayNl = opts.getString("array_nl"); + if (arrayNl != null) this.arrayNl = arrayNl; + + ByteList objectNl = opts.getString("object_nl"); + if (objectNl != null) this.objectNl = objectNl; + + maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + allowNaN = opts.getBool("allow_nan", DEFAULT_ALLOW_NAN); + asciiOnly = opts.getBool("ascii_only", DEFAULT_ASCII_ONLY); + bufferInitialLength = opts.getInt("buffer_initial_length", DEFAULT_BUFFER_INITIAL_LENGTH); + + depth = opts.getInt("depth", 0); + + return this; + } + + /** + * State#to_h() + * + *

Returns the configuration instance variables as a hash, that can be + * passed to the configure method. + * @return the hash + */ + @JRubyMethod(alias = "to_hash") + public RubyHash to_h(ThreadContext context) { + Ruby runtime = context.getRuntime(); + RubyHash result = RubyHash.newHash(runtime); + + result.op_aset(context, runtime.newSymbol("indent"), indent_get(context)); + result.op_aset(context, runtime.newSymbol("space"), space_get(context)); + result.op_aset(context, runtime.newSymbol("space_before"), space_before_get(context)); + result.op_aset(context, runtime.newSymbol("object_nl"), object_nl_get(context)); + result.op_aset(context, runtime.newSymbol("array_nl"), array_nl_get(context)); + result.op_aset(context, runtime.newSymbol("allow_nan"), allow_nan_p(context)); + result.op_aset(context, runtime.newSymbol("ascii_only"), ascii_only_p(context)); + result.op_aset(context, runtime.newSymbol("max_nesting"), max_nesting_get(context)); + result.op_aset(context, runtime.newSymbol("depth"), depth_get(context)); + result.op_aset(context, runtime.newSymbol("buffer_initial_length"), buffer_initial_length_get(context)); + for (String name: getInstanceVariableNameList()) { + result.op_aset(context, runtime.newSymbol(name.substring(1)), getInstanceVariables().getInstanceVariable(name)); + } + return result; + } + + public int increaseDepth() { + depth++; + checkMaxNesting(); + return depth; + } + + public int decreaseDepth() { + return --depth; + } + + /** + * Checks if the current depth is allowed as per this state's options. + * @param context + * @param depth The corrent depth + */ + private void checkMaxNesting() { + if (maxNesting != 0 && depth > maxNesting) { + depth--; + throw Utils.newException(getRuntime().getCurrentContext(), + Utils.M_NESTING_ERROR, "nesting of " + depth + " is too deep"); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorState.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorState.java new file mode 100644 index 0000000..44a9311 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/GeneratorState.java @@ -0,0 +1,490 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * The JSON::Ext::Generator::State class. + * + *
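Most of the accessors above exist so a State can be built once, inspected, and reused from Ruby; op_aref/op_aset additionally fall back to plain instance variables, which to_h then merges in. A sketch of typical usage against the class defined here (JRuby; values in comments are approximate):

```ruby
require 'json/ext'

state = JSON::Ext::Generator::State.new(
  indent: "  ", space: " ", object_nl: "\n", array_nl: "\n", max_nesting: 2
)

state.generate("a" => [1, 2])
# => "{\n  \"a\": [\n    1,\n    2\n  ]\n}"

state[:indent]          # => "  "   ([]: bound reader method, else @indent)
state[:custom] = 1      # []=: no writer defined, so stored as @custom
state.to_h[:custom]     # => 1      (to_h appends instance variables)

state.generate([[[1]]]) # raises JSON::NestingError (max_nesting of 2 exceeded)
```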

This class is used to create State instances, that are use to hold data + * while generating a JSON text from a a Ruby data structure. + * + * @author mernen + */ +public class GeneratorState extends RubyObject { + /** + * The indenting unit string. Will be repeated several times for larger + * indenting levels. + */ + private ByteList indent = ByteList.EMPTY_BYTELIST; + /** + * The spacing to be added after a semicolon on a JSON object. + * @see #spaceBefore + */ + private ByteList space = ByteList.EMPTY_BYTELIST; + /** + * The spacing to be added before a semicolon on a JSON object. + * @see #space + */ + private ByteList spaceBefore = ByteList.EMPTY_BYTELIST; + /** + * Any suffix to be added after the comma for each element on a JSON object. + * It is assumed to be a newline, if set. + */ + private ByteList objectNl = ByteList.EMPTY_BYTELIST; + /** + * Any suffix to be added after the comma for each element on a JSON Array. + * It is assumed to be a newline, if set. + */ + private ByteList arrayNl = ByteList.EMPTY_BYTELIST; + + /** + * The maximum level of nesting of structures allowed. + * 0 means disabled. + */ + private int maxNesting = DEFAULT_MAX_NESTING; + static final int DEFAULT_MAX_NESTING = 100; + /** + * Whether special float values (NaN, Infinity, + * -Infinity) are accepted. + * If set to false, an exception will be thrown upon + * encountering one. + */ + private boolean allowNaN = DEFAULT_ALLOW_NAN; + static final boolean DEFAULT_ALLOW_NAN = false; + /** + * If set to true all JSON documents generated do not contain + * any other characters than ASCII characters. + */ + private boolean asciiOnly = DEFAULT_ASCII_ONLY; + static final boolean DEFAULT_ASCII_ONLY = false; + /** + * If set to true all JSON values generated might not be + * RFC-conform JSON documents. + */ + private boolean quirksMode = DEFAULT_QUIRKS_MODE; + static final boolean DEFAULT_QUIRKS_MODE = false; + /** + * The initial buffer length of this state. (This isn't really used on all + * non-C implementations.) + */ + private int bufferInitialLength = DEFAULT_BUFFER_INITIAL_LENGTH; + static final int DEFAULT_BUFFER_INITIAL_LENGTH = 1024; + + /** + * The current depth (inside a #to_json call) + */ + private int depth = 0; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new GeneratorState(runtime, klazz); + } + }; + + public GeneratorState(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + /** + * State.from_state(opts) + * + *

Creates a State object from opts, which ought to be + * {@link RubyHash Hash} to create a new State instance + * configured by opts, something else to create an + * unconfigured instance. If opts is a State + * object, it is just returned. + * @param clazzParam The receiver of the method call + * ({@link RubyClass} State) + * @param opts The object to use as a base for the new State + * @param block The block passed to the method + * @return A GeneratorState as determined above + */ + @JRubyMethod(meta=true) + public static IRubyObject from_state(ThreadContext context, + IRubyObject klass, IRubyObject opts) { + return fromState(context, opts); + } + + static GeneratorState fromState(ThreadContext context, IRubyObject opts) { + return fromState(context, RuntimeInfo.forRuntime(context.getRuntime()), opts); + } + + static GeneratorState fromState(ThreadContext context, RuntimeInfo info, + IRubyObject opts) { + RubyClass klass = info.generatorStateClass.get(); + if (opts != null) { + // if the given parameter is a Generator::State, return itself + if (klass.isInstance(opts)) return (GeneratorState)opts; + + // if the given parameter is a Hash, pass it to the instantiator + if (context.getRuntime().getHash().isInstance(opts)) { + return (GeneratorState)klass.newInstance(context, + new IRubyObject[] {opts}, Block.NULL_BLOCK); + } + } + + // for other values, return the safe prototype + return (GeneratorState)info.getSafeStatePrototype(context).dup(); + } + + /** + * State#initialize(opts = {}) + * + * Instantiates a new State object, configured by opts. + * + * opts can have the following keys: + * + *

+ * :indent
+ *   a {@link RubyString String} used to indent levels (default: "")
+ * :space
+ *   a String that is put after a ':' or ',' delimiter (default: "")
+ * :space_before
+ *   a String that is put before a ":" pair delimiter (default: "")
+ * :object_nl
+ *   a String that is put at the end of a JSON object (default: "")
+ * :array_nl
+ *   a String that is put at the end of a JSON array (default: "")
+ * :allow_nan
+ *
true if NaN, Infinity, and + * -Infinity should be generated, otherwise an exception is + * thrown if these values are encountered. + * This options defaults to false. + */ + @JRubyMethod(optional=1, visibility=Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + configure(context, args.length > 0 ? args[0] : null); + return this; + } + + @JRubyMethod + public IRubyObject initialize_copy(ThreadContext context, IRubyObject vOrig) { + Ruby runtime = context.getRuntime(); + if (!(vOrig instanceof GeneratorState)) { + throw runtime.newTypeError(vOrig, getType()); + } + GeneratorState orig = (GeneratorState)vOrig; + this.indent = orig.indent; + this.space = orig.space; + this.spaceBefore = orig.spaceBefore; + this.objectNl = orig.objectNl; + this.arrayNl = orig.arrayNl; + this.maxNesting = orig.maxNesting; + this.allowNaN = orig.allowNaN; + this.asciiOnly = orig.asciiOnly; + this.quirksMode = orig.quirksMode; + this.bufferInitialLength = orig.bufferInitialLength; + this.depth = orig.depth; + return this; + } + + /** + * Generates a valid JSON document from object obj and returns + * the result. If no valid JSON document can be created this method raises + * a GeneratorError exception. + */ + @JRubyMethod + public IRubyObject generate(ThreadContext context, IRubyObject obj) { + RubyString result = Generator.generateJson(context, obj, this); + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + result.force_encoding(context, info.utf8.get()); + return result; + } + + private static boolean matchClosingBrace(ByteList bl, int pos, int len, + int brace) { + for (int endPos = len - 1; endPos > pos; endPos--) { + int b = bl.get(endPos); + if (Character.isWhitespace(b)) continue; + return b == brace; + } + return false; + } + + @JRubyMethod(name="[]", required=1) + public IRubyObject op_aref(ThreadContext context, IRubyObject vName) { + String name = vName.asJavaString(); + if (getMetaClass().isMethodBound(name, true)) { + return send(context, vName, Block.NULL_BLOCK); + } else { + IRubyObject value = getInstanceVariables().getInstanceVariable("@" + name); + return value == null ? 
context.nil : value; + } + } + + @JRubyMethod(name="[]=", required=2) + public IRubyObject op_aset(ThreadContext context, IRubyObject vName, IRubyObject value) { + String name = vName.asJavaString(); + String nameWriter = name + "="; + if (getMetaClass().isMethodBound(nameWriter, true)) { + return send(context, context.getRuntime().newString(nameWriter), value, Block.NULL_BLOCK); + } else { + getInstanceVariables().setInstanceVariable("@" + name, value); + } + return context.getRuntime().getNil(); + } + + public ByteList getIndent() { + return indent; + } + + @JRubyMethod(name="indent") + public RubyString indent_get(ThreadContext context) { + return context.getRuntime().newString(indent); + } + + @JRubyMethod(name="indent=") + public IRubyObject indent_set(ThreadContext context, IRubyObject indent) { + this.indent = prepareByteList(context, indent); + return indent; + } + + public ByteList getSpace() { + return space; + } + + @JRubyMethod(name="space") + public RubyString space_get(ThreadContext context) { + return context.getRuntime().newString(space); + } + + @JRubyMethod(name="space=") + public IRubyObject space_set(ThreadContext context, IRubyObject space) { + this.space = prepareByteList(context, space); + return space; + } + + public ByteList getSpaceBefore() { + return spaceBefore; + } + + @JRubyMethod(name="space_before") + public RubyString space_before_get(ThreadContext context) { + return context.getRuntime().newString(spaceBefore); + } + + @JRubyMethod(name="space_before=") + public IRubyObject space_before_set(ThreadContext context, + IRubyObject spaceBefore) { + this.spaceBefore = prepareByteList(context, spaceBefore); + return spaceBefore; + } + + public ByteList getObjectNl() { + return objectNl; + } + + @JRubyMethod(name="object_nl") + public RubyString object_nl_get(ThreadContext context) { + return context.getRuntime().newString(objectNl); + } + + @JRubyMethod(name="object_nl=") + public IRubyObject object_nl_set(ThreadContext context, + IRubyObject objectNl) { + this.objectNl = prepareByteList(context, objectNl); + return objectNl; + } + + public ByteList getArrayNl() { + return arrayNl; + } + + @JRubyMethod(name="array_nl") + public RubyString array_nl_get(ThreadContext context) { + return context.getRuntime().newString(arrayNl); + } + + @JRubyMethod(name="array_nl=") + public IRubyObject array_nl_set(ThreadContext context, + IRubyObject arrayNl) { + this.arrayNl = prepareByteList(context, arrayNl); + return arrayNl; + } + + @JRubyMethod(name="check_circular?") + public RubyBoolean check_circular_p(ThreadContext context) { + return context.getRuntime().newBoolean(maxNesting != 0); + } + + /** + * Returns the maximum level of nesting configured for this state. 
+ */ + public int getMaxNesting() { + return maxNesting; + } + + @JRubyMethod(name="max_nesting") + public RubyInteger max_nesting_get(ThreadContext context) { + return context.getRuntime().newFixnum(maxNesting); + } + + @JRubyMethod(name="max_nesting=") + public IRubyObject max_nesting_set(IRubyObject max_nesting) { + maxNesting = RubyNumeric.fix2int(max_nesting); + return max_nesting; + } + + public boolean allowNaN() { + return allowNaN; + } + + @JRubyMethod(name="allow_nan?") + public RubyBoolean allow_nan_p(ThreadContext context) { + return context.getRuntime().newBoolean(allowNaN); + } + + public boolean asciiOnly() { + return asciiOnly; + } + + @JRubyMethod(name="ascii_only?") + public RubyBoolean ascii_only_p(ThreadContext context) { + return context.getRuntime().newBoolean(asciiOnly); + } + + @JRubyMethod(name="buffer_initial_length") + public RubyInteger buffer_initial_length_get(ThreadContext context) { + return context.getRuntime().newFixnum(bufferInitialLength); + } + + @JRubyMethod(name="buffer_initial_length=") + public IRubyObject buffer_initial_length_set(IRubyObject buffer_initial_length) { + int newLength = RubyNumeric.fix2int(buffer_initial_length); + if (newLength > 0) bufferInitialLength = newLength; + return buffer_initial_length; + } + + public int getDepth() { + return depth; + } + + @JRubyMethod(name="depth") + public RubyInteger depth_get(ThreadContext context) { + return context.getRuntime().newFixnum(depth); + } + + @JRubyMethod(name="depth=") + public IRubyObject depth_set(IRubyObject vDepth) { + depth = RubyNumeric.fix2int(vDepth); + return vDepth; + } + + private ByteList prepareByteList(ThreadContext context, IRubyObject value) { + RubyString str = value.convertToString(); + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + if (str.encoding(context) != info.utf8.get()) { + str = (RubyString)str.encode(context, info.utf8.get()); + } + return str.getByteList().dup(); + } + + /** + * State#configure(opts) + * + *

Configures this State instance with the {@link RubyHash Hash} + * opts, and returns itself. + * @param vOpts The options hash + * @return The receiver + */ + @JRubyMethod(alias = "merge") + public IRubyObject configure(ThreadContext context, IRubyObject vOpts) { + OptionsReader opts = new OptionsReader(context, vOpts); + + ByteList indent = opts.getString("indent"); + if (indent != null) this.indent = indent; + + ByteList space = opts.getString("space"); + if (space != null) this.space = space; + + ByteList spaceBefore = opts.getString("space_before"); + if (spaceBefore != null) this.spaceBefore = spaceBefore; + + ByteList arrayNl = opts.getString("array_nl"); + if (arrayNl != null) this.arrayNl = arrayNl; + + ByteList objectNl = opts.getString("object_nl"); + if (objectNl != null) this.objectNl = objectNl; + + maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + allowNaN = opts.getBool("allow_nan", DEFAULT_ALLOW_NAN); + asciiOnly = opts.getBool("ascii_only", DEFAULT_ASCII_ONLY); + bufferInitialLength = opts.getInt("buffer_initial_length", DEFAULT_BUFFER_INITIAL_LENGTH); + + depth = opts.getInt("depth", 0); + + return this; + } + + /** + * State#to_h() + * + *

Returns the configuration instance variables as a hash, that can be + * passed to the configure method. + * @return the hash + */ + @JRubyMethod(alias = "to_hash") + public RubyHash to_h(ThreadContext context) { + Ruby runtime = context.getRuntime(); + RubyHash result = RubyHash.newHash(runtime); + + result.op_aset(context, runtime.newSymbol("indent"), indent_get(context)); + result.op_aset(context, runtime.newSymbol("space"), space_get(context)); + result.op_aset(context, runtime.newSymbol("space_before"), space_before_get(context)); + result.op_aset(context, runtime.newSymbol("object_nl"), object_nl_get(context)); + result.op_aset(context, runtime.newSymbol("array_nl"), array_nl_get(context)); + result.op_aset(context, runtime.newSymbol("allow_nan"), allow_nan_p(context)); + result.op_aset(context, runtime.newSymbol("ascii_only"), ascii_only_p(context)); + result.op_aset(context, runtime.newSymbol("max_nesting"), max_nesting_get(context)); + result.op_aset(context, runtime.newSymbol("depth"), depth_get(context)); + result.op_aset(context, runtime.newSymbol("buffer_initial_length"), buffer_initial_length_get(context)); + for (String name: getInstanceVariableNameList()) { + result.op_aset(context, runtime.newSymbol(name.substring(1)), getInstanceVariables().getInstanceVariable(name)); + } + return result; + } + + public int increaseDepth() { + depth++; + checkMaxNesting(); + return depth; + } + + public int decreaseDepth() { + return --depth; + } + + /** + * Checks if the current depth is allowed as per this state's options. + * @param context + * @param depth The corrent depth + */ + private void checkMaxNesting() { + if (maxNesting != 0 && depth > maxNesting) { + depth--; + throw Utils.newException(getRuntime().getCurrentContext(), + Utils.M_NESTING_ERROR, "nesting of " + depth + " is too deep"); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/OptionsReader 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/OptionsReader 2.java new file mode 100644 index 0000000..70426d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/OptionsReader 2.java @@ -0,0 +1,113 @@ +/* + * This code is copyrighted work by Daniel Luz . 
+ * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +final class OptionsReader { + private final ThreadContext context; + private final Ruby runtime; + private final RubyHash opts; + private RuntimeInfo info; + + OptionsReader(ThreadContext context, IRubyObject vOpts) { + this.context = context; + this.runtime = context.getRuntime(); + if (vOpts == null || vOpts.isNil()) { + opts = null; + } else if (vOpts.respondsTo("to_hash")) { + opts = vOpts.convertToHash(); + } else if (vOpts.respondsTo("to_h")) { + opts = vOpts.callMethod(context, "to_h").convertToHash(); + } else { + opts = vOpts.convertToHash(); /* Should just raise the correct TypeError */ + } + } + + private RuntimeInfo getRuntimeInfo() { + if (info != null) return info; + info = RuntimeInfo.forRuntime(runtime); + return info; + } + + /** + * Efficiently looks up items with a {@link RubySymbol Symbol} key + * @param key The Symbol name to look up for + * @return The item in the {@link RubyHash Hash}, or null + * if not found + */ + IRubyObject get(String key) { + return opts == null ? null : opts.fastARef(runtime.newSymbol(key)); + } + + boolean getBool(String key, boolean defaultValue) { + IRubyObject value = get(key); + return value == null ? defaultValue : value.isTrue(); + } + + int getInt(String key, int defaultValue) { + IRubyObject value = get(key); + if (value == null) return defaultValue; + if (!value.isTrue()) return 0; + return RubyNumeric.fix2int(value); + } + + /** + * Reads the setting from the options hash. If no entry is set for this + * key or if it evaluates to false, returns null; attempts to + * coerce the value to {@link RubyString String} otherwise. + * @param key The Symbol name to look up for + * @return null if the key is not in the Hash or if + * its value evaluates to false + * @throws RaiseException TypeError if the value does not + * evaluate to false and can't be + * converted to string + */ + ByteList getString(String key) { + RubyString str = getString(key, null); + return str == null ? null : str.getByteList().dup(); + } + + RubyString getString(String key, RubyString defaultValue) { + IRubyObject value = get(key); + if (value == null || !value.isTrue()) return defaultValue; + + RubyString str = value.convertToString(); + RuntimeInfo info = getRuntimeInfo(); + if (str.encoding(context) != info.utf8.get()) { + str = (RubyString)str.encode(context, info.utf8.get()); + } + return str; + } + + /** + * Reads the setting from the options hash. If it is nil or + * undefined, returns the default value given. + * If not, ensures it is a RubyClass instance and shares the same + * allocator as the default value (i.e. for the basic types which have + * their specific allocators, this ensures the passed value is + * a subclass of them). 
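As a side note on the OptionsReader constructor above: any options object that responds to to_hash or to_h is accepted and coerced to a Hash before individual keys are read. A hedged Ruby sketch (assuming JRuby; the options object here is hypothetical):

    require 'json/ext'

    opts = Object.new
    def opts.to_h
      { symbolize_names: true }
    end

    # OptionsReader calls #to_h on the object, then reads :symbolize_names from the result
    JSON::Ext::Parser.new('{"a": 1}', opts).parse   # => {:a=>1}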
+ */ + RubyClass getClass(String key, RubyClass defaultValue) { + IRubyObject value = get(key); + + if (value == null || value.isNil()) return defaultValue; + return (RubyClass)value; + } + + public RubyHash getHash(String key) { + IRubyObject value = get(key); + if (value == null || value.isNil()) return new RubyHash(runtime); + return (RubyHash) value; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/OptionsReader.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/OptionsReader.java new file mode 100644 index 0000000..70426d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/OptionsReader.java @@ -0,0 +1,113 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +final class OptionsReader { + private final ThreadContext context; + private final Ruby runtime; + private final RubyHash opts; + private RuntimeInfo info; + + OptionsReader(ThreadContext context, IRubyObject vOpts) { + this.context = context; + this.runtime = context.getRuntime(); + if (vOpts == null || vOpts.isNil()) { + opts = null; + } else if (vOpts.respondsTo("to_hash")) { + opts = vOpts.convertToHash(); + } else if (vOpts.respondsTo("to_h")) { + opts = vOpts.callMethod(context, "to_h").convertToHash(); + } else { + opts = vOpts.convertToHash(); /* Should just raise the correct TypeError */ + } + } + + private RuntimeInfo getRuntimeInfo() { + if (info != null) return info; + info = RuntimeInfo.forRuntime(runtime); + return info; + } + + /** + * Efficiently looks up items with a {@link RubySymbol Symbol} key + * @param key The Symbol name to look up for + * @return The item in the {@link RubyHash Hash}, or null + * if not found + */ + IRubyObject get(String key) { + return opts == null ? null : opts.fastARef(runtime.newSymbol(key)); + } + + boolean getBool(String key, boolean defaultValue) { + IRubyObject value = get(key); + return value == null ? defaultValue : value.isTrue(); + } + + int getInt(String key, int defaultValue) { + IRubyObject value = get(key); + if (value == null) return defaultValue; + if (!value.isTrue()) return 0; + return RubyNumeric.fix2int(value); + } + + /** + * Reads the setting from the options hash. If no entry is set for this + * key or if it evaluates to false, returns null; attempts to + * coerce the value to {@link RubyString String} otherwise. + * @param key The Symbol name to look up for + * @return null if the key is not in the Hash or if + * its value evaluates to false + * @throws RaiseException TypeError if the value does not + * evaluate to false and can't be + * converted to string + */ + ByteList getString(String key) { + RubyString str = getString(key, null); + return str == null ? null : str.getByteList().dup(); + } + + RubyString getString(String key, RubyString defaultValue) { + IRubyObject value = get(key); + if (value == null || !value.isTrue()) return defaultValue; + + RubyString str = value.convertToString(); + RuntimeInfo info = getRuntimeInfo(); + if (str.encoding(context) != info.utf8.get()) { + str = (RubyString)str.encode(context, info.utf8.get()); + } + return str; + } + + /** + * Reads the setting from the options hash. 
If it is nil or + * undefined, returns the default value given. + * If not, ensures it is a RubyClass instance and shares the same + * allocator as the default value (i.e. for the basic types which have + * their specific allocators, this ensures the passed value is + * a subclass of them). + */ + RubyClass getClass(String key, RubyClass defaultValue) { + IRubyObject value = get(key); + + if (value == null || value.isNil()) return defaultValue; + return (RubyClass)value; + } + + public RubyHash getHash(String key) { + IRubyObject value = get(key); + if (value == null || value.isNil()) return new RubyHash(runtime); + return (RubyHash) value; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser 2.java new file mode 100644 index 0000000..647afca --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser 2.java @@ -0,0 +1,2362 @@ + +// line 1 "Parser.rl" +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.JumpException; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; +import org.jruby.util.ConvertBytes; +import static org.jruby.util.ConvertDouble.DoubleConverter; + +/** + * The JSON::Ext::Parser class. + * + *

This is the JSON parser implemented as a Java class. To use it as the + * standard parser, set + *

JSON.parser = JSON::Ext::Parser
+ * This is performed for you when you include "json/ext". + * + *
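At the Ruby level this looks like the following (on JRuby; on CRuby, require 'json/ext' loads the C extension instead):

    require 'json/ext'

    JSON.parser                 # => JSON::Ext::Parser
    JSON.parse('{"pi": 3.14}')  # => {"pi"=>3.14}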

This class does not perform the actual parsing, just acts as an interface + * to Ruby code. When the {@link #parse()} method is invoked, a + * Parser.ParserSession object is instantiated, which handles the process. + * + * @author mernen + */ +public class Parser extends RubyObject { + private final RuntimeInfo info; + private RubyString vSource; + private RubyString createId; + private boolean createAdditions; + private int maxNesting; + private boolean allowNaN; + private boolean symbolizeNames; + private RubyClass objectClass; + private RubyClass arrayClass; + private RubyClass decimalClass; + private RubyHash match_string; + + private static final int DEFAULT_MAX_NESTING = 100; + + private static final ByteList JSON_MINUS_INFINITY = new ByteList(ByteList.plain("-Infinity")); + // constant names in the JSON module containing those values + private static final String CONST_NAN = "NaN"; + private static final String CONST_INFINITY = "Infinity"; + private static final String CONST_MINUS_INFINITY = "MinusInfinity"; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Parser(runtime, klazz); + } + }; + + /** + * Multiple-value return for internal parser methods. + * + *

All the parseStuff methods return instances of + * ParserResult when successful, or null when + * there's a problem with the input data. + */ + static final class ParserResult { + /** + * The result of the successful parsing. Should never be + * null. + */ + IRubyObject result; + /** + * The point where the parser returned. + */ + int p; + + void update(IRubyObject result, int p) { + this.result = result; + this.p = p; + } + } + + public Parser(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + info = RuntimeInfo.forRuntime(runtime); + } + + /** + * Parser.new(source, opts = {}) + * + *

Creates a new JSON::Ext::Parser instance for the string + * source. + * It will be configured by the opts Hash. + * opts can have the following keys: + * + *

+ * <dl>
+ * <dt>:max_nesting
+ * <dd>The maximum depth of nesting allowed in the parsed data
+ * structures. Disable depth checking with :max_nesting => false|nil|0,
+ * it defaults to 100.
+ *
+ * <dt>:allow_nan
+ * <dd>If set to true, allow NaN, Infinity and -Infinity
+ * in defiance of RFC 4627 to be parsed by the Parser. This option
+ * defaults to false.
+ *
+ * <dt>:symbolize_names
+ * <dd>If set to true, returns symbols for the names (keys) in
+ * a JSON object. Otherwise strings are returned, which is also the default.
+ *
+ * <dt>:create_additions
+ * <dd>If set to false, the Parser doesn't create additions
+ * even if a matching class and create_id was found. This option
+ * defaults to true.
+ *
+ * <dt>:object_class
+ * <dd>Defaults to Hash.
+ *
+ * <dt>:array_class
+ * <dd>Defaults to Array.
+ *
+ * <dt>:decimal_class
+ * <dd>Specifies which class to use instead of the default (Float) when
+ * parsing decimal numbers. This class must accept a single string argument
+ * in its constructor.
+ * </dl>
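These constructor options are normally supplied through JSON.parse; a short hedged sketch of the documented behaviour (assuming JRuby with this extension loaded, values illustrative):

    require 'json/ext'
    require 'bigdecimal'

    # :allow_nan admits the RFC-violating literals; :symbolize_names symbolizes keys
    JSON.parse('{"x": NaN}', allow_nan: true, symbolize_names: true)   # => {:x=>NaN}

    # :decimal_class receives the number literal as a string via .new
    JSON.parse('{"price": 9.99}', decimal_class: BigDecimal)           # => {"price"=>0.999e1}

    # exceeding :max_nesting raises JSON::NestingError ("nesting of 3 is too deep")
    JSON.parse('[[[1]]]', max_nesting: 2) rescue $!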
+ */ + @JRubyMethod(name = "new", required = 1, optional = 1, meta = true) + public static IRubyObject newInstance(IRubyObject clazz, IRubyObject[] args, Block block) { + Parser parser = (Parser)((RubyClass)clazz).allocate(); + + parser.callInit(args, block); + + return parser; + } + + @JRubyMethod(required = 1, optional = 1, visibility = Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.getRuntime(); + if (this.vSource != null) { + throw runtime.newTypeError("already initialized instance"); + } + + OptionsReader opts = new OptionsReader(context, args.length > 1 ? args[1] : null); + this.maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + this.allowNaN = opts.getBool("allow_nan", false); + this.symbolizeNames = opts.getBool("symbolize_names", false); + this.createId = opts.getString("create_id", getCreateId(context)); + this.createAdditions = opts.getBool("create_additions", false); + this.objectClass = opts.getClass("object_class", runtime.getHash()); + this.arrayClass = opts.getClass("array_class", runtime.getArray()); + this.decimalClass = opts.getClass("decimal_class", null); + this.match_string = opts.getHash("match_string"); + + if(symbolizeNames && createAdditions) { + throw runtime.newArgumentError( + "options :symbolize_names and :create_additions cannot be " + + " used in conjunction" + ); + } + this.vSource = args[0].convertToString(); + this.vSource = convertEncoding(context, vSource); + + return this; + } + + /** + * Checks the given string's encoding. If a non-UTF-8 encoding is detected, + * a converted copy is returned. + * Returns the source string if no conversion is needed. + */ + private RubyString convertEncoding(ThreadContext context, RubyString source) { + RubyEncoding encoding = (RubyEncoding)source.encoding(context); + if (encoding == info.ascii8bit.get()) { + if (source.isFrozen()) { + source = (RubyString) source.dup(); + } + source.force_encoding(context, info.utf8.get()); + } else { + source = (RubyString) source.encode(context, info.utf8.get()); + } + return source; + } + + /** + * Checks the first four bytes of the given ByteList to infer its encoding, + * using the principle demonstrated on section 3 of RFC 4627 (JSON). + */ + private static String sniffByteList(ByteList bl) { + if (bl.length() < 4) return null; + if (bl.get(0) == 0 && bl.get(2) == 0) { + return bl.get(1) == 0 ? "utf-32be" : "utf-16be"; + } + if (bl.get(1) == 0 && bl.get(3) == 0) { + return bl.get(2) == 0 ? "utf-32le" : "utf-16le"; + } + return null; + } + + /** + * Assumes the given (binary) RubyString to be in the given encoding, then + * converts it to UTF-8. + */ + private RubyString reinterpretEncoding(ThreadContext context, + RubyString str, String sniffedEncoding) { + RubyEncoding actualEncoding = info.getEncoding(context, sniffedEncoding); + RubyEncoding targetEncoding = info.utf8.get(); + RubyString dup = (RubyString)str.dup(); + dup.force_encoding(context, actualEncoding); + return (RubyString)dup.encode_bang(context, targetEncoding); + } + + /** + * Parser#parse() + * + *

Parses the current JSON text source and returns the + * complete data structure as a result. + */ + @JRubyMethod + public IRubyObject parse(ThreadContext context) { + return new ParserSession(this, context, info).parse(); + } + + /** + * Parser#source() + * + *

Returns a copy of the current source string, that was + * used to construct this Parser. + */ + @JRubyMethod(name = "source") + public IRubyObject source_get() { + return checkAndGetSource().dup(); + } + + public RubyString checkAndGetSource() { + if (vSource != null) { + return vSource; + } else { + throw getRuntime().newTypeError("uninitialized instance"); + } + } + + /** + * Queries JSON.create_id. Returns null if it is + * set to nil or false, and a String if not. + */ + private RubyString getCreateId(ThreadContext context) { + IRubyObject v = info.jsonModule.get().callMethod(context, "create_id"); + return v.isTrue() ? v.convertToString() : null; + } + + /** + * A string parsing session. + * + *
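A brief sketch of the source/parse behaviour documented above (assuming JRuby):

    parser = JSON::Ext::Parser.new('{"a": 1}')
    parser.source   # => "{\"a\": 1}"  (a copy of the original source string)
    parser.parse    # => {"a"=>1}

    JSON::Ext::Parser.new('{"a": }').parse
    # raises JSON::ParserError, built by the unexpectedToken helper in ParserSession below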

Once a ParserSession is instantiated, the source string should not + * change until the parsing is complete. The ParserSession object assumes + * the source {@link RubyString} is still associated to its original + * {@link ByteList}, which in turn must still be bound to the same + * byte[] value (and on the same offset). + */ + // Ragel uses lots of fall-through + @SuppressWarnings("fallthrough") + private static class ParserSession { + private final Parser parser; + private final ThreadContext context; + private final RuntimeInfo info; + private final ByteList byteList; + private final ByteList view; + private final byte[] data; + private final StringDecoder decoder; + private int currentNesting = 0; + private final DoubleConverter dc; + + // initialization value for all state variables. + // no idea about the origins of this value, ask Flori ;) + private static final int EVIL = 0x666; + + private ParserSession(Parser parser, ThreadContext context, RuntimeInfo info) { + this.parser = parser; + this.context = context; + this.info = info; + this.byteList = parser.checkAndGetSource().getByteList(); + this.data = byteList.unsafeBytes(); + this.view = new ByteList(data, false); + this.decoder = new StringDecoder(context); + this.dc = new DoubleConverter(); + } + + private RaiseException unexpectedToken(int absStart, int absEnd) { + RubyString msg = getRuntime().newString("unexpected token at '") + .cat(data, absStart, absEnd - absStart) + .cat((byte)'\''); + return newException(Utils.M_PARSER_ERROR, msg); + } + + private Ruby getRuntime() { + return context.getRuntime(); + } + + +// line 339 "Parser.rl" + + + +// line 321 "Parser.java" +private static byte[] init__JSON_value_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1, 1, 2, 1, 3, 1, 4, 1, + 5, 1, 6, 1, 7, 1, 8, 1, 9 + }; +} + +private static final byte _JSON_value_actions[] = init__JSON_value_actions_0(); + + +private static byte[] init__JSON_value_key_offsets_0() +{ + return new byte [] { + 0, 0, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 + }; +} + +private static final byte _JSON_value_key_offsets[] = init__JSON_value_key_offsets_0(); + + +private static char[] init__JSON_value_trans_keys_0() +{ + return new char [] { + 34, 45, 73, 78, 91, 102, 110, 116, 123, 48, 57, 110, + 102, 105, 110, 105, 116, 121, 97, 78, 97, 108, 115, 101, + 117, 108, 108, 114, 117, 101, 0 + }; +} + +private static final char _JSON_value_trans_keys[] = init__JSON_value_trans_keys_0(); + + +private static byte[] init__JSON_value_single_lengths_0() +{ + return new byte [] { + 0, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 + }; +} + +private static final byte _JSON_value_single_lengths[] = init__JSON_value_single_lengths_0(); + + +private static byte[] init__JSON_value_range_lengths_0() +{ + return new byte [] { + 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_value_range_lengths[] = init__JSON_value_range_lengths_0(); + + +private static byte[] init__JSON_value_index_offsets_0() +{ + return new byte [] { + 0, 0, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, + 31, 33, 35, 37, 39, 41, 43, 45, 47, 49 + }; +} + +private static final byte _JSON_value_index_offsets[] = init__JSON_value_index_offsets_0(); + + +private static byte[] init__JSON_value_trans_targs_0() +{ + return new byte [] { + 21, 21, 2, 9, 21, 11, 15, 18, 21, 21, 0, 3, + 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 21, + 0, 10, 0, 21, 0, 12, 0, 13, 0, 14, 0, 21, + 0, 16, 0, 17, 0, 21, 0, 19, 0, 20, 
0, 21, + 0, 0, 0 + }; +} + +private static final byte _JSON_value_trans_targs[] = init__JSON_value_trans_targs_0(); + + +private static byte[] init__JSON_value_trans_actions_0() +{ + return new byte [] { + 13, 11, 0, 0, 15, 0, 0, 0, 17, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, + 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 5, + 0, 0, 0 + }; +} + +private static final byte _JSON_value_trans_actions[] = init__JSON_value_trans_actions_0(); + + +private static byte[] init__JSON_value_from_state_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 19 + }; +} + +private static final byte _JSON_value_from_state_actions[] = init__JSON_value_from_state_actions_0(); + + +static final int JSON_value_start = 1; +static final int JSON_value_first_final = 21; +static final int JSON_value_error = 0; + +static final int JSON_value_en_main = 1; + + +// line 445 "Parser.rl" + + + void parseValue(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + +// line 443 "Parser.java" + { + cs = JSON_value_start; + } + +// line 452 "Parser.rl" + +// line 450 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _acts = _JSON_value_from_state_actions[cs]; + _nacts = (int) _JSON_value_actions[_acts++]; + while ( _nacts-- > 0 ) { + switch ( _JSON_value_actions[_acts++] ) { + case 9: +// line 430 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 482 "Parser.java" + } + } + + _match: do { + _keys = _JSON_value_key_offsets[cs]; + _trans = _JSON_value_index_offsets[cs]; + _klen = _JSON_value_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_value_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_value_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_value_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_value_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_value_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + cs = _JSON_value_trans_targs[_trans]; + + if ( _JSON_value_trans_actions[_trans] != 0 ) { + _acts = _JSON_value_trans_actions[_trans]; + _nacts = (int) _JSON_value_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_value_actions[_acts++] ) + { + case 0: +// line 347 "Parser.rl" + { + result = getRuntime().getNil(); + } + break; + case 1: +// line 350 "Parser.rl" + { + result = getRuntime().getFalse(); + } + break; + case 2: +// line 353 "Parser.rl" + { + result = getRuntime().getTrue(); + } + break; + case 3: +// line 356 "Parser.rl" + { + if (parser.allowNaN) { + result = getConstant(CONST_NAN); + } else { + throw unexpectedToken(p - 2, pe); + } + } + break; + case 4: +// line 363 
"Parser.rl" + { + if (parser.allowNaN) { + result = getConstant(CONST_INFINITY); + } else { + throw unexpectedToken(p - 7, pe); + } + } + break; + case 5: +// line 370 "Parser.rl" + { + if (pe > p + 8 && + absSubSequence(p, p + 9).equals(JSON_MINUS_INFINITY)) { + + if (parser.allowNaN) { + result = getConstant(CONST_MINUS_INFINITY); + {p = (( p + 10))-1;} + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + throw unexpectedToken(p, pe); + } + } + parseFloat(res, p, pe); + if (res.result != null) { + result = res.result; + {p = (( res.p))-1;} + } + parseInteger(res, p, pe); + if (res.result != null) { + result = res.result; + {p = (( res.p))-1;} + } + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; + case 6: +// line 396 "Parser.rl" + { + parseString(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; + case 7: +// line 406 "Parser.rl" + { + currentNesting++; + parseArray(res, p, pe); + currentNesting--; + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; + case 8: +// line 418 "Parser.rl" + { + currentNesting++; + parseObject(res, p, pe); + currentNesting--; + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; +// line 654 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 453 "Parser.rl" + + if (cs >= JSON_value_first_final && result != null) { + res.update(result, p); + } else { + res.update(null, p); + } + } + + +// line 684 "Parser.java" +private static byte[] init__JSON_integer_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_integer_actions[] = init__JSON_integer_actions_0(); + + +private static byte[] init__JSON_integer_key_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 9, 9 + }; +} + +private static final byte _JSON_integer_key_offsets[] = init__JSON_integer_key_offsets_0(); + + +private static char[] init__JSON_integer_trans_keys_0() +{ + return new char [] { + 45, 48, 49, 57, 48, 49, 57, 48, 57, 48, 57, 0 + }; +} + +private static final char _JSON_integer_trans_keys[] = init__JSON_integer_trans_keys_0(); + + +private static byte[] init__JSON_integer_single_lengths_0() +{ + return new byte [] { + 0, 2, 1, 0, 0, 0 + }; +} + +private static final byte _JSON_integer_single_lengths[] = init__JSON_integer_single_lengths_0(); + + +private static byte[] init__JSON_integer_range_lengths_0() +{ + return new byte [] { + 0, 1, 1, 1, 0, 1 + }; +} + +private static final byte _JSON_integer_range_lengths[] = init__JSON_integer_range_lengths_0(); + + +private static byte[] init__JSON_integer_index_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 9, 10 + }; +} + +private static final byte _JSON_integer_index_offsets[] = init__JSON_integer_index_offsets_0(); + + +private static byte[] init__JSON_integer_indicies_0() +{ + return new byte [] { + 0, 2, 3, 1, 2, 3, 1, 1, 4, 1, 3, 4, + 0 + }; +} + +private static final byte _JSON_integer_indicies[] = init__JSON_integer_indicies_0(); + + +private static byte[] init__JSON_integer_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 5, 4 + }; +} + +private static final 
byte _JSON_integer_trans_targs[] = init__JSON_integer_trans_targs_0(); + + +private static byte[] init__JSON_integer_trans_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_integer_trans_actions[] = init__JSON_integer_trans_actions_0(); + + +static final int JSON_integer_start = 1; +static final int JSON_integer_first_final = 3; +static final int JSON_integer_error = 0; + +static final int JSON_integer_en_main = 1; + + +// line 472 "Parser.rl" + + + void parseInteger(ParserResult res, int p, int pe) { + int new_p = parseIntegerInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + RubyInteger number = createInteger(p, new_p); + res.update(number, new_p + 1); + return; + } + + int parseIntegerInternal(int p, int pe) { + int cs = EVIL; + + +// line 801 "Parser.java" + { + cs = JSON_integer_start; + } + +// line 489 "Parser.rl" + int memo = p; + +// line 809 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_integer_key_offsets[cs]; + _trans = _JSON_integer_index_offsets[cs]; + _klen = _JSON_integer_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_integer_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_integer_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_integer_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_integer_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_integer_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_integer_indicies[_trans]; + cs = _JSON_integer_trans_targs[_trans]; + + if ( _JSON_integer_trans_actions[_trans] != 0 ) { + _acts = _JSON_integer_trans_actions[_trans]; + _nacts = (int) _JSON_integer_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_integer_actions[_acts++] ) + { + case 0: +// line 466 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 896 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 491 "Parser.rl" + + if (cs < JSON_integer_first_final) { + return -1; + } + + return p; + } + + RubyInteger createInteger(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return bytesToInum(runtime, num); + } + + RubyInteger bytesToInum(Ruby runtime, ByteList num) { + return runtime.is1_9() ? 
+ ConvertBytes.byteListToInum19(runtime, num, 10, true) : + ConvertBytes.byteListToInum(runtime, num, 10, true); + } + + +// line 938 "Parser.java" +private static byte[] init__JSON_float_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_float_actions[] = init__JSON_float_actions_0(); + + +private static byte[] init__JSON_float_key_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 10, 12, 16, 18, 23, 29, 29 + }; +} + +private static final byte _JSON_float_key_offsets[] = init__JSON_float_key_offsets_0(); + + +private static char[] init__JSON_float_trans_keys_0() +{ + return new char [] { + 45, 48, 49, 57, 48, 49, 57, 46, 69, 101, 48, 57, + 43, 45, 48, 57, 48, 57, 46, 69, 101, 48, 57, 69, + 101, 45, 46, 48, 57, 69, 101, 45, 46, 48, 57, 0 + }; +} + +private static final char _JSON_float_trans_keys[] = init__JSON_float_trans_keys_0(); + + +private static byte[] init__JSON_float_single_lengths_0() +{ + return new byte [] { + 0, 2, 1, 3, 0, 2, 0, 3, 2, 0, 2 + }; +} + +private static final byte _JSON_float_single_lengths[] = init__JSON_float_single_lengths_0(); + + +private static byte[] init__JSON_float_range_lengths_0() +{ + return new byte [] { + 0, 1, 1, 0, 1, 1, 1, 1, 2, 0, 2 + }; +} + +private static final byte _JSON_float_range_lengths[] = init__JSON_float_range_lengths_0(); + + +private static byte[] init__JSON_float_index_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 11, 13, 17, 19, 24, 29, 30 + }; +} + +private static final byte _JSON_float_index_offsets[] = init__JSON_float_index_offsets_0(); + + +private static byte[] init__JSON_float_indicies_0() +{ + return new byte [] { + 0, 2, 3, 1, 2, 3, 1, 4, 5, 5, 1, 6, + 1, 7, 7, 8, 1, 8, 1, 4, 5, 5, 3, 1, + 5, 5, 1, 6, 9, 1, 1, 1, 1, 8, 9, 0 + }; +} + +private static final byte _JSON_float_indicies[] = init__JSON_float_indicies_0(); + + +private static byte[] init__JSON_float_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 7, 4, 5, 8, 6, 10, 9 + }; +} + +private static final byte _JSON_float_trans_targs[] = init__JSON_float_trans_targs_0(); + + +private static byte[] init__JSON_float_trans_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_float_trans_actions[] = init__JSON_float_trans_actions_0(); + + +static final int JSON_float_start = 1; +static final int JSON_float_first_final = 8; +static final int JSON_float_error = 0; + +static final int JSON_float_en_main = 1; + + +// line 526 "Parser.rl" + + + void parseFloat(ParserResult res, int p, int pe) { + int new_p = parseFloatInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + IRubyObject number = parser.decimalClass == null ? 
+ createFloat(p, new_p) : createCustomDecimal(p, new_p); + + res.update(number, new_p + 1); + return; + } + + int parseFloatInternal(int p, int pe) { + int cs = EVIL; + + +// line 1060 "Parser.java" + { + cs = JSON_float_start; + } + +// line 545 "Parser.rl" + int memo = p; + +// line 1068 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_float_key_offsets[cs]; + _trans = _JSON_float_index_offsets[cs]; + _klen = _JSON_float_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_float_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_float_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_float_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_float_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_float_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_float_indicies[_trans]; + cs = _JSON_float_trans_targs[_trans]; + + if ( _JSON_float_trans_actions[_trans] != 0 ) { + _acts = _JSON_float_trans_actions[_trans]; + _nacts = (int) _JSON_float_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_float_actions[_acts++] ) + { + case 0: +// line 517 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1155 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 547 "Parser.rl" + + if (cs < JSON_float_first_final) { + return -1; + } + + return p; + } + + RubyFloat createFloat(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return RubyFloat.newFloat(runtime, dc.parse(num, true, runtime.is1_9())); + } + + IRubyObject createCustomDecimal(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + IRubyObject numString = runtime.newString(num.toString()); + return parser.decimalClass.callMethod(context, "new", numString); + } + + +// line 1198 "Parser.java" +private static byte[] init__JSON_string_actions_0() +{ + return new byte [] { + 0, 2, 0, 1 + }; +} + +private static final byte _JSON_string_actions[] = init__JSON_string_actions_0(); + + +private static byte[] init__JSON_string_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 5, 8, 14, 20, 26, 32 + }; +} + +private static final byte _JSON_string_key_offsets[] = init__JSON_string_key_offsets_0(); + + +private static char[] init__JSON_string_trans_keys_0() +{ + return new char [] { + 34, 34, 92, 0, 31, 117, 0, 31, 48, 57, 65, 70, + 97, 102, 48, 57, 65, 70, 97, 102, 48, 57, 65, 70, + 97, 102, 48, 57, 65, 70, 97, 102, 0 + }; +} + +private static final char 
_JSON_string_trans_keys[] = init__JSON_string_trans_keys_0(); + + +private static byte[] init__JSON_string_single_lengths_0() +{ + return new byte [] { + 0, 1, 2, 1, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_string_single_lengths[] = init__JSON_string_single_lengths_0(); + + +private static byte[] init__JSON_string_range_lengths_0() +{ + return new byte [] { + 0, 0, 1, 1, 3, 3, 3, 3, 0 + }; +} + +private static final byte _JSON_string_range_lengths[] = init__JSON_string_range_lengths_0(); + + +private static byte[] init__JSON_string_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 6, 9, 13, 17, 21, 25 + }; +} + +private static final byte _JSON_string_index_offsets[] = init__JSON_string_index_offsets_0(); + + +private static byte[] init__JSON_string_indicies_0() +{ + return new byte [] { + 0, 1, 2, 3, 1, 0, 4, 1, 0, 5, 5, 5, + 1, 6, 6, 6, 1, 7, 7, 7, 1, 0, 0, 0, + 1, 1, 0 + }; +} + +private static final byte _JSON_string_indicies[] = init__JSON_string_indicies_0(); + + +private static byte[] init__JSON_string_trans_targs_0() +{ + return new byte [] { + 2, 0, 8, 3, 4, 5, 6, 7 + }; +} + +private static final byte _JSON_string_trans_targs[] = init__JSON_string_trans_targs_0(); + + +private static byte[] init__JSON_string_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_string_trans_actions[] = init__JSON_string_trans_actions_0(); + + +static final int JSON_string_start = 1; +static final int JSON_string_first_final = 8; +static final int JSON_string_error = 0; + +static final int JSON_string_en_main = 1; + + +// line 599 "Parser.rl" + + + void parseString(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + +// line 1308 "Parser.java" + { + cs = JSON_string_start; + } + +// line 606 "Parser.rl" + int memo = p; + +// line 1316 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_string_key_offsets[cs]; + _trans = _JSON_string_index_offsets[cs]; + _klen = _JSON_string_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_string_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_string_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_string_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_string_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_string_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_string_indicies[_trans]; + cs = _JSON_string_trans_targs[_trans]; + + if ( _JSON_string_trans_actions[_trans] != 0 ) { + _acts = _JSON_string_trans_actions[_trans]; + _nacts = (int) _JSON_string_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_string_actions[_acts++] ) + { + case 0: 
+// line 574 "Parser.rl" + { + int offset = byteList.begin(); + ByteList decoded = decoder.decode(byteList, memo + 1 - offset, + p - offset); + result = getRuntime().newString(decoded); + if (result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + {p = (( p + 1))-1;} + } + } + break; + case 1: +// line 587 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1418 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 608 "Parser.rl" + + if (parser.createAdditions) { + RubyHash matchString = parser.match_string; + if (matchString != null) { + final IRubyObject[] memoArray = { result, null }; + try { + matchString.visitAll(new RubyHash.Visitor() { + @Override + public void visit(IRubyObject pattern, IRubyObject klass) { + if (pattern.callMethod(context, "===", memoArray[0]).isTrue()) { + memoArray[1] = klass; + throw JumpException.SPECIAL_JUMP; + } + } + }); + } catch (JumpException e) { } + if (memoArray[1] != null) { + RubyClass klass = (RubyClass) memoArray[1]; + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + result = klass.callMethod(context, "json_create", result); + } + } + } + } + + if (cs >= JSON_string_first_final && result != null) { + if (result instanceof RubyString) { + ((RubyString)result).force_encoding(context, info.utf8.get()); + } + res.update(result, p + 1); + } else { + res.update(null, p + 1); + } + } + + +// line 1476 "Parser.java" +private static byte[] init__JSON_array_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1 + }; +} + +private static final byte _JSON_array_actions[] = init__JSON_array_actions_0(); + + +private static byte[] init__JSON_array_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 18, 25, 41, 43, 44, 46, 47, 49, 50, + 52, 53, 55, 56, 58, 59 + }; +} + +private static final byte _JSON_array_key_offsets[] = init__JSON_array_key_offsets_0(); + + +private static char[] init__JSON_array_trans_keys_0() +{ + return new char [] { + 91, 13, 32, 34, 45, 47, 73, 78, 91, 93, 102, 110, + 116, 123, 9, 10, 48, 57, 13, 32, 44, 47, 93, 9, + 10, 13, 32, 34, 45, 47, 73, 78, 91, 102, 110, 116, + 123, 9, 10, 48, 57, 42, 47, 42, 42, 47, 10, 42, + 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, 10, 0 + }; +} + +private static final char _JSON_array_trans_keys[] = init__JSON_array_trans_keys_0(); + + +private static byte[] init__JSON_array_single_lengths_0() +{ + return new byte [] { + 0, 1, 13, 5, 12, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 0 + }; +} + +private static final byte _JSON_array_single_lengths[] = init__JSON_array_single_lengths_0(); + + +private static byte[] init__JSON_array_range_lengths_0() +{ + return new byte [] { + 0, 0, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_array_range_lengths[] = init__JSON_array_range_lengths_0(); + + +private static byte[] init__JSON_array_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 18, 25, 40, 43, 45, 48, 50, 53, 55, + 58, 60, 63, 65, 68, 70 + }; +} + +private static final byte _JSON_array_index_offsets[] = init__JSON_array_index_offsets_0(); + + +private static byte[] init__JSON_array_indicies_0() +{ + return new byte [] { + 0, 1, 0, 0, 2, 2, 3, 2, 2, 2, 4, 2, + 2, 2, 2, 0, 2, 1, 5, 5, 6, 7, 4, 5, + 1, 6, 6, 2, 2, 8, 2, 2, 2, 2, 2, 2, + 2, 6, 2, 1, 9, 10, 1, 11, 9, 11, 6, 9, + 6, 10, 
12, 13, 1, 14, 12, 14, 5, 12, 5, 13, + 15, 16, 1, 17, 15, 17, 0, 15, 0, 16, 1, 0 + }; +} + +private static final byte _JSON_array_indicies[] = init__JSON_array_indicies_0(); + + +private static byte[] init__JSON_array_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 13, 17, 3, 4, 9, 5, 6, 8, 7, + 10, 12, 11, 14, 16, 15 + }; +} + +private static final byte _JSON_array_trans_targs[] = init__JSON_array_trans_targs_0(); + + +private static byte[] init__JSON_array_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_array_trans_actions[] = init__JSON_array_trans_actions_0(); + + +static final int JSON_array_start = 1; +static final int JSON_array_first_final = 17; +static final int JSON_array_error = 0; + +static final int JSON_array_en_main = 1; + + +// line 681 "Parser.rl" + + + void parseArray(ParserResult res, int p, int pe) { + int cs = EVIL; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + IRubyObject result; + if (parser.arrayClass == getRuntime().getArray()) { + result = RubyArray.newArray(getRuntime()); + } else { + result = parser.arrayClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + +// line 1609 "Parser.java" + { + cs = JSON_array_start; + } + +// line 700 "Parser.rl" + +// line 1616 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_array_key_offsets[cs]; + _trans = _JSON_array_index_offsets[cs]; + _klen = _JSON_array_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_array_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_array_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_array_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_array_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_array_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_array_indicies[_trans]; + cs = _JSON_array_trans_targs[_trans]; + + if ( _JSON_array_trans_actions[_trans] != 0 ) { + _acts = _JSON_array_trans_actions[_trans]; + _nacts = (int) _JSON_array_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_array_actions[_acts++] ) + { + case 0: +// line 650 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + if (parser.arrayClass == getRuntime().getArray()) { + ((RubyArray)result).append(res.result); + } else { + result.callMethod(context, "<<", res.result); + } + {p = (( res.p))-1;} + } + } + break; + case 1: +// line 665 "Parser.rl" + { + p--; + { p += 1; 
_goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1720 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 701 "Parser.rl" + + if (cs >= JSON_array_first_final) { + res.update(result, p + 1); + } else { + throw unexpectedToken(p, pe); + } + } + + +// line 1750 "Parser.java" +private static byte[] init__JSON_object_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1, 1, 2 + }; +} + +private static final byte _JSON_object_actions[] = init__JSON_object_actions_0(); + + +private static byte[] init__JSON_object_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 8, 14, 16, 17, 19, 20, 36, 43, 49, + 51, 52, 54, 55, 57, 58, 60, 61, 63, 64, 66, 67, + 69, 70, 72, 73 + }; +} + +private static final byte _JSON_object_key_offsets[] = init__JSON_object_key_offsets_0(); + + +private static char[] init__JSON_object_trans_keys_0() +{ + return new char [] { + 123, 13, 32, 34, 47, 125, 9, 10, 13, 32, 47, 58, + 9, 10, 42, 47, 42, 42, 47, 10, 13, 32, 34, 45, + 47, 73, 78, 91, 102, 110, 116, 123, 9, 10, 48, 57, + 13, 32, 44, 47, 125, 9, 10, 13, 32, 34, 47, 9, + 10, 42, 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, + 10, 42, 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, + 10, 0 + }; +} + +private static final char _JSON_object_trans_keys[] = init__JSON_object_trans_keys_0(); + + +private static byte[] init__JSON_object_single_lengths_0() +{ + return new byte [] { + 0, 1, 5, 4, 2, 1, 2, 1, 12, 5, 4, 2, + 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 0 + }; +} + +private static final byte _JSON_object_single_lengths[] = init__JSON_object_single_lengths_0(); + + +private static byte[] init__JSON_object_range_lengths_0() +{ + return new byte [] { + 0, 0, 1, 1, 0, 0, 0, 0, 2, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }; +} + +private static final byte _JSON_object_range_lengths[] = init__JSON_object_range_lengths_0(); + + +private static byte[] init__JSON_object_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 9, 15, 18, 20, 23, 25, 40, 47, 53, + 56, 58, 61, 63, 66, 68, 71, 73, 76, 78, 81, 83, + 86, 88, 91, 93 + }; +} + +private static final byte _JSON_object_index_offsets[] = init__JSON_object_index_offsets_0(); + + +private static byte[] init__JSON_object_indicies_0() +{ + return new byte [] { + 0, 1, 0, 0, 2, 3, 4, 0, 1, 5, 5, 6, + 7, 5, 1, 8, 9, 1, 10, 8, 10, 5, 8, 5, + 9, 7, 7, 11, 11, 12, 11, 11, 11, 11, 11, 11, + 11, 7, 11, 1, 13, 13, 14, 15, 4, 13, 1, 14, + 14, 2, 16, 14, 1, 17, 18, 1, 19, 17, 19, 14, + 17, 14, 18, 20, 21, 1, 22, 20, 22, 13, 20, 13, + 21, 23, 24, 1, 25, 23, 25, 7, 23, 7, 24, 26, + 27, 1, 28, 26, 28, 0, 26, 0, 27, 1, 0 + }; +} + +private static final byte _JSON_object_indicies[] = init__JSON_object_indicies_0(); + + +private static byte[] init__JSON_object_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 23, 27, 3, 4, 8, 5, 7, 6, 9, + 19, 9, 10, 15, 11, 12, 14, 13, 16, 18, 17, 20, + 22, 21, 24, 26, 25 + }; +} + +private static final byte _JSON_object_trans_targs[] = init__JSON_object_trans_targs_0(); + + +private static byte[] init__JSON_object_trans_actions_0() +{ + return new byte [] { + 0, 0, 3, 0, 5, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_object_trans_actions[] = init__JSON_object_trans_actions_0(); + + +static final int JSON_object_start = 1; +static final int JSON_object_first_final = 27; +static final int 
JSON_object_error = 0; + +static final int JSON_object_en_main = 1; + + +// line 760 "Parser.rl" + + + void parseObject(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject lastName = null; + boolean objectDefault = true; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + // this is guaranteed to be a RubyHash due to the earlier + // allocator test at OptionsReader#getClass + IRubyObject result; + if (parser.objectClass == getRuntime().getHash()) { + result = RubyHash.newHash(getRuntime()); + } else { + objectDefault = false; + result = parser.objectClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + +// line 1898 "Parser.java" + { + cs = JSON_object_start; + } + +// line 784 "Parser.rl" + +// line 1905 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_object_key_offsets[cs]; + _trans = _JSON_object_index_offsets[cs]; + _klen = _JSON_object_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_object_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_object_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_object_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_object_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_object_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_object_indicies[_trans]; + cs = _JSON_object_trans_targs[_trans]; + + if ( _JSON_object_trans_actions[_trans] != 0 ) { + _acts = _JSON_object_trans_actions[_trans]; + _nacts = (int) _JSON_object_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_object_actions[_acts++] ) + { + case 0: +// line 715 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + if (parser.objectClass == getRuntime().getHash()) { + ((RubyHash)result).op_aset(context, lastName, res.result); + } else { + result.callMethod(context, "[]=", new IRubyObject[] { lastName, res.result }); + } + {p = (( res.p))-1;} + } + } + break; + case 1: +// line 730 "Parser.rl" + { + parseString(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + RubyString name = (RubyString)res.result; + if (parser.symbolizeNames) { + lastName = context.getRuntime().is1_9() + ? 
name.intern19() + : name.intern(); + } else { + lastName = name; + } + {p = (( res.p))-1;} + } + } + break; + case 2: +// line 748 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 2029 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 785 "Parser.rl" + + if (cs < JSON_object_first_final) { + res.update(null, p + 1); + return; + } + + IRubyObject returnedResult = result; + + // attempt to de-serialize object + if (parser.createAdditions) { + IRubyObject vKlassName; + if (objectDefault) { + vKlassName = ((RubyHash)result).op_aref(context, parser.createId); + } else { + vKlassName = result.callMethod(context, "[]", parser.createId); + } + + if (!vKlassName.isNil()) { + // might throw ArgumentError, we let it propagate + IRubyObject klass = parser.info.jsonModule.get(). + callMethod(context, "deep_const_get", vKlassName); + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + + returnedResult = klass.callMethod(context, "json_create", result); + } + } + } + res.update(returnedResult, p + 1); + } + + +// line 2082 "Parser.java" +private static byte[] init__JSON_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_actions[] = init__JSON_actions_0(); + + +private static byte[] init__JSON_key_offsets_0() +{ + return new byte [] { + 0, 0, 16, 18, 19, 21, 22, 24, 25, 27, 28 + }; +} + +private static final byte _JSON_key_offsets[] = init__JSON_key_offsets_0(); + + +private static char[] init__JSON_trans_keys_0() +{ + return new char [] { + 13, 32, 34, 45, 47, 73, 78, 91, 102, 110, 116, 123, + 9, 10, 48, 57, 42, 47, 42, 42, 47, 10, 42, 47, + 42, 42, 47, 10, 13, 32, 47, 9, 10, 0 + }; +} + +private static final char _JSON_trans_keys[] = init__JSON_trans_keys_0(); + + +private static byte[] init__JSON_single_lengths_0() +{ + return new byte [] { + 0, 12, 2, 1, 2, 1, 2, 1, 2, 1, 3 + }; +} + +private static final byte _JSON_single_lengths[] = init__JSON_single_lengths_0(); + + +private static byte[] init__JSON_range_lengths_0() +{ + return new byte [] { + 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_range_lengths[] = init__JSON_range_lengths_0(); + + +private static byte[] init__JSON_index_offsets_0() +{ + return new byte [] { + 0, 0, 15, 18, 20, 23, 25, 28, 30, 33, 35 + }; +} + +private static final byte _JSON_index_offsets[] = init__JSON_index_offsets_0(); + + +private static byte[] init__JSON_indicies_0() +{ + return new byte [] { + 0, 0, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, + 0, 2, 1, 4, 5, 1, 6, 4, 6, 7, 4, 7, + 5, 8, 9, 1, 10, 8, 10, 0, 8, 0, 9, 7, + 7, 11, 7, 1, 0 + }; +} + +private static final byte _JSON_indicies[] = init__JSON_indicies_0(); + + +private static byte[] init__JSON_trans_targs_0() +{ + return new byte [] { + 1, 0, 10, 6, 3, 5, 4, 10, 7, 9, 8, 2 + }; +} + +private static final byte _JSON_trans_targs[] = init__JSON_trans_targs_0(); + + +private static byte[] init__JSON_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_trans_actions[] = init__JSON_trans_actions_0(); + + +static final int JSON_start = 1; +static final int JSON_first_final = 10; +static final int JSON_error = 0; + +static final int JSON_en_main = 1; + + +// line 836 "Parser.rl" + + + public IRubyObject parseImplemetation() { + int cs = 
EVIL; + int p, pe; + IRubyObject result = null; + ParserResult res = new ParserResult(); + + +// line 2195 "Parser.java" + { + cs = JSON_start; + } + +// line 845 "Parser.rl" + p = byteList.begin(); + pe = p + byteList.length(); + +// line 2204 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_key_offsets[cs]; + _trans = _JSON_index_offsets[cs]; + _klen = _JSON_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_indicies[_trans]; + cs = _JSON_trans_targs[_trans]; + + if ( _JSON_trans_actions[_trans] != 0 ) { + _acts = _JSON_trans_actions[_trans]; + _nacts = (int) _JSON_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_actions[_acts++] ) + { + case 0: +// line 822 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; +// line 2297 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 848 "Parser.rl" + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + throw unexpectedToken(p, pe); + } + } + + public IRubyObject parse() { + return parseImplemetation(); + } + + /** + * Updates the "view" bytelist with the new offsets and returns it. + * @param start + * @param end + */ + private ByteList absSubSequence(int absStart, int absEnd) { + view.setBegin(absStart); + view.setRealSize(absEnd - absStart); + return view; + } + + /** + * Retrieves a constant directly descended from the JSON module. 
+ * @param name The constant name + */ + private IRubyObject getConstant(String name) { + return parser.info.jsonModule.get().getConstant(name); + } + + private RaiseException newException(String className, String message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, RubyString message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, + String messageBegin, ByteList messageEnd) { + return newException(className, + getRuntime().newString(messageBegin).cat(messageEnd)); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser 2.rl b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser 2.rl new file mode 100644 index 0000000..c0c72ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser 2.rl @@ -0,0 +1,893 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.JumpException; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; +import org.jruby.util.ConvertBytes; +import static org.jruby.util.ConvertDouble.DoubleConverter; + +/** + * The JSON::Ext::Parser class. + * + *

This is the JSON parser implemented as a Java class. To use it as the + * standard parser, set + *   JSON.parser = JSON::Ext::Parser
+ * This is performed for you when you include "json/ext". + * + *

This class does not perform the actual parsing, just acts as an interface + * to Ruby code. When the {@link #parse()} method is invoked, a + * Parser.ParserSession object is instantiated, which handles the process. + * + * @author mernen + */ +public class Parser extends RubyObject { + private final RuntimeInfo info; + private RubyString vSource; + private RubyString createId; + private boolean createAdditions; + private int maxNesting; + private boolean allowNaN; + private boolean symbolizeNames; + private RubyClass objectClass; + private RubyClass arrayClass; + private RubyClass decimalClass; + private RubyHash match_string; + + private static final int DEFAULT_MAX_NESTING = 100; + + private static final ByteList JSON_MINUS_INFINITY = new ByteList(ByteList.plain("-Infinity")); + // constant names in the JSON module containing those values + private static final String CONST_NAN = "NaN"; + private static final String CONST_INFINITY = "Infinity"; + private static final String CONST_MINUS_INFINITY = "MinusInfinity"; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Parser(runtime, klazz); + } + }; + + /** + * Multiple-value return for internal parser methods. + * + *

All the parseStuff methods return instances of + * ParserResult when successful, or null when + * there's a problem with the input data. + */ + static final class ParserResult { + /** + * The result of the successful parsing. Should never be + * null. + */ + IRubyObject result; + /** + * The point where the parser returned. + */ + int p; + + void update(IRubyObject result, int p) { + this.result = result; + this.p = p; + } + } + + public Parser(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + info = RuntimeInfo.forRuntime(runtime); + } + + /** + * Parser.new(source, opts = {}) + * + *

Creates a new JSON::Ext::Parser instance for the string + * source. + * It will be configured by the opts Hash. + * opts can have the following keys: + * + *

+ *
:max_nesting + *
The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, + * it defaults to 100. + * + *
:allow_nan + *
If set to true, allow NaN, + * Infinity and -Infinity in defiance of RFC 4627 + * to be parsed by the Parser. This option defaults to false. + * + *
:symbolize_names + *
If set to true, returns symbols for the names (keys) in + * a JSON object. Otherwise strings are returned, which is also the default. + * + *
:create_additions + *
If set to false, the Parser doesn't create additions + * even if a matching class and create_id was found. This option + * defaults to true. + * + *
:object_class + *
Defaults to Hash. + * + *
:array_class + *
Defaults to Array. + * + *
:decimal_class + *
Specifies which class to use instead of the default (Float) when + * parsing decimal numbers. This class must accept a single string argument + * in its constructor. + *
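+ *
+ * Illustrative note, not from the gem source: these options are normally
+ * supplied from Ruby through the JSON front end rather than by constructing
+ * JSON::Ext::Parser directly. A minimal sketch, assuming the json gem is
+ * available:
+ *
+ *   require 'json'
+ *   # :symbolize_names turns object keys into Symbols; input nested more
+ *   # deeply than :max_nesting raises JSON::NestingError.
+ *   JSON.parse('{"a": {"b": 1}}', :symbolize_names => true, :max_nesting => 10)
+ *   # => {:a=>{:b=>1}}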
+ */ + @JRubyMethod(name = "new", required = 1, optional = 1, meta = true) + public static IRubyObject newInstance(IRubyObject clazz, IRubyObject[] args, Block block) { + Parser parser = (Parser)((RubyClass)clazz).allocate(); + + parser.callInit(args, block); + + return parser; + } + + @JRubyMethod(required = 1, optional = 1, visibility = Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.getRuntime(); + if (this.vSource != null) { + throw runtime.newTypeError("already initialized instance"); + } + + OptionsReader opts = new OptionsReader(context, args.length > 1 ? args[1] : null); + this.maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + this.allowNaN = opts.getBool("allow_nan", false); + this.symbolizeNames = opts.getBool("symbolize_names", false); + this.createId = opts.getString("create_id", getCreateId(context)); + this.createAdditions = opts.getBool("create_additions", false); + this.objectClass = opts.getClass("object_class", runtime.getHash()); + this.arrayClass = opts.getClass("array_class", runtime.getArray()); + this.decimalClass = opts.getClass("decimal_class", null); + this.match_string = opts.getHash("match_string"); + + if(symbolizeNames && createAdditions) { + throw runtime.newArgumentError( + "options :symbolize_names and :create_additions cannot be " + + " used in conjunction" + ); + } + this.vSource = args[0].convertToString(); + this.vSource = convertEncoding(context, vSource); + + return this; + } + + /** + * Checks the given string's encoding. If a non-UTF-8 encoding is detected, + * a converted copy is returned. + * Returns the source string if no conversion is needed. + */ + private RubyString convertEncoding(ThreadContext context, RubyString source) { + RubyEncoding encoding = (RubyEncoding)source.encoding(context); + if (encoding == info.ascii8bit.get()) { + if (source.isFrozen()) { + source = (RubyString) source.dup(); + } + source.force_encoding(context, info.utf8.get()); + } else { + source = (RubyString) source.encode(context, info.utf8.get()); + } + return source; + } + + /** + * Checks the first four bytes of the given ByteList to infer its encoding, + * using the principle demonstrated on section 3 of RFC 4627 (JSON). + */ + private static String sniffByteList(ByteList bl) { + if (bl.length() < 4) return null; + if (bl.get(0) == 0 && bl.get(2) == 0) { + return bl.get(1) == 0 ? "utf-32be" : "utf-16be"; + } + if (bl.get(1) == 0 && bl.get(3) == 0) { + return bl.get(2) == 0 ? "utf-32le" : "utf-16le"; + } + return null; + } + + /** + * Assumes the given (binary) RubyString to be in the given encoding, then + * converts it to UTF-8. + */ + private RubyString reinterpretEncoding(ThreadContext context, + RubyString str, String sniffedEncoding) { + RubyEncoding actualEncoding = info.getEncoding(context, sniffedEncoding); + RubyEncoding targetEncoding = info.utf8.get(); + RubyString dup = (RubyString)str.dup(); + dup.force_encoding(context, actualEncoding); + return (RubyString)dup.encode_bang(context, targetEncoding); + } + + /** + * Parser#parse() + * + *

Parses the current JSON text source and returns the + * complete data structure as a result. + */ + @JRubyMethod + public IRubyObject parse(ThreadContext context) { + return new ParserSession(this, context, info).parse(); + } + + /** + * Parser#source() + * + *

Returns a copy of the current source string, that was + * used to construct this Parser. + */ + @JRubyMethod(name = "source") + public IRubyObject source_get() { + return checkAndGetSource().dup(); + } + + public RubyString checkAndGetSource() { + if (vSource != null) { + return vSource; + } else { + throw getRuntime().newTypeError("uninitialized instance"); + } + } + + /** + * Queries JSON.create_id. Returns null if it is + * set to nil or false, and a String if not. + */ + private RubyString getCreateId(ThreadContext context) { + IRubyObject v = info.jsonModule.get().callMethod(context, "create_id"); + return v.isTrue() ? v.convertToString() : null; + } + + /** + * A string parsing session. + * + *

Once a ParserSession is instantiated, the source string should not + * change until the parsing is complete. The ParserSession object assumes + * the source {@link RubyString} is still associated to its original + * {@link ByteList}, which in turn must still be bound to the same + * byte[] value (and on the same offset). + */ + // Ragel uses lots of fall-through + @SuppressWarnings("fallthrough") + private static class ParserSession { + private final Parser parser; + private final ThreadContext context; + private final RuntimeInfo info; + private final ByteList byteList; + private final ByteList view; + private final byte[] data; + private final StringDecoder decoder; + private int currentNesting = 0; + private final DoubleConverter dc; + + // initialization value for all state variables. + // no idea about the origins of this value, ask Flori ;) + private static final int EVIL = 0x666; + + private ParserSession(Parser parser, ThreadContext context, RuntimeInfo info) { + this.parser = parser; + this.context = context; + this.info = info; + this.byteList = parser.checkAndGetSource().getByteList(); + this.data = byteList.unsafeBytes(); + this.view = new ByteList(data, false); + this.decoder = new StringDecoder(context); + this.dc = new DoubleConverter(); + } + + private RaiseException unexpectedToken(int absStart, int absEnd) { + RubyString msg = getRuntime().newString("unexpected token at '") + .cat(data, absStart, absEnd - absStart) + .cat((byte)'\''); + return newException(Utils.M_PARSER_ERROR, msg); + } + + private Ruby getRuntime() { + return context.getRuntime(); + } + + %%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft"\-[{NI] | digit; + begin_object = '{'; + end_object = '}'; + begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; + }%% + + %%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + result = getRuntime().getNil(); + } + action parse_false { + result = getRuntime().getFalse(); + } + action parse_true { + result = getRuntime().getTrue(); + } + action parse_nan { + if (parser.allowNaN) { + result = getConstant(CONST_NAN); + } else { + throw unexpectedToken(p - 2, pe); + } + } + action parse_infinity { + if (parser.allowNaN) { + result = getConstant(CONST_INFINITY); + } else { + throw unexpectedToken(p - 7, pe); + } + } + action parse_number { + if (pe > fpc + 8 && + absSubSequence(fpc, fpc + 9).equals(JSON_MINUS_INFINITY)) { + + if (parser.allowNaN) { + result = getConstant(CONST_MINUS_INFINITY); + fexec p + 10; + fhold; + fbreak; + } else { + throw unexpectedToken(p, pe); + } + } + parseFloat(res, fpc, pe); + if (res.result != null) { + result = res.result; + fexec res.p; + } + parseInteger(res, fpc, pe); + if (res.result != null) { + result = res.result; + fexec res.p; + } + fhold; + fbreak; + } + action parse_string { + parseString(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + action parse_array { + currentNesting++; + parseArray(res, fpc, pe); + currentNesting--; + if (res.result == null) { + fhold; + fbreak; + } else { + result 
= res.result; + fexec res.p; + } + } + action parse_object { + currentNesting++; + parseObject(res, fpc, pe); + currentNesting--; + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + action exit { + fhold; + fbreak; + } + + main := ( Vnull @parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) %*exit; + }%% + + void parseValue(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + %% write init; + %% write exec; + + if (cs >= JSON_value_first_final && result != null) { + res.update(result, p); + } else { + res.update(null, p); + } + } + + %%{ + machine JSON_integer; + + write data; + + action exit { + fhold; + fbreak; + } + + main := '-'? ( '0' | [1-9][0-9]* ) ( ^[0-9]? @exit ); + }%% + + void parseInteger(ParserResult res, int p, int pe) { + int new_p = parseIntegerInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + RubyInteger number = createInteger(p, new_p); + res.update(number, new_p + 1); + return; + } + + int parseIntegerInternal(int p, int pe) { + int cs = EVIL; + + %% write init; + int memo = p; + %% write exec; + + if (cs < JSON_integer_first_final) { + return -1; + } + + return p; + } + + RubyInteger createInteger(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return bytesToInum(runtime, num); + } + + RubyInteger bytesToInum(Ruby runtime, ByteList num) { + return runtime.is1_9() ? + ConvertBytes.byteListToInum19(runtime, num, 10, true) : + ConvertBytes.byteListToInum(runtime, num, 10, true); + } + + %%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { + fhold; + fbreak; + } + + main := '-'? + ( ( ( '0' | [1-9][0-9]* ) '.' [0-9]+ ( [Ee] [+\-]?[0-9]+ )? ) + | ( ( '0' | [1-9][0-9]* ) ( [Ee] [+\-]? [0-9]+ ) ) ) + ( ^[0-9Ee.\-]? @exit ); + }%% + + void parseFloat(ParserResult res, int p, int pe) { + int new_p = parseFloatInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + IRubyObject number = parser.decimalClass == null ? 
+ createFloat(p, new_p) : createCustomDecimal(p, new_p); + + res.update(number, new_p + 1); + return; + } + + int parseFloatInternal(int p, int pe) { + int cs = EVIL; + + %% write init; + int memo = p; + %% write exec; + + if (cs < JSON_float_first_final) { + return -1; + } + + return p; + } + + RubyFloat createFloat(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return RubyFloat.newFloat(runtime, dc.parse(num, true, runtime.is1_9())); + } + + IRubyObject createCustomDecimal(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + IRubyObject numString = runtime.newString(num.toString()); + return parser.decimalClass.callMethod(context, "new", numString); + } + + %%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + int offset = byteList.begin(); + ByteList decoded = decoder.decode(byteList, memo + 1 - offset, + p - offset); + result = getRuntime().newString(decoded); + if (result == null) { + fhold; + fbreak; + } else { + fexec p + 1; + } + } + + action exit { + fhold; + fbreak; + } + + main := '"' + ( ( ^(["\\]|0..0x1f) + | '\\'["\\/bfnrt] + | '\\u'[0-9a-fA-F]{4} + | '\\'^(["\\/bfnrtu]|0..0x1f) + )* %parse_string + ) '"' @exit; + }%% + + void parseString(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + %% write init; + int memo = p; + %% write exec; + + if (parser.createAdditions) { + RubyHash matchString = parser.match_string; + if (matchString != null) { + final IRubyObject[] memoArray = { result, null }; + try { + matchString.visitAll(new RubyHash.Visitor() { + @Override + public void visit(IRubyObject pattern, IRubyObject klass) { + if (pattern.callMethod(context, "===", memoArray[0]).isTrue()) { + memoArray[1] = klass; + throw JumpException.SPECIAL_JUMP; + } + } + }); + } catch (JumpException e) { } + if (memoArray[1] != null) { + RubyClass klass = (RubyClass) memoArray[1]; + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + result = klass.callMethod(context, "json_create", result); + } + } + } + } + + if (cs >= JSON_string_first_final && result != null) { + if (result instanceof RubyString) { + ((RubyString)result).force_encoding(context, info.utf8.get()); + } + res.update(result, p + 1); + } else { + res.update(null, p + 1); + } + } + + %%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + if (parser.arrayClass == getRuntime().getArray()) { + ((RubyArray)result).append(res.result); + } else { + result.callMethod(context, "<<", res.result); + } + fexec res.p; + } + } + + action exit { + fhold; + fbreak; + } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array + ignore* + ( ( begin_value >parse_value + ignore* ) + ( ignore* + next_element + ignore* )* )? 
+ ignore* + end_array @exit; + }%% + + void parseArray(ParserResult res, int p, int pe) { + int cs = EVIL; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + IRubyObject result; + if (parser.arrayClass == getRuntime().getArray()) { + result = RubyArray.newArray(getRuntime()); + } else { + result = parser.arrayClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + %% write init; + %% write exec; + + if (cs >= JSON_array_first_final) { + res.update(result, p + 1); + } else { + throw unexpectedToken(p, pe); + } + } + + %%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + if (parser.objectClass == getRuntime().getHash()) { + ((RubyHash)result).op_aset(context, lastName, res.result); + } else { + result.callMethod(context, "[]=", new IRubyObject[] { lastName, res.result }); + } + fexec res.p; + } + } + + action parse_name { + parseString(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + RubyString name = (RubyString)res.result; + if (parser.symbolizeNames) { + lastName = context.getRuntime().is1_9() + ? name.intern19() + : name.intern(); + } else { + lastName = name; + } + fexec res.p; + } + } + + action exit { + fhold; + fbreak; + } + + pair = ignore* begin_name >parse_name ignore* name_separator + ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object (pair (next_pair)*)? ignore* end_object + ) @exit; + }%% + + void parseObject(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject lastName = null; + boolean objectDefault = true; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + // this is guaranteed to be a RubyHash due to the earlier + // allocator test at OptionsReader#getClass + IRubyObject result; + if (parser.objectClass == getRuntime().getHash()) { + result = RubyHash.newHash(getRuntime()); + } else { + objectDefault = false; + result = parser.objectClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + %% write init; + %% write exec; + + if (cs < JSON_object_first_final) { + res.update(null, p + 1); + return; + } + + IRubyObject returnedResult = result; + + // attempt to de-serialize object + if (parser.createAdditions) { + IRubyObject vKlassName; + if (objectDefault) { + vKlassName = ((RubyHash)result).op_aref(context, parser.createId); + } else { + vKlassName = result.callMethod(context, "[]", parser.createId); + } + + if (!vKlassName.isNil()) { + // might throw ArgumentError, we let it propagate + IRubyObject klass = parser.info.jsonModule.get(). 
+ callMethod(context, "deep_const_get", vKlassName); + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + + returnedResult = klass.callMethod(context, "json_create", result); + } + } + } + res.update(returnedResult, p + 1); + } + + %%{ + machine JSON; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + + main := ignore* + ( begin_value >parse_value) + ignore*; + }%% + + public IRubyObject parseImplemetation() { + int cs = EVIL; + int p, pe; + IRubyObject result = null; + ParserResult res = new ParserResult(); + + %% write init; + p = byteList.begin(); + pe = p + byteList.length(); + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + throw unexpectedToken(p, pe); + } + } + + public IRubyObject parse() { + return parseImplemetation(); + } + + /** + * Updates the "view" bytelist with the new offsets and returns it. + * @param start + * @param end + */ + private ByteList absSubSequence(int absStart, int absEnd) { + view.setBegin(absStart); + view.setRealSize(absEnd - absStart); + return view; + } + + /** + * Retrieves a constant directly descended from the JSON module. + * @param name The constant name + */ + private IRubyObject getConstant(String name) { + return parser.info.jsonModule.get().getConstant(name); + } + + private RaiseException newException(String className, String message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, RubyString message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, + String messageBegin, ByteList messageEnd) { + return newException(className, + getRuntime().newString(messageBegin).cat(messageEnd)); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser.java new file mode 100644 index 0000000..647afca --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser.java @@ -0,0 +1,2362 @@ + +// line 1 "Parser.rl" +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.JumpException; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; +import org.jruby.util.ConvertBytes; +import static org.jruby.util.ConvertDouble.DoubleConverter; + +/** + * The JSON::Ext::Parser class. + * + *

This is the JSON parser implemented as a Java class. To use it as the + * standard parser, set + *   JSON.parser = JSON::Ext::Parser
+ * This is performed for you when you include "json/ext". + * + *

This class does not perform the actual parsing, just acts as an interface + * to Ruby code. When the {@link #parse()} method is invoked, a + * Parser.ParserSession object is instantiated, which handles the process. + * + * @author mernen + */ +public class Parser extends RubyObject { + private final RuntimeInfo info; + private RubyString vSource; + private RubyString createId; + private boolean createAdditions; + private int maxNesting; + private boolean allowNaN; + private boolean symbolizeNames; + private RubyClass objectClass; + private RubyClass arrayClass; + private RubyClass decimalClass; + private RubyHash match_string; + + private static final int DEFAULT_MAX_NESTING = 100; + + private static final ByteList JSON_MINUS_INFINITY = new ByteList(ByteList.plain("-Infinity")); + // constant names in the JSON module containing those values + private static final String CONST_NAN = "NaN"; + private static final String CONST_INFINITY = "Infinity"; + private static final String CONST_MINUS_INFINITY = "MinusInfinity"; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Parser(runtime, klazz); + } + }; + + /** + * Multiple-value return for internal parser methods. + * + *

All the parseStuff methods return instances of + * ParserResult when successful, or null when + * there's a problem with the input data. + */ + static final class ParserResult { + /** + * The result of the successful parsing. Should never be + * null. + */ + IRubyObject result; + /** + * The point where the parser returned. + */ + int p; + + void update(IRubyObject result, int p) { + this.result = result; + this.p = p; + } + } + + public Parser(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + info = RuntimeInfo.forRuntime(runtime); + } + + /** + * Parser.new(source, opts = {}) + * + *

Creates a new JSON::Ext::Parser instance for the string + * source. + * It will be configured by the opts Hash. + * opts can have the following keys: + * + *

+ *
:max_nesting + *
The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, + * it defaults to 100. + * + *
:allow_nan + *
If set to true, allow NaN, + * Infinity and -Infinity in defiance of RFC 4627 + * to be parsed by the Parser. This option defaults to false. + * + *
:symbolize_names + *
If set to true, returns symbols for the names (keys) in + * a JSON object. Otherwise strings are returned, which is also the default. + * + *
:create_additions + *
If set to false, the Parser doesn't create additions + * even if a matching class and create_id was found. This option + * defaults to true. + * + *
:object_class + *
Defaults to Hash. + * + *
:array_class + *
Defaults to Array. + * + *
:decimal_class + *
Specifies which class to use instead of the default (Float) when + * parsing decimal numbers. This class must accept a single string argument + * in its constructor. + *
+ */ + @JRubyMethod(name = "new", required = 1, optional = 1, meta = true) + public static IRubyObject newInstance(IRubyObject clazz, IRubyObject[] args, Block block) { + Parser parser = (Parser)((RubyClass)clazz).allocate(); + + parser.callInit(args, block); + + return parser; + } + + @JRubyMethod(required = 1, optional = 1, visibility = Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.getRuntime(); + if (this.vSource != null) { + throw runtime.newTypeError("already initialized instance"); + } + + OptionsReader opts = new OptionsReader(context, args.length > 1 ? args[1] : null); + this.maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + this.allowNaN = opts.getBool("allow_nan", false); + this.symbolizeNames = opts.getBool("symbolize_names", false); + this.createId = opts.getString("create_id", getCreateId(context)); + this.createAdditions = opts.getBool("create_additions", false); + this.objectClass = opts.getClass("object_class", runtime.getHash()); + this.arrayClass = opts.getClass("array_class", runtime.getArray()); + this.decimalClass = opts.getClass("decimal_class", null); + this.match_string = opts.getHash("match_string"); + + if(symbolizeNames && createAdditions) { + throw runtime.newArgumentError( + "options :symbolize_names and :create_additions cannot be " + + " used in conjunction" + ); + } + this.vSource = args[0].convertToString(); + this.vSource = convertEncoding(context, vSource); + + return this; + } + + /** + * Checks the given string's encoding. If a non-UTF-8 encoding is detected, + * a converted copy is returned. + * Returns the source string if no conversion is needed. + */ + private RubyString convertEncoding(ThreadContext context, RubyString source) { + RubyEncoding encoding = (RubyEncoding)source.encoding(context); + if (encoding == info.ascii8bit.get()) { + if (source.isFrozen()) { + source = (RubyString) source.dup(); + } + source.force_encoding(context, info.utf8.get()); + } else { + source = (RubyString) source.encode(context, info.utf8.get()); + } + return source; + } + + /** + * Checks the first four bytes of the given ByteList to infer its encoding, + * using the principle demonstrated on section 3 of RFC 4627 (JSON). + */ + private static String sniffByteList(ByteList bl) { + if (bl.length() < 4) return null; + if (bl.get(0) == 0 && bl.get(2) == 0) { + return bl.get(1) == 0 ? "utf-32be" : "utf-16be"; + } + if (bl.get(1) == 0 && bl.get(3) == 0) { + return bl.get(2) == 0 ? "utf-32le" : "utf-16le"; + } + return null; + } + + /** + * Assumes the given (binary) RubyString to be in the given encoding, then + * converts it to UTF-8. + */ + private RubyString reinterpretEncoding(ThreadContext context, + RubyString str, String sniffedEncoding) { + RubyEncoding actualEncoding = info.getEncoding(context, sniffedEncoding); + RubyEncoding targetEncoding = info.utf8.get(); + RubyString dup = (RubyString)str.dup(); + dup.force_encoding(context, actualEncoding); + return (RubyString)dup.encode_bang(context, targetEncoding); + } + + /** + * Parser#parse() + * + *

Parses the current JSON text source and returns the + * complete data structure as a result. + */ + @JRubyMethod + public IRubyObject parse(ThreadContext context) { + return new ParserSession(this, context, info).parse(); + } + + /** + * Parser#source() + * + *

Returns a copy of the current source string, that was + * used to construct this Parser. + */ + @JRubyMethod(name = "source") + public IRubyObject source_get() { + return checkAndGetSource().dup(); + } + + public RubyString checkAndGetSource() { + if (vSource != null) { + return vSource; + } else { + throw getRuntime().newTypeError("uninitialized instance"); + } + } + + /** + * Queries JSON.create_id. Returns null if it is + * set to nil or false, and a String if not. + */ + private RubyString getCreateId(ThreadContext context) { + IRubyObject v = info.jsonModule.get().callMethod(context, "create_id"); + return v.isTrue() ? v.convertToString() : null; + } + + /** + * A string parsing session. + * + *

Once a ParserSession is instantiated, the source string should not + * change until the parsing is complete. The ParserSession object assumes + * the source {@link RubyString} is still associated to its original + * {@link ByteList}, which in turn must still be bound to the same + * byte[] value (and on the same offset). + */ + // Ragel uses lots of fall-through + @SuppressWarnings("fallthrough") + private static class ParserSession { + private final Parser parser; + private final ThreadContext context; + private final RuntimeInfo info; + private final ByteList byteList; + private final ByteList view; + private final byte[] data; + private final StringDecoder decoder; + private int currentNesting = 0; + private final DoubleConverter dc; + + // initialization value for all state variables. + // no idea about the origins of this value, ask Flori ;) + private static final int EVIL = 0x666; + + private ParserSession(Parser parser, ThreadContext context, RuntimeInfo info) { + this.parser = parser; + this.context = context; + this.info = info; + this.byteList = parser.checkAndGetSource().getByteList(); + this.data = byteList.unsafeBytes(); + this.view = new ByteList(data, false); + this.decoder = new StringDecoder(context); + this.dc = new DoubleConverter(); + } + + private RaiseException unexpectedToken(int absStart, int absEnd) { + RubyString msg = getRuntime().newString("unexpected token at '") + .cat(data, absStart, absEnd - absStart) + .cat((byte)'\''); + return newException(Utils.M_PARSER_ERROR, msg); + } + + private Ruby getRuntime() { + return context.getRuntime(); + } + + +// line 339 "Parser.rl" + + + +// line 321 "Parser.java" +private static byte[] init__JSON_value_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1, 1, 2, 1, 3, 1, 4, 1, + 5, 1, 6, 1, 7, 1, 8, 1, 9 + }; +} + +private static final byte _JSON_value_actions[] = init__JSON_value_actions_0(); + + +private static byte[] init__JSON_value_key_offsets_0() +{ + return new byte [] { + 0, 0, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 + }; +} + +private static final byte _JSON_value_key_offsets[] = init__JSON_value_key_offsets_0(); + + +private static char[] init__JSON_value_trans_keys_0() +{ + return new char [] { + 34, 45, 73, 78, 91, 102, 110, 116, 123, 48, 57, 110, + 102, 105, 110, 105, 116, 121, 97, 78, 97, 108, 115, 101, + 117, 108, 108, 114, 117, 101, 0 + }; +} + +private static final char _JSON_value_trans_keys[] = init__JSON_value_trans_keys_0(); + + +private static byte[] init__JSON_value_single_lengths_0() +{ + return new byte [] { + 0, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 + }; +} + +private static final byte _JSON_value_single_lengths[] = init__JSON_value_single_lengths_0(); + + +private static byte[] init__JSON_value_range_lengths_0() +{ + return new byte [] { + 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_value_range_lengths[] = init__JSON_value_range_lengths_0(); + + +private static byte[] init__JSON_value_index_offsets_0() +{ + return new byte [] { + 0, 0, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, + 31, 33, 35, 37, 39, 41, 43, 45, 47, 49 + }; +} + +private static final byte _JSON_value_index_offsets[] = init__JSON_value_index_offsets_0(); + + +private static byte[] init__JSON_value_trans_targs_0() +{ + return new byte [] { + 21, 21, 2, 9, 21, 11, 15, 18, 21, 21, 0, 3, + 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 21, + 0, 10, 0, 21, 0, 12, 0, 13, 0, 14, 0, 21, + 0, 16, 0, 17, 0, 21, 0, 19, 0, 20, 
0, 21, + 0, 0, 0 + }; +} + +private static final byte _JSON_value_trans_targs[] = init__JSON_value_trans_targs_0(); + + +private static byte[] init__JSON_value_trans_actions_0() +{ + return new byte [] { + 13, 11, 0, 0, 15, 0, 0, 0, 17, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, + 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 5, + 0, 0, 0 + }; +} + +private static final byte _JSON_value_trans_actions[] = init__JSON_value_trans_actions_0(); + + +private static byte[] init__JSON_value_from_state_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 19 + }; +} + +private static final byte _JSON_value_from_state_actions[] = init__JSON_value_from_state_actions_0(); + + +static final int JSON_value_start = 1; +static final int JSON_value_first_final = 21; +static final int JSON_value_error = 0; + +static final int JSON_value_en_main = 1; + + +// line 445 "Parser.rl" + + + void parseValue(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + +// line 443 "Parser.java" + { + cs = JSON_value_start; + } + +// line 452 "Parser.rl" + +// line 450 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _acts = _JSON_value_from_state_actions[cs]; + _nacts = (int) _JSON_value_actions[_acts++]; + while ( _nacts-- > 0 ) { + switch ( _JSON_value_actions[_acts++] ) { + case 9: +// line 430 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 482 "Parser.java" + } + } + + _match: do { + _keys = _JSON_value_key_offsets[cs]; + _trans = _JSON_value_index_offsets[cs]; + _klen = _JSON_value_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_value_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_value_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_value_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_value_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_value_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + cs = _JSON_value_trans_targs[_trans]; + + if ( _JSON_value_trans_actions[_trans] != 0 ) { + _acts = _JSON_value_trans_actions[_trans]; + _nacts = (int) _JSON_value_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_value_actions[_acts++] ) + { + case 0: +// line 347 "Parser.rl" + { + result = getRuntime().getNil(); + } + break; + case 1: +// line 350 "Parser.rl" + { + result = getRuntime().getFalse(); + } + break; + case 2: +// line 353 "Parser.rl" + { + result = getRuntime().getTrue(); + } + break; + case 3: +// line 356 "Parser.rl" + { + if (parser.allowNaN) { + result = getConstant(CONST_NAN); + } else { + throw unexpectedToken(p - 2, pe); + } + } + break; + case 4: +// line 363 
"Parser.rl" + { + if (parser.allowNaN) { + result = getConstant(CONST_INFINITY); + } else { + throw unexpectedToken(p - 7, pe); + } + } + break; + case 5: +// line 370 "Parser.rl" + { + if (pe > p + 8 && + absSubSequence(p, p + 9).equals(JSON_MINUS_INFINITY)) { + + if (parser.allowNaN) { + result = getConstant(CONST_MINUS_INFINITY); + {p = (( p + 10))-1;} + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + throw unexpectedToken(p, pe); + } + } + parseFloat(res, p, pe); + if (res.result != null) { + result = res.result; + {p = (( res.p))-1;} + } + parseInteger(res, p, pe); + if (res.result != null) { + result = res.result; + {p = (( res.p))-1;} + } + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; + case 6: +// line 396 "Parser.rl" + { + parseString(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; + case 7: +// line 406 "Parser.rl" + { + currentNesting++; + parseArray(res, p, pe); + currentNesting--; + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; + case 8: +// line 418 "Parser.rl" + { + currentNesting++; + parseObject(res, p, pe); + currentNesting--; + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; +// line 654 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 453 "Parser.rl" + + if (cs >= JSON_value_first_final && result != null) { + res.update(result, p); + } else { + res.update(null, p); + } + } + + +// line 684 "Parser.java" +private static byte[] init__JSON_integer_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_integer_actions[] = init__JSON_integer_actions_0(); + + +private static byte[] init__JSON_integer_key_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 9, 9 + }; +} + +private static final byte _JSON_integer_key_offsets[] = init__JSON_integer_key_offsets_0(); + + +private static char[] init__JSON_integer_trans_keys_0() +{ + return new char [] { + 45, 48, 49, 57, 48, 49, 57, 48, 57, 48, 57, 0 + }; +} + +private static final char _JSON_integer_trans_keys[] = init__JSON_integer_trans_keys_0(); + + +private static byte[] init__JSON_integer_single_lengths_0() +{ + return new byte [] { + 0, 2, 1, 0, 0, 0 + }; +} + +private static final byte _JSON_integer_single_lengths[] = init__JSON_integer_single_lengths_0(); + + +private static byte[] init__JSON_integer_range_lengths_0() +{ + return new byte [] { + 0, 1, 1, 1, 0, 1 + }; +} + +private static final byte _JSON_integer_range_lengths[] = init__JSON_integer_range_lengths_0(); + + +private static byte[] init__JSON_integer_index_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 9, 10 + }; +} + +private static final byte _JSON_integer_index_offsets[] = init__JSON_integer_index_offsets_0(); + + +private static byte[] init__JSON_integer_indicies_0() +{ + return new byte [] { + 0, 2, 3, 1, 2, 3, 1, 1, 4, 1, 3, 4, + 0 + }; +} + +private static final byte _JSON_integer_indicies[] = init__JSON_integer_indicies_0(); + + +private static byte[] init__JSON_integer_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 5, 4 + }; +} + +private static final 
byte _JSON_integer_trans_targs[] = init__JSON_integer_trans_targs_0(); + + +private static byte[] init__JSON_integer_trans_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_integer_trans_actions[] = init__JSON_integer_trans_actions_0(); + + +static final int JSON_integer_start = 1; +static final int JSON_integer_first_final = 3; +static final int JSON_integer_error = 0; + +static final int JSON_integer_en_main = 1; + + +// line 472 "Parser.rl" + + + void parseInteger(ParserResult res, int p, int pe) { + int new_p = parseIntegerInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + RubyInteger number = createInteger(p, new_p); + res.update(number, new_p + 1); + return; + } + + int parseIntegerInternal(int p, int pe) { + int cs = EVIL; + + +// line 801 "Parser.java" + { + cs = JSON_integer_start; + } + +// line 489 "Parser.rl" + int memo = p; + +// line 809 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_integer_key_offsets[cs]; + _trans = _JSON_integer_index_offsets[cs]; + _klen = _JSON_integer_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_integer_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_integer_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_integer_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_integer_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_integer_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_integer_indicies[_trans]; + cs = _JSON_integer_trans_targs[_trans]; + + if ( _JSON_integer_trans_actions[_trans] != 0 ) { + _acts = _JSON_integer_trans_actions[_trans]; + _nacts = (int) _JSON_integer_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_integer_actions[_acts++] ) + { + case 0: +// line 466 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 896 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 491 "Parser.rl" + + if (cs < JSON_integer_first_final) { + return -1; + } + + return p; + } + + RubyInteger createInteger(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return bytesToInum(runtime, num); + } + + RubyInteger bytesToInum(Ruby runtime, ByteList num) { + return runtime.is1_9() ? 
+ ConvertBytes.byteListToInum19(runtime, num, 10, true) : + ConvertBytes.byteListToInum(runtime, num, 10, true); + } + + +// line 938 "Parser.java" +private static byte[] init__JSON_float_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_float_actions[] = init__JSON_float_actions_0(); + + +private static byte[] init__JSON_float_key_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 10, 12, 16, 18, 23, 29, 29 + }; +} + +private static final byte _JSON_float_key_offsets[] = init__JSON_float_key_offsets_0(); + + +private static char[] init__JSON_float_trans_keys_0() +{ + return new char [] { + 45, 48, 49, 57, 48, 49, 57, 46, 69, 101, 48, 57, + 43, 45, 48, 57, 48, 57, 46, 69, 101, 48, 57, 69, + 101, 45, 46, 48, 57, 69, 101, 45, 46, 48, 57, 0 + }; +} + +private static final char _JSON_float_trans_keys[] = init__JSON_float_trans_keys_0(); + + +private static byte[] init__JSON_float_single_lengths_0() +{ + return new byte [] { + 0, 2, 1, 3, 0, 2, 0, 3, 2, 0, 2 + }; +} + +private static final byte _JSON_float_single_lengths[] = init__JSON_float_single_lengths_0(); + + +private static byte[] init__JSON_float_range_lengths_0() +{ + return new byte [] { + 0, 1, 1, 0, 1, 1, 1, 1, 2, 0, 2 + }; +} + +private static final byte _JSON_float_range_lengths[] = init__JSON_float_range_lengths_0(); + + +private static byte[] init__JSON_float_index_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 11, 13, 17, 19, 24, 29, 30 + }; +} + +private static final byte _JSON_float_index_offsets[] = init__JSON_float_index_offsets_0(); + + +private static byte[] init__JSON_float_indicies_0() +{ + return new byte [] { + 0, 2, 3, 1, 2, 3, 1, 4, 5, 5, 1, 6, + 1, 7, 7, 8, 1, 8, 1, 4, 5, 5, 3, 1, + 5, 5, 1, 6, 9, 1, 1, 1, 1, 8, 9, 0 + }; +} + +private static final byte _JSON_float_indicies[] = init__JSON_float_indicies_0(); + + +private static byte[] init__JSON_float_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 7, 4, 5, 8, 6, 10, 9 + }; +} + +private static final byte _JSON_float_trans_targs[] = init__JSON_float_trans_targs_0(); + + +private static byte[] init__JSON_float_trans_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_float_trans_actions[] = init__JSON_float_trans_actions_0(); + + +static final int JSON_float_start = 1; +static final int JSON_float_first_final = 8; +static final int JSON_float_error = 0; + +static final int JSON_float_en_main = 1; + + +// line 526 "Parser.rl" + + + void parseFloat(ParserResult res, int p, int pe) { + int new_p = parseFloatInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + IRubyObject number = parser.decimalClass == null ? 
+ createFloat(p, new_p) : createCustomDecimal(p, new_p); + + res.update(number, new_p + 1); + return; + } + + int parseFloatInternal(int p, int pe) { + int cs = EVIL; + + +// line 1060 "Parser.java" + { + cs = JSON_float_start; + } + +// line 545 "Parser.rl" + int memo = p; + +// line 1068 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_float_key_offsets[cs]; + _trans = _JSON_float_index_offsets[cs]; + _klen = _JSON_float_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_float_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_float_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_float_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_float_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_float_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_float_indicies[_trans]; + cs = _JSON_float_trans_targs[_trans]; + + if ( _JSON_float_trans_actions[_trans] != 0 ) { + _acts = _JSON_float_trans_actions[_trans]; + _nacts = (int) _JSON_float_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_float_actions[_acts++] ) + { + case 0: +// line 517 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1155 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 547 "Parser.rl" + + if (cs < JSON_float_first_final) { + return -1; + } + + return p; + } + + RubyFloat createFloat(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return RubyFloat.newFloat(runtime, dc.parse(num, true, runtime.is1_9())); + } + + IRubyObject createCustomDecimal(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + IRubyObject numString = runtime.newString(num.toString()); + return parser.decimalClass.callMethod(context, "new", numString); + } + + +// line 1198 "Parser.java" +private static byte[] init__JSON_string_actions_0() +{ + return new byte [] { + 0, 2, 0, 1 + }; +} + +private static final byte _JSON_string_actions[] = init__JSON_string_actions_0(); + + +private static byte[] init__JSON_string_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 5, 8, 14, 20, 26, 32 + }; +} + +private static final byte _JSON_string_key_offsets[] = init__JSON_string_key_offsets_0(); + + +private static char[] init__JSON_string_trans_keys_0() +{ + return new char [] { + 34, 34, 92, 0, 31, 117, 0, 31, 48, 57, 65, 70, + 97, 102, 48, 57, 65, 70, 97, 102, 48, 57, 65, 70, + 97, 102, 48, 57, 65, 70, 97, 102, 0 + }; +} + +private static final char 
_JSON_string_trans_keys[] = init__JSON_string_trans_keys_0(); + + +private static byte[] init__JSON_string_single_lengths_0() +{ + return new byte [] { + 0, 1, 2, 1, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_string_single_lengths[] = init__JSON_string_single_lengths_0(); + + +private static byte[] init__JSON_string_range_lengths_0() +{ + return new byte [] { + 0, 0, 1, 1, 3, 3, 3, 3, 0 + }; +} + +private static final byte _JSON_string_range_lengths[] = init__JSON_string_range_lengths_0(); + + +private static byte[] init__JSON_string_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 6, 9, 13, 17, 21, 25 + }; +} + +private static final byte _JSON_string_index_offsets[] = init__JSON_string_index_offsets_0(); + + +private static byte[] init__JSON_string_indicies_0() +{ + return new byte [] { + 0, 1, 2, 3, 1, 0, 4, 1, 0, 5, 5, 5, + 1, 6, 6, 6, 1, 7, 7, 7, 1, 0, 0, 0, + 1, 1, 0 + }; +} + +private static final byte _JSON_string_indicies[] = init__JSON_string_indicies_0(); + + +private static byte[] init__JSON_string_trans_targs_0() +{ + return new byte [] { + 2, 0, 8, 3, 4, 5, 6, 7 + }; +} + +private static final byte _JSON_string_trans_targs[] = init__JSON_string_trans_targs_0(); + + +private static byte[] init__JSON_string_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_string_trans_actions[] = init__JSON_string_trans_actions_0(); + + +static final int JSON_string_start = 1; +static final int JSON_string_first_final = 8; +static final int JSON_string_error = 0; + +static final int JSON_string_en_main = 1; + + +// line 599 "Parser.rl" + + + void parseString(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + +// line 1308 "Parser.java" + { + cs = JSON_string_start; + } + +// line 606 "Parser.rl" + int memo = p; + +// line 1316 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_string_key_offsets[cs]; + _trans = _JSON_string_index_offsets[cs]; + _klen = _JSON_string_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_string_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_string_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_string_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_string_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_string_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_string_indicies[_trans]; + cs = _JSON_string_trans_targs[_trans]; + + if ( _JSON_string_trans_actions[_trans] != 0 ) { + _acts = _JSON_string_trans_actions[_trans]; + _nacts = (int) _JSON_string_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_string_actions[_acts++] ) + { + case 0: 
+// line 574 "Parser.rl" + { + int offset = byteList.begin(); + ByteList decoded = decoder.decode(byteList, memo + 1 - offset, + p - offset); + result = getRuntime().newString(decoded); + if (result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + {p = (( p + 1))-1;} + } + } + break; + case 1: +// line 587 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1418 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 608 "Parser.rl" + + if (parser.createAdditions) { + RubyHash matchString = parser.match_string; + if (matchString != null) { + final IRubyObject[] memoArray = { result, null }; + try { + matchString.visitAll(new RubyHash.Visitor() { + @Override + public void visit(IRubyObject pattern, IRubyObject klass) { + if (pattern.callMethod(context, "===", memoArray[0]).isTrue()) { + memoArray[1] = klass; + throw JumpException.SPECIAL_JUMP; + } + } + }); + } catch (JumpException e) { } + if (memoArray[1] != null) { + RubyClass klass = (RubyClass) memoArray[1]; + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + result = klass.callMethod(context, "json_create", result); + } + } + } + } + + if (cs >= JSON_string_first_final && result != null) { + if (result instanceof RubyString) { + ((RubyString)result).force_encoding(context, info.utf8.get()); + } + res.update(result, p + 1); + } else { + res.update(null, p + 1); + } + } + + +// line 1476 "Parser.java" +private static byte[] init__JSON_array_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1 + }; +} + +private static final byte _JSON_array_actions[] = init__JSON_array_actions_0(); + + +private static byte[] init__JSON_array_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 18, 25, 41, 43, 44, 46, 47, 49, 50, + 52, 53, 55, 56, 58, 59 + }; +} + +private static final byte _JSON_array_key_offsets[] = init__JSON_array_key_offsets_0(); + + +private static char[] init__JSON_array_trans_keys_0() +{ + return new char [] { + 91, 13, 32, 34, 45, 47, 73, 78, 91, 93, 102, 110, + 116, 123, 9, 10, 48, 57, 13, 32, 44, 47, 93, 9, + 10, 13, 32, 34, 45, 47, 73, 78, 91, 102, 110, 116, + 123, 9, 10, 48, 57, 42, 47, 42, 42, 47, 10, 42, + 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, 10, 0 + }; +} + +private static final char _JSON_array_trans_keys[] = init__JSON_array_trans_keys_0(); + + +private static byte[] init__JSON_array_single_lengths_0() +{ + return new byte [] { + 0, 1, 13, 5, 12, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 0 + }; +} + +private static final byte _JSON_array_single_lengths[] = init__JSON_array_single_lengths_0(); + + +private static byte[] init__JSON_array_range_lengths_0() +{ + return new byte [] { + 0, 0, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_array_range_lengths[] = init__JSON_array_range_lengths_0(); + + +private static byte[] init__JSON_array_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 18, 25, 40, 43, 45, 48, 50, 53, 55, + 58, 60, 63, 65, 68, 70 + }; +} + +private static final byte _JSON_array_index_offsets[] = init__JSON_array_index_offsets_0(); + + +private static byte[] init__JSON_array_indicies_0() +{ + return new byte [] { + 0, 1, 0, 0, 2, 2, 3, 2, 2, 2, 4, 2, + 2, 2, 2, 0, 2, 1, 5, 5, 6, 7, 4, 5, + 1, 6, 6, 2, 2, 8, 2, 2, 2, 2, 2, 2, + 2, 6, 2, 1, 9, 10, 1, 11, 9, 11, 6, 9, + 6, 10, 
12, 13, 1, 14, 12, 14, 5, 12, 5, 13, + 15, 16, 1, 17, 15, 17, 0, 15, 0, 16, 1, 0 + }; +} + +private static final byte _JSON_array_indicies[] = init__JSON_array_indicies_0(); + + +private static byte[] init__JSON_array_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 13, 17, 3, 4, 9, 5, 6, 8, 7, + 10, 12, 11, 14, 16, 15 + }; +} + +private static final byte _JSON_array_trans_targs[] = init__JSON_array_trans_targs_0(); + + +private static byte[] init__JSON_array_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_array_trans_actions[] = init__JSON_array_trans_actions_0(); + + +static final int JSON_array_start = 1; +static final int JSON_array_first_final = 17; +static final int JSON_array_error = 0; + +static final int JSON_array_en_main = 1; + + +// line 681 "Parser.rl" + + + void parseArray(ParserResult res, int p, int pe) { + int cs = EVIL; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + IRubyObject result; + if (parser.arrayClass == getRuntime().getArray()) { + result = RubyArray.newArray(getRuntime()); + } else { + result = parser.arrayClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + +// line 1609 "Parser.java" + { + cs = JSON_array_start; + } + +// line 700 "Parser.rl" + +// line 1616 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_array_key_offsets[cs]; + _trans = _JSON_array_index_offsets[cs]; + _klen = _JSON_array_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_array_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_array_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_array_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_array_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_array_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_array_indicies[_trans]; + cs = _JSON_array_trans_targs[_trans]; + + if ( _JSON_array_trans_actions[_trans] != 0 ) { + _acts = _JSON_array_trans_actions[_trans]; + _nacts = (int) _JSON_array_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_array_actions[_acts++] ) + { + case 0: +// line 650 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + if (parser.arrayClass == getRuntime().getArray()) { + ((RubyArray)result).append(res.result); + } else { + result.callMethod(context, "<<", res.result); + } + {p = (( res.p))-1;} + } + } + break; + case 1: +// line 665 "Parser.rl" + { + p--; + { p += 1; 
_goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1720 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 701 "Parser.rl" + + if (cs >= JSON_array_first_final) { + res.update(result, p + 1); + } else { + throw unexpectedToken(p, pe); + } + } + + +// line 1750 "Parser.java" +private static byte[] init__JSON_object_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1, 1, 2 + }; +} + +private static final byte _JSON_object_actions[] = init__JSON_object_actions_0(); + + +private static byte[] init__JSON_object_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 8, 14, 16, 17, 19, 20, 36, 43, 49, + 51, 52, 54, 55, 57, 58, 60, 61, 63, 64, 66, 67, + 69, 70, 72, 73 + }; +} + +private static final byte _JSON_object_key_offsets[] = init__JSON_object_key_offsets_0(); + + +private static char[] init__JSON_object_trans_keys_0() +{ + return new char [] { + 123, 13, 32, 34, 47, 125, 9, 10, 13, 32, 47, 58, + 9, 10, 42, 47, 42, 42, 47, 10, 13, 32, 34, 45, + 47, 73, 78, 91, 102, 110, 116, 123, 9, 10, 48, 57, + 13, 32, 44, 47, 125, 9, 10, 13, 32, 34, 47, 9, + 10, 42, 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, + 10, 42, 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, + 10, 0 + }; +} + +private static final char _JSON_object_trans_keys[] = init__JSON_object_trans_keys_0(); + + +private static byte[] init__JSON_object_single_lengths_0() +{ + return new byte [] { + 0, 1, 5, 4, 2, 1, 2, 1, 12, 5, 4, 2, + 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 0 + }; +} + +private static final byte _JSON_object_single_lengths[] = init__JSON_object_single_lengths_0(); + + +private static byte[] init__JSON_object_range_lengths_0() +{ + return new byte [] { + 0, 0, 1, 1, 0, 0, 0, 0, 2, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }; +} + +private static final byte _JSON_object_range_lengths[] = init__JSON_object_range_lengths_0(); + + +private static byte[] init__JSON_object_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 9, 15, 18, 20, 23, 25, 40, 47, 53, + 56, 58, 61, 63, 66, 68, 71, 73, 76, 78, 81, 83, + 86, 88, 91, 93 + }; +} + +private static final byte _JSON_object_index_offsets[] = init__JSON_object_index_offsets_0(); + + +private static byte[] init__JSON_object_indicies_0() +{ + return new byte [] { + 0, 1, 0, 0, 2, 3, 4, 0, 1, 5, 5, 6, + 7, 5, 1, 8, 9, 1, 10, 8, 10, 5, 8, 5, + 9, 7, 7, 11, 11, 12, 11, 11, 11, 11, 11, 11, + 11, 7, 11, 1, 13, 13, 14, 15, 4, 13, 1, 14, + 14, 2, 16, 14, 1, 17, 18, 1, 19, 17, 19, 14, + 17, 14, 18, 20, 21, 1, 22, 20, 22, 13, 20, 13, + 21, 23, 24, 1, 25, 23, 25, 7, 23, 7, 24, 26, + 27, 1, 28, 26, 28, 0, 26, 0, 27, 1, 0 + }; +} + +private static final byte _JSON_object_indicies[] = init__JSON_object_indicies_0(); + + +private static byte[] init__JSON_object_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 23, 27, 3, 4, 8, 5, 7, 6, 9, + 19, 9, 10, 15, 11, 12, 14, 13, 16, 18, 17, 20, + 22, 21, 24, 26, 25 + }; +} + +private static final byte _JSON_object_trans_targs[] = init__JSON_object_trans_targs_0(); + + +private static byte[] init__JSON_object_trans_actions_0() +{ + return new byte [] { + 0, 0, 3, 0, 5, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_object_trans_actions[] = init__JSON_object_trans_actions_0(); + + +static final int JSON_object_start = 1; +static final int JSON_object_first_final = 27; +static final int 
JSON_object_error = 0; + +static final int JSON_object_en_main = 1; + + +// line 760 "Parser.rl" + + + void parseObject(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject lastName = null; + boolean objectDefault = true; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + // this is guaranteed to be a RubyHash due to the earlier + // allocator test at OptionsReader#getClass + IRubyObject result; + if (parser.objectClass == getRuntime().getHash()) { + result = RubyHash.newHash(getRuntime()); + } else { + objectDefault = false; + result = parser.objectClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + +// line 1898 "Parser.java" + { + cs = JSON_object_start; + } + +// line 784 "Parser.rl" + +// line 1905 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_object_key_offsets[cs]; + _trans = _JSON_object_index_offsets[cs]; + _klen = _JSON_object_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_object_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_object_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_object_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_object_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_object_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_object_indicies[_trans]; + cs = _JSON_object_trans_targs[_trans]; + + if ( _JSON_object_trans_actions[_trans] != 0 ) { + _acts = _JSON_object_trans_actions[_trans]; + _nacts = (int) _JSON_object_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_object_actions[_acts++] ) + { + case 0: +// line 715 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + if (parser.objectClass == getRuntime().getHash()) { + ((RubyHash)result).op_aset(context, lastName, res.result); + } else { + result.callMethod(context, "[]=", new IRubyObject[] { lastName, res.result }); + } + {p = (( res.p))-1;} + } + } + break; + case 1: +// line 730 "Parser.rl" + { + parseString(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + RubyString name = (RubyString)res.result; + if (parser.symbolizeNames) { + lastName = context.getRuntime().is1_9() + ? 
name.intern19() + : name.intern(); + } else { + lastName = name; + } + {p = (( res.p))-1;} + } + } + break; + case 2: +// line 748 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 2029 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 785 "Parser.rl" + + if (cs < JSON_object_first_final) { + res.update(null, p + 1); + return; + } + + IRubyObject returnedResult = result; + + // attempt to de-serialize object + if (parser.createAdditions) { + IRubyObject vKlassName; + if (objectDefault) { + vKlassName = ((RubyHash)result).op_aref(context, parser.createId); + } else { + vKlassName = result.callMethod(context, "[]", parser.createId); + } + + if (!vKlassName.isNil()) { + // might throw ArgumentError, we let it propagate + IRubyObject klass = parser.info.jsonModule.get(). + callMethod(context, "deep_const_get", vKlassName); + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + + returnedResult = klass.callMethod(context, "json_create", result); + } + } + } + res.update(returnedResult, p + 1); + } + + +// line 2082 "Parser.java" +private static byte[] init__JSON_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_actions[] = init__JSON_actions_0(); + + +private static byte[] init__JSON_key_offsets_0() +{ + return new byte [] { + 0, 0, 16, 18, 19, 21, 22, 24, 25, 27, 28 + }; +} + +private static final byte _JSON_key_offsets[] = init__JSON_key_offsets_0(); + + +private static char[] init__JSON_trans_keys_0() +{ + return new char [] { + 13, 32, 34, 45, 47, 73, 78, 91, 102, 110, 116, 123, + 9, 10, 48, 57, 42, 47, 42, 42, 47, 10, 42, 47, + 42, 42, 47, 10, 13, 32, 47, 9, 10, 0 + }; +} + +private static final char _JSON_trans_keys[] = init__JSON_trans_keys_0(); + + +private static byte[] init__JSON_single_lengths_0() +{ + return new byte [] { + 0, 12, 2, 1, 2, 1, 2, 1, 2, 1, 3 + }; +} + +private static final byte _JSON_single_lengths[] = init__JSON_single_lengths_0(); + + +private static byte[] init__JSON_range_lengths_0() +{ + return new byte [] { + 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_range_lengths[] = init__JSON_range_lengths_0(); + + +private static byte[] init__JSON_index_offsets_0() +{ + return new byte [] { + 0, 0, 15, 18, 20, 23, 25, 28, 30, 33, 35 + }; +} + +private static final byte _JSON_index_offsets[] = init__JSON_index_offsets_0(); + + +private static byte[] init__JSON_indicies_0() +{ + return new byte [] { + 0, 0, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, + 0, 2, 1, 4, 5, 1, 6, 4, 6, 7, 4, 7, + 5, 8, 9, 1, 10, 8, 10, 0, 8, 0, 9, 7, + 7, 11, 7, 1, 0 + }; +} + +private static final byte _JSON_indicies[] = init__JSON_indicies_0(); + + +private static byte[] init__JSON_trans_targs_0() +{ + return new byte [] { + 1, 0, 10, 6, 3, 5, 4, 10, 7, 9, 8, 2 + }; +} + +private static final byte _JSON_trans_targs[] = init__JSON_trans_targs_0(); + + +private static byte[] init__JSON_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_trans_actions[] = init__JSON_trans_actions_0(); + + +static final int JSON_start = 1; +static final int JSON_first_final = 10; +static final int JSON_error = 0; + +static final int JSON_en_main = 1; + + +// line 836 "Parser.rl" + + + public IRubyObject parseImplemetation() { + int cs = 
EVIL; + int p, pe; + IRubyObject result = null; + ParserResult res = new ParserResult(); + + +// line 2195 "Parser.java" + { + cs = JSON_start; + } + +// line 845 "Parser.rl" + p = byteList.begin(); + pe = p + byteList.length(); + +// line 2204 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_key_offsets[cs]; + _trans = _JSON_index_offsets[cs]; + _klen = _JSON_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_indicies[_trans]; + cs = _JSON_trans_targs[_trans]; + + if ( _JSON_trans_actions[_trans] != 0 ) { + _acts = _JSON_trans_actions[_trans]; + _nacts = (int) _JSON_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_actions[_acts++] ) + { + case 0: +// line 822 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; +// line 2297 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 848 "Parser.rl" + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + throw unexpectedToken(p, pe); + } + } + + public IRubyObject parse() { + return parseImplemetation(); + } + + /** + * Updates the "view" bytelist with the new offsets and returns it. + * @param start + * @param end + */ + private ByteList absSubSequence(int absStart, int absEnd) { + view.setBegin(absStart); + view.setRealSize(absEnd - absStart); + return view; + } + + /** + * Retrieves a constant directly descended from the JSON module. 
+ * @param name The constant name + */ + private IRubyObject getConstant(String name) { + return parser.info.jsonModule.get().getConstant(name); + } + + private RaiseException newException(String className, String message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, RubyString message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, + String messageBegin, ByteList messageEnd) { + return newException(className, + getRuntime().newString(messageBegin).cat(messageEnd)); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser.rl b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser.rl new file mode 100644 index 0000000..c0c72ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Parser.rl @@ -0,0 +1,893 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.JumpException; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; +import org.jruby.util.ConvertBytes; +import static org.jruby.util.ConvertDouble.DoubleConverter; + +/** + * The JSON::Ext::Parser class. + * + *

This is the JSON parser implemented as a Java class. To use it as the + * standard parser, set + * JSON.parser = JSON::Ext::Parser + * This is performed for you when you include "json/ext". + * + *
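As a quick illustration of the setup described above, the sketch below embeds a JRuby runtime, requires "json/ext" so that parsing is served by this Java extension, and parses a small document. It is only a sketch: the jruby-complete jar, an installed json gem, and the JsonExtDemo class name are assumptions, none of which are part of this diff.

```java
import org.jruby.embed.ScriptingContainer;

public class JsonExtDemo {
    public static void main(String[] args) {
        // Boot an embedded JRuby runtime (assumes jruby-complete is on the classpath).
        ScriptingContainer ruby = new ScriptingContainer();

        // Requiring "json/ext" loads ParserService, which defines JSON::Ext::Parser
        // and installs it as the active parser, as noted above.
        Object result = ruby.runScriptlet(
            "require 'json/ext'\n" +
            "JSON.parse('{\"a\": [1, 2.5, null]}')");

        System.out.println(result); // prints the parsed Hash
    }
}
```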

This class does not perform the actual parsing, just acts as an interface + * to Ruby code. When the {@link #parse()} method is invoked, a + * Parser.ParserSession object is instantiated, which handles the process. + * + * @author mernen + */ +public class Parser extends RubyObject { + private final RuntimeInfo info; + private RubyString vSource; + private RubyString createId; + private boolean createAdditions; + private int maxNesting; + private boolean allowNaN; + private boolean symbolizeNames; + private RubyClass objectClass; + private RubyClass arrayClass; + private RubyClass decimalClass; + private RubyHash match_string; + + private static final int DEFAULT_MAX_NESTING = 100; + + private static final ByteList JSON_MINUS_INFINITY = new ByteList(ByteList.plain("-Infinity")); + // constant names in the JSON module containing those values + private static final String CONST_NAN = "NaN"; + private static final String CONST_INFINITY = "Infinity"; + private static final String CONST_MINUS_INFINITY = "MinusInfinity"; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Parser(runtime, klazz); + } + }; + + /** + * Multiple-value return for internal parser methods. + * + *

All the parseStuff methods return instances of + * ParserResult when successful, or null when + * there's a problem with the input data. + */ + static final class ParserResult { + /** + * The result of the successful parsing. Should never be + * null. + */ + IRubyObject result; + /** + * The point where the parser returned. + */ + int p; + + void update(IRubyObject result, int p) { + this.result = result; + this.p = p; + } + } + + public Parser(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + info = RuntimeInfo.forRuntime(runtime); + } + + /** + * Parser.new(source, opts = {}) + * + *

Creates a new JSON::Ext::Parser instance for the string + * source. + * It will be configured by the opts Hash. + * opts can have the following keys: + * + *

+ *
:max_nesting + *
The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0; + * it defaults to 100. (A standalone sketch of this depth check follows + * this option list.) + * + *
:allow_nan + *
If set to true, allow NaN, + * Infinity and -Infinity in defiance of RFC 4627 + * to be parsed by the Parser. This option defaults to false. + * + *
:symbolize_names + *
If set to true, returns symbols for the names (keys) in + * a JSON object. Otherwise strings are returned, which is also the default. + * + *
:create_additions + *
If set to false, the Parser doesn't create additions + * even if a matching class and create_id was found. This option + * defaults to false. + * + *
:object_class + *
Defaults to Hash. + * + *
:array_class + *
Defaults to Array. + * + *
:decimal_class + *
Specifies which class to use instead of the default (Float) when + * parsing decimal numbers. This class must accept a single string argument + * in its constructor. + *
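The :max_nesting option is enforced further down in this file by the guard at the top of parseArray and parseObject, which compares currentNesting against parser.maxNesting and raises via Utils.M_NESTING_ERROR. The standalone sketch below mirrors that guard with plain Java types; the class and method names are illustrative only, and IllegalStateException stands in for the Ruby-level nesting error.

```java
// Minimal, dependency-free sketch of the :max_nesting guard used by
// parseArray/parseObject; names here are illustrative, not part of the gem.
public class NestingGuardSketch {
    static final int MAX_NESTING = 100;   // the documented default
    static int currentNesting = 0;

    static void enterContainer() {
        currentNesting++;
        // A maxNesting <= 0 means "disabled", matching :max_nesting => false|nil|0.
        if (MAX_NESTING > 0 && currentNesting > MAX_NESTING) {
            throw new IllegalStateException(
                "nesting of " + currentNesting + " is too deep");
        }
    }

    static void leaveContainer() {
        currentNesting--;
    }

    public static void main(String[] args) {
        // Simulate a document nested 101 arrays deep: the 101st enter throws.
        try {
            for (int depth = 0; depth < 101; depth++) enterContainer();
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // nesting of 101 is too deep
        }
    }
}
```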
+ */ + @JRubyMethod(name = "new", required = 1, optional = 1, meta = true) + public static IRubyObject newInstance(IRubyObject clazz, IRubyObject[] args, Block block) { + Parser parser = (Parser)((RubyClass)clazz).allocate(); + + parser.callInit(args, block); + + return parser; + } + + @JRubyMethod(required = 1, optional = 1, visibility = Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.getRuntime(); + if (this.vSource != null) { + throw runtime.newTypeError("already initialized instance"); + } + + OptionsReader opts = new OptionsReader(context, args.length > 1 ? args[1] : null); + this.maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + this.allowNaN = opts.getBool("allow_nan", false); + this.symbolizeNames = opts.getBool("symbolize_names", false); + this.createId = opts.getString("create_id", getCreateId(context)); + this.createAdditions = opts.getBool("create_additions", false); + this.objectClass = opts.getClass("object_class", runtime.getHash()); + this.arrayClass = opts.getClass("array_class", runtime.getArray()); + this.decimalClass = opts.getClass("decimal_class", null); + this.match_string = opts.getHash("match_string"); + + if(symbolizeNames && createAdditions) { + throw runtime.newArgumentError( + "options :symbolize_names and :create_additions cannot be " + + " used in conjunction" + ); + } + this.vSource = args[0].convertToString(); + this.vSource = convertEncoding(context, vSource); + + return this; + } + + /** + * Checks the given string's encoding. If a non-UTF-8 encoding is detected, + * a converted copy is returned. + * Returns the source string if no conversion is needed. + */ + private RubyString convertEncoding(ThreadContext context, RubyString source) { + RubyEncoding encoding = (RubyEncoding)source.encoding(context); + if (encoding == info.ascii8bit.get()) { + if (source.isFrozen()) { + source = (RubyString) source.dup(); + } + source.force_encoding(context, info.utf8.get()); + } else { + source = (RubyString) source.encode(context, info.utf8.get()); + } + return source; + } + + /** + * Checks the first four bytes of the given ByteList to infer its encoding, + * using the principle demonstrated on section 3 of RFC 4627 (JSON). + */ + private static String sniffByteList(ByteList bl) { + if (bl.length() < 4) return null; + if (bl.get(0) == 0 && bl.get(2) == 0) { + return bl.get(1) == 0 ? "utf-32be" : "utf-16be"; + } + if (bl.get(1) == 0 && bl.get(3) == 0) { + return bl.get(2) == 0 ? "utf-32le" : "utf-16le"; + } + return null; + } + + /** + * Assumes the given (binary) RubyString to be in the given encoding, then + * converts it to UTF-8. + */ + private RubyString reinterpretEncoding(ThreadContext context, + RubyString str, String sniffedEncoding) { + RubyEncoding actualEncoding = info.getEncoding(context, sniffedEncoding); + RubyEncoding targetEncoding = info.utf8.get(); + RubyString dup = (RubyString)str.dup(); + dup.force_encoding(context, actualEncoding); + return (RubyString)dup.encode_bang(context, targetEncoding); + } + + /** + * Parser#parse() + * + *

Parses the current JSON text source and returns the + * complete data structure as a result. + */ + @JRubyMethod + public IRubyObject parse(ThreadContext context) { + return new ParserSession(this, context, info).parse(); + } + + /** + * Parser#source() + * + *

Returns a copy of the current source string, that was + * used to construct this Parser. + */ + @JRubyMethod(name = "source") + public IRubyObject source_get() { + return checkAndGetSource().dup(); + } + + public RubyString checkAndGetSource() { + if (vSource != null) { + return vSource; + } else { + throw getRuntime().newTypeError("uninitialized instance"); + } + } + + /** + * Queries JSON.create_id. Returns null if it is + * set to nil or false, and a String if not. + */ + private RubyString getCreateId(ThreadContext context) { + IRubyObject v = info.jsonModule.get().callMethod(context, "create_id"); + return v.isTrue() ? v.convertToString() : null; + } + + /** + * A string parsing session. + * + *

Once a ParserSession is instantiated, the source string should not + * change until the parsing is complete. The ParserSession object assumes + * the source {@link RubyString} is still associated to its original + * {@link ByteList}, which in turn must still be bound to the same + * byte[] value (and on the same offset). + */ + // Ragel uses lots of fall-through + @SuppressWarnings("fallthrough") + private static class ParserSession { + private final Parser parser; + private final ThreadContext context; + private final RuntimeInfo info; + private final ByteList byteList; + private final ByteList view; + private final byte[] data; + private final StringDecoder decoder; + private int currentNesting = 0; + private final DoubleConverter dc; + + // initialization value for all state variables. + // no idea about the origins of this value, ask Flori ;) + private static final int EVIL = 0x666; + + private ParserSession(Parser parser, ThreadContext context, RuntimeInfo info) { + this.parser = parser; + this.context = context; + this.info = info; + this.byteList = parser.checkAndGetSource().getByteList(); + this.data = byteList.unsafeBytes(); + this.view = new ByteList(data, false); + this.decoder = new StringDecoder(context); + this.dc = new DoubleConverter(); + } + + private RaiseException unexpectedToken(int absStart, int absEnd) { + RubyString msg = getRuntime().newString("unexpected token at '") + .cat(data, absStart, absEnd - absStart) + .cat((byte)'\''); + return newException(Utils.M_PARSER_ERROR, msg); + } + + private Ruby getRuntime() { + return context.getRuntime(); + } + + %%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft"\-[{NI] | digit; + begin_object = '{'; + end_object = '}'; + begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; + }%% + + %%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + result = getRuntime().getNil(); + } + action parse_false { + result = getRuntime().getFalse(); + } + action parse_true { + result = getRuntime().getTrue(); + } + action parse_nan { + if (parser.allowNaN) { + result = getConstant(CONST_NAN); + } else { + throw unexpectedToken(p - 2, pe); + } + } + action parse_infinity { + if (parser.allowNaN) { + result = getConstant(CONST_INFINITY); + } else { + throw unexpectedToken(p - 7, pe); + } + } + action parse_number { + if (pe > fpc + 8 && + absSubSequence(fpc, fpc + 9).equals(JSON_MINUS_INFINITY)) { + + if (parser.allowNaN) { + result = getConstant(CONST_MINUS_INFINITY); + fexec p + 10; + fhold; + fbreak; + } else { + throw unexpectedToken(p, pe); + } + } + parseFloat(res, fpc, pe); + if (res.result != null) { + result = res.result; + fexec res.p; + } + parseInteger(res, fpc, pe); + if (res.result != null) { + result = res.result; + fexec res.p; + } + fhold; + fbreak; + } + action parse_string { + parseString(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + action parse_array { + currentNesting++; + parseArray(res, fpc, pe); + currentNesting--; + if (res.result == null) { + fhold; + fbreak; + } else { + result 
= res.result; + fexec res.p; + } + } + action parse_object { + currentNesting++; + parseObject(res, fpc, pe); + currentNesting--; + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + action exit { + fhold; + fbreak; + } + + main := ( Vnull @parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) %*exit; + }%% + + void parseValue(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + %% write init; + %% write exec; + + if (cs >= JSON_value_first_final && result != null) { + res.update(result, p); + } else { + res.update(null, p); + } + } + + %%{ + machine JSON_integer; + + write data; + + action exit { + fhold; + fbreak; + } + + main := '-'? ( '0' | [1-9][0-9]* ) ( ^[0-9]? @exit ); + }%% + + void parseInteger(ParserResult res, int p, int pe) { + int new_p = parseIntegerInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + RubyInteger number = createInteger(p, new_p); + res.update(number, new_p + 1); + return; + } + + int parseIntegerInternal(int p, int pe) { + int cs = EVIL; + + %% write init; + int memo = p; + %% write exec; + + if (cs < JSON_integer_first_final) { + return -1; + } + + return p; + } + + RubyInteger createInteger(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return bytesToInum(runtime, num); + } + + RubyInteger bytesToInum(Ruby runtime, ByteList num) { + return runtime.is1_9() ? + ConvertBytes.byteListToInum19(runtime, num, 10, true) : + ConvertBytes.byteListToInum(runtime, num, 10, true); + } + + %%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { + fhold; + fbreak; + } + + main := '-'? + ( ( ( '0' | [1-9][0-9]* ) '.' [0-9]+ ( [Ee] [+\-]?[0-9]+ )? ) + | ( ( '0' | [1-9][0-9]* ) ( [Ee] [+\-]? [0-9]+ ) ) ) + ( ^[0-9Ee.\-]? @exit ); + }%% + + void parseFloat(ParserResult res, int p, int pe) { + int new_p = parseFloatInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + IRubyObject number = parser.decimalClass == null ? 
+ createFloat(p, new_p) : createCustomDecimal(p, new_p); + + res.update(number, new_p + 1); + return; + } + + int parseFloatInternal(int p, int pe) { + int cs = EVIL; + + %% write init; + int memo = p; + %% write exec; + + if (cs < JSON_float_first_final) { + return -1; + } + + return p; + } + + RubyFloat createFloat(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return RubyFloat.newFloat(runtime, dc.parse(num, true, runtime.is1_9())); + } + + IRubyObject createCustomDecimal(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + IRubyObject numString = runtime.newString(num.toString()); + return parser.decimalClass.callMethod(context, "new", numString); + } + + %%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + int offset = byteList.begin(); + ByteList decoded = decoder.decode(byteList, memo + 1 - offset, + p - offset); + result = getRuntime().newString(decoded); + if (result == null) { + fhold; + fbreak; + } else { + fexec p + 1; + } + } + + action exit { + fhold; + fbreak; + } + + main := '"' + ( ( ^(["\\]|0..0x1f) + | '\\'["\\/bfnrt] + | '\\u'[0-9a-fA-F]{4} + | '\\'^(["\\/bfnrtu]|0..0x1f) + )* %parse_string + ) '"' @exit; + }%% + + void parseString(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + %% write init; + int memo = p; + %% write exec; + + if (parser.createAdditions) { + RubyHash matchString = parser.match_string; + if (matchString != null) { + final IRubyObject[] memoArray = { result, null }; + try { + matchString.visitAll(new RubyHash.Visitor() { + @Override + public void visit(IRubyObject pattern, IRubyObject klass) { + if (pattern.callMethod(context, "===", memoArray[0]).isTrue()) { + memoArray[1] = klass; + throw JumpException.SPECIAL_JUMP; + } + } + }); + } catch (JumpException e) { } + if (memoArray[1] != null) { + RubyClass klass = (RubyClass) memoArray[1]; + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + result = klass.callMethod(context, "json_create", result); + } + } + } + } + + if (cs >= JSON_string_first_final && result != null) { + if (result instanceof RubyString) { + ((RubyString)result).force_encoding(context, info.utf8.get()); + } + res.update(result, p + 1); + } else { + res.update(null, p + 1); + } + } + + %%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + if (parser.arrayClass == getRuntime().getArray()) { + ((RubyArray)result).append(res.result); + } else { + result.callMethod(context, "<<", res.result); + } + fexec res.p; + } + } + + action exit { + fhold; + fbreak; + } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array + ignore* + ( ( begin_value >parse_value + ignore* ) + ( ignore* + next_element + ignore* )* )? 
+ ignore* + end_array @exit; + }%% + + void parseArray(ParserResult res, int p, int pe) { + int cs = EVIL; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + IRubyObject result; + if (parser.arrayClass == getRuntime().getArray()) { + result = RubyArray.newArray(getRuntime()); + } else { + result = parser.arrayClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + %% write init; + %% write exec; + + if (cs >= JSON_array_first_final) { + res.update(result, p + 1); + } else { + throw unexpectedToken(p, pe); + } + } + + %%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + if (parser.objectClass == getRuntime().getHash()) { + ((RubyHash)result).op_aset(context, lastName, res.result); + } else { + result.callMethod(context, "[]=", new IRubyObject[] { lastName, res.result }); + } + fexec res.p; + } + } + + action parse_name { + parseString(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + RubyString name = (RubyString)res.result; + if (parser.symbolizeNames) { + lastName = context.getRuntime().is1_9() + ? name.intern19() + : name.intern(); + } else { + lastName = name; + } + fexec res.p; + } + } + + action exit { + fhold; + fbreak; + } + + pair = ignore* begin_name >parse_name ignore* name_separator + ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object (pair (next_pair)*)? ignore* end_object + ) @exit; + }%% + + void parseObject(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject lastName = null; + boolean objectDefault = true; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + // this is guaranteed to be a RubyHash due to the earlier + // allocator test at OptionsReader#getClass + IRubyObject result; + if (parser.objectClass == getRuntime().getHash()) { + result = RubyHash.newHash(getRuntime()); + } else { + objectDefault = false; + result = parser.objectClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + %% write init; + %% write exec; + + if (cs < JSON_object_first_final) { + res.update(null, p + 1); + return; + } + + IRubyObject returnedResult = result; + + // attempt to de-serialize object + if (parser.createAdditions) { + IRubyObject vKlassName; + if (objectDefault) { + vKlassName = ((RubyHash)result).op_aref(context, parser.createId); + } else { + vKlassName = result.callMethod(context, "[]", parser.createId); + } + + if (!vKlassName.isNil()) { + // might throw ArgumentError, we let it propagate + IRubyObject klass = parser.info.jsonModule.get(). 
+ callMethod(context, "deep_const_get", vKlassName); + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + + returnedResult = klass.callMethod(context, "json_create", result); + } + } + } + res.update(returnedResult, p + 1); + } + + %%{ + machine JSON; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + + main := ignore* + ( begin_value >parse_value) + ignore*; + }%% + + public IRubyObject parseImplemetation() { + int cs = EVIL; + int p, pe; + IRubyObject result = null; + ParserResult res = new ParserResult(); + + %% write init; + p = byteList.begin(); + pe = p + byteList.length(); + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + throw unexpectedToken(p, pe); + } + } + + public IRubyObject parse() { + return parseImplemetation(); + } + + /** + * Updates the "view" bytelist with the new offsets and returns it. + * @param start + * @param end + */ + private ByteList absSubSequence(int absStart, int absEnd) { + view.setBegin(absStart); + view.setRealSize(absEnd - absStart); + return view; + } + + /** + * Retrieves a constant directly descended from the JSON module. + * @param name The constant name + */ + private IRubyObject getConstant(String name) { + return parser.info.jsonModule.get().getConstant(name); + } + + private RaiseException newException(String className, String message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, RubyString message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, + String messageBegin, ByteList messageEnd) { + return newException(className, + getRuntime().newString(messageBegin).cat(messageEnd)); + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ParserService 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ParserService 2.java new file mode 100644 index 0000000..b6015f9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ParserService 2.java @@ -0,0 +1,34 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.io.IOException; +import java.lang.ref.WeakReference; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.runtime.load.BasicLibraryService; + +/** + * The service invoked by JRuby's {@link org.jruby.runtime.load.LoadService LoadService}. + * Defines the JSON::Ext::Parser class. 
+ * @author mernen + */ +public class ParserService implements BasicLibraryService { + public boolean basicLoad(Ruby runtime) throws IOException { + runtime.getLoadService().require("json/common"); + RuntimeInfo info = RuntimeInfo.initRuntime(runtime); + + info.jsonModule = new WeakReference(runtime.defineModule("JSON")); + RubyModule jsonExtModule = info.jsonModule.get().defineModuleUnder("Ext"); + RubyClass parserClass = + jsonExtModule.defineClassUnder("Parser", runtime.getObject(), + Parser.ALLOCATOR); + parserClass.defineAnnotatedMethods(Parser.class); + return true; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ParserService.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ParserService.java new file mode 100644 index 0000000..b6015f9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/ParserService.java @@ -0,0 +1,34 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.io.IOException; +import java.lang.ref.WeakReference; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.runtime.load.BasicLibraryService; + +/** + * The service invoked by JRuby's {@link org.jruby.runtime.load.LoadService LoadService}. + * Defines the JSON::Ext::Parser class. + * @author mernen + */ +public class ParserService implements BasicLibraryService { + public boolean basicLoad(Ruby runtime) throws IOException { + runtime.getLoadService().require("json/common"); + RuntimeInfo info = RuntimeInfo.initRuntime(runtime); + + info.jsonModule = new WeakReference(runtime.defineModule("JSON")); + RubyModule jsonExtModule = info.jsonModule.get().defineModuleUnder("Ext"); + RubyClass parserClass = + jsonExtModule.defineClassUnder("Parser", runtime.getObject(), + Parser.ALLOCATOR); + parserClass.defineAnnotatedMethods(Parser.class); + return true; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo 2.java new file mode 100644 index 0000000..2323bd9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo 2.java @@ -0,0 +1,116 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.lang.ref.WeakReference; +import java.util.HashMap; +import java.util.Map; +import java.util.WeakHashMap; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyModule; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + + +final class RuntimeInfo { + // since the vast majority of cases runs just one runtime, + // we optimize for that + private static WeakReference runtime1 = new WeakReference(null); + private static RuntimeInfo info1; + // store remaining runtimes here (does not include runtime1) + private static Map runtimes; + + // these fields are filled by the service loaders + // Use WeakReferences so that RuntimeInfo doesn't indirectly hold a hard reference to + // the Ruby runtime object, which would cause memory leaks in the runtimes map above. 
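The comment above explains why RuntimeInfo holds only WeakReferences: the per-runtime cache must never keep a Ruby runtime alive by itself. The self-contained sketch below reproduces that caching pattern (one hard-optimized "first" runtime plus a WeakHashMap for the rest) with plain Object keys instead of org.jruby.Ruby; the class and field names are illustrative only.

```java
import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.WeakHashMap;

// Sketch of the "one hot runtime + WeakHashMap for the rest" caching pattern.
final class PerRuntimeCacheSketch {
    private static WeakReference<Object> runtime1 = new WeakReference<>(null);
    private static PerRuntimeCacheSketch info1;
    private static Map<Object, PerRuntimeCacheSketch> others;

    private PerRuntimeCacheSketch(Object runtime) { /* per-runtime state lives here */ }

    static synchronized PerRuntimeCacheSketch forRuntime(Object runtime) {
        if (runtime1.get() == runtime) return info1;        // fast path: the usual case
        if (runtime1.get() == null) {                       // adopt as the primary runtime
            runtime1 = new WeakReference<>(runtime);
            info1 = new PerRuntimeCacheSketch(runtime);
            return info1;
        }
        if (others == null) others = new WeakHashMap<>(1);  // rare: several runtimes
        return others.computeIfAbsent(runtime, PerRuntimeCacheSketch::new);
    }

    public static void main(String[] args) {
        Object runtime = new Object();
        System.out.println(forRuntime(runtime) == forRuntime(runtime)); // true: cached
    }
}
```

Keeping only weak references means that once a runtime becomes unreachable, both the WeakHashMap entry and its cached info can be collected instead of leaking.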
+ /** JSON */ + WeakReference jsonModule; + /** JSON::Ext::Generator::GeneratorMethods::String::Extend */ + WeakReference stringExtendModule; + /** JSON::Ext::Generator::State */ + WeakReference generatorStateClass; + /** JSON::SAFE_STATE_PROTOTYPE */ + WeakReference safeStatePrototype; + + final WeakReference utf8; + final WeakReference ascii8bit; + // other encodings + private final Map> encodings; + + private RuntimeInfo(Ruby runtime) { + RubyClass encodingClass = runtime.getEncoding(); + if (encodingClass == null) { // 1.8 mode + utf8 = ascii8bit = null; + encodings = null; + } else { + ThreadContext context = runtime.getCurrentContext(); + + utf8 = new WeakReference((RubyEncoding)RubyEncoding.find(context, + encodingClass, runtime.newString("utf-8"))); + ascii8bit = new WeakReference((RubyEncoding)RubyEncoding.find(context, + encodingClass, runtime.newString("ascii-8bit"))); + encodings = new HashMap>(); + } + } + + static RuntimeInfo initRuntime(Ruby runtime) { + synchronized (RuntimeInfo.class) { + if (runtime1.get() == runtime) { + return info1; + } else if (runtime1.get() == null) { + runtime1 = new WeakReference(runtime); + info1 = new RuntimeInfo(runtime); + return info1; + } else { + if (runtimes == null) { + runtimes = new WeakHashMap(1); + } + RuntimeInfo cache = runtimes.get(runtime); + if (cache == null) { + cache = new RuntimeInfo(runtime); + runtimes.put(runtime, cache); + } + return cache; + } + } + } + + public static RuntimeInfo forRuntime(Ruby runtime) { + synchronized (RuntimeInfo.class) { + if (runtime1.get() == runtime) return info1; + RuntimeInfo cache = null; + if (runtimes != null) cache = runtimes.get(runtime); + assert cache != null : "Runtime given has not initialized JSON::Ext"; + return cache; + } + } + + public RubyEncoding getEncoding(ThreadContext context, String name) { + synchronized (encodings) { + WeakReference encoding = encodings.get(name); + if (encoding == null) { + Ruby runtime = context.getRuntime(); + encoding = new WeakReference((RubyEncoding)RubyEncoding.find(context, + runtime.getEncoding(), runtime.newString(name))); + encodings.put(name, encoding); + } + return encoding.get(); + } + } + + public GeneratorState getSafeStatePrototype(ThreadContext context) { + if (safeStatePrototype == null) { + IRubyObject value = jsonModule.get().getConstant("SAFE_STATE_PROTOTYPE"); + if (!(value instanceof GeneratorState)) { + throw context.getRuntime().newTypeError(value, generatorStateClass.get()); + } + safeStatePrototype = new WeakReference((GeneratorState)value); + } + return safeStatePrototype.get(); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo.java new file mode 100644 index 0000000..2323bd9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo.java @@ -0,0 +1,116 @@ +/* + * This code is copyrighted work by Daniel Luz . 
+ * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.lang.ref.WeakReference; +import java.util.HashMap; +import java.util.Map; +import java.util.WeakHashMap; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyModule; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + + +final class RuntimeInfo { + // since the vast majority of cases runs just one runtime, + // we optimize for that + private static WeakReference runtime1 = new WeakReference(null); + private static RuntimeInfo info1; + // store remaining runtimes here (does not include runtime1) + private static Map runtimes; + + // these fields are filled by the service loaders + // Use WeakReferences so that RuntimeInfo doesn't indirectly hold a hard reference to + // the Ruby runtime object, which would cause memory leaks in the runtimes map above. + /** JSON */ + WeakReference jsonModule; + /** JSON::Ext::Generator::GeneratorMethods::String::Extend */ + WeakReference stringExtendModule; + /** JSON::Ext::Generator::State */ + WeakReference generatorStateClass; + /** JSON::SAFE_STATE_PROTOTYPE */ + WeakReference safeStatePrototype; + + final WeakReference utf8; + final WeakReference ascii8bit; + // other encodings + private final Map> encodings; + + private RuntimeInfo(Ruby runtime) { + RubyClass encodingClass = runtime.getEncoding(); + if (encodingClass == null) { // 1.8 mode + utf8 = ascii8bit = null; + encodings = null; + } else { + ThreadContext context = runtime.getCurrentContext(); + + utf8 = new WeakReference((RubyEncoding)RubyEncoding.find(context, + encodingClass, runtime.newString("utf-8"))); + ascii8bit = new WeakReference((RubyEncoding)RubyEncoding.find(context, + encodingClass, runtime.newString("ascii-8bit"))); + encodings = new HashMap>(); + } + } + + static RuntimeInfo initRuntime(Ruby runtime) { + synchronized (RuntimeInfo.class) { + if (runtime1.get() == runtime) { + return info1; + } else if (runtime1.get() == null) { + runtime1 = new WeakReference(runtime); + info1 = new RuntimeInfo(runtime); + return info1; + } else { + if (runtimes == null) { + runtimes = new WeakHashMap(1); + } + RuntimeInfo cache = runtimes.get(runtime); + if (cache == null) { + cache = new RuntimeInfo(runtime); + runtimes.put(runtime, cache); + } + return cache; + } + } + } + + public static RuntimeInfo forRuntime(Ruby runtime) { + synchronized (RuntimeInfo.class) { + if (runtime1.get() == runtime) return info1; + RuntimeInfo cache = null; + if (runtimes != null) cache = runtimes.get(runtime); + assert cache != null : "Runtime given has not initialized JSON::Ext"; + return cache; + } + } + + public RubyEncoding getEncoding(ThreadContext context, String name) { + synchronized (encodings) { + WeakReference encoding = encodings.get(name); + if (encoding == null) { + Ruby runtime = context.getRuntime(); + encoding = new WeakReference((RubyEncoding)RubyEncoding.find(context, + runtime.getEncoding(), runtime.newString(name))); + encodings.put(name, encoding); + } + return encoding.get(); + } + } + + public GeneratorState getSafeStatePrototype(ThreadContext context) { + if (safeStatePrototype == null) { + IRubyObject value = jsonModule.get().getConstant("SAFE_STATE_PROTOTYPE"); + if (!(value instanceof GeneratorState)) { + throw context.getRuntime().newTypeError(value, generatorStateClass.get()); + } + safeStatePrototype = new WeakReference((GeneratorState)value); + } + return 
safeStatePrototype.get(); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringDecoder 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringDecoder 2.java new file mode 100644 index 0000000..76cf183 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringDecoder 2.java @@ -0,0 +1,166 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * A decoder that reads a JSON-encoded string from the given sources and + * returns its decoded form on a new ByteList. Escaped Unicode characters + * are encoded as UTF-8. + */ +final class StringDecoder extends ByteListTranscoder { + /** + * Stores the offset of the high surrogate when reading a surrogate pair, + * or -1 when not. + */ + private int surrogatePairStart = -1; + + // Array used for writing multi-byte characters into the buffer at once + private final byte[] aux = new byte[4]; + + StringDecoder(ThreadContext context) { + super(context); + } + + ByteList decode(ByteList src, int start, int end) { + ByteList out = new ByteList(end - start); + out.setEncoding(src.getEncoding()); + init(src, start, end, out); + while (hasNext()) { + handleChar(readUtf8Char()); + } + quoteStop(pos); + return out; + } + + private void handleChar(int c) { + if (c == '\\') { + quoteStop(charStart); + handleEscapeSequence(); + } else { + quoteStart(); + } + } + + private void handleEscapeSequence() { + ensureMin(1); + switch (readUtf8Char()) { + case 'b': + append('\b'); + break; + case 'f': + append('\f'); + break; + case 'n': + append('\n'); + break; + case 'r': + append('\r'); + break; + case 't': + append('\t'); + break; + case 'u': + ensureMin(4); + int cp = readHex(); + if (Character.isHighSurrogate((char)cp)) { + handleLowSurrogate((char)cp); + } else if (Character.isLowSurrogate((char)cp)) { + // low surrogate with no high surrogate + throw invalidUtf8(); + } else { + writeUtf8Char(cp); + } + break; + default: // '\\', '"', '/'... + quoteStart(); + } + } + + private void handleLowSurrogate(char highSurrogate) { + surrogatePairStart = charStart; + ensureMin(1); + int lowSurrogate = readUtf8Char(); + + if (lowSurrogate == '\\') { + ensureMin(5); + if (readUtf8Char() != 'u') throw invalidUtf8(); + lowSurrogate = readHex(); + } + + if (Character.isLowSurrogate((char)lowSurrogate)) { + writeUtf8Char(Character.toCodePoint(highSurrogate, + (char)lowSurrogate)); + surrogatePairStart = -1; + } else { + throw invalidUtf8(); + } + } + + private void writeUtf8Char(int codePoint) { + if (codePoint < 0x80) { + append(codePoint); + } else if (codePoint < 0x800) { + aux[0] = (byte)(0xc0 | (codePoint >>> 6)); + aux[1] = tailByte(codePoint & 0x3f); + append(aux, 0, 2); + } else if (codePoint < 0x10000) { + aux[0] = (byte)(0xe0 | (codePoint >>> 12)); + aux[1] = tailByte(codePoint >>> 6); + aux[2] = tailByte(codePoint); + append(aux, 0, 3); + } else { + aux[0] = (byte)(0xf0 | codePoint >>> 18); + aux[1] = tailByte(codePoint >>> 12); + aux[2] = tailByte(codePoint >>> 6); + aux[3] = tailByte(codePoint); + append(aux, 0, 4); + } + } + + private byte tailByte(int value) { + return (byte)(0x80 | (value & 0x3f)); + } + + /** + * Reads a 4-digit unsigned hexadecimal number from the source. 
+ */ + private int readHex() { + int numberStart = pos; + int result = 0; + int length = 4; + for (int i = 0; i < length; i++) { + int digit = readUtf8Char(); + int digitValue; + if (digit >= '0' && digit <= '9') { + digitValue = digit - '0'; + } else if (digit >= 'a' && digit <= 'f') { + digitValue = 10 + digit - 'a'; + } else if (digit >= 'A' && digit <= 'F') { + digitValue = 10 + digit - 'A'; + } else { + throw new NumberFormatException("Invalid base 16 number " + + src.subSequence(numberStart, numberStart + length)); + } + result = result * 16 + digitValue; + } + return result; + } + + @Override + protected RaiseException invalidUtf8() { + ByteList message = new ByteList( + ByteList.plain("partial character in source, " + + "but hit end near ")); + int start = surrogatePairStart != -1 ? surrogatePairStart : charStart; + message.append(src, start, srcEnd - start); + return Utils.newException(context, Utils.M_PARSER_ERROR, + context.getRuntime().newString(message)); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringDecoder.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringDecoder.java new file mode 100644 index 0000000..76cf183 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringDecoder.java @@ -0,0 +1,166 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * A decoder that reads a JSON-encoded string from the given sources and + * returns its decoded form on a new ByteList. Escaped Unicode characters + * are encoded as UTF-8. + */ +final class StringDecoder extends ByteListTranscoder { + /** + * Stores the offset of the high surrogate when reading a surrogate pair, + * or -1 when not. + */ + private int surrogatePairStart = -1; + + // Array used for writing multi-byte characters into the buffer at once + private final byte[] aux = new byte[4]; + + StringDecoder(ThreadContext context) { + super(context); + } + + ByteList decode(ByteList src, int start, int end) { + ByteList out = new ByteList(end - start); + out.setEncoding(src.getEncoding()); + init(src, start, end, out); + while (hasNext()) { + handleChar(readUtf8Char()); + } + quoteStop(pos); + return out; + } + + private void handleChar(int c) { + if (c == '\\') { + quoteStop(charStart); + handleEscapeSequence(); + } else { + quoteStart(); + } + } + + private void handleEscapeSequence() { + ensureMin(1); + switch (readUtf8Char()) { + case 'b': + append('\b'); + break; + case 'f': + append('\f'); + break; + case 'n': + append('\n'); + break; + case 'r': + append('\r'); + break; + case 't': + append('\t'); + break; + case 'u': + ensureMin(4); + int cp = readHex(); + if (Character.isHighSurrogate((char)cp)) { + handleLowSurrogate((char)cp); + } else if (Character.isLowSurrogate((char)cp)) { + // low surrogate with no high surrogate + throw invalidUtf8(); + } else { + writeUtf8Char(cp); + } + break; + default: // '\\', '"', '/'... 
+ quoteStart(); + } + } + + private void handleLowSurrogate(char highSurrogate) { + surrogatePairStart = charStart; + ensureMin(1); + int lowSurrogate = readUtf8Char(); + + if (lowSurrogate == '\\') { + ensureMin(5); + if (readUtf8Char() != 'u') throw invalidUtf8(); + lowSurrogate = readHex(); + } + + if (Character.isLowSurrogate((char)lowSurrogate)) { + writeUtf8Char(Character.toCodePoint(highSurrogate, + (char)lowSurrogate)); + surrogatePairStart = -1; + } else { + throw invalidUtf8(); + } + } + + private void writeUtf8Char(int codePoint) { + if (codePoint < 0x80) { + append(codePoint); + } else if (codePoint < 0x800) { + aux[0] = (byte)(0xc0 | (codePoint >>> 6)); + aux[1] = tailByte(codePoint & 0x3f); + append(aux, 0, 2); + } else if (codePoint < 0x10000) { + aux[0] = (byte)(0xe0 | (codePoint >>> 12)); + aux[1] = tailByte(codePoint >>> 6); + aux[2] = tailByte(codePoint); + append(aux, 0, 3); + } else { + aux[0] = (byte)(0xf0 | codePoint >>> 18); + aux[1] = tailByte(codePoint >>> 12); + aux[2] = tailByte(codePoint >>> 6); + aux[3] = tailByte(codePoint); + append(aux, 0, 4); + } + } + + private byte tailByte(int value) { + return (byte)(0x80 | (value & 0x3f)); + } + + /** + * Reads a 4-digit unsigned hexadecimal number from the source. + */ + private int readHex() { + int numberStart = pos; + int result = 0; + int length = 4; + for (int i = 0; i < length; i++) { + int digit = readUtf8Char(); + int digitValue; + if (digit >= '0' && digit <= '9') { + digitValue = digit - '0'; + } else if (digit >= 'a' && digit <= 'f') { + digitValue = 10 + digit - 'a'; + } else if (digit >= 'A' && digit <= 'F') { + digitValue = 10 + digit - 'A'; + } else { + throw new NumberFormatException("Invalid base 16 number " + + src.subSequence(numberStart, numberStart + length)); + } + result = result * 16 + digitValue; + } + return result; + } + + @Override + protected RaiseException invalidUtf8() { + ByteList message = new ByteList( + ByteList.plain("partial character in source, " + + "but hit end near ")); + int start = surrogatePairStart != -1 ? surrogatePairStart : charStart; + message.append(src, start, srcEnd - start); + return Utils.newException(context, Utils.M_PARSER_ERROR, + context.getRuntime().newString(message)); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringEncoder 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringEncoder 2.java new file mode 100644 index 0000000..9d40dd3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringEncoder 2.java @@ -0,0 +1,111 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * An encoder that reads from the given source and outputs its representation + * to another ByteList. The source string is fully checked for UTF-8 validity, + * and throws a GeneratorError if any problem is found. 
+ */ +final class StringEncoder extends ByteListTranscoder { + private final boolean asciiOnly; + + // Escaped characters will reuse this array, to avoid new allocations + // or appending them byte-by-byte + private final byte[] aux = + new byte[] {/* First unicode character */ + '\\', 'u', 0, 0, 0, 0, + /* Second unicode character (for surrogate pairs) */ + '\\', 'u', 0, 0, 0, 0, + /* "\X" characters */ + '\\', 0}; + // offsets on the array above + private static final int ESCAPE_UNI1_OFFSET = 0; + private static final int ESCAPE_UNI2_OFFSET = ESCAPE_UNI1_OFFSET + 6; + private static final int ESCAPE_CHAR_OFFSET = ESCAPE_UNI2_OFFSET + 6; + /** Array used for code point decomposition in surrogates */ + private final char[] utf16 = new char[2]; + + private static final byte[] HEX = + new byte[] {'0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + StringEncoder(ThreadContext context, boolean asciiOnly) { + super(context); + this.asciiOnly = asciiOnly; + } + + void encode(ByteList src, ByteList out) { + init(src, out); + append('"'); + while (hasNext()) { + handleChar(readUtf8Char()); + } + quoteStop(pos); + append('"'); + } + + private void handleChar(int c) { + switch (c) { + case '"': + case '\\': + escapeChar((char)c); + break; + case '\n': + escapeChar('n'); + break; + case '\r': + escapeChar('r'); + break; + case '\t': + escapeChar('t'); + break; + case '\f': + escapeChar('f'); + break; + case '\b': + escapeChar('b'); + break; + default: + if (c >= 0x20 && c <= 0x7f || + (c >= 0x80 && !asciiOnly)) { + quoteStart(); + } else { + quoteStop(charStart); + escapeUtf8Char(c); + } + } + } + + private void escapeChar(char c) { + quoteStop(charStart); + aux[ESCAPE_CHAR_OFFSET + 1] = (byte)c; + append(aux, ESCAPE_CHAR_OFFSET, 2); + } + + private void escapeUtf8Char(int codePoint) { + int numChars = Character.toChars(codePoint, utf16, 0); + escapeCodeUnit(utf16[0], ESCAPE_UNI1_OFFSET + 2); + if (numChars > 1) escapeCodeUnit(utf16[1], ESCAPE_UNI2_OFFSET + 2); + append(aux, ESCAPE_UNI1_OFFSET, 6 * numChars); + } + + private void escapeCodeUnit(char c, int auxOffset) { + for (int i = 0; i < 4; i++) { + aux[auxOffset + i] = HEX[(c >>> (12 - 4 * i)) & 0xf]; + } + } + + @Override + protected RaiseException invalidUtf8() { + return Utils.newException(context, Utils.M_GENERATOR_ERROR, + "source sequence is illegal/malformed utf-8"); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringEncoder.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringEncoder.java new file mode 100644 index 0000000..9d40dd3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/StringEncoder.java @@ -0,0 +1,111 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * An encoder that reads from the given source and outputs its representation + * to another ByteList. The source string is fully checked for UTF-8 validity, + * and throws a GeneratorError if any problem is found. 
+ */ +final class StringEncoder extends ByteListTranscoder { + private final boolean asciiOnly; + + // Escaped characters will reuse this array, to avoid new allocations + // or appending them byte-by-byte + private final byte[] aux = + new byte[] {/* First unicode character */ + '\\', 'u', 0, 0, 0, 0, + /* Second unicode character (for surrogate pairs) */ + '\\', 'u', 0, 0, 0, 0, + /* "\X" characters */ + '\\', 0}; + // offsets on the array above + private static final int ESCAPE_UNI1_OFFSET = 0; + private static final int ESCAPE_UNI2_OFFSET = ESCAPE_UNI1_OFFSET + 6; + private static final int ESCAPE_CHAR_OFFSET = ESCAPE_UNI2_OFFSET + 6; + /** Array used for code point decomposition in surrogates */ + private final char[] utf16 = new char[2]; + + private static final byte[] HEX = + new byte[] {'0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + StringEncoder(ThreadContext context, boolean asciiOnly) { + super(context); + this.asciiOnly = asciiOnly; + } + + void encode(ByteList src, ByteList out) { + init(src, out); + append('"'); + while (hasNext()) { + handleChar(readUtf8Char()); + } + quoteStop(pos); + append('"'); + } + + private void handleChar(int c) { + switch (c) { + case '"': + case '\\': + escapeChar((char)c); + break; + case '\n': + escapeChar('n'); + break; + case '\r': + escapeChar('r'); + break; + case '\t': + escapeChar('t'); + break; + case '\f': + escapeChar('f'); + break; + case '\b': + escapeChar('b'); + break; + default: + if (c >= 0x20 && c <= 0x7f || + (c >= 0x80 && !asciiOnly)) { + quoteStart(); + } else { + quoteStop(charStart); + escapeUtf8Char(c); + } + } + } + + private void escapeChar(char c) { + quoteStop(charStart); + aux[ESCAPE_CHAR_OFFSET + 1] = (byte)c; + append(aux, ESCAPE_CHAR_OFFSET, 2); + } + + private void escapeUtf8Char(int codePoint) { + int numChars = Character.toChars(codePoint, utf16, 0); + escapeCodeUnit(utf16[0], ESCAPE_UNI1_OFFSET + 2); + if (numChars > 1) escapeCodeUnit(utf16[1], ESCAPE_UNI2_OFFSET + 2); + append(aux, ESCAPE_UNI1_OFFSET, 6 * numChars); + } + + private void escapeCodeUnit(char c, int auxOffset) { + for (int i = 0; i < 4; i++) { + aux[auxOffset + i] = HEX[(c >>> (12 - 4 * i)) & 0xf]; + } + } + + @Override + protected RaiseException invalidUtf8() { + return Utils.newException(context, Utils.M_GENERATOR_ERROR, + "source sequence is illegal/malformed utf-8"); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Utils 2.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Utils 2.java new file mode 100644 index 0000000..ed6f832 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Utils 2.java @@ -0,0 +1,88 @@ +/* + * This code is copyrighted work by Daniel Luz . 
+ * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyException; +import org.jruby.RubyHash; +import org.jruby.RubyString; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * Library of miscellaneous utility functions + */ +final class Utils { + public static final String M_GENERATOR_ERROR = "GeneratorError"; + public static final String M_NESTING_ERROR = "NestingError"; + public static final String M_PARSER_ERROR = "ParserError"; + + private Utils() { + throw new RuntimeException(); + } + + /** + * Safe {@link RubyArray} type-checking. + * Returns the given object if it is an Array, + * or throws an exception if not. + * @param object The object to test + * @return The given object if it is an Array + * @throws RaiseException TypeError if the object is not + * of the expected type + */ + static RubyArray ensureArray(IRubyObject object) throws RaiseException { + if (object instanceof RubyArray) return (RubyArray)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getArray()); + } + + static RubyHash ensureHash(IRubyObject object) throws RaiseException { + if (object instanceof RubyHash) return (RubyHash)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getHash()); + } + + static RubyString ensureString(IRubyObject object) throws RaiseException { + if (object instanceof RubyString) return (RubyString)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getString()); + } + + static RaiseException newException(ThreadContext context, + String className, String message) { + return newException(context, className, + context.getRuntime().newString(message)); + } + + static RaiseException newException(ThreadContext context, + String className, RubyString message) { + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + RubyClass klazz = info.jsonModule.get().getClass(className); + RubyException excptn = + (RubyException)klazz.newInstance(context, + new IRubyObject[] {message}, Block.NULL_BLOCK); + return new RaiseException(excptn); + } + + static byte[] repeat(ByteList a, int n) { + return repeat(a.unsafeBytes(), a.begin(), a.length(), n); + } + + static byte[] repeat(byte[] a, int begin, int length, int n) { + if (length == 0) return ByteList.NULL_ARRAY; + int resultLen = length * n; + byte[] result = new byte[resultLen]; + for (int pos = 0; pos < resultLen; pos += length) { + System.arraycopy(a, begin, result, pos, length); + } + return result; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Utils.java b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Utils.java new file mode 100644 index 0000000..ed6f832 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/java/src/json/ext/Utils.java @@ -0,0 +1,88 @@ +/* + * This code is copyrighted work by Daniel Luz . 
+ * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyException; +import org.jruby.RubyHash; +import org.jruby.RubyString; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * Library of miscellaneous utility functions + */ +final class Utils { + public static final String M_GENERATOR_ERROR = "GeneratorError"; + public static final String M_NESTING_ERROR = "NestingError"; + public static final String M_PARSER_ERROR = "ParserError"; + + private Utils() { + throw new RuntimeException(); + } + + /** + * Safe {@link RubyArray} type-checking. + * Returns the given object if it is an Array, + * or throws an exception if not. + * @param object The object to test + * @return The given object if it is an Array + * @throws RaiseException TypeError if the object is not + * of the expected type + */ + static RubyArray ensureArray(IRubyObject object) throws RaiseException { + if (object instanceof RubyArray) return (RubyArray)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getArray()); + } + + static RubyHash ensureHash(IRubyObject object) throws RaiseException { + if (object instanceof RubyHash) return (RubyHash)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getHash()); + } + + static RubyString ensureString(IRubyObject object) throws RaiseException { + if (object instanceof RubyString) return (RubyString)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getString()); + } + + static RaiseException newException(ThreadContext context, + String className, String message) { + return newException(context, className, + context.getRuntime().newString(message)); + } + + static RaiseException newException(ThreadContext context, + String className, RubyString message) { + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + RubyClass klazz = info.jsonModule.get().getClass(className); + RubyException excptn = + (RubyException)klazz.newInstance(context, + new IRubyObject[] {message}, Block.NULL_BLOCK); + return new RaiseException(excptn); + } + + static byte[] repeat(ByteList a, int n) { + return repeat(a.unsafeBytes(), a.begin(), a.length(), n); + } + + static byte[] repeat(byte[] a, int begin, int length, int n) { + if (length == 0) return ByteList.NULL_ARRAY; + int resultLen = length * n; + byte[] result = new byte[resultLen]; + for (int pos = 0; pos < resultLen; pos += length) { + System.arraycopy(a, begin, result, pos, length); + } + return result; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json 2.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json 2.gemspec new file mode 100644 index 0000000..0c72e82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json 2.gemspec @@ -0,0 +1,139 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json" + s.version = File.read('VERSION').chomp + + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib"] + s.authors = ["Florian Frank"] + s.description = "This is a JSON implementation as a Ruby extension in C." 
+ s.email = "flori@ping.de" + s.extensions = ["ext/json/ext/generator/extconf.rb", "ext/json/ext/parser/extconf.rb", "ext/json/extconf.rb"] + s.extra_rdoc_files = ["README.md"] + s.files = [ + ".gitignore", + ".travis.yml", + "CHANGES.md", + "Gemfile", + "README-json-jruby.md", + "README.md", + "Rakefile", + "VERSION", + "diagrams/.keep", + "ext/json/ext/fbuffer/fbuffer.h", + "ext/json/ext/generator/depend", + "ext/json/ext/generator/extconf.rb", + "ext/json/ext/generator/generator.c", + "ext/json/ext/generator/generator.h", + "ext/json/ext/parser/depend", + "ext/json/ext/parser/extconf.rb", + "ext/json/ext/parser/parser.c", + "ext/json/ext/parser/parser.h", + "ext/json/ext/parser/parser.rl", + "ext/json/extconf.rb", + "install.rb", + "java/src/json/ext/ByteListTranscoder.java", + "java/src/json/ext/Generator.java", + "java/src/json/ext/GeneratorMethods.java", + "java/src/json/ext/GeneratorService.java", + "java/src/json/ext/GeneratorState.java", + "java/src/json/ext/OptionsReader.java", + "java/src/json/ext/Parser.java", + "java/src/json/ext/Parser.rl", + "java/src/json/ext/ParserService.java", + "java/src/json/ext/RuntimeInfo.java", + "java/src/json/ext/StringDecoder.java", + "java/src/json/ext/StringEncoder.java", + "java/src/json/ext/Utils.java", + "json-java.gemspec", + "json.gemspec", + "json_pure.gemspec", + "lib/json.rb", + "lib/json/add/bigdecimal.rb", + "lib/json/add/complex.rb", + "lib/json/add/core.rb", + "lib/json/add/date.rb", + "lib/json/add/date_time.rb", + "lib/json/add/exception.rb", + "lib/json/add/ostruct.rb", + "lib/json/add/range.rb", + "lib/json/add/rational.rb", + "lib/json/add/regexp.rb", + "lib/json/add/set.rb", + "lib/json/add/struct.rb", + "lib/json/add/symbol.rb", + "lib/json/add/time.rb", + "lib/json/common.rb", + "lib/json/ext.rb", + "lib/json/ext/.keep", + "lib/json/generic_object.rb", + "lib/json/pure.rb", + "lib/json/pure/generator.rb", + "lib/json/pure/parser.rb", + "lib/json/version.rb", + "references/rfc7159.txt", + "tests/fixtures/fail10.json", + "tests/fixtures/fail11.json", + "tests/fixtures/fail12.json", + "tests/fixtures/fail13.json", + "tests/fixtures/fail14.json", + "tests/fixtures/fail18.json", + "tests/fixtures/fail19.json", + "tests/fixtures/fail2.json", + "tests/fixtures/fail20.json", + "tests/fixtures/fail21.json", + "tests/fixtures/fail22.json", + "tests/fixtures/fail23.json", + "tests/fixtures/fail24.json", + "tests/fixtures/fail25.json", + "tests/fixtures/fail27.json", + "tests/fixtures/fail28.json", + "tests/fixtures/fail3.json", + "tests/fixtures/fail4.json", + "tests/fixtures/fail5.json", + "tests/fixtures/fail6.json", + "tests/fixtures/fail7.json", + "tests/fixtures/fail8.json", + "tests/fixtures/fail9.json", + "tests/fixtures/obsolete_fail1.json", + "tests/fixtures/pass1.json", + "tests/fixtures/pass15.json", + "tests/fixtures/pass16.json", + "tests/fixtures/pass17.json", + "tests/fixtures/pass2.json", + "tests/fixtures/pass26.json", + "tests/fixtures/pass3.json", + "tests/json_addition_test.rb", + "tests/json_common_interface_test.rb", + "tests/json_encoding_test.rb", + "tests/json_ext_parser_test.rb", + "tests/json_fixtures_test.rb", + "tests/json_generator_test.rb", + "tests/json_generic_object_test.rb", + "tests/json_parser_test.rb", + "tests/json_string_matching_test.rb", + "tests/test_helper.rb", + "tests/test_helper.rb", + "tools/diff.sh", + "tools/fuzz.rb", + "tools/server.rb", + ] + s.homepage = "http://flori.github.com/json" + s.metadata = { + 'bug_tracker_uri' => 'https://github.com/flori/json/issues', + 'changelog_uri' => 
'https://github.com/flori/json/blob/master/CHANGES.md', + 'documentation_uri' => 'http://flori.github.io/json/doc/index.html', + 'homepage_uri' => 'http://flori.github.io/json/', + 'source_code_uri' => 'https://github.com/flori/json', + 'wiki_uri' => 'https://github.com/flori/json/wiki' + } + s.licenses = ["Ruby"] + s.rdoc_options = ["--title", "JSON implemention for Ruby", "--main", "README.md"] + s.required_ruby_version = Gem::Requirement.new(">= 2.0") + s.summary = "JSON Implementation for Ruby" + s.test_files = ["tests/test_helper.rb"] + + s.add_development_dependency("rake", [">= 0"]) + s.add_development_dependency("test-unit", [">= 2.0", "< 4.0"]) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json-java 2.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json-java 2.gemspec new file mode 100755 index 0000000..1bcd639 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json-java 2.gemspec @@ -0,0 +1,37 @@ +#!/usr/bin/env jruby +require "rubygems" + +spec = Gem::Specification.new do |s| + s.name = "json" + s.version = File.read("VERSION").chomp + s.summary = "JSON implementation for JRuby" + s.description = "A JSON implementation as a JRuby extension." + s.author = "Daniel Luz" + s.email = "dev+ruby@mernen.com" + s.homepage = "http://flori.github.com/json" + s.platform = 'java' + s.licenses = ["Ruby"] + + s.files = Dir["{docs,lib,tests}/**/*"] + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q, [">= 0"]) + s.add_development_dependency(%q, [">= 2.0", "< 4.0"]) + else + s.add_dependency(%q, [">= 0"]) + s.add_dependency(%q, [">= 2.0", "< 4.0"]) + end + else + s.add_dependency(%q, [">= 0"]) + s.add_dependency(%q, [">= 2.0", "< 4.0"]) + end +end + +if $0 == __FILE__ + Gem::Builder.new(spec).build +else + spec +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json-java.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json-java.gemspec new file mode 100755 index 0000000..1bcd639 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json-java.gemspec @@ -0,0 +1,37 @@ +#!/usr/bin/env jruby +require "rubygems" + +spec = Gem::Specification.new do |s| + s.name = "json" + s.version = File.read("VERSION").chomp + s.summary = "JSON implementation for JRuby" + s.description = "A JSON implementation as a JRuby extension." + s.author = "Daniel Luz" + s.email = "dev+ruby@mernen.com" + s.homepage = "http://flori.github.com/json" + s.platform = 'java' + s.licenses = ["Ruby"] + + s.files = Dir["{docs,lib,tests}/**/*"] + + if s.respond_to? 
:specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q, [">= 0"]) + s.add_development_dependency(%q, [">= 2.0", "< 4.0"]) + else + s.add_dependency(%q, [">= 0"]) + s.add_dependency(%q, [">= 2.0", "< 4.0"]) + end + else + s.add_dependency(%q, [">= 0"]) + s.add_dependency(%q, [">= 2.0", "< 4.0"]) + end +end + +if $0 == __FILE__ + Gem::Builder.new(spec).build +else + spec +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json.gemspec new file mode 100644 index 0000000..0c72e82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json.gemspec @@ -0,0 +1,139 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json" + s.version = File.read('VERSION').chomp + + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib"] + s.authors = ["Florian Frank"] + s.description = "This is a JSON implementation as a Ruby extension in C." + s.email = "flori@ping.de" + s.extensions = ["ext/json/ext/generator/extconf.rb", "ext/json/ext/parser/extconf.rb", "ext/json/extconf.rb"] + s.extra_rdoc_files = ["README.md"] + s.files = [ + ".gitignore", + ".travis.yml", + "CHANGES.md", + "Gemfile", + "README-json-jruby.md", + "README.md", + "Rakefile", + "VERSION", + "diagrams/.keep", + "ext/json/ext/fbuffer/fbuffer.h", + "ext/json/ext/generator/depend", + "ext/json/ext/generator/extconf.rb", + "ext/json/ext/generator/generator.c", + "ext/json/ext/generator/generator.h", + "ext/json/ext/parser/depend", + "ext/json/ext/parser/extconf.rb", + "ext/json/ext/parser/parser.c", + "ext/json/ext/parser/parser.h", + "ext/json/ext/parser/parser.rl", + "ext/json/extconf.rb", + "install.rb", + "java/src/json/ext/ByteListTranscoder.java", + "java/src/json/ext/Generator.java", + "java/src/json/ext/GeneratorMethods.java", + "java/src/json/ext/GeneratorService.java", + "java/src/json/ext/GeneratorState.java", + "java/src/json/ext/OptionsReader.java", + "java/src/json/ext/Parser.java", + "java/src/json/ext/Parser.rl", + "java/src/json/ext/ParserService.java", + "java/src/json/ext/RuntimeInfo.java", + "java/src/json/ext/StringDecoder.java", + "java/src/json/ext/StringEncoder.java", + "java/src/json/ext/Utils.java", + "json-java.gemspec", + "json.gemspec", + "json_pure.gemspec", + "lib/json.rb", + "lib/json/add/bigdecimal.rb", + "lib/json/add/complex.rb", + "lib/json/add/core.rb", + "lib/json/add/date.rb", + "lib/json/add/date_time.rb", + "lib/json/add/exception.rb", + "lib/json/add/ostruct.rb", + "lib/json/add/range.rb", + "lib/json/add/rational.rb", + "lib/json/add/regexp.rb", + "lib/json/add/set.rb", + "lib/json/add/struct.rb", + "lib/json/add/symbol.rb", + "lib/json/add/time.rb", + "lib/json/common.rb", + "lib/json/ext.rb", + "lib/json/ext/.keep", + "lib/json/generic_object.rb", + "lib/json/pure.rb", + "lib/json/pure/generator.rb", + "lib/json/pure/parser.rb", + "lib/json/version.rb", + "references/rfc7159.txt", + "tests/fixtures/fail10.json", + "tests/fixtures/fail11.json", + "tests/fixtures/fail12.json", + "tests/fixtures/fail13.json", + "tests/fixtures/fail14.json", + "tests/fixtures/fail18.json", + "tests/fixtures/fail19.json", + "tests/fixtures/fail2.json", + "tests/fixtures/fail20.json", + "tests/fixtures/fail21.json", + "tests/fixtures/fail22.json", + "tests/fixtures/fail23.json", + "tests/fixtures/fail24.json", + "tests/fixtures/fail25.json", + 
"tests/fixtures/fail27.json", + "tests/fixtures/fail28.json", + "tests/fixtures/fail3.json", + "tests/fixtures/fail4.json", + "tests/fixtures/fail5.json", + "tests/fixtures/fail6.json", + "tests/fixtures/fail7.json", + "tests/fixtures/fail8.json", + "tests/fixtures/fail9.json", + "tests/fixtures/obsolete_fail1.json", + "tests/fixtures/pass1.json", + "tests/fixtures/pass15.json", + "tests/fixtures/pass16.json", + "tests/fixtures/pass17.json", + "tests/fixtures/pass2.json", + "tests/fixtures/pass26.json", + "tests/fixtures/pass3.json", + "tests/json_addition_test.rb", + "tests/json_common_interface_test.rb", + "tests/json_encoding_test.rb", + "tests/json_ext_parser_test.rb", + "tests/json_fixtures_test.rb", + "tests/json_generator_test.rb", + "tests/json_generic_object_test.rb", + "tests/json_parser_test.rb", + "tests/json_string_matching_test.rb", + "tests/test_helper.rb", + "tests/test_helper.rb", + "tools/diff.sh", + "tools/fuzz.rb", + "tools/server.rb", + ] + s.homepage = "http://flori.github.com/json" + s.metadata = { + 'bug_tracker_uri' => 'https://github.com/flori/json/issues', + 'changelog_uri' => 'https://github.com/flori/json/blob/master/CHANGES.md', + 'documentation_uri' => 'http://flori.github.io/json/doc/index.html', + 'homepage_uri' => 'http://flori.github.io/json/', + 'source_code_uri' => 'https://github.com/flori/json', + 'wiki_uri' => 'https://github.com/flori/json/wiki' + } + s.licenses = ["Ruby"] + s.rdoc_options = ["--title", "JSON implemention for Ruby", "--main", "README.md"] + s.required_ruby_version = Gem::Requirement.new(">= 2.0") + s.summary = "JSON Implementation for Ruby" + s.test_files = ["tests/test_helper.rb"] + + s.add_development_dependency("rake", [">= 0"]) + s.add_development_dependency("test-unit", [">= 2.0", "< 4.0"]) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json_pure 2.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json_pure 2.gemspec new file mode 100644 index 0000000..007acd3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json_pure 2.gemspec @@ -0,0 +1,33 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json_pure".freeze + s.version = File.read("VERSION").chomp + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Florian Frank".freeze] + s.description = "This is a JSON implementation in pure Ruby.".freeze + s.email = "flori@ping.de".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["./tests/test_helper.rb".freeze, ".gitignore".freeze, ".travis.yml".freeze, "CHANGES.md".freeze, "Gemfile".freeze, "LICENSE".freeze, "README-json-jruby.md".freeze, "README.md".freeze, "Rakefile".freeze, "VERSION".freeze, "diagrams/.keep".freeze, "ext/json/ext/fbuffer/fbuffer.h".freeze, "ext/json/ext/generator/depend".freeze, "ext/json/ext/generator/extconf.rb".freeze, "ext/json/ext/generator/generator.c".freeze, "ext/json/ext/generator/generator.h".freeze, "ext/json/ext/parser/depend".freeze, "ext/json/ext/parser/extconf.rb".freeze, "ext/json/ext/parser/parser.c".freeze, "ext/json/ext/parser/parser.h".freeze, "ext/json/ext/parser/parser.rl".freeze, "ext/json/extconf.rb".freeze, "install.rb".freeze, "java/src/json/ext/ByteListTranscoder.java".freeze, "java/src/json/ext/Generator.java".freeze, "java/src/json/ext/GeneratorMethods.java".freeze, "java/src/json/ext/GeneratorService.java".freeze, "java/src/json/ext/GeneratorState.java".freeze, "java/src/json/ext/OptionsReader.java".freeze, "java/src/json/ext/Parser.java".freeze, "java/src/json/ext/Parser.rl".freeze, "java/src/json/ext/ParserService.java".freeze, "java/src/json/ext/RuntimeInfo.java".freeze, "java/src/json/ext/StringDecoder.java".freeze, "java/src/json/ext/StringEncoder.java".freeze, "java/src/json/ext/Utils.java".freeze, "json-java.gemspec".freeze, "json.gemspec".freeze, "json_pure.gemspec".freeze, "lib/json.rb".freeze, "lib/json/add/bigdecimal.rb".freeze, "lib/json/add/complex.rb".freeze, "lib/json/add/core.rb".freeze, "lib/json/add/date.rb".freeze, "lib/json/add/date_time.rb".freeze, "lib/json/add/exception.rb".freeze, "lib/json/add/ostruct.rb".freeze, "lib/json/add/range.rb".freeze, "lib/json/add/rational.rb".freeze, "lib/json/add/regexp.rb".freeze, "lib/json/add/set.rb".freeze, "lib/json/add/struct.rb".freeze, "lib/json/add/symbol.rb".freeze, "lib/json/add/time.rb".freeze, "lib/json/common.rb".freeze, "lib/json/ext.rb".freeze, "lib/json/ext/.keep".freeze, "lib/json/generic_object.rb".freeze, "lib/json/pure.rb".freeze, "lib/json/pure/generator.rb".freeze, "lib/json/pure/parser.rb".freeze, "lib/json/version.rb".freeze, "references/rfc7159.txt".freeze, "tests/fixtures/fail10.json".freeze, "tests/fixtures/fail11.json".freeze, "tests/fixtures/fail12.json".freeze, "tests/fixtures/fail13.json".freeze, "tests/fixtures/fail14.json".freeze, "tests/fixtures/fail18.json".freeze, "tests/fixtures/fail19.json".freeze, "tests/fixtures/fail2.json".freeze, "tests/fixtures/fail20.json".freeze, "tests/fixtures/fail21.json".freeze, "tests/fixtures/fail22.json".freeze, "tests/fixtures/fail23.json".freeze, "tests/fixtures/fail24.json".freeze, "tests/fixtures/fail25.json".freeze, "tests/fixtures/fail27.json".freeze, "tests/fixtures/fail28.json".freeze, "tests/fixtures/fail3.json".freeze, "tests/fixtures/fail4.json".freeze, "tests/fixtures/fail5.json".freeze, "tests/fixtures/fail6.json".freeze, "tests/fixtures/fail7.json".freeze, "tests/fixtures/fail8.json".freeze, "tests/fixtures/fail9.json".freeze, "tests/fixtures/obsolete_fail1.json".freeze, "tests/fixtures/pass1.json".freeze, "tests/fixtures/pass15.json".freeze, "tests/fixtures/pass16.json".freeze, "tests/fixtures/pass17.json".freeze, "tests/fixtures/pass2.json".freeze, "tests/fixtures/pass26.json".freeze, 
"tests/fixtures/pass3.json".freeze, "tests/json_addition_test.rb".freeze, "tests/json_common_interface_test.rb".freeze, "tests/json_encoding_test.rb".freeze, "tests/json_ext_parser_test.rb".freeze, "tests/json_fixtures_test.rb".freeze, "tests/json_generator_test.rb".freeze, "tests/json_generic_object_test.rb".freeze, "tests/json_parser_test.rb".freeze, "tests/json_string_matching_test.rb".freeze, "tests/test_helper.rb".freeze, "tools/diff.sh".freeze, "tools/fuzz.rb".freeze, "tools/server.rb".freeze] + s.homepage = "http://flori.github.com/json".freeze + s.licenses = ["Ruby".freeze] + s.rdoc_options = ["--title".freeze, "JSON implemention for ruby".freeze, "--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.1.2".freeze + s.summary = "JSON Implementation for Ruby".freeze + s.test_files = ["./tests/test_helper.rb".freeze] + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 2.0", "< 4.0"]) + else + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 2.0", "< 4.0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json_pure.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json_pure.gemspec new file mode 100644 index 0000000..007acd3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/json_pure.gemspec @@ -0,0 +1,33 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json_pure".freeze + s.version = File.read("VERSION").chomp + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Florian Frank".freeze] + s.description = "This is a JSON implementation in pure Ruby.".freeze + s.email = "flori@ping.de".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["./tests/test_helper.rb".freeze, ".gitignore".freeze, ".travis.yml".freeze, "CHANGES.md".freeze, "Gemfile".freeze, "LICENSE".freeze, "README-json-jruby.md".freeze, "README.md".freeze, "Rakefile".freeze, "VERSION".freeze, "diagrams/.keep".freeze, "ext/json/ext/fbuffer/fbuffer.h".freeze, "ext/json/ext/generator/depend".freeze, "ext/json/ext/generator/extconf.rb".freeze, "ext/json/ext/generator/generator.c".freeze, "ext/json/ext/generator/generator.h".freeze, "ext/json/ext/parser/depend".freeze, "ext/json/ext/parser/extconf.rb".freeze, "ext/json/ext/parser/parser.c".freeze, "ext/json/ext/parser/parser.h".freeze, "ext/json/ext/parser/parser.rl".freeze, "ext/json/extconf.rb".freeze, "install.rb".freeze, "java/src/json/ext/ByteListTranscoder.java".freeze, "java/src/json/ext/Generator.java".freeze, "java/src/json/ext/GeneratorMethods.java".freeze, "java/src/json/ext/GeneratorService.java".freeze, "java/src/json/ext/GeneratorState.java".freeze, "java/src/json/ext/OptionsReader.java".freeze, "java/src/json/ext/Parser.java".freeze, "java/src/json/ext/Parser.rl".freeze, "java/src/json/ext/ParserService.java".freeze, "java/src/json/ext/RuntimeInfo.java".freeze, "java/src/json/ext/StringDecoder.java".freeze, "java/src/json/ext/StringEncoder.java".freeze, "java/src/json/ext/Utils.java".freeze, "json-java.gemspec".freeze, "json.gemspec".freeze, "json_pure.gemspec".freeze, "lib/json.rb".freeze, "lib/json/add/bigdecimal.rb".freeze, "lib/json/add/complex.rb".freeze, "lib/json/add/core.rb".freeze, 
"lib/json/add/date.rb".freeze, "lib/json/add/date_time.rb".freeze, "lib/json/add/exception.rb".freeze, "lib/json/add/ostruct.rb".freeze, "lib/json/add/range.rb".freeze, "lib/json/add/rational.rb".freeze, "lib/json/add/regexp.rb".freeze, "lib/json/add/set.rb".freeze, "lib/json/add/struct.rb".freeze, "lib/json/add/symbol.rb".freeze, "lib/json/add/time.rb".freeze, "lib/json/common.rb".freeze, "lib/json/ext.rb".freeze, "lib/json/ext/.keep".freeze, "lib/json/generic_object.rb".freeze, "lib/json/pure.rb".freeze, "lib/json/pure/generator.rb".freeze, "lib/json/pure/parser.rb".freeze, "lib/json/version.rb".freeze, "references/rfc7159.txt".freeze, "tests/fixtures/fail10.json".freeze, "tests/fixtures/fail11.json".freeze, "tests/fixtures/fail12.json".freeze, "tests/fixtures/fail13.json".freeze, "tests/fixtures/fail14.json".freeze, "tests/fixtures/fail18.json".freeze, "tests/fixtures/fail19.json".freeze, "tests/fixtures/fail2.json".freeze, "tests/fixtures/fail20.json".freeze, "tests/fixtures/fail21.json".freeze, "tests/fixtures/fail22.json".freeze, "tests/fixtures/fail23.json".freeze, "tests/fixtures/fail24.json".freeze, "tests/fixtures/fail25.json".freeze, "tests/fixtures/fail27.json".freeze, "tests/fixtures/fail28.json".freeze, "tests/fixtures/fail3.json".freeze, "tests/fixtures/fail4.json".freeze, "tests/fixtures/fail5.json".freeze, "tests/fixtures/fail6.json".freeze, "tests/fixtures/fail7.json".freeze, "tests/fixtures/fail8.json".freeze, "tests/fixtures/fail9.json".freeze, "tests/fixtures/obsolete_fail1.json".freeze, "tests/fixtures/pass1.json".freeze, "tests/fixtures/pass15.json".freeze, "tests/fixtures/pass16.json".freeze, "tests/fixtures/pass17.json".freeze, "tests/fixtures/pass2.json".freeze, "tests/fixtures/pass26.json".freeze, "tests/fixtures/pass3.json".freeze, "tests/json_addition_test.rb".freeze, "tests/json_common_interface_test.rb".freeze, "tests/json_encoding_test.rb".freeze, "tests/json_ext_parser_test.rb".freeze, "tests/json_fixtures_test.rb".freeze, "tests/json_generator_test.rb".freeze, "tests/json_generic_object_test.rb".freeze, "tests/json_parser_test.rb".freeze, "tests/json_string_matching_test.rb".freeze, "tests/test_helper.rb".freeze, "tools/diff.sh".freeze, "tools/fuzz.rb".freeze, "tools/server.rb".freeze] + s.homepage = "http://flori.github.com/json".freeze + s.licenses = ["Ruby".freeze] + s.rdoc_options = ["--title".freeze, "JSON implemention for ruby".freeze, "--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.1.2".freeze + s.summary = "JSON Implementation for Ruby".freeze + s.test_files = ["./tests/test_helper.rb".freeze] + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 2.0", "< 4.0"]) + else + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 2.0", "< 4.0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json 2.rb new file mode 100644 index 0000000..3eb59f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json 2.rb @@ -0,0 +1,412 @@ +#frozen_string_literal: false +require 'json/common' + +## +# = JavaScript \Object Notation (\JSON) +# +# \JSON is a lightweight data-interchange format. +# +# A \JSON value is one of the following: +# - Double-quoted text: "foo". +# - Number: +1+, +1.0+, +2.0e2+. 
+# - Boolean: +true+, +false+. +# - Null: +null+. +# - \Array: an ordered list of values, enclosed by square brackets: +# ["foo", 1, 1.0, 2.0e2, true, false, null] +# +# - \Object: a collection of name/value pairs, enclosed by curly braces; +# each name is double-quoted text; +# the values may be any \JSON values: +# {"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null} +# +# A \JSON array or object may contain nested arrays, objects, and scalars +# to any depth: +# {"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]} +# [{"foo": 0, "bar": 1}, ["baz", 2]] +# +# == Using \Module \JSON +# +# To make module \JSON available in your code, begin with: +# require 'json' +# +# All examples here assume that this has been done. +# +# === Parsing \JSON +# +# You can parse a \String containing \JSON data using +# either of two methods: +# - JSON.parse(source, opts) +# - JSON.parse!(source, opts) +# +# where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# The difference between the two methods +# is that JSON.parse! omits some checks +# and may not be safe for some +source+ data; +# use it only for data from trusted sources. +# Use the safer method JSON.parse for less trusted sources. +# +# ==== Parsing \JSON Arrays +# +# When +source+ is a \JSON array, JSON.parse by default returns a Ruby \Array: +# json = '["foo", 1, 1.0, 2.0e2, true, false, null]' +# ruby = JSON.parse(json) +# ruby # => ["foo", 1, 1.0, 200.0, true, false, nil] +# ruby.class # => Array +# +# The \JSON array may contain nested arrays, objects, and scalars +# to any depth: +# json = '[{"foo": 0, "bar": 1}, ["baz", 2]]' +# JSON.parse(json) # => [{"foo"=>0, "bar"=>1}, ["baz", 2]] +# +# ==== Parsing \JSON \Objects +# +# When the source is a \JSON object, JSON.parse by default returns a Ruby \Hash: +# json = '{"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null}' +# ruby = JSON.parse(json) +# ruby # => {"a"=>"foo", "b"=>1, "c"=>1.0, "d"=>200.0, "e"=>true, "f"=>false, "g"=>nil} +# ruby.class # => Hash +# +# The \JSON object may contain nested arrays, objects, and scalars +# to any depth: +# json = '{"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]}' +# JSON.parse(json) # => {"foo"=>{"bar"=>1, "baz"=>2}, "bat"=>[0, 1, 2]} +# +# ==== Parsing \JSON Scalars +# +# When the source is a \JSON scalar (not an array or object), +# JSON.parse returns a Ruby scalar. +# +# \String: +# ruby = JSON.parse('"foo"') +# ruby # => 'foo' +# ruby.class # => String +# \Integer: +# ruby = JSON.parse('1') +# ruby # => 1 +# ruby.class # => Integer +# \Float: +# ruby = JSON.parse('1.0') +# ruby # => 1.0 +# ruby.class # => Float +# ruby = JSON.parse('2.0e2') +# ruby # => 200 +# ruby.class # => Float +# Boolean: +# ruby = JSON.parse('true') +# ruby # => true +# ruby.class # => TrueClass +# ruby = JSON.parse('false') +# ruby # => false +# ruby.class # => FalseClass +# Null: +# ruby = JSON.parse('null') +# ruby # => nil +# ruby.class # => NilClass +# +# === Generating \JSON +# +# To generate a Ruby \String containing \JSON data, +# use method JSON.generate(source, opts), where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. 
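The options hash mentioned just above is not exercised by the generation examples that follow, so here is a minimal, hedged Ruby sketch of the output-formatting side. The option names (indent, space, object_nl, array_nl) and the pretty_generate shortcut are assumptions drawn from the json gem's generator API, not from this file:

  require 'json'

  ruby = { foo: [0, 1], bar: { baz: 2 } }

  # Compact output (the default):
  JSON.generate(ruby)
  # => '{"foo":[0,1],"bar":{"baz":2}}'

  # Readable output via explicit formatting options ...
  JSON.generate(ruby, indent: '  ', space: ' ', object_nl: "\n", array_nl: "\n")

  # ... or via the equivalent convenience method:
  JSON.pretty_generate(ruby)

Both calls return a String; only the whitespace differs, not the encoded values.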
+# +# ==== Generating \JSON from Arrays +# +# When the source is a Ruby \Array, JSON.generate returns +# a \String containing a \JSON array: +# ruby = [0, 's', :foo] +# json = JSON.generate(ruby) +# json # => '[0,"s","foo"]' +# +# The Ruby \Array array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = [0, [1, 2], {foo: 3, bar: 4}] +# json = JSON.generate(ruby) +# json # => '[0,[1,2],{"foo":3,"bar":4}]' +# +# ==== Generating \JSON from Hashes +# +# When the source is a Ruby \Hash, JSON.generate returns +# a \String containing a \JSON object: +# ruby = {foo: 0, bar: 's', baz: :bat} +# json = JSON.generate(ruby) +# json # => '{"foo":0,"bar":"s","baz":"bat"}' +# +# The Ruby \Hash array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} +# json = JSON.generate(ruby) +# json # => '{"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"}' +# +# ==== Generating \JSON from Other Objects +# +# When the source is neither an \Array nor a \Hash, +# the generated \JSON data depends on the class of the source. +# +# When the source is a Ruby \Integer or \Float, JSON.generate returns +# a \String containing a \JSON number: +# JSON.generate(42) # => '42' +# JSON.generate(0.42) # => '0.42' +# +# When the source is a Ruby \String, JSON.generate returns +# a \String containing a \JSON string (with double-quotes): +# JSON.generate('A string') # => '"A string"' +# +# When the source is +true+, +false+ or +nil+, JSON.generate returns +# a \String containing the corresponding \JSON token: +# JSON.generate(true) # => 'true' +# JSON.generate(false) # => 'false' +# JSON.generate(nil) # => 'null' +# +# When the source is none of the above, JSON.generate returns +# a \String containing a \JSON string representation of the source: +# JSON.generate(:foo) # => '"foo"' +# JSON.generate(Complex(0, 0)) # => '"0+0i"' +# JSON.generate(Dir.new('.')) # => '"#
<Dir:0x...>
"' +# +# == \JSON Additions +# +# When you "round trip" a non-\String object from Ruby to \JSON and back, +# you have a new \String, instead of the object you began with: +# ruby0 = Range.new(0, 2) +# json = JSON.generate(ruby0) +# json # => '0..2"' +# ruby1 = JSON.parse(json) +# ruby1 # => '0..2' +# ruby1.class # => String +# +# You can use \JSON _additions_ to preserve the original object. +# The addition is an extension of a ruby class, so that: +# - \JSON.generate stores more information in the \JSON string. +# - \JSON.parse, called with option +create_additions+, +# uses that information to create a proper Ruby object. +# +# This example shows a \Range being generated into \JSON +# and parsed back into Ruby, both without and with +# the addition for \Range: +# ruby = Range.new(0, 2) +# # This passage does not use the addition for Range. +# json0 = JSON.generate(ruby) +# ruby0 = JSON.parse(json0) +# # This passage uses the addition for Range. +# require 'json/add/range' +# json1 = JSON.generate(ruby) +# ruby1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <require 'json/add/bigdecimal' +# - Complex: require 'json/add/complex' +# - Date: require 'json/add/date' +# - DateTime: require 'json/add/date_time' +# - Exception: require 'json/add/exception' +# - OpenStruct: require 'json/add/ostruct' +# - Range: require 'json/add/range' +# - Rational: require 'json/add/rational' +# - Regexp: require 'json/add/regexp' +# - Set: require 'json/add/set' +# - Struct: require 'json/add/struct' +# - Symbol: require 'json/add/symbol' +# - Time: require 'json/add/time' +# +# To reduce punctuation clutter, the examples below +# show the generated \JSON via +puts+, rather than the usual +inspect+, +# +# \BigDecimal: +# require 'json/add/bigdecimal' +# ruby0 = BigDecimal(0) # 0.0 +# json = JSON.generate(ruby0) # {"json_class":"BigDecimal","b":"27:0.0"} +# ruby1 = JSON.parse(json, create_additions: true) # 0.0 +# ruby1.class # => BigDecimal +# +# \Complex: +# require 'json/add/complex' +# ruby0 = Complex(1+0i) # 1+0i +# json = JSON.generate(ruby0) # {"json_class":"Complex","r":1,"i":0} +# ruby1 = JSON.parse(json, create_additions: true) # 1+0i +# ruby1.class # Complex +# +# \Date: +# require 'json/add/date' +# ruby0 = Date.today # 2020-05-02 +# json = JSON.generate(ruby0) # {"json_class":"Date","y":2020,"m":5,"d":2,"sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 +# ruby1.class # Date +# +# \DateTime: +# require 'json/add/date_time' +# ruby0 = DateTime.now # 2020-05-02T10:38:13-05:00 +# json = JSON.generate(ruby0) # {"json_class":"DateTime","y":2020,"m":5,"d":2,"H":10,"M":38,"S":13,"of":"-5/24","sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02T10:38:13-05:00 +# ruby1.class # DateTime +# +# \Exception (and its subclasses including \RuntimeError): +# require 'json/add/exception' +# ruby0 = Exception.new('A message') # A message +# json = JSON.generate(ruby0) # {"json_class":"Exception","m":"A message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # A message +# ruby1.class # Exception +# ruby0 = RuntimeError.new('Another message') # Another message +# json = JSON.generate(ruby0) # {"json_class":"RuntimeError","m":"Another message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # Another message +# ruby1.class # RuntimeError +# +# \OpenStruct: +# require 'json/add/ostruct' +# ruby0 = OpenStruct.new(name: 'Matz', language: 'Ruby') # # +# json = JSON.generate(ruby0) # 
{"json_class":"OpenStruct","t":{"name":"Matz","language":"Ruby"}} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # OpenStruct +# +# \Range: +# require 'json/add/range' +# ruby0 = Range.new(0, 2) # 0..2 +# json = JSON.generate(ruby0) # {"json_class":"Range","a":[0,2,false]} +# ruby1 = JSON.parse(json, create_additions: true) # 0..2 +# ruby1.class # Range +# +# \Rational: +# require 'json/add/rational' +# ruby0 = Rational(1, 3) # 1/3 +# json = JSON.generate(ruby0) # {"json_class":"Rational","n":1,"d":3} +# ruby1 = JSON.parse(json, create_additions: true) # 1/3 +# ruby1.class # Rational +# +# \Regexp: +# require 'json/add/regexp' +# ruby0 = Regexp.new('foo') # (?-mix:foo) +# json = JSON.generate(ruby0) # {"json_class":"Regexp","o":0,"s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # (?-mix:foo) +# ruby1.class # Regexp +# +# \Set: +# require 'json/add/set' +# ruby0 = Set.new([0, 1, 2]) # # +# json = JSON.generate(ruby0) # {"json_class":"Set","a":[0,1,2]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Set +# +# \Struct: +# require 'json/add/struct' +# Customer = Struct.new(:name, :address) # Customer +# ruby0 = Customer.new("Dave", "123 Main") # # +# json = JSON.generate(ruby0) # {"json_class":"Customer","v":["Dave","123 Main"]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Customer + # +# \Symbol: +# require 'json/add/symbol' +# ruby0 = :foo # foo +# json = JSON.generate(ruby0) # {"json_class":"Symbol","s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # foo +# ruby1.class # Symbol +# +# \Time: +# require 'json/add/time' +# ruby0 = Time.now # 2020-05-02 11:28:26 -0500 +# json = JSON.generate(ruby0) # {"json_class":"Time","s":1588436906,"n":840560000} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 11:28:26 -0500 +# ruby1.class # Time +# +# +# === Custom \JSON Additions +# +# In addition to the \JSON additions provided, +# you can craft \JSON additions of your own, +# either for Ruby built-in classes or for user-defined classes. +# +# Here's a user-defined class +Foo+: +# class Foo +# attr_accessor :bar, :baz +# def initialize(bar, baz) +# self.bar = bar +# self.baz = baz +# end +# end +# +# Here's the \JSON addition for it: +# # Extend class Foo with JSON addition. +# class Foo +# # Serialize Foo object with its class name and arguments +# def to_json(*args) +# { +# JSON.create_id => self.class.name, +# 'a' => [ bar, baz ] +# }.to_json(*args) +# end +# # Deserialize JSON string by constructing new Foo object with arguments. +# def self.json_create(object) +# new(*object['a']) +# end +# end +# +# Demonstration: +# require 'json' +# # This Foo object has no custom addition. +# foo0 = Foo.new(0, 1) +# json0 = JSON.generate(foo0) +# obj0 = JSON.parse(json0) +# # Lood the custom addition. +# require_relative 'foo_addition' +# # This foo has the custom addition. +# foo1 = Foo.new(0, 1) +# json1 = JSON.generate(foo1) +# obj1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. 
+# display = <" (String) +# With custom addition: {"json_class":"Foo","a":[0,1]} (String) +# Parsed JSON: +# Without custom addition: "#" (String) +# With custom addition: # (Foo) +# +module JSON + require 'json/version' + + begin + require 'json/ext' + rescue LoadError + require 'json/pure' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json.rb new file mode 100644 index 0000000..3eb59f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json.rb @@ -0,0 +1,412 @@ +#frozen_string_literal: false +require 'json/common' + +## +# = JavaScript \Object Notation (\JSON) +# +# \JSON is a lightweight data-interchange format. +# +# A \JSON value is one of the following: +# - Double-quoted text: "foo". +# - Number: +1+, +1.0+, +2.0e2+. +# - Boolean: +true+, +false+. +# - Null: +null+. +# - \Array: an ordered list of values, enclosed by square brackets: +# ["foo", 1, 1.0, 2.0e2, true, false, null] +# +# - \Object: a collection of name/value pairs, enclosed by curly braces; +# each name is double-quoted text; +# the values may be any \JSON values: +# {"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null} +# +# A \JSON array or object may contain nested arrays, objects, and scalars +# to any depth: +# {"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]} +# [{"foo": 0, "bar": 1}, ["baz", 2]] +# +# == Using \Module \JSON +# +# To make module \JSON available in your code, begin with: +# require 'json' +# +# All examples here assume that this has been done. +# +# === Parsing \JSON +# +# You can parse a \String containing \JSON data using +# either of two methods: +# - JSON.parse(source, opts) +# - JSON.parse!(source, opts) +# +# where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# The difference between the two methods +# is that JSON.parse! omits some checks +# and may not be safe for some +source+ data; +# use it only for data from trusted sources. +# Use the safer method JSON.parse for less trusted sources. +# +# ==== Parsing \JSON Arrays +# +# When +source+ is a \JSON array, JSON.parse by default returns a Ruby \Array: +# json = '["foo", 1, 1.0, 2.0e2, true, false, null]' +# ruby = JSON.parse(json) +# ruby # => ["foo", 1, 1.0, 200.0, true, false, nil] +# ruby.class # => Array +# +# The \JSON array may contain nested arrays, objects, and scalars +# to any depth: +# json = '[{"foo": 0, "bar": 1}, ["baz", 2]]' +# JSON.parse(json) # => [{"foo"=>0, "bar"=>1}, ["baz", 2]] +# +# ==== Parsing \JSON \Objects +# +# When the source is a \JSON object, JSON.parse by default returns a Ruby \Hash: +# json = '{"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null}' +# ruby = JSON.parse(json) +# ruby # => {"a"=>"foo", "b"=>1, "c"=>1.0, "d"=>200.0, "e"=>true, "f"=>false, "g"=>nil} +# ruby.class # => Hash +# +# The \JSON object may contain nested arrays, objects, and scalars +# to any depth: +# json = '{"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]}' +# JSON.parse(json) # => {"foo"=>{"bar"=>1, "baz"=>2}, "bat"=>[0, 1, 2]} +# +# ==== Parsing \JSON Scalars +# +# When the source is a \JSON scalar (not an array or object), +# JSON.parse returns a Ruby scalar. 
+# +# \String: +# ruby = JSON.parse('"foo"') +# ruby # => 'foo' +# ruby.class # => String +# \Integer: +# ruby = JSON.parse('1') +# ruby # => 1 +# ruby.class # => Integer +# \Float: +# ruby = JSON.parse('1.0') +# ruby # => 1.0 +# ruby.class # => Float +# ruby = JSON.parse('2.0e2') +# ruby # => 200 +# ruby.class # => Float +# Boolean: +# ruby = JSON.parse('true') +# ruby # => true +# ruby.class # => TrueClass +# ruby = JSON.parse('false') +# ruby # => false +# ruby.class # => FalseClass +# Null: +# ruby = JSON.parse('null') +# ruby # => nil +# ruby.class # => NilClass +# +# === Generating \JSON +# +# To generate a Ruby \String containing \JSON data, +# use method JSON.generate(source, opts), where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# ==== Generating \JSON from Arrays +# +# When the source is a Ruby \Array, JSON.generate returns +# a \String containing a \JSON array: +# ruby = [0, 's', :foo] +# json = JSON.generate(ruby) +# json # => '[0,"s","foo"]' +# +# The Ruby \Array array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = [0, [1, 2], {foo: 3, bar: 4}] +# json = JSON.generate(ruby) +# json # => '[0,[1,2],{"foo":3,"bar":4}]' +# +# ==== Generating \JSON from Hashes +# +# When the source is a Ruby \Hash, JSON.generate returns +# a \String containing a \JSON object: +# ruby = {foo: 0, bar: 's', baz: :bat} +# json = JSON.generate(ruby) +# json # => '{"foo":0,"bar":"s","baz":"bat"}' +# +# The Ruby \Hash array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} +# json = JSON.generate(ruby) +# json # => '{"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"}' +# +# ==== Generating \JSON from Other Objects +# +# When the source is neither an \Array nor a \Hash, +# the generated \JSON data depends on the class of the source. +# +# When the source is a Ruby \Integer or \Float, JSON.generate returns +# a \String containing a \JSON number: +# JSON.generate(42) # => '42' +# JSON.generate(0.42) # => '0.42' +# +# When the source is a Ruby \String, JSON.generate returns +# a \String containing a \JSON string (with double-quotes): +# JSON.generate('A string') # => '"A string"' +# +# When the source is +true+, +false+ or +nil+, JSON.generate returns +# a \String containing the corresponding \JSON token: +# JSON.generate(true) # => 'true' +# JSON.generate(false) # => 'false' +# JSON.generate(nil) # => 'null' +# +# When the source is none of the above, JSON.generate returns +# a \String containing a \JSON string representation of the source: +# JSON.generate(:foo) # => '"foo"' +# JSON.generate(Complex(0, 0)) # => '"0+0i"' +# JSON.generate(Dir.new('.')) # => '"#"' +# +# == \JSON Additions +# +# When you "round trip" a non-\String object from Ruby to \JSON and back, +# you have a new \String, instead of the object you began with: +# ruby0 = Range.new(0, 2) +# json = JSON.generate(ruby0) +# json # => '0..2"' +# ruby1 = JSON.parse(json) +# ruby1 # => '0..2' +# ruby1.class # => String +# +# You can use \JSON _additions_ to preserve the original object. +# The addition is an extension of a ruby class, so that: +# - \JSON.generate stores more information in the \JSON string. +# - \JSON.parse, called with option +create_additions+, +# uses that information to create a proper Ruby object. 
+# +# This example shows a \Range being generated into \JSON +# and parsed back into Ruby, both without and with +# the addition for \Range: +# ruby = Range.new(0, 2) +# # This passage does not use the addition for Range. +# json0 = JSON.generate(ruby) +# ruby0 = JSON.parse(json0) +# # This passage uses the addition for Range. +# require 'json/add/range' +# json1 = JSON.generate(ruby) +# ruby1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <require 'json/add/bigdecimal' +# - Complex: require 'json/add/complex' +# - Date: require 'json/add/date' +# - DateTime: require 'json/add/date_time' +# - Exception: require 'json/add/exception' +# - OpenStruct: require 'json/add/ostruct' +# - Range: require 'json/add/range' +# - Rational: require 'json/add/rational' +# - Regexp: require 'json/add/regexp' +# - Set: require 'json/add/set' +# - Struct: require 'json/add/struct' +# - Symbol: require 'json/add/symbol' +# - Time: require 'json/add/time' +# +# To reduce punctuation clutter, the examples below +# show the generated \JSON via +puts+, rather than the usual +inspect+, +# +# \BigDecimal: +# require 'json/add/bigdecimal' +# ruby0 = BigDecimal(0) # 0.0 +# json = JSON.generate(ruby0) # {"json_class":"BigDecimal","b":"27:0.0"} +# ruby1 = JSON.parse(json, create_additions: true) # 0.0 +# ruby1.class # => BigDecimal +# +# \Complex: +# require 'json/add/complex' +# ruby0 = Complex(1+0i) # 1+0i +# json = JSON.generate(ruby0) # {"json_class":"Complex","r":1,"i":0} +# ruby1 = JSON.parse(json, create_additions: true) # 1+0i +# ruby1.class # Complex +# +# \Date: +# require 'json/add/date' +# ruby0 = Date.today # 2020-05-02 +# json = JSON.generate(ruby0) # {"json_class":"Date","y":2020,"m":5,"d":2,"sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 +# ruby1.class # Date +# +# \DateTime: +# require 'json/add/date_time' +# ruby0 = DateTime.now # 2020-05-02T10:38:13-05:00 +# json = JSON.generate(ruby0) # {"json_class":"DateTime","y":2020,"m":5,"d":2,"H":10,"M":38,"S":13,"of":"-5/24","sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02T10:38:13-05:00 +# ruby1.class # DateTime +# +# \Exception (and its subclasses including \RuntimeError): +# require 'json/add/exception' +# ruby0 = Exception.new('A message') # A message +# json = JSON.generate(ruby0) # {"json_class":"Exception","m":"A message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # A message +# ruby1.class # Exception +# ruby0 = RuntimeError.new('Another message') # Another message +# json = JSON.generate(ruby0) # {"json_class":"RuntimeError","m":"Another message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # Another message +# ruby1.class # RuntimeError +# +# \OpenStruct: +# require 'json/add/ostruct' +# ruby0 = OpenStruct.new(name: 'Matz', language: 'Ruby') # # +# json = JSON.generate(ruby0) # {"json_class":"OpenStruct","t":{"name":"Matz","language":"Ruby"}} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # OpenStruct +# +# \Range: +# require 'json/add/range' +# ruby0 = Range.new(0, 2) # 0..2 +# json = JSON.generate(ruby0) # {"json_class":"Range","a":[0,2,false]} +# ruby1 = JSON.parse(json, create_additions: true) # 0..2 +# ruby1.class # Range +# +# \Rational: +# require 'json/add/rational' +# ruby0 = Rational(1, 3) # 1/3 +# json = JSON.generate(ruby0) # {"json_class":"Rational","n":1,"d":3} +# ruby1 = JSON.parse(json, create_additions: true) # 1/3 +# ruby1.class # Rational +# +# \Regexp: +# require 
'json/add/regexp' +# ruby0 = Regexp.new('foo') # (?-mix:foo) +# json = JSON.generate(ruby0) # {"json_class":"Regexp","o":0,"s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # (?-mix:foo) +# ruby1.class # Regexp +# +# \Set: +# require 'json/add/set' +# ruby0 = Set.new([0, 1, 2]) # # +# json = JSON.generate(ruby0) # {"json_class":"Set","a":[0,1,2]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Set +# +# \Struct: +# require 'json/add/struct' +# Customer = Struct.new(:name, :address) # Customer +# ruby0 = Customer.new("Dave", "123 Main") # # +# json = JSON.generate(ruby0) # {"json_class":"Customer","v":["Dave","123 Main"]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Customer + # +# \Symbol: +# require 'json/add/symbol' +# ruby0 = :foo # foo +# json = JSON.generate(ruby0) # {"json_class":"Symbol","s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # foo +# ruby1.class # Symbol +# +# \Time: +# require 'json/add/time' +# ruby0 = Time.now # 2020-05-02 11:28:26 -0500 +# json = JSON.generate(ruby0) # {"json_class":"Time","s":1588436906,"n":840560000} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 11:28:26 -0500 +# ruby1.class # Time +# +# +# === Custom \JSON Additions +# +# In addition to the \JSON additions provided, +# you can craft \JSON additions of your own, +# either for Ruby built-in classes or for user-defined classes. +# +# Here's a user-defined class +Foo+: +# class Foo +# attr_accessor :bar, :baz +# def initialize(bar, baz) +# self.bar = bar +# self.baz = baz +# end +# end +# +# Here's the \JSON addition for it: +# # Extend class Foo with JSON addition. +# class Foo +# # Serialize Foo object with its class name and arguments +# def to_json(*args) +# { +# JSON.create_id => self.class.name, +# 'a' => [ bar, baz ] +# }.to_json(*args) +# end +# # Deserialize JSON string by constructing new Foo object with arguments. +# def self.json_create(object) +# new(*object['a']) +# end +# end +# +# Demonstration: +# require 'json' +# # This Foo object has no custom addition. +# foo0 = Foo.new(0, 1) +# json0 = JSON.generate(foo0) +# obj0 = JSON.parse(json0) +# # Lood the custom addition. +# require_relative 'foo_addition' +# # This foo has the custom addition. +# foo1 = Foo.new(0, 1) +# json1 = JSON.generate(foo1) +# obj1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <" (String) +# With custom addition: {"json_class":"Foo","a":[0,1]} (String) +# Parsed JSON: +# Without custom addition: "#" (String) +# With custom addition: # (Foo) +# +module JSON + require 'json/version' + + begin + require 'json/ext' + rescue LoadError + require 'json/pure' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/bigdecimal 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/bigdecimal 2.rb new file mode 100644 index 0000000..c8b4f56 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/bigdecimal 2.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::BigDecimal) or require 'bigdecimal' + +class BigDecimal + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + BigDecimal._load object['b'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. 
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'b' => _dump, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/bigdecimal.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/bigdecimal.rb new file mode 100644 index 0000000..c8b4f56 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/bigdecimal.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::BigDecimal) or require 'bigdecimal' + +class BigDecimal + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + BigDecimal._load object['b'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'b' => _dump, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/complex 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/complex 2.rb new file mode 100644 index 0000000..4d977e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/complex 2.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Complex) or require 'complex' + +class Complex + + # Deserializes JSON string by converting Real value r, imaginary + # value i, to a Complex object. + def self.json_create(object) + Complex(object['r'], object['i']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'r' => real, + 'i' => imag, + } + end + + # Stores class name (Complex) along with real value r and imaginary value i as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/complex.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/complex.rb new file mode 100644 index 0000000..4d977e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/complex.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Complex) or require 'complex' + +class Complex + + # Deserializes JSON string by converting Real value r, imaginary + # value i, to a Complex object. + def self.json_create(object) + Complex(object['r'], object['i']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'r' => real, + 'i' => imag, + } + end + + # Stores class name (Complex) along with real value r and imaginary value i as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/core 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/core 2.rb new file mode 100644 index 0000000..bfb017c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/core 2.rb @@ -0,0 +1,12 @@ +#frozen_string_literal: false +# This file requires the implementations of ruby core's custom objects for +# serialisation/deserialisation. 
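+#
+# A minimal usage sketch (assuming these core additions are wanted at parse
+# time; outputs shown approximately):
+#   require 'json/add/core'
+#   json = JSON.generate(:foo)                # '{"json_class":"Symbol","s":"foo"}'
+#   JSON.parse(json, create_additions: true)  # :foo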
+ +require 'json/add/date' +require 'json/add/date_time' +require 'json/add/exception' +require 'json/add/range' +require 'json/add/regexp' +require 'json/add/struct' +require 'json/add/symbol' +require 'json/add/time' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/core.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/core.rb new file mode 100644 index 0000000..bfb017c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/core.rb @@ -0,0 +1,12 @@ +#frozen_string_literal: false +# This file requires the implementations of ruby core's custom objects for +# serialisation/deserialisation. + +require 'json/add/date' +require 'json/add/date_time' +require 'json/add/exception' +require 'json/add/range' +require 'json/add/regexp' +require 'json/add/struct' +require 'json/add/symbol' +require 'json/add/time' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date 2.rb new file mode 100644 index 0000000..2552356 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date 2.rb @@ -0,0 +1,34 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class Date + + # Deserializes JSON string by converting Julian year y, month + # m, day d and Day of Calendar Reform sg to Date. + def self.json_create(object) + civil(*object.values_at('y', 'm', 'd', 'sg')) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'sg' => start, + } + end + + # Stores class name (Date) with Julian year y, month m, day + # d and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date.rb new file mode 100644 index 0000000..2552356 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date.rb @@ -0,0 +1,34 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class Date + + # Deserializes JSON string by converting Julian year y, month + # m, day d and Day of Calendar Reform sg to Date. + def self.json_create(object) + civil(*object.values_at('y', 'm', 'd', 'sg')) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'sg' => start, + } + end + + # Stores class name (Date) with Julian year y, month m, day + # d and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date_time 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date_time 2.rb new file mode 100644 index 0000000..38b0e86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date_time 2.rb @@ -0,0 +1,50 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class DateTime + + # Deserializes JSON string by converting year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg to DateTime. + def self.json_create(object) + args = object.values_at('y', 'm', 'd', 'H', 'M', 'S') + of_a, of_b = object['of'].split('/') + if of_b and of_b != '0' + args << Rational(of_a.to_i, of_b.to_i) + else + args << of_a + end + args << object['sg'] + civil(*args) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'H' => hour, + 'M' => min, + 'S' => sec, + 'of' => offset.to_s, + 'sg' => start, + } + end + + # Stores class name (DateTime) with Julian year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end + + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date_time.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date_time.rb new file mode 100644 index 0000000..38b0e86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/date_time.rb @@ -0,0 +1,50 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class DateTime + + # Deserializes JSON string by converting year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg to DateTime. + def self.json_create(object) + args = object.values_at('y', 'm', 'd', 'H', 'M', 'S') + of_a, of_b = object['of'].split('/') + if of_b and of_b != '0' + args << Rational(of_a.to_i, of_b.to_i) + else + args << of_a + end + args << object['sg'] + civil(*args) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'H' => hour, + 'M' => min, + 'S' => sec, + 'of' => offset.to_s, + 'sg' => start, + } + end + + # Stores class name (DateTime) with Julian year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end + + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/exception 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/exception 2.rb new file mode 100644 index 0000000..a107e5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/exception 2.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Exception + + # Deserializes JSON string by constructing new Exception object with message + # m and backtrace b serialized with to_json + def self.json_create(object) + result = new(object['m']) + result.set_backtrace object['b'] + result + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'm' => message, + 'b' => backtrace, + } + end + + # Stores class name (Exception) with message m and backtrace array + # b as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/exception.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/exception.rb new file mode 100644 index 0000000..a107e5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/exception.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Exception + + # Deserializes JSON string by constructing new Exception object with message + # m and backtrace b serialized with to_json + def self.json_create(object) + result = new(object['m']) + result.set_backtrace object['b'] + result + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'm' => message, + 'b' => backtrace, + } + end + + # Stores class name (Exception) with message m and backtrace array + # b as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/ostruct 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/ostruct 2.rb new file mode 100644 index 0000000..686cf00 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/ostruct 2.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'ostruct' + +class OpenStruct + + # Deserializes JSON string by constructing new Struct object with values + # t serialized by to_json. + def self.json_create(object) + new(object['t'] || object[:t]) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 't' => table, + } + end + + # Stores class name (OpenStruct) with this struct's values t as a + # JSON string. 
+ def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/ostruct.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/ostruct.rb new file mode 100644 index 0000000..686cf00 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/ostruct.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'ostruct' + +class OpenStruct + + # Deserializes JSON string by constructing new Struct object with values + # t serialized by to_json. + def self.json_create(object) + new(object['t'] || object[:t]) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 't' => table, + } + end + + # Stores class name (OpenStruct) with this struct's values t as a + # JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/range 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/range 2.rb new file mode 100644 index 0000000..93529fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/range 2.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Range + + # Deserializes JSON string by constructing new Range object with arguments + # a serialized by to_json. + def self.json_create(object) + new(*object['a']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => [ first, last, exclude_end? ] + } + end + + # Stores class name (Range) with JSON array of arguments a which + # include first (integer), last (integer), and + # exclude_end? (boolean) as JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/range.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/range.rb new file mode 100644 index 0000000..93529fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/range.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Range + + # Deserializes JSON string by constructing new Range object with arguments + # a serialized by to_json. + def self.json_create(object) + new(*object['a']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => [ first, last, exclude_end? ] + } + end + + # Stores class name (Range) with JSON array of arguments a which + # include first (integer), last (integer), and + # exclude_end? (boolean) as JSON string. 
+ def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/rational 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/rational 2.rb new file mode 100644 index 0000000..6be4034 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/rational 2.rb @@ -0,0 +1,28 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Rational) or require 'rational' + +class Rational + # Deserializes JSON string by converting numerator value n, + # denominator value d, to a Rational object. + def self.json_create(object) + Rational(object['n'], object['d']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'n' => numerator, + 'd' => denominator, + } + end + + # Stores class name (Rational) along with numerator value n and denominator value d as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/rational.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/rational.rb new file mode 100644 index 0000000..6be4034 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/rational.rb @@ -0,0 +1,28 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Rational) or require 'rational' + +class Rational + # Deserializes JSON string by converting numerator value n, + # denominator value d, to a Rational object. + def self.json_create(object) + Rational(object['n'], object['d']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'n' => numerator, + 'd' => denominator, + } + end + + # Stores class name (Rational) along with numerator value n and denominator value d as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/regexp 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/regexp 2.rb new file mode 100644 index 0000000..39d69fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/regexp 2.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Regexp + + # Deserializes JSON string by constructing new Regexp object with source + # s (Regexp or String) and options o serialized by + # to_json + def self.json_create(object) + new(object['s'], object['o']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'o' => options, + 's' => source, + } + end + + # Stores class name (Regexp) with options o and source s + # (Regexp or String) as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/regexp.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/regexp.rb new file mode 100644 index 0000000..39d69fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/regexp.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Regexp + + # Deserializes JSON string by constructing new Regexp object with source + # s (Regexp or String) and options o serialized by + # to_json + def self.json_create(object) + new(object['s'], object['o']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'o' => options, + 's' => source, + } + end + + # Stores class name (Regexp) with options o and source s + # (Regexp or String) as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/set 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/set 2.rb new file mode 100644 index 0000000..71e2a0a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/set 2.rb @@ -0,0 +1,29 @@ +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Set) or require 'set' + +class Set + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + new object['a'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => to_a, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/set.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/set.rb new file mode 100644 index 0000000..71e2a0a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/set.rb @@ -0,0 +1,29 @@ +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Set) or require 'set' + +class Set + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + new object['a'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => to_a, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/struct 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/struct 2.rb new file mode 100644 index 0000000..e8395ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/struct 2.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Struct + + # Deserializes JSON string by constructing new Struct object with values + # v serialized by to_json. + def self.json_create(object) + new(*object['v']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
+ def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 'v' => values, + } + end + + # Stores class name (Struct) with Struct values v as a JSON string. + # Only named structs are supported. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/struct.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/struct.rb new file mode 100644 index 0000000..e8395ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/struct.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Struct + + # Deserializes JSON string by constructing new Struct object with values + # v serialized by to_json. + def self.json_create(object) + new(*object['v']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 'v' => values, + } + end + + # Stores class name (Struct) with Struct values v as a JSON string. + # Only named structs are supported. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/symbol 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/symbol 2.rb new file mode 100644 index 0000000..74b13a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/symbol 2.rb @@ -0,0 +1,25 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Symbol + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 's' => to_s, + } + end + + # Stores class name (Symbol) with String representation of Symbol as a JSON string. + def to_json(*a) + as_json.to_json(*a) + end + + # Deserializes JSON string by converting the string value stored in the object to a Symbol + def self.json_create(o) + o['s'].to_sym + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/symbol.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/symbol.rb new file mode 100644 index 0000000..74b13a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/symbol.rb @@ -0,0 +1,25 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Symbol + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 's' => to_s, + } + end + + # Stores class name (Symbol) with String representation of Symbol as a JSON string. 
+ def to_json(*a) + as_json.to_json(*a) + end + + # Deserializes JSON string by converting the string value stored in the object to a Symbol + def self.json_create(o) + o['s'].to_sym + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/time 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/time 2.rb new file mode 100644 index 0000000..b73acc4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/time 2.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Time + + # Deserializes JSON string by converting time since epoch to Time + def self.json_create(object) + if usec = object.delete('u') # used to be tv_usec -> tv_nsec + object['n'] = usec * 1000 + end + if method_defined?(:tv_nsec) + at(object['s'], Rational(object['n'], 1000)) + else + at(object['s'], object['n'] / 1000) + end + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + nanoseconds = [ tv_usec * 1000 ] + respond_to?(:tv_nsec) and nanoseconds << tv_nsec + nanoseconds = nanoseconds.max + { + JSON.create_id => self.class.name, + 's' => tv_sec, + 'n' => nanoseconds, + } + end + + # Stores class name (Time) with number of seconds since epoch and number of + # microseconds for Time as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/time.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/time.rb new file mode 100644 index 0000000..b73acc4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/add/time.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Time + + # Deserializes JSON string by converting time since epoch to Time + def self.json_create(object) + if usec = object.delete('u') # used to be tv_usec -> tv_nsec + object['n'] = usec * 1000 + end + if method_defined?(:tv_nsec) + at(object['s'], Rational(object['n'], 1000)) + else + at(object['s'], object['n'] / 1000) + end + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + nanoseconds = [ tv_usec * 1000 ] + respond_to?(:tv_nsec) and nanoseconds << tv_nsec + nanoseconds = nanoseconds.max + { + JSON.create_id => self.class.name, + 's' => tv_sec, + 'n' => nanoseconds, + } + end + + # Stores class name (Time) with number of seconds since epoch and number of + # microseconds for Time as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/common 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/common 2.rb new file mode 100644 index 0000000..991d760 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/common 2.rb @@ -0,0 +1,691 @@ +#frozen_string_literal: false +require 'json/version' +require 'json/generic_object' + +module JSON + class << self + # If +object+ is a + # {String-convertible object}[doc/implicit_conversion_rdoc.html#label-String-Convertible+Objects] + # (implementing +to_str+), calls JSON.parse with +object+ and +opts+: + # json = '[0, 1, null]' + # JSON[json]# => [0, 1, nil] + # + # Otherwise, calls JSON.generate with +object+ and +opts+: + # ruby = [0, 1, nil] + # JSON[ruby] # => '[0,1,null]' + def [](object, opts = {}) + if object.respond_to? 
:to_str + JSON.parse(object.to_str, opts) + else + JSON.generate(object, opts) + end + end + + # Returns the JSON parser class that is used by JSON. This is either + # JSON::Ext::Parser or JSON::Pure::Parser: + # JSON.parser # => JSON::Ext::Parser + attr_reader :parser + + # Set the JSON parser class _parser_ to be used by JSON. + def parser=(parser) # :nodoc: + @parser = parser + remove_const :Parser if const_defined?(:Parser, false) + const_set :Parser, parser + end + + # Return the constant located at _path_. The format of _path_ has to be + # either ::A::B::C or A::B::C. In any case, A has to be located at the top + # level (absolute namespace path?). If there doesn't exist a constant at + # the given path, an ArgumentError is raised. + def deep_const_get(path) # :nodoc: + path.to_s.split(/::/).inject(Object) do |p, c| + case + when c.empty? then p + when p.const_defined?(c, true) then p.const_get(c) + else + begin + p.const_missing(c) + rescue NameError => e + raise ArgumentError, "can't get const #{path}: #{e}" + end + end + end + end + + # Set the module _generator_ to be used by JSON. + def generator=(generator) # :nodoc: + old, $VERBOSE = $VERBOSE, nil + @generator = generator + generator_methods = generator::GeneratorMethods + for const in generator_methods.constants + klass = deep_const_get(const) + modul = generator_methods.const_get(const) + klass.class_eval do + instance_methods(false).each do |m| + m.to_s == 'to_json' and remove_method m + end + include modul + end + end + self.state = generator::State + const_set :State, self.state + const_set :SAFE_STATE_PROTOTYPE, State.new + const_set :FAST_STATE_PROTOTYPE, State.new( + :indent => '', + :space => '', + :object_nl => "", + :array_nl => "", + :max_nesting => false + ) + const_set :PRETTY_STATE_PROTOTYPE, State.new( + :indent => ' ', + :space => ' ', + :object_nl => "\n", + :array_nl => "\n" + ) + ensure + $VERBOSE = old + end + + # Returns the JSON generator module that is used by JSON. This is + # either JSON::Ext::Generator or JSON::Pure::Generator: + # JSON.generator # => JSON::Ext::Generator + attr_reader :generator + + # Sets or Returns the JSON generator state class that is used by JSON. This is + # either JSON::Ext::Generator::State or JSON::Pure::Generator::State: + # JSON.state # => JSON::Ext::Generator::State + attr_accessor :state + + # Sets or returns create identifier, which is used to decide if the _json_create_ + # hook of a class should be called; initial value is +json_class+: + # JSON.create_id # => 'json_class' + attr_accessor :create_id + end + self.create_id = 'json_class' + + NaN = 0.0/0 + + Infinity = 1.0/0 + + MinusInfinity = -Infinity + + # The base exception for JSON errors. + class JSONError < StandardError + def self.wrap(exception) + obj = new("Wrapped(#{exception.class}): #{exception.message.inspect}") + obj.set_backtrace exception.backtrace + obj + end + end + + # This exception is raised if a parser error occurs. + class ParserError < JSONError; end + + # This exception is raised if the nesting of parsed data structures is too + # deep. + class NestingError < ParserError; end + + # :stopdoc: + class CircularDatastructure < NestingError; end + # :startdoc: + + # This exception is raised if a generator or unparser error occurs. + class GeneratorError < JSONError; end + # For backwards compatibility + UnparserError = GeneratorError # :nodoc: + + # This exception is raised if the required unicode support is missing on the + # system. Usually this means that the iconv library is not installed. 
+ class MissingUnicodeSupport < JSONError; end + + module_function + + # :call-seq: + # JSON.parse(source, opts) -> object + # + # Argument +source+ contains the \String to be parsed. It must be a + # {String-convertible object}[doc/implicit_conversion_rdoc.html#label-String-Convertible+Objects] + # (implementing +to_str+), and must contain valid \JSON data. + # + # Argument +opts+, if given, contains options for the parsing, and must be a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects] + # (implementing +to_hash+). + # + # Returns the Ruby objects created by parsing the given +source+. + # + # --- + # + # When +source+ is a \JSON array, returns a Ruby \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby # => ["foo", 1.0, true, false, nil] + # ruby.class # => Array + # + # When +source+ is a \JSON object, returns a Ruby \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # ruby.class # => Hash + # + # For examples of parsing for all \JSON data types, see + # {Parsing \JSON}[#module-JSON-label-Parsing+JSON]. + # + # ====== Input Options + # + # Option +max_nesting+ (\Integer) specifies the maximum nesting depth allowed; + # defaults to +100+; specify +false+ to disable depth checking. + # + # With the default, +false+: + # source = '[0, [1, [2, [3]]]]' + # ruby = JSON.parse(source) + # ruby # => [0, [1, [2, [3]]]] + # Too deep: + # # Raises JSON::NestingError (nesting of 2 is too deep): + # JSON.parse(source, {max_nesting: 1}) + # Bad value: + # # Raises TypeError (wrong argument type Symbol (expected Fixnum)): + # JSON.parse(source, {max_nesting: :foo}) + # + # --- + # + # Option +allow_nan+ (boolean) specifies whether to allow + # NaN, Infinity, and MinusInfinity in +source+; + # defaults to +false+. + # + # With the default, +false+: + # # Raises JSON::ParserError (225: unexpected token at '[NaN]'): + # JSON.parse('[NaN]') + # # Raises JSON::ParserError (232: unexpected token at '[Infinity]'): + # JSON.parse('[Infinity]') + # # Raises JSON::ParserError (248: unexpected token at '[-Infinity]'): + # JSON.parse('[-Infinity]') + # Allow: + # source = '[NaN, Infinity, -Infinity]' + # ruby = JSON.parse(source, {allow_nan: true}) + # ruby # => [NaN, Infinity, -Infinity] + # + # ====== Output Options + # + # Option +symbolize_names+ (boolean) specifies whether returned \Hash keys + # should be Symbols; + # defaults to +false+ (use Strings). + # + # With the default, +false+: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # Use Symbols: + # ruby = JSON.parse(source, {symbolize_names: true}) + # ruby # => {:a=>"foo", :b=>1.0, :c=>true, :d=>false, :e=>nil} + # + # --- + # + # Option +object_class+ (\Class) specifies the Ruby class to be used + # for each \JSON object; + # defaults to \Hash. + # + # With the default, \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby.class # => Hash + # Use class \OpenStruct: + # ruby = JSON.parse(source, {object_class: OpenStruct}) + # ruby # => # + # + # --- + # + # Option +array_class+ (\Class) specifies the Ruby class to be used + # for each \JSON array; + # defaults to \Array. 
+ # + # With the default, \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby.class # => Array + # Use class \Set: + # ruby = JSON.parse(source, {array_class: Set}) + # ruby # => # + # + # --- + # + # Option +create_additions+ (boolean) specifies whether to use \JSON additions in parsing. + # See {\JSON Additions}[#module-JSON-label-JSON+Additions]. + # + # ====== Exceptions + # + # Raises an exception if +source+ is not valid JSON: + # + # # Raises JSON::ParserError (783: unexpected token at ''): + # JSON.parse('') + # + def parse(source, opts = {}) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.parse!(source, opts) -> object + # + # Calls + # parse(source, opts) + # with +source+ and possibly modified +opts+. + # + # Differences from JSON.parse: + # - Option +max_nesting+, if not provided, defaults to +false+, + # which disables checking for nesting depth. + # - Option +allow_nan+, if not provided, defaults to +true+. + def parse!(source, opts = {}) + opts = { + :max_nesting => false, + :allow_nan => true + }.merge(opts) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.generate(obj, opts = nil) -> new_string + # + # Argument +obj+ is the Ruby object to be converted to \JSON. + # + # Argument +opts+, if given, contains options for the generation, and must be a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects] + # (implementing +to_hash+). + # + # Returns a \String containing the generated \JSON data. + # + # See also JSON.fast_generate, JSON.pretty_generate. + # + # --- + # + # When +obj+ is an + # {Array-convertible object}[doc/implicit_conversion_rdoc.html#label-Array-Convertible+Objects] + # (implementing +to_ary+), returns a \String containing a \JSON array: + # obj = ["foo", 1.0, true, false, nil] + # json = JSON.generate(obj) + # json # => '["foo",1.0,true,false,null]' + # + # When +obj+ is a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects], + # return a \String containing a \JSON object: + # obj = {foo: 0, bar: 's', baz: :bat} + # json = JSON.generate(obj) + # json # => '{"foo":0,"bar":"s","baz":"bat"}' + # + # For examples of generating from other Ruby objects, see + # {Generating \JSON from Other Objects}[#module-JSON-label-Generating+JSON+from+Other+Objects]. + # + # ====== Input Options + # + # Option +allow_nan+ (boolean) specifies whether + # +NaN+, +Infinity+, and -Infinity may be generated; + # defaults to +false+. + # + # With the default, +false+: + # # Raises JSON::GeneratorError (920: NaN not allowed in JSON): + # JSON.generate(JSON::NaN) + # # Raises JSON::GeneratorError (917: Infinity not allowed in JSON): + # JSON.generate(JSON::Infinity) + # # Raises JSON::GeneratorError (917: -Infinity not allowed in JSON): + # JSON.generate(JSON::MinusInfinity) + # + # Allow: + # ruby = [Float::NaN, Float::Infinity, Float::MinusInfinity] + # JSON.generate(ruby, allow_nan: true) # => '[NaN,Infinity,-Infinity]' + # + # --- + # + # Option +max_nesting+ (\Integer) specifies the maximum nesting depth + # in +obj+; defaults to +100+. + # + # With the default, +100+: + # obj = [[[[[[0]]]]]] + # JSON.generate(obj) # => '[[[[[[0]]]]]]' + # + # Too deep: + # # Raises JSON::NestingError (nesting of 2 is too deep): + # JSON.generate(obj, max_nesting: 2) + # + # ====== Output Options + # + # The default formatting options generate the most compact + # \JSON data, all on one line and with no whitespace. 
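+ #
+ # A small sketch of the compact default output (illustrative):
+ #   JSON.generate({a: 1, b: [2, 3]}) # => '{"a":1,"b":[2,3]}'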
+ # + # You can use these formatting options to generate + # \JSON data in a more open format, using whitespace. + # See also JSON.pretty_generate. + # + # - Option +array_nl+ (\String) specifies a string (usually a newline) + # to be inserted after each \JSON array; defaults to the empty \String, ''. + # - Option +object_nl+ (\String) specifies a string (usually a newline) + # to be inserted after each \JSON object; defaults to the empty \String, ''. + # - Option +indent+ (\String) specifies the string (usually spaces) to be + # used for indentation; defaults to the empty \String, ''; + # defaults to the empty \String, ''; + # has no effect unless options +array_nl+ or +object_nl+ specify newlines. + # - Option +space+ (\String) specifies a string (usually a space) to be + # inserted after the colon in each \JSON object's pair; + # defaults to the empty \String, ''. + # - Option +space_before+ (\String) specifies a string (usually a space) to be + # inserted before the colon in each \JSON object's pair; + # defaults to the empty \String, ''. + # + # In this example, +obj+ is used first to generate the shortest + # \JSON data (no whitespace), then again with all formatting options + # specified: + # + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.generate(obj) + # puts 'Compact:', json + # opts = { + # array_nl: "\n", + # object_nl: "\n", + # indent+: ' ', + # space_before: ' ', + # space: ' ' + # } + # puts 'Open:', JSON.generate(obj, opts) + # + # Output: + # Compact: + # {"foo":["bar","baz"],"bat":{"bam":0,"bad":1}} + # Open: + # { + # "foo" : [ + # "bar", + # "baz" + # ], + # "bat" : { + # "bam" : 0, + # "bad" : 1 + # } + # } + # + # --- + # + # Raises an exception if any formatting option is not a \String. + # + # ====== Exceptions + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises JSON::NestingError (nesting of 100 is too deep): + # JSON.generate(a) + # + def generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = SAFE_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state = state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and + # later delete them. + alias unparse generate + module_function :unparse + # :startdoc: + + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # By default, generates \JSON data without checking + # for circular references in +obj+ (option +max_nesting+ set to +false+, disabled). + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises SystemStackError (stack level too deep): + # JSON.fast_generate(a) + def fast_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = FAST_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. 
+ alias fast_unparse fast_generate + module_function :fast_unparse + # :startdoc: + + # :call-seq: + # JSON.pretty_generate(obj, opts = nil) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # Default options are: + # { + # indent: ' ', # Two spaces + # space: ' ', # One space + # array_nl: "\n", # Newline + # object_nl: "\n" # Newline + # } + # + # Example: + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.pretty_generate(obj) + # puts json + # Output: + # { + # "foo": [ + # "bar", + # "baz" + # ], + # "bat": { + # "bam": 0, + # "bad": 1 + # } + # } + # + def pretty_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = PRETTY_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias pretty_unparse pretty_generate + module_function :pretty_unparse + # :startdoc: + + class << self + # Sets or returns default options for the JSON.load method. + # Initially: + # opts = JSON.load_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :allow_blank=>true, :create_additions=>true} + attr_accessor :load_default_options + end + self.load_default_options = { + :max_nesting => false, + :allow_nan => true, + :allow_blank => true, + :create_additions => true, + } + + # Load a ruby data structure from a JSON _source_ and return it. A source can + # either be a string-like object, an IO-like object, or an object responding + # to the read method. If _proc_ was given, it will be called with any nested + # Ruby object as an argument recursively in depth first order. To modify the + # default options pass in the optional _options_ argument as well. + # + # BEWARE: This method is meant to serialise data from trusted user input, + # like from your own database server or clients under your control, it could + # be dangerous to allow untrusted users to pass JSON sources into it. The + # default options for the parser can be changed via the load_default_options + # method. + # + # This method is part of the implementation of the load/dump interface of + # Marshal and YAML. + def load(source, proc = nil, options = {}) + opts = load_default_options.merge options + if source.respond_to? :to_str + source = source.to_str + elsif source.respond_to? :to_io + source = source.to_io.read + elsif source.respond_to?(:read) + source = source.read + end + if opts[:allow_blank] && (source.nil? || source.empty?) + source = 'null' + end + result = parse(source, opts) + recurse_proc(result, &proc) if proc + result + end + + # Recursively calls passed _Proc_ if the parsed data structure is an _Array_ or _Hash_ + def recurse_proc(result, &proc) + case result + when Array + result.each { |x| recurse_proc x, &proc } + proc.call result + when Hash + result.each { |x, y| recurse_proc x, &proc; recurse_proc y, &proc } + proc.call result + else + proc.call result + end + end + + alias restore load + module_function :restore + + class << self + # Sets or returns the default options for the JSON.dump method. 
+ # Initially: + # opts = JSON.dump_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true} + attr_accessor :dump_default_options + end + self.dump_default_options = { + :max_nesting => false, + :allow_nan => true, + } + + # Dumps _obj_ as a JSON string, i.e. calls generate on the object and returns + # the result. + # + # If anIO (an IO-like object or an object that responds to the write method) + # was given, the resulting JSON is written to it. + # + # If the number of nested arrays or objects exceeds _limit_, an ArgumentError + # exception is raised. This argument is similar (but not exactly the + # same!) to the _limit_ argument in Marshal.dump. + # + # The default options for the generator can be changed via the + # dump_default_options method. + # + # This method is part of the implementation of the load/dump interface of + # Marshal and YAML. + def dump(obj, anIO = nil, limit = nil) + if anIO and limit.nil? + anIO = anIO.to_io if anIO.respond_to?(:to_io) + unless anIO.respond_to?(:write) + limit = anIO + anIO = nil + end + end + opts = JSON.dump_default_options + opts = opts.merge(:max_nesting => limit) if limit + result = generate(obj, opts) + if anIO + anIO.write result + anIO + else + result + end + rescue JSON::NestingError + raise ArgumentError, "exceed depth limit" + end + + # Encodes string using String.encode. + def self.iconv(to, from, string) + string.encode(to, from) + end +end + +module ::Kernel + private + + # Outputs _objs_ to STDOUT as JSON strings in the shortest form, that is in + # one line. + def j(*objs) + objs.each do |obj| + puts JSON::generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # Outputs _objs_ to STDOUT as JSON strings in a pretty format, with + # indentation and over many lines. + def jj(*objs) + objs.each do |obj| + puts JSON::pretty_generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # If _object_ is string-like, parse the string and return the parsed result as + # a Ruby data structure. Otherwise, generate a JSON text from the Ruby data + # structure object and return it. + # + # The _opts_ argument is passed through to generate/parse respectively. See + # generate and parse for their documentation. + def JSON(object, *args) + if object.respond_to? :to_str + JSON.parse(object.to_str, args.first) + else + JSON.generate(object, args.first) + end + end +end + +# Extends any Class to include _json_creatable?_ method. +class ::Class + # Returns true if this class can be used to create an instance + # from a serialised JSON string. The class has to implement a class + # method _json_create_ that expects a hash as first parameter. The hash + # should include the required data. + def json_creatable? 
+ respond_to?(:json_create) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/common.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/common.rb new file mode 100644 index 0000000..991d760 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/common.rb @@ -0,0 +1,691 @@ +#frozen_string_literal: false +require 'json/version' +require 'json/generic_object' + +module JSON + class << self + # If +object+ is a + # {String-convertible object}[doc/implicit_conversion_rdoc.html#label-String-Convertible+Objects] + # (implementing +to_str+), calls JSON.parse with +object+ and +opts+: + # json = '[0, 1, null]' + # JSON[json]# => [0, 1, nil] + # + # Otherwise, calls JSON.generate with +object+ and +opts+: + # ruby = [0, 1, nil] + # JSON[ruby] # => '[0,1,null]' + def [](object, opts = {}) + if object.respond_to? :to_str + JSON.parse(object.to_str, opts) + else + JSON.generate(object, opts) + end + end + + # Returns the JSON parser class that is used by JSON. This is either + # JSON::Ext::Parser or JSON::Pure::Parser: + # JSON.parser # => JSON::Ext::Parser + attr_reader :parser + + # Set the JSON parser class _parser_ to be used by JSON. + def parser=(parser) # :nodoc: + @parser = parser + remove_const :Parser if const_defined?(:Parser, false) + const_set :Parser, parser + end + + # Return the constant located at _path_. The format of _path_ has to be + # either ::A::B::C or A::B::C. In any case, A has to be located at the top + # level (absolute namespace path?). If there doesn't exist a constant at + # the given path, an ArgumentError is raised. + def deep_const_get(path) # :nodoc: + path.to_s.split(/::/).inject(Object) do |p, c| + case + when c.empty? then p + when p.const_defined?(c, true) then p.const_get(c) + else + begin + p.const_missing(c) + rescue NameError => e + raise ArgumentError, "can't get const #{path}: #{e}" + end + end + end + end + + # Set the module _generator_ to be used by JSON. + def generator=(generator) # :nodoc: + old, $VERBOSE = $VERBOSE, nil + @generator = generator + generator_methods = generator::GeneratorMethods + for const in generator_methods.constants + klass = deep_const_get(const) + modul = generator_methods.const_get(const) + klass.class_eval do + instance_methods(false).each do |m| + m.to_s == 'to_json' and remove_method m + end + include modul + end + end + self.state = generator::State + const_set :State, self.state + const_set :SAFE_STATE_PROTOTYPE, State.new + const_set :FAST_STATE_PROTOTYPE, State.new( + :indent => '', + :space => '', + :object_nl => "", + :array_nl => "", + :max_nesting => false + ) + const_set :PRETTY_STATE_PROTOTYPE, State.new( + :indent => ' ', + :space => ' ', + :object_nl => "\n", + :array_nl => "\n" + ) + ensure + $VERBOSE = old + end + + # Returns the JSON generator module that is used by JSON. This is + # either JSON::Ext::Generator or JSON::Pure::Generator: + # JSON.generator # => JSON::Ext::Generator + attr_reader :generator + + # Sets or Returns the JSON generator state class that is used by JSON. 
This is + # either JSON::Ext::Generator::State or JSON::Pure::Generator::State: + # JSON.state # => JSON::Ext::Generator::State + attr_accessor :state + + # Sets or returns create identifier, which is used to decide if the _json_create_ + # hook of a class should be called; initial value is +json_class+: + # JSON.create_id # => 'json_class' + attr_accessor :create_id + end + self.create_id = 'json_class' + + NaN = 0.0/0 + + Infinity = 1.0/0 + + MinusInfinity = -Infinity + + # The base exception for JSON errors. + class JSONError < StandardError + def self.wrap(exception) + obj = new("Wrapped(#{exception.class}): #{exception.message.inspect}") + obj.set_backtrace exception.backtrace + obj + end + end + + # This exception is raised if a parser error occurs. + class ParserError < JSONError; end + + # This exception is raised if the nesting of parsed data structures is too + # deep. + class NestingError < ParserError; end + + # :stopdoc: + class CircularDatastructure < NestingError; end + # :startdoc: + + # This exception is raised if a generator or unparser error occurs. + class GeneratorError < JSONError; end + # For backwards compatibility + UnparserError = GeneratorError # :nodoc: + + # This exception is raised if the required unicode support is missing on the + # system. Usually this means that the iconv library is not installed. + class MissingUnicodeSupport < JSONError; end + + module_function + + # :call-seq: + # JSON.parse(source, opts) -> object + # + # Argument +source+ contains the \String to be parsed. It must be a + # {String-convertible object}[doc/implicit_conversion_rdoc.html#label-String-Convertible+Objects] + # (implementing +to_str+), and must contain valid \JSON data. + # + # Argument +opts+, if given, contains options for the parsing, and must be a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects] + # (implementing +to_hash+). + # + # Returns the Ruby objects created by parsing the given +source+. + # + # --- + # + # When +source+ is a \JSON array, returns a Ruby \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby # => ["foo", 1.0, true, false, nil] + # ruby.class # => Array + # + # When +source+ is a \JSON object, returns a Ruby \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # ruby.class # => Hash + # + # For examples of parsing for all \JSON data types, see + # {Parsing \JSON}[#module-JSON-label-Parsing+JSON]. + # + # ====== Input Options + # + # Option +max_nesting+ (\Integer) specifies the maximum nesting depth allowed; + # defaults to +100+; specify +false+ to disable depth checking. + # + # With the default, +false+: + # source = '[0, [1, [2, [3]]]]' + # ruby = JSON.parse(source) + # ruby # => [0, [1, [2, [3]]]] + # Too deep: + # # Raises JSON::NestingError (nesting of 2 is too deep): + # JSON.parse(source, {max_nesting: 1}) + # Bad value: + # # Raises TypeError (wrong argument type Symbol (expected Fixnum)): + # JSON.parse(source, {max_nesting: :foo}) + # + # --- + # + # Option +allow_nan+ (boolean) specifies whether to allow + # NaN, Infinity, and MinusInfinity in +source+; + # defaults to +false+. 
+ # + # With the default, +false+: + # # Raises JSON::ParserError (225: unexpected token at '[NaN]'): + # JSON.parse('[NaN]') + # # Raises JSON::ParserError (232: unexpected token at '[Infinity]'): + # JSON.parse('[Infinity]') + # # Raises JSON::ParserError (248: unexpected token at '[-Infinity]'): + # JSON.parse('[-Infinity]') + # Allow: + # source = '[NaN, Infinity, -Infinity]' + # ruby = JSON.parse(source, {allow_nan: true}) + # ruby # => [NaN, Infinity, -Infinity] + # + # ====== Output Options + # + # Option +symbolize_names+ (boolean) specifies whether returned \Hash keys + # should be Symbols; + # defaults to +false+ (use Strings). + # + # With the default, +false+: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # Use Symbols: + # ruby = JSON.parse(source, {symbolize_names: true}) + # ruby # => {:a=>"foo", :b=>1.0, :c=>true, :d=>false, :e=>nil} + # + # --- + # + # Option +object_class+ (\Class) specifies the Ruby class to be used + # for each \JSON object; + # defaults to \Hash. + # + # With the default, \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby.class # => Hash + # Use class \OpenStruct: + # ruby = JSON.parse(source, {object_class: OpenStruct}) + # ruby # => # + # + # --- + # + # Option +array_class+ (\Class) specifies the Ruby class to be used + # for each \JSON array; + # defaults to \Array. + # + # With the default, \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby.class # => Array + # Use class \Set: + # ruby = JSON.parse(source, {array_class: Set}) + # ruby # => # + # + # --- + # + # Option +create_additions+ (boolean) specifies whether to use \JSON additions in parsing. + # See {\JSON Additions}[#module-JSON-label-JSON+Additions]. + # + # ====== Exceptions + # + # Raises an exception if +source+ is not valid JSON: + # + # # Raises JSON::ParserError (783: unexpected token at ''): + # JSON.parse('') + # + def parse(source, opts = {}) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.parse!(source, opts) -> object + # + # Calls + # parse(source, opts) + # with +source+ and possibly modified +opts+. + # + # Differences from JSON.parse: + # - Option +max_nesting+, if not provided, defaults to +false+, + # which disables checking for nesting depth. + # - Option +allow_nan+, if not provided, defaults to +true+. + def parse!(source, opts = {}) + opts = { + :max_nesting => false, + :allow_nan => true + }.merge(opts) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.generate(obj, opts = nil) -> new_string + # + # Argument +obj+ is the Ruby object to be converted to \JSON. + # + # Argument +opts+, if given, contains options for the generation, and must be a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects] + # (implementing +to_hash+). + # + # Returns a \String containing the generated \JSON data. + # + # See also JSON.fast_generate, JSON.pretty_generate. 
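+ #
+ # (Editorial example, not part of the upstream json 2.3.1 source: a minimal
+ # round trip with the default compact formatting.)
+ #   json = JSON.generate([1, {"a" => nil}])
+ #   json             # => '[1,{"a":null}]'
+ #   JSON.parse(json) # => [1, {"a"=>nil}]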
+ # + # --- + # + # When +obj+ is an + # {Array-convertible object}[doc/implicit_conversion_rdoc.html#label-Array-Convertible+Objects] + # (implementing +to_ary+), returns a \String containing a \JSON array: + # obj = ["foo", 1.0, true, false, nil] + # json = JSON.generate(obj) + # json # => '["foo",1.0,true,false,null]' + # + # When +obj+ is a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects], + # return a \String containing a \JSON object: + # obj = {foo: 0, bar: 's', baz: :bat} + # json = JSON.generate(obj) + # json # => '{"foo":0,"bar":"s","baz":"bat"}' + # + # For examples of generating from other Ruby objects, see + # {Generating \JSON from Other Objects}[#module-JSON-label-Generating+JSON+from+Other+Objects]. + # + # ====== Input Options + # + # Option +allow_nan+ (boolean) specifies whether + # +NaN+, +Infinity+, and -Infinity may be generated; + # defaults to +false+. + # + # With the default, +false+: + # # Raises JSON::GeneratorError (920: NaN not allowed in JSON): + # JSON.generate(JSON::NaN) + # # Raises JSON::GeneratorError (917: Infinity not allowed in JSON): + # JSON.generate(JSON::Infinity) + # # Raises JSON::GeneratorError (917: -Infinity not allowed in JSON): + # JSON.generate(JSON::MinusInfinity) + # + # Allow: + # ruby = [Float::NaN, Float::Infinity, Float::MinusInfinity] + # JSON.generate(ruby, allow_nan: true) # => '[NaN,Infinity,-Infinity]' + # + # --- + # + # Option +max_nesting+ (\Integer) specifies the maximum nesting depth + # in +obj+; defaults to +100+. + # + # With the default, +100+: + # obj = [[[[[[0]]]]]] + # JSON.generate(obj) # => '[[[[[[0]]]]]]' + # + # Too deep: + # # Raises JSON::NestingError (nesting of 2 is too deep): + # JSON.generate(obj, max_nesting: 2) + # + # ====== Output Options + # + # The default formatting options generate the most compact + # \JSON data, all on one line and with no whitespace. + # + # You can use these formatting options to generate + # \JSON data in a more open format, using whitespace. + # See also JSON.pretty_generate. + # + # - Option +array_nl+ (\String) specifies a string (usually a newline) + # to be inserted after each \JSON array; defaults to the empty \String, ''. + # - Option +object_nl+ (\String) specifies a string (usually a newline) + # to be inserted after each \JSON object; defaults to the empty \String, ''. + # - Option +indent+ (\String) specifies the string (usually spaces) to be + # used for indentation; defaults to the empty \String, ''; + # defaults to the empty \String, ''; + # has no effect unless options +array_nl+ or +object_nl+ specify newlines. + # - Option +space+ (\String) specifies a string (usually a space) to be + # inserted after the colon in each \JSON object's pair; + # defaults to the empty \String, ''. + # - Option +space_before+ (\String) specifies a string (usually a space) to be + # inserted before the colon in each \JSON object's pair; + # defaults to the empty \String, ''. 
+ # + # In this example, +obj+ is used first to generate the shortest + # \JSON data (no whitespace), then again with all formatting options + # specified: + # + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.generate(obj) + # puts 'Compact:', json + # opts = { + # array_nl: "\n", + # object_nl: "\n", + # indent+: ' ', + # space_before: ' ', + # space: ' ' + # } + # puts 'Open:', JSON.generate(obj, opts) + # + # Output: + # Compact: + # {"foo":["bar","baz"],"bat":{"bam":0,"bad":1}} + # Open: + # { + # "foo" : [ + # "bar", + # "baz" + # ], + # "bat" : { + # "bam" : 0, + # "bad" : 1 + # } + # } + # + # --- + # + # Raises an exception if any formatting option is not a \String. + # + # ====== Exceptions + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises JSON::NestingError (nesting of 100 is too deep): + # JSON.generate(a) + # + def generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = SAFE_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state = state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and + # later delete them. + alias unparse generate + module_function :unparse + # :startdoc: + + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # By default, generates \JSON data without checking + # for circular references in +obj+ (option +max_nesting+ set to +false+, disabled). + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises SystemStackError (stack level too deep): + # JSON.fast_generate(a) + def fast_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = FAST_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias fast_unparse fast_generate + module_function :fast_unparse + # :startdoc: + + # :call-seq: + # JSON.pretty_generate(obj, opts = nil) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # Default options are: + # { + # indent: ' ', # Two spaces + # space: ' ', # One space + # array_nl: "\n", # Newline + # object_nl: "\n" # Newline + # } + # + # Example: + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.pretty_generate(obj) + # puts json + # Output: + # { + # "foo": [ + # "bar", + # "baz" + # ], + # "bat": { + # "bam": 0, + # "bad": 1 + # } + # } + # + def pretty_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = PRETTY_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. 
+ alias pretty_unparse pretty_generate + module_function :pretty_unparse + # :startdoc: + + class << self + # Sets or returns default options for the JSON.load method. + # Initially: + # opts = JSON.load_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :allow_blank=>true, :create_additions=>true} + attr_accessor :load_default_options + end + self.load_default_options = { + :max_nesting => false, + :allow_nan => true, + :allow_blank => true, + :create_additions => true, + } + + # Load a ruby data structure from a JSON _source_ and return it. A source can + # either be a string-like object, an IO-like object, or an object responding + # to the read method. If _proc_ was given, it will be called with any nested + # Ruby object as an argument recursively in depth first order. To modify the + # default options pass in the optional _options_ argument as well. + # + # BEWARE: This method is meant to serialise data from trusted user input, + # like from your own database server or clients under your control, it could + # be dangerous to allow untrusted users to pass JSON sources into it. The + # default options for the parser can be changed via the load_default_options + # method. + # + # This method is part of the implementation of the load/dump interface of + # Marshal and YAML. + def load(source, proc = nil, options = {}) + opts = load_default_options.merge options + if source.respond_to? :to_str + source = source.to_str + elsif source.respond_to? :to_io + source = source.to_io.read + elsif source.respond_to?(:read) + source = source.read + end + if opts[:allow_blank] && (source.nil? || source.empty?) + source = 'null' + end + result = parse(source, opts) + recurse_proc(result, &proc) if proc + result + end + + # Recursively calls passed _Proc_ if the parsed data structure is an _Array_ or _Hash_ + def recurse_proc(result, &proc) + case result + when Array + result.each { |x| recurse_proc x, &proc } + proc.call result + when Hash + result.each { |x, y| recurse_proc x, &proc; recurse_proc y, &proc } + proc.call result + else + proc.call result + end + end + + alias restore load + module_function :restore + + class << self + # Sets or returns the default options for the JSON.dump method. + # Initially: + # opts = JSON.dump_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true} + attr_accessor :dump_default_options + end + self.dump_default_options = { + :max_nesting => false, + :allow_nan => true, + } + + # Dumps _obj_ as a JSON string, i.e. calls generate on the object and returns + # the result. + # + # If anIO (an IO-like object or an object that responds to the write method) + # was given, the resulting JSON is written to it. + # + # If the number of nested arrays or objects exceeds _limit_, an ArgumentError + # exception is raised. This argument is similar (but not exactly the + # same!) to the _limit_ argument in Marshal.dump. + # + # The default options for the generator can be changed via the + # dump_default_options method. + # + # This method is part of the implementation of the load/dump interface of + # Marshal and YAML. + def dump(obj, anIO = nil, limit = nil) + if anIO and limit.nil? 
+ anIO = anIO.to_io if anIO.respond_to?(:to_io) + unless anIO.respond_to?(:write) + limit = anIO + anIO = nil + end + end + opts = JSON.dump_default_options + opts = opts.merge(:max_nesting => limit) if limit + result = generate(obj, opts) + if anIO + anIO.write result + anIO + else + result + end + rescue JSON::NestingError + raise ArgumentError, "exceed depth limit" + end + + # Encodes string using String.encode. + def self.iconv(to, from, string) + string.encode(to, from) + end +end + +module ::Kernel + private + + # Outputs _objs_ to STDOUT as JSON strings in the shortest form, that is in + # one line. + def j(*objs) + objs.each do |obj| + puts JSON::generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # Outputs _objs_ to STDOUT as JSON strings in a pretty format, with + # indentation and over many lines. + def jj(*objs) + objs.each do |obj| + puts JSON::pretty_generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # If _object_ is string-like, parse the string and return the parsed result as + # a Ruby data structure. Otherwise, generate a JSON text from the Ruby data + # structure object and return it. + # + # The _opts_ argument is passed through to generate/parse respectively. See + # generate and parse for their documentation. + def JSON(object, *args) + if object.respond_to? :to_str + JSON.parse(object.to_str, args.first) + else + JSON.generate(object, args.first) + end + end +end + +# Extends any Class to include _json_creatable?_ method. +class ::Class + # Returns true if this class can be used to create an instance + # from a serialised JSON string. The class has to implement a class + # method _json_create_ that expects a hash as first parameter. The hash + # should include the required data. + def json_creatable? + respond_to?(:json_create) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext 2.rb new file mode 100644 index 0000000..7264a85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext 2.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality as C extensions. + module Ext + require 'json/ext/parser' + require 'json/ext/generator' + $DEBUG and warn "Using Ext extension for JSON." + JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext.rb new file mode 100644 index 0000000..7264a85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality as C extensions. + module Ext + require 'json/ext/parser' + require 'json/ext/generator' + $DEBUG and warn "Using Ext extension for JSON." 
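+     # (Editorial note, not part of the upstream json 2.3.1 source: once this
+     # file has been required, the C extension backs the top-level API.)
+     #   require 'json/ext'
+     #   JSON.parser    # => JSON::Ext::Parser
+     #   JSON.generator # => JSON::Ext::Generator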
+ JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext/.keep b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext/.keep new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext/.keep 2 b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/ext/.keep 2 new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/generic_object 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/generic_object 2.rb new file mode 100644 index 0000000..108309d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/generic_object 2.rb @@ -0,0 +1,71 @@ +#frozen_string_literal: false +require 'ostruct' + +module JSON + class GenericObject < OpenStruct + class << self + alias [] new + + def json_creatable? + @json_creatable + end + + attr_writer :json_creatable + + def json_create(data) + data = data.dup + data.delete JSON.create_id + self[data] + end + + def from_hash(object) + case + when object.respond_to?(:to_hash) + result = new + object.to_hash.each do |key, value| + result[key] = from_hash(value) + end + result + when object.respond_to?(:to_ary) + object.to_ary.map { |a| from_hash(a) } + else + object + end + end + + def load(source, proc = nil, opts = {}) + result = ::JSON.load(source, proc, opts.merge(:object_class => self)) + result.nil? ? new : result + end + + def dump(obj, *args) + ::JSON.dump(obj, *args) + end + end + self.json_creatable = false + + def to_hash + table + end + + def [](name) + __send__(name) + end unless method_defined?(:[]) + + def []=(name, value) + __send__("#{name}=", value) + end unless method_defined?(:[]=) + + def |(other) + self.class[other.to_hash.merge(to_hash)] + end + + def as_json(*) + { JSON.create_id => self.class.name }.merge to_hash + end + + def to_json(*a) + as_json.to_json(*a) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/generic_object.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/generic_object.rb new file mode 100644 index 0000000..108309d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/generic_object.rb @@ -0,0 +1,71 @@ +#frozen_string_literal: false +require 'ostruct' + +module JSON + class GenericObject < OpenStruct + class << self + alias [] new + + def json_creatable? + @json_creatable + end + + attr_writer :json_creatable + + def json_create(data) + data = data.dup + data.delete JSON.create_id + self[data] + end + + def from_hash(object) + case + when object.respond_to?(:to_hash) + result = new + object.to_hash.each do |key, value| + result[key] = from_hash(value) + end + result + when object.respond_to?(:to_ary) + object.to_ary.map { |a| from_hash(a) } + else + object + end + end + + def load(source, proc = nil, opts = {}) + result = ::JSON.load(source, proc, opts.merge(:object_class => self)) + result.nil? ? 
new : result + end + + def dump(obj, *args) + ::JSON.dump(obj, *args) + end + end + self.json_creatable = false + + def to_hash + table + end + + def [](name) + __send__(name) + end unless method_defined?(:[]) + + def []=(name, value) + __send__("#{name}=", value) + end unless method_defined?(:[]=) + + def |(other) + self.class[other.to_hash.merge(to_hash)] + end + + def as_json(*) + { JSON.create_id => self.class.name }.merge to_hash + end + + def to_json(*a) + as_json.to_json(*a) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure 2.rb new file mode 100644 index 0000000..53178b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure 2.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality in pure ruby. + module Pure + require 'json/pure/parser' + require 'json/pure/generator' + $DEBUG and warn "Using Pure library for JSON." + JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure.rb new file mode 100644 index 0000000..53178b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality in pure ruby. + module Pure + require 'json/pure/parser' + require 'json/pure/generator' + $DEBUG and warn "Using Pure library for JSON." + JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/generator 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/generator 2.rb new file mode 100644 index 0000000..471af63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/generator 2.rb @@ -0,0 +1,459 @@ +#frozen_string_literal: false +module JSON + MAP = { + "\x0" => '\u0000', + "\x1" => '\u0001', + "\x2" => '\u0002', + "\x3" => '\u0003', + "\x4" => '\u0004', + "\x5" => '\u0005', + "\x6" => '\u0006', + "\x7" => '\u0007', + "\b" => '\b', + "\t" => '\t', + "\n" => '\n', + "\xb" => '\u000b', + "\f" => '\f', + "\r" => '\r', + "\xe" => '\u000e', + "\xf" => '\u000f', + "\x10" => '\u0010', + "\x11" => '\u0011', + "\x12" => '\u0012', + "\x13" => '\u0013', + "\x14" => '\u0014', + "\x15" => '\u0015', + "\x16" => '\u0016', + "\x17" => '\u0017', + "\x18" => '\u0018', + "\x19" => '\u0019', + "\x1a" => '\u001a', + "\x1b" => '\u001b', + "\x1c" => '\u001c', + "\x1d" => '\u001d', + "\x1e" => '\u001e', + "\x1f" => '\u001f', + '"' => '\"', + '\\' => '\\\\', + } # :nodoc: + + # Convert a UTF8 encoded Ruby string _string_ to a JSON string, encoded with + # UTF16 big endian characters as \u????, and return it. 
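+ #
+ # (Editorial example, not part of the upstream json 2.3.1 source: control
+ # characters and quotes are escaped via MAP, while multibyte UTF-8 passes
+ # through unchanged; only utf8_to_json_ascii below rewrites non-ASCII
+ # characters as \uXXXX escapes.)
+ #   JSON.utf8_to_json(%Q{line1\nline2}) # => "line1\\nline2"
+ #   JSON.utf8_to_json_ascii("héllo")    # => "h\\u00e9llo"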
+ def utf8_to_json(string) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + string.gsub!(/["\\\x0-\x1f]/) { MAP[$&] } + string.force_encoding(::Encoding::UTF_8) + string + end + + def utf8_to_json_ascii(string) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + string.gsub!(/["\\\x0-\x1f]/n) { MAP[$&] } + string.gsub!(/( + (?: + [\xc2-\xdf][\x80-\xbf] | + [\xe0-\xef][\x80-\xbf]{2} | + [\xf0-\xf4][\x80-\xbf]{3} + )+ | + [\x80-\xc1\xf5-\xff] # invalid + )/nx) { |c| + c.size == 1 and raise GeneratorError, "invalid utf8 byte: '#{c}'" + s = JSON.iconv('utf-16be', 'utf-8', c).unpack('H*')[0] + s.force_encoding(::Encoding::ASCII_8BIT) + s.gsub!(/.{4}/n, '\\\\u\&') + s.force_encoding(::Encoding::UTF_8) + } + string.force_encoding(::Encoding::UTF_8) + string + rescue => e + raise GeneratorError.wrap(e) + end + + def valid_utf8?(string) + encoding = string.encoding + (encoding == Encoding::UTF_8 || encoding == Encoding::ASCII) && + string.valid_encoding? + end + module_function :utf8_to_json, :utf8_to_json_ascii, :valid_utf8? + + module Pure + module Generator + # This class is used to create State instances, that are use to hold data + # while generating a JSON text from a Ruby data structure. + class State + # Creates a State object from _opts_, which ought to be Hash to create + # a new State instance configured by _opts_, something else to create + # an unconfigured instance. If _opts_ is a State object, it is just + # returned. + def self.from_state(opts) + case + when self === opts + opts + when opts.respond_to?(:to_hash) + new(opts.to_hash) + when opts.respond_to?(:to_h) + new(opts.to_h) + else + SAFE_STATE_PROTOTYPE.dup + end + end + + # Instantiates a new State object, configured by _opts_. + # + # _opts_ can have the following keys: + # + # * *indent*: a string used to indent levels (default: ''), + # * *space*: a string that is put after, a : or , delimiter (default: ''), + # * *space_before*: a string that is put before a : pair delimiter (default: ''), + # * *object_nl*: a string that is put at the end of a JSON object (default: ''), + # * *array_nl*: a string that is put at the end of a JSON array (default: ''), + # * *check_circular*: is deprecated now, use the :max_nesting option instead, + # * *max_nesting*: sets the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum should be checked. + # * *allow_nan*: true if NaN, Infinity, and -Infinity should be + # generated, otherwise an exception is thrown, if these values are + # encountered. This options defaults to false. + def initialize(opts = {}) + @indent = '' + @space = '' + @space_before = '' + @object_nl = '' + @array_nl = '' + @allow_nan = false + @ascii_only = false + @buffer_initial_length = 1024 + configure opts + end + + # This string is used to indent levels in the JSON text. + attr_accessor :indent + + # This string is used to insert a space between the tokens in a JSON + # string. + attr_accessor :space + + # This string is used to insert a space before the ':' in JSON objects. + attr_accessor :space_before + + # This string is put at the end of a line that holds a JSON object (or + # Hash). + attr_accessor :object_nl + + # This string is put at the end of a line that holds a JSON array. + attr_accessor :array_nl + + # This integer returns the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum is checked. 
+ attr_accessor :max_nesting + + # :stopdoc: + attr_reader :buffer_initial_length + + def buffer_initial_length=(length) + if length > 0 + @buffer_initial_length = length + end + end + # :startdoc: + + # This integer returns the current depth data structure nesting in the + # generated JSON. + attr_accessor :depth + + def check_max_nesting # :nodoc: + return if @max_nesting.zero? + current_nesting = depth + 1 + current_nesting > @max_nesting and + raise NestingError, "nesting of #{current_nesting} is too deep" + end + + # Returns true, if circular data structures are checked, + # otherwise returns false. + def check_circular? + !@max_nesting.zero? + end + + # Returns true if NaN, Infinity, and -Infinity should be considered as + # valid JSON and output. + def allow_nan? + @allow_nan + end + + # Returns true, if only ASCII characters should be generated. Otherwise + # returns false. + def ascii_only? + @ascii_only + end + + # Configure this State instance with the Hash _opts_, and return + # itself. + def configure(opts) + if opts.respond_to?(:to_hash) + opts = opts.to_hash + elsif opts.respond_to?(:to_h) + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + for key, value in opts + instance_variable_set "@#{key}", value + end + @indent = opts[:indent] if opts.key?(:indent) + @space = opts[:space] if opts.key?(:space) + @space_before = opts[:space_before] if opts.key?(:space_before) + @object_nl = opts[:object_nl] if opts.key?(:object_nl) + @array_nl = opts[:array_nl] if opts.key?(:array_nl) + @allow_nan = !!opts[:allow_nan] if opts.key?(:allow_nan) + @ascii_only = opts[:ascii_only] if opts.key?(:ascii_only) + @depth = opts[:depth] || 0 + @buffer_initial_length ||= opts[:buffer_initial_length] + + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + self + end + alias merge configure + + # Returns the configuration instance variables as a hash, that can be + # passed to the configure method. + def to_h + result = {} + for iv in instance_variables + iv = iv.to_s[1..-1] + result[iv.to_sym] = self[iv] + end + result + end + + alias to_hash to_h + + # Generates a valid JSON document from object +obj+ and + # returns the result. If no valid JSON document can be + # created this method raises a + # GeneratorError exception. + def generate(obj) + result = obj.to_json(self) + JSON.valid_utf8?(result) or raise GeneratorError, + "source sequence #{result.inspect} is illegal/malformed utf-8" + result + end + + # Return the value returned by method +name+. + def [](name) + if respond_to?(name) + __send__(name) + else + instance_variable_get("@#{name}") if + instance_variables.include?("@#{name}".to_sym) # avoid warning + end + end + + def []=(name, value) + if respond_to?(name_writer = "#{name}=") + __send__ name_writer, value + else + instance_variable_set "@#{name}", value + end + end + end + + module GeneratorMethods + module Object + # Converts this object to a string (calling #to_s), converts + # it to a JSON string, and returns the result. This is a fallback, if no + # special method #to_json was defined for some object. + def to_json(*) to_s.to_json end + end + + module Hash + # Returns a JSON string containing a JSON object, that is unparsed from + # this Hash instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. 
+ # _depth_ is used to find out nesting depth, to indent accordingly. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_shift(state) + state.object_nl.empty? or return '' + state.indent * state.depth + end + + def json_transform(state) + delim = ',' + delim << state.object_nl + result = '{' + result << state.object_nl + depth = state.depth += 1 + first = true + indent = !state.object_nl.empty? + each { |key,value| + result << delim unless first + result << state.indent * depth if indent + result << key.to_s.to_json(state) + result << state.space_before + result << ':' + result << state.space + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.object_nl + result << state.indent * depth if indent + result << '}' + result + end + end + + module Array + # Returns a JSON string containing a JSON array, that is unparsed from + # this Array instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_transform(state) + delim = ',' + delim << state.array_nl + result = '[' + result << state.array_nl + depth = state.depth += 1 + first = true + indent = !state.array_nl.empty? + each { |value| + result << delim unless first + result << state.indent * depth if indent + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.array_nl + result << state.indent * depth if indent + result << ']' + end + end + + module Integer + # Returns a JSON string representation for this Integer number. + def to_json(*) to_s end + end + + module Float + # Returns a JSON string representation for this Float number. + def to_json(state = nil, *) + state = State.from_state(state) + case + when infinite? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + when nan? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + else + to_s + end + end + end + + module String + # This string should be encoded with UTF-8 A call to this method + # returns a JSON string encoded with UTF16 big endian characters as + # \u????. + def to_json(state = nil, *args) + state = State.from_state(state) + if encoding == ::Encoding::UTF_8 + string = self + else + string = encode(::Encoding::UTF_8) + end + if state.ascii_only? + '"' << JSON.utf8_to_json_ascii(string) << '"' + else + '"' << JSON.utf8_to_json(string) << '"' + end + end + + # Module that holds the extending methods if, the String module is + # included. + module Extend + # Raw Strings are JSON Objects (the raw bytes are stored in an + # array for the key "raw"). The Ruby String can be created by this + # module method. + def json_create(o) + o['raw'].pack('C*') + end + end + + # Extends _modul_ with the String::Extend module. + def self.included(modul) + modul.extend Extend + end + + # This method creates a raw object hash, that can be nested into + # other data structures and will be unparsed as a raw string. This + # method should be used, if you want to convert raw strings to JSON + # instead of UTF-8 strings, e. g. binary data. 
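+ #
+ # (Editorial example, not part of the upstream json 2.3.1 source; assumes
+ # json/pure is loaded so this module is mixed into ::String, and that
+ # JSON.create_id is still the default 'json_class'.)
+ #   "\xff\x00".to_json_raw_object
+ #   # => {"json_class"=>"String", "raw"=>[255, 0]}
+ #   "\xff\x00".to_json_raw
+ #   # => '{"json_class":"String","raw":[255,0]}'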
+ def to_json_raw_object + { + JSON.create_id => self.class.name, + 'raw' => self.unpack('C*'), + } + end + + # This method creates a JSON text from the result of + # a call to to_json_raw_object of this String. + def to_json_raw(*args) + to_json_raw_object.to_json(*args) + end + end + + module TrueClass + # Returns a JSON string for true: 'true'. + def to_json(*) 'true' end + end + + module FalseClass + # Returns a JSON string for false: 'false'. + def to_json(*) 'false' end + end + + module NilClass + # Returns a JSON string for nil: 'null'. + def to_json(*) 'null' end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/generator.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/generator.rb new file mode 100644 index 0000000..471af63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/generator.rb @@ -0,0 +1,459 @@ +#frozen_string_literal: false +module JSON + MAP = { + "\x0" => '\u0000', + "\x1" => '\u0001', + "\x2" => '\u0002', + "\x3" => '\u0003', + "\x4" => '\u0004', + "\x5" => '\u0005', + "\x6" => '\u0006', + "\x7" => '\u0007', + "\b" => '\b', + "\t" => '\t', + "\n" => '\n', + "\xb" => '\u000b', + "\f" => '\f', + "\r" => '\r', + "\xe" => '\u000e', + "\xf" => '\u000f', + "\x10" => '\u0010', + "\x11" => '\u0011', + "\x12" => '\u0012', + "\x13" => '\u0013', + "\x14" => '\u0014', + "\x15" => '\u0015', + "\x16" => '\u0016', + "\x17" => '\u0017', + "\x18" => '\u0018', + "\x19" => '\u0019', + "\x1a" => '\u001a', + "\x1b" => '\u001b', + "\x1c" => '\u001c', + "\x1d" => '\u001d', + "\x1e" => '\u001e', + "\x1f" => '\u001f', + '"' => '\"', + '\\' => '\\\\', + } # :nodoc: + + # Convert a UTF8 encoded Ruby string _string_ to a JSON string, encoded with + # UTF16 big endian characters as \u????, and return it. + def utf8_to_json(string) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + string.gsub!(/["\\\x0-\x1f]/) { MAP[$&] } + string.force_encoding(::Encoding::UTF_8) + string + end + + def utf8_to_json_ascii(string) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + string.gsub!(/["\\\x0-\x1f]/n) { MAP[$&] } + string.gsub!(/( + (?: + [\xc2-\xdf][\x80-\xbf] | + [\xe0-\xef][\x80-\xbf]{2} | + [\xf0-\xf4][\x80-\xbf]{3} + )+ | + [\x80-\xc1\xf5-\xff] # invalid + )/nx) { |c| + c.size == 1 and raise GeneratorError, "invalid utf8 byte: '#{c}'" + s = JSON.iconv('utf-16be', 'utf-8', c).unpack('H*')[0] + s.force_encoding(::Encoding::ASCII_8BIT) + s.gsub!(/.{4}/n, '\\\\u\&') + s.force_encoding(::Encoding::UTF_8) + } + string.force_encoding(::Encoding::UTF_8) + string + rescue => e + raise GeneratorError.wrap(e) + end + + def valid_utf8?(string) + encoding = string.encoding + (encoding == Encoding::UTF_8 || encoding == Encoding::ASCII) && + string.valid_encoding? + end + module_function :utf8_to_json, :utf8_to_json_ascii, :valid_utf8? + + module Pure + module Generator + # This class is used to create State instances, that are use to hold data + # while generating a JSON text from a Ruby data structure. + class State + # Creates a State object from _opts_, which ought to be Hash to create + # a new State instance configured by _opts_, something else to create + # an unconfigured instance. If _opts_ is a State object, it is just + # returned. 
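+ #
+ # (Editorial example, not part of the upstream json 2.3.1 source; assumes
+ # json/pure has been required.)
+ #   s = JSON::Pure::Generator::State.new(:indent => '  ')
+ #   JSON::Pure::Generator::State.from_state(s).equal?(s)            # => true
+ #   JSON::Pure::Generator::State.from_state(:indent => '  ').indent # => "  "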
+ def self.from_state(opts) + case + when self === opts + opts + when opts.respond_to?(:to_hash) + new(opts.to_hash) + when opts.respond_to?(:to_h) + new(opts.to_h) + else + SAFE_STATE_PROTOTYPE.dup + end + end + + # Instantiates a new State object, configured by _opts_. + # + # _opts_ can have the following keys: + # + # * *indent*: a string used to indent levels (default: ''), + # * *space*: a string that is put after, a : or , delimiter (default: ''), + # * *space_before*: a string that is put before a : pair delimiter (default: ''), + # * *object_nl*: a string that is put at the end of a JSON object (default: ''), + # * *array_nl*: a string that is put at the end of a JSON array (default: ''), + # * *check_circular*: is deprecated now, use the :max_nesting option instead, + # * *max_nesting*: sets the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum should be checked. + # * *allow_nan*: true if NaN, Infinity, and -Infinity should be + # generated, otherwise an exception is thrown, if these values are + # encountered. This options defaults to false. + def initialize(opts = {}) + @indent = '' + @space = '' + @space_before = '' + @object_nl = '' + @array_nl = '' + @allow_nan = false + @ascii_only = false + @buffer_initial_length = 1024 + configure opts + end + + # This string is used to indent levels in the JSON text. + attr_accessor :indent + + # This string is used to insert a space between the tokens in a JSON + # string. + attr_accessor :space + + # This string is used to insert a space before the ':' in JSON objects. + attr_accessor :space_before + + # This string is put at the end of a line that holds a JSON object (or + # Hash). + attr_accessor :object_nl + + # This string is put at the end of a line that holds a JSON array. + attr_accessor :array_nl + + # This integer returns the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum is checked. + attr_accessor :max_nesting + + # :stopdoc: + attr_reader :buffer_initial_length + + def buffer_initial_length=(length) + if length > 0 + @buffer_initial_length = length + end + end + # :startdoc: + + # This integer returns the current depth data structure nesting in the + # generated JSON. + attr_accessor :depth + + def check_max_nesting # :nodoc: + return if @max_nesting.zero? + current_nesting = depth + 1 + current_nesting > @max_nesting and + raise NestingError, "nesting of #{current_nesting} is too deep" + end + + # Returns true, if circular data structures are checked, + # otherwise returns false. + def check_circular? + !@max_nesting.zero? + end + + # Returns true if NaN, Infinity, and -Infinity should be considered as + # valid JSON and output. + def allow_nan? + @allow_nan + end + + # Returns true, if only ASCII characters should be generated. Otherwise + # returns false. + def ascii_only? + @ascii_only + end + + # Configure this State instance with the Hash _opts_, and return + # itself. 
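+ #
+ # (Editorial example, not part of the upstream json 2.3.1 source: configure
+ # returns the receiver, and max_nesting falls back to 100 when the option
+ # is not given.)
+ #   state = JSON::Pure::Generator::State.new
+ #   state.configure(:indent => '  ', :object_nl => "\n").equal?(state) # => true
+ #   state.indent      # => "  "
+ #   state.max_nesting # => 100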
+ def configure(opts) + if opts.respond_to?(:to_hash) + opts = opts.to_hash + elsif opts.respond_to?(:to_h) + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + for key, value in opts + instance_variable_set "@#{key}", value + end + @indent = opts[:indent] if opts.key?(:indent) + @space = opts[:space] if opts.key?(:space) + @space_before = opts[:space_before] if opts.key?(:space_before) + @object_nl = opts[:object_nl] if opts.key?(:object_nl) + @array_nl = opts[:array_nl] if opts.key?(:array_nl) + @allow_nan = !!opts[:allow_nan] if opts.key?(:allow_nan) + @ascii_only = opts[:ascii_only] if opts.key?(:ascii_only) + @depth = opts[:depth] || 0 + @buffer_initial_length ||= opts[:buffer_initial_length] + + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + self + end + alias merge configure + + # Returns the configuration instance variables as a hash, that can be + # passed to the configure method. + def to_h + result = {} + for iv in instance_variables + iv = iv.to_s[1..-1] + result[iv.to_sym] = self[iv] + end + result + end + + alias to_hash to_h + + # Generates a valid JSON document from object +obj+ and + # returns the result. If no valid JSON document can be + # created this method raises a + # GeneratorError exception. + def generate(obj) + result = obj.to_json(self) + JSON.valid_utf8?(result) or raise GeneratorError, + "source sequence #{result.inspect} is illegal/malformed utf-8" + result + end + + # Return the value returned by method +name+. + def [](name) + if respond_to?(name) + __send__(name) + else + instance_variable_get("@#{name}") if + instance_variables.include?("@#{name}".to_sym) # avoid warning + end + end + + def []=(name, value) + if respond_to?(name_writer = "#{name}=") + __send__ name_writer, value + else + instance_variable_set "@#{name}", value + end + end + end + + module GeneratorMethods + module Object + # Converts this object to a string (calling #to_s), converts + # it to a JSON string, and returns the result. This is a fallback, if no + # special method #to_json was defined for some object. + def to_json(*) to_s.to_json end + end + + module Hash + # Returns a JSON string containing a JSON object, that is unparsed from + # this Hash instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + # _depth_ is used to find out nesting depth, to indent accordingly. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_shift(state) + state.object_nl.empty? or return '' + state.indent * state.depth + end + + def json_transform(state) + delim = ',' + delim << state.object_nl + result = '{' + result << state.object_nl + depth = state.depth += 1 + first = true + indent = !state.object_nl.empty? + each { |key,value| + result << delim unless first + result << state.indent * depth if indent + result << key.to_s.to_json(state) + result << state.space_before + result << ':' + result << state.space + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.object_nl + result << state.indent * depth if indent + result << '}' + result + end + end + + module Array + # Returns a JSON string containing a JSON array, that is unparsed from + # this Array instance. 
+ # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_transform(state) + delim = ',' + delim << state.array_nl + result = '[' + result << state.array_nl + depth = state.depth += 1 + first = true + indent = !state.array_nl.empty? + each { |value| + result << delim unless first + result << state.indent * depth if indent + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.array_nl + result << state.indent * depth if indent + result << ']' + end + end + + module Integer + # Returns a JSON string representation for this Integer number. + def to_json(*) to_s end + end + + module Float + # Returns a JSON string representation for this Float number. + def to_json(state = nil, *) + state = State.from_state(state) + case + when infinite? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + when nan? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + else + to_s + end + end + end + + module String + # This string should be encoded with UTF-8 A call to this method + # returns a JSON string encoded with UTF16 big endian characters as + # \u????. + def to_json(state = nil, *args) + state = State.from_state(state) + if encoding == ::Encoding::UTF_8 + string = self + else + string = encode(::Encoding::UTF_8) + end + if state.ascii_only? + '"' << JSON.utf8_to_json_ascii(string) << '"' + else + '"' << JSON.utf8_to_json(string) << '"' + end + end + + # Module that holds the extending methods if, the String module is + # included. + module Extend + # Raw Strings are JSON Objects (the raw bytes are stored in an + # array for the key "raw"). The Ruby String can be created by this + # module method. + def json_create(o) + o['raw'].pack('C*') + end + end + + # Extends _modul_ with the String::Extend module. + def self.included(modul) + modul.extend Extend + end + + # This method creates a raw object hash, that can be nested into + # other data structures and will be unparsed as a raw string. This + # method should be used, if you want to convert raw strings to JSON + # instead of UTF-8 strings, e. g. binary data. + def to_json_raw_object + { + JSON.create_id => self.class.name, + 'raw' => self.unpack('C*'), + } + end + + # This method creates a JSON text from the result of + # a call to to_json_raw_object of this String. + def to_json_raw(*args) + to_json_raw_object.to_json(*args) + end + end + + module TrueClass + # Returns a JSON string for true: 'true'. + def to_json(*) 'true' end + end + + module FalseClass + # Returns a JSON string for false: 'false'. + def to_json(*) 'false' end + end + + module NilClass + # Returns a JSON string for nil: 'null'. + def to_json(*) 'null' end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/parser 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/parser 2.rb new file mode 100644 index 0000000..5db244c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/parser 2.rb @@ -0,0 +1,319 @@ +#frozen_string_literal: false +require 'strscan' + +module JSON + module Pure + # This class implements the JSON parser that is used to parse a JSON string + # into a Ruby data structure. 
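+ #
+ # (Editorial example, not part of the upstream json 2.3.1 source; assumes
+ # require 'json/pure' so this parser class is loaded.)
+ #   parser = JSON::Pure::Parser.new('{"a": [1, 2.5, null]}', :symbolize_names => true)
+ #   parser.parse # => {:a=>[1, 2.5, nil]}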
+ class Parser < StringScanner + STRING = /" ((?:[^\x0-\x1f"\\] | + # escaped special characters: + \\["\\\/bfnrt] | + \\u[0-9a-fA-F]{4} | + # match all but escaped special characters: + \\[\x20-\x21\x23-\x2e\x30-\x5b\x5d-\x61\x63-\x65\x67-\x6d\x6f-\x71\x73\x75-\xff])*) + "/nx + INTEGER = /(-?0|-?[1-9]\d*)/ + FLOAT = /(-? + (?:0|[1-9]\d*) + (?: + \.\d+(?i:e[+-]?\d+) | + \.\d+ | + (?i:e[+-]?\d+) + ) + )/x + NAN = /NaN/ + INFINITY = /Infinity/ + MINUS_INFINITY = /-Infinity/ + OBJECT_OPEN = /\{/ + OBJECT_CLOSE = /\}/ + ARRAY_OPEN = /\[/ + ARRAY_CLOSE = /\]/ + PAIR_DELIMITER = /:/ + COLLECTION_DELIMITER = /,/ + TRUE = /true/ + FALSE = /false/ + NULL = /null/ + IGNORE = %r( + (?: + //[^\n\r]*[\n\r]| # line comments + /\* # c-style comments + (?: + [^*/]| # normal chars + /[^*]| # slashes that do not start a nested comment + \*[^/]| # asterisks that do not end this comment + /(?=\*/) # single slash before this comment's end + )* + \*/ # the End of this comment + |[ \t\r\n]+ # whitespaces: space, horicontal tab, lf, cr + )+ + )mx + + UNPARSED = Object.new.freeze + + # Creates a new JSON::Pure::Parser instance for the string _source_. + # + # It will be configured by the _opts_ hash. _opts_ can have the following + # keys: + # * *max_nesting*: The maximum depth of nesting allowed in the parsed data + # structures. Disable depth checking with :max_nesting => false|nil|0, + # it defaults to 100. + # * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + # defiance of RFC 7159 to be parsed by the Parser. This option defaults + # to false. + # * *symbolize_names*: If set to true, returns symbols for the names + # (keys) in a JSON object. Otherwise strings are returned, which is + # also the default. It's not possible to use this option in + # conjunction with the *create_additions* option. + # * *create_additions*: If set to true, the Parser creates + # additions when a matching class and create_id are found. This + # option defaults to false. + # * *object_class*: Defaults to Hash + # * *array_class*: Defaults to Array + # * *decimal_class*: Specifies which class to use instead of the default + # (Float) when parsing decimal numbers. This class must accept a single + # string argument in its constructor. + def initialize(source, opts = {}) + opts ||= {} + source = convert_encoding source + super source + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + @allow_nan = !!opts[:allow_nan] + @symbolize_names = !!opts[:symbolize_names] + if opts.key?(:create_additions) + @create_additions = !!opts[:create_additions] + else + @create_additions = false + end + @symbolize_names && @create_additions and raise ArgumentError, + 'options :symbolize_names and :create_additions cannot be used '\ + 'in conjunction' + @create_id = @create_additions ? JSON.create_id : nil + @object_class = opts[:object_class] || Hash + @array_class = opts[:array_class] || Array + @decimal_class = opts[:decimal_class] + @match_string = opts[:match_string] + end + + alias source string + + def reset + super + @current_nesting = 0 + end + + # Parses the current JSON string _source_ and returns the + # complete data structure as a result. + def parse + reset + obj = nil + while !eos? && skip(IGNORE) do end + if eos? + raise ParserError, "source is not valid JSON!" + else + obj = parse_value + UNPARSED.equal?(obj) and raise ParserError, + "source is not valid JSON!" + end + while !eos? 
&& skip(IGNORE) do end + eos? or raise ParserError, "source is not valid JSON!" + obj + end + + private + + def convert_encoding(source) + if source.respond_to?(:to_str) + source = source.to_str + else + raise TypeError, + "#{source.inspect} is not like a string" + end + if source.encoding != ::Encoding::ASCII_8BIT + source = source.encode(::Encoding::UTF_8) + source.force_encoding(::Encoding::ASCII_8BIT) + end + source + end + + # Unescape characters in strings. + UNESCAPE_MAP = Hash.new { |h, k| h[k] = k.chr } + UNESCAPE_MAP.update({ + ?" => '"', + ?\\ => '\\', + ?/ => '/', + ?b => "\b", + ?f => "\f", + ?n => "\n", + ?r => "\r", + ?t => "\t", + ?u => nil, + }) + + EMPTY_8BIT_STRING = '' + if ::String.method_defined?(:encode) + EMPTY_8BIT_STRING.force_encoding Encoding::ASCII_8BIT + end + + def parse_string + if scan(STRING) + return '' if self[1].empty? + string = self[1].gsub(%r((?:\\[\\bfnrt"/]|(?:\\u(?:[A-Fa-f\d]{4}))+|\\[\x20-\xff]))n) do |c| + if u = UNESCAPE_MAP[$&[1]] + u + else # \uXXXX + bytes = EMPTY_8BIT_STRING.dup + i = 0 + while c[6 * i] == ?\\ && c[6 * i + 1] == ?u + bytes << c[6 * i + 2, 2].to_i(16) << c[6 * i + 4, 2].to_i(16) + i += 1 + end + JSON.iconv('utf-8', 'utf-16be', bytes) + end + end + if string.respond_to?(:force_encoding) + string.force_encoding(::Encoding::UTF_8) + end + if @create_additions and @match_string + for (regexp, klass) in @match_string + klass.json_creatable? or next + string =~ regexp and return klass.json_create(string) + end + end + string + else + UNPARSED + end + rescue => e + raise ParserError, "Caught #{e.class} at '#{peek(20)}': #{e}" + end + + def parse_value + case + when scan(FLOAT) + if @decimal_class then + if @decimal_class == BigDecimal then + BigDecimal(self[1]) + else + @decimal_class.new(self[1]) || Float(self[1]) + end + else + Float(self[1]) + end + when scan(INTEGER) + Integer(self[1]) + when scan(TRUE) + true + when scan(FALSE) + false + when scan(NULL) + nil + when !UNPARSED.equal?(string = parse_string) + string + when scan(ARRAY_OPEN) + @current_nesting += 1 + ary = parse_array + @current_nesting -= 1 + ary + when scan(OBJECT_OPEN) + @current_nesting += 1 + obj = parse_object + @current_nesting -= 1 + obj + when @allow_nan && scan(NAN) + NaN + when @allow_nan && scan(INFINITY) + Infinity + when @allow_nan && scan(MINUS_INFINITY) + MinusInfinity + else + UNPARSED + end + end + + def parse_array + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @array_class.new + delim = false + until eos? + case + when !UNPARSED.equal?(value = parse_value) + delim = false + result << value + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(ARRAY_CLOSE) + ; + else + raise ParserError, "expected ',' or ']' in array at '#{peek(20)}'!" + end + when scan(ARRAY_CLOSE) + if delim + raise ParserError, "expected next element in array at '#{peek(20)}'!" + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in array at '#{peek(20)}'!" + end + end + result + end + + def parse_object + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @object_class.new + delim = false + until eos? + case + when !UNPARSED.equal?(string = parse_string) + skip(IGNORE) + unless scan(PAIR_DELIMITER) + raise ParserError, "expected ':' in object at '#{peek(20)}'!" 
+ end + skip(IGNORE) + unless UNPARSED.equal?(value = parse_value) + result[@symbolize_names ? string.to_sym : string] = value + delim = false + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(OBJECT_CLOSE) + ; + else + raise ParserError, "expected ',' or '}' in object at '#{peek(20)}'!" + end + else + raise ParserError, "expected value in object at '#{peek(20)}'!" + end + when scan(OBJECT_CLOSE) + if delim + raise ParserError, "expected next name, value pair in object at '#{peek(20)}'!" + end + if @create_additions and klassname = result[@create_id] + klass = JSON.deep_const_get klassname + break unless klass and klass.json_creatable? + result = klass.json_create(result) + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in object at '#{peek(20)}'!" + end + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/parser.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/parser.rb new file mode 100644 index 0000000..5db244c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/pure/parser.rb @@ -0,0 +1,319 @@ +#frozen_string_literal: false +require 'strscan' + +module JSON + module Pure + # This class implements the JSON parser that is used to parse a JSON string + # into a Ruby data structure. + class Parser < StringScanner + STRING = /" ((?:[^\x0-\x1f"\\] | + # escaped special characters: + \\["\\\/bfnrt] | + \\u[0-9a-fA-F]{4} | + # match all but escaped special characters: + \\[\x20-\x21\x23-\x2e\x30-\x5b\x5d-\x61\x63-\x65\x67-\x6d\x6f-\x71\x73\x75-\xff])*) + "/nx + INTEGER = /(-?0|-?[1-9]\d*)/ + FLOAT = /(-? + (?:0|[1-9]\d*) + (?: + \.\d+(?i:e[+-]?\d+) | + \.\d+ | + (?i:e[+-]?\d+) + ) + )/x + NAN = /NaN/ + INFINITY = /Infinity/ + MINUS_INFINITY = /-Infinity/ + OBJECT_OPEN = /\{/ + OBJECT_CLOSE = /\}/ + ARRAY_OPEN = /\[/ + ARRAY_CLOSE = /\]/ + PAIR_DELIMITER = /:/ + COLLECTION_DELIMITER = /,/ + TRUE = /true/ + FALSE = /false/ + NULL = /null/ + IGNORE = %r( + (?: + //[^\n\r]*[\n\r]| # line comments + /\* # c-style comments + (?: + [^*/]| # normal chars + /[^*]| # slashes that do not start a nested comment + \*[^/]| # asterisks that do not end this comment + /(?=\*/) # single slash before this comment's end + )* + \*/ # the End of this comment + |[ \t\r\n]+ # whitespaces: space, horicontal tab, lf, cr + )+ + )mx + + UNPARSED = Object.new.freeze + + # Creates a new JSON::Pure::Parser instance for the string _source_. + # + # It will be configured by the _opts_ hash. _opts_ can have the following + # keys: + # * *max_nesting*: The maximum depth of nesting allowed in the parsed data + # structures. Disable depth checking with :max_nesting => false|nil|0, + # it defaults to 100. + # * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + # defiance of RFC 7159 to be parsed by the Parser. This option defaults + # to false. + # * *symbolize_names*: If set to true, returns symbols for the names + # (keys) in a JSON object. Otherwise strings are returned, which is + # also the default. It's not possible to use this option in + # conjunction with the *create_additions* option. + # * *create_additions*: If set to true, the Parser creates + # additions when a matching class and create_id are found. This + # option defaults to false. + # * *object_class*: Defaults to Hash + # * *array_class*: Defaults to Array + # * *decimal_class*: Specifies which class to use instead of the default + # (Float) when parsing decimal numbers. 
This class must accept a single + # string argument in its constructor. + def initialize(source, opts = {}) + opts ||= {} + source = convert_encoding source + super source + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + @allow_nan = !!opts[:allow_nan] + @symbolize_names = !!opts[:symbolize_names] + if opts.key?(:create_additions) + @create_additions = !!opts[:create_additions] + else + @create_additions = false + end + @symbolize_names && @create_additions and raise ArgumentError, + 'options :symbolize_names and :create_additions cannot be used '\ + 'in conjunction' + @create_id = @create_additions ? JSON.create_id : nil + @object_class = opts[:object_class] || Hash + @array_class = opts[:array_class] || Array + @decimal_class = opts[:decimal_class] + @match_string = opts[:match_string] + end + + alias source string + + def reset + super + @current_nesting = 0 + end + + # Parses the current JSON string _source_ and returns the + # complete data structure as a result. + def parse + reset + obj = nil + while !eos? && skip(IGNORE) do end + if eos? + raise ParserError, "source is not valid JSON!" + else + obj = parse_value + UNPARSED.equal?(obj) and raise ParserError, + "source is not valid JSON!" + end + while !eos? && skip(IGNORE) do end + eos? or raise ParserError, "source is not valid JSON!" + obj + end + + private + + def convert_encoding(source) + if source.respond_to?(:to_str) + source = source.to_str + else + raise TypeError, + "#{source.inspect} is not like a string" + end + if source.encoding != ::Encoding::ASCII_8BIT + source = source.encode(::Encoding::UTF_8) + source.force_encoding(::Encoding::ASCII_8BIT) + end + source + end + + # Unescape characters in strings. + UNESCAPE_MAP = Hash.new { |h, k| h[k] = k.chr } + UNESCAPE_MAP.update({ + ?" => '"', + ?\\ => '\\', + ?/ => '/', + ?b => "\b", + ?f => "\f", + ?n => "\n", + ?r => "\r", + ?t => "\t", + ?u => nil, + }) + + EMPTY_8BIT_STRING = '' + if ::String.method_defined?(:encode) + EMPTY_8BIT_STRING.force_encoding Encoding::ASCII_8BIT + end + + def parse_string + if scan(STRING) + return '' if self[1].empty? + string = self[1].gsub(%r((?:\\[\\bfnrt"/]|(?:\\u(?:[A-Fa-f\d]{4}))+|\\[\x20-\xff]))n) do |c| + if u = UNESCAPE_MAP[$&[1]] + u + else # \uXXXX + bytes = EMPTY_8BIT_STRING.dup + i = 0 + while c[6 * i] == ?\\ && c[6 * i + 1] == ?u + bytes << c[6 * i + 2, 2].to_i(16) << c[6 * i + 4, 2].to_i(16) + i += 1 + end + JSON.iconv('utf-8', 'utf-16be', bytes) + end + end + if string.respond_to?(:force_encoding) + string.force_encoding(::Encoding::UTF_8) + end + if @create_additions and @match_string + for (regexp, klass) in @match_string + klass.json_creatable? 
or next + string =~ regexp and return klass.json_create(string) + end + end + string + else + UNPARSED + end + rescue => e + raise ParserError, "Caught #{e.class} at '#{peek(20)}': #{e}" + end + + def parse_value + case + when scan(FLOAT) + if @decimal_class then + if @decimal_class == BigDecimal then + BigDecimal(self[1]) + else + @decimal_class.new(self[1]) || Float(self[1]) + end + else + Float(self[1]) + end + when scan(INTEGER) + Integer(self[1]) + when scan(TRUE) + true + when scan(FALSE) + false + when scan(NULL) + nil + when !UNPARSED.equal?(string = parse_string) + string + when scan(ARRAY_OPEN) + @current_nesting += 1 + ary = parse_array + @current_nesting -= 1 + ary + when scan(OBJECT_OPEN) + @current_nesting += 1 + obj = parse_object + @current_nesting -= 1 + obj + when @allow_nan && scan(NAN) + NaN + when @allow_nan && scan(INFINITY) + Infinity + when @allow_nan && scan(MINUS_INFINITY) + MinusInfinity + else + UNPARSED + end + end + + def parse_array + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @array_class.new + delim = false + until eos? + case + when !UNPARSED.equal?(value = parse_value) + delim = false + result << value + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(ARRAY_CLOSE) + ; + else + raise ParserError, "expected ',' or ']' in array at '#{peek(20)}'!" + end + when scan(ARRAY_CLOSE) + if delim + raise ParserError, "expected next element in array at '#{peek(20)}'!" + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in array at '#{peek(20)}'!" + end + end + result + end + + def parse_object + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @object_class.new + delim = false + until eos? + case + when !UNPARSED.equal?(string = parse_string) + skip(IGNORE) + unless scan(PAIR_DELIMITER) + raise ParserError, "expected ':' in object at '#{peek(20)}'!" + end + skip(IGNORE) + unless UNPARSED.equal?(value = parse_value) + result[@symbolize_names ? string.to_sym : string] = value + delim = false + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(OBJECT_CLOSE) + ; + else + raise ParserError, "expected ',' or '}' in object at '#{peek(20)}'!" + end + else + raise ParserError, "expected value in object at '#{peek(20)}'!" + end + when scan(OBJECT_CLOSE) + if delim + raise ParserError, "expected next name, value pair in object at '#{peek(20)}'!" + end + if @create_additions and klassname = result[@create_id] + klass = JSON.deep_const_get klassname + break unless klass and klass.json_creatable? + result = klass.json_create(result) + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in object at '#{peek(20)}'!" 
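Note: the constructor options documented above are normally reached through JSON.parse rather than by building a JSON::Pure::Parser by hand. A hedged usage sketch against the public API of the json 2.3.1 vendored here; the sample document is invented:

    require 'json'
    require 'ostruct'

    doc = '{"name": "Zephyr", "nested": {"a": {"b": 1}}, "pi": 3.14}'

    JSON.parse(doc)                                  # defaults: String keys, Float numbers, max_nesting 100
    JSON.parse(doc, symbolize_names: true)           # Symbol keys (cannot be combined with create_additions)
    JSON.parse(doc, max_nesting: 2)  rescue $!       # third level of nesting => JSON::NestingError
    JSON.parse('[NaN, -Infinity]', allow_nan: true)  # without allow_nan these literals raise ParserError
    JSON.parse(doc, object_class: OpenStruct)        # any Hash-like class responding to #[]= works
    # create_additions defaults to false, so hashes tagged with the create_id
    # ("json_class") stay plain Hashes unless it is switched on explicitly.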
+ end + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/version 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/version 2.rb new file mode 100644 index 0000000..e4dd3e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/version 2.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: false +module JSON + # JSON version + VERSION = '2.3.1' + VERSION_ARRAY = VERSION.split(/\./).map { |x| x.to_i } # :nodoc: + VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc: + VERSION_MINOR = VERSION_ARRAY[1] # :nodoc: + VERSION_BUILD = VERSION_ARRAY[2] # :nodoc: +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/version.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/version.rb new file mode 100644 index 0000000..e4dd3e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/lib/json/version.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: false +module JSON + # JSON version + VERSION = '2.3.1' + VERSION_ARRAY = VERSION.split(/\./).map { |x| x.to_i } # :nodoc: + VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc: + VERSION_MINOR = VERSION_ARRAY[1] # :nodoc: + VERSION_BUILD = VERSION_ARRAY[2] # :nodoc: +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/references/rfc7159 2.txt b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/references/rfc7159 2.txt new file mode 100644 index 0000000..64fcada --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/references/rfc7159 2.txt @@ -0,0 +1,899 @@ + + + + + + +Internet Engineering Task Force (IETF) T. Bray, Ed. +Request for Comments: 7159 Google, Inc. +Obsoletes: 4627, 7158 March 2014 +Category: Standards Track +ISSN: 2070-1721 + + + The JavaScript Object Notation (JSON) Data Interchange Format + +Abstract + + JavaScript Object Notation (JSON) is a lightweight, text-based, + language-independent data interchange format. It was derived from + the ECMAScript Programming Language Standard. JSON defines a small + set of formatting rules for the portable representation of structured + data. + + This document removes inconsistencies with other specifications of + JSON, repairs specification errors, and offers experience-based + interoperability guidance. + +Status of This Memo + + This is an Internet Standards Track document. + + This document is a product of the Internet Engineering Task Force + (IETF). It represents the consensus of the IETF community. It has + received public review and has been approved for publication by the + Internet Engineering Steering Group (IESG). Further information on + Internet Standards is available in Section 2 of RFC 5741. + + Information about the current status of this document, any errata, + and how to provide feedback on it may be obtained at + http://www.rfc-editor.org/info/rfc7159. + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 1] + +RFC 7159 JSON March 2014 + + +Copyright Notice + + Copyright (c) 2014 IETF Trust and the persons identified as the + document authors. All rights reserved. + + This document is subject to BCP 78 and the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info) in effect on the date of + publication of this document. Please review these documents + carefully, as they describe your rights and restrictions with respect + to this document. Code Components extracted from this document must + include Simplified BSD License text as described in Section 4.e of + the Trust Legal Provisions and are provided without warranty as + described in the Simplified BSD License. 
+ + This document may contain material from IETF Documents or IETF + Contributions published or made publicly available before November + 10, 2008. The person(s) controlling the copyright in some of this + material may not have granted the IETF Trust the right to allow + modifications of such material outside the IETF Standards Process. + Without obtaining an adequate license from the person(s) controlling + the copyright in such materials, this document may not be modified + outside the IETF Standards Process, and derivative works of it may + not be created outside the IETF Standards Process, except to format + it for publication as an RFC or to translate it into languages other + than English. + + + + + + + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 2] + +RFC 7159 JSON March 2014 + + +Table of Contents + + 1. Introduction ....................................................3 + 1.1. Conventions Used in This Document ..........................4 + 1.2. Specifications of JSON .....................................4 + 1.3. Introduction to This Revision ..............................4 + 2. JSON Grammar ....................................................4 + 3. Values ..........................................................5 + 4. Objects .........................................................6 + 5. Arrays ..........................................................6 + 6. Numbers .........................................................6 + 7. Strings .........................................................8 + 8. String and Character Issues .....................................9 + 8.1. Character Encoding .........................................9 + 8.2. Unicode Characters .........................................9 + 8.3. String Comparison ..........................................9 + 9. Parsers ........................................................10 + 10. Generators ....................................................10 + 11. IANA Considerations ...........................................10 + 12. Security Considerations .......................................11 + 13. Examples ......................................................12 + 14. Contributors ..................................................13 + 15. References ....................................................13 + 15.1. Normative References .....................................13 + 15.2. Informative References ...................................13 + Appendix A. Changes from RFC 4627 .................................15 + +1. Introduction + + JavaScript Object Notation (JSON) is a text format for the + serialization of structured data. It is derived from the object + literals of JavaScript, as defined in the ECMAScript Programming + Language Standard, Third Edition [ECMA-262]. + + JSON can represent four primitive types (strings, numbers, booleans, + and null) and two structured types (objects and arrays). + + A string is a sequence of zero or more Unicode characters [UNICODE]. + Note that this citation references the latest version of Unicode + rather than a specific release. It is not expected that future + changes in the UNICODE specification will impact the syntax of JSON. + + An object is an unordered collection of zero or more name/value + pairs, where a name is a string and a value is a string, number, + boolean, null, object, or array. + + An array is an ordered sequence of zero or more values. 
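Note: purely as a reader's aid alongside the RFC text, this is how those four primitive and two structured types come back from the json gem vendored in this bundle (sample document invented):

    require 'json'

    text = '{"s": "hi", "n": 1.5, "b": true, "z": null, "a": [1, 2], "o": {"k": "v"}}'
    JSON.parse(text)
    # => {"s"=>"hi", "n"=>1.5, "b"=>true, "z"=>nil, "a"=>[1, 2], "o"=>{"k"=>"v"}}
    # strings -> String, numbers -> Float or Integer, true/false -> booleans,
    # null -> nil, arrays -> Array, objects -> Hash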
+ + + + +Bray Standards Track [Page 3] + +RFC 7159 JSON March 2014 + + + The terms "object" and "array" come from the conventions of + JavaScript. + + JSON's design goals were for it to be minimal, portable, textual, and + a subset of JavaScript. + +1.1. Conventions Used in This Document + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", + "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this + document are to be interpreted as described in [RFC2119]. + + The grammatical rules in this document are to be interpreted as + described in [RFC5234]. + +1.2. Specifications of JSON + + This document updates [RFC4627], which describes JSON and registers + the media type "application/json". + + A description of JSON in ECMAScript terms appears in Version 5.1 of + the ECMAScript specification [ECMA-262], Section 15.12. JSON is also + described in [ECMA-404]. + + All of the specifications of JSON syntax agree on the syntactic + elements of the language. + +1.3. Introduction to This Revision + + In the years since the publication of RFC 4627, JSON has found very + wide use. This experience has revealed certain patterns, which, + while allowed by its specifications, have caused interoperability + problems. + + Also, a small number of errata have been reported (see RFC Errata IDs + 607 [Err607] and 3607 [Err3607]). + + This document's goal is to apply the errata, remove inconsistencies + with other specifications of JSON, and highlight practices that can + lead to interoperability problems. + +2. JSON Grammar + + A JSON text is a sequence of tokens. The set of tokens includes six + structural characters, strings, numbers, and three literal names. + + A JSON text is a serialized value. Note that certain previous + specifications of JSON constrained a JSON text to be an object or an + + + +Bray Standards Track [Page 4] + +RFC 7159 JSON March 2014 + + + array. Implementations that generate only objects or arrays where a + JSON text is called for will be interoperable in the sense that all + implementations will accept these as conforming JSON texts. + + JSON-text = ws value ws + + These are the six structural characters: + + begin-array = ws %x5B ws ; [ left square bracket + + begin-object = ws %x7B ws ; { left curly bracket + + end-array = ws %x5D ws ; ] right square bracket + + end-object = ws %x7D ws ; } right curly bracket + + name-separator = ws %x3A ws ; : colon + + value-separator = ws %x2C ws ; , comma + + Insignificant whitespace is allowed before or after any of the six + structural characters. + + ws = *( + %x20 / ; Space + %x09 / ; Horizontal tab + %x0A / ; Line feed or New line + %x0D ) ; Carriage return + +3. Values + + A JSON value MUST be an object, array, number, or string, or one of + the following three literal names: + + false null true + + The literal names MUST be lowercase. No other literal names are + allowed. + + value = false / null / true / object / array / number / string + + false = %x66.61.6c.73.65 ; false + + null = %x6e.75.6c.6c ; null + + true = %x74.72.75.65 ; true + + + + + +Bray Standards Track [Page 5] + +RFC 7159 JSON March 2014 + + +4. Objects + + An object structure is represented as a pair of curly brackets + surrounding zero or more name/value pairs (or members). A name is a + string. A single colon comes after each name, separating the name + from the value. A single comma separates a value from a following + name. The names within an object SHOULD be unique. 
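Note: the SHOULD above is elaborated in the next paragraph; as one concrete data point, the gem vendored here behaves like the "last pair wins" implementations it describes. Illustration only:

    require 'json'

    JSON.parse('{"a": 1, "a": 2}')   # => {"a"=>2}  (the last name/value pair wins)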
+ + object = begin-object [ member *( value-separator member ) ] + end-object + + member = string name-separator value + + An object whose names are all unique is interoperable in the sense + that all software implementations receiving that object will agree on + the name-value mappings. When the names within an object are not + unique, the behavior of software that receives such an object is + unpredictable. Many implementations report the last name/value pair + only. Other implementations report an error or fail to parse the + object, and some implementations report all of the name/value pairs, + including duplicates. + + JSON parsing libraries have been observed to differ as to whether or + not they make the ordering of object members visible to calling + software. Implementations whose behavior does not depend on member + ordering will be interoperable in the sense that they will not be + affected by these differences. + +5. Arrays + + An array structure is represented as square brackets surrounding zero + or more values (or elements). Elements are separated by commas. + + array = begin-array [ value *( value-separator value ) ] end-array + + There is no requirement that the values in an array be of the same + type. + +6. Numbers + + The representation of numbers is similar to that used in most + programming languages. A number is represented in base 10 using + decimal digits. It contains an integer component that may be + prefixed with an optional minus sign, which may be followed by a + fraction part and/or an exponent part. Leading zeros are not + allowed. + + A fraction part is a decimal point followed by one or more digits. + + + +Bray Standards Track [Page 6] + +RFC 7159 JSON March 2014 + + + An exponent part begins with the letter E in upper or lower case, + which may be followed by a plus or minus sign. The E and optional + sign are followed by one or more digits. + + Numeric values that cannot be represented in the grammar below (such + as Infinity and NaN) are not permitted. + + number = [ minus ] int [ frac ] [ exp ] + + decimal-point = %x2E ; . + + digit1-9 = %x31-39 ; 1-9 + + e = %x65 / %x45 ; e E + + exp = e [ minus / plus ] 1*DIGIT + + frac = decimal-point 1*DIGIT + + int = zero / ( digit1-9 *DIGIT ) + + minus = %x2D ; - + + plus = %x2B ; + + + zero = %x30 ; 0 + + This specification allows implementations to set limits on the range + and precision of numbers accepted. Since software that implements + IEEE 754-2008 binary64 (double precision) numbers [IEEE754] is + generally available and widely used, good interoperability can be + achieved by implementations that expect no more precision or range + than these provide, in the sense that implementations will + approximate JSON numbers within the expected precision. A JSON + number such as 1E400 or 3.141592653589793238462643383279 may indicate + potential interoperability problems, since it suggests that the + software that created it expects receiving software to have greater + capabilities for numeric magnitude and precision than is widely + available. + + Note that when such software is used, numbers that are integers and + are in the range [-(2**53)+1, (2**53)-1] are interoperable in the + sense that implementations will agree exactly on their numeric + values. + + + + + + + +Bray Standards Track [Page 7] + +RFC 7159 JSON March 2014 + + +7. Strings + + The representation of strings is similar to conventions used in the C + family of programming languages. A string begins and ends with + quotation marks. 
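Note: Section 6's precision caveat is easy to reproduce with the vendored gem; the default Float mapping rounds to binary64, while the decimal_class option documented in the pure parser above keeps every digit. Sketch with an invented literal:

    require 'json'
    require 'bigdecimal'

    long_pi = '[3.141592653589793238462643383279]'
    JSON.parse(long_pi).first                              # => 3.141592653589793 (rounded to binary64)
    JSON.parse(long_pi, decimal_class: BigDecimal).first   # => a BigDecimal keeping every digit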
All Unicode characters may be placed within the + quotation marks, except for the characters that must be escaped: + quotation mark, reverse solidus, and the control characters (U+0000 + through U+001F). + + Any character may be escaped. If the character is in the Basic + Multilingual Plane (U+0000 through U+FFFF), then it may be + represented as a six-character sequence: a reverse solidus, followed + by the lowercase letter u, followed by four hexadecimal digits that + encode the character's code point. The hexadecimal letters A though + F can be upper or lower case. So, for example, a string containing + only a single reverse solidus character may be represented as + "\u005C". + + Alternatively, there are two-character sequence escape + representations of some popular characters. So, for example, a + string containing only a single reverse solidus character may be + represented more compactly as "\\". + + To escape an extended character that is not in the Basic Multilingual + Plane, the character is represented as a 12-character sequence, + encoding the UTF-16 surrogate pair. So, for example, a string + containing only the G clef character (U+1D11E) may be represented as + "\uD834\uDD1E". + + string = quotation-mark *char quotation-mark + + char = unescaped / + escape ( + %x22 / ; " quotation mark U+0022 + %x5C / ; \ reverse solidus U+005C + %x2F / ; / solidus U+002F + %x62 / ; b backspace U+0008 + %x66 / ; f form feed U+000C + %x6E / ; n line feed U+000A + %x72 / ; r carriage return U+000D + %x74 / ; t tab U+0009 + %x75 4HEXDIG ) ; uXXXX U+XXXX + + escape = %x5C ; \ + + quotation-mark = %x22 ; " + + unescaped = %x20-21 / %x23-5B / %x5D-10FFFF + + + +Bray Standards Track [Page 8] + +RFC 7159 JSON March 2014 + + +8. String and Character Issues + +8.1. Character Encoding + + JSON text SHALL be encoded in UTF-8, UTF-16, or UTF-32. The default + encoding is UTF-8, and JSON texts that are encoded in UTF-8 are + interoperable in the sense that they will be read successfully by the + maximum number of implementations; there are many implementations + that cannot successfully read texts in other encodings (such as + UTF-16 and UTF-32). + + Implementations MUST NOT add a byte order mark to the beginning of a + JSON text. In the interests of interoperability, implementations + that parse JSON texts MAY ignore the presence of a byte order mark + rather than treating it as an error. + +8.2. Unicode Characters + + When all the strings represented in a JSON text are composed entirely + of Unicode characters [UNICODE] (however escaped), then that JSON + text is interoperable in the sense that all software implementations + that parse it will agree on the contents of names and of string + values in objects and arrays. + + However, the ABNF in this specification allows member names and + string values to contain bit sequences that cannot encode Unicode + characters; for example, "\uDEAD" (a single unpaired UTF-16 + surrogate). Instances of this have been observed, for example, when + a library truncates a UTF-16 string without checking whether the + truncation split a surrogate pair. The behavior of software that + receives JSON texts containing such values is unpredictable; for + example, implementations might return different values for the length + of a string value or even suffer fatal runtime exceptions. + +8.3. String Comparison + + Software implementations are typically required to test names of + object members for equality. 
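Note: the escape equivalences of Section 7 are why the comparison advice that follows matters; once decoded, differently escaped spellings of the same string compare equal. A short check with the vendored gem (Ruby's own single-quote escaping is called out in the comments):

    require 'json'

    # In the single-quoted Ruby literal below, \\\\ collapses to the JSON
    # escape \\ (one backslash), while \u005C stays six literal characters,
    # the other spelling of the same backslash described in Section 7.
    a, b = JSON.parse('["\\\\", "\u005C"]')
    a          # => "\\"  (a single reverse solidus)
    a == b     # => true: decoded values compare equal, escaped spellings do not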
Implementations that transform the + textual representation into sequences of Unicode code units and then + perform the comparison numerically, code unit by code unit, are + interoperable in the sense that implementations will agree in all + cases on equality or inequality of two strings. For example, + implementations that compare strings with escaped characters + unconverted may incorrectly find that "a\\b" and "a\u005Cb" are not + equal. + + + + + + +Bray Standards Track [Page 9] + +RFC 7159 JSON March 2014 + + +9. Parsers + + A JSON parser transforms a JSON text into another representation. A + JSON parser MUST accept all texts that conform to the JSON grammar. + A JSON parser MAY accept non-JSON forms or extensions. + + An implementation may set limits on the size of texts that it + accepts. An implementation may set limits on the maximum depth of + nesting. An implementation may set limits on the range and precision + of numbers. An implementation may set limits on the length and + character contents of strings. + +10. Generators + + A JSON generator produces JSON text. The resulting text MUST + strictly conform to the JSON grammar. + +11. IANA Considerations + + The MIME media type for JSON text is application/json. + + Type name: application + + Subtype name: json + + Required parameters: n/a + + Optional parameters: n/a + + Encoding considerations: binary + + Security considerations: See [RFC7159], Section 12. + + Interoperability considerations: Described in [RFC7159] + + Published specification: [RFC7159] + + Applications that use this media type: + JSON has been used to exchange data between applications written + in all of these programming languages: ActionScript, C, C#, + Clojure, ColdFusion, Common Lisp, E, Erlang, Go, Java, JavaScript, + Lua, Objective CAML, Perl, PHP, Python, Rebol, Ruby, Scala, and + Scheme. + + + + + + + + +Bray Standards Track [Page 10] + +RFC 7159 JSON March 2014 + + + Additional information: + Magic number(s): n/a + File extension(s): .json + Macintosh file type code(s): TEXT + + Person & email address to contact for further information: + IESG + + + Intended usage: COMMON + + Restrictions on usage: none + + Author: + Douglas Crockford + + + Change controller: + IESG + + + Note: No "charset" parameter is defined for this registration. + Adding one really has no effect on compliant recipients. + +12. Security Considerations + + Generally, there are security issues with scripting languages. JSON + is a subset of JavaScript but excludes assignment and invocation. + + Since JSON's syntax is borrowed from JavaScript, it is possible to + use that language's "eval()" function to parse JSON texts. This + generally constitutes an unacceptable security risk, since the text + could contain executable code along with data declarations. The same + consideration applies to the use of eval()-like functions in any + other programming language in which JSON texts conform to that + language's syntax. + + + + + + + + + + + + + + + +Bray Standards Track [Page 11] + +RFC 7159 JSON March 2014 + + +13. Examples + + This is a JSON object: + + { + "Image": { + "Width": 800, + "Height": 600, + "Title": "View from 15th Floor", + "Thumbnail": { + "Url": "http://www.example.com/image/481989943", + "Height": 125, + "Width": 100 + }, + "Animated" : false, + "IDs": [116, 943, 234, 38793] + } + } + + Its Image member is an object whose Thumbnail member is an object and + whose IDs member is an array of numbers. 
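Note: on the output side, the generator requirements of Section 10 and the Section 6 ban on NaN/Infinity are both enforced by the vendored gem. A hedged sketch with an invented sample hash:

    require 'json'

    image = { 'Width' => 800, 'Height' => 600, 'Animated' => false,
              'IDs' => [116, 943, 234, 38793] }

    JSON.generate(image)                                     # compact, strictly conforming text
    JSON.pretty_generate(image)                              # same data, indented for humans
    JSON.generate('bad' => Float::NAN)       rescue $!       # => JSON::GeneratorError (NaN is rejected)
    JSON.generate({ 'bad' => Float::NAN }, allow_nan: true)  # opt-in escape hatch, like the parser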
+ + This is a JSON array containing two objects: + + [ + { + "precision": "zip", + "Latitude": 37.7668, + "Longitude": -122.3959, + "Address": "", + "City": "SAN FRANCISCO", + "State": "CA", + "Zip": "94107", + "Country": "US" + }, + { + "precision": "zip", + "Latitude": 37.371991, + "Longitude": -122.026020, + "Address": "", + "City": "SUNNYVALE", + "State": "CA", + "Zip": "94085", + "Country": "US" + } + ] + + + + + +Bray Standards Track [Page 12] + +RFC 7159 JSON March 2014 + + + Here are three small JSON texts containing only values: + + "Hello world!" + + 42 + + true + +14. Contributors + + RFC 4627 was written by Douglas Crockford. This document was + constructed by making a relatively small number of changes to that + document; thus, the vast majority of the text here is his. + +15. References + +15.1. Normative References + + [IEEE754] IEEE, "IEEE Standard for Floating-Point Arithmetic", IEEE + Standard 754, August 2008, + . + + [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate + Requirement Levels", BCP 14, RFC 2119, March 1997. + + [RFC5234] Crocker, D. and P. Overell, "Augmented BNF for Syntax + Specifications: ABNF", STD 68, RFC 5234, January 2008. + + [UNICODE] The Unicode Consortium, "The Unicode Standard", + . + +15.2. Informative References + + [ECMA-262] Ecma International, "ECMAScript Language Specification + Edition 5.1", Standard ECMA-262, June 2011, + . + + [ECMA-404] Ecma International, "The JSON Data Interchange Format", + Standard ECMA-404, October 2013, + . + + [Err3607] RFC Errata, Errata ID 3607, RFC 3607, + . + + + + + + +Bray Standards Track [Page 13] + +RFC 7159 JSON March 2014 + + + [Err607] RFC Errata, Errata ID 607, RFC 607, + . + + [RFC4627] Crockford, D., "The application/json Media Type for + JavaScript Object Notation (JSON)", RFC 4627, July 2006. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 14] + +RFC 7159 JSON March 2014 + + +Appendix A. Changes from RFC 4627 + + This section lists changes between this document and the text in RFC + 4627. + + o Changed the title and abstract of the document. + + o Changed the reference to [UNICODE] to be not version specific. + + o Added a "Specifications of JSON" section. + + o Added an "Introduction to This Revision" section. + + o Changed the definition of "JSON text" so that it can be any JSON + value, removing the constraint that it be an object or array. + + o Added language about duplicate object member names, member + ordering, and interoperability. + + o Clarified the absence of a requirement that values in an array be + of the same JSON type. + + o Applied erratum #607 from RFC 4627 to correctly align the artwork + for the definition of "object". + + o Changed "as sequences of digits" to "in the grammar below" in the + "Numbers" section, and made base-10-ness explicit. + + o Added language about number interoperability as a function of + IEEE754, and added an IEEE754 reference. + + o Added language about interoperability and Unicode characters and + about string comparisons. To do this, turned the old "Encoding" + section into a "String and Character Issues" section, with three + subsections: "Character Encoding", "Unicode Characters", and + "String Comparison". + + o Changed guidance in the "Parsers" section to point out that + implementations may set limits on the range "and precision" of + numbers. + + o Updated and tidied the "IANA Considerations" section. 
+ + o Made a real "Security Considerations" section and lifted the text + out of the previous "IANA Considerations" section. + + + + + + +Bray Standards Track [Page 15] + +RFC 7159 JSON March 2014 + + + o Applied erratum #3607 from RFC 4627 by removing the security + consideration that begins "A JSON text can be safely passed" and + the JavaScript code that went with that consideration. + + o Added a note to the "Security Considerations" section pointing out + the risks of using the "eval()" function in JavaScript or any + other language in which JSON texts conform to that language's + syntax. + + o Added a note to the "IANA Considerations" clarifying the absence + of a "charset" parameter for the application/json media type. + + o Changed "100" to 100 and added a boolean field, both in the first + example. + + o Added examples of JSON texts with simple values, neither objects + nor arrays. + + o Added a "Contributors" section crediting Douglas Crockford. + + o Added a reference to RFC 4627. + + o Moved the ECMAScript reference from Normative to Informative and + updated it to reference ECMAScript 5.1, and added a reference to + ECMA 404. + +Author's Address + + Tim Bray (editor) + Google, Inc. + + EMail: tbray@textuality.com + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 16] + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/references/rfc7159.txt b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/references/rfc7159.txt new file mode 100644 index 0000000..64fcada --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/references/rfc7159.txt @@ -0,0 +1,899 @@ + + + + + + +Internet Engineering Task Force (IETF) T. Bray, Ed. +Request for Comments: 7159 Google, Inc. +Obsoletes: 4627, 7158 March 2014 +Category: Standards Track +ISSN: 2070-1721 + + + The JavaScript Object Notation (JSON) Data Interchange Format + +Abstract + + JavaScript Object Notation (JSON) is a lightweight, text-based, + language-independent data interchange format. It was derived from + the ECMAScript Programming Language Standard. JSON defines a small + set of formatting rules for the portable representation of structured + data. + + This document removes inconsistencies with other specifications of + JSON, repairs specification errors, and offers experience-based + interoperability guidance. + +Status of This Memo + + This is an Internet Standards Track document. + + This document is a product of the Internet Engineering Task Force + (IETF). It represents the consensus of the IETF community. It has + received public review and has been approved for publication by the + Internet Engineering Steering Group (IESG). Further information on + Internet Standards is available in Section 2 of RFC 5741. + + Information about the current status of this document, any errata, + and how to provide feedback on it may be obtained at + http://www.rfc-editor.org/info/rfc7159. + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 1] + +RFC 7159 JSON March 2014 + + +Copyright Notice + + Copyright (c) 2014 IETF Trust and the persons identified as the + document authors. All rights reserved. + + This document is subject to BCP 78 and the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info) in effect on the date of + publication of this document. Please review these documents + carefully, as they describe your rights and restrictions with respect + to this document. 
Code Components extracted from this document must + include Simplified BSD License text as described in Section 4.e of + the Trust Legal Provisions and are provided without warranty as + described in the Simplified BSD License. + + This document may contain material from IETF Documents or IETF + Contributions published or made publicly available before November + 10, 2008. The person(s) controlling the copyright in some of this + material may not have granted the IETF Trust the right to allow + modifications of such material outside the IETF Standards Process. + Without obtaining an adequate license from the person(s) controlling + the copyright in such materials, this document may not be modified + outside the IETF Standards Process, and derivative works of it may + not be created outside the IETF Standards Process, except to format + it for publication as an RFC or to translate it into languages other + than English. + + + + + + + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 2] + +RFC 7159 JSON March 2014 + + +Table of Contents + + 1. Introduction ....................................................3 + 1.1. Conventions Used in This Document ..........................4 + 1.2. Specifications of JSON .....................................4 + 1.3. Introduction to This Revision ..............................4 + 2. JSON Grammar ....................................................4 + 3. Values ..........................................................5 + 4. Objects .........................................................6 + 5. Arrays ..........................................................6 + 6. Numbers .........................................................6 + 7. Strings .........................................................8 + 8. String and Character Issues .....................................9 + 8.1. Character Encoding .........................................9 + 8.2. Unicode Characters .........................................9 + 8.3. String Comparison ..........................................9 + 9. Parsers ........................................................10 + 10. Generators ....................................................10 + 11. IANA Considerations ...........................................10 + 12. Security Considerations .......................................11 + 13. Examples ......................................................12 + 14. Contributors ..................................................13 + 15. References ....................................................13 + 15.1. Normative References .....................................13 + 15.2. Informative References ...................................13 + Appendix A. Changes from RFC 4627 .................................15 + +1. Introduction + + JavaScript Object Notation (JSON) is a text format for the + serialization of structured data. It is derived from the object + literals of JavaScript, as defined in the ECMAScript Programming + Language Standard, Third Edition [ECMA-262]. + + JSON can represent four primitive types (strings, numbers, booleans, + and null) and two structured types (objects and arrays). + + A string is a sequence of zero or more Unicode characters [UNICODE]. + Note that this citation references the latest version of Unicode + rather than a specific release. It is not expected that future + changes in the UNICODE specification will impact the syntax of JSON. 
+ + An object is an unordered collection of zero or more name/value + pairs, where a name is a string and a value is a string, number, + boolean, null, object, or array. + + An array is an ordered sequence of zero or more values. + + + + +Bray Standards Track [Page 3] + +RFC 7159 JSON March 2014 + + + The terms "object" and "array" come from the conventions of + JavaScript. + + JSON's design goals were for it to be minimal, portable, textual, and + a subset of JavaScript. + +1.1. Conventions Used in This Document + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", + "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this + document are to be interpreted as described in [RFC2119]. + + The grammatical rules in this document are to be interpreted as + described in [RFC5234]. + +1.2. Specifications of JSON + + This document updates [RFC4627], which describes JSON and registers + the media type "application/json". + + A description of JSON in ECMAScript terms appears in Version 5.1 of + the ECMAScript specification [ECMA-262], Section 15.12. JSON is also + described in [ECMA-404]. + + All of the specifications of JSON syntax agree on the syntactic + elements of the language. + +1.3. Introduction to This Revision + + In the years since the publication of RFC 4627, JSON has found very + wide use. This experience has revealed certain patterns, which, + while allowed by its specifications, have caused interoperability + problems. + + Also, a small number of errata have been reported (see RFC Errata IDs + 607 [Err607] and 3607 [Err3607]). + + This document's goal is to apply the errata, remove inconsistencies + with other specifications of JSON, and highlight practices that can + lead to interoperability problems. + +2. JSON Grammar + + A JSON text is a sequence of tokens. The set of tokens includes six + structural characters, strings, numbers, and three literal names. + + A JSON text is a serialized value. Note that certain previous + specifications of JSON constrained a JSON text to be an object or an + + + +Bray Standards Track [Page 4] + +RFC 7159 JSON March 2014 + + + array. Implementations that generate only objects or arrays where a + JSON text is called for will be interoperable in the sense that all + implementations will accept these as conforming JSON texts. + + JSON-text = ws value ws + + These are the six structural characters: + + begin-array = ws %x5B ws ; [ left square bracket + + begin-object = ws %x7B ws ; { left curly bracket + + end-array = ws %x5D ws ; ] right square bracket + + end-object = ws %x7D ws ; } right curly bracket + + name-separator = ws %x3A ws ; : colon + + value-separator = ws %x2C ws ; , comma + + Insignificant whitespace is allowed before or after any of the six + structural characters. + + ws = *( + %x20 / ; Space + %x09 / ; Horizontal tab + %x0A / ; Line feed or New line + %x0D ) ; Carriage return + +3. Values + + A JSON value MUST be an object, array, number, or string, or one of + the following three literal names: + + false null true + + The literal names MUST be lowercase. No other literal names are + allowed. + + value = false / null / true / object / array / number / string + + false = %x66.61.6c.73.65 ; false + + null = %x6e.75.6c.6c ; null + + true = %x74.72.75.65 ; true + + + + + +Bray Standards Track [Page 5] + +RFC 7159 JSON March 2014 + + +4. Objects + + An object structure is represented as a pair of curly brackets + surrounding zero or more name/value pairs (or members). A name is a + string. 
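Note: the insignificant-whitespace rule of Section 2 and the lowercase-only literal names of Section 3 can both be confirmed directly with the vendored gem:

    require 'json'

    JSON.parse(" \t[ true ,\n null ,\r\n false ]  ")   # => [true, nil, false]
    JSON.parse('[True]') rescue $!                     # capitalised literal => JSON::ParserError
    JSON.parse('[nil]')  rescue $!                     # Ruby's nil is not a JSON literal either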
A single colon comes after each name, separating the name + from the value. A single comma separates a value from a following + name. The names within an object SHOULD be unique. + + object = begin-object [ member *( value-separator member ) ] + end-object + + member = string name-separator value + + An object whose names are all unique is interoperable in the sense + that all software implementations receiving that object will agree on + the name-value mappings. When the names within an object are not + unique, the behavior of software that receives such an object is + unpredictable. Many implementations report the last name/value pair + only. Other implementations report an error or fail to parse the + object, and some implementations report all of the name/value pairs, + including duplicates. + + JSON parsing libraries have been observed to differ as to whether or + not they make the ordering of object members visible to calling + software. Implementations whose behavior does not depend on member + ordering will be interoperable in the sense that they will not be + affected by these differences. + +5. Arrays + + An array structure is represented as square brackets surrounding zero + or more values (or elements). Elements are separated by commas. + + array = begin-array [ value *( value-separator value ) ] end-array + + There is no requirement that the values in an array be of the same + type. + +6. Numbers + + The representation of numbers is similar to that used in most + programming languages. A number is represented in base 10 using + decimal digits. It contains an integer component that may be + prefixed with an optional minus sign, which may be followed by a + fraction part and/or an exponent part. Leading zeros are not + allowed. + + A fraction part is a decimal point followed by one or more digits. + + + +Bray Standards Track [Page 6] + +RFC 7159 JSON March 2014 + + + An exponent part begins with the letter E in upper or lower case, + which may be followed by a plus or minus sign. The E and optional + sign are followed by one or more digits. + + Numeric values that cannot be represented in the grammar below (such + as Infinity and NaN) are not permitted. + + number = [ minus ] int [ frac ] [ exp ] + + decimal-point = %x2E ; . + + digit1-9 = %x31-39 ; 1-9 + + e = %x65 / %x45 ; e E + + exp = e [ minus / plus ] 1*DIGIT + + frac = decimal-point 1*DIGIT + + int = zero / ( digit1-9 *DIGIT ) + + minus = %x2D ; - + + plus = %x2B ; + + + zero = %x30 ; 0 + + This specification allows implementations to set limits on the range + and precision of numbers accepted. Since software that implements + IEEE 754-2008 binary64 (double precision) numbers [IEEE754] is + generally available and widely used, good interoperability can be + achieved by implementations that expect no more precision or range + than these provide, in the sense that implementations will + approximate JSON numbers within the expected precision. A JSON + number such as 1E400 or 3.141592653589793238462643383279 may indicate + potential interoperability problems, since it suggests that the + software that created it expects receiving software to have greater + capabilities for numeric magnitude and precision than is widely + available. + + Note that when such software is used, numbers that are integers and + are in the range [-(2**53)+1, (2**53)-1] are interoperable in the + sense that implementations will agree exactly on their numeric + values. + + + + + + + +Bray Standards Track [Page 7] + +RFC 7159 JSON March 2014 + + +7. 
Strings + + The representation of strings is similar to conventions used in the C + family of programming languages. A string begins and ends with + quotation marks. All Unicode characters may be placed within the + quotation marks, except for the characters that must be escaped: + quotation mark, reverse solidus, and the control characters (U+0000 + through U+001F). + + Any character may be escaped. If the character is in the Basic + Multilingual Plane (U+0000 through U+FFFF), then it may be + represented as a six-character sequence: a reverse solidus, followed + by the lowercase letter u, followed by four hexadecimal digits that + encode the character's code point. The hexadecimal letters A though + F can be upper or lower case. So, for example, a string containing + only a single reverse solidus character may be represented as + "\u005C". + + Alternatively, there are two-character sequence escape + representations of some popular characters. So, for example, a + string containing only a single reverse solidus character may be + represented more compactly as "\\". + + To escape an extended character that is not in the Basic Multilingual + Plane, the character is represented as a 12-character sequence, + encoding the UTF-16 surrogate pair. So, for example, a string + containing only the G clef character (U+1D11E) may be represented as + "\uD834\uDD1E". + + string = quotation-mark *char quotation-mark + + char = unescaped / + escape ( + %x22 / ; " quotation mark U+0022 + %x5C / ; \ reverse solidus U+005C + %x2F / ; / solidus U+002F + %x62 / ; b backspace U+0008 + %x66 / ; f form feed U+000C + %x6E / ; n line feed U+000A + %x72 / ; r carriage return U+000D + %x74 / ; t tab U+0009 + %x75 4HEXDIG ) ; uXXXX U+XXXX + + escape = %x5C ; \ + + quotation-mark = %x22 ; " + + unescaped = %x20-21 / %x23-5B / %x5D-10FFFF + + + +Bray Standards Track [Page 8] + +RFC 7159 JSON March 2014 + + +8. String and Character Issues + +8.1. Character Encoding + + JSON text SHALL be encoded in UTF-8, UTF-16, or UTF-32. The default + encoding is UTF-8, and JSON texts that are encoded in UTF-8 are + interoperable in the sense that they will be read successfully by the + maximum number of implementations; there are many implementations + that cannot successfully read texts in other encodings (such as + UTF-16 and UTF-32). + + Implementations MUST NOT add a byte order mark to the beginning of a + JSON text. In the interests of interoperability, implementations + that parse JSON texts MAY ignore the presence of a byte order mark + rather than treating it as an error. + +8.2. Unicode Characters + + When all the strings represented in a JSON text are composed entirely + of Unicode characters [UNICODE] (however escaped), then that JSON + text is interoperable in the sense that all software implementations + that parse it will agree on the contents of names and of string + values in objects and arrays. + + However, the ABNF in this specification allows member names and + string values to contain bit sequences that cannot encode Unicode + characters; for example, "\uDEAD" (a single unpaired UTF-16 + surrogate). Instances of this have been observed, for example, when + a library truncates a UTF-16 string without checking whether the + truncation split a surrogate pair. The behavior of software that + receives JSON texts containing such values is unpredictable; for + example, implementations might return different values for the length + of a string value or even suffer fatal runtime exceptions. + +8.3. 
String Comparison + + Software implementations are typically required to test names of + object members for equality. Implementations that transform the + textual representation into sequences of Unicode code units and then + perform the comparison numerically, code unit by code unit, are + interoperable in the sense that implementations will agree in all + cases on equality or inequality of two strings. For example, + implementations that compare strings with escaped characters + unconverted may incorrectly find that "a\\b" and "a\u005Cb" are not + equal. + + + + + + +Bray Standards Track [Page 9] + +RFC 7159 JSON March 2014 + + +9. Parsers + + A JSON parser transforms a JSON text into another representation. A + JSON parser MUST accept all texts that conform to the JSON grammar. + A JSON parser MAY accept non-JSON forms or extensions. + + An implementation may set limits on the size of texts that it + accepts. An implementation may set limits on the maximum depth of + nesting. An implementation may set limits on the range and precision + of numbers. An implementation may set limits on the length and + character contents of strings. + +10. Generators + + A JSON generator produces JSON text. The resulting text MUST + strictly conform to the JSON grammar. + +11. IANA Considerations + + The MIME media type for JSON text is application/json. + + Type name: application + + Subtype name: json + + Required parameters: n/a + + Optional parameters: n/a + + Encoding considerations: binary + + Security considerations: See [RFC7159], Section 12. + + Interoperability considerations: Described in [RFC7159] + + Published specification: [RFC7159] + + Applications that use this media type: + JSON has been used to exchange data between applications written + in all of these programming languages: ActionScript, C, C#, + Clojure, ColdFusion, Common Lisp, E, Erlang, Go, Java, JavaScript, + Lua, Objective CAML, Perl, PHP, Python, Rebol, Ruby, Scala, and + Scheme. + + + + + + + + +Bray Standards Track [Page 10] + +RFC 7159 JSON March 2014 + + + Additional information: + Magic number(s): n/a + File extension(s): .json + Macintosh file type code(s): TEXT + + Person & email address to contact for further information: + IESG + + + Intended usage: COMMON + + Restrictions on usage: none + + Author: + Douglas Crockford + + + Change controller: + IESG + + + Note: No "charset" parameter is defined for this registration. + Adding one really has no effect on compliant recipients. + +12. Security Considerations + + Generally, there are security issues with scripting languages. JSON + is a subset of JavaScript but excludes assignment and invocation. + + Since JSON's syntax is borrowed from JavaScript, it is possible to + use that language's "eval()" function to parse JSON texts. This + generally constitutes an unacceptable security risk, since the text + could contain executable code along with data declarations. The same + consideration applies to the use of eval()-like functions in any + other programming language in which JSON texts conform to that + language's syntax. + + + + + + + + + + + + + + + +Bray Standards Track [Page 11] + +RFC 7159 JSON March 2014 + + +13. 
Examples + + This is a JSON object: + + { + "Image": { + "Width": 800, + "Height": 600, + "Title": "View from 15th Floor", + "Thumbnail": { + "Url": "http://www.example.com/image/481989943", + "Height": 125, + "Width": 100 + }, + "Animated" : false, + "IDs": [116, 943, 234, 38793] + } + } + + Its Image member is an object whose Thumbnail member is an object and + whose IDs member is an array of numbers. + + This is a JSON array containing two objects: + + [ + { + "precision": "zip", + "Latitude": 37.7668, + "Longitude": -122.3959, + "Address": "", + "City": "SAN FRANCISCO", + "State": "CA", + "Zip": "94107", + "Country": "US" + }, + { + "precision": "zip", + "Latitude": 37.371991, + "Longitude": -122.026020, + "Address": "", + "City": "SUNNYVALE", + "State": "CA", + "Zip": "94085", + "Country": "US" + } + ] + + + + + +Bray Standards Track [Page 12] + +RFC 7159 JSON March 2014 + + + Here are three small JSON texts containing only values: + + "Hello world!" + + 42 + + true + +14. Contributors + + RFC 4627 was written by Douglas Crockford. This document was + constructed by making a relatively small number of changes to that + document; thus, the vast majority of the text here is his. + +15. References + +15.1. Normative References + + [IEEE754] IEEE, "IEEE Standard for Floating-Point Arithmetic", IEEE + Standard 754, August 2008, + . + + [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate + Requirement Levels", BCP 14, RFC 2119, March 1997. + + [RFC5234] Crocker, D. and P. Overell, "Augmented BNF for Syntax + Specifications: ABNF", STD 68, RFC 5234, January 2008. + + [UNICODE] The Unicode Consortium, "The Unicode Standard", + . + +15.2. Informative References + + [ECMA-262] Ecma International, "ECMAScript Language Specification + Edition 5.1", Standard ECMA-262, June 2011, + . + + [ECMA-404] Ecma International, "The JSON Data Interchange Format", + Standard ECMA-404, October 2013, + . + + [Err3607] RFC Errata, Errata ID 3607, RFC 3607, + . + + + + + + +Bray Standards Track [Page 13] + +RFC 7159 JSON March 2014 + + + [Err607] RFC Errata, Errata ID 607, RFC 607, + . + + [RFC4627] Crockford, D., "The application/json Media Type for + JavaScript Object Notation (JSON)", RFC 4627, July 2006. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 14] + +RFC 7159 JSON March 2014 + + +Appendix A. Changes from RFC 4627 + + This section lists changes between this document and the text in RFC + 4627. + + o Changed the title and abstract of the document. + + o Changed the reference to [UNICODE] to be not version specific. + + o Added a "Specifications of JSON" section. + + o Added an "Introduction to This Revision" section. + + o Changed the definition of "JSON text" so that it can be any JSON + value, removing the constraint that it be an object or array. + + o Added language about duplicate object member names, member + ordering, and interoperability. + + o Clarified the absence of a requirement that values in an array be + of the same JSON type. + + o Applied erratum #607 from RFC 4627 to correctly align the artwork + for the definition of "object". + + o Changed "as sequences of digits" to "in the grammar below" in the + "Numbers" section, and made base-10-ness explicit. + + o Added language about number interoperability as a function of + IEEE754, and added an IEEE754 reference. + + o Added language about interoperability and Unicode characters and + about string comparisons. 
To do this, turned the old "Encoding" + section into a "String and Character Issues" section, with three + subsections: "Character Encoding", "Unicode Characters", and + "String Comparison". + + o Changed guidance in the "Parsers" section to point out that + implementations may set limits on the range "and precision" of + numbers. + + o Updated and tidied the "IANA Considerations" section. + + o Made a real "Security Considerations" section and lifted the text + out of the previous "IANA Considerations" section. + + + + + + +Bray Standards Track [Page 15] + +RFC 7159 JSON March 2014 + + + o Applied erratum #3607 from RFC 4627 by removing the security + consideration that begins "A JSON text can be safely passed" and + the JavaScript code that went with that consideration. + + o Added a note to the "Security Considerations" section pointing out + the risks of using the "eval()" function in JavaScript or any + other language in which JSON texts conform to that language's + syntax. + + o Added a note to the "IANA Considerations" clarifying the absence + of a "charset" parameter for the application/json media type. + + o Changed "100" to 100 and added a boolean field, both in the first + example. + + o Added examples of JSON texts with simple values, neither objects + nor arrays. + + o Added a "Contributors" section crediting Douglas Crockford. + + o Added a reference to RFC 4627. + + o Moved the ECMAScript reference from Normative to Informative and + updated it to reference ECMAScript 5.1, and added a reference to + ECMA 404. + +Author's Address + + Tim Bray (editor) + Google, Inc. + + EMail: tbray@textuality.com + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 16] + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail10 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail10 2.json new file mode 100644 index 0000000..5d8c004 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail10 2.json @@ -0,0 +1 @@ +{"Extra value after close": true} "misplaced quoted value" \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail10.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail10.json new file mode 100644 index 0000000..5d8c004 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail10.json @@ -0,0 +1 @@ +{"Extra value after close": true} "misplaced quoted value" \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail11 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail11 2.json new file mode 100644 index 0000000..76eb95b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail11 2.json @@ -0,0 +1 @@ +{"Illegal expression": 1 + 2} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail11.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail11.json new file mode 100644 index 0000000..76eb95b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail11.json @@ -0,0 +1 @@ +{"Illegal expression": 1 + 2} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail12 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail12 2.json new file mode 100644 index 0000000..77580a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail12 2.json @@ -0,0 +1 @@ +{"Illegal invocation": alert()} \ No newline 
at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail12.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail12.json new file mode 100644 index 0000000..77580a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail12.json @@ -0,0 +1 @@ +{"Illegal invocation": alert()} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail13 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail13 2.json new file mode 100644 index 0000000..379406b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail13 2.json @@ -0,0 +1 @@ +{"Numbers cannot have leading zeroes": 013} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail13.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail13.json new file mode 100644 index 0000000..379406b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail13.json @@ -0,0 +1 @@ +{"Numbers cannot have leading zeroes": 013} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail14 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail14 2.json new file mode 100644 index 0000000..0ed366b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail14 2.json @@ -0,0 +1 @@ +{"Numbers cannot be hex": 0x14} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail14.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail14.json new file mode 100644 index 0000000..0ed366b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail14.json @@ -0,0 +1 @@ +{"Numbers cannot be hex": 0x14} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail18 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail18 2.json new file mode 100644 index 0000000..ebc11eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail18 2.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail18.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail18.json new file mode 100644 index 0000000..ebc11eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail18.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail19 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail19 2.json new file mode 100644 index 0000000..3b9c46f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail19 2.json @@ -0,0 +1 @@ +{"Missing colon" null} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail19.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail19.json new file mode 100644 index 0000000..3b9c46f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail19.json @@ -0,0 +1 @@ +{"Missing colon" null} \ No 
newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail2 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail2 2.json new file mode 100644 index 0000000..6b7c11e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail2 2.json @@ -0,0 +1 @@ +["Unclosed array" \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail2.json new file mode 100644 index 0000000..6b7c11e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail2.json @@ -0,0 +1 @@ +["Unclosed array" \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail20 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail20 2.json new file mode 100644 index 0000000..27c1af3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail20 2.json @@ -0,0 +1 @@ +{"Double colon":: null} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail20.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail20.json new file mode 100644 index 0000000..27c1af3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail20.json @@ -0,0 +1 @@ +{"Double colon":: null} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail21 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail21 2.json new file mode 100644 index 0000000..6247457 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail21 2.json @@ -0,0 +1 @@ +{"Comma instead of colon", null} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail21.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail21.json new file mode 100644 index 0000000..6247457 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail21.json @@ -0,0 +1 @@ +{"Comma instead of colon", null} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail22 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail22 2.json new file mode 100644 index 0000000..a775258 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail22 2.json @@ -0,0 +1 @@ +["Colon instead of comma": false] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail22.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail22.json new file mode 100644 index 0000000..a775258 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail22.json @@ -0,0 +1 @@ +["Colon instead of comma": false] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail23 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail23 2.json new file mode 100644 index 0000000..494add1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail23 2.json @@ -0,0 +1 @@ +["Bad value", truth] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail23.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail23.json new file mode 100644 index 0000000..494add1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail23.json @@ -0,0 +1 @@ +["Bad value", truth] \ No newline at 
end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail24 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail24 2.json new file mode 100644 index 0000000..caff239 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail24 2.json @@ -0,0 +1 @@ +['single quote'] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail24.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail24.json new file mode 100644 index 0000000..caff239 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail24.json @@ -0,0 +1 @@ +['single quote'] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail25 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail25 2.json new file mode 100644 index 0000000..2dfbd25 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail25 2.json @@ -0,0 +1 @@ +["tab character in string "] diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail25.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail25.json new file mode 100644 index 0000000..2dfbd25 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail25.json @@ -0,0 +1 @@ +["tab character in string "] diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail27 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail27 2.json new file mode 100644 index 0000000..6b01a2c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail27 2.json @@ -0,0 +1,2 @@ +["line +break"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail27.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail27.json new file mode 100644 index 0000000..6b01a2c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail27.json @@ -0,0 +1,2 @@ +["line +break"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail28 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail28 2.json new file mode 100644 index 0000000..621a010 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail28 2.json @@ -0,0 +1,2 @@ +["line\ +break"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail28.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail28.json new file mode 100644 index 0000000..621a010 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail28.json @@ -0,0 +1,2 @@ +["line\ +break"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail3 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail3 2.json new file mode 100644 index 0000000..168c81e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail3 2.json @@ -0,0 +1 @@ +{unquoted_key: "keys must be quoted"} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail3.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail3.json new file mode 100644 index 0000000..168c81e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail3.json @@ -0,0 +1 @@ +{unquoted_key: "keys must be quoted"} \ No newline at end of file diff --git 
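
The fail*.json files above are the json gem's negative fixtures: each one is deliberately malformed JSON (unquoted keys, extra commas, hex and leading-zero numbers, and so on), and the json_fixtures_test.rb added later in this diff asserts that parsing each of them raises. A minimal stand-alone sketch of that expectation, reusing the fail10.json text shown above; the script itself is illustrative and not part of the gem:

    require 'json'

    # A document with a second top-level value after the closing brace
    # must be rejected by a strict parser.
    source = '{"Extra value after close": true} "misplaced quoted value"'
    begin
      JSON.parse(source)
      raise 'expected JSON::ParserError'
    rescue JSON::ParserError => e
      puts "rejected as expected: #{e.message}"
    end
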
a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail4 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail4 2.json new file mode 100644 index 0000000..9de168b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail4 2.json @@ -0,0 +1 @@ +["extra comma",] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail4.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail4.json new file mode 100644 index 0000000..9de168b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail4.json @@ -0,0 +1 @@ +["extra comma",] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail5 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail5 2.json new file mode 100644 index 0000000..ddf3ce3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail5 2.json @@ -0,0 +1 @@ +["double extra comma",,] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail5.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail5.json new file mode 100644 index 0000000..ddf3ce3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail5.json @@ -0,0 +1 @@ +["double extra comma",,] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail6 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail6 2.json new file mode 100644 index 0000000..ed91580 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail6 2.json @@ -0,0 +1 @@ +[ , "<-- missing value"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail6.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail6.json new file mode 100644 index 0000000..ed91580 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail6.json @@ -0,0 +1 @@ +[ , "<-- missing value"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail7 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail7 2.json new file mode 100644 index 0000000..8a96af3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail7 2.json @@ -0,0 +1 @@ +["Comma after the close"], \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail7.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail7.json new file mode 100644 index 0000000..8a96af3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail7.json @@ -0,0 +1 @@ +["Comma after the close"], \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail8 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail8 2.json new file mode 100644 index 0000000..b28479c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail8 2.json @@ -0,0 +1 @@ +["Extra close"]] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail8.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail8.json new file mode 100644 index 0000000..b28479c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail8.json @@ -0,0 +1 @@ +["Extra close"]] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail9 2.json 
b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail9 2.json new file mode 100644 index 0000000..5815574 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail9 2.json @@ -0,0 +1 @@ +{"Extra comma": true,} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail9.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail9.json new file mode 100644 index 0000000..5815574 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/fail9.json @@ -0,0 +1 @@ +{"Extra comma": true,} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1 2.json new file mode 100644 index 0000000..98b77de --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1 2.json @@ -0,0 +1 @@ +"A JSON payload should be an object or array, not a string." diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1.json new file mode 100644 index 0000000..98b77de --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1.json @@ -0,0 +1 @@ +"A JSON payload should be an object or array, not a string." diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass1 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass1 2.json new file mode 100644 index 0000000..7828fcc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass1 2.json @@ -0,0 +1,56 @@ +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E666, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + "slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ], + "compact": [1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" 
+: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066 + + +,"rosebud"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass1.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass1.json new file mode 100644 index 0000000..7828fcc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass1.json @@ -0,0 +1,56 @@ +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E666, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + "slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ], + "compact": [1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" +: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066 + + +,"rosebud"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass15 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass15 2.json new file mode 100644 index 0000000..fc8376b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass15 2.json @@ -0,0 +1 @@ +["Illegal backslash escape: \x15"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass15.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass15.json new file mode 100644 index 0000000..fc8376b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass15.json @@ -0,0 +1 @@ +["Illegal backslash escape: \x15"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass16 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass16 2.json new file mode 100644 index 0000000..c43ae3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass16 2.json @@ -0,0 +1 @@ +["Illegal backslash escape: \'"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass16.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass16.json new file mode 100644 index 0000000..c43ae3c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass16.json @@ -0,0 +1 @@ +["Illegal backslash escape: \'"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass17 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass17 2.json new file mode 100644 index 0000000..62b9214 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass17 2.json @@ -0,0 +1 @@ +["Illegal backslash escape: \017"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass17.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass17.json new file mode 100644 
index 0000000..62b9214 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass17.json @@ -0,0 +1 @@ +["Illegal backslash escape: \017"] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass2 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass2 2.json new file mode 100644 index 0000000..d3c63c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass2 2.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass2.json new file mode 100644 index 0000000..d3c63c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass2.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass26 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass26 2.json new file mode 100644 index 0000000..845d26a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass26 2.json @@ -0,0 +1 @@ +["tab\ character\ in\ string\ "] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass26.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass26.json new file mode 100644 index 0000000..845d26a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass26.json @@ -0,0 +1 @@ +["tab\ character\ in\ string\ "] \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass3 2.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass3 2.json new file mode 100644 index 0000000..4528d51 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass3 2.json @@ -0,0 +1,6 @@ +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass3.json b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass3.json new file mode 100644 index 0000000..4528d51 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/fixtures/pass3.json @@ -0,0 +1,6 @@ +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." 
+ } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_addition_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_addition_test 2.rb new file mode 100644 index 0000000..61625f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_addition_test 2.rb @@ -0,0 +1,203 @@ +#frozen_string_literal: false +require 'test_helper' +require 'json/add/core' +require 'json/add/complex' +require 'json/add/rational' +require 'json/add/bigdecimal' +require 'json/add/ostruct' +require 'json/add/set' +require 'date' + +class JSONAdditionTest < Test::Unit::TestCase + include JSON + + class A + def initialize(a) + @a = a + end + + attr_reader :a + + def ==(other) + a == other.a + end + + def self.json_create(object) + new(*object['args']) + end + + def to_json(*args) + { + 'json_class' => self.class.name, + 'args' => [ @a ], + }.to_json(*args) + end + end + + class A2 < A + def to_json(*args) + { + 'json_class' => self.class.name, + 'args' => [ @a ], + }.to_json(*args) + end + end + + class B + def self.json_creatable? + false + end + + def to_json(*args) + { + 'json_class' => self.class.name, + }.to_json(*args) + end + end + + class C + def self.json_creatable? + false + end + + def to_json(*args) + { + 'json_class' => 'JSONAdditionTest::Nix', + }.to_json(*args) + end + end + + def test_extended_json + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_again = parse(json, :create_additions => true) + assert_kind_of a.class, a_again + assert_equal a, a_again + end + + def test_extended_json_default + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_hash = parse(json) + assert_kind_of Hash, a_hash + end + + def test_extended_json_disabled + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_again = parse(json, :create_additions => true) + assert_kind_of a.class, a_again + assert_equal a, a_again + a_hash = parse(json, :create_additions => false) + assert_kind_of Hash, a_hash + assert_equal( + {"args"=>[666], "json_class"=>"JSONAdditionTest::A"}.sort_by { |k,| k }, + a_hash.sort_by { |k,| k } + ) + end + + def test_extended_json_fail1 + b = B.new + assert !B.json_creatable? + json = generate(b) + assert_equal({ "json_class"=>"JSONAdditionTest::B" }, parse(json)) + end + + def test_extended_json_fail2 + c = C.new + assert !C.json_creatable? + json = generate(c) + assert_raise(ArgumentError, NameError) { parse(json, :create_additions => true) } + end + + def test_raw_strings + raw = '' + raw.respond_to?(:encode!) 
and raw.encode!(Encoding::ASCII_8BIT) + raw_array = [] + for i in 0..255 + raw << i + raw_array << i + end + json = raw.to_json_raw + json_raw_object = raw.to_json_raw_object + hash = { 'json_class' => 'String', 'raw'=> raw_array } + assert_equal hash, json_raw_object + assert_match(/\A\{.*\}\z/, json) + assert_match(/"json_class":"String"/, json) + assert_match(/"raw":\[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255\]/, json) + raw_again = parse(json, :create_additions => true) + assert_equal raw, raw_again + end + + MyJsonStruct = Struct.new 'MyJsonStruct', :foo, :bar + + def test_core + t = Time.now + assert_equal t, JSON(JSON(t), :create_additions => true) + d = Date.today + assert_equal d, JSON(JSON(d), :create_additions => true) + d = DateTime.civil(2007, 6, 14, 14, 57, 10, Rational(1, 12), 2299161) + assert_equal d, JSON(JSON(d), :create_additions => true) + assert_equal 1..10, JSON(JSON(1..10), :create_additions => true) + assert_equal 1...10, JSON(JSON(1...10), :create_additions => true) + assert_equal "a".."c", JSON(JSON("a".."c"), :create_additions => true) + assert_equal "a"..."c", JSON(JSON("a"..."c"), :create_additions => true) + s = MyJsonStruct.new 4711, 'foot' + assert_equal s, JSON(JSON(s), :create_additions => true) + struct = Struct.new :foo, :bar + s = struct.new 4711, 'foot' + assert_raise(JSONError) { JSON(s) } + begin + raise TypeError, "test me" + rescue TypeError => e + e_json = JSON.generate e + e_again = JSON e_json, :create_additions => true + assert_kind_of TypeError, e_again + assert_equal e.message, e_again.message + assert_equal e.backtrace, e_again.backtrace + end + assert_equal(/foo/, JSON(JSON(/foo/), :create_additions => true)) + assert_equal(/foo/i, JSON(JSON(/foo/i), :create_additions => true)) + end + + def test_utc_datetime + now = Time.now + d = DateTime.parse(now.to_s, :create_additions => true) # usual case + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.parse(now.utc.to_s) # of = 0 + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.civil(2008, 6, 17, 11, 48, 32, Rational(1,24)) + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.civil(2008, 6, 17, 11, 48, 32, Rational(12,24)) + assert_equal d, parse(d.to_json, :create_additions => true) + end + + def test_rational_complex + assert_equal Rational(2, 9), parse(JSON(Rational(2, 9)), :create_additions => true) + assert_equal Complex(2, 9), parse(JSON(Complex(2, 9)), :create_additions => true) + end + + def test_bigdecimal + assert_equal BigDecimal('3.141', 23), JSON(JSON(BigDecimal('3.141', 23)), :create_additions => true) + assert_equal BigDecimal('3.141', 666), 
JSON(JSON(BigDecimal('3.141', 666)), :create_additions => true) + end + + def test_ostruct + o = OpenStruct.new + # XXX this won't work; o.foo = { :bar => true } + o.foo = { 'bar' => true } + assert_equal o, parse(JSON(o), :create_additions => true) + end + + def test_set + s = Set.new([:a, :b, :c, :a]) + assert_equal s, JSON.parse(JSON(s), :create_additions => true) + ss = SortedSet.new([:d, :b, :a, :c]) + ss_again = JSON.parse(JSON(ss), :create_additions => true) + assert_kind_of ss.class, ss_again + assert_equal ss, ss_again + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_addition_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_addition_test.rb new file mode 100644 index 0000000..61625f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_addition_test.rb @@ -0,0 +1,203 @@ +#frozen_string_literal: false +require 'test_helper' +require 'json/add/core' +require 'json/add/complex' +require 'json/add/rational' +require 'json/add/bigdecimal' +require 'json/add/ostruct' +require 'json/add/set' +require 'date' + +class JSONAdditionTest < Test::Unit::TestCase + include JSON + + class A + def initialize(a) + @a = a + end + + attr_reader :a + + def ==(other) + a == other.a + end + + def self.json_create(object) + new(*object['args']) + end + + def to_json(*args) + { + 'json_class' => self.class.name, + 'args' => [ @a ], + }.to_json(*args) + end + end + + class A2 < A + def to_json(*args) + { + 'json_class' => self.class.name, + 'args' => [ @a ], + }.to_json(*args) + end + end + + class B + def self.json_creatable? + false + end + + def to_json(*args) + { + 'json_class' => self.class.name, + }.to_json(*args) + end + end + + class C + def self.json_creatable? + false + end + + def to_json(*args) + { + 'json_class' => 'JSONAdditionTest::Nix', + }.to_json(*args) + end + end + + def test_extended_json + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_again = parse(json, :create_additions => true) + assert_kind_of a.class, a_again + assert_equal a, a_again + end + + def test_extended_json_default + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_hash = parse(json) + assert_kind_of Hash, a_hash + end + + def test_extended_json_disabled + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_again = parse(json, :create_additions => true) + assert_kind_of a.class, a_again + assert_equal a, a_again + a_hash = parse(json, :create_additions => false) + assert_kind_of Hash, a_hash + assert_equal( + {"args"=>[666], "json_class"=>"JSONAdditionTest::A"}.sort_by { |k,| k }, + a_hash.sort_by { |k,| k } + ) + end + + def test_extended_json_fail1 + b = B.new + assert !B.json_creatable? + json = generate(b) + assert_equal({ "json_class"=>"JSONAdditionTest::B" }, parse(json)) + end + + def test_extended_json_fail2 + c = C.new + assert !C.json_creatable? + json = generate(c) + assert_raise(ArgumentError, NameError) { parse(json, :create_additions => true) } + end + + def test_raw_strings + raw = '' + raw.respond_to?(:encode!) 
and raw.encode!(Encoding::ASCII_8BIT) + raw_array = [] + for i in 0..255 + raw << i + raw_array << i + end + json = raw.to_json_raw + json_raw_object = raw.to_json_raw_object + hash = { 'json_class' => 'String', 'raw'=> raw_array } + assert_equal hash, json_raw_object + assert_match(/\A\{.*\}\z/, json) + assert_match(/"json_class":"String"/, json) + assert_match(/"raw":\[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255\]/, json) + raw_again = parse(json, :create_additions => true) + assert_equal raw, raw_again + end + + MyJsonStruct = Struct.new 'MyJsonStruct', :foo, :bar + + def test_core + t = Time.now + assert_equal t, JSON(JSON(t), :create_additions => true) + d = Date.today + assert_equal d, JSON(JSON(d), :create_additions => true) + d = DateTime.civil(2007, 6, 14, 14, 57, 10, Rational(1, 12), 2299161) + assert_equal d, JSON(JSON(d), :create_additions => true) + assert_equal 1..10, JSON(JSON(1..10), :create_additions => true) + assert_equal 1...10, JSON(JSON(1...10), :create_additions => true) + assert_equal "a".."c", JSON(JSON("a".."c"), :create_additions => true) + assert_equal "a"..."c", JSON(JSON("a"..."c"), :create_additions => true) + s = MyJsonStruct.new 4711, 'foot' + assert_equal s, JSON(JSON(s), :create_additions => true) + struct = Struct.new :foo, :bar + s = struct.new 4711, 'foot' + assert_raise(JSONError) { JSON(s) } + begin + raise TypeError, "test me" + rescue TypeError => e + e_json = JSON.generate e + e_again = JSON e_json, :create_additions => true + assert_kind_of TypeError, e_again + assert_equal e.message, e_again.message + assert_equal e.backtrace, e_again.backtrace + end + assert_equal(/foo/, JSON(JSON(/foo/), :create_additions => true)) + assert_equal(/foo/i, JSON(JSON(/foo/i), :create_additions => true)) + end + + def test_utc_datetime + now = Time.now + d = DateTime.parse(now.to_s, :create_additions => true) # usual case + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.parse(now.utc.to_s) # of = 0 + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.civil(2008, 6, 17, 11, 48, 32, Rational(1,24)) + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.civil(2008, 6, 17, 11, 48, 32, Rational(12,24)) + assert_equal d, parse(d.to_json, :create_additions => true) + end + + def test_rational_complex + assert_equal Rational(2, 9), parse(JSON(Rational(2, 9)), :create_additions => true) + assert_equal Complex(2, 9), parse(JSON(Complex(2, 9)), :create_additions => true) + end + + def test_bigdecimal + assert_equal BigDecimal('3.141', 23), JSON(JSON(BigDecimal('3.141', 23)), :create_additions => true) + assert_equal BigDecimal('3.141', 666), 
JSON(JSON(BigDecimal('3.141', 666)), :create_additions => true) + end + + def test_ostruct + o = OpenStruct.new + # XXX this won't work; o.foo = { :bar => true } + o.foo = { 'bar' => true } + assert_equal o, parse(JSON(o), :create_additions => true) + end + + def test_set + s = Set.new([:a, :b, :c, :a]) + assert_equal s, JSON.parse(JSON(s), :create_additions => true) + ss = SortedSet.new([:d, :b, :a, :c]) + ss_again = JSON.parse(JSON(ss), :create_additions => true) + assert_kind_of ss.class, ss_again + assert_equal ss, ss_again + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_common_interface_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_common_interface_test 2.rb new file mode 100644 index 0000000..53f335e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_common_interface_test 2.rb @@ -0,0 +1,126 @@ +#frozen_string_literal: false +require 'test_helper' +require 'stringio' +require 'tempfile' + +class JSONCommonInterfaceTest < Test::Unit::TestCase + include JSON + + def setup + @hash = { + 'a' => 2, + 'b' => 3.141, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "\"\0\037", + 'h' => 1000.0, + 'i' => 0.001 + } + @json = '{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},'\ + '"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}' + end + + def test_index + assert_equal @json, JSON[@hash] + assert_equal @hash, JSON[@json] + end + + def test_parser + assert_match(/::Parser\z/, JSON.parser.name) + end + + def test_generator + assert_match(/::Generator\z/, JSON.generator.name) + end + + def test_state + assert_match(/::Generator::State\z/, JSON.state.name) + end + + def test_create_id + assert_equal 'json_class', JSON.create_id + JSON.create_id = 'foo_bar' + assert_equal 'foo_bar', JSON.create_id + ensure + JSON.create_id = 'json_class' + end + + def test_deep_const_get + assert_raise(ArgumentError) { JSON.deep_const_get('Nix::Da') } + assert_equal File::SEPARATOR, JSON.deep_const_get('File::SEPARATOR') + end + + def test_parse + assert_equal [ 1, 2, 3, ], JSON.parse('[ 1, 2, 3 ]') + end + + def test_parse_bang + assert_equal [ 1, Infinity, 3, ], JSON.parse!('[ 1, Infinity, 3 ]') + end + + def test_generate + assert_equal '[1,2,3]', JSON.generate([ 1, 2, 3 ]) + end + + def test_fast_generate + assert_equal '[1,2,3]', JSON.generate([ 1, 2, 3 ]) + end + + def test_pretty_generate + assert_equal "[\n 1,\n 2,\n 3\n]", JSON.pretty_generate([ 1, 2, 3 ]) + end + + def test_load + assert_equal @hash, JSON.load(@json) + tempfile = Tempfile.open('@json') + tempfile.write @json + tempfile.rewind + assert_equal @hash, JSON.load(tempfile) + stringio = StringIO.new(@json) + stringio.rewind + assert_equal @hash, JSON.load(stringio) + assert_equal nil, JSON.load(nil) + assert_equal nil, JSON.load('') + ensure + tempfile.close! + end + + def test_load_with_options + json = '{ "foo": NaN }' + assert JSON.load(json, nil, :allow_nan => true)['foo'].nan? 
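
json_addition_test.rb above exercises the gem's opt-in type extension: a class serialises itself with a "json_class" marker via to_json, and JSON.parse with :create_additions => true looks that class up and calls its json_create hook to rebuild the object. A condensed, self-contained sketch of the same round trip; the Point class is illustrative, while the hooks and options are the gem's real API:

    require 'json'

    class Point
      attr_reader :x, :y

      def initialize(x, y)
        @x, @y = x, y
      end

      def ==(other)
        other.is_a?(Point) && x == other.x && y == other.y
      end

      # Called by the parser when :create_additions is enabled and it sees
      # a "json_class" key naming this class.
      def self.json_create(object)
        new(*object['args'])
      end

      def to_json(*args)
        { 'json_class' => self.class.name, 'args' => [x, y] }.to_json(*args)
      end
    end

    json = JSON.generate(Point.new(1, 2))
    puts json                                       # {"json_class":"Point","args":[1,2]}
    p JSON.parse(json)                              # a plain Hash by default
    p JSON.parse(json, :create_additions => true)   # a Point again

Without :create_additions the document simply comes back as a Hash, which is why the tests check both forms.
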
+ end + + def test_load_null + assert_equal nil, JSON.load(nil, nil, :allow_blank => true) + assert_raise(TypeError) { JSON.load(nil, nil, :allow_blank => false) } + assert_raise(JSON::ParserError) { JSON.load('', nil, :allow_blank => false) } + end + + def test_dump + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + assert_equal too_deep, dump(eval(too_deep)) + assert_kind_of String, Marshal.dump(eval(too_deep)) + assert_raise(ArgumentError) { dump(eval(too_deep), 100) } + assert_raise(ArgumentError) { Marshal.dump(eval(too_deep), 100) } + assert_equal too_deep, dump(eval(too_deep), 101) + assert_kind_of String, Marshal.dump(eval(too_deep), 101) + output = StringIO.new + dump(eval(too_deep), output) + assert_equal too_deep, output.string + output = StringIO.new + dump(eval(too_deep), output, 101) + assert_equal too_deep, output.string + end + + def test_dump_should_modify_defaults + max_nesting = JSON.dump_default_options[:max_nesting] + dump([], StringIO.new, 10) + assert_equal max_nesting, JSON.dump_default_options[:max_nesting] + end + + def test_JSON + assert_equal @json, JSON(@hash) + assert_equal @hash, JSON(@json) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_common_interface_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_common_interface_test.rb new file mode 100644 index 0000000..53f335e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_common_interface_test.rb @@ -0,0 +1,126 @@ +#frozen_string_literal: false +require 'test_helper' +require 'stringio' +require 'tempfile' + +class JSONCommonInterfaceTest < Test::Unit::TestCase + include JSON + + def setup + @hash = { + 'a' => 2, + 'b' => 3.141, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "\"\0\037", + 'h' => 1000.0, + 'i' => 0.001 + } + @json = '{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},'\ + '"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}' + end + + def test_index + assert_equal @json, JSON[@hash] + assert_equal @hash, JSON[@json] + end + + def test_parser + assert_match(/::Parser\z/, JSON.parser.name) + end + + def test_generator + assert_match(/::Generator\z/, JSON.generator.name) + end + + def test_state + assert_match(/::Generator::State\z/, JSON.state.name) + end + + def test_create_id + assert_equal 'json_class', JSON.create_id + JSON.create_id = 'foo_bar' + assert_equal 'foo_bar', JSON.create_id + ensure + JSON.create_id = 'json_class' + end + + def test_deep_const_get + assert_raise(ArgumentError) { JSON.deep_const_get('Nix::Da') } + assert_equal File::SEPARATOR, JSON.deep_const_get('File::SEPARATOR') + end + + def test_parse + assert_equal [ 1, 2, 3, ], JSON.parse('[ 1, 2, 3 ]') + end + + def test_parse_bang + assert_equal [ 1, Infinity, 3, ], JSON.parse!('[ 1, Infinity, 3 ]') + end + + def test_generate + assert_equal '[1,2,3]', JSON.generate([ 1, 2, 3 ]) + end + + def test_fast_generate + assert_equal '[1,2,3]', JSON.generate([ 1, 2, 3 ]) + end + + def test_pretty_generate + assert_equal "[\n 1,\n 2,\n 3\n]", JSON.pretty_generate([ 1, 2, 3 ]) + end + + def test_load + assert_equal @hash, JSON.load(@json) + tempfile = Tempfile.open('@json') + tempfile.write @json + tempfile.rewind + assert_equal @hash, JSON.load(tempfile) + stringio = StringIO.new(@json) + stringio.rewind + assert_equal @hash, JSON.load(stringio) + assert_equal nil, 
JSON.load(nil) + assert_equal nil, JSON.load('') + ensure + tempfile.close! + end + + def test_load_with_options + json = '{ "foo": NaN }' + assert JSON.load(json, nil, :allow_nan => true)['foo'].nan? + end + + def test_load_null + assert_equal nil, JSON.load(nil, nil, :allow_blank => true) + assert_raise(TypeError) { JSON.load(nil, nil, :allow_blank => false) } + assert_raise(JSON::ParserError) { JSON.load('', nil, :allow_blank => false) } + end + + def test_dump + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + assert_equal too_deep, dump(eval(too_deep)) + assert_kind_of String, Marshal.dump(eval(too_deep)) + assert_raise(ArgumentError) { dump(eval(too_deep), 100) } + assert_raise(ArgumentError) { Marshal.dump(eval(too_deep), 100) } + assert_equal too_deep, dump(eval(too_deep), 101) + assert_kind_of String, Marshal.dump(eval(too_deep), 101) + output = StringIO.new + dump(eval(too_deep), output) + assert_equal too_deep, output.string + output = StringIO.new + dump(eval(too_deep), output, 101) + assert_equal too_deep, output.string + end + + def test_dump_should_modify_defaults + max_nesting = JSON.dump_default_options[:max_nesting] + dump([], StringIO.new, 10) + assert_equal max_nesting, JSON.dump_default_options[:max_nesting] + end + + def test_JSON + assert_equal @json, JSON(@hash) + assert_equal @hash, JSON(@json) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_encoding_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_encoding_test 2.rb new file mode 100644 index 0000000..cc7b715 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_encoding_test 2.rb @@ -0,0 +1,107 @@ +# encoding: utf-8 +#frozen_string_literal: false +require 'test_helper' + +class JSONEncodingTest < Test::Unit::TestCase + include JSON + + def setup + @utf_8 = '"© ≠ €!"' + @ascii_8bit = @utf_8.dup.force_encoding('ascii-8bit') + @parsed = "© ≠ €!" + @generated = '"\u00a9 \u2260 \u20ac!"' + if String.method_defined?(:encode) + @utf_16_data = @parsed.encode('utf-16be', 'utf-8') + @utf_16be = @utf_8.encode('utf-16be', 'utf-8') + @utf_16le = @utf_8.encode('utf-16le', 'utf-8') + @utf_32be = @utf_8.encode('utf-32be', 'utf-8') + @utf_32le = @utf_8.encode('utf-32le', 'utf-8') + else + require 'iconv' + @utf_16_data, = Iconv.iconv('utf-16be', 'utf-8', @parsed) + @utf_16be, = Iconv.iconv('utf-16be', 'utf-8', @utf_8) + @utf_16le, = Iconv.iconv('utf-16le', 'utf-8', @utf_8) + @utf_32be, = Iconv.iconv('utf-32be', 'utf-8', @utf_8) + @utf_32le, = Iconv.iconv('utf-32le', 'utf-8', @utf_8) + end + end + + def test_parse + assert_equal @parsed, JSON.parse(@ascii_8bit) + assert_equal @parsed, JSON.parse(@utf_8) + assert_equal @parsed, JSON.parse(@utf_16be) + assert_equal @parsed, JSON.parse(@utf_16le) + assert_equal @parsed, JSON.parse(@utf_32be) + assert_equal @parsed, JSON.parse(@utf_32le) + end + + def test_generate + assert_equal @generated, JSON.generate(@parsed, :ascii_only => true) + assert_equal @generated, JSON.generate(@utf_16_data, :ascii_only => true) + end + + def test_unicode + assert_equal '""', ''.to_json + assert_equal '"\\b"', "\b".to_json + assert_equal '"\u0001"', 0x1.chr.to_json + assert_equal '"\u001f"', 0x1f.chr.to_json + assert_equal '" "', ' '.to_json + assert_equal "\"#{0x7f.chr}\"", 0x7f.chr.to_json + utf8 = [ "© ≠ €! \01" ] + json = '["© ≠ €! 
\u0001"]' + assert_equal json, utf8.to_json(:ascii_only => false) + assert_equal utf8, parse(json) + json = '["\u00a9 \u2260 \u20ac! \u0001"]' + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + utf8 = ["\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212"] + json = "[\"\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212\"]" + assert_equal utf8, parse(json) + assert_equal json, utf8.to_json(:ascii_only => false) + utf8 = ["\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212"] + assert_equal utf8, parse(json) + json = "[\"\\u3042\\u3044\\u3046\\u3048\\u304a\"]" + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + utf8 = ['საქართველო'] + json = '["საქართველო"]' + assert_equal json, utf8.to_json(:ascii_only => false) + json = "[\"\\u10e1\\u10d0\\u10e5\\u10d0\\u10e0\\u10d7\\u10d5\\u10d4\\u10da\\u10dd\"]" + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + assert_equal '["Ã"]', generate(["Ã"], :ascii_only => false) + assert_equal '["\\u00c3"]', generate(["Ã"], :ascii_only => true) + assert_equal ["€"], parse('["\u20ac"]') + utf8 = ["\xf0\xa0\x80\x81"] + json = "[\"\xf0\xa0\x80\x81\"]" + assert_equal json, generate(utf8, :ascii_only => false) + assert_equal utf8, parse(json) + json = '["\ud840\udc01"]' + assert_equal json, generate(utf8, :ascii_only => true) + assert_equal utf8, parse(json) + assert_raise(JSON::ParserError) { parse('"\u"') } + assert_raise(JSON::ParserError) { parse('"\ud800"') } + end + + def test_chars + (0..0x7f).each do |i| + json = '["\u%04x"]' % i + if RUBY_VERSION >= "1.9." + i = i.chr + end + assert_equal i, parse(json).first[0] + if i == ?\b + generated = generate(["" << i]) + assert '["\b"]' == generated || '["\10"]' == generated + elsif [?\n, ?\r, ?\t, ?\f].include?(i) + assert_equal '[' << ('' << i).dump << ']', generate(["" << i]) + elsif i.chr < 0x20.chr + assert_equal json, generate(["" << i]) + end + end + assert_raise(JSON::GeneratorError) do + generate(["\x80"], :ascii_only => true) + end + assert_equal "\302\200", parse('["\u0080"]').first + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_encoding_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_encoding_test.rb new file mode 100644 index 0000000..cc7b715 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_encoding_test.rb @@ -0,0 +1,107 @@ +# encoding: utf-8 +#frozen_string_literal: false +require 'test_helper' + +class JSONEncodingTest < Test::Unit::TestCase + include JSON + + def setup + @utf_8 = '"© ≠ €!"' + @ascii_8bit = @utf_8.dup.force_encoding('ascii-8bit') + @parsed = "© ≠ €!" 
+ @generated = '"\u00a9 \u2260 \u20ac!"' + if String.method_defined?(:encode) + @utf_16_data = @parsed.encode('utf-16be', 'utf-8') + @utf_16be = @utf_8.encode('utf-16be', 'utf-8') + @utf_16le = @utf_8.encode('utf-16le', 'utf-8') + @utf_32be = @utf_8.encode('utf-32be', 'utf-8') + @utf_32le = @utf_8.encode('utf-32le', 'utf-8') + else + require 'iconv' + @utf_16_data, = Iconv.iconv('utf-16be', 'utf-8', @parsed) + @utf_16be, = Iconv.iconv('utf-16be', 'utf-8', @utf_8) + @utf_16le, = Iconv.iconv('utf-16le', 'utf-8', @utf_8) + @utf_32be, = Iconv.iconv('utf-32be', 'utf-8', @utf_8) + @utf_32le, = Iconv.iconv('utf-32le', 'utf-8', @utf_8) + end + end + + def test_parse + assert_equal @parsed, JSON.parse(@ascii_8bit) + assert_equal @parsed, JSON.parse(@utf_8) + assert_equal @parsed, JSON.parse(@utf_16be) + assert_equal @parsed, JSON.parse(@utf_16le) + assert_equal @parsed, JSON.parse(@utf_32be) + assert_equal @parsed, JSON.parse(@utf_32le) + end + + def test_generate + assert_equal @generated, JSON.generate(@parsed, :ascii_only => true) + assert_equal @generated, JSON.generate(@utf_16_data, :ascii_only => true) + end + + def test_unicode + assert_equal '""', ''.to_json + assert_equal '"\\b"', "\b".to_json + assert_equal '"\u0001"', 0x1.chr.to_json + assert_equal '"\u001f"', 0x1f.chr.to_json + assert_equal '" "', ' '.to_json + assert_equal "\"#{0x7f.chr}\"", 0x7f.chr.to_json + utf8 = [ "© ≠ €! \01" ] + json = '["© ≠ €! \u0001"]' + assert_equal json, utf8.to_json(:ascii_only => false) + assert_equal utf8, parse(json) + json = '["\u00a9 \u2260 \u20ac! \u0001"]' + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + utf8 = ["\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212"] + json = "[\"\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212\"]" + assert_equal utf8, parse(json) + assert_equal json, utf8.to_json(:ascii_only => false) + utf8 = ["\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212"] + assert_equal utf8, parse(json) + json = "[\"\\u3042\\u3044\\u3046\\u3048\\u304a\"]" + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + utf8 = ['საქართველო'] + json = '["საქართველო"]' + assert_equal json, utf8.to_json(:ascii_only => false) + json = "[\"\\u10e1\\u10d0\\u10e5\\u10d0\\u10e0\\u10d7\\u10d5\\u10d4\\u10da\\u10dd\"]" + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + assert_equal '["Ã"]', generate(["Ã"], :ascii_only => false) + assert_equal '["\\u00c3"]', generate(["Ã"], :ascii_only => true) + assert_equal ["€"], parse('["\u20ac"]') + utf8 = ["\xf0\xa0\x80\x81"] + json = "[\"\xf0\xa0\x80\x81\"]" + assert_equal json, generate(utf8, :ascii_only => false) + assert_equal utf8, parse(json) + json = '["\ud840\udc01"]' + assert_equal json, generate(utf8, :ascii_only => true) + assert_equal utf8, parse(json) + assert_raise(JSON::ParserError) { parse('"\u"') } + assert_raise(JSON::ParserError) { parse('"\ud800"') } + end + + def test_chars + (0..0x7f).each do |i| + json = '["\u%04x"]' % i + if RUBY_VERSION >= "1.9." 
+ i = i.chr + end + assert_equal i, parse(json).first[0] + if i == ?\b + generated = generate(["" << i]) + assert '["\b"]' == generated || '["\10"]' == generated + elsif [?\n, ?\r, ?\t, ?\f].include?(i) + assert_equal '[' << ('' << i).dump << ']', generate(["" << i]) + elsif i.chr < 0x20.chr + assert_equal json, generate(["" << i]) + end + end + assert_raise(JSON::GeneratorError) do + generate(["\x80"], :ascii_only => true) + end + assert_equal "\302\200", parse('["\u0080"]').first + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_ext_parser_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_ext_parser_test 2.rb new file mode 100644 index 0000000..c5a030e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_ext_parser_test 2.rb @@ -0,0 +1,15 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONExtParserTest < Test::Unit::TestCase + if defined?(JSON::Ext::Parser) + def test_allocate + parser = JSON::Ext::Parser.new("{}") + assert_raise(TypeError, '[ruby-core:35079]') do + parser.__send__(:initialize, "{}") + end + parser = JSON::Ext::Parser.allocate + assert_raise(TypeError, '[ruby-core:35079]') { parser.source } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_ext_parser_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_ext_parser_test.rb new file mode 100644 index 0000000..c5a030e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_ext_parser_test.rb @@ -0,0 +1,15 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONExtParserTest < Test::Unit::TestCase + if defined?(JSON::Ext::Parser) + def test_allocate + parser = JSON::Ext::Parser.new("{}") + assert_raise(TypeError, '[ruby-core:35079]') do + parser.__send__(:initialize, "{}") + end + parser = JSON::Ext::Parser.allocate + assert_raise(TypeError, '[ruby-core:35079]') { parser.source } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_fixtures_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_fixtures_test 2.rb new file mode 100644 index 0000000..b2a1ec8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_fixtures_test 2.rb @@ -0,0 +1,37 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONFixturesTest < Test::Unit::TestCase + def setup + fixtures = File.join(File.dirname(__FILE__), 'fixtures/{fail,pass}*.json') + passed, failed = Dir[fixtures].partition { |f| f['pass'] } + @passed = passed.inject([]) { |a, f| a << [ f, File.read(f) ] }.sort + @failed = failed.inject([]) { |a, f| a << [ f, File.read(f) ] }.sort + end + + def test_passing + for name, source in @passed + begin + assert JSON.parse(source), + "Did not pass for fixture '#{name}': #{source.inspect}" + rescue => e + warn "\nCaught #{e.class}(#{e}) for fixture '#{name}': #{source.inspect}\n#{e.backtrace * "\n"}" + raise e + end + end + end + + def test_failing + for name, source in @failed + assert_raise(JSON::ParserError, JSON::NestingError, + "Did not fail for fixture '#{name}': #{source.inspect}") do + JSON.parse(source) + end + end + end + + def test_sanity + assert(@passed.size > 5) + assert(@failed.size > 20) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_fixtures_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_fixtures_test.rb new file mode 100644 index 0000000..b2a1ec8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_fixtures_test.rb @@ -0,0 +1,37 @@ 
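
json_encoding_test.rb, added a little earlier in this diff, covers the gem's two encoding-related behaviours: input in UTF-16 or UTF-32 is transcoded before parsing, and generation can be forced to pure ASCII with \uXXXX escapes. A short illustrative sketch of both; the sample strings are arbitrary:

    require 'json'

    # Generation: non-ASCII passes through by default, or is escaped on request.
    puts JSON.generate(['© ≠ €'])                       # ["© ≠ €"]
    puts JSON.generate(['© ≠ €'], :ascii_only => true)  # ["\u00a9 \u2260 \u20ac"]

    # Parsing: other Unicode encodings are accepted and transcoded internally.
    p JSON.parse('["€"]'.encode('utf-16le'))            # ["€"]
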
+#frozen_string_literal: false +require 'test_helper' + +class JSONFixturesTest < Test::Unit::TestCase + def setup + fixtures = File.join(File.dirname(__FILE__), 'fixtures/{fail,pass}*.json') + passed, failed = Dir[fixtures].partition { |f| f['pass'] } + @passed = passed.inject([]) { |a, f| a << [ f, File.read(f) ] }.sort + @failed = failed.inject([]) { |a, f| a << [ f, File.read(f) ] }.sort + end + + def test_passing + for name, source in @passed + begin + assert JSON.parse(source), + "Did not pass for fixture '#{name}': #{source.inspect}" + rescue => e + warn "\nCaught #{e.class}(#{e}) for fixture '#{name}': #{source.inspect}\n#{e.backtrace * "\n"}" + raise e + end + end + end + + def test_failing + for name, source in @failed + assert_raise(JSON::ParserError, JSON::NestingError, + "Did not fail for fixture '#{name}': #{source.inspect}") do + JSON.parse(source) + end + end + end + + def test_sanity + assert(@passed.size > 5) + assert(@failed.size > 20) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generator_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generator_test 2.rb new file mode 100644 index 0000000..ee19fa5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generator_test 2.rb @@ -0,0 +1,421 @@ +#!/usr/bin/env ruby +# encoding: utf-8 +# frozen_string_literal: false + +require 'test_helper' + +class JSONGeneratorTest < Test::Unit::TestCase + include JSON + + def setup + @hash = { + 'a' => 2, + 'b' => 3.141, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "\"\0\037", + 'h' => 1000.0, + 'i' => 0.001 + } + @json2 = '{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},' + + '"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}' + @json3 = <<'EOT'.chomp +{ + "a": 2, + "b": 3.141, + "c": "c", + "d": [ + 1, + "b", + 3.14 + ], + "e": { + "foo": "bar" + }, + "g": "\"\u0000\u001f", + "h": 1000.0, + "i": 0.001 +} +EOT + end + + def silence + v = $VERBOSE + $VERBOSE = nil + yield + ensure + $VERBOSE = v + end + + def test_remove_const_segv + return if RUBY_ENGINE == 'jruby' + stress = GC.stress + const = JSON::SAFE_STATE_PROTOTYPE.dup + + bignum_too_long_to_embed_as_string = 1234567890123456789012345 + expect = bignum_too_long_to_embed_as_string.to_s + GC.stress = true + + 10.times do |i| + tmp = bignum_too_long_to_embed_as_string.to_json + raise "'\#{expect}' is expected, but '\#{tmp}'" unless tmp == expect + end + + silence do + JSON.const_set :SAFE_STATE_PROTOTYPE, nil + end + + 10.times do |i| + assert_raise TypeError do + bignum_too_long_to_embed_as_string.to_json + end + end + ensure + GC.stress = stress + silence do + JSON.const_set :SAFE_STATE_PROTOTYPE, const + end + end if JSON.const_defined?("Ext") + + def test_generate + json = generate(@hash) + assert_equal(parse(@json2), parse(json)) + json = JSON[@hash] + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = generate({1=>2}) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', generate(666) + end + + def test_generate_pretty + json = pretty_generate(@hash) + # hashes aren't (insertion) ordered on every ruby implementation + # assert_equal(@json3, json) + assert_equal(parse(@json3), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = pretty_generate({1=>2}) + assert_equal(<<'EOT'.chomp, json) +{ + "1": 2 +} +EOT + parsed_json = parse(json) + assert_equal({"1"=>2}, 
parsed_json) + assert_equal '666', pretty_generate(666) + end + + def test_generate_custom + state = State.new(:space_before => " ", :space => " ", :indent => "", :object_nl => "\n", :array_nl => "") + json = generate({1=>{2=>3,4=>[5,6]}}, state) + assert_equal(<<'EOT'.chomp, json) +{ +"1" : { +"2" : 3, +"4" : [5,6] +} +} +EOT + end + + def test_fast_generate + json = fast_generate(@hash) + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = fast_generate({1=>2}) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', fast_generate(666) + end + + def test_own_state + state = State.new + json = generate(@hash, state) + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = generate({1=>2}, state) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', generate(666, state) + end + + def test_states + json = generate({1=>2}, nil) + assert_equal('{"1":2}', json) + s = JSON.state.new + assert s.check_circular? + assert s[:check_circular?] + h = { 1=>2 } + h[3] = h + assert_raise(JSON::NestingError) { generate(h) } + assert_raise(JSON::NestingError) { generate(h, s) } + s = JSON.state.new + a = [ 1, 2 ] + a << a + assert_raise(JSON::NestingError) { generate(a, s) } + assert s.check_circular? + assert s[:check_circular?] + end + + def test_pretty_state + state = PRETTY_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "\n", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => " ", + :max_nesting => 100, + :object_nl => "\n", + :space => " ", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_safe_state + state = SAFE_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => "", + :max_nesting => 100, + :object_nl => "", + :space => "", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_fast_state + state = FAST_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => "", + :max_nesting => 0, + :object_nl => "", + :space => "", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_allow_nan + assert_raise(GeneratorError) { generate([JSON::NaN]) } + assert_equal '[NaN]', generate([JSON::NaN], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::NaN]) } + assert_raise(GeneratorError) { pretty_generate([JSON::NaN]) } + assert_equal "[\n NaN\n]", pretty_generate([JSON::NaN], :allow_nan => true) + assert_raise(GeneratorError) { generate([JSON::Infinity]) } + assert_equal '[Infinity]', generate([JSON::Infinity], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::Infinity]) } + assert_raise(GeneratorError) { pretty_generate([JSON::Infinity]) } + assert_equal "[\n Infinity\n]", pretty_generate([JSON::Infinity], :allow_nan => true) + assert_raise(GeneratorError) { generate([JSON::MinusInfinity]) } + assert_equal '[-Infinity]', generate([JSON::MinusInfinity], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::MinusInfinity]) } + assert_raise(GeneratorError) { 
pretty_generate([JSON::MinusInfinity]) } + assert_equal "[\n -Infinity\n]", pretty_generate([JSON::MinusInfinity], :allow_nan => true) + end + + def test_depth + ary = []; ary << ary + assert_equal 0, JSON::SAFE_STATE_PROTOTYPE.depth + assert_raise(JSON::NestingError) { generate(ary) } + assert_equal 0, JSON::SAFE_STATE_PROTOTYPE.depth + assert_equal 0, JSON::PRETTY_STATE_PROTOTYPE.depth + assert_raise(JSON::NestingError) { JSON.pretty_generate(ary) } + assert_equal 0, JSON::PRETTY_STATE_PROTOTYPE.depth + s = JSON.state.new + assert_equal 0, s.depth + assert_raise(JSON::NestingError) { ary.to_json(s) } + assert_equal 100, s.depth + end + + def test_buffer_initial_length + s = JSON.state.new + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = 0 + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = -1 + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = 128 + assert_equal 128, s.buffer_initial_length + end + + def test_gc + if respond_to?(:assert_in_out_err) + assert_in_out_err(%w[-rjson --disable-gems], <<-EOS, [], []) + bignum_too_long_to_embed_as_string = 1234567890123456789012345 + expect = bignum_too_long_to_embed_as_string.to_s + GC.stress = true + + 10.times do |i| + tmp = bignum_too_long_to_embed_as_string.to_json + raise "'\#{expect}' is expected, but '\#{tmp}'" unless tmp == expect + end + EOS + end + end if GC.respond_to?(:stress=) + + def test_configure_using_configure_and_merge + numbered_state = { + :indent => "1", + :space => '2', + :space_before => '3', + :object_nl => '4', + :array_nl => '5' + } + state1 = JSON.state.new + state1.merge(numbered_state) + assert_equal '1', state1.indent + assert_equal '2', state1.space + assert_equal '3', state1.space_before + assert_equal '4', state1.object_nl + assert_equal '5', state1.array_nl + state2 = JSON.state.new + state2.configure(numbered_state) + assert_equal '1', state2.indent + assert_equal '2', state2.space + assert_equal '3', state2.space_before + assert_equal '4', state2.object_nl + assert_equal '5', state2.array_nl + end + + def test_configure_hash_conversion + state = JSON.state.new + state.configure(:indent => '1') + assert_equal '1', state.indent + state = JSON.state.new + foo = 'foo' + assert_raise(TypeError) do + state.configure(foo) + end + def foo.to_h + { :indent => '2' } + end + state.configure(foo) + assert_equal '2', state.indent + end + + if defined?(JSON::Ext::Generator) + def test_broken_bignum # [ruby-core:38867] + pid = fork do + x = 1 << 64 + x.class.class_eval do + def to_s + end + end + begin + JSON::Ext::Generator::State.new.generate(x) + exit 1 + rescue TypeError + exit 0 + end + end + _, status = Process.waitpid2(pid) + assert status.success? 
+ rescue NotImplementedError + # forking to avoid modifying core class of a parent process and + # introducing race conditions of tests are run in parallel + end + end + + def test_hash_likeness_set_symbol + state = JSON.state.new + assert_equal nil, state[:foo] + assert_equal nil.class, state[:foo].class + assert_equal nil, state['foo'] + state[:foo] = :bar + assert_equal :bar, state[:foo] + assert_equal :bar, state['foo'] + state_hash = state.to_hash + assert_kind_of Hash, state_hash + assert_equal :bar, state_hash[:foo] + end + + def test_hash_likeness_set_string + state = JSON.state.new + assert_equal nil, state[:foo] + assert_equal nil, state['foo'] + state['foo'] = :bar + assert_equal :bar, state[:foo] + assert_equal :bar, state['foo'] + state_hash = state.to_hash + assert_kind_of Hash, state_hash + assert_equal :bar, state_hash[:foo] + end + + def test_json_generate + assert_raise JSON::GeneratorError do + assert_equal true, generate(["\xea"]) + end + end + + def test_nesting + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + too_deep_ary = eval too_deep + assert_raise(JSON::NestingError) { generate too_deep_ary } + assert_raise(JSON::NestingError) { generate too_deep_ary, :max_nesting => 100 } + ok = generate too_deep_ary, :max_nesting => 101 + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => nil + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => false + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => 0 + assert_equal too_deep, ok + end + + def test_backslash + data = [ '\\.(?i:gif|jpe?g|png)$' ] + json = '["\\\\.(?i:gif|jpe?g|png)$"]' + assert_equal json, generate(data) + # + data = [ '\\"' ] + json = '["\\\\\""]' + assert_equal json, generate(data) + # + data = [ '/' ] + json = '["/"]' + assert_equal json, generate(data) + # + data = ['"'] + json = '["\""]' + assert_equal json, generate(data) + # + data = ["'"] + json = '["\\\'"]' + assert_equal '["\'"]', generate(data) + end + + def test_string_subclass + s = Class.new(String) do + def to_s; self; end + undef to_json + end + assert_nothing_raised(SystemStackError) do + assert_equal '["foo"]', JSON.generate([s.new('foo')]) + end + end + + if defined?(Encoding) + def test_nonutf8_encoding + assert_equal("\"5\u{b0}\"", "5\xb0".force_encoding("iso-8859-1").to_json) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generator_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generator_test.rb new file mode 100644 index 0000000..ee19fa5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generator_test.rb @@ -0,0 +1,421 @@ +#!/usr/bin/env ruby +# encoding: utf-8 +# frozen_string_literal: false + +require 'test_helper' + +class JSONGeneratorTest < Test::Unit::TestCase + include JSON + + def setup + @hash = { + 'a' => 2, + 'b' => 3.141, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "\"\0\037", + 'h' => 1000.0, + 'i' => 0.001 + } + @json2 = '{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},' + + '"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}' + @json3 = <<'EOT'.chomp +{ + "a": 2, + "b": 3.141, + "c": "c", + "d": [ + 1, + "b", + 3.14 + ], + "e": { + "foo": "bar" + }, + "g": "\"\u0000\u001f", + "h": 1000.0, + "i": 0.001 +} +EOT + end + + def silence + v = $VERBOSE + $VERBOSE = nil + yield + 
ensure + $VERBOSE = v + end + + def test_remove_const_segv + return if RUBY_ENGINE == 'jruby' + stress = GC.stress + const = JSON::SAFE_STATE_PROTOTYPE.dup + + bignum_too_long_to_embed_as_string = 1234567890123456789012345 + expect = bignum_too_long_to_embed_as_string.to_s + GC.stress = true + + 10.times do |i| + tmp = bignum_too_long_to_embed_as_string.to_json + raise "'\#{expect}' is expected, but '\#{tmp}'" unless tmp == expect + end + + silence do + JSON.const_set :SAFE_STATE_PROTOTYPE, nil + end + + 10.times do |i| + assert_raise TypeError do + bignum_too_long_to_embed_as_string.to_json + end + end + ensure + GC.stress = stress + silence do + JSON.const_set :SAFE_STATE_PROTOTYPE, const + end + end if JSON.const_defined?("Ext") + + def test_generate + json = generate(@hash) + assert_equal(parse(@json2), parse(json)) + json = JSON[@hash] + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = generate({1=>2}) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', generate(666) + end + + def test_generate_pretty + json = pretty_generate(@hash) + # hashes aren't (insertion) ordered on every ruby implementation + # assert_equal(@json3, json) + assert_equal(parse(@json3), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = pretty_generate({1=>2}) + assert_equal(<<'EOT'.chomp, json) +{ + "1": 2 +} +EOT + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', pretty_generate(666) + end + + def test_generate_custom + state = State.new(:space_before => " ", :space => " ", :indent => "", :object_nl => "\n", :array_nl => "") + json = generate({1=>{2=>3,4=>[5,6]}}, state) + assert_equal(<<'EOT'.chomp, json) +{ +"1" : { +"2" : 3, +"4" : [5,6] +} +} +EOT + end + + def test_fast_generate + json = fast_generate(@hash) + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = fast_generate({1=>2}) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', fast_generate(666) + end + + def test_own_state + state = State.new + json = generate(@hash, state) + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = generate({1=>2}, state) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', generate(666, state) + end + + def test_states + json = generate({1=>2}, nil) + assert_equal('{"1":2}', json) + s = JSON.state.new + assert s.check_circular? + assert s[:check_circular?] + h = { 1=>2 } + h[3] = h + assert_raise(JSON::NestingError) { generate(h) } + assert_raise(JSON::NestingError) { generate(h, s) } + s = JSON.state.new + a = [ 1, 2 ] + a << a + assert_raise(JSON::NestingError) { generate(a, s) } + assert s.check_circular? + assert s[:check_circular?] 
+ end + + def test_pretty_state + state = PRETTY_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "\n", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => " ", + :max_nesting => 100, + :object_nl => "\n", + :space => " ", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_safe_state + state = SAFE_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => "", + :max_nesting => 100, + :object_nl => "", + :space => "", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_fast_state + state = FAST_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => "", + :max_nesting => 0, + :object_nl => "", + :space => "", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_allow_nan + assert_raise(GeneratorError) { generate([JSON::NaN]) } + assert_equal '[NaN]', generate([JSON::NaN], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::NaN]) } + assert_raise(GeneratorError) { pretty_generate([JSON::NaN]) } + assert_equal "[\n NaN\n]", pretty_generate([JSON::NaN], :allow_nan => true) + assert_raise(GeneratorError) { generate([JSON::Infinity]) } + assert_equal '[Infinity]', generate([JSON::Infinity], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::Infinity]) } + assert_raise(GeneratorError) { pretty_generate([JSON::Infinity]) } + assert_equal "[\n Infinity\n]", pretty_generate([JSON::Infinity], :allow_nan => true) + assert_raise(GeneratorError) { generate([JSON::MinusInfinity]) } + assert_equal '[-Infinity]', generate([JSON::MinusInfinity], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::MinusInfinity]) } + assert_raise(GeneratorError) { pretty_generate([JSON::MinusInfinity]) } + assert_equal "[\n -Infinity\n]", pretty_generate([JSON::MinusInfinity], :allow_nan => true) + end + + def test_depth + ary = []; ary << ary + assert_equal 0, JSON::SAFE_STATE_PROTOTYPE.depth + assert_raise(JSON::NestingError) { generate(ary) } + assert_equal 0, JSON::SAFE_STATE_PROTOTYPE.depth + assert_equal 0, JSON::PRETTY_STATE_PROTOTYPE.depth + assert_raise(JSON::NestingError) { JSON.pretty_generate(ary) } + assert_equal 0, JSON::PRETTY_STATE_PROTOTYPE.depth + s = JSON.state.new + assert_equal 0, s.depth + assert_raise(JSON::NestingError) { ary.to_json(s) } + assert_equal 100, s.depth + end + + def test_buffer_initial_length + s = JSON.state.new + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = 0 + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = -1 + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = 128 + assert_equal 128, s.buffer_initial_length + end + + def test_gc + if respond_to?(:assert_in_out_err) + assert_in_out_err(%w[-rjson --disable-gems], <<-EOS, [], []) + bignum_too_long_to_embed_as_string = 1234567890123456789012345 + expect = bignum_too_long_to_embed_as_string.to_s + GC.stress = true + + 10.times do |i| + tmp = bignum_too_long_to_embed_as_string.to_json + raise "'\#{expect}' is expected, but '\#{tmp}'" unless tmp == expect + end + EOS + end + end if GC.respond_to?(:stress=) + + def test_configure_using_configure_and_merge + numbered_state = 
{ + :indent => "1", + :space => '2', + :space_before => '3', + :object_nl => '4', + :array_nl => '5' + } + state1 = JSON.state.new + state1.merge(numbered_state) + assert_equal '1', state1.indent + assert_equal '2', state1.space + assert_equal '3', state1.space_before + assert_equal '4', state1.object_nl + assert_equal '5', state1.array_nl + state2 = JSON.state.new + state2.configure(numbered_state) + assert_equal '1', state2.indent + assert_equal '2', state2.space + assert_equal '3', state2.space_before + assert_equal '4', state2.object_nl + assert_equal '5', state2.array_nl + end + + def test_configure_hash_conversion + state = JSON.state.new + state.configure(:indent => '1') + assert_equal '1', state.indent + state = JSON.state.new + foo = 'foo' + assert_raise(TypeError) do + state.configure(foo) + end + def foo.to_h + { :indent => '2' } + end + state.configure(foo) + assert_equal '2', state.indent + end + + if defined?(JSON::Ext::Generator) + def test_broken_bignum # [ruby-core:38867] + pid = fork do + x = 1 << 64 + x.class.class_eval do + def to_s + end + end + begin + JSON::Ext::Generator::State.new.generate(x) + exit 1 + rescue TypeError + exit 0 + end + end + _, status = Process.waitpid2(pid) + assert status.success? + rescue NotImplementedError + # forking to avoid modifying core class of a parent process and + # introducing race conditions of tests are run in parallel + end + end + + def test_hash_likeness_set_symbol + state = JSON.state.new + assert_equal nil, state[:foo] + assert_equal nil.class, state[:foo].class + assert_equal nil, state['foo'] + state[:foo] = :bar + assert_equal :bar, state[:foo] + assert_equal :bar, state['foo'] + state_hash = state.to_hash + assert_kind_of Hash, state_hash + assert_equal :bar, state_hash[:foo] + end + + def test_hash_likeness_set_string + state = JSON.state.new + assert_equal nil, state[:foo] + assert_equal nil, state['foo'] + state['foo'] = :bar + assert_equal :bar, state[:foo] + assert_equal :bar, state['foo'] + state_hash = state.to_hash + assert_kind_of Hash, state_hash + assert_equal :bar, state_hash[:foo] + end + + def test_json_generate + assert_raise JSON::GeneratorError do + assert_equal true, generate(["\xea"]) + end + end + + def test_nesting + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + too_deep_ary = eval too_deep + assert_raise(JSON::NestingError) { generate too_deep_ary } + assert_raise(JSON::NestingError) { generate too_deep_ary, :max_nesting => 100 } + ok = generate too_deep_ary, :max_nesting => 101 + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => nil + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => false + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => 0 + assert_equal too_deep, ok + end + + def test_backslash + data = [ '\\.(?i:gif|jpe?g|png)$' ] + json = '["\\\\.(?i:gif|jpe?g|png)$"]' + assert_equal json, generate(data) + # + data = [ '\\"' ] + json = '["\\\\\""]' + assert_equal json, generate(data) + # + data = [ '/' ] + json = '["/"]' + assert_equal json, generate(data) + # + data = ['"'] + json = '["\""]' + assert_equal json, generate(data) + # + data = ["'"] + json = '["\\\'"]' + assert_equal '["\'"]', generate(data) + end + + def test_string_subclass + s = Class.new(String) do + def to_s; self; end + undef to_json + end + assert_nothing_raised(SystemStackError) 
do + assert_equal '["foo"]', JSON.generate([s.new('foo')]) + end + end + + if defined?(Encoding) + def test_nonutf8_encoding + assert_equal("\"5\u{b0}\"", "5\xb0".force_encoding("iso-8859-1").to_json) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generic_object_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generic_object_test 2.rb new file mode 100644 index 0000000..82742dc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generic_object_test 2.rb @@ -0,0 +1,82 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONGenericObjectTest < Test::Unit::TestCase + include JSON + + def setup + @go = GenericObject[ :a => 1, :b => 2 ] + end + + def test_attributes + assert_equal 1, @go.a + assert_equal 1, @go[:a] + assert_equal 2, @go.b + assert_equal 2, @go[:b] + assert_nil @go.c + assert_nil @go[:c] + end + + def test_generate_json + switch_json_creatable do + assert_equal @go, JSON(JSON(@go), :create_additions => true) + end + end + + def test_parse_json + assert_kind_of Hash, + JSON( + '{ "json_class": "JSON::GenericObject", "a": 1, "b": 2 }', + :create_additions => true + ) + switch_json_creatable do + assert_equal @go, l = + JSON( + '{ "json_class": "JSON::GenericObject", "a": 1, "b": 2 }', + :create_additions => true + ) + assert_equal 1, l.a + assert_equal @go, + l = JSON('{ "a": 1, "b": 2 }', :object_class => GenericObject) + assert_equal 1, l.a + assert_equal GenericObject[:a => GenericObject[:b => 2]], + l = JSON('{ "a": { "b": 2 } }', :object_class => GenericObject) + assert_equal 2, l.a.b + end + end + + def test_from_hash + result = GenericObject.from_hash( + :foo => { :bar => { :baz => true }, :quux => [ { :foobar => true } ] }) + assert_kind_of GenericObject, result.foo + assert_kind_of GenericObject, result.foo.bar + assert_equal true, result.foo.bar.baz + assert_kind_of GenericObject, result.foo.quux.first + assert_equal true, result.foo.quux.first.foobar + assert_equal true, GenericObject.from_hash(true) + end + + def test_json_generic_object_load + empty = JSON::GenericObject.load(nil) + assert_kind_of JSON::GenericObject, empty + simple_json = '{"json_class":"JSON::GenericObject","hello":"world"}' + simple = JSON::GenericObject.load(simple_json) + assert_kind_of JSON::GenericObject, simple + assert_equal "world", simple.hello + converting = JSON::GenericObject.load('{ "hello": "world" }') + assert_kind_of JSON::GenericObject, converting + assert_equal "world", converting.hello + + json = JSON::GenericObject.dump(JSON::GenericObject[:hello => 'world']) + assert_equal JSON(json), JSON('{"json_class":"JSON::GenericObject","hello":"world"}') + end + + private + + def switch_json_creatable + JSON::GenericObject.json_creatable = true + yield + ensure + JSON::GenericObject.json_creatable = false + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generic_object_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generic_object_test.rb new file mode 100644 index 0000000..82742dc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_generic_object_test.rb @@ -0,0 +1,82 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONGenericObjectTest < Test::Unit::TestCase + include JSON + + def setup + @go = GenericObject[ :a => 1, :b => 2 ] + end + + def test_attributes + assert_equal 1, @go.a + assert_equal 1, @go[:a] + assert_equal 2, @go.b + assert_equal 2, @go[:b] + assert_nil @go.c + assert_nil @go[:c] + end + + def test_generate_json 
+ switch_json_creatable do + assert_equal @go, JSON(JSON(@go), :create_additions => true) + end + end + + def test_parse_json + assert_kind_of Hash, + JSON( + '{ "json_class": "JSON::GenericObject", "a": 1, "b": 2 }', + :create_additions => true + ) + switch_json_creatable do + assert_equal @go, l = + JSON( + '{ "json_class": "JSON::GenericObject", "a": 1, "b": 2 }', + :create_additions => true + ) + assert_equal 1, l.a + assert_equal @go, + l = JSON('{ "a": 1, "b": 2 }', :object_class => GenericObject) + assert_equal 1, l.a + assert_equal GenericObject[:a => GenericObject[:b => 2]], + l = JSON('{ "a": { "b": 2 } }', :object_class => GenericObject) + assert_equal 2, l.a.b + end + end + + def test_from_hash + result = GenericObject.from_hash( + :foo => { :bar => { :baz => true }, :quux => [ { :foobar => true } ] }) + assert_kind_of GenericObject, result.foo + assert_kind_of GenericObject, result.foo.bar + assert_equal true, result.foo.bar.baz + assert_kind_of GenericObject, result.foo.quux.first + assert_equal true, result.foo.quux.first.foobar + assert_equal true, GenericObject.from_hash(true) + end + + def test_json_generic_object_load + empty = JSON::GenericObject.load(nil) + assert_kind_of JSON::GenericObject, empty + simple_json = '{"json_class":"JSON::GenericObject","hello":"world"}' + simple = JSON::GenericObject.load(simple_json) + assert_kind_of JSON::GenericObject, simple + assert_equal "world", simple.hello + converting = JSON::GenericObject.load('{ "hello": "world" }') + assert_kind_of JSON::GenericObject, converting + assert_equal "world", converting.hello + + json = JSON::GenericObject.dump(JSON::GenericObject[:hello => 'world']) + assert_equal JSON(json), JSON('{"json_class":"JSON::GenericObject","hello":"world"}') + end + + private + + def switch_json_creatable + JSON::GenericObject.json_creatable = true + yield + ensure + JSON::GenericObject.json_creatable = false + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_parser_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_parser_test 2.rb new file mode 100644 index 0000000..9946dd9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_parser_test 2.rb @@ -0,0 +1,472 @@ +# encoding: utf-8 +# frozen_string_literal: false +require 'test_helper' +require 'stringio' +require 'tempfile' +require 'ostruct' +require 'bigdecimal' + +class JSONParserTest < Test::Unit::TestCase + include JSON + + def test_construction + parser = JSON::Parser.new('test') + assert_equal 'test', parser.source + end + + def test_argument_encoding + source = "{}".encode("UTF-16") + JSON::Parser.new(source) + assert_equal Encoding::UTF_16, source.encoding + end if defined?(Encoding::UTF_16) + + def test_error_message_encoding + bug10705 = '[ruby-core:67386] [Bug #10705]' + json = ".\"\xE2\x88\x9A\"".force_encoding(Encoding::UTF_8) + e = assert_raise(JSON::ParserError) { + JSON::Ext::Parser.new(json).parse + } + assert_equal(Encoding::UTF_8, e.message.encoding, bug10705) + assert_include(e.message, json, bug10705) + end if defined?(Encoding::UTF_8) and defined?(JSON::Ext::Parser) + + def test_parsing + parser = JSON::Parser.new('"test"') + assert_equal 'test', parser.parse + end + + def test_parser_reset + parser = Parser.new('{"a":"b"}') + assert_equal({ 'a' => 'b' }, parser.parse) + assert_equal({ 'a' => 'b' }, parser.parse) + end + + def test_parse_values + assert_equal(nil, parse('null')) + assert_equal(false, parse('false')) + assert_equal(true, parse('true')) + assert_equal(-23, parse('-23')) + 
assert_equal(23, parse('23')) + assert_in_delta(0.23, parse('0.23'), 1e-2) + assert_in_delta(0.0, parse('0e0'), 1e-2) + assert_equal("", parse('""')) + assert_equal("foobar", parse('"foobar"')) + end + + def test_parse_simple_arrays + assert_equal([], parse('[]')) + assert_equal([], parse(' [ ] ')) + assert_equal([ nil ], parse('[null]')) + assert_equal([ false ], parse('[false]')) + assert_equal([ true ], parse('[true]')) + assert_equal([ -23 ], parse('[-23]')) + assert_equal([ 23 ], parse('[23]')) + assert_equal_float([ 0.23 ], parse('[0.23]')) + assert_equal_float([ 0.0 ], parse('[0e0]')) + assert_equal([""], parse('[""]')) + assert_equal(["foobar"], parse('["foobar"]')) + assert_equal([{}], parse('[{}]')) + end + + def test_parse_simple_objects + assert_equal({}, parse('{}')) + assert_equal({}, parse(' { } ')) + assert_equal({ "a" => nil }, parse('{ "a" : null}')) + assert_equal({ "a" => nil }, parse('{"a":null}')) + assert_equal({ "a" => false }, parse('{ "a" : false } ')) + assert_equal({ "a" => false }, parse('{"a":false}')) + assert_raise(JSON::ParserError) { parse('{false}') } + assert_equal({ "a" => true }, parse('{"a":true}')) + assert_equal({ "a" => true }, parse(' { "a" : true } ')) + assert_equal({ "a" => -23 }, parse(' { "a" : -23 } ')) + assert_equal({ "a" => -23 }, parse(' { "a" : -23 } ')) + assert_equal({ "a" => 23 }, parse('{"a":23 } ')) + assert_equal({ "a" => 23 }, parse(' { "a" : 23 } ')) + assert_equal({ "a" => 0.23 }, parse(' { "a" : 0.23 } ')) + assert_equal({ "a" => 0.23 }, parse(' { "a" : 0.23 } ')) + end + + def test_parse_numbers + assert_raise(JSON::ParserError) { parse('+23.2') } + assert_raise(JSON::ParserError) { parse('+23') } + assert_raise(JSON::ParserError) { parse('.23') } + assert_raise(JSON::ParserError) { parse('023') } + assert_equal(23, parse('23')) + assert_equal(-23, parse('-23')) + assert_equal_float(3.141, parse('3.141')) + assert_equal_float(-3.141, parse('-3.141')) + assert_equal_float(3.141, parse('3141e-3')) + assert_equal_float(3.141, parse('3141.1e-3')) + assert_equal_float(3.141, parse('3141E-3')) + assert_equal_float(3.141, parse('3141.0E-3')) + assert_equal_float(-3.141, parse('-3141.0e-3')) + assert_equal_float(-3.141, parse('-3141e-3')) + assert_raise(ParserError) { parse('NaN') } + assert parse('NaN', :allow_nan => true).nan? 
+ assert_raise(ParserError) { parse('Infinity') } + assert_equal(1.0/0, parse('Infinity', :allow_nan => true)) + assert_raise(ParserError) { parse('-Infinity') } + assert_equal(-1.0/0, parse('-Infinity', :allow_nan => true)) + end + + def test_parse_bigdecimals + assert_equal(BigDecimal, JSON.parse('{"foo": 9.01234567890123456789}', decimal_class: BigDecimal)["foo"].class) + assert_equal(BigDecimal("0.901234567890123456789E1"),JSON.parse('{"foo": 9.01234567890123456789}', decimal_class: BigDecimal)["foo"] ) + end + + if Array.method_defined?(:permutation) + def test_parse_more_complex_arrays + a = [ nil, false, true, "foßbar", [ "n€st€d", true ], { "nested" => true, "n€ßt€ð2" => {} }] + a.permutation.each do |perm| + json = pretty_generate(perm) + assert_equal perm, parse(json) + end + end + + def test_parse_complex_objects + a = [ nil, false, true, "foßbar", [ "n€st€d", true ], { "nested" => true, "n€ßt€ð2" => {} }] + a.permutation.each do |perm| + s = "a" + orig_obj = perm.inject({}) { |h, x| h[s.dup] = x; s = s.succ; h } + json = pretty_generate(orig_obj) + assert_equal orig_obj, parse(json) + end + end + end + + def test_parse_arrays + assert_equal([1,2,3], parse('[1,2,3]')) + assert_equal([1.2,2,3], parse('[1.2,2,3]')) + assert_equal([[],[[],[]]], parse('[[],[[],[]]]')) + assert_equal([], parse('[]')) + assert_equal([], parse(' [ ] ')) + assert_equal([1], parse('[1]')) + assert_equal([1], parse(' [ 1 ] ')) + ary = [[1], ["foo"], [3.14], [4711.0], [2.718], [nil], + [[1, -2, 3]], [false], [true]] + assert_equal(ary, + parse('[[1],["foo"],[3.14],[47.11e+2],[2718.0E-3],[null],[[1,-2,3]],[false],[true]]')) + assert_equal(ary, parse(%Q{ [ [1] , ["foo"] , [3.14] \t , [47.11e+2]\s + , [2718.0E-3 ],\r[ null] , [[1, -2, 3 ]], [false ],[ true]\n ] })) + end + + def test_parse_json_primitive_values + assert_raise(JSON::ParserError) { parse('') } + assert_raise(TypeError) { parse(nil) } + assert_raise(JSON::ParserError) { parse(' /* foo */ ') } + assert_equal nil, parse('null') + assert_equal false, parse('false') + assert_equal true, parse('true') + assert_equal 23, parse('23') + assert_equal 1, parse('1') + assert_equal_float 3.141, parse('3.141'), 1E-3 + assert_equal 2 ** 64, parse('18446744073709551616') + assert_equal 'foo', parse('"foo"') + assert parse('NaN', :allow_nan => true).nan? + assert parse('Infinity', :allow_nan => true).infinite? + assert parse('-Infinity', :allow_nan => true).infinite? 
+ assert_raise(JSON::ParserError) { parse('[ 1, ]') } + end + + def test_parse_some_strings + assert_equal([""], parse('[""]')) + assert_equal(["\\"], parse('["\\\\"]')) + assert_equal(['"'], parse('["\""]')) + assert_equal(['\\"\\'], parse('["\\\\\\"\\\\"]')) + assert_equal( + ["\"\b\n\r\t\0\037"], + parse('["\"\b\n\r\t\u0000\u001f"]') + ) + end + + def test_parse_big_integers + json1 = JSON(orig = (1 << 31) - 1) + assert_equal orig, parse(json1) + json2 = JSON(orig = 1 << 31) + assert_equal orig, parse(json2) + json3 = JSON(orig = (1 << 62) - 1) + assert_equal orig, parse(json3) + json4 = JSON(orig = 1 << 62) + assert_equal orig, parse(json4) + json5 = JSON(orig = 1 << 64) + assert_equal orig, parse(json5) + end + + def test_some_wrong_inputs + assert_raise(ParserError) { parse('[] bla') } + assert_raise(ParserError) { parse('[] 1') } + assert_raise(ParserError) { parse('[] []') } + assert_raise(ParserError) { parse('[] {}') } + assert_raise(ParserError) { parse('{} []') } + assert_raise(ParserError) { parse('{} {}') } + assert_raise(ParserError) { parse('[NULL]') } + assert_raise(ParserError) { parse('[FALSE]') } + assert_raise(ParserError) { parse('[TRUE]') } + assert_raise(ParserError) { parse('[07] ') } + assert_raise(ParserError) { parse('[0a]') } + assert_raise(ParserError) { parse('[1.]') } + assert_raise(ParserError) { parse(' ') } + end + + def test_symbolize_names + assert_equal({ "foo" => "bar", "baz" => "quux" }, + parse('{"foo":"bar", "baz":"quux"}')) + assert_equal({ :foo => "bar", :baz => "quux" }, + parse('{"foo":"bar", "baz":"quux"}', :symbolize_names => true)) + assert_raise(ArgumentError) do + parse('{}', :symbolize_names => true, :create_additions => true) + end + end + + def test_parse_comments + json = < "value1", "key2" => "value2", "key3" => "value3" }, + parse(json)) + json = < "value1" }, parse(json)) + end + + def test_nesting + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + too_deep_ary = eval too_deep + assert_raise(JSON::NestingError) { parse too_deep } + assert_raise(JSON::NestingError) { parse too_deep, :max_nesting => 100 } + ok = parse too_deep, :max_nesting => 101 + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => nil + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => false + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => 0 + assert_equal too_deep_ary, ok + end + + def test_backslash + data = [ '\\.(?i:gif|jpe?g|png)$' ] + json = '["\\\\.(?i:gif|jpe?g|png)$"]' + assert_equal data, parse(json) + # + data = [ '\\"' ] + json = '["\\\\\""]' + assert_equal data, parse(json) + # + json = '["/"]' + data = [ '/' ] + assert_equal data, parse(json) + # + json = '["\""]' + data = ['"'] + assert_equal data, parse(json) + # + json = '["\\\'"]' + data = ["'"] + assert_equal data, parse(json) + end + + class SubArray < Array + def <<(v) + @shifted = true + super + end + + def shifted? + @shifted + end + end + + class SubArray2 < Array + def to_json(*a) + { + JSON.create_id => self.class.name, + 'ary' => to_a, + }.to_json(*a) + end + + def self.json_create(o) + o.delete JSON.create_id + o['ary'] + end + end + + class SubArrayWrapper + def initialize + @data = [] + end + + attr_reader :data + + def [](index) + @data[index] + end + + def <<(value) + @data << value + @shifted = true + end + + def shifted? 
+ @shifted + end + end + + def test_parse_array_custom_array_derived_class + res = parse('[1,2]', :array_class => SubArray) + assert_equal([1,2], res) + assert_equal(SubArray, res.class) + assert res.shifted? + end + + def test_parse_array_custom_non_array_derived_class + res = parse('[1,2]', :array_class => SubArrayWrapper) + assert_equal([1,2], res.data) + assert_equal(SubArrayWrapper, res.class) + assert res.shifted? + end + + def test_parse_object + assert_equal({}, parse('{}')) + assert_equal({}, parse(' { } ')) + assert_equal({'foo'=>'bar'}, parse('{"foo":"bar"}')) + assert_equal({'foo'=>'bar'}, parse(' { "foo" : "bar" } ')) + end + + class SubHash < Hash + def []=(k, v) + @item_set = true + super + end + + def item_set? + @item_set + end + end + + class SubHash2 < Hash + def to_json(*a) + { + JSON.create_id => self.class.name, + }.merge(self).to_json(*a) + end + + def self.json_create(o) + o.delete JSON.create_id + self[o] + end + end + + class SubOpenStruct < OpenStruct + def [](k) + __send__(k) + end + + def []=(k, v) + @item_set = true + __send__("#{k}=", v) + end + + def item_set? + @item_set + end + end + + def test_parse_object_custom_hash_derived_class + res = parse('{"foo":"bar"}', :object_class => SubHash) + assert_equal({"foo" => "bar"}, res) + assert_equal(SubHash, res.class) + assert res.item_set? + end + + def test_parse_object_custom_non_hash_derived_class + res = parse('{"foo":"bar"}', :object_class => SubOpenStruct) + assert_equal "bar", res.foo + assert_equal(SubOpenStruct, res.class) + assert res.item_set? + end + + def test_parse_generic_object + res = parse( + '{"foo":"bar", "baz":{}}', + :object_class => JSON::GenericObject + ) + assert_equal(JSON::GenericObject, res.class) + assert_equal "bar", res.foo + assert_equal "bar", res["foo"] + assert_equal "bar", res[:foo] + assert_equal "bar", res.to_hash[:foo] + assert_equal(JSON::GenericObject, res.baz.class) + end + + def test_generate_core_subclasses_with_new_to_json + obj = SubHash2["foo" => SubHash2["bar" => true]] + obj_json = JSON(obj) + obj_again = parse(obj_json, :create_additions => true) + assert_kind_of SubHash2, obj_again + assert_kind_of SubHash2, obj_again['foo'] + assert obj_again['foo']['bar'] + assert_equal obj, obj_again + assert_equal ["foo"], + JSON(JSON(SubArray2["foo"]), :create_additions => true) + end + + def test_generate_core_subclasses_with_default_to_json + assert_equal '{"foo":"bar"}', JSON(SubHash["foo" => "bar"]) + assert_equal '["foo"]', JSON(SubArray["foo"]) + end + + def test_generate_of_core_subclasses + obj = SubHash["foo" => SubHash["bar" => true]] + obj_json = JSON(obj) + obj_again = JSON(obj_json) + assert_kind_of Hash, obj_again + assert_kind_of Hash, obj_again['foo'] + assert obj_again['foo']['bar'] + assert_equal obj, obj_again + end + + def test_parsing_frozen_ascii8bit_string + assert_equal( + { 'foo' => 'bar' }, + JSON('{ "foo": "bar" }'.force_encoding(Encoding::ASCII_8BIT).freeze) + ) + end + + private + + def assert_equal_float(expected, actual, delta = 1e-2) + Array === expected and expected = expected.first + Array === actual and actual = actual.first + assert_in_delta(expected, actual, delta) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_parser_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_parser_test.rb new file mode 100644 index 0000000..9946dd9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_parser_test.rb @@ -0,0 +1,472 @@ +# encoding: utf-8 +# frozen_string_literal: false +require 
'test_helper' +require 'stringio' +require 'tempfile' +require 'ostruct' +require 'bigdecimal' + +class JSONParserTest < Test::Unit::TestCase + include JSON + + def test_construction + parser = JSON::Parser.new('test') + assert_equal 'test', parser.source + end + + def test_argument_encoding + source = "{}".encode("UTF-16") + JSON::Parser.new(source) + assert_equal Encoding::UTF_16, source.encoding + end if defined?(Encoding::UTF_16) + + def test_error_message_encoding + bug10705 = '[ruby-core:67386] [Bug #10705]' + json = ".\"\xE2\x88\x9A\"".force_encoding(Encoding::UTF_8) + e = assert_raise(JSON::ParserError) { + JSON::Ext::Parser.new(json).parse + } + assert_equal(Encoding::UTF_8, e.message.encoding, bug10705) + assert_include(e.message, json, bug10705) + end if defined?(Encoding::UTF_8) and defined?(JSON::Ext::Parser) + + def test_parsing + parser = JSON::Parser.new('"test"') + assert_equal 'test', parser.parse + end + + def test_parser_reset + parser = Parser.new('{"a":"b"}') + assert_equal({ 'a' => 'b' }, parser.parse) + assert_equal({ 'a' => 'b' }, parser.parse) + end + + def test_parse_values + assert_equal(nil, parse('null')) + assert_equal(false, parse('false')) + assert_equal(true, parse('true')) + assert_equal(-23, parse('-23')) + assert_equal(23, parse('23')) + assert_in_delta(0.23, parse('0.23'), 1e-2) + assert_in_delta(0.0, parse('0e0'), 1e-2) + assert_equal("", parse('""')) + assert_equal("foobar", parse('"foobar"')) + end + + def test_parse_simple_arrays + assert_equal([], parse('[]')) + assert_equal([], parse(' [ ] ')) + assert_equal([ nil ], parse('[null]')) + assert_equal([ false ], parse('[false]')) + assert_equal([ true ], parse('[true]')) + assert_equal([ -23 ], parse('[-23]')) + assert_equal([ 23 ], parse('[23]')) + assert_equal_float([ 0.23 ], parse('[0.23]')) + assert_equal_float([ 0.0 ], parse('[0e0]')) + assert_equal([""], parse('[""]')) + assert_equal(["foobar"], parse('["foobar"]')) + assert_equal([{}], parse('[{}]')) + end + + def test_parse_simple_objects + assert_equal({}, parse('{}')) + assert_equal({}, parse(' { } ')) + assert_equal({ "a" => nil }, parse('{ "a" : null}')) + assert_equal({ "a" => nil }, parse('{"a":null}')) + assert_equal({ "a" => false }, parse('{ "a" : false } ')) + assert_equal({ "a" => false }, parse('{"a":false}')) + assert_raise(JSON::ParserError) { parse('{false}') } + assert_equal({ "a" => true }, parse('{"a":true}')) + assert_equal({ "a" => true }, parse(' { "a" : true } ')) + assert_equal({ "a" => -23 }, parse(' { "a" : -23 } ')) + assert_equal({ "a" => -23 }, parse(' { "a" : -23 } ')) + assert_equal({ "a" => 23 }, parse('{"a":23 } ')) + assert_equal({ "a" => 23 }, parse(' { "a" : 23 } ')) + assert_equal({ "a" => 0.23 }, parse(' { "a" : 0.23 } ')) + assert_equal({ "a" => 0.23 }, parse(' { "a" : 0.23 } ')) + end + + def test_parse_numbers + assert_raise(JSON::ParserError) { parse('+23.2') } + assert_raise(JSON::ParserError) { parse('+23') } + assert_raise(JSON::ParserError) { parse('.23') } + assert_raise(JSON::ParserError) { parse('023') } + assert_equal(23, parse('23')) + assert_equal(-23, parse('-23')) + assert_equal_float(3.141, parse('3.141')) + assert_equal_float(-3.141, parse('-3.141')) + assert_equal_float(3.141, parse('3141e-3')) + assert_equal_float(3.141, parse('3141.1e-3')) + assert_equal_float(3.141, parse('3141E-3')) + assert_equal_float(3.141, parse('3141.0E-3')) + assert_equal_float(-3.141, parse('-3141.0e-3')) + assert_equal_float(-3.141, parse('-3141e-3')) + assert_raise(ParserError) { parse('NaN') } + assert 
parse('NaN', :allow_nan => true).nan? + assert_raise(ParserError) { parse('Infinity') } + assert_equal(1.0/0, parse('Infinity', :allow_nan => true)) + assert_raise(ParserError) { parse('-Infinity') } + assert_equal(-1.0/0, parse('-Infinity', :allow_nan => true)) + end + + def test_parse_bigdecimals + assert_equal(BigDecimal, JSON.parse('{"foo": 9.01234567890123456789}', decimal_class: BigDecimal)["foo"].class) + assert_equal(BigDecimal("0.901234567890123456789E1"),JSON.parse('{"foo": 9.01234567890123456789}', decimal_class: BigDecimal)["foo"] ) + end + + if Array.method_defined?(:permutation) + def test_parse_more_complex_arrays + a = [ nil, false, true, "foßbar", [ "n€st€d", true ], { "nested" => true, "n€ßt€ð2" => {} }] + a.permutation.each do |perm| + json = pretty_generate(perm) + assert_equal perm, parse(json) + end + end + + def test_parse_complex_objects + a = [ nil, false, true, "foßbar", [ "n€st€d", true ], { "nested" => true, "n€ßt€ð2" => {} }] + a.permutation.each do |perm| + s = "a" + orig_obj = perm.inject({}) { |h, x| h[s.dup] = x; s = s.succ; h } + json = pretty_generate(orig_obj) + assert_equal orig_obj, parse(json) + end + end + end + + def test_parse_arrays + assert_equal([1,2,3], parse('[1,2,3]')) + assert_equal([1.2,2,3], parse('[1.2,2,3]')) + assert_equal([[],[[],[]]], parse('[[],[[],[]]]')) + assert_equal([], parse('[]')) + assert_equal([], parse(' [ ] ')) + assert_equal([1], parse('[1]')) + assert_equal([1], parse(' [ 1 ] ')) + ary = [[1], ["foo"], [3.14], [4711.0], [2.718], [nil], + [[1, -2, 3]], [false], [true]] + assert_equal(ary, + parse('[[1],["foo"],[3.14],[47.11e+2],[2718.0E-3],[null],[[1,-2,3]],[false],[true]]')) + assert_equal(ary, parse(%Q{ [ [1] , ["foo"] , [3.14] \t , [47.11e+2]\s + , [2718.0E-3 ],\r[ null] , [[1, -2, 3 ]], [false ],[ true]\n ] })) + end + + def test_parse_json_primitive_values + assert_raise(JSON::ParserError) { parse('') } + assert_raise(TypeError) { parse(nil) } + assert_raise(JSON::ParserError) { parse(' /* foo */ ') } + assert_equal nil, parse('null') + assert_equal false, parse('false') + assert_equal true, parse('true') + assert_equal 23, parse('23') + assert_equal 1, parse('1') + assert_equal_float 3.141, parse('3.141'), 1E-3 + assert_equal 2 ** 64, parse('18446744073709551616') + assert_equal 'foo', parse('"foo"') + assert parse('NaN', :allow_nan => true).nan? + assert parse('Infinity', :allow_nan => true).infinite? + assert parse('-Infinity', :allow_nan => true).infinite? 
+ assert_raise(JSON::ParserError) { parse('[ 1, ]') } + end + + def test_parse_some_strings + assert_equal([""], parse('[""]')) + assert_equal(["\\"], parse('["\\\\"]')) + assert_equal(['"'], parse('["\""]')) + assert_equal(['\\"\\'], parse('["\\\\\\"\\\\"]')) + assert_equal( + ["\"\b\n\r\t\0\037"], + parse('["\"\b\n\r\t\u0000\u001f"]') + ) + end + + def test_parse_big_integers + json1 = JSON(orig = (1 << 31) - 1) + assert_equal orig, parse(json1) + json2 = JSON(orig = 1 << 31) + assert_equal orig, parse(json2) + json3 = JSON(orig = (1 << 62) - 1) + assert_equal orig, parse(json3) + json4 = JSON(orig = 1 << 62) + assert_equal orig, parse(json4) + json5 = JSON(orig = 1 << 64) + assert_equal orig, parse(json5) + end + + def test_some_wrong_inputs + assert_raise(ParserError) { parse('[] bla') } + assert_raise(ParserError) { parse('[] 1') } + assert_raise(ParserError) { parse('[] []') } + assert_raise(ParserError) { parse('[] {}') } + assert_raise(ParserError) { parse('{} []') } + assert_raise(ParserError) { parse('{} {}') } + assert_raise(ParserError) { parse('[NULL]') } + assert_raise(ParserError) { parse('[FALSE]') } + assert_raise(ParserError) { parse('[TRUE]') } + assert_raise(ParserError) { parse('[07] ') } + assert_raise(ParserError) { parse('[0a]') } + assert_raise(ParserError) { parse('[1.]') } + assert_raise(ParserError) { parse(' ') } + end + + def test_symbolize_names + assert_equal({ "foo" => "bar", "baz" => "quux" }, + parse('{"foo":"bar", "baz":"quux"}')) + assert_equal({ :foo => "bar", :baz => "quux" }, + parse('{"foo":"bar", "baz":"quux"}', :symbolize_names => true)) + assert_raise(ArgumentError) do + parse('{}', :symbolize_names => true, :create_additions => true) + end + end + + def test_parse_comments + json = < "value1", "key2" => "value2", "key3" => "value3" }, + parse(json)) + json = < "value1" }, parse(json)) + end + + def test_nesting + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + too_deep_ary = eval too_deep + assert_raise(JSON::NestingError) { parse too_deep } + assert_raise(JSON::NestingError) { parse too_deep, :max_nesting => 100 } + ok = parse too_deep, :max_nesting => 101 + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => nil + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => false + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => 0 + assert_equal too_deep_ary, ok + end + + def test_backslash + data = [ '\\.(?i:gif|jpe?g|png)$' ] + json = '["\\\\.(?i:gif|jpe?g|png)$"]' + assert_equal data, parse(json) + # + data = [ '\\"' ] + json = '["\\\\\""]' + assert_equal data, parse(json) + # + json = '["/"]' + data = [ '/' ] + assert_equal data, parse(json) + # + json = '["\""]' + data = ['"'] + assert_equal data, parse(json) + # + json = '["\\\'"]' + data = ["'"] + assert_equal data, parse(json) + end + + class SubArray < Array + def <<(v) + @shifted = true + super + end + + def shifted? + @shifted + end + end + + class SubArray2 < Array + def to_json(*a) + { + JSON.create_id => self.class.name, + 'ary' => to_a, + }.to_json(*a) + end + + def self.json_create(o) + o.delete JSON.create_id + o['ary'] + end + end + + class SubArrayWrapper + def initialize + @data = [] + end + + attr_reader :data + + def [](index) + @data[index] + end + + def <<(value) + @data << value + @shifted = true + end + + def shifted? 
+ @shifted + end + end + + def test_parse_array_custom_array_derived_class + res = parse('[1,2]', :array_class => SubArray) + assert_equal([1,2], res) + assert_equal(SubArray, res.class) + assert res.shifted? + end + + def test_parse_array_custom_non_array_derived_class + res = parse('[1,2]', :array_class => SubArrayWrapper) + assert_equal([1,2], res.data) + assert_equal(SubArrayWrapper, res.class) + assert res.shifted? + end + + def test_parse_object + assert_equal({}, parse('{}')) + assert_equal({}, parse(' { } ')) + assert_equal({'foo'=>'bar'}, parse('{"foo":"bar"}')) + assert_equal({'foo'=>'bar'}, parse(' { "foo" : "bar" } ')) + end + + class SubHash < Hash + def []=(k, v) + @item_set = true + super + end + + def item_set? + @item_set + end + end + + class SubHash2 < Hash + def to_json(*a) + { + JSON.create_id => self.class.name, + }.merge(self).to_json(*a) + end + + def self.json_create(o) + o.delete JSON.create_id + self[o] + end + end + + class SubOpenStruct < OpenStruct + def [](k) + __send__(k) + end + + def []=(k, v) + @item_set = true + __send__("#{k}=", v) + end + + def item_set? + @item_set + end + end + + def test_parse_object_custom_hash_derived_class + res = parse('{"foo":"bar"}', :object_class => SubHash) + assert_equal({"foo" => "bar"}, res) + assert_equal(SubHash, res.class) + assert res.item_set? + end + + def test_parse_object_custom_non_hash_derived_class + res = parse('{"foo":"bar"}', :object_class => SubOpenStruct) + assert_equal "bar", res.foo + assert_equal(SubOpenStruct, res.class) + assert res.item_set? + end + + def test_parse_generic_object + res = parse( + '{"foo":"bar", "baz":{}}', + :object_class => JSON::GenericObject + ) + assert_equal(JSON::GenericObject, res.class) + assert_equal "bar", res.foo + assert_equal "bar", res["foo"] + assert_equal "bar", res[:foo] + assert_equal "bar", res.to_hash[:foo] + assert_equal(JSON::GenericObject, res.baz.class) + end + + def test_generate_core_subclasses_with_new_to_json + obj = SubHash2["foo" => SubHash2["bar" => true]] + obj_json = JSON(obj) + obj_again = parse(obj_json, :create_additions => true) + assert_kind_of SubHash2, obj_again + assert_kind_of SubHash2, obj_again['foo'] + assert obj_again['foo']['bar'] + assert_equal obj, obj_again + assert_equal ["foo"], + JSON(JSON(SubArray2["foo"]), :create_additions => true) + end + + def test_generate_core_subclasses_with_default_to_json + assert_equal '{"foo":"bar"}', JSON(SubHash["foo" => "bar"]) + assert_equal '["foo"]', JSON(SubArray["foo"]) + end + + def test_generate_of_core_subclasses + obj = SubHash["foo" => SubHash["bar" => true]] + obj_json = JSON(obj) + obj_again = JSON(obj_json) + assert_kind_of Hash, obj_again + assert_kind_of Hash, obj_again['foo'] + assert obj_again['foo']['bar'] + assert_equal obj, obj_again + end + + def test_parsing_frozen_ascii8bit_string + assert_equal( + { 'foo' => 'bar' }, + JSON('{ "foo": "bar" }'.force_encoding(Encoding::ASCII_8BIT).freeze) + ) + end + + private + + def assert_equal_float(expected, actual, delta = 1e-2) + Array === expected and expected = expected.first + Array === actual and actual = actual.first + assert_in_delta(expected, actual, delta) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_string_matching_test 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_string_matching_test 2.rb new file mode 100644 index 0000000..5d55dc3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_string_matching_test 2.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +require 
'test_helper' +require 'time' + +class JSONStringMatchingTest < Test::Unit::TestCase + include JSON + + class TestTime < ::Time + def self.json_create(string) + Time.parse(string) + end + + def to_json(*) + %{"#{strftime('%FT%T%z')}"} + end + + def ==(other) + to_i == other.to_i + end + end + + def test_match_date + t = TestTime.new + t_json = [ t ].to_json + time_regexp = /\A\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4}\z/ + assert_equal [ t ], + parse( + t_json, + :create_additions => true, + :match_string => { time_regexp => TestTime } + ) + assert_equal [ t.strftime('%FT%T%z') ], + parse( + t_json, + :match_string => { time_regexp => TestTime } + ) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_string_matching_test.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_string_matching_test.rb new file mode 100644 index 0000000..5d55dc3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/json_string_matching_test.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +require 'test_helper' +require 'time' + +class JSONStringMatchingTest < Test::Unit::TestCase + include JSON + + class TestTime < ::Time + def self.json_create(string) + Time.parse(string) + end + + def to_json(*) + %{"#{strftime('%FT%T%z')}"} + end + + def ==(other) + to_i == other.to_i + end + end + + def test_match_date + t = TestTime.new + t_json = [ t ].to_json + time_regexp = /\A\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4}\z/ + assert_equal [ t ], + parse( + t_json, + :create_additions => true, + :match_string => { time_regexp => TestTime } + ) + assert_equal [ t.strftime('%FT%T%z') ], + parse( + t_json, + :match_string => { time_regexp => TestTime } + ) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/test_helper 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/test_helper 2.rb new file mode 100644 index 0000000..c5ec0fc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/test_helper 2.rb @@ -0,0 +1,17 @@ +case ENV['JSON'] +when 'pure' + $:.unshift 'lib' + require 'json/pure' +when 'ext' + $:.unshift 'ext', 'lib' + require 'json/ext' +else + $:.unshift 'ext', 'lib' + require 'json' +end + +require 'test/unit' +begin + require 'byebug' +rescue LoadError +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/test_helper.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/test_helper.rb new file mode 100644 index 0000000..c5ec0fc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tests/test_helper.rb @@ -0,0 +1,17 @@ +case ENV['JSON'] +when 'pure' + $:.unshift 'lib' + require 'json/pure' +when 'ext' + $:.unshift 'ext', 'lib' + require 'json/ext' +else + $:.unshift 'ext', 'lib' + require 'json' +end + +require 'test/unit' +begin + require 'byebug' +rescue LoadError +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/diff 2.sh b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/diff 2.sh new file mode 100755 index 0000000..89385b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/diff 2.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +files=`find ext -name '*.[ch]' -o -name parser.rl` + +for f in $files +do + b=`basename $f` + g=`find ../ruby/ext/json -name $b` + d=`diff -u $f $g` + test -z "$d" && continue + echo "$d" + read -p "Edit diff of $b? 
" a + case $a in + [yY]*) + vimdiff $f $g + ;; + esac +done diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/diff.sh b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/diff.sh new file mode 100755 index 0000000..89385b0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/diff.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +files=`find ext -name '*.[ch]' -o -name parser.rl` + +for f in $files +do + b=`basename $f` + g=`find ../ruby/ext/json -name $b` + d=`diff -u $f $g` + test -z "$d" && continue + echo "$d" + read -p "Edit diff of $b? " a + case $a in + [yY]*) + vimdiff $f $g + ;; + esac +done diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/fuzz 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/fuzz 2.rb new file mode 100755 index 0000000..8261930 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/fuzz 2.rb @@ -0,0 +1,131 @@ +require 'json' + +class Fuzzer + def initialize(n, freqs = {}) + sum = freqs.inject(0.0) { |s, x| s + x.last } + freqs.each_key { |x| freqs[x] /= sum } + s = 0.0 + freqs.each_key do |x| + freqs[x] = s .. (s + t = freqs[x]) + s += t + end + @freqs = freqs + @n = n + @alpha = (0..0xff).to_a + end + + def random_string + s = '' + 30.times { s << @alpha[rand(@alpha.size)] } + s + end + + def pick + r = rand + found = @freqs.find { |k, f| f.include? rand } + found && found.first + end + + def make_pick + k = pick + case + when k == Hash, k == Array + k.new + when k == true, k == false, k == nil + k + when k == String + random_string + when k == Fixnum + rand(2 ** 30) - 2 ** 29 + when k == Bignum + rand(2 ** 70) - 2 ** 69 + end + end + + def fuzz(current = nil) + if @n > 0 + case current + when nil + @n -= 1 + current = fuzz [ Hash, Array ][rand(2)].new + when Array + while @n > 0 + @n -= 1 + current << case p = make_pick + when Array, Hash + fuzz(p) + else + p + end + end + when Hash + while @n > 0 + @n -= 1 + current[random_string] = case p = make_pick + when Array, Hash + fuzz(p) + else + p + end + end + end + end + current + end +end + +class MyState < JSON.state + WS = " \r\t\n" + + def initialize + super( + :indent => make_spaces, + :space => make_spaces, + :space_before => make_spaces, + :object_nl => make_spaces, + :array_nl => make_spaces, + :max_nesting => false + ) + end + + def make_spaces + s = '' + rand(1).times { s << WS[rand(WS.size)] } + s + end +end + +n = (ARGV.shift || 500).to_i +loop do + fuzzer = Fuzzer.new(n, + Hash => 25, + Array => 25, + String => 10, + Fixnum => 10, + Bignum => 10, + nil => 5, + true => 5, + false => 5 + ) + o1 = fuzzer.fuzz + json = JSON.generate o1, MyState.new + if $DEBUG + puts "-" * 80 + puts json, json.size + else + puts json.size + end + begin + o2 = JSON.parse(json, :max_nesting => false) + rescue JSON::ParserError => e + puts "Caught #{e.class}: #{e.message}\n#{e.backtrace * "\n"}" + puts "o1 = #{o1.inspect}", "json = #{json}", "json_str = #{json.inspect}" + puts "locals = #{local_variables.inspect}" + exit + end + if o1 != o2 + puts "mismatch", "o1 = #{o1.inspect}", "o2 = #{o2.inspect}", + "json = #{json}", "json_str = #{json.inspect}" + puts "locals = #{local_variables.inspect}" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/fuzz.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/fuzz.rb new file mode 100755 index 0000000..8261930 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/fuzz.rb @@ -0,0 +1,131 @@ +require 'json' + +class Fuzzer + def initialize(n, freqs = {}) + sum = freqs.inject(0.0) { |s, x| s + x.last } + freqs.each_key { |x| 
freqs[x] /= sum } + s = 0.0 + freqs.each_key do |x| + freqs[x] = s .. (s + t = freqs[x]) + s += t + end + @freqs = freqs + @n = n + @alpha = (0..0xff).to_a + end + + def random_string + s = '' + 30.times { s << @alpha[rand(@alpha.size)] } + s + end + + def pick + r = rand + found = @freqs.find { |k, f| f.include? rand } + found && found.first + end + + def make_pick + k = pick + case + when k == Hash, k == Array + k.new + when k == true, k == false, k == nil + k + when k == String + random_string + when k == Fixnum + rand(2 ** 30) - 2 ** 29 + when k == Bignum + rand(2 ** 70) - 2 ** 69 + end + end + + def fuzz(current = nil) + if @n > 0 + case current + when nil + @n -= 1 + current = fuzz [ Hash, Array ][rand(2)].new + when Array + while @n > 0 + @n -= 1 + current << case p = make_pick + when Array, Hash + fuzz(p) + else + p + end + end + when Hash + while @n > 0 + @n -= 1 + current[random_string] = case p = make_pick + when Array, Hash + fuzz(p) + else + p + end + end + end + end + current + end +end + +class MyState < JSON.state + WS = " \r\t\n" + + def initialize + super( + :indent => make_spaces, + :space => make_spaces, + :space_before => make_spaces, + :object_nl => make_spaces, + :array_nl => make_spaces, + :max_nesting => false + ) + end + + def make_spaces + s = '' + rand(1).times { s << WS[rand(WS.size)] } + s + end +end + +n = (ARGV.shift || 500).to_i +loop do + fuzzer = Fuzzer.new(n, + Hash => 25, + Array => 25, + String => 10, + Fixnum => 10, + Bignum => 10, + nil => 5, + true => 5, + false => 5 + ) + o1 = fuzzer.fuzz + json = JSON.generate o1, MyState.new + if $DEBUG + puts "-" * 80 + puts json, json.size + else + puts json.size + end + begin + o2 = JSON.parse(json, :max_nesting => false) + rescue JSON::ParserError => e + puts "Caught #{e.class}: #{e.message}\n#{e.backtrace * "\n"}" + puts "o1 = #{o1.inspect}", "json = #{json}", "json_str = #{json.inspect}" + puts "locals = #{local_variables.inspect}" + exit + end + if o1 != o2 + puts "mismatch", "o1 = #{o1.inspect}", "o2 = #{o2.inspect}", + "json = #{json}", "json_str = #{json.inspect}" + puts "locals = #{local_variables.inspect}" + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/server 2.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/server 2.rb new file mode 100755 index 0000000..184a01c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/server 2.rb @@ -0,0 +1,62 @@ +#!/usr/bin/env ruby +# encoding: utf-8 + +require 'webrick' +include WEBrick +$:.unshift 'ext' +$:.unshift 'lib' +require 'json' + +class JSONServlet < HTTPServlet::AbstractServlet + @@count = 1 + + def do_GET(req, res) + obj = { + "TIME" => Time.now.strftime("%FT%T"), + "foo" => "Bär", + "bar" => "© ≠ €!", + 'a' => 2, + 'b' => 3.141, + 'COUNT' => @@count += 1, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "松本行弘", + 'h' => 1000.0, + 'i' => 0.001, + 'j' => "\xf0\xa0\x80\x81", + } + res.body = JSON.generate obj + res['Content-Type'] = "application/json" + end +end + +def create_server(err, dir, port) + dir = File.expand_path(dir) + err.puts "Surf to:", "http://#{Socket.gethostname}:#{port}" + + s = HTTPServer.new( + :Port => port, + :DocumentRoot => dir, + :Logger => WEBrick::Log.new(err), + :AccessLog => [ + [ err, WEBrick::AccessLog::COMMON_LOG_FORMAT ], + [ err, WEBrick::AccessLog::REFERER_LOG_FORMAT ], + [ err, WEBrick::AccessLog::AGENT_LOG_FORMAT ] + ] + ) + s.mount("/json", JSONServlet) + s +end + +default_dir = File.expand_path(File.join(File.dirname(__FILE__), '..', 'data')) +dir = 
ARGV.shift || default_dir +port = (ARGV.shift || 6666).to_i +s = create_server(STDERR, dir, 6666) +t = Thread.new { s.start } +trap(:INT) do + s.shutdown + t.join + exit +end +sleep diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/server.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/server.rb new file mode 100755 index 0000000..184a01c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.3.1/tools/server.rb @@ -0,0 +1,62 @@ +#!/usr/bin/env ruby +# encoding: utf-8 + +require 'webrick' +include WEBrick +$:.unshift 'ext' +$:.unshift 'lib' +require 'json' + +class JSONServlet < HTTPServlet::AbstractServlet + @@count = 1 + + def do_GET(req, res) + obj = { + "TIME" => Time.now.strftime("%FT%T"), + "foo" => "Bär", + "bar" => "© ≠ €!", + 'a' => 2, + 'b' => 3.141, + 'COUNT' => @@count += 1, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "松本行弘", + 'h' => 1000.0, + 'i' => 0.001, + 'j' => "\xf0\xa0\x80\x81", + } + res.body = JSON.generate obj + res['Content-Type'] = "application/json" + end +end + +def create_server(err, dir, port) + dir = File.expand_path(dir) + err.puts "Surf to:", "http://#{Socket.gethostname}:#{port}" + + s = HTTPServer.new( + :Port => port, + :DocumentRoot => dir, + :Logger => WEBrick::Log.new(err), + :AccessLog => [ + [ err, WEBrick::AccessLog::COMMON_LOG_FORMAT ], + [ err, WEBrick::AccessLog::REFERER_LOG_FORMAT ], + [ err, WEBrick::AccessLog::AGENT_LOG_FORMAT ] + ] + ) + s.mount("/json", JSONServlet) + s +end + +default_dir = File.expand_path(File.join(File.dirname(__FILE__), '..', 'data')) +dir = ARGV.shift || default_dir +port = (ARGV.shift || 6666).to_i +s = create_server(STDERR, dir, 6666) +t = Thread.new { s.start } +trap(:INT) do + s.shutdown + t.join + exit +end +sleep diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/CHANGES.md b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/CHANGES.md new file mode 100644 index 0000000..ba66514 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/CHANGES.md @@ -0,0 +1,457 @@ +# Changes + +### 2021-10-24 (2.6.1) + +* Restore version.rb with 2.6.1 + +### 2021-10-14 (2.6.0) + +* Use `rb_enc_interned_str` if available to reduce allocations in `freeze: true` mode. #451. +* Bump required_ruby_version to 2.3. +* Fix compatibility with `GC.compact`. +* Fix some compilation warnings. #469 + +## 2020-12-22 (2.5.1) + +* Restore the compatibility for constants of JSON class. + +## 2020-12-22 (2.5.0) + +* Ready to Ractor-safe at Ruby 3.0. + +## 2020-12-17 (2.4.1) + +* Restore version.rb with 2.4.1 + +## 2020-12-15 (2.4.0) + +* Implement a freeze: parser option #447 +* Fix an issue with generate_pretty and empty objects in the Ruby and Java implementations #449 +* Fix JSON.load_file doc #448 +* Fix pure parser with unclosed arrays / objects #425 +* bundle the LICENSE file in the gem #444 +* Add an option to escape forward slash character #405 +* RDoc for JSON #439 #446 #442 #434 #433 #430 + +## 2020-06-30 (2.3.1) + +* Spelling and grammar fixes for comments. Pull request #191 by Josh + Kline. +* Enhance generic JSON and #generate docs. Pull request #347 by Victor + Shepelev. +* Add :nodoc: for GeneratorMethods. Pull request #349 by Victor Shepelev. +* Baseline changes to help (JRuby) development. Pull request #371 by Karol + Bucek. +* Add metadata for rubygems.org. Pull request #379 by Alexandre ZANNI. +* Remove invalid JSON.generate description from JSON module rdoc. Pull + request #384 by Jeremy Evans. +* Test with TruffleRuby in CI. Pull request #402 by Benoit Daloze. 
+* Rdoc enhancements. Pull request #413 by Burdette Lamar. +* Fixtures/ are not being tested... Pull request #416 by Marc-André + Lafortune. +* Use frozen string for hash key. Pull request #420 by Marc-André + Lafortune. +* Added :call-seq: to RDoc for some methods. Pull request #422 by Burdette + Lamar. +* Small typo fix. Pull request #423 by Marc-André Lafortune. + +## 2019-12-11 (2.3.0) + * Fix default of `create_additions` to always be `false` for `JSON(user_input)` + and `JSON.parse(user_input, nil)`. + Note that `JSON.load` remains with default `true` and is meant for internal + serialization of trusted data. [CVE-2020-10663] + * Fix passing args all #to_json in json/add/*. + * Fix encoding issues + * Fix issues of keyword vs positional parameter + * Fix JSON::Parser against bigdecimal updates + * Bug fixes to JRuby port + +## 2019-02-21 (2.2.0) + * Adds support for 2.6 BigDecimal and ruby standard library Set datetype. + +## 2017-04-18 (2.1.0) + * Allow passing of `decimal_class` option to specify a class as which to parse + JSON float numbers. +## 2017-03-23 (2.0.4) + * Raise exception for incomplete unicode surrogates/character escape + sequences. This problem was reported by Daniel Gollahon (dgollahon). + * Fix arbitrary heap exposure problem. This problem was reported by Ahmad + Sherif (ahmadsherif). + +## 2017-01-12 (2.0.3) + * Set `required_ruby_version` to 1.9 + * Some small fixes + +## 2016-07-26 (2.0.2) + * Specify `required_ruby_version` for json\_pure. + * Fix issue #295 failure when parsing frozen strings. + +## 2016-07-01 (2.0.1) + * Fix problem when requiring json\_pure and Parser constant was defined top + level. + * Add `RB_GC_GUARD` to avoid possible GC problem via Pete Johns. + * Store `current_nesting` on stack by Aaron Patterson. + +## 2015-09-11 (2.0.0) + * Now complies to newest JSON RFC 7159. + * Implements compatibility to ruby 2.4 integer unification. + * Drops support for old rubies whose life has ended, that is rubies < 2.0. + Also see https://www.ruby-lang.org/en/news/2014/07/01/eol-for-1-8-7-and-1-9-2/ + * There were still some mentions of dual GPL licensing in the source, but JSON + has just the Ruby license that itself includes an explicit dual-licensing + clause that allows covered software to be distributed under the terms of + the Simplified BSD License instead for all ruby versions >= 1.9.3. This is + however a GPL compatible license according to the Free Software Foundation. + I changed these mentions to be consistent with the Ruby license setting in + the gemspec files which were already correct now. + +## 2015-06-01 (1.8.3) + * Fix potential memory leak, thx to nobu. + +## 2015-01-08 (1.8.2) + * Some performance improvements by Vipul A M . + * Fix by Jason R. Clark to avoid mutation of + `JSON.dump_default_options`. + * More tests by Michael Mac-Vicar and fixing + `space_before` accessor in generator. + * Performance on Jruby improved by Ben Browning . + * Some fixes to be compatible with the new Ruby 2.2 by Zachary Scott + and SHIBATA Hiroshi . + +## 2013-05-13 (1.8.1) + * Remove Rubinius exception since transcoding should be working now. + +## 2013-05-13 (1.8.0) + * Fix https://github.com/flori/json/issues/162 reported by Marc-Andre + Lafortune . Thanks! + * Applied patches by Yui NARUSE to suppress warning with + -Wchar-subscripts and better validate UTF-8 strings. + * Applied patch by ginriki@github to remove unnecessary if. 
+ * Add load/dump interface to `JSON::GenericObject` to make + serialize :some_attribute, `JSON::GenericObject` + work in Rails active models for convenient `SomeModel#some_attribute.foo.bar` + access to serialised JSON data. + +## 2013-02-04 (1.7.7) + * Security fix for JSON create_additions default value and + `JSON::GenericObject`. It should not be possible to create additions unless + explicitly requested by setting the create_additions argument to true or + using the JSON.load/dump interface. If `JSON::GenericObject` is supposed to + be automatically deserialised, this has to be explicitly enabled by + setting + JSON::GenericObject.json_creatable = true + as well. + * Remove useless assert in fbuffer implementation. + * Apply patch attached to https://github.com/flori/json/issues#issue/155 + provided by John Shahid , Thx! + * Add license information to rubygems spec data, reported by Jordi Massaguer Pla . + * Improve documentation, thx to Zachary Scott . + +## 2012-11-29 (1.7.6) + * Add `GeneratorState#merge` alias for JRuby, fix state accessor methods. Thx to + jvshahid@github. + * Increase hash likeness of state objects. + +## 2012-08-17 (1.7.5) + * Fix compilation of extension on older rubies. + +## 2012-07-26 (1.7.4) + * Fix compilation problem on AIX, see https://github.com/flori/json/issues/142 + +## 2012-05-12 (1.7.3) + * Work around Rubinius encoding issues using iconv for conversion instead. + +## 2012-05-11 (1.7.2) + * Fix some encoding issues, that cause problems for the pure and the + extension variant in jruby 1.9 mode. + +## 2012-04-28 (1.7.1) + * Some small fixes for building + +## 2012-04-28 (1.7.0) + * Add `JSON::GenericObject` for method access to objects transmitted via JSON. + +## 2012-04-27 (1.6.7) + * Fix possible crash when trying to parse nil value. + +## 2012-02-11 (1.6.6) + * Propagate src encoding to values made from it (fixes 1.9 mode converting + everything to ascii-8bit; harmless for 1.8 mode too) (Thomas E. Enebo + ), should fix + https://github.com/flori/json/issues#issue/119. + * Fix https://github.com/flori/json/issues#issue/124 Thx to Jason Hutchens. + * Fix https://github.com/flori/json/issues#issue/117 + +## 2012-01-15 (1.6.5) + * Vit Ondruch reported a bug that shows up when using + optimisation under GCC 4.7. Thx to him, Bohuslav Kabrda + and Yui NARUSE for debugging and + developing a patch fix. + +## 2011-12-24 (1.6.4) + * Patches that improve speed on JRuby contributed by Charles Oliver Nutter + . + * Support `object_class`/`array_class` with duck typed hash/array. + +## 2011-12-01 (1.6.3) + * Let `JSON.load('')` return nil as well to make mysql text columns (default to + `''`) work better for serialization. + +## 2011-11-21 (1.6.2) + * Add support for OpenStruct and BigDecimal. + * Fix bug when parsing nil in `quirks_mode`. + * Make JSON.dump and JSON.load methods better cooperate with Rails' serialize + method. Just use: + serialize :value, JSON + * Fix bug with time serialization concerning nanoseconds. Thanks for the + patch go to Josh Partlow (jpartlow@github). + * Improve parsing speed for JSON numbers (integers and floats) in a similar way to + what Evan Phoenix suggested in: + https://github.com/flori/json/pull/103 + +## 2011-09-18 (1.6.1) + * Using -target 1.5 to force Java bits to compile with 1.5. + +## 2011-09-12 (1.6.0) + * Extract utilities (prettifier and GUI-editor) in its own gem json-utils. + * Split json/add/core into different files for classes to be serialised. 
+ +## 2011-08-31 (1.5.4) + * Fix memory leak when used from multiple JRuby. (Patch by + jfirebaugh@github). + * Apply patch by Eric Wong that fixes garbage collection problem + reported in https://github.com/flori/json/issues/46. + * Add :quirks_mode option to parser and generator. + * Add support for Rational and Complex number additions via json/add/complex + and json/add/rational requires. + +## 2011-06-20 (1.5.3) + * Alias State#configure method as State#merge to increase duck type synonymy with Hash. + * Add `as_json` methods in json/add/core, so rails can create its json objects + the new way. + +## 2011-05-11 (1.5.2) + * Apply documentation patch by Cory Monty . + * Add gemspecs for json and json\_pure. + * Fix bug in jruby pretty printing. + * Fix bug in `object_class` and `array_class` when inheriting from Hash or + Array. + +## 2011-01-24 (1.5.1) + * Made rake-compiler build a fat binary gem. This should fix issue + https://github.com/flori/json/issues#issue/54. + +## 2011-01-22 (1.5.0) + * Included Java source codes for the Jruby extension made by Daniel Luz + . + * Output full exception message of `deep_const_get` to aid debugging. + * Fixed an issue with ruby 1.9 `Module#const_defined?` method, that was + reported by Riley Goodside. + +## 2010-08-09 (1.4.6) + * Fixed oversight reported in http://github.com/flori/json/issues/closed#issue/23, + always create a new object from the state prototype. + * Made pure and ext api more similar again. + +## 2010-08-07 (1.4.5) + * Manage data structure nesting depth in state object during generation. This + should reduce problems with `to_json` method definіtions that only have one + argument. + * Some fixes in the state objects and additional tests. +## 2010-08-06 (1.4.4) + * Fixes build problem for rubinius under OS X, http://github.com/flori/json/issues/closed#issue/25 + * Fixes crashes described in http://github.com/flori/json/issues/closed#issue/21 and + http://github.com/flori/json/issues/closed#issue/23 +## 2010-05-05 (1.4.3) + * Fixed some test assertions, from Ruby r27587 and r27590, patch by nobu. + * Fixed issue http://github.com/flori/json/issues/#issue/20 reported by + electronicwhisper@github. Thx! + +## 2010-04-26 (1.4.2) + * Applied patch from naruse Yui NARUSE to make building with + Microsoft Visual C possible again. + * Applied patch from devrandom in order to allow building of + json_pure if extensiontask is not present. + * Thanks to Dustin Schneider , who reported a memory + leak, which is fixed in this release. + * Applied 993f261ccb8f911d2ae57e9db48ec7acd0187283 patch from josh@github. + +## 2010-04-25 (1.4.1) + * Fix for a bug reported by Dan DeLeo , caused by T_FIXNUM + being different on 32bit/64bit architectures. + +## 2010-04-23 (1.4.0) + * Major speed improvements and building with simplified + directory/file-structure. + * Extension should at least be compatible with MRI, YARV and Rubinius. + +## 2010-04-07 (1.2.4) + * Triger const_missing callback to make Rails' dynamic class loading work. + +## 2010-03-11 (1.2.3) + * Added a `State#[]` method which returns an attribute's value in order to + increase duck type compatibility to Hash. + +## 2010-02-27 (1.2.2) + * Made some changes to make the building of the parser/generator compatible + to Rubinius. + +## 2009-11-25 (1.2.1) + * Added `:symbolize_names` option to Parser, which returns symbols instead of + strings in object names/keys. + +## 2009-10-01 (1.2.0) + * `fast_generate` now raises an exception for nan and infinite floats. 
+ * On Ruby 1.8 json supports parsing of UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, + and UTF-32LE JSON documents now. Under Ruby 1.9 the M17n conversion + functions are used to convert from all supported encodings. ASCII-8BIT + encoded strings are handled like all strings under Ruby 1.8 were. + * Better documentation + +## 2009-08-23 (1.1.9) + * Added forgotten main doc file `extra_rdoc_files`. + +## 2009-08-23 (1.1.8) + * Applied a patch by OZAWA Sakuro to make json/pure + work in environments that don't provide iconv. + * Applied patch by okkez_ in order to fix Ruby Bug #1768: + http://redmine.ruby-lang.org/issues/show/1768. + * Finally got around to avoid the rather paranoid escaping of ?/ characters + in the generator's output. The parsers aren't affected by this change. + Thanks to Rich Apodaca for the suggestion. + +## 2009-06-29 (1.1.7) + * Security Fix for JSON::Pure::Parser. A specially designed string could + cause catastrophic backtracking in one of the parser's regular expressions + in earlier 1.1.x versions. JSON::Ext::Parser isn't affected by this issue. + Thanks to Bartosz Blimke for reporting this + problem. + * This release also uses a less strict ruby version requirement for the + creation of the mswin32 native gem. + +## 2009-05-10 (1.1.6) + * No changes. І tested native linux gems in the last release and they don't + play well with different ruby versions other than the one the gem was built + with. This release is just to bump the version number in order to skip the + native gem on rubyforge. + +## 2009-05-10 (1.1.5) + * Started to build gems with rake-compiler gem. + * Applied patch object/array class patch from Brian Candler + and fixes. + +## 2009-04-01 (1.1.4) + * Fixed a bug in the creation of serialized generic rails objects reported by + Friedrich Graeter . + * Deleted tests/runner.rb, we're using testrb instead. + * Editor supports Infinity in numbers now. + * Made some changes in order to get the library to compile/run under Ruby + 1.9. + * Improved speed of the code path for the fast_generate method in the pure + variant. + +## 2008-07-10 (1.1.3) + * Wesley Beary reported a bug in json/add/core's DateTime + handling: If the nominator and denominator of the offset were divisible by + each other Ruby's Rational#to_s returns them as an integer not a fraction + with '/'. This caused a ZeroDivisionError during parsing. + * Use Date#start and DateTime#start instead of sg method, while + remaining backwards compatible. + * Supports ragel >= 6.0 now. + * Corrected some tests. + * Some minor changes. + +## 2007-11-27 (1.1.2) + * Remember default dir (last used directory) in editor. + * JSON::Editor.edit method added, the editor can now receive json texts from + the clipboard via C-v. + * Load json texts from an URL pasted via middle button press. + * Added :create_additions option to Parser. This makes it possible to disable + the creation of additions by force, in order to treat json texts as data + while having additions loaded. + * Jacob Maine reported, that JSON(:foo) outputs a JSON + object if the rails addition is enabled, which is wrong. It now outputs a + JSON string "foo" instead, like suggested by Jacob Maine. + * Discovered a bug in the Ruby Bugs Tracker on rubyforge, that was reported + by John Evans lgastako@gmail.com. He could produce a crash in the JSON + generator by returning something other than a String instance from a + to_json method. I now guard against this by doing a rather crude type + check, which raises an exception instead of crashing. 
+ +## 2007-07-06 (1.1.1) + * Yui NARUSE sent some patches to fix tests for Ruby + 1.9. I applied them and adapted some of them a bit to run both on 1.8 and + 1.9. + * Introduced a `JSON.parse!` method without depth checking for people who + like danger. + * Made generate and `pretty_generate` methods configurable by an options hash. + * Added :allow_nan option to parser and generator in order to handle NaN, + Infinity, and -Infinity correctly - if requested. Floats, which aren't numbers, + aren't valid JSON according to RFC4627, so by default an exception will be + raised if any of these symbols are encountered. Thanks to Andrea Censi + for his hint about this. + * Fixed some more tests for Ruby 1.9. + * Implemented dump/load interface of Marshal as suggested in ruby-core:11405 + by murphy . + * Implemented the `max_nesting` feature for generate methods, too. + * Added some implementations for ruby core's custom objects for + serialisation/deserialisation purposes. + +## 2007-05-21 (1.1.0) + * Implemented max_nesting feature for parser to avoid stack overflows for + data from untrusted sources. If you trust the source, you can disable it + with the option max_nesting => false. + * Piers Cawley reported a bug, that not every + character can be escaped by `\` as required by RFC4627. There's a + contradiction between David Crockford's JSON checker test vectors (in + tests/fixtures) and RFC4627, though. I decided to stick to the RFC, because + the JSON checker seems to be a bit older than the RFC. + * Extended license to Ruby License, which includes the GPL. + * Added keyboard shortcuts, and 'Open location' menu item to edit_json.rb. + +## 2007-05-09 (1.0.4) + * Applied a patch from Yui NARUSE to make JSON compile + under Ruby 1.9. Thank you very much for mailing it to me! + * Made binary variants of JSON fail early, instead of falling back to the + pure version. This should avoid overshadowing of eventual problems while + loading of the binary. + +## 2007-03-24 (1.0.3) + * Improved performance of pure variant a bit. + * The ext variant of this release supports the mswin32 platform. Ugh! + +## 2007-03-24 (1.0.2) + * Ext Parser didn't parse 0e0 correctly into 0.0: Fixed! + +## 2007-03-24 (1.0.1) + * Forgot some object files in the build dir. I really like that - not! + +## 2007-03-24 (1.0.0) + * Added C implementations for the JSON generator and a ragel based JSON + parser in C. + * Much more tests, especially fixtures from json.org. + * Further improved conformance to RFC4627. + +## 2007-02-09 (0.4.3) + * Conform more to RFC4627 for JSON: This means JSON strings + now always must contain exactly one object `"{ ... }"` or array `"[ ... ]"` in + order to be parsed without raising an exception. The definition of what + constitutes a whitespace is narrower in JSON than in Ruby ([ \t\r\n]), and + there are differences in floats and integers (no octals or hexadecimals) as + well. + * Added aliases generate and `pretty_generate` of unparse and `pretty_unparse`. + * Fixed a test case. + * Catch an `Iconv::InvalidEncoding` exception, that seems to occur on some Sun + boxes with SunOS 5.8, if iconv doesn't support utf16 conversions. This was + reported by Andrew R Jackson , thanks a bunch! + +## 2006-08-25 (0.4.2) + * Fixed a bug in handling solidi (/-characters), that was reported by + Kevin Gilpin . + +## 2006-02-06 (0.4.1) + * Fixed a bug related to escaping with backslashes. Thanks for the report go + to Florian Munz . 
+ +## 2005-09-23 (0.4.0) + * Initial Rubyforge Version diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/LICENSE b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/LICENSE new file mode 100644 index 0000000..426810a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/LICENSE @@ -0,0 +1,56 @@ +Ruby is copyrighted free software by Yukihiro Matsumoto . +You can redistribute it and/or modify it under either the terms of the +2-clause BSDL (see the file BSDL), or the conditions below: + + 1. You may make and give away verbatim copies of the source form of the + software without restriction, provided that you duplicate all of the + original copyright notices and associated disclaimers. + + 2. You may modify your copy of the software in any way, provided that + you do at least ONE of the following: + + a) place your modifications in the Public Domain or otherwise + make them Freely Available, such as by posting said + modifications to Usenet or an equivalent medium, or by allowing + the author to include your modifications in the software. + + b) use the modified software only within your corporation or + organization. + + c) give non-standard binaries non-standard names, with + instructions on where to get the original software distribution. + + d) make other distribution arrangements with the author. + + 3. You may distribute the software in object code or binary form, + provided that you do at least ONE of the following: + + a) distribute the binaries and library files of the software, + together with instructions (in the manual page or equivalent) + on where to get the original distribution. + + b) accompany the distribution with the machine-readable source of + the software. + + c) give non-standard binaries non-standard names, with + instructions on where to get the original software distribution. + + d) make other distribution arrangements with the author. + + 4. You may modify and include the part of the software into any other + software (possibly commercial). But some files in the distribution + are not written by the author, so that they are not under these terms. + + For the list of those files and their copying conditions, see the + file LEGAL. + + 5. The scripts and library files supplied as input to or produced as + output from the software do not automatically fall under the + copyright of the software, but belong to whomever generated them, + and may be sold commercially, and may be aggregated with this + software. + + 6. THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/README.md b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/README.md new file mode 100644 index 0000000..288ccdf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/README.md @@ -0,0 +1,425 @@ +# JSON implementation for Ruby + +[![CI](https://github.com/flori/json/actions/workflows/ci.yml/badge.svg)](https://github.com/flori/json/actions/workflows/ci.yml) + +## Description + +This is a implementation of the JSON specification according to RFC 7159 +http://www.ietf.org/rfc/rfc7159.txt . Starting from version 1.0.0 on there +will be two variants available: + +* A pure ruby variant, that relies on the iconv and the stringscan + extensions, which are both part of the ruby standard library. 
+* The quite a bit faster native extension variant, which is in parts + implemented in C or Java and comes with its own unicode conversion + functions and a parser generated by the ragel state machine compiler + http://www.complang.org/ragel/ . + +Both variants of the JSON generator generate UTF-8 character sequences by +default. If an :ascii\_only option with a true value is given, they escape all +non-ASCII and control characters with \uXXXX escape sequences, and support +UTF-16 surrogate pairs in order to be able to generate the whole range of +unicode code points. + +All strings, that are to be encoded as JSON strings, should be UTF-8 byte +sequences on the Ruby side. To encode raw binary strings, that aren't UTF-8 +encoded, please use the to\_json\_raw\_object method of String (which produces +an object, that contains a byte array) and decode the result on the receiving +endpoint. + +## Installation + +It's recommended to use the extension variant of JSON, because it's faster than +the pure ruby variant. If you cannot build it on your system, you can settle +for the latter. + +Just type into the command line as root: + +``` +# rake install +``` + +The above command will build the extensions and install them on your system. + +``` +# rake install_pure +``` + +or + +``` +# ruby install.rb +``` + +will just install the pure ruby implementation of JSON. + +If you use Rubygems you can type + +``` +# gem install json +``` + +instead, to install the newest JSON version. + +There is also a pure ruby json only variant of the gem, that can be installed +with: + +``` +# gem install json_pure +``` + +## Compiling the extensions yourself + +If you want to create the `parser.c` file from its `parser.rl` file or draw nice +graphviz images of the state machines, you need ragel from: +http://www.complang.org/ragel/ + +## Usage + +To use JSON you can + +```ruby +require 'json' +``` + +to load the installed variant (either the extension `'json'` or the pure +variant `'json_pure'`). If you have installed the extension variant, you can +pick either the extension variant or the pure variant by typing + +```ruby +require 'json/ext' +``` + +or + +```ruby +require 'json/pure' +``` + +Now you can parse a JSON document into a ruby data structure by calling + +```ruby +JSON.parse(document) +``` + +If you want to generate a JSON document from a ruby data structure call +```ruby +JSON.generate(data) +``` + +You can also use the `pretty_generate` method (which formats the output more +verbosely and nicely) or `fast_generate` (which doesn't do any of the security +checks generate performs, e. g. nesting deepness checks). + +There are also the JSON and JSON[] methods which use parse on a String or +generate a JSON document from an array or hash: + +```ruby +document = JSON 'test' => 23 # => "{\"test\":23}" +document = JSON['test' => 23] # => "{\"test\":23}" +``` + +and + +```ruby +data = JSON '{"test":23}' # => {"test"=>23} +data = JSON['{"test":23}'] # => {"test"=>23} +``` + +You can choose to load a set of common additions to ruby core's objects if +you + +```ruby +require 'json/add/core' +``` + +After requiring this you can, e. g., serialise/deserialise Ruby ranges: + +```ruby +JSON JSON(1..10) # => 1..10 +``` + +To find out how to add JSON support to other or your own classes, read the +section "More Examples" below. 
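+In short, additions only come back as Ruby objects when you explicitly ask for them. A minimal sketch (the sample values are arbitrary; it only uses the `create_additions` and `symbolize_names` parser options mentioned in this README and its changelog):
+
+```ruby
+require 'json'
+require 'json/add/core'   # defines #to_json / .json_create for Range, Time, Struct, ...
+
+json = JSON.generate(1..10)
+# => "{\"json_class\":\"Range\",\"data\":[1,10,false]}"
+
+JSON.parse(json)                          # additions are off by default (safe for untrusted input)
+# => {"json_class"=>"Range", "data"=>[1, 10, false]}
+
+JSON.parse(json, create_additions: true)  # opt in to rebuild the Ruby object
+# => 1..10
+
+JSON.parse('{"answer":42}', symbolize_names: true)
+# => {:answer=>42}
+```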
+ +To get the best compatibility to rails' JSON implementation, you can + +```ruby +require 'json/add/rails' +``` + +Both of the additions attempt to require `'json'` (like above) first, if it has +not been required yet. + +## Serializing exceptions + +The JSON module doesn't extend `Exception` by default. If you convert an `Exception` +object to JSON, it will by default only include the exception message. + +To include the full details, you must either load the `json/add/core` mentioned +above, or specifically load the exception addition: + +```ruby +require 'json/add/exception' +``` + +## More Examples + +To create a JSON document from a ruby data structure, you can call +`JSON.generate` like that: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,\"4..10\"]" +``` + +To get back a ruby data structure from a JSON document, you have to call +JSON.parse on it: + +```ruby +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, "4..10"] +``` + +Note, that the range from the original data structure is a simple +string now. The reason for this is, that JSON doesn't support ranges +or arbitrary classes. In this case the json library falls back to call +`Object#to_json`, which is the same as `#to_s.to_json`. + +It's possible to add JSON support serialization to arbitrary classes by +simply implementing a more specialized version of the `#to_json method`, that +should return a JSON object (a hash converted to JSON with `#to_json`) like +this (don't forget the `*a` for all the arguments): + +```ruby +class Range + def to_json(*a) + { + 'json_class' => self.class.name, # = 'Range' + 'data' => [ first, last, exclude_end? ] + }.to_json(*a) + end +end +``` + +The hash key `json_class` is the class, that will be asked to deserialise the +JSON representation later. In this case it's `Range`, but any namespace of +the form `A::B` or `::A::B` will do. All other keys are arbitrary and can be +used to store the necessary data to configure the object to be deserialised. + +If the key `json_class` is found in a JSON object, the JSON parser checks +if the given class responds to the `json_create` class method. If so, it is +called with the JSON object converted to a Ruby hash. So a range can +be deserialised by implementing `Range.json_create` like this: + +```ruby +class Range + def self.json_create(o) + new(*o['data']) + end +end +``` + +Now it possible to serialise/deserialise ranges as well: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json, :create_additions => true +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +``` + +`JSON.generate` always creates the shortest possible string representation of a +ruby data structure in one line. This is good for data storage or network +protocols, but not so good for humans to read. 
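+Under the hood the generator is configured by a `JSON::State` object, and the compact form simply uses empty strings for all of its formatting attributes; you can also pass those attributes yourself as an options hash. A minimal sketch (the sample data is arbitrary):
+
+```ruby
+require 'json'
+
+data = { "a" => 1, "b" => [2, 3] }
+
+JSON.generate(data)
+# => "{\"a\":1,\"b\":[2,3]}"
+
+# The same data with explicit formatting attributes (essentially what pretty_generate sets for you):
+JSON.generate(data, indent: "  ", space: " ", object_nl: "\n", array_nl: "\n")
+# => "{\n  \"a\": 1,\n  \"b\": [\n    2,\n    3\n  ]\n}"
+```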
Fortunately there's also +`JSON.pretty_generate` (or `JSON.pretty_generate`) that creates a more readable +output: + +```ruby + puts JSON.pretty_generate([1, 2, {"a"=>3.141}, false, true, nil, 4..10]) + [ + 1, + 2, + { + "a": 3.141 + }, + false, + true, + null, + { + "json_class": "Range", + "data": [ + 4, + 10, + false + ] + } + ] +``` + +There are also the methods `Kernel#j` for generate, and `Kernel#jj` for +`pretty_generate` output to the console, that work analogous to Core Ruby's `p` and +the `pp` library's `pp` methods. + +The script `tools/server.rb` contains a small example if you want to test, how +receiving a JSON object from a webrick server in your browser with the +JavaScript prototype library http://www.prototypejs.org works. + +## Speed Comparisons + +I have created some benchmark results (see the benchmarks/data-p4-3Ghz +subdir of the package) for the JSON-parser to estimate the speed up in the C +extension: + +``` + Comparing times (call_time_mean): + 1 ParserBenchmarkExt#parser 900 repeats: + 553.922304770 ( real) -> 21.500x + 0.001805307 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 224.513358139 ( real) -> 8.714x + 0.004454078 + 3 ParserBenchmarkPure#parser 1000 repeats: + 26.755020642 ( real) -> 1.038x + 0.037376163 + 4 ParserBenchmarkRails#parser 1000 repeats: + 25.763381731 ( real) -> 1.000x + 0.038814780 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1 is `JSON::Ext::Parser`, 2 is `YAML.load` with YAML +compatible JSON document, 3 is is `JSON::Pure::Parser`, and 4 is +`ActiveSupport::JSON.decode`. The ActiveSupport JSON-decoder converts the +input first to YAML and then uses the YAML-parser, the conversion seems to +slow it down so much that it is only as fast as the `JSON::Pure::Parser`! + +If you look at the benchmark data you can see that this is mostly caused by +the frequent high outliers - the median of the Rails-parser runs is still +overall smaller than the median of the `JSON::Pure::Parser` runs: + +``` + Comparing times (call_time_median): + 1 ParserBenchmarkExt#parser 900 repeats: + 800.592479481 ( real) -> 26.936x + 0.001249075 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 271.002390644 ( real) -> 9.118x + 0.003690004 + 3 ParserBenchmarkRails#parser 1000 repeats: + 30.227910865 ( real) -> 1.017x + 0.033082008 + 4 ParserBenchmarkPure#parser 1000 repeats: + 29.722384421 ( real) -> 1.000x + 0.033644676 + calls/sec ( time) -> speed covers + secs/call +``` + +I have benchmarked the `JSON-Generator` as well. This generated a few more +values, because there are different modes that also influence the achieved +speed: + +``` + Comparing times (call_time_mean): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 547.354332608 ( real) -> 15.090x + 0.001826970 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 443.968212317 ( real) -> 12.240x + 0.002252414 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 375.104545883 ( real) -> 10.341x + 0.002665923 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 49.978706968 ( real) -> 1.378x + 0.020008521 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 38.531868759 ( real) -> 1.062x + 0.025952543 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 36.927649925 ( real) -> 1.018x 7 (>=3859) + 0.027079979 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 36.272134441 ( real) -> 1.000x 6 (>=3859) + 0.027569373 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1-3 are `JSON::Ext::Generator` methods. 
4, 6, and 7 are +`JSON::Pure::Generator` methods and 5 is the Rails JSON generator. It is now a +bit faster than the `generator_safe` and `generator_pretty` methods of the pure +variant but slower than the others. + +To achieve the fastest JSON document output, you can use the `fast_generate` +method. Beware, that this will disable the checking for circular Ruby data +structures, which may cause JSON to go into an infinite loop. + +Here are the median comparisons for completeness' sake: + +``` + Comparing times (call_time_median): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 708.258020939 ( real) -> 16.547x + 0.001411915 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 569.105020353 ( real) -> 13.296x + 0.001757145 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 482.825371244 ( real) -> 11.280x + 0.002071142 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 62.717626652 ( real) -> 1.465x + 0.015944481 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 43.965681162 ( real) -> 1.027x + 0.022745013 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 43.929073409 ( real) -> 1.026x 7 (>=3859) + 0.022763968 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 42.802514491 ( real) -> 1.000x 6 (>=3859) + 0.023363113 + calls/sec ( time) -> speed covers + secs/call +``` + +## Development + +### Release + +Update the json.gemspec and json-java.gemspec. + +``` +rbenv shell 2.6.5 +rake build +gem push pkg/json-2.3.0.gem + +rbenv shell jruby-9.2.9.0 +rake build +gem push pkg/json-2.3.0-java.gem +``` + +## Author + +Florian Frank + +## License + +Ruby License, see https://www.ruby-lang.org/en/about/license.txt. + +## Download + +The latest version of this library can be downloaded at + +* https://rubygems.org/gems/json + +Online Documentation should be located at + +* https://www.rubydoc.info/gems/json diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/VERSION b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/VERSION new file mode 100644 index 0000000..6a6a3d8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/VERSION @@ -0,0 +1 @@ +2.6.1 diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/fbuffer/fbuffer.h b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/fbuffer/fbuffer.h new file mode 100644 index 0000000..dc8f406 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/fbuffer/fbuffer.h @@ -0,0 +1,187 @@ + +#ifndef _FBUFFER_H_ +#define _FBUFFER_H_ + +#include "ruby.h" + +#ifndef RHASH_SIZE +#define RHASH_SIZE(hsh) (RHASH(hsh)->tbl->num_entries) +#endif + +#ifndef RFLOAT_VALUE +#define RFLOAT_VALUE(val) (RFLOAT(val)->value) +#endif + +#ifndef RARRAY_LEN +#define RARRAY_LEN(ARRAY) RARRAY(ARRAY)->len +#endif +#ifndef RSTRING_PTR +#define RSTRING_PTR(string) RSTRING(string)->ptr +#endif +#ifndef RSTRING_LEN +#define RSTRING_LEN(string) RSTRING(string)->len +#endif + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +#ifdef HAVE_RUBY_ENCODING_H +#include "ruby/encoding.h" +#define FORCE_UTF8(obj) rb_enc_associate((obj), rb_utf8_encoding()) +#else +#define FORCE_UTF8(obj) +#endif + +/* We don't need to guard objects for rbx, so let's do nothing at all. 
*/ +#ifndef RB_GC_GUARD +#define RB_GC_GUARD(object) +#endif + +typedef struct FBufferStruct { + unsigned long initial_length; + char *ptr; + unsigned long len; + unsigned long capa; +} FBuffer; + +#define FBUFFER_INITIAL_LENGTH_DEFAULT 1024 + +#define FBUFFER_PTR(fb) (fb->ptr) +#define FBUFFER_LEN(fb) (fb->len) +#define FBUFFER_CAPA(fb) (fb->capa) +#define FBUFFER_PAIR(fb) FBUFFER_PTR(fb), FBUFFER_LEN(fb) + +static FBuffer *fbuffer_alloc(unsigned long initial_length); +static void fbuffer_free(FBuffer *fb); +static void fbuffer_clear(FBuffer *fb); +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len); +#ifdef JSON_GENERATOR +static void fbuffer_append_long(FBuffer *fb, long number); +#endif +static void fbuffer_append_char(FBuffer *fb, char newchr); +#ifdef JSON_GENERATOR +static FBuffer *fbuffer_dup(FBuffer *fb); +static VALUE fbuffer_to_s(FBuffer *fb); +#endif + +static FBuffer *fbuffer_alloc(unsigned long initial_length) +{ + FBuffer *fb; + if (initial_length <= 0) initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + fb = ALLOC(FBuffer); + memset((void *) fb, 0, sizeof(FBuffer)); + fb->initial_length = initial_length; + return fb; +} + +static void fbuffer_free(FBuffer *fb) +{ + if (fb->ptr) ruby_xfree(fb->ptr); + ruby_xfree(fb); +} + +static void fbuffer_clear(FBuffer *fb) +{ + fb->len = 0; +} + +static void fbuffer_inc_capa(FBuffer *fb, unsigned long requested) +{ + unsigned long required; + + if (!fb->ptr) { + fb->ptr = ALLOC_N(char, fb->initial_length); + fb->capa = fb->initial_length; + } + + for (required = fb->capa; requested > required - fb->len; required <<= 1); + + if (required > fb->capa) { + REALLOC_N(fb->ptr, char, required); + fb->capa = required; + } +} + +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len) +{ + if (len > 0) { + fbuffer_inc_capa(fb, len); + MEMCPY(fb->ptr + fb->len, newstr, char, len); + fb->len += len; + } +} + +#ifdef JSON_GENERATOR +static void fbuffer_append_str(FBuffer *fb, VALUE str) +{ + const char *newstr = StringValuePtr(str); + unsigned long len = RSTRING_LEN(str); + + RB_GC_GUARD(str); + + fbuffer_append(fb, newstr, len); +} +#endif + +static void fbuffer_append_char(FBuffer *fb, char newchr) +{ + fbuffer_inc_capa(fb, 1); + *(fb->ptr + fb->len) = newchr; + fb->len++; +} + +#ifdef JSON_GENERATOR +static void freverse(char *start, char *end) +{ + char c; + + while (end > start) { + c = *end, *end-- = *start, *start++ = c; + } +} + +static long fltoa(long number, char *buf) +{ + static char digits[] = "0123456789"; + long sign = number; + char* tmp = buf; + + if (sign < 0) number = -number; + do *tmp++ = digits[number % 10]; while (number /= 10); + if (sign < 0) *tmp++ = '-'; + freverse(buf, tmp - 1); + return tmp - buf; +} + +static void fbuffer_append_long(FBuffer *fb, long number) +{ + char buf[20]; + unsigned long len = fltoa(number, buf); + fbuffer_append(fb, buf, len); +} + +static FBuffer *fbuffer_dup(FBuffer *fb) +{ + unsigned long len = fb->len; + FBuffer *result; + + result = fbuffer_alloc(len); + fbuffer_append(result, FBUFFER_PAIR(fb)); + return result; +} + +static VALUE fbuffer_to_s(FBuffer *fb) +{ + VALUE result = rb_str_new(FBUFFER_PTR(fb), FBUFFER_LEN(fb)); + fbuffer_free(fb); + FORCE_UTF8(result); + return result; +} +#endif +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/Makefile b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/Makefile new file mode 100644 index 0000000..c6e16b4 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/Makefile @@ -0,0 +1,268 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0 +hdrdir = $(topdir) +arch_hdrdir = /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/include/ruby-2.6.0/universal-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20211106-75777-13nb42t +sitelibdir = $(DESTDIR)./.gem.20211106-75777-13nb42t +sitedir = $(DESTDIR)/Library/Ruby/Site +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(DESTDIR)/usr/share/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(DESTDIR)/usr/share/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(DESTDIR)/usr/include +includedir = $(DESTDIR)/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk$(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(DESTDIR)/Library/Ruby/Site +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = xcrun clang +CXX = xcrun clang++ +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = $(optflags) $(debugflags) $(warnflags) +optflags = +debugflags = -g +warnflags = +cppflags = +CCDLFLAGS = +CFLAGS = $(CCDLFLAGS) -g -Os -pipe -DHAVE_GCC_ATOMIC_BUILTINS -DUSE_FFI_CLOSURE_ALLOC $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) +DEFS = +CPPFLAGS = -DJSON_GENERATOR -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) -g -Os -pipe $(ARCH_FLAG) +ldflags = -L. 
-L/System/Volumes/Data/SWE/macOS/BuildRoots/b8ff8433dc/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.0.Internal.sdk/usr/local/lib +dldflags = $(ARCH_FLAG) -undefined dynamic_lookup -multiply_defined suppress +ARCH_FLAG = +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = libtool -static +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.2.6 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = universal-darwin21 +sitearch = $(arch) +ruby_version = 2.6.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = mkdir -p +INSTALL = /usr/bin/install -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = /json/ext +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) +ORIG_SRCS = generator.c +SRCS = $(ORIG_SRCS) +OBJS = generator.o +HDRS = $(srcdir)/generator.h +LOCAL_HDRS = +TARGET = generator +TARGET_NAME = generator +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . +BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) 
$(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object json/ext/$(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +### +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/depend b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/depend new file mode 100644 index 0000000..1a042a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/depend @@ -0,0 +1 @@ +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/extconf.rb new file mode 100644 index 0000000..8627c5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/extconf.rb @@ -0,0 +1,4 @@ +require 'mkmf' + +$defs << "-DJSON_GENERATOR" +create_makefile 'json/ext/generator' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/generator.c b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/generator.c new file mode 100644 index 0000000..e3a8347 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/generator.c @@ -0,0 +1,1608 @@ +#include "../fbuffer/fbuffer.h" +#include "generator.h" + +static VALUE mJSON, mExt, mGenerator, cState, mGeneratorMethods, mObject, + mHash, mArray, +#ifdef RUBY_INTEGER_UNIFICATION + mInteger, +#else + mFixnum, mBignum, +#endif + mFloat, mString, mString_Extend, + mTrueClass, mFalseClass, mNilClass, eGeneratorError, + eNestingError; + +static ID i_to_s, i_to_json, i_new, i_indent, i_space, i_space_before, + i_object_nl, i_array_nl, i_max_nesting, i_allow_nan, i_ascii_only, + i_pack, i_unpack, i_create_id, i_extend, i_key_p, + i_aref, i_send, i_respond_to_p, i_match, i_keys, i_depth, + i_buffer_initial_length, i_dup, i_escape_slash; + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. 
No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is + * left as-is for anyone who may want to do such conversion, which was + * allowed in earlier algorithms. + */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* + * Magic values subtracted from a buffer value during UTF8 conversion. + * This table contains as many values as there might be trailing bytes + * in a UTF-8 sequence. + */ +static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * If not calling this from ConvertUTF8to*, then the length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns 0. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length) +{ + UTF8 a; + const UTF8 *srcptr = source+length; + switch (length) { + default: return 0; + /* Everything else falls through when "1"... */ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 2: if ((a = (*--srcptr)) > 0xBF) return 0; + + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return 0; break; + case 0xED: if (a > 0x9F) return 0; break; + case 0xF0: if (a < 0x90) return 0; break; + case 0xF4: if (a > 0x8F) return 0; break; + default: if (a < 0x80) return 0; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return 0; + } + if (*source > 0xF4) return 0; + return 1; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf. 
*/ +static void unicode_escape(char *buf, UTF16 character) +{ + const char *digits = "0123456789abcdef"; + + buf[2] = digits[character >> 12]; + buf[3] = digits[(character >> 8) & 0xf]; + buf[4] = digits[(character >> 4) & 0xf]; + buf[5] = digits[character & 0xf]; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf, then + * the buffer buf is appended to the FBuffer buffer. */ +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 + character) +{ + unicode_escape(buf, character); + fbuffer_append(buffer, buf, 6); +} + +/* Converts string to a JSON string in FBuffer buffer, where all but the ASCII + * and control characters are JSON escaped. */ +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string, char escape_slash) +{ + const UTF8 *source = (UTF8 *) RSTRING_PTR(string); + const UTF8 *sourceEnd = source + RSTRING_LEN(string); + char buf[6] = { '\\', 'u' }; + + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8(source, extraBytesToRead+1)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* normal case */ + if (ch >= 0x20 && ch <= 0x7f) { + switch (ch) { + case '\\': + fbuffer_append(buffer, "\\\\", 2); + break; + case '"': + fbuffer_append(buffer, "\\\"", 2); + break; + case '/': + if(escape_slash) { + fbuffer_append(buffer, "\\/", 2); + break; + } + default: + fbuffer_append_char(buffer, (char)ch); + break; + } + } else { + switch (ch) { + case '\n': + fbuffer_append(buffer, "\\n", 2); + break; + case '\r': + fbuffer_append(buffer, "\\r", 2); + break; + case '\t': + fbuffer_append(buffer, "\\t", 2); + break; + case '\f': + fbuffer_append(buffer, "\\f", 2); + break; + case '\b': + fbuffer_append(buffer, "\\b", 2); + break; + default: + unicode_escape_to_buffer(buffer, buf, (UTF16) ch); + break; + } + } + } + } else if (ch > UNI_MAX_UTF16) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the start */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. 
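+ * Editor's worked example (not in the upstream source): for U+1F600,
+ * ch - halfBase is 0xF600, so the high surrogate is (0xF600 >> 10) + 0xD800
+ * = 0xD83D and the low surrogate is (0xF600 & 0x3FF) + 0xDC00 = 0xDE00,
+ * producing the escape pair \ud83d\ude00.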
*/ + ch -= halfBase; + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START)); + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch & halfMask) + UNI_SUR_LOW_START)); + } + } + RB_GC_GUARD(string); +} + +/* Converts string to a JSON string in FBuffer buffer, where only the + * characters required by the JSON standard are JSON escaped. The remaining + * characters (should be UTF8) are just passed through and appended to the + * result. */ +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string, char escape_slash) +{ + const char *ptr = RSTRING_PTR(string), *p; + unsigned long len = RSTRING_LEN(string), start = 0, end = 0; + const char *escape = NULL; + int escape_len; + unsigned char c; + char buf[6] = { '\\', 'u' }; + int ascii_only = rb_enc_str_asciionly_p(string); + + for (start = 0, end = 0; end < len;) { + p = ptr + end; + c = (unsigned char) *p; + if (c < 0x20) { + switch (c) { + case '\n': + escape = "\\n"; + escape_len = 2; + break; + case '\r': + escape = "\\r"; + escape_len = 2; + break; + case '\t': + escape = "\\t"; + escape_len = 2; + break; + case '\f': + escape = "\\f"; + escape_len = 2; + break; + case '\b': + escape = "\\b"; + escape_len = 2; + break; + default: + unicode_escape(buf, (UTF16) *p); + escape = buf; + escape_len = 6; + break; + } + } else { + switch (c) { + case '\\': + escape = "\\\\"; + escape_len = 2; + break; + case '"': + escape = "\\\""; + escape_len = 2; + break; + case '/': + if(escape_slash) { + escape = "\\/"; + escape_len = 2; + break; + } + default: + { + unsigned short clen = 1; + if (!ascii_only) { + clen += trailingBytesForUTF8[c]; + if (end + clen > len) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8((UTF8 *) p, clen)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + } + end += clen; + } + continue; + break; + } + } + fbuffer_append(buffer, ptr + start, end - start); + fbuffer_append(buffer, escape, escape_len); + start = ++end; + escape = NULL; + } + fbuffer_append(buffer, ptr + start, end - start); +} + +static char *fstrndup(const char *ptr, unsigned long len) { + char *result; + if (len <= 0) return NULL; + result = ALLOC_N(char, len); + memcpy(result, ptr, len); + return result; +} + +/* + * Document-module: JSON::Ext::Generator + * + * This is the JSON generator implemented as a C extension. It can be + * configured to be used by setting + * + * JSON.generator = JSON::Ext::Generator + * + * with the method generator= in JSON. 
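+ *
+ * A minimal usage sketch (editor's addition, not upstream documentation),
+ * assuming the standard json API from json/common:
+ *
+ *   require 'json/ext'
+ *   JSON.generator = JSON::Ext::Generator
+ *   JSON.generate("a" => 1)   # => "{\"a\":1}"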
+ * + */ + +/* Explanation of the following: that's the only way to not pollute + * standard library's docs with GeneratorMethods:: which + * are uninformative and take a large place in a list of classes + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Array + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Bignum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::FalseClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Fixnum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Float + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Hash + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Integer + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::NilClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Object + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String::Extend + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::TrueClass + * :nodoc: + */ + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON object, that is generated from + * this Hash instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(object); +} + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON array, that is generated from + * this Array instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self) { + GENERATE_JSON(array); +} + +#ifdef RUBY_INTEGER_UNIFICATION +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(integer); +} + +#else +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(fixnum); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(bignum); +} +#endif + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Float number. + */ +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(float); +} + +/* + * call-seq: String.included(modul) + * + * Extends _modul_ with the String::Extend module. + */ +static VALUE mString_included_s(VALUE self, VALUE modul) { + VALUE result = rb_funcall(modul, i_extend, 1, mString_Extend); + return result; +} + +/* + * call-seq: to_json(*) + * + * This string should be encoded with UTF-8 A call to this method + * returns a JSON string encoded with UTF16 big endian characters as + * \u????. 
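+ *
+ * Editor's sketch (not upstream documentation): non-ASCII characters are
+ * only \u-escaped when the state is configured with ascii_only, e.g.
+ *
+ *   "café".to_json                           # => "\"café\""
+ *   JSON.generate("café", ascii_only: true)  # => "\"caf\\u00e9\""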
+ */ +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(string); +} + +/* + * call-seq: to_json_raw_object() + * + * This method creates a raw object hash, that can be nested into + * other data structures and will be generated as a raw string. This + * method should be used, if you want to convert raw strings to JSON + * instead of UTF-8 strings, e. g. binary data. + */ +static VALUE mString_to_json_raw_object(VALUE self) +{ + VALUE ary; + VALUE result = rb_hash_new(); + rb_hash_aset(result, rb_funcall(mJSON, i_create_id, 0), rb_class_name(rb_obj_class(self))); + ary = rb_funcall(self, i_unpack, 1, rb_str_new2("C*")); + rb_hash_aset(result, rb_str_new2("raw"), ary); + return result; +} + +/* + * call-seq: to_json_raw(*args) + * + * This method creates a JSON text from the result of a call to + * to_json_raw_object of this String. + */ +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self) +{ + VALUE obj = mString_to_json_raw_object(self); + Check_Type(obj, T_HASH); + return mHash_to_json(argc, argv, obj); +} + +/* + * call-seq: json_create(o) + * + * Raw Strings are JSON Objects (the raw bytes are stored in an array for the + * key "raw"). The Ruby String can be created by this module method. + */ +static VALUE mString_Extend_json_create(VALUE self, VALUE o) +{ + VALUE ary; + Check_Type(o, T_HASH); + ary = rb_hash_aref(o, rb_str_new2("raw")); + return rb_funcall(ary, i_pack, 1, rb_str_new2("C*")); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for true: 'true'. + */ +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(true); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for false: 'false'. + */ +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(false); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for nil: 'null'. + */ +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(null); +} + +/* + * call-seq: to_json(*) + * + * Converts this object to a string (calling #to_s), converts + * it to a JSON string, and returns the result. This is a fallback, if no + * special method #to_json was defined for some object. 
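+ *
+ * Editor's sketch (hypothetical class, not upstream documentation; assumes
+ * json has been required so Object#to_json is defined):
+ *
+ *   class Coordinate; def to_s; "1,2"; end; end
+ *   Coordinate.new.to_json   # => "\"1,2\""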
+ */ +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self) +{ + VALUE state; + VALUE string = rb_funcall(self, i_to_s, 0); + rb_scan_args(argc, argv, "01", &state); + Check_Type(string, T_STRING); + state = cState_from_state_s(cState, state); + return cState_partial_generate(state, string); +} + +static void State_free(void *ptr) +{ + JSON_Generator_State *state = ptr; + if (state->indent) ruby_xfree(state->indent); + if (state->space) ruby_xfree(state->space); + if (state->space_before) ruby_xfree(state->space_before); + if (state->object_nl) ruby_xfree(state->object_nl); + if (state->array_nl) ruby_xfree(state->array_nl); + if (state->array_delim) fbuffer_free(state->array_delim); + if (state->object_delim) fbuffer_free(state->object_delim); + if (state->object_delim2) fbuffer_free(state->object_delim2); + ruby_xfree(state); +} + +static size_t State_memsize(const void *ptr) +{ + const JSON_Generator_State *state = ptr; + size_t size = sizeof(*state); + if (state->indent) size += state->indent_len + 1; + if (state->space) size += state->space_len + 1; + if (state->space_before) size += state->space_before_len + 1; + if (state->object_nl) size += state->object_nl_len + 1; + if (state->array_nl) size += state->array_nl_len + 1; + if (state->array_delim) size += FBUFFER_CAPA(state->array_delim); + if (state->object_delim) size += FBUFFER_CAPA(state->object_delim); + if (state->object_delim2) size += FBUFFER_CAPA(state->object_delim2); + return size; +} + +#ifndef HAVE_RB_EXT_RACTOR_SAFE +# undef RUBY_TYPED_FROZEN_SHAREABLE +# define RUBY_TYPED_FROZEN_SHAREABLE 0 +#endif + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Generator_State_type = { + "JSON/Generator/State", + {NULL, State_free, State_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_FROZEN_SHAREABLE, +#endif +}; +#endif + +static VALUE cState_s_allocate(VALUE klass) +{ + JSON_Generator_State *state; + return TypedData_Make_Struct(klass, JSON_Generator_State, + &JSON_Generator_State_type, state); +} + +/* + * call-seq: configure(opts) + * + * Configure this State instance with the Hash _opts_, and return + * itself. 
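+ *
+ * Editor's sketch (not upstream documentation), using keys handled by this
+ * method:
+ *
+ *   state = JSON::Ext::Generator::State.new
+ *   state.configure(indent: '  ', space: ' ', object_nl: "\n")  # => state
+ *   state.indent   # => "  "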
+ */ +static VALUE cState_configure(VALUE self, VALUE opts) +{ + VALUE tmp; + GET_STATE(self); + tmp = rb_check_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(tmp)) tmp = rb_convert_type(opts, T_HASH, "Hash", "to_h"); + opts = tmp; + tmp = rb_hash_aref(opts, ID2SYM(i_indent)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->indent = fstrndup(RSTRING_PTR(tmp), len + 1); + state->indent_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space_before)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space_before = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_before_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_array_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->array_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->array_nl_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_object_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->object_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->object_nl_len = len; + } + tmp = ID2SYM(i_max_nesting); + state->max_nesting = 100; + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + state->max_nesting = FIX2LONG(max_nesting); + } else { + state->max_nesting = 0; + } + } + tmp = ID2SYM(i_depth); + state->depth = 0; + if (option_given_p(opts, tmp)) { + VALUE depth = rb_hash_aref(opts, tmp); + if (RTEST(depth)) { + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + } else { + state->depth = 0; + } + } + tmp = ID2SYM(i_buffer_initial_length); + if (option_given_p(opts, tmp)) { + VALUE buffer_initial_length = rb_hash_aref(opts, tmp); + if (RTEST(buffer_initial_length)) { + long initial_length; + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) state->buffer_initial_length = initial_length; + } + } + tmp = rb_hash_aref(opts, ID2SYM(i_allow_nan)); + state->allow_nan = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_ascii_only)); + state->ascii_only = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_escape_slash)); + state->escape_slash = RTEST(tmp); + return self; +} + +static void set_state_ivars(VALUE hash, VALUE state) +{ + VALUE ivars = rb_obj_instance_variables(state); + int i = 0; + for (i = 0; i < RARRAY_LEN(ivars); i++) { + VALUE key = rb_funcall(rb_ary_entry(ivars, i), i_to_s, 0); + long key_len = RSTRING_LEN(key); + VALUE value = rb_iv_get(state, StringValueCStr(key)); + rb_hash_aset(hash, rb_str_intern(rb_str_substr(key, 1, key_len - 1)), value); + } +} + +/* + * call-seq: to_h + * + * Returns the configuration instance variables as a hash, that can be + * passed to the configure method. 
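+ *
+ * Editor's sketch (not upstream documentation):
+ *
+ *   state = JSON::Ext::Generator::State.new(indent: '  ')
+ *   state.to_h[:indent]                          # => "  "
+ *   JSON::Ext::Generator::State.new(state.to_h)  # fresh state, same settings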
+ */ +static VALUE cState_to_h(VALUE self) +{ + VALUE result = rb_hash_new(); + GET_STATE(self); + set_state_ivars(result, self); + rb_hash_aset(result, ID2SYM(i_indent), rb_str_new(state->indent, state->indent_len)); + rb_hash_aset(result, ID2SYM(i_space), rb_str_new(state->space, state->space_len)); + rb_hash_aset(result, ID2SYM(i_space_before), rb_str_new(state->space_before, state->space_before_len)); + rb_hash_aset(result, ID2SYM(i_object_nl), rb_str_new(state->object_nl, state->object_nl_len)); + rb_hash_aset(result, ID2SYM(i_array_nl), rb_str_new(state->array_nl, state->array_nl_len)); + rb_hash_aset(result, ID2SYM(i_allow_nan), state->allow_nan ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_ascii_only), state->ascii_only ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_max_nesting), LONG2FIX(state->max_nesting)); + rb_hash_aset(result, ID2SYM(i_escape_slash), state->escape_slash ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_depth), LONG2FIX(state->depth)); + rb_hash_aset(result, ID2SYM(i_buffer_initial_length), LONG2FIX(state->buffer_initial_length)); + return result; +} + +/* +* call-seq: [](name) +* +* Returns the value returned by method +name+. +*/ +static VALUE cState_aref(VALUE self, VALUE name) +{ + name = rb_funcall(name, i_to_s, 0); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name))) { + return rb_funcall(self, i_send, 1, name); + } else { + return rb_attr_get(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name))); + } +} + +/* +* call-seq: []=(name, value) +* +* Sets the attribute name to value. +*/ +static VALUE cState_aset(VALUE self, VALUE name, VALUE value) +{ + VALUE name_writer; + + name = rb_funcall(name, i_to_s, 0); + name_writer = rb_str_cat2(rb_str_dup(name), "="); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name_writer))) { + return rb_funcall(self, i_send, 2, name_writer, value); + } else { + rb_ivar_set(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name)), value); + } + return Qnil; +} + +struct hash_foreach_arg { + FBuffer *buffer; + JSON_Generator_State *state; + VALUE Vstate; + int iter; +}; + +static int +json_object_i(VALUE key, VALUE val, VALUE _arg) +{ + struct hash_foreach_arg *arg = (struct hash_foreach_arg *)_arg; + FBuffer *buffer = arg->buffer; + JSON_Generator_State *state = arg->state; + VALUE Vstate = arg->Vstate; + + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + char *delim = FBUFFER_PTR(state->object_delim); + long delim_len = FBUFFER_LEN(state->object_delim); + char *delim2 = FBUFFER_PTR(state->object_delim2); + long delim2_len = FBUFFER_LEN(state->object_delim2); + long depth = state->depth; + int j; + VALUE klass, key_to_s; + + if (arg->iter > 0) fbuffer_append(buffer, delim, delim_len); + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + } + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + + klass = CLASS_OF(key); + if (klass == rb_cString) { + key_to_s = key; + } else if (klass == rb_cSymbol) { + key_to_s = rb_id2str(SYM2ID(key)); + } else { + key_to_s = rb_funcall(key, i_to_s, 0); + } + Check_Type(key_to_s, T_STRING); + generate_json(buffer, Vstate, state, key_to_s); + fbuffer_append(buffer, delim2, delim2_len); + generate_json(buffer, Vstate, state, val); + + arg->iter++; + return ST_CONTINUE; +} + +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *object_nl = 
state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + long depth = ++state->depth; + int j; + struct hash_foreach_arg arg; + + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '{'); + + arg.buffer = buffer; + arg.state = state; + arg.Vstate = Vstate; + arg.iter = 0; + rb_hash_foreach(obj, json_object_i, (VALUE)&arg); + + depth = --state->depth; + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, '}'); +} + +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *array_nl = state->array_nl; + long array_nl_len = state->array_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + char *delim = FBUFFER_PTR(state->array_delim); + long delim_len = FBUFFER_LEN(state->array_delim); + long depth = ++state->depth; + int i, j; + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '['); + if (array_nl) fbuffer_append(buffer, array_nl, array_nl_len); + for(i = 0; i < RARRAY_LEN(obj); i++) { + if (i > 0) fbuffer_append(buffer, delim, delim_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + generate_json(buffer, Vstate, state, rb_ary_entry(obj, i)); + } + state->depth = --depth; + if (array_nl) { + fbuffer_append(buffer, array_nl, array_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, ']'); +} + +#ifdef HAVE_RUBY_ENCODING_H +static int enc_utf8_compatible_p(rb_encoding *enc) +{ + if (enc == rb_usascii_encoding()) return 1; + if (enc == rb_utf8_encoding()) return 1; + return 0; +} +#endif + +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_char(buffer, '"'); +#ifdef HAVE_RUBY_ENCODING_H + if (!enc_utf8_compatible_p(rb_enc_get(obj))) { + obj = rb_str_export_to_enc(obj, rb_utf8_encoding()); + } +#endif + if (state->ascii_only) { + convert_UTF8_to_JSON_ASCII(buffer, obj, state->escape_slash); + } else { + convert_UTF8_to_JSON(buffer, obj, state->escape_slash); + } + fbuffer_append_char(buffer, '"'); +} + +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "null", 4); +} + +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "false", 5); +} + +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "true", 4); +} + +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_long(buffer, FIX2LONG(obj)); +} + +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp = rb_funcall(obj, i_to_s, 0); + fbuffer_append_str(buffer, tmp); +} + +#ifdef RUBY_INTEGER_UNIFICATION +static void 
generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + if (FIXNUM_P(obj)) + generate_json_fixnum(buffer, Vstate, state, obj); + else + generate_json_bignum(buffer, Vstate, state, obj); +} +#endif +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + double value = RFLOAT_VALUE(obj); + char allow_nan = state->allow_nan; + VALUE tmp = rb_funcall(obj, i_to_s, 0); + if (!allow_nan) { + if (isinf(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } else if (isnan(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } + } + fbuffer_append_str(buffer, tmp); +} + +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp; + VALUE klass = CLASS_OF(obj); + if (klass == rb_cHash) { + generate_json_object(buffer, Vstate, state, obj); + } else if (klass == rb_cArray) { + generate_json_array(buffer, Vstate, state, obj); + } else if (klass == rb_cString) { + generate_json_string(buffer, Vstate, state, obj); + } else if (obj == Qnil) { + generate_json_null(buffer, Vstate, state, obj); + } else if (obj == Qfalse) { + generate_json_false(buffer, Vstate, state, obj); + } else if (obj == Qtrue) { + generate_json_true(buffer, Vstate, state, obj); + } else if (FIXNUM_P(obj)) { + generate_json_fixnum(buffer, Vstate, state, obj); + } else if (RB_TYPE_P(obj, T_BIGNUM)) { + generate_json_bignum(buffer, Vstate, state, obj); + } else if (klass == rb_cFloat) { + generate_json_float(buffer, Vstate, state, obj); + } else if (rb_respond_to(obj, i_to_json)) { + tmp = rb_funcall(obj, i_to_json, 1, Vstate); + Check_Type(tmp, T_STRING); + fbuffer_append_str(buffer, tmp); + } else { + tmp = rb_funcall(obj, i_to_s, 0); + Check_Type(tmp, T_STRING); + generate_json_string(buffer, Vstate, state, tmp); + } +} + +static FBuffer *cState_prepare_buffer(VALUE self) +{ + FBuffer *buffer; + GET_STATE(self); + buffer = fbuffer_alloc(state->buffer_initial_length); + + if (state->object_delim) { + fbuffer_clear(state->object_delim); + } else { + state->object_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->object_delim, ','); + if (state->object_delim2) { + fbuffer_clear(state->object_delim2); + } else { + state->object_delim2 = fbuffer_alloc(16); + } + if (state->space_before) fbuffer_append(state->object_delim2, state->space_before, state->space_before_len); + fbuffer_append_char(state->object_delim2, ':'); + if (state->space) fbuffer_append(state->object_delim2, state->space, state->space_len); + + if (state->array_delim) { + fbuffer_clear(state->array_delim); + } else { + state->array_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->array_delim, ','); + if (state->array_nl) fbuffer_append(state->array_delim, state->array_nl, state->array_nl_len); + return buffer; +} + +static VALUE cState_partial_generate(VALUE self, VALUE obj) +{ + FBuffer *buffer = cState_prepare_buffer(self); + GET_STATE(self); + generate_json(buffer, self, state, obj); + return fbuffer_to_s(buffer); +} + +/* + * call-seq: generate(obj) + * + * Generates a valid JSON document from object +obj+ and returns the + * result. If no valid JSON document can be created this method raises a + * GeneratorError exception. 
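+ *
+ * Editor's sketch (not upstream documentation):
+ *
+ *   state = JSON::Ext::Generator::State.new
+ *   state.generate("a" => [1, 2])   # => "{\"a\":[1,2]}"
+ *   state.generate(0.0 / 0.0)       # raises JSON::GeneratorError (allow_nan is false)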
+ */ +static VALUE cState_generate(VALUE self, VALUE obj) +{ + VALUE result = cState_partial_generate(self, obj); + GET_STATE(self); + (void)state; + return result; +} + +/* + * call-seq: new(opts = {}) + * + * Instantiates a new State object, configured by _opts_. + * + * _opts_ can have the following keys: + * + * * *indent*: a string used to indent levels (default: ''), + * * *space*: a string that is put after, a : or , delimiter (default: ''), + * * *space_before*: a string that is put before a : pair delimiter (default: ''), + * * *object_nl*: a string that is put at the end of a JSON object (default: ''), + * * *array_nl*: a string that is put at the end of a JSON array (default: ''), + * * *allow_nan*: true if NaN, Infinity, and -Infinity should be + * generated, otherwise an exception is thrown, if these values are + * encountered. This options defaults to false. + * * *ascii_only*: true if only ASCII characters should be generated. This + * option defaults to false. + * * *buffer_initial_length*: sets the initial length of the generator's + * internal buffer. + */ +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE opts; + GET_STATE(self); + state->max_nesting = 100; + state->buffer_initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + rb_scan_args(argc, argv, "01", &opts); + if (!NIL_P(opts)) cState_configure(self, opts); + return self; +} + +/* + * call-seq: initialize_copy(orig) + * + * Initializes this object from orig if it can be duplicated/cloned and returns + * it. +*/ +static VALUE cState_init_copy(VALUE obj, VALUE orig) +{ + JSON_Generator_State *objState, *origState; + + if (obj == orig) return obj; + GET_STATE_TO(obj, objState); + GET_STATE_TO(orig, origState); + if (!objState) rb_raise(rb_eArgError, "unallocated JSON::State"); + + MEMCPY(objState, origState, JSON_Generator_State, 1); + objState->indent = fstrndup(origState->indent, origState->indent_len); + objState->space = fstrndup(origState->space, origState->space_len); + objState->space_before = fstrndup(origState->space_before, origState->space_before_len); + objState->object_nl = fstrndup(origState->object_nl, origState->object_nl_len); + objState->array_nl = fstrndup(origState->array_nl, origState->array_nl_len); + if (origState->array_delim) objState->array_delim = fbuffer_dup(origState->array_delim); + if (origState->object_delim) objState->object_delim = fbuffer_dup(origState->object_delim); + if (origState->object_delim2) objState->object_delim2 = fbuffer_dup(origState->object_delim2); + return obj; +} + +/* + * call-seq: from_state(opts) + * + * Creates a State object from _opts_, which ought to be Hash to create a + * new State instance configured by _opts_, something else to create an + * unconfigured instance. If _opts_ is a State object, it is just returned. + */ +static VALUE cState_from_state_s(VALUE self, VALUE opts) +{ + if (rb_obj_is_kind_of(opts, self)) { + return opts; + } else if (rb_obj_is_kind_of(opts, rb_cHash)) { + return rb_funcall(self, i_new, 1, opts); + } else { + return rb_class_new_instance(0, NULL, cState); + } +} + +/* + * call-seq: indent() + * + * Returns the string that is used to indent levels in the JSON text. + */ +static VALUE cState_indent(VALUE self) +{ + GET_STATE(self); + return state->indent ? rb_str_new(state->indent, state->indent_len) : rb_str_new2(""); +} + +/* + * call-seq: indent=(indent) + * + * Sets the string that is used to indent levels in the JSON text. 
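+ *
+ * Editor's sketch (not upstream documentation):
+ *
+ *   state = JSON::Ext::Generator::State.new(object_nl: "\n", space: ' ')
+ *   state.indent = '  '
+ *   state.generate("a" => 1)   # => "{\n  \"a\": 1\n}"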
+ */ +static VALUE cState_indent_set(VALUE self, VALUE indent) +{ + unsigned long len; + GET_STATE(self); + Check_Type(indent, T_STRING); + len = RSTRING_LEN(indent); + if (len == 0) { + if (state->indent) { + ruby_xfree(state->indent); + state->indent = NULL; + state->indent_len = 0; + } + } else { + if (state->indent) ruby_xfree(state->indent); + state->indent = fstrndup(RSTRING_PTR(indent), len); + state->indent_len = len; + } + return Qnil; +} + +/* + * call-seq: space() + * + * Returns the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space(VALUE self) +{ + GET_STATE(self); + return state->space ? rb_str_new(state->space, state->space_len) : rb_str_new2(""); +} + +/* + * call-seq: space=(space) + * + * Sets _space_ to the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space_set(VALUE self, VALUE space) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space, T_STRING); + len = RSTRING_LEN(space); + if (len == 0) { + if (state->space) { + ruby_xfree(state->space); + state->space = NULL; + state->space_len = 0; + } + } else { + if (state->space) ruby_xfree(state->space); + state->space = fstrndup(RSTRING_PTR(space), len); + state->space_len = len; + } + return Qnil; +} + +/* + * call-seq: space_before() + * + * Returns the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before(VALUE self) +{ + GET_STATE(self); + return state->space_before ? rb_str_new(state->space_before, state->space_before_len) : rb_str_new2(""); +} + +/* + * call-seq: space_before=(space_before) + * + * Sets the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before_set(VALUE self, VALUE space_before) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space_before, T_STRING); + len = RSTRING_LEN(space_before); + if (len == 0) { + if (state->space_before) { + ruby_xfree(state->space_before); + state->space_before = NULL; + state->space_before_len = 0; + } + } else { + if (state->space_before) ruby_xfree(state->space_before); + state->space_before = fstrndup(RSTRING_PTR(space_before), len); + state->space_before_len = len; + } + return Qnil; +} + +/* + * call-seq: object_nl() + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl(VALUE self) +{ + GET_STATE(self); + return state->object_nl ? rb_str_new(state->object_nl, state->object_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: object_nl=(object_nl) + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(object_nl, T_STRING); + len = RSTRING_LEN(object_nl); + if (len == 0) { + if (state->object_nl) { + ruby_xfree(state->object_nl); + state->object_nl = NULL; + } + } else { + if (state->object_nl) ruby_xfree(state->object_nl); + state->object_nl = fstrndup(RSTRING_PTR(object_nl), len); + state->object_nl_len = len; + } + return Qnil; +} + +/* + * call-seq: array_nl() + * + * This string is put at the end of a line that holds a JSON array. + */ +static VALUE cState_array_nl(VALUE self) +{ + GET_STATE(self); + return state->array_nl ? rb_str_new(state->array_nl, state->array_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: array_nl=(array_nl) + * + * This string is put at the end of a line that holds a JSON array. 
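+ *
+ * Editor's sketch (not upstream documentation):
+ *
+ *   state = JSON::Ext::Generator::State.new(indent: '  ')
+ *   state.array_nl = "\n"
+ *   state.generate([1, 2])   # => "[\n  1,\n  2\n]"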
+ */ +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(array_nl, T_STRING); + len = RSTRING_LEN(array_nl); + if (len == 0) { + if (state->array_nl) { + ruby_xfree(state->array_nl); + state->array_nl = NULL; + } + } else { + if (state->array_nl) ruby_xfree(state->array_nl); + state->array_nl = fstrndup(RSTRING_PTR(array_nl), len); + state->array_nl_len = len; + } + return Qnil; +} + + +/* +* call-seq: check_circular? +* +* Returns true, if circular data structures should be checked, +* otherwise returns false. +*/ +static VALUE cState_check_circular_p(VALUE self) +{ + GET_STATE(self); + return state->max_nesting ? Qtrue : Qfalse; +} + +/* + * call-seq: max_nesting + * + * This integer returns the maximum level of data structure nesting in + * the generated JSON, max_nesting = 0 if no maximum is checked. + */ +static VALUE cState_max_nesting(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->max_nesting); +} + +/* + * call-seq: max_nesting=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_max_nesting_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + return state->max_nesting = FIX2LONG(depth); +} + +/* + * call-seq: escape_slash + * + * If this boolean is true, the forward slashes will be escaped in + * the json output. + */ +static VALUE cState_escape_slash(VALUE self) +{ + GET_STATE(self); + return state->escape_slash ? Qtrue : Qfalse; +} + +/* + * call-seq: escape_slash=(depth) + * + * This sets whether or not the forward slashes will be escaped in + * the json output. + */ +static VALUE cState_escape_slash_set(VALUE self, VALUE enable) +{ + GET_STATE(self); + state->escape_slash = RTEST(enable); + return Qnil; +} + +/* + * call-seq: allow_nan? + * + * Returns true, if NaN, Infinity, and -Infinity should be generated, otherwise + * returns false. + */ +static VALUE cState_allow_nan_p(VALUE self) +{ + GET_STATE(self); + return state->allow_nan ? Qtrue : Qfalse; +} + +/* + * call-seq: ascii_only? + * + * Returns true, if only ASCII characters should be generated. Otherwise + * returns false. + */ +static VALUE cState_ascii_only_p(VALUE self) +{ + GET_STATE(self); + return state->ascii_only ? Qtrue : Qfalse; +} + +/* + * call-seq: depth + * + * This integer returns the current depth of data structure nesting. + */ +static VALUE cState_depth(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->depth); +} + +/* + * call-seq: depth=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_depth_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + return Qnil; +} + +/* + * call-seq: buffer_initial_length + * + * This integer returns the current initial length of the buffer. + */ +static VALUE cState_buffer_initial_length(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->buffer_initial_length); +} + +/* + * call-seq: buffer_initial_length=(length) + * + * This sets the initial length of the buffer to +length+, if +length+ > 0, + * otherwise its value isn't changed. 
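+ *
+ * Editor's sketch (not upstream documentation; 1024 is assumed to be the
+ * compiled-in FBUFFER_INITIAL_LENGTH_DEFAULT for this gem version):
+ *
+ *   state = JSON::Ext::Generator::State.new
+ *   state.buffer_initial_length        # => 1024
+ *   state.buffer_initial_length = 0    # ignored, length must be > 0
+ *   state.buffer_initial_length        # => 1024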
+ */ +static VALUE cState_buffer_initial_length_set(VALUE self, VALUE buffer_initial_length) +{ + long initial_length; + GET_STATE(self); + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) { + state->buffer_initial_length = initial_length; + } + return Qnil; +} + +/* + * + */ +void Init_generator(void) +{ +#ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); +#endif + +#undef rb_intern + rb_require("json/common"); + + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + mGenerator = rb_define_module_under(mExt, "Generator"); + + eGeneratorError = rb_path2class("JSON::GeneratorError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eGeneratorError); + rb_gc_register_mark_object(eNestingError); + + cState = rb_define_class_under(mGenerator, "State", rb_cObject); + rb_define_alloc_func(cState, cState_s_allocate); + rb_define_singleton_method(cState, "from_state", cState_from_state_s, 1); + rb_define_method(cState, "initialize", cState_initialize, -1); + rb_define_method(cState, "initialize_copy", cState_init_copy, 1); + rb_define_method(cState, "indent", cState_indent, 0); + rb_define_method(cState, "indent=", cState_indent_set, 1); + rb_define_method(cState, "space", cState_space, 0); + rb_define_method(cState, "space=", cState_space_set, 1); + rb_define_method(cState, "space_before", cState_space_before, 0); + rb_define_method(cState, "space_before=", cState_space_before_set, 1); + rb_define_method(cState, "object_nl", cState_object_nl, 0); + rb_define_method(cState, "object_nl=", cState_object_nl_set, 1); + rb_define_method(cState, "array_nl", cState_array_nl, 0); + rb_define_method(cState, "array_nl=", cState_array_nl_set, 1); + rb_define_method(cState, "max_nesting", cState_max_nesting, 0); + rb_define_method(cState, "max_nesting=", cState_max_nesting_set, 1); + rb_define_method(cState, "escape_slash", cState_escape_slash, 0); + rb_define_method(cState, "escape_slash?", cState_escape_slash, 0); + rb_define_method(cState, "escape_slash=", cState_escape_slash_set, 1); + rb_define_method(cState, "check_circular?", cState_check_circular_p, 0); + rb_define_method(cState, "allow_nan?", cState_allow_nan_p, 0); + rb_define_method(cState, "ascii_only?", cState_ascii_only_p, 0); + rb_define_method(cState, "depth", cState_depth, 0); + rb_define_method(cState, "depth=", cState_depth_set, 1); + rb_define_method(cState, "buffer_initial_length", cState_buffer_initial_length, 0); + rb_define_method(cState, "buffer_initial_length=", cState_buffer_initial_length_set, 1); + rb_define_method(cState, "configure", cState_configure, 1); + rb_define_alias(cState, "merge", "configure"); + rb_define_method(cState, "to_h", cState_to_h, 0); + rb_define_alias(cState, "to_hash", "to_h"); + rb_define_method(cState, "[]", cState_aref, 1); + rb_define_method(cState, "[]=", cState_aset, 2); + rb_define_method(cState, "generate", cState_generate, 1); + + mGeneratorMethods = rb_define_module_under(mGenerator, "GeneratorMethods"); + mObject = rb_define_module_under(mGeneratorMethods, "Object"); + rb_define_method(mObject, "to_json", mObject_to_json, -1); + mHash = rb_define_module_under(mGeneratorMethods, "Hash"); + rb_define_method(mHash, "to_json", mHash_to_json, -1); + mArray = rb_define_module_under(mGeneratorMethods, "Array"); + rb_define_method(mArray, "to_json", mArray_to_json, -1); +#ifdef RUBY_INTEGER_UNIFICATION + mInteger = rb_define_module_under(mGeneratorMethods, 
"Integer"); + rb_define_method(mInteger, "to_json", mInteger_to_json, -1); +#else + mFixnum = rb_define_module_under(mGeneratorMethods, "Fixnum"); + rb_define_method(mFixnum, "to_json", mFixnum_to_json, -1); + mBignum = rb_define_module_under(mGeneratorMethods, "Bignum"); + rb_define_method(mBignum, "to_json", mBignum_to_json, -1); +#endif + mFloat = rb_define_module_under(mGeneratorMethods, "Float"); + rb_define_method(mFloat, "to_json", mFloat_to_json, -1); + mString = rb_define_module_under(mGeneratorMethods, "String"); + rb_define_singleton_method(mString, "included", mString_included_s, 1); + rb_define_method(mString, "to_json", mString_to_json, -1); + rb_define_method(mString, "to_json_raw", mString_to_json_raw, -1); + rb_define_method(mString, "to_json_raw_object", mString_to_json_raw_object, 0); + mString_Extend = rb_define_module_under(mString, "Extend"); + rb_define_method(mString_Extend, "json_create", mString_Extend_json_create, 1); + mTrueClass = rb_define_module_under(mGeneratorMethods, "TrueClass"); + rb_define_method(mTrueClass, "to_json", mTrueClass_to_json, -1); + mFalseClass = rb_define_module_under(mGeneratorMethods, "FalseClass"); + rb_define_method(mFalseClass, "to_json", mFalseClass_to_json, -1); + mNilClass = rb_define_module_under(mGeneratorMethods, "NilClass"); + rb_define_method(mNilClass, "to_json", mNilClass_to_json, -1); + + i_to_s = rb_intern("to_s"); + i_to_json = rb_intern("to_json"); + i_new = rb_intern("new"); + i_indent = rb_intern("indent"); + i_space = rb_intern("space"); + i_space_before = rb_intern("space_before"); + i_object_nl = rb_intern("object_nl"); + i_array_nl = rb_intern("array_nl"); + i_max_nesting = rb_intern("max_nesting"); + i_escape_slash = rb_intern("escape_slash"); + i_allow_nan = rb_intern("allow_nan"); + i_ascii_only = rb_intern("ascii_only"); + i_depth = rb_intern("depth"); + i_buffer_initial_length = rb_intern("buffer_initial_length"); + i_pack = rb_intern("pack"); + i_unpack = rb_intern("unpack"); + i_create_id = rb_intern("create_id"); + i_extend = rb_intern("extend"); + i_key_p = rb_intern("key?"); + i_aref = rb_intern("[]"); + i_send = rb_intern("__send__"); + i_respond_to_p = rb_intern("respond_to?"); + i_match = rb_intern("match"); + i_keys = rb_intern("keys"); + i_dup = rb_intern("dup"); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/generator.h b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/generator.h new file mode 100644 index 0000000..3ebd622 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/generator/generator.h @@ -0,0 +1,174 @@ +#ifndef _GENERATOR_H_ +#define _GENERATOR_H_ + +#include +#include + +#include "ruby.h" + +#ifdef HAVE_RUBY_RE_H +#include "ruby/re.h" +#else +#include "re.h" +#endif + +#ifndef rb_intern_str +#define rb_intern_str(string) SYM2ID(rb_str_intern(string)) +#endif + +#ifndef rb_obj_instance_variables +#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0) +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode definitions */ + +#define UNI_STRICT_CONVERSION 1 + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_MAX_BMP (UTF32)0x0000FFFF +#define UNI_MAX_UTF16 (UTF32)0x0010FFFF +#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF +#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF + 
+#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +static const int halfShift = 10; /* used for shifting by 10 bits */ + +static const UTF32 halfBase = 0x0010000UL; +static const UTF32 halfMask = 0x3FFUL; + +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length); +static void unicode_escape(char *buf, UTF16 character); +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character); +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string, char escape_slash); +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string, char escape_slash); +static char *fstrndup(const char *ptr, unsigned long len); + +/* ruby api and some helpers */ + +typedef struct JSON_Generator_StateStruct { + char *indent; + long indent_len; + char *space; + long space_len; + char *space_before; + long space_before_len; + char *object_nl; + long object_nl_len; + char *array_nl; + long array_nl_len; + FBuffer *array_delim; + FBuffer *object_delim; + FBuffer *object_delim2; + long max_nesting; + char allow_nan; + char ascii_only; + char escape_slash; + long depth; + long buffer_initial_length; +} JSON_Generator_State; + +#define GET_STATE_TO(self, state) \ + TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state) + +#define GET_STATE(self) \ + JSON_Generator_State *state; \ + GET_STATE_TO(self, state) + +#define GENERATE_JSON(type) \ + FBuffer *buffer; \ + VALUE Vstate; \ + JSON_Generator_State *state; \ + \ + rb_scan_args(argc, argv, "01", &Vstate); \ + Vstate = cState_from_state_s(cState, Vstate); \ + TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \ + buffer = cState_prepare_buffer(Vstate); \ + generate_json_##type(buffer, Vstate, state, self); \ + return fbuffer_to_s(buffer) + +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self); +#ifdef RUBY_INTEGER_UNIFICATION +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self); +#else +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self); +#endif +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_included_s(VALUE self, VALUE modul); +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_to_json_raw_object(VALUE self); +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self); +static VALUE mString_Extend_json_create(VALUE self, VALUE o); +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self); +static void State_free(void *state); +static VALUE cState_s_allocate(VALUE klass); +static VALUE cState_configure(VALUE self, VALUE opts); +static VALUE cState_to_h(VALUE self); +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void 
generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#endif +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static VALUE cState_partial_generate(VALUE self, VALUE obj); +static VALUE cState_generate(VALUE self, VALUE obj); +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cState_from_state_s(VALUE self, VALUE opts); +static VALUE cState_indent(VALUE self); +static VALUE cState_indent_set(VALUE self, VALUE indent); +static VALUE cState_space(VALUE self); +static VALUE cState_space_set(VALUE self, VALUE space); +static VALUE cState_space_before(VALUE self); +static VALUE cState_space_before_set(VALUE self, VALUE space_before); +static VALUE cState_object_nl(VALUE self); +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl); +static VALUE cState_array_nl(VALUE self); +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl); +static VALUE cState_max_nesting(VALUE self); +static VALUE cState_max_nesting_set(VALUE self, VALUE depth); +static VALUE cState_allow_nan_p(VALUE self); +static VALUE cState_ascii_only_p(VALUE self); +static VALUE cState_depth(VALUE self); +static VALUE cState_depth_set(VALUE self, VALUE depth); +static VALUE cState_escape_slash(VALUE self); +static VALUE cState_escape_slash_set(VALUE self, VALUE depth); +static FBuffer *cState_prepare_buffer(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Generator_State_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json) +#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/depend b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/depend new file mode 100644 index 0000000..498ffa9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/depend @@ -0,0 +1 @@ +parser.o: parser.c parser.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/extconf.rb new file mode 100644 index 0000000..feb586e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/extconf.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: false +require 'mkmf' + +have_func("rb_enc_raise", "ruby.h") +have_func("rb_enc_interned_str", "ruby.h") + +# checking if String#-@ (str_uminus) dedupes... 
' +begin + a = -(%w(t e s t).join) + b = -(%w(t e s t).join) + if a.equal?(b) + $CFLAGS << ' -DSTR_UMINUS_DEDUPE=1 ' + else + $CFLAGS << ' -DSTR_UMINUS_DEDUPE=0 ' + end +rescue NoMethodError + $CFLAGS << ' -DSTR_UMINUS_DEDUPE=0 ' +end + +# checking if String#-@ (str_uminus) directly interns frozen strings... ' +begin + s = rand.to_s.freeze + if (-s).equal?(s) && (-s.dup).equal?(s) + $CFLAGS << ' -DSTR_UMINUS_DEDUPE_FROZEN=1 ' + else + $CFLAGS << ' -DSTR_UMINUS_DEDUPE_FROZEN=0 ' + end +rescue NoMethodError + $CFLAGS << ' -DSTR_UMINUS_DEDUPE_FROZEN=0 ' +end + +create_makefile 'json/ext/parser' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.c b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.c new file mode 100644 index 0000000..b1dc881 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.c @@ -0,0 +1,3338 @@ +/* This file is automatically generated from parser.rl by using ragel */ +#line 1 "parser.rl" +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) +{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); 
+ buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, +i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, +i_object_class, i_array_class, i_decimal_class, i_key_p, +i_deep_const_get, i_match, i_match_string, i_aset, i_aref, +i_leftshift, i_new, i_try_convert, i_freeze, i_uminus; + + +#line 125 "parser.rl" + + + +enum {JSON_object_start = 1}; +enum {JSON_object_first_final = 27}; +enum {JSON_object_error = 0}; + +enum {JSON_object_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 167 "parser.rl" + + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + + { + cs = (int)JSON_object_start; + } + + #line 182 "parser.rl" + + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 10: + goto st_case_10; + case 11: + goto st_case_11; + case 12: + goto st_case_12; + case 13: + goto st_case_13; + case 14: + goto st_case_14; + case 15: + goto st_case_15; + case 16: + goto st_case_16; + case 17: + goto st_case_17; + case 18: + goto st_case_18; + case 27: + goto st_case_27; + case 19: + goto st_case_19; + case 20: + goto st_case_20; + case 21: + goto st_case_21; + case 22: + goto st_case_22; + case 23: + goto st_case_23; + case 24: + goto st_case_24; + case 25: + goto st_case_25; + case 26: + goto st_case_26; + } + goto st_out; + st_case_1: + if ( ( (*( p))) == 123 ) { + goto st2; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 13: { + goto st2; + } + case 32: { + goto st2; + } + case 34: { + goto ctr2; + } + case 47: { + goto st23; + } + case 125: { + goto ctr4; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st2; + } + { + goto st0; + } + ctr2: + { + #line 149 "parser.rl" + + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, p, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 3; goto _out;} } else {p = (( np))-1;} + + } + + goto st3; + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + switch( ( (*( p))) ) { + case 13: { + 
goto st3; + } + case 32: { + goto st3; + } + case 47: { + goto st4; + } + case 58: { + goto st8; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st3; + } + { + goto st0; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 42: { + goto st5; + } + case 47: { + goto st7; + } + } + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) == 42 ) { + goto st6; + } + { + goto st5; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + switch( ( (*( p))) ) { + case 42: { + goto st6; + } + case 47: { + goto st3; + } + } + { + goto st5; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) == 10 ) { + goto st3; + } + { + goto st7; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 13: { + goto st8; + } + case 32: { + goto st8; + } + case 34: { + goto ctr11; + } + case 45: { + goto ctr11; + } + case 47: { + goto st19; + } + case 73: { + goto ctr11; + } + case 78: { + goto ctr11; + } + case 91: { + goto ctr11; + } + case 102: { + goto ctr11; + } + case 110: { + goto ctr11; + } + case 116: { + goto ctr11; + } + case 123: { + goto ctr11; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr11; + } + } else if ( ( (*( p))) >= 9 ) { + goto st8; + } + { + goto st0; + } + ctr11: + { + #line 133 "parser.rl" + + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + {p = p - 1; } {p+= 1; cs = 9; goto _out;} + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + {p = (( np))-1;} + + } + } + + goto st9; + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + switch( ( (*( p))) ) { + case 13: { + goto st9; + } + case 32: { + goto st9; + } + case 44: { + goto st10; + } + case 47: { + goto st15; + } + case 125: { + goto ctr4; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st9; + } + { + goto st0; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + switch( ( (*( p))) ) { + case 13: { + goto st10; + } + case 32: { + goto st10; + } + case 34: { + goto ctr2; + } + case 47: { + goto st11; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st10; + } + { + goto st0; + } + st11: + p+= 1; + if ( p == pe ) + goto _test_eof11; + st_case_11: + switch( ( (*( p))) ) { + case 42: { + goto st12; + } + case 47: { + goto st14; + } + } + { + goto st0; + } + st12: + p+= 1; + if ( p == pe ) + goto _test_eof12; + st_case_12: + if ( ( (*( p))) == 42 ) { + goto st13; + } + { + goto st12; + } + st13: + p+= 1; + if ( p == pe ) + goto _test_eof13; + st_case_13: + switch( ( (*( p))) ) { + case 42: { + goto st13; + } + case 47: { + goto st10; + } + } + { + goto st12; + } + st14: + p+= 1; + if ( p == pe ) + goto _test_eof14; + st_case_14: + if ( ( (*( p))) == 10 ) { + goto st10; + } + { + goto st14; + } + st15: + p+= 1; + if ( p == pe ) + goto _test_eof15; + st_case_15: + switch( ( (*( p))) ) { + case 42: { + goto st16; + } + case 47: { + goto st18; + } + } + { + goto st0; + } + st16: + p+= 1; + if ( p == pe ) + goto _test_eof16; + st_case_16: + if ( ( (*( p))) == 42 ) { + goto st17; + } + { + goto st16; + } + st17: + p+= 1; + if ( p == pe ) + goto _test_eof17; + st_case_17: + switch( ( (*( p))) ) { + case 42: { + goto st17; + } + case 47: { + goto st9; + } + } + { + goto st16; 
+ } + st18: + p+= 1; + if ( p == pe ) + goto _test_eof18; + st_case_18: + if ( ( (*( p))) == 10 ) { + goto st9; + } + { + goto st18; + } + ctr4: + { + #line 157 "parser.rl" + {p = p - 1; } {p+= 1; cs = 27; goto _out;} } + + goto st27; + st27: + p+= 1; + if ( p == pe ) + goto _test_eof27; + st_case_27: + { + goto st0; + } + st19: + p+= 1; + if ( p == pe ) + goto _test_eof19; + st_case_19: + switch( ( (*( p))) ) { + case 42: { + goto st20; + } + case 47: { + goto st22; + } + } + { + goto st0; + } + st20: + p+= 1; + if ( p == pe ) + goto _test_eof20; + st_case_20: + if ( ( (*( p))) == 42 ) { + goto st21; + } + { + goto st20; + } + st21: + p+= 1; + if ( p == pe ) + goto _test_eof21; + st_case_21: + switch( ( (*( p))) ) { + case 42: { + goto st21; + } + case 47: { + goto st8; + } + } + { + goto st20; + } + st22: + p+= 1; + if ( p == pe ) + goto _test_eof22; + st_case_22: + if ( ( (*( p))) == 10 ) { + goto st8; + } + { + goto st22; + } + st23: + p+= 1; + if ( p == pe ) + goto _test_eof23; + st_case_23: + switch( ( (*( p))) ) { + case 42: { + goto st24; + } + case 47: { + goto st26; + } + } + { + goto st0; + } + st24: + p+= 1; + if ( p == pe ) + goto _test_eof24; + st_case_24: + if ( ( (*( p))) == 42 ) { + goto st25; + } + { + goto st24; + } + st25: + p+= 1; + if ( p == pe ) + goto _test_eof25; + st_case_25: + switch( ( (*( p))) ) { + case 42: { + goto st25; + } + case 47: { + goto st2; + } + } + { + goto st24; + } + st26: + p+= 1; + if ( p == pe ) + goto _test_eof26; + st_case_26: + if ( ( (*( p))) == 10 ) { + goto st2; + } + { + goto st26; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 183 "parser.rl" + + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + + +enum {JSON_value_start = 1}; +enum {JSON_value_first_final = 29}; +enum {JSON_value_error = 0}; + +enum {JSON_value_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
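The create_additions branch above reads the create_id key (by default "json_class") out of the freshly parsed hash, resolves it with JSON.deep_const_get and hands the hash to that class's json_create. A minimal round trip, using a made-up Point class:

    require 'json'

    class Point
      attr_reader :x, :y

      def initialize(x, y)
        @x, @y = x, y
      end

      # Called by the parser when create_additions is enabled.
      def self.json_create(hash)
        new(hash['x'], hash['y'])
      end

      def to_json(*args)
        { JSON.create_id => self.class.name, 'x' => x, 'y' => y }.to_json(*args)
      end
    end

    doc = Point.new(1, 2).to_json            # => '{"json_class":"Point","x":1,"y":2}'
    JSON.parse(doc)                          # plain Hash; additions are off by default
    JSON.parse(doc, create_additions: true)  # => a Point built via Point.json_create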
0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 283 "parser.rl" + + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + + { + cs = (int)JSON_value_start; + } + + #line 290 "parser.rl" + + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 29: + goto st_case_29; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 10: + goto st_case_10; + case 11: + goto st_case_11; + case 12: + goto st_case_12; + case 13: + goto st_case_13; + case 14: + goto st_case_14; + case 15: + goto st_case_15; + case 16: + goto st_case_16; + case 17: + goto st_case_17; + case 18: + goto st_case_18; + case 19: + goto st_case_19; + case 20: + goto st_case_20; + case 21: + goto st_case_21; + case 22: + goto st_case_22; + case 23: + goto st_case_23; + case 24: + goto st_case_24; + case 25: + goto st_case_25; + case 26: + goto st_case_26; + case 27: + goto st_case_27; + case 28: + goto st_case_28; + } + goto st_out; + st1: + p+= 1; + if ( p == pe ) + goto _test_eof1; + st_case_1: + switch( ( (*( p))) ) { + case 13: { + goto st1; + } + case 32: { + goto st1; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr3; + } + case 47: { + goto st6; + } + case 73: { + goto st10; + } + case 78: { + goto st17; + } + case 91: { + goto ctr7; + } + case 102: { + goto st19; + } + case 110: { + goto st23; + } + case 116: { + goto st26; + } + case 123: { + goto ctr11; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr3; + } + } else if ( ( (*( p))) >= 9 ) { + goto st1; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + ctr2: + { + #line 235 "parser.rl" + + char *np = JSON_parse_string(json, p, pe, result); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 29; goto _out;} } else {p = (( np))-1;} + + } + + goto st29; + ctr3: + { + #line 240 "parser.rl" + + char *np; + if(pe > p + 8 && !strncmp(MinusInfinity, p, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + {p = (( p + 10))-1;} + + {p = p - 1; } {p+= 1; cs = 29; goto _out;} + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + + np = JSON_parse_integer(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + + {p = p - 1; } {p+= 1; cs = 29; goto _out;} + } + + goto st29; + ctr7: + { + #line 258 "parser.rl" + + char *np; + np = JSON_parse_array(json, p, pe, result, current_nesting + 1); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 29; goto _out;} } else {p = (( np))-1;} + + } + + goto st29; + ctr11: + { + #line 264 "parser.rl" + + char *np; + np = JSON_parse_object(json, p, pe, result, current_nesting + 1); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 29; goto _out;} } else {p = (( np))-1;} + + } + + goto st29; + ctr25: + { + #line 228 "parser.rl" + + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + + goto st29; + ctr27: + { + #line 221 "parser.rl" + + if (json->allow_nan) { + *result = CNaN; + } else { + 
rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + + goto st29; + ctr31: + { + #line 215 "parser.rl" + + *result = Qfalse; + } + + goto st29; + ctr34: + { + #line 212 "parser.rl" + + *result = Qnil; + } + + goto st29; + ctr37: + { + #line 218 "parser.rl" + + *result = Qtrue; + } + + goto st29; + st29: + p+= 1; + if ( p == pe ) + goto _test_eof29; + st_case_29: + { + #line 270 "parser.rl" + {p = p - 1; } {p+= 1; cs = 29; goto _out;} } + switch( ( (*( p))) ) { + case 13: { + goto st29; + } + case 32: { + goto st29; + } + case 47: { + goto st2; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st29; + } + { + goto st0; + } + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 42: { + goto st3; + } + case 47: { + goto st5; + } + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( ( (*( p))) == 42 ) { + goto st4; + } + { + goto st3; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 42: { + goto st4; + } + case 47: { + goto st29; + } + } + { + goto st3; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) == 10 ) { + goto st29; + } + { + goto st5; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + switch( ( (*( p))) ) { + case 42: { + goto st7; + } + case 47: { + goto st9; + } + } + { + goto st0; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) == 42 ) { + goto st8; + } + { + goto st7; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 42: { + goto st8; + } + case 47: { + goto st1; + } + } + { + goto st7; + } + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + if ( ( (*( p))) == 10 ) { + goto st1; + } + { + goto st9; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + if ( ( (*( p))) == 110 ) { + goto st11; + } + { + goto st0; + } + st11: + p+= 1; + if ( p == pe ) + goto _test_eof11; + st_case_11: + if ( ( (*( p))) == 102 ) { + goto st12; + } + { + goto st0; + } + st12: + p+= 1; + if ( p == pe ) + goto _test_eof12; + st_case_12: + if ( ( (*( p))) == 105 ) { + goto st13; + } + { + goto st0; + } + st13: + p+= 1; + if ( p == pe ) + goto _test_eof13; + st_case_13: + if ( ( (*( p))) == 110 ) { + goto st14; + } + { + goto st0; + } + st14: + p+= 1; + if ( p == pe ) + goto _test_eof14; + st_case_14: + if ( ( (*( p))) == 105 ) { + goto st15; + } + { + goto st0; + } + st15: + p+= 1; + if ( p == pe ) + goto _test_eof15; + st_case_15: + if ( ( (*( p))) == 116 ) { + goto st16; + } + { + goto st0; + } + st16: + p+= 1; + if ( p == pe ) + goto _test_eof16; + st_case_16: + if ( ( (*( p))) == 121 ) { + goto ctr25; + } + { + goto st0; + } + st17: + p+= 1; + if ( p == pe ) + goto _test_eof17; + st_case_17: + if ( ( (*( p))) == 97 ) { + goto st18; + } + { + goto st0; + } + st18: + p+= 1; + if ( p == pe ) + goto _test_eof18; + st_case_18: + if ( ( (*( p))) == 78 ) { + goto ctr27; + } + { + goto st0; + } + st19: + p+= 1; + if ( p == pe ) + goto _test_eof19; + st_case_19: + if ( ( (*( p))) == 97 ) { + goto st20; + } + { + goto st0; + } + st20: + p+= 1; + if ( p == pe ) + goto _test_eof20; + st_case_20: + if ( ( (*( p))) == 108 ) { + goto st21; + } + { + goto st0; + } + st21: + p+= 1; + if ( p == pe ) + goto _test_eof21; + st_case_21: + if ( ( (*( p))) == 115 ) { + goto st22; + } + { + goto st0; + } + st22: + p+= 1; + if ( p == pe ) + goto 
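The parse_nan, parse_infinity and parse_number actions above only admit NaN, Infinity and -Infinity when allow_nan is set; otherwise they raise JSON::ParserError. For example:

    require 'json'

    JSON.parse('[NaN, Infinity, -Infinity]', allow_nan: true)
    # => [NaN, Infinity, -Infinity]   (Float::NAN and the two Float infinities)

    begin
      JSON.parse('[NaN]')             # allow_nan defaults to false
    rescue JSON::ParserError => e
      puts e.message                  # "unexpected token at ..." as raised above
    end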
_test_eof22; + st_case_22: + if ( ( (*( p))) == 101 ) { + goto ctr31; + } + { + goto st0; + } + st23: + p+= 1; + if ( p == pe ) + goto _test_eof23; + st_case_23: + if ( ( (*( p))) == 117 ) { + goto st24; + } + { + goto st0; + } + st24: + p+= 1; + if ( p == pe ) + goto _test_eof24; + st_case_24: + if ( ( (*( p))) == 108 ) { + goto st25; + } + { + goto st0; + } + st25: + p+= 1; + if ( p == pe ) + goto _test_eof25; + st_case_25: + if ( ( (*( p))) == 108 ) { + goto ctr34; + } + { + goto st0; + } + st26: + p+= 1; + if ( p == pe ) + goto _test_eof26; + st_case_26: + if ( ( (*( p))) == 114 ) { + goto st27; + } + { + goto st0; + } + st27: + p+= 1; + if ( p == pe ) + goto _test_eof27; + st_case_27: + if ( ( (*( p))) == 117 ) { + goto st28; + } + { + goto st0; + } + st28: + p+= 1; + if ( p == pe ) + goto _test_eof28; + st_case_28: + if ( ( (*( p))) == 101 ) { + goto ctr37; + } + { + goto st0; + } + st_out: + _test_eof1: cs = 1; goto _test_eof; + _test_eof29: cs = 29; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof28: cs = 28; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 291 "parser.rl" + + + if (json->freeze) { + OBJ_FREEZE(*result); + } + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + + +enum {JSON_integer_start = 1}; +enum {JSON_integer_first_final = 3}; +enum {JSON_integer_error = 0}; + +enum {JSON_integer_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 311 "parser.rl" + + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + + { + cs = (int)JSON_integer_start; + } + + #line 318 "parser.rl" + + json->memo = p; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + } + goto st_out; + st_case_1: + switch( ( (*( p))) ) { + case 45: { + goto st2; + } + case 48: { + goto st3; + } + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + if ( ( (*( p))) == 48 ) { + goto st3; + } + if ( 49 <= ( (*( 
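When json->freeze is set, JSON_parse_value freezes every value it produces (the OBJ_FREEZE call just above), which is what the Ruby-level freeze: true option turns on:

    require 'json'

    h = JSON.parse('{"name": "zephyr", "tags": ["a"]}', freeze: true)
    h.frozen?             # => true
    h['name'].frozen?     # => true
    h['tags'].frozen?     # => true, and so is h['tags'][0]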
p))) && ( (*( p))) <= 57 ) { + goto st5; + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st0; + } + { + goto ctr4; + } + ctr4: + { + #line 308 "parser.rl" + {p = p - 1; } {p+= 1; cs = 4; goto _out;} } + + goto st4; + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + { + goto ctr4; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 320 "parser.rl" + + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + + +enum {JSON_float_start = 1}; +enum {JSON_float_first_final = 8}; +enum {JSON_float_error = 0}; + +enum {JSON_float_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 345 "parser.rl" + + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + + { + cs = (int)JSON_float_start; + } + + #line 352 "parser.rl" + + json->memo = p; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 10: + goto st_case_10; + case 7: + goto st_case_7; + } + goto st_out; + st_case_1: + switch( ( (*( p))) ) { + case 45: { + goto st2; + } + case 48: { + goto st3; + } + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + if ( ( (*( p))) == 48 ) { + goto st3; + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + switch( ( (*( p))) ) { + case 46: { + goto st4; + } + case 69: { + goto st5; + } + case 101: { + goto st5; + } + } + { + goto st0; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st8; + } + { + goto st0; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 69: { + goto st5; + } + case 101: { + goto st5; + } + } + if ( ( (*( p))) > 46 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st8; + } + } else if ( ( (*( p))) >= 45 ) { + goto st0; + } + { + goto ctr9; + } + ctr9: + { + #line 339 "parser.rl" + {p = p - 1; } {p+= 1; cs = 9; goto _out;} } + + goto st9; + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + switch( ( (*( p))) ) { + case 43: { + goto st6; + } + case 
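JSON_parse_integer above simply re-scans the matched digits with rb_cstr2inum, so integers of any size come back as Ruby Integers:

    require 'json'

    JSON.parse('[0, -7, 12345678901234567890]')
    # => [0, -7, 12345678901234567890]   — the last one is a bignum, no precision loss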
45: { + goto st6; + } + } + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st10; + } + { + goto st0; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st10; + } + { + goto st0; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + switch( ( (*( p))) ) { + case 69: { + goto st0; + } + case 101: { + goto st0; + } + } + if ( ( (*( p))) > 46 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st10; + } + } else if ( ( (*( p))) >= 45 ) { + goto st0; + } + { + goto ctr9; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + switch( ( (*( p))) ) { + case 46: { + goto st4; + } + case 69: { + goto st5; + } + case 101: { + goto st5; + } + } + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + { + goto st0; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 354 "parser.rl" + + + if (cs >= JSON_float_first_final) { + VALUE mod = Qnil; + ID method_id = 0; + if (rb_respond_to(json->decimal_class, i_try_convert)) { + mod = json->decimal_class; + method_id = i_try_convert; + } else if (rb_respond_to(json->decimal_class, i_new)) { + mod = json->decimal_class; + method_id = i_new; + } else if (RB_TYPE_P(json->decimal_class, T_CLASS)) { + VALUE name = rb_class_name(json->decimal_class); + const char *name_cstr = RSTRING_PTR(name); + const char *last_colon = strrchr(name_cstr, ':'); + if (last_colon) { + const char *mod_path_end = last_colon - 1; + VALUE mod_path = rb_str_substr(name, 0, mod_path_end - name_cstr); + mod = rb_path_to_class(mod_path); + + const char *method_name_beg = last_colon + 1; + long before_len = method_name_beg - name_cstr; + long len = RSTRING_LEN(name) - before_len; + VALUE method_name = rb_str_substr(name, before_len, len); + method_id = SYM2ID(rb_str_intern(method_name)); + } else { + mod = rb_mKernel; + method_id = SYM2ID(rb_str_intern(name)); + } + } + + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + + if (method_id) { + VALUE text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + *result = rb_funcallv(mod, method_id, 1, &text); + } else { + *result = DBL2NUM(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } + + return p + 1; + } else { + return NULL; + } +} + + + +enum {JSON_array_start = 1}; +enum {JSON_array_first_final = 17}; +enum {JSON_array_error = 0}; + +enum {JSON_array_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 432 "parser.rl" + + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = 
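JSON_parse_float above normally builds a Float with rb_cstr_to_dbl, but when a decimal_class is given it resolves a constructor for it: try_convert, then new, then the class name treated as a Kernel method, which is how BigDecimal ends up going through Kernel#BigDecimal. A sketch:

    require 'json'
    require 'bigdecimal'

    JSON.parse('{"price": 9.99}')['price'].class
    # => Float
    JSON.parse('{"price": 9.99}', decimal_class: BigDecimal)['price'].class
    # => BigDecimal, built from the literal text instead of a binary float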
NIL_P(array_class) ? rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + + { + cs = (int)JSON_array_start; + } + + #line 445 "parser.rl" + + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 10: + goto st_case_10; + case 11: + goto st_case_11; + case 12: + goto st_case_12; + case 17: + goto st_case_17; + case 13: + goto st_case_13; + case 14: + goto st_case_14; + case 15: + goto st_case_15; + case 16: + goto st_case_16; + } + goto st_out; + st_case_1: + if ( ( (*( p))) == 91 ) { + goto st2; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 13: { + goto st2; + } + case 32: { + goto st2; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr2; + } + case 47: { + goto st13; + } + case 73: { + goto ctr2; + } + case 78: { + goto ctr2; + } + case 91: { + goto ctr2; + } + case 93: { + goto ctr4; + } + case 102: { + goto ctr2; + } + case 110: { + goto ctr2; + } + case 116: { + goto ctr2; + } + case 123: { + goto ctr2; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr2; + } + } else if ( ( (*( p))) >= 9 ) { + goto st2; + } + { + goto st0; + } + ctr2: + { + #line 409 "parser.rl" + + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + {p = p - 1; } {p+= 1; cs = 3; goto _out;} + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + {p = (( np))-1;} + + } + } + + goto st3; + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + switch( ( (*( p))) ) { + case 13: { + goto st3; + } + case 32: { + goto st3; + } + case 44: { + goto st4; + } + case 47: { + goto st9; + } + case 93: { + goto ctr4; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st3; + } + { + goto st0; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 13: { + goto st4; + } + case 32: { + goto st4; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr2; + } + case 47: { + goto st5; + } + case 73: { + goto ctr2; + } + case 78: { + goto ctr2; + } + case 91: { + goto ctr2; + } + case 102: { + goto ctr2; + } + case 110: { + goto ctr2; + } + case 116: { + goto ctr2; + } + case 123: { + goto ctr2; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr2; + } + } else if ( ( (*( p))) >= 9 ) { + goto st4; + } + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + switch( ( (*( p))) ) { + case 42: { + goto st6; + } + case 47: { + goto st8; + } + } + { + goto st0; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + if ( ( (*( p))) == 42 ) { + goto st7; + } + { + goto st6; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + switch( ( (*( p))) ) { + case 42: { + goto st7; + } + case 47: { + goto st4; + } + } + { + goto st6; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + if ( ( (*( p))) == 10 ) { + goto st4; + } + { + goto st8; + } + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + switch( ( (*( p))) ) { + case 42: { + goto st10; + } + case 47: { + goto 
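The parse_value action above appends to the default Array with rb_ary_push, but a custom array_class is filled through #<< and, in the object parser shown earlier, a custom object_class through #[]=. A sketch with OpenStruct and a made-up Array subclass:

    require 'json'
    require 'ostruct'

    JSON.parse('{"a": [1, 2]}', object_class: OpenStruct)
    # => #<OpenStruct a=[1, 2]>          (populated through #[]=)

    class TaggedArray < Array            # hypothetical example class
      attr_accessor :tag
    end

    JSON.parse('[1, 2, 3]', array_class: TaggedArray).class
    # => TaggedArray                     (populated element by element through #<<)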
st12; + } + } + { + goto st0; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + if ( ( (*( p))) == 42 ) { + goto st11; + } + { + goto st10; + } + st11: + p+= 1; + if ( p == pe ) + goto _test_eof11; + st_case_11: + switch( ( (*( p))) ) { + case 42: { + goto st11; + } + case 47: { + goto st3; + } + } + { + goto st10; + } + st12: + p+= 1; + if ( p == pe ) + goto _test_eof12; + st_case_12: + if ( ( (*( p))) == 10 ) { + goto st3; + } + { + goto st12; + } + ctr4: + { + #line 424 "parser.rl" + {p = p - 1; } {p+= 1; cs = 17; goto _out;} } + + goto st17; + st17: + p+= 1; + if ( p == pe ) + goto _test_eof17; + st_case_17: + { + goto st0; + } + st13: + p+= 1; + if ( p == pe ) + goto _test_eof13; + st_case_13: + switch( ( (*( p))) ) { + case 42: { + goto st14; + } + case 47: { + goto st16; + } + } + { + goto st0; + } + st14: + p+= 1; + if ( p == pe ) + goto _test_eof14; + st_case_14: + if ( ( (*( p))) == 42 ) { + goto st15; + } + { + goto st14; + } + st15: + p+= 1; + if ( p == pe ) + goto _test_eof15; + st_case_15: + switch( ( (*( p))) ) { + case 42: { + goto st15; + } + case 47: { + goto st2; + } + } + { + goto st14; + } + st16: + p+= 1; + if ( p == pe ) + goto _test_eof16; + st_case_16: + if ( ( (*( p))) == 10 ) { + goto st2; + } + { + goto st16; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 446 "parser.rl" + + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static const size_t MAX_STACK_BUFFER_SIZE = 128; +static VALUE json_string_unescape(char *string, char *stringEnd, int intern, int symbolize) +{ + VALUE result = Qnil; + size_t bufferSize = stringEnd - string; + char *p = string, *pe = string, *unescape, *bufferStart, *buffer; + int unescape_len; + char buf[4]; + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + bufferStart = buffer = ALLOC_N(char, bufferSize); + } else { + bufferStart = buffer = ALLOCA_N(char, bufferSize); + } + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > 
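json_string_unescape above rewrites the simple backslash escapes in place and decodes \uXXXX sequences, folding UTF-16 surrogate pairs into a single code point before re-encoding as UTF-8. For instance:

    require 'json'

    JSON.parse('["tab\tend", "\u00e9", "\ud83d\ude00"]')
    # => ["tab\tend", "é", "😀"]
    # "\u00e9" becomes one UTF-8 character; the \ud83d\ude00 surrogate pair
    # is combined into U+1F600 and then encoded as UTF-8.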
stringEnd - 6) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + MEMCPY(buffer, unescape, char, unescape_len); + buffer += unescape_len; + p = ++pe; + } else { + pe++; + } + } + + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + + # ifdef HAVE_RB_ENC_INTERNED_STR + if (intern) { + result = rb_enc_interned_str(bufferStart, (long)(buffer - bufferStart), rb_utf8_encoding()); + } else { + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + } + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + # else + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + + if (intern) { + # if STR_UMINUS_DEDUPE_FROZEN + // Starting from MRI 2.8 it is preferable to freeze the string + // before deduplication so that it can be interned directly + // otherwise it would be duplicated first which is wasteful. + result = rb_funcall(rb_str_freeze(result), i_uminus, 0); + # elif STR_UMINUS_DEDUPE + // MRI 2.5 and older do not deduplicate strings that are already + // frozen. + result = rb_funcall(result, i_uminus, 0); + # else + result = rb_str_freeze(result); + # endif + } + # endif + + if (symbolize) { + result = rb_str_intern(result); + } + + return result; +} + + +enum {JSON_string_start = 1}; +enum {JSON_string_first_final = 8}; +enum {JSON_string_error = 0}; + +enum {JSON_string_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 612 "parser.rl" + + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + + { + cs = (int)JSON_string_start; + } + + #line 632 "parser.rl" + + json->memo = p; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 8: + goto st_case_8; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + } + goto st_out; + st_case_1: + if ( ( (*( p))) == 34 ) { + goto st2; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 34: { + goto ctr2; + } + case 92: { + goto st3; + } + } + if ( 0 <= (signed char)(*(p)) && (*(p)) <= 31 ) { + goto st0; + } + { + goto st2; + } + ctr2: + { + #line 599 "parser.rl" 
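The interning block above uses rb_enc_interned_str when it is available and otherwise falls back to freezing plus String#-@, as detected at build time. Object keys are always parsed with interning on (and frozen in the object parser), so in practice:

    require 'json'

    a, b = JSON.parse('[{"id": 1}, {"id": 2}]')
    key_a = a.keys.first
    key_b = b.keys.first
    key_a.frozen?         # => true, keys are frozen by the parser
    key_a.equal?(key_b)   # => true where interning/deduplication is supported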
+ + *result = json_string_unescape(json->memo + 1, p, json->parsing_name || json-> freeze, json->parsing_name && json->symbolize_names); + if (NIL_P(*result)) { + {p = p - 1; } + {p+= 1; cs = 8; goto _out;} + } else { + {p = (( p + 1))-1;} + + } + } + { + #line 609 "parser.rl" + {p = p - 1; } {p+= 1; cs = 8; goto _out;} } + + goto st8; + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( ( (*( p))) == 117 ) { + goto st4; + } + if ( 0 <= (signed char)(*(p)) && (*(p)) <= 31 ) { + goto st0; + } + { + goto st2; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st5; + } + } else { + goto st5; + } + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st6; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st6; + } + } else { + goto st6; + } + { + goto st0; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st7; + } + } else { + goto st7; + } + { + goto st0; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st2; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st2; + } + } else { + goto st2; + } + { + goto st0; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 634 "parser.rl" + + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* +* Document-class: JSON::Ext::Parser +* +* This is the JSON parser implemented as a C extension. It can be configured +* to be used by setting +* +* JSON.parser = JSON::Ext::Parser +* +* with the method parser= in JSON. +* +*/ + +static VALUE convert_encoding(VALUE source) +{ + #ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } + #endif + return source; +} + +/* +* call-seq: new(source, opts => {}) +* +* Creates a new JSON::Ext::Parser instance for the string _source_. +* +* Creates a new JSON::Ext::Parser instance for the string _source_. +* +* It will be configured by the _opts_ hash. 
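convert_encoding above reinterprets ASCII-8BIT input as UTF-8 and converts any other encoding with rb_str_conv_enc before parsing, so callers should not need to transcode by hand; a sketch:

    require 'json'

    raw = '{"a": "é"}'.b                 # ASCII-8BIT bytes
    JSON.parse(raw)['a'].encoding        # => #<Encoding:UTF-8>, forced rather than converted

    utf16 = '{"a": 1}'.encode(Encoding::UTF_16LE)
    JSON.parse(utf16)                    # => {"a"=>1}, converted to UTF-8 first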
_opts_ can have the following +* keys: +* +* _opts_ can have the following keys: +* * *max_nesting*: The maximum depth of nesting allowed in the parsed data +* structures. Disable depth checking with :max_nesting => false|nil|0, it +* defaults to 100. +* * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in +* defiance of RFC 4627 to be parsed by the Parser. This option defaults to +* false. +* * *symbolize_names*: If set to true, returns symbols for the names +* (keys) in a JSON object. Otherwise strings are returned, which is +* also the default. It's not possible to use this option in +* conjunction with the *create_additions* option. +* * *create_additions*: If set to false, the Parser doesn't create +* additions even if a matching class and create_id was found. This option +* defaults to false. +* * *object_class*: Defaults to Hash +* * *array_class*: Defaults to Array +*/ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } + #ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); + #else + rb_scan_args(argc, argv, "11", &source, &opts); + #endif + if (!NIL_P(opts)) { + #ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { + #endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_freeze); + if (option_given_p(opts, tmp)) { + json->freeze = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->freeze = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
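cParser_initialize above pulls each documented option out of the opts hash, applies the defaults, and rejects symbolize_names combined with create_additions. Using the extension parser directly:

    require 'json'

    parser = JSON::Ext::Parser.new('{"id": 1, "tags": ["a", "b"]}',
                                   symbolize_names: true, max_nesting: 10)
    parser.parse
    # => {:id=>1, :tags=>["a", "b"]}

    begin
      JSON::Ext::Parser.new('{}', symbolize_names: true, create_additions: true)
    rescue ArgumentError => e
      puts e.message    # the two options cannot be combined, as raised above
    end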
match_string : Qnil; + } else { + json->match_string = Qnil; + } + #ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } + #endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + + +enum {JSON_start = 1}; +enum {JSON_first_final = 10}; +enum {JSON_error = 0}; + +enum {JSON_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 835 "parser.rl" + + +/* +* call-seq: parse() +* +* Parses the current JSON text _source_ and returns the complete data +* structure as a result. +*/ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + + { + cs = (int)JSON_start; + } + + #line 851 "parser.rl" + + p = json->source; + pe = p + json->len; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 10: + goto st_case_10; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + } + goto st_out; + st1: + p+= 1; + if ( p == pe ) + goto _test_eof1; + st_case_1: + switch( ( (*( p))) ) { + case 13: { + goto st1; + } + case 32: { + goto st1; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr2; + } + case 47: { + goto st6; + } + case 73: { + goto ctr2; + } + case 78: { + goto ctr2; + } + case 91: { + goto ctr2; + } + case 102: { + goto ctr2; + } + case 110: { + goto ctr2; + } + case 116: { + goto ctr2; + } + case 123: { + goto ctr2; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr2; + } + } else if ( ( (*( p))) >= 9 ) { + goto st1; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + ctr2: + { + #line 827 "parser.rl" + + char *np = JSON_parse_value(json, p, pe, &result, 0); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 10; goto _out;} } else {p = (( np))-1;} + + } + + goto st10; + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + switch( ( (*( p))) ) { + case 13: { + goto st10; + } + case 32: { + goto st10; + } + case 47: { + goto st2; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st10; + } + { + goto st0; + } + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 42: { + goto st3; + } + case 47: { + goto st5; + } + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( ( (*( p))) == 42 ) { + goto st4; + } + { + goto st3; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 42: { + goto st4; + } + case 47: { + goto st10; + } + } + { + goto st3; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) == 10 ) { + goto st10; + } + { + goto st5; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + 
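cParser_parse runs this top-level machine over the whole source; whitespace and the C/C++-style comments in the grammar's ignore set are skipped around the single top-level value. With the extension parser shown here:

    require 'json'

    JSON::Ext::Parser.new("  {\"ok\": true}  ").parse
    # => {"ok"=>true}

    JSON.parse('/* leading */ [1, 2] /* trailing */')
    # => [1, 2]   — comments fall into the grammar's ignore set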
switch( ( (*( p))) ) { + case 42: { + goto st7; + } + case 47: { + goto st9; + } + } + { + goto st0; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) == 42 ) { + goto st8; + } + { + goto st7; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 42: { + goto st8; + } + case 47: { + goto st1; + } + } + { + goto st7; + } + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + if ( ( (*( p))) == 10 ) { + goto st1; + } + { + goto st9; + } + st_out: + _test_eof1: cs = 1; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 854 "parser.rl" + + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, + #ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, + #endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* +* call-seq: source() +* +* Returns a copy of the current _source_ string, that was used to construct +* this Parser. 
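The final check above only returns the result when the machine reached a final state and consumed the whole source (p == pe); anything left over raises JSON::ParserError. Parser#source, documented just above, hands back a copy of the input:

    require 'json'

    parser = JSON::Ext::Parser.new('[1, 2]')
    parser.parse     # => [1, 2]
    parser.source    # => "[1, 2]", a copy of the string the parser was built from

    begin
      JSON.parse('[1, 2] trailing-garbage')
    rescue JSON::ParserError => e
      puts e.message   # "unexpected token at ..." from the branch above
    end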
+*/ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ + #ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); + #endif + + #undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_try_convert = rb_intern("try_convert"); + i_freeze = rb_intern("freeze"); + i_uminus = rb_intern("-@"); +} + +/* +* Local variables: +* mode: c +* c-file-style: ruby +* indent-tabs-mode: nil +* End: +*/ diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.h b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.h new file mode 100644 index 0000000..92ed3fd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.h @@ -0,0 +1,96 @@ +#ifndef _PARSER_H_ +#define _PARSER_H_ + +#include "ruby.h" + +#ifndef HAVE_RUBY_RE_H +#include "re.h" +#endif + +#ifdef HAVE_RUBY_ST_H +#include "ruby/st.h" +#else +#include "st.h" +#endif + +#ifndef MAYBE_UNUSED +# define MAYBE_UNUSED(x) x +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode */ + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +typedef struct JSON_ParserStruct { + VALUE Vsource; + char *source; + long len; + char *memo; + VALUE create_id; + int max_nesting; + int allow_nan; + int parsing_name; + int symbolize_names; + int freeze; + VALUE object_class; + VALUE array_class; + VALUE decimal_class; + int create_additions; + VALUE match_string; + FBuffer *fbuffer; +} JSON_Parser; + +#define GET_PARSER \ + GET_PARSER_INIT; \ + if 
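Init_parser above registers the class as JSON::Ext::Parser and interns the IDs used throughout the parser; once the C extension is loaded it is the parser JSON uses, as the Document-class comment earlier notes:

    require 'json'

    JSON.parser                        # => JSON::Ext::Parser when the C extension is loaded
    JSON.parser = JSON::Ext::Parser    # the assignment shown in the class documentation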
(!json->Vsource) rb_raise(rb_eTypeError, "uninitialized instance") +#define GET_PARSER_INIT \ + JSON_Parser *json; \ + TypedData_Get_Struct(self, JSON_Parser, &JSON_Parser_type, json) + +#define MinusInfinity "-Infinity" +#define EVIL 0x666 + +static UTF32 unescape_unicode(const unsigned char *p); +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch); +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static VALUE json_string_unescape(char *string, char *stringEnd, int intern, int symbolize); +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result); +static VALUE convert_encoding(VALUE source); +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cParser_parse(VALUE self); +static void JSON_mark(void *json); +static void JSON_free(void *json); +static VALUE cJSON_parser_s_allocate(VALUE klass); +static VALUE cParser_source(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Parser_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, JSON_free, json) +#define TypedData_Get_Struct(self, JSON_Parser, ignore, json) Data_Get_Struct(self, JSON_Parser, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.rl b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.rl new file mode 100644 index 0000000..f7be1a5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/ext/parser/parser.rl @@ -0,0 +1,977 @@ +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) 
+{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_try_convert, i_freeze, i_uminus; + +%%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft\"\-\[\{NI] | digit; + begin_object = '{'; + end_object = '}'; + 
begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; +}%% + +%%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + fexec np; + } + } + + action parse_name { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, fpc, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + + pair = ignore* begin_name >parse_name ignore* name_separator ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object + (pair (next_pair)*)? ignore* + end_object + ) @exit; +}%% + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + %% write init; + %% write exec; + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + *result = Qnil; + } + action parse_false { + *result = Qfalse; + } + action parse_true { + *result = Qtrue; + } + action parse_nan { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + action parse_infinity { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + action parse_string { + char *np = JSON_parse_string(json, fpc, pe, result); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_number { + char *np; + if(pe > fpc + 8 && !strncmp(MinusInfinity, fpc, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + fexec p + 10; + fhold; fbreak; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, fpc, pe, result); + if (np != NULL) fexec np; + np = JSON_parse_integer(json, fpc, pe, result); + if (np != NULL) fexec np; + fhold; fbreak; + } + + action parse_array { + char *np; + np = JSON_parse_array(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_object { + char *np; + np = JSON_parse_object(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + +main := ignore* ( + Vnull 
@parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) ignore* %*exit; +}%% + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + %% write init; + %% write exec; + + if (json->freeze) { + OBJ_FREEZE(*result); + } + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + +%%{ + machine JSON_integer; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ('0' | [1-9][0-9]*) (^[0-9]? @exit); +}%% + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + +%%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ( + (('0' | [1-9][0-9]*) '.' [0-9]+ ([Ee] [+\-]?[0-9]+)?) + | (('0' | [1-9][0-9]*) ([Ee] [+\-]?[0-9]+)) + ) (^[0-9Ee.\-]? @exit ); +}%% + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_float_first_final) { + VALUE mod = Qnil; + ID method_id = 0; + if (rb_respond_to(json->decimal_class, i_try_convert)) { + mod = json->decimal_class; + method_id = i_try_convert; + } else if (rb_respond_to(json->decimal_class, i_new)) { + mod = json->decimal_class; + method_id = i_new; + } else if (RB_TYPE_P(json->decimal_class, T_CLASS)) { + VALUE name = rb_class_name(json->decimal_class); + const char *name_cstr = RSTRING_PTR(name); + const char *last_colon = strrchr(name_cstr, ':'); + if (last_colon) { + const char *mod_path_end = last_colon - 1; + VALUE mod_path = rb_str_substr(name, 0, mod_path_end - name_cstr); + mod = rb_path_to_class(mod_path); + + const char *method_name_beg = last_colon + 1; + long before_len = method_name_beg - name_cstr; + long len = RSTRING_LEN(name) - before_len; + VALUE method_name = rb_str_substr(name, before_len, len); + method_id = SYM2ID(rb_str_intern(method_name)); + } else { + mod = rb_mKernel; + method_id = SYM2ID(rb_str_intern(name)); + } + } + + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + + if (method_id) { + VALUE text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + *result = rb_funcallv(mod, method_id, 1, &text); + } else { + *result = DBL2NUM(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } + + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + fexec np; + } + } + + action exit { fhold; fbreak; } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array ignore* + ((begin_value >parse_value ignore*) + 
(ignore* next_element ignore*)*)? + end_array @exit; +}%% + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + %% write init; + %% write exec; + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static const size_t MAX_STACK_BUFFER_SIZE = 128; +static VALUE json_string_unescape(char *string, char *stringEnd, int intern, int symbolize) +{ + VALUE result = Qnil; + size_t bufferSize = stringEnd - string; + char *p = string, *pe = string, *unescape, *bufferStart, *buffer; + int unescape_len; + char buf[4]; + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + bufferStart = buffer = ALLOC_N(char, bufferSize); + } else { + bufferStart = buffer = ALLOCA_N(char, bufferSize); + } + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + MEMCPY(buffer, unescape, char, unescape_len); + buffer += unescape_len; + p = ++pe; + } else { + pe++; + } + } + + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + +# ifdef HAVE_RB_ENC_INTERNED_STR + if (intern) { + result = rb_enc_interned_str(bufferStart, (long)(buffer - bufferStart), rb_utf8_encoding()); + } else { + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + } + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } +# else + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + + if (intern) { + # if STR_UMINUS_DEDUPE_FROZEN + // Starting from MRI 2.8 it is preferable to freeze the string + // before deduplication so that it can be interned directly + // otherwise it would be duplicated first which is wasteful. 
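+          // (i_uminus is String#-@; on this branch the receiver is already
+          // frozen, so the call returns the interned, deduplicated string.)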
+ result = rb_funcall(rb_str_freeze(result), i_uminus, 0); + # elif STR_UMINUS_DEDUPE + // MRI 2.5 and older do not deduplicate strings that are already + // frozen. + result = rb_funcall(result, i_uminus, 0); + # else + result = rb_str_freeze(result); + # endif + } +# endif + + if (symbolize) { + result = rb_str_intern(result); + } + + return result; +} + +%%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + *result = json_string_unescape(json->memo + 1, p, json->parsing_name || json-> freeze, json->parsing_name && json->symbolize_names); + if (NIL_P(*result)) { + fhold; + fbreak; + } else { + fexec p + 1; + } + } + + action exit { fhold; fbreak; } + + main := '"' ((^([\"\\] | 0..0x1f) | '\\'[\"\\/bfnrt] | '\\u'[0-9a-fA-F]{4} | '\\'^([\"\\/bfnrtu]|0..0x1f))* %parse_string) '"' @exit; +}%% + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + %% write init; + json->memo = p; + %% write exec; + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. + * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. 
+ * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_freeze); + if (option_given_p(opts, tmp)) { + json->freeze = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->freeze = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + +%%{ + machine JSON; + + write data; + + include JSON_common; + + action parse_value { + char *np = JSON_parse_value(json, fpc, pe, &result, 0); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + main := ignore* ( + begin_value >parse_value + ) ignore*; +}%% + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. + */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + %% write init; + p = json->source; + pe = p + json->len; + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. 
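+ *
+ * For example (illustrative, when the C extension parser is in use):
+ *   parser = JSON::Ext::Parser.new('{"a": 1}')
+ *   parser.source # => '{"a": 1}'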
+ */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); +#endif + +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_try_convert = rb_intern("try_convert"); + i_freeze = rb_intern("freeze"); + i_uminus = rb_intern("-@"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/extconf.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/extconf.rb new file mode 100644 index 0000000..8a99b6a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/ext/json/extconf.rb @@ -0,0 +1,3 @@ +require 'mkmf' + +create_makefile('json') diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/json.gemspec b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/json.gemspec new file mode 100644 index 0000000..948e92c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/json.gemspec @@ -0,0 +1,67 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json" + s.version = File.read(File.expand_path('../VERSION', __FILE__)).chomp + + s.summary = "JSON Implementation for Ruby" + s.description = "This is a JSON implementation as a Ruby extension in C." 
+ s.licenses = ["Ruby"] + s.authors = ["Florian Frank"] + s.email = "flori@ping.de" + + s.extensions = ["ext/json/ext/generator/extconf.rb", "ext/json/ext/parser/extconf.rb", "ext/json/extconf.rb"] + s.extra_rdoc_files = ["README.md"] + s.rdoc_options = ["--title", "JSON implementation for Ruby", "--main", "README.md"] + s.files = [ + "CHANGES.md", + "LICENSE", + "README.md", + "VERSION", + "ext/json/ext/fbuffer/fbuffer.h", + "ext/json/ext/generator/depend", + "ext/json/ext/generator/extconf.rb", + "ext/json/ext/generator/generator.c", + "ext/json/ext/generator/generator.h", + "ext/json/ext/parser/depend", + "ext/json/ext/parser/extconf.rb", + "ext/json/ext/parser/parser.c", + "ext/json/ext/parser/parser.h", + "ext/json/ext/parser/parser.rl", + "ext/json/extconf.rb", + "json.gemspec", + "lib/json.rb", + "lib/json/add/bigdecimal.rb", + "lib/json/add/complex.rb", + "lib/json/add/core.rb", + "lib/json/add/date.rb", + "lib/json/add/date_time.rb", + "lib/json/add/exception.rb", + "lib/json/add/ostruct.rb", + "lib/json/add/range.rb", + "lib/json/add/rational.rb", + "lib/json/add/regexp.rb", + "lib/json/add/set.rb", + "lib/json/add/struct.rb", + "lib/json/add/symbol.rb", + "lib/json/add/time.rb", + "lib/json/common.rb", + "lib/json/ext.rb", + "lib/json/generic_object.rb", + "lib/json/pure.rb", + "lib/json/pure/generator.rb", + "lib/json/pure/parser.rb", + "lib/json/version.rb", + ] + s.homepage = "http://flori.github.com/json" + s.metadata = { + 'bug_tracker_uri' => 'https://github.com/flori/json/issues', + 'changelog_uri' => 'https://github.com/flori/json/blob/master/CHANGES.md', + 'documentation_uri' => 'http://flori.github.io/json/doc/index.html', + 'homepage_uri' => 'http://flori.github.io/json/', + 'source_code_uri' => 'https://github.com/flori/json', + 'wiki_uri' => 'https://github.com/flori/json/wiki' + } + + s.required_ruby_version = Gem::Requirement.new(">= 2.3") +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json.rb new file mode 100644 index 0000000..1e64bfc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json.rb @@ -0,0 +1,583 @@ +#frozen_string_literal: false +require 'json/common' + +## +# = JavaScript \Object Notation (\JSON) +# +# \JSON is a lightweight data-interchange format. +# +# A \JSON value is one of the following: +# - Double-quoted text: "foo". +# - Number: +1+, +1.0+, +2.0e2+. +# - Boolean: +true+, +false+. +# - Null: +null+. +# - \Array: an ordered list of values, enclosed by square brackets: +# ["foo", 1, 1.0, 2.0e2, true, false, null] +# +# - \Object: a collection of name/value pairs, enclosed by curly braces; +# each name is double-quoted text; +# the values may be any \JSON values: +# {"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null} +# +# A \JSON array or object may contain nested arrays, objects, and scalars +# to any depth: +# {"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]} +# [{"foo": 0, "bar": 1}, ["baz", 2]] +# +# == Using \Module \JSON +# +# To make module \JSON available in your code, begin with: +# require 'json' +# +# All examples here assume that this has been done. +# +# === Parsing \JSON +# +# You can parse a \String containing \JSON data using +# either of two methods: +# - JSON.parse(source, opts) +# - JSON.parse!(source, opts) +# +# where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. 
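+#
+# Both methods are called the same way; for example:
+# JSON.parse('{"a": 0}') # => {"a"=>0}
+# JSON.parse!('{"a": 0}') # => {"a"=>0}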
+# +# The difference between the two methods +# is that JSON.parse! omits some checks +# and may not be safe for some +source+ data; +# use it only for data from trusted sources. +# Use the safer method JSON.parse for less trusted sources. +# +# ==== Parsing \JSON Arrays +# +# When +source+ is a \JSON array, JSON.parse by default returns a Ruby \Array: +# json = '["foo", 1, 1.0, 2.0e2, true, false, null]' +# ruby = JSON.parse(json) +# ruby # => ["foo", 1, 1.0, 200.0, true, false, nil] +# ruby.class # => Array +# +# The \JSON array may contain nested arrays, objects, and scalars +# to any depth: +# json = '[{"foo": 0, "bar": 1}, ["baz", 2]]' +# JSON.parse(json) # => [{"foo"=>0, "bar"=>1}, ["baz", 2]] +# +# ==== Parsing \JSON \Objects +# +# When the source is a \JSON object, JSON.parse by default returns a Ruby \Hash: +# json = '{"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null}' +# ruby = JSON.parse(json) +# ruby # => {"a"=>"foo", "b"=>1, "c"=>1.0, "d"=>200.0, "e"=>true, "f"=>false, "g"=>nil} +# ruby.class # => Hash +# +# The \JSON object may contain nested arrays, objects, and scalars +# to any depth: +# json = '{"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]}' +# JSON.parse(json) # => {"foo"=>{"bar"=>1, "baz"=>2}, "bat"=>[0, 1, 2]} +# +# ==== Parsing \JSON Scalars +# +# When the source is a \JSON scalar (not an array or object), +# JSON.parse returns a Ruby scalar. +# +# \String: +# ruby = JSON.parse('"foo"') +# ruby # => 'foo' +# ruby.class # => String +# \Integer: +# ruby = JSON.parse('1') +# ruby # => 1 +# ruby.class # => Integer +# \Float: +# ruby = JSON.parse('1.0') +# ruby # => 1.0 +# ruby.class # => Float +# ruby = JSON.parse('2.0e2') +# ruby # => 200 +# ruby.class # => Float +# Boolean: +# ruby = JSON.parse('true') +# ruby # => true +# ruby.class # => TrueClass +# ruby = JSON.parse('false') +# ruby # => false +# ruby.class # => FalseClass +# Null: +# ruby = JSON.parse('null') +# ruby # => nil +# ruby.class # => NilClass +# +# ==== Parsing Options +# +# ====== Input Options +# +# Option +max_nesting+ (\Integer) specifies the maximum nesting depth allowed; +# defaults to +100+; specify +false+ to disable depth checking. +# +# With the default, +false+: +# source = '[0, [1, [2, [3]]]]' +# ruby = JSON.parse(source) +# ruby # => [0, [1, [2, [3]]]] +# Too deep: +# # Raises JSON::NestingError (nesting of 2 is too deep): +# JSON.parse(source, {max_nesting: 1}) +# Bad value: +# # Raises TypeError (wrong argument type Symbol (expected Fixnum)): +# JSON.parse(source, {max_nesting: :foo}) +# +# --- +# +# Option +allow_nan+ (boolean) specifies whether to allow +# NaN, Infinity, and MinusInfinity in +source+; +# defaults to +false+. +# +# With the default, +false+: +# # Raises JSON::ParserError (225: unexpected token at '[NaN]'): +# JSON.parse('[NaN]') +# # Raises JSON::ParserError (232: unexpected token at '[Infinity]'): +# JSON.parse('[Infinity]') +# # Raises JSON::ParserError (248: unexpected token at '[-Infinity]'): +# JSON.parse('[-Infinity]') +# Allow: +# source = '[NaN, Infinity, -Infinity]' +# ruby = JSON.parse(source, {allow_nan: true}) +# ruby # => [NaN, Infinity, -Infinity] +# +# ====== Output Options +# +# Option +symbolize_names+ (boolean) specifies whether returned \Hash keys +# should be Symbols; +# defaults to +false+ (use Strings). 
+# +# With the default, +false+: +# source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' +# ruby = JSON.parse(source) +# ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} +# Use Symbols: +# ruby = JSON.parse(source, {symbolize_names: true}) +# ruby # => {:a=>"foo", :b=>1.0, :c=>true, :d=>false, :e=>nil} +# +# --- +# +# Option +object_class+ (\Class) specifies the Ruby class to be used +# for each \JSON object; +# defaults to \Hash. +# +# With the default, \Hash: +# source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' +# ruby = JSON.parse(source) +# ruby.class # => Hash +# Use class \OpenStruct: +# ruby = JSON.parse(source, {object_class: OpenStruct}) +# ruby # => # +# +# --- +# +# Option +array_class+ (\Class) specifies the Ruby class to be used +# for each \JSON array; +# defaults to \Array. +# +# With the default, \Array: +# source = '["foo", 1.0, true, false, null]' +# ruby = JSON.parse(source) +# ruby.class # => Array +# Use class \Set: +# ruby = JSON.parse(source, {array_class: Set}) +# ruby # => # +# +# --- +# +# Option +create_additions+ (boolean) specifies whether to use \JSON additions in parsing. +# See {\JSON Additions}[#module-JSON-label-JSON+Additions]. +# +# === Generating \JSON +# +# To generate a Ruby \String containing \JSON data, +# use method JSON.generate(source, opts), where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# ==== Generating \JSON from Arrays +# +# When the source is a Ruby \Array, JSON.generate returns +# a \String containing a \JSON array: +# ruby = [0, 's', :foo] +# json = JSON.generate(ruby) +# json # => '[0,"s","foo"]' +# +# The Ruby \Array array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = [0, [1, 2], {foo: 3, bar: 4}] +# json = JSON.generate(ruby) +# json # => '[0,[1,2],{"foo":3,"bar":4}]' +# +# ==== Generating \JSON from Hashes +# +# When the source is a Ruby \Hash, JSON.generate returns +# a \String containing a \JSON object: +# ruby = {foo: 0, bar: 's', baz: :bat} +# json = JSON.generate(ruby) +# json # => '{"foo":0,"bar":"s","baz":"bat"}' +# +# The Ruby \Hash array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} +# json = JSON.generate(ruby) +# json # => '{"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"}' +# +# ==== Generating \JSON from Other Objects +# +# When the source is neither an \Array nor a \Hash, +# the generated \JSON data depends on the class of the source. 
+# +# When the source is a Ruby \Integer or \Float, JSON.generate returns +# a \String containing a \JSON number: +# JSON.generate(42) # => '42' +# JSON.generate(0.42) # => '0.42' +# +# When the source is a Ruby \String, JSON.generate returns +# a \String containing a \JSON string (with double-quotes): +# JSON.generate('A string') # => '"A string"' +# +# When the source is +true+, +false+ or +nil+, JSON.generate returns +# a \String containing the corresponding \JSON token: +# JSON.generate(true) # => 'true' +# JSON.generate(false) # => 'false' +# JSON.generate(nil) # => 'null' +# +# When the source is none of the above, JSON.generate returns +# a \String containing a \JSON string representation of the source: +# JSON.generate(:foo) # => '"foo"' +# JSON.generate(Complex(0, 0)) # => '"0+0i"' +# JSON.generate(Dir.new('.')) # => '"#"' +# +# ==== Generating Options +# +# ====== Input Options +# +# Option +allow_nan+ (boolean) specifies whether +# +NaN+, +Infinity+, and -Infinity may be generated; +# defaults to +false+. +# +# With the default, +false+: +# # Raises JSON::GeneratorError (920: NaN not allowed in JSON): +# JSON.generate(JSON::NaN) +# # Raises JSON::GeneratorError (917: Infinity not allowed in JSON): +# JSON.generate(JSON::Infinity) +# # Raises JSON::GeneratorError (917: -Infinity not allowed in JSON): +# JSON.generate(JSON::MinusInfinity) +# +# Allow: +# ruby = [Float::NaN, Float::Infinity, Float::MinusInfinity] +# JSON.generate(ruby, allow_nan: true) # => '[NaN,Infinity,-Infinity]' +# +# --- +# +# Option +max_nesting+ (\Integer) specifies the maximum nesting depth +# in +obj+; defaults to +100+. +# +# With the default, +100+: +# obj = [[[[[[0]]]]]] +# JSON.generate(obj) # => '[[[[[[0]]]]]]' +# +# Too deep: +# # Raises JSON::NestingError (nesting of 2 is too deep): +# JSON.generate(obj, max_nesting: 2) +# +# ====== Output Options +# +# The default formatting options generate the most compact +# \JSON data, all on one line and with no whitespace. +# +# You can use these formatting options to generate +# \JSON data in a more open format, using whitespace. +# See also JSON.pretty_generate. +# +# - Option +array_nl+ (\String) specifies a string (usually a newline) +# to be inserted after each \JSON array; defaults to the empty \String, ''. +# - Option +object_nl+ (\String) specifies a string (usually a newline) +# to be inserted after each \JSON object; defaults to the empty \String, ''. +# - Option +indent+ (\String) specifies the string (usually spaces) to be +# used for indentation; defaults to the empty \String, ''; +# defaults to the empty \String, ''; +# has no effect unless options +array_nl+ or +object_nl+ specify newlines. +# - Option +space+ (\String) specifies a string (usually a space) to be +# inserted after the colon in each \JSON object's pair; +# defaults to the empty \String, ''. +# - Option +space_before+ (\String) specifies a string (usually a space) to be +# inserted before the colon in each \JSON object's pair; +# defaults to the empty \String, ''. 
+# +# In this example, +obj+ is used first to generate the shortest +# \JSON data (no whitespace), then again with all formatting options +# specified: +# +# obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} +# json = JSON.generate(obj) +# puts 'Compact:', json +# opts = { +# array_nl: "\n", +# object_nl: "\n", +# indent: ' ', +# space_before: ' ', +# space: ' ' +# } +# puts 'Open:', JSON.generate(obj, opts) +# +# Output: +# Compact: +# {"foo":["bar","baz"],"bat":{"bam":0,"bad":1}} +# Open: +# { +# "foo" : [ +# "bar", +# "baz" +# ], +# "bat" : { +# "bam" : 0, +# "bad" : 1 +# } +# } +# +# == \JSON Additions +# +# When you "round trip" a non-\String object from Ruby to \JSON and back, +# you have a new \String, instead of the object you began with: +# ruby0 = Range.new(0, 2) +# json = JSON.generate(ruby0) +# json # => '0..2"' +# ruby1 = JSON.parse(json) +# ruby1 # => '0..2' +# ruby1.class # => String +# +# You can use \JSON _additions_ to preserve the original object. +# The addition is an extension of a ruby class, so that: +# - \JSON.generate stores more information in the \JSON string. +# - \JSON.parse, called with option +create_additions+, +# uses that information to create a proper Ruby object. +# +# This example shows a \Range being generated into \JSON +# and parsed back into Ruby, both without and with +# the addition for \Range: +# ruby = Range.new(0, 2) +# # This passage does not use the addition for Range. +# json0 = JSON.generate(ruby) +# ruby0 = JSON.parse(json0) +# # This passage uses the addition for Range. +# require 'json/add/range' +# json1 = JSON.generate(ruby) +# ruby1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <require 'json/add/bigdecimal' +# - Complex: require 'json/add/complex' +# - Date: require 'json/add/date' +# - DateTime: require 'json/add/date_time' +# - Exception: require 'json/add/exception' +# - OpenStruct: require 'json/add/ostruct' +# - Range: require 'json/add/range' +# - Rational: require 'json/add/rational' +# - Regexp: require 'json/add/regexp' +# - Set: require 'json/add/set' +# - Struct: require 'json/add/struct' +# - Symbol: require 'json/add/symbol' +# - Time: require 'json/add/time' +# +# To reduce punctuation clutter, the examples below +# show the generated \JSON via +puts+, rather than the usual +inspect+, +# +# \BigDecimal: +# require 'json/add/bigdecimal' +# ruby0 = BigDecimal(0) # 0.0 +# json = JSON.generate(ruby0) # {"json_class":"BigDecimal","b":"27:0.0"} +# ruby1 = JSON.parse(json, create_additions: true) # 0.0 +# ruby1.class # => BigDecimal +# +# \Complex: +# require 'json/add/complex' +# ruby0 = Complex(1+0i) # 1+0i +# json = JSON.generate(ruby0) # {"json_class":"Complex","r":1,"i":0} +# ruby1 = JSON.parse(json, create_additions: true) # 1+0i +# ruby1.class # Complex +# +# \Date: +# require 'json/add/date' +# ruby0 = Date.today # 2020-05-02 +# json = JSON.generate(ruby0) # {"json_class":"Date","y":2020,"m":5,"d":2,"sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 +# ruby1.class # Date +# +# \DateTime: +# require 'json/add/date_time' +# ruby0 = DateTime.now # 2020-05-02T10:38:13-05:00 +# json = JSON.generate(ruby0) # {"json_class":"DateTime","y":2020,"m":5,"d":2,"H":10,"M":38,"S":13,"of":"-5/24","sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02T10:38:13-05:00 +# ruby1.class # DateTime +# +# \Exception (and its subclasses including \RuntimeError): +# require 'json/add/exception' +# ruby0 = Exception.new('A message') # A message +# json = 
JSON.generate(ruby0) # {"json_class":"Exception","m":"A message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # A message +# ruby1.class # Exception +# ruby0 = RuntimeError.new('Another message') # Another message +# json = JSON.generate(ruby0) # {"json_class":"RuntimeError","m":"Another message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # Another message +# ruby1.class # RuntimeError +# +# \OpenStruct: +# require 'json/add/ostruct' +# ruby0 = OpenStruct.new(name: 'Matz', language: 'Ruby') # # +# json = JSON.generate(ruby0) # {"json_class":"OpenStruct","t":{"name":"Matz","language":"Ruby"}} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # OpenStruct +# +# \Range: +# require 'json/add/range' +# ruby0 = Range.new(0, 2) # 0..2 +# json = JSON.generate(ruby0) # {"json_class":"Range","a":[0,2,false]} +# ruby1 = JSON.parse(json, create_additions: true) # 0..2 +# ruby1.class # Range +# +# \Rational: +# require 'json/add/rational' +# ruby0 = Rational(1, 3) # 1/3 +# json = JSON.generate(ruby0) # {"json_class":"Rational","n":1,"d":3} +# ruby1 = JSON.parse(json, create_additions: true) # 1/3 +# ruby1.class # Rational +# +# \Regexp: +# require 'json/add/regexp' +# ruby0 = Regexp.new('foo') # (?-mix:foo) +# json = JSON.generate(ruby0) # {"json_class":"Regexp","o":0,"s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # (?-mix:foo) +# ruby1.class # Regexp +# +# \Set: +# require 'json/add/set' +# ruby0 = Set.new([0, 1, 2]) # # +# json = JSON.generate(ruby0) # {"json_class":"Set","a":[0,1,2]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Set +# +# \Struct: +# require 'json/add/struct' +# Customer = Struct.new(:name, :address) # Customer +# ruby0 = Customer.new("Dave", "123 Main") # # +# json = JSON.generate(ruby0) # {"json_class":"Customer","v":["Dave","123 Main"]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Customer +# +# \Symbol: +# require 'json/add/symbol' +# ruby0 = :foo # foo +# json = JSON.generate(ruby0) # {"json_class":"Symbol","s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # foo +# ruby1.class # Symbol +# +# \Time: +# require 'json/add/time' +# ruby0 = Time.now # 2020-05-02 11:28:26 -0500 +# json = JSON.generate(ruby0) # {"json_class":"Time","s":1588436906,"n":840560000} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 11:28:26 -0500 +# ruby1.class # Time +# +# +# === Custom \JSON Additions +# +# In addition to the \JSON additions provided, +# you can craft \JSON additions of your own, +# either for Ruby built-in classes or for user-defined classes. +# +# Here's a user-defined class +Foo+: +# class Foo +# attr_accessor :bar, :baz +# def initialize(bar, baz) +# self.bar = bar +# self.baz = baz +# end +# end +# +# Here's the \JSON addition for it: +# # Extend class Foo with JSON addition. +# class Foo +# # Serialize Foo object with its class name and arguments +# def to_json(*args) +# { +# JSON.create_id => self.class.name, +# 'a' => [ bar, baz ] +# }.to_json(*args) +# end +# # Deserialize JSON string by constructing new Foo object with arguments. +# def self.json_create(object) +# new(*object['a']) +# end +# end +# +# Demonstration: +# require 'json' +# # This Foo object has no custom addition. +# foo0 = Foo.new(0, 1) +# json0 = JSON.generate(foo0) +# obj0 = JSON.parse(json0) +# # Lood the custom addition. +# require_relative 'foo_addition' +# # This foo has the custom addition. 
+# foo1 = Foo.new(0, 1) +# json1 = JSON.generate(foo1) +# obj1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <" (String) +# With custom addition: {"json_class":"Foo","a":[0,1]} (String) +# Parsed JSON: +# Without custom addition: "#" (String) +# With custom addition: # (Foo) +# +module JSON + require 'json/version' + + begin + require 'json/ext' + rescue LoadError + require 'json/pure' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/bigdecimal.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/bigdecimal.rb new file mode 100644 index 0000000..c8b4f56 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/bigdecimal.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::BigDecimal) or require 'bigdecimal' + +class BigDecimal + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + BigDecimal._load object['b'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'b' => _dump, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/complex.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/complex.rb new file mode 100644 index 0000000..e63e29f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/complex.rb @@ -0,0 +1,28 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Complex + + # Deserializes JSON string by converting Real value r, imaginary + # value i, to a Complex object. + def self.json_create(object) + Complex(object['r'], object['i']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'r' => real, + 'i' => imag, + } + end + + # Stores class name (Complex) along with real value r and imaginary value i as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/core.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/core.rb new file mode 100644 index 0000000..bfb017c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/core.rb @@ -0,0 +1,12 @@ +#frozen_string_literal: false +# This file requires the implementations of ruby core's custom objects for +# serialisation/deserialisation. + +require 'json/add/date' +require 'json/add/date_time' +require 'json/add/exception' +require 'json/add/range' +require 'json/add/regexp' +require 'json/add/struct' +require 'json/add/symbol' +require 'json/add/time' diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/date.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/date.rb new file mode 100644 index 0000000..2552356 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/date.rb @@ -0,0 +1,34 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class Date + + # Deserializes JSON string by converting Julian year y, month + # m, day d and Day of Calendar Reform sg to Date. 
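+ #
+ # Illustrative call, passing just the keys this method reads:
+ # Date.json_create('y' => 2001, 'm' => 2, 'd' => 3, 'sg' => Date::ITALY).to_s
+ # # => "2001-02-03"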
+ def self.json_create(object) + civil(*object.values_at('y', 'm', 'd', 'sg')) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'sg' => start, + } + end + + # Stores class name (Date) with Julian year y, month m, day + # d and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/date_time.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/date_time.rb new file mode 100644 index 0000000..38b0e86 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/date_time.rb @@ -0,0 +1,50 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class DateTime + + # Deserializes JSON string by converting year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg to DateTime. + def self.json_create(object) + args = object.values_at('y', 'm', 'd', 'H', 'M', 'S') + of_a, of_b = object['of'].split('/') + if of_b and of_b != '0' + args << Rational(of_a.to_i, of_b.to_i) + else + args << of_a + end + args << object['sg'] + civil(*args) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'H' => hour, + 'M' => min, + 'S' => sec, + 'of' => offset.to_s, + 'sg' => start, + } + end + + # Stores class name (DateTime) with Julian year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end + + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/exception.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/exception.rb new file mode 100644 index 0000000..a107e5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/exception.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Exception + + # Deserializes JSON string by constructing new Exception object with message + # m and backtrace b serialized with to_json + def self.json_create(object) + result = new(object['m']) + result.set_backtrace object['b'] + result + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'm' => message, + 'b' => backtrace, + } + end + + # Stores class name (Exception) with message m and backtrace array + # b as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/ostruct.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/ostruct.rb new file mode 100644 index 0000000..686cf00 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/ostruct.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'ostruct' + +class OpenStruct + + # Deserializes JSON string by constructing new Struct object with values + # t serialized by to_json. 
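+ #
+ # Illustrative call, passing just the 't' table this method reads:
+ # OpenStruct.json_create('t' => { 'name' => 'Matz' })
+ # # => #<OpenStruct name="Matz">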
+ def self.json_create(object) + new(object['t'] || object[:t]) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 't' => table, + } + end + + # Stores class name (OpenStruct) with this struct's values t as a + # JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/range.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/range.rb new file mode 100644 index 0000000..93529fb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/range.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Range + + # Deserializes JSON string by constructing new Range object with arguments + # a serialized by to_json. + def self.json_create(object) + new(*object['a']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => [ first, last, exclude_end? ] + } + end + + # Stores class name (Range) with JSON array of arguments a which + # include first (integer), last (integer), and + # exclude_end? (boolean) as JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/rational.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/rational.rb new file mode 100644 index 0000000..f776226 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/rational.rb @@ -0,0 +1,27 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Rational + # Deserializes JSON string by converting numerator value n, + # denominator value d, to a Rational object. + def self.json_create(object) + Rational(object['n'], object['d']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'n' => numerator, + 'd' => denominator, + } + end + + # Stores class name (Rational) along with numerator value n and denominator value d as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/regexp.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/regexp.rb new file mode 100644 index 0000000..39d69fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/regexp.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Regexp + + # Deserializes JSON string by constructing new Regexp object with source + # s (Regexp or String) and options o serialized by + # to_json + def self.json_create(object) + new(object['s'], object['o']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
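+ #
+ # For example, with the default JSON.create_id:
+ # /foo/i.as_json
+ # # => {"json_class"=>"Regexp", "o"=>1, "s"=>"foo"}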
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'o' => options, + 's' => source, + } + end + + # Stores class name (Regexp) with options o and source s + # (Regexp or String) as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/set.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/set.rb new file mode 100644 index 0000000..71e2a0a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/set.rb @@ -0,0 +1,29 @@ +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Set) or require 'set' + +class Set + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + new object['a'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => to_a, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/struct.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/struct.rb new file mode 100644 index 0000000..e8395ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/struct.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Struct + + # Deserializes JSON string by constructing new Struct object with values + # v serialized by to_json. + def self.json_create(object) + new(*object['v']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 'v' => values, + } + end + + # Stores class name (Struct) with Struct values v as a JSON string. + # Only named structs are supported. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/symbol.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/symbol.rb new file mode 100644 index 0000000..74b13a4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/symbol.rb @@ -0,0 +1,25 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Symbol + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 's' => to_s, + } + end + + # Stores class name (Symbol) with String representation of Symbol as a JSON string. 
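+ #
+ # For example, with the default JSON.create_id:
+ # :foo.to_json # => '{"json_class":"Symbol","s":"foo"}'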
+ def to_json(*a) + as_json.to_json(*a) + end + + # Deserializes JSON string by converting the string value stored in the object to a Symbol + def self.json_create(o) + o['s'].to_sym + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/time.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/time.rb new file mode 100644 index 0000000..b73acc4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/add/time.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Time + + # Deserializes JSON string by converting time since epoch to Time + def self.json_create(object) + if usec = object.delete('u') # used to be tv_usec -> tv_nsec + object['n'] = usec * 1000 + end + if method_defined?(:tv_nsec) + at(object['s'], Rational(object['n'], 1000)) + else + at(object['s'], object['n'] / 1000) + end + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + nanoseconds = [ tv_usec * 1000 ] + respond_to?(:tv_nsec) and nanoseconds << tv_nsec + nanoseconds = nanoseconds.max + { + JSON.create_id => self.class.name, + 's' => tv_sec, + 'n' => nanoseconds, + } + end + + # Stores class name (Time) with number of seconds since epoch and number of + # microseconds for Time as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/common.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/common.rb new file mode 100644 index 0000000..ea46896 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/common.rb @@ -0,0 +1,703 @@ +#frozen_string_literal: false +require 'json/version' +require 'json/generic_object' + +module JSON + class << self + # :call-seq: + # JSON[object] -> new_array or new_string + # + # If +object+ is a \String, + # calls JSON.parse with +object+ and +opts+ (see method #parse): + # json = '[0, 1, null]' + # JSON[json]# => [0, 1, nil] + # + # Otherwise, calls JSON.generate with +object+ and +opts+ (see method #generate): + # ruby = [0, 1, nil] + # JSON[ruby] # => '[0,1,null]' + def [](object, opts = {}) + if object.respond_to? :to_str + JSON.parse(object.to_str, opts) + else + JSON.generate(object, opts) + end + end + + # Returns the JSON parser class that is used by JSON. This is either + # JSON::Ext::Parser or JSON::Pure::Parser: + # JSON.parser # => JSON::Ext::Parser + attr_reader :parser + + # Set the JSON parser class _parser_ to be used by JSON. + def parser=(parser) # :nodoc: + @parser = parser + remove_const :Parser if const_defined?(:Parser, false) + const_set :Parser, parser + end + + # Return the constant located at _path_. The format of _path_ has to be + # either ::A::B::C or A::B::C. In any case, A has to be located at the top + # level (absolute namespace path?). If there doesn't exist a constant at + # the given path, an ArgumentError is raised. + def deep_const_get(path) # :nodoc: + path.to_s.split(/::/).inject(Object) do |p, c| + case + when c.empty? then p + when p.const_defined?(c, true) then p.const_get(c) + else + begin + p.const_missing(c) + rescue NameError => e + raise ArgumentError, "can't get const #{path}: #{e}" + end + end + end + end + + # Set the module _generator_ to be used by JSON. 
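+ #
+ # For example, when the C extension is available:
+ # JSON.generator = JSON::Ext::Generator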
+ def generator=(generator) # :nodoc: + old, $VERBOSE = $VERBOSE, nil + @generator = generator + generator_methods = generator::GeneratorMethods + for const in generator_methods.constants + klass = deep_const_get(const) + modul = generator_methods.const_get(const) + klass.class_eval do + instance_methods(false).each do |m| + m.to_s == 'to_json' and remove_method m + end + include modul + end + end + self.state = generator::State + const_set :State, self.state + const_set :SAFE_STATE_PROTOTYPE, State.new # for JRuby + const_set :FAST_STATE_PROTOTYPE, create_fast_state + const_set :PRETTY_STATE_PROTOTYPE, create_pretty_state + ensure + $VERBOSE = old + end + + def create_fast_state + State.new( + :indent => '', + :space => '', + :object_nl => "", + :array_nl => "", + :max_nesting => false + ) + end + + def create_pretty_state + State.new( + :indent => ' ', + :space => ' ', + :object_nl => "\n", + :array_nl => "\n" + ) + end + + # Returns the JSON generator module that is used by JSON. This is + # either JSON::Ext::Generator or JSON::Pure::Generator: + # JSON.generator # => JSON::Ext::Generator + attr_reader :generator + + # Sets or Returns the JSON generator state class that is used by JSON. This is + # either JSON::Ext::Generator::State or JSON::Pure::Generator::State: + # JSON.state # => JSON::Ext::Generator::State + attr_accessor :state + end + + DEFAULT_CREATE_ID = 'json_class'.freeze + private_constant :DEFAULT_CREATE_ID + + CREATE_ID_TLS_KEY = "JSON.create_id".freeze + private_constant :CREATE_ID_TLS_KEY + + # Sets create identifier, which is used to decide if the _json_create_ + # hook of a class should be called; initial value is +json_class+: + # JSON.create_id # => 'json_class' + def self.create_id=(new_value) + Thread.current[CREATE_ID_TLS_KEY] = new_value.dup.freeze + end + + # Returns the current create identifier. + # See also JSON.create_id=. + def self.create_id + Thread.current[CREATE_ID_TLS_KEY] || DEFAULT_CREATE_ID + end + + NaN = 0.0/0 + + Infinity = 1.0/0 + + MinusInfinity = -Infinity + + # The base exception for JSON errors. + class JSONError < StandardError + def self.wrap(exception) + obj = new("Wrapped(#{exception.class}): #{exception.message.inspect}") + obj.set_backtrace exception.backtrace + obj + end + end + + # This exception is raised if a parser error occurs. + class ParserError < JSONError; end + + # This exception is raised if the nesting of parsed data structures is too + # deep. + class NestingError < ParserError; end + + # :stopdoc: + class CircularDatastructure < NestingError; end + # :startdoc: + + # This exception is raised if a generator or unparser error occurs. + class GeneratorError < JSONError; end + # For backwards compatibility + UnparserError = GeneratorError # :nodoc: + + # This exception is raised if the required unicode support is missing on the + # system. Usually this means that the iconv library is not installed. + class MissingUnicodeSupport < JSONError; end + + module_function + + # :call-seq: + # JSON.parse(source, opts) -> object + # + # Returns the Ruby objects created by parsing the given +source+. + # + # Argument +source+ contains the \String to be parsed. + # + # Argument +opts+, if given, contains a \Hash of options for the parsing. + # See {Parsing Options}[#module-JSON-label-Parsing+Options]. 
+ # + # --- + # + # When +source+ is a \JSON array, returns a Ruby \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby # => ["foo", 1.0, true, false, nil] + # ruby.class # => Array + # + # When +source+ is a \JSON object, returns a Ruby \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # ruby.class # => Hash + # + # For examples of parsing for all \JSON data types, see + # {Parsing \JSON}[#module-JSON-label-Parsing+JSON]. + # + # Parses nested JSON objects: + # source = <<-EOT + # { + # "name": "Dave", + # "age" :40, + # "hats": [ + # "Cattleman's", + # "Panama", + # "Tophat" + # ] + # } + # EOT + # ruby = JSON.parse(source) + # ruby # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # --- + # + # Raises an exception if +source+ is not valid JSON: + # # Raises JSON::ParserError (783: unexpected token at ''): + # JSON.parse('') + # + def parse(source, opts = {}) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.parse!(source, opts) -> object + # + # Calls + # parse(source, opts) + # with +source+ and possibly modified +opts+. + # + # Differences from JSON.parse: + # - Option +max_nesting+, if not provided, defaults to +false+, + # which disables checking for nesting depth. + # - Option +allow_nan+, if not provided, defaults to +true+. + def parse!(source, opts = {}) + opts = { + :max_nesting => false, + :allow_nan => true + }.merge(opts) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.load_file(path, opts={}) -> object + # + # Calls: + # parse(File.read(path), opts) + # + # See method #parse. + def load_file(filespec, opts = {}) + parse(File.read(filespec), opts) + end + + # :call-seq: + # JSON.load_file!(path, opts = {}) + # + # Calls: + # JSON.parse!(File.read(path, opts)) + # + # See method #parse! + def load_file!(filespec, opts = {}) + parse!(File.read(filespec), opts) + end + + # :call-seq: + # JSON.generate(obj, opts = nil) -> new_string + # + # Returns a \String containing the generated \JSON data. + # + # See also JSON.fast_generate, JSON.pretty_generate. + # + # Argument +obj+ is the Ruby object to be converted to \JSON. + # + # Argument +opts+, if given, contains a \Hash of options for the generation. + # See {Generating Options}[#module-JSON-label-Generating+Options]. + # + # --- + # + # When +obj+ is an \Array, returns a \String containing a \JSON array: + # obj = ["foo", 1.0, true, false, nil] + # json = JSON.generate(obj) + # json # => '["foo",1.0,true,false,null]' + # + # When +obj+ is a \Hash, returns a \String containing a \JSON object: + # obj = {foo: 0, bar: 's', baz: :bat} + # json = JSON.generate(obj) + # json # => '{"foo":0,"bar":"s","baz":"bat"}' + # + # For examples of generating from other Ruby objects, see + # {Generating \JSON from Other Objects}[#module-JSON-label-Generating+JSON+from+Other+Objects]. + # + # --- + # + # Raises an exception if any formatting option is not a \String. + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises JSON::NestingError (nesting of 100 is too deep): + # JSON.generate(a) + # + def generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = State.new + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? 
:to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state = state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and + # later delete them. + alias unparse generate + module_function :unparse + # :startdoc: + + # :call-seq: + # JSON.fast_generate(obj, opts) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # By default, generates \JSON data without checking + # for circular references in +obj+ (option +max_nesting+ set to +false+, disabled). + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises SystemStackError (stack level too deep): + # JSON.fast_generate(a) + def fast_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = JSON.create_fast_state + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias fast_unparse fast_generate + module_function :fast_unparse + # :startdoc: + + # :call-seq: + # JSON.pretty_generate(obj, opts = nil) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # Default options are: + # { + # indent: ' ', # Two spaces + # space: ' ', # One space + # array_nl: "\n", # Newline + # object_nl: "\n" # Newline + # } + # + # Example: + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.pretty_generate(obj) + # puts json + # Output: + # { + # "foo": [ + # "bar", + # "baz" + # ], + # "bat": { + # "bam": 0, + # "bad": 1 + # } + # } + # + def pretty_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = JSON.create_pretty_state + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias pretty_unparse pretty_generate + module_function :pretty_unparse + # :startdoc: + + class << self + # Sets or returns default options for the JSON.load method. + # Initially: + # opts = JSON.load_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :allow_blank=>true, :create_additions=>true} + attr_accessor :load_default_options + end + self.load_default_options = { + :max_nesting => false, + :allow_nan => true, + :allow_blank => true, + :create_additions => true, + } + + # :call-seq: + # JSON.load(source, proc = nil, options = {}) -> object + # + # Returns the Ruby objects created by parsing the given +source+. + # + # - Argument +source+ must be, or be convertible to, a \String: + # - If +source+ responds to instance method +to_str+, + # source.to_str becomes the source. + # - If +source+ responds to instance method +to_io+, + # source.to_io.read becomes the source. + # - If +source+ responds to instance method +read+, + # source.read becomes the source. 
+ # - If both of the following are true, source becomes the \String 'null': + # - Option +allow_blank+ specifies a truthy value. + # - The source, as defined above, is +nil+ or the empty \String ''. + # - Otherwise, +source+ remains the source. + # - Argument +proc+, if given, must be a \Proc that accepts one argument. + # It will be called recursively with each result (depth-first order). + # See details below. + # BEWARE: This method is meant to serialise data from trusted user input, + # like from your own database server or clients under your control, it could + # be dangerous to allow untrusted users to pass JSON sources into it. + # - Argument +opts+, if given, contains a \Hash of options for the parsing. + # See {Parsing Options}[#module-JSON-label-Parsing+Options]. + # The default options can be changed via method JSON.load_default_options=. + # + # --- + # + # When no +proc+ is given, modifies +source+ as above and returns the result of + # parse(source, opts); see #parse. + # + # Source for following examples: + # source = <<-EOT + # { + # "name": "Dave", + # "age" :40, + # "hats": [ + # "Cattleman's", + # "Panama", + # "Tophat" + # ] + # } + # EOT + # + # Load a \String: + # ruby = JSON.load(source) + # ruby # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # Load an \IO object: + # require 'stringio' + # object = JSON.load(StringIO.new(source)) + # object # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # Load a \File object: + # path = 't.json' + # File.write(path, source) + # File.open(path) do |file| + # JSON.load(file) + # end # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # --- + # + # When +proc+ is given: + # - Modifies +source+ as above. + # - Gets the +result+ from calling parse(source, opts). + # - Recursively calls proc(result). + # - Returns the final result. + # + # Example: + # require 'json' + # + # # Some classes for the example. + # class Base + # def initialize(attributes) + # @attributes = attributes + # end + # end + # class User < Base; end + # class Account < Base; end + # class Admin < Base; end + # # The JSON source. + # json = <<-EOF + # { + # "users": [ + # {"type": "User", "username": "jane", "email": "jane@example.com"}, + # {"type": "User", "username": "john", "email": "john@example.com"} + # ], + # "accounts": [ + # {"account": {"type": "Account", "paid": true, "account_id": "1234"}}, + # {"account": {"type": "Account", "paid": false, "account_id": "1235"}} + # ], + # "admins": {"type": "Admin", "password": "0wn3d"} + # } + # EOF + # # Deserializer method. + # def deserialize_obj(obj, safe_types = %w(User Account Admin)) + # type = obj.is_a?(Hash) && obj["type"] + # safe_types.include?(type) ? Object.const_get(type).new(obj) : obj + # end + # # Call to JSON.load + # ruby = JSON.load(json, proc {|obj| + # case obj + # when Hash + # obj.each {|k, v| obj[k] = deserialize_obj v } + # when Array + # obj.map! {|v| deserialize_obj v } + # end + # }) + # pp ruby + # Output: + # {"users"=> + # [#"User", "username"=>"jane", "email"=>"jane@example.com"}>, + # #"User", "username"=>"john", "email"=>"john@example.com"}>], + # "accounts"=> + # [{"account"=> + # #"Account", "paid"=>true, "account_id"=>"1234"}>}, + # {"account"=> + # #"Account", "paid"=>false, "account_id"=>"1235"}>}], + # "admins"=> + # #"Admin", "password"=>"0wn3d"}>} + # + def load(source, proc = nil, options = {}) + opts = load_default_options.merge options + if source.respond_to? 
:to_str + source = source.to_str + elsif source.respond_to? :to_io + source = source.to_io.read + elsif source.respond_to?(:read) + source = source.read + end + if opts[:allow_blank] && (source.nil? || source.empty?) + source = 'null' + end + result = parse(source, opts) + recurse_proc(result, &proc) if proc + result + end + + # Recursively calls passed _Proc_ if the parsed data structure is an _Array_ or _Hash_ + def recurse_proc(result, &proc) # :nodoc: + case result + when Array + result.each { |x| recurse_proc x, &proc } + proc.call result + when Hash + result.each { |x, y| recurse_proc x, &proc; recurse_proc y, &proc } + proc.call result + else + proc.call result + end + end + + alias restore load + module_function :restore + + class << self + # Sets or returns the default options for the JSON.dump method. + # Initially: + # opts = JSON.dump_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :escape_slash=>false} + attr_accessor :dump_default_options + end + self.dump_default_options = { + :max_nesting => false, + :allow_nan => true, + :escape_slash => false, + } + + # :call-seq: + # JSON.dump(obj, io = nil, limit = nil) + # + # Dumps +obj+ as a \JSON string, i.e. calls generate on the object and returns the result. + # + # The default options can be changed via method JSON.dump_default_options. + # + # - Argument +io+, if given, should respond to method +write+; + # the \JSON \String is written to +io+, and +io+ is returned. + # If +io+ is not given, the \JSON \String is returned. + # - Argument +limit+, if given, is passed to JSON.generate as option +max_nesting+. + # + # --- + # + # When argument +io+ is not given, returns the \JSON \String generated from +obj+: + # obj = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} + # json = JSON.dump(obj) + # json # => "{\"foo\":[0,1],\"bar\":{\"baz\":2,\"bat\":3},\"bam\":\"bad\"}" + # + # When argument +io+ is given, writes the \JSON \String to +io+ and returns +io+: + # path = 't.json' + # File.open(path, 'w') do |file| + # JSON.dump(obj, file) + # end # => # + # puts File.read(path) + # Output: + # {"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"} + def dump(obj, anIO = nil, limit = nil) + if anIO and limit.nil? + anIO = anIO.to_io if anIO.respond_to?(:to_io) + unless anIO.respond_to?(:write) + limit = anIO + anIO = nil + end + end + opts = JSON.dump_default_options + opts = opts.merge(:max_nesting => limit) if limit + result = generate(obj, opts) + if anIO + anIO.write result + anIO + else + result + end + rescue JSON::NestingError + raise ArgumentError, "exceed depth limit" + end + + # Encodes string using String.encode. + def self.iconv(to, from, string) + string.encode(to, from) + end +end + +module ::Kernel + private + + # Outputs _objs_ to STDOUT as JSON strings in the shortest form, that is in + # one line. + def j(*objs) + objs.each do |obj| + puts JSON::generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # Outputs _objs_ to STDOUT as JSON strings in a pretty format, with + # indentation and over many lines. + def jj(*objs) + objs.each do |obj| + puts JSON::pretty_generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # If _object_ is string-like, parse the string and return the parsed result as + # a Ruby data structure. Otherwise, generate a JSON text from the Ruby data + # structure object and return it. + # + # The _opts_ argument is passed through to generate/parse respectively. See + # generate and parse for their documentation. 
+ def JSON(object, *args) + if object.respond_to? :to_str + JSON.parse(object.to_str, args.first) + else + JSON.generate(object, args.first) + end + end +end + +# Extends any Class to include _json_creatable?_ method. +class ::Class + # Returns true if this class can be used to create an instance + # from a serialised JSON string. The class has to implement a class + # method _json_create_ that expects a hash as first parameter. The hash + # should include the required data. + def json_creatable? + respond_to?(:json_create) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/ext.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/ext.rb new file mode 100644 index 0000000..7264a85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/ext.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality as C extensions. + module Ext + require 'json/ext/parser' + require 'json/ext/generator' + $DEBUG and warn "Using Ext extension for JSON." + JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/generic_object.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/generic_object.rb new file mode 100644 index 0000000..108309d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/generic_object.rb @@ -0,0 +1,71 @@ +#frozen_string_literal: false +require 'ostruct' + +module JSON + class GenericObject < OpenStruct + class << self + alias [] new + + def json_creatable? + @json_creatable + end + + attr_writer :json_creatable + + def json_create(data) + data = data.dup + data.delete JSON.create_id + self[data] + end + + def from_hash(object) + case + when object.respond_to?(:to_hash) + result = new + object.to_hash.each do |key, value| + result[key] = from_hash(value) + end + result + when object.respond_to?(:to_ary) + object.to_ary.map { |a| from_hash(a) } + else + object + end + end + + def load(source, proc = nil, opts = {}) + result = ::JSON.load(source, proc, opts.merge(:object_class => self)) + result.nil? ? new : result + end + + def dump(obj, *args) + ::JSON.dump(obj, *args) + end + end + self.json_creatable = false + + def to_hash + table + end + + def [](name) + __send__(name) + end unless method_defined?(:[]) + + def []=(name, value) + __send__("#{name}=", value) + end unless method_defined?(:[]=) + + def |(other) + self.class[other.to_hash.merge(to_hash)] + end + + def as_json(*) + { JSON.create_id => self.class.name }.merge to_hash + end + + def to_json(*a) + as_json.to_json(*a) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure.rb new file mode 100644 index 0000000..53178b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality in pure ruby. + module Pure + require 'json/pure/parser' + require 'json/pure/generator' + $DEBUG and warn "Using Pure library for JSON." 
+ JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure/generator.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure/generator.rb new file mode 100644 index 0000000..2257ee3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure/generator.rb @@ -0,0 +1,479 @@ +#frozen_string_literal: false +module JSON + MAP = { + "\x0" => '\u0000', + "\x1" => '\u0001', + "\x2" => '\u0002', + "\x3" => '\u0003', + "\x4" => '\u0004', + "\x5" => '\u0005', + "\x6" => '\u0006', + "\x7" => '\u0007', + "\b" => '\b', + "\t" => '\t', + "\n" => '\n', + "\xb" => '\u000b', + "\f" => '\f', + "\r" => '\r', + "\xe" => '\u000e', + "\xf" => '\u000f', + "\x10" => '\u0010', + "\x11" => '\u0011', + "\x12" => '\u0012', + "\x13" => '\u0013', + "\x14" => '\u0014', + "\x15" => '\u0015', + "\x16" => '\u0016', + "\x17" => '\u0017', + "\x18" => '\u0018', + "\x19" => '\u0019', + "\x1a" => '\u001a', + "\x1b" => '\u001b', + "\x1c" => '\u001c', + "\x1d" => '\u001d', + "\x1e" => '\u001e', + "\x1f" => '\u001f', + '"' => '\"', + '\\' => '\\\\', + } # :nodoc: + + ESCAPE_SLASH_MAP = MAP.merge( + '/' => '\\/', + ) + + # Convert a UTF8 encoded Ruby string _string_ to a JSON string, encoded with + # UTF16 big endian characters as \u????, and return it. + def utf8_to_json(string, escape_slash = false) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + map = escape_slash ? ESCAPE_SLASH_MAP : MAP + string.gsub!(/[\/"\\\x0-\x1f]/) { map[$&] || $& } + string.force_encoding(::Encoding::UTF_8) + string + end + + def utf8_to_json_ascii(string, escape_slash = false) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + map = escape_slash ? ESCAPE_SLASH_MAP : MAP + string.gsub!(/[\/"\\\x0-\x1f]/n) { map[$&] || $& } + string.gsub!(/( + (?: + [\xc2-\xdf][\x80-\xbf] | + [\xe0-\xef][\x80-\xbf]{2} | + [\xf0-\xf4][\x80-\xbf]{3} + )+ | + [\x80-\xc1\xf5-\xff] # invalid + )/nx) { |c| + c.size == 1 and raise GeneratorError, "invalid utf8 byte: '#{c}'" + s = JSON.iconv('utf-16be', 'utf-8', c).unpack('H*')[0] + s.force_encoding(::Encoding::ASCII_8BIT) + s.gsub!(/.{4}/n, '\\\\u\&') + s.force_encoding(::Encoding::UTF_8) + } + string.force_encoding(::Encoding::UTF_8) + string + rescue => e + raise GeneratorError.wrap(e) + end + + def valid_utf8?(string) + encoding = string.encoding + (encoding == Encoding::UTF_8 || encoding == Encoding::ASCII) && + string.valid_encoding? + end + module_function :utf8_to_json, :utf8_to_json_ascii, :valid_utf8? + + module Pure + module Generator + # This class is used to create State instances, that are use to hold data + # while generating a JSON text from a Ruby data structure. + class State + # Creates a State object from _opts_, which ought to be Hash to create + # a new State instance configured by _opts_, something else to create + # an unconfigured instance. If _opts_ is a State object, it is just + # returned. + def self.from_state(opts) + case + when self === opts + opts + when opts.respond_to?(:to_hash) + new(opts.to_hash) + when opts.respond_to?(:to_h) + new(opts.to_h) + else + SAFE_STATE_PROTOTYPE.dup + end + end + + # Instantiates a new State object, configured by _opts_. 
+ # + # _opts_ can have the following keys: + # + # * *indent*: a string used to indent levels (default: ''), + # * *space*: a string that is put after, a : or , delimiter (default: ''), + # * *space_before*: a string that is put before a : pair delimiter (default: ''), + # * *object_nl*: a string that is put at the end of a JSON object (default: ''), + # * *array_nl*: a string that is put at the end of a JSON array (default: ''), + # * *escape_slash*: true if forward slash (/) should be escaped (default: false) + # * *check_circular*: is deprecated now, use the :max_nesting option instead, + # * *max_nesting*: sets the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum should be checked. + # * *allow_nan*: true if NaN, Infinity, and -Infinity should be + # generated, otherwise an exception is thrown, if these values are + # encountered. This options defaults to false. + def initialize(opts = {}) + @indent = '' + @space = '' + @space_before = '' + @object_nl = '' + @array_nl = '' + @allow_nan = false + @ascii_only = false + @escape_slash = false + @buffer_initial_length = 1024 + configure opts + end + + # This string is used to indent levels in the JSON text. + attr_accessor :indent + + # This string is used to insert a space between the tokens in a JSON + # string. + attr_accessor :space + + # This string is used to insert a space before the ':' in JSON objects. + attr_accessor :space_before + + # This string is put at the end of a line that holds a JSON object (or + # Hash). + attr_accessor :object_nl + + # This string is put at the end of a line that holds a JSON array. + attr_accessor :array_nl + + # This integer returns the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum is checked. + attr_accessor :max_nesting + + # If this attribute is set to true, forward slashes will be escaped in + # all json strings. + attr_accessor :escape_slash + + # :stopdoc: + attr_reader :buffer_initial_length + + def buffer_initial_length=(length) + if length > 0 + @buffer_initial_length = length + end + end + # :startdoc: + + # This integer returns the current depth data structure nesting in the + # generated JSON. + attr_accessor :depth + + def check_max_nesting # :nodoc: + return if @max_nesting.zero? + current_nesting = depth + 1 + current_nesting > @max_nesting and + raise NestingError, "nesting of #{current_nesting} is too deep" + end + + # Returns true, if circular data structures are checked, + # otherwise returns false. + def check_circular? + !@max_nesting.zero? + end + + # Returns true if NaN, Infinity, and -Infinity should be considered as + # valid JSON and output. + def allow_nan? + @allow_nan + end + + # Returns true, if only ASCII characters should be generated. Otherwise + # returns false. + def ascii_only? + @ascii_only + end + + # Returns true, if forward slashes are escaped. Otherwise returns false. + def escape_slash? + @escape_slash + end + + # Configure this State instance with the Hash _opts_, and return + # itself. 
+ def configure(opts) + if opts.respond_to?(:to_hash) + opts = opts.to_hash + elsif opts.respond_to?(:to_h) + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + for key, value in opts + instance_variable_set "@#{key}", value + end + @indent = opts[:indent] if opts.key?(:indent) + @space = opts[:space] if opts.key?(:space) + @space_before = opts[:space_before] if opts.key?(:space_before) + @object_nl = opts[:object_nl] if opts.key?(:object_nl) + @array_nl = opts[:array_nl] if opts.key?(:array_nl) + @allow_nan = !!opts[:allow_nan] if opts.key?(:allow_nan) + @ascii_only = opts[:ascii_only] if opts.key?(:ascii_only) + @depth = opts[:depth] || 0 + @buffer_initial_length ||= opts[:buffer_initial_length] + @escape_slash = !!opts[:escape_slash] if opts.key?(:escape_slash) + + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + self + end + alias merge configure + + # Returns the configuration instance variables as a hash, that can be + # passed to the configure method. + def to_h + result = {} + for iv in instance_variables + iv = iv.to_s[1..-1] + result[iv.to_sym] = self[iv] + end + result + end + + alias to_hash to_h + + # Generates a valid JSON document from object +obj+ and + # returns the result. If no valid JSON document can be + # created this method raises a + # GeneratorError exception. + def generate(obj) + result = obj.to_json(self) + JSON.valid_utf8?(result) or raise GeneratorError, + "source sequence #{result.inspect} is illegal/malformed utf-8" + result + end + + # Return the value returned by method +name+. + def [](name) + if respond_to?(name) + __send__(name) + else + instance_variable_get("@#{name}") if + instance_variables.include?("@#{name}".to_sym) # avoid warning + end + end + + def []=(name, value) + if respond_to?(name_writer = "#{name}=") + __send__ name_writer, value + else + instance_variable_set "@#{name}", value + end + end + end + + module GeneratorMethods + module Object + # Converts this object to a string (calling #to_s), converts + # it to a JSON string, and returns the result. This is a fallback, if no + # special method #to_json was defined for some object. + def to_json(*) to_s.to_json end + end + + module Hash + # Returns a JSON string containing a JSON object, that is unparsed from + # this Hash instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + # _depth_ is used to find out nesting depth, to indent accordingly. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_shift(state) + state.object_nl.empty? or return '' + state.indent * state.depth + end + + def json_transform(state) + delim = ',' + delim << state.object_nl + result = '{' + result << state.object_nl + depth = state.depth += 1 + first = true + indent = !state.object_nl.empty? 
+ each { |key,value| + result << delim unless first + result << state.indent * depth if indent + result << key.to_s.to_json(state) + result << state.space_before + result << ':' + result << state.space + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + unless first + result << state.object_nl + result << state.indent * depth if indent + end + result << '}' + result + end + end + + module Array + # Returns a JSON string containing a JSON array, that is unparsed from + # this Array instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_transform(state) + delim = ',' + delim << state.array_nl + result = '[' + result << state.array_nl + depth = state.depth += 1 + first = true + indent = !state.array_nl.empty? + each { |value| + result << delim unless first + result << state.indent * depth if indent + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.array_nl + result << state.indent * depth if indent + result << ']' + end + end + + module Integer + # Returns a JSON string representation for this Integer number. + def to_json(*) to_s end + end + + module Float + # Returns a JSON string representation for this Float number. + def to_json(state = nil, *) + state = State.from_state(state) + case + when infinite? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + when nan? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + else + to_s + end + end + end + + module String + # This string should be encoded with UTF-8 A call to this method + # returns a JSON string encoded with UTF16 big endian characters as + # \u????. + def to_json(state = nil, *args) + state = State.from_state(state) + if encoding == ::Encoding::UTF_8 + string = self + else + string = encode(::Encoding::UTF_8) + end + if state.ascii_only? + '"' << JSON.utf8_to_json_ascii(string, state.escape_slash) << '"' + else + '"' << JSON.utf8_to_json(string, state.escape_slash) << '"' + end + end + + # Module that holds the extending methods if, the String module is + # included. + module Extend + # Raw Strings are JSON Objects (the raw bytes are stored in an + # array for the key "raw"). The Ruby String can be created by this + # module method. + def json_create(o) + o['raw'].pack('C*') + end + end + + # Extends _modul_ with the String::Extend module. + def self.included(modul) + modul.extend Extend + end + + # This method creates a raw object hash, that can be nested into + # other data structures and will be unparsed as a raw string. This + # method should be used, if you want to convert raw strings to JSON + # instead of UTF-8 strings, e. g. binary data. + def to_json_raw_object + { + JSON.create_id => self.class.name, + 'raw' => self.unpack('C*'), + } + end + + # This method creates a JSON text from the result of + # a call to to_json_raw_object of this String. + def to_json_raw(*args) + to_json_raw_object.to_json(*args) + end + end + + module TrueClass + # Returns a JSON string for true: 'true'. + def to_json(*) 'true' end + end + + module FalseClass + # Returns a JSON string for false: 'false'. 
+ def to_json(*) 'false' end + end + + module NilClass + # Returns a JSON string for nil: 'null'. + def to_json(*) 'null' end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure/parser.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure/parser.rb new file mode 100644 index 0000000..e1d701b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/pure/parser.rb @@ -0,0 +1,337 @@ +#frozen_string_literal: false +require 'strscan' + +module JSON + module Pure + # This class implements the JSON parser that is used to parse a JSON string + # into a Ruby data structure. + class Parser < StringScanner + STRING = /" ((?:[^\x0-\x1f"\\] | + # escaped special characters: + \\["\\\/bfnrt] | + \\u[0-9a-fA-F]{4} | + # match all but escaped special characters: + \\[\x20-\x21\x23-\x2e\x30-\x5b\x5d-\x61\x63-\x65\x67-\x6d\x6f-\x71\x73\x75-\xff])*) + "/nx + INTEGER = /(-?0|-?[1-9]\d*)/ + FLOAT = /(-? + (?:0|[1-9]\d*) + (?: + \.\d+(?i:e[+-]?\d+) | + \.\d+ | + (?i:e[+-]?\d+) + ) + )/x + NAN = /NaN/ + INFINITY = /Infinity/ + MINUS_INFINITY = /-Infinity/ + OBJECT_OPEN = /\{/ + OBJECT_CLOSE = /\}/ + ARRAY_OPEN = /\[/ + ARRAY_CLOSE = /\]/ + PAIR_DELIMITER = /:/ + COLLECTION_DELIMITER = /,/ + TRUE = /true/ + FALSE = /false/ + NULL = /null/ + IGNORE = %r( + (?: + //[^\n\r]*[\n\r]| # line comments + /\* # c-style comments + (?: + [^*/]| # normal chars + /[^*]| # slashes that do not start a nested comment + \*[^/]| # asterisks that do not end this comment + /(?=\*/) # single slash before this comment's end + )* + \*/ # the End of this comment + |[ \t\r\n]+ # whitespaces: space, horizontal tab, lf, cr + )+ + )mx + + UNPARSED = Object.new.freeze + + # Creates a new JSON::Pure::Parser instance for the string _source_. + # + # It will be configured by the _opts_ hash. _opts_ can have the following + # keys: + # * *max_nesting*: The maximum depth of nesting allowed in the parsed data + # structures. Disable depth checking with :max_nesting => false|nil|0, + # it defaults to 100. + # * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + # defiance of RFC 7159 to be parsed by the Parser. This option defaults + # to false. + # * *freeze*: If set to true, all parsed objects will be frozen. Parsed + # string will be deduplicated if possible. + # * *symbolize_names*: If set to true, returns symbols for the names + # (keys) in a JSON object. Otherwise strings are returned, which is + # also the default. It's not possible to use this option in + # conjunction with the *create_additions* option. + # * *create_additions*: If set to true, the Parser creates + # additions when a matching class and create_id are found. This + # option defaults to false. + # * *object_class*: Defaults to Hash + # * *array_class*: Defaults to Array + # * *decimal_class*: Specifies which class to use instead of the default + # (Float) when parsing decimal numbers. This class must accept a single + # string argument in its constructor. 
+ def initialize(source, opts = {}) + opts ||= {} + source = convert_encoding source + super source + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + @allow_nan = !!opts[:allow_nan] + @symbolize_names = !!opts[:symbolize_names] + @freeze = !!opts[:freeze] + if opts.key?(:create_additions) + @create_additions = !!opts[:create_additions] + else + @create_additions = false + end + @symbolize_names && @create_additions and raise ArgumentError, + 'options :symbolize_names and :create_additions cannot be used '\ + 'in conjunction' + @create_id = @create_additions ? JSON.create_id : nil + @object_class = opts[:object_class] || Hash + @array_class = opts[:array_class] || Array + @decimal_class = opts[:decimal_class] + @match_string = opts[:match_string] + end + + alias source string + + def reset + super + @current_nesting = 0 + end + + # Parses the current JSON string _source_ and returns the + # complete data structure as a result. + def parse + reset + obj = nil + while !eos? && skip(IGNORE) do end + if eos? + raise ParserError, "source is not valid JSON!" + else + obj = parse_value + UNPARSED.equal?(obj) and raise ParserError, + "source is not valid JSON!" + obj.freeze if @freeze + end + while !eos? && skip(IGNORE) do end + eos? or raise ParserError, "source is not valid JSON!" + obj + end + + private + + def convert_encoding(source) + if source.respond_to?(:to_str) + source = source.to_str + else + raise TypeError, + "#{source.inspect} is not like a string" + end + if source.encoding != ::Encoding::ASCII_8BIT + source = source.encode(::Encoding::UTF_8) + source.force_encoding(::Encoding::ASCII_8BIT) + end + source + end + + # Unescape characters in strings. + UNESCAPE_MAP = Hash.new { |h, k| h[k] = k.chr } + UNESCAPE_MAP.update({ + ?" => '"', + ?\\ => '\\', + ?/ => '/', + ?b => "\b", + ?f => "\f", + ?n => "\n", + ?r => "\r", + ?t => "\t", + ?u => nil, + }) + + EMPTY_8BIT_STRING = '' + if ::String.method_defined?(:encode) + EMPTY_8BIT_STRING.force_encoding Encoding::ASCII_8BIT + end + + STR_UMINUS = ''.respond_to?(:-@) + def parse_string + if scan(STRING) + return '' if self[1].empty? + string = self[1].gsub(%r((?:\\[\\bfnrt"/]|(?:\\u(?:[A-Fa-f\d]{4}))+|\\[\x20-\xff]))n) do |c| + if u = UNESCAPE_MAP[$&[1]] + u + else # \uXXXX + bytes = EMPTY_8BIT_STRING.dup + i = 0 + while c[6 * i] == ?\\ && c[6 * i + 1] == ?u + bytes << c[6 * i + 2, 2].to_i(16) << c[6 * i + 4, 2].to_i(16) + i += 1 + end + JSON.iconv('utf-8', 'utf-16be', bytes) + end + end + if string.respond_to?(:force_encoding) + string.force_encoding(::Encoding::UTF_8) + end + + if @freeze + if STR_UMINUS + string = -string + else + string.freeze + end + end + + if @create_additions and @match_string + for (regexp, klass) in @match_string + klass.json_creatable? 
or next + string =~ regexp and return klass.json_create(string) + end + end + string + else + UNPARSED + end + rescue => e + raise ParserError, "Caught #{e.class} at '#{peek(20)}': #{e}" + end + + def parse_value + case + when scan(FLOAT) + if @decimal_class then + if @decimal_class == BigDecimal then + BigDecimal(self[1]) + else + @decimal_class.new(self[1]) || Float(self[1]) + end + else + Float(self[1]) + end + when scan(INTEGER) + Integer(self[1]) + when scan(TRUE) + true + when scan(FALSE) + false + when scan(NULL) + nil + when !UNPARSED.equal?(string = parse_string) + string + when scan(ARRAY_OPEN) + @current_nesting += 1 + ary = parse_array + @current_nesting -= 1 + ary + when scan(OBJECT_OPEN) + @current_nesting += 1 + obj = parse_object + @current_nesting -= 1 + obj + when @allow_nan && scan(NAN) + NaN + when @allow_nan && scan(INFINITY) + Infinity + when @allow_nan && scan(MINUS_INFINITY) + MinusInfinity + else + UNPARSED + end + end + + def parse_array + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @array_class.new + delim = false + loop do + case + when eos? + raise ParserError, "unexpected end of string while parsing array" + when !UNPARSED.equal?(value = parse_value) + delim = false + result << value + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(ARRAY_CLOSE) + ; + else + raise ParserError, "expected ',' or ']' in array at '#{peek(20)}'!" + end + when scan(ARRAY_CLOSE) + if delim + raise ParserError, "expected next element in array at '#{peek(20)}'!" + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in array at '#{peek(20)}'!" + end + end + result + end + + def parse_object + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @object_class.new + delim = false + loop do + case + when eos? + raise ParserError, "unexpected end of string while parsing object" + when !UNPARSED.equal?(string = parse_string) + skip(IGNORE) + unless scan(PAIR_DELIMITER) + raise ParserError, "expected ':' in object at '#{peek(20)}'!" + end + skip(IGNORE) + unless UNPARSED.equal?(value = parse_value) + result[@symbolize_names ? string.to_sym : string] = value + delim = false + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(OBJECT_CLOSE) + ; + else + raise ParserError, "expected ',' or '}' in object at '#{peek(20)}'!" + end + else + raise ParserError, "expected value in object at '#{peek(20)}'!" + end + when scan(OBJECT_CLOSE) + if delim + raise ParserError, "expected next name, value pair in object at '#{peek(20)}'!" + end + if @create_additions and klassname = result[@create_id] + klass = JSON.deep_const_get klassname + break unless klass and klass.json_creatable? + result = klass.json_create(result) + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in object at '#{peek(20)}'!" 
+ end + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/version.rb b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/version.rb new file mode 100644 index 0000000..35e8dd3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/json-2.6.1/lib/json/version.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: false +module JSON + # JSON version + VERSION = '2.6.1' + VERSION_ARRAY = VERSION.split(/\./).map { |x| x.to_i } # :nodoc: + VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc: + VERSION_MINOR = VERSION_ARRAY[1] # :nodoc: + VERSION_BUILD = VERSION_ARRAY[2] # :nodoc: +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/.autotest b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/.autotest new file mode 100644 index 0000000..b6fbce5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/.autotest @@ -0,0 +1,34 @@ +# -*- ruby -*- + +require 'autotest/restart' +require 'autotest/rcov' if ENV['RCOV'] + +Autotest.add_hook :initialize do |at| + at.testlib = 'minitest/autorun' + + bench_tests = %w(TestMinitestBenchmark) + mock_tests = %w(TestMinitestMock TestMinitestStub) + spec_tests = %w(TestMinitestReporter TestMetaStatic TestMeta + TestSpecInTestCase) + unit_tests = %w(TestMinitestGuard TestMinitestRunnable + TestMinitestRunner TestMinitestTest TestMinitestUnit + TestMinitestUnitInherited TestMinitestUnitOrder + TestMinitestUnitRecording TestMinitestUnitTestCase) + + { + bench_tests => "test/minitest/test_minitest_benchmark.rb", + mock_tests => "test/minitest/test_minitest_mock.rb", + spec_tests => "test/minitest/test_minitest_reporter.rb", + unit_tests => "test/minitest/test_minitest_unit.rb", + }.each do |klasses, file| + klasses.each do |klass| + at.extra_class_map[klass] = file + end + end + + at.add_exception 'coverage.info' + at.add_exception 'coverage' +end + +# require 'autotest/rcov' +# Autotest::RCov.command = 'rcov_info' diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/History.rdoc b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/History.rdoc new file mode 100644 index 0000000..0b00862 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/History.rdoc @@ -0,0 +1,1397 @@ +=== 5.14.2 / 2020-08-31 + +* 1 bug fix: + + * Bumped ruby version to include 3.0 (trunk). + +=== 5.14.1 / 2020-05-15 + +* 3 minor enhancements: + + * Minitest.filter_backtrace returns original backtrace if filter comes back empty. + * Minitest::BacktraceFilter now returns entire backtrace if $MT_DEBUG set in env. + * Return true on a successful refute. (jusleg) + +* 1 bug fix: + + * Fixed expectation doco to not use global expectations. + +=== 5.14.0 / 2020-01-11 + +* 2 minor enhancements: + + * Block-assertions (eg assert_output) now error if raised inside the block. (casperisfine) + * Changed assert_raises to only catch Assertion since that covers Skip and friends. + +* 3 bug fixes: + + * Added example for value wrapper with block to Expectations module. (stomar) + * Fixed use of must/wont_be_within_delta on Expectation instance. (stomar) + * Renamed UnexpectedError#exception to #error to avoid problems with reraising. (casperisfine) + +=== 5.13.0 / 2019-10-29 + +* 9 minor enhancements: + + * Added Minitest::Guard#osx? + * Added examples to documentation for assert_raises. (lxxxvi) + * Added expectations #path_must_exist and #path_wont_exist. Not thrilled with the names. + * Added fail_after(year, month, day, msg) to allow time-bombing after a deadline. + * Added skip_until(year, month, day, msg) to allow deferring until a deadline. 
+ * Deprecated Minitest::Guard#maglev? + * Deprecated Minitest::Guard#rubinius? + * Finally added assert_path_exists and refute_path_exists. (deivid-rodriguez) + * Refactored and pulled Assertions#things_to_diff out of #diff. (BurdetteLamar) + +* 3 bug fixes: + + * Fix autorun bug that affects fork exit status in tests. (dylanahsmith/jhawthorn) + * Improved documentation for _/value/expect, especially for blocks. (svoop) + * Support new Proc#to_s format. (ko1) + +=== 5.12.2 / 2019-09-28 + +* 1 bug fix: + + * After chatting w/ @y-yagi and others, decided to lower support to include ruby 2.2. + +=== 5.12.1 / 2019-09-28 + +* 1 minor enhancement: + + * Added documentation for Reporter classes. (sshaw) + +* 3 bug fixes: + + * Avoid using 'match?' to support older ruby versions. (y-yagi) + * Fixed broken link to reference on goodness-of-fit testing. (havenwood) + * Update requirements in readme and Rakefile/hoe spec. + +=== 5.12.0 / 2019-09-22 + +* 8 minor enhancements: + + * Added a descriptive error if assert_output or assert_raises called without a block. (okuramasafumi) + * Changed mu_pp_for_diff to make having both \n and \\n easier to debug. + * Deprecated $N for specifying number of parallel test runners. Use MT_CPU. + * Deprecated use of global expectations. To be removed from MT6. + * Extended Assertions#mu_pp to encoding validity output for strings to improve diffs. + * Extended Assertions#mu_pp to output encoding and validity if invalid to improve diffs. + * Extended Assertions#mu_pp_for_diff to make escaped newlines more obvious in diffs. + * Fail gracefully when expectation used outside of `it`. + +* 3 bug fixes: + + * Check `option[:filter]` klass before match. Fixes 2.6 warning. (y-yagi) + * Fixed Assertions#diff from recalculating if set to nil + * Fixed spec section of readme to not use deprecated global expectations. (CheezItMan) + +=== 5.11.3 / 2018-01-26 + +* 1 bug fix: + + * Pushed #error? up to Reportable module. (composerinteralia) + +=== 5.11.2 / 2018-01-25 + +* 1 minor enhancement: + + * Reversed Test < Result. Back to < Runnable and using Reportable for shared code. + +* 2 bug fixes: + + * Fixed Result#location for instances of Test. (alexisbernard) + * Fixed deprecation message for Runnable#marshal_dump. (y-yagi) + +=== 5.11.1 / 2018-01-02 + +* 1 bug fix: + + * Fixed Result (a superclass of Test) overriding Runnable's name accessors. (y-yagi, MSP-Greg) + +=== 5.11.0 / 2018-01-01 + +* 2 major enhancements: + + * Added Minitest::Result and Minitest::Result.from(runnable). + * Changed Minitest::Test to subclass Result and refactored methods up. + +* 7 minor enhancements: + + * Added --no-plugins and MT_NO_PLUGINS to bypass MT plugin autoloading. Helps with bad actors installed globally. + * Added bench_performance_{logarithmic,power} for spec-style benchmarks. (rickhull) + * Added deprecation warning for Runnable#marshal_dump. + * Minitest.run_one_method now checks for instance of Result, not exact same class. + * Minitest::Test.run returns a Result version of self, not self. + * ProgressReporter#prerecord now explicitly prints klass.name. Allows for fakers. + +* 4 bug fixes: + + * Object.stub no longer calls the passed block if stubbed with a callable. + * Object.stub now passes blocks down to the callable result. + * Pushed Minitest::Test#time & #time_it up to Runnable. + * Test nil equality directly in assert_equal. Fixes #679. (voxik) + +=== 5.11.0b1 / 2017-12-20 + +* 2 major enhancements: + + * Added Minitest::Result and Minitest::Result.from(runnable). 
+ * Changed Minitest::Test to subclass Result and refactored methods up. + +* 6 minor enhancements: + + * Added --no-plugins and MT_NO_PLUGINS to bypass MT plugin autoloading. Helps with bad actors installed globally. + * Added bench_performance_{logarithmic,power} for spec-style benchmarks. (rickhull) + * Minitest.run_one_method now checks for instance of Result, not exact same class. + * Minitest::Test.run returns a Result version of self, not self. + * ProgressReporter#prerecord now explicitly prints klass.name. Allows for fakers. + * Removed Runnable.marshal_dump/load. + +* 4 bug fixes: + + * Object.stub no longer calls the passed block if stubbed with a callable. + * Object.stub now passes blocks down to the callable result. + * Pushed Minitest::Test#time & #time_it up to Runnable. + * Test nil equality directly in assert_equal. Fixes #679. (voxik) + +=== 5.10.3 / 2017-07-21 + +* 1 minor enhancement: + + * Extended documentation for Mock#expect for multiple calls to mock object. (insti) + +* 2 bug fixes: + + * Finished off missing doco. + * Fixed verbose output on parallelize_me! classes. (chanks) + +=== 5.10.2 / 2017-05-09 + +* 1 minor enhancement: + + * Added suggestion in minitest/hell to install minitest/proveit. + +* 7 bug fixes: + + * Expand MT6 to Minitest 6. (xaviershay) + * Fixed location of assert_send deprecation. (rab) + * Fixed location of nil assert_equal deprecation to work with expectations. (jeremyevans) + * Fixed minitest/hell to use parallelize_me! (azul) + * Made deprecation use warn so -W0 will silence it. + * Workaround for rdoc nodoc generation bug that totally f'd up minitest doco. (Paxa) + * Write aggregated_results directly to the IO object to avoid mixed encoding errors. (tenderlove) + +=== 5.10.1 / 2016-12-01 + +* 1 bug fix: + + * Added a hack/kludge to deal with missing #prerecord on reporters that aren't properly subclassing AbstractReporter (I'm looking at you minitest-reporters) + +=== 5.10.0 / 2016-11-30 + +* 1 major enhancement: + + * Deprecated ruby 1.8, 1.9, possibly 2.0, assert_send, & old MiniTest namespace. + +* 3 minor enhancements: + + * Warn if assert_equal expects a nil. This will fail in minitest 6+. (tenderlove) + * Added AbstractReporter#prerecord and extended ProgressReporter and CompositeReporter to use it. + * Minor optimization: remove runnables with no runnable methods before run. + +* 3 bug fixes: + + * Fix assert_throw rescuing any NameError and ArgumentError. (waldyr) + * Clean up (most of the) last remaining vestiges of minitest/unit. + * 2.4: removed deprecation warnings when referring to Fixnum. + +=== 5.9.1 / 2016-09-25 + +* 2 bug fixes: + + * Re-release to refresh gem certificate signing. ugh. + * Fixed hoe/minitest to not augment load path if we're actually testing minitest. + +=== 5.9.0 / 2016-05-16 + +* 8 minor enhancements: + + * Added Minitest.info_signal accessors to customize signal for test run info. (nate) + * Added assert_mock to make it more clear that you're testing w/ them. + * Added negative filter by test name. (utilum) + * Added warning to README that 1.8 and 1.9 support will be dropped in minitest 6. + * Automatically activate minitest/hell if $MT_HELL is defined. + * Improved default error messages for assert and refute. (bhenderson) + * minitest/hell now tries to require minitest/proveit + * mu_pp for strings prints out non-standard encodings to improve assert_equal diffs. + +* 1 bug fix: + + * Removed Interrupt from PASSTHROUGH_EXCEPTIONS (already handled). 
(waldyr) + +=== 5.8.5 / 2016-09-25 + +* 2 bug fixes: + + * Re-release to refresh gem certificate signing. ugh. + * Fixed hoe/minitest to not augment load path if we're actually testing minitest. + +=== 5.8.4 / 2016-01-21 + +* 1 bug fix: + + * Allow Minitest::Assertion to pass through assert_raises so inner failures are dealt with first. + +=== 5.8.3 / 2015-11-17 + +* 1 minor enhancement: + + * Added extra note about mocks and threads to readme. (zamith) + +* 1 bug fix: + + * Fixed bug in Mock#verify. (pithub/zamith) + +=== 5.8.2 / 2015-10-26 + +* 1 bug fix: + + * Fixed using parallelize_me! and capture_io (or any locking io). (arlt/tenderlove) + +=== 5.8.1 / 2015-09-23 + +* 1 minor enhancement: + + * Refactor assert_raises to be cleaner and to pass SystemExit and SignalException. (bhenderson) + +=== 5.8.0 / 2015-08-06 + +* 2 minor enhancements: + + * Add optional delegation mechanism to extend object with a mock. (zamith) + * Return early if there are no filtered methods. (jeremyevans) + +* 1 bug fix: + + * Don't extend io with pride if io is not a tty. (toy) + +=== 5.7.0 / 2015-05-27 + +* 1 major enhancement: + + * assert_raises now matches subclasses of the expected exception types. (jeremyevans) + +* 3 minor enhancements: + + * Added :block type for minitest/spec's #infect_an_assertion. (jeremyevans) + * Inline verification error messages in minitest/mock for GC performance. (zamith) + * assert_raises defaults to RuntimeError if not specified. (jeremyevans) + +* 4 bug fixes: + + * Added 'class' to minitest/mock's overridden_methods list. (zamith) + * Added file/line to infect_an_assertion's class_eval call. (jeremyevans) + * Cleared UnexpectedError's mesg w/ generic string. + * Fixed non-proc-oriented expectations when used on proc target. (jeremyevans) + +=== 5.6.1 / 2015-04-27 + +* 2 bug fixes: + + * Added Minitest.clock_time and switched all Time.now to it. (tenderlove) + * Moved Minitest::Expectations#_ into Minitest::Spec::DSL. + +=== 5.6.0 / 2015-04-13 + +* 4 major enhancements: + + * Added Minitest::Expectation value monad. + * Added Minitest::Expectations#_ that returns an Expectation. Aliased to value. + * All expectations are added to Minitest::Expectation. + * At some point, the methods on Object will be deprecated and then removed. + +* 4 minor enhancements: + + * Added a note about bundle exec pitfall in ruby 2.2+. (searls) + * Lazily start the parallel executor. (tenderlove) + * Make mocks more debugger-friendly (edward) + * Print out the current test run on interrupt. (riffraff) + +* 3 bug fixes: + + * Fix failing test under Windows. (kimhmadsen) + * Record mocked calls before they happen so mocks can raise exceptions easier (tho I'm not a fan). (corecode) + * Tried to clarify mocks vs stubs terminology better. (kkirsche) + +=== 5.5.1 / 2015-01-09 + +* 1 bug fix: + + * Fixed doco problems. (zzak) + +=== 5.5.0 / 2014-12-12 // mri 2.2.0 (as a real gem) + +* 1 minor enhancement: + + * Allow seed to be given via ENV for rake test loader sadness: eg rake SEED=42. + +=== 5.4.3 / 2014-11-11 + +* 2 bug fixes: + + * Clarified requirements for ruby are now 1.8.7 or better. + * Force encode error output in case mal-encoded exception is raised. (jasonrclark) + +=== 5.4.2 / 2014-09-26 + +* 2 minor enhancements: + + * Extract teardown method list. + * Thanks to minitest-gcstats got a 5-10% speedup via reduced GC! 
+ +=== 5.4.1 / 2014-08-28 + +* 1 bug fix: + + * Fixed specs hidden by nesting/ordering bug (blowmage/apotonick) + +=== 5.4.0 / 2014-07-07 + +* 2 minor enhancements: + + * Kernel#describe extended to splat additional_desc. + * Spec#spec_type extended to take a splat of additional items, passed to matcher procs. + +* 1 bug fix: + + * minitest/spec should require minitest/test, not minitest/unit. (doudou) + +=== 5.3.5 / 2014-06-17 + +* 1 minor enhancement: + + * Spit and polish (mostly spit). + +=== 5.3.4 / 2014-05-15 + +* 1 minor enhancement: + + * Test classes are randomized before running. (judofyr) + +=== 5.3.3 / 2014-04-14 + +* 1 bug fix: + + * Fixed using expectations w/ DSL in Test class w/o describe. (blowmage+others) + +=== 5.3.2 / 2014-04-02 + +* 1 bug fix: + + * Fixed doco on Assertions.assertions. (xaviershay) + +=== 5.3.1 / 2014-03-14 + +* 1 minor enhancement: + + * Modified verbage on bad 'let' names to be more helpful. (Archytaus) + +* 1 bug fix: + + * Fixed 2 cases still using MiniTest. (mikesea) + +=== 5.3.0 / 2014-02-25 + +* 1 minor enhancement: + + * Mocked methods can take a block to verify state. Seattle.rb 12 bday present from ernie! Thanks!! + +=== 5.2.3 / 2014-02-10 + +* 1 bug fix: + + * Fixed Spec#let check to allow overriding of other lets. (mvz) + +=== 5.2.2 / 2014-01-22 + +* 1 minor enhancement: + + * Spec#let raises ArgumentError if you override _any_ instance method (except subject). (rynr) + +* 1 bug fix: + + * Fixed up benchmark spec doco and added a test to demonstrate. (bhenderson) + +=== 5.2.1 / 2014-01-07 + +* 1 bug fix: + + * Properly deal with horrible mix of runtime load errors + other at_exit handlers. (dougo/chqr) + +=== 5.2.0 / 2013-12-13 + +* 1 minor enhancement: + + * Change expectations to allow calling most on procs (but not calling the proc). (bhenderson+others) + +=== 5.1.0 / 2013-12-05 + +* 1 minor enhancement: + + * Use a Queue for scheduling parallel tests. (tenderlove) + +* 1 bug fix: + + * Fixed misspelling in doco. (amatsuda) + +=== 5.0.8 / 2013-09-20 + +* 1 bug fix: + + * Fixed siginfo handler by rearranging reporters and fixing to_s. (tenderlove) + +=== 5.0.7 / 2013-09-05 + +* 2 minor enhancements: + + * Added clarification about the use of thread local variables in expectations. (jemc) + * Added extra message about skipped tests, if any. Disable globally with $MT_NO_SKIP_MSG. + +* 2 bug fixes: + + * Only require minitest, not minitest/autorun in pride_plugin. (judofyr) + * Require rubygems in load_plugins in case you're not using minitest/autorun. + +=== 5.0.6 / 2013-06-28 + +* 3 minor enhancements: + + * Allow stub to pass args to blocks. (swindsor) + * Improved warning message about minitest/autorun to address 1.9's minitest/autorun. + * Made minitest/test require minitest as needed. For lib writers. (erikh) + +* 1 bug fix: + + * Fixed missing require in minitest/test. (erikh) + +=== 4.7.5 / 2013-06-21 // mri 2.1.1 + +* 2 bug fixes: + + * Fix Spec#describe_stack to be thread local. + * Fix multithreaded test failures by defining Time local to mock test namespace + +=== 5.0.5 / 2013-06-20 + +* 6 bug fixes: + + * DOH! Fixed the rest of the new casing on Minitest. (splattael) + * Fixed typo on minitest/mock rdoc. (mrgilman/guiceolin) + * Make Spec::DSL.describe_stack thread local to avoid failing on my own tests. + * Make a fake Time.now local to the tests so they won't interfere with real reporter timings. + * Make everything mockable by wrapping all 'special' methods in a smarter wrapper. 
(bestie) + * Raise ArgumentError if let name starts with 'test'. (johnmaxwell) + +=== 5.0.4 / 2013-06-07 + +* 5 minor enhancements: + + * Added AbstractReporter, defining required Reporter API to quack properly. + * Added doco for writing reporters. + * Refactored Reporter into ProgressReporter and SummaryReporter. (idea: phiggins, code:me+scotch) + * Refactored SummaryReporter pushing up to StatisticsReporter. (phiggins) + * Removed Reporter#run_and_report... cleaner, but doesn't "fit" in the API. + +=== 5.0.3 / 2013-05-29 + +* 4 minor enhancements: + + * Added Runnable.with_info_handler and Runnable.on_signal. + * Moved io.sync restore to Reporter#run_and_report. + * Refactored inner loop of Reporter#report to #to_s. Callable for status updates. + * Restored MT4's mid-run report (^t). (tenderlove). + +=== 5.0.2 / 2013-05-20 + +* 3 bug fixes: + + * Gem.find_files is smarter than I remember... cause I wrote it that way. *sigh* I'm getting old. + * Pride wasn't doing puts through its #io. (tmiller/tenderlove) + * Replaced Runnable#dup and Test#dup with marshal_dump/load. Too many problems cropping up on untested rails code. (tenderlove/rubys) + +=== 5.0.1 / 2013-05-14 + +* 2 bug fixes: + + * Documented Assertions' need for @assertions to be defined by the includer. + * Only load one plugin version per name. Tries for latest. + +=== 5.0.0 / 2013-05-10 + +Oh god... here we go... + +Minitest 5: + +* 4 deaths in the family: + + * MiniTest.runner is dead. No more manager objects. + * MiniTest::Unit#record is dead. Use a Reporter instance instead. + * MiniTest::Unit._run_* is dead. Runnable things are responsible for their own runs. + * MiniTest::Unit.output is dead. No more centralized IO. + +* 12 major (oft incompatible) changes: + + * Renamed MiniTest to Minitest. Your pinkies will thank me. (aliased to MiniTest) + * Removed MiniTest::Unit entirely. No more manager objects. + * Added Minitest::Runnable. Everything minitest can run subclasses this. + * Renamed MiniTest::Unit::TestCase to Minitest::Test (subclassing Runnable). + * Added Minitest::Benchmark. + * Your benchmarks need to move to their own subclass. + * Benchmarks using the spec DSL have to have "Bench" somewhere in their describe. + * MiniTest::Unit.after_tests moved to Minitest.after_tests + * MiniTest::Unit.autorun is now Minitest.autorun. Just require minitest/autorun pls. + * Removed ParallelEach#grep since it isn't used anywhere. + * Renamed Runnable#__name__ to Runnable#name (but uses @NAME internally). + * Runnable#run needs to return self. Allows for swapping of results as needed. + +* 8 minor moves: + + * Moved Assertions module to minitest/assertions.rb + * Moved Expectations module to minitest/expectations.rb + * Moved Test to minitest/test.rb + * Moved everything else in minitest/unit.rb to minitest.rb + * minitest/unit.rb is now just a small (user-test only) compatibility layer. + * Moved most of minitest/pride into minitest/pride_plugin. + * minitest/pride now just activates pride. + * Moved ParallelEach under Minitest. + +* 9 additions: + + * Added a plugin system that can extend command-line options. + * Added Minitest.extensions. + * Added Minitest.reporter (only available during startup). + * Added Minitest.run(args). This is the very top of any Minitest run. + * Added Minitest::Reporter. Everything minitest can report goes through here. + * Minitest.reporter is a composite so you can add your own. + * Added Minitest::CompositeReporter. Much easier to extend with your own reporters. 
+ * Added UnexpectedError, an Assertion subclass, to wrap up errors. + * Minitest::Test#run is now freakin' beautiful. 47 -> 17 loc + +* 11 other: + + * Removed Object.infect_with_assertions (it was already dead code). + * Runnables are responsible for knowing their result_code (eg "." or "F"). + * Minitest.autorun now returns boolean, not exit code. + * Added FAQ entry for extending via modules. (phiggins) + * Implement Runnable#dup to cleanse state back to test results. Helps with serialization. pair:tenderlove + * Moved ParallelEach under Minitest. + * Runnable#run needs to return self. Allows for swapping of results as needed. + * Minitest.init_plugins passes down options. + * Minitest.load_plugins only loads once. + * Fixed minitest/pride to work with rake test loader again. (tmiller) + * Added count/size to ParallelEach to fix use w/in stdlib's test/unit. :( (btaitelb) + +* 5 voodoo: + + * Removed mutex from minitest.rb (phiggins) + * Removed mutex from test.rb (phiggins) + * Removed Minitest::Reporter.synchronize (phiggins) + * Removed Minitest::Test.synchronize (phiggins) + * Upon loading minitest/parallel_each, record, capture_io and capture_subprocess_io are doped with synchronization code. (phiggins) + +=== 4.7.4 / 2013-05-01 + +This is probably the last release of the 4.x series. It will be merged +to ruby and will be put into maintenance mode there. + +I'm not set in stone on this, but at this point further development of +minitest (5+) will be gem-only. It is just too hard to work w/in +ruby-core w/ test-unit compatibility holding minitest development +back. + +* 2 minor enhancements: + + * Added count/size to ParallelEach to fix use w/in stdlib's test/unit. :( (btaitelb) + * Allow disabling of info_signal handler in runner. (erikh) + +=== 4.7.3 / 2013-04-20 + +* 1 bug fix: + + * Reverted stubbing of module methods change. Stub the user, not the impl. (ab9/tyabe) + +=== 4.7.2 / 2013-04-18 + +* 2 bug fixes: + + * Fixed inconsistency in refute_in_delta/epsilon. I double negatived my logic. (nettsundere) + * Fixed stubbing of module methods (eg Kernel#sleep). (steveklabnik) + +=== 4.7.1 / 2013-04-09 + +* 1 minor enhancement: + + * Added FAQ section to README + +* 1 bug fix: + + * Fixed bug where guard runs tests bypassing minitest/autorun and an ivar isn't set right. (darrencauthon) + +=== 4.7.0 / 2013-03-18 + +* 1 major enhancement: + + * Refactored MiniTest::Spec into MiniTest::Spec::DSL. + +* 1 bug fix: + + * Removed $DEBUG handler that detected when test/unit and minitest were both loaded. (tenderlove) + +=== 4.6.2 / 2013-02-27 + +* 1 minor enhancement: + + * Change error output to match Class#method, making it easier to use -n filter. + +=== 4.6.1 / 2013-02-14 + +* 1 bug fix: + + * Fixed an option processing bug caused by test/unit's irresponsibly convoluted code. (floehopper) + +=== 4.6.0 / 2013-02-07 + +* 3 major enhancements: + + * Removed ::reset_setup_teardown_hooks + * Removed the long deprecated assert_block + * Removed the long deprecated lifecycle hooks: add_(setup|teardown)_hook + +* 1 minor enhancement: + + * Allow filtering tests by suite name as well as test name. (lazyatom) + +* 2 bug fixes: + + * Made hex handling (eg object_ids) in mu_pp_for_diff more specific. (maxim) + * nodoc top-level module. (zzak) + +=== 4.5.0 / 2013-01-22 + +* 1 major enhancement: + + * Rearranged minitest/unit.rb so NO parallelization code is loaded/used until you opt-in. + +* 4 minor enhancements: + + * Added TestCase#skipped? for teardown guards + * Added maglev? 
guard + * Document that record can be sent twice if teardown fails or errors (randycoulman) + * Errors in teardown are now recorded. (randycoulman) + +* 3 bug fixes: + + * Added hacks and skips to get clean test runs on maglev + * Modified float tests for maglev float output differences. Not sure this is right. Not sure I care. + * Test for existance of diff.exe instead of assuming they have devkit. (blowmage/Cumbayah) + +=== 4.4.0 / 2013-01-07 + +* 3 minor enhancements: + + * Added fit_logarithic and assert_performance_logarithmic. (ktheory) + * Merge processed options so others can mess with defaults. (tenderlove) + * TestCase#message can now take another proc to defer custom message cost. (ordinaryzelig/bhenderson) + +* 1 bug fix: + + * TestCase#passed? now true if test is skipped. (qanhd) + +=== 4.3.3 / 2012-12-06 + +* 1 bug fix: + + * Updated information about stubbing. (daviddavis) + +=== 4.3.2 / 2012-11-27 // mri 2.0.0 + +* 1 minor enhancement: + + * Improved assert_equals error message to point you at #== of member objects. (kcurtin) + +=== 4.3.1 / 2012-11-23 + +* 1 bug fix: + + * Moved test_children to serial testcase to prevent random failures. + +=== 4.3.0 / 2012-11-17 + +* 4 minor enhancements: + + * Allow #autorun to run even if loaded with other test libs that call exit. (sunaku) + * Do not include Expectations in Object if $MT_NO_EXPECTATIONS is set (experimental?) + * Gave some much needed love to assert_raises. + * Mock#expect can take a block to custom-validate args. (gmoothart) + +=== 4.2.0 / 2012-11-02 + +* 4 major enhancements: + + * Added minitest/hell - run all your tests through the ringer! + * Added support for :parallel test_order to run test cases in parallel. + * Removed last_error and refactored runner code to be threadsafe. + * _run_suites now runs suites in parallel if they opt-in. + +* 4 minor enhancements: + + * Added TestCase#synchronize + * Added TestCase.make_my_diffs_pretty! + * Added TestCase.parallelize_me! + * Lock on capture_io for thread safety (tenderlove) + +=== 4.1.0 / 2012-10-05 + +* 2 minor enhancements: + + * Added skip example to readme. (dissolved) + * Extracted backtrace filter to object. (tenderlove) + +* 1 bug fix: + + * OMG I'm so dumb. Fixed access to deprecated hook class methods. I hate ruby modules. (route) + +=== 4.0.0 / 2012-09-28 + +* 1 major enhancement: + + * The names of a privately-used undocumented constants are Super Important™. + +* 1 minor enhancement: + + * Support stubbing methods that would be handled via method_missing. (jhsu) + +* 3 bug fixes: + + * Add include_private param to MiniTest::Mock#respond_to? (rf-) + * Fixed use of minitest/pride with --help. (zw963) + * Made 'No visible difference.' message more clear. (ckrailo) + +=== 3.5.0 / 2012-09-21 + +* 1 minor enhancement: + + * Added #capture_subprocess_io. (route) + +=== 3.4.0 / 2012-09-05 + +* 2 minor enhancements: + + * assert_output can now take regexps for expected values. (suggested by stomar) + * Clarified that ruby 1.9/2.0's phony gems cause serious confusion for rubygems. + +=== 3.3.0 / 2012-07-26 + +* 1 major enhancement: + + * Deprecated add_(setup|teardown)_hook in favor of (before|after)_(setup|teardown) [2013-01-01] + +* 4 minor enhancements: + + * Refactored deprecated hook system into a module. + * Refactored lifecycle hooks into a module. + * Removed after_setup/before_teardown + run_X_hooks from Spec. + * Spec#before/after now do a simple define_method and call super. DUR. + +* 2 bug fixes: + + * Fixed #passed? 
when used against a test that called flunk. (floehopper) + * Fixed rdoc bug preventing doco for some expectations. (stomar). + +=== 3.2.0 / 2012-06-26 + +* 1 minor enhancement: + + * Stubs now yield self. (peterhellberg) + +* 1 bug fix: + + * Fixed verbose test that only fails when run in verbose mode. mmmm irony. + +=== 3.1.0 / 2012-06-13 + +* 2 minor enhancements: + + * Removed LONG deprecated Unit.out accessor + * Removed generated method name munging from minitest/spec. (ordinaryzelig/tenderlove) + +=== 3.0.1 / 2012-05-24 + +* 1 bug fix: + + * I'm a dumbass and refactored into Mock#call. Renamed to #__call so you can mock #call. (mschuerig) + +=== 3.0.0 / 2012-05-08 + +* 3 major enhancements: + + * Added Object#stub (in minitest/mock.rb). + * Mock#expect mocks are used in the order they're given. + * Mock#verify now strictly compares against expect calls. + +* 3 minor enhancements: + + * Added caller to deprecation message. + * Mock error messages are much prettier. + * Removed String check for RHS of assert/refute_match. This lets #to_str work properly. + +* 1 bug fix: + + * Support drive letter on Windows. Patch provided from MRI by Usaku NAKAMURA. (ayumin) + +=== 2.12.1 / 2012-04-10 + +* 1 minor enhancement: + + * Added ruby releases to History.txt to make it easier to see what you're missing + +* 1 bug fix: + + * Rolled my own deprecate msg to allow MT to work with rubygems < 1.7 + +=== 2.12.0 / 2012-04-03 + +* 4 minor enhancements: + + * ::it returns test method name (wojtekmach) + * Added #record method to runner so runner subclasses can cleanly gather data. + * Added Minitest alias for MiniTest because even I forget. + * Deprecated assert_block!! Yay!!! + +* 1 bug fix: + + * Fixed warning in i_suck_and_my_tests_are_order_dependent! (phiggins) + +=== 2.11.4 / 2012-03-20 + +* 2 minor enhancements: + + * Updated known extensions + * You got your unicode in my tests! You got your tests in my unicode! (fl00r) + +* 1 bug fix: + + * Fixed MiniTest::Mock example in the readme. (conradwt) + +=== 2.11.3 / 2012-02-29 + +* 2 bug fixes: + + * Clarified that assert_raises returns the exception for further testing + * Fixed assert_in_epsilon when both args are negative. (tamc) + +=== 2.11.2 / 2012-02-14 + +* 1 minor enhancement: + + * Display failures/errors on SIGINFO. (tenderlove) + +* 1 bug fix: + + * Fixed MiniTest::Unit.after_tests for Ruby 1.9.3. (ysbaddaden) + +=== 2.11.1 / 2012-02-01 + +* 3 bug fixes: + + * Improved description for --name argument. (drd) + * Ensure Mock#expect's expected args is an Array. (mperham) + * Ensure Mock#verify verifies multiple expects of the same method. (chastell) + +=== 2.11.0 / 2012-01-25 + +* 2 minor enhancements: + + * Added before / after hooks for setup and teardown. (tenderlove) + * Pushed run_setup_hooks down to Spec. (tenderlove) + +=== 2.10.1 / 2012-01-17 + +* 1 bug fix: + + * Fixed stupid 1.9 path handling grumble grumble. (graaff) + +=== 2.10.0 / 2011-12-20 + +* 3 minor enhancements: + + * Added specs for must/wont be_empty/respond_to/be_kind_of and others. + * Added tests for assert/refute predicate. + * Split minitest/excludes.rb out to its own gem. + +* 1 bug fix: + + * Fixed must_be_empty and wont_be_empty argument handling. (mrsimo) + +=== 2.9.1 / 2011-12-13 + +* 4 minor enhancements: + + * Added a ton of tests on spec error message output. + * Cleaned up consistency of msg handling on unary expectations. + * Improved error messages on assert/refute_in_delta. + * infect_an_assertion no longer checks arity and better handles args. 
+ +* 1 bug fix: + + * Fixed error message on specs when 2+ args and custom message provided. (chastell) + +=== 2.9.0 / 2011-12-07 + +* 4 minor enhancements: + + * Added TestCase.exclude and load_excludes for programmatic filtering of tests. + * Added guard methods so you can cleanly skip based on platform/impl + * Holy crap! 100% doco! `rdoc -C` ftw + * Switch assert_output to test stderr before stdout to possibly improve debugging + +=== 2.8.1 / 2011-11-17 + +* 1 bug fix: + + * Ugh. 1.9's test/unit violates my internals. Added const_missing. + +=== 2.8.0 / 2011-11-08 + +* 2 minor enhancements: + + * Add a method so that code can be run around a particular test case (tenderlove) + * Turn off backtrace filtering if we're running inside a ruby checkout. (drbrain) + +* 2 bug fixes: + + * Fixed 2 typos and 2 doc glitches. (splattael) + * Remove unused block arguments to avoid creating Proc objects. (k-tsj) + +=== 2.7.0 / 2011-10-25 + +* 2 minor enhancements: + + * Include failed values in the expected arg output in MockExpectationError. (nono) + * Make minitest/pride work with other 256 color capable terms. (sunaku) + +* 2 bug fixes: + + * Clarified the documentation of minitest/benchmark (eregon) + * Fixed using expectations in regular unit tests. (sunaku) + +=== 2.6.2 / 2011-10-19 + +* 1 minor enhancement: + + * Added link to vim bundle. (sunaku) + +* 2 bug fixes: + + * Force gem activation in hoe minitest plugin + * Support RUBY_VERSION='2.0.0' (nagachika) + +=== 2.6.1 / 2011-09-27 + +* 2 bug fixes: + + * Alias Spec.name from Spec.to_s so it works when @name is nil (nathany) + * Fixed assert and refute_operator where second object has a bad == method. + +=== 2.6.0 / 2011-09-13 + +* 2 minor enhancements: + + * Added specify alias for it and made desc optional. + * Spec#must_be and #wont_be can be used with predicates (metaskills) + +* 1 bug fix: + + * Fixed Mock.respond_to?(var) to work with strings. (holli) + +=== 2.5.1 / 2011-08-27 // ruby 1.9.3: p0, p125, p34579 + +* 2 minor enhancements: + + * Added gem activation for minitest in minitest/autoload to help out 1.9 users + * Extended Spec.register_spec_type to allow for procs instead of just regexps. + +=== 2.5.0 / 2011-08-18 + +* 4 minor enhancements: + + * Added 2 more arguments against rspec: let & subject in 9 loc! (emmanuel/luis) + * Added TestCase.i_suck_and_my_tests_are_order_dependent! + * Extended describe to take an optional method name (2 line change!). (emmanuel) + * Refactored and extended minitest/pride to do full 256 color support. (lolcat) + +* 1 bug fix: + + * Doc fixes. (chastell) + +=== 2.4.0 / 2011-08-09 + +* 4 minor enhancements: + + * Added simple examples for all expectations. + * Improved Mock error output when args mismatch. + * Moved all expectations from Object to MiniTest::Expectations. + * infect_with_assertions has been removed due to excessive clever + +* 4 bug fixes: + + * Fix Assertions#mu_pp to deal with immutable encoded strings. (ferrous26) + * Fix minitest/pride for MacRuby (ferrous26) + * Made error output less fancy so it is more readable + * Mock shouldn't undef === and inspect. (dgraham) + +=== 2.3.1 / 2011-06-22 + +* 1 bug fix: + + * Fixed minitest hoe plugin to be a spermy dep and not depend on itself. + +=== 2.3.0 / 2011-06-15 + +* 5 minor enhancements: + + * Add setup and teardown hooks to MiniTest::TestCase. (phiggins) + * Added nicer error messages for MiniTest::Mock. (phiggins) + * Allow for less specific expected arguments in Mock. (bhenderson/phiggins) + * Made MiniTest::Mock a blank slate. 
(phiggins) + * Refactored minitest/spec to use the hooks instead of define_inheritable_method. (phiggins) + +* 2 bug fixes: + + * Fixed TestCase's inherited hook. (dchelimsky/phiggins/jamis, the 'good' neighbor) + * MiniTest::Assertions#refute_empty should use mu_pp in the default message. (whatthejeff) + +=== 2.2.2 / 2011-06-01 + +* 2 bug fixes: + + * Got rid of the trailing period in message for assert_equal. (tenderlove) + * Windows needs more flushing. (Akio Tajima) + +=== 2.2.1 / 2011-05-31 + +* 1 bug fix: + + * My _ONE_ non-rubygems-using minitest user goes to Seattle.rb! + +=== 2.2.0 / 2011-05-29 + +* 6 minor enhancements: + + * assert_equal (and must_equal) now tries to diff output where it makes sense. + * Added Assertions#diff(exp, act) to be used by assert_equal. + * Added Assertions#mu_pp_for_diff + * Added Assertions.diff and diff= + * Moved minitest hoe-plugin from hoe-seattlerb. (erikh) + * Skipped tests only output details in verbose mode. (tenderlove+zenspider=xoxo) + +=== 2.1.0 / 2011-04-11 + +* 5 minor enhancements: + + * Added MiniTest::Spec.register_spec_type(matcher, klass) and spec_type(desc) + * Added ability for specs to share code via subclassing of Spec. (metaskills) + * Clarified (or tried to) bench_performance_linear's use of threshold. + * MiniTest::Unit.runner=(runner) provides an easy way of creating custom test runners for specialized needs. (justinweiss) + * Reverse order of inheritance in teardowns of specs. (deepfryed) + +* 3 bug fixes: + + * FINALLY fixed problems of inheriting specs in describe/it/describe scenario. (MGPalmer) + * Fixed a new warning in 1.9.3. + * Fixed assert_block's message handling. (nobu) + +=== 2.0.2 / 2010-12-24 + +* 1 minor enhancement: + + * Completed doco on minitest/benchmark for specs. + +* 1 bug fix: + + * Benchmarks in specs that didn't call bench_range would die. (zzak). + +=== 2.0.1 / 2010-12-15 + +* 4 minor enhancements: + + * Do not filter backtrace if $DEBUG + * Exit autorun via nested at_exit handler, in case other libs call exit + * Make options accesor lazy. + * Split printing of test name and its time. (nurse) + +* 1 bug fix: + + * Fix bug when ^T is hit before runner start + +=== 2.0.0 / 2010-11-11 + +* 3 major enhancements: + + * Added minitest/benchmark! Assert your performance! YAY! + * Refactored runner to allow for more extensibility. See minitest/benchmark. + * This makes the runner backwards incompatible in some ways! + +* 9 minor enhancements: + + * Added MiniTest::Unit.after_tests { ... } + * Improved output by adding test rates and a more sortable verbose format + * Improved readme based on feedback from others + * Added io method to TestCase. If used, it'll supplant '.EF' output. + * Refactored IO in MiniTest::Unit. + * Refactored _run_anything to _run_suite to make it easier to wrap (ngauthier) + * Spec class names are now the unmunged descriptions (btakita) + * YAY for not having redundant rdoc/readmes! + * Help output is now generated from the flags you passed straight up. + +* 4 bug fixes: + + * Fixed scoping issue on minitest/mock (srbaker/prosperity) + * Fixed some of the assertion default messages + * Fixes autorun when on windows with ruby install on different drive (larsch) + * Fixed rdoc output bug in spec.rb + +=== 1.7.2 / 2010-09-23 + +* 3 bug fixes: + + * Fixed doco for expectations and Spec. + * Fixed test_capture_io on 1.9.3+ (sora_h) + * assert_raises now lets MiniTest::Skip through. 
(shyouhei) + +=== 1.7.1 / 2010-09-01 + +* 1 bug fix: + + * 1.9.2 fixes for spec tests + +=== 1.7.0 / 2010-07-15 + +* 5 minor enhancements: + + * Added assert_output (mapped to must_output). + * Added assert_silent (mapped to must_be_silent). + * Added examples to readme (Mike Dalessio) + * Added options output at the top of the run, for fatal run debugging (tenderlove) + * Spec's describe method returns created class + +=== 1.6.0 / 2010-03-27 // ruby 1.9.2-p290 + +* 10 minor enhancements: + + * Added --seed argument so you can reproduce a random order for debugging. + * Added documentation for assertions + * Added more rdoc and tons of :nodoc: + * Added output to give you all the options you need to reproduce that run. + * Added proper argument parsing to minitest. + * Added unique serial # to spec names so order can be preserved (needs tests). (phrogz) + * Empty 'it' fails with default msg. (phrogz) + * Remove previous method on expect to remove 1.9 warnings + * Spec#it is now order-proof wrt subclasses/nested describes. + * assert_same error message now reports in decimal, eg: oid=123. (mattkent) + +* 2 bug fixes: + + * Fixed message on refute_same to be consistent with assert_same. + * Fixed method randomization to be stable for testing. + +=== 1.5.0 / 2010-01-06 + +* 4 minor enhancements: + + * Added ability to specify what assertions should have their args flipped. + * Don't flip arguments on *include and *respond_to assertions. + * Refactored Module.infect_an_assertion from Module.infect_with_assertions. + * before/after :all now bitches and acts like :each + +* 3 bug fixes: + + * Nested describes now map to nested test classes to avoid namespace collision. + * Using undef_method instead of remove_method to clean out inherited specs. + * assert_raises was ignoring passed in message. + +=== 1.4.2 / 2009-06-25 + +* 1 bug fix: + + * Fixed info handler for systems that don't have siginfo. + +=== 1.4.1 / 2009-06-23 + +* 1 major enhancement: + + * Handle ^C and other fatal exceptions by failing + +* 1 minor enhancement: + + * Added something to catch mixed use of test/unit and minitest if $DEBUG + +* 1 bug fix: + + * Added SIGINFO handler for finding slow tests without verbose + +=== 1.4.0 / 2009-06-18 + +* 5 minor enhancement: + + * Added clarification doco. + * Added specs and mocks to autorun. + * Changed spec test class creation to be non-destructive. + * Updated rakefile for new hoe capabilities. + * describes are nestable (via subclass). before/after/def inherits, specs don't. + +* 3 bug fixes: + + * Fixed location on must/wont. + * Switched to __name__ to avoid common ivar name. + * Fixed indentation in test file (1.9). + +=== 1.3.1 / 2009-01-20 // ruby 1.9.1-p431 + +* 1 minor enhancement: + + * Added miniunit/autorun.rb as replacement for test/unit.rb's autorun. + +* 16 bug fixes: + + * 1.9 test fixes. + * Bug fixes from nobu and akira for really odd scenarios. They run ruby funny. + * Fixed (assert|refute)_match's argument order. + * Fixed LocalJumpError in autorun if exception thrown before at_exit. + * Fixed assert_in_delta (should be >=, not >). + * Fixed assert_raises to match Modules. + * Fixed capture_io to not dup IOs. + * Fixed indentation of capture_io for ruby 1.9 warning. + * Fixed location to deal better with custom assertions and load paths. (Yuki) + * Fixed order of (must|wont)_include in MiniTest::Spec. + * Fixed skip's backtrace. + * Got arg order wrong in *_match in tests, message wrong as a result. + * Made describe private. 
For some reason I thought that an attribute of Kernel. + * Removed disable_autorun method, added autorun.rb instead. + * assert_match escapes if passed string for pattern. + * instance_of? is different from ===, use instance_of. + +=== 1.3.0 / 2008-10-09 + +* 2 major enhancements: + + * renamed to minitest and pulled out test/unit compatibility. + * mini/test.rb is now minitest/unit.rb, everything else maps directly. + +* 12 minor enhancements: + + * assert_match now checks that act can call =~ and converts exp to a + regexp only if needed. + * Added assert_send... seems useless to me tho. + * message now forces to string... ruby-core likes to pass classes and arrays :( + * Added -v handling and switched to @verbose from $DEBUG. + * Verbose output now includes test class name and adds a sortable running time! + * Switched message generation into procs for message deferment. + * Added skip and renamed fail to flunk. + * Improved output failure messages for assert_instance_of, assert_kind_of + * Improved output for assert_respond_to, assert_same. + * at_exit now exits false instead of errors+failures. + * Made the tests happier and more readable imhfo. + * Switched index(s) == 0 to rindex(s, 0) on nobu's suggestion. Faster. + +* 5 bug fixes: + + * 1.9: Added encoding normalization in mu_pp. + * 1.9: Fixed backtrace filtering (BTs are expanded now) + * Added back exception_details to assert_raises. DOH. + * Fixed shadowed variable in mock.rb + * Fixed stupid muscle memory message bug in assert_send. + +=== 1.2.1 / 2008-06-10 + +* 7 minor enhancements: + + * Added deprecations everywhere in test/unit. + * Added test_order to TestCase. :random on mini, :sorted on test/unit (for now). + * Big cleanup in test/unit for rails. Thanks Jeremy Kemper! + * Minor readability cleanup. + * Pushed setup/run/teardown down to testcase allowing specialized testcases. + * Removed pp. Tests run 2x faster. :/ + * Renamed deprecation methods and moved to test/unit/deprecate.rb. + +=== 1.2.0 / 2008-06-09 + +* 2 major enhancements: + + * Added Mini::Spec. + * Added Mini::Mock. Thanks Steven Baker!! + +* 23 minor enhancements: + + * Added bin/use_miniunit to make it easy to test out miniunit. + * Added -n filtering, thanks to Phil Hagelberg! + * Added args argument to #run, takes ARGV from at_exit. + * Added test name output if $DEBUG. + * Added a refute (was deny) for every assert. + * Added capture_io and a bunch of nice assertions from zentest. + * Added deprecation mechanism for assert_no/not methods to test/unit/assertions. + * Added pp output when available. + * Added tests for all assertions. Pretty much maxed out coverage. + * Added tests to verify consistency and good naming. + * Aliased and deprecated all ugly assertions. + * Cleaned out test/unit. Moved autorun there. + * Code cleanup to make extensions easier. Thanks Chad! + * Got spec args reversed in all but a couple assertions. Much more readable. + * Improved error messages across the board. Adds your message to the default. + * Moved into Mini namespace, renamed to Mini::Test and Mini::Spec. + * Pulled the assertions into their own module... + * Removed as much code as I could while still maintaining full functionality. + * Moved filter_backtrace into MiniTest. + * Removed MiniTest::Unit::run. Unnecessary. + * Removed location_of_failure. Unnecessary. + * Rewrote test/unit's filter_backtrace. Flog from 37.0 to 18.1 + * Removed assert_send. Google says it is never used. + * Renamed MiniTest::Unit.autotest to #run. + * Renamed deny to refute. 
+ * Rewrote some ugly/confusing default assertion messages. + * assert_in_delta now defaults to 0.001 precision. Makes specs prettier. + +* 9 bug fixes: + + * Fixed assert_raises to raise outside of the inner-begin/rescue. + * Fixed for ruby 1.9 and rubinius. + * No longer exits 0 if exception in code PRE-test run causes early exit. + * Removed implementors method list from mini/test.rb - too stale. + * assert_nothing_raised takes a class as an arg. wtf? STUPID + * ".EF" output is now unbuffered. + * Bunch of changes to get working with rails... UGH. + * Added stupid hacks to deal with rails not requiring their dependencies. + * Now bitch loudly if someone defines one of my classes instead of requiring. + * Fixed infect method to work better on 1.9. + * Fixed all shadowed variable warnings in 1.9. + +=== 1.1.0 / 2007-11-08 + +* 4 major enhancements: + + * Finished writing all missing assertions. + * Output matches original test/unit. + * Documented every method needed by language implementor. + * Fully switched over to self-testing setup. + +* 2 minor enhancements: + + * Added deny (assert ! test), our favorite extension to test/unit. + * Added .autotest and fairly complete unit tests. (thanks Chad for help here) + +=== 1.0.0 / 2006-10-30 + +* 1 major enhancement + + * Birthday! diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/Manifest.txt b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/Manifest.txt new file mode 100644 index 0000000..8e096ff --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/Manifest.txt @@ -0,0 +1,27 @@ +.autotest +History.rdoc +Manifest.txt +README.rdoc +Rakefile +design_rationale.rb +lib/hoe/minitest.rb +lib/minitest.rb +lib/minitest/assertions.rb +lib/minitest/autorun.rb +lib/minitest/benchmark.rb +lib/minitest/expectations.rb +lib/minitest/hell.rb +lib/minitest/mock.rb +lib/minitest/parallel.rb +lib/minitest/pride.rb +lib/minitest/pride_plugin.rb +lib/minitest/spec.rb +lib/minitest/test.rb +lib/minitest/unit.rb +test/minitest/metametameta.rb +test/minitest/test_minitest_assertions.rb +test/minitest/test_minitest_benchmark.rb +test/minitest/test_minitest_mock.rb +test/minitest/test_minitest_reporter.rb +test/minitest/test_minitest_spec.rb +test/minitest/test_minitest_test.rb diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/README.rdoc b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/README.rdoc new file mode 100644 index 0000000..202a08a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/README.rdoc @@ -0,0 +1,764 @@ += minitest/{test,spec,mock,benchmark} + +home :: https://github.com/seattlerb/minitest +bugs :: https://github.com/seattlerb/minitest/issues +rdoc :: http://docs.seattlerb.org/minitest +vim :: https://github.com/sunaku/vim-ruby-minitest +emacs:: https://github.com/arthurnn/minitest-emacs + +== DESCRIPTION: + +minitest provides a complete suite of testing facilities supporting +TDD, BDD, mocking, and benchmarking. + + "I had a class with Jim Weirich on testing last week and we were + allowed to choose our testing frameworks. Kirk Haines and I were + paired up and we cracked open the code for a few test + frameworks... + + I MUST say that minitest is *very* readable / understandable + compared to the 'other two' options we looked at. Nicely done and + thank you for helping us keep our mental sanity." + + -- Wayne E. Seguin + +minitest/test is a small and incredibly fast unit testing framework. +It provides a rich set of assertions to make your tests clean and +readable. 
+ +minitest/spec is a functionally complete spec engine. It hooks onto +minitest/test and seamlessly bridges test assertions over to spec +expectations. + +minitest/benchmark is an awesome way to assert the performance of your +algorithms in a repeatable manner. Now you can assert that your newb +co-worker doesn't replace your linear algorithm with an exponential +one! + +minitest/mock by Steven Baker, is a beautifully tiny mock (and stub) +object framework. + +minitest/pride shows pride in testing and adds coloring to your test +output. I guess it is an example of how to write IO pipes too. :P + +minitest/test is meant to have a clean implementation for language +implementors that need a minimal set of methods to bootstrap a working +test suite. For example, there is no magic involved for test-case +discovery. + + "Again, I can't praise enough the idea of a testing/specing + framework that I can actually read in full in one sitting!" + + -- Piotr Szotkowski + +Comparing to rspec: + + rspec is a testing DSL. minitest is ruby. + + -- Adam Hawkins, "Bow Before MiniTest" + +minitest doesn't reinvent anything that ruby already provides, like: +classes, modules, inheritance, methods. This means you only have to +learn ruby to use minitest and all of your regular OO practices like +extract-method refactorings still apply. + +== FEATURES/PROBLEMS: + +* minitest/autorun - the easy and explicit way to run all your tests. +* minitest/test - a very fast, simple, and clean test system. +* minitest/spec - a very fast, simple, and clean spec system. +* minitest/mock - a simple and clean mock/stub system. +* minitest/benchmark - an awesome way to assert your algorithm's performance. +* minitest/pride - show your pride in testing! +* Incredibly small and fast runner, but no bells and whistles. +* Written by squishy human beings. Software can never be perfect. We will all eventually die. + +== RATIONALE: + +See design_rationale.rb to see how specs and tests work in minitest. + +== SYNOPSIS: + +Given that you'd like to test the following class: + + class Meme + def i_can_has_cheezburger? + "OHAI!" + end + + def will_it_blend? + "YES!" + end + end + +=== Unit tests + +Define your tests as methods beginning with +test_+. + + require "minitest/autorun" + + class TestMeme < Minitest::Test + def setup + @meme = Meme.new + end + + def test_that_kitty_can_eat + assert_equal "OHAI!", @meme.i_can_has_cheezburger? + end + + def test_that_it_will_not_blend + refute_match /^no/i, @meme.will_it_blend? + end + + def test_that_will_be_skipped + skip "test this later" + end + end + +=== Specs + + require "minitest/autorun" + + describe Meme do + before do + @meme = Meme.new + end + + describe "when asked about cheeseburgers" do + it "must respond positively" do + _(@meme.i_can_has_cheezburger?).must_equal "OHAI!" + end + end + + describe "when asked about blending possibilities" do + it "won't say no" do + _(@meme.will_it_blend?).wont_match /^no/i + end + end + end + +For matchers support check out: + +* https://github.com/wojtekmach/minitest-matchers +* https://github.com/rmm5t/minitest-matchers_vaccine + +=== Benchmarks + +Add benchmarks to your tests. + + # optionally run benchmarks, good for CI-only work! 
+ require "minitest/benchmark" if ENV["BENCH"] + + class TestMeme < Minitest::Benchmark + # Override self.bench_range or default range is [1, 10, 100, 1_000, 10_000] + def bench_my_algorithm + assert_performance_linear 0.9999 do |n| # n is a range value + @obj.my_algorithm(n) + end + end + end + +Or add them to your specs. If you make benchmarks optional, you'll +need to wrap your benchmarks in a conditional since the methods won't +be defined. In minitest 5, the describe name needs to match +/Bench(mark)?$/. + + describe "Meme Benchmark" do + if ENV["BENCH"] then + bench_performance_linear "my_algorithm", 0.9999 do |n| + 100.times do + @obj.my_algorithm(n) + end + end + end + end + +outputs something like: + + # Running benchmarks: + + TestBlah 100 1000 10000 + bench_my_algorithm 0.006167 0.079279 0.786993 + bench_other_algorithm 0.061679 0.792797 7.869932 + +Output is tab-delimited to make it easy to paste into a spreadsheet. + +=== Mocks + +Mocks and stubs defined using terminology by Fowler & Meszaros at +http://www.martinfowler.com/bliki/TestDouble.html: + +"Mocks are pre-programmed with expectations which form a specification +of the calls they are expected to receive. They can throw an exception +if they receive a call they don't expect and are checked during +verification to ensure they got all the calls they were expecting." + + class MemeAsker + def initialize(meme) + @meme = meme + end + + def ask(question) + method = question.tr(" ", "_") + "?" + @meme.__send__(method) + end + end + + require "minitest/autorun" + + describe MemeAsker, :ask do + describe "when passed an unpunctuated question" do + it "should invoke the appropriate predicate method on the meme" do + @meme = Minitest::Mock.new + @meme_asker = MemeAsker.new @meme + @meme.expect :will_it_blend?, :return_value + + @meme_asker.ask "will it blend" + + @meme.verify + end + end + end + +==== Multi-threading and Mocks + +Minitest mocks do not support multi-threading. If it works, fine, if it doesn't +you can use regular ruby patterns and facilities like local variables. Here's +an example of asserting that code inside a thread is run: + + def test_called_inside_thread + called = false + pr = Proc.new { called = true } + thread = Thread.new(&pr) + thread.join + assert called, "proc not called" + end + +=== Stubs + +Mocks and stubs are defined using terminology by Fowler & Meszaros at +http://www.martinfowler.com/bliki/TestDouble.html: + +"Stubs provide canned answers to calls made during the test". + +Minitest's stub method overrides a single method for the duration of +the block. + + def test_stale_eh + obj_under_test = Something.new + + refute obj_under_test.stale? + + Time.stub :now, Time.at(0) do # stub goes away once the block is done + assert obj_under_test.stale? + end + end + +A note on stubbing: In order to stub a method, the method must +actually exist prior to stubbing. Use a singleton method to create a +new non-existing method: + + def obj_under_test.fake_method + ... + end + +=== Running Your Tests + +Ideally, you'll use a rake task to run your tests, either piecemeal or +all at once. Both rake and rails ship with rake tasks for running your +tests. BUT! You don't have to: + + % ruby -Ilib:test test/minitest/test_minitest_test.rb + Run options: --seed 37685 + + # Running: + + ...................................................................... (etc) + + Finished in 0.107130s, 1446.8403 runs/s, 2959.0217 assertions/s. 
+ + 155 runs, 317 assertions, 0 failures, 0 errors, 0 skips + +There are runtime options available, both from minitest itself, and also +provided via plugins. To see them, simply run with +--help+: + + % ruby -Ilib:test test/minitest/test_minitest_test.rb --help + minitest options: + -h, --help Display this help. + -s, --seed SEED Sets random seed. Also via env. Eg: SEED=n rake + -v, --verbose Verbose. Show progress processing files. + -n, --name PATTERN Filter run on /regexp/ or string. + -e, --exclude PATTERN Exclude /regexp/ or string from run. + + Known extensions: pride, autotest + -p, --pride Pride. Show your testing pride! + -a, --autotest Connect to autotest server. + +You can set up a rake task to run all your tests by adding this to your Rakefile: + + require "rake/testtask" + + Rake::TestTask.new(:test) do |t| + t.libs << "test" + t.libs << "lib" + t.test_files = FileList["test/**/test_*.rb"] + end + + task :default => :test + +== Writing Extensions + +To define a plugin, add a file named minitest/XXX_plugin.rb to your +project/gem. That file must be discoverable via ruby's LOAD_PATH (via +rubygems or otherwise). Minitest will find and require that file using +Gem.find_files. It will then try to call +plugin_XXX_init+ during +startup. The option processor will also try to call +plugin_XXX_options+ +passing the OptionParser instance and the current options hash. This +lets you register your own command-line options. Here's a totally +bogus example: + + # minitest/bogus_plugin.rb: + + module Minitest + def self.plugin_bogus_options(opts, options) + opts.on "--myci", "Report results to my CI" do + options[:myci] = true + options[:myci_addr] = get_myci_addr + options[:myci_port] = get_myci_port + end + end + + def self.plugin_bogus_init(options) + self.reporter << MyCI.new(options) if options[:myci] + end + end + +=== Adding custom reporters + +Minitest uses composite reporter to output test results using multiple +reporter instances. You can add new reporters to the composite during +the init_plugins phase. As we saw in +plugin_bogus_init+ above, you +simply add your reporter instance to the composite via <<. + ++AbstractReporter+ defines the API for reporters. You may subclass it +and override any method you want to achieve your desired behavior. + +start :: Called when the run has started. +record :: Called for each result, passed or otherwise. +report :: Called at the end of the run. +passed? :: Called to see if you detected any problems. + +Using our example above, here is how we might implement MyCI: + + # minitest/bogus_plugin.rb + + module Minitest + class MyCI < AbstractReporter + attr_accessor :results, :addr, :port + + def initialize options + self.results = [] + self.addr = options[:myci_addr] + self.port = options[:myci_port] + end + + def record result + self.results << result + end + + def report + CI.connect(addr, port).send_results self.results + end + end + + # code from above... + end + +== FAQ + +=== How to test SimpleDelegates? + +The following implementation and test: + + class Worker < SimpleDelegator + def work + end + end + + describe Worker do + before do + @worker = Worker.new(Object.new) + end + + it "must respond to work" do + _(@worker).must_respond_to :work + end + end + +outputs a failure: + + 1) Failure: + Worker#test_0001_must respond to work [bug11.rb:16]: + Expected # (Object) to respond to #work. + +Worker is a SimpleDelegate which in 1.9+ is a subclass of BasicObject. 
+Expectations are put on Object (one level down) so the Worker +(SimpleDelegate) hits +method_missing+ and delegates down to the ++Object.new+ instance. That object doesn't respond to work so the test +fails. + +You can bypass SimpleDelegate#method_missing by extending the worker +with Minitest::Expectations. You can either do that in your setup at +the instance level, like: + + before do + @worker = Worker.new(Object.new) + @worker.extend Minitest::Expectations + end + +or you can extend the Worker class (within the test file!), like: + + class Worker + include ::Minitest::Expectations + end + +=== How to share code across test classes? + +Use a module. That's exactly what they're for: + + module UsefulStuff + def useful_method + # ... + end + end + + describe Blah do + include UsefulStuff + + def test_whatever + # useful_method available here + end + end + +Remember, +describe+ simply creates test classes. It's just ruby at +the end of the day and all your normal Good Ruby Rules (tm) apply. If +you want to extend your test using setup/teardown via a module, just +make sure you ALWAYS call super. before/after automatically call super +for you, so make sure you don't do it twice. + +=== How to run code before a group of tests? + +Use a constant with begin...end like this: + + describe Blah do + SETUP = begin + # ... this runs once when describe Blah starts + end + # ... + end + +This can be useful for expensive initializations or sharing state. +Remember, this is just ruby code, so you need to make sure this +technique and sharing state doesn't interfere with your tests. + +=== Why am I seeing uninitialized constant MiniTest::Test (NameError)? + +Are you running the test with Bundler (e.g. via bundle exec )? If so, +in order to require minitest, you must first add the gem 'minitest' +to your Gemfile and run +bundle+. Once it's installed, you should be +able to require minitest and run your tests. + +== Prominent Projects using Minitest: + +* arel +* journey +* mime-types +* nokogiri +* rails (active_support et al) +* rake +* rdoc +* ...and of course, everything from seattle.rb... + +== Developing Minitest: + +Minitest requires {Hoe}[https://rubygems.org/gems/hoe]. + +=== Minitest's own tests require UTF-8 external encoding. + +This is a common problem in Windows, where the default external Encoding is +often CP850, but can affect any platform. +Minitest can run test suites using any Encoding, but to run Minitest's +own tests you must have a default external Encoding of UTF-8. + +If your encoding is wrong, you'll see errors like: + + --- expected + +++ actual + @@ -1,2 +1,3 @@ + # encoding: UTF-8 + -"Expected /\\w+/ to not match \"blah blah blah\"." + +"Expected /\\w+/ to not match # encoding: UTF-8 + +\"blah blah blah\"." + +To check your current encoding, run: + + ruby -e 'puts Encoding.default_external' + +If your output is something other than UTF-8, you can set the RUBYOPTS +env variable to a value of '-Eutf-8'. Something like: + + RUBYOPT='-Eutf-8' ruby -e 'puts Encoding.default_external' + +Check your OS/shell documentation for the precise syntax (the above +will not work on a basic Windows CMD prompt, look for the SET command). +Once you've got it successfully outputing UTF-8, use the same setting +when running rake in Minitest. + +=== Minitest's own tests require GNU (or similar) diff. + +This is also a problem primarily affecting Windows developers. PowerShell +has a command called diff, but it is not suitable for use with Minitest. 
+ +If you see failures like either of these, you are probably missing diff tool: + + 4) Failure: + TestMinitestUnitTestCase#test_assert_equal_different_long [D:/ruby/seattlerb/minitest/test/minitest/test_minitest_test.rb:936]: + Expected: "--- expected\n+++ actual\n@@ -1 +1 @@\n-\"hahahahahahahahahahahahahahahahahahahaha\"\n+\"blahblahblahblahblahblahblahblahblahblah\"\n" + Actual: "Expected: \"hahahahahahahahahahahahahahahahahahahaha\"\n Actual: \"blahblahblahblahblahblahblahblahblahblah\"" + + + 5) Failure: + TestMinitestUnitTestCase#test_assert_equal_different_collection_hash_hex_invisible [D:/ruby/seattlerb/minitest/test/minitest/test_minitest_test.rb:845]: + Expected: "No visible difference in the Hash#inspect output.\nYou should look at the implementation of #== on Hash or its members.\n + {1=>#}" + Actual: "Expected: {1=>#}\n Actual: {1=>#}" + + +If you use Cygwin or MSYS2 or similar there are packages that include a +GNU diff for Windows. If you don't, you can download GNU diffutils from +http://gnuwin32.sourceforge.net/packages/diffutils.htm +(make sure to add it to your PATH). + +You can make sure it's installed and path is configured properly with: + + diff.exe -v + +There are multiple lines of output, the first should be something like: + + diff (GNU diffutils) 2.8.1 + +If you are using PowerShell make sure you run diff.exe, not just diff, +which will invoke the PowerShell built in function. + +== Known Extensions: + +capybara_minitest_spec :: Bridge between Capybara RSpec matchers and + Minitest::Spec expectations (e.g. + page.must_have_content("Title")). +color_pound_spec_reporter :: Test names print Ruby Object types in color with + your Minitest Spec style tests. +minispec-metadata :: Metadata for describe/it blocks & CLI tag filter. + E.g. it "requires JS driver", js: true do & + ruby test.rb --tag js runs tests tagged :js. +minispec-rails :: Minimal support to use Spec style in Rails 5+. +mini-apivore :: for swagger based automated API testing. +minitest-around :: Around block for minitest. An alternative to + setup/teardown dance. +minitest-assert_errors :: Adds Minitest assertions to test for errors raised + or not raised by Minitest itself. +minitest-autotest :: autotest is a continuous testing facility meant to + be used during development. +minitest-bacon :: minitest-bacon extends minitest with bacon-like + functionality. +minitest-bang :: Adds support for RSpec-style let! to immediately + invoke let statements before each test. +minitest-bisect :: Helps you isolate and debug random test failures. +minitest-blink1_reporter :: Display test results with a Blink1. +minitest-capistrano :: Assertions and expectations for testing + Capistrano recipes. +minitest-capybara :: Capybara matchers support for minitest unit and + spec. +minitest-chef-handler :: Run Minitest suites as Chef report handlers +minitest-ci :: CI reporter plugin for Minitest. +minitest-context :: Defines contexts for code reuse in Minitest + specs that share common expectations. +minitest-debugger :: Wraps assert so failed assertions drop into + the ruby debugger. +minitest-display :: Patches Minitest to allow for an easily + configurable output. +minitest-documentation :: Minimal documentation format inspired by rspec's. +minitest-doc_reporter :: Detailed output inspired by rspec's documentation + format. +minitest-emoji :: Print out emoji for your test passes, fails, and + skips. +minitest-english :: Semantically symmetric aliases for assertions and + expectations. 
+minitest-excludes :: Clean API for excluding certain tests you + don't want to run under certain conditions. +minitest-fail-fast :: Reimplements RSpec's "fail fast" feature +minitest-filecontent :: Support unit tests with expectation results in files. + Differing results will be stored again in files. +minitest-filesystem :: Adds assertion and expectation to help testing + filesystem contents. +minitest-firemock :: Makes your Minitest mocks more resilient. +minitest-focus :: Focus on one test at a time. +minitest-gcstats :: A minitest plugin that adds a report of the top + tests by number of objects allocated. +minitest-global_expectations:: Support minitest expectation methods for all objects +minitest-great_expectations :: Generally useful additions to minitest's + assertions and expectations. +minitest-growl :: Test notifier for minitest via growl. +minitest-happy :: GLOBALLY ACTIVATE MINITEST PRIDE! RAWR! +minitest-have_tag :: Adds Minitest assertions to test for the existence of + HTML tags, including contents, within a provided string. +minitest-hooks :: Around and before_all/after_all/around_all hooks +minitest-hyper :: Pretty, single-page HTML reports for your Minitest runs +minitest-implicit-subject :: Implicit declaration of the test subject. +minitest-instrument :: Instrument ActiveSupport::Notifications when + test method is executed. +minitest-instrument-db :: Store information about speed of test execution + provided by minitest-instrument in database. +minitest-junit :: JUnit-style XML reporter for minitest. +minitest-keyword :: Use Minitest assertions with keyword arguments. +minitest-libnotify :: Test notifier for minitest via libnotify. +minitest-line :: Run test at line number. +minitest-logger :: Define assert_log and enable minitest to test log messages. + Supports Logger and Log4r::Logger. +minitest-macruby :: Provides extensions to minitest for macruby UI + testing. +minitest-matchers :: Adds support for RSpec-style matchers to + minitest. +minitest-matchers_vaccine :: Adds assertions that adhere to the matcher spec, + but without any expectation infections. +minitest-metadata :: Annotate tests with metadata (key-value). +minitest-mock_expectations :: Provides method call assertions for minitest. +minitest-mongoid :: Mongoid assertion matchers for Minitest. +minitest-must_not :: Provides must_not as an alias for wont in + Minitest. +minitest-optional_retry :: Automatically retry failed test to help with flakiness. +minitest-osx :: Reporter for the Mac OS X notification center. +minitest-parallel_fork :: Fork-based parallelization +minitest-parallel-db :: Run tests in parallel with a single database. +minitest-power_assert :: PowerAssert for Minitest. +minitest-predicates :: Adds support for .predicate? methods. +minitest-profile :: List the 10 slowest tests in your suite. +minitest-rails :: Minitest integration for Rails 3.x. +minitest-rails-capybara :: Capybara integration for Minitest::Rails. +minitest-reporters :: Create customizable Minitest output formats. +minitest-rg :: Colored red/green output for Minitest. +minitest-rspec_mocks :: Use RSpec Mocks with Minitest. +minitest-server :: minitest-server provides a client/server setup + with your minitest process, allowing your test + run to send its results directly to a handler. +minitest-sequel :: Minitest assertions to speed-up development and + testing of Ruby Sequel database setups. 
+minitest-shared_description :: Support for shared specs and shared spec + subclasses +minitest-should_syntax :: RSpec-style x.should == y assertions for + Minitest. +minitest-shouldify :: Adding all manner of shoulds to Minitest (bad + idea) +minitest-snail :: Print a list of tests that take too long +minitest-spec-context :: Provides rspec-ish context method to + Minitest::Spec. +minitest-spec-expect :: Expect syntax for Minitest::Spec (e.g. + expect(sequences).to_include :celery_man). +minitest-spec-magic :: Minitest::Spec extensions for Rails and beyond. +minitest-spec-rails :: Drop in Minitest::Spec superclass for + ActiveSupport::TestCase. +minitest-sprint :: Runs (Get it? It's fast!) your tests and makes + it easier to rerun individual failures. +minitest-stately :: Find leaking state between tests +minitest-stub_any_instance :: Stub any instance of a method on the given class + for the duration of a block. +minitest-stub-const :: Stub constants for the duration of a block. +minitest-tags :: Add tags for minitest. +minitest-unordered :: Adds a new assertion to minitest for checking the + contents of a collection, ignoring element order. +minitest-vcr :: Automatic cassette managment with Minitest::Spec + and VCR. +minitest_log :: Adds structured logging, data explication, and verdicts. +minitest_owrapper :: Get tests results as a TestResult object. +minitest_should :: Shoulda style syntax for minitest test::unit. +minitest_tu_shim :: Bridges between test/unit and minitest. +mongoid-minitest :: Minitest matchers for Mongoid. +mutant-minitest :: Minitest integration for mutant. +pry-rescue :: A pry plugin w/ minitest support. See + pry-rescue/minitest.rb. +rspec2minitest :: Easily translate any RSpec matchers to Minitest + assertions and expectations. + +== Unknown Extensions: + +Authors... Please send me a pull request with a description of your minitest extension. + +* assay-minitest +* detroit-minitest +* em-minitest-spec +* flexmock-minitest +* guard-minitest +* guard-minitest-decisiv +* minitest-activemodel +* minitest-ar-assertions +* minitest-capybara-unit +* minitest-colorer +* minitest-deluxe +* minitest-extra-assertions +* minitest-rails-shoulda +* minitest-spec +* minitest-spec-should +* minitest-sugar +* spork-minitest + +== Minitest related goods + +* minitest/pride fabric: http://www.spoonflower.com/fabric/3928730-again-by-katie_allen + +== REQUIREMENTS: + +* Ruby 2.3+. No magic is involved. I hope. + +== INSTALL: + + sudo gem install minitest + +On 1.9, you already have it. To get newer candy you can still install +the gem, and then requiring "minitest/autorun" should automatically +pull it in. If not, you'll need to do it yourself: + + gem "minitest" # ensures you"re using the gem, and not the built-in MT + require "minitest/autorun" + + # ... usual testing stuffs ... + +DO NOTE: There is a serious problem with the way that ruby 1.9/2.0 +packages their own gems. They install a gem specification file, but +don't install the gem contents in the gem path. This messes up +Gem.find_files and many other things (gem which, gem contents, etc). + +Just install minitest as a gem for real and you'll be happier. 
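A minimal sketch of the install advice above, assuming the 5.14.2 release vendored in this diff; the Gemfile constraint, file name, and test class are illustrative only and are not part of the upstream README:

    # Gemfile
    source "https://rubygems.org"
    gem "minitest", "~> 5.14"

    # test/test_minitest_version.rb
    gem "minitest"               # activate the gem, not the copy built into old rubies
    require "minitest/autorun"

    class TestMinitestVersion < Minitest::Test
      def test_gem_copy_is_loaded
        # Minitest::VERSION is "5.14.2" in the vendored gem
        assert Gem::Version.new(Minitest::VERSION) >= Gem::Version.new("5.14.2")
      end
    end

Run under the bundle (for example, bundle exec ruby test/test_minitest_version.rb) it should report one run and one assertion when the gem copy is the one actually loaded.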
+ +== LICENSE: + +(The MIT License) + +Copyright (c) Ryan Davis, seattle.rb + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/Rakefile b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/Rakefile new file mode 100644 index 0000000..0237b44 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/Rakefile @@ -0,0 +1,74 @@ +# -*- ruby -*- + +require "rubygems" +require "hoe" + +Hoe.plugin :seattlerb +Hoe.plugin :rdoc + +Hoe.spec "minitest" do + developer "Ryan Davis", "ryand-ruby@zenspider.com" + + license "MIT" + + require_ruby_version [">= 2.2", "< 3.1"] +end + +desc "Find missing expectations" +task :specs do + $:.unshift "lib" + require "minitest/test" + require "minitest/spec" + + pos_prefix, neg_prefix = "must", "wont" + skip_re = /^(must|wont)$|wont_(throw)|must_(block|not?_|nothing|send|raise$)/x + dont_flip_re = /(must|wont)_(include|respond_to)/ + + map = { + /(must_throw)s/ => '\1', + /(?!not)_same/ => "_be_same_as", + /_in_/ => "_be_within_", + /_operator/ => "_be", + /_includes/ => "_include", + /(must|wont)_(.*_of|nil|silent|empty)/ => '\1_be_\2', + /must_raises/ => "must_raise", + /(must|wont)_predicate/ => '\1_be', + /(must|wont)_path_exists/ => 'path_\1_exist', + } + + expectations = Minitest::Expectations.public_instance_methods.map(&:to_s) + assertions = Minitest::Assertions.public_instance_methods.map(&:to_s) + + assertions.sort.each do |assertion| + expectation = case assertion + when /^assert/ then + assertion.sub(/^assert/, pos_prefix.to_s) + when /^refute/ then + assertion.sub(/^refute/, neg_prefix.to_s) + end + + next unless expectation + next if expectation =~ skip_re + + regexp, replacement = map.find { |re, _| expectation =~ re } + expectation.sub! regexp, replacement if replacement + + next if expectations.include? 
expectation + + args = [assertion, expectation].map(&:to_sym).map(&:inspect) + args << :reverse if expectation =~ dont_flip_re + + puts + puts "##" + puts "# :method: #{expectation}" + puts "# See Minitest::Assertions##{assertion}" + puts + puts "infect_an_assertion #{args.join ", "}" + end +end + +task :bugs do + sh "for f in bug*.rb ; do echo $f; echo; #{Gem.ruby} -Ilib $f && rm $f ; done" +end + +# vim: syntax=Ruby diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/design_rationale.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/design_rationale.rb new file mode 100644 index 0000000..a3fcc37 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/design_rationale.rb @@ -0,0 +1,52 @@ +# Specs: # Equivalent Unit Tests: +############################################################################### +describe Thingy do # class TestThingy < Minitest::Test + before do # def setup + do_some_setup # super + end # do_some_setup + # end + it "should do the first thing" do # + 1.must_equal 1 # def test_first_thing + end # assert_equal 1, 1 + # end + describe SubThingy do # end + before do # + do_more_setup # class TestSubThingy < TestThingy + end # def setup + # super + it "should do the second thing" do # do_more_setup + 2.must_equal 2 # end + end # + end # def test_second_thing +end # assert_equal 2, 2 + # end + # end +############################################################################### +# runs 2 specs # runs 3 tests +############################################################################### +# The specs generate: + +class ThingySpec < Minitest::Spec + def setup + super + do_some_setup + end + + def test_should_do_the_first_thing + assert_equal 1, 1 + end +end + +class SubThingySpec < ThingySpec + def setup + super + do_more_setup + end + + # because only setup/teardown is inherited, not specs + remove_method :test_should_do_the_first_thing + + def test_should_do_the_second_thing + assert_equal 2, 2 + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/hoe/minitest.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/hoe/minitest.rb new file mode 100644 index 0000000..6c045a5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/hoe/minitest.rb @@ -0,0 +1,32 @@ +# :stopdoc: + +class Hoe +end + +module Hoe::Minitest + def minitest? + self.name == "minitest" + end + + def initialize_minitest + unless minitest? then + dir = "../../minitest/dev/lib" + Hoe.add_include_dirs dir if File.directory? dir + end + + gem "minitest" + require "minitest" + version = Minitest::VERSION.split(/\./).first(2).join(".") + + dependency "minitest", "~> #{version}", :development unless + minitest? or ENV["MT_NO_ISOLATE"] + end + + def define_minitest_tasks + self.testlib = :minitest + + # make sure we use the gemmed minitest on 1.9 + self.test_prelude = 'gem "minitest"' unless + minitest? or ENV["MT_NO_ISOLATE"] + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest.rb new file mode 100644 index 0000000..e796de5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest.rb @@ -0,0 +1,1056 @@ +require "optparse" +require "thread" +require "mutex_m" +require "minitest/parallel" +require "stringio" + +## +# :include: README.rdoc + +module Minitest + VERSION = "5.14.2" # :nodoc: + ENCS = "".respond_to? 
:encoding # :nodoc: + + @@installed_at_exit ||= false + @@after_run = [] + @extensions = [] + + mc = (class << self; self; end) + + ## + # Parallel test executor + + mc.send :attr_accessor, :parallel_executor + + warn "DEPRECATED: use MT_CPU instead of N for parallel test runs" if ENV["N"] + n_threads = (ENV["MT_CPU"] || ENV["N"] || 2).to_i + self.parallel_executor = Parallel::Executor.new n_threads + + ## + # Filter object for backtraces. + + mc.send :attr_accessor, :backtrace_filter + + ## + # Reporter object to be used for all runs. + # + # NOTE: This accessor is only available during setup, not during runs. + + mc.send :attr_accessor, :reporter + + ## + # Names of known extension plugins. + + mc.send :attr_accessor, :extensions + + ## + # The signal to use for dumping information to STDERR. Defaults to "INFO". + + mc.send :attr_accessor, :info_signal + self.info_signal = "INFO" + + ## + # Registers Minitest to run at process exit + + def self.autorun + at_exit { + next if $! and not ($!.kind_of? SystemExit and $!.success?) + + exit_code = nil + + pid = Process.pid + at_exit { + next if Process.pid != pid + @@after_run.reverse_each(&:call) + exit exit_code || false + } + + exit_code = Minitest.run ARGV + } unless @@installed_at_exit + @@installed_at_exit = true + end + + ## + # A simple hook allowing you to run a block of code after everything + # is done running. Eg: + # + # Minitest.after_run { p $debugging_info } + + def self.after_run &block + @@after_run << block + end + + def self.init_plugins options # :nodoc: + self.extensions.each do |name| + msg = "plugin_#{name}_init" + send msg, options if self.respond_to? msg + end + end + + def self.load_plugins # :nodoc: + return unless self.extensions.empty? + + seen = {} + + require "rubygems" unless defined? Gem + + Gem.find_files("minitest/*_plugin.rb").each do |plugin_path| + name = File.basename plugin_path, "_plugin.rb" + + next if seen[name] + seen[name] = true + + require plugin_path + self.extensions << name + end + end + + ## + # This is the top-level run method. Everything starts from here. It + # tells each Runnable sub-class to run, and each of those are + # responsible for doing whatever they do. + # + # The overall structure of a run looks like this: + # + # Minitest.autorun + # Minitest.run(args) + # Minitest.__run(reporter, options) + # Runnable.runnables.each + # runnable.run(reporter, options) + # self.runnable_methods.each + # self.run_one_method(self, runnable_method, reporter) + # Minitest.run_one_method(klass, runnable_method) + # klass.new(runnable_method).run + + def self.run args = [] + self.load_plugins unless args.delete("--no-plugins") || ENV["MT_NO_PLUGINS"] + + options = process_args args + + reporter = CompositeReporter.new + reporter << SummaryReporter.new(options[:io], options) + reporter << ProgressReporter.new(options[:io], options) + + self.reporter = reporter # this makes it available to plugins + self.init_plugins options + self.reporter = nil # runnables shouldn't depend on the reporter, ever + + self.parallel_executor.start if parallel_executor.respond_to?(:start) + reporter.start + begin + __run reporter, options + rescue Interrupt + warn "Interrupted. Exiting..." + end + self.parallel_executor.shutdown + reporter.report + + reporter.passed? + end + + ## + # Internal run method. Responsible for telling all Runnable + # sub-classes to run. + + def self.__run reporter, options + suites = Runnable.runnables.reject { |s| s.runnable_methods.empty? 
}.shuffle + parallel, serial = suites.partition { |s| s.test_order == :parallel } + + # If we run the parallel tests before the serial tests, the parallel tests + # could run in parallel with the serial tests. This would be bad because + # the serial tests won't lock around Reporter#record. Run the serial tests + # first, so that after they complete, the parallel tests will lock when + # recording results. + serial.map { |suite| suite.run reporter, options } + + parallel.map { |suite| suite.run reporter, options } + end + + def self.process_args args = [] # :nodoc: + options = { + :io => $stdout, + } + orig_args = args.dup + + OptionParser.new do |opts| + opts.banner = "minitest options:" + opts.version = Minitest::VERSION + + opts.on "-h", "--help", "Display this help." do + puts opts + exit + end + + opts.on "--no-plugins", "Bypass minitest plugin auto-loading (or set $MT_NO_PLUGINS)." + + desc = "Sets random seed. Also via env. Eg: SEED=n rake" + opts.on "-s", "--seed SEED", Integer, desc do |m| + options[:seed] = m.to_i + end + + opts.on "-v", "--verbose", "Verbose. Show progress processing files." do + options[:verbose] = true + end + + opts.on "-n", "--name PATTERN", "Filter run on /regexp/ or string." do |a| + options[:filter] = a + end + + opts.on "-e", "--exclude PATTERN", "Exclude /regexp/ or string from run." do |a| + options[:exclude] = a + end + + unless extensions.empty? + opts.separator "" + opts.separator "Known extensions: #{extensions.join(", ")}" + + extensions.each do |meth| + msg = "plugin_#{meth}_options" + send msg, opts, options if self.respond_to?(msg) + end + end + + begin + opts.parse! args + rescue OptionParser::InvalidOption => e + puts + puts e + puts + puts opts + exit 1 + end + + orig_args -= args + end + + unless options[:seed] then + srand + options[:seed] = (ENV["SEED"] || srand).to_i % 0xFFFF + orig_args << "--seed" << options[:seed].to_s + end + + srand options[:seed] + + options[:args] = orig_args.map { |s| + s =~ /[\s|&<>$()]/ ? s.inspect : s + }.join " " + + options + end + + def self.filter_backtrace bt # :nodoc: + result = backtrace_filter.filter bt + result = bt.dup if result.empty? + result + end + + ## + # Represents anything "runnable", like Test, Spec, Benchmark, or + # whatever you can dream up. + # + # Subclasses of this are automatically registered and available in + # Runnable.runnables. + + class Runnable + ## + # Number of assertions executed in this run. + + attr_accessor :assertions + + ## + # An assertion raised during the run, if any. + + attr_accessor :failures + + ## + # The time it took to run. + + attr_accessor :time + + def time_it # :nodoc: + t0 = Minitest.clock_time + + yield + ensure + self.time = Minitest.clock_time - t0 + end + + ## + # Name of the run. + + def name + @NAME + end + + ## + # Set the name of the run. + + def name= o + @NAME = o + end + + ## + # Returns all instance methods matching the pattern +re+. + + def self.methods_matching re + public_instance_methods(true).grep(re).map(&:to_s) + end + + def self.reset # :nodoc: + @@runnables = [] + end + + reset + + ## + # Responsible for running all runnable methods in a given class, + # each in its own instance. Each instance is passed to the + # reporter to record. 
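+    #
+    # A rough sketch of how the :filter and :exclude options are used
+    # below (the class and test names are hypothetical): a value written
+    # as /regexp/ is compiled to a Regexp and matched against both the
+    # bare method name and "Class#method", so
+    #
+    #   ruby test_meme.rb -n /popular/
+    #
+    # would run TestMeme#test_that_will_be_popular, while -e /popular/
+    # would drop it from the run entirely.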
+ + def self.run reporter, options = {} + filter = options[:filter] || "/./" + filter = Regexp.new $1 if filter.is_a?(String) && filter =~ %r%/(.*)/% + + filtered_methods = self.runnable_methods.find_all { |m| + filter === m || filter === "#{self}##{m}" + } + + exclude = options[:exclude] + exclude = Regexp.new $1 if exclude =~ %r%/(.*)/% + + filtered_methods.delete_if { |m| + exclude === m || exclude === "#{self}##{m}" + } + + return if filtered_methods.empty? + + with_info_handler reporter do + filtered_methods.each do |method_name| + run_one_method self, method_name, reporter + end + end + end + + ## + # Runs a single method and has the reporter record the result. + # This was considered internal API but is factored out of run so + # that subclasses can specialize the running of an individual + # test. See Minitest::ParallelTest::ClassMethods for an example. + + def self.run_one_method klass, method_name, reporter + reporter.prerecord klass, method_name + reporter.record Minitest.run_one_method(klass, method_name) + end + + def self.with_info_handler reporter, &block # :nodoc: + handler = lambda do + unless reporter.passed? then + warn "Current results:" + warn "" + warn reporter.reporters.first + warn "" + end + end + + on_signal ::Minitest.info_signal, handler, &block + end + + SIGNALS = Signal.list # :nodoc: + + def self.on_signal name, action # :nodoc: + supported = SIGNALS[name] + + old_trap = trap name do + old_trap.call if old_trap.respond_to? :call + action.call + end if supported + + yield + ensure + trap name, old_trap if supported + end + + ## + # Each subclass of Runnable is responsible for overriding this + # method to return all runnable methods. See #methods_matching. + + def self.runnable_methods + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns all subclasses of Runnable. + + def self.runnables + @@runnables + end + + @@marshal_dump_warned = false + + def marshal_dump # :nodoc: + unless @@marshal_dump_warned then + warn ["Minitest::Runnable#marshal_dump is deprecated.", + "You might be violating internals. From", caller.first].join " " + @@marshal_dump_warned = true + end + + [self.name, self.failures, self.assertions, self.time] + end + + def marshal_load ary # :nodoc: + self.name, self.failures, self.assertions, self.time = ary + end + + def failure # :nodoc: + self.failures.first + end + + def initialize name # :nodoc: + self.name = name + self.failures = [] + self.assertions = 0 + end + + ## + # Runs a single method. Needs to return self. + + def run + raise NotImplementedError, "subclass responsibility" + end + + ## + # Did this run pass? + # + # Note: skipped runs are not considered passing, but they don't + # cause the process to exit non-zero. + + def passed? + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns a single character string to print based on the result + # of the run. One of ".", "F", + # "E" or "S". + + def result_code + raise NotImplementedError, "subclass responsibility" + end + + ## + # Was this run skipped? See #passed? for more information. + + def skipped? + raise NotImplementedError, "subclass responsibility" + end + end + + ## + # Shared code for anything that can get passed to a Reporter. See + # Minitest::Test & Minitest::Result. + + module Reportable + ## + # Did this run pass? + # + # Note: skipped runs are not considered passing, but they don't + # cause the process to exit non-zero. + + def passed? + not self.failure + end + + ## + # The location identifier of this test. 
Depends on a method + # existing called class_name. + + def location + loc = " [#{self.failure.location}]" unless passed? or error? + "#{self.class_name}##{self.name}#{loc}" + end + + def class_name # :nodoc: + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns ".", "F", or "E" based on the result of the run. + + def result_code + self.failure and self.failure.result_code or "." + end + + ## + # Was this run skipped? + + def skipped? + self.failure and Skip === self.failure + end + + ## + # Did this run error? + + def error? + self.failures.any? { |f| UnexpectedError === f } + end + end + + ## + # This represents a test result in a clean way that can be + # marshalled over a wire. Tests can do anything they want to the + # test instance and can create conditions that cause Marshal.dump to + # blow up. By using Result.from(a_test) you can be reasonably sure + # that the test result can be marshalled. + + class Result < Runnable + include Minitest::Reportable + + undef_method :marshal_dump + undef_method :marshal_load + + ## + # The class name of the test result. + + attr_accessor :klass + + ## + # The location of the test method. + + attr_accessor :source_location + + ## + # Create a new test result from a Runnable instance. + + def self.from runnable + o = runnable + + r = self.new o.name + r.klass = o.class.name + r.assertions = o.assertions + r.failures = o.failures.dup + r.time = o.time + + r.source_location = o.method(o.name).source_location rescue ["unknown", -1] + + r + end + + def class_name # :nodoc: + self.klass # for Minitest::Reportable + end + + def to_s # :nodoc: + return location if passed? and not skipped? + + failures.map { |failure| + "#{failure.result_label}:\n#{self.location}:\n#{failure.message}\n" + }.join "\n" + end + end + + ## + # Defines the API for Reporters. Subclass this and override whatever + # you want. Go nuts. + + class AbstractReporter + include Mutex_m + + ## + # Starts reporting on the run. + + def start + end + + ## + # About to start running a test. This allows a reporter to show + # that it is starting or that we are in the middle of a test run. + + def prerecord klass, name + end + + ## + # Output and record the result of the test. Call + # {result#result_code}[rdoc-ref:Runnable#result_code] to get the + # result character string. Stores the result of the run if the run + # did not pass. + + def record result + end + + ## + # Outputs the summary of the run. + + def report + end + + ## + # Did this run pass? + + def passed? + true + end + end + + class Reporter < AbstractReporter # :nodoc: + ## + # The IO used to report. + + attr_accessor :io + + ## + # Command-line options for this run. + + attr_accessor :options + + def initialize io = $stdout, options = {} # :nodoc: + super() + self.io = io + self.options = options + end + end + + ## + # A very simple reporter that prints the "dots" during the run. + # + # This is added to the top-level CompositeReporter at the start of + # the run. If you want to change the output of minitest via a + # plugin, pull this out of the composite and replace it with your + # own. + + class ProgressReporter < Reporter + def prerecord klass, name #:nodoc: + if options[:verbose] then + io.print "%s#%s = " % [klass.name, name] + io.flush + end + end + + def record result # :nodoc: + io.print "%.2f s = " % [result.time] if options[:verbose] + io.print result.result_code + io.puts if options[:verbose] + end + end + + ## + # A reporter that gathers statistics about a test run. 
Does not do + # any IO because meant to be used as a parent class for a reporter + # that does. + # + # If you want to create an entirely different type of output (eg, + # CI, HTML, etc), this is the place to start. + # + # Example: + # + # class JenkinsCIReporter < StatisticsReporter + # def report + # super # Needed to calculate some statistics + # + # print " 30 characters. + # 3. or: Strings are equal to each other (but maybe different encodings?). + # 4. and: we found a diff executable. + + def things_to_diff exp, act + expect = mu_pp_for_diff exp + butwas = mu_pp_for_diff act + + e1, e2 = expect.include?("\n"), expect.include?("\\n") + b1, b2 = butwas.include?("\n"), butwas.include?("\\n") + + need_to_diff = + (e1 ^ e2 || + b1 ^ b2 || + expect.size > 30 || + butwas.size > 30 || + expect == butwas) && + Minitest::Assertions.diff + + need_to_diff && [expect, butwas] + end + + ## + # This returns a human-readable version of +obj+. By default + # #inspect is called. You can override this to use #pretty_inspect + # if you want. + # + # See Minitest::Test.make_my_diffs_pretty! + + def mu_pp obj + s = obj.inspect + + if defined? Encoding then + s = s.encode Encoding.default_external + + if String === obj && (obj.encoding != Encoding.default_external || + !obj.valid_encoding?) then + enc = "# encoding: #{obj.encoding}" + val = "# valid: #{obj.valid_encoding?}" + s = "#{enc}\n#{val}\n#{s}" + end + end + + s + end + + ## + # This returns a diff-able more human-readable version of +obj+. + # This differs from the regular mu_pp because it expands escaped + # newlines and makes hex-values (like object_ids) generic. This + # uses mu_pp to do the first pass and then cleans it up. + + def mu_pp_for_diff obj + str = mu_pp obj + + # both '\n' & '\\n' (_after_ mu_pp (aka inspect)) + single = !!str.match(/(?exp == act printing the difference between + # the two, if possible. + # + # If there is no visible difference but the assertion fails, you + # should suspect that your #== is buggy, or your inspect output is + # missing crucial details. For nicer structural diffing, set + # Minitest::Test.make_my_diffs_pretty! + # + # For floats use assert_in_delta. + # + # See also: Minitest::Assertions.diff + + def assert_equal exp, act, msg = nil + msg = message(msg, E) { diff exp, act } + result = assert exp == act, msg + + if nil == exp then + if Minitest::VERSION =~ /^6/ then + refute_nil exp, "Use assert_nil if expecting nil." + else + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + + warn "DEPRECATED: Use assert_nil if expecting nil from #{where}. This will fail in Minitest 6." + end + end + + result + end + + ## + # For comparing Floats. Fails unless +exp+ and +act+ are within +delta+ + # of each other. + # + # assert_in_delta Math::PI, (22.0 / 7.0), 0.01 + + def assert_in_delta exp, act, delta = 0.001, msg = nil + n = (exp - act).abs + msg = message(msg) { + "Expected |#{exp} - #{act}| (#{n}) to be <= #{delta}" + } + assert delta >= n, msg + end + + ## + # For comparing Floats. Fails unless +exp+ and +act+ have a relative + # error less than +epsilon+. + + def assert_in_epsilon exp, act, epsilon = 0.001, msg = nil + assert_in_delta exp, act, [exp.abs, act.abs].min * epsilon, msg + end + + ## + # Fails unless +collection+ includes +obj+. + + def assert_includes collection, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(collection)} to include #{mu_pp(obj)}" + } + assert_respond_to collection, :include? 
+ assert collection.include?(obj), msg + end + + ## + # Fails unless +obj+ is an instance of +cls+. + + def assert_instance_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to be an instance of #{cls}, not #{obj.class}" + } + + assert obj.instance_of?(cls), msg + end + + ## + # Fails unless +obj+ is a kind of +cls+. + + def assert_kind_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to be a kind of #{cls}, not #{obj.class}" } + + assert obj.kind_of?(cls), msg + end + + ## + # Fails unless +matcher+ =~ +obj+. + + def assert_match matcher, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp matcher} to match #{mu_pp obj}" } + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + assert matcher =~ obj, msg + end + + ## + # Fails unless +obj+ is nil + + def assert_nil obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to be nil" } + assert obj.nil?, msg + end + + ## + # For testing with binary operators. Eg: + # + # assert_operator 5, :<=, 4 + + def assert_operator o1, op, o2 = UNDEFINED, msg = nil + return assert_predicate o1, op, msg if UNDEFINED == o2 + msg = message(msg) { "Expected #{mu_pp(o1)} to be #{op} #{mu_pp(o2)}" } + assert o1.__send__(op, o2), msg + end + + ## + # Fails if stdout or stderr do not output the expected results. + # Pass in nil if you don't care about that streams output. Pass in + # "" if you require it to be silent. Pass in a regexp if you want + # to pattern match. + # + # assert_output(/hey/) { method_with_output } + # + # NOTE: this uses #capture_io, not #capture_subprocess_io. + # + # See also: #assert_silent + + def assert_output stdout = nil, stderr = nil + flunk "assert_output requires a block to capture output." unless + block_given? + + out, err = capture_io do + yield + end + + err_msg = Regexp === stderr ? :assert_match : :assert_equal if stderr + out_msg = Regexp === stdout ? :assert_match : :assert_equal if stdout + + y = send err_msg, stderr, err, "In stderr" if err_msg + x = send out_msg, stdout, out, "In stdout" if out_msg + + (!stdout || x) && (!stderr || y) + rescue Assertion + raise + rescue => e + raise UnexpectedError, e + end + + ## + # Fails unless +path+ exists. + + def assert_path_exists path, msg = nil + msg = message(msg) { "Expected path '#{path}' to exist" } + assert File.exist?(path), msg + end + + ## + # For testing with predicates. Eg: + # + # assert_predicate str, :empty? + # + # This is really meant for specs and is front-ended by assert_operator: + # + # str.must_be :empty? + + def assert_predicate o1, op, msg = nil + msg = message(msg) { "Expected #{mu_pp(o1)} to be #{op}" } + assert o1.__send__(op), msg + end + + ## + # Fails unless the block raises one of +exp+. Returns the + # exception matched so you can check the message, attributes, etc. + # + # +exp+ takes an optional message on the end to help explain + # failures and defaults to StandardError if no exception class is + # passed. Eg: + # + # assert_raises(CustomError) { method_with_custom_error } + # + # With custom error message: + # + # assert_raises(CustomError, 'This should have raised CustomError') { method_with_custom_error } + # + # Using the returned object: + # + # error = assert_raises(CustomError) do + # raise CustomError, 'This is really bad' + # end + # + # assert_equal 'This is really bad', error.message + + def assert_raises *exp + flunk "assert_raises requires a block to capture errors." unless + block_given? 
+ + msg = "#{exp.pop}.\n" if String === exp.last + exp << StandardError if exp.empty? + + begin + yield + rescue *exp => e + pass # count assertion + return e + rescue Minitest::Assertion # incl Skip & UnexpectedError + # don't count assertion + raise + rescue SignalException, SystemExit + raise + rescue Exception => e + flunk proc { + exception_details(e, "#{msg}#{mu_pp(exp)} exception expected, not") + } + end + + exp = exp.first if exp.size == 1 + + flunk "#{msg}#{mu_pp(exp)} expected but nothing was raised." + end + + ## + # Fails unless +obj+ responds to +meth+. + + def assert_respond_to obj, meth, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} (#{obj.class}) to respond to ##{meth}" + } + assert obj.respond_to?(meth), msg + end + + ## + # Fails unless +exp+ and +act+ are #equal? + + def assert_same exp, act, msg = nil + msg = message(msg) { + data = [mu_pp(act), act.object_id, mu_pp(exp), exp.object_id] + "Expected %s (oid=%d) to be the same as %s (oid=%d)" % data + } + assert exp.equal?(act), msg + end + + ## + # +send_ary+ is a receiver, message and arguments. + # + # Fails unless the call returns a true value + + def assert_send send_ary, m = nil + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + warn "DEPRECATED: assert_send. From #{where}" + + recv, msg, *args = send_ary + m = message(m) { + "Expected #{mu_pp(recv)}.#{msg}(*#{mu_pp(args)}) to return true" } + assert recv.__send__(msg, *args), m + end + + ## + # Fails if the block outputs anything to stderr or stdout. + # + # See also: #assert_output + + def assert_silent + assert_output "", "" do + yield + end + end + + ## + # Fails unless the block throws +sym+ + + def assert_throws sym, msg = nil + default = "Expected #{mu_pp(sym)} to have been thrown" + caught = true + catch(sym) do + begin + yield + rescue ThreadError => e # wtf?!? 1.8 + threads == suck + default += ", not \:#{e.message[/uncaught throw \`(\w+?)\'/, 1]}" + rescue ArgumentError => e # 1.9 exception + raise e unless e.message.include?("uncaught throw") + default += ", not #{e.message.split(/ /).last}" + rescue NameError => e # 1.8 exception + raise e unless e.name == sym + default += ", not #{e.name.inspect}" + end + caught = false + end + + assert caught, message(msg) { default } + rescue Assertion + raise + rescue => e + raise UnexpectedError, e + end + + ## + # Captures $stdout and $stderr into strings: + # + # out, err = capture_io do + # puts "Some info" + # warn "You did a bad thing" + # end + # + # assert_match %r%info%, out + # assert_match %r%bad%, err + # + # NOTE: For efficiency, this method uses StringIO and does not + # capture IO for subprocesses. Use #capture_subprocess_io for + # that. + + def capture_io + _synchronize do + begin + captured_stdout, captured_stderr = StringIO.new, StringIO.new + + orig_stdout, orig_stderr = $stdout, $stderr + $stdout, $stderr = captured_stdout, captured_stderr + + yield + + return captured_stdout.string, captured_stderr.string + ensure + $stdout = orig_stdout + $stderr = orig_stderr + end + end + end + + ## + # Captures $stdout and $stderr into strings, using Tempfile to + # ensure that subprocess IO is captured as well. + # + # out, err = capture_subprocess_io do + # system "echo Some info" + # system "echo You did a bad thing 1>&2" + # end + # + # assert_match %r%info%, out + # assert_match %r%bad%, err + # + # NOTE: This method is approximately 10x slower than #capture_io so + # only use it when you need to test the output of a subprocess. 
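+  #
+  # A further sketch, assuming a +ruby+ executable is on PATH (the
+  # command and message are only illustrative): output written by a
+  # child process is captured here, where #capture_io would miss it.
+  #
+  #   out, _err = capture_subprocess_io do
+  #     system "ruby", "-e", "puts :from_child"
+  #   end
+  #
+  #   assert_match %r%from_child%, out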
+ + def capture_subprocess_io + _synchronize do + begin + require "tempfile" + + captured_stdout, captured_stderr = Tempfile.new("out"), Tempfile.new("err") + + orig_stdout, orig_stderr = $stdout.dup, $stderr.dup + $stdout.reopen captured_stdout + $stderr.reopen captured_stderr + + yield + + $stdout.rewind + $stderr.rewind + + return captured_stdout.read, captured_stderr.read + ensure + captured_stdout.unlink + captured_stderr.unlink + $stdout.reopen orig_stdout + $stderr.reopen orig_stderr + + orig_stdout.close + orig_stderr.close + captured_stdout.close + captured_stderr.close + end + end + end + + ## + # Returns details for exception +e+ + + def exception_details e, msg + [ + "#{msg}", + "Class: <#{e.class}>", + "Message: <#{e.message.inspect}>", + "---Backtrace---", + "#{Minitest.filter_backtrace(e.backtrace).join("\n")}", + "---------------", + ].join "\n" + end + + ## + # Fails after a given date (in the local time zone). This allows + # you to put time-bombs in your tests if you need to keep + # something around until a later date lest you forget about it. + + def fail_after y,m,d,msg + flunk msg if Time.now > Time.local(y, m, d) + end + + ## + # Fails with +msg+. + + def flunk msg = nil + msg ||= "Epic Fail!" + assert false, msg + end + + ## + # Returns a proc that will output +msg+ along with the default message. + + def message msg = nil, ending = nil, &default + proc { + msg = msg.call.chomp(".") if Proc === msg + custom_message = "#{msg}.\n" unless msg.nil? or msg.to_s.empty? + "#{custom_message}#{default.call}#{ending || "."}" + } + end + + ## + # used for counting assertions + + def pass _msg = nil + assert true + end + + ## + # Fails if +test+ is truthy. + + def refute test, msg = nil + msg ||= message { "Expected #{mu_pp(test)} to not be truthy" } + assert !test, msg + end + + ## + # Fails if +obj+ is empty. + + def refute_empty obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be empty" } + assert_respond_to obj, :empty? + refute obj.empty?, msg + end + + ## + # Fails if exp == act. + # + # For floats use refute_in_delta. + + def refute_equal exp, act, msg = nil + msg = message(msg) { + "Expected #{mu_pp(act)} to not be equal to #{mu_pp(exp)}" + } + refute exp == act, msg + end + + ## + # For comparing Floats. Fails if +exp+ is within +delta+ of +act+. + # + # refute_in_delta Math::PI, (22.0 / 7.0) + + def refute_in_delta exp, act, delta = 0.001, msg = nil + n = (exp - act).abs + msg = message(msg) { + "Expected |#{exp} - #{act}| (#{n}) to not be <= #{delta}" + } + refute delta >= n, msg + end + + ## + # For comparing Floats. Fails if +exp+ and +act+ have a relative error + # less than +epsilon+. + + def refute_in_epsilon a, b, epsilon = 0.001, msg = nil + refute_in_delta a, b, a * epsilon, msg + end + + ## + # Fails if +collection+ includes +obj+. + + def refute_includes collection, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(collection)} to not include #{mu_pp(obj)}" + } + assert_respond_to collection, :include? + refute collection.include?(obj), msg + end + + ## + # Fails if +obj+ is an instance of +cls+. + + def refute_instance_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to not be an instance of #{cls}" + } + refute obj.instance_of?(cls), msg + end + + ## + # Fails if +obj+ is a kind of +cls+. + + def refute_kind_of cls, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be a kind of #{cls}" } + refute obj.kind_of?(cls), msg + end + + ## + # Fails if +matcher+ =~ +obj+. 
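+  #
+  # A small illustrative example (the strings are arbitrary):
+  #
+  #   refute_match(/bye/, "hello") # passes
+  #   refute_match(/ell/, "hello") # fails
+  #
+  # As with assert_match, a String matcher is escaped and wrapped in a
+  # Regexp before matching.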
+ + def refute_match matcher, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp matcher} to not match #{mu_pp obj}" } + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + refute matcher =~ obj, msg + end + + ## + # Fails if +obj+ is nil. + + def refute_nil obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be nil" } + refute obj.nil?, msg + end + + ## + # Fails if +o1+ is not +op+ +o2+. Eg: + # + # refute_operator 1, :>, 2 #=> pass + # refute_operator 1, :<, 2 #=> fail + + def refute_operator o1, op, o2 = UNDEFINED, msg = nil + return refute_predicate o1, op, msg if UNDEFINED == o2 + msg = message(msg) { "Expected #{mu_pp(o1)} to not be #{op} #{mu_pp(o2)}" } + refute o1.__send__(op, o2), msg + end + + ## + # Fails if +path+ exists. + + def refute_path_exists path, msg = nil + msg = message(msg) { "Expected path '#{path}' to not exist" } + refute File.exist?(path), msg + end + + ## + # For testing with predicates. + # + # refute_predicate str, :empty? + # + # This is really meant for specs and is front-ended by refute_operator: + # + # str.wont_be :empty? + + def refute_predicate o1, op, msg = nil + msg = message(msg) { "Expected #{mu_pp(o1)} to not be #{op}" } + refute o1.__send__(op), msg + end + + ## + # Fails if +obj+ responds to the message +meth+. + + def refute_respond_to obj, meth, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not respond to #{meth}" } + + refute obj.respond_to?(meth), msg + end + + ## + # Fails if +exp+ is the same (by object identity) as +act+. + + def refute_same exp, act, msg = nil + msg = message(msg) { + data = [mu_pp(act), act.object_id, mu_pp(exp), exp.object_id] + "Expected %s (oid=%d) to not be the same as %s (oid=%d)" % data + } + refute exp.equal?(act), msg + end + + ## + # Skips the current run. If run in verbose-mode, the skipped run + # gets listed at the end of the run but doesn't cause a failure + # exit code. + + def skip msg = nil, bt = caller + msg ||= "Skipped, no message given" + @skip = true + raise Minitest::Skip, msg, bt + end + + ## + # Skips the current run until a given date (in the local time + # zone). This allows you to put some fixes on hold until a later + # date, but still holds you accountable and prevents you from + # forgetting it. + + def skip_until y,m,d,msg + skip msg if Time.now < Time.local(y, m, d) + where = caller.first.split(/:/, 3).first(2).join ":" + warn "Stale skip_until %p at %s" % [msg, where] + end + + ## + # Was this testcase skipped? Meant for #teardown. + + def skipped? 
+ defined?(@skip) and @skip + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/autorun.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/autorun.rb new file mode 100644 index 0000000..9409b04 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/autorun.rb @@ -0,0 +1,13 @@ +begin + require "rubygems" + gem "minitest" +rescue Gem::LoadError + # do nothing +end + +require "minitest" +require "minitest/spec" +require "minitest/mock" +require "minitest/hell" if ENV["MT_HELL"] + +Minitest.autorun diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/benchmark.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/benchmark.rb new file mode 100644 index 0000000..f2b2465 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/benchmark.rb @@ -0,0 +1,455 @@ +require "minitest/test" +require "minitest/spec" + +module Minitest + ## + # Subclass Benchmark to create your own benchmark runs. Methods + # starting with "bench_" get executed on a per-class. + # + # See Minitest::Assertions + + class Benchmark < Test + def self.io # :nodoc: + @io + end + + def io # :nodoc: + self.class.io + end + + def self.run reporter, options = {} # :nodoc: + @io = reporter.io + super + end + + def self.runnable_methods # :nodoc: + methods_matching(/^bench_/) + end + + ## + # Returns a set of ranges stepped exponentially from +min+ to + # +max+ by powers of +base+. Eg: + # + # bench_exp(2, 16, 2) # => [2, 4, 8, 16] + + def self.bench_exp min, max, base = 10 + min = (Math.log10(min) / Math.log10(base)).to_i + max = (Math.log10(max) / Math.log10(base)).to_i + + (min..max).map { |m| base ** m }.to_a + end + + ## + # Returns a set of ranges stepped linearly from +min+ to +max+ by + # +step+. Eg: + # + # bench_linear(20, 40, 10) # => [20, 30, 40] + + def self.bench_linear min, max, step = 10 + (min..max).step(step).to_a + rescue LocalJumpError # 1.8.6 + r = []; (min..max).step(step) { |n| r << n }; r + end + + ## + # Specifies the ranges used for benchmarking for that class. + # Defaults to exponential growth from 1 to 10k by powers of 10. + # Override if you need different ranges for your benchmarks. + # + # See also: ::bench_exp and ::bench_linear. + + def self.bench_range + bench_exp 1, 10_000 + end + + ## + # Runs the given +work+, gathering the times of each run. Range + # and times are then passed to a given +validation+ proc. Outputs + # the benchmark name and times in tab-separated format, making it + # easy to paste into a spreadsheet for graphing or further + # analysis. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # validation = proc { |x, y| ... } + # assert_performance validation do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance validation, &work + range = self.class.bench_range + + io.print "#{self.name}" + + times = [] + + range.each do |x| + GC.start + t0 = Minitest.clock_time + instance_exec(x, &work) + t = Minitest.clock_time - t0 + + io.print "\t%9.6f" % t + times << t + end + io.puts + + validation[range, times] + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a constant rate (eg, linear slope == 0) within a given + # +threshold+. Note: because we're testing for a slope of 0, R^2 + # is not a good determining factor for the fit, so the threshold + # is applied against the slope itself. As such, you probably want + # to tighten it from the default. 
+ # + # See https://www.graphpad.com/guides/prism/8/curve-fitting/reg_intepretingnonlinr2.htm + # for more details. + # + # Fit is calculated by #fit_linear. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_constant 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_constant threshold = 0.99, &work + validation = proc do |range, times| + a, b, rr = fit_linear range, times + assert_in_delta 0, b, 1 - threshold + [a, b, rr] + end + + assert_performance validation, &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a exponential curve within a given error +threshold+. + # + # Fit is calculated by #fit_exponential. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_exponential 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_exponential threshold = 0.99, &work + assert_performance validation_for_fit(:exponential, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a logarithmic curve within a given error +threshold+. + # + # Fit is calculated by #fit_logarithmic. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_logarithmic 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_logarithmic threshold = 0.99, &work + assert_performance validation_for_fit(:logarithmic, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a straight line within a given error +threshold+. + # + # Fit is calculated by #fit_linear. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_linear 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_linear threshold = 0.99, &work + assert_performance validation_for_fit(:linear, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered curve + # fit to match a power curve within a given error +threshold+. + # + # Fit is calculated by #fit_power. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_power 0.9999 do |x| + # @obj.algorithm + # end + # end + + def assert_performance_power threshold = 0.99, &work + assert_performance validation_for_fit(:power, threshold), &work + end + + ## + # Takes an array of x/y pairs and calculates the general R^2 value. + # + # See: http://en.wikipedia.org/wiki/Coefficient_of_determination + + def fit_error xys + y_bar = sigma(xys) { |_, y| y } / xys.size.to_f + ss_tot = sigma(xys) { |_, y| (y - y_bar) ** 2 } + ss_err = sigma(xys) { |x, y| (yield(x) - y) ** 2 } + + 1 - (ss_err / ss_tot) + end + + ## + # To fit a functional form: y = ae^(bx). + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFittingExponential.html + + def fit_exponential xs, ys + n = xs.size + xys = xs.zip(ys) + sxlny = sigma(xys) { |x, y| x * Math.log(y) } + slny = sigma(xys) { |_, y| Math.log(y) } + sx2 = sigma(xys) { |x, _| x * x } + sx = sigma xs + + c = n * sx2 - sx ** 2 + a = (slny * sx2 - sx * sxlny) / c + b = ( n * sxlny - sx * slny ) / c + + return Math.exp(a), b, fit_error(xys) { |x| Math.exp(a + b * x) } + end + + ## + # To fit a functional form: y = a + b*ln(x). + # + # Takes x and y values and returns [a, b, r^2]. 
+ # + # See: http://mathworld.wolfram.com/LeastSquaresFittingLogarithmic.html + + def fit_logarithmic xs, ys + n = xs.size + xys = xs.zip(ys) + slnx2 = sigma(xys) { |x, _| Math.log(x) ** 2 } + slnx = sigma(xys) { |x, _| Math.log(x) } + sylnx = sigma(xys) { |x, y| y * Math.log(x) } + sy = sigma(xys) { |_, y| y } + + c = n * slnx2 - slnx ** 2 + b = ( n * sylnx - sy * slnx ) / c + a = (sy - b * slnx) / n + + return a, b, fit_error(xys) { |x| a + b * Math.log(x) } + end + + ## + # Fits the functional form: a + bx. + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFitting.html + + def fit_linear xs, ys + n = xs.size + xys = xs.zip(ys) + sx = sigma xs + sy = sigma ys + sx2 = sigma(xs) { |x| x ** 2 } + sxy = sigma(xys) { |x, y| x * y } + + c = n * sx2 - sx**2 + a = (sy * sx2 - sx * sxy) / c + b = ( n * sxy - sx * sy ) / c + + return a, b, fit_error(xys) { |x| a + b * x } + end + + ## + # To fit a functional form: y = ax^b. + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html + + def fit_power xs, ys + n = xs.size + xys = xs.zip(ys) + slnxlny = sigma(xys) { |x, y| Math.log(x) * Math.log(y) } + slnx = sigma(xs) { |x | Math.log(x) } + slny = sigma(ys) { | y| Math.log(y) } + slnx2 = sigma(xs) { |x | Math.log(x) ** 2 } + + b = (n * slnxlny - slnx * slny) / (n * slnx2 - slnx ** 2) + a = (slny - b * slnx) / n + + return Math.exp(a), b, fit_error(xys) { |x| (Math.exp(a) * (x ** b)) } + end + + ## + # Enumerates over +enum+ mapping +block+ if given, returning the + # sum of the result. Eg: + # + # sigma([1, 2, 3]) # => 1 + 2 + 3 => 6 + # sigma([1, 2, 3]) { |n| n ** 2 } # => 1 + 4 + 9 => 14 + + def sigma enum, &block + enum = enum.map(&block) if block + enum.inject { |sum, n| sum + n } + end + + ## + # Returns a proc that calls the specified fit method and asserts + # that the error is within a tolerable threshold. + + def validation_for_fit msg, threshold + proc do |range, times| + a, b, rr = send "fit_#{msg}", range, times + assert_operator rr, :>=, threshold + [a, b, rr] + end + end + end +end + +module Minitest + ## + # The spec version of Minitest::Benchmark. + + class BenchSpec < Benchmark + extend Minitest::Spec::DSL + + ## + # This is used to define a new benchmark method. You usually don't + # use this directly and is intended for those needing to write new + # performance curve fits (eg: you need a specific polynomial fit). + # + # See ::bench_performance_linear for an example of how to use this. + + def self.bench name, &block + define_method "bench_#{name.gsub(/\W+/, "_")}", &block + end + + ## + # Specifies the ranges used for benchmarking for that class. + # + # bench_range do + # bench_exp(2, 16, 2) + # end + # + # See Minitest::Benchmark#bench_range for more details. + + def self.bench_range &block + return super unless block + + meta = (class << self; self; end) + meta.send :define_method, "bench_range", &block + end + + ## + # Create a benchmark that verifies that the performance is linear. + # + # describe "my class Bench" do + # bench_performance_linear "fast_algorithm", 0.9999 do |n| + # @obj.fast_algorithm(n) + # end + # end + + def self.bench_performance_linear name, threshold = 0.99, &work + bench name do + assert_performance_linear threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is constant. + # + # describe "my class Bench" do + # bench_performance_constant "zoom_algorithm!" 
do |n| + # @obj.zoom_algorithm!(n) + # end + # end + + def self.bench_performance_constant name, threshold = 0.99, &work + bench name do + assert_performance_constant threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is exponential. + # + # describe "my class Bench" do + # bench_performance_exponential "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_exponential name, threshold = 0.99, &work + bench name do + assert_performance_exponential threshold, &work + end + end + + + ## + # Create a benchmark that verifies that the performance is logarithmic. + # + # describe "my class Bench" do + # bench_performance_logarithmic "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_logarithmic name, threshold = 0.99, &work + bench name do + assert_performance_logarithmic threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is power. + # + # describe "my class Bench" do + # bench_performance_power "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_power name, threshold = 0.99, &work + bench name do + assert_performance_power threshold, &work + end + end + end + + Minitest::Spec.register_spec_type(/Bench(mark)?$/, Minitest::BenchSpec) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/expectations.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/expectations.rb new file mode 100644 index 0000000..fb4ec4d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/expectations.rb @@ -0,0 +1,303 @@ +## +# It's where you hide your "assertions". +# +# Please note, because of the way that expectations are implemented, +# all expectations (eg must_equal) are dependent upon a thread local +# variable +:current_spec+. If your specs rely on mixing threads into +# the specs themselves, you're better off using assertions or the new +# _(value) wrapper. For example: +# +# it "should still work in threads" do +# my_threaded_thingy do +# (1+1).must_equal 2 # bad +# assert_equal 2, 1+1 # good +# _(1 + 1).must_equal 2 # good +# value(1 + 1).must_equal 2 # good, also #expect +# _ { 1 + "1" }.must_raise TypeError # good +# end +# end + +module Minitest::Expectations + + ## + # See Minitest::Assertions#assert_empty. 
+ # + # _(collection).must_be_empty + # + # :method: must_be_empty + + infect_an_assertion :assert_empty, :must_be_empty, :unary + + ## + # See Minitest::Assertions#assert_equal + # + # _(a).must_equal b + # + # :method: must_equal + + infect_an_assertion :assert_equal, :must_equal + + ## + # See Minitest::Assertions#assert_in_delta + # + # _(n).must_be_close_to m [, delta] + # + # :method: must_be_close_to + + infect_an_assertion :assert_in_delta, :must_be_close_to + + infect_an_assertion :assert_in_delta, :must_be_within_delta # :nodoc: + + ## + # See Minitest::Assertions#assert_in_epsilon + # + # _(n).must_be_within_epsilon m [, epsilon] + # + # :method: must_be_within_epsilon + + infect_an_assertion :assert_in_epsilon, :must_be_within_epsilon + + ## + # See Minitest::Assertions#assert_includes + # + # _(collection).must_include obj + # + # :method: must_include + + infect_an_assertion :assert_includes, :must_include, :reverse + + ## + # See Minitest::Assertions#assert_instance_of + # + # _(obj).must_be_instance_of klass + # + # :method: must_be_instance_of + + infect_an_assertion :assert_instance_of, :must_be_instance_of + + ## + # See Minitest::Assertions#assert_kind_of + # + # _(obj).must_be_kind_of mod + # + # :method: must_be_kind_of + + infect_an_assertion :assert_kind_of, :must_be_kind_of + + ## + # See Minitest::Assertions#assert_match + # + # _(a).must_match b + # + # :method: must_match + + infect_an_assertion :assert_match, :must_match + + ## + # See Minitest::Assertions#assert_nil + # + # _(obj).must_be_nil + # + # :method: must_be_nil + + infect_an_assertion :assert_nil, :must_be_nil, :unary + + ## + # See Minitest::Assertions#assert_operator + # + # _(n).must_be :<=, 42 + # + # This can also do predicates: + # + # _(str).must_be :empty? + # + # :method: must_be + + infect_an_assertion :assert_operator, :must_be, :reverse + + ## + # See Minitest::Assertions#assert_output + # + # _ { ... }.must_output out_or_nil [, err] + # + # :method: must_output + + infect_an_assertion :assert_output, :must_output, :block + + ## + # See Minitest::Assertions#assert_raises + # + # _ { ... }.must_raise exception + # + # :method: must_raise + + infect_an_assertion :assert_raises, :must_raise, :block + + ## + # See Minitest::Assertions#assert_respond_to + # + # _(obj).must_respond_to msg + # + # :method: must_respond_to + + infect_an_assertion :assert_respond_to, :must_respond_to, :reverse + + ## + # See Minitest::Assertions#assert_same + # + # _(a).must_be_same_as b + # + # :method: must_be_same_as + + infect_an_assertion :assert_same, :must_be_same_as + + ## + # See Minitest::Assertions#assert_silent + # + # _ { ... }.must_be_silent + # + # :method: must_be_silent + + infect_an_assertion :assert_silent, :must_be_silent, :block + + ## + # See Minitest::Assertions#assert_throws + # + # _ { ... 
}.must_throw sym + # + # :method: must_throw + + infect_an_assertion :assert_throws, :must_throw, :block + + ## + # See Minitest::Assertions#assert_path_exists + # + # _(some_path).path_must_exist + # + # :method: path_must_exist + + infect_an_assertion :assert_path_exists, :path_must_exist, :unary + + ## + # See Minitest::Assertions#refute_path_exists + # + # _(some_path).path_wont_exist + # + # :method: path_wont_exist + + infect_an_assertion :refute_path_exists, :path_wont_exist, :unary + + ## + # See Minitest::Assertions#refute_empty + # + # _(collection).wont_be_empty + # + # :method: wont_be_empty + + infect_an_assertion :refute_empty, :wont_be_empty, :unary + + ## + # See Minitest::Assertions#refute_equal + # + # _(a).wont_equal b + # + # :method: wont_equal + + infect_an_assertion :refute_equal, :wont_equal + + ## + # See Minitest::Assertions#refute_in_delta + # + # _(n).wont_be_close_to m [, delta] + # + # :method: wont_be_close_to + + infect_an_assertion :refute_in_delta, :wont_be_close_to + + infect_an_assertion :refute_in_delta, :wont_be_within_delta # :nodoc: + + ## + # See Minitest::Assertions#refute_in_epsilon + # + # _(n).wont_be_within_epsilon m [, epsilon] + # + # :method: wont_be_within_epsilon + + infect_an_assertion :refute_in_epsilon, :wont_be_within_epsilon + + ## + # See Minitest::Assertions#refute_includes + # + # _(collection).wont_include obj + # + # :method: wont_include + + infect_an_assertion :refute_includes, :wont_include, :reverse + + ## + # See Minitest::Assertions#refute_instance_of + # + # _(obj).wont_be_instance_of klass + # + # :method: wont_be_instance_of + + infect_an_assertion :refute_instance_of, :wont_be_instance_of + + ## + # See Minitest::Assertions#refute_kind_of + # + # _(obj).wont_be_kind_of mod + # + # :method: wont_be_kind_of + + infect_an_assertion :refute_kind_of, :wont_be_kind_of + + ## + # See Minitest::Assertions#refute_match + # + # _(a).wont_match b + # + # :method: wont_match + + infect_an_assertion :refute_match, :wont_match + + ## + # See Minitest::Assertions#refute_nil + # + # _(obj).wont_be_nil + # + # :method: wont_be_nil + + infect_an_assertion :refute_nil, :wont_be_nil, :unary + + ## + # See Minitest::Assertions#refute_operator + # + # _(n).wont_be :<=, 42 + # + # This can also do predicates: + # + # str.wont_be :empty? + # + # :method: wont_be + + infect_an_assertion :refute_operator, :wont_be, :reverse + + ## + # See Minitest::Assertions#refute_respond_to + # + # _(obj).wont_respond_to msg + # + # :method: wont_respond_to + + infect_an_assertion :refute_respond_to, :wont_respond_to, :reverse + + ## + # See Minitest::Assertions#refute_same + # + # _(a).wont_be_same_as b + # + # :method: wont_be_same_as + + infect_an_assertion :refute_same, :wont_be_same_as +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/hell.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/hell.rb new file mode 100644 index 0000000..73c88ac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/hell.rb @@ -0,0 +1,11 @@ +require "minitest/parallel" + +class Minitest::Test + parallelize_me! 
+end + +begin + require "minitest/proveit" +rescue LoadError + warn "NOTE: `gem install minitest-proveit` for even more hellish tests" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/mock.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/mock.rb new file mode 100644 index 0000000..39cfc24 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/mock.rb @@ -0,0 +1,240 @@ +class MockExpectationError < StandardError; end # :nodoc: + +module Minitest # :nodoc: + + ## + # A simple and clean mock object framework. + # + # All mock objects are an instance of Mock + + class Mock + alias :__respond_to? :respond_to? + + overridden_methods = %w[ + === + class + inspect + instance_eval + instance_variables + object_id + public_send + respond_to_missing? + send + to_s + ] + + instance_methods.each do |m| + undef_method m unless overridden_methods.include?(m.to_s) || m =~ /^__/ + end + + overridden_methods.map(&:to_sym).each do |method_id| + define_method method_id do |*args, &b| + if @expected_calls.key? method_id then + method_missing(method_id, *args, &b) + else + super(*args, &b) + end + end + end + + def initialize delegator = nil # :nodoc: + @delegator = delegator + @expected_calls = Hash.new { |calls, name| calls[name] = [] } + @actual_calls = Hash.new { |calls, name| calls[name] = [] } + end + + ## + # Expect that method +name+ is called, optionally with +args+ or a + # +blk+, and returns +retval+. + # + # @mock.expect(:meaning_of_life, 42) + # @mock.meaning_of_life # => 42 + # + # @mock.expect(:do_something_with, true, [some_obj, true]) + # @mock.do_something_with(some_obj, true) # => true + # + # @mock.expect(:do_something_else, true) do |a1, a2| + # a1 == "buggs" && a2 == :bunny + # end + # + # +args+ is compared to the expected args using case equality (ie, the + # '===' operator), allowing for less specific expectations. + # + # @mock.expect(:uses_any_string, true, [String]) + # @mock.uses_any_string("foo") # => true + # @mock.verify # => true + # + # @mock.expect(:uses_one_string, true, ["foo"]) + # @mock.uses_one_string("bar") # => raises MockExpectationError + # + # If a method will be called multiple times, specify a new expect for each one. + # They will be used in the order you define them. + # + # @mock.expect(:ordinal_increment, 'first') + # @mock.expect(:ordinal_increment, 'second') + # + # @mock.ordinal_increment # => 'first' + # @mock.ordinal_increment # => 'second' + # @mock.ordinal_increment # => raises MockExpectationError "No more expects available for :ordinal_increment" + # + + def expect name, retval, args = [], &blk + name = name.to_sym + + if block_given? + raise ArgumentError, "args ignored when block given" unless args.empty? + @expected_calls[name] << { :retval => retval, :block => blk } + else + raise ArgumentError, "args must be an array" unless Array === args + @expected_calls[name] << { :retval => retval, :args => args } + end + self + end + + def __call name, data # :nodoc: + case data + when Hash then + "#{name}(#{data[:args].inspect[1..-2]}) => #{data[:retval].inspect}" + else + data.map { |d| __call name, d }.join ", " + end + end + + ## + # Verify that all methods were called as expected. Raises + # +MockExpectationError+ if the mock object was not called as + # expected. 
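+    #
+    # A minimal illustration (the expectation name is arbitrary):
+    #
+    #   @mock = Minitest::Mock.new
+    #   @mock.expect :meaning_of_life, 42
+    #   @mock.meaning_of_life # => 42
+    #   @mock.verify          # => true
+    #
+    # Had #meaning_of_life not been called, #verify would have raised
+    # MockExpectationError instead.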
+ + def verify + @expected_calls.each do |name, expected| + actual = @actual_calls.fetch(name, nil) + raise MockExpectationError, "expected #{__call name, expected[0]}" unless actual + raise MockExpectationError, "expected #{__call name, expected[actual.size]}, got [#{__call name, actual}]" if + actual.size < expected.size + end + true + end + + def method_missing sym, *args, &block # :nodoc: + unless @expected_calls.key?(sym) then + if @delegator && @delegator.respond_to?(sym) + return @delegator.public_send(sym, *args, &block) + else + raise NoMethodError, "unmocked method %p, expected one of %p" % + [sym, @expected_calls.keys.sort_by(&:to_s)] + end + end + + index = @actual_calls[sym].length + expected_call = @expected_calls[sym][index] + + unless expected_call then + raise MockExpectationError, "No more expects available for %p: %p" % + [sym, args] + end + + expected_args, retval, val_block = + expected_call.values_at(:args, :retval, :block) + + if val_block then + # keep "verify" happy + @actual_calls[sym] << expected_call + + raise MockExpectationError, "mocked method %p failed block w/ %p" % + [sym, args] unless val_block.call(*args, &block) + + return retval + end + + if expected_args.size != args.size then + raise ArgumentError, "mocked method %p expects %d arguments, got %d" % + [sym, expected_args.size, args.size] + end + + zipped_args = expected_args.zip(args) + fully_matched = zipped_args.all? { |mod, a| + mod === a or mod == a + } + + unless fully_matched then + raise MockExpectationError, "mocked method %p called with unexpected arguments %p" % + [sym, args] + end + + @actual_calls[sym] << { + :retval => retval, + :args => zipped_args.map! { |mod, a| mod === a ? mod : a }, + } + + retval + end + + def respond_to? sym, include_private = false # :nodoc: + return true if @expected_calls.key? sym.to_sym + return true if @delegator && @delegator.respond_to?(sym, include_private) + __respond_to?(sym, include_private) + end + end +end + +module Minitest::Assertions + ## + # Assert that the mock verifies correctly. + + def assert_mock mock + assert mock.verify + end +end + +## +# Object extensions for Minitest::Mock. + +class Object + + ## + # Add a temporary stubbed method replacing +name+ for the duration + # of the +block+. If +val_or_callable+ responds to #call, then it + # returns the result of calling it, otherwise returns the value + # as-is. If stubbed method yields a block, +block_args+ will be + # passed along. Cleans up the stub at the end of the +block+. The + # method +name+ must exist before stubbing. + # + # def test_stale_eh + # obj_under_test = Something.new + # refute obj_under_test.stale? + # + # Time.stub :now, Time.at(0) do + # assert obj_under_test.stale? + # end + # end + # + + def stub name, val_or_callable, *block_args + new_name = "__minitest_stub__#{name}" + + metaclass = class << self; self; end + + if respond_to? name and not methods.map(&:to_s).include? name.to_s then + metaclass.send :define_method, name do |*args| + super(*args) + end + end + + metaclass.send :alias_method, new_name, name + + metaclass.send :define_method, name do |*args, &blk| + if val_or_callable.respond_to? 
:call then + val_or_callable.call(*args, &blk) + else + blk.call(*block_args) if blk + val_or_callable + end + end + + yield self + ensure + metaclass.send :undef_method, name + metaclass.send :alias_method, name, new_name + metaclass.send :undef_method, new_name + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/parallel.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/parallel.rb new file mode 100644 index 0000000..40ac709 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/parallel.rb @@ -0,0 +1,70 @@ +module Minitest + module Parallel #:nodoc: + + ## + # The engine used to run multiple tests in parallel. + + class Executor + + ## + # The size of the pool of workers. + + attr_reader :size + + ## + # Create a parallel test executor of with +size+ workers. + + def initialize size + @size = size + @queue = Queue.new + @pool = nil + end + + ## + # Start the executor + + def start + @pool = size.times.map { + Thread.new(@queue) do |queue| + Thread.current.abort_on_exception = true + while (job = queue.pop) + klass, method, reporter = job + reporter.synchronize { reporter.prerecord klass, method } + result = Minitest.run_one_method klass, method + reporter.synchronize { reporter.record result } + end + end + } + end + + ## + # Add a job to the queue + + def << work; @queue << work; end + + ## + # Shuts down the pool of workers by signalling them to quit and + # waiting for them all to finish what they're currently working + # on. + + def shutdown + size.times { @queue << nil } + @pool.each(&:join) + end + end + + module Test # :nodoc: + def _synchronize; Minitest::Test.io_lock.synchronize { yield }; end # :nodoc: + + module ClassMethods # :nodoc: + def run_one_method klass, method_name, reporter + Minitest.parallel_executor << [klass, method_name, reporter] + end + + def test_order + :parallel + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/pride.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/pride.rb new file mode 100644 index 0000000..f3b8e47 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/pride.rb @@ -0,0 +1,4 @@ +require "minitest" + +Minitest.load_plugins +Minitest::PrideIO.pride! diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/pride_plugin.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/pride_plugin.rb new file mode 100644 index 0000000..aeef2b9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/pride_plugin.rb @@ -0,0 +1,142 @@ +require "minitest" + +module Minitest + def self.plugin_pride_options opts, _options # :nodoc: + opts.on "-p", "--pride", "Pride. Show your testing pride!" do + PrideIO.pride! + end + end + + def self.plugin_pride_init options # :nodoc: + if PrideIO.pride? then + klass = ENV["TERM"] =~ /^xterm|-256color$/ ? PrideLOL : PrideIO + io = klass.new options[:io] + + self.reporter.reporters.grep(Minitest::Reporter).each do |rep| + rep.io = io if rep.io.tty? + end + end + end + + ## + # Show your testing pride! + + class PrideIO + ## + # Activate the pride plugin. Called from both -p option and minitest/pride + + def self.pride! + @pride = true + end + + ## + # Are we showing our testing pride? + + def self.pride? + @pride ||= false + end + + # Start an escape sequence + ESC = "\e[" + + # End the escape sequence + NND = "#{ESC}0m" + + # The IO we're going to pipe through. 
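Editor's aside: the Object#stub helper from mock.rb above branches on val_or_callable.respond_to?(:call), which is what lets a stub return either a fixed value or the result of calling something callable. A minimal sketch of both forms follows; it is not part of the vendored files and only assumes the stubbed methods (Time.now, Time.at) already exist, as stub requires.

require "minitest/autorun"

class TestStubForms < Minitest::Test
  def test_value_and_callable_stubs
    # plain value: returned as-is for every call during the block
    Time.stub :now, Time.at(0) do
      assert_equal 0, Time.now.to_i
    end

    # callable: #call is invoked with the arguments the caller passed
    Time.stub :at, ->(secs) { "stubbed #{secs}" } do
      assert_equal "stubbed 99", Time.at(99)
    end
  end
end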
+ attr_reader :io + + def initialize io # :nodoc: + @io = io + # stolen from /System/Library/Perl/5.10.0/Term/ANSIColor.pm + # also reference http://en.wikipedia.org/wiki/ANSI_escape_code + @colors ||= (31..36).to_a + @size = @colors.size + @index = 0 + end + + ## + # Wrap print to colorize the output. + + def print o + case o + when "." then + io.print pride o + when "E", "F" then + io.print "#{ESC}41m#{ESC}37m#{o}#{NND}" + when "S" then + io.print pride o + else + io.print o + end + end + + def puts *o # :nodoc: + o.map! { |s| + s.to_s.sub(/Finished/) { + @index = 0 + "Fabulous run".split(//).map { |c| + pride(c) + }.join + } + } + + io.puts(*o) + end + + ## + # Color a string. + + def pride string + string = "*" if string == "." + c = @colors[@index % @size] + @index += 1 + "#{ESC}#{c}m#{string}#{NND}" + end + + def method_missing msg, *args # :nodoc: + io.send(msg, *args) + end + end + + ## + # If you thought the PrideIO was colorful... + # + # (Inspired by lolcat, but with clean math) + + class PrideLOL < PrideIO + PI_3 = Math::PI / 3 # :nodoc: + + def initialize io # :nodoc: + # walk red, green, and blue around a circle separated by equal thirds. + # + # To visualize, type this into wolfram-alpha: + # + # plot (3*sin(x)+3), (3*sin(x+2*pi/3)+3), (3*sin(x+4*pi/3)+3) + + # 6 has wide pretty gradients. 3 == lolcat, about half the width + @colors = (0...(6 * 7)).map { |n| + n *= 1.0 / 6 + r = (3 * Math.sin(n ) + 3).to_i + g = (3 * Math.sin(n + 2 * PI_3) + 3).to_i + b = (3 * Math.sin(n + 4 * PI_3) + 3).to_i + + # Then we take rgb and encode them in a single number using base 6. + # For some mysterious reason, we add 16... to clear the bottom 4 bits? + # Yes... they're ugly. + + 36 * r + 6 * g + b + 16 + } + + super + end + + ## + # Make the string even more colorful. Damnit. + + def pride string + c = @colors[@index % @size] + @index += 1 + "#{ESC}38;5;#{c}m#{string}#{NND}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/spec.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/spec.rb new file mode 100644 index 0000000..12f1975 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/spec.rb @@ -0,0 +1,342 @@ +require "minitest/test" + +class Module # :nodoc: + def infect_an_assertion meth, new_name, dont_flip = false # :nodoc: + block = dont_flip == :block + dont_flip = false if block + + # warn "%-22p -> %p %p" % [meth, new_name, dont_flip] + self.class_eval <<-EOM, __FILE__, __LINE__ + 1 + def #{new_name} *args + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + warn "DEPRECATED: global use of #{new_name} from #\{where}. Use _(obj).#{new_name} instead. This will fail in Minitest 6." + Minitest::Expectation.new(self, Minitest::Spec.current).#{new_name}(*args) + end + EOM + + Minitest::Expectation.class_eval <<-EOM, __FILE__, __LINE__ + 1 + def #{new_name} *args + raise "Calling ##{new_name} outside of test." unless ctx + case + when #{!!dont_flip} then + ctx.#{meth}(target, *args) + when #{block} && Proc === target then + ctx.#{meth}(*args, &target) + else + ctx.#{meth}(args.first, target, *args[1..-1]) + end + end + EOM + end +end + +Minitest::Expectation = Struct.new :target, :ctx # :nodoc: + +## +# Kernel extensions for minitest + +module Kernel + ## + # Describe a series of expectations for a given target +desc+. + # + # Defines a test class subclassing from either Minitest::Spec or + # from the surrounding describe's class. 
The surrounding class may + # subclass Minitest::Spec manually in order to easily share code: + # + # class MySpec < Minitest::Spec + # # ... shared code ... + # end + # + # class TestStuff < MySpec + # it "does stuff" do + # # shared code available here + # end + # describe "inner stuff" do + # it "still does stuff" do + # # ...and here + # end + # end + # end + # + # For more information on getting started with writing specs, see: + # + # http://www.rubyinside.com/a-minitestspec-tutorial-elegant-spec-style-testing-that-comes-with-ruby-5354.html + # + # For some suggestions on how to improve your specs, try: + # + # http://betterspecs.org + # + # but do note that several items there are debatable or specific to + # rspec. + # + # For more information about expectations, see Minitest::Expectations. + + def describe desc, *additional_desc, &block # :doc: + stack = Minitest::Spec.describe_stack + name = [stack.last, desc, *additional_desc].compact.join("::") + sclas = stack.last || if Class === self && kind_of?(Minitest::Spec::DSL) then + self + else + Minitest::Spec.spec_type desc, *additional_desc + end + + cls = sclas.create name, desc + + stack.push cls + cls.class_eval(&block) + stack.pop + cls + end + private :describe +end + +## +# Minitest::Spec -- The faster, better, less-magical spec framework! +# +# For a list of expectations, see Minitest::Expectations. + +class Minitest::Spec < Minitest::Test + + def self.current # :nodoc: + Thread.current[:current_spec] + end + + def initialize name # :nodoc: + super + Thread.current[:current_spec] = self + end + + ## + # Oh look! A Minitest::Spec::DSL module! Eat your heart out DHH. + + module DSL + ## + # Contains pairs of matchers and Spec classes to be used to + # calculate the superclass of a top-level describe. This allows for + # automatically customizable spec types. + # + # See: register_spec_type and spec_type + + TYPES = [[//, Minitest::Spec]] + + ## + # Register a new type of spec that matches the spec's description. + # This method can take either a Regexp and a spec class or a spec + # class and a block that takes the description and returns true if + # it matches. + # + # Eg: + # + # register_spec_type(/Controller$/, Minitest::Spec::Rails) + # + # or: + # + # register_spec_type(Minitest::Spec::RailsModel) do |desc| + # desc.superclass == ActiveRecord::Base + # end + + def register_spec_type *args, &block + if block then + matcher, klass = block, args.first + else + matcher, klass = *args + end + TYPES.unshift [matcher, klass] + end + + ## + # Figure out the spec class to use based on a spec's description. Eg: + # + # spec_type("BlahController") # => Minitest::Spec::Rails + + def spec_type desc, *additional + TYPES.find { |matcher, _klass| + if matcher.respond_to? :call then + matcher.call desc, *additional + else + matcher === desc.to_s + end + }.last + end + + def describe_stack # :nodoc: + Thread.current[:describe_stack] ||= [] + end + + def children # :nodoc: + @children ||= [] + end + + def nuke_test_methods! # :nodoc: + self.public_instance_methods.grep(/^test_/).each do |name| + self.send :undef_method, name + end + end + + ## + # Define a 'before' action. Inherits the way normal methods should. + # + # NOTE: +type+ is ignored and is only there to make porting easier. + # + # Equivalent to Minitest::Test#setup. + + def before _type = nil, &block + define_method :setup do + super() + self.instance_eval(&block) + end + end + + ## + # Define an 'after' action. Inherits the way normal methods should. 
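Editor's aside: register_spec_type, documented above, is the hook frameworks use to route matching describe blocks onto their own spec superclass. A compact sketch, not part of the vendored spec.rb; ControllerSpec and its app helper are invented names for illustration.

require "minitest/autorun"

# hypothetical shared superclass for controller-flavoured specs
class ControllerSpec < Minitest::Spec
  def app
    :fake_rack_app   # helper available to every matching spec
  end
end

# describes whose description ends in "Controller" now subclass ControllerSpec
Minitest::Spec.register_spec_type(/Controller$/, ControllerSpec)

describe "WidgetsController" do
  it "is built on the registered spec type" do
    _(self.class.ancestors).must_include ControllerSpec
    _(app).must_equal :fake_rack_app
  end
end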
+ # + # NOTE: +type+ is ignored and is only there to make porting easier. + # + # Equivalent to Minitest::Test#teardown. + + def after _type = nil, &block + define_method :teardown do + self.instance_eval(&block) + super() + end + end + + ## + # Define an expectation with name +desc+. Name gets morphed to a + # proper test method name. For some freakish reason, people who + # write specs don't like class inheritance, so this goes way out of + # its way to make sure that expectations aren't inherited. + # + # This is also aliased to #specify and doesn't require a +desc+ arg. + # + # Hint: If you _do_ want inheritance, use minitest/test. You can mix + # and match between assertions and expectations as much as you want. + + def it desc = "anonymous", &block + block ||= proc { skip "(no tests defined)" } + + @specs ||= 0 + @specs += 1 + + name = "test_%04d_%s" % [ @specs, desc ] + + undef_klasses = self.children.reject { |c| c.public_method_defined? name } + + define_method name, &block + + undef_klasses.each do |undef_klass| + undef_klass.send :undef_method, name + end + + name + end + + ## + # Essentially, define an accessor for +name+ with +block+. + # + # Why use let instead of def? I honestly don't know. + + def let name, &block + name = name.to_s + pre, post = "let '#{name}' cannot ", ". Please use another name." + methods = Minitest::Spec.instance_methods.map(&:to_s) - %w[subject] + raise ArgumentError, "#{pre}begin with 'test'#{post}" if + name =~ /\Atest/ + raise ArgumentError, "#{pre}override a method in Minitest::Spec#{post}" if + methods.include? name + + define_method name do + @_memoized ||= {} + @_memoized.fetch(name) { |k| @_memoized[k] = instance_eval(&block) } + end + end + + ## + # Another lazy man's accessor generator. Made even more lazy by + # setting the name for you to +subject+. + + def subject &block + let :subject, &block + end + + def create name, desc # :nodoc: + cls = Class.new(self) do + @name = name + @desc = desc + + nuke_test_methods! + end + + children << cls + + cls + end + + def name # :nodoc: + defined?(@name) ? @name : super + end + + def to_s # :nodoc: + name # Can't alias due to 1.8.7, not sure why + end + + attr_reader :desc # :nodoc: + alias :specify :it + + ## + # Rdoc... why are you so dumb? + + module InstanceMethods + ## + # Takes a value or a block and returns a value monad that has + # all of Expectations methods available to it. + # + # _(1 + 1).must_equal 2 + # + # And for blocks: + # + # _ { 1 + "1" }.must_raise TypeError + # + # This method of expectation-based testing is preferable to + # straight-expectation methods (on Object) because it stores its + # test context, bypassing our hacky use of thread-local variables. + # + # NOTE: At some point, the methods on Object will be deprecated + # and then removed. 
+ # + # It is also aliased to #value and #expect for your aesthetic + # pleasure: + # + # _(1 + 1).must_equal 2 + # value(1 + 1).must_equal 2 + # expect(1 + 1).must_equal 2 + + def _ value = nil, &block + Minitest::Expectation.new block || value, self + end + + alias value _ + alias expect _ + + def before_setup # :nodoc: + super + Thread.current[:current_spec] = self + end + end + + def self.extended obj # :nodoc: + obj.send :include, InstanceMethods + end + end + + extend DSL + + TYPES = DSL::TYPES # :nodoc: +end + +require "minitest/expectations" + +class Object # :nodoc: + include Minitest::Expectations unless ENV["MT_NO_EXPECTATIONS"] +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/test.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/test.rb new file mode 100644 index 0000000..0993bc4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/test.rb @@ -0,0 +1,220 @@ +require "minitest" unless defined? Minitest::Runnable + +module Minitest + ## + # Subclass Test to create your own tests. Typically you'll want a + # Test subclass per implementation class. + # + # See Minitest::Assertions + + class Test < Runnable + require "minitest/assertions" + include Minitest::Assertions + include Minitest::Reportable + + def class_name # :nodoc: + self.class.name # for Minitest::Reportable + end + + PASSTHROUGH_EXCEPTIONS = [NoMemoryError, SignalException, SystemExit] # :nodoc: + + # :stopdoc: + class << self; attr_accessor :io_lock; end + self.io_lock = Mutex.new + # :startdoc: + + ## + # Call this at the top of your tests when you absolutely + # positively need to have ordered tests. In doing so, you're + # admitting that you suck and your tests are weak. + + def self.i_suck_and_my_tests_are_order_dependent! + class << self + undef_method :test_order if method_defined? :test_order + define_method :test_order do :alpha end + end + end + + ## + # Make diffs for this Test use #pretty_inspect so that diff + # in assert_equal can have more details. NOTE: this is much slower + # than the regular inspect but much more usable for complex + # objects. + + def self.make_my_diffs_pretty! + require "pp" + + define_method :mu_pp, &:pretty_inspect + end + + ## + # Call this at the top of your tests when you want to run your + # tests in parallel. In doing so, you're admitting that you rule + # and your tests are awesome. + + def self.parallelize_me! + include Minitest::Parallel::Test + extend Minitest::Parallel::Test::ClassMethods + end + + ## + # Returns all instance methods starting with "test_". Based on + # #test_order, the methods are either sorted, randomized + # (default), or run in parallel. + + def self.runnable_methods + methods = methods_matching(/^test_/) + + case self.test_order + when :random, :parallel then + max = methods.size + methods.sort.sort_by { rand max } + when :alpha, :sorted then + methods.sort + else + raise "Unknown test_order: #{self.test_order.inspect}" + end + end + + ## + # Defines the order to run tests (:random by default). Override + # this or use a convenience method to change it for your tests. + + def self.test_order + :random + end + + TEARDOWN_METHODS = %w[ before_teardown teardown after_teardown ] # :nodoc: + + ## + # Runs a single test with setup/teardown hooks. 
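Editor's aside: spec.rb ends just above, so before test.rb continues, here is a small end-to-end spec tying together describe, let, before, it and the _ wrapper documented in that file. It is a sketch rather than part of the vendored sources, and the Counter class is invented for illustration.

require "minitest/autorun"

# hypothetical class under test
class Counter
  attr_reader :count
  def initialize; @count = 0; end
  def bump; @count += 1; end
end

describe Counter do
  let(:counter) { Counter.new }    # lazy accessor, memoized per test

  before { counter.bump }          # equivalent to Minitest::Test#setup

  it "starts from the bumped value" do
    _(counter.count).must_equal 1  # _() wraps the value in an Expectation
  end

  it "increments on each bump" do
    counter.bump
    expect(counter.count).must_equal 2   # expect/value are aliases of _
  end
end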
+ + def run + with_info_handler do + time_it do + capture_exceptions do + before_setup; setup; after_setup + + self.send self.name + end + + TEARDOWN_METHODS.each do |hook| + capture_exceptions do + self.send hook + end + end + end + end + + Result.from self # per contract + end + + ## + # Provides before/after hooks for setup and teardown. These are + # meant for library writers, NOT for regular test authors. See + # #before_setup for an example. + + module LifecycleHooks + + ## + # Runs before every test, before setup. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # As a simplistic example: + # + # module MyMinitestPlugin + # def before_setup + # super + # # ... stuff to do before setup is run + # end + # + # def after_setup + # # ... stuff to do after setup is run + # super + # end + # + # def before_teardown + # super + # # ... stuff to do before teardown is run + # end + # + # def after_teardown + # # ... stuff to do after teardown is run + # super + # end + # end + # + # class MiniTest::Test + # include MyMinitestPlugin + # end + + def before_setup; end + + ## + # Runs before every test. Use this to set up before each test + # run. + + def setup; end + + ## + # Runs before every test, after setup. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def after_setup; end + + ## + # Runs after every test, before teardown. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def before_teardown; end + + ## + # Runs after every test. Use this to clean up after each test + # run. + + def teardown; end + + ## + # Runs after every test, after teardown. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def after_teardown; end + end # LifecycleHooks + + def capture_exceptions # :nodoc: + yield + rescue *PASSTHROUGH_EXCEPTIONS + raise + rescue Assertion => e + self.failures << e + rescue Exception => e + self.failures << UnexpectedError.new(e) + end + + def with_info_handler &block # :nodoc: + t0 = Minitest.clock_time + + handler = lambda do + warn "\nCurrent: %s#%s %.2fs" % [self.class, self.name, Minitest.clock_time - t0] + end + + self.class.on_signal ::Minitest.info_signal, handler, &block + end + + include LifecycleHooks + include Guard + extend Guard + end # Test +end + +require "minitest/unit" unless defined?(MiniTest) # compatibility layer only diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/unit.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/unit.rb new file mode 100644 index 0000000..9a4eadf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/lib/minitest/unit.rb @@ -0,0 +1,45 @@ +# :stopdoc: + +unless defined?(Minitest) then + # all of this crap is just to avoid circular requires and is only + # needed if a user requires "minitest/unit" directly instead of + # "minitest/autorun", so we also warn + + from = caller.reject { |s| s =~ /rubygems/ }.join("\n ") + warn "Warning: you should require 'minitest/autorun' instead." 
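Editor's aside: Minitest::Test above defines the ordering knobs and lifecycle hooks (test_order, i_suck_and_my_tests_are_order_dependent!, make_my_diffs_pretty!, setup/teardown). Before the rest of the compatibility shim, a short sketch of how a plain test class opts into them; the class and its @log variable are invented for illustration and are not part of the vendored files.

require "minitest/autorun"

class TestHooksAndOrdering < Minitest::Test
  i_suck_and_my_tests_are_order_dependent!   # switches test_order from :random to :alpha
  make_my_diffs_pretty!                      # assert_equal failures diff via #pretty_inspect

  def setup        # runs before every test (see LifecycleHooks above)
    @log = [:setup]
  end

  def teardown     # runs after every test
    @log.clear
  end

  def test_a_sees_fresh_setup
    @log << :a
    assert_equal [:setup, :a], @log
  end

  def test_b_also_sees_fresh_setup
    assert_equal [:setup], @log   # teardown and setup ran again between tests
  end
end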
+ warn %(Warning: or add 'gem "minitest"' before 'require "minitest/autorun"') + warn "From:\n #{from}" + + module Minitest; end + MiniTest = Minitest # prevents minitest.rb from requiring back to us + require "minitest" +end + +MiniTest = Minitest unless defined?(MiniTest) + +module Minitest + class Unit + VERSION = Minitest::VERSION + class TestCase < Minitest::Test + def self.inherited klass # :nodoc: + from = caller.first + warn "MiniTest::Unit::TestCase is now Minitest::Test. From #{from}" + super + end + end + + def self.autorun # :nodoc: + from = caller.first + warn "MiniTest::Unit.autorun is now Minitest.autorun. From #{from}" + Minitest.autorun + end + + def self.after_tests &b # :nodoc: + from = caller.first + warn "MiniTest::Unit.after_tests is now Minitest.after_run. From #{from}" + Minitest.after_run(&b) + end + end +end + +# :startdoc: diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/metametameta.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/metametameta.rb new file mode 100644 index 0000000..6e75b9d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/metametameta.rb @@ -0,0 +1,136 @@ +require "tempfile" +require "stringio" +require "minitest/autorun" + +class Minitest::Test + def clean s + s.gsub(/^ {6}/, "") + end + + def with_empty_backtrace_filter + original = Minitest.backtrace_filter + + obj = Minitest::BacktraceFilter.new + def obj.filter _bt + [] + end + + Minitest::Test.io_lock.synchronize do # try not to trounce in parallel + begin + Minitest.backtrace_filter = obj + yield + ensure + Minitest.backtrace_filter = original + end + end + end +end + + +class FakeNamedTest < Minitest::Test + @@count = 0 + + def self.name + @fake_name ||= begin + @@count += 1 + "FakeNamedTest%02d" % @@count + end + end +end + +module MyModule; end +class AnError < StandardError; include MyModule; end + +class MetaMetaMetaTestCase < Minitest::Test + attr_accessor :reporter, :output, :tu + + def with_stderr err + old = $stderr + $stderr = err + yield + ensure + $stderr = old + end + + def run_tu_with_fresh_reporter flags = %w[--seed 42] + options = Minitest.process_args flags + + @output = StringIO.new("".encode('UTF-8')) + + self.reporter = Minitest::CompositeReporter.new + reporter << Minitest::SummaryReporter.new(@output, options) + reporter << Minitest::ProgressReporter.new(@output, options) + + with_stderr @output do + reporter.start + + yield(reporter) if block_given? + + @tus ||= [@tu] + @tus.each do |tu| + Minitest::Runnable.runnables.delete tu + + tu.run reporter, options + end + + reporter.report + end + end + + def first_reporter + reporter.reporters.first + end + + def assert_report expected, flags = %w[--seed 42], &block + header = clean <<-EOM + Run options: #{flags.map { |s| s =~ /\|/ ? s.inspect : s }.join " "} + + # Running: + + EOM + + run_tu_with_fresh_reporter flags, &block + + output = normalize_output @output.string.dup + + assert_equal header + expected, output + end + + def normalize_output output + output.sub!(/Finished in .*/, "Finished in 0.00") + output.sub!(/Loaded suite .*/, "Loaded suite blah") + + output.gsub!(/FakeNamedTest\d+/, "FakeNamedTestXX") + output.gsub!(/ = \d+.\d\d s = /, " = 0.00 s = ") + output.gsub!(/0x[A-Fa-f0-9]+/, "0xXXX") + output.gsub!(/ +$/, "") + + if windows? 
then + output.gsub!(/\[(?:[A-Za-z]:)?[^\]:]+:\d+\]/, "[FILE:LINE]") + output.gsub!(/^(\s+)(?:[A-Za-z]:)?[^:]+:\d+:in/, '\1FILE:LINE:in') + else + output.gsub!(/\[[^\]:]+:\d+\]/, "[FILE:LINE]") + output.gsub!(/^(\s+)[^:]+:\d+:in/, '\1FILE:LINE:in') + end + + output.gsub!(/( at )[^:]+:\d+/, '\1[FILE:LINE]') + + output + end + + def restore_env + old_value = ENV["MT_NO_SKIP_MSG"] + ENV.delete "MT_NO_SKIP_MSG" + + yield + ensure + ENV["MT_NO_SKIP_MSG"] = old_value + end + + def setup + super + srand 42 + Minitest::Test.reset + @tu = nil + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_assertions.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_assertions.rb new file mode 100644 index 0000000..5bc456f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_assertions.rb @@ -0,0 +1,1575 @@ +# encoding: UTF-8 + +require "minitest/autorun" + +if defined? Encoding then + e = Encoding.default_external + if e != Encoding::UTF_8 then + warn "" + warn "" + warn "NOTE: External encoding #{e} is not UTF-8. Tests WILL fail." + warn " Run tests with `RUBYOPT=-Eutf-8 rake` to avoid errors." + warn "" + warn "" + end +end + +SomeError = Class.new Exception + +unless defined? MyModule then + module MyModule; end + class AnError < StandardError; include MyModule; end +end + +class TestMinitestAssertions < Minitest::Test + # do not call parallelize_me! - teardown accesses @tc._assertions + # which is not threadsafe. Nearly every method in here is an + # assertion test so it isn't worth splitting it out further. + + RUBY18 = !defined? Encoding + + class DummyTest + include Minitest::Assertions + # include Minitest::Reportable # TODO: why do I really need this? + + attr_accessor :assertions, :failure + + def initialize + self.assertions = 0 + self.failure = nil + end + end + + def setup + super + + Minitest::Test.reset + + @tc = DummyTest.new + @zomg = "zomg ponies!" # TODO: const + @assertion_count = 1 + end + + def teardown + assert_equal(@assertion_count, @tc.assertions, + "expected #{@assertion_count} assertions to be fired during the test, not #{@tc.assertions}") + end + + def assert_deprecated name + dep = /DEPRECATED: #{name}. From #{__FILE__}:\d+(?::.*)?/ + dep = "" if $-w.nil? + + assert_output nil, dep do + yield + end + end + + def assert_triggered expected, klass = Minitest::Assertion + e = assert_raises klass do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, '\1') + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + msg.gsub!(/(\d\.\d{6})\d+/, '\1xxx') # normalize: ruby version, impl, platform + + assert_msg = Regexp === expected ? :assert_match : :assert_equal + self.send assert_msg, expected, msg + end + + def assert_unexpected expected + expected = Regexp.new expected if String === expected + + assert_triggered expected, Minitest::UnexpectedError do + yield + end + end + + def clean s + s.gsub(/^ {6,10}/, "") + end + + def non_verbose + orig_verbose = $VERBOSE + $VERBOSE = false + + yield + ensure + $VERBOSE = orig_verbose + end + + def test_assert + @assertion_count = 2 + + @tc.assert_equal true, @tc.assert(true), "returns true on success" + end + + def test_assert__triggered + assert_triggered "Expected false to be truthy." 
do + @tc.assert false + end + end + + def test_assert__triggered_message + assert_triggered @zomg do + @tc.assert false, @zomg + end + end + + def test_assert__triggered_lambda + assert_triggered "whoops" do + @tc.assert false, lambda { "whoops" } + end + end + + def test_assert_empty + @assertion_count = 2 + + @tc.assert_empty [] + end + + def test_assert_empty_triggered + @assertion_count = 2 + + assert_triggered "Expected [1] to be empty." do + @tc.assert_empty [1] + end + end + + def test_assert_equal + @tc.assert_equal 1, 1 + end + + def test_assert_equal_different_collection_array_hex_invisible + object1 = Object.new + object2 = Object.new + msg = "No visible difference in the Array#inspect output. + You should look at the implementation of #== on Array or its members. + [#]".gsub(/^ +/, "") + assert_triggered msg do + @tc.assert_equal [object1], [object2] + end + end + + def test_assert_equal_different_collection_hash_hex_invisible + h1, h2 = {}, {} + h1[1] = Object.new + h2[1] = Object.new + msg = "No visible difference in the Hash#inspect output. + You should look at the implementation of #== on Hash or its members. + {1=>#}".gsub(/^ +/, "") + + assert_triggered msg do + @tc.assert_equal h1, h2 + end + end + + def test_assert_equal_different_diff_deactivated + without_diff do + assert_triggered util_msg("haha" * 10, "blah" * 10) do + o1 = "haha" * 10 + o2 = "blah" * 10 + + @tc.assert_equal o1, o2 + end + end + end + + def test_assert_equal_different_message + assert_triggered "whoops.\nExpected: 1\n Actual: 2" do + @tc.assert_equal 1, 2, message { "whoops" } + end + end + + def test_assert_equal_different_lambda + assert_triggered "whoops.\nExpected: 1\n Actual: 2" do + @tc.assert_equal 1, 2, lambda { "whoops" } + end + end + + def test_assert_equal_different_hex + c = Class.new do + def initialize s; @name = s; end + end + + o1 = c.new "a" + o2 = c.new "b" + msg = clean <<-EOS + --- expected + +++ actual + @@ -1 +1 @@ + -#<#:0xXXXXXX @name=\"a\"> + +#<#:0xXXXXXX @name=\"b\"> + EOS + + assert_triggered msg do + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_hex_invisible + o1 = Object.new + o2 = Object.new + + msg = "No visible difference in the Object#inspect output. + You should look at the implementation of #== on Object or its members. + #".gsub(/^ +/, "") + + assert_triggered msg do + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long + msg = "--- expected + +++ actual + @@ -1 +1 @@ + -\"hahahahahahahahahahahahahahahahahahahaha\" + +\"blahblahblahblahblahblahblahblahblahblah\" + ".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "haha" * 10 + o2 = "blah" * 10 + + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long_invisible + msg = "No visible difference in the String#inspect output. + You should look at the implementation of #== on String or its members. + \"blahblahblahblahblahblahblahblahblahblah\"".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "blah" * 10 + o2 = "blah" * 10 + def o1.== _ + false + end + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long_msg + msg = "message. 
+ --- expected + +++ actual + @@ -1 +1 @@ + -\"hahahahahahahahahahahahahahahahahahahaha\" + +\"blahblahblahblahblahblahblahblahblahblah\" + ".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "haha" * 10 + o2 = "blah" * 10 + @tc.assert_equal o1, o2, "message" + end + end + + def test_assert_equal_different_short + assert_triggered util_msg(1, 2) do + @tc.assert_equal 1, 2 + end + end + + def test_assert_equal_different_short_msg + assert_triggered util_msg(1, 2, "message") do + @tc.assert_equal 1, 2, "message" + end + end + + def test_assert_equal_different_short_multiline + msg = "--- expected\n+++ actual\n@@ -1,2 +1,2 @@\n \"a\n-b\"\n+c\"\n" + assert_triggered msg do + @tc.assert_equal "a\nb", "a\nc" + end + end + + def test_assert_equal_does_not_allow_lhs_nil + if Minitest::VERSION =~ /^6/ then + warn "Time to strip the MT5 test" + + @assertion_count += 1 + assert_triggered(/Use assert_nil if expecting nil/) do + @tc.assert_equal nil, nil + end + else + err_re = /Use assert_nil if expecting nil from .*test_minitest_\w+.rb/ + err_re = "" if $-w.nil? + + assert_output "", err_re do + @tc.assert_equal nil, nil + end + end + end + + def test_assert_equal_does_not_allow_lhs_nil_triggered + assert_triggered "Expected: nil\n Actual: false" do + @tc.assert_equal nil, false + end + end + + def test_assert_equal_string_bug791 + exp = <<-'EOF'.gsub(/^ {10}/, "") # note single quotes + --- expected + +++ actual + @@ -1,2 +1 @@ + -"\\n + -" + +"\\\" + EOF + + exp = "Expected: \"\\\\n\"\n Actual: \"\\\\\"" + assert_triggered exp do + @tc.assert_equal "\\n", "\\" + end + end + + def test_assert_equal_string_both_escaped_unescaped_newlines + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,2 +1 @@ + -\"A\\n + -B\" + +\"A\\n\\\\nB\" + EOM + + assert_triggered msg do + exp = "A\\nB" + act = "A\n\\nB" + + @tc.assert_equal exp, act + end + end + + def test_assert_equal_string_encodings + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,3 +1,3 @@ + -# encoding: UTF-8 + -# valid: false + +# encoding: ASCII-8BIT + +# valid: true + "bad-utf8-\\xF1.txt" + EOM + + assert_triggered msg do + x = "bad-utf8-\xF1.txt" + y = x.dup.force_encoding "binary" # TODO: switch to .b when 1.9 dropped + @tc.assert_equal x, y + end + end unless RUBY18 + + def test_assert_equal_string_encodings_both_different + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,3 +1,3 @@ + -# encoding: US-ASCII + -# valid: false + +# encoding: ASCII-8BIT + +# valid: true + "bad-utf8-\\xF1.txt" + EOM + + assert_triggered msg do + x = "bad-utf8-\xF1.txt".force_encoding "ASCII" + y = x.dup.force_encoding "binary" # TODO: switch to .b when 1.9 dropped + @tc.assert_equal x, y + end + end unless RUBY18 + + def test_assert_equal_unescape_newlines + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1,2 +1,2 @@ + -"hello + +"hello\n + world" + EOM + + assert_triggered msg do + exp = "hello\nworld" + act = 'hello\nworld' # notice single quotes + + @tc.assert_equal exp, act + end + end + + def test_assert_in_delta + @tc.assert_in_delta 0.0, 1.0 / 1000, 0.1 + end + + def test_assert_in_delta_triggered + x = "1.0e-06" + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= #{x}." 
do + @tc.assert_in_delta 0.0, 1.0 / 1000, 0.000001 + end + end + + def test_assert_in_epsilon + @assertion_count = 10 + + @tc.assert_in_epsilon 10_000, 9991 + @tc.assert_in_epsilon 9991, 10_000 + @tc.assert_in_epsilon 1.0, 1.001 + @tc.assert_in_epsilon 1.001, 1.0 + + @tc.assert_in_epsilon 10_000, 9999.1, 0.0001 + @tc.assert_in_epsilon 9999.1, 10_000, 0.0001 + @tc.assert_in_epsilon 1.0, 1.0001, 0.0001 + @tc.assert_in_epsilon 1.0001, 1.0, 0.0001 + + @tc.assert_in_epsilon(-1, -1) + @tc.assert_in_epsilon(-10_000, -9991) + end + + def test_assert_in_epsilon_triggered + assert_triggered "Expected |10000 - 9990| (10) to be <= 9.99." do + @tc.assert_in_epsilon 10_000, 9990 + end + end + + def test_assert_in_epsilon_triggered_negative_case + x = (RUBY18 and not maglev?) ? "0.1" : "0.100000xxx" + y = "0.1" + assert_triggered "Expected |-1.1 - -1| (#{x}) to be <= #{y}." do + @tc.assert_in_epsilon(-1.1, -1, 0.1) + end + end + + def test_assert_includes + @assertion_count = 2 + + @tc.assert_includes [true], true + end + + def test_assert_includes_triggered + @assertion_count = 3 + + e = @tc.assert_raises Minitest::Assertion do + @tc.assert_includes [true], false + end + + expected = "Expected [true] to include false." + assert_equal expected, e.message + end + + def test_assert_instance_of + @tc.assert_instance_of String, "blah" + end + + def test_assert_instance_of_triggered + assert_triggered 'Expected "blah" to be an instance of Array, not String.' do + @tc.assert_instance_of Array, "blah" + end + end + + def test_assert_kind_of + @tc.assert_kind_of String, "blah" + end + + def test_assert_kind_of_triggered + assert_triggered 'Expected "blah" to be a kind of Array, not String.' do + @tc.assert_kind_of Array, "blah" + end + end + + def test_assert_match + @assertion_count = 2 + @tc.assert_match(/\w+/, "blah blah blah") + end + + def test_assert_match_matchee_to_str + @assertion_count = 2 + + obj = Object.new + def obj.to_str; "blah" end + + @tc.assert_match "blah", obj + end + + def test_assert_match_matcher_object + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; true end + + @tc.assert_match pattern, 5 + end + + def test_assert_match_object_triggered + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; false end + def pattern.inspect; "[Object]" end + + assert_triggered "Expected [Object] to match 5." do + @tc.assert_match pattern, 5 + end + end + + def test_assert_match_triggered + @assertion_count = 2 + assert_triggered 'Expected /\d+/ to match "blah blah blah".' do + @tc.assert_match(/\d+/, "blah blah blah") + end + end + + def test_assert_nil + @tc.assert_nil nil + end + + def test_assert_nil_triggered + assert_triggered "Expected 42 to be nil." do + @tc.assert_nil 42 + end + end + + def test_assert_operator + @tc.assert_operator 2, :>, 1 + end + + def test_assert_operator_bad_object + bad = Object.new + def bad.== _; true end + + @tc.assert_operator bad, :equal?, bad + end + + def test_assert_operator_triggered + assert_triggered "Expected 2 to be < 1." 
do + @tc.assert_operator 2, :<, 1 + end + end + + def test_assert_output_both + @assertion_count = 2 + + @tc.assert_output "yay", "blah" do + print "yay" + $stderr.print "blah" + end + end + + def test_assert_output_both_regexps + @assertion_count = 4 + + @tc.assert_output(/y.y/, /bl.h/) do + print "yay" + $stderr.print "blah" + end + end + + def test_assert_output_err + @tc.assert_output nil, "blah" do + $stderr.print "blah" + end + end + + def test_assert_output_neither + @assertion_count = 0 + + @tc.assert_output do + # do nothing + end + end + + def test_assert_output_out + @tc.assert_output "blah" do + print "blah" + end + end + + def test_assert_output_triggered_both + assert_triggered util_msg("blah", "blah blah", "In stderr") do + @tc.assert_output "yay", "blah" do + print "boo" + $stderr.print "blah blah" + end + end + end + + def test_assert_output_triggered_err + assert_triggered util_msg("blah", "blah blah", "In stderr") do + @tc.assert_output nil, "blah" do + $stderr.print "blah blah" + end + end + end + + def test_assert_output_triggered_out + assert_triggered util_msg("blah", "blah blah", "In stdout") do + @tc.assert_output "blah" do + print "blah blah" + end + end + end + + def test_assert_output_no_block + assert_triggered "assert_output requires a block to capture output." do + @tc.assert_output "blah" + end + end + + def test_assert_output_nested_assert_uncaught + @assertion_count = 1 + + assert_triggered "Epic Fail!" do + @tc.assert_output "blah\n" do + puts "blah" + @tc.flunk + end + end + end + + def test_assert_output_nested_raise + @assertion_count = 2 + + @tc.assert_output "blah\n" do + @tc.assert_raises RuntimeError do + puts "blah" + raise "boom!" + end + end + end + + def test_assert_output_nested_raise_bad + @assertion_count = 0 + + assert_unexpected "boom!" do + @tc.assert_raises do # 2) bypassed via UnexpectedError + @tc.assert_output "blah\n" do # 1) captures and raises UnexpectedError + puts "not_blah" + raise "boom!" + end + end + end + end + + def test_assert_output_nested_raise_mismatch + # this test is redundant, but illustrative + @assertion_count = 0 + + assert_unexpected "boom!" do + @tc.assert_raises RuntimeError do # 2) bypassed via UnexpectedError + @tc.assert_output "blah\n" do # 1) captures and raises UnexpectedError + puts "not_blah" + raise ArgumentError, "boom!" + end + end + end + end + + def test_assert_output_nested_throw_caught + @assertion_count = 2 + + @tc.assert_output "blah\n" do + @tc.assert_throws :boom! do + puts "blah" + throw :boom! + end + end + end + + def test_assert_output_nested_throw_caught_bad + @assertion_count = 1 # want 0; can't prevent throw from escaping :( + + @tc.assert_throws :boom! do # 2) captured via catch + @tc.assert_output "blah\n" do # 1) bypassed via throw + puts "not_blah" + throw :boom! + end + end + end + + def test_assert_output_nested_throw_mismatch + @assertion_count = 0 + + assert_unexpected "uncaught throw :boom!" do + @tc.assert_throws :not_boom! do # 2) captured via assert_throws+rescue + @tc.assert_output "blah\n" do # 1) bypassed via throw + puts "not_blah" + throw :boom! + end + end + end + end + + def test_assert_output_uncaught_raise + @assertion_count = 0 + + assert_unexpected "RuntimeError: boom!" do + @tc.assert_output "blah\n" do + puts "not_blah" + raise "boom!" + end + end + end + + def test_assert_output_uncaught_throw + @assertion_count = 0 + + assert_unexpected "uncaught throw :boom!" do + @tc.assert_output "blah\n" do + puts "not_blah" + throw :boom! 
+ end + end + end + def test_assert_predicate + @tc.assert_predicate "", :empty? + end + + def test_assert_predicate_triggered + assert_triggered 'Expected "blah" to be empty?.' do + @tc.assert_predicate "blah", :empty? + end + end + + def test_assert_raises + @tc.assert_raises RuntimeError do + raise "blah" + end + end + + def test_assert_raises_default + @tc.assert_raises do + raise StandardError, "blah" + end + end + + def test_assert_raises_default_triggered + e = assert_raises Minitest::Assertion do + @tc.assert_raises do + raise SomeError, "blah" + end + end + + expected = clean <<-EOM.chomp + [StandardError] exception expected, not + Class: + Message: <\"blah\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_default_triggered\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected, actual + end + + def test_assert_raises_exit + @tc.assert_raises SystemExit do + exit 1 + end + end + + def test_assert_raises_module + @tc.assert_raises MyModule do + raise AnError + end + end + + def test_assert_raises_signals + @tc.assert_raises SignalException do + raise SignalException, :INT + end + end + + def test_assert_raises_throw_nested_bad + @assertion_count = 0 + + assert_unexpected "RuntimeError: boom!" do + @tc.assert_raises do + @tc.assert_throws :blah do + raise "boom!" + throw :not_blah + end + end + end + end + + ## + # *sigh* This is quite an odd scenario, but it is from real (albeit + # ugly) test code in ruby-core: + + # http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=29259 + + def test_assert_raises_skip + @assertion_count = 0 + + assert_triggered "skipped", Minitest::Skip do + @tc.assert_raises ArgumentError do + begin + raise "blah" + rescue + skip "skipped" + end + end + end + end + + def test_assert_raises_subclass + @tc.assert_raises StandardError do + raise AnError + end + end + + def test_assert_raises_subclass_triggered + e = assert_raises Minitest::Assertion do + @tc.assert_raises SomeError do + raise AnError, "some message" + end + end + + expected = clean <<-EOM + [SomeError] exception expected, not + Class: + Message: <\"some message\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_subclass_triggered\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected.chomp, actual + end + + def test_assert_raises_triggered_different + e = assert_raises Minitest::Assertion do + @tc.assert_raises RuntimeError do + raise SyntaxError, "icky" + end + end + + expected = clean <<-EOM.chomp + [RuntimeError] exception expected, not + Class: + Message: <\"icky\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_triggered_different\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected, actual + end + + def test_assert_raises_triggered_different_msg + e = assert_raises Minitest::Assertion do + @tc.assert_raises RuntimeError, "XXX" do + raise SyntaxError, "icky" + end + end + + expected = clean <<-EOM + XXX. 
+ [RuntimeError] exception expected, not + Class: + Message: <\"icky\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_triggered_different_msg\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected.chomp, actual + end + + def test_assert_raises_triggered_none + e = assert_raises Minitest::Assertion do + @tc.assert_raises Minitest::Assertion do + # do nothing + end + end + + expected = "Minitest::Assertion expected but nothing was raised." + + assert_equal expected, e.message + end + + def test_assert_raises_triggered_none_msg + e = assert_raises Minitest::Assertion do + @tc.assert_raises Minitest::Assertion, "XXX" do + # do nothing + end + end + + expected = "XXX.\nMinitest::Assertion expected but nothing was raised." + + assert_equal expected, e.message + end + + def test_assert_raises_without_block + assert_triggered "assert_raises requires a block to capture errors." do + @tc.assert_raises StandardError + end + end + + def test_assert_respond_to + @tc.assert_respond_to "blah", :empty? + end + + def test_assert_respond_to_triggered + assert_triggered 'Expected "blah" (String) to respond to #rawr!.' do + @tc.assert_respond_to "blah", :rawr! + end + end + + def test_assert_same + @assertion_count = 3 + + o = "blah" + @tc.assert_same 1, 1 + @tc.assert_same :blah, :blah + @tc.assert_same o, o + end + + def test_assert_same_triggered + @assertion_count = 2 + + assert_triggered "Expected 2 (oid=N) to be the same as 1 (oid=N)." do + @tc.assert_same 1, 2 + end + + s1 = "blah" + s2 = "blah" + + assert_triggered 'Expected "blah" (oid=N) to be the same as "blah" (oid=N).' do + @tc.assert_same s1, s2 + end + end + + def test_assert_send + assert_deprecated :assert_send do + @tc.assert_send [1, :<, 2] + end + end + + def test_assert_send_bad + assert_deprecated :assert_send do + assert_triggered "Expected 1.>(*[2]) to return true." do + @tc.assert_send [1, :>, 2] + end + end + end + + def test_assert_silent + @assertion_count = 2 + + @tc.assert_silent do + # do nothing + end + end + + def test_assert_silent_triggered_err + assert_triggered util_msg("", "blah blah", "In stderr") do + @tc.assert_silent do + $stderr.print "blah blah" + end + end + end + + def test_assert_silent_triggered_out + @assertion_count = 2 + + assert_triggered util_msg("", "blah blah", "In stdout") do + @tc.assert_silent do + print "blah blah" + end + end + end + + def test_assert_throws + @tc.assert_throws :blah do + throw :blah + end + end + + def test_assert_throws_argument_exception + @assertion_count = 0 + + assert_unexpected "ArgumentError" do + @tc.assert_throws :blah do + raise ArgumentError + end + end + end + + def test_assert_throws_different + assert_triggered "Expected :blah to have been thrown, not :not_blah." do + @tc.assert_throws :blah do + throw :not_blah + end + end + end + + def test_assert_throws_name_error + @assertion_count = 0 + + assert_unexpected "NameError" do + @tc.assert_throws :blah do + raise NameError + end + end + end + + def test_assert_throws_unthrown + assert_triggered "Expected :blah to have been thrown." do + @tc.assert_throws :blah do + # do nothing + end + end + end + + def test_assert_path_exists + @tc.assert_path_exists __FILE__ + end + + def test_assert_path_exists_triggered + assert_triggered "Expected path 'blah' to exist." 
do + @tc.assert_path_exists "blah" + end + end + + def test_capture_io + @assertion_count = 0 + + non_verbose do + out, err = capture_io do + puts "hi" + $stderr.puts "bye!" + end + + assert_equal "hi\n", out + assert_equal "bye!\n", err + end + end + + def test_capture_subprocess_io + @assertion_count = 0 + + non_verbose do + out, err = capture_subprocess_io do + system("echo hi") + system("echo bye! 1>&2") + end + + assert_equal "hi\n", out + assert_equal "bye!", err.strip + end + end + + def test_class_asserts_match_refutes + @assertion_count = 0 + + methods = Minitest::Assertions.public_instance_methods + methods.map!(&:to_s) if Symbol === methods.first + + # These don't have corresponding refutes _on purpose_. They're + # useless and will never be added, so don't bother. + ignores = %w[assert_output assert_raises assert_send + assert_silent assert_throws assert_mock] + + # These are test/unit methods. I'm not actually sure why they're still here + ignores += %w[assert_no_match assert_not_equal assert_not_nil + assert_not_same assert_nothing_raised + assert_nothing_thrown assert_raise] + + asserts = methods.grep(/^assert/).sort - ignores + refutes = methods.grep(/^refute/).sort - ignores + + assert_empty refutes.map { |n| n.sub(/^refute/, "assert") } - asserts + assert_empty asserts.map { |n| n.sub(/^assert/, "refute") } - refutes + end + + def test_delta_consistency + @assertion_count = 2 + + @tc.assert_in_delta 0, 1, 1 + + assert_triggered "Expected |0 - 1| (1) to not be <= 1." do + @tc.refute_in_delta 0, 1, 1 + end + end + + def test_epsilon_consistency + @assertion_count = 2 + + @tc.assert_in_epsilon 1.0, 1.001 + + msg = "Expected |1.0 - 1.001| (0.000999xxx) to not be <= 0.001." + assert_triggered msg do + @tc.refute_in_epsilon 1.0, 1.001 + end + end + + def assert_fail_after t + @tc.fail_after t.year, t.month, t.day, "remove the deprecations" + end + + def test_fail_after + d0 = Time.now + d1 = d0 + 86_400 # I am an idiot + + assert_silent do + assert_fail_after d1 + end + + assert_triggered "remove the deprecations" do + assert_fail_after d0 + end + end + + def test_flunk + assert_triggered "Epic Fail!" do + @tc.flunk + end + end + + def test_flunk_message + assert_triggered @zomg do + @tc.flunk @zomg + end + end + + def test_pass + @tc.pass + end + + def test_refute + @assertion_count = 2 + + @tc.assert_equal true, @tc.refute(false), "returns true on success" + end + + def test_refute_empty + @assertion_count = 2 + + @tc.refute_empty [1] + end + + def test_refute_empty_triggered + @assertion_count = 2 + + assert_triggered "Expected [] to not be empty." do + @tc.refute_empty [] + end + end + + def test_refute_equal + @tc.refute_equal "blah", "yay" + end + + def test_refute_equal_triggered + assert_triggered 'Expected "blah" to not be equal to "blah".' do + @tc.refute_equal "blah", "blah" + end + end + + def test_refute_in_delta + @tc.refute_in_delta 0.0, 1.0 / 1000, 0.000001 + end + + def test_refute_in_delta_triggered + x = "0.1" + assert_triggered "Expected |0.0 - 0.001| (0.001) to not be <= #{x}." do + @tc.refute_in_delta 0.0, 1.0 / 1000, 0.1 + end + end + + def test_refute_in_epsilon + @tc.refute_in_epsilon 10_000, 9990-1 + end + + def test_refute_in_epsilon_triggered + assert_triggered "Expected |10000 - 9990| (10) to not be <= 10.0." 
do + @tc.refute_in_epsilon 10_000, 9990 + flunk + end + end + + def test_refute_includes + @assertion_count = 2 + + @tc.refute_includes [true], false + end + + def test_refute_includes_triggered + @assertion_count = 3 + + e = @tc.assert_raises Minitest::Assertion do + @tc.refute_includes [true], true + end + + expected = "Expected [true] to not include true." + assert_equal expected, e.message + end + + def test_refute_instance_of + @tc.refute_instance_of Array, "blah" + end + + def test_refute_instance_of_triggered + assert_triggered 'Expected "blah" to not be an instance of String.' do + @tc.refute_instance_of String, "blah" + end + end + + def test_refute_kind_of + @tc.refute_kind_of Array, "blah" + end + + def test_refute_kind_of_triggered + assert_triggered 'Expected "blah" to not be a kind of String.' do + @tc.refute_kind_of String, "blah" + end + end + + def test_refute_match + @assertion_count = 2 + @tc.refute_match(/\d+/, "blah blah blah") + end + + def test_refute_match_matcher_object + @assertion_count = 2 + pattern = Object.new + def pattern.=~ _; false end + @tc.refute_match pattern, 5 + end + + def test_refute_match_object_triggered + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; true end + def pattern.inspect; "[Object]" end + + assert_triggered "Expected [Object] to not match 5." do + @tc.refute_match pattern, 5 + end + end + + def test_refute_match_triggered + @assertion_count = 2 + assert_triggered 'Expected /\w+/ to not match "blah blah blah".' do + @tc.refute_match(/\w+/, "blah blah blah") + end + end + + def test_refute_nil + @tc.refute_nil 42 + end + + def test_refute_nil_triggered + assert_triggered "Expected nil to not be nil." do + @tc.refute_nil nil + end + end + + def test_refute_operator + @tc.refute_operator 2, :<, 1 + end + + def test_refute_operator_bad_object + bad = Object.new + def bad.== _; true end + + @tc.refute_operator true, :equal?, bad + end + + def test_refute_operator_triggered + assert_triggered "Expected 2 to not be > 1." do + @tc.refute_operator 2, :>, 1 + end + end + + def test_refute_predicate + @tc.refute_predicate "42", :empty? + end + + def test_refute_predicate_triggered + assert_triggered 'Expected "" to not be empty?.' do + @tc.refute_predicate "", :empty? + end + end + + def test_refute_respond_to + @tc.refute_respond_to "blah", :rawr! + end + + def test_refute_respond_to_triggered + assert_triggered 'Expected "blah" to not respond to empty?.' do + @tc.refute_respond_to "blah", :empty? + end + end + + def test_refute_same + @tc.refute_same 1, 2 + end + + def test_refute_same_triggered + assert_triggered "Expected 1 (oid=N) to not be the same as 1 (oid=N)." do + @tc.refute_same 1, 1 + end + end + + def test_refute_path_exists + @tc.refute_path_exists "blah" + end + + def test_refute_path_exists_triggered + assert_triggered "Expected path '#{__FILE__}' to not exist." do + @tc.refute_path_exists __FILE__ + end + end + + def test_skip + @assertion_count = 0 + + assert_triggered "haha!", Minitest::Skip do + @tc.skip "haha!" 
+ end + end + + def assert_skip_until t, msg + @tc.skip_until t.year, t.month, t.day, msg + end + + def test_skip_until + @assertion_count = 0 + + d0 = Time.now + d1 = d0 + 86_400 # I am an idiot + + assert_output "", /Stale skip_until \"not yet\" at .*?:\d+$/ do + assert_skip_until d0, "not yet" + end + + assert_triggered "not ready yet", Minitest::Skip do + assert_skip_until d1, "not ready yet" + end + end + + def util_msg exp, act, msg = nil + s = "Expected: #{exp.inspect}\n Actual: #{act.inspect}" + s = "#{msg}.\n#{s}" if msg + s + end + + def without_diff + old_diff = Minitest::Assertions.diff + Minitest::Assertions.diff = nil + + yield + ensure + Minitest::Assertions.diff = old_diff + end +end + +class TestMinitestAssertionHelpers < Minitest::Test + def assert_mu_pp exp, input, raw = false + act = mu_pp input + + if String === input && !raw then + assert_equal "\"#{exp}\"", act + else + assert_equal exp, act + end + end + + def assert_mu_pp_for_diff exp, input, raw = false + act = mu_pp_for_diff input + + if String === input && !raw then + assert_equal "\"#{exp}\"", act + else + assert_equal exp, act + end + end + + def test_diff_equal + msg = "No visible difference in the String#inspect output. + You should look at the implementation of #== on String or its members. + \"blahblahblahblahblahblahblahblahblahblah\"".gsub(/^ +/, "") + + o1 = "blah" * 10 + o2 = "blah" * 10 + def o1.== _ + false + end + + assert_equal msg, diff(o1, o2) + end + + def test_diff_str_mixed + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1 +1 @@ + -"A\\n\nB" + +"A\n\\nB" + EOM + + exp = "A\\n\nB" + act = "A\n\\nB" + + assert_equal msg, diff(exp, act) + end + + def test_diff_str_multiline + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1,2 +1,2 @@ + "A + -B" + +C" + EOM + + exp = "A\nB" + act = "A\nC" + + assert_equal msg, diff(exp, act) + end + + def test_diff_str_simple + msg = <<-'EOM'.gsub(/^ {10}/, "").chomp # NOTE single quotes on heredoc + Expected: "A" + Actual: "B" + EOM + + exp = "A" + act = "B" + + assert_equal msg, diff(exp, act) + end + + def test_message + assert_equal "blah2.", message { "blah2" }.call + assert_equal "blah2.", message("") { "blah2" }.call + assert_equal "blah1.\nblah2.", message(:blah1) { "blah2" }.call + assert_equal "blah1.\nblah2.", message("blah1") { "blah2" }.call + + message = proc { "blah1" } + assert_equal "blah1.\nblah2.", message(message) { "blah2" }.call + + message = message { "blah1" } + assert_equal "blah1.\nblah2.", message(message) { "blah2" }.call + end + + def test_message_deferred + var = nil + + msg = message { var = "blah" } + + assert_nil var + + msg.call + + assert_equal "blah", var + end + + def test_mu_pp + assert_mu_pp 42.inspect, 42 + assert_mu_pp %w[a b c].inspect, %w[a b c] + assert_mu_pp "A B", "A B" + assert_mu_pp "A\\nB", "A\nB" + assert_mu_pp "A\\\\nB", 'A\nB' # notice single quotes + end + + def test_mu_pp_for_diff + assert_mu_pp_for_diff "#", Object.new + assert_mu_pp_for_diff "A B", "A B" + assert_mu_pp_for_diff [1, 2, 3].inspect, [1, 2, 3] + assert_mu_pp_for_diff "A\nB", "A\nB" + end + + def test_mu_pp_for_diff_str_bad_encoding + str = "\666".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_bad_encoding_both + str = "\666A\\n\nB".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6A\\\\n\\nB\"" 
+ + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_encoding + str = "A\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\nB\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_encoding_both + str = "A\\n\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\\\\n\\nB\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_nerd + assert_mu_pp_for_diff "A\\nB\\\\nC", "A\nB\\nC" + assert_mu_pp_for_diff "\\nB\\\\nC", "\nB\\nC" + assert_mu_pp_for_diff "\\nB\\\\n", "\nB\\n" + assert_mu_pp_for_diff "\\n\\\\n", "\n\\n" + assert_mu_pp_for_diff "\\\\n\\n", "\\n\n" + assert_mu_pp_for_diff "\\\\nB\\n", "\\nB\n" + assert_mu_pp_for_diff "\\\\nB\\nC", "\\nB\nC" + assert_mu_pp_for_diff "A\\\\n\\nB", "A\\n\nB" + assert_mu_pp_for_diff "A\\n\\\\nB", "A\n\\nB" + assert_mu_pp_for_diff "\\\\n\\n", "\\n\n" + assert_mu_pp_for_diff "\\n\\\\n", "\n\\n" + end + + def test_mu_pp_for_diff_str_normal + assert_mu_pp_for_diff "", "" + assert_mu_pp_for_diff "A\\n\n", "A\\n" + assert_mu_pp_for_diff "A\\n\nB", "A\\nB" + assert_mu_pp_for_diff "A\n", "A\n" + assert_mu_pp_for_diff "A\nB", "A\nB" + assert_mu_pp_for_diff "\\n\n", "\\n" + assert_mu_pp_for_diff "\n", "\n" + assert_mu_pp_for_diff "\\n\nA", "\\nA" + assert_mu_pp_for_diff "\nA", "\nA" + end + + def test_mu_pp_str_bad_encoding + str = "\666".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6\"" + + assert_mu_pp exp, str, :raw + end + + def test_mu_pp_str_encoding + str = "A\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\\nB\"" + + assert_mu_pp exp, str, :raw + end + + def test_mu_pp_str_immutable + printer = Class.new { extend Minitest::Assertions } + str = "test".freeze + assert_equal '"test"', printer.mu_pp(str) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_benchmark.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_benchmark.rb new file mode 100644 index 0000000..88abf77 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_benchmark.rb @@ -0,0 +1,137 @@ +require "minitest/autorun" +require "minitest/benchmark" + +## +# Used to verify data: +# http://www.wolframalpha.com/examples/RegressionAnalysis.html + +class TestMinitestBenchmark < Minitest::Test + def test_cls_bench_exp + assert_equal [2, 4, 8, 16, 32], Minitest::Benchmark.bench_exp(2, 32, 2) + end + + def test_cls_bench_linear + assert_equal [2, 4, 6, 8, 10], Minitest::Benchmark.bench_linear(2, 10, 2) + end + + def test_cls_runnable_methods + assert_equal [], Minitest::Benchmark.runnable_methods + + c = Class.new(Minitest::Benchmark) do + def bench_blah + end + end + + assert_equal ["bench_blah"], c.runnable_methods + end + + def test_cls_bench_range + assert_equal [1, 10, 100, 1_000, 10_000], Minitest::Benchmark.bench_range + end + + def test_fit_exponential_clean + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = x.map { |n| 1.1 * Math.exp(2.1 * n) } + + assert_fit :exponential, x, y, 1.0, 1.1, 2.1 + end + + def test_fit_exponential_noisy + x = [1.0, 1.9, 2.6, 3.4, 5.0] + y = [12, 10, 8.2, 6.9, 5.9] + + # verified with Numbers and R + assert_fit :exponential, x, y, 0.95, 13.81148, -0.1820 + end + + def test_fit_logarithmic_clean + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = x.map { |n| 1.1 + 2.1 * Math.log(n) } + + assert_fit :logarithmic, x, y, 1.0, 1.1, 2.1 + end + + def test_fit_logarithmic_noisy + x = [1.0, 2.0, 3.0, 4.0, 5.0] + # Generated with + # y = x.map { |n| 
jitter = 0.999 + 0.002 * rand; (Math.log(n) ) * jitter } + y = [0.0, 0.6935, 1.0995, 1.3873, 1.6097] + + assert_fit :logarithmic, x, y, 0.95, 0, 1 + end + + def test_fit_constant_clean + x = (1..5).to_a + y = [5.0, 5.0, 5.0, 5.0, 5.0] + + assert_fit :linear, x, y, nil, 5.0, 0 + end + + def test_fit_constant_noisy + x = (1..5).to_a + y = [1.0, 1.2, 1.0, 0.8, 1.0] + + # verified in numbers and R + assert_fit :linear, x, y, nil, 1.12, -0.04 + end + + def test_fit_linear_clean + # y = m * x + b where m = 2.2, b = 3.1 + x = (1..5).to_a + y = x.map { |n| 2.2 * n + 3.1 } + + assert_fit :linear, x, y, 1.0, 3.1, 2.2 + end + + def test_fit_linear_noisy + x = [ 60, 61, 62, 63, 65] + y = [3.1, 3.6, 3.8, 4.0, 4.1] + + # verified in numbers and R + assert_fit :linear, x, y, 0.8315, -7.9635, 0.1878 + end + + def test_fit_power_clean + # y = A x ** B, where B = b and A = e ** a + # if, A = 1, B = 2, then + + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = [1.0, 4.0, 9.0, 16.0, 25.0] + + assert_fit :power, x, y, 1.0, 1.0, 2.0 + end + + def test_fit_power_noisy + # from www.engr.uidaho.edu/thompson/courses/ME330/lecture/least_squares.html + x = [10, 12, 15, 17, 20, 22, 25, 27, 30, 32, 35] + y = [95, 105, 125, 141, 173, 200, 253, 298, 385, 459, 602] + + # verified in numbers + assert_fit :power, x, y, 0.90, 2.6217, 1.4556 + + # income to % of households below income amount + # http://library.wolfram.com/infocenter/Conferences/6461/PowerLaws.nb + x = [15_000, 25_000, 35_000, 50_000, 75_000, 100_000] + y = [0.154, 0.283, 0.402, 0.55, 0.733, 0.843] + + # verified in numbers + assert_fit :power, x, y, 0.96, 3.119e-5, 0.8959 + end + + def assert_fit msg, x, y, fit, exp_a, exp_b + bench = Minitest::Benchmark.new :blah + + a, b, rr = bench.send "fit_#{msg}", x, y + + assert_operator rr, :>=, fit if fit + assert_in_delta exp_a, a + assert_in_delta exp_b, b + end +end + +describe "my class Bench" do + klass = self + it "should provide bench methods" do + klass.must_respond_to :bench + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_mock.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_mock.rb new file mode 100644 index 0000000..7128d5b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_mock.rb @@ -0,0 +1,872 @@ +require "minitest/autorun" + +class TestMinitestMock < Minitest::Test + parallelize_me! 
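The tests that follow exercise Minitest::Mock and Object#stub. As a quick orientation, here is a minimal sketch of that API; the JukeboxTest/playlist names are invented for illustration, while expect, assert_mock, and stub are the real minitest calls under test, and the sketch is not part of the vendored file itself.

    require "minitest/autorun"   # autorun also loads minitest/mock

    class JukeboxTest < Minitest::Test   # hypothetical example class
      def test_playlist_is_queried
        playlist = Minitest::Mock.new
        # expect(name, return_value, arg_specs): one call to :next_song with a String arg
        playlist.expect :next_song, "Blue Monday", [String]

        assert_equal "Blue Monday", playlist.next_song("pop")
        assert_mock playlist       # fails if any expectation was not satisfied

        # Object#stub swaps a method out only for the duration of the block
        Time.stub :now, Time.at(0) do
          assert_equal 0, Time.now.to_i
        end
      end
    end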
+ + def setup + @mock = Minitest::Mock.new.expect(:foo, nil) + @mock.expect(:meaning_of_life, 42) + end + + def test_create_stub_method + assert_nil @mock.foo + end + + def test_allow_return_value_specification + assert_equal 42, @mock.meaning_of_life + end + + def test_blow_up_if_not_called + @mock.foo + + util_verify_bad "expected meaning_of_life() => 42" + end + + def test_not_blow_up_if_everything_called + @mock.foo + @mock.meaning_of_life + + assert_mock @mock + end + + def test_allow_expectations_to_be_added_after_creation + @mock.expect(:bar, true) + assert @mock.bar + end + + def test_not_verify_if_new_expected_method_is_not_called + @mock.foo + @mock.meaning_of_life + @mock.expect(:bar, true) + + util_verify_bad "expected bar() => true" + end + + def test_blow_up_on_wrong_number_of_arguments + @mock.foo + @mock.meaning_of_life + @mock.expect(:sum, 3, [1, 2]) + + e = assert_raises ArgumentError do + @mock.sum + end + + assert_equal "mocked method :sum expects 2 arguments, got 0", e.message + end + + def test_return_mock_does_not_raise + retval = Minitest::Mock.new + mock = Minitest::Mock.new + mock.expect(:foo, retval) + mock.foo + + assert_mock mock + end + + def test_mock_args_does_not_raise + arg = Minitest::Mock.new + mock = Minitest::Mock.new + mock.expect(:foo, nil, [arg]) + mock.foo(arg) + + assert_mock mock + end + + def test_set_expectation_on_special_methods + mock = Minitest::Mock.new + + mock.expect :object_id, "received object_id" + assert_equal "received object_id", mock.object_id + + mock.expect :respond_to_missing?, "received respond_to_missing?" + assert_equal "received respond_to_missing?", mock.respond_to_missing? + + mock.expect :===, "received ===" + assert_equal "received ===", mock.=== + + mock.expect :inspect, "received inspect" + assert_equal "received inspect", mock.inspect + + mock.expect :to_s, "received to_s" + assert_equal "received to_s", mock.to_s + + mock.expect :public_send, "received public_send" + assert_equal "received public_send", mock.public_send + + mock.expect :send, "received send" + assert_equal "received send", mock.send + + assert_mock mock + end + + def test_expectations_can_be_satisfied_via_send + @mock.send :foo + @mock.send :meaning_of_life + + assert_mock @mock + end + + def test_expectations_can_be_satisfied_via_public_send + skip "Doesn't run on 1.8" if RUBY_VERSION < "1.9" + + @mock.public_send :foo + @mock.public_send :meaning_of_life + + assert_mock @mock + end + + def test_blow_up_on_wrong_arguments + @mock.foo + @mock.meaning_of_life + @mock.expect(:sum, 3, [1, 2]) + + e = assert_raises MockExpectationError do + @mock.sum(2, 4) + end + + exp = "mocked method :sum called with unexpected arguments [2, 4]" + assert_equal exp, e.message + end + + def test_expect_with_non_array_args + e = assert_raises ArgumentError do + @mock.expect :blah, 3, false + end + + assert_equal "args must be an array", e.message + end + + def test_respond_appropriately + assert @mock.respond_to?(:foo) + assert @mock.respond_to?(:foo, true) + assert @mock.respond_to?("foo") + assert !@mock.respond_to?(:bar) + end + + def test_no_method_error_on_unexpected_methods + e = assert_raises NoMethodError do + @mock.bar + end + + expected = "unmocked method :bar, expected one of [:foo, :meaning_of_life]" + + assert_equal expected, e.message + end + + def test_assign_per_mock_return_values + a = Minitest::Mock.new + b = Minitest::Mock.new + + a.expect(:foo, :a) + b.expect(:foo, :b) + + assert_equal :a, a.foo + assert_equal :b, b.foo + end + + def 
test_do_not_create_stub_method_on_new_mocks + a = Minitest::Mock.new + a.expect(:foo, :a) + + assert !Minitest::Mock.new.respond_to?(:foo) + end + + def test_mock_is_a_blank_slate + @mock.expect :kind_of?, true, [String] + @mock.expect :==, true, [1] + + assert @mock.kind_of?(String), "didn't mock :kind_of\?" + assert @mock == 1, "didn't mock :==" + end + + def test_verify_allows_called_args_to_be_loosely_specified + mock = Minitest::Mock.new + mock.expect :loose_expectation, true, [Integer] + mock.loose_expectation 1 + + assert_mock mock + end + + def test_verify_raises_with_strict_args + mock = Minitest::Mock.new + mock.expect :strict_expectation, true, [2] + + e = assert_raises MockExpectationError do + mock.strict_expectation 1 + end + + exp = "mocked method :strict_expectation called with unexpected arguments [1]" + assert_equal exp, e.message + end + + def test_method_missing_empty + mock = Minitest::Mock.new + + mock.expect :a, nil + + mock.a + + e = assert_raises MockExpectationError do + mock.a + end + + assert_equal "No more expects available for :a: []", e.message + end + + def test_same_method_expects_are_verified_when_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:baz] + + mock.foo :bar + mock.foo :baz + + assert_mock mock + end + + def test_same_method_expects_blow_up_when_not_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:baz] + + mock.foo :bar + + e = assert_raises(MockExpectationError) { mock.verify } + + exp = "expected foo(:baz) => nil, got [foo(:bar) => nil]" + + assert_equal exp, e.message + end + + def test_same_method_expects_with_same_args_blow_up_when_not_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:bar] + + mock.foo :bar + + e = assert_raises(MockExpectationError) { mock.verify } + + exp = "expected foo(:bar) => nil, got [foo(:bar) => nil]" + + assert_equal exp, e.message + end + + def test_verify_passes_when_mock_block_returns_true + mock = Minitest::Mock.new + mock.expect :foo, nil do + true + end + + mock.foo + + assert_mock mock + end + + def test_mock_block_is_passed_function_params + arg1, arg2, arg3 = :bar, [1, 2, 3], { :a => "a" } + mock = Minitest::Mock.new + mock.expect :foo, nil do |a1, a2, a3| + a1 == arg1 && a2 == arg2 && a3 == arg3 + end + + mock.foo arg1, arg2, arg3 + + assert_mock mock + end + + def test_mock_block_is_passed_function_block + mock = Minitest::Mock.new + block = proc { "bar" } + mock.expect :foo, nil do |arg, &blk| + arg == "foo" && + blk == block + end + mock.foo "foo", &block + assert_mock mock + end + + def test_verify_fails_when_mock_block_returns_false + mock = Minitest::Mock.new + mock.expect :foo, nil do + false + end + + e = assert_raises(MockExpectationError) { mock.foo } + exp = "mocked method :foo failed block w/ []" + + assert_equal exp, e.message + end + + def test_mock_block_throws_if_args_passed + mock = Minitest::Mock.new + + e = assert_raises(ArgumentError) do + mock.expect :foo, nil, [:a, :b, :c] do + true + end + end + + exp = "args ignored when block given" + + assert_equal exp, e.message + end + + def test_mock_returns_retval_when_called_with_block + mock = Minitest::Mock.new + mock.expect(:foo, 32) do + true + end + + rs = mock.foo + + assert_equal rs, 32 + end + + def util_verify_bad exp + e = assert_raises MockExpectationError do + @mock.verify + end + + assert_equal exp, e.message + end + + def test_mock_called_via_send + mock = Minitest::Mock.new + 
mock.expect(:foo, true) + + mock.send :foo + assert_mock mock + end + + def test_mock_called_via___send__ + mock = Minitest::Mock.new + mock.expect(:foo, true) + + mock.__send__ :foo + assert_mock mock + end + + def test_mock_called_via_send_with_args + mock = Minitest::Mock.new + mock.expect(:foo, true, [1, 2, 3]) + + mock.send(:foo, 1, 2, 3) + assert_mock mock + end + +end + +require "minitest/metametameta" + +class TestMinitestStub < Minitest::Test + # Do not parallelize since we're calling stub on class methods + + def setup + super + Minitest::Test.reset + + @tc = Minitest::Test.new "fake tc" + @assertion_count = 1 + end + + def teardown + super + assert_equal @assertion_count, @tc.assertions if self.passed? + end + + class Time + def self.now + 24 + end + end + + def assert_stub val_or_callable + @assertion_count += 1 + + t = Time.now.to_i + + Time.stub :now, val_or_callable do + @tc.assert_equal 42, Time.now + end + + @tc.assert_operator Time.now.to_i, :>=, t + end + + def test_stub_private_module_method + @assertion_count += 1 + + t0 = Time.now + + self.stub :sleep, nil do + @tc.assert_nil sleep(10) + end + + @tc.assert_operator Time.now - t0, :<=, 1 + end + + def test_stub_private_module_method_indirect + @assertion_count += 1 + + fail_clapper = Class.new do + def fail_clap + raise + :clap + end + end.new + + fail_clapper.stub :raise, nil do |safe_clapper| + @tc.assert_equal :clap, safe_clapper.fail_clap # either form works + @tc.assert_equal :clap, fail_clapper.fail_clap # yay closures + end + end + + def test_stub_public_module_method + Math.stub :log10, :stubbed do + @tc.assert_equal :stubbed, Math.log10(1000) + end + end + + def test_stub_value + assert_stub 42 + end + + def test_stub_block + assert_stub lambda { 42 } + end + + def test_stub_block_args + @assertion_count += 1 + + t = Time.now.to_i + + Time.stub :now, lambda { |n| n * 2 } do + @tc.assert_equal 42, Time.now(21) + end + + @tc.assert_operator Time.now.to_i, :>=, t + end + + def test_stub_callable + obj = Object.new + + def obj.call + 42 + end + + assert_stub obj + end + + def test_stub_yield_self + obj = "foo" + + val = obj.stub :to_s, "bar" do |s| + s.to_s + end + + @tc.assert_equal "bar", val + end + + def test_dynamic_method + @assertion_count = 2 + + dynamic = Class.new do + def self.respond_to? meth + meth == :found + end + + def self.method_missing meth, *args, &block + if meth == :found + false + else + super + end + end + end + + val = dynamic.stub(:found, true) do |s| + s.found + end + + @tc.assert_equal true, val + @tc.assert_equal false, dynamic.found + end + + def test_stub_NameError + e = @tc.assert_raises NameError do + Time.stub :nope_nope_nope, 42 do + # do nothing + end + end + + exp = /undefined method `nope_nope_nope' for( class)? 
`#{self.class}::Time'/ + assert_match exp, e.message + end + + def test_mock_with_yield + mock = Minitest::Mock.new + mock.expect(:write, true) do + true + end + rs = nil + + File.stub :open, true, mock do + File.open "foo.txt", "r" do |f| + rs = f.write + end + end + @tc.assert_equal true, rs + end + + alias test_stub_value__old test_stub_value # TODO: remove/rename + + ## Permutation Sets: + + # [:value, :lambda] + # [:*, :block, :block_call] + # [:**, :block_args] + # + # Where: + # + # :value = a normal value + # :lambda = callable or lambda + # :* = no block + # :block = normal block + # :block_call = :lambda invokes the block (N/A for :value) + # :** = no args + # :args = args passed to stub + + ## Permutations + + # [:call, :*, :**] =>5 callable+block FIX: CALL BOTH (bug) + # [:call, :*, :**] =>6 callable + + # [:lambda, :*, :**] => lambda result + + # [:lambda, :*, :args] => lambda result NO ARGS + + # [:lambda, :block, :**] =>5 lambda result FIX: CALL BOTH (bug) + # [:lambda, :block, :**] =>6 lambda result + + # [:lambda, :block, :args] =>5 lambda result FIX: CALL BOTH (bug) + # [:lambda, :block, :args] =>6 lambda result + # [:lambda, :block, :args] =>7 raise ArgumentError + + # [:lambda, :block_call, :**] =>5 lambda FIX: BUG!-not passed block to lambda + # [:lambda, :block_call, :**] =>6 lambda+block result + + # [:lambda, :block_call, :args] =>5 lambda FIX: BUG!-not passed block to lambda + # [:lambda, :block_call, :args] =>6 lambda+block result + + # [:value, :*, :**] => value + + # [:value, :*, :args] => value, ignore args + + # [:value, :block, :**] =>5 value, call block + # [:value, :block, :**] =>6 value + + # [:value, :block, :args] =>5 value, call block w/ args + # [:value, :block, :args] =>6 value, call block w/ args, deprecated + # [:value, :block, :args] =>7 raise ArgumentError + + # [:value, :block_call, :**] => N/A + + # [:value, :block_call, :args] => N/A + + class Bar + def call + puts "hi" + end + end + + class Foo + def self.blocking + yield + end + end + + class Thingy + def self.identity arg + arg + end + end + + def test_stub_callable_block_5 # from tenderlove + @assertion_count += 1 + Foo.stub5 :blocking, Bar.new do + @tc.assert_output "hi\n", "" do + Foo.blocking do + @tc.flunk "shouldn't ever hit this" + end + end + end + end + + def test_stub_callable_block_6 # from tenderlove + skip_stub6 + + @assertion_count += 1 + Foo.stub6 :blocking, Bar.new do + @tc.assert_output "hi\n", "" do + Foo.blocking do + @tc.flunk "shouldn't ever hit this" + end + end + end + end + + def test_stub_lambda + Thread.stub :new, lambda { 21+21 } do + @tc.assert_equal 42, Thread.new + end + end + + def test_stub_lambda_args + Thread.stub :new, lambda { 21+21 }, :wtf do + @tc.assert_equal 42, Thread.new + end + end + + def test_stub_lambda_block_5 + Thread.stub5 :new, lambda { 21+21 } do + result = Thread.new do + @tc.flunk "shouldn't ever hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_6 + skip_stub6 + + Thread.stub6 :new, lambda { 21+21 } do + result = Thread.new do + @tc.flunk "shouldn't ever hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_5 + @assertion_count += 1 + Thingy.stub5 :identity, lambda { |y| @tc.assert_equal :nope, y; 21+21 }, :WTF? 
do + result = Thingy.identity :nope do |x| + @tc.flunk "shouldn't reach this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_6 + skip_stub6 + + @assertion_count += 1 + Thingy.stub6 :identity, lambda { |y| @tc.assert_equal :nope, y; 21+21 }, :WTF? do + result = Thingy.identity :nope do |x| + @tc.flunk "shouldn't reach this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_6_2 + skip_stub6 + + @tc.assert_raises ArgumentError do + Thingy.stub6_2 :identity, lambda { |y| :__not_run__ }, :WTF? do + # doesn't matter + end + end + end + + def test_stub_lambda_block_call_5 + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub5 :open, lambda { |p, m, &blk| blk and blk.call io } do + File.open "foo.txt", "r" do |f| + rs = f && f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_6 + skip_stub6 + + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub6 :open, lambda { |p, m, &blk| blk.call io } do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_5 + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub5(:open, lambda { |p, m, &blk| blk and blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_6 + skip_stub6 + + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub6(:open, lambda { |p, m, &blk| blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_6_2 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + @tc.assert_raises ArgumentError do + File.stub6_2(:open, lambda { |p, m, &blk| blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + end + @tc.assert_nil rs + @tc.assert_equal "", io.string + end + + def test_stub_value + Thread.stub :new, 42 do + result = Thread.new + @tc.assert_equal 42, result + end + end + + def test_stub_value_args + Thread.stub :new, 42, :WTF? 
do + result = Thread.new + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_5 + @assertion_count += 1 + Thread.stub5 :new, 42 do + result = Thread.new do + @tc.assert true + end + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_6 + skip_stub6 + + Thread.stub6 :new, 42 do + result = Thread.new do + @tc.flunk "shouldn't hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_args_5 + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + File.stub5 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + @tc.assert_equal :value, result + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_value_block_args_5__break_if_not_passed + e = @tc.assert_raises NoMethodError do + File.stub5 :open, :return_value do # intentionally bad setup w/ no args + File.open "foo.txt", "r" do |f| + f.write "woot" + end + end + end + exp = "undefined method `write' for nil:NilClass" + assert_equal exp, e.message + end + + def test_stub_value_block_args_6 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + assert_deprecated do + File.stub6 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + @tc.assert_equal :value, result + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_value_block_args_6_2 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + @tc.assert_raises ArgumentError do + File.stub6_2 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + @tc.flunk "shouldn't hit this" + end + @tc.assert_equal :value, result + end + end + @tc.assert_nil rs + @tc.assert_equal "", io.string + end + + def assert_deprecated re = /deprecated/ + assert_output "", re do + yield + end + end + + def skip_stub6 + skip "not yet" unless STUB6 + end +end + +STUB6 = ENV["STUB6"] + +if STUB6 then + require "minitest/mock6" if STUB6 +else + class Object + alias stub5 stub + alias stub6 stub + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_reporter.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_reporter.rb new file mode 100644 index 0000000..6619d03 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_reporter.rb @@ -0,0 +1,299 @@ +require "minitest/autorun" +require "minitest/metametameta" + +class Runnable + def woot + assert true + end +end + +class TestMinitestReporter < MetaMetaMetaTestCase + + attr_accessor :r, :io + + def new_composite_reporter + reporter = Minitest::CompositeReporter.new + reporter << Minitest::SummaryReporter.new(self.io) + reporter << Minitest::ProgressReporter.new(self.io) + + def reporter.first + reporters.first + end + + def reporter.results + first.results + end + + def reporter.count + first.count + end + + def reporter.assertions + first.assertions + end + + reporter + end + + def setup + self.io = StringIO.new("") + self.r = new_composite_reporter + end + + def error_test + unless defined? @et then + @et = Minitest::Test.new(:woot) + @et.failures << Minitest::UnexpectedError.new(begin + raise "no" + rescue => e + e + end) + @et = Minitest::Result.from @et + end + @et + end + + def fail_test + unless defined? 
@ft then + @ft = Minitest::Test.new(:woot) + @ft.failures << begin + raise Minitest::Assertion, "boo" + rescue Minitest::Assertion => e + e + end + @ft = Minitest::Result.from @ft + end + @ft + end + + def passing_test + @pt ||= Minitest::Result.from Minitest::Test.new(:woot) + end + + def skip_test + unless defined? @st then + @st = Minitest::Test.new(:woot) + @st.failures << begin + raise Minitest::Skip + rescue Minitest::Assertion => e + e + end + @st = Minitest::Result.from @st + end + @st + end + + def test_to_s + r.record passing_test + r.record fail_test + assert_match "woot", r.first.to_s + end + + def test_passed_eh_empty + assert_predicate r, :passed? + end + + def test_passed_eh_failure + r.results << fail_test + + refute_predicate r, :passed? + end + + SKIP_MSG = "\n\nYou have skipped tests. Run with --verbose for details." + + def test_passed_eh_error + r.start + + r.results << error_test + + refute_predicate r, :passed? + + r.report + + refute_match SKIP_MSG, io.string + end + + def test_passed_eh_skipped + r.start + r.results << skip_test + assert r.passed? + + restore_env do + r.report + end + + assert_match SKIP_MSG, io.string + end + + def test_passed_eh_skipped_verbose + r.first.options[:verbose] = true + + r.start + r.results << skip_test + assert r.passed? + r.report + + refute_match SKIP_MSG, io.string + end + + def test_start + r.start + + exp = "Run options: \n\n# Running:\n\n" + + assert_equal exp, io.string + end + + def test_record_pass + r.record passing_test + + assert_equal ".", io.string + assert_empty r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_fail + fail_test = self.fail_test + r.record fail_test + + assert_equal "F", io.string + assert_equal [fail_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_error + error_test = self.error_test + r.record error_test + + assert_equal "E", io.string + assert_equal [error_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_skip + skip_test = self.skip_test + r.record skip_test + + assert_equal "S", io.string + assert_equal [skip_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_report_empty + r.start + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + + + Finished in 0.00 + + 0 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_passing + r.start + r.record passing_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + . 
+ + Finished in 0.00 + + 1 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_failure + r.start + r.record fail_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + F + + Finished in 0.00 + + 1) Failure: + Minitest::Test#woot [FILE:LINE]: + boo + + 1 runs, 0 assertions, 1 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_error + r.start + r.record error_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + E + + Finished in 0.00 + + 1) Error: + Minitest::Test#woot: + RuntimeError: no + FILE:LINE:in `error_test' + FILE:LINE:in `test_report_error' + + 1 runs, 0 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_skipped + r.start + r.record skip_test + + restore_env do + r.report + end + + exp = clean <<-EOM + Run options: + + # Running: + + S + + Finished in 0.00 + + 1 runs, 0 assertions, 0 failures, 0 errors, 1 skips + + You have skipped tests. Run with --verbose for details. + EOM + + assert_equal exp, normalize_output(io.string) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_spec.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_spec.rb new file mode 100644 index 0000000..fe4710d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_spec.rb @@ -0,0 +1,1061 @@ +# encoding: utf-8 +require "minitest/metametameta" +require "stringio" + +class MiniSpecA < Minitest::Spec; end +class MiniSpecB < Minitest::Test; extend Minitest::Spec::DSL; end +class MiniSpecC < MiniSpecB; end +class NamedExampleA < MiniSpecA; end +class NamedExampleB < MiniSpecB; end +class NamedExampleC < MiniSpecC; end +class ExampleA; end +class ExampleB < ExampleA; end + +describe Minitest::Spec do + # helps to deal with 2.4 deprecation of Fixnum for Integer + Int = 1.class + + # do not parallelize this suite... it just can"t handle it. + + def assert_triggered expected = "blah", klass = Minitest::Assertion + @assertion_count += 1 + + e = assert_raises(klass) do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, '\1') + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + msg.gsub!(/(\d\.\d{6})\d+/, '\1xxx') # normalize: ruby version, impl, platform + msg.gsub!(/:0x[Xa-fA-F0-9]{4,}[ @].+?>/, ":0xXXXXXX@PATH>") + + if expected + @assertion_count += 1 + case expected + when String then + assert_equal expected, msg + when Regexp then + @assertion_count += 1 + assert_match expected, msg + else + flunk "Unknown: #{expected.inspect}" + end + end + end + + def assert_success spec + assert_equal true, spec + end + + before do + @assertion_count = 4 + end + + after do + _(self.assertions).must_equal @assertion_count if passed? and not skipped? + end + + it "needs to be able to catch a Minitest::Assertion exception" do + @assertion_count = 1 + + assert_triggered "Expected 1 to not be equal to 1." do + _(1).wont_equal 1 + end + end + + it "needs to check for file existence" do + @assertion_count = 3 + + assert_success _(__FILE__).path_must_exist + + assert_triggered "Expected path 'blah' to exist." do + _("blah").path_must_exist + end + end + + it "needs to check for file non-existence" do + @assertion_count = 3 + + assert_success _("blah").path_wont_exist + + assert_triggered "Expected path '#{__FILE__}' to not exist." 
do + _(__FILE__).path_wont_exist + end + end + + it "needs to be sensible about must_include order" do + @assertion_count += 3 # must_include is 2 assertions + + assert_success _([1, 2, 3]).must_include(2) + + assert_triggered "Expected [1, 2, 3] to include 5." do + _([1, 2, 3]).must_include 5 + end + + assert_triggered "msg.\nExpected [1, 2, 3] to include 5." do + _([1, 2, 3]).must_include 5, "msg" + end + end + + it "needs to be sensible about wont_include order" do + @assertion_count += 3 # wont_include is 2 assertions + + assert_success _([1, 2, 3]).wont_include(5) + + assert_triggered "Expected [1, 2, 3] to not include 2." do + _([1, 2, 3]).wont_include 2 + end + + assert_triggered "msg.\nExpected [1, 2, 3] to not include 2." do + _([1, 2, 3]).wont_include 2, "msg" + end + end + + it "needs to catch an expected exception" do + @assertion_count = 2 + + expect { raise "blah" }.must_raise RuntimeError + expect { raise Minitest::Assertion }.must_raise Minitest::Assertion + end + + it "needs to catch an unexpected exception" do + @assertion_count -= 2 # no positive + + msg = <<-EOM.gsub(/^ {6}/, "").chomp + [RuntimeError] exception expected, not + Class: + Message: <"woot"> + ---Backtrace--- + EOM + + assert_triggered msg do + expect { raise StandardError, "woot" }.must_raise RuntimeError + end + + assert_triggered "msg.\n#{msg}" do + expect { raise StandardError, "woot" }.must_raise RuntimeError, "msg" + end + end + + it "needs to ensure silence" do + @assertion_count -= 1 # no msg + @assertion_count += 2 # assert_output is 2 assertions + + assert_success expect {}.must_be_silent + + assert_triggered "In stdout.\nExpected: \"\"\n Actual: \"xxx\"" do + expect { print "xxx" }.must_be_silent + end + end + + it "needs to have all methods named well" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count = 2 + + methods = Minitest::Expectations.public_instance_methods.grep(/must|wont/) + methods.map!(&:to_s) if Symbol === methods.first + + musts, wonts = methods.sort.partition { |m| m =~ /must/ } + + expected_musts = %w[must_be + must_be_close_to + must_be_empty + must_be_instance_of + must_be_kind_of + must_be_nil + must_be_same_as + must_be_silent + must_be_within_delta + must_be_within_epsilon + must_equal + must_include + must_match + must_output + must_raise + must_respond_to + must_throw + path_must_exist] + + bad = %w[not raise throw send output be_silent] + + expected_wonts = expected_musts.map { |m| m.sub(/must/, "wont") }.sort + expected_wonts.reject! { |m| m =~ /wont_#{Regexp.union(*bad)}/ } + + _(musts).must_equal expected_musts + _(wonts).must_equal expected_wonts + end + + it "needs to raise if an expected exception is not raised" do + @assertion_count -= 2 # no positive test + + assert_triggered "RuntimeError expected but nothing was raised." do + expect { 42 }.must_raise RuntimeError + end + + assert_triggered "msg.\nRuntimeError expected but nothing was raised." do + expect { 42 }.must_raise RuntimeError, "msg" + end + end + + it "needs to verify binary messages" do + assert_success _(42).wont_be(:<, 24) + + assert_triggered "Expected 24 to not be < 42." do + _(24).wont_be :<, 42 + end + + assert_triggered "msg.\nExpected 24 to not be < 42." do + _(24).wont_be :<, 42, "msg" + end + end + + it "needs to verify emptyness" do + @assertion_count += 3 # empty is 2 assertions + + assert_success _([]).must_be_empty + + assert_triggered "Expected [42] to be empty." do + _([42]).must_be_empty + end + + assert_triggered "msg.\nExpected [42] to be empty." 
do + _([42]).must_be_empty "msg" + end + end + + it "needs to verify equality" do + @assertion_count += 1 + + assert_success _(6 * 7).must_equal(42) + + assert_triggered "Expected: 42\n Actual: 54" do + _(6 * 9).must_equal 42 + end + + assert_triggered "msg.\nExpected: 42\n Actual: 54" do + _(6 * 9).must_equal 42, "msg" + end + + assert_triggered(/^-42\n\+#\n/) do + _(proc { 42 }).must_equal 42 # proc isn't called, so expectation fails + end + end + + it "needs to warn on equality with nil" do + @assertion_count += 1 # extra test + + out, err = capture_io do + assert_success _(nil).must_equal(nil) + end + + exp = "DEPRECATED: Use assert_nil if expecting nil from #{__FILE__}:#{__LINE__-3}. " \ + "This will fail in Minitest 6.\n" + exp = "" if $-w.nil? + + assert_empty out + assert_equal exp, err + end + + it "needs to verify floats outside a delta" do + @assertion_count += 1 # extra test + + assert_success _(24).wont_be_close_to(42) + + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= 0.001." do + _(6 * 7.0).wont_be_close_to 42 + end + + x = "1.0e-05" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_close_to 42, 0.00001 + end + + assert_triggered "msg.\nExpected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_close_to 42, 0.00001, "msg" + end + end + + it "needs to verify floats outside an epsilon" do + @assertion_count += 1 # extra test + + assert_success _(24).wont_be_within_epsilon(42) + + x = "0.042" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42 + end + + x = "0.00042" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42, 0.00001 + end + + assert_triggered "msg.\nExpected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42, 0.00001, "msg" + end + end + + it "needs to verify floats within a delta" do + @assertion_count += 1 # extra test + + assert_success _(6.0 * 7).must_be_close_to(42.0) + + assert_triggered "Expected |0.0 - 0.01| (0.01) to be <= 0.001." do + _(1.0 / 100).must_be_close_to 0.0 + end + + x = "1.0e-06" + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= #{x}." do + _(1.0 / 1000).must_be_close_to 0.0, 0.000001 + end + + assert_triggered "msg.\nExpected |0.0 - 0.001| (0.001) to be <= #{x}." do + _(1.0 / 1000).must_be_close_to 0.0, 0.000001, "msg" + end + end + + it "needs to verify floats within an epsilon" do + @assertion_count += 1 # extra test + + assert_success _(6.0 * 7).must_be_within_epsilon(42.0) + + assert_triggered "Expected |0.0 - 0.01| (0.01) to be <= 0.0." do + _(1.0 / 100).must_be_within_epsilon 0.0 + end + + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= 0.0." do + _(1.0 / 1000).must_be_within_epsilon 0.0, 0.000001 + end + + assert_triggered "msg.\nExpected |0.0 - 0.001| (0.001) to be <= 0.0." do + _(1.0 / 1000).must_be_within_epsilon 0.0, 0.000001, "msg" + end + end + + it "needs to verify identity" do + assert_success _(1).must_be_same_as(1) + + assert_triggered "Expected 1 (oid=N) to be the same as 2 (oid=N)." do + _(1).must_be_same_as 2 + end + + assert_triggered "msg.\nExpected 1 (oid=N) to be the same as 2 (oid=N)." do + _(1).must_be_same_as 2, "msg" + end + end + + it "needs to verify inequality" do + @assertion_count += 2 + assert_success _(42).wont_equal(6 * 9) + assert_success _(proc {}).wont_equal(42) + + assert_triggered "Expected 1 to not be equal to 1." 
do + _(1).wont_equal 1 + end + + assert_triggered "msg.\nExpected 1 to not be equal to 1." do + _(1).wont_equal 1, "msg" + end + end + + it "needs to verify instances of a class" do + assert_success _(42).wont_be_instance_of(String) + + assert_triggered "Expected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int + end + + assert_triggered "msg.\nExpected 42 to not be an instance of #{Int.name}." do + _(42).wont_be_instance_of Int, "msg" + end + end + + it "needs to verify kinds of a class" do + @assertion_count += 2 + + assert_success _(42).wont_be_kind_of(String) + assert_success _(proc {}).wont_be_kind_of(String) + + assert_triggered "Expected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int + end + + assert_triggered "msg.\nExpected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int, "msg" + end + end + + it "needs to verify kinds of objects" do + @assertion_count += 3 # extra test + + assert_success _(6 * 7).must_be_kind_of(Int) + assert_success _(6 * 7).must_be_kind_of(Numeric) + + assert_triggered "Expected 42 to be a kind of String, not #{Int.name}." do + _(6 * 7).must_be_kind_of String + end + + assert_triggered "msg.\nExpected 42 to be a kind of String, not #{Int.name}." do + _(6 * 7).must_be_kind_of String, "msg" + end + + exp = "Expected # to be a kind of String, not Proc." + assert_triggered exp do + _(proc {}).must_be_kind_of String + end + end + + it "needs to verify mismatch" do + @assertion_count += 3 # match is 2 + + assert_success _("blah").wont_match(/\d+/) + + assert_triggered "Expected /\\w+/ to not match \"blah\"." do + _("blah").wont_match(/\w+/) + end + + assert_triggered "msg.\nExpected /\\w+/ to not match \"blah\"." do + _("blah").wont_match(/\w+/, "msg") + end + end + + it "needs to verify nil" do + assert_success _(nil).must_be_nil + + assert_triggered "Expected 42 to be nil." do + _(42).must_be_nil + end + + assert_triggered "msg.\nExpected 42 to be nil." do + _(42).must_be_nil "msg" + end + end + + it "needs to verify non-emptyness" do + @assertion_count += 3 # empty is 2 assertions + + assert_success _(["some item"]).wont_be_empty + + assert_triggered "Expected [] to not be empty." do + _([]).wont_be_empty + end + + assert_triggered "msg.\nExpected [] to not be empty." do + _([]).wont_be_empty "msg" + end + end + + it "needs to verify non-identity" do + assert_success _(1).wont_be_same_as(2) + + assert_triggered "Expected 1 (oid=N) to not be the same as 1 (oid=N)." do + _(1).wont_be_same_as 1 + end + + assert_triggered "msg.\nExpected 1 (oid=N) to not be the same as 1 (oid=N)." do + _(1).wont_be_same_as 1, "msg" + end + end + + it "needs to verify non-nil" do + assert_success _(42).wont_be_nil + + assert_triggered "Expected nil to not be nil." do + _(nil).wont_be_nil + end + + assert_triggered "msg.\nExpected nil to not be nil." do + _(nil).wont_be_nil "msg" + end + end + + it "needs to verify objects not responding to a message" do + assert_success _("").wont_respond_to(:woot!) + + assert_triggered "Expected \"\" to not respond to to_s." do + _("").wont_respond_to :to_s + end + + assert_triggered "msg.\nExpected \"\" to not respond to to_s." 
do + _("").wont_respond_to :to_s, "msg" + end + end + + it "needs to verify output in stderr" do + @assertion_count -= 1 # no msg + + assert_success expect { $stderr.print "blah" }.must_output(nil, "blah") + + assert_triggered "In stderr.\nExpected: \"blah\"\n Actual: \"xxx\"" do + expect { $stderr.print "xxx" }.must_output(nil, "blah") + end + end + + it "needs to verify output in stdout" do + @assertion_count -= 1 # no msg + + assert_success expect { print "blah" }.must_output("blah") + + assert_triggered "In stdout.\nExpected: \"blah\"\n Actual: \"xxx\"" do + expect { print "xxx" }.must_output("blah") + end + end + + it "needs to verify regexp matches" do + @assertion_count += 3 # must_match is 2 assertions + + assert_success _("blah").must_match(/\w+/) + + assert_triggered "Expected /\\d+/ to match \"blah\"." do + _("blah").must_match(/\d+/) + end + + assert_triggered "msg.\nExpected /\\d+/ to match \"blah\"." do + _("blah").must_match(/\d+/, "msg") + end + end + + describe "expect" do + before do + @assertion_count -= 3 + end + + it "can use expect" do + _(1 + 1).must_equal 2 + end + + it "can use expect with a lambda" do + _ { raise "blah" }.must_raise RuntimeError + end + + it "can use expect in a thread" do + Thread.new { _(1 + 1).must_equal 2 }.join + end + + it "can NOT use must_equal in a thread. It must use expect in a thread" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + assert_raises RuntimeError do + capture_io do + Thread.new { (1 + 1).must_equal 2 }.join + end + end + end + + it "fails gracefully when expectation used outside of `it`" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 1 + + e = assert_raises RuntimeError do + capture_io do + Thread.new { # forces ctx to be nil + describe("woot") do + (1 + 1).must_equal 2 + end + }.join + end + end + + assert_equal "Calling #must_equal outside of test.", e.message + end + + it "deprecates expectation used without _" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 3 + + exp = /DEPRECATED: global use of must_equal from/ + + assert_output "", exp do + (1 + 1).must_equal 2 + end + end + + # https://github.com/seattlerb/minitest/issues/837 + # https://github.com/rails/rails/pull/39304 + it "deprecates expectation used without _ with empty backtrace_filter" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 3 + + exp = /DEPRECATED: global use of must_equal from/ + + with_empty_backtrace_filter do + assert_output "", exp do + (1 + 1).must_equal 2 + end + end + end + end + + it "needs to verify throw" do + @assertion_count += 2 # 2 extra tests + + assert_success expect { throw :blah }.must_throw(:blah) + + assert_triggered "Expected :blah to have been thrown." do + expect {}.must_throw :blah + end + + assert_triggered "Expected :blah to have been thrown, not :xxx." do + expect { throw :xxx }.must_throw :blah + end + + assert_triggered "msg.\nExpected :blah to have been thrown." do + expect {}.must_throw :blah, "msg" + end + + assert_triggered "msg.\nExpected :blah to have been thrown, not :xxx." do + expect { throw :xxx }.must_throw :blah, "msg" + end + end + + it "needs to verify types of objects" do + assert_success _(6 * 7).must_be_instance_of(Int) + + exp = "Expected 42 to be an instance of String, not #{Int.name}." 
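For readers less familiar with the spec DSL these examples verify, a small self-contained sketch of describe/it with the _() expectation wrapper. The stack scenario is invented for illustration; must_be_empty, must_equal, wont_include, and must_raise are the expectations exercised throughout this file.

    require "minitest/autorun"

    describe "a stack (illustrative example)" do
      before { @stack = [] }

      it "starts empty" do
        _(@stack).must_be_empty
      end

      it "pops in LIFO order" do
        @stack.push 1
        @stack.push 2
        _(@stack.pop).must_equal 2
        _(@stack).wont_include 2
        _ { @stack.fetch 9 }.must_raise IndexError   # block form for exception expectations
      end
    end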
+ + assert_triggered exp do + _(6 * 7).must_be_instance_of String + end + + assert_triggered "msg.\n#{exp}" do + _(6 * 7).must_be_instance_of String, "msg" + end + end + + it "needs to verify using any (negative) predicate" do + @assertion_count -= 1 # doesn"t take a message + + assert_success _("blah").wont_be(:empty?) + + assert_triggered "Expected \"\" to not be empty?." do + _("").wont_be :empty? + end + end + + it "needs to verify using any binary operator" do + @assertion_count -= 1 # no msg + + assert_success _(41).must_be(:<, 42) + + assert_triggered "Expected 42 to be < 41." do + _(42).must_be(:<, 41) + end + end + + it "needs to verify using any predicate" do + @assertion_count -= 1 # no msg + + assert_success _("").must_be(:empty?) + + assert_triggered "Expected \"blah\" to be empty?." do + _("blah").must_be :empty? + end + end + + it "needs to verify using respond_to" do + assert_success _(42).must_respond_to(:+) + + assert_triggered "Expected 42 (#{Int.name}) to respond to #clear." do + _(42).must_respond_to :clear + end + + assert_triggered "msg.\nExpected 42 (#{Int.name}) to respond to #clear." do + _(42).must_respond_to :clear, "msg" + end + end +end + +describe Minitest::Spec, :let do + i_suck_and_my_tests_are_order_dependent! + + def _count + $let_count ||= 0 + end + + let :count do + $let_count += 1 + $let_count + end + + it "is evaluated once per example" do + _(_count).must_equal 0 + + _(count).must_equal 1 + _(count).must_equal 1 + + _(_count).must_equal 1 + end + + it "is REALLY evaluated once per example" do + _(_count).must_equal 1 + + _(count).must_equal 2 + _(count).must_equal 2 + + _(_count).must_equal 2 + end + + it 'raises an error if the name begins with "test"' do + expect { self.class.let(:test_value) { true } }.must_raise ArgumentError + end + + it "raises an error if the name shadows a normal instance method" do + expect { self.class.let(:message) { true } }.must_raise ArgumentError + end + + it "doesn't raise an error if it is just another let" do + v = proc do + describe :outer do + let(:bar) + describe :inner do + let(:bar) + end + end + :good + end.call + _(v).must_equal :good + end + + it "procs come after dont_flip" do + p = proc {} + assert_respond_to p, :call + _(p).must_respond_to :call + end +end + +describe Minitest::Spec, :subject do + attr_reader :subject_evaluation_count + + subject do + @subject_evaluation_count ||= 0 + @subject_evaluation_count += 1 + @subject_evaluation_count + end + + it "is evaluated once per example" do + _(subject).must_equal 1 + _(subject).must_equal 1 + _(subject_evaluation_count).must_equal 1 + end +end + +class TestMetaStatic < Minitest::Test + def test_children + Minitest::Spec.children.clear # prevents parallel run + + y = z = nil + x = describe "top-level thingy" do + y = describe "first thingy" do end + + it "top-level-it" do end + + z = describe "second thingy" do end + end + + assert_equal [x], Minitest::Spec.children + assert_equal [y, z], x.children + assert_equal [], y.children + assert_equal [], z.children + end + + def test_it_wont_remove_existing_child_test_methods + Minitest::Spec.children.clear # prevents parallel run + + inner = nil + outer = describe "outer" do + inner = describe "inner" do + it do + assert true + end + end + it do + assert true + end + end + + assert_equal 1, outer.public_instance_methods.grep(/^test_/).count + assert_equal 1, inner.public_instance_methods.grep(/^test_/).count + end + + def test_it_wont_add_test_methods_to_children + Minitest::Spec.children.clear # prevents 
parallel run + + inner = nil + outer = describe "outer" do + inner = describe "inner" do end + it do + assert true + end + end + + assert_equal 1, outer.public_instance_methods.grep(/^test_/).count + assert_equal 0, inner.public_instance_methods.grep(/^test_/).count + end +end + +class TestMeta < MetaMetaMetaTestCase + # do not call parallelize_me! here because specs use register_spec_type globally + + def util_structure + y = z = nil + before_list = [] + after_list = [] + x = describe "top-level thingy" do + before { before_list << 1 } + after { after_list << 1 } + + it "top-level-it" do end + + y = describe "inner thingy" do + before { before_list << 2 } + after { after_list << 2 } + it "inner-it" do end + + z = describe "very inner thingy" do + before { before_list << 3 } + after { after_list << 3 } + it "inner-it" do end + + it { } # ignore me + specify { } # anonymous it + end + end + end + + return x, y, z, before_list, after_list + end + + def test_register_spec_type + original_types = Minitest::Spec::TYPES.dup + + assert_includes Minitest::Spec::TYPES, [//, Minitest::Spec] + + Minitest::Spec.register_spec_type(/woot/, TestMeta) + + p = lambda do |_| true end + Minitest::Spec.register_spec_type TestMeta, &p + + keys = Minitest::Spec::TYPES.map(&:first) + + assert_includes keys, /woot/ + assert_includes keys, p + ensure + Minitest::Spec::TYPES.replace original_types + end + + def test_spec_type + original_types = Minitest::Spec::TYPES.dup + + Minitest::Spec.register_spec_type(/A$/, MiniSpecA) + Minitest::Spec.register_spec_type MiniSpecB do |desc| + desc.superclass == ExampleA + end + Minitest::Spec.register_spec_type MiniSpecC do |_desc, *addl| + addl.include? :woot + end + + assert_equal MiniSpecA, Minitest::Spec.spec_type(ExampleA) + assert_equal MiniSpecB, Minitest::Spec.spec_type(ExampleB) + assert_equal MiniSpecC, Minitest::Spec.spec_type(ExampleB, :woot) + ensure + Minitest::Spec::TYPES.replace original_types + end + + def test_bug_dsl_expectations + spec_class = Class.new MiniSpecB do + it "should work" do + _(0).must_equal 0 + end + end + + test_name = spec_class.instance_methods.sort.grep(/test/).first + + spec = spec_class.new test_name + + result = spec.run + + assert spec.passed? + assert result.passed? 
+ assert_equal 1, result.assertions + end + + def test_name + spec_a = describe ExampleA do; end + spec_b = describe ExampleB, :random_method do; end + spec_c = describe ExampleB, :random_method, :addl_context do; end + + assert_equal "ExampleA", spec_a.name + assert_equal "ExampleB::random_method", spec_b.name + assert_equal "ExampleB::random_method::addl_context", spec_c.name + end + + def test_name2 + assert_equal "NamedExampleA", NamedExampleA.name + assert_equal "NamedExampleB", NamedExampleB.name + assert_equal "NamedExampleC", NamedExampleC.name + + spec_a = describe ExampleA do; end + spec_b = describe ExampleB, :random_method do; end + + assert_equal "ExampleA", spec_a.name + assert_equal "ExampleB::random_method", spec_b.name + end + + def test_structure + x, y, z, * = util_structure + + assert_equal "top-level thingy", x.to_s + assert_equal "top-level thingy::inner thingy", y.to_s + assert_equal "top-level thingy::inner thingy::very inner thingy", z.to_s + + assert_equal "top-level thingy", x.desc + assert_equal "inner thingy", y.desc + assert_equal "very inner thingy", z.desc + + top_methods = %w[setup teardown test_0001_top-level-it] + inner_methods1 = %w[setup teardown test_0001_inner-it] + inner_methods2 = inner_methods1 + + %w[test_0002_anonymous test_0003_anonymous] + + assert_equal top_methods, x.instance_methods(false).sort.map(&:to_s) + assert_equal inner_methods1, y.instance_methods(false).sort.map(&:to_s) + assert_equal inner_methods2, z.instance_methods(false).sort.map(&:to_s) + end + + def test_structure_postfix_it + z = nil + y = describe "outer" do + # NOT here, below the inner-describe! + # it "inner-it" do end + + z = describe "inner" do + it "inner-it" do end + end + + # defined AFTER inner describe means we'll try to wipe out the inner-it + it "inner-it" do end + end + + assert_equal %w[test_0001_inner-it], y.instance_methods(false).map(&:to_s) + assert_equal %w[test_0001_inner-it], z.instance_methods(false).map(&:to_s) + end + + def test_setup_teardown_behavior + _, _, z, before_list, after_list = util_structure + + @tu = z + + run_tu_with_fresh_reporter + + size = z.runnable_methods.size + assert_equal [1, 2, 3] * size, before_list + assert_equal [3, 2, 1] * size, after_list + end + + def test_describe_first_structure + x1 = x2 = y = z = nil + x = describe "top-level thingy" do + y = describe "first thingy" do end + + x1 = it "top level it" do end + x2 = it "не латинские &いった α, β, γ, δ, ε hello!!! world" do end + + z = describe "second thingy" do end + end + + test_methods = ["test_0001_top level it", + "test_0002_не латинские &いった α, β, γ, δ, ε hello!!! 
world", + ].sort + + assert_equal test_methods, [x1, x2] + assert_equal test_methods, x.instance_methods.grep(/^test/).map(&:to_s).sort + assert_equal [], y.instance_methods.grep(/^test/) + assert_equal [], z.instance_methods.grep(/^test/) + end + + def test_structure_subclasses + z = nil + x = Class.new Minitest::Spec do + def xyz; end + end + y = Class.new x do + z = describe("inner") { } + end + + assert_respond_to x.new(nil), "xyz" + assert_respond_to y.new(nil), "xyz" + assert_respond_to z.new(nil), "xyz" + end +end + +class TestSpecInTestCase < MetaMetaMetaTestCase + def setup + super + + Thread.current[:current_spec] = self + @tc = self + @assertion_count = 2 + end + + def assert_triggered expected, klass = Minitest::Assertion + @assertion_count += 1 + + e = assert_raises klass do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, "\1") + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + + assert_equal expected, msg + end + + def teardown + msg = "expected #{@assertion_count} assertions, not #{@tc.assertions}" + assert_equal @assertion_count, @tc.assertions, msg + end + + def test_expectation + @tc.assert_equal true, _(1).must_equal(1) + end + + def test_expectation_triggered + assert_triggered "Expected: 2\n Actual: 1" do + _(1).must_equal 2 + end + end + + include Minitest::Spec::DSL::InstanceMethods + + def test_expectation_with_a_message + assert_triggered "woot.\nExpected: 2\n Actual: 1" do + _(1).must_equal 2, "woot" + end + end +end + +class ValueMonadTest < Minitest::Test + attr_accessor :struct + + def setup + @struct = { :_ => "a", :value => "b", :expect => "c" } + def @struct.method_missing k # think openstruct + self[k] + end + end + + def test_value_monad_method + assert_equal "a", struct._ + end + + def test_value_monad_value_alias + assert_equal "b", struct.value + end + + def test_value_monad_expect_alias + assert_equal "c", struct.expect + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_test.rb b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_test.rb new file mode 100644 index 0000000..89ba812 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/minitest-5.14.2/test/minitest/test_minitest_test.rb @@ -0,0 +1,1084 @@ +# encoding: UTF-8 + +require "pathname" +require "minitest/metametameta" + +if defined? Encoding then + e = Encoding.default_external + if e != Encoding::UTF_8 then + warn "" + warn "" + warn "NOTE: External encoding #{e} is not UTF-8. Tests WILL fail." + warn " Run tests with `RUBYOPT=-Eutf-8 rake` to avoid errors." + warn "" + warn "" + end +end + +class Minitest::Runnable + def whatever # faked for testing + assert true + end +end + +class TestMinitestUnit < MetaMetaMetaTestCase + parallelize_me! + + pwd = Pathname.new File.expand_path Dir.pwd + basedir = Pathname.new(File.expand_path "lib/minitest") + "mini" + basedir = basedir.relative_path_from(pwd).to_s + MINITEST_BASE_DIR = basedir[/\A\./] ? basedir : "./#{basedir}" + BT_MIDDLE = ["#{MINITEST_BASE_DIR}/test.rb:161:in `each'", + "#{MINITEST_BASE_DIR}/test.rb:158:in `each'", + "#{MINITEST_BASE_DIR}/test.rb:139:in `run'", + "#{MINITEST_BASE_DIR}/test.rb:106:in `run'"] + + def test_filter_backtrace + # this is a semi-lame mix of relative paths. 
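As background for the backtrace-filter tests in this file: Minitest.filter_backtrace delegates to whatever object is assigned to Minitest.backtrace_filter, which only needs to respond to #filter. A short sketch follows; the VendorStrippingFilter name and the sample paths are made up for the example.

    require "minitest"

    class VendorStrippingFilter              # hypothetical filter
      def filter bt
        bt.reject { |line| line.include? "/vendor/bundle/" }
      end
    end

    Minitest.backtrace_filter = VendorStrippingFilter.new

    Minitest.filter_backtrace [
      "app/models/foo.rb:10:in `bar'",
      "vendor/bundle/gems/some-gem/lib/x.rb:2:in `baz'",
    ]
    # => ["app/models/foo.rb:10:in `bar'"]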
+ # I cheated by making the autotest parts not have ./ + bt = (["lib/autotest.rb:571:in `add_exception'", + "test/test_autotest.rb:62:in `test_add_exception'", + "#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/test.rb:29", + "test/test_autotest.rb:422"]) + bt = util_expand_bt bt + + ex = ["lib/autotest.rb:571:in `add_exception'", + "test/test_autotest.rb:62:in `test_add_exception'"] + ex = util_expand_bt ex + + fu = Minitest.filter_backtrace(bt) + + assert_equal ex, fu + end + + def test_filter_backtrace_all_unit + bt = (["#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/test.rb:29"]) + ex = bt.clone + fu = Minitest.filter_backtrace(bt) + assert_equal ex, fu + end + + def test_filter_backtrace_unit_starts + bt = (["#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/mini/test.rb:29", + "-e:1"]) + + bt = util_expand_bt bt + + ex = ["-e:1"] + fu = Minitest.filter_backtrace bt + assert_equal ex, fu + end + + def test_filter_backtrace__empty + with_empty_backtrace_filter do + bt = %w[first second third] + fu = Minitest.filter_backtrace bt.dup + assert_equal bt, fu + end + end + + def test_infectious_binary_encoding + @tu = Class.new FakeNamedTest do + def test_this_is_not_ascii_assertion + assert_equal "ЁЁЁ", "ёёё" + end + + def test_this_is_non_ascii_failure_message + fail 'ЁЁЁ'.force_encoding('ASCII-8BIT') + end + end + + expected = clean <<-EOM + EF + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_this_is_non_ascii_failure_message: + RuntimeError: ЁЁЁ + FILE:LINE:in `test_this_is_non_ascii_failure_message' + + 2) Failure: + FakeNamedTestXX#test_this_is_not_ascii_assertion [FILE:LINE]: + Expected: \"ЁЁЁ\" + Actual: \"ёёё\" + + 2 runs, 1 assertions, 1 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_passed_eh_teardown_good + test_class = Class.new FakeNamedTest do + def teardown; assert true; end + def test_omg; assert true; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + assert_predicate test, :passed? + refute_predicate test, :skipped? + end + + def test_passed_eh_teardown_skipped + test_class = Class.new FakeNamedTest do + def teardown; assert true; end + def test_omg; skip "bork"; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + refute_predicate test, :passed? + assert_predicate test, :skipped? + end + + def test_passed_eh_teardown_flunked + test_class = Class.new FakeNamedTest do + def teardown; flunk; end + def test_omg; assert true; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + refute_predicate test, :passed? + refute_predicate test, :skipped? + end + + def util_expand_bt bt + if RUBY_VERSION >= "1.9.0" then + bt.map { |f| (f =~ /^\./) ? File.expand_path(f) : f } + else + bt + end + end +end + +class TestMinitestUnitInherited < MetaMetaMetaTestCase + def with_overridden_include + Class.class_eval do + def inherited_with_hacks _klass + throw :inherited_hook + end + + alias inherited_without_hacks inherited + alias inherited inherited_with_hacks + alias IGNORE_ME! inherited # 1.8 bug. 
god I love venture bros + end + + yield + ensure + Class.class_eval do + alias inherited inherited_without_hacks + + undef_method :inherited_with_hacks + undef_method :inherited_without_hacks + end + + refute_respond_to Class, :inherited_with_hacks + refute_respond_to Class, :inherited_without_hacks + end + + def test_inherited_hook_plays_nice_with_others + with_overridden_include do + assert_throws :inherited_hook do + Class.new FakeNamedTest + end + end + end +end + +class TestMinitestRunner < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. + + def test_class_runnables + @assertion_count = 0 + + tc = Class.new(Minitest::Test) + + assert_equal 1, Minitest::Test.runnables.size + assert_equal [tc], Minitest::Test.runnables + end + + def test_run_test + @tu = + Class.new FakeNamedTest do + attr_reader :foo + + def run + @foo = "hi mom!" + r = super + @foo = "okay" + + r + end + + def test_something + assert_equal "hi mom!", foo + end + end + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_error + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_error + raise "unhandled exception" + end + end + + expected = clean <<-EOM + E. + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_error: + RuntimeError: unhandled exception + FILE:LINE:in \`test_error\' + + 2 runs, 1 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_error_teardown + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def teardown + raise "unhandled exception" + end + end + + expected = clean <<-EOM + E + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_something: + RuntimeError: unhandled exception + FILE:LINE:in \`teardown\' + + 1 runs, 1 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_failing + setup_basic_tu + + expected = clean <<-EOM + F. + + Finished in 0.00 + + 1) Failure: + FakeNamedTestXX#test_failure [FILE:LINE]: + Expected false to be truthy. + + 2 runs, 2 assertions, 1 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def setup_basic_tu + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_failure + assert false + end + end + end + + def test_run_failing_filtered + setup_basic_tu + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected, %w[--name /some|thing/ --seed 42] + end + + def assert_filtering filter, name, expected, a = false + args = %W[--#{filter} #{name} --seed 42] + + alpha = Class.new FakeNamedTest do + define_method :test_something do + assert a + end + end + Object.const_set(:Alpha, alpha) + + beta = Class.new FakeNamedTest do + define_method :test_something do + assert true + end + end + Object.const_set(:Beta, beta) + + @tus = [alpha, beta] + + assert_report expected, args + ensure + Object.send :remove_const, :Alpha + Object.send :remove_const, :Beta + end + + def test_run_filtered_including_suite_name + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "/Beta#test_something/", expected + end + + def test_run_filtered_including_suite_name_string + expected = clean <<-EOM + . 
+ + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "Beta#test_something", expected + end + + def test_run_filtered_string_method_only + expected = clean <<-EOM + .. + + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "test_something", expected, :pass + end + + def test_run_failing_excluded + setup_basic_tu + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected, %w[--exclude /failure/ --seed 42] + end + + def test_run_filtered_excluding_suite_name + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "/Alpha#test_something/", expected + end + + def test_run_filtered_excluding_suite_name_string + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "Alpha#test_something", expected + end + + def test_run_filtered_excluding_string_method_only + expected = clean <<-EOM + + + Finished in 0.00 + + 0 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "test_something", expected, :pass + end + + def test_run_passing + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + end + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_skip + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_skip + skip "not yet" + end + end + + expected = clean <<-EOM + S. + + Finished in 0.00 + + 2 runs, 1 assertions, 0 failures, 0 errors, 1 skips + + You have skipped tests. Run with --verbose for details. + EOM + + restore_env do + assert_report expected + end + end + + def test_run_skip_verbose + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_skip + skip "not yet" + end + end + + expected = clean <<-EOM + FakeNamedTestXX#test_skip = 0.00 s = S + FakeNamedTestXX#test_something = 0.00 s = . + + Finished in 0.00 + + 1) Skipped: + FakeNamedTestXX#test_skip [FILE:LINE]: + not yet + + 2 runs, 1 assertions, 0 failures, 0 errors, 1 skips + EOM + + assert_report expected, %w[--seed 42 --verbose] + end + + def test_run_with_other_runner + @tu = + Class.new FakeNamedTest do + def self.run reporter, options = {} + @reporter = reporter + before_my_suite + super + end + + def self.name; "wacky!" end + + def self.before_my_suite + @reporter.io.puts "Running #{self.name} tests" + @@foo = 1 + end + + def test_something + assert_equal 1, @@foo + end + + def test_something_else + assert_equal 1, @@foo + end + end + + expected = clean <<-EOM + Running wacky! tests + .. 
+ + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + require "monitor" + + class Latch + def initialize count = 1 + @count = count + @lock = Monitor.new + @cv = @lock.new_cond + end + + def release + @lock.synchronize do + @count -= 1 if @count > 0 + @cv.broadcast if @count == 0 + end + end + + def await + @lock.synchronize { @cv.wait_while { @count > 0 } } + end + end + + def test_run_parallel + test_count = 2 + test_latch = Latch.new test_count + wait_latch = Latch.new test_count + main_latch = Latch.new + + thread = Thread.new { + Thread.current.abort_on_exception = true + + # This latch waits until both test latches have been released. Both + # latches can't be released unless done in separate threads because + # `main_latch` keeps the test method from finishing. + test_latch.await + main_latch.release + } + + @tu = + Class.new FakeNamedTest do + parallelize_me! + + test_count.times do |i| + define_method :"test_wait_on_main_thread_#{i}" do + test_latch.release + + # This latch blocks until the "main thread" releases it. The main + # thread can't release this latch until both test latches have + # been released. This forces the latches to be released in separate + # threads. + main_latch.await + assert true + end + end + end + + expected = clean <<-EOM + .. + + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report(expected) do |reporter| + reporter.extend(Module.new { + define_method("record") do |result| + super(result) + wait_latch.release + end + + define_method("report") do + wait_latch.await + super() + end + }) + end + assert thread.join + end +end + +class TestMinitestUnitOrder < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. 
+ + def test_before_setup + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :setup do + super() + call_order << :setup + end + + define_method :before_setup do + call_order << :before_setup + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:before_setup, :setup] + assert_equal expected, call_order + end + + def test_after_teardown + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :teardown do + super() + call_order << :teardown + end + + define_method :after_teardown do + call_order << :after_teardown + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:teardown, :after_teardown] + assert_equal expected, call_order + end + + def test_all_teardowns_are_guaranteed_to_run + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :after_teardown do + super() + call_order << :after_teardown + raise + end + + define_method :teardown do + super() + call_order << :teardown + raise + end + + define_method :before_teardown do + super() + call_order << :before_teardown + raise + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:before_teardown, :teardown, :after_teardown] + assert_equal expected, call_order + end + + def test_setup_and_teardown_survive_inheritance + call_order = [] + + @tu = Class.new FakeNamedTest do + define_method :setup do + call_order << :setup_method + end + + define_method :teardown do + call_order << :teardown_method + end + + define_method :test_something do + call_order << :test + end + end + + run_tu_with_fresh_reporter + + @tu = Class.new @tu + run_tu_with_fresh_reporter + + # Once for the parent class, once for the child + expected = [:setup_method, :test, :teardown_method] * 2 + + assert_equal expected, call_order + end +end + +class TestMinitestRunnable < Minitest::Test + def setup_marshal klass + tc = klass.new "whatever" + tc.assertions = 42 + tc.failures << "a failure" + + yield tc if block_given? + + def tc.setup + @blah = "blah" + end + tc.setup + + @tc = Minitest::Result.from tc + end + + def assert_marshal expected_ivars + new_tc = Marshal.load Marshal.dump @tc + + ivars = new_tc.instance_variables.map(&:to_s).sort + assert_equal expected_ivars, ivars + assert_equal "whatever", new_tc.name + assert_equal 42, new_tc.assertions + assert_equal ["a failure"], new_tc.failures + + yield new_tc if block_given? + end + + def test_marshal + setup_marshal Minitest::Runnable + + assert_marshal %w[@NAME @assertions @failures @klass @source_location @time] + end + + def test_spec_marshal + klass = describe("whatever") { it("passes") { assert true } } + rm = klass.runnable_methods.first + + # Run the test + @tc = klass.new(rm).run + + assert_kind_of Minitest::Result, @tc + + # Pass it over the wire + over_the_wire = Marshal.load Marshal.dump @tc + + assert_equal @tc.time, over_the_wire.time + assert_equal @tc.name, over_the_wire.name + assert_equal @tc.assertions, over_the_wire.assertions + assert_equal @tc.failures, over_the_wire.failures + assert_equal @tc.klass, over_the_wire.klass + end +end + +class TestMinitestTest < TestMinitestRunnable + def test_dup + setup_marshal Minitest::Test do |tc| + tc.time = 3.14 + end + + assert_marshal %w[@NAME @assertions @failures @klass @source_location @time] do |new_tc| + assert_in_epsilon 3.14, new_tc.time + end + end +end + +class TestMinitestUnitTestCase < Minitest::Test + # do not call parallelize_me! 
- teardown accesses @tc._assertions + # which is not threadsafe. Nearly every method in here is an + # assertion test so it isn't worth splitting it out further. + + RUBY18 = !defined? Encoding + + def setup + super + + Minitest::Test.reset + + @tc = Minitest::Test.new "fake tc" + @zomg = "zomg ponies!" + @assertion_count = 1 + end + + def teardown + assert_equal(@assertion_count, @tc.assertions, + "expected #{@assertion_count} assertions to be fired during the test, not #{@tc.assertions}") if @tc.passed? + end + + def non_verbose + orig_verbose = $VERBOSE + $VERBOSE = false + + yield + ensure + $VERBOSE = orig_verbose + end + + def test_runnable_methods_random + @assertion_count = 0 + + sample_test_case = Class.new FakeNamedTest do + def self.test_order; :random; end + def test_test1; assert "does not matter" end + def test_test2; assert "does not matter" end + def test_test3; assert "does not matter" end + end + + srand 42 + expected = %w[test_test2 test_test1 test_test3] + assert_equal expected, sample_test_case.runnable_methods + end + + def test_runnable_methods_sorted + @assertion_count = 0 + + sample_test_case = Class.new FakeNamedTest do + def self.test_order; :sorted end + def test_test3; assert "does not matter" end + def test_test2; assert "does not matter" end + def test_test1; assert "does not matter" end + end + + expected = %w[test_test1 test_test2 test_test3] + assert_equal expected, sample_test_case.runnable_methods + end + + def test_i_suck_and_my_tests_are_order_dependent_bang_sets_test_order_alpha + @assertion_count = 0 + + shitty_test_case = Class.new FakeNamedTest + + shitty_test_case.i_suck_and_my_tests_are_order_dependent! + + assert_equal :alpha, shitty_test_case.test_order + end + + def test_i_suck_and_my_tests_are_order_dependent_bang_does_not_warn + @assertion_count = 0 + + shitty_test_case = Class.new FakeNamedTest + + def shitty_test_case.test_order; :lol end + + assert_silent do + shitty_test_case.i_suck_and_my_tests_are_order_dependent! + end + end + + def test_autorun_does_not_affect_fork_success_status + @assertion_count = 0 + skip "windows doesn't have skip" unless Process.respond_to?(:fork) + Process.waitpid(fork {}) + assert_equal true, $?.success? + end + + def test_autorun_does_not_affect_fork_exit_status + @assertion_count = 0 + skip "windows doesn't have skip" unless Process.respond_to?(:fork) + Process.waitpid(fork { exit 42 }) + assert_equal 42, $?.exitstatus + end +end + +class TestMinitestGuard < Minitest::Test + parallelize_me! + + def test_mri_eh + assert self.class.mri? "ruby blah" + assert self.mri? "ruby blah" + end + + def test_jruby_eh + assert self.class.jruby? "java" + assert self.jruby? "java" + end + + def test_rubinius_eh + assert_output "", /DEPRECATED/ do + assert self.class.rubinius? "rbx" + end + assert_output "", /DEPRECATED/ do + assert self.rubinius? "rbx" + end + end + + def test_maglev_eh + assert_output "", /DEPRECATED/ do + assert self.class.maglev? "maglev" + end + assert_output "", /DEPRECATED/ do + assert self.maglev? "maglev" + end + end + + def test_osx_eh + assert self.class.osx? "darwin" + assert self.osx? "darwin" + end + + def test_windows_eh + assert self.class.windows? "mswin" + assert self.windows? "mswin" + end +end + +class TestMinitestUnitRecording < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. 
+ + def assert_run_record *expected, &block + @tu = Class.new FakeNamedTest, &block + + run_tu_with_fresh_reporter + + recorded = first_reporter.results.map(&:failures).flatten.map { |f| f.error.class } + + assert_equal expected, recorded + end + + def test_run_with_bogus_reporter + # https://github.com/seattlerb/minitest/issues/659 + # TODO: remove test for minitest 6 + @tu = Class.new FakeNamedTest do + def test_method + assert true + end + end + + bogus_reporter = Class.new do # doesn't subclass AbstractReporter + def start; @success = false; end + # def prerecord klass, name; end # doesn't define full API + def record result; @success = true; end + def report; end + def passed?; end + def results; end + def success?; @success; end + end.new + + self.reporter = Minitest::CompositeReporter.new + reporter << bogus_reporter + + Minitest::Runnable.runnables.delete @tu + + @tu.run reporter, {} + + assert_predicate bogus_reporter, :success? + end + + def test_record_passing + assert_run_record do + def test_method + assert true + end + end + end + + def test_record_failing + assert_run_record Minitest::Assertion do + def test_method + assert false + end + end + end + + def test_record_error + assert_run_record RuntimeError do + def test_method + raise "unhandled exception" + end + end + end + + def test_record_error_teardown + assert_run_record RuntimeError do + def test_method + assert true + end + + def teardown + raise "unhandled exception" + end + end + end + + def test_record_error_in_test_and_teardown + assert_run_record AnError, RuntimeError do + def test_method + raise AnError + end + + def teardown + raise "unhandled exception" + end + end + end + + def test_to_s_error_in_test_and_teardown + @tu = Class.new FakeNamedTest do + def test_method + raise AnError + end + + def teardown + raise "unhandled exception" + end + end + + run_tu_with_fresh_reporter + + exp = clean " + Error: + FakeNamedTestXX#test_method: + AnError: AnError + FILE:LINE:in `test_method' + + Error: + FakeNamedTestXX#test_method: + RuntimeError: unhandled exception + FILE:LINE:in `teardown' + " + + assert_equal exp.strip, normalize_output(first_reporter.results.first.to_s).strip + end + + def test_record_skip + assert_run_record Minitest::Skip do + def test_method + skip "not yet" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/LICENSE b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/LICENSE new file mode 100644 index 0000000..97c1a79 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2015 Manfred Stienstra, Fingertips + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/README.md b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/README.md new file mode 100644 index 0000000..af0a26a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/README.md @@ -0,0 +1,80 @@ +# Nap + +Nap is an extremely simple REST client for Ruby. It was built to quickly +fire off HTTP requests without having to research net/http internals. + +## Example + +```ruby +gem 'nap' +require 'rest' +require 'json' + +response = REST.get('http://twitter.com/statuses/friends_timeline.json', {}, + {:username => '_evan', :password => 'buttonscat'} +) +if response.ok? + timeline = JSON.parse(response.body) + puts(timeline.map do |item| + "#{item['user']['name']}\n\n#{item['text']}" + end.join("\n\n--\n\n")) +elsif response.forbidden? + puts "Are you sure you're `_evan' and your password is the name of your cat?" +else + puts "Something went wrong (#{response.status_code})" + puts response.body +end +``` + +## Advanced request configuration + +If you need more control over the Net::HTTP request you can pass a block to all of the request methods. +```ruby +response = REST.get('http://google.com') do |http_request| + http_request.open_timeout = 15 + http_request.set_debug_output(STDERR) +end +``` + +## Proxy support + +To enable the proxy settings in Nap, you can either use the HTTP\_PROXY or http\_proxy enviroment variable. + + $ env HTTP_PROXY=http://rob:secret@192.167.1.254:665 ruby app.rb + +## Exceptions + +Nap defines one top-level and three main error types which allow you to catch a whole range of exceptions thrown by underlying protocol implementations. + +* *REST::Error*: Any type of error +* *REST::Error::Timeout*: Read timeouts of various sorts +* *REST::Error::Connection*: Connection errors caused by dropped sockets +* *REST::Error::Protocol*: Request failed because of a problem when handling the HTTP request or response + +In the most basic case you can rescue from the top-level type to warn about fetching problems. + +```ruby +begin + REST.get('http://example.com/pigeons/12') +rescue REST::Error + puts "[!] Failed to fetch Pigeon number 12." +end +``` + +## Contributions + +Nap couldn't be the shining beacon in the eternal darkness without help from: + +* Eloy Durán +* Joshua Sierles +* Thijs van der Vossen + +For all other great human beings, please visit the GitHub contributors page. + +## Changes from 1.0.0 to 1.1.0 + +* REST::Request now allows all HTTP verbs to send a body entity. + +## Changes from 0.8.0 to 1.0.0 + +* Removed REST::DisconnectedError, please use REST::Error::Connection instead. diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest.rb b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest.rb new file mode 100644 index 0000000..5f4e36f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest.rb @@ -0,0 +1,106 @@ +require 'uri' + +# REST is basically a convenience wrapper around Net::HTTP. It defines a simple and consistant API +# for doing REST-style HTTP calls. +# +# In addition it provides wrappers for the many error classes that can be raised while making +# requests. See REST::Error for a complete discussion of options. +module REST + # Library version + VERSION = '1.1.0' + + # Performs a GET on a resource. 
See REST::Request.new for a complete discussion of options. + # + # response = REST.get('http://example.com/pigeons/12', + # {'Accept' => 'text/plain'}, + # {:username => 'admin', :password => 'secret'} + # ) + # if response.ok? + # puts response.body + # else + # puts "Couldn't fetch your pigeon (#{response.status_code})" + # end + def self.get(uri, headers={}, options={}, &configure_block) + REST::Request.perform(:get, URI.parse(uri), nil, headers, options, &configure_block) + end + + # Performs a HEAD on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.head('http://example.com/pigeons/12') + # if response.ok? + # puts "Your pigeon exists!" + # elsif response.found? + # puts "Someone moved your pigeon!" + # else + # puts "Couldn't fetch your pigeon (#{response.status_code})" + # end + def self.head(uri, headers={}, options={}, &configure_block) + REST::Request.perform(:head, URI.parse(uri), nil, headers, options, &configure_block) + end + + # Performs a DELETE on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.delete('http://example.com/pigeons/12') + # if response.ok? + # puts "Your pigeon died ): )" + # elsif response.found? + # puts "Someone moved your pigeon!" + # else + # puts "Couldn't delete your pigeon (#{response.status_code})" + # end + def self.delete(uri, headers={}, options={}, &configure_block) + REST::Request.perform(:delete, URI.parse(uri), nil, headers, options, &configure_block) + end + + # Performs a PATCH on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.patch('http://example.com/pigeons/12', + # {'Name' => 'Homer'}.to_xml, + # {'Accept' => 'application/xml, */*', 'Content-Type' => 'application/xml'} + # ) + # if response.ok? + # puts "Your pigeon was renamed to 'Homer'!" + # else + # puts "Couldn't rename your pigeon (#{response.status_code})" + # puts XML.parse(response.body).reason + # end + def self.patch(uri, body, headers={}, options={}, &configure_block) + REST::Request.perform(:patch, URI.parse(uri), body, headers, options, &configure_block) + end + + # Performs a PUT on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.put('http://example.com/pigeons/12', + # {'Name' => 'Homer'}.to_xml, + # {'Accept' => 'application/xml, */*', 'Content-Type' => 'application/xml'} + # ) + # if response.ok? + # puts "Your pigeon 'Bowser' was replaced by 'Homer'!" + # else + # puts "Couldn't replace your pigeon (#{response.status_code})" + # puts XML.parse(response.body).reason + # end + def self.put(uri, body, headers={}, options={}, &configure_block) + REST::Request.perform(:put, URI.parse(uri), body, headers, options, &configure_block) + end + + # Performs a POST on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.post('http://example.com/pigeons', + # {'Name' => 'Bowser'}.to_xml, + # {'Accept' => 'application/xml, */*', 'Content-Type' => 'application/xml'} + # ) + # if response.created? 
+ # puts "Created a new pigeon called 'Bowser'" + # else + # puts "Couldn't create your pigeon (#{response.status_code})" + # puts XML.parse(response.body).reason + # end + def self.post(uri, body, headers={}, options={}, &configure_block) + REST::Request.perform(:post, URI.parse(uri), body, headers, options, &configure_block) + end +end + +require File.expand_path('../rest/error', __FILE__) +require File.expand_path('../rest/request', __FILE__) +require File.expand_path('../rest/response', __FILE__) diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/error.rb b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/error.rb new file mode 100644 index 0000000..3eedce3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/error.rb @@ -0,0 +1,92 @@ +require 'net/http' + +module REST + # This constant can be used to rescue any of the known `Timeout`, `Connection`, and `Protocol` + # error classes. + # + # For instance, to rescue _any_ type of error that could be raise while making a request: + # + # begin + # REST.get('http://example.com/pigeons/12') + # rescue REST::Error => e + # p e # => Timeout::Error + # end + # + # If you want to rescue only `Timeout` related error classes, however, you can limit the scope: + # + # begin + # REST.get('http://example.com/pigeons/12') + # rescue REST::Error::Timeout => e + # p e # => Timeout::Error + # end + module Error + # This constant can be used to rescue only the known `Timeout` error classes. + module Timeout + def self.class_names + %w( + Errno::ETIMEDOUT + Timeout::Error + Net::OpenTimeout + Net::ReadTimeout + ) + end + end + + # This constant can be used to rescue only the known `Connection` error classes. + module Connection + def self.class_names + %w( + EOFError + Errno::ECONNABORTED + Errno::ECONNREFUSED + Errno::ECONNRESET + Errno::EHOSTDOWN + Errno::EHOSTUNREACH + Errno::EINVAL + Errno::ENETUNREACH + SocketError + OpenSSL::SSL::SSLError + ) + end + end + + # This constant can be used to rescue only the known `Protocol` error classes. + module Protocol + def self.class_names + %w( + Net::HTTPBadResponse + Net::HTTPHeaderSyntaxError + Net::ProtocolError + Zlib::GzipFile::Error + ) + end + end + + private + + [Timeout, Connection, Protocol].each do |mod| + mod.send(:include, Error) + + # Collect all the error classes that exist at runtime. + def mod.classes + class_names.map do |name| + begin + # MRI < 2 does not support full constant paths for `Object.const_get`. + name.split('::').inject(Object) do |current, const| + current.const_get(const) + end + rescue NameError + nil + end + end.compact + end + + # Include the `mod` into the classes. + def mod.extend_classes! + classes.each { |klass| klass.send(:include, self) } + end + + mod.extend_classes! 
+ end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/request.rb b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/request.rb new file mode 100644 index 0000000..d9fb086 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/request.rb @@ -0,0 +1,196 @@ +require 'uri' +require 'net/http' + +module REST + # Request holds a HTTP request + class Request + attr_accessor :verb, :url, :body, :headers, :options, :request + + # * verb: The verb to use in the request, either :get, :head, :patch, :put, or :post + # * url: The URL to send the request to, must be a URI instance + # * body: The body to use in the request + # * headers: A hash of headers to add to the request + # * options: A hash of additional options + # * username: Username to use for basic authentication + # * password: Password to use for basic authentication + # * tls_verify/verify_ssl: Verify the server certificate against known CA's + # * tls_ca_file: Use a specific file for CA certificates instead of the built-in one + # this only works when :tls_verify is also set. + # * tls_key_and_certificate_file: The client key and certificate file to use for this + # request + # * tls_certificate: The client certficate to use for this request + # * tls_key: The client private key to use for this request + # * configure_block: An optional block that yields the underlying Net::HTTP + # request object allowing for more fine-grained configuration + # + # == Examples + # + # request = REST::Request.new(:get, URI.parse('http://example.com/pigeons/1')) + # + # request = REST::Request.new(:head, URI.parse('http://example.com/pigeons/1')) + # + # request = REST::Request.new(:post, + # URI.parse('http://example.com/pigeons'), + # {'name' => 'Homr'}.to_json, + # {'Accept' => 'application/json, */*', 'Content-Type' => 'application/json; charset=utf-8'} + # ) + # + # # Pass a block to configure the underlying +Net::HTTP+ request. 
+ # request = REST::Request.new(:get, URI.parse('http://example.com/pigeons/largest')) do |http_request| + # http_request.open_timeout = 15 # seconds + # end + # + # == Authentication example + # + # request = REST::Request.new(:put, + # URI.parse('http://example.com/pigeons/1'), + # {'name' => 'Homer'}.to_json, + # {'Accept' => 'application/json, */*', 'Content-Type' => 'application/json; charset=utf-8'}, + # {:username => 'Admin', :password => 'secret'} + # ) + # + # == TLS / SSL examples + # + # # Use a client key and certificate + # request = REST::Request.new(:get, URI.parse('https://example.com/pigeons/1'), nil, {}, { + # :tls_key_and_certificate_file => '/home/alice/keys/example.pem' + # }) + # + # # Use a client certificate and key from a specific location + # key_and_certificate = File.read('/home/alice/keys/example.pem') + # request = REST::Request.new(:get, URI.parse('https://example.com/pigeons/1'), nil, {}, { + # :tls_key => OpenSSL::PKey::RSA.new(key_and_certificate), + # :tls_certificate => OpenSSL::X509::Certificate.new(key_and_certificate) + # }) + # + # # Verify the server certificate against a specific certificate + # request = REST::Request.new(:get, URI.parse('https://example.com/pigeons/1'), nil, {}, { + # :tls_verify => true, + # :tls_ca_file => '/home/alice/keys/example.pem' + # }) + def initialize(verb, url, body=nil, headers={}, options={}, &configure_block) + @verb = verb + @url = url + @body = body + @headers = headers + @options = options + @configure_block = configure_block + end + + # Returns the path (including the query) for the request + def path + [url.path.empty? ? '/' : url.path, url.query].compact.join('?') + end + + def proxy_env + { + 'http' => ENV['HTTP_PROXY'] || ENV['http_proxy'], + 'https' => ENV['HTTPS_PROXY'] || ENV['https_proxy'] + } + end + + def proxy_settings + proxy_env[url.scheme] ? URI.parse(proxy_env[url.scheme]) : nil + end + + def http_proxy + if settings = proxy_settings + Net::HTTP.Proxy(settings.host, settings.port, settings.user, settings.password) + end + end + + # Configures and returns a new Net::HTTP request object + def http_request + if http_proxy + http_request = http_proxy.new(url.host, url.port) + else + http_request = Net::HTTP.new(url.host, url.port) + end + + # enable SSL/TLS + if url.scheme == 'https' + require 'net/https' + require 'openssl' + Error::Connection.extend_classes! 
+ + http_request.use_ssl = true + + if options[:tls_verify] or options[:verify_ssl] + if http_request.respond_to?(:enable_post_connection_check=) + http_request.enable_post_connection_check = true + end + # from http://curl.haxx.se/ca/cacert.pem + http_request.ca_file = options[:tls_ca_file] || File.expand_path('../../../support/cacert.pem', __FILE__) + http_request.verify_mode = OpenSSL::SSL::VERIFY_PEER + else + http_request.verify_mode = OpenSSL::SSL::VERIFY_NONE + end + + if options[:tls_key_and_certificate_file] + key_and_certificate = File.read(options[:tls_key_and_certificate_file]) + options[:tls_key] = OpenSSL::PKey::RSA.new(key_and_certificate) + options[:tls_certificate] = OpenSSL::X509::Certificate.new(key_and_certificate) + end + + if options[:tls_key] and options[:tls_certificate] + http_request.key = options[:tls_key] + http_request.cert = options[:tls_certificate] + elsif options[:tls_key] || options[:tls_certificate] + raise ArgumentError, "Please specify both the certificate and private key (:tls_key and :tls_certificate)" + end + end + + if @configure_block + @configure_block.call(http_request) + end + + http_request + end + + def request_for_verb + case verb + when :get + Net::HTTP::Get.new(path, headers) + when :head + Net::HTTP::Head.new(path, headers) + when :delete + Net::HTTP::Delete.new(path, headers) + when :patch + if defined?(Net::HTTP::Patch) + Net::HTTP::Patch.new(path, headers) + else + raise ArgumentError, "This version of the Ruby standard library doesn't support PATCH" + end + when :put + Net::HTTP::Put.new(path, headers) + when :post + Net::HTTP::Post.new(path, headers) + else + raise ArgumentError, "Unknown HTTP verb `#{verb}'" + end + end + + # Performs the actual request and returns a REST::Response object with the response + def perform + self.request = request_for_verb + request.body = body + + if options[:username] and options[:password] + request.basic_auth(options[:username], options[:password]) + end + + http_request = http_request() + response = http_request.start { |http| http.request(request) } + REST::Response.new(response.code, response.instance_variable_get('@header'), response.body) + end + + # Shortcut for REST::Request.new(*args).perform. + # + # See new for options. + def self.perform(*args, &configure_block) + request = new(*args, &configure_block) + request.perform + end + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/response.rb b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/response.rb new file mode 100644 index 0000000..8ac1e2f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/lib/rest/response.rb @@ -0,0 +1,46 @@ +module REST + # Response holds a HTTP response + class Response + # These codes are used to define convenience boolean accessors on the response object. + # + # Examples + # + # REST::Response.new(200).ok? #=> true + # REST::Response.new(201).ok? #=> falses + # REST::Response.new(403).forbidden? #=> true + CODES = [ + [200, :ok], + [201, :created], + [301, :moved_permanently], + [302, :found], + [400, :bad_request], + [401, :unauthorized], + [403, :forbidden], + [422, :unprocessable_entity], + [404, :not_found], + [500, :internal_server_error] + ] + + attr_accessor :body, :headers, :status_code + + # * status_code: The status code of the response (ie. 
200 or '404') + # * headers: The headers of the response + # * body: The body of the response + def initialize(status_code, headers={}, body='') + @status_code = status_code.to_i + @headers = headers + @body = body + end + + CODES.each do |code, name| + define_method "#{name}?" do + status_code == code + end + end + + # Returns _true_ when the status code is in the 2XX range. Returns false otherwise. + def success? + (status_code.to_s =~ /2../) ? true : false + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/support/cacert.pem b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/support/cacert.pem new file mode 100644 index 0000000..1b24dc6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/nap-1.1.0/support/cacert.pem @@ -0,0 +1,3988 @@ +## +## Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Wed Apr 22 03:12:04 2015 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## +## Conversion done with mk-ca-bundle.pl version 1.25. +## SHA1: ed3c0bbfb7912bcc00cd2033b0cb85c98d10559c +## + + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP 
+c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 
+xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE 
+ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy 
+2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y 
+azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN 
CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu 
+KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL 
+F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C 
+12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 
+eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK 
++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX 
+5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl 
+IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp 
+cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j 
+ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz 
+8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk 
+LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 
+lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL 
+ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ 
+V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI 
+DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g 
+QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu 
+MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs 
+oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH 
+ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN 
++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- 
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG 
+Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv 
+c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU 
+cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy +vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- 
+MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX 
+12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- 
+MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy 
+Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 
+BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg 
+Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- 
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE
+BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO
+ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk
+IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u
+IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV
+UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv
+cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj
+1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP
+MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72
+9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I
+AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR
+tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G
+CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O
+a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3
+Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx
+Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx
+P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P
+wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4
+mJO37M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+VeriSign Class 3 Public Primary Certification Authority - G4
+============================================================
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3
+b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz
+ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU
+cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo
+b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5
+IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8
+Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz
+rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw
+HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u
+Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD
+A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx
+AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+NetLock Arany (Class Gold) Főtanúsítvány
+============================================
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G
+A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610
+dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB
+cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx
+MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil +e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw 
+MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF 
+AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg 
+aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + 
+Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG 
+NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg 
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD 
++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy 
+xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R 
+p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- 
+MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA 
+YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY 
+T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 
+======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem 
+OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 
+fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff 
+awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx 
+MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N 
+YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh +zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp 
+b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO +EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy +YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn 
+FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 +0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab +mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ 
+82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa +m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa 
+YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 +utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH 
+UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn 
+kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority +=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD 
+DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf +vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV 
+hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +QuoVadis Root CA 1 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE +PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm +PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6 +Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN +ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l +g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV +7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX +9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f +iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg +t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI +hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3 +GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct +Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP ++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh +3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa +wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6 +O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0 +FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV +hMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +QuoVadis Root CA 2 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh +ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY +NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t 
+oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o +MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l +V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo +L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ +sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD +6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh +lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI +hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K +pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9 +x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz +dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X +U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw +mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD +zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN +JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr +O3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +QuoVadis Root CA 3 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286 +IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL +Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe +6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3 +I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U +VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7 +5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi +Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM +dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt +rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI +hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS +t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ +TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du +DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib +Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD +hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX +0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW +dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2 +PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +DigiCert Assured ID Root G2 +=========================== +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG 
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw +MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH +35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq +bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw +VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP +YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn +lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO +w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv +0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz +d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW +hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M +jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +DigiCert Assured ID Root G3 +=========================== +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD +VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb +RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs +KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF +UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy +YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy +1vUhZscv6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +DigiCert Global Root G2 +======================= +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx +MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ +kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO +3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV +BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM +UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB +o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu +5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr +F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U +WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH +QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/ +iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +DigiCert Global Root G3 +======================= 
+-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD +VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw +MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C +AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O +YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp +Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y +3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34 +VOKa5Vt8sycX +-----END CERTIFICATE----- + +DigiCert Trusted Root G4 +======================== +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw +HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp +pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o +k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa +vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY +QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6 +MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm +mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7 +f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH +dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8 +oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY +ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr +yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy +7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah +ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN +5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb +/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa +5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK +G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP +82Z+ +-----END CERTIFICATE----- + +WoSign +====== +-----BEGIN CERTIFICATE----- +MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQG +EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNVBAMTIUNlcnRpZmljYXRpb24g +QXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJ +BgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA +vcqNrLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1UfcIiePyO +CbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcSccf+Hb0v1naMQFXQoOXXDX 
+2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2ZjC1vt7tj/id07sBMOby8w7gLJKA84X5 +KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4Mx1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR ++ScPewavVIMYe+HdVHpRaG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ez +EC8wQjchzDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDaruHqk +lWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221KmYo0SLwX3OSACCK2 +8jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvASh0JWzko/amrzgD5LkhLJuYwTKVY +yrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWvHYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0C +AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R +8bNLtwYgFP6HEtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 +LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJMuYhOZO9sxXq +T2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2eJXLOC62qx1ViC777Y7NhRCOj +y+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VNg64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC +2nz4SNAzqfkHx5Xh9T71XXG68pWpdIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes +5cVAWubXbHssw1abR80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/ +EaEQPkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGcexGATVdVh +mVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+J7x6v+Db9NpSvd4MVHAx +kUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMlOtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGi +kpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWTee5Ehr7XHuQe+w== +-----END CERTIFICATE----- + +WoSign China +============ +-----BEGIN CERTIFICATE----- +MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBGMQswCQYDVQQG +EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMMEkNBIOayg+mAmuagueiv +geS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYD +VQQKExFXb1NpZ24gQ0EgTGltaXRlZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k +8H/rD195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld19AXbbQs5 +uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExfv5RxadmWPgxDT74wwJ85 +dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnkUkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5 +Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+LNVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFy +b7Ao65vh4YOhn0pdr8yb+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc +76DbT52VqyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6KyX2m ++Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0GAbQOXDBGVWCvOGU6 +yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaKJ/kR8slC/k7e3x9cxKSGhxYzoacX +GKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUA +A4ICAQBqinA4WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 +yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj/feTZU7n85iY +r83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6jBAyvd0zaziGfjk9DgNyp115 +j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0A +kLppRQjbbpCBhqcqBT/mhDn4t/lXX0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97 +qA4bLJyuQHCH2u2nFoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Y +jj4Du9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10lO1Hm13ZB +ONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Leie2uPAmvylezkolwQOQv +T8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR12KvxAmLBsX5VYc8T1yaw15zLKYs4SgsO +kI26oQ== +-----END CERTIFICATE----- + +COMODO RSA Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE 
+BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn +dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ +FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+ +5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG +x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX +2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL +OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3 +sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C +GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5 +WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt +rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+ +nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg +tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW +sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp +pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA +zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq +ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52 +7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I +LaZRfyHBNVOFBkpdn627G190 +-----END CERTIFICATE----- + +USERTrust RSA Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz +0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j +Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn +RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O ++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq +/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE +Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM +lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8 +yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+ +eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW 
+FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ +7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ +Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM +8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi +FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi +yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c +J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw +sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx +Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +USERTrust ECC Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2 +0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez +nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV +HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB +HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu +9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl +OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV +MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF +JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R5 +=========================== +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6 +SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS +h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx +uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 +yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G3 +================================== +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE 
+CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y +olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t +x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy +EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K +Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur +mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5 +1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp +07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo +FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE +41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu +yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq +KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1 +v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA +8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b +8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r +mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq +1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI +JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV +tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk= +-----END CERTIFICATE----- + +Staat der Nederlanden EV Root CA +================================ +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M +MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl +cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk +SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW +O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r +0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8 +Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV +XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr +08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV +0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd +74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx +fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa +ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu +c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq +5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN +b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN 
+f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi +5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4 +WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK +DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy +eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg== +-----END CERTIFICATE----- + +IdenTrust Commercial Root CA 1 +============================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS +b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES +MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB +IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld +hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/ +mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi +1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C +XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl +3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy +NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV +WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg +xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix +uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI +hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg +ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt +ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV +YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX +feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro +kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe +2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz +Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R +cGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +IdenTrust Public Sector Root CA 1 +================================= +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv +ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV +UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS +b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy +P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6 +Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI +rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf +qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS +mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn +ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh +LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v +iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL 
+4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw +DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A +mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt +GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt +m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx +NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4 +Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI +ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC +ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ +3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy +bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug +b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw +HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT +DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx +OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP +/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz +HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU +s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y +TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx +AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6 +0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z +iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi +nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+ +vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO +e4pIb4tF9g== +-----END CERTIFICATE----- + +Entrust Root Certification Authority - EC1 +========================================== +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx +FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn +YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw +FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs +LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg +dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy +AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef +9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE 
+FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h +vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8 +kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +CFCA EV ROOT +============ +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE +CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB +IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw +MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD +DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV +BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD +7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN +uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW +ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7 +xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f +py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K +gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol +hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ +tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf +BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q +ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua +4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG +E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX +BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn +aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy +PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX +kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C +ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/LICENSE.md b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/LICENSE.md new file mode 100644 index 0000000..a834fbb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2011-2014 [CONTRIBUTORS.md](https://github.com/geemus/netrc/blob/master/CONTRIBUTORS.md) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/Readme.md b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/Readme.md new file mode 100644 index 0000000..9193474 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/Readme.md @@ -0,0 +1,53 @@ +# Netrc + +This library reads and writes +[`.netrc` files](http://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html). + +## API + +Read a netrc file: + + n = Netrc.read("sample.netrc") + +If the file doesn't exist, Netrc.read will return an empty object. If +the filename ends in ".gpg", it will be decrypted using +[GPG](http://www.gnupg.org/). + +Read the user's default netrc file. + +**On Unix:** `$NETRC/.netrc` or `$HOME/.netrc` (whichever is set first). + +**On Windows:** `%NETRC%\_netrc`, `%HOME%\_netrc`, `%HOMEDRIVE%%HOMEPATH%\_netrc`, or `%USERPROFILE%\_netrc` (whichever is set first). + + n = Netrc.read + +Configure netrc to allow permissive files (with permissions other than 0600): + + Netrc.configure do |config| + config[:allow_permissive_netrc_file] = true + end + +Look up a username and password: + + user, pass = n["example.com"] + +Write a username and password: + + n["example.com"] = user, newpass + n.save + +If you make an entry that wasn't there before, it will be appended +to the end of the file. Sometimes people want to include a comment +explaining that the entry was added automatically. You can do it +like this: + + n.new_item_prefix = "# This entry was added automatically\n" + n["example.com"] = user, newpass + n.save + +Have fun! 
+ +## Running Tests + + $ bundle install + $ bundle exec ruby -e 'Dir.glob "./test/**/test_*.rb", &method(:require)' diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/changelog.txt b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/changelog.txt new file mode 100644 index 0000000..60beb4e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/changelog.txt @@ -0,0 +1,93 @@ +0.11.0 10/29/15 +=============== + +Respect NETRC environment variable +Fix for JRuby PernGen Space + +0.10.3 02/24/15 +=============== + +error when Dir.home is not readable + +0.10.2 12/17/14 +=============== + +set file permissions in /data to be world readable after test runs + +0.10.1 12/14/14 +=============== + +fix bug for `Dir.home` when can't find home + +0.10.0 12/10/14 +=============== + +use `Dir.home` for finding home on Ruby 1.9+ + +0.9.0 12/01/14 +============== + +use HOME or HOMEPATH/HOMEDRIVE to find home on windows + +0.8.0 10/16/14 +============== + +re-revert entry changes with minor bump + +0.7.9 10/16/14 +============== + +revert entry changes for a backwards-compatible version + +0.7.8 10/15/14 +============== + +add entry class to facilitate usage +switch gem source to rubygems.org +use guard, when available via guardfile +add default/read-only behavior +add allow_permissive_netrc_file option +fix an undefined variable path +fix Errno::EACCES error +silence const warnings in test + +0.7.7 08/15/12 +============== + +add newline between entries if one is missing + +0.7.6 08/15/12 +============== + +more unified newline handling +make entries with login/password parsable + + +0.7.5 06/25/12 +============== + +* improved operating system detection + +0.7.4 06/04/12 +============== + +* add support for encrypted files pgp netrc files + +0.7.3 06/04/12 +============== + +* also skip permissions check on cygwin + +0.7.2 05/23/12 +============= + +* use length instead of count on Array, provides compatibility with 1.8.6 + + +0.7.1 03/13/12 +============== + +* add Gemfile to simplify development +* add MIT license +* fix test require path +* fix unused variable assignment (caused warnings) in tests diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/default_only.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/default_only.netrc new file mode 100644 index 0000000..8df77a9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/default_only.netrc @@ -0,0 +1,4 @@ +# this is my netrc with only a default +default + login ld # this is my default username + password pd diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/login.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/login.netrc new file mode 100644 index 0000000..f0ec3b6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/login.netrc @@ -0,0 +1,3 @@ +# this is my login netrc +machine m + login l # this is my username diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/newlineless.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/newlineless.netrc new file mode 100644 index 0000000..5f3b1ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/newlineless.netrc @@ -0,0 +1,4 @@ +# this is my netrc +machine m + login l # this is my username + password p \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/password.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/password.netrc new file mode 100644 index 0000000..ce68670 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/password.netrc @@ -0,0 +1,3 
@@ +# this is my password netrc +machine m + password p # this is my password diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/permissive.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/permissive.netrc new file mode 100644 index 0000000..b92cad3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/permissive.netrc @@ -0,0 +1,4 @@ +# this is my netrc +machine m + login l # this is my username + password p diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample.netrc new file mode 100644 index 0000000..b92cad3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample.netrc @@ -0,0 +1,4 @@ +# this is my netrc +machine m + login l # this is my username + password p diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_multi.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_multi.netrc new file mode 100644 index 0000000..1936d4b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_multi.netrc @@ -0,0 +1,8 @@ +# this is my netrc with multiple machines +machine m + login lm # this is my m-username + password pm + +machine n + login ln # this is my n-username + password pn diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_multi_with_default.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_multi_with_default.netrc new file mode 100644 index 0000000..4e7dfe6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_multi_with_default.netrc @@ -0,0 +1,12 @@ +# this is my netrc with multiple machines and a default +machine m + login lm # this is my m-username + password pm + +machine n + login ln # this is my n-username + password pn + +default + login ld # this is my default username + password pd diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_with_default.netrc b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_with_default.netrc new file mode 100644 index 0000000..76597f3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/data/sample_with_default.netrc @@ -0,0 +1,8 @@ +# this is my netrc with default +machine m + login l # this is my username + password p + +default + login default_login # this is my default username + password default_password diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/lib/netrc.rb b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/lib/netrc.rb new file mode 100644 index 0000000..83e4c5e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/lib/netrc.rb @@ -0,0 +1,251 @@ +require 'rbconfig' + +class Netrc + VERSION = "0.11.0" + + # see http://stackoverflow.com/questions/4871309/what-is-the-correct-way-to-detect-if-ruby-is-running-on-windows + WINDOWS = RbConfig::CONFIG["host_os"] =~ /mswin|mingw|cygwin/ + CYGWIN = RbConfig::CONFIG["host_os"] =~ /cygwin/ + + def self.default_path + File.join(ENV['NETRC'] || home_path, netrc_filename) + end + + def self.home_path + home = Dir.respond_to?(:home) ? Dir.home : ENV['HOME'] + + if WINDOWS && !CYGWIN + home ||= File.join(ENV['HOMEDRIVE'], ENV['HOMEPATH']) if ENV['HOMEDRIVE'] && ENV['HOMEPATH'] + home ||= ENV['USERPROFILE'] + # XXX: old stuff; most likely unnecessary + home = home.tr("\\", "/") unless home.nil? + end + + (home && File.readable?(home)) ? home : Dir.pwd + rescue ArgumentError + return Dir.pwd + end + + def self.netrc_filename + WINDOWS && !CYGWIN ? 
"_netrc" : ".netrc" + end + + def self.config + @config ||= {} + end + + def self.configure + yield(self.config) if block_given? + self.config + end + + def self.check_permissions(path) + perm = File.stat(path).mode & 0777 + if perm != 0600 && !(WINDOWS) && !(Netrc.config[:allow_permissive_netrc_file]) + raise Error, "Permission bits for '#{path}' should be 0600, but are "+perm.to_s(8) + end + end + + # Reads path and parses it as a .netrc file. If path doesn't + # exist, returns an empty object. Decrypt paths ending in .gpg. + def self.read(path=default_path) + check_permissions(path) + data = if path =~ /\.gpg$/ + decrypted = `gpg --batch --quiet --decrypt #{path}` + if $?.success? + decrypted + else + raise Error.new("Decrypting #{path} failed.") unless $?.success? + end + else + File.read(path) + end + new(path, parse(lex(data.lines.to_a))) + rescue Errno::ENOENT + new(path, parse(lex([]))) + end + + class TokenArray < Array + def take + if length < 1 + raise Error, "unexpected EOF" + end + shift + end + + def readto + l = [] + while length > 0 && ! yield(self[0]) + l << shift + end + return l.join + end + end + + def self.lex(lines) + tokens = TokenArray.new + for line in lines + content, comment = line.split(/(\s*#.*)/m) + content.each_char do |char| + case char + when /\s/ + if tokens.last && tokens.last[-1..-1] =~ /\s/ + tokens.last << char + else + tokens << char + end + else + if tokens.last && tokens.last[-1..-1] =~ /\S/ + tokens.last << char + else + tokens << char + end + end + end + if comment + tokens << comment + end + end + tokens + end + + def self.skip?(s) + s =~ /^\s/ + end + + + + # Returns two values, a header and a list of items. + # Each item is a tuple, containing some or all of: + # - machine keyword (including trailing whitespace+comments) + # - machine name + # - login keyword (including surrounding whitespace+comments) + # - login + # - password keyword (including surrounding whitespace+comments) + # - password + # - trailing chars + # This lets us change individual fields, then write out the file + # with all its original formatting. + def self.parse(ts) + cur, item = [], [] + + unless ts.is_a?(TokenArray) + ts = TokenArray.new(ts) + end + + pre = ts.readto{|t| t == "machine" || t == "default"} + + while ts.length > 0 + if ts[0] == 'default' + cur << ts.take.to_sym + cur << '' + else + cur << ts.take + ts.readto{|t| ! skip?(t)} + cur << ts.take + end + + if ts.include?('login') + cur << ts.readto{|t| t == "login"} + ts.take + ts.readto{|t| ! skip?(t)} + cur << ts.take + end + + if ts.include?('password') + cur << ts.readto{|t| t == "password"} + ts.take + ts.readto{|t| ! 
skip?(t)} + cur << ts.take + end + + cur << ts.readto{|t| t == "machine" || t == "default"} + + item << cur + cur = [] + end + + [pre, item] + end + + def initialize(path, data) + @new_item_prefix = '' + @path = path + @pre, @data = data + + if @data && @data.last && :default == @data.last[0] + @default = @data.pop + else + @default = nil + end + end + + attr_accessor :new_item_prefix + + def [](k) + if item = @data.detect {|datum| datum[1] == k} + Entry.new(item[3], item[5]) + elsif @default + Entry.new(@default[3], @default[5]) + end + end + + def []=(k, info) + if item = @data.detect {|datum| datum[1] == k} + item[3], item[5] = info + else + @data << new_item(k, info[0], info[1]) + end + end + + def length + @data.length + end + + def delete(key) + datum = nil + for value in @data + if value[1] == key + datum = value + break + end + end + @data.delete(datum) + end + + def each(&block) + @data.each(&block) + end + + def new_item(m, l, p) + [new_item_prefix+"machine ", m, "\n login ", l, "\n password ", p, "\n"] + end + + def save + if @path =~ /\.gpg$/ + e = IO.popen("gpg -a --batch --default-recipient-self -e", "r+") do |gpg| + gpg.puts(unparse) + gpg.close_write + gpg.read + end + raise Error.new("Encrypting #{@path} failed.") unless $?.success? + File.open(@path, 'w', 0600) {|file| file.print(e)} + else + File.open(@path, 'w', 0600) {|file| file.print(unparse)} + end + end + + def unparse + @pre + @data.map do |datum| + datum = datum.join + unless datum[-1..-1] == "\n" + datum << "\n" + else + datum + end + end.join + end + + Entry = Struct.new(:login, :password) do + alias to_ary to_a + end + +end + +class Netrc::Error < ::StandardError +end diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_lex.rb b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_lex.rb new file mode 100644 index 0000000..e63ff1d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_lex.rb @@ -0,0 +1,58 @@ +$VERBOSE = true +require 'minitest/autorun' + +require File.expand_path("#{File.dirname(__FILE__)}/../lib/netrc") + +class TestLex < Minitest::Test + def test_lex_empty + t = Netrc.lex([]) + assert_equal([], t) + end + + def test_lex_comment + t = Netrc.lex(["# foo\n"]) + assert_equal(["# foo\n"], t) + end + + def test_lex_comment_after_space + t = Netrc.lex([" # foo\n"]) + assert_equal([" # foo\n"], t) + end + + def test_lex_comment_after_word + t = Netrc.lex(["x # foo\n"]) + assert_equal(["x", " # foo\n"], t) + end + + def test_lex_comment_with_hash + t = Netrc.lex(["x # foo # bar\n"]) + assert_equal(["x", " # foo # bar\n"], t) + end + + def test_lex_word + t = Netrc.lex(["x"]) + assert_equal(["x"], t) + end + + def test_lex_two_lines + t = Netrc.lex(["x\ny\n"]) + assert_equal(["x", "\n", "y", "\n"], t) + end + + def test_lex_word_and_comment + t = Netrc.lex(["x\n", "# foo\n"]) + assert_equal(["x", "\n", "# foo\n"], t) + end + + def test_lex_six_words + t = Netrc.lex(["machine m login l password p\n"]) + e = ["machine", " ", "m", " ", "login", " ", "l", " ", "password", " ", "p", "\n"] + assert_equal(e, t) + end + + def test_lex_complex + t = Netrc.lex(["machine sub.domain.com login email@domain.com password pass\n"]) + e = ["machine", " ", "sub.domain.com", " ", "login", " ", "email@domain.com", " ", "password", " ", "pass", "\n"] + assert_equal(e, t) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_netrc.rb b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_netrc.rb new file mode 100644 index 0000000..73c5c25 --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_netrc.rb @@ -0,0 +1,273 @@ +$VERBOSE = true +require 'minitest/autorun' +require 'fileutils' + +require File.expand_path("#{File.dirname(__FILE__)}/../lib/netrc") +require "rbconfig" + +class TestNetrc < Minitest::Test + + def setup + Dir.glob('data/*.netrc').each{|f| File.chmod(0600, f)} + File.chmod(0644, "data/permissive.netrc") + end + + def teardown + Dir.glob('data/*.netrc').each{|f| File.chmod(0644, f)} + end + + def test_parse_empty + pre, items = Netrc.parse(Netrc.lex([])) + assert_equal("", pre) + assert_equal([], items) + end + + def test_parse_file + pre, items = Netrc.parse(Netrc.lex(IO.readlines("data/sample.netrc"))) + assert_equal("# this is my netrc\n", pre) + exp = [["machine ", + "m", + "\n login ", + "l", + " # this is my username\n password ", + "p", + "\n"]] + assert_equal(exp, items) + end + + def test_login_file + pre, items = Netrc.parse(Netrc.lex(IO.readlines("data/login.netrc"))) + assert_equal("# this is my login netrc\n", pre) + exp = [["machine ", + "m", + "\n login ", + "l", + " # this is my username\n"]] + assert_equal(exp, items) + end + + def test_password_file + pre, items = Netrc.parse(Netrc.lex(IO.readlines("data/password.netrc"))) + assert_equal("# this is my password netrc\n", pre) + exp = [["machine ", + "m", + "\n password ", + "p", + " # this is my password\n"]] + assert_equal(exp, items) + end + + def test_missing_file + n = Netrc.read("data/nonexistent.netrc") + assert_equal(0, n.length) + end + + def test_permission_error + original_windows = Netrc::WINDOWS + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, false) + Netrc.read("data/permissive.netrc") + assert false, "Should raise an error if permissions are wrong on a non-windows system." + rescue Netrc::Error + assert true, "" + ensure + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, original_windows) + end + + def test_allow_permissive_netrc_file_option + Netrc.configure do |config| + config[:allow_permissive_netrc_file] = true + end + original_windows = Netrc::WINDOWS + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, false) + Netrc.read("data/permissive.netrc") + assert true, "" + rescue Netrc::Error + assert false, "Should not raise an error if allow_permissive_netrc_file option is set to true" + ensure + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, original_windows) + Netrc.configure do |config| + config[:allow_permissive_netrc_file] = false + end + end + + def test_permission_error_windows + original_windows = Netrc::WINDOWS + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, true) + Netrc.read("data/permissive.netrc") + rescue Netrc::Error + assert false, "Should not raise an error if permissions are wrong on a non-windows system." 
+ ensure + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, original_windows) + end + + def test_round_trip + n = Netrc.read("data/sample.netrc") + assert_equal(IO.read("data/sample.netrc"), n.unparse) + end + + def test_set + n = Netrc.read("data/sample.netrc") + n["m"] = "a", "b" + exp = "# this is my netrc\n"+ + "machine m\n"+ + " login a # this is my username\n"+ + " password b\n" + assert_equal(exp, n.unparse) + end + + def test_set_get + n = Netrc.read("data/sample.netrc") + n["m"] = "a", "b" + assert_equal(["a", "b"], n["m"].to_a) + end + + def test_add + n = Netrc.read("data/sample.netrc") + n.new_item_prefix = "# added\n" + n["x"] = "a", "b" + exp = "# this is my netrc\n"+ + "machine m\n"+ + " login l # this is my username\n"+ + " password p\n"+ + "# added\n"+ + "machine x\n"+ + " login a\n"+ + " password b\n" + assert_equal(exp, n.unparse) + end + + def test_add_newlineless + n = Netrc.read("data/newlineless.netrc") + n.new_item_prefix = "# added\n" + n["x"] = "a", "b" + exp = "# this is my netrc\n"+ + "machine m\n"+ + " login l # this is my username\n"+ + " password p\n"+ + "# added\n"+ + "machine x\n"+ + " login a\n"+ + " password b\n" + assert_equal(exp, n.unparse) + end + + def test_add_get + n = Netrc.read("data/sample.netrc") + n.new_item_prefix = "# added\n" + n["x"] = "a", "b" + assert_equal(["a", "b"], n["x"].to_a) + end + + def test_get_missing + n = Netrc.read("data/sample.netrc") + assert_equal(nil, n["x"]) + end + + def test_save + n = Netrc.read("data/sample.netrc") + n.save + assert_equal(File.read("data/sample.netrc"), n.unparse) + end + + def test_save_create + FileUtils.rm_f("/tmp/created.netrc") + n = Netrc.read("/tmp/created.netrc") + n.save + unless Netrc::WINDOWS + assert_equal(0600, File.stat("/tmp/created.netrc").mode & 0777) + end + end + + def test_encrypted_roundtrip + if `gpg --list-keys 2> /dev/null` != "" + FileUtils.rm_f("/tmp/test.netrc.gpg") + n = Netrc.read("/tmp/test.netrc.gpg") + n["m"] = "a", "b" + n.save + assert_equal(0600, File.stat("/tmp/test.netrc.gpg").mode & 0777) + netrc = Netrc.read("/tmp/test.netrc.gpg")["m"] + assert_equal("a", netrc.login) + assert_equal("b", netrc.password) + end + end + + def test_missing_environment + nil_home = nil + ENV["HOME"], nil_home = nil_home, ENV["HOME"] + assert_equal File.join(Dir.pwd, '.netrc'), Netrc.default_path + ensure + ENV["HOME"], nil_home = nil_home, ENV["HOME"] + end + + def test_netrc_environment_variable + ENV["NETRC"] = File.join(Dir.pwd, 'data') + assert_equal File.join(Dir.pwd, 'data', '.netrc'), Netrc.default_path + ensure + ENV.delete("NETRC") + end + + def test_read_entry + entry = Netrc.read("data/sample.netrc")['m'] + assert_equal 'l', entry.login + assert_equal 'p', entry.password + + # hash-style + assert_equal 'l', entry[:login] + assert_equal 'p', entry[:password] + end + + def test_write_entry + n = Netrc.read("data/sample.netrc") + entry = n['m'] + entry.login = 'new_login' + entry.password = 'new_password' + n['m'] = entry + assert_equal(['new_login', 'new_password'], n['m'].to_a) + end + + def test_entry_splat + e = Netrc::Entry.new("user", "pass") + user, pass = *e + assert_equal("user", user) + assert_equal("pass", pass) + end + + def test_entry_implicit_splat + e = Netrc::Entry.new("user", "pass") + user, pass = e + assert_equal("user", user) + assert_equal("pass", pass) + end + + def test_with_default + netrc = Netrc.read('data/sample_with_default.netrc') + assert_equal(['l', 'p'], netrc['m'].to_a) + assert_equal(['default_login', 'default_password'], 
netrc['unknown'].to_a) + end + + def test_multi_without_default + netrc = Netrc.read('data/sample_multi.netrc') + assert_equal(['lm', 'pm'], netrc['m'].to_a) + assert_equal(['ln', 'pn'], netrc['n'].to_a) + assert_equal([], netrc['other'].to_a) + end + + def test_multi_with_default + netrc = Netrc.read('data/sample_multi_with_default.netrc') + assert_equal(['lm', 'pm'], netrc['m'].to_a) + assert_equal(['ln', 'pn'], netrc['n'].to_a) + assert_equal(['ld', 'pd'], netrc['other'].to_a) + end + + def test_default_only + netrc = Netrc.read('data/default_only.netrc') + assert_equal(['ld', 'pd'], netrc['m'].to_a) + assert_equal(['ld', 'pd'], netrc['other'].to_a) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_parse.rb b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_parse.rb new file mode 100644 index 0000000..9e61c69 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/netrc-0.11.0/test/test_parse.rb @@ -0,0 +1,34 @@ +$VERBOSE = true +require 'minitest/autorun' + +require File.expand_path("#{File.dirname(__FILE__)}/../lib/netrc") + +class TestParse < Minitest::Test + def test_parse_empty + pre, items = Netrc.parse([]) + assert_equal("", pre) + assert_equal([], items) + end + + def test_parse_comment + pre, items = Netrc.parse(["# foo\n"]) + assert_equal("# foo\n", pre) + assert_equal([], items) + end + + def test_parse_item + t = ["machine", " ", "m", " ", "login", " ", "l", " ", "password", " ", "p", "\n"] + pre, items = Netrc.parse(t) + assert_equal("", pre) + e = [["machine ", "m", " login ", "l", " password ", "p", "\n"]] + assert_equal(e, items) + end + + def test_parse_two_items + t = ["machine", " ", "m", " ", "login", " ", "l", " ", "password", " ", "p", "\n"] * 2 + pre, items = Netrc.parse(t) + assert_equal("", pre) + e = [["machine ", "m", " login ", "l", " password ", "p", "\n"]] * 2 + assert_equal(e, items) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.github/FUNDING.yml b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.github/FUNDING.yml new file mode 100644 index 0000000..085e1ac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: "rubygems/public_suffix" +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with a single custom sponsorship URL diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.github/workflows/tests.yml b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.github/workflows/tests.yml new file mode 100644 index 0000000..2de9058 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.github/workflows/tests.yml @@ -0,0 +1,36 @@ +name: Tests + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + + build: + strategy: + matrix: + ruby-version: + # - "2.3" + - "2.4" + - "2.5" + - "2.6" + - "2.7" + platform: [ubuntu-latest] + + runs-on: ${{ matrix.platform }} + steps: + + - uses: actions/checkout@v2 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ 
matrix.ruby-version }} + + - name: Install dependencies + run: bundle install + + - name: Run tests + run: bundle exec rake diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.gitignore b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.gitignore new file mode 100644 index 0000000..8506292 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.gitignore @@ -0,0 +1,8 @@ +# Bundler +/.bundle +/Gemfile.lock +/pkg/* + +# YARD +/.yardoc +/yardoc/ diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.rubocop.yml b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.rubocop.yml new file mode 100644 index 0000000..af922f5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.rubocop.yml @@ -0,0 +1,36 @@ +inherit_from: + - .rubocop_opinionated.yml + +AllCops: + Exclude: + # Exclude .gemspec files because they are generally auto-generated + - '*.gemspec' + # Exclude vendored folders + - 'tmp/**/*' + - 'vendor/**/*' + # Exclude artifacts + - 'pkg/**/*' + # Other + - 'test/benchmarks/**/*' + - 'test/profilers/**/*' + +# I often use @_variable to avoid clashing. +Naming/MemoizedInstanceVariableName: + Enabled: false + +Style/ClassAndModuleChildren: + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# Dear Rubocop, I don't want to use String#strip_heredoc +Layout/HeredocIndentation: + Enabled: false + +Style/WordArray: + Enabled: false + MinSize: 3 + +Style/SymbolArray: + Enabled: false + MinSize: 3 diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.rubocop_opinionated.yml b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.rubocop_opinionated.yml new file mode 100644 index 0000000..401e684 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.rubocop_opinionated.yml @@ -0,0 +1,157 @@ +AllCops: + Exclude: + # Exclude .gemspec files because they are generally auto-generated + - '*.gemspec' + # Exclude vendored folders + - 'tmp/**/*' + - 'vendor/**/*' + NewCops: enable + +# [codesmell] +Layout/LineLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + Max: 100 + +# [codesmell] +Metrics/AbcSize: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/BlockLength: + Enabled: false + +# [codesmell] +Metrics/CyclomaticComplexity: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/ClassLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/MethodLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + Max: 10 + +# [codesmell] +Metrics/ModuleLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/ParameterLists: + Enabled: false + Max: 5 + +# [codesmell] +Metrics/PerceivedComplexity: + Enabled: false + +# Do not use "and" or "or" in conditionals, but for readability we can use it +# to chain executions. Just beware of operator order. +Style/AndOr: + EnforcedStyle: conditionals + +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + +# Double empty lines are useful to separate conceptually different methods +# in the same class or module. +Layout/EmptyLines: + Enabled: false + +# In most cases, a space is nice. Sometimes, it's not. +# Just be consistent with the rest of the surrounding code. +Layout/EmptyLinesAroundClassBody: + Enabled: false + +# In most cases, a space is nice. Sometimes, it's not. 
+# Just be consistent with the rest of the surrounding code. +Layout/EmptyLinesAroundModuleBody: + Enabled: false + +# This is quite buggy, as it doesn't recognize double lines. +# Double empty lines are useful to separate conceptually different methods +# in the same class or module. +Layout/EmptyLineBetweenDefs: + Enabled: false + +# I personally don't care about the format style. +# In most cases I like to use %, but not at the point I want to enforce it +# as a convention in the entire code. +Style/FormatString: + Enabled: false + +# Annotated tokens (like %s) are a good thing, but in most cases we don't need them. +# %s is a simpler and straightforward version that works in almost all cases. So don't complain. +Style/FormatStringToken: + Enabled: false + +# unless is not always cool. +Style/NegatedIf: + Enabled: false + +# For years, %w() has been the de-facto standard. A lot of libraries are using (). +# Switching to [] would be a nightmare. +Style/PercentLiteralDelimiters: + Enabled: false + +# There are cases were the inline rescue is ok. We can either downgrade the severity, +# or rely on the developer judgement on a case-by-case basis. +Style/RescueModifier: + Enabled: false + +Style/SymbolArray: + EnforcedStyle: brackets + +# Sorry, but using trailing spaces helps readability. +# +# %w( foo bar ) +# +# looks better to me than +# +# %w( foo bar ) +# +Layout/SpaceInsidePercentLiteralDelimiters: + Enabled: false + +# Hate It or Love It, I prefer double quotes as this is more consistent +# with several other programming languages and the output of puts and inspect. +Style/StringLiterals: + EnforcedStyle: double_quotes + +# It's nice to be consistent. The trailing comma also allows easy reordering, +# and doesn't cause a diff in Git when you add a line to the bottom. +Style/TrailingCommaInArrayLiteral: + EnforcedStyleForMultiline: consistent_comma +Style/TrailingCommaInHashLiteral: + EnforcedStyleForMultiline: consistent_comma + +Style/TrivialAccessors: + # IgnoreClassMethods because I want to be able to define class-level accessors + # that sets an instance variable on the metaclass, such as: + # + # def self.default=(value) + # @default = value + # end + # + IgnoreClassMethods: true diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.travis.yml b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.travis.yml new file mode 100644 index 0000000..30f78e8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.travis.yml @@ -0,0 +1,23 @@ +language: ruby + +rvm: + # - 2.3 + - 2.4 + - 2.5 + - 2.6 + - 2.7 + - ruby-head + +env: + - COVERAGE=1 + +cache: + - bundler + +matrix: + allow_failures: + - rvm: ruby-head + +before_install: + - gem update --system + - gem install bundler diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.yardopts b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.yardopts new file mode 100644 index 0000000..0a782de --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/.yardopts @@ -0,0 +1 @@ +--title 'Ruby Public Suffix API Documentation' diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/2.0-Upgrade.md b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/2.0-Upgrade.md new file mode 100644 index 0000000..1a10bfb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/2.0-Upgrade.md @@ -0,0 +1,52 @@ +# Welcome to PublicSuffix 2.0! + +PublicSuffix 2.0 contains a rewritten internal representation and comparison logic, that drastically increases the lookup performance. 
The new version also changes several internal and external API. + +This document documents the most relevant changes to help you upgrading from PublicSuffix 1.0 to 2.0. + +## What's New + +- The library is now 100% compliants with the official PublicSuffix tests. The major breaking change you may experience, is that if a domain passed as input doesn't match any rule, the rule `*` is assumed. You can override this behavior by passing a custom default rule with the `default_rule` option. The old behavior can be restored by passing `default_rule: nil`. +- `PublicSuffix.domain` is a new method that parses the input and returns the domain (combination of second level domain + suffix). This is a convenient helper to parse a domain name, for example when you need to determine the cookie or SSL scope. +- Added the ability to disable the use of private domains either at runtime, in addition to the ability to not load the private domains section when reading the list (`private_domains: false`). This feature also superseded the `private_domains` class-level attribute, that is no longer available. + +## Upgrade + +When upgrading, here's the most relevant changes to keep an eye on: + +- Several futile utility helpers were removed, such as `Domain#rule`, `Domain#is_a_domain?`, `Domain#is_a_subdomain?`, `Domain#valid?`. You can easily obtain the same result by having a custom method that reconstructs the logic, and/or calling `PublicSuffix.{domain|parse}(domain.to_s)`. +- `PublicSuffix::List.private_domains` is no longer available. Instead, you now have two ways to enable/disable the private domains: + + 1. At runtime, by using the `ignore_private` option + + ```ruby + PublicSuffix.domain("something.blogspot.com", ignore_private: true) + ``` + + 1. Loading a filtered list: + + ```ruby + # Disable support for private TLDs + PublicSuffix::List.default = PublicSuffix::List.parse(File.read(PublicSuffix::List::DEFAULT_LIST_PATH), private_domains: false) + # => "blogspot.com" + PublicSuffix.domain("something.blogspot.com") + # => "blogspot.com" + ``` +- Now that the library is 100% compliant with the official PublicSuffix algorithm, if a domain passed as input doesn't match any rule, the wildcard rule `*` is assumed. This means that unlisted TLDs will be considered valid by default, when they would have been invalid in 1.x. However, you can override this behavior to emulate the 1.x behavior if needed: + + ```ruby + # 1.x: + + PublicSuffix.valid?("google.commm") + # => false + + # 2.x: + + PublicSuffix.valid?("google.commm") + # => true + + # Overriding 2.x behavior if needed: + + PublicSuffix.valid?("google.commm", default_rule: nil) + # => false + ```` diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/CHANGELOG.md b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/CHANGELOG.md new file mode 100644 index 0000000..4861d62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/CHANGELOG.md @@ -0,0 +1,406 @@ +# Changelog + +This project uses [Semantic Versioning 2.0.0](https://semver.org/). + + +## 4.0.6 + +### Changed + +- Updated definitions. + + +## 4.0.5 + +### Changed + +- Updated definitions. + + +## 4.0.4 + +### Changed + +- Updated definitions. + + +## 4.0.3 + +### Fixed + +- Fixed 2.7 deprecations and warnings (GH-167). [Thanks @BrianHawley] + + +## 4.0.2 + +### Changed + +- Updated definitions. + + +## 4.0.1 + +### Changed + +- Updated definitions. + + +## 4.0.0 + +### Changed + +- Minimum Ruby version is 2.3 + + +## Release 3.1.1 + +- CHANGED: Updated definitions. 
+- CHANGED: Rolled back support for Ruby 2.3 (GH-161, GH-162) + +IMPORTANT: 3.x is the latest version compatible with Ruby 2.1 and Ruby 2.2. + + +## Release 3.1.0 + +- CHANGED: Updated definitions. +- CHANGED: Minimum Ruby version is 2.3 +- CHANGED: Upgraded to Bundler 2.x + + +## Release 3.0.3 + +- CHANGED: Updated definitions. + + +## Release 3.0.2 + +- CHANGED: Updated definitions. + + +## Release 3.0.1 + +- CHANGED: Updated definitions. +- CHANGED: Improve performance and avoid allocation (GH-146). [Thanks @robholland] + + +## Release 3.0.0 + +This new version includes a major redesign of the library internals, with the goal to drastically +improve the lookup time while reducing storage space. + +For this reason, several public methods that are no longer applicable have been deprecated +and/or removed. You can find more information at GH-133. + +- CHANGED: Updated definitions. +- CHANGED: Dropped support for Ruby < 2.1 +- CHANGED: `PublicSuffix::List#rules` is now protected. You should not rely on it as the internal rule representation is subject to change to optimize performances. +- CHANGED: Removed `PublicSuffix::List.clear`, it was an unnecessary accessor method. Use `PublicSuffix::List.default = nil` if you **really** need to reset the default list. You shouldn't. +- CHANGED: `PublicSuffix::List#select` is now private. You should not use it, instead use `PublicSuffix::List#find`. +- CHANGED: `PublicSuffix::List` no longer implements Enumerable. Instead, use `#each` to loop over, or get an Enumerator. +- CHANGED: Redesigned internal list storage and lookup algorithm to achieve O(1) lookup time (see GH-133). + + +## Release 2.0.5 + +- CHANGED: Updated definitions. +- CHANGED: Initialization performance improvements (GH-128). [Thanks @casperisfine] + + +## Release 2.0.4 + +- FIXED: Fix a bug that caused the GEM to be published with the wrong version number in the gemspec (GH-121). + +- CHANGED: Updated definitions. + + +## Release 2.0.3 + +- CHANGED: Updated definitions. + + +## Release 2.0.2 + +- CHANGED: Updated definitions. + + +## Release 2.0.1 + +- FIXED: Fix bug that prevented .valid? to reset the default rule + + +## Release 2.0.0 + +- NEW: Added PublicSuffix.domain # => sld.tld +- NEW: Added the ability to disable the use of private domains either at runtime, in addition to the ability to not load the private domains section when reading the list (`private_domains: false`). This feature also superseded the `private_domains` class-level attribute, that is no longer available. + +- CHANGED: Considerable performance improvements (GH-92) +- CHANGED: Updated definitions. +- CHANGED: Removed deprecated PublicSuffix::InvalidDomain exception +- CHANGED: If the suffix is now listed, then the prevaling rule is "*" as defined by the PSL algorithm (GH-91) +- CHANGED: Input validation is performed only if you call `PublicSuffix.parse` or `PublicSuffix.list` +- CHANGED: Input with leading dot is invalid per PSL acceptance tests +- CHANGED: Removed `private_domains` class-level attribute. It is replaced by the `private_domains: false` option in the list parse method. +- CHANGED: The default list now assumes you use UTF-8 for reading the input (GH-94), + +- REMOVED: Removed futile utility helpers such as `Domain#rule`, `Domain#is_a_domain?`, `Domain#is_a_subdomain?`, `Domain#valid?`. You can easily obtain the same result by having a custom method that reconstructs the logic, and/or calling `PublicSuffix.{domain|parse}(domain.to_s)`. 
+ + +## Release 1.5.3 + +- FIXED: Don't duplicate rule indices when creating index (GH-77). [Thanks @ags] + +- CHANGED: Updated definitions. + + +## Release 1.5.2 + +- CHANGED: Updated definitions. + + +## Release 1.5.1 + +- FIXED: Ignore case for parsing and validating (GH-62) + +- CHANGED: Updated definitions. + + +## Release 1.5.0 + +- CHANGED: Dropped support for Ruby < 2.0 + +- CHANGED: Updated definitions. + + +## Release 1.4.6 + +- CHANGED: Updated definitions. + + +## Release 1.4.5 + +- CHANGED: Updated definitions. + + +## Release 1.4.4 + +- CHANGED: Updated definitions. + + +## Release 1.4.3 + +- CHANGED: Updated definitions. + + +## Release 1.4.2 + +- CHANGED: Updated definitions. + + +## Release 1.4.1 + +- CHANGED: Updated definitions. + + +## Release 1.4.0 + +- CHANGED: Moved the definitions in the lib folder. + +- CHANGED: Updated definitions. + + +## Release 1.3.3 + +- CHANGED: Updated definitions. + + +## Release 1.3.2 + +- CHANGED: Updated definitions. + + +## Release 1.3.1 + +- CHANGED: Updated definitions. + + +## Release 1.3.0 + +- NEW: Ability to skip Private Domains (GH-28). [Thanks @rb2k] + +- CHANGED: Updated definitions. + + +## Release 1.2.1 + +- CHANGED: Updated definitions. + + +## Release 1.2.0 + +- NEW: Allow a custom List on `PublicSuffix.parse` (GH-26). [Thanks @itspriddle] + +- FIXED: PublicSuffix.parse and PublicSuffix.valid? crashes when input is nil (GH-20). + +- CHANGED: Updated definitions. + + +## Release 1.1.3 + +- CHANGED: Updated definitions. + + +## Release 1.1.2 + +- CHANGED: Updated definitions. + + +## Release 1.1.1 + +- CHANGED: Updated definitions. + + +## Release 1.1.0 + +- FIXED: #valid? and #parse consider URIs as valid domains (GH-15) + +- CHANGED: Updated definitions. + +- CHANGED: Removed deprecatd PublicSuffixService::RuleList. + + +## Release 1.0.0 + +- CHANGED: Updated definitions. + + +## Release 1.0.0.rc1 + +The library is now known as PublicSuffix. + + +## Release 0.9.1 + +- CHANGED: Renamed PublicSuffixService::RuleList to PublicSuffixService::List. + +- CHANGED: Renamed PublicSuffixService::List#list to PublicSuffixService::List#rules. + +- CHANGED: Renamed PublicSuffixService to PublicSuffix. + +- CHANGED: Updated definitions. + + +## Release 0.9.0 + +- CHANGED: Minimum Ruby version increased to Ruby 1.8.7. + +- CHANGED: rake/gempackagetask is deprecated. Use rubygems/package_task instead. + + +## Release 0.8.4 + +- FIXED: Reverted bugfix for issue #12 for Ruby 1.8.6. + This is the latest version compatible with Ruby 1.8.6. + + +## Release 0.8.3 + +- FIXED: Fixed ArgumentError: invalid byte sequence in US-ASCII with Ruby 1.9.2 (#12). + +- CHANGED: Updated definitions (#11). + +- CHANGED: Renamed definitions.txt to definitions.dat. + + +## Release 0.8.2 + +- NEW: Added support for rubygems-test. + +- CHANGED: Integrated Bundler. + +- CHANGED: Updated definitions. + + +## Release 0.8.1 + +- FIXED: The files in the release 0.8.0 have wrong permission 600 and can't be loaded (#10). + + +## Release 0.8.0 + +- CHANGED: Update public suffix list to d1a5599b49fa 2010-10-25 15:10 +0100 (#9) + +- NEW: Add support for Fully Qualified Domain Names (#7) + + +## Release 0.7.0 + +- CHANGED: Using YARD to document the code instead of RDoc. + +- FIXED: RuleList cache is not recreated when a new rule is appended to the list (#6) + +- FIXED: PublicSuffixService.valid? 
should return false if the domain is not defined or not allowed (#4, #5) + + +## Release 0.6.0 + +- NEW: PublicSuffixService.parse raises DomainNotAllowed when trying to parse a domain name + which exists, but is not allowed by the current definition list (#3) + + PublicSuffixService.parse("nic.do") + # => PublicSuffixService::DomainNotAllowed + +- CHANGED: Renamed PublicSuffixService::InvalidDomain to PublicSuffixService::DomainInvalid + + +## Release 0.5.2 + +- CHANGED: Update public suffix list to 248ea690d671 2010-09-16 18:02 +0100 + + +## Release 0.5.1 + +- CHANGED: Update public suffix list to 14dc66dd53c1 2010-09-15 17:09 +0100 + + +## Release 0.5.0 + +- CHANGED: Improve documentation for Domain#domain and Domain#subdomain (#1). + +- CHANGED: Performance improvements (#2). + + +## Release 0.4.0 + +- CHANGED: Rename library from DomainName to PublicSuffixService to reduce the probability of name conflicts. + + +## Release 0.3.1 + +- Deprecated DomainName library. + + +## Release 0.3.0 + +- CHANGED: DomainName#domain and DomainName#subdomain are no longer alias of Domain#sld and Domain#tld. + +- CHANGED: Removed DomainName#labels and decoupled Rule from DomainName. + +- CHANGED: DomainName#valid? no longer instantiates new DomainName objects. This means less overhead. + +- CHANGED: Refactoring the entire DomainName API. Removed the internal on-the-fly parsing. Added a bunch of new methods to check and validate the DomainName. + + +## Release 0.2.0 + +- NEW: DomainName#valid? + +- NEW: DomainName#parse and DomainName#parse! + +- NEW: DomainName#valid_domain? and DomainName#valid_subdomain? + +- CHANGED: Make sure RuleList lookup is only performed once. + + +## Release 0.1.0 + +- Initial version diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/Gemfile b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/Gemfile new file mode 100644 index 0000000..6ae6816 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/Gemfile @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +source "https://rubygems.org" + +gemspec + +gem "rake" + +gem "codecov", require: false +gem "memory_profiler", require: false +gem "minitest" +gem "minitest-reporters" +gem "mocha" +gem "rubocop", "~>0.90", require: false +gem "yard" diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/LICENSE.txt b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/LICENSE.txt new file mode 100644 index 0000000..3bbd112 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2009-2020 Simone Carletti + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/README.md b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/README.md new file mode 100644 index 0000000..12ec7a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/README.md @@ -0,0 +1,207 @@ +# Public Suffix for Ruby + +PublicSuffix is a Ruby domain name parser based on the [Public Suffix List](https://publicsuffix.org/). + +[![Build Status](https://travis-ci.com/weppos/publicsuffix-ruby.svg?branch=master)](https://travis-ci.com/weppos/publicsuffix-ruby) +[![Tidelift dependencies](https://tidelift.com/badges/package/rubygems/public_suffix)](https://tidelift.com/subscription/pkg/rubygems-public-suffix?utm_source=rubygems-public-suffix&utm_medium=referral&utm_campaign=enterprise) + + +## Links + +- [Homepage](https://simonecarletti.com/code/publicsuffix-ruby) +- [Repository](https://github.com/weppos/publicsuffix-ruby) +- [API Documentation](https://rubydoc.info/gems/public_suffix) +- [Introducing the Public Suffix List library for Ruby](https://simonecarletti.com/blog/2010/06/public-suffix-list-library-for-ruby/) + + +## Requirements + +PublicSuffix requires **Ruby >= 2.3**. For an older versions of Ruby use a previous release. + + +## Installation + +You can install the gem manually: + +```shell +gem install public_suffix +``` + +Or use Bundler and define it as a dependency in your `Gemfile`: + +```ruby +gem 'public_suffix' +``` + +If you are upgrading to 2.0, see [2.0-Upgrade.md](2.0-Upgrade.md). + +## Usage + +Extract the domain out from a name: + +```ruby +PublicSuffix.domain("google.com") +# => "google.com" +PublicSuffix.domain("www.google.com") +# => "google.com" +PublicSuffix.domain("www.google.co.uk") +# => "google.co.uk" +``` + +Parse a domain without subdomains: + +```ruby +domain = PublicSuffix.parse("google.com") +# => # +domain.tld +# => "com" +domain.sld +# => "google" +domain.trd +# => nil +domain.domain +# => "google.com" +domain.subdomain +# => nil +``` + +Parse a domain with subdomains: + +```ruby +domain = PublicSuffix.parse("www.google.com") +# => # +domain.tld +# => "com" +domain.sld +# => "google" +domain.trd +# => "www" +domain.domain +# => "google.com" +domain.subdomain +# => "www.google.com" +``` + +Simple validation example: + +```ruby +PublicSuffix.valid?("google.com") +# => true + +PublicSuffix.valid?("www.google.com") +# => true + +# Explicitly forbidden, it is listed as a private domain +PublicSuffix.valid?("blogspot.com") +# => false + +# Unknown/not-listed TLD domains are valid by default +PublicSuffix.valid?("example.tldnotlisted") +# => true +``` + +Strict validation (without applying the default * rule): + +```ruby +PublicSuffix.valid?("example.tldnotlisted", default_rule: nil) +# => false +``` + + +## Fully Qualified Domain Names + +This library automatically recognizes Fully Qualified Domain Names. A FQDN is a domain name that end with a trailing dot. + +```ruby +# Parse a standard domain name +PublicSuffix.domain("www.google.com") +# => "google.com" + +# Parse a fully qualified domain name +PublicSuffix.domain("www.google.com.") +# => "google.com" +``` + +## Private domains + +This library has support for switching off support for private (non-ICANN). 
+ +```ruby +# Extract a domain including private domains (by default) +PublicSuffix.domain("something.blogspot.com") +# => "something.blogspot.com" + +# Extract a domain excluding private domains +PublicSuffix.domain("something.blogspot.com", ignore_private: true) +# => "blogspot.com" + +# It also works for #parse and #valid? +PublicSuffix.parse("something.blogspot.com", ignore_private: true) +PublicSuffix.valid?("something.blogspot.com", ignore_private: true) +``` + +If you don't care about private domains at all, it's more efficient to exclude them when the list is parsed: + +```ruby +# Disable support for private TLDs +PublicSuffix::List.default = PublicSuffix::List.parse(File.read(PublicSuffix::List::DEFAULT_LIST_PATH), private_domains: false) +# => "blogspot.com" +PublicSuffix.domain("something.blogspot.com") +# => "blogspot.com" +``` + + +## What is the Public Suffix List? + +The [Public Suffix List](https://publicsuffix.org) is a cross-vendor initiative to provide an accurate list of domain name suffixes. + +The Public Suffix List is an initiative of the Mozilla Project, but is maintained as a community resource. It is available for use in any software, but was originally created to meet the needs of browser manufacturers. + +A "public suffix" is one under which Internet users can directly register names. Some examples of public suffixes are ".com", ".co.uk" and "pvt.k12.wy.us". The Public Suffix List is a list of all known public suffixes. + + +## Why the Public Suffix List is better than any available Regular Expression parser? + +Previously, browsers used an algorithm which basically only denied setting wide-ranging cookies for top-level domains with no dots (e.g. com or org). However, this did not work for top-level domains where only third-level registrations are allowed (e.g. co.uk). In these cases, websites could set a cookie for co.uk which will be passed onto every website registered under co.uk. + +Clearly, this was a security risk as it allowed websites other than the one setting the cookie to read it, and therefore potentially extract sensitive information. + +Since there is no algorithmic method of finding the highest level at which a domain may be registered for a particular top-level domain (the policies differ with each registry), the only method is to create a list of all top-level domains and the level at which domains can be registered. This is the aim of the effective TLD list. + +As well as being used to prevent cookies from being set where they shouldn't be, the list can also potentially be used for other applications where the registry controlled and privately controlled parts of a domain name need to be known, for example when grouping by top-level domains. + +Source: https://wiki.mozilla.org/Public_Suffix_List + +Not convinced yet? Check out [this real world example](https://stackoverflow.com/q/288810/123527). + + +## Does PublicSuffix make requests to Public Suffix List website? + +No. PublicSuffix comes with a bundled list. It does not make any HTTP requests to parse or validate a domain. + + +## Support + +Library documentation is auto-generated from the [README](https://github.com/weppos/publicsuffix-ruby/blob/master/README.md) and the source code, and it's available at https://rubydoc.info/gems/public_suffix. + +- The PublicSuffix bug tracker is here: https://github.com/weppos/publicsuffix-ruby/issues +- The PublicSuffix code repository is here: https://github.com/weppos/publicsuffix-ruby. Contributions are welcome! 
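For quick reference, the usage examples above can be combined into one consolidated sketch (not taken from the gem itself; the input names are illustrative, and `PublicSuffix::DomainInvalid` / `PublicSuffix::DomainNotAllowed` are the error classes referenced in this gem's changelog):

```ruby
require "public_suffix"

["www.google.co.uk", "something.blogspot.com", ".leading-dot-is-invalid"].each do |name|
  begin
    domain = PublicSuffix.parse(name)
    # domain/tld/trd are the accessors shown in the Usage section above
    puts "#{name} -> domain=#{domain.domain} tld=#{domain.tld} trd=#{domain.trd.inspect}"
  rescue PublicSuffix::DomainInvalid, PublicSuffix::DomainNotAllowed => e
    puts "#{name} rejected: #{e.message}"
  end
end
```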
Please include tests and/or feature coverage for every patch, and create a topic branch for every separate change you make. + +[Consider subscribing to Tidelift which provides Enterprise support for this project](https://tidelift.com/subscription/pkg/rubygems-public-suffix?utm_source=rubygems-public-suffix&utm_medium=referral&utm_campaign=readme) as part of the Tidelift Subscription. Tidelift subscriptions also help the maintainers by funding the project, which in turn allows us to ship releases, bugfixes, and security updates more often. + + +## Security and Vulnerability Reporting + +Full information and description of our security policy please visit [`SECURITY.md`](SECURITY.md) + + +## Changelog + +See the [CHANGELOG.md](CHANGELOG.md) file for details. + + +## License + +Copyright (c) 2009-2020 Simone Carletti. This is Free Software distributed under the MIT license. + +The [Public Suffix List source](https://publicsuffix.org/list/) is subject to the terms of the Mozilla Public License, v. 2.0. diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/Rakefile b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/Rakefile new file mode 100644 index 0000000..502270a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/Rakefile @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "bundler/gem_tasks" + +# By default, run tests and linter. +task default: [:test, :rubocop] + + +require "rake/testtask" + +Rake::TestTask.new do |t| + t.libs = %w( lib test ) + t.pattern = "test/**/*_test.rb" + t.verbose = !ENV["VERBOSE"].nil? + t.warning = !ENV["WARNING"].nil? +end + +require "rubocop/rake_task" + +RuboCop::RakeTask.new + + +require "yard/rake/yardoc_task" + +YARD::Rake::YardocTask.new(:yardoc) do |y| + y.options = ["--output-dir", "yardoc"] +end + +CLOBBER.include "yardoc" + + +task :benchmarks do + Dir["benchmarks/bm_*.rb"].each do |file| + sh "ruby #{file}" + end +end +task default: [:benchmarks] if ENV["BENCHMARKS"] == "1" + + +desc "Downloads the Public Suffix List file from the repository and stores it locally." +task :"update-list" do + require "net/http" + + DEFINITION_URL = "https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat" + + File.open("data/list.txt", "w+") do |f| + response = Net::HTTP.get_response(URI.parse(DEFINITION_URL)) + response.body + f.write(response.body) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/SECURITY.md b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/SECURITY.md new file mode 100644 index 0000000..f329128 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/SECURITY.md @@ -0,0 +1,104 @@ +# Security Policy + +## Supported Versions + +Security updates are provided only for the current minor version. + +If you are using a previous minor version, we recommend to upgrade to the current minor version. +This project uses [semantic versioning](https://semver.org/), therefore you can upgrade to a more recent minor version without incurring into breaking changes. + +Exceptionally, we may support previous minor versions upon request if there are significant reasons preventing to immediately switch the latest minor version. + +Older major versions are no longer supported. + + +## Reporting a Vulnerability + +To make a report, please email weppos@weppos.net. + +Please consider encrypting your report with GPG using the key [0x420da82a989398df](https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x420da82a989398df). 
+ +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsBNBE/QiI0BCACtBbjJnJIzaLb4NfjaljzT/+dvodst+wyDRE8Vwc6ujwboZjr2 +0QwXScNzObPazyvkSZVh3g6PveneeSD0dSw2XDqKbbtLMg/Ss12yqXJfjavH/zjk +6Xq+nnbSnxBPzwFAAEaEFIc6H6BygJ7zHPP5WEY5QIMqifEAX//aBqHi4GXHJiHE +237Zqufdry23jBYjY7wGXAa11VsU9Iwqh6LPB9/hc1KtzjAuvvm5ufeT/iVjxGQX +te1OZZk6n8xSVYeLsn97PfgYs0yauhexwD9dG7FbRCB379JxPRn5akr391qXcVOG +ZA3yBXUSPOL6D1+TS1S0su5zbw2AEp4+z3SpABEBAAHNIlNpbW9uZSBDYXJsZXR0 +aSA8d2VwcG9zQGdtYWlsLmNvbT7CwHcEEwEKACEFAlXH0UQCGy8FCwkIBwMFFQoJ +CAsFFgIDAQACHgECF4AACgkQQg2oKpiTmN9BOQf/UHd+bmww71MkbS38KkowDu+0 +1VH35aL8sFcAMUSEA4I5oPWZoBtYYPGpALLxtrSNW+SCnmmeCQVfVmLedUVHwDZo +TS4qiynpqnz+Cnq4KRC8VMIyaFoiT5Vg6MLtau8hJtqZn1Wv68g0nXuprsCuf9vs +z7DDZ36z8em6OJQJQ/FQ4BGogzyYHa90cJnIM6BeLiRUUpFTl1tHLlw4JFUNi8sx +6VQ1/nhcr3OyskAix5TytRnJ8uIn22m25GGdTF2WQPNfkWJQVT4ZDbCxT20acRp0 +l3x1DAk3Eel8gOKzgOboB3bkI5/l1XZvNL0YWGZeyfp8I7ZqpXg/m4qLDkYU2cLA +egQTAQoAJAIbLwULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAUCVf6KvAIZAQAKCRBC +DagqmJOY34ABB/9WbNAh0l07UN1ePfVm6Brg2Yt8k6Q2lIRUG2xAeQj/+Kx/4lAL +oY6F0jJ44kIDZqZdNA0QIqYzZRBV4iW+cQrsBUUyM+chiA3RuOsDG18sfvkyPvRy +ecOVubHCN+nK2GKy1oHQkCpVFIeetr0ugB/j+xNDKJ3Oa5dGBKF29ZH5Pxg7cqwH +cdkhBGMpPbBYq5pJtYGggqypELzFTG292StbtV837Eze+clWRTKtMBOHke/oKBCr +YYic2fmipGC9XUiqvMEMAKYq5WWWXIlcSVSnBDdxq41tXjKK4XMVgoOboZCcNFvh +0NxuDQATk1YruRZOS4SpBPXykEA1pK/zm3WmzSNTaW1vbmUgQ2FybGV0dGkgPHdl +cHBvc0B3ZXBwb3MubmV0PsLAeQQTAQIAIwUCT9CIjQIbLwcLCQgHAwIBBhUIAgkK +CwQWAgMBAh4BAheAAAoJEEINqCqYk5jfGWcH/Ax3EhAckGeCqNYE5BTx94bKB1LL +vUjeUoImMtGGFxQu2jNOAjtpuyjihm9uHBZ+dxaxHHrhE11f+0sDcwvW8qtKEzOs +GESr01VqTaVFS2JOEHhLphXseaLXJe32Osz0kHCZmrz1fCwv3b8QuWBifn8oVzcV +vrE7lGC6pGwaiUvMsvA++RUquTlNVlh8uRrqcQCU8Ne9lSoDWHlUJes5s4FoCh3R +oVBcKPsx3m/P9+GlEgTDqYP+WU3sfSfJYERH0r0NAYP96m2e7UQrqdgvMTVVDkPB +UB9efZzgkL7u9IAqmLU2klSGdEZnJ8t1AsjEyHXMztC7ICUhRFCeXHdTNhHCwHwE +EwEKACYCGy8HCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAUCVcfRaQIZAQAKCRBC +DagqmJOY31y1B/41I/SsWwDqJP/Y3LzzatGmIv/gy+LkJBBTr/NV0NYzKV2XJ1BG +ese2ZE4tKKdG4HDwF+IwFLBHcPZRv358IwwTRPnzeO23mxpTYAnRCdg/pcaYIJ9r +OxIOP+R52YbgGrNKcezVA+7TY9za072P7Bk85jTM2FNfqevaf/YQ4GRcGLQ3JI8N +tBUdvrOEETDpR0QFTr22Wv1C7UfPDsSf7ZUM7zJ38CmDji8JSlr6y75/LYSY50BB +8EHb03QxyePe98A3WzvOoqamiCIe9bRzH5IqRAtJYDX8cK4PZmp43bQhrjdjawCc +AU/OY9iz+zCw00+b6CNiRb59N+OwpNJh5iNNwsB5BBMBCgAjAhsvBwsJCAcDAgEG +FQgCCQoLBBYCAwECHgECF4AFAlX+iq0ACgkQQg2oKpiTmN/z2gf/VbcQHgTlXFYa +Sq/dE7S54uGFrdzHOV3IJyl+ByMwVoKn6zdpksRoyt7jPV3RonrUO7jEcrt7VKCU +2KC7/MZMDoUsn9BXXTtUk+uTCNh8qllR0Fo/FvWM9RJKmcDMKwAJwcKIgbfUBJGx +1N6pP2DUc+YCnEerRbnQ1DWJUM7BaOEN6bvPxuGblPst1l6S5VktFj3gZGYItHrs +pit5pesILP8K6B6VCNP2WXXYvYQo7yyYcG8WBWXin8/SdNwU68lUbfhhQVIKv6LU +h0wvgG97NsBPrFbij0K6O63FufnNr9WLMZhAzi0h6gNK2HKAyw9AZNKpPccwg+mX +Huc/4CPRlM0uU2ltb25lIENhcmxldHRpIDxzaW1vbmUuY2FybGV0dGlAZG5zaW1w +bGUuY29tPsLAdwQTAQoAIQUCVh4ipAIbLwULCQgHAwUVCgkICwUWAgMBAAIeAQIX +gAAKCRBCDagqmJOY329iCACpOY5SV7hwOZ8VqmRfxRoHQFQe9Owr+hD3eL0AKZaJ +V918dCPrrxbAmwwMAC8pS8J4CmrrTR27kxcUgVwcfyydFPrgST5pg+H7UTrBR045 +4Npw1+m99I2Pyyl3oaym4lKJFbp2c2DGODEzTg8kKfjk0cb8bd+MJrXqFyod1z5r +0pfexwaLVt1Hz+ZsmFIPO1ISHYBPV8OkpL8Kgb8WtY6REntgNjfcmtHNi0VWQ7+N +vgeYqdhscX8c9ROe26BiiiGXphRlAsCU/VLHOJkzoW3f9QLy4z01Xj/7OaD0JkHS +HrES1ye3ZDxnjnTRdh4U8ntJ+L+xnePcFQA2t0eCbPwIzSZTaW1vbmUgQ2FybGV0 +dGkgPHNpbW9uZUBjYXJsZXR0aS5uYW1lPsLAdwQTAQoAIQUCVf7gmwIbLwULCQgH +AwUVCgkICwUWAgMBAAIeAQIXgAAKCRBCDagqmJOY37L+B/45pWT3wgm43+kzHVOT +j63m4zmRb53TGZToRSxz3acyuVSuqU9Tv010F0ZV9ccb0NDeN+88s9tEisuoO0Rz +5vhC8AtwRUyR3ADE9pBtvvxT+4R9y8yYNTCIX45VPG9ZPp9+7i+XCdKtz30KIV7r +smktd2FrK16r/KUN8+03iZSgzQ9lsTmXK5L7zH/f3Tqhbfvybr4+M71KGnSoP+iP 
+vwfsoBb5rhijQLOykTb+VzdDpHQbupwxwm/3S4nsA4U6tonIywlJgBDSjgDjQj0i +Ez+Db2Wt59y6LoksRQogvJqm0nuxFUWMZc47zdhsRnqmxUYTNpKaJPWc6pfxsQPK +ZvTjzsBNBE/QiI0BCACsaNbG6kyKJBWL5jPhebsijk8PCfSHte1jNCA5l/NvaImZ +6ORq9f8S9MWlYxmzyUkVJaWrv+9p5zmjwcaegjerj6ggjPDEXlZG41Z4YE1/R8pf +wkSvrkLziBxZDB1aYplg8kgXkaIf2yi2FrMPSi04sjvQbBSCcIJeh6+vGK8tIJTn +e0tQbEvRorTwBAPAFlpx/bdk1wZYu11vFKbckhKWou7f8XSdn9ng9cY5uK+xBlFU +2ORgL1ygeIoY9uRvNZG2ncvCvxUPgOqbo31R8KPyvV4rNNvGBOfxQER9LbieBF2I +5I1gpyboGWKcXu1eV7tOpjtW6LHt+6NHhE6L1Lw1ABEBAAHCwX4EGAECAAkFAk/Q +iI0CGy4BKQkQQg2oKpiTmN/AXSAEGQECAAYFAk/QiI0ACgkQcBROh493BN9hdwf9 +GjiF1GcQN+3TZkXdr2WY0AlbcA/wBp6+ShnqcoU5XLuA0RY3+rWGuaSc2buLke6Y +2MhMAYcgmPdG+WTBoW5dWQGXBZ1IHYVR8HLGaF+Vate1MofE1BNHXhnilIMMfH4G +Tcr3Z3/FaSk9OdHlyiE/Jo7++8PQ+auHVyjtqry+/ysAnyr+lnCn+K4E0PQ1fYpP +fiawKtfSqk9h6HjjMyx9Adrz+ljXh+NyVqYZUfRytjgO+v+dAQmMczT1EawLTdX+ +trx1tHR549pEey7in5QKsje3GLH4zq4mCdWBlivQxmmmlvR07DysLADMbcpjKK2g +utfzygZHCU9hWGR3wbWZ7lXjB/0ZzutNaNYzSCkiC8PIWH1bG+TJO9pslHwP+aBJ +NGAmcwyOH9Bub2CSXikQFZNUmVRwtl7mN4bVAHI8zbMd6xdlX22yDgQei54dPXDw +UYsvGE4zmrD97he1EYcIOKMFHzlJNcWK+uR7lEq6mv7SFGnBr8qTYZRi1bySRgwd +UORuDV12GKTen9WectKtepW0fgYSz+udbDKQyyRef+7xGtCErWRL7f1qr8xm60da ++gSwyD/WkPTY8SP2mdq4u+6m4dWS26kKoENwuL7jUktl/C/EG7NmUKURbXG8lmeu +q59MIs/Fb3SgaO+zN2FZTYp6dyRJHbeEz55JdOu6F+6ihZYH +=j6Xr +-----END PGP PUBLIC KEY BLOCK----- +``` + + +## Tracking Security Updates + +Information about security vulnerabilities are published in the [Security Advisories](https://github.com/weppos/publicsuffix-ruby/security/advisories) page. diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/bin/console b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/bin/console new file mode 100755 index 0000000..c638bb4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/bin/console @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require "bundler/setup" +require "public_suffix" + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require "irb" +IRB.start diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/codecov.yml b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/codecov.yml new file mode 100644 index 0000000..b695937 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/codecov.yml @@ -0,0 +1,12 @@ +# https://docs.codecov.io/docs/coverage-configuration +coverage: + precision: 1 + round: down + status: + project: + default: false + patch: + default: false + +# https://docs.codecov.io/docs/pull-request-comments#section-requiring-changes +comment: off diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/data/list.txt b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/data/list.txt new file mode 100644 index 0000000..9ca00ee --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/data/list.txt @@ -0,0 +1,13380 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Please pull this list from, and only from https://publicsuffix.org/list/public_suffix_list.dat, +// rather than any other VCS sites. Pulling from any other URL is not guaranteed to be supported. + +// Instructions on pulling and using this list can be found at https://publicsuffix.org/list/. 
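The comments above describe where data/list.txt comes from; at runtime the public_suffix gem parses these rules into a PublicSuffix::List. Below is a minimal sketch, not part of this diff, of how the three rule kinds that appear in this file (normal rules such as com.ac, wildcard rules such as *.bd, and exception rules such as !www.ck) are interpreted. It assumes the public_suffix 4.x API (PublicSuffix::List.parse and the list: option) and uses a hypothetical inline list rather than the full vendored file.

```ruby
# Sketch only: how public_suffix interprets the rule kinds used in data/list.txt.
# The inline rules below are a hypothetical miniature of the real list.
require "public_suffix"

rules = <<~LIST
  // normal rule: "com.ac" is a public suffix
  com.ac
  // wildcard rule: every direct child of "bd" is a public suffix
  *.bd
  // exception rule: "www.ck" stays registrable even though "*.ck" is a wildcard
  *.ck
  !www.ck
LIST

list = PublicSuffix::List.parse(rules)

PublicSuffix.domain("host.example.com.ac", list: list) # => "example.com.ac"
PublicSuffix.domain("host.example.foo.bd", list: list) # => "example.foo.bd"
PublicSuffix.domain("anything.www.ck", list: list)     # => "www.ck"
```

Parsing a tiny inline list keeps the example independent of whichever revision of the full list happens to be vendored here; the gem itself loads this data/list.txt through PublicSuffix::List.default.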
+ +// ===BEGIN ICANN DOMAINS=== + +// ac : https://en.wikipedia.org/wiki/.ac +ac +com.ac +edu.ac +gov.ac +net.ac +mil.ac +org.ac + +// ad : https://en.wikipedia.org/wiki/.ad +ad +nom.ad + +// ae : https://en.wikipedia.org/wiki/.ae +// see also: "Domain Name Eligibility Policy" at http://www.aeda.ae/eng/aepolicy.php +ae +co.ae +net.ae +org.ae +sch.ae +ac.ae +gov.ae +mil.ae + +// aero : see https://www.information.aero/index.php?id=66 +aero +accident-investigation.aero +accident-prevention.aero +aerobatic.aero +aeroclub.aero +aerodrome.aero +agents.aero +aircraft.aero +airline.aero +airport.aero +air-surveillance.aero +airtraffic.aero +air-traffic-control.aero +ambulance.aero +amusement.aero +association.aero +author.aero +ballooning.aero +broker.aero +caa.aero +cargo.aero +catering.aero +certification.aero +championship.aero +charter.aero +civilaviation.aero +club.aero +conference.aero +consultant.aero +consulting.aero +control.aero +council.aero +crew.aero +design.aero +dgca.aero +educator.aero +emergency.aero +engine.aero +engineer.aero +entertainment.aero +equipment.aero +exchange.aero +express.aero +federation.aero +flight.aero +fuel.aero +gliding.aero +government.aero +groundhandling.aero +group.aero +hanggliding.aero +homebuilt.aero +insurance.aero +journal.aero +journalist.aero +leasing.aero +logistics.aero +magazine.aero +maintenance.aero +media.aero +microlight.aero +modelling.aero +navigation.aero +parachuting.aero +paragliding.aero +passenger-association.aero +pilot.aero +press.aero +production.aero +recreation.aero +repbody.aero +res.aero +research.aero +rotorcraft.aero +safety.aero +scientist.aero +services.aero +show.aero +skydiving.aero +software.aero +student.aero +trader.aero +trading.aero +trainer.aero +union.aero +workinggroup.aero +works.aero + +// af : http://www.nic.af/help.jsp +af +gov.af +com.af +org.af +net.af +edu.af + +// ag : http://www.nic.ag/prices.htm +ag +com.ag +org.ag +net.ag +co.ag +nom.ag + +// ai : http://nic.com.ai/ +ai +off.ai +com.ai +net.ai +org.ai + +// al : http://www.ert.gov.al/ert_alb/faq_det.html?Id=31 +al +com.al +edu.al +gov.al +mil.al +net.al +org.al + +// am : https://www.amnic.net/policy/en/Policy_EN.pdf +am +co.am +com.am +commune.am +net.am +org.am + +// ao : https://en.wikipedia.org/wiki/.ao +// http://www.dns.ao/REGISTR.DOC +ao +ed.ao +gv.ao +og.ao +co.ao +pb.ao +it.ao + +// aq : https://en.wikipedia.org/wiki/.aq +aq + +// ar : https://nic.ar/nic-argentina/normativa-vigente +ar +com.ar +edu.ar +gob.ar +gov.ar +int.ar +mil.ar +musica.ar +net.ar +org.ar +tur.ar + +// arpa : https://en.wikipedia.org/wiki/.arpa +// Confirmed by registry 2008-06-18 +arpa +e164.arpa +in-addr.arpa +ip6.arpa +iris.arpa +uri.arpa +urn.arpa + +// as : https://en.wikipedia.org/wiki/.as +as +gov.as + +// asia : https://en.wikipedia.org/wiki/.asia +asia + +// at : https://en.wikipedia.org/wiki/.at +// Confirmed by registry 2008-06-17 +at +ac.at +co.at +gv.at +or.at +sth.ac.at + +// au : https://en.wikipedia.org/wiki/.au +// http://www.auda.org.au/ +au +// 2LDs +com.au +net.au +org.au +edu.au +gov.au +asn.au +id.au +// Historic 2LDs (closed to new registration, but sites still exist) +info.au +conf.au +oz.au +// CGDNs - http://www.cgdn.org.au/ +act.au +nsw.au +nt.au +qld.au +sa.au +tas.au +vic.au +wa.au +// 3LDs +act.edu.au +catholic.edu.au +// eq.edu.au - Removed at the request of the Queensland Department of Education +nsw.edu.au +nt.edu.au +qld.edu.au +sa.edu.au +tas.edu.au +vic.edu.au +wa.edu.au +// act.gov.au Bug 984824 - Removed at request of Greg Tankard +// 
nsw.gov.au Bug 547985 - Removed at request of +// nt.gov.au Bug 940478 - Removed at request of Greg Connors +qld.gov.au +sa.gov.au +tas.gov.au +vic.gov.au +wa.gov.au +// 4LDs +// education.tas.edu.au - Removed at the request of the Department of Education Tasmania +schools.nsw.edu.au + +// aw : https://en.wikipedia.org/wiki/.aw +aw +com.aw + +// ax : https://en.wikipedia.org/wiki/.ax +ax + +// az : https://en.wikipedia.org/wiki/.az +az +com.az +net.az +int.az +gov.az +org.az +edu.az +info.az +pp.az +mil.az +name.az +pro.az +biz.az + +// ba : http://nic.ba/users_data/files/pravilnik_o_registraciji.pdf +ba +com.ba +edu.ba +gov.ba +mil.ba +net.ba +org.ba + +// bb : https://en.wikipedia.org/wiki/.bb +bb +biz.bb +co.bb +com.bb +edu.bb +gov.bb +info.bb +net.bb +org.bb +store.bb +tv.bb + +// bd : https://en.wikipedia.org/wiki/.bd +*.bd + +// be : https://en.wikipedia.org/wiki/.be +// Confirmed by registry 2008-06-08 +be +ac.be + +// bf : https://en.wikipedia.org/wiki/.bf +bf +gov.bf + +// bg : https://en.wikipedia.org/wiki/.bg +// https://www.register.bg/user/static/rules/en/index.html +bg +a.bg +b.bg +c.bg +d.bg +e.bg +f.bg +g.bg +h.bg +i.bg +j.bg +k.bg +l.bg +m.bg +n.bg +o.bg +p.bg +q.bg +r.bg +s.bg +t.bg +u.bg +v.bg +w.bg +x.bg +y.bg +z.bg +0.bg +1.bg +2.bg +3.bg +4.bg +5.bg +6.bg +7.bg +8.bg +9.bg + +// bh : https://en.wikipedia.org/wiki/.bh +bh +com.bh +edu.bh +net.bh +org.bh +gov.bh + +// bi : https://en.wikipedia.org/wiki/.bi +// http://whois.nic.bi/ +bi +co.bi +com.bi +edu.bi +or.bi +org.bi + +// biz : https://en.wikipedia.org/wiki/.biz +biz + +// bj : https://en.wikipedia.org/wiki/.bj +bj +asso.bj +barreau.bj +gouv.bj + +// bm : http://www.bermudanic.bm/dnr-text.txt +bm +com.bm +edu.bm +gov.bm +net.bm +org.bm + +// bn : http://www.bnnic.bn/faqs +bn +com.bn +edu.bn +gov.bn +net.bn +org.bn + +// bo : https://nic.bo/delegacion2015.php#h-1.10 +bo +com.bo +edu.bo +gob.bo +int.bo +org.bo +net.bo +mil.bo +tv.bo +web.bo +// Social Domains +academia.bo +agro.bo +arte.bo +blog.bo +bolivia.bo +ciencia.bo +cooperativa.bo +democracia.bo +deporte.bo +ecologia.bo +economia.bo +empresa.bo +indigena.bo +industria.bo +info.bo +medicina.bo +movimiento.bo +musica.bo +natural.bo +nombre.bo +noticias.bo +patria.bo +politica.bo +profesional.bo +plurinacional.bo +pueblo.bo +revista.bo +salud.bo +tecnologia.bo +tksat.bo +transporte.bo +wiki.bo + +// br : http://registro.br/dominio/categoria.html +// Submitted by registry +br +9guacu.br +abc.br +adm.br +adv.br +agr.br +aju.br +am.br +anani.br +aparecida.br +app.br +arq.br +art.br +ato.br +b.br +barueri.br +belem.br +bhz.br +bib.br +bio.br +blog.br +bmd.br +boavista.br +bsb.br +campinagrande.br +campinas.br +caxias.br +cim.br +cng.br +cnt.br +com.br +contagem.br +coop.br +coz.br +cri.br +cuiaba.br +curitiba.br +def.br +des.br +det.br +dev.br +ecn.br +eco.br +edu.br +emp.br +enf.br +eng.br +esp.br +etc.br +eti.br +far.br +feira.br +flog.br +floripa.br +fm.br +fnd.br +fortal.br +fot.br +foz.br +fst.br +g12.br +geo.br +ggf.br +goiania.br +gov.br +// gov.br 26 states + df https://en.wikipedia.org/wiki/States_of_Brazil +ac.gov.br +al.gov.br +am.gov.br +ap.gov.br +ba.gov.br +ce.gov.br +df.gov.br +es.gov.br +go.gov.br +ma.gov.br +mg.gov.br +ms.gov.br +mt.gov.br +pa.gov.br +pb.gov.br +pe.gov.br +pi.gov.br +pr.gov.br +rj.gov.br +rn.gov.br +ro.gov.br +rr.gov.br +rs.gov.br +sc.gov.br +se.gov.br +sp.gov.br +to.gov.br +gru.br +imb.br +ind.br +inf.br +jab.br +jampa.br +jdf.br +joinville.br +jor.br +jus.br +leg.br +lel.br +log.br +londrina.br +macapa.br +maceio.br +manaus.br 
+maringa.br +mat.br +med.br +mil.br +morena.br +mp.br +mus.br +natal.br +net.br +niteroi.br +*.nom.br +not.br +ntr.br +odo.br +ong.br +org.br +osasco.br +palmas.br +poa.br +ppg.br +pro.br +psc.br +psi.br +pvh.br +qsl.br +radio.br +rec.br +recife.br +rep.br +ribeirao.br +rio.br +riobranco.br +riopreto.br +salvador.br +sampa.br +santamaria.br +santoandre.br +saobernardo.br +saogonca.br +seg.br +sjc.br +slg.br +slz.br +sorocaba.br +srv.br +taxi.br +tc.br +tec.br +teo.br +the.br +tmp.br +trd.br +tur.br +tv.br +udi.br +vet.br +vix.br +vlog.br +wiki.br +zlg.br + +// bs : http://www.nic.bs/rules.html +bs +com.bs +net.bs +org.bs +edu.bs +gov.bs + +// bt : https://en.wikipedia.org/wiki/.bt +bt +com.bt +edu.bt +gov.bt +net.bt +org.bt + +// bv : No registrations at this time. +// Submitted by registry +bv + +// bw : https://en.wikipedia.org/wiki/.bw +// http://www.gobin.info/domainname/bw.doc +// list of other 2nd level tlds ? +bw +co.bw +org.bw + +// by : https://en.wikipedia.org/wiki/.by +// http://tld.by/rules_2006_en.html +// list of other 2nd level tlds ? +by +gov.by +mil.by +// Official information does not indicate that com.by is a reserved +// second-level domain, but it's being used as one (see www.google.com.by and +// www.yahoo.com.by, for example), so we list it here for safety's sake. +com.by + +// http://hoster.by/ +of.by + +// bz : https://en.wikipedia.org/wiki/.bz +// http://www.belizenic.bz/ +bz +com.bz +net.bz +org.bz +edu.bz +gov.bz + +// ca : https://en.wikipedia.org/wiki/.ca +ca +// ca geographical names +ab.ca +bc.ca +mb.ca +nb.ca +nf.ca +nl.ca +ns.ca +nt.ca +nu.ca +on.ca +pe.ca +qc.ca +sk.ca +yk.ca +// gc.ca: https://en.wikipedia.org/wiki/.gc.ca +// see also: http://registry.gc.ca/en/SubdomainFAQ +gc.ca + +// cat : https://en.wikipedia.org/wiki/.cat +cat + +// cc : https://en.wikipedia.org/wiki/.cc +cc + +// cd : https://en.wikipedia.org/wiki/.cd +// see also: https://www.nic.cd/domain/insertDomain_2.jsp?act=1 +cd +gov.cd + +// cf : https://en.wikipedia.org/wiki/.cf +cf + +// cg : https://en.wikipedia.org/wiki/.cg +cg + +// ch : https://en.wikipedia.org/wiki/.ch +ch + +// ci : https://en.wikipedia.org/wiki/.ci +// http://www.nic.ci/index.php?page=charte +ci +org.ci +or.ci +com.ci +co.ci +edu.ci +ed.ci +ac.ci +net.ci +go.ci +asso.ci +aéroport.ci +int.ci +presse.ci +md.ci +gouv.ci + +// ck : https://en.wikipedia.org/wiki/.ck +*.ck +!www.ck + +// cl : https://www.nic.cl +// Confirmed by .CL registry +cl +aprendemas.cl +co.cl +gob.cl +gov.cl +mil.cl + +// cm : https://en.wikipedia.org/wiki/.cm plus bug 981927 +cm +co.cm +com.cm +gov.cm +net.cm + +// cn : https://en.wikipedia.org/wiki/.cn +// Submitted by registry +cn +ac.cn +com.cn +edu.cn +gov.cn +net.cn +org.cn +mil.cn +公司.cn +网络.cn +網絡.cn +// cn geographic names +ah.cn +bj.cn +cq.cn +fj.cn +gd.cn +gs.cn +gz.cn +gx.cn +ha.cn +hb.cn +he.cn +hi.cn +hl.cn +hn.cn +jl.cn +js.cn +jx.cn +ln.cn +nm.cn +nx.cn +qh.cn +sc.cn +sd.cn +sh.cn +sn.cn +sx.cn +tj.cn +xj.cn +xz.cn +yn.cn +zj.cn +hk.cn +mo.cn +tw.cn + +// co : https://en.wikipedia.org/wiki/.co +// Submitted by registry +co +arts.co +com.co +edu.co +firm.co +gov.co +info.co +int.co +mil.co +net.co +nom.co +org.co +rec.co +web.co + +// com : https://en.wikipedia.org/wiki/.com +com + +// coop : https://en.wikipedia.org/wiki/.coop +coop + +// cr : http://www.nic.cr/niccr_publico/showRegistroDominiosScreen.do +cr +ac.cr +co.cr +ed.cr +fi.cr +go.cr +or.cr +sa.cr + +// cu : https://en.wikipedia.org/wiki/.cu +cu +com.cu +edu.cu +org.cu +net.cu +gov.cu +inf.cu + +// cv : 
https://en.wikipedia.org/wiki/.cv +cv + +// cw : http://www.una.cw/cw_registry/ +// Confirmed by registry 2013-03-26 +cw +com.cw +edu.cw +net.cw +org.cw + +// cx : https://en.wikipedia.org/wiki/.cx +// list of other 2nd level tlds ? +cx +gov.cx + +// cy : http://www.nic.cy/ +// Submitted by registry Panayiotou Fotia +cy +ac.cy +biz.cy +com.cy +ekloges.cy +gov.cy +ltd.cy +name.cy +net.cy +org.cy +parliament.cy +press.cy +pro.cy +tm.cy + +// cz : https://en.wikipedia.org/wiki/.cz +cz + +// de : https://en.wikipedia.org/wiki/.de +// Confirmed by registry (with technical +// reservations) 2008-07-01 +de + +// dj : https://en.wikipedia.org/wiki/.dj +dj + +// dk : https://en.wikipedia.org/wiki/.dk +// Confirmed by registry 2008-06-17 +dk + +// dm : https://en.wikipedia.org/wiki/.dm +dm +com.dm +net.dm +org.dm +edu.dm +gov.dm + +// do : https://en.wikipedia.org/wiki/.do +do +art.do +com.do +edu.do +gob.do +gov.do +mil.do +net.do +org.do +sld.do +web.do + +// dz : https://en.wikipedia.org/wiki/.dz +dz +com.dz +org.dz +net.dz +gov.dz +edu.dz +asso.dz +pol.dz +art.dz + +// ec : http://www.nic.ec/reg/paso1.asp +// Submitted by registry +ec +com.ec +info.ec +net.ec +fin.ec +k12.ec +med.ec +pro.ec +org.ec +edu.ec +gov.ec +gob.ec +mil.ec + +// edu : https://en.wikipedia.org/wiki/.edu +edu + +// ee : http://www.eenet.ee/EENet/dom_reeglid.html#lisa_B +ee +edu.ee +gov.ee +riik.ee +lib.ee +med.ee +com.ee +pri.ee +aip.ee +org.ee +fie.ee + +// eg : https://en.wikipedia.org/wiki/.eg +eg +com.eg +edu.eg +eun.eg +gov.eg +mil.eg +name.eg +net.eg +org.eg +sci.eg + +// er : https://en.wikipedia.org/wiki/.er +*.er + +// es : https://www.nic.es/site_ingles/ingles/dominios/index.html +es +com.es +nom.es +org.es +gob.es +edu.es + +// et : https://en.wikipedia.org/wiki/.et +et +com.et +gov.et +org.et +edu.et +biz.et +name.et +info.et +net.et + +// eu : https://en.wikipedia.org/wiki/.eu +eu + +// fi : https://en.wikipedia.org/wiki/.fi +fi +// aland.fi : https://en.wikipedia.org/wiki/.ax +// This domain is being phased out in favor of .ax. As there are still many +// domains under aland.fi, we still keep it on the list until aland.fi is +// completely removed. 
+// TODO: Check for updates (expected to be phased out around Q1/2009) +aland.fi + +// fj : http://domains.fj/ +// Submitted by registry 2020-02-11 +fj +ac.fj +biz.fj +com.fj +gov.fj +info.fj +mil.fj +name.fj +net.fj +org.fj +pro.fj + +// fk : https://en.wikipedia.org/wiki/.fk +*.fk + +// fm : https://en.wikipedia.org/wiki/.fm +com.fm +edu.fm +net.fm +org.fm +fm + +// fo : https://en.wikipedia.org/wiki/.fo +fo + +// fr : http://www.afnic.fr/ +// domaines descriptifs : https://www.afnic.fr/medias/documents/Cadre_legal/Afnic_Naming_Policy_12122016_VEN.pdf +fr +asso.fr +com.fr +gouv.fr +nom.fr +prd.fr +tm.fr +// domaines sectoriels : https://www.afnic.fr/en/products-and-services/the-fr-tld/sector-based-fr-domains-4.html +aeroport.fr +avocat.fr +avoues.fr +cci.fr +chambagri.fr +chirurgiens-dentistes.fr +experts-comptables.fr +geometre-expert.fr +greta.fr +huissier-justice.fr +medecin.fr +notaires.fr +pharmacien.fr +port.fr +veterinaire.fr + +// ga : https://en.wikipedia.org/wiki/.ga +ga + +// gb : This registry is effectively dormant +// Submitted by registry +gb + +// gd : https://en.wikipedia.org/wiki/.gd +edu.gd +gov.gd +gd + +// ge : http://www.nic.net.ge/policy_en.pdf +ge +com.ge +edu.ge +gov.ge +org.ge +mil.ge +net.ge +pvt.ge + +// gf : https://en.wikipedia.org/wiki/.gf +gf + +// gg : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +gg +co.gg +net.gg +org.gg + +// gh : https://en.wikipedia.org/wiki/.gh +// see also: http://www.nic.gh/reg_now.php +// Although domains directly at second level are not possible at the moment, +// they have been possible for some time and may come back. +gh +com.gh +edu.gh +gov.gh +org.gh +mil.gh + +// gi : http://www.nic.gi/rules.html +gi +com.gi +ltd.gi +gov.gi +mod.gi +edu.gi +org.gi + +// gl : https://en.wikipedia.org/wiki/.gl +// http://nic.gl +gl +co.gl +com.gl +edu.gl +net.gl +org.gl + +// gm : http://www.nic.gm/htmlpages%5Cgm-policy.htm +gm + +// gn : http://psg.com/dns/gn/gn.txt +// Submitted by registry +gn +ac.gn +com.gn +edu.gn +gov.gn +org.gn +net.gn + +// gov : https://en.wikipedia.org/wiki/.gov +gov + +// gp : http://www.nic.gp/index.php?lang=en +gp +com.gp +net.gp +mobi.gp +edu.gp +org.gp +asso.gp + +// gq : https://en.wikipedia.org/wiki/.gq +gq + +// gr : https://grweb.ics.forth.gr/english/1617-B-2005.html +// Submitted by registry +gr +com.gr +edu.gr +net.gr +org.gr +gov.gr + +// gs : https://en.wikipedia.org/wiki/.gs +gs + +// gt : http://www.gt/politicas_de_registro.html +gt +com.gt +edu.gt +gob.gt +ind.gt +mil.gt +net.gt +org.gt + +// gu : http://gadao.gov.gu/register.html +// University of Guam : https://www.uog.edu +// Submitted by uognoc@triton.uog.edu +gu +com.gu +edu.gu +gov.gu +guam.gu +info.gu +net.gu +org.gu +web.gu + +// gw : https://en.wikipedia.org/wiki/.gw +gw + +// gy : https://en.wikipedia.org/wiki/.gy +// http://registry.gy/ +gy +co.gy +com.gy +edu.gy +gov.gy +net.gy +org.gy + +// hk : https://www.hkirc.hk +// Submitted by registry +hk +com.hk +edu.hk +gov.hk +idv.hk +net.hk +org.hk +公司.hk +教育.hk +敎育.hk +政府.hk +個人.hk +个人.hk +箇人.hk +網络.hk +网络.hk +组織.hk +網絡.hk +网絡.hk +组织.hk +組織.hk +組织.hk + +// hm : https://en.wikipedia.org/wiki/.hm +hm + +// hn : http://www.nic.hn/politicas/ps02,,05.html +hn +com.hn +edu.hn +org.hn +net.hn +mil.hn +gob.hn + +// hr : http://www.dns.hr/documents/pdf/HRTLD-regulations.pdf +hr +iz.hr +from.hr +name.hr +com.hr + +// ht : http://www.nic.ht/info/charte.cfm +ht +com.ht +shop.ht +firm.ht +info.ht +adult.ht +net.ht +pro.ht +org.ht +med.ht +art.ht +coop.ht +pol.ht 
+asso.ht +edu.ht +rel.ht +gouv.ht +perso.ht + +// hu : http://www.domain.hu/domain/English/sld.html +// Confirmed by registry 2008-06-12 +hu +co.hu +info.hu +org.hu +priv.hu +sport.hu +tm.hu +2000.hu +agrar.hu +bolt.hu +casino.hu +city.hu +erotica.hu +erotika.hu +film.hu +forum.hu +games.hu +hotel.hu +ingatlan.hu +jogasz.hu +konyvelo.hu +lakas.hu +media.hu +news.hu +reklam.hu +sex.hu +shop.hu +suli.hu +szex.hu +tozsde.hu +utazas.hu +video.hu + +// id : https://pandi.id/en/domain/registration-requirements/ +id +ac.id +biz.id +co.id +desa.id +go.id +mil.id +my.id +net.id +or.id +ponpes.id +sch.id +web.id + +// ie : https://en.wikipedia.org/wiki/.ie +ie +gov.ie + +// il : http://www.isoc.org.il/domains/ +il +ac.il +co.il +gov.il +idf.il +k12.il +muni.il +net.il +org.il + +// im : https://www.nic.im/ +// Submitted by registry +im +ac.im +co.im +com.im +ltd.co.im +net.im +org.im +plc.co.im +tt.im +tv.im + +// in : https://en.wikipedia.org/wiki/.in +// see also: https://registry.in/Policies +// Please note, that nic.in is not an official eTLD, but used by most +// government institutions. +in +co.in +firm.in +net.in +org.in +gen.in +ind.in +nic.in +ac.in +edu.in +res.in +gov.in +mil.in + +// info : https://en.wikipedia.org/wiki/.info +info + +// int : https://en.wikipedia.org/wiki/.int +// Confirmed by registry 2008-06-18 +int +eu.int + +// io : http://www.nic.io/rules.html +// list of other 2nd level tlds ? +io +com.io + +// iq : http://www.cmc.iq/english/iq/iqregister1.htm +iq +gov.iq +edu.iq +mil.iq +com.iq +org.iq +net.iq + +// ir : http://www.nic.ir/Terms_and_Conditions_ir,_Appendix_1_Domain_Rules +// Also see http://www.nic.ir/Internationalized_Domain_Names +// Two .ir entries added at request of , 2010-04-16 +ir +ac.ir +co.ir +gov.ir +id.ir +net.ir +org.ir +sch.ir +// xn--mgba3a4f16a.ir (.ir, Persian YEH) +ایران.ir +// xn--mgba3a4fra.ir (.ir, Arabic YEH) +ايران.ir + +// is : http://www.isnic.is/domain/rules.php +// Confirmed by registry 2008-12-06 +is +net.is +com.is +edu.is +gov.is +org.is +int.is + +// it : https://en.wikipedia.org/wiki/.it +it +gov.it +edu.it +// Reserved geo-names (regions and provinces): +// https://www.nic.it/sites/default/files/archivio/docs/Regulation_assignation_v7.1.pdf +// Regions +abr.it +abruzzo.it +aosta-valley.it +aostavalley.it +bas.it +basilicata.it +cal.it +calabria.it +cam.it +campania.it +emilia-romagna.it +emiliaromagna.it +emr.it +friuli-v-giulia.it +friuli-ve-giulia.it +friuli-vegiulia.it +friuli-venezia-giulia.it +friuli-veneziagiulia.it +friuli-vgiulia.it +friuliv-giulia.it +friulive-giulia.it +friulivegiulia.it +friulivenezia-giulia.it +friuliveneziagiulia.it +friulivgiulia.it +fvg.it +laz.it +lazio.it +lig.it +liguria.it +lom.it +lombardia.it +lombardy.it +lucania.it +mar.it +marche.it +mol.it +molise.it +piedmont.it +piemonte.it +pmn.it +pug.it +puglia.it +sar.it +sardegna.it +sardinia.it +sic.it +sicilia.it +sicily.it +taa.it +tos.it +toscana.it +trentin-sud-tirol.it +trentin-süd-tirol.it +trentin-sudtirol.it +trentin-südtirol.it +trentin-sued-tirol.it +trentin-suedtirol.it +trentino-a-adige.it +trentino-aadige.it +trentino-alto-adige.it +trentino-altoadige.it +trentino-s-tirol.it +trentino-stirol.it +trentino-sud-tirol.it +trentino-süd-tirol.it +trentino-sudtirol.it +trentino-südtirol.it +trentino-sued-tirol.it +trentino-suedtirol.it +trentino.it +trentinoa-adige.it +trentinoaadige.it +trentinoalto-adige.it +trentinoaltoadige.it +trentinos-tirol.it +trentinostirol.it +trentinosud-tirol.it +trentinosüd-tirol.it +trentinosudtirol.it 
+trentinosüdtirol.it +trentinosued-tirol.it +trentinosuedtirol.it +trentinsud-tirol.it +trentinsüd-tirol.it +trentinsudtirol.it +trentinsüdtirol.it +trentinsued-tirol.it +trentinsuedtirol.it +tuscany.it +umb.it +umbria.it +val-d-aosta.it +val-daosta.it +vald-aosta.it +valdaosta.it +valle-aosta.it +valle-d-aosta.it +valle-daosta.it +valleaosta.it +valled-aosta.it +valledaosta.it +vallee-aoste.it +vallée-aoste.it +vallee-d-aoste.it +vallée-d-aoste.it +valleeaoste.it +valléeaoste.it +valleedaoste.it +valléedaoste.it +vao.it +vda.it +ven.it +veneto.it +// Provinces +ag.it +agrigento.it +al.it +alessandria.it +alto-adige.it +altoadige.it +an.it +ancona.it +andria-barletta-trani.it +andria-trani-barletta.it +andriabarlettatrani.it +andriatranibarletta.it +ao.it +aosta.it +aoste.it +ap.it +aq.it +aquila.it +ar.it +arezzo.it +ascoli-piceno.it +ascolipiceno.it +asti.it +at.it +av.it +avellino.it +ba.it +balsan-sudtirol.it +balsan-südtirol.it +balsan-suedtirol.it +balsan.it +bari.it +barletta-trani-andria.it +barlettatraniandria.it +belluno.it +benevento.it +bergamo.it +bg.it +bi.it +biella.it +bl.it +bn.it +bo.it +bologna.it +bolzano-altoadige.it +bolzano.it +bozen-sudtirol.it +bozen-südtirol.it +bozen-suedtirol.it +bozen.it +br.it +brescia.it +brindisi.it +bs.it +bt.it +bulsan-sudtirol.it +bulsan-südtirol.it +bulsan-suedtirol.it +bulsan.it +bz.it +ca.it +cagliari.it +caltanissetta.it +campidano-medio.it +campidanomedio.it +campobasso.it +carbonia-iglesias.it +carboniaiglesias.it +carrara-massa.it +carraramassa.it +caserta.it +catania.it +catanzaro.it +cb.it +ce.it +cesena-forli.it +cesena-forlì.it +cesenaforli.it +cesenaforlì.it +ch.it +chieti.it +ci.it +cl.it +cn.it +co.it +como.it +cosenza.it +cr.it +cremona.it +crotone.it +cs.it +ct.it +cuneo.it +cz.it +dell-ogliastra.it +dellogliastra.it +en.it +enna.it +fc.it +fe.it +fermo.it +ferrara.it +fg.it +fi.it +firenze.it +florence.it +fm.it +foggia.it +forli-cesena.it +forlì-cesena.it +forlicesena.it +forlìcesena.it +fr.it +frosinone.it +ge.it +genoa.it +genova.it +go.it +gorizia.it +gr.it +grosseto.it +iglesias-carbonia.it +iglesiascarbonia.it +im.it +imperia.it +is.it +isernia.it +kr.it +la-spezia.it +laquila.it +laspezia.it +latina.it +lc.it +le.it +lecce.it +lecco.it +li.it +livorno.it +lo.it +lodi.it +lt.it +lu.it +lucca.it +macerata.it +mantova.it +massa-carrara.it +massacarrara.it +matera.it +mb.it +mc.it +me.it +medio-campidano.it +mediocampidano.it +messina.it +mi.it +milan.it +milano.it +mn.it +mo.it +modena.it +monza-brianza.it +monza-e-della-brianza.it +monza.it +monzabrianza.it +monzaebrianza.it +monzaedellabrianza.it +ms.it +mt.it +na.it +naples.it +napoli.it +no.it +novara.it +nu.it +nuoro.it +og.it +ogliastra.it +olbia-tempio.it +olbiatempio.it +or.it +oristano.it +ot.it +pa.it +padova.it +padua.it +palermo.it +parma.it +pavia.it +pc.it +pd.it +pe.it +perugia.it +pesaro-urbino.it +pesarourbino.it +pescara.it +pg.it +pi.it +piacenza.it +pisa.it +pistoia.it +pn.it +po.it +pordenone.it +potenza.it +pr.it +prato.it +pt.it +pu.it +pv.it +pz.it +ra.it +ragusa.it +ravenna.it +rc.it +re.it +reggio-calabria.it +reggio-emilia.it +reggiocalabria.it +reggioemilia.it +rg.it +ri.it +rieti.it +rimini.it +rm.it +rn.it +ro.it +roma.it +rome.it +rovigo.it +sa.it +salerno.it +sassari.it +savona.it +si.it +siena.it +siracusa.it +so.it +sondrio.it +sp.it +sr.it +ss.it +suedtirol.it +südtirol.it +sv.it +ta.it +taranto.it +te.it +tempio-olbia.it +tempioolbia.it +teramo.it +terni.it +tn.it +to.it +torino.it +tp.it +tr.it +trani-andria-barletta.it 
+trani-barletta-andria.it +traniandriabarletta.it +tranibarlettaandria.it +trapani.it +trento.it +treviso.it +trieste.it +ts.it +turin.it +tv.it +ud.it +udine.it +urbino-pesaro.it +urbinopesaro.it +va.it +varese.it +vb.it +vc.it +ve.it +venezia.it +venice.it +verbania.it +vercelli.it +verona.it +vi.it +vibo-valentia.it +vibovalentia.it +vicenza.it +viterbo.it +vr.it +vs.it +vt.it +vv.it + +// je : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +je +co.je +net.je +org.je + +// jm : http://www.com.jm/register.html +*.jm + +// jo : http://www.dns.jo/Registration_policy.aspx +jo +com.jo +org.jo +net.jo +edu.jo +sch.jo +gov.jo +mil.jo +name.jo + +// jobs : https://en.wikipedia.org/wiki/.jobs +jobs + +// jp : https://en.wikipedia.org/wiki/.jp +// http://jprs.co.jp/en/jpdomain.html +// Submitted by registry +jp +// jp organizational type names +ac.jp +ad.jp +co.jp +ed.jp +go.jp +gr.jp +lg.jp +ne.jp +or.jp +// jp prefecture type names +aichi.jp +akita.jp +aomori.jp +chiba.jp +ehime.jp +fukui.jp +fukuoka.jp +fukushima.jp +gifu.jp +gunma.jp +hiroshima.jp +hokkaido.jp +hyogo.jp +ibaraki.jp +ishikawa.jp +iwate.jp +kagawa.jp +kagoshima.jp +kanagawa.jp +kochi.jp +kumamoto.jp +kyoto.jp +mie.jp +miyagi.jp +miyazaki.jp +nagano.jp +nagasaki.jp +nara.jp +niigata.jp +oita.jp +okayama.jp +okinawa.jp +osaka.jp +saga.jp +saitama.jp +shiga.jp +shimane.jp +shizuoka.jp +tochigi.jp +tokushima.jp +tokyo.jp +tottori.jp +toyama.jp +wakayama.jp +yamagata.jp +yamaguchi.jp +yamanashi.jp +栃木.jp +愛知.jp +愛媛.jp +兵庫.jp +熊本.jp +茨城.jp +北海道.jp +千葉.jp +和歌山.jp +長崎.jp +長野.jp +新潟.jp +青森.jp +静岡.jp +東京.jp +石川.jp +埼玉.jp +三重.jp +京都.jp +佐賀.jp +大分.jp +大阪.jp +奈良.jp +宮城.jp +宮崎.jp +富山.jp +山口.jp +山形.jp +山梨.jp +岩手.jp +岐阜.jp +岡山.jp +島根.jp +広島.jp +徳島.jp +沖縄.jp +滋賀.jp +神奈川.jp +福井.jp +福岡.jp +福島.jp +秋田.jp +群馬.jp +香川.jp +高知.jp +鳥取.jp +鹿児島.jp +// jp geographic type names +// http://jprs.jp/doc/rule/saisoku-1.html +*.kawasaki.jp +*.kitakyushu.jp +*.kobe.jp +*.nagoya.jp +*.sapporo.jp +*.sendai.jp +*.yokohama.jp +!city.kawasaki.jp +!city.kitakyushu.jp +!city.kobe.jp +!city.nagoya.jp +!city.sapporo.jp +!city.sendai.jp +!city.yokohama.jp +// 4th level registration +aisai.aichi.jp +ama.aichi.jp +anjo.aichi.jp +asuke.aichi.jp +chiryu.aichi.jp +chita.aichi.jp +fuso.aichi.jp +gamagori.aichi.jp +handa.aichi.jp +hazu.aichi.jp +hekinan.aichi.jp +higashiura.aichi.jp +ichinomiya.aichi.jp +inazawa.aichi.jp +inuyama.aichi.jp +isshiki.aichi.jp +iwakura.aichi.jp +kanie.aichi.jp +kariya.aichi.jp +kasugai.aichi.jp +kira.aichi.jp +kiyosu.aichi.jp +komaki.aichi.jp +konan.aichi.jp +kota.aichi.jp +mihama.aichi.jp +miyoshi.aichi.jp +nishio.aichi.jp +nisshin.aichi.jp +obu.aichi.jp +oguchi.aichi.jp +oharu.aichi.jp +okazaki.aichi.jp +owariasahi.aichi.jp +seto.aichi.jp +shikatsu.aichi.jp +shinshiro.aichi.jp +shitara.aichi.jp +tahara.aichi.jp +takahama.aichi.jp +tobishima.aichi.jp +toei.aichi.jp +togo.aichi.jp +tokai.aichi.jp +tokoname.aichi.jp +toyoake.aichi.jp +toyohashi.aichi.jp +toyokawa.aichi.jp +toyone.aichi.jp +toyota.aichi.jp +tsushima.aichi.jp +yatomi.aichi.jp +akita.akita.jp +daisen.akita.jp +fujisato.akita.jp +gojome.akita.jp +hachirogata.akita.jp +happou.akita.jp +higashinaruse.akita.jp +honjo.akita.jp +honjyo.akita.jp +ikawa.akita.jp +kamikoani.akita.jp +kamioka.akita.jp +katagami.akita.jp +kazuno.akita.jp +kitaakita.akita.jp +kosaka.akita.jp +kyowa.akita.jp +misato.akita.jp +mitane.akita.jp +moriyoshi.akita.jp +nikaho.akita.jp +noshiro.akita.jp +odate.akita.jp +oga.akita.jp +ogata.akita.jp +semboku.akita.jp +yokote.akita.jp 
+yurihonjo.akita.jp +aomori.aomori.jp +gonohe.aomori.jp +hachinohe.aomori.jp +hashikami.aomori.jp +hiranai.aomori.jp +hirosaki.aomori.jp +itayanagi.aomori.jp +kuroishi.aomori.jp +misawa.aomori.jp +mutsu.aomori.jp +nakadomari.aomori.jp +noheji.aomori.jp +oirase.aomori.jp +owani.aomori.jp +rokunohe.aomori.jp +sannohe.aomori.jp +shichinohe.aomori.jp +shingo.aomori.jp +takko.aomori.jp +towada.aomori.jp +tsugaru.aomori.jp +tsuruta.aomori.jp +abiko.chiba.jp +asahi.chiba.jp +chonan.chiba.jp +chosei.chiba.jp +choshi.chiba.jp +chuo.chiba.jp +funabashi.chiba.jp +futtsu.chiba.jp +hanamigawa.chiba.jp +ichihara.chiba.jp +ichikawa.chiba.jp +ichinomiya.chiba.jp +inzai.chiba.jp +isumi.chiba.jp +kamagaya.chiba.jp +kamogawa.chiba.jp +kashiwa.chiba.jp +katori.chiba.jp +katsuura.chiba.jp +kimitsu.chiba.jp +kisarazu.chiba.jp +kozaki.chiba.jp +kujukuri.chiba.jp +kyonan.chiba.jp +matsudo.chiba.jp +midori.chiba.jp +mihama.chiba.jp +minamiboso.chiba.jp +mobara.chiba.jp +mutsuzawa.chiba.jp +nagara.chiba.jp +nagareyama.chiba.jp +narashino.chiba.jp +narita.chiba.jp +noda.chiba.jp +oamishirasato.chiba.jp +omigawa.chiba.jp +onjuku.chiba.jp +otaki.chiba.jp +sakae.chiba.jp +sakura.chiba.jp +shimofusa.chiba.jp +shirako.chiba.jp +shiroi.chiba.jp +shisui.chiba.jp +sodegaura.chiba.jp +sosa.chiba.jp +tako.chiba.jp +tateyama.chiba.jp +togane.chiba.jp +tohnosho.chiba.jp +tomisato.chiba.jp +urayasu.chiba.jp +yachimata.chiba.jp +yachiyo.chiba.jp +yokaichiba.chiba.jp +yokoshibahikari.chiba.jp +yotsukaido.chiba.jp +ainan.ehime.jp +honai.ehime.jp +ikata.ehime.jp +imabari.ehime.jp +iyo.ehime.jp +kamijima.ehime.jp +kihoku.ehime.jp +kumakogen.ehime.jp +masaki.ehime.jp +matsuno.ehime.jp +matsuyama.ehime.jp +namikata.ehime.jp +niihama.ehime.jp +ozu.ehime.jp +saijo.ehime.jp +seiyo.ehime.jp +shikokuchuo.ehime.jp +tobe.ehime.jp +toon.ehime.jp +uchiko.ehime.jp +uwajima.ehime.jp +yawatahama.ehime.jp +echizen.fukui.jp +eiheiji.fukui.jp +fukui.fukui.jp +ikeda.fukui.jp +katsuyama.fukui.jp +mihama.fukui.jp +minamiechizen.fukui.jp +obama.fukui.jp +ohi.fukui.jp +ono.fukui.jp +sabae.fukui.jp +sakai.fukui.jp +takahama.fukui.jp +tsuruga.fukui.jp +wakasa.fukui.jp +ashiya.fukuoka.jp +buzen.fukuoka.jp +chikugo.fukuoka.jp +chikuho.fukuoka.jp +chikujo.fukuoka.jp +chikushino.fukuoka.jp +chikuzen.fukuoka.jp +chuo.fukuoka.jp +dazaifu.fukuoka.jp +fukuchi.fukuoka.jp +hakata.fukuoka.jp +higashi.fukuoka.jp +hirokawa.fukuoka.jp +hisayama.fukuoka.jp +iizuka.fukuoka.jp +inatsuki.fukuoka.jp +kaho.fukuoka.jp +kasuga.fukuoka.jp +kasuya.fukuoka.jp +kawara.fukuoka.jp +keisen.fukuoka.jp +koga.fukuoka.jp +kurate.fukuoka.jp +kurogi.fukuoka.jp +kurume.fukuoka.jp +minami.fukuoka.jp +miyako.fukuoka.jp +miyama.fukuoka.jp +miyawaka.fukuoka.jp +mizumaki.fukuoka.jp +munakata.fukuoka.jp +nakagawa.fukuoka.jp +nakama.fukuoka.jp +nishi.fukuoka.jp +nogata.fukuoka.jp +ogori.fukuoka.jp +okagaki.fukuoka.jp +okawa.fukuoka.jp +oki.fukuoka.jp +omuta.fukuoka.jp +onga.fukuoka.jp +onojo.fukuoka.jp +oto.fukuoka.jp +saigawa.fukuoka.jp +sasaguri.fukuoka.jp +shingu.fukuoka.jp +shinyoshitomi.fukuoka.jp +shonai.fukuoka.jp +soeda.fukuoka.jp +sue.fukuoka.jp +tachiarai.fukuoka.jp +tagawa.fukuoka.jp +takata.fukuoka.jp +toho.fukuoka.jp +toyotsu.fukuoka.jp +tsuiki.fukuoka.jp +ukiha.fukuoka.jp +umi.fukuoka.jp +usui.fukuoka.jp +yamada.fukuoka.jp +yame.fukuoka.jp +yanagawa.fukuoka.jp +yukuhashi.fukuoka.jp +aizubange.fukushima.jp +aizumisato.fukushima.jp +aizuwakamatsu.fukushima.jp +asakawa.fukushima.jp +bandai.fukushima.jp +date.fukushima.jp +fukushima.fukushima.jp +furudono.fukushima.jp +futaba.fukushima.jp 
+hanawa.fukushima.jp +higashi.fukushima.jp +hirata.fukushima.jp +hirono.fukushima.jp +iitate.fukushima.jp +inawashiro.fukushima.jp +ishikawa.fukushima.jp +iwaki.fukushima.jp +izumizaki.fukushima.jp +kagamiishi.fukushima.jp +kaneyama.fukushima.jp +kawamata.fukushima.jp +kitakata.fukushima.jp +kitashiobara.fukushima.jp +koori.fukushima.jp +koriyama.fukushima.jp +kunimi.fukushima.jp +miharu.fukushima.jp +mishima.fukushima.jp +namie.fukushima.jp +nango.fukushima.jp +nishiaizu.fukushima.jp +nishigo.fukushima.jp +okuma.fukushima.jp +omotego.fukushima.jp +ono.fukushima.jp +otama.fukushima.jp +samegawa.fukushima.jp +shimogo.fukushima.jp +shirakawa.fukushima.jp +showa.fukushima.jp +soma.fukushima.jp +sukagawa.fukushima.jp +taishin.fukushima.jp +tamakawa.fukushima.jp +tanagura.fukushima.jp +tenei.fukushima.jp +yabuki.fukushima.jp +yamato.fukushima.jp +yamatsuri.fukushima.jp +yanaizu.fukushima.jp +yugawa.fukushima.jp +anpachi.gifu.jp +ena.gifu.jp +gifu.gifu.jp +ginan.gifu.jp +godo.gifu.jp +gujo.gifu.jp +hashima.gifu.jp +hichiso.gifu.jp +hida.gifu.jp +higashishirakawa.gifu.jp +ibigawa.gifu.jp +ikeda.gifu.jp +kakamigahara.gifu.jp +kani.gifu.jp +kasahara.gifu.jp +kasamatsu.gifu.jp +kawaue.gifu.jp +kitagata.gifu.jp +mino.gifu.jp +minokamo.gifu.jp +mitake.gifu.jp +mizunami.gifu.jp +motosu.gifu.jp +nakatsugawa.gifu.jp +ogaki.gifu.jp +sakahogi.gifu.jp +seki.gifu.jp +sekigahara.gifu.jp +shirakawa.gifu.jp +tajimi.gifu.jp +takayama.gifu.jp +tarui.gifu.jp +toki.gifu.jp +tomika.gifu.jp +wanouchi.gifu.jp +yamagata.gifu.jp +yaotsu.gifu.jp +yoro.gifu.jp +annaka.gunma.jp +chiyoda.gunma.jp +fujioka.gunma.jp +higashiagatsuma.gunma.jp +isesaki.gunma.jp +itakura.gunma.jp +kanna.gunma.jp +kanra.gunma.jp +katashina.gunma.jp +kawaba.gunma.jp +kiryu.gunma.jp +kusatsu.gunma.jp +maebashi.gunma.jp +meiwa.gunma.jp +midori.gunma.jp +minakami.gunma.jp +naganohara.gunma.jp +nakanojo.gunma.jp +nanmoku.gunma.jp +numata.gunma.jp +oizumi.gunma.jp +ora.gunma.jp +ota.gunma.jp +shibukawa.gunma.jp +shimonita.gunma.jp +shinto.gunma.jp +showa.gunma.jp +takasaki.gunma.jp +takayama.gunma.jp +tamamura.gunma.jp +tatebayashi.gunma.jp +tomioka.gunma.jp +tsukiyono.gunma.jp +tsumagoi.gunma.jp +ueno.gunma.jp +yoshioka.gunma.jp +asaminami.hiroshima.jp +daiwa.hiroshima.jp +etajima.hiroshima.jp +fuchu.hiroshima.jp +fukuyama.hiroshima.jp +hatsukaichi.hiroshima.jp +higashihiroshima.hiroshima.jp +hongo.hiroshima.jp +jinsekikogen.hiroshima.jp +kaita.hiroshima.jp +kui.hiroshima.jp +kumano.hiroshima.jp +kure.hiroshima.jp +mihara.hiroshima.jp +miyoshi.hiroshima.jp +naka.hiroshima.jp +onomichi.hiroshima.jp +osakikamijima.hiroshima.jp +otake.hiroshima.jp +saka.hiroshima.jp +sera.hiroshima.jp +seranishi.hiroshima.jp +shinichi.hiroshima.jp +shobara.hiroshima.jp +takehara.hiroshima.jp +abashiri.hokkaido.jp +abira.hokkaido.jp +aibetsu.hokkaido.jp +akabira.hokkaido.jp +akkeshi.hokkaido.jp +asahikawa.hokkaido.jp +ashibetsu.hokkaido.jp +ashoro.hokkaido.jp +assabu.hokkaido.jp +atsuma.hokkaido.jp +bibai.hokkaido.jp +biei.hokkaido.jp +bifuka.hokkaido.jp +bihoro.hokkaido.jp +biratori.hokkaido.jp +chippubetsu.hokkaido.jp +chitose.hokkaido.jp +date.hokkaido.jp +ebetsu.hokkaido.jp +embetsu.hokkaido.jp +eniwa.hokkaido.jp +erimo.hokkaido.jp +esan.hokkaido.jp +esashi.hokkaido.jp +fukagawa.hokkaido.jp +fukushima.hokkaido.jp +furano.hokkaido.jp +furubira.hokkaido.jp +haboro.hokkaido.jp +hakodate.hokkaido.jp +hamatonbetsu.hokkaido.jp +hidaka.hokkaido.jp +higashikagura.hokkaido.jp +higashikawa.hokkaido.jp +hiroo.hokkaido.jp +hokuryu.hokkaido.jp +hokuto.hokkaido.jp 
+honbetsu.hokkaido.jp +horokanai.hokkaido.jp +horonobe.hokkaido.jp +ikeda.hokkaido.jp +imakane.hokkaido.jp +ishikari.hokkaido.jp +iwamizawa.hokkaido.jp +iwanai.hokkaido.jp +kamifurano.hokkaido.jp +kamikawa.hokkaido.jp +kamishihoro.hokkaido.jp +kamisunagawa.hokkaido.jp +kamoenai.hokkaido.jp +kayabe.hokkaido.jp +kembuchi.hokkaido.jp +kikonai.hokkaido.jp +kimobetsu.hokkaido.jp +kitahiroshima.hokkaido.jp +kitami.hokkaido.jp +kiyosato.hokkaido.jp +koshimizu.hokkaido.jp +kunneppu.hokkaido.jp +kuriyama.hokkaido.jp +kuromatsunai.hokkaido.jp +kushiro.hokkaido.jp +kutchan.hokkaido.jp +kyowa.hokkaido.jp +mashike.hokkaido.jp +matsumae.hokkaido.jp +mikasa.hokkaido.jp +minamifurano.hokkaido.jp +mombetsu.hokkaido.jp +moseushi.hokkaido.jp +mukawa.hokkaido.jp +muroran.hokkaido.jp +naie.hokkaido.jp +nakagawa.hokkaido.jp +nakasatsunai.hokkaido.jp +nakatombetsu.hokkaido.jp +nanae.hokkaido.jp +nanporo.hokkaido.jp +nayoro.hokkaido.jp +nemuro.hokkaido.jp +niikappu.hokkaido.jp +niki.hokkaido.jp +nishiokoppe.hokkaido.jp +noboribetsu.hokkaido.jp +numata.hokkaido.jp +obihiro.hokkaido.jp +obira.hokkaido.jp +oketo.hokkaido.jp +okoppe.hokkaido.jp +otaru.hokkaido.jp +otobe.hokkaido.jp +otofuke.hokkaido.jp +otoineppu.hokkaido.jp +oumu.hokkaido.jp +ozora.hokkaido.jp +pippu.hokkaido.jp +rankoshi.hokkaido.jp +rebun.hokkaido.jp +rikubetsu.hokkaido.jp +rishiri.hokkaido.jp +rishirifuji.hokkaido.jp +saroma.hokkaido.jp +sarufutsu.hokkaido.jp +shakotan.hokkaido.jp +shari.hokkaido.jp +shibecha.hokkaido.jp +shibetsu.hokkaido.jp +shikabe.hokkaido.jp +shikaoi.hokkaido.jp +shimamaki.hokkaido.jp +shimizu.hokkaido.jp +shimokawa.hokkaido.jp +shinshinotsu.hokkaido.jp +shintoku.hokkaido.jp +shiranuka.hokkaido.jp +shiraoi.hokkaido.jp +shiriuchi.hokkaido.jp +sobetsu.hokkaido.jp +sunagawa.hokkaido.jp +taiki.hokkaido.jp +takasu.hokkaido.jp +takikawa.hokkaido.jp +takinoue.hokkaido.jp +teshikaga.hokkaido.jp +tobetsu.hokkaido.jp +tohma.hokkaido.jp +tomakomai.hokkaido.jp +tomari.hokkaido.jp +toya.hokkaido.jp +toyako.hokkaido.jp +toyotomi.hokkaido.jp +toyoura.hokkaido.jp +tsubetsu.hokkaido.jp +tsukigata.hokkaido.jp +urakawa.hokkaido.jp +urausu.hokkaido.jp +uryu.hokkaido.jp +utashinai.hokkaido.jp +wakkanai.hokkaido.jp +wassamu.hokkaido.jp +yakumo.hokkaido.jp +yoichi.hokkaido.jp +aioi.hyogo.jp +akashi.hyogo.jp +ako.hyogo.jp +amagasaki.hyogo.jp +aogaki.hyogo.jp +asago.hyogo.jp +ashiya.hyogo.jp +awaji.hyogo.jp +fukusaki.hyogo.jp +goshiki.hyogo.jp +harima.hyogo.jp +himeji.hyogo.jp +ichikawa.hyogo.jp +inagawa.hyogo.jp +itami.hyogo.jp +kakogawa.hyogo.jp +kamigori.hyogo.jp +kamikawa.hyogo.jp +kasai.hyogo.jp +kasuga.hyogo.jp +kawanishi.hyogo.jp +miki.hyogo.jp +minamiawaji.hyogo.jp +nishinomiya.hyogo.jp +nishiwaki.hyogo.jp +ono.hyogo.jp +sanda.hyogo.jp +sannan.hyogo.jp +sasayama.hyogo.jp +sayo.hyogo.jp +shingu.hyogo.jp +shinonsen.hyogo.jp +shiso.hyogo.jp +sumoto.hyogo.jp +taishi.hyogo.jp +taka.hyogo.jp +takarazuka.hyogo.jp +takasago.hyogo.jp +takino.hyogo.jp +tamba.hyogo.jp +tatsuno.hyogo.jp +toyooka.hyogo.jp +yabu.hyogo.jp +yashiro.hyogo.jp +yoka.hyogo.jp +yokawa.hyogo.jp +ami.ibaraki.jp +asahi.ibaraki.jp +bando.ibaraki.jp +chikusei.ibaraki.jp +daigo.ibaraki.jp +fujishiro.ibaraki.jp +hitachi.ibaraki.jp +hitachinaka.ibaraki.jp +hitachiomiya.ibaraki.jp +hitachiota.ibaraki.jp +ibaraki.ibaraki.jp +ina.ibaraki.jp +inashiki.ibaraki.jp +itako.ibaraki.jp +iwama.ibaraki.jp +joso.ibaraki.jp +kamisu.ibaraki.jp +kasama.ibaraki.jp +kashima.ibaraki.jp +kasumigaura.ibaraki.jp +koga.ibaraki.jp +miho.ibaraki.jp +mito.ibaraki.jp +moriya.ibaraki.jp +naka.ibaraki.jp 
+namegata.ibaraki.jp +oarai.ibaraki.jp +ogawa.ibaraki.jp +omitama.ibaraki.jp +ryugasaki.ibaraki.jp +sakai.ibaraki.jp +sakuragawa.ibaraki.jp +shimodate.ibaraki.jp +shimotsuma.ibaraki.jp +shirosato.ibaraki.jp +sowa.ibaraki.jp +suifu.ibaraki.jp +takahagi.ibaraki.jp +tamatsukuri.ibaraki.jp +tokai.ibaraki.jp +tomobe.ibaraki.jp +tone.ibaraki.jp +toride.ibaraki.jp +tsuchiura.ibaraki.jp +tsukuba.ibaraki.jp +uchihara.ibaraki.jp +ushiku.ibaraki.jp +yachiyo.ibaraki.jp +yamagata.ibaraki.jp +yawara.ibaraki.jp +yuki.ibaraki.jp +anamizu.ishikawa.jp +hakui.ishikawa.jp +hakusan.ishikawa.jp +kaga.ishikawa.jp +kahoku.ishikawa.jp +kanazawa.ishikawa.jp +kawakita.ishikawa.jp +komatsu.ishikawa.jp +nakanoto.ishikawa.jp +nanao.ishikawa.jp +nomi.ishikawa.jp +nonoichi.ishikawa.jp +noto.ishikawa.jp +shika.ishikawa.jp +suzu.ishikawa.jp +tsubata.ishikawa.jp +tsurugi.ishikawa.jp +uchinada.ishikawa.jp +wajima.ishikawa.jp +fudai.iwate.jp +fujisawa.iwate.jp +hanamaki.iwate.jp +hiraizumi.iwate.jp +hirono.iwate.jp +ichinohe.iwate.jp +ichinoseki.iwate.jp +iwaizumi.iwate.jp +iwate.iwate.jp +joboji.iwate.jp +kamaishi.iwate.jp +kanegasaki.iwate.jp +karumai.iwate.jp +kawai.iwate.jp +kitakami.iwate.jp +kuji.iwate.jp +kunohe.iwate.jp +kuzumaki.iwate.jp +miyako.iwate.jp +mizusawa.iwate.jp +morioka.iwate.jp +ninohe.iwate.jp +noda.iwate.jp +ofunato.iwate.jp +oshu.iwate.jp +otsuchi.iwate.jp +rikuzentakata.iwate.jp +shiwa.iwate.jp +shizukuishi.iwate.jp +sumita.iwate.jp +tanohata.iwate.jp +tono.iwate.jp +yahaba.iwate.jp +yamada.iwate.jp +ayagawa.kagawa.jp +higashikagawa.kagawa.jp +kanonji.kagawa.jp +kotohira.kagawa.jp +manno.kagawa.jp +marugame.kagawa.jp +mitoyo.kagawa.jp +naoshima.kagawa.jp +sanuki.kagawa.jp +tadotsu.kagawa.jp +takamatsu.kagawa.jp +tonosho.kagawa.jp +uchinomi.kagawa.jp +utazu.kagawa.jp +zentsuji.kagawa.jp +akune.kagoshima.jp +amami.kagoshima.jp +hioki.kagoshima.jp +isa.kagoshima.jp +isen.kagoshima.jp +izumi.kagoshima.jp +kagoshima.kagoshima.jp +kanoya.kagoshima.jp +kawanabe.kagoshima.jp +kinko.kagoshima.jp +kouyama.kagoshima.jp +makurazaki.kagoshima.jp +matsumoto.kagoshima.jp +minamitane.kagoshima.jp +nakatane.kagoshima.jp +nishinoomote.kagoshima.jp +satsumasendai.kagoshima.jp +soo.kagoshima.jp +tarumizu.kagoshima.jp +yusui.kagoshima.jp +aikawa.kanagawa.jp +atsugi.kanagawa.jp +ayase.kanagawa.jp +chigasaki.kanagawa.jp +ebina.kanagawa.jp +fujisawa.kanagawa.jp +hadano.kanagawa.jp +hakone.kanagawa.jp +hiratsuka.kanagawa.jp +isehara.kanagawa.jp +kaisei.kanagawa.jp +kamakura.kanagawa.jp +kiyokawa.kanagawa.jp +matsuda.kanagawa.jp +minamiashigara.kanagawa.jp +miura.kanagawa.jp +nakai.kanagawa.jp +ninomiya.kanagawa.jp +odawara.kanagawa.jp +oi.kanagawa.jp +oiso.kanagawa.jp +sagamihara.kanagawa.jp +samukawa.kanagawa.jp +tsukui.kanagawa.jp +yamakita.kanagawa.jp +yamato.kanagawa.jp +yokosuka.kanagawa.jp +yugawara.kanagawa.jp +zama.kanagawa.jp +zushi.kanagawa.jp +aki.kochi.jp +geisei.kochi.jp +hidaka.kochi.jp +higashitsuno.kochi.jp +ino.kochi.jp +kagami.kochi.jp +kami.kochi.jp +kitagawa.kochi.jp +kochi.kochi.jp +mihara.kochi.jp +motoyama.kochi.jp +muroto.kochi.jp +nahari.kochi.jp +nakamura.kochi.jp +nankoku.kochi.jp +nishitosa.kochi.jp +niyodogawa.kochi.jp +ochi.kochi.jp +okawa.kochi.jp +otoyo.kochi.jp +otsuki.kochi.jp +sakawa.kochi.jp +sukumo.kochi.jp +susaki.kochi.jp +tosa.kochi.jp +tosashimizu.kochi.jp +toyo.kochi.jp +tsuno.kochi.jp +umaji.kochi.jp +yasuda.kochi.jp +yusuhara.kochi.jp +amakusa.kumamoto.jp +arao.kumamoto.jp +aso.kumamoto.jp +choyo.kumamoto.jp +gyokuto.kumamoto.jp +kamiamakusa.kumamoto.jp +kikuchi.kumamoto.jp 
+kumamoto.kumamoto.jp +mashiki.kumamoto.jp +mifune.kumamoto.jp +minamata.kumamoto.jp +minamioguni.kumamoto.jp +nagasu.kumamoto.jp +nishihara.kumamoto.jp +oguni.kumamoto.jp +ozu.kumamoto.jp +sumoto.kumamoto.jp +takamori.kumamoto.jp +uki.kumamoto.jp +uto.kumamoto.jp +yamaga.kumamoto.jp +yamato.kumamoto.jp +yatsushiro.kumamoto.jp +ayabe.kyoto.jp +fukuchiyama.kyoto.jp +higashiyama.kyoto.jp +ide.kyoto.jp +ine.kyoto.jp +joyo.kyoto.jp +kameoka.kyoto.jp +kamo.kyoto.jp +kita.kyoto.jp +kizu.kyoto.jp +kumiyama.kyoto.jp +kyotamba.kyoto.jp +kyotanabe.kyoto.jp +kyotango.kyoto.jp +maizuru.kyoto.jp +minami.kyoto.jp +minamiyamashiro.kyoto.jp +miyazu.kyoto.jp +muko.kyoto.jp +nagaokakyo.kyoto.jp +nakagyo.kyoto.jp +nantan.kyoto.jp +oyamazaki.kyoto.jp +sakyo.kyoto.jp +seika.kyoto.jp +tanabe.kyoto.jp +uji.kyoto.jp +ujitawara.kyoto.jp +wazuka.kyoto.jp +yamashina.kyoto.jp +yawata.kyoto.jp +asahi.mie.jp +inabe.mie.jp +ise.mie.jp +kameyama.mie.jp +kawagoe.mie.jp +kiho.mie.jp +kisosaki.mie.jp +kiwa.mie.jp +komono.mie.jp +kumano.mie.jp +kuwana.mie.jp +matsusaka.mie.jp +meiwa.mie.jp +mihama.mie.jp +minamiise.mie.jp +misugi.mie.jp +miyama.mie.jp +nabari.mie.jp +shima.mie.jp +suzuka.mie.jp +tado.mie.jp +taiki.mie.jp +taki.mie.jp +tamaki.mie.jp +toba.mie.jp +tsu.mie.jp +udono.mie.jp +ureshino.mie.jp +watarai.mie.jp +yokkaichi.mie.jp +furukawa.miyagi.jp +higashimatsushima.miyagi.jp +ishinomaki.miyagi.jp +iwanuma.miyagi.jp +kakuda.miyagi.jp +kami.miyagi.jp +kawasaki.miyagi.jp +marumori.miyagi.jp +matsushima.miyagi.jp +minamisanriku.miyagi.jp +misato.miyagi.jp +murata.miyagi.jp +natori.miyagi.jp +ogawara.miyagi.jp +ohira.miyagi.jp +onagawa.miyagi.jp +osaki.miyagi.jp +rifu.miyagi.jp +semine.miyagi.jp +shibata.miyagi.jp +shichikashuku.miyagi.jp +shikama.miyagi.jp +shiogama.miyagi.jp +shiroishi.miyagi.jp +tagajo.miyagi.jp +taiwa.miyagi.jp +tome.miyagi.jp +tomiya.miyagi.jp +wakuya.miyagi.jp +watari.miyagi.jp +yamamoto.miyagi.jp +zao.miyagi.jp +aya.miyazaki.jp +ebino.miyazaki.jp +gokase.miyazaki.jp +hyuga.miyazaki.jp +kadogawa.miyazaki.jp +kawaminami.miyazaki.jp +kijo.miyazaki.jp +kitagawa.miyazaki.jp +kitakata.miyazaki.jp +kitaura.miyazaki.jp +kobayashi.miyazaki.jp +kunitomi.miyazaki.jp +kushima.miyazaki.jp +mimata.miyazaki.jp +miyakonojo.miyazaki.jp +miyazaki.miyazaki.jp +morotsuka.miyazaki.jp +nichinan.miyazaki.jp +nishimera.miyazaki.jp +nobeoka.miyazaki.jp +saito.miyazaki.jp +shiiba.miyazaki.jp +shintomi.miyazaki.jp +takaharu.miyazaki.jp +takanabe.miyazaki.jp +takazaki.miyazaki.jp +tsuno.miyazaki.jp +achi.nagano.jp +agematsu.nagano.jp +anan.nagano.jp +aoki.nagano.jp +asahi.nagano.jp +azumino.nagano.jp +chikuhoku.nagano.jp +chikuma.nagano.jp +chino.nagano.jp +fujimi.nagano.jp +hakuba.nagano.jp +hara.nagano.jp +hiraya.nagano.jp +iida.nagano.jp +iijima.nagano.jp +iiyama.nagano.jp +iizuna.nagano.jp +ikeda.nagano.jp +ikusaka.nagano.jp +ina.nagano.jp +karuizawa.nagano.jp +kawakami.nagano.jp +kiso.nagano.jp +kisofukushima.nagano.jp +kitaaiki.nagano.jp +komagane.nagano.jp +komoro.nagano.jp +matsukawa.nagano.jp +matsumoto.nagano.jp +miasa.nagano.jp +minamiaiki.nagano.jp +minamimaki.nagano.jp +minamiminowa.nagano.jp +minowa.nagano.jp +miyada.nagano.jp +miyota.nagano.jp +mochizuki.nagano.jp +nagano.nagano.jp +nagawa.nagano.jp +nagiso.nagano.jp +nakagawa.nagano.jp +nakano.nagano.jp +nozawaonsen.nagano.jp +obuse.nagano.jp +ogawa.nagano.jp +okaya.nagano.jp +omachi.nagano.jp +omi.nagano.jp +ookuwa.nagano.jp +ooshika.nagano.jp +otaki.nagano.jp +otari.nagano.jp +sakae.nagano.jp +sakaki.nagano.jp +saku.nagano.jp +sakuho.nagano.jp 
+shimosuwa.nagano.jp +shinanomachi.nagano.jp +shiojiri.nagano.jp +suwa.nagano.jp +suzaka.nagano.jp +takagi.nagano.jp +takamori.nagano.jp +takayama.nagano.jp +tateshina.nagano.jp +tatsuno.nagano.jp +togakushi.nagano.jp +togura.nagano.jp +tomi.nagano.jp +ueda.nagano.jp +wada.nagano.jp +yamagata.nagano.jp +yamanouchi.nagano.jp +yasaka.nagano.jp +yasuoka.nagano.jp +chijiwa.nagasaki.jp +futsu.nagasaki.jp +goto.nagasaki.jp +hasami.nagasaki.jp +hirado.nagasaki.jp +iki.nagasaki.jp +isahaya.nagasaki.jp +kawatana.nagasaki.jp +kuchinotsu.nagasaki.jp +matsuura.nagasaki.jp +nagasaki.nagasaki.jp +obama.nagasaki.jp +omura.nagasaki.jp +oseto.nagasaki.jp +saikai.nagasaki.jp +sasebo.nagasaki.jp +seihi.nagasaki.jp +shimabara.nagasaki.jp +shinkamigoto.nagasaki.jp +togitsu.nagasaki.jp +tsushima.nagasaki.jp +unzen.nagasaki.jp +ando.nara.jp +gose.nara.jp +heguri.nara.jp +higashiyoshino.nara.jp +ikaruga.nara.jp +ikoma.nara.jp +kamikitayama.nara.jp +kanmaki.nara.jp +kashiba.nara.jp +kashihara.nara.jp +katsuragi.nara.jp +kawai.nara.jp +kawakami.nara.jp +kawanishi.nara.jp +koryo.nara.jp +kurotaki.nara.jp +mitsue.nara.jp +miyake.nara.jp +nara.nara.jp +nosegawa.nara.jp +oji.nara.jp +ouda.nara.jp +oyodo.nara.jp +sakurai.nara.jp +sango.nara.jp +shimoichi.nara.jp +shimokitayama.nara.jp +shinjo.nara.jp +soni.nara.jp +takatori.nara.jp +tawaramoto.nara.jp +tenkawa.nara.jp +tenri.nara.jp +uda.nara.jp +yamatokoriyama.nara.jp +yamatotakada.nara.jp +yamazoe.nara.jp +yoshino.nara.jp +aga.niigata.jp +agano.niigata.jp +gosen.niigata.jp +itoigawa.niigata.jp +izumozaki.niigata.jp +joetsu.niigata.jp +kamo.niigata.jp +kariwa.niigata.jp +kashiwazaki.niigata.jp +minamiuonuma.niigata.jp +mitsuke.niigata.jp +muika.niigata.jp +murakami.niigata.jp +myoko.niigata.jp +nagaoka.niigata.jp +niigata.niigata.jp +ojiya.niigata.jp +omi.niigata.jp +sado.niigata.jp +sanjo.niigata.jp +seiro.niigata.jp +seirou.niigata.jp +sekikawa.niigata.jp +shibata.niigata.jp +tagami.niigata.jp +tainai.niigata.jp +tochio.niigata.jp +tokamachi.niigata.jp +tsubame.niigata.jp +tsunan.niigata.jp +uonuma.niigata.jp +yahiko.niigata.jp +yoita.niigata.jp +yuzawa.niigata.jp +beppu.oita.jp +bungoono.oita.jp +bungotakada.oita.jp +hasama.oita.jp +hiji.oita.jp +himeshima.oita.jp +hita.oita.jp +kamitsue.oita.jp +kokonoe.oita.jp +kuju.oita.jp +kunisaki.oita.jp +kusu.oita.jp +oita.oita.jp +saiki.oita.jp +taketa.oita.jp +tsukumi.oita.jp +usa.oita.jp +usuki.oita.jp +yufu.oita.jp +akaiwa.okayama.jp +asakuchi.okayama.jp +bizen.okayama.jp +hayashima.okayama.jp +ibara.okayama.jp +kagamino.okayama.jp +kasaoka.okayama.jp +kibichuo.okayama.jp +kumenan.okayama.jp +kurashiki.okayama.jp +maniwa.okayama.jp +misaki.okayama.jp +nagi.okayama.jp +niimi.okayama.jp +nishiawakura.okayama.jp +okayama.okayama.jp +satosho.okayama.jp +setouchi.okayama.jp +shinjo.okayama.jp +shoo.okayama.jp +soja.okayama.jp +takahashi.okayama.jp +tamano.okayama.jp +tsuyama.okayama.jp +wake.okayama.jp +yakage.okayama.jp +aguni.okinawa.jp +ginowan.okinawa.jp +ginoza.okinawa.jp +gushikami.okinawa.jp +haebaru.okinawa.jp +higashi.okinawa.jp +hirara.okinawa.jp +iheya.okinawa.jp +ishigaki.okinawa.jp +ishikawa.okinawa.jp +itoman.okinawa.jp +izena.okinawa.jp +kadena.okinawa.jp +kin.okinawa.jp +kitadaito.okinawa.jp +kitanakagusuku.okinawa.jp +kumejima.okinawa.jp +kunigami.okinawa.jp +minamidaito.okinawa.jp +motobu.okinawa.jp +nago.okinawa.jp +naha.okinawa.jp +nakagusuku.okinawa.jp +nakijin.okinawa.jp +nanjo.okinawa.jp +nishihara.okinawa.jp +ogimi.okinawa.jp +okinawa.okinawa.jp +onna.okinawa.jp +shimoji.okinawa.jp +taketomi.okinawa.jp 
+tarama.okinawa.jp +tokashiki.okinawa.jp +tomigusuku.okinawa.jp +tonaki.okinawa.jp +urasoe.okinawa.jp +uruma.okinawa.jp +yaese.okinawa.jp +yomitan.okinawa.jp +yonabaru.okinawa.jp +yonaguni.okinawa.jp +zamami.okinawa.jp +abeno.osaka.jp +chihayaakasaka.osaka.jp +chuo.osaka.jp +daito.osaka.jp +fujiidera.osaka.jp +habikino.osaka.jp +hannan.osaka.jp +higashiosaka.osaka.jp +higashisumiyoshi.osaka.jp +higashiyodogawa.osaka.jp +hirakata.osaka.jp +ibaraki.osaka.jp +ikeda.osaka.jp +izumi.osaka.jp +izumiotsu.osaka.jp +izumisano.osaka.jp +kadoma.osaka.jp +kaizuka.osaka.jp +kanan.osaka.jp +kashiwara.osaka.jp +katano.osaka.jp +kawachinagano.osaka.jp +kishiwada.osaka.jp +kita.osaka.jp +kumatori.osaka.jp +matsubara.osaka.jp +minato.osaka.jp +minoh.osaka.jp +misaki.osaka.jp +moriguchi.osaka.jp +neyagawa.osaka.jp +nishi.osaka.jp +nose.osaka.jp +osakasayama.osaka.jp +sakai.osaka.jp +sayama.osaka.jp +sennan.osaka.jp +settsu.osaka.jp +shijonawate.osaka.jp +shimamoto.osaka.jp +suita.osaka.jp +tadaoka.osaka.jp +taishi.osaka.jp +tajiri.osaka.jp +takaishi.osaka.jp +takatsuki.osaka.jp +tondabayashi.osaka.jp +toyonaka.osaka.jp +toyono.osaka.jp +yao.osaka.jp +ariake.saga.jp +arita.saga.jp +fukudomi.saga.jp +genkai.saga.jp +hamatama.saga.jp +hizen.saga.jp +imari.saga.jp +kamimine.saga.jp +kanzaki.saga.jp +karatsu.saga.jp +kashima.saga.jp +kitagata.saga.jp +kitahata.saga.jp +kiyama.saga.jp +kouhoku.saga.jp +kyuragi.saga.jp +nishiarita.saga.jp +ogi.saga.jp +omachi.saga.jp +ouchi.saga.jp +saga.saga.jp +shiroishi.saga.jp +taku.saga.jp +tara.saga.jp +tosu.saga.jp +yoshinogari.saga.jp +arakawa.saitama.jp +asaka.saitama.jp +chichibu.saitama.jp +fujimi.saitama.jp +fujimino.saitama.jp +fukaya.saitama.jp +hanno.saitama.jp +hanyu.saitama.jp +hasuda.saitama.jp +hatogaya.saitama.jp +hatoyama.saitama.jp +hidaka.saitama.jp +higashichichibu.saitama.jp +higashimatsuyama.saitama.jp +honjo.saitama.jp +ina.saitama.jp +iruma.saitama.jp +iwatsuki.saitama.jp +kamiizumi.saitama.jp +kamikawa.saitama.jp +kamisato.saitama.jp +kasukabe.saitama.jp +kawagoe.saitama.jp +kawaguchi.saitama.jp +kawajima.saitama.jp +kazo.saitama.jp +kitamoto.saitama.jp +koshigaya.saitama.jp +kounosu.saitama.jp +kuki.saitama.jp +kumagaya.saitama.jp +matsubushi.saitama.jp +minano.saitama.jp +misato.saitama.jp +miyashiro.saitama.jp +miyoshi.saitama.jp +moroyama.saitama.jp +nagatoro.saitama.jp +namegawa.saitama.jp +niiza.saitama.jp +ogano.saitama.jp +ogawa.saitama.jp +ogose.saitama.jp +okegawa.saitama.jp +omiya.saitama.jp +otaki.saitama.jp +ranzan.saitama.jp +ryokami.saitama.jp +saitama.saitama.jp +sakado.saitama.jp +satte.saitama.jp +sayama.saitama.jp +shiki.saitama.jp +shiraoka.saitama.jp +soka.saitama.jp +sugito.saitama.jp +toda.saitama.jp +tokigawa.saitama.jp +tokorozawa.saitama.jp +tsurugashima.saitama.jp +urawa.saitama.jp +warabi.saitama.jp +yashio.saitama.jp +yokoze.saitama.jp +yono.saitama.jp +yorii.saitama.jp +yoshida.saitama.jp +yoshikawa.saitama.jp +yoshimi.saitama.jp +aisho.shiga.jp +gamo.shiga.jp +higashiomi.shiga.jp +hikone.shiga.jp +koka.shiga.jp +konan.shiga.jp +kosei.shiga.jp +koto.shiga.jp +kusatsu.shiga.jp +maibara.shiga.jp +moriyama.shiga.jp +nagahama.shiga.jp +nishiazai.shiga.jp +notogawa.shiga.jp +omihachiman.shiga.jp +otsu.shiga.jp +ritto.shiga.jp +ryuoh.shiga.jp +takashima.shiga.jp +takatsuki.shiga.jp +torahime.shiga.jp +toyosato.shiga.jp +yasu.shiga.jp +akagi.shimane.jp +ama.shimane.jp +gotsu.shimane.jp +hamada.shimane.jp +higashiizumo.shimane.jp +hikawa.shimane.jp +hikimi.shimane.jp +izumo.shimane.jp +kakinoki.shimane.jp +masuda.shimane.jp 
+matsue.shimane.jp +misato.shimane.jp +nishinoshima.shimane.jp +ohda.shimane.jp +okinoshima.shimane.jp +okuizumo.shimane.jp +shimane.shimane.jp +tamayu.shimane.jp +tsuwano.shimane.jp +unnan.shimane.jp +yakumo.shimane.jp +yasugi.shimane.jp +yatsuka.shimane.jp +arai.shizuoka.jp +atami.shizuoka.jp +fuji.shizuoka.jp +fujieda.shizuoka.jp +fujikawa.shizuoka.jp +fujinomiya.shizuoka.jp +fukuroi.shizuoka.jp +gotemba.shizuoka.jp +haibara.shizuoka.jp +hamamatsu.shizuoka.jp +higashiizu.shizuoka.jp +ito.shizuoka.jp +iwata.shizuoka.jp +izu.shizuoka.jp +izunokuni.shizuoka.jp +kakegawa.shizuoka.jp +kannami.shizuoka.jp +kawanehon.shizuoka.jp +kawazu.shizuoka.jp +kikugawa.shizuoka.jp +kosai.shizuoka.jp +makinohara.shizuoka.jp +matsuzaki.shizuoka.jp +minamiizu.shizuoka.jp +mishima.shizuoka.jp +morimachi.shizuoka.jp +nishiizu.shizuoka.jp +numazu.shizuoka.jp +omaezaki.shizuoka.jp +shimada.shizuoka.jp +shimizu.shizuoka.jp +shimoda.shizuoka.jp +shizuoka.shizuoka.jp +susono.shizuoka.jp +yaizu.shizuoka.jp +yoshida.shizuoka.jp +ashikaga.tochigi.jp +bato.tochigi.jp +haga.tochigi.jp +ichikai.tochigi.jp +iwafune.tochigi.jp +kaminokawa.tochigi.jp +kanuma.tochigi.jp +karasuyama.tochigi.jp +kuroiso.tochigi.jp +mashiko.tochigi.jp +mibu.tochigi.jp +moka.tochigi.jp +motegi.tochigi.jp +nasu.tochigi.jp +nasushiobara.tochigi.jp +nikko.tochigi.jp +nishikata.tochigi.jp +nogi.tochigi.jp +ohira.tochigi.jp +ohtawara.tochigi.jp +oyama.tochigi.jp +sakura.tochigi.jp +sano.tochigi.jp +shimotsuke.tochigi.jp +shioya.tochigi.jp +takanezawa.tochigi.jp +tochigi.tochigi.jp +tsuga.tochigi.jp +ujiie.tochigi.jp +utsunomiya.tochigi.jp +yaita.tochigi.jp +aizumi.tokushima.jp +anan.tokushima.jp +ichiba.tokushima.jp +itano.tokushima.jp +kainan.tokushima.jp +komatsushima.tokushima.jp +matsushige.tokushima.jp +mima.tokushima.jp +minami.tokushima.jp +miyoshi.tokushima.jp +mugi.tokushima.jp +nakagawa.tokushima.jp +naruto.tokushima.jp +sanagochi.tokushima.jp +shishikui.tokushima.jp +tokushima.tokushima.jp +wajiki.tokushima.jp +adachi.tokyo.jp +akiruno.tokyo.jp +akishima.tokyo.jp +aogashima.tokyo.jp +arakawa.tokyo.jp +bunkyo.tokyo.jp +chiyoda.tokyo.jp +chofu.tokyo.jp +chuo.tokyo.jp +edogawa.tokyo.jp +fuchu.tokyo.jp +fussa.tokyo.jp +hachijo.tokyo.jp +hachioji.tokyo.jp +hamura.tokyo.jp +higashikurume.tokyo.jp +higashimurayama.tokyo.jp +higashiyamato.tokyo.jp +hino.tokyo.jp +hinode.tokyo.jp +hinohara.tokyo.jp +inagi.tokyo.jp +itabashi.tokyo.jp +katsushika.tokyo.jp +kita.tokyo.jp +kiyose.tokyo.jp +kodaira.tokyo.jp +koganei.tokyo.jp +kokubunji.tokyo.jp +komae.tokyo.jp +koto.tokyo.jp +kouzushima.tokyo.jp +kunitachi.tokyo.jp +machida.tokyo.jp +meguro.tokyo.jp +minato.tokyo.jp +mitaka.tokyo.jp +mizuho.tokyo.jp +musashimurayama.tokyo.jp +musashino.tokyo.jp +nakano.tokyo.jp +nerima.tokyo.jp +ogasawara.tokyo.jp +okutama.tokyo.jp +ome.tokyo.jp +oshima.tokyo.jp +ota.tokyo.jp +setagaya.tokyo.jp +shibuya.tokyo.jp +shinagawa.tokyo.jp +shinjuku.tokyo.jp +suginami.tokyo.jp +sumida.tokyo.jp +tachikawa.tokyo.jp +taito.tokyo.jp +tama.tokyo.jp +toshima.tokyo.jp +chizu.tottori.jp +hino.tottori.jp +kawahara.tottori.jp +koge.tottori.jp +kotoura.tottori.jp +misasa.tottori.jp +nanbu.tottori.jp +nichinan.tottori.jp +sakaiminato.tottori.jp +tottori.tottori.jp +wakasa.tottori.jp +yazu.tottori.jp +yonago.tottori.jp +asahi.toyama.jp +fuchu.toyama.jp +fukumitsu.toyama.jp +funahashi.toyama.jp +himi.toyama.jp +imizu.toyama.jp +inami.toyama.jp +johana.toyama.jp +kamiichi.toyama.jp +kurobe.toyama.jp +nakaniikawa.toyama.jp +namerikawa.toyama.jp +nanto.toyama.jp +nyuzen.toyama.jp 
+oyabe.toyama.jp +taira.toyama.jp +takaoka.toyama.jp +tateyama.toyama.jp +toga.toyama.jp +tonami.toyama.jp +toyama.toyama.jp +unazuki.toyama.jp +uozu.toyama.jp +yamada.toyama.jp +arida.wakayama.jp +aridagawa.wakayama.jp +gobo.wakayama.jp +hashimoto.wakayama.jp +hidaka.wakayama.jp +hirogawa.wakayama.jp +inami.wakayama.jp +iwade.wakayama.jp +kainan.wakayama.jp +kamitonda.wakayama.jp +katsuragi.wakayama.jp +kimino.wakayama.jp +kinokawa.wakayama.jp +kitayama.wakayama.jp +koya.wakayama.jp +koza.wakayama.jp +kozagawa.wakayama.jp +kudoyama.wakayama.jp +kushimoto.wakayama.jp +mihama.wakayama.jp +misato.wakayama.jp +nachikatsuura.wakayama.jp +shingu.wakayama.jp +shirahama.wakayama.jp +taiji.wakayama.jp +tanabe.wakayama.jp +wakayama.wakayama.jp +yuasa.wakayama.jp +yura.wakayama.jp +asahi.yamagata.jp +funagata.yamagata.jp +higashine.yamagata.jp +iide.yamagata.jp +kahoku.yamagata.jp +kaminoyama.yamagata.jp +kaneyama.yamagata.jp +kawanishi.yamagata.jp +mamurogawa.yamagata.jp +mikawa.yamagata.jp +murayama.yamagata.jp +nagai.yamagata.jp +nakayama.yamagata.jp +nanyo.yamagata.jp +nishikawa.yamagata.jp +obanazawa.yamagata.jp +oe.yamagata.jp +oguni.yamagata.jp +ohkura.yamagata.jp +oishida.yamagata.jp +sagae.yamagata.jp +sakata.yamagata.jp +sakegawa.yamagata.jp +shinjo.yamagata.jp +shirataka.yamagata.jp +shonai.yamagata.jp +takahata.yamagata.jp +tendo.yamagata.jp +tozawa.yamagata.jp +tsuruoka.yamagata.jp +yamagata.yamagata.jp +yamanobe.yamagata.jp +yonezawa.yamagata.jp +yuza.yamagata.jp +abu.yamaguchi.jp +hagi.yamaguchi.jp +hikari.yamaguchi.jp +hofu.yamaguchi.jp +iwakuni.yamaguchi.jp +kudamatsu.yamaguchi.jp +mitou.yamaguchi.jp +nagato.yamaguchi.jp +oshima.yamaguchi.jp +shimonoseki.yamaguchi.jp +shunan.yamaguchi.jp +tabuse.yamaguchi.jp +tokuyama.yamaguchi.jp +toyota.yamaguchi.jp +ube.yamaguchi.jp +yuu.yamaguchi.jp +chuo.yamanashi.jp +doshi.yamanashi.jp +fuefuki.yamanashi.jp +fujikawa.yamanashi.jp +fujikawaguchiko.yamanashi.jp +fujiyoshida.yamanashi.jp +hayakawa.yamanashi.jp +hokuto.yamanashi.jp +ichikawamisato.yamanashi.jp +kai.yamanashi.jp +kofu.yamanashi.jp +koshu.yamanashi.jp +kosuge.yamanashi.jp +minami-alps.yamanashi.jp +minobu.yamanashi.jp +nakamichi.yamanashi.jp +nanbu.yamanashi.jp +narusawa.yamanashi.jp +nirasaki.yamanashi.jp +nishikatsura.yamanashi.jp +oshino.yamanashi.jp +otsuki.yamanashi.jp +showa.yamanashi.jp +tabayama.yamanashi.jp +tsuru.yamanashi.jp +uenohara.yamanashi.jp +yamanakako.yamanashi.jp +yamanashi.yamanashi.jp + +// ke : http://www.kenic.or.ke/index.php/en/ke-domains/ke-domains +ke +ac.ke +co.ke +go.ke +info.ke +me.ke +mobi.ke +ne.ke +or.ke +sc.ke + +// kg : http://www.domain.kg/dmn_n.html +kg +org.kg +net.kg +com.kg +edu.kg +gov.kg +mil.kg + +// kh : http://www.mptc.gov.kh/dns_registration.htm +*.kh + +// ki : http://www.ki/dns/index.html +ki +edu.ki +biz.ki +net.ki +org.ki +gov.ki +info.ki +com.ki + +// km : https://en.wikipedia.org/wiki/.km +// http://www.domaine.km/documents/charte.doc +km +org.km +nom.km +gov.km +prd.km +tm.km +edu.km +mil.km +ass.km +com.km +// These are only mentioned as proposed suggestions at domaine.km, but +// https://en.wikipedia.org/wiki/.km says they're available for registration: +coop.km +asso.km +presse.km +medecin.km +notaires.km +pharmaciens.km +veterinaire.km +gouv.km + +// kn : https://en.wikipedia.org/wiki/.kn +// http://www.dot.kn/domainRules.html +kn +net.kn +org.kn +edu.kn +gov.kn + +// kp : http://www.kcce.kp/en_index.php +kp +com.kp +edu.kp +gov.kp +org.kp +rep.kp +tra.kp + +// kr : https://en.wikipedia.org/wiki/.kr +// see also: 
http://domain.nida.or.kr/eng/registration.jsp +kr +ac.kr +co.kr +es.kr +go.kr +hs.kr +kg.kr +mil.kr +ms.kr +ne.kr +or.kr +pe.kr +re.kr +sc.kr +// kr geographical names +busan.kr +chungbuk.kr +chungnam.kr +daegu.kr +daejeon.kr +gangwon.kr +gwangju.kr +gyeongbuk.kr +gyeonggi.kr +gyeongnam.kr +incheon.kr +jeju.kr +jeonbuk.kr +jeonnam.kr +seoul.kr +ulsan.kr + +// kw : https://www.nic.kw/policies/ +// Confirmed by registry +kw +com.kw +edu.kw +emb.kw +gov.kw +ind.kw +net.kw +org.kw + +// ky : http://www.icta.ky/da_ky_reg_dom.php +// Confirmed by registry 2008-06-17 +ky +edu.ky +gov.ky +com.ky +org.ky +net.ky + +// kz : https://en.wikipedia.org/wiki/.kz +// see also: http://www.nic.kz/rules/index.jsp +kz +org.kz +edu.kz +net.kz +gov.kz +mil.kz +com.kz + +// la : https://en.wikipedia.org/wiki/.la +// Submitted by registry +la +int.la +net.la +info.la +edu.la +gov.la +per.la +com.la +org.la + +// lb : https://en.wikipedia.org/wiki/.lb +// Submitted by registry +lb +com.lb +edu.lb +gov.lb +net.lb +org.lb + +// lc : https://en.wikipedia.org/wiki/.lc +// see also: http://www.nic.lc/rules.htm +lc +com.lc +net.lc +co.lc +org.lc +edu.lc +gov.lc + +// li : https://en.wikipedia.org/wiki/.li +li + +// lk : https://www.nic.lk/index.php/domain-registration/lk-domain-naming-structure +lk +gov.lk +sch.lk +net.lk +int.lk +com.lk +org.lk +edu.lk +ngo.lk +soc.lk +web.lk +ltd.lk +assn.lk +grp.lk +hotel.lk +ac.lk + +// lr : http://psg.com/dns/lr/lr.txt +// Submitted by registry +lr +com.lr +edu.lr +gov.lr +org.lr +net.lr + +// ls : http://www.nic.ls/ +// Confirmed by registry +ls +ac.ls +biz.ls +co.ls +edu.ls +gov.ls +info.ls +net.ls +org.ls +sc.ls + +// lt : https://en.wikipedia.org/wiki/.lt +lt +// gov.lt : http://www.gov.lt/index_en.php +gov.lt + +// lu : http://www.dns.lu/en/ +lu + +// lv : http://www.nic.lv/DNS/En/generic.php +lv +com.lv +edu.lv +gov.lv +org.lv +mil.lv +id.lv +net.lv +asn.lv +conf.lv + +// ly : http://www.nic.ly/regulations.php +ly +com.ly +net.ly +gov.ly +plc.ly +edu.ly +sch.ly +med.ly +org.ly +id.ly + +// ma : https://en.wikipedia.org/wiki/.ma +// http://www.anrt.ma/fr/admin/download/upload/file_fr782.pdf +ma +co.ma +net.ma +gov.ma +org.ma +ac.ma +press.ma + +// mc : http://www.nic.mc/ +mc +tm.mc +asso.mc + +// md : https://en.wikipedia.org/wiki/.md +md + +// me : https://en.wikipedia.org/wiki/.me +me +co.me +net.me +org.me +edu.me +ac.me +gov.me +its.me +priv.me + +// mg : http://nic.mg/nicmg/?page_id=39 +mg +org.mg +nom.mg +gov.mg +prd.mg +tm.mg +edu.mg +mil.mg +com.mg +co.mg + +// mh : https://en.wikipedia.org/wiki/.mh +mh + +// mil : https://en.wikipedia.org/wiki/.mil +mil + +// mk : https://en.wikipedia.org/wiki/.mk +// see also: http://dns.marnet.net.mk/postapka.php +mk +com.mk +org.mk +net.mk +edu.mk +gov.mk +inf.mk +name.mk + +// ml : http://www.gobin.info/domainname/ml-template.doc +// see also: https://en.wikipedia.org/wiki/.ml +ml +com.ml +edu.ml +gouv.ml +gov.ml +net.ml +org.ml +presse.ml + +// mm : https://en.wikipedia.org/wiki/.mm +*.mm + +// mn : https://en.wikipedia.org/wiki/.mn +mn +gov.mn +edu.mn +org.mn + +// mo : http://www.monic.net.mo/ +mo +com.mo +net.mo +org.mo +edu.mo +gov.mo + +// mobi : https://en.wikipedia.org/wiki/.mobi +mobi + +// mp : http://www.dot.mp/ +// Confirmed by registry 2008-06-17 +mp + +// mq : https://en.wikipedia.org/wiki/.mq +mq + +// mr : https://en.wikipedia.org/wiki/.mr +mr +gov.mr + +// ms : http://www.nic.ms/pdf/MS_Domain_Name_Rules.pdf +ms +com.ms +edu.ms +gov.ms +net.ms +org.ms + +// mt : https://www.nic.org.mt/go/policy +// Submitted by 
registry +mt +com.mt +edu.mt +net.mt +org.mt + +// mu : https://en.wikipedia.org/wiki/.mu +mu +com.mu +net.mu +org.mu +gov.mu +ac.mu +co.mu +or.mu + +// museum : http://about.museum/naming/ +// http://index.museum/ +museum +academy.museum +agriculture.museum +air.museum +airguard.museum +alabama.museum +alaska.museum +amber.museum +ambulance.museum +american.museum +americana.museum +americanantiques.museum +americanart.museum +amsterdam.museum +and.museum +annefrank.museum +anthro.museum +anthropology.museum +antiques.museum +aquarium.museum +arboretum.museum +archaeological.museum +archaeology.museum +architecture.museum +art.museum +artanddesign.museum +artcenter.museum +artdeco.museum +arteducation.museum +artgallery.museum +arts.museum +artsandcrafts.museum +asmatart.museum +assassination.museum +assisi.museum +association.museum +astronomy.museum +atlanta.museum +austin.museum +australia.museum +automotive.museum +aviation.museum +axis.museum +badajoz.museum +baghdad.museum +bahn.museum +bale.museum +baltimore.museum +barcelona.museum +baseball.museum +basel.museum +baths.museum +bauern.museum +beauxarts.museum +beeldengeluid.museum +bellevue.museum +bergbau.museum +berkeley.museum +berlin.museum +bern.museum +bible.museum +bilbao.museum +bill.museum +birdart.museum +birthplace.museum +bonn.museum +boston.museum +botanical.museum +botanicalgarden.museum +botanicgarden.museum +botany.museum +brandywinevalley.museum +brasil.museum +bristol.museum +british.museum +britishcolumbia.museum +broadcast.museum +brunel.museum +brussel.museum +brussels.museum +bruxelles.museum +building.museum +burghof.museum +bus.museum +bushey.museum +cadaques.museum +california.museum +cambridge.museum +can.museum +canada.museum +capebreton.museum +carrier.museum +cartoonart.museum +casadelamoneda.museum +castle.museum +castres.museum +celtic.museum +center.museum +chattanooga.museum +cheltenham.museum +chesapeakebay.museum +chicago.museum +children.museum +childrens.museum +childrensgarden.museum +chiropractic.museum +chocolate.museum +christiansburg.museum +cincinnati.museum +cinema.museum +circus.museum +civilisation.museum +civilization.museum +civilwar.museum +clinton.museum +clock.museum +coal.museum +coastaldefence.museum +cody.museum +coldwar.museum +collection.museum +colonialwilliamsburg.museum +coloradoplateau.museum +columbia.museum +columbus.museum +communication.museum +communications.museum +community.museum +computer.museum +computerhistory.museum +comunicações.museum +contemporary.museum +contemporaryart.museum +convent.museum +copenhagen.museum +corporation.museum +correios-e-telecomunicações.museum +corvette.museum +costume.museum +countryestate.museum +county.museum +crafts.museum +cranbrook.museum +creation.museum +cultural.museum +culturalcenter.museum +culture.museum +cyber.museum +cymru.museum +dali.museum +dallas.museum +database.museum +ddr.museum +decorativearts.museum +delaware.museum +delmenhorst.museum +denmark.museum +depot.museum +design.museum +detroit.museum +dinosaur.museum +discovery.museum +dolls.museum +donostia.museum +durham.museum +eastafrica.museum +eastcoast.museum +education.museum +educational.museum +egyptian.museum +eisenbahn.museum +elburg.museum +elvendrell.museum +embroidery.museum +encyclopedic.museum +england.museum +entomology.museum +environment.museum +environmentalconservation.museum +epilepsy.museum +essex.museum +estate.museum +ethnology.museum +exeter.museum +exhibition.museum +family.museum +farm.museum +farmequipment.museum +farmers.museum 
+farmstead.museum +field.museum +figueres.museum +filatelia.museum +film.museum +fineart.museum +finearts.museum +finland.museum +flanders.museum +florida.museum +force.museum +fortmissoula.museum +fortworth.museum +foundation.museum +francaise.museum +frankfurt.museum +franziskaner.museum +freemasonry.museum +freiburg.museum +fribourg.museum +frog.museum +fundacio.museum +furniture.museum +gallery.museum +garden.museum +gateway.museum +geelvinck.museum +gemological.museum +geology.museum +georgia.museum +giessen.museum +glas.museum +glass.museum +gorge.museum +grandrapids.museum +graz.museum +guernsey.museum +halloffame.museum +hamburg.museum +handson.museum +harvestcelebration.museum +hawaii.museum +health.museum +heimatunduhren.museum +hellas.museum +helsinki.museum +hembygdsforbund.museum +heritage.museum +histoire.museum +historical.museum +historicalsociety.museum +historichouses.museum +historisch.museum +historisches.museum +history.museum +historyofscience.museum +horology.museum +house.museum +humanities.museum +illustration.museum +imageandsound.museum +indian.museum +indiana.museum +indianapolis.museum +indianmarket.museum +intelligence.museum +interactive.museum +iraq.museum +iron.museum +isleofman.museum +jamison.museum +jefferson.museum +jerusalem.museum +jewelry.museum +jewish.museum +jewishart.museum +jfk.museum +journalism.museum +judaica.museum +judygarland.museum +juedisches.museum +juif.museum +karate.museum +karikatur.museum +kids.museum +koebenhavn.museum +koeln.museum +kunst.museum +kunstsammlung.museum +kunstunddesign.museum +labor.museum +labour.museum +lajolla.museum +lancashire.museum +landes.museum +lans.museum +läns.museum +larsson.museum +lewismiller.museum +lincoln.museum +linz.museum +living.museum +livinghistory.museum +localhistory.museum +london.museum +losangeles.museum +louvre.museum +loyalist.museum +lucerne.museum +luxembourg.museum +luzern.museum +mad.museum +madrid.museum +mallorca.museum +manchester.museum +mansion.museum +mansions.museum +manx.museum +marburg.museum +maritime.museum +maritimo.museum +maryland.museum +marylhurst.museum +media.museum +medical.museum +medizinhistorisches.museum +meeres.museum +memorial.museum +mesaverde.museum +michigan.museum +midatlantic.museum +military.museum +mill.museum +miners.museum +mining.museum +minnesota.museum +missile.museum +missoula.museum +modern.museum +moma.museum +money.museum +monmouth.museum +monticello.museum +montreal.museum +moscow.museum +motorcycle.museum +muenchen.museum +muenster.museum +mulhouse.museum +muncie.museum +museet.museum +museumcenter.museum +museumvereniging.museum +music.museum +national.museum +nationalfirearms.museum +nationalheritage.museum +nativeamerican.museum +naturalhistory.museum +naturalhistorymuseum.museum +naturalsciences.museum +nature.museum +naturhistorisches.museum +natuurwetenschappen.museum +naumburg.museum +naval.museum +nebraska.museum +neues.museum +newhampshire.museum +newjersey.museum +newmexico.museum +newport.museum +newspaper.museum +newyork.museum +niepce.museum +norfolk.museum +north.museum +nrw.museum +nyc.museum +nyny.museum +oceanographic.museum +oceanographique.museum +omaha.museum +online.museum +ontario.museum +openair.museum +oregon.museum +oregontrail.museum +otago.museum +oxford.museum +pacific.museum +paderborn.museum +palace.museum +paleo.museum +palmsprings.museum +panama.museum +paris.museum +pasadena.museum +pharmacy.museum +philadelphia.museum +philadelphiaarea.museum +philately.museum +phoenix.museum +photography.museum 
+pilots.museum +pittsburgh.museum +planetarium.museum +plantation.museum +plants.museum +plaza.museum +portal.museum +portland.museum +portlligat.museum +posts-and-telecommunications.museum +preservation.museum +presidio.museum +press.museum +project.museum +public.museum +pubol.museum +quebec.museum +railroad.museum +railway.museum +research.museum +resistance.museum +riodejaneiro.museum +rochester.museum +rockart.museum +roma.museum +russia.museum +saintlouis.museum +salem.museum +salvadordali.museum +salzburg.museum +sandiego.museum +sanfrancisco.museum +santabarbara.museum +santacruz.museum +santafe.museum +saskatchewan.museum +satx.museum +savannahga.museum +schlesisches.museum +schoenbrunn.museum +schokoladen.museum +school.museum +schweiz.museum +science.museum +scienceandhistory.museum +scienceandindustry.museum +sciencecenter.museum +sciencecenters.museum +science-fiction.museum +sciencehistory.museum +sciences.museum +sciencesnaturelles.museum +scotland.museum +seaport.museum +settlement.museum +settlers.museum +shell.museum +sherbrooke.museum +sibenik.museum +silk.museum +ski.museum +skole.museum +society.museum +sologne.museum +soundandvision.museum +southcarolina.museum +southwest.museum +space.museum +spy.museum +square.museum +stadt.museum +stalbans.museum +starnberg.museum +state.museum +stateofdelaware.museum +station.museum +steam.museum +steiermark.museum +stjohn.museum +stockholm.museum +stpetersburg.museum +stuttgart.museum +suisse.museum +surgeonshall.museum +surrey.museum +svizzera.museum +sweden.museum +sydney.museum +tank.museum +tcm.museum +technology.museum +telekommunikation.museum +television.museum +texas.museum +textile.museum +theater.museum +time.museum +timekeeping.museum +topology.museum +torino.museum +touch.museum +town.museum +transport.museum +tree.museum +trolley.museum +trust.museum +trustee.museum +uhren.museum +ulm.museum +undersea.museum +university.museum +usa.museum +usantiques.museum +usarts.museum +uscountryestate.museum +usculture.museum +usdecorativearts.museum +usgarden.museum +ushistory.museum +ushuaia.museum +uslivinghistory.museum +utah.museum +uvic.museum +valley.museum +vantaa.museum +versailles.museum +viking.museum +village.museum +virginia.museum +virtual.museum +virtuel.museum +vlaanderen.museum +volkenkunde.museum +wales.museum +wallonie.museum +war.museum +washingtondc.museum +watchandclock.museum +watch-and-clock.museum +western.museum +westfalen.museum +whaling.museum +wildlife.museum +williamsburg.museum +windmill.museum +workshop.museum +york.museum +yorkshire.museum +yosemite.museum +youth.museum +zoological.museum +zoology.museum +ירושלים.museum +иком.museum + +// mv : https://en.wikipedia.org/wiki/.mv +// "mv" included because, contra Wikipedia, google.mv exists. 
+mv +aero.mv +biz.mv +com.mv +coop.mv +edu.mv +gov.mv +info.mv +int.mv +mil.mv +museum.mv +name.mv +net.mv +org.mv +pro.mv + +// mw : http://www.registrar.mw/ +mw +ac.mw +biz.mw +co.mw +com.mw +coop.mw +edu.mw +gov.mw +int.mw +museum.mw +net.mw +org.mw + +// mx : http://www.nic.mx/ +// Submitted by registry +mx +com.mx +org.mx +gob.mx +edu.mx +net.mx + +// my : http://www.mynic.net.my/ +my +com.my +net.my +org.my +gov.my +edu.my +mil.my +name.my + +// mz : http://www.uem.mz/ +// Submitted by registry +mz +ac.mz +adv.mz +co.mz +edu.mz +gov.mz +mil.mz +net.mz +org.mz + +// na : http://www.na-nic.com.na/ +// http://www.info.na/domain/ +na +info.na +pro.na +name.na +school.na +or.na +dr.na +us.na +mx.na +ca.na +in.na +cc.na +tv.na +ws.na +mobi.na +co.na +com.na +org.na + +// name : has 2nd-level tlds, but there's no list of them +name + +// nc : http://www.cctld.nc/ +nc +asso.nc +nom.nc + +// ne : https://en.wikipedia.org/wiki/.ne +ne + +// net : https://en.wikipedia.org/wiki/.net +net + +// nf : https://en.wikipedia.org/wiki/.nf +nf +com.nf +net.nf +per.nf +rec.nf +web.nf +arts.nf +firm.nf +info.nf +other.nf +store.nf + +// ng : http://www.nira.org.ng/index.php/join-us/register-ng-domain/189-nira-slds +ng +com.ng +edu.ng +gov.ng +i.ng +mil.ng +mobi.ng +name.ng +net.ng +org.ng +sch.ng + +// ni : http://www.nic.ni/ +ni +ac.ni +biz.ni +co.ni +com.ni +edu.ni +gob.ni +in.ni +info.ni +int.ni +mil.ni +net.ni +nom.ni +org.ni +web.ni + +// nl : https://en.wikipedia.org/wiki/.nl +// https://www.sidn.nl/ +// ccTLD for the Netherlands +nl + +// no : http://www.norid.no/regelverk/index.en.html +// The Norwegian registry has declined to notify us of updates. The web pages +// referenced below are the official source of the data. There is also an +// announce mailing list: +// https://postlister.uninett.no/sympa/info/norid-diskusjon +no +// Norid generic domains : http://www.norid.no/regelverk/vedlegg-c.en.html +fhs.no +vgs.no +fylkesbibl.no +folkebibl.no +museum.no +idrett.no +priv.no +// Non-Norid generic domains : http://www.norid.no/regelverk/vedlegg-d.en.html +mil.no +stat.no +dep.no +kommune.no +herad.no +// no geographical names : http://www.norid.no/regelverk/vedlegg-b.en.html +// counties +aa.no +ah.no +bu.no +fm.no +hl.no +hm.no +jan-mayen.no +mr.no +nl.no +nt.no +of.no +ol.no +oslo.no +rl.no +sf.no +st.no +svalbard.no +tm.no +tr.no +va.no +vf.no +// primary and lower secondary schools per county +gs.aa.no +gs.ah.no +gs.bu.no +gs.fm.no +gs.hl.no +gs.hm.no +gs.jan-mayen.no +gs.mr.no +gs.nl.no +gs.nt.no +gs.of.no +gs.ol.no +gs.oslo.no +gs.rl.no +gs.sf.no +gs.st.no +gs.svalbard.no +gs.tm.no +gs.tr.no +gs.va.no +gs.vf.no +// cities +akrehamn.no +åkrehamn.no +algard.no +ålgård.no +arna.no +brumunddal.no +bryne.no +bronnoysund.no +brønnøysund.no +drobak.no +drøbak.no +egersund.no +fetsund.no +floro.no +florø.no +fredrikstad.no +hokksund.no +honefoss.no +hønefoss.no +jessheim.no +jorpeland.no +jørpeland.no +kirkenes.no +kopervik.no +krokstadelva.no +langevag.no +langevåg.no +leirvik.no +mjondalen.no +mjøndalen.no +mo-i-rana.no +mosjoen.no +mosjøen.no +nesoddtangen.no +orkanger.no +osoyro.no +osøyro.no +raholt.no +råholt.no +sandnessjoen.no +sandnessjøen.no +skedsmokorset.no +slattum.no +spjelkavik.no +stathelle.no +stavern.no +stjordalshalsen.no +stjørdalshalsen.no +tananger.no +tranby.no +vossevangen.no +// communities +afjord.no +åfjord.no +agdenes.no +al.no +ål.no +alesund.no +ålesund.no +alstahaug.no +alta.no +áltá.no +alaheadju.no +álaheadju.no +alvdal.no +amli.no +åmli.no +amot.no +åmot.no 
+andebu.no +andoy.no +andøy.no +andasuolo.no +ardal.no +årdal.no +aremark.no +arendal.no +ås.no +aseral.no +åseral.no +asker.no +askim.no +askvoll.no +askoy.no +askøy.no +asnes.no +åsnes.no +audnedaln.no +aukra.no +aure.no +aurland.no +aurskog-holand.no +aurskog-høland.no +austevoll.no +austrheim.no +averoy.no +averøy.no +balestrand.no +ballangen.no +balat.no +bálát.no +balsfjord.no +bahccavuotna.no +báhccavuotna.no +bamble.no +bardu.no +beardu.no +beiarn.no +bajddar.no +bájddar.no +baidar.no +báidár.no +berg.no +bergen.no +berlevag.no +berlevåg.no +bearalvahki.no +bearalváhki.no +bindal.no +birkenes.no +bjarkoy.no +bjarkøy.no +bjerkreim.no +bjugn.no +bodo.no +bodø.no +badaddja.no +bådåddjå.no +budejju.no +bokn.no +bremanger.no +bronnoy.no +brønnøy.no +bygland.no +bykle.no +barum.no +bærum.no +bo.telemark.no +bø.telemark.no +bo.nordland.no +bø.nordland.no +bievat.no +bievát.no +bomlo.no +bømlo.no +batsfjord.no +båtsfjord.no +bahcavuotna.no +báhcavuotna.no +dovre.no +drammen.no +drangedal.no +dyroy.no +dyrøy.no +donna.no +dønna.no +eid.no +eidfjord.no +eidsberg.no +eidskog.no +eidsvoll.no +eigersund.no +elverum.no +enebakk.no +engerdal.no +etne.no +etnedal.no +evenes.no +evenassi.no +evenášši.no +evje-og-hornnes.no +farsund.no +fauske.no +fuossko.no +fuoisku.no +fedje.no +fet.no +finnoy.no +finnøy.no +fitjar.no +fjaler.no +fjell.no +flakstad.no +flatanger.no +flekkefjord.no +flesberg.no +flora.no +fla.no +flå.no +folldal.no +forsand.no +fosnes.no +frei.no +frogn.no +froland.no +frosta.no +frana.no +fræna.no +froya.no +frøya.no +fusa.no +fyresdal.no +forde.no +førde.no +gamvik.no +gangaviika.no +gáŋgaviika.no +gaular.no +gausdal.no +gildeskal.no +gildeskål.no +giske.no +gjemnes.no +gjerdrum.no +gjerstad.no +gjesdal.no +gjovik.no +gjøvik.no +gloppen.no +gol.no +gran.no +grane.no +granvin.no +gratangen.no +grimstad.no +grong.no +kraanghke.no +kråanghke.no +grue.no +gulen.no +hadsel.no +halden.no +halsa.no +hamar.no +hamaroy.no +habmer.no +hábmer.no +hapmir.no +hápmir.no +hammerfest.no +hammarfeasta.no +hámmárfeasta.no +haram.no +hareid.no +harstad.no +hasvik.no +aknoluokta.no +ákŋoluokta.no +hattfjelldal.no +aarborte.no +haugesund.no +hemne.no +hemnes.no +hemsedal.no +heroy.more-og-romsdal.no +herøy.møre-og-romsdal.no +heroy.nordland.no +herøy.nordland.no +hitra.no +hjartdal.no +hjelmeland.no +hobol.no +hobøl.no +hof.no +hol.no +hole.no +holmestrand.no +holtalen.no +holtålen.no +hornindal.no +horten.no +hurdal.no +hurum.no +hvaler.no +hyllestad.no +hagebostad.no +hægebostad.no +hoyanger.no +høyanger.no +hoylandet.no +høylandet.no +ha.no +hå.no +ibestad.no +inderoy.no +inderøy.no +iveland.no +jevnaker.no +jondal.no +jolster.no +jølster.no +karasjok.no +karasjohka.no +kárášjohka.no +karlsoy.no +galsa.no +gálsá.no +karmoy.no +karmøy.no +kautokeino.no +guovdageaidnu.no +klepp.no +klabu.no +klæbu.no +kongsberg.no +kongsvinger.no +kragero.no +kragerø.no +kristiansand.no +kristiansund.no +krodsherad.no +krødsherad.no +kvalsund.no +rahkkeravju.no +ráhkkerávju.no +kvam.no +kvinesdal.no +kvinnherad.no +kviteseid.no +kvitsoy.no +kvitsøy.no +kvafjord.no +kvæfjord.no +giehtavuoatna.no +kvanangen.no +kvænangen.no +navuotna.no +návuotna.no +kafjord.no +kåfjord.no +gaivuotna.no +gáivuotna.no +larvik.no +lavangen.no +lavagis.no +loabat.no +loabát.no +lebesby.no +davvesiida.no +leikanger.no +leirfjord.no +leka.no +leksvik.no +lenvik.no +leangaviika.no +leaŋgaviika.no +lesja.no +levanger.no +lier.no +lierne.no +lillehammer.no +lillesand.no +lindesnes.no +lindas.no +lindås.no +lom.no +loppa.no +lahppi.no 
+láhppi.no +lund.no +lunner.no +luroy.no +lurøy.no +luster.no +lyngdal.no +lyngen.no +ivgu.no +lardal.no +lerdal.no +lærdal.no +lodingen.no +lødingen.no +lorenskog.no +lørenskog.no +loten.no +løten.no +malvik.no +masoy.no +måsøy.no +muosat.no +muosát.no +mandal.no +marker.no +marnardal.no +masfjorden.no +meland.no +meldal.no +melhus.no +meloy.no +meløy.no +meraker.no +meråker.no +moareke.no +moåreke.no +midsund.no +midtre-gauldal.no +modalen.no +modum.no +molde.no +moskenes.no +moss.no +mosvik.no +malselv.no +målselv.no +malatvuopmi.no +málatvuopmi.no +namdalseid.no +aejrie.no +namsos.no +namsskogan.no +naamesjevuemie.no +nååmesjevuemie.no +laakesvuemie.no +nannestad.no +narvik.no +narviika.no +naustdal.no +nedre-eiker.no +nes.akershus.no +nes.buskerud.no +nesna.no +nesodden.no +nesseby.no +unjarga.no +unjárga.no +nesset.no +nissedal.no +nittedal.no +nord-aurdal.no +nord-fron.no +nord-odal.no +norddal.no +nordkapp.no +davvenjarga.no +davvenjárga.no +nordre-land.no +nordreisa.no +raisa.no +ráisa.no +nore-og-uvdal.no +notodden.no +naroy.no +nærøy.no +notteroy.no +nøtterøy.no +odda.no +oksnes.no +øksnes.no +oppdal.no +oppegard.no +oppegård.no +orkdal.no +orland.no +ørland.no +orskog.no +ørskog.no +orsta.no +ørsta.no +os.hedmark.no +os.hordaland.no +osen.no +osteroy.no +osterøy.no +ostre-toten.no +østre-toten.no +overhalla.no +ovre-eiker.no +øvre-eiker.no +oyer.no +øyer.no +oygarden.no +øygarden.no +oystre-slidre.no +øystre-slidre.no +porsanger.no +porsangu.no +porsáŋgu.no +porsgrunn.no +radoy.no +radøy.no +rakkestad.no +rana.no +ruovat.no +randaberg.no +rauma.no +rendalen.no +rennebu.no +rennesoy.no +rennesøy.no +rindal.no +ringebu.no +ringerike.no +ringsaker.no +rissa.no +risor.no +risør.no +roan.no +rollag.no +rygge.no +ralingen.no +rælingen.no +rodoy.no +rødøy.no +romskog.no +rømskog.no +roros.no +røros.no +rost.no +røst.no +royken.no +røyken.no +royrvik.no +røyrvik.no +rade.no +råde.no +salangen.no +siellak.no +saltdal.no +salat.no +sálát.no +sálat.no +samnanger.no +sande.more-og-romsdal.no +sande.møre-og-romsdal.no +sande.vestfold.no +sandefjord.no +sandnes.no +sandoy.no +sandøy.no +sarpsborg.no +sauda.no +sauherad.no +sel.no +selbu.no +selje.no +seljord.no +sigdal.no +siljan.no +sirdal.no +skaun.no +skedsmo.no +ski.no +skien.no +skiptvet.no +skjervoy.no +skjervøy.no +skierva.no +skiervá.no +skjak.no +skjåk.no +skodje.no +skanland.no +skånland.no +skanit.no +skánit.no +smola.no +smøla.no +snillfjord.no +snasa.no +snåsa.no +snoasa.no +snaase.no +snåase.no +sogndal.no +sokndal.no +sola.no +solund.no +songdalen.no +sortland.no +spydeberg.no +stange.no +stavanger.no +steigen.no +steinkjer.no +stjordal.no +stjørdal.no +stokke.no +stor-elvdal.no +stord.no +stordal.no +storfjord.no +omasvuotna.no +strand.no +stranda.no +stryn.no +sula.no +suldal.no +sund.no +sunndal.no +surnadal.no +sveio.no +svelvik.no +sykkylven.no +sogne.no +søgne.no +somna.no +sømna.no +sondre-land.no +søndre-land.no +sor-aurdal.no +sør-aurdal.no +sor-fron.no +sør-fron.no +sor-odal.no +sør-odal.no +sor-varanger.no +sør-varanger.no +matta-varjjat.no +mátta-várjjat.no +sorfold.no +sørfold.no +sorreisa.no +sørreisa.no +sorum.no +sørum.no +tana.no +deatnu.no +time.no +tingvoll.no +tinn.no +tjeldsund.no +dielddanuorri.no +tjome.no +tjøme.no +tokke.no +tolga.no +torsken.no +tranoy.no +tranøy.no +tromso.no +tromsø.no +tromsa.no +romsa.no +trondheim.no +troandin.no +trysil.no +trana.no +træna.no +trogstad.no +trøgstad.no +tvedestrand.no +tydal.no +tynset.no +tysfjord.no +divtasvuodna.no +divttasvuotna.no +tysnes.no +tysvar.no 
+tysvær.no +tonsberg.no +tønsberg.no +ullensaker.no +ullensvang.no +ulvik.no +utsira.no +vadso.no +vadsø.no +cahcesuolo.no +čáhcesuolo.no +vaksdal.no +valle.no +vang.no +vanylven.no +vardo.no +vardø.no +varggat.no +várggát.no +vefsn.no +vaapste.no +vega.no +vegarshei.no +vegårshei.no +vennesla.no +verdal.no +verran.no +vestby.no +vestnes.no +vestre-slidre.no +vestre-toten.no +vestvagoy.no +vestvågøy.no +vevelstad.no +vik.no +vikna.no +vindafjord.no +volda.no +voss.no +varoy.no +værøy.no +vagan.no +vågan.no +voagat.no +vagsoy.no +vågsøy.no +vaga.no +vågå.no +valer.ostfold.no +våler.østfold.no +valer.hedmark.no +våler.hedmark.no + +// np : http://www.mos.com.np/register.html +*.np + +// nr : http://cenpac.net.nr/dns/index.html +// Submitted by registry +nr +biz.nr +info.nr +gov.nr +edu.nr +org.nr +net.nr +com.nr + +// nu : https://en.wikipedia.org/wiki/.nu +nu + +// nz : https://en.wikipedia.org/wiki/.nz +// Submitted by registry +nz +ac.nz +co.nz +cri.nz +geek.nz +gen.nz +govt.nz +health.nz +iwi.nz +kiwi.nz +maori.nz +mil.nz +māori.nz +net.nz +org.nz +parliament.nz +school.nz + +// om : https://en.wikipedia.org/wiki/.om +om +co.om +com.om +edu.om +gov.om +med.om +museum.om +net.om +org.om +pro.om + +// onion : https://tools.ietf.org/html/rfc7686 +onion + +// org : https://en.wikipedia.org/wiki/.org +org + +// pa : http://www.nic.pa/ +// Some additional second level "domains" resolve directly as hostnames, such as +// pannet.pa, so we add a rule for "pa". +pa +ac.pa +gob.pa +com.pa +org.pa +sld.pa +edu.pa +net.pa +ing.pa +abo.pa +med.pa +nom.pa + +// pe : https://www.nic.pe/InformeFinalComision.pdf +pe +edu.pe +gob.pe +nom.pe +mil.pe +org.pe +com.pe +net.pe + +// pf : http://www.gobin.info/domainname/formulaire-pf.pdf +pf +com.pf +org.pf +edu.pf + +// pg : https://en.wikipedia.org/wiki/.pg +*.pg + +// ph : http://www.domains.ph/FAQ2.asp +// Submitted by registry +ph +com.ph +net.ph +org.ph +gov.ph +edu.ph +ngo.ph +mil.ph +i.ph + +// pk : http://pk5.pknic.net.pk/pk5/msgNamepk.PK +pk +com.pk +net.pk +edu.pk +org.pk +fam.pk +biz.pk +web.pk +gov.pk +gob.pk +gok.pk +gon.pk +gop.pk +gos.pk +info.pk + +// pl http://www.dns.pl/english/index.html +// Submitted by registry +pl +com.pl +net.pl +org.pl +// pl functional domains (http://www.dns.pl/english/index.html) +aid.pl +agro.pl +atm.pl +auto.pl +biz.pl +edu.pl +gmina.pl +gsm.pl +info.pl +mail.pl +miasta.pl +media.pl +mil.pl +nieruchomosci.pl +nom.pl +pc.pl +powiat.pl +priv.pl +realestate.pl +rel.pl +sex.pl +shop.pl +sklep.pl +sos.pl +szkola.pl +targi.pl +tm.pl +tourism.pl +travel.pl +turystyka.pl +// Government domains +gov.pl +ap.gov.pl +ic.gov.pl +is.gov.pl +us.gov.pl +kmpsp.gov.pl +kppsp.gov.pl +kwpsp.gov.pl +psp.gov.pl +wskr.gov.pl +kwp.gov.pl +mw.gov.pl +ug.gov.pl +um.gov.pl +umig.gov.pl +ugim.gov.pl +upow.gov.pl +uw.gov.pl +starostwo.gov.pl +pa.gov.pl +po.gov.pl +psse.gov.pl +pup.gov.pl +rzgw.gov.pl +sa.gov.pl +so.gov.pl +sr.gov.pl +wsa.gov.pl +sko.gov.pl +uzs.gov.pl +wiih.gov.pl +winb.gov.pl +pinb.gov.pl +wios.gov.pl +witd.gov.pl +wzmiuw.gov.pl +piw.gov.pl +wiw.gov.pl +griw.gov.pl +wif.gov.pl +oum.gov.pl +sdn.gov.pl +zp.gov.pl +uppo.gov.pl +mup.gov.pl +wuoz.gov.pl +konsulat.gov.pl +oirm.gov.pl +// pl regional domains (http://www.dns.pl/english/index.html) +augustow.pl +babia-gora.pl +bedzin.pl +beskidy.pl +bialowieza.pl +bialystok.pl +bielawa.pl +bieszczady.pl +boleslawiec.pl +bydgoszcz.pl +bytom.pl +cieszyn.pl +czeladz.pl +czest.pl +dlugoleka.pl +elblag.pl +elk.pl +glogow.pl +gniezno.pl +gorlice.pl +grajewo.pl +ilawa.pl +jaworzno.pl 
+jelenia-gora.pl +jgora.pl +kalisz.pl +kazimierz-dolny.pl +karpacz.pl +kartuzy.pl +kaszuby.pl +katowice.pl +kepno.pl +ketrzyn.pl +klodzko.pl +kobierzyce.pl +kolobrzeg.pl +konin.pl +konskowola.pl +kutno.pl +lapy.pl +lebork.pl +legnica.pl +lezajsk.pl +limanowa.pl +lomza.pl +lowicz.pl +lubin.pl +lukow.pl +malbork.pl +malopolska.pl +mazowsze.pl +mazury.pl +mielec.pl +mielno.pl +mragowo.pl +naklo.pl +nowaruda.pl +nysa.pl +olawa.pl +olecko.pl +olkusz.pl +olsztyn.pl +opoczno.pl +opole.pl +ostroda.pl +ostroleka.pl +ostrowiec.pl +ostrowwlkp.pl +pila.pl +pisz.pl +podhale.pl +podlasie.pl +polkowice.pl +pomorze.pl +pomorskie.pl +prochowice.pl +pruszkow.pl +przeworsk.pl +pulawy.pl +radom.pl +rawa-maz.pl +rybnik.pl +rzeszow.pl +sanok.pl +sejny.pl +slask.pl +slupsk.pl +sosnowiec.pl +stalowa-wola.pl +skoczow.pl +starachowice.pl +stargard.pl +suwalki.pl +swidnica.pl +swiebodzin.pl +swinoujscie.pl +szczecin.pl +szczytno.pl +tarnobrzeg.pl +tgory.pl +turek.pl +tychy.pl +ustka.pl +walbrzych.pl +warmia.pl +warszawa.pl +waw.pl +wegrow.pl +wielun.pl +wlocl.pl +wloclawek.pl +wodzislaw.pl +wolomin.pl +wroclaw.pl +zachpomor.pl +zagan.pl +zarow.pl +zgora.pl +zgorzelec.pl + +// pm : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +pm + +// pn : http://www.government.pn/PnRegistry/policies.htm +pn +gov.pn +co.pn +org.pn +edu.pn +net.pn + +// post : https://en.wikipedia.org/wiki/.post +post + +// pr : http://www.nic.pr/index.asp?f=1 +pr +com.pr +net.pr +org.pr +gov.pr +edu.pr +isla.pr +pro.pr +biz.pr +info.pr +name.pr +// these aren't mentioned on nic.pr, but on https://en.wikipedia.org/wiki/.pr +est.pr +prof.pr +ac.pr + +// pro : http://registry.pro/get-pro +pro +aaa.pro +aca.pro +acct.pro +avocat.pro +bar.pro +cpa.pro +eng.pro +jur.pro +law.pro +med.pro +recht.pro + +// ps : https://en.wikipedia.org/wiki/.ps +// http://www.nic.ps/registration/policy.html#reg +ps +edu.ps +gov.ps +sec.ps +plo.ps +com.ps +org.ps +net.ps + +// pt : http://online.dns.pt/dns/start_dns +pt +net.pt +gov.pt +org.pt +edu.pt +int.pt +publ.pt +com.pt +nome.pt + +// pw : https://en.wikipedia.org/wiki/.pw +pw +co.pw +ne.pw +or.pw +ed.pw +go.pw +belau.pw + +// py : http://www.nic.py/pautas.html#seccion_9 +// Submitted by registry +py +com.py +coop.py +edu.py +gov.py +mil.py +net.py +org.py + +// qa : http://domains.qa/en/ +qa +com.qa +edu.qa +gov.qa +mil.qa +name.qa +net.qa +org.qa +sch.qa + +// re : http://www.afnic.re/obtenir/chartes/nommage-re/annexe-descriptifs +re +asso.re +com.re +nom.re + +// ro : http://www.rotld.ro/ +ro +arts.ro +com.ro +firm.ro +info.ro +nom.ro +nt.ro +org.ro +rec.ro +store.ro +tm.ro +www.ro + +// rs : https://www.rnids.rs/en/domains/national-domains +rs +ac.rs +co.rs +edu.rs +gov.rs +in.rs +org.rs + +// ru : https://cctld.ru/files/pdf/docs/en/rules_ru-rf.pdf +// Submitted by George Georgievsky +ru + +// rw : https://www.ricta.org.rw/sites/default/files/resources/registry_registrar_contract_0.pdf +rw +ac.rw +co.rw +coop.rw +gov.rw +mil.rw +net.rw +org.rw + +// sa : http://www.nic.net.sa/ +sa +com.sa +net.sa +org.sa +gov.sa +med.sa +pub.sa +edu.sa +sch.sa + +// sb : http://www.sbnic.net.sb/ +// Submitted by registry +sb +com.sb +edu.sb +gov.sb +net.sb +org.sb + +// sc : http://www.nic.sc/ +sc +com.sc +gov.sc +net.sc +org.sc +edu.sc + +// sd : http://www.isoc.sd/sudanic.isoc.sd/billing_pricing.htm +// Submitted by registry +sd +com.sd +net.sd +org.sd +edu.sd +med.sd +tv.sd +gov.sd +info.sd + +// se : https://en.wikipedia.org/wiki/.se +// Submitted by registry +se +a.se +ac.se +b.se +bd.se +brand.se +c.se 
+d.se +e.se +f.se +fh.se +fhsk.se +fhv.se +g.se +h.se +i.se +k.se +komforb.se +kommunalforbund.se +komvux.se +l.se +lanbib.se +m.se +n.se +naturbruksgymn.se +o.se +org.se +p.se +parti.se +pp.se +press.se +r.se +s.se +t.se +tm.se +u.se +w.se +x.se +y.se +z.se + +// sg : http://www.nic.net.sg/page/registration-policies-procedures-and-guidelines +sg +com.sg +net.sg +org.sg +gov.sg +edu.sg +per.sg + +// sh : http://www.nic.sh/registrar.html +sh +com.sh +net.sh +gov.sh +org.sh +mil.sh + +// si : https://en.wikipedia.org/wiki/.si +si + +// sj : No registrations at this time. +// Submitted by registry +sj + +// sk : https://en.wikipedia.org/wiki/.sk +// list of 2nd level domains ? +sk + +// sl : http://www.nic.sl +// Submitted by registry +sl +com.sl +net.sl +edu.sl +gov.sl +org.sl + +// sm : https://en.wikipedia.org/wiki/.sm +sm + +// sn : https://en.wikipedia.org/wiki/.sn +sn +art.sn +com.sn +edu.sn +gouv.sn +org.sn +perso.sn +univ.sn + +// so : http://sonic.so/policies/ +so +com.so +edu.so +gov.so +me.so +net.so +org.so + +// sr : https://en.wikipedia.org/wiki/.sr +sr + +// ss : https://registry.nic.ss/ +// Submitted by registry +ss +biz.ss +com.ss +edu.ss +gov.ss +net.ss +org.ss + +// st : http://www.nic.st/html/policyrules/ +st +co.st +com.st +consulado.st +edu.st +embaixada.st +gov.st +mil.st +net.st +org.st +principe.st +saotome.st +store.st + +// su : https://en.wikipedia.org/wiki/.su +su + +// sv : http://www.svnet.org.sv/niveldos.pdf +sv +com.sv +edu.sv +gob.sv +org.sv +red.sv + +// sx : https://en.wikipedia.org/wiki/.sx +// Submitted by registry +sx +gov.sx + +// sy : https://en.wikipedia.org/wiki/.sy +// see also: http://www.gobin.info/domainname/sy.doc +sy +edu.sy +gov.sy +net.sy +mil.sy +com.sy +org.sy + +// sz : https://en.wikipedia.org/wiki/.sz +// http://www.sispa.org.sz/ +sz +co.sz +ac.sz +org.sz + +// tc : https://en.wikipedia.org/wiki/.tc +tc + +// td : https://en.wikipedia.org/wiki/.td +td + +// tel: https://en.wikipedia.org/wiki/.tel +// http://www.telnic.org/ +tel + +// tf : https://en.wikipedia.org/wiki/.tf +tf + +// tg : https://en.wikipedia.org/wiki/.tg +// http://www.nic.tg/ +tg + +// th : https://en.wikipedia.org/wiki/.th +// Submitted by registry +th +ac.th +co.th +go.th +in.th +mi.th +net.th +or.th + +// tj : http://www.nic.tj/policy.html +tj +ac.tj +biz.tj +co.tj +com.tj +edu.tj +go.tj +gov.tj +int.tj +mil.tj +name.tj +net.tj +nic.tj +org.tj +test.tj +web.tj + +// tk : https://en.wikipedia.org/wiki/.tk +tk + +// tl : https://en.wikipedia.org/wiki/.tl +tl +gov.tl + +// tm : http://www.nic.tm/local.html +tm +com.tm +co.tm +org.tm +net.tm +nom.tm +gov.tm +mil.tm +edu.tm + +// tn : https://en.wikipedia.org/wiki/.tn +// http://whois.ati.tn/ +tn +com.tn +ens.tn +fin.tn +gov.tn +ind.tn +intl.tn +nat.tn +net.tn +org.tn +info.tn +perso.tn +tourism.tn +edunet.tn +rnrt.tn +rns.tn +rnu.tn +mincom.tn +agrinet.tn +defense.tn +turen.tn + +// to : https://en.wikipedia.org/wiki/.to +// Submitted by registry +to +com.to +gov.to +net.to +org.to +edu.to +mil.to + +// tr : https://nic.tr/ +// https://nic.tr/forms/eng/policies.pdf +// https://nic.tr/index.php?USRACTN=PRICELST +tr +av.tr +bbs.tr +bel.tr +biz.tr +com.tr +dr.tr +edu.tr +gen.tr +gov.tr +info.tr +mil.tr +k12.tr +kep.tr +name.tr +net.tr +org.tr +pol.tr +tel.tr +tsk.tr +tv.tr +web.tr +// Used by Northern Cyprus +nc.tr +// Used by government agencies of Northern Cyprus +gov.nc.tr + +// tt : http://www.nic.tt/ +tt +co.tt +com.tt +org.tt +net.tt +biz.tt +info.tt +pro.tt +int.tt +coop.tt +jobs.tt +mobi.tt +travel.tt +museum.tt 
+aero.tt +name.tt +gov.tt +edu.tt + +// tv : https://en.wikipedia.org/wiki/.tv +// Not listing any 2LDs as reserved since none seem to exist in practice, +// Wikipedia notwithstanding. +tv + +// tw : https://en.wikipedia.org/wiki/.tw +tw +edu.tw +gov.tw +mil.tw +com.tw +net.tw +org.tw +idv.tw +game.tw +ebiz.tw +club.tw +網路.tw +組織.tw +商業.tw + +// tz : http://www.tznic.or.tz/index.php/domains +// Submitted by registry +tz +ac.tz +co.tz +go.tz +hotel.tz +info.tz +me.tz +mil.tz +mobi.tz +ne.tz +or.tz +sc.tz +tv.tz + +// ua : https://hostmaster.ua/policy/?ua +// Submitted by registry +ua +// ua 2LD +com.ua +edu.ua +gov.ua +in.ua +net.ua +org.ua +// ua geographic names +// https://hostmaster.ua/2ld/ +cherkassy.ua +cherkasy.ua +chernigov.ua +chernihiv.ua +chernivtsi.ua +chernovtsy.ua +ck.ua +cn.ua +cr.ua +crimea.ua +cv.ua +dn.ua +dnepropetrovsk.ua +dnipropetrovsk.ua +donetsk.ua +dp.ua +if.ua +ivano-frankivsk.ua +kh.ua +kharkiv.ua +kharkov.ua +kherson.ua +khmelnitskiy.ua +khmelnytskyi.ua +kiev.ua +kirovograd.ua +km.ua +kr.ua +krym.ua +ks.ua +kv.ua +kyiv.ua +lg.ua +lt.ua +lugansk.ua +lutsk.ua +lv.ua +lviv.ua +mk.ua +mykolaiv.ua +nikolaev.ua +od.ua +odesa.ua +odessa.ua +pl.ua +poltava.ua +rivne.ua +rovno.ua +rv.ua +sb.ua +sebastopol.ua +sevastopol.ua +sm.ua +sumy.ua +te.ua +ternopil.ua +uz.ua +uzhgorod.ua +vinnica.ua +vinnytsia.ua +vn.ua +volyn.ua +yalta.ua +zaporizhzhe.ua +zaporizhzhia.ua +zhitomir.ua +zhytomyr.ua +zp.ua +zt.ua + +// ug : https://www.registry.co.ug/ +ug +co.ug +or.ug +ac.ug +sc.ug +go.ug +ne.ug +com.ug +org.ug + +// uk : https://en.wikipedia.org/wiki/.uk +// Submitted by registry +uk +ac.uk +co.uk +gov.uk +ltd.uk +me.uk +net.uk +nhs.uk +org.uk +plc.uk +police.uk +*.sch.uk + +// us : https://en.wikipedia.org/wiki/.us +us +dni.us +fed.us +isa.us +kids.us +nsn.us +// us geographic names +ak.us +al.us +ar.us +as.us +az.us +ca.us +co.us +ct.us +dc.us +de.us +fl.us +ga.us +gu.us +hi.us +ia.us +id.us +il.us +in.us +ks.us +ky.us +la.us +ma.us +md.us +me.us +mi.us +mn.us +mo.us +ms.us +mt.us +nc.us +nd.us +ne.us +nh.us +nj.us +nm.us +nv.us +ny.us +oh.us +ok.us +or.us +pa.us +pr.us +ri.us +sc.us +sd.us +tn.us +tx.us +ut.us +vi.us +vt.us +va.us +wa.us +wi.us +wv.us +wy.us +// The registrar notes several more specific domains available in each state, +// such as state.*.us, dst.*.us, etc., but resolution of these is somewhat +// haphazard; in some states these domains resolve as addresses, while in others +// only subdomains are available, or even nothing at all. We include the +// most common ones where it's clear that different sites are different +// entities. 
+k12.ak.us +k12.al.us +k12.ar.us +k12.as.us +k12.az.us +k12.ca.us +k12.co.us +k12.ct.us +k12.dc.us +k12.de.us +k12.fl.us +k12.ga.us +k12.gu.us +// k12.hi.us Bug 614565 - Hawaii has a state-wide DOE login +k12.ia.us +k12.id.us +k12.il.us +k12.in.us +k12.ks.us +k12.ky.us +k12.la.us +k12.ma.us +k12.md.us +k12.me.us +k12.mi.us +k12.mn.us +k12.mo.us +k12.ms.us +k12.mt.us +k12.nc.us +// k12.nd.us Bug 1028347 - Removed at request of Travis Rosso +k12.ne.us +k12.nh.us +k12.nj.us +k12.nm.us +k12.nv.us +k12.ny.us +k12.oh.us +k12.ok.us +k12.or.us +k12.pa.us +k12.pr.us +// k12.ri.us Removed at request of Kim Cournoyer +k12.sc.us +// k12.sd.us Bug 934131 - Removed at request of James Booze +k12.tn.us +k12.tx.us +k12.ut.us +k12.vi.us +k12.vt.us +k12.va.us +k12.wa.us +k12.wi.us +// k12.wv.us Bug 947705 - Removed at request of Verne Britton +k12.wy.us +cc.ak.us +cc.al.us +cc.ar.us +cc.as.us +cc.az.us +cc.ca.us +cc.co.us +cc.ct.us +cc.dc.us +cc.de.us +cc.fl.us +cc.ga.us +cc.gu.us +cc.hi.us +cc.ia.us +cc.id.us +cc.il.us +cc.in.us +cc.ks.us +cc.ky.us +cc.la.us +cc.ma.us +cc.md.us +cc.me.us +cc.mi.us +cc.mn.us +cc.mo.us +cc.ms.us +cc.mt.us +cc.nc.us +cc.nd.us +cc.ne.us +cc.nh.us +cc.nj.us +cc.nm.us +cc.nv.us +cc.ny.us +cc.oh.us +cc.ok.us +cc.or.us +cc.pa.us +cc.pr.us +cc.ri.us +cc.sc.us +cc.sd.us +cc.tn.us +cc.tx.us +cc.ut.us +cc.vi.us +cc.vt.us +cc.va.us +cc.wa.us +cc.wi.us +cc.wv.us +cc.wy.us +lib.ak.us +lib.al.us +lib.ar.us +lib.as.us +lib.az.us +lib.ca.us +lib.co.us +lib.ct.us +lib.dc.us +// lib.de.us Issue #243 - Moved to Private section at request of Ed Moore +lib.fl.us +lib.ga.us +lib.gu.us +lib.hi.us +lib.ia.us +lib.id.us +lib.il.us +lib.in.us +lib.ks.us +lib.ky.us +lib.la.us +lib.ma.us +lib.md.us +lib.me.us +lib.mi.us +lib.mn.us +lib.mo.us +lib.ms.us +lib.mt.us +lib.nc.us +lib.nd.us +lib.ne.us +lib.nh.us +lib.nj.us +lib.nm.us +lib.nv.us +lib.ny.us +lib.oh.us +lib.ok.us +lib.or.us +lib.pa.us +lib.pr.us +lib.ri.us +lib.sc.us +lib.sd.us +lib.tn.us +lib.tx.us +lib.ut.us +lib.vi.us +lib.vt.us +lib.va.us +lib.wa.us +lib.wi.us +// lib.wv.us Bug 941670 - Removed at request of Larry W Arnold +lib.wy.us +// k12.ma.us contains school districts in Massachusetts. The 4LDs are +// managed independently except for private (PVT), charter (CHTR) and +// parochial (PAROCH) schools. Those are delegated directly to the +// 5LD operators. +pvt.k12.ma.us +chtr.k12.ma.us +paroch.k12.ma.us +// Merit Network, Inc. maintains the registry for =~ /(k12|cc|lib).mi.us/ and the following +// see also: http://domreg.merit.edu +// see also: whois -h whois.domreg.merit.edu help +ann-arbor.mi.us +cog.mi.us +dst.mi.us +eaton.mi.us +gen.mi.us +mus.mi.us +tec.mi.us +washtenaw.mi.us + +// uy : http://www.nic.org.uy/ +uy +com.uy +edu.uy +gub.uy +mil.uy +net.uy +org.uy + +// uz : http://www.reg.uz/ +uz +co.uz +com.uz +net.uz +org.uz + +// va : https://en.wikipedia.org/wiki/.va +va + +// vc : https://en.wikipedia.org/wiki/.vc +// Submitted by registry +vc +com.vc +net.vc +org.vc +gov.vc +mil.vc +edu.vc + +// ve : https://registro.nic.ve/ +// Submitted by registry +ve +arts.ve +co.ve +com.ve +e12.ve +edu.ve +firm.ve +gob.ve +gov.ve +info.ve +int.ve +mil.ve +net.ve +org.ve +rec.ve +store.ve +tec.ve +web.ve + +// vg : https://en.wikipedia.org/wiki/.vg +vg + +// vi : http://www.nic.vi/newdomainform.htm +// http://www.nic.vi/Domain_Rules/body_domain_rules.html indicates some other +// TLDs are "reserved", such as edu.vi and gov.vi, but doesn't actually say they +// are available for registration (which they do not seem to be). 
+vi +co.vi +com.vi +k12.vi +net.vi +org.vi + +// vn : https://www.dot.vn/vnnic/vnnic/domainregistration.jsp +vn +com.vn +net.vn +org.vn +edu.vn +gov.vn +int.vn +ac.vn +biz.vn +info.vn +name.vn +pro.vn +health.vn + +// vu : https://en.wikipedia.org/wiki/.vu +// http://www.vunic.vu/ +vu +com.vu +edu.vu +net.vu +org.vu + +// wf : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +wf + +// ws : https://en.wikipedia.org/wiki/.ws +// http://samoanic.ws/index.dhtml +ws +com.ws +net.ws +org.ws +gov.ws +edu.ws + +// yt : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +yt + +// IDN ccTLDs +// When submitting patches, please maintain a sort by ISO 3166 ccTLD, then +// U-label, and follow this format: +// // A-Label ("", [, variant info]) : +// // [sponsoring org] +// U-Label + +// xn--mgbaam7a8h ("Emerat", Arabic) : AE +// http://nic.ae/english/arabicdomain/rules.jsp +امارات + +// xn--y9a3aq ("hye", Armenian) : AM +// ISOC AM (operated by .am Registry) +հայ + +// xn--54b7fta0cc ("Bangla", Bangla) : BD +বাংলা + +// xn--90ae ("bg", Bulgarian) : BG +бг + +// xn--90ais ("bel", Belarusian/Russian Cyrillic) : BY +// Operated by .by registry +бел + +// xn--fiqs8s ("Zhongguo/China", Chinese, Simplified) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中国 + +// xn--fiqz9s ("Zhongguo/China", Chinese, Traditional) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中國 + +// xn--lgbbat1ad8j ("Algeria/Al Jazair", Arabic) : DZ +الجزائر + +// xn--wgbh1c ("Egypt/Masr", Arabic) : EG +// http://www.dotmasr.eg/ +مصر + +// xn--e1a4c ("eu", Cyrillic) : EU +// https://eurid.eu +ею + +// xn--qxa6a ("eu", Greek) : EU +// https://eurid.eu +ευ + +// xn--mgbah1a3hjkrd ("Mauritania", Arabic) : MR +موريتانيا + +// xn--node ("ge", Georgian Mkhedruli) : GE +გე + +// xn--qxam ("el", Greek) : GR +// Hellenic Ministry of Infrastructure, Transport, and Networks +ελ + +// xn--j6w193g ("Hong Kong", Chinese) : HK +// https://www.hkirc.hk +// Submitted by registry +// https://www.hkirc.hk/content.jsp?id=30#!/34 +香港 +公司.香港 +教育.香港 +政府.香港 +個人.香港 +網絡.香港 +組織.香港 + +// xn--2scrj9c ("Bharat", Kannada) : IN +// India +ಭಾರತ + +// xn--3hcrj9c ("Bharat", Oriya) : IN +// India +ଭାରତ + +// xn--45br5cyl ("Bharatam", Assamese) : IN +// India +ভাৰত + +// xn--h2breg3eve ("Bharatam", Sanskrit) : IN +// India +भारतम् + +// xn--h2brj9c8c ("Bharot", Santali) : IN +// India +भारोत + +// xn--mgbgu82a ("Bharat", Sindhi) : IN +// India +ڀارت + +// xn--rvc1e0am3e ("Bharatam", Malayalam) : IN +// India +ഭാരതം + +// xn--h2brj9c ("Bharat", Devanagari) : IN +// India +भारत + +// xn--mgbbh1a ("Bharat", Kashmiri) : IN +// India +بارت + +// xn--mgbbh1a71e ("Bharat", Arabic) : IN +// India +بھارت + +// xn--fpcrj9c3d ("Bharat", Telugu) : IN +// India +భారత్ + +// xn--gecrj9c ("Bharat", Gujarati) : IN +// India +ભારત + +// xn--s9brj9c ("Bharat", Gurmukhi) : IN +// India +ਭਾਰਤ + +// xn--45brj9c ("Bharat", Bengali) : IN +// India +ভারত + +// xn--xkc2dl3a5ee0h ("India", Tamil) : IN +// India +இந்தியா + +// xn--mgba3a4f16a ("Iran", Persian) : IR +ایران + +// xn--mgba3a4fra ("Iran", Arabic) : IR +ايران + +// xn--mgbtx2b ("Iraq", Arabic) : IQ +// Communications and Media Commission +عراق + +// xn--mgbayh7gpa ("al-Ordon", Arabic) : JO +// National Information Technology Center (NITC) +// Royal Scientific Society, Al-Jubeiha +الاردن + +// xn--3e0b707e ("Republic of Korea", Hangul) : KR +한국 + +// xn--80ao21a ("Kaz", Kazakh) : KZ +қаз + +// xn--fzc2c9e2c ("Lanka", Sinhalese-Sinhala) : LK +// https://nic.lk +ලංකා + +// 
xn--xkc2al3hye2a ("Ilangai", Tamil) : LK +// https://nic.lk +இலங்கை + +// xn--mgbc0a9azcg ("Morocco/al-Maghrib", Arabic) : MA +المغرب + +// xn--d1alf ("mkd", Macedonian) : MK +// MARnet +мкд + +// xn--l1acc ("mon", Mongolian) : MN +мон + +// xn--mix891f ("Macao", Chinese, Traditional) : MO +// MONIC / HNET Asia (Registry Operator for .mo) +澳門 + +// xn--mix082f ("Macao", Chinese, Simplified) : MO +澳门 + +// xn--mgbx4cd0ab ("Malaysia", Malay) : MY +مليسيا + +// xn--mgb9awbf ("Oman", Arabic) : OM +عمان + +// xn--mgbai9azgqp6j ("Pakistan", Urdu/Arabic) : PK +پاکستان + +// xn--mgbai9a5eva00b ("Pakistan", Urdu/Arabic, variant) : PK +پاكستان + +// xn--ygbi2ammx ("Falasteen", Arabic) : PS +// The Palestinian National Internet Naming Authority (PNINA) +// http://www.pnina.ps +فلسطين + +// xn--90a3ac ("srb", Cyrillic) : RS +// https://www.rnids.rs/en/domains/national-domains +срб +пр.срб +орг.срб +обр.срб +од.срб +упр.срб +ак.срб + +// xn--p1ai ("rf", Russian-Cyrillic) : RU +// https://cctld.ru/files/pdf/docs/en/rules_ru-rf.pdf +// Submitted by George Georgievsky +рф + +// xn--wgbl6a ("Qatar", Arabic) : QA +// http://www.ict.gov.qa/ +قطر + +// xn--mgberp4a5d4ar ("AlSaudiah", Arabic) : SA +// http://www.nic.net.sa/ +السعودية + +// xn--mgberp4a5d4a87g ("AlSaudiah", Arabic, variant) : SA +السعودیة + +// xn--mgbqly7c0a67fbc ("AlSaudiah", Arabic, variant) : SA +السعودیۃ + +// xn--mgbqly7cvafr ("AlSaudiah", Arabic, variant) : SA +السعوديه + +// xn--mgbpl2fh ("sudan", Arabic) : SD +// Operated by .sd registry +سودان + +// xn--yfro4i67o Singapore ("Singapore", Chinese) : SG +新加坡 + +// xn--clchc0ea0b2g2a9gcd ("Singapore", Tamil) : SG +சிங்கப்பூர் + +// xn--ogbpf8fl ("Syria", Arabic) : SY +سورية + +// xn--mgbtf8fl ("Syria", Arabic, variant) : SY +سوريا + +// xn--o3cw4h ("Thai", Thai) : TH +// http://www.thnic.co.th +ไทย +ศึกษา.ไทย +ธุรกิจ.ไทย +รัฐบาล.ไทย +ทหาร.ไทย +เน็ต.ไทย +องค์กร.ไทย + +// xn--pgbs0dh ("Tunisia", Arabic) : TN +// http://nic.tn +تونس + +// xn--kpry57d ("Taiwan", Chinese, Traditional) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台灣 + +// xn--kprw13d ("Taiwan", Chinese, Simplified) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台湾 + +// xn--nnx388a ("Taiwan", Chinese, variant) : TW +臺灣 + +// xn--j1amh ("ukr", Cyrillic) : UA +укр + +// xn--mgb2ddes ("AlYemen", Arabic) : YE +اليمن + +// xxx : http://icmregistry.com +xxx + +// ye : http://www.y.net.ye/services/domain_name.htm +*.ye + +// za : https://www.zadna.org.za/content/page/domain-information/ +ac.za +agric.za +alt.za +co.za +edu.za +gov.za +grondar.za +law.za +mil.za +net.za +ngo.za +nic.za +nis.za +nom.za +org.za +school.za +tm.za +web.za + +// zm : https://zicta.zm/ +// Submitted by registry +zm +ac.zm +biz.zm +co.zm +com.zm +edu.zm +gov.zm +info.zm +mil.zm +net.zm +org.zm +sch.zm + +// zw : https://www.potraz.gov.zw/ +// Confirmed by registry 2017-01-25 +zw +ac.zw +co.zw +gov.zw +mil.zw +org.zw + + +// newGTLDs + +// List of new gTLDs imported from https://www.icann.org/resources/registries/gtlds/v2/gtlds.json on 2020-08-07T17:16:50Z +// This list is auto-generated, don't edit it manually. +// aaa : 2015-02-26 American Automobile Association, Inc. +aaa + +// aarp : 2015-05-21 AARP +aarp + +// abarth : 2015-07-30 Fiat Chrysler Automobiles N.V. +abarth + +// abb : 2014-10-24 ABB Ltd +abb + +// abbott : 2014-07-24 Abbott Laboratories, Inc. +abbott + +// abbvie : 2015-07-30 AbbVie Inc. +abbvie + +// abc : 2015-07-30 Disney Enterprises, Inc. +abc + +// able : 2015-06-25 Able Inc. 
+able + +// abogado : 2014-04-24 Minds + Machines Group Limited +abogado + +// abudhabi : 2015-07-30 Abu Dhabi Systems and Information Centre +abudhabi + +// academy : 2013-11-07 Binky Moon, LLC +academy + +// accenture : 2014-08-15 Accenture plc +accenture + +// accountant : 2014-11-20 dot Accountant Limited +accountant + +// accountants : 2014-03-20 Binky Moon, LLC +accountants + +// aco : 2015-01-08 ACO Severin Ahlmann GmbH & Co. KG +aco + +// actor : 2013-12-12 Dog Beach, LLC +actor + +// adac : 2015-07-16 Allgemeiner Deutscher Automobil-Club e.V. (ADAC) +adac + +// ads : 2014-12-04 Charleston Road Registry Inc. +ads + +// adult : 2014-10-16 ICM Registry AD LLC +adult + +// aeg : 2015-03-19 Aktiebolaget Electrolux +aeg + +// aetna : 2015-05-21 Aetna Life Insurance Company +aetna + +// afamilycompany : 2015-07-23 Johnson Shareholdings, Inc. +afamilycompany + +// afl : 2014-10-02 Australian Football League +afl + +// africa : 2014-03-24 ZA Central Registry NPC trading as Registry.Africa +africa + +// agakhan : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +agakhan + +// agency : 2013-11-14 Binky Moon, LLC +agency + +// aig : 2014-12-18 American International Group, Inc. +aig + +// airbus : 2015-07-30 Airbus S.A.S. +airbus + +// airforce : 2014-03-06 Dog Beach, LLC +airforce + +// airtel : 2014-10-24 Bharti Airtel Limited +airtel + +// akdn : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +akdn + +// alfaromeo : 2015-07-31 Fiat Chrysler Automobiles N.V. +alfaromeo + +// alibaba : 2015-01-15 Alibaba Group Holding Limited +alibaba + +// alipay : 2015-01-15 Alibaba Group Holding Limited +alipay + +// allfinanz : 2014-07-03 Allfinanz Deutsche Vermögensberatung Aktiengesellschaft +allfinanz + +// allstate : 2015-07-31 Allstate Fire and Casualty Insurance Company +allstate + +// ally : 2015-06-18 Ally Financial Inc. +ally + +// alsace : 2014-07-02 Region Grand Est +alsace + +// alstom : 2015-07-30 ALSTOM +alstom + +// amazon : 2019-12-19 Amazon Registry Services, Inc. +amazon + +// americanexpress : 2015-07-31 American Express Travel Related Services Company, Inc. +americanexpress + +// americanfamily : 2015-07-23 AmFam, Inc. +americanfamily + +// amex : 2015-07-31 American Express Travel Related Services Company, Inc. +amex + +// amfam : 2015-07-23 AmFam, Inc. +amfam + +// amica : 2015-05-28 Amica Mutual Insurance Company +amica + +// amsterdam : 2014-07-24 Gemeente Amsterdam +amsterdam + +// analytics : 2014-12-18 Campus IP LLC +analytics + +// android : 2014-08-07 Charleston Road Registry Inc. +android + +// anquan : 2015-01-08 Beijing Qihu Keji Co., Ltd. +anquan + +// anz : 2015-07-31 Australia and New Zealand Banking Group Limited +anz + +// aol : 2015-09-17 Oath Inc. +aol + +// apartments : 2014-12-11 Binky Moon, LLC +apartments + +// app : 2015-05-14 Charleston Road Registry Inc. +app + +// apple : 2015-05-14 Apple Inc. +apple + +// aquarelle : 2014-07-24 Aquarelle.com +aquarelle + +// arab : 2015-11-12 League of Arab States +arab + +// aramco : 2014-11-20 Aramco Services Company +aramco + +// archi : 2014-02-06 Afilias Limited +archi + +// army : 2014-03-06 Dog Beach, LLC +army + +// art : 2016-03-24 UK Creative Ideas Limited +art + +// arte : 2014-12-11 Association Relative à la Télévision Européenne G.E.I.E. +arte + +// asda : 2015-07-31 Wal-Mart Stores, Inc. +asda + +// associates : 2014-03-06 Binky Moon, LLC +associates + +// athleta : 2015-07-30 The Gap, Inc. 
+athleta + +// attorney : 2014-03-20 Dog Beach, LLC +attorney + +// auction : 2014-03-20 Dog Beach, LLC +auction + +// audi : 2015-05-21 AUDI Aktiengesellschaft +audi + +// audible : 2015-06-25 Amazon Registry Services, Inc. +audible + +// audio : 2014-03-20 UNR Corp. +audio + +// auspost : 2015-08-13 Australian Postal Corporation +auspost + +// author : 2014-12-18 Amazon Registry Services, Inc. +author + +// auto : 2014-11-13 XYZ.COM LLC +auto + +// autos : 2014-01-09 DERAutos, LLC +autos + +// avianca : 2015-01-08 Avianca Holdings S.A. +avianca + +// aws : 2015-06-25 Amazon Registry Services, Inc. +aws + +// axa : 2013-12-19 AXA SA +axa + +// azure : 2014-12-18 Microsoft Corporation +azure + +// baby : 2015-04-09 XYZ.COM LLC +baby + +// baidu : 2015-01-08 Baidu, Inc. +baidu + +// banamex : 2015-07-30 Citigroup Inc. +banamex + +// bananarepublic : 2015-07-31 The Gap, Inc. +bananarepublic + +// band : 2014-06-12 Dog Beach, LLC +band + +// bank : 2014-09-25 fTLD Registry Services LLC +bank + +// bar : 2013-12-12 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +bar + +// barcelona : 2014-07-24 Municipi de Barcelona +barcelona + +// barclaycard : 2014-11-20 Barclays Bank PLC +barclaycard + +// barclays : 2014-11-20 Barclays Bank PLC +barclays + +// barefoot : 2015-06-11 Gallo Vineyards, Inc. +barefoot + +// bargains : 2013-11-14 Binky Moon, LLC +bargains + +// baseball : 2015-10-29 MLB Advanced Media DH, LLC +baseball + +// basketball : 2015-08-20 Fédération Internationale de Basketball (FIBA) +basketball + +// bauhaus : 2014-04-17 Werkhaus GmbH +bauhaus + +// bayern : 2014-01-23 Bayern Connect GmbH +bayern + +// bbc : 2014-12-18 British Broadcasting Corporation +bbc + +// bbt : 2015-07-23 BB&T Corporation +bbt + +// bbva : 2014-10-02 BANCO BILBAO VIZCAYA ARGENTARIA, S.A. +bbva + +// bcg : 2015-04-02 The Boston Consulting Group, Inc. +bcg + +// bcn : 2014-07-24 Municipi de Barcelona +bcn + +// beats : 2015-05-14 Beats Electronics, LLC +beats + +// beauty : 2015-12-03 XYZ.COM LLC +beauty + +// beer : 2014-01-09 Minds + Machines Group Limited +beer + +// bentley : 2014-12-18 Bentley Motors Limited +bentley + +// berlin : 2013-10-31 dotBERLIN GmbH & Co. KG +berlin + +// best : 2013-12-19 BestTLD Pty Ltd +best + +// bestbuy : 2015-07-31 BBY Solutions, Inc. +bestbuy + +// bet : 2015-05-07 Afilias Limited +bet + +// bharti : 2014-01-09 Bharti Enterprises (Holding) Private Limited +bharti + +// bible : 2014-06-19 American Bible Society +bible + +// bid : 2013-12-19 dot Bid Limited +bid + +// bike : 2013-08-27 Binky Moon, LLC +bike + +// bing : 2014-12-18 Microsoft Corporation +bing + +// bingo : 2014-12-04 Binky Moon, LLC +bingo + +// bio : 2014-03-06 Afilias Limited +bio + +// black : 2014-01-16 Afilias Limited +black + +// blackfriday : 2014-01-16 UNR Corp. 
+blackfriday + +// blockbuster : 2015-07-30 Dish DBS Corporation +blockbuster + +// blog : 2015-05-14 Knock Knock WHOIS There, LLC +blog + +// bloomberg : 2014-07-17 Bloomberg IP Holdings LLC +bloomberg + +// blue : 2013-11-07 Afilias Limited +blue + +// bms : 2014-10-30 Bristol-Myers Squibb Company +bms + +// bmw : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +bmw + +// bnpparibas : 2014-05-29 BNP Paribas +bnpparibas + +// boats : 2014-12-04 DERBoats, LLC +boats + +// boehringer : 2015-07-09 Boehringer Ingelheim International GmbH +boehringer + +// bofa : 2015-07-31 Bank of America Corporation +bofa + +// bom : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +bom + +// bond : 2014-06-05 ShortDot SA +bond + +// boo : 2014-01-30 Charleston Road Registry Inc. +boo + +// book : 2015-08-27 Amazon Registry Services, Inc. +book + +// booking : 2015-07-16 Booking.com B.V. +booking + +// bosch : 2015-06-18 Robert Bosch GMBH +bosch + +// bostik : 2015-05-28 Bostik SA +bostik + +// boston : 2015-12-10 Boston TLD Management, LLC +boston + +// bot : 2014-12-18 Amazon Registry Services, Inc. +bot + +// boutique : 2013-11-14 Binky Moon, LLC +boutique + +// box : 2015-11-12 .BOX INC. +box + +// bradesco : 2014-12-18 Banco Bradesco S.A. +bradesco + +// bridgestone : 2014-12-18 Bridgestone Corporation +bridgestone + +// broadway : 2014-12-22 Celebrate Broadway, Inc. +broadway + +// broker : 2014-12-11 Dotbroker Registry Limited +broker + +// brother : 2015-01-29 Brother Industries, Ltd. +brother + +// brussels : 2014-02-06 DNS.be vzw +brussels + +// budapest : 2013-11-21 Minds + Machines Group Limited +budapest + +// bugatti : 2015-07-23 Bugatti International SA +bugatti + +// build : 2013-11-07 Plan Bee LLC +build + +// builders : 2013-11-07 Binky Moon, LLC +builders + +// business : 2013-11-07 Binky Moon, LLC +business + +// buy : 2014-12-18 Amazon Registry Services, Inc. +buy + +// buzz : 2013-10-02 DOTSTRATEGY CO. +buzz + +// bzh : 2014-02-27 Association www.bzh +bzh + +// cab : 2013-10-24 Binky Moon, LLC +cab + +// cafe : 2015-02-11 Binky Moon, LLC +cafe + +// cal : 2014-07-24 Charleston Road Registry Inc. +cal + +// call : 2014-12-18 Amazon Registry Services, Inc. +call + +// calvinklein : 2015-07-30 PVH gTLD Holdings LLC +calvinklein + +// cam : 2016-04-21 AC Webconnecting Holding B.V. +cam + +// camera : 2013-08-27 Binky Moon, LLC +camera + +// camp : 2013-11-07 Binky Moon, LLC +camp + +// cancerresearch : 2014-05-15 Australian Cancer Research Foundation +cancerresearch + +// canon : 2014-09-12 Canon Inc. +canon + +// capetown : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +capetown + +// capital : 2014-03-06 Binky Moon, LLC +capital + +// capitalone : 2015-08-06 Capital One Financial Corporation +capitalone + +// car : 2015-01-22 XYZ.COM LLC +car + +// caravan : 2013-12-12 Caravan International, Inc. +caravan + +// cards : 2013-12-05 Binky Moon, LLC +cards + +// care : 2014-03-06 Binky Moon, LLC +care + +// career : 2013-10-09 dotCareer LLC +career + +// careers : 2013-10-02 Binky Moon, LLC +careers + +// cars : 2014-11-13 XYZ.COM LLC +cars + +// casa : 2013-11-21 Minds + Machines Group Limited +casa + +// case : 2015-09-03 CNH Industrial N.V. +case + +// caseih : 2015-09-03 CNH Industrial N.V. 
+caseih + +// cash : 2014-03-06 Binky Moon, LLC +cash + +// casino : 2014-12-18 Binky Moon, LLC +casino + +// catering : 2013-12-05 Binky Moon, LLC +catering + +// catholic : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +catholic + +// cba : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +cba + +// cbn : 2014-08-22 The Christian Broadcasting Network, Inc. +cbn + +// cbre : 2015-07-02 CBRE, Inc. +cbre + +// cbs : 2015-08-06 CBS Domains Inc. +cbs + +// ceb : 2015-04-09 The Corporate Executive Board Company +ceb + +// center : 2013-11-07 Binky Moon, LLC +center + +// ceo : 2013-11-07 CEOTLD Pty Ltd +ceo + +// cern : 2014-06-05 European Organization for Nuclear Research ("CERN") +cern + +// cfa : 2014-08-28 CFA Institute +cfa + +// cfd : 2014-12-11 DotCFD Registry Limited +cfd + +// chanel : 2015-04-09 Chanel International B.V. +chanel + +// channel : 2014-05-08 Charleston Road Registry Inc. +channel + +// charity : 2018-04-11 Binky Moon, LLC +charity + +// chase : 2015-04-30 JPMorgan Chase Bank, National Association +chase + +// chat : 2014-12-04 Binky Moon, LLC +chat + +// cheap : 2013-11-14 Binky Moon, LLC +cheap + +// chintai : 2015-06-11 CHINTAI Corporation +chintai + +// christmas : 2013-11-21 UNR Corp. +christmas + +// chrome : 2014-07-24 Charleston Road Registry Inc. +chrome + +// church : 2014-02-06 Binky Moon, LLC +church + +// cipriani : 2015-02-19 Hotel Cipriani Srl +cipriani + +// circle : 2014-12-18 Amazon Registry Services, Inc. +circle + +// cisco : 2014-12-22 Cisco Technology, Inc. +cisco + +// citadel : 2015-07-23 Citadel Domain LLC +citadel + +// citi : 2015-07-30 Citigroup Inc. +citi + +// citic : 2014-01-09 CITIC Group Corporation +citic + +// city : 2014-05-29 Binky Moon, LLC +city + +// cityeats : 2014-12-11 Lifestyle Domain Holdings, Inc. +cityeats + +// claims : 2014-03-20 Binky Moon, LLC +claims + +// cleaning : 2013-12-05 Binky Moon, LLC +cleaning + +// click : 2014-06-05 UNR Corp. +click + +// clinic : 2014-03-20 Binky Moon, LLC +clinic + +// clinique : 2015-10-01 The Estée Lauder Companies Inc. +clinique + +// clothing : 2013-08-27 Binky Moon, LLC +clothing + +// cloud : 2015-04-16 Aruba PEC S.p.A. +cloud + +// club : 2013-11-08 .CLUB DOMAINS, LLC +club + +// clubmed : 2015-06-25 Club Méditerranée S.A. +clubmed + +// coach : 2014-10-09 Binky Moon, LLC +coach + +// codes : 2013-10-31 Binky Moon, LLC +codes + +// coffee : 2013-10-17 Binky Moon, LLC +coffee + +// college : 2014-01-16 XYZ.COM LLC +college + +// cologne : 2014-02-05 dotKoeln GmbH +cologne + +// comcast : 2015-07-23 Comcast IP Holdings I, LLC +comcast + +// commbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +commbank + +// community : 2013-12-05 Binky Moon, LLC +community + +// company : 2013-11-07 Binky Moon, LLC +company + +// compare : 2015-10-08 Registry Services, LLC +compare + +// computer : 2013-10-24 Binky Moon, LLC +computer + +// comsec : 2015-01-08 VeriSign, Inc. +comsec + +// condos : 2013-12-05 Binky Moon, LLC +condos + +// construction : 2013-09-16 Binky Moon, LLC +construction + +// consulting : 2013-12-05 Dog Beach, LLC +consulting + +// contact : 2015-01-08 Dog Beach, LLC +contact + +// contractors : 2013-09-10 Binky Moon, LLC +contractors + +// cooking : 2013-11-21 Minds + Machines Group Limited +cooking + +// cookingchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. 
+cookingchannel + +// cool : 2013-11-14 Binky Moon, LLC +cool + +// corsica : 2014-09-25 Collectivité de Corse +corsica + +// country : 2013-12-19 DotCountry LLC +country + +// coupon : 2015-02-26 Amazon Registry Services, Inc. +coupon + +// coupons : 2015-03-26 Binky Moon, LLC +coupons + +// courses : 2014-12-04 OPEN UNIVERSITIES AUSTRALIA PTY LTD +courses + +// cpa : 2019-06-10 American Institute of Certified Public Accountants +cpa + +// credit : 2014-03-20 Binky Moon, LLC +credit + +// creditcard : 2014-03-20 Binky Moon, LLC +creditcard + +// creditunion : 2015-01-22 CUNA Performance Resources, LLC +creditunion + +// cricket : 2014-10-09 dot Cricket Limited +cricket + +// crown : 2014-10-24 Crown Equipment Corporation +crown + +// crs : 2014-04-03 Federated Co-operatives Limited +crs + +// cruise : 2015-12-10 Viking River Cruises (Bermuda) Ltd. +cruise + +// cruises : 2013-12-05 Binky Moon, LLC +cruises + +// csc : 2014-09-25 Alliance-One Services, Inc. +csc + +// cuisinella : 2014-04-03 SCHMIDT GROUPE S.A.S. +cuisinella + +// cymru : 2014-05-08 Nominet UK +cymru + +// cyou : 2015-01-22 ShortDot SA +cyou + +// dabur : 2014-02-06 Dabur India Limited +dabur + +// dad : 2014-01-23 Charleston Road Registry Inc. +dad + +// dance : 2013-10-24 Dog Beach, LLC +dance + +// data : 2016-06-02 Dish DBS Corporation +data + +// date : 2014-11-20 dot Date Limited +date + +// dating : 2013-12-05 Binky Moon, LLC +dating + +// datsun : 2014-03-27 NISSAN MOTOR CO., LTD. +datsun + +// day : 2014-01-30 Charleston Road Registry Inc. +day + +// dclk : 2014-11-20 Charleston Road Registry Inc. +dclk + +// dds : 2015-05-07 Minds + Machines Group Limited +dds + +// deal : 2015-06-25 Amazon Registry Services, Inc. +deal + +// dealer : 2014-12-22 Intercap Registry Inc. +dealer + +// deals : 2014-05-22 Binky Moon, LLC +deals + +// degree : 2014-03-06 Dog Beach, LLC +degree + +// delivery : 2014-09-11 Binky Moon, LLC +delivery + +// dell : 2014-10-24 Dell Inc. +dell + +// deloitte : 2015-07-31 Deloitte Touche Tohmatsu +deloitte + +// delta : 2015-02-19 Delta Air Lines, Inc. +delta + +// democrat : 2013-10-24 Dog Beach, LLC +democrat + +// dental : 2014-03-20 Binky Moon, LLC +dental + +// dentist : 2014-03-20 Dog Beach, LLC +dentist + +// desi : 2013-11-14 Desi Networks LLC +desi + +// design : 2014-11-07 Top Level Design, LLC +design + +// dev : 2014-10-16 Charleston Road Registry Inc. +dev + +// dhl : 2015-07-23 Deutsche Post AG +dhl + +// diamonds : 2013-09-22 Binky Moon, LLC +diamonds + +// diet : 2014-06-26 UNR Corp. +diet + +// digital : 2014-03-06 Binky Moon, LLC +digital + +// direct : 2014-04-10 Binky Moon, LLC +direct + +// directory : 2013-09-20 Binky Moon, LLC +directory + +// discount : 2014-03-06 Binky Moon, LLC +discount + +// discover : 2015-07-23 Discover Financial Services +discover + +// dish : 2015-07-30 Dish DBS Corporation +dish + +// diy : 2015-11-05 Lifestyle Domain Holdings, Inc. +diy + +// dnp : 2013-12-13 Dai Nippon Printing Co., Ltd. +dnp + +// docs : 2014-10-16 Charleston Road Registry Inc. +docs + +// doctor : 2016-06-02 Binky Moon, LLC +doctor + +// dog : 2014-12-04 Binky Moon, LLC +dog + +// domains : 2013-10-17 Binky Moon, LLC +domains + +// dot : 2015-05-21 Dish DBS Corporation +dot + +// download : 2014-11-20 dot Support Limited +download + +// drive : 2015-03-05 Charleston Road Registry Inc. +drive + +// dtv : 2015-06-04 Dish DBS Corporation +dtv + +// dubai : 2015-01-01 Dubai Smart Government Department +dubai + +// duck : 2015-07-23 Johnson Shareholdings, Inc. 
+duck + +// dunlop : 2015-07-02 The Goodyear Tire & Rubber Company +dunlop + +// dupont : 2015-06-25 E. I. du Pont de Nemours and Company +dupont + +// durban : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +durban + +// dvag : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +dvag + +// dvr : 2016-05-26 DISH Technologies L.L.C. +dvr + +// earth : 2014-12-04 Interlink Co., Ltd. +earth + +// eat : 2014-01-23 Charleston Road Registry Inc. +eat + +// eco : 2016-07-08 Big Room Inc. +eco + +// edeka : 2014-12-18 EDEKA Verband kaufmännischer Genossenschaften e.V. +edeka + +// education : 2013-11-07 Binky Moon, LLC +education + +// email : 2013-10-31 Binky Moon, LLC +email + +// emerck : 2014-04-03 Merck KGaA +emerck + +// energy : 2014-09-11 Binky Moon, LLC +energy + +// engineer : 2014-03-06 Dog Beach, LLC +engineer + +// engineering : 2014-03-06 Binky Moon, LLC +engineering + +// enterprises : 2013-09-20 Binky Moon, LLC +enterprises + +// epson : 2014-12-04 Seiko Epson Corporation +epson + +// equipment : 2013-08-27 Binky Moon, LLC +equipment + +// ericsson : 2015-07-09 Telefonaktiebolaget L M Ericsson +ericsson + +// erni : 2014-04-03 ERNI Group Holding AG +erni + +// esq : 2014-05-08 Charleston Road Registry Inc. +esq + +// estate : 2013-08-27 Binky Moon, LLC +estate + +// etisalat : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +etisalat + +// eurovision : 2014-04-24 European Broadcasting Union (EBU) +eurovision + +// eus : 2013-12-12 Puntueus Fundazioa +eus + +// events : 2013-12-05 Binky Moon, LLC +events + +// exchange : 2014-03-06 Binky Moon, LLC +exchange + +// expert : 2013-11-21 Binky Moon, LLC +expert + +// exposed : 2013-12-05 Binky Moon, LLC +exposed + +// express : 2015-02-11 Binky Moon, LLC +express + +// extraspace : 2015-05-14 Extra Space Storage LLC +extraspace + +// fage : 2014-12-18 Fage International S.A. +fage + +// fail : 2014-03-06 Binky Moon, LLC +fail + +// fairwinds : 2014-11-13 FairWinds Partners, LLC +fairwinds + +// faith : 2014-11-20 dot Faith Limited +faith + +// family : 2015-04-02 Dog Beach, LLC +family + +// fan : 2014-03-06 Dog Beach, LLC +fan + +// fans : 2014-11-07 ZDNS International Limited +fans + +// farm : 2013-11-07 Binky Moon, LLC +farm + +// farmers : 2015-07-09 Farmers Insurance Exchange +farmers + +// fashion : 2014-07-03 Minds + Machines Group Limited +fashion + +// fast : 2014-12-18 Amazon Registry Services, Inc. +fast + +// fedex : 2015-08-06 Federal Express Corporation +fedex + +// feedback : 2013-12-19 Top Level Spectrum, Inc. +feedback + +// ferrari : 2015-07-31 Fiat Chrysler Automobiles N.V. +ferrari + +// ferrero : 2014-12-18 Ferrero Trading Lux S.A. +ferrero + +// fiat : 2015-07-31 Fiat Chrysler Automobiles N.V. +fiat + +// fidelity : 2015-07-30 Fidelity Brokerage Services LLC +fidelity + +// fido : 2015-08-06 Rogers Communications Canada Inc. +fido + +// film : 2015-01-08 Motion Picture Domain Registry Pty Ltd +film + +// final : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +final + +// finance : 2014-03-20 Binky Moon, LLC +finance + +// financial : 2014-03-06 Binky Moon, LLC +financial + +// fire : 2015-06-25 Amazon Registry Services, Inc. 
+fire + +// firestone : 2014-12-18 Bridgestone Licensing Services, Inc +firestone + +// firmdale : 2014-03-27 Firmdale Holdings Limited +firmdale + +// fish : 2013-12-12 Binky Moon, LLC +fish + +// fishing : 2013-11-21 Minds + Machines Group Limited +fishing + +// fit : 2014-11-07 Minds + Machines Group Limited +fit + +// fitness : 2014-03-06 Binky Moon, LLC +fitness + +// flickr : 2015-04-02 Flickr, Inc. +flickr + +// flights : 2013-12-05 Binky Moon, LLC +flights + +// flir : 2015-07-23 FLIR Systems, Inc. +flir + +// florist : 2013-11-07 Binky Moon, LLC +florist + +// flowers : 2014-10-09 UNR Corp. +flowers + +// fly : 2014-05-08 Charleston Road Registry Inc. +fly + +// foo : 2014-01-23 Charleston Road Registry Inc. +foo + +// food : 2016-04-21 Lifestyle Domain Holdings, Inc. +food + +// foodnetwork : 2015-07-02 Lifestyle Domain Holdings, Inc. +foodnetwork + +// football : 2014-12-18 Binky Moon, LLC +football + +// ford : 2014-11-13 Ford Motor Company +ford + +// forex : 2014-12-11 Dotforex Registry Limited +forex + +// forsale : 2014-05-22 Dog Beach, LLC +forsale + +// forum : 2015-04-02 Fegistry, LLC +forum + +// foundation : 2013-12-05 Binky Moon, LLC +foundation + +// fox : 2015-09-11 FOX Registry, LLC +fox + +// free : 2015-12-10 Amazon Registry Services, Inc. +free + +// fresenius : 2015-07-30 Fresenius Immobilien-Verwaltungs-GmbH +fresenius + +// frl : 2014-05-15 FRLregistry B.V. +frl + +// frogans : 2013-12-19 OP3FT +frogans + +// frontdoor : 2015-07-02 Lifestyle Domain Holdings, Inc. +frontdoor + +// frontier : 2015-02-05 Frontier Communications Corporation +frontier + +// ftr : 2015-07-16 Frontier Communications Corporation +ftr + +// fujitsu : 2015-07-30 Fujitsu Limited +fujitsu + +// fujixerox : 2015-07-23 Xerox DNHC LLC +fujixerox + +// fun : 2016-01-14 DotSpace Inc. +fun + +// fund : 2014-03-20 Binky Moon, LLC +fund + +// furniture : 2014-03-20 Binky Moon, LLC +furniture + +// futbol : 2013-09-20 Dog Beach, LLC +futbol + +// fyi : 2015-04-02 Binky Moon, LLC +fyi + +// gal : 2013-11-07 Asociación puntoGAL +gal + +// gallery : 2013-09-13 Binky Moon, LLC +gallery + +// gallo : 2015-06-11 Gallo Vineyards, Inc. +gallo + +// gallup : 2015-02-19 Gallup, Inc. +gallup + +// game : 2015-05-28 UNR Corp. +game + +// games : 2015-05-28 Dog Beach, LLC +games + +// gap : 2015-07-31 The Gap, Inc. +gap + +// garden : 2014-06-26 Minds + Machines Group Limited +garden + +// gay : 2019-05-23 Top Level Design, LLC +gay + +// gbiz : 2014-07-17 Charleston Road Registry Inc. +gbiz + +// gdn : 2014-07-31 Joint Stock Company "Navigation-information systems" +gdn + +// gea : 2014-12-04 GEA Group Aktiengesellschaft +gea + +// gent : 2014-01-23 COMBELL NV +gent + +// genting : 2015-03-12 Resorts World Inc Pte. Ltd. +genting + +// george : 2015-07-31 Wal-Mart Stores, Inc. +george + +// ggee : 2014-01-09 GMO Internet, Inc. +ggee + +// gift : 2013-10-17 DotGift, LLC +gift + +// gifts : 2014-07-03 Binky Moon, LLC +gifts + +// gives : 2014-03-06 Dog Beach, LLC +gives + +// giving : 2014-11-13 Giving Limited +giving + +// glade : 2015-07-23 Johnson Shareholdings, Inc. +glade + +// glass : 2013-11-07 Binky Moon, LLC +glass + +// gle : 2014-07-24 Charleston Road Registry Inc. +gle + +// global : 2014-04-17 Dot Global Domain Registry Limited +global + +// globo : 2013-12-19 Globo Comunicação e Participações S.A +globo + +// gmail : 2014-05-01 Charleston Road Registry Inc. +gmail + +// gmbh : 2016-01-29 Binky Moon, LLC +gmbh + +// gmo : 2014-01-09 GMO Internet, Inc. 
+gmo + +// gmx : 2014-04-24 1&1 Mail & Media GmbH +gmx + +// godaddy : 2015-07-23 Go Daddy East, LLC +godaddy + +// gold : 2015-01-22 Binky Moon, LLC +gold + +// goldpoint : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +goldpoint + +// golf : 2014-12-18 Binky Moon, LLC +golf + +// goo : 2014-12-18 NTT Resonant Inc. +goo + +// goodyear : 2015-07-02 The Goodyear Tire & Rubber Company +goodyear + +// goog : 2014-11-20 Charleston Road Registry Inc. +goog + +// google : 2014-07-24 Charleston Road Registry Inc. +google + +// gop : 2014-01-16 Republican State Leadership Committee, Inc. +gop + +// got : 2014-12-18 Amazon Registry Services, Inc. +got + +// grainger : 2015-05-07 Grainger Registry Services, LLC +grainger + +// graphics : 2013-09-13 Binky Moon, LLC +graphics + +// gratis : 2014-03-20 Binky Moon, LLC +gratis + +// green : 2014-05-08 Afilias Limited +green + +// gripe : 2014-03-06 Binky Moon, LLC +gripe + +// grocery : 2016-06-16 Wal-Mart Stores, Inc. +grocery + +// group : 2014-08-15 Binky Moon, LLC +group + +// guardian : 2015-07-30 The Guardian Life Insurance Company of America +guardian + +// gucci : 2014-11-13 Guccio Gucci S.p.a. +gucci + +// guge : 2014-08-28 Charleston Road Registry Inc. +guge + +// guide : 2013-09-13 Binky Moon, LLC +guide + +// guitars : 2013-11-14 UNR Corp. +guitars + +// guru : 2013-08-27 Binky Moon, LLC +guru + +// hair : 2015-12-03 XYZ.COM LLC +hair + +// hamburg : 2014-02-20 Hamburg Top-Level-Domain GmbH +hamburg + +// hangout : 2014-11-13 Charleston Road Registry Inc. +hangout + +// haus : 2013-12-05 Dog Beach, LLC +haus + +// hbo : 2015-07-30 HBO Registry Services, Inc. +hbo + +// hdfc : 2015-07-30 HOUSING DEVELOPMENT FINANCE CORPORATION LIMITED +hdfc + +// hdfcbank : 2015-02-12 HDFC Bank Limited +hdfcbank + +// health : 2015-02-11 DotHealth, LLC +health + +// healthcare : 2014-06-12 Binky Moon, LLC +healthcare + +// help : 2014-06-26 UNR Corp. +help + +// helsinki : 2015-02-05 City of Helsinki +helsinki + +// here : 2014-02-06 Charleston Road Registry Inc. +here + +// hermes : 2014-07-10 HERMES INTERNATIONAL +hermes + +// hgtv : 2015-07-02 Lifestyle Domain Holdings, Inc. +hgtv + +// hiphop : 2014-03-06 UNR Corp. +hiphop + +// hisamitsu : 2015-07-16 Hisamitsu Pharmaceutical Co.,Inc. +hisamitsu + +// hitachi : 2014-10-31 Hitachi, Ltd. +hitachi + +// hiv : 2014-03-13 UNR Corp. +hiv + +// hkt : 2015-05-14 PCCW-HKT DataCom Services Limited +hkt + +// hockey : 2015-03-19 Binky Moon, LLC +hockey + +// holdings : 2013-08-27 Binky Moon, LLC +holdings + +// holiday : 2013-11-07 Binky Moon, LLC +holiday + +// homedepot : 2015-04-02 Home Depot Product Authority, LLC +homedepot + +// homegoods : 2015-07-16 The TJX Companies, Inc. +homegoods + +// homes : 2014-01-09 DERHomes, LLC +homes + +// homesense : 2015-07-16 The TJX Companies, Inc. +homesense + +// honda : 2014-12-18 Honda Motor Co., Ltd. +honda + +// horse : 2013-11-21 Minds + Machines Group Limited +horse + +// hospital : 2016-10-20 Binky Moon, LLC +hospital + +// host : 2014-04-17 DotHost Inc. +host + +// hosting : 2014-05-29 UNR Corp. +hosting + +// hot : 2015-08-27 Amazon Registry Services, Inc. +hot + +// hoteles : 2015-03-05 Travel Reservations SRL +hoteles + +// hotels : 2016-04-07 Booking.com B.V. +hotels + +// hotmail : 2014-12-18 Microsoft Corporation +hotmail + +// house : 2013-11-07 Binky Moon, LLC +house + +// how : 2014-01-23 Charleston Road Registry Inc. 
+how + +// hsbc : 2014-10-24 HSBC Global Services (UK) Limited +hsbc + +// hughes : 2015-07-30 Hughes Satellite Systems Corporation +hughes + +// hyatt : 2015-07-30 Hyatt GTLD, L.L.C. +hyatt + +// hyundai : 2015-07-09 Hyundai Motor Company +hyundai + +// ibm : 2014-07-31 International Business Machines Corporation +ibm + +// icbc : 2015-02-19 Industrial and Commercial Bank of China Limited +icbc + +// ice : 2014-10-30 IntercontinentalExchange, Inc. +ice + +// icu : 2015-01-08 ShortDot SA +icu + +// ieee : 2015-07-23 IEEE Global LLC +ieee + +// ifm : 2014-01-30 ifm electronic gmbh +ifm + +// ikano : 2015-07-09 Ikano S.A. +ikano + +// imamat : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +imamat + +// imdb : 2015-06-25 Amazon Registry Services, Inc. +imdb + +// immo : 2014-07-10 Binky Moon, LLC +immo + +// immobilien : 2013-11-07 Dog Beach, LLC +immobilien + +// inc : 2018-03-10 Intercap Registry Inc. +inc + +// industries : 2013-12-05 Binky Moon, LLC +industries + +// infiniti : 2014-03-27 NISSAN MOTOR CO., LTD. +infiniti + +// ing : 2014-01-23 Charleston Road Registry Inc. +ing + +// ink : 2013-12-05 Top Level Design, LLC +ink + +// institute : 2013-11-07 Binky Moon, LLC +institute + +// insurance : 2015-02-19 fTLD Registry Services LLC +insurance + +// insure : 2014-03-20 Binky Moon, LLC +insure + +// intel : 2015-08-06 Intel Corporation +intel + +// international : 2013-11-07 Binky Moon, LLC +international + +// intuit : 2015-07-30 Intuit Administrative Services, Inc. +intuit + +// investments : 2014-03-20 Binky Moon, LLC +investments + +// ipiranga : 2014-08-28 Ipiranga Produtos de Petroleo S.A. +ipiranga + +// irish : 2014-08-07 Binky Moon, LLC +irish + +// ismaili : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +ismaili + +// ist : 2014-08-28 Istanbul Metropolitan Municipality +ist + +// istanbul : 2014-08-28 Istanbul Metropolitan Municipality +istanbul + +// itau : 2014-10-02 Itau Unibanco Holding S.A. +itau + +// itv : 2015-07-09 ITV Services Limited +itv + +// iveco : 2015-09-03 CNH Industrial N.V. +iveco + +// jaguar : 2014-11-13 Jaguar Land Rover Ltd +jaguar + +// java : 2014-06-19 Oracle Corporation +java + +// jcb : 2014-11-20 JCB Co., Ltd. +jcb + +// jcp : 2015-04-23 JCP Media, Inc. +jcp + +// jeep : 2015-07-30 FCA US LLC. +jeep + +// jetzt : 2014-01-09 Binky Moon, LLC +jetzt + +// jewelry : 2015-03-05 Binky Moon, LLC +jewelry + +// jio : 2015-04-02 Reliance Industries Limited +jio + +// jll : 2015-04-02 Jones Lang LaSalle Incorporated +jll + +// jmp : 2015-03-26 Matrix IP LLC +jmp + +// jnj : 2015-06-18 Johnson & Johnson Services, Inc. +jnj + +// joburg : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +joburg + +// jot : 2014-12-18 Amazon Registry Services, Inc. +jot + +// joy : 2014-12-18 Amazon Registry Services, Inc. +joy + +// jpmorgan : 2015-04-30 JPMorgan Chase Bank, National Association +jpmorgan + +// jprs : 2014-09-18 Japan Registry Services Co., Ltd. +jprs + +// juegos : 2014-03-20 UNR Corp. +juegos + +// juniper : 2015-07-30 JUNIPER NETWORKS, INC. +juniper + +// kaufen : 2013-11-07 Dog Beach, LLC +kaufen + +// kddi : 2014-09-12 KDDI CORPORATION +kddi + +// kerryhotels : 2015-04-30 Kerry Trading Co. Limited +kerryhotels + +// kerrylogistics : 2015-04-09 Kerry Trading Co. Limited +kerrylogistics + +// kerryproperties : 2015-04-09 Kerry Trading Co. 
Limited +kerryproperties + +// kfh : 2014-12-04 Kuwait Finance House +kfh + +// kia : 2015-07-09 KIA MOTORS CORPORATION +kia + +// kim : 2013-09-23 Afilias Limited +kim + +// kinder : 2014-11-07 Ferrero Trading Lux S.A. +kinder + +// kindle : 2015-06-25 Amazon Registry Services, Inc. +kindle + +// kitchen : 2013-09-20 Binky Moon, LLC +kitchen + +// kiwi : 2013-09-20 DOT KIWI LIMITED +kiwi + +// koeln : 2014-01-09 dotKoeln GmbH +koeln + +// komatsu : 2015-01-08 Komatsu Ltd. +komatsu + +// kosher : 2015-08-20 Kosher Marketing Assets LLC +kosher + +// kpmg : 2015-04-23 KPMG International Cooperative (KPMG International Genossenschaft) +kpmg + +// kpn : 2015-01-08 Koninklijke KPN N.V. +kpn + +// krd : 2013-12-05 KRG Department of Information Technology +krd + +// kred : 2013-12-19 KredTLD Pty Ltd +kred + +// kuokgroup : 2015-04-09 Kerry Trading Co. Limited +kuokgroup + +// kyoto : 2014-11-07 Academic Institution: Kyoto Jyoho Gakuen +kyoto + +// lacaixa : 2014-01-09 Fundación Bancaria Caixa d’Estalvis i Pensions de Barcelona, “la Caixa” +lacaixa + +// lamborghini : 2015-06-04 Automobili Lamborghini S.p.A. +lamborghini + +// lamer : 2015-10-01 The Estée Lauder Companies Inc. +lamer + +// lancaster : 2015-02-12 LANCASTER +lancaster + +// lancia : 2015-07-31 Fiat Chrysler Automobiles N.V. +lancia + +// land : 2013-09-10 Binky Moon, LLC +land + +// landrover : 2014-11-13 Jaguar Land Rover Ltd +landrover + +// lanxess : 2015-07-30 LANXESS Corporation +lanxess + +// lasalle : 2015-04-02 Jones Lang LaSalle Incorporated +lasalle + +// lat : 2014-10-16 ECOM-LAC Federaciòn de Latinoamèrica y el Caribe para Internet y el Comercio Electrònico +lat + +// latino : 2015-07-30 Dish DBS Corporation +latino + +// latrobe : 2014-06-16 La Trobe University +latrobe + +// law : 2015-01-22 LW TLD Limited +law + +// lawyer : 2014-03-20 Dog Beach, LLC +lawyer + +// lds : 2014-03-20 IRI Domain Management, LLC +lds + +// lease : 2014-03-06 Binky Moon, LLC +lease + +// leclerc : 2014-08-07 A.C.D. LEC Association des Centres Distributeurs Edouard Leclerc +leclerc + +// lefrak : 2015-07-16 LeFrak Organization, Inc. +lefrak + +// legal : 2014-10-16 Binky Moon, LLC +legal + +// lego : 2015-07-16 LEGO Juris A/S +lego + +// lexus : 2015-04-23 TOYOTA MOTOR CORPORATION +lexus + +// lgbt : 2014-05-08 Afilias Limited +lgbt + +// lidl : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +lidl + +// life : 2014-02-06 Binky Moon, LLC +life + +// lifeinsurance : 2015-01-15 American Council of Life Insurers +lifeinsurance + +// lifestyle : 2014-12-11 Lifestyle Domain Holdings, Inc. +lifestyle + +// lighting : 2013-08-27 Binky Moon, LLC +lighting + +// like : 2014-12-18 Amazon Registry Services, Inc. +like + +// lilly : 2015-07-31 Eli Lilly and Company +lilly + +// limited : 2014-03-06 Binky Moon, LLC +limited + +// limo : 2013-10-17 Binky Moon, LLC +limo + +// lincoln : 2014-11-13 Ford Motor Company +lincoln + +// linde : 2014-12-04 Linde Aktiengesellschaft +linde + +// link : 2013-11-14 UNR Corp. +link + +// lipsy : 2015-06-25 Lipsy Ltd +lipsy + +// live : 2014-12-04 Dog Beach, LLC +live + +// living : 2015-07-30 Lifestyle Domain Holdings, Inc. +living + +// lixil : 2015-03-19 LIXIL Group Corporation +lixil + +// llc : 2017-12-14 Afilias Limited +llc + +// llp : 2019-08-26 UNR Corp. +llp + +// loan : 2014-11-20 dot Loan Limited +loan + +// loans : 2014-03-20 Binky Moon, LLC +loans + +// locker : 2015-06-04 Dish DBS Corporation +locker + +// locus : 2015-06-25 Locus Analytics LLC +locus + +// loft : 2015-07-30 Annco, Inc. 
+loft + +// lol : 2015-01-30 UNR Corp. +lol + +// london : 2013-11-14 Dot London Domains Limited +london + +// lotte : 2014-11-07 Lotte Holdings Co., Ltd. +lotte + +// lotto : 2014-04-10 Afilias Limited +lotto + +// love : 2014-12-22 Merchant Law Group LLP +love + +// lpl : 2015-07-30 LPL Holdings, Inc. +lpl + +// lplfinancial : 2015-07-30 LPL Holdings, Inc. +lplfinancial + +// ltd : 2014-09-25 Binky Moon, LLC +ltd + +// ltda : 2014-04-17 InterNetX, Corp +ltda + +// lundbeck : 2015-08-06 H. Lundbeck A/S +lundbeck + +// lupin : 2014-11-07 LUPIN LIMITED +lupin + +// luxe : 2014-01-09 Minds + Machines Group Limited +luxe + +// luxury : 2013-10-17 Luxury Partners, LLC +luxury + +// macys : 2015-07-31 Macys, Inc. +macys + +// madrid : 2014-05-01 Comunidad de Madrid +madrid + +// maif : 2014-10-02 Mutuelle Assurance Instituteur France (MAIF) +maif + +// maison : 2013-12-05 Binky Moon, LLC +maison + +// makeup : 2015-01-15 XYZ.COM LLC +makeup + +// man : 2014-12-04 MAN SE +man + +// management : 2013-11-07 Binky Moon, LLC +management + +// mango : 2013-10-24 PUNTO FA S.L. +mango + +// map : 2016-06-09 Charleston Road Registry Inc. +map + +// market : 2014-03-06 Dog Beach, LLC +market + +// marketing : 2013-11-07 Binky Moon, LLC +marketing + +// markets : 2014-12-11 Dotmarkets Registry Limited +markets + +// marriott : 2014-10-09 Marriott Worldwide Corporation +marriott + +// marshalls : 2015-07-16 The TJX Companies, Inc. +marshalls + +// maserati : 2015-07-31 Fiat Chrysler Automobiles N.V. +maserati + +// mattel : 2015-08-06 Mattel Sites, Inc. +mattel + +// mba : 2015-04-02 Binky Moon, LLC +mba + +// mckinsey : 2015-07-31 McKinsey Holdings, Inc. +mckinsey + +// med : 2015-08-06 Medistry LLC +med + +// media : 2014-03-06 Binky Moon, LLC +media + +// meet : 2014-01-16 Charleston Road Registry Inc. +meet + +// melbourne : 2014-05-29 The Crown in right of the State of Victoria, represented by its Department of State Development, Business and Innovation +melbourne + +// meme : 2014-01-30 Charleston Road Registry Inc. +meme + +// memorial : 2014-10-16 Dog Beach, LLC +memorial + +// men : 2015-02-26 Exclusive Registry Limited +men + +// menu : 2013-09-11 Dot Menu Registry, LLC +menu + +// merckmsd : 2016-07-14 MSD Registry Holdings, Inc. +merckmsd + +// metlife : 2015-05-07 MetLife Services and Solutions, LLC +metlife + +// miami : 2013-12-19 Minds + Machines Group Limited +miami + +// microsoft : 2014-12-18 Microsoft Corporation +microsoft + +// mini : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +mini + +// mint : 2015-07-30 Intuit Administrative Services, Inc. +mint + +// mit : 2015-07-02 Massachusetts Institute of Technology +mit + +// mitsubishi : 2015-07-23 Mitsubishi Corporation +mitsubishi + +// mlb : 2015-05-21 MLB Advanced Media DH, LLC +mlb + +// mls : 2015-04-23 The Canadian Real Estate Association +mls + +// mma : 2014-11-07 MMA IARD +mma + +// mobile : 2016-06-02 Dish DBS Corporation +mobile + +// moda : 2013-11-07 Dog Beach, LLC +moda + +// moe : 2013-11-13 Interlink Co., Ltd. +moe + +// moi : 2014-12-18 Amazon Registry Services, Inc. +moi + +// mom : 2015-04-16 UNR Corp. 
+mom + +// monash : 2013-09-30 Monash University +monash + +// money : 2014-10-16 Binky Moon, LLC +money + +// monster : 2015-09-11 XYZ.COM LLC +monster + +// mormon : 2013-12-05 IRI Domain Management, LLC +mormon + +// mortgage : 2014-03-20 Dog Beach, LLC +mortgage + +// moscow : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +moscow + +// moto : 2015-06-04 Motorola Trademark Holdings, LLC +moto + +// motorcycles : 2014-01-09 DERMotorcycles, LLC +motorcycles + +// mov : 2014-01-30 Charleston Road Registry Inc. +mov + +// movie : 2015-02-05 Binky Moon, LLC +movie + +// msd : 2015-07-23 MSD Registry Holdings, Inc. +msd + +// mtn : 2014-12-04 MTN Dubai Limited +mtn + +// mtr : 2015-03-12 MTR Corporation Limited +mtr + +// mutual : 2015-04-02 Northwestern Mutual MU TLD Registry, LLC +mutual + +// nab : 2015-08-20 National Australia Bank Limited +nab + +// nagoya : 2013-10-24 GMO Registry, Inc. +nagoya + +// nationwide : 2015-07-23 Nationwide Mutual Insurance Company +nationwide + +// natura : 2015-03-12 NATURA COSMÉTICOS S.A. +natura + +// navy : 2014-03-06 Dog Beach, LLC +navy + +// nba : 2015-07-31 NBA REGISTRY, LLC +nba + +// nec : 2015-01-08 NEC Corporation +nec + +// netbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +netbank + +// netflix : 2015-06-18 Netflix, Inc. +netflix + +// network : 2013-11-14 Binky Moon, LLC +network + +// neustar : 2013-12-05 NeuStar, Inc. +neustar + +// new : 2014-01-30 Charleston Road Registry Inc. +new + +// newholland : 2015-09-03 CNH Industrial N.V. +newholland + +// news : 2014-12-18 Dog Beach, LLC +news + +// next : 2015-06-18 Next plc +next + +// nextdirect : 2015-06-18 Next plc +nextdirect + +// nexus : 2014-07-24 Charleston Road Registry Inc. +nexus + +// nfl : 2015-07-23 NFL Reg Ops LLC +nfl + +// ngo : 2014-03-06 Public Interest Registry +ngo + +// nhk : 2014-02-13 Japan Broadcasting Corporation (NHK) +nhk + +// nico : 2014-12-04 DWANGO Co., Ltd. +nico + +// nike : 2015-07-23 NIKE, Inc. +nike + +// nikon : 2015-05-21 NIKON CORPORATION +nikon + +// ninja : 2013-11-07 Dog Beach, LLC +ninja + +// nissan : 2014-03-27 NISSAN MOTOR CO., LTD. +nissan + +// nissay : 2015-10-29 Nippon Life Insurance Company +nissay + +// nokia : 2015-01-08 Nokia Corporation +nokia + +// northwesternmutual : 2015-06-18 Northwestern Mutual Registry, LLC +northwesternmutual + +// norton : 2014-12-04 Symantec Corporation +norton + +// now : 2015-06-25 Amazon Registry Services, Inc. +now + +// nowruz : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +nowruz + +// nowtv : 2015-05-14 Starbucks (HK) Limited +nowtv + +// nra : 2014-05-22 NRA Holdings Company, INC. +nra + +// nrw : 2013-11-21 Minds + Machines GmbH +nrw + +// ntt : 2014-10-31 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +ntt + +// nyc : 2014-01-23 The City of New York by and through the New York City Department of Information Technology & Telecommunications +nyc + +// obi : 2014-09-25 OBI Group Holding SE & Co. KGaA +obi + +// observer : 2015-04-30 Top Level Spectrum, Inc. +observer + +// off : 2015-07-23 Johnson Shareholdings, Inc. +off + +// office : 2015-03-12 Microsoft Corporation +office + +// okinawa : 2013-12-05 BRregistry, Inc. +okinawa + +// olayan : 2015-05-14 Crescent Holding GmbH +olayan + +// olayangroup : 2015-05-14 Crescent Holding GmbH +olayangroup + +// oldnavy : 2015-07-31 The Gap, Inc. 
+oldnavy + +// ollo : 2015-06-04 Dish DBS Corporation +ollo + +// omega : 2015-01-08 The Swatch Group Ltd +omega + +// one : 2014-11-07 One.com A/S +one + +// ong : 2014-03-06 Public Interest Registry +ong + +// onl : 2013-09-16 I-Registry Ltd. +onl + +// online : 2015-01-15 DotOnline Inc. +online + +// onyourside : 2015-07-23 Nationwide Mutual Insurance Company +onyourside + +// ooo : 2014-01-09 INFIBEAM AVENUES LIMITED +ooo + +// open : 2015-07-31 American Express Travel Related Services Company, Inc. +open + +// oracle : 2014-06-19 Oracle Corporation +oracle + +// orange : 2015-03-12 Orange Brand Services Limited +orange + +// organic : 2014-03-27 Afilias Limited +organic + +// origins : 2015-10-01 The Estée Lauder Companies Inc. +origins + +// osaka : 2014-09-04 Osaka Registry Co., Ltd. +osaka + +// otsuka : 2013-10-11 Otsuka Holdings Co., Ltd. +otsuka + +// ott : 2015-06-04 Dish DBS Corporation +ott + +// ovh : 2014-01-16 MédiaBC +ovh + +// page : 2014-12-04 Charleston Road Registry Inc. +page + +// panasonic : 2015-07-30 Panasonic Corporation +panasonic + +// paris : 2014-01-30 City of Paris +paris + +// pars : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +pars + +// partners : 2013-12-05 Binky Moon, LLC +partners + +// parts : 2013-12-05 Binky Moon, LLC +parts + +// party : 2014-09-11 Blue Sky Registry Limited +party + +// passagens : 2015-03-05 Travel Reservations SRL +passagens + +// pay : 2015-08-27 Amazon Registry Services, Inc. +pay + +// pccw : 2015-05-14 PCCW Enterprises Limited +pccw + +// pet : 2015-05-07 Afilias Limited +pet + +// pfizer : 2015-09-11 Pfizer Inc. +pfizer + +// pharmacy : 2014-06-19 National Association of Boards of Pharmacy +pharmacy + +// phd : 2016-07-28 Charleston Road Registry Inc. +phd + +// philips : 2014-11-07 Koninklijke Philips N.V. +philips + +// phone : 2016-06-02 Dish DBS Corporation +phone + +// photo : 2013-11-14 UNR Corp. +photo + +// photography : 2013-09-20 Binky Moon, LLC +photography + +// photos : 2013-10-17 Binky Moon, LLC +photos + +// physio : 2014-05-01 PhysBiz Pty Ltd +physio + +// pics : 2013-11-14 UNR Corp. +pics + +// pictet : 2014-06-26 Pictet Europe S.A. +pictet + +// pictures : 2014-03-06 Binky Moon, LLC +pictures + +// pid : 2015-01-08 Top Level Spectrum, Inc. +pid + +// pin : 2014-12-18 Amazon Registry Services, Inc. +pin + +// ping : 2015-06-11 Ping Registry Provider, Inc. +ping + +// pink : 2013-10-01 Afilias Limited +pink + +// pioneer : 2015-07-16 Pioneer Corporation +pioneer + +// pizza : 2014-06-26 Binky Moon, LLC +pizza + +// place : 2014-04-24 Binky Moon, LLC +place + +// play : 2015-03-05 Charleston Road Registry Inc. +play + +// playstation : 2015-07-02 Sony Interactive Entertainment Inc. +playstation + +// plumbing : 2013-09-10 Binky Moon, LLC +plumbing + +// plus : 2015-02-05 Binky Moon, LLC +plus + +// pnc : 2015-07-02 PNC Domain Co., LLC +pnc + +// pohl : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +pohl + +// poker : 2014-07-03 Afilias Limited +poker + +// politie : 2015-08-20 Politie Nederland +politie + +// porn : 2014-10-16 ICM Registry PN LLC +porn + +// pramerica : 2015-07-30 Prudential Financial, Inc. +pramerica + +// praxi : 2013-12-05 Praxi S.p.A. +praxi + +// press : 2014-04-03 DotPress Inc. +press + +// prime : 2015-06-25 Amazon Registry Services, Inc. +prime + +// prod : 2014-01-23 Charleston Road Registry Inc. +prod + +// productions : 2013-12-05 Binky Moon, LLC +productions + +// prof : 2014-07-24 Charleston Road Registry Inc. 
+prof + +// progressive : 2015-07-23 Progressive Casualty Insurance Company +progressive + +// promo : 2014-12-18 Afilias Limited +promo + +// properties : 2013-12-05 Binky Moon, LLC +properties + +// property : 2014-05-22 UNR Corp. +property + +// protection : 2015-04-23 XYZ.COM LLC +protection + +// pru : 2015-07-30 Prudential Financial, Inc. +pru + +// prudential : 2015-07-30 Prudential Financial, Inc. +prudential + +// pub : 2013-12-12 Dog Beach, LLC +pub + +// pwc : 2015-10-29 PricewaterhouseCoopers LLP +pwc + +// qpon : 2013-11-14 dotCOOL, Inc. +qpon + +// quebec : 2013-12-19 PointQuébec Inc +quebec + +// quest : 2015-03-26 XYZ.COM LLC +quest + +// qvc : 2015-07-30 QVC, Inc. +qvc + +// racing : 2014-12-04 Premier Registry Limited +racing + +// radio : 2016-07-21 European Broadcasting Union (EBU) +radio + +// raid : 2015-07-23 Johnson Shareholdings, Inc. +raid + +// read : 2014-12-18 Amazon Registry Services, Inc. +read + +// realestate : 2015-09-11 dotRealEstate LLC +realestate + +// realtor : 2014-05-29 Real Estate Domains LLC +realtor + +// realty : 2015-03-19 Fegistry, LLC +realty + +// recipes : 2013-10-17 Binky Moon, LLC +recipes + +// red : 2013-11-07 Afilias Limited +red + +// redstone : 2014-10-31 Redstone Haute Couture Co., Ltd. +redstone + +// redumbrella : 2015-03-26 Travelers TLD, LLC +redumbrella + +// rehab : 2014-03-06 Dog Beach, LLC +rehab + +// reise : 2014-03-13 Binky Moon, LLC +reise + +// reisen : 2014-03-06 Binky Moon, LLC +reisen + +// reit : 2014-09-04 National Association of Real Estate Investment Trusts, Inc. +reit + +// reliance : 2015-04-02 Reliance Industries Limited +reliance + +// ren : 2013-12-12 ZDNS International Limited +ren + +// rent : 2014-12-04 XYZ.COM LLC +rent + +// rentals : 2013-12-05 Binky Moon, LLC +rentals + +// repair : 2013-11-07 Binky Moon, LLC +repair + +// report : 2013-12-05 Binky Moon, LLC +report + +// republican : 2014-03-20 Dog Beach, LLC +republican + +// rest : 2013-12-19 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +rest + +// restaurant : 2014-07-03 Binky Moon, LLC +restaurant + +// review : 2014-11-20 dot Review Limited +review + +// reviews : 2013-09-13 Dog Beach, LLC +reviews + +// rexroth : 2015-06-18 Robert Bosch GMBH +rexroth + +// rich : 2013-11-21 I-Registry Ltd. +rich + +// richardli : 2015-05-14 Pacific Century Asset Management (HK) Limited +richardli + +// ricoh : 2014-11-20 Ricoh Company, Ltd. +ricoh + +// ril : 2015-04-02 Reliance Industries Limited +ril + +// rio : 2014-02-27 Empresa Municipal de Informática SA - IPLANRIO +rio + +// rip : 2014-07-10 Dog Beach, LLC +rip + +// rmit : 2015-11-19 Royal Melbourne Institute of Technology +rmit + +// rocher : 2014-12-18 Ferrero Trading Lux S.A. +rocher + +// rocks : 2013-11-14 Dog Beach, LLC +rocks + +// rodeo : 2013-12-19 Minds + Machines Group Limited +rodeo + +// rogers : 2015-08-06 Rogers Communications Canada Inc. +rogers + +// room : 2014-12-18 Amazon Registry Services, Inc. +room + +// rsvp : 2014-05-08 Charleston Road Registry Inc. +rsvp + +// rugby : 2016-12-15 World Rugby Strategic Developments Limited +rugby + +// ruhr : 2013-10-02 regiodot GmbH & Co. KG +ruhr + +// run : 2015-03-19 Binky Moon, LLC +run + +// rwe : 2015-04-02 RWE AG +rwe + +// ryukyu : 2014-01-09 BRregistry, Inc. +ryukyu + +// saarland : 2013-12-12 dotSaarland GmbH +saarland + +// safe : 2014-12-18 Amazon Registry Services, Inc. +safe + +// safety : 2015-01-08 Safety Registry Services, LLC. +safety + +// sakura : 2014-12-18 SAKURA Internet Inc. 
+sakura + +// sale : 2014-10-16 Dog Beach, LLC +sale + +// salon : 2014-12-11 Binky Moon, LLC +salon + +// samsclub : 2015-07-31 Wal-Mart Stores, Inc. +samsclub + +// samsung : 2014-04-03 SAMSUNG SDS CO., LTD +samsung + +// sandvik : 2014-11-13 Sandvik AB +sandvik + +// sandvikcoromant : 2014-11-07 Sandvik AB +sandvikcoromant + +// sanofi : 2014-10-09 Sanofi +sanofi + +// sap : 2014-03-27 SAP AG +sap + +// sarl : 2014-07-03 Binky Moon, LLC +sarl + +// sas : 2015-04-02 Research IP LLC +sas + +// save : 2015-06-25 Amazon Registry Services, Inc. +save + +// saxo : 2014-10-31 Saxo Bank A/S +saxo + +// sbi : 2015-03-12 STATE BANK OF INDIA +sbi + +// sbs : 2014-11-07 SPECIAL BROADCASTING SERVICE CORPORATION +sbs + +// sca : 2014-03-13 SVENSKA CELLULOSA AKTIEBOLAGET SCA (publ) +sca + +// scb : 2014-02-20 The Siam Commercial Bank Public Company Limited ("SCB") +scb + +// schaeffler : 2015-08-06 Schaeffler Technologies AG & Co. KG +schaeffler + +// schmidt : 2014-04-03 SCHMIDT GROUPE S.A.S. +schmidt + +// scholarships : 2014-04-24 Scholarships.com, LLC +scholarships + +// school : 2014-12-18 Binky Moon, LLC +school + +// schule : 2014-03-06 Binky Moon, LLC +schule + +// schwarz : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +schwarz + +// science : 2014-09-11 dot Science Limited +science + +// scjohnson : 2015-07-23 Johnson Shareholdings, Inc. +scjohnson + +// scot : 2014-01-23 Dot Scot Registry Limited +scot + +// search : 2016-06-09 Charleston Road Registry Inc. +search + +// seat : 2014-05-22 SEAT, S.A. (Sociedad Unipersonal) +seat + +// secure : 2015-08-27 Amazon Registry Services, Inc. +secure + +// security : 2015-05-14 XYZ.COM LLC +security + +// seek : 2014-12-04 Seek Limited +seek + +// select : 2015-10-08 Registry Services, LLC +select + +// sener : 2014-10-24 Sener Ingeniería y Sistemas, S.A. +sener + +// services : 2014-02-27 Binky Moon, LLC +services + +// ses : 2015-07-23 SES +ses + +// seven : 2015-08-06 Seven West Media Ltd +seven + +// sew : 2014-07-17 SEW-EURODRIVE GmbH & Co KG +sew + +// sex : 2014-11-13 ICM Registry SX LLC +sex + +// sexy : 2013-09-11 UNR Corp. +sexy + +// sfr : 2015-08-13 Societe Francaise du Radiotelephone - SFR +sfr + +// shangrila : 2015-09-03 Shangri‐La International Hotel Management Limited +shangrila + +// sharp : 2014-05-01 Sharp Corporation +sharp + +// shaw : 2015-04-23 Shaw Cablesystems G.P. +shaw + +// shell : 2015-07-30 Shell Information Technology International Inc +shell + +// shia : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +shia + +// shiksha : 2013-11-14 Afilias Limited +shiksha + +// shoes : 2013-10-02 Binky Moon, LLC +shoes + +// shop : 2016-04-08 GMO Registry, Inc. +shop + +// shopping : 2016-03-31 Binky Moon, LLC +shopping + +// shouji : 2015-01-08 Beijing Qihu Keji Co., Ltd. +shouji + +// show : 2015-03-05 Binky Moon, LLC +show + +// showtime : 2015-08-06 CBS Domains Inc. +showtime + +// shriram : 2014-01-23 Shriram Capital Ltd. +shriram + +// silk : 2015-06-25 Amazon Registry Services, Inc. +silk + +// sina : 2015-03-12 Sina Corporation +sina + +// singles : 2013-08-27 Binky Moon, LLC +singles + +// site : 2015-01-15 DotSite Inc. +site + +// ski : 2015-04-09 Afilias Limited +ski + +// skin : 2015-01-15 XYZ.COM LLC +skin + +// sky : 2014-06-19 Sky International AG +sky + +// skype : 2014-12-18 Microsoft Corporation +skype + +// sling : 2015-07-30 DISH Technologies L.L.C. +sling + +// smart : 2015-07-09 Smart Communications, Inc. (SMART) +smart + +// smile : 2014-12-18 Amazon Registry Services, Inc. 
+smile + +// sncf : 2015-02-19 Société Nationale des Chemins de fer Francais S N C F +sncf + +// soccer : 2015-03-26 Binky Moon, LLC +soccer + +// social : 2013-11-07 Dog Beach, LLC +social + +// softbank : 2015-07-02 SoftBank Group Corp. +softbank + +// software : 2014-03-20 Dog Beach, LLC +software + +// sohu : 2013-12-19 Sohu.com Limited +sohu + +// solar : 2013-11-07 Binky Moon, LLC +solar + +// solutions : 2013-11-07 Binky Moon, LLC +solutions + +// song : 2015-02-26 Amazon Registry Services, Inc. +song + +// sony : 2015-01-08 Sony Corporation +sony + +// soy : 2014-01-23 Charleston Road Registry Inc. +soy + +// spa : 2019-09-19 Asia Spa and Wellness Promotion Council Limited +spa + +// space : 2014-04-03 DotSpace Inc. +space + +// sport : 2017-11-16 Global Association of International Sports Federations (GAISF) +sport + +// spot : 2015-02-26 Amazon Registry Services, Inc. +spot + +// spreadbetting : 2014-12-11 Dotspreadbetting Registry Limited +spreadbetting + +// srl : 2015-05-07 InterNetX, Corp +srl + +// stada : 2014-11-13 STADA Arzneimittel AG +stada + +// staples : 2015-07-30 Staples, Inc. +staples + +// star : 2015-01-08 Star India Private Limited +star + +// statebank : 2015-03-12 STATE BANK OF INDIA +statebank + +// statefarm : 2015-07-30 State Farm Mutual Automobile Insurance Company +statefarm + +// stc : 2014-10-09 Saudi Telecom Company +stc + +// stcgroup : 2014-10-09 Saudi Telecom Company +stcgroup + +// stockholm : 2014-12-18 Stockholms kommun +stockholm + +// storage : 2014-12-22 XYZ.COM LLC +storage + +// store : 2015-04-09 DotStore Inc. +store + +// stream : 2016-01-08 dot Stream Limited +stream + +// studio : 2015-02-11 Dog Beach, LLC +studio + +// study : 2014-12-11 OPEN UNIVERSITIES AUSTRALIA PTY LTD +study + +// style : 2014-12-04 Binky Moon, LLC +style + +// sucks : 2014-12-22 Vox Populi Registry Ltd. +sucks + +// supplies : 2013-12-19 Binky Moon, LLC +supplies + +// supply : 2013-12-19 Binky Moon, LLC +supply + +// support : 2013-10-24 Binky Moon, LLC +support + +// surf : 2014-01-09 Minds + Machines Group Limited +surf + +// surgery : 2014-03-20 Binky Moon, LLC +surgery + +// suzuki : 2014-02-20 SUZUKI MOTOR CORPORATION +suzuki + +// swatch : 2015-01-08 The Swatch Group Ltd +swatch + +// swiftcover : 2015-07-23 Swiftcover Insurance Services Limited +swiftcover + +// swiss : 2014-10-16 Swiss Confederation +swiss + +// sydney : 2014-09-18 State of New South Wales, Department of Premier and Cabinet +sydney + +// systems : 2013-11-07 Binky Moon, LLC +systems + +// tab : 2014-12-04 Tabcorp Holdings Limited +tab + +// taipei : 2014-07-10 Taipei City Government +taipei + +// talk : 2015-04-09 Amazon Registry Services, Inc. +talk + +// taobao : 2015-01-15 Alibaba Group Holding Limited +taobao + +// target : 2015-07-31 Target Domain Holdings, LLC +target + +// tatamotors : 2015-03-12 Tata Motors Ltd +tatamotors + +// tatar : 2014-04-24 Limited Liability Company "Coordination Center of Regional Domain of Tatarstan Republic" +tatar + +// tattoo : 2013-08-30 UNR Corp. +tattoo + +// tax : 2014-03-20 Binky Moon, LLC +tax + +// taxi : 2015-03-19 Binky Moon, LLC +taxi + +// tci : 2014-09-12 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +tci + +// tdk : 2015-06-11 TDK Corporation +tdk + +// team : 2015-03-05 Binky Moon, LLC +team + +// tech : 2015-01-30 Personals TLD Inc. 
+tech + +// technology : 2013-09-13 Binky Moon, LLC +technology + +// temasek : 2014-08-07 Temasek Holdings (Private) Limited +temasek + +// tennis : 2014-12-04 Binky Moon, LLC +tennis + +// teva : 2015-07-02 Teva Pharmaceutical Industries Limited +teva + +// thd : 2015-04-02 Home Depot Product Authority, LLC +thd + +// theater : 2015-03-19 Binky Moon, LLC +theater + +// theatre : 2015-05-07 XYZ.COM LLC +theatre + +// tiaa : 2015-07-23 Teachers Insurance and Annuity Association of America +tiaa + +// tickets : 2015-02-05 Accent Media Limited +tickets + +// tienda : 2013-11-14 Binky Moon, LLC +tienda + +// tiffany : 2015-01-30 Tiffany and Company +tiffany + +// tips : 2013-09-20 Binky Moon, LLC +tips + +// tires : 2014-11-07 Binky Moon, LLC +tires + +// tirol : 2014-04-24 punkt Tirol GmbH +tirol + +// tjmaxx : 2015-07-16 The TJX Companies, Inc. +tjmaxx + +// tjx : 2015-07-16 The TJX Companies, Inc. +tjx + +// tkmaxx : 2015-07-16 The TJX Companies, Inc. +tkmaxx + +// tmall : 2015-01-15 Alibaba Group Holding Limited +tmall + +// today : 2013-09-20 Binky Moon, LLC +today + +// tokyo : 2013-11-13 GMO Registry, Inc. +tokyo + +// tools : 2013-11-21 Binky Moon, LLC +tools + +// top : 2014-03-20 .TOP Registry +top + +// toray : 2014-12-18 Toray Industries, Inc. +toray + +// toshiba : 2014-04-10 TOSHIBA Corporation +toshiba + +// total : 2015-08-06 Total SA +total + +// tours : 2015-01-22 Binky Moon, LLC +tours + +// town : 2014-03-06 Binky Moon, LLC +town + +// toyota : 2015-04-23 TOYOTA MOTOR CORPORATION +toyota + +// toys : 2014-03-06 Binky Moon, LLC +toys + +// trade : 2014-01-23 Elite Registry Limited +trade + +// trading : 2014-12-11 Dottrading Registry Limited +trading + +// training : 2013-11-07 Binky Moon, LLC +training + +// travel : 2015-10-09 Dog Beach, LLC +travel + +// travelchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. +travelchannel + +// travelers : 2015-03-26 Travelers TLD, LLC +travelers + +// travelersinsurance : 2015-03-26 Travelers TLD, LLC +travelersinsurance + +// trust : 2014-10-16 NCC Group Domain Services, Inc. +trust + +// trv : 2015-03-26 Travelers TLD, LLC +trv + +// tube : 2015-06-11 Latin American Telecom LLC +tube + +// tui : 2014-07-03 TUI AG +tui + +// tunes : 2015-02-26 Amazon Registry Services, Inc. +tunes + +// tushu : 2014-12-18 Amazon Registry Services, Inc. +tushu + +// tvs : 2015-02-19 T V SUNDRAM IYENGAR & SONS LIMITED +tvs + +// ubank : 2015-08-20 National Australia Bank Limited +ubank + +// ubs : 2014-12-11 UBS AG +ubs + +// unicom : 2015-10-15 China United Network Communications Corporation Limited +unicom + +// university : 2014-03-06 Binky Moon, LLC +university + +// uno : 2013-09-11 DotSite Inc. +uno + +// uol : 2014-05-01 UBN INTERNET LTDA. +uol + +// ups : 2015-06-25 UPS Market Driver, Inc. +ups + +// vacations : 2013-12-05 Binky Moon, LLC +vacations + +// vana : 2014-12-11 Lifestyle Domain Holdings, Inc. +vana + +// vanguard : 2015-09-03 The Vanguard Group, Inc. +vanguard + +// vegas : 2014-01-16 Dot Vegas, Inc. +vegas + +// ventures : 2013-08-27 Binky Moon, LLC +ventures + +// verisign : 2015-08-13 VeriSign, Inc. +verisign + +// versicherung : 2014-03-20 tldbox GmbH +versicherung + +// vet : 2014-03-06 Dog Beach, LLC +vet + +// viajes : 2013-10-17 Binky Moon, LLC +viajes + +// video : 2014-10-16 Dog Beach, LLC +video + +// vig : 2015-05-14 VIENNA INSURANCE GROUP AG Wiener Versicherung Gruppe +vig + +// viking : 2015-04-02 Viking River Cruises (Bermuda) Ltd. 
+viking + +// villas : 2013-12-05 Binky Moon, LLC +villas + +// vin : 2015-06-18 Binky Moon, LLC +vin + +// vip : 2015-01-22 Minds + Machines Group Limited +vip + +// virgin : 2014-09-25 Virgin Enterprises Limited +virgin + +// visa : 2015-07-30 Visa Worldwide Pte. Limited +visa + +// vision : 2013-12-05 Binky Moon, LLC +vision + +// viva : 2014-11-07 Saudi Telecom Company +viva + +// vivo : 2015-07-31 Telefonica Brasil S.A. +vivo + +// vlaanderen : 2014-02-06 DNS.be vzw +vlaanderen + +// vodka : 2013-12-19 Minds + Machines Group Limited +vodka + +// volkswagen : 2015-05-14 Volkswagen Group of America Inc. +volkswagen + +// volvo : 2015-11-12 Volvo Holding Sverige Aktiebolag +volvo + +// vote : 2013-11-21 Monolith Registry LLC +vote + +// voting : 2013-11-13 Valuetainment Corp. +voting + +// voto : 2013-11-21 Monolith Registry LLC +voto + +// voyage : 2013-08-27 Binky Moon, LLC +voyage + +// vuelos : 2015-03-05 Travel Reservations SRL +vuelos + +// wales : 2014-05-08 Nominet UK +wales + +// walmart : 2015-07-31 Wal-Mart Stores, Inc. +walmart + +// walter : 2014-11-13 Sandvik AB +walter + +// wang : 2013-10-24 Zodiac Wang Limited +wang + +// wanggou : 2014-12-18 Amazon Registry Services, Inc. +wanggou + +// watch : 2013-11-14 Binky Moon, LLC +watch + +// watches : 2014-12-22 Richemont DNS Inc. +watches + +// weather : 2015-01-08 International Business Machines Corporation +weather + +// weatherchannel : 2015-03-12 International Business Machines Corporation +weatherchannel + +// webcam : 2014-01-23 dot Webcam Limited +webcam + +// weber : 2015-06-04 Saint-Gobain Weber SA +weber + +// website : 2014-04-03 DotWebsite Inc. +website + +// wed : 2013-10-01 Atgron, Inc. +wed + +// wedding : 2014-04-24 Minds + Machines Group Limited +wedding + +// weibo : 2015-03-05 Sina Corporation +weibo + +// weir : 2015-01-29 Weir Group IP Limited +weir + +// whoswho : 2014-02-20 Who's Who Registry +whoswho + +// wien : 2013-10-28 punkt.wien GmbH +wien + +// wiki : 2013-11-07 Top Level Design, LLC +wiki + +// williamhill : 2014-03-13 William Hill Organization Limited +williamhill + +// win : 2014-11-20 First Registry Limited +win + +// windows : 2014-12-18 Microsoft Corporation +windows + +// wine : 2015-06-18 Binky Moon, LLC +wine + +// winners : 2015-07-16 The TJX Companies, Inc. +winners + +// wme : 2014-02-13 William Morris Endeavor Entertainment, LLC +wme + +// wolterskluwer : 2015-08-06 Wolters Kluwer N.V. +wolterskluwer + +// woodside : 2015-07-09 Woodside Petroleum Limited +woodside + +// work : 2013-12-19 Minds + Machines Group Limited +work + +// works : 2013-11-14 Binky Moon, LLC +works + +// world : 2014-06-12 Binky Moon, LLC +world + +// wow : 2015-10-08 Amazon Registry Services, Inc. +wow + +// wtc : 2013-12-19 World Trade Centers Association, Inc. +wtc + +// wtf : 2014-03-06 Binky Moon, LLC +wtf + +// xbox : 2014-12-18 Microsoft Corporation +xbox + +// xerox : 2014-10-24 Xerox DNHC LLC +xerox + +// xfinity : 2015-07-09 Comcast IP Holdings I, LLC +xfinity + +// xihuan : 2015-01-08 Beijing Qihu Keji Co., Ltd. +xihuan + +// xin : 2014-12-11 Elegant Leader Limited +xin + +// xn--11b4c3d : 2015-01-15 VeriSign Sarl +कॉम + +// xn--1ck2e1b : 2015-02-26 Amazon Registry Services, Inc. +セール + +// xn--1qqw23a : 2014-01-09 Guangzhou YU Wei Information Technology Co., Ltd. 
+佛山 + +// xn--30rr7y : 2014-06-12 Excellent First Limited +慈善 + +// xn--3bst00m : 2013-09-13 Eagle Horizon Limited +集团 + +// xn--3ds443g : 2013-09-08 TLD REGISTRY LIMITED OY +在线 + +// xn--3oq18vl8pn36a : 2015-07-02 Volkswagen (China) Investment Co., Ltd. +大众汽车 + +// xn--3pxu8k : 2015-01-15 VeriSign Sarl +点看 + +// xn--42c2d9a : 2015-01-15 VeriSign Sarl +คอม + +// xn--45q11c : 2013-11-21 Zodiac Gemini Ltd +八卦 + +// xn--4gbrim : 2013-10-04 Fans TLD Limited +موقع + +// xn--55qw42g : 2013-11-08 China Organizational Name Administration Center +公益 + +// xn--55qx5d : 2013-11-14 China Internet Network Information Center (CNNIC) +公司 + +// xn--5su34j936bgsg : 2015-09-03 Shangri‐La International Hotel Management Limited +香格里拉 + +// xn--5tzm5g : 2014-12-22 Global Website TLD Asia Limited +网站 + +// xn--6frz82g : 2013-09-23 Afilias Limited +移动 + +// xn--6qq986b3xl : 2013-09-13 Tycoon Treasure Limited +我爱你 + +// xn--80adxhks : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +москва + +// xn--80aqecdr1a : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +католик + +// xn--80asehdb : 2013-07-14 CORE Association +онлайн + +// xn--80aswg : 2013-07-14 CORE Association +сайт + +// xn--8y0a063a : 2015-03-26 China United Network Communications Corporation Limited +联通 + +// xn--9dbq2a : 2015-01-15 VeriSign Sarl +קום + +// xn--9et52u : 2014-06-12 RISE VICTORY LIMITED +时尚 + +// xn--9krt00a : 2015-03-12 Sina Corporation +微博 + +// xn--b4w605ferd : 2014-08-07 Temasek Holdings (Private) Limited +淡马锡 + +// xn--bck1b9a5dre4c : 2015-02-26 Amazon Registry Services, Inc. +ファッション + +// xn--c1avg : 2013-11-14 Public Interest Registry +орг + +// xn--c2br7g : 2015-01-15 VeriSign Sarl +नेट + +// xn--cck2b3b : 2015-02-26 Amazon Registry Services, Inc. +ストア + +// xn--cckwcxetd : 2019-12-19 Amazon Registry Services, Inc. +アマゾン + +// xn--cg4bki : 2013-09-27 SAMSUNG SDS CO., LTD +삼성 + +// xn--czr694b : 2014-01-16 Internet DotTrademark Organisation Limited +商标 + +// xn--czrs0t : 2013-12-19 Binky Moon, LLC +商店 + +// xn--czru2d : 2013-11-21 Zodiac Aquarius Limited +商城 + +// xn--d1acj3b : 2013-11-20 The Foundation for Network Initiatives “The Smart Internet” +дети + +// xn--eckvdtc9d : 2014-12-18 Amazon Registry Services, Inc. +ポイント + +// xn--efvy88h : 2014-08-22 Guangzhou YU Wei Information Technology Co., Ltd. +新闻 + +// xn--fct429k : 2015-04-09 Amazon Registry Services, Inc. +家電 + +// xn--fhbei : 2015-01-15 VeriSign Sarl +كوم + +// xn--fiq228c5hs : 2013-09-08 TLD REGISTRY LIMITED OY +中文网 + +// xn--fiq64b : 2013-10-14 CITIC Group Corporation +中信 + +// xn--fjq720a : 2014-05-22 Binky Moon, LLC +娱乐 + +// xn--flw351e : 2014-07-31 Charleston Road Registry Inc. +谷歌 + +// xn--fzys8d69uvgm : 2015-05-14 PCCW Enterprises Limited +電訊盈科 + +// xn--g2xx48c : 2015-01-30 Minds + Machines Group Limited +购物 + +// xn--gckr3f0f : 2015-02-26 Amazon Registry Services, Inc. +クラウド + +// xn--gk3at1e : 2015-10-08 Amazon Registry Services, Inc. +通販 + +// xn--hxt814e : 2014-05-15 Zodiac Taurus Limited +网店 + +// xn--i1b6b1a6a2e : 2013-11-14 Public Interest Registry +संगठन + +// xn--imr513n : 2014-12-11 Internet DotTrademark Organisation Limited +餐厅 + +// xn--io0a7i : 2013-11-14 China Internet Network Information Center (CNNIC) +网络 + +// xn--j1aef : 2015-01-15 VeriSign Sarl +ком + +// xn--jlq480n2rg : 2019-12-19 Amazon Registry Services, Inc. 
+亚马逊 + +// xn--jlq61u9w7b : 2015-01-08 Nokia Corporation +诺基亚 + +// xn--jvr189m : 2015-02-26 Amazon Registry Services, Inc. +食品 + +// xn--kcrx77d1x4a : 2014-11-07 Koninklijke Philips N.V. +飞利浦 + +// xn--kput3i : 2014-02-13 Beijing RITT-Net Technology Development Co., Ltd +手机 + +// xn--mgba3a3ejt : 2014-11-20 Aramco Services Company +ارامكو + +// xn--mgba7c0bbn0a : 2015-05-14 Crescent Holding GmbH +العليان + +// xn--mgbaakc7dvf : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +اتصالات + +// xn--mgbab2bd : 2013-10-31 CORE Association +بازار + +// xn--mgbca7dzdo : 2015-07-30 Abu Dhabi Systems and Information Centre +ابوظبي + +// xn--mgbi4ecexp : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +كاثوليك + +// xn--mgbt3dhd : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +همراه + +// xn--mk1bu44c : 2015-01-15 VeriSign Sarl +닷컴 + +// xn--mxtq1m : 2014-03-06 Net-Chinese Co., Ltd. +政府 + +// xn--ngbc5azd : 2013-07-13 International Domain Registry Pty. Ltd. +شبكة + +// xn--ngbe9e0a : 2014-12-04 Kuwait Finance House +بيتك + +// xn--ngbrx : 2015-11-12 League of Arab States +عرب + +// xn--nqv7f : 2013-11-14 Public Interest Registry +机构 + +// xn--nqv7fs00ema : 2013-11-14 Public Interest Registry +组织机构 + +// xn--nyqy26a : 2014-11-07 Stable Tone Limited +健康 + +// xn--otu796d : 2017-08-06 Jiang Yu Liang Cai Technology Company Limited +招聘 + +// xn--p1acf : 2013-12-12 Rusnames Limited +рус + +// xn--pssy2u : 2015-01-15 VeriSign Sarl +大拿 + +// xn--q9jyb4c : 2013-09-17 Charleston Road Registry Inc. +みんな + +// xn--qcka1pmc : 2014-07-31 Charleston Road Registry Inc. +グーグル + +// xn--rhqv96g : 2013-09-11 Stable Tone Limited +世界 + +// xn--rovu88b : 2015-02-26 Amazon Registry Services, Inc. +書籍 + +// xn--ses554g : 2014-01-16 KNET Co., Ltd. +网址 + +// xn--t60b56a : 2015-01-15 VeriSign Sarl +닷넷 + +// xn--tckwe : 2015-01-15 VeriSign Sarl +コム + +// xn--tiq49xqyj : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +天主教 + +// xn--unup4y : 2013-07-14 Binky Moon, LLC +游戏 + +// xn--vermgensberater-ctb : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +vermögensberater + +// xn--vermgensberatung-pwb : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +vermögensberatung + +// xn--vhquv : 2013-08-27 Binky Moon, LLC +企业 + +// xn--vuq861b : 2014-10-16 Beijing Tele-info Network Technology Co., Ltd. +信息 + +// xn--w4r85el8fhu5dnra : 2015-04-30 Kerry Trading Co. Limited +嘉里大酒店 + +// xn--w4rs40l : 2015-07-30 Kerry Trading Co. Limited +嘉里 + +// xn--xhq521b : 2013-11-14 Guangzhou YU Wei Information Technology Co., Ltd. +广东 + +// xn--zfr164b : 2013-11-08 China Organizational Name Administration Center +政务 + +// xyz : 2013-12-05 XYZ.COM LLC +xyz + +// yachts : 2014-01-09 DERYachts, LLC +yachts + +// yahoo : 2015-04-02 Yahoo! Domain Services Inc. +yahoo + +// yamaxun : 2014-12-18 Amazon Registry Services, Inc. +yamaxun + +// yandex : 2014-04-10 Yandex Europe B.V. +yandex + +// yodobashi : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +yodobashi + +// yoga : 2014-05-29 Minds + Machines Group Limited +yoga + +// yokohama : 2013-12-12 GMO Registry, Inc. +yokohama + +// you : 2015-04-09 Amazon Registry Services, Inc. +you + +// youtube : 2014-05-01 Charleston Road Registry Inc. +youtube + +// yun : 2015-01-08 Beijing Qihu Keji Co., Ltd. +yun + +// zappos : 2015-06-25 Amazon Registry Services, Inc. 
+zappos + +// zara : 2014-11-07 Industria de Diseño Textil, S.A. (INDITEX, S.A.) +zara + +// zero : 2014-12-18 Amazon Registry Services, Inc. +zero + +// zip : 2014-05-08 Charleston Road Registry Inc. +zip + +// zone : 2013-11-14 Binky Moon, LLC +zone + +// zuerich : 2014-11-07 Kanton Zürich (Canton of Zurich) +zuerich + + +// ===END ICANN DOMAINS=== +// ===BEGIN PRIVATE DOMAINS=== +// (Note: these are in alphabetical order by company name) + +// 1GB LLC : https://www.1gb.ua/ +// Submitted by 1GB LLC +cc.ua +inf.ua +ltd.ua + +// 611coin : https://611project.org/ +611.to + +// Adobe : https://www.adobe.com/ +// Submitted by Ian Boston +adobeaemcloud.com +adobeaemcloud.net +*.dev.adobeaemcloud.com + +// Agnat sp. z o.o. : https://domena.pl +// Submitted by Przemyslaw Plewa +beep.pl + +// alboto.ca : http://alboto.ca +// Submitted by Anton Avramov +barsy.ca + +// Alces Software Ltd : http://alces-software.com +// Submitted by Mark J. Titorenko +*.compute.estate +*.alces.network + +// all-inkl.com : https://all-inkl.com +// Submitted by Werner Kaltofen +kasserver.com + +// Algorithmia, Inc. : algorithmia.com +// Submitted by Eli Perelman +*.algorithmia.com +!teams.algorithmia.com +!test.algorithmia.com + +// Altervista: https://www.altervista.org +// Submitted by Carlo Cannas +altervista.org + +// alwaysdata : https://www.alwaysdata.com +// Submitted by Cyril +alwaysdata.net + +// Amazon CloudFront : https://aws.amazon.com/cloudfront/ +// Submitted by Donavan Miller +cloudfront.net + +// Amazon Elastic Compute Cloud : https://aws.amazon.com/ec2/ +// Submitted by Luke Wells +*.compute.amazonaws.com +*.compute-1.amazonaws.com +*.compute.amazonaws.com.cn +us-east-1.amazonaws.com + +// Amazon Elastic Beanstalk : https://aws.amazon.com/elasticbeanstalk/ +// Submitted by Luke Wells +cn-north-1.eb.amazonaws.com.cn +cn-northwest-1.eb.amazonaws.com.cn +elasticbeanstalk.com +ap-northeast-1.elasticbeanstalk.com +ap-northeast-2.elasticbeanstalk.com +ap-northeast-3.elasticbeanstalk.com +ap-south-1.elasticbeanstalk.com +ap-southeast-1.elasticbeanstalk.com +ap-southeast-2.elasticbeanstalk.com +ca-central-1.elasticbeanstalk.com +eu-central-1.elasticbeanstalk.com +eu-west-1.elasticbeanstalk.com +eu-west-2.elasticbeanstalk.com +eu-west-3.elasticbeanstalk.com +sa-east-1.elasticbeanstalk.com +us-east-1.elasticbeanstalk.com +us-east-2.elasticbeanstalk.com +us-gov-west-1.elasticbeanstalk.com +us-west-1.elasticbeanstalk.com +us-west-2.elasticbeanstalk.com + +// Amazon Elastic Load Balancing : https://aws.amazon.com/elasticloadbalancing/ +// Submitted by Luke Wells +*.elb.amazonaws.com +*.elb.amazonaws.com.cn + +// Amazon S3 : https://aws.amazon.com/s3/ +// Submitted by Luke Wells +s3.amazonaws.com +s3-ap-northeast-1.amazonaws.com +s3-ap-northeast-2.amazonaws.com +s3-ap-south-1.amazonaws.com +s3-ap-southeast-1.amazonaws.com +s3-ap-southeast-2.amazonaws.com +s3-ca-central-1.amazonaws.com +s3-eu-central-1.amazonaws.com +s3-eu-west-1.amazonaws.com +s3-eu-west-2.amazonaws.com +s3-eu-west-3.amazonaws.com +s3-external-1.amazonaws.com +s3-fips-us-gov-west-1.amazonaws.com +s3-sa-east-1.amazonaws.com +s3-us-gov-west-1.amazonaws.com +s3-us-east-2.amazonaws.com +s3-us-west-1.amazonaws.com +s3-us-west-2.amazonaws.com +s3.ap-northeast-2.amazonaws.com +s3.ap-south-1.amazonaws.com +s3.cn-north-1.amazonaws.com.cn +s3.ca-central-1.amazonaws.com +s3.eu-central-1.amazonaws.com +s3.eu-west-2.amazonaws.com +s3.eu-west-3.amazonaws.com +s3.us-east-2.amazonaws.com +s3.dualstack.ap-northeast-1.amazonaws.com 
+s3.dualstack.ap-northeast-2.amazonaws.com +s3.dualstack.ap-south-1.amazonaws.com +s3.dualstack.ap-southeast-1.amazonaws.com +s3.dualstack.ap-southeast-2.amazonaws.com +s3.dualstack.ca-central-1.amazonaws.com +s3.dualstack.eu-central-1.amazonaws.com +s3.dualstack.eu-west-1.amazonaws.com +s3.dualstack.eu-west-2.amazonaws.com +s3.dualstack.eu-west-3.amazonaws.com +s3.dualstack.sa-east-1.amazonaws.com +s3.dualstack.us-east-1.amazonaws.com +s3.dualstack.us-east-2.amazonaws.com +s3-website-us-east-1.amazonaws.com +s3-website-us-west-1.amazonaws.com +s3-website-us-west-2.amazonaws.com +s3-website-ap-northeast-1.amazonaws.com +s3-website-ap-southeast-1.amazonaws.com +s3-website-ap-southeast-2.amazonaws.com +s3-website-eu-west-1.amazonaws.com +s3-website-sa-east-1.amazonaws.com +s3-website.ap-northeast-2.amazonaws.com +s3-website.ap-south-1.amazonaws.com +s3-website.ca-central-1.amazonaws.com +s3-website.eu-central-1.amazonaws.com +s3-website.eu-west-2.amazonaws.com +s3-website.eu-west-3.amazonaws.com +s3-website.us-east-2.amazonaws.com + +// Amsterdam Wireless: https://www.amsterdamwireless.nl/ +// Submitted by Imre Jonk +amsw.nl + +// Amune : https://amune.org/ +// Submitted by Team Amune +t3l3p0rt.net +tele.amune.org + +// Apigee : https://apigee.com/ +// Submitted by Apigee Security Team +apigee.io + +// Aptible : https://www.aptible.com/ +// Submitted by Thomas Orozco +on-aptible.com + +// ASEINet : https://www.aseinet.com/ +// Submitted by Asei SEKIGUCHI +user.aseinet.ne.jp +gv.vc +d.gv.vc + +// Asociación Amigos de la Informática "Euskalamiga" : http://encounter.eus/ +// Submitted by Hector Martin +user.party.eus + +// Association potager.org : https://potager.org/ +// Submitted by Lunar +pimienta.org +poivron.org +potager.org +sweetpepper.org + +// ASUSTOR Inc. : http://www.asustor.com +// Submitted by Vincent Tseng +myasustor.com + +// AVM : https://avm.de +// Submitted by Andreas Weise +myfritz.net + +// AW AdvisorWebsites.com Software Inc : https://advisorwebsites.com +// Submitted by James Kennedy +*.awdev.ca +*.advisor.ws + +// b-data GmbH : https://www.b-data.io +// Submitted by Olivier Benz +b-data.io + +// backplane : https://www.backplane.io +// Submitted by Anthony Voutas +backplaneapp.io + +// Balena : https://www.balena.io +// Submitted by Petros Angelatos +balena-devices.com + +// Banzai Cloud +// Submitted by Janos Matyas +*.banzai.cloud +app.banzaicloud.io +*.backyards.banzaicloud.io + + +// BetaInABox +// Submitted by Adrian +betainabox.com + +// BinaryLane : http://www.binarylane.com +// Submitted by Nathan O'Sullivan +bnr.la + +// Blackbaud, Inc. 
: https://www.blackbaud.com +// Submitted by Paul Crowder +blackbaudcdn.net + +// Boomla : https://boomla.com +// Submitted by Tibor Halter +boomla.net + +// Boxfuse : https://boxfuse.com +// Submitted by Axel Fontaine +boxfuse.io + +// bplaced : https://www.bplaced.net/ +// Submitted by Miroslav Bozic +square7.ch +bplaced.com +bplaced.de +square7.de +bplaced.net +square7.net + +// BrowserSafetyMark +// Submitted by Dave Tharp +browsersafetymark.io + +// Bytemark Hosting : https://www.bytemark.co.uk +// Submitted by Paul Cammish +uk0.bigv.io +dh.bytemark.co.uk +vm.bytemark.co.uk + +// callidomus : https://www.callidomus.com/ +// Submitted by Marcus Popp +mycd.eu + +// Carrd : https://carrd.co +// Submitted by AJ +carrd.co +crd.co +uwu.ai + +// CentralNic : http://www.centralnic.com/names/domains +// Submitted by registry +ae.org +br.com +cn.com +com.de +com.se +de.com +eu.com +gb.net +hu.net +jp.net +jpn.com +mex.com +ru.com +sa.com +se.net +uk.com +uk.net +us.com +za.bz +za.com + +// No longer operated by CentralNic, these entries should be adopted and/or removed by current operators +// Submitted by Gavin Brown +ar.com +gb.com +hu.com +kr.com +no.com +qc.com +uy.com + +// Africa.com Web Solutions Ltd : https://registry.africa.com +// Submitted by Gavin Brown +africa.com + +// iDOT Services Limited : http://www.domain.gr.com +// Submitted by Gavin Brown +gr.com + +// Radix FZC : http://domains.in.net +// Submitted by Gavin Brown +in.net +web.in + +// US REGISTRY LLC : http://us.org +// Submitted by Gavin Brown +us.org + +// co.com Registry, LLC : https://registry.co.com +// Submitted by Gavin Brown +co.com + +// Roar Domains LLC : https://roar.basketball/ +// Submitted by Gavin Brown +aus.basketball +nz.basketball + +// BRS Media : https://brsmedia.com/ +// Submitted by Gavin Brown +radio.am +radio.fm + +// Globe Hosting SRL : https://www.globehosting.com/ +// Submitted by Gavin Brown +co.ro +shop.ro + +// c.la : http://www.c.la/ +c.la + +// certmgr.org : https://certmgr.org +// Submitted by B. Blechschmidt +certmgr.org + +// Citrix : https://citrix.com +// Submitted by Alex Stoddard +xenapponazure.com + +// Civilized Discourse Construction Kit, Inc. : https://www.discourse.org/ +// Submitted by Rishabh Nambiar & Michael Brown +discourse.group +discourse.team + +// ClearVox : http://www.clearvox.nl/ +// Submitted by Leon Rowland +virtueeldomein.nl + +// Clever Cloud : https://www.clever-cloud.com/ +// Submitted by Quentin Adam +cleverapps.io + +// Clerk : https://www.clerk.dev +// Submitted by Colin Sidoti +*.lcl.dev +*.stg.dev + +// Clic2000 : https://clic2000.fr +// Submitted by Mathilde Blanchemanche +clic2000.net + +// Cloud66 : https://www.cloud66.com/ +// Submitted by Khash Sajadi +c66.me +cloud66.ws +cloud66.zone + +// CloudAccess.net : https://www.cloudaccess.net/ +// Submitted by Pawel Panek +jdevcloud.com +wpdevcloud.com +cloudaccess.host +freesite.host +cloudaccess.net + +// cloudControl : https://www.cloudcontrol.com/ +// Submitted by Tobias Wilken +cloudcontrolled.com +cloudcontrolapp.com + +// Cloudera, Inc. : https://www.cloudera.com/ +// Submitted by Philip Langdale +cloudera.site + +// Cloudflare, Inc. : https://www.cloudflare.com/ +// Submitted by Jake Riesterer +trycloudflare.com +workers.dev + +// Clovyr : https://clovyr.io +// Submitted by Patrick Nielsen +wnext.app + +// co.ca : http://registry.co.ca/ +co.ca + +// Co & Co : https://co-co.nl/ +// Submitted by Govert Versluis +*.otap.co + +// i-registry s.r.o. 
: http://www.i-registry.cz/ +// Submitted by Martin Semrad +co.cz + +// CDN77.com : http://www.cdn77.com +// Submitted by Jan Krpes +c.cdn77.org +cdn77-ssl.net +r.cdn77.net +rsc.cdn77.org +ssl.origin.cdn77-secure.org + +// Cloud DNS Ltd : http://www.cloudns.net +// Submitted by Aleksander Hristov +cloudns.asia +cloudns.biz +cloudns.club +cloudns.cc +cloudns.eu +cloudns.in +cloudns.info +cloudns.org +cloudns.pro +cloudns.pw +cloudns.us + +// Cloudeity Inc : https://cloudeity.com +// Submitted by Stefan Dimitrov +cloudeity.net + +// CNPY : https://cnpy.gdn +// Submitted by Angelo Gladding +cnpy.gdn + +// CoDNS B.V. +co.nl +co.no + +// Combell.com : https://www.combell.com +// Submitted by Thomas Wouters +webhosting.be +hosting-cluster.nl + +// Coordination Center for TLD RU and XN--P1AI : https://cctld.ru/en/domains/domens_ru/reserved/ +// Submitted by George Georgievsky +ac.ru +edu.ru +gov.ru +int.ru +mil.ru +test.ru + +// COSIMO GmbH : http://www.cosimo.de +// Submitted by Rene Marticke +dyn.cosidns.de +dynamisches-dns.de +dnsupdater.de +internet-dns.de +l-o-g-i-n.de +dynamic-dns.info +feste-ip.net +knx-server.net +static-access.net + +// Craynic, s.r.o. : http://www.craynic.com/ +// Submitted by Ales Krajnik +realm.cz + +// Cryptonomic : https://cryptonomic.net/ +// Submitted by Andrew Cady +*.cryptonomic.net + +// Cupcake : https://cupcake.io/ +// Submitted by Jonathan Rudenberg +cupcake.is + +// Curv UG : https://curv-labs.de/ +// Submitted by Marvin Wiesner +curv.dev + +// Customer OCI - Oracle Dyn https://cloud.oracle.com/home https://dyn.com/dns/ +// Submitted by Gregory Drake +// Note: This is intended to also include customer-oci.com due to wildcards implicitly including the current label +*.customer-oci.com +*.oci.customer-oci.com +*.ocp.customer-oci.com +*.ocs.customer-oci.com + +// cyon GmbH : https://www.cyon.ch/ +// Submitted by Dominic Luechinger +cyon.link +cyon.site + +// Danger Science Group: https://dangerscience.com/ +// Submitted by Skylar MacDonald +fnwk.site +folionetwork.site +platform0.app + +// Daplie, Inc : https://daplie.com +// Submitted by AJ ONeal +daplie.me +localhost.daplie.me + +// Datto, Inc. : https://www.datto.com/ +// Submitted by Philipp Heckel +dattolocal.com +dattorelay.com +dattoweb.com +mydatto.com +dattolocal.net +mydatto.net + +// Dansk.net : http://www.dansk.net/ +// Submitted by Anani Voule +biz.dk +co.dk +firm.dk +reg.dk +store.dk + +// dappnode.io : https://dappnode.io/ +// Submitted by Abel Boldu / DAppNode Team +dyndns.dappnode.io + +// dapps.earth : https://dapps.earth/ +// Submitted by Daniil Burdakov +*.dapps.earth +*.bzz.dapps.earth + +// Dark, Inc. : https://darklang.com +// Submitted by Paul Biggar +builtwithdark.com + +// Datawire, Inc : https://www.datawire.io +// Submitted by Richard Li +edgestack.me + +// Debian : https://www.debian.org/ +// Submitted by Peter Palfrader / Debian Sysadmin Team +debian.net + +// deSEC : https://desec.io/ +// Submitted by Peter Thomassen +dedyn.io + +// DNS Africa Ltd https://dns.business +// Submitted by Calvin Browne +jozi.biz + +// DNShome : https://www.dnshome.de/ +// Submitted by Norbert Auler +dnshome.de + +// DotArai : https://www.dotarai.com/ +// Submitted by Atsadawat Netcharadsang +online.th +shop.th + +// DrayTek Corp. : https://www.draytek.com/ +// Submitted by Paul Fang +drayddns.com + +// DreamHost : http://www.dreamhost.com/ +// Submitted by Andrew Farmer +dreamhosters.com + +// Drobo : http://www.drobo.com/ +// Submitted by Ricardo Padilha +mydrobo.com + +// Drud Holdings, LLC. 
: https://www.drud.com/ +// Submitted by Kevin Bridges +drud.io +drud.us + +// DuckDNS : http://www.duckdns.org/ +// Submitted by Richard Harper +duckdns.org + +// bitbridge.net : Submitted by Craig Welch, abeliidev@gmail.com +bitbridge.net + +// dy.fi : http://dy.fi/ +// Submitted by Heikki Hannikainen +dy.fi +tunk.org + +// DynDNS.com : http://www.dyndns.com/services/dns/dyndns/ +dyndns-at-home.com +dyndns-at-work.com +dyndns-blog.com +dyndns-free.com +dyndns-home.com +dyndns-ip.com +dyndns-mail.com +dyndns-office.com +dyndns-pics.com +dyndns-remote.com +dyndns-server.com +dyndns-web.com +dyndns-wiki.com +dyndns-work.com +dyndns.biz +dyndns.info +dyndns.org +dyndns.tv +at-band-camp.net +ath.cx +barrel-of-knowledge.info +barrell-of-knowledge.info +better-than.tv +blogdns.com +blogdns.net +blogdns.org +blogsite.org +boldlygoingnowhere.org +broke-it.net +buyshouses.net +cechire.com +dnsalias.com +dnsalias.net +dnsalias.org +dnsdojo.com +dnsdojo.net +dnsdojo.org +does-it.net +doesntexist.com +doesntexist.org +dontexist.com +dontexist.net +dontexist.org +doomdns.com +doomdns.org +dvrdns.org +dyn-o-saur.com +dynalias.com +dynalias.net +dynalias.org +dynathome.net +dyndns.ws +endofinternet.net +endofinternet.org +endoftheinternet.org +est-a-la-maison.com +est-a-la-masion.com +est-le-patron.com +est-mon-blogueur.com +for-better.biz +for-more.biz +for-our.info +for-some.biz +for-the.biz +forgot.her.name +forgot.his.name +from-ak.com +from-al.com +from-ar.com +from-az.net +from-ca.com +from-co.net +from-ct.com +from-dc.com +from-de.com +from-fl.com +from-ga.com +from-hi.com +from-ia.com +from-id.com +from-il.com +from-in.com +from-ks.com +from-ky.com +from-la.net +from-ma.com +from-md.com +from-me.org +from-mi.com +from-mn.com +from-mo.com +from-ms.com +from-mt.com +from-nc.com +from-nd.com +from-ne.com +from-nh.com +from-nj.com +from-nm.com +from-nv.com +from-ny.net +from-oh.com +from-ok.com +from-or.com +from-pa.com +from-pr.com +from-ri.com +from-sc.com +from-sd.com +from-tn.com +from-tx.com +from-ut.com +from-va.com +from-vt.com +from-wa.com +from-wi.com +from-wv.com +from-wy.com +ftpaccess.cc +fuettertdasnetz.de +game-host.org +game-server.cc +getmyip.com +gets-it.net +go.dyndns.org +gotdns.com +gotdns.org +groks-the.info +groks-this.info +ham-radio-op.net +here-for-more.info +hobby-site.com +hobby-site.org +home.dyndns.org +homedns.org +homeftp.net +homeftp.org +homeip.net +homelinux.com +homelinux.net +homelinux.org +homeunix.com +homeunix.net +homeunix.org +iamallama.com +in-the-band.net +is-a-anarchist.com +is-a-blogger.com +is-a-bookkeeper.com +is-a-bruinsfan.org +is-a-bulls-fan.com +is-a-candidate.org +is-a-caterer.com +is-a-celticsfan.org +is-a-chef.com +is-a-chef.net +is-a-chef.org +is-a-conservative.com +is-a-cpa.com +is-a-cubicle-slave.com +is-a-democrat.com +is-a-designer.com +is-a-doctor.com +is-a-financialadvisor.com +is-a-geek.com +is-a-geek.net +is-a-geek.org +is-a-green.com +is-a-guru.com +is-a-hard-worker.com +is-a-hunter.com +is-a-knight.org +is-a-landscaper.com +is-a-lawyer.com +is-a-liberal.com +is-a-libertarian.com +is-a-linux-user.org +is-a-llama.com +is-a-musician.com +is-a-nascarfan.com +is-a-nurse.com +is-a-painter.com +is-a-patsfan.org +is-a-personaltrainer.com +is-a-photographer.com +is-a-player.com +is-a-republican.com +is-a-rockstar.com +is-a-socialist.com +is-a-soxfan.org +is-a-student.com +is-a-teacher.com +is-a-techie.com +is-a-therapist.com +is-an-accountant.com +is-an-actor.com +is-an-actress.com +is-an-anarchist.com +is-an-artist.com +is-an-engineer.com 
+is-an-entertainer.com +is-by.us +is-certified.com +is-found.org +is-gone.com +is-into-anime.com +is-into-cars.com +is-into-cartoons.com +is-into-games.com +is-leet.com +is-lost.org +is-not-certified.com +is-saved.org +is-slick.com +is-uberleet.com +is-very-bad.org +is-very-evil.org +is-very-good.org +is-very-nice.org +is-very-sweet.org +is-with-theband.com +isa-geek.com +isa-geek.net +isa-geek.org +isa-hockeynut.com +issmarterthanyou.com +isteingeek.de +istmein.de +kicks-ass.net +kicks-ass.org +knowsitall.info +land-4-sale.us +lebtimnetz.de +leitungsen.de +likes-pie.com +likescandy.com +merseine.nu +mine.nu +misconfused.org +mypets.ws +myphotos.cc +neat-url.com +office-on-the.net +on-the-web.tv +podzone.net +podzone.org +readmyblog.org +saves-the-whales.com +scrapper-site.net +scrapping.cc +selfip.biz +selfip.com +selfip.info +selfip.net +selfip.org +sells-for-less.com +sells-for-u.com +sells-it.net +sellsyourhome.org +servebbs.com +servebbs.net +servebbs.org +serveftp.net +serveftp.org +servegame.org +shacknet.nu +simple-url.com +space-to-rent.com +stuff-4-sale.org +stuff-4-sale.us +teaches-yoga.com +thruhere.net +traeumtgerade.de +webhop.biz +webhop.info +webhop.net +webhop.org +worse-than.tv +writesthisblog.com + +// ddnss.de : https://www.ddnss.de/ +// Submitted by Robert Niedziela +ddnss.de +dyn.ddnss.de +dyndns.ddnss.de +dyndns1.de +dyn-ip24.de +home-webserver.de +dyn.home-webserver.de +myhome-server.de +ddnss.org + +// Definima : http://www.definima.com/ +// Submitted by Maxence Bitterli +definima.net +definima.io + +// dnstrace.pro : https://dnstrace.pro/ +// Submitted by Chris Partridge +bci.dnstrace.pro + +// Dynu.com : https://www.dynu.com/ +// Submitted by Sue Ye +ddnsfree.com +ddnsgeek.com +giize.com +gleeze.com +kozow.com +loseyourip.com +ooguy.com +theworkpc.com +casacam.net +dynu.net +accesscam.org +camdvr.org +freeddns.org +mywire.org +webredirect.org +myddns.rocks +blogsite.xyz + +// dynv6 : https://dynv6.com +// Submitted by Dominik Menke +dynv6.net + +// E4YOU spol. s.r.o. : https://e4you.cz/ +// Submitted by Vladimir Dudr +e4.cz + +// En root‽ : https://en-root.org +// Submitted by Emmanuel Raviart +en-root.fr + +// Enalean SAS: https://www.enalean.com +// Submitted by Thomas Cottier +mytuleap.com + +// ECG Robotics, Inc: https://ecgrobotics.org +// Submitted by +onred.one +staging.onred.one + +// Enonic : http://enonic.com/ +// Submitted by Erik Kaareng-Sunde +enonic.io +customer.enonic.io + +// EU.org https://eu.org/ +// Submitted by Pierre Beyssac +eu.org +al.eu.org +asso.eu.org +at.eu.org +au.eu.org +be.eu.org +bg.eu.org +ca.eu.org +cd.eu.org +ch.eu.org +cn.eu.org +cy.eu.org +cz.eu.org +de.eu.org +dk.eu.org +edu.eu.org +ee.eu.org +es.eu.org +fi.eu.org +fr.eu.org +gr.eu.org +hr.eu.org +hu.eu.org +ie.eu.org +il.eu.org +in.eu.org +int.eu.org +is.eu.org +it.eu.org +jp.eu.org +kr.eu.org +lt.eu.org +lu.eu.org +lv.eu.org +mc.eu.org +me.eu.org +mk.eu.org +mt.eu.org +my.eu.org +net.eu.org +ng.eu.org +nl.eu.org +no.eu.org +nz.eu.org +paris.eu.org +pl.eu.org +pt.eu.org +q-a.eu.org +ro.eu.org +ru.eu.org +se.eu.org +si.eu.org +sk.eu.org +tr.eu.org +uk.eu.org +us.eu.org + +// Evennode : http://www.evennode.com/ +// Submitted by Michal Kralik +eu-1.evennode.com +eu-2.evennode.com +eu-3.evennode.com +eu-4.evennode.com +us-1.evennode.com +us-2.evennode.com +us-3.evennode.com +us-4.evennode.com + +// eDirect Corp. : https://hosting.url.com.tw/ +// Submitted by C.S. chang +twmail.cc +twmail.net +twmail.org +mymailer.com.tw +url.tw + +// Fabrica Technologies, Inc. 
: https://www.fabrica.dev/ +// Submitted by Eric Jiang +onfabrica.com + +// Facebook, Inc. +// Submitted by Peter Ruibal +apps.fbsbx.com + +// FAITID : https://faitid.org/ +// Submitted by Maxim Alzoba +// https://www.flexireg.net/stat_info +ru.net +adygeya.ru +bashkiria.ru +bir.ru +cbg.ru +com.ru +dagestan.ru +grozny.ru +kalmykia.ru +kustanai.ru +marine.ru +mordovia.ru +msk.ru +mytis.ru +nalchik.ru +nov.ru +pyatigorsk.ru +spb.ru +vladikavkaz.ru +vladimir.ru +abkhazia.su +adygeya.su +aktyubinsk.su +arkhangelsk.su +armenia.su +ashgabad.su +azerbaijan.su +balashov.su +bashkiria.su +bryansk.su +bukhara.su +chimkent.su +dagestan.su +east-kazakhstan.su +exnet.su +georgia.su +grozny.su +ivanovo.su +jambyl.su +kalmykia.su +kaluga.su +karacol.su +karaganda.su +karelia.su +khakassia.su +krasnodar.su +kurgan.su +kustanai.su +lenug.su +mangyshlak.su +mordovia.su +msk.su +murmansk.su +nalchik.su +navoi.su +north-kazakhstan.su +nov.su +obninsk.su +penza.su +pokrovsk.su +sochi.su +spb.su +tashkent.su +termez.su +togliatti.su +troitsk.su +tselinograd.su +tula.su +tuva.su +vladikavkaz.su +vladimir.su +vologda.su + +// Fancy Bits, LLC : http://getchannels.com +// Submitted by Aman Gupta +channelsdvr.net +u.channelsdvr.net + +// Fastly Inc. : http://www.fastly.com/ +// Submitted by Fastly Security +fastly-terrarium.com +fastlylb.net +map.fastlylb.net +freetls.fastly.net +map.fastly.net +a.prod.fastly.net +global.prod.fastly.net +a.ssl.fastly.net +b.ssl.fastly.net +global.ssl.fastly.net + +// FASTVPS EESTI OU : https://fastvps.ru/ +// Submitted by Likhachev Vasiliy +fastvps-server.com +fastvps.host +myfast.host +fastvps.site +myfast.space + +// Featherhead : https://featherhead.xyz/ +// Submitted by Simon Menke +fhapp.xyz + +// Fedora : https://fedoraproject.org/ +// submitted by Patrick Uiterwijk +fedorainfracloud.org +fedorapeople.org +cloud.fedoraproject.org +app.os.fedoraproject.org +app.os.stg.fedoraproject.org + +// FearWorks Media Ltd. : https://fearworksmedia.co.uk +// submitted by Keith Fairley +conn.uk +copro.uk +couk.me +ukco.me + +// Fermax : https://fermax.com/ +// submitted by Koen Van Isterdael +mydobiss.com + +// Filegear Inc. : https://www.filegear.com +// Submitted by Jason Zhu +filegear.me +filegear-au.me +filegear-de.me +filegear-gb.me +filegear-ie.me +filegear-jp.me +filegear-sg.me + +// Firebase, Inc. +// Submitted by Chris Raynor +firebaseapp.com + +// fly.io: https://fly.io +// Submitted by Kurt Mackey +fly.dev +edgeapp.net +shw.io + +// Flynn : https://flynn.io +// Submitted by Jonathan Rudenberg +flynnhosting.net + +// Frederik Braun https://frederik-braun.com +// Submitted by Frederik Braun +0e.vc + +// Freebox : http://www.freebox.fr +// Submitted by Romain Fliedel +freebox-os.com +freeboxos.com +fbx-os.fr +fbxos.fr +freebox-os.fr +freeboxos.fr + +// freedesktop.org : https://www.freedesktop.org +// Submitted by Daniel Stone +freedesktop.org + +// FunkFeuer - Verein zur Förderung freier Netze : https://www.funkfeuer.at +// Submitted by Daniel A. Maierhofer +wien.funkfeuer.at + +// Futureweb OG : http://www.futureweb.at +// Submitted by Andreas Schnederle-Wagner +*.futurecms.at +*.ex.futurecms.at +*.in.futurecms.at +futurehosting.at +futuremailing.at +*.ex.ortsinfo.at +*.kunden.ortsinfo.at +*.statics.cloud + +// GDS : https://www.gov.uk/service-manual/operations/operating-servicegovuk-subdomains +// Submitted by David Illsley +service.gov.uk + +// Gehirn Inc. : https://www.gehirn.co.jp/ +// Submitted by Kohei YOSHIDA +gehirn.ne.jp +usercontent.jp + +// Gentlent, Inc. 
: https://www.gentlent.com +// Submitted by Tom Klein +gentapps.com +gentlentapis.com +lab.ms + +// GitHub, Inc. +// Submitted by Patrick Toomey +github.io +githubusercontent.com + +// GitLab, Inc. +// Submitted by Alex Hanselka +gitlab.io + +// Gitplac.si - https://gitplac.si +// Submitted by Aljaž Starc +gitpage.si + +// Glitch, Inc : https://glitch.com +// Submitted by Mads Hartmann +glitch.me + +// GMO Pepabo, Inc. : https://pepabo.com/ +// Submitted by dojineko +lolipop.io + +// GOV.UK Platform as a Service : https://www.cloud.service.gov.uk/ +// Submitted by Tom Whitwell +cloudapps.digital +london.cloudapps.digital + +// GOV.UK Pay : https://www.payments.service.gov.uk/ +// Submitted by Richard Baker +pymnt.uk + +// UKHomeOffice : https://www.gov.uk/government/organisations/home-office +// Submitted by Jon Shanks +homeoffice.gov.uk + +// GlobeHosting, Inc. +// Submitted by Zoltan Egresi +ro.im + +// GoIP DNS Services : http://www.goip.de +// Submitted by Christian Poulter +goip.de + +// Google, Inc. +// Submitted by Eduardo Vela +run.app +a.run.app +web.app +*.0emm.com +appspot.com +*.r.appspot.com +blogspot.ae +blogspot.al +blogspot.am +blogspot.ba +blogspot.be +blogspot.bg +blogspot.bj +blogspot.ca +blogspot.cf +blogspot.ch +blogspot.cl +blogspot.co.at +blogspot.co.id +blogspot.co.il +blogspot.co.ke +blogspot.co.nz +blogspot.co.uk +blogspot.co.za +blogspot.com +blogspot.com.ar +blogspot.com.au +blogspot.com.br +blogspot.com.by +blogspot.com.co +blogspot.com.cy +blogspot.com.ee +blogspot.com.eg +blogspot.com.es +blogspot.com.mt +blogspot.com.ng +blogspot.com.tr +blogspot.com.uy +blogspot.cv +blogspot.cz +blogspot.de +blogspot.dk +blogspot.fi +blogspot.fr +blogspot.gr +blogspot.hk +blogspot.hr +blogspot.hu +blogspot.ie +blogspot.in +blogspot.is +blogspot.it +blogspot.jp +blogspot.kr +blogspot.li +blogspot.lt +blogspot.lu +blogspot.md +blogspot.mk +blogspot.mr +blogspot.mx +blogspot.my +blogspot.nl +blogspot.no +blogspot.pe +blogspot.pt +blogspot.qa +blogspot.re +blogspot.ro +blogspot.rs +blogspot.ru +blogspot.se +blogspot.sg +blogspot.si +blogspot.sk +blogspot.sn +blogspot.td +blogspot.tw +blogspot.ug +blogspot.vn +cloudfunctions.net +cloud.goog +codespot.com +googleapis.com +googlecode.com +pagespeedmobilizer.com +publishproxy.com +withgoogle.com +withyoutube.com + +// Aaron Marais' Gitlab pages: https://lab.aaronleem.co.za +// Submitted by Aaron Marais +graphox.us + +// Group 53, LLC : https://www.group53.com +// Submitted by Tyler Todd +awsmppl.com + +// Hakaran group: http://hakaran.cz +// Submited by Arseniy Sokolov +fin.ci +free.hr +caa.li +ua.rs +conf.se + +// Handshake : https://handshake.org +// Submitted by Mike Damm +hs.zone +hs.run + +// Hashbang : https://hashbang.sh +hashbang.sh + +// Hasura : https://hasura.io +// Submitted by Shahidh K Muhammed +hasura.app +hasura-app.io + +// Hepforge : https://www.hepforge.org +// Submitted by David Grellscheid +hepforge.org + +// Heroku : https://www.heroku.com/ +// Submitted by Tom Maher +herokuapp.com +herokussl.com + +// Hibernating Rhinos +// Submitted by Oren Eini +myravendb.com +ravendb.community +ravendb.me +development.run +ravendb.run + +// HOSTBIP REGISTRY : https://www.hostbip.com/ +// Submitted by Atanunu Igbunuroghene +bpl.biz +orx.biz +ng.city +biz.gl +ng.ink +col.ng +firm.ng +gen.ng +ltd.ng +ngo.ng +ng.school +sch.so + +// HostyHosting (hostyhosting.com) +hostyhosting.io + +// Häkkinen.fi +// Submitted by Eero Häkkinen +häkkinen.fi + +// Ici la Lune : http://www.icilalune.com/ +// Submitted by Simon Morvan 
+*.moonscale.io +moonscale.net + +// iki.fi +// Submitted by Hannu Aronsson +iki.fi + +// Individual Network Berlin e.V. : https://www.in-berlin.de/ +// Submitted by Christian Seitz +dyn-berlin.de +in-berlin.de +in-brb.de +in-butter.de +in-dsl.de +in-dsl.net +in-dsl.org +in-vpn.de +in-vpn.net +in-vpn.org + +// info.at : http://www.info.at/ +biz.at +info.at + +// info.cx : http://info.cx +// Submitted by Jacob Slater +info.cx + +// Interlegis : http://www.interlegis.leg.br +// Submitted by Gabriel Ferreira +ac.leg.br +al.leg.br +am.leg.br +ap.leg.br +ba.leg.br +ce.leg.br +df.leg.br +es.leg.br +go.leg.br +ma.leg.br +mg.leg.br +ms.leg.br +mt.leg.br +pa.leg.br +pb.leg.br +pe.leg.br +pi.leg.br +pr.leg.br +rj.leg.br +rn.leg.br +ro.leg.br +rr.leg.br +rs.leg.br +sc.leg.br +se.leg.br +sp.leg.br +to.leg.br + +// intermetrics GmbH : https://pixolino.com/ +// Submitted by Wolfgang Schwarz +pixolino.com + +// Internet-Pro, LLP: https://netangels.ru/ +// Submited by Vasiliy Sheredeko +na4u.ru + +// IPiFony Systems, Inc. : https://www.ipifony.com/ +// Submitted by Matthew Hardeman +ipifony.net + +// IServ GmbH : https://iserv.eu +// Submitted by Kim-Alexander Brodowski +mein-iserv.de +schulserver.de +test-iserv.de +iserv.dev + +// I-O DATA DEVICE, INC. : http://www.iodata.com/ +// Submitted by Yuji Minagawa +iobb.net + +//Jelastic, Inc. : https://jelastic.com/ +// Submited by Ihor Kolodyuk +appengine.flow.ch +vip.jelastic.cloud +jele.cloud +jele.club +dopaas.com +hidora.com +jcloud.ik-server.com +demo.jelastic.com +paas.massivegrid.com +j.scaleforce.com.cy +jelastic.dogado.eu +fi.cloudplatform.fi +paas.datacenter.fi +jele.host +mircloud.host +jele.io +cloudjiffy.net +jls-sto1.elastx.net +jelastic.saveincloud.net +jelastic.regruhosting.ru +jele.site +jelastic.team +j.layershift.co.uk + +// Jino : https://www.jino.ru +// Submitted by Sergey Ulyashin +myjino.ru +*.hosting.myjino.ru +*.landing.myjino.ru +*.spectrum.myjino.ru +*.vps.myjino.ru + +// Joyent : https://www.joyent.com/ +// Submitted by Brian Bennett +*.triton.zone +*.cns.joyent.com + +// JS.ORG : http://dns.js.org +// Submitted by Stefan Keim +js.org + +// KaasHosting : http://www.kaashosting.nl/ +// Submitted by Wouter Bakker +kaas.gg +khplay.nl + +// Keyweb AG : https://www.keyweb.de +// Submitted by Martin Dannehl +keymachine.de + +// KingHost : https://king.host +// Submitted by Felipe Keller Braz +kinghost.net +uni5.net + +// KnightPoint Systems, LLC : http://www.knightpoint.com/ +// Submitted by Roy Keene +knightpoint.systems + +// KUROKU LTD : https://kuroku.ltd/ +// Submitted by DisposaBoy +oya.to + +// .KRD : http://nic.krd/data/krd/Registration%20Policy.pdf +co.krd +edu.krd + +// LCube - Professional hosting e.K. : https://www.lcube-webhosting.de +// Submitted by Lars Laehn +git-repos.de +lcube-server.de +svn-repos.de + +// Leadpages : https://www.leadpages.net +// Submitted by Greg Dallavalle +leadpages.co +lpages.co +lpusercontent.com + +// Lelux.fi : https://lelux.fi/ +// Submitted by Lelux Admin +lelux.site + +// Lifetime Hosting : https://Lifetime.Hosting/ +// Submitted by Mike Fillator +co.business +co.education +co.events +co.financial +co.network +co.place +co.technology + +// Lightmaker Property Manager, Inc. 
: https://app.lmpm.com/ +// Submitted by Greg Holland +app.lmpm.com + +// Linki Tools UG : https://linki.tools +// Submitted by Paulo Matos +linkitools.space + +// linkyard ldt: https://www.linkyard.ch/ +// Submitted by Mario Siegenthaler +linkyard.cloud +linkyard-cloud.ch + +// Linode : https://linode.com +// Submitted by +members.linode.com +*.nodebalancer.linode.com +*.linodeobjects.com + +// LiquidNet Ltd : http://www.liquidnetlimited.com/ +// Submitted by Victor Velchev +we.bs + +// Log'in Line : https://www.loginline.com/ +// Submitted by Rémi Mach +loginline.app +loginline.dev +loginline.io +loginline.services +loginline.site + +// LubMAN UMCS Sp. z o.o : https://lubman.pl/ +// Submitted by Ireneusz Maliszewski +krasnik.pl +leczna.pl +lubartow.pl +lublin.pl +poniatowa.pl +swidnik.pl + +// Lug.org.uk : https://lug.org.uk +// Submitted by Jon Spriggs +uklugs.org +glug.org.uk +lug.org.uk +lugs.org.uk + +// Lukanet Ltd : https://lukanet.com +// Submitted by Anton Avramov +barsy.bg +barsy.co.uk +barsyonline.co.uk +barsycenter.com +barsyonline.com +barsy.club +barsy.de +barsy.eu +barsy.in +barsy.info +barsy.io +barsy.me +barsy.menu +barsy.mobi +barsy.net +barsy.online +barsy.org +barsy.pro +barsy.pub +barsy.shop +barsy.site +barsy.support +barsy.uk + +// Magento Commerce +// Submitted by Damien Tournoud +*.magentosite.cloud + +// May First - People Link : https://mayfirst.org/ +// Submitted by Jamie McClelland +mayfirst.info +mayfirst.org + +// Mail.Ru Group : https://hb.cldmail.ru +// Submitted by Ilya Zaretskiy +hb.cldmail.ru + +// mcpe.me : https://mcpe.me +// Submitted by Noa Heyl +mcpe.me + +// McHost : https://mchost.ru +// Submitted by Evgeniy Subbotin +mcdir.ru +vps.mcdir.ru + +// Memset hosting : https://www.memset.com +// Submitted by Tom Whitwell +miniserver.com +memset.net + +// MetaCentrum, CESNET z.s.p.o. : https://www.metacentrum.cz/en/ +// Submitted by Zdeněk Šustr +*.cloud.metacentrum.cz +custom.metacentrum.cz + +// MetaCentrum, CESNET z.s.p.o. 
: https://www.metacentrum.cz/en/ +// Submitted by Radim Janča +flt.cloud.muni.cz +usr.cloud.muni.cz + +// Meteor Development Group : https://www.meteor.com/hosting +// Submitted by Pierre Carrier +meteorapp.com +eu.meteorapp.com + +// Michau Enterprises Limited : http://www.co.pl/ +co.pl + +// Microsoft Corporation : http://microsoft.com +// Submitted by Mostafa Elzeiny +*.azurecontainer.io +azurewebsites.net +azure-mobile.net +cloudapp.net + +// minion.systems : http://minion.systems +// Submitted by Robert Böttinger +csx.cc + +// MobileEducation, LLC : https://joinforte.com +// Submitted by Grayson Martin +forte.id + +// Mozilla Corporation : https://mozilla.com +// Submitted by Ben Francis +mozilla-iot.org + +// Mozilla Foundation : https://mozilla.org/ +// Submitted by glob +bmoattachments.org + +// MSK-IX : https://www.msk-ix.ru/ +// Submitted by Khannanov Roman +net.ru +org.ru +pp.ru + +// Mythic Beasts : https://www.mythic-beasts.com +// Submitted by Paul Cammish +hostedpi.com +customer.mythic-beasts.com +lynx.mythic-beasts.com +ocelot.mythic-beasts.com +onza.mythic-beasts.com +sphinx.mythic-beasts.com +vs.mythic-beasts.com +x.mythic-beasts.com +yali.mythic-beasts.com +cust.retrosnub.co.uk + +// Nabu Casa : https://www.nabucasa.com +// Submitted by Paulus Schoutsen +ui.nabu.casa + +// Names.of.London : https://names.of.london/ +// Submitted by James Stevens or +pony.club +of.fashion +on.fashion +of.football +in.london +of.london +for.men +and.mom +for.mom +for.one +for.sale +of.work +to.work + +// NCTU.ME : https://nctu.me/ +// Submitted by Tocknicsu +nctu.me + +// Netlify : https://www.netlify.com +// Submitted by Jessica Parsons +netlify.app + +// Neustar Inc. +// Submitted by Trung Tran +4u.com + +// ngrok : https://ngrok.com/ +// Submitted by Alan Shreve +ngrok.io + +// Nimbus Hosting Ltd. : https://www.nimbushosting.co.uk/ +// Submitted by Nicholas Ford +nh-serv.co.uk + +// NFSN, Inc. 
: https://www.NearlyFreeSpeech.NET/ +// Submitted by Jeff Wheelhouse +nfshost.com + +// Now-DNS : https://now-dns.com +// Submitted by Steve Russell +dnsking.ch +mypi.co +n4t.co +001www.com +ddnslive.com +myiphost.com +forumz.info +16-b.it +32-b.it +64-b.it +soundcast.me +tcp4.me +dnsup.net +hicam.net +now-dns.net +ownip.net +vpndns.net +dynserv.org +now-dns.org +x443.pw +now-dns.top +ntdll.top +freeddns.us +crafting.xyz +zapto.xyz + +// nsupdate.info : https://www.nsupdate.info/ +// Submitted by Thomas Waldmann +nsupdate.info +nerdpol.ovh + +// No-IP.com : https://noip.com/ +// Submitted by Deven Reza +blogsyte.com +brasilia.me +cable-modem.org +ciscofreak.com +collegefan.org +couchpotatofries.org +damnserver.com +ddns.me +ditchyourip.com +dnsfor.me +dnsiskinky.com +dvrcam.info +dynns.com +eating-organic.net +fantasyleague.cc +geekgalaxy.com +golffan.us +health-carereform.com +homesecuritymac.com +homesecuritypc.com +hopto.me +ilovecollege.info +loginto.me +mlbfan.org +mmafan.biz +myactivedirectory.com +mydissent.net +myeffect.net +mymediapc.net +mypsx.net +mysecuritycamera.com +mysecuritycamera.net +mysecuritycamera.org +net-freaks.com +nflfan.org +nhlfan.net +no-ip.ca +no-ip.co.uk +no-ip.net +noip.us +onthewifi.com +pgafan.net +point2this.com +pointto.us +privatizehealthinsurance.net +quicksytes.com +read-books.org +securitytactics.com +serveexchange.com +servehumour.com +servep2p.com +servesarcasm.com +stufftoread.com +ufcfan.org +unusualperson.com +workisboring.com +3utilities.com +bounceme.net +ddns.net +ddnsking.com +gotdns.ch +hopto.org +myftp.biz +myftp.org +myvnc.com +no-ip.biz +no-ip.info +no-ip.org +noip.me +redirectme.net +servebeer.com +serveblog.net +servecounterstrike.com +serveftp.com +servegame.com +servehalflife.com +servehttp.com +serveirc.com +serveminecraft.net +servemp3.com +servepics.com +servequake.com +sytes.net +webhop.me +zapto.org + +// NodeArt : https://nodeart.io +// Submitted by Konstantin Nosov +stage.nodeart.io + +// Nodum B.V. : https://nodum.io/ +// Submitted by Wietse Wind +nodum.co +nodum.io + +// Nucleos Inc. : https://nucleos.com +// Submitted by Piotr Zduniak +pcloud.host + +// NYC.mn : http://www.information.nyc.mn +// Submitted by Matthew Brown +nyc.mn + +// NymNom : https://nymnom.com/ +// Submitted by NymNom +nom.ae +nom.af +nom.ai +nom.al +nym.by +nom.bz +nym.bz +nom.cl +nym.ec +nom.gd +nom.ge +nom.gl +nym.gr +nom.gt +nym.gy +nym.hk +nom.hn +nym.ie +nom.im +nom.ke +nym.kz +nym.la +nym.lc +nom.li +nym.li +nym.lt +nym.lu +nom.lv +nym.me +nom.mk +nym.mn +nym.mx +nom.nu +nym.nz +nym.pe +nym.pt +nom.pw +nom.qa +nym.ro +nom.rs +nom.si +nym.sk +nom.st +nym.su +nym.sx +nom.tj +nym.tw +nom.ug +nom.uy +nom.vc +nom.vg + +// Observable, Inc. : https://observablehq.com +// Submitted by Mike Bostock +static.observableusercontent.com + +// Octopodal Solutions, LLC. : https://ulterius.io/ +// Submitted by Andrew Sampson +cya.gg + +// OMG.LOL : +// Submitted by Adam Newbold +omg.lol + +// Omnibond Systems, LLC. : https://www.omnibond.com +// Submitted by Cole Estep +cloudycluster.net + +// OmniWe Limited: https://omniwe.com +// Submitted by Vicary Archangel +omniwe.site + +// One Fold Media : http://www.onefoldmedia.com/ +// Submitted by Eddie Jones +nid.io + +// Open Social : https://www.getopensocial.com/ +// Submitted by Alexander Varwijk +opensocial.site + +// OpenCraft GmbH : http://opencraft.com/ +// Submitted by Sven Marnach +opencraft.hosting + +// Opera Software, A.S.A. 
+// Submitted by Yngve Pettersen +operaunite.com + +// Oursky Limited : https://skygear.io/ +// Submited by Skygear Developer +skygearapp.com + +// OutSystems +// Submitted by Duarte Santos +outsystemscloud.com + +// OwnProvider GmbH: http://www.ownprovider.com +// Submitted by Jan Moennich +ownprovider.com +own.pm + +// OwO : https://whats-th.is/ +// Submitted by Dean Sheather +*.owo.codes + +// OX : http://www.ox.rs +// Submitted by Adam Grand +ox.rs + +// oy.lc +// Submitted by Charly Coste +oy.lc + +// Pagefog : https://pagefog.com/ +// Submitted by Derek Myers +pgfog.com + +// Pagefront : https://www.pagefronthq.com/ +// Submitted by Jason Kriss +pagefrontapp.com + +// PageXL : https://pagexl.com +// Submitted by Yann Guichard +pagexl.com + +// pcarrier.ca Software Inc: https://pcarrier.ca/ +// Submitted by Pierre Carrier +bar0.net +bar1.net +bar2.net +rdv.to + +// .pl domains (grandfathered) +art.pl +gliwice.pl +krakow.pl +poznan.pl +wroc.pl +zakopane.pl + +// Pantheon Systems, Inc. : https://pantheon.io/ +// Submitted by Gary Dylina +pantheonsite.io +gotpantheon.com + +// Peplink | Pepwave : http://peplink.com/ +// Submitted by Steve Leung +mypep.link + +// Perspecta : https://perspecta.com/ +// Submitted by Kenneth Van Alstyne +perspecta.cloud + +// Planet-Work : https://www.planet-work.com/ +// Submitted by Frédéric VANNIÈRE +on-web.fr + +// Platform.sh : https://platform.sh +// Submitted by Nikola Kotur +bc.platform.sh +ent.platform.sh +eu.platform.sh +us.platform.sh +*.platformsh.site + +// Platter: https://platter.dev +// Submitted by Patrick Flor +platter-app.com +platter-app.dev +platterp.us + +// Plesk : https://www.plesk.com/ +// Submitted by Anton Akhtyamov +pdns.page +plesk.page +pleskns.com + +// Port53 : https://port53.io/ +// Submitted by Maximilian Schieder +dyn53.io + +// Positive Codes Technology Company : http://co.bn/faq.html +// Submitted by Zulfais +co.bn + +// prgmr.com : https://prgmr.com/ +// Submitted by Sarah Newman +xen.prgmr.com + +// priv.at : http://www.nic.priv.at/ +// Submitted by registry +priv.at + +// privacytools.io : https://www.privacytools.io/ +// Submitted by Jonah Aragon +prvcy.page + +// Protocol Labs : https://protocol.ai/ +// Submitted by Michael Burns +*.dweb.link + +// Protonet GmbH : http://protonet.io +// Submitted by Martin Meier +protonet.io + +// Publication Presse Communication SARL : https://ppcom.fr +// Submitted by Yaacov Akiba Slama +chirurgiens-dentistes-en-france.fr +byen.site + +// pubtls.org: https://www.pubtls.org +// Submitted by Kor Nielsen +pubtls.org + +// Qualifio : https://qualifio.com/ +// Submitted by Xavier De Cock +qualifioapp.com + +// QuickBackend: https://www.quickbackend.com +// Submitted by Dani Biro +qbuser.com + +// Redstar Consultants : https://www.redstarconsultants.com/ +// Submitted by Jons Slemmer +instantcloud.cn + +// Russian Academy of Sciences +// Submitted by Tech Support +ras.ru + +// QA2 +// Submitted by Daniel Dent (https://www.danieldent.com/) +qa2.com + +// QCX +// Submitted by Cassandra Beelen +qcx.io +*.sys.qcx.io + +// QNAP System Inc : https://www.qnap.com +// Submitted by Nick Chang +dev-myqnapcloud.com +alpha-myqnapcloud.com +myqnapcloud.com + +// Quip : https://quip.com +// Submitted by Patrick Linehan +*.quipelements.com + +// Qutheory LLC : http://qutheory.io +// Submitted by Jonas Schwartz +vapor.cloud +vaporcloud.io + +// Rackmaze LLC : https://www.rackmaze.com +// Submitted by Kirill Pertsev +rackmaze.com +rackmaze.net + +// Rakuten Games, Inc : https://dev.viberplay.io +// 
Submitted by Joshua Zhang +g.vbrplsbx.io + +// Rancher Labs, Inc : https://rancher.com +// Submitted by Vincent Fiduccia +*.on-k3s.io +*.on-rancher.cloud +*.on-rio.io + +// Read The Docs, Inc : https://www.readthedocs.org +// Submitted by David Fischer +readthedocs.io + +// Red Hat, Inc. OpenShift : https://openshift.redhat.com/ +// Submitted by Tim Kramer +rhcloud.com + +// Render : https://render.com +// Submitted by Anurag Goel +app.render.com +onrender.com + +// Repl.it : https://repl.it +// Submitted by Mason Clayton +repl.co +repl.run + +// Resin.io : https://resin.io +// Submitted by Tim Perry +resindevice.io +devices.resinstaging.io + +// RethinkDB : https://www.rethinkdb.com/ +// Submitted by Chris Kastorff +hzc.io + +// Revitalised Limited : http://www.revitalised.co.uk +// Submitted by Jack Price +wellbeingzone.eu +ptplus.fit +wellbeingzone.co.uk + +// Rochester Institute of Technology : http://www.rit.edu/ +// Submitted by Jennifer Herting +git-pages.rit.edu + +// Sandstorm Development Group, Inc. : https://sandcats.io/ +// Submitted by Asheesh Laroia +sandcats.io + +// SBE network solutions GmbH : https://www.sbe.de/ +// Submitted by Norman Meilick +logoip.de +logoip.com + +// schokokeks.org GbR : https://schokokeks.org/ +// Submitted by Hanno Böck +schokokeks.net + +// Scottish Government: https://www.gov.scot +// Submitted by Martin Ellis +gov.scot + +// Scry Security : http://www.scrysec.com +// Submitted by Shante Adam +scrysec.com + +// Securepoint GmbH : https://www.securepoint.de +// Submitted by Erik Anders +firewall-gateway.com +firewall-gateway.de +my-gateway.de +my-router.de +spdns.de +spdns.eu +firewall-gateway.net +my-firewall.org +myfirewall.org +spdns.org + +// Seidat : https://www.seidat.com +// Submitted by Artem Kondratev +seidat.net + +// Senseering GmbH : https://www.senseering.de +// Submitted by Felix Mönckemeyer +senseering.net + +// Service Online LLC : http://drs.ua/ +// Submitted by Serhii Bulakh +biz.ua +co.ua +pp.ua + +// ShiftEdit : https://shiftedit.net/ +// Submitted by Adam Jimenez +shiftedit.io + +// Shopblocks : http://www.shopblocks.com/ +// Submitted by Alex Bowers +myshopblocks.com + +// Shopit : https://www.shopitcommerce.com/ +// Submitted by Craig McMahon +shopitsite.com + +// shopware AG : https://shopware.com +// Submitted by Jens Küper +shopware.store + +// Siemens Mobility GmbH +// Submitted by Oliver Graebner +mo-siemens.io + +// SinaAppEngine : http://sae.sina.com.cn/ +// Submitted by SinaAppEngine +1kapp.com +appchizi.com +applinzi.com +sinaapp.com +vipsinaapp.com + +// Siteleaf : https://www.siteleaf.com/ +// Submitted by Skylar Challand +siteleaf.net + +// Skyhat : http://www.skyhat.io +// Submitted by Shante Adam +bounty-full.com +alpha.bounty-full.com +beta.bounty-full.com + +// Small Technology Foundation : https://small-tech.org +// Submitted by Aral Balkan +small-web.org + +// Stackhero : https://www.stackhero.io +// Submitted by Adrien Gillon +stackhero-network.com + +// staticland : https://static.land +// Submitted by Seth Vincent +static.land +dev.static.land +sites.static.land + +// Sony Interactive Entertainment LLC : https://sie.com/ +// Submitted by David Coles +playstation-cloud.com + +// SourceLair PC : https://www.sourcelair.com +// Submitted by Antonis Kalipetis +apps.lair.io +*.stolos.io + +// SpaceKit : https://www.spacekit.io/ +// Submitted by Reza Akhavan +spacekit.io + +// SpeedPartner GmbH: https://www.speedpartner.de/ +// Submitted by Stefan Neufeind +customer.speedpartner.de + +// Standard Library : 
https://stdlib.com +// Submitted by Jacob Lee +api.stdlib.com + +// Storj Labs Inc. : https://storj.io/ +// Submitted by Philip Hutchins +storj.farm + +// Studenten Net Twente : http://www.snt.utwente.nl/ +// Submitted by Silke Hofstra +utwente.io + +// Student-Run Computing Facility : https://www.srcf.net/ +// Submitted by Edwin Balani +soc.srcf.net +user.srcf.net + +// Sub 6 Limited: http://www.sub6.com +// Submitted by Dan Miller +temp-dns.com + +// Swisscom Application Cloud: https://developer.swisscom.com +// Submitted by Matthias.Winzeler +applicationcloud.io +scapp.io + +// Symfony, SAS : https://symfony.com/ +// Submitted by Fabien Potencier +*.s5y.io +*.sensiosite.cloud + +// Syncloud : https://syncloud.org +// Submitted by Boris Rybalkin +syncloud.it + +// Synology, Inc. : https://www.synology.com/ +// Submitted by Rony Weng +diskstation.me +dscloud.biz +dscloud.me +dscloud.mobi +dsmynas.com +dsmynas.net +dsmynas.org +familyds.com +familyds.net +familyds.org +i234.me +myds.me +synology.me +vpnplus.to +direct.quickconnect.to + +// TAIFUN Software AG : http://taifun-software.de +// Submitted by Bjoern Henke +taifun-dns.de + +// TASK geographical domains (www.task.gda.pl/uslugi/dns) +gda.pl +gdansk.pl +gdynia.pl +med.pl +sopot.pl + +// Teckids e.V. : https://www.teckids.org +// Submitted by Dominik George +edugit.org + +// Telebit : https://telebit.cloud +// Submitted by AJ ONeal +telebit.app +telebit.io +*.telebit.xyz + +// The Gwiddle Foundation : https://gwiddlefoundation.org.uk +// Submitted by Joshua Bayfield +gwiddle.co.uk + +// Thingdust AG : https://thingdust.com/ +// Submitted by Adrian Imboden +thingdustdata.com +cust.dev.thingdust.io +cust.disrec.thingdust.io +cust.prod.thingdust.io +cust.testing.thingdust.io +*.firenet.ch +*.svc.firenet.ch + +// Tlon.io : https://tlon.io +// Submitted by Mark Staarink +arvo.network +azimuth.network + +// TownNews.com : http://www.townnews.com +// Submitted by Dustin Ward +bloxcms.com +townnews-staging.com + +// TrafficPlex GmbH : https://www.trafficplex.de/ +// Submitted by Phillipp Röll +12hp.at +2ix.at +4lima.at +lima-city.at +12hp.ch +2ix.ch +4lima.ch +lima-city.ch +trafficplex.cloud +de.cool +12hp.de +2ix.de +4lima.de +lima-city.de +1337.pictures +clan.rip +lima-city.rocks +webspace.rocks +lima.zone + +// TransIP : https://www.transip.nl +// Submitted by Rory Breuk +*.transurl.be +*.transurl.eu +*.transurl.nl + +// TuxFamily : http://tuxfamily.org +// Submitted by TuxFamily administrators +tuxfamily.org + +// TwoDNS : https://www.twodns.de/ +// Submitted by TwoDNS-Support +dd-dns.de +diskstation.eu +diskstation.org +dray-dns.de +draydns.de +dyn-vpn.de +dynvpn.de +mein-vigor.de +my-vigor.de +my-wan.de +syno-ds.de +synology-diskstation.de +synology-ds.de + +// Uberspace : https://uberspace.de +// Submitted by Moritz Werner +uber.space +*.uberspace.de + +// UDR Limited : http://www.udr.hk.com +// Submitted by registry +hk.com +hk.org +ltd.hk +inc.hk + +// United Gameserver GmbH : https://united-gameserver.de +// Submitted by Stefan Schwarz +virtualuser.de +virtual-user.de + +// urown.net : https://urown.net +// Submitted by Hostmaster +urown.cloud +dnsupdate.info + +// .US +// Submitted by Ed Moore +lib.de.us + +// VeryPositive SIA : http://very.lv +// Submitted by Danko Aleksejevs +2038.io + +// Vercel, Inc : https://vercel.com/ +// Submitted by Connor Davis +vercel.app +vercel.dev +now.sh + +// Viprinet Europe GmbH : http://www.viprinet.com +// Submitted by Simon Kissel +router.management + +// Virtual-Info : 
https://www.virtual-info.info/ +// Submitted by Adnan RIHAN +v-info.info + +// Voorloper.com: https://voorloper.com +// Submitted by Nathan van Bakel +voorloper.cloud + +// Voxel.sh DNS : https://voxel.sh/dns/ +// Submitted by Mia Rehlinger +neko.am +nyaa.am +be.ax +cat.ax +es.ax +eu.ax +gg.ax +mc.ax +us.ax +xy.ax +nl.ci +xx.gl +app.gp +blog.gt +de.gt +to.gt +be.gy +cc.hn +blog.kg +io.kg +jp.kg +tv.kg +uk.kg +us.kg +de.ls +at.md +de.md +jp.md +to.md +uwu.nu +indie.porn +vxl.sh +ch.tc +me.tc +we.tc +nyan.to +at.vg +blog.vu +dev.vu +me.vu + +// V.UA Domain Administrator : https://domain.v.ua/ +// Submitted by Serhii Rostilo +v.ua + +// Waffle Computer Inc., Ltd. : https://docs.waffleinfo.com +// Submitted by Masayuki Note +wafflecell.com + +// WebHare bv: https://www.webhare.com/ +// Submitted by Arnold Hendriks +*.webhare.dev + +// WeDeploy by Liferay, Inc. : https://www.wedeploy.com +// Submitted by Henrique Vicente +wedeploy.io +wedeploy.me +wedeploy.sh + +// Western Digital Technologies, Inc : https://www.wdc.com +// Submitted by Jung Jin +remotewd.com + +// WIARD Enterprises : https://wiardweb.com +// Submitted by Kidd Hustle +pages.wiardweb.com + +// Wikimedia Labs : https://wikitech.wikimedia.org +// Submitted by Arturo Borrero Gonzalez +wmflabs.org +toolforge.org +wmcloud.org + +// WISP : https://wisp.gg +// Submitted by Stepan Fedotov +panel.gg +daemon.panel.gg + +// WoltLab GmbH : https://www.woltlab.com +// Submitted by Tim Düsterhus +myforum.community +community-pro.de +diskussionsbereich.de +community-pro.net +meinforum.net + +// www.com.vc : http://www.com.vc +// Submitted by Li Hui +cn.vu + +// XenonCloud GbR: https://xenoncloud.net +// Submitted by Julian Uphoff +half.host + +// XnBay Technology : http://www.xnbay.com/ +// Submitted by XnBay Developer +xnbay.com +u2.xnbay.com +u2-local.xnbay.com + +// XS4ALL Internet bv : https://www.xs4all.nl/ +// Submitted by Daniel Mostertman +cistron.nl +demon.nl +xs4all.space + +// Yandex.Cloud LLC: https://cloud.yandex.com +// Submitted by Alexander Lodin +yandexcloud.net +storage.yandexcloud.net +website.yandexcloud.net + +// YesCourse Pty Ltd : https://yescourse.com +// Submitted by Atul Bhouraskar +official.academy + +// Yola : https://www.yola.com/ +// Submitted by Stefano Rivera +yolasite.com + +// Yombo : https://yombo.net +// Submitted by Mitch Schwenk +ybo.faith +yombo.me +homelink.one +ybo.party +ybo.review +ybo.science +ybo.trade + +// Yunohost : https://yunohost.org +// Submitted by Valentin Grimaud +nohost.me +noho.st + +// ZaNiC : http://www.za.net/ +// Submitted by registry +za.net +za.org + +// Zine EOOD : https://zine.bg/ +// Submitted by Martin Angelov +bss.design + +// Zitcom A/S : https://www.zitcom.dk +// Submitted by Emil Stahl +basicserver.io +virtualserver.io +enterprisecloud.nu + +// Mintere : https://mintere.com/ +// Submitted by Ben Aubin +mintere.site + +// WP Engine : https://wpengine.com/ +// Submitted by Michael Smith +wpenginepowered.com + +// Impertrix Solutions : +// Submitted by Zhixiang Zhao +impertrixcdn.com +impertrix.com +// ===END PRIVATE DOMAINS=== diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix.rb new file mode 100644 index 0000000..01f880e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix.rb @@ -0,0 +1,179 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. 
+# +# Copyright (c) 2009-2020 Simone Carletti + +require_relative "public_suffix/domain" +require_relative "public_suffix/version" +require_relative "public_suffix/errors" +require_relative "public_suffix/rule" +require_relative "public_suffix/list" + +# PublicSuffix is a Ruby domain name parser based on the Public Suffix List. +# +# The [Public Suffix List](https://publicsuffix.org) is a cross-vendor initiative +# to provide an accurate list of domain name suffixes. +# +# The Public Suffix List is an initiative of the Mozilla Project, +# but is maintained as a community resource. It is available for use in any software, +# but was originally created to meet the needs of browser manufacturers. +module PublicSuffix + + DOT = "." + BANG = "!" + STAR = "*" + + # Parses +name+ and returns the {PublicSuffix::Domain} instance. + # + # @example Parse a valid domain + # PublicSuffix.parse("google.com") + # # => # + # + # @example Parse a valid subdomain + # PublicSuffix.parse("www.google.com") + # # => # + # + # @example Parse a fully qualified domain + # PublicSuffix.parse("google.com.") + # # => # + # + # @example Parse a fully qualified domain (subdomain) + # PublicSuffix.parse("www.google.com.") + # # => # + # + # @example Parse an invalid (unlisted) domain + # PublicSuffix.parse("x.yz") + # # => # + # + # @example Parse an invalid (unlisted) domain with strict checking (without applying the default * rule) + # PublicSuffix.parse("x.yz", default_rule: nil) + # # => PublicSuffix::DomainInvalid: `x.yz` is not a valid domain + # + # @example Parse an URL (not supported, only domains) + # PublicSuffix.parse("http://www.google.com") + # # => PublicSuffix::DomainInvalid: http://www.google.com is not expected to contain a scheme + # + # + # @param [String, #to_s] name The domain name or fully qualified domain name to parse. + # @param [PublicSuffix::List] list The rule list to search, defaults to the default {PublicSuffix::List} + # @param [Boolean] ignore_private + # @return [PublicSuffix::Domain] + # + # @raise [PublicSuffix::DomainInvalid] + # If domain is not a valid domain. + # @raise [PublicSuffix::DomainNotAllowed] + # If a rule for +domain+ is found, but the rule doesn't allow +domain+. + def self.parse(name, list: List.default, default_rule: list.default_rule, ignore_private: false) + what = normalize(name) + raise what if what.is_a?(DomainInvalid) + + rule = list.find(what, default: default_rule, ignore_private: ignore_private) + + # rubocop:disable Style/IfUnlessModifier + if rule.nil? + raise DomainInvalid, "`#{what}` is not a valid domain" + end + if rule.decompose(what).last.nil? + raise DomainNotAllowed, "`#{what}` is not allowed according to Registry policy" + end + + # rubocop:enable Style/IfUnlessModifier + + decompose(what, rule) + end + + # Checks whether +domain+ is assigned and allowed, without actually parsing it. + # + # This method doesn't care whether domain is a domain or subdomain. + # The validation is performed using the default {PublicSuffix::List}. 
+ # + # @example Validate a valid domain + # PublicSuffix.valid?("example.com") + # # => true + # + # @example Validate a valid subdomain + # PublicSuffix.valid?("www.example.com") + # # => true + # + # @example Validate a not-listed domain + # PublicSuffix.valid?("example.tldnotlisted") + # # => true + # + # @example Validate a not-listed domain with strict checking (without applying the default * rule) + # PublicSuffix.valid?("example.tldnotlisted") + # # => true + # PublicSuffix.valid?("example.tldnotlisted", default_rule: nil) + # # => false + # + # @example Validate a fully qualified domain + # PublicSuffix.valid?("google.com.") + # # => true + # PublicSuffix.valid?("www.google.com.") + # # => true + # + # @example Check an URL (which is not a valid domain) + # PublicSuffix.valid?("http://www.example.com") + # # => false + # + # + # @param [String, #to_s] name The domain name or fully qualified domain name to validate. + # @param [Boolean] ignore_private + # @return [Boolean] + def self.valid?(name, list: List.default, default_rule: list.default_rule, ignore_private: false) + what = normalize(name) + return false if what.is_a?(DomainInvalid) + + rule = list.find(what, default: default_rule, ignore_private: ignore_private) + + !rule.nil? && !rule.decompose(what).last.nil? + end + + # Attempt to parse the name and returns the domain, if valid. + # + # This method doesn't raise. Instead, it returns nil if the domain is not valid for whatever reason. + # + # @param [String, #to_s] name The domain name or fully qualified domain name to parse. + # @param [PublicSuffix::List] list The rule list to search, defaults to the default {PublicSuffix::List} + # @param [Boolean] ignore_private + # @return [String] + def self.domain(name, **options) + parse(name, **options).domain + rescue PublicSuffix::Error + nil + end + + + # private + + def self.decompose(name, rule) + left, right = rule.decompose(name) + + parts = left.split(DOT) + # If we have 0 parts left, there is just a tld and no domain or subdomain + # If we have 1 part left, there is just a tld, domain and not subdomain + # If we have 2 parts left, the last part is the domain, the other parts (combined) are the subdomain + tld = right + sld = parts.empty? ? nil : parts.pop + trd = parts.empty? ? nil : parts.join(DOT) + + Domain.new(tld, sld, trd) + end + + # Pretend we know how to deal with user input. + def self.normalize(name) + name = name.to_s.dup + name.strip! + name.chomp!(DOT) + name.downcase! + + return DomainInvalid.new("Name is blank") if name.empty? + return DomainInvalid.new("Name starts with a dot") if name.start_with?(DOT) + return DomainInvalid.new("%s is not expected to contain a scheme" % name) if name.include?("://") + + name + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/domain.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/domain.rb new file mode 100644 index 0000000..3d65eac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/domain.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + # Domain represents a domain name, composed by a TLD, SLD and TRD. + class Domain + + # Splits a string into the labels, that is the dot-separated parts. + # + # The input is not validated, but it is assumed to be a valid domain name. 
+ # + # @example + # + # name_to_labels('example.com') + # # => ['example', 'com'] + # + # name_to_labels('example.co.uk') + # # => ['example', 'co', 'uk'] + # + # @param name [String, #to_s] The domain name to split. + # @return [Array] + def self.name_to_labels(name) + name.to_s.split(DOT) + end + + + attr_reader :tld, :sld, :trd + + # Creates and returns a new {PublicSuffix::Domain} instance. + # + # @overload initialize(tld) + # Initializes with a +tld+. + # @param [String] tld The TLD (extension) + # @overload initialize(tld, sld) + # Initializes with a +tld+ and +sld+. + # @param [String] tld The TLD (extension) + # @param [String] sld The TRD (domain) + # @overload initialize(tld, sld, trd) + # Initializes with a +tld+, +sld+ and +trd+. + # @param [String] tld The TLD (extension) + # @param [String] sld The SLD (domain) + # @param [String] trd The TRD (subdomain) + # + # @yield [self] Yields on self. + # @yieldparam [PublicSuffix::Domain] self The newly creates instance + # + # @example Initialize with a TLD + # PublicSuffix::Domain.new("com") + # # => # + # + # @example Initialize with a TLD and SLD + # PublicSuffix::Domain.new("com", "example") + # # => # + # + # @example Initialize with a TLD, SLD and TRD + # PublicSuffix::Domain.new("com", "example", "wwww") + # # => # + # + def initialize(*args) + @tld, @sld, @trd = args + yield(self) if block_given? + end + + # Returns a string representation of this object. + # + # @return [String] + def to_s + name + end + + # Returns an array containing the domain parts. + # + # @return [Array] + # + # @example + # + # PublicSuffix::Domain.new("google.com").to_a + # # => [nil, "google", "com"] + # + # PublicSuffix::Domain.new("www.google.com").to_a + # # => [nil, "google", "com"] + # + def to_a + [@trd, @sld, @tld] + end + + # Returns the full domain name. + # + # @return [String] + # + # @example Gets the domain name of a domain + # PublicSuffix::Domain.new("com", "google").name + # # => "google.com" + # + # @example Gets the domain name of a subdomain + # PublicSuffix::Domain.new("com", "google", "www").name + # # => "www.google.com" + # + def name + [@trd, @sld, @tld].compact.join(DOT) + end + + # Returns a domain-like representation of this object + # if the object is a {#domain?}, nil otherwise. + # + # PublicSuffix::Domain.new("com").domain + # # => nil + # + # PublicSuffix::Domain.new("com", "google").domain + # # => "google.com" + # + # PublicSuffix::Domain.new("com", "google", "www").domain + # # => "www.google.com" + # + # This method doesn't validate the input. It handles the domain + # as a valid domain name and simply applies the necessary transformations. + # + # This method returns a FQD, not just the domain part. + # To get the domain part, use #sld (aka second level domain). + # + # PublicSuffix::Domain.new("com", "google", "www").domain + # # => "google.com" + # + # PublicSuffix::Domain.new("com", "google", "www").sld + # # => "google" + # + # @see #domain? + # @see #subdomain + # + # @return [String] + def domain + [@sld, @tld].join(DOT) if domain? + end + + # Returns a subdomain-like representation of this object + # if the object is a {#subdomain?}, nil otherwise. + # + # PublicSuffix::Domain.new("com").subdomain + # # => nil + # + # PublicSuffix::Domain.new("com", "google").subdomain + # # => nil + # + # PublicSuffix::Domain.new("com", "google", "www").subdomain + # # => "www.google.com" + # + # This method doesn't validate the input. 
It handles the domain + # as a valid domain name and simply applies the necessary transformations. + # + # This method returns a FQD, not just the subdomain part. + # To get the subdomain part, use #trd (aka third level domain). + # + # PublicSuffix::Domain.new("com", "google", "www").subdomain + # # => "www.google.com" + # + # PublicSuffix::Domain.new("com", "google", "www").trd + # # => "www" + # + # @see #subdomain? + # @see #domain + # + # @return [String] + def subdomain + [@trd, @sld, @tld].join(DOT) if subdomain? + end + + # Checks whether self looks like a domain. + # + # This method doesn't actually validate the domain. + # It only checks whether the instance contains + # a value for the {#tld} and {#sld} attributes. + # + # @example + # + # PublicSuffix::Domain.new("com").domain? + # # => false + # + # PublicSuffix::Domain.new("com", "google").domain? + # # => true + # + # PublicSuffix::Domain.new("com", "google", "www").domain? + # # => true + # + # # This is an invalid domain, but returns true + # # because this method doesn't validate the content. + # PublicSuffix::Domain.new("com", nil).domain? + # # => true + # + # @see #subdomain? + # + # @return [Boolean] + def domain? + !(@tld.nil? || @sld.nil?) + end + + # Checks whether self looks like a subdomain. + # + # This method doesn't actually validate the subdomain. + # It only checks whether the instance contains + # a value for the {#tld}, {#sld} and {#trd} attributes. + # If you also want to validate the domain, + # use {#valid_subdomain?} instead. + # + # @example + # + # PublicSuffix::Domain.new("com").subdomain? + # # => false + # + # PublicSuffix::Domain.new("com", "google").subdomain? + # # => false + # + # PublicSuffix::Domain.new("com", "google", "www").subdomain? + # # => true + # + # # This is an invalid domain, but returns true + # # because this method doesn't validate the content. + # PublicSuffix::Domain.new("com", "example", nil).subdomain? + # # => true + # + # @see #domain? + # + # @return [Boolean] + def subdomain? + !(@tld.nil? || @sld.nil? || @trd.nil?) + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/errors.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/errors.rb new file mode 100644 index 0000000..2d5798d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/errors.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + class Error < StandardError + end + + # Raised when trying to parse an invalid name. + # A name is considered invalid when no rule is found in the definition list. + # + # @example + # + # PublicSuffix.parse("nic.test") + # # => PublicSuffix::DomainInvalid + # + # PublicSuffix.parse("http://www.nic.it") + # # => PublicSuffix::DomainInvalid + # + class DomainInvalid < Error + end + + # Raised when trying to parse a name that matches a suffix. 
+ # + # @example + # + # PublicSuffix.parse("nic.do") + # # => PublicSuffix::DomainNotAllowed + # + # PublicSuffix.parse("www.nic.do") + # # => PublicSuffix::Domain + # + class DomainNotAllowed < DomainInvalid + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/list.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/list.rb new file mode 100644 index 0000000..321b59a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/list.rb @@ -0,0 +1,247 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + # A {PublicSuffix::List} is a collection of one + # or more {PublicSuffix::Rule}. + # + # Given a {PublicSuffix::List}, + # you can add or remove {PublicSuffix::Rule}, + # iterate all items in the list or search for the first rule + # which matches a specific domain name. + # + # # Create a new list + # list = PublicSuffix::List.new + # + # # Push two rules to the list + # list << PublicSuffix::Rule.factory("it") + # list << PublicSuffix::Rule.factory("com") + # + # # Get the size of the list + # list.size + # # => 2 + # + # # Search for the rule matching given domain + # list.find("example.com") + # # => # + # list.find("example.org") + # # => nil + # + # You can create as many {PublicSuffix::List} you want. + # The {PublicSuffix::List.default} rule list is used + # to tokenize and validate a domain. + # + class List + + DEFAULT_LIST_PATH = File.expand_path("../../data/list.txt", __dir__) + + # Gets the default rule list. + # + # Initializes a new {PublicSuffix::List} parsing the content + # of {PublicSuffix::List.default_list_content}, if required. + # + # @return [PublicSuffix::List] + def self.default(**options) + @default ||= parse(File.read(DEFAULT_LIST_PATH), **options) + end + + # Sets the default rule list to +value+. + # + # @param value [PublicSuffix::List] the new list + # @return [PublicSuffix::List] + def self.default=(value) + @default = value + end + + # Parse given +input+ treating the content as Public Suffix List. + # + # See http://publicsuffix.org/format/ for more details about input format. + # + # @param string [#each_line] the list to parse + # @param private_domains [Boolean] whether to ignore the private domains section + # @return [PublicSuffix::List] + def self.parse(input, private_domains: true) + comment_token = "//" + private_token = "===BEGIN PRIVATE DOMAINS===" + section = nil # 1 == ICANN, 2 == PRIVATE + + new do |list| + input.each_line do |line| + line.strip! + case # rubocop:disable Style/EmptyCaseCondition + + # skip blank lines + when line.empty? + next + + # include private domains or stop scanner + when line.include?(private_token) + break if !private_domains + + section = 2 + + # skip comments + when line.start_with?(comment_token) + next + + else + list.add(Rule.factory(line, private: section == 2)) + + end + end + end + end + + + # Initializes an empty {PublicSuffix::List}. + # + # @yield [self] Yields on self. + # @yieldparam [PublicSuffix::List] self The newly created instance. + def initialize + @rules = {} + yield(self) if block_given? + end + + + # Checks whether two lists are equal. + # + # List one is equal to two, if two is an instance of + # {PublicSuffix::List} and each +PublicSuffix::Rule::*+ + # in list one is available in list two, in the same order. 
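+ #
+ #   # A quick sketch (two lists built from the same rule compare equal):
+ #   PublicSuffix::List.parse("com") == PublicSuffix::List.parse("com")
+ #   # => true
+ #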
+ # + # @param other [PublicSuffix::List] the List to compare + # @return [Boolean] + def ==(other) + return false unless other.is_a?(List) + + equal?(other) || @rules == other.rules + end + alias eql? == + + # Iterates each rule in the list. + def each(&block) + Enumerator.new do |y| + @rules.each do |key, node| + y << entry_to_rule(node, key) + end + end.each(&block) + end + + + # Adds the given object to the list and optionally refreshes the rule index. + # + # @param rule [PublicSuffix::Rule::*] the rule to add to the list + # @return [self] + def add(rule) + @rules[rule.value] = rule_to_entry(rule) + self + end + alias << add + + # Gets the number of rules in the list. + # + # @return [Integer] + def size + @rules.size + end + + # Checks whether the list is empty. + # + # @return [Boolean] + def empty? + @rules.empty? + end + + # Removes all rules. + # + # @return [self] + def clear + @rules.clear + self + end + + # Finds and returns the rule corresponding to the longest public suffix for the hostname. + # + # @param name [#to_s] the hostname + # @param default [PublicSuffix::Rule::*] the default rule to return in case no rule matches + # @return [PublicSuffix::Rule::*] + def find(name, default: default_rule, **options) + rule = select(name, **options).inject do |l, r| + return r if r.class == Rule::Exception + + l.length > r.length ? l : r + end + rule || default + end + + # Selects all the rules matching given hostame. + # + # If `ignore_private` is set to true, the algorithm will skip the rules that are flagged as + # private domain. Note that the rules will still be part of the loop. + # If you frequently need to access lists ignoring the private domains, + # you should create a list that doesn't include these domains setting the + # `private_domains: false` option when calling {.parse}. + # + # Note that this method is currently private, as you should not rely on it. Instead, + # the public interface is {#find}. The current internal algorithm allows to return all + # matching rules, but different data structures may not be able to do it, and instead would + # return only the match. For this reason, you should rely on {#find}. + # + # @param name [#to_s] the hostname + # @param ignore_private [Boolean] + # @return [Array] + def select(name, ignore_private: false) + name = name.to_s + + parts = name.split(DOT).reverse! + index = 0 + query = parts[index] + rules = [] + + loop do + match = @rules[query] + rules << entry_to_rule(match, query) if !match.nil? && (ignore_private == false || match.private == false) + + index += 1 + break if index >= parts.size + + query = parts[index] + DOT + query + end + + rules + end + private :select + + # Gets the default rule. 
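+ #
+ # A rough trace of the #select lookup above (assuming a list that contains
+ # the "uk" and "co.uk" rules): for "www.example.co.uk" the rule index is
+ # probed with, in order,
+ #
+ #   "uk", "co.uk", "example.co.uk", "www.example.co.uk"
+ #
+ # and every query present in @rules is collected, after which #find keeps
+ # the longest match (an exception rule, if present, wins immediately).
+ #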
+ # + # @see PublicSuffix::Rule.default_rule + # + # @return [PublicSuffix::Rule::*] + def default_rule + PublicSuffix::Rule.default + end + + + protected + + attr_reader :rules + + + private + + def entry_to_rule(entry, value) + entry.type.new(value: value, length: entry.length, private: entry.private) + end + + def rule_to_entry(rule) + Rule::Entry.new(rule.class, rule.length, rule.private) + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/rule.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/rule.rb new file mode 100644 index 0000000..d4c32ca --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/rule.rb @@ -0,0 +1,350 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + # A Rule is a special object which holds a single definition + # of the Public Suffix List. + # + # There are 3 types of rules, each one represented by a specific + # subclass within the +PublicSuffix::Rule+ namespace. + # + # To create a new Rule, use the {PublicSuffix::Rule#factory} method. + # + # PublicSuffix::Rule.factory("ar") + # # => # + # + module Rule + + # @api internal + Entry = Struct.new(:type, :length, :private) # rubocop:disable Lint/StructNewOverride + + # = Abstract rule class + # + # This represent the base class for a Rule definition + # in the {Public Suffix List}[https://publicsuffix.org]. + # + # This is intended to be an Abstract class + # and you shouldn't create a direct instance. The only purpose + # of this class is to expose a common interface + # for all the available subclasses. + # + # * {PublicSuffix::Rule::Normal} + # * {PublicSuffix::Rule::Exception} + # * {PublicSuffix::Rule::Wildcard} + # + # ## Properties + # + # A rule is composed by 4 properties: + # + # value - A normalized version of the rule name. + # The normalization process depends on rule tpe. + # + # Here's an example + # + # PublicSuffix::Rule.factory("*.google.com") + # # + # + # ## Rule Creation + # + # The best way to create a new rule is passing the rule name + # to the PublicSuffix::Rule.factory method. + # + # PublicSuffix::Rule.factory("com") + # # => PublicSuffix::Rule::Normal + # + # PublicSuffix::Rule.factory("*.com") + # # => PublicSuffix::Rule::Wildcard + # + # This method will detect the rule type and create an instance + # from the proper rule class. + # + # ## Rule Usage + # + # A rule describes the composition of a domain name and explains how to tokenize + # the name into tld, sld and trd. + # + # To use a rule, you first need to be sure the name you want to tokenize + # can be handled by the current rule. + # You can use the #match? method. + # + # rule = PublicSuffix::Rule.factory("com") + # + # rule.match?("google.com") + # # => true + # + # rule.match?("google.com") + # # => false + # + # Rule order is significant. A name can match more than one rule. + # See the {Public Suffix Documentation}[http://publicsuffix.org/format/] + # to learn more about rule priority. + # + # When you have the right rule, you can use it to tokenize the domain name. 
+ # + # rule = PublicSuffix::Rule.factory("com") + # + # rule.decompose("google.com") + # # => ["google", "com"] + # + # rule.decompose("www.google.com") + # # => ["www.google", "com"] + # + # @abstract + # + class Base + + # @return [String] the rule definition + attr_reader :value + + # @return [String] the length of the rule + attr_reader :length + + # @return [Boolean] true if the rule is a private domain + attr_reader :private + + + # Initializes a new rule from the content. + # + # @param content [String] the content of the rule + # @param private [Boolean] + def self.build(content, private: false) + new(value: content, private: private) + end + + # Initializes a new rule. + # + # @param value [String] + # @param private [Boolean] + def initialize(value:, length: nil, private: false) + @value = value.to_s + @length = length || @value.count(DOT) + 1 + @private = private + end + + # Checks whether this rule is equal to other. + # + # @param [PublicSuffix::Rule::*] other The rule to compare + # @return [Boolean] + # Returns true if this rule and other are instances of the same class + # and has the same value, false otherwise. + def ==(other) + equal?(other) || (self.class == other.class && value == other.value) + end + alias eql? == + + # Checks if this rule matches +name+. + # + # A domain name is said to match a rule if and only if + # all of the following conditions are met: + # + # - When the domain and rule are split into corresponding labels, + # that the domain contains as many or more labels than the rule. + # - Beginning with the right-most labels of both the domain and the rule, + # and continuing for all labels in the rule, one finds that for every pair, + # either they are identical, or that the label from the rule is "*". + # + # @see https://publicsuffix.org/list/ + # + # @example + # PublicSuffix::Rule.factory("com").match?("example.com") + # # => true + # PublicSuffix::Rule.factory("com").match?("example.net") + # # => false + # + # @param name [String] the domain name to check + # @return [Boolean] + def match?(name) + # Note: it works because of the assumption there are no + # rules like foo.*.com. If the assumption is incorrect, + # we need to properly walk the input and skip parts according + # to wildcard component. + diff = name.chomp(value) + diff.empty? || diff.end_with?(DOT) + end + + # @abstract + def parts + raise NotImplementedError + end + + # @abstract + # @param [String, #to_s] name The domain name to decompose + # @return [Array] + def decompose(*) + raise NotImplementedError + end + + end + + # Normal represents a standard rule (e.g. com). + class Normal < Base + + # Gets the original rule definition. + # + # @return [String] The rule definition. + def rule + value + end + + # Decomposes the domain name according to rule properties. + # + # @param [String, #to_s] name The domain name to decompose + # @return [Array] The array with [trd + sld, tld]. + def decompose(domain) + suffix = parts.join('\.') + matches = domain.to_s.match(/^(.*)\.(#{suffix})$/) + matches ? matches[1..2] : [nil, nil] + end + + # dot-split rule value and returns all rule parts + # in the order they appear in the value. + # + # @return [Array] + def parts + @value.split(DOT) + end + + end + + # Wildcard represents a wildcard rule (e.g. *.co.uk). + class Wildcard < Base + + # Initializes a new rule from the content. 
+ # + # @param content [String] the content of the rule + # @param private [Boolean] + def self.build(content, private: false) + new(value: content.to_s[2..-1], private: private) + end + + # Initializes a new rule. + # + # @param value [String] + # @param private [Boolean] + def initialize(value:, length: nil, private: false) + super(value: value, length: length, private: private) + length or @length += 1 # * counts as 1 + end + + # Gets the original rule definition. + # + # @return [String] The rule definition. + def rule + value == "" ? STAR : STAR + DOT + value + end + + # Decomposes the domain name according to rule properties. + # + # @param [String, #to_s] name The domain name to decompose + # @return [Array] The array with [trd + sld, tld]. + def decompose(domain) + suffix = ([".*?"] + parts).join('\.') + matches = domain.to_s.match(/^(.*)\.(#{suffix})$/) + matches ? matches[1..2] : [nil, nil] + end + + # dot-split rule value and returns all rule parts + # in the order they appear in the value. + # + # @return [Array] + def parts + @value.split(DOT) + end + + end + + # Exception represents an exception rule (e.g. !parliament.uk). + class Exception < Base + + # Initializes a new rule from the content. + # + # @param content [String] the content of the rule + # @param private [Boolean] + def self.build(content, private: false) + new(value: content.to_s[1..-1], private: private) + end + + # Gets the original rule definition. + # + # @return [String] The rule definition. + def rule + BANG + value + end + + # Decomposes the domain name according to rule properties. + # + # @param [String, #to_s] name The domain name to decompose + # @return [Array] The array with [trd + sld, tld]. + def decompose(domain) + suffix = parts.join('\.') + matches = domain.to_s.match(/^(.*)\.(#{suffix})$/) + matches ? matches[1..2] : [nil, nil] + end + + # dot-split rule value and returns all rule parts + # in the order they appear in the value. + # The leftmost label is not considered a label. + # + # See http://publicsuffix.org/format/: + # If the prevailing rule is a exception rule, + # modify it by removing the leftmost label. + # + # @return [Array] + def parts + @value.split(DOT)[1..-1] + end + + end + + + # Takes the +name+ of the rule, detects the specific rule class + # and creates a new instance of that class. + # The +name+ becomes the rule +value+. + # + # @example Creates a Normal rule + # PublicSuffix::Rule.factory("ar") + # # => # + # + # @example Creates a Wildcard rule + # PublicSuffix::Rule.factory("*.ar") + # # => # + # + # @example Creates an Exception rule + # PublicSuffix::Rule.factory("!congresodelalengua3.ar") + # # => # + # + # @param [String] content The rule content. + # @return [PublicSuffix::Rule::*] A rule instance. + def self.factory(content, private: false) + case content.to_s[0, 1] + when STAR + Wildcard + when BANG + Exception + else + Normal + end.build(content, private: private) + end + + # The default rule to use if no rule match. + # + # The default rule is "*". From https://publicsuffix.org/list/: + # + # > If no rules match, the prevailing rule is "*". + # + # @return [PublicSuffix::Rule::Wildcard] The default rule. 
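+ #
+ # For illustration (assuming the bundled default list, which has no rule
+ # for "tldnotlisted"), this fallback "*" rule is what lets unlisted names
+ # parse at all:
+ #
+ #   PublicSuffix.parse("example.tldnotlisted").tld
+ #   # => "tldnotlisted"
+ #
+ #   PublicSuffix.parse("example.tldnotlisted", default_rule: nil)
+ #   # => PublicSuffix::DomainInvalid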
+ def self.default + factory(STAR) + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/version.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/version.rb new file mode 100644 index 0000000..c9f93f1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/lib/public_suffix/version.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +# +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + # The current library version. + VERSION = "4.0.6" +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/public_suffix.gemspec b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/public_suffix.gemspec new file mode 100644 index 0000000..ae552fc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/public_suffix.gemspec @@ -0,0 +1,29 @@ +# -*- encoding: utf-8 -*- +$LOAD_PATH.push File.expand_path("../lib", __FILE__) +require "public_suffix/version" + +Gem::Specification.new do |s| + s.name = "public_suffix" + s.version = PublicSuffix::VERSION + s.authors = ["Simone Carletti"] + s.email = ["weppos@weppos.net"] + s.homepage = "https://simonecarletti.com/code/publicsuffix-ruby" + s.summary = "Domain name parser based on the Public Suffix List." + s.description = "PublicSuffix can parse and decompose a domain name into top level domain, domain and subdomains." + s.licenses = ["MIT"] + + s.metadata = { + "bug_tracker_uri" => "https://github.com/weppos/publicsuffix-ruby/issues", + "changelog_uri" => "https://github.com/weppos/publicsuffix-ruby/blob/master/CHANGELOG.md", + "documentation_uri" => "https://rubydoc.info/gems/#{s.name}/#{s.version}", + "homepage_uri" => s.homepage, + "source_code_uri" => "https://github.com/weppos/publicsuffix-ruby/tree/v#{s.version}", + } + + s.required_ruby_version = ">= 2.3" + + s.require_paths = ["lib"] + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") + s.extra_rdoc_files = %w( LICENSE.txt ) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/.empty b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/.empty new file mode 100644 index 0000000..e322015 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/.empty @@ -0,0 +1,2 @@ +# This is an empty file I use to force a non-empty commit when I only need to store notes +.. 
\ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/acceptance_test.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/acceptance_test.rb new file mode 100644 index 0000000..371bfe1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/acceptance_test.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +require "test_helper" + +class AcceptanceTest < Minitest::Test + + VALID_CASES = [ + ["example.com", "example.com", [nil, "example", "com"]], + ["foo.example.com", "example.com", ["foo", "example", "com"]], + + ["verybritish.co.uk", "verybritish.co.uk", [nil, "verybritish", "co.uk"]], + ["foo.verybritish.co.uk", "verybritish.co.uk", ["foo", "verybritish", "co.uk"]], + + ["parliament.uk", "parliament.uk", [nil, "parliament", "uk"]], + ["foo.parliament.uk", "parliament.uk", ["foo", "parliament", "uk"]], + ].freeze + + def test_valid + VALID_CASES.each do |input, domain, results| + parsed = PublicSuffix.parse(input) + trd, sld, tld = results + assert_equal tld, parsed.tld, "Invalid tld for `#{name}`" + assert_equal sld, parsed.sld, "Invalid sld for `#{name}`" + if trd.nil? + assert_nil parsed.trd, "Invalid trd for `#{name}`" + else + assert_equal trd, parsed.trd, "Invalid trd for `#{name}`" + end + + assert_equal domain, PublicSuffix.domain(input) + assert PublicSuffix.valid?(input) + end + end + + + INVALID_CASES = [ + ["nic.bd", PublicSuffix::DomainNotAllowed], + [nil, PublicSuffix::DomainInvalid], + ["", PublicSuffix::DomainInvalid], + [" ", PublicSuffix::DomainInvalid], + ].freeze + + def test_invalid + INVALID_CASES.each do |(name, error)| + assert_raises(error) { PublicSuffix.parse(name) } + assert !PublicSuffix.valid?(name) + end + end + + + REJECTED_CASES = [ + ["www. .com", true], + ["foo.co..uk", true], + ["goo,gle.com", true], + ["-google.com", true], + ["google-.com", true], + + # This case was covered in GH-15. + # I decided to cover this case because it's not easily reproducible with URI.parse + # and can lead to several false positives. + ["http://google.com", false], + ].freeze + + def test_rejected + REJECTED_CASES.each do |name, expected| + assert_equal expected, PublicSuffix.valid?(name), + "Expected %s to be %s" % [name.inspect, expected.inspect] + assert !valid_domain?(name), + "#{name} expected to be invalid" + end + end + + + CASE_CASES = [ + ["Www.google.com", %w( www google com )], + ["www.Google.com", %w( www google com )], + ["www.google.Com", %w( www google com )], + ].freeze + + def test_ignore_case + CASE_CASES.each do |name, results| + domain = PublicSuffix.parse(name) + trd, sld, tld = results + assert_equal tld, domain.tld, "Invalid tld for `#{name}'" + assert_equal sld, domain.sld, "Invalid sld for `#{name}'" + assert_equal trd, domain.trd, "Invalid trd for `#{name}'" + assert PublicSuffix.valid?(name) + end + end + + + INCLUDE_PRIVATE_CASES = [ + ["blogspot.com", true, "blogspot.com"], + ["blogspot.com", false, nil], + ["subdomain.blogspot.com", true, "blogspot.com"], + ["subdomain.blogspot.com", false, "subdomain.blogspot.com"], + ].freeze + + # rubocop:disable Style/CombinableLoops + def test_ignore_private + # test domain and parse + INCLUDE_PRIVATE_CASES.each do |given, ignore_private, expected| + if expected.nil? + assert_nil PublicSuffix.domain(given, ignore_private: ignore_private) + else + assert_equal expected, PublicSuffix.domain(given, ignore_private: ignore_private) + end + end + # test valid? 
+ INCLUDE_PRIVATE_CASES.each do |given, ignore_private, expected| + assert_equal !expected.nil?, PublicSuffix.valid?(given, ignore_private: ignore_private) + end + end + # rubocop:enable Style/CombinableLoops + + + def valid_uri?(name) + uri = URI.parse(name) + !uri.host.nil? + rescue StandardError + false + end + + def valid_domain?(name) + uri = URI.parse(name) + !uri.host.nil? && uri.scheme.nil? + rescue StandardError + false + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find.rb new file mode 100644 index 0000000..0074f1d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find.rb @@ -0,0 +1,66 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +NAME_SHORT = "example.de" +NAME_MEDIUM = "www.subdomain.example.de" +NAME_LONG = "one.two.three.four.five.example.de" +NAME_WILD = "one.two.three.four.five.example.bd" +NAME_EXCP = "one.two.three.four.five.www.ck" + +IAAA = "www.example.ac" +IZZZ = "www.example.zone" + +PAAA = "one.two.three.four.five.example.beep.pl" +PZZZ = "one.two.three.four.five.example.now.sh" + +JP = "www.yokoshibahikari.chiba.jp" +IT = "www.example.it" +COM = "www.example.com" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.find("example.com") + +Benchmark.bmbm(25) do |x| + x.report("NAME_SHORT") do + TIMES.times { PublicSuffixList.find(NAME_SHORT) != nil } + end + x.report("NAME_MEDIUM") do + TIMES.times { PublicSuffixList.find(NAME_MEDIUM) != nil } + end + x.report("NAME_LONG") do + TIMES.times { PublicSuffixList.find(NAME_LONG) != nil } + end + x.report("NAME_WILD") do + TIMES.times { PublicSuffixList.find(NAME_WILD) != nil } + end + x.report("NAME_EXCP") do + TIMES.times { PublicSuffixList.find(NAME_EXCP) != nil } + end + + x.report("IAAA") do + TIMES.times { PublicSuffixList.find(IAAA) != nil } + end + x.report("IZZZ") do + TIMES.times { PublicSuffixList.find(IZZZ) != nil } + end + + x.report("PAAA") do + TIMES.times { PublicSuffixList.find(PAAA) != nil } + end + x.report("PZZZ") do + TIMES.times { PublicSuffixList.find(PZZZ) != nil } + end + + x.report("JP") do + TIMES.times { PublicSuffixList.find(JP) != nil } + end + x.report("IT") do + TIMES.times { PublicSuffixList.find(IT) != nil } + end + x.report("COM") do + TIMES.times { PublicSuffixList.find(COM) != nil } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find_all.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find_all.rb new file mode 100644 index 0000000..0bcfd42 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find_all.rb @@ -0,0 +1,102 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +NAME_SHORT = "example.de" +NAME_MEDIUM = "www.subdomain.example.de" +NAME_LONG = "one.two.three.four.five.example.de" +NAME_WILD = "one.two.three.four.five.example.bd" +NAME_EXCP = "one.two.three.four.five.www.ck" + +IAAA = "www.example.ac" +IZZZ = "www.example.zone" + +PAAA = "one.two.three.four.five.example.beep.pl" +PZZZ = "one.two.three.four.five.example.now.sh" + +JP = "www.yokoshibahikari.chiba.jp" +IT = "www.example.it" +COM = "www.example.com" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.find("example.com") + +Benchmark.bmbm(25) do |x| + x.report("NAME_SHORT") 
do + TIMES.times { PublicSuffixList.find(NAME_SHORT) != nil } + end + x.report("NAME_SHORT (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_SHORT, ignore_private: true) != nil } + end + x.report("NAME_MEDIUM") do + TIMES.times { PublicSuffixList.find(NAME_MEDIUM) != nil } + end + x.report("NAME_MEDIUM (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_MEDIUM, ignore_private: true) != nil } + end + x.report("NAME_LONG") do + TIMES.times { PublicSuffixList.find(NAME_LONG) != nil } + end + x.report("NAME_LONG (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_LONG, ignore_private: true) != nil } + end + x.report("NAME_WILD") do + TIMES.times { PublicSuffixList.find(NAME_WILD) != nil } + end + x.report("NAME_WILD (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_WILD, ignore_private: true) != nil } + end + x.report("NAME_EXCP") do + TIMES.times { PublicSuffixList.find(NAME_EXCP) != nil } + end + x.report("NAME_EXCP (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_EXCP, ignore_private: true) != nil } + end + + x.report("IAAA") do + TIMES.times { PublicSuffixList.find(IAAA) != nil } + end + x.report("IAAA (noprivate)") do + TIMES.times { PublicSuffixList.find(IAAA, ignore_private: true) != nil } + end + x.report("IZZZ") do + TIMES.times { PublicSuffixList.find(IZZZ) != nil } + end + x.report("IZZZ (noprivate)") do + TIMES.times { PublicSuffixList.find(IZZZ, ignore_private: true) != nil } + end + + x.report("PAAA") do + TIMES.times { PublicSuffixList.find(PAAA) != nil } + end + x.report("PAAA (noprivate)") do + TIMES.times { PublicSuffixList.find(PAAA, ignore_private: true) != nil } + end + x.report("PZZZ") do + TIMES.times { PublicSuffixList.find(PZZZ) != nil } + end + x.report("PZZZ (noprivate)") do + TIMES.times { PublicSuffixList.find(PZZZ, ignore_private: true) != nil } + end + + x.report("JP") do + TIMES.times { PublicSuffixList.find(JP) != nil } + end + x.report("JP (noprivate)") do + TIMES.times { PublicSuffixList.find(JP, ignore_private: true) != nil } + end + x.report("IT") do + TIMES.times { PublicSuffixList.find(IT) != nil } + end + x.report("IT (noprivate)") do + TIMES.times { PublicSuffixList.find(IT, ignore_private: true) != nil } + end + x.report("COM") do + TIMES.times { PublicSuffixList.find(COM) != nil } + end + x.report("COM (noprivate)") do + TIMES.times { PublicSuffixList.find(COM, ignore_private: true) != nil } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_names.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_names.rb new file mode 100644 index 0000000..36b2bce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_names.rb @@ -0,0 +1,91 @@ +require 'benchmark/ips' + +STRING = "www.subdomain.example.com" +ARRAY = %w( + com + example.com + subdomain.example.com + www.subdomain.example.com +) + +def tokenizer1(string) + parts = string.split(".").reverse! + index = 0 + query = parts[index] + names = [] + + loop do + names << query + + index += 1 + break if index >= parts.size + query = parts[index] + "." + query + end + names +end + +def tokenizer2(string) + parts = string.split(".") + index = parts.size - 1 + query = parts[index] + names = [] + + loop do + names << query + + index -= 1 + break if index < 0 + query = parts[index] + "." 
+ query + end + names +end + +def tokenizer3(string) + isx = string.size + idx = string.size - 1 + names = [] + + loop do + isx = string.rindex(".", isx - 1) || -1 + names << string[isx + 1, idx - isx] + + break if isx <= 0 + end + names +end + +def tokenizer4(string) + isx = string.size + idx = string.size - 1 + names = [] + + loop do + isx = string.rindex(".", isx - 1) || -1 + names << string[(isx+1)..idx] + + break if isx <= 0 + end + names +end + +(x = tokenizer1(STRING)) == ARRAY or fail("tokenizer1 failed: #{x.inspect}") +(x = tokenizer2(STRING)) == ARRAY or fail("tokenizer2 failed: #{x.inspect}") +(x = tokenizer3(STRING)) == ARRAY or fail("tokenizer3 failed: #{x.inspect}") +(x = tokenizer4(STRING)) == ARRAY or fail("tokenizer4 failed: #{x.inspect}") + +Benchmark.ips do |x| + x.report("tokenizer1") do + tokenizer1(STRING).is_a?(Array) + end + x.report("tokenizer2") do + tokenizer2(STRING).is_a?(Array) + end + x.report("tokenizer3") do + tokenizer3(STRING).is_a?(Array) + end + x.report("tokenizer4") do + tokenizer4(STRING).is_a?(Array) + end + + x.compare! +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select.rb new file mode 100644 index 0000000..66d908b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select.rb @@ -0,0 +1,26 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +JP = "www.yokoshibahikari.chiba.jp" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +class PublicSuffix::List + public :select +end +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.select("example.jp") +PublicSuffixList.find("example.jp") + +Benchmark.bmbm(25) do |x| + x.report("JP select") do + TIMES.times { PublicSuffixList.select(JP) } + end + x.report("JP find") do + TIMES.times { PublicSuffixList.find(JP) } + end + # x.report("JP (noprivate)") do + # TIMES.times { PublicSuffixList.find(JP, ignore_private: true) != nil } + # end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select_incremental.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select_incremental.rb new file mode 100644 index 0000000..f002c82 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select_incremental.rb @@ -0,0 +1,25 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +JP = "www.yokoshibahikari.chiba.jp" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +class PublicSuffix::List + public :select +end +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.select("example.jp") + +Benchmark.bmbm(25) do |x| + x.report("select jp") do + TIMES.times { PublicSuffixList.select("jp") } + end + x.report("select example.jp") do + TIMES.times { PublicSuffixList.select("example.jp") } + end + x.report("select www.example.jp") do + TIMES.times { PublicSuffixList.select("www.example.jp") } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_valid.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_valid.rb new file mode 100644 index 0000000..a484451 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/benchmarks/bm_valid.rb @@ -0,0 +1,101 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +NAME_SHORT = "example.de" +NAME_MEDIUM = "www.subdomain.example.de" +NAME_LONG = "one.two.three.four.five.example.de" 
+NAME_WILD = "one.two.three.four.five.example.bd" +NAME_EXCP = "one.two.three.four.five.www.ck" + +IAAA = "www.example.ac" +IZZZ = "www.example.zone" + +PAAA = "one.two.three.four.five.example.beep.pl" +PZZZ = "one.two.three.four.five.example.now.sh" + +JP = "www.yokoshibahikari.chiba.jp" +IT = "www.example.it" +COM = "www.example.com" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +PublicSuffix.valid?("example.com") + +Benchmark.bmbm(25) do |x| + x.report("NAME_SHORT") do + TIMES.times { PublicSuffix.valid?(NAME_SHORT) == true } + end + x.report("NAME_SHORT (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_SHORT, ignore_private: true) == true } + end + x.report("NAME_MEDIUM") do + TIMES.times { PublicSuffix.valid?(NAME_MEDIUM) == true } + end + x.report("NAME_MEDIUM (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_MEDIUM, ignore_private: true) == true } + end + x.report("NAME_LONG") do + TIMES.times { PublicSuffix.valid?(NAME_LONG) == true } + end + x.report("NAME_LONG (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_LONG, ignore_private: true) == true } + end + x.report("NAME_WILD") do + TIMES.times { PublicSuffix.valid?(NAME_WILD) == true } + end + x.report("NAME_WILD (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_WILD, ignore_private: true) == true } + end + x.report("NAME_EXCP") do + TIMES.times { PublicSuffix.valid?(NAME_EXCP) == true } + end + x.report("NAME_EXCP (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_EXCP, ignore_private: true) == true } + end + + x.report("IAAA") do + TIMES.times { PublicSuffix.valid?(IAAA) == true } + end + x.report("IAAA (noprivate)") do + TIMES.times { PublicSuffix.valid?(IAAA, ignore_private: true) == true } + end + x.report("IZZZ") do + TIMES.times { PublicSuffix.valid?(IZZZ) == true } + end + x.report("IZZZ (noprivate)") do + TIMES.times { PublicSuffix.valid?(IZZZ, ignore_private: true) == true } + end + + x.report("PAAA") do + TIMES.times { PublicSuffix.valid?(PAAA) == true } + end + x.report("PAAA (noprivate)") do + TIMES.times { PublicSuffix.valid?(PAAA, ignore_private: true) == true } + end + x.report("PZZZ") do + TIMES.times { PublicSuffix.valid?(PZZZ) == true } + end + x.report("PZZZ (noprivate)") do + TIMES.times { PublicSuffix.valid?(PZZZ, ignore_private: true) == true } + end + + x.report("JP") do + TIMES.times { PublicSuffix.valid?(JP) == true } + end + x.report("JP (noprivate)") do + TIMES.times { PublicSuffix.valid?(JP, ignore_private: true) == true } + end + x.report("IT") do + TIMES.times { PublicSuffix.valid?(IT) == true } + end + x.report("IT (noprivate)") do + TIMES.times { PublicSuffix.valid?(IT, ignore_private: true) == true } + end + x.report("COM") do + TIMES.times { PublicSuffix.valid?(COM) == true } + end + x.report("COM (noprivate)") do + TIMES.times { PublicSuffix.valid?(COM, ignore_private: true) == true } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/domain_profiler.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/domain_profiler.rb new file mode 100644 index 0000000..1ed1050 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/domain_profiler.rb @@ -0,0 +1,12 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +PublicSuffix::List.default + +report = MemoryProfiler.report do + PublicSuffix.domain("www.example.com") +end + +report.pretty_print diff --git 
a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/find_profiler.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/find_profiler.rb new file mode 100644 index 0000000..53d28eb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/find_profiler.rb @@ -0,0 +1,12 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +PublicSuffix::List.default + +report = MemoryProfiler.report do + PublicSuffix::List.default.find("www.example.com") +end + +report.pretty_print diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/find_profiler_jp.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/find_profiler_jp.rb new file mode 100644 index 0000000..65c13fe --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/find_profiler_jp.rb @@ -0,0 +1,12 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +PublicSuffix::List.default + +report = MemoryProfiler.report do + PublicSuffix::List.default.find("a.b.ide.kyoto.jp") +end + +report.pretty_print diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/initialization_profiler.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/initialization_profiler.rb new file mode 100644 index 0000000..008b1e7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/initialization_profiler.rb @@ -0,0 +1,11 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +report = MemoryProfiler.report do + PublicSuffix::List.default +end + +report.pretty_print +# report.pretty_print(to_file: 'profiler-%s-%d.txt' % [ARGV[0], Time.now.to_i]) diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/list_profsize.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/list_profsize.rb new file mode 100644 index 0000000..0b98b4b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/list_profsize.rb @@ -0,0 +1,11 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require_relative "object_binsize" +require "public_suffix" + +list = PublicSuffix::List.default +puts "#{list.size} rules:" + +prof = ObjectBinsize.new +prof.report(PublicSuffix::List.default, label: "PublicSuffix::List size") +prof.report(PublicSuffix::List.default.instance_variable_get(:@rules), label: "Size of rules") diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/object_binsize.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/object_binsize.rb new file mode 100644 index 0000000..dc60bdb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/profilers/object_binsize.rb @@ -0,0 +1,57 @@ +require 'tempfile' + +# A very simple memory profiles that checks the full size of a variable +# by serializing into a binary file. +# +# Yes, I know this is very rough, but there are cases where ObjectSpace.memsize_of +# doesn't cooperate, and this is one of the possible workarounds. +# +# For certain cases, it works (TM). 
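+#
+# A rough comparison sketch (assumes MRI with the `objspace` stdlib; the values
+# are illustrative, not measured):
+#
+#   require "objspace"
+#
+#   value   = { "foo" => "1", "bar" => 2 }
+#   shallow = ObjectSpace.memsize_of(value)  # shallow size of the Hash only
+#   dumped  = Marshal.dump(value).bytesize   # serialized object graph, which is what this class reports
+#
+# The Marshal-based figure follows references, which is the whole point here.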
+class ObjectBinsize + + def measure(var, label: nil) + dump(var, label: label) + end + + def report(var, label: nil, padding: 10) + file = measure(var, label: label) + + size = format_integer(file.size) + name = label || File.basename(file.path) + printf("%#{padding}s %s\n", size, name) + end + + private + + def dump(var, **args) + file = Tempfile.new(args[:label].to_s) + file.write(Marshal.dump(var)) + file + ensure + file.close + end + + def format_integer(int) + int.to_s.reverse.gsub(/...(?=.)/, '\&,').reverse + end + +end + +if __FILE__ == $0 + prof = ObjectBinsize.new + + prof.report(nil, label: "nil") + prof.report(false, label: "false") + prof.report(true, label: "true") + prof.report(0, label: "integer") + prof.report("", label: "empty string") + prof.report({}, label: "empty hash") + prof.report({}, label: "empty array") + + prof.report({ foo: "1" }, label: "hash 1 item (symbol)") + prof.report({ foo: "1", bar: 2 }, label: "hash 2 items (symbol)") + prof.report({ "foo" => "1" }, label: "hash 1 item (string)") + prof.report({ "foo" => "1", "bar" => 2 }, label: "hash 2 items (string)") + + prof.report("big string" * 200, label: "big string * 200") +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/psl_test.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/psl_test.rb new file mode 100644 index 0000000..fae398f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/psl_test.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +require "test_helper" +require "public_suffix" + +# This test runs against the current PSL file and ensures +# the definitions satisfies the test suite. +class PslTest < Minitest::Test + + ROOT = File.expand_path("..", __dir__) + + # rubocop:disable Security/Eval + def self.tests + File.readlines(File.join(ROOT, "test/tests.txt")).map do |line| + line = line.strip + next if line.empty? + next if line.start_with?("//") + + input, output = line.split(", ") + + # handle the case of eval("null"), it must be eval("nil") + input = "nil" if input == "null" + output = "nil" if output == "null" + + input = eval(input) + output = eval(output) + [input, output] + end + end + # rubocop:enable Security/Eval + + + def test_valid + # Parse the PSL and run the tests + data = File.read(PublicSuffix::List::DEFAULT_LIST_PATH) + PublicSuffix::List.default = PublicSuffix::List.parse(data) + + failures = [] + self.class.tests.each do |input, output| + # Punycode domains are not supported ATM + next if input =~ /xn--/ + + domain = PublicSuffix.domain(input) rescue nil + failures << [input, output, domain] if output != domain + end + + message = "The following #{failures.size} tests fail:\n" + failures.each { |i, o, d| message += "Expected %s to be %s, got %s\n" % [i.inspect, o.inspect, d.inspect] } + assert_equal 0, failures.size, message + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/test_helper.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/test_helper.rb new file mode 100644 index 0000000..4f8a447 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/test_helper.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +if ENV["COVERAGE"] + require "simplecov" + SimpleCov.start + + require "codecov" + SimpleCov.formatter = SimpleCov::Formatter::Codecov +end + +require "minitest/autorun" +require "minitest/reporters" +require "mocha/minitest" + +Minitest::Reporters.use! 
Minitest::Reporters::DefaultReporter.new(color: true) + +$LOAD_PATH.unshift File.expand_path("../lib", __dir__) +require "public_suffix" diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/tests.txt b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/tests.txt new file mode 100644 index 0000000..b11150a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/tests.txt @@ -0,0 +1,98 @@ +// Any copyright is dedicated to the Public Domain. +// http://creativecommons.org/publicdomain/zero/1.0/ + +// null input +null, null +// Mixed case +'COM', null +'example.COM', 'example.com' +'WwW.example.COM', 'example.com' +// Leading dot +'.com', null +'.example', null +'.example.com', null +'.example.example', null +// Unlisted TLD +'example', null +'example.example', 'example.example' +'b.example.example', 'example.example' +'a.b.example.example', 'example.example' +// Listed, but non-Internet, TLD +//'local', null +//'example.local', null +//'b.example.local', null +//'a.b.example.local', null +// TLD with only 1 rule +'biz', null +'domain.biz', 'domain.biz' +'b.domain.biz', 'domain.biz' +'a.b.domain.biz', 'domain.biz' +// TLD with some 2-level rules +'com', null +'example.com', 'example.com' +'b.example.com', 'example.com' +'a.b.example.com', 'example.com' +'uk.com', null +'example.uk.com', 'example.uk.com' +'b.example.uk.com', 'example.uk.com' +'a.b.example.uk.com', 'example.uk.com' +'test.ac', 'test.ac' +// TLD with only 1 (wildcard) rule +'mm', null +'c.mm', null +'b.c.mm', 'b.c.mm' +'a.b.c.mm', 'b.c.mm' +// More complex TLD +'jp', null +'test.jp', 'test.jp' +'www.test.jp', 'test.jp' +'ac.jp', null +'test.ac.jp', 'test.ac.jp' +'www.test.ac.jp', 'test.ac.jp' +'kyoto.jp', null +'test.kyoto.jp', 'test.kyoto.jp' +'ide.kyoto.jp', null +'b.ide.kyoto.jp', 'b.ide.kyoto.jp' +'a.b.ide.kyoto.jp', 'b.ide.kyoto.jp' +'c.kobe.jp', null +'b.c.kobe.jp', 'b.c.kobe.jp' +'a.b.c.kobe.jp', 'b.c.kobe.jp' +'city.kobe.jp', 'city.kobe.jp' +'www.city.kobe.jp', 'city.kobe.jp' +// TLD with a wildcard rule and exceptions +'ck', null +'test.ck', null +'b.test.ck', 'b.test.ck' +'a.b.test.ck', 'b.test.ck' +'www.ck', 'www.ck' +'www.www.ck', 'www.ck' +// US K12 +'us', null +'test.us', 'test.us' +'www.test.us', 'test.us' +'ak.us', null +'test.ak.us', 'test.ak.us' +'www.test.ak.us', 'test.ak.us' +'k12.ak.us', null +'test.k12.ak.us', 'test.k12.ak.us' +'www.test.k12.ak.us', 'test.k12.ak.us' +// IDN labels +'食狮.com.cn', '食狮.com.cn' +'食狮.公司.cn', '食狮.公司.cn' +'www.食狮.公司.cn', '食狮.公司.cn' +'shishi.公司.cn', 'shishi.公司.cn' +'公司.cn', null +'食狮.中国', '食狮.中国' +'www.食狮.中国', '食狮.中国' +'shishi.中国', 'shishi.中国' +'中国', null +// Same as above, but punycoded +'xn--85x722f.com.cn', 'xn--85x722f.com.cn' +'xn--85x722f.xn--55qx5d.cn', 'xn--85x722f.xn--55qx5d.cn' +'www.xn--85x722f.xn--55qx5d.cn', 'xn--85x722f.xn--55qx5d.cn' +'shishi.xn--55qx5d.cn', 'shishi.xn--55qx5d.cn' +'xn--55qx5d.cn', null +'xn--85x722f.xn--fiqs8s', 'xn--85x722f.xn--fiqs8s' +'www.xn--85x722f.xn--fiqs8s', 'xn--85x722f.xn--fiqs8s' +'shishi.xn--fiqs8s', 'shishi.xn--fiqs8s' +'xn--fiqs8s', null diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/domain_test.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/domain_test.rb new file mode 100644 index 0000000..968462d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/domain_test.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "test_helper" + +class PublicSuffix::DomainTest < Minitest::Test + + def setup + @klass = 
PublicSuffix::Domain + end + + # Tokenizes given input into labels. + def test_self_name_to_labels + assert_equal %w( someone spaces live com ), + PublicSuffix::Domain.name_to_labels("someone.spaces.live.com") + assert_equal %w( leontina23samiko wiki zoho com ), + PublicSuffix::Domain.name_to_labels("leontina23samiko.wiki.zoho.com") + end + + # Converts input into String. + def test_self_name_to_labels_converts_input_to_string + assert_equal %w( someone spaces live com ), + PublicSuffix::Domain.name_to_labels(:"someone.spaces.live.com") + end + + + def test_initialize_with_tld + domain = @klass.new("com") + assert_equal "com", domain.tld + assert_nil domain.sld + assert_nil domain.trd + end + + def test_initialize_with_tld_and_sld + domain = @klass.new("com", "google") + assert_equal "com", domain.tld + assert_equal "google", domain.sld + assert_nil domain.trd + end + + def test_initialize_with_tld_and_sld_and_trd + domain = @klass.new("com", "google", "www") + assert_equal "com", domain.tld + assert_equal "google", domain.sld + assert_equal "www", domain.trd + end + + + def test_to_s + assert_equal "com", @klass.new("com").to_s + assert_equal "google.com", @klass.new("com", "google").to_s + assert_equal "www.google.com", @klass.new("com", "google", "www").to_s + end + + def test_to_a + assert_equal [nil, nil, "com"], @klass.new("com").to_a + assert_equal [nil, "google", "com"], @klass.new("com", "google").to_a + assert_equal ["www", "google", "com"], @klass.new("com", "google", "www").to_a + end + + + def test_tld + assert_equal "com", @klass.new("com", "google", "www").tld + end + + def test_sld + assert_equal "google", @klass.new("com", "google", "www").sld + end + + def test_trd + assert_equal "www", @klass.new("com", "google", "www").trd + end + + + def test_name + assert_equal "com", @klass.new("com").name + assert_equal "google.com", @klass.new("com", "google").name + assert_equal "www.google.com", @klass.new("com", "google", "www").name + end + + def test_domain + assert_nil @klass.new("com").domain + assert_nil @klass.new("tldnotlisted").domain + assert_equal "google.com", @klass.new("com", "google").domain + assert_equal "google.tldnotlisted", @klass.new("tldnotlisted", "google").domain + assert_equal "google.com", @klass.new("com", "google", "www").domain + assert_equal "google.tldnotlisted", @klass.new("tldnotlisted", "google", "www").domain + end + + def test_subdomain + assert_nil @klass.new("com").subdomain + assert_nil @klass.new("tldnotlisted").subdomain + assert_nil @klass.new("com", "google").subdomain + assert_nil @klass.new("tldnotlisted", "google").subdomain + assert_equal "www.google.com", @klass.new("com", "google", "www").subdomain + assert_equal "www.google.tldnotlisted", @klass.new("tldnotlisted", "google", "www").subdomain + end + + + def test_domain_question + assert !@klass.new("com").domain? + assert @klass.new("com", "example").domain? + assert @klass.new("com", "example", "www").domain? + assert @klass.new("tldnotlisted", "example").domain? 
+ end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/errors_test.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/errors_test.rb new file mode 100644 index 0000000..75099ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/errors_test.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require "test_helper" + +class ErrorsTest < Minitest::Test + + # Inherits from StandardError + def test_error_inheritance + assert_kind_of StandardError, + PublicSuffix::Error.new + end + + # Inherits from PublicSuffix::Error + def test_domain_invalid_inheritance + assert_kind_of PublicSuffix::Error, + PublicSuffix::DomainInvalid.new + end + + # Inherits from PublicSuffix::DomainInvalid + def test_domain_not_allowed_inheritance + assert_kind_of PublicSuffix::DomainInvalid, + PublicSuffix::DomainNotAllowed.new + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/list_test.rb b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/list_test.rb new file mode 100644 index 0000000..9852935 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/public_suffix-4.0.6/test/unit/list_test.rb @@ -0,0 +1,241 @@ +# frozen_string_literal: true + +require "test_helper" + +class PublicSuffix::ListTest < Minitest::Test + + def setup + @list = PublicSuffix::List.new + end + + def teardown + PublicSuffix::List.default = nil + end + + + def test_initialize + assert_instance_of PublicSuffix::List, @list + assert_equal 0, @list.size + end + + + def test_equality_with_self + list = PublicSuffix::List.new + assert_equal list, list + end + + def test_equality_with_internals + rule = PublicSuffix::Rule.factory("com") + assert_equal PublicSuffix::List.new.add(rule), PublicSuffix::List.new.add(rule) + end + + def test_each_without_block + list = PublicSuffix::List.parse(< 3.2.0' + gem 'simplecov', '~> 0.9.2', :require => false + if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('2.0.0') + gem 'term-ansicolor', '~> 1.3.2', :require => false + gem 'tins', '~> 1.6.0', :require => false + end + gem 'coveralls', '~> 0.7.11', :require => false +end + +group :documentation do + gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false + gem 'yard', '~> 0.8.7.6', :require => false + gem 'inch', '~> 0.5.10', :platforms => :mri, :require => false + gem 'redcarpet', '~> 3.2.2', platforms: :mri # understands github markdown +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/LICENSE b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/LICENSE new file mode 100644 index 0000000..5336718 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/LICENSE @@ -0,0 +1,144 @@ +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as +defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that +is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that +control, are controlled by, or are under common control with that entity. 
For the purposes +of this definition, "control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or otherwise, or (ii) ownership +of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of +such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by +this License. + +"Source" form shall mean the preferred form for making modifications, including but not +limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of +a Source form, including but not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available +under the License, as indicated by a copyright notice that is included in or attached to the +work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on +(or derived from) the Work and for which the editorial revisions, annotations, elaborations, +or other modifications represent, as a whole, an original work of authorship. For the +purposes of this License, Derivative Works shall not include works that remain separable +from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works +thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work +and any modifications or additions to that Work or Derivative Works thereof, that is +intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by +an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the +purposes of this definition, "submitted" means any form of electronic, verbal, or written +communication sent to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and issue tracking +systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and +improving the Work, but excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a +Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each +Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, +royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such Derivative +Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each +Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, +royalty-free, irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license +applies only to those patent claims licensable by such Contributor that are necessarily +infringed by their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent litigation against +any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or +a Contribution incorporated within the Work constitutes direct or contributory patent +infringement, then any patent licenses granted to You under this License for that Work shall +terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works +thereof in any medium, with or without modifications, and in Source or Object form, provided +that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; +and + +You must cause any modified files to carry prominent notices stating that You changed the +files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all +copyright, patent, trademark, and attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative +Works that You distribute must include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not pertain to any part of the +Derivative Works, in at least one of the following places: within a NOTICE text file +distributed as part of the Derivative Works; within the Source form or documentation, if +provided along with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of the NOTICE +file are for informational purposes only and do not modify the License. You may add Your own +attribution notices within Derivative Works that You distribute, alongside or as an addendum +to the NOTICE text from the Work, provided that such additional attribution notices cannot +be construed as modifying the License. You may add Your own copyright statement to Your +modifications and may provide additional or different license terms and conditions for use, +reproduction, or distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution +intentionally submitted for inclusion in the Work by You to the Licensor shall be under the +terms and conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of any +separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for reasonable and +customary use in describing the origin of the Work and reproducing the content of the NOTICE +file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, +Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, +without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, +MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for +determining the appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort +(including negligence), contract, or otherwise, unless required by applicable law (such as +deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, or +consequential damages of any character arising as a result of this License or out of the use +or inability to use the Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all other commercial damages or +losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative +Works thereof, You may choose to offer, and charge a fee for, acceptance of support, +warranty, indemnity, or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only on Your own behalf and on +Your sole responsibility, not on behalf of any other Contributor, and only if You agree to +indemnify, defend, and hold each Contributor harmless for any liability incurred by, or +claims asserted against, such Contributor by reason of your accepting any such warranty or +additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/README.md b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/README.md new file mode 100644 index 0000000..535a791 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/README.md @@ -0,0 +1,60 @@ +# Threadsafe (Inactive, code moved to concurrent-ruby gem and repo.) 
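The README body that follows documents the gem's collections and notes that `ThreadSafe::Cache` accepts backend-dependent options at creation time. As an illustrative sketch (not taken from the vendored files), this is roughly what that looks like with the `:initial_capacity` and `:load_factor` hints that the JRuby backend further down in this diff reads; other backends may simply ignore them:

```ruby
require 'thread_safe'

# Size hints for the underlying table; backends that have no use for
# them are free to ignore them, so it is safe to leave them out.
cache = ThreadSafe::Cache.new(initial_capacity: 256, load_factor: 0.75)

cache[:answer] = 42
cache[:answer]        # => 42
cache.key?(:answer)   # => true
```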
+ +[![Gem Version](https://badge.fury.io/rb/thread_safe.svg)](http://badge.fury.io/rb/thread_safe) [![Build Status](https://travis-ci.org/ruby-concurrency/thread_safe.svg?branch=master)](https://travis-ci.org/ruby-concurrency/thread_safe) [![Coverage Status](https://img.shields.io/coveralls/ruby-concurrency/thread_safe/master.svg)](https://coveralls.io/r/ruby-concurrency/thread_safe) [![Code Climate](https://codeclimate.com/github/ruby-concurrency/thread_safe.svg)](https://codeclimate.com/github/ruby-concurrency/thread_safe) [![Dependency Status](https://gemnasium.com/ruby-concurrency/thread_safe.svg)](https://gemnasium.com/ruby-concurrency/thread_safe) [![License](https://img.shields.io/badge/license-apache-green.svg)](http://opensource.org/licenses/MIT) [![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) + +A collection of thread-safe versions of common core Ruby classes. + +__This code base is now part of the concurrent-ruby gem +at https://github.com/ruby-concurrency/concurrent-ruby. +The code in this repository is no longer maintained.__ + +## Installation + +Add this line to your application's Gemfile: + + gem 'thread_safe' + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install thread_safe + +## Usage + +```ruby +require 'thread_safe' + +sa = ThreadSafe::Array.new # supports standard Array.new forms +sh = ThreadSafe::Hash.new # supports standard Hash.new forms +``` + +`ThreadSafe::Cache` also exists, as a hash-like object, and should have +much better performance characteristics esp. under high concurrency than +`ThreadSafe::Hash`. However, `ThreadSafe::Cache` is not strictly semantically +equivalent to a ruby `Hash` -- for instance, it does not necessarily retain +ordering by insertion time as `Hash` does. For most uses it should do fine +though, and we recommend you consider `ThreadSafe::Cache` instead of +`ThreadSafe::Hash` for your concurrency-safe hash needs. It understands some +options when created (depending on your ruby platform) that control some of the +internals - when unsure just leave them out: + + +```ruby +require 'thread_safe' + +cache = ThreadSafe::Cache.new +``` + +## Contributing + +1. Fork it +2. Clone it (`git clone git@github.com:you/thread_safe.git`) +3. Create your feature branch (`git checkout -b my-new-feature`) +4. Build the jar (`rake jar`) NOTE: Requires JRuby +5. Install dependencies (`bundle install`) +6. Commit your changes (`git commit -am 'Added some feature'`) +7. Push to the branch (`git push origin my-new-feature`) +8. Create new Pull Request diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/Rakefile b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/Rakefile new file mode 100644 index 0000000..3559a98 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/Rakefile @@ -0,0 +1,62 @@ +require 'bundler/gem_tasks' +require 'rspec' +require 'rspec/core/rake_task' + +## safely load all the rake tasks in the `tasks` directory +def safe_load(file) + begin + load file + rescue LoadError => ex + puts "Error loading rake tasks from '#{file}' but will continue..." 
+ puts ex.message + end +end + +Dir.glob('tasks/**/*.rake').each do |rakefile| + safe_load rakefile +end + +task :default => :test + +if defined?(JRUBY_VERSION) + require 'ant' + + directory 'pkg/classes' + directory 'pkg/tests' + + desc "Clean up build artifacts" + task :clean do + rm_rf "pkg/classes" + rm_rf "pkg/tests" + rm_rf "lib/thread_safe/jruby_cache_backend.jar" + end + + desc "Compile the extension" + task :compile => "pkg/classes" do |t| + ant.javac :srcdir => "ext", :destdir => t.prerequisites.first, + :source => "1.5", :target => "1.5", :debug => true, + :classpath => "${java.class.path}:${sun.boot.class.path}" + end + + desc "Build the jar" + task :jar => :compile do + ant.jar :basedir => "pkg/classes", :destfile => "lib/thread_safe/jruby_cache_backend.jar", :includes => "**/*.class" + end + + desc "Build test jar" + task 'test-jar' => 'pkg/tests' do |t| + ant.javac :srcdir => 'spec/src', :destdir => t.prerequisites.first, + :source => "1.5", :target => "1.5", :debug => true + + ant.jar :basedir => 'pkg/tests', :destfile => 'spec/package.jar', :includes => '**/*.class' + end + + task :package => [ :clean, :compile, :jar, 'test-jar' ] +else + # No need to package anything for non-jruby rubies + task :package +end + +RSpec::Core::RakeTask.new :test => :package do |t| + t.rspec_opts = '--color --backtrace --tag ~unfinished --seed 1 --format documentation ./spec' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/examples/bench_cache.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/examples/bench_cache.rb new file mode 100755 index 0000000..14171c9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/examples/bench_cache.rb @@ -0,0 +1,35 @@ +#!/usr/bin/env ruby -wKU + +require "benchmark" +require "thread_safe" + +hash = {} +cache = ThreadSafe::Cache.new + +ENTRIES = 10_000 + +ENTRIES.times do |i| + hash[i] = i + cache[i] = i +end + +TESTS = 40_000_000 +Benchmark.bmbm do |results| + key = rand(10_000) + + results.report('Hash#[]') do + TESTS.times { hash[key] } + end + + results.report('Cache#[]') do + TESTS.times { cache[key] } + end + + results.report('Hash#each_pair') do + (TESTS / ENTRIES).times { hash.each_pair {|k,v| v} } + end + + results.report('Cache#each_pair') do + (TESTS / ENTRIES).times { cache.each_pair {|k,v| v} } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java new file mode 100644 index 0000000..97f9d3f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java @@ -0,0 +1,245 @@ +package org.jruby.ext.thread_safe; + +import org.jruby.*; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap; +import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8; +import org.jruby.ext.thread_safe.jsr166e.nounsafe.*; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.Map; + +import static org.jruby.runtime.Visibility.PRIVATE; + +/** + * Native Java implementation to avoid the JI overhead. 
+ * + * @author thedarkone + */ +public class JRubyCacheBackendLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyClass jrubyRefClass = runtime.defineClassUnder("JRubyCacheBackend", runtime.getObject(), BACKEND_ALLOCATOR, runtime.getModule("ThreadSafe")); + jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); + jrubyRefClass.defineAnnotatedMethods(JRubyCacheBackend.class); + } + + private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyCacheBackend(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyCacheBackend", parent="Object") + public static class JRubyCacheBackend extends RubyObject { + // Defaults used by the CHM + static final int DEFAULT_INITIAL_CAPACITY = 16; + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); + + private ConcurrentHashMap map; + + private static ConcurrentHashMap newCHM(int initialCapacity, float loadFactor) { + if (CAN_USE_UNSAFE_CHM) { + return new ConcurrentHashMapV8(initialCapacity, loadFactor); + } else { + return new org.jruby.ext.thread_safe.jsr166e.nounsafe.ConcurrentHashMapV8(initialCapacity, loadFactor); + } + } + + private static ConcurrentHashMap newCHM() { + return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + private static boolean canUseUnsafeCHM() { + try { + new org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8(); // force class load and initialization + return true; + } catch (Throwable t) { // ensuring we really do catch everything + // Doug's Unsafe setup errors always have this "Could not ini.." message + if (isCausedBySecurityException(t)) { + return false; + } + throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t)); + } + } + + private static boolean isCausedBySecurityException(Throwable t) { + while (t != null) { + if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { + return true; + } + t = t.getCause(); + } + return false; + } + + public JRubyCacheBackend(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + map = newCHM(); + return context.getRuntime().getNil(); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject options) { + map = toCHM(context, options); + return context.getRuntime().getNil(); + } + + private ConcurrentHashMap toCHM(ThreadContext context, IRubyObject options) { + Ruby runtime = context.getRuntime(); + if (!options.isNil() && options.respondsTo("[]")) { + IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); + IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); + int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; + float loadFactor = !rLoadFactor.isNil() ? (float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; + return newCHM(initialCapacity, loadFactor); + } else { + return newCHM(); + } + } + + @JRubyMethod(name = "[]", required = 1) + public IRubyObject op_aref(ThreadContext context, IRubyObject key) { + IRubyObject value; + return ((value = map.get(key)) == null) ? 
context.getRuntime().getNil() : value; + } + + @JRubyMethod(name = {"[]="}, required = 2) + public IRubyObject op_aset(IRubyObject key, IRubyObject value) { + map.put(key, value); + return value; + } + + @JRubyMethod + public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { + IRubyObject result = map.putIfAbsent(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { + return map.computeIfAbsent(key, new ConcurrentHashMap.Fun() { + @Override + public IRubyObject apply(IRubyObject key) { + return block.yieldSpecific(context); + } + }); + } + + @JRubyMethod + public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { + IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { + return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); + } + + @JRubyMethod(name = "key?", required = 1) + public RubyBoolean has_key_p(IRubyObject key) { + return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); + } + + @JRubyMethod + public IRubyObject key(IRubyObject value) { + final IRubyObject key = map.findKey(value); + return key == null ? getRuntime().getNil() : key; + } + + @JRubyMethod + public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { + IRubyObject result = map.replace(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { + IRubyObject result = map.put(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject delete(IRubyObject key) { + IRubyObject result = map.remove(key); + return result == null ? 
getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { + return getRuntime().newBoolean(map.remove(key, value)); + } + + @JRubyMethod + public IRubyObject clear() { + map.clear(); + return this; + } + + @JRubyMethod + public IRubyObject each_pair(ThreadContext context, Block block) { + for (Map.Entry entry : map.entrySet()) { + block.yieldSpecific(context, entry.getKey(), entry.getValue()); + } + return this; + } + + @JRubyMethod + public RubyFixnum size(ThreadContext context) { + return context.getRuntime().newFixnum(map.size()); + } + + @JRubyMethod + public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { + return map.getValueOrDefault(key, defaultValue); + } + + @JRubyMethod(visibility = PRIVATE) + public JRubyCacheBackend initialize_copy(ThreadContext context, IRubyObject other) { + map = newCHM(); + return this; + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java new file mode 100644 index 0000000..90ae1a2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java @@ -0,0 +1,31 @@ +package org.jruby.ext.thread_safe.jsr166e; + +import java.util.Map; +import java.util.Set; + +public interface ConcurrentHashMap { + /** Interface describing a function of one argument */ + public interface Fun { T apply(A a); } + /** Interface describing a function of two arguments */ + public interface BiFun { T apply(A a, B b); } + + public V get(K key); + public V put(K key, V value); + public V putIfAbsent(K key, V value); + public V computeIfAbsent(K key, Fun mf); + public V computeIfPresent(K key, BiFun mf); + public V compute(K key, BiFun mf); + public V merge(K key, V value, BiFun mf); + public boolean replace(K key, V oldVal, V newVal); + public V replace(K key, V value); + public boolean containsKey(K key); + public boolean remove(Object key, Object value); + public V remove(K key); + public void clear(); + public Set> entrySet(); + public int size(); + public V getValueOrDefault(Object key, V defaultValue); + + public boolean containsValue(V value); + public K findKey(V value); +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java new file mode 100644 index 0000000..c85b989 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java @@ -0,0 +1,3863 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. 
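The JRubyCacheBackend methods above (put_if_absent, compute_if_absent, compute_if_present, delete_pair, and friends) back the atomic operations of `ThreadSafe::Cache` when running on JRuby, with the jsr166e sources that follow supplying the underlying hash table. A rough Ruby sketch of how those operations are typically driven, assuming only the public `thread_safe` API (the keys and values here are illustrative):

```ruby
require 'thread_safe'

cache = ThreadSafe::Cache.new

# put_if_absent stores the value only when the key is missing and
# returns nil in that case; otherwise it returns the existing value.
cache.put_if_absent(:token, "abc")   # => nil
cache.put_if_absent(:token, "xyz")   # => "abc"

# compute_if_absent computes a missing value under the cache's
# concurrency control; once a value is stored, the block is not run again.
cache.compute_if_absent(:answer) { 40 + 2 }             # => 42

# compute_if_present updates only keys that already exist.
cache.compute_if_present(:token) { |old| old.upcase }   # => "ABC"

# delete_pair removes the entry only if it still maps to the given value.
cache.delete_pair(:answer, 42)                          # => true
```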
+ +package org.jruby.ext.thread_safe.jsr166e; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *

A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

+ * <ul>
+ * <li> forEach: Perform a given action on each element.
+ * A variant form applies a given transformation on each element
+ * before performing the action.</li>
+ *
+ * <li> search: Return the first available non-null result of
+ * applying a given function on each element; skipping further
+ * search when a result is found.</li>
+ *
+ * <li> reduce: Accumulate each element. The supplied reduction
+ * function cannot rely on ordering (more formally, it should be
+ * both associative and commutative). There are five variants:
+ *
+ * <ul>
+ *
+ * <li> Plain reductions. (There is not a form of this method for
+ * (key, value) function arguments since there is no corresponding
+ * return type.)</li>
+ *
+ * <li> Mapped reductions that accumulate the results of a given
+ * function applied to each element.</li>
+ *
+ * <li> Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.</li>
+ *
+ * </ul>
+ * </li>
+ * </ul>
+ *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null.
+ *
+ * <p>
jsr166e note: During transition, this class
+ * uses nested functional interfaces with different names but the
+ * same forms as those expected for JDK8.
+ *
+ * <p>
This class is a member of the
+ *
+ * Java Collections Framework.
+ *
+ * @since 1.5
+ * @author Doug Lea
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public class ConcurrentHashMapV8<K, V>
+    implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> {
+    private static final long serialVersionUID = 7249069246763182397L;
+
+    /**
+     * A partitionable iterator. A Spliterator can be traversed
+     * directly, but can also be partitioned (before traversal) by
+     * creating another Spliterator that covers a non-overlapping
+     * portion of the elements, and so may be amenable to parallel
+     * execution.
+     *
+     * <p>
This interface exports a subset of expected JDK8
+     * functionality.
+     *
+     * <p>
Sample usage: Here is one (of the several) ways to compute
+     * the sum of the values held in a map using the ForkJoin
+     * framework. As illustrated here, Spliterators are well suited to
+     * designs in which a task repeatedly splits off half its work
+     * into forked subtasks until small enough to process directly,
+     * and then joins these subtasks. Variants of this style can also
+     * be used in completion-based designs.
+     *
+     * <pre>
+     * {@code ConcurrentHashMapV8 m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
+     *     this.s = s; this.depth = depth; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int s = split >>> 1; s > 0; s >>>= 1)
+     *       (subtasks = new SumValues(s.split(), s, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node[] table; + + /** + * The counter maintaining number of elements. + */ + private transient final LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(Node[] tab, int i) { // used by Iter + return (Node)UNSAFE.getObjectVolatile(tab, ((long)i< 1 ? 64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. 
+ */ + final void tryAwaitLock(Node[] tab, int i) { + if (tab != null && i >= 0 && i < tab.length) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + + // Unsafe mechanics for casHash + private static final sun.misc.Unsafe UNSAFE; + private static final long hashOffset; + + static { + try { + UNSAFE = getUnsafe(); + Class k = Node.class; + hashOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("hash")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) 
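// Editor's illustration (not part of the vendored source): a simplified version of
// the tie-breaking order the TreeBin comment above describes -- hash first, then
// class name, then compareTo among keys of the same comparable class. A result of
// 0 still means "ambiguous", and the real code then searches both subtrees.
class TreeBinOrderSketch {
    @SuppressWarnings({"unchecked", "rawtypes"})
    static int binOrder(int h1, Object k1, int h2, Object k2) {
        if (h1 != h2) return (h1 < h2) ? -1 : 1;                        // primary: hash
        int byClass = k1.getClass().getName().compareTo(k2.getClass().getName());
        if (byClass != 0) return byClass;                               // secondary: class name
        if (k1 instanceof Comparable && k1.getClass() == k2.getClass())
            return ((Comparable) k1).compareTo(k2);                     // tertiary: compareTo
        return 0;                                                       // undecidable: check both sides
    }
    public static void main(String[] args) {
        System.out.println(binOrder(5, "a", 7, "b"));   // negative: lower hash first
        System.out.println(binOrder(5, "a", 5, "b"));   // negative: same hash, "a".compareTo("b")
    }
}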
The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (Node[] tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
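// Editor's illustration (not part of the vendored source): the spread() transform
// defined above mixes higher bits downward and clears the top two (control) bits,
// so every result is a non-negative "normal" hash.
class SpreadSketch {
    static int spread(int h) {                        // same expression as above
        h ^= (h >>> 18) ^ (h >>> 12);
        return (h ^ (h >>> 10)) & 0x3fffffff;         // HASH_BITS
    }
    public static void main(String[] args) {
        int[] samples = { 0, -1, 1, 0x7fffffff, 0x80000000, 123456789 };
        for (int h : samples)
            System.out.printf("%08x -> %08x%n", h, spread(h));  // results are always >= 0
    }
}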
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
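// Editor's illustration (not part of the vendored source): step 2 of the structure
// listed above ("if bin empty, try to CAS new node"), modelled with a plain
// AtomicReferenceArray instead of the Unsafe-based tabAt/casTabAt helpers.
import java.util.concurrent.atomic.AtomicReferenceArray;
class CasEmptyBinSketch {
    static final class Node {
        final int hash; final Object key; volatile Object val; volatile Node next;
        Node(int h, Object k, Object v, Node n) { hash = h; key = k; val = v; next = n; }
    }
    static boolean tryCasIntoEmptyBin(AtomicReferenceArray<Node> tab, int h, Object k, Object v) {
        int i = (tab.length() - 1) & h;                            // power-of-two index mask, as above
        return tab.get(i) == null                                  // bin looks empty...
            && tab.compareAndSet(i, null, new Node(h, k, v, null)); // ...so publish without locking
    }
    public static void main(String[] args) {
        AtomicReferenceArray<Node> tab = new AtomicReferenceArray<Node>(16);
        System.out.println(tryCasIntoEmptyBin(tab, 5, "k", "v"));   // true: bin 5 was empty
        System.out.println(tryCasIntoEmptyBin(tab, 5, "k2", "v2")); // false: caller must lock and chain
    }
}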
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + 
replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } 
finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for 
the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + Node[] tab; int n, sc; + while ((tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
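// Editor's illustration (not part of the vendored source): tableSizeFor as defined
// above rounds a requested capacity up to the next power of two (the bit-smearing
// trick from Hacker's Delight, section 3-2).
class TableSizeForSketch {
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= (1 << 30)) ? (1 << 30) : n + 1;
    }
    public static void main(String[] args) {
        for (int c : new int[] { 1, 10, 16, 17, 1000 })
            System.out.println(c + " -> " + tableSizeFor(c)); // 1, 16, 16, 32, 1024
    }
}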
+ * + * @return the new table + */ + private static final Node[] rebuild(Node[] tab) { + int n = tab.length; + Node[] nextTab = new Node[n << 1]; + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. + */ + private static void splitBin(Node[] nextTab, int i, Node e) { + int bit = nextTab.length >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. 
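// Editor's illustration (not part of the vendored source): why splitBin above only
// needs the single new bit (the old capacity n) to decide where a node goes -- in
// the doubled table each entry either stays at its old index i or moves to i + n.
class SplitIndexSketch {
    public static void main(String[] args) {
        int n = 16;                                      // old table size
        for (int h : new int[] { 7, 23, 0x2d7, -1 }) {
            int oldIndex = h & (n - 1);
            int newIndex = h & (2 * n - 1);
            int expected = ((h & n) == 0) ? oldIndex : oldIndex + n;
            System.out.println(newIndex == expected);    // always true
        }
    }
}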
+ */ + private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { + int bit = nextTab.length >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. 
Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + Node[] tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; Node[] t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? 
i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. 
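// Editor's illustration (not part of the vendored source): the three-argument
// constructor described above (and defined just below) uses its arguments only to
// pick an initial table size, roughly tableSizeFor(1 + initialCapacity / loadFactor)
// with at least one bin per estimated updating thread.
class ConstructorSizingSketch {
    public static void main(String[] args) {
        int initialCapacity = 100; float loadFactor = 0.75f; int concurrencyLevel = 16;
        if (initialCapacity < concurrencyLevel)           // at least as many bins as threads
            initialCapacity = concurrencyLevel;
        long size = (long) (1.0 + (long) initialCapacity / loadFactor);
        System.out.println(size);                         // 134 -> tableSizeFor rounds up to 256
    }
}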
+ * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
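// Editor's usage sketch (not part of the vendored source; assumes ConcurrentHashMapV8
// is imported from its package): the lookup operations above, and put, whose javadoc
// continues below, all reject null keys and values, unlike java.util.HashMap.
class BasicUsageSketch {
    public static void main(String[] args) {
        ConcurrentHashMapV8<String, String> m = new ConcurrentHashMapV8<String, String>();
        m.put("k", "v");
        System.out.println(m.get("k"));                          // v
        System.out.println(m.containsKey("missing"));            // false
        System.out.println(m.getValueOrDefault("missing", "d")); // d
        try { m.put("k", null); } catch (NullPointerException expected) {
            System.out.println("null values are rejected");
        }
    }
}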

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

+     * <pre> {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     *   return value;}</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
+     * <pre> {@code
+     * map.computeIfAbsent(key, new Fun() {
+     *   public V map(K k) { return new Value(f(k)); }});}</pre>
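// Editor's usage sketch (not part of the vendored source): calling computeIfAbsent
// through the Fun interface this backport uses in place of java.util.function.Function
// (assumed to be declared alongside this class). Note that although the javadoc
// example above writes "map(K k)", the implementation invokes the function via
// apply (see internalComputeIfAbsent, which calls mf.apply(k)).
class ComputeIfAbsentUsageSketch {
    public static void main(String[] args) {
        ConcurrentHashMapV8<String, Integer> lengths = new ConcurrentHashMapV8<String, Integer>();
        Integer n = lengths.computeIfAbsent("hello", new Fun<String, Integer>() {
            public Integer apply(String key) { return key.length(); }  // computed at most once per key
        });
        System.out.println(n);                     // 5
        System.out.println(lengths.get("hello"));  // 5 (now cached in the map)
    }
}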
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
+     * <pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
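A hedged sketch of a common computeIfPresent use, again written against the standard JDK 8 map for brevity (the RefCounts class and release method are hypothetical): returning null from the remapping function removes the entry, exactly as described above.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class RefCounts {
    private final ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();

    // Atomically decrement a per-key counter; when it would reach zero,
    // the remapping function returns null and the entry is removed.
    void release(String key) {
        counts.computeIfPresent(key, (k, n) -> n > 1 ? Integer.valueOf(n - 1) : null);
    }
}
```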
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
+     * <pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
+     * <pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *     return (v == null) ? msg : v + msg; }});}</pre>
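The anonymous BiFun above is the pre-lambda spelling; on the standard JDK 8 ConcurrentHashMap the same create-or-append update is a one-liner. MessageLog and append below are illustrative names only.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class MessageLog {
    private final ConcurrentMap<String, String> log = new ConcurrentHashMap<>();

    // Create the entry on first use, otherwise append atomically.
    void append(String key, String msg) {
        log.compute(key, (k, v) -> (v == null) ? msg : v + msg);
    }
}
```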
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
+     * <pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
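A small sketch of the corrected semantics, using merge on the standard JDK 8 ConcurrentHashMap to maintain a word count (WordCount and record are hypothetical names):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class WordCount {
    private final ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();

    // merge() covers the usual "insert or combine" case in one atomic step:
    // absent -> store 1, present -> add the old count and the new value.
    void record(String word) {
        counts.merge(word, 1, Integer::sum);
    }
}
```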
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == null) { + init = true; + Node[] tab = new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long counterOffset; + private static final long sizeCtlOffset; + private static final long ABASE; + private static final int ASHIFT; + + static { + int ss; + try { + UNSAFE = getUnsafe(); + Class k = ConcurrentHashMapV8.class; + counterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("counter")); + sizeCtlOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + Class sc = Node[].class; + ABASE = UNSAFE.arrayBaseOffset(sc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java new file mode 100644 index 0000000..22d0cbc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java @@ -0,0 +1,203 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package org.jruby.ext.thread_safe.jsr166e; +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *

This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
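A brief sketch of the statistics-counter usage this paragraph describes, written against java.util.concurrent.atomic.LongAdder (the class this backport became in JDK 8); RequestStats, hit and total are illustrative names.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.LongAdder;

class RequestStats {
    // One striped counter per endpoint; heavily contended increments stay cheap.
    private final ConcurrentMap<String, LongAdder> hits = new ConcurrentHashMap<>();

    void hit(String endpoint) {
        hits.computeIfAbsent(endpoint, e -> new LongAdder()).increment();
    }

    long total(String endpoint) {
        LongAdder a = hits.get(endpoint);
        return (a == null) ? 0L : a.sum(); // sum() is a best-effort snapshot, not atomic
    }
}
```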

This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *

jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java new file mode 100644 index 0000000..8651cdc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java @@ -0,0 +1,342 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package org.jruby.ext.thread_safe.jsr166e; +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. 
Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. 
+ */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java new file mode 100644 index 0000000..4e1e33a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java @@ -0,0 +1,3800 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package org.jruby.ext.thread_safe.jsr166e.nounsafe; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap; +import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. 
However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *

A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

+ * + *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * <pre> {@code
+     * ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int half = split >>> 1; half > 0; half >>>= 1)
+     *       (subtasks = new SumValues(s.split(), half, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }</pre>
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile AtomicReferenceArray table; + + /** + * The counter maintaining number of elements. + */ + private transient LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(AtomicReferenceArray tab, int i) { // used by Iter + return tab.get(i); + } + + private static final boolean casTabAt(AtomicReferenceArray tab, int i, Node c, Node v) { + return tab.compareAndSet(i, c, v); + } + + private static final void setTabAt(AtomicReferenceArray tab, int i, Node v) { + tab.set(i, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. + */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return HASH_UPDATER.compareAndSet(this, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 
64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. + */ + final void tryAwaitLock(AtomicReferenceArray tab, int i) { + if (tab != null && i >= 0 && i < tab.length()) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. 
For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 
0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? 
null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(AtomicReferenceArray tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (AtomicReferenceArray tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (AtomicReferenceArray)ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new 
Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + 
try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + 
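Before the table-initialization and resizing machinery below, a minimal sketch of how the internal methods above surface through the public ConcurrentMap operations (put backed by internalPut, putIfAbsent by internalPutIfAbsent, and the conditional remove/replace forms by internalReplace); the String/Long type parameters are assumed purely for illustration.

    import java.util.concurrent.ConcurrentMap;

    class PutFamilyDemo {
        static void demo(ConcurrentMap<String, Long> m) {
            m.put("k", 1L);          // unconditional insert or replace
            m.putIfAbsent("k", 2L);  // key already present, so returns 1L and leaves the value alone
            m.replace("k", 1L, 3L);  // replaces only if the current value matches 1L
            m.remove("k", 3L);       // deletes only if the current value matches 3L
        }
    }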
/** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final AtomicReferenceArray initTable() { + AtomicReferenceArray tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + AtomicReferenceArray tab; int n, sc; + while ((tab = table) != null && + (n = tab.length()) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + AtomicReferenceArray tab = table; int n; + if (tab == null || (n = tab.length()) == 0) { + n = (sc > c) ? sc : c; + if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
+ * + * @return the new table + */ + private static final AtomicReferenceArray rebuild(AtomicReferenceArray tab) { + int n = tab.length(); + AtomicReferenceArray nextTab = new AtomicReferenceArray(n << 1); + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. 
+ */ + private static void splitBin(AtomicReferenceArray nextTab, int i, Node e) { + int bit = nextTab.length() >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. + */ + private static void splitTreeBin(AtomicReferenceArray nextTab, int i, TreeBin t) { + int bit = nextTab.length() >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + AtomicReferenceArray tab = table; + while (tab != null && i < tab.length()) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). 
Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + AtomicReferenceArray tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; AtomicReferenceArray t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length(); + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. 
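+     *
+     * (Editorial aside, not part of the original comment: exported
+     * iterators drive this method through the hasNext/next protocol
+     * described above, roughly {@code while (it.hasNext()) use(it.next());}
+     * where {@code use} is only a placeholder; hasNext() primes nextVal
+     * via advance(), and next() nulls it out again.)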
+ */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + AtomicReferenceArray t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length(); + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length(); + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (AtomicReferenceArray)ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. 
+ * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

<pre> {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
<pre> {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}</pre>
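+     *
+     * (A minimal concrete sketch, not from the original javadoc; the
+     * cache name and the String/Integer types are illustrative only,
+     * and {@code Fun} is assumed to declare {@code T apply(A a)}:)
+     *  <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> cache =
+     *     new ConcurrentHashMapV8<String, Integer>();
+     * Integer len = cache.computeIfAbsent("zephyr", new Fun<String, Integer>() {
+     *   public Integer apply(String k) { return Integer.valueOf(k.length()); }});}</pre>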
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
<pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
<pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
<pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *    return (v == null) ? msg : v + msg; }});}</pre>
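+     *
+     * (Another minimal sketch, not from the original javadoc; the
+     * wordCounts map and the word variable are illustrative only:) an
+     * atomic per-key counter can be written as
+     *  <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> wordCounts =
+     *     new ConcurrentHashMapV8<String, Integer>();
+     * wordCounts.compute(word, new BiFun<String, Integer, Integer>() {
+     *   public Integer apply(String k, Integer v) {
+     *     return (v == null) ? Integer.valueOf(1) : Integer.valueOf(v + 1); }});}</pre>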
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
<pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + this.counter = new LongAdder(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == null) { + init = true; + AtomicReferenceArray tab = new AtomicReferenceArray(n); + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + AtomicReferenceArray tab = table; + for (int i = 0; i < tab.length(); ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } +} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java new file mode 100644 index 0000000..e7a1c5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java @@ -0,0 +1,204 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package org.jruby.ext.thread_safe.jsr166e.nounsafe; + +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *

This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
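+ *
+ * (A minimal usage sketch, not part of the original javadoc; the
+ * requestCount name is illustrative:)
+ *  <pre> {@code
+ * final LongAdder requestCount = new LongAdder();
+ * // many threads, per request:
+ * requestCount.increment();
+ * // a reporting thread, occasionally:
+ * long approximateTotal = requestCount.sum();}</pre>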

This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *

jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java new file mode 100644 index 0000000..ee69567 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java @@ -0,0 +1,291 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package org.jruby.ext.thread_safe.jsr166e.nounsafe; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. 
If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + + static AtomicLongFieldUpdater VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); + + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return VALUE_UPDATER.compareAndSet(this, cmp, val); + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. 
+ */ + transient volatile int busy; + + AtomicLongFieldUpdater BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); + AtomicIntegerFieldUpdater BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return BASE_UPDATER.compareAndSet(this, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return BUSY_UPDATER.compareAndSet(this, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } +} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java new file mode 100644 index 0000000..81ba441 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java @@ -0,0 +1,199 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.16 version + +package org.jruby.ext.thread_safe.jsr166y; + +import java.util.Random; + +/** + * A random number generator isolated to the current thread. Like the + * global {@link java.util.Random} generator used by the {@link + * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized + * with an internally generated seed that may not otherwise be + * modified. When applicable, use of {@code ThreadLocalRandom} rather + * than shared {@code Random} objects in concurrent programs will + * typically encounter much less overhead and contention. Use of + * {@code ThreadLocalRandom} is particularly appropriate when multiple + * tasks (for example, each a {@link ForkJoinTask}) use random numbers + * in parallel in thread pools. + * + *
<p>
Usages of this class should typically be of the form: + * {@code ThreadLocalRandom.current().nextX(...)} (where + * {@code X} is {@code Int}, {@code Long}, etc). + * When all usages are of this form, it is never possible to + * accidently share a {@code ThreadLocalRandom} across multiple threads. + * + *
<p>
This class also provides additional commonly used bounded random + * generation methods. + * + * @since 1.7 + * @author Doug Lea + */ +public class ThreadLocalRandom extends Random { + // same constants as Random, but must be redeclared because private + private static final long multiplier = 0x5DEECE66DL; + private static final long addend = 0xBL; + private static final long mask = (1L << 48) - 1; + + /** + * The random seed. We can't use super.seed. + */ + private long rnd; + + /** + * Initialization flag to permit calls to setSeed to succeed only + * while executing the Random constructor. We can't allow others + * since it would cause setting seed in one part of a program to + * unintentionally impact other usages by the thread. + */ + boolean initialized; + + // Padding to help avoid memory contention among seed updates in + // different TLRs in the common case that they are located near + // each other. + private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; + + /** + * The actual ThreadLocal + */ + private static final ThreadLocal localRandom = + new ThreadLocal() { + protected ThreadLocalRandom initialValue() { + return new ThreadLocalRandom(); + } + }; + + + /** + * Constructor called only by localRandom.initialValue. + */ + ThreadLocalRandom() { + super(); + initialized = true; + } + + /** + * Returns the current thread's {@code ThreadLocalRandom}. + * + * @return the current thread's {@code ThreadLocalRandom} + */ + public static ThreadLocalRandom current() { + return localRandom.get(); + } + + /** + * Throws {@code UnsupportedOperationException}. Setting seeds in + * this generator is not supported. + * + * @throws UnsupportedOperationException always + */ + public void setSeed(long seed) { + if (initialized) + throw new UnsupportedOperationException(); + rnd = (seed ^ multiplier) & mask; + } + + protected int next(int bits) { + rnd = (rnd * multiplier + addend) & mask; + return (int) (rnd >>> (48-bits)); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @throws IllegalArgumentException if least greater than or equal + * to bound + * @return the next value + */ + public int nextInt(int least, int bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextInt(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public long nextLong(long n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + // Divide n by two until small enough for nextInt. On each + // iteration (at most 31 of them but usually much less), + // randomly choose both whether to include high bit in result + // (offset) and whether to continue with the lower vs upper + // half (which makes a difference only if odd). + long offset = 0; + while (n >= Integer.MAX_VALUE) { + int bits = next(2); + long half = n >>> 1; + long nextn = ((bits & 2) == 0) ? half : n - half; + if ((bits & 1) == 0) + offset += n - nextn; + n = nextn; + } + return offset + nextInt((int) n); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). 
+ * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public long nextLong(long least, long bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextLong(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed {@code double} value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public double nextDouble(double n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + return nextDouble() * n; + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public double nextDouble(double least, double bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextDouble() * (bound - least) + least; + } + + private static final long serialVersionUID = -5851777807851030925L; +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/thread_safe/JrubyCacheBackendService.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/thread_safe/JrubyCacheBackendService.java new file mode 100644 index 0000000..2c5bcea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/ext/thread_safe/JrubyCacheBackendService.java @@ -0,0 +1,15 @@ +package thread_safe; + +import java.io.IOException; + +import org.jruby.Ruby; +import org.jruby.ext.thread_safe.JRubyCacheBackendLibrary; +import org.jruby.runtime.load.BasicLibraryService; + +// can't name this JRubyCacheBackendService or else JRuby doesn't pick this up +public class JrubyCacheBackendService implements BasicLibraryService { + public boolean basicLoad(final Ruby runtime) throws IOException { + new JRubyCacheBackendLibrary().load(runtime, false); + return true; + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe.rb new file mode 100644 index 0000000..5b8fb27 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe.rb @@ -0,0 +1,65 @@ +require 'thread_safe/version' +require 'thread_safe/synchronized_delegator' + +module ThreadSafe + autoload :Cache, 'thread_safe/cache' + autoload :Util, 'thread_safe/util' + + # Various classes within allows for +nil+ values to be stored, so a special +NULL+ token is required to indicate the "nil-ness". + NULL = Object.new + + if defined?(JRUBY_VERSION) + require 'jruby/synchronized' + + # A thread-safe subclass of Array. This version locks + # against the object itself for every method call, + # ensuring only one thread can be reading or writing + # at a time. This includes iteration methods like + # #each. + class Array < ::Array + include JRuby::Synchronized + end + + # A thread-safe subclass of Hash. This version locks + # against the object itself for every method call, + # ensuring only one thread can be reading or writing + # at a time. This includes iteration methods like + # #each. 
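A brief usage sketch (assuming the gem is installed and loaded with `require 'thread_safe'`): the same calls work no matter which of the per-engine definitions in this file is active, since every variant exposes the plain Array/Hash interface.

require 'thread_safe'

list = ThreadSafe::Array.new      # plain ::Array on MRI, a JRuby::Synchronized subclass on JRuby
seen = ThreadSafe::Hash.new

workers = 4.times.map do |i|
  Thread.new do
    25.times { |n| list << (i * 25 + n) }
    seen[i] = true                # whole-method locking on JRuby; GVL-serialized on MRI
  end
end
workers.each(&:join)

list.size        # => 100
seen.keys.sort   # => [0, 1, 2, 3]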
+ class Hash < ::Hash + include JRuby::Synchronized + end + elsif !defined?(RUBY_ENGINE) || RUBY_ENGINE == 'ruby' + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + Array = ::Array + Hash = ::Hash + elsif defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' + require 'monitor' + + class Hash < ::Hash; end + class Array < ::Array; end + + [Hash, Array].each do |klass| + klass.class_eval do + private + def _mon_initialize + @_monitor = Monitor.new unless @_monitor # avoid double initialisation + end + + def self.allocate + obj = super + obj.send(:_mon_initialize) + obj + end + end + + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{method}(*args) + @_monitor.synchronize { super } + end + RUBY_EVAL + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/atomic_reference_cache_backend.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/atomic_reference_cache_backend.rb new file mode 100644 index 0000000..7e710b5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/atomic_reference_cache_backend.rb @@ -0,0 +1,908 @@ +module ThreadSafe + # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 + # + # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose + # size exceeds a threshold). + # + # A hash table supporting full concurrency of retrievals and high expected + # concurrency for updates. However, even though all operations are + # thread-safe, retrieval operations do _not_ entail locking, and there is + # _not_ any support for locking the entire table in a way that prevents all + # access. + # + # Retrieval operations generally do not block, so may overlap with update + # operations. Retrievals reflect the results of the most recently _completed_ + # update operations holding upon their onset. (More formally, an update + # operation for a given key bears a _happens-before_ relation with any (non + # +nil+) retrieval for that key reporting the updated value.) For aggregate + # operations such as +clear()+, concurrent retrievals may reflect insertion or + # removal of only some entries. Similarly, the +each_pair+ iterator yields + # elements reflecting the state of the hash table at some point at or since + # the start of the +each_pair+. Bear in mind that the results of aggregate + # status methods including +size()+ and +empty?+} are typically useful only + # when a map is not undergoing concurrent updates in other threads. Otherwise + # the results of these methods reflect transient states that may be adequate + # for monitoring or estimation purposes, but not for program control. + # + # The table is dynamically expanded when there are too many collisions (i.e., + # keys that have distinct hash codes but fall into the same slot modulo the + # table size), with the expected average effect of maintaining roughly two + # bins per mapping (corresponding to a 0.75 load factor threshold for + # resizing). There may be much variance around this average as mappings are + # added and removed, but overall, this maintains a commonly accepted + # time/space tradeoff for hash tables. 
However, resizing this or any other + # kind of hash table may be a relatively slow operation. When possible, it is + # a good idea to provide a size estimate as an optional :initial_capacity + # initializer argument. An additional optional :load_factor constructor + # argument provides a further means of customizing initial table capacity by + # specifying the table density to be used in calculating the amount of space + # to allocate for the given number of elements. Note that using many keys with + # exactly the same +hash+ is a sure way to slow down performance of any hash + # table. + # + # ## Design overview + # + # The primary design goal of this hash table is to maintain concurrent + # readability (typically method +[]+, but also iteration and related methods) + # while minimizing update contention. Secondary goals are to keep space + # consumption about the same or better than plain +Hash+, and to support high + # initial insertion rates on an empty table by many threads. + # + # Each key-value mapping is held in a +Node+. The validation-based approach + # explained below leads to a lot of code sprawl because retry-control + # precludes factoring into smaller methods. + # + # The table is lazily initialized to a power-of-two size upon the first + # insertion. Each bin in the table normally contains a list of +Node+s (most + # often, the list has only zero or one +Node+). Table accesses require + # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are + # always accurately traversable under volatile reads, so long as lookups check + # hash code and non-nullness of value before checking key equality. + # + # We use the top two bits of +Node+ hash fields for control purposes -- they + # are available anyway because of addressing constraints. As explained further + # below, these top bits are used as follows: + # + # - 00 - Normal + # - 01 - Locked + # - 11 - Locked and may have a thread waiting for lock + # - 10 - +Node+ is a forwarding node + # + # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, + # except for forwarding nodes, for which the lower bits are zero (and so + # always have hash field == +MOVED+). + # + # Insertion (via +[]=+ or its variants) of the first node in an empty bin is + # performed by just CASing it to the bin. This is by far the most common case + # for put operations under most key/hash distributions. Other update + # operations (insert, delete, and replace) require locks. We do not want to + # waste the space required to associate a distinct lock object with each bin, + # so instead use the first node of a bin list itself as a lock. Blocking + # support for these locks relies +Util::CheapLockable. However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. 
+ # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. + # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. 
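  # As a concrete illustration of the power-of-two split described above (a standalone
  # arithmetic sketch, not part of the backend itself): with an old table of size 8
  # doubling to 16, a node's new index is fully determined by one bit of its hash.
  #
  #   old_size = 8                  # power-of-two table size before the resize
  #   new_size = old_size << 1      # 16 after doubling
  #   hash     = 13                 # 0b1101
  #
  #   hash & (old_size - 1)         # => 5, the bin in the old table
  #   hash & (new_size - 1)         # => 13, i.e. 5 + old_size in the new table
  #   hash & old_size               # => 8 (non-zero), so this node goes to the "high" list
  #
  # A zero (hash & old_size) bit would have kept the node at index 5 (the "low" list).
  # +split_bin+ further down tests exactly this bit (+bit = new_table.size >> 1+), which
  # is why a bin can be split into two lists without rehashing any of its nodes.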
+ # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. + # + # The element count is maintained using a +ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + class AtomicReferenceCacheBackend + class Table < Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. + class Node + extend Util::Volatile + attr_volatile :hash, :value, :next + + include Util::CheapLockable + + bit_shift = Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Util::CPU_COUNT > 1 ? 
Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? + spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_aquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_aquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. + TRANSFER_BUFFER_SIZE = 32 + + extend Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. + + # Table initialization and resizing control. When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? 
MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + + def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? 
+ value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + 
check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. + def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. + def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/cache.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/cache.rb new file mode 100644 index 0000000..265c0e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/cache.rb @@ -0,0 +1,161 @@ +require 'thread' + +module ThreadSafe + autoload :JRubyCacheBackend, 'thread_safe/jruby_cache_backend' + autoload :MriCacheBackend, 'thread_safe/mri_cache_backend' + autoload :NonConcurrentCacheBackend, 'thread_safe/non_concurrent_cache_backend' + autoload :AtomicReferenceCacheBackend, 'thread_safe/atomic_reference_cache_backend' + 
autoload :SynchronizedCacheBackend, 'thread_safe/synchronized_cache_backend' + + ConcurrentCacheBackend = if defined?(RUBY_ENGINE) + case RUBY_ENGINE + when 'jruby'; JRubyCacheBackend + when 'ruby'; MriCacheBackend + when 'rbx'; AtomicReferenceCacheBackend + else + warn 'ThreadSafe: unsupported Ruby engine, using a fully synchronized ThreadSafe::Cache implementation' if $VERBOSE + SynchronizedCacheBackend + end + else + MriCacheBackend + end + + class Cache < ConcurrentCacheBackend + def initialize(options = nil, &block) + if options.kind_of?(::Hash) + validate_options_hash!(options) + else + options = nil + end + + super(options) + @default_proc = block + end + + def [](key) + if value = super # non-falsy value is an existing mapping, return it right away + value + # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call + # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value + # would be returned) + # note: nil == value check is not technically necessary + elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) + @default_proc.call(self, key) + else + value + end + end + + alias_method :get, :[] + alias_method :put, :[]= + + def fetch(key, default_value = NULL) + if NULL != (value = get_or_default(key, NULL)) + value + elsif block_given? + yield key + elsif NULL != default_value + default_value + else + raise_fetch_no_key + end + end + + def fetch_or_store(key, default_value = NULL) + fetch(key) do + put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) + end + end + + def put_if_absent(key, value) + computed = false + result = compute_if_absent(key) do + computed = true + value + end + computed ? nil : result + end unless method_defined?(:put_if_absent) + + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end unless method_defined?(:value?) + + def keys + arr = [] + each_pair {|k, v| arr << k} + arr + end unless method_defined?(:keys) + + def values + arr = [] + each_pair {|k, v| arr << v} + arr + end unless method_defined?(:values) + + def each_key + each_pair {|k, v| yield k} + end unless method_defined?(:each_key) + + def each_value + each_pair {|k, v| yield v} + end unless method_defined?(:each_value) + + def key(value) + each_pair {|k, v| return k if v == value} + nil + end unless method_defined?(:key) + alias_method :index, :key if RUBY_VERSION < '1.9' + + def empty? + each_pair {|k, v| return false} + true + end unless method_defined?(:empty?) 
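A usage sketch of this facade (assuming the gem is installed; `require 'thread_safe'` loads the entry file shown earlier):

require 'thread_safe'

cache = ThreadSafe::Cache.new             # backend picked per engine, as above

cache[:answer] = 42
cache[:answer]                            # => 42
cache.fetch(:missing, :fallback)          # => :fallback (a bare fetch miss raises KeyError instead)
cache.fetch_or_store(:slow) { 6 * 7 }     # computes and stores 42 on the first call only
cache.put_if_absent(:answer, 0)           # => 42; returns nil only when the key was absent
cache.keys.sort                           # => [:answer, :slow]

# The optional block acts like Hash's default proc: it receives the cache and the key.
hits = ThreadSafe::Cache.new { |c, key| c[key] = 0 }
hits[:home] += 1                          # => 1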
+ + def size + count = 0 + each_pair {|k, v| count += 1} + count + end unless method_defined?(:size) + + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair {|k, v| h[k] = v} + h + end + + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + private + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair {|k, v| self[k] = v} + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(0.class) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive #{0.class}" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/mri_cache_backend.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/mri_cache_backend.rb new file mode 100644 index 0000000..13e57b3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/mri_cache_backend.rb @@ -0,0 +1,60 @@ +module ThreadSafe + class MriCacheBackend < NonConcurrentCacheBackend + # We can get away with a single global write lock (instead of a per-instance + # one) because of the GVL/green threads. + # + # NOTE: a neat idea of writing a c-ext to manually perform atomic + # put_if_absent, while relying on Ruby not releasing a GVL while calling a + # c-ext will not work because of the potentially Ruby implemented `#hash` + # and `#eql?` key methods. + WRITE_LOCK = Mutex.new + + def []=(key, value) + WRITE_LOCK.synchronize { super } + end + + def compute_if_absent(key) + if stored_value = _get(key) # fast non-blocking path for the most likely case + stored_value + else + WRITE_LOCK.synchronize { super } + end + end + + def compute_if_present(key) + WRITE_LOCK.synchronize { super } + end + + def compute(key) + WRITE_LOCK.synchronize { super } + end + + def merge_pair(key, value) + WRITE_LOCK.synchronize { super } + end + + def replace_pair(key, old_value, new_value) + WRITE_LOCK.synchronize { super } + end + + def replace_if_exists(key, new_value) + WRITE_LOCK.synchronize { super } + end + + def get_and_set(key, value) + WRITE_LOCK.synchronize { super } + end + + def delete(key) + WRITE_LOCK.synchronize { super } + end + + def delete_pair(key, value) + WRITE_LOCK.synchronize { super } + end + + def clear + WRITE_LOCK.synchronize { super } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/non_concurrent_cache_backend.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/non_concurrent_cache_backend.rb new file mode 100644 index 0000000..111ae63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/non_concurrent_cache_backend.rb @@ -0,0 +1,135 @@ +module ThreadSafe + class NonConcurrentCacheBackend + # WARNING: all public methods of the class must operate on the @backend + # directly without calling each other. This is important because of the + # SynchronizedCacheBackend which uses a non-reentrant mutex for perfomance + # reasons. 
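The MRI backend above boils down to one pattern: optimistic lock-free reads, with every write funneled through a single global Mutex. A standalone sketch of that shape follows; the class and method names here are illustrative, not the gem's API.

require 'thread'

class TinyMriStyleCache                    # illustrative, not part of thread_safe
  WRITE_LOCK = Mutex.new                   # one global write lock, as in MriCacheBackend

  def initialize
    @backend = {}
  end

  def [](key)
    @backend[key]                          # lock-free read, like the fast path in compute_if_absent
  end

  def fetch_or_compute(key)
    value = @backend[key]                  # optimistic read first (treats nil as "absent")
    return value unless value.nil?
    WRITE_LOCK.synchronize do
      @backend[key] ||= yield              # re-check under the lock so only one thread computes
    end
  end

  def []=(key, value)
    WRITE_LOCK.synchronize { @backend[key] = value }
  end
end

# TinyMriStyleCache.new.fetch_or_compute(:answer) { 6 * 7 }   # => 42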
+ def initialize(options = nil) + @backend = {} + end + + def [](key) + @backend[key] + end + + def []=(key, value) + @backend[key] = value + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + stored_value + else + @backend[key] = yield + end + end + + def replace_pair(key, old_value, new_value) + if pair?(key, old_value) + @backend[key] = new_value + true + else + false + end + end + + def replace_if_exists(key, new_value) + if NULL != (stored_value = @backend.fetch(key, NULL)) + @backend[key] = new_value + stored_value + end + end + + def compute_if_present(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + store_computed_value(key, yield(stored_value)) + end + end + + def compute(key) + store_computed_value(key, yield(@backend[key])) + end + + def merge_pair(key, value) + if NULL == (stored_value = @backend.fetch(key, NULL)) + @backend[key] = value + else + store_computed_value(key, yield(stored_value)) + end + end + + def get_and_set(key, value) + stored_value = @backend[key] + @backend[key] = value + stored_value + end + + def key?(key) + @backend.key?(key) + end + + def value?(value) + @backend.value?(value) + end + + def delete(key) + @backend.delete(key) + end + + def delete_pair(key, value) + if pair?(key, value) + @backend.delete(key) + true + else + false + end + end + + def clear + @backend.clear + self + end + + def each_pair + dupped_backend.each_pair do |k, v| + yield k, v + end + self + end + + def size + @backend.size + end + + def get_or_default(key, default_value) + @backend.fetch(key, default_value) + end + + alias_method :_get, :[] + alias_method :_set, :[]= + private :_get, :_set + private + def initialize_copy(other) + super + @backend = {} + self + end + + def dupped_backend + @backend.dup + end + + def pair?(key, expected_value) + NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) + end + + def store_computed_value(key, new_value) + if new_value.nil? + @backend.delete(key) + nil + else + @backend[key] = new_value + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_cache_backend.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_cache_backend.rb new file mode 100644 index 0000000..2af3b8c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_cache_backend.rb @@ -0,0 +1,77 @@ +module ThreadSafe + class SynchronizedCacheBackend < NonConcurrentCacheBackend + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. 
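The warning is easy to motivate: a non-reentrant lock such as Ruby's Mutex refuses to be re-acquired by the thread that already holds it, so one synchronized method calling another on the same object would fail rather than nest. A minimal demonstration with a plain Mutex:

require 'thread'

m = Mutex.new
m.synchronize do
  m.synchronize { }   # raises ThreadError on MRI: the owning thread may not lock recursively
end

# This is why every method below wraps `super` (the plain NonConcurrentCacheBackend
# implementation) instead of delegating to a sibling synchronized method.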
+ + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def value?(value) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_delegator.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000..2fc351d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_delegator.rb @@ -0,0 +1,43 @@ +require 'delegate' +require 'monitor' + +# This class provides a trivial way to synchronize all calls to a given object +# by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls +# around the delegated `#send`. Example: +# +# array = [] # not thread-safe on many impls +# array = SynchronizedDelegator.new([]) # thread-safe +# +# A simple `Monitor` provides a very coarse-grained way to synchronize a given +# object, in that it will cause synchronization for methods that have no need +# for it, but this is a trivial way to get thread-safety where none may exist +# currently on some implementations. +# +# This class is currently being considered for inclusion into stdlib, via +# https://bugs.ruby-lang.org/issues/8556 +class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + +end unless defined?(SynchronizedDelegator) diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util.rb new file mode 100644 index 0000000..bad4238 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util.rb @@ -0,0 +1,16 @@ +module ThreadSafe + module Util + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + CPU_COUNT = 16 # is there a way to determine this? 
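The comment above asks whether the CPU count can be determined at runtime; on Ruby 2.2+ it can be, via Etc.nprocessors, though the gem presumably keeps the hard-coded value for older Rubies. A hedged sketch, not part of the gem, with an illustrative constant name and the original guess of 16 as the fallback:

require 'etc'

DETECTED_CPU_COUNT = begin
                       Etc.nprocessors              # logical CPUs reported by the OS
                     rescue NotImplementedError, StandardError
                       16                           # keep the gem's original guess
                     end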
+ + autoload :AtomicReference, 'thread_safe/util/atomic_reference' + autoload :Adder, 'thread_safe/util/adder' + autoload :CheapLockable, 'thread_safe/util/cheap_lockable' + autoload :PowerOfTwoTuple, 'thread_safe/util/power_of_two_tuple' + autoload :Striped64, 'thread_safe/util/striped64' + autoload :Volatile, 'thread_safe/util/volatile' + autoload :VolatileTuple, 'thread_safe/util/volatile_tuple' + autoload :XorShiftRandom, 'thread_safe/util/xor_shift_random' + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/adder.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/adder.rb new file mode 100644 index 0000000..aefda5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/adder.rb @@ -0,0 +1,62 @@ +module ThreadSafe + module Util + # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + class Adder < Striped64 + # Adds the given value. + def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/atomic_reference.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/atomic_reference.rb new file mode 100644 index 0000000..04800f1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/atomic_reference.rb @@ -0,0 +1,44 @@ +module ThreadSafe + module Util + AtomicReference = + if defined?(Rubinius::AtomicReference) + # An overhead-less atomic reference. + Rubinius::AtomicReference + else + begin + require 'atomic' + defined?(Atomic::InternalReference) ? 
Atomic::InternalReference : Atomic + rescue LoadError, NameError + class FullLockingAtomicReference + def initialize(value = nil) + @___mutex = Mutex.new + @___value = value + end + + def get + @___mutex.synchronize { @___value } + end + alias_method :value, :get + + def set(new_value) + @___mutex.synchronize { @___value = new_value } + end + alias_method :value=, :set + + def compare_and_set(old_value, new_value) + return false unless @___mutex.try_lock + begin + return false unless @___value.equal? old_value + @___value = new_value + ensure + @___mutex.unlock + end + true + end + end + + FullLockingAtomicReference + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/cheap_lockable.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000..143276a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,106 @@ +module ThreadSafe + module Util + # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in. + # + # Usage: + # class A + # include CheapLockable + # + # def do_exlusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? + # do_something + # cheap_broadcast # wake up others + # end + # end + # end + module CheapLockable + private + engine = defined?(RUBY_ENGINE) && RUBY_ENGINE + if engine == 'rbx' + # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. + def cheap_synchronize + Rubinius.lock(self) + begin + yield + ensure + Rubinius.unlock(self) + end + end + + def cheap_wait + wchan = Rubinius::Channel.new + + begin + waiters = @waiters ||= [] + waiters.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout nil + ensure + Rubinius.lock(self) + + unless signaled or waiters.delete(wchan) + # we timed out, but got signaled afterwards (e.g. while waiting to + # acquire @lock), so pass that signal on to the next waiter + waiters.shift << true unless waiters.empty? + end + end + + self + end + + def cheap_broadcast + waiters = @waiters ||= [] + waiters.shift << true until waiters.empty? + self + end + elsif engine == 'jruby' + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#syncrhonize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_broadcast+'s block. + def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_broadcast+'s block. 
+ def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/power_of_two_tuple.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000..3e2d076 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,26 @@ +module ThreadSafe + module Util + class PowerOfTwoTuple < VolatileTuple + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/striped64.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/striped64.rb new file mode 100644 index 0000000..702fa14 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/striped64.rb @@ -0,0 +1,222 @@ +module ThreadSafe + module Util + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. + # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. + # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUS. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. 
Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUS forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + class Striped64 + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. + # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + class Cell < AtomicReference + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + attr_reader *(Array.new(12).map {|i| :"padding_#{i}"}) + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + end + + extend Volatile + attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. + # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+x+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? 
+ collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. + def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? + end + + def try_initialize_cells(x, hash) + if free? && !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile.rb new file mode 100644 index 0000000..e050e21 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile.rb @@ -0,0 +1,64 @@ +module ThreadSafe + module Util + module Volatile + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of the +AtomicReference+s. 
+ # + # Usage: + # class Foo + # extend ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? + include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = ThreadSafe::Util::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile_tuple.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile_tuple.rb new file mode 100644 index 0000000..500beb7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile_tuple.rb @@ -0,0 +1,46 @@ +module ThreadSafe + module Util + # A fixed size array with volatile volatile getters/setters. + # Usage: + # arr = VolatileTuple.new(16) + # arr.volatile_set(0, :foo) + # arr.volatile_get(0) # => :foo + # arr.cas(0, :foo, :bar) # => true + # arr.volatile_get(0) # => :bar + class VolatileTuple + include Enumerable + + Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : Array + + def initialize(size) + @tuple = tuple = Tuple.new(size) + i = 0 + while i < size + tuple[i] = AtomicReference.new + i += 1 + end + end + + def volatile_get(i) + @tuple[i].get + end + + def volatile_set(i, value) + @tuple[i].set(value) + end + + def compare_and_set(i, old_value, new_value) + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + def size + @tuple.size + end + + def each + @tuple.each {|ref| yield ref.get} + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/xor_shift_random.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000..89442ff --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,41 @@ +module ThreadSafe + module Util + # A xorshift random number (positive +Fixnum+s) generator, provides + # reasonably cheap way to generate thread local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift).odd? 
# thread-localy generate a next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. + def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/version.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/version.rb new file mode 100644 index 0000000..88821f6 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/lib/thread_safe/version.rb @@ -0,0 +1,21 @@ +module ThreadSafe + VERSION = "0.3.6" +end + +# NOTE: <= 0.2.0 used Threadsafe::VERSION +# @private +module Threadsafe + + # @private + def self.const_missing(name) + name = name.to_sym + if ThreadSafe.const_defined?(name) + warn "[DEPRECATION] `Threadsafe::#{name}' is deprecated, use `ThreadSafe::#{name}' instead." + ThreadSafe.const_get(name) + else + warn "[DEPRECATION] the `Threadsafe' module is deprecated, please use `ThreadSafe` instead." + super + end + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/.gitignore b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/spec_helper.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/spec_helper.rb new file mode 100644 index 0000000..e22fe20 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/spec_helper.rb @@ -0,0 +1,31 @@ +require 'simplecov' +require 'coveralls' +require 'logger' + +SimpleCov.formatter = SimpleCov::Formatter::MultiFormatter[ + SimpleCov::Formatter::HTMLFormatter, + Coveralls::SimpleCov::Formatter +] + +SimpleCov.start do + project_name 'thread_safe' + add_filter '/coverage/' + add_filter '/pkg/' + add_filter '/spec/' + add_filter '/tasks/' + add_filter '/yard-template/' +end + +$VERBOSE = nil # suppress our deprecation warnings +require 'thread_safe' + +logger = Logger.new($stderr) +logger.level = Logger::WARN + +# import all the support files +Dir[File.join(File.dirname(__FILE__), 'support/**/*.rb')].each { |f| require File.expand_path(f) } + +RSpec.configure do |config| + #config.raise_errors_for_deprecations! 
+ config.order = 'random' +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/src/thread_safe/SecurityManager.java b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/src/thread_safe/SecurityManager.java new file mode 100644 index 0000000..beb6c02 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/src/thread_safe/SecurityManager.java @@ -0,0 +1,21 @@ +package thread_safe; + +import java.security.Permission; +import java.util.ArrayList; +import java.util.List; + +public class SecurityManager extends java.lang.SecurityManager { + private final List deniedPermissions = + new ArrayList(); + + @Override + public void checkPermission(Permission p) { + if (deniedPermissions.contains(p)) { + throw new SecurityException("Denied!"); + } + } + + public void deny(Permission p) { + deniedPermissions.add(p); + } +} diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/.gitignore b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/threads.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/threads.rb new file mode 100644 index 0000000..1251fa4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/threads.rb @@ -0,0 +1 @@ +THREADS = (RUBY_ENGINE == 'ruby' ? 100 : 10) diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/threadsafe_test.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/threadsafe_test.rb new file mode 100644 index 0000000..ac3c66a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/support/threadsafe_test.rb @@ -0,0 +1,75 @@ +module ThreadSafe + module Test + class Latch + def initialize(count = 1) + @count = count + @mutex = Mutex.new + @cond = ConditionVariable.new + end + + def release + @mutex.synchronize do + @count -= 1 if @count > 0 + @cond.broadcast if @count.zero? + end + end + + def await + @mutex.synchronize do + @cond.wait @mutex if @count > 0 + end + end + end + + class Barrier < Latch + def await + @mutex.synchronize do + if @count.zero? # fall through + elsif @count > 0 + @count -= 1 + @count.zero? ? @cond.broadcast : @cond.wait(@mutex) + end + end + end + end + + class HashCollisionKey + attr_reader :hash, :key + def initialize(key, hash = key.hash % 3) + @key = key + @hash = hash + end + + def eql?(other) + other.kind_of?(self.class) && @key.eql?(other.key) + end + + def even? + @key.even? 
+ end + + def <=>(other) + @key <=> other.key + end + end + + # having 4 separate HCK classes helps for a more thorough CHMV8 testing + class HashCollisionKey2 < HashCollisionKey; end + class HashCollisionKeyNoCompare < HashCollisionKey + def <=>(other) + 0 + end + end + class HashCollisionKey4 < HashCollisionKeyNoCompare; end + + HASH_COLLISION_CLASSES = [HashCollisionKey, HashCollisionKey2, HashCollisionKeyNoCompare, HashCollisionKey4] + + def self.HashCollisionKey(key, hash = key.hash % 3) + HASH_COLLISION_CLASSES[rand(4)].new(key, hash) + end + + class HashCollisionKeyNonComparable < HashCollisionKey + undef <=> + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/.gitignore b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/array_spec.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/array_spec.rb new file mode 100644 index 0000000..f3752ea --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/array_spec.rb @@ -0,0 +1,18 @@ +module ThreadSafe + describe Array do + let!(:ary) { described_class.new } + + it 'concurrency' do + (1..THREADS).map do |i| + Thread.new do + 1000.times do + ary << i + ary.each { |x| x * 2 } + ary.shift + ary.last + end + end + end.map(&:join) + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_loops_spec.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_loops_spec.rb new file mode 100644 index 0000000..f29dc54 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_loops_spec.rb @@ -0,0 +1,507 @@ +Thread.abort_on_exception = true + +module ThreadSafe + describe 'CacheTorture' do + THREAD_COUNT = 40 + KEY_COUNT = (((2**13) - 2) * 0.75).to_i # get close to the doubling cliff + LOW_KEY_COUNT = (((2**8 ) - 2) * 0.75).to_i # get close to the doubling cliff + + INITIAL_VALUE_CACHE_SETUP = lambda do |options, keys| + cache = ThreadSafe::Cache.new + initial_value = options[:initial_value] || 0 + keys.each { |key| cache[key] = initial_value } + cache + end + ZERO_VALUE_CACHE_SETUP = lambda do |options, keys| + INITIAL_VALUE_CACHE_SETUP.call(options.merge(:initial_value => 0), keys) + end + + DEFAULTS = { + key_count: KEY_COUNT, + thread_count: THREAD_COUNT, + loop_count: 1, + prelude: '', + cache_setup: lambda { |options, keys| ThreadSafe::Cache.new } + } + + LOW_KEY_COUNT_OPTIONS = {loop_count: 150, key_count: LOW_KEY_COUNT} + SINGLE_KEY_COUNT_OPTIONS = {loop_count: 100_000, key_count: 1} + + it 'concurrency' do + code = <<-RUBY_EVAL + cache[key] + cache[key] = key + cache[key] + cache.delete(key) + RUBY_EVAL + do_thread_loop(:concurrency, code) + end + + it '#put_if_absent' do + do_thread_loop( + :put_if_absent, + 'acc += 1 unless cache.put_if_absent(key, key)', + key_count: 100_000 + ) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + it '#compute_put_if_absent' do + code = <<-RUBY_EVAL + if key.even? 
+ cache.compute_if_absent(key) { acc += 1; key } + else + acc += 1 unless cache.put_if_absent(key, key) + end + RUBY_EVAL + do_thread_loop(:compute_if_absent, code) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + it '#compute_if_absent_and_present' do + compute_if_absent_and_present + compute_if_absent_and_present(LOW_KEY_COUNT_OPTIONS) + compute_if_absent_and_present(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_to_zero' do + add_remove_to_zero + add_remove_to_zero(LOW_KEY_COUNT_OPTIONS) + add_remove_to_zero(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_to_zero_via_merge_pair' do + add_remove_to_zero_via_merge_pair + add_remove_to_zero_via_merge_pair(LOW_KEY_COUNT_OPTIONS) + add_remove_to_zero_via_merge_pair(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove' do + add_remove + add_remove(LOW_KEY_COUNT_OPTIONS) + add_remove(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_via_compute' do + add_remove_via_compute + add_remove_via_compute(LOW_KEY_COUNT_OPTIONS) + add_remove_via_compute(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'emove_via_compute_if_absent_present' do + add_remove_via_compute_if_absent_present + add_remove_via_compute_if_absent_present(LOW_KEY_COUNT_OPTIONS) + add_remove_via_compute_if_absent_present(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_indiscriminate' do + add_remove_indiscriminate + add_remove_indiscriminate(LOW_KEY_COUNT_OPTIONS) + add_remove_indiscriminate(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_up' do + count_up + count_up(LOW_KEY_COUNT_OPTIONS) + count_up(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_up_via_compute' do + count_up_via_compute + count_up_via_compute(LOW_KEY_COUNT_OPTIONS) + count_up_via_compute(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_up_via_merge_pair' do + count_up_via_merge_pair + count_up_via_merge_pair(LOW_KEY_COUNT_OPTIONS) + count_up_via_merge_pair(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_race' do + prelude = 'change = (rand(2) == 1) ? 
1 : -1' + code = <<-RUBY_EVAL + v = cache[key] + acc += change if cache.replace_pair(key, v, v + change) + RUBY_EVAL + do_thread_loop( + :count_race, + code, + loop_count: 5, + prelude: prelude, + cache_setup: ZERO_VALUE_CACHE_SETUP + ) do |result, cache, options, keys| + result_sum = sum(result) + expect(sum(keys.map { |key| cache[key] })).to eq result_sum + expect(sum(cache.values)).to eq result_sum + expect(options[:key_count]).to eq cache.size + end + end + + it 'get_and_set_new' do + code = 'acc += 1 unless cache.get_and_set(key, key)' + do_thread_loop(:get_and_set_new, code) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + it 'get_and_set_existing' do + code = 'acc += 1 if cache.get_and_set(key, key) == -1' + do_thread_loop( + :get_and_set_existing, + code, + cache_setup: INITIAL_VALUE_CACHE_SETUP, + initial_value: -1 + ) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + private + + def compute_if_absent_and_present(opts = {}) + prelude = 'on_present = rand(2) == 1' + code = <<-RUBY_EVAL + if on_present + cache.compute_if_present(key) { |old_value| acc += 1; old_value + 1 } + else + cache.compute_if_absent(key) { acc += 1; 1 } + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + stored_sum = 0 + stored_key_count = 0 + keys.each do |k| + if value = cache[k] + stored_sum += value + stored_key_count += 1 + end + end + expect(stored_sum).to eq sum(result) + expect(stored_key_count).to eq cache.size + end + end + + def add_remove(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + if do_add + acc += 1 unless cache.put_if_absent(key, key) + else + acc -= 1 if cache.delete_pair(key, key) + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_via_compute(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + cache.compute(key) do |old_value| + if do_add + acc += 1 unless old_value + key + else + acc -= 1 if old_value + nil + end + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_via_compute_if_absent_present(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + if do_add + cache.compute_if_absent(key) { acc += 1; key } + else + cache.compute_if_present(key) { acc -= 1; nil } + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_indiscriminate(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + if do_add + acc += 1 unless cache.put_if_absent(key, key) + else + acc -= 1 if cache.delete(key) + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def count_up(opts = {}) + 
code = <<-RUBY_EVAL + v = cache[key] + acc += 1 if cache.replace_pair(key, v, v + 1) + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, cache_setup: ZERO_VALUE_CACHE_SETUP}.merge(opts) + ) do |result, cache, options, keys| + expect_count_up(result, cache, options, keys) + end + end + + def count_up_via_compute(opts = {}) + code = <<-RUBY_EVAL + cache.compute(key) do |old_value| + acc += 1 + old_value ? old_value + 1 : 1 + end + RUBY_EVAL + do_thread_loop( + __method__, + code, {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + expect_count_up(result, cache, options, keys) + result.inject(nil) do |previous_value, next_value| # since compute guarantees atomicity all count ups should be equal + expect(previous_value).to eq next_value if previous_value + next_value + end + end + end + + def count_up_via_merge_pair(opts = {}) + code = <<-RUBY_EVAL + cache.merge_pair(key, 1) { |old_value| old_value + 1 } + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + all_match = true + expected_value = options[:loop_count] * options[:thread_count] + keys.each do |key| + value = cache[key] + if expected_value != value + all_match = false + break + end + end + expect(all_match).to be_truthy + end + end + + def add_remove_to_zero(opts = {}) + code = <<-RUBY_EVAL + acc += 1 unless cache.put_if_absent(key, key) + acc -= 1 if cache.delete_pair(key, key) + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_to_zero_via_merge_pair(opts = {}) + code = <<-RUBY_EVAL + acc += (cache.merge_pair(key, key) {}) ? 1 : -1 + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def do_thread_loop(name, code, options = {}, &block) + options = DEFAULTS.merge(options) + meth = define_loop(name, code, options[:prelude]) + keys = to_keys_array(options[:key_count]) + run_thread_loop(meth, keys, options, &block) + + if options[:key_count] > 1 + options[:key_count] = (options[:key_count] / 40).to_i + keys = to_hash_collision_keys_array(options[:key_count]) + run_thread_loop( + meth, + keys, + options.merge(loop_count: options[:loop_count] * 5), + &block + ) + end + end + + def run_thread_loop(meth, keys, options, &block) + cache = options[:cache_setup].call(options, keys) + barrier = ThreadSafe::Test::Barrier.new(options[:thread_count]) + result = (1..options[:thread_count]).map do + Thread.new do + setup_sync_and_start_loop( + meth, + cache, + keys, + barrier, + options[:loop_count] + ) + end + end.map(&:value) + block.call(result, cache, options, keys) if block_given? 
+ end + + def setup_sync_and_start_loop(meth, cache, keys, barrier, loop_count) + my_keys = keys.shuffle + barrier.await + if my_keys.size == 1 + key = my_keys.first + send("#{meth}_single_key", cache, key, loop_count) + else + send("#{meth}_multiple_keys", cache, my_keys, loop_count) + end + end + + def define_loop(name, body, prelude) + inner_meth_name = :"_#{name}_loop_inner" + outer_meth_name = :"_#{name}_loop_outer" + # looping is splitted into the "loop methods" to trigger the JIT + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{inner_meth_name}_multiple_keys(cache, keys, i, length, acc) + #{prelude} + target = i + length + while i < target + key = keys[i] + #{body} + i += 1 + end + acc + end unless method_defined?(:#{inner_meth_name}_multiple_keys) + RUBY_EVAL + + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{inner_meth_name}_single_key(cache, key, i, length, acc) + #{prelude} + target = i + length + while i < target + #{body} + i += 1 + end + acc + end unless method_defined?(:#{inner_meth_name}_single_key) + RUBY_EVAL + + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{outer_meth_name}_multiple_keys(cache, keys, loop_count) + total_length = keys.size + acc = 0 + inc = 100 + loop_count.times do + i = 0 + pre_loop_inc = total_length % inc + acc = #{inner_meth_name}_multiple_keys(cache, keys, i, pre_loop_inc, acc) + i += pre_loop_inc + while i < total_length + acc = #{inner_meth_name}_multiple_keys(cache, keys, i, inc, acc) + i += inc + end + end + acc + end unless method_defined?(:#{outer_meth_name}_multiple_keys) + RUBY_EVAL + + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{outer_meth_name}_single_key(cache, key, loop_count) + acc = 0 + i = 0 + inc = 100 + + pre_loop_inc = loop_count % inc + acc = #{inner_meth_name}_single_key(cache, key, i, pre_loop_inc, acc) + i += pre_loop_inc + + while i < loop_count + acc = #{inner_meth_name}_single_key(cache, key, i, inc, acc) + i += inc + end + acc + end unless method_defined?(:#{outer_meth_name}_single_key) + RUBY_EVAL + outer_meth_name + end + + def to_keys_array(key_count) + arr = [] + key_count.times {|i| arr << i} + arr + end + + def to_hash_collision_keys_array(key_count) + to_keys_array(key_count).map { |key| ThreadSafe::Test::HashCollisionKey(key) } + end + + def sum(result) + result.inject(0) { |acc, i| acc + i } + end + + def expect_standard_accumulator_test_result(result, cache, options, keys) + expect_all_key_mappings_exist(cache, keys) + expect(options[:key_count]).to eq sum(result) + expect(options[:key_count]).to eq cache.size + end + + def expect_all_key_mappings_exist(cache, keys, all_must_exist = true) + keys.each do |key| + value = cache[key] + if value || all_must_exist + expect(key).to eq value unless key == value # don't do a bazzilion assertions unless necessary + end + end + end + + def expect_count_up(result, cache, options, keys) + keys.each do |key| + value = cache[key] + expect(value).to be_truthy unless value + end + expect(sum(cache.values)).to eq sum(result) + expect(options[:key_count]).to eq cache.size + end + end unless RUBY_ENGINE == 'rbx' || ENV['TRAVIS'] +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_spec.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_spec.rb new file mode 100644 index 0000000..ff02857 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_spec.rb @@ -0,0 +1,943 @@ 
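The spec that follows exercises ThreadSafe::Cache's atomic operations. As a quick orientation, a minimal usage sketch of the behaviours those examples assert — assuming the thread_safe gem vendored in this diff is on the load path:

require 'thread_safe'

cache = ThreadSafe::Cache.new

# put_if_absent returns nil when it stored the value, otherwise the existing value.
cache.put_if_absent(:a, 1)   # => nil (stored)
cache.put_if_absent(:a, 2)   # => 1   (left untouched)

# compute_if_absent evaluates the block at most once per missing key, atomically
# with respect to other writers (the 'atomicity' example below checks exactly this).
cache.compute_if_absent(:b) { 40 + 2 }               # => 42
cache.compute_if_absent(:b) { raise 'not reached' }  # => 42

# fetch_or_store is the Hash#fetch-flavoured spelling of the same idea.
cache.fetch_or_store(:c) { 'computed once' }         # => "computed once"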
+Thread.abort_on_exception = true + +module ThreadSafe + describe Cache do + before(:each) do + @cache = described_class.new + end + + it 'concurrency' do + (1..THREADS).map do |i| + Thread.new do + 1000.times do |j| + key = i * 1000 + j + @cache[key] = i + @cache[key] + @cache.delete(key) + end + end + end.map(&:join) + end + + it 'retrieval' do + expect_size_change(1) do + expect(nil).to eq @cache[:a] + expect(nil).to eq @cache.get(:a) + @cache[:a] = 1 + expect(1).to eq @cache[:a] + expect(1).to eq @cache.get(:a) + end + end + + it '#put_if_absent' do + with_or_without_default_proc do + expect_size_change(1) do + expect(nil).to eq @cache.put_if_absent(:a, 1) + expect(1).to eq @cache.put_if_absent(:a, 1) + expect(1).to eq @cache.put_if_absent(:a, 2) + expect(1).to eq @cache[:a] + end + end + end + + describe '#compute_if_absent' do + it 'common' do + with_or_without_default_proc do + expect_size_change(3) do + expect(1).to eq @cache.compute_if_absent(:a) { 1 } + expect(1).to eq @cache.compute_if_absent(:a) { 2 } + expect(1).to eq @cache[:a] + + @cache[:b] = nil + expect(nil).to eq @cache.compute_if_absent(:b) { 1 } + expect(nil).to eq @cache.compute_if_absent(:c) {} + expect(nil).to eq @cache[:c] + expect(true).to eq @cache.key?(:c) + end + end + end + + it 'with return' do + with_or_without_default_proc do + expect_handles_return_lambda(:compute_if_absent, :a) + end + end + + it 'exception' do + with_or_without_default_proc do + expect_handles_exception(:compute_if_absent, :a) + end + end + + it 'atomicity' do + late_compute_threads_count = 10 + late_put_if_absent_threads_count = 10 + getter_threads_count = 5 + compute_started = ThreadSafe::Test::Latch.new(1) + compute_proceed = ThreadSafe::Test::Latch.new( + late_compute_threads_count + + late_put_if_absent_threads_count + + getter_threads_count + ) + block_until_compute_started = lambda do |name| + # what does it mean? + if (v = @cache[:a]) != nil + expect(nil).to v + end + compute_proceed.release + compute_started.await + end + + expect_size_change 1 do + late_compute_threads = Array.new(late_compute_threads_count) do + Thread.new do + block_until_compute_started.call('compute_if_absent') + expect(1).to eq @cache.compute_if_absent(:a) { fail } + end + end + + late_put_if_absent_threads = Array.new(late_put_if_absent_threads_count) do + Thread.new do + block_until_compute_started.call('put_if_absent') + expect(1).to eq @cache.put_if_absent(:a, 2) + end + end + + getter_threads = Array.new(getter_threads_count) do + Thread.new do + block_until_compute_started.call('getter') + Thread.pass while @cache[:a].nil? 
+ expect(1).to eq @cache[:a] + end + end + + Thread.new do + @cache.compute_if_absent(:a) do + compute_started.release + compute_proceed.await + sleep(0.2) + 1 + end + end.join + (late_compute_threads + + late_put_if_absent_threads + + getter_threads).each(&:join) + end + end + end + + describe '#compute_if_present' do + it 'common' do + with_or_without_default_proc do + expect_no_size_change do + expect(nil).to eq @cache.compute_if_present(:a) {} + expect(nil).to eq @cache.compute_if_present(:a) { 1 } + expect(nil).to eq @cache.compute_if_present(:a) { fail } + expect(false).to eq @cache.key?(:a) + end + + @cache[:a] = 1 + expect_no_size_change do + expect(1).to eq @cache.compute_if_present(:a) { 1 } + expect(1).to eq @cache[:a] + expect(2).to eq @cache.compute_if_present(:a) { 2 } + expect(2).to eq @cache[:a] + expect(false).to eq @cache.compute_if_present(:a) { false } + expect(false).to eq @cache[:a] + + @cache[:a] = 1 + yielded = false + @cache.compute_if_present(:a) do |old_value| + yielded = true + expect(1).to eq old_value + 2 + end + expect(true).to eq yielded + end + + expect_size_change(-1) do + expect(nil).to eq @cache.compute_if_present(:a) {} + expect(false).to eq @cache.key?(:a) + expect(nil).to eq @cache.compute_if_present(:a) { 1 } + expect(false).to eq @cache.key?(:a) + end + end + end + + it 'with return' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_return_lambda(:compute_if_present, :a) + end + end + + it 'exception' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_exception(:compute_if_present, :a) + end + end + end + + describe '#compute' do + it 'common' do + with_or_without_default_proc do + expect_no_size_change do + expect_compute(:a, nil, nil) {} + end + + expect_size_change(1) do + expect_compute(:a, nil, 1) { 1 } + expect_compute(:a, 1, 2) { 2 } + expect_compute(:a, 2, false) { false } + expect(false).to eq @cache[:a] + end + + expect_size_change(-1) do + expect_compute(:a, false, nil) {} + end + end + end + + it 'with return' do + with_or_without_default_proc do + expect_handles_return_lambda(:compute, :a) + @cache[:a] = 1 + expect_handles_return_lambda(:compute, :a) + end + end + + it 'exception' do + with_or_without_default_proc do + expect_handles_exception(:compute, :a) + @cache[:a] = 2 + expect_handles_exception(:compute, :a) + end + end + end + + describe '#merge_pair' do + it 'common' do + with_or_without_default_proc do + expect_size_change(1) do + expect(nil).to eq @cache.merge_pair(:a, nil) { fail } + expect(true).to eq @cache.key?(:a) + expect(nil).to eq @cache[:a] + end + + expect_no_size_change do + expect_merge_pair(:a, nil, nil, false) { false } + expect_merge_pair(:a, nil, false, 1) { 1 } + expect_merge_pair(:a, nil, 1, 2) { 2 } + end + + expect_size_change(-1) do + expect_merge_pair(:a, nil, 2, nil) {} + expect(false).to eq @cache.key?(:a) + end + end + end + + it 'with return' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_return_lambda(:merge_pair, :a, 2) + end + end + + it 'exception' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_exception(:merge_pair, :a, 2) + end + end + end + + it 'updates dont block reads' do + getters_count = 20 + key_klass = ThreadSafe::Test::HashCollisionKey + keys = [key_klass.new(1, 100), + key_klass.new(2, 100), + key_klass.new(3, 100)] # hash colliding keys + inserted_keys = [] + + keys.each do |key, i| + compute_started = ThreadSafe::Test::Latch.new(1) + compute_finished = ThreadSafe::Test::Latch.new(1) + 
getters_started = ThreadSafe::Test::Latch.new(getters_count) + getters_finished = ThreadSafe::Test::Latch.new(getters_count) + + computer_thread = Thread.new do + getters_started.await + @cache.compute_if_absent(key) do + compute_started.release + getters_finished.await + 1 + end + compute_finished.release + end + + getter_threads = (1..getters_count).map do + Thread.new do + getters_started.release + inserted_keys.each do |inserted_key| + expect(true).to eq @cache.key?(inserted_key) + expect(1).to eq @cache[inserted_key] + end + expect(false).to eq @cache.key?(key) + compute_started.await + inserted_keys.each do |inserted_key| + expect(true).to eq @cache.key?(inserted_key) + expect(1).to eq @cache[inserted_key] + end + expect(false).to eq @cache.key?(key) + expect(nil).to eq @cache[key] + getters_finished.release + compute_finished.await + expect(true).to eq @cache.key?(key) + expect(1).to eq @cache[key] + end + end + + (getter_threads << computer_thread).map do |t| + expect(t.join(2)).to be_truthy + end # asserting no deadlocks + inserted_keys << key + end + end + + specify 'collision resistance' do + expect_collision_resistance( + (0..1000).map { |i| ThreadSafe::Test::HashCollisionKey(i, 1) } + ) + end + + specify 'collision resistance with arrays' do + special_array_class = Class.new(Array) do + def key # assert_collision_resistance expects to be able to call .key to get the "real" key + first.key + end + end + # Test collision resistance with a keys that say they responds_to <=>, but then raise exceptions + # when actually called (ie: an Array filled with non-comparable keys). + # See https://github.com/headius/thread_safe/issues/19 for more info. + expect_collision_resistance( + (0..100).map do |i| + special_array_class.new( + [ThreadSafe::Test::HashCollisionKeyNonComparable.new(i, 1)] + ) + end + ) + end + + it '#replace_pair' do + with_or_without_default_proc do + expect_no_size_change do + expect(false).to eq @cache.replace_pair(:a, 1, 2) + expect(false).to eq @cache.replace_pair(:a, nil, nil) + expect(false).to eq @cache.key?(:a) + end + end + + @cache[:a] = 1 + expect_no_size_change do + expect(true).to eq @cache.replace_pair(:a, 1, 2) + expect(false).to eq @cache.replace_pair(:a, 1, 2) + expect(2).to eq @cache[:a] + expect(true).to eq @cache.replace_pair(:a, 2, 2) + expect(2).to eq @cache[:a] + expect(true).to eq @cache.replace_pair(:a, 2, nil) + expect(false).to eq @cache.replace_pair(:a, 2, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + expect(true).to eq @cache.replace_pair(:a, nil, nil) + expect(true).to eq @cache.key?(:a) + expect(true).to eq @cache.replace_pair(:a, nil, 1) + expect(1).to eq @cache[:a] + end + end + + it '#replace_if_exists' do + with_or_without_default_proc do + expect_no_size_change do + expect(nil).to eq @cache.replace_if_exists(:a, 1) + expect(false).to eq @cache.key?(:a) + end + + @cache[:a] = 1 + expect_no_size_change do + expect(1).to eq @cache.replace_if_exists(:a, 2) + expect(2).to eq @cache[:a] + expect(2).to eq @cache.replace_if_exists(:a, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + expect(nil).to eq @cache.replace_if_exists(:a, 1) + expect(1).to eq @cache[:a] + end + end + end + + it '#get_and_set' do + with_or_without_default_proc do + expect(nil).to eq @cache.get_and_set(:a, 1) + expect(true).to eq @cache.key?(:a) + expect(1).to eq @cache[:a] + expect(1).to eq @cache.get_and_set(:a, 2) + expect(2).to eq @cache.get_and_set(:a, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq 
@cache.key?(:a) + expect(nil).to eq @cache.get_and_set(:a, 1) + expect(1).to eq @cache[:a] + end + end + + it '#key' do + with_or_without_default_proc do + expect(nil).to eq @cache.key(1) + @cache[:a] = 1 + expect(:a).to eq @cache.key(1) + expect(nil).to eq @cache.key(0) + end + end + + it '#key?' do + with_or_without_default_proc do + expect(false).to eq @cache.key?(:a) + @cache[:a] = 1 + expect(true).to eq @cache.key?(:a) + end + end + + it '#value?' do + with_or_without_default_proc do + expect(false).to eq @cache.value?(1) + @cache[:a] = 1 + expect(true).to eq @cache.value?(1) + end + end + + it '#delete' do + with_or_without_default_proc do |default_proc_set| + expect_no_size_change do + expect(nil).to eq @cache.delete(:a) + end + @cache[:a] = 1 + expect_size_change -1 do + expect(1).to eq @cache.delete(:a) + end + expect_no_size_change do + expect(nil).to eq @cache[:a] unless default_proc_set + + expect(false).to eq @cache.key?(:a) + expect(nil).to eq @cache.delete(:a) + end + end + end + + it '#delete_pair' do + with_or_without_default_proc do + expect_no_size_change do + expect(false).to eq @cache.delete_pair(:a, 2) + expect(false).to eq @cache.delete_pair(:a, nil) + end + @cache[:a] = 1 + expect_no_size_change do + expect(false).to eq @cache.delete_pair(:a, 2) + end + expect_size_change(-1) do + expect(1).to eq @cache[:a] + expect(true).to eq @cache.delete_pair(:a, 1) + expect(false).to eq @cache.delete_pair(:a, 1) + expect(false).to eq @cache.key?(:a) + end + end + end + + specify 'default proc' do + @cache = cache_with_default_proc(1) + expect_no_size_change do + expect(false).to eq @cache.key?(:a) + end + expect_size_change(1) do + expect(1).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + end + + specify 'falsy default proc' do + @cache = cache_with_default_proc(nil) + expect_no_size_change do + expect(false).to eq @cache.key?(:a) + end + expect_size_change(1) do + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + end + + describe '#fetch' do + it 'common' do + with_or_without_default_proc do |default_proc_set| + expect_no_size_change do + expect(1).to eq @cache.fetch(:a, 1) + expect(1).to eq @cache.fetch(:a) { 1 } + expect(false).to eq @cache.key?(:a) + + expect(nil).to eq @cache[:a] unless default_proc_set + end + + @cache[:a] = 1 + expect_no_size_change do + expect(1).to eq @cache.fetch(:a) { fail } + end + + expect { @cache.fetch(:b) }.to raise_error(KeyError) + + expect_no_size_change do + expect(1).to eq @cache.fetch(:b, :c) {1} # assert block supersedes default value argument + expect(false).to eq @cache.key?(:b) + end + end + end + + it 'falsy' do + with_or_without_default_proc do + expect(false).to eq @cache.key?(:a) + + expect_no_size_change do + expect(nil).to eq @cache.fetch(:a, nil) + expect(false).to eq @cache.fetch(:a, false) + expect(nil).to eq @cache.fetch(:a) {} + expect(false).to eq @cache.fetch(:a) { false } + end + + @cache[:a] = nil + expect_no_size_change do + expect(true).to eq @cache.key?(:a) + expect(nil).to eq @cache.fetch(:a) { fail } + end + end + end + + it 'with return' do + with_or_without_default_proc do + r = fetch_with_return + # r = lambda do + # @cache.fetch(:a) { return 10 } + # end.call + + expect_no_size_change do + expect(10).to eq r + expect(false).to eq @cache.key?(:a) + end + end + end + end + + describe '#fetch_or_store' do + it 'common' do + with_or_without_default_proc do |default_proc_set| + expect_size_change(1) do + expect(1).to eq @cache.fetch_or_store(:a, 1) + expect(1).to eq @cache[:a] + 
end + + @cache.delete(:a) + + expect_size_change 1 do + expect(1).to eq @cache.fetch_or_store(:a) { 1 } + expect(1).to eq @cache[:a] + end + + expect_no_size_change do + expect(1).to eq @cache.fetch_or_store(:a) { fail } + end + + expect { @cache.fetch_or_store(:b) }. + to raise_error(KeyError) + + expect_size_change(1) do + expect(1).to eq @cache.fetch_or_store(:b, :c) { 1 } # assert block supersedes default value argument + expect(1).to eq @cache[:b] + end + end + end + + it 'falsy' do + with_or_without_default_proc do + expect(false).to eq @cache.key?(:a) + + expect_size_change(1) do + expect(nil).to eq @cache.fetch_or_store(:a, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + @cache.delete(:a) + + expect_size_change(1) do + expect(false).to eq @cache.fetch_or_store(:a, false) + expect(false).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + @cache.delete(:a) + + expect_size_change(1) do + expect(nil).to eq @cache.fetch_or_store(:a) {} + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + @cache.delete(:a) + + expect_size_change(1) do + expect(false).to eq @cache.fetch_or_store(:a) { false } + expect(false).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + + @cache[:a] = nil + expect_no_size_change do + expect(nil).to eq @cache.fetch_or_store(:a) { fail } + end + end + end + + it 'with return' do + with_or_without_default_proc do + r = fetch_or_store_with_return + + expect_no_size_change do + expect(10).to eq r + expect(false).to eq @cache.key?(:a) + end + end + end + end + + it '#clear' do + @cache[:a] = 1 + expect_size_change(-1) do + expect(@cache).to eq @cache.clear + expect(false).to eq @cache.key?(:a) + expect(nil).to eq @cache[:a] + end + end + + describe '#each_pair' do + it 'common' do + @cache.each_pair { |k, v| fail } + expect(@cache).to eq @cache.each_pair {} + @cache[:a] = 1 + + h = {} + @cache.each_pair { |k, v| h[k] = v } + expect({:a => 1}).to eq h + + @cache[:b] = 2 + h = {} + @cache.each_pair { |k, v| h[k] = v } + expect({:a => 1, :b => 2}).to eq h + end + + it 'pair iterator' do + @cache[:a] = 1 + @cache[:b] = 2 + i = 0 + r = @cache.each_pair do |k, v| + if i == 0 + i += 1 + next + fail + elsif i == 1 + break :breaked + end + end + + expect(:breaked).to eq r + end + + it 'allows modification' do + @cache[:a] = 1 + @cache[:b] = 1 + @cache[:c] = 1 + + expect_size_change(1) do + @cache.each_pair do |k, v| + @cache[:z] = 1 + end + end + end + end + + it '#keys' do + expect([]).to eq @cache.keys + + @cache[1] = 1 + expect([1]).to eq @cache.keys + + @cache[2] = 2 + expect([1, 2]).to eq @cache.keys.sort + end + + it '#values' do + expect([]).to eq @cache.values + + @cache[1] = 1 + expect([1]).to eq @cache.values + + @cache[2] = 2 + expect([1, 2]).to eq @cache.values.sort + end + + it '#each_key' do + expect(@cache).to eq @cache.each_key { fail } + + @cache[1] = 1 + arr = [] + @cache.each_key { |k| arr << k } + expect([1]).to eq arr + + @cache[2] = 2 + arr = [] + @cache.each_key { |k| arr << k } + expect([1, 2]).to eq arr.sort + end + + it '#each_value' do + expect(@cache).to eq @cache.each_value { fail } + + @cache[1] = 1 + arr = [] + @cache.each_value { |k| arr << k } + expect([1]).to eq arr + + @cache[2] = 2 + arr = [] + @cache.each_value { |k| arr << k } + expect([1, 2]).to eq arr.sort + end + + it '#empty' do + expect(true).to eq @cache.empty? + @cache[:a] = 1 + expect(false).to eq @cache.empty? 
+ end + + it 'options validation' do + expect_valid_options(nil) + expect_valid_options({}) + expect_valid_options(foo: :bar) + end + + it 'initial capacity options validation' do + expect_valid_option(:initial_capacity, nil) + expect_valid_option(:initial_capacity, 1) + expect_invalid_option(:initial_capacity, '') + expect_invalid_option(:initial_capacity, 1.0) + expect_invalid_option(:initial_capacity, -1) + end + + it 'load factor options validation' do + expect_valid_option(:load_factor, nil) + expect_valid_option(:load_factor, 0.01) + expect_valid_option(:load_factor, 0.75) + expect_valid_option(:load_factor, 1) + expect_invalid_option(:load_factor, '') + expect_invalid_option(:load_factor, 0) + expect_invalid_option(:load_factor, 1.1) + expect_invalid_option(:load_factor, 2) + expect_invalid_option(:load_factor, -1) + end + + it '#size' do + expect(0).to eq @cache.size + @cache[:a] = 1 + expect(1).to eq @cache.size + @cache[:b] = 1 + expect(2).to eq @cache.size + @cache.delete(:a) + expect(1).to eq @cache.size + @cache.delete(:b) + expect(0).to eq @cache.size + end + + it '#get_or_default' do + with_or_without_default_proc do + expect(1).to eq @cache.get_or_default(:a, 1) + expect(nil).to eq @cache.get_or_default(:a, nil) + expect(false).to eq @cache.get_or_default(:a, false) + expect(false).to eq @cache.key?(:a) + + @cache[:a] = 1 + expect(1).to eq @cache.get_or_default(:a, 2) + end + end + + it '#dup,#clone' do + [:dup, :clone].each do |meth| + cache = cache_with_default_proc(:default_value) + cache[:a] = 1 + dupped = cache.send(meth) + expect(1).to eq dupped[:a] + expect(1).to eq dupped.size + expect_size_change(1, cache) do + expect_no_size_change(dupped) do + cache[:b] = 1 + end + end + expect(false).to eq dupped.key?(:b) + expect_no_size_change(cache) do + expect_size_change(-1, dupped) do + dupped.delete(:a) + end + end + expect(false).to eq dupped.key?(:a) + expect(true).to eq cache.key?(:a) + # test default proc + expect_size_change(1, cache) do + expect_no_size_change dupped do + expect(:default_value).to eq cache[:c] + expect(false).to eq dupped.key?(:c) + end + end + expect_no_size_change cache do + expect_size_change 1, dupped do + expect(:default_value).to eq dupped[:d] + expect(false).to eq cache.key?(:d) + end + end + end + end + + it 'is unfreezable' do + expect { @cache.freeze }.to raise_error(NoMethodError) + end + + it 'marshal dump load' do + new_cache = Marshal.load(Marshal.dump(@cache)) + expect(new_cache).to be_an_instance_of ThreadSafe::Cache + expect(0).to eq new_cache.size + @cache[:a] = 1 + new_cache = Marshal.load(Marshal.dump(@cache)) + expect(1).to eq @cache[:a] + expect(1).to eq new_cache.size + end + + it 'marshal dump doesnt work with default proc' do + expect { Marshal.dump(ThreadSafe::Cache.new {}) }.to raise_error(TypeError) + end + + private + + def with_or_without_default_proc(&block) + block.call(false) + @cache = ThreadSafe::Cache.new { |h, k| h[k] = :default_value } + block.call(true) + end + + def cache_with_default_proc(default_value = 1) + ThreadSafe::Cache.new { |cache, k| cache[k] = default_value } + end + + def expect_size_change(change, cache = @cache, &block) + start = cache.size + block.call + expect(change).to eq cache.size - start + end + + def expect_valid_option(option_name, value) + expect_valid_options(option_name => value) + end + + def expect_valid_options(options) + c = ThreadSafe::Cache.new(options) + expect(c).to be_an_instance_of ThreadSafe::Cache + end + + def expect_invalid_option(option_name, value) + 
expect_invalid_options(option_name => value) + end + + def expect_invalid_options(options) + expect { ThreadSafe::Cache.new(options) }.to raise_error(ArgumentError) + end + + def expect_no_size_change(cache = @cache, &block) + expect_size_change(0, cache, &block) + end + + def expect_handles_return_lambda(method, key, *args) + before_had_key = @cache.key?(key) + before_had_value = before_had_key ? @cache[key] : nil + + returning_lambda = lambda do + @cache.send(method, key, *args) { return :direct_return } + end + + expect_no_size_change do + expect(:direct_return).to eq returning_lambda.call + expect(before_had_key).to eq @cache.key?(key) + expect(before_had_value).to eq @cache[key] if before_had_value + end + end + + class TestException < Exception; end + def expect_handles_exception(method, key, *args) + before_had_key = @cache.key?(key) + before_had_value = before_had_key ? @cache[key] : nil + + expect_no_size_change do + expect { @cache.send(method, key, *args) { raise TestException, '' } }. + to raise_error(TestException) + + expect(before_had_key).to eq @cache.key?(key) + expect(before_had_value).to eq @cache[key] if before_had_value + end + end + + def expect_compute(key, expected_old_value, expected_result, &block) + result = @cache.compute(:a) do |old_value| + expect(expected_old_value).to eq old_value + block.call + end + expect(expected_result).to eq result + end + + def expect_merge_pair(key, value, expected_old_value, expected_result, &block) + result = @cache.merge_pair(key, value) do |old_value| + expect(expected_old_value).to eq old_value + block.call + end + expect(expected_result).to eq result + end + + def expect_collision_resistance(keys) + keys.each { |k| @cache[k] = k.key } + 10.times do |i| + size = keys.size + while i < size + k = keys[i] + expect(k.key == @cache.delete(k) && + !@cache.key?(k) && + (@cache[k] = k.key; @cache[k] == k.key)).to be_truthy + i += 10 + end + end + expect(keys.all? 
{ |k| @cache[k] == k.key }).to be_truthy + end + + # Took out for compatibility with Rubinius, see https://github.com/rubinius/rubinius/issues/1312 + def fetch_with_return + lambda do + @cache.fetch(:a) { return 10 } + end.call + end + + # Took out for compatibility with Rubinius, see https://github.com/rubinius/rubinius/issues/1312 + def fetch_or_store_with_return + lambda do + @cache.fetch_or_store(:a) { return 10 } + end.call + end + + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/hash_spec.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/hash_spec.rb new file mode 100644 index 0000000..1403b63 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/hash_spec.rb @@ -0,0 +1,17 @@ +module ThreadSafe + describe Hash do + let!(:hsh) { described_class.new } + + it 'concurrency' do + (1..THREADS).map do |i| + Thread.new do + 1000.times do |j| + hsh[i * 1000 + j] = i + hsh[i * 1000 + j] + hsh.delete(i * 1000 + j) + end + end + end.map(&:join) + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/no_unsafe_spec.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/no_unsafe_spec.rb new file mode 100644 index 0000000..3bd03e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/no_unsafe_spec.rb @@ -0,0 +1,27 @@ +if defined?(JRUBY_VERSION) && ENV['TEST_NO_UNSAFE'] + # to be used like this: rake test TEST_NO_UNSAFE=true + load 'test/package.jar' + java_import 'thread_safe.SecurityManager' + manager = SecurityManager.new + + # Prevent accessing internal classes + manager.deny(java.lang.RuntimePermission.new('accessClassInPackage.sun.misc')) + java.lang.System.setSecurityManager(manager) + + module ThreadSafe + describe 'no_unsafe' do + it 'security_manager_is_used' do + begin + java_import 'sun.misc.Unsafe' + fail + rescue SecurityError + end + end + + it 'no_unsafe_version_of_chmv8_is_used' do + require 'thread_safe/jruby_cache_backend' # make sure the jar has been loaded + expect(!Java::OrgJrubyExtThread_safe::JRubyCacheBackendLibrary::JRubyCacheBackend::CAN_USE_UNSAFE_CHM).to be_truthy + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/synchronized_delegator_spec.rb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/synchronized_delegator_spec.rb new file mode 100644 index 0000000..5302e2b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/spec/thread_safe/synchronized_delegator_spec.rb @@ -0,0 +1,85 @@ +require 'thread_safe/synchronized_delegator.rb' + +module ThreadSafe + describe SynchronizedDelegator do + it 'wraps array' do + array = ::Array.new + sync_array = described_class.new(array) + + array << 1 + expect(1).to eq sync_array[0] + + sync_array << 2 + expect(2).to eq array[1] + end + + it 'synchronizes access' do + t1_continue, t2_continue = false, false + + hash = ::Hash.new do |hash, key| + t2_continue = true + unless hash.find { |e| e[1] == key.to_s } # just to do something + hash[key] = key.to_s + Thread.pass until t1_continue + end + end + sync_hash = described_class.new(hash) + sync_hash[1] = 'egy' + + t1 = Thread.new do + sync_hash[2] = 'dva' + sync_hash[3] # triggers t2_continue + end + + t2 = Thread.new do + Thread.pass until t2_continue + sync_hash[4] = '42' + end + + sleep(0.05) # sleep some to allow threads to boot + + until t2.status == 'sleep' do + Thread.pass + end + + expect(3).to eq 
hash.keys.size + + t1_continue = true + t1.join; t2.join + + expect(4).to eq sync_hash.size + end + + it 'synchronizes access with block' do + t1_continue, t2_continue = false, false + + array = ::Array.new + sync_array = described_class.new(array) + + t1 = Thread.new do + sync_array << 1 + sync_array.each do + t2_continue = true + Thread.pass until t1_continue + end + end + + t2 = Thread.new do + # sleep(0.01) + Thread.pass until t2_continue + sync_array << 2 + end + + until t2.status == 'sleep' || t2.status == false + Thread.pass + end + + expect(1).to eq array.size + + t1_continue = true + t1.join; t2.join + + expect([1, 2]).to eq array + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/tasks/update_doc.rake b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/tasks/update_doc.rake new file mode 100644 index 0000000..f2cab2f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/tasks/update_doc.rake @@ -0,0 +1,45 @@ +require 'yard' +YARD::Rake::YardocTask.new + +root = File.expand_path File.join(File.dirname(__FILE__), '..') + +namespace :yard do + + cmd = lambda do |command| + puts ">> executing: #{command}" + system command or raise "#{command} failed" + end + + desc 'Pushes generated documentation to github pages: http://ruby-concurrency.github.io/thread_safe/' + task :push => [:setup, :yard] do + + message = Dir.chdir(root) do + `git log -n 1 --oneline`.strip + end + puts "Generating commit: #{message}" + + Dir.chdir "#{root}/yardoc" do + cmd.call "git add -A" + cmd.call "git commit -m '#{message}'" + cmd.call 'git push origin gh-pages' + end + + end + + desc 'Setups second clone in ./yardoc dir for pushing doc to github' + task :setup do + + unless File.exist? "#{root}/yardoc/.git" + cmd.call "rm -rf #{root}/yardoc" if File.exist?("#{root}/yardoc") + Dir.chdir "#{root}" do + cmd.call 'git clone --single-branch --branch gh-pages git@github.com:ruby-concurrency/thread_safe.git ./yardoc' + end + end + Dir.chdir "#{root}/yardoc" do + cmd.call 'git fetch origin' + cmd.call 'git reset --hard origin/gh-pages' + end + + end + +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/thread_safe.gemspec b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/thread_safe.gemspec new file mode 100644 index 0000000..cf3faeb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/thread_safe.gemspec @@ -0,0 +1,26 @@ +# -*- encoding: utf-8 -*- +$:.push File.expand_path('../lib', __FILE__) unless $:.include?('lib') +require 'thread_safe/version' + +Gem::Specification.new do |gem| + gem.authors = ["Charles Oliver Nutter", "thedarkone"] + gem.email = ["headius@headius.com", "thedarkone2@gmail.com"] + gem.summary = %q{Thread-safe collections and utilities for Ruby} + gem.description = %q{A collection of data structures and utilities to make thread-safe programming in Ruby easier} + gem.homepage = "https://github.com/ruby-concurrency/thread_safe" + + gem.files = `git ls-files`.split($\) + gem.files += ['lib/thread_safe/jruby_cache_backend.jar'] if defined?(JRUBY_VERSION) + gem.files -= ['.gitignore'] # see https://github.com/headius/thread_safe/issues/40#issuecomment-42315441 + gem.platform = 'java' if defined?(JRUBY_VERSION) + gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) } + gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) + gem.name = "thread_safe" + gem.require_paths = ["lib"] + gem.version = ThreadSafe::VERSION + gem.license = "Apache-2.0" + + gem.add_development_dependency 
'atomic', '= 1.1.16' + gem.add_development_dependency 'rake', '< 12.0' + gem.add_development_dependency 'rspec', '~> 3.2' +end diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/yard-template/default/fulldoc/html/css/common.css b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/yard-template/default/fulldoc/html/css/common.css new file mode 100644 index 0000000..dfd9d85 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/yard-template/default/fulldoc/html/css/common.css @@ -0,0 +1,125 @@ +/* Override this file with custom rules */ + +body { + line-height: 18px; +} + +.docstring code, .docstring .object_link a, #filecontents code { + padding: 0px 3px 1px 3px; + border: 1px solid #eef; + background: #f5f5ff; +} + +#filecontents pre code, .docstring pre code { + border: none; + background: none; + padding: 0; +} + +#filecontents pre.code, .docstring pre.code, .tags pre.example, .docstring code, .docstring .object_link a, +#filecontents code { + -moz-border-radius: 2px; + -webkit-border-radius: 2px; +} + +/* syntax highlighting */ +.source_code { + display: none; + padding: 3px 8px; + border-left: 8px solid #ddd; + margin-top: 5px; +} + +#filecontents pre.code, .docstring pre.code, .source_code pre { + font-family: monospace; +} + +#filecontents pre.code, .docstring pre.code { + display: block; +} + +.source_code .lines { + padding-right: 12px; + color: #555; + text-align: right; +} + +#filecontents pre.code, .docstring pre.code, +.tags pre.example { + padding: 5px 12px; + margin-top: 4px; + border: 1px solid #eef; + background: #f5f5ff; +} + +pre.code { + color: #000; +} + +pre.code .info.file { + color: #555; +} + +pre.code .val { + color: #036A07; +} + +pre.code .tstring_content, +pre.code .heredoc_beg, pre.code .heredoc_end, +pre.code .qwords_beg, pre.code .qwords_end, +pre.code .tstring, pre.code .dstring { + color: #036A07; +} + +pre.code .fid, +pre.code .rubyid_new, +pre.code .rubyid_to_s, +pre.code .rubyid_to_sym, +pre.code .rubyid_to_f, +pre.code .rubyid_to_i, +pre.code .rubyid_each { + color: inherit; +} + +pre.code .comment { + color: #777; + font-style: italic; +} + +pre.code .const, pre.code .constant { + color: inherit; + font-weight: bold; + font-style: italic; +} + +pre.code .label, +pre.code .symbol { + color: #C5060B; +} + +pre.code .kw, +pre.code .rubyid_require, +pre.code .rubyid_extend, +pre.code .rubyid_include, +pre.code .int { + color: #0000FF; +} + +pre.code .ivar { + color: #660E7A; +} + +pre.code .gvar, +pre.code .rubyid_backref, +pre.code .rubyid_nth_ref { + color: #6D79DE; +} + +pre.code .regexp, .dregexp { + color: #036A07; +} + +pre.code a { + border-bottom: 1px dotted #bbf; +} + diff --git a/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/yard-template/default/layout/html/footer.erb b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/yard-template/default/layout/html/footer.erb new file mode 100644 index 0000000..94c0273 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/thread_safe-0.3.6/yard-template/default/layout/html/footer.erb @@ -0,0 +1,16 @@ +

+ + diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/.yardopts b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/.yardopts new file mode 100644 index 0000000..aa3b230 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/.yardopts @@ -0,0 +1,6 @@ +--no-private +lib/**/*.rb +- +CHANGES.md +LICENSE +README.md diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/CHANGES.md b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/CHANGES.md new file mode 100644 index 0000000..dfe39a5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/CHANGES.md @@ -0,0 +1,815 @@ +Version 1.2.8 - 8-Nov-2020 +-------------------------- + +* Added support for handling "slim" format zoneinfo files that are produced by + default by zic version 2020b and later. The POSIX-style TZ string is now used to + calculate DST transition times after the final defined transition in the file. + The 64-bit section is now always used regardless of whether Time has support + for 64-bit times. +* Rubinius is no longer supported. + + +Version 1.2.7 - 2-Apr-2020 +-------------------------- + +* Fixed 'wrong number of arguments' errors when running on JRuby 9.0. #114. +* Fixed warnings when running on Ruby 2.8. #112. + + +Version 1.2.6 - 24-Dec-2019 +--------------------------- + +* Timezone#strftime('%s', time) will now return the correct number of seconds + since the epoch. #91. +* Removed the unused TZInfo::RubyDataSource::REQUIRE_PATH constant. +* Fixed "SecurityError: Insecure operation - require" exceptions when loading + data with recent Ruby releases in safe mode. +* Fixed warnings when running on Ruby 2.7. #106 and #111. + + +Version 1.2.5 - 4-Feb-2018 +-------------------------- + +* Support recursively (deep) freezing Country and Timezone instances. #80. +* Allow negative daylight savings time offsets to be derived when reading from + zoneinfo files. The utc_offset and std_offset are now derived correctly for + Europe/Dublin in the 2018a and 2018b releases of the Time Zone Database. + + +Version 1.2.4 - 26-Oct-2017 +--------------------------- + +* Ignore the leapseconds file that is included in zoneinfo directories installed + with version 2017c and later of the Time Zone Database. + + +Version 1.2.3 - 25-Mar-2017 +--------------------------- + +* Reduce the number of String objects allocated when loading zoneinfo files. + #54. +* Make Timezone#friendly_identifier compatible with frozen string literals. +* Improve the algorithm for deriving the utc_offset from zoneinfo files. This + now correctly handles Pacific/Apia switching from one side of the + International Date Line to the other whilst observing daylight savings time. + #66. +* Fix an UnknownTimezone exception when calling transitions_up_to or + offsets_up_to on a TimezoneProxy instance obtained from Timezone.get_proxy. +* Allow the Factory zone to be obtained from the Zoneinfo data source. +* Ignore the /usr/share/zoneinfo/timeconfig symlink included in Slackware + distributions. #64. +* Fix Timezone#strftime handling of %Z expansion when %Z is prefixed with more + than one percent. #31. +* Support expansion of %z, %:z, %::z and %:::z to the UTC offset of the time + zone in Timezone#strftime. #31 and #67. + + +Version 1.2.2 - 8-Aug-2014 +-------------------------- + +* Fix an error with duplicates being returned by Timezone#all_country_zones + and Timezone#all_country_zone_identifiers when used with tzinfo-data + v1.2014.6 or later.
+* Use the zone1970.tab file for country timezone data if it is found in the + zoneinfo directory (and fallback to zone.tab if not). zone1970.tab was added + in tzdata 2014f. zone.tab is now deprecated. + + +Version 1.2.1 - 1-Jun-2014 +-------------------------- + +* Support zoneinfo files generated with zic version 2014c and later. +* On platforms that only support positive 32-bit timestamps, ensure that + conversions are accurate from the epoch instead of just from the first + transition after the epoch. +* Minor documentation improvements. + + +Version 1.2.0 - 26-May-2014 +--------------------------- + +* Raise the minimum supported Ruby version to 1.8.7. +* Support loading system zoneinfo data on FreeBSD, OpenBSD and Solaris. + Resolves #15. +* Add canonical_identifier and canonical_zone methods to Timezone. Resolves #16. +* Add a link to a DataSourceNotFound help page in the TZInfo::DataSourceNotFound + exception message. +* Load iso3166.tab and zone.tab files as UTF-8. +* Fix Timezone#local_to_utc returning local Time instances on systems using UTC + as the local time zone. Resolves #13. +* Fix == methods raising an exception when passed an instance of a different + class by making <=> return nil if passed a non-comparable argument. +* Eliminate "require 'rational'" warnings. Resolves #10. +* Eliminate "assigned but unused variable - info" warnings. Resolves #11. +* Switch to minitest v5 for unit tests. Resolves #18. + + +Version 1.1.0 - 25-Sep-2013 +--------------------------- + +* TZInfo is now thread safe. ThreadSafe::Cache is now used instead of Hash + to cache Timezone and Country instances returned by Timezone.get and + Country.get. The tzinfo gem now depends on thread_safe ~> 0.1. +* Added a transitions_up_to method to Timezone that returns a list of the times + where the UTC offset of the timezone changes. +* Added an offsets_up_to method to Timezone that returns the set of offsets + that have been observed in a defined timezone. +* Fixed a "can't modify frozen String" error when loading a Timezone from a + zoneinfo file using an identifier String that is both tainted and frozen. + Resolves #3. +* Support TZif3 format zoneinfo files (now produced by zic from tzcode version + 2013e onwards). +* Support using YARD to generate documentation (added a .yardopts file). +* Ignore the +VERSION file included in the zoneinfo directory on Mac OS X. +* Added a note to the documentation concerning 32-bit zoneinfo files (as + included with Mac OS X). + + +Version 1.0.1 - 22-Jun-2013 +--------------------------- + +* Fix a test case failure when tests are run from a directory that contains a + dot in the path (issue #29751). + + +Version 1.0.0 - 2-Jun-2013 +-------------------------- + +* Allow TZInfo to be used with different data sources instead of just the + built-in Ruby module data files. +* Include a data source that allows TZInfo to load data from the binary + zoneinfo files produced by zic and included with many Linux and Unix-like + distributions. +* Remove the definition and index Ruby modules from TZInfo and move them into + a separate TZInfo::Data library (available as the tzinfo-data gem). +* Default to using the TZInfo::Data library as the data source if it is + installed, otherwise use zoneinfo files instead. +* Preserve the nanoseconds of local timezone Time objects when performing + conversions (issue #29705). +* Don't add the tzinfo lib directory to the search path when requiring 'tzinfo'. 
+ The tzinfo lib directory must now be in the search path before 'tzinfo' is + required. +* Add utc_start_time, utc_end_time, local_start_time and local_end_time instance + methods to TimezonePeriod. These return an identical value as the existing + utc_start, utc_end, local_start and local_end methods, but return Time + instances instead of DateTime. +* Make the start_transition, end_transition and offset properties of + TimezonePeriod protected. To access properties of the period, callers should + use other TimezonePeriod instance methods instead (issue #7655). + + +Version 0.3.53 (tzdata v2017b) - 23-Mar-2017 +-------------------------------------------- + +* Updated to tzdata version 2017b + (https://mm.icann.org/pipermail/tz-announce/2017-March/000046.html). + + +Version 0.3.52 (tzdata v2016h) - 28-Oct-2016 +-------------------------------------------- + +* Updated to tzdata version 2016h + (https://mm.icann.org/pipermail/tz-announce/2016-October/000042.html). + + +Version 0.3.51 (tzdata v2016f) - 5-Jul-2016 +------------------------------------------- + +* Updated to tzdata version 2016f + (https://mm.icann.org/pipermail/tz-announce/2016-July/000040.html). + + +Version 0.3.50 (tzdata v2016e) - 14-Jun-2016 +-------------------------------------------- + +* Updated to tzdata version 2016e + (https://mm.icann.org/pipermail/tz-announce/2016-June/000039.html). + + +Version 0.3.49 (tzdata v2016d) - 18-Apr-2016 +-------------------------------------------- + +* Updated to tzdata version 2016d + (https://mm.icann.org/pipermail/tz-announce/2016-April/000038.html). + + +Version 0.3.48 (tzdata v2016c) - 23-Mar-2016 +-------------------------------------------- + +* Updated to tzdata version 2016c + (https://mm.icann.org/pipermail/tz-announce/2016-March/000037.html). + + +Version 0.3.47 (tzdata v2016b) - 15-Mar-2016 +-------------------------------------------- + +* Updated to tzdata version 2016b + (https://mm.icann.org/pipermail/tz-announce/2016-March/000036.html). + + +Version 0.3.46 (tzdata v2015g) - 2-Dec-2015 +------------------------------------------- + +* From version 2015e, the IANA time zone database uses non-ASCII characters in + country names. Backport the encoding handling from TZInfo::Data to allow + TZInfo 0.3.x to support Ruby 1.9 (which would otherwise fail with an invalid + byte sequence error when loading the countries index). Resolves #41. + + +Version 0.3.45 (tzdata v2015g) - 3-Oct-2015 +------------------------------------------- + +* Updated to tzdata version 2015g + (https://mm.icann.org/pipermail/tz-announce/2015-October/000034.html). + + +Version 0.3.44 (tzdata v2015d) - 24-Apr-2015 +-------------------------------------------- + +* Updated to tzdata version 2015d + (https://mm.icann.org/pipermail/tz-announce/2015-April/000031.html). + + +Version 0.3.43 (tzdata v2015a) - 31-Jan-2015 +-------------------------------------------- + +* Updated to tzdata version 2015a + (https://mm.icann.org/pipermail/tz-announce/2015-January/000028.html). + + +Version 0.3.42 (tzdata v2014i) - 23-Oct-2014 +-------------------------------------------- + +* Updated to tzdata version 2014i + (https://mm.icann.org/pipermail/tz-announce/2014-October/000026.html). + + +Version 0.3.41 (tzdata v2014f) - 8-Aug-2014 +------------------------------------------- + +* Updated to tzdata version 2014f + (https://mm.icann.org/pipermail/tz-announce/2014-August/000023.html). 
+ + +Version 0.3.40 (tzdata v2014e) - 10-Jul-2014 +-------------------------------------------- + +* Updated to tzdata version 2014e + (https://mm.icann.org/pipermail/tz-announce/2014-June/000022.html). + + +Version 0.3.39 (tzdata v2014a) - 9-Mar-2014 +------------------------------------------- + +* Updated to tzdata version 2014a + (https://mm.icann.org/pipermail/tz-announce/2014-March/000018.html). + + +Version 0.3.38 (tzdata v2013g) - 8-Oct-2013 +------------------------------------------- + +* Updated to tzdata version 2013g + (https://mm.icann.org/pipermail/tz-announce/2013-October/000015.html). + + +Version 0.3.37 (tzdata v2013b) - 11-Mar-2013 +-------------------------------------------- + +* Updated to tzdata version 2013b + (https://mm.icann.org/pipermail/tz-announce/2013-March/000010.html). + + +Version 0.3.36 (tzdata v2013a) - 3-Mar-2013 +------------------------------------------- + +* Updated to tzdata version 2013a + (https://mm.icann.org/pipermail/tz-announce/2013-March/000009.html). +* Fix TimezoneTransitionInfo#eql? incorrectly returning false when running on + Ruby 2.0. +* Change eql? and == implementations to test the class of the passed in object + instead of checking individual properties with 'respond_to?'. + + +Version 0.3.35 (tzdata v2012i) - 4-Nov-2012 +------------------------------------------- + +* Updated to tzdata version 2012i + (https://mm.icann.org/pipermail/tz-announce/2012-November/000007.html). + + +Version 0.3.34 (tzdata v2012h) - 27-Oct-2012 +-------------------------------------------- + +* Updated to tzdata version 2012h + (https://mm.icann.org/pipermail/tz-announce/2012-October/000006.html). + + +Version 0.3.33 (tzdata v2012c) - 8-Apr-2012 +------------------------------------------- + +* Updated to tzdata version 2012c + (https://mm.icann.org/pipermail/tz/2012-April/017627.html). + + +Version 0.3.32 (tzdata v2012b) - 4-Mar-2012 +------------------------------------------- + +* Updated to tzdata version 2012b + (https://mm.icann.org/pipermail/tz/2012-March/017524.html). + + +Version 0.3.31 (tzdata v2011n) - 6-Nov-2011 +------------------------------------------- + +* Updated to tzdata version 2011n + (https://mm.icann.org/pipermail/tz/2011-October/017201.html). + + +Version 0.3.30 (tzdata v2011k) - 29-Sep-2011 +-------------------------------------------- + +* Updated to tzdata version 2011k + (https://mm.icann.org/pipermail/tz/2011-September/008889.html). + + +Version 0.3.29 (tzdata v2011h) - 27-Jun-2011 +-------------------------------------------- + +* Updated to tzdata version 2011h + (https://mm.icann.org/pipermail/tz/2011-June/008576.html). +* Allow the default value of the local_to_utc and period_for_local dst + parameter to be specified globally with a Timezone.default_dst attribute. + Thanks to Kurt Werle for the suggestion and patch. + + + Version 0.3.28 (tzdata v2011g) - 13-Jun-2011 +--------------------------------------------- + +* Add support for Ruby 1.9.3 (trunk revision 31668 and later). Thanks to + Aaron Patterson for reporting the problems running on the new version. + Closes #29233. + + +Version 0.3.27 (tzdata v2011g) - 26-Apr-2011 +-------------------------------------------- + +* Updated to tzdata version 2011g + (https://mm.icann.org/pipermail/tz/2011-April/016875.html). + + +Version 0.3.26 (tzdata v2011e) - 2-Apr-2011 +------------------------------------------- + +* Updated to tzdata version 2011e + (https://mm.icann.org/pipermail/tz/2011-April/016809.html). 
+ + +Version 0.3.25 (tzdata v2011d) - 14-Mar-2011 +-------------------------------------------- + +* Updated to tzdata version 2011d + (https://mm.icann.org/pipermail/tz/2011-March/016746.html). + + +Version 0.3.24 (tzdata v2010o) - 15-Jan-2011 +-------------------------------------------- + +* Updated to tzdata version 2010o + (https://mm.icann.org/pipermail/tz/2010-November/016517.html). + + +Version 0.3.23 (tzdata v2010l) - 19-Aug-2010 +-------------------------------------------- + +* Updated to tzdata version 2010l + (https://mm.icann.org/pipermail/tz/2010-August/016360.html). + + +Version 0.3.22 (tzdata v2010j) - 29-May-2010 +-------------------------------------------- + +* Corrected file permissions issue with 0.3.21 release. + + +Version 0.3.21 (tzdata v2010j) - 28-May-2010 +-------------------------------------------- + +* Updated to tzdata version 2010j + (https://mm.icann.org/pipermail/tz/2010-May/016211.html). +* Change invalid timezone check to exclude characters not used in timezone + identifiers and avoid 'character class has duplicated range' warnings with + Ruby 1.9.2. +* Ruby 1.9.2 has deprecated "require 'rational'", but older versions of + Ruby need rational to be required. Require rational only when the Rational + module has not already been loaded. +* Remove circular requires (now a warning in Ruby 1.9.2). Instead of using + requires in each file for dependencies, tzinfo.rb now requires all tzinfo + files. If you were previously requiring files within the tzinfo directory + (e.g. require 'tzinfo/timezone'), then you will now have to + require 'tzinfo' instead. + + +Version 0.3.20 (tzdata v2010i) - 19-Apr-2010 +-------------------------------------------- + +* Updated to tzdata version 2010i + (https://mm.icann.org/pipermail/tz/2010-April/016184.html). + + +Version 0.3.19 (tzdata v2010h) - 5-Apr-2010 +------------------------------------------- + +* Updated to tzdata version 2010h + (https://mm.icann.org/pipermail/tz/2010-April/016161.html). + + +Version 0.3.18 (tzdata v2010g) - 29-Mar-2010 +-------------------------------------------- + +* Updated to tzdata version 2010g + (https://mm.icann.org/pipermail/tz/2010-March/016140.html). + + +Version 0.3.17 (tzdata v2010e) - 8-Mar-2010 +------------------------------------------- + +* Updated to tzdata version 2010e + (https://mm.icann.org/pipermail/tz/2010-March/016088.html). + + +Version 0.3.16 (tzdata v2009u) - 5-Jan-2010 +------------------------------------------- + +* Support the use of '-' to denote '0' as an offset in the tz data files. + Used for the first time in the SAVE field in tzdata v2009u. +* Updated to tzdata version 2009u + (https://mm.icann.org/pipermail/tz/2009-December/016001.html). + + +Version 0.3.15 (tzdata v2009p) - 26-Oct-2009 +-------------------------------------------- + +* Updated to tzdata version 2009p + (https://mm.icann.org/pipermail/tz/2009-October/015889.html). +* Added a description to the gem spec. +* List test files in test_files instead of files in the gem spec. + + +Version 0.3.14 (tzdata v2009l) - 19-Aug-2009 +-------------------------------------------- + +* Updated to tzdata version 2009l + (https://mm.icann.org/pipermail/tz/2009-August/015729.html). +* Include current directory in load path to allow running tests on + Ruby 1.9.2, which doesn't include it by default any more. + + +Version 0.3.13 (tzdata v2009f) - 15-Apr-2009 +-------------------------------------------- + +* Updated to tzdata version 2009f + (https://mm.icann.org/pipermail/tz/2009-April/015544.html). 
+* Untaint the timezone module filename after validation to allow use + with $SAFE == 1 (e.g. under mod_ruby). Thanks to Dmitry Borodaenko for + the suggestion. Closes #25349. + + +Version 0.3.12 (tzdata v2008i) - 12-Nov-2008 +-------------------------------------------- + +* Updated to tzdata version 2008i + (https://mm.icann.org/pipermail/tz/2008-October/015260.html). + + +Version 0.3.11 (tzdata v2008g) - 7-Oct-2008 +------------------------------------------- + +* Updated to tzdata version 2008g + (https://mm.icann.org/pipermail/tz/2008-October/015139.html). +* Support Ruby 1.9.0-5. Rational.new! has now been removed in Ruby 1.9. + Only use Rational.new! if it is available (it is preferable in Ruby 1.8 + for performance reasons). Thanks to Jeremy Kemper and Pratik Naik for + reporting this. Closes #22312. +* Apply a patch from Pratik Naik to replace assert calls that have been + deprecated in the Ruby svn trunk. Closes #22308. + + +Version 0.3.10 (tzdata v2008f) - 16-Sep-2008 +-------------------------------------------- + +* Updated to tzdata version 2008f + (https://mm.icann.org/pipermail/tz/2008-September/015090.html). + + +Version 0.3.9 (tzdata v2008c) - 27-May-2008 +------------------------------------------- + +* Updated to tzdata version 2008c + (https://mm.icann.org/pipermail/tz/2008-May/014956.html). +* Support loading timezone data in the latest trunk versions of Ruby 1.9. + Rational.new! is now private, so call it using Rational.send :new! instead. + Thanks to Jeremy Kemper and Pratik Naik for spotting this. Closes #19184. +* Prevent warnings from being output when running Ruby with the -v or -w + command line options. Thanks to Paul McMahon for the patch. Closes #19719. + + +Version 0.3.8 (tzdata v2008b) - 24-Mar-2008 +------------------------------------------- + +* Updated to tzdata version 2008b + (https://mm.icann.org/pipermail/tz/2008-March/014910.html). +* Support loading timezone data in Ruby 1.9.0. Use DateTime.new! if it is + available instead of DateTime.new0 when constructing transition times. + DateTime.new! was added in Ruby 1.8.6. DateTime.new0 was removed in + Ruby 1.9.0. Thanks to Joshua Peek for reporting this. Closes #17606. +* Modify some of the equality test cases to cope with the differences + between Ruby 1.8.6 and Ruby 1.9.0. + + +Version 0.3.7 (tzdata v2008a) - 10-Mar-2008 +------------------------------------------- + +* Updated to tzdata version 2008a + (https://mm.icann.org/pipermail/tz/2008-March/014851.html). + + +Version 0.3.6 (tzdata v2007k) - 1-Jan-2008 +------------------------------------------ + +* Updated to tzdata version 2007k + (https://mm.icann.org/pipermail/tz/2007-December/014765.html). +* Removed deprecated RubyGems autorequire option. + + +Version 0.3.5 (tzdata v2007h) - 1-Oct-2007 +------------------------------------------ + +* Updated to tzdata version 2007h + (https://mm.icann.org/pipermail/tz/2007-October/014585.html). + + +Version 0.3.4 (tzdata v2007g) - 21-Aug-2007 +------------------------------------------- + +* Updated to tzdata version 2007g + (https://mm.icann.org/pipermail/tz/2007-August/014499.html). + + +Version 0.3.3 (tzdata v2006p) - 27-Nov-2006 +------------------------------------------- + +* Updated to tzdata version 2006p + (https://mm.icann.org/pipermail/tz/2006-November/013999.html). + + +Version 0.3.2 (tzdata v2006n) - 11-Oct-2006 +------------------------------------------- + +* Updated to tzdata version 2006n + (https://mm.icann.org/pipermail/tz/2006-October/013911.html). 
Note that this release of + tzdata removes the country Serbia and Montenegro (CS) and replaces it with + separate Serbia (RS) and Montenegro (ME) entries. + + +Version 0.3.1 (tzdata v2006j) - 21-Aug-2006 +------------------------------------------- + +* Remove colon from case statements to avoid warning in Ruby 1.8.5. #5198. +* Use temporary variable to avoid dynamic string warning from rdoc. +* Updated to tzdata version 2006j + (https://mm.icann.org/pipermail/tz/2006-August/013767.html). + + +Version 0.3.0 (tzdata v2006g) - 17-Jul-2006 +------------------------------------------- + +* New timezone data format. Timezone data now occupies less space on disk and + takes less memory once loaded. #4142, #4144. +* Timezone data is defined in modules rather than classes. Timezone instances + returned by Timezone.get are no longer instances of data classes, but are + instead instances of new DataTimezone and LinkedTimezone classes. +* Timezone instances can now be used with Marshal.dump and Marshal.load. #4240. +* Added a Timezone.get_proxy method that returns a TimezoneProxy object for a + given identifier. +* Country index data is now defined in a single module that is independent + of the Country class implementation. +* Country instances can now be used with Marshal.dump and Marshal.load. #4240. +* Country has a new zone_info method that returns CountryTimezone objects + containing additional information (latitude, longitude and a description) + relating to each Timezone. #4140. +* Timezones within a Country are now returned in an order that makes + geographic sense. +* The zdumptest utility now checks local to utc conversions in addition to + utc to local conversions. +* eql? method defined on Country and Timezone that is equivalent to ==. +* The == method of Timezone no longer raises an exception when passed an object + with no identifier method. +* The == method of Country no longer raises an exception when passed an object + with no code method. +* hash method defined on Country that returns the hash of the code. +* hash method defined on Timezone that returns the hash of the identifier. +* Miscellaneous API documentation corrections and improvements. +* Timezone definition and indexes are now excluded from rdoc (the contents were + previously ignored with #:nodoc: anyway). +* Removed no longer needed #:nodoc: directives from timezone data files (which + are now excluded from the rdoc build). +* Installation of the gem now causes rdoc API documentation to be generated. + #4905. +* When optimizing transitions to generate zone definitions, check the + UTC and standard offsets separately rather than just the total offset to UTC. + Fixes an incorrect abbreviation issue with Europe/London, Europe/Dublin and + Pacific/Auckland. +* Eliminated unnecessary .nil? calls to give a minor performance gain. +* Timezone.all and Timezone.all_identifiers now return all the + Timezones/identifiers rather than just those associated with countries. #4146. +* Added all_data_zones, all_data_zone_identifiers, all_linked_zones and + all_linked_zone_identifiers class methods to Timezone. +* Added a strftime method to Timezone that converts a time in UTC to local + time and then returns it formatted. %Z is replaced with the Timezone + abbreviation for the given time (for example, EST or EDT). #4143. +* Fix escaping of quotes in TZDataParser. This affected country names and + descriptions of timezones within countries. 
+ + +Version 0.2.2 (tzdata v2006g) - 17-May-2006 +------------------------------------------- + +* Use class-scoped instance variables to store the Timezone identifier and + singleton instance. Loading a linked zone no longer causes the parent + zone's identifier to be changed. The instance method of a linked zone class + also now returns an instance of the linked zone class rather than the parent + class. #4502. +* The zdumptest utility now compares the TZInfo zone identifier with the zdump + zone identifier. +* The zdumptestall utility now exits if not supplied with enough parameters. +* Updated to tzdata version 2006g + (https://mm.icann.org/pipermail/tz/2006-May/013590.html). + + +Version 0.2.1 (tzdata v2006d) - 17-Apr-2006 +------------------------------------------- + +* Fix a performance issue caused in 0.2.0 with Timezone.local_to_utc. + Conversions performed on TimeOrDateTime instances passed to <=> are now + cached as originally intended. Thanks to Michael Smedberg for spotting this. +* Fix a performance issue with the local_to_utc period search algorithm + originally implemented in 0.1.0. The condition that was supposed to cause + the search to terminate when enough periods had been found was only being + evaluated in a small subset of cases. Thanks to Michael Smedberg and + Jamis Buck for reporting this. +* Added abbreviation as an alias for TimezonePeriod.zone_identifier. +* Updated to tzdata version 2006d + (https://mm.icann.org/pipermail/tz/2006-April/013517.html). +* Ignore any offset in DateTimes passed in (as is already done for Times). + All of the following now refer to the same UTC time (15:40 on 17 April 2006). + Previously, the DateTime in the second line would have been interpreted + as 20:40. + + tz.utc_to_local(DateTime.new(2006, 4, 17, 15, 40, 0)) + tz.utc_to_local(DateTime.new(2006, 4, 17, 15, 40, 0).new_offset(Rational(5, 24))) + tz.utc_to_local(Time.utc(2006, 4, 17, 15, 40, 0)) + tz.utc_to_local(Time.local(2006, 4, 17, 15, 40, 0)) + + +Version 0.2.0 (tzdata v2006c) - 3-Apr-2006 +------------------------------------------ + +* Use timestamps rather than DateTime objects in zone files for times between + 1970 and 2037 (the range of Time). +* Don't convert passed in Time objects to DateTime in most cases (provides + a substantial performance improvement). +* Allow integer timestamps (time in seconds since 1970-1-1) to be used as well + as Time and DateTime objects in all public methods that take times as + parameters. +* Tool to compare TZInfo conversions with output from zdump. +* TZDataParser zone generation algorithm rewritten. Now based on the zic code. + TZInfo is now 100% compatible with zic/zdump output. +* Riyadh Solar Time zones now included again (generation time has been reduced + with TZDataParser changes). +* Use binary mode when writing zone and country files to get Unix (\n) new + lines. +* Omit unnecessary quotes in zone identifier symbols. +* Omit the final transition to DST if there is a prior transition in the last + year processed to standard time. +* Updated to tzdata version 2006c + (https://mm.icann.org/pipermail/tz/2006-April/013500.html). + + +Version 0.1.2 (tzdata v2006a) - 5-Feb-2006 +------------------------------------------ + +* Add lib directory to the load path when tzinfo is required. Makes it easier + to use tzinfo gem when unpacked to vendor directory in rails. +* Updated to tzdata version 2006a + (https://mm.icann.org/pipermail/tz/2006-January/013311.html). 
+* build_tz_classes rake task now handles running svn add and svn delete as new + timezones and countries are added and old ones are removed. +* Return a better error when attempting to use a Timezone instance that was + constructed with Timezone.new(nil). This will occur when using Rails' + composed_of. When the timezone identifier in the database is null, attempting + to use the Timezone will now result in an UnknownTimezone exception rather + than a NameError. + + +Version 0.1.1 (tzdata v2005q) - 18-Dec-2005 +------------------------------------------- + +* Timezones that are defined by a single unbounded period (e.g. UTC) now + work again. +* Updated to tzdata version 2005q. + + +Version 0.1.0 (tzdata v2005n) - 27-Nov-2005 +------------------------------------------- + +* period_for_local and local_to_utc now allow resolution of ambiguous + times (e.g. when switching from daylight savings to standard time). + The behaviour of these methods when faced with an ambiguous local time + has now changed. If you are using these methods you should check + the documentation. Thanks to Cliff Matthews for suggesting this change. +* Added require 'date' to timezone.rb (date isn't loaded by default in all + environments). +* Use rake to build packages and documentation. +* License file is now included in gem distribution. +* Dates in definitions stored as Astronomical Julian Day numbers rather than + as civil dates (improves performance creating DateTime instances). +* Added options to TZDataParser to allow generation of specific zones and + countries. +* Moved TimezonePeriod class to timezone_period.rb. +* New TimezonePeriodList class to store TimezonePeriods for a timezone and + perform searches for periods. +* Timezones now defined using blocks. TimezonePeriods are only instantiated + when they are needed. Thanks to Jamis Buck for the suggestion. +* Add options to TZDataParser to allow exclusion of specific zones and + countries. +* Exclude the Riyadh Solar Time zones. The rules are only for 1987 to 1989 and + take a long time to generate and process. Riyadh Solar Time is no longer + observed. +* The last TimezonePeriod for each Timezone is now written out with an + unbounded rather than arbitrary end time. +* Construct the Rational offset in TimezonePeriod once when the TimezonePeriod + is constructed rather than each time it is needed. +* Timezone and Country now keep a cache of loaded instances to avoid running + require which can be slow on some platforms. +* Updated to tzdata version 2005n. + + +Version 0.0.4 (tzdata v2005m) - 18-Sep-2005 +------------------------------------------- + +* Removed debug output accidentally included in the previous release. +* Fixed a bug in the generation of friendly zone identifiers (was inserting + apostrophes into UTC, GMT, etc). +* Fixed Country <=> operator (was comparing non-existent attribute) +* Fixed Timezone.period_for_local error when period not found. +* Added testcases for Timezone, TimezoneProxy, TimezonePeriod, Country and + some selected timezones. + + +Version 0.0.3 (tzdata v2005m) - 17-Sep-2005 +------------------------------------------- + +* Reduced visibility of some methods added in Timezone#setup and Country#setup. +* Added name method to Timezone (returns the identifier). +* Added friendly_identifier method to Timezone. Returns a more friendly version + of the identifier. +* Added to_s method to Timezone. Returns the friendly identifier. +* Added == and <=> operators to Timezone (compares identifiers). 
+* Timezone now includes Comparable. +* Added to_s method to Country. +* Added == and <=> operators to Country (compares ISO 3166 country codes). +* Country now includes Comparable. +* New TimezoneProxy class that behaves the same as a Timezone but doesn't + actually load in its definition until it is actually required. +* Modified Timezone and Country methods that return Timezones to return + TimezoneProxy instances instead. This makes these methods much quicker. + + In Ruby on Rails, you can now show a drop-down list of all timezones using the + Rails time_zone_select helper method: + + <%= time_zone_select 'user', 'time_zone', TZInfo::Timezone.all.sort, :model => TZInfo::Timezone %> + + +Version 0.0.2 (tzdata v2005m) - 13-Sep-2005 +------------------------------------------- + +* Country and Timezone data is now loaded into class rather than instance + variables. This makes Timezone links more efficient and saves memory if + creating specific Timezone and Country classes directly. +* TimezonePeriod zone_identifier is now defined as a symbol to save memory + (was previously a string). +* TimezonePeriod zone_identifiers that were previously '' are now :Unknown. +* Timezones and Countries can now be returned using Timezone.new(identifier) + and Country.new(identifier). When passed an identifier, the new method + calls get to return an instance of the specified timezone or country. +* Added new class methods to Timezone to return sets of zones and identifiers. + +Thanks to Scott Barron of Lunchbox Software for the suggestions in his +article about using TZInfo with Rails +(https://web.archive.org/web/20060425190845/http://lunchroom.lunchboxsoftware.com/pages/tzinfo_rails) + + +Version 0.0.1 (tzdata v2005m) - 29-Aug-2005 +------------------------------------------- + +* First release. diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/LICENSE b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/LICENSE new file mode 100644 index 0000000..6e74e29 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2005-2020 Philip Ross + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/README.md b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/README.md new file mode 100644 index 0000000..50766f8 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/README.md @@ -0,0 +1,152 @@ +TZInfo - Ruby Timezone Library +============================== + +[![RubyGems](https://img.shields.io/gem/v/tzinfo)](https://rubygems.org/gems/tzinfo) [![Travis CI Build](https://img.shields.io/travis/com/tzinfo/tzinfo/1.2?logo=travis)](https://travis-ci.com/tzinfo/tzinfo) [![AppVeyor Build](https://img.shields.io/appveyor/build/philr/tzinfo/1.2?logo=appveyor)](https://ci.appveyor.com/project/philr/tzinfo/branch/1.2) + +[TZInfo](https://tzinfo.github.io) provides daylight savings aware +transformations between times in different timezones. + + +Data Sources +------------ + +TZInfo requires a source of timezone data. There are two built-in options: + +1. The TZInfo::Data library (the tzinfo-data gem). TZInfo::Data contains a set + of Ruby modules that are generated from the [IANA Time Zone Database](https://www.iana.org/time-zones). +2. A zoneinfo directory. Most Unix-like systems include a zoneinfo directory + containing timezone definitions. These are also generated from the + [IANA Time Zone Database](https://www.iana.org/time-zones). + +By default, TZInfo::Data will be used. If TZInfo::Data is not available (i.e. +if `require 'tzinfo/data'` fails), then TZInfo will search for a zoneinfo +directory instead (using the search path specified by +`TZInfo::ZoneinfoDataSource::DEFAULT_SEARCH_PATH`). + +If no data source can be found, a `TZInfo::DataSourceNotFound` exception will be +raised when TZInfo is used. Further information is available +[in the wiki](https://tzinfo.github.io/datasourcenotfound) to help with +resolving `TZInfo::DataSourceNotFound` errors. + +The default data source selection can be overridden using +`TZInfo::DataSource.set`. + +Custom data sources can also be used. See `TZInfo::DataSource.set` for +further details. + + +Installation +------------ + +The TZInfo gem can be installed by running: + + gem install tzinfo + +To use the Ruby modules as the data source, TZInfo::Data will also need to be +installed: + + gem install tzinfo-data + + +Example Usage +------------- + +The following code will obtain the America/New_York timezone (as an instance +of `TZInfo::Timezone`) and convert a time in UTC to local New York time: + + require 'tzinfo' + + tz = TZInfo::Timezone.get('America/New_York') + local = tz.utc_to_local(Time.utc(2005,8,29,15,35,0)) + +Note that the local Time returned will have a UTC timezone (`local.zone` will +return `"UTC"`). This is because the Time class in older (but still supported by +TZInfo) versions of Ruby can only handle two timezones: UTC and the system local +timezone. + +To convert from a local time to UTC, the `local_to_utc` method can be used as +follows: + + utc = tz.local_to_utc(local) + +Note that the timezone information of the local Time object is ignored (TZInfo +will just read the date and time and treat them as if they were in the `tz` +timezone). The following two lines will return the same result regardless of +the system's local timezone: + + tz.local_to_utc(Time.local(2006,6,26,1,0,0)) + tz.local_to_utc(Time.utc(2006,6,26,1,0,0)) + +To obtain information about the rules in force at a particular UTC or local +time, the `TZInfo::Timezone.period_for_utc` and +`TZInfo::Timezone.period_for_local` methods can be used. Both of these methods +return `TZInfo::TimezonePeriod` objects.
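+As a brief sketch of what those period objects expose (this reuses the
+America/New_York `tz` instance from the examples above; the sample local time
+and the results shown in the comments are illustrative), `period_for_local`
+can be used to check whether daylight savings was in force at a local time and
+to read the total offset from UTC in seconds:
+
+    period = tz.period_for_local(Time.utc(2005,8,29,11,35,0))
+    period.dst?             # => true
+    period.utc_total_offset # => -14400 (UTC-4, Eastern Daylight Time)
+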
The following gets the identifier for +the period (in this case EDT). + + period = tz.period_for_utc(Time.utc(2005,8,29,15,35,0)) + id = period.zone_identifier + +The current local time in a `Timezone` can be obtained with the +`TZInfo::Timezone#now` method: + + now = tz.now + +All methods in TZInfo that operate on a time can be used with either `Time` or +`DateTime` instances or with Integer timestamps (i.e. as returned by +`Time#to_i`). The type of the values returned will match the type passed in. + +A list of all the available timezone identifiers can be obtained using the +`TZInfo::Timezone.all_identifiers` method. `TZInfo::Timezone.all` can be called +to get an `Array` of all the `TZInfo::Timezone` instances. + +Timezones can also be accessed by country (using an ISO 3166-1 alpha-2 country +code). The following code retrieves the `TZInfo::Country` instance representing +the USA (country code 'US') and then gets all the timezone identifiers used in +the USA. + + us = TZInfo::Country.get('US') + timezones = us.zone_identifiers + +The `TZInfo::Country#zone_info` method provides an additional description and +geographic location for each timezone in a country. + +A list of all the available country codes can be obtained using the +`TZInfo::Country.all_codes` method. `TZInfo::Country.all` can be called to get +an `Array` of all the `Country` instances. + +For further detail, please refer to the API documentation for the +`TZInfo::Timezone` and `TZInfo::Country` classes. + + +Thread-Safety +------------- + +The `TZInfo::Country` and `TZInfo::Timezone` classes are thread-safe. It is safe +to use class and instance methods of `TZInfo::Country` and `TZInfo::Timezone` in +concurrently executing threads. Instances of both classes can be shared across +thread boundaries. + + +Documentation +------------- + +API documentation for TZInfo is available on [RubyDoc.info](https://rubydoc.info/gems/tzinfo/frames). + + +License +------- + +TZInfo is released under the MIT license, see LICENSE for details. + + +Source Code +----------- + +Source code for TZInfo is available on [GitHub](https://github.com/tzinfo/tzinfo). + + +Issue Tracker +------------- + +Please post any bugs, issues, feature requests or questions to the +[GitHub issue tracker](https://github.com/tzinfo/tzinfo/issues). diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/Rakefile b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/Rakefile new file mode 100644 index 0000000..3cabb07 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/Rakefile @@ -0,0 +1,107 @@ +require 'rubygems' +require 'rubygems/package_task' +require 'fileutils' +require 'rake/testtask' + +# Ignore errors loading rdoc/task (the rdoc tasks will be excluded if +# rdoc is unavailable). +begin + require 'rdoc/task' +rescue LoadError, RuntimeError +end + +BASE_DIR = File.expand_path(File.dirname(__FILE__)) + +task :default => [:test] + +spec = eval(File.read('tzinfo.gemspec')) + +class TZInfoPackageTask < Gem::PackageTask + alias_method :orig_sh, :sh + private :orig_sh + + def sh(*cmd, &block) + if cmd[0] == '__tar_with_owner__' && cmd[1] =~ /\A-?[zjcvf]+\z/ + opts = cmd[1] + cmd = ['tar', 'c', '--owner', '0', '--group', '0', "#{opts.start_with?('-') ? 
'' : '-'}#{opts.gsub('c', '')}"] + cmd.drop(2) + elsif cmd.first =~ /\A__tar_with_owner__ -?([zjcvf]+)(.*)\z/ + opts = $1 + args = $2 + cmd[0] = "tar c --owner 0 --group 0 -#{opts.gsub('c', '')}#{args}" + end + + orig_sh(*cmd, &block) + end +end + +def add_signing_key(spec) + # Attempt to find the private key and add options to sign the gem if found. + private_key_path = File.expand_path(File.join(BASE_DIR, '..', 'key', 'gem-private_key.pem')) + + if File.exist?(private_key_path) + spec = spec.clone + spec.signing_key = private_key_path + spec.cert_chain = [File.join(BASE_DIR, 'gem-public_cert.pem')] + else + puts 'WARNING: Private key not found. Not signing gem file.' + end + + spec +end + +package_task = TZInfoPackageTask.new(add_signing_key(spec)) do |pkg| + pkg.need_zip = true + pkg.need_tar_gz = true + pkg.tar_command = '__tar_with_owner__' +end + +# Skip the rdoc task if RDoc::Task is unavailable +if defined?(RDoc) && defined?(RDoc::Task) + RDoc::Task.new do |rdoc| + rdoc.rdoc_dir = 'doc' + rdoc.options.concat spec.rdoc_options + rdoc.rdoc_files.include(spec.extra_rdoc_files) + rdoc.rdoc_files.include('lib') + end +end + +Rake::Task[package_task.package_dir_path].enhance do + recurse_chmod(package_task.package_dir_path) +end + +Rake::Task[:package].enhance do + FileUtils.rm_rf(package_task.package_dir_path) +end + +def recurse_chmod(dir) + File.chmod(0755, dir) + + Dir.entries(dir).each do |entry| + if entry != '.' && entry != '..' + path = File.join(dir, entry) + if File.directory?(path) + recurse_chmod(path) + else + File.chmod(0644, path) + end + end + end +end + +desc 'Run tests using RubyDataSource, then ZoneinfoDataSource' +task :test => [:test_ruby, :test_zoneinfo] do +end + +def setup_tests(test_task, type) + test_task.libs = [File.join(BASE_DIR, 'lib')] + test_task.pattern = File.join(BASE_DIR, 'test', "ts_all_#{type}.rb") + test_task.warning = true +end + +Rake::TestTask.new(:test_ruby) do |t| + setup_tests(t, :ruby) +end + +Rake::TestTask.new(:test_zoneinfo) do |t| + setup_tests(t, :zoneinfo) +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo.rb new file mode 100644 index 0000000..7a11ef7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo.rb @@ -0,0 +1,43 @@ +# Top level module for TZInfo. 
+module TZInfo +end + +require 'tzinfo/ruby_core_support' +require 'tzinfo/offset_rationals' +require 'tzinfo/time_or_datetime' + +require 'tzinfo/timezone_definition' + +require 'tzinfo/timezone_offset' +require 'tzinfo/timezone_transition' +require 'tzinfo/transition_rule' +require 'tzinfo/annual_rules' +require 'tzinfo/timezone_transition_definition' + +require 'tzinfo/timezone_index_definition' + +require 'tzinfo/timezone_info' +require 'tzinfo/data_timezone_info' +require 'tzinfo/linked_timezone_info' +require 'tzinfo/transition_data_timezone_info' +require 'tzinfo/zoneinfo_timezone_info' + +require 'tzinfo/data_source' +require 'tzinfo/ruby_data_source' +require 'tzinfo/posix_time_zone_parser' +require 'tzinfo/zoneinfo_data_source' + +require 'tzinfo/timezone_period' +require 'tzinfo/timezone' +require 'tzinfo/info_timezone' +require 'tzinfo/data_timezone' +require 'tzinfo/linked_timezone' +require 'tzinfo/timezone_proxy' + +require 'tzinfo/country_index_definition' +require 'tzinfo/country_info' +require 'tzinfo/ruby_country_info' +require 'tzinfo/zoneinfo_country_info' + +require 'tzinfo/country' +require 'tzinfo/country_timezone' diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb new file mode 100644 index 0000000..fec436b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb @@ -0,0 +1,51 @@ +module TZInfo + # A set of rules that define when transitions occur in time zones with + # annually occurring daylight savings time. + # + # @private + class AnnualRules #:nodoc: + # Returned by #transitions. #offset is the TimezoneOffset that applies + # from the UTC TimeOrDateTime #at. #previous_offset is the prior + # TimezoneOffset. + Transition = Struct.new(:offset, :previous_offset, :at) + + # The standard offset that applies when daylight savings time is not in + # force. + attr_reader :std_offset + + # The offset that applies when daylight savings time is in force. + attr_reader :dst_offset + + # The rule that determines when daylight savings time starts. + attr_reader :dst_start_rule + + # The rule that determines when daylight savings time ends. + attr_reader :dst_end_rule + + # Initializes a new {AnnualRules} instance. + def initialize(std_offset, dst_offset, dst_start_rule, dst_end_rule) + @std_offset = std_offset + @dst_offset = dst_offset + @dst_start_rule = dst_start_rule + @dst_end_rule = dst_end_rule + end + + # Returns the transitions between standard and daylight savings time for a + # given year. The results are ordered by time of occurrence (earliest to + # latest). + def transitions(year) + start_dst = apply_rule(@dst_start_rule, @std_offset, @dst_offset, year) + end_dst = apply_rule(@dst_end_rule, @dst_offset, @std_offset, year) + + end_dst.at < start_dst.at ? [end_dst, start_dst] : [start_dst, end_dst] + end + + private + + # Applies a given rule between offsets on a year. + def apply_rule(rule, from_offset, to_offset, year) + at = rule.at(from_offset, year) + Transition.new(to_offset, from_offset, at) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country.rb new file mode 100644 index 0000000..0dccebd --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country.rb @@ -0,0 +1,196 @@ +require 'thread_safe' + +module TZInfo + # Raised by Country#get if the code given is not valid. 
+ class InvalidCountryCode < StandardError + end + + # The Country class represents an ISO 3166-1 country. It can be used to + # obtain a list of Timezones for a country. For example: + # + # us = Country.get('US') + # us.zone_identifiers + # us.zones + # us.zone_info + # + # The Country class is thread-safe. It is safe to use class and instance + # methods of Country in concurrently executing threads. Instances of Country + # can be shared across thread boundaries. + # + # Country information available through TZInfo is intended as an aid for + # users, to help them select time zone data appropriate for their practical + # needs. It is not intended to take or endorse any position on legal or + # territorial claims. + class Country + include Comparable + + # Defined countries. + # + # @!visibility private + @@countries = nil + + # Whether the countries index has been loaded yet. + # + # @!visibility private + @@index_loaded = false + + # Gets a Country by its ISO 3166-1 alpha-2 code. Raises an + # InvalidCountryCode exception if it couldn't be found. + def self.get(identifier) + instance = @@countries[identifier] + + unless instance + # Thread-safety: It is possible that multiple equivalent Country + # instances could be created here in concurrently executing threads. + # The consequences of this are that the data may be loaded more than + # once (depending on the data source) and memoized calculations could + # be discarded. The performance benefit of ensuring that only a single + # instance is created is unlikely to be worth the overhead of only + # allowing one Country to be loaded at a time. + info = data_source.load_country_info(identifier) + instance = Country.new(info) + @@countries[identifier] = instance + end + + instance + end + + # If identifier is a CountryInfo object, initializes the Country instance, + # otherwise calls get(identifier). + def self.new(identifier) + if identifier.kind_of?(CountryInfo) + instance = super() + instance.send :setup, identifier + instance + else + get(identifier) + end + end + + # Returns an Array of all the valid country codes. + def self.all_codes + data_source.country_codes + end + + # Returns an Array of all the defined Countries. + def self.all + data_source.country_codes.collect {|code| get(code)} + end + + # The ISO 3166-1 alpha-2 country code. + def code + @info.code + end + + # The name of the country. + def name + @info.name + end + + # Alias for name. + def to_s + name + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{@info.code}>" + end + + # Returns a frozen array of all the zone identifiers for the country. These + # are in an order that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # Returned zone identifiers may refer to cities and regions outside of the + # country. This will occur if the zone covers multiple countries. Any zones + # referring to a city or region in a different country will be listed after + # those relating to this country. + def zone_identifiers + @info.zone_identifiers + end + alias zone_names zone_identifiers + + # An array of all the Timezones for this country. Returns TimezoneProxy + # objects to avoid the overhead of loading Timezone definitions until + # a conversion is actually required. The Timezones are returned in an order + # that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. 
+ #
+ # Identifiers of the zones returned may refer to cities and regions outside
+ # of the country. This will occur if the zone covers multiple countries. Any
+ # zones referring to a city or region in a different country will be listed
+ # after those relating to this country.
+ def zones
+ zone_identifiers.collect {|id|
+ Timezone.get_proxy(id)
+ }
+ end
+
+ # Returns a frozen array of all the timezones for the country as
+ # CountryTimezone instances (containing extra information about each zone).
+ # These are in an order that
+ #
+ # 1. makes some geographical sense, and
+ # 2. puts the most populous zones first, where that does not contradict 1.
+ #
+ # Identifiers and descriptions of the zones returned may refer to cities and
+ # regions outside of the country. This will occur if the zone covers
+ # multiple countries. Any zones referring to a city or region in a different
+ # country will be listed after those relating to this country.
+ def zone_info
+ @info.zones
+ end
+
+ # Compare two Countries based on their code. Returns -1 if self is less
+ # than c, 0 if self is equal to c and +1 if self is greater than c.
+ #
+ # Returns nil if c is not comparable with Country instances.
+ def <=>(c)
+ return nil unless c.is_a?(Country)
+ code <=> c.code
+ end
+
+ # Returns true if and only if the code of c is equal to the code of this
+ # Country.
+ def eql?(c)
+ self == c
+ end
+
+ # Returns a hash value for this Country.
+ def hash
+ code.hash
+ end
+
+ # Dumps this Country for marshalling.
+ def _dump(limit)
+ code
+ end
+
+ # Loads a marshalled Country.
+ def self._load(data)
+ Country.get(data)
+ end
+
+ private
+ # Called by Country.new to initialize a new Country instance. The info
+ # parameter is a CountryInfo that defines the country.
+ def setup(info)
+ @info = info
+ end
+
+ # Initializes @@countries.
+ def self.init_countries
+ @@countries = ThreadSafe::Cache.new
+ end
+ init_countries
+
+ # Returns the current DataSource
+ def self.data_source
+ DataSource.get
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_index_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_index_definition.rb new file mode 100644 index 0000000..431790b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_index_definition.rb @@ -0,0 +1,31 @@
+module TZInfo
+ # The country index file includes CountryIndexDefinition which provides
+ # a country method used to define each country in the index.
+ #
+ # @private
+ module CountryIndexDefinition #:nodoc:
+ def self.append_features(base)
+ super
+ base.extend(ClassMethods)
+ base.instance_eval { @countries = {} }
+ end
+
+ # Class methods for inclusion.
+ #
+ # @private
+ module ClassMethods #:nodoc:
+ # Defines a country with an ISO 3166 country code, name and block. The
+ # block will be evaluated to obtain all the timezones for the country.
+ # Calls Country.country_defined with the definition of each country.
+ def country(code, name, &block)
+ @countries[code] = RubyCountryInfo.new(code, name, &block)
+ end
+
+ # Returns a frozen hash of all the countries that have been defined in
+ # the index.
+ def countries + @countries.freeze + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_info.rb new file mode 100644 index 0000000..e9d71c7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_info.rb @@ -0,0 +1,42 @@ +module TZInfo + # Represents a country and references to its timezones as returned by a + # DataSource. + class CountryInfo + # The ISO 3166 country code. + attr_reader :code + + # The name of the country. + attr_reader :name + + # Constructs a new CountryInfo with an ISO 3166 country code and name + def initialize(code, name) + @code = code + @name = name + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@code>" + end + + # Returns a frozen array of all the zone identifiers for the country. + # The identifiers are ordered by importance according to the DataSource. + def zone_identifiers + raise_not_implemented('zone_identifiers') + end + + # Returns a frozen array of all the timezones for the for the country as + # CountryTimezone instances. + # + # The timezones are ordered by importance according to the DataSource. + def zones + raise_not_implemented('zones') + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_timezone.rb new file mode 100644 index 0000000..d46bec0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/country_timezone.rb @@ -0,0 +1,135 @@ +module TZInfo + # A Timezone within a Country. This contains extra information about the + # Timezone that is specific to the Country (a Timezone could be used by + # multiple countries). + class CountryTimezone + # The zone identifier. + attr_reader :identifier + + # A description of this timezone in relation to the country, e.g. + # "Eastern Time". This is usually nil for countries having only a single + # Timezone. + attr_reader :description + + class << self + # Creates a new CountryTimezone with a timezone identifier, latitude, + # longitude and description. The latitude and longitude are specified as + # rationals - a numerator and denominator. For performance reasons, the + # numerators and denominators must be specified in their lowest form. + # + # For use internally within TZInfo. + # + # @!visibility private + alias :new! :new + + # Creates a new CountryTimezone with a timezone identifier, latitude, + # longitude and description. The latitude and longitude must be specified + # as instances of Rational. + # + # CountryTimezone instances should normally only be constructed when + # creating new DataSource implementations. + def new(identifier, latitude, longitude, description = nil) + super(identifier, latitude, nil, longitude, nil, description) + end + end + + # Creates a new CountryTimezone with a timezone identifier, latitude, + # longitude and description. The latitude and longitude are specified as + # rationals - a numerator and denominator. For performance reasons, the + # numerators and denominators must be specified in their lowest form. 
+ #
+ # @!visibility private
+ def initialize(identifier, latitude_numerator, latitude_denominator,
+ longitude_numerator, longitude_denominator, description = nil) #:nodoc:
+ @identifier = identifier
+
+ if latitude_numerator.kind_of?(Rational)
+ @latitude = latitude_numerator
+ else
+ @latitude = nil
+ @latitude_numerator = latitude_numerator
+ @latitude_denominator = latitude_denominator
+ end
+
+ if longitude_numerator.kind_of?(Rational)
+ @longitude = longitude_numerator
+ else
+ @longitude = nil
+ @longitude_numerator = longitude_numerator
+ @longitude_denominator = longitude_denominator
+ end
+
+ @description = description
+ end
+
+ # The Timezone (actually a TimezoneProxy for performance reasons).
+ def timezone
+ Timezone.get_proxy(@identifier)
+ end
+
+ # if description is not nil, this method returns description; otherwise it
+ # returns timezone.friendly_identifier(true).
+ def description_or_friendly_identifier
+ description || timezone.friendly_identifier(true)
+ end
+
+ # The latitude of this timezone in degrees as a Rational.
+ def latitude
+ # Thread-safety: It is possible that the value of @latitude may be
+ # calculated multiple times in concurrently executing threads. It is not
+ # worth the overhead of locking to ensure that @latitude is only
+ # calculated once.
+ unless @latitude
+ result = RubyCoreSupport.rational_new!(@latitude_numerator, @latitude_denominator)
+ return result if frozen?
+ @latitude = result
+ end
+
+ @latitude
+ end
+
+ # The longitude of this timezone in degrees as a Rational.
+ def longitude
+ # Thread-safety: It is possible that the value of @longitude may be
+ # calculated multiple times in concurrently executing threads. It is not
+ # worth the overhead of locking to ensure that @longitude is only
+ # calculated once.
+ unless @longitude
+ result = RubyCoreSupport.rational_new!(@longitude_numerator, @longitude_denominator)
+ return result if frozen?
+ @longitude = result
+ end
+
+ @longitude
+ end
+
+ # Returns true if and only if the given CountryTimezone is equal to the
+ # current CountryTimezone (has the same identifier, latitude, longitude
+ # and description).
+ def ==(ct)
+ ct.kind_of?(CountryTimezone) &&
+ identifier == ct.identifier && latitude == ct.latitude &&
+ longitude == ct.longitude && description == ct.description
+ end
+
+ # Returns true if and only if the given CountryTimezone is equal to the
+ # current CountryTimezone (has the same identifier, latitude, longitude
+ # and description).
+ def eql?(ct)
+ self == ct
+ end
+
+ # Returns a hash of this CountryTimezone.
+ def hash
+ @identifier.hash ^
+ (@latitude ? @latitude.numerator.hash ^ @latitude.denominator.hash : @latitude_numerator.hash ^ @latitude_denominator.hash) ^
+ (@longitude ? @longitude.numerator.hash ^ @longitude.denominator.hash : @longitude_numerator.hash ^ @longitude_denominator.hash) ^
+ @description.hash
+ end
+
+ # Returns internal object state as a programmer-readable string.
+ def inspect
+ "#<#{self.class}: #@identifier>"
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_source.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_source.rb new file mode 100644 index 0000000..c5deeac --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_source.rb @@ -0,0 +1,190 @@
+require 'thread'
+
+module TZInfo
+ # InvalidDataSource is raised if the DataSource in use doesn't implement one
+ # of the required methods.
+ class InvalidDataSource < StandardError
+ end
+
+ # DataSourceNotFound is raised if no data source could be found (i.e.
+ # if 'tzinfo/data' cannot be found on the load path and no valid zoneinfo
+ # directory can be found on the system).
+ class DataSourceNotFound < StandardError
+ end
+
+ # The base class for data sources of timezone and country data.
+ #
+ # Use DataSource.set to change the data source being used.
+ class DataSource
+ # The currently selected data source.
+ @@instance = nil
+
+ # Mutex used to ensure the default data source is only created once.
+ @@default_mutex = Mutex.new
+
+ # Returns the currently selected DataSource instance.
+ def self.get
+ # If a DataSource hasn't been manually set when the first request is
+ # made to obtain a DataSource, then a Default data source is created.
+
+ # This is done at the first request rather than when TZInfo is loaded to
+ # avoid unnecessary (or in some cases potentially harmful) attempts to
+ # find a suitable DataSource.
+
+ # A Mutex is used to ensure that only a single default instance is
+ # created (having two different DataSources in use simultaneously could
+ # cause unexpected results).
+
+ unless @@instance
+ @@default_mutex.synchronize do
+ set(create_default_data_source) unless @@instance
+ end
+ end
+
+ @@instance
+ end
+
+ # Sets the currently selected data source for Timezone and Country data.
+ #
+ # This should usually be set to one of the two standard data source types:
+ #
+ # * +:ruby+ - read data from the Ruby modules included in the TZInfo::Data
+ # library (tzinfo-data gem).
+ # * +:zoneinfo+ - read data from the zoneinfo files included with most
+ # Unix-like operating systems (e.g. in /usr/share/zoneinfo).
+ #
+ # To set TZInfo to use one of the standard data source types, call
+ # \TZInfo::DataSource.set in one of the following ways:
+ #
+ # TZInfo::DataSource.set(:ruby)
+ # TZInfo::DataSource.set(:zoneinfo)
+ # TZInfo::DataSource.set(:zoneinfo, zoneinfo_dir)
+ # TZInfo::DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_tab_file)
+ #
+ # \DataSource.set(:zoneinfo) will automatically search for the zoneinfo
+ # directory by checking the paths specified in
+ # ZoneinfoDataSource.search_paths. ZoneinfoDirectoryNotFound will be raised
+ # if no valid zoneinfo directory could be found.
+ #
+ # \DataSource.set(:zoneinfo, zoneinfo_dir) uses the specified zoneinfo
+ # directory as the data source. If the directory is not a valid zoneinfo
+ # directory, an InvalidZoneinfoDirectory exception will be raised.
+ #
+ # \DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_tab_file) uses the
+ # specified zoneinfo directory as the data source, but loads the iso3166.tab
+ # file from an alternate path. If the directory is not a valid zoneinfo
+ # directory, an InvalidZoneinfoDirectory exception will be raised.
+ #
+ # Custom data sources can be created by subclassing TZInfo::DataSource and
+ # implementing the following methods:
+ #
+ # * \load_timezone_info
+ # * \timezone_identifiers
+ # * \data_timezone_identifiers
+ # * \linked_timezone_identifiers
+ # * \load_country_info
+ # * \country_codes
+ #
+ # To have TZInfo use the custom data source, call \DataSource.set
+ # as follows:
+ #
+ # TZInfo::DataSource.set(CustomDataSource.new)
+ #
+ # To avoid inconsistent data, \DataSource.set should be called before
+ # accessing any Timezone or Country data.
+ #
+ # If \DataSource.set is not called, TZInfo will by default use TZInfo::Data
+ # as the data source. If TZInfo::Data is not available (i.e.
if require + # 'tzinfo/data' fails), then TZInfo will search for a zoneinfo directory + # instead (using the search path specified by + # TZInfo::ZoneinfoDataSource::DEFAULT_SEARCH_PATH). + def self.set(data_source_or_type, *args) + if data_source_or_type.kind_of?(DataSource) + @@instance = data_source_or_type + elsif data_source_or_type == :ruby + @@instance = RubyDataSource.new + elsif data_source_or_type == :zoneinfo + @@instance = ZoneinfoDataSource.new(*args) + else + raise ArgumentError, 'data_source_or_type must be a DataSource instance or a data source type (:ruby)' + end + end + + # Returns a TimezoneInfo instance for a given identifier. The TimezoneInfo + # instance should derive from either DataTimzoneInfo for timezones that + # define their own data or LinkedTimezoneInfo for links or aliases to + # other timezones. + # + # Raises InvalidTimezoneIdentifier if the timezone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + raise_invalid_data_source('load_timezone_info') + end + + # Returns an array of all the available timezone identifiers. + def timezone_identifiers + raise_invalid_data_source('timezone_identifiers') + end + + # Returns an array of all the available timezone identifiers for + # data timezones (i.e. those that actually contain definitions). + def data_timezone_identifiers + raise_invalid_data_source('data_timezone_identifiers') + end + + # Returns an array of all the available timezone identifiers that + # are links to other timezones. + def linked_timezone_identifiers + raise_invalid_data_source('linked_timezone_identifiers') + end + + # Returns a CountryInfo instance for the given ISO 3166-1 alpha-2 + # country code. Raises InvalidCountryCode if the country could not be found + # or the code is invalid. + def load_country_info(code) + raise_invalid_data_source('load_country_info') + end + + # Returns an array of all the available ISO 3166-1 alpha-2 + # country codes. + def country_codes + raise_invalid_data_source('country_codes') + end + + # Returns the name of this DataSource. + def to_s + "Default DataSource" + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}>" + end + + private + + # Creates a DataSource instance for use as the default. Used if + # no preference has been specified manually. + def self.create_default_data_source + has_tzinfo_data = false + + begin + require 'tzinfo/data' + has_tzinfo_data = true + rescue LoadError + end + + return RubyDataSource.new if has_tzinfo_data + + begin + return ZoneinfoDataSource.new + rescue ZoneinfoDirectoryNotFound + raise DataSourceNotFound, "No source of timezone data could be found.\nPlease refer to https://tzinfo.github.io/datasourcenotfound for help resolving this error." + end + end + + def raise_invalid_data_source(method_name) + raise InvalidDataSource, "#{method_name} not defined" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone.rb new file mode 100644 index 0000000..94958c9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone.rb @@ -0,0 +1,58 @@ +module TZInfo + + # A Timezone based on a DataTimezoneInfo. + # + # @private + class DataTimezone < InfoTimezone #:nodoc: + + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). 
Any timezone
+ # information in utc is ignored (it is treated as a UTC time).
+ #
+ # If no TimezonePeriod could be found, PeriodNotFound is raised.
+ def period_for_utc(utc)
+ info.period_for_utc(utc)
+ end
+
+ # Returns the set of TimezonePeriod instances that are valid for the given
+ # local time as an array. If you just want a single period, use
+ # period_for_local instead and specify how ambiguities should be resolved.
+ # Raises PeriodNotFound if no periods are found for the given time.
+ def periods_for_local(local)
+ info.periods_for_local(local)
+ end
+
+ # Returns an Array of TimezoneTransition instances representing the times
+ # where the UTC offset of the timezone changes.
+ #
+ # Transitions are returned up to a given date and time, specified in UTC
+ # (utc_to).
+ #
+ # A from date and time may also be supplied using the utc_from parameter
+ # (also specified in UTC). If utc_from is not nil, only transitions from
+ # that date and time onwards will be returned.
+ #
+ # Comparisons with utc_to are exclusive. Comparisons with utc_from are
+ # inclusive. If a transition falls precisely on utc_to, it will be excluded.
+ # If a transition falls on utc_from, it will be included.
+ #
+ # Transitions returned are ordered by when they occur, from earliest to
+ # latest.
+ #
+ # utc_to and utc_from can be specified using either DateTime, Time or
+ # integer timestamps (Time.to_i).
+ #
+ # If utc_from is specified and utc_to is not greater than utc_from, then
+ # transitions_up_to raises an ArgumentError exception.
+ def transitions_up_to(utc_to, utc_from = nil)
+ info.transitions_up_to(utc_to, utc_from)
+ end
+
+ # Returns the canonical zone for this Timezone.
+ #
+ # For a DataTimezone, this is always self.
+ def canonical_zone
+ self
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone_info.rb new file mode 100644 index 0000000..7d0fa5f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone_info.rb @@ -0,0 +1,55 @@
+module TZInfo
+ # Represents a defined timezone containing transition data.
+ class DataTimezoneInfo < TimezoneInfo
+
+ # Returns the TimezonePeriod for the given UTC time.
+ def period_for_utc(utc)
+ raise_not_implemented('period_for_utc')
+ end
+
+ # Returns the set of TimezonePeriods for the given local time as an array.
+ # Results returned are ordered by increasing UTC start date.
+ # Returns an empty array if no periods are found for the given time.
+ def periods_for_local(local)
+ raise_not_implemented('periods_for_local')
+ end
+
+ # Returns an Array of TimezoneTransition instances representing the times
+ # where the UTC offset of the timezone changes.
+ #
+ # Transitions are returned up to a given date and time, specified in UTC
+ # (utc_to).
+ #
+ # A from date and time may also be supplied using the utc_from parameter
+ # (also specified in UTC). If utc_from is not nil, only transitions from
+ # that date and time onwards will be returned.
+ #
+ # Comparisons with utc_to are exclusive. Comparisons with utc_from are
+ # inclusive. If a transition falls precisely on utc_to, it will be excluded.
+ # If a transition falls on utc_from, it will be included.
+ #
+ # Transitions returned are ordered by when they occur, from earliest to
+ # latest.
+ #
+ # utc_to and utc_from can be specified using either DateTime, Time or
+ # integer timestamps (Time.to_i).
+ # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + raise_not_implemented('transitions_up_to') + end + + # Constructs a Timezone instance for the timezone represented by this + # DataTimezoneInfo. + def create_timezone + DataTimezone.new(self) + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/info_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/info_timezone.rb new file mode 100644 index 0000000..4eb014b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/info_timezone.rb @@ -0,0 +1,30 @@ +module TZInfo + + # A Timezone based on a TimezoneInfo. + # + # @private + class InfoTimezone < Timezone #:nodoc: + + # Constructs a new InfoTimezone with a TimezoneInfo instance. + def self.new(info) + tz = super() + tz.send(:setup, info) + tz + end + + # The identifier of the timezone, e.g. "Europe/Paris". + def identifier + @info.identifier + end + + protected + # The TimezoneInfo for this Timezone. + def info + @info + end + + def setup(info) + @info = info + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone.rb new file mode 100644 index 0000000..c4f4a0d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone.rb @@ -0,0 +1,63 @@ +module TZInfo + + # A Timezone based on a LinkedTimezoneInfo. + # + # @private + class LinkedTimezone < InfoTimezone #:nodoc: + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in utc is ignored (it is treated as a UTC time). + # + # If no TimezonePeriod could be found, PeriodNotFound is raised. + def period_for_utc(utc) + @linked_timezone.period_for_utc(utc) + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how abiguities should be resolved. + # Raises PeriodNotFound if no periods are found for the given time. + def periods_for_local(local) + @linked_timezone.periods_for_local(local) + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. 
+ def transitions_up_to(utc_to, utc_from = nil) + @linked_timezone.transitions_up_to(utc_to, utc_from) + end + + # Returns the canonical zone for this Timezone. + # + # For a LinkedTimezone, this is the canonical zone of the link target. + def canonical_zone + @linked_timezone.canonical_zone + end + + protected + def setup(info) + super(info) + @linked_timezone = Timezone.get(info.link_to_identifier) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone_info.rb new file mode 100644 index 0000000..f7961d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone_info.rb @@ -0,0 +1,26 @@ +module TZInfo + # Represents a timezone that is defined as a link or alias to another zone. + class LinkedTimezoneInfo < TimezoneInfo + + # The zone that provides the data (that this zone is an alias for). + attr_reader :link_to_identifier + + # Constructs a new LinkedTimezoneInfo with an identifier and the identifier + # of the zone linked to. + def initialize(identifier, link_to_identifier) + super(identifier) + @link_to_identifier = link_to_identifier + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@identifier,#@link_to_identifier>" + end + + # Constructs a Timezone instance for the timezone represented by this + # DataTimezoneInfo. + def create_timezone + LinkedTimezone.new(self) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/offset_rationals.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/offset_rationals.rb new file mode 100644 index 0000000..2a50a08 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/offset_rationals.rb @@ -0,0 +1,77 @@ +require 'rational' unless defined?(Rational) + +module TZInfo + + # Provides a method for getting Rationals for a timezone offset in seconds. + # Pre-reduced rationals are returned for all the half-hour intervals between + # -14 and +14 hours to avoid having to call gcd at runtime. 
+ # + # @private + module OffsetRationals #:nodoc: + @@rational_cache = { + -50400 => RubyCoreSupport.rational_new!(-7,12), + -48600 => RubyCoreSupport.rational_new!(-9,16), + -46800 => RubyCoreSupport.rational_new!(-13,24), + -45000 => RubyCoreSupport.rational_new!(-25,48), + -43200 => RubyCoreSupport.rational_new!(-1,2), + -41400 => RubyCoreSupport.rational_new!(-23,48), + -39600 => RubyCoreSupport.rational_new!(-11,24), + -37800 => RubyCoreSupport.rational_new!(-7,16), + -36000 => RubyCoreSupport.rational_new!(-5,12), + -34200 => RubyCoreSupport.rational_new!(-19,48), + -32400 => RubyCoreSupport.rational_new!(-3,8), + -30600 => RubyCoreSupport.rational_new!(-17,48), + -28800 => RubyCoreSupport.rational_new!(-1,3), + -27000 => RubyCoreSupport.rational_new!(-5,16), + -25200 => RubyCoreSupport.rational_new!(-7,24), + -23400 => RubyCoreSupport.rational_new!(-13,48), + -21600 => RubyCoreSupport.rational_new!(-1,4), + -19800 => RubyCoreSupport.rational_new!(-11,48), + -18000 => RubyCoreSupport.rational_new!(-5,24), + -16200 => RubyCoreSupport.rational_new!(-3,16), + -14400 => RubyCoreSupport.rational_new!(-1,6), + -12600 => RubyCoreSupport.rational_new!(-7,48), + -10800 => RubyCoreSupport.rational_new!(-1,8), + -9000 => RubyCoreSupport.rational_new!(-5,48), + -7200 => RubyCoreSupport.rational_new!(-1,12), + -5400 => RubyCoreSupport.rational_new!(-1,16), + -3600 => RubyCoreSupport.rational_new!(-1,24), + -1800 => RubyCoreSupport.rational_new!(-1,48), + 0 => RubyCoreSupport.rational_new!(0,1), + 1800 => RubyCoreSupport.rational_new!(1,48), + 3600 => RubyCoreSupport.rational_new!(1,24), + 5400 => RubyCoreSupport.rational_new!(1,16), + 7200 => RubyCoreSupport.rational_new!(1,12), + 9000 => RubyCoreSupport.rational_new!(5,48), + 10800 => RubyCoreSupport.rational_new!(1,8), + 12600 => RubyCoreSupport.rational_new!(7,48), + 14400 => RubyCoreSupport.rational_new!(1,6), + 16200 => RubyCoreSupport.rational_new!(3,16), + 18000 => RubyCoreSupport.rational_new!(5,24), + 19800 => RubyCoreSupport.rational_new!(11,48), + 21600 => RubyCoreSupport.rational_new!(1,4), + 23400 => RubyCoreSupport.rational_new!(13,48), + 25200 => RubyCoreSupport.rational_new!(7,24), + 27000 => RubyCoreSupport.rational_new!(5,16), + 28800 => RubyCoreSupport.rational_new!(1,3), + 30600 => RubyCoreSupport.rational_new!(17,48), + 32400 => RubyCoreSupport.rational_new!(3,8), + 34200 => RubyCoreSupport.rational_new!(19,48), + 36000 => RubyCoreSupport.rational_new!(5,12), + 37800 => RubyCoreSupport.rational_new!(7,16), + 39600 => RubyCoreSupport.rational_new!(11,24), + 41400 => RubyCoreSupport.rational_new!(23,48), + 43200 => RubyCoreSupport.rational_new!(1,2), + 45000 => RubyCoreSupport.rational_new!(25,48), + 46800 => RubyCoreSupport.rational_new!(13,24), + 48600 => RubyCoreSupport.rational_new!(9,16), + 50400 => RubyCoreSupport.rational_new!(7,12)}.freeze + + # Returns a Rational expressing the fraction of a day that offset in + # seconds represents (i.e. equivalent to Rational(offset, 86400)). 
+ def rational_for_offset(offset) + @@rational_cache[offset] || Rational(offset, 86400) + end + module_function :rational_for_offset + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb new file mode 100644 index 0000000..af2d7e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb @@ -0,0 +1,136 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'strscan' + +module TZInfo + # An {InvalidPosixTimeZone} exception is raised if an invalid POSIX-style + # time zone string is encountered. + # + # @private + class InvalidPosixTimeZone < StandardError #:nodoc: + end + + # A parser for POSIX-style TZ strings used in zoneinfo files and specified + # by tzfile.5 and tzset.3. + # + # @private + class PosixTimeZoneParser #:nodoc: + # Parses a POSIX-style TZ string, returning either a TimezoneOffset or + # an AnnualRules instance. + def parse(tz_string) + raise InvalidPosixTimeZone unless tz_string.kind_of?(String) + return nil if tz_string.empty? + + s = StringScanner.new(tz_string) + check_scan(s, /([^-+,\d<][^-+,\d]*) | <([^>]+)>/x) + std_abbrev = s[1] || s[2] + check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + std_offset = get_offset_from_hms(s[1], s[2], s[3]) + + if s.scan(/([^-+,\d<][^-+,\d]*) | <([^>]+)>/x) + dst_abbrev = s[1] || s[2] + + if s.scan(/([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + dst_offset = get_offset_from_hms(s[1], s[2], s[3]) + else + # POSIX is negative for ahead of UTC. + dst_offset = std_offset - 3600 + end + + dst_difference = std_offset - dst_offset + + start_rule = parse_rule(s, 'start') + end_rule = parse_rule(s, 'end') + + raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." if s.rest? + + if start_rule.is_always_first_day_of_year? && start_rule.transition_at == 0 && + end_rule.is_always_last_day_of_year? && end_rule.transition_at == 86400 + dst_difference + # Constant daylight savings time. + # POSIX is negative for ahead of UTC. + TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev.to_sym) + else + AnnualRules.new( + TimezoneOffset.new(-std_offset, 0, std_abbrev.to_sym), + TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev.to_sym), + start_rule, + end_rule) + end + elsif !s.rest? + # Constant standard time. + # POSIX is negative for ahead of UTC. + TimezoneOffset.new(-std_offset, 0, std_abbrev.to_sym) + else + raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." + end + end + + private + + # Parses the rule from the TZ string, returning a TransitionRule. 
+ def parse_rule(s, type) + check_scan(s, /,(?: (?: J(\d+) ) | (\d+) | (?: M(\d+)\.(\d)\.(\d) ) )/x) + julian_day_of_year = s[1] + absolute_day_of_year = s[2] + month = s[3] + week = s[4] + day_of_week = s[5] + + if s.scan(/\//) + check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + transition_at = get_seconds_after_midnight_from_hms(s[1], s[2], s[3]) + else + transition_at = 7200 + end + + begin + if julian_day_of_year + JulianDayOfYearTransitionRule.new(julian_day_of_year.to_i, transition_at) + elsif absolute_day_of_year + AbsoluteDayOfYearTransitionRule.new(absolute_day_of_year.to_i, transition_at) + elsif week == '5' + LastDayOfMonthTransitionRule.new(month.to_i, day_of_week.to_i, transition_at) + else + DayOfMonthTransitionRule.new(month.to_i, week.to_i, day_of_week.to_i, transition_at) + end + rescue ArgumentError => e + raise InvalidPosixTimeZone, "Invalid #{type} rule in POSIX-style time zone string: #{e}" + end + end + + # Returns an offset in seconds from hh:mm:ss values. The value can be + # negative. -02:33:12 would represent 2 hours, 33 minutes and 12 seconds + # ahead of UTC. + def get_offset_from_hms(h, m, s) + h = h.to_i + m = m.to_i + s = s.to_i + raise InvalidPosixTimeZone, "Invalid minute #{m} in offset for POSIX-style time zone string." if m > 59 + raise InvalidPosixTimeZone, "Invalid second #{s} in offset for POSIX-style time zone string." if s > 59 + magnitude = (h.abs * 60 + m) * 60 + s + h < 0 ? -magnitude : magnitude + end + + # Returns the seconds from midnight from hh:mm:ss values. Hours can exceed + # 24 for a time on the following day. Hours can be negative to subtract + # hours from midnight on the given day. -02:33:12 represents 22:33:12 on + # the prior day. + def get_seconds_after_midnight_from_hms(h, m, s) + h = h.to_i + m = m.to_i + s = s.to_i + raise InvalidPosixTimeZone, "Invalid minute #{m} in time for POSIX-style time zone string." if m > 59 + raise InvalidPosixTimeZone, "Invalid second #{s} in time for POSIX-style time zone string." if s > 59 + (h * 3600) + m * 60 + s + end + + # Scans for a pattern and raises an exception if the pattern does not + # match the input. + def check_scan(s, pattern) + result = s.scan(pattern) + raise InvalidPosixTimeZone, "Expected '#{s.rest}' to match #{pattern} in POSIX-style time zone string." unless result + result + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_core_support.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_core_support.rb new file mode 100644 index 0000000..7ba776b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_core_support.rb @@ -0,0 +1,169 @@ +require 'date' +require 'rational' unless defined?(Rational) + +module TZInfo + + # Methods to support different versions of Ruby. + # + # @private + module RubyCoreSupport #:nodoc: + + # Use Rational.new! for performance reasons in Ruby 1.8. + # This has been removed from 1.9, but Rational performs better. + if Rational.respond_to? :new! + def self.rational_new!(numerator, denominator = 1) + Rational.new!(numerator, denominator) + end + else + def self.rational_new!(numerator, denominator = 1) + Rational(numerator, denominator) + end + end + + # Ruby 1.8.6 introduced new! and deprecated new0. + # Ruby 1.9.0 removed new0. + # Ruby trunk revision 31668 removed the new! method. + # Still support new0 for better performance on older versions of Ruby (new0 indicates + # that the rational has already been reduced to its lowest terms). 
+ # Fallback to jd with conversion from ajd if new! and new0 are unavailable. + if DateTime.respond_to? :new! + def self.datetime_new!(ajd = 0, of = 0, sg = Date::ITALY) + DateTime.new!(ajd, of, sg) + end + elsif DateTime.respond_to? :new0 + def self.datetime_new!(ajd = 0, of = 0, sg = Date::ITALY) + DateTime.new0(ajd, of, sg) + end + else + HALF_DAYS_IN_DAY = rational_new!(1, 2) + + def self.datetime_new!(ajd = 0, of = 0, sg = Date::ITALY) + # Convert from an Astronomical Julian Day number to a civil Julian Day number. + jd = ajd + of + HALF_DAYS_IN_DAY + + # Ruby trunk revision 31862 changed the behaviour of DateTime.jd so that it will no + # longer accept a fractional civil Julian Day number if further arguments are specified. + # Calculate the hours, minutes and seconds to pass to jd. + + jd_i = jd.to_i + jd_i -= 1 if jd < 0 + hours = (jd - jd_i) * 24 + hours_i = hours.to_i + minutes = (hours - hours_i) * 60 + minutes_i = minutes.to_i + seconds = (minutes - minutes_i) * 60 + + DateTime.jd(jd_i, hours_i, minutes_i, seconds, of, sg) + end + end + + # DateTime in Ruby 1.8.6 doesn't consider times within the 60th second to be + # valid. When attempting to specify such a DateTime, subtract the fractional + # part and then add it back later + if Date.respond_to?(:valid_time?) && !Date.valid_time?(0, 0, rational_new!(59001, 1000)) # 0:0:59.001 + def self.datetime_new(y=-4712, m=1, d=1, h=0, min=0, s=0, of=0, sg=Date::ITALY) + if !s.kind_of?(Integer) && s > 59 + dt = DateTime.new(y, m, d, h, min, 59, of, sg) + dt + (s - 59) / 86400 + else + DateTime.new(y, m, d, h, min, s, of, sg) + end + end + else + def self.datetime_new(y=-4712, m=1, d=1, h=0, min=0, s=0, of=0, sg=Date::ITALY) + DateTime.new(y, m, d, h, min, s, of, sg) + end + end + + # Returns true if Time on the runtime platform supports Times defined + # by negative 32-bit timestamps, otherwise false. + begin + Time.at(-1) + Time.at(-2147483648) + + def self.time_supports_negative + true + end + rescue ArgumentError + def self.time_supports_negative + false + end + end + + # Returns true if Time on the runtime platform supports Times defined by + # 64-bit timestamps, otherwise false. + begin + Time.at(-2147483649) + Time.at(2147483648) + + def self.time_supports_64bit + true + end + rescue RangeError + def self.time_supports_64bit + false + end + end + + # Return the result of Time#nsec if it exists, otherwise return the + # result of Time#usec * 1000. + if Time.method_defined?(:nsec) + def self.time_nsec(time) + time.nsec + end + else + def self.time_nsec(time) + time.usec * 1000 + end + end + + # Call String#force_encoding if this version of Ruby has encoding support + # otherwise treat as a no-op. + if String.method_defined?(:force_encoding) + def self.force_encoding(str, encoding) + str.force_encoding(encoding) + end + else + def self.force_encoding(str, encoding) + str + end + end + + + # Wrapper for File.open that supports passing hash options for specifying + # encodings on Ruby 1.9+. The options are ignored on earlier versions of + # Ruby. + if RUBY_VERSION =~ /\A1\.[0-8]\./ + def self.open_file(file_name, mode, opts, &block) + File.open(file_name, mode, &block) + end + elsif RUBY_VERSION =~ /\A1\.9\./ + def self.open_file(file_name, mode, opts, &block) + File.open(file_name, mode, opts, &block) + end + else + # Evaluate method as a string because **opts isn't valid syntax prior to + # Ruby 2.0. 
+ eval(<<-EOF + def self.open_file(file_name, mode, opts, &block) + File.open(file_name, mode, **opts, &block) + end + EOF + ) + end + + + # Object#untaint is a deprecated no-op in Ruby >= 2.7 and will be removed in + # 3.2. Add a refinement to either silence the warning, or supply the method + # if needed. + if !Object.new.respond_to?(:untaint) || RUBY_VERSION =~ /\A(\d+)\.(\d+)(?:\.|\z)/ && ($1 == '2' && $2.to_i >= 7 || $1.to_i >= 3) + module UntaintExt + refine Object do + def untaint + self + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_country_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_country_info.rb new file mode 100644 index 0000000..e51915d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_country_info.rb @@ -0,0 +1,74 @@ +module TZInfo + # Represents information about a country returned by RubyDataSource. + # + # @private + class RubyCountryInfo < CountryInfo #:nodoc: + # Constructs a new CountryInfo with an ISO 3166 country code, name and + # block. The block will be evaluated to obtain the timezones for the + # country when the zones are first needed. + def initialize(code, name, &block) + super(code, name) + @block = block + @zones = nil + @zone_identifiers = nil + end + + # Returns a frozen array of all the zone identifiers for the country. These + # are in the order they were added using the timezone method. + def zone_identifiers + # Thread-safety: It is possible that the value of @zone_identifiers may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @zone_identifiers is only + # calculated once. + + unless @zone_identifiers + result = zones.collect {|zone| zone.identifier}.freeze + return result if frozen? + @zone_identifiers = result + end + + @zone_identifiers + end + + # Returns a frozen array of all the timezones for the for the country as + # CountryTimezone instances. These are in the order they were added using + # the timezone method. + def zones + # Thread-safety: It is possible that the value of @zones may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @zones is only + # calculated once. + + unless @zones + zones = Zones.new + @block.call(zones) if @block + result = zones.list.freeze + return result if frozen? + @block = nil + @zones = result + end + + @zones + end + + # An instance of the Zones class is passed to the block used to define + # timezones. + # + # @private + class Zones #:nodoc: + attr_reader :list + + def initialize + @list = [] + end + + # Called by the index data to define a timezone for the country. + def timezone(identifier, latitude_numerator, latitude_denominator, + longitude_numerator, longitude_denominator, description = nil) + @list << CountryTimezone.new!(identifier, latitude_numerator, + latitude_denominator, longitude_numerator, longitude_denominator, + description) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_data_source.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_data_source.rb new file mode 100644 index 0000000..b5a6752 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_data_source.rb @@ -0,0 +1,140 @@ +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. 
See #114. + send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt) + + # A DataSource that loads data from the set of Ruby modules included in the + # TZInfo::Data library (tzinfo-data gem). + # + # To have TZInfo use this DataSource, call TZInfo::DataSource.set as follows: + # + # TZInfo::DataSource.set(:ruby) + class RubyDataSource < DataSource + # Whether the timezone index has been loaded yet. + @@timezone_index_loaded = false + + # Whether the country index has been loaded yet. + @@country_index_loaded = false + + # Initializes a new RubyDataSource instance. + def initialize + tzinfo_data = File.join('tzinfo', 'data') + begin + require(tzinfo_data) + + data_file = File.join('', 'tzinfo', 'data.rb') + path = $".reverse_each.detect {|p| p.end_with?(data_file) } + if path + @base_path = File.join(File.dirname(path), 'data').untaint + else + @base_path = tzinfo_data + end + rescue LoadError + @base_path = tzinfo_data + end + end + + # Returns a TimezoneInfo instance for a given identifier. + # Raises InvalidTimezoneIdentifier if the timezone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + raise InvalidTimezoneIdentifier, 'Invalid identifier' if identifier !~ /^[A-Za-z0-9\+\-_]+(\/[A-Za-z0-9\+\-_]+)*$/ + + identifier = identifier.gsub(/-/, '__m__').gsub(/\+/, '__p__') + + # Untaint identifier after it has been reassigned to a new string. We + # don't want to modify the original identifier. identifier may also be + # frozen and therefore cannot be untainted. + identifier.untaint + + identifier = identifier.split('/') + begin + require_definition(identifier) + + m = Data::Definitions + identifier.each {|part| + m = m.const_get(part) + } + + m.get + rescue LoadError, NameError => e + raise InvalidTimezoneIdentifier, e.message + end + end + + # Returns an array of all the available timezone identifiers. + def timezone_identifiers + load_timezone_index + Data::Indexes::Timezones.timezones + end + + # Returns an array of all the available timezone identifiers for + # data timezones (i.e. those that actually contain definitions). + def data_timezone_identifiers + load_timezone_index + Data::Indexes::Timezones.data_timezones + end + + # Returns an array of all the available timezone identifiers that + # are links to other timezones. + def linked_timezone_identifiers + load_timezone_index + Data::Indexes::Timezones.linked_timezones + end + + # Returns a CountryInfo instance for the given ISO 3166-1 alpha-2 + # country code. Raises InvalidCountryCode if the country could not be found + # or the code is invalid. + def load_country_info(code) + load_country_index + info = Data::Indexes::Countries.countries[code] + raise InvalidCountryCode, 'Invalid country code' unless info + info + end + + # Returns an array of all the available ISO 3166-1 alpha-2 + # country codes. + def country_codes + load_country_index + Data::Indexes::Countries.countries.keys.freeze + end + + # Returns the name of this DataSource. + def to_s + "Ruby DataSource" + end + + private + + # Requires a zone definition by its identifier (split on /). + def require_definition(identifier) + require_data(*(['definitions'] + identifier)) + end + + # Requires an index by its name. + def require_index(name) + require_data(*['indexes', name]) + end + + # Requires a file from tzinfo/data. + def require_data(*file) + require(File.join(@base_path, *file)) + end + + # Loads in the index of timezones if it hasn't already been loaded. 
+ def load_timezone_index + unless @@timezone_index_loaded + require_index('timezones') + @@timezone_index_loaded = true + end + end + + # Loads in the index of countries if it hasn't already been loaded. + def load_country_index + unless @@country_index_loaded + require_index('countries') + @@country_index_loaded = true + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/time_or_datetime.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/time_or_datetime.rb new file mode 100644 index 0000000..56c3314 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/time_or_datetime.rb @@ -0,0 +1,351 @@ +require 'date' +require 'rational' unless defined?(Rational) +require 'time' + +module TZInfo + # Used by TZInfo internally to represent either a Time, DateTime or + # an Integer timestamp (seconds since 1970-01-01 00:00:00). + class TimeOrDateTime + include Comparable + + # Constructs a new TimeOrDateTime. timeOrDateTime can be a Time, DateTime + # or Integer. If using a Time or DateTime, any time zone information + # is ignored. + # + # Integer timestamps must be within the range supported by Time on the + # platform being used. + def initialize(timeOrDateTime) + @time = nil + @datetime = nil + @timestamp = nil + + if timeOrDateTime.is_a?(Time) + @time = timeOrDateTime + + # Avoid using the slower Rational class unless necessary. + nsec = RubyCoreSupport.time_nsec(@time) + usec = nsec % 1000 == 0 ? nsec / 1000 : Rational(nsec, 1000) + + @time = Time.utc(@time.year, @time.mon, @time.mday, @time.hour, @time.min, @time.sec, usec) unless @time.utc? + @orig = @time + elsif timeOrDateTime.is_a?(DateTime) + @datetime = timeOrDateTime + @datetime = @datetime.new_offset(0) unless @datetime.offset == 0 + @orig = @datetime + else + @timestamp = timeOrDateTime.to_i + + if !RubyCoreSupport.time_supports_64bit && (@timestamp > 2147483647 || @timestamp < -2147483648 || (@timestamp < 0 && !RubyCoreSupport.time_supports_negative)) + raise RangeError, 'Timestamp is outside the supported range of Time on this platform' + end + + @orig = @timestamp + end + end + + # Returns the time as a Time. + # + # When converting from a DateTime, the result is truncated to microsecond + # precision. + def to_time + # Thread-safety: It is possible that the value of @time may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @time is only + # calculated once. + + unless @time + result = if @timestamp + Time.at(@timestamp).utc + else + Time.utc(year, mon, mday, hour, min, sec, usec) + end + + return result if frozen? + @time = result + end + + @time + end + + # Returns the time as a DateTime. + # + # When converting from a Time, the result is truncated to microsecond + # precision. + def to_datetime + # Thread-safety: It is possible that the value of @datetime may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @datetime is only + # calculated once. + + unless @datetime + # Avoid using Rational unless necessary. + u = usec + s = u == 0 ? sec : Rational(sec * 1000000 + u, 1000000) + result = RubyCoreSupport.datetime_new(year, mon, mday, hour, min, s) + return result if frozen? + @datetime = result + end + + @datetime + end + + # Returns the time as an integer timestamp. + def to_i + # Thread-safety: It is possible that the value of @timestamp may be + # calculated multiple times in concurrently executing threads. 
It is not + # worth the overhead of locking to ensure that @timestamp is only + # calculated once. + + unless @timestamp + result = to_time.to_i + return result if frozen? + @timestamp = result + end + + @timestamp + end + + # Returns the time as the original time passed to new. + def to_orig + @orig + end + + # Returns a string representation of the TimeOrDateTime. + def to_s + if @orig.is_a?(Time) + "Time: #{@orig.to_s}" + elsif @orig.is_a?(DateTime) + "DateTime: #{@orig.to_s}" + else + "Timestamp: #{@orig.to_s}" + end + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{@orig.inspect}>" + end + + # Returns the year. + def year + if @time + @time.year + elsif @datetime + @datetime.year + else + to_time.year + end + end + + # Returns the month of the year (1..12). + def mon + if @time + @time.mon + elsif @datetime + @datetime.mon + else + to_time.mon + end + end + alias :month :mon + + # Returns the day of the month (1..n). + def mday + if @time + @time.mday + elsif @datetime + @datetime.mday + else + to_time.mday + end + end + alias :day :mday + + # Returns the day of the week (0..6 for Sunday to Saturday). + def wday + if @time + @time.wday + elsif @datetime + @datetime.wday + else + to_time.wday + end + end + + # Returns the hour of the day (0..23). + def hour + if @time + @time.hour + elsif @datetime + @datetime.hour + else + to_time.hour + end + end + + # Returns the minute of the hour (0..59). + def min + if @time + @time.min + elsif @datetime + @datetime.min + else + to_time.min + end + end + + # Returns the second of the minute (0..60). (60 for a leap second). + def sec + if @time + @time.sec + elsif @datetime + @datetime.sec + else + to_time.sec + end + end + + # Returns the number of microseconds for the time. + def usec + if @time + @time.usec + elsif @datetime + # Ruby 1.8 has sec_fraction (of which the documentation says + # 'I do NOT recommend you to use this method'). sec_fraction no longer + # exists in Ruby 1.9. + + # Calculate the sec_fraction from the day_fraction. + ((@datetime.day_fraction - OffsetRationals.rational_for_offset(@datetime.hour * 3600 + @datetime.min * 60 + @datetime.sec)) * 86400000000).to_i + else + 0 + end + end + + # Compares this TimeOrDateTime with another Time, DateTime, timestamp + # (Integer) or TimeOrDateTime. Returns -1, 0 or +1 depending + # whether the receiver is less than, equal to, or greater than + # timeOrDateTime. + # + # Returns nil if the passed in timeOrDateTime is not comparable with + # TimeOrDateTime instances. + # + # Comparisons involving a DateTime will be performed using DateTime#<=>. + # Comparisons that don't involve a DateTime, but include a Time will be + # performed with Time#<=>. Otherwise comparisons will be performed with + # Integer#<=>. + def <=>(timeOrDateTime) + return nil unless timeOrDateTime.is_a?(TimeOrDateTime) || + timeOrDateTime.is_a?(Time) || + timeOrDateTime.is_a?(DateTime) || + timeOrDateTime.respond_to?(:to_i) + + unless timeOrDateTime.is_a?(TimeOrDateTime) + timeOrDateTime = TimeOrDateTime.wrap(timeOrDateTime) + end + + orig = timeOrDateTime.to_orig + + if @orig.is_a?(DateTime) || orig.is_a?(DateTime) + # If either is a DateTime, assume it is there for a reason + # (i.e. for its larger range of acceptable values on 32-bit systems). 
+ to_datetime <=> timeOrDateTime.to_datetime + elsif @orig.is_a?(Time) || orig.is_a?(Time) + to_time <=> timeOrDateTime.to_time + else + to_i <=> timeOrDateTime.to_i + end + end + + # Adds a number of seconds to the TimeOrDateTime. Returns a new + # TimeOrDateTime, preserving what the original constructed type was. + # If the original type is a Time and the resulting calculation goes out of + # range for Times, then an exception will be raised by the Time class. + def +(seconds) + if seconds == 0 + self + else + if @orig.is_a?(DateTime) + TimeOrDateTime.new(@orig + OffsetRationals.rational_for_offset(seconds)) + else + # + defined for Time and Integer + TimeOrDateTime.new(@orig + seconds) + end + end + end + + # Subtracts a number of seconds from the TimeOrDateTime. Returns a new + # TimeOrDateTime, preserving what the original constructed type was. + # If the original type is a Time and the resulting calculation goes out of + # range for Times, then an exception will be raised by the Time class. + def -(seconds) + self + (-seconds) + end + + # Similar to the + operator, but converts to a DateTime based TimeOrDateTime + # where the Time or Integer timestamp to go out of the allowed range for a + # Time, converts to a DateTime based TimeOrDateTime. + # + # Note that the range of Time varies based on the platform. + def add_with_convert(seconds) + if seconds == 0 + self + else + if @orig.is_a?(DateTime) + TimeOrDateTime.new(@orig + OffsetRationals.rational_for_offset(seconds)) + else + # A Time or timestamp. + result = to_i + seconds + + if ((result > 2147483647 || result < -2147483648) && !RubyCoreSupport.time_supports_64bit) || (result < 0 && !RubyCoreSupport.time_supports_negative) + result = TimeOrDateTime.new(to_datetime + OffsetRationals.rational_for_offset(seconds)) + else + result = TimeOrDateTime.new(@orig + seconds) + end + end + end + end + + # Returns true if todt represents the same time and was originally + # constructed with the same type (DateTime, Time or timestamp) as this + # TimeOrDateTime. + def eql?(todt) + todt.kind_of?(TimeOrDateTime) && to_orig.eql?(todt.to_orig) + end + + # Returns a hash of this TimeOrDateTime. + def hash + @orig.hash + end + + # If no block is given, returns a TimeOrDateTime wrapping the given + # timeOrDateTime. If a block is specified, a TimeOrDateTime is constructed + # and passed to the block. The result of the block must be a TimeOrDateTime. + # + # The result of the block will be converted to the type of the originally + # passed in timeOrDateTime and then returned as the result of wrap. + # + # timeOrDateTime can be a Time, DateTime, timestamp (Integer) or + # TimeOrDateTime. If a TimeOrDateTime is passed in, no new TimeOrDateTime + # will be constructed and the value passed to wrap will be used when + # calling the block. + def self.wrap(timeOrDateTime) + t = timeOrDateTime.is_a?(TimeOrDateTime) ? timeOrDateTime : TimeOrDateTime.new(timeOrDateTime) + + if block_given? 
+ t = yield t + + if timeOrDateTime.is_a?(TimeOrDateTime) + t + elsif timeOrDateTime.is_a?(Time) + t.to_time + elsif timeOrDateTime.is_a?(DateTime) + t.to_datetime + else + t.to_i + end + else + t + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone.rb new file mode 100644 index 0000000..ee838b9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone.rb @@ -0,0 +1,673 @@ +require 'date' +require 'set' +require 'thread_safe' + +module TZInfo + # AmbiguousTime is raised to indicates that a specified time in a local + # timezone has more than one possible equivalent UTC time. This happens when + # transitioning from daylight savings time to standard time where the clocks + # are rolled back. + # + # AmbiguousTime is raised by period_for_local and local_to_utc when using an + # ambiguous time and not specifying any means to resolve the ambiguity. + class AmbiguousTime < StandardError + end + + # PeriodNotFound is raised to indicate that no TimezonePeriod matching a given + # time could be found. + class PeriodNotFound < StandardError + end + + # Raised by Timezone#get if the identifier given is not valid. + class InvalidTimezoneIdentifier < StandardError + end + + # Raised if an attempt is made to use a timezone created with + # Timezone.new(nil). + class UnknownTimezone < StandardError + end + + # Timezone is the base class of all timezones. It provides a factory method, + # 'get', to access timezones by identifier. Once a specific Timezone has been + # retrieved, DateTimes, Times and timestamps can be converted between the UTC + # and the local time for the zone. For example: + # + # tz = TZInfo::Timezone.get('America/New_York') + # puts tz.utc_to_local(DateTime.new(2005,8,29,15,35,0)).to_s + # puts tz.local_to_utc(Time.utc(2005,8,29,11,35,0)).to_s + # puts tz.utc_to_local(1125315300).to_s + # + # Each time conversion method returns an object of the same type it was + # passed. + # + # The Timezone class is thread-safe. It is safe to use class and instance + # methods of Timezone in concurrently executing threads. Instances of Timezone + # can be shared across thread boundaries. + class Timezone + include Comparable + + # Cache of loaded zones by identifier to avoid using require if a zone + # has already been loaded. + # + # @!visibility private + @@loaded_zones = nil + + # Default value of the dst parameter of the local_to_utc and + # period_for_local methods. + # + # @!visibility private + @@default_dst = nil + + # Sets the default value of the optional dst parameter of the + # local_to_utc and period_for_local methods. Can be set to nil, true or + # false. + # + # The value of default_dst defaults to nil if unset. + def self.default_dst=(value) + @@default_dst = value.nil? ? nil : !!value + end + + # Gets the default value of the optional dst parameter of the + # local_to_utc and period_for_local methods. Can be set to nil, true or + # false. + def self.default_dst + @@default_dst + end + + # Returns a timezone by its identifier (e.g. "Europe/London", + # "America/Chicago" or "UTC"). + # + # Raises InvalidTimezoneIdentifier if the timezone couldn't be found. + def self.get(identifier) + instance = @@loaded_zones[identifier] + + unless instance + # Thread-safety: It is possible that multiple equivalent Timezone + # instances could be created here in concurrently executing threads. 
+ # The consequences of this are that the data may be loaded more than + # once (depending on the data source) and memoized calculations could + # be discarded. The performance benefit of ensuring that only a single + # instance is created is unlikely to be worth the overhead of only + # allowing one Timezone to be loaded at a time. + info = data_source.load_timezone_info(identifier) + instance = info.create_timezone + @@loaded_zones[instance.identifier] = instance + end + + instance + end + + # Returns a proxy for the Timezone with the given identifier. The proxy + # will cause the real timezone to be loaded when an attempt is made to + # find a period or convert a time. get_proxy will not validate the + # identifier. If an invalid identifier is specified, no exception will be + # raised until the proxy is used. + def self.get_proxy(identifier) + TimezoneProxy.new(identifier) + end + + # If identifier is nil calls super(), otherwise calls get. An identfier + # should always be passed in when called externally. + def self.new(identifier = nil) + if identifier + get(identifier) + else + super() + end + end + + # Returns an array containing all the available Timezones. + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all + get_proxies(all_identifiers) + end + + # Returns an array containing the identifiers of all the available + # Timezones. + def self.all_identifiers + data_source.timezone_identifiers + end + + # Returns an array containing all the available Timezones that are based + # on data (are not links to other Timezones). + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all_data_zones + get_proxies(all_data_zone_identifiers) + end + + # Returns an array containing the identifiers of all the available + # Timezones that are based on data (are not links to other Timezones).. + def self.all_data_zone_identifiers + data_source.data_timezone_identifiers + end + + # Returns an array containing all the available Timezones that are links + # to other Timezones. + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all_linked_zones + get_proxies(all_linked_zone_identifiers) + end + + # Returns an array containing the identifiers of all the available + # Timezones that are links to other Timezones. + def self.all_linked_zone_identifiers + data_source.linked_timezone_identifiers + end + + # Returns all the Timezones defined for all Countries. This is not the + # complete set of Timezones as some are not country specific (e.g. + # 'Etc/GMT'). + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all_country_zones + Country.all_codes.inject([]) do |zones,country| + zones += Country.get(country).zones + end.uniq + end + + # Returns all the zone identifiers defined for all Countries. This is not the + # complete set of zone identifiers as some are not country specific (e.g. + # 'Etc/GMT'). You can obtain a Timezone instance for a given identifier + # with the get method. + def self.all_country_zone_identifiers + Country.all_codes.inject([]) do |zones,country| + zones += Country.get(country).zone_identifiers + end.uniq + end + + # Returns all US Timezone instances. A shortcut for + # TZInfo::Country.get('US').zones. 
+ # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.us_zones + Country.get('US').zones + end + + # Returns all US zone identifiers. A shortcut for + # TZInfo::Country.get('US').zone_identifiers. + def self.us_zone_identifiers + Country.get('US').zone_identifiers + end + + # The identifier of the timezone, e.g. "Europe/Paris". + def identifier + raise_unknown_timezone + end + + # An alias for identifier. + def name + # Don't use alias, as identifier gets overridden. + identifier + end + + # Returns a friendlier version of the identifier. + def to_s + friendly_identifier + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{identifier}>" + end + + # Returns a friendlier version of the identifier. Set skip_first_part to + # omit the first part of the identifier (typically a region name) where + # there is more than one part. + # + # For example: + # + # Timezone.get('Europe/Paris').friendly_identifier(false) #=> "Europe - Paris" + # Timezone.get('Europe/Paris').friendly_identifier(true) #=> "Paris" + # Timezone.get('America/Indiana/Knox').friendly_identifier(false) #=> "America - Knox, Indiana" + # Timezone.get('America/Indiana/Knox').friendly_identifier(true) #=> "Knox, Indiana" + def friendly_identifier(skip_first_part = false) + parts = identifier.split('/') + if parts.empty? + # shouldn't happen + identifier + elsif parts.length == 1 + parts[0] + else + prefix = skip_first_part ? nil : "#{parts[0]} - " + + parts = parts.drop(1).map do |part| + part.gsub!(/_/, ' ') + + if part.index(/[a-z]/) + # Missing a space if a lower case followed by an upper case and the + # name isn't McXxxx. + part.gsub!(/([^M][a-z])([A-Z])/, '\1 \2') + part.gsub!(/([M][a-bd-z])([A-Z])/, '\1 \2') + + # Missing an apostrophe if two consecutive upper case characters. + part.gsub!(/([A-Z])([A-Z])/, '\1\'\2') + end + + part + end + + "#{prefix}#{parts.reverse.join(', ')}" + end + end + + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in utc is ignored (it is treated as a UTC time). + def period_for_utc(utc) + raise_unknown_timezone + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how ambiguities should be resolved. + # Returns an empty array if no periods are found for the given time. + def periods_for_local(local) + raise_unknown_timezone + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). 
+ # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + raise_unknown_timezone + end + + # Returns the canonical Timezone instance for this Timezone. + # + # The IANA Time Zone database contains two types of definition: Zones and + # Links. Zones are defined by rules that set out when transitions occur. + # Links are just references to fully defined Zone, creating an alias for + # that Zone. + # + # Links are commonly used where a time zone has been renamed in a + # release of the Time Zone database. For example, the Zone US/Eastern was + # renamed as America/New_York. A US/Eastern Link was added in its place, + # linking to (and creating an alias for) for America/New_York. + # + # Links are also used for time zones that are currently identical to a full + # Zone, but that are administered seperately. For example, Europe/Vatican is + # a Link to (and alias for) Europe/Rome. + # + # For a full Zone, canonical_zone returns self. + # + # For a Link, canonical_zone returns a Timezone instance representing the + # full Zone that the link targets. + # + # TZInfo can be used with different data sources (see the documentation for + # TZInfo::DataSource). Please note that some DataSource implementations may + # not support distinguishing between full Zones and Links and will treat all + # time zones as full Zones. In this case, the canonical_zone will always + # return self. + # + # There are two built-in DataSource implementations. RubyDataSource (which + # will be used if the tzinfo-data gem is available) supports Link zones. + # ZoneinfoDataSource returns Link zones as if they were full Zones. If the + # canonical_zone or canonical_identifier methods are required, the + # tzinfo-data gem should be installed. + # + # The TZInfo::DataSource.get method can be used to check which DataSource + # implementation is being used. + def canonical_zone + raise_unknown_timezone + end + + # Returns the TimezonePeriod for the given local time. local can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in local is ignored (it is treated as a time in the current + # timezone). + # + # Warning: There are local times that have no equivalent UTC times (e.g. + # in the transition from standard time to daylight savings time). There are + # also local times that have more than one UTC equivalent (e.g. in the + # transition from daylight savings time to standard time). + # + # In the first case (no equivalent UTC time), a PeriodNotFound exception + # will be raised. + # + # In the second case (more than one equivalent UTC time), an AmbiguousTime + # exception will be raised unless the optional dst parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the dst parameter can be used to select whether the + # daylight savings time or local time is used. For example, + # + # Timezone.get('America/New_York').period_for_local(DateTime.new(2004,10,31,1,30,0)) + # + # would raise an AmbiguousTime exception. + # + # Specifying dst=true would the daylight savings period from April to + # October 2004. Specifying dst=false would return the standard period + # from October 2004 to April 2005. + # + # If the dst parameter does not resolve the ambiguity, and a block is + # specified, it is called. 
The block must take a single parameter - an + # array of the periods that need to be resolved. The block can select and + # return a single period or return nil or an empty array + # to cause an AmbiguousTime exception to be raised. + # + # The default value of the dst parameter can be specified by setting + # Timezone.default_dst. If default_dst is not set, or is set to nil, then + # an AmbiguousTime exception will be raised in ambiguous situations unless + # a block is given to resolve the ambiguity. + def period_for_local(local, dst = Timezone.default_dst) + results = periods_for_local(local) + + if results.empty? + raise PeriodNotFound + elsif results.size < 2 + results.first + else + # ambiguous result try to resolve + + if !dst.nil? + matches = results.find_all {|period| period.dst? == dst} + results = matches if !matches.empty? + end + + if results.size < 2 + results.first + else + # still ambiguous, try the block + + if block_given? + results = yield results + end + + if results.is_a?(TimezonePeriod) + results + elsif results && results.size == 1 + results.first + else + raise AmbiguousTime, "#{local} is an ambiguous local time." + end + end + end + end + + # Converts a time in UTC to the local timezone. utc can either be + # a DateTime, Time or timestamp (Time.to_i). The returned time has the same + # type as utc. Any timezone information in utc is ignored (it is treated as + # a UTC time). + def utc_to_local(utc) + TimeOrDateTime.wrap(utc) {|wrapped| + period_for_utc(wrapped).to_local(wrapped) + } + end + + # Converts a time in the local timezone to UTC. local can either be + # a DateTime, Time or timestamp (Time.to_i). The returned time has the same + # type as local. Any timezone information in local is ignored (it is treated + # as a local time). + # + # Warning: There are local times that have no equivalent UTC times (e.g. + # in the transition from standard time to daylight savings time). There are + # also local times that have more than one UTC equivalent (e.g. in the + # transition from daylight savings time to standard time). + # + # In the first case (no equivalent UTC time), a PeriodNotFound exception + # will be raised. + # + # In the second case (more than one equivalent UTC time), an AmbiguousTime + # exception will be raised unless the optional dst parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the dst parameter can be used to select whether the + # daylight savings time or local time is used. For example, + # + # Timezone.get('America/New_York').local_to_utc(DateTime.new(2004,10,31,1,30,0)) + # + # would raise an AmbiguousTime exception. + # + # Specifying dst=true would return 2004-10-31 5:30:00. Specifying dst=false + # would return 2004-10-31 6:30:00. + # + # If the dst parameter does not resolve the ambiguity, and a block is + # specified, it is called. The block must take a single parameter - an + # array of the periods that need to be resolved. The block can return a + # single period to use to convert the time or return nil or an empty array + # to cause an AmbiguousTime exception to be raised. + # + # The default value of the dst parameter can be specified by setting + # Timezone.default_dst. If default_dst is not set, or is set to nil, then + # an AmbiguousTime exception will be raised in ambiguous situations unless + # a block is given to resolve the ambiguity. 
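+ #
+ # For example, using the ambiguous New York local time described above (an
+ # illustrative usage sketch; any timezone information in the Time argument
+ # is ignored):
+ #
+ # tz = TZInfo::Timezone.get('America/New_York')
+ # tz.local_to_utc(Time.utc(2004,10,31,1,30,0), true)  # => 2004-10-31 05:30:00 UTC
+ # tz.local_to_utc(Time.utc(2004,10,31,1,30,0), false) # => 2004-10-31 06:30:00 UTC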
+ def local_to_utc(local, dst = Timezone.default_dst) + TimeOrDateTime.wrap(local) {|wrapped| + if block_given? + period = period_for_local(wrapped, dst) {|periods| yield periods } + else + period = period_for_local(wrapped, dst) + end + + period.to_utc(wrapped) + } + end + + # Returns information about offsets used by the Timezone up to a given + # date and time, specified using UTC (utc_to). The information is returned + # as an Array of TimezoneOffset instances. + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only offsets used from + # that date and time forward will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. + # + # Offsets may be returned in any order. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # offsets_up_to raises an ArgumentError exception. + def offsets_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + transitions = transitions_up_to(utc_to, utc_from) + + if transitions.empty? + # No transitions in the range, find the period that covers it. + + if utc_from + # Use the from date as it is inclusive. + period = period_for_utc(utc_from) + else + # utc_to is exclusive, so this can't be used with period_for_utc. + # However, any time earlier than utc_to can be used. + + # Subtract 1 hour (since this is one of the cached OffsetRationals). + # Use add_with_convert so that conversion to DateTime is performed if + # required. + period = period_for_utc(utc_to.add_with_convert(-3600)) + end + + [period.offset] + else + result = Set.new + + first = transitions.first + result << first.previous_offset unless utc_from && first.at == utc_from + + transitions.each do |t| + result << t.offset + end + + result.to_a + end + end + + # Returns the canonical identifier for this Timezone. + # + # This is a shortcut for calling canonical_zone.identifier. Please refer + # to the canonical_zone documentation for further information. + def canonical_identifier + canonical_zone.identifier + end + + # Returns the current time in the timezone as a Time. + def now + utc_to_local(Time.now.utc) + end + + # Returns the TimezonePeriod for the current time. + def current_period + period_for_utc(Time.now.utc) + end + + # Returns the current Time and TimezonePeriod as an array. The first element + # is the time, the second element is the period. + def current_period_and_time + utc = Time.now.utc + period = period_for_utc(utc) + [period.to_local(utc), period] + end + + alias :current_time_and_period :current_period_and_time + + # Converts a time in UTC to local time and returns it as a string according + # to the given format. + # + # The formatting is identical to Time.strftime and DateTime.strftime, except + # %Z and %z are replaced with the timezone abbreviation (for example, EST or + # EDT) and offset for the specified Timezone and time. + # + # The offset can be formatted as follows: + # + # - %z - hour and minute (e.g. +0500) + # - %:z - hour and minute separated with a colon (e.g. +05:00) + # - %::z - hour minute and second separated with colons (e.g. +05:00:00) + # - %:::z - hour only (e.g. +05) + # + # Timezone#strftime currently handles the replacement of %z. From TZInfo + # version 2.0.0, %z will be passed to Time#strftime and DateTime#strftime + # instead. 
Some of the formatting options may cease to be available + # depending on the version of Ruby in use (for example, %:::z is only + # supported by Time#strftime from MRI version 2.0.0 onwards). + def strftime(format, utc = Time.now.utc) + utc = TimeOrDateTime.wrap(utc) + period = period_for_utc(utc) + local_wrapped = period.to_local(utc) + local = local_wrapped.to_orig + local = local_wrapped.to_time unless local.kind_of?(Time) || local.kind_of?(DateTime) + abbreviation = period.abbreviation.to_s.gsub(/%/, '%%') + + format = format.gsub(/%(%*)([sZ]|:*z)/) do + if $1.length.odd? + # Escaped literal percent or series of percents. Pass on to strftime. + "#$1%#$2" + elsif $2 == "s" + "#$1#{utc.to_i}" + elsif $2 == "Z" + "#$1#{abbreviation}" + else + m, s = period.utc_total_offset.divmod(60) + h, m = m.divmod(60) + case $2.length + when 1 + "#$1#{'%+03d%02d' % [h,m]}" + when 2 + "#$1#{'%+03d:%02d' % [h,m]}" + when 3 + "#$1#{'%+03d:%02d:%02d' % [h,m,s]}" + when 4 + "#$1#{'%+03d' % [h]}" + else # more than 3 colons - not a valid option + # Passing the invalid format string through to Time#strftime or + # DateTime#strtime would normally result in it being returned in the + # result. However, with Ruby 1.8.7 on Windows (as tested with Ruby + # 1.8.7-p374 from https://rubyinstaller.org/downloads/archives), + # this causes Time#strftime to always return an empty string (e.g. + # Time.now.strftime('a %::::z b') returns ''). + # + # Escape the percent to force it to be evaluated as a literal. + "#$1%%#$2" + end + end + end + + local.strftime(format) + end + + # Compares two Timezones based on their identifier. Returns -1 if tz is less + # than self, 0 if tz is equal to self and +1 if tz is greater than self. + # + # Returns nil if tz is not comparable with Timezone instances. + def <=>(tz) + return nil unless tz.is_a?(Timezone) + identifier <=> tz.identifier + end + + # Returns true if and only if the identifier of tz is equal to the + # identifier of this Timezone. + def eql?(tz) + self == tz + end + + # Returns a hash of this Timezone. + def hash + identifier.hash + end + + # Dumps this Timezone for marshalling. + def _dump(limit) + identifier + end + + # Loads a marshalled Timezone. + def self._load(data) + Timezone.get(data) + end + + private + # Initializes @@loaded_zones. + def self.init_loaded_zones + @@loaded_zones = ThreadSafe::Cache.new + end + init_loaded_zones + + # Returns an array of proxies corresponding to the given array of + # identifiers. + def self.get_proxies(identifiers) + identifiers.collect {|identifier| get_proxy(identifier)} + end + + # Returns the current DataSource. + def self.data_source + DataSource.get + end + + # Raises an UnknownTimezone exception. + def raise_unknown_timezone + raise UnknownTimezone, 'TZInfo::Timezone constructed directly' + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_definition.rb new file mode 100644 index 0000000..5ceb686 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_definition.rb @@ -0,0 +1,36 @@ +module TZInfo + + # TimezoneDefinition is included into Timezone definition modules. + # TimezoneDefinition provides the methods for defining timezones. + # + # @private + module TimezoneDefinition #:nodoc: + # Add class methods to the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + end + + # Class methods for inclusion. 
+ # + # @private + module ClassMethods #:nodoc: + # Returns and yields a TransitionDataTimezoneInfo object to define a + # timezone. + def timezone(identifier) + yield @timezone = TransitionDataTimezoneInfo.new(identifier) + end + + # Defines a linked timezone. + def linked_timezone(identifier, link_to_identifier) + @timezone = LinkedTimezoneInfo.new(identifier, link_to_identifier) + end + + # Returns the last TimezoneInfo to be defined with timezone or + # linked_timezone. + def get + @timezone + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_index_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_index_definition.rb new file mode 100644 index 0000000..59dc953 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_index_definition.rb @@ -0,0 +1,54 @@ +module TZInfo + # The timezone index file includes TimezoneIndexDefinition which provides + # methods used to define timezones in the index. + # + # @private + module TimezoneIndexDefinition #:nodoc: + # Add class methods to the includee and initialize class instance variables. + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval do + @timezones = [] + @data_timezones = [] + @linked_timezones = [] + end + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # Defines a timezone based on data. + def timezone(identifier) + @timezones << identifier + @data_timezones << identifier + end + + # Defines a timezone which is a link to another timezone. + def linked_timezone(identifier) + @timezones << identifier + @linked_timezones << identifier + end + + # Returns a frozen array containing the identifiers of all the timezones. + # Identifiers appear in the order they were defined in the index. + def timezones + @timezones.freeze + end + + # Returns a frozen array containing the identifiers of all data timezones. + # Identifiers appear in the order they were defined in the index. + def data_timezones + @data_timezones.freeze + end + + # Returns a frozen array containing the identifiers of all linked + # timezones. Identifiers appear in the order they were defined in + # the index. + def linked_timezones + @linked_timezones.freeze + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_info.rb new file mode 100644 index 0000000..13f66ba --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_info.rb @@ -0,0 +1,30 @@ +module TZInfo + # Represents a timezone defined by a data source. + class TimezoneInfo + + # The timezone identifier. + attr_reader :identifier + + # Constructs a new TimezoneInfo with an identifier. + def initialize(identifier) + @identifier = identifier + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@identifier>" + end + + # Constructs a Timezone instance for the timezone represented by this + # TimezoneInfo. 
+ def create_timezone + raise_not_implemented('create_timezone') + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_offset.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_offset.rb new file mode 100644 index 0000000..dbce0e9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_offset.rb @@ -0,0 +1,101 @@ +module TZInfo + # Represents an offset defined in a Timezone data file. + class TimezoneOffset + # The base offset of the timezone from UTC in seconds. This does not include + # any adjustment made for daylight savings time and will typically remain + # constant throughout the year. + # + # To obtain the currently observed offset from UTC, including the effect of + # daylight savings time, use utc_total_offset instead. + # + # Note that zoneinfo files only include the value of utc_total_offset and a + # DST flag. When using ZoneinfoDataSource, the utc_offset will be derived + # from changes to the UTC total offset and the DST flag. As a consequence, + # utc_total_offset will always be correct, but utc_offset may be inaccurate. + # + # If you require utc_offset to be accurate, install the tzinfo-data gem and + # set RubyDataSource as the DataSource. + attr_reader :utc_offset + + # The offset from the time zone's standard time in seconds. Zero + # when daylight savings time is not in effect. Non-zero (usually 3600 = 1 + # hour) if daylight savings is being observed. + # + # Note that zoneinfo files only include the value of utc_total_offset and + # a DST flag. When using DataSources::ZoneinfoDataSource, the std_offset + # will be derived from changes to the UTC total offset and the DST flag. As + # a consequence, utc_total_offset will always be correct, but std_offset + # may be inaccurate. + # + # If you require std_offset to be accurate, install the tzinfo-data gem + # and set RubyDataSource as the DataSource. + attr_reader :std_offset + + # The total offset of this observance from UTC in seconds + # (utc_offset + std_offset). + attr_reader :utc_total_offset + + # The abbreviation that identifies this observance, e.g. "GMT" + # (Greenwich Mean Time) or "BST" (British Summer Time) for "Europe/London". The returned identifier is a + # symbol. + attr_reader :abbreviation + + # Constructs a new TimezoneOffset. utc_offset and std_offset are specified + # in seconds. + def initialize(utc_offset, std_offset, abbreviation) + @utc_offset = utc_offset + @std_offset = std_offset + @abbreviation = abbreviation + + @utc_total_offset = @utc_offset + @std_offset + end + + # True if std_offset is non-zero. + def dst? + @std_offset != 0 + end + + # Converts a UTC Time, DateTime or integer timestamp to local time, based on + # the offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_local(utc) + TimeOrDateTime.wrap(utc) {|wrapped| + wrapped + @utc_total_offset + } + end + + # Converts a local Time, DateTime or integer timestamp to UTC, based on the + # offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_utc(local) + TimeOrDateTime.wrap(local) {|wrapped| + wrapped - @utc_total_offset + } + end + + # Returns true if and only if toi has the same utc_offset, std_offset + # and abbreviation as this TimezoneOffset. 
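+ #
+ # For example (an illustrative sketch using the constructor defined above):
+ #
+ # a = TimezoneOffset.new(0, 3600, :BST)
+ # b = TimezoneOffset.new(0, 3600, :BST)
+ # a == b                               # => true
+ # a == TimezoneOffset.new(0, 0, :GMT)  # => false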
+ def ==(toi) + toi.kind_of?(TimezoneOffset) && + utc_offset == toi.utc_offset && std_offset == toi.std_offset && abbreviation == toi.abbreviation + end + + # Returns true if and only if toi has the same utc_offset, std_offset + # and abbreviation as this TimezoneOffset. + def eql?(toi) + self == toi + end + + # Returns a hash of this TimezoneOffset. + def hash + utc_offset.hash ^ std_offset.hash ^ abbreviation.hash + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@utc_offset,#@std_offset,#@abbreviation>" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_period.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_period.rb new file mode 100644 index 0000000..2805841 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_period.rb @@ -0,0 +1,245 @@ +module TZInfo + # A period of time in a timezone where the same offset from UTC applies. + # + # All the methods that take times accept instances of Time or DateTime as well + # as Integer timestamps. + class TimezonePeriod + # The TimezoneTransition that defines the start of this TimezonePeriod + # (may be nil if unbounded). + attr_reader :start_transition + + # The TimezoneTransition that defines the end of this TimezonePeriod + # (may be nil if unbounded). + attr_reader :end_transition + + # The TimezoneOffset for this period. + attr_reader :offset + + # Initializes a new TimezonePeriod. + # + # TimezonePeriod instances should not normally be constructed manually. + def initialize(start_transition, end_transition, offset = nil) + @start_transition = start_transition + @end_transition = end_transition + + if offset + raise ArgumentError, 'Offset specified with transitions' if @start_transition || @end_transition + @offset = offset + else + if @start_transition + @offset = @start_transition.offset + elsif @end_transition + @offset = @end_transition.previous_offset + else + raise ArgumentError, 'No offset specified and no transitions to determine it from' + end + end + + @utc_total_offset_rational = nil + end + + # The base offset of the timezone from UTC in seconds. This does not include + # any adjustment made for daylight savings time and will typically remain + # constant throughout the year. + # + # To obtain the currently observed offset from UTC, including the effect of + # daylight savings time, use utc_total_offset instead. + # + # Note that zoneinfo files only include the value of utc_total_offset and a + # DST flag. When using ZoneinfoDataSource, the utc_offset will be derived + # from changes to the UTC total offset and the DST flag. As a consequence, + # utc_total_offset will always be correct, but utc_offset may be inaccurate. + # + # If you require utc_offset to be accurate, install the tzinfo-data gem and + # set RubyDataSource as the DataSource. + def utc_offset + @offset.utc_offset + end + + # The offset from the time zone's standard time in seconds. Zero + # when daylight savings time is not in effect. Non-zero (usually 3600 = 1 + # hour) if daylight savings is being observed. + # + # Note that zoneinfo files only include the value of utc_total_offset and + # a DST flag. When using DataSources::ZoneinfoDataSource, the std_offset + # will be derived from changes to the UTC total offset and the DST flag. As + # a consequence, utc_total_offset will always be correct, but std_offset + # may be inaccurate. 
+ # + # If you require std_offset to be accurate, install the tzinfo-data gem + # and set RubyDataSource as the DataSource. + def std_offset + @offset.std_offset + end + + # The identifier of this period, e.g. "GMT" (Greenwich Mean Time) or "BST" + # (British Summer Time) for "Europe/London". The returned identifier is a + # symbol. + def abbreviation + @offset.abbreviation + end + alias :zone_identifier :abbreviation + + # Total offset from UTC (seconds). Equal to utc_offset + std_offset. + def utc_total_offset + @offset.utc_total_offset + end + + # Total offset from UTC (days). Result is a Rational. + def utc_total_offset_rational + # Thread-safety: It is possible that the value of + # @utc_total_offset_rational may be calculated multiple times in + # concurrently executing threads. It is not worth the overhead of locking + # to ensure that @zone_identifiers is only calculated once. + + unless @utc_total_offset_rational + result = OffsetRationals.rational_for_offset(utc_total_offset) + return result if frozen? + @utc_total_offset_rational = result + end + @utc_total_offset_rational + end + + # The start time of the period in UTC as a DateTime. May be nil if unbounded. + def utc_start + @start_transition ? @start_transition.at.to_datetime : nil + end + + # The start time of the period in UTC as a Time. May be nil if unbounded. + def utc_start_time + @start_transition ? @start_transition.at.to_time : nil + end + + # The end time of the period in UTC as a DateTime. May be nil if unbounded. + def utc_end + @end_transition ? @end_transition.at.to_datetime : nil + end + + # The end time of the period in UTC as a Time. May be nil if unbounded. + def utc_end_time + @end_transition ? @end_transition.at.to_time : nil + end + + # The start time of the period in local time as a DateTime. May be nil if + # unbounded. + def local_start + @start_transition ? @start_transition.local_start_at.to_datetime : nil + end + + # The start time of the period in local time as a Time. May be nil if + # unbounded. + def local_start_time + @start_transition ? @start_transition.local_start_at.to_time : nil + end + + # The end time of the period in local time as a DateTime. May be nil if + # unbounded. + def local_end + @end_transition ? @end_transition.local_end_at.to_datetime : nil + end + + # The end time of the period in local time as a Time. May be nil if + # unbounded. + def local_end_time + @end_transition ? @end_transition.local_end_at.to_time : nil + end + + # true if daylight savings is in effect for this period; otherwise false. + def dst? + @offset.dst? + end + + # true if this period is valid for the given UTC DateTime; otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def valid_for_utc?(utc) + utc_after_start?(utc) && utc_before_end?(utc) + end + + # true if the given UTC DateTime is after the start of the period + # (inclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def utc_after_start?(utc) + !@start_transition || @start_transition.at <= utc + end + + # true if the given UTC DateTime is before the end of the period + # (exclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def utc_before_end?(utc) + !@end_transition || @end_transition.at > utc + end + + # true if this period is valid for the given local DateTime; otherwise + # false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. 
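+ #
+ # For example (an illustrative sketch; Europe/London observes BST, UTC+1,
+ # throughout August 2005):
+ #
+ # tz = TZInfo::Timezone.get('Europe/London')
+ # period = tz.period_for_utc(Time.utc(2005,8,29,11,35,0))
+ # period.valid_for_local?(DateTime.new(2005,8,29,12,35,0)) # => true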
+ def valid_for_local?(local) + local_after_start?(local) && local_before_end?(local) + end + + # true if the given local DateTime is after the start of the period + # (inclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def local_after_start?(local) + !@start_transition || @start_transition.local_start_at <= local + end + + # true if the given local DateTime is before the end of the period + # (exclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def local_before_end?(local) + !@end_transition || @end_transition.local_end_at > local + end + + # Converts a UTC DateTime to local time based on the offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_local(utc) + @offset.to_local(utc) + end + + # Converts a local DateTime to UTC based on the offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_utc(local) + @offset.to_utc(local) + end + + # Returns true if this TimezonePeriod is equal to p. This compares the + # start_transition, end_transition and offset using ==. + def ==(p) + p.kind_of?(TimezonePeriod) && + start_transition == p.start_transition && + end_transition == p.end_transition && + offset == p.offset + end + + # Returns true if this TimezonePeriods is equal to p. This compares the + # start_transition, end_transition and offset using eql? + def eql?(p) + p.kind_of?(TimezonePeriod) && + start_transition.eql?(p.start_transition) && + end_transition.eql?(p.end_transition) && + offset.eql?(p.offset) + end + + # Returns a hash of this TimezonePeriod. + def hash + result = @start_transition.hash ^ @end_transition.hash + result ^= @offset.hash unless @start_transition || @end_transition + result + end + + # Returns internal object state as a programmer-readable string. + def inspect + result = "#<#{self.class}: #{@start_transition.inspect},#{@end_transition.inspect}" + result << ",#{@offset.inspect}>" unless @start_transition || @end_transition + result + '>' + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_proxy.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_proxy.rb new file mode 100644 index 0000000..c913011 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_proxy.rb @@ -0,0 +1,105 @@ +module TZInfo + + # A proxy class representing a timezone with a given identifier. TimezoneProxy + # inherits from Timezone and can be treated like any Timezone loaded with + # Timezone.get. + # + # The first time an attempt is made to access the data for the timezone, the + # real Timezone is loaded. If the proxy's identifier was not valid, then an + # exception will be raised at this point. + class TimezoneProxy < Timezone + # Construct a new TimezoneProxy for the given identifier. The identifier + # is not checked when constructing the proxy. It will be validated on the + # when the real Timezone is loaded. + def self.new(identifier) + # Need to override new to undo the behaviour introduced in Timezone#new. + tzp = super() + tzp.send(:setup, identifier) + tzp + end + + # The identifier of the timezone, e.g. "Europe/Paris". + def identifier + @real_timezone ? @real_timezone.identifier : @identifier + end + + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). 
Any timezone + # information in utc is ignored (it is treated as a UTC time). + def period_for_utc(utc) + real_timezone.period_for_utc(utc) + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how abiguities should be resolved. + # Returns an empty array if no periods are found for the given time. + def periods_for_local(local) + real_timezone.periods_for_local(local) + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time (to). + # + # A from date and time may also be supplied using the from parameter. If + # from is not nil, only transitions from that date and time onwards will be + # returned. + # + # Comparisons with to are exclusive. Comparisons with from are inclusive. + # If a transition falls precisely on to, it will be excluded. If a + # transition falls on from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # to and from can be specified using either a Time, DateTime, Time or + # Timestamp. + # + # If from is specified and to is not greater than from, then an + # ArgumentError exception is raised. + # + # ArgumentError is raised if to is nil or of either to or from are + # Timestamps with unspecified offsets. + def transitions_up_to(to, from = nil) + real_timezone.transitions_up_to(to, from) + end + + # Returns the canonical zone for this Timezone. + def canonical_zone + real_timezone.canonical_zone + end + + # Dumps this TimezoneProxy for marshalling. + def _dump(limit) + identifier + end + + # Loads a marshalled TimezoneProxy. + def self._load(data) + TimezoneProxy.new(data) + end + + private + def setup(identifier) + @identifier = identifier + @real_timezone = nil + end + + def real_timezone + # Thread-safety: It is possible that the value of @real_timezone may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @real_timezone is only + # calculated once. + unless @real_timezone + result = Timezone.get(@identifier) + return result if frozen? + @real_timezone = result + end + + @real_timezone + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition.rb new file mode 100644 index 0000000..b905c62 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition.rb @@ -0,0 +1,130 @@ +module TZInfo + # Represents a transition from one timezone offset to another at a particular + # date and time. + class TimezoneTransition + # The offset this transition changes to (a TimezoneOffset instance). + attr_reader :offset + + # The offset this transition changes from (a TimezoneOffset instance). + attr_reader :previous_offset + + # Initializes a new TimezoneTransition. + # + # TimezoneTransition instances should not normally be constructed manually. + def initialize(offset, previous_offset) + @offset = offset + @previous_offset = previous_offset + @local_end_at = nil + @local_start_at = nil + end + + # A TimeOrDateTime instance representing the UTC time when this transition + # occurs. 
+ def at + raise_not_implemented('at') + end + + # The UTC time when this transition occurs, returned as a DateTime instance. + def datetime + at.to_datetime + end + + # The UTC time when this transition occurs, returned as a Time instance. + def time + at.to_time + end + + # A TimeOrDateTime instance representing the local time when this transition + # causes the previous observance to end (calculated from at using + # previous_offset). + def local_end_at + # Thread-safety: It is possible that the value of @local_end_at may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @local_end_at is only + # calculated once. + + unless @local_end_at + result = at.add_with_convert(@previous_offset.utc_total_offset) + return result if frozen? + @local_end_at = result + end + + @local_end_at + end + + # The local time when this transition causes the previous observance to end, + # returned as a DateTime instance. + def local_end + local_end_at.to_datetime + end + + # The local time when this transition causes the previous observance to end, + # returned as a Time instance. + def local_end_time + local_end_at.to_time + end + + # A TimeOrDateTime instance representing the local time when this transition + # causes the next observance to start (calculated from at using offset). + def local_start_at + # Thread-safety: It is possible that the value of @local_start_at may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @local_start_at is only + # calculated once. + + unless @local_start_at + result = at.add_with_convert(@offset.utc_total_offset) + return result if frozen? + @local_start_at = result + end + + @local_start_at + end + + # The local time when this transition causes the next observance to start, + # returned as a DateTime instance. + def local_start + local_start_at.to_datetime + end + + # The local time when this transition causes the next observance to start, + # returned as a Time instance. + def local_start_time + local_start_at.to_time + end + + # Returns true if this TimezoneTransition is equal to the given + # TimezoneTransition. Two TimezoneTransition instances are + # considered to be equal by == if offset, previous_offset and at are all + # equal. + def ==(tti) + tti.kind_of?(TimezoneTransition) && + offset == tti.offset && previous_offset == tti.previous_offset && at == tti.at + end + + # Returns true if this TimezoneTransition is equal to the given + # TimezoneTransition. Two TimezoneTransition instances are + # considered to be equal by eql? if offset, previous_offset and at are all + # equal and the type used to define at in both instances is the same. + def eql?(tti) + tti.kind_of?(TimezoneTransition) && + offset == tti.offset && previous_offset == tti.previous_offset && at.eql?(tti.at) + end + + # Returns a hash of this TimezoneTransition instance. + def hash + @offset.hash ^ @previous_offset.hash ^ at.hash + end + + # Returns internal object state as a programmer-readable string. 
+ def inspect + "#<#{self.class}: #{at.inspect},#{@offset.inspect}>" + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition_definition.rb new file mode 100644 index 0000000..016816b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition_definition.rb @@ -0,0 +1,104 @@ +module TZInfo + # A TimezoneTransition defined by as integer timestamp, as a rational to + # create a DateTime or as both. + # + # @private + class TimezoneTransitionDefinition < TimezoneTransition #:nodoc: + # The numerator of the DateTime if the transition time is defined as a + # DateTime, otherwise the transition time as a timestamp. + attr_reader :numerator_or_time + protected :numerator_or_time + + # Either the denominator of the DateTime if the transition time is defined + # as a DateTime, otherwise nil. + attr_reader :denominator + protected :denominator + + # Creates a new TimezoneTransitionDefinition with the given offset, + # previous_offset (both TimezoneOffset instances) and UTC time. + # + # The time can be specified as a timestamp, as a rational to create a + # DateTime, or as both. + # + # If both a timestamp and rational are given, then the rational will only + # be used if the timestamp falls outside of the range of Time on the + # platform being used at runtime. + # + # DateTimes are created from the rational as follows: + # + # RubyCoreSupport.datetime_new!(RubyCoreSupport.rational_new!(numerator, denominator), 0, Date::ITALY) + # + # For performance reasons, the numerator and denominator must be specified + # in their lowest form. + def initialize(offset, previous_offset, numerator_or_timestamp, denominator_or_numerator = nil, denominator = nil) + super(offset, previous_offset) + + if denominator + numerator = denominator_or_numerator + timestamp = numerator_or_timestamp + elsif denominator_or_numerator + numerator = numerator_or_timestamp + denominator = denominator_or_numerator + timestamp = nil + else + numerator = nil + denominator = nil + timestamp = numerator_or_timestamp + end + + # Determine whether to use the timestamp or the numerator and denominator. + if numerator && ( + !timestamp || + (timestamp < 0 && !RubyCoreSupport.time_supports_negative) || + ((timestamp < -2147483648 || timestamp > 2147483647) && !RubyCoreSupport.time_supports_64bit) + ) + + @numerator_or_time = numerator + @denominator = denominator + else + @numerator_or_time = timestamp + @denominator = nil + end + + @at = nil + end + + # A TimeOrDateTime instance representing the UTC time when this transition + # occurs. + def at + # Thread-safety: It is possible that the value of @at may be calculated + # multiple times in concurrently executing threads. It is not worth the + # overhead of locking to ensure that @at is only calculated once. + + unless @at + result = unless @denominator + TimeOrDateTime.new(@numerator_or_time) + else + r = RubyCoreSupport.rational_new!(@numerator_or_time, @denominator) + dt = RubyCoreSupport.datetime_new!(r, 0, Date::ITALY) + TimeOrDateTime.new(dt) + end + + return result if frozen? + @at = result + end + + @at + end + + # Returns true if this TimezoneTransitionDefinition is equal to the given + # TimezoneTransitionDefinition. 
Two TimezoneTransitionDefinition instances + # are considered to be equal by eql? if offset, previous_offset, + # numerator_or_time and denominator are all equal. + def eql?(tti) + tti.kind_of?(TimezoneTransitionDefinition) && + offset == tti.offset && previous_offset == tti.previous_offset && + numerator_or_time == tti.numerator_or_time && denominator == tti.denominator + end + + # Returns a hash of this TimezoneTransitionDefinition instance. + def hash + @offset.hash ^ @previous_offset.hash ^ @numerator_or_time.hash ^ @denominator.hash + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_data_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_data_timezone_info.rb new file mode 100644 index 0000000..026bf22 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_data_timezone_info.rb @@ -0,0 +1,274 @@ +module TZInfo + # Raised if no offsets have been defined when calling period_for_utc or + # periods_for_local. Indicates an error in the timezone data. + class NoOffsetsDefined < StandardError + end + + # Represents a data timezone defined by a set of offsets and a set + # of transitions. + # + # @private + class TransitionDataTimezoneInfo < DataTimezoneInfo #:nodoc: + + # Constructs a new TransitionDataTimezoneInfo with its identifier. + def initialize(identifier) + super(identifier) + @offsets = {} + @transitions = [] + @previous_offset = nil + @transitions_index = nil + end + + # Defines a offset. The id uniquely identifies this offset within the + # timezone. utc_offset and std_offset define the offset in seconds of + # standard time from UTC and daylight savings from standard time + # respectively. abbreviation describes the timezone offset (e.g. GMT, BST, + # EST or EDT). + # + # The first offset to be defined is treated as the offset that applies + # until the first transition. This will usually be in Local Mean Time (LMT). + # + # ArgumentError will be raised if the id is already defined. + def offset(id, utc_offset, std_offset, abbreviation) + raise ArgumentError, 'Offset already defined' if @offsets.has_key?(id) + + offset = TimezoneOffset.new(utc_offset, std_offset, abbreviation) + @offsets[id] = offset + @previous_offset = offset unless @previous_offset + end + + # Defines a transition. Transitions must be defined in chronological order. + # ArgumentError will be raised if a transition is added out of order. + # offset_id refers to an id defined with offset. ArgumentError will be + # raised if the offset_id cannot be found. numerator_or_time and + # denomiator specify the time the transition occurs as. See + # TimezoneTransition for more detail about specifying times. + def transition(year, month, offset_id, numerator_or_timestamp, denominator_or_numerator = nil, denominator = nil) + offset = @offsets[offset_id] + raise ArgumentError, 'Offset not found' unless offset + + if @transitions_index + if year < @last_year || (year == @last_year && month < @last_month) + raise ArgumentError, 'Transitions must be increasing date order' + end + + # Record the position of the first transition with this index. 
+ index = transition_index(year, month) + @transitions_index[index] ||= @transitions.length + + # Fill in any gaps + (index - 1).downto(0) do |i| + break if @transitions_index[i] + @transitions_index[i] = @transitions.length + end + else + @transitions_index = [@transitions.length] + @start_year = year + @start_month = month + end + + @transitions << TimezoneTransitionDefinition.new(offset, @previous_offset, + numerator_or_timestamp, denominator_or_numerator, denominator) + @last_year = year + @last_month = month + @previous_offset = offset + end + + # Returns the TimezonePeriod for the given UTC time. + # Raises NoOffsetsDefined if no offsets have been defined. + def period_for_utc(utc) + unless @transitions.empty? + utc = TimeOrDateTime.wrap(utc) + index = transition_index(utc.year, utc.mon) + + start_transition = nil + start = transition_before_end(index) + if start + start.downto(0) do |i| + if @transitions[i].at <= utc + start_transition = @transitions[i] + break + end + end + end + + end_transition = nil + start = transition_after_start(index) + if start + start.upto(@transitions.length - 1) do |i| + if @transitions[i].at > utc + end_transition = @transitions[i] + break + end + end + end + + if start_transition || end_transition + TimezonePeriod.new(start_transition, end_transition) + else + # Won't happen since there are transitions. Must always find one + # transition that is either >= or < the specified time. + raise 'No transitions found in search' + end + else + raise NoOffsetsDefined, 'No offsets have been defined' unless @previous_offset + TimezonePeriod.new(nil, nil, @previous_offset) + end + end + + # Returns the set of TimezonePeriods for the given local time as an array. + # Results returned are ordered by increasing UTC start date. + # Returns an empty array if no periods are found for the given time. + # Raises NoOffsetsDefined if no offsets have been defined. + def periods_for_local(local) + unless @transitions.empty? + local = TimeOrDateTime.wrap(local) + index = transition_index(local.year, local.mon) + + result = [] + + start_index = transition_after_start(index - 1) + if start_index && @transitions[start_index].local_end_at > local + if start_index > 0 + if @transitions[start_index - 1].local_start_at <= local + result << TimezonePeriod.new(@transitions[start_index - 1], @transitions[start_index]) + end + else + result << TimezonePeriod.new(nil, @transitions[start_index]) + end + end + + end_index = transition_before_end(index + 1) + + if end_index + start_index = end_index unless start_index + + start_index.upto(transition_before_end(index + 1)) do |i| + if @transitions[i].local_start_at <= local + if i + 1 < @transitions.length + if @transitions[i + 1].local_end_at > local + result << TimezonePeriod.new(@transitions[i], @transitions[i + 1]) + end + else + result << TimezonePeriod.new(@transitions[i], nil) + end + end + end + end + + result + else + raise NoOffsetsDefined, 'No offsets have been defined' unless @previous_offset + [TimezonePeriod.new(nil, nil, @previous_offset)] + end + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. 
+ # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + utc_from = utc_from ? TimeOrDateTime.wrap(utc_from) : nil + + if utc_from && utc_to <= utc_from + raise ArgumentError, 'utc_to must be greater than utc_from' + end + + unless @transitions.empty? + if utc_from + from = transition_after_start(transition_index(utc_from.year, utc_from.mon)) + + if from + while from < @transitions.length && @transitions[from].at < utc_from + from += 1 + end + + if from >= @transitions.length + return [] + end + else + # utc_from is later than last transition. + return [] + end + else + from = 0 + end + + to = transition_before_end(transition_index(utc_to.year, utc_to.mon)) + + if to + while to >= 0 && @transitions[to].at >= utc_to + to -= 1 + end + + if to < 0 + return [] + end + else + # utc_to is earlier than first transition. + return [] + end + + @transitions[from..to] + else + [] + end + end + + private + # Returns the index into the @transitions_index array for a given year + # and month. + def transition_index(year, month) + index = (year - @start_year) * 2 + index += 1 if month > 6 + index -= 1 if @start_month > 6 + index + end + + # Returns the index into @transitions of the first transition that occurs + # on or after the start of the given index into @transitions_index. + # Returns nil if there are no such transitions. + def transition_after_start(index) + if index >= @transitions_index.length + nil + else + index = 0 if index < 0 + @transitions_index[index] + end + end + + # Returns the index into @transitions of the first transition that occurs + # before the end of the given index into @transitions_index. + # Returns nil if there are no such transitions. + def transition_before_end(index) + index = index + 1 + + if index <= 0 + nil + elsif index >= @transitions_index.length + @transitions.length - 1 + else + @transitions_index[index] - 1 + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb new file mode 100644 index 0000000..b8d1605 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb @@ -0,0 +1,325 @@ +require 'date' + +module TZInfo + # Base class for rules definining the transition between standard and daylight + # savings time. + class TransitionRule #:nodoc: + # Returns the number of seconds after midnight local time on the day + # identified by the rule at which the transition occurs. Can be negative to + # denote a time on the prior day. Can be greater than or equal to 86,400 to + # denote a time of the following day. + attr_reader :transition_at + + # Initializes a new TransitionRule. + def initialize(transition_at) + raise ArgumentError, 'Invalid transition_at' unless transition_at.kind_of?(Integer) + @transition_at = transition_at + end + + # Calculates the UTC time of the transition from a given offset on a given + # year. 
+ def at(offset, year) + day = get_day(year) + day.add_with_convert(@transition_at - offset.utc_total_offset) + end + + # Determines if this TransitionRule is equal to another instance. + def ==(r) + r.kind_of?(TransitionRule) && @transition_at == r.transition_at + end + alias eql? == + + # Returns a hash based on hash_args (defaulting to transition_at). + def hash + hash_args.hash + end + + protected + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@transition_at] + end + + def new_time_or_datetime(year, month = 1, day = 1) + result = if ((year >= 2039 || (year == 2038 && (month >= 2 || (month == 1 && day >= 20)))) && !RubyCoreSupport.time_supports_64bit) || + (year < 1970 && !RubyCoreSupport.time_supports_negative) + + # Time handles 29 February on a non-leap year as 1 March. + # DateTime rejects. Advance manually. + if month == 2 && day == 29 && !Date.gregorian_leap?(year) + month = 3 + day = 1 + end + + RubyCoreSupport.datetime_new(year, month, day) + else + Time.utc(year, month, day) + end + + TimeOrDateTime.wrap(result) + end + end + + # A base class for transition rules that activate based on an integer day of + # the year. + # + # @private + class DayOfYearTransitionRule < TransitionRule #:nodoc: + # Initializes a new DayOfYearTransitionRule. + def initialize(day, transition_at) + super(transition_at) + raise ArgumentError, 'Invalid day' unless day.kind_of?(Integer) + @seconds = day * 86400 + end + + # Determines if this DayOfYearTransitionRule is equal to another instance. + def ==(r) + super(r) && r.kind_of?(DayOfYearTransitionRule) && @seconds == r.seconds + end + alias eql? == + + protected + + # @return [Integer] the day multipled by the number of seconds in a day. + attr_reader :seconds + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@seconds] + super + end + end + + # Defines transitions that occur on the zero-based nth day of the year. + # + # Day 0 is 1 January. + # + # Leap days are counted. Day 59 will be 29 February on a leap year and 1 March + # on a non-leap year. Day 365 will be 31 December on a leap year and 1 January + # the following year on a non-leap year. + # + # @private + class AbsoluteDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc: + # Initializes a new AbsoluteDayOfYearTransitionRule. + def initialize(day, transition_at = 0) + super(day, transition_at) + raise ArgumentError, 'Invalid day' unless day >= 0 && day <= 365 + end + + # Returns true if the day specified by this transition is the first in the + # year (a day number of 0), otherwise false. + def is_always_first_day_of_year? + seconds == 0 + end + + # @returns false. + def is_always_last_day_of_year? + false + end + + # Determines if this AbsoluteDayOfYearTransitionRule is equal to another + # instance. + def ==(r) + super(r) && r.kind_of?(AbsoluteDayOfYearTransitionRule) + end + alias eql? == + + protected + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + new_time_or_datetime(year).add_with_convert(seconds) + end + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [AbsoluteDayOfYearTransitionRule] + super + end + end + + # Defines transitions that occur on the one-based nth Julian day of the year. + # + # Leap days are not counted. Day 1 is 1 January. Day 60 is always 1 March. + # Day 365 is always 31 December. 
+ # + # @private + class JulianDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc: + # The 60 days in seconds. + LEAP = 60 * 86400 + + # The length of a non-leap year in seconds. + YEAR = 365 * 86400 + + # Initializes a new JulianDayOfYearTransitionRule. + def initialize(day, transition_at = 0) + super(day, transition_at) + raise ArgumentError, 'Invalid day' unless day >= 1 && day <= 365 + end + + # Returns true if the day specified by this transition is the first in the + # year (a day number of 1), otherwise false. + def is_always_first_day_of_year? + seconds == 86400 + end + + # Returns true if the day specified by this transition is the last in the + # year (a day number of 365), otherwise false. + def is_always_last_day_of_year? + seconds == YEAR + end + + # Determines if this JulianDayOfYearTransitionRule is equal to another + # instance. + def ==(r) + super(r) && r.kind_of?(JulianDayOfYearTransitionRule) + end + alias eql? == + + protected + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + # Returns 1 March on non-leap years. + leap = new_time_or_datetime(year, 2, 29) + diff = seconds - LEAP + diff += 86400 if diff >= 0 && leap.mday == 29 + leap.add_with_convert(diff) + end + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [JulianDayOfYearTransitionRule] + super + end + end + + # A base class for rules that transition on a particular day of week of a + # given week (subclasses specify which week of the month). + # + # @private + class DayOfWeekTransitionRule < TransitionRule #:nodoc: + # Initializes a new DayOfWeekTransitionRule. + def initialize(month, day_of_week, transition_at) + super(transition_at) + raise ArgumentError, 'Invalid month' unless month.kind_of?(Integer) && month >= 1 && month <= 12 + raise ArgumentError, 'Invalid day_of_week' unless day_of_week.kind_of?(Integer) && day_of_week >= 0 && day_of_week <= 6 + @month = month + @day_of_week = day_of_week + end + + # Returns false. + def is_always_first_day_of_year? + false + end + + # Returns false. + def is_always_last_day_of_year? + false + end + + # Determines if this DayOfWeekTransitionRule is equal to another instance. + def ==(r) + super(r) && r.kind_of?(DayOfWeekTransitionRule) && @month == r.month && @day_of_week == r.day_of_week + end + alias eql? == + + protected + + # Returns the month of the year (1 to 12). + attr_reader :month + + # Returns the day of the week (0 to 6 for Sunday to Monday). + attr_reader :day_of_week + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@month, @day_of_week] + super + end + end + + # A rule that transitions on the nth occurrence of a particular day of week + # of a calendar month. + # + # @private + class DayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc: + # Initializes a new DayOfMonthTransitionRule. + def initialize(month, week, day_of_week, transition_at = 0) + super(month, day_of_week, transition_at) + raise ArgumentError, 'Invalid week' unless week.kind_of?(Integer) && week >= 1 && week <= 4 + @offset_start = (week - 1) * 7 + 1 + end + + # Determines if this DayOfMonthTransitionRule is equal to another instance. + def ==(r) + super(r) && r.kind_of?(DayOfMonthTransitionRule) && @offset_start == r.offset_start + end + alias eql? == + + protected + + # Returns the day the week starts on for a month starting on a Sunday. 
+ attr_reader :offset_start + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + candidate = new_time_or_datetime(year, month, @offset_start) + diff = day_of_week - candidate.wday + + if diff < 0 + candidate.add_with_convert((7 + diff) * 86400) + elsif diff > 0 + candidate.add_with_convert(diff * 86400) + else + candidate + end + end + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@offset_start] + super + end + end + + # A rule that transitions on the last occurrence of a particular day of week + # of a calendar month. + # + # @private + class LastDayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc: + # Initializes a new LastDayOfMonthTransitionRule. + def initialize(month, day_of_week, transition_at = 0) + super(month, day_of_week, transition_at) + end + + # Determines if this LastDayOfMonthTransitionRule is equal to another + # instance. + def ==(r) + super(r) && r.kind_of?(LastDayOfMonthTransitionRule) + end + alias eql? == + + protected + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + next_month = month + 1 + if next_month == 13 + year += 1 + next_month = 1 + end + + candidate = new_time_or_datetime(year, next_month).add_with_convert(-86400) + diff = candidate.wday - day_of_week + + if diff < 0 + candidate - (diff + 7) * 86400 + elsif diff > 0 + candidate - diff * 86400 + else + candidate + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_country_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_country_info.rb new file mode 100644 index 0000000..c99acaa --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_country_info.rb @@ -0,0 +1,37 @@ +module TZInfo + # Represents information about a country returned by ZoneinfoDataSource. + # + # @private + class ZoneinfoCountryInfo < CountryInfo #:nodoc: + # Constructs a new CountryInfo with an ISO 3166 country code, name and + # an array of CountryTimezones. + def initialize(code, name, zones) + super(code, name) + @zones = zones.dup.freeze + @zone_identifiers = nil + end + + # Returns a frozen array of all the zone identifiers for the country ordered + # geographically, most populous first. + def zone_identifiers + # Thread-safety: It is possible that the value of @zone_identifiers may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @zone_identifiers is only + # calculated once. + + unless @zone_identifiers + result = zones.collect {|zone| zone.identifier}.freeze + return result if frozen? + @zone_identifiers = result + end + + @zone_identifiers + end + + # Returns a frozen array of all the timezones for the for the country + # ordered geographically, most populous first. 
+ def zones + @zones + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_data_source.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_data_source.rb new file mode 100644 index 0000000..3959090 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_data_source.rb @@ -0,0 +1,497 @@ +module TZInfo + # Use send as a workaround for an issue on JRuby 9.2.9.0 where using the + # refinement causes calls to RubyCoreSupport.file_open to fail to pass the + # block parameter. + # + # https://travis-ci.org/tzinfo/tzinfo/jobs/628812051#L1931 + # https://github.com/jruby/jruby/issues/6009 + send(:using, TZInfo::RubyCoreSupport::UntaintExt) if TZInfo::RubyCoreSupport.const_defined?(:UntaintExt) + + # An InvalidZoneinfoDirectory exception is raised if the DataSource is + # set to a specific zoneinfo path, which is not a valid zoneinfo directory + # (i.e. a directory containing index files named iso3166.tab and zone.tab + # as well as other timezone files). + class InvalidZoneinfoDirectory < StandardError + end + + # A ZoneinfoDirectoryNotFound exception is raised if no valid zoneinfo + # directory could be found when checking the paths listed in + # ZoneinfoDataSource.search_path. A valid zoneinfo directory is one that + # contains timezone files, a country code index file named iso3166.tab and a + # timezone index file named zone1970.tab or zone.tab. + class ZoneinfoDirectoryNotFound < StandardError + end + + # A DataSource that loads data from a 'zoneinfo' directory containing + # compiled "TZif" version 3 (or earlier) files in addition to iso3166.tab and + # zone1970.tab or zone.tab index files. + # + # To have TZInfo load the system zoneinfo files, call TZInfo::DataSource.set + # as follows: + # + # TZInfo::DataSource.set(:zoneinfo) + # + # To load zoneinfo files from a particular directory, pass the directory to + # TZInfo::DataSource.set: + # + # TZInfo::DataSource.set(:zoneinfo, directory) + # + # Note that the platform used at runtime may limit the range of available + # transition data that can be loaded from zoneinfo files. There are two + # factors to consider: + # + # First of all, the zoneinfo support in TZInfo makes use of Ruby's Time class. + # On 32-bit builds of Ruby 1.8, the Time class only supports 32-bit + # timestamps. This means that only Times between 1901-12-13 20:45:52 and + # 2038-01-19 03:14:07 can be represented. Furthermore, certain platforms only + # allow for positive 32-bit timestamps (notably Windows), making the earliest + # representable time 1970-01-01 00:00:00. + # + # 64-bit builds of Ruby 1.8 and all builds of Ruby 1.9 support 64-bit + # timestamps. This means that there is no practical restriction on the range + # of the Time class on these platforms. + # + # TZInfo will only load transitions that fall within the supported range of + # the Time class. Any queries performed on times outside of this range may + # give inaccurate results. + # + # The second factor concerns the zoneinfo files. Versions of the 'zic' tool + # (used to build zoneinfo files) that were released prior to February 2006 + # created zoneinfo files that used 32-bit integers for transition timestamps. + # Later versions of zic produce zoneinfo files that use 64-bit integers. If + # you have 32-bit zoneinfo files on your system, then any queries falling + # outside of the range 1901-12-13 20:45:52 to 2038-01-19 03:14:07 may be + # inaccurate. + # + # Most modern platforms include 64-bit zoneinfo files. 
However, Mac OS X (up + # to at least 10.8.4) still uses 32-bit zoneinfo files. + # + # To check whether your zoneinfo files contain 32-bit or 64-bit transition + # data, you can run the following code (substituting the identifier of the + # zone you want to test for zone_identifier): + # + # TZInfo::DataSource.set(:zoneinfo) + # dir = TZInfo::DataSource.get.zoneinfo_dir + # File.open(File.join(dir, zone_identifier), 'r') {|f| f.read(5) } + # + # If the last line returns "TZif\\x00", then you have a 32-bit zoneinfo file. + # If it returns "TZif2" or "TZif3" then you have a 64-bit zoneinfo file. + # + # If you require support for 64-bit transitions, but are restricted to 32-bit + # zoneinfo support, then you may want to consider using TZInfo::RubyDataSource + # instead. + class ZoneinfoDataSource < DataSource + # The default value of ZoneinfoDataSource.search_path. + DEFAULT_SEARCH_PATH = ['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'].freeze + + # The default value of ZoneinfoDataSource.alternate_iso3166_tab_search_path. + DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH = ['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'].freeze + + # Paths to be checked to find the system zoneinfo directory. + @@search_path = DEFAULT_SEARCH_PATH.dup + + # Paths to possible alternate iso3166.tab files (used to locate the + # system-wide iso3166.tab files on FreeBSD and OpenBSD). + @@alternate_iso3166_tab_search_path = DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH.dup + + # An Array of directories that will be checked to find the system zoneinfo + # directory. + # + # Directories are checked in the order they appear in the Array. + # + # The default value is ['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo']. + def self.search_path + @@search_path + end + + # Sets the directories to be checked when locating the system zoneinfo + # directory. + # + # Can be set to an Array of directories or a String containing directories + # separated with File::PATH_SEPARATOR. + # + # Directories are checked in the order they appear in the Array or String. + # + # Set to nil to revert to the default paths. + def self.search_path=(search_path) + @@search_path = process_search_path(search_path, DEFAULT_SEARCH_PATH) + end + + # An Array of paths that will be checked to find an alternate iso3166.tab + # file if one was not included in the zoneinfo directory (for example, on + # FreeBSD and OpenBSD systems). + # + # Paths are checked in the order they appear in the array. + # + # The default value is ['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166']. + def self.alternate_iso3166_tab_search_path + @@alternate_iso3166_tab_search_path + end + + # Sets the paths to check to locate an alternate iso3166.tab file if one was + # not included in the zoneinfo directory. + # + # Can be set to an Array of directories or a String containing directories + # separated with File::PATH_SEPARATOR. + # + # Paths are checked in the order they appear in the array. + # + # Set to nil to revert to the default paths. + def self.alternate_iso3166_tab_search_path=(alternate_iso3166_tab_search_path) + @@alternate_iso3166_tab_search_path = process_search_path(alternate_iso3166_tab_search_path, DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH) + end + + # The zoneinfo directory being used. + attr_reader :zoneinfo_dir + + # Creates a new ZoneinfoDataSource. + # + # If zoneinfo_dir is specified, it will be checked and used as the source + # of zoneinfo files. 
+ # + # The directory must contain a file named iso3166.tab and a file named + # either zone1970.tab or zone.tab. These may either be included in the root + # of the directory or in a 'tab' sub-directory and named 'country.tab' and + # 'zone_sun.tab' respectively (as is the case on Solaris. + # + # Additionally, the path to iso3166.tab can be overridden using the + # alternate_iso3166_tab_path parameter. + # + # InvalidZoneinfoDirectory will be raised if the iso3166.tab and + # zone1970.tab or zone.tab files cannot be found using the zoneinfo_dir and + # alternate_iso3166_tab_path parameters. + # + # If zoneinfo_dir is not specified or nil, the paths referenced in + # search_path are searched in order to find a valid zoneinfo directory + # (one that contains zone1970.tab or zone.tab and iso3166.tab files as + # above). + # + # The paths referenced in alternate_iso3166_tab_search_path are also + # searched to find an iso3166.tab file if one of the searched zoneinfo + # directories doesn't contain an iso3166.tab file. + # + # If no valid directory can be found by searching, ZoneinfoDirectoryNotFound + # will be raised. + def initialize(zoneinfo_dir = nil, alternate_iso3166_tab_path = nil) + if zoneinfo_dir + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(zoneinfo_dir, alternate_iso3166_tab_path) + + unless iso3166_tab_path && zone_tab_path + raise InvalidZoneinfoDirectory, "#{zoneinfo_dir} is not a directory or doesn't contain a iso3166.tab file and a zone1970.tab or zone.tab file." + end + + @zoneinfo_dir = zoneinfo_dir + else + @zoneinfo_dir, iso3166_tab_path, zone_tab_path = find_zoneinfo_dir + + unless @zoneinfo_dir && iso3166_tab_path && zone_tab_path + raise ZoneinfoDirectoryNotFound, "None of the paths included in TZInfo::ZoneinfoDataSource.search_path are valid zoneinfo directories." + end + end + + @zoneinfo_dir = File.expand_path(@zoneinfo_dir).freeze + @timezone_index = load_timezone_index.freeze + @country_index = load_country_index(iso3166_tab_path, zone_tab_path).freeze + @posix_tz_parser = PosixTimeZoneParser.new + end + + # Returns a TimezoneInfo instance for a given identifier. + # Raises InvalidTimezoneIdentifier if the timezone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + begin + if @timezone_index.include?(identifier) + path = File.join(@zoneinfo_dir, identifier) + + # Untaint path rather than identifier. We don't want to modify + # identifier. identifier may also be frozen and therefore cannot be + # untainted. + path.untaint + + begin + ZoneinfoTimezoneInfo.new(identifier, path, @posix_tz_parser) + rescue InvalidZoneinfoFile => e + raise InvalidTimezoneIdentifier, e.message + end + else + raise InvalidTimezoneIdentifier, 'Invalid identifier' + end + rescue Errno::ENOENT, Errno::ENAMETOOLONG, Errno::ENOTDIR + raise InvalidTimezoneIdentifier, 'Invalid identifier' + rescue Errno::EACCES => e + raise InvalidTimezoneIdentifier, e.message + end + end + + # Returns an array of all the available timezone identifiers. + def timezone_identifiers + @timezone_index + end + + # Returns an array of all the available timezone identifiers for + # data timezones (i.e. those that actually contain definitions). + # + # For ZoneinfoDataSource, this will always be identical to + # timezone_identifers. + def data_timezone_identifiers + @timezone_index + end + + # Returns an array of all the available timezone identifiers that + # are links to other timezones. + # + # For ZoneinfoDataSource, this will always be an empty array. 
+ def linked_timezone_identifiers + [].freeze + end + + # Returns a CountryInfo instance for the given ISO 3166-1 alpha-2 + # country code. Raises InvalidCountryCode if the country could not be found + # or the code is invalid. + def load_country_info(code) + info = @country_index[code] + raise InvalidCountryCode, 'Invalid country code' unless info + info + end + + # Returns an array of all the available ISO 3166-1 alpha-2 + # country codes. + def country_codes + @country_index.keys.freeze + end + + # Returns the name and information about this DataSource. + def to_s + "Zoneinfo DataSource: #{@zoneinfo_dir}" + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{@zoneinfo_dir}>" + end + + private + + # Processes a path for use as the search_path or + # alternate_iso3166_tab_search_path. + def self.process_search_path(path, default) + if path + if path.kind_of?(String) + path.split(File::PATH_SEPARATOR) + else + path.collect {|p| p.to_s} + end + else + default.dup + end + end + + # Validates a zoneinfo directory and returns the paths to the iso3166.tab + # and zone1970.tab or zone.tab files if valid. If the directory is not + # valid, returns nil. + # + # The path to the iso3166.tab file may be overriden by passing in a path. + # This is treated as either absolute or relative to the current working + # directory. + def validate_zoneinfo_dir(path, iso3166_tab_path = nil) + if File.directory?(path) + if iso3166_tab_path + return nil unless File.file?(iso3166_tab_path) + else + iso3166_tab_path = resolve_tab_path(path, ['iso3166.tab'], 'country.tab') + return nil unless iso3166_tab_path + end + + zone_tab_path = resolve_tab_path(path, ['zone1970.tab', 'zone.tab'], 'zone_sun.tab') + return nil unless zone_tab_path + + [iso3166_tab_path, zone_tab_path] + else + nil + end + end + + # Attempts to resolve the path to a tab file given its standard names and + # tab sub-directory name (as used on Solaris). + def resolve_tab_path(zoneinfo_path, standard_names, tab_name) + standard_names.each do |standard_name| + path = File.join(zoneinfo_path, standard_name) + return path if File.file?(path) + end + + path = File.join(zoneinfo_path, 'tab', tab_name) + return path if File.file?(path) + + nil + end + + # Finds a zoneinfo directory using search_path and + # alternate_iso3166_tab_search_path. Returns the paths to the directory, + # the iso3166.tab file and the zone.tab file or nil if not found. + def find_zoneinfo_dir + alternate_iso3166_tab_path = self.class.alternate_iso3166_tab_search_path.detect do |path| + File.file?(path) + end + + self.class.search_path.each do |path| + # Try without the alternate_iso3166_tab_path first. + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(path) + return path, iso3166_tab_path, zone_tab_path if iso3166_tab_path && zone_tab_path + + if alternate_iso3166_tab_path + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(path, alternate_iso3166_tab_path) + return path, iso3166_tab_path, zone_tab_path if iso3166_tab_path && zone_tab_path + end + end + + # Not found. + nil + end + + # Scans @zoneinfo_dir and returns an Array of available timezone + # identifiers. + def load_timezone_index + index = [] + + # Ignoring particular files: + # +VERSION is included on Mac OS X. + # leapseconds is a list of leap seconds. + # localtime is the current local timezone (may be a link). + # posix, posixrules and right are directories containing other versions of the zoneinfo files. 
+ # src is a directory containing the tzdata source included on Solaris. + # timeconfig is a symlink included on Slackware. + + enum_timezones(nil, ['+VERSION', 'leapseconds', 'localtime', 'posix', 'posixrules', 'right', 'src', 'timeconfig']) do |identifier| + index << identifier + end + + index.sort + end + + # Recursively scans a directory of timezones, calling the passed in block + # for each identifier found. + def enum_timezones(dir, exclude = [], &block) + Dir.foreach(dir ? File.join(@zoneinfo_dir, dir) : @zoneinfo_dir) do |entry| + unless entry =~ /\./ || exclude.include?(entry) + entry.untaint + path = dir ? File.join(dir, entry) : entry + full_path = File.join(@zoneinfo_dir, path) + + if File.directory?(full_path) + enum_timezones(path, [], &block) + elsif File.file?(full_path) + yield path + end + end + end + end + + # Uses the iso3166.tab and zone1970.tab or zone.tab files to build an index + # of the available countries and their timezones. + def load_country_index(iso3166_tab_path, zone_tab_path) + + # Handle standard 3 to 4 column zone.tab files as well as the 4 to 5 + # column format used by Solaris. + # + # On Solaris, an extra column before the comment gives an optional + # linked/alternate timezone identifier (or '-' if not set). + # + # Additionally, there is a section at the end of the file for timezones + # covering regions. These are given lower-case "country" codes. The timezone + # identifier column refers to a continent instead of an identifier. These + # lines will be ignored by TZInfo. + # + # Since the last column is optional in both formats, testing for the + # Solaris format is done in two passes. The first pass identifies if there + # are any lines using 5 columns. + + + # The first column is allowed to be a comma separated list of country + # codes, as used in zone1970.tab (introduced in tzdata 2014f). + # + # The first country code in the comma-separated list is the country that + # contains the city the zone identifer is based on. The first country + # code on each line is considered to be primary with the others + # secondary. + # + # The zones for each country are ordered primary first, then secondary. + # Within the primary and secondary groups, the zones are ordered by their + # order in the file. + + file_is_5_column = false + zone_tab = [] + + RubyCoreSupport.open_file(zone_tab_path, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + file.each_line do |line| + line.chomp! + + if line =~ /\A([A-Z]{2}(?:,[A-Z]{2})*)\t(?:([+\-])(\d{2})(\d{2})([+\-])(\d{3})(\d{2})|([+\-])(\d{2})(\d{2})(\d{2})([+\-])(\d{3})(\d{2})(\d{2}))\t([^\t]+)(?:\t([^\t]+))?(?:\t([^\t]+))?\z/ + codes = $1 + + if $2 + latitude = dms_to_rational($2, $3, $4) + longitude = dms_to_rational($5, $6, $7) + else + latitude = dms_to_rational($8, $9, $10, $11) + longitude = dms_to_rational($12, $13, $14, $15) + end + + zone_identifier = $16 + column4 = $17 + column5 = $18 + + file_is_5_column = true if column5 + + zone_tab << [codes.split(','.freeze), zone_identifier, latitude, longitude, column4, column5] + end + end + end + + primary_zones = {} + secondary_zones = {} + + zone_tab.each do |codes, zone_identifier, latitude, longitude, column4, column5| + description = file_is_5_column ? 
column5 : column4 + country_timezone = CountryTimezone.new(zone_identifier, latitude, longitude, description) + + # codes will always have at least one element + + (primary_zones[codes.first] ||= []) << country_timezone + + codes[1..-1].each do |code| + (secondary_zones[code] ||= []) << country_timezone + end + end + + countries = {} + + RubyCoreSupport.open_file(iso3166_tab_path, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + file.each_line do |line| + line.chomp! + + # Handle both the two column alpha-2 and name format used in the tz + # database as well as the 4 column alpha-2, alpha-3, numeric-3 and + # name format used by FreeBSD and OpenBSD. + + if line =~ /\A([A-Z]{2})(?:\t[A-Z]{3}\t[0-9]{3})?\t(.+)\z/ + code = $1 + name = $2 + zones = (primary_zones[code] || []) + (secondary_zones[code] || []) + + countries[code] = ZoneinfoCountryInfo.new(code, name, zones) + end + end + end + + countries + end + + # Converts degrees, minutes and seconds to a Rational. + def dms_to_rational(sign, degrees, minutes, seconds = nil) + result = degrees.to_i + Rational(minutes.to_i, 60) + result += Rational(seconds.to_i, 3600) if seconds + result = -result if sign == '-'.freeze + result + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb new file mode 100644 index 0000000..4f28a19 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb @@ -0,0 +1,515 @@ +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. See #114. + send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt) + + # An InvalidZoneinfoFile exception is raised if an attempt is made to load an + # invalid zoneinfo file. + class InvalidZoneinfoFile < StandardError + end + + # Represents a timezone defined by a compiled zoneinfo TZif (\0, 2 or 3) file. + # + # @private + class ZoneinfoTimezoneInfo < TransitionDataTimezoneInfo #:nodoc: + # The year to generate transitions up to. + # + # @private + GENERATE_UP_TO = RubyCoreSupport.time_supports_64bit ? Time.now.utc.year + 100 : 2037 + + # Minimum supported timestamp (inclusive). + # + # Time.utc(1700, 1, 1).to_i + MIN_TIMESTAMP = -8520336000 + + # Maximum supported timestamp (exclusive). + # + # Time.utc(2500, 1, 1).to_i + MAX_TIMESTAMP = 16725225600 + + # Constructs the new ZoneinfoTimezoneInfo with an identifier, path + # to the file and parser to use to parse the POSIX-like TZ string. + def initialize(identifier, file_path, posix_tz_parser) + super(identifier) + + File.open(file_path, 'rb') do |file| + parse(file, posix_tz_parser) + end + end + + private + # Unpack will return unsigned 32-bit integers. Translate to + # signed 32-bit. + def make_signed_int32(long) + long >= 0x80000000 ? long - 0x100000000 : long + end + + # Unpack will return a 64-bit integer as two unsigned 32-bit integers + # (most significant first). Translate to signed 64-bit + def make_signed_int64(high, low) + unsigned = (high << 32) | low + unsigned >= 0x8000000000000000 ? unsigned - 0x10000000000000000 : unsigned + end + + # Read bytes from file and check that the correct number of bytes could + # be read. Raises InvalidZoneinfoFile if the number of bytes didn't match + # the number requested. 
+ def check_read(file, bytes) + result = file.read(bytes) + + unless result && result.length == bytes + raise InvalidZoneinfoFile, "Expected #{bytes} bytes reading '#{file.path}', but got #{result ? result.length : 0} bytes" + end + + result + end + + # Zoneinfo files don't include the offset from standard time (std_offset) + # for DST periods. Derive the base offset (utc_offset) where DST is + # observed from either the previous or next non-DST period. + # + # Returns the index of the offset to be used prior to the first + # transition. + def derive_offsets(transitions, offsets) + # The first non-DST offset (if there is one) is the offset observed + # before the first transition. Fallback to the first DST offset if there + # are no non-DST offsets. + first_non_dst_offset_index = offsets.index {|o| !o[:is_dst] } + first_offset_index = first_non_dst_offset_index || 0 + return first_offset_index if transitions.empty? + + # Determine the utc_offset of the next non-dst offset at each transition. + utc_offset_from_next = nil + + transitions.reverse_each do |transition| + offset = offsets[transition[:offset]] + if offset[:is_dst] + transition[:utc_offset_from_next] = utc_offset_from_next if utc_offset_from_next + else + utc_offset_from_next = offset[:utc_total_offset] + end + end + + utc_offset_from_previous = first_non_dst_offset_index ? offsets[first_non_dst_offset_index][:utc_total_offset] : nil + defined_offsets = {} + + transitions.each do |transition| + offset_index = transition[:offset] + offset = offsets[offset_index] + utc_total_offset = offset[:utc_total_offset] + + if offset[:is_dst] + utc_offset_from_next = transition[:utc_offset_from_next] + + difference_to_previous = (utc_total_offset - (utc_offset_from_previous || utc_total_offset)).abs + difference_to_next = (utc_total_offset - (utc_offset_from_next || utc_total_offset)).abs + + utc_offset = if difference_to_previous == 3600 + utc_offset_from_previous + elsif difference_to_next == 3600 + utc_offset_from_next + elsif difference_to_previous > 0 && difference_to_next > 0 + difference_to_previous < difference_to_next ? utc_offset_from_previous : utc_offset_from_next + elsif difference_to_previous > 0 + utc_offset_from_previous + elsif difference_to_next > 0 + utc_offset_from_next + else + # No difference, assume a 1 hour offset from standard time. + utc_total_offset - 3600 + end + + if !offset[:utc_offset] + offset[:utc_offset] = utc_offset + defined_offsets[offset] = offset_index + elsif offset[:utc_offset] != utc_offset + # An earlier transition has already derived a different + # utc_offset. Define a new offset or reuse an existing identically + # defined offset. + new_offset = offset.dup + new_offset[:utc_offset] = utc_offset + + offset_index = defined_offsets[new_offset] + + unless offset_index + offsets << new_offset + offset_index = offsets.length - 1 + defined_offsets[new_offset] = offset_index + end + + transition[:offset] = offset_index + end + else + utc_offset_from_previous = utc_total_offset + end + end + + first_offset_index + end + + # Remove transitions before a minimum supported value. If there is not a + # transition exactly on the minimum supported value move the latest from + # before up to the minimum supported value. + def remove_unsupported_negative_transitions(transitions, min_supported) + result = transitions.drop_while {|t| t[:at] < min_supported } + if result.empty? 
|| (result[0][:at] > min_supported && result.length < transitions.length) + last_before = transitions[-1 - result.length] + last_before[:at] = min_supported + [last_before] + result + else + result + end + end + + # Determines if the offset from a transition matches the offset from a + # rule. This is a looser match than TimezoneOffset#==, not requiring that + # the utc_offset and std_offset both match (which have to be derived for + # transitions, but are known for rules. + def offset_matches_rule?(offset, rule_offset) + offset[:utc_total_offset] == rule_offset.utc_total_offset && + offset[:is_dst] == rule_offset.dst? && + offset[:abbr] == rule_offset.abbreviation.to_s + end + + # Determins if the offset from a transition exactly matches the offset + # from a rule. + def offset_equals_rule?(offset, rule_offset) + offset_matches_rule?(offset, rule_offset) && + (offset[:utc_offset] || (offset[:is_dst] ? offset[:utc_total_offset] - 3600 : offset[:utc_total_offset])) == rule_offset.utc_offset + end + + # Finds an offset hash that is an exact match to the rule offset specified. + def find_existing_offset_index(offsets, rule_offset) + offsets.find_index {|o| offset_equals_rule?(o, rule_offset) } + end + + # Gets an existing matching offset index or adds a new offset hash for a + # rule offset. + def get_rule_offset_index(offsets, offset) + index = find_existing_offset_index(offsets, offset) + unless index + index = offsets.length + offsets << {:utc_total_offset => offset.utc_total_offset, :utc_offset => offset.utc_offset, :is_dst => offset.dst?, :abbr => offset.abbreviation} + end + index + end + + # Gets a hash mapping rule offsets to indexes in offsets, creating new + # offset hashes if required. + def get_rule_offset_indexes(offsets, annual_rules) + { + annual_rules.std_offset => get_rule_offset_index(offsets, annual_rules.std_offset), + annual_rules.dst_offset => get_rule_offset_index(offsets, annual_rules.dst_offset) + } + end + + # Converts an array of rule transitions to hashes. + def convert_transitions_to_hashes(offset_indexes, transitions) + transitions.map {|t| {:at => t.at.to_i, :offset => offset_indexes[t.offset]} } + end + + # Apply the rules from the TZ string when there were no defined + # transitions. Checks for a matching offset. Returns the rules-based + # constant offset or generates transitions from 1970 until 100 years into + # the future (at the time of loading zoneinfo_timezone_info.rb) or 2037 if + # limited to 32-bit Times. + def apply_rules_without_transitions(file, offsets, first_offset_index, rules) + first_offset = offsets[first_offset_index] + + if rules.kind_of?(TimezoneOffset) + unless offset_matches_rule?(first_offset, rules) + raise InvalidZoneinfoFile, "Constant offset POSIX-style TZ string does not match constant offset in file '#{file.path}'." + end + + first_offset[:utc_offset] = rules.utc_offset + [] + else + transitions = 1970.upto(GENERATE_UP_TO).map {|y| rules.transitions(y) }.flatten + first_transition = transitions[0] + + if offset_matches_rule?(first_offset, first_transition.previous_offset) + # Correct the first offset if it isn't an exact match. + first_offset[:utc_offset] = first_transition.previous_offset.utc_offset + else + # Not transitioning from the designated first offset. + if offset_matches_rule?(first_offset, first_transition.offset) + # Correct the first offset if it isn't an exact match. + first_offset[:utc_offset] = first_transition.offset.utc_offset + + # Skip an unnecessary transition to the first offset. 
+ transitions.shift + end + + # If the first offset doesn't match either the offset or previous + # offset, then it will be retained. + end + + offset_indexes = get_rule_offset_indexes(offsets, rules) + convert_transitions_to_hashes(offset_indexes, transitions) + end + end + + # Validates the rules offset against the offset of the last defined + # transition. Replaces the transition with an equivalent using the rules + # offset if the rules give a different definition for the base offset. + def replace_last_transition_offset_if_valid_and_needed(file, transitions, offsets) + last_transition = transitions.last + last_offset = offsets[last_transition[:offset]] + rule_offset = yield last_offset + + unless offset_matches_rule?(last_offset, rule_offset) + raise InvalidZoneinfoFile, "Offset from POSIX-style TZ string does not match final transition in file '#{file.path}'." + end + + # The total_utc_offset and abbreviation must always be the same. The + # base utc_offset and std_offset might differ. In which case the rule + # should be used as it will be more precise. + last_offset[:utc_offset] = rule_offset.utc_offset + last_transition + end + + # todo: port over validate_and_fix_last_defined_transition_offset + # when fixing the previous offset will need to define a new one + + # Validates the offset indicated to be observed by the rules before the + # first generated transition against the offset of the last defined + # transition. + # + # Fix the last defined transition if it differ on just base/std offsets + # (which are derived). Raise an error if the observed UTC offset or + # abbreviations differ. + def validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, first_rule_offset) + offset_of_last_defined = offsets[last_defined[:offset]] + + if offset_equals_rule?(offset_of_last_defined, first_rule_offset) + last_defined + else + if offset_matches_rule?(offset_of_last_defined, first_rule_offset) + # The same overall offset, but differing in the base or std + # offset (which are derived). Correct by using the rule. + + offset_index = get_rule_offset_index(offsets, first_rule_offset) + {:at => last_defined[:at], :offset => offset_index} + else + raise InvalidZoneinfoFile, "The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{file.path}'." + end + end + end + + # Apply the rules from the TZ string when there were defined transitions. + # Checks for a matching offset with the last transition. Redefines the + # last transition if required and if the rules don't specific a constant + # offset, generates transitions until 100 years into the future (at the + # time of loading zoneinfo_timezone_info.rb) or 2037 if limited to 32-bit + # Times. + def apply_rules_with_transitions(file, transitions, offsets, first_offset_index, rules) + last_defined = transitions[-1] + + if rules.kind_of?(TimezoneOffset) + transitions[-1] = validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, rules) + else + previous_offset_index = transitions.length > 1 ? transitions[-2][:offset] : first_offset_index + previous_offset = offsets[previous_offset_index] + last_year = (Time.at(last_defined[:at]).utc + previous_offset[:utc_total_offset]).year + + if last_year <= GENERATE_UP_TO + generated = rules.transitions(last_year).find_all {|t| t.at > last_defined[:at] } + + (last_year + 1).upto(GENERATE_UP_TO).map {|y| rules.transitions(y) }.flatten + + unless generated.empty? 
+ transitions[-1] = validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, generated[0].previous_offset) + rule_offset_indexes = get_rule_offset_indexes(offsets, rules) + transitions.concat(convert_transitions_to_hashes(rule_offset_indexes, generated)) + end + end + end + end + + # Defines an offset for the timezone based on the given index and offset + # Hash. + def define_offset(index, offset) + utc_total_offset = offset[:utc_total_offset] + utc_offset = offset[:utc_offset] + + if utc_offset + # DST offset with base utc_offset derived by derive_offsets. + std_offset = utc_total_offset - utc_offset + elsif offset[:is_dst] + # DST offset unreferenced by a transition (offset in use before the + # first transition). No derived base UTC offset, so assume 1 hour + # DST. + utc_offset = utc_total_offset - 3600 + std_offset = 3600 + else + # Non-DST offset. + utc_offset = utc_total_offset + std_offset = 0 + end + + offset index, utc_offset, std_offset, offset[:abbr].untaint.to_sym + end + + # Parses a zoneinfo file and intializes the DataTimezoneInfo structures. + def parse(file, posix_tz_parser) + magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt = + check_read(file, 44).unpack('a4 a x15 NNNNNN') + + if magic != 'TZif' + raise InvalidZoneinfoFile, "The file '#{file.path}' does not start with the expected header." + end + + if version == '2' || version == '3' + # Skip the first 32-bit section and read the header of the second + # 64-bit section. The 64-bit section is always used even if the + # runtime platform doesn't support 64-bit timestamps. In "slim" format + # zoneinfo files the 32-bit section will be empty. + file.seek(timecnt * 5 + typecnt * 6 + charcnt + leapcnt * 8 + ttisstdcnt + ttisutccnt, IO::SEEK_CUR) + + prev_version = version + + magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt = + check_read(file, 44).unpack('a4 a x15 NNNNNN') + + unless magic == 'TZif' && (version == prev_version) + raise InvalidZoneinfoFile, "The file '#{file.path}' contains an invalid 64-bit section header." + end + + using_64bit = true + elsif version != '3' && version != '2' && version != "\0" + raise InvalidZoneinfoFile, "The file '#{file.path}' contains a version of the zoneinfo format that is not currently supported." + else + using_64bit = false + end + + unless leapcnt == 0 + raise InvalidZoneinfoFile, "The zoneinfo file '#{file.path}' contains leap second data. TZInfo requires zoneinfo files that omit leap seconds." + end + + transitions = [] + + if using_64bit + timecnt.times do |i| + high, low = check_read(file, 8).unpack('NN'.freeze) + transition_time = make_signed_int64(high, low) + transitions << {:at => transition_time} + end + else + timecnt.times do |i| + transition_time = make_signed_int32(check_read(file, 4).unpack('N'.freeze)[0]) + transitions << {:at => transition_time} + end + end + + timecnt.times do |i| + localtime_type = check_read(file, 1).unpack('C'.freeze)[0] + transitions[i][:offset] = localtime_type + end + + offsets = [] + + typecnt.times do |i| + gmtoff, isdst, abbrind = check_read(file, 6).unpack('NCC'.freeze) + gmtoff = make_signed_int32(gmtoff) + isdst = isdst == 1 + offset = {:utc_total_offset => gmtoff, :is_dst => isdst, :abbr_index => abbrind} + + unless isdst + offset[:utc_offset] = gmtoff + end + + offsets << offset + end + + abbrev = check_read(file, charcnt) + + if using_64bit + # Skip to the POSIX-style TZ string. 
+ file.seek(ttisstdcnt + ttisutccnt, IO::SEEK_CUR) # + leapcnt * 8, but leapcnt is checked above and guaranteed to be 0. + tz_string_start = check_read(file, 1) + raise InvalidZoneinfoFile, "Expected newline starting POSIX-style TZ string in file '#{file.path}'." unless tz_string_start == "\n" + tz_string = RubyCoreSupport.force_encoding(file.readline("\n"), 'UTF-8') + raise InvalidZoneinfoFile, "Expected newline ending POSIX-style TZ string in file '#{file.path}'." unless tz_string.chomp!("\n") + + begin + rules = posix_tz_parser.parse(tz_string) + rescue InvalidPosixTimeZone => e + raise InvalidZoneinfoFile, "Failed to parse POSIX-style TZ string in file '#{file.path}': #{e}" + end + else + rules = nil + end + + offsets.each do |o| + abbrev_start = o[:abbr_index] + raise InvalidZoneinfoFile, "Abbreviation index is out of range in file '#{file.path}'" unless abbrev_start < abbrev.length + + abbrev_end = abbrev.index("\0", abbrev_start) + raise InvalidZoneinfoFile, "Missing abbreviation null terminator in file '#{file.path}'" unless abbrev_end + + o[:abbr] = RubyCoreSupport.force_encoding(abbrev[abbrev_start...abbrev_end], 'UTF-8') + end + + transitions.each do |t| + if t[:offset] < 0 || t[:offset] >= offsets.length + raise InvalidZoneinfoFile, "Invalid offset referenced by transition in file '#{file.path}'." + end + end + + # Derive the offsets from standard time (std_offset). + first_offset_index = derive_offsets(transitions, offsets) + + # Filter out transitions that are not supported by Time on this + # platform. + unless transitions.empty? + if !RubyCoreSupport.time_supports_negative + transitions = remove_unsupported_negative_transitions(transitions, 0) + elsif !RubyCoreSupport.time_supports_64bit + transitions = remove_unsupported_negative_transitions(transitions, -2**31) + else + # Ignore transitions that occur outside of a defined window. The + # transition index cannot handle a large range of transition times. + # + # This is primarily intended to ignore the far in the past + # transition added in zic 2014c (at timestamp -2**63 in zic 2014c + # and at the approximate time of the big bang from zic 2014d). + # + # Assumes MIN_TIMESTAMP is less than -2**31. + transitions = remove_unsupported_negative_transitions(transitions, MIN_TIMESTAMP) + end + + if !RubyCoreSupport.time_supports_64bit + i = transitions.find_index {|t| t[:at] >= 2**31 } + had_later_transition = !!i + transitions = transitions.first(i) if i + else + had_later_transition = false + end + end + + if rules && !had_later_transition + if transitions.empty? + transitions = apply_rules_without_transitions(file, offsets, first_offset_index, rules) + else + apply_rules_with_transitions(file, transitions, offsets, first_offset_index, rules) + end + end + + define_offset(first_offset_index, offsets[first_offset_index]) + + used_offset_indexes = transitions.map {|t| t[:offset] }.to_set + + offsets.each_with_index do |o, i| + define_offset(i, o) if i != first_offset_index && used_offset_indexes.include?(i) + end + + # Ignore transitions that occur outside of a defined window. The + # transition index cannot handle a large range of transition times. 
+ transitions.each do |t| + at = t[:at] + break if at >= MAX_TIMESTAMP + time = Time.at(at).utc + transition time.year, time.mon, t[:offset], at + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_annual_rules.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_annual_rules.rb new file mode 100644 index 0000000..610d878 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_annual_rules.rb @@ -0,0 +1,95 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCAnnualRules < Minitest::Test + + def test_initialize + std_offset = TimezoneOffset.new(0, 0, 'GMT') + dst_offset = TimezoneOffset.new(0, 3600, 'BST') + dst_start_rule = FakeAlwaysDateAdjustmentRule.new(3, 1) + dst_end_rule = FakeAlwaysDateAdjustmentRule.new(10, 1) + + rules = AnnualRules.new(std_offset, dst_offset, dst_start_rule, dst_end_rule) + assert_same(std_offset, rules.std_offset) + assert_same(dst_offset, rules.dst_offset) + assert_same(dst_start_rule, rules.dst_start_rule) + assert_same(dst_end_rule, rules.dst_end_rule) + end + + [2020, 2021].each do |year| + define_method "test_transitions_for_dst_mid_year_#{year}" do + std_offset = TimezoneOffset.new(3600, 0, 'TEST') + dst_offset = TimezoneOffset.new(3600, 3600, 'TESTS') + + rules = AnnualRules.new( + std_offset, + dst_offset, + FakeAlwaysDateAdjustmentRule.new(3, 21), + FakeAlwaysDateAdjustmentRule.new(10, 22) + ) + + result = rules.transitions(year) + + expected = [ + AnnualRules::Transition.new(dst_offset, std_offset, TimeOrDateTime.wrap(Time.utc(year, 3, 21, 0, 0, 0) - 3600)), + AnnualRules::Transition.new(std_offset, dst_offset, TimeOrDateTime.wrap(Time.utc(year, 10, 22, 0, 0, 0) - 7200)) + ] + + assert_equal(expected, result) + end + + define_method "test_transitions_for_dst_start_and_end_of_year_#{year}" do + std_offset = TimezoneOffset.new(3600, 0, 'TEST') + dst_offset = TimezoneOffset.new(3600, 3600, 'TESTS') + + rules = AnnualRules.new( + std_offset, + dst_offset, + FakeAlwaysDateAdjustmentRule.new(10, 22), + FakeAlwaysDateAdjustmentRule.new(3, 21) + ) + + result = rules.transitions(year) + + expected = [ + AnnualRules::Transition.new(std_offset, dst_offset, TimeOrDateTime.wrap(Time.utc(year, 3, 21, 0, 0, 0) - 7200)), + AnnualRules::Transition.new(dst_offset, std_offset, TimeOrDateTime.wrap(Time.utc(year, 10, 22, 0, 0, 0) - 3600)) + ] + + assert_equal(expected, result) + end + + define_method "test_transitions_for_negative_dst_start_and_end_of_year_#{year}" do + std_offset = TimezoneOffset.new(7200, 0, 'TEST') + dst_offset = TimezoneOffset.new(7200, -3600, 'TESTW') + + rules = AnnualRules.new( + std_offset, + dst_offset, + FakeAlwaysDateAdjustmentRule.new(10, 22), + FakeAlwaysDateAdjustmentRule.new(3, 21) + ) + + result = rules.transitions(year) + + expected = [ + AnnualRules::Transition.new(std_offset, dst_offset, TimeOrDateTime.wrap(Time.utc(year, 3, 21, 0, 0, 0) - 3600)), + AnnualRules::Transition.new(dst_offset, std_offset, TimeOrDateTime.wrap(Time.utc(year, 10, 22, 0, 0, 0) - 7200)) + ] + + assert_equal(expected, result) + end + end + + class FakeAlwaysDateAdjustmentRule + def initialize(month, day) + @month = month + @day = day + end + + def at(offset, year) + TimeOrDateTime.wrap(Time.utc(year, @month, @day, 0, 0, 0) - offset.utc_total_offset) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country.rb new file mode 100644 index 0000000..faa3bb5 --- /dev/null 
+++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country.rb @@ -0,0 +1,238 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. +send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCCountry < Minitest::Test + def setup + @orig_data_source = DataSource.get + Country.send :init_countries + end + + def teardown + DataSource.set(@orig_data_source) + end + + def test_get_valid + c = Country.get('GB') + + assert c + assert_equal('GB', c.code) + end + + def test_get_not_exist + assert_raises(InvalidCountryCode) { + Country.get('ZZ') + } + end + + def test_get_invalid + assert_raises(InvalidCountryCode) { + Country.get('../Countries/GB') + } + end + + def test_get_nil + assert_raises(InvalidCountryCode) { + Country.get(nil) + } + end + + def test_get_case + assert_raises(InvalidCountryCode) { + Country.get('gb') + } + end + + def test_get_tainted_loaded + Country.get('GB') + + safe_test(:unavailable => :skip) do + code = 'GB'.dup.taint + assert(code.tainted?) + country = Country.get(code) + assert_equal('GB', country.code) + assert(code.tainted?) + end + end + + def test_get_tainted_and_frozen_loaded + Country.get('GB') + + safe_test do + country = Country.get('GB'.dup.taint.freeze) + assert_equal('GB', country.code) + end + end + + def test_get_tainted_not_previously_loaded + safe_test(:unavailable => :skip) do + code = 'GB'.dup.taint + assert(code.tainted?) + country = Country.get(code) + assert_equal('GB', country.code) + assert(code.tainted?) + end + end + + def test_get_tainted_and_frozen_not_previously_loaded + safe_test do + country = Country.get('GB'.dup.taint.freeze) + assert_equal('GB', country.code) + end + end + + def test_new_nil + assert_raises(InvalidCountryCode) { + Country.new(nil) + } + end + + def test_new_arg + c = Country.new('GB') + assert_same(Country.get('GB'), c) + end + + def test_new_arg_not_exist + assert_raises(InvalidCountryCode) { + Country.new('ZZ') + } + end + + def test_all_codes + all_codes = Country.all_codes + assert_kind_of(Array, all_codes) + end + + def test_all + all = Country.all + assert_equal(Country.all_codes, all.collect {|c| c.code}) + end + + def test_code + assert_equal('US', Country.get('US').code) + end + + def test_name + assert_kind_of(String, Country.get('US').name) + end + + def test_to_s + assert_equal(Country.get('US').name, Country.get('US').to_s) + assert_equal(Country.get('GB').name, Country.get('GB').to_s) + end + + def test_zone_identifiers + zone_names = Country.get('US').zone_names + assert_kind_of(Array, zone_names) + assert_equal(true, zone_names.frozen?) + end + + def test_zone_names + assert_equal(Country.get('US').zone_identifiers, Country.get('US').zone_names) + end + + def test_zones + zones = Country.get('US').zones + assert_kind_of(Array, zones) + assert_equal(Country.get('US').zone_identifiers, zones.collect {|z| z.identifier}) + + zones.each {|z| assert_kind_of(TimezoneProxy, z)} + end + + def test_zone_info + zones = Country.get('US').zone_info + assert_kind_of(Array, zones) + assert_equal(true, zones.frozen?) 
+ + assert_equal(Country.get('US').zone_identifiers, zones.collect {|z| z.identifier}) + assert_equal(Country.get('US').zone_identifiers, zones.collect {|z| z.timezone.identifier}) + + zones.each {|z| assert_kind_of(CountryTimezone, z)} + end + + def test_compare + assert_equal(0, Country.get('GB') <=> Country.get('GB')) + assert_equal(-1, Country.get('GB') <=> Country.get('US')) + assert_equal(1, Country.get('US') <=> Country.get('GB')) + assert_equal(-1, Country.get('FR') <=> Country.get('US')) + assert_equal(1, Country.get('US') <=> Country.get('FR')) + end + + def test_compare_non_comparable + assert_nil(Country.get('GB') <=> Object.new) + end + + def test_equality + assert_equal(true, Country.get('GB') == Country.get('GB')) + assert_equal(false, Country.get('GB') == Country.get('US')) + assert(!(Country.get('GB') == Object.new)) + end + + def test_eql + assert_equal(true, Country.get('GB').eql?(Country.get('GB'))) + assert_equal(false, Country.get('GB').eql?(Country.get('US'))) + assert(!Country.get('GB').eql?(Object.new)) + end + + def test_hash + assert_equal('GB'.hash, Country.get('GB').hash) + assert_equal('US'.hash, Country.get('US').hash) + end + + def test_marshal + c = Country.get('US') + + # Should get back the same instance because load calls Country.get. + assert_same(c, Marshal.load(Marshal.dump(c))) + end + + def test_reload + # If country gets reloaded for some reason, it needs to force a reload of + # the country index. + + assert_equal('US', Country.get('US').code) + + # Suppress redefined method warnings. + without_warnings do + load 'tzinfo/country.rb' + end + + assert_equal('US', Country.get('US').code) + end + + def test_get_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.get('GB') + end + end + + def test_new_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.new('GB') + end + end + + def test_all_codes_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.all_codes + end + end + + def test_all_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.all + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_index_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_index_definition.rb new file mode 100644 index 0000000..16f76e0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_index_definition.rb @@ -0,0 +1,69 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCCountryIndexDefinition < Minitest::Test + + module CountriesTest1 + include CountryIndexDefinition + + country 'ZZ', 'Country One' do |c| + c.timezone 'Test/Zone/1', 3, 2, 41,20 + end + + country 'AA', 'Aland' do |c| + c.timezone 'Test/Zone/3', 71,30, 358, 15 + c.timezone 'Test/Zone/2', 41, 20, 211, 30 + end + + country 'TE', 'Three' + end + + module CountriesTest2 + include CountryIndexDefinition + + country 'CO', 'First Country' do |c| + end + end + + def test_module_1 + hash = CountriesTest1.countries + assert_equal(3, hash.length) + assert_equal(true, hash.frozen?) 
+ + zz = hash['ZZ'] + aa = hash['AA'] + te = hash['TE'] + + assert_kind_of(RubyCountryInfo, zz) + assert_equal('ZZ', zz.code) + assert_equal('Country One', zz.name) + assert_equal(1, zz.zones.length) + assert_equal('Test/Zone/1', zz.zones[0].identifier) + + assert_kind_of(RubyCountryInfo, aa) + assert_equal('AA', aa.code) + assert_equal('Aland', aa.name) + assert_equal(2, aa.zones.length) + assert_equal('Test/Zone/3', aa.zones[0].identifier) + assert_equal('Test/Zone/2', aa.zones[1].identifier) + + assert_kind_of(RubyCountryInfo, te) + assert_equal('TE', te.code) + assert_equal('Three', te.name) + assert_equal(0, te.zones.length) + end + + def test_module_2 + hash = CountriesTest2.countries + assert_equal(1, hash.length) + assert_equal(true, hash.frozen?) + + co = hash['CO'] + + assert_kind_of(RubyCountryInfo, co) + assert_equal('CO', co.code) + assert_equal('First Country', co.name) + assert_equal(0, co.zones.length) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_info.rb new file mode 100644 index 0000000..12002d9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_info.rb @@ -0,0 +1,16 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCCountryInfo < Minitest::Test + + def test_code + ci = CountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('ZZ', ci.code) + end + + def test_name + ci = CountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('Zzz', ci.name) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_timezone.rb new file mode 100644 index 0000000..77fd7d1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_country_timezone.rb @@ -0,0 +1,173 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCCountryTimezone < Minitest::Test + def test_identifier_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_equal('Europe/London', ct.identifier) + end + + def test_identifier_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal('Europe/London', ct.identifier) + end + + def test_latitude_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_equal(Rational(2059, 40), ct.latitude) + end + + def test_latitude_after_freeze_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct.freeze + assert_equal(Rational(2059, 40), ct.latitude) + end + + def test_latitude_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal(Rational(2059, 40), ct.latitude) + end + + def test_longitude_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_equal(Rational(-5, 16), ct.longitude) + end + + def test_longitude_after_freeze_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct.freeze + assert_equal(Rational(-5, 16), ct.longitude) + end + + def test_longitude_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal(Rational(-5, 16), ct.longitude) + end + + def test_description_omit_new! 
+ ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_nil(ct.description) + end + + def test_description_omit_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_nil(ct.description) + end + + def test_description_nil_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16, nil) + assert_nil(ct.description) + end + + def test_description_nil_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16), nil) + assert_nil(ct.description) + end + + def test_description_new! + ct = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + assert_equal('Eastern Time', ct.description) + end + + def test_description_new + ct = CountryTimezone.new('America/New_York', Rational(48857, 1200), Rational(-266423, 3600), 'Eastern Time') + assert_equal('Eastern Time', ct.description) + end + + def test_timezone + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_kind_of(TimezoneProxy, ct.timezone) + assert_equal('Europe/London', ct.timezone.identifier) + end + + def test_description_or_friendly_idenfier_no_description + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal('London', ct.description_or_friendly_identifier) + end + + def test_description_or_friendly_idenfier_description + ct = CountryTimezone.new('America/New_York', Rational(48857, 1200), Rational(-266423, 3600), 'Eastern Time') + assert_equal('Eastern Time', ct.description_or_friendly_identifier) + end + + def test_equality_1 + ct1 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct2 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct3 = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + ct4 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16, 'Description') + ct5 = CountryTimezone.new!('Europe/LondonB', 2059, 40, -5, 16) + ct6 = CountryTimezone.new!('Europe/London', 2060, 40, -5, 16) + ct7 = CountryTimezone.new!('Europe/London', 2059, 40, -6, 16) + + assert_equal(true, ct1 == ct1) + assert_equal(true, ct1 == ct2) + assert_equal(true, ct1 == ct3) + assert_equal(false, ct1 == ct4) + assert_equal(false, ct1 == ct5) + assert_equal(false, ct1 == ct6) + assert_equal(false, ct1 == ct7) + end + + def test_equality_2 + ct1 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + ct2 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time2') + + assert_equal(true, ct1 == ct1) + assert_equal(false, ct1 == ct2) + end + + def test_equality_non_country_timezone + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + + assert_equal(false, ct == Object.new) + end + + def test_eql_1 + ct1 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct2 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct3 = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + ct4 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16, 'Description') + ct5 = CountryTimezone.new!('Europe/LondonB', 2059, 40, -5, 16) + ct6 = CountryTimezone.new!('Europe/London', 2060, 40, -5, 16) + ct7 = CountryTimezone.new!('Europe/London', 2059, 40, -6, 16) + + assert_equal(true, ct1.eql?(ct1)) + assert_equal(true, ct1.eql?(ct2)) + assert_equal(true, ct1.eql?(ct3)) + assert_equal(false, ct1.eql?(ct4)) + assert_equal(false, ct1.eql?(ct5)) + assert_equal(false, ct1.eql?(ct6)) + assert_equal(false, ct1.eql?(ct7)) + end + 
+ def test_eql_2 + ct1 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + ct2 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time2') + + assert_equal(true, ct1.eql?(ct1)) + assert_equal(false, ct1.eql?(ct2)) + end + + def test_eql_non_country_timezone + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + + assert_equal(false, ct.eql?(Object.new)) + end + + def test_hash_new! + ct1 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct2 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + + assert_equal('Europe/London'.hash ^ 2059.hash ^ 40.hash ^ -5.hash ^ 16.hash ^ nil.hash, ct1.hash) + assert_equal('America/New_York'.hash ^ 48857.hash ^ 1200.hash ^ -266423.hash ^ 3600.hash ^ 'Eastern Time'.hash, ct2.hash) + end + + def test_hash_new + ct1 = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + ct2 = CountryTimezone.new('America/New_York', Rational(48857, 1200), Rational(-266423, 3600), 'Eastern Time') + + assert_equal('Europe/London'.hash ^ 2059.hash ^ 40.hash ^ -5.hash ^ 16.hash ^ nil.hash, ct1.hash) + assert_equal('America/New_York'.hash ^ 48857.hash ^ 1200.hash ^ -266423.hash ^ 3600.hash ^ 'Eastern Time'.hash, ct2.hash) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_source.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_source.rb new file mode 100644 index 0000000..3d11ffc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_source.rb @@ -0,0 +1,218 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'tmpdir' + +include TZInfo + +class TCDataSource < Minitest::Test + class InitDataSource < DataSource + end + + class DummyDataSource < DataSource + end + + def setup + @orig_data_source = DataSource.get + DataSource.set(InitDataSource.new) + @orig_search_path = ZoneinfoDataSource.search_path.clone + end + + def teardown + DataSource.set(@orig_data_source) + ZoneinfoDataSource.search_path = @orig_search_path + end + + def test_get + data_source = DataSource.get + assert_kind_of(InitDataSource, data_source) + end + + def test_get_default_ruby_only + code = <<-EOF + require 'tmpdir' + + begin + Dir.mktmpdir('tzinfo_test_dir') do |dir| + TZInfo::ZoneinfoDataSource.search_path = [dir] + + puts TZInfo::DataSource.get.class + end + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns(['TZInfo::RubyDataSource'], code, [TZINFO_TEST_DATA_DIR]) + end + + def test_get_default_zoneinfo_only + code = <<-EOF + require 'tmpdir' + + begin + Dir.mktmpdir('tzinfo_test_dir') do |dir| + TZInfo::ZoneinfoDataSource.search_path = [dir, '#{TZINFO_TEST_ZONEINFO_DIR}'] + + puts TZInfo::DataSource.get.class + puts TZInfo::DataSource.get.zoneinfo_dir + end + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns( + ['TZInfo::ZoneinfoDataSource', TZINFO_TEST_ZONEINFO_DIR], + code) + end + + def test_get_default_ruby_and_zoneinfo + code = <<-EOF + begin + TZInfo::ZoneinfoDataSource.search_path = ['#{TZINFO_TEST_ZONEINFO_DIR}'] + + puts TZInfo::DataSource.get.class + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns(['TZInfo::RubyDataSource'], code, [TZINFO_TEST_DATA_DIR]) + end + + def test_get_default_no_data + code = <<-EOF + require 'tmpdir' + + begin + Dir.mktmpdir('tzinfo_test_dir') do |dir| + 
TZInfo::ZoneinfoDataSource.search_path = [dir] + + begin + data_source = TZInfo::DataSource.get + puts "No exception raised, returned \#{data_source} instead" + rescue Exception => e + puts e.class + end + end + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns(['TZInfo::DataSourceNotFound'], code) + end + + def test_set_instance + DataSource.set(DummyDataSource.new) + data_source = DataSource.get + assert_kind_of(DummyDataSource, data_source) + end + + def test_set_standard_ruby + DataSource.set(:ruby) + data_source = DataSource.get + assert_kind_of(RubyDataSource, data_source) + end + + def test_set_standard_zoneinfo_search + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone.tab')) + + ZoneinfoDataSource.search_path = [dir] + + DataSource.set(:zoneinfo) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_search_zone1970 + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone1970.tab')) + + ZoneinfoDataSource.search_path = [dir] + + DataSource.set(:zoneinfo) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_explicit + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone.tab')) + + DataSource.set(:zoneinfo, dir) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_explicit_zone1970 + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone.tab')) + + DataSource.set(:zoneinfo, dir) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_explicit_alternate_iso3166 + Dir.mktmpdir('tzinfo_test_dir') do |dir| + zoneinfo_dir = File.join(dir, 'zoneinfo') + tab_dir = File.join(dir, 'tab') + + FileUtils.mkdir(zoneinfo_dir) + FileUtils.mkdir(tab_dir) + + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + iso3166_file = File.join(tab_dir, 'iso3166.tab') + FileUtils.touch(iso3166_file) + + DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_file) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(zoneinfo_dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_search_not_found + Dir.mktmpdir('tzinfo_test_dir') do |dir| + ZoneinfoDataSource.search_path = [dir] + + assert_raises(ZoneinfoDirectoryNotFound) do + DataSource.set(:zoneinfo) + end + + assert_kind_of(InitDataSource, DataSource.get) + end + end + + def test_set_standard_zoneinfo_explicit_invalid + Dir.mktmpdir('tzinfo_test_dir') do |dir| + assert_raises(InvalidZoneinfoDirectory) do + DataSource.set(:zoneinfo, dir) + end + + assert_kind_of(InitDataSource, DataSource.get) + end + end + + def test_set_standard_zoneinfo_wrong_arg_count + assert_raises(ArgumentError) do + DataSource.set(:zoneinfo, 1, 2, 3) + end + + assert_kind_of(InitDataSource, DataSource.get) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_timezone.rb 
b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_timezone.rb new file mode 100644 index 0000000..15ee27b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_timezone.rb @@ -0,0 +1,99 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCDataTimezone < Minitest::Test + + class TestTimezoneInfo < TimezoneInfo + attr_reader :utc + attr_reader :local + attr_reader :utc_to + attr_reader :utc_from + + def initialize(identifier, utc_period, local_periods, transitions_up_to) + super(identifier) + @utc_period = utc_period + @local_periods = local_periods || [] + @transitions_up_to = transitions_up_to + end + + def period_for_utc(utc) + @utc = utc + @utc_period + end + + def periods_for_local(local) + @local = local + @local_periods + end + + def transitions_up_to(utc_to, utc_from = nil) + @utc_to = utc_to + @utc_from = utc_from + @transitions_up_to + end + end + + def test_identifier + tz = DataTimezone.new(TestTimezoneInfo.new('Test/Zone', nil, [], [])) + assert_equal('Test/Zone', tz.identifier) + end + + def test_period_for_utc + # Don't need actual TimezonePeriods. DataTimezone isn't supposed to do + # anything with them apart from return them. + period = Object.new + tti = TestTimezoneInfo.new('Test/Zone', period, [], []) + tz = DataTimezone.new(tti) + + t = Time.utc(2006, 6, 27, 22, 50, 12) + assert_same(period, tz.period_for_utc(t)) + assert_same(t, tti.utc) + end + + def test_periods_for_local + # Don't need actual TimezonePeriods. DataTimezone isn't supposed to do + # anything with them apart from return them. + periods = [Object.new, Object.new] + tti = TestTimezoneInfo.new('Test/Zone', nil, periods, []) + tz = DataTimezone.new(tti) + + t = Time.utc(2006, 6, 27, 22, 50, 12) + assert_same(periods, tz.periods_for_local(t)) + assert_same(t, tti.local) + end + + def test_periods_for_local_not_found + periods = [] + tti = TestTimezoneInfo.new('Test/Zone', nil, periods, []) + tz = DataTimezone.new(tti) + + t = Time.utc(2006, 6, 27, 22, 50, 12) + assert_same(periods, tz.periods_for_local(t)) + assert_same(t, tti.local) + end + + def test_transitions_up_to + # Don't need actual TimezoneTransition instances. DataTimezone isn't + # supposed to do anything with them apart from return them. 
+ transitions = [Object.new, Object.new] + tti = TestTimezoneInfo.new('Test/Zone', nil, nil, transitions) + tz = DataTimezone.new(tti) + + utc_to = Time.utc(2013, 1, 1, 0, 0, 0) + utc_from = Time.utc(2012, 1, 1, 0, 0, 0) + assert_same(transitions, tz.transitions_up_to(utc_to, utc_from)) + assert_same(utc_to, tti.utc_to) + assert_same(utc_from, tti.utc_from) + end + + def test_canonical_identifier + tz = DataTimezone.new(TestTimezoneInfo.new('Test/Zone', nil, [], [])) + assert_equal('Test/Zone', tz.canonical_identifier) + end + + def test_canonical_zone + tz = DataTimezone.new(TestTimezoneInfo.new('Test/Zone', nil, [], [])) + assert_same(tz, tz.canonical_zone) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_timezone_info.rb new file mode 100644 index 0000000..1b9d144 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_data_timezone_info.rb @@ -0,0 +1,18 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCDataTimezoneInfo < Minitest::Test + + def test_identifier + ti = DataTimezoneInfo.new('Test/Zone') + assert_equal('Test/Zone', ti.identifier) + end + + def test_construct_timezone + ti = DataTimezoneInfo.new('Test/Zone') + tz = ti.create_timezone + assert_kind_of(DataTimezone, tz) + assert_equal('Test/Zone', tz.identifier) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_info_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_info_timezone.rb new file mode 100644 index 0000000..c70a098 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_info_timezone.rb @@ -0,0 +1,34 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCInfoTimezone < Minitest::Test + + class TestInfoTimezone < InfoTimezone + attr_reader :setup_info + + protected + def setup(info) + super(info) + @setup_info = info + end + end + + def test_identifier + tz = InfoTimezone.new(TimezoneInfo.new('Test/Identifier')) + assert_equal('Test/Identifier', tz.identifier) + end + + def test_info + i = TimezoneInfo.new('Test/Identifier') + tz = InfoTimezone.new(i) + assert_same(i, tz.send(:info)) + end + + def test_setup + i = TimezoneInfo.new('Test/Identifier') + tz = TestInfoTimezone.new(i) + assert_same(i, tz.setup_info) + end +end + diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_linked_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_linked_timezone.rb new file mode 100644 index 0000000..5da9303 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_linked_timezone.rb @@ -0,0 +1,155 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCLinkedTimezone < Minitest::Test + + class TestTimezone < Timezone + attr_reader :utc_period + attr_reader :local_periods + attr_reader :up_to_transitions + attr_reader :utc + attr_reader :local + attr_reader :utc_to + attr_reader :utc_from + + def self.new(identifier, no_local_periods = false) + tz = super() + tz.send(:setup, identifier, no_local_periods) + tz + end + + def identifier + @identifier + end + + def period_for_utc(utc) + @utc = utc + @utc_period + end + + def periods_for_local(local) + @local = local + raise PeriodNotFound if @no_local_periods + @local_periods + end + + def transitions_up_to(utc_to, utc_from = nil) + @utc_to = utc_to + @utc_from = utc_from + @up_to_transitions + end + + def 
canonical_zone + self + end + + private + def setup(identifier, no_local_periods) + @identifier = identifier + @no_local_periods = no_local_periods + + # Don't have to be real TimezonePeriod or TimezoneTransition objects + # (nothing will use them). + @utc_period = Object.new + @local_periods = [Object.new, Object.new] + @up_to_transitions = [Object.new, Object.new] + end + end + + + def setup + # Redefine Timezone.get to return a fake timezone. + # Use without_warnings to suppress redefined get method warning. + without_warnings do + def Timezone.get(identifier) + raise InvalidTimezoneIdentifier, 'Invalid identifier' if identifier == 'Invalid/Identifier' + + @timezones ||= {} + @timezones[identifier] ||= + identifier == 'Test/Recursive/Linked' ? + LinkedTimezone.new(LinkedTimezoneInfo.new(identifier, 'Test/Recursive/Data')) : + TestTimezone.new(identifier, identifier == 'Test/No/Local') + end + end + end + + def teardown + # Re-require timezone to reset. + # Suppress redefined method warnings. + without_warnings do + load 'tzinfo/timezone.rb' + end + end + + def test_identifier + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + assert_equal('Test/Zone', tz.identifier) + end + + def test_invalid_linked_identifier + assert_raises(InvalidTimezoneIdentifier) { LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Invalid/Identifier')) } + end + + def test_period_for_utc + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + t = Time.utc(2006, 6, 27, 23, 12, 28) + assert_same(linked_tz.utc_period, tz.period_for_utc(t)) + assert_same(t, linked_tz.utc) + end + + def test_periods_for_local + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + t = Time.utc(2006, 6, 27, 23, 12, 28) + assert_same(linked_tz.local_periods, tz.periods_for_local(t)) + assert_same(t, linked_tz.local) + end + + def test_periods_for_local_not_found + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/No/Local')) + linked_tz = Timezone.get('Test/No/Local') + t = Time.utc(2006, 6, 27, 23, 12, 28) + assert_raises(PeriodNotFound) { tz.periods_for_local(t) } + assert_same(t, linked_tz.local) + end + + def test_transitions_up_to + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + utc_to = Time.utc(2013, 1, 1, 0, 0, 0) + utc_from = Time.utc(2012, 1, 1, 0, 0, 0) + assert_same(linked_tz.up_to_transitions, tz.transitions_up_to(utc_to, utc_from)) + assert_same(utc_to, linked_tz.utc_to) + assert_same(utc_from, linked_tz.utc_from) + end + + def test_canonical_identifier + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + assert_equal('Test/Linked', tz.canonical_identifier) + end + + def test_canonical_identifier_recursive + # Recursive links are not currently used in the Time Zone database, but + # will be supported by TZInfo. + + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Recursive/Linked')) + assert_equal('Test/Recursive/Data', tz.canonical_identifier) + end + + def test_canonical_zone + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + assert_same(linked_tz, tz.canonical_zone) + end + + def test_canonical_zone_recursive + # Recursive links are not currently used in the Time Zone database, but + # will be supported by TZInfo. 
+ + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Recursive/Linked')) + linked_tz = Timezone.get('Test/Recursive/Data') + assert_same(linked_tz, tz.canonical_zone) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_linked_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_linked_timezone_info.rb new file mode 100644 index 0000000..4e55f4d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_linked_timezone_info.rb @@ -0,0 +1,23 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCLinkedTimezoneInfo < Minitest::Test + + def test_identifier + lti = LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked') + assert_equal('Test/Zone', lti.identifier) + end + + def test_link_to_identifier + lti = LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked') + assert_equal('Test/Linked', lti.link_to_identifier) + end + + def test_construct_timezone + lti = LinkedTimezoneInfo.new('Test/Zone', 'Europe/London') + tz = lti.create_timezone + assert_kind_of(LinkedTimezone, tz) + assert_equal('Test/Zone', tz.identifier) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_offset_rationals.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_offset_rationals.rb new file mode 100644 index 0000000..2881fc7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_offset_rationals.rb @@ -0,0 +1,23 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCOffsetRationals < Minitest::Test + def test_rational_for_offset + [0,1,2,3,4,-1,-2,-3,-4,30*60,-30*60,61*60,-61*60,14*60*60,-14*60*60,20*60*60,-20*60*60].each {|seconds| + assert_equal(Rational(seconds, 86400), OffsetRationals.rational_for_offset(seconds)) + } + end + + def test_rational_for_offset_constant + -28.upto(28) {|i| + seconds = i * 30 * 60 + + r1 = OffsetRationals.rational_for_offset(seconds) + r2 = OffsetRationals.rational_for_offset(seconds) + + assert_equal(Rational(seconds, 86400), r1) + assert_same(r1, r2) + } + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_posix_time_zone_parser.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_posix_time_zone_parser.rb new file mode 100644 index 0000000..1e8f153 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_posix_time_zone_parser.rb @@ -0,0 +1,261 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCPosixTimeZoneParser < Minitest::Test + HOUR = 3600 + MINUTE = 60 + + class << self + private + + def append_time_to_rule(day_rule, time) + time ? 
"#{day_rule}/#{time}" : day_rule + end + + def define_invalid_dst_rule_tests(type, rule) + define_method "test_#{type}_dst_start_rule_for_invalid_#{rule}" do + tz_string = "STD-1DST,#{rule},300" + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + + define_method "test_#{type}_dst_end_rule_for_invalid_#{rule}" do + tz_string = "STD-1DST,60,#{rule}" + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + end + end + + def setup + @parser = PosixTimeZoneParser.new + end + + def test_empty_rule_returns_nil + result = @parser.parse('') + assert_nil(result) + end + + ABBREVIATIONS_WITH_OFFSETS = [ + ['UTC0', :UTC, 0], + ['U0', :U, 0], + ['West1', :West, -HOUR], + ['East-1', :East, HOUR], + ['<-05>5', :'-05', -5 * HOUR], + ['<+12>-12', :'+12', 12 * HOUR], + ['HMM2:30', :HMM, -(2 * HOUR + 30 * MINUTE)], + ['HHMM02:30', :HHMM, -(2 * HOUR + 30 * MINUTE)], + ['HHMM+02:30', :HHMM, -(2 * HOUR + 30 * MINUTE)], + ['HHMSS-12:5:50', :HHMSS, 12 * HOUR + 5 * MINUTE + 50], + ['HHMMSS-12:05:50', :HHMMSS, 12 * HOUR + 5 * MINUTE + 50], + ['HHMMS-12:05:7', :HHMMS, 12 * HOUR + 5 * MINUTE + 7] + ] + + ABBREVIATIONS_WITH_OFFSETS.each do |(tz_string, expected_abbrev, expected_base_offset)| + define_method "test_std_only_returns_std_offset_#{tz_string}" do + result = @parser.parse(tz_string) + expected = TimezoneOffset.new(expected_base_offset, 0, expected_abbrev) + assert_equal(expected, result) + end + end + + ABBREVIATIONS_WITH_OFFSETS.each do |(abbrev_and_offset, expected_abbrev, expected_base_offset)| + define_method "test_std_offset_#{abbrev_and_offset}" do + result = @parser.parse(abbrev_and_offset + 'DST,60,300') + expected_std_offset = TimezoneOffset.new(expected_base_offset, 0, expected_abbrev) + assert_equal(expected_std_offset, result.std_offset) + end + end + + [ + ['Zero0One-1', :One, 0, HOUR], + ['Zero0One', :One, 0, HOUR], + ['Z0O', :O, 0, HOUR], + ['West1WestS0', :WestS, -HOUR, HOUR], + ['West1WestS', :WestS, -HOUR, HOUR], + ['East-1EastS-2', :EastS, HOUR, HOUR], + ['East-1EastS', :EastS, HOUR, HOUR], + ['Neg2NegS3', :NegS, -2 * HOUR, -HOUR], + ['<-05>5<-04>4', :'-04', -5 * HOUR, HOUR], + ['STD5<-04>4', :'-04', -5 * HOUR, HOUR], + ['<+12>-12<+13>-13', :'+13', 12 * HOUR, HOUR], + ['STD-12<+13>-13', :'+13', 12 * HOUR, HOUR], + ['HMM2:30SHMM1:15', :SHMM, -(2 * HOUR + 30 * MINUTE), HOUR + 15 * MINUTE], + ['HHMM02:30SHHMM01:15', :SHHMM, -(2 * HOUR + 30 * MINUTE), HOUR + 15 * MINUTE], + ['HHMM+02:30SHHMM+01:15', :SHHMM, -(2 * HOUR + 30 * MINUTE), HOUR + 15 * MINUTE], + ['HHMSS-12:5:50SHHMSS-13:4:30', :SHHMSS, 12 * HOUR + 5 * MINUTE + 50, 58 * MINUTE + 40], + ['HHMMSS-12:05:50SHHMMSS-13:04:30', :SHHMMSS, 12 * HOUR + 5 * MINUTE + 50, 58 * MINUTE + 40], + ['HHMMS-12:05:7SHHMMSS-13:06:8', :SHHMMSS, 12 * HOUR + 5 * MINUTE + 7, HOUR + MINUTE + 1], + ].each do |(abbrevs_and_offsets, expected_abbrev, expected_base_offset, expected_std_offset)| + define_method "test_dst_offset_#{abbrevs_and_offsets}" do + result = @parser.parse(abbrevs_and_offsets + ',60,300') + expected_dst_offset = TimezoneOffset.new(expected_base_offset, expected_std_offset, expected_abbrev) + assert_equal(expected_dst_offset, result.dst_offset) + end + end + + ['01:-1', '01:60', '01:00:-1', '01:00:60'].each do |abbrev_and_offset| + ['', 'DST,60,300'].each do |dst_suffix| + tz_string = abbrev_and_offset + dst_suffix + define_method "test_std_offset_invalid_#{tz_string}" do + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + end + + tz_string = "STD1#{abbrev_and_offset},60,300" + 
define_method "test_dst_offset_invalid_#{tz_string}" do + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + end + + [ + [nil, 2 * HOUR], + ['2', 2 * HOUR], + ['+2', 2 * HOUR], + ['-2', -2 * HOUR], + ['2:3:4', 2 * HOUR + 3 * MINUTE + 4], + ['02:03:04', 2 * HOUR + 3 * MINUTE + 4], + ['-2:3:4', -2 * HOUR + 3 * MINUTE + 4], # 22:03:04 on the day prior to the one specified + ['-02:03:04', -2 * HOUR + 3 * MINUTE + 4], + ['167', 167 * HOUR], + ['-167', -167 * HOUR] + ].each do |(time, expected_offset_from_midnight)| + [ + ['J1', 1], + ['J365', 365] + ].each do |(julian_day_rule, expected_julian_day)| + rule = append_time_to_rule(julian_day_rule, time) + + define_method "test_julian_day_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},300") + expected_dst_start_rule = JulianDayOfYearTransitionRule.new(expected_julian_day, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_julian_day_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,60,#{rule}") + expected_dst_end_rule = JulianDayOfYearTransitionRule.new(expected_julian_day, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + + [ + ['0', 0], + ['365', 365] + ].each do |(absolute_day_rule, expected_day)| + rule = append_time_to_rule(absolute_day_rule, time) + + define_method "test_absolute_day_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},J300") + expected_dst_start_rule = AbsoluteDayOfYearTransitionRule.new(expected_day, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_absolute_day_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,J60,#{rule}") + expected_dst_end_rule = AbsoluteDayOfYearTransitionRule.new(expected_day, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + + [ + ['M1.1.0', 1, 1, 0], + ['M12.4.6', 12, 4, 6] + ].each do |(day_of_month_rule, expected_month, expected_week, expected_day_of_week)| + rule = append_time_to_rule(day_of_month_rule, time) + + define_method "test_day_of_month_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},300") + expected_dst_start_rule = DayOfMonthTransitionRule.new(expected_month, expected_week, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_day_of_month_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,60,#{rule}") + expected_dst_end_rule = DayOfMonthTransitionRule.new(expected_month, expected_week, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + + [ + ['M1.5.0', 1, 0], + ['M12.5.6', 12, 6] + ].each do |(last_day_of_month_rule, expected_month, expected_day_of_week)| + rule = append_time_to_rule(last_day_of_month_rule, time) + + define_method "test_last_day_of_month_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},300") + expected_dst_start_rule = LastDayOfMonthTransitionRule.new(expected_month, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_last_day_of_month_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,60,#{rule}") + expected_dst_end_rule = LastDayOfMonthTransitionRule.new(expected_month, 
expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + end + + ['J0', 'J366'].each do |julian_day_rule| + define_invalid_dst_rule_tests('julian_day', julian_day_rule) + end + + ['-1', '366'].each do |absolute_day_rule| + define_invalid_dst_rule_tests('absolute_day', absolute_day_rule) + end + + ['M0,1,0', 'M13,1,0', 'M6,0,0', 'M6,6,0', 'M6,1,-1', 'M6,1,7'].each do |day_of_month_rule| + define_invalid_dst_rule_tests('day_of_month', day_of_month_rule) + end + + ['M0,5,0', 'M13,5,0', 'M6,5,-1', 'M6,5,7'].each do |last_day_of_month_rule| + define_invalid_dst_rule_tests('last_day_of_month', last_day_of_month_rule) + end + + def test_invalid_dst_start_rule + assert_raises(InvalidPosixTimeZone) { @parser.parse('STD1DST,X60,300') } + end + + def test_invalid_dst_end_rule + assert_raises(InvalidPosixTimeZone) { @parser.parse('STD1DST,60,X300') } + end + + [ + ['STD5DST,0/0,J365/25', :DST, -5 * HOUR, HOUR], + ['STD-5DST,0/0,J365/25', :DST, 5 * HOUR, HOUR], + ['STD5DST3,0/0,J365/26', :DST, -5 * HOUR, 2 * HOUR], + ['STD5DST6,0/0,J365/23', :DST, -5 * HOUR, -HOUR], + ['STD5DST,J1/0,J365/25', :DST, -5 * HOUR, HOUR], + ['Winter5Summer,0/0,J365/25', :Summer, -5 * HOUR, HOUR], + ['<-05>5<-06>,0/0,J365/25', :'-06', -5 * HOUR, HOUR] + ].each do |(tz_string, expected_abbrev, expected_base_offset, expected_std_offset)| + define_method "test_dst_only_returns_continuous_offset_for_#{tz_string}" do + result = @parser.parse(tz_string) + expected = TimezoneOffset.new(expected_base_offset, expected_std_offset, expected_abbrev) + assert_equal(expected, result) + end + end + + def test_parses_tainted_string_in_safe_mode_and_returns_untainted_abbreviations + safe_test(:unavailable => :skip) do + result = @parser.parse('STD1DST,60,300'.dup.taint) + + assert_equal(:STD, result.std_offset.abbreviation) + assert_equal(:DST, result.dst_offset.abbreviation) + end + end + + ['STD1', 'STD1DST,60,300'].each do |tz_string| + tz_string += "-" + define_method "test_content_after_end_for_#{tz_string}" do + error = assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + assert_equal("Expected the end of a POSIX-style time zone string but found '-'.", error.message) + end + end + + ['X', 0].each do |invalid_tz_string| + define_method "test_invalid_tz_string_#{invalid_tz_string}" do + assert_raises(InvalidPosixTimeZone) { @parser.parse(invalid_tz_string) } + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_core_support.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_core_support.rb new file mode 100644 index 0000000..56c5a8a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_core_support.rb @@ -0,0 +1,168 @@ +# encoding: UTF-8 + +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCRubyCoreSupport < Minitest::Test + def test_rational_new! + assert_equal(Rational(3,4), RubyCoreSupport.rational_new!(3,4)) + end + + def test_datetime_new! 
+ assert_equal(DateTime.new(2008,10,5,12,0,0, 0, Date::ITALY), RubyCoreSupport.datetime_new!(2454745,0,2299161)) + assert_equal(DateTime.new(2008,10,5,13,0,0, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(2454745,Rational(1, 24),2299161)) + + assert_equal(DateTime.new(2008,10,5,20,30,0, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(117827777, 48), 0, 2299161)) + assert_equal(DateTime.new(2008,10,5,21,30,0, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(117827777, 48), Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(2008,10,6,6,26,21, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(70696678127,28800), 0, 2299161)) + assert_equal(DateTime.new(2008,10,6,7,26,21, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(70696678127, 28800), Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(-4712,1,1,12,0,0, 0, Date::ITALY), RubyCoreSupport.datetime_new!(0, 0, 2299161)) + assert_equal(DateTime.new(-4712,1,1,13,0,0, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(0, Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(-4713,12,31,23,58,59, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-43261, 86400), 0, 2299161)) + assert_equal(DateTime.new(-4712,1,1,0,58,59, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-43261, 86400), Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(-4713,12,30,23,58,59, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-129661, 86400), 0, 2299161)) + assert_equal(DateTime.new(-4713,12,31,0,58,59, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-129661, 86400), Rational(1, 24), 2299161)) + end + + def test_datetime_new + assert_equal(DateTime.new(2012, 12, 31, 23, 59, 59, 0, Date::ITALY), RubyCoreSupport.datetime_new(2012, 12, 31, 23, 59, 59, 0, Date::ITALY)) + assert_equal(DateTime.new(2013, 2, 6, 23, 2, 36, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new(2013, 2, 6, 23, 2, 36, Rational(1,24), Date::ITALY)) + + assert_equal(DateTime.new(2012, 12, 31, 23, 59, 59, 0, Date::ITALY) + Rational(1, 86400000), RubyCoreSupport.datetime_new(2012, 12, 31, 23, 59, 59 + Rational(1, 1000), 0, Date::ITALY)) + assert_equal(DateTime.new(2001, 10, 12, 12, 22, 59, Rational(1, 24), Date::ITALY) + Rational(501, 86400000), RubyCoreSupport.datetime_new(2001, 10, 12, 12, 22, 59 + Rational(501, 1000), Rational(1, 24), Date::ITALY)) + end + + def test_time_supports_negative + if RubyCoreSupport.time_supports_negative + assert_equal(Time.utc(1969, 12, 31, 23, 59, 59), Time.at(-1).utc) + else + assert_raises(ArgumentError) do + Time.at(-1) + end + end + end + + def test_time_supports_64_bit + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(1901, 12, 13, 20, 45, 51), Time.at(-2147483649).utc) + assert_equal(Time.utc(2038, 1, 19, 3, 14, 8), Time.at(2147483648).utc) + else + assert_raises(RangeError) do + Time.at(-2147483649) + end + + assert_raises(RangeError) do + Time.at(2147483648) + end + end + end + + def test_time_nsec + t = Time.utc(2013, 2, 6, 21, 56, 23, 567890 + Rational(123,1000)) + + if t.respond_to?(:nsec) + assert_equal(567890123, RubyCoreSupport.time_nsec(t)) + else + assert_equal(567890000, RubyCoreSupport.time_nsec(t)) + end + end + + def test_force_encoding + s = [0xC2, 0xA9].pack('c2') + + if s.respond_to?(:force_encoding) + # Ruby 1.9+ - should call String#force_encoding + assert_equal('ASCII-8BIT', s.encoding.name) + assert_equal(2, s.bytesize) + result = RubyCoreSupport.force_encoding(s, 
'UTF-8') + assert_same(s, result) + assert_equal('UTF-8', s.encoding.name) + assert_equal(2, s.bytesize) + assert_equal(1, s.length) + assert_equal('©', s) + else + # Ruby 1.8 - no-op + result = RubyCoreSupport.force_encoding(s, 'UTF-8') + assert_same(s, result) + assert_equal('©', s) + end + end + + begin + SUPPORTS_ENCODING = !!Encoding + rescue NameError + SUPPORTS_ENCODING = false + end + + def check_open_file_test_file_bytes(test_file) + if SUPPORTS_ENCODING + File.open(test_file, 'r') do |file| + file.binmode + data = file.read(2) + refute_nil(data) + assert_equal(2, data.length) + bytes = data.unpack('C2') + assert_equal(0xC2, bytes[0]) + assert_equal(0xA9, bytes[1]) + end + end + end + + def check_open_file_test_file_content(file) + content = file.gets + refute_nil(content) + content.chomp! + + if SUPPORTS_ENCODING + assert_equal('UTF-8', content.encoding.name) + assert_equal(1, content.length) + assert_equal(2, content.bytesize) + assert_equal('©', content) + else + assert_equal('x', content) + end + end + + def test_open_file + Dir.mktmpdir('tzinfo_test') do |dir| + test_file = File.join(dir, 'test.txt') + + file = RubyCoreSupport.open_file(test_file, 'w', :external_encoding => 'UTF-8') + begin + file.puts(SUPPORTS_ENCODING ? '©' : 'x') + ensure + file.close + end + + check_open_file_test_file_bytes(test_file) + + file = RubyCoreSupport.open_file(test_file, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') + begin + check_open_file_test_file_content(file) + ensure + file.close + end + end + end + + def test_open_file_block + Dir.mktmpdir('tzinfo_test') do |dir| + test_file = File.join(dir, 'test.txt') + + RubyCoreSupport.open_file(test_file, 'w', :external_encoding => 'UTF-8') do |file| + file.puts(SUPPORTS_ENCODING ? '©' : 'x') + end + + check_open_file_test_file_bytes(test_file) + + RubyCoreSupport.open_file(test_file, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + check_open_file_test_file_content(file) + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_country_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_country_info.rb new file mode 100644 index 0000000..c99f09f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_country_info.rb @@ -0,0 +1,110 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCRubyCountryInfo < Minitest::Test + + def test_code + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('ZZ', ci.code) + end + + def test_name + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('Zzz', ci.name) + end + + def test_zone_identifiers_empty + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert(ci.zone_identifiers.empty?) + assert(ci.zone_identifiers.frozen?) + end + + def test_zone_identifiers_no_block + ci = RubyCountryInfo.new('ZZ', 'Zzz') + assert(ci.zone_identifiers.empty?) + assert(ci.zone_identifiers.frozen?) + end + + def test_zone_identifiers + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + assert(ci.zone_identifiers.frozen?) 
+ end + + def test_zone_identifiers_after_freeze + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + ci.freeze + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + end + + def test_zones_empty + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert(ci.zones.empty?) + assert(ci.zones.frozen?) + end + + def test_zones_no_block + ci = RubyCountryInfo.new('ZZ', 'Zzz') + assert(ci.zones.empty?) + assert(ci.zones.frozen?) + end + + def test_zones + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + assert_equal([CountryTimezone.new!('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B'), + CountryTimezone.new!('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A'), + CountryTimezone.new!('ZZ/TimezoneC', -10, 3, -20, 7, 'C'), + CountryTimezone.new!('ZZ/TimezoneD', -10, 3, -20, 7)], + ci.zones) + assert(ci.zones.frozen?) + end + + def test_zones_after_freeze + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + ci.freeze + + assert_equal([CountryTimezone.new!('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B'), + CountryTimezone.new!('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A'), + CountryTimezone.new!('ZZ/TimezoneC', -10, 3, -20, 7, 'C'), + CountryTimezone.new!('ZZ/TimezoneD', -10, 3, -20, 7)], + ci.zones) + end + + def test_deferred_evaluate + block_called = false + + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + block_called = true + end + + assert_equal(false, block_called) + ci.zones + assert_equal(true, block_called) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_data_source.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_data_source.rb new file mode 100644 index 0000000..790dd8e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_ruby_data_source.rb @@ -0,0 +1,167 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. +send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCRubyDataSource < Minitest::Test + def setup + @data_source = RubyDataSource.new + end + + def test_initialize_not_found + # A failure to load tzinfo/data in initialize will not cause an exception. + # Attempts to load data files will subsequently fail. 
+ code = <<-EOF + begin + ds = TZInfo::RubyDataSource.new + puts 'Initialized' + ds.load_timezone_info('Europe/London') + rescue Exception => e + puts e.class + end + EOF + + assert_sub_process_returns(['Initialized', 'TZInfo::InvalidTimezoneIdentifier'], code) + end + + def test_load_timezone_info_data + info = @data_source.load_timezone_info('Europe/London') + assert_kind_of(DataTimezoneInfo, info) + assert_equal('Europe/London', info.identifier) + end + + def test_load_timezone_info_linked + info = @data_source.load_timezone_info('UTC') + assert_kind_of(LinkedTimezoneInfo, info) + assert_equal('UTC', info.identifier) + assert_equal('Etc/UTC', info.link_to_identifier) + end + + def test_load_timezone_info_does_not_exist + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('Nowhere/Special') + end + end + + def test_load_timezone_info_invalid + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('../Definitions/UTC') + end + end + + def test_load_timezone_info_nil + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info(nil) + end + end + + def test_load_timezone_info_case + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('europe/london') + end + end + + def test_load_timezone_info_plus + info = @data_source.load_timezone_info('Etc/GMT+1') + assert_equal('Etc/GMT+1', info.identifier) + end + + def test_load_timezone_info_minus + info = @data_source.load_timezone_info('Etc/GMT-1') + assert_equal('Etc/GMT-1', info.identifier) + end + + def test_load_timezone_info_tainted + skip_if_has_bug_14060 + + safe_test(:unavailable => :skip) do + identifier = 'Europe/Amsterdam'.dup.taint + assert(identifier.tainted?) + info = @data_source.load_timezone_info(identifier) + assert_equal('Europe/Amsterdam', info.identifier) + assert(identifier.tainted?) + end + end + + def test_load_timezone_info_tainted_and_frozen + skip_if_has_bug_14060 + + safe_test do + info = @data_source.load_timezone_info('Europe/Amsterdam'.dup.taint.freeze) + assert_equal('Europe/Amsterdam', info.identifier) + end + end + + def test_timezone_identifiers + all = @data_source.timezone_identifiers + assert_equal(TZInfo::Data::Indexes::Timezones.timezones, all) + assert_equal(true, all.frozen?) + end + + def test_data_timezone_identifiers + all_data = @data_source.data_timezone_identifiers + assert_equal(TZInfo::Data::Indexes::Timezones.data_timezones, all_data) + assert_equal(true, all_data.frozen?) + end + + def test_linked_timezone_identifiers + all_linked = @data_source.linked_timezone_identifiers + assert_equal(TZInfo::Data::Indexes::Timezones.linked_timezones, all_linked) + assert_equal(true, all_linked.frozen?) + end + + def test_load_country_info + info = @data_source.load_country_info('GB') + assert_equal('GB', info.code) + end + + def test_load_country_info_not_exist + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('ZZ') + end + end + + def test_load_country_info_invalid + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('../Countries/GB') + end + end + + def test_load_country_info_nil + assert_raises(InvalidCountryCode) do + @data_source.load_country_info(nil) + end + end + + def test_load_country_info_case + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('gb') + end + end + + def test_load_country_info_tainted + safe_test(:unavailable => :skip) do + code = 'NL'.dup.taint + assert(code.tainted?) 
+ info = @data_source.load_country_info(code) + assert_equal('NL', info.code) + assert(code.tainted?) + end + end + + def test_load_country_info_tainted_and_frozen + safe_test do + info = @data_source.load_country_info('NL'.dup.taint.freeze) + assert_equal('NL', info.code) + end + end + + def test_country_codes + codes = @data_source.country_codes + assert_equal(TZInfo::Data::Indexes::Countries.countries.keys, codes) + assert_equal(true, codes.frozen?) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_time_or_datetime.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_time_or_datetime.rb new file mode 100644 index 0000000..d5ca73e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_time_or_datetime.rb @@ -0,0 +1,674 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'rational' unless defined?(Rational) + +include TZInfo + +class TCTimeOrDateTime < Minitest::Test + # Ruby 1.8.7 (built with GCC 8.3.0 on Ubuntu 19.04) fails to perform DateTime + # arithmetic operations correctly when sub-seconds are used. Detect this and + # disable the affected tests. + DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN = (DateTime.new(2019, 12, 22, 15, 0, Rational(1, 1000)) + Rational(1, 86400)).strftime('%Q') != '1577026801001' + puts "DateTime sub-second arithmetic is broken on this platform" if DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + + def test_initialize_time + assert_nothing_raised do + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + end + end + + def test_initialize_time_local + tdt = TimeOrDateTime.new(Time.local(2006, 3, 24, 15, 32, 3)) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), tdt.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) + end + + def test_initialize_time_local_usec + tdt = TimeOrDateTime.new(Time.local(2006, 3, 24, 15, 32, 3, 721123)) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), tdt.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) + end + + if Time.utc(2013, 1, 1).respond_to?(:nsec) + def test_initialize_time_local_nsec + tdt = TimeOrDateTime.new(Time.local(2006, 3, 24, 15, 32, 3, 721123 + Rational(456,1000))) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123 + Rational(456,1000)), tdt.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123 + Rational(456,1000)), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) + end + end + + def test_initialize_time_utc_local + # Check that local Time instances on systems using UTC as the system + # time zone are still converted to UTC Time instances. + + # Note that this test will only work correctly on platforms where + # setting the TZ environment variable has an effect. If setting TZ has no + # effect, then this test will still pass. + + old_tz = ENV['TZ'] + begin + ENV['TZ'] = 'UTC' + tdt = TimeOrDateTime.new(Time.local(2014, 1, 11, 17, 18, 41)) + assert_equal(Time.utc(2014, 1, 11, 17, 18, 41), tdt.to_time) + assert_equal(Time.utc(2014, 1, 11, 17, 18, 41), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?)
+ ensure + ENV['TZ'] = old_tz + end + end + + def test_initialize_datetime_offset + tdt = TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3).new_offset(Rational(5, 24))) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), tdt.to_datetime) + assert_equal(0, tdt.to_datetime.offset) + end + + def test_initialize_datetime + assert_nothing_raised do + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + end + end + + def test_initialize_timestamp + assert_nothing_raised do + TimeOrDateTime.new(1143214323) + end + end + + def test_initialize_timestamp_string + assert_nothing_raised do + TimeOrDateTime.new('1143214323') + end + end + + unless RubyCoreSupport.time_supports_64bit + # Only define this test for non-64bit platforms. Some 64-bit Rubies support + # greater than 64-bit, others support less than the full range. In either + # case, times at the far ends of the range are so far in the future or past + # that they are not going to turn up in timezone data. + def test_initialize_timestamp_supported_range + assert_equal((2 ** 31) - 1, TimeOrDateTime.new((2 ** 31) - 1).to_orig) + + assert_raises(RangeError) do + TimeOrDateTime.new(2 ** 31) + end + + if RubyCoreSupport.time_supports_negative + assert_equal(-(2 ** 31), TimeOrDateTime.new(-(2 ** 31)).to_orig) + + assert_raises(RangeError) do + TimeOrDateTime.new(-(2 ** 31) - 1) + end + else + assert_equal(0, TimeOrDateTime.new(0).to_orig) + + assert_raises(RangeError) do + TimeOrDateTime.new(-1) + end + end + end + end + + def test_to_time + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123)).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000))).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(1143214323).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new('1143214323').to_time) + end + + def test_to_time_trunc_to_usec + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(7211239, 10000000))).to_time) + end + + def test_to_time_after_freeze + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).freeze.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(1143214323).freeze.to_time) + end + + def test_to_datetime + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000)), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123)).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000)), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000))).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(1143214323).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new('1143214323').to_datetime) + end + + def test_to_datetime_ruby186_bug + # DateTime.new in Ruby 
1.8.6 won't allow a time to be specified using + # fractions of a second that is within the 60th second of a minute. + + # TimeOrDateTime has a workaround for this issue. + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 59) + Rational(721123, 86400000000), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 59, 721123)).to_datetime) + end + + def test_to_datetime_trunc_to_usec + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000)), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123 + Rational(9, 10))).to_datetime) + end + + def test_to_datetime_after_freeze + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).freeze.to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(1143214323).freeze.to_datetime) + end + + def test_to_i + assert_equal(1143214323, + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(1143214323).to_i) + assert_equal(1143214323, + TimeOrDateTime.new('1143214323').to_i) + end + + def test_to_i_after_freeze + assert_equal(1143214323, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).freeze.to_i) + assert_equal(1143214323, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).freeze.to_i) + end + + def test_to_orig + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).to_orig) + assert_equal(1143214323, + TimeOrDateTime.new(1143214323).to_orig) + assert_equal(1143214323, + TimeOrDateTime.new('1143214323').to_orig) + end + + def test_to_s + assert_equal("Time: #{Time.utc(2006, 3, 24, 15, 32, 3).to_s}", + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_s) + assert_equal("DateTime: #{DateTime.new(2006, 3, 24, 15, 32, 3)}", + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_s) + assert_equal('Timestamp: 1143214323', + TimeOrDateTime.new(1143214323).to_s) + assert_equal('Timestamp: 1143214323', + TimeOrDateTime.new('1143214323').to_s) + end + + def test_year + assert_equal(2006, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).year) + assert_equal(2006, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).year) + assert_equal(2006, TimeOrDateTime.new(1143214323).year) + end + + def test_mon + assert_equal(3, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).mon) + assert_equal(3, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).mon) + assert_equal(3, TimeOrDateTime.new(1143214323).mon) + end + + def test_month + assert_equal(3, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).month) + assert_equal(3, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).month) + assert_equal(3, TimeOrDateTime.new(1143214323).month) + end + + def test_mday + assert_equal(24, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 
3)).mday) + assert_equal(24, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).mday) + assert_equal(24, TimeOrDateTime.new(1143214323).mday) + end + + def test_day + assert_equal(24, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).day) + assert_equal(24, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).day) + assert_equal(24, TimeOrDateTime.new(1143214323).day) + end + + 19.upto(25).each_with_index do |mday,i| + define_method "test_wday_time_#{i}" do + assert_equal(i, TimeOrDateTime.new(Time.utc(2006, 3, mday)).wday) + end + + define_method "test_wday_datetime_#{i}" do + assert_equal(i, TimeOrDateTime.new(DateTime.new(2006, 3, mday)).wday) + end + + define_method "test_wday_timestamp_#{i}" do + assert_equal(i, TimeOrDateTime.new(Time.utc(2006, 3, mday).to_i).wday) + end + end + + def test_hour + assert_equal(15, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).hour) + assert_equal(15, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).hour) + assert_equal(15, TimeOrDateTime.new(1143214323).hour) + end + + def test_min + assert_equal(32, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).min) + assert_equal(32, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).min) + assert_equal(32, TimeOrDateTime.new(1143214323).min) + end + + def test_sec + assert_equal(3, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).sec) + assert_equal(3, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).sec) + assert_equal(3, TimeOrDateTime.new(1143214323).sec) + end + + def test_usec + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).usec) + assert_equal(721123, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123)).usec) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).usec) + assert_equal(721123, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123,1000000))).usec) + assert_equal(0, TimeOrDateTime.new(1143214323).usec) + end + + def test_usec_after_to_i + val = TimeOrDateTime.new(Time.utc(2013, 2, 4, 22, 10, 33, 598000)) + assert_equal(Time.utc(2013, 2, 4, 22, 10, 33).to_i, val.to_i) + assert_equal(598000, val.usec) + end + + def test_compare_timeordatetime_time + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2007, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2005, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2007, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2005, 3, 24, 15, 32, 3))) + assert_equal(-1, 
TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2007, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2005, 3, 24, 15, 32, 3))) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500001))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 499999))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000 + DATETIME_RESOLUTION))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000 - DATETIME_RESOLUTION))) + end + + def test_compare_timeordatetime_datetime + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 
15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3))) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000)))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000)))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000)))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000)))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000)))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000)))) + end + + def test_compare_timeordatetime_timestamp + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214324)) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1174750323)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214322)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1111678323)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214324)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1174750323)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214322)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1111678323)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1143214324)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1174750323)) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1143214322)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1111678323)) + end + + def test_compare_time + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 
3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2007, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2005, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2007, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2005, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> Time.utc(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> Time.utc(2007, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> Time.utc(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> Time.utc(2005, 3, 24, 15, 32, 3)) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500001)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> Time.utc(2006, 3, 24, 15, 32, 3, 499999)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000 + DATETIME_RESOLUTION)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000 - DATETIME_RESOLUTION)) + end + + def test_compare_datetime + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2040, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(1960, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2040, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, 
TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(1960, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> DateTime.new(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> DateTime.new(2040, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> DateTime.new(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> DateTime.new(1960, 3, 24, 15, 32, 3)) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000))) + end + + def test_compare_timestamp + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1143214324) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1174750323) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1143214322) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1111678323) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1143214324) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1174750323) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1143214322) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1111678323) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> 1143214324) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> 1174750323) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> 1143214322) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> 1111678323) + end + + def test_compare_timestamp_str + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1143214324') + 
assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1174750323') + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1143214322') + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1111678323') + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1143214324') + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1174750323') + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1143214322') + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1111678323') + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> '1143214324') + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> '1174750323') + assert_equal(0, TimeOrDateTime.new(1143214323) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(1143214323) <=> '1143214322') + assert_equal(1, TimeOrDateTime.new(1143214323) <=> '1111678323') + end + + def test_compare_non_comparable + assert_nil(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Object.new) + assert_nil(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Object.new) + assert_nil(TimeOrDateTime.new(1143214323) <=> Object.new) + end + + def test_eql + assert_equal(true, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(1143214323))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new('1143214323'))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(Object.new)) + + assert_equal(true, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 722000)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(Object.new)) + + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)))) + assert_equal(true, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(1143214323))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new('1143214323'))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 
3)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 4)))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(Object.new)) + + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)))) + assert_equal(true, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(722, 1000))))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(Object.new)) + + + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)))) + assert_equal(true, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(1143214323))) + assert_equal(true, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new('1143214323'))) + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(1143214324))) + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(Object.new)) + end + + def test_hash + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3).hash, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).hash) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3).hash, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).hash) + assert_equal(1143214323.hash, TimeOrDateTime.new(1143214323).hash) + assert_equal(1143214323.hash, TimeOrDateTime.new('1143214323').hash) + end + + def test_add + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) + 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) + 0).to_orig) + assert_equal(1143214323, (TimeOrDateTime.new(1143214323) + 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) + 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) + 1).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214324, (TimeOrDateTime.new(1143214323) + 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) + (-1)).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2 + 
Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) + (-1)).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214322, (TimeOrDateTime.new(1143214323) + (-1)).to_orig) + end + + def test_subtract + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) - 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) - 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) - 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) - 0).to_orig) + assert_equal(1143214323, (TimeOrDateTime.new(1143214323) - 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) - 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) - 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) - 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) - 1).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214322, (TimeOrDateTime.new(1143214323) - 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) - (-1)).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) - (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) - (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) - (-1)).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214324, (TimeOrDateTime.new(1143214323) - (-1)).to_orig) + end + + def test_add_with_convert + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).add_with_convert(0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).add_with_convert(0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).add_with_convert(0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).add_with_convert(0).to_orig) + assert_equal(1143214323, TimeOrDateTime.new(1143214323).add_with_convert(0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).add_with_convert(1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4, 721000), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4 + Rational(721, 1000)), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).add_with_convert(1).to_orig) 
unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214324, TimeOrDateTime.new(1143214323).add_with_convert(1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).add_with_convert(-1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2, 721000), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2 + Rational(721, 1000)), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).add_with_convert(-1).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214322, TimeOrDateTime.new(1143214323).add_with_convert(-1).to_orig) + + if RubyCoreSupport.time_supports_negative + assert_equal(Time.utc(1969, 12, 31, 23, 59, 59), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0)).add_with_convert(-1).to_orig) + assert_equal(-1, TimeOrDateTime.new(0).add_with_convert(-1).to_orig) + assert_equal(Time.utc(1969, 12, 31, 23, 59, 59, 892000), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0, 892000)).add_with_convert(-1).to_orig) + + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(1901, 12, 13, 20, 45, 51), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52)).add_with_convert(-1).to_orig) + assert_equal(-2147483649, TimeOrDateTime.new(-2147483648).add_with_convert(-1).to_orig) + assert_equal(Time.utc(1901, 12, 13, 20, 45, 51, 892000), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52, 892000)).add_with_convert(-1).to_orig) + else + assert_equal(DateTime.new(1901, 12, 13, 20, 45, 51), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(1901, 12, 13, 20, 45, 51), TimeOrDateTime.new(-2147483648).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(1901, 12, 13, 20, 45, 51 + Rational(892,1000)), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52, 892000)).add_with_convert(-1).to_orig) + end + else + assert_equal(DateTime.new(1969, 12, 31, 23, 59, 59), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(1969, 12, 31, 23, 59, 59), TimeOrDateTime.new(0).add_with_convert(-1).to_orig) + assert_equal(RubyCoreSupport.datetime_new(1969, 12, 31, 23, 59, 59 + Rational(892,1000)), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0, 892000)).add_with_convert(-1).to_orig) + end + + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(2038, 1, 19, 3, 14, 8), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7)).add_with_convert(1).to_orig) + assert_equal(2147483648, TimeOrDateTime.new(2147483647).add_with_convert(1).to_orig) + assert_equal(Time.utc(2038, 1, 19, 3, 14, 8, 892000), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7, 892000)).add_with_convert(1).to_orig) + else + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7)).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8), TimeOrDateTime.new(2147483647).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8 + Rational(892,1000)), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7, 892000)).add_with_convert(1).to_orig) + + assert_equal(Time.utc(2038, 1, 19, 3, 14, 7, 892000), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 6, 892000)).add_with_convert(1).to_orig) + end + end + + def 
test_wrap_time + t = TimeOrDateTime.wrap(Time.utc(2006, 3, 24, 15, 32, 3)) + assert_instance_of(TimeOrDateTime, t) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), t.to_orig) + end + + def test_wrap_datetime + t = TimeOrDateTime.wrap(DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_instance_of(TimeOrDateTime, t) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), t.to_orig) + end + + def test_wrap_timestamp + t = TimeOrDateTime.wrap(1143214323) + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + end + + def test_wrap_timestamp_str + t = TimeOrDateTime.wrap('1143214323') + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + end + + def test_wrap_timeordatetime + t = TimeOrDateTime.new(1143214323) + t2 = TimeOrDateTime.wrap(t) + assert_same(t, t2) + end + + def test_wrap_block_time + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), TimeOrDateTime.wrap(Time.utc(2006, 3, 24, 15, 32, 3)) {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), t.to_orig) + t + 1 + }) + end + + def test_wrap_block_datetime + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), TimeOrDateTime.wrap(DateTime.new(2006, 3, 24, 15, 32, 3)) {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), t.to_orig) + t + 1 + }) + end + + def test_wrap_block_timestamp + assert_equal(1143214324, TimeOrDateTime.wrap(1143214323) {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + t + 1 + }) + end + + def test_wrap_block_timestamp_str + assert_equal(1143214324, TimeOrDateTime.wrap('1143214323') {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + t + 1 + }) + end + + def test_wrap_block_timeordatetime + t1 = TimeOrDateTime.new(1143214323) + + t2 = TimeOrDateTime.wrap(t1) {|t| + assert_same(t1, t) + t + 1 + } + + assert t2 + assert_instance_of(TimeOrDateTime, t2) + assert_equal(1143214324, t2.to_orig) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone.rb new file mode 100644 index 0000000..0dc0611 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone.rb @@ -0,0 +1,1361 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. 
+send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCTimezone < Minitest::Test + + class BlockCalled < StandardError + end + + class TestTimezone < Timezone + def self.new(identifier, period_for_utc = nil, periods_for_local = nil, expected = nil) + t = super() + t.send(:setup, identifier, period_for_utc, periods_for_local, expected) + t + end + + def identifier + @identifier + end + + def period_for_utc(utc) + utc = TimeOrDateTime.wrap(utc) + raise "Unexpected utc #{utc} in period_for_utc" unless @expected.eql?(utc) + @period_for_utc + end + + def periods_for_local(local) + local = TimeOrDateTime.wrap(local) + raise "Unexpected local #{local} in periods_for_local" unless @expected.eql?(local) + @periods_for_local.clone + end + + def transitions_up_to(utc_to, utc_from = nil) + raise 'transitions_up_to called' + end + + private + def setup(identifier, period_for_utc, periods_for_local, expected) + @identifier = identifier + @period_for_utc = period_for_utc + @periods_for_local = periods_for_local || [] + @expected = TimeOrDateTime.wrap(expected) + end + end + + class OffsetsUpToTestTimezone < Timezone + def self.new(identifier, expected_utc_to, expected_utc_from, transitions_up_to) + t = super() + t.send(:setup, identifier, expected_utc_to, expected_utc_from, transitions_up_to) + t + end + + def identifier + @identifier + end + + def period_for_utc(utc) + raise 'period_for_utc called' + end + + def periods_for_local(local) + raise 'periods_for_local called' + end + + def transitions_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + raise "Unexpected utc_to #{utc_to || 'nil'} in transitions_up_to" unless @expected_utc_to.eql?(utc_to) + + utc_from = utc_from ? TimeOrDateTime.wrap(utc_from) : nil + raise "Unexpected utc_from #{utc_from || 'nil'} in transitions_up_to" unless @expected_utc_from.eql?(utc_from) + + if utc_from && utc_to <= utc_from + raise ArgumentError, 'utc_to must be greater than utc_from' + end + + @transitions_up_to + end + + private + + def setup(identifier, expected_utc_to, expected_utc_from, transitions_up_to) + @identifier = identifier + @expected_utc_to = TimeOrDateTime.wrap(expected_utc_to) + @expected_utc_from = expected_utc_from ? TimeOrDateTime.wrap(expected_utc_from) : nil + @transitions_up_to = transitions_up_to + end + end + + class OffsetsUpToNoTransitionsTestTimezone < Timezone + def self.new(identifier, expected_utc_to, expected_utc_from, period_for_utc) + t = super() + t.send(:setup, identifier, expected_utc_to, expected_utc_from, period_for_utc) + t + end + + def identifier + @identifier + end + + def period_for_utc(utc) + utc = TimeOrDateTime.wrap(utc) + + raise "Unexpected utc #{utc} in period_for_utc (should be utc_from)" if @expected_utc_from && !@expected_utc_from.eql?(utc) + raise "Unexpected utc #{utc} in period_for_utc (should be < utc_to)" if !@expected_utc_from && @expected_utc_to <= utc + + @period_for_utc + end + + def periods_for_local(local) + raise 'periods_for_local called' + end + + def transitions_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + raise "Unexpected utc_to #{utc_to || 'nil'} in transitions_up_to" unless @expected_utc_to.eql?(utc_to) + + utc_from = utc_from ? 
TimeOrDateTime.wrap(utc_from) : nil + raise "Unexpected utc_from #{utc_from || 'nil'} in transitions_up_to" unless @expected_utc_from.eql?(utc_from) + + if utc_from && utc_to <= utc_from + raise ArgumentError, 'utc_to must be greater than utc_from' + end + + [] + end + + private + + def setup(identifier, expected_utc_to, expected_utc_from, period_for_utc) + @identifier = identifier + @expected_utc_to = TimeOrDateTime.wrap(expected_utc_to) + @expected_utc_from = expected_utc_from ? TimeOrDateTime.wrap(expected_utc_from) : nil + @period_for_utc = period_for_utc + end + end + + class TestTimezoneTransition < TimezoneTransition + def initialize(offset, previous_offset, at) + super(offset, previous_offset) + @at = TimeOrDateTime.wrap(at) + end + + def at + @at + end + end + + def setup + @orig_default_dst = Timezone.default_dst + @orig_data_source = DataSource.get + Timezone.send :init_loaded_zones + end + + def teardown + Timezone.default_dst = @orig_default_dst + DataSource.set(@orig_data_source) + end + + def test_default_dst_initial_value + assert_nil(Timezone.default_dst) + end + + def test_set_default_dst + Timezone.default_dst = true + assert_equal(true, Timezone.default_dst) + Timezone.default_dst = false + assert_equal(false, Timezone.default_dst) + Timezone.default_dst = nil + assert_nil(Timezone.default_dst) + Timezone.default_dst = 0 + assert_equal(true, Timezone.default_dst) + end + + def test_get_valid_1 + tz = Timezone.get('Europe/London') + + assert_kind_of(DataTimezone, tz) + assert_equal('Europe/London', tz.identifier) + end + + def test_get_valid_2 + tz = Timezone.get('UTC') + + # ZoneinfoDataSource doesn't return LinkedTimezoneInfo for any timezone. + if DataSource.get.load_timezone_info('UTC').kind_of?(LinkedTimezoneInfo) + assert_kind_of(LinkedTimezone, tz) + else + assert_kind_of(DataTimezone, tz) + end + + assert_equal('UTC', tz.identifier) + end + + def test_get_valid_3 + tz = Timezone.get('America/Argentina/Buenos_Aires') + + assert_kind_of(DataTimezone, tz) + assert_equal('America/Argentina/Buenos_Aires', tz.identifier) + end + + def test_get_same_instance + tz1 = Timezone.get('Europe/London') + tz2 = Timezone.get('Europe/London') + assert_same(tz1, tz2) + end + + def test_get_not_exist + assert_raises(InvalidTimezoneIdentifier) { Timezone.get('Nowhere/Special') } + end + + def test_get_invalid + assert_raises(InvalidTimezoneIdentifier) { Timezone.get('../Definitions/UTC') } + end + + def test_get_nil + assert_raises(InvalidTimezoneIdentifier) { Timezone.get(nil) } + end + + def test_get_case + Timezone.get('Europe/Prague') + assert_raises(InvalidTimezoneIdentifier) { Timezone.get('Europe/prague') } + end + + def test_get_proxy_valid + proxy = Timezone.get_proxy('Europe/London') + assert_kind_of(TimezoneProxy, proxy) + assert_equal('Europe/London', proxy.identifier) + end + + def test_get_proxy_not_exist + proxy = Timezone.get_proxy('Not/There') + assert_kind_of(TimezoneProxy, proxy) + assert_equal('Not/There', proxy.identifier) + end + + def test_get_proxy_invalid + proxy = Timezone.get_proxy('../Invalid/Identifier') + assert_kind_of(TimezoneProxy, proxy) + assert_equal('../Invalid/Identifier', proxy.identifier) + end + + def test_get_tainted_loaded + Timezone.get('Europe/Andorra') + + safe_test(:unavailable => :skip) do + identifier = 'Europe/Andorra'.dup.taint + assert(identifier.tainted?) + tz = Timezone.get(identifier) + assert_equal('Europe/Andorra', tz.identifier) + assert(identifier.tainted?) 
+ end + end + + def test_get_tainted_and_frozen_loaded + Timezone.get('Europe/Andorra') + + safe_test do + tz = Timezone.get('Europe/Andorra'.dup.taint.freeze) + assert_equal('Europe/Andorra', tz.identifier) + end + end + + def test_get_tainted_not_previously_loaded + skip_if_has_bug_14060 + + safe_test(:unavailable => :skip) do + identifier = 'Europe/Andorra'.dup.taint + assert(identifier.tainted?) + tz = Timezone.get(identifier) + assert_equal('Europe/Andorra', tz.identifier) + assert(identifier.tainted?) + end + end + + def test_get_tainted_and_frozen_not_previously_loaded + skip_if_has_bug_14060 + + safe_test do + tz = Timezone.get('Europe/Amsterdam'.dup.taint.freeze) + assert_equal('Europe/Amsterdam', tz.identifier) + end + end + + def test_new_no_args + tz = Timezone.new + + assert_raises(UnknownTimezone) { tz.identifier } + assert_raises(UnknownTimezone) { tz.friendly_identifier } + assert_raises(UnknownTimezone) { tz.utc_to_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.local_to_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.periods_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.now } + assert_raises(UnknownTimezone) { tz.current_period_and_time } + assert_raises(UnknownTimezone) { tz.transitions_up_to(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.canonical_identifier } + assert_raises(UnknownTimezone) { tz.canonical_zone } + end + + def test_new_nil + tz = Timezone.new(nil) + + assert_raises(UnknownTimezone) { tz.identifier } + assert_raises(UnknownTimezone) { tz.friendly_identifier } + assert_raises(UnknownTimezone) { tz.utc_to_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.local_to_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.periods_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.now } + assert_raises(UnknownTimezone) { tz.current_period_and_time } + assert_raises(UnknownTimezone) { tz.transitions_up_to(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.canonical_identifier } + assert_raises(UnknownTimezone) { tz.canonical_zone } + end + + def test_new_arg + tz = Timezone.new('Europe/London') + assert_same(Timezone.get('Europe/London'), tz) + end + + def test_new_arg_not_exist + assert_raises(InvalidTimezoneIdentifier) { Timezone.new('Nowhere/Special') } + end + + def test_all + all = Timezone.all + expected = DataSource.get.timezone_identifiers.collect {|identifier| Timezone.get_proxy(identifier)} + assert_equal(expected, all) + end + + def test_all_identifiers + all = Timezone.all_identifiers + assert_equal(DataSource.get.timezone_identifiers, all) + end + + def test_all_data_zones + all_data = Timezone.all_data_zones + expected = DataSource.get.data_timezone_identifiers.collect {|identifier| Timezone.get_proxy(identifier)} + assert_equal(expected, all_data) + end + + def test_all_data_zone_identifiers + all_data = Timezone.all_data_zone_identifiers + assert_equal(DataSource.get.data_timezone_identifiers, all_data) + end + + def test_all_linked_zones + all_linked = Timezone.all_linked_zones + expected = 
DataSource.get.linked_timezone_identifiers.collect {|identifier| Timezone.get_proxy(identifier)} + assert_equal(expected, all_linked) + end + + def test_all_linked_zone_identifiers + all_linked = Timezone.all_linked_zone_identifiers + assert_equal(DataSource.get.linked_timezone_identifiers, all_linked) + end + + def test_all_country_zones + # Probably should relax this test - just need all the zones, don't care + # about order. + expected = Country.all.inject([]) {|result,country| + result += country.zones + } + expected.uniq! + + all_country_zones = Timezone.all_country_zones + assert_equal(expected, all_country_zones) + + all_country_zone_identifiers = Timezone.all_country_zone_identifiers + assert_equal(all_country_zone_identifiers.length, all_country_zones.length) + + all_country_zones.each {|zone| + assert_kind_of(TimezoneProxy, zone) + assert(all_country_zone_identifiers.include?(zone.identifier)) + } + end + + def test_all_country_zone_identifiers + # Probably should relax this test - just need all the zones, don't care + # about order. + expected = Country.all.inject([]) {|result,country| + result += country.zone_identifiers + } + expected.uniq! + + assert_equal(expected, Timezone.all_country_zone_identifiers) + end + + def test_us_zones + # Probably should relax this test - just need all the zones, don't care + # about order. + us_zones = Timezone.us_zones + assert_equal(Country.get('US').zones.uniq, us_zones) + + us_zone_identifiers = Timezone.us_zone_identifiers + assert_equal(us_zone_identifiers.length, us_zones.length) + + us_zones.each {|zone| + assert_kind_of(TimezoneProxy, zone) + assert(us_zone_identifiers.include?(zone.identifier)) + } + end + + def test_us_zone_identifiers + # Probably should relax this test - just need all the zones, don't care + # about order. 
+ assert_equal(Country.get('US').zone_identifiers.uniq, Timezone.us_zone_identifiers) + end + + def test_identifier + assert_raises(UnknownTimezone) { Timezone.new.identifier } + assert_equal('Europe/Paris', TestTimezone.new('Europe/Paris').identifier) + end + + def test_name + assert_raises(UnknownTimezone) { Timezone.new.name } + assert_equal('Europe/Paris', TestTimezone.new('Europe/Paris').name) + end + + def test_friendly_identifier + assert_equal('Paris', TestTimezone.new('Europe/Paris').friendly_identifier(true)) + assert_equal('Europe - Paris', TestTimezone.new('Europe/Paris').friendly_identifier(false)) + assert_equal('Europe - Paris', TestTimezone.new('Europe/Paris').friendly_identifier) + assert_equal('Knox, Indiana', TestTimezone.new('America/Indiana/Knox').friendly_identifier(true)) + assert_equal('America - Knox, Indiana', TestTimezone.new('America/Indiana/Knox').friendly_identifier(false)) + assert_equal('America - Knox, Indiana', TestTimezone.new('America/Indiana/Knox').friendly_identifier) + assert_equal('Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').friendly_identifier(true)) + assert_equal('Antarctica - Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').friendly_identifier(false)) + assert_equal('Antarctica - Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').friendly_identifier) + assert_equal('McMurdo', TestTimezone.new('Antarctica/McMurdo').friendly_identifier(true)) + assert_equal('Antarctica - McMurdo', TestTimezone.new('Antarctica/McMurdo').friendly_identifier(false)) + assert_equal('Antarctica - McMurdo', TestTimezone.new('Antarctica/McMurdo').friendly_identifier) + assert_equal('GMT+1', TestTimezone.new('Etc/GMT+1').friendly_identifier(true)) + assert_equal('Etc - GMT+1', TestTimezone.new('Etc/GMT+1').friendly_identifier(false)) + assert_equal('Etc - GMT+1', TestTimezone.new('Etc/GMT+1').friendly_identifier) + assert_equal('UTC', TestTimezone.new('UTC').friendly_identifier(true)) + assert_equal('UTC', TestTimezone.new('UTC').friendly_identifier(false)) + assert_equal('UTC', TestTimezone.new('UTC').friendly_identifier) + end + + if defined?(Encoding) + def test_friendly_identifier_non_binary_encoding + refute_equal(Encoding::ASCII_8BIT, TestTimezone.new('Europe/Paris').friendly_identifier(true).encoding) + refute_equal(Encoding::ASCII_8BIT, TestTimezone.new('Europe/Paris').friendly_identifier(false).encoding) + end + end + + def test_to_s + assert_equal('Europe - Paris', TestTimezone.new('Europe/Paris').to_s) + assert_equal('America - Knox, Indiana', TestTimezone.new('America/Indiana/Knox').to_s) + assert_equal('Antarctica - Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').to_s) + assert_equal('Antarctica - McMurdo', TestTimezone.new('Antarctica/McMurdo').to_s) + assert_equal('Etc - GMT+1', TestTimezone.new('Etc/GMT+1').to_s) + assert_equal('UTC', TestTimezone.new('UTC').to_s) + end + + def test_period_for_local + dt = DateTime.new(2005,2,18,16,24,23) + dt2 = DateTime.new(2005,2,18,16,24,23).new_offset(Rational(5,24)) + dt3 = DateTime.new(2005,2,18,16,24,23 + Rational(789, 1000)) + t = Time.utc(2005,2,18,16,24,23) + t2 = Time.local(2005,2,18,16,24,23) + t3 = Time.utc(2005,2,18,16,24,23,789000) + ts = t.to_i + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o1, o2, 1099184400), + TestTimezoneTransition.new(o2, o1, 1111885200)) + + dt_period = TestTimezone.new('Europe/London', nil, [period], 
dt).period_for_local(dt) + dt2_period = TestTimezone.new('Europe/London', nil, [period], dt2).period_for_local(dt2) + dt3_period = TestTimezone.new('Europe/London', nil, [period], dt3).period_for_local(dt3) + t_period = TestTimezone.new('Europe/London', nil, [period], t).period_for_local(t) + t2_period = TestTimezone.new('Europe/London', nil, [period], t2).period_for_local(t2) + t3_period = TestTimezone.new('Europe/London', nil, [period], t3).period_for_local(t3) + ts_period = TestTimezone.new('Europe/London', nil, [period], ts).period_for_local(ts) + + assert_equal(period, dt_period) + assert_equal(period, dt2_period) + assert_equal(period, dt3_period) + assert_equal(period, t_period) + assert_equal(period, t2_period) + assert_equal(period, t3_period) + assert_equal(period, ts_period) + end + + def test_period_for_local_invalid + dt = DateTime.new(2004,4,4,2,30,0) + tz = TestTimezone.new('America/New_York', nil, [], dt) + + assert_raises(PeriodNotFound) do + tz.period_for_local(dt) + end + end + + def test_period_for_local_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,0,0) + dt2 = DateTime.new(2004,10,31,1,0,Rational(555,1000)) + t = Time.utc(2004,10,31,1,30,0) + t2 = Time.utc(2004,10,31,1,30,0,555000) + i = Time.utc(2004,10,31,1,59,59).to_i + + dt_tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + dt2_tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt2) + t_tz = TestTimezone.new('America/New_York', nil, [p1, p2], t) + t2_tz = TestTimezone.new('America/New_York', nil, [p1, p2], t2) + i_tz = TestTimezone.new('America/New_York', nil, [p1, p2], i) + + assert_raises(AmbiguousTime) { dt_tz.period_for_local(dt) } + assert_raises(AmbiguousTime) { dt2_tz.period_for_local(dt2) } + assert_raises(AmbiguousTime) { t_tz.period_for_local(t) } + assert_raises(AmbiguousTime) { t2_tz.period_for_local(t2) } + assert_raises(AmbiguousTime) { i_tz.period_for_local(i) } + end + + def test_period_for_local_not_found + dt = DateTime.new(2004,4,4,2,0,0) + dt2 = DateTime.new(2004,4,4,2,0,Rational(987,1000)) + t = Time.utc(2004,4,4,2,30,0) + t2 = Time.utc(2004,4,4,2,30,0,987000) + i = Time.utc(2004,4,4,2,59,59).to_i + + dt_tz = TestTimezone.new('America/New_York', nil, [], dt) + dt2_tz = TestTimezone.new('America/New_York', nil, [], dt2) + t_tz = TestTimezone.new('America/New_York', nil, [], t) + t2_tz = TestTimezone.new('America/New_York', nil, [], t2) + i_tz = TestTimezone.new('America/New_York', nil, [], i) + + assert_raises(PeriodNotFound) { dt_tz.period_for_local(dt) } + assert_raises(PeriodNotFound) { dt2_tz.period_for_local(dt2) } + assert_raises(PeriodNotFound) { t_tz.period_for_local(t) } + assert_raises(PeriodNotFound) { t2_tz.period_for_local(t2) } + assert_raises(PeriodNotFound) { i_tz.period_for_local(i) } + end + + def test_period_for_local_default_dst_set_true + Timezone.default_dst = true + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = 
TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(p1, tz.period_for_local(dt)) + assert_equal(p1, tz.period_for_local(dt, true)) + assert_equal(p2, tz.period_for_local(dt, false)) + assert_raises(AmbiguousTime) { tz.period_for_local(dt, nil) } + end + + def test_period_for_local_default_dst_set_false + Timezone.default_dst = false + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(p2, tz.period_for_local(dt)) + assert_equal(p1, tz.period_for_local(dt, true)) + assert_equal(p2, tz.period_for_local(dt, false)) + assert_raises(AmbiguousTime) { tz.period_for_local(dt, nil) } + end + + def test_period_for_local_dst_flag_resolved + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(p1, tz.period_for_local(dt, true)) + assert_equal(p2, tz.period_for_local(dt, false)) + assert_equal(p1, tz.period_for_local(dt, true) {|periods| raise BlockCalled, 'should not be called' }) + assert_equal(p2, tz.period_for_local(dt, false) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_period_for_local_dst_block_called + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(BlockCalled) { + tz.period_for_local(dt) {|periods| + assert_equal([p1, p2], periods) + + # raise exception to test that the block was called + raise BlockCalled, 'should be raised' + } + } + + assert_equal(p1, tz.period_for_local(dt) {|periods| periods.first}) + assert_equal(p2, tz.period_for_local(dt) {|periods| periods.last}) + assert_equal(p1, tz.period_for_local(dt) {|periods| [periods.first]}) + assert_equal(p2, tz.period_for_local(dt) {|periods| [periods.last]}) + end + + def test_period_for_local_dst_cannot_resolve + # At midnight local time on Aug 5 1915 in Warsaw, the clocks were put back + # 24 minutes and both periods were non-DST. Hence the block should be + # called regardless of the value of the Boolean dst parameter. 
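+ # (Illustrative note, not part of the original tzinfo test: because neither
+ # candidate period here is DST, a caller has to disambiguate with the block
+ # form, e.g. tz.period_for_local(dt) {|periods| periods.first}; the
+ # assertions below check that the block is invoked even when the dst
+ # argument is supplied.)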
+ + o0 = TimezoneOffset.new(5040, 0, :LMT) + o1 = TimezoneOffset.new(5040, 0, :WMT) + o2 = TimezoneOffset.new(3600, 0, :CET) + o3 = TimezoneOffset.new(3600, 3600, :CEST) + + t1 = TestTimezoneTransition.new(o1, o0, DateTime.new(1879, 12, 31, 22, 36, 0)) + t2 = TestTimezoneTransition.new(o2, o1, DateTime.new(1915, 8, 4, 22, 36, 0)) + t3 = TestTimezoneTransition.new(o3, o2, DateTime.new(1916, 4, 30, 22, 0, 0)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(1915,8,4,23,40,0) + + tz = TestTimezone.new('Europe/Warsaw', nil, [p1, p2], dt) + + assert_raises(BlockCalled) { + tz.period_for_local(dt, true) {|periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + } + } + + assert_raises(BlockCalled) { + tz.period_for_local(dt, false) {|periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + } + } + end + + def test_period_for_local_block_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| nil} + end + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| periods} + end + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| []} + end + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| raise AmbiguousTime, 'Ambiguous time'} + end + end + + def test_utc_to_local + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5,24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5,24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + ts = t.to_i + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(DateTime.new(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], dt).utc_to_local(dt)) + assert_equal(DateTime.new(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], dt2).utc_to_local(dt2)) + assert_equal(DateTime.new(2005,6,18,17,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', period, [], dtu).utc_to_local(dtu)) + assert_equal(DateTime.new(2005,6,18,17,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', period, [], dtu2).utc_to_local(dtu2)) + assert_equal(Time.utc(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], t).utc_to_local(t)) + assert_equal(Time.utc(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], t2).utc_to_local(t2)) + assert_equal(Time.utc(2005,6,18,17,24,23,567000), TestTimezone.new('Europe/London', period, [], tu).utc_to_local(tu)) + assert_equal(Time.utc(2005,6,18,17,24,23,567000), TestTimezone.new('Europe/London', period, [], tu2).utc_to_local(tu2)) + assert_equal(Time.utc(2005,6,18,17,24,23).to_i, TestTimezone.new('Europe/London', period, [], ts).utc_to_local(ts)) + end + + def 
test_utc_to_local_offset + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5,24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5,24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(0, TestTimezone.new('Europe/London', period, [], dt).utc_to_local(dt).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], dt2).utc_to_local(dt2).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], dtu).utc_to_local(dtu).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], dtu2).utc_to_local(dtu2).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], t).utc_to_local(t).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], t).utc_to_local(t).utc?) + assert_equal(0, TestTimezone.new('Europe/London', period, [], t2).utc_to_local(t2).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], t2).utc_to_local(t2).utc?) + assert_equal(0, TestTimezone.new('Europe/London', period, [], tu).utc_to_local(tu).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], tu).utc_to_local(tu).utc?) + assert_equal(0, TestTimezone.new('Europe/London', period, [], tu2).utc_to_local(tu2).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], tu2).utc_to_local(tu2).utc?) + end + + def test_local_to_utc + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5, 24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5, 24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + ts = t.to_i + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(DateTime.new(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], dt).local_to_utc(dt)) + assert_equal(DateTime.new(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], dt2).local_to_utc(dt2)) + assert_equal(DateTime.new(2005,6,18,15,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', nil, [period], dtu).local_to_utc(dtu)) + assert_equal(DateTime.new(2005,6,18,15,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', nil, [period], dtu2).local_to_utc(dtu2)) + assert_equal(Time.utc(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], t).local_to_utc(t)) + assert_equal(Time.utc(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], t2).local_to_utc(t2)) + assert_equal(Time.utc(2005,6,18,15,24,23,567000), TestTimezone.new('Europe/London', nil, [period], tu).local_to_utc(tu)) + assert_equal(Time.utc(2005,6,18,15,24,23,567000), TestTimezone.new('Europe/London', nil, [period], tu2).local_to_utc(tu2)) + assert_equal(Time.utc(2005,6,18,15,24,23).to_i, TestTimezone.new('Europe/London', nil, [period], 
ts).local_to_utc(ts)) + end + + def test_local_to_utc_offset + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5, 24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5, 24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dt).local_to_utc(dt).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dt2).local_to_utc(dt2).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dtu).local_to_utc(dtu).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dtu2).local_to_utc(dtu2).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], t).local_to_utc(t).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], t).local_to_utc(t).utc?) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], t2).local_to_utc(t2).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], t2).local_to_utc(t2).utc?) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], tu).local_to_utc(tu).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], tu).local_to_utc(tu).utc?) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], tu2).local_to_utc(tu2).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], tu2).local_to_utc(tu2).utc?) + end + + def test_local_to_utc_utc_local_returns_utc + # Check that UTC time instances are always returned even if the system + # is using UTC as the time zone. + + # Note that this test will only work correctly on platforms where + # setting the TZ environment variable has an effect. If setting TZ has no + # effect, then this test will still pass. + + old_tz = ENV['TZ'] + begin + ENV['TZ'] = 'UTC' + + tz = Timezone.get('America/New_York') + + t = tz.local_to_utc(Time.local(2014, 1, 11, 17, 18, 41)) + assert_equal(Time.utc(2014, 1, 11, 22, 18, 41), t) + assert(t.utc?)
+ ensure + ENV['TZ'] = old_tz + end + end + + def test_local_to_utc_invalid + dt = DateTime.new(2004,4,4,2,30,0) + tz = TestTimezone.new('America/New_York', nil, [], dt) + assert_raises(PeriodNotFound) { tz.local_to_utc(dt) } + + t = Time.utc(2004,4,4,2,30,0) + tz = TestTimezone.new('America/New_York', nil, [], t) + assert_raises(PeriodNotFound) { tz.local_to_utc(t) } + + i = Time.utc(2004,4,4,2,30,0).to_i + tz = TestTimezone.new('America/New_York', nil, [], i) + assert_raises(PeriodNotFound) { tz.local_to_utc(i) } + end + + def test_local_to_utc_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) } + + t = Time.utc(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], t) + assert_raises(AmbiguousTime) { tz.local_to_utc(t) } + + i = Time.utc(2004,10,31,1,30,0).to_i + tz = TestTimezone.new('America/New_York', nil, [p1, p2], i) + assert_raises(AmbiguousTime) { tz.local_to_utc(i) } + + f = Time.utc(2004,10,31,1,30,0,501).to_i + tz = TestTimezone.new('America/New_York', nil, [p1, p2], f) + assert_raises(AmbiguousTime) { tz.local_to_utc(f) } + end + + def test_local_to_utc_not_found + dt = DateTime.new(2004,4,4,2,0,0) + t = Time.utc(2004,4,4,2,30,0) + i = Time.utc(2004,4,4,2,59,59).to_i + + dt_tz = TestTimezone.new('America/New_York', nil, [], dt) + t_tz = TestTimezone.new('America/New_York', nil, [], t) + i_tz = TestTimezone.new('America/New_York', nil, [], i) + + assert_raises(PeriodNotFound) { dt_tz.local_to_utc(dt) } + assert_raises(PeriodNotFound) { t_tz.local_to_utc(t) } + assert_raises(PeriodNotFound) { i_tz.local_to_utc(i) } + end + + def test_local_to_utc_default_dst_set_true + Timezone.default_dst = true + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt)) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true)) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false)) + assert_raises(AmbiguousTime) { tz.local_to_utc(dt, nil) } + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_local_to_utc_default_dst_set_false + Timezone.default_dst = false + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt)) + 
assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false)) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true)) + assert_raises(AmbiguousTime) { tz.local_to_utc(dt, nil) } + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_local_to_utc_dst_flag_resolved + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true)) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false)) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true) {|periods| raise BlockCalled, 'should not be called' }) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_local_to_utc_dst_block_called + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(BlockCalled) { + tz.local_to_utc(dt) {|periods| + assert_equal([p1, p2], periods) + + # raise exception to test that the block was called + raise BlockCalled, 'should be raised' + } + } + + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt) {|periods| periods.first}) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt) {|periods| periods.last}) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt) {|periods| [periods.first]}) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt) {|periods| [periods.last]}) + end + + def test_local_to_utc_dst_cannot_resolve + # At midnight local time on Aug 5 1915 in Warsaw, the clocks were put back + # 24 minutes and both periods were non-DST. Hence the block should be + # called regardless of the value of the Boolean dst parameter. 
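+ # (Illustrative arithmetic, not part of the original tzinfo test: WMT is
+ # UTC+01:24 (5040 seconds) and CET is UTC+01:00, so the ambiguous local time
+ # 1915-08-04 23:40 converts to either 22:16 UTC or 22:40 UTC, matching the
+ # block-resolved assertions at the end of this test.)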
+ + o0 = TimezoneOffset.new(5040, 0, :LMT) + o1 = TimezoneOffset.new(5040, 0, :WMT) + o2 = TimezoneOffset.new(3600, 0, :CET) + o3 = TimezoneOffset.new(3600, 3600, :CEST) + + t1 = TestTimezoneTransition.new(o1, o0, DateTime.new(1879, 12, 31, 22, 36, 0)) + t2 = TestTimezoneTransition.new(o2, o1, DateTime.new(1915, 8, 4, 22, 36, 0)) + t3 = TestTimezoneTransition.new(o3, o2, DateTime.new(1916, 4, 30, 22, 0, 0)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(1915,8,4,23,40,0) + + tz = TestTimezone.new('Europe/Warsaw', nil, [p1, p2], dt) + + assert_raises(BlockCalled) do + tz.local_to_utc(dt, true) do |periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + end + end + + assert_raises(BlockCalled) do + tz.local_to_utc(dt, false) do |periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + end + end + + assert_equal(DateTime.new(1915,8,4,22,16,0), tz.local_to_utc(dt) {|periods| periods.first}) + assert_equal(DateTime.new(1915,8,4,22,40,0), tz.local_to_utc(dt) {|periods| periods.last}) + assert_equal(DateTime.new(1915,8,4,22,16,0), tz.local_to_utc(dt) {|periods| [periods.first]}) + assert_equal(DateTime.new(1915,8,4,22,40,0), tz.local_to_utc(dt) {|periods| [periods.last]}) + end + + def test_local_to_utc_block_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| nil} } + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| periods} } + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| []} } + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| raise AmbiguousTime, 'Ambiguous time'} } + end + + def test_offsets_up_to + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + o5 = TimezoneOffset.new(-21600, 0, :TESTS) + + t1 = TestTimezoneTransition.new(o2, o1, Time.utc(2010, 4,1,1,0,0).to_i) + t2 = TestTimezoneTransition.new(o3, o2, Time.utc(2010,10,1,1,0,0).to_i) + t3 = TestTimezoneTransition.new(o2, o3, Time.utc(2011, 3,1,1,0,0).to_i) + t4 = TestTimezoneTransition.new(o4, o2, Time.utc(2011, 4,1,1,0,0).to_i) + t5 = TestTimezoneTransition.new(o3, o4, Time.utc(2011,10,1,1,0,0).to_i) + t6 = TestTimezoneTransition.new(o5, o3, Time.utc(2012, 3,1,1,0,0).to_i) + + assert_array_same_items([o1, o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1), nil, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,0), [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,0))) + assert_array_same_items([o1, o2, o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0), nil, [t1, t2, t3, t4, t5]). 
+ offsets_up_to(Time.utc(2012,3,1,1,0,0))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,1), [t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,1))) + assert_array_same_items([o2, o3], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2011,3,1,2,0,0), Time.utc(2011,3,1,0,0,0), [t3]). + offsets_up_to(Time.utc(2011,3,1,2,0,0), Time.utc(2011,3,1,0,0,0))) + assert_array_same_items([o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0), Time.utc(2011,4,1,1,0,0), [t4, t5]). + offsets_up_to(Time.utc(2012,3,1,1,0,0), Time.utc(2011,4,1,1,0,0))) + + assert_array_same_items([o1, o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1).to_i, nil, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1).to_i)) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,0).to_i, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,0).to_i)) + assert_array_same_items([o1, o2, o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0).to_i, nil, [t1, t2, t3, t4, t5]). + offsets_up_to(Time.utc(2012,3,1,1,0,0).to_i)) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,1).to_i, [t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,1).to_i)) + assert_array_same_items([o2, o3], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2011,3,1,2,0,0).to_i, Time.utc(2011,3,1,0,0,0).to_i, [t3]). + offsets_up_to(Time.utc(2011,3,1,2,0,0).to_i, Time.utc(2011,3,1,0,0,0).to_i)) + assert_array_same_items([o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0).to_i, Time.utc(2011,4,1,1,0,0).to_i, [t4, t5]). + offsets_up_to(Time.utc(2012,3,1,1,0,0).to_i, Time.utc(2011,4,1,1,0,0).to_i)) + + assert_array_same_items([o1, o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,1), nil, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(DateTime.new(2012,3,1,1,0,1))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,0), [t1, t2, t3, t4, t5, t6]). + offsets_up_to(DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,0))) + assert_array_same_items([o1, o2, o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,0), nil, [t1, t2, t3, t4, t5]). + offsets_up_to(DateTime.new(2012,3,1,1,0,0))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,1), [t2, t3, t4, t5, t6]). + offsets_up_to(DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,1))) + assert_array_same_items([o2, o3], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2011,3,1,2,0,0), DateTime.new(2011,3,1,0,0,0), [t3]). + offsets_up_to(DateTime.new(2011,3,1,2,0,0), DateTime.new(2011,3,1,0,0,0))) + assert_array_same_items([o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,0), DateTime.new(2011,4,1,1,0,0), [t4, t5]). 
+ offsets_up_to(DateTime.new(2012,3,1,1,0,0), DateTime.new(2011,4,1,1,0,0))) + end + + def test_offsets_up_to_no_transitions + o = TimezoneOffset.new(600, 0, :LMT) + p = TimezonePeriod.new(nil, nil, o) + + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0), nil, p). + offsets_up_to(Time.utc(2000,1,1,1,0,0))) + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0), Time.utc(1990,1,1,1,0,0), p). + offsets_up_to(Time.utc(2000,1,1,1,0,0), Time.utc(1990,1,1,1,0,0))) + + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0).to_i, nil, p). + offsets_up_to(Time.utc(2000,1,1,1,0,0).to_i)) + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0).to_i, Time.utc(1990,1,1,1,0,0).to_i, p). + offsets_up_to(Time.utc(2000,1,1,1,0,0).to_i, Time.utc(1990,1,1,1,0,0).to_i)) + + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', DateTime.new(2000,1,1,1,0,0), nil, p). + offsets_up_to(DateTime.new(2000,1,1,1,0,0))) + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', DateTime.new(2000,1,1,1,0,0), DateTime.new(1990,1,1,1,0,0), p). + offsets_up_to(DateTime.new(2000,1,1,1,0,0), DateTime.new(1990,1,1,1,0,0))) + end + + def test_offsets_up_to_utc_to_not_greater_than_utc_from + assert_raises(ArgumentError) do + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,8,1,0,0,0), Time.utc(2012,8,1,0,0,0), []). + offsets_up_to(Time.utc(2012,8,1,0,0,0), Time.utc(2012,8,1,0,0,0)) + end + + assert_raises(ArgumentError) do + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,8,1,0,0,0).to_i, Time.utc(2012,8,1,0,0,0).to_i, []). + offsets_up_to(Time.utc(2012,8,1,0,0,0).to_i, Time.utc(2012,8,1,0,0,0).to_i) + end + + assert_raises(ArgumentError) do + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,8,1,0,0,0), DateTime.new(2012,8,1,0,0,0), []). 
+ offsets_up_to(DateTime.new(2012,8,1,0,0,0), DateTime.new(2012,8,1,0,0,0)) + end + end + + def test_now + assert_kind_of(Time, Timezone.get('Europe/London').now) + end + + def test_current_period + assert_kind_of(TimezonePeriod, Timezone.get('Europe/London').current_period) + end + + def test_current_period_and_time + current = Timezone.get('Europe/London').current_period_and_time + assert_equal(2, current.length) + assert_kind_of(Time, current[0]) + assert_kind_of(TimezonePeriod, current[1]) + end + + def test_current_time_and_period + current = Timezone.get('Europe/London').current_time_and_period + assert_equal(2, current.length) + assert_kind_of(Time, current[0]) + assert_kind_of(TimezonePeriod, current[1]) + end + + def test_compare + assert_equal(0, TestTimezone.new('Europe/London') <=> TestTimezone.new('Europe/London')) + assert_equal(-1, TestTimezone.new('Europe/London') <=> TestTimezone.new('Europe/london')) + assert_equal(-1, TestTimezone.new('Europe/London') <=> TestTimezone.new('Europe/Paris')) + assert_equal(1, TestTimezone.new('Europe/Paris') <=> TestTimezone.new('Europe/London')) + assert_equal(-1, TestTimezone.new('America/New_York') <=> TestTimezone.new('Europe/Paris')) + assert_equal(1, TestTimezone.new('Europe/Paris') <=> TestTimezone.new('America/New_York')) + end + + def test_compare_non_comparable + assert_nil(TestTimezone.new('Europe/London') <=> Object.new) + end + + def test_equality + assert_equal(true, TestTimezone.new('Europe/London') == TestTimezone.new('Europe/London')) + assert_equal(false, TestTimezone.new('Europe/London') == TestTimezone.new('Europe/london')) + assert_equal(false, TestTimezone.new('Europe/London') == TestTimezone.new('Europe/Paris')) + assert(!(TestTimezone.new('Europe/London') == Object.new)) + end + + def test_eql + assert_equal(true, TestTimezone.new('Europe/London').eql?(TestTimezone.new('Europe/London'))) + assert_equal(false, TestTimezone.new('Europe/London').eql?(TestTimezone.new('Europe/london'))) + assert_equal(false, TestTimezone.new('Europe/London').eql?(TestTimezone.new('Europe/Paris'))) + assert(!TestTimezone.new('Europe/London').eql?(Object.new)) + end + + def test_hash + assert_equal('Europe/London'.hash, TestTimezone.new('Europe/London').hash) + assert_equal('America/New_York'.hash, TestTimezone.new('America/New_York').hash) + end + + def test_marshal_data + tz = Timezone.get('Europe/London') + assert_kind_of(DataTimezone, tz) + assert_same(tz, Marshal.load(Marshal.dump(tz))) + end + + def test_marshal_linked + tz = Timezone.get('UTC') + + # ZoneinfoDataSource doesn't return LinkedTimezoneInfo for any timezone. 
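+ # (Clarifying comment added here, not part of the original tzinfo test: some
+ # data sources define 'UTC' as a link, commonly to 'Etc/UTC', while
+ # ZoneinfoDataSource exposes it as a plain data zone, so the branch below
+ # accepts either a LinkedTimezone or a DataTimezone.)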
+ if DataSource.get.load_timezone_info('UTC').kind_of?(LinkedTimezoneInfo) + assert_kind_of(LinkedTimezone, tz) + else + assert_kind_of(DataTimezone, tz) + end + + assert_same(tz, Marshal.load(Marshal.dump(tz))) + end + + def test_strftime_datetime + tz = Timezone.get('Europe/London') + dt = DateTime.new(2006, 7, 15, 22, 12, 2) + assert_equal('23:12:02 BST', tz.strftime('%H:%M:%S %Z', dt)) + assert_equal('BST', tz.strftime('%Z', dt)) + assert_equal('%ZBST', tz.strftime('%%Z%Z', dt)) + assert_equal('BST BST', tz.strftime('%Z %Z', dt)) + assert_equal('BST %Z %BST %%Z %%BST', tz.strftime('%Z %%Z %%%Z %%%%Z %%%%%Z', dt)) + assert_equal('+0100 +01:00 +01:00:00 +01 %::::z', tz.strftime('%z %:z %::z %:::z %::::z', dt)) + assert_equal('1153001522 %s %1153001522', tz.strftime('%s %%s %%%s', dt)) + end + + def test_strftime_time + tz = Timezone.get('Europe/London') + t = Time.utc(2006, 7, 15, 22, 12, 2) + assert_equal('23:12:02 BST', tz.strftime('%H:%M:%S %Z', t)) + assert_equal('BST', tz.strftime('%Z', t)) + assert_equal('%ZBST', tz.strftime('%%Z%Z', t)) + assert_equal('BST BST', tz.strftime('%Z %Z', t)) + assert_equal('BST %Z %BST %%Z %%BST', tz.strftime('%Z %%Z %%%Z %%%%Z %%%%%Z', t)) + assert_equal('+0100 +01:00 +01:00:00 +01 %::::z', tz.strftime('%z %:z %::z %:::z %::::z', t)) + assert_equal('1153001522 %s %1153001522', tz.strftime('%s %%s %%%s', t)) + end + + def test_strftime_int + tz = Timezone.get('Europe/London') + i = Time.utc(2006, 7, 15, 22, 12, 2).to_i + assert_equal('23:12:02 BST', tz.strftime('%H:%M:%S %Z', i)) + assert_equal('BST', tz.strftime('%Z', i)) + assert_equal('%ZBST', tz.strftime('%%Z%Z', i)) + assert_equal('BST BST', tz.strftime('%Z %Z', i)) + assert_equal('BST %Z %BST %%Z %%BST', tz.strftime('%Z %%Z %%%Z %%%%Z %%%%%Z', i)) + assert_equal('+0100 +01:00 +01:00:00 +01 %::::z', tz.strftime('%z %:z %::z %:::z %::::z', i)) + assert_equal('1153001522 %s %1153001522', tz.strftime('%s %%s %%%s', i)) + end + + def test_get_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.get('Europe/London') + end + end + + def test_new_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.new('Europe/London') + end + end + + def test_all_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all + end + end + + def test_all_identifiers_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_identifiers + end + end + + def test_all_data_zones_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_data_zones + end + end + + def test_all_data_zone_identifiers_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_data_zone_identifiers + end + end + + def test_all_linked_zones_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_linked_zones + end + end + + def test_all_linked_zone_identifiers_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_linked_zone_identifiers + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_definition.rb new file mode 100644 index 0000000..f7f2388 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_definition.rb @@ 
-0,0 +1,113 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneDefinition < Minitest::Test + + module DataTest + include TimezoneDefinition + + timezone 'Test/Data/Zone' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + end + + module LinkedTest + include TimezoneDefinition + + linked_timezone 'Test/Linked/Zone', 'Test/Linked_To/Zone' + end + + module DoubleDataTest + include TimezoneDefinition + + timezone 'Test/Data/Zone1' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + + timezone 'Test/Data/Zone2' do |tz| + tz.offset :o0, 75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + end + + module DoubleLinkedTest + include TimezoneDefinition + + linked_timezone 'Test/Linked/Zone1', 'Test/Linked_To/Zone1' + linked_timezone 'Test/Linked/Zone2', 'Test/Linked_To/Zone2' + end + + module DataLinkedTest + include TimezoneDefinition + + timezone 'Test/Data/Zone1' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + + linked_timezone 'Test/Linked/Zone2', 'Test/Linked_To/Zone2' + end + + module LinkedDataTest + include TimezoneDefinition + + linked_timezone 'Test/Linked/Zone1', 'Test/Linked_To/Zone1' + + timezone 'Test/Data/Zone2' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + end + + def test_data + assert_kind_of(TransitionDataTimezoneInfo, DataTest.get) + assert_equal('Test/Data/Zone', DataTest.get.identifier) + assert_equal(:LMT, DataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,14)).abbreviation) + assert_equal(:GMT, DataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,15)).abbreviation) + end + + def test_linked + assert_kind_of(LinkedTimezoneInfo, LinkedTest.get) + assert_equal('Test/Linked/Zone', LinkedTest.get.identifier) + assert_equal('Test/Linked_To/Zone', LinkedTest.get.link_to_identifier) + end + + def test_double_data + assert_kind_of(TransitionDataTimezoneInfo, DoubleDataTest.get) + assert_equal('Test/Data/Zone2', DoubleDataTest.get.identifier) + assert_equal(:LMT, DoubleDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,14)).abbreviation) + assert_equal(:GMT, DoubleDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,15)).abbreviation) + end + + def test_double_linked + assert_kind_of(LinkedTimezoneInfo, DoubleLinkedTest.get) + assert_equal('Test/Linked/Zone2', DoubleLinkedTest.get.identifier) + assert_equal('Test/Linked_To/Zone2', DoubleLinkedTest.get.link_to_identifier) + end + + def test_data_linked + assert_kind_of(LinkedTimezoneInfo, DataLinkedTest.get) + assert_equal('Test/Linked/Zone2', DataLinkedTest.get.identifier) + assert_equal('Test/Linked_To/Zone2', DataLinkedTest.get.link_to_identifier) + end + + def test_linked_data + assert_kind_of(TransitionDataTimezoneInfo, LinkedDataTest.get) + assert_equal('Test/Data/Zone2', LinkedDataTest.get.identifier) + assert_equal(:LMT, LinkedDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,14)).abbreviation) + assert_equal(:GMT, LinkedDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,15)).abbreviation) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_index_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_index_definition.rb new file mode 100644 index 
0000000..cd55dd9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_index_definition.rb @@ -0,0 +1,73 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneIndexDefinition < Minitest::Test + + module TimezonesTest1 + include TimezoneIndexDefinition + + timezone 'Test/One' + timezone 'Test/Two' + linked_timezone 'Test/Three' + timezone 'Another/Zone' + linked_timezone 'And/Yet/Another' + end + + module TimezonesTest2 + include TimezoneIndexDefinition + + timezone 'Test/A/One' + timezone 'Test/A/Two' + timezone 'Test/A/Three' + end + + module TimezonesTest3 + include TimezoneIndexDefinition + + linked_timezone 'Test/B/One' + linked_timezone 'Test/B/Two' + linked_timezone 'Test/B/Three' + end + + module TimezonesTest4 + include TimezoneIndexDefinition + + end + + def test_timezones + assert_equal(['Test/One', 'Test/Two', 'Test/Three', 'Another/Zone', 'And/Yet/Another'], TimezonesTest1.timezones) + assert_equal(['Test/A/One', 'Test/A/Two', 'Test/A/Three'], TimezonesTest2.timezones) + assert_equal(['Test/B/One', 'Test/B/Two', 'Test/B/Three'], TimezonesTest3.timezones) + assert_equal([], TimezonesTest4.timezones) + + assert_equal(true, TimezonesTest1.timezones.frozen?) + assert_equal(true, TimezonesTest2.timezones.frozen?) + assert_equal(true, TimezonesTest3.timezones.frozen?) + assert_equal(true, TimezonesTest4.timezones.frozen?) + end + + def test_data_timezones + assert_equal(['Test/One', 'Test/Two', 'Another/Zone'], TimezonesTest1.data_timezones) + assert_equal(['Test/A/One', 'Test/A/Two', 'Test/A/Three'], TimezonesTest2.data_timezones) + assert_equal([], TimezonesTest3.data_timezones) + assert_equal([], TimezonesTest4.data_timezones) + + assert_equal(true, TimezonesTest1.data_timezones.frozen?) + assert_equal(true, TimezonesTest2.data_timezones.frozen?) + assert_equal(true, TimezonesTest3.data_timezones.frozen?) + assert_equal(true, TimezonesTest4.data_timezones.frozen?) + end + + def test_linked_timezones + assert_equal(['Test/Three', 'And/Yet/Another'], TimezonesTest1.linked_timezones) + assert_equal([], TimezonesTest2.linked_timezones) + assert_equal(['Test/B/One', 'Test/B/Two', 'Test/B/Three'], TimezonesTest3.linked_timezones) + assert_equal([], TimezonesTest4.linked_timezones) + + assert_equal(true, TimezonesTest1.linked_timezones.frozen?) + assert_equal(true, TimezonesTest2.linked_timezones.frozen?) + assert_equal(true, TimezonesTest3.linked_timezones.frozen?) + assert_equal(true, TimezonesTest4.linked_timezones.frozen?) 
+ end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_info.rb new file mode 100644 index 0000000..fd70929 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_info.rb @@ -0,0 +1,11 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneInfo < Minitest::Test + + def test_identifier + ti = TimezoneInfo.new('Test/Zone') + assert_equal('Test/Zone', ti.identifier) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_london.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_london.rb new file mode 100644 index 0000000..a0599b7 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_london.rb @@ -0,0 +1,143 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneLondon < Minitest::Test + def test_2004 + #Europe/London Sun Mar 28 00:59:59 2004 UTC = Sun Mar 28 00:59:59 2004 GMT isdst=0 gmtoff=0 + #Europe/London Sun Mar 28 01:00:00 2004 UTC = Sun Mar 28 02:00:00 2004 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 31 00:59:59 2004 UTC = Sun Oct 31 01:59:59 2004 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 31 01:00:00 2004 UTC = Sun Oct 31 01:00:00 2004 GMT isdst=0 gmtoff=0 + + tz = Timezone.get('Europe/London') + assert_equal(DateTime.new(2004,3,28,0,59,59), tz.utc_to_local(DateTime.new(2004,3,28,0,59,59))) + assert_equal(DateTime.new(2004,3,28,2,0,0), tz.utc_to_local(DateTime.new(2004,3,28,1,0,0))) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.utc_to_local(DateTime.new(2004,10,31,0,59,59))) + assert_equal(DateTime.new(2004,10,31,1,0,0), tz.utc_to_local(DateTime.new(2004,10,31,1,0,0))) + + assert_equal(DateTime.new(2004,3,28,0,59,59), tz.local_to_utc(DateTime.new(2004,3,28,0,59,59))) + assert_equal(DateTime.new(2004,3,28,1,0,0), tz.local_to_utc(DateTime.new(2004,3,28,2,0,0))) + assert_equal(DateTime.new(2004,10,31,0,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), true)) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), false)) + assert_equal(DateTime.new(2004,10,31,0,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), true)) + assert_equal(DateTime.new(2004,10,31,1,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(2004,3,28,1,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(2004,10,31,1,0,0)) } + + assert_equal(:GMT, tz.period_for_utc(DateTime.new(2004,3,28,0,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(2004,3,28,1,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(2004,10,31,0,59,59)).zone_identifier) + assert_equal(:GMT, tz.period_for_utc(DateTime.new(2004,10,31,1,0,0)).zone_identifier) + + assert_equal(:GMT, tz.period_for_local(DateTime.new(2004,3,28,0,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(2004,3,28,2,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).zone_identifier) + + 
assert_equal(0, tz.period_for_utc(DateTime.new(2004,3,28,0,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(2004,3,28,1,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(2004,10,31,0,59,59)).utc_total_offset) + assert_equal(0, tz.period_for_utc(DateTime.new(2004,10,31,1,0,0)).utc_total_offset) + + assert_equal(0, tz.period_for_local(DateTime.new(2004,3,28,0,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(2004,3,28,2,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,3,28,1,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,10,31,1,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(0, 0, :GMT), TimezoneOffset.new(0, 3600, :BST)], offsets) + end + + def test_1961 + # This test cannot be run when using ZoneinfoDataSource on platforms + # that don't support Times before the epoch (i.e. Ruby < 1.9 on Windows) + # because it relates to the year 1961. 
+ + if !DataSource.get.kind_of?(ZoneinfoDataSource) || RubyCoreSupport.time_supports_negative + #Europe/London Sun Mar 26 01:59:59 1961 UTC = Sun Mar 26 01:59:59 1961 GMT isdst=0 gmtoff=0 + #Europe/London Sun Mar 26 02:00:00 1961 UTC = Sun Mar 26 03:00:00 1961 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 29 01:59:59 1961 UTC = Sun Oct 29 02:59:59 1961 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 29 02:00:00 1961 UTC = Sun Oct 29 02:00:00 1961 GMT isdst=0 gmtoff=0 + + tz = Timezone.get('Europe/London') + assert_equal(DateTime.new(1961,3,26,1,59,59), tz.utc_to_local(DateTime.new(1961,3,26,1,59,59))) + assert_equal(DateTime.new(1961,3,26,3,0,0), tz.utc_to_local(DateTime.new(1961,3,26,2,0,0))) + assert_equal(DateTime.new(1961,10,29,2,59,59), tz.utc_to_local(DateTime.new(1961,10,29,1,59,59))) + assert_equal(DateTime.new(1961,10,29,2,0,0), tz.utc_to_local(DateTime.new(1961,10,29,2,0,0))) + + assert_equal(DateTime.new(1961,3,26,1,59,59), tz.local_to_utc(DateTime.new(1961,3,26,1,59,59))) + assert_equal(DateTime.new(1961,3,26,2,0,0), tz.local_to_utc(DateTime.new(1961,3,26,3,0,0))) + assert_equal(DateTime.new(1961,10,29,1,59,59), tz.local_to_utc(DateTime.new(1961,10,29,2,59,59), true)) + assert_equal(DateTime.new(1961,10,29,2,59,59), tz.local_to_utc(DateTime.new(1961,10,29,2,59,59), false)) + assert_equal(DateTime.new(1961,10,29,1,0,0), tz.local_to_utc(DateTime.new(1961,10,29,2,0,0), true)) + assert_equal(DateTime.new(1961,10,29,2,0,0), tz.local_to_utc(DateTime.new(1961,10,29,2,0,0), false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(1961,3,26,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(1961,10,29,2,0,0)) } + + assert_equal(:GMT, tz.period_for_utc(DateTime.new(1961,3,26,1,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(1961,3,26,2,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(1961,10,29,1,59,59)).zone_identifier) + assert_equal(:GMT, tz.period_for_utc(DateTime.new(1961,10,29,2,0,0)).zone_identifier) + + assert_equal(:GMT, tz.period_for_local(DateTime.new(1961,3,26,1,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(1961,3,26,3,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), false).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), false).zone_identifier) + + assert_equal(0, tz.period_for_utc(DateTime.new(1961,3,26,1,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(1961,3,26,2,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(1961,10,29,1,59,59)).utc_total_offset) + assert_equal(0, tz.period_for_utc(DateTime.new(1961,10,29,2,0,0)).utc_total_offset) + + assert_equal(0, tz.period_for_local(DateTime.new(1961,3,26,1,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(1961,3,26,3,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), false).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), false).utc_total_offset) + + + transitions = 
tz.transitions_up_to(DateTime.new(1962,1,1,0,0,0), DateTime.new(1961,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(1961,3,26,2,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(1961,10,29,2,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(1962,1,1,0,0,0), DateTime.new(1961,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(0, 0, :GMT), TimezoneOffset.new(0, 3600, :BST)], offsets) + end + end + + def test_time_boundary + #Europe/London Sat Oct 26 23:00:00 1968 UTC = Sun Oct 27 00:00:00 1968 GMT isdst=0 gmtoff=3600 + #Europe/London Sun Oct 31 01:59:59 1971 UTC = Sun Oct 31 02:59:59 1971 GMT isdst=0 gmtoff=3600 + + tz = Timezone.get('Europe/London') + assert_equal(DateTime.new(1970,1,1,1,0,0), tz.utc_to_local(DateTime.new(1970,1,1,0,0,0))) + assert_equal(DateTime.new(1970,1,1,0,0,0), tz.local_to_utc(DateTime.new(1970,1,1,1,0,0))) + assert_equal(Time.utc(1970,1,1,1,0,0), tz.utc_to_local(Time.utc(1970,1,1,0,0,0))) + assert_equal(Time.utc(1970,1,1,0,0,0), tz.local_to_utc(Time.utc(1970,1,1,1,0,0))) + assert_equal(3600, tz.utc_to_local(0)) + assert_equal(0, tz.local_to_utc(3600)) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_melbourne.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_melbourne.rb new file mode 100644 index 0000000..bbfa883 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_melbourne.rb @@ -0,0 +1,142 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneMelbourne < Minitest::Test + def test_2004 + #Australia/Melbourne Sat Mar 27 15:59:59 2004 UTC = Sun Mar 28 02:59:59 2004 AEDT isdst=1 gmtoff=39600 + #Australia/Melbourne Sat Mar 27 16:00:00 2004 UTC = Sun Mar 28 02:00:00 2004 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Oct 30 15:59:59 2004 UTC = Sun Oct 31 01:59:59 2004 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Oct 30 16:00:00 2004 UTC = Sun Oct 31 03:00:00 2004 AEDT isdst=1 gmtoff=39600 + + tz = Timezone.get('Australia/Melbourne') + assert_equal(DateTime.new(2004,3,28,2,59,59), tz.utc_to_local(DateTime.new(2004,3,27,15,59,59))) + assert_equal(DateTime.new(2004,3,28,2,0,0), tz.utc_to_local(DateTime.new(2004,3,27,16,0,0))) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.utc_to_local(DateTime.new(2004,10,30,15,59,59))) + assert_equal(DateTime.new(2004,10,31,3,0,0), tz.utc_to_local(DateTime.new(2004,10,30,16,0,0))) + + assert_equal(DateTime.new(2004,3,27,15,59,59), tz.local_to_utc(DateTime.new(2004,3,28,2,59,59), true)) + assert_equal(DateTime.new(2004,3,27,16,59,59), tz.local_to_utc(DateTime.new(2004,3,28,2,59,59), false)) + assert_equal(DateTime.new(2004,3,27,15,0,0), tz.local_to_utc(DateTime.new(2004,3,28,2,0,0), true)) + assert_equal(DateTime.new(2004,3,27,16,0,0), tz.local_to_utc(DateTime.new(2004,3,28,2,0,0), false)) + assert_equal(DateTime.new(2004,10,30,15,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59))) + assert_equal(DateTime.new(2004,10,30,16,0,0), tz.local_to_utc(DateTime.new(2004,10,31,3,0,0))) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(2004,10,31,2,0,0)) } + 
assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(2004,3,28,2,0,0)) } + + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(2004,3,27,15,59,59)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(2004,3,27,16,0,0)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(2004,10,30,15,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(2004,10,30,16,0,0)).zone_identifier) + + assert_equal(:AEDT, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), false).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), false).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(2004,10,31,1,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(2004,10,31,3,0,0)).zone_identifier) + + assert_equal(39600, tz.period_for_utc(DateTime.new(2004,3,27,15,59,59)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(2004,3,27,16,0,0)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(2004,10,30,15,59,59)).utc_total_offset) + assert_equal(39600, tz.period_for_utc(DateTime.new(2004,10,30,16,0,0)).utc_total_offset) + + assert_equal(39600, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), false).utc_total_offset) + assert_equal(39600, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), false).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(2004,10,31,1,59,59)).utc_total_offset) + assert_equal(39600, tz.period_for_local(DateTime.new(2004,10,31,3,0,0)).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,3,27,16,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,10,30,16,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(36000, 0, :AEST), TimezoneOffset.new(36000, 3600, :AEDT)], offsets) + end + + def test_1942 + # This test cannot be run when using ZoneinfoDataSource on platforms + # that don't support Times before the epoch (i.e. Ruby < 1.9 on Windows) + # because it relates to the year 1942. 
+ + if !DataSource.get.kind_of?(ZoneinfoDataSource) || RubyCoreSupport.time_supports_negative + #Australia/Melbourne Sat Mar 28 14:59:59 1942 UTC = Sun Mar 29 01:59:59 1942 AEDT isdst=1 gmtoff=39600 + #Australia/Melbourne Sat Mar 28 15:00:00 1942 UTC = Sun Mar 29 01:00:00 1942 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Sep 26 15:59:59 1942 UTC = Sun Sep 27 01:59:59 1942 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Sep 26 16:00:00 1942 UTC = Sun Sep 27 03:00:00 1942 AEDT isdst=1 gmtoff=39600 + + tz = Timezone.get('Australia/Melbourne') + assert_equal(DateTime.new(1942,3,29,1,59,59), tz.utc_to_local(DateTime.new(1942,3,28,14,59,59))) + assert_equal(DateTime.new(1942,3,29,1,0,0), tz.utc_to_local(DateTime.new(1942,3,28,15,0,0))) + assert_equal(DateTime.new(1942,9,27,1,59,59), tz.utc_to_local(DateTime.new(1942,9,26,15,59,59))) + assert_equal(DateTime.new(1942,9,27,3,0,0), tz.utc_to_local(DateTime.new(1942,9,26,16,0,0))) + + assert_equal(DateTime.new(1942,3,28,14,59,59), tz.local_to_utc(DateTime.new(1942,3,29,1,59,59), true)) + assert_equal(DateTime.new(1942,3,28,15,59,59), tz.local_to_utc(DateTime.new(1942,3,29,1,59,59), false)) + assert_equal(DateTime.new(1942,3,28,14,0,0), tz.local_to_utc(DateTime.new(1942,3,29,1,0,0), true)) + assert_equal(DateTime.new(1942,3,28,15,0,0), tz.local_to_utc(DateTime.new(1942,3,29,1,0,0), false)) + assert_equal(DateTime.new(1942,9,26,15,59,59), tz.local_to_utc(DateTime.new(1942,9,27,1,59,59))) + assert_equal(DateTime.new(1942,9,26,16,0,0), tz.local_to_utc(DateTime.new(1942,9,27,3,0,0))) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(1942,9,27,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(1942,3,29,1,0,0)) } + + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(1942,3,28,14,59,59)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(1942,3,28,15,0,0)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(1942,9,26,15,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(1942,9,26,16,0,0)).zone_identifier) + + assert_equal(:AEDT, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), false).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), false).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(1942,9,27,1,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(1942,9,27,3,0,0)).zone_identifier) + + assert_equal(39600, tz.period_for_utc(DateTime.new(1942,3,28,14,59,59)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(1942,3,28,15,0,0)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(1942,9,26,15,59,59)).utc_total_offset) + assert_equal(39600, tz.period_for_utc(DateTime.new(1942,9,26,16,0,0)).utc_total_offset) + + assert_equal(39600, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), false).utc_total_offset) + assert_equal(39600, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), false).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(1942,9,27,1,59,59)).utc_total_offset) + assert_equal(39600, 
tz.period_for_local(DateTime.new(1942,9,27,3,0,0)).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(1943,1,1,0,0,0), DateTime.new(1942,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(1942,3,28,15,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(1942,9,26,16,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(1943,1,1,0,0,0), DateTime.new(1942,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(36000, 0, :AEST), TimezoneOffset.new(36000, 3600, :AEDT)], offsets) + end + end + + def test_time_boundary + #Australia/Melbourne Sat Mar 25 15:00:00 1944 UTC = Sun Mar 26 01:00:00 1944 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Oct 30 15:59:59 1971 UTC = Sun Oct 31 01:59:59 1971 AEST isdst=0 gmtoff=36000 + + tz = Timezone.get('Australia/Melbourne') + assert_equal(DateTime.new(1970,1,1,10,0,0), tz.utc_to_local(DateTime.new(1970,1,1,0,0,0))) + assert_equal(DateTime.new(1970,1,1,0,0,0), tz.local_to_utc(DateTime.new(1970,1,1,10,0,0))) + assert_equal(Time.utc(1970,1,1,10,0,0), tz.utc_to_local(Time.utc(1970,1,1,0,0,0))) + assert_equal(Time.utc(1970,1,1,0,0,0), tz.local_to_utc(Time.utc(1970,1,1,10,0,0))) + assert_equal(36000, tz.utc_to_local(0)) + assert_equal(0, tz.local_to_utc(36000)) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_new_york.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_new_york.rb new file mode 100644 index 0000000..3e270a9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_new_york.rb @@ -0,0 +1,142 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneNewYork < Minitest::Test + def test_2004 + #America/New_York Sun Apr 4 06:59:59 2004 UTC = Sun Apr 4 01:59:59 2004 EST isdst=0 gmtoff=-18000 + #America/New_York Sun Apr 4 07:00:00 2004 UTC = Sun Apr 4 03:00:00 2004 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 31 05:59:59 2004 UTC = Sun Oct 31 01:59:59 2004 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 31 06:00:00 2004 UTC = Sun Oct 31 01:00:00 2004 EST isdst=0 gmtoff=-18000 + + tz = Timezone.get('America/New_York') + assert_equal(DateTime.new(2004,4,4,1,59,59), tz.utc_to_local(DateTime.new(2004,4,4,6,59,59))) + assert_equal(DateTime.new(2004,4,4,3,0,0), tz.utc_to_local(DateTime.new(2004,4,4,7,0,0))) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.utc_to_local(DateTime.new(2004,10,31,5,59,59))) + assert_equal(DateTime.new(2004,10,31,1,0,0), tz.utc_to_local(DateTime.new(2004,10,31,6,0,0))) + + assert_equal(DateTime.new(2004,4,4,6,59,59), tz.local_to_utc(DateTime.new(2004,4,4,1,59,59))) + assert_equal(DateTime.new(2004,4,4,7,0,0), tz.local_to_utc(DateTime.new(2004,4,4,3,0,0))) + assert_equal(DateTime.new(2004,10,31,5,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), true)) + assert_equal(DateTime.new(2004,10,31,6,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), false)) + assert_equal(DateTime.new(2004,10,31,5,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), true)) + assert_equal(DateTime.new(2004,10,31,6,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), 
false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(2004,4,4,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(2004,10,31,1,0,0)) } + + assert_equal(:EST, tz.period_for_utc(DateTime.new(2004,4,4,6,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(2004,4,4,7,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(2004,10,31,5,59,59)).zone_identifier) + assert_equal(:EST, tz.period_for_utc(DateTime.new(2004,10,31,6,0,0)).zone_identifier) + + assert_equal(:EST, tz.period_for_local(DateTime.new(2004,4,4,1,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(2004,4,4,3,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).zone_identifier) + + assert_equal(-18000, tz.period_for_utc(DateTime.new(2004,4,4,6,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(2004,4,4,7,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(2004,10,31,5,59,59)).utc_total_offset) + assert_equal(-18000, tz.period_for_utc(DateTime.new(2004,10,31,6,0,0)).utc_total_offset) + + assert_equal(-18000, tz.period_for_local(DateTime.new(2004,4,4,1,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(2004,4,4,3,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).utc_total_offset) + assert_equal(-18000, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).utc_total_offset) + assert_equal(-18000, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,4,4,7,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,10,31,6,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(-18000, 0, :EST), TimezoneOffset.new(-18000, 3600, :EDT)], offsets) + end + + def test_1957 + # This test cannot be run when using ZoneinfoDataSource on platforms + # that don't support Times before the epoch (i.e. Ruby < 1.9 on Windows) + # because it relates to the year 1957. 
+ + if !DataSource.get.kind_of?(ZoneinfoDataSource) || RubyCoreSupport.time_supports_negative + #America/New_York Sun Apr 28 06:59:59 1957 UTC = Sun Apr 28 01:59:59 1957 EST isdst=0 gmtoff=-18000 + #America/New_York Sun Apr 28 07:00:00 1957 UTC = Sun Apr 28 03:00:00 1957 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 27 05:59:59 1957 UTC = Sun Oct 27 01:59:59 1957 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 27 06:00:00 1957 UTC = Sun Oct 27 01:00:00 1957 EST isdst=0 gmtoff=-18000 + + tz = Timezone.get('America/New_York') + assert_equal(DateTime.new(1957,4,28,1,59,59), tz.utc_to_local(DateTime.new(1957,4,28,6,59,59))) + assert_equal(DateTime.new(1957,4,28,3,0,0), tz.utc_to_local(DateTime.new(1957,4,28,7,0,0))) + assert_equal(DateTime.new(1957,10,27,1,59,59), tz.utc_to_local(DateTime.new(1957,10,27,5,59,59))) + assert_equal(DateTime.new(1957,10,27,1,0,0), tz.utc_to_local(DateTime.new(1957,10,27,6,0,0))) + + assert_equal(DateTime.new(1957,4,28,6,59,59), tz.local_to_utc(DateTime.new(1957,4,28,1,59,59))) + assert_equal(DateTime.new(1957,4,28,7,0,0), tz.local_to_utc(DateTime.new(1957,4,28,3,0,0))) + assert_equal(DateTime.new(1957,10,27,5,59,59), tz.local_to_utc(DateTime.new(1957,10,27,1,59,59), true)) + assert_equal(DateTime.new(1957,10,27,6,59,59), tz.local_to_utc(DateTime.new(1957,10,27,1,59,59), false)) + assert_equal(DateTime.new(1957,10,27,5,0,0), tz.local_to_utc(DateTime.new(1957,10,27,1,0,0), true)) + assert_equal(DateTime.new(1957,10,27,6,0,0), tz.local_to_utc(DateTime.new(1957,10,27,1,0,0), false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(1957,4,28,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(1957,10,27,1,0,0)) } + + assert_equal(:EST, tz.period_for_utc(DateTime.new(1957,4,28,6,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(1957,4,28,7,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(1957,10,27,5,59,59)).zone_identifier) + assert_equal(:EST, tz.period_for_utc(DateTime.new(1957,10,27,6,0,0)).zone_identifier) + + assert_equal(:EST, tz.period_for_local(DateTime.new(1957,4,28,1,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(1957,4,28,3,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), false).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(1957,10,27,1,0,0), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(1957,10,27,1,0,0), false).zone_identifier) + + assert_equal(-18000, tz.period_for_utc(DateTime.new(1957,4,28,6,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(1957,4,28,7,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(1957,10,27,5,59,59)).utc_total_offset) + assert_equal(-18000, tz.period_for_utc(DateTime.new(1957,10,27,6,0,0)).utc_total_offset) + + assert_equal(-18000, tz.period_for_local(DateTime.new(1957,4,28,1,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(1957,4,28,3,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), true).utc_total_offset) + assert_equal(-18000, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), false).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(1957,10,27,1,0,0), true).utc_total_offset) + assert_equal(-18000, 
tz.period_for_local(DateTime.new(1957,10,27,1,0,0), false).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(1958,1,1,0,0,0), DateTime.new(1957,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(1957,4,28,7,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(1957,10,27,6,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(1958,1,1,0,0,0), DateTime.new(1957,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(-18000, 0, :EST), TimezoneOffset.new(-18000, 3600, :EDT)], offsets) + end + end + + def test_time_boundary + #America/New_York Sun Oct 26 06:00:00 1969 UTC = Sun Oct 26 01:00:00 1969 EST isdst=0 gmtoff=-18000 + #America/New_York Sun Apr 26 06:59:59 1970 UTC = Sun Apr 26 01:59:59 1970 EST isdst=0 gmtoff=-18000 + + tz = Timezone.get('America/New_York') + assert_equal(DateTime.new(1970,1,1,0,0,0), tz.utc_to_local(DateTime.new(1970,1,1,5,0,0))) + assert_equal(DateTime.new(1970,1,1,5,0,0), tz.local_to_utc(DateTime.new(1970,1,1,0,0,0))) + assert_equal(Time.utc(1970,1,1,0,0,0), tz.utc_to_local(Time.utc(1970,1,1,5,0,0))) + assert_equal(Time.utc(1970,1,1,5,0,0), tz.local_to_utc(Time.utc(1970,1,1,0,0,0))) + assert_equal(0, tz.utc_to_local(18000)) + assert_equal(18000, tz.local_to_utc(0)) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_offset.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_offset.rb new file mode 100644 index 0000000..6e29e4d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_offset.rb @@ -0,0 +1,126 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneOffset < Minitest::Test + + def test_utc_offset + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(18000, o1.utc_offset) + assert_equal(-3600, o2.utc_offset) + end + + def test_std_offset + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(0, o1.std_offset) + assert_equal(3600, o2.std_offset) + end + + def test_utc_total_offset + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(18000, o1.utc_total_offset) + assert_equal(0, o2.utc_total_offset) + end + + def test_abbreviation + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(:TEST, o1.abbreviation) + assert_equal(:TEST2, o2.abbreviation) + end + + def test_dst + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(false, o1.dst?) + assert_equal(true, o2.dst?) 
+ end + + def test_to_local + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(1148949080, o1.to_local(1148931080)) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), o1.to_local(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20, 782000), o1.to_local(Time.utc(2006, 5, 29, 19, 31, 20, 782000))) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), o1.to_local(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20 + Rational(782, 1000)), o1.to_local(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(782, 1000)))) + assert_equal(1148949080, o1.to_local(1148931080)) + assert(TimeOrDateTime.new(1148949080).eql?(o1.to_local(TimeOrDateTime.new(1148931080)))) + + assert_equal(1148931080, o2.to_local(1148931080)) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20, 123000), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20, 123000))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(123, 1000)), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(123, 1000)))) + assert_equal(1148931080, o2.to_local(1148931080)) + assert(TimeOrDateTime.new(1148931080).eql?(o2.to_local(TimeOrDateTime.new(1148931080)))) + end + + def test_to_utc + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(1148913080, o1.to_utc(1148931080)) + assert_equal(Time.utc(2006, 5, 29, 14, 31, 20), o1.to_utc(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 29, 14, 31, 20, 913000), o1.to_utc(Time.utc(2006, 5, 29, 19, 31, 20, 913000))) + assert_equal(DateTime.new(2006, 5, 29, 14, 31, 20), o1.to_utc(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 29, 14, 31, 20 + Rational(913,1000)), o1.to_utc(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(913,1000)))) + assert_equal(1148913080, o1.to_utc(1148931080)) + assert(TimeOrDateTime.new(1148913080).eql?(o1.to_utc(TimeOrDateTime.new(1148931080)))) + + assert_equal(1148931080, o2.to_local(1148931080)) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20, 323000), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20, 323000))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(323, 1000)), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(323, 1000)))) + assert_equal(1148931080, o2.to_utc(1148931080)) + assert(TimeOrDateTime.new(1148931080).eql?(o2.to_local(TimeOrDateTime.new(1148931080)))) + end + + def test_equality + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(18000, 0, :TEST) + o3 = TimezoneOffset.new(18001, 0, :TEST) + o4 = TimezoneOffset.new(18000, 1, :TEST) + o5 = TimezoneOffset.new(18000, 0, :TEST2) + + assert_equal(true, o1 == o1) + assert_equal(true, o1 == o2) + assert_equal(false, o1 == o3) + assert_equal(false, o1 == o4) + assert_equal(false, o1 == o5) + assert_equal(false, o1 == Object.new) + end + + def test_eql + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(18000, 0, :TEST) + o3 = TimezoneOffset.new(18001, 0, :TEST) + o4 = TimezoneOffset.new(18000, 1, :TEST) + o5 = TimezoneOffset.new(18000, 0, :TEST2) 
+ + assert_equal(true, o1.eql?(o1)) + assert_equal(true, o1.eql?(o2)) + assert_equal(false, o1.eql?(o3)) + assert_equal(false, o1.eql?(o4)) + assert_equal(false, o1.eql?(o5)) + assert_equal(false, o1.eql?(Object.new)) + end + + def test_hash + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(18000.hash ^ 0.hash ^ :TEST.hash, o1.hash) + assert_equal(-3600.hash ^ 3600.hash ^ :TEST2.hash, o2.hash) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_period.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_period.rb new file mode 100644 index 0000000..bcfb8d5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_period.rb @@ -0,0 +1,555 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezonePeriod < Minitest::Test + + class TestTimezoneTransition < TimezoneTransition + def initialize(offset, previous_offset, at) + super(offset, previous_offset) + @at = TimeOrDateTime.wrap(at) + end + + def at + @at + end + end + + def test_initialize_start_end + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + p = TimezonePeriod.new(start_t, end_t) + + assert_same(start_t, p.start_transition) + assert_same(end_t, p.end_transition) + assert_same(dst, p.offset) + assert_equal(DateTime.new(2006,1,1,0,0,0), p.utc_start) + assert_equal(Time.utc(2006,1,1,0,0,0), p.utc_start_time) + assert_equal(DateTime.new(2006,1,2,0,0,0), p.utc_end) + assert_equal(Time.utc(2006,1,2,0,0,0), p.utc_end_time) + assert_equal(-7200, p.utc_offset) + assert_equal(3600, p.std_offset) + assert_equal(-3600, p.utc_total_offset) + assert_equal(Rational(-3600, 86400), p.utc_total_offset_rational) + assert_equal(:TEST, p.zone_identifier) + assert_equal(:TEST, p.abbreviation) + assert_equal(DateTime.new(2005,12,31,23,0,0), p.local_start) + assert_equal(Time.utc(2005,12,31,23,0,0), p.local_start_time) + assert_equal(DateTime.new(2006,1,1,23,0,0), p.local_end) + assert_equal(Time.utc(2006,1,1,23,0,0), p.local_end_time) + end + + def test_initialize_start_end_offset + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + special = TimezoneOffset.new(0, 0, :SPECIAL) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + assert_raises(ArgumentError) { TimezonePeriod.new(start_t, end_t, special) } + end + + def test_initialize_start + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + + p = TimezonePeriod.new(start_t, nil) + + assert_same(start_t, p.start_transition) + assert_nil(p.end_transition) + assert_same(dst, p.offset) + assert_equal(DateTime.new(2006,1,1,0,0,0), p.utc_start) + assert_equal(Time.utc(2006,1,1,0,0,0), p.utc_start_time) + assert_nil(p.utc_end) + assert_nil(p.utc_end_time) + assert_equal(-7200, p.utc_offset) + assert_equal(3600, p.std_offset) + assert_equal(-3600, p.utc_total_offset) + assert_equal(Rational(-3600, 86400), p.utc_total_offset_rational) + assert_equal(:TEST, p.zone_identifier) + assert_equal(:TEST, p.abbreviation) + assert_equal(DateTime.new(2005,12,31,23,0,0), p.local_start) + assert_equal(Time.utc(2005,12,31,23,0,0), p.local_start_time) + assert_nil(p.local_end) + 
assert_nil(p.local_end_time) + end + + def test_initialize_start_offset + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + special = TimezoneOffset.new(0, 0, :SPECIAL) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + + assert_raises(ArgumentError) { TimezonePeriod.new(start_t, nil, special) } + end + + def test_initialize_end + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + p = TimezonePeriod.new(nil, end_t) + + assert_nil(p.start_transition) + assert_same(end_t, p.end_transition) + assert_same(dst, p.offset) + assert_nil(p.utc_start) + assert_nil(p.utc_start_time) + assert_equal(DateTime.new(2006,1,2,0,0,0), p.utc_end) + assert_equal(Time.utc(2006,1,2,0,0,0), p.utc_end_time) + assert_equal(-7200, p.utc_offset) + assert_equal(3600, p.std_offset) + assert_equal(-3600, p.utc_total_offset) + assert_equal(Rational(-3600, 86400), p.utc_total_offset_rational) + assert_equal(:TEST, p.zone_identifier) + assert_equal(:TEST, p.abbreviation) + assert_nil(p.local_start) + assert_nil(p.local_start_time) + assert_equal(DateTime.new(2006,1,1,23,0,0), p.local_end) + assert_equal(Time.utc(2006,1,1,23,0,0), p.local_end_time) + end + + def test_initialize_end_offset + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + special = TimezoneOffset.new(0, 0, :SPECIAL) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + assert_raises(ArgumentError) { TimezonePeriod.new(nil, end_t, special) } + end + + def test_initialize + assert_raises(ArgumentError) { TimezonePeriod.new(nil, nil) } + end + + def test_initialize_offset + special = TimezoneOffset.new(0, 0, :SPECIAL) + + p = TimezonePeriod.new(nil, nil, special) + + assert_nil(p.start_transition) + assert_nil(p.end_transition) + assert_same(special, p.offset) + assert_nil(p.utc_start) + assert_nil(p.utc_start_time) + assert_nil(p.utc_end) + assert_nil(p.utc_end_time) + assert_equal(0, p.utc_offset) + assert_equal(0, p.std_offset) + assert_equal(0, p.utc_total_offset) + assert_equal(Rational(0, 86400), p.utc_total_offset_rational) + assert_equal(:SPECIAL, p.zone_identifier) + assert_equal(:SPECIAL, p.abbreviation) + assert_nil(p.local_start) + assert_nil(p.local_start_time) + assert_nil(p.local_end) + assert_nil(p.local_end_time) + end + + def test_utc_total_offset_rational_after_freeze + o = TimezoneOffset.new(3600, 0, :TEST) + p = TimezonePeriod.new(nil, nil, o) + p.freeze + assert_equal(Rational(1, 24), p.utc_total_offset_rational) + end + + def test_dst + p1 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 3600, :TEST)) + p2 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 0, :TEST)) + p3 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, -3600, :TEST)) + p4 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 7200, :TEST)) + p5 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, -7200, :TEST)) + + assert_equal(true, p1.dst?) + assert_equal(false, p2.dst?) + assert_equal(true, p3.dst?) + assert_equal(true, p4.dst?) + assert_equal(true, p5.dst?) 
+ end + + def test_valid_for_utc + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104541261) + t2 = TestTimezoneTransition.new(offset, offset, 1107309722) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t1, nil) + p4 = TimezonePeriod.new(nil, nil, offset) + p5 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p1.valid_for_utc?(1104541262)) + assert_equal(true, p1.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p2.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p2.valid_for_utc?(1104541262)) + assert_equal(true, p2.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p2.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p2.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p2.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p2.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p3.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p3.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p3.valid_for_utc?(1104541262)) + assert_equal(true, p3.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p3.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p3.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p3.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p3.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p4.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p4.valid_for_utc?(1104541262)) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(false, p5.valid_for_utc?(Time.utc(2005,1,1,1,1,1))) + assert_equal(false, p5.valid_for_utc?(1104541262)) + end + + def test_utc_after_start + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104541261) + t2 = TestTimezoneTransition.new(offset, offset, 1107309722) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.utc_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.utc_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(false, p1.utc_after_start?(1104541260)) + assert_equal(true, 
p1.utc_after_start?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(false, p1.utc_after_start?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.utc_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.utc_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(true, p2.utc_after_start?(1104541260)) + + assert_equal(true, p3.utc_after_start?(Time.utc(2005,1,2,1,1,1))) + assert_equal(true, p3.utc_after_start?(1104627661)) + end + + def test_utc_before_end + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104541261) + t2 = TestTimezoneTransition.new(offset, offset, 1107309722) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t1, nil) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.utc_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p1.utc_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(false, p1.utc_before_end?(1107309723)) + assert_equal(false, p1.utc_before_end?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(true, p1.utc_before_end?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.utc_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p2.utc_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(true, p2.utc_before_end?(1107309723)) + + assert_equal(false, p3.utc_before_end?(Time.utc(2005,1,2,1,1,1))) + assert_equal(false, p3.utc_before_end?(1104627661)) + end + + def test_valid_for_local + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104544861) + t2 = TestTimezoneTransition.new(offset, offset, 1107313322) + t3 = TestTimezoneTransition.new(offset, offset, 1104544861) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 1, 1, 1, 1, 1)) + t5 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t3, nil) + p4 = TimezonePeriod.new(nil, nil, offset) + p5 = TimezonePeriod.new(t4, t5) + + assert_equal(true, p1.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.valid_for_local?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p1.valid_for_local?(1104541262)) + assert_equal(true, p1.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p1.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p1.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p1.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p1.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p2.valid_for_local?(1104541262)) + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p2.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p2.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p2.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p3.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p3.valid_for_local?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p3.valid_for_local?(1104541262)) + assert_equal(true, 
p3.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p3.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p3.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p3.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p3.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p4.valid_for_local?(1104541262)) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p4.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(false, p5.valid_for_utc?(Time.utc(2005,1,1,1,1,1))) + assert_equal(false, p5.valid_for_utc?(1104541262)) + end + + def test_local_after_start + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104544861) + t2 = TestTimezoneTransition.new(offset, offset, 1107313322) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.local_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.local_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(false, p1.local_after_start?(1104541260)) + assert_equal(true, p1.local_after_start?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(false, p1.local_after_start?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.local_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.local_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(true, p2.local_after_start?(1104541260)) + + assert_equal(true, p3.local_after_start?(Time.utc(2005,1,2,1,1,1))) + assert_equal(true, p3.local_after_start?(1104627661)) + end + + def test_local_before_end + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104544861) + t2 = TestTimezoneTransition.new(offset, offset, 1107313322) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t1, nil) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.local_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p1.local_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(false, p1.local_before_end?(1107309723)) + assert_equal(false, p1.local_before_end?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(true, p1.local_before_end?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.local_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p2.local_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(true, p2.local_before_end?(1107309723)) + + assert_equal(false, p3.local_before_end?(Time.utc(2005,1,2,1,1,1))) + assert_equal(false, p3.local_before_end?(1104627661)) + end + + def test_to_local + p1 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 3600, :TEST)) + p2 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 0, :TEST)) + p3 = 
TimezonePeriod.new(nil, nil, TimezoneOffset.new(7200, 3600, :TEST)) + + assert_equal(DateTime.new(2005,1,19,22,0,0), p1.to_local(DateTime.new(2005,1,20,1,0,0))) + assert_equal(DateTime.new(2005,1,19,22,0,0 + Rational(512,1000)), p1.to_local(DateTime.new(2005,1,20,1,0,0 + Rational(512,1000)))) + assert_equal(Time.utc(2005,1,19,21,0,0), p2.to_local(Time.utc(2005,1,20,1,0,0))) + assert_equal(Time.utc(2005,1,19,21,0,0,512000), p2.to_local(Time.utc(2005,1,20,1,0,0,512000))) + assert_equal(1106193600, p3.to_local(1106182800)) + end + + def test_to_utc + p1 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 3600, :TEST)) + p2 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 0, :TEST)) + p3 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(7200, 3600, :TEST)) + + assert_equal(DateTime.new(2005,1,20,4,0,0), p1.to_utc(DateTime.new(2005,1,20,1,0,0))) + assert_equal(DateTime.new(2005,1,20,4,0,0 + Rational(571,1000)), p1.to_utc(DateTime.new(2005,1,20,1,0,0 + Rational(571,1000)))) + assert_equal(Time.utc(2005,1,20,5,0,0), p2.to_utc(Time.utc(2005,1,20,1,0,0))) + assert_equal(Time.utc(2005,1,20,5,0,0,571000), p2.to_utc(Time.utc(2005,1,20,1,0,0,571000))) + assert_equal(1106172000, p3.to_utc(1106182800)) + end + + def test_time_boundary_start + o1 = TimezoneOffset.new(-3600, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 0) + + p1 = TimezonePeriod.new(t1, nil) + + assert_equal(DateTime.new(1969,12,31,23,0,0), p1.local_start) + + # Conversion to Time will fail on systems that don't support negative times. + if RubyCoreSupport.time_supports_negative + assert_equal(Time.utc(1969,12,31,23,0,0), p1.local_start_time) + end + end + + def test_time_boundary_end + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o2, o1, 2147482800) + + p1 = TimezonePeriod.new(nil, t1) + + assert_equal(DateTime.new(2038,1,19,4,0,0), p1.local_end) + + # Conversion to Time will fail on systems that don't support 64-bit times + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(2038,1,19,4,0,0), p1.local_end_time) + end + end + + def test_equality + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 1149368400) + t2 = TestTimezoneTransition.new(o1, o2, DateTime.new(2006, 6, 3, 21, 0, 0)) + t3 = TestTimezoneTransition.new(o1, o2, 1149454800) + t4 = TestTimezoneTransition.new(o1, o2, 1149541200) + + p1 = TimezonePeriod.new(t1, t3) + p2 = TimezonePeriod.new(t1, t3) + p3 = TimezonePeriod.new(t2, t3) + p4 = TimezonePeriod.new(t3, nil) + p5 = TimezonePeriod.new(t3, nil) + p6 = TimezonePeriod.new(t4, nil) + p7 = TimezonePeriod.new(nil, t3) + p8 = TimezonePeriod.new(nil, t3) + p9 = TimezonePeriod.new(nil, t4) + p10 = TimezonePeriod.new(nil, nil, o1) + p11 = TimezonePeriod.new(nil, nil, o1) + p12 = TimezonePeriod.new(nil, nil, o2) + + assert_equal(true, p1 == p1) + assert_equal(true, p1 == p2) + assert_equal(true, p1 == p3) + assert_equal(false, p1 == p4) + assert_equal(false, p1 == p5) + assert_equal(false, p1 == p6) + assert_equal(false, p1 == p7) + assert_equal(false, p1 == p8) + assert_equal(false, p1 == p9) + assert_equal(false, p1 == p10) + assert_equal(false, p1 == p11) + assert_equal(false, p1 == p12) + assert_equal(false, p1 == Object.new) + + assert_equal(true, p4 == p4) + assert_equal(true, p4 == p5) + assert_equal(false, p4 == p6) + assert_equal(false, p4 == Object.new) + + assert_equal(true, p7 == p7) + assert_equal(true, 
p7 == p8) + assert_equal(false, p7 == p9) + assert_equal(false, p7 == Object.new) + + assert_equal(true, p10 == p10) + assert_equal(true, p10 == p11) + assert_equal(false, p10 == p12) + assert_equal(false, p10 == Object.new) + end + + def test_eql + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 1149368400) + t2 = TestTimezoneTransition.new(o1, o2, DateTime.new(2006, 6, 3, 21, 0, 0)) + t3 = TestTimezoneTransition.new(o1, o2, 1149454800) + t4 = TestTimezoneTransition.new(o1, o2, 1149541200) + + p1 = TimezonePeriod.new(t1, t3) + p2 = TimezonePeriod.new(t1, t3) + p3 = TimezonePeriod.new(t2, t3) + p4 = TimezonePeriod.new(t3, nil) + p5 = TimezonePeriod.new(t3, nil) + p6 = TimezonePeriod.new(t4, nil) + p7 = TimezonePeriod.new(nil, t3) + p8 = TimezonePeriod.new(nil, t3) + p9 = TimezonePeriod.new(nil, t4) + p10 = TimezonePeriod.new(nil, nil, o1) + p11 = TimezonePeriod.new(nil, nil, o1) + p12 = TimezonePeriod.new(nil, nil, o2) + + assert_equal(true, p1.eql?(p1)) + assert_equal(true, p1.eql?(p2)) + assert_equal(false, p1.eql?(p3)) + assert_equal(false, p1.eql?(p4)) + assert_equal(false, p1.eql?(p5)) + assert_equal(false, p1.eql?(p6)) + assert_equal(false, p1.eql?(p7)) + assert_equal(false, p1.eql?(p8)) + assert_equal(false, p1.eql?(p9)) + assert_equal(false, p1.eql?(p10)) + assert_equal(false, p1.eql?(p11)) + assert_equal(false, p1.eql?(p12)) + assert_equal(false, p1.eql?(Object.new)) + + assert_equal(true, p4.eql?(p4)) + assert_equal(true, p4.eql?(p5)) + assert_equal(false, p4.eql?(p6)) + assert_equal(false, p4.eql?(Object.new)) + + assert_equal(true, p7.eql?(p7)) + assert_equal(true, p7.eql?(p8)) + assert_equal(false, p7.eql?(p9)) + assert_equal(false, p7.eql?(Object.new)) + + assert_equal(true, p10.eql?(p10)) + assert_equal(true, p10.eql?(p11)) + assert_equal(false, p10.eql?(p12)) + assert_equal(false, p10.eql?(Object.new)) + end + + def test_hash + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 1149368400) + t2 = TestTimezoneTransition.new(o1, o2, DateTime.new(2006, 6, 3, 21, 0, 0)) + t3 = TestTimezoneTransition.new(o1, o2, 1149454800) + t4 = TestTimezoneTransition.new(o1, o2, 1149541200) + + p1 = TimezonePeriod.new(t1, t3) + p2 = TimezonePeriod.new(t2, nil) + p3 = TimezonePeriod.new(nil, t4) + p4 = TimezonePeriod.new(nil, nil, o1) + + assert_equal(t1.hash ^ t3.hash, p1.hash) + assert_equal(t2.hash ^ nil.hash, p2.hash) + assert_equal(nil.hash ^ t4.hash, p3.hash) + assert_equal(nil.hash ^ nil.hash ^ o1.hash, p4.hash) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_proxy.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_proxy.rb new file mode 100644 index 0000000..22ba3d4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_proxy.rb @@ -0,0 +1,136 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneProxy < Minitest::Test + def test_not_exist + proxy = TimezoneProxy.new('Nothing/Special') + t = Time.utc(2006,1,1,0,0,0) + assert_equal('Nothing/Special', proxy.identifier) + assert_equal('Nothing/Special', proxy.name) + assert_equal('Nothing - Special', proxy.friendly_identifier) + assert_equal('Nothing - Special', proxy.to_s) + + assert_raises(InvalidTimezoneIdentifier) { proxy.canonical_identifier } + assert_raises(InvalidTimezoneIdentifier) { proxy.canonical_zone } + assert_raises(InvalidTimezoneIdentifier) { 
proxy.current_period } + assert_raises(InvalidTimezoneIdentifier) { proxy.current_period_and_time } + assert_raises(InvalidTimezoneIdentifier) { proxy.current_time_and_period } + assert_raises(InvalidTimezoneIdentifier) { proxy.local_to_utc(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.now } + assert_raises(InvalidTimezoneIdentifier) { proxy.offsets_up_to(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.period_for_local(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.period_for_utc(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.periods_for_local(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.strftime('%Z', t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.transitions_up_to(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.utc_to_local(t) } + end + + def test_valid + proxy = TimezoneProxy.new('Europe/London') + real = Timezone.get('Europe/London') + + t1 = Time.utc(2005,8,1,0,0,0) + t2 = Time.utc(2004,8,1,0,0,0) + + assert_equal(real.canonical_identifier, proxy.canonical_identifier) + assert_same(real.canonical_zone, proxy.canonical_zone) + assert_nothing_raised { proxy.current_period } + assert_nothing_raised { proxy.current_period_and_time } + assert_nothing_raised { proxy.current_time_and_period } + assert_equal(real.friendly_identifier(true), proxy.friendly_identifier(true)) + assert_equal(real.friendly_identifier(false), proxy.friendly_identifier(false)) + assert_equal(real.friendly_identifier, proxy.friendly_identifier) + assert_equal(real.identifier, proxy.identifier) + assert_equal(real.local_to_utc(t1), proxy.local_to_utc(t1)) + assert_equal(real.name, proxy.name) + assert_nothing_raised { proxy.now } + assert_equal(real.offsets_up_to(t1), proxy.offsets_up_to(t1)) + assert_equal(real.offsets_up_to(t1, t2), proxy.offsets_up_to(t1, t2)) + assert_equal(real.period_for_local(t1), proxy.period_for_local(t1)) + assert_equal(real.period_for_utc(t1), proxy.period_for_utc(t1)) + assert_equal(real.periods_for_local(t1), proxy.periods_for_local(t1)) + assert_equal(real.strftime('%Z', t1), proxy.strftime('%Z', t1)) + assert_equal(real.to_s, proxy.to_s) + assert_equal(real.transitions_up_to(t1), proxy.transitions_up_to(t1)) + assert_equal(real.transitions_up_to(t1, t2), proxy.transitions_up_to(t1, t2)) + assert_equal(real.utc_to_local(t1), proxy.utc_to_local(t1)) + + + assert(real == proxy) + assert(proxy == real) + assert_equal(0, real <=> proxy) + assert_equal(0, proxy <=> real) + end + + def test_canonical_linked + # Test that the implementation of canonical_zone and canonical_identifier + # are actually calling the real timezone and not just returning it and + # its identifier. + + real = Timezone.get('UTC') + proxy = TimezoneProxy.new('UTC') + + # ZoneinfoDataSource doesn't return LinkedTimezoneInfo instances for any + # timezone. + if real.kind_of?(LinkedTimezone) + assert_equal('Etc/UTC', proxy.canonical_identifier) + assert_same(Timezone.get('Etc/UTC'), proxy.canonical_zone) + else + if DataSource.get.kind_of?(RubyDataSource) + # Not got a LinkedTimezone despite using a DataSource that supports it. + # Raise an exception as this shouldn't happen. 
+ raise 'Non-LinkedTimezone instance returned for UTC using RubyDataSource' + end + + assert_equal('UTC', proxy.canonical_identifier) + assert_same(Timezone.get('UTC'), proxy.canonical_zone) + end + end + + def test_after_freeze + proxy = TimezoneProxy.new('Europe/London') + real = Timezone.get('Europe/London') + t = Time.utc(2017, 6, 1) + proxy.freeze + assert_equal('Europe/London', proxy.identifier) + assert_equal(real.utc_to_local(t), proxy.utc_to_local(t)) + end + + def test_equals + assert_equal(true, TimezoneProxy.new('Europe/London') == TimezoneProxy.new('Europe/London')) + assert_equal(false, TimezoneProxy.new('Europe/London') == TimezoneProxy.new('Europe/Paris')) + assert(!(TimezoneProxy.new('Europe/London') == Object.new)) + end + + def test_compare + assert_equal(0, TimezoneProxy.new('Europe/London') <=> TimezoneProxy.new('Europe/London')) + assert_equal(0, Timezone.get('Europe/London') <=> TimezoneProxy.new('Europe/London')) + assert_equal(0, TimezoneProxy.new('Europe/London') <=> Timezone.get('Europe/London')) + assert_equal(-1, TimezoneProxy.new('Europe/London') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, Timezone.get('Europe/London') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, TimezoneProxy.new('Europe/London') <=> Timezone.get('Europe/Paris')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> TimezoneProxy.new('Europe/London')) + assert_equal(1, Timezone.get('Europe/Paris') <=> TimezoneProxy.new('Europe/London')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> Timezone.get('Europe/London')) + assert_equal(-1, TimezoneProxy.new('America/New_York') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, Timezone.get('America/New_York') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, TimezoneProxy.new('America/New_York') <=> Timezone.get('Europe/Paris')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> TimezoneProxy.new('America/New_York')) + assert_equal(1, Timezone.get('Europe/Paris') <=> TimezoneProxy.new('America/New_York')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> Timezone.get('America/New_York')) + end + + def test_kind + assert_kind_of(Timezone, TimezoneProxy.new('America/New_York')) + end + + def test_marshal + tp = TimezoneProxy.new('Europe/London') + tp2 = Marshal.load(Marshal.dump(tp)) + + assert_kind_of(TimezoneProxy, tp2) + assert_equal('Europe/London', tp2.identifier) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_transition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_transition.rb new file mode 100644 index 0000000..3dba518 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_transition.rb @@ -0,0 +1,366 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'date' + +include TZInfo + +class TCTimezoneTransition < Minitest::Test + + class TestTimezoneTransition < TimezoneTransition + def initialize(offset, previous_offset, at) + super(offset, previous_offset) + @at = TimeOrDateTime.wrap(at) + end + + def at + @at + end + end + + def test_offset + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + + assert_equal(TimezoneOffset.new(3600, 3600, :TDT), t.offset) + end + + def test_previous_offset + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + + assert_equal(TimezoneOffset.new(3600, 0, :TST), t.previous_offset) + end + + def 
test_datetime + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), t1.datetime) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), t2.datetime) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), t3.datetime) + end + + def test_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), t1.time) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), t2.time) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), t3.time) + end + + def test_local_end_at + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert(TimeOrDateTime.new(1148952680).eql?(t1.local_end_at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 1, 31, 20)).eql?(t2.local_end_at)) + assert(TimeOrDateTime.new(Time.utc(2006, 5, 30, 1, 31, 20)).eql?(t3.local_end_at)) + end + + def test_local_end_at_after_freeze + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t.freeze + assert(TimeOrDateTime.new(1148952680).eql?(t.local_end_at)) + end + + def test_local_end + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(DateTime.new(2006, 5, 30, 1, 31, 20), t1.local_end) + assert_equal(DateTime.new(2006, 5, 30, 1, 31, 20), t2.local_end) + assert_equal(DateTime.new(2006, 5, 30, 1, 31, 20), t3.local_end) + end + + def test_local_end_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(Time.utc(2006, 5, 30, 1, 31, 20), t1.local_end_time) + assert_equal(Time.utc(2006, 5, 30, 1, 31, 20), t2.local_end_time) + assert_equal(Time.utc(2006, 5, 30, 1, 31, 20), t3.local_end_time) + end + + def test_local_start_at + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, 
:TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert(TimeOrDateTime.new(1148956280).eql?(t1.local_start_at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 2, 31, 20)).eql?(t2.local_start_at)) + assert(TimeOrDateTime.new(Time.utc(2006, 5, 30, 2, 31, 20)).eql?(t3.local_start_at)) + end + + def test_local_start_at_after_freeze + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t.freeze + assert(TimeOrDateTime.new(1148956280).eql?(t.local_start_at)) + end + + def test_local_start + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(DateTime.new(2006, 5, 30, 2, 31, 20), t1.local_start) + assert_equal(DateTime.new(2006, 5, 30, 2, 31, 20), t2.local_start) + assert_equal(DateTime.new(2006, 5, 30, 2, 31, 20), t3.local_start) + end + + def test_local_start_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(Time.utc(2006, 5, 30, 2, 31, 20), t1.local_start_time) + assert_equal(Time.utc(2006, 5, 30, 2, 31, 20), t2.local_start_time) + assert_equal(Time.utc(2006, 5, 30, 2, 31, 20), t3.local_start_time) + end + + if RubyCoreSupport.time_supports_negative + def test_local_end_at_before_negative_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), -2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(-2147490000).eql?(t.local_end_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1901, 12, 13, 19, 0, 0)).eql?(t.local_end_at)) + end + end + + def test_local_start_at_before_negative_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), -2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(-2147486400).eql?(t.local_start_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1901, 12, 13, 20, 0, 0)).eql?(t.local_start_at)) + end + end + end + + def test_local_end_at_before_epoch + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), 1800) + + if RubyCoreSupport.time_supports_negative + assert(TimeOrDateTime.new(-5400).eql?(t.local_end_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1969, 12, 31, 22, 30, 0)).eql?(t.local_end_at)) + end + end + + def test_local_start_at_before_epoch + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), 1800) + + if RubyCoreSupport.time_supports_negative + 
assert(TimeOrDateTime.new(-1800).eql?(t.local_start_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1969, 12, 31, 23, 30, 0)).eql?(t.local_start_at)) + end + end + + def test_local_end_at_after_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(2147486400).eql?(t.local_end_at)) + else + assert(TimeOrDateTime.new(DateTime.new(2038, 1, 19, 4, 0, 0)).eql?(t.local_end_at)) + end + end + + def test_local_start_at_after_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(2147490000).eql?(t.local_start_at)) + else + assert(TimeOrDateTime.new(DateTime.new(2038, 1, 19, 5, 0, 0)).eql?(t.local_start_at)) + end + end + + def test_equality_timestamp + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t4 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t5 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t6 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 1, 31, 21)) + t7 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 1, 31, 21)) + t8 = TestTimezoneTransition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t9 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080) + + assert_equal(true, t1 == t1) + assert_equal(true, t1 == t2) + assert_equal(true, t1 == t3) + assert_equal(true, t1 == t4) + assert_equal(false, t1 == t5) + assert_equal(false, t1 == t6) + assert_equal(false, t1 == t7) + assert_equal(false, t1 == t8) + assert_equal(false, t1 == t9) + assert_equal(false, t1 == Object.new) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(t8)) + assert_equal(false, t1.eql?(t9)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_equality_datetime + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t4 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t5 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 1, 31, 21)) + 
t6 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t7 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 1, 31, 21)) + t8 = TestTimezoneTransition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t9 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + + assert_equal(true, t1 == t1) + assert_equal(true, t1 == t2) + assert_equal(true, t1 == t3) + assert_equal(true, t1 == t4) + assert_equal(false, t1 == t5) + assert_equal(false, t1 == t6) + assert_equal(false, t1 == t7) + assert_equal(false, t1 == t8) + assert_equal(false, t1 == t9) + assert_equal(false, t1 == Object.new) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(t8)) + assert_equal(false, t1.eql?(t9)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_equality_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t4 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t5 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 21)) + t6 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 1, 31, 21)) + t7 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t8 = TestTimezoneTransition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t9 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080) + + assert_equal(true, t1 == t1) + assert_equal(true, t1 == t2) + assert_equal(true, t1 == t3) + assert_equal(true, t1 == t4) + assert_equal(false, t1 == t5) + assert_equal(false, t1 == t6) + assert_equal(false, t1 == t7) + assert_equal(false, t1 == t8) + assert_equal(false, t1 == t9) + assert_equal(false, t1 == Object.new) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(t8)) + assert_equal(false, t1.eql?(t9)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_hash + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDTA), + TimezoneOffset.new(3600, 0, :TSTA), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDTB), + TimezoneOffset.new(3600, 0, :TSTB), DateTime.new(2006, 5, 30, 1, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDTC), + 
TimezoneOffset.new(3600, 0, :TSTC), Time.utc(2006, 5, 30, 1, 31, 20)) + + assert_equal(TimezoneOffset.new(3600, 3600, :TDTA).hash ^ + TimezoneOffset.new(3600, 0, :TSTA).hash ^ TimeOrDateTime.new(1148949080).hash, + t1.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDTB).hash ^ + TimezoneOffset.new(3600, 0, :TSTB).hash ^ TimeOrDateTime.new(DateTime.new(2006, 5, 30, 1, 31, 20)).hash, + t2.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDTC).hash ^ + TimezoneOffset.new(3600, 0, :TSTC).hash ^ TimeOrDateTime.new(Time.utc(2006, 5, 30, 1, 31, 20)).hash, + t3.hash) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_transition_definition.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_transition_definition.rb new file mode 100644 index 0000000..b66233b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_transition_definition.rb @@ -0,0 +1,295 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'date' + +include TZInfo + +class TCTimezoneTransitionDefinition < Minitest::Test + def test_initialize_timestamp_only + assert_nothing_raised do + TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + end + end + + def test_initialize_timestamp_and_datetime + assert_nothing_raised do + TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + end + end + + def test_initialize_datetime_only + assert_nothing_raised do + TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + end + end + + def test_at + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + + assert(TimeOrDateTime.new(1148949080).eql?(t1.at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 0, 31, 20)).eql?(t2.at)) + assert(TimeOrDateTime.new(1148949080).eql?(t3.at)) + end + + def test_at_before_negative_32_bit + t = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(-2147483649).eql?(t.at)) + else + assert(TimeOrDateTime.new(DateTime.new(1901, 12, 13, 20, 45, 51)).eql?(t.at)) + end + end + + def test_at_before_epoch + t = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + + if RubyCoreSupport.time_supports_negative + assert(TimeOrDateTime.new(-1).eql?(t.at)) + else + assert(TimeOrDateTime.new(DateTime.new(1969, 12, 31, 23, 59, 59)).eql?(t.at)) + end + end + + def test_at_after_32bit + t = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(2147483648).eql?(t.at)) + else + assert(TimeOrDateTime.new(DateTime.new(2038, 1, 19, 3, 14, 8)).eql?(t.at)) + end + end + + def test_at_after_freeze + t1 = 
TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t1.freeze + t2.freeze + assert(TimeOrDateTime.new(1148949080).eql?(t1.at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 0, 31, 20)).eql?(t2.at)) + end + + def test_eql_timestamp + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t5 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t7 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_eql_datetime + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t5 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 7852433803, 3200) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t7 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 5300392727, 2160) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_eql_timestamp_and_datetime + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t5 = 
TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148952681, 7852433803, 3200) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t7 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080, 5300392727, 2160) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(true, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_eql_timestamp_and_datetime_before_negative_32bit + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 69573092117, 28800) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert_equal(true, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(true, t3.eql?(t1)) + assert_equal(false, t4.eql?(t1)) + else + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t3.eql?(t1)) + assert_equal(true, t4.eql?(t1)) + end + end + + def test_eql_timestamp_and_datetime_before_epoch + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 210866759999, 86400) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + + if RubyCoreSupport.time_supports_negative + assert_equal(true, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(true, t3.eql?(t1)) + assert_equal(false, t4.eql?(t1)) + else + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t3.eql?(t1)) + assert_equal(true, t4.eql?(t1)) + end + end + + def test_eql_timestamp_and_datetime_after_32bit + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 3328347557, 1350) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + + if RubyCoreSupport.time_supports_64bit + assert_equal(true, t1.eql?(t3)) + 
assert_equal(false, t1.eql?(t4)) + assert_equal(true, t3.eql?(t1)) + assert_equal(false, t4.eql?(t1)) + else + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t3.eql?(t1)) + assert_equal(true, t4.eql?(t1)) + end + end + + def test_hash + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + t5 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 1148949080.hash ^ nil.hash, + t1.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 5300392727.hash ^ 2160.hash, + t2.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 1148949080.hash ^ nil.hash, + t3.hash) + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ -2147483649.hash ^ nil.hash, + t4.hash) + else + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 69573092117.hash ^ 28800.hash, + t4.hash) + end + + if RubyCoreSupport.time_supports_negative + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ -1.hash ^ nil.hash, + t5.hash) + else + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 210866759999.hash ^ 86400.hash, + t5.hash) + end + + if RubyCoreSupport.time_supports_64bit + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 2147483648.hash ^ nil.hash, + t6.hash) + else + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 3328347557.hash ^ 1350.hash, + t6.hash) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_utc.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_utc.rb new file mode 100644 index 0000000..212bca0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_timezone_utc.rb @@ -0,0 +1,27 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneUTC < Minitest::Test + def test_2004 + tz = Timezone.get('UTC') + + assert_equal(DateTime.new(2004,1,1,0,0,0), tz.utc_to_local(DateTime.new(2004,1,1,0,0,0))) + assert_equal(DateTime.new(2004,12,31,23,59,59), tz.utc_to_local(DateTime.new(2004,12,31,23,59,59))) + + assert_equal(DateTime.new(2004,1,1,0,0,0), tz.local_to_utc(DateTime.new(2004,1,1,0,0,0))) + assert_equal(DateTime.new(2004,12,31,23,59,59), tz.local_to_utc(DateTime.new(2004,12,31,23,59,59))) + + assert_equal(:UTC, 
tz.period_for_utc(DateTime.new(2004,1,1,0,0,0)).zone_identifier) + assert_equal(:UTC, tz.period_for_utc(DateTime.new(2004,12,31,23,59,59)).zone_identifier) + + assert_equal(:UTC, tz.period_for_local(DateTime.new(2004,1,1,0,0,0)).zone_identifier) + assert_equal(:UTC, tz.period_for_local(DateTime.new(2004,12,31,23,59,59)).zone_identifier) + + assert_equal(0, tz.period_for_utc(DateTime.new(2004,1,1,0,0,0)).utc_total_offset) + assert_equal(0, tz.period_for_utc(DateTime.new(2004,12,31,23,59,59)).utc_total_offset) + + assert_equal(0, tz.period_for_local(DateTime.new(2004,1,1,0,0,0)).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(2004,12,31,23,59,59)).utc_total_offset) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_transition_data_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_transition_data_timezone_info.rb new file mode 100644 index 0000000..cc4af72 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_transition_data_timezone_info.rb @@ -0,0 +1,433 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTransitionDataTimezoneInfo < Minitest::Test + + def test_identifier + dti = TransitionDataTimezoneInfo.new('Test/Zone') + assert_equal('Test/Zone', dti.identifier) + end + + def test_offset + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_nothing_raised do + dti.offset :o1, -18000, 3600, :TEST + end + end + + def test_offset_already_defined + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, 3600, 0, :TEST + dti.offset :o2, 1800, 0, :TEST2 + + assert_raises(ArgumentError) { dti.offset :o1, 3600, 3600, :TESTD } + end + + def test_transition_timestamp + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + assert_nothing_raised do + dti.transition 2006, 6, :o1, 1149368400 + end + end + + def test_transition_datetime + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + assert_nothing_raised do + dti.transition 2006, 6, :o1, 19631123, 8 + end + end + + def test_transition_timestamp_and_datetime + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + assert_nothing_raised do + dti.transition 2006, 6, :o1, 1149368400, 19631123, 8 + end + end + + def test_transition_invalid_offset + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + dti.transition 2006, 6, :o1, 1149368400 + + assert_raises(ArgumentError) { dti.transition 2006, 6, :o2, 1149454800 } + end + + def test_transition_no_offsets + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_raises(ArgumentError) { dti.transition 2006, 6, :o1, 1149368400 } + end + + def test_transition_invalid_order_month + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + dti.transition 2006, 6, :o1, 1149368400 + + assert_raises(ArgumentError) { dti.transition 2006, 5, :o2, 1146690000 } + end + + def test_transition_invalid_order_year + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + dti.transition 2006, 6, :o1, 1149368400 + + assert_raises(ArgumentError) { dti.transition 2005, 7, :o2, 1120424400 } + end + + def test_period_for_utc + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 3600, :TESTD + dti.offset :o3, -18000, 0, :TESTS + dti.offset :o4, -21600, 3600, :TESTD + + 
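# (Editor's aside, not part of the vendored file: the transition calls that
# follow refer to the offsets declared above by their symbolic keys, and each
# transition time may be given as a POSIX timestamp, as a rational
# astronomical Julian date written as a numerator/denominator pair, or as
# both forms together. A minimal sketch checking the rational form against
# values used in this test:)
require 'date'
DateTime.new(2001, 3, 1, 1, 0, 0).ajd == Rational(58847269, 24)  # => true
Time.utc(2001, 4, 1, 1, 0, 0).to_i                               # => 986086800
DateTime.new(2001, 4, 1, 1, 0, 0).ajd                            # => (58848013/24)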
dti.transition 2000, 4, :o2, Time.utc(2000, 4,1,1,0,0).to_i + dti.transition 2000, 10, :o3, Time.utc(2000,10,1,1,0,0).to_i + dti.transition 2001, 3, :o2, 58847269, 24 # (2001, 3,1,1,0,0) + dti.transition 2001, 4, :o4, Time.utc(2001, 4,1,1,0,0).to_i, 58848013, 24 + dti.transition 2001, 10, :o3, Time.utc(2001,10,1,1,0,0).to_i + dti.transition 2002, 10, :o3, Time.utc(2002,10,1,1,0,0).to_i + dti.transition 2003, 2, :o2, Time.utc(2003, 2,1,1,0,0).to_i + dti.transition 2003, 3, :o3, Time.utc(2003, 3,1,1,0,0).to_i + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2000, 4,1,1,0,0).to_i) + t2 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2000,10,1,1,0,0).to_i) + t3 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2001, 3,1,1,0,0).to_i) + t4 = TimezoneTransitionDefinition.new(o4, o2, Time.utc(2001, 4,1,1,0,0).to_i) + t5 = TimezoneTransitionDefinition.new(o3, o4, Time.utc(2001,10,1,1,0,0).to_i) + t6 = TimezoneTransitionDefinition.new(o3, o3, Time.utc(2002,10,1,1,0,0).to_i) + t7 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2003, 2,1,1,0,0).to_i) + t8 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2003, 3,1,1,0,0).to_i) + + assert_equal(TimezonePeriod.new(nil, t1), dti.period_for_utc(DateTime.new(1960, 1,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(nil, t1), dti.period_for_utc(DateTime.new(1999,12,1,0, 0, 0))) + assert_equal(TimezonePeriod.new(nil, t1), dti.period_for_utc(Time.utc( 2000, 4,1,0,59,59))) + assert_equal(TimezonePeriod.new(t1, t2), dti.period_for_utc(DateTime.new(2000, 4,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t1, t2), dti.period_for_utc(Time.utc( 2000,10,1,0,59,59).to_i)) + assert_equal(TimezonePeriod.new(t2, t3), dti.period_for_utc(Time.utc( 2000,10,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t2, t3), dti.period_for_utc(Time.utc( 2001, 3,1,0,59,59))) + assert_equal(TimezonePeriod.new(t3, t4), dti.period_for_utc(Time.utc( 2001, 3,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t3, t4), dti.period_for_utc(Time.utc( 2001, 4,1,0,59,59))) + assert_equal(TimezonePeriod.new(t4, t5), dti.period_for_utc(Time.utc( 2001, 4,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t4, t5), dti.period_for_utc(Time.utc( 2001,10,1,0,59,59))) + assert_equal(TimezonePeriod.new(t5, t6), dti.period_for_utc(Time.utc( 2001,10,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t5, t6), dti.period_for_utc(Time.utc( 2002, 2,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t5, t6), dti.period_for_utc(Time.utc( 2002,10,1,0,59,59))) + assert_equal(TimezonePeriod.new(t6, t7), dti.period_for_utc(Time.utc( 2002,10,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t6, t7), dti.period_for_utc(Time.utc( 2003, 2,1,0,59,59))) + assert_equal(TimezonePeriod.new(t7, t8), dti.period_for_utc(Time.utc( 2003, 2,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t7, t8), dti.period_for_utc(Time.utc( 2003, 3,1,0,59,59))) + assert_equal(TimezonePeriod.new(t8, nil), dti.period_for_utc(Time.utc( 2003, 3,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t8, nil), dti.period_for_utc(Time.utc( 2004, 1,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t8, nil), dti.period_for_utc(DateTime.new(2050, 1,1,1, 0, 0))) + end + + def test_period_for_utc_no_transitions + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 0, :TEST + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + + 
assert_equal(TimezonePeriod.new(nil, nil, o1), dti.period_for_utc(DateTime.new(2005,1,1,0,0,0))) + assert_equal(TimezonePeriod.new(nil, nil, o1), dti.period_for_utc(Time.utc(2005,1,1,0,0,0))) + assert_equal(TimezonePeriod.new(nil, nil, o1), dti.period_for_utc(Time.utc(2005,1,1,0,0,0).to_i)) + end + + def test_period_for_utc_no_offsets + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_raises(NoOffsetsDefined) { dti.period_for_utc(DateTime.new(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.period_for_utc(Time.utc(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.period_for_utc(Time.utc(2005,1,1,0,0,0).to_i) } + end + + def test_periods_for_local + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 3600, :TESTD + dti.offset :o3, -18000, 0, :TESTS + dti.offset :o4, -21600, 3600, :TESTD + + dti.transition 2000, 4, :o2, 58839277, 24 # 2000,4,2,1,0,0 + dti.transition 2000, 10, :o3, Time.utc(2000,10,2,1,0,0).to_i, 58843669, 24 + dti.transition 2001, 3, :o2, Time.utc(2001, 3,2,1,0,0).to_i + dti.transition 2001, 4, :o4, Time.utc(2001, 4,2,1,0,0).to_i + dti.transition 2001, 10, :o3, Time.utc(2001,10,2,1,0,0).to_i + dti.transition 2002, 10, :o3, 58861189, 24 # 2002,10,2,1,0,0 + dti.transition 2003, 2, :o2, Time.utc(2003, 2,2,1,0,0).to_i + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2000, 4,2,1,0,0).to_i) + t2 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2000,10,2,1,0,0).to_i) + t3 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2001, 3,2,1,0,0).to_i) + t4 = TimezoneTransitionDefinition.new(o4, o2, Time.utc(2001, 4,2,1,0,0).to_i) + t5 = TimezoneTransitionDefinition.new(o3, o4, Time.utc(2001,10,2,1,0,0).to_i) + t6 = TimezoneTransitionDefinition.new(o3, o3, Time.utc(2002,10,2,1,0,0).to_i) + t7 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2003, 2,2,1,0,0).to_i) + + + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(DateTime.new(1960, 1, 1, 1, 0, 0))) + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(DateTime.new(1999,12, 1, 0, 0, 0))) + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(Time.utc( 2000, 1, 1,10, 0, 0))) + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(Time.utc( 2000, 4, 1,20, 1,39))) + assert_equal([], dti.periods_for_local(Time.utc( 2000, 4, 1,20, 1,40))) + assert_equal([], dti.periods_for_local(Time.utc( 2000, 4, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t1, t2)], dti.periods_for_local(Time.utc( 2000, 4, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t1, t2)], dti.periods_for_local(DateTime.new(2000,10, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t1, t2), + TimezonePeriod.new(t2, t3)], dti.periods_for_local(Time.utc( 2000,10, 1,20, 0, 0).to_i)) + assert_equal([TimezonePeriod.new(t1, t2), + TimezonePeriod.new(t2, t3)], dti.periods_for_local(DateTime.new(2000,10, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t2, t3)], dti.periods_for_local(Time.utc( 2000,10, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t2, t3)], dti.periods_for_local(Time.utc( 2001, 3, 1,19,59,59))) + assert_equal([], dti.periods_for_local(Time.utc( 2001, 3, 1,20, 0, 0))) + assert_equal([], dti.periods_for_local(DateTime.new(2001, 3, 1,20, 30, 0))) + assert_equal([], dti.periods_for_local(Time.utc( 2001, 3, 
1,20,59,59).to_i)) + assert_equal([TimezonePeriod.new(t3, t4)], dti.periods_for_local(Time.utc( 2001, 3, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t3, t4)], dti.periods_for_local(Time.utc( 2001, 4, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t3, t4), + TimezonePeriod.new(t4, t5)], dti.periods_for_local(DateTime.new(2001, 4, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t3, t4), + TimezonePeriod.new(t4, t5)], dti.periods_for_local(Time.utc( 2001, 4, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t4, t5)], dti.periods_for_local(Time.utc( 2001, 4, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t4, t5)], dti.periods_for_local(Time.utc( 2001,10, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t5, t6)], dti.periods_for_local(Time.utc( 2001,10, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t5, t6)], dti.periods_for_local(Time.utc( 2002, 2, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t5, t6)], dti.periods_for_local(Time.utc( 2002,10, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t6, t7)], dti.periods_for_local(Time.utc( 2002,10, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t6, t7)], dti.periods_for_local(Time.utc( 2003, 2, 1,19,59,59))) + assert_equal([], dti.periods_for_local(Time.utc( 2003, 2, 1,20, 0, 0))) + assert_equal([], dti.periods_for_local(Time.utc( 2003, 2, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t7, nil)], dti.periods_for_local(Time.utc( 2003, 2, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t7, nil)], dti.periods_for_local(Time.utc( 2004, 2, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t7, nil)], dti.periods_for_local(DateTime.new(2040, 2, 1,20, 0, 0))) + end + + def test_periods_for_local_warsaw + dti = TransitionDataTimezoneInfo.new('Test/Europe/Warsaw') + dti.offset :o1, 5040, 0, :LMT + dti.offset :o2, 5040, 0, :WMT + dti.offset :o3, 3600, 0, :CET + dti.offset :o4, 3600, 3600, :CEST + + dti.transition 1879, 12, :o2, 288925853, 120 # 1879,12,31,22,36,0 + dti.transition 1915, 8, :o3, 290485733, 120 # 1915, 8, 4,22,36,0 + dti.transition 1916, 4, :o4, 29051813, 12 # 1916, 4,30,22, 0,0 + + o1 = TimezoneOffset.new(5040, 0, :LMT) + o2 = TimezoneOffset.new(5040, 0, :WMT) + o3 = TimezoneOffset.new(3600, 0, :CET) + o4 = TimezoneOffset.new(3600, 3600, :CEST) + + t1 = TimezoneTransitionDefinition.new(o2, o1, 288925853, 120) + t2 = TimezoneTransitionDefinition.new(o3, o2, 290485733, 120) + t3 = TimezoneTransitionDefinition.new(o4, o3, 29051813, 12) + + assert_equal([TimezonePeriod.new(t1, t2), + TimezonePeriod.new(t2, t3)], dti.periods_for_local(DateTime.new(1915,8,4,23,40,0))) + end + + def test_periods_for_local_boundary + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -3600, 0, :TESTD + dti.offset :o2, -3600, 0, :TESTS + + dti.transition 2000, 7, :o2, Time.utc(2000,7,1,0,0,0).to_i + + o1 = TimezoneOffset.new(-3600, 0, :TESTD) + o2 = TimezoneOffset.new(-3600, 0, :TESTS) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2000,7,1,0,0,0).to_i) + + # 2000-07-01 00:00:00 UTC is 2000-06-30 23:00:00 UTC-1 + # hence to find periods for local times between 2000-06-30 23:00:00 + # and 2000-07-01 00:00:00 a search has to be carried out in the next half + # year to the one containing the date we are looking for + + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(Time.utc(2000,6,30,22,59,59))) + assert_equal([TimezonePeriod.new(t1, nil)], dti.periods_for_local(Time.utc(2000,6,30,23, 0, 0))) + assert_equal([TimezonePeriod.new(t1, nil)], dti.periods_for_local(Time.utc(2000,7, 1, 0, 0, 0))) + end + + def 
test_periods_for_local_no_transitions + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 0, :TEST + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + + assert_equal([TimezonePeriod.new(nil, nil, o1)], dti.periods_for_local(DateTime.new(2005,1,1,0,0,0))) + assert_equal([TimezonePeriod.new(nil, nil, o1)], dti.periods_for_local(Time.utc(2005,1,1,0,0,0))) + assert_equal([TimezonePeriod.new(nil, nil, o1)], dti.periods_for_local(Time.utc(2005,1,1,0,0,0).to_i)) + end + + def test_periods_for_local_no_offsets + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_raises(NoOffsetsDefined) { dti.periods_for_local(DateTime.new(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.periods_for_local(Time.utc(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.periods_for_local(Time.utc(2005,1,1,0,0,0).to_i) } + end + + def test_transitions_up_to + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 3600, :TESTD + dti.offset :o3, -18000, 0, :TESTS + dti.offset :o4, -21600, 3600, :TESTD + + dti.transition 2010, 4, :o2, Time.utc(2010, 4,1,1,0,0).to_i + dti.transition 2010, 10, :o3, Time.utc(2010,10,1,1,0,0).to_i + dti.transition 2011, 3, :o2, 58934917, 24 # (2011, 3,1,1,0,0) + dti.transition 2011, 4, :o4, Time.utc(2011, 4,1,1,0,0).to_i, 58935661, 24 + dti.transition 2011, 10, :o3, Time.utc(2011,10,1,1,0,0).to_i + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2010, 4,1,1,0,0).to_i) + t2 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2010,10,1,1,0,0).to_i) + t3 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2011, 3,1,1,0,0).to_i) + t4 = TimezoneTransitionDefinition.new(o4, o2, Time.utc(2011, 4,1,1,0,0).to_i) + t5 = TimezoneTransitionDefinition.new(o3, o4, Time.utc(2011,10,1,1,0,0).to_i) + + assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0))) + assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0), Time.utc(2000,1,1,0,0,0))) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1))) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1), Time.utc(2000,1,1,0,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,4,1,1,0,1), Time.utc(2010,10,1,1,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0), Time.utc(2010,4,1,1,0,1))) + assert_equal([t3], dti.transitions_up_to(Time.utc(2011,4,1,1,0,0), Time.utc(2010,10,1,1,0,1))) + assert_equal([], dti.transitions_up_to(Time.utc(2011,3,1,1,0,0), Time.utc(2010,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0,1))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1), Time.utc(2010,4,1,1,0,0))) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1), Time.utc(2010,4,1,1,0,1))) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1), Time.utc(2010,4,1,1,0,0,1))) + assert_equal([t5], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0), Time.utc(2011,10,1,1,0,0))) + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0), Time.utc(2011,10,1,1,0,1))) + + 
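# (Editor's aside, not part of the vendored file: condensing the assertions
# above and the Integer/DateTime variants that follow, transitions_up_to
# appears to use a half-open interval, returning the transitions t with
# utc_from <= t.at < utc_to, whichever of Time, Integer timestamp or DateTime
# is passed. Restated using the dti and t1..t5 variables defined in this test:)
dti.transitions_up_to(Time.utc(2010, 4, 1, 1, 0, 0))  # => []   (utc_to is exclusive; t1 occurs at exactly 01:00:00)
dti.transitions_up_to(Time.utc(2010, 4, 1, 1, 0, 1))  # => [t1]
dti.transitions_up_to(Time.utc(2011, 10, 1, 1, 0, 1),
                      Time.utc(2010, 4, 1, 1, 0, 0))  # => [t1, t2, t3, t4, t5]  (utc_from is inclusive)
dti.transitions_up_to(Time.utc(2011, 10, 1, 1, 0, 1),
                      Time.utc(2010, 4, 1, 1, 0, 1))  # => [t2, t3, t4, t5]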
assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0).to_i)) + assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0).to_i, Time.utc(2000,1,1,0,0,0).to_i)) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1).to_i)) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1).to_i, Time.utc(2000,1,1,0,0,0).to_i)) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,4,1,1,0,1).to_i, Time.utc(2010,10,1,1,0,0).to_i)) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0).to_i, Time.utc(2010,4,1,1,0,1).to_i)) + assert_equal([t3], dti.transitions_up_to(Time.utc(2011,4,1,1,0,0).to_i, Time.utc(2010,10,1,1,0,1).to_i)) + assert_equal([], dti.transitions_up_to(Time.utc(2011,3,1,1,0,0).to_i, Time.utc(2010,10,1,1,0,1).to_i)) + assert_equal([t1,t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0).to_i)) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1).to_i)) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,0).to_i)) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,1).to_i)) + assert_equal([t5], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0).to_i, Time.utc(2011,10,1,1,0,0).to_i)) + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0).to_i, Time.utc(2011,10,1,1,0,1).to_i)) + + assert_equal([], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,0))) + assert_equal([], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,0), DateTime.new(2000,1,1,0,0,0))) + assert_equal([t1], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,1))) + assert_equal([t1], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,1), DateTime.new(2000,1,1,0,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(DateTime.new(2011,4,1,1,0,1), DateTime.new(2010,10,1,1,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,0), DateTime.new(2010,4,1,1,0,1))) + assert_equal([t3], dti.transitions_up_to(DateTime.new(2011,4,1,1,0,0), DateTime.new(2010,10,1,1,0,1))) + assert_equal([], dti.transitions_up_to(DateTime.new(2011,3,1,1,0,0), DateTime.new(2010,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,0))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,Rational(DATETIME_RESOLUTION,1000000)))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,1), DateTime.new(2010,4,1,1,0,0))) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,1), DateTime.new(2010,4,1,1,0,1))) + + # Ruby 1.8.7 (built with GCC 8.3.0 on Ubuntu 19.04) fails to perform + # DateTime comparisons correctly when sub-seconds are used. 
+ to = DateTime.new(2011,10,1,1,0,1) + from = DateTime.new(2010,4,1,1,0,Rational(DATETIME_RESOLUTION,1000000)) + if to > from + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(to, from)) + else + puts "This platform does not consider the DateTime #{to.strftime('%Y-%m-%d %H:%m:%S.%N')} to be later than the DateTime #{from.strftime('%Y-%m-%d %H:%m:%S.%N')}" + end + + assert_equal([t5], dti.transitions_up_to(DateTime.new(2015,1,1,0,0,0), DateTime.new(2011,10,1,1,0,0))) + assert_equal([], dti.transitions_up_to(DateTime.new(2015,1,1,0,0,0), DateTime.new(2011,10,1,1,0,1))) + end + + def test_transitions_up_to_no_transitions + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0))) + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0).to_i)) + assert_equal([], dti.transitions_up_to(DateTime.new(2015,1,1,0,0,0))) + end + + def test_transitions_up_to_utc_to_not_greater_than_utc_from + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + + assert_raises(ArgumentError) do + dti.transitions_up_to(Time.utc(2012,8,1,0,0,0), Time.utc(2013,8,1,0,0,0)) + end + + assert_raises(ArgumentError) do + dti.transitions_up_to(Time.utc(2012,8,1,0,0,0).to_i, Time.utc(2012,8,1,0,0,0).to_i) + end + + assert_raises(ArgumentError) do + dti.transitions_up_to(DateTime.new(2012,8,1,0,0,0), DateTime.new(2012,8,1,0,0,0)) + end + end + + def test_datetime_and_timestamp_use + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, 0, 0, :TESTS + dti.offset :o2, 0, 3600, :TESTD + + dti.transition 1901, 12, :o2, -2147483649, 69573092117, 28800 + dti.transition 1969, 12, :o1, -1, 210866759999, 86400 + dti.transition 2001, 9, :o2, 1000000000, 529666909, 216 + dti.transition 2038, 1, :o1, 2147483648, 3328347557, 1350 + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert(dti.period_for_utc(DateTime.new(1901,12,13,20,45,51)).start_transition.at.eql?(TimeOrDateTime.new(-2147483649))) + else + assert(dti.period_for_utc(DateTime.new(1901,12,13,20,45,51)).start_transition.at.eql?(TimeOrDateTime.new(DateTime.new(1901,12,13,20,45,51)))) + end + + if RubyCoreSupport.time_supports_negative + assert(dti.period_for_utc(DateTime.new(1969,12,31,23,59,59)).start_transition.at.eql?(TimeOrDateTime.new(-1))) + else + assert(dti.period_for_utc(DateTime.new(1969,12,31,23,59,59)).start_transition.at.eql?(TimeOrDateTime.new(DateTime.new(1969,12,31,23,59,59)))) + end + + assert(dti.period_for_utc(DateTime.new(2001,9,9,2,46,40)).start_transition.at.eql?(TimeOrDateTime.new(1000000000))) + + if RubyCoreSupport.time_supports_64bit + assert(dti.period_for_utc(DateTime.new(2038,1,19,3,14,8)).start_transition.at.eql?(TimeOrDateTime.new(2147483648))) + else + assert(dti.period_for_utc(DateTime.new(2038,1,19,3,14,8)).start_transition.at.eql?(TimeOrDateTime.new(DateTime.new(2038,1,19,3,14,8)))) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_transition_rule.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_transition_rule.rb new file mode 100644 index 0000000..c180f0c --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_transition_rule.rb @@ -0,0 +1,663 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTransitionRule < Minitest::Test + [-1, 0, 1].each do |transition_at| + define_method "test_transition_at_#{transition_at}" do + rule = 
TestTransitionRule.new(transition_at) + assert_equal(transition_at, rule.transition_at) + end + end + + [ + [-1, 19, 23, 59, 59], + [0, 20, 0, 0, 0], + [1, 20, 0, 0, 1], + [60, 20, 0, 1, 0], + [86399, 20, 23, 59, 59], + [86400, 21, 0, 0, 0] + ].each do |(transition_at, expected_day, expected_hour, expected_minute, expected_second)| + define_method "test_at_with_transition_at_#{transition_at}" do + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(transition_at) do |y| + assert_equal(2020, y) + TimeOrDateTime.wrap(Time.utc(2020, 3, 20)) + end + + result = rule.at(offset, 2020) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(2020, 3, expected_day, expected_hour, expected_minute, expected_second), result.to_time) + end + end + + [-7200, 0, 7200].each do |offset_seconds| + define_method "test_at_with_offset_#{offset_seconds}" do + offset = TimezoneOffset.new(offset_seconds, 0, 'TEST') + rule = TestTransitionRule.new(3600) do |y| + assert_equal(2020, y) + TimeOrDateTime.wrap(Time.utc(2020, 3, 20)) + end + + result = rule.at(offset, 2020) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(2020, 3, 20, 1, 0, 0) - offset_seconds, result.to_time) + end + end + + [2020, 2021].each do |year| + define_method "test_at_with_year_#{year}" do + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(3600) do |y| + assert_equal(year, y) + TimeOrDateTime.wrap(Time.utc(year, 3, 20)) + end + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(year, 3, 20, 1, 0, 0), result.to_time) + end + end + + def test_at_crosses_64_bit_boundary + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(11648) do |y| + assert_equal(2038, y) + TimeOrDateTime.wrap(Time.utc(2038, 1, 19)) + end + + result = rule.at(offset, 2038) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_64bit + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8), result.to_datetime) + end + + def test_at_crosses_negative_boundary + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(-1) do |y| + assert_equal(1970, y) + TimeOrDateTime.wrap(Time.utc(1970, 1, 1)) + end + + result = rule.at(offset, 1970) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_negative + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(1969, 12, 31, 23, 59, 59), result.to_datetime) + end + + NEGATIVE_DATES = [[1969, 12, 31]] + B32_DATES = [[1970, 1, 1], [2038, 1, 19]] + B64_DATES = [[2038, 1, 20], [2038, 2, 1], [2039, 1, 1]] + + B32_DATES.concat(RubyCoreSupport.time_supports_negative ? NEGATIVE_DATES : []).concat(RubyCoreSupport.time_supports_64bit ? B64_DATES : []).each do |(year, month, day)| + define_method "test_new_time_or_datetime_returns_time_based_instance_for_#{year}_#{month}_#{day}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, month, day) + assert_kind_of(TimeOrDateTime, result) + assert_kind_of(Time, result.to_orig) + assert_equal(Time.utc(year, month, day), result.to_orig) + end + end + + (RubyCoreSupport.time_supports_negative ? [] : NEGATIVE_DATES).concat(RubyCoreSupport.time_supports_64bit ? 
[] : B64_DATES).each do |(year, month, day)| + define_method "test_new_time_or_datetime_returns_datetime_based_instance_for_#{year}_#{month}_#{day}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, month, day) + assert_kind_of(TimeOrDateTime, result) + assert_kind_of(DateTime, result.to_orig) + assert_equal(DateTime.new(year, month, day), result.to_orig) + end + end + + [1900, 2021, 2039].each do |year| + define_method "test_new_time_or_datetime_returns_march_1_for_feb_29_on_non_leap_year_#{year}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, 2, 29) + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, 3, 1), result.to_datetime) + end + end + + [1968, 2000, 2040].each do |year| + define_method "test_new_time_or_datetime_returns_feb_29_on_leap_year_#{year}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, 2, 29) + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, 2, 29), result.to_datetime) + end + end + + class TestTransitionRule < TransitionRule + def initialize(transition_at, &block) + super(transition_at) + @get_day = block + end + + alias new_time_or_datetime! new_time_or_datetime + public :new_time_or_datetime! + + protected + + def get_day(year) + @get_day.call(year) + end + end +end + +module BaseTransitionRuleTestHelper + def test_invalid_transition_at + error = assert_raises(ArgumentError) { create_with_transition_at('0') } + assert_match(/\btransition_at(\b.*)?/, error.message) + end + + [:==, :eql?].each do |method| + define_method "test_not_equal_by_transition_at_with_#{method}" do + rule1 = create_with_transition_at(0) + rule2 = create_with_transition_at(1) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_other_with_#{method}" do + rule = create_with_transition_at(0) + assert_equal(false, rule.send(method, Object.new)) + end + end +end + +class TCAbsoluteDayOfYearTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + + [-1, 366, '0'].each do |value| + define_method "test_invalid_day_#{value}" do + error = assert_raises(ArgumentError) { AbsoluteDayOfYearTransitionRule.new(value) } + assert_match(/\bday\b/, error.message) + end + end + + [ + [2020, 0, 1, 1], + [2020, 58, 2, 28], + [2020, 59, 2, 29], + [2020, 365, 12, 31], + [2021, 59, 3, 1], + [2021, 365, 13, 1], + [2100, 59, 3, 1], + [2100, 365, 13, 1], + [2000, 59, 2, 29], + [2000, 365, 12, 31] + ].each do |(year, day, expected_month, expected_day)| + define_method "test_day_#{day}_of_year_#{year}" do + rule = AbsoluteDayOfYearTransitionRule.new(day, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + expected_year = year + if expected_month == 13 + expected_year += 1 + expected_month = 1 + end + + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(expected_year, expected_month, expected_day, 1, 0, 0), result.to_datetime) + end + end + + def test_crosses_64_bit_boundary + # Internally the calculation starts with Jan 1 and adds days. Jan 20 2038 + # will require a DateTime on 32-bit only systems. 
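# (Editor's note, not part of the vendored file: per the calculation described
# above, day d resolves to Jan 1 of the year plus d days, after which
# transition_at seconds are added and the offset's UTC offset subtracted. For
# the rule created below, with a zero offset, that works out to
#   Time.utc(2038, 1, 1) + 19 * 86400 + 3600  # => 2038-01-20 01:00:00 UTC
# which is the DateTime asserted at the end of this test.)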
+ + rule = AbsoluteDayOfYearTransitionRule.new(19, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, 2038) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_64bit + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(2038, 1, 20, 1, 0, 0), result.to_datetime) + end + + def test_day_0_is_always_first_day_of_year + rule = AbsoluteDayOfYearTransitionRule.new(0) + assert_equal(true, rule.is_always_first_day_of_year?) + end + + [1, 365].each do |day| + define_method "test_day_#{day}_is_not_always_first_day_of_year" do + rule = AbsoluteDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_first_day_of_year?) + end + + define_method "test_day_#{day}_is_not_always_last_day_of_year" do + rule = AbsoluteDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_last_day_of_year?) + end + end + + [:==, :eql?].each do |method| + [ + [0, 0], + [0, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_equal_for_day_#{day}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = AbsoluteDayOfYearTransitionRule.new(day, transition_at) + rule2 = AbsoluteDayOfYearTransitionRule.new(day, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_by_day_with_#{method}" do + rule1 = AbsoluteDayOfYearTransitionRule.new(0, 3600) + rule2 = AbsoluteDayOfYearTransitionRule.new(1, 3600) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_julian_with_#{method}" do + rule1 = AbsoluteDayOfYearTransitionRule.new(1, 0) + rule2 = JulianDayOfYearTransitionRule.new(1, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [0, 0], + [0, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_hash_for_day_#{day}_and_transition_at_#{transition_at}" do + rule = AbsoluteDayOfYearTransitionRule.new(day, transition_at) + expected = [AbsoluteDayOfYearTransitionRule, day * 86400, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + AbsoluteDayOfYearTransitionRule.new(1, transition_at) + end +end + +class TCJulianDayOfYearTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + + [0, 366, '1'].each do |value| + define_method "test_invalid_day_#{value}" do + error = assert_raises(ArgumentError) { JulianDayOfYearTransitionRule.new(value) } + assert_match(/\bday\b/, error.message) + end + end + + [2020, 2021, 2100, 2000].each do |year| + [ + [1, 1, 1], + [59, 2, 28], + [60, 3, 1], + [365, 12, 31] + ].each do |(day, expected_month, expected_day)| + define_method "test_day_#{day}_of_year_#{year}" do + rule = JulianDayOfYearTransitionRule.new(day, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, expected_month, expected_day, 1, 0, 0), result.to_datetime) + end + end + end + + def test_day_1_is_always_first_day_of_year + rule = JulianDayOfYearTransitionRule.new(1) + assert_equal(true, rule.is_always_first_day_of_year?) 
+ end + + [2, 365].each do |day| + define_method "test_day_#{day}_is_not_always_first_day_of_year" do + rule = JulianDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_first_day_of_year?) + end + end + + def test_day_365_is_always_last_day_of_year + rule = JulianDayOfYearTransitionRule.new(365) + assert_equal(true, rule.is_always_last_day_of_year?) + end + + [1, 364].each do |day| + define_method "test_day_#{day}_is_not_always_last_day_of_year" do + rule = JulianDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_last_day_of_year?) + end + end + + [:==, :eql?].each do |method| + [ + [1, 0], + [1, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_equal_for_day_#{day}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = JulianDayOfYearTransitionRule.new(day, transition_at) + rule2 = JulianDayOfYearTransitionRule.new(day, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_by_day_with_#{method}" do + rule1 = JulianDayOfYearTransitionRule.new(1, 0) + rule2 = JulianDayOfYearTransitionRule.new(2, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_absolute_with_#{method}" do + rule1 = JulianDayOfYearTransitionRule.new(1, 0) + rule2 = AbsoluteDayOfYearTransitionRule.new(1, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [1, 0], + [1, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_hash_for_day_#{day}_and_transition_at_#{transition_at}" do + rule = JulianDayOfYearTransitionRule.new(day, transition_at) + expected = [JulianDayOfYearTransitionRule, day * 86400, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + JulianDayOfYearTransitionRule.new(1, transition_at) + end +end + +module DayOfWeekTransitionRuleTestHelper + [-1, 0, 13, '1'].each do |month| + define_method "test_invalid_month_#{month}" do + error = assert_raises(ArgumentError) { create_with_month_and_day_of_week(month, 0) } + assert_match(/\bmonth\b/, error.message) + end + end + + [-1, 7, '0'].each do |day_of_week| + define_method "test_invalid_day_of_week_#{day_of_week}" do + error = assert_raises(ArgumentError) { create_with_month_and_day_of_week(1, day_of_week) } + assert_match(/\bday_of_week\b/, error.message) + end + end + + def test_is_not_always_first_day_of_year + rule = create_with_month_and_day_of_week(1, 0) + assert_equal(false, rule.is_always_first_day_of_year?) + end + + def test_is_not_always_last_day_of_year + rule = create_with_month_and_day_of_week(12, 6) + assert_equal(false, rule.is_always_last_day_of_year?) 
+ end + + [:==, :eql?].each do |method| + define_method "test_not_equal_by_month_with_#{method}" do + rule1 = create_with_month_and_day_of_week(1, 0) + rule2 = create_with_month_and_day_of_week(2, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_by_day_of_week_with_#{method}" do + rule1 = create_with_month_and_day_of_week(1, 0) + rule2 = create_with_month_and_day_of_week(1, 1) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end +end + +class TCDayOfMonthTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + include DayOfWeekTransitionRuleTestHelper + + [-1, 0, 5, '1'].each do |week| + define_method "test_invalid_week_#{week}" do + error = assert_raises(ArgumentError) { DayOfMonthTransitionRule.new(1, week, 0) } + assert_match(/\bweek\b/, error.message) + end + end + + [ + # All possible first week start days. + [2020, 3, [1, 2, 3, 4, 5, 6, 7]], + [2021, 3, [7, 1, 2, 3, 4, 5, 6]], + [2022, 3, [6, 7, 1, 2, 3, 4, 5]], + [2023, 3, [5, 6, 7, 1, 2, 3, 4]], + [2018, 3, [4, 5, 6, 7, 1, 2, 3]], + [2024, 3, [3, 4, 5, 6, 7, 1, 2]], + [2025, 3, [2, 3, 4, 5, 6, 7, 1]], + + # All possible months. + [2019, 1, [6]], + [2019, 2, [3]], + [2019, 3, [3]], + [2019, 4, [7]], + [2019, 5, [5]], + [2019, 6, [2]], + [2019, 7, [7]], + [2019, 8, [4]], + [2019, 9, [1]], + [2019, 10, [6]], + [2019, 11, [3]], + [2019, 12, [1]] + ].each do |(year, month, days)| + days.each_with_index do |expected_day, day_of_week| + (1..4).each do |week| + define_method "test_month_#{month}_week_#{week}_and_day_of_week_#{day_of_week}_year_#{year}" do + rule = DayOfMonthTransitionRule.new(month, week, day_of_week, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(year, month, expected_day + (week - 1) * 7, 1, 0, 0), result.to_time) + end + end + end + end + + [[3, 3, 20], [4, 6, 23]].each do |(week, day_of_week, expected_day)| + define_method "test_crosses_64_bit_boundary_for_day_of_week_#{day_of_week}" do + rule = DayOfMonthTransitionRule.new(1, week, day_of_week, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, 2038) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_64bit + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(2038, 1, expected_day, 1, 0, 0), result.to_datetime) + end + end + + [:==, :eql?].each do |method| + [ + [1, 1, 0, 0], + [1, 1, 0, 1], + [1, 1, 1, 0], + [1, 2, 0, 0], + [2, 1, 0, 0] + ].each do |(month, week, day_of_week, transition_at)| + define_method "test_equal_for_month_#{month}_week_#{week}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = DayOfMonthTransitionRule.new(month, week, day_of_week, transition_at) + rule2 = DayOfMonthTransitionRule.new(month, week, day_of_week, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_by_week_with_#{method}" do + rule1 = DayOfMonthTransitionRule.new(1, 1, 0, 0) + rule2 = DayOfMonthTransitionRule.new(1, 2, 0, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_last_day_of_month_with_#{method}" do + rule1 = DayOfMonthTransitionRule.new(1, 1, 0, 0) + 
rule2 = LastDayOfMonthTransitionRule.new(1, 0, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [1, 1, 0, 0], + [1, 1, 0, 1], + [1, 1, 1, 0], + [1, 2, 0, 0], + [2, 1, 0, 0] + ].each do |(month, week, day_of_week, transition_at)| + define_method "test_hash_for_month_#{month}_week_#{week}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}" do + rule = DayOfMonthTransitionRule.new(month, week, day_of_week, transition_at) + expected = [(week - 1) * 7 + 1, month, day_of_week, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + DayOfMonthTransitionRule.new(1, 1, 0, transition_at) + end + + def create_with_month_and_day_of_week(month, day_of_week) + DayOfMonthTransitionRule.new(month, 1, day_of_week) + end +end + +class TCLastDayOfMonthTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + include DayOfWeekTransitionRuleTestHelper + + [ + # All possible last days. + [2021, 10, [31, 25, 26, 27, 28, 29, 30]], + [2022, 10, [30, 31, 25, 26, 27, 28, 29]], + [2023, 10, [29, 30, 31, 25, 26, 27, 28]], + [2018, 10, [28, 29, 30, 31, 25, 26, 27]], + [2024, 10, [27, 28, 29, 30, 31, 25, 26]], + [2025, 10, [26, 27, 28, 29, 30, 31, 25]], + [2026, 10, [25, 26, 27, 28, 29, 30, 31]], + + # All possible months. + [2020, 1, [26]], + [2020, 2, [23]], + [2020, 3, [29]], + [2020, 4, [26]], + [2020, 5, [31]], + [2020, 6, [28]], + [2020, 7, [26]], + [2020, 8, [30]], + [2020, 9, [27]], + [2020, 10, [25]], + [2020, 11, [29]], + [2020, 12, [27]] + ].each do |(year, month, days)| + days.each_with_index do |expected_day, day_of_week| + define_method "test_month_#{month}_day_of_week_#{day_of_week}_year_#{year}" do + rule = LastDayOfMonthTransitionRule.new(month, day_of_week, 7200) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(year, month, expected_day, 2, 0, 0), result.to_time) + end + end + end + + [[2020, 6, 29], [2021, 0, 28], [2000, 2, 29], [2100, 0, 28]].each do |(year, day_of_week, expected_day)| + define_method "test_#{expected_day == 29 ? 
'' : 'non_'}leap_year_#{year}" do + rule = LastDayOfMonthTransitionRule.new(2, day_of_week, 7200) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, 2, expected_day, 2, 0, 0), result.to_datetime) + end + end + + [:==, :eql?].each do |method| + [ + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [2, 0, 0] + ].each do |(month, day_of_week, transition_at)| + define_method "test_equal_for_month_#{month}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = LastDayOfMonthTransitionRule.new(month, day_of_week, transition_at) + rule2 = LastDayOfMonthTransitionRule.new(month, day_of_week, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_to_day_of_month_with_#{method}" do + rule1 = LastDayOfMonthTransitionRule.new(1, 0, 0) + rule2 = DayOfMonthTransitionRule.new(1, 1, 0, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [2, 0, 0] + ].each do |(month, day_of_week, transition_at)| + define_method "test_hash_for_month_#{month}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}" do + rule = LastDayOfMonthTransitionRule.new(month, day_of_week, transition_at) + expected = [month, day_of_week, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + LastDayOfMonthTransitionRule.new(1, 0, transition_at) + end + + def create_with_month_and_day_of_week(month, day_of_week) + LastDayOfMonthTransitionRule.new(month, day_of_week) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_country_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_country_info.rb new file mode 100644 index 0000000..f54e9b1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_country_info.rb @@ -0,0 +1,78 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCZoneinfoCountryInfo < Minitest::Test + + def test_code + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) {|c| } + assert_equal('ZZ', ci.code) + end + + def test_name + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) {|c| } + assert_equal('Zzz', ci.name) + end + + def test_zone_identifiers_empty + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) {|c| } + assert(ci.zone_identifiers.empty?) + assert(ci.zone_identifiers.frozen?) + end + + def test_zone_identifiers + zones = [ + CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7)) + ] + + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', zones) + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + assert(ci.zone_identifiers.frozen?) + assert(!ci.zones.equal?(zones)) + assert(!zones.frozen?) 
+ end + + def test_zone_identifiers_after_freeze + zones = [ + CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7)) + ] + + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', zones) + ci.freeze + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + end + + def test_zones_empty + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) + assert(ci.zones.empty?) + assert(ci.zones.frozen?) + end + + def test_zones + zones = [ + CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7)) + ] + + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', zones) + + assert_equal([CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7))], + ci.zones) + assert(ci.zones.frozen?) + assert(!ci.zones.equal?(zones)) + assert(!zones.frozen?) + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_data_source.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_data_source.rb new file mode 100644 index 0000000..fe517cb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_data_source.rb @@ -0,0 +1,1204 @@ +# encoding: UTF-8 + +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'fileutils' +require 'pathname' +require 'tmpdir' + +include TZInfo + +# Use send as a workaround for an issue on JRuby 9.2.9.0 where using the +# refinement causes calls to RubyCoreSupport.file_open to fail to pass the block +# parameter. +# +# https://travis-ci.org/tzinfo/tzinfo/jobs/628812051#L1931 +# https://github.com/jruby/jruby/issues/6009 +send(:using, TZInfo::RubyCoreSupport::UntaintExt) if TZInfo::RubyCoreSupport.const_defined?(:UntaintExt) +send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCZoneinfoDataSource < Minitest::Test + ZONEINFO_DIR = File.join(File.expand_path(File.dirname(__FILE__)), 'zoneinfo').untaint + + def setup + @orig_search_path = ZoneinfoDataSource.search_path.clone + @orig_alternate_iso3166_tab_search_path = ZoneinfoDataSource.alternate_iso3166_tab_search_path.clone + @orig_pwd = FileUtils.pwd + + # A zoneinfo directory containing files needed by the tests. + # The symlinks in this directory are set up in test_utils.rb. + @data_source = ZoneinfoDataSource.new(ZONEINFO_DIR) + end + + def teardown + ZoneinfoDataSource.search_path = @orig_search_path + ZoneinfoDataSource.alternate_iso3166_tab_search_path = @orig_alternate_iso3166_tab_search_path + FileUtils.chdir(@orig_pwd) + end + + def test_default_search_path + assert_equal(['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'], ZoneinfoDataSource.search_path) + assert_equal(false, ZoneinfoDataSource.search_path.frozen?) 
+ end + + def test_set_search_path_default + ZoneinfoDataSource.search_path = ['/tmp/zoneinfo1', '/tmp/zoneinfo2'] + assert_equal(['/tmp/zoneinfo1', '/tmp/zoneinfo2'], ZoneinfoDataSource.search_path) + + ZoneinfoDataSource.search_path = nil + assert_equal(['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'], ZoneinfoDataSource.search_path) + assert_equal(false, ZoneinfoDataSource.search_path.frozen?) + end + + def test_set_search_path_array + path = ['/tmp/zoneinfo1', '/tmp/zoneinfo2'] + ZoneinfoDataSource.search_path = path + assert_equal(['/tmp/zoneinfo1', '/tmp/zoneinfo2'], ZoneinfoDataSource.search_path) + refute_same(path, ZoneinfoDataSource.search_path) + end + + def test_set_search_path_array_to_s + ZoneinfoDataSource.search_path = [Pathname.new('/tmp/zoneinfo3')] + assert_equal(['/tmp/zoneinfo3'], ZoneinfoDataSource.search_path) + end + + def test_set_search_path_string + ZoneinfoDataSource.search_path = ['/tmp/zoneinfo4', '/tmp/zoneinfo5'].join(File::PATH_SEPARATOR) + assert_equal(['/tmp/zoneinfo4', '/tmp/zoneinfo5'], ZoneinfoDataSource.search_path) + end + + def test_default_alternate_iso3166_tab_search_path + assert_equal(['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + assert_equal(false, ZoneinfoDataSource.alternate_iso3166_tab_search_path.frozen?) + end + + def test_set_alternate_iso3166_tab_search_path_default + ZoneinfoDataSource.alternate_iso3166_tab_search_path = ['/tmp/iso3166.tab', '/tmp/iso3166'] + assert_equal(['/tmp/iso3166.tab', '/tmp/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + + ZoneinfoDataSource.alternate_iso3166_tab_search_path = nil + assert_equal(['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + assert_equal(false, ZoneinfoDataSource.alternate_iso3166_tab_search_path.frozen?) 
+ end + + def test_set_alternate_iso3166_tab_search_path_array + path = ['/tmp/iso3166.tab', '/tmp/iso3166'] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = path + assert_equal(['/tmp/iso3166.tab', '/tmp/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + refute_same(path, ZoneinfoDataSource.alternate_iso3166_tab_search_path) + end + + def test_set_alternate_iso3166_tab_search_path_array_to_s + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [Pathname.new('/tmp/iso3166.tab')] + assert_equal(['/tmp/iso3166.tab'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + end + + def test_set_alternate_iso3166_tab_search_path_string + ZoneinfoDataSource.alternate_iso3166_tab_search_path = ['/tmp/iso3166.tab', '/tmp/iso3166'].join(File::PATH_SEPARATOR) + assert_equal(['/tmp/iso3166.tab', '/tmp/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + end + + def test_new_search + Dir.mktmpdir('tzinfo_test_dir1') do |dir1| + Dir.mktmpdir('tzinfo_test_dir2') do |dir2| + Dir.mktmpdir('tzinfo_test_dir3') do |dir3| + Dir.mktmpdir('tzinfo_test_dir4') do |dir4| + file = File.join(dir1, 'file') + FileUtils.touch(File.join(dir2, 'zone.tab')) + FileUtils.touch(File.join(dir3, 'iso3166.tab')) + FileUtils.touch(File.join(dir4, 'zone.tab')) + FileUtils.touch(File.join(dir4, 'iso3166.tab')) + + ZoneinfoDataSource.search_path = [file, dir2, dir3, dir4] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + data_source = ZoneinfoDataSource.new + assert_equal(dir4, data_source.zoneinfo_dir) + end + end + end + end + end + + def test_new_search_zone1970 + Dir.mktmpdir('tzinfo_test_dir1') do |dir1| + Dir.mktmpdir('tzinfo_test_dir2') do |dir2| + Dir.mktmpdir('tzinfo_test_dir3') do |dir3| + Dir.mktmpdir('tzinfo_test_dir4') do |dir4| + file = File.join(dir1, 'file') + FileUtils.touch(File.join(dir2, 'zone1970.tab')) + FileUtils.touch(File.join(dir3, 'iso3166.tab')) + FileUtils.touch(File.join(dir4, 'zone1970.tab')) + FileUtils.touch(File.join(dir4, 'iso3166.tab')) + + ZoneinfoDataSource.search_path = [file, dir2, dir3, dir4] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + data_source = ZoneinfoDataSource.new + assert_equal(dir4, data_source.zoneinfo_dir) + end + end + end + end + end + + def test_new_search_solaris_tab_files + # Solaris names the tab files 'tab/country.tab' (iso3166.tab) and + # 'tab/zone_sun.tab' (zone.tab). 
+ + Dir.mktmpdir('tzinfo_test_dir') do |dir| + tab = File.join(dir, 'tab') + FileUtils.mkdir(tab) + FileUtils.touch(File.join(tab, 'country.tab')) + FileUtils.touch(File.join(tab, 'zone_sun.tab')) + + ZoneinfoDataSource.search_path = [dir] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + data_source = ZoneinfoDataSource.new + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_search_alternate_iso3166_path + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + tab_file = File.join(tab_dir, 'iso3166') + + ZoneinfoDataSource.search_path = [zoneinfo_dir] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [tab_file] + + assert_raises(ZoneinfoDirectoryNotFound) do + ZoneinfoDataSource.new + end + + FileUtils.touch(tab_file) + + data_source = ZoneinfoDataSource.new + assert_equal(zoneinfo_dir, data_source.zoneinfo_dir) + end + end + end + + def test_new_search_not_found + Dir.mktmpdir('tzinfo_test_dir1') do |dir1| + Dir.mktmpdir('tzinfo_test_dir2') do |dir2| + Dir.mktmpdir('tzinfo_test_dir3') do |dir3| + Dir.mktmpdir('tzinfo_test_dir4') do |dir4| + Dir.mktmpdir('tzinfo_test_dir5') do |dir5| + file = File.join(dir1, 'file') + FileUtils.touch(file) + FileUtils.touch(File.join(dir2, 'zone.tab')) + FileUtils.touch(File.join(dir3, 'zone1970.tab')) + FileUtils.touch(File.join(dir4, 'iso3166.tab')) + + ZoneinfoDataSource.search_path = [file, dir2, dir3, dir4, dir5] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + assert_raises(ZoneinfoDirectoryNotFound) do + ZoneinfoDataSource.new + end + end + end + end + end + end + end + + def test_new_search_relative + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.chdir(dir) + + ZoneinfoDataSource.search_path = ['.'] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + data_source = ZoneinfoDataSource.new + assert_equal(Pathname.new(dir).realpath.to_s, data_source.zoneinfo_dir) + + # Change out of the directory to allow it to be deleted on Windows. + FileUtils.chdir(@orig_pwd) + end + end + + def test_new_dir + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_dir_zone1970 + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone1970.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_dir_solaris_tab_files + # Solaris names the tab files 'tab/country.tab' (iso3166.tab) and + # 'tab/zone_sun.tab' (zone.tab). 
+ + Dir.mktmpdir('tzinfo_test') do |dir| + tab = File.join(dir, 'tab') + FileUtils.mkdir(tab) + FileUtils.touch(File.join(tab, 'country.tab')) + FileUtils.touch(File.join(tab, 'zone_sun.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_dir_alternate_iso3166_path + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + tab_file = File.join(tab_dir, 'iso3166') + FileUtils.touch(tab_file) + + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [tab_file] + + assert_raises(InvalidZoneinfoDirectory) do + # The alternate_iso3166_tab_search_path should not be used. This should raise + # an exception. + ZoneinfoDataSource.new(zoneinfo_dir) + end + + data_source = ZoneinfoDataSource.new(zoneinfo_dir, tab_file) + assert_equal(zoneinfo_dir, data_source.zoneinfo_dir) + end + end + end + + def test_new_dir_invalid + Dir.mktmpdir('tzinfo_test') do |dir| + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(dir) + end + end + end + + def test_new_dir_invalid_alternate_iso3166_path + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(zoneinfo_dir, File.join(tab_dir, 'iso3166')) + end + end + end + end + + def test_new_dir_invalid_alternate_iso3166_path_overrides_valid + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'iso3166.tab')) + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(zoneinfo_dir, File.join(tab_dir, 'iso3166')) + end + end + end + end + + def test_new_file + Dir.mktmpdir('tzinfo_test') do |dir| + file = File.join(dir, 'file') + FileUtils.touch(file) + + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(file) + end + end + end + + def test_new_dir_relative + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.chdir(dir) + + data_source = ZoneinfoDataSource.new('.') + assert_equal(Pathname.new(dir).realpath.to_s, data_source.zoneinfo_dir) + + # Change out of the directory to allow it to be deleted on Windows. + FileUtils.chdir(@orig_pwd) + end + end + + def test_zoneinfo_dir + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + assert_equal(true, data_source.zoneinfo_dir.frozen?) + end + end + + def test_load_timezone_info_data + info = @data_source.load_timezone_info('Europe/London') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Europe/London', info.identifier) + end + + def test_load_timezone_info_linked + info = @data_source.load_timezone_info('UTC') + + # On platforms that don't support symlinks, 'UTC' will be created as a copy. + # Either way, a ZoneinfoTimezoneInfo should be returned. 
+ + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('UTC', info.identifier) + end + + def test_load_timezone_info_does_not_exist + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('Nowhere/Special') + end + end + + def test_load_timezone_info_invalid + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('../Definitions/Europe/London') + end + end + + %w(leapseconds localtime).each do |file_name| + define_method("test_load_timezone_info_ignored_#{file_name}_file") do + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info(file_name) + end + end + end + + def test_load_timezone_info_ignored_plus_version_file + # Mac OS X includes a file named +VERSION containing the tzdata version. + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + File.open(File.join(dir, '+VERSION'), 'w') do |f| + f.binmode + f.write("2013a\n") + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('+VERSION') + end + end + end + + def test_load_timezone_info_ignored_timeconfig_symlink + # Slackware includes a symlink named timeconfig that points at /usr/sbin/timeconfig. + + Dir.mktmpdir('tzinfo_test_target') do |target_dir| + target_path = File.join(target_dir, 'timeconfig') + + File.open(target_path, 'w') do |f| + f.write("#!/bin/sh\n") + f.write("#\n") + f.write('# timeconfig Slackware Linux timezone configuration utility.\n') + end + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + symlink_path = File.join(dir, 'timeconfig') + begin + FileUtils.ln_s(target_path, symlink_path) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Copy instead. 
+ FileUtils.cp(target_path, symlink_path) + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('timeconfig') + end + end + end + end + + def test_load_timezone_info_nil + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info(nil) + end + end + + def test_load_timezone_info_case + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('europe/london') + end + end + + def test_load_timezone_info_permission_denied + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + file = File.join(dir, 'UTC') + FileUtils.touch(file) + FileUtils.chmod(0200, file) + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('UTC') + end + end + end + + def test_load_timezone_info_directory + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + subdir = File.join(dir, 'Subdir') + FileUtils.mkdir(subdir) + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('Subdir') + end + end + end + + def test_load_timezone_info_linked_absolute_outside + Dir.mktmpdir('tzinfo_test') do |dir| + Dir.mktmpdir('tzinfo_test') do |outside| + outside_file = File.join(outside, 'EST') + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), outside_file) + + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + file = File.join(dir, 'EST') + + begin + FileUtils.ln_s(outside_file, file) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. + return + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('EST') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('EST', info.identifier) + end + end + end + + def test_load_timezone_info_linked_absolute_inside + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + link = File.join(dir, 'Link') + + begin + FileUtils.ln_s(File.join(File.expand_path(dir), 'EST'), link) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. + return + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Link', info.identifier) + end + end + + def test_load_timezone_info_linked_relative_outside + Dir.mktmpdir('tzinfo_test') do |root| + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(root, 'outside')) + + dir = File.join(root, 'zoneinfo') + FileUtils.mkdir(dir) + + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + link = File.join(dir, 'Link') + + begin + FileUtils.ln_s('../outside', link) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. 
+ return + end + + subdir = File.join(dir, 'Subdir') + subdir_link = File.join(subdir, 'Link') + FileUtils.mkdir(subdir) + FileUtils.ln_s('../../outside', subdir_link) + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Link', info.identifier) + + info = data_source.load_timezone_info('Subdir/Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir/Link', info.identifier) + end + end + + def test_load_timezone_info_linked_relative_parent_inside + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + subdir = File.join(dir, 'Subdir') + FileUtils.mkdir(subdir) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(subdir, 'EST')) + + subdir_link = File.join(subdir, 'Link') + begin + FileUtils.ln_s('../Subdir/EST', subdir_link) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. + return + end + + subdir_link2 = File.join(subdir, 'Link2') + FileUtils.ln_s('../EST', subdir_link2) + + subdir2 = File.join(dir, 'Subdir2') + FileUtils.mkdir(subdir2) + subdir2_link = File.join(subdir2, 'Link') + FileUtils.ln_s('../Subdir/EST', subdir2_link) + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('Subdir/Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir/Link', info.identifier) + + info = data_source.load_timezone_info('Subdir/Link2') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir/Link2', info.identifier) + + info = data_source.load_timezone_info('Subdir2/Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir2/Link', info.identifier) + end + end + + def test_load_timezone_info_invalid_file + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + File.open(File.join(dir, 'Zone'), 'wb') do |file| + file.write('NotAValidTZifFile') + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('Zone') + end + end + end + + def test_load_timezone_info_invalid_file_2 + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + zone = File.join(dir, 'Zone') + + File.open(File.join(@data_source.zoneinfo_dir, 'EST')) do |src| + # Change header to TZif1 (which is not a valid header). + File.open(zone, 'wb') do |dest| + dest.write('TZif1') + src.pos = 5 + FileUtils.copy_stream(src, dest) + end + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('Zone') + end + end + end + + def test_load_timezone_info_tainted + safe_test(:unavailable => :skip) do + identifier = 'Europe/Amsterdam'.dup.taint + assert(identifier.tainted?) + info = @data_source.load_timezone_info(identifier) + assert_equal('Europe/Amsterdam', info.identifier) + assert(identifier.tainted?) 
+ end + end + + def test_load_timezone_info_tainted_and_frozen + safe_test do + info = @data_source.load_timezone_info('Europe/Amsterdam'.dup.taint.freeze) + assert_equal('Europe/Amsterdam', info.identifier) + end + end + + def test_load_timezone_info_tainted_zoneinfo_dir_safe_mode + safe_test(:unavailable => :skip) do + assert_raises(SecurityError) do + ZoneinfoDataSource.new(@data_source.zoneinfo_dir.dup.taint) + end + end + end + + def test_load_timezone_info_tainted_zoneinfo_dir + data_source = ZoneinfoDataSource.new(@data_source.zoneinfo_dir.dup.taint) + info = data_source.load_timezone_info('Europe/London') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Europe/London', info.identifier) + end + + def get_timezone_filenames(directory) + entries = Dir.glob(File.join(directory, '**', '*')) + + entries = entries.select do |file| + file.untaint + File.file?(file) + end + + entries = entries.collect {|file| file[directory.length + File::SEPARATOR.length, file.length - directory.length - File::SEPARATOR.length]} + + # Exclude right (with leapseconds) and posix (copy) directories; .tab files; leapseconds, localtime and posixrules files. + entries = entries.select do |file| + file !~ /\A(posix|right)\// && + file !~ /\.tab\z/ && + !%w(leapseconds localtime posixrules).include?(file) + end + + entries.sort + end + + def test_timezone_identifiers + expected = get_timezone_filenames(@data_source.zoneinfo_dir) + all = @data_source.timezone_identifiers + assert_kind_of(Array, all) + assert_array_same_items(expected, all) + assert_equal(true, all.frozen?) + end + + def test_data_timezone_identifiers + expected = get_timezone_filenames(@data_source.zoneinfo_dir) + all_data = @data_source.data_timezone_identifiers + assert_kind_of(Array, all_data) + assert_array_same_items(expected, all_data) + assert_equal(true, all_data.frozen?) + end + + def test_linked_timezone_identifiers + all_linked = @data_source.linked_timezone_identifiers + assert_kind_of(Array, all_linked) + assert_equal(true, all_linked.empty?) + assert_equal(true, all_linked.frozen?) + end + + def test_timezone_identifiers_safe_mode + safe_test do + expected = get_timezone_filenames(@data_source.zoneinfo_dir) + all = @data_source.timezone_identifiers + assert_kind_of(Array, all) + assert_array_same_items(expected, all) + assert_equal(true, all.frozen?) + end + end + + def test_timezone_identifiers_ignored_plus_version_file + # Mac OS X includes a file named +VERSION containing the tzdata version. + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + File.open(File.join(dir, '+VERSION'), 'w') do |f| + f.binmode + f.write("2013a\n") + end + + data_source = ZoneinfoDataSource.new(dir) + assert_array_same_items(['EST'], data_source.timezone_identifiers) + end + end + + def test_timezone_identifiers_ignored_timeconfig_symlink + # Slackware includes a symlink named timeconfig that points at /usr/sbin/timeconfig. 
+ + Dir.mktmpdir('tzinfo_test_target') do |target_dir| + target_path = File.join(target_dir, 'timeconfig') + + File.open(target_path, 'w') do |f| + f.write("#!/bin/sh\n") + f.write("#\n") + f.write('# timeconfig Slackware Linux timezone configuration utility.\n') + end + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + symlink_path = File.join(dir, 'timeconfig') + begin + FileUtils.ln_s(target_path, symlink_path) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Copy instead. + FileUtils.cp(target_path, symlink_path) + end + + data_source = ZoneinfoDataSource.new(dir) + assert_array_same_items(['EST'], data_source.timezone_identifiers) + end + end + end + + def test_timezone_identifiers_ignored_src_directory + # Solaris includes a src directory containing the source timezone data files + # from the tzdata distribution. These should be ignored. + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + src_dir = File.join(dir, 'src') + FileUtils.mkdir(src_dir) + + File.open(File.join(src_dir, 'europe'), 'w') do |f| + f.binmode + f.write("Zone\tEurope/London\t0:00\tEU\tGMT/BST\n") + end + + data_source = ZoneinfoDataSource.new(dir) + assert_array_same_items(['EST'], data_source.timezone_identifiers) + end + end + + def test_load_country_info + info = @data_source.load_country_info('GB') + assert_equal('GB', info.code) + assert_equal('Britain (UK)', info.name) + end + + def test_load_country_info_not_exist + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('ZZ') + end + end + + def test_load_country_info_invalid + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('../Countries/GB') + end + end + + def test_load_country_info_nil + assert_raises(InvalidCountryCode) do + @data_source.load_country_info(nil) + end + end + + def test_load_country_info_case + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('gb') + end + end + + def test_load_country_info_tainted + safe_test(:unavailable => :skip) do + code = 'NL'.dup.taint + assert(code.tainted?) + info = @data_source.load_country_info(code) + assert_equal('NL', info.code) + assert(code.tainted?) 
+ end + end + + def test_load_country_info_tainted_and_frozen + safe_test do + info = @data_source.load_country_info('NL'.dup.taint.freeze) + assert_equal('NL', info.code) + end + end + + def test_load_country_info_check_zones + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts('# iso3166.tab') + iso3166.puts('') + iso3166.puts("FC\tFake Country") + iso3166.puts("OC\tOther Country") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts('# zone.tab') + zone.puts('') + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([CountryTimezone.new('Other/One', Rational(601, 12), Rational(433, 30))], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_check_zones_zone1970 + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts('# iso3166.tab') + iso3166.puts('') + iso3166.puts("AC\tAnother Country") + iso3166.puts("FC\tFake Country") + iso3166.puts("OC\tOther Country") + end + + # zone.tab will be ignored. + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts('# zone.tab') + zone.puts('') + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + end + + # zone1970.tab will be used. + RubyCoreSupport.open_file(File.join(dir, 'zone1970.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts('# zone1970.tab') + zone.puts('') + zone.puts("AC,OC\t+0000+00000\tMiddle/Another/One\tAnother's One") + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC,OC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC,OC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + zone.puts("OC\t+5015+11426\tOther/Two") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('AC') + assert_equal('AC', info.code) + assert_equal('Another Country', info.name) + assert_equal(['Middle/Another/One'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) 
+ assert_equal([CountryTimezone.new('Middle/Another/One', Rational(0, 1), Rational(0, 1), "Another's One")], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + # Testing the ordering of zones. A zone can either be primary (country + # code is the first in the first column), or secondary (country code is + # not the first). Should return all the primaries first in the order they + # appeared in the file, followed by all the secondaries in the order they + # appeared in file. + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One', 'Other/Two', 'Middle/Another/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Other/One', Rational(601, 12), Rational( 433, 30)), + CountryTimezone.new('Other/Two', Rational(201, 4), Rational(3433, 30)), + CountryTimezone.new('Middle/Another/One', Rational(0, 1), Rational(0, 1), "Another's One"), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_check_zones_solaris_tab_files + # Solaris uses 5 columns instead of the usual 4 in zone_sun.tab. + # An extra column before the comment gives an optional linked/alternate + # timezone identifier (or '-' if not set). + # + # Additionally, there is a section at the end of the file for timezones + # covering regions. These are given lower-case "country" codes. The timezone + # identifier column refers to a continent instead of an identifier. These + # lines will be ignored by TZInfo. 
+ + Dir.mktmpdir('tzinfo_test') do |dir| + tab_dir = File.join(dir, 'tab') + FileUtils.mkdir(tab_dir) + + RubyCoreSupport.open_file(File.join(tab_dir, 'country.tab'), 'w', :external_encoding => 'UTF-8') do |country| + country.puts('# country.tab') + country.puts('# Solaris') + country.puts("FC\tFake Country") + country.puts("OC\tOther Country") + end + + RubyCoreSupport.open_file(File.join(tab_dir, 'zone_sun.tab'), 'w', :external_encoding => 'UTF-8') do |zone_sun| + zone_sun.puts('# zone_sun.tab') + zone_sun.puts('# Solaris') + zone_sun.puts('# Countries') + zone_sun.puts("FC\t+513030-0000731\tFake/One\t-\tDescription of one") + zone_sun.puts("FC\t+353916+1394441\tFake/Two\tFake/Alias/Two\tAnother description") + zone_sun.puts("FC\t-2332-04637\tFake/Three\tFake/Alias/Three\tThis is Three") + zone_sun.puts("OC\t+5005+01426\tOther/One\tOther/Linked/One") + zone_sun.puts("OC\t+5015+01436\tOther/Two\t-") + zone_sun.puts('# Regions') + zone_sun.puts("ee\t+0000+00000\tEurope/\tEET") + zone_sun.puts("me\t+0000+00000\tEurope/\tMET") + zone_sun.puts("we\t+0000+00000\tEurope/\tWET") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One', 'Other/Two'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Other/One', Rational(601, 12), Rational(433, 30)), + CountryTimezone.new('Other/Two', Rational(201, 4), Rational(73, 5))], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_check_zones_alternate_iso3166_file + Dir.mktmpdir('tzinfo_test') do |dir| + zoneinfo_dir = File.join(dir, 'zoneinfo') + tab_dir = File.join(dir, 'tab') + FileUtils.mkdir(zoneinfo_dir) + FileUtils.mkdir(tab_dir) + + tab_file = File.join(tab_dir, 'iso3166') + RubyCoreSupport.open_file(tab_file, 'w', :external_encoding => 'UTF-8') do |iso3166| + # Use the BSD 4 column format (alternate iso3166 is used on BSD). + iso3166.puts("FC\tFCC\t001\tFake Country") + iso3166.puts("OC\tOCC\t002\tOther Country") + end + + RubyCoreSupport.open_file(File.join(zoneinfo_dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(zoneinfo_dir, tab_file) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) 
+ assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([CountryTimezone.new('Other/One', Rational(601, 12), Rational(433, 30))], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_four_column_iso31611 + # OpenBSD and FreeBSD use a 4 column iso3166.tab file that includes + # alpha-3 and numeric-3 codes in addition to the alpha-2 and name in the + # tz database version. + + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("FC\tFCC\t001\tFake Country") + iso3166.puts("OC\tOCC\t002\tOther Country") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + end + end + + def test_load_country_info_utf8 + # iso3166.tab is currently in ASCII (as of tzdata 2014f), but will be + # changed to UTF-8 in the future. + + # zone.tab is in ASCII, with no plans to change. Since ASCII is a subset of + # UTF-8, test that this is loaded in UTF-8 anyway. + + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("UT\tUnicode Test ✓") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("UT\t+513030-0000731\tUnicode✓/One\tUnicode Description ✓") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('UT') + assert_equal('UT', info.code) + assert_equal('Unicode Test ✓', info.name) + assert_equal(['Unicode✓/One'], info.zone_identifiers) + assert_equal([CountryTimezone.new('Unicode✓/One', Rational(6181, 120), Rational(-451, 3600), 'Unicode Description ✓')], info.zones) + end + end + + def test_load_country_info_utf8_zone1970 + # iso3166.tab is currently in ASCII (as of tzdata 2014f), but will be + # changed to UTF-8 in the future. + + # zone1970.tab is in UTF-8. 
+ + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("UT\tUnicode Test ✓") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone1970.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("UT\t+513030-0000731\tUnicode✓/One\tUnicode Description ✓") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('UT') + assert_equal('UT', info.code) + assert_equal('Unicode Test ✓', info.name) + assert_equal(['Unicode✓/One'], info.zone_identifiers) + assert_equal([CountryTimezone.new('Unicode✓/One', Rational(6181, 120), Rational(-451, 3600), 'Unicode Description ✓')], info.zones) + end + end + + def test_country_codes + file_codes = [] + + RubyCoreSupport.open_file(File.join(@data_source.zoneinfo_dir, 'iso3166.tab'), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + file.each_line do |line| + line.chomp! + file_codes << $1 if line =~ /\A([A-Z]{2})\t/ + end + end + + codes = @data_source.country_codes + assert_array_same_items(file_codes, codes) + assert_equal(true, codes.frozen?) + end + + def test_country_codes_four_column_iso3166 + # OpenBSD and FreeBSD use a 4 column iso3166.tab file that includes + # alpha-3 and numeric-3 codes in addition to the alpha-2 and name in the + # tz database version. + + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("FC\tFCC\t001\tFake Country") + iso3166.puts("OC\tOCC\t002\tOther Country") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(dir) + + codes = data_source.country_codes + assert_array_same_items(%w(FC OC), codes) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_timezone_info.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_timezone_info.rb new file mode 100644 index 0000000..c8c47a0 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_timezone_info.rb @@ -0,0 +1,2075 @@ +# encoding: UTF-8 + +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'tempfile' + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. +send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt) + +class TCZoneinfoTimezoneInfo < Minitest::Test + class FakePosixTimeZoneParser + def initialize(&block) + @on_parse = block + end + + def parse(tz_string) + @on_parse.call(tz_string) + end + end + + begin + Time.at(-2147483649) + Time.at(2147483648) + SUPPORTS_64BIT = true + rescue RangeError + SUPPORTS_64BIT = false + end + + begin + Time.at(-1) + Time.at(-2147483648) + SUPPORTS_NEGATIVE = true + rescue ArgumentError + SUPPORTS_NEGATIVE = false + end + + def setup + @expect_tz_string = nil + @tz_parse_result = nil + @posix_tz_parser = FakePosixTimeZoneParser.new do |tz_string| + raise "Unexpected tz_string passed to PosixTimeZoneParser: #{tz_string}" unless tz_string == @expect_tz_string + raise InvalidPosixTimeZone, 'FakePosixTimeZoneParser Failure.' 
if @tz_parse_result == :fail + @tz_parse_result + end + end + + def assert_period(abbreviation, utc_offset, std_offset, dst, start_at, end_at, info) + if start_at + period = info.period_for_utc(start_at) + elsif end_at + period = info.period_for_utc(TimeOrDateTime.wrap(end_at).add_with_convert(-1).to_orig) + else + # no transitions, pick the epoch + period = info.period_for_utc(Time.utc(1970, 1, 1)) + end + + assert_equal(abbreviation, period.abbreviation, 'abbreviation') + assert_equal(utc_offset, period.utc_offset, 'utc_offset') + assert_equal(std_offset, period.std_offset, 'std_offset') + assert_equal(dst, period.dst?, 'dst') + + if start_at + refute_nil(period.utc_start_time, 'utc_start_time') + assert_equal(start_at, period.utc_start_time, 'utc_start_time') + else + assert_nil(period.utc_start_time, 'utc_start_time') + end + + if end_at + refute_nil(period.utc_end_time, 'utc_end_time') + assert_equal(end_at, period.utc_end_time, 'utc_end_time') + else + assert_nil(period.utc_end_time, 'utc_end_time') + end + end + + def convert_times_to_i(items, key = :at) + items.each do |item| + if item[key].kind_of?(Time) + item[key] = item[key].utc.to_i + end + end + end + + def select_with_32bit_values(items, key = :at) + items.select do |item| + i = item[key] + i >= -2147483648 && i <= 2147483647 + end + end + + def pack_int64_network_order(values) + values.collect {|value| [value >> 32, value & 0xFFFFFFFF]}.flatten.pack('NN' * values.length) + end + + def pack_int64_signed_network_order(values) + # Convert to the equivalent 64-bit unsigned integer with the same bit representation + pack_int64_network_order(values.collect {|value| value < 0 ? value + 0x10000000000000000 : value}) + end + + def write_tzif(format, offsets, transitions, tz_string, leaps, options = {}) + + # Options for testing malformed zoneinfo files. + magic = options[:magic] + section2_magic = options[:section2_magic] + abbrev_separator = options[:abbrev_separator] || "\0" + abbrev_offset_base = options[:abbrev_offset_base] || 0 + omit_tz_string_start_new_line = options[:omit_tz_string_start_new_line] + omit_tz_string_end_new_line = options[:omit_tz_string_end_new_line] + + unless magic + if format == 1 + magic = "TZif\0" + elsif format >= 2 + magic = "TZif#{format}" + else + raise ArgumentError, 'Invalid format specified' + end + end + + if section2_magic.kind_of?(Proc) + section2_magic = section2_magic.call(format) + else + section2_magic = magic unless section2_magic + end + + convert_times_to_i(transitions) + convert_times_to_i(leaps) + + abbrevs = offsets.collect {|o| o[:abbrev]}.uniq + + if abbrevs.length > 0 + abbrevs = abbrevs.collect {|a| a.encode('UTF-8')} if abbrevs.first.respond_to?(:encode) + + if abbrevs.first.respond_to?(:bytesize) + abbrevs_length = abbrevs.inject(0) {|sum, a| sum + a.bytesize + abbrev_separator.bytesize} + else + abbrevs_length = abbrevs.inject(0) {|sum, a| sum + a.length + abbrev_separator.length} + end + else + abbrevs_length = 0 + end + + b32_transitions = select_with_32bit_values(transitions) + b32_leaps = select_with_32bit_values(leaps) + + Tempfile.open('tzinfo-test-zone') do |file| + file.binmode + + file.write( + [magic, offsets.length, offsets.length, leaps.length, + b32_transitions.length, offsets.length, abbrevs_length].pack('a5 x15 NNNNNN')) + + unless b32_transitions.empty? 
+ file.write(b32_transitions.collect {|t| t[:at]}.pack('N' * b32_transitions.length)) + file.write(b32_transitions.collect {|t| t[:offset_index]}.pack('C' * b32_transitions.length)) + end + + offsets.each do |offset| + index = abbrevs.index(offset[:abbrev]) + abbrev_offset = abbrev_offset_base + 0.upto(index - 1) {|i| abbrev_offset += abbrevs[i].length + 1} + + file.write([offset[:gmtoff], offset[:isdst] ? 1 : 0, abbrev_offset].pack('NCC')) + end + + abbrevs.each do |a| + file.write(a) + file.write(abbrev_separator) + end + + b32_leaps.each do |leap| + file.write([leap[:at], leap[:seconds]].pack('NN')) + end + + unless offsets.empty? + file.write("\0" * offsets.length * 2) + end + + if format >= 2 + file.write( + [section2_magic, offsets.length, offsets.length, leaps.length, + transitions.length, offsets.length, abbrevs_length].pack('a5 x15 NNNNNN')) + + unless transitions.empty? + file.write(pack_int64_signed_network_order(transitions.collect {|t| t[:at]})) + file.write(transitions.collect {|t| t[:offset_index]}.pack('C' * transitions.length)) + end + + offsets.each do |offset| + index = abbrevs.index(offset[:abbrev]) + abbrev_offset = abbrev_offset_base + 0.upto(index - 1) {|i| abbrev_offset += abbrevs[i].length + 1} + + file.write([offset[:gmtoff], offset[:isdst] ? 1 : 0, abbrev_offset].pack('NCC')) + end + + abbrevs.each do |a| + file.write(a) + file.write(abbrev_separator) + end + + leaps.each do |leap| + file.write(pack_int64_signed_network_order([leap[:at]])) + file.write([leap[:seconds]].pack('N')) + end + + unless offsets.empty? + file.write("\0" * offsets.length * 2) + end + + # Empty POSIX timezone string + file.write("\n") unless omit_tz_string_start_new_line + tz_string = tz_string.encode(Encoding::UTF_8) if tz_string.respond_to?(:encode) + file.write(tz_string) + file.write("\n") unless omit_tz_string_end_new_line + end + + file.flush + + yield file.path + end + end + + def tzif_test(offsets, transitions, options = {}, &block) + rules = options[:rules] + tz_string = options[:tz_string] || (rules ? "TEST_TZ_STRING_#{rand(1000000)}" : '') + leaps = options[:leaps] || [] + min_format = options[:min_format] || (tz_string.empty? ? 
1 : 2) + + min_format.upto(3) do |format| + write_tzif(format, offsets, transitions, tz_string, leaps, options) do |path| + if format >= 2 + @tz_parse_result = rules + @expect_tz_string = tz_string + end + begin + yield path, format + ensure + @tz_parse_result = nil + @expect_tz_string = nil + end + end + end + end + + def test_load + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => Time.utc(1971, 1, 2), :offset_index => 1}, + {:at => Time.utc(1980, 4, 22), :offset_index => 2}, + {:at => Time.utc(1980, 10, 21), :offset_index => 1}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1971, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1971, 1, 2), Time.utc(1980, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1980, 4, 22), Time.utc(1980, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(1980, 10, 21), Time.utc(2000, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2000, 12, 31), nil, info) + end + end + + def test_load_negative_utc_offset + offsets = [ + {:gmtoff => -12492, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => -12000, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => -8400, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => -8400, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => Time.utc(1971, 7, 9, 3, 0, 0), :offset_index => 1}, + {:at => Time.utc(1972, 10, 12, 3, 0, 0), :offset_index => 2}, + {:at => Time.utc(1973, 4, 29, 3, 0, 0), :offset_index => 1}, + {:at => Time.utc(1992, 4, 1, 4, 30, 0), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, -12492, 0, false, nil, Time.utc(1971, 7, 9, 3, 0, 0), info) + assert_period(:XST, -12000, 0, false, Time.utc(1971, 7, 9, 3, 0, 0), Time.utc(1972, 10, 12, 3, 0, 0), info) + assert_period(:XDT, -12000, 3600, true, Time.utc(1972, 10, 12, 3, 0, 0), Time.utc(1973, 4, 29, 3, 0, 0), info) + assert_period(:XST, -12000, 0, false, Time.utc(1973, 4, 29, 3, 0, 0), Time.utc(1992, 4, 1, 4, 30, 0), info) + assert_period(:XNST, -8400, 0, false, Time.utc(1992, 4, 1, 4, 30, 0), nil, info) + end + end + + def test_load_dst_first + offsets = [ + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => Time.utc(1979, 1, 2), :offset_index => 2}, + {:at => Time.utc(1980, 4, 22), :offset_index => 0}, + {:at => Time.utc(1980, 10, 21), :offset_index => 2}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/Two', path, @posix_tz_parser) + assert_equal('Zone/Two', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1979, 1, 2), info) + end + end + + def test_load_no_transitions + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, []) do |path, format| + info = 
ZoneinfoTimezoneInfo.new('Zone/three', path, @posix_tz_parser) + assert_equal('Zone/three', info.identifier) + + assert_period(:LT, -12094, 0, false, nil, nil, info) + end + end + + def test_load_no_offsets + offsets = [] + transitions = [{:at => Time.utc(2000, 12, 31), :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_invalid_offset_index + offsets = [{:gmtoff => -0, :isdst => false, :abbrev => 'LMT'}] + transitions = [{:at => Time.utc(2000, 12, 31), :offset_index => 2}] + + tzif_test(offsets, transitions) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_with_leap_seconds + offsets = [{:gmtoff => -0, :isdst => false, :abbrev => 'LMT'}] + leaps = [{:at => Time.utc(1972,6,30,23,59,60), :seconds => 1}] + + tzif_test(offsets, [], :leaps => leaps) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_invalid_magic + ['TZif4', 'tzif2', '12345'].each do |magic| + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, [], :magic => magic) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone2', path, @posix_tz_parser) + end + end + end + end + + def test_load_invalid_section2_magic + ['TZif4', 'tzif2', '12345'].each do |section2_magic| + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, [], :min_format => 2, :section2_magic => section2_magic) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone4', path, @posix_tz_parser) + end + end + end + end + + def test_load_mismatched_section2_magic + minus_one = Proc.new {|f| f == 2 ? 
"TZif\0" : "TZif#{f - 1}" } + plus_one = Proc.new {|f| "TZif#{f + 1}" } + + [minus_one, plus_one].each do |section2_magic| + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, [], :min_format => 2, :section2_magic => section2_magic) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone5', path, @posix_tz_parser) + end + end + end + end + + def test_load_invalid_format + Tempfile.open('tzinfo-test-zone') do |file| + file.write('Invalid') + file.flush + + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone3', file.path, @posix_tz_parser) + end + end + end + + def test_load_missing_abbrev_null_termination + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}] + + tzif_test(offsets, transitions, :abbrev_separator => '^') do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_out_of_range_abbrev_offsets + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}] + + tzif_test(offsets, transitions, :abbrev_offset_base => 8) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_before_epoch + # Some platforms don't support negative timestamps for times before the + # epoch. Check that they are returned when supported and skipped when not. + + # Note the last transition before the epoch (and within the 32-bit range) is + # moved to the epoch on platforms that do not support negative timestamps. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -694224000, :offset_index => 1}, # Time.utc(1948, 1, 2) + {:at => -21945600, :offset_index => 2}, # Time.utc(1969, 4, 22) + {:at => Time.utc(1970, 10, 21), :offset_index => 1}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/Negative', path, @posix_tz_parser) + assert_equal('Zone/Negative', info.identifier) + + if SUPPORTS_NEGATIVE + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1948, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1948, 1, 2), Time.utc(1969, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1969, 4, 22), Time.utc(1970, 10, 21), info) + else + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1970, 1, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1970, 1, 1), Time.utc(1970, 10, 21), info) + end + + assert_period(:XST, 3600, 0, false, Time.utc(1970, 10, 21), Time.utc(2000, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2000, 12, 31), nil, info) + end + end + + def test_load_on_epoch + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -694224000, :offset_index => 1}, # Time.utc(1948, 1, 2) + {:at => -21945600, :offset_index => 2}, # Time.utc(1969, 4, 22) + {:at => Time.utc(1970, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/Negative', path, @posix_tz_parser) + assert_equal('Zone/Negative', info.identifier) + + if SUPPORTS_NEGATIVE + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1948, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1948, 1, 2), Time.utc(1969, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1969, 4, 22), Time.utc(1970, 1, 1), info) + else + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1970, 1, 1), info) + end + + assert_period(:XST, 3600, 0, false, Time.utc(1970, 1, 1), Time.utc(2000, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2000, 12, 31), nil, info) + end + end + + def test_load_64bit + # The TZif version2 and later format contains both 32-bit and 64-bit + # sections. The 64-bit section is always used. + # + # Negative transitions before the supported range are moved to the start of + # the supported range. + # + # Transitions after 2**31 - 1 are discarded if 64-bit times aren't + # supported. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -3786739200, :offset_index => 1}, # Time.utc(1850, 1, 2) + {:at => Time.utc(2003, 4, 22), :offset_index => 2}, + {:at => Time.utc(2003, 10, 21), :offset_index => 1}, + {:at => 2240524800, :offset_index => 3}] # Time.utc(2040, 12, 31) + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SixtyFour', path, @posix_tz_parser) + assert_equal('Zone/SixtyFour', info.identifier) + + if SUPPORTS_64BIT && SUPPORTS_NEGATIVE && format >= 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1850, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1850, 1, 2), Time.utc(2003, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2003, 4, 22), Time.utc(2003, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(2003, 10, 21), Time.utc(2040, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2040, 12, 31), nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2003, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2003, 4, 22), Time.utc(2003, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(2003, 10, 21), nil, info) + else + min_supported = SUPPORTS_NEGATIVE ? -2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2003, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2003, 4, 22), Time.utc(2003, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(2003, 10, 21), nil, info) + end + end + end + + def test_load_64bit_range + # The full range of 64 bit timestamps is not currently supported because of + # the way transitions are indexed. The last transition before the earliest + # supported time will be moved to that time if there isn't already a + # transition at that time. Transitions after the latest supported time are + # ignored. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -2**63, :offset_index => 1}, + {:at => Time.utc(2014, 5, 27), :offset_index => 2}, + {:at => 2**63 - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SixtyFourRange', path, @posix_tz_parser) + assert_equal('Zone/SixtyFourRange', info.identifier) + + if SUPPORTS_64BIT && format >= 2 + # When the full range is supported, the following periods will be defined: + #assert_period(:LMT, 3542, 0, false, nil, Time.at(-2**63).utc, info) + #assert_period(:XST, 3600, 0, false, Time.at(-2**63).utc, Time.utc(2014, 5, 27), info) + #assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(2**63 - 1).utc, info) + #assert_period(:LMT, 3542, 0, false, Time.at(2**63 - 1).utc, nil, info) + + # Without full range support, the following periods will be defined: + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1700, 1, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(1700, 1, 1), Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + else + min_supported = SUPPORTS_NEGATIVE ? -2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + end + end + end + + def test_load_64bit_range_transition_at_earliest_supported + # The full range of 64 bit timestamps is not currently supported because of + # the way transitions are indexed. The last transition before the earliest + # supported time will be moved to that time if there isn't already a + # transition at that time. Transitions after the latest supported time are + # ignored. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -2**63, :offset_index => 1}, + {:at => -8520336000, :offset_index => 2}, # Time.utc(1700, 1, 1).to_i + {:at => Time.utc(2014, 5, 27), :offset_index => 3}, + {:at => 2**63 - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SixtyFourRange', path, @posix_tz_parser) + assert_equal('Zone/SixtyFourRange', info.identifier) + + if SUPPORTS_64BIT && format >= 2 + # When the full range is supported, the following periods will be defined: + #assert_period(:LMT, 3542, 0, false, nil, Time.at(-2**63).utc, info) + #assert_period(:XST, 3600, 0, false, Time.at(-2**63).utc, Time.utc(2014, 5, 27), info) + #assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(2**63 - 1).utc, info) + #assert_period(:LMT, 3542, 0, false, Time.at(2**63 - 1).utc, nil, info) + + # Without full range support, the following periods will be defined: + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1700, 1, 1), info) + assert_period(:XST2, 3600, 0, false, Time.utc(1700, 1, 1), Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + elsif SUPPORTS_NEGATIVE + assert_period(:LMT, 3542, 0, false, nil, Time.at(-2**31).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(-2**31).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + else + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1970, 1, 1), info) + assert_period(:XST2, 3600, 0, false, Time.utc(1970, 1, 1), Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + end + end + end + + def test_load_supported_64bit_range + # The full range of 64 bit timestamps is not currently supported because of + # the way transitions are indexed. + + min_timestamp = -8520336000 # Time.utc(1700, 1, 1).to_i + max_timestamp = 16725225600 # Time.utc(2500, 1, 1).to_i + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => min_timestamp, :offset_index => 1}, + {:at => Time.utc(2014, 5, 27), :offset_index => 2}, + {:at => max_timestamp - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SupportedSixtyFourRange', path, @posix_tz_parser) + assert_equal('Zone/SupportedSixtyFourRange', info.identifier) + + if SUPPORTS_64BIT && format >= 2 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_timestamp).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_timestamp).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(max_timestamp - 1).utc, info) + assert_period(:LMT, 3542, 0, false, Time.at(max_timestamp - 1).utc, nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + else + min_supported = SUPPORTS_NEGATIVE ? 
-2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + end + end + end + + def test_load_32bit_range + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -2**31, :offset_index => 1}, + {:at => Time.utc(2014, 5, 27), :offset_index => 2}, + {:at => 2**31 - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/ThirtyTwoRange', path, @posix_tz_parser) + assert_equal('Zone/ThirtyTwoRange', info.identifier) + + min_supported = SUPPORTS_NEGATIVE ? -2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(2**31 - 1), info) + assert_period(:LMT, 3542, 0, false, Time.at(2**31 - 1).utc, nil, info) + end + end + + def test_load_std_offset_changes + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}, + {:at => Time.utc(2000, 4, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDDT, 3600, 7200, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 4, 1), nil, info) + end + end + + def test_load_std_offset_changes_jump_to_double_dst + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 4, 1), :offset_index => 1}, + {:at => Time.utc(2000, 5, 1), :offset_index => 2}, + {:at => Time.utc(2000, 6, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 4, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XDDT, 3600, 7200, true, Time.utc(2000, 5, 1), Time.utc(2000, 6, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 6, 1), nil, info) + end + end + + def test_load_std_offset_changes_negative + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => -10821, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => -10800, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => -7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => -3600, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}, + {:at => Time.utc(2000, 4, 1), :offset_index => 1}, + {:at => Time.utc(2000, 5, 1), :offset_index => 3}, + {:at => Time.utc(2000, 6, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, -10821, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, -10800, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, -10800, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDDT, -10800, 7200, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST, -10800, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XDDT, -10800, 7200, true, Time.utc(2000, 5, 1), Time.utc(2000, 6, 1), info) + assert_period(:XST, -10800, 0, false, Time.utc(2000, 6, 1), nil, info) + end + end + + def test_load_starts_two_hour_std_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 3}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XDDT, 3600, 7200, true, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_starts_only_dst_transition_with_lmt + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [{:at => Time.utc(2000, 1, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/OnlyDST', path, @posix_tz_parser) + assert_equal('Zone/OnlyDST', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XDT, 3542, 3658, true, Time.utc(2000, 1, 1), nil, info) + end + end + + def test_load_starts_only_dst_transition_without_lmt + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [{:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [{:at => Time.utc(2000, 1, 1), :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/OnlyDST', path, @posix_tz_parser) + assert_equal('Zone/OnlyDST', info.identifier) + + assert_period(:XDT, 3600, 3600, true, nil, Time.utc(2000, 1, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 1, 1), nil, info) + end + end + + def test_load_switch_to_dst_and_change_utc_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + # Switch from non-DST to DST at the same time as moving the UTC offset + # back an hour (i.e. wall clock time doesn't change). 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'YST'}, + {:gmtoff => 3600, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:YST, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 0, 3600, true, Time.utc(2000, 2, 1), nil, info) + end + end + + def test_load_apia_international_dateline_change + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + # Pacific/Apia moved across the International Date Line whilst observing + # daylight savings time. + + offsets = [ + {:gmtoff => 45184, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => -39600, :isdst => false, :abbrev => '-11'}, + {:gmtoff => -36000, :isdst => true, :abbrev => '-10'}, + {:gmtoff => 50400, :isdst => true, :abbrev => '+14'}, + {:gmtoff => 46800, :isdst => false, :abbrev => '+13'}] + + transitions = [ + {:at => Time.utc(2011, 4, 2, 14, 0, 0), :offset_index => 1}, + {:at => Time.utc(2011, 9, 24, 14, 0, 0), :offset_index => 2}, + {:at => Time.utc(2011, 12, 30, 10, 0, 0), :offset_index => 3}, + {:at => Time.utc(2012, 3, 31, 14, 0, 0), :offset_index => 4}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Test/Pacific/Apia', path, @posix_tz_parser) + assert_equal('Test/Pacific/Apia', info.identifier) + + assert_period( :LMT, 45184, 0, false, nil, Time.utc(2011, 4, 2, 14, 0, 0), info) + assert_period(:'-11', -39600, 0, false, Time.utc(2011, 4, 2, 14, 0, 0), Time.utc(2011, 9, 24, 14, 0, 0), info) + assert_period(:'-10', -39600, 3600, true, Time.utc(2011, 9, 24, 14, 0, 0), Time.utc(2011, 12, 30, 10, 0, 0), info) + assert_period(:'+14', 46800, 3600, true, Time.utc(2011, 12, 30, 10, 0, 0), Time.utc(2012, 3, 31, 14, 0, 0), info) + assert_period(:'+13', 46800, 0, false, Time.utc(2012, 3, 31, 14, 0, 0), nil, info) + end + end + + def test_load_offset_split_for_different_utc_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}, + {:at => Time.utc(2000, 5, 1), :offset_index => 3}, + {:at => Time.utc(2000, 6, 1), :offset_index => 2}, + {:at => Time.utc(2000, 7, 1), :offset_index => 1}, + {:at => Time.utc(2000, 8, 1), :offset_index => 3}, + {:at => Time.utc(2000, 9, 1), :offset_index => 1}, + {:at => Time.utc(2000, 10, 1), :offset_index => 2}, + {:at => Time.utc(2000, 11, 1), :offset_index => 3}, + {:at => Time.utc(2000, 12, 1), :offset_index => 2}] + + # XDT will be split and defined according to its surrounding standard time + # offsets. 
+ + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SplitUtcOffset', path, @posix_tz_parser) + assert_equal('Zone/SplitUtcOffset', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 3600, 7200, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 5, 1), Time.utc(2000, 6, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 6, 1), Time.utc(2000, 7, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 7, 1), Time.utc(2000, 8, 1), info) + assert_period( :XDT, 3600, 7200, true, Time.utc(2000, 8, 1), Time.utc(2000, 9, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 9, 1), Time.utc(2000, 10, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 10, 1), Time.utc(2000, 11, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 11, 1), Time.utc(2000, 12, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 12, 1), nil, info) + + 1.upto(6) do |i| + assert_same(info.period_for_utc(Time.utc(2000, i, 1)).offset, info.period_for_utc(Time.utc(2000, i + 6, 1)).offset) + end + end + end + + def test_load_offset_utc_offset_taken_from_minimum_difference_minimum_after + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 2}] + + # XDT should use the closest utc_offset (7200) (and not an equivalent + # utc_offset of 3600 and std_offset of 7200). + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/MinimumUtcOffset', path, @posix_tz_parser) + assert_equal('Zone/MinimumUtcOffset', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_utc_offset_taken_from_minimum_difference_minimum_before + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 2}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + # XDT should use the closest utc_offset (7200) (and not an equivalent + # utc_offset of 3600 and std_offset of 7200). 
+ + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/MinimumUtcOffset', path, @posix_tz_parser) + assert_equal('Zone/MinimumUtcOffset', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_does_not_use_equal_utc_total_offset_equal_after + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 2}] + + # XDT will be based on the utc_offset of XST1 even though XST2 has an + # equivalent (or greater) utc_total_offset. + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetEqual', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetEqual', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_does_not_use_equal_utc_total_offset_equal_before + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 2}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + # XDT will be based on the utc_offset of XST1 even though XST2 has an + # equivalent (or greater) utc_total_offset. + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetEqual', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetEqual', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_both_adjacent_non_dst_equal_utc_total_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
+ + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + # XDT will just assume an std_offset of +1 hour and calculate the utc_offset + # from utc_total_offset - std_offset. + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/AdjacentEqual', path, @posix_tz_parser) + assert_equal('Zone/AdjacentEqual', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 7200, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_utc_offset_preserved_from_next + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT1'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT2'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 4}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}] + + # Both XDT1 and XDT2 should both use the closest utc_offset (7200) (and not + # an equivalent utc_offset of 3600 and std_offset of 7200). + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetPreserved', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetPreserved', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT1, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDT2, 7200, 3600, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 4, 1), nil, info) + end + end + + def test_load_offset_utc_offset_preserved_from_previous + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT1'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT2'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 2}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 4}, + {:at => Time.utc(2000, 4, 1), :offset_index => 1}] + + # Both XDT1 and XDT2 should both use the closest utc_offset (7200) (and not + # an equivalent utc_offset of 3600 and std_offset of 7200). 
+ + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetPreserved', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetPreserved', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT1, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDT2, 7200, 3600, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 4, 1), nil, info) + end + end + + def test_read_offset_negative_std_offset_dst + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 0, :isdst => true, :abbrev => 'XWT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}, + {:at => Time.utc(2000, 5, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/NegativeStdOffsetDst', path, @posix_tz_parser) + assert_equal('Zone/NegativeStdOffsetDst', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 5, 1), nil, info) + end + end + + def test_read_offset_negative_std_offset_dst_initial_dst + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
+ + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 0, :isdst => true, :abbrev => 'XWT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}, + {:at => Time.utc(2000, 5, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/NegativeStdOffsetDstInitialDst', path, @posix_tz_parser) + assert_equal('Zone/NegativeStdOffsetDstInitialDst', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 5, 1), nil, info) + end + end + + def test_read_offset_prefer_base_offset_moves_to_dst_not_hour + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 1800, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 1800, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/BaseOffsetMovesToDstNotHour', path, @posix_tz_parser) + assert_equal('Zone/BaseOffsetMovesToDstNotHour', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 0, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 0, 1800, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 1800, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_read_offset_prefer_base_offset_moves_from_dst_not_hour + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 1800, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 1800, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/BaseOffsetMovesFromDstNotHour', path, @posix_tz_parser) + assert_equal('Zone/BaseOffsetMovesFromDstNotHour', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 1800, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 0, 1800, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 0, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_in_safe_mode + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, []) do |path, format| + # untaint only required for Ruby 1.9.2 + path.untaint + + safe_test do + info = ZoneinfoTimezoneInfo.new('Zone/three', path, @posix_tz_parser) + 
assert_equal('Zone/three', info.identifier) + + assert_period(:LT, -12094, 0, false, nil, nil, info) + end + end + end + + def test_load_encoding + # tzfile.5 doesn't specify an encoding, but the source data is in ASCII. + # ZoneinfoTimezoneInfo will load as UTF-8 (a superset of ASCII). + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST©'}] + + transitions = [ + {:at => Time.utc(1971, 1, 2), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1971, 1, 2), info) + assert_period(:"XST©", 3600, 0, false, Time.utc(1971, 1, 2), nil, info) + end + end + + def test_load_binmode + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + # Transition time that includes CRLF (4EFF0D0A). + # Test that this doesn't get corrupted by translating CRLF to LF. + transitions = [ + {:at => Time.utc(2011, 12, 31, 13, 24, 26), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2011, 12, 31, 13, 24, 26), info) + assert_period(:XST, 3600, 0, false, Time.utc(2011, 12, 31, 13, 24, 26), nil, info) + end + end + + def test_load_invalid_tz_string + offsets = [{:gmtoff => 0, :isdst => false, :abbrev => 'UTC'}] + + tzif_test(offsets, [], :rules => :fail) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Invalid/String', path, @posix_tz_parser) } + assert_equal("Failed to parse POSIX-style TZ string in file '#{path}': FakePosixTimeZoneParser Failure.", error.message) + end + end + + def test_load_tz_string_missing_start_newline + offsets = [{:gmtoff => 0, :isdst => false, :abbrev => 'UTC'}] + rules = TimezoneOffset.new(0, 0, 'UTC') + + tzif_test(offsets, [], :rules => rules, :omit_tz_string_start_new_line => true) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Missing/Start', path, @posix_tz_parser) } + assert_equal("Expected newline starting POSIX-style TZ string in file '#{path}'.", error.message) + end + end + + def test_load_tz_string_missing_end_newline + offsets = [{:gmtoff => 0, :isdst => false, :abbrev => 'UTC'}] + rules = TimezoneOffset.new(0, 0, 'UTC') + + tzif_test(offsets, [], :rules => rules, :omit_tz_string_end_new_line => true) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Missing/End', path, @posix_tz_parser) } + assert_equal("Expected newline ending POSIX-style TZ string in file '#{path}'.", error.message) + end + end + + [ + [false, 1, 0, 'TEST'], + [false, 0, 1, 'TEST'], + [false, 0, 0, 'TEST2'], + [false, -1, 1, 'TEST'], + [true, 0, 0, 'TEST'] + ].each do |(isdst, base_utc_offset, std_offset, abbreviation)| + define_method "test_load_tz_string_does_not_match_#{isdst ? 
'dst' : 'std'}_constant_offset_#{base_utc_offset}_#{std_offset}_#{abbreviation}" do + offsets = [{:gmtoff => 0, :isdst => isdst, :abbrev => 'TEST'}] + rules = TimezoneOffset.new(base_utc_offset, std_offset, abbreviation) + + tzif_test(offsets, [], :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Offset/Mismatch', path, @posix_tz_parser) } + assert_equal("Constant offset POSIX-style TZ string does not match constant offset in file '#{path}'.", error.message) + end + end + end + + [ + [3601, 'XST'], + [3600, 'YST'] + ].each do |(base_utc_offset, abbreviation)| + define_method "test_load_tz_string_does_not_match_final_std_transition_offset_#{base_utc_offset}_#{abbreviation}" do + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 3542, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 2, 0, 0) - 3600, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 7200, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(base_utc_offset, 0, abbreviation), + TimezoneOffset.new(3600, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 7200), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Offset/Mismatch', path, @posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + end + + [ + [3601, 0, 'XST'], + [3600, 1, 'XST'], + [3600, 0, 'YST'] + ].each do |(base_utc_offset, std_offset, abbreviation)| + define_method "test_load_tz_string_does_not_match_final_dst_transition_offset_#{base_utc_offset}_#{std_offset}_#{abbreviation}" do + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 3542, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 2, 0, 0) - 3600, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(3600, 0, 'XST'), + TimezoneOffset.new(base_utc_offset, std_offset, abbreviation), + JulianDayOfYearTransitionRule.new(100, 7200), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Offset/Mismatch', path, @posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + end + + [ + [3600, 0], + [3600, 3600], + [10800, -3600] + ].each do |(base_utc_offset, std_offset)| + rules = TimezoneOffset.new(base_utc_offset, std_offset, 'TEST') + + define_method "test_load_tz_string_uses_constant_offset_with_no_transitions_#{base_utc_offset}_#{std_offset}" do + offsets = [{:gmtoff => base_utc_offset + std_offset, :isdst => std_offset != 0, :abbrev => 'TEST'}] + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Constant/Offset', path, @posix_tz_parser) + assert_equal('Constant/Offset', info.identifier) + assert_period(:TEST, base_utc_offset, std_offset, 
std_offset != 0, nil, nil, info) + end + end + + define_method "test_load_tz_string_uses_constant_offset_after_last_transition_#{base_utc_offset}_#{std_offset}" do + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => base_utc_offset + std_offset, :isdst => std_offset != 0, :abbrev => 'TEST'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 3542, :offset_index => 1} + ] + + t0 = Time.utc(1971, 1, 2, 2, 0, 0) - 3542 + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Constant/After', path, @posix_tz_parser) + assert_equal('Constant/After', info.identifier) + assert_period(:LMT, 3542, 0, false, nil, t0, info) + assert_period(:TEST, base_utc_offset, std_offset, std_offset != 0, t0, nil, info) + end + end + end + + def test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined + offsets = [{:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:XST, 7200, 0, false, nil, Time.utc(1970, 4, 10, 1, 0, 0) - 7200, info) + + 1970.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined_omitting_first_if_matches_first_offset + offsets = [{:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:XDT, 7200, 3600, true, nil, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, Time.utc(1971, 4, 10, 1, 0, 0) - 7200, info) + + 1971.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def 
test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined_with_previous_offset_of_first_matching_first_offset + offsets = [{:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1970, 4, 10, 1, 0, 0) - 7200, info) + + 1970.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined_correcting_initial_offset + offsets = [{:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + rules = AnnualRules.new( + TimezoneOffset.new(3600, 0, 'XST'), + TimezoneOffset.new(3600, 7200, 'XDDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:XDDT, 3600, 7200, true, nil, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, info) # would be :XDT, 7200, 3600 otherwise + assert_period(:XST, 3600, 0, false, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, Time.utc(1971, 4, 10, 1, 0, 0) - 3600, info) + + 1971.upto(generate_up_to - 1).each do |year| + assert_period(:XDDT, 3600, 7200, true, Time.utc(year, 4, 10, 1, 0, 0) - 3600, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 3600, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 3600, info) + end + + assert_period(:XDDT, 3600, 7200, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 3600, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 3600, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_std_to_dst_following_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, 
transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, Time.utc(1981, 4, 10, 1, 0, 0) - 7200, info) + + 1981.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_dst_to_std_same_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 4, 10, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, Time.utc(1981, 4, 10, 1, 0, 0) - 7200, info) + + 1981.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_dst_to_std_following_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 1, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 10, 27, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1982, 4, 10, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 10, 27, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(300, 3600), + JulianDayOfYearTransitionRule.new(100, 7200) + ) + + generate_up_to 
= ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, Time.utc(1982, 4, 10, 2, 0, 0) - 10800, info) + + 1982.upto(generate_up_to - 1).each do |year| + assert_period(:XST, 7200, 0, false, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_std_to_dst_same_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 1, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 10, 27, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1982, 4, 10, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 10, 27, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1983, 4, 10, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(300, 3600), + JulianDayOfYearTransitionRule.new(100, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, Time.utc(1982, 4, 10, 2, 0, 0) - 10800, info) + + 1982.upto(generate_up_to - 1).each do |year| + assert_period(:XST, 7200, 0, false, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_negative_dst + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 10800, :isdst => false, :abbrev => 'XST'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => 
Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(10800, 0, 'XST'), + TimezoneOffset.new(10800, -3600, 'XDT'), + JulianDayOfYearTransitionRule.new(300, 7200), + JulianDayOfYearTransitionRule.new(100, 3600) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XDT, 10800, -3600, true, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, Time.utc(1981, 4, 10, 1, 0, 0) - 7200, info) + + 1981.upto(generate_up_to - 1).each do |year| + assert_period(:XST, 10800, 0, false, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 10800, -3600, true, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XST, 10800, 0, false, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 10800, -3600, true, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_single_transition_in_final_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + transitions = [ + {:at => Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(generate_up_to - 1, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(generate_up_to - 1, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Final/Year', path, @posix_tz_parser) + assert_equal('Final/Year', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, Time.utc(generate_up_to - 1, 4, 10, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to - 1, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to - 1, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to - 1, 10, 27, 2, 0, 0) - 10800, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_adds_nothing_if_transitions_up_to_final_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + transitions = [ + {:at => Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + 
{:at => Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Final/Year', path, @posix_tz_parser) + assert_equal('Final/Year', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_corrects_offset_of_final_transition + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [{:at => Time.utc(2000, 4, 10, 1, 0, 0) - 3542, :offset_index => 1}] + + rules = AnnualRules.new( + TimezoneOffset.new(3600, 0, 'XST'), + TimezoneOffset.new(3600, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Correct/Final', path, @posix_tz_parser) + assert_equal('Correct/Final', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 4, 10, 1, 0, 0) - 3542, info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 4, 10, 1, 0, 0) - 3542, Time.utc(2000, 10, 27, 2, 0, 0) - 7200, info) # would be :XDT, 3542, 3658 without tz_string + assert_period(:XST, 3600, 0, false, Time.utc(2000, 10, 27, 2, 0, 0) - 7200, Time.utc(2001, 4, 10, 1, 0, 0) - 3600, info) + + 2001.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 3600, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 3600, Time.utc(year, 10, 27, 2, 0, 0) - 7200, info) + assert_period(:XST, 3600, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 7200, Time.utc(year + 1, 4, 10, 1, 0, 0) - 3600, info) + end + + assert_period(:XDT, 3600, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 3600, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 7200, info) + assert_period(:XST, 3600, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 7200, nil, info) + end + end + + def test_load_tz_string_specifies_transition_to_offset_of_final_transition_same_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 4, 10, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(101, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Invalid/Offset', path, 
@posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + + def test_load_tz_string_specifies_transition_to_offset_of_final_transition_following_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(299, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Invalid/Offset', path, @posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + + unless SUPPORTS_64BIT + def test_generate_up_to_limited_to_2037_with_no_64_bit_support + assert_equal(2037, ZoneinfoTimezoneInfo::GENERATE_UP_TO) + end + + def test_load_tz_string_does_not_generate_if_transition_after_32_bit_range_with_no_64_bit_support + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => 2154474000 - 7200, :offset_index => 2}, # Time.utc(2038, 4, 10, 1, 0, 0).to_i + {:at => 2171757600 - 10800, :offset_index => 1} # Time.utc(2038, 10, 27, 2, 0, 0).to_i + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, nil, info) + end + end + end + + if SUPPORTS_NEGATIVE + def test_load_tz_string_generates_from_last_transition_if_before_1970_and_supports_negative + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1961, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1962, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1962, 10, 27, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Pre/1970', path, @posix_tz_parser) + assert_equal('Pre/1970', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1961, 1, 2, 2, 0, 0) - 7142, info) 
+ assert_period(:XST, 7200, 0, false, Time.utc(1961, 1, 2, 2, 0, 0) - 7142, Time.utc(1962, 4, 10, 1, 0, 0) - 7200, info) + + 1962.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + else + def test_load_tz_string_generates_from_1970_if_last_transition_before_1970_and_does_not_support_negative + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => -283903200 - 7142, :offset_index => 1}, # Time.utc(1961, 1, 2, 2, 0, 0) + {:at => -243903600 - 7200, :offset_index => 2}, # Time.utc(1962, 4, 10, 1, 0, 0) + {:at => -226620000 - 10800, :offset_index => 1} # Time.utc(1962, 10, 27, 2, 0, 0) + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Pre/1970', path, @posix_tz_parser) + assert_equal('Pre/1970', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1970, 1, 1), info) + assert_period(:XST, 7200, 0, false, Time.utc(1970, 1, 1), Time.utc(1970, 4, 10, 1, 0, 0) - 7200, info) + + 1970.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + end + + def test_load_tz_string_as_utf8 + offsets = [{:gmtoff => 3600, :isdst => false, :abbrev => 'áccént'}] + rules = TimezoneOffset.new(3600, 0, 'áccént') + + tzif_test(offsets, [], :tz_string => '<áccént>1', :rules => rules) do |path, format| + # FakePosixTimeZoneParser will test that the tz_string matches. 
+ info = ZoneinfoTimezoneInfo.new('Test/Utf8', path, @posix_tz_parser) + assert_period(:'áccént', 3600, 0, false, nil, nil, info) + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/test_utils.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/test_utils.rb new file mode 100644 index 0000000..c97facf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/test_utils.rb @@ -0,0 +1,192 @@ +tests_dir = File.expand_path(File.dirname(__FILE__)) +tests_dir.untaint if RUBY_VERSION < '2.7' +TESTS_DIR = tests_dir +TZINFO_LIB_DIR = File.expand_path(File.join(TESTS_DIR, '..', 'lib')) +TZINFO_TEST_DATA_DIR = File.join(TESTS_DIR, 'tzinfo-data') +TZINFO_TEST_ZONEINFO_DIR = File.join(TESTS_DIR, 'zoneinfo') + +$:.unshift(TZINFO_LIB_DIR) unless $:.include?(TZINFO_LIB_DIR) + +# tzinfo-data contains a cut down copy of tzinfo-data for use in the tests. +# Add it to the load path. +$:.unshift(TZINFO_TEST_DATA_DIR) unless $:.include?(TZINFO_TEST_DATA_DIR) + +require 'minitest/autorun' +require 'tzinfo' +require 'fileutils' +require 'rbconfig' + +module TestUtils + ZONEINFO_SYMLINKS = [ + ['localtime', 'America/New_York'], + ['UTC', 'Etc/UTC']] + + + def self.prepare_test_zoneinfo_dir + ZONEINFO_SYMLINKS.each do |file, target| + path = File.join(TZINFO_TEST_ZONEINFO_DIR, file) + + File.delete(path) if File.exist?(path) + + begin + FileUtils.ln_s(target, path) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Copy instead. + target_path = File.join(TZINFO_TEST_ZONEINFO_DIR, target) + FileUtils.cp(target_path, path) + end + end + end +end + +TestUtils.prepare_test_zoneinfo_dir + + + +module Kernel + # Suppresses any warnings raised in a specified block. + def without_warnings + old_verbose = $VERBOSE + begin + $VERBOSE = nil + yield + ensure + $-v = old_verbose + end + end + + def safe_test(options = {}) + # Ruby >= 2.7, JRuby and Rubinius don't support SAFE levels. + available = RUBY_VERSION < '2.7' && !(defined?(RUBY_ENGINE) && %w(jruby rbx).include?(RUBY_ENGINE)) + + if available || options[:unavailable] != :skip + thread = Thread.new do + orig_diff = Minitest::Assertions.diff + + if available + orig_safe = $SAFE + $SAFE = options[:level] || 1 + end + begin + # Disable the use of external diff tools during safe mode tests (since + # safe mode will prevent their use). The initial value is retrieved + # before activating safe mode because the first time + # Minitest::Assertions.diff is called, it will attempt to find a diff + # tool. Finding the diff tool will also fail in safe mode. + Minitest::Assertions.diff = nil + begin + yield + ensure + Minitest::Assertions.diff = orig_diff + end + ensure + if available + # On Ruby < 2.6, setting $SAFE affects only the current thread + # and the $SAFE level cannot be downgraded. Catch and ignore the + # SecurityError. + # On Ruby >= 2.6, setting $SAFE is global, and the $SAFE level + # can be downgraded. Restore $SAFE back to the original level. + begin + $SAFE = orig_safe + rescue SecurityError + end + end + end + end + + thread.join + end + end + + def skip_if_has_bug_14060 + # On Ruby 2.4.4 in safe mode, require will fail with a SecurityError for + # any file that has not previously been loaded, regardless of whether the + # file name is tainted. + # See https://bugs.ruby-lang.org/issues/14060#note-5.
+ if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'ruby' && RUBY_VERSION == '2.4.4' + skip('Skipping test due to Ruby 2.4.4 being affected by Bug 14060 (see https://bugs.ruby-lang.org/issues/14060#note-5)') + end + end + + def assert_array_same_items(expected, actual, msg = nil) + full_message = message(msg, '') { diff(expected, actual) } + condition = (expected.size == actual.size) && (expected - actual == []) + assert(condition, full_message) + end + + def assert_sub_process_returns(expected_lines, code, extra_load_path = [], required = ['tzinfo']) + if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'jruby' && JRUBY_VERSION.start_with?('9.0.') && RbConfig::CONFIG['host_os'] =~ /mswin/ + skip('JRuby 9.0 on Windows cannot handle writing to the IO instance returned by popen') + end + + ruby = File.join(RbConfig::CONFIG['bindir'], + RbConfig::CONFIG['ruby_install_name'] + RbConfig::CONFIG['EXEEXT']) + + load_path = [TZINFO_LIB_DIR] + extra_load_path + + # If RubyGems is loaded in the current process, then require it in the + # sub-process, as it may be needed in order to require dependencies. + if defined?(Gem) && Gem.instance_of?(Module) + required = ['rubygems'] + required + end + + if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' + # Stop Rubinius from operating as irb. + args = ' -' + else + args = '' + end + + IO.popen("\"#{ruby}\"#{args}", 'r+') do |process| + load_path.each do |p| + process.puts("$:.unshift('#{p.gsub("'", "\\\\'")}')") + end + + required.each do |r| + process.puts("require '#{r.gsub("'", "\\\\'")}'") + end + + process.puts(code) + process.flush + process.close_write + + actual_lines = process.readlines + actual_lines = actual_lines.collect {|l| l.chomp} + assert_equal(expected_lines, actual_lines) + end + end + + def assert_nothing_raised(msg = nil) + begin + yield + rescue => e + full_message = message(msg) { exception_details(e, 'Exception raised: ') } + assert(false, full_message) + end + end +end + + +# Object#taint is a deprecated no-op in Ruby 2.7 and outputs a warning. It will +# be removed in 3.2. Silence the warning or supply a replacement. +if TZInfo::RubyCoreSupport.const_defined?(:UntaintExt) + module TaintExt + refine Object do + def taint + self + end + end + end +end + + +# JRuby 1.7.5 to 1.7.9 consider DateTime instances that differ by less than +# 1 millisecond to be equivalent (https://github.com/jruby/jruby/issues/1311). +# +# A few test cases compare at a resolution of 1 microsecond, so this causes +# failures on JRuby 1.7.5 to 1.7.9. +# +# Determine what the platform supports and adjust the tests accordingly. +DATETIME_RESOLUTION = (0..5).collect {|i| 10**i}.find {|i| (DateTime.new(2013,1,1,0,0,0) <=> DateTime.new(2013,1,1,0,0,Rational(i,1000000))) < 0} +raise 'Unable to compare DateTimes at a resolution less than one second on this platform' unless DATETIME_RESOLUTION diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all.rb new file mode 100644 index 0000000..f90836d --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all.rb @@ -0,0 +1,7 @@ +# Force a particular timezone to be local (helps find issues when local +# timezone isn't GMT). This won't work on Windows.
+ENV['TZ'] = 'America/Los_Angeles' + +Dir[File.join(File.expand_path(File.dirname(__FILE__)), 'tc_*.rb')].each {|t| require t} + +puts "Using #{DataSource.get}" diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all_ruby.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all_ruby.rb new file mode 100644 index 0000000..5b822bf --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all_ruby.rb @@ -0,0 +1,5 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils.rb') + +TZInfo::DataSource.set(:ruby) + +require File.join(File.expand_path(File.dirname(__FILE__)), 'ts_all.rb') diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all_zoneinfo.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all_zoneinfo.rb new file mode 100644 index 0000000..ae1ba15 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/ts_all_zoneinfo.rb @@ -0,0 +1,9 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils.rb') + +# Use a zoneinfo directory containing files needed by the tests. +# The symlinks in this directory are set up in test_utils.rb. +zoneinfo_path = File.join(File.expand_path(File.dirname(__FILE__)), 'zoneinfo') +zoneinfo_path.untaint if RUBY_VERSION < '2.7' +TZInfo::DataSource.set(:zoneinfo, zoneinfo_path) + +require File.join(File.expand_path(File.dirname(__FILE__)), 'ts_all.rb') diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data.rb new file mode 100644 index 0000000..6c3721f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data.rb @@ -0,0 +1,8 @@ +# Top level module for TZInfo. +module TZInfo + # Top level module for TZInfo::Data. + module Data + end +end + +require 'tzinfo/data/version' diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/Argentina/Buenos_Aires.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/Argentina/Buenos_Aires.rb new file mode 100644 index 0000000..19853ce --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/Argentina/Buenos_Aires.rb @@ -0,0 +1,89 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
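The test drivers above switch TZInfo between its two data sources: ts_all_ruby.rb selects the Ruby definitions (files like the America/Argentina/Buenos_Aires definition that follows), while ts_all_zoneinfo.rb points at a compiled zoneinfo directory. As a minimal sketch of the same switching from an application's point of view (not part of the vendored files; it assumes the tzinfo-data gem, or the cut-down copy used by these tests, is on the load path, and the identifier and date are arbitrary examples):

```ruby
require 'tzinfo'

# Use the Ruby data source: definitions such as the vendored
# tzinfo/data/definitions/... files are loaded from the load path.
TZInfo::DataSource.set(:ruby)

# Alternative: read compiled TZif files from a zoneinfo directory instead.
# TZInfo::DataSource.set(:zoneinfo, '/usr/share/zoneinfo')

tz = TZInfo::Timezone.get('America/New_York')
period = tz.period_for_utc(Time.utc(2021, 7, 1))
puts period.abbreviation       # => EDT (daylight saving in effect)
puts period.utc_total_offset   # => -14400 (UTC-4)
```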
+ +module TZInfo + module Data + module Definitions + module America + module Argentina + module Buenos_Aires + include TimezoneDefinition + + timezone 'America/Argentina/Buenos_Aires' do |tz| + tz.offset :o0, -14028, 0, :LMT + tz.offset :o1, -15408, 0, :CMT + tz.offset :o2, -14400, 0, :'-04' + tz.offset :o3, -14400, 3600, :'-03' + tz.offset :o4, -10800, 0, :'-03' + tz.offset :o5, -10800, 3600, :'-02' + + tz.transition 1894, 10, :o1, -2372097972, 17374555169, 7200 + tz.transition 1920, 5, :o2, -1567453392, 1453467407, 600 + tz.transition 1930, 12, :o3, -1233432000, 7278935, 3 + tz.transition 1931, 4, :o2, -1222981200, 19411461, 8 + tz.transition 1931, 10, :o3, -1205956800, 7279889, 3 + tz.transition 1932, 3, :o2, -1194037200, 19414141, 8 + tz.transition 1932, 11, :o3, -1172865600, 7281038, 3 + tz.transition 1933, 3, :o2, -1162501200, 19417061, 8 + tz.transition 1933, 11, :o3, -1141329600, 7282133, 3 + tz.transition 1934, 3, :o2, -1130965200, 19419981, 8 + tz.transition 1934, 11, :o3, -1109793600, 7283228, 3 + tz.transition 1935, 3, :o2, -1099429200, 19422901, 8 + tz.transition 1935, 11, :o3, -1078257600, 7284323, 3 + tz.transition 1936, 3, :o2, -1067806800, 19425829, 8 + tz.transition 1936, 11, :o3, -1046635200, 7285421, 3 + tz.transition 1937, 3, :o2, -1036270800, 19428749, 8 + tz.transition 1937, 11, :o3, -1015099200, 7286516, 3 + tz.transition 1938, 3, :o2, -1004734800, 19431669, 8 + tz.transition 1938, 11, :o3, -983563200, 7287611, 3 + tz.transition 1939, 3, :o2, -973198800, 19434589, 8 + tz.transition 1939, 11, :o3, -952027200, 7288706, 3 + tz.transition 1940, 3, :o2, -941576400, 19437517, 8 + tz.transition 1940, 7, :o3, -931032000, 7289435, 3 + tz.transition 1941, 6, :o2, -900882000, 19441285, 8 + tz.transition 1941, 10, :o3, -890337600, 7290848, 3 + tz.transition 1943, 8, :o2, -833749200, 19447501, 8 + tz.transition 1943, 10, :o3, -827265600, 7293038, 3 + tz.transition 1946, 3, :o2, -752274000, 19455045, 8 + tz.transition 1946, 10, :o3, -733780800, 7296284, 3 + tz.transition 1963, 10, :o2, -197326800, 19506429, 8 + tz.transition 1963, 12, :o3, -190843200, 7315136, 3 + tz.transition 1964, 3, :o2, -184194000, 19507645, 8 + tz.transition 1964, 10, :o3, -164491200, 7316051, 3 + tz.transition 1965, 3, :o2, -152658000, 19510565, 8 + tz.transition 1965, 10, :o3, -132955200, 7317146, 3 + tz.transition 1966, 3, :o2, -121122000, 19513485, 8 + tz.transition 1966, 10, :o3, -101419200, 7318241, 3 + tz.transition 1967, 4, :o2, -86821200, 19516661, 8 + tz.transition 1967, 10, :o3, -71092800, 7319294, 3 + tz.transition 1968, 4, :o2, -54766800, 19519629, 8 + tz.transition 1968, 10, :o3, -39038400, 7320407, 3 + tz.transition 1969, 4, :o2, -23317200, 19522541, 8 + tz.transition 1969, 10, :o4, -7588800, 7321499, 3 + tz.transition 1974, 1, :o5, 128142000 + tz.transition 1974, 5, :o4, 136605600 + tz.transition 1988, 12, :o5, 596948400 + tz.transition 1989, 3, :o4, 605066400 + tz.transition 1989, 10, :o5, 624423600 + tz.transition 1990, 3, :o4, 636516000 + tz.transition 1990, 10, :o5, 656478000 + tz.transition 1991, 3, :o4, 667965600 + tz.transition 1991, 10, :o5, 687927600 + tz.transition 1992, 3, :o4, 699415200 + tz.transition 1992, 10, :o5, 719377200 + tz.transition 1993, 3, :o4, 731469600 + tz.transition 1999, 10, :o3, 938919600 + tz.transition 2000, 3, :o4, 952052400 + tz.transition 2007, 12, :o5, 1198983600 + tz.transition 2008, 3, :o4, 1205632800 + tz.transition 2008, 10, :o5, 1224385200 + tz.transition 2009, 3, :o4, 1237082400 + end + end + end + end + end + end +end diff --git 
a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/New_York.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/New_York.rb new file mode 100644 index 0000000..6802f53 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/New_York.rb @@ -0,0 +1,327 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module America + module New_York + include TimezoneDefinition + + timezone 'America/New_York' do |tz| + tz.offset :o0, -17762, 0, :LMT + tz.offset :o1, -18000, 0, :EST + tz.offset :o2, -18000, 3600, :EDT + tz.offset :o3, -18000, 3600, :EWT + tz.offset :o4, -18000, 3600, :EPT + + tz.transition 1883, 11, :o1, -2717650800, 57819197, 24 + tz.transition 1918, 3, :o2, -1633280400, 58120411, 24 + tz.transition 1918, 10, :o1, -1615140000, 9687575, 4 + tz.transition 1919, 3, :o2, -1601830800, 58129147, 24 + tz.transition 1919, 10, :o1, -1583690400, 9689031, 4 + tz.transition 1920, 3, :o2, -1570381200, 58137883, 24 + tz.transition 1920, 10, :o1, -1551636000, 9690515, 4 + tz.transition 1921, 4, :o2, -1536512400, 58147291, 24 + tz.transition 1921, 9, :o1, -1523210400, 9691831, 4 + tz.transition 1922, 4, :o2, -1504458000, 58156195, 24 + tz.transition 1922, 9, :o1, -1491760800, 9693287, 4 + tz.transition 1923, 4, :o2, -1473008400, 58164931, 24 + tz.transition 1923, 9, :o1, -1459706400, 9694771, 4 + tz.transition 1924, 4, :o2, -1441558800, 58173667, 24 + tz.transition 1924, 9, :o1, -1428256800, 9696227, 4 + tz.transition 1925, 4, :o2, -1410109200, 58182403, 24 + tz.transition 1925, 9, :o1, -1396807200, 9697683, 4 + tz.transition 1926, 4, :o2, -1378659600, 58191139, 24 + tz.transition 1926, 9, :o1, -1365357600, 9699139, 4 + tz.transition 1927, 4, :o2, -1347210000, 58199875, 24 + tz.transition 1927, 9, :o1, -1333908000, 9700595, 4 + tz.transition 1928, 4, :o2, -1315155600, 58208779, 24 + tz.transition 1928, 9, :o1, -1301853600, 9702079, 4 + tz.transition 1929, 4, :o2, -1283706000, 58217515, 24 + tz.transition 1929, 9, :o1, -1270404000, 9703535, 4 + tz.transition 1930, 4, :o2, -1252256400, 58226251, 24 + tz.transition 1930, 9, :o1, -1238954400, 9704991, 4 + tz.transition 1931, 4, :o2, -1220806800, 58234987, 24 + tz.transition 1931, 9, :o1, -1207504800, 9706447, 4 + tz.transition 1932, 4, :o2, -1189357200, 58243723, 24 + tz.transition 1932, 9, :o1, -1176055200, 9707903, 4 + tz.transition 1933, 4, :o2, -1157302800, 58252627, 24 + tz.transition 1933, 9, :o1, -1144605600, 9709359, 4 + tz.transition 1934, 4, :o2, -1125853200, 58261363, 24 + tz.transition 1934, 9, :o1, -1112551200, 9710843, 4 + tz.transition 1935, 4, :o2, -1094403600, 58270099, 24 + tz.transition 1935, 9, :o1, -1081101600, 9712299, 4 + tz.transition 1936, 4, :o2, -1062954000, 58278835, 24 + tz.transition 1936, 9, :o1, -1049652000, 9713755, 4 + tz.transition 1937, 4, :o2, -1031504400, 58287571, 24 + tz.transition 1937, 9, :o1, -1018202400, 9715211, 4 + tz.transition 1938, 4, :o2, -1000054800, 58296307, 24 + tz.transition 1938, 9, :o1, -986752800, 9716667, 4 + tz.transition 1939, 4, :o2, -968000400, 58305211, 24 + tz.transition 1939, 9, :o1, -955303200, 9718123, 4 + tz.transition 1940, 4, :o2, -936550800, 58313947, 24 + tz.transition 1940, 9, :o1, -923248800, 9719607, 4 + tz.transition 1941, 4, :o2, -905101200, 58322683, 24 + tz.transition 1941, 9, :o1, -891799200, 9721063, 4 + 
tz.transition 1942, 2, :o3, -880218000, 58329595, 24 + tz.transition 1945, 8, :o4, -769395600, 58360379, 24 + tz.transition 1945, 9, :o1, -765396000, 9726915, 4 + tz.transition 1946, 4, :o2, -747248400, 58366531, 24 + tz.transition 1946, 9, :o1, -733946400, 9728371, 4 + tz.transition 1947, 4, :o2, -715798800, 58375267, 24 + tz.transition 1947, 9, :o1, -702496800, 9729827, 4 + tz.transition 1948, 4, :o2, -684349200, 58384003, 24 + tz.transition 1948, 9, :o1, -671047200, 9731283, 4 + tz.transition 1949, 4, :o2, -652899600, 58392739, 24 + tz.transition 1949, 9, :o1, -639597600, 9732739, 4 + tz.transition 1950, 4, :o2, -620845200, 58401643, 24 + tz.transition 1950, 9, :o1, -608148000, 9734195, 4 + tz.transition 1951, 4, :o2, -589395600, 58410379, 24 + tz.transition 1951, 9, :o1, -576093600, 9735679, 4 + tz.transition 1952, 4, :o2, -557946000, 58419115, 24 + tz.transition 1952, 9, :o1, -544644000, 9737135, 4 + tz.transition 1953, 4, :o2, -526496400, 58427851, 24 + tz.transition 1953, 9, :o1, -513194400, 9738591, 4 + tz.transition 1954, 4, :o2, -495046800, 58436587, 24 + tz.transition 1954, 9, :o1, -481744800, 9740047, 4 + tz.transition 1955, 4, :o2, -463597200, 58445323, 24 + tz.transition 1955, 10, :o1, -447271200, 9741643, 4 + tz.transition 1956, 4, :o2, -431542800, 58454227, 24 + tz.transition 1956, 10, :o1, -415821600, 9743099, 4 + tz.transition 1957, 4, :o2, -400093200, 58462963, 24 + tz.transition 1957, 10, :o1, -384372000, 9744555, 4 + tz.transition 1958, 4, :o2, -368643600, 58471699, 24 + tz.transition 1958, 10, :o1, -352922400, 9746011, 4 + tz.transition 1959, 4, :o2, -337194000, 58480435, 24 + tz.transition 1959, 10, :o1, -321472800, 9747467, 4 + tz.transition 1960, 4, :o2, -305744400, 58489171, 24 + tz.transition 1960, 10, :o1, -289418400, 9748951, 4 + tz.transition 1961, 4, :o2, -273690000, 58498075, 24 + tz.transition 1961, 10, :o1, -257968800, 9750407, 4 + tz.transition 1962, 4, :o2, -242240400, 58506811, 24 + tz.transition 1962, 10, :o1, -226519200, 9751863, 4 + tz.transition 1963, 4, :o2, -210790800, 58515547, 24 + tz.transition 1963, 10, :o1, -195069600, 9753319, 4 + tz.transition 1964, 4, :o2, -179341200, 58524283, 24 + tz.transition 1964, 10, :o1, -163620000, 9754775, 4 + tz.transition 1965, 4, :o2, -147891600, 58533019, 24 + tz.transition 1965, 10, :o1, -131565600, 9756259, 4 + tz.transition 1966, 4, :o2, -116442000, 58541755, 24 + tz.transition 1966, 10, :o1, -100116000, 9757715, 4 + tz.transition 1967, 4, :o2, -84387600, 58550659, 24 + tz.transition 1967, 10, :o1, -68666400, 9759171, 4 + tz.transition 1968, 4, :o2, -52938000, 58559395, 24 + tz.transition 1968, 10, :o1, -37216800, 9760627, 4 + tz.transition 1969, 4, :o2, -21488400, 58568131, 24 + tz.transition 1969, 10, :o1, -5767200, 9762083, 4 + tz.transition 1970, 4, :o2, 9961200 + tz.transition 1970, 10, :o1, 25682400 + tz.transition 1971, 4, :o2, 41410800 + tz.transition 1971, 10, :o1, 57736800 + tz.transition 1972, 4, :o2, 73465200 + tz.transition 1972, 10, :o1, 89186400 + tz.transition 1973, 4, :o2, 104914800 + tz.transition 1973, 10, :o1, 120636000 + tz.transition 1974, 1, :o2, 126687600 + tz.transition 1974, 10, :o1, 152085600 + tz.transition 1975, 2, :o2, 162370800 + tz.transition 1975, 10, :o1, 183535200 + tz.transition 1976, 4, :o2, 199263600 + tz.transition 1976, 10, :o1, 215589600 + tz.transition 1977, 4, :o2, 230713200 + tz.transition 1977, 10, :o1, 247039200 + tz.transition 1978, 4, :o2, 262767600 + tz.transition 1978, 10, :o1, 278488800 + tz.transition 1979, 4, :o2, 294217200 + tz.transition 1979, 10, :o1, 
309938400 + tz.transition 1980, 4, :o2, 325666800 + tz.transition 1980, 10, :o1, 341388000 + tz.transition 1981, 4, :o2, 357116400 + tz.transition 1981, 10, :o1, 372837600 + tz.transition 1982, 4, :o2, 388566000 + tz.transition 1982, 10, :o1, 404892000 + tz.transition 1983, 4, :o2, 420015600 + tz.transition 1983, 10, :o1, 436341600 + tz.transition 1984, 4, :o2, 452070000 + tz.transition 1984, 10, :o1, 467791200 + tz.transition 1985, 4, :o2, 483519600 + tz.transition 1985, 10, :o1, 499240800 + tz.transition 1986, 4, :o2, 514969200 + tz.transition 1986, 10, :o1, 530690400 + tz.transition 1987, 4, :o2, 544604400 + tz.transition 1987, 10, :o1, 562140000 + tz.transition 1988, 4, :o2, 576054000 + tz.transition 1988, 10, :o1, 594194400 + tz.transition 1989, 4, :o2, 607503600 + tz.transition 1989, 10, :o1, 625644000 + tz.transition 1990, 4, :o2, 638953200 + tz.transition 1990, 10, :o1, 657093600 + tz.transition 1991, 4, :o2, 671007600 + tz.transition 1991, 10, :o1, 688543200 + tz.transition 1992, 4, :o2, 702457200 + tz.transition 1992, 10, :o1, 719992800 + tz.transition 1993, 4, :o2, 733906800 + tz.transition 1993, 10, :o1, 752047200 + tz.transition 1994, 4, :o2, 765356400 + tz.transition 1994, 10, :o1, 783496800 + tz.transition 1995, 4, :o2, 796806000 + tz.transition 1995, 10, :o1, 814946400 + tz.transition 1996, 4, :o2, 828860400 + tz.transition 1996, 10, :o1, 846396000 + tz.transition 1997, 4, :o2, 860310000 + tz.transition 1997, 10, :o1, 877845600 + tz.transition 1998, 4, :o2, 891759600 + tz.transition 1998, 10, :o1, 909295200 + tz.transition 1999, 4, :o2, 923209200 + tz.transition 1999, 10, :o1, 941349600 + tz.transition 2000, 4, :o2, 954658800 + tz.transition 2000, 10, :o1, 972799200 + tz.transition 2001, 4, :o2, 986108400 + tz.transition 2001, 10, :o1, 1004248800 + tz.transition 2002, 4, :o2, 1018162800 + tz.transition 2002, 10, :o1, 1035698400 + tz.transition 2003, 4, :o2, 1049612400 + tz.transition 2003, 10, :o1, 1067148000 + tz.transition 2004, 4, :o2, 1081062000 + tz.transition 2004, 10, :o1, 1099202400 + tz.transition 2005, 4, :o2, 1112511600 + tz.transition 2005, 10, :o1, 1130652000 + tz.transition 2006, 4, :o2, 1143961200 + tz.transition 2006, 10, :o1, 1162101600 + tz.transition 2007, 3, :o2, 1173596400 + tz.transition 2007, 11, :o1, 1194156000 + tz.transition 2008, 3, :o2, 1205046000 + tz.transition 2008, 11, :o1, 1225605600 + tz.transition 2009, 3, :o2, 1236495600 + tz.transition 2009, 11, :o1, 1257055200 + tz.transition 2010, 3, :o2, 1268550000 + tz.transition 2010, 11, :o1, 1289109600 + tz.transition 2011, 3, :o2, 1299999600 + tz.transition 2011, 11, :o1, 1320559200 + tz.transition 2012, 3, :o2, 1331449200 + tz.transition 2012, 11, :o1, 1352008800 + tz.transition 2013, 3, :o2, 1362898800 + tz.transition 2013, 11, :o1, 1383458400 + tz.transition 2014, 3, :o2, 1394348400 + tz.transition 2014, 11, :o1, 1414908000 + tz.transition 2015, 3, :o2, 1425798000 + tz.transition 2015, 11, :o1, 1446357600 + tz.transition 2016, 3, :o2, 1457852400 + tz.transition 2016, 11, :o1, 1478412000 + tz.transition 2017, 3, :o2, 1489302000 + tz.transition 2017, 11, :o1, 1509861600 + tz.transition 2018, 3, :o2, 1520751600 + tz.transition 2018, 11, :o1, 1541311200 + tz.transition 2019, 3, :o2, 1552201200 + tz.transition 2019, 11, :o1, 1572760800 + tz.transition 2020, 3, :o2, 1583650800 + tz.transition 2020, 11, :o1, 1604210400 + tz.transition 2021, 3, :o2, 1615705200 + tz.transition 2021, 11, :o1, 1636264800 + tz.transition 2022, 3, :o2, 1647154800 + tz.transition 2022, 11, :o1, 1667714400 + tz.transition 
2023, 3, :o2, 1678604400 + tz.transition 2023, 11, :o1, 1699164000 + tz.transition 2024, 3, :o2, 1710054000 + tz.transition 2024, 11, :o1, 1730613600 + tz.transition 2025, 3, :o2, 1741503600 + tz.transition 2025, 11, :o1, 1762063200 + tz.transition 2026, 3, :o2, 1772953200 + tz.transition 2026, 11, :o1, 1793512800 + tz.transition 2027, 3, :o2, 1805007600 + tz.transition 2027, 11, :o1, 1825567200 + tz.transition 2028, 3, :o2, 1836457200 + tz.transition 2028, 11, :o1, 1857016800 + tz.transition 2029, 3, :o2, 1867906800 + tz.transition 2029, 11, :o1, 1888466400 + tz.transition 2030, 3, :o2, 1899356400 + tz.transition 2030, 11, :o1, 1919916000 + tz.transition 2031, 3, :o2, 1930806000 + tz.transition 2031, 11, :o1, 1951365600 + tz.transition 2032, 3, :o2, 1962860400 + tz.transition 2032, 11, :o1, 1983420000 + tz.transition 2033, 3, :o2, 1994310000 + tz.transition 2033, 11, :o1, 2014869600 + tz.transition 2034, 3, :o2, 2025759600 + tz.transition 2034, 11, :o1, 2046319200 + tz.transition 2035, 3, :o2, 2057209200 + tz.transition 2035, 11, :o1, 2077768800 + tz.transition 2036, 3, :o2, 2088658800 + tz.transition 2036, 11, :o1, 2109218400 + tz.transition 2037, 3, :o2, 2120108400 + tz.transition 2037, 11, :o1, 2140668000 + tz.transition 2038, 3, :o2, 2152162800, 59171923, 24 + tz.transition 2038, 11, :o1, 2172722400, 9862939, 4 + tz.transition 2039, 3, :o2, 2183612400, 59180659, 24 + tz.transition 2039, 11, :o1, 2204172000, 9864395, 4 + tz.transition 2040, 3, :o2, 2215062000, 59189395, 24 + tz.transition 2040, 11, :o1, 2235621600, 9865851, 4 + tz.transition 2041, 3, :o2, 2246511600, 59198131, 24 + tz.transition 2041, 11, :o1, 2267071200, 9867307, 4 + tz.transition 2042, 3, :o2, 2277961200, 59206867, 24 + tz.transition 2042, 11, :o1, 2298520800, 9868763, 4 + tz.transition 2043, 3, :o2, 2309410800, 59215603, 24 + tz.transition 2043, 11, :o1, 2329970400, 9870219, 4 + tz.transition 2044, 3, :o2, 2341465200, 59224507, 24 + tz.transition 2044, 11, :o1, 2362024800, 9871703, 4 + tz.transition 2045, 3, :o2, 2372914800, 59233243, 24 + tz.transition 2045, 11, :o1, 2393474400, 9873159, 4 + tz.transition 2046, 3, :o2, 2404364400, 59241979, 24 + tz.transition 2046, 11, :o1, 2424924000, 9874615, 4 + tz.transition 2047, 3, :o2, 2435814000, 59250715, 24 + tz.transition 2047, 11, :o1, 2456373600, 9876071, 4 + tz.transition 2048, 3, :o2, 2467263600, 59259451, 24 + tz.transition 2048, 11, :o1, 2487823200, 9877527, 4 + tz.transition 2049, 3, :o2, 2499318000, 59268355, 24 + tz.transition 2049, 11, :o1, 2519877600, 9879011, 4 + tz.transition 2050, 3, :o2, 2530767600, 59277091, 24 + tz.transition 2050, 11, :o1, 2551327200, 9880467, 4 + tz.transition 2051, 3, :o2, 2562217200, 59285827, 24 + tz.transition 2051, 11, :o1, 2582776800, 9881923, 4 + tz.transition 2052, 3, :o2, 2593666800, 59294563, 24 + tz.transition 2052, 11, :o1, 2614226400, 9883379, 4 + tz.transition 2053, 3, :o2, 2625116400, 59303299, 24 + tz.transition 2053, 11, :o1, 2645676000, 9884835, 4 + tz.transition 2054, 3, :o2, 2656566000, 59312035, 24 + tz.transition 2054, 11, :o1, 2677125600, 9886291, 4 + tz.transition 2055, 3, :o2, 2688620400, 59320939, 24 + tz.transition 2055, 11, :o1, 2709180000, 9887775, 4 + tz.transition 2056, 3, :o2, 2720070000, 59329675, 24 + tz.transition 2056, 11, :o1, 2740629600, 9889231, 4 + tz.transition 2057, 3, :o2, 2751519600, 59338411, 24 + tz.transition 2057, 11, :o1, 2772079200, 9890687, 4 + tz.transition 2058, 3, :o2, 2782969200, 59347147, 24 + tz.transition 2058, 11, :o1, 2803528800, 9892143, 4 + tz.transition 2059, 3, :o2, 
2814418800, 59355883, 24 + tz.transition 2059, 11, :o1, 2834978400, 9893599, 4 + tz.transition 2060, 3, :o2, 2846473200, 59364787, 24 + tz.transition 2060, 11, :o1, 2867032800, 9895083, 4 + tz.transition 2061, 3, :o2, 2877922800, 59373523, 24 + tz.transition 2061, 11, :o1, 2898482400, 9896539, 4 + tz.transition 2062, 3, :o2, 2909372400, 59382259, 24 + tz.transition 2062, 11, :o1, 2929932000, 9897995, 4 + tz.transition 2063, 3, :o2, 2940822000, 59390995, 24 + tz.transition 2063, 11, :o1, 2961381600, 9899451, 4 + tz.transition 2064, 3, :o2, 2972271600, 59399731, 24 + tz.transition 2064, 11, :o1, 2992831200, 9900907, 4 + tz.transition 2065, 3, :o2, 3003721200, 59408467, 24 + tz.transition 2065, 11, :o1, 3024280800, 9902363, 4 + tz.transition 2066, 3, :o2, 3035775600, 59417371, 24 + tz.transition 2066, 11, :o1, 3056335200, 9903847, 4 + tz.transition 2067, 3, :o2, 3067225200, 59426107, 24 + tz.transition 2067, 11, :o1, 3087784800, 9905303, 4 + tz.transition 2068, 3, :o2, 3098674800, 59434843, 24 + tz.transition 2068, 11, :o1, 3119234400, 9906759, 4 + tz.transition 2069, 3, :o2, 3130124400, 59443579, 24 + tz.transition 2069, 11, :o1, 3150684000, 9908215, 4 + tz.transition 2070, 3, :o2, 3161574000, 59452315, 24 + tz.transition 2070, 11, :o1, 3182133600, 9909671, 4 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Australia/Melbourne.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Australia/Melbourne.rb new file mode 100644 index 0000000..6acd078 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Australia/Melbourne.rb @@ -0,0 +1,230 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
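A note on the format of these vendored definition files, with a small sketch (not part of the vendored code) to make the numbers concrete. Each tz.offset line declares a base UTC offset in seconds, a DST adjustment and an abbreviation; each tz.transition gives the instant of change as a Unix timestamp, and entries outside the 1970-2037 range also carry a trailing Rational that appears to be the same instant expressed as an astronomical Julian date (an inference from the arithmetic below, not something the files themselves state). Checking that against the America/New_York entry above:

```ruby
# tz.transition 2038, 3, :o2, 2152162800, 59171923, 24   (from the data above)
unix_epoch_jd = Rational(4881175, 2)    # 2440587.5, Julian date of 1970-01-01T00:00:00Z
ajd           = Rational(59171923, 24)  # the trailing numerator/denominator pair

seconds = ((ajd - unix_epoch_jd) * 86400).to_i
puts seconds                # => 2152162800, matching the timestamp argument
puts Time.at(seconds).utc   # => 2038-03-14 07:00:00 UTC (02:00 EST, when EDT begins)
```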
+ +module TZInfo + module Data + module Definitions + module Australia + module Melbourne + include TimezoneDefinition + + timezone 'Australia/Melbourne' do |tz| + tz.offset :o0, 34792, 0, :LMT + tz.offset :o1, 36000, 0, :AEST + tz.offset :o2, 36000, 3600, :AEDT + + tz.transition 1895, 1, :o1, -2364111592, 26062831051, 10800 + tz.transition 1916, 12, :o2, -1672567140, 3486569881, 1440 + tz.transition 1917, 3, :o1, -1665392400, 19370497, 8 + tz.transition 1941, 12, :o2, -883641600, 14582161, 6 + tz.transition 1942, 3, :o1, -876128400, 19443577, 8 + tz.transition 1942, 9, :o2, -860400000, 14583775, 6 + tz.transition 1943, 3, :o1, -844678800, 19446489, 8 + tz.transition 1943, 10, :o2, -828345600, 14586001, 6 + tz.transition 1944, 3, :o1, -813229200, 19449401, 8 + tz.transition 1971, 10, :o2, 57686400 + tz.transition 1972, 2, :o1, 67968000 + tz.transition 1972, 10, :o2, 89136000 + tz.transition 1973, 3, :o1, 100022400 + tz.transition 1973, 10, :o2, 120585600 + tz.transition 1974, 3, :o1, 131472000 + tz.transition 1974, 10, :o2, 152035200 + tz.transition 1975, 3, :o1, 162921600 + tz.transition 1975, 10, :o2, 183484800 + tz.transition 1976, 3, :o1, 194976000 + tz.transition 1976, 10, :o2, 215539200 + tz.transition 1977, 3, :o1, 226425600 + tz.transition 1977, 10, :o2, 246988800 + tz.transition 1978, 3, :o1, 257875200 + tz.transition 1978, 10, :o2, 278438400 + tz.transition 1979, 3, :o1, 289324800 + tz.transition 1979, 10, :o2, 309888000 + tz.transition 1980, 3, :o1, 320774400 + tz.transition 1980, 10, :o2, 341337600 + tz.transition 1981, 2, :o1, 352224000 + tz.transition 1981, 10, :o2, 372787200 + tz.transition 1982, 3, :o1, 384278400 + tz.transition 1982, 10, :o2, 404841600 + tz.transition 1983, 3, :o1, 415728000 + tz.transition 1983, 10, :o2, 436291200 + tz.transition 1984, 3, :o1, 447177600 + tz.transition 1984, 10, :o2, 467740800 + tz.transition 1985, 3, :o1, 478627200 + tz.transition 1985, 10, :o2, 499190400 + tz.transition 1986, 3, :o1, 511286400 + tz.transition 1986, 10, :o2, 530035200 + tz.transition 1987, 3, :o1, 542736000 + tz.transition 1987, 10, :o2, 561484800 + tz.transition 1988, 3, :o1, 574790400 + tz.transition 1988, 10, :o2, 594144000 + tz.transition 1989, 3, :o1, 606240000 + tz.transition 1989, 10, :o2, 625593600 + tz.transition 1990, 3, :o1, 637689600 + tz.transition 1990, 10, :o2, 657043200 + tz.transition 1991, 3, :o1, 667929600 + tz.transition 1991, 10, :o2, 688492800 + tz.transition 1992, 2, :o1, 699379200 + tz.transition 1992, 10, :o2, 719942400 + tz.transition 1993, 3, :o1, 731433600 + tz.transition 1993, 10, :o2, 751996800 + tz.transition 1994, 3, :o1, 762883200 + tz.transition 1994, 10, :o2, 783446400 + tz.transition 1995, 3, :o1, 796147200 + tz.transition 1995, 10, :o2, 814896000 + tz.transition 1996, 3, :o1, 828201600 + tz.transition 1996, 10, :o2, 846345600 + tz.transition 1997, 3, :o1, 859651200 + tz.transition 1997, 10, :o2, 877795200 + tz.transition 1998, 3, :o1, 891100800 + tz.transition 1998, 10, :o2, 909244800 + tz.transition 1999, 3, :o1, 922550400 + tz.transition 1999, 10, :o2, 941299200 + tz.transition 2000, 3, :o1, 954000000 + tz.transition 2000, 8, :o2, 967305600 + tz.transition 2001, 3, :o1, 985449600 + tz.transition 2001, 10, :o2, 1004198400 + tz.transition 2002, 3, :o1, 1017504000 + tz.transition 2002, 10, :o2, 1035648000 + tz.transition 2003, 3, :o1, 1048953600 + tz.transition 2003, 10, :o2, 1067097600 + tz.transition 2004, 3, :o1, 1080403200 + tz.transition 2004, 10, :o2, 1099152000 + tz.transition 2005, 3, :o1, 1111852800 + tz.transition 2005, 10, 
:o2, 1130601600 + tz.transition 2006, 4, :o1, 1143907200 + tz.transition 2006, 10, :o2, 1162051200 + tz.transition 2007, 3, :o1, 1174752000 + tz.transition 2007, 10, :o2, 1193500800 + tz.transition 2008, 4, :o1, 1207411200 + tz.transition 2008, 10, :o2, 1223136000 + tz.transition 2009, 4, :o1, 1238860800 + tz.transition 2009, 10, :o2, 1254585600 + tz.transition 2010, 4, :o1, 1270310400 + tz.transition 2010, 10, :o2, 1286035200 + tz.transition 2011, 4, :o1, 1301760000 + tz.transition 2011, 10, :o2, 1317484800 + tz.transition 2012, 3, :o1, 1333209600 + tz.transition 2012, 10, :o2, 1349539200 + tz.transition 2013, 4, :o1, 1365264000 + tz.transition 2013, 10, :o2, 1380988800 + tz.transition 2014, 4, :o1, 1396713600 + tz.transition 2014, 10, :o2, 1412438400 + tz.transition 2015, 4, :o1, 1428163200 + tz.transition 2015, 10, :o2, 1443888000 + tz.transition 2016, 4, :o1, 1459612800 + tz.transition 2016, 10, :o2, 1475337600 + tz.transition 2017, 4, :o1, 1491062400 + tz.transition 2017, 9, :o2, 1506787200 + tz.transition 2018, 3, :o1, 1522512000 + tz.transition 2018, 10, :o2, 1538841600 + tz.transition 2019, 4, :o1, 1554566400 + tz.transition 2019, 10, :o2, 1570291200 + tz.transition 2020, 4, :o1, 1586016000 + tz.transition 2020, 10, :o2, 1601740800 + tz.transition 2021, 4, :o1, 1617465600 + tz.transition 2021, 10, :o2, 1633190400 + tz.transition 2022, 4, :o1, 1648915200 + tz.transition 2022, 10, :o2, 1664640000 + tz.transition 2023, 4, :o1, 1680364800 + tz.transition 2023, 9, :o2, 1696089600 + tz.transition 2024, 4, :o1, 1712419200 + tz.transition 2024, 10, :o2, 1728144000 + tz.transition 2025, 4, :o1, 1743868800 + tz.transition 2025, 10, :o2, 1759593600 + tz.transition 2026, 4, :o1, 1775318400 + tz.transition 2026, 10, :o2, 1791043200 + tz.transition 2027, 4, :o1, 1806768000 + tz.transition 2027, 10, :o2, 1822492800 + tz.transition 2028, 4, :o1, 1838217600 + tz.transition 2028, 9, :o2, 1853942400 + tz.transition 2029, 3, :o1, 1869667200 + tz.transition 2029, 10, :o2, 1885996800 + tz.transition 2030, 4, :o1, 1901721600 + tz.transition 2030, 10, :o2, 1917446400 + tz.transition 2031, 4, :o1, 1933171200 + tz.transition 2031, 10, :o2, 1948896000 + tz.transition 2032, 4, :o1, 1964620800 + tz.transition 2032, 10, :o2, 1980345600 + tz.transition 2033, 4, :o1, 1996070400 + tz.transition 2033, 10, :o2, 2011795200 + tz.transition 2034, 4, :o1, 2027520000 + tz.transition 2034, 9, :o2, 2043244800 + tz.transition 2035, 3, :o1, 2058969600 + tz.transition 2035, 10, :o2, 2075299200 + tz.transition 2036, 4, :o1, 2091024000 + tz.transition 2036, 10, :o2, 2106748800 + tz.transition 2037, 4, :o1, 2122473600 + tz.transition 2037, 10, :o2, 2138198400 + tz.transition 2038, 4, :o1, 2153923200, 14793103, 6 + tz.transition 2038, 10, :o2, 2169648000, 14794195, 6 + tz.transition 2039, 4, :o1, 2185372800, 14795287, 6 + tz.transition 2039, 10, :o2, 2201097600, 14796379, 6 + tz.transition 2040, 3, :o1, 2216822400, 14797471, 6 + tz.transition 2040, 10, :o2, 2233152000, 14798605, 6 + tz.transition 2041, 4, :o1, 2248876800, 14799697, 6 + tz.transition 2041, 10, :o2, 2264601600, 14800789, 6 + tz.transition 2042, 4, :o1, 2280326400, 14801881, 6 + tz.transition 2042, 10, :o2, 2296051200, 14802973, 6 + tz.transition 2043, 4, :o1, 2311776000, 14804065, 6 + tz.transition 2043, 10, :o2, 2327500800, 14805157, 6 + tz.transition 2044, 4, :o1, 2343225600, 14806249, 6 + tz.transition 2044, 10, :o2, 2358950400, 14807341, 6 + tz.transition 2045, 4, :o1, 2374675200, 14808433, 6 + tz.transition 2045, 9, :o2, 2390400000, 14809525, 6 + tz.transition 
2046, 3, :o1, 2406124800, 14810617, 6 + tz.transition 2046, 10, :o2, 2422454400, 14811751, 6 + tz.transition 2047, 4, :o1, 2438179200, 14812843, 6 + tz.transition 2047, 10, :o2, 2453904000, 14813935, 6 + tz.transition 2048, 4, :o1, 2469628800, 14815027, 6 + tz.transition 2048, 10, :o2, 2485353600, 14816119, 6 + tz.transition 2049, 4, :o1, 2501078400, 14817211, 6 + tz.transition 2049, 10, :o2, 2516803200, 14818303, 6 + tz.transition 2050, 4, :o1, 2532528000, 14819395, 6 + tz.transition 2050, 10, :o2, 2548252800, 14820487, 6 + tz.transition 2051, 4, :o1, 2563977600, 14821579, 6 + tz.transition 2051, 9, :o2, 2579702400, 14822671, 6 + tz.transition 2052, 4, :o1, 2596032000, 14823805, 6 + tz.transition 2052, 10, :o2, 2611756800, 14824897, 6 + tz.transition 2053, 4, :o1, 2627481600, 14825989, 6 + tz.transition 2053, 10, :o2, 2643206400, 14827081, 6 + tz.transition 2054, 4, :o1, 2658931200, 14828173, 6 + tz.transition 2054, 10, :o2, 2674656000, 14829265, 6 + tz.transition 2055, 4, :o1, 2690380800, 14830357, 6 + tz.transition 2055, 10, :o2, 2706105600, 14831449, 6 + tz.transition 2056, 4, :o1, 2721830400, 14832541, 6 + tz.transition 2056, 9, :o2, 2737555200, 14833633, 6 + tz.transition 2057, 3, :o1, 2753280000, 14834725, 6 + tz.transition 2057, 10, :o2, 2769609600, 14835859, 6 + tz.transition 2058, 4, :o1, 2785334400, 14836951, 6 + tz.transition 2058, 10, :o2, 2801059200, 14838043, 6 + tz.transition 2059, 4, :o1, 2816784000, 14839135, 6 + tz.transition 2059, 10, :o2, 2832508800, 14840227, 6 + tz.transition 2060, 4, :o1, 2848233600, 14841319, 6 + tz.transition 2060, 10, :o2, 2863958400, 14842411, 6 + tz.transition 2061, 4, :o1, 2879683200, 14843503, 6 + tz.transition 2061, 10, :o2, 2895408000, 14844595, 6 + tz.transition 2062, 4, :o1, 2911132800, 14845687, 6 + tz.transition 2062, 9, :o2, 2926857600, 14846779, 6 + tz.transition 2063, 3, :o1, 2942582400, 14847871, 6 + tz.transition 2063, 10, :o2, 2958912000, 14849005, 6 + tz.transition 2064, 4, :o1, 2974636800, 14850097, 6 + tz.transition 2064, 10, :o2, 2990361600, 14851189, 6 + tz.transition 2065, 4, :o1, 3006086400, 14852281, 6 + tz.transition 2065, 10, :o2, 3021811200, 14853373, 6 + tz.transition 2066, 4, :o1, 3037536000, 14854465, 6 + tz.transition 2066, 10, :o2, 3053260800, 14855557, 6 + tz.transition 2067, 4, :o1, 3068985600, 14856649, 6 + tz.transition 2067, 10, :o2, 3084710400, 14857741, 6 + tz.transition 2068, 3, :o1, 3100435200, 14858833, 6 + tz.transition 2068, 10, :o2, 3116764800, 14859967, 6 + tz.transition 2069, 4, :o1, 3132489600, 14861059, 6 + tz.transition 2069, 10, :o2, 3148214400, 14862151, 6 + tz.transition 2070, 4, :o1, 3163939200, 14863243, 6 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/EST.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/EST.rb new file mode 100644 index 0000000..82808df --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/EST.rb @@ -0,0 +1,19 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module EST + include TimezoneDefinition + + timezone 'EST' do |tz| + tz.offset :o0, -18000, 0, :EST + + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__m__1.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__m__1.rb new file mode 100644 index 0000000..57ab6ed --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__m__1.rb @@ -0,0 +1,21 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module Etc + module GMT__m__1 + include TimezoneDefinition + + timezone 'Etc/GMT-1' do |tz| + tz.offset :o0, 3600, 0, :'+01' + + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__p__1.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__p__1.rb new file mode 100644 index 0000000..dec19e5 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__p__1.rb @@ -0,0 +1,21 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module Etc + module GMT__p__1 + include TimezoneDefinition + + timezone 'Etc/GMT+1' do |tz| + tz.offset :o0, -3600, 0, :'-01' + + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/UTC.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/UTC.rb new file mode 100644 index 0000000..48fbcf2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/UTC.rb @@ -0,0 +1,21 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module Etc + module UTC + include TimezoneDefinition + + timezone 'Etc/UTC' do |tz| + tz.offset :o0, 0, 0, :UTC + + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Amsterdam.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Amsterdam.rb new file mode 100644 index 0000000..bedf7e4 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Amsterdam.rb @@ -0,0 +1,273 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Amsterdam + include TimezoneDefinition + + timezone 'Europe/Amsterdam' do |tz| + tz.offset :o0, 1172, 0, :LMT + tz.offset :o1, 1172, 0, :AMT + tz.offset :o2, 1172, 3600, :NST + tz.offset :o3, 1200, 3600, :'+0120' + tz.offset :o4, 1200, 0, :'+0020' + tz.offset :o5, 3600, 3600, :CEST + tz.offset :o6, 3600, 0, :CET + + tz.transition 1834, 12, :o1, -4260212372, 51651636907, 21600 + tz.transition 1916, 4, :o2, -1693700372, 52293264907, 21600 + tz.transition 1916, 9, :o1, -1680484772, 52296568807, 21600 + tz.transition 1917, 4, :o2, -1663453172, 52300826707, 21600 + tz.transition 1917, 9, :o1, -1650147572, 52304153107, 21600 + tz.transition 1918, 4, :o2, -1633213172, 52308386707, 21600 + tz.transition 1918, 9, :o1, -1617488372, 52312317907, 21600 + tz.transition 1919, 4, :o2, -1601158772, 52316400307, 21600 + tz.transition 1919, 9, :o1, -1586038772, 52320180307, 21600 + tz.transition 1920, 4, :o2, -1569709172, 52324262707, 21600 + tz.transition 1920, 9, :o1, -1554589172, 52328042707, 21600 + tz.transition 1921, 4, :o2, -1538259572, 52332125107, 21600 + tz.transition 1921, 9, :o1, -1523139572, 52335905107, 21600 + tz.transition 1922, 3, :o2, -1507501172, 52339814707, 21600 + tz.transition 1922, 10, :o1, -1490566772, 52344048307, 21600 + tz.transition 1923, 6, :o2, -1470176372, 52349145907, 21600 + tz.transition 1923, 10, :o1, -1459117172, 52351910707, 21600 + tz.transition 1924, 3, :o2, -1443997172, 52355690707, 21600 + tz.transition 1924, 10, :o1, -1427667572, 52359773107, 21600 + tz.transition 1925, 6, :o2, -1406672372, 52365021907, 21600 + tz.transition 1925, 10, :o1, -1396217972, 52367635507, 21600 + tz.transition 1926, 5, :o2, -1376950772, 52372452307, 21600 + tz.transition 1926, 10, :o1, -1364768372, 52375497907, 21600 + tz.transition 1927, 5, :o2, -1345414772, 52380336307, 21600 + tz.transition 1927, 10, :o1, -1333318772, 52383360307, 21600 + tz.transition 1928, 5, :o2, -1313792372, 52388241907, 21600 + tz.transition 1928, 10, :o1, -1301264372, 52391373907, 21600 + tz.transition 1929, 5, :o2, -1282256372, 52396125907, 21600 + tz.transition 1929, 10, :o1, -1269814772, 52399236307, 21600 + tz.transition 1930, 5, :o2, -1250720372, 52404009907, 21600 + tz.transition 1930, 10, :o1, -1238365172, 52407098707, 21600 + tz.transition 1931, 5, :o2, -1219184372, 52411893907, 21600 + tz.transition 1931, 10, :o1, -1206915572, 52414961107, 21600 + tz.transition 1932, 5, :o2, -1186957172, 52419950707, 21600 + tz.transition 1932, 10, :o1, -1175465972, 52422823507, 21600 + tz.transition 1933, 5, :o2, -1156025972, 52427683507, 21600 + tz.transition 1933, 10, :o1, -1143411572, 52430837107, 21600 + tz.transition 1934, 5, :o2, -1124489972, 52435567507, 21600 + tz.transition 1934, 10, :o1, -1111961972, 52438699507, 21600 + tz.transition 1935, 5, :o2, -1092953972, 52443451507, 21600 + tz.transition 1935, 10, :o1, -1080512372, 52446561907, 21600 + tz.transition 1936, 5, :o2, -1061331572, 52451357107, 21600 + tz.transition 1936, 10, :o1, -1049062772, 52454424307, 21600 + tz.transition 1937, 5, :o2, -1029190772, 52459392307, 21600 + tz.transition 1937, 6, :o3, -1025745572, 52460253607, 21600 + tz.transition 1937, 10, :o4, -1017613200, 174874289, 72 + tz.transition 1938, 5, :o3, -998259600, 174890417, 72 + tz.transition 1938, 10, :o4, -986163600, 174900497, 72 + tz.transition 1939, 5, :o3, -966723600, 174916697, 72 + tz.transition 1939, 10, :o4, -954109200, 174927209, 72 + tz.transition 1940, 5, :o5, -935022000, 174943115, 72 + tz.transition 
1942, 11, :o6, -857257200, 58335973, 24 + tz.transition 1943, 3, :o5, -844556400, 58339501, 24 + tz.transition 1943, 10, :o6, -828226800, 58344037, 24 + tz.transition 1944, 4, :o5, -812502000, 58348405, 24 + tz.transition 1944, 10, :o6, -796777200, 58352773, 24 + tz.transition 1945, 4, :o5, -781052400, 58357141, 24 + tz.transition 1945, 9, :o6, -766623600, 58361149, 24 + tz.transition 1977, 4, :o5, 228877200 + tz.transition 1977, 9, :o6, 243997200 + tz.transition 1978, 4, :o5, 260326800 + tz.transition 1978, 10, :o6, 276051600 + tz.transition 1979, 4, :o5, 291776400 + tz.transition 1979, 9, :o6, 307501200 + tz.transition 1980, 4, :o5, 323830800 + tz.transition 1980, 9, :o6, 338950800 + tz.transition 1981, 3, :o5, 354675600 + tz.transition 1981, 9, :o6, 370400400 + tz.transition 1982, 3, :o5, 386125200 + tz.transition 1982, 9, :o6, 401850000 + tz.transition 1983, 3, :o5, 417574800 + tz.transition 1983, 9, :o6, 433299600 + tz.transition 1984, 3, :o5, 449024400 + tz.transition 1984, 9, :o6, 465354000 + tz.transition 1985, 3, :o5, 481078800 + tz.transition 1985, 9, :o6, 496803600 + tz.transition 1986, 3, :o5, 512528400 + tz.transition 1986, 9, :o6, 528253200 + tz.transition 1987, 3, :o5, 543978000 + tz.transition 1987, 9, :o6, 559702800 + tz.transition 1988, 3, :o5, 575427600 + tz.transition 1988, 9, :o6, 591152400 + tz.transition 1989, 3, :o5, 606877200 + tz.transition 1989, 9, :o6, 622602000 + tz.transition 1990, 3, :o5, 638326800 + tz.transition 1990, 9, :o6, 654656400 + tz.transition 1991, 3, :o5, 670381200 + tz.transition 1991, 9, :o6, 686106000 + tz.transition 1992, 3, :o5, 701830800 + tz.transition 1992, 9, :o6, 717555600 + tz.transition 1993, 3, :o5, 733280400 + tz.transition 1993, 9, :o6, 749005200 + tz.transition 1994, 3, :o5, 764730000 + tz.transition 1994, 9, :o6, 780454800 + tz.transition 1995, 3, :o5, 796179600 + tz.transition 1995, 9, :o6, 811904400 + tz.transition 1996, 3, :o5, 828234000 + tz.transition 1996, 10, :o6, 846378000 + tz.transition 1997, 3, :o5, 859683600 + tz.transition 1997, 10, :o6, 877827600 + tz.transition 1998, 3, :o5, 891133200 + tz.transition 1998, 10, :o6, 909277200 + tz.transition 1999, 3, :o5, 922582800 + tz.transition 1999, 10, :o6, 941331600 + tz.transition 2000, 3, :o5, 954032400 + tz.transition 2000, 10, :o6, 972781200 + tz.transition 2001, 3, :o5, 985482000 + tz.transition 2001, 10, :o6, 1004230800 + tz.transition 2002, 3, :o5, 1017536400 + tz.transition 2002, 10, :o6, 1035680400 + tz.transition 2003, 3, :o5, 1048986000 + tz.transition 2003, 10, :o6, 1067130000 + tz.transition 2004, 3, :o5, 1080435600 + tz.transition 2004, 10, :o6, 1099184400 + tz.transition 2005, 3, :o5, 1111885200 + tz.transition 2005, 10, :o6, 1130634000 + tz.transition 2006, 3, :o5, 1143334800 + tz.transition 2006, 10, :o6, 1162083600 + tz.transition 2007, 3, :o5, 1174784400 + tz.transition 2007, 10, :o6, 1193533200 + tz.transition 2008, 3, :o5, 1206838800 + tz.transition 2008, 10, :o6, 1224982800 + tz.transition 2009, 3, :o5, 1238288400 + tz.transition 2009, 10, :o6, 1256432400 + tz.transition 2010, 3, :o5, 1269738000 + tz.transition 2010, 10, :o6, 1288486800 + tz.transition 2011, 3, :o5, 1301187600 + tz.transition 2011, 10, :o6, 1319936400 + tz.transition 2012, 3, :o5, 1332637200 + tz.transition 2012, 10, :o6, 1351386000 + tz.transition 2013, 3, :o5, 1364691600 + tz.transition 2013, 10, :o6, 1382835600 + tz.transition 2014, 3, :o5, 1396141200 + tz.transition 2014, 10, :o6, 1414285200 + tz.transition 2015, 3, :o5, 1427590800 + tz.transition 2015, 10, :o6, 1445734800 + 
tz.transition 2016, 3, :o5, 1459040400 + tz.transition 2016, 10, :o6, 1477789200 + tz.transition 2017, 3, :o5, 1490490000 + tz.transition 2017, 10, :o6, 1509238800 + tz.transition 2018, 3, :o5, 1521939600 + tz.transition 2018, 10, :o6, 1540688400 + tz.transition 2019, 3, :o5, 1553994000 + tz.transition 2019, 10, :o6, 1572138000 + tz.transition 2020, 3, :o5, 1585443600 + tz.transition 2020, 10, :o6, 1603587600 + tz.transition 2021, 3, :o5, 1616893200 + tz.transition 2021, 10, :o6, 1635642000 + tz.transition 2022, 3, :o5, 1648342800 + tz.transition 2022, 10, :o6, 1667091600 + tz.transition 2023, 3, :o5, 1679792400 + tz.transition 2023, 10, :o6, 1698541200 + tz.transition 2024, 3, :o5, 1711846800 + tz.transition 2024, 10, :o6, 1729990800 + tz.transition 2025, 3, :o5, 1743296400 + tz.transition 2025, 10, :o6, 1761440400 + tz.transition 2026, 3, :o5, 1774746000 + tz.transition 2026, 10, :o6, 1792890000 + tz.transition 2027, 3, :o5, 1806195600 + tz.transition 2027, 10, :o6, 1824944400 + tz.transition 2028, 3, :o5, 1837645200 + tz.transition 2028, 10, :o6, 1856394000 + tz.transition 2029, 3, :o5, 1869094800 + tz.transition 2029, 10, :o6, 1887843600 + tz.transition 2030, 3, :o5, 1901149200 + tz.transition 2030, 10, :o6, 1919293200 + tz.transition 2031, 3, :o5, 1932598800 + tz.transition 2031, 10, :o6, 1950742800 + tz.transition 2032, 3, :o5, 1964048400 + tz.transition 2032, 10, :o6, 1982797200 + tz.transition 2033, 3, :o5, 1995498000 + tz.transition 2033, 10, :o6, 2014246800 + tz.transition 2034, 3, :o5, 2026947600 + tz.transition 2034, 10, :o6, 2045696400 + tz.transition 2035, 3, :o5, 2058397200 + tz.transition 2035, 10, :o6, 2077146000 + tz.transition 2036, 3, :o5, 2090451600 + tz.transition 2036, 10, :o6, 2108595600 + tz.transition 2037, 3, :o5, 2121901200 + tz.transition 2037, 10, :o6, 2140045200 + tz.transition 2038, 3, :o5, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o6, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o5, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o6, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o5, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o6, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o5, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o6, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o5, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o6, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o5, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o6, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o5, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o6, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o5, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o6, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o5, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o6, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o5, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o6, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o5, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o6, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o5, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o6, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o5, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o6, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o5, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o6, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o5, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o6, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o5, 2626909200, 59303797, 24 + 
tz.transition 2053, 10, :o6, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o5, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o6, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o5, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o6, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o5, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o6, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o5, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o6, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o5, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o6, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o5, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o6, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o5, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o6, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o5, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o6, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o5, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o6, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o5, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o6, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o5, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o6, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o5, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o6, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o5, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o6, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o5, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o6, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o5, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o6, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o5, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o6, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o5, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o6, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Andorra.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Andorra.rb new file mode 100644 index 0000000..d51c5dc --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Andorra.rb @@ -0,0 +1,198 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Andorra + include TimezoneDefinition + + timezone 'Europe/Andorra' do |tz| + tz.offset :o0, 364, 0, :LMT + tz.offset :o1, 0, 0, :WET + tz.offset :o2, 3600, 0, :CET + tz.offset :o3, 3600, 3600, :CEST + + tz.transition 1900, 12, :o1, -2177453164, 52172326709, 21600 + tz.transition 1946, 9, :o2, -733881600, 4864187, 2 + tz.transition 1985, 3, :o3, 481078800 + tz.transition 1985, 9, :o2, 496803600 + tz.transition 1986, 3, :o3, 512528400 + tz.transition 1986, 9, :o2, 528253200 + tz.transition 1987, 3, :o3, 543978000 + tz.transition 1987, 9, :o2, 559702800 + tz.transition 1988, 3, :o3, 575427600 + tz.transition 1988, 9, :o2, 591152400 + tz.transition 1989, 3, :o3, 606877200 + tz.transition 1989, 9, :o2, 622602000 + tz.transition 1990, 3, :o3, 638326800 + tz.transition 1990, 9, :o2, 654656400 + tz.transition 1991, 3, :o3, 670381200 + tz.transition 1991, 9, :o2, 686106000 + tz.transition 1992, 3, :o3, 701830800 + tz.transition 1992, 9, :o2, 717555600 + tz.transition 1993, 3, :o3, 733280400 + tz.transition 1993, 9, :o2, 749005200 + tz.transition 1994, 3, :o3, 764730000 + tz.transition 1994, 9, :o2, 780454800 + tz.transition 1995, 3, :o3, 796179600 + tz.transition 1995, 9, :o2, 811904400 + tz.transition 1996, 3, :o3, 828234000 + tz.transition 1996, 10, :o2, 846378000 + tz.transition 1997, 3, :o3, 859683600 + tz.transition 1997, 10, :o2, 877827600 + tz.transition 1998, 3, :o3, 891133200 + tz.transition 1998, 10, :o2, 909277200 + tz.transition 1999, 3, :o3, 922582800 + tz.transition 1999, 10, :o2, 941331600 + tz.transition 2000, 3, :o3, 954032400 + tz.transition 2000, 10, :o2, 972781200 + tz.transition 2001, 3, :o3, 985482000 + tz.transition 2001, 10, :o2, 1004230800 + tz.transition 2002, 3, :o3, 1017536400 + tz.transition 2002, 10, :o2, 1035680400 + tz.transition 2003, 3, :o3, 1048986000 + tz.transition 2003, 10, :o2, 1067130000 + tz.transition 2004, 3, :o3, 1080435600 + tz.transition 2004, 10, :o2, 1099184400 + tz.transition 2005, 3, :o3, 1111885200 + tz.transition 2005, 10, :o2, 1130634000 + tz.transition 2006, 3, :o3, 1143334800 + tz.transition 2006, 10, :o2, 1162083600 + tz.transition 2007, 3, :o3, 1174784400 + tz.transition 2007, 10, :o2, 1193533200 + tz.transition 2008, 3, :o3, 1206838800 + tz.transition 2008, 10, :o2, 1224982800 + tz.transition 2009, 3, :o3, 1238288400 + tz.transition 2009, 10, :o2, 1256432400 + tz.transition 2010, 3, :o3, 1269738000 + tz.transition 2010, 10, :o2, 1288486800 + tz.transition 2011, 3, :o3, 1301187600 + tz.transition 2011, 10, :o2, 1319936400 + tz.transition 2012, 3, :o3, 1332637200 + tz.transition 2012, 10, :o2, 1351386000 + tz.transition 2013, 3, :o3, 1364691600 + tz.transition 2013, 10, :o2, 1382835600 + tz.transition 2014, 3, :o3, 1396141200 + tz.transition 2014, 10, :o2, 1414285200 + tz.transition 2015, 3, :o3, 1427590800 + tz.transition 2015, 10, :o2, 1445734800 + tz.transition 2016, 3, :o3, 1459040400 + tz.transition 2016, 10, :o2, 1477789200 + tz.transition 2017, 3, :o3, 1490490000 + tz.transition 2017, 10, :o2, 1509238800 + tz.transition 2018, 3, :o3, 1521939600 + tz.transition 2018, 10, :o2, 1540688400 + tz.transition 2019, 3, :o3, 1553994000 + tz.transition 2019, 10, :o2, 1572138000 + tz.transition 2020, 3, :o3, 1585443600 + tz.transition 2020, 10, :o2, 1603587600 + tz.transition 2021, 3, :o3, 1616893200 + tz.transition 2021, 10, :o2, 1635642000 + tz.transition 2022, 3, :o3, 1648342800 + tz.transition 2022, 10, :o2, 1667091600 + tz.transition 2023, 3, :o3, 1679792400 + 
tz.transition 2023, 10, :o2, 1698541200 + tz.transition 2024, 3, :o3, 1711846800 + tz.transition 2024, 10, :o2, 1729990800 + tz.transition 2025, 3, :o3, 1743296400 + tz.transition 2025, 10, :o2, 1761440400 + tz.transition 2026, 3, :o3, 1774746000 + tz.transition 2026, 10, :o2, 1792890000 + tz.transition 2027, 3, :o3, 1806195600 + tz.transition 2027, 10, :o2, 1824944400 + tz.transition 2028, 3, :o3, 1837645200 + tz.transition 2028, 10, :o2, 1856394000 + tz.transition 2029, 3, :o3, 1869094800 + tz.transition 2029, 10, :o2, 1887843600 + tz.transition 2030, 3, :o3, 1901149200 + tz.transition 2030, 10, :o2, 1919293200 + tz.transition 2031, 3, :o3, 1932598800 + tz.transition 2031, 10, :o2, 1950742800 + tz.transition 2032, 3, :o3, 1964048400 + tz.transition 2032, 10, :o2, 1982797200 + tz.transition 2033, 3, :o3, 1995498000 + tz.transition 2033, 10, :o2, 2014246800 + tz.transition 2034, 3, :o3, 2026947600 + tz.transition 2034, 10, :o2, 2045696400 + tz.transition 2035, 3, :o3, 2058397200 + tz.transition 2035, 10, :o2, 2077146000 + tz.transition 2036, 3, :o3, 2090451600 + tz.transition 2036, 10, :o2, 2108595600 + tz.transition 2037, 3, :o3, 2121901200 + tz.transition 2037, 10, :o2, 2140045200 + tz.transition 2038, 3, :o3, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o2, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o3, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o2, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o3, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o2, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o3, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o2, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o3, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o2, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o3, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o2, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o3, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o2, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o3, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o2, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o3, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o2, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o3, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o2, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o3, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o2, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o3, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o2, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o3, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o2, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o3, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o2, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o3, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o2, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o3, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o2, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o3, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o2, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o3, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o2, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o3, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o2, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o3, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o2, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o3, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o2, 2802906000, 59352685, 24 + tz.transition 2059, 
3, :o3, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o2, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o3, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o2, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o3, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o2, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o3, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o2, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o3, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o2, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o3, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o2, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o3, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o2, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o3, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o2, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o3, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o2, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o3, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o2, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o3, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o2, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o3, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o2, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/London.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/London.rb new file mode 100644 index 0000000..942e008 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/London.rb @@ -0,0 +1,333 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module London + include TimezoneDefinition + + timezone 'Europe/London' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + tz.offset :o2, 0, 3600, :BST + tz.offset :o3, 0, 7200, :BDST + tz.offset :o4, 3600, 0, :BST + + tz.transition 1847, 12, :o1, -3852662325, 2760187969, 1152 + tz.transition 1916, 5, :o2, -1691964000, 29052055, 12 + tz.transition 1916, 10, :o1, -1680472800, 29053651, 12 + tz.transition 1917, 4, :o2, -1664143200, 29055919, 12 + tz.transition 1917, 9, :o1, -1650146400, 29057863, 12 + tz.transition 1918, 3, :o2, -1633903200, 29060119, 12 + tz.transition 1918, 9, :o1, -1617487200, 29062399, 12 + tz.transition 1919, 3, :o2, -1601848800, 29064571, 12 + tz.transition 1919, 9, :o1, -1586037600, 29066767, 12 + tz.transition 1920, 3, :o2, -1570399200, 29068939, 12 + tz.transition 1920, 10, :o1, -1552168800, 29071471, 12 + tz.transition 1921, 4, :o2, -1538344800, 29073391, 12 + tz.transition 1921, 10, :o1, -1522533600, 29075587, 12 + tz.transition 1922, 3, :o2, -1507500000, 29077675, 12 + tz.transition 1922, 10, :o1, -1490565600, 29080027, 12 + tz.transition 1923, 4, :o2, -1473631200, 29082379, 12 + tz.transition 1923, 9, :o1, -1460930400, 29084143, 12 + tz.transition 1924, 4, :o2, -1442786400, 29086663, 12 + tz.transition 1924, 9, :o1, -1428876000, 29088595, 12 + tz.transition 1925, 4, :o2, -1410732000, 29091115, 12 + tz.transition 1925, 10, :o1, -1396216800, 29093131, 12 + tz.transition 1926, 4, :o2, -1379282400, 29095483, 12 + tz.transition 1926, 10, :o1, -1364767200, 29097499, 12 + tz.transition 1927, 4, :o2, -1348437600, 29099767, 12 + tz.transition 1927, 10, :o1, -1333317600, 29101867, 12 + tz.transition 1928, 4, :o2, -1315778400, 29104303, 12 + tz.transition 1928, 10, :o1, -1301263200, 29106319, 12 + tz.transition 1929, 4, :o2, -1284328800, 29108671, 12 + tz.transition 1929, 10, :o1, -1269813600, 29110687, 12 + tz.transition 1930, 4, :o2, -1253484000, 29112955, 12 + tz.transition 1930, 10, :o1, -1238364000, 29115055, 12 + tz.transition 1931, 4, :o2, -1221429600, 29117407, 12 + tz.transition 1931, 10, :o1, -1206914400, 29119423, 12 + tz.transition 1932, 4, :o2, -1189980000, 29121775, 12 + tz.transition 1932, 10, :o1, -1175464800, 29123791, 12 + tz.transition 1933, 4, :o2, -1159135200, 29126059, 12 + tz.transition 1933, 10, :o1, -1143410400, 29128243, 12 + tz.transition 1934, 4, :o2, -1126476000, 29130595, 12 + tz.transition 1934, 10, :o1, -1111960800, 29132611, 12 + tz.transition 1935, 4, :o2, -1095631200, 29134879, 12 + tz.transition 1935, 10, :o1, -1080511200, 29136979, 12 + tz.transition 1936, 4, :o2, -1063576800, 29139331, 12 + tz.transition 1936, 10, :o1, -1049061600, 29141347, 12 + tz.transition 1937, 4, :o2, -1032127200, 29143699, 12 + tz.transition 1937, 10, :o1, -1017612000, 29145715, 12 + tz.transition 1938, 4, :o2, -1001282400, 29147983, 12 + tz.transition 1938, 10, :o1, -986162400, 29150083, 12 + tz.transition 1939, 4, :o2, -969228000, 29152435, 12 + tz.transition 1939, 11, :o1, -950479200, 29155039, 12 + tz.transition 1940, 2, :o2, -942012000, 29156215, 12 + tz.transition 1941, 5, :o3, -904518000, 58322845, 24 + tz.transition 1941, 8, :o2, -896050800, 58325197, 24 + tz.transition 1942, 4, :o3, -875487600, 58330909, 24 + tz.transition 1942, 8, :o2, -864601200, 58333933, 24 + tz.transition 1943, 4, :o3, -844038000, 58339645, 24 + tz.transition 1943, 8, :o2, -832546800, 58342837, 24 + tz.transition 1944, 4, :o3, -812588400, 58348381, 24 + tz.transition 1944, 9, :o2, -798073200, 
58352413, 24 + tz.transition 1945, 4, :o3, -781052400, 58357141, 24 + tz.transition 1945, 7, :o2, -772066800, 58359637, 24 + tz.transition 1945, 10, :o1, -764805600, 29180827, 12 + tz.transition 1946, 4, :o2, -748476000, 29183095, 12 + tz.transition 1946, 10, :o1, -733356000, 29185195, 12 + tz.transition 1947, 3, :o2, -719445600, 29187127, 12 + tz.transition 1947, 4, :o3, -717030000, 58374925, 24 + tz.transition 1947, 8, :o2, -706748400, 58377781, 24 + tz.transition 1947, 11, :o1, -699487200, 29189899, 12 + tz.transition 1948, 3, :o2, -687996000, 29191495, 12 + tz.transition 1948, 10, :o1, -668037600, 29194267, 12 + tz.transition 1949, 4, :o2, -654732000, 29196115, 12 + tz.transition 1949, 10, :o1, -636588000, 29198635, 12 + tz.transition 1950, 4, :o2, -622072800, 29200651, 12 + tz.transition 1950, 10, :o1, -605743200, 29202919, 12 + tz.transition 1951, 4, :o2, -590623200, 29205019, 12 + tz.transition 1951, 10, :o1, -574293600, 29207287, 12 + tz.transition 1952, 4, :o2, -558568800, 29209471, 12 + tz.transition 1952, 10, :o1, -542239200, 29211739, 12 + tz.transition 1953, 4, :o2, -527119200, 29213839, 12 + tz.transition 1953, 10, :o1, -512604000, 29215855, 12 + tz.transition 1954, 4, :o2, -496274400, 29218123, 12 + tz.transition 1954, 10, :o1, -481154400, 29220223, 12 + tz.transition 1955, 4, :o2, -464220000, 29222575, 12 + tz.transition 1955, 10, :o1, -449704800, 29224591, 12 + tz.transition 1956, 4, :o2, -432165600, 29227027, 12 + tz.transition 1956, 10, :o1, -417650400, 29229043, 12 + tz.transition 1957, 4, :o2, -401320800, 29231311, 12 + tz.transition 1957, 10, :o1, -386200800, 29233411, 12 + tz.transition 1958, 4, :o2, -369266400, 29235763, 12 + tz.transition 1958, 10, :o1, -354751200, 29237779, 12 + tz.transition 1959, 4, :o2, -337816800, 29240131, 12 + tz.transition 1959, 10, :o1, -323301600, 29242147, 12 + tz.transition 1960, 4, :o2, -306972000, 29244415, 12 + tz.transition 1960, 10, :o1, -291852000, 29246515, 12 + tz.transition 1961, 3, :o2, -276732000, 29248615, 12 + tz.transition 1961, 10, :o1, -257983200, 29251219, 12 + tz.transition 1962, 3, :o2, -245282400, 29252983, 12 + tz.transition 1962, 10, :o1, -226533600, 29255587, 12 + tz.transition 1963, 3, :o2, -213228000, 29257435, 12 + tz.transition 1963, 10, :o1, -195084000, 29259955, 12 + tz.transition 1964, 3, :o2, -182383200, 29261719, 12 + tz.transition 1964, 10, :o1, -163634400, 29264323, 12 + tz.transition 1965, 3, :o2, -150933600, 29266087, 12 + tz.transition 1965, 10, :o1, -132184800, 29268691, 12 + tz.transition 1966, 3, :o2, -119484000, 29270455, 12 + tz.transition 1966, 10, :o1, -100735200, 29273059, 12 + tz.transition 1967, 3, :o2, -88034400, 29274823, 12 + tz.transition 1967, 10, :o1, -68680800, 29277511, 12 + tz.transition 1968, 2, :o2, -59004000, 29278855, 12 + tz.transition 1968, 10, :o4, -37242000, 58563755, 24 + tz.transition 1971, 10, :o1, 57722400 + tz.transition 1972, 3, :o2, 69818400 + tz.transition 1972, 10, :o1, 89172000 + tz.transition 1973, 3, :o2, 101268000 + tz.transition 1973, 10, :o1, 120621600 + tz.transition 1974, 3, :o2, 132717600 + tz.transition 1974, 10, :o1, 152071200 + tz.transition 1975, 3, :o2, 164167200 + tz.transition 1975, 10, :o1, 183520800 + tz.transition 1976, 3, :o2, 196221600 + tz.transition 1976, 10, :o1, 214970400 + tz.transition 1977, 3, :o2, 227671200 + tz.transition 1977, 10, :o1, 246420000 + tz.transition 1978, 3, :o2, 259120800 + tz.transition 1978, 10, :o1, 278474400 + tz.transition 1979, 3, :o2, 290570400 + tz.transition 1979, 10, :o1, 309924000 + tz.transition 1980, 3, :o2, 
322020000 + tz.transition 1980, 10, :o1, 341373600 + tz.transition 1981, 3, :o2, 354675600 + tz.transition 1981, 10, :o1, 372819600 + tz.transition 1982, 3, :o2, 386125200 + tz.transition 1982, 10, :o1, 404269200 + tz.transition 1983, 3, :o2, 417574800 + tz.transition 1983, 10, :o1, 435718800 + tz.transition 1984, 3, :o2, 449024400 + tz.transition 1984, 10, :o1, 467773200 + tz.transition 1985, 3, :o2, 481078800 + tz.transition 1985, 10, :o1, 499222800 + tz.transition 1986, 3, :o2, 512528400 + tz.transition 1986, 10, :o1, 530672400 + tz.transition 1987, 3, :o2, 543978000 + tz.transition 1987, 10, :o1, 562122000 + tz.transition 1988, 3, :o2, 575427600 + tz.transition 1988, 10, :o1, 593571600 + tz.transition 1989, 3, :o2, 606877200 + tz.transition 1989, 10, :o1, 625626000 + tz.transition 1990, 3, :o2, 638326800 + tz.transition 1990, 10, :o1, 657075600 + tz.transition 1991, 3, :o2, 670381200 + tz.transition 1991, 10, :o1, 688525200 + tz.transition 1992, 3, :o2, 701830800 + tz.transition 1992, 10, :o1, 719974800 + tz.transition 1993, 3, :o2, 733280400 + tz.transition 1993, 10, :o1, 751424400 + tz.transition 1994, 3, :o2, 764730000 + tz.transition 1994, 10, :o1, 782874000 + tz.transition 1995, 3, :o2, 796179600 + tz.transition 1995, 10, :o1, 814323600 + tz.transition 1996, 3, :o2, 828234000 + tz.transition 1996, 10, :o1, 846378000 + tz.transition 1997, 3, :o2, 859683600 + tz.transition 1997, 10, :o1, 877827600 + tz.transition 1998, 3, :o2, 891133200 + tz.transition 1998, 10, :o1, 909277200 + tz.transition 1999, 3, :o2, 922582800 + tz.transition 1999, 10, :o1, 941331600 + tz.transition 2000, 3, :o2, 954032400 + tz.transition 2000, 10, :o1, 972781200 + tz.transition 2001, 3, :o2, 985482000 + tz.transition 2001, 10, :o1, 1004230800 + tz.transition 2002, 3, :o2, 1017536400 + tz.transition 2002, 10, :o1, 1035680400 + tz.transition 2003, 3, :o2, 1048986000 + tz.transition 2003, 10, :o1, 1067130000 + tz.transition 2004, 3, :o2, 1080435600 + tz.transition 2004, 10, :o1, 1099184400 + tz.transition 2005, 3, :o2, 1111885200 + tz.transition 2005, 10, :o1, 1130634000 + tz.transition 2006, 3, :o2, 1143334800 + tz.transition 2006, 10, :o1, 1162083600 + tz.transition 2007, 3, :o2, 1174784400 + tz.transition 2007, 10, :o1, 1193533200 + tz.transition 2008, 3, :o2, 1206838800 + tz.transition 2008, 10, :o1, 1224982800 + tz.transition 2009, 3, :o2, 1238288400 + tz.transition 2009, 10, :o1, 1256432400 + tz.transition 2010, 3, :o2, 1269738000 + tz.transition 2010, 10, :o1, 1288486800 + tz.transition 2011, 3, :o2, 1301187600 + tz.transition 2011, 10, :o1, 1319936400 + tz.transition 2012, 3, :o2, 1332637200 + tz.transition 2012, 10, :o1, 1351386000 + tz.transition 2013, 3, :o2, 1364691600 + tz.transition 2013, 10, :o1, 1382835600 + tz.transition 2014, 3, :o2, 1396141200 + tz.transition 2014, 10, :o1, 1414285200 + tz.transition 2015, 3, :o2, 1427590800 + tz.transition 2015, 10, :o1, 1445734800 + tz.transition 2016, 3, :o2, 1459040400 + tz.transition 2016, 10, :o1, 1477789200 + tz.transition 2017, 3, :o2, 1490490000 + tz.transition 2017, 10, :o1, 1509238800 + tz.transition 2018, 3, :o2, 1521939600 + tz.transition 2018, 10, :o1, 1540688400 + tz.transition 2019, 3, :o2, 1553994000 + tz.transition 2019, 10, :o1, 1572138000 + tz.transition 2020, 3, :o2, 1585443600 + tz.transition 2020, 10, :o1, 1603587600 + tz.transition 2021, 3, :o2, 1616893200 + tz.transition 2021, 10, :o1, 1635642000 + tz.transition 2022, 3, :o2, 1648342800 + tz.transition 2022, 10, :o1, 1667091600 + tz.transition 2023, 3, :o2, 1679792400 + tz.transition 
2023, 10, :o1, 1698541200 + tz.transition 2024, 3, :o2, 1711846800 + tz.transition 2024, 10, :o1, 1729990800 + tz.transition 2025, 3, :o2, 1743296400 + tz.transition 2025, 10, :o1, 1761440400 + tz.transition 2026, 3, :o2, 1774746000 + tz.transition 2026, 10, :o1, 1792890000 + tz.transition 2027, 3, :o2, 1806195600 + tz.transition 2027, 10, :o1, 1824944400 + tz.transition 2028, 3, :o2, 1837645200 + tz.transition 2028, 10, :o1, 1856394000 + tz.transition 2029, 3, :o2, 1869094800 + tz.transition 2029, 10, :o1, 1887843600 + tz.transition 2030, 3, :o2, 1901149200 + tz.transition 2030, 10, :o1, 1919293200 + tz.transition 2031, 3, :o2, 1932598800 + tz.transition 2031, 10, :o1, 1950742800 + tz.transition 2032, 3, :o2, 1964048400 + tz.transition 2032, 10, :o1, 1982797200 + tz.transition 2033, 3, :o2, 1995498000 + tz.transition 2033, 10, :o1, 2014246800 + tz.transition 2034, 3, :o2, 2026947600 + tz.transition 2034, 10, :o1, 2045696400 + tz.transition 2035, 3, :o2, 2058397200 + tz.transition 2035, 10, :o1, 2077146000 + tz.transition 2036, 3, :o2, 2090451600 + tz.transition 2036, 10, :o1, 2108595600 + tz.transition 2037, 3, :o2, 2121901200 + tz.transition 2037, 10, :o1, 2140045200 + tz.transition 2038, 3, :o2, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o1, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o2, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o1, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o2, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o1, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o2, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o1, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o2, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o1, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o2, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o1, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o2, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o1, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o2, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o1, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o2, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o1, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o2, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o1, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o2, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o1, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o2, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o1, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o2, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o1, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o2, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o1, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o2, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o1, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o2, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o1, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o2, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o1, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o2, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o1, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o2, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o1, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o2, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o1, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o2, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o1, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o2, 
2816211600, 59356381, 24 + tz.transition 2059, 10, :o1, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o2, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o1, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o2, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o1, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o2, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o1, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o2, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o1, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o2, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o1, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o2, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o1, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o2, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o1, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o2, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o1, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o2, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o1, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o2, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o1, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o2, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o1, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Paris.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Paris.rb new file mode 100644 index 0000000..e16ba37 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Paris.rb @@ -0,0 +1,277 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Paris + include TimezoneDefinition + + timezone 'Europe/Paris' do |tz| + tz.offset :o0, 561, 0, :LMT + tz.offset :o1, 561, 0, :PMT + tz.offset :o2, 0, 0, :WET + tz.offset :o3, 0, 3600, :WEST + tz.offset :o4, 3600, 3600, :CEST + tz.offset :o5, 3600, 0, :CET + tz.offset :o6, 0, 7200, :WEMT + + tz.transition 1891, 3, :o1, -2486592561, 69460055813, 28800 + tz.transition 1911, 3, :o2, -1855958961, 69670267013, 28800 + tz.transition 1916, 6, :o3, -1689814800, 58104707, 24 + tz.transition 1916, 10, :o2, -1680397200, 58107323, 24 + tz.transition 1917, 3, :o3, -1665363600, 58111499, 24 + tz.transition 1917, 10, :o2, -1648342800, 58116227, 24 + tz.transition 1918, 3, :o3, -1635123600, 58119899, 24 + tz.transition 1918, 10, :o2, -1616893200, 58124963, 24 + tz.transition 1919, 3, :o3, -1604278800, 58128467, 24 + tz.transition 1919, 10, :o2, -1585443600, 58133699, 24 + tz.transition 1920, 2, :o3, -1574038800, 58136867, 24 + tz.transition 1920, 10, :o2, -1552266000, 58142915, 24 + tz.transition 1921, 3, :o3, -1539997200, 58146323, 24 + tz.transition 1921, 10, :o2, -1520557200, 58151723, 24 + tz.transition 1922, 3, :o3, -1507510800, 58155347, 24 + tz.transition 1922, 10, :o2, -1490576400, 58160051, 24 + tz.transition 1923, 5, :o3, -1470618000, 58165595, 24 + tz.transition 1923, 10, :o2, -1459126800, 58168787, 24 + tz.transition 1924, 3, :o3, -1444006800, 58172987, 24 + tz.transition 1924, 10, :o2, -1427677200, 58177523, 24 + tz.transition 1925, 4, :o3, -1411952400, 58181891, 24 + tz.transition 1925, 10, :o2, -1396227600, 58186259, 24 + tz.transition 1926, 4, :o3, -1379293200, 58190963, 24 + tz.transition 1926, 10, :o2, -1364778000, 58194995, 24 + tz.transition 1927, 4, :o3, -1348448400, 58199531, 24 + tz.transition 1927, 10, :o2, -1333328400, 58203731, 24 + tz.transition 1928, 4, :o3, -1316394000, 58208435, 24 + tz.transition 1928, 10, :o2, -1301274000, 58212635, 24 + tz.transition 1929, 4, :o3, -1284339600, 58217339, 24 + tz.transition 1929, 10, :o2, -1269824400, 58221371, 24 + tz.transition 1930, 4, :o3, -1253494800, 58225907, 24 + tz.transition 1930, 10, :o2, -1238374800, 58230107, 24 + tz.transition 1931, 4, :o3, -1221440400, 58234811, 24 + tz.transition 1931, 10, :o2, -1206925200, 58238843, 24 + tz.transition 1932, 4, :o3, -1191200400, 58243211, 24 + tz.transition 1932, 10, :o2, -1175475600, 58247579, 24 + tz.transition 1933, 3, :o3, -1160355600, 58251779, 24 + tz.transition 1933, 10, :o2, -1143421200, 58256483, 24 + tz.transition 1934, 4, :o3, -1127696400, 58260851, 24 + tz.transition 1934, 10, :o2, -1111971600, 58265219, 24 + tz.transition 1935, 3, :o3, -1096851600, 58269419, 24 + tz.transition 1935, 10, :o2, -1080522000, 58273955, 24 + tz.transition 1936, 4, :o3, -1063587600, 58278659, 24 + tz.transition 1936, 10, :o2, -1049072400, 58282691, 24 + tz.transition 1937, 4, :o3, -1033347600, 58287059, 24 + tz.transition 1937, 10, :o2, -1017622800, 58291427, 24 + tz.transition 1938, 3, :o3, -1002502800, 58295627, 24 + tz.transition 1938, 10, :o2, -986173200, 58300163, 24 + tz.transition 1939, 4, :o3, -969238800, 58304867, 24 + tz.transition 1939, 11, :o2, -950490000, 58310075, 24 + tz.transition 1940, 2, :o3, -942012000, 29156215, 12 + tz.transition 1940, 6, :o4, -932436000, 29157545, 12 + tz.transition 1942, 11, :o5, -857257200, 58335973, 24 + tz.transition 1943, 3, :o4, -844556400, 58339501, 24 + tz.transition 1943, 10, :o5, -828226800, 58344037, 24 + tz.transition 1944, 4, :o4, -812502000, 58348405, 24 + tz.transition 1944, 
8, :o6, -800071200, 29175929, 12 + tz.transition 1944, 10, :o3, -796266000, 58352915, 24 + tz.transition 1945, 4, :o6, -781052400, 58357141, 24 + tz.transition 1945, 9, :o5, -766623600, 58361149, 24 + tz.transition 1976, 3, :o4, 196819200 + tz.transition 1976, 9, :o5, 212540400 + tz.transition 1977, 4, :o4, 228877200 + tz.transition 1977, 9, :o5, 243997200 + tz.transition 1978, 4, :o4, 260326800 + tz.transition 1978, 10, :o5, 276051600 + tz.transition 1979, 4, :o4, 291776400 + tz.transition 1979, 9, :o5, 307501200 + tz.transition 1980, 4, :o4, 323830800 + tz.transition 1980, 9, :o5, 338950800 + tz.transition 1981, 3, :o4, 354675600 + tz.transition 1981, 9, :o5, 370400400 + tz.transition 1982, 3, :o4, 386125200 + tz.transition 1982, 9, :o5, 401850000 + tz.transition 1983, 3, :o4, 417574800 + tz.transition 1983, 9, :o5, 433299600 + tz.transition 1984, 3, :o4, 449024400 + tz.transition 1984, 9, :o5, 465354000 + tz.transition 1985, 3, :o4, 481078800 + tz.transition 1985, 9, :o5, 496803600 + tz.transition 1986, 3, :o4, 512528400 + tz.transition 1986, 9, :o5, 528253200 + tz.transition 1987, 3, :o4, 543978000 + tz.transition 1987, 9, :o5, 559702800 + tz.transition 1988, 3, :o4, 575427600 + tz.transition 1988, 9, :o5, 591152400 + tz.transition 1989, 3, :o4, 606877200 + tz.transition 1989, 9, :o5, 622602000 + tz.transition 1990, 3, :o4, 638326800 + tz.transition 1990, 9, :o5, 654656400 + tz.transition 1991, 3, :o4, 670381200 + tz.transition 1991, 9, :o5, 686106000 + tz.transition 1992, 3, :o4, 701830800 + tz.transition 1992, 9, :o5, 717555600 + tz.transition 1993, 3, :o4, 733280400 + tz.transition 1993, 9, :o5, 749005200 + tz.transition 1994, 3, :o4, 764730000 + tz.transition 1994, 9, :o5, 780454800 + tz.transition 1995, 3, :o4, 796179600 + tz.transition 1995, 9, :o5, 811904400 + tz.transition 1996, 3, :o4, 828234000 + tz.transition 1996, 10, :o5, 846378000 + tz.transition 1997, 3, :o4, 859683600 + tz.transition 1997, 10, :o5, 877827600 + tz.transition 1998, 3, :o4, 891133200 + tz.transition 1998, 10, :o5, 909277200 + tz.transition 1999, 3, :o4, 922582800 + tz.transition 1999, 10, :o5, 941331600 + tz.transition 2000, 3, :o4, 954032400 + tz.transition 2000, 10, :o5, 972781200 + tz.transition 2001, 3, :o4, 985482000 + tz.transition 2001, 10, :o5, 1004230800 + tz.transition 2002, 3, :o4, 1017536400 + tz.transition 2002, 10, :o5, 1035680400 + tz.transition 2003, 3, :o4, 1048986000 + tz.transition 2003, 10, :o5, 1067130000 + tz.transition 2004, 3, :o4, 1080435600 + tz.transition 2004, 10, :o5, 1099184400 + tz.transition 2005, 3, :o4, 1111885200 + tz.transition 2005, 10, :o5, 1130634000 + tz.transition 2006, 3, :o4, 1143334800 + tz.transition 2006, 10, :o5, 1162083600 + tz.transition 2007, 3, :o4, 1174784400 + tz.transition 2007, 10, :o5, 1193533200 + tz.transition 2008, 3, :o4, 1206838800 + tz.transition 2008, 10, :o5, 1224982800 + tz.transition 2009, 3, :o4, 1238288400 + tz.transition 2009, 10, :o5, 1256432400 + tz.transition 2010, 3, :o4, 1269738000 + tz.transition 2010, 10, :o5, 1288486800 + tz.transition 2011, 3, :o4, 1301187600 + tz.transition 2011, 10, :o5, 1319936400 + tz.transition 2012, 3, :o4, 1332637200 + tz.transition 2012, 10, :o5, 1351386000 + tz.transition 2013, 3, :o4, 1364691600 + tz.transition 2013, 10, :o5, 1382835600 + tz.transition 2014, 3, :o4, 1396141200 + tz.transition 2014, 10, :o5, 1414285200 + tz.transition 2015, 3, :o4, 1427590800 + tz.transition 2015, 10, :o5, 1445734800 + tz.transition 2016, 3, :o4, 1459040400 + tz.transition 2016, 10, :o5, 1477789200 + tz.transition 2017, 
3, :o4, 1490490000 + tz.transition 2017, 10, :o5, 1509238800 + tz.transition 2018, 3, :o4, 1521939600 + tz.transition 2018, 10, :o5, 1540688400 + tz.transition 2019, 3, :o4, 1553994000 + tz.transition 2019, 10, :o5, 1572138000 + tz.transition 2020, 3, :o4, 1585443600 + tz.transition 2020, 10, :o5, 1603587600 + tz.transition 2021, 3, :o4, 1616893200 + tz.transition 2021, 10, :o5, 1635642000 + tz.transition 2022, 3, :o4, 1648342800 + tz.transition 2022, 10, :o5, 1667091600 + tz.transition 2023, 3, :o4, 1679792400 + tz.transition 2023, 10, :o5, 1698541200 + tz.transition 2024, 3, :o4, 1711846800 + tz.transition 2024, 10, :o5, 1729990800 + tz.transition 2025, 3, :o4, 1743296400 + tz.transition 2025, 10, :o5, 1761440400 + tz.transition 2026, 3, :o4, 1774746000 + tz.transition 2026, 10, :o5, 1792890000 + tz.transition 2027, 3, :o4, 1806195600 + tz.transition 2027, 10, :o5, 1824944400 + tz.transition 2028, 3, :o4, 1837645200 + tz.transition 2028, 10, :o5, 1856394000 + tz.transition 2029, 3, :o4, 1869094800 + tz.transition 2029, 10, :o5, 1887843600 + tz.transition 2030, 3, :o4, 1901149200 + tz.transition 2030, 10, :o5, 1919293200 + tz.transition 2031, 3, :o4, 1932598800 + tz.transition 2031, 10, :o5, 1950742800 + tz.transition 2032, 3, :o4, 1964048400 + tz.transition 2032, 10, :o5, 1982797200 + tz.transition 2033, 3, :o4, 1995498000 + tz.transition 2033, 10, :o5, 2014246800 + tz.transition 2034, 3, :o4, 2026947600 + tz.transition 2034, 10, :o5, 2045696400 + tz.transition 2035, 3, :o4, 2058397200 + tz.transition 2035, 10, :o5, 2077146000 + tz.transition 2036, 3, :o4, 2090451600 + tz.transition 2036, 10, :o5, 2108595600 + tz.transition 2037, 3, :o4, 2121901200 + tz.transition 2037, 10, :o5, 2140045200 + tz.transition 2038, 3, :o4, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o5, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o4, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o5, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o4, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o5, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o4, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o5, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o4, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o5, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o4, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o5, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o4, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o5, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o4, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o5, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o4, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o5, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o4, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o5, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o4, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o5, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o4, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o5, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o4, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o5, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o4, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o5, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o4, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o5, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o4, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o5, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o4, 2658358800, 59312533, 24 + 
tz.transition 2054, 10, :o5, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o4, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o5, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o4, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o5, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o4, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o5, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o4, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o5, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o4, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o5, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o4, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o5, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o4, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o5, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o4, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o5, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o4, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o5, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o4, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o5, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o4, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o5, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o4, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o5, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o4, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o5, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o4, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o5, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o4, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o5, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o4, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o5, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Prague.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Prague.rb new file mode 100644 index 0000000..9db4208 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Prague.rb @@ -0,0 +1,235 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Prague + include TimezoneDefinition + + timezone 'Europe/Prague' do |tz| + tz.offset :o0, 3464, 0, :LMT + tz.offset :o1, 3464, 0, :PMT + tz.offset :o2, 3600, 0, :CET + tz.offset :o3, 3600, 3600, :CEST + tz.offset :o4, 3600, -3600, :GMT + + tz.transition 1849, 12, :o1, -3786829064, 25884991367, 10800 + tz.transition 1891, 9, :o2, -2469401864, 26049669767, 10800 + tz.transition 1916, 4, :o3, -1693706400, 29051813, 12 + tz.transition 1916, 9, :o2, -1680483600, 58107299, 24 + tz.transition 1917, 4, :o3, -1663455600, 58112029, 24 + tz.transition 1917, 9, :o2, -1650150000, 58115725, 24 + tz.transition 1918, 4, :o3, -1632006000, 58120765, 24 + tz.transition 1918, 9, :o2, -1618700400, 58124461, 24 + tz.transition 1940, 4, :o3, -938905200, 58313293, 24 + tz.transition 1942, 11, :o2, -857257200, 58335973, 24 + tz.transition 1943, 3, :o3, -844556400, 58339501, 24 + tz.transition 1943, 10, :o2, -828226800, 58344037, 24 + tz.transition 1944, 4, :o3, -812502000, 58348405, 24 + tz.transition 1944, 10, :o2, -796777200, 58352773, 24 + tz.transition 1945, 4, :o3, -781052400, 58357141, 24 + tz.transition 1945, 10, :o2, -765327600, 58361509, 24 + tz.transition 1946, 5, :o3, -746578800, 58366717, 24 + tz.transition 1946, 10, :o2, -733359600, 58370389, 24 + tz.transition 1946, 12, :o4, -728517600, 29185867, 12 + tz.transition 1947, 2, :o2, -721260000, 29186875, 12 + tz.transition 1947, 4, :o3, -716425200, 58375093, 24 + tz.transition 1947, 10, :o2, -701910000, 58379125, 24 + tz.transition 1948, 4, :o3, -684975600, 58383829, 24 + tz.transition 1948, 10, :o2, -670460400, 58387861, 24 + tz.transition 1949, 4, :o3, -654217200, 58392373, 24 + tz.transition 1949, 10, :o2, -639010800, 58396597, 24 + tz.transition 1979, 4, :o3, 291776400 + tz.transition 1979, 9, :o2, 307501200 + tz.transition 1980, 4, :o3, 323830800 + tz.transition 1980, 9, :o2, 338950800 + tz.transition 1981, 3, :o3, 354675600 + tz.transition 1981, 9, :o2, 370400400 + tz.transition 1982, 3, :o3, 386125200 + tz.transition 1982, 9, :o2, 401850000 + tz.transition 1983, 3, :o3, 417574800 + tz.transition 1983, 9, :o2, 433299600 + tz.transition 1984, 3, :o3, 449024400 + tz.transition 1984, 9, :o2, 465354000 + tz.transition 1985, 3, :o3, 481078800 + tz.transition 1985, 9, :o2, 496803600 + tz.transition 1986, 3, :o3, 512528400 + tz.transition 1986, 9, :o2, 528253200 + tz.transition 1987, 3, :o3, 543978000 + tz.transition 1987, 9, :o2, 559702800 + tz.transition 1988, 3, :o3, 575427600 + tz.transition 1988, 9, :o2, 591152400 + tz.transition 1989, 3, :o3, 606877200 + tz.transition 1989, 9, :o2, 622602000 + tz.transition 1990, 3, :o3, 638326800 + tz.transition 1990, 9, :o2, 654656400 + tz.transition 1991, 3, :o3, 670381200 + tz.transition 1991, 9, :o2, 686106000 + tz.transition 1992, 3, :o3, 701830800 + tz.transition 1992, 9, :o2, 717555600 + tz.transition 1993, 3, :o3, 733280400 + tz.transition 1993, 9, :o2, 749005200 + tz.transition 1994, 3, :o3, 764730000 + tz.transition 1994, 9, :o2, 780454800 + tz.transition 1995, 3, :o3, 796179600 + tz.transition 1995, 9, :o2, 811904400 + tz.transition 1996, 3, :o3, 828234000 + tz.transition 1996, 10, :o2, 846378000 + tz.transition 1997, 3, :o3, 859683600 + tz.transition 1997, 10, :o2, 877827600 + tz.transition 1998, 3, :o3, 891133200 + tz.transition 1998, 10, :o2, 909277200 + tz.transition 1999, 3, :o3, 922582800 + tz.transition 1999, 10, :o2, 941331600 + tz.transition 2000, 3, :o3, 954032400 + tz.transition 2000, 10, :o2, 972781200 + 
tz.transition 2001, 3, :o3, 985482000 + tz.transition 2001, 10, :o2, 1004230800 + tz.transition 2002, 3, :o3, 1017536400 + tz.transition 2002, 10, :o2, 1035680400 + tz.transition 2003, 3, :o3, 1048986000 + tz.transition 2003, 10, :o2, 1067130000 + tz.transition 2004, 3, :o3, 1080435600 + tz.transition 2004, 10, :o2, 1099184400 + tz.transition 2005, 3, :o3, 1111885200 + tz.transition 2005, 10, :o2, 1130634000 + tz.transition 2006, 3, :o3, 1143334800 + tz.transition 2006, 10, :o2, 1162083600 + tz.transition 2007, 3, :o3, 1174784400 + tz.transition 2007, 10, :o2, 1193533200 + tz.transition 2008, 3, :o3, 1206838800 + tz.transition 2008, 10, :o2, 1224982800 + tz.transition 2009, 3, :o3, 1238288400 + tz.transition 2009, 10, :o2, 1256432400 + tz.transition 2010, 3, :o3, 1269738000 + tz.transition 2010, 10, :o2, 1288486800 + tz.transition 2011, 3, :o3, 1301187600 + tz.transition 2011, 10, :o2, 1319936400 + tz.transition 2012, 3, :o3, 1332637200 + tz.transition 2012, 10, :o2, 1351386000 + tz.transition 2013, 3, :o3, 1364691600 + tz.transition 2013, 10, :o2, 1382835600 + tz.transition 2014, 3, :o3, 1396141200 + tz.transition 2014, 10, :o2, 1414285200 + tz.transition 2015, 3, :o3, 1427590800 + tz.transition 2015, 10, :o2, 1445734800 + tz.transition 2016, 3, :o3, 1459040400 + tz.transition 2016, 10, :o2, 1477789200 + tz.transition 2017, 3, :o3, 1490490000 + tz.transition 2017, 10, :o2, 1509238800 + tz.transition 2018, 3, :o3, 1521939600 + tz.transition 2018, 10, :o2, 1540688400 + tz.transition 2019, 3, :o3, 1553994000 + tz.transition 2019, 10, :o2, 1572138000 + tz.transition 2020, 3, :o3, 1585443600 + tz.transition 2020, 10, :o2, 1603587600 + tz.transition 2021, 3, :o3, 1616893200 + tz.transition 2021, 10, :o2, 1635642000 + tz.transition 2022, 3, :o3, 1648342800 + tz.transition 2022, 10, :o2, 1667091600 + tz.transition 2023, 3, :o3, 1679792400 + tz.transition 2023, 10, :o2, 1698541200 + tz.transition 2024, 3, :o3, 1711846800 + tz.transition 2024, 10, :o2, 1729990800 + tz.transition 2025, 3, :o3, 1743296400 + tz.transition 2025, 10, :o2, 1761440400 + tz.transition 2026, 3, :o3, 1774746000 + tz.transition 2026, 10, :o2, 1792890000 + tz.transition 2027, 3, :o3, 1806195600 + tz.transition 2027, 10, :o2, 1824944400 + tz.transition 2028, 3, :o3, 1837645200 + tz.transition 2028, 10, :o2, 1856394000 + tz.transition 2029, 3, :o3, 1869094800 + tz.transition 2029, 10, :o2, 1887843600 + tz.transition 2030, 3, :o3, 1901149200 + tz.transition 2030, 10, :o2, 1919293200 + tz.transition 2031, 3, :o3, 1932598800 + tz.transition 2031, 10, :o2, 1950742800 + tz.transition 2032, 3, :o3, 1964048400 + tz.transition 2032, 10, :o2, 1982797200 + tz.transition 2033, 3, :o3, 1995498000 + tz.transition 2033, 10, :o2, 2014246800 + tz.transition 2034, 3, :o3, 2026947600 + tz.transition 2034, 10, :o2, 2045696400 + tz.transition 2035, 3, :o3, 2058397200 + tz.transition 2035, 10, :o2, 2077146000 + tz.transition 2036, 3, :o3, 2090451600 + tz.transition 2036, 10, :o2, 2108595600 + tz.transition 2037, 3, :o3, 2121901200 + tz.transition 2037, 10, :o2, 2140045200 + tz.transition 2038, 3, :o3, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o2, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o3, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o2, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o3, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o2, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o3, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o2, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o3, 2279754000, 
59207365, 24 + tz.transition 2042, 10, :o2, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o3, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o2, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o3, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o2, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o3, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o2, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o3, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o2, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o3, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o2, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o3, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o2, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o3, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o2, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o3, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o2, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o3, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o2, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o3, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o2, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o3, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o2, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o3, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o2, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o3, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o2, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o3, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o2, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o3, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o2, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o3, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o2, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o3, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o2, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o3, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o2, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o3, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o2, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o3, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o2, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o3, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o2, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o3, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o2, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o3, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o2, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o3, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o2, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o3, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o2, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o3, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o2, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o3, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o2, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o3, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o2, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/UTC.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/UTC.rb new file mode 100644 index 0000000..2fec70c --- /dev/null +++ 
b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/UTC.rb @@ -0,0 +1,16 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module UTC + include TimezoneDefinition + + linked_timezone 'UTC', 'Etc/UTC' + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/countries.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/countries.rb new file mode 100644 index 0000000..689e68b --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/countries.rb @@ -0,0 +1,940 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Indexes + module Countries + include CountryIndexDefinition + + country 'AD', 'Andorra' do |c| + c.timezone 'Europe/Andorra', 85, 2, 91, 60 + end + country 'AE', 'United Arab Emirates' do |c| + c.timezone 'Asia/Dubai', 253, 10, 553, 10 + end + country 'AF', 'Afghanistan' do |c| + c.timezone 'Asia/Kabul', 2071, 60, 346, 5 + end + country 'AG', 'Antigua & Barbuda' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'AI', 'Anguilla' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'AL', 'Albania' do |c| + c.timezone 'Europe/Tirane', 124, 3, 119, 6 + end + country 'AM', 'Armenia' do |c| + c.timezone 'Asia/Yerevan', 2411, 60, 89, 2 + end + country 'AO', 'Angola' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'AQ', 'Antarctica' do |c| + c.timezone 'Antarctica/Casey', -3977, 60, 6631, 60, 'Casey' + c.timezone 'Antarctica/Davis', -823, 12, 2339, 30, 'Davis' + c.timezone 'Antarctica/DumontDUrville', -200, 3, 8401, 60, 'Dumont-d\'Urville' + c.timezone 'Antarctica/Mawson', -338, 5, 3773, 60, 'Mawson' + c.timezone 'Antarctica/Palmer', -324, 5, -641, 10, 'Palmer' + c.timezone 'Antarctica/Rothera', -2027, 30, -1022, 15, 'Rothera' + c.timezone 'Antarctica/Syowa', -124211, 1800, 3959, 100, 'Syowa' + c.timezone 'Antarctica/Troll', -259241, 3600, 507, 200, 'Troll' + c.timezone 'Antarctica/Vostok', -392, 5, 1069, 10, 'Vostok' + c.timezone 'Pacific/Auckland', -553, 15, 5243, 30, 'New Zealand time' + end + country 'AR', 'Argentina' do |c| + c.timezone 'America/Argentina/Buenos_Aires', -173, 5, -1169, 20, 'Buenos Aires (BA, CF)' + c.timezone 'America/Argentina/Cordoba', -157, 5, -3851, 60, 'Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF)' + c.timezone 'America/Argentina/Salta', -1487, 60, -785, 12, 'Salta (SA, LP, NQ, RN)' + c.timezone 'America/Argentina/Jujuy', -1451, 60, -653, 10, 'Jujuy (JY)' + c.timezone 'America/Argentina/Tucuman', -1609, 60, -3913, 60, 'Tucumán (TM)' + c.timezone 'America/Argentina/Catamarca', -427, 15, -3947, 60, 'Catamarca (CT); Chubut (CH)' + c.timezone 'America/Argentina/La_Rioja', -883, 30, -1337, 20, 'La Rioja (LR)' + c.timezone 'America/Argentina/San_Juan', -473, 15, -4111, 60, 'San Juan (SJ)' + c.timezone 'America/Argentina/Mendoza', -1973, 60, -4129, 60, 'Mendoza (MZ)' + c.timezone 'America/Argentina/San_Luis', -1999, 60, -1327, 20, 'San Luis (SL)' + c.timezone 'America/Argentina/Rio_Gallegos', -1549, 30, -4153, 60, 'Santa Cruz (SC)' + c.timezone 'America/Argentina/Ushuaia', -274, 5, -683, 10, 'Tierra del Fuego (TF)' + end + country 'AS', 'Samoa (American)' do 
|c| + c.timezone 'Pacific/Pago_Pago', -214, 15, -1707, 10, 'Samoa, Midway' + end + country 'AT', 'Austria' do |c| + c.timezone 'Europe/Vienna', 2893, 60, 49, 3 + end + country 'AU', 'Australia' do |c| + c.timezone 'Australia/Lord_Howe', -631, 20, 1909, 12, 'Lord Howe Island' + c.timezone 'Antarctica/Macquarie', -109, 2, 3179, 20, 'Macquarie Island' + c.timezone 'Australia/Hobart', -2573, 60, 8839, 60, 'Tasmania (most areas)' + c.timezone 'Australia/Currie', -599, 15, 2158, 15, 'Tasmania (King Island)' + c.timezone 'Australia/Melbourne', -2269, 60, 4349, 30, 'Victoria' + c.timezone 'Australia/Sydney', -508, 15, 9073, 60, 'New South Wales (most areas)' + c.timezone 'Australia/Broken_Hill', -639, 20, 2829, 20, 'New South Wales (Yancowinna)' + c.timezone 'Australia/Brisbane', -412, 15, 4591, 30, 'Queensland (most areas)' + c.timezone 'Australia/Lindeman', -304, 15, 149, 1, 'Queensland (Whitsunday Islands)' + c.timezone 'Australia/Adelaide', -419, 12, 1663, 12, 'South Australia' + c.timezone 'Australia/Darwin', -187, 15, 785, 6, 'Northern Territory' + c.timezone 'Australia/Perth', -639, 20, 2317, 20, 'Western Australia (most areas)' + c.timezone 'Australia/Eucla', -1903, 60, 1933, 15, 'Western Australia (Eucla)' + end + country 'AW', 'Aruba' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'AX', 'Åland Islands' do |c| + c.timezone 'Europe/Helsinki', 361, 6, 749, 30 + end + country 'AZ', 'Azerbaijan' do |c| + c.timezone 'Asia/Baku', 2423, 60, 997, 20 + end + country 'BA', 'Bosnia & Herzegovina' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'BB', 'Barbados' do |c| + c.timezone 'America/Barbados', 131, 10, -3577, 60 + end + country 'BD', 'Bangladesh' do |c| + c.timezone 'Asia/Dhaka', 1423, 60, 1085, 12 + end + country 'BE', 'Belgium' do |c| + c.timezone 'Europe/Brussels', 305, 6, 13, 3 + end + country 'BF', 'Burkina Faso' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'BG', 'Bulgaria' do |c| + c.timezone 'Europe/Sofia', 2561, 60, 1399, 60 + end + country 'BH', 'Bahrain' do |c| + c.timezone 'Asia/Qatar', 1517, 60, 773, 15 + end + country 'BI', 'Burundi' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'BJ', 'Benin' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'BL', 'St Barthelemy' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'BM', 'Bermuda' do |c| + c.timezone 'Atlantic/Bermuda', 1937, 60, -1943, 30 + end + country 'BN', 'Brunei' do |c| + c.timezone 'Asia/Brunei', 74, 15, 1379, 12 + end + country 'BO', 'Bolivia' do |c| + c.timezone 'America/La_Paz', -33, 2, -1363, 20 + end + country 'BQ', 'Caribbean NL' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'BR', 'Brazil' do |c| + c.timezone 'America/Noronha', -77, 20, -389, 12, 'Atlantic islands' + c.timezone 'America/Belem', -29, 20, -2909, 60, 'Pará (east); Amapá' + c.timezone 'America/Fortaleza', -223, 60, -77, 2, 'Brazil (northeast: MA, PI, CE, RN, PB)' + c.timezone 'America/Recife', -161, 20, -349, 10, 'Pernambuco' + c.timezone 'America/Araguaina', -36, 5, -241, 5, 'Tocantins' + c.timezone 'America/Maceio', -29, 3, -2143, 60, 'Alagoas, Sergipe' + c.timezone 'America/Bahia', -779, 60, -2311, 60, 'Bahia' + c.timezone 'America/Sao_Paulo', -353, 15, -2797, 60, 'Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS)' + c.timezone 'America/Campo_Grande', -409, 20, -3277, 60, 'Mato Grosso do Sul' + c.timezone 'America/Cuiaba', -187, 12, -673, 
12, 'Mato Grosso' + c.timezone 'America/Santarem', -73, 30, -823, 15, 'Pará (west)' + c.timezone 'America/Porto_Velho', -263, 30, -639, 10, 'Rondônia' + c.timezone 'America/Boa_Vista', 169, 60, -182, 3, 'Roraima' + c.timezone 'America/Manaus', -47, 15, -3601, 60, 'Amazonas (east)' + c.timezone 'America/Eirunepe', -20, 3, -1048, 15, 'Amazonas (west)' + c.timezone 'America/Rio_Branco', -299, 30, -339, 5, 'Acre' + end + country 'BS', 'Bahamas' do |c| + c.timezone 'America/Nassau', 301, 12, -1547, 20 + end + country 'BT', 'Bhutan' do |c| + c.timezone 'Asia/Thimphu', 412, 15, 1793, 20 + end + country 'BV', 'Bouvet Island' + country 'BW', 'Botswana' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'BY', 'Belarus' do |c| + c.timezone 'Europe/Minsk', 539, 10, 827, 30 + end + country 'BZ', 'Belize' do |c| + c.timezone 'America/Belize', 35, 2, -441, 5 + end + country 'CA', 'Canada' do |c| + c.timezone 'America/St_Johns', 1427, 30, -3163, 60, 'Newfoundland; Labrador (southeast)' + c.timezone 'America/Halifax', 893, 20, -318, 5, 'Atlantic - NS (most areas); PE' + c.timezone 'America/Glace_Bay', 231, 5, -1199, 20, 'Atlantic - NS (Cape Breton)' + c.timezone 'America/Moncton', 461, 10, -3887, 60, 'Atlantic - New Brunswick' + c.timezone 'America/Goose_Bay', 160, 3, -725, 12, 'Atlantic - Labrador (most areas)' + c.timezone 'America/Blanc-Sablon', 617, 12, -3427, 60, 'AST - QC (Lower North Shore)' + c.timezone 'America/Toronto', 873, 20, -4763, 60, 'Eastern - ON, QC (most areas)' + c.timezone 'America/Nipigon', 2941, 60, -1324, 15, 'Eastern - ON, QC (no DST 1967-73)' + c.timezone 'America/Thunder_Bay', 2903, 60, -357, 4, 'Eastern - ON (Thunder Bay)' + c.timezone 'America/Iqaluit', 956, 15, -1027, 15, 'Eastern - NU (most east areas)' + c.timezone 'America/Pangnirtung', 992, 15, -986, 15, 'Eastern - NU (Pangnirtung)' + c.timezone 'America/Atikokan', 175531, 3600, -54973, 600, 'EST - ON (Atikokan); NU (Coral H)' + c.timezone 'America/Winnipeg', 2993, 60, -1943, 20, 'Central - ON (west); Manitoba' + c.timezone 'America/Rainy_River', 2923, 60, -2837, 30, 'Central - ON (Rainy R, Ft Frances)' + c.timezone 'America/Resolute', 33613, 450, -22759, 240, 'Central - NU (Resolute)' + c.timezone 'America/Rankin_Inlet', 3769, 60, -331499, 3600, 'Central - NU (central)' + c.timezone 'America/Regina', 252, 5, -2093, 20, 'CST - SK (most areas)' + c.timezone 'America/Swift_Current', 3017, 60, -647, 6, 'CST - SK (midwest)' + c.timezone 'America/Edmonton', 1071, 20, -1702, 15, 'Mountain - AB; BC (E); SK (W)' + c.timezone 'America/Cambridge_Bay', 24881, 360, -37819, 360, 'Mountain - NU (west)' + c.timezone 'America/Yellowknife', 1249, 20, -2287, 20, 'Mountain - NT (central)' + c.timezone 'America/Inuvik', 246059, 3600, -8023, 60, 'Mountain - NT (west)' + c.timezone 'America/Creston', 491, 10, -6991, 60, 'MST - BC (Creston)' + c.timezone 'America/Dawson_Creek', 1793, 30, -3607, 30, 'MST - BC (Dawson Cr, Ft St John)' + c.timezone 'America/Fort_Nelson', 294, 5, -1227, 10, 'MST - BC (Ft Nelson)' + c.timezone 'America/Vancouver', 739, 15, -7387, 60, 'Pacific - BC (most areas)' + c.timezone 'America/Whitehorse', 3643, 60, -2701, 20, 'Pacific - Yukon (east)' + c.timezone 'America/Dawson', 961, 15, -1673, 12, 'Pacific - Yukon (west)' + end + country 'CC', 'Cocos (Keeling) Islands' do |c| + c.timezone 'Indian/Cocos', -73, 6, 1163, 12 + end + country 'CD', 'Congo (Dem. 
Rep.)' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CF', 'Central African Rep.' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CG', 'Congo (Rep.)' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CH', 'Switzerland' do |c| + c.timezone 'Europe/Zurich', 2843, 60, 128, 15, 'Swiss time' + end + country 'CI', 'Côte d\'Ivoire' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'CK', 'Cook Islands' do |c| + c.timezone 'Pacific/Rarotonga', -637, 30, -4793, 30 + end + country 'CL', 'Chile' do |c| + c.timezone 'America/Santiago', -669, 20, -212, 3, 'Chile (most areas)' + c.timezone 'America/Punta_Arenas', -1063, 20, -851, 12, 'Region of Magallanes' + c.timezone 'Pacific/Easter', -543, 20, -3283, 30, 'Easter Island' + end + country 'CM', 'Cameroon' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CN', 'China' do |c| + c.timezone 'Asia/Shanghai', 937, 30, 1822, 15, 'Beijing Time' + c.timezone 'Asia/Urumqi', 219, 5, 1051, 12, 'Xinjiang Time' + end + country 'CO', 'Colombia' do |c| + c.timezone 'America/Bogota', 23, 5, -889, 12 + end + country 'CR', 'Costa Rica' do |c| + c.timezone 'America/Costa_Rica', 149, 15, -1009, 12 + end + country 'CU', 'Cuba' do |c| + c.timezone 'America/Havana', 347, 15, -2471, 30 + end + country 'CV', 'Cape Verde' do |c| + c.timezone 'Atlantic/Cape_Verde', 179, 12, -1411, 60 + end + country 'CW', 'Curaçao' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'CX', 'Christmas Island' do |c| + c.timezone 'Indian/Christmas', -125, 12, 6343, 60 + end + country 'CY', 'Cyprus' do |c| + c.timezone 'Asia/Nicosia', 211, 6, 1001, 30, 'Cyprus (most areas)' + c.timezone 'Asia/Famagusta', 2107, 60, 679, 20, 'Northern Cyprus' + end + country 'CZ', 'Czech Republic' do |c| + c.timezone 'Europe/Prague', 601, 12, 433, 30 + end + country 'DE', 'Germany' do |c| + c.timezone 'Europe/Berlin', 105, 2, 401, 30, 'Germany (most areas)' + c.timezone 'Europe/Zurich', 2843, 60, 128, 15, 'Swiss time' + end + country 'DJ', 'Djibouti' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'DK', 'Denmark' do |c| + c.timezone 'Europe/Copenhagen', 167, 3, 151, 12 + end + country 'DM', 'Dominica' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'DO', 'Dominican Republic' do |c| + c.timezone 'America/Santo_Domingo', 277, 15, -699, 10 + end + country 'DZ', 'Algeria' do |c| + c.timezone 'Africa/Algiers', 2207, 60, 61, 20 + end + country 'EC', 'Ecuador' do |c| + c.timezone 'America/Guayaquil', -13, 6, -479, 6, 'Ecuador (mainland)' + c.timezone 'Pacific/Galapagos', -9, 10, -448, 5, 'Galápagos Islands' + end + country 'EE', 'Estonia' do |c| + c.timezone 'Europe/Tallinn', 713, 12, 99, 4 + end + country 'EG', 'Egypt' do |c| + c.timezone 'Africa/Cairo', 601, 20, 125, 4 + end + country 'EH', 'Western Sahara' do |c| + c.timezone 'Africa/El_Aaiun', 543, 20, -66, 5 + end + country 'ER', 'Eritrea' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'ES', 'Spain' do |c| + c.timezone 'Europe/Madrid', 202, 5, -221, 60, 'Spain (mainland)' + c.timezone 'Africa/Ceuta', 2153, 60, -319, 60, 'Ceuta, Melilla' + c.timezone 'Atlantic/Canary', 281, 10, -77, 5, 'Canary Islands' + end + country 'ET', 'Ethiopia' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'FI', 'Finland' do |c| + 
c.timezone 'Europe/Helsinki', 361, 6, 749, 30 + end + country 'FJ', 'Fiji' do |c| + c.timezone 'Pacific/Fiji', -272, 15, 2141, 12 + end + country 'FK', 'Falkland Islands' do |c| + c.timezone 'Atlantic/Stanley', -517, 10, -1157, 20 + end + country 'FM', 'Micronesia' do |c| + c.timezone 'Pacific/Chuuk', 89, 12, 9107, 60, 'Chuuk/Truk, Yap' + c.timezone 'Pacific/Pohnpei', 209, 30, 9493, 60, 'Pohnpei/Ponape' + c.timezone 'Pacific/Kosrae', 319, 60, 9779, 60, 'Kosrae' + end + country 'FO', 'Faroe Islands' do |c| + c.timezone 'Atlantic/Faroe', 3721, 60, -203, 30 + end + country 'FR', 'France' do |c| + c.timezone 'Europe/Paris', 733, 15, 7, 3 + end + country 'GA', 'Gabon' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'GB', 'Britain (UK)' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'GD', 'Grenada' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'GE', 'Georgia' do |c| + c.timezone 'Asia/Tbilisi', 2503, 60, 2689, 60 + end + country 'GF', 'French Guiana' do |c| + c.timezone 'America/Cayenne', 74, 15, -157, 3 + end + country 'GG', 'Guernsey' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'GH', 'Ghana' do |c| + c.timezone 'Africa/Accra', 111, 20, -13, 60 + end + country 'GI', 'Gibraltar' do |c| + c.timezone 'Europe/Gibraltar', 542, 15, -107, 20 + end + country 'GL', 'Greenland' do |c| + c.timezone 'America/Nuuk', 3851, 60, -776, 15, 'Greenland (most areas)' + c.timezone 'America/Danmarkshavn', 2303, 30, -56, 3, 'National Park (east coast)' + c.timezone 'America/Scoresbysund', 4229, 60, -659, 30, 'Scoresbysund/Ittoqqortoormiit' + c.timezone 'America/Thule', 2297, 30, -4127, 60, 'Thule/Pituffik' + end + country 'GM', 'Gambia' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'GN', 'Guinea' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'GP', 'Guadeloupe' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'GQ', 'Equatorial Guinea' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'GR', 'Greece' do |c| + c.timezone 'Europe/Athens', 1139, 30, 1423, 60 + end + country 'GS', 'South Georgia & the South Sandwich Islands' do |c| + c.timezone 'Atlantic/South_Georgia', -814, 15, -548, 15 + end + country 'GT', 'Guatemala' do |c| + c.timezone 'America/Guatemala', 439, 30, -5431, 60 + end + country 'GU', 'Guam' do |c| + c.timezone 'Pacific/Guam', 202, 15, 579, 4 + end + country 'GW', 'Guinea-Bissau' do |c| + c.timezone 'Africa/Bissau', 237, 20, -187, 12 + end + country 'GY', 'Guyana' do |c| + c.timezone 'America/Guyana', 34, 5, -349, 6 + end + country 'HK', 'Hong Kong' do |c| + c.timezone 'Asia/Hong_Kong', 1337, 60, 2283, 20 + end + country 'HM', 'Heard Island & McDonald Islands' + country 'HN', 'Honduras' do |c| + c.timezone 'America/Tegucigalpa', 141, 10, -5233, 60 + end + country 'HR', 'Croatia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'HT', 'Haiti' do |c| + c.timezone 'America/Port-au-Prince', 278, 15, -217, 3 + end + country 'HU', 'Hungary' do |c| + c.timezone 'Europe/Budapest', 95, 2, 229, 12 + end + country 'ID', 'Indonesia' do |c| + c.timezone 'Asia/Jakarta', -37, 6, 534, 5, 'Java, Sumatra' + c.timezone 'Asia/Pontianak', -1, 30, 328, 3, 'Borneo (west, central)' + c.timezone 'Asia/Makassar', -307, 60, 597, 5, 'Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west)' + c.timezone 'Asia/Jayapura', -38, 15, 1407, 10, 'New Guinea 
(West Papua / Irian Jaya); Malukus/Moluccas' + end + country 'IE', 'Ireland' do |c| + c.timezone 'Europe/Dublin', 160, 3, -25, 4 + end + country 'IL', 'Israel' do |c| + c.timezone 'Asia/Jerusalem', 11441, 360, 63403, 1800 + end + country 'IM', 'Isle of Man' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'IN', 'India' do |c| + c.timezone 'Asia/Kolkata', 338, 15, 2651, 30 + end + country 'IO', 'British Indian Ocean Territory' do |c| + c.timezone 'Indian/Chagos', -22, 3, 869, 12 + end + country 'IQ', 'Iraq' do |c| + c.timezone 'Asia/Baghdad', 667, 20, 533, 12 + end + country 'IR', 'Iran' do |c| + c.timezone 'Asia/Tehran', 107, 3, 1543, 30 + end + country 'IS', 'Iceland' do |c| + c.timezone 'Atlantic/Reykjavik', 1283, 20, -437, 20 + end + country 'IT', 'Italy' do |c| + c.timezone 'Europe/Rome', 419, 10, 749, 60 + end + country 'JE', 'Jersey' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'JM', 'Jamaica' do |c| + c.timezone 'America/Jamaica', 12937, 720, -11519, 150 + end + country 'JO', 'Jordan' do |c| + c.timezone 'Asia/Amman', 639, 20, 539, 15 + end + country 'JP', 'Japan' do |c| + c.timezone 'Asia/Tokyo', 32089, 900, 503081, 3600 + end + country 'KE', 'Kenya' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'KG', 'Kyrgyzstan' do |c| + c.timezone 'Asia/Bishkek', 429, 10, 373, 5 + end + country 'KH', 'Cambodia' do |c| + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'KI', 'Kiribati' do |c| + c.timezone 'Pacific/Tarawa', 17, 12, 173, 1, 'Gilbert Islands' + c.timezone 'Pacific/Enderbury', -47, 15, -2053, 12, 'Phoenix Islands' + c.timezone 'Pacific/Kiritimati', 28, 15, -472, 3, 'Line Islands' + end + country 'KM', 'Comoros' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'KN', 'St Kitts & Nevis' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'KP', 'Korea (North)' do |c| + c.timezone 'Asia/Pyongyang', 2341, 60, 503, 4 + end + country 'KR', 'Korea (South)' do |c| + c.timezone 'Asia/Seoul', 751, 20, 3809, 30 + end + country 'KW', 'Kuwait' do |c| + c.timezone 'Asia/Riyadh', 739, 30, 2803, 60 + end + country 'KY', 'Cayman Islands' do |c| + c.timezone 'America/Panama', 269, 30, -1193, 15 + end + country 'KZ', 'Kazakhstan' do |c| + c.timezone 'Asia/Almaty', 173, 4, 1539, 20, 'Kazakhstan (most areas)' + c.timezone 'Asia/Qyzylorda', 224, 5, 982, 15, 'Qyzylorda/Kyzylorda/Kzyl-Orda' + c.timezone 'Asia/Qostanay', 266, 5, 3817, 60, 'Qostanay/Kostanay/Kustanay' + c.timezone 'Asia/Aqtobe', 3017, 60, 343, 6, 'Aqtöbe/Aktobe' + c.timezone 'Asia/Aqtau', 2671, 60, 754, 15, 'Mangghystaū/Mankistau' + c.timezone 'Asia/Atyrau', 2827, 60, 779, 15, 'Atyraū/Atirau/Gur\'yev' + c.timezone 'Asia/Oral', 3073, 60, 1027, 20, 'West Kazakhstan' + end + country 'LA', 'Laos' do |c| + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'LB', 'Lebanon' do |c| + c.timezone 'Asia/Beirut', 2033, 60, 71, 2 + end + country 'LC', 'St Lucia' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'LI', 'Liechtenstein' do |c| + c.timezone 'Europe/Zurich', 2843, 60, 128, 15, 'Swiss time' + end + country 'LK', 'Sri Lanka' do |c| + c.timezone 'Asia/Colombo', 104, 15, 1597, 20 + end + country 'LR', 'Liberia' do |c| + c.timezone 'Africa/Monrovia', 63, 10, -647, 60 + end + country 'LS', 'Lesotho' do |c| + c.timezone 'Africa/Johannesburg', -105, 4, 28, 1 + end + country 'LT', 'Lithuania' do |c| + c.timezone 
'Europe/Vilnius', 3281, 60, 1519, 60 + end + country 'LU', 'Luxembourg' do |c| + c.timezone 'Europe/Luxembourg', 248, 5, 123, 20 + end + country 'LV', 'Latvia' do |c| + c.timezone 'Europe/Riga', 1139, 20, 241, 10 + end + country 'LY', 'Libya' do |c| + c.timezone 'Africa/Tripoli', 329, 10, 791, 60 + end + country 'MA', 'Morocco' do |c| + c.timezone 'Africa/Casablanca', 673, 20, -91, 12 + end + country 'MC', 'Monaco' do |c| + c.timezone 'Europe/Monaco', 437, 10, 443, 60 + end + country 'MD', 'Moldova' do |c| + c.timezone 'Europe/Chisinau', 47, 1, 173, 6 + end + country 'ME', 'Montenegro' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'MF', 'St Martin (French)' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'MG', 'Madagascar' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'MH', 'Marshall Islands' do |c| + c.timezone 'Pacific/Majuro', 143, 20, 856, 5, 'Marshall Islands (most areas)' + c.timezone 'Pacific/Kwajalein', 109, 12, 502, 3, 'Kwajalein' + end + country 'MK', 'North Macedonia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'ML', 'Mali' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'MM', 'Myanmar (Burma)' do |c| + c.timezone 'Asia/Yangon', 1007, 60, 577, 6 + end + country 'MN', 'Mongolia' do |c| + c.timezone 'Asia/Ulaanbaatar', 575, 12, 6413, 60, 'Mongolia (most areas)' + c.timezone 'Asia/Hovd', 2881, 60, 1833, 20, 'Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan' + c.timezone 'Asia/Choibalsan', 721, 15, 229, 2, 'Dornod, Sükhbaatar' + end + country 'MO', 'Macau' do |c| + c.timezone 'Asia/Macau', 7991, 360, 2725, 24 + end + country 'MP', 'Northern Mariana Islands' do |c| + c.timezone 'Pacific/Guam', 202, 15, 579, 4 + end + country 'MQ', 'Martinique' do |c| + c.timezone 'America/Martinique', 73, 5, -733, 12 + end + country 'MR', 'Mauritania' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'MS', 'Montserrat' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'MT', 'Malta' do |c| + c.timezone 'Europe/Malta', 359, 10, 871, 60 + end + country 'MU', 'Mauritius' do |c| + c.timezone 'Indian/Mauritius', -121, 6, 115, 2 + end + country 'MV', 'Maldives' do |c| + c.timezone 'Indian/Maldives', 25, 6, 147, 2 + end + country 'MW', 'Malawi' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'MX', 'Mexico' do |c| + c.timezone 'America/Mexico_City', 97, 5, -1983, 20, 'Central Time' + c.timezone 'America/Cancun', 253, 12, -2603, 30, 'Eastern Standard Time - Quintana Roo' + c.timezone 'America/Merida', 629, 30, -5377, 60, 'Central Time - Campeche, Yucatán' + c.timezone 'America/Monterrey', 77, 3, -6019, 60, 'Central Time - Durango; Coahuila, Nuevo León, Tamaulipas (most areas)' + c.timezone 'America/Matamoros', 155, 6, -195, 2, 'Central Time US - Coahuila, Nuevo León, Tamaulipas (US border)' + c.timezone 'America/Mazatlan', 1393, 60, -1277, 12, 'Mountain Time - Baja California Sur, Nayarit, Sinaloa' + c.timezone 'America/Chihuahua', 859, 30, -1273, 12, 'Mountain Time - Chihuahua (most areas)' + c.timezone 'America/Ojinaga', 887, 30, -1253, 12, 'Mountain Time US - Chihuahua (US border)' + c.timezone 'America/Hermosillo', 436, 15, -3329, 30, 'Mountain Standard Time - Sonora' + c.timezone 'America/Tijuana', 488, 15, -7021, 60, 'Pacific Time US - Baja California' + c.timezone 'America/Bahia_Banderas', 104, 5, -421, 4, 'Central Time - Bahía de Banderas' + end + country 'MY', 'Malaysia' do |c| + 
c.timezone 'Asia/Kuala_Lumpur', 19, 6, 1017, 10, 'Malaysia (peninsula)' + c.timezone 'Asia/Kuching', 31, 20, 331, 3, 'Sabah, Sarawak' + end + country 'MZ', 'Mozambique' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'NA', 'Namibia' do |c| + c.timezone 'Africa/Windhoek', -677, 30, 171, 10 + end + country 'NC', 'New Caledonia' do |c| + c.timezone 'Pacific/Noumea', -334, 15, 3329, 20 + end + country 'NE', 'Niger' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'NF', 'Norfolk Island' do |c| + c.timezone 'Pacific/Norfolk', -581, 20, 5039, 30 + end + country 'NG', 'Nigeria' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'NI', 'Nicaragua' do |c| + c.timezone 'America/Managua', 243, 20, -5177, 60 + end + country 'NL', 'Netherlands' do |c| + c.timezone 'Europe/Amsterdam', 1571, 30, 49, 10 + end + country 'NO', 'Norway' do |c| + c.timezone 'Europe/Oslo', 719, 12, 43, 4 + end + country 'NP', 'Nepal' do |c| + c.timezone 'Asia/Kathmandu', 1663, 60, 5119, 60 + end + country 'NR', 'Nauru' do |c| + c.timezone 'Pacific/Nauru', -31, 60, 2003, 12 + end + country 'NU', 'Niue' do |c| + c.timezone 'Pacific/Niue', -1141, 60, -2039, 12 + end + country 'NZ', 'New Zealand' do |c| + c.timezone 'Pacific/Auckland', -553, 15, 5243, 30, 'New Zealand time' + c.timezone 'Pacific/Chatham', -879, 20, -3531, 20, 'Chatham Islands' + end + country 'OM', 'Oman' do |c| + c.timezone 'Asia/Dubai', 253, 10, 553, 10 + end + country 'PA', 'Panama' do |c| + c.timezone 'America/Panama', 269, 30, -1193, 15 + end + country 'PE', 'Peru' do |c| + c.timezone 'America/Lima', -241, 20, -1541, 20 + end + country 'PF', 'French Polynesia' do |c| + c.timezone 'Pacific/Tahiti', -263, 15, -4487, 30, 'Society Islands' + c.timezone 'Pacific/Marquesas', -9, 1, -279, 2, 'Marquesas Islands' + c.timezone 'Pacific/Gambier', -347, 15, -2699, 20, 'Gambier Islands' + end + country 'PG', 'Papua New Guinea' do |c| + c.timezone 'Pacific/Port_Moresby', -19, 2, 883, 6, 'Papua New Guinea (most areas)' + c.timezone 'Pacific/Bougainville', -373, 60, 4667, 30, 'Bougainville' + end + country 'PH', 'Philippines' do |c| + c.timezone 'Asia/Manila', 175, 12, 121, 1 + end + country 'PK', 'Pakistan' do |c| + c.timezone 'Asia/Karachi', 373, 15, 1341, 20 + end + country 'PL', 'Poland' do |c| + c.timezone 'Europe/Warsaw', 209, 4, 21, 1 + end + country 'PM', 'St Pierre & Miquelon' do |c| + c.timezone 'America/Miquelon', 941, 20, -169, 3 + end + country 'PN', 'Pitcairn' do |c| + c.timezone 'Pacific/Pitcairn', -376, 15, -1561, 12 + end + country 'PR', 'Puerto Rico' do |c| + c.timezone 'America/Puerto_Rico', 11081, 600, -118991, 1800 + end + country 'PS', 'Palestine' do |c| + c.timezone 'Asia/Gaza', 63, 2, 517, 15, 'Gaza Strip' + c.timezone 'Asia/Hebron', 473, 15, 7019, 200, 'West Bank' + end + country 'PT', 'Portugal' do |c| + c.timezone 'Europe/Lisbon', 2323, 60, -137, 15, 'Portugal (mainland)' + c.timezone 'Atlantic/Madeira', 979, 30, -169, 10, 'Madeira Islands' + c.timezone 'Atlantic/Azores', 566, 15, -77, 3, 'Azores' + end + country 'PW', 'Palau' do |c| + c.timezone 'Pacific/Palau', 22, 3, 8069, 60 + end + country 'PY', 'Paraguay' do |c| + c.timezone 'America/Asuncion', -379, 15, -173, 3 + end + country 'QA', 'Qatar' do |c| + c.timezone 'Asia/Qatar', 1517, 60, 773, 15 + end + country 'RE', 'Réunion' do |c| + c.timezone 'Indian/Reunion', -313, 15, 832, 15, 'Réunion, Crozet, Scattered Islands' + end + country 'RO', 'Romania' do |c| + c.timezone 
'Europe/Bucharest', 1333, 30, 261, 10 + end + country 'RS', 'Serbia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'RU', 'Russia' do |c| + c.timezone 'Europe/Kaliningrad', 3283, 60, 41, 2, 'MSK-01 - Kaliningrad' + c.timezone 'Europe/Moscow', 66907, 1200, 8464, 225, 'MSK+00 - Moscow area' + c.timezone 'Europe/Simferopol', 899, 20, 341, 10, 'Crimea' + c.timezone 'Europe/Kirov', 293, 5, 993, 20, 'MSK+00 - Kirov' + c.timezone 'Europe/Astrakhan', 927, 20, 961, 20, 'MSK+01 - Astrakhan' + c.timezone 'Europe/Volgograd', 731, 15, 533, 12, 'MSK+01 - Volgograd' + c.timezone 'Europe/Saratov', 1547, 30, 1381, 30, 'MSK+01 - Saratov' + c.timezone 'Europe/Ulyanovsk', 163, 3, 242, 5, 'MSK+01 - Ulyanovsk' + c.timezone 'Europe/Samara', 266, 5, 1003, 20, 'MSK+01 - Samara, Udmurtia' + c.timezone 'Asia/Yekaterinburg', 1137, 20, 303, 5, 'MSK+02 - Urals' + c.timezone 'Asia/Omsk', 55, 1, 367, 5, 'MSK+03 - Omsk' + c.timezone 'Asia/Novosibirsk', 1651, 30, 995, 12, 'MSK+04 - Novosibirsk' + c.timezone 'Asia/Barnaul', 1601, 30, 335, 4, 'MSK+04 - Altai' + c.timezone 'Asia/Tomsk', 113, 2, 2549, 30, 'MSK+04 - Tomsk' + c.timezone 'Asia/Novokuznetsk', 215, 4, 5227, 60, 'MSK+04 - Kemerovo' + c.timezone 'Asia/Krasnoyarsk', 3361, 60, 557, 6, 'MSK+04 - Krasnoyarsk area' + c.timezone 'Asia/Irkutsk', 784, 15, 313, 3, 'MSK+05 - Irkutsk, Buryatia' + c.timezone 'Asia/Chita', 1041, 20, 1702, 15, 'MSK+06 - Zabaykalsky' + c.timezone 'Asia/Yakutsk', 62, 1, 389, 3, 'MSK+06 - Lena River' + c.timezone 'Asia/Khandyga', 225563, 3600, 243997, 1800, 'MSK+06 - Tomponsky, Ust-Maysky' + c.timezone 'Asia/Vladivostok', 259, 6, 1979, 15, 'MSK+07 - Amur River' + c.timezone 'Asia/Ust-Nera', 232417, 3600, 10742, 75, 'MSK+07 - Oymyakonsky' + c.timezone 'Asia/Magadan', 1787, 30, 754, 5, 'MSK+08 - Magadan' + c.timezone 'Asia/Sakhalin', 1409, 30, 1427, 10, 'MSK+08 - Sakhalin Island' + c.timezone 'Asia/Srednekolymsk', 1012, 15, 9223, 60, 'MSK+08 - Sakha (E); North Kuril Is' + c.timezone 'Asia/Kamchatka', 3181, 60, 3173, 20, 'MSK+09 - Kamchatka' + c.timezone 'Asia/Anadyr', 259, 4, 10649, 60, 'MSK+09 - Bering Sea' + end + country 'RW', 'Rwanda' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'SA', 'Saudi Arabia' do |c| + c.timezone 'Asia/Riyadh', 739, 30, 2803, 60 + end + country 'SB', 'Solomon Islands' do |c| + c.timezone 'Pacific/Guadalcanal', -143, 15, 801, 5 + end + country 'SC', 'Seychelles' do |c| + c.timezone 'Indian/Mahe', -14, 3, 832, 15 + end + country 'SD', 'Sudan' do |c| + c.timezone 'Africa/Khartoum', 78, 5, 488, 15 + end + country 'SE', 'Sweden' do |c| + c.timezone 'Europe/Stockholm', 178, 3, 361, 20 + end + country 'SG', 'Singapore' do |c| + c.timezone 'Asia/Singapore', 77, 60, 2077, 20 + end + country 'SH', 'St Helena' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'SI', 'Slovenia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'SJ', 'Svalbard & Jan Mayen' do |c| + c.timezone 'Europe/Oslo', 719, 12, 43, 4 + end + country 'SK', 'Slovakia' do |c| + c.timezone 'Europe/Prague', 601, 12, 433, 30 + end + country 'SL', 'Sierra Leone' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'SM', 'San Marino' do |c| + c.timezone 'Europe/Rome', 419, 10, 749, 60 + end + country 'SN', 'Senegal' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'SO', 'Somalia' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'SR', 'Suriname' do |c| + c.timezone 'America/Paramaribo', 35, 6, -331, 6 + 
end + country 'SS', 'South Sudan' do |c| + c.timezone 'Africa/Juba', 97, 20, 1897, 60 + end + country 'ST', 'Sao Tome & Principe' do |c| + c.timezone 'Africa/Sao_Tome', 1, 3, 101, 15 + end + country 'SV', 'El Salvador' do |c| + c.timezone 'America/El_Salvador', 137, 10, -446, 5 + end + country 'SX', 'St Maarten (Dutch)' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'SY', 'Syria' do |c| + c.timezone 'Asia/Damascus', 67, 2, 363, 10 + end + country 'SZ', 'Eswatini (Swaziland)' do |c| + c.timezone 'Africa/Johannesburg', -105, 4, 28, 1 + end + country 'TC', 'Turks & Caicos Is' do |c| + c.timezone 'America/Grand_Turk', 322, 15, -1067, 15 + end + country 'TD', 'Chad' do |c| + c.timezone 'Africa/Ndjamena', 727, 60, 301, 20 + end + country 'TF', 'French Southern & Antarctic Lands' do |c| + c.timezone 'Indian/Kerguelen', -17767, 360, 28087, 400, 'Kerguelen, St Paul Island, Amsterdam Island' + c.timezone 'Indian/Reunion', -313, 15, 832, 15, 'Réunion, Crozet, Scattered Islands' + end + country 'TG', 'Togo' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'TH', 'Thailand' do |c| + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'TJ', 'Tajikistan' do |c| + c.timezone 'Asia/Dushanbe', 463, 12, 344, 5 + end + country 'TK', 'Tokelau' do |c| + c.timezone 'Pacific/Fakaofo', -281, 30, -5137, 30 + end + country 'TL', 'East Timor' do |c| + c.timezone 'Asia/Dili', -171, 20, 1507, 12 + end + country 'TM', 'Turkmenistan' do |c| + c.timezone 'Asia/Ashgabat', 759, 20, 3503, 60 + end + country 'TN', 'Tunisia' do |c| + c.timezone 'Africa/Tunis', 184, 5, 611, 60 + end + country 'TO', 'Tonga' do |c| + c.timezone 'Pacific/Tongatapu', -127, 6, -1051, 6 + end + country 'TR', 'Turkey' do |c| + c.timezone 'Europe/Istanbul', 2461, 60, 869, 30 + end + country 'TT', 'Trinidad & Tobago' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'TV', 'Tuvalu' do |c| + c.timezone 'Pacific/Funafuti', -511, 60, 10753, 60 + end + country 'TW', 'Taiwan' do |c| + c.timezone 'Asia/Taipei', 501, 20, 243, 2 + end + country 'TZ', 'Tanzania' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'UA', 'Ukraine' do |c| + c.timezone 'Europe/Kiev', 1513, 30, 1831, 60, 'Ukraine (most areas)' + c.timezone 'Europe/Uzhgorod', 2917, 60, 223, 10, 'Transcarpathia' + c.timezone 'Europe/Zaporozhye', 287, 6, 211, 6, 'Zaporozhye and east Lugansk' + c.timezone 'Europe/Simferopol', 899, 20, 341, 10, 'Crimea' + end + country 'UG', 'Uganda' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'UM', 'US minor outlying islands' do |c| + c.timezone 'Pacific/Wake', 1157, 60, 9997, 60, 'Wake Island' + c.timezone 'Pacific/Pago_Pago', -214, 15, -1707, 10, 'Samoa, Midway' + c.timezone 'Pacific/Honolulu', 15341, 720, -18943, 120, 'Hawaii' + end + country 'US', 'United States' do |c| + c.timezone 'America/New_York', 48857, 1200, -266423, 3600, 'Eastern (most areas)' + c.timezone 'America/Detroit', 152393, 3600, -19931, 240, 'Eastern - MI (most areas)' + c.timezone 'America/Kentucky/Louisville', 9181, 240, -154367, 1800, 'Eastern - KY (Louisville area)' + c.timezone 'America/Kentucky/Monticello', 132587, 3600, -101819, 1200, 'Eastern - KY (Wayne)' + c.timezone 'America/Indiana/Indianapolis', 23861, 600, -310169, 3600, 'Eastern - IN (most areas)' + c.timezone 'America/Indiana/Vincennes', 69619, 1800, -315103, 3600, 'Eastern - IN (Da, Du, K, Mn)' + c.timezone 'America/Indiana/Winamac', 29557, 720, -311771, 3600, 'Eastern - IN 
(Pulaski)' + c.timezone 'America/Indiana/Marengo', 17269, 450, -310841, 3600, 'Eastern - IN (Crawford)' + c.timezone 'America/Indiana/Petersburg', 138571, 3600, -314203, 3600, 'Eastern - IN (Pike)' + c.timezone 'America/Indiana/Vevay', 34873, 900, -153121, 1800, 'Eastern - IN (Switzerland)' + c.timezone 'America/Chicago', 837, 20, -1753, 20, 'Central (most areas)' + c.timezone 'America/Indiana/Tell_City', 136631, 3600, -312341, 3600, 'Central - IN (Perry)' + c.timezone 'America/Indiana/Knox', 9911, 240, -693, 8, 'Central - IN (Starke)' + c.timezone 'America/Menominee', 40597, 900, -105137, 1200, 'Central - MI (Wisconsin border)' + c.timezone 'America/North_Dakota/Center', 169619, 3600, -121559, 1200, 'Central - ND (Oliver)' + c.timezone 'America/North_Dakota/New_Salem', 9369, 200, -121693, 1200, 'Central - ND (Morton rural)' + c.timezone 'America/North_Dakota/Beulah', 56717, 1200, -916, 9, 'Central - ND (Mercer)' + c.timezone 'America/Denver', 47687, 1200, -125981, 1200, 'Mountain (most areas)' + c.timezone 'America/Boise', 157009, 3600, -46481, 400, 'Mountain - ID (south); OR (east)' + c.timezone 'America/Phoenix', 20069, 600, -16811, 150, 'MST - Arizona (except Navajo)' + c.timezone 'America/Los_Angeles', 30647, 900, -212837, 1800, 'Pacific' + c.timezone 'America/Anchorage', 44077, 720, -539641, 3600, 'Alaska (most areas)' + c.timezone 'America/Juneau', 209887, 3600, -483911, 3600, 'Alaska - Juneau area' + c.timezone 'America/Sitka', 41167, 720, -487087, 3600, 'Alaska - Sitka area' + c.timezone 'America/Metlakatla', 198457, 3600, -18947, 144, 'Alaska - Annette Island' + c.timezone 'America/Yakutat', 214369, 3600, -251509, 1800, 'Alaska - Yakutat' + c.timezone 'America/Nome', 58051, 900, -595463, 3600, 'Alaska (west)' + c.timezone 'America/Adak', 1297, 25, -635969, 3600, 'Aleutian Islands' + c.timezone 'Pacific/Honolulu', 15341, 720, -18943, 120, 'Hawaii' + end + country 'UY', 'Uruguay' do |c| + c.timezone 'America/Montevideo', -41891, 1200, -4497, 80 + end + country 'UZ', 'Uzbekistan' do |c| + c.timezone 'Asia/Samarkand', 119, 3, 334, 5, 'Uzbekistan (west)' + c.timezone 'Asia/Tashkent', 124, 3, 693, 10, 'Uzbekistan (east)' + end + country 'VA', 'Vatican City' do |c| + c.timezone 'Europe/Rome', 419, 10, 749, 60 + end + country 'VC', 'St Vincent' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'VE', 'Venezuela' do |c| + c.timezone 'America/Caracas', 21, 2, -1004, 15 + end + country 'VG', 'Virgin Islands (UK)' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'VI', 'Virgin Islands (US)' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'VN', 'Vietnam' do |c| + c.timezone 'Asia/Ho_Chi_Minh', 43, 4, 320, 3, 'Vietnam (south)' + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'VU', 'Vanuatu' do |c| + c.timezone 'Pacific/Efate', -53, 3, 2021, 12 + end + country 'WF', 'Wallis & Futuna' do |c| + c.timezone 'Pacific/Wallis', -133, 10, -1057, 6 + end + country 'WS', 'Samoa (western)' do |c| + c.timezone 'Pacific/Apia', -83, 6, -2576, 15 + end + country 'YE', 'Yemen' do |c| + c.timezone 'Asia/Riyadh', 739, 30, 2803, 60 + end + country 'YT', 'Mayotte' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'ZA', 'South Africa' do |c| + c.timezone 'Africa/Johannesburg', -105, 4, 28, 1 + end + country 'ZM', 'Zambia' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'ZW', 'Zimbabwe' do |c| + c.timezone 'Africa/Maputo', 
-779, 30, 391, 12, 'Central Africa Time' + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/timezones.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/timezones.rb new file mode 100644 index 0000000..a42a257 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/timezones.rb @@ -0,0 +1,609 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Indexes + module Timezones + include TimezoneIndexDefinition + + timezone 'Africa/Abidjan' + timezone 'Africa/Accra' + linked_timezone 'Africa/Addis_Ababa' + timezone 'Africa/Algiers' + linked_timezone 'Africa/Asmara' + linked_timezone 'Africa/Asmera' + linked_timezone 'Africa/Bamako' + linked_timezone 'Africa/Bangui' + linked_timezone 'Africa/Banjul' + timezone 'Africa/Bissau' + linked_timezone 'Africa/Blantyre' + linked_timezone 'Africa/Brazzaville' + linked_timezone 'Africa/Bujumbura' + timezone 'Africa/Cairo' + timezone 'Africa/Casablanca' + timezone 'Africa/Ceuta' + linked_timezone 'Africa/Conakry' + linked_timezone 'Africa/Dakar' + linked_timezone 'Africa/Dar_es_Salaam' + linked_timezone 'Africa/Djibouti' + linked_timezone 'Africa/Douala' + timezone 'Africa/El_Aaiun' + linked_timezone 'Africa/Freetown' + linked_timezone 'Africa/Gaborone' + linked_timezone 'Africa/Harare' + timezone 'Africa/Johannesburg' + timezone 'Africa/Juba' + linked_timezone 'Africa/Kampala' + timezone 'Africa/Khartoum' + linked_timezone 'Africa/Kigali' + linked_timezone 'Africa/Kinshasa' + timezone 'Africa/Lagos' + linked_timezone 'Africa/Libreville' + linked_timezone 'Africa/Lome' + linked_timezone 'Africa/Luanda' + linked_timezone 'Africa/Lubumbashi' + linked_timezone 'Africa/Lusaka' + linked_timezone 'Africa/Malabo' + timezone 'Africa/Maputo' + linked_timezone 'Africa/Maseru' + linked_timezone 'Africa/Mbabane' + linked_timezone 'Africa/Mogadishu' + timezone 'Africa/Monrovia' + timezone 'Africa/Nairobi' + timezone 'Africa/Ndjamena' + linked_timezone 'Africa/Niamey' + linked_timezone 'Africa/Nouakchott' + linked_timezone 'Africa/Ouagadougou' + linked_timezone 'Africa/Porto-Novo' + timezone 'Africa/Sao_Tome' + linked_timezone 'Africa/Timbuktu' + timezone 'Africa/Tripoli' + timezone 'Africa/Tunis' + timezone 'Africa/Windhoek' + timezone 'America/Adak' + timezone 'America/Anchorage' + linked_timezone 'America/Anguilla' + linked_timezone 'America/Antigua' + timezone 'America/Araguaina' + timezone 'America/Argentina/Buenos_Aires' + timezone 'America/Argentina/Catamarca' + linked_timezone 'America/Argentina/ComodRivadavia' + timezone 'America/Argentina/Cordoba' + timezone 'America/Argentina/Jujuy' + timezone 'America/Argentina/La_Rioja' + timezone 'America/Argentina/Mendoza' + timezone 'America/Argentina/Rio_Gallegos' + timezone 'America/Argentina/Salta' + timezone 'America/Argentina/San_Juan' + timezone 'America/Argentina/San_Luis' + timezone 'America/Argentina/Tucuman' + timezone 'America/Argentina/Ushuaia' + linked_timezone 'America/Aruba' + timezone 'America/Asuncion' + timezone 'America/Atikokan' + linked_timezone 'America/Atka' + timezone 'America/Bahia' + timezone 'America/Bahia_Banderas' + timezone 'America/Barbados' + timezone 'America/Belem' + timezone 'America/Belize' + timezone 'America/Blanc-Sablon' + timezone 'America/Boa_Vista' + timezone 'America/Bogota' + timezone 'America/Boise' + linked_timezone 
'America/Buenos_Aires' + timezone 'America/Cambridge_Bay' + timezone 'America/Campo_Grande' + timezone 'America/Cancun' + timezone 'America/Caracas' + linked_timezone 'America/Catamarca' + timezone 'America/Cayenne' + linked_timezone 'America/Cayman' + timezone 'America/Chicago' + timezone 'America/Chihuahua' + linked_timezone 'America/Coral_Harbour' + linked_timezone 'America/Cordoba' + timezone 'America/Costa_Rica' + timezone 'America/Creston' + timezone 'America/Cuiaba' + timezone 'America/Curacao' + timezone 'America/Danmarkshavn' + timezone 'America/Dawson' + timezone 'America/Dawson_Creek' + timezone 'America/Denver' + timezone 'America/Detroit' + linked_timezone 'America/Dominica' + timezone 'America/Edmonton' + timezone 'America/Eirunepe' + timezone 'America/El_Salvador' + linked_timezone 'America/Ensenada' + timezone 'America/Fort_Nelson' + linked_timezone 'America/Fort_Wayne' + timezone 'America/Fortaleza' + timezone 'America/Glace_Bay' + linked_timezone 'America/Godthab' + timezone 'America/Goose_Bay' + timezone 'America/Grand_Turk' + linked_timezone 'America/Grenada' + linked_timezone 'America/Guadeloupe' + timezone 'America/Guatemala' + timezone 'America/Guayaquil' + timezone 'America/Guyana' + timezone 'America/Halifax' + timezone 'America/Havana' + timezone 'America/Hermosillo' + timezone 'America/Indiana/Indianapolis' + timezone 'America/Indiana/Knox' + timezone 'America/Indiana/Marengo' + timezone 'America/Indiana/Petersburg' + timezone 'America/Indiana/Tell_City' + timezone 'America/Indiana/Vevay' + timezone 'America/Indiana/Vincennes' + timezone 'America/Indiana/Winamac' + linked_timezone 'America/Indianapolis' + timezone 'America/Inuvik' + timezone 'America/Iqaluit' + timezone 'America/Jamaica' + linked_timezone 'America/Jujuy' + timezone 'America/Juneau' + timezone 'America/Kentucky/Louisville' + timezone 'America/Kentucky/Monticello' + linked_timezone 'America/Knox_IN' + linked_timezone 'America/Kralendijk' + timezone 'America/La_Paz' + timezone 'America/Lima' + timezone 'America/Los_Angeles' + linked_timezone 'America/Louisville' + linked_timezone 'America/Lower_Princes' + timezone 'America/Maceio' + timezone 'America/Managua' + timezone 'America/Manaus' + linked_timezone 'America/Marigot' + timezone 'America/Martinique' + timezone 'America/Matamoros' + timezone 'America/Mazatlan' + linked_timezone 'America/Mendoza' + timezone 'America/Menominee' + timezone 'America/Merida' + timezone 'America/Metlakatla' + timezone 'America/Mexico_City' + timezone 'America/Miquelon' + timezone 'America/Moncton' + timezone 'America/Monterrey' + timezone 'America/Montevideo' + linked_timezone 'America/Montreal' + linked_timezone 'America/Montserrat' + timezone 'America/Nassau' + timezone 'America/New_York' + timezone 'America/Nipigon' + timezone 'America/Nome' + timezone 'America/Noronha' + timezone 'America/North_Dakota/Beulah' + timezone 'America/North_Dakota/Center' + timezone 'America/North_Dakota/New_Salem' + timezone 'America/Nuuk' + timezone 'America/Ojinaga' + timezone 'America/Panama' + timezone 'America/Pangnirtung' + timezone 'America/Paramaribo' + timezone 'America/Phoenix' + timezone 'America/Port-au-Prince' + timezone 'America/Port_of_Spain' + linked_timezone 'America/Porto_Acre' + timezone 'America/Porto_Velho' + timezone 'America/Puerto_Rico' + timezone 'America/Punta_Arenas' + timezone 'America/Rainy_River' + timezone 'America/Rankin_Inlet' + timezone 'America/Recife' + timezone 'America/Regina' + timezone 'America/Resolute' + timezone 'America/Rio_Branco' + 
linked_timezone 'America/Rosario' + linked_timezone 'America/Santa_Isabel' + timezone 'America/Santarem' + timezone 'America/Santiago' + timezone 'America/Santo_Domingo' + timezone 'America/Sao_Paulo' + timezone 'America/Scoresbysund' + linked_timezone 'America/Shiprock' + timezone 'America/Sitka' + linked_timezone 'America/St_Barthelemy' + timezone 'America/St_Johns' + linked_timezone 'America/St_Kitts' + linked_timezone 'America/St_Lucia' + linked_timezone 'America/St_Thomas' + linked_timezone 'America/St_Vincent' + timezone 'America/Swift_Current' + timezone 'America/Tegucigalpa' + timezone 'America/Thule' + timezone 'America/Thunder_Bay' + timezone 'America/Tijuana' + timezone 'America/Toronto' + linked_timezone 'America/Tortola' + timezone 'America/Vancouver' + linked_timezone 'America/Virgin' + timezone 'America/Whitehorse' + timezone 'America/Winnipeg' + timezone 'America/Yakutat' + timezone 'America/Yellowknife' + timezone 'Antarctica/Casey' + timezone 'Antarctica/Davis' + timezone 'Antarctica/DumontDUrville' + timezone 'Antarctica/Macquarie' + timezone 'Antarctica/Mawson' + linked_timezone 'Antarctica/McMurdo' + timezone 'Antarctica/Palmer' + timezone 'Antarctica/Rothera' + linked_timezone 'Antarctica/South_Pole' + timezone 'Antarctica/Syowa' + timezone 'Antarctica/Troll' + timezone 'Antarctica/Vostok' + linked_timezone 'Arctic/Longyearbyen' + linked_timezone 'Asia/Aden' + timezone 'Asia/Almaty' + timezone 'Asia/Amman' + timezone 'Asia/Anadyr' + timezone 'Asia/Aqtau' + timezone 'Asia/Aqtobe' + timezone 'Asia/Ashgabat' + linked_timezone 'Asia/Ashkhabad' + timezone 'Asia/Atyrau' + timezone 'Asia/Baghdad' + linked_timezone 'Asia/Bahrain' + timezone 'Asia/Baku' + timezone 'Asia/Bangkok' + timezone 'Asia/Barnaul' + timezone 'Asia/Beirut' + timezone 'Asia/Bishkek' + timezone 'Asia/Brunei' + linked_timezone 'Asia/Calcutta' + timezone 'Asia/Chita' + timezone 'Asia/Choibalsan' + linked_timezone 'Asia/Chongqing' + linked_timezone 'Asia/Chungking' + timezone 'Asia/Colombo' + linked_timezone 'Asia/Dacca' + timezone 'Asia/Damascus' + timezone 'Asia/Dhaka' + timezone 'Asia/Dili' + timezone 'Asia/Dubai' + timezone 'Asia/Dushanbe' + timezone 'Asia/Famagusta' + timezone 'Asia/Gaza' + linked_timezone 'Asia/Harbin' + timezone 'Asia/Hebron' + timezone 'Asia/Ho_Chi_Minh' + timezone 'Asia/Hong_Kong' + timezone 'Asia/Hovd' + timezone 'Asia/Irkutsk' + linked_timezone 'Asia/Istanbul' + timezone 'Asia/Jakarta' + timezone 'Asia/Jayapura' + timezone 'Asia/Jerusalem' + timezone 'Asia/Kabul' + timezone 'Asia/Kamchatka' + timezone 'Asia/Karachi' + linked_timezone 'Asia/Kashgar' + timezone 'Asia/Kathmandu' + linked_timezone 'Asia/Katmandu' + timezone 'Asia/Khandyga' + timezone 'Asia/Kolkata' + timezone 'Asia/Krasnoyarsk' + timezone 'Asia/Kuala_Lumpur' + timezone 'Asia/Kuching' + linked_timezone 'Asia/Kuwait' + linked_timezone 'Asia/Macao' + timezone 'Asia/Macau' + timezone 'Asia/Magadan' + timezone 'Asia/Makassar' + timezone 'Asia/Manila' + linked_timezone 'Asia/Muscat' + timezone 'Asia/Nicosia' + timezone 'Asia/Novokuznetsk' + timezone 'Asia/Novosibirsk' + timezone 'Asia/Omsk' + timezone 'Asia/Oral' + linked_timezone 'Asia/Phnom_Penh' + timezone 'Asia/Pontianak' + timezone 'Asia/Pyongyang' + timezone 'Asia/Qatar' + timezone 'Asia/Qostanay' + timezone 'Asia/Qyzylorda' + linked_timezone 'Asia/Rangoon' + timezone 'Asia/Riyadh' + linked_timezone 'Asia/Saigon' + timezone 'Asia/Sakhalin' + timezone 'Asia/Samarkand' + timezone 'Asia/Seoul' + timezone 'Asia/Shanghai' + timezone 'Asia/Singapore' + timezone 
'Asia/Srednekolymsk' + timezone 'Asia/Taipei' + timezone 'Asia/Tashkent' + timezone 'Asia/Tbilisi' + timezone 'Asia/Tehran' + linked_timezone 'Asia/Tel_Aviv' + linked_timezone 'Asia/Thimbu' + timezone 'Asia/Thimphu' + timezone 'Asia/Tokyo' + timezone 'Asia/Tomsk' + linked_timezone 'Asia/Ujung_Pandang' + timezone 'Asia/Ulaanbaatar' + linked_timezone 'Asia/Ulan_Bator' + timezone 'Asia/Urumqi' + timezone 'Asia/Ust-Nera' + linked_timezone 'Asia/Vientiane' + timezone 'Asia/Vladivostok' + timezone 'Asia/Yakutsk' + timezone 'Asia/Yangon' + timezone 'Asia/Yekaterinburg' + timezone 'Asia/Yerevan' + timezone 'Atlantic/Azores' + timezone 'Atlantic/Bermuda' + timezone 'Atlantic/Canary' + timezone 'Atlantic/Cape_Verde' + linked_timezone 'Atlantic/Faeroe' + timezone 'Atlantic/Faroe' + linked_timezone 'Atlantic/Jan_Mayen' + timezone 'Atlantic/Madeira' + timezone 'Atlantic/Reykjavik' + timezone 'Atlantic/South_Georgia' + linked_timezone 'Atlantic/St_Helena' + timezone 'Atlantic/Stanley' + linked_timezone 'Australia/ACT' + timezone 'Australia/Adelaide' + timezone 'Australia/Brisbane' + timezone 'Australia/Broken_Hill' + linked_timezone 'Australia/Canberra' + timezone 'Australia/Currie' + timezone 'Australia/Darwin' + timezone 'Australia/Eucla' + timezone 'Australia/Hobart' + linked_timezone 'Australia/LHI' + timezone 'Australia/Lindeman' + timezone 'Australia/Lord_Howe' + timezone 'Australia/Melbourne' + linked_timezone 'Australia/NSW' + linked_timezone 'Australia/North' + timezone 'Australia/Perth' + linked_timezone 'Australia/Queensland' + linked_timezone 'Australia/South' + timezone 'Australia/Sydney' + linked_timezone 'Australia/Tasmania' + linked_timezone 'Australia/Victoria' + linked_timezone 'Australia/West' + linked_timezone 'Australia/Yancowinna' + linked_timezone 'Brazil/Acre' + linked_timezone 'Brazil/DeNoronha' + linked_timezone 'Brazil/East' + linked_timezone 'Brazil/West' + timezone 'CET' + timezone 'CST6CDT' + linked_timezone 'Canada/Atlantic' + linked_timezone 'Canada/Central' + linked_timezone 'Canada/Eastern' + linked_timezone 'Canada/Mountain' + linked_timezone 'Canada/Newfoundland' + linked_timezone 'Canada/Pacific' + linked_timezone 'Canada/Saskatchewan' + linked_timezone 'Canada/Yukon' + linked_timezone 'Chile/Continental' + linked_timezone 'Chile/EasterIsland' + linked_timezone 'Cuba' + timezone 'EET' + timezone 'EST' + timezone 'EST5EDT' + linked_timezone 'Egypt' + linked_timezone 'Eire' + timezone 'Etc/GMT' + linked_timezone 'Etc/GMT+0' + timezone 'Etc/GMT+1' + timezone 'Etc/GMT+10' + timezone 'Etc/GMT+11' + timezone 'Etc/GMT+12' + timezone 'Etc/GMT+2' + timezone 'Etc/GMT+3' + timezone 'Etc/GMT+4' + timezone 'Etc/GMT+5' + timezone 'Etc/GMT+6' + timezone 'Etc/GMT+7' + timezone 'Etc/GMT+8' + timezone 'Etc/GMT+9' + linked_timezone 'Etc/GMT-0' + timezone 'Etc/GMT-1' + timezone 'Etc/GMT-10' + timezone 'Etc/GMT-11' + timezone 'Etc/GMT-12' + timezone 'Etc/GMT-13' + timezone 'Etc/GMT-14' + timezone 'Etc/GMT-2' + timezone 'Etc/GMT-3' + timezone 'Etc/GMT-4' + timezone 'Etc/GMT-5' + timezone 'Etc/GMT-6' + timezone 'Etc/GMT-7' + timezone 'Etc/GMT-8' + timezone 'Etc/GMT-9' + linked_timezone 'Etc/GMT0' + linked_timezone 'Etc/Greenwich' + linked_timezone 'Etc/UCT' + timezone 'Etc/UTC' + linked_timezone 'Etc/Universal' + linked_timezone 'Etc/Zulu' + timezone 'Europe/Amsterdam' + timezone 'Europe/Andorra' + timezone 'Europe/Astrakhan' + timezone 'Europe/Athens' + linked_timezone 'Europe/Belfast' + timezone 'Europe/Belgrade' + timezone 'Europe/Berlin' + linked_timezone 'Europe/Bratislava' + timezone 
'Europe/Brussels' + timezone 'Europe/Bucharest' + timezone 'Europe/Budapest' + linked_timezone 'Europe/Busingen' + timezone 'Europe/Chisinau' + timezone 'Europe/Copenhagen' + timezone 'Europe/Dublin' + timezone 'Europe/Gibraltar' + linked_timezone 'Europe/Guernsey' + timezone 'Europe/Helsinki' + linked_timezone 'Europe/Isle_of_Man' + timezone 'Europe/Istanbul' + linked_timezone 'Europe/Jersey' + timezone 'Europe/Kaliningrad' + timezone 'Europe/Kiev' + timezone 'Europe/Kirov' + timezone 'Europe/Lisbon' + linked_timezone 'Europe/Ljubljana' + timezone 'Europe/London' + timezone 'Europe/Luxembourg' + timezone 'Europe/Madrid' + timezone 'Europe/Malta' + linked_timezone 'Europe/Mariehamn' + timezone 'Europe/Minsk' + timezone 'Europe/Monaco' + timezone 'Europe/Moscow' + linked_timezone 'Europe/Nicosia' + timezone 'Europe/Oslo' + timezone 'Europe/Paris' + linked_timezone 'Europe/Podgorica' + timezone 'Europe/Prague' + timezone 'Europe/Riga' + timezone 'Europe/Rome' + timezone 'Europe/Samara' + linked_timezone 'Europe/San_Marino' + linked_timezone 'Europe/Sarajevo' + timezone 'Europe/Saratov' + timezone 'Europe/Simferopol' + linked_timezone 'Europe/Skopje' + timezone 'Europe/Sofia' + timezone 'Europe/Stockholm' + timezone 'Europe/Tallinn' + timezone 'Europe/Tirane' + linked_timezone 'Europe/Tiraspol' + timezone 'Europe/Ulyanovsk' + timezone 'Europe/Uzhgorod' + linked_timezone 'Europe/Vaduz' + linked_timezone 'Europe/Vatican' + timezone 'Europe/Vienna' + timezone 'Europe/Vilnius' + timezone 'Europe/Volgograd' + timezone 'Europe/Warsaw' + linked_timezone 'Europe/Zagreb' + timezone 'Europe/Zaporozhye' + timezone 'Europe/Zurich' + timezone 'Factory' + linked_timezone 'GB' + linked_timezone 'GB-Eire' + linked_timezone 'GMT' + linked_timezone 'GMT+0' + linked_timezone 'GMT-0' + linked_timezone 'GMT0' + linked_timezone 'Greenwich' + timezone 'HST' + linked_timezone 'Hongkong' + linked_timezone 'Iceland' + linked_timezone 'Indian/Antananarivo' + timezone 'Indian/Chagos' + timezone 'Indian/Christmas' + timezone 'Indian/Cocos' + linked_timezone 'Indian/Comoro' + timezone 'Indian/Kerguelen' + timezone 'Indian/Mahe' + timezone 'Indian/Maldives' + timezone 'Indian/Mauritius' + linked_timezone 'Indian/Mayotte' + timezone 'Indian/Reunion' + linked_timezone 'Iran' + linked_timezone 'Israel' + linked_timezone 'Jamaica' + linked_timezone 'Japan' + linked_timezone 'Kwajalein' + linked_timezone 'Libya' + timezone 'MET' + timezone 'MST' + timezone 'MST7MDT' + linked_timezone 'Mexico/BajaNorte' + linked_timezone 'Mexico/BajaSur' + linked_timezone 'Mexico/General' + linked_timezone 'NZ' + linked_timezone 'NZ-CHAT' + linked_timezone 'Navajo' + linked_timezone 'PRC' + timezone 'PST8PDT' + timezone 'Pacific/Apia' + timezone 'Pacific/Auckland' + timezone 'Pacific/Bougainville' + timezone 'Pacific/Chatham' + timezone 'Pacific/Chuuk' + timezone 'Pacific/Easter' + timezone 'Pacific/Efate' + timezone 'Pacific/Enderbury' + timezone 'Pacific/Fakaofo' + timezone 'Pacific/Fiji' + timezone 'Pacific/Funafuti' + timezone 'Pacific/Galapagos' + timezone 'Pacific/Gambier' + timezone 'Pacific/Guadalcanal' + timezone 'Pacific/Guam' + timezone 'Pacific/Honolulu' + linked_timezone 'Pacific/Johnston' + timezone 'Pacific/Kiritimati' + timezone 'Pacific/Kosrae' + timezone 'Pacific/Kwajalein' + timezone 'Pacific/Majuro' + timezone 'Pacific/Marquesas' + linked_timezone 'Pacific/Midway' + timezone 'Pacific/Nauru' + timezone 'Pacific/Niue' + timezone 'Pacific/Norfolk' + timezone 'Pacific/Noumea' + timezone 'Pacific/Pago_Pago' + timezone 
'Pacific/Palau' + timezone 'Pacific/Pitcairn' + timezone 'Pacific/Pohnpei' + linked_timezone 'Pacific/Ponape' + timezone 'Pacific/Port_Moresby' + timezone 'Pacific/Rarotonga' + linked_timezone 'Pacific/Saipan' + linked_timezone 'Pacific/Samoa' + timezone 'Pacific/Tahiti' + timezone 'Pacific/Tarawa' + timezone 'Pacific/Tongatapu' + linked_timezone 'Pacific/Truk' + timezone 'Pacific/Wake' + timezone 'Pacific/Wallis' + linked_timezone 'Pacific/Yap' + linked_timezone 'Poland' + linked_timezone 'Portugal' + linked_timezone 'ROC' + linked_timezone 'ROK' + linked_timezone 'Singapore' + linked_timezone 'Turkey' + linked_timezone 'UCT' + linked_timezone 'US/Alaska' + linked_timezone 'US/Aleutian' + linked_timezone 'US/Arizona' + linked_timezone 'US/Central' + linked_timezone 'US/East-Indiana' + linked_timezone 'US/Eastern' + linked_timezone 'US/Hawaii' + linked_timezone 'US/Indiana-Starke' + linked_timezone 'US/Michigan' + linked_timezone 'US/Mountain' + linked_timezone 'US/Pacific' + linked_timezone 'US/Samoa' + linked_timezone 'UTC' + linked_timezone 'Universal' + linked_timezone 'W-SU' + timezone 'WET' + linked_timezone 'Zulu' + end + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/version.rb b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/version.rb new file mode 100644 index 0000000..51e2d31 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/version.rb @@ -0,0 +1,20 @@ +module TZInfo + module Data + # TZInfo::Data version number. + VERSION = '1.2020.4' + + # TZInfo::Data version information. + module Version + # The format of the Ruby modules. The only format currently supported by + # TZInfo is version 1. + FORMAT = 1 + + # TZInfo::Data version number. + STRING = VERSION + + # The version of the {IANA Time Zone Database}[https://www.iana.org/time-zones] + # used to generate this version of TZInfo::Data. 
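
The index being vendored above registers every identifier through the timezone / linked_timezone DSL (linked entries are aliases that resolve to a canonical zone), and the accompanying version.rb exposes the bundled data release through TZInfo::Data::Version. A minimal sketch of how such an index is typically consumed through the public TZInfo 1.x API follows; it is illustrative only and not part of this diff, it assumes tzinfo and a compatible data set are available in the Ruby environment, and the printed values are examples that depend on the data source actually in use.

  require 'tzinfo'

  # 'Africa/Asmera' is registered above with linked_timezone, so it is an
  # alias; canonical_zone follows the link to the zone that owns the data.
  asmera = TZInfo::Timezone.get('Africa/Asmera')
  puts asmera.canonical_zone.identifier   # e.g. "Africa/Nairobi" with recent tzdata releases

  # Identifiers registered with `timezone` carry their own transition data.
  ny = TZInfo::Timezone.get('America/New_York')
  puts ny.current_period.abbreviation     # :EST or :EDT, depending on the date

  # The index also backs enumeration of every known identifier.
  puts TZInfo::Timezone.all_identifiers.size
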
+ TZDATA = '2020d' + end + end +end diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/America/Argentina/Buenos_Aires b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/America/Argentina/Buenos_Aires new file mode 100644 index 0000000..d6f999b Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/America/Argentina/Buenos_Aires differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/America/New_York b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/America/New_York new file mode 100644 index 0000000..2b6c2ee Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/America/New_York differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Australia/Melbourne b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Australia/Melbourne new file mode 100644 index 0000000..c7160da Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Australia/Melbourne differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/EST b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/EST new file mode 100644 index 0000000..3ae9691 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/EST differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Etc/UTC b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Etc/UTC new file mode 100644 index 0000000..00841a6 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Etc/UTC differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Amsterdam b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Amsterdam new file mode 100644 index 0000000..4a6fa1d Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Amsterdam differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Andorra b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Andorra new file mode 100644 index 0000000..38685d4 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Andorra differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/London b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/London new file mode 100644 index 0000000..323cd38 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/London differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Paris b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Paris new file mode 100644 index 0000000..00a2726 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Paris differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Prague b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Prague new file mode 100644 index 0000000..fb7c145 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Prague differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Factory b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Factory new file mode 100644 index 0000000..b4dd773 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/Factory differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/iso3166.tab 
b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/iso3166.tab new file mode 100644 index 0000000..a4ff61a --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/iso3166.tab @@ -0,0 +1,274 @@ +# ISO 3166 alpha-2 country codes +# +# This file is in the public domain, so clarified as of +# 2009-05-17 by Arthur David Olson. +# +# From Paul Eggert (2015-05-02): +# This file contains a table of two-letter country codes. Columns are +# separated by a single tab. Lines beginning with '#' are comments. +# All text uses UTF-8 encoding. The columns of the table are as follows: +# +# 1. ISO 3166-1 alpha-2 country code, current as of +# ISO 3166-1 N976 (2018-11-06). See: Updates on ISO 3166-1 +# https://isotc.iso.org/livelink/livelink/Open/16944257 +# 2. The usual English name for the coded region, +# chosen so that alphabetic sorting of subsets produces helpful lists. +# This is not the same as the English name in the ISO 3166 tables. +# +# The table is sorted by country code. +# +# This table is intended as an aid for users, to help them select time +# zone data appropriate for their practical needs. It is not intended +# to take or endorse any position on legal or territorial claims. +# +#country- +#code name of country, territory, area, or subdivision +AD Andorra +AE United Arab Emirates +AF Afghanistan +AG Antigua & Barbuda +AI Anguilla +AL Albania +AM Armenia +AO Angola +AQ Antarctica +AR Argentina +AS Samoa (American) +AT Austria +AU Australia +AW Aruba +AX Åland Islands +AZ Azerbaijan +BA Bosnia & Herzegovina +BB Barbados +BD Bangladesh +BE Belgium +BF Burkina Faso +BG Bulgaria +BH Bahrain +BI Burundi +BJ Benin +BL St Barthelemy +BM Bermuda +BN Brunei +BO Bolivia +BQ Caribbean NL +BR Brazil +BS Bahamas +BT Bhutan +BV Bouvet Island +BW Botswana +BY Belarus +BZ Belize +CA Canada +CC Cocos (Keeling) Islands +CD Congo (Dem. Rep.) +CF Central African Rep. +CG Congo (Rep.) 
+CH Switzerland +CI Côte d'Ivoire +CK Cook Islands +CL Chile +CM Cameroon +CN China +CO Colombia +CR Costa Rica +CU Cuba +CV Cape Verde +CW Curaçao +CX Christmas Island +CY Cyprus +CZ Czech Republic +DE Germany +DJ Djibouti +DK Denmark +DM Dominica +DO Dominican Republic +DZ Algeria +EC Ecuador +EE Estonia +EG Egypt +EH Western Sahara +ER Eritrea +ES Spain +ET Ethiopia +FI Finland +FJ Fiji +FK Falkland Islands +FM Micronesia +FO Faroe Islands +FR France +GA Gabon +GB Britain (UK) +GD Grenada +GE Georgia +GF French Guiana +GG Guernsey +GH Ghana +GI Gibraltar +GL Greenland +GM Gambia +GN Guinea +GP Guadeloupe +GQ Equatorial Guinea +GR Greece +GS South Georgia & the South Sandwich Islands +GT Guatemala +GU Guam +GW Guinea-Bissau +GY Guyana +HK Hong Kong +HM Heard Island & McDonald Islands +HN Honduras +HR Croatia +HT Haiti +HU Hungary +ID Indonesia +IE Ireland +IL Israel +IM Isle of Man +IN India +IO British Indian Ocean Territory +IQ Iraq +IR Iran +IS Iceland +IT Italy +JE Jersey +JM Jamaica +JO Jordan +JP Japan +KE Kenya +KG Kyrgyzstan +KH Cambodia +KI Kiribati +KM Comoros +KN St Kitts & Nevis +KP Korea (North) +KR Korea (South) +KW Kuwait +KY Cayman Islands +KZ Kazakhstan +LA Laos +LB Lebanon +LC St Lucia +LI Liechtenstein +LK Sri Lanka +LR Liberia +LS Lesotho +LT Lithuania +LU Luxembourg +LV Latvia +LY Libya +MA Morocco +MC Monaco +MD Moldova +ME Montenegro +MF St Martin (French) +MG Madagascar +MH Marshall Islands +MK North Macedonia +ML Mali +MM Myanmar (Burma) +MN Mongolia +MO Macau +MP Northern Mariana Islands +MQ Martinique +MR Mauritania +MS Montserrat +MT Malta +MU Mauritius +MV Maldives +MW Malawi +MX Mexico +MY Malaysia +MZ Mozambique +NA Namibia +NC New Caledonia +NE Niger +NF Norfolk Island +NG Nigeria +NI Nicaragua +NL Netherlands +NO Norway +NP Nepal +NR Nauru +NU Niue +NZ New Zealand +OM Oman +PA Panama +PE Peru +PF French Polynesia +PG Papua New Guinea +PH Philippines +PK Pakistan +PL Poland +PM St Pierre & Miquelon +PN Pitcairn +PR Puerto Rico +PS Palestine +PT Portugal +PW Palau +PY Paraguay +QA Qatar +RE Réunion +RO Romania +RS Serbia +RU Russia +RW Rwanda +SA Saudi Arabia +SB Solomon Islands +SC Seychelles +SD Sudan +SE Sweden +SG Singapore +SH St Helena +SI Slovenia +SJ Svalbard & Jan Mayen +SK Slovakia +SL Sierra Leone +SM San Marino +SN Senegal +SO Somalia +SR Suriname +SS South Sudan +ST Sao Tome & Principe +SV El Salvador +SX St Maarten (Dutch) +SY Syria +SZ Eswatini (Swaziland) +TC Turks & Caicos Is +TD Chad +TF French Southern & Antarctic Lands +TG Togo +TH Thailand +TJ Tajikistan +TK Tokelau +TL East Timor +TM Turkmenistan +TN Tunisia +TO Tonga +TR Turkey +TT Trinidad & Tobago +TV Tuvalu +TW Taiwan +TZ Tanzania +UA Ukraine +UG Uganda +UM US minor outlying islands +US United States +UY Uruguay +UZ Uzbekistan +VA Vatican City +VC St Vincent +VE Venezuela +VG Virgin Islands (UK) +VI Virgin Islands (US) +VN Vietnam +VU Vanuatu +WF Wallis & Futuna +WS Samoa (western) +YE Yemen +YT Mayotte +ZA South Africa +ZM Zambia +ZW Zimbabwe diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/leapseconds b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/leapseconds new file mode 100644 index 0000000..3ce3ab2 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/leapseconds @@ -0,0 +1,78 @@ +# Allowance for leap seconds added to each time zone file. + +# This file is in the public domain. 
+ +# This file is generated automatically from the data in the public-domain +# NIST format leap-seconds.list file, which can be copied from +# +# or . +# For more about leap-seconds.list, please see +# The NTP Timescale and Leap Seconds +# . + +# The rules for leap seconds are specified in Annex 1 (Time scales) of: +# Standard-frequency and time-signal emissions. +# International Telecommunication Union - Radiocommunication Sector +# (ITU-R) Recommendation TF.460-6 (02/2002) +# . +# The International Earth Rotation and Reference Systems Service (IERS) +# periodically uses leap seconds to keep UTC to within 0.9 s of UT1 +# (a proxy for Earth's angle in space as measured by astronomers) +# and publishes leap second data in a copyrighted file +# . +# See: Levine J. Coordinated Universal Time and the leap second. +# URSI Radio Sci Bull. 2016;89(4):30-6. doi:10.23919/URSIRSB.2016.7909995 +# . + +# There were no leap seconds before 1972, as no official mechanism +# accounted for the discrepancy between atomic time (TAI) and the earth's +# rotation. The first ("1 Jan 1972") data line in leap-seconds.list +# does not denote a leap second; it denotes the start of the current definition +# of UTC. + +# All leap-seconds are Stationary (S) at the given UTC time. +# The correction (+ or -) is made at the given time, so in the unlikely +# event of a negative leap second, a line would look like this: +# Leap YEAR MON DAY 23:59:59 - S +# Typical lines look like this: +# Leap YEAR MON DAY 23:59:60 + S +Leap 1972 Jun 30 23:59:60 + S +Leap 1972 Dec 31 23:59:60 + S +Leap 1973 Dec 31 23:59:60 + S +Leap 1974 Dec 31 23:59:60 + S +Leap 1975 Dec 31 23:59:60 + S +Leap 1976 Dec 31 23:59:60 + S +Leap 1977 Dec 31 23:59:60 + S +Leap 1978 Dec 31 23:59:60 + S +Leap 1979 Dec 31 23:59:60 + S +Leap 1981 Jun 30 23:59:60 + S +Leap 1982 Jun 30 23:59:60 + S +Leap 1983 Jun 30 23:59:60 + S +Leap 1985 Jun 30 23:59:60 + S +Leap 1987 Dec 31 23:59:60 + S +Leap 1989 Dec 31 23:59:60 + S +Leap 1990 Dec 31 23:59:60 + S +Leap 1992 Jun 30 23:59:60 + S +Leap 1993 Jun 30 23:59:60 + S +Leap 1994 Jun 30 23:59:60 + S +Leap 1995 Dec 31 23:59:60 + S +Leap 1997 Jun 30 23:59:60 + S +Leap 1998 Dec 31 23:59:60 + S +Leap 2005 Dec 31 23:59:60 + S +Leap 2008 Dec 31 23:59:60 + S +Leap 2012 Jun 30 23:59:60 + S +Leap 2015 Jun 30 23:59:60 + S +Leap 2016 Dec 31 23:59:60 + S + +# UTC timestamp when this leap second list expires. +# Any additional leap seconds will come after this. +# This Expires line is commented out for now, +# so that pre-2020a zic implementations do not reject this file. 
+#Expires 2021 Jun 28 00:00:00 + +# POSIX timestamps for the data in this file: +#updated 1467936000 (2016-07-08 00:00:00 UTC) +#expires 1624838400 (2021-06-28 00:00:00 UTC) + +# Updated through IERS Bulletin C60 +# File expires on: 28 June 2021 diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/posix/Europe/London b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/posix/Europe/London new file mode 100644 index 0000000..323cd38 Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/posix/Europe/London differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/posixrules b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/posixrules new file mode 100644 index 0000000..2b6c2ee Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/posixrules differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/right/Europe/London b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/right/Europe/London new file mode 100644 index 0000000..9b8560d Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/right/Europe/London differ diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/zone.tab b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/zone.tab new file mode 100644 index 0000000..8d056e3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/zone.tab @@ -0,0 +1,452 @@ +# tzdb timezone descriptions (deprecated version) +# +# This file is in the public domain, so clarified as of +# 2009-05-17 by Arthur David Olson. +# +# From Paul Eggert (2018-06-27): +# This file is intended as a backward-compatibility aid for older programs. +# New programs should use zone1970.tab. This file is like zone1970.tab (see +# zone1970.tab's comments), but with the following additional restrictions: +# +# 1. This file contains only ASCII characters. +# 2. The first data column contains exactly one country code. +# +# Because of (2), each row stands for an area that is the intersection +# of a region identified by a country code and of a timezone where civil +# clocks have agreed since 1970; this is a narrower definition than +# that of zone1970.tab. +# +# This table is intended as an aid for users, to help them select timezones +# appropriate for their practical needs. It is not intended to take or +# endorse any position on legal or territorial claims. 
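
The layout spelled out in the zone.tab header comments above (tab-separated columns, '#' for comments, exactly one country code per row, ISO 6709 coordinates, a TZ identifier, and an optional comment) is simple to read directly. A short illustrative Ruby sketch, not part of the vendored files; the path used here is only a conventional example of where a zone.tab ships with a zoneinfo database.

  # Hypothetical location; adjust to wherever zone.tab lives on the system.
  path = '/usr/share/zoneinfo/zone.tab'

  File.foreach(path) do |line|
    next if line.start_with?('#') || line.strip.empty?  # skip comments and blanks
    code, coordinates, tz, comment = line.chomp.split("\t", 4)
    puts format('%-2s  %-32s %s', code, tz, comment || '')
  end

In practice TZInfo reads these tables itself; for example, TZInfo::Country.get('US').zone_identifiers yields the same country-to-zone association without any manual parsing.
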
+# +#country- +#code coordinates TZ comments +AD +4230+00131 Europe/Andorra +AE +2518+05518 Asia/Dubai +AF +3431+06912 Asia/Kabul +AG +1703-06148 America/Antigua +AI +1812-06304 America/Anguilla +AL +4120+01950 Europe/Tirane +AM +4011+04430 Asia/Yerevan +AO -0848+01314 Africa/Luanda +AQ -7750+16636 Antarctica/McMurdo New Zealand time - McMurdo, South Pole +AQ -6617+11031 Antarctica/Casey Casey +AQ -6835+07758 Antarctica/Davis Davis +AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville +AQ -6736+06253 Antarctica/Mawson Mawson +AQ -6448-06406 Antarctica/Palmer Palmer +AQ -6734-06808 Antarctica/Rothera Rothera +AQ -690022+0393524 Antarctica/Syowa Syowa +AQ -720041+0023206 Antarctica/Troll Troll +AQ -7824+10654 Antarctica/Vostok Vostok +AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF) +AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF) +AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN) +AR -2411-06518 America/Argentina/Jujuy Jujuy (JY) +AR -2649-06513 America/Argentina/Tucuman Tucuman (TM) +AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH) +AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR) +AR -3132-06831 America/Argentina/San_Juan San Juan (SJ) +AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ) +AR -3319-06621 America/Argentina/San_Luis San Luis (SL) +AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC) +AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF) +AS -1416-17042 Pacific/Pago_Pago +AT +4813+01620 Europe/Vienna +AU -3133+15905 Australia/Lord_Howe Lord Howe Island +AU -5430+15857 Antarctica/Macquarie Macquarie Island +AU -4253+14719 Australia/Hobart Tasmania (most areas) +AU -3956+14352 Australia/Currie Tasmania (King Island) +AU -3749+14458 Australia/Melbourne Victoria +AU -3352+15113 Australia/Sydney New South Wales (most areas) +AU -3157+14127 Australia/Broken_Hill New South Wales (Yancowinna) +AU -2728+15302 Australia/Brisbane Queensland (most areas) +AU -2016+14900 Australia/Lindeman Queensland (Whitsunday Islands) +AU -3455+13835 Australia/Adelaide South Australia +AU -1228+13050 Australia/Darwin Northern Territory +AU -3157+11551 Australia/Perth Western Australia (most areas) +AU -3143+12852 Australia/Eucla Western Australia (Eucla) +AW +1230-06958 America/Aruba +AX +6006+01957 Europe/Mariehamn +AZ +4023+04951 Asia/Baku +BA +4352+01825 Europe/Sarajevo +BB +1306-05937 America/Barbados +BD +2343+09025 Asia/Dhaka +BE +5050+00420 Europe/Brussels +BF +1222-00131 Africa/Ouagadougou +BG +4241+02319 Europe/Sofia +BH +2623+05035 Asia/Bahrain +BI -0323+02922 Africa/Bujumbura +BJ +0629+00237 Africa/Porto-Novo +BL +1753-06251 America/St_Barthelemy +BM +3217-06446 Atlantic/Bermuda +BN +0456+11455 Asia/Brunei +BO -1630-06809 America/La_Paz +BQ +120903-0681636 America/Kralendijk +BR -0351-03225 America/Noronha Atlantic islands +BR -0127-04829 America/Belem Para (east); Amapa +BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB) +BR -0803-03454 America/Recife Pernambuco +BR -0712-04812 America/Araguaina Tocantins +BR -0940-03543 America/Maceio Alagoas, Sergipe +BR -1259-03831 America/Bahia Bahia +BR -2332-04637 America/Sao_Paulo Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS) +BR -2027-05437 America/Campo_Grande Mato Grosso do Sul +BR -1535-05605 America/Cuiaba Mato Grosso +BR -0226-05452 America/Santarem Para (west) +BR -0846-06354 America/Porto_Velho Rondonia +BR +0249-06040 America/Boa_Vista Roraima +BR -0308-06001 America/Manaus 
Amazonas (east) +BR -0640-06952 America/Eirunepe Amazonas (west) +BR -0958-06748 America/Rio_Branco Acre +BS +2505-07721 America/Nassau +BT +2728+08939 Asia/Thimphu +BW -2439+02555 Africa/Gaborone +BY +5354+02734 Europe/Minsk +BZ +1730-08812 America/Belize +CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast) +CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE +CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton) +CA +4606-06447 America/Moncton Atlantic - New Brunswick +CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas) +CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore) +CA +4339-07923 America/Toronto Eastern - ON, QC (most areas) +CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73) +CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay) +CA +6344-06828 America/Iqaluit Eastern - NU (most east areas) +CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung) +CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H) +CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba +CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances) +CA +744144-0944945 America/Resolute Central - NU (Resolute) +CA +624900-0920459 America/Rankin_Inlet Central - NU (central) +CA +5024-10439 America/Regina CST - SK (most areas) +CA +5017-10750 America/Swift_Current CST - SK (midwest) +CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W) +CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west) +CA +6227-11421 America/Yellowknife Mountain - NT (central) +CA +682059-1334300 America/Inuvik Mountain - NT (west) +CA +4906-11631 America/Creston MST - BC (Creston) +CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John) +CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson) +CA +4916-12307 America/Vancouver Pacific - BC (most areas) +CA +6043-13503 America/Whitehorse Pacific - Yukon (east) +CA +6404-13925 America/Dawson Pacific - Yukon (west) +CC -1210+09655 Indian/Cocos +CD -0418+01518 Africa/Kinshasa Dem. Rep. of Congo (west) +CD -1140+02728 Africa/Lubumbashi Dem. Rep. 
of Congo (east) +CF +0422+01835 Africa/Bangui +CG -0416+01517 Africa/Brazzaville +CH +4723+00832 Europe/Zurich +CI +0519-00402 Africa/Abidjan +CK -2114-15946 Pacific/Rarotonga +CL -3327-07040 America/Santiago Chile (most areas) +CL -5309-07055 America/Punta_Arenas Region of Magallanes +CL -2709-10926 Pacific/Easter Easter Island +CM +0403+00942 Africa/Douala +CN +3114+12128 Asia/Shanghai Beijing Time +CN +4348+08735 Asia/Urumqi Xinjiang Time +CO +0436-07405 America/Bogota +CR +0956-08405 America/Costa_Rica +CU +2308-08222 America/Havana +CV +1455-02331 Atlantic/Cape_Verde +CW +1211-06900 America/Curacao +CX -1025+10543 Indian/Christmas +CY +3510+03322 Asia/Nicosia Cyprus (most areas) +CY +3507+03357 Asia/Famagusta Northern Cyprus +CZ +5005+01426 Europe/Prague +DE +5230+01322 Europe/Berlin Germany (most areas) +DE +4742+00841 Europe/Busingen Busingen +DJ +1136+04309 Africa/Djibouti +DK +5540+01235 Europe/Copenhagen +DM +1518-06124 America/Dominica +DO +1828-06954 America/Santo_Domingo +DZ +3647+00303 Africa/Algiers +EC -0210-07950 America/Guayaquil Ecuador (mainland) +EC -0054-08936 Pacific/Galapagos Galapagos Islands +EE +5925+02445 Europe/Tallinn +EG +3003+03115 Africa/Cairo +EH +2709-01312 Africa/El_Aaiun +ER +1520+03853 Africa/Asmara +ES +4024-00341 Europe/Madrid Spain (mainland) +ES +3553-00519 Africa/Ceuta Ceuta, Melilla +ES +2806-01524 Atlantic/Canary Canary Islands +ET +0902+03842 Africa/Addis_Ababa +FI +6010+02458 Europe/Helsinki +FJ -1808+17825 Pacific/Fiji +FK -5142-05751 Atlantic/Stanley +FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap +FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape +FM +0519+16259 Pacific/Kosrae Kosrae +FO +6201-00646 Atlantic/Faroe +FR +4852+00220 Europe/Paris +GA +0023+00927 Africa/Libreville +GB +513030-0000731 Europe/London +GD +1203-06145 America/Grenada +GE +4143+04449 Asia/Tbilisi +GF +0456-05220 America/Cayenne +GG +492717-0023210 Europe/Guernsey +GH +0533-00013 Africa/Accra +GI +3608-00521 Europe/Gibraltar +GL +6411-05144 America/Nuuk Greenland (most areas) +GL +7646-01840 America/Danmarkshavn National Park (east coast) +GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit +GL +7634-06847 America/Thule Thule/Pituffik +GM +1328-01639 Africa/Banjul +GN +0931-01343 Africa/Conakry +GP +1614-06132 America/Guadeloupe +GQ +0345+00847 Africa/Malabo +GR +3758+02343 Europe/Athens +GS -5416-03632 Atlantic/South_Georgia +GT +1438-09031 America/Guatemala +GU +1328+14445 Pacific/Guam +GW +1151-01535 Africa/Bissau +GY +0648-05810 America/Guyana +HK +2217+11409 Asia/Hong_Kong +HN +1406-08713 America/Tegucigalpa +HR +4548+01558 Europe/Zagreb +HT +1832-07220 America/Port-au-Prince +HU +4730+01905 Europe/Budapest +ID -0610+10648 Asia/Jakarta Java, Sumatra +ID -0002+10920 Asia/Pontianak Borneo (west, central) +ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west) +ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas +IE +5320-00615 Europe/Dublin +IL +314650+0351326 Asia/Jerusalem +IM +5409-00428 Europe/Isle_of_Man +IN +2232+08822 Asia/Kolkata +IO -0720+07225 Indian/Chagos +IQ +3321+04425 Asia/Baghdad +IR +3540+05126 Asia/Tehran +IS +6409-02151 Atlantic/Reykjavik +IT +4154+01229 Europe/Rome +JE +491101-0020624 Europe/Jersey +JM +175805-0764736 America/Jamaica +JO +3157+03556 Asia/Amman +JP +353916+1394441 Asia/Tokyo +KE -0117+03649 Africa/Nairobi +KG +4254+07436 Asia/Bishkek +KH +1133+10455 Asia/Phnom_Penh +KI +0125+17300 Pacific/Tarawa Gilbert Islands +KI -0308-17105 Pacific/Enderbury 
Phoenix Islands +KI +0152-15720 Pacific/Kiritimati Line Islands +KM -1141+04316 Indian/Comoro +KN +1718-06243 America/St_Kitts +KP +3901+12545 Asia/Pyongyang +KR +3733+12658 Asia/Seoul +KW +2920+04759 Asia/Kuwait +KY +1918-08123 America/Cayman +KZ +4315+07657 Asia/Almaty Kazakhstan (most areas) +KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda +KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay +KZ +5017+05710 Asia/Aqtobe Aqtobe/Aktobe +KZ +4431+05016 Asia/Aqtau Mangghystau/Mankistau +KZ +4707+05156 Asia/Atyrau Atyrau/Atirau/Gur'yev +KZ +5113+05121 Asia/Oral West Kazakhstan +LA +1758+10236 Asia/Vientiane +LB +3353+03530 Asia/Beirut +LC +1401-06100 America/St_Lucia +LI +4709+00931 Europe/Vaduz +LK +0656+07951 Asia/Colombo +LR +0618-01047 Africa/Monrovia +LS -2928+02730 Africa/Maseru +LT +5441+02519 Europe/Vilnius +LU +4936+00609 Europe/Luxembourg +LV +5657+02406 Europe/Riga +LY +3254+01311 Africa/Tripoli +MA +3339-00735 Africa/Casablanca +MC +4342+00723 Europe/Monaco +MD +4700+02850 Europe/Chisinau +ME +4226+01916 Europe/Podgorica +MF +1804-06305 America/Marigot +MG -1855+04731 Indian/Antananarivo +MH +0709+17112 Pacific/Majuro Marshall Islands (most areas) +MH +0905+16720 Pacific/Kwajalein Kwajalein +MK +4159+02126 Europe/Skopje +ML +1239-00800 Africa/Bamako +MM +1647+09610 Asia/Yangon +MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas) +MN +4801+09139 Asia/Hovd Bayan-Olgiy, Govi-Altai, Hovd, Uvs, Zavkhan +MN +4804+11430 Asia/Choibalsan Dornod, Sukhbaatar +MO +221150+1133230 Asia/Macau +MP +1512+14545 Pacific/Saipan +MQ +1436-06105 America/Martinique +MR +1806-01557 Africa/Nouakchott +MS +1643-06213 America/Montserrat +MT +3554+01431 Europe/Malta +MU -2010+05730 Indian/Mauritius +MV +0410+07330 Indian/Maldives +MW -1547+03500 Africa/Blantyre +MX +1924-09909 America/Mexico_City Central Time +MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo +MX +2058-08937 America/Merida Central Time - Campeche, Yucatan +MX +2540-10019 America/Monterrey Central Time - Durango; Coahuila, Nuevo Leon, Tamaulipas (most areas) +MX +2550-09730 America/Matamoros Central Time US - Coahuila, Nuevo Leon, Tamaulipas (US border) +MX +2313-10625 America/Mazatlan Mountain Time - Baja California Sur, Nayarit, Sinaloa +MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua (most areas) +MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border) +MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora +MX +3232-11701 America/Tijuana Pacific Time US - Baja California +MX +2048-10515 America/Bahia_Banderas Central Time - Bahia de Banderas +MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula) +MY +0133+11020 Asia/Kuching Sabah, Sarawak +MZ -2558+03235 Africa/Maputo +NA -2234+01706 Africa/Windhoek +NC -2216+16627 Pacific/Noumea +NE +1331+00207 Africa/Niamey +NF -2903+16758 Pacific/Norfolk +NG +0627+00324 Africa/Lagos +NI +1209-08617 America/Managua +NL +5222+00454 Europe/Amsterdam +NO +5955+01045 Europe/Oslo +NP +2743+08519 Asia/Kathmandu +NR -0031+16655 Pacific/Nauru +NU -1901-16955 Pacific/Niue +NZ -3652+17446 Pacific/Auckland New Zealand (most areas) +NZ -4357-17633 Pacific/Chatham Chatham Islands +OM +2336+05835 Asia/Muscat +PA +0858-07932 America/Panama +PE -1203-07703 America/Lima +PF -1732-14934 Pacific/Tahiti Society Islands +PF -0900-13930 Pacific/Marquesas Marquesas Islands +PF -2308-13457 Pacific/Gambier Gambier Islands +PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas) +PG -0613+15534 Pacific/Bougainville Bougainville +PH +1435+12100 
Asia/Manila +PK +2452+06703 Asia/Karachi +PL +5215+02100 Europe/Warsaw +PM +4703-05620 America/Miquelon +PN -2504-13005 Pacific/Pitcairn +PR +182806-0660622 America/Puerto_Rico +PS +3130+03428 Asia/Gaza Gaza Strip +PS +313200+0350542 Asia/Hebron West Bank +PT +3843-00908 Europe/Lisbon Portugal (mainland) +PT +3238-01654 Atlantic/Madeira Madeira Islands +PT +3744-02540 Atlantic/Azores Azores +PW +0720+13429 Pacific/Palau +PY -2516-05740 America/Asuncion +QA +2517+05132 Asia/Qatar +RE -2052+05528 Indian/Reunion +RO +4426+02606 Europe/Bucharest +RS +4450+02030 Europe/Belgrade +RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad +RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area +# The obsolescent zone.tab format cannot represent Europe/Simferopol well. +# Put it in RU section and list as UA. See "territorial claims" above. +# Programs should use zone1970.tab instead; see above. +UA +4457+03406 Europe/Simferopol Crimea +RU +5836+04939 Europe/Kirov MSK+00 - Kirov +RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan +RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd +RU +5134+04602 Europe/Saratov MSK+01 - Saratov +RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk +RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia +RU +5651+06036 Asia/Yekaterinburg MSK+02 - Urals +RU +5500+07324 Asia/Omsk MSK+03 - Omsk +RU +5502+08255 Asia/Novosibirsk MSK+04 - Novosibirsk +RU +5322+08345 Asia/Barnaul MSK+04 - Altai +RU +5630+08458 Asia/Tomsk MSK+04 - Tomsk +RU +5345+08707 Asia/Novokuznetsk MSK+04 - Kemerovo +RU +5601+09250 Asia/Krasnoyarsk MSK+04 - Krasnoyarsk area +RU +5216+10420 Asia/Irkutsk MSK+05 - Irkutsk, Buryatia +RU +5203+11328 Asia/Chita MSK+06 - Zabaykalsky +RU +6200+12940 Asia/Yakutsk MSK+06 - Lena River +RU +623923+1353314 Asia/Khandyga MSK+06 - Tomponsky, Ust-Maysky +RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River +RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky +RU +5934+15048 Asia/Magadan MSK+08 - Magadan +RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island +RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is +RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka +RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea +RW -0157+03004 Africa/Kigali +SA +2438+04643 Asia/Riyadh +SB -0932+16012 Pacific/Guadalcanal +SC -0440+05528 Indian/Mahe +SD +1536+03232 Africa/Khartoum +SE +5920+01803 Europe/Stockholm +SG +0117+10351 Asia/Singapore +SH -1555-00542 Atlantic/St_Helena +SI +4603+01431 Europe/Ljubljana +SJ +7800+01600 Arctic/Longyearbyen +SK +4809+01707 Europe/Bratislava +SL +0830-01315 Africa/Freetown +SM +4355+01228 Europe/San_Marino +SN +1440-01726 Africa/Dakar +SO +0204+04522 Africa/Mogadishu +SR +0550-05510 America/Paramaribo +SS +0451+03137 Africa/Juba +ST +0020+00644 Africa/Sao_Tome +SV +1342-08912 America/El_Salvador +SX +180305-0630250 America/Lower_Princes +SY +3330+03618 Asia/Damascus +SZ -2618+03106 Africa/Mbabane +TC +2128-07108 America/Grand_Turk +TD +1207+01503 Africa/Ndjamena +TF -492110+0701303 Indian/Kerguelen +TG +0608+00113 Africa/Lome +TH +1345+10031 Asia/Bangkok +TJ +3835+06848 Asia/Dushanbe +TK -0922-17114 Pacific/Fakaofo +TL -0833+12535 Asia/Dili +TM +3757+05823 Asia/Ashgabat +TN +3648+01011 Africa/Tunis +TO -2110-17510 Pacific/Tongatapu +TR +4101+02858 Europe/Istanbul +TT +1039-06131 America/Port_of_Spain +TV -0831+17913 Pacific/Funafuti +TW +2503+12130 Asia/Taipei +TZ -0648+03917 Africa/Dar_es_Salaam +UA +5026+03031 Europe/Kiev Ukraine (most areas) +UA +4837+02218 Europe/Uzhgorod Transcarpathia +UA +4750+03510 Europe/Zaporozhye Zaporozhye and east 
Lugansk +UG +0019+03225 Africa/Kampala +UM +2813-17722 Pacific/Midway Midway Islands +UM +1917+16637 Pacific/Wake Wake Island +US +404251-0740023 America/New_York Eastern (most areas) +US +421953-0830245 America/Detroit Eastern - MI (most areas) +US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area) +US +364947-0845057 America/Kentucky/Monticello Eastern - KY (Wayne) +US +394606-0860929 America/Indiana/Indianapolis Eastern - IN (most areas) +US +384038-0873143 America/Indiana/Vincennes Eastern - IN (Da, Du, K, Mn) +US +410305-0863611 America/Indiana/Winamac Eastern - IN (Pulaski) +US +382232-0862041 America/Indiana/Marengo Eastern - IN (Crawford) +US +382931-0871643 America/Indiana/Petersburg Eastern - IN (Pike) +US +384452-0850402 America/Indiana/Vevay Eastern - IN (Switzerland) +US +415100-0873900 America/Chicago Central (most areas) +US +375711-0864541 America/Indiana/Tell_City Central - IN (Perry) +US +411745-0863730 America/Indiana/Knox Central - IN (Starke) +US +450628-0873651 America/Menominee Central - MI (Wisconsin border) +US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver) +US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural) +US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer) +US +394421-1045903 America/Denver Mountain (most areas) +US +433649-1161209 America/Boise Mountain - ID (south); OR (east) +US +332654-1120424 America/Phoenix MST - Arizona (except Navajo) +US +340308-1181434 America/Los_Angeles Pacific +US +611305-1495401 America/Anchorage Alaska (most areas) +US +581807-1342511 America/Juneau Alaska - Juneau area +US +571035-1351807 America/Sitka Alaska - Sitka area +US +550737-1313435 America/Metlakatla Alaska - Annette Island +US +593249-1394338 America/Yakutat Alaska - Yakutat +US +643004-1652423 America/Nome Alaska (west) +US +515248-1763929 America/Adak Aleutian Islands +US +211825-1575130 Pacific/Honolulu Hawaii +UY -345433-0561245 America/Montevideo +UZ +3940+06648 Asia/Samarkand Uzbekistan (west) +UZ +4120+06918 Asia/Tashkent Uzbekistan (east) +VA +415408+0122711 Europe/Vatican +VC +1309-06114 America/St_Vincent +VE +1030-06656 America/Caracas +VG +1827-06437 America/Tortola +VI +1821-06456 America/St_Thomas +VN +1045+10640 Asia/Ho_Chi_Minh +VU -1740+16825 Pacific/Efate +WF -1318-17610 Pacific/Wallis +WS -1350-17144 Pacific/Apia +YE +1245+04512 Asia/Aden +YT -1247+04514 Indian/Mayotte +ZA -2615+02800 Africa/Johannesburg +ZM -1525+02817 Africa/Lusaka +ZW -1750+03103 Africa/Harare diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/zone1970.tab b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/zone1970.tab new file mode 100644 index 0000000..53ee77e --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/test/zoneinfo/zone1970.tab @@ -0,0 +1,384 @@ +# tzdb timezone descriptions +# +# This file is in the public domain. +# +# From Paul Eggert (2018-06-27): +# This file contains a table where each row stands for a timezone where +# civil timestamps have agreed since 1970. Columns are separated by +# a single tab. Lines beginning with '#' are comments. All text uses +# UTF-8 encoding. The columns of the table are as follows: +# +# 1. The countries that overlap the timezone, as a comma-separated list +# of ISO 3166 2-character country codes. See the file 'iso3166.tab'. +# 2. 
Latitude and longitude of the timezone's principal location +# in ISO 6709 sign-degrees-minutes-seconds format, +# either ±DDMM±DDDMM or ±DDMMSS±DDDMMSS, +# first latitude (+ is north), then longitude (+ is east). +# 3. Timezone name used in value of TZ environment variable. +# Please see the theory.html file for how these names are chosen. +# If multiple timezones overlap a country, each has a row in the +# table, with each column 1 containing the country code. +# 4. Comments; present if and only if a country has multiple timezones. +# +# If a timezone covers multiple countries, the most-populous city is used, +# and that country is listed first in column 1; any other countries +# are listed alphabetically by country code. The table is sorted +# first by country code, then (if possible) by an order within the +# country that (1) makes some geographical sense, and (2) puts the +# most populous timezones first, where that does not contradict (1). +# +# This table is intended as an aid for users, to help them select timezones +# appropriate for their practical needs. It is not intended to take or +# endorse any position on legal or territorial claims. +# +#country- +#codes coordinates TZ comments +AD +4230+00131 Europe/Andorra +AE,OM +2518+05518 Asia/Dubai +AF +3431+06912 Asia/Kabul +AL +4120+01950 Europe/Tirane +AM +4011+04430 Asia/Yerevan +AQ -6617+11031 Antarctica/Casey Casey +AQ -6835+07758 Antarctica/Davis Davis +AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville +AQ -6736+06253 Antarctica/Mawson Mawson +AQ -6448-06406 Antarctica/Palmer Palmer +AQ -6734-06808 Antarctica/Rothera Rothera +AQ -690022+0393524 Antarctica/Syowa Syowa +AQ -720041+0023206 Antarctica/Troll Troll +AQ -7824+10654 Antarctica/Vostok Vostok +AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF) +AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF) +AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN) +AR -2411-06518 America/Argentina/Jujuy Jujuy (JY) +AR -2649-06513 America/Argentina/Tucuman Tucumán (TM) +AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH) +AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR) +AR -3132-06831 America/Argentina/San_Juan San Juan (SJ) +AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ) +AR -3319-06621 America/Argentina/San_Luis San Luis (SL) +AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC) +AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF) +AS,UM -1416-17042 Pacific/Pago_Pago Samoa, Midway +AT +4813+01620 Europe/Vienna +AU -3133+15905 Australia/Lord_Howe Lord Howe Island +AU -5430+15857 Antarctica/Macquarie Macquarie Island +AU -4253+14719 Australia/Hobart Tasmania (most areas) +AU -3956+14352 Australia/Currie Tasmania (King Island) +AU -3749+14458 Australia/Melbourne Victoria +AU -3352+15113 Australia/Sydney New South Wales (most areas) +AU -3157+14127 Australia/Broken_Hill New South Wales (Yancowinna) +AU -2728+15302 Australia/Brisbane Queensland (most areas) +AU -2016+14900 Australia/Lindeman Queensland (Whitsunday Islands) +AU -3455+13835 Australia/Adelaide South Australia +AU -1228+13050 Australia/Darwin Northern Territory +AU -3157+11551 Australia/Perth Western Australia (most areas) +AU -3143+12852 Australia/Eucla Western Australia (Eucla) +AZ +4023+04951 Asia/Baku +BB +1306-05937 America/Barbados +BD +2343+09025 Asia/Dhaka +BE +5050+00420 Europe/Brussels +BG +4241+02319 Europe/Sofia +BM +3217-06446 Atlantic/Bermuda +BN +0456+11455 Asia/Brunei +BO 
-1630-06809 America/La_Paz +BR -0351-03225 America/Noronha Atlantic islands +BR -0127-04829 America/Belem Pará (east); Amapá +BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB) +BR -0803-03454 America/Recife Pernambuco +BR -0712-04812 America/Araguaina Tocantins +BR -0940-03543 America/Maceio Alagoas, Sergipe +BR -1259-03831 America/Bahia Bahia +BR -2332-04637 America/Sao_Paulo Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS) +BR -2027-05437 America/Campo_Grande Mato Grosso do Sul +BR -1535-05605 America/Cuiaba Mato Grosso +BR -0226-05452 America/Santarem Pará (west) +BR -0846-06354 America/Porto_Velho Rondônia +BR +0249-06040 America/Boa_Vista Roraima +BR -0308-06001 America/Manaus Amazonas (east) +BR -0640-06952 America/Eirunepe Amazonas (west) +BR -0958-06748 America/Rio_Branco Acre +BS +2505-07721 America/Nassau +BT +2728+08939 Asia/Thimphu +BY +5354+02734 Europe/Minsk +BZ +1730-08812 America/Belize +CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast) +CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE +CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton) +CA +4606-06447 America/Moncton Atlantic - New Brunswick +CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas) +CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore) +CA +4339-07923 America/Toronto Eastern - ON, QC (most areas) +CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73) +CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay) +CA +6344-06828 America/Iqaluit Eastern - NU (most east areas) +CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung) +CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H) +CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba +CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances) +CA +744144-0944945 America/Resolute Central - NU (Resolute) +CA +624900-0920459 America/Rankin_Inlet Central - NU (central) +CA +5024-10439 America/Regina CST - SK (most areas) +CA +5017-10750 America/Swift_Current CST - SK (midwest) +CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W) +CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west) +CA +6227-11421 America/Yellowknife Mountain - NT (central) +CA +682059-1334300 America/Inuvik Mountain - NT (west) +CA +4906-11631 America/Creston MST - BC (Creston) +CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John) +CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson) +CA +4916-12307 America/Vancouver Pacific - BC (most areas) +CA +6043-13503 America/Whitehorse Pacific - Yukon (east) +CA +6404-13925 America/Dawson Pacific - Yukon (west) +CC -1210+09655 Indian/Cocos +CH,DE,LI +4723+00832 Europe/Zurich Swiss time +CI,BF,GM,GN,ML,MR,SH,SL,SN,TG +0519-00402 Africa/Abidjan +CK -2114-15946 Pacific/Rarotonga +CL -3327-07040 America/Santiago Chile (most areas) +CL -5309-07055 America/Punta_Arenas Region of Magallanes +CL -2709-10926 Pacific/Easter Easter Island +CN +3114+12128 Asia/Shanghai Beijing Time +CN +4348+08735 Asia/Urumqi Xinjiang Time +CO +0436-07405 America/Bogota +CR +0956-08405 America/Costa_Rica +CU +2308-08222 America/Havana +CV +1455-02331 Atlantic/Cape_Verde +CW,AW,BQ,SX +1211-06900 America/Curacao +CX -1025+10543 Indian/Christmas +CY +3510+03322 Asia/Nicosia Cyprus (most areas) +CY +3507+03357 Asia/Famagusta Northern Cyprus +CZ,SK +5005+01426 Europe/Prague +DE +5230+01322 Europe/Berlin Germany (most areas) +DK +5540+01235 Europe/Copenhagen +DO +1828-06954 America/Santo_Domingo +DZ 
+3647+00303 Africa/Algiers +EC -0210-07950 America/Guayaquil Ecuador (mainland) +EC -0054-08936 Pacific/Galapagos Galápagos Islands +EE +5925+02445 Europe/Tallinn +EG +3003+03115 Africa/Cairo +EH +2709-01312 Africa/El_Aaiun +ES +4024-00341 Europe/Madrid Spain (mainland) +ES +3553-00519 Africa/Ceuta Ceuta, Melilla +ES +2806-01524 Atlantic/Canary Canary Islands +FI,AX +6010+02458 Europe/Helsinki +FJ -1808+17825 Pacific/Fiji +FK -5142-05751 Atlantic/Stanley +FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap +FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape +FM +0519+16259 Pacific/Kosrae Kosrae +FO +6201-00646 Atlantic/Faroe +FR +4852+00220 Europe/Paris +GB,GG,IM,JE +513030-0000731 Europe/London +GE +4143+04449 Asia/Tbilisi +GF +0456-05220 America/Cayenne +GH +0533-00013 Africa/Accra +GI +3608-00521 Europe/Gibraltar +GL +6411-05144 America/Nuuk Greenland (most areas) +GL +7646-01840 America/Danmarkshavn National Park (east coast) +GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit +GL +7634-06847 America/Thule Thule/Pituffik +GR +3758+02343 Europe/Athens +GS -5416-03632 Atlantic/South_Georgia +GT +1438-09031 America/Guatemala +GU,MP +1328+14445 Pacific/Guam +GW +1151-01535 Africa/Bissau +GY +0648-05810 America/Guyana +HK +2217+11409 Asia/Hong_Kong +HN +1406-08713 America/Tegucigalpa +HT +1832-07220 America/Port-au-Prince +HU +4730+01905 Europe/Budapest +ID -0610+10648 Asia/Jakarta Java, Sumatra +ID -0002+10920 Asia/Pontianak Borneo (west, central) +ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west) +ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas +IE +5320-00615 Europe/Dublin +IL +314650+0351326 Asia/Jerusalem +IN +2232+08822 Asia/Kolkata +IO -0720+07225 Indian/Chagos +IQ +3321+04425 Asia/Baghdad +IR +3540+05126 Asia/Tehran +IS +6409-02151 Atlantic/Reykjavik +IT,SM,VA +4154+01229 Europe/Rome +JM +175805-0764736 America/Jamaica +JO +3157+03556 Asia/Amman +JP +353916+1394441 Asia/Tokyo +KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT -0117+03649 Africa/Nairobi +KG +4254+07436 Asia/Bishkek +KI +0125+17300 Pacific/Tarawa Gilbert Islands +KI -0308-17105 Pacific/Enderbury Phoenix Islands +KI +0152-15720 Pacific/Kiritimati Line Islands +KP +3901+12545 Asia/Pyongyang +KR +3733+12658 Asia/Seoul +KZ +4315+07657 Asia/Almaty Kazakhstan (most areas) +KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda +KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay +KZ +5017+05710 Asia/Aqtobe Aqtöbe/Aktobe +KZ +4431+05016 Asia/Aqtau Mangghystaū/Mankistau +KZ +4707+05156 Asia/Atyrau Atyraū/Atirau/Gur'yev +KZ +5113+05121 Asia/Oral West Kazakhstan +LB +3353+03530 Asia/Beirut +LK +0656+07951 Asia/Colombo +LR +0618-01047 Africa/Monrovia +LT +5441+02519 Europe/Vilnius +LU +4936+00609 Europe/Luxembourg +LV +5657+02406 Europe/Riga +LY +3254+01311 Africa/Tripoli +MA +3339-00735 Africa/Casablanca +MC +4342+00723 Europe/Monaco +MD +4700+02850 Europe/Chisinau +MH +0709+17112 Pacific/Majuro Marshall Islands (most areas) +MH +0905+16720 Pacific/Kwajalein Kwajalein +MM +1647+09610 Asia/Yangon +MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas) +MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan +MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar +MO +221150+1133230 Asia/Macau +MQ +1436-06105 America/Martinique +MT +3554+01431 Europe/Malta +MU -2010+05730 Indian/Mauritius +MV +0410+07330 Indian/Maldives +MX +1924-09909 America/Mexico_City Central Time +MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo +MX 
+2058-08937 America/Merida Central Time - Campeche, Yucatán +MX +2540-10019 America/Monterrey Central Time - Durango; Coahuila, Nuevo León, Tamaulipas (most areas) +MX +2550-09730 America/Matamoros Central Time US - Coahuila, Nuevo León, Tamaulipas (US border) +MX +2313-10625 America/Mazatlan Mountain Time - Baja California Sur, Nayarit, Sinaloa +MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua (most areas) +MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border) +MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora +MX +3232-11701 America/Tijuana Pacific Time US - Baja California +MX +2048-10515 America/Bahia_Banderas Central Time - Bahía de Banderas +MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula) +MY +0133+11020 Asia/Kuching Sabah, Sarawak +MZ,BI,BW,CD,MW,RW,ZM,ZW -2558+03235 Africa/Maputo Central Africa Time +NA -2234+01706 Africa/Windhoek +NC -2216+16627 Pacific/Noumea +NF -2903+16758 Pacific/Norfolk +NG,AO,BJ,CD,CF,CG,CM,GA,GQ,NE +0627+00324 Africa/Lagos West Africa Time +NI +1209-08617 America/Managua +NL +5222+00454 Europe/Amsterdam +NO,SJ +5955+01045 Europe/Oslo +NP +2743+08519 Asia/Kathmandu +NR -0031+16655 Pacific/Nauru +NU -1901-16955 Pacific/Niue +NZ,AQ -3652+17446 Pacific/Auckland New Zealand time +NZ -4357-17633 Pacific/Chatham Chatham Islands +PA,KY +0858-07932 America/Panama +PE -1203-07703 America/Lima +PF -1732-14934 Pacific/Tahiti Society Islands +PF -0900-13930 Pacific/Marquesas Marquesas Islands +PF -2308-13457 Pacific/Gambier Gambier Islands +PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas) +PG -0613+15534 Pacific/Bougainville Bougainville +PH +1435+12100 Asia/Manila +PK +2452+06703 Asia/Karachi +PL +5215+02100 Europe/Warsaw +PM +4703-05620 America/Miquelon +PN -2504-13005 Pacific/Pitcairn +PR +182806-0660622 America/Puerto_Rico +PS +3130+03428 Asia/Gaza Gaza Strip +PS +313200+0350542 Asia/Hebron West Bank +PT +3843-00908 Europe/Lisbon Portugal (mainland) +PT +3238-01654 Atlantic/Madeira Madeira Islands +PT +3744-02540 Atlantic/Azores Azores +PW +0720+13429 Pacific/Palau +PY -2516-05740 America/Asuncion +QA,BH +2517+05132 Asia/Qatar +RE,TF -2052+05528 Indian/Reunion Réunion, Crozet, Scattered Islands +RO +4426+02606 Europe/Bucharest +RS,BA,HR,ME,MK,SI +4450+02030 Europe/Belgrade +RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad +RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area +# Mention RU and UA alphabetically. See "territorial claims" above. 
+RU,UA +4457+03406 Europe/Simferopol Crimea +RU +5836+04939 Europe/Kirov MSK+00 - Kirov +RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan +RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd +RU +5134+04602 Europe/Saratov MSK+01 - Saratov +RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk +RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia +RU +5651+06036 Asia/Yekaterinburg MSK+02 - Urals +RU +5500+07324 Asia/Omsk MSK+03 - Omsk +RU +5502+08255 Asia/Novosibirsk MSK+04 - Novosibirsk +RU +5322+08345 Asia/Barnaul MSK+04 - Altai +RU +5630+08458 Asia/Tomsk MSK+04 - Tomsk +RU +5345+08707 Asia/Novokuznetsk MSK+04 - Kemerovo +RU +5601+09250 Asia/Krasnoyarsk MSK+04 - Krasnoyarsk area +RU +5216+10420 Asia/Irkutsk MSK+05 - Irkutsk, Buryatia +RU +5203+11328 Asia/Chita MSK+06 - Zabaykalsky +RU +6200+12940 Asia/Yakutsk MSK+06 - Lena River +RU +623923+1353314 Asia/Khandyga MSK+06 - Tomponsky, Ust-Maysky +RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River +RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky +RU +5934+15048 Asia/Magadan MSK+08 - Magadan +RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island +RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is +RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka +RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea +SA,KW,YE +2438+04643 Asia/Riyadh +SB -0932+16012 Pacific/Guadalcanal +SC -0440+05528 Indian/Mahe +SD +1536+03232 Africa/Khartoum +SE +5920+01803 Europe/Stockholm +SG +0117+10351 Asia/Singapore +SR +0550-05510 America/Paramaribo +SS +0451+03137 Africa/Juba +ST +0020+00644 Africa/Sao_Tome +SV +1342-08912 America/El_Salvador +SY +3330+03618 Asia/Damascus +TC +2128-07108 America/Grand_Turk +TD +1207+01503 Africa/Ndjamena +TF -492110+0701303 Indian/Kerguelen Kerguelen, St Paul Island, Amsterdam Island +TH,KH,LA,VN +1345+10031 Asia/Bangkok Indochina (most areas) +TJ +3835+06848 Asia/Dushanbe +TK -0922-17114 Pacific/Fakaofo +TL -0833+12535 Asia/Dili +TM +3757+05823 Asia/Ashgabat +TN +3648+01011 Africa/Tunis +TO -2110-17510 Pacific/Tongatapu +TR +4101+02858 Europe/Istanbul +TT,AG,AI,BL,DM,GD,GP,KN,LC,MF,MS,VC,VG,VI +1039-06131 America/Port_of_Spain +TV -0831+17913 Pacific/Funafuti +TW +2503+12130 Asia/Taipei +UA +5026+03031 Europe/Kiev Ukraine (most areas) +UA +4837+02218 Europe/Uzhgorod Transcarpathia +UA +4750+03510 Europe/Zaporozhye Zaporozhye and east Lugansk +UM +1917+16637 Pacific/Wake Wake Island +US +404251-0740023 America/New_York Eastern (most areas) +US +421953-0830245 America/Detroit Eastern - MI (most areas) +US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area) +US +364947-0845057 America/Kentucky/Monticello Eastern - KY (Wayne) +US +394606-0860929 America/Indiana/Indianapolis Eastern - IN (most areas) +US +384038-0873143 America/Indiana/Vincennes Eastern - IN (Da, Du, K, Mn) +US +410305-0863611 America/Indiana/Winamac Eastern - IN (Pulaski) +US +382232-0862041 America/Indiana/Marengo Eastern - IN (Crawford) +US +382931-0871643 America/Indiana/Petersburg Eastern - IN (Pike) +US +384452-0850402 America/Indiana/Vevay Eastern - IN (Switzerland) +US +415100-0873900 America/Chicago Central (most areas) +US +375711-0864541 America/Indiana/Tell_City Central - IN (Perry) +US +411745-0863730 America/Indiana/Knox Central - IN (Starke) +US +450628-0873651 America/Menominee Central - MI (Wisconsin border) +US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver) +US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural) +US +471551-1014640 America/North_Dakota/Beulah Central - ND 
(Mercer) +US +394421-1045903 America/Denver Mountain (most areas) +US +433649-1161209 America/Boise Mountain - ID (south); OR (east) +US +332654-1120424 America/Phoenix MST - Arizona (except Navajo) +US +340308-1181434 America/Los_Angeles Pacific +US +611305-1495401 America/Anchorage Alaska (most areas) +US +581807-1342511 America/Juneau Alaska - Juneau area +US +571035-1351807 America/Sitka Alaska - Sitka area +US +550737-1313435 America/Metlakatla Alaska - Annette Island +US +593249-1394338 America/Yakutat Alaska - Yakutat +US +643004-1652423 America/Nome Alaska (west) +US +515248-1763929 America/Adak Aleutian Islands +US,UM +211825-1575130 Pacific/Honolulu Hawaii +UY -345433-0561245 America/Montevideo +UZ +3940+06648 Asia/Samarkand Uzbekistan (west) +UZ +4120+06918 Asia/Tashkent Uzbekistan (east) +VE +1030-06656 America/Caracas +VN +1045+10640 Asia/Ho_Chi_Minh Vietnam (south) +VU -1740+16825 Pacific/Efate +WF -1318-17610 Pacific/Wallis +WS -1350-17144 Pacific/Apia +ZA,LS,SZ -2615+02800 Africa/Johannesburg diff --git a/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/tzinfo.gemspec b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/tzinfo.gemspec new file mode 100644 index 0000000..c868d47 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/tzinfo.gemspec @@ -0,0 +1,21 @@ +Gem::Specification.new do |s| + s.name = 'tzinfo' + s.version = '1.2.8' + s.summary = 'Daylight savings aware timezone library' + s.description = 'TZInfo provides daylight savings aware transformations between times in different time zones.' + s.author = 'Philip Ross' + s.email = 'phil.ross@gmail.com' + s.homepage = 'https://tzinfo.github.io' + s.license = 'MIT' + s.files = %w(CHANGES.md LICENSE Rakefile README.md tzinfo.gemspec .yardopts) + + Dir['lib/**/*.rb'].delete_if {|f| f.include?('.svn')} + + Dir['test/**/*.rb'].delete_if {|f| f.include?('.svn')} + + Dir['test/zoneinfo/**/*'].delete_if {|f| f.include?('.svn') || File.symlink?(f)} + s.platform = Gem::Platform::RUBY + s.require_path = 'lib' + s.rdoc_options << '--title' << 'TZInfo' << + '--main' << 'README.md' + s.extra_rdoc_files = ['README.md', 'CHANGES.md', 'LICENSE'] + s.required_ruby_version = '>= 1.8.7' + s.add_dependency 'thread_safe', '~> 0.1' +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/CFPropertyList-3.0.2.gemspec b/vendor/bundle/ruby/2.6.0/specifications/CFPropertyList-3.0.2.gemspec new file mode 100644 index 0000000..b4c5910 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/CFPropertyList-3.0.2.gemspec @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +# stub: CFPropertyList 3.0.2 ruby lib + +Gem::Specification.new do |s| + s.name = "CFPropertyList".freeze + s.version = "3.0.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Christian Kruse".freeze] + s.date = "2019-12-06" + s.description = "This is a module to read, write and manipulate both binary and XML property lists as defined by apple.".freeze + s.email = "cjk@defunct.ch".freeze + s.extra_rdoc_files = ["README.rdoc".freeze] + s.files = ["README.rdoc".freeze] + s.homepage = "http://github.com/ckruse/CFPropertyList".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Read, write and manipulate both binary and XML property lists as defined by apple".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? 
:specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q<rake>.freeze, [">= 0.7.0"]) + else + s.add_dependency(%q<rake>.freeze, [">= 0.7.0"]) + end + else + s.add_dependency(%q<rake>.freeze, [">= 0.7.0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/activesupport-5.2.4.4.gemspec b/vendor/bundle/ruby/2.6.0/specifications/activesupport-5.2.4.4.gemspec new file mode 100644 index 0000000..69b328f --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/activesupport-5.2.4.4.gemspec @@ -0,0 +1,44 @@ +# -*- encoding: utf-8 -*- +# stub: activesupport 5.2.4.4 ruby lib + +Gem::Specification.new do |s| + s.name = "activesupport".freeze + s.version = "5.2.4.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "changelog_uri" => "https://github.com/rails/rails/blob/v5.2.4.4/activesupport/CHANGELOG.md", "source_code_uri" => "https://github.com/rails/rails/tree/v5.2.4.4/activesupport" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["David Heinemeier Hansson".freeze] + s.date = "2020-09-09" + s.description = "A toolkit of support libraries and Ruby core extensions extracted from the Rails framework. Rich support for multibyte strings, internationalization, time zones, and testing.".freeze + s.email = "david@loudthinking.com".freeze + s.homepage = "http://rubyonrails.org".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--encoding".freeze, "UTF-8".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.2.2".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "A toolkit of support libraries and Ruby core extensions extracted from the Rails framework.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q<i18n>.freeze, [">= 0.7", "< 2"]) + s.add_runtime_dependency(%q<tzinfo>.freeze, ["~> 1.1"]) + s.add_runtime_dependency(%q<minitest>.freeze, ["~> 5.1"]) + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + else + s.add_dependency(%q<i18n>.freeze, [">= 0.7", "< 2"]) + s.add_dependency(%q<tzinfo>.freeze, ["~> 1.1"]) + s.add_dependency(%q<minitest>.freeze, ["~> 5.1"]) + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + end + else + s.add_dependency(%q<i18n>.freeze, [">= 0.7", "< 2"]) + s.add_dependency(%q<tzinfo>.freeze, ["~> 1.1"]) + s.add_dependency(%q<minitest>.freeze, ["~> 5.1"]) + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/addressable-2.8.0.gemspec b/vendor/bundle/ruby/2.6.0/specifications/addressable-2.8.0.gemspec new file mode 100644 index 0000000..cd365f3 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/addressable-2.8.0.gemspec @@ -0,0 +1,39 @@ +# -*- encoding: utf-8 -*- +# stub: addressable 2.8.0 ruby lib + +Gem::Specification.new do |s| + s.name = "addressable".freeze + s.version = "2.8.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Bob Aman".freeze] + s.date = "2021-07-03" + s.description = "Addressable is an alternative implementation to the URI implementation that is\npart of Ruby's standard library.
It is flexible, offers heuristic parsing, and\nadditionally provides extensive support for IRIs and URI templates.\n".freeze + s.email = "bob@sporkmonger.com".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["README.md".freeze] + s.homepage = "https://github.com/sporkmonger/addressable".freeze + s.licenses = ["Apache-2.0".freeze] + s.rdoc_options = ["--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "URI Implementation".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q<public_suffix>.freeze, [">= 2.0.2", "< 5.0"]) + s.add_development_dependency(%q<bundler>.freeze, [">= 1.0", "< 3.0"]) + else + s.add_dependency(%q<public_suffix>.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q<bundler>.freeze, [">= 1.0", "< 3.0"]) + end + else + s.add_dependency(%q<public_suffix>.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q<bundler>.freeze, [">= 1.0", "< 3.0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/algoliasearch-1.27.5.gemspec b/vendor/bundle/ruby/2.6.0/specifications/algoliasearch-1.27.5.gemspec new file mode 100644 index 0000000..a0c6600 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/algoliasearch-1.27.5.gemspec @@ -0,0 +1,48 @@ +# -*- encoding: utf-8 -*- +# stub: algoliasearch 1.27.5 ruby lib + +Gem::Specification.new do |s| + s.name = "algoliasearch".freeze + s.version = "1.27.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/algolia/algoliasearch-client-ruby/issues", "changelog_uri" => "https://github.com/algolia/algoliasearch-client-ruby/blob/master/CHANGELOG.md", "documentation_uri" => "http://www.rubydoc.info/gems/algoliasearch", "homepage_uri" => "https://www.algolia.com/doc/api-client/ruby/getting-started/", "source_code_uri" => "https://github.com/algolia/algoliasearch-client-ruby" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Algolia".freeze] + s.date = "2020-10-28" + s.description = "A simple Ruby client for the algolia.com REST API".freeze + s.email = "contact@algolia.com".freeze + s.extra_rdoc_files = ["CHANGELOG.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.files = ["CHANGELOG.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.homepage = "https://github.com/algolia/algoliasearch-client-ruby".freeze + s.licenses = ["MIT".freeze] + s.post_install_message = "A new major version is available for Algolia! Please now use the https://rubygems.org/gems/algolia gem to get the latest features.".freeze + s.rubygems_version = "3.0.3.1".freeze + s.summary = "A simple Ruby client for the algolia.com REST API".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to?
:specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q.freeze, [">= 1.5.1"]) + s.add_runtime_dependency(%q.freeze, ["~> 2.8", ">= 2.8.3"]) + s.add_development_dependency(%q.freeze, ["~> 0"]) + s.add_development_dependency(%q.freeze, ["~> 0"]) + s.add_development_dependency(%q.freeze, ["~> 0"]) + else + s.add_dependency(%q.freeze, [">= 1.5.1"]) + s.add_dependency(%q.freeze, ["~> 2.8", ">= 2.8.3"]) + s.add_dependency(%q.freeze, ["~> 0"]) + s.add_dependency(%q.freeze, ["~> 0"]) + s.add_dependency(%q.freeze, ["~> 0"]) + end + else + s.add_dependency(%q.freeze, [">= 1.5.1"]) + s.add_dependency(%q.freeze, ["~> 2.8", ">= 2.8.3"]) + s.add_dependency(%q.freeze, ["~> 0"]) + s.add_dependency(%q.freeze, ["~> 0"]) + s.add_dependency(%q.freeze, ["~> 0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/atomos-0.1.3.gemspec b/vendor/bundle/ruby/2.6.0/specifications/atomos-0.1.3.gemspec new file mode 100644 index 0000000..1d17732 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/atomos-0.1.3.gemspec @@ -0,0 +1,42 @@ +# -*- encoding: utf-8 -*- +# stub: atomos 0.1.3 ruby lib + +Gem::Specification.new do |s| + s.name = "atomos".freeze + s.version = "0.1.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Samuel Giddins".freeze] + s.bindir = "exe".freeze + s.date = "2018-08-07" + s.email = ["segiddins@segiddins.me".freeze] + s.homepage = "https://github.com/segiddins/atomos".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "A simple gem to atomically write files".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q.freeze, ["~> 1.16"]) + s.add_development_dependency(%q.freeze, ["~> 10.0"]) + s.add_development_dependency(%q.freeze, ["~> 3.0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + else + s.add_dependency(%q.freeze, ["~> 1.16"]) + s.add_dependency(%q.freeze, ["~> 10.0"]) + s.add_dependency(%q.freeze, ["~> 3.0"]) + s.add_dependency(%q.freeze, [">= 0"]) + end + else + s.add_dependency(%q.freeze, ["~> 1.16"]) + s.add_dependency(%q.freeze, ["~> 10.0"]) + s.add_dependency(%q.freeze, ["~> 3.0"]) + s.add_dependency(%q.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/claide-1.0.3.gemspec b/vendor/bundle/ruby/2.6.0/specifications/claide-1.0.3.gemspec new file mode 100644 index 0000000..b02ffca --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/claide-1.0.3.gemspec @@ -0,0 +1,19 @@ +# -*- encoding: utf-8 -*- +# stub: claide 1.0.3 ruby lib + +Gem::Specification.new do |s| + s.name = "claide".freeze + s.version = "1.0.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze] + s.date = "2019-08-02" + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/CLAide".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.0.3.1".freeze + s.summary = "A small command-line interface framework.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/concurrent-ruby-1.1.7.gemspec b/vendor/bundle/ruby/2.6.0/specifications/concurrent-ruby-1.1.7.gemspec new file mode 100644 index 0000000..027fc52 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/concurrent-ruby-1.1.7.gemspec @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# stub: concurrent-ruby 1.1.7 ruby lib/concurrent-ruby + +Gem::Specification.new do |s| + s.name = "concurrent-ruby".freeze + s.version = "1.1.7" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "changelog_uri" => "https://github.com/ruby-concurrency/concurrent-ruby/blob/master/CHANGELOG.md", "source_code_uri" => "https://github.com/ruby-concurrency/concurrent-ruby" } if s.respond_to? :metadata= + s.require_paths = ["lib/concurrent-ruby".freeze] + s.authors = ["Jerry D'Antonio".freeze, "Petr Chalupa".freeze, "The Ruby Concurrency Team".freeze] + s.date = "2020-08-08" + s.description = "Modern concurrency tools including agents, futures, promises, thread pools, actors, supervisors, and more.\nInspired by Erlang, Clojure, Go, JavaScript, actors, and classic concurrency patterns.\n".freeze + s.email = "concurrent-ruby@googlegroups.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "LICENSE.txt".freeze, "CHANGELOG.md".freeze] + s.files = ["CHANGELOG.md".freeze, "LICENSE.txt".freeze, "README.md".freeze] + s.homepage = "http://www.concurrent-ruby.com".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 1.9.3".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Modern concurrency tools for Ruby. Inspired by Erlang, Clojure, Scala, Haskell, F#, C#, Java, and classic concurrency patterns.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/fuzzy_match-2.0.4.gemspec b/vendor/bundle/ruby/2.6.0/specifications/fuzzy_match-2.0.4.gemspec new file mode 100644 index 0000000..e499624 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/fuzzy_match-2.0.4.gemspec @@ -0,0 +1,63 @@ +# -*- encoding: utf-8 -*- +# stub: fuzzy_match 2.0.4 ruby lib + +Gem::Specification.new do |s| + s.name = "fuzzy_match".freeze + s.version = "2.0.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Seamus Abshere".freeze] + s.date = "2013-09-19" + s.description = "Find a needle in a haystack using string similarity and (optionally) regexp rules. Replaces loose_tight_dictionary.".freeze + s.email = ["seamus@abshere.net".freeze] + s.executables = ["fuzzy_match".freeze] + s.files = ["bin/fuzzy_match".freeze] + s.homepage = "https://github.com/seamusabshere/fuzzy_match".freeze + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Find a needle in a haystack using string similarity and (optionally) regexp rules. 
Replaces loose_tight_dictionary.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 3 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q.freeze, [">= 0.4.0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 3"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 0"]) + else + s.add_dependency(%q.freeze, [">= 0.4.0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 3"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + end + else + s.add_dependency(%q.freeze, [">= 0.4.0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 3"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/httpclient-2.8.3.gemspec b/vendor/bundle/ruby/2.6.0/specifications/httpclient-2.8.3.gemspec new file mode 100644 index 0000000..46e15a1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/httpclient-2.8.3.gemspec @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# stub: httpclient 2.8.3 ruby lib + +Gem::Specification.new do |s| + s.name = "httpclient".freeze + s.version = "2.8.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Hiroshi Nakamura".freeze] + s.date = "2016-12-09" + s.email = "nahi@ruby-lang.org".freeze + s.executables = ["httpclient".freeze] + s.files = ["bin/httpclient".freeze] + s.homepage = "https://github.com/nahi/httpclient".freeze + s.licenses = ["ruby".freeze] + s.rubygems_version = "3.0.3.1".freeze + s.summary = "gives something like the functionality of libwww-perl (LWP) in Ruby".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/i18n-1.8.5.gemspec b/vendor/bundle/ruby/2.6.0/specifications/i18n-1.8.5.gemspec new file mode 100644 index 0000000..49fe2f1 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/i18n-1.8.5.gemspec @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# stub: i18n 1.8.5 ruby lib + +Gem::Specification.new do |s| + s.name = "i18n".freeze + s.version = "1.8.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 1.3.5".freeze) if s.respond_to? 
:required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/svenfuchs/i18n/issues", "changelog_uri" => "https://github.com/svenfuchs/i18n/releases", "documentation_uri" => "https://guides.rubyonrails.org/i18n.html", "source_code_uri" => "https://github.com/svenfuchs/i18n" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Sven Fuchs".freeze, "Joshua Harvey".freeze, "Matt Aimonetti".freeze, "Stephan Soller".freeze, "Saimon Moore".freeze, "Ryan Bigg".freeze] + s.date = "2020-07-23" + s.description = "New wave Internationalization support for Ruby.".freeze + s.email = "rails-i18n@googlegroups.com".freeze + s.homepage = "https://github.com/ruby-i18n/i18n".freeze + s.licenses = ["MIT".freeze] + s.post_install_message = "\nHEADS UP! i18n 1.1 changed fallbacks to exclude default locale.\nBut that may break your application.\n\nIf you are upgrading your Rails application from an older version of Rails:\n\nPlease check your Rails app for 'config.i18n.fallbacks = true'.\nIf you're using I18n (>= 1.1.0) and Rails (< 5.2.2), this should be\n'config.i18n.fallbacks = [I18n.default_locale]'.\nIf not, fallbacks will be broken in your app by I18n 1.1.x.\n\nIf you are starting a NEW Rails application, you can ignore this notice.\n\nFor more info see:\nhttps://github.com/svenfuchs/i18n/releases/tag/v1.1.0\n\n".freeze + s.required_ruby_version = Gem::Requirement.new(">= 2.3.0".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "New wave Internationalization support for Ruby".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + else + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + end + else + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/json-2.3.1.gemspec b/vendor/bundle/ruby/2.6.0/specifications/json-2.3.1.gemspec new file mode 100644 index 0000000..ff282fc Binary files /dev/null and b/vendor/bundle/ruby/2.6.0/specifications/json-2.3.1.gemspec differ diff --git a/vendor/bundle/ruby/2.6.0/specifications/minitest-5.14.2.gemspec b/vendor/bundle/ruby/2.6.0/specifications/minitest-5.14.2.gemspec new file mode 100644 index 0000000..c4069f9 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/minitest-5.14.2.gemspec @@ -0,0 +1,41 @@ +# -*- encoding: utf-8 -*- +# stub: minitest 5.14.2 ruby lib + +Gem::Specification.new do |s| + s.name = "minitest".freeze + s.version = "5.14.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/seattlerb/minitest/issues", "homepage_uri" => "https://github.com/seattlerb/minitest" } if s.respond_to?
:metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Ryan Davis".freeze] + s.cert_chain = ["-----BEGIN CERTIFICATE-----\nMIIDPjCCAiagAwIBAgIBBDANBgkqhkiG9w0BAQsFADBFMRMwEQYDVQQDDApyeWFu\nZC1ydWJ5MRkwFwYKCZImiZPyLGQBGRYJemVuc3BpZGVyMRMwEQYKCZImiZPyLGQB\nGRYDY29tMB4XDTE5MTIxMzAwMDIwNFoXDTIwMTIxMjAwMDIwNFowRTETMBEGA1UE\nAwwKcnlhbmQtcnVieTEZMBcGCgmSJomT8ixkARkWCXplbnNwaWRlcjETMBEGCgmS\nJomT8ixkARkWA2NvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALda\nb9DCgK+627gPJkB6XfjZ1itoOQvpqH1EXScSaba9/S2VF22VYQbXU1xQXL/WzCkx\ntaCPaLmfYIaFcHHCSY4hYDJijRQkLxPeB3xbOfzfLoBDbjvx5JxgJxUjmGa7xhcT\noOvjtt5P8+GSK9zLzxQP0gVLS/D0FmoE44XuDr3iQkVS2ujU5zZL84mMNqNB1znh\nGiadM9GHRaDiaxuX0cIUBj19T01mVE2iymf9I6bEsiayK/n6QujtyCbTWsAS9Rqt\nqhtV7HJxNKuPj/JFH0D2cswvzznE/a5FOYO68g+YCuFi5L8wZuuM8zzdwjrWHqSV\ngBEfoTEGr7Zii72cx+sCAwEAAaM5MDcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAw\nHQYDVR0OBBYEFEfFe9md/r/tj/Wmwpy+MI8d9k/hMA0GCSqGSIb3DQEBCwUAA4IB\nAQCkkcHqAa6IKLYGl93rn78J3L+LnqyxaA059n4IGMHWN5bv9KBQnIjOrpLadtYZ\nvhWkunWDKdfVapBEq5+T4HzqnsEXC3aCv6JEKJY6Zw7iSzl0M8hozuzRr+w46wvT\nfV2yTN6QTVxqbMsJJyjosks4ZdQYov2zdvQpt1HsLi+Qmckmg8SPZsd+T8uiiBCf\nb+1ORSM5eEfBQenPXy83LZcoQz8i6zVB4aAfTGGdhxjoMGUEmSZ6xpkOzmnGa9QK\nm5x9IDiApM+vCELNwDXXGNFEnQBBK+wAe4Pek8o1V1TTOxL1kGPewVOitX1p3xoN\nh7iEjga8iM1LbZUfiISZ+WrB\n-----END CERTIFICATE-----\n".freeze] + s.date = "2020-09-01" + s.description = "minitest provides a complete suite of testing facilities supporting\nTDD, BDD, mocking, and benchmarking.\n\n \"I had a class with Jim Weirich on testing last week and we were\n allowed to choose our testing frameworks. Kirk Haines and I were\n paired up and we cracked open the code for a few test\n frameworks...\n\n I MUST say that minitest is *very* readable / understandable\n compared to the 'other two' options we looked at. Nicely done and\n thank you for helping us keep our mental sanity.\"\n\n -- Wayne E. Seguin\n\nminitest/test is a small and incredibly fast unit testing framework.\nIt provides a rich set of assertions to make your tests clean and\nreadable.\n\nminitest/spec is a functionally complete spec engine. It hooks onto\nminitest/test and seamlessly bridges test assertions over to spec\nexpectations.\n\nminitest/benchmark is an awesome way to assert the performance of your\nalgorithms in a repeatable manner. Now you can assert that your newb\nco-worker doesn't replace your linear algorithm with an exponential\none!\n\nminitest/mock by Steven Baker, is a beautifully tiny mock (and stub)\nobject framework.\n\nminitest/pride shows pride in testing and adds coloring to your test\noutput. I guess it is an example of how to write IO pipes too. :P\n\nminitest/test is meant to have a clean implementation for language\nimplementors that need a minimal set of methods to bootstrap a working\ntest suite. For example, there is no magic involved for test-case\ndiscovery.\n\n \"Again, I can't praise enough the idea of a testing/specing\n framework that I can actually read in full in one sitting!\"\n\n -- Piotr Szotkowski\n\nComparing to rspec:\n\n rspec is a testing DSL. minitest is ruby.\n\n -- Adam Hawkins, \"Bow Before MiniTest\"\n\nminitest doesn't reinvent anything that ruby already provides, like:\nclasses, modules, inheritance, methods. 
This means you only have to\nlearn ruby to use minitest and all of your regular OO practices like\nextract-method refactorings still apply.".freeze + s.email = ["ryand-ruby@zenspider.com".freeze] + s.extra_rdoc_files = ["History.rdoc".freeze, "Manifest.txt".freeze, "README.rdoc".freeze] + s.files = ["History.rdoc".freeze, "Manifest.txt".freeze, "README.rdoc".freeze] + s.homepage = "https://github.com/seattlerb/minitest".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--main".freeze, "README.rdoc".freeze] + s.required_ruby_version = Gem::Requirement.new([">= 2.2".freeze, "< 3.1".freeze]) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "minitest provides a complete suite of testing facilities supporting TDD, BDD, mocking, and benchmarking".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_development_dependency(%q<hoe>.freeze, ["~> 3.22"]) + else + s.add_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_dependency(%q<hoe>.freeze, ["~> 3.22"]) + end + else + s.add_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_dependency(%q<hoe>.freeze, ["~> 3.22"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/nap-1.1.0.gemspec b/vendor/bundle/ruby/2.6.0/specifications/nap-1.1.0.gemspec new file mode 100644 index 0000000..b69e645 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/nap-1.1.0.gemspec @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# stub: nap 1.1.0 ruby lib + +Gem::Specification.new do |s| + s.name = "nap".freeze + s.version = "1.1.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Manfred Stienstra".freeze] + s.date = "2016-01-29" + s.description = " Nap is a really simple REST library. It allows you to perform HTTP requests\n with minimal amounts of code.\n".freeze + s.email = "manfred@fngtps.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "LICENSE".freeze] + s.files = ["LICENSE".freeze, "README.md".freeze] + s.homepage = "https://github.com/Fingertips/nap".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--charset=utf-8".freeze] + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Nap is a really simple REST library.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q<rake>.freeze, ["~> 10"]) + s.add_development_dependency(%q.freeze, ["~> 0.5"]) + else + s.add_dependency(%q<rake>.freeze, ["~> 10"]) + s.add_dependency(%q.freeze, ["~> 0.5"]) + end + else + s.add_dependency(%q<rake>.freeze, ["~> 10"]) + s.add_dependency(%q.freeze, ["~> 0.5"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/netrc-0.11.0.gemspec b/vendor/bundle/ruby/2.6.0/specifications/netrc-0.11.0.gemspec new file mode 100644 index 0000000..45140bb --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/netrc-0.11.0.gemspec @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# stub: netrc 0.11.0 ruby lib + +Gem::Specification.new do |s| + s.name = "netrc".freeze + s.version = "0.11.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to?
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Keith Rarick".freeze, "geemus (Wesley Beary)".freeze] + s.date = "2015-10-29" + s.description = "This library can read and update netrc files, preserving formatting including comments and whitespace.".freeze + s.email = "geemus@gmail.com".freeze + s.homepage = "https://github.com/geemus/netrc".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Library to read and write netrc files.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q.freeze, [">= 0"]) + else + s.add_dependency(%q.freeze, [">= 0"]) + end + else + s.add_dependency(%q.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/public_suffix-4.0.6.gemspec b/vendor/bundle/ruby/2.6.0/specifications/public_suffix-4.0.6.gemspec new file mode 100644 index 0000000..47dd7ad --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/public_suffix-4.0.6.gemspec @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# stub: public_suffix 4.0.6 ruby lib + +Gem::Specification.new do |s| + s.name = "public_suffix".freeze + s.version = "4.0.6" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/weppos/publicsuffix-ruby/issues", "changelog_uri" => "https://github.com/weppos/publicsuffix-ruby/blob/master/CHANGELOG.md", "documentation_uri" => "https://rubydoc.info/gems/public_suffix/4.0.6", "homepage_uri" => "https://simonecarletti.com/code/publicsuffix-ruby", "source_code_uri" => "https://github.com/weppos/publicsuffix-ruby/tree/v4.0.6" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Simone Carletti".freeze] + s.date = "2020-09-02" + s.description = "PublicSuffix can parse and decompose a domain name into top level domain, domain and subdomains.".freeze + s.email = ["weppos@weppos.net".freeze] + s.extra_rdoc_files = ["LICENSE.txt".freeze] + s.files = ["LICENSE.txt".freeze] + s.homepage = "https://simonecarletti.com/code/publicsuffix-ruby".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Domain name parser based on the Public Suffix List.".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/thread_safe-0.3.6.gemspec b/vendor/bundle/ruby/2.6.0/specifications/thread_safe-0.3.6.gemspec new file mode 100644 index 0000000..071f999 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/thread_safe-0.3.6.gemspec @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# stub: thread_safe 0.3.6 ruby lib + +Gem::Specification.new do |s| + s.name = "thread_safe".freeze + s.version = "0.3.6" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Charles Oliver Nutter".freeze, "thedarkone".freeze] + s.date = "2017-02-22" + s.description = "A collection of data structures and utilities to make thread-safe programming in Ruby easier".freeze + s.email = ["headius@headius.com".freeze, "thedarkone2@gmail.com".freeze] + s.homepage = "https://github.com/ruby-concurrency/thread_safe".freeze + s.licenses = ["Apache-2.0".freeze] + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Thread-safe collections and utilities for Ruby".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q.freeze, ["= 1.1.16"]) + s.add_development_dependency(%q.freeze, ["< 12.0"]) + s.add_development_dependency(%q.freeze, ["~> 3.2"]) + else + s.add_dependency(%q.freeze, ["= 1.1.16"]) + s.add_dependency(%q.freeze, ["< 12.0"]) + s.add_dependency(%q.freeze, ["~> 3.2"]) + end + else + s.add_dependency(%q.freeze, ["= 1.1.16"]) + s.add_dependency(%q.freeze, ["< 12.0"]) + s.add_dependency(%q.freeze, ["~> 3.2"]) + end +end diff --git a/vendor/bundle/ruby/2.6.0/specifications/tzinfo-1.2.8.gemspec b/vendor/bundle/ruby/2.6.0/specifications/tzinfo-1.2.8.gemspec new file mode 100644 index 0000000..635ec91 --- /dev/null +++ b/vendor/bundle/ruby/2.6.0/specifications/tzinfo-1.2.8.gemspec @@ -0,0 +1,37 @@ +# -*- encoding: utf-8 -*- +# stub: tzinfo 1.2.8 ruby lib + +Gem::Specification.new do |s| + s.name = "tzinfo".freeze + s.version = "1.2.8" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Philip Ross".freeze] + s.cert_chain = ["-----BEGIN CERTIFICATE-----\nMIIDPDCCAiSgAwIBAgIBATANBgkqhkiG9w0BAQsFADAkMSIwIAYDVQQDDBlwaGls\nLnJvc3MvREM9Z21haWwvREM9Y29tMB4XDTE5MTIyNDE0NTU0N1oXDTM5MTIyNDE0\nNTU0N1owJDEiMCAGA1UEAwwZcGhpbC5yb3NzL0RDPWdtYWlsL0RDPWNvbTCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJGcwfqn4ZsmPl0b1Lt9dCzExrE5\nEeP/CRQjBdGHkF+mSpi69XysxdwLdfg5SPr9LfxthUug4nNFd5fDCiXM8hYe9jQD\nTmkIQKNBh4fFpGngn9gyy+SumCXi6b5L6d/aMc59NAOM6LJ88TOdH1648dh5rq3C\nULq82n3gg4+u0HHGjRPuR/pnCFQCZbANYdX+UBWd0qkOJn/EreNKROmEeHr/xKuh\n2/GlKFKt9KLcW3hwBB4fHHVYUzRau7D1m9KbEERdg//qNDC4B7fD2BFJuPbM5S7J\n41VwDAh1O8B/Qpg0f+S83K4Kodw4MiPGsug55UkNtd3mGR/zZJ9WM03DSwkCAwEA\nAaN5MHcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAwHQYDVR0OBBYEFA+Z8zvfzBuA\nesoHIfz7+jxfUOcfMB4GA1UdEQQXMBWBE3BoaWwucm9zc0BnbWFpbC5jb20wHgYD\nVR0SBBcwFYETcGhpbC5yb3NzQGdtYWlsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA\nJ80xgZ3gGdQVA8N+8NJANU5HLuZIU9jOaAlziU9ImoTgPiOHKGZC4as1TwT4kBt1\nQcnu7YSANYRrxP5tpOHsWPF/MQYgerAFCZS5+PzOTudwZ+7OsMW4/EMHy6aCVHEd\nc7HzQRC4mSrDRpWxzyBnZ5nX5OAmIkKA8NgeKybT/4Ku6iFPPUQwlyxQaO+Wlxdo\nFqHwpjRyoiVSpe4RUTNK3d3qesWPYi7Lxn6k6ZZeEdvG6ya33AXktE3jmmF+jPR1\nJ3Zn/kSTjTekiaspyGbczC3PUaeJNxr+yCvR4sk71Xmk/GaKKGOHedJ1uj/LAXrA\nMR0mpl7b8zCg0PFC1J73uw==\n-----END CERTIFICATE-----\n".freeze] + s.date = "2020-11-08" + s.description = "TZInfo provides daylight savings aware transformations between times in different time zones.".freeze + s.email = "phil.ross@gmail.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "CHANGES.md".freeze, "LICENSE".freeze] + s.files = ["CHANGES.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.homepage = "https://tzinfo.github.io".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--title".freeze, 
"TZInfo".freeze, "--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 1.8.7".freeze) + s.rubygems_version = "3.0.3.1".freeze + s.summary = "Daylight savings aware timezone library".freeze + + s.installed_by_version = "3.0.3.1" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q.freeze, ["~> 0.1"]) + else + s.add_dependency(%q.freeze, ["~> 0.1"]) + end + else + s.add_dependency(%q.freeze, ["~> 0.1"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/bin/fuzzy_match b/vendor/bundle/ruby/3.0.0/bin/fuzzy_match new file mode 100755 index 0000000..c340ac1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/bin/fuzzy_match @@ -0,0 +1,27 @@ +#!/usr/bin/env ruby +# +# This file was generated by RubyGems. +# +# The application 'fuzzy_match' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'rubygems' + +version = ">= 0.a" + +str = ARGV.first +if str + str = str.b[/\A_(.*)_\z/, 1] + if str and Gem::Version.correct?(str) + version = str + ARGV.shift + end +end + +if Gem.respond_to?(:activate_bin_path) +load Gem.activate_bin_path('fuzzy_match', 'fuzzy_match', version) +else +gem "fuzzy_match", version +load Gem.bin_path("fuzzy_match", "fuzzy_match", version) +end diff --git a/vendor/bundle/ruby/3.0.0/bin/httpclient b/vendor/bundle/ruby/3.0.0/bin/httpclient new file mode 100755 index 0000000..a37f4dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/bin/httpclient @@ -0,0 +1,27 @@ +#!/usr/bin/env ruby +# +# This file was generated by RubyGems. +# +# The application 'httpclient' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'rubygems' + +version = ">= 0.a" + +str = ARGV.first +if str + str = str.b[/\A_(.*)_\z/, 1] + if str and Gem::Version.correct?(str) + version = str + ARGV.shift + end +end + +if Gem.respond_to?(:activate_bin_path) +load Gem.activate_bin_path('httpclient', 'httpclient', version) +else +gem "httpclient", version +load Gem.bin_path("httpclient", "httpclient", version) +end diff --git a/vendor/bundle/ruby/3.0.0/bin/pod b/vendor/bundle/ruby/3.0.0/bin/pod new file mode 100755 index 0000000..a713675 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/bin/pod @@ -0,0 +1,27 @@ +#!/usr/bin/env ruby +# +# This file was generated by RubyGems. +# +# The application 'cocoapods' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'rubygems' + +version = ">= 0.a" + +str = ARGV.first +if str + str = str.b[/\A_(.*)_\z/, 1] + if str and Gem::Version.correct?(str) + version = str + ARGV.shift + end +end + +if Gem.respond_to?(:activate_bin_path) +load Gem.activate_bin_path('cocoapods', 'pod', version) +else +gem "cocoapods", version +load Gem.bin_path("cocoapods", "pod", version) +end diff --git a/vendor/bundle/ruby/3.0.0/bin/sandbox-pod b/vendor/bundle/ruby/3.0.0/bin/sandbox-pod new file mode 100755 index 0000000..fe7a92e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/bin/sandbox-pod @@ -0,0 +1,27 @@ +#!/usr/bin/env ruby +# +# This file was generated by RubyGems. +# +# The application 'cocoapods' is installed as part of a gem, and +# this file is here to facilitate running it. 
+# + +require 'rubygems' + +version = ">= 0.a" + +str = ARGV.first +if str + str = str.b[/\A_(.*)_\z/, 1] + if str and Gem::Version.correct?(str) + version = str + ARGV.shift + end +end + +if Gem.respond_to?(:activate_bin_path) +load Gem.activate_bin_path('cocoapods', 'sandbox-pod', version) +else +gem "cocoapods", version +load Gem.bin_path("cocoapods", "sandbox-pod", version) +end diff --git a/vendor/bundle/ruby/3.0.0/bin/xcodeproj b/vendor/bundle/ruby/3.0.0/bin/xcodeproj new file mode 100755 index 0000000..8358e7d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/bin/xcodeproj @@ -0,0 +1,27 @@ +#!/usr/bin/env ruby +# +# This file was generated by RubyGems. +# +# The application 'xcodeproj' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'rubygems' + +version = ">= 0.a" + +str = ARGV.first +if str + str = str.b[/\A_(.*)_\z/, 1] + if str and Gem::Version.correct?(str) + version = str + ARGV.shift + end +end + +if Gem.respond_to?(:activate_bin_path) +load Gem.activate_bin_path('xcodeproj', 'xcodeproj', version) +else +gem "xcodeproj", version +load Gem.bin_path("xcodeproj", "xcodeproj", version) +end diff --git a/vendor/bundle/ruby/3.0.0/cache/CFPropertyList-3.0.2.gem b/vendor/bundle/ruby/3.0.0/cache/CFPropertyList-3.0.2.gem new file mode 100644 index 0000000..01cb910 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/CFPropertyList-3.0.2.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/CFPropertyList-3.0.5.gem b/vendor/bundle/ruby/3.0.0/cache/CFPropertyList-3.0.5.gem new file mode 100644 index 0000000..173ad26 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/CFPropertyList-3.0.5.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/activesupport-5.2.4.4.gem b/vendor/bundle/ruby/3.0.0/cache/activesupport-5.2.4.4.gem new file mode 100644 index 0000000..3df1726 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/activesupport-5.2.4.4.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/activesupport-6.1.4.6.gem b/vendor/bundle/ruby/3.0.0/cache/activesupport-6.1.4.6.gem new file mode 100644 index 0000000..5a29ad1 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/activesupport-6.1.4.6.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/addressable-2.8.0.gem b/vendor/bundle/ruby/3.0.0/cache/addressable-2.8.0.gem new file mode 100644 index 0000000..1e41e1c Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/addressable-2.8.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/algoliasearch-1.27.5.gem b/vendor/bundle/ruby/3.0.0/cache/algoliasearch-1.27.5.gem new file mode 100644 index 0000000..4e068e8 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/algoliasearch-1.27.5.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/atomos-0.1.3.gem b/vendor/bundle/ruby/3.0.0/cache/atomos-0.1.3.gem new file mode 100644 index 0000000..c12e2f0 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/atomos-0.1.3.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/claide-1.0.3.gem b/vendor/bundle/ruby/3.0.0/cache/claide-1.0.3.gem new file mode 100644 index 0000000..16aeff7 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/claide-1.0.3.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/claide-1.1.0.gem b/vendor/bundle/ruby/3.0.0/cache/claide-1.1.0.gem new file mode 100644 index 0000000..6b0fb2d Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/claide-1.1.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-1.11.2.gem 
b/vendor/bundle/ruby/3.0.0/cache/cocoapods-1.11.2.gem new file mode 100644 index 0000000..ba98749 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-1.11.2.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-core-1.11.2.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-core-1.11.2.gem new file mode 100644 index 0000000..104f6a0 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-core-1.11.2.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-deintegrate-1.0.4.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-deintegrate-1.0.4.gem new file mode 100644 index 0000000..d44ae47 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-deintegrate-1.0.4.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-deintegrate-1.0.5.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-deintegrate-1.0.5.gem new file mode 100644 index 0000000..7dd920f Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-deintegrate-1.0.5.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-downloader-1.4.0.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-downloader-1.4.0.gem new file mode 100644 index 0000000..cfd7069 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-downloader-1.4.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-downloader-1.5.1.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-downloader-1.5.1.gem new file mode 100644 index 0000000..9709239 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-downloader-1.5.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-plugins-1.0.0.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-plugins-1.0.0.gem new file mode 100644 index 0000000..df8cba9 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-plugins-1.0.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-search-1.0.0.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-search-1.0.0.gem new file mode 100644 index 0000000..b4e5a72 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-search-1.0.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-search-1.0.1.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-search-1.0.1.gem new file mode 100644 index 0000000..55f8d32 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-search-1.0.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-trunk-1.5.0.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-trunk-1.5.0.gem new file mode 100644 index 0000000..d79e143 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-trunk-1.5.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-trunk-1.6.0.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-trunk-1.6.0.gem new file mode 100644 index 0000000..af70712 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-trunk-1.6.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/cocoapods-try-1.2.0.gem b/vendor/bundle/ruby/3.0.0/cache/cocoapods-try-1.2.0.gem new file mode 100644 index 0000000..535e4eb Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/cocoapods-try-1.2.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/colored2-3.1.2.gem b/vendor/bundle/ruby/3.0.0/cache/colored2-3.1.2.gem new file mode 100644 index 0000000..9757ae3 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/colored2-3.1.2.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/concurrent-ruby-1.1.7.gem 
b/vendor/bundle/ruby/3.0.0/cache/concurrent-ruby-1.1.7.gem new file mode 100644 index 0000000..ae9b370 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/concurrent-ruby-1.1.7.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/concurrent-ruby-1.1.9.gem b/vendor/bundle/ruby/3.0.0/cache/concurrent-ruby-1.1.9.gem new file mode 100644 index 0000000..9ed64f2 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/concurrent-ruby-1.1.9.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/escape-0.0.4.gem b/vendor/bundle/ruby/3.0.0/cache/escape-0.0.4.gem new file mode 100644 index 0000000..0152d14 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/escape-0.0.4.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/ethon-0.15.0.gem b/vendor/bundle/ruby/3.0.0/cache/ethon-0.15.0.gem new file mode 100644 index 0000000..794eaec Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/ethon-0.15.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/ffi-1.13.1.gem b/vendor/bundle/ruby/3.0.0/cache/ffi-1.13.1.gem new file mode 100644 index 0000000..e59187e Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/ffi-1.13.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/ffi-1.15.5.gem b/vendor/bundle/ruby/3.0.0/cache/ffi-1.15.5.gem new file mode 100644 index 0000000..a632047 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/ffi-1.15.5.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/fourflusher-2.3.1.gem b/vendor/bundle/ruby/3.0.0/cache/fourflusher-2.3.1.gem new file mode 100644 index 0000000..a1fd075 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/fourflusher-2.3.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/fuzzy_match-2.0.4.gem b/vendor/bundle/ruby/3.0.0/cache/fuzzy_match-2.0.4.gem new file mode 100644 index 0000000..bb9e714 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/fuzzy_match-2.0.4.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/gh_inspector-1.1.3.gem b/vendor/bundle/ruby/3.0.0/cache/gh_inspector-1.1.3.gem new file mode 100644 index 0000000..3d70b72 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/gh_inspector-1.1.3.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/httpclient-2.8.3.gem b/vendor/bundle/ruby/3.0.0/cache/httpclient-2.8.3.gem new file mode 100644 index 0000000..9c19ad4 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/httpclient-2.8.3.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/i18n-1.10.0.gem b/vendor/bundle/ruby/3.0.0/cache/i18n-1.10.0.gem new file mode 100644 index 0000000..a64602b Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/i18n-1.10.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/i18n-1.8.5.gem b/vendor/bundle/ruby/3.0.0/cache/i18n-1.8.5.gem new file mode 100644 index 0000000..f5c89ea Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/i18n-1.8.5.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/json-2.3.1.gem b/vendor/bundle/ruby/3.0.0/cache/json-2.3.1.gem new file mode 100644 index 0000000..1f89633 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/json-2.3.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/json-2.6.1.gem b/vendor/bundle/ruby/3.0.0/cache/json-2.6.1.gem new file mode 100644 index 0000000..0e3430c Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/json-2.6.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/minitest-5.14.2.gem b/vendor/bundle/ruby/3.0.0/cache/minitest-5.14.2.gem new file mode 100644 index 0000000..66232f7 Binary files /dev/null and 
b/vendor/bundle/ruby/3.0.0/cache/minitest-5.14.2.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/minitest-5.15.0.gem b/vendor/bundle/ruby/3.0.0/cache/minitest-5.15.0.gem new file mode 100644 index 0000000..bb3c247 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/minitest-5.15.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/molinillo-0.6.6.gem b/vendor/bundle/ruby/3.0.0/cache/molinillo-0.6.6.gem new file mode 100644 index 0000000..22fb63f Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/molinillo-0.6.6.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/molinillo-0.8.0.gem b/vendor/bundle/ruby/3.0.0/cache/molinillo-0.8.0.gem new file mode 100644 index 0000000..e8b8deb Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/molinillo-0.8.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/nanaimo-0.3.0.gem b/vendor/bundle/ruby/3.0.0/cache/nanaimo-0.3.0.gem new file mode 100644 index 0000000..3a369b2 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/nanaimo-0.3.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/nap-1.1.0.gem b/vendor/bundle/ruby/3.0.0/cache/nap-1.1.0.gem new file mode 100644 index 0000000..1f6dd74 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/nap-1.1.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/netrc-0.11.0.gem b/vendor/bundle/ruby/3.0.0/cache/netrc-0.11.0.gem new file mode 100644 index 0000000..78226f3 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/netrc-0.11.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/public_suffix-4.0.6.gem b/vendor/bundle/ruby/3.0.0/cache/public_suffix-4.0.6.gem new file mode 100644 index 0000000..6f183f4 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/public_suffix-4.0.6.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/rexml-3.2.5.gem b/vendor/bundle/ruby/3.0.0/cache/rexml-3.2.5.gem new file mode 100644 index 0000000..5680fec Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/rexml-3.2.5.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/ruby-macho-1.4.0.gem b/vendor/bundle/ruby/3.0.0/cache/ruby-macho-1.4.0.gem new file mode 100644 index 0000000..52b3e10 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/ruby-macho-1.4.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/ruby-macho-2.5.1.gem b/vendor/bundle/ruby/3.0.0/cache/ruby-macho-2.5.1.gem new file mode 100644 index 0000000..4b0322b Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/ruby-macho-2.5.1.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/thread_safe-0.3.6.gem b/vendor/bundle/ruby/3.0.0/cache/thread_safe-0.3.6.gem new file mode 100644 index 0000000..7ee950f Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/thread_safe-0.3.6.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/typhoeus-1.4.0.gem b/vendor/bundle/ruby/3.0.0/cache/typhoeus-1.4.0.gem new file mode 100644 index 0000000..b71baf3 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/typhoeus-1.4.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/tzinfo-1.2.8.gem b/vendor/bundle/ruby/3.0.0/cache/tzinfo-1.2.8.gem new file mode 100644 index 0000000..f24bf8e Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/tzinfo-1.2.8.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/tzinfo-2.0.4.gem b/vendor/bundle/ruby/3.0.0/cache/tzinfo-2.0.4.gem new file mode 100644 index 0000000..a3f9820 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/tzinfo-2.0.4.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/xcodeproj-1.19.0.gem 
b/vendor/bundle/ruby/3.0.0/cache/xcodeproj-1.19.0.gem new file mode 100644 index 0000000..a4fef7b Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/xcodeproj-1.19.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/xcodeproj-1.21.0.gem b/vendor/bundle/ruby/3.0.0/cache/xcodeproj-1.21.0.gem new file mode 100644 index 0000000..b0b2dbf Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/xcodeproj-1.21.0.gem differ diff --git a/vendor/bundle/ruby/3.0.0/cache/zeitwerk-2.5.4.gem b/vendor/bundle/ruby/3.0.0/cache/zeitwerk-2.5.4.gem new file mode 100644 index 0000000..7c967d4 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/cache/zeitwerk-2.5.4.gem differ diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.13.1/gem_make.out b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.13.1/gem_make.out new file mode 100644 index 0000000..2a2ad7e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.13.1/gem_make.out @@ -0,0 +1,81 @@ +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c +/Users/arthur/.rvm/rubies/ruby-3.0.0/bin/ruby -I /Users/arthur/.rvm/rubies/ruby-3.0.0/lib/ruby/3.0.0 -r ./siteconf20220301-11532-f36q6z.rb extconf.rb +checking for ffi_prep_closure_loc() in -lffi... no +checking for ffi_prep_closure_loc() in -llibffi... no +checking for ffi_prep_closure_loc() in -llibffi-8... no +creating extconf.h +creating Makefile + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c +make "DESTDIR=" clean + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c +make "DESTDIR=" +Configuring libffi +/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure: line 18632: readelf: command not found +clang: error: unsupported option '-print-multi-os-directory' +clang: error: no input files +cd "/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21" && /Applications/Xcode.app/Contents/Developer/usr/bin/make +/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive +Making all in include +make[3]: Nothing to be done for `all'. +Making all in testsuite +make[3]: Nothing to be done for `all'. +Making all in man +make[3]: Nothing to be done for `all'. +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/prep_cif.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c -fno-common -DPIC -o src/.libs/prep_cif.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/types.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c -fno-common -DPIC -o src/.libs/types.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/raw_api.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c -fno-common -DPIC -o src/.libs/raw_api.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/java_raw_api.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c -fno-common -DPIC -o src/.libs/java_raw_api.o +/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c:328:46: warning: 'ffi_java_raw_size' is deprecated [-Wdeprecated-declarations] + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ^ +include/ffi.h:299:56: note: 'ffi_java_raw_size' has been explicitly marked deprecated here +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + ^ +/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c:331:3: warning: 'ffi_java_ptrarray_to_raw' is deprecated [-Wdeprecated-declarations] + ffi_java_ptrarray_to_raw (cif, avalue, raw); + ^ +include/ffi.h:295:93: note: 'ffi_java_ptrarray_to_raw' has been explicitly marked deprecated here +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); + ^ +2 warnings generated. +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/closures.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c -fno-common -DPIC -o src/.libs/closures.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/arm/ffi.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c -fno-common -DPIC -o src/arm/.libs/ffi.o +/bin/sh ./libtool --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -c -o src/arm/sysv.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S -fno-common -DPIC -o src/arm/.libs/sysv.o +/bin/sh ./libtool --tag=CC --mode=link gcc -fdeclspec -Wall -fexceptions -L/usr/local/opt/libffi/lib -o libffi_convenience.la src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/arm/ffi.lo src/arm/sysv.lo +libtool: link: ar cru .libs/libffi_convenience.a src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/arm/.libs/ffi.o src/arm/.libs/sysv.o +libtool: link: ranlib .libs/libffi_convenience.a +libtool: link: ( cd ".libs" && rm -f "libffi_convenience.la" && ln -s "../libffi_convenience.la" "libffi_convenience.la" ) +/bin/sh ./libtool --tag=CC --mode=link gcc -fdeclspec -Wall -fexceptions -no-undefined -version-info `grep -v '^#' /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version` '-L/usr/local/opt/libffi/lib' -L/usr/local/opt/libffi/lib -o libffi.la -rpath /usr/local/lib src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/arm/ffi.lo src/arm/sysv.lo +libtool: link: gcc -fdeclspec -dynamiclib -o .libs/libffi.8.dylib src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/arm/.libs/ffi.o src/arm/.libs/sysv.o -L/usr/local/opt/libffi/lib -install_name /usr/local/lib/libffi.8.dylib -compatibility_version 10 -current_version 10.0 -Wl,-single_module +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +Undefined symbols for architecture arm64: + "_ffi_call", referenced from: + _ffi_raw_call in raw_api.o + _ffi_java_raw_call in java_raw_api.o + "_ffi_closure_trampoline_table_page", referenced from: + _ffi_trampoline_table_alloc in closures.o + "_ffi_prep_cif_machdep", referenced from: + _ffi_prep_cif_core in prep_cif.o + "_ffi_prep_cif_machdep_var", referenced from: + _ffi_prep_cif_core in prep_cif.o + "_ffi_prep_closure_loc", referenced from: + _ffi_prep_closure in prep_cif.o + _ffi_prep_raw_closure_loc in raw_api.o + _ffi_prep_java_raw_closure_loc in java_raw_api.o +ld: symbol(s) not found for architecture arm64 +clang: error: linker command failed with exit code 1 (use -v to see invocation) +make[3]: *** [libffi.la] Error 1 +make[2]: *** [all-recursive] Error 1 +make[1]: *** [all] Error 2 +make: *** ["/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21"/.libs/libffi_convenience.a] Error 2 + +make failed, exit code 2 diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.13.1/mkmf.log b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.13.1/mkmf.log new file mode 100644 index 0000000..56d130a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.13.1/mkmf.log @@ -0,0 +1,211 @@ +"pkg-config --exists libffi" +| pkg-config --libs libffi +=> "-lffi\n" +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. 
-I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 -lffi " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +| pkg-config --cflags-only-I libffi +=> "-I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi\n" +| pkg-config --cflags-only-other libffi +=> "\n" +| pkg-config --libs-only-l libffi +=> "-lffi\n" +package configuration for libffi +incflags: -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi +cflags: +ldflags: +libs: -lffi + +have_library: checking for ffi_prep_closure_loc() in -lffi... -------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -lffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated.
+checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -lffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +have_library: checking for ffi_prep_closure_loc() in -llibffi... -------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated.
+checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +have_library: checking for ffi_prep_closure_loc() in -llibffi-8... -------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi-8 -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated.
+checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi-8 -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +extconf.h is: +/* begin */ +1: #ifndef EXTCONF_H +2: #define EXTCONF_H +3: #define HAVE_FFI_PREP_CIF_VAR 1 +4: #define USE_INTERNAL_LIBFFI 1 +5: #endif +/* end */ + diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/ffi_c.bundle b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/ffi_c.bundle new file mode 100755 index 0000000..8edc525 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/ffi_c.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/gem.build_complete b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/gem.build_complete new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/gem_make.out b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/gem_make.out new file mode 100644 index 0000000..889b90f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/gem_make.out @@ -0,0 +1,124 @@ +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c +/Users/arthur/.rvm/rubies/ruby-3.0.0/bin/ruby -I /Users/arthur/.rvm/rubies/ruby-3.0.0/lib/ruby/3.0.0 -r ./siteconf20220301-14832-i62wv8.rb extconf.rb +checking for ffi_prep_closure_loc() in -lffi... no +checking for ffi_prep_closure_loc() in -llibffi... no +checking for ffi_prep_closure_loc() in -llibffi-8... no +checking for whether -Wl,--exclude-libs,ALL is accepted as LDFLAGS... no +checking for whether -pthread is accepted as LDFLAGS...
yes +creating extconf.h +creating Makefile + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c +make "DESTDIR=" clean + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c +make "DESTDIR=" +Configuring libffi +/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure: line 18692: readelf: command not found +clang: error: unsupported option '-print-multi-os-directory' +clang: error: no input files +cd "/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21" && /Applications/Xcode.app/Contents/Developer/usr/bin/make +/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive +Making all in include +make[3]: Nothing to be done for `all'. +Making all in testsuite +make[3]: Nothing to be done for `all'. +Making all in man +make[3]: Nothing to be done for `all'. +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/prep_cif.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/prep_cif.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/prep_cif.c -fno-common -DPIC -o src/.libs/prep_cif.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/types.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/types.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/types.c -fno-common -DPIC -o src/.libs/types.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/raw_api.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/raw_api.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/raw_api.c -fno-common -DPIC -o src/.libs/raw_api.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/java_raw_api.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c -fno-common -DPIC -o src/.libs/java_raw_api.o +/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c:328:46: warning: 'ffi_java_raw_size' is deprecated [-Wdeprecated-declarations] + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ^ +include/ffi.h:299:56: note: 'ffi_java_raw_size' has been explicitly marked deprecated here +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + ^ +/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c:331:3: warning: 'ffi_java_ptrarray_to_raw' is deprecated [-Wdeprecated-declarations] + ffi_java_ptrarray_to_raw (cif, avalue, raw); + ^ +include/ffi.h:295:93: note: 'ffi_java_ptrarray_to_raw' has been explicitly marked deprecated here +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); + ^ +2 warnings generated. +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/closures.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/closures.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/closures.c -fno-common -DPIC -o src/.libs/closures.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c -o src/aarch64/ffi.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffi.c +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -Wall -fexceptions -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffi.c -fno-common -DPIC -o src/aarch64/.libs/ffi.o +/bin/sh ./libtool --tag=CC --mode=compile gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -c -o src/aarch64/sysv.lo /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/sysv.S +libtool: compile: gcc -fdeclspec -DHAVE_CONFIG_H -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi -I. -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -I. 
-I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include -Iinclude -I/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src -c /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/sysv.S -fno-common -DPIC -o src/aarch64/.libs/sysv.o +/bin/sh ./libtool --tag=CC --mode=link gcc -fdeclspec -Wall -fexceptions -L/usr/local/opt/libffi/lib -o libffi_convenience.la src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/aarch64/ffi.lo src/aarch64/sysv.lo +libtool: link: ar cru .libs/libffi_convenience.a src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/aarch64/.libs/ffi.o src/aarch64/.libs/sysv.o +libtool: link: ranlib .libs/libffi_convenience.a +libtool: link: ( cd ".libs" && rm -f "libffi_convenience.la" && ln -s "../libffi_convenience.la" "libffi_convenience.la" ) +/bin/sh ./libtool --tag=CC --mode=link gcc -fdeclspec -Wall -fexceptions -no-undefined -version-info `grep -v '^#' /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libtool-version` '-L/usr/local/opt/libffi/lib' -L/usr/local/opt/libffi/lib -o libffi.la -rpath /usr/local/lib src/prep_cif.lo src/types.lo src/raw_api.lo src/java_raw_api.lo src/closures.lo src/aarch64/ffi.lo src/aarch64/sysv.lo +libtool: link: gcc -fdeclspec -dynamiclib -o .libs/libffi.8.dylib src/.libs/prep_cif.o src/.libs/types.o src/.libs/raw_api.o src/.libs/java_raw_api.o src/.libs/closures.o src/aarch64/.libs/ffi.o src/aarch64/.libs/sysv.o -L/usr/local/opt/libffi/lib -install_name /usr/local/lib/libffi.8.dylib -compatibility_version 10 -current_version 10.0 -Wl,-single_module +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +libtool: link: (cd ".libs" && rm -f "libffi.dylib" && ln -s "libffi.8.dylib" "libffi.dylib") +libtool: link: ( cd ".libs" && rm -f "libffi.la" && ln -s "../libffi.la" "libffi.la" ) +compiling AbstractMemory.c +compiling ArrayType.c +compiling Buffer.c +compiling Call.c +compiling ClosurePool.c +compiling DynamicLibrary.c +compiling Function.c +compiling FunctionInfo.c +compiling LastError.c +compiling LongDouble.c +compiling MappedType.c +compiling MemoryPointer.c +compiling MethodHandle.c +compiling Platform.c +compiling Pointer.c +compiling Struct.c +compiling StructByValue.c +compiling StructLayout.c +compiling Thread.c +compiling Type.c +compiling Types.c +compiling Variadic.c +compiling ffi.c +linking shared-object ffi_c.bundle + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c +make "DESTDIR=" install +cd "/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21" && /Applications/Xcode.app/Contents/Developer/usr/bin/make +/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive +Making all in include +make[3]: Nothing to be done for `all'. +Making all in testsuite +make[3]: Nothing to be done for `all'. +Making all in man +make[3]: Nothing to be done for `all'. +make[3]: Nothing to be done for `all-am'. 
+compiling AbstractMemory.c +compiling ArrayType.c +compiling Buffer.c +compiling Call.c +compiling ClosurePool.c +compiling DynamicLibrary.c +compiling Function.c +compiling FunctionInfo.c +compiling LastError.c +compiling LongDouble.c +compiling MappedType.c +compiling MemoryPointer.c +compiling MethodHandle.c +compiling Platform.c +compiling Pointer.c +compiling Struct.c +compiling StructByValue.c +compiling StructLayout.c +compiling Thread.c +compiling Type.c +compiling Types.c +compiling Variadic.c +compiling ffi.c +linking shared-object ffi_c.bundle +/opt/homebrew/opt/coreutils/bin/ginstall -c -m 0755 ffi_c.bundle ./.gem.20220301-14832-l5yuww diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/mkmf.log b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/mkmf.log new file mode 100644 index 0000000..97824c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/ffi-1.15.5/mkmf.log @@ -0,0 +1,244 @@ +"pkg-config --exists libffi" +| pkg-config --libs libffi +=> "-lffi\n" +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 -lffi " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +| pkg-config --cflags-only-I libffi +=> "-I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi\n" +| pkg-config --cflags-only-other libffi +=> "\n" +| pkg-config --libs-only-l libffi +=> "-lffi\n" +package configuration for libffi +incflags: -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi +cflags: +ldflags: +libs: -lffi + +have_library: checking for ffi_prep_closure_loc() in -lffi... 
-------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -lffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -lffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +have_library: checking for ffi_prep_closure_loc() in -llibffi... -------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I.
-I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +have_library: checking for ffi_prep_closure_loc() in -llibffi-8... -------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L.
-L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi-8 -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))ffi_prep_closure_loc; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -llibffi-8 -lffi " +conftest.c:3:10: fatal error: 'ffi.h' file not found +#include <ffi.h> + ^~~~~~~ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include <ffi.h> + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void ffi_prep_closure_loc(); +17: int t(void) { ffi_prep_closure_loc(); return 0; } +/* end */ + +-------------------- + +block in append_ldflags: checking for whether -Wl,--exclude-libs,ALL is accepted as LDFLAGS... -------------------- no + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L.
-L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -Wl,--exclude-libs,ALL " +ld: unknown option: --exclude-libs +clang: error: linker command failed with exit code 1 (use -v to see invocation) +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +-------------------- + +block in append_ldflags: checking for whether -pthread is accepted as LDFLAGS... -------------------- yes + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lffi -lruby.3.0 -pthread " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +-------------------- + +extconf.h is: +/* begin */ +1: #ifndef EXTCONF_H +2: #define EXTCONF_H +3: #define HAVE_FFI_PREP_CIF_VAR 1 +4: #define USE_INTERNAL_LIBFFI 1 +5: #define USE_FFI_ALLOC 1 +6: #endif +/* end */ + diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/gem.build_complete b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/gem.build_complete new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/gem_make.out b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/gem_make.out new file mode 100644 index 0000000..32747f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/gem_make.out @@ -0,0 +1,13 @@ +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json +/Users/arthur/.rvm/rubies/ruby-3.0.0/bin/ruby -I /Users/arthur/.rvm/rubies/ruby-3.0.0/lib/ruby/3.0.0 -r ./siteconf20220301-11532-11xfgy.rb extconf.rb +creating Makefile + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json +make "DESTDIR=" clean + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json +make "DESTDIR=" +make: Nothing to be done for `all'. 
+ +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json +make "DESTDIR=" install diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/json/ext/generator.bundle b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/json/ext/generator.bundle new file mode 100755 index 0000000..5088adf Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/json/ext/generator.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/json/ext/parser.bundle b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/json/ext/parser.bundle new file mode 100755 index 0000000..94caaec Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/json/ext/parser.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/mkmf.log b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/mkmf.log new file mode 100644 index 0000000..80de059 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.3.1/mkmf.log @@ -0,0 +1,62 @@ +have_func: checking for rb_enc_raise() in ruby.h... -------------------- yes + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +conftest.c:16:57: error: use of undeclared identifier 'rb_enc_raise' +int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_raise; return !p; } + ^ +1 error generated. 
+checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_raise; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void rb_enc_raise(); +17: int t(void) { rb_enc_raise(); return 0; } +/* end */ + +-------------------- + diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/gem.build_complete b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/gem.build_complete new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/gem_make.out b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/gem_make.out new file mode 100644 index 0000000..3cc6bf6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/gem_make.out @@ -0,0 +1,13 @@ +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json +/Users/arthur/.rvm/rubies/ruby-3.0.0/bin/ruby -I /Users/arthur/.rvm/rubies/ruby-3.0.0/lib/ruby/3.0.0 -r ./siteconf20220301-14832-thmafe.rb extconf.rb +creating Makefile + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json +make "DESTDIR=" clean + +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json +make "DESTDIR=" +make: Nothing to be done for `all'. 
+ +current directory: /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json +make "DESTDIR=" install diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/json/ext/generator.bundle b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/json/ext/generator.bundle new file mode 100755 index 0000000..0a5c4b7 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/json/ext/generator.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/json/ext/parser.bundle b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/json/ext/parser.bundle new file mode 100755 index 0000000..1137b12 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/json/ext/parser.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/mkmf.log b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/mkmf.log new file mode 100644 index 0000000..48d2292 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/extensions/arm64-darwin-21/3.0.0/json-2.6.1/mkmf.log @@ -0,0 +1,113 @@ +have_func: checking for rb_enc_raise() in ruby.h... -------------------- yes + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ +1: #include "ruby.h" +2: +3: int main(int argc, char **argv) +4: { +5: return !!argv[argc]; +6: } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +conftest.c:16:57: error: use of undeclared identifier 'rb_enc_raise' +int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_raise; return !p; } + ^ +1 error generated. 
+checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_raise; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void rb_enc_raise(); +17: int t(void) { rb_enc_raise(); return 0; } +/* end */ + +-------------------- + +have_func: checking for rb_enc_interned_str() in ruby.h... -------------------- yes + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +conftest.c:16:57: error: use of undeclared identifier 'rb_enc_interned_str' +int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_interned_str; return !p; } + ^ +1 error generated. +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: int t(void) { void ((*volatile p)()); p = (void ((*)()))rb_enc_interned_str; return !p; } +/* end */ + +"gcc -fdeclspec -o conftest -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/ruby/backward -I/Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 -I. 
-I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe conftest.c -L. -L/Users/arthur/.rvm/rubies/ruby-3.0.0/lib -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -m64 -lruby.3.0 " +checked program was: +/* begin */ + 1: #include "ruby.h" + 2: + 3: #include + 4: + 5: /*top*/ + 6: extern int t(void); + 7: int main(int argc, char **argv) + 8: { + 9: if (argc > 1000000) { +10: int (* volatile tp)(void)=(int (*)(void))&t; +11: printf("%d", (*tp)()); +12: } +13: +14: return !!argv[argc]; +15: } +16: extern void rb_enc_interned_str(); +17: int t(void) { rb_enc_interned_str(); return 0; } +/* end */ + +-------------------- + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/LICENSE b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/LICENSE new file mode 100644 index 0000000..ba6ffb2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2010 Christian Kruse, + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/README.md b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/README.md new file mode 100644 index 0000000..49f0c07 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/README.md @@ -0,0 +1,79 @@ +CFPropertyList implementation +class to read, manipulate and write both XML and binary property list +files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +for more documentation. + +# Caution! + +In version 3.0.0 we dropped Ruby 1.8 compatibility. If you are using +Ruby 1.8 consider to update Ruby; if you can't upgrade, don't upgrade +CFPropertyList. + +# Installation + +You could either use ruby gems and install it via + +```bash +gem install CFPropertyList +``` + +or you could clone this repository and place it somewhere in your load path. 
+ +Example: +```ruby +require 'cfpropertylist' +``` + +If you're using Rails, you can add it into your Gemfile + +```ruby +gem 'CFPropertyList' +``` + +# Usage + + ## create a arbitrary data structure of basic data types + +```ruby +data = { + 'name' => 'John Doe', + 'missing' => true, + 'last_seen' => Time.now, + 'friends' => ['Jane Doe','Julian Doe'], + 'likes' => { + 'me' => false + } +} +``` + +## create CFPropertyList::List object + +```ruby +plist = CFPropertyList::List.new +``` + +## call CFPropertyList.guess() to create corresponding CFType values + +```ruby +plist.value = CFPropertyList.guess(data) +``` + +## write plist to file +```ruby +plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) +``` + +## … later, read it again +```ruby +plist = CFPropertyList::List.new(:file => "example.plist") +data = CFPropertyList.native_types(plist.value) +``` + +# Author and license + +**Author:** Christian Kruse (mailto:cjk@wwwtech.de) + +**Copyright:** Copyright (c) 2010 + +**License:** MIT License + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/README.rdoc b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/README.rdoc new file mode 100644 index 0000000..a71005c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/README.rdoc @@ -0,0 +1,43 @@ +CFPropertyList implementation +class to read, manipulate and write both XML and binary property list +files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +for more documentation. + +== Installation + +You could either use ruby gems and install it via + + gem install CFPropertyList + +or you could clone this repository and place it somewhere in your load path. + +== Example + require 'cfpropertylist' + + # create a arbitrary data structure of basic data types + data = { + 'name' => 'John Doe', + 'missing' => true, + 'last_seen' => Time.now, + 'friends' => ['Jane Doe','Julian Doe'], + 'likes' => { + 'me' => false + } + } + + # create CFPropertyList::List object + plist = CFPropertyList::List.new + + # call CFPropertyList.guess() to create corresponding CFType values + plist.value = CFPropertyList.guess(data) + + # write plist to file + plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) + + # … later, read it again + plist = CFPropertyList::List.new(:file => "example.plist") + data = CFPropertyList.native_types(plist.value) + +Author:: Christian Kruse (mailto:cjk@wwwtech.de) +Copyright:: Copyright (c) 2010 +License:: MIT License diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/THANKS b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/THANKS new file mode 100644 index 0000000..c981a51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/THANKS @@ -0,0 +1,7 @@ +Special thanks to: + +Steve Madsen for providing a lot of performance patches and bugfixes! 
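As a complement to the README examples above, a short round trip that writes the plist as XML and reads it back into plain Ruby objects; the file name prefs.plist is only an example:

```ruby
require 'cfpropertylist'

# Build a plist from plain Ruby values (CFPropertyList.guess picks the CFType wrappers).
plist = CFPropertyList::List.new
plist.value = CFPropertyList.guess({ 'volume' => 0.8, 'muted' => false })
plist.save('prefs.plist', CFPropertyList::List::FORMAT_XML)

# Read it back; native_types unwraps CFReal/CFBoolean and, with the second
# argument set to true, returns symbol keys for the dictionary.
reloaded = CFPropertyList::List.new(:file => 'prefs.plist')
CFPropertyList.native_types(reloaded.value, true)
# => { :volume => 0.8, :muted => false }
```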
+Have a look at his Github account: + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist.rb new file mode 100644 index 0000000..f83fef7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist.rb @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +require 'cfpropertylist/rbCFPropertyList' + + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbBinaryCFPropertyList.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbBinaryCFPropertyList.rb new file mode 100644 index 0000000..da84b63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbBinaryCFPropertyList.rb @@ -0,0 +1,594 @@ +# -*- coding: utf-8 -*- + +require 'stringio' + +module CFPropertyList + # Binary PList parser class + class Binary + # Read a binary plist file + def load(opts) + @unique_table = {} + @count_objects = 0 + @object_refs = 0 + + @written_object_count = 0 + @object_table = [] + @object_ref_size = 0 + + @offsets = [] + + fd = nil + if(opts.has_key?(:file)) + fd = File.open(opts[:file],"rb") + file = opts[:file] + else + fd = StringIO.new(opts[:data],"rb") + file = "" + end + + # first, we read the trailer: 32 byte from the end + fd.seek(-32,IO::SEEK_END) + buff = fd.read(32) + + offset_size, object_ref_size, number_of_objects, top_object, table_offset = buff.unpack "x6CCx4Nx4Nx4N" + + # after that, get the offset table + fd.seek(table_offset, IO::SEEK_SET) + coded_offset_table = fd.read(number_of_objects * offset_size) + raise CFFormatError.new("#{file}: Format error!") unless coded_offset_table.bytesize == number_of_objects * offset_size + + @count_objects = number_of_objects + + # decode offset table + if(offset_size != 3) + formats = ["","C*","n*","","N*"] + @offsets = coded_offset_table.unpack(formats[offset_size]) + else + @offsets = coded_offset_table.unpack("C*").each_slice(3).map { + |x,y,z| (x << 16) | (y << 8) | z + } + end + + @object_ref_size = object_ref_size + val = read_binary_object_at(file,fd,top_object) + + fd.close + val + end + + + # Convert CFPropertyList to binary format; since we have to count our objects we simply unique CFDictionary and CFArray + def to_str(opts={}) + @unique_table = {} + @count_objects = 0 + @object_refs = 0 + + @written_object_count = 0 + @object_table = [] + + @offsets = [] + + binary_str = "bplist00" + + @object_refs = count_object_refs(opts[:root]) + + opts[:root].to_binary(self) + + next_offset = 8 + offsets = @object_table.map do |object| + offset = next_offset + next_offset += object.bytesize + offset + end + binary_str << @object_table.join + + table_offset = next_offset + offset_size = Binary.bytes_needed(table_offset) + + if offset_size < 8 + # Fast path: encode the entire offset array at once. + binary_str << offsets.pack((%w(C n N N)[offset_size - 1]) + '*') + else + # Slow path: host may be little or big endian, must pack each offset + # separately. + offsets.each do |offset| + binary_str << "#{Binary.pack_it_with_size(offset_size,offset)}" + end + end + + binary_str << [offset_size, object_ref_size(@object_refs)].pack("x6CC") + binary_str << [@object_table.size].pack("x4N") + binary_str << [0].pack("x4N") + binary_str << [table_offset].pack("x4N") + + binary_str + end + + def object_ref_size object_refs + Binary.bytes_needed(object_refs) + end + + # read a „null” type (i.e. 
null byte, marker byte, bool value) + def read_binary_null_type(length) + case length + when 0 then 0 # null byte + when 8 then CFBoolean.new(false) + when 9 then CFBoolean.new(true) + when 15 then 15 # fill type + else + raise CFFormatError.new("unknown null type: #{length}") + end + end + protected :read_binary_null_type + + # read a binary int value + def read_binary_int(fname,fd,length) + if length > 4 + raise CFFormatError.new("Integer greater than 16 bytes: #{length}") + end + + nbytes = 1 << length + + buff = fd.read(nbytes) + + CFInteger.new( + case length + when 0 then buff.unpack("C")[0] + when 1 then buff.unpack("n")[0] + when 2 then buff.unpack("N")[0] + # 8 byte integers are always signed + when 3 then buff.unpack("q>")[0] + # 16 byte integers are used to represent unsigned 8 byte integers + # where the unsigned value is stored in the lower 8 bytes and the + # upper 8 bytes are unused. + when 4 then buff.unpack("Q>Q>")[1] + end + ) + end + protected :read_binary_int + + # read a binary real value + def read_binary_real(fname,fd,length) + raise CFFormatError.new("Real greater than 8 bytes: #{length}") if length > 3 + + nbytes = 1 << length + buff = fd.read(nbytes) + + CFReal.new( + case length + when 0 # 1 byte float? must be an error + raise CFFormatError.new("got #{length+1} byte float, must be an error!") + when 1 # 2 byte float? must be an error + raise CFFormatError.new("got #{length+1} byte float, must be an error!") + when 2 then + buff.reverse.unpack("e")[0] + when 3 then + buff.reverse.unpack("E")[0] + else + fail "unexpected length: #{length}" + end + ) + end + protected :read_binary_real + + # read a binary date value + def read_binary_date(fname,fd,length) + raise CFFormatError.new("Date greater than 8 bytes: #{length}") if length > 3 + + nbytes = 1 << length + buff = fd.read(nbytes) + + CFDate.new( + case length + when 0 then # 1 byte CFDate is an error + raise CFFormatError.new("#{length+1} byte CFDate, error") + when 1 then # 2 byte CFDate is an error + raise CFFormatError.new("#{length+1} byte CFDate, error") + when 2 then + buff.reverse.unpack("e")[0] + when 3 then + buff.reverse.unpack("E")[0] + end, + CFDate::TIMESTAMP_APPLE + ) + end + protected :read_binary_date + + # Read a binary data value + def read_binary_data(fname,fd,length) + CFData.new(read_fd(fd, length), CFData::DATA_RAW) + end + protected :read_binary_data + + def read_fd fd, length + length > 0 ? fd.read(length) : "" + end + + # Read a binary string value + def read_binary_string(fname,fd,length) + buff = read_fd fd, length + @unique_table[buff] = true unless @unique_table.has_key?(buff) + CFString.new(buff) + end + protected :read_binary_string + + # Convert the given string from one charset to another + def Binary.charset_convert(str,from,to="UTF-8") + return str.dup.force_encoding(from).encode(to) if str.respond_to?("encode") + Iconv.conv(to,from,str) + end + + # Count characters considering character set + def Binary.charset_strlen(str,charset="UTF-8") + if str.respond_to?(:encode) + size = str.length + else + utf8_str = Iconv.conv("UTF-8",charset,str) + size = utf8_str.scan(/./mu).size + end + + # UTF-16 code units in the range D800-DBFF are the beginning of + # a surrogate pair, and count as one additional character for + # length calculation. 
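      # Editorial example: "\u{1F600}".encode("UTF-16BE") is one Ruby character
      # but two UTF-16 code units (the surrogate pair D83D DE00). str.length
      # gives 1, and the 0xD8 lead byte checked below adds 1, so charset_strlen
      # returns 2 -- the code-unit count that string_to_binary stores in the
      # marker byte for UTF-16 strings.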
+ if charset =~ /^UTF-16/ + if str.respond_to?(:encode) + str.bytes.to_a.each_slice(2) { |pair| size += 1 if (0xd8..0xdb).include?(pair[0]) } + else + str.split('').each_slice(2) { |pair| size += 1 if ("\xd8".."\xdb").include?(pair[0]) } + end + end + + size + end + + # Read a unicode string value, coded as UTF-16BE + def read_binary_unicode_string(fname,fd,length) + # The problem is: we get the length of the string IN CHARACTERS; + # since a char in UTF-16 can be 16 or 32 bit long, we don't really know + # how long the string is in bytes + buff = fd.read(2*length) + + @unique_table[buff] = true unless @unique_table.has_key?(buff) + CFString.new(Binary.charset_convert(buff,"UTF-16BE","UTF-8")) + end + protected :read_binary_unicode_string + + def unpack_with_size(nbytes, buff) + format = ["C*", "n*", "N*", "N*"][nbytes - 1]; + + if nbytes == 3 + buff = "\0" + buff.scan(/.{1,3}/).join("\0") + end + + return buff.unpack(format) + end + + # Read an binary array value, including contained objects + def read_binary_array(fname,fd,length) + ary = [] + + # first: read object refs + if(length != 0) + buff = fd.read(length * @object_ref_size) + objects = unpack_with_size(@object_ref_size, buff) #buff.unpack(@object_ref_size == 1 ? "C*" : "n*") + + # now: read objects + 0.upto(length-1) do |i| + object = read_binary_object_at(fname,fd,objects[i]) + ary.push object + end + end + + CFArray.new(ary) + end + protected :read_binary_array + + # Read a dictionary value, including contained objects + def read_binary_dict(fname,fd,length) + dict = {} + + # first: read keys + if(length != 0) then + buff = fd.read(length * @object_ref_size) + keys = unpack_with_size(@object_ref_size, buff) + + # second: read object refs + buff = fd.read(length * @object_ref_size) + objects = unpack_with_size(@object_ref_size, buff) + + # read real keys and objects + 0.upto(length-1) do |i| + key = read_binary_object_at(fname,fd,keys[i]) + object = read_binary_object_at(fname,fd,objects[i]) + dict[key.value] = object + end + end + + CFDictionary.new(dict) + end + protected :read_binary_dict + + # Read an object type byte, decode it and delegate to the correct + # reader function + def read_binary_object(fname,fd) + # first: read the marker byte + buff = fd.read(1) + + object_length = buff.unpack("C*") + object_length = object_length[0] & 0xF + + buff = buff.unpack("H*") + object_type = buff[0][0].chr + + if(object_type != "0" && object_length == 15) then + object_length = read_binary_object(fname,fd) + object_length = object_length.value + end + + case object_type + when '0' # null, false, true, fillbyte + read_binary_null_type(object_length) + when '1' # integer + read_binary_int(fname,fd,object_length) + when '2' # real + read_binary_real(fname,fd,object_length) + when '3' # date + read_binary_date(fname,fd,object_length) + when '4' # data + read_binary_data(fname,fd,object_length) + when '5' # byte string, usually utf8 encoded + read_binary_string(fname,fd,object_length) + when '6' # unicode string (utf16be) + read_binary_unicode_string(fname,fd,object_length) + when '8' + CFUid.new(read_binary_int(fname, fd, object_length).value) + when 'a' # array + read_binary_array(fname,fd,object_length) + when 'd' # dictionary + read_binary_dict(fname,fd,object_length) + end + end + protected :read_binary_object + + # Read an object type byte at position $pos, decode it and delegate to the correct reader function + def read_binary_object_at(fname,fd,pos) + position = @offsets[pos] + fd.seek(position,IO::SEEK_SET) + 
read_binary_object(fname,fd) + end + protected :read_binary_object_at + + # pack an +int+ of +nbytes+ with size + def Binary.pack_it_with_size(nbytes,int) + case nbytes + when 1 then [int].pack('c') + when 2 then [int].pack('n') + when 4 then [int].pack('N') + when 8 + [int >> 32, int & 0xFFFFFFFF].pack('NN') + else + raise CFFormatError.new("Don't know how to pack #{nbytes} byte integer") + end + end + + def Binary.pack_int_array_with_size(nbytes, array) + case nbytes + when 1 then array.pack('C*') + when 2 then array.pack('n*') + when 4 then array.pack('N*') + when 8 + array.map { |int| [int >> 32, int & 0xFFFFFFFF].pack('NN') }.join + else + raise CFFormatError.new("Don't know how to pack #{nbytes} byte integer") + end + end + + # calculate how many bytes are needed to save +count+ + def Binary.bytes_needed(count) + case + when count < 2**8 then 1 + when count < 2**16 then 2 + when count < 2**32 then 4 + when count < 2**64 then 8 + else + raise CFFormatError.new("Data size too large: #{count}") + end + end + + # Create a type byte for binary format as defined by apple + def Binary.type_bytes(type, length) + if length < 15 + [(type << 4) | length].pack('C') + else + bytes = [(type << 4) | 0xF] + if length <= 0xFF + bytes.push(0x10, length).pack('CCC') # 1 byte length + elsif length <= 0xFFFF + bytes.push(0x11, length).pack('CCn') # 2 byte length + elsif length <= 0xFFFFFFFF + bytes.push(0x12, length).pack('CCN') # 4 byte length + elsif length <= 0x7FFFFFFFFFFFFFFF + bytes.push(0x13, length >> 32, length & 0xFFFFFFFF).pack('CCNN') # 8 byte length + else + raise CFFormatError.new("Integer too large: #{int}") + end + end + end + + def count_object_refs(object) + case object + when CFArray + contained_refs = 0 + object.value.each do |element| + if CFArray === element || CFDictionary === element + contained_refs += count_object_refs(element) + end + end + return object.value.size + contained_refs + when CFDictionary + contained_refs = 0 + object.value.each_value do |value| + if CFArray === value || CFDictionary === value + contained_refs += count_object_refs(value) + end + end + return object.value.keys.size * 2 + contained_refs + else + return 0 + end + end + + def Binary.ascii_string?(str) + if str.respond_to?(:ascii_only?) + str.ascii_only? + else + str !~ /[\x80-\xFF]/mn + end + end + + # Uniques and transforms a string value to binary format and adds it to the object table + def string_to_binary(val) + val = val.to_s + + @unique_table[val] ||= begin + if !Binary.ascii_string?(val) + val = Binary.charset_convert(val,"UTF-8","UTF-16BE") + bdata = Binary.type_bytes(0b0110, Binary.charset_strlen(val,"UTF-16BE")) + + val.force_encoding("ASCII-8BIT") if val.respond_to?("encode") + @object_table[@written_object_count] = bdata << val + else + bdata = Binary.type_bytes(0b0101,val.bytesize) + @object_table[@written_object_count] = bdata << val + end + + @written_object_count += 1 + @written_object_count - 1 + end + end + + # Codes an integer to binary format + def int_to_binary(value) + # Note: nbytes is actually an exponent. number of bytes = 2**nbytes. 
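    # Editorial worked example: for value = 300,
    #   300 > 0xFF                    -> nbytes = 1, i.e. 2**1 = 2 bytes
    #   Binary.type_bytes(0b0001, 1)  -> "\x11" (high nibble 1 = integer,
    #                                            low nibble = size exponent)
    #   [300].pack("n")               -> "\x01\x2C"
    # so the object table entry is the marker byte 0x11 followed by the
    # big-endian payload 0x01 0x2C.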
+ nbytes = 0 + nbytes = 1 if value > 0xFF # 1 byte unsigned integer + nbytes += 1 if value > 0xFFFF # 4 byte unsigned integer + nbytes += 1 if value > 0xFFFFFFFF # 8 byte unsigned integer + nbytes += 1 if value > 0x7FFFFFFFFFFFFFFF # 8 byte unsigned integer, stored in lower half of 16 bytes + nbytes = 3 if value < 0 # signed integers always stored in 8 bytes + + Binary.type_bytes(0b0001, nbytes) << + if nbytes < 4 + [value].pack(["C", "n", "N", "q>"][nbytes]) + else # nbytes == 4 + [0,value].pack("Q>Q>") + end + end + + # Codes a real value to binary format + def real_to_binary(val) + Binary.type_bytes(0b0010,3) << [val].pack("E").reverse + end + + # Converts a numeric value to binary and adds it to the object table + def num_to_binary(value) + @object_table[@written_object_count] = + if value.is_a?(CFInteger) + int_to_binary(value.value) + else + real_to_binary(value.value) + end + + @written_object_count += 1 + @written_object_count - 1 + end + + def uid_to_binary(value) + nbytes = 0 + nbytes = 1 if value > 0xFF # 1 byte integer + nbytes += 1 if value > 0xFFFF # 4 byte integer + nbytes += 1 if value > 0xFFFFFFFF # 8 byte integer + nbytes = 3 if value < 0 # 8 byte integer, since signed + + @object_table[@written_object_count] = Binary.type_bytes(0b1000, nbytes) << + if nbytes < 3 + [value].pack( + if nbytes == 0 then "C" + elsif nbytes == 1 then "n" + else "N" + end + ) + else + # 64 bit signed integer; we need the higher and the lower 32 bit of the value + high_word = value >> 32 + low_word = value & 0xFFFFFFFF + [high_word,low_word].pack("NN") + end + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert date value (apple format) to binary and adds it to the object table + def date_to_binary(val) + val = val.getutc.to_f - CFDate::DATE_DIFF_APPLE_UNIX # CFDate is a real, number of seconds since 01/01/2001 00:00:00 GMT + + @object_table[@written_object_count] = + (Binary.type_bytes(0b0011, 3) << [val].pack("E").reverse) + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert a bool value to binary and add it to the object table + def bool_to_binary(val) + + @object_table[@written_object_count] = val ? 
"\x9" : "\x8" # 0x9 is 1001, type indicator for true; 0x8 is 1000, type indicator for false + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert data value to binary format and add it to the object table + def data_to_binary(val) + @object_table[@written_object_count] = + (Binary.type_bytes(0b0100, val.bytesize) << val) + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert array to binary format and add it to the object table + def array_to_binary(val) + saved_object_count = @written_object_count + @written_object_count += 1 + #@object_refs += val.value.size + + values = val.value.map { |v| v.to_binary(self) } + bdata = Binary.type_bytes(0b1010, val.value.size) << + Binary.pack_int_array_with_size(object_ref_size(@object_refs), + values) + + @object_table[saved_object_count] = bdata + saved_object_count + end + + # Convert dictionary to binary format and add it to the object table + def dict_to_binary(val) + saved_object_count = @written_object_count + @written_object_count += 1 + + #@object_refs += val.value.keys.size * 2 + + keys_and_values = val.value.keys.map { |k| CFString.new(k).to_binary(self) } + keys_and_values.concat(val.value.values.map { |v| v.to_binary(self) }) + + bdata = Binary.type_bytes(0b1101,val.value.size) << + Binary.pack_int_array_with_size(object_ref_size(@object_refs), keys_and_values) + + @object_table[saved_object_count] = bdata + return saved_object_count + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPlistError.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPlistError.rb new file mode 100644 index 0000000..e276565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPlistError.rb @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# +# Exceptions used: +# CFPlistError:: General base exception +# CFFormatError:: Format error +# CFTypeError:: Type error +# +# Easy and simple :-) +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2010 +# License:: MIT License + +# general plist error. All exceptions thrown are derived from this class. +class CFPlistError < StandardError +end + +# Exception thrown when format errors occur +class CFFormatError < CFPlistError +end + +# Exception thrown when type errors occur +class CFTypeError < CFPlistError +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPropertyList.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPropertyList.rb new file mode 100644 index 0000000..43eb1d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFPropertyList.rb @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- + +require 'kconv' +require 'date' +require 'time' + +# +# CFPropertyList implementation +# +# class to read, manipulate and write both XML and binary property list +# files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +# for more documentation. 
+# +# == Example +# require 'cfpropertylist' +# +# # create a arbitrary data structure of basic data types +# data = { +# 'name' => 'John Doe', +# 'missing' => true, +# 'last_seen' => Time.now, +# 'friends' => ['Jane Doe','Julian Doe'], +# 'likes' => { +# 'me' => false +# } +# } +# +# # create CFPropertyList::List object +# plist = CFPropertyList::List.new +# +# # call CFPropertyList.guess() to create corresponding CFType values +# # pass in optional :convert_unknown_to_string => true to convert things like symbols into strings. +# plist.value = CFPropertyList.guess(data) +# +# # write plist to file +# plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) +# +# # … later, read it again +# plist = CFPropertyList::List.new(:file => "example.plist") +# data = CFPropertyList.native_types(plist.value) +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2010 +# License:: MIT License +module CFPropertyList + class << self + attr_accessor :xml_parser_interface + end + + # interface class for PList parsers + class ParserInterface + # load a plist + def load(opts={}) + return "" + end + + # convert a plist to string + def to_str(opts={}) + return true + end + end + + class XMLParserInterface < ParserInterface + def new_node(name) + end + + def new_text(val) + end + + def append_node(parent, child) + end + end +end + +dirname = File.dirname(__FILE__) +require dirname + '/rbCFPlistError.rb' +require dirname + '/rbCFTypes.rb' +require dirname + '/rbBinaryCFPropertyList.rb' +require dirname + '/rbPlainCFPropertyList.rb' + +begin + require dirname + '/rbLibXMLParser.rb' + temp = LibXML::XML::Parser::Options::NOBLANKS # check if we have a version with parser options + temp = false # avoid a warning + try_nokogiri = false + CFPropertyList.xml_parser_interface = CFPropertyList::LibXMLParser +rescue LoadError, NameError + try_nokogiri = true +end + +if try_nokogiri then + begin + require dirname + '/rbNokogiriParser.rb' + CFPropertyList.xml_parser_interface = CFPropertyList::NokogiriXMLParser + rescue LoadError + require dirname + '/rbREXMLParser.rb' + CFPropertyList.xml_parser_interface = CFPropertyList::ReXMLParser + end +end + + +module CFPropertyList + # Create CFType hierarchy by guessing the correct CFType, e.g. + # + # x = { + # 'a' => ['b','c','d'] + # } + # cftypes = CFPropertyList.guess(x) + # + # pass optional options hash. 
Only possible value actually: + # +convert_unknown_to_string+:: Convert unknown objects to string calling to_str() + # +converter_method+:: Convert unknown objects to known objects calling +method_name+ + # + # cftypes = CFPropertyList.guess(x,:convert_unknown_to_string => true,:converter_method => :to_hash, :converter_with_opts => true) + def guess(object, options = {}) + case object + when Integer then CFInteger.new(object) + when UidFixnum then CFUid.new(object) + when Float then CFReal.new(object) + when TrueClass, FalseClass then CFBoolean.new(object) + + when Blob + CFData.new(object, CFData::DATA_RAW) + + when String, Symbol + CFString.new(object.to_s) + + when Time, DateTime, Date + CFDate.new(object) + + when Array, Enumerator + ary = Array.new + object.each do |o| + ary.push CFPropertyList.guess(o, options) + end + CFArray.new(ary) + + when Hash + hsh = Hash.new + object.each_pair do |k,v| + k = k.to_s if k.is_a?(Symbol) + hsh[k] = CFPropertyList.guess(v, options) + end + CFDictionary.new(hsh) + else + case + when Object.const_defined?('BigDecimal') && object.is_a?(BigDecimal) + CFReal.new(object) + when object.respond_to?(:read) + raw_data = object.read + # treat the data as a bytestring (ASCII-8BIT) if Ruby supports it. Do this by forcing + # the encoding, on the assumption that the bytes were read correctly, and just tagged with + # an inappropriate encoding, rather than transcoding. + raw_data.force_encoding(Encoding::ASCII_8BIT) if raw_data.respond_to?(:force_encoding) + CFData.new(raw_data, CFData::DATA_RAW) + when options[:converter_method] && object.respond_to?(options[:converter_method]) + if options[:converter_with_opts] + CFPropertyList.guess(object.send(options[:converter_method],options),options) + else + CFPropertyList.guess(object.send(options[:converter_method]),options) + end + when options[:convert_unknown_to_string] + CFString.new(object.to_s) + else + raise CFTypeError.new("Unknown class #{object.class.to_s}. Try using :convert_unknown_to_string if you want to use unknown object types!") + end + end + end + + # Converts a CFType hiercharchy to native Ruby types + def native_types(object,keys_as_symbols=false) + return if object.nil? + + if(object.is_a?(CFDate) || object.is_a?(CFString) || object.is_a?(CFInteger) || object.is_a?(CFReal) || object.is_a?(CFBoolean)) || object.is_a?(CFUid) then + return object.value + elsif(object.is_a?(CFData)) then + return CFPropertyList::Blob.new(object.decoded_value) + elsif(object.is_a?(CFArray)) then + ary = [] + object.value.each do + |v| + ary.push CFPropertyList.native_types(v) + end + + return ary + elsif(object.is_a?(CFDictionary)) then + hsh = {} + object.value.each_pair do + |k,v| + k = k.to_sym if keys_as_symbols + hsh[k] = CFPropertyList.native_types(v) + end + + return hsh + end + end + + module_function :guess, :native_types + + # Class representing a CFPropertyList. 
Instantiate with #new + class List + # Format constant for binary format + FORMAT_BINARY = 1 + + # Format constant for XML format + FORMAT_XML = 2 + + # Format constant for the old plain format + FORMAT_PLAIN = 3 + + # Format constant for automatic format recognizing + FORMAT_AUTO = 0 + + @@parsers = [Binary, CFPropertyList.xml_parser_interface, PlainParser] + + # Path of PropertyList + attr_accessor :filename + # the original format of the PropertyList + attr_accessor :format + # the root value in the plist file + attr_accessor :value + # default value for XML generation; if true generate formatted XML + attr_accessor :formatted + + # initialize a new CFPropertyList, arguments are: + # + # :file:: Parse a file + # :format:: Format is one of FORMAT_BINARY or FORMAT_XML. Defaults to FORMAT_AUTO + # :data:: Parse a string + # + # All arguments are optional + def initialize(opts={}) + @filename = opts[:file] + @format = opts[:format] || FORMAT_AUTO + @data = opts[:data] + @formatted = opts[:formatted] + + load(@filename) unless @filename.nil? + load_str(@data) unless @data.nil? + end + + # returns a list of registered parsers + def self.parsers + @@parsers + end + + # set a list of parsers + def self.parsers=(val) + @@parsers = val + end + + # Load an XML PropertyList + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_xml(filename=nil) + load(filename,List::FORMAT_XML) + end + + # read a binary plist file + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_binary(filename=nil) + load(filename,List::FORMAT_BINARY) + end + + # read a plain plist file + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_plain(filename=nil) + load(filename,List::FORMAT_PLAIN) + end + + # load a plist from a XML string + # str:: The string containing the plist + def load_xml_str(str=nil) + load_str(str,List::FORMAT_XML) + end + + # load a plist from a binary string + # str:: The string containing the plist + def load_binary_str(str=nil) + load_str(str,List::FORMAT_BINARY) + end + + # load a plist from a plain string + # str:: The string containing the plist + def load_plain_str(str=nil) + load_str(str,List::FORMAT_PLAIN) + end + + # load a plist from a string + # str = nil:: The string containing the plist + # format = nil:: The format of the plist + def load_str(str=nil,format=nil) + str = @data if str.nil? + format = @format if format.nil? + + @value = {} + case format + when List::FORMAT_BINARY, List::FORMAT_XML, List::FORMAT_PLAIN then + prsr = @@parsers[format-1].new + @value = prsr.load({:data => str}) + + when List::FORMAT_AUTO then # what we now do is ugly, but neccessary to recognize the file format + filetype = str[0..5] + version = str[6..7] + + prsr = nil + + if filetype == "bplist" then + raise CFFormatError.new("Wrong file version #{version}") unless version == "00" + prsr = Binary.new + @format = List::FORMAT_BINARY + else + if str =~ /^<(\?xml|!DOCTYPE|plist)/ + prsr = CFPropertyList.xml_parser_interface.new + @format = List::FORMAT_XML + else + prsr = PlainParser.new + @format = List::FORMAT_PLAIN + end + end + + @value = prsr.load({:data => str}) + end + end + + # Read a plist file + # file = nil:: The filename of the file to read. If nil, use +filename+ instance variable + # format = nil:: The format of the plist file. 
Auto-detect if nil + def load(file=nil,format=nil) + file = @filename if file.nil? + format = @format if format.nil? + @value = {} + + raise IOError.new("File #{file} not readable!") unless File.readable? file + + case format + when List::FORMAT_BINARY, List::FORMAT_XML, List::FORMAT_PLAIN then + prsr = @@parsers[format-1].new + @value = prsr.load({:file => file}) + + when List::FORMAT_AUTO then # what we now do is ugly, but neccessary to recognize the file format + magic_number = IO.read(file,12) + raise IOError.new("File #{file} is empty.") unless magic_number + filetype = magic_number[0..5] + version = magic_number[6..7] + + prsr = nil + if filetype == "bplist" then + raise CFFormatError.new("Wrong file version #{version}") unless version == "00" + prsr = Binary.new + @format = List::FORMAT_BINARY + else + if magic_number =~ /^<(\?xml|!DOCTYPE|plist)/ + prsr = CFPropertyList.xml_parser_interface.new + @format = List::FORMAT_XML + else + prsr = PlainParser.new + @format = List::FORMAT_PLAIN + end + end + + @value = prsr.load({:file => file}) + end + + raise CFFormatError.new("Invalid format or parser error!") if @value.nil? + end + + # Serialize CFPropertyList object to specified format and write it to file + # file = nil:: The filename of the file to write to. Uses +filename+ instance variable if nil + # format = nil:: The format to save in. Uses +format+ instance variable if nil + def save(file=nil,format=nil,opts={}) + format = @format if format.nil? + file = @filename if file.nil? + + if format != FORMAT_BINARY && format != FORMAT_XML && format != FORMAT_PLAIN + raise CFFormatError.new("Format #{format} not supported, use List::FORMAT_BINARY or List::FORMAT_XML") + end + + if(!File.exists?(file)) then + raise IOError.new("File #{file} not writable!") unless File.writable?(File.dirname(file)) + elsif(!File.writable?(file)) then + raise IOError.new("File #{file} not writable!") + end + + opts[:root] = @value + opts[:formatted] = @formatted unless opts.has_key?(:formatted) + + prsr = @@parsers[format-1].new + + content = prsr.to_str(opts) + + File.open(file, 'wb') { + |fd| + fd.write content + } + end + + # convert plist to string + # format = List::FORMAT_BINARY:: The format to save the plist + # opts={}:: Pass parser options + def to_str(format=List::FORMAT_BINARY,opts={}) + if format != FORMAT_BINARY && format != FORMAT_XML && format != FORMAT_PLAIN + raise CFFormatError.new("Format #{format} not supported, use List::FORMAT_BINARY or List::FORMAT_XML") + end + + prsr = @@parsers[format-1].new + + opts[:root] = @value + opts[:formatted] = @formatted unless opts.has_key?(:formatted) + + return prsr.to_str(opts) + end + end +end + + +class Array + # convert an array to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end +end + +class Enumerator + # convert an array to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end +end + +class Hash + # convert a hash to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end 
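  # Usage sketch for the convenience wrappers above (editorial illustration;
  # :plist_format and the format constants come from this file, the payload
  # is made up):
  #
  #   require 'cfpropertylist'
  #   xml = { 'retries' => 3, 'verbose' => true }.to_plist(
  #     :plist_format => CFPropertyList::List::FORMAT_XML
  #   )
  #   # => an XML plist string; omit the option to get FORMAT_BINARY, the default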
+end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFTypes.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFTypes.rb new file mode 100644 index 0000000..8563721 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbCFTypes.rb @@ -0,0 +1,349 @@ +# -*- coding: utf-8 -*- +# +# CFTypes, e.g. CFString, CFInteger +# needed to create unambiguous plists +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2009 +# License:: MIT License + +require 'base64' + +module CFPropertyList + ## + # Blob is intended to distinguish between a Ruby String instance that should + # be converted to a CFString type and a Ruby String instance that should be + # converted to a CFData type + class Blob < String + end + + ## + # UidFixnum is intended to distinguish between a Ruby Integer + # instance that should be converted to a CFInteger/CFReal type and a + # Ruby Integer instance that should be converted to a CFUid type. + class UidFixnum < Integer + end + + # This class defines the base class for all CFType classes + # + class CFType + # value of the type + attr_accessor :value + + def initialize(value=nil) + @value = value + end + + def to_xml(parser) + end + + def to_binary(bplist) + end + + def to_plain(plist) + end + end + + # This class holds string values, both, UTF-8 and UTF-16BE + # It will convert the value to UTF-16BE if necessary (i.e. if non-ascii char contained) + class CFString < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('string') + n = parser.append_node(n, parser.new_text(@value)) unless @value.nil? + n + end + + # convert to binary + def to_binary(bplist) + bplist.string_to_binary(@value); + end + + def to_plain(plist) + if @value =~ /^\w+$/ + @value + else + quoted + end + end + + def quoted + str = '"' + @value.each_char do |c| + str << case c + when '"' + '\\"' + when '\\' + '\\' + when "\a" + "\\a" + when "\b" + "\\b" + when "\f" + "\\f" + when "\n" + "\n" + when "\v" + "\\v" + when "\r" + "\\r" + when "\t" + "\\t" + else + c + end + end + + str << '"' + end + end + + # This class holds integer/fixnum values + class CFInteger < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('integer') + n = parser.append_node(n, parser.new_text(@value.to_s)) + n + end + + # convert to binary + def to_binary(bplist) + bplist.num_to_binary(self) + end + + def to_plain(plist) + @value.to_s + end + end + + # This class holds float values + class CFReal < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('real') + n = parser.append_node(n, parser.new_text(@value.to_s)) + n + end + + # convert to binary + def to_binary(bplist) + bplist.num_to_binary(self) + end + + def to_plain(plist) + @value.to_s + end + end + + # This class holds Time values. While Apple uses seconds since 2001, + # the rest of the world uses seconds since 1970. So if you access value + # directly, you get the Time class. 
If you access via get_value you either + # geht the timestamp or the Apple timestamp + class CFDate < CFType + TIMESTAMP_APPLE = 0 + TIMESTAMP_UNIX = 1 + DATE_DIFF_APPLE_UNIX = 978307200 + + # create a XML date strimg from a time object + def CFDate.date_string(val) + # 2009-05-13T20:23:43Z + val.getutc.strftime("%Y-%m-%dT%H:%M:%SZ") + end + + # parse a XML date string + def CFDate.parse_date(val) + # 2009-05-13T20:23:43Z + val =~ %r{^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})Z$} + year,month,day,hour,min,sec = $1, $2, $3, $4, $5, $6 + return Time.utc(year,month,day,hour,min,sec).getlocal + end + + # set value to defined state + def initialize(value = nil,format=CFDate::TIMESTAMP_UNIX) + if(value.is_a?(Time) || value.nil?) then + @value = value.nil? ? Time.now : value + elsif value.instance_of? Date + @value = Time.utc(value.year, value.month, value.day, 0, 0, 0) + elsif value.instance_of? DateTime + @value = value.to_time.utc + else + set_value(value,format) + end + end + + # set value with timestamp, either Apple or UNIX + def set_value(value,format=CFDate::TIMESTAMP_UNIX) + if(format == CFDate::TIMESTAMP_UNIX) then + @value = Time.at(value) + else + @value = Time.at(value + CFDate::DATE_DIFF_APPLE_UNIX) + end + end + + # get timestamp, either UNIX or Apple timestamp + def get_value(format=CFDate::TIMESTAMP_UNIX) + if(format == CFDate::TIMESTAMP_UNIX) then + @value.to_i + else + @value.to_f - CFDate::DATE_DIFF_APPLE_UNIX + end + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('date') + n = parser.append_node(n, parser.new_text(CFDate::date_string(@value))) + n + end + + # convert to binary + def to_binary(bplist) + bplist.date_to_binary(@value) + end + + def to_plain(plist) + @value.strftime("%Y-%m-%d %H:%M:%S %z") + end + end + + # This class contains a boolean value + class CFBoolean < CFType + # convert to XML + def to_xml(parser) + parser.new_node(@value ? 'true' : 'false') + end + + # convert to binary + def to_binary(bplist) + bplist.bool_to_binary(@value); + end + + def to_plain(plist) + @value ? 
"true" : "false" + end + end + + # This class contains binary data values + class CFData < CFType + # Base64 encoded data + DATA_BASE64 = 0 + # Raw data + DATA_RAW = 1 + + # set value to defined state, either base64 encoded or raw + def initialize(value=nil,format=DATA_BASE64) + if(format == DATA_RAW) + @raw_value = value + else + @value = value + end + end + + # get base64 encoded value + def encoded_value + @value ||= "\n#{Base64.encode64(@raw_value).gsub("\n", '').scan(/.{1,76}/).join("\n")}\n" + end + + # get base64 decoded value + def decoded_value + @raw_value ||= Blob.new(Base64.decode64(@value)) + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('data') + n = parser.append_node(n, parser.new_text(encoded_value())) + n + end + + # convert to binary + def to_binary(bplist) + bplist.data_to_binary(decoded_value()) + end + + def to_plain(plist) + "<" + decoded_value.unpack("H*").join("") + ">" + end + end + + # This class contains an array of values + class CFArray < CFType + # create a new array CFType + def initialize(val=[]) + @value = val + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('array') + @value.each do |v| + n = parser.append_node(n, v.to_xml(parser)) + end + n + end + + # convert to binary + def to_binary(bplist) + bplist.array_to_binary(self) + end + + def to_plain(plist) + ary = @value.map { |v| v.to_plain(plist) } + "( " + ary.join(", ") + " )" + end + end + + # this class contains a hash of values + class CFDictionary < CFType + # Create new CFDictonary type. + def initialize(value={}) + @value = value + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('dict') + @value.each_pair do |key, value| + k = parser.append_node(parser.new_node('key'), parser.new_text(key.to_s)) + n = parser.append_node(n, k) + n = parser.append_node(n, value.to_xml(parser)) + end + n + end + + # convert to binary + def to_binary(bplist) + bplist.dict_to_binary(self) + end + + def to_plain(plist) + str = "{ " + cfstr = CFString.new() + + @value.each do |k,v| + cfstr.value = k + str << cfstr.to_plain(plist) + " = " + v.to_plain(plist) + "; " + end + + str << "}" + end + end + + class CFUid < CFType + def to_xml(parser) + CFDictionary.new({'CF$UID' => CFInteger.new(@value)}).to_xml(parser) + end + + # convert to binary + def to_binary(bplist) + bplist.uid_to_binary(@value) + end + + def to_plain(plist) + CFDictionary.new({'CF$UID' => CFInteger.new(@value)}).to_plain(plist) + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbLibXMLParser.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbLibXMLParser.rb new file mode 100644 index 0000000..0654f08 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbLibXMLParser.rb @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +require 'libxml' + +module CFPropertyList + # XML parser + class LibXMLParser < XMLParserInterface + PARSER_OPTIONS = LibXML::XML::Parser::Options::NOBLANKS|LibXML::XML::Parser::Options::NONET + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + doc = nil + + if(opts.has_key?(:file)) then + doc = LibXML::XML::Document.file(opts[:file],:options => PARSER_OPTIONS) + else + doc = LibXML::XML::Document.string(opts[:data],:options => PARSER_OPTIONS) + end + + if doc + root = doc.root.first + return import_xml(root) + end + rescue LibXML::XML::Error => e + raise CFFormatError.new('invalid XML: 
' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = LibXML::XML::Document.new + + doc.root = LibXML::XML::Node.new('plist') + doc.encoding = LibXML::XML::Encoding::UTF_8 + + doc.root['version'] = '1.0' + doc.root << opts[:root].to_xml(self) + + # ugly hack, but there's no other possibility I know + str = doc.to_s(:indent => opts[:formatted]) + str1 = String.new + first = false + str.each_line do |line| + str1 << line + unless(first) then + str1 << "\n" if line =~ /^\s*<\?xml/ + end + + first = true + end + + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + return str1 + end + + def new_node(name) + LibXML::XML::Node.new(name) + end + + def new_text(val) + LibXML::XML::Node.new_text(val) + end + + def append_node(parent, child) + parent << child + end + + protected + + # get the value of a DOM node + def get_value(n) + content = if n.children? + n.first.content + else + n.content + end + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + + if node.children? then + node.children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? + + if n.name == "key" then + key = get_value(n) + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + + if node.children? then + node.children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? 
+ ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbNokogiriParser.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbNokogiriParser.rb new file mode 100644 index 0000000..e2de688 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbNokogiriParser.rb @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +require 'nokogiri' + +module CFPropertyList + # XML parser + class NokogiriXMLParser < ParserInterface + PARSER_OPTIONS = Nokogiri::XML::ParseOptions::NOBLANKS|Nokogiri::XML::ParseOptions::NONET + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + doc = nil + if(opts.has_key?(:file)) then + File.open(opts[:file], "rb") { |fd| doc = Nokogiri::XML::Document.parse(fd, nil, nil, PARSER_OPTIONS) } + else + doc = Nokogiri::XML::Document.parse(opts[:data], nil, nil, PARSER_OPTIONS) + end + + if doc + root = doc.root.children.first + return import_xml(root) + end + rescue Nokogiri::XML::SyntaxError => e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = Nokogiri::XML::Document.new + @doc = doc + + doc.root = doc.create_element 'plist', :version => '1.0' + doc.encoding = 'UTF-8' + + doc.root << opts[:root].to_xml(self) + + # ugly hack, but there's no other possibility I know + s_opts = Nokogiri::XML::Node::SaveOptions::AS_XML + s_opts |= Nokogiri::XML::Node::SaveOptions::FORMAT if opts[:formatted] + + str = doc.serialize(:save_with => s_opts) + str1 = String.new + first = false + str.each_line do |line| + str1 << line + unless(first) then + str1 << "\n" if line =~ /^\s*<\?xml/ + end + + first = true + end + + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + return str1 + end + + def new_node(name) + @doc.create_element name + end + + def new_text(val) + @doc.create_text_node val + end + + def append_node(parent, child) + parent << child + end + + protected + + # get the value of a DOM node + def get_value(n) + content = if n.children.empty? + n.content + else + n.children.first.content + end + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + children = node.children + + unless children.empty? then + children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? + + if n.name == "key" then + key = get_value(n) + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + children = node.children + + unless children.empty? then + children.each do |n| + next if n.text? 
# avoid a bug of libxml + next if n.comment? + ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbPlainCFPropertyList.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbPlainCFPropertyList.rb new file mode 100644 index 0000000..fa698ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.2/lib/cfpropertylist/rbPlainCFPropertyList.rb @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- + +require 'strscan' + +module CFPropertyList + # XML parser + class PlainParser < XMLParserInterface + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + @doc = nil + + if(opts.has_key?(:file)) then + File.open(opts[:file], :external_encoding => "ASCII") do |fd| + @doc = StringScanner.new(fd.read) + end + else + @doc = StringScanner.new(opts[:data]) + end + + if @doc + root = import_plain + raise CFFormatError.new('content after root object') unless @doc.eos? + + return root + end + + raise CFFormatError.new('invalid plist string or file not found') + end + + SPACES_AND_COMMENTS = %r{((?:/\*.*?\*/)|(?://.*?$\n?)|(?:\s*))+}x + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + opts[:root].to_plain(self) + end + + protected + def skip_whitespaces + @doc.skip SPACES_AND_COMMENTS + end + + def read_dict + skip_whitespaces + hsh = {} + + while not @doc.scan(/\}/) + key = import_plain + raise CFFormatError.new("invalid dictionary format") if !key + + if key.is_a?(CFString) + key = key.value + elsif key.is_a?(CFInteger) or key.is_a?(CFReal) + key = key.value.to_s + else + raise CFFormatError.new("invalid key format") + end + + skip_whitespaces + + raise CFFormatError.new("invalid dictionary format") unless @doc.scan(/=/) + + skip_whitespaces + val = import_plain + + skip_whitespaces + raise CFFormatError.new("invalid dictionary format") unless @doc.scan(/;/) + skip_whitespaces + + hsh[key] = val + raise CFFormatError.new("invalid dictionary format") if @doc.eos? + end + + CFDictionary.new(hsh) + end + + def read_array + skip_whitespaces + ary = [] + + while not @doc.scan(/\)/) + val = import_plain + + return nil if not val or not val.value + skip_whitespaces + + if not @doc.skip(/,\s*/) + if @doc.scan(/\)/) + ary << val + return CFArray.new(ary) + end + + raise CFFormatError.new("invalid array format") + end + + ary << val + raise CFFormatError.new("invalid array format") if @doc.eos? + end + + CFArray.new(ary) + end + + def escape_char + case @doc.matched + when '"' + '"' + when '\\' + '\\' + when 'a' + "\a" + when 'b' + "\b" + when 'f' + "\f" + when 'n' + "\n" + when 'v' + "\v" + when 'r' + "\r" + when 't' + "\t" + when 'U' + @doc.scan(/.{4}/).hex.chr('utf-8') + end + end + + def read_quoted + str = '' + + while not @doc.scan(/"/) + if @doc.scan(/\\/) + @doc.scan(/./) + str << escape_char + + elsif @doc.eos? 
+ raise CFFormatError.new("unterminated string") + + else @doc.scan(/./) + str << @doc.matched + end + end + + CFString.new(str) + end + + def read_unquoted + raise CFFormatError.new("unexpected end of file") if @doc.eos? + + if @doc.scan(/(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)(?:\s+(\+|-)(\d\d)(\d\d))?/) + year,month,day,hour,min,sec,pl_min,tz_hour, tz_min = @doc[1], @doc[2], @doc[3], @doc[4], @doc[5], @doc[6], @doc[7], @doc[8], @doc[9] + CFDate.new(Time.new(year, month, day, hour, min, sec, pl_min ? sprintf("%s%s:%s", pl_min, tz_hour, tz_min) : nil)) + + elsif @doc.scan(/-?\d+?\.\d+\b/) + CFReal.new(@doc.matched.to_f) + + elsif @doc.scan(/-?\d+\b/) + CFInteger.new(@doc.matched.to_i) + + elsif @doc.scan(/\b(true|false)\b/) + CFBoolean.new(@doc.matched == 'true') + else + CFString.new(@doc.scan(/\w+/)) + end + end + + def read_binary + @doc.scan(/(.*?)>/) + + hex_str = @doc[1].gsub(/ /, '') + CFData.new([hex_str].pack("H*"), CFData::DATA_RAW) + end + + # import the XML values + def import_plain + skip_whitespaces + ret = nil + + if @doc.scan(/\{/) # dict + ret = read_dict + elsif @doc.scan(/\(/) # array + ret = read_array + elsif @doc.scan(/"/) # string + ret = read_quoted + elsif @doc.scan(/ e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = REXML::Document.new + @doc = doc + + doc.context[:attribute_quote] = :quote + + doc.add_element 'plist', {'version' => '1.0'} + doc.root << opts[:root].to_xml(self) + + formatter = if opts[:formatted] then + f = REXML::Formatters::Pretty.new(2) + f.compact = true + f.width = Float::INFINITY + f + else + REXML::Formatters::Default.new + end + + str = formatter.write(doc.root, "") + str1 = "\n\n" + str + "\n" + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + + return str1 + end + + def new_node(name) + REXML::Element.new(name) + end + + def new_text(val) + val + end + + def append_node(parent, child) + if child.is_a?(String) then + parent.add_text child + else + parent.elements << child + end + parent + end + + protected + + # get the value of a DOM node + def get_value(n) + content = n.text + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + + if node.has_elements? then + node.elements.each do |n| + next if n.name == '#text' # avoid a bug of libxml + next if n.name == '#comment' + + if n.name == "key" then + key = get_value(n) + key = '' if key.nil? # REXML returns nil if key is empty + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + + if node.has_elements? then + node.elements.each do |n| + next if n.name == '#text' # avoid a bug of libxml + ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + ret.value = '' if ret.value.nil? 
# REXML returns nil for empty elements' .text attribute + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/LICENSE b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/LICENSE new file mode 100644 index 0000000..ba6ffb2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2010 Christian Kruse, + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/README.md b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/README.md new file mode 100644 index 0000000..49f0c07 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/README.md @@ -0,0 +1,79 @@ +CFPropertyList implementation +class to read, manipulate and write both XML and binary property list +files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +for more documentation. + +# Caution! + +In version 3.0.0 we dropped Ruby 1.8 compatibility. If you are using +Ruby 1.8 consider to update Ruby; if you can't upgrade, don't upgrade +CFPropertyList. + +# Installation + +You could either use ruby gems and install it via + +```bash +gem install CFPropertyList +``` + +or you could clone this repository and place it somewhere in your load path. 
+ +Example: +```ruby +require 'cfpropertylist' +``` + +If you're using Rails, you can add it into your Gemfile + +```ruby +gem 'CFPropertyList' +``` + +# Usage + + ## create a arbitrary data structure of basic data types + +```ruby +data = { + 'name' => 'John Doe', + 'missing' => true, + 'last_seen' => Time.now, + 'friends' => ['Jane Doe','Julian Doe'], + 'likes' => { + 'me' => false + } +} +``` + +## create CFPropertyList::List object + +```ruby +plist = CFPropertyList::List.new +``` + +## call CFPropertyList.guess() to create corresponding CFType values + +```ruby +plist.value = CFPropertyList.guess(data) +``` + +## write plist to file +```ruby +plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) +``` + +## … later, read it again +```ruby +plist = CFPropertyList::List.new(:file => "example.plist") +data = CFPropertyList.native_types(plist.value) +``` + +# Author and license + +**Author:** Christian Kruse (mailto:cjk@wwwtech.de) + +**Copyright:** Copyright (c) 2010 + +**License:** MIT License + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/README.rdoc b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/README.rdoc new file mode 100644 index 0000000..a71005c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/README.rdoc @@ -0,0 +1,43 @@ +CFPropertyList implementation +class to read, manipulate and write both XML and binary property list +files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +for more documentation. + +== Installation + +You could either use ruby gems and install it via + + gem install CFPropertyList + +or you could clone this repository and place it somewhere in your load path. + +== Example + require 'cfpropertylist' + + # create a arbitrary data structure of basic data types + data = { + 'name' => 'John Doe', + 'missing' => true, + 'last_seen' => Time.now, + 'friends' => ['Jane Doe','Julian Doe'], + 'likes' => { + 'me' => false + } + } + + # create CFPropertyList::List object + plist = CFPropertyList::List.new + + # call CFPropertyList.guess() to create corresponding CFType values + plist.value = CFPropertyList.guess(data) + + # write plist to file + plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) + + # … later, read it again + plist = CFPropertyList::List.new(:file => "example.plist") + data = CFPropertyList.native_types(plist.value) + +Author:: Christian Kruse (mailto:cjk@wwwtech.de) +Copyright:: Copyright (c) 2010 +License:: MIT License diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/THANKS b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/THANKS new file mode 100644 index 0000000..c981a51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/THANKS @@ -0,0 +1,7 @@ +Special thanks to: + +Steve Madsen for providing a lot of performance patches and bugfixes! 
+Have a look at his Github account: + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist.rb new file mode 100644 index 0000000..f83fef7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist.rb @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +require 'cfpropertylist/rbCFPropertyList' + + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbBinaryCFPropertyList.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbBinaryCFPropertyList.rb new file mode 100644 index 0000000..da84b63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbBinaryCFPropertyList.rb @@ -0,0 +1,594 @@ +# -*- coding: utf-8 -*- + +require 'stringio' + +module CFPropertyList + # Binary PList parser class + class Binary + # Read a binary plist file + def load(opts) + @unique_table = {} + @count_objects = 0 + @object_refs = 0 + + @written_object_count = 0 + @object_table = [] + @object_ref_size = 0 + + @offsets = [] + + fd = nil + if(opts.has_key?(:file)) + fd = File.open(opts[:file],"rb") + file = opts[:file] + else + fd = StringIO.new(opts[:data],"rb") + file = "" + end + + # first, we read the trailer: 32 byte from the end + fd.seek(-32,IO::SEEK_END) + buff = fd.read(32) + + offset_size, object_ref_size, number_of_objects, top_object, table_offset = buff.unpack "x6CCx4Nx4Nx4N" + + # after that, get the offset table + fd.seek(table_offset, IO::SEEK_SET) + coded_offset_table = fd.read(number_of_objects * offset_size) + raise CFFormatError.new("#{file}: Format error!") unless coded_offset_table.bytesize == number_of_objects * offset_size + + @count_objects = number_of_objects + + # decode offset table + if(offset_size != 3) + formats = ["","C*","n*","","N*"] + @offsets = coded_offset_table.unpack(formats[offset_size]) + else + @offsets = coded_offset_table.unpack("C*").each_slice(3).map { + |x,y,z| (x << 16) | (y << 8) | z + } + end + + @object_ref_size = object_ref_size + val = read_binary_object_at(file,fd,top_object) + + fd.close + val + end + + + # Convert CFPropertyList to binary format; since we have to count our objects we simply unique CFDictionary and CFArray + def to_str(opts={}) + @unique_table = {} + @count_objects = 0 + @object_refs = 0 + + @written_object_count = 0 + @object_table = [] + + @offsets = [] + + binary_str = "bplist00" + + @object_refs = count_object_refs(opts[:root]) + + opts[:root].to_binary(self) + + next_offset = 8 + offsets = @object_table.map do |object| + offset = next_offset + next_offset += object.bytesize + offset + end + binary_str << @object_table.join + + table_offset = next_offset + offset_size = Binary.bytes_needed(table_offset) + + if offset_size < 8 + # Fast path: encode the entire offset array at once. + binary_str << offsets.pack((%w(C n N N)[offset_size - 1]) + '*') + else + # Slow path: host may be little or big endian, must pack each offset + # separately. + offsets.each do |offset| + binary_str << "#{Binary.pack_it_with_size(offset_size,offset)}" + end + end + + binary_str << [offset_size, object_ref_size(@object_refs)].pack("x6CC") + binary_str << [@object_table.size].pack("x4N") + binary_str << [0].pack("x4N") + binary_str << [table_offset].pack("x4N") + + binary_str + end + + def object_ref_size object_refs + Binary.bytes_needed(object_refs) + end + + # read a „null” type (i.e. 
null byte, marker byte, bool value) + def read_binary_null_type(length) + case length + when 0 then 0 # null byte + when 8 then CFBoolean.new(false) + when 9 then CFBoolean.new(true) + when 15 then 15 # fill type + else + raise CFFormatError.new("unknown null type: #{length}") + end + end + protected :read_binary_null_type + + # read a binary int value + def read_binary_int(fname,fd,length) + if length > 4 + raise CFFormatError.new("Integer greater than 16 bytes: #{length}") + end + + nbytes = 1 << length + + buff = fd.read(nbytes) + + CFInteger.new( + case length + when 0 then buff.unpack("C")[0] + when 1 then buff.unpack("n")[0] + when 2 then buff.unpack("N")[0] + # 8 byte integers are always signed + when 3 then buff.unpack("q>")[0] + # 16 byte integers are used to represent unsigned 8 byte integers + # where the unsigned value is stored in the lower 8 bytes and the + # upper 8 bytes are unused. + when 4 then buff.unpack("Q>Q>")[1] + end + ) + end + protected :read_binary_int + + # read a binary real value + def read_binary_real(fname,fd,length) + raise CFFormatError.new("Real greater than 8 bytes: #{length}") if length > 3 + + nbytes = 1 << length + buff = fd.read(nbytes) + + CFReal.new( + case length + when 0 # 1 byte float? must be an error + raise CFFormatError.new("got #{length+1} byte float, must be an error!") + when 1 # 2 byte float? must be an error + raise CFFormatError.new("got #{length+1} byte float, must be an error!") + when 2 then + buff.reverse.unpack("e")[0] + when 3 then + buff.reverse.unpack("E")[0] + else + fail "unexpected length: #{length}" + end + ) + end + protected :read_binary_real + + # read a binary date value + def read_binary_date(fname,fd,length) + raise CFFormatError.new("Date greater than 8 bytes: #{length}") if length > 3 + + nbytes = 1 << length + buff = fd.read(nbytes) + + CFDate.new( + case length + when 0 then # 1 byte CFDate is an error + raise CFFormatError.new("#{length+1} byte CFDate, error") + when 1 then # 2 byte CFDate is an error + raise CFFormatError.new("#{length+1} byte CFDate, error") + when 2 then + buff.reverse.unpack("e")[0] + when 3 then + buff.reverse.unpack("E")[0] + end, + CFDate::TIMESTAMP_APPLE + ) + end + protected :read_binary_date + + # Read a binary data value + def read_binary_data(fname,fd,length) + CFData.new(read_fd(fd, length), CFData::DATA_RAW) + end + protected :read_binary_data + + def read_fd fd, length + length > 0 ? fd.read(length) : "" + end + + # Read a binary string value + def read_binary_string(fname,fd,length) + buff = read_fd fd, length + @unique_table[buff] = true unless @unique_table.has_key?(buff) + CFString.new(buff) + end + protected :read_binary_string + + # Convert the given string from one charset to another + def Binary.charset_convert(str,from,to="UTF-8") + return str.dup.force_encoding(from).encode(to) if str.respond_to?("encode") + Iconv.conv(to,from,str) + end + + # Count characters considering character set + def Binary.charset_strlen(str,charset="UTF-8") + if str.respond_to?(:encode) + size = str.length + else + utf8_str = Iconv.conv("UTF-8",charset,str) + size = utf8_str.scan(/./mu).size + end + + # UTF-16 code units in the range D800-DBFF are the beginning of + # a surrogate pair, and count as one additional character for + # length calculation. 
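+      # Illustrative example (added note, not from the upstream source): for a
+      # UTF-16BE string holding "😀" (U+1F600, surrogate pair bytes D8 3D DE 00),
+      # str.length above reports 1, and the loop below adds 1 for the leading
+      # D8 3D unit, giving 2 UTF-16 code units, which is the count that
+      # string_to_binary passes to Binary.type_bytes.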
+ if charset =~ /^UTF-16/ + if str.respond_to?(:encode) + str.bytes.to_a.each_slice(2) { |pair| size += 1 if (0xd8..0xdb).include?(pair[0]) } + else + str.split('').each_slice(2) { |pair| size += 1 if ("\xd8".."\xdb").include?(pair[0]) } + end + end + + size + end + + # Read a unicode string value, coded as UTF-16BE + def read_binary_unicode_string(fname,fd,length) + # The problem is: we get the length of the string IN CHARACTERS; + # since a char in UTF-16 can be 16 or 32 bit long, we don't really know + # how long the string is in bytes + buff = fd.read(2*length) + + @unique_table[buff] = true unless @unique_table.has_key?(buff) + CFString.new(Binary.charset_convert(buff,"UTF-16BE","UTF-8")) + end + protected :read_binary_unicode_string + + def unpack_with_size(nbytes, buff) + format = ["C*", "n*", "N*", "N*"][nbytes - 1]; + + if nbytes == 3 + buff = "\0" + buff.scan(/.{1,3}/).join("\0") + end + + return buff.unpack(format) + end + + # Read an binary array value, including contained objects + def read_binary_array(fname,fd,length) + ary = [] + + # first: read object refs + if(length != 0) + buff = fd.read(length * @object_ref_size) + objects = unpack_with_size(@object_ref_size, buff) #buff.unpack(@object_ref_size == 1 ? "C*" : "n*") + + # now: read objects + 0.upto(length-1) do |i| + object = read_binary_object_at(fname,fd,objects[i]) + ary.push object + end + end + + CFArray.new(ary) + end + protected :read_binary_array + + # Read a dictionary value, including contained objects + def read_binary_dict(fname,fd,length) + dict = {} + + # first: read keys + if(length != 0) then + buff = fd.read(length * @object_ref_size) + keys = unpack_with_size(@object_ref_size, buff) + + # second: read object refs + buff = fd.read(length * @object_ref_size) + objects = unpack_with_size(@object_ref_size, buff) + + # read real keys and objects + 0.upto(length-1) do |i| + key = read_binary_object_at(fname,fd,keys[i]) + object = read_binary_object_at(fname,fd,objects[i]) + dict[key.value] = object + end + end + + CFDictionary.new(dict) + end + protected :read_binary_dict + + # Read an object type byte, decode it and delegate to the correct + # reader function + def read_binary_object(fname,fd) + # first: read the marker byte + buff = fd.read(1) + + object_length = buff.unpack("C*") + object_length = object_length[0] & 0xF + + buff = buff.unpack("H*") + object_type = buff[0][0].chr + + if(object_type != "0" && object_length == 15) then + object_length = read_binary_object(fname,fd) + object_length = object_length.value + end + + case object_type + when '0' # null, false, true, fillbyte + read_binary_null_type(object_length) + when '1' # integer + read_binary_int(fname,fd,object_length) + when '2' # real + read_binary_real(fname,fd,object_length) + when '3' # date + read_binary_date(fname,fd,object_length) + when '4' # data + read_binary_data(fname,fd,object_length) + when '5' # byte string, usually utf8 encoded + read_binary_string(fname,fd,object_length) + when '6' # unicode string (utf16be) + read_binary_unicode_string(fname,fd,object_length) + when '8' + CFUid.new(read_binary_int(fname, fd, object_length).value) + when 'a' # array + read_binary_array(fname,fd,object_length) + when 'd' # dictionary + read_binary_dict(fname,fd,object_length) + end + end + protected :read_binary_object + + # Read an object type byte at position $pos, decode it and delegate to the correct reader function + def read_binary_object_at(fname,fd,pos) + position = @offsets[pos] + fd.seek(position,IO::SEEK_SET) + 
read_binary_object(fname,fd) + end + protected :read_binary_object_at + + # pack an +int+ of +nbytes+ with size + def Binary.pack_it_with_size(nbytes,int) + case nbytes + when 1 then [int].pack('c') + when 2 then [int].pack('n') + when 4 then [int].pack('N') + when 8 + [int >> 32, int & 0xFFFFFFFF].pack('NN') + else + raise CFFormatError.new("Don't know how to pack #{nbytes} byte integer") + end + end + + def Binary.pack_int_array_with_size(nbytes, array) + case nbytes + when 1 then array.pack('C*') + when 2 then array.pack('n*') + when 4 then array.pack('N*') + when 8 + array.map { |int| [int >> 32, int & 0xFFFFFFFF].pack('NN') }.join + else + raise CFFormatError.new("Don't know how to pack #{nbytes} byte integer") + end + end + + # calculate how many bytes are needed to save +count+ + def Binary.bytes_needed(count) + case + when count < 2**8 then 1 + when count < 2**16 then 2 + when count < 2**32 then 4 + when count < 2**64 then 8 + else + raise CFFormatError.new("Data size too large: #{count}") + end + end + + # Create a type byte for binary format as defined by apple + def Binary.type_bytes(type, length) + if length < 15 + [(type << 4) | length].pack('C') + else + bytes = [(type << 4) | 0xF] + if length <= 0xFF + bytes.push(0x10, length).pack('CCC') # 1 byte length + elsif length <= 0xFFFF + bytes.push(0x11, length).pack('CCn') # 2 byte length + elsif length <= 0xFFFFFFFF + bytes.push(0x12, length).pack('CCN') # 4 byte length + elsif length <= 0x7FFFFFFFFFFFFFFF + bytes.push(0x13, length >> 32, length & 0xFFFFFFFF).pack('CCNN') # 8 byte length + else + raise CFFormatError.new("Integer too large: #{int}") + end + end + end + + def count_object_refs(object) + case object + when CFArray + contained_refs = 0 + object.value.each do |element| + if CFArray === element || CFDictionary === element + contained_refs += count_object_refs(element) + end + end + return object.value.size + contained_refs + when CFDictionary + contained_refs = 0 + object.value.each_value do |value| + if CFArray === value || CFDictionary === value + contained_refs += count_object_refs(value) + end + end + return object.value.keys.size * 2 + contained_refs + else + return 0 + end + end + + def Binary.ascii_string?(str) + if str.respond_to?(:ascii_only?) + str.ascii_only? + else + str !~ /[\x80-\xFF]/mn + end + end + + # Uniques and transforms a string value to binary format and adds it to the object table + def string_to_binary(val) + val = val.to_s + + @unique_table[val] ||= begin + if !Binary.ascii_string?(val) + val = Binary.charset_convert(val,"UTF-8","UTF-16BE") + bdata = Binary.type_bytes(0b0110, Binary.charset_strlen(val,"UTF-16BE")) + + val.force_encoding("ASCII-8BIT") if val.respond_to?("encode") + @object_table[@written_object_count] = bdata << val + else + bdata = Binary.type_bytes(0b0101,val.bytesize) + @object_table[@written_object_count] = bdata << val + end + + @written_object_count += 1 + @written_object_count - 1 + end + end + + # Codes an integer to binary format + def int_to_binary(value) + # Note: nbytes is actually an exponent. number of bytes = 2**nbytes. 
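+      # Illustrative walk-through (added note, not from the upstream source):
+      # for value = 70_000 only the first two comparisons below match, leaving
+      # nbytes = 2, so the integer occupies 2**2 = 4 bytes and is packed
+      # big-endian with "N"; a negative value forces nbytes = 3 and the signed
+      # 8-byte "q>" format.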
+ nbytes = 0 + nbytes = 1 if value > 0xFF # 1 byte unsigned integer + nbytes += 1 if value > 0xFFFF # 4 byte unsigned integer + nbytes += 1 if value > 0xFFFFFFFF # 8 byte unsigned integer + nbytes += 1 if value > 0x7FFFFFFFFFFFFFFF # 8 byte unsigned integer, stored in lower half of 16 bytes + nbytes = 3 if value < 0 # signed integers always stored in 8 bytes + + Binary.type_bytes(0b0001, nbytes) << + if nbytes < 4 + [value].pack(["C", "n", "N", "q>"][nbytes]) + else # nbytes == 4 + [0,value].pack("Q>Q>") + end + end + + # Codes a real value to binary format + def real_to_binary(val) + Binary.type_bytes(0b0010,3) << [val].pack("E").reverse + end + + # Converts a numeric value to binary and adds it to the object table + def num_to_binary(value) + @object_table[@written_object_count] = + if value.is_a?(CFInteger) + int_to_binary(value.value) + else + real_to_binary(value.value) + end + + @written_object_count += 1 + @written_object_count - 1 + end + + def uid_to_binary(value) + nbytes = 0 + nbytes = 1 if value > 0xFF # 1 byte integer + nbytes += 1 if value > 0xFFFF # 4 byte integer + nbytes += 1 if value > 0xFFFFFFFF # 8 byte integer + nbytes = 3 if value < 0 # 8 byte integer, since signed + + @object_table[@written_object_count] = Binary.type_bytes(0b1000, nbytes) << + if nbytes < 3 + [value].pack( + if nbytes == 0 then "C" + elsif nbytes == 1 then "n" + else "N" + end + ) + else + # 64 bit signed integer; we need the higher and the lower 32 bit of the value + high_word = value >> 32 + low_word = value & 0xFFFFFFFF + [high_word,low_word].pack("NN") + end + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert date value (apple format) to binary and adds it to the object table + def date_to_binary(val) + val = val.getutc.to_f - CFDate::DATE_DIFF_APPLE_UNIX # CFDate is a real, number of seconds since 01/01/2001 00:00:00 GMT + + @object_table[@written_object_count] = + (Binary.type_bytes(0b0011, 3) << [val].pack("E").reverse) + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert a bool value to binary and add it to the object table + def bool_to_binary(val) + + @object_table[@written_object_count] = val ? 
"\x9" : "\x8" # 0x9 is 1001, type indicator for true; 0x8 is 1000, type indicator for false + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert data value to binary format and add it to the object table + def data_to_binary(val) + @object_table[@written_object_count] = + (Binary.type_bytes(0b0100, val.bytesize) << val) + + @written_object_count += 1 + @written_object_count - 1 + end + + # Convert array to binary format and add it to the object table + def array_to_binary(val) + saved_object_count = @written_object_count + @written_object_count += 1 + #@object_refs += val.value.size + + values = val.value.map { |v| v.to_binary(self) } + bdata = Binary.type_bytes(0b1010, val.value.size) << + Binary.pack_int_array_with_size(object_ref_size(@object_refs), + values) + + @object_table[saved_object_count] = bdata + saved_object_count + end + + # Convert dictionary to binary format and add it to the object table + def dict_to_binary(val) + saved_object_count = @written_object_count + @written_object_count += 1 + + #@object_refs += val.value.keys.size * 2 + + keys_and_values = val.value.keys.map { |k| CFString.new(k).to_binary(self) } + keys_and_values.concat(val.value.values.map { |v| v.to_binary(self) }) + + bdata = Binary.type_bytes(0b1101,val.value.size) << + Binary.pack_int_array_with_size(object_ref_size(@object_refs), keys_and_values) + + @object_table[saved_object_count] = bdata + return saved_object_count + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFPlistError.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFPlistError.rb new file mode 100644 index 0000000..e276565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFPlistError.rb @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# +# Exceptions used: +# CFPlistError:: General base exception +# CFFormatError:: Format error +# CFTypeError:: Type error +# +# Easy and simple :-) +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2010 +# License:: MIT License + +# general plist error. All exceptions thrown are derived from this class. +class CFPlistError < StandardError +end + +# Exception thrown when format errors occur +class CFFormatError < CFPlistError +end + +# Exception thrown when type errors occur +class CFTypeError < CFPlistError +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFPropertyList.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFPropertyList.rb new file mode 100644 index 0000000..43eb1d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFPropertyList.rb @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- + +require 'kconv' +require 'date' +require 'time' + +# +# CFPropertyList implementation +# +# class to read, manipulate and write both XML and binary property list +# files (plist(5)) as defined by Apple. Have a look at CFPropertyList::List +# for more documentation. 
+# +# == Example +# require 'cfpropertylist' +# +# # create a arbitrary data structure of basic data types +# data = { +# 'name' => 'John Doe', +# 'missing' => true, +# 'last_seen' => Time.now, +# 'friends' => ['Jane Doe','Julian Doe'], +# 'likes' => { +# 'me' => false +# } +# } +# +# # create CFPropertyList::List object +# plist = CFPropertyList::List.new +# +# # call CFPropertyList.guess() to create corresponding CFType values +# # pass in optional :convert_unknown_to_string => true to convert things like symbols into strings. +# plist.value = CFPropertyList.guess(data) +# +# # write plist to file +# plist.save("example.plist", CFPropertyList::List::FORMAT_BINARY) +# +# # … later, read it again +# plist = CFPropertyList::List.new(:file => "example.plist") +# data = CFPropertyList.native_types(plist.value) +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2010 +# License:: MIT License +module CFPropertyList + class << self + attr_accessor :xml_parser_interface + end + + # interface class for PList parsers + class ParserInterface + # load a plist + def load(opts={}) + return "" + end + + # convert a plist to string + def to_str(opts={}) + return true + end + end + + class XMLParserInterface < ParserInterface + def new_node(name) + end + + def new_text(val) + end + + def append_node(parent, child) + end + end +end + +dirname = File.dirname(__FILE__) +require dirname + '/rbCFPlistError.rb' +require dirname + '/rbCFTypes.rb' +require dirname + '/rbBinaryCFPropertyList.rb' +require dirname + '/rbPlainCFPropertyList.rb' + +begin + require dirname + '/rbLibXMLParser.rb' + temp = LibXML::XML::Parser::Options::NOBLANKS # check if we have a version with parser options + temp = false # avoid a warning + try_nokogiri = false + CFPropertyList.xml_parser_interface = CFPropertyList::LibXMLParser +rescue LoadError, NameError + try_nokogiri = true +end + +if try_nokogiri then + begin + require dirname + '/rbNokogiriParser.rb' + CFPropertyList.xml_parser_interface = CFPropertyList::NokogiriXMLParser + rescue LoadError + require dirname + '/rbREXMLParser.rb' + CFPropertyList.xml_parser_interface = CFPropertyList::ReXMLParser + end +end + + +module CFPropertyList + # Create CFType hierarchy by guessing the correct CFType, e.g. + # + # x = { + # 'a' => ['b','c','d'] + # } + # cftypes = CFPropertyList.guess(x) + # + # pass optional options hash. 
Only possible value actually: + # +convert_unknown_to_string+:: Convert unknown objects to string calling to_str() + # +converter_method+:: Convert unknown objects to known objects calling +method_name+ + # + # cftypes = CFPropertyList.guess(x,:convert_unknown_to_string => true,:converter_method => :to_hash, :converter_with_opts => true) + def guess(object, options = {}) + case object + when Integer then CFInteger.new(object) + when UidFixnum then CFUid.new(object) + when Float then CFReal.new(object) + when TrueClass, FalseClass then CFBoolean.new(object) + + when Blob + CFData.new(object, CFData::DATA_RAW) + + when String, Symbol + CFString.new(object.to_s) + + when Time, DateTime, Date + CFDate.new(object) + + when Array, Enumerator + ary = Array.new + object.each do |o| + ary.push CFPropertyList.guess(o, options) + end + CFArray.new(ary) + + when Hash + hsh = Hash.new + object.each_pair do |k,v| + k = k.to_s if k.is_a?(Symbol) + hsh[k] = CFPropertyList.guess(v, options) + end + CFDictionary.new(hsh) + else + case + when Object.const_defined?('BigDecimal') && object.is_a?(BigDecimal) + CFReal.new(object) + when object.respond_to?(:read) + raw_data = object.read + # treat the data as a bytestring (ASCII-8BIT) if Ruby supports it. Do this by forcing + # the encoding, on the assumption that the bytes were read correctly, and just tagged with + # an inappropriate encoding, rather than transcoding. + raw_data.force_encoding(Encoding::ASCII_8BIT) if raw_data.respond_to?(:force_encoding) + CFData.new(raw_data, CFData::DATA_RAW) + when options[:converter_method] && object.respond_to?(options[:converter_method]) + if options[:converter_with_opts] + CFPropertyList.guess(object.send(options[:converter_method],options),options) + else + CFPropertyList.guess(object.send(options[:converter_method]),options) + end + when options[:convert_unknown_to_string] + CFString.new(object.to_s) + else + raise CFTypeError.new("Unknown class #{object.class.to_s}. Try using :convert_unknown_to_string if you want to use unknown object types!") + end + end + end + + # Converts a CFType hiercharchy to native Ruby types + def native_types(object,keys_as_symbols=false) + return if object.nil? + + if(object.is_a?(CFDate) || object.is_a?(CFString) || object.is_a?(CFInteger) || object.is_a?(CFReal) || object.is_a?(CFBoolean)) || object.is_a?(CFUid) then + return object.value + elsif(object.is_a?(CFData)) then + return CFPropertyList::Blob.new(object.decoded_value) + elsif(object.is_a?(CFArray)) then + ary = [] + object.value.each do + |v| + ary.push CFPropertyList.native_types(v) + end + + return ary + elsif(object.is_a?(CFDictionary)) then + hsh = {} + object.value.each_pair do + |k,v| + k = k.to_sym if keys_as_symbols + hsh[k] = CFPropertyList.native_types(v) + end + + return hsh + end + end + + module_function :guess, :native_types + + # Class representing a CFPropertyList. 
Instantiate with #new + class List + # Format constant for binary format + FORMAT_BINARY = 1 + + # Format constant for XML format + FORMAT_XML = 2 + + # Format constant for the old plain format + FORMAT_PLAIN = 3 + + # Format constant for automatic format recognizing + FORMAT_AUTO = 0 + + @@parsers = [Binary, CFPropertyList.xml_parser_interface, PlainParser] + + # Path of PropertyList + attr_accessor :filename + # the original format of the PropertyList + attr_accessor :format + # the root value in the plist file + attr_accessor :value + # default value for XML generation; if true generate formatted XML + attr_accessor :formatted + + # initialize a new CFPropertyList, arguments are: + # + # :file:: Parse a file + # :format:: Format is one of FORMAT_BINARY or FORMAT_XML. Defaults to FORMAT_AUTO + # :data:: Parse a string + # + # All arguments are optional + def initialize(opts={}) + @filename = opts[:file] + @format = opts[:format] || FORMAT_AUTO + @data = opts[:data] + @formatted = opts[:formatted] + + load(@filename) unless @filename.nil? + load_str(@data) unless @data.nil? + end + + # returns a list of registered parsers + def self.parsers + @@parsers + end + + # set a list of parsers + def self.parsers=(val) + @@parsers = val + end + + # Load an XML PropertyList + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_xml(filename=nil) + load(filename,List::FORMAT_XML) + end + + # read a binary plist file + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_binary(filename=nil) + load(filename,List::FORMAT_BINARY) + end + + # read a plain plist file + # filename = nil:: The filename to read from; if nil, read from the file defined by instance variable +filename+ + def load_plain(filename=nil) + load(filename,List::FORMAT_PLAIN) + end + + # load a plist from a XML string + # str:: The string containing the plist + def load_xml_str(str=nil) + load_str(str,List::FORMAT_XML) + end + + # load a plist from a binary string + # str:: The string containing the plist + def load_binary_str(str=nil) + load_str(str,List::FORMAT_BINARY) + end + + # load a plist from a plain string + # str:: The string containing the plist + def load_plain_str(str=nil) + load_str(str,List::FORMAT_PLAIN) + end + + # load a plist from a string + # str = nil:: The string containing the plist + # format = nil:: The format of the plist + def load_str(str=nil,format=nil) + str = @data if str.nil? + format = @format if format.nil? + + @value = {} + case format + when List::FORMAT_BINARY, List::FORMAT_XML, List::FORMAT_PLAIN then + prsr = @@parsers[format-1].new + @value = prsr.load({:data => str}) + + when List::FORMAT_AUTO then # what we now do is ugly, but neccessary to recognize the file format + filetype = str[0..5] + version = str[6..7] + + prsr = nil + + if filetype == "bplist" then + raise CFFormatError.new("Wrong file version #{version}") unless version == "00" + prsr = Binary.new + @format = List::FORMAT_BINARY + else + if str =~ /^<(\?xml|!DOCTYPE|plist)/ + prsr = CFPropertyList.xml_parser_interface.new + @format = List::FORMAT_XML + else + prsr = PlainParser.new + @format = List::FORMAT_PLAIN + end + end + + @value = prsr.load({:data => str}) + end + end + + # Read a plist file + # file = nil:: The filename of the file to read. If nil, use +filename+ instance variable + # format = nil:: The format of the plist file. 
Auto-detect if nil + def load(file=nil,format=nil) + file = @filename if file.nil? + format = @format if format.nil? + @value = {} + + raise IOError.new("File #{file} not readable!") unless File.readable? file + + case format + when List::FORMAT_BINARY, List::FORMAT_XML, List::FORMAT_PLAIN then + prsr = @@parsers[format-1].new + @value = prsr.load({:file => file}) + + when List::FORMAT_AUTO then # what we now do is ugly, but neccessary to recognize the file format + magic_number = IO.read(file,12) + raise IOError.new("File #{file} is empty.") unless magic_number + filetype = magic_number[0..5] + version = magic_number[6..7] + + prsr = nil + if filetype == "bplist" then + raise CFFormatError.new("Wrong file version #{version}") unless version == "00" + prsr = Binary.new + @format = List::FORMAT_BINARY + else + if magic_number =~ /^<(\?xml|!DOCTYPE|plist)/ + prsr = CFPropertyList.xml_parser_interface.new + @format = List::FORMAT_XML + else + prsr = PlainParser.new + @format = List::FORMAT_PLAIN + end + end + + @value = prsr.load({:file => file}) + end + + raise CFFormatError.new("Invalid format or parser error!") if @value.nil? + end + + # Serialize CFPropertyList object to specified format and write it to file + # file = nil:: The filename of the file to write to. Uses +filename+ instance variable if nil + # format = nil:: The format to save in. Uses +format+ instance variable if nil + def save(file=nil,format=nil,opts={}) + format = @format if format.nil? + file = @filename if file.nil? + + if format != FORMAT_BINARY && format != FORMAT_XML && format != FORMAT_PLAIN + raise CFFormatError.new("Format #{format} not supported, use List::FORMAT_BINARY or List::FORMAT_XML") + end + + if(!File.exists?(file)) then + raise IOError.new("File #{file} not writable!") unless File.writable?(File.dirname(file)) + elsif(!File.writable?(file)) then + raise IOError.new("File #{file} not writable!") + end + + opts[:root] = @value + opts[:formatted] = @formatted unless opts.has_key?(:formatted) + + prsr = @@parsers[format-1].new + + content = prsr.to_str(opts) + + File.open(file, 'wb') { + |fd| + fd.write content + } + end + + # convert plist to string + # format = List::FORMAT_BINARY:: The format to save the plist + # opts={}:: Pass parser options + def to_str(format=List::FORMAT_BINARY,opts={}) + if format != FORMAT_BINARY && format != FORMAT_XML && format != FORMAT_PLAIN + raise CFFormatError.new("Format #{format} not supported, use List::FORMAT_BINARY or List::FORMAT_XML") + end + + prsr = @@parsers[format-1].new + + opts[:root] = @value + opts[:formatted] = @formatted unless opts.has_key?(:formatted) + + return prsr.to_str(opts) + end + end +end + + +class Array + # convert an array to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end +end + +class Enumerator + # convert an array to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end +end + +class Hash + # convert a hash to plist format + def to_plist(options={}) + options[:plist_format] ||= CFPropertyList::List::FORMAT_BINARY + + plist = CFPropertyList::List.new + plist.value = CFPropertyList.guess(self, options) + plist.to_str(options[:plist_format], options) + end 
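+  # Usage sketch (added note, not from the upstream source), assuming the gem
+  # has been loaded with require 'cfpropertylist':
+  #
+  #   xml    = { 'name' => 'John Doe' }.to_plist(:plist_format => CFPropertyList::List::FORMAT_XML)
+  #   binary = { 'name' => 'John Doe' }.to_plist   # :plist_format defaults to FORMAT_BINARY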
+end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFTypes.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFTypes.rb new file mode 100644 index 0000000..8563721 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbCFTypes.rb @@ -0,0 +1,349 @@ +# -*- coding: utf-8 -*- +# +# CFTypes, e.g. CFString, CFInteger +# needed to create unambiguous plists +# +# Author:: Christian Kruse (mailto:cjk@wwwtech.de) +# Copyright:: Copyright (c) 2009 +# License:: MIT License + +require 'base64' + +module CFPropertyList + ## + # Blob is intended to distinguish between a Ruby String instance that should + # be converted to a CFString type and a Ruby String instance that should be + # converted to a CFData type + class Blob < String + end + + ## + # UidFixnum is intended to distinguish between a Ruby Integer + # instance that should be converted to a CFInteger/CFReal type and a + # Ruby Integer instance that should be converted to a CFUid type. + class UidFixnum < Integer + end + + # This class defines the base class for all CFType classes + # + class CFType + # value of the type + attr_accessor :value + + def initialize(value=nil) + @value = value + end + + def to_xml(parser) + end + + def to_binary(bplist) + end + + def to_plain(plist) + end + end + + # This class holds string values, both, UTF-8 and UTF-16BE + # It will convert the value to UTF-16BE if necessary (i.e. if non-ascii char contained) + class CFString < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('string') + n = parser.append_node(n, parser.new_text(@value)) unless @value.nil? + n + end + + # convert to binary + def to_binary(bplist) + bplist.string_to_binary(@value); + end + + def to_plain(plist) + if @value =~ /^\w+$/ + @value + else + quoted + end + end + + def quoted + str = '"' + @value.each_char do |c| + str << case c + when '"' + '\\"' + when '\\' + '\\' + when "\a" + "\\a" + when "\b" + "\\b" + when "\f" + "\\f" + when "\n" + "\n" + when "\v" + "\\v" + when "\r" + "\\r" + when "\t" + "\\t" + else + c + end + end + + str << '"' + end + end + + # This class holds integer/fixnum values + class CFInteger < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('integer') + n = parser.append_node(n, parser.new_text(@value.to_s)) + n + end + + # convert to binary + def to_binary(bplist) + bplist.num_to_binary(self) + end + + def to_plain(plist) + @value.to_s + end + end + + # This class holds float values + class CFReal < CFType + # convert to XML + def to_xml(parser) + n = parser.new_node('real') + n = parser.append_node(n, parser.new_text(@value.to_s)) + n + end + + # convert to binary + def to_binary(bplist) + bplist.num_to_binary(self) + end + + def to_plain(plist) + @value.to_s + end + end + + # This class holds Time values. While Apple uses seconds since 2001, + # the rest of the world uses seconds since 1970. So if you access value + # directly, you get the Time class. 
If you access via get_value you either + # geht the timestamp or the Apple timestamp + class CFDate < CFType + TIMESTAMP_APPLE = 0 + TIMESTAMP_UNIX = 1 + DATE_DIFF_APPLE_UNIX = 978307200 + + # create a XML date strimg from a time object + def CFDate.date_string(val) + # 2009-05-13T20:23:43Z + val.getutc.strftime("%Y-%m-%dT%H:%M:%SZ") + end + + # parse a XML date string + def CFDate.parse_date(val) + # 2009-05-13T20:23:43Z + val =~ %r{^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})Z$} + year,month,day,hour,min,sec = $1, $2, $3, $4, $5, $6 + return Time.utc(year,month,day,hour,min,sec).getlocal + end + + # set value to defined state + def initialize(value = nil,format=CFDate::TIMESTAMP_UNIX) + if(value.is_a?(Time) || value.nil?) then + @value = value.nil? ? Time.now : value + elsif value.instance_of? Date + @value = Time.utc(value.year, value.month, value.day, 0, 0, 0) + elsif value.instance_of? DateTime + @value = value.to_time.utc + else + set_value(value,format) + end + end + + # set value with timestamp, either Apple or UNIX + def set_value(value,format=CFDate::TIMESTAMP_UNIX) + if(format == CFDate::TIMESTAMP_UNIX) then + @value = Time.at(value) + else + @value = Time.at(value + CFDate::DATE_DIFF_APPLE_UNIX) + end + end + + # get timestamp, either UNIX or Apple timestamp + def get_value(format=CFDate::TIMESTAMP_UNIX) + if(format == CFDate::TIMESTAMP_UNIX) then + @value.to_i + else + @value.to_f - CFDate::DATE_DIFF_APPLE_UNIX + end + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('date') + n = parser.append_node(n, parser.new_text(CFDate::date_string(@value))) + n + end + + # convert to binary + def to_binary(bplist) + bplist.date_to_binary(@value) + end + + def to_plain(plist) + @value.strftime("%Y-%m-%d %H:%M:%S %z") + end + end + + # This class contains a boolean value + class CFBoolean < CFType + # convert to XML + def to_xml(parser) + parser.new_node(@value ? 'true' : 'false') + end + + # convert to binary + def to_binary(bplist) + bplist.bool_to_binary(@value); + end + + def to_plain(plist) + @value ? 
"true" : "false" + end + end + + # This class contains binary data values + class CFData < CFType + # Base64 encoded data + DATA_BASE64 = 0 + # Raw data + DATA_RAW = 1 + + # set value to defined state, either base64 encoded or raw + def initialize(value=nil,format=DATA_BASE64) + if(format == DATA_RAW) + @raw_value = value + else + @value = value + end + end + + # get base64 encoded value + def encoded_value + @value ||= "\n#{Base64.encode64(@raw_value).gsub("\n", '').scan(/.{1,76}/).join("\n")}\n" + end + + # get base64 decoded value + def decoded_value + @raw_value ||= Blob.new(Base64.decode64(@value)) + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('data') + n = parser.append_node(n, parser.new_text(encoded_value())) + n + end + + # convert to binary + def to_binary(bplist) + bplist.data_to_binary(decoded_value()) + end + + def to_plain(plist) + "<" + decoded_value.unpack("H*").join("") + ">" + end + end + + # This class contains an array of values + class CFArray < CFType + # create a new array CFType + def initialize(val=[]) + @value = val + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('array') + @value.each do |v| + n = parser.append_node(n, v.to_xml(parser)) + end + n + end + + # convert to binary + def to_binary(bplist) + bplist.array_to_binary(self) + end + + def to_plain(plist) + ary = @value.map { |v| v.to_plain(plist) } + "( " + ary.join(", ") + " )" + end + end + + # this class contains a hash of values + class CFDictionary < CFType + # Create new CFDictonary type. + def initialize(value={}) + @value = value + end + + # convert to XML + def to_xml(parser) + n = parser.new_node('dict') + @value.each_pair do |key, value| + k = parser.append_node(parser.new_node('key'), parser.new_text(key.to_s)) + n = parser.append_node(n, k) + n = parser.append_node(n, value.to_xml(parser)) + end + n + end + + # convert to binary + def to_binary(bplist) + bplist.dict_to_binary(self) + end + + def to_plain(plist) + str = "{ " + cfstr = CFString.new() + + @value.each do |k,v| + cfstr.value = k + str << cfstr.to_plain(plist) + " = " + v.to_plain(plist) + "; " + end + + str << "}" + end + end + + class CFUid < CFType + def to_xml(parser) + CFDictionary.new({'CF$UID' => CFInteger.new(@value)}).to_xml(parser) + end + + # convert to binary + def to_binary(bplist) + bplist.uid_to_binary(@value) + end + + def to_plain(plist) + CFDictionary.new({'CF$UID' => CFInteger.new(@value)}).to_plain(plist) + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbLibXMLParser.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbLibXMLParser.rb new file mode 100644 index 0000000..3642016 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbLibXMLParser.rb @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- + +require 'libxml' + +module CFPropertyList + # XML parser + class LibXMLParser < XMLParserInterface + LibXML::XML::Error.set_handler(&LibXML::XML::Error::QUIET_HANDLER) + PARSER_OPTIONS = LibXML::XML::Parser::Options::NOBLANKS|LibXML::XML::Parser::Options::NONET + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + doc = nil + + if(opts.has_key?(:file)) then + doc = LibXML::XML::Document.file(opts[:file],:options => PARSER_OPTIONS) + else + doc = LibXML::XML::Document.string(opts[:data],:options => PARSER_OPTIONS) + end + + if doc + root = doc.root.first + return import_xml(root) + end + 
rescue LibXML::XML::Error => e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = LibXML::XML::Document.new + + doc.root = LibXML::XML::Node.new('plist') + doc.encoding = LibXML::XML::Encoding::UTF_8 + + doc.root['version'] = '1.0' + doc.root << opts[:root].to_xml(self) + + # ugly hack, but there's no other possibility I know + str = doc.to_s(:indent => opts[:formatted]) + str1 = String.new + first = false + str.each_line do |line| + str1 << line + unless(first) then + str1 << "\n" if line =~ /^\s*<\?xml/ + end + + first = true + end + + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + return str1 + end + + def new_node(name) + LibXML::XML::Node.new(name) + end + + def new_text(val) + LibXML::XML::Node.new_text(val) + end + + def append_node(parent, child) + parent << child + end + + protected + + # get the value of a DOM node + def get_value(n) + content = if n.children? + n.first.content + else + n.content + end + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + + if node.children? then + node.children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? + + if n.name == "key" then + key = get_value(n) + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + + if node.children? then + node.children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? 
+ ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbNokogiriParser.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbNokogiriParser.rb new file mode 100644 index 0000000..e2de688 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbNokogiriParser.rb @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +require 'nokogiri' + +module CFPropertyList + # XML parser + class NokogiriXMLParser < ParserInterface + PARSER_OPTIONS = Nokogiri::XML::ParseOptions::NOBLANKS|Nokogiri::XML::ParseOptions::NONET + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + doc = nil + if(opts.has_key?(:file)) then + File.open(opts[:file], "rb") { |fd| doc = Nokogiri::XML::Document.parse(fd, nil, nil, PARSER_OPTIONS) } + else + doc = Nokogiri::XML::Document.parse(opts[:data], nil, nil, PARSER_OPTIONS) + end + + if doc + root = doc.root.children.first + return import_xml(root) + end + rescue Nokogiri::XML::SyntaxError => e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = Nokogiri::XML::Document.new + @doc = doc + + doc.root = doc.create_element 'plist', :version => '1.0' + doc.encoding = 'UTF-8' + + doc.root << opts[:root].to_xml(self) + + # ugly hack, but there's no other possibility I know + s_opts = Nokogiri::XML::Node::SaveOptions::AS_XML + s_opts |= Nokogiri::XML::Node::SaveOptions::FORMAT if opts[:formatted] + + str = doc.serialize(:save_with => s_opts) + str1 = String.new + first = false + str.each_line do |line| + str1 << line + unless(first) then + str1 << "\n" if line =~ /^\s*<\?xml/ + end + + first = true + end + + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + return str1 + end + + def new_node(name) + @doc.create_element name + end + + def new_text(val) + @doc.create_text_node val + end + + def append_node(parent, child) + parent << child + end + + protected + + # get the value of a DOM node + def get_value(n) + content = if n.children.empty? + n.content + else + n.children.first.content + end + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + children = node.children + + unless children.empty? then + children.each do |n| + next if n.text? # avoid a bug of libxml + next if n.comment? + + if n.name == "key" then + key = get_value(n) + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + children = node.children + + unless children.empty? then + children.each do |n| + next if n.text? 
# avoid a bug of libxml + next if n.comment? + ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbPlainCFPropertyList.rb b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbPlainCFPropertyList.rb new file mode 100644 index 0000000..fa698ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/CFPropertyList-3.0.5/lib/cfpropertylist/rbPlainCFPropertyList.rb @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- + +require 'strscan' + +module CFPropertyList + # XML parser + class PlainParser < XMLParserInterface + # read a XML file + # opts:: + # * :file - The filename of the file to load + # * :data - The data to parse + def load(opts) + @doc = nil + + if(opts.has_key?(:file)) then + File.open(opts[:file], :external_encoding => "ASCII") do |fd| + @doc = StringScanner.new(fd.read) + end + else + @doc = StringScanner.new(opts[:data]) + end + + if @doc + root = import_plain + raise CFFormatError.new('content after root object') unless @doc.eos? + + return root + end + + raise CFFormatError.new('invalid plist string or file not found') + end + + SPACES_AND_COMMENTS = %r{((?:/\*.*?\*/)|(?://.*?$\n?)|(?:\s*))+}x + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + opts[:root].to_plain(self) + end + + protected + def skip_whitespaces + @doc.skip SPACES_AND_COMMENTS + end + + def read_dict + skip_whitespaces + hsh = {} + + while not @doc.scan(/\}/) + key = import_plain + raise CFFormatError.new("invalid dictionary format") if !key + + if key.is_a?(CFString) + key = key.value + elsif key.is_a?(CFInteger) or key.is_a?(CFReal) + key = key.value.to_s + else + raise CFFormatError.new("invalid key format") + end + + skip_whitespaces + + raise CFFormatError.new("invalid dictionary format") unless @doc.scan(/=/) + + skip_whitespaces + val = import_plain + + skip_whitespaces + raise CFFormatError.new("invalid dictionary format") unless @doc.scan(/;/) + skip_whitespaces + + hsh[key] = val + raise CFFormatError.new("invalid dictionary format") if @doc.eos? + end + + CFDictionary.new(hsh) + end + + def read_array + skip_whitespaces + ary = [] + + while not @doc.scan(/\)/) + val = import_plain + + return nil if not val or not val.value + skip_whitespaces + + if not @doc.skip(/,\s*/) + if @doc.scan(/\)/) + ary << val + return CFArray.new(ary) + end + + raise CFFormatError.new("invalid array format") + end + + ary << val + raise CFFormatError.new("invalid array format") if @doc.eos? + end + + CFArray.new(ary) + end + + def escape_char + case @doc.matched + when '"' + '"' + when '\\' + '\\' + when 'a' + "\a" + when 'b' + "\b" + when 'f' + "\f" + when 'n' + "\n" + when 'v' + "\v" + when 'r' + "\r" + when 't' + "\t" + when 'U' + @doc.scan(/.{4}/).hex.chr('utf-8') + end + end + + def read_quoted + str = '' + + while not @doc.scan(/"/) + if @doc.scan(/\\/) + @doc.scan(/./) + str << escape_char + + elsif @doc.eos? 
+ raise CFFormatError.new("unterminated string") + + else @doc.scan(/./) + str << @doc.matched + end + end + + CFString.new(str) + end + + def read_unquoted + raise CFFormatError.new("unexpected end of file") if @doc.eos? + + if @doc.scan(/(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)(?:\s+(\+|-)(\d\d)(\d\d))?/) + year,month,day,hour,min,sec,pl_min,tz_hour, tz_min = @doc[1], @doc[2], @doc[3], @doc[4], @doc[5], @doc[6], @doc[7], @doc[8], @doc[9] + CFDate.new(Time.new(year, month, day, hour, min, sec, pl_min ? sprintf("%s%s:%s", pl_min, tz_hour, tz_min) : nil)) + + elsif @doc.scan(/-?\d+?\.\d+\b/) + CFReal.new(@doc.matched.to_f) + + elsif @doc.scan(/-?\d+\b/) + CFInteger.new(@doc.matched.to_i) + + elsif @doc.scan(/\b(true|false)\b/) + CFBoolean.new(@doc.matched == 'true') + else + CFString.new(@doc.scan(/\w+/)) + end + end + + def read_binary + @doc.scan(/(.*?)>/) + + hex_str = @doc[1].gsub(/ /, '') + CFData.new([hex_str].pack("H*"), CFData::DATA_RAW) + end + + # import the XML values + def import_plain + skip_whitespaces + ret = nil + + if @doc.scan(/\{/) # dict + ret = read_dict + elsif @doc.scan(/\(/) # array + ret = read_array + elsif @doc.scan(/"/) # string + ret = read_quoted + elsif @doc.scan(/ e + raise CFFormatError.new('invalid XML: ' + e.message) + end + + # serialize CFPropertyList object to XML + # opts = {}:: Specify options: :formatted - Use indention and line breaks + def to_str(opts={}) + doc = REXML::Document.new + @doc = doc + + doc.context[:attribute_quote] = :quote + + doc.add_element 'plist', {'version' => '1.0'} + doc.root << opts[:root].to_xml(self) + + formatter = if opts[:formatted] then + f = REXML::Formatters::Pretty.new(2) + f.compact = true + f.width = Float::INFINITY + f + else + REXML::Formatters::Default.new + end + + str = formatter.write(doc.root, "") + str1 = "\n\n" + str + "\n" + str1.force_encoding('UTF-8') if str1.respond_to?(:force_encoding) + + return str1 + end + + def new_node(name) + REXML::Element.new(name) + end + + def new_text(val) + val + end + + def append_node(parent, child) + if child.is_a?(String) then + parent.add_text child + else + parent.elements << child + end + parent + end + + protected + + # get the value of a DOM node + def get_value(n) + content = n.text + + content.force_encoding('UTF-8') if content.respond_to?(:force_encoding) + content + end + + # import the XML values + def import_xml(node) + ret = nil + + case node.name + when 'dict' + hsh = Hash.new + key = nil + + if node.has_elements? then + node.elements.each do |n| + next if n.name == '#text' # avoid a bug of libxml + next if n.name == '#comment' + + if n.name == "key" then + key = get_value(n) + key = '' if key.nil? # REXML returns nil if key is empty + else + raise CFFormatError.new("Format error!") if key.nil? + hsh[key] = import_xml(n) + key = nil + end + end + end + + if hsh['CF$UID'] and hsh.keys.length == 1 + ret = CFUid.new(hsh['CF$UID'].value) + else + ret = CFDictionary.new(hsh) + end + + when 'array' + ary = Array.new + + if node.has_elements? then + node.elements.each do |n| + next if n.name == '#text' # avoid a bug of libxml + ary.push import_xml(n) + end + end + + ret = CFArray.new(ary) + + when 'true' + ret = CFBoolean.new(true) + when 'false' + ret = CFBoolean.new(false) + when 'real' + ret = CFReal.new(get_value(node).to_f) + when 'integer' + ret = CFInteger.new(get_value(node).to_i) + when 'string' + ret = CFString.new(get_value(node)) + ret.value = '' if ret.value.nil? 
# REXML returns nil for empty elements' .text attribute + when 'data' + ret = CFData.new(get_value(node)) + when 'date' + ret = CFDate.new(CFDate.parse_date(get_value(node))) + end + + return ret + end + end +end + +# eof diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/CHANGELOG.md new file mode 100644 index 0000000..686b31d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/CHANGELOG.md @@ -0,0 +1,653 @@ +## Rails 5.2.4.4 (September 09, 2020) ## + +* No changes. + + +## Rails 5.2.4.3 (May 18, 2020) ## + +* [CVE-2020-8165] Deprecate Marshal.load on raw cache read in RedisCacheStore + +* [CVE-2020-8165] Avoid Marshal.load on raw cache value in MemCacheStore + +## Rails 5.2.4.1 (December 18, 2019) ## + +* No changes. + + +## Rails 5.2.4 (November 27, 2019) ## + +* Make ActiveSupport::Logger Fiber-safe. Fixes #36752. + + Use `Fiber.current.__id__` in `ActiveSupport::Logger#local_level=` in order + to make log level local to Ruby Fibers in addition to Threads. + + Example: + + logger = ActiveSupport::Logger.new(STDOUT) + logger.level = 1 + p "Main is debug? #{logger.debug?}" + + Fiber.new { + logger.local_level = 0 + p "Thread is debug? #{logger.debug?}" + }.resume + + p "Main is debug? #{logger.debug?}" + + Before: + + Main is debug? false + Thread is debug? true + Main is debug? true + + After: + + Main is debug? false + Thread is debug? true + Main is debug? false + + *Alexander Varnin* + + +## Rails 5.2.3 (March 27, 2019) ## + +* Add `ActiveSupport::HashWithIndifferentAccess#assoc`. + + `assoc` can now be called with either a string or a symbol. + + *Stefan Schüßler* + +* Fix `String#safe_constantize` throwing a `LoadError` for incorrectly cased constant references. + + *Keenan Brock* + +* Allow Range#=== and Range#cover? on Range + + `Range#cover?` can now accept a range argument like `Range#include?` and + `Range#===`. `Range#===` works correctly on Ruby 2.6. `Range#include?` is moved + into a new file, with these two methods. + + *utilum* + +* If the same block is `included` multiple times for a Concern, an exception is no longer raised. + + *Mark J. Titorenko*, *Vlad Bokov* + + +## Rails 5.2.2.1 (March 11, 2019) ## + +* No changes. + + +## Rails 5.2.2 (December 04, 2018) ## + +* Fix bug where `#to_options` for `ActiveSupport::HashWithIndifferentAccess` + would not act as alias for `#symbolize_keys`. + + *Nick Weiland* + +* Improve the logic that detects non-autoloaded constants. + + *Jan Habermann*, *Xavier Noria* + +* Fix bug where `URI.unescape` would fail with mixed Unicode/escaped character input: + + URI.unescape("\xe3\x83\x90") # => "バ" + URI.unescape("%E3%83%90") # => "バ" + URI.unescape("\xe3\x83\x90%E3%83%90") # => Encoding::CompatibilityError + + *Ashe Connor*, *Aaron Patterson* + + +## Rails 5.2.1.1 (November 27, 2018) ## + +* No changes. + + +## Rails 5.2.1 (August 07, 2018) ## + +* Redis cache store: `delete_matched` no longer blocks the Redis server. + (Switches from evaled Lua to a batched SCAN + DEL loop.) + + *Gleb Mazovetskiy* + +* Fix bug where `ActiveSupport::Timezone.all` would fail when tzinfo data for + any timezone defined in `ActiveSupport::TimeZone::MAPPING` is missing. + + *Dominik Sander* + +* Fix bug where `ActiveSupport::Cache` will massively inflate the storage + size when compression is enabled (which is true by default). This patch + does not attempt to repair existing data: please manually flush the cache + to clear out the problematic entries. 
+ + *Godfrey Chan* + +* Fix `ActiveSupport::Cache#read_multi` bug with local cache enabled that was + returning instances of `ActiveSupport::Cache::Entry` instead of the raw values. + + *Jason Lee* + + +## Rails 5.2.0 (April 09, 2018) ## + +* Caching: MemCache and Redis `read_multi` and `fetch_multi` speedup. + Read from the local in-memory cache before consulting the backend. + + *Gabriel Sobrinho* + +* Return all mappings for a timezone identifier in `country_zones`. + + Some timezones like `Europe/London` have multiple mappings in + `ActiveSupport::TimeZone::MAPPING` so return all of them instead + of the first one found by using `Hash#value`. e.g: + + # Before + ActiveSupport::TimeZone.country_zones("GB") # => ["Edinburgh"] + + # After + ActiveSupport::TimeZone.country_zones("GB") # => ["Edinburgh", "London"] + + Fixes #31668. + + *Andrew White* + +* Add support for connection pooling on RedisCacheStore. + + *fatkodima* + +* Support hash as first argument in `assert_difference`. This allows to specify multiple + numeric differences in the same assertion. + + assert_difference ->{ Article.count } => 1, ->{ Post.count } => 2 + + *Julien Meichelbeck* + +* Add missing instrumentation for `read_multi` in `ActiveSupport::Cache::Store`. + + *Ignatius Reza Lesmana* + +* `assert_changes` will always assert that the expression changes, + regardless of `from:` and `to:` argument combinations. + + *Daniel Ma* + +* Use SHA-1 to generate non-sensitive digests, such as the ETag header. + + Enabled by default for new apps; upgrading apps can opt in by setting + `config.active_support.use_sha1_digests = true`. + + *Dmitri Dolguikh*, *Eugene Kenny* + +* Changed default behaviour of `ActiveSupport::SecurityUtils.secure_compare`, + to make it not leak length information even for variable length string. + + Renamed old `ActiveSupport::SecurityUtils.secure_compare` to `fixed_length_secure_compare`, + and started raising `ArgumentError` in case of length mismatch of passed strings. + + *Vipul A M* + +* Make `ActiveSupport::TimeZone.all` return only time zones that are in + `ActiveSupport::TimeZone::MAPPING`. + + Fixes #7245. + + *Chris LaRose* + +* MemCacheStore: Support expiring counters. + + Pass `expires_in: [seconds]` to `#increment` and `#decrement` options + to set the Memcached TTL (time-to-live) if the counter doesn't exist. + If the counter exists, Memcached doesn't extend its expiry when it's + incremented or decremented. + + ``` + Rails.cache.increment("my_counter", 1, expires_in: 2.minutes) + ``` + + *Takumasa Ochi* + +* Handle `TZInfo::AmbiguousTime` errors. + + Make `ActiveSupport::TimeWithZone` match Ruby's handling of ambiguous + times by choosing the later period, e.g. + + Ruby: + ``` + ENV["TZ"] = "Europe/Moscow" + Time.local(2014, 10, 26, 1, 0, 0) # => 2014-10-26 01:00:00 +0300 + ``` + + Before: + ``` + >> "2014-10-26 01:00:00".in_time_zone("Moscow") + TZInfo::AmbiguousTime: 26/10/2014 01:00 is an ambiguous local time. + ``` + + After: + ``` + >> "2014-10-26 01:00:00".in_time_zone("Moscow") + => Sun, 26 Oct 2014 01:00:00 MSK +03:00 + ``` + + Fixes #17395. + + *Andrew White* + +* Redis cache store. + + ``` + # Defaults to `redis://localhost:6379/0`. Only use for dev/test. + config.cache_store = :redis_cache_store + + # Supports all common cache store options (:namespace, :compress, + # :compress_threshold, :expires_in, :race_condition_ttl) and all + # Redis options. 
+ cache_password = Rails.application.secrets.redis_cache_password + config.cache_store = :redis_cache_store, driver: :hiredis, + namespace: 'myapp-cache', compress: true, timeout: 1, + url: "redis://:#{cache_password}@myapp-cache-1:6379/0" + + # Supports Redis::Distributed with multiple hosts + config.cache_store = :redis_cache_store, driver: :hiredis + namespace: 'myapp-cache', compress: true, + url: %w[ + redis://myapp-cache-1:6379/0 + redis://myapp-cache-1:6380/0 + redis://myapp-cache-2:6379/0 + redis://myapp-cache-2:6380/0 + redis://myapp-cache-3:6379/0 + redis://myapp-cache-3:6380/0 + ] + + # Or pass a builder block + config.cache_store = :redis_cache_store, + namespace: 'myapp-cache', compress: true, + redis: -> { Redis.new … } + ``` + + Deployment note: Take care to use a *dedicated Redis cache* rather + than pointing this at your existing Redis server. It won't cope well + with mixed usage patterns and it won't expire cache entries by default. + + Redis cache server setup guide: https://redis.io/topics/lru-cache + + *Jeremy Daer* + +* Cache: Enable compression by default for values > 1kB. + + Compression has long been available, but opt-in and at a 16kB threshold. + It wasn't enabled by default due to CPU cost. Today it's cheap and typical + cache data is eminently compressible, such as HTML or JSON fragments. + Compression dramatically reduces Memcached/Redis mem usage, which means + the same cache servers can store more data, which means higher hit rates. + + To disable compression, pass `compress: false` to the initializer. + + *Jeremy Daer* + +* Allow `Range#include?` on TWZ ranges. + + In #11474 we prevented TWZ ranges being iterated over which matched + Ruby's handling of Time ranges and as a consequence `include?` + stopped working with both Time ranges and TWZ ranges. However in + ruby/ruby@b061634 support was added for `include?` to use `cover?` + for 'linear' objects. Since we have no way of making Ruby consider + TWZ instances as 'linear' we have to override `Range#include?`. + + Fixes #30799. + + *Andrew White* + +* Fix acronym support in `humanize`. + + Acronym inflections are stored with lowercase keys in the hash but + the match wasn't being lowercased before being looked up in the hash. + This shouldn't have any performance impact because before it would + fail to find the acronym and perform the `downcase` operation anyway. + + Fixes #31052. + + *Andrew White* + +* Add same method signature for `Time#prev_year` and `Time#next_year` + in accordance with `Date#prev_year`, `Date#next_year`. + + Allows pass argument for `Time#prev_year` and `Time#next_year`. + + Before: + ``` + Time.new(2017, 9, 16, 17, 0).prev_year # => 2016-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_year(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + + Time.new(2017, 9, 16, 17, 0).next_year # => 2018-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_year(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + ``` + + After: + ``` + Time.new(2017, 9, 16, 17, 0).prev_year # => 2016-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_year(1) # => 2016-09-16 17:00:00 +0300 + + Time.new(2017, 9, 16, 17, 0).next_year # => 2018-09-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_year(1) # => 2018-09-16 17:00:00 +0300 + ``` + + *bogdanvlviv* + +* Add same method signature for `Time#prev_month` and `Time#next_month` + in accordance with `Date#prev_month`, `Date#next_month`. 
+ + Allows pass argument for `Time#prev_month` and `Time#next_month`. + + Before: + ``` + Time.new(2017, 9, 16, 17, 0).prev_month # => 2017-08-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_month(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + + Time.new(2017, 9, 16, 17, 0).next_month # => 2017-10-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_month(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + ``` + + After: + ``` + Time.new(2017, 9, 16, 17, 0).prev_month # => 2017-08-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_month(1) # => 2017-08-16 17:00:00 +0300 + + Time.new(2017, 9, 16, 17, 0).next_month # => 2017-10-16 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_month(1) # => 2017-10-16 17:00:00 +0300 + ``` + + *bogdanvlviv* + +* Add same method signature for `Time#prev_day` and `Time#next_day` + in accordance with `Date#prev_day`, `Date#next_day`. + + Allows pass argument for `Time#prev_day` and `Time#next_day`. + + Before: + ``` + Time.new(2017, 9, 16, 17, 0).prev_day # => 2017-09-15 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_day(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + + Time.new(2017, 9, 16, 17, 0).next_day # => 2017-09-17 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_day(1) + # => ArgumentError: wrong number of arguments (given 1, expected 0) + ``` + + After: + ``` + Time.new(2017, 9, 16, 17, 0).prev_day # => 2017-09-15 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).prev_day(1) # => 2017-09-15 17:00:00 +0300 + + Time.new(2017, 9, 16, 17, 0).next_day # => 2017-09-17 17:00:00 +0300 + Time.new(2017, 9, 16, 17, 0).next_day(1) # => 2017-09-17 17:00:00 +0300 + ``` + + *bogdanvlviv* + +* `IO#to_json` now returns the `to_s` representation, rather than + attempting to convert to an array. This fixes a bug where `IO#to_json` + would raise an `IOError` when called on an unreadable object. + + Fixes #26132. + + *Paul Kuruvilla* + +* Remove deprecated `halt_callback_chains_on_return_false` option. + + *Rafael Mendonça França* + +* Remove deprecated `:if` and `:unless` string filter for callbacks. + + *Rafael Mendonça França* + +* `Hash#slice` now falls back to Ruby 2.5+'s built-in definition if defined. + + *Akira Matsuda* + +* Deprecate `secrets.secret_token`. + + The architecture for secrets had a big upgrade between Rails 3 and Rails 4, + when the default changed from using `secret_token` to `secret_key_base`. + + `secret_token` has been soft deprecated in documentation for four years + but is still in place to support apps created before Rails 4. + Deprecation warnings have been added to help developers upgrade their + applications to `secret_key_base`. + + *claudiob*, *Kasper Timm Hansen* + +* Return an instance of `HashWithIndifferentAccess` from `HashWithIndifferentAccess#transform_keys`. + + *Yuji Yaginuma* + +* Add key rotation support to `MessageEncryptor` and `MessageVerifier`. + + This change introduces a `rotate` method to both the `MessageEncryptor` and + `MessageVerifier` classes. This method accepts the same arguments and + options as the given classes' constructor. The `encrypt_and_verify` method + for `MessageEncryptor` and the `verified` method for `MessageVerifier` also + accept an optional keyword argument `:on_rotation` block which is called + when a rotated instance is used to decrypt or verify the message. + + *Michael J Coyne* + +* Deprecate `Module#reachable?` method. 
+ + *bogdanvlviv* + +* Add `config/credentials.yml.enc` to store production app secrets. + + Allows saving any authentication credentials for third party services + directly in repo encrypted with `config/master.key` or `ENV["RAILS_MASTER_KEY"]`. + + This will eventually replace `Rails.application.secrets` and the encrypted + secrets introduced in Rails 5.1. + + *DHH*, *Kasper Timm Hansen* + +* Add `ActiveSupport::EncryptedFile` and `ActiveSupport::EncryptedConfiguration`. + + Allows for stashing encrypted files or configuration directly in repo by + encrypting it with a key. + + Backs the new credentials setup above, but can also be used independently. + + *DHH*, *Kasper Timm Hansen* + +* `Module#delegate_missing_to` now raises `DelegationError` if target is nil, + similar to `Module#delegate`. + + *Anton Khamets* + +* Update `String#camelize` to provide feedback when wrong option is passed. + + `String#camelize` was returning nil without any feedback when an + invalid option was passed as a parameter. + + Previously: + + 'one_two'.camelize(true) + # => nil + + Now: + + 'one_two'.camelize(true) + # => ArgumentError: Invalid option, use either :upper or :lower. + + *Ricardo Díaz* + +* Fix modulo operations involving durations. + + Rails 5.1 introduced `ActiveSupport::Duration::Scalar` as a wrapper + around numeric values as a way of ensuring a duration was the outcome of + an expression. However, the implementation was missing support for modulo + operations. This support has now been added and should result in a duration + being returned from expressions involving modulo operations. + + Prior to Rails 5.1: + + 5.minutes % 2.minutes + # => 60 + + Now: + + 5.minutes % 2.minutes + # => 1 minute + + Fixes #29603 and #29743. + + *Sayan Chakraborty*, *Andrew White* + +* Fix division where a duration is the denominator. + + PR #29163 introduced a change in behavior when a duration was the denominator + in a calculation - this was incorrect as dividing by a duration should always + return a `Numeric`. The behavior of previous versions of Rails has been restored. + + Fixes #29592. + + *Andrew White* + +* Add purpose and expiry support to `ActiveSupport::MessageVerifier` and + `ActiveSupport::MessageEncryptor`. + + For instance, to ensure a message is only usable for one intended purpose: + + token = @verifier.generate("x", purpose: :shipping) + + @verifier.verified(token, purpose: :shipping) # => "x" + @verifier.verified(token) # => nil + + Or make it expire after a set time: + + @verifier.generate("x", expires_in: 1.month) + @verifier.generate("y", expires_at: Time.now.end_of_year) + + Showcased with `ActiveSupport::MessageVerifier`, but works the same for + `ActiveSupport::MessageEncryptor`'s `encrypt_and_sign` and `decrypt_and_verify`. + + Pull requests: #29599, #29854 + + *Assain Jaleel* + +* Make the order of `Hash#reverse_merge!` consistent with `HashWithIndifferentAccess`. + + *Erol Fornoles* + +* Add `freeze_time` helper which freezes time to `Time.now` in tests. + + *Prathamesh Sonpatki* + +* Default `ActiveSupport::MessageEncryptor` to use AES 256 GCM encryption. + + On for new Rails 5.2 apps. Upgrading apps can find the config as a new + framework default. + + *Assain Jaleel* + +* Cache: `write_multi`. + + Rails.cache.write_multi foo: 'bar', baz: 'qux' + + Plus faster fetch_multi with stores that implement `write_multi_entries`. + Keys that aren't found may be written to the cache store in one shot + instead of separate writes. 
+ + The default implementation simply calls `write_entry` for each entry. + Stores may override if they're capable of one-shot bulk writes, like + Redis `MSET`. + + *Jeremy Daer* + +* Add default option to module and class attribute accessors. + + mattr_accessor :settings, default: {} + + Works for `mattr_reader`, `mattr_writer`, `cattr_accessor`, `cattr_reader`, + and `cattr_writer` as well. + + *Genadi Samokovarov* + +* Add `Date#prev_occurring` and `Date#next_occurring` to return specified next/previous occurring day of week. + + *Shota Iguchi* + +* Add default option to `class_attribute`. + + Before: + + class_attribute :settings + self.settings = {} + + Now: + + class_attribute :settings, default: {} + + *DHH* + +* `#singularize` and `#pluralize` now respect uncountables for the specified locale. + + *Eilis Hamilton* + +* Add `ActiveSupport::CurrentAttributes` to provide a thread-isolated attributes singleton. + Primary use case is keeping all the per-request attributes easily available to the whole system. + + *DHH* + +* Fix implicit coercion calculations with scalars and durations. + + Previously, calculations where the scalar is first would be converted to a duration + of seconds, but this causes issues with dates being converted to times, e.g: + + Time.zone = "Beijing" # => Asia/Shanghai + date = Date.civil(2017, 5, 20) # => Mon, 20 May 2017 + 2 * 1.day # => 172800 seconds + date + 2 * 1.day # => Mon, 22 May 2017 00:00:00 CST +08:00 + + Now, the `ActiveSupport::Duration::Scalar` calculation methods will try to maintain + the part structure of the duration where possible, e.g: + + Time.zone = "Beijing" # => Asia/Shanghai + date = Date.civil(2017, 5, 20) # => Mon, 20 May 2017 + 2 * 1.day # => 2 days + date + 2 * 1.day # => Mon, 22 May 2017 + + Fixes #29160, #28970. + + *Andrew White* + +* Add support for versioned cache entries. This enables the cache stores to recycle cache keys, greatly saving + on storage in cases with frequent churn. Works together with the separation of `#cache_key` and `#cache_version` + in Active Record and its use in Action Pack's fragment caching. + + *DHH* + +* Pass gem name and deprecation horizon to deprecation notifications. + + *Willem van Bergen* + +* Add support for `:offset` and `:zone` to `ActiveSupport::TimeWithZone#change`. + + *Andrew White* + +* Add support for `:offset` to `Time#change`. + + Fixes #28723. + + *Andrew White* + +* Add `fetch_values` for `HashWithIndifferentAccess`. + + The method was originally added to `Hash` in Ruby 2.3.0. + + *Josh Pencheon* + + +Please check [5-1-stable](https://github.com/rails/rails/blob/5-1-stable/activesupport/CHANGELOG.md) for previous changes. 
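As a quick illustration of the purpose/expiry entry above, here is a minimal standalone sketch (editorial addition; the secret and payload are made-up values, not from the changelog):

    require "active_support"
    require "active_support/message_verifier"
    require "active_support/core_ext/integer/time"

    verifier = ActiveSupport::MessageVerifier.new("some-long-random-secret")

    token = verifier.generate("order-42", purpose: :shipping, expires_in: 1.month)

    verifier.verified(token, purpose: :shipping) # => "order-42"
    verifier.verified(token, purpose: :billing)  # => nil (wrong purpose)
    verifier.verified(token)                     # => nil (token was scoped to a purpose)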
diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/MIT-LICENSE b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/MIT-LICENSE new file mode 100644 index 0000000..8f769c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2005-2018 David Heinemeier Hansson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/README.rdoc b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/README.rdoc new file mode 100644 index 0000000..bbe25ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/README.rdoc @@ -0,0 +1,39 @@ += Active Support -- Utility classes and Ruby extensions from Rails + +Active Support is a collection of utility classes and standard library +extensions that were found useful for the Rails framework. These additions +reside in this package so they can be loaded as needed in Ruby projects +outside of Rails. 
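As a short sketch of that standalone use (an editorial addition, not part of the upstream README), individual extensions can be required piecemeal:

  require "active_support"
  require "active_support/core_ext/string/inflections"

  "active_support".camelize   # => "ActiveSupport"
  "ActiveSupport".underscore  # => "active_support"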
+ + +== Download and installation + +The latest version of Active Support can be installed with RubyGems: + + $ gem install activesupport + +Source code can be downloaded as part of the Rails project on GitHub: + +* https://github.com/rails/rails/tree/5-2-stable/activesupport + + +== License + +Active Support is released under the MIT license: + +* https://opensource.org/licenses/MIT + + +== Support + +API documentation is at: + +* http://api.rubyonrails.org + +Bug reports for the Ruby on Rails project can be filed here: + +* https://github.com/rails/rails/issues + +Feature requests should be discussed on the rails-core mailing list here: + +* https://groups.google.com/forum/?fromgroups#!forum/rubyonrails-core diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support.rb new file mode 100644 index 0000000..a4fb697 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +#-- +# Copyright (c) 2005-2018 David Heinemeier Hansson +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +#++ + +require "securerandom" +require "active_support/dependencies/autoload" +require "active_support/version" +require "active_support/logger" +require "active_support/lazy_load_hooks" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + extend ActiveSupport::Autoload + + autoload :Concern + autoload :CurrentAttributes + autoload :Dependencies + autoload :DescendantsTracker + autoload :ExecutionWrapper + autoload :Executor + autoload :FileUpdateChecker + autoload :EventedFileUpdateChecker + autoload :LogSubscriber + autoload :Notifications + autoload :Reloader + + eager_autoload do + autoload :BacktraceCleaner + autoload :ProxyObject + autoload :Benchmarkable + autoload :Cache + autoload :Callbacks + autoload :Configurable + autoload :Deprecation + autoload :Digest + autoload :Gzip + autoload :Inflector + autoload :JSON + autoload :KeyGenerator + autoload :MessageEncryptor + autoload :MessageVerifier + autoload :Multibyte + autoload :NumberHelper + autoload :OptionMerger + autoload :OrderedHash + autoload :OrderedOptions + autoload :StringInquirer + autoload :TaggedLogging + autoload :XmlMini + autoload :ArrayInquirer + end + + autoload :Rescuable + autoload :SafeBuffer, "active_support/core_ext/string/output_safety" + autoload :TestCase + + def self.eager_load! 
+ super + + NumberHelper.eager_load! + end + + cattr_accessor :test_order # :nodoc: + + def self.to_time_preserves_timezone + DateAndTime::Compatibility.preserve_timezone + end + + def self.to_time_preserves_timezone=(value) + DateAndTime::Compatibility.preserve_timezone = value + end +end + +autoload :I18n, "active_support/i18n" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/all.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/all.rb new file mode 100644 index 0000000..4adf446 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/all.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/time" +require "active_support/core_ext" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/array_inquirer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/array_inquirer.rb new file mode 100644 index 0000000..b2b9e9c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/array_inquirer.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module ActiveSupport + # Wrapping an array in an +ArrayInquirer+ gives a friendlier way to check + # its string-like contents: + # + # variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet]) + # + # variants.phone? # => true + # variants.tablet? # => true + # variants.desktop? # => false + class ArrayInquirer < Array + # Passes each element of +candidates+ collection to ArrayInquirer collection. + # The method returns true if any element from the ArrayInquirer collection + # is equal to the stringified or symbolized form of any element in the +candidates+ collection. + # + # If +candidates+ collection is not given, method returns true. + # + # variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet]) + # + # variants.any? # => true + # variants.any?(:phone, :tablet) # => true + # variants.any?('phone', 'desktop') # => true + # variants.any?(:desktop, :watch) # => false + def any?(*candidates) + if candidates.none? + super + else + candidates.any? do |candidate| + include?(candidate.to_sym) || include?(candidate.to_s) + end + end + end + + private + def respond_to_missing?(name, include_private = false) + (name[-1] == "?") || super + end + + def method_missing(name, *args) + if name[-1] == "?" + any?(name[0..-2]) + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/backtrace_cleaner.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/backtrace_cleaner.rb new file mode 100644 index 0000000..16dd733 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/backtrace_cleaner.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module ActiveSupport + # Backtraces often include many lines that are not relevant for the context + # under review. This makes it hard to find the signal amongst the backtrace + # noise, and adds debugging time. With a BacktraceCleaner, filters and + # silencers are used to remove the noisy lines, so that only the most relevant + # lines remain. + # + # Filters are used to modify lines of data, while silencers are used to remove + # lines entirely. The typical filter use case is to remove lengthy path + # information from the start of each line, and view file paths relevant to the + # app directory instead of the file system root. 
The typical silencer use case + # is to exclude the output of a noisy library from the backtrace, so that you + # can focus on the rest. + # + # bc = ActiveSupport::BacktraceCleaner.new + # bc.add_filter { |line| line.gsub(Rails.root.to_s, '') } # strip the Rails.root prefix + # bc.add_silencer { |line| line =~ /puma|rubygems/ } # skip any lines from puma or rubygems + # bc.clean(exception.backtrace) # perform the cleanup + # + # To reconfigure an existing BacktraceCleaner (like the default one in Rails) + # and show as much data as possible, you can always call + # BacktraceCleaner#remove_silencers!, which will restore the + # backtrace to a pristine state. If you need to reconfigure an existing + # BacktraceCleaner so that it does not filter or modify the paths of any lines + # of the backtrace, you can call BacktraceCleaner#remove_filters! + # These two methods will give you a completely untouched backtrace. + # + # Inspired by the Quiet Backtrace gem by thoughtbot. + class BacktraceCleaner + def initialize + @filters, @silencers = [], [] + end + + # Returns the backtrace after all filters and silencers have been run + # against it. Filters run first, then silencers. + def clean(backtrace, kind = :silent) + filtered = filter_backtrace(backtrace) + + case kind + when :silent + silence(filtered) + when :noise + noise(filtered) + else + filtered + end + end + alias :filter :clean + + # Adds a filter from the block provided. Each line in the backtrace will be + # mapped against this filter. + # + # # Will turn "/my/rails/root/app/models/person.rb" into "/app/models/person.rb" + # backtrace_cleaner.add_filter { |line| line.gsub(Rails.root, '') } + def add_filter(&block) + @filters << block + end + + # Adds a silencer from the block provided. If the silencer returns +true+ + # for a given line, it will be excluded from the clean backtrace. + # + # # Will reject all lines that include the word "puma", like "/gems/puma/server.rb" or "/app/my_puma_server/rb" + # backtrace_cleaner.add_silencer { |line| line =~ /puma/ } + def add_silencer(&block) + @silencers << block + end + + # Removes all silencers, but leaves in the filters. Useful if your + # context of debugging suddenly expands as you suspect a bug in one of + # the libraries you use. + def remove_silencers! + @silencers = [] + end + + # Removes all filters, but leaves in the silencers. Useful if you suddenly + # need to see entire filepaths in the backtrace that you had already + # filtered out. + def remove_filters! + @filters = [] + end + + private + def filter_backtrace(backtrace) + @filters.each do |f| + backtrace = backtrace.map { |line| f.call(line) } + end + + backtrace + end + + def silence(backtrace) + @silencers.each do |s| + backtrace = backtrace.reject { |line| s.call(line) } + end + + backtrace + end + + def noise(backtrace) + backtrace - silence(backtrace) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/benchmarkable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/benchmarkable.rb new file mode 100644 index 0000000..f481d68 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/benchmarkable.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/core_ext/benchmark" +require "active_support/core_ext/hash/keys" + +module ActiveSupport + module Benchmarkable + # Allows you to measure the execution time of a block in a template and + # records the result to the log. 
Wrap this block around expensive operations + # or possible bottlenecks to get a time reading for the operation. For + # example, let's say you thought your file processing method was taking too + # long; you could wrap it in a benchmark block. + # + # <% benchmark 'Process data files' do %> + # <%= expensive_files_operation %> + # <% end %> + # + # That would add something like "Process data files (345.2ms)" to the log, + # which you can then use to compare timings when optimizing your code. + # + # You may give an optional logger level (:debug, :info, + # :warn, :error) as the :level option. The + # default logger level value is :info. + # + # <% benchmark 'Low-level files', level: :debug do %> + # <%= lowlevel_files_operation %> + # <% end %> + # + # Finally, you can pass true as the third argument to silence all log + # activity (other than the timing information) from inside the block. This + # is great for boiling down a noisy block to just a single statement that + # produces one log line: + # + # <% benchmark 'Process data files', level: :info, silence: true do %> + # <%= expensive_and_chatty_files_operation %> + # <% end %> + def benchmark(message = "Benchmarking", options = {}) + if logger + options.assert_valid_keys(:level, :silence) + options[:level] ||= :info + + result = nil + ms = Benchmark.ms { result = options[:silence] ? logger.silence { yield } : yield } + logger.send(options[:level], "%s (%.1fms)" % [ message, ms ]) + result + else + yield + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/builder.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/builder.rb new file mode 100644 index 0000000..3fa7e6b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/builder.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +begin + require "builder" +rescue LoadError => e + $stderr.puts "You don't have builder installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache.rb new file mode 100644 index 0000000..d06a497 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache.rb @@ -0,0 +1,808 @@ +# frozen_string_literal: true + +require "zlib" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/numeric/bytes" +require "active_support/core_ext/numeric/time" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # See ActiveSupport::Cache::Store for documentation. + module Cache + autoload :FileStore, "active_support/cache/file_store" + autoload :MemoryStore, "active_support/cache/memory_store" + autoload :MemCacheStore, "active_support/cache/mem_cache_store" + autoload :NullStore, "active_support/cache/null_store" + autoload :RedisCacheStore, "active_support/cache/redis_cache_store" + + # These options mean something to all cache implementations. Individual cache + # implementations may support additional options. 
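  # Editorial note (not upstream code): the universal options listed in the constant
  # below can be given either to a store's constructor, where they act as defaults,
  # or to individual calls. A small sketch assuming the documented Store API:
  #
  #   cache = ActiveSupport::Cache::MemoryStore.new(namespace: "myapp", expires_in: 10.minutes)
  #   cache.write("greeting", "hello", expires_in: 30.seconds)
  #   cache.read("greeting")   # => "hello"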
+ UNIVERSAL_OPTIONS = [:namespace, :compress, :compress_threshold, :expires_in, :race_condition_ttl] + + module Strategy + autoload :LocalCache, "active_support/cache/strategy/local_cache" + end + + class << self + # Creates a new Store object according to the given options. + # + # If no arguments are passed to this method, then a new + # ActiveSupport::Cache::MemoryStore object will be returned. + # + # If you pass a Symbol as the first argument, then a corresponding cache + # store class under the ActiveSupport::Cache namespace will be created. + # For example: + # + # ActiveSupport::Cache.lookup_store(:memory_store) + # # => returns a new ActiveSupport::Cache::MemoryStore object + # + # ActiveSupport::Cache.lookup_store(:mem_cache_store) + # # => returns a new ActiveSupport::Cache::MemCacheStore object + # + # Any additional arguments will be passed to the corresponding cache store + # class's constructor: + # + # ActiveSupport::Cache.lookup_store(:file_store, '/tmp/cache') + # # => same as: ActiveSupport::Cache::FileStore.new('/tmp/cache') + # + # If the first argument is not a Symbol, then it will simply be returned: + # + # ActiveSupport::Cache.lookup_store(MyOwnCacheStore.new) + # # => returns MyOwnCacheStore.new + def lookup_store(*store_option) + store, *parameters = *Array.wrap(store_option).flatten + + case store + when Symbol + retrieve_store_class(store).new(*parameters) + when nil + ActiveSupport::Cache::MemoryStore.new + else + store + end + end + + # Expands out the +key+ argument into a key that can be used for the + # cache store. Optionally accepts a namespace, and all keys will be + # scoped within that namespace. + # + # If the +key+ argument provided is an array, or responds to +to_a+, then + # each of elements in the array will be turned into parameters/keys and + # concatenated into a single key. For example: + # + # ActiveSupport::Cache.expand_cache_key([:foo, :bar]) # => "foo/bar" + # ActiveSupport::Cache.expand_cache_key([:foo, :bar], "namespace") # => "namespace/foo/bar" + # + # The +key+ argument can also respond to +cache_key+ or +to_param+. + def expand_cache_key(key, namespace = nil) + expanded_cache_key = (namespace ? "#{namespace}/" : "").dup + + if prefix = ENV["RAILS_CACHE_ID"] || ENV["RAILS_APP_VERSION"] + expanded_cache_key << "#{prefix}/" + end + + expanded_cache_key << retrieve_cache_key(key) + expanded_cache_key + end + + private + def retrieve_cache_key(key) + case + when key.respond_to?(:cache_key_with_version) then key.cache_key_with_version + when key.respond_to?(:cache_key) then key.cache_key + when key.is_a?(Array) then key.map { |element| retrieve_cache_key(element) }.to_param + when key.respond_to?(:to_a) then retrieve_cache_key(key.to_a) + else key.to_param + end.to_s + end + + # Obtains the specified cache store class, given the name of the +store+. + # Raises an error when the store class cannot be found. + def retrieve_store_class(store) + # require_relative cannot be used here because the class might be + # provided by another gem, like redis-activesupport for example. + require "active_support/cache/#{store}" + rescue LoadError => e + raise "Could not find cache store adapter for #{store} (#{e})" + else + ActiveSupport::Cache.const_get(store.to_s.camelize) + end + end + + # An abstract cache store class. There are multiple cache store + # implementations, each having its own additional features. See the classes + # under the ActiveSupport::Cache module, e.g. + # ActiveSupport::Cache::MemCacheStore. 
MemCacheStore is currently the most + # popular cache store for large production websites. + # + # Some implementations may not support all methods beyond the basic cache + # methods of +fetch+, +write+, +read+, +exist?+, and +delete+. + # + # ActiveSupport::Cache::Store can store any serializable Ruby object. + # + # cache = ActiveSupport::Cache::MemoryStore.new + # + # cache.read('city') # => nil + # cache.write('city', "Duckburgh") + # cache.read('city') # => "Duckburgh" + # + # Keys are always translated into Strings and are case sensitive. When an + # object is specified as a key and has a +cache_key+ method defined, this + # method will be called to define the key. Otherwise, the +to_param+ + # method will be called. Hashes and Arrays can also be used as keys. The + # elements will be delimited by slashes, and the elements within a Hash + # will be sorted by key so they are consistent. + # + # cache.read('city') == cache.read(:city) # => true + # + # Nil values can be cached. + # + # If your cache is on a shared infrastructure, you can define a namespace + # for your cache entries. If a namespace is defined, it will be prefixed on + # to every key. The namespace can be either a static value or a Proc. If it + # is a Proc, it will be invoked when each key is evaluated so that you can + # use application logic to invalidate keys. + # + # cache.namespace = -> { @last_mod_time } # Set the namespace to a variable + # @last_mod_time = Time.now # Invalidate the entire cache by changing namespace + # + # Cached data larger than 1kB are compressed by default. To turn off + # compression, pass compress: false to the initializer or to + # individual +fetch+ or +write+ method calls. The 1kB compression + # threshold is configurable with the :compress_threshold option, + # specified in bytes. + class Store + cattr_accessor :logger, instance_writer: true + + attr_reader :silence, :options + alias :silence? :silence + + class << self + private + def retrieve_pool_options(options) + {}.tap do |pool_options| + pool_options[:size] = options.delete(:pool_size) if options[:pool_size] + pool_options[:timeout] = options.delete(:pool_timeout) if options[:pool_timeout] + end + end + + def ensure_connection_pool_added! + require "connection_pool" + rescue LoadError => e + $stderr.puts "You don't have connection_pool installed in your application. Please add it to your Gemfile and run bundle install" + raise e + end + end + + # Creates a new cache. The options will be passed to any write method calls + # except for :namespace which can be used to set the global + # namespace for the cache. + def initialize(options = nil) + @options = options ? options.dup : {} + end + + # Silences the logger. + def silence! + @silence = true + self + end + + # Silences the logger within a block. + def mute + previous_silence, @silence = defined?(@silence) && @silence, true + yield + ensure + @silence = previous_silence + end + + # Fetches data from the cache, using the given key. If there is data in + # the cache with the given key, then that data is returned. + # + # If there is no such data in the cache (a cache miss), then +nil+ will be + # returned. However, if a block has been passed, that block will be passed + # the key and executed in the event of a cache miss. The return value of the + # block will be written to the cache under the given cache key, and that + # return value will be returned. 
+ # + # cache.write('today', 'Monday') + # cache.fetch('today') # => "Monday" + # + # cache.fetch('city') # => nil + # cache.fetch('city') do + # 'Duckburgh' + # end + # cache.fetch('city') # => "Duckburgh" + # + # You may also specify additional options via the +options+ argument. + # Setting force: true forces a cache "miss," meaning we treat + # the cache value as missing even if it's present. Passing a block is + # required when +force+ is true so this always results in a cache write. + # + # cache.write('today', 'Monday') + # cache.fetch('today', force: true) { 'Tuesday' } # => 'Tuesday' + # cache.fetch('today', force: true) # => ArgumentError + # + # The +:force+ option is useful when you're calling some other method to + # ask whether you should force a cache write. Otherwise, it's clearer to + # just call Cache#write. + # + # Setting compress: false disables compression of the cache entry. + # + # Setting :expires_in will set an expiration time on the cache. + # All caches support auto-expiring content after a specified number of + # seconds. This value can be specified as an option to the constructor + # (in which case all entries will be affected), or it can be supplied to + # the +fetch+ or +write+ method to effect just one entry. + # + # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 5.minutes) + # cache.write(key, value, expires_in: 1.minute) # Set a lower value for one entry + # + # Setting :version verifies the cache stored under name + # is of the same version. nil is returned on mismatches despite contents. + # This feature is used to support recyclable cache keys. + # + # Setting :race_condition_ttl is very useful in situations where + # a cache entry is used very frequently and is under heavy load. If a + # cache expires and due to heavy load several different processes will try + # to read data natively and then they all will try to write to cache. To + # avoid that case the first process to find an expired cache entry will + # bump the cache expiration time by the value set in :race_condition_ttl. + # Yes, this process is extending the time for a stale value by another few + # seconds. Because of extended life of the previous cache, other processes + # will continue to use slightly stale data for a just a bit longer. In the + # meantime that first process will go ahead and will write into cache the + # new value. After that all the processes will start getting the new value. + # The key is to keep :race_condition_ttl small. + # + # If the process regenerating the entry errors out, the entry will be + # regenerated after the specified number of seconds. Also note that the + # life of stale cache is extended only if it expired recently. Otherwise + # a new value is generated and :race_condition_ttl does not play + # any role. + # + # # Set all values to expire after one minute. 
+ # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 1.minute) + # + # cache.write('foo', 'original value') + # val_1 = nil + # val_2 = nil + # sleep 60 + # + # Thread.new do + # val_1 = cache.fetch('foo', race_condition_ttl: 10.seconds) do + # sleep 1 + # 'new value 1' + # end + # end + # + # Thread.new do + # val_2 = cache.fetch('foo', race_condition_ttl: 10.seconds) do + # 'new value 2' + # end + # end + # + # cache.fetch('foo') # => "original value" + # sleep 10 # First thread extended the life of cache by another 10 seconds + # cache.fetch('foo') # => "new value 1" + # val_1 # => "new value 1" + # val_2 # => "original value" + # + # Other options will be handled by the specific cache store implementation. + # Internally, #fetch calls #read_entry, and calls #write_entry on a cache + # miss. +options+ will be passed to the #read and #write calls. + # + # For example, MemCacheStore's #write method supports the +:raw+ + # option, which tells the memcached server to store all values as strings. + # We can use this option with #fetch too: + # + # cache = ActiveSupport::Cache::MemCacheStore.new + # cache.fetch("foo", force: true, raw: true) do + # :bar + # end + # cache.fetch('foo') # => "bar" + def fetch(name, options = nil) + if block_given? + options = merged_options(options) + key = normalize_key(name, options) + + entry = nil + instrument(:read, name, options) do |payload| + cached_entry = read_entry(key, options) unless options[:force] + entry = handle_expired_entry(cached_entry, key, options) + entry = nil if entry && entry.mismatched?(normalize_version(name, options)) + payload[:super_operation] = :fetch if payload + payload[:hit] = !!entry if payload + end + + if entry + get_entry_value(entry, name, options) + else + save_block_result_to_cache(name, options) { |_name| yield _name } + end + elsif options && options[:force] + raise ArgumentError, "Missing block: Calling `Cache#fetch` with `force: true` requires a block." + else + read(name, options) + end + end + + # Reads data from the cache, using the given key. If there is data in + # the cache with the given key, then that data is returned. Otherwise, + # +nil+ is returned. + # + # Note, if data was written with the :expires_in or :version options, + # both of these conditions are applied before the data is returned. + # + # Options are passed to the underlying cache implementation. + def read(name, options = nil) + options = merged_options(options) + key = normalize_key(name, options) + version = normalize_version(name, options) + + instrument(:read, name, options) do |payload| + entry = read_entry(key, options) + + if entry + if entry.expired? + delete_entry(key, options) + payload[:hit] = false if payload + nil + elsif entry.mismatched?(version) + payload[:hit] = false if payload + nil + else + payload[:hit] = true if payload + entry.value + end + else + payload[:hit] = false if payload + nil + end + end + end + + # Reads multiple values at once from the cache. Options can be passed + # in the last argument. + # + # Some cache implementation may optimize this method. + # + # Returns a hash mapping the names provided to the values found. + def read_multi(*names) + options = names.extract_options! + options = merged_options(options) + + instrument :read_multi, names, options do |payload| + read_multi_entries(names, options).tap do |results| + payload[:hits] = results.keys + end + end + end + + # Cache Storage API to write multiple values at once. 
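+      #
+      # A minimal illustrative call (assuming +cache+ is any configured store):
+      #
+      #   cache.write_multi({ "a" => 1, "b" => 2 })
+      #   cache.read("a") # => 1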
+ def write_multi(hash, options = nil) + options = merged_options(options) + + instrument :write_multi, hash, options do |payload| + entries = hash.each_with_object({}) do |(name, value), memo| + memo[normalize_key(name, options)] = Entry.new(value, options.merge(version: normalize_version(name, options))) + end + + write_multi_entries entries, options + end + end + + # Fetches data from the cache, using the given keys. If there is data in + # the cache with the given keys, then that data is returned. Otherwise, + # the supplied block is called for each key for which there was no data, + # and the result will be written to the cache and returned. + # Therefore, you need to pass a block that returns the data to be written + # to the cache. If you do not want to write the cache when the cache is + # not found, use #read_multi. + # + # Options are passed to the underlying cache implementation. + # + # Returns a hash with the data for each of the names. For example: + # + # cache.write("bim", "bam") + # cache.fetch_multi("bim", "unknown_key") do |key| + # "Fallback value for key: #{key}" + # end + # # => { "bim" => "bam", + # # "unknown_key" => "Fallback value for key: unknown_key" } + # + def fetch_multi(*names) + raise ArgumentError, "Missing block: `Cache#fetch_multi` requires a block." unless block_given? + + options = names.extract_options! + options = merged_options(options) + + instrument :read_multi, names, options do |payload| + read_multi_entries(names, options).tap do |results| + payload[:hits] = results.keys + payload[:super_operation] = :fetch_multi + + writes = {} + + (names - results.keys).each do |name| + results[name] = writes[name] = yield(name) + end + + write_multi writes, options + end + end + end + + # Writes the value to the cache, with the key. + # + # Options are passed to the underlying cache implementation. + def write(name, value, options = nil) + options = merged_options(options) + + instrument(:write, name, options) do + entry = Entry.new(value, options.merge(version: normalize_version(name, options))) + write_entry(normalize_key(name, options), entry, options) + end + end + + # Deletes an entry in the cache. Returns +true+ if an entry is deleted. + # + # Options are passed to the underlying cache implementation. + def delete(name, options = nil) + options = merged_options(options) + + instrument(:delete, name) do + delete_entry(normalize_key(name, options), options) + end + end + + # Returns +true+ if the cache contains an entry for the given key. + # + # Options are passed to the underlying cache implementation. + def exist?(name, options = nil) + options = merged_options(options) + + instrument(:exist?, name) do + entry = read_entry(normalize_key(name, options), options) + (entry && !entry.expired? && !entry.mismatched?(normalize_version(name, options))) || false + end + end + + # Deletes all entries with keys matching the pattern. + # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def delete_matched(matcher, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support delete_matched") + end + + # Increments an integer value in the cache. + # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def increment(name, amount = 1, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support increment") + end + + # Decrements an integer value in the cache. 
+ # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def decrement(name, amount = 1, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support decrement") + end + + # Cleanups the cache by removing expired entries. + # + # Options are passed to the underlying cache implementation. + # + # All implementations may not support this method. + def cleanup(options = nil) + raise NotImplementedError.new("#{self.class.name} does not support cleanup") + end + + # Clears the entire cache. Be careful with this method since it could + # affect other processes if shared cache is being used. + # + # The options hash is passed to the underlying cache implementation. + # + # All implementations may not support this method. + def clear(options = nil) + raise NotImplementedError.new("#{self.class.name} does not support clear") + end + + private + # Adds the namespace defined in the options to a pattern designed to + # match keys. Implementations that support delete_matched should call + # this method to translate a pattern that matches names into one that + # matches namespaced keys. + def key_matcher(pattern, options) # :doc: + prefix = options[:namespace].is_a?(Proc) ? options[:namespace].call : options[:namespace] + if prefix + source = pattern.source + if source.start_with?("^") + source = source[1, source.length] + else + source = ".*#{source[0, source.length]}" + end + Regexp.new("^#{Regexp.escape(prefix)}:#{source}", pattern.options) + else + pattern + end + end + + # Reads an entry from the cache implementation. Subclasses must implement + # this method. + def read_entry(key, options) + raise NotImplementedError.new + end + + # Writes an entry to the cache implementation. Subclasses must implement + # this method. + def write_entry(key, entry, options) + raise NotImplementedError.new + end + + # Reads multiple entries from the cache implementation. Subclasses MAY + # implement this method. + def read_multi_entries(names, options) + results = {} + names.each do |name| + key = normalize_key(name, options) + version = normalize_version(name, options) + entry = read_entry(key, options) + + if entry + if entry.expired? + delete_entry(key, options) + elsif entry.mismatched?(version) + # Skip mismatched versions + else + results[name] = entry.value + end + end + end + results + end + + # Writes multiple entries to the cache implementation. Subclasses MAY + # implement this method. + def write_multi_entries(hash, options) + hash.each do |key, entry| + write_entry key, entry, options + end + end + + # Deletes an entry from the cache implementation. Subclasses must + # implement this method. + def delete_entry(key, options) + raise NotImplementedError.new + end + + # Merges the default options with ones specific to a method call. + def merged_options(call_options) + if call_options + options.merge(call_options) + else + options.dup + end + end + + # Expands and namespaces the cache key. May be overridden by + # cache stores to do additional normalization. 
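+        #
+        # Conceptually (illustrative values; this is a private helper, not public API):
+        #
+        #   normalize_key("views/home", namespace: "app") # => "app:views/home"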
+ def normalize_key(key, options = nil) + namespace_key expanded_key(key), options + end + + # Prefix the key with a namespace string: + # + # namespace_key 'foo', namespace: 'cache' + # # => 'cache:foo' + # + # With a namespace block: + # + # namespace_key 'foo', namespace: -> { 'cache' } + # # => 'cache:foo' + def namespace_key(key, options = nil) + options = merged_options(options) + namespace = options[:namespace] + + if namespace.respond_to?(:call) + namespace = namespace.call + end + + if namespace + "#{namespace}:#{key}" + else + key + end + end + + # Expands key to be a consistent string value. Invokes +cache_key+ if + # object responds to +cache_key+. Otherwise, +to_param+ method will be + # called. If the key is a Hash, then keys will be sorted alphabetically. + def expanded_key(key) + return key.cache_key.to_s if key.respond_to?(:cache_key) + + case key + when Array + if key.size > 1 + key = key.collect { |element| expanded_key(element) } + else + key = key.first + end + when Hash + key = key.sort_by { |k, _| k.to_s }.collect { |k, v| "#{k}=#{v}" } + end + + key.to_param + end + + def normalize_version(key, options = nil) + (options && options[:version].try(:to_param)) || expanded_version(key) + end + + def expanded_version(key) + case + when key.respond_to?(:cache_version) then key.cache_version.to_param + when key.is_a?(Array) then key.map { |element| expanded_version(element) }.compact.to_param + when key.respond_to?(:to_a) then expanded_version(key.to_a) + end + end + + def instrument(operation, key, options = nil) + log { "Cache #{operation}: #{normalize_key(key, options)}#{options.blank? ? "" : " (#{options.inspect})"}" } + + payload = { key: key } + payload.merge!(options) if options.is_a?(Hash) + ActiveSupport::Notifications.instrument("cache_#{operation}.active_support", payload) { yield(payload) } + end + + def log + return unless logger && logger.debug? && !silence? + logger.debug(yield) + end + + def handle_expired_entry(entry, key, options) + if entry && entry.expired? + race_ttl = options[:race_condition_ttl].to_i + if (race_ttl > 0) && (Time.now.to_f - entry.expires_at <= race_ttl) + # When an entry has a positive :race_condition_ttl defined, put the stale entry back into the cache + # for a brief period while the entry is being recalculated. + entry.expires_at = Time.now + race_ttl + write_entry(key, entry, expires_in: race_ttl * 2) + else + delete_entry(key, options) + end + entry = nil + end + entry + end + + def get_entry_value(entry, name, options) + instrument(:fetch_hit, name, options) {} + entry.value + end + + def save_block_result_to_cache(name, options) + result = instrument(:generate, name, options) do + yield(name) + end + + write(name, result, options) + result + end + end + + # This class is used to represent cache entries. Cache entries have a value, an optional + # expiration time, and an optional version. The expiration time is used to support the :race_condition_ttl option + # on the cache. The version is used to support the :version option on the cache for rejecting + # mismatches. + # + # Since cache entries in most instances will be serialized, the internals of this class are highly optimized + # using short instance variable names that are lazily defined. + class Entry # :nodoc: + attr_reader :version + + DEFAULT_COMPRESS_LIMIT = 1.kilobyte + + # Creates a new cache entry for the specified value. Options supported are + # +:compress+, +:compress_threshold+, +:version+ and +:expires_in+. 
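+      #
+      # Illustrative construction (Entry is internal API; the values shown are arbitrary):
+      #
+      #   ActiveSupport::Cache::Entry.new("payload", expires_in: 60, version: "v1")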
+ def initialize(value, compress: true, compress_threshold: DEFAULT_COMPRESS_LIMIT, version: nil, expires_in: nil, **) + @value = value + @version = version + @created_at = Time.now.to_f + @expires_in = expires_in && expires_in.to_f + + compress!(compress_threshold) if compress + end + + def value + compressed? ? uncompress(@value) : @value + end + + def mismatched?(version) + @version && version && @version != version + end + + # Checks if the entry is expired. The +expires_in+ parameter can override + # the value set when the entry was created. + def expired? + @expires_in && @created_at + @expires_in <= Time.now.to_f + end + + def expires_at + @expires_in ? @created_at + @expires_in : nil + end + + def expires_at=(value) + if value + @expires_in = value.to_f - @created_at + else + @expires_in = nil + end + end + + # Returns the size of the cached value. This could be less than + # value.size if the data is compressed. + def size + case value + when NilClass + 0 + when String + @value.bytesize + else + @s ||= Marshal.dump(@value).bytesize + end + end + + # Duplicates the value in a class. This is used by cache implementations that don't natively + # serialize entries to protect against accidental cache modifications. + def dup_value! + if @value && !compressed? && !(@value.is_a?(Numeric) || @value == true || @value == false) + if @value.is_a?(String) + @value = @value.dup + else + @value = Marshal.load(Marshal.dump(@value)) + end + end + end + + private + def compress!(compress_threshold) + case @value + when nil, true, false, Numeric + uncompressed_size = 0 + when String + uncompressed_size = @value.bytesize + else + serialized = Marshal.dump(@value) + uncompressed_size = serialized.bytesize + end + + if uncompressed_size >= compress_threshold + serialized ||= Marshal.dump(@value) + compressed = Zlib::Deflate.deflate(serialized) + + if compressed.bytesize < uncompressed_size + @value = compressed + @compressed = true + end + end + end + + def compressed? + defined?(@compressed) + end + + def uncompress(value) + Marshal.load(Zlib::Inflate.inflate(value)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/file_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/file_store.rb new file mode 100644 index 0000000..a0f44aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/file_store.rb @@ -0,0 +1,196 @@ +# frozen_string_literal: true + +require "active_support/core_ext/marshal" +require "active_support/core_ext/file/atomic" +require "active_support/core_ext/string/conversions" +require "uri/common" + +module ActiveSupport + module Cache + # A cache store implementation which stores everything on the filesystem. + # + # FileStore implements the Strategy::LocalCache strategy which implements + # an in-memory cache inside of a block. + class FileStore < Store + prepend Strategy::LocalCache + attr_reader :cache_path + + DIR_FORMATTER = "%03X" + FILENAME_MAX_SIZE = 228 # max filename size on file system is 255, minus room for timestamp and random characters appended by Tempfile (used by atomic write) + FILEPATH_MAX_SIZE = 900 # max is 1024, plus some room + EXCLUDED_DIRS = [".", ".."].freeze + GITKEEP_FILES = [".gitkeep", ".keep"].freeze + + def initialize(cache_path, options = nil) + super(options) + @cache_path = cache_path.to_s + end + + # Deletes all items from the cache. 
In this case it deletes all the entries in the specified + # file store directory except for .keep or .gitkeep. Be careful which directory is specified in your + # config file when using +FileStore+ because everything in that directory will be deleted. + def clear(options = nil) + root_dirs = exclude_from(cache_path, EXCLUDED_DIRS + GITKEEP_FILES) + FileUtils.rm_r(root_dirs.collect { |f| File.join(cache_path, f) }) + rescue Errno::ENOENT + end + + # Preemptively iterates through all stored keys and removes the ones which have expired. + def cleanup(options = nil) + options = merged_options(options) + search_dir(cache_path) do |fname| + entry = read_entry(fname, options) + delete_entry(fname, options) if entry && entry.expired? + end + end + + # Increments an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def increment(name, amount = 1, options = nil) + modify_value(name, amount, options) + end + + # Decrements an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def decrement(name, amount = 1, options = nil) + modify_value(name, -amount, options) + end + + def delete_matched(matcher, options = nil) + options = merged_options(options) + instrument(:delete_matched, matcher.inspect) do + matcher = key_matcher(matcher, options) + search_dir(cache_path) do |path| + key = file_path_key(path) + delete_entry(path, options) if key.match(matcher) + end + end + end + + private + + def read_entry(key, options) + if File.exist?(key) + File.open(key) { |f| Marshal.load(f) } + end + rescue => e + logger.error("FileStoreError (#{e}): #{e.message}") if logger + nil + end + + def write_entry(key, entry, options) + return false if options[:unless_exist] && File.exist?(key) + ensure_cache_path(File.dirname(key)) + File.atomic_write(key, cache_path) { |f| Marshal.dump(entry, f) } + true + end + + def delete_entry(key, options) + if File.exist?(key) + begin + File.delete(key) + delete_empty_directories(File.dirname(key)) + true + rescue => e + # Just in case the error was caused by another process deleting the file first. + raise e if File.exist?(key) + false + end + end + end + + # Lock a file for a block so only one process can modify it at a time. + def lock_file(file_name, &block) + if File.exist?(file_name) + File.open(file_name, "r+") do |f| + begin + f.flock File::LOCK_EX + yield + ensure + f.flock File::LOCK_UN + end + end + else + yield + end + end + + # Translate a key into a file path. + def normalize_key(key, options) + key = super + fname = URI.encode_www_form_component(key) + + if fname.size > FILEPATH_MAX_SIZE + fname = ActiveSupport::Digest.hexdigest(key) + end + + hash = Zlib.adler32(fname) + hash, dir_1 = hash.divmod(0x1000) + dir_2 = hash.modulo(0x1000) + fname_paths = [] + + # Make sure file name doesn't exceed file system limits. + begin + fname_paths << fname[0, FILENAME_MAX_SIZE] + fname = fname[FILENAME_MAX_SIZE..-1] + end until fname.blank? + + File.join(cache_path, DIR_FORMATTER % dir_1, DIR_FORMATTER % dir_2, *fname_paths) + end + + # Translate a file path into a key. + def file_path_key(path) + fname = path[cache_path.to_s.size..-1].split(File::SEPARATOR, 4).last + URI.decode_www_form_component(fname, Encoding::UTF_8) + end + + # Delete empty directories in the cache. + def delete_empty_directories(dir) + return if File.realpath(dir) == File.realpath(cache_path) + if exclude_from(dir, EXCLUDED_DIRS).empty? 
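+            # The directory is empty at this point, so remove it (ignoring races with
+            # other processes) and walk up toward the cache root.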
+ Dir.delete(dir) rescue nil + delete_empty_directories(File.dirname(dir)) + end + end + + # Make sure a file path's directories exist. + def ensure_cache_path(path) + FileUtils.makedirs(path) unless File.exist?(path) + end + + def search_dir(dir, &callback) + return if !File.exist?(dir) + Dir.foreach(dir) do |d| + next if EXCLUDED_DIRS.include?(d) + name = File.join(dir, d) + if File.directory?(name) + search_dir(name, &callback) + else + callback.call name + end + end + end + + # Modifies the amount of an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def modify_value(name, amount, options) + file_name = normalize_key(name, options) + + lock_file(file_name) do + options = merged_options(options) + + if num = read(name, options) + num = num.to_i + amount + write(name, num, options) + num + end + end + end + + # Exclude entries from source directory + def exclude_from(source, excludes) + Dir.entries(source).reject { |f| excludes.include?(f) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/mem_cache_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/mem_cache_store.rb new file mode 100644 index 0000000..22c47e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/mem_cache_store.rb @@ -0,0 +1,197 @@ +# frozen_string_literal: true + +begin + require "dalli" +rescue LoadError => e + $stderr.puts "You don't have dalli installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end + +require "active_support/core_ext/array/extract_options" + +module ActiveSupport + module Cache + # A cache store implementation which stores data in Memcached: + # https://memcached.org + # + # This is currently the most popular cache store for production websites. + # + # Special features: + # - Clustering and load balancing. One can specify multiple memcached servers, + # and MemCacheStore will load balance between all available servers. If a + # server goes down, then MemCacheStore will ignore it until it comes back up. + # + # MemCacheStore implements the Strategy::LocalCache strategy which implements + # an in-memory cache inside of a block. + class MemCacheStore < Store + # Provide support for raw values in the local cache strategy. + module LocalCacheWithRaw # :nodoc: + private + def write_entry(key, entry, options) + if options[:raw] && local_cache + raw_entry = Entry.new(entry.value.to_s) + raw_entry.expires_at = entry.expires_at + super(key, raw_entry, options) + else + super + end + end + end + + prepend Strategy::LocalCache + prepend LocalCacheWithRaw + + ESCAPE_KEY_CHARS = /[\x00-\x20%\x7F-\xFF]/n + + # Creates a new Dalli::Client instance with specified addresses and options. + # By default address is equal localhost:11211. + # + # ActiveSupport::Cache::MemCacheStore.build_mem_cache + # # => # + # ActiveSupport::Cache::MemCacheStore.build_mem_cache('localhost:10290') + # # => # + def self.build_mem_cache(*addresses) # :nodoc: + addresses = addresses.flatten + options = addresses.extract_options! + addresses = ["localhost:11211"] if addresses.empty? + pool_options = retrieve_pool_options(options) + + if pool_options.empty? + Dalli::Client.new(addresses, options) + else + ensure_connection_pool_added! 
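+          # Hand out Dalli clients from a connection pool; Dalli's own locking is
+          # skipped (threadsafe: false) because the pool already serializes access.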
+ ConnectionPool.new(pool_options) { Dalli::Client.new(addresses, options.merge(threadsafe: false)) } + end + end + + # Creates a new MemCacheStore object, with the given memcached server + # addresses. Each address is either a host name, or a host-with-port string + # in the form of "host_name:port". For example: + # + # ActiveSupport::Cache::MemCacheStore.new("localhost", "server-downstairs.localnetwork:8229") + # + # If no addresses are specified, then MemCacheStore will connect to + # localhost port 11211 (the default memcached port). + def initialize(*addresses) + addresses = addresses.flatten + options = addresses.extract_options! + super(options) + + unless [String, Dalli::Client, NilClass].include?(addresses.first.class) + raise ArgumentError, "First argument must be an empty array, an array of hosts or a Dalli::Client instance." + end + if addresses.first.is_a?(Dalli::Client) + @data = addresses.first + else + mem_cache_options = options.dup + UNIVERSAL_OPTIONS.each { |name| mem_cache_options.delete(name) } + @data = self.class.build_mem_cache(*(addresses + [mem_cache_options])) + end + end + + # Increment a cached value. This method uses the memcached incr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + def increment(name, amount = 1, options = nil) + options = merged_options(options) + instrument(:increment, name, amount: amount) do + rescue_error_with nil do + @data.with { |c| c.incr(normalize_key(name, options), amount, options[:expires_in]) } + end + end + end + + # Decrement a cached value. This method uses the memcached decr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + def decrement(name, amount = 1, options = nil) + options = merged_options(options) + instrument(:decrement, name, amount: amount) do + rescue_error_with nil do + @data.with { |c| c.decr(normalize_key(name, options), amount, options[:expires_in]) } + end + end + end + + # Clear the entire cache on all memcached servers. This method should + # be used with care when shared cache is being used. + def clear(options = nil) + rescue_error_with(nil) { @data.with { |c| c.flush_all } } + end + + # Get the statistics from the memcached servers. + def stats + @data.with { |c| c.stats } + end + + private + # Read an entry from the cache. + def read_entry(key, options) + rescue_error_with(nil) { deserialize_entry(@data.with { |c| c.get(key, options) }) } + end + + # Write an entry to the cache. + def write_entry(key, entry, options) + method = options && options[:unless_exist] ? :add : :set + value = options[:raw] ? entry.value.to_s : entry + expires_in = options[:expires_in].to_i + if expires_in > 0 && !options[:raw] + # Set the memcache expire a few minutes in the future to support race condition ttls on read + expires_in += 5.minutes + end + rescue_error_with false do + @data.with { |c| c.send(method, key, value, expires_in, options) } + end + end + + # Reads multiple entries from the cache implementation. + def read_multi_entries(names, options) + keys_to_names = Hash[names.map { |name| [normalize_key(name, options), name] }] + + raw_values = @data.with { |c| c.get_multi(keys_to_names.keys) } + values = {} + + raw_values.each do |key, value| + entry = deserialize_entry(value) + + unless entry.expired? 
|| entry.mismatched?(normalize_version(keys_to_names[key], options)) + values[keys_to_names[key]] = entry.value + end + end + + values + end + + # Delete an entry from the cache. + def delete_entry(key, options) + rescue_error_with(false) { @data.with { |c| c.delete(key) } } + end + + # Memcache keys are binaries. So we need to force their encoding to binary + # before applying the regular expression to ensure we are escaping all + # characters properly. + def normalize_key(key, options) + key = super.dup + key = key.force_encoding(Encoding::ASCII_8BIT) + key = key.gsub(ESCAPE_KEY_CHARS) { |match| "%#{match.getbyte(0).to_s(16).upcase}" } + key = "#{key[0, 213]}:md5:#{ActiveSupport::Digest.hexdigest(key)}" if key.size > 250 + key + end + + def deserialize_entry(entry) + if entry + entry.is_a?(Entry) ? entry : Entry.new(entry) + end + end + + def rescue_error_with(fallback) + yield + rescue Dalli::DalliError => e + logger.error("DalliError (#{e}): #{e.message}") if logger + fallback + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/memory_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/memory_store.rb new file mode 100644 index 0000000..564ac17 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/memory_store.rb @@ -0,0 +1,169 @@ +# frozen_string_literal: true + +require "monitor" + +module ActiveSupport + module Cache + # A cache store implementation which stores everything into memory in the + # same process. If you're running multiple Ruby on Rails server processes + # (which is the case if you're using Phusion Passenger or puma clustered mode), + # then this means that Rails server process instances won't be able + # to share cache data with each other and this may not be the most + # appropriate cache in that scenario. + # + # This cache has a bounded size specified by the :size options to the + # initializer (default is 32Mb). When the cache exceeds the allotted size, + # a cleanup will occur which tries to prune the cache down to three quarters + # of the maximum size by removing the least recently used entries. + # + # MemoryStore is thread-safe. + class MemoryStore < Store + def initialize(options = nil) + options ||= {} + super(options) + @data = {} + @key_access = {} + @max_size = options[:size] || 32.megabytes + @max_prune_time = options[:max_prune_time] || 2 + @cache_size = 0 + @monitor = Monitor.new + @pruning = false + end + + # Delete all data stored in a given cache store. + def clear(options = nil) + synchronize do + @data.clear + @key_access.clear + @cache_size = 0 + end + end + + # Preemptively iterates through all stored keys and removes the ones which have expired. + def cleanup(options = nil) + options = merged_options(options) + instrument(:cleanup, size: @data.size) do + keys = synchronize { @data.keys } + keys.each do |key| + entry = @data[key] + delete_entry(key, options) if entry && entry.expired? + end + end + end + + # To ensure entries fit within the specified memory prune the cache by removing the least + # recently accessed entries. + def prune(target_size, max_time = nil) + return if pruning? 
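+        # Mark pruning as in progress so overlapping calls bail out at the guard above.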
+ @pruning = true + begin + start_time = Time.now + cleanup + instrument(:prune, target_size, from: @cache_size) do + keys = synchronize { @key_access.keys.sort { |a, b| @key_access[a].to_f <=> @key_access[b].to_f } } + keys.each do |key| + delete_entry(key, options) + return if @cache_size <= target_size || (max_time && Time.now - start_time > max_time) + end + end + ensure + @pruning = false + end + end + + # Returns true if the cache is currently being pruned. + def pruning? + @pruning + end + + # Increment an integer value in the cache. + def increment(name, amount = 1, options = nil) + modify_value(name, amount, options) + end + + # Decrement an integer value in the cache. + def decrement(name, amount = 1, options = nil) + modify_value(name, -amount, options) + end + + # Deletes cache entries if the cache key matches a given pattern. + def delete_matched(matcher, options = nil) + options = merged_options(options) + instrument(:delete_matched, matcher.inspect) do + matcher = key_matcher(matcher, options) + keys = synchronize { @data.keys } + keys.each do |key| + delete_entry(key, options) if key.match(matcher) + end + end + end + + def inspect # :nodoc: + "<##{self.class.name} entries=#{@data.size}, size=#{@cache_size}, options=#{@options.inspect}>" + end + + # Synchronize calls to the cache. This should be called wherever the underlying cache implementation + # is not thread safe. + def synchronize(&block) # :nodoc: + @monitor.synchronize(&block) + end + + private + + PER_ENTRY_OVERHEAD = 240 + + def cached_size(key, entry) + key.to_s.bytesize + entry.size + PER_ENTRY_OVERHEAD + end + + def read_entry(key, options) + entry = @data[key] + synchronize do + if entry + @key_access[key] = Time.now.to_f + else + @key_access.delete(key) + end + end + entry + end + + def write_entry(key, entry, options) + entry.dup_value! + synchronize do + old_entry = @data[key] + return false if @data.key?(key) && options[:unless_exist] + if old_entry + @cache_size -= (old_entry.size - entry.size) + else + @cache_size += cached_size(key, entry) + end + @key_access[key] = Time.now.to_f + @data[key] = entry + prune(@max_size * 0.75, @max_prune_time) if @cache_size > @max_size + true + end + end + + def delete_entry(key, options) + synchronize do + @key_access.delete(key) + entry = @data.delete(key) + @cache_size -= cached_size(key, entry) if entry + !!entry + end + end + + def modify_value(name, amount, options) + synchronize do + options = merged_options(options) + if num = read(name, options) + num = num.to_i + amount + write(name, num, options) + num + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/null_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/null_store.rb new file mode 100644 index 0000000..1a5983d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/null_store.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module ActiveSupport + module Cache + # A cache store implementation which doesn't actually store anything. Useful in + # development and test environments where you don't want caching turned on but + # need to go through the caching interface. + # + # This cache does implement the local cache strategy, so values will actually + # be cached inside blocks that utilize this strategy. See + # ActiveSupport::Cache::Strategy::LocalCache for more details. 
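+    #
+    # Illustrative behaviour:
+    #
+    #   cache = ActiveSupport::Cache::NullStore.new
+    #   cache.write("foo", "bar") # => true
+    #   cache.read("foo")         # => nil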
+ class NullStore < Store + prepend Strategy::LocalCache + + def clear(options = nil) + end + + def cleanup(options = nil) + end + + def increment(name, amount = 1, options = nil) + end + + def decrement(name, amount = 1, options = nil) + end + + def delete_matched(matcher, options = nil) + end + + private + def read_entry(key, options) + end + + def write_entry(key, entry, options) + true + end + + def delete_entry(key, options) + false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/redis_cache_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/redis_cache_store.rb new file mode 100644 index 0000000..825fc9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/redis_cache_store.rb @@ -0,0 +1,466 @@ +# frozen_string_literal: true + +begin + gem "redis", ">= 4.0.1" + require "redis" + require "redis/distributed" +rescue LoadError + warn "The Redis cache store requires the redis gem, version 4.0.1 or later. Please add it to your Gemfile: `gem \"redis\", \"~> 4.0\"`" + raise +end + +# Prefer the hiredis driver but don't require it. +begin + require "redis/connection/hiredis" +rescue LoadError +end + +require "digest/sha2" +require "active_support/core_ext/marshal" +require "active_support/core_ext/hash/transform_values" + +module ActiveSupport + module Cache + module ConnectionPoolLike + def with + yield self + end + end + + ::Redis.include(ConnectionPoolLike) + ::Redis::Distributed.include(ConnectionPoolLike) + + # Redis cache store. + # + # Deployment note: Take care to use a *dedicated Redis cache* rather + # than pointing this at your existing Redis server. It won't cope well + # with mixed usage patterns and it won't expire cache entries by default. + # + # Redis cache server setup guide: https://redis.io/topics/lru-cache + # + # * Supports vanilla Redis, hiredis, and Redis::Distributed. + # * Supports Memcached-like sharding across Redises with Redis::Distributed. + # * Fault tolerant. If the Redis server is unavailable, no exceptions are + # raised. Cache fetches are all misses and writes are dropped. + # * Local cache. Hot in-memory primary cache within block/middleware scope. + # * +read_multi+ and +write_multi+ support for Redis mget/mset. Use Redis::Distributed + # 4.0.1+ for distributed mget support. + # * +delete_matched+ support for Redis KEYS globs. + class RedisCacheStore < Store + # Keys are truncated with their own SHA2 digest if they exceed 1kB + MAX_KEY_BYTESIZE = 1024 + + DEFAULT_REDIS_OPTIONS = { + connect_timeout: 20, + read_timeout: 1, + write_timeout: 1, + reconnect_attempts: 0, + } + + DEFAULT_ERROR_HANDLER = -> (method:, returning:, exception:) do + if logger + logger.error { "RedisCacheStore: #{method} failed, returned #{returning.inspect}: #{exception.class}: #{exception.message}" } + end + end + + # The maximum number of entries to receive per SCAN call. + SCAN_BATCH_SIZE = 1000 + private_constant :SCAN_BATCH_SIZE + + # Support raw values in the local cache strategy. 
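+      # When +raw: true+ is used, the local copy wraps the stringified value that
+      # is sent to Redis rather than the original entry.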
+ module LocalCacheWithRaw # :nodoc: + private + def write_entry(key, entry, options) + if options[:raw] && local_cache + raw_entry = Entry.new(serialize_entry(entry, raw: true)) + raw_entry.expires_at = entry.expires_at + super(key, raw_entry, options) + else + super + end + end + + def write_multi_entries(entries, options) + if options[:raw] && local_cache + raw_entries = entries.map do |key, entry| + raw_entry = Entry.new(serialize_entry(entry, raw: true)) + raw_entry.expires_at = entry.expires_at + end.to_h + + super(raw_entries, options) + else + super + end + end + end + + prepend Strategy::LocalCache + prepend LocalCacheWithRaw + + class << self + # Factory method to create a new Redis instance. + # + # Handles four options: :redis block, :redis instance, single :url + # string, and multiple :url strings. + # + # Option Class Result + # :redis Proc -> options[:redis].call + # :redis Object -> options[:redis] + # :url String -> Redis.new(url: …) + # :url Array -> Redis::Distributed.new([{ url: … }, { url: … }, …]) + # + def build_redis(redis: nil, url: nil, **redis_options) #:nodoc: + urls = Array(url) + + if redis.is_a?(Proc) + redis.call + elsif redis + redis + elsif urls.size > 1 + build_redis_distributed_client urls: urls, **redis_options + else + build_redis_client url: urls.first, **redis_options + end + end + + private + def build_redis_distributed_client(urls:, **redis_options) + ::Redis::Distributed.new([], DEFAULT_REDIS_OPTIONS.merge(redis_options)).tap do |dist| + urls.each { |u| dist.add_node url: u } + end + end + + def build_redis_client(url:, **redis_options) + ::Redis.new DEFAULT_REDIS_OPTIONS.merge(redis_options.merge(url: url)) + end + end + + attr_reader :redis_options + attr_reader :max_key_bytesize + + # Creates a new Redis cache store. + # + # Handles three options: block provided to instantiate, single URL + # provided, and multiple URLs provided. + # + # :redis Proc -> options[:redis].call + # :url String -> Redis.new(url: …) + # :url Array -> Redis::Distributed.new([{ url: … }, { url: … }, …]) + # + # No namespace is set by default. Provide one if the Redis cache + # server is shared with other apps: namespace: 'myapp-cache'. + # + # Compression is enabled by default with a 1kB threshold, so cached + # values larger than 1kB are automatically compressed. Disable by + # passing compress: false or change the threshold by passing + # compress_threshold: 4.kilobytes. + # + # No expiry is set on cache entries by default. Redis is expected to + # be configured with an eviction policy that automatically deletes + # least-recently or -frequently used keys when it reaches max memory. + # See https://redis.io/topics/lru-cache for cache server setup. + # + # Race condition TTL is not set by default. This can be used to avoid + # "thundering herd" cache writes when hot cache entries are expired. + # See ActiveSupport::Cache::Store#fetch for more. + def initialize(namespace: nil, compress: true, compress_threshold: 1.kilobyte, expires_in: nil, race_condition_ttl: nil, error_handler: DEFAULT_ERROR_HANDLER, **redis_options) + @redis_options = redis_options + + @max_key_bytesize = MAX_KEY_BYTESIZE + @error_handler = error_handler + + super namespace: namespace, + compress: compress, compress_threshold: compress_threshold, + expires_in: expires_in, race_condition_ttl: race_condition_ttl + end + + def redis + @redis ||= begin + pool_options = self.class.send(:retrieve_pool_options, redis_options) + + if pool_options.any? + self.class.send(:ensure_connection_pool_added!) 
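+            # Each pooled connection builds its own Redis client from the same options.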
+ ::ConnectionPool.new(pool_options) { self.class.build_redis(**redis_options) } + else + self.class.build_redis(**redis_options) + end + end + end + + def inspect + instance = @redis || @redis_options + "<##{self.class} options=#{options.inspect} redis=#{instance.inspect}>" + end + + # Cache Store API implementation. + # + # Read multiple values at once. Returns a hash of requested keys -> + # fetched values. + def read_multi(*names) + if mget_capable? + instrument(:read_multi, names, options) do |payload| + read_multi_mget(*names).tap do |results| + payload[:hits] = results.keys + end + end + else + super + end + end + + # Cache Store API implementation. + # + # Supports Redis KEYS glob patterns: + # + # h?llo matches hello, hallo and hxllo + # h*llo matches hllo and heeeello + # h[ae]llo matches hello and hallo, but not hillo + # h[^e]llo matches hallo, hbllo, ... but not hello + # h[a-b]llo matches hallo and hbllo + # + # Use \ to escape special characters if you want to match them verbatim. + # + # See https://redis.io/commands/KEYS for more. + # + # Failsafe: Raises errors. + def delete_matched(matcher, options = nil) + instrument :delete_matched, matcher do + unless String === matcher + raise ArgumentError, "Only Redis glob strings are supported: #{matcher.inspect}" + end + redis.with do |c| + pattern = namespace_key(matcher, options) + cursor = "0" + # Fetch keys in batches using SCAN to avoid blocking the Redis server. + begin + cursor, keys = c.scan(cursor, match: pattern, count: SCAN_BATCH_SIZE) + c.del(*keys) unless keys.empty? + end until cursor == "0" + end + end + end + + # Cache Store API implementation. + # + # Increment a cached value. This method uses the Redis incr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + # + # Failsafe: Raises errors. + def increment(name, amount = 1, options = nil) + instrument :increment, name, amount: amount do + failsafe :increment do + redis.with { |c| c.incrby normalize_key(name, options), amount } + end + end + end + + # Cache Store API implementation. + # + # Decrement a cached value. This method uses the Redis decr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + # + # Failsafe: Raises errors. + def decrement(name, amount = 1, options = nil) + instrument :decrement, name, amount: amount do + failsafe :decrement do + redis.with { |c| c.decrby normalize_key(name, options), amount } + end + end + end + + # Cache Store API implementation. + # + # Removes expired entries. Handled natively by Redis least-recently-/ + # least-frequently-used expiry, so manual cleanup is not supported. + def cleanup(options = nil) + super + end + + # Clear the entire cache on all Redis servers. Safe to use on + # shared servers if the cache is namespaced. + # + # Failsafe: Raises errors. + def clear(options = nil) + failsafe :clear do + if namespace = merged_options(options)[:namespace] + delete_matched "*", namespace: namespace + else + redis.with { |c| c.flushdb } + end + end + end + + def mget_capable? #:nodoc: + set_redis_capabilities unless defined? @mget_capable + @mget_capable + end + + def mset_capable? #:nodoc: + set_redis_capabilities unless defined? 
@mset_capable + @mset_capable + end + + private + def set_redis_capabilities + case redis + when Redis::Distributed + @mget_capable = true + @mset_capable = false + else + @mget_capable = true + @mset_capable = true + end + end + + # Store provider interface: + # Read an entry from the cache. + def read_entry(key, options = nil) + failsafe :read_entry do + raw = options&.fetch(:raw, false) + deserialize_entry(redis.with { |c| c.get(key) }, raw: raw) + end + end + + def read_multi_entries(names, _options) + if mget_capable? + read_multi_mget(*names) + else + super + end + end + + def read_multi_mget(*names) + options = names.extract_options! + options = merged_options(options) + raw = options&.fetch(:raw, false) + + keys = names.map { |name| normalize_key(name, options) } + + values = failsafe(:read_multi_mget, returning: {}) do + redis.with { |c| c.mget(*keys) } + end + + names.zip(values).each_with_object({}) do |(name, value), results| + if value + entry = deserialize_entry(value, raw: raw) + unless entry.nil? || entry.expired? || entry.mismatched?(normalize_version(name, options)) + results[name] = entry.value + end + end + end + end + + # Write an entry to the cache. + # + # Requires Redis 2.6.12+ for extended SET options. + def write_entry(key, entry, unless_exist: false, raw: false, expires_in: nil, race_condition_ttl: nil, **options) + serialized_entry = serialize_entry(entry, raw: raw) + + # If race condition TTL is in use, ensure that cache entries + # stick around a bit longer after they would have expired + # so we can purposefully serve stale entries. + if race_condition_ttl && expires_in && expires_in > 0 && !raw + expires_in += 5.minutes + end + + failsafe :write_entry, returning: false do + if unless_exist || expires_in + modifiers = {} + modifiers[:nx] = unless_exist + modifiers[:px] = (1000 * expires_in.to_f).ceil if expires_in + + redis.with { |c| c.set key, serialized_entry, modifiers } + else + redis.with { |c| c.set key, serialized_entry } + end + end + end + + # Delete an entry from the cache. + def delete_entry(key, options) + failsafe :delete_entry, returning: false do + redis.with { |c| c.del key } + end + end + + # Nonstandard store provider API to write multiple values at once. + def write_multi_entries(entries, expires_in: nil, **options) + if entries.any? + if mset_capable? && expires_in.nil? + failsafe :write_multi_entries do + redis.with { |c| c.mapped_mset(serialize_entries(entries, raw: options[:raw])) } + end + else + super + end + end + end + + # Truncate keys that exceed 1kB. + def normalize_key(key, options) + truncate_key super.b + end + + def truncate_key(key) + if key.bytesize > max_key_bytesize + suffix = ":sha2:#{::Digest::SHA2.hexdigest(key)}" + truncate_at = max_key_bytesize - suffix.bytesize + "#{key.byteslice(0, truncate_at)}#{suffix}" + else + key + end + end + + def deserialize_entry(serialized_entry, raw:) + if serialized_entry + entry = Marshal.load(serialized_entry) rescue serialized_entry + + written_raw = serialized_entry.equal?(entry) + if raw != written_raw + ActiveSupport::Deprecation.warn(<<-MSG.squish) + Using a different value for the raw option when reading and writing + to a cache key is deprecated for :redis_cache_store and Rails 6.0 + will stop automatically detecting the format when reading to avoid + marshal loading untrusted raw strings. + MSG + end + + entry.is_a?(Entry) ? 
entry : Entry.new(entry) + end + end + + def serialize_entry(entry, raw: false) + if raw + entry.value.to_s + else + Marshal.dump(entry) + end + end + + def serialize_entries(entries, raw: false) + entries.transform_values do |entry| + serialize_entry entry, raw: raw + end + end + + def failsafe(method, returning: nil) + yield + rescue ::Redis::BaseConnectionError => e + handle_exception exception: e, method: method, returning: returning + returning + end + + def handle_exception(exception:, method:, returning:) + if @error_handler + @error_handler.(method: method, exception: exception, returning: returning) + end + rescue => failsafe + warn "RedisCacheStore ignored exception in handle_exception: #{failsafe.class}: #{failsafe.message}\n #{failsafe.backtrace.join("\n ")}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache.rb new file mode 100644 index 0000000..39b32fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache.rb @@ -0,0 +1,194 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" +require "active_support/core_ext/string/inflections" +require "active_support/per_thread_registry" + +module ActiveSupport + module Cache + module Strategy + # Caches that implement LocalCache will be backed by an in-memory cache for the + # duration of a block. Repeated calls to the cache for the same key will hit the + # in-memory cache for faster access. + module LocalCache + autoload :Middleware, "active_support/cache/strategy/local_cache_middleware" + + # Class for storing and registering the local caches. + class LocalCacheRegistry # :nodoc: + extend ActiveSupport::PerThreadRegistry + + def initialize + @registry = {} + end + + def cache_for(local_cache_key) + @registry[local_cache_key] + end + + def set_cache_for(local_cache_key, value) + @registry[local_cache_key] = value + end + + def self.set_cache_for(l, v); instance.set_cache_for l, v; end + def self.cache_for(l); instance.cache_for l; end + end + + # Simple memory backed cache. This cache is not thread safe and is intended only + # for serving as a temporary memory cache for a single thread. + class LocalStore < Store + def initialize + super + @data = {} + end + + # Don't allow synchronizing since it isn't thread safe. + def synchronize # :nodoc: + yield + end + + def clear(options = nil) + @data.clear + end + + def read_entry(key, options) + @data[key] + end + + def read_multi_entries(keys, options) + values = {} + + keys.each do |name| + entry = read_entry(name, options) + values[name] = entry.value if entry + end + + values + end + + def write_entry(key, value, options) + @data[key] = value + true + end + + def delete_entry(key, options) + !!@data.delete(key) + end + + def fetch_entry(key, options = nil) # :nodoc: + @data.fetch(key) { @data[key] = yield } + end + end + + # Use a local cache for the duration of block. + def with_local_cache + use_temporary_local_cache(LocalStore.new) { yield } + end + + # Middleware class can be inserted as a Rack handler to be local cache for the + # duration of request. 
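+      #
+      # For example, in a plain Rack config.ru (illustrative):
+      #
+      #   use cache.middleware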
+ def middleware + @middleware ||= Middleware.new( + "ActiveSupport::Cache::Strategy::LocalCache", + local_cache_key) + end + + def clear(options = nil) # :nodoc: + return super unless cache = local_cache + cache.clear(options) + super + end + + def cleanup(options = nil) # :nodoc: + return super unless cache = local_cache + cache.clear + super + end + + def increment(name, amount = 1, options = nil) # :nodoc: + return super unless local_cache + value = bypass_local_cache { super } + write_cache_value(name, value, options) + value + end + + def decrement(name, amount = 1, options = nil) # :nodoc: + return super unless local_cache + value = bypass_local_cache { super } + write_cache_value(name, value, options) + value + end + + private + def read_entry(key, options) + if cache = local_cache + cache.fetch_entry(key) { super } + else + super + end + end + + def read_multi_entries(keys, options) + return super unless local_cache + + local_entries = local_cache.read_multi_entries(keys, options) + missed_keys = keys - local_entries.keys + + if missed_keys.any? + local_entries.merge!(super(missed_keys, options)) + else + local_entries + end + end + + def write_entry(key, entry, options) + if options[:unless_exist] + local_cache.delete_entry(key, options) if local_cache + else + local_cache.write_entry(key, entry, options) if local_cache + end + + super + end + + def delete_entry(key, options) + local_cache.delete_entry(key, options) if local_cache + super + end + + def write_cache_value(name, value, options) + name = normalize_key(name, options) + cache = local_cache + cache.mute do + if value + cache.write(name, value, options) + else + cache.delete(name, options) + end + end + end + + def local_cache_key + @local_cache_key ||= "#{self.class.name.underscore}_local_cache_#{object_id}".gsub(/[\/-]/, "_").to_sym + end + + def local_cache + LocalCacheRegistry.cache_for(local_cache_key) + end + + def bypass_local_cache + use_temporary_local_cache(nil) { yield } + end + + def use_temporary_local_cache(temporary_cache) + save_cache = LocalCacheRegistry.cache_for(local_cache_key) + begin + LocalCacheRegistry.set_cache_for(local_cache_key, temporary_cache) + yield + ensure + LocalCacheRegistry.set_cache_for(local_cache_key, save_cache) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache_middleware.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache_middleware.rb new file mode 100644 index 0000000..62542bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/cache/strategy/local_cache_middleware.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "rack/body_proxy" +require "rack/utils" + +module ActiveSupport + module Cache + module Strategy + module LocalCache + #-- + # This class wraps up local storage for middlewares. Only the middleware method should + # construct them. 
+ class Middleware # :nodoc: + attr_reader :name, :local_cache_key + + def initialize(name, local_cache_key) + @name = name + @local_cache_key = local_cache_key + @app = nil + end + + def new(app) + @app = app + self + end + + def call(env) + LocalCacheRegistry.set_cache_for(local_cache_key, LocalStore.new) + response = @app.call(env) + response[2] = ::Rack::BodyProxy.new(response[2]) do + LocalCacheRegistry.set_cache_for(local_cache_key, nil) + end + cleanup_on_body_close = true + response + rescue Rack::Utils::InvalidParameterError + [400, {}, []] + ensure + LocalCacheRegistry.set_cache_for(local_cache_key, nil) unless + cleanup_on_body_close + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/callbacks.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/callbacks.rb new file mode 100644 index 0000000..9a3728d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/callbacks.rb @@ -0,0 +1,845 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/descendants_tracker" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/string/filters" +require "active_support/deprecation" +require "thread" + +module ActiveSupport + # Callbacks are code hooks that are run at key points in an object's life cycle. + # The typical use case is to have a base class define a set of callbacks + # relevant to the other functionality it supplies, so that subclasses can + # install callbacks that enhance or modify the base functionality without + # needing to override or redefine methods of the base class. + # + # Mixing in this module allows you to define the events in the object's + # life cycle that will support callbacks (via +ClassMethods.define_callbacks+), + # set the instance methods, procs, or callback objects to be called (via + # +ClassMethods.set_callback+), and run the installed callbacks at the + # appropriate times (via +run_callbacks+). + # + # Three kinds of callbacks are supported: before callbacks, run before a + # certain event; after callbacks, run after the event; and around callbacks, + # blocks that surround the event, triggering it when they yield. Callback code + # can be contained in instance methods, procs or lambdas, or callback objects + # that respond to certain predetermined methods. See +ClassMethods.set_callback+ + # for details. + # + # class Record + # include ActiveSupport::Callbacks + # define_callbacks :save + # + # def save + # run_callbacks :save do + # puts "- save" + # end + # end + # end + # + # class PersonRecord < Record + # set_callback :save, :before, :saving_message + # def saving_message + # puts "saving..." + # end + # + # set_callback :save, :after do |object| + # puts "saved" + # end + # end + # + # person = PersonRecord.new + # person.save + # + # Output: + # saving... + # - save + # saved + module Callbacks + extend Concern + + included do + extend ActiveSupport::DescendantsTracker + class_attribute :__callbacks, instance_writer: false, default: {} + end + + CALLBACK_FILTER_TYPES = [:before, :after, :around] + + # Runs the callbacks for the given event. 
+ # + # Calls the before and around callbacks in the order they were set, yields + # the block (if given one), and then runs the after callbacks in reverse + # order. + # + # If the callback chain was halted, returns +false+. Otherwise returns the + # result of the block, +nil+ if no callbacks have been set, or +true+ + # if callbacks have been set but no block is given. + # + # run_callbacks :save do + # save + # end + # + #-- + # + # As this method is used in many places, and often wraps large portions of + # user code, it has an additional design goal of minimizing its impact on + # the visible call stack. An exception from inside a :before or :after + # callback can be as noisy as it likes -- but when control has passed + # smoothly through and into the supplied block, we want as little evidence + # as possible that we were here. + def run_callbacks(kind) + callbacks = __callbacks[kind.to_sym] + + if callbacks.empty? + yield if block_given? + else + env = Filters::Environment.new(self, false, nil) + next_sequence = callbacks.compile + + invoke_sequence = Proc.new do + skipped = nil + while true + current = next_sequence + current.invoke_before(env) + if current.final? + env.value = !env.halted && (!block_given? || yield) + elsif current.skip?(env) + (skipped ||= []) << current + next_sequence = next_sequence.nested + next + else + next_sequence = next_sequence.nested + begin + target, block, method, *arguments = current.expand_call_template(env, invoke_sequence) + target.send(method, *arguments, &block) + ensure + next_sequence = current + end + end + current.invoke_after(env) + skipped.pop.invoke_after(env) while skipped && skipped.first + break env.value + end + end + + # Common case: no 'around' callbacks defined + if next_sequence.final? + next_sequence.invoke_before(env) + env.value = !env.halted && (!block_given? || yield) + next_sequence.invoke_after(env) + env.value + else + invoke_sequence.call + end + end + end + + private + + # A hook invoked every time a before callback is halted. + # This can be overridden in ActiveSupport::Callbacks implementors in order + # to provide better debugging/logging. + def halted_callback_hook(filter) + end + + module Conditionals # :nodoc: + class Value + def initialize(&block) + @block = block + end + def call(target, value); @block.call(value); end + end + end + + module Filters + Environment = Struct.new(:target, :halted, :value) + + class Before + def self.build(callback_sequence, user_callback, user_conditions, chain_config, filter) + halted_lambda = chain_config[:terminator] + + if user_conditions.any? + halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter) + else + halting(callback_sequence, user_callback, halted_lambda, filter) + end + end + + def self.halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter) + callback_sequence.before do |env| + target = env.target + value = env.value + halted = env.halted + + if !halted && user_conditions.all? 
{ |c| c.call(target, value) } + result_lambda = -> { user_callback.call target, value } + env.halted = halted_lambda.call(target, result_lambda) + if env.halted + target.send :halted_callback_hook, filter + end + end + + env + end + end + private_class_method :halting_and_conditional + + def self.halting(callback_sequence, user_callback, halted_lambda, filter) + callback_sequence.before do |env| + target = env.target + value = env.value + halted = env.halted + + unless halted + result_lambda = -> { user_callback.call target, value } + env.halted = halted_lambda.call(target, result_lambda) + + if env.halted + target.send :halted_callback_hook, filter + end + end + + env + end + end + private_class_method :halting + end + + class After + def self.build(callback_sequence, user_callback, user_conditions, chain_config) + if chain_config[:skip_after_callbacks_if_terminated] + if user_conditions.any? + halting_and_conditional(callback_sequence, user_callback, user_conditions) + else + halting(callback_sequence, user_callback) + end + else + if user_conditions.any? + conditional callback_sequence, user_callback, user_conditions + else + simple callback_sequence, user_callback + end + end + end + + def self.halting_and_conditional(callback_sequence, user_callback, user_conditions) + callback_sequence.after do |env| + target = env.target + value = env.value + halted = env.halted + + if !halted && user_conditions.all? { |c| c.call(target, value) } + user_callback.call target, value + end + + env + end + end + private_class_method :halting_and_conditional + + def self.halting(callback_sequence, user_callback) + callback_sequence.after do |env| + unless env.halted + user_callback.call env.target, env.value + end + + env + end + end + private_class_method :halting + + def self.conditional(callback_sequence, user_callback, user_conditions) + callback_sequence.after do |env| + target = env.target + value = env.value + + if user_conditions.all? { |c| c.call(target, value) } + user_callback.call target, value + end + + env + end + end + private_class_method :conditional + + def self.simple(callback_sequence, user_callback) + callback_sequence.after do |env| + user_callback.call env.target, env.value + + env + end + end + private_class_method :simple + end + end + + class Callback #:nodoc:# + def self.build(chain, filter, kind, options) + if filter.is_a?(String) + raise ArgumentError, <<-MSG.squish + Passing string to define a callback is not supported. See the `.set_callback` + documentation to see supported values. 
+ MSG + end + + new chain.name, filter, kind, options, chain.config + end + + attr_accessor :kind, :name + attr_reader :chain_config + + def initialize(name, filter, kind, options, chain_config) + @chain_config = chain_config + @name = name + @kind = kind + @filter = filter + @key = compute_identifier filter + @if = check_conditionals(Array(options[:if])) + @unless = check_conditionals(Array(options[:unless])) + end + + def filter; @key; end + def raw_filter; @filter; end + + def merge_conditional_options(chain, if_option:, unless_option:) + options = { + if: @if.dup, + unless: @unless.dup + } + + options[:if].concat Array(unless_option) + options[:unless].concat Array(if_option) + + self.class.build chain, @filter, @kind, options + end + + def matches?(_kind, _filter) + @kind == _kind && filter == _filter + end + + def duplicates?(other) + case @filter + when Symbol + matches?(other.kind, other.filter) + else + false + end + end + + # Wraps code with filter + def apply(callback_sequence) + user_conditions = conditions_lambdas + user_callback = CallTemplate.build(@filter, self) + + case kind + when :before + Filters::Before.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config, @filter) + when :after + Filters::After.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config) + when :around + callback_sequence.around(user_callback, user_conditions) + end + end + + def current_scopes + Array(chain_config[:scope]).map { |s| public_send(s) } + end + + private + def check_conditionals(conditionals) + if conditionals.any? { |c| c.is_a?(String) } + raise ArgumentError, <<-MSG.squish + Passing string to be evaluated in :if and :unless conditional + options is not supported. Pass a symbol for an instance method, + or a lambda, proc or block, instead. + MSG + end + + conditionals + end + + def compute_identifier(filter) + case filter + when ::Proc + filter.object_id + else + filter + end + end + + def conditions_lambdas + @if.map { |c| CallTemplate.build(c, self).make_lambda } + + @unless.map { |c| CallTemplate.build(c, self).inverted_lambda } + end + end + + # A future invocation of user-supplied code (either as a callback, + # or a condition filter). + class CallTemplate # :nodoc: + def initialize(target, method, arguments, block) + @override_target = target + @method_name = method + @arguments = arguments + @override_block = block + end + + # Return the parts needed to make this call, with the given + # input values. + # + # Returns an array of the form: + # + # [target, block, method, *arguments] + # + # This array can be used as such: + # + # target.send(method, *arguments, &block) + # + # The actual invocation is left up to the caller to minimize + # call stack pollution. + def expand(target, value, block) + result = @arguments.map { |arg| + case arg + when :value; value + when :target; target + when :block; block || raise(ArgumentError) + end + } + + result.unshift @method_name + result.unshift @override_block || block + result.unshift @override_target || target + + # target, block, method, *arguments = result + # target.send(method, *arguments, &block) + result + end + + # Return a lambda that will make this call when given the input + # values. 
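+ #
+ # An illustrative sketch (+callback+ and +record+ are hypothetical names):
+ # for a Symbol filter such as +:my_method+, the generated lambda simply
+ # sends that method to the target:
+ #
+ #   template = CallTemplate.build(:my_method, callback)
+ #   template.make_lambda.call(record, nil)   # equivalent to record.my_method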
+ def make_lambda + lambda do |target, value, &block| + target, block, method, *arguments = expand(target, value, block) + target.send(method, *arguments, &block) + end + end + + # Return a lambda that will make this call when given the input + # values, but then return the boolean inverse of that result. + def inverted_lambda + lambda do |target, value, &block| + target, block, method, *arguments = expand(target, value, block) + ! target.send(method, *arguments, &block) + end + end + + # Filters support: + # + # Symbols:: A method to call. + # Procs:: A proc to call with the object. + # Objects:: An object with a before_foo method on it to call. + # + # All of these objects are converted into a CallTemplate and handled + # the same after this point. + def self.build(filter, callback) + case filter + when Symbol + new(nil, filter, [], nil) + when Conditionals::Value + new(filter, :call, [:target, :value], nil) + when ::Proc + if filter.arity > 1 + new(nil, :instance_exec, [:target, :block], filter) + elsif filter.arity > 0 + new(nil, :instance_exec, [:target], filter) + else + new(nil, :instance_exec, [], filter) + end + else + method_to_call = callback.current_scopes.join("_") + + new(filter, method_to_call, [:target], nil) + end + end + end + + # Execute before and after filters in a sequence instead of + # chaining them with nested lambda calls, see: + # https://github.com/rails/rails/issues/18011 + class CallbackSequence # :nodoc: + def initialize(nested = nil, call_template = nil, user_conditions = nil) + @nested = nested + @call_template = call_template + @user_conditions = user_conditions + + @before = [] + @after = [] + end + + def before(&before) + @before.unshift(before) + self + end + + def after(&after) + @after.push(after) + self + end + + def around(call_template, user_conditions) + CallbackSequence.new(self, call_template, user_conditions) + end + + def skip?(arg) + arg.halted || !@user_conditions.all? { |c| c.call(arg.target, arg.value) } + end + + def nested + @nested + end + + def final? 
+ !@call_template + end + + def expand_call_template(arg, block) + @call_template.expand(arg.target, arg.value, block) + end + + def invoke_before(arg) + @before.each { |b| b.call(arg) } + end + + def invoke_after(arg) + @after.each { |a| a.call(arg) } + end + end + + class CallbackChain #:nodoc:# + include Enumerable + + attr_reader :name, :config + + def initialize(name, config) + @name = name + @config = { + scope: [:kind], + terminator: default_terminator + }.merge!(config) + @chain = [] + @callbacks = nil + @mutex = Mutex.new + end + + def each(&block); @chain.each(&block); end + def index(o); @chain.index(o); end + def empty?; @chain.empty?; end + + def insert(index, o) + @callbacks = nil + @chain.insert(index, o) + end + + def delete(o) + @callbacks = nil + @chain.delete(o) + end + + def clear + @callbacks = nil + @chain.clear + self + end + + def initialize_copy(other) + @callbacks = nil + @chain = other.chain.dup + @mutex = Mutex.new + end + + def compile + @callbacks || @mutex.synchronize do + final_sequence = CallbackSequence.new + @callbacks ||= @chain.reverse.inject(final_sequence) do |callback_sequence, callback| + callback.apply callback_sequence + end + end + end + + def append(*callbacks) + callbacks.each { |c| append_one(c) } + end + + def prepend(*callbacks) + callbacks.each { |c| prepend_one(c) } + end + + protected + def chain; @chain; end + + private + + def append_one(callback) + @callbacks = nil + remove_duplicates(callback) + @chain.push(callback) + end + + def prepend_one(callback) + @callbacks = nil + remove_duplicates(callback) + @chain.unshift(callback) + end + + def remove_duplicates(callback) + @callbacks = nil + @chain.delete_if { |c| callback.duplicates?(c) } + end + + def default_terminator + Proc.new do |target, result_lambda| + terminate = true + catch(:abort) do + result_lambda.call + terminate = false + end + terminate + end + end + end + + module ClassMethods + def normalize_callback_params(filters, block) # :nodoc: + type = CALLBACK_FILTER_TYPES.include?(filters.first) ? filters.shift : :before + options = filters.extract_options! + filters.unshift(block) if block + [type, filters, options.dup] + end + + # This is used internally to append, prepend and skip callbacks to the + # CallbackChain. + def __update_callbacks(name) #:nodoc: + ([self] + ActiveSupport::DescendantsTracker.descendants(self)).reverse_each do |target| + chain = target.get_callbacks name + yield target, chain.dup + end + end + + # Install a callback for the given event. + # + # set_callback :save, :before, :before_method + # set_callback :save, :after, :after_method, if: :condition + # set_callback :save, :around, ->(r, block) { stuff; result = block.call; stuff } + # + # The second argument indicates whether the callback is to be run +:before+, + # +:after+, or +:around+ the event. If omitted, +:before+ is assumed. This + # means the first example above can also be written as: + # + # set_callback :save, :before_method + # + # The callback can be specified as a symbol naming an instance method; as a + # proc, lambda, or block; or as an object that responds to a certain method + # determined by the :scope argument to +define_callbacks+. + # + # If a proc, lambda, or block is given, its body is evaluated in the context + # of the current object. It can also optionally accept the current object as + # an argument. + # + # Before and around callbacks are called in the order that they are set; + # after callbacks are called in the reverse order. 
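+ #
+ # For instance (an illustrative sketch):
+ #
+ #   set_callback :save, :before, :log_started    # runs first
+ #   set_callback :save, :before, :check_quota    # runs second
+ #   set_callback :save, :after,  :clear_cache    # set first, so it runs last
+ #   set_callback :save, :after,  :send_webhook   # set second, so it runs first of the two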
+ # + # Around callbacks can access the return value from the event, if it + # wasn't halted, from the +yield+ call. + # + # ===== Options + # + # * :if - A symbol or an array of symbols, each naming an instance + # method or a proc; the callback will be called only when they all return + # a true value. + # * :unless - A symbol or an array of symbols, each naming an + # instance method or a proc; the callback will be called only when they + # all return a false value. + # * :prepend - If +true+, the callback will be prepended to the + # existing chain rather than appended. + def set_callback(name, *filter_list, &block) + type, filters, options = normalize_callback_params(filter_list, block) + + self_chain = get_callbacks name + mapped = filters.map do |filter| + Callback.build(self_chain, filter, type, options) + end + + __update_callbacks(name) do |target, chain| + options[:prepend] ? chain.prepend(*mapped) : chain.append(*mapped) + target.set_callbacks name, chain + end + end + + # Skip a previously set callback. Like +set_callback+, :if or + # :unless options may be passed in order to control when the + # callback is skipped. + # + # class Writer < Person + # skip_callback :validate, :before, :check_membership, if: -> { age > 18 } + # end + # + # An ArgumentError will be raised if the callback has not + # already been set (unless the :raise option is set to false). + def skip_callback(name, *filter_list, &block) + type, filters, options = normalize_callback_params(filter_list, block) + + options[:raise] = true unless options.key?(:raise) + + __update_callbacks(name) do |target, chain| + filters.each do |filter| + callback = chain.find { |c| c.matches?(type, filter) } + + if !callback && options[:raise] + raise ArgumentError, "#{type.to_s.capitalize} #{name} callback #{filter.inspect} has not been defined" + end + + if callback && (options.key?(:if) || options.key?(:unless)) + new_callback = callback.merge_conditional_options(chain, if_option: options[:if], unless_option: options[:unless]) + chain.insert(chain.index(callback), new_callback) + end + + chain.delete(callback) + end + target.set_callbacks name, chain + end + end + + # Remove all set callbacks for the given event. + def reset_callbacks(name) + callbacks = get_callbacks name + + ActiveSupport::DescendantsTracker.descendants(self).each do |target| + chain = target.get_callbacks(name).dup + callbacks.each { |c| chain.delete(c) } + target.set_callbacks name, chain + end + + set_callbacks(name, callbacks.dup.clear) + end + + # Define sets of events in the object life cycle that support callbacks. + # + # define_callbacks :validate + # define_callbacks :initialize, :save, :destroy + # + # ===== Options + # + # * :terminator - Determines when a before filter will halt the + # callback chain, preventing following before and around callbacks from + # being called and the event from being triggered. + # This should be a lambda to be executed. + # The current object and the result lambda of the callback will be provided + # to the terminator lambda. + # + # define_callbacks :validate, terminator: ->(target, result_lambda) { result_lambda.call == false } + # + # In this example, if any before validate callbacks returns +false+, + # any successive before and around callback is not executed. + # + # The default terminator halts the chain when a callback throws +:abort+. + # + # * :skip_after_callbacks_if_terminated - Determines if after + # callbacks should be terminated by the :terminator option. 
By + # default after callbacks are executed no matter if callback chain was + # terminated or not. This option has no effect if :terminator + # option is set to +nil+. + # + # * :scope - Indicates which methods should be executed when an + # object is used as a callback. + # + # class Audit + # def before(caller) + # puts 'Audit: before is called' + # end + # + # def before_save(caller) + # puts 'Audit: before_save is called' + # end + # end + # + # class Account + # include ActiveSupport::Callbacks + # + # define_callbacks :save + # set_callback :save, :before, Audit.new + # + # def save + # run_callbacks :save do + # puts 'save in main' + # end + # end + # end + # + # In the above case whenever you save an account the method + # Audit#before will be called. On the other hand + # + # define_callbacks :save, scope: [:kind, :name] + # + # would trigger Audit#before_save instead. That's constructed + # by calling #{kind}_#{name} on the given instance. In this + # case "kind" is "before" and "name" is "save". In this context +:kind+ + # and +:name+ have special meanings: +:kind+ refers to the kind of + # callback (before/after/around) and +:name+ refers to the method on + # which callbacks are being defined. + # + # A declaration like + # + # define_callbacks :save, scope: [:name] + # + # would call Audit#save. + # + # ===== Notes + # + # +names+ passed to +define_callbacks+ must not end with + # !, ? or =. + # + # Calling +define_callbacks+ multiple times with the same +names+ will + # overwrite previous callbacks registered with +set_callback+. + def define_callbacks(*names) + options = names.extract_options! + + names.each do |name| + name = name.to_sym + + set_callbacks name, CallbackChain.new(name, options) + + module_eval <<-RUBY, __FILE__, __LINE__ + 1 + def _run_#{name}_callbacks(&block) + run_callbacks #{name.inspect}, &block + end + + def self._#{name}_callbacks + get_callbacks(#{name.inspect}) + end + + def self._#{name}_callbacks=(value) + set_callbacks(#{name.inspect}, value) + end + + def _#{name}_callbacks + __callbacks[#{name.inspect}] + end + RUBY + end + end + + protected + + def get_callbacks(name) # :nodoc: + __callbacks[name.to_sym] + end + + def set_callbacks(name, callbacks) # :nodoc: + self.__callbacks = __callbacks.merge(name.to_sym => callbacks) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concern.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concern.rb new file mode 100644 index 0000000..5d356a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concern.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true + +module ActiveSupport + # A typical module looks like this: + # + # module M + # def self.included(base) + # base.extend ClassMethods + # base.class_eval do + # scope :disabled, -> { where(disabled: true) } + # end + # end + # + # module ClassMethods + # ... + # end + # end + # + # By using ActiveSupport::Concern the above module could instead be + # written as: + # + # require 'active_support/concern' + # + # module M + # extend ActiveSupport::Concern + # + # included do + # scope :disabled, -> { where(disabled: true) } + # end + # + # class_methods do + # ... + # end + # end + # + # Moreover, it gracefully handles module dependencies. 
Given a +Foo+ module + # and a +Bar+ module which depends on the former, we would typically write the + # following: + # + # module Foo + # def self.included(base) + # base.class_eval do + # def self.method_injected_by_foo + # ... + # end + # end + # end + # end + # + # module Bar + # def self.included(base) + # base.method_injected_by_foo + # end + # end + # + # class Host + # include Foo # We need to include this dependency for Bar + # include Bar # Bar is the module that Host really needs + # end + # + # But why should +Host+ care about +Bar+'s dependencies, namely +Foo+? We + # could try to hide these from +Host+ directly including +Foo+ in +Bar+: + # + # module Bar + # include Foo + # def self.included(base) + # base.method_injected_by_foo + # end + # end + # + # class Host + # include Bar + # end + # + # Unfortunately this won't work, since when +Foo+ is included, its base + # is the +Bar+ module, not the +Host+ class. With ActiveSupport::Concern, + # module dependencies are properly resolved: + # + # require 'active_support/concern' + # + # module Foo + # extend ActiveSupport::Concern + # included do + # def self.method_injected_by_foo + # ... + # end + # end + # end + # + # module Bar + # extend ActiveSupport::Concern + # include Foo + # + # included do + # self.method_injected_by_foo + # end + # end + # + # class Host + # include Bar # It works, now Bar takes care of its dependencies + # end + module Concern + class MultipleIncludedBlocks < StandardError #:nodoc: + def initialize + super "Cannot define multiple 'included' blocks for a Concern" + end + end + + def self.extended(base) #:nodoc: + base.instance_variable_set(:@_dependencies, []) + end + + def append_features(base) + if base.instance_variable_defined?(:@_dependencies) + base.instance_variable_get(:@_dependencies) << self + false + else + return false if base < self + @_dependencies.each { |dep| base.include(dep) } + super + base.extend const_get(:ClassMethods) if const_defined?(:ClassMethods) + base.class_eval(&@_included_block) if instance_variable_defined?(:@_included_block) + end + end + + def included(base = nil, &block) + if base.nil? + if instance_variable_defined?(:@_included_block) + if @_included_block.source_location != block.source_location + raise MultipleIncludedBlocks + end + else + @_included_block = block + end + else + super + end + end + + def class_methods(&class_methods_module_definition) + mod = const_defined?(:ClassMethods, false) ? + const_get(:ClassMethods) : + const_set(:ClassMethods, Module.new) + + mod.module_eval(&class_methods_module_definition) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/load_interlock_aware_monitor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/load_interlock_aware_monitor.rb new file mode 100644 index 0000000..a8455c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/load_interlock_aware_monitor.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require "monitor" + +module ActiveSupport + module Concurrency + # A monitor that will permit dependency loading while blocked waiting for + # the lock. 
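+ #
+ # A minimal usage sketch (illustrative only): it behaves like a regular
+ # Monitor, typically used via +synchronize+, except that a thread blocked on
+ # the lock still permits other threads to load dependencies:
+ #
+ #   monitor = ActiveSupport::Concurrency::LoadInterlockAwareMonitor.new
+ #   monitor.synchronize do
+ #     # critical section
+ #   end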
+ class LoadInterlockAwareMonitor < Monitor + # Enters an exclusive section, but allows dependency loading while blocked + def mon_enter + mon_try_enter || + ActiveSupport::Dependencies.interlock.permit_concurrent_loads { super } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/share_lock.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/share_lock.rb new file mode 100644 index 0000000..f18ccf1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/concurrency/share_lock.rb @@ -0,0 +1,227 @@ +# frozen_string_literal: true + +require "thread" +require "monitor" + +module ActiveSupport + module Concurrency + # A share/exclusive lock, otherwise known as a read/write lock. + # + # https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock + class ShareLock + include MonitorMixin + + # We track Thread objects, instead of just using counters, because + # we need exclusive locks to be reentrant, and we need to be able + # to upgrade share locks to exclusive. + + def raw_state # :nodoc: + synchronize do + threads = @sleeping.keys | @sharing.keys | @waiting.keys + threads |= [@exclusive_thread] if @exclusive_thread + + data = {} + + threads.each do |thread| + purpose, compatible = @waiting[thread] + + data[thread] = { + thread: thread, + sharing: @sharing[thread], + exclusive: @exclusive_thread == thread, + purpose: purpose, + compatible: compatible, + waiting: !!@waiting[thread], + sleeper: @sleeping[thread], + } + end + + # NB: Yields while holding our *internal* synchronize lock, + # which is supposed to be used only for a few instructions at + # a time. This allows the caller to inspect additional state + # without things changing out from underneath, but would have + # disastrous effects upon normal operation. Fortunately, this + # method is only intended to be called when things have + # already gone wrong. + yield data + end + end + + def initialize + super() + + @cv = new_cond + + @sharing = Hash.new(0) + @waiting = {} + @sleeping = {} + @exclusive_thread = nil + @exclusive_depth = 0 + end + + # Returns false if +no_wait+ is set and the lock is not + # immediately available. Otherwise, returns true after the lock + # has been acquired. + # + # +purpose+ and +compatible+ work together; while this thread is + # waiting for the exclusive lock, it will yield its share (if any) + # to any other attempt whose +purpose+ appears in this attempt's + # +compatible+ list. This allows a "loose" upgrade, which, being + # less strict, prevents some classes of deadlocks. + # + # For many resources, loose upgrades are sufficient: if a thread + # is awaiting a lock, it is not running any other code. With + # +purpose+ matching, it is possible to yield only to other + # threads whose activity will not interfere. + def start_exclusive(purpose: nil, compatible: [], no_wait: false) + synchronize do + unless @exclusive_thread == Thread.current + if busy_for_exclusive?(purpose) + return false if no_wait + + yield_shares(purpose: purpose, compatible: compatible, block_share: true) do + wait_for(:start_exclusive) { busy_for_exclusive?(purpose) } + end + end + @exclusive_thread = Thread.current + end + @exclusive_depth += 1 + + true + end + end + + # Relinquish the exclusive lock. Must only be called by the thread + # that called start_exclusive (and currently holds the lock). 
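+ #
+ # In practice the block helpers +exclusive+ and +sharing+ defined below are
+ # usually preferred, since they release the lock in an +ensure+ block. A
+ # minimal sketch (+read_state+ and +write_state+ are illustrative names):
+ #
+ #   lock = ActiveSupport::Concurrency::ShareLock.new
+ #
+ #   lock.sharing { read_state }   # many threads may share the lock at once
+ #
+ #   lock.exclusive(purpose: :write, compatible: [:write]) do
+ #     write_state                 # only one thread holds the exclusive lock
+ #   end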
+ def stop_exclusive(compatible: []) + synchronize do + raise "invalid unlock" if @exclusive_thread != Thread.current + + @exclusive_depth -= 1 + if @exclusive_depth == 0 + @exclusive_thread = nil + + if eligible_waiters?(compatible) + yield_shares(compatible: compatible, block_share: true) do + wait_for(:stop_exclusive) { @exclusive_thread || eligible_waiters?(compatible) } + end + end + @cv.broadcast + end + end + end + + def start_sharing + synchronize do + if @sharing[Thread.current] > 0 || @exclusive_thread == Thread.current + # We already hold a lock; nothing to wait for + elsif @waiting[Thread.current] + # We're nested inside a +yield_shares+ call: we'll resume as + # soon as there isn't an exclusive lock in our way + wait_for(:start_sharing) { @exclusive_thread } + else + # This is an initial / outermost share call: any outstanding + # requests for an exclusive lock get to go first + wait_for(:start_sharing) { busy_for_sharing?(false) } + end + @sharing[Thread.current] += 1 + end + end + + def stop_sharing + synchronize do + if @sharing[Thread.current] > 1 + @sharing[Thread.current] -= 1 + else + @sharing.delete Thread.current + @cv.broadcast + end + end + end + + # Execute the supplied block while holding the Exclusive lock. If + # +no_wait+ is set and the lock is not immediately available, + # returns +nil+ without yielding. Otherwise, returns the result of + # the block. + # + # See +start_exclusive+ for other options. + def exclusive(purpose: nil, compatible: [], after_compatible: [], no_wait: false) + if start_exclusive(purpose: purpose, compatible: compatible, no_wait: no_wait) + begin + yield + ensure + stop_exclusive(compatible: after_compatible) + end + end + end + + # Execute the supplied block while holding the Share lock. + def sharing + start_sharing + begin + yield + ensure + stop_sharing + end + end + + # Temporarily give up all held Share locks while executing the + # supplied block, allowing any +compatible+ exclusive lock request + # to proceed. + def yield_shares(purpose: nil, compatible: [], block_share: false) + loose_shares = previous_wait = nil + synchronize do + if loose_shares = @sharing.delete(Thread.current) + if previous_wait = @waiting[Thread.current] + purpose = nil unless purpose == previous_wait[0] + compatible &= previous_wait[1] + end + compatible |= [false] unless block_share + @waiting[Thread.current] = [purpose, compatible] + end + + @cv.broadcast + end + + begin + yield + ensure + synchronize do + wait_for(:yield_shares) { @exclusive_thread && @exclusive_thread != Thread.current } + + if previous_wait + @waiting[Thread.current] = previous_wait + else + @waiting.delete Thread.current + end + @sharing[Thread.current] = loose_shares if loose_shares + end + end + end + + private + + # Must be called within synchronize + def busy_for_exclusive?(purpose) + busy_for_sharing?(purpose) || + @sharing.size > (@sharing[Thread.current] > 0 ? 1 : 0) + end + + def busy_for_sharing?(purpose) + (@exclusive_thread && @exclusive_thread != Thread.current) || + @waiting.any? { |t, (_, c)| t != Thread.current && !c.include?(purpose) } + end + + def eligible_waiters?(compatible) + @waiting.any? { |t, (p, _)| compatible.include?(p) && @waiting.all? 
{ |t2, (_, c2)| t == t2 || c2.include?(p) } } + end + + def wait_for(method) + @sleeping[Thread.current] = method + @cv.wait_while { yield } + ensure + @sleeping.delete Thread.current + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/configurable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/configurable.rb new file mode 100644 index 0000000..4d6f781 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/configurable.rb @@ -0,0 +1,150 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/ordered_options" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/regexp" + +module ActiveSupport + # Configurable provides a config method to store and retrieve + # configuration options as an OrderedHash. + module Configurable + extend ActiveSupport::Concern + + class Configuration < ActiveSupport::InheritableOptions + def compile_methods! + self.class.compile_methods!(keys) + end + + # Compiles reader methods so we don't have to go through method_missing. + def self.compile_methods!(keys) + keys.reject { |m| method_defined?(m) }.each do |key| + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{key}; _get(#{key.inspect}); end + RUBY + end + end + end + + module ClassMethods + def config + @_config ||= if respond_to?(:superclass) && superclass.respond_to?(:config) + superclass.config.inheritable_copy + else + # create a new "anonymous" class that will host the compiled reader methods + Class.new(Configuration).new + end + end + + def configure + yield config + end + + # Allows you to add shortcut so that you don't have to refer to attribute + # through config. Also look at the example for config to contrast. + # + # Defines both class and instance config accessors. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access + # end + # + # User.allowed_access # => nil + # User.allowed_access = false + # User.allowed_access # => false + # + # user = User.new + # user.allowed_access # => false + # user.allowed_access = true + # user.allowed_access # => true + # + # User.allowed_access # => false + # + # The attribute name must be a valid method name in Ruby. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :"1_Badname" + # end + # # => NameError: invalid config attribute name + # + # To opt out of the instance writer method, pass instance_writer: false. + # To opt out of the instance reader method, pass instance_reader: false. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access, instance_reader: false, instance_writer: false + # end + # + # User.allowed_access = false + # User.allowed_access # => false + # + # User.new.allowed_access = true # => NoMethodError + # User.new.allowed_access # => NoMethodError + # + # Or pass instance_accessor: false, to opt out both instance methods. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access, instance_accessor: false + # end + # + # User.allowed_access = false + # User.allowed_access # => false + # + # User.new.allowed_access = true # => NoMethodError + # User.new.allowed_access # => NoMethodError + # + # Also you can pass a block to set up the attribute with a default value. 
+ # + # class User + # include ActiveSupport::Configurable + # config_accessor :hair_colors do + # [:brown, :black, :blonde, :red] + # end + # end + # + # User.hair_colors # => [:brown, :black, :blonde, :red] + def config_accessor(*names) + options = names.extract_options! + + names.each do |name| + raise NameError.new("invalid config attribute name") unless /\A[_A-Za-z]\w*\z/.match?(name) + + reader, reader_line = "def #{name}; config.#{name}; end", __LINE__ + writer, writer_line = "def #{name}=(value); config.#{name} = value; end", __LINE__ + + singleton_class.class_eval reader, __FILE__, reader_line + singleton_class.class_eval writer, __FILE__, writer_line + + unless options[:instance_accessor] == false + class_eval reader, __FILE__, reader_line unless options[:instance_reader] == false + class_eval writer, __FILE__, writer_line unless options[:instance_writer] == false + end + send("#{name}=", yield) if block_given? + end + end + private :config_accessor + end + + # Reads and writes attributes from a configuration OrderedHash. + # + # require 'active_support/configurable' + # + # class User + # include ActiveSupport::Configurable + # end + # + # user = User.new + # + # user.config.allowed_access = true + # user.config.level = 1 + # + # user.config.allowed_access # => true + # user.config.level # => 1 + def config + @_config ||= self.class.config.inheritable_copy + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext.rb new file mode 100644 index 0000000..f590605 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +Dir.glob(File.expand_path("core_ext/*.rb", __dir__)).each do |path| + require path +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array.rb new file mode 100644 index 0000000..6d83b76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/array/access" +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/array/grouping" +require "active_support/core_ext/array/prepend_and_append" +require "active_support/core_ext/array/inquiry" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/access.rb new file mode 100644 index 0000000..b7ff7a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/access.rb @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +class Array + # Returns the tail of the array from +position+. + # + # %w( a b c d ).from(0) # => ["a", "b", "c", "d"] + # %w( a b c d ).from(2) # => ["c", "d"] + # %w( a b c d ).from(10) # => [] + # %w().from(0) # => [] + # %w( a b c d ).from(-2) # => ["c", "d"] + # %w( a b c ).from(-10) # => [] + def from(position) + self[position, length] || [] + end + + # Returns the beginning of the array up to +position+. 
+ # + # %w( a b c d ).to(0) # => ["a"] + # %w( a b c d ).to(2) # => ["a", "b", "c"] + # %w( a b c d ).to(10) # => ["a", "b", "c", "d"] + # %w().to(0) # => [] + # %w( a b c d ).to(-2) # => ["a", "b", "c"] + # %w( a b c ).to(-10) # => [] + def to(position) + if position >= 0 + take position + 1 + else + self[0..position] + end + end + + # Returns a copy of the Array without the specified elements. + # + # people = ["David", "Rafael", "Aaron", "Todd"] + # people.without "Aaron", "Todd" + # # => ["David", "Rafael"] + # + # Note: This is an optimization of Enumerable#without that uses Array#- + # instead of Array#reject for performance reasons. + def without(*elements) + self - elements + end + + # Equal to self[1]. + # + # %w( a b c d e ).second # => "b" + def second + self[1] + end + + # Equal to self[2]. + # + # %w( a b c d e ).third # => "c" + def third + self[2] + end + + # Equal to self[3]. + # + # %w( a b c d e ).fourth # => "d" + def fourth + self[3] + end + + # Equal to self[4]. + # + # %w( a b c d e ).fifth # => "e" + def fifth + self[4] + end + + # Equal to self[41]. Also known as accessing "the reddit". + # + # (1..42).to_a.forty_two # => 42 + def forty_two + self[41] + end + + # Equal to self[-3]. + # + # %w( a b c d e ).third_to_last # => "c" + def third_to_last + self[-3] + end + + # Equal to self[-2]. + # + # %w( a b c d e ).second_to_last # => "d" + def second_to_last + self[-2] + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/conversions.rb new file mode 100644 index 0000000..ea688ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/conversions.rb @@ -0,0 +1,213 @@ +# frozen_string_literal: true + +require "active_support/xml_mini" +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" + +class Array + # Converts the array to a comma-separated sentence where the last element is + # joined by the connector word. + # + # You can pass the following options to change the default behavior. If you + # pass an option key that doesn't exist in the list below, it will raise an + # ArgumentError. + # + # ==== Options + # + # * :words_connector - The sign or word used to join the elements + # in arrays with two or more elements (default: ", "). + # * :two_words_connector - The sign or word used to join the elements + # in arrays with two elements (default: " and "). + # * :last_word_connector - The sign or word used to join the last element + # in arrays with three or more elements (default: ", and "). + # * :locale - If +i18n+ is available, you can set a locale and use + # the connector options defined on the 'support.array' namespace in the + # corresponding dictionary file. + # + # ==== Examples + # + # [].to_sentence # => "" + # ['one'].to_sentence # => "one" + # ['one', 'two'].to_sentence # => "one and two" + # ['one', 'two', 'three'].to_sentence # => "one, two, and three" + # + # ['one', 'two'].to_sentence(passing: 'invalid option') + # # => ArgumentError: Unknown key: :passing. 
Valid keys are: :words_connector, :two_words_connector, :last_word_connector, :locale + # + # ['one', 'two'].to_sentence(two_words_connector: '-') + # # => "one-two" + # + # ['one', 'two', 'three'].to_sentence(words_connector: ' or ', last_word_connector: ' or at least ') + # # => "one or two or at least three" + # + # Using :locale option: + # + # # Given this locale dictionary: + # # + # # es: + # # support: + # # array: + # # words_connector: " o " + # # two_words_connector: " y " + # # last_word_connector: " o al menos " + # + # ['uno', 'dos'].to_sentence(locale: :es) + # # => "uno y dos" + # + # ['uno', 'dos', 'tres'].to_sentence(locale: :es) + # # => "uno o dos o al menos tres" + def to_sentence(options = {}) + options.assert_valid_keys(:words_connector, :two_words_connector, :last_word_connector, :locale) + + default_connectors = { + words_connector: ", ", + two_words_connector: " and ", + last_word_connector: ", and " + } + if defined?(I18n) + i18n_connectors = I18n.translate(:'support.array', locale: options[:locale], default: {}) + default_connectors.merge!(i18n_connectors) + end + options = default_connectors.merge!(options) + + case length + when 0 + "" + when 1 + "#{self[0]}" + when 2 + "#{self[0]}#{options[:two_words_connector]}#{self[1]}" + else + "#{self[0...-1].join(options[:words_connector])}#{options[:last_word_connector]}#{self[-1]}" + end + end + + # Extends Array#to_s to convert a collection of elements into a + # comma separated id list if :db argument is given as the format. + # + # Blog.all.to_formatted_s(:db) # => "1,2,3" + # Blog.none.to_formatted_s(:db) # => "null" + # [1,2].to_formatted_s # => "[1, 2]" + def to_formatted_s(format = :default) + case format + when :db + if empty? + "null" + else + collect(&:id).join(",") + end + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Returns a string that represents the array in XML by invoking +to_xml+ + # on each element. Active Record collections delegate their representation + # in XML to this method. + # + # All elements are expected to respond to +to_xml+, if any of them does + # not then an exception is raised. + # + # The root node reflects the class name of the first element in plural + # if all elements belong to the same type and that's not Hash: + # + # customer.projects.to_xml + # + # + # + # + # 20000.0 + # 1567 + # 2008-04-09 + # ... + # + # + # 57230.0 + # 1567 + # 2008-04-15 + # ... + # + # + # + # Otherwise the root element is "objects": + # + # [{ foo: 1, bar: 2}, { baz: 3}].to_xml + # + # + # + # + # 2 + # 1 + # + # + # 3 + # + # + # + # If the collection is empty the root element is "nil-classes" by default: + # + # [].to_xml + # + # + # + # + # To ensure a meaningful root element use the :root option: + # + # customer_with_no_projects.projects.to_xml(root: 'projects') + # + # + # + # + # By default name of the node for the children of root is root.singularize. + # You can change it with the :children option. + # + # The +options+ hash is passed downwards: + # + # Message.all.to_xml(skip_types: true) + # + # + # + # + # 2008-03-07T09:58:18+01:00 + # 1 + # 1 + # 2008-03-07T09:58:18+01:00 + # 1 + # + # + # + def to_xml(options = {}) + require "active_support/builder" unless defined?(Builder) + + options = options.dup + options[:indent] ||= 2 + options[:builder] ||= Builder::XmlMarkup.new(indent: options[:indent]) + options[:root] ||= \ + if first.class != Hash && all? 
{ |e| e.is_a?(first.class) } + underscored = ActiveSupport::Inflector.underscore(first.class.name) + ActiveSupport::Inflector.pluralize(underscored).tr("/", "_") + else + "objects" + end + + builder = options[:builder] + builder.instruct! unless options.delete(:skip_instruct) + + root = ActiveSupport::XmlMini.rename_key(options[:root].to_s, options) + children = options.delete(:children) || root.singularize + attributes = options[:skip_types] ? {} : { type: "array" } + + if empty? + builder.tag!(root, attributes) + else + builder.tag!(root, attributes) do + each { |value| ActiveSupport::XmlMini.to_tag(children, value, options) } + yield builder if block_given? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/extract_options.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/extract_options.rb new file mode 100644 index 0000000..8c7cb2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/extract_options.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class Hash + # By default, only instances of Hash itself are extractable. + # Subclasses of Hash may implement this method and return + # true to declare themselves as extractable. If a Hash + # is extractable, Array#extract_options! pops it from + # the Array when it is the last element of the Array. + def extractable_options? + instance_of?(Hash) + end +end + +class Array + # Extracts options from a set of arguments. Removes and returns the last + # element in the array if it's a hash, otherwise returns a blank hash. + # + # def options(*args) + # args.extract_options! + # end + # + # options(1, 2) # => {} + # options(1, 2, a: :b) # => {:a=>:b} + def extract_options! + if last.is_a?(Hash) && last.extractable_options? + pop + else + {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/grouping.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/grouping.rb new file mode 100644 index 0000000..67e760b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/grouping.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +class Array + # Splits or iterates over the array in groups of size +number+, + # padding any remaining slots with +fill_with+ unless it is +false+. + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups_of(3) {|group| p group} + # ["1", "2", "3"] + # ["4", "5", "6"] + # ["7", "8", "9"] + # ["10", nil, nil] + # + # %w(1 2 3 4 5).in_groups_of(2, ' ') {|group| p group} + # ["1", "2"] + # ["3", "4"] + # ["5", " "] + # + # %w(1 2 3 4 5).in_groups_of(2, false) {|group| p group} + # ["1", "2"] + # ["3", "4"] + # ["5"] + def in_groups_of(number, fill_with = nil) + if number.to_i <= 0 + raise ArgumentError, + "Group size must be a positive integer, was #{number.inspect}" + end + + if fill_with == false + collection = self + else + # size % number gives how many extra we have; + # subtracting from number gives how many to add; + # modulo number ensures we don't add group of just fill. + padding = (number - size % number) % number + collection = dup.concat(Array.new(padding, fill_with)) + end + + if block_given? 
+ collection.each_slice(number) { |slice| yield(slice) } + else + collection.each_slice(number).to_a + end + end + + # Splits or iterates over the array in +number+ of groups, padding any + # remaining slots with +fill_with+ unless it is +false+. + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups(3) {|group| p group} + # ["1", "2", "3", "4"] + # ["5", "6", "7", nil] + # ["8", "9", "10", nil] + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups(3, ' ') {|group| p group} + # ["1", "2", "3", "4"] + # ["5", "6", "7", " "] + # ["8", "9", "10", " "] + # + # %w(1 2 3 4 5 6 7).in_groups(3, false) {|group| p group} + # ["1", "2", "3"] + # ["4", "5"] + # ["6", "7"] + def in_groups(number, fill_with = nil) + # size.div number gives minor group size; + # size % number gives how many objects need extra accommodation; + # each group hold either division or division + 1 items. + division = size.div number + modulo = size % number + + # create a new array avoiding dup + groups = [] + start = 0 + + number.times do |index| + length = division + (modulo > 0 && modulo > index ? 1 : 0) + groups << last_group = slice(start, length) + last_group << fill_with if fill_with != false && + modulo > 0 && length == division + start += length + end + + if block_given? + groups.each { |g| yield(g) } + else + groups + end + end + + # Divides the array into one or more subarrays based on a delimiting +value+ + # or the result of an optional block. + # + # [1, 2, 3, 4, 5].split(3) # => [[1, 2], [4, 5]] + # (1..10).to_a.split { |i| i % 3 == 0 } # => [[1, 2], [4, 5], [7, 8], [10]] + def split(value = nil) + arr = dup + result = [] + if block_given? + while (idx = arr.index { |i| yield i }) + result << arr.shift(idx) + arr.shift + end + else + while (idx = arr.index(value)) + result << arr.shift(idx) + arr.shift + end + end + result << arr + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/inquiry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/inquiry.rb new file mode 100644 index 0000000..92c61bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/inquiry.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +require "active_support/array_inquirer" + +class Array + # Wraps the array in an +ArrayInquirer+ object, which gives a friendlier way + # to check its string-like contents. + # + # pets = [:cat, :dog].inquiry + # + # pets.cat? # => true + # pets.ferret? # => false + # + # pets.any?(:cat, :ferret) # => true + # pets.any?(:ferret, :alligator) # => false + def inquiry + ActiveSupport::ArrayInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/prepend_and_append.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/prepend_and_append.rb new file mode 100644 index 0000000..661971d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/prepend_and_append.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +class Array + # The human way of thinking about adding stuff to the end of a list is with append. + alias_method :append, :push unless [].respond_to?(:append) + + # The human way of thinking about adding stuff to the beginning of a list is with prepend. 
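+ #
+ # For example (equivalent to +unshift+ and +push+ respectively):
+ #
+ #   %w( b c d ).prepend("a")   # => ["a", "b", "c", "d"]
+ #   %w( a b c ).append("d")    # => ["a", "b", "c", "d"]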
+ alias_method :prepend, :unshift unless [].respond_to?(:prepend) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/wrap.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/wrap.rb new file mode 100644 index 0000000..d62f97e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/array/wrap.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +class Array + # Wraps its argument in an array unless it is already an array (or array-like). + # + # Specifically: + # + # * If the argument is +nil+ an empty array is returned. + # * Otherwise, if the argument responds to +to_ary+ it is invoked, and its result returned. + # * Otherwise, returns an array with the argument as its single element. + # + # Array.wrap(nil) # => [] + # Array.wrap([1, 2, 3]) # => [1, 2, 3] + # Array.wrap(0) # => [0] + # + # This method is similar in purpose to Kernel#Array, but there are some differences: + # + # * If the argument responds to +to_ary+ the method is invoked. Kernel#Array + # moves on to try +to_a+ if the returned value is +nil+, but Array.wrap returns + # an array with the argument as its single element right away. + # * If the returned value from +to_ary+ is neither +nil+ nor an +Array+ object, Kernel#Array + # raises an exception, while Array.wrap does not, it just returns the value. + # * It does not call +to_a+ on the argument, if the argument does not respond to +to_ary+ + # it returns an array with the argument as its single element. + # + # The last point is easily explained with some enumerables: + # + # Array(foo: :bar) # => [[:foo, :bar]] + # Array.wrap(foo: :bar) # => [{:foo=>:bar}] + # + # There's also a related idiom that uses the splat operator: + # + # [*object] + # + # which returns [] for +nil+, but calls to Array(object) otherwise. + # + # The differences with Kernel#Array explained above + # apply to the rest of objects. + def self.wrap(object) + if object.nil? + [] + elsif object.respond_to?(:to_ary) + object.to_ary || [object] + else + [object] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/benchmark.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/benchmark.rb new file mode 100644 index 0000000..641b58c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/benchmark.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "benchmark" + +class << Benchmark + # Benchmark realtime in milliseconds. 
+ # + # Benchmark.realtime { User.all } + # # => 8.0e-05 + # + # Benchmark.ms { User.all } + # # => 0.074 + def ms + 1000 * realtime { yield } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal.rb new file mode 100644 index 0000000..9e6a9d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal/conversions.rb new file mode 100644 index 0000000..52bd229 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/big_decimal/conversions.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "bigdecimal" +require "bigdecimal/util" + +module ActiveSupport + module BigDecimalWithDefaultFormat #:nodoc: + def to_s(format = "F") + super(format) + end + end +end + +BigDecimal.prepend(ActiveSupport::BigDecimalWithDefaultFormat) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class.rb new file mode 100644 index 0000000..1c110fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/class/subclasses" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute.rb new file mode 100644 index 0000000..7928efb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute.rb @@ -0,0 +1,146 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/array/extract_options" + +class Class + # Declare a class-level attribute whose value is inheritable by subclasses. + # Subclasses can change their own value and it will not impact parent class. + # + # ==== Options + # + # * :instance_reader - Sets the instance reader method (defaults to true). + # * :instance_writer - Sets the instance writer method (defaults to true). + # * :instance_accessor - Sets both instance methods (defaults to true). + # * :instance_predicate - Sets a predicate method (defaults to true). + # * :default - Sets a default value for the attribute (defaults to nil). + # + # ==== Examples + # + # class Base + # class_attribute :setting + # end + # + # class Subclass < Base + # end + # + # Base.setting = true + # Subclass.setting # => true + # Subclass.setting = false + # Subclass.setting # => false + # Base.setting # => true + # + # In the above case as long as Subclass does not assign a value to setting + # by performing Subclass.setting = _something_, Subclass.setting + # would read value assigned to parent class. Once Subclass assigns a value then + # the value assigned by Subclass would be returned. 
+ # + # This matches normal Ruby method inheritance: think of writing an attribute + # on a subclass as overriding the reader method. However, you need to be aware + # when using +class_attribute+ with mutable structures as +Array+ or +Hash+. + # In such cases, you don't want to do changes in place. Instead use setters: + # + # Base.setting = [] + # Base.setting # => [] + # Subclass.setting # => [] + # + # # Appending in child changes both parent and child because it is the same object: + # Subclass.setting << :foo + # Base.setting # => [:foo] + # Subclass.setting # => [:foo] + # + # # Use setters to not propagate changes: + # Base.setting = [] + # Subclass.setting += [:foo] + # Base.setting # => [] + # Subclass.setting # => [:foo] + # + # For convenience, an instance predicate method is defined as well. + # To skip it, pass instance_predicate: false. + # + # Subclass.setting? # => false + # + # Instances may overwrite the class value in the same way: + # + # Base.setting = true + # object = Base.new + # object.setting # => true + # object.setting = false + # object.setting # => false + # Base.setting # => true + # + # To opt out of the instance reader method, pass instance_reader: false. + # + # object.setting # => NoMethodError + # object.setting? # => NoMethodError + # + # To opt out of the instance writer method, pass instance_writer: false. + # + # object.setting = false # => NoMethodError + # + # To opt out of both instance methods, pass instance_accessor: false. + # + # To set a default value for the attribute, pass default:, like so: + # + # class_attribute :settings, default: {} + def class_attribute(*attrs) + options = attrs.extract_options! + instance_reader = options.fetch(:instance_accessor, true) && options.fetch(:instance_reader, true) + instance_writer = options.fetch(:instance_accessor, true) && options.fetch(:instance_writer, true) + instance_predicate = options.fetch(:instance_predicate, true) + default_value = options.fetch(:default, nil) + + attrs.each do |name| + singleton_class.silence_redefinition_of_method(name) + define_singleton_method(name) { nil } + + singleton_class.silence_redefinition_of_method("#{name}?") + define_singleton_method("#{name}?") { !!public_send(name) } if instance_predicate + + ivar = "@#{name}" + + singleton_class.silence_redefinition_of_method("#{name}=") + define_singleton_method("#{name}=") do |val| + singleton_class.class_eval do + redefine_method(name) { val } + end + + if singleton_class? + class_eval do + redefine_method(name) do + if instance_variable_defined? ivar + instance_variable_get ivar + else + singleton_class.send name + end + end + end + end + val + end + + if instance_reader + redefine_method(name) do + if instance_variable_defined?(ivar) + instance_variable_get ivar + else + self.class.public_send name + end + end + + redefine_method("#{name}?") { !!public_send(name) } if instance_predicate + end + + if instance_writer + redefine_method("#{name}=") do |val| + instance_variable_set ivar, val + end + end + + unless default_value.nil? 
+ self.send("#{name}=", default_value) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute_accessors.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute_accessors.rb new file mode 100644 index 0000000..a77354e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/attribute_accessors.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +# cattr_* became mattr_* aliases in 7dfbd91b0780fbd6a1dd9bfbc176e10894871d2d, +# but we keep this around for libraries that directly require it knowing they +# want cattr_*. No need to deprecate. +require "active_support/core_ext/module/attribute_accessors" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/subclasses.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/subclasses.rb new file mode 100644 index 0000000..75e6533 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/class/subclasses.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +class Class + begin + # Test if this Ruby supports each_object against singleton_class + ObjectSpace.each_object(Numeric.singleton_class) {} + + # Returns an array with all classes that are < than its receiver. + # + # class C; end + # C.descendants # => [] + # + # class B < C; end + # C.descendants # => [B] + # + # class A < B; end + # C.descendants # => [B, A] + # + # class D < C; end + # C.descendants # => [B, A, D] + def descendants + descendants = [] + ObjectSpace.each_object(singleton_class) do |k| + next if k.singleton_class? + descendants.unshift k unless k == self + end + descendants + end + rescue StandardError # JRuby 9.0.4.0 and earlier + def descendants + descendants = [] + ObjectSpace.each_object(Class) do |k| + descendants.unshift k if k < self + end + descendants.uniq! + descendants + end + end + + # Returns an array with the direct children of +self+. + # + # class Foo; end + # class Bar < Foo; end + # class Baz < Bar; end + # + # Foo.subclasses # => [Bar] + def subclasses + subclasses, chain = [], descendants + chain.each do |k| + subclasses << k unless chain.any? { |c| c > k } + end + subclasses + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date.rb new file mode 100644 index 0000000..cce73f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date/acts_like" +require "active_support/core_ext/date/blank" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/date/conversions" +require "active_support/core_ext/date/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/acts_like.rb new file mode 100644 index 0000000..c8077f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Date + # Duck-types as a Date-like class. See Object#acts_like?. 
+ def acts_like_date? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/blank.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/blank.rb new file mode 100644 index 0000000..e6271c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/blank.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "date" + +class Date #:nodoc: + # No Date is blank: + # + # Date.today.blank? # => false + # + # @return [false] + def blank? + false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/calculations.rb new file mode 100644 index 0000000..1cd7acb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/calculations.rb @@ -0,0 +1,145 @@ +# frozen_string_literal: true + +require "date" +require "active_support/duration" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date/zones" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" + +class Date + include DateAndTime::Calculations + + class << self + attr_accessor :beginning_of_week_default + + # Returns the week start (e.g. :monday) for the current request, if this has been set (via Date.beginning_of_week=). + # If Date.beginning_of_week has not been set for the current request, returns the week start specified in config.beginning_of_week. + # If no config.beginning_of_week was specified, returns :monday. + def beginning_of_week + Thread.current[:beginning_of_week] || beginning_of_week_default || :monday + end + + # Sets Date.beginning_of_week to a week start (e.g. :monday) for current request/thread. + # + # This method accepts any of the following day symbols: + # :monday, :tuesday, :wednesday, :thursday, :friday, :saturday, :sunday + def beginning_of_week=(week_start) + Thread.current[:beginning_of_week] = find_beginning_of_week!(week_start) + end + + # Returns week start day symbol (e.g. :monday), or raises an +ArgumentError+ for invalid day symbol. + def find_beginning_of_week!(week_start) + raise ArgumentError, "Invalid beginning of week: #{week_start}" unless ::Date::DAYS_INTO_WEEK.key?(week_start) + week_start + end + + # Returns a new Date representing the date 1 day ago (i.e. yesterday's date). + def yesterday + ::Date.current.yesterday + end + + # Returns a new Date representing the date 1 day after today (i.e. tomorrow's date). + def tomorrow + ::Date.current.tomorrow + end + + # Returns Time.zone.today when Time.zone or config.time_zone are set, otherwise just returns Date.today. + def current + ::Time.zone ? ::Time.zone.today : ::Date.today + end + end + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + # and then subtracts the specified number of seconds. 
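
To see the class_attribute inheritance rules from the class/attribute.rb file above in action, a small sketch; the Connection classes are made-up names for illustration, not anything in this diff:

    require "active_support/core_ext/class/attribute"

    class Connection
      class_attribute :timeout, default: 5
    end

    class SlowConnection < Connection; end

    SlowConnection.timeout = 30
    Connection.timeout          # => 5   (the parent keeps its own value)
    SlowConnection.timeout      # => 30
    SlowConnection.new.timeout  # => 30  (instance reader falls back to the class value)
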
+ def ago(seconds) + in_time_zone.since(-seconds) + end + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + # and then adds the specified number of seconds + def since(seconds) + in_time_zone.since(seconds) + end + alias :in :since + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + def beginning_of_day + in_time_zone + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the middle of the day (12:00) + def middle_of_day + in_time_zone.middle_of_day + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the end of the day (23:59:59) + def end_of_day + in_time_zone.end_of_day + end + alias :at_end_of_day :end_of_day + + def plus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + plus_with_duration(-other) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Provides precise Date calculations for years, months, and days. The +options+ parameter takes a hash with + # any of these keys: :years, :months, :weeks, :days. + def advance(options) + options = options.dup + d = self + d = d >> options.delete(:years) * 12 if options[:years] + d = d >> options.delete(:months) if options[:months] + d = d + options.delete(:weeks) * 7 if options[:weeks] + d = d + options.delete(:days) if options[:days] + d + end + + # Returns a new Date where one or more of the elements have been changed according to the +options+ parameter. + # The +options+ parameter is a hash with a combination of these keys: :year, :month, :day. + # + # Date.new(2007, 5, 12).change(day: 1) # => Date.new(2007, 5, 1) + # Date.new(2007, 5, 12).change(year: 2005, month: 1) # => Date.new(2005, 1, 12) + def change(options) + ::Date.new( + options.fetch(:year, year), + options.fetch(:month, month), + options.fetch(:day, day) + ) + end + + # Allow Date to be compared with Time by converting to DateTime and relying on the <=> from there. 
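
Date#advance and Date#change above are the workhorse calculations. A minimal sketch, using the full active_support/time entry point so all the date/time requires are satisfied:

    require "active_support/time"

    date = Date.new(2007, 5, 12)
    date.advance(years: 1, months: 2)  # => Date for 2008-07-12
    date.advance(weeks: 2)             # => Date for 2007-05-26
    date.change(day: 1)                # => Date for 2007-05-01
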
+ def compare_with_coercion(other) + if other.is_a?(Time) + to_datetime <=> other + else + compare_without_coercion(other) + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/conversions.rb new file mode 100644 index 0000000..870119d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/conversions.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true + +require "date" +require "active_support/inflector/methods" +require "active_support/core_ext/date/zones" +require "active_support/core_ext/module/redefine_method" + +class Date + DATE_FORMATS = { + short: "%d %b", + long: "%B %d, %Y", + db: "%Y-%m-%d", + number: "%Y%m%d", + long_ordinal: lambda { |date| + day_format = ActiveSupport::Inflector.ordinalize(date.day) + date.strftime("%B #{day_format}, %Y") # => "April 25th, 2007" + }, + rfc822: "%d %b %Y", + iso8601: lambda { |date| date.iso8601 } + } + + # Convert to a formatted string. See DATE_FORMATS for predefined formats. + # + # This method is aliased to to_s. + # + # date = Date.new(2007, 11, 10) # => Sat, 10 Nov 2007 + # + # date.to_formatted_s(:db) # => "2007-11-10" + # date.to_s(:db) # => "2007-11-10" + # + # date.to_formatted_s(:short) # => "10 Nov" + # date.to_formatted_s(:number) # => "20071110" + # date.to_formatted_s(:long) # => "November 10, 2007" + # date.to_formatted_s(:long_ordinal) # => "November 10th, 2007" + # date.to_formatted_s(:rfc822) # => "10 Nov 2007" + # date.to_formatted_s(:iso8601) # => "2007-11-10" + # + # == Adding your own date formats to to_formatted_s + # You can add your own formats to the Date::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a date argument as the value. + # + # # config/initializers/date_formats.rb + # Date::DATE_FORMATS[:month_and_year] = '%B %Y' + # Date::DATE_FORMATS[:short_ordinal] = ->(date) { date.strftime("%B #{date.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = DATE_FORMATS[format] + if formatter.respond_to?(:call) + formatter.call(self).to_s + else + strftime(formatter) + end + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005" + def readable_inspect + strftime("%a, %d %b %Y") + end + alias_method :default_inspect, :inspect + alias_method :inspect, :readable_inspect + + silence_redefinition_of_method :to_time + + # Converts a Date instance to a Time, where the time is set to the beginning of the day. + # The timezone can be either :local or :utc (default :local). + # + # date = Date.new(2007, 11, 10) # => Sat, 10 Nov 2007 + # + # date.to_time # => 2007-11-10 00:00:00 0800 + # date.to_time(:local) # => 2007-11-10 00:00:00 0800 + # + # date.to_time(:utc) # => 2007-11-10 00:00:00 UTC + # + # NOTE: The :local timezone is Ruby's *process* timezone, i.e. ENV['TZ']. + # If the *application's* timezone is needed, then use +in_time_zone+ instead. + def to_time(form = :local) + raise ArgumentError, "Expected :local or :utc, got #{form.inspect}." 
unless [:local, :utc].include?(form) + ::Time.send(form, year, month, day) + end + + silence_redefinition_of_method :xmlschema + + # Returns a string which represents the time in used time zone as DateTime + # defined by XML Schema: + # + # date = Date.new(2015, 05, 23) # => Sat, 23 May 2015 + # date.xmlschema # => "2015-05-23T00:00:00+04:00" + def xmlschema + in_time_zone.xmlschema + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/zones.rb new file mode 100644 index 0000000..2dcf97c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date/zones.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/date_and_time/zones" + +class Date + include DateAndTime::Zones +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/calculations.rb new file mode 100644 index 0000000..f6cb1a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/calculations.rb @@ -0,0 +1,374 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/try" + +module DateAndTime + module Calculations + DAYS_INTO_WEEK = { + monday: 0, + tuesday: 1, + wednesday: 2, + thursday: 3, + friday: 4, + saturday: 5, + sunday: 6 + } + WEEKEND_DAYS = [ 6, 0 ] + + # Returns a new date/time representing yesterday. + def yesterday + advance(days: -1) + end + + # Returns a new date/time the specified number of days ago. + def prev_day(days = 1) + advance(days: -days) + end + + # Returns a new date/time representing tomorrow. + def tomorrow + advance(days: 1) + end + + # Returns a new date/time the specified number of days in the future. + def next_day(days = 1) + advance(days: days) + end + + # Returns true if the date/time is today. + def today? + to_date == ::Date.current + end + + # Returns true if the date/time is in the past. + def past? + self < self.class.current + end + + # Returns true if the date/time is in the future. + def future? + self > self.class.current + end + + # Returns true if the date/time falls on a Saturday or Sunday. + def on_weekend? + WEEKEND_DAYS.include?(wday) + end + + # Returns true if the date/time does not fall on a Saturday or Sunday. + def on_weekday? + !WEEKEND_DAYS.include?(wday) + end + + # Returns a new date/time the specified number of days ago. + def days_ago(days) + advance(days: -days) + end + + # Returns a new date/time the specified number of days in the future. + def days_since(days) + advance(days: days) + end + + # Returns a new date/time the specified number of weeks ago. + def weeks_ago(weeks) + advance(weeks: -weeks) + end + + # Returns a new date/time the specified number of weeks in the future. + def weeks_since(weeks) + advance(weeks: weeks) + end + + # Returns a new date/time the specified number of months ago. + def months_ago(months) + advance(months: -months) + end + + # Returns a new date/time the specified number of months in the future. + def months_since(months) + advance(months: months) + end + + # Returns a new date/time the specified number of years ago. + def years_ago(years) + advance(years: -years) + end + + # Returns a new date/time the specified number of years in the future. 
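
The DATE_FORMATS table and to_formatted_s above can be extended per application. A sketch; the :month_and_year key is just an example name:

    require "active_support/time"

    date = Date.new(2007, 11, 10)
    date.to_s(:db)            # => "2007-11-10"
    date.to_s(:long_ordinal)  # => "November 10th, 2007"

    Date::DATE_FORMATS[:month_and_year] = "%B %Y"
    date.to_s(:month_and_year)  # => "November 2007"
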
+ def years_since(years) + advance(years: years) + end + + # Returns a new date/time at the start of the month. + # + # today = Date.today # => Thu, 18 Jun 2015 + # today.beginning_of_month # => Mon, 01 Jun 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Thu, 18 Jun 2015 15:23:13 +0000 + # now.beginning_of_month # => Mon, 01 Jun 2015 00:00:00 +0000 + def beginning_of_month + first_hour(change(day: 1)) + end + alias :at_beginning_of_month :beginning_of_month + + # Returns a new date/time at the start of the quarter. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.beginning_of_quarter # => Wed, 01 Jul 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.beginning_of_quarter # => Wed, 01 Jul 2015 00:00:00 +0000 + def beginning_of_quarter + first_quarter_month = [10, 7, 4, 1].detect { |m| m <= month } + beginning_of_month.change(month: first_quarter_month) + end + alias :at_beginning_of_quarter :beginning_of_quarter + + # Returns a new date/time at the end of the quarter. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.end_of_quarter # => Wed, 30 Sep 2015 + # + # +DateTime+ objects will have a time set to 23:59:59. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.end_of_quarter # => Wed, 30 Sep 2015 23:59:59 +0000 + def end_of_quarter + last_quarter_month = [3, 6, 9, 12].detect { |m| m >= month } + beginning_of_month.change(month: last_quarter_month).end_of_month + end + alias :at_end_of_quarter :end_of_quarter + + # Returns a new date/time at the beginning of the year. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.beginning_of_year # => Thu, 01 Jan 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.beginning_of_year # => Thu, 01 Jan 2015 00:00:00 +0000 + def beginning_of_year + change(month: 1).beginning_of_month + end + alias :at_beginning_of_year :beginning_of_year + + # Returns a new date/time representing the given day in the next week. + # + # today = Date.today # => Thu, 07 May 2015 + # today.next_week # => Mon, 11 May 2015 + # + # The +given_day_in_next_week+ defaults to the beginning of the week + # which is determined by +Date.beginning_of_week+ or +config.beginning_of_week+ + # when set. + # + # today = Date.today # => Thu, 07 May 2015 + # today.next_week(:friday) # => Fri, 15 May 2015 + # + # +DateTime+ objects have their time set to 0:00 unless +same_time+ is true. + # + # now = DateTime.current # => Thu, 07 May 2015 13:31:16 +0000 + # now.next_week # => Mon, 11 May 2015 00:00:00 +0000 + def next_week(given_day_in_next_week = Date.beginning_of_week, same_time: false) + result = first_hour(weeks_since(1).beginning_of_week.days_since(days_span(given_day_in_next_week))) + same_time ? copy_time_to(result) : result + end + + # Returns a new date/time representing the next weekday. + def next_weekday + if next_day.on_weekend? + next_week(:monday, same_time: true) + else + next_day + end + end + + # Returns a new date/time the specified number of months in the future. + def next_month(months = 1) + advance(months: months) + end + + # Short-hand for months_since(3) + def next_quarter + months_since(3) + end + + # Returns a new date/time the specified number of years in the future. 
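
A quick check of the quarter and next_week helpers documented above, reusing the Fri, 10 Jul 2015 date from the comments:

    require "active_support/time"

    date = Date.new(2015, 7, 10)   # a Friday
    date.beginning_of_quarter      # => Date for 2015-07-01
    date.end_of_quarter            # => Date for 2015-09-30
    date.next_week                 # => Date for 2015-07-13 (next Monday)
    date.next_week(:friday)        # => Date for 2015-07-17
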
+ def next_year(years = 1) + advance(years: years) + end + + # Returns a new date/time representing the given day in the previous week. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # DateTime objects have their time set to 0:00 unless +same_time+ is true. + def prev_week(start_day = Date.beginning_of_week, same_time: false) + result = first_hour(weeks_ago(1).beginning_of_week.days_since(days_span(start_day))) + same_time ? copy_time_to(result) : result + end + alias_method :last_week, :prev_week + + # Returns a new date/time representing the previous weekday. + def prev_weekday + if prev_day.on_weekend? + copy_time_to(beginning_of_week(:friday)) + else + prev_day + end + end + alias_method :last_weekday, :prev_weekday + + # Returns a new date/time the specified number of months ago. + def prev_month(months = 1) + advance(months: -months) + end + + # Short-hand for months_ago(1). + def last_month + months_ago(1) + end + + # Short-hand for months_ago(3). + def prev_quarter + months_ago(3) + end + alias_method :last_quarter, :prev_quarter + + # Returns a new date/time the specified number of years ago. + def prev_year(years = 1) + advance(years: -years) + end + + # Short-hand for years_ago(1). + def last_year + years_ago(1) + end + + # Returns the number of days to the start of the week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + def days_to_week_start(start_day = Date.beginning_of_week) + start_day_number = DAYS_INTO_WEEK[start_day] + current_day_number = wday != 0 ? wday - 1 : 6 + (current_day_number - start_day_number) % 7 + end + + # Returns a new date/time representing the start of this week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # +DateTime+ objects have their time set to 0:00. + def beginning_of_week(start_day = Date.beginning_of_week) + result = days_ago(days_to_week_start(start_day)) + acts_like?(:time) ? result.midnight : result + end + alias :at_beginning_of_week :beginning_of_week + + # Returns Monday of this week assuming that week starts on Monday. + # +DateTime+ objects have their time set to 0:00. + def monday + beginning_of_week(:monday) + end + + # Returns a new date/time representing the end of this week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # DateTime objects have their time set to 23:59:59. + def end_of_week(start_day = Date.beginning_of_week) + last_hour(days_since(6 - days_to_week_start(start_day))) + end + alias :at_end_of_week :end_of_week + + # Returns Sunday of this week assuming that week starts on Monday. + # +DateTime+ objects have their time set to 23:59:59. + def sunday + end_of_week(:monday) + end + + # Returns a new date/time representing the end of the month. + # DateTime objects will have a time set to 23:59:59. + def end_of_month + last_day = ::Time.days_in_month(month, year) + last_hour(days_since(last_day - day)) + end + alias :at_end_of_month :end_of_month + + # Returns a new date/time representing the end of the year. + # DateTime objects will have a time set to 23:59:59. + def end_of_year + change(month: 12).end_of_month + end + alias :at_end_of_year :end_of_year + + # Returns a Range representing the whole day of the current date/time. 
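
The week-boundary helpers above depend on the configured beginning_of_week. A hedged sketch with both the default (:monday) and an explicit :sunday start:

    require "active_support/time"

    date = Date.new(2015, 7, 10)      # a Friday
    date.days_to_week_start           # => 4
    date.beginning_of_week            # => Date for 2015-07-06 (Monday)
    date.beginning_of_week(:sunday)   # => Date for 2015-07-05
    date.end_of_month                 # => Date for 2015-07-31
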
+ def all_day + beginning_of_day..end_of_day + end + + # Returns a Range representing the whole week of the current date/time. + # Week starts on start_day, default is Date.beginning_of_week or config.beginning_of_week when set. + def all_week(start_day = Date.beginning_of_week) + beginning_of_week(start_day)..end_of_week(start_day) + end + + # Returns a Range representing the whole month of the current date/time. + def all_month + beginning_of_month..end_of_month + end + + # Returns a Range representing the whole quarter of the current date/time. + def all_quarter + beginning_of_quarter..end_of_quarter + end + + # Returns a Range representing the whole year of the current date/time. + def all_year + beginning_of_year..end_of_year + end + + # Returns a new date/time representing the next occurrence of the specified day of week. + # + # today = Date.today # => Thu, 14 Dec 2017 + # today.next_occurring(:monday) # => Mon, 18 Dec 2017 + # today.next_occurring(:thursday) # => Thu, 21 Dec 2017 + def next_occurring(day_of_week) + current_day_number = wday != 0 ? wday - 1 : 6 + from_now = DAYS_INTO_WEEK.fetch(day_of_week) - current_day_number + from_now += 7 unless from_now > 0 + advance(days: from_now) + end + + # Returns a new date/time representing the previous occurrence of the specified day of week. + # + # today = Date.today # => Thu, 14 Dec 2017 + # today.prev_occurring(:monday) # => Mon, 11 Dec 2017 + # today.prev_occurring(:thursday) # => Thu, 07 Dec 2017 + def prev_occurring(day_of_week) + current_day_number = wday != 0 ? wday - 1 : 6 + ago = current_day_number - DAYS_INTO_WEEK.fetch(day_of_week) + ago += 7 unless ago > 0 + advance(days: -ago) + end + + private + def first_hour(date_or_time) + date_or_time.acts_like?(:time) ? date_or_time.beginning_of_day : date_or_time + end + + def last_hour(date_or_time) + date_or_time.acts_like?(:time) ? date_or_time.end_of_day : date_or_time + end + + def days_span(day) + (DAYS_INTO_WEEK[day] - DAYS_INTO_WEEK[Date.beginning_of_week]) % 7 + end + + def copy_time_to(other) + other.change(hour: hour, min: min, sec: sec, nsec: try(:nsec)) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/compatibility.rb new file mode 100644 index 0000000..d33c36e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/compatibility.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" + +module DateAndTime + module Compatibility + # If true, +to_time+ preserves the timezone offset of receiver. + # + # NOTE: With Ruby 2.4+ the default for +to_time+ changed from + # converting to the local system time, to preserving the offset + # of the receiver. For backwards compatibility we're overriding + # this behavior, but new apps will have an initializer that sets + # this to true, because the new behavior is preferred. 
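
next_occurring, prev_occurring and the all_* ranges above, checked against the Thu, 14 Dec 2017 date used in the comments:

    require "active_support/time"

    date = Date.new(2017, 12, 14)   # a Thursday
    date.next_occurring(:monday)    # => Date for 2017-12-18
    date.prev_occurring(:thursday)  # => Date for 2017-12-07
    date.all_week                   # => Date range 2017-12-11..2017-12-17
    date.all_quarter.count          # => 92 (Oct through Dec 2017)
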
+ mattr_accessor :preserve_timezone, instance_writer: false, default: false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/zones.rb new file mode 100644 index 0000000..894fd9b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_and_time/zones.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module DateAndTime + module Zones + # Returns the simultaneous time in Time.zone if a zone is given or + # if Time.zone_default is set. Otherwise, it returns the current time. + # + # Time.zone = 'Hawaii' # => 'Hawaii' + # Time.utc(2000).in_time_zone # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Date.new(2000).in_time_zone # => Sat, 01 Jan 2000 00:00:00 HST -10:00 + # + # This method is similar to Time#localtime, except that it uses Time.zone as the local zone + # instead of the operating system's time zone. + # + # You can also pass in a TimeZone instance or string that identifies a TimeZone as an argument, + # and the conversion will be based on that zone instead of Time.zone. + # + # Time.utc(2000).in_time_zone('Alaska') # => Fri, 31 Dec 1999 15:00:00 AKST -09:00 + # Date.new(2000).in_time_zone('Alaska') # => Sat, 01 Jan 2000 00:00:00 AKST -09:00 + def in_time_zone(zone = ::Time.zone) + time_zone = ::Time.find_zone! zone + time = acts_like?(:time) ? self : nil + + if time_zone + time_with_zone(time, time_zone) + else + time || to_time + end + end + + private + + def time_with_zone(time, zone) + if time + ActiveSupport::TimeWithZone.new(time.utc? ? time : time.getutc, zone) + else + ActiveSupport::TimeWithZone.new(nil, zone, to_time(:utc)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time.rb new file mode 100644 index 0000000..790dbee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_time/acts_like" +require "active_support/core_ext/date_time/blank" +require "active_support/core_ext/date_time/calculations" +require "active_support/core_ext/date_time/compatibility" +require "active_support/core_ext/date_time/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/acts_like.rb new file mode 100644 index 0000000..5dccdfe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/acts_like.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/object/acts_like" + +class DateTime + # Duck-types as a Date-like class. See Object#acts_like?. + def acts_like_date? + true + end + + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? 
+ true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/blank.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/blank.rb new file mode 100644 index 0000000..a52c8bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/blank.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "date" + +class DateTime #:nodoc: + # No DateTime is ever blank: + # + # DateTime.now.blank? # => false + # + # @return [false] + def blank? + false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/calculations.rb new file mode 100644 index 0000000..e61b23f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/calculations.rb @@ -0,0 +1,211 @@ +# frozen_string_literal: true + +require "date" + +class DateTime + class << self + # Returns Time.zone.now.to_datetime when Time.zone or + # config.time_zone are set, otherwise returns + # Time.now.to_datetime. + def current + ::Time.zone ? ::Time.zone.now.to_datetime : ::Time.now.to_datetime + end + end + + # Returns the number of seconds since 00:00:00. + # + # DateTime.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0 + # DateTime.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296 + # DateTime.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399 + def seconds_since_midnight + sec + (min * 60) + (hour * 3600) + end + + # Returns the number of seconds until 23:59:59. + # + # DateTime.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # DateTime.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # DateTime.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # DateTime.new(2012, 8, 29, 0, 0, 0.5).subsec # => (1/2) + def subsec + sec_fraction + end + + # Returns a new DateTime where one or more of the elements have been changed + # according to the +options+ parameter. The time options (:hour, + # :min, :sec) reset cascadingly, so if only the hour is + # passed, then minute and sec is set to 0. If the hour and minute is passed, + # then sec is set to 0. The +options+ parameter takes a hash with any of these + # keys: :year, :month, :day, :hour, + # :min, :sec, :offset, :start. + # + # DateTime.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => DateTime.new(2012, 8, 1, 22, 35, 0) + # DateTime.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => DateTime.new(1981, 8, 1, 22, 35, 0) + # DateTime.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => DateTime.new(1981, 8, 29, 0, 0, 0) + def change(options) + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_fraction = Rational(new_nsec, 1000000000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 
0 : Rational(nsec, 1000)) + new_fraction = Rational(new_usec, 1000000) + end + + raise ArgumentError, "argument out of range" if new_fraction >= 1 + + ::DateTime.civil( + options.fetch(:year, year), + options.fetch(:month, month), + options.fetch(:day, day), + options.fetch(:hour, hour), + options.fetch(:min, options[:hour] ? 0 : min), + options.fetch(:sec, (options[:hour] || options[:min]) ? 0 : sec) + new_fraction, + options.fetch(:offset, offset), + options.fetch(:start, start) + ) + end + + # Uses Date to provide precise Time calculations for years, months, and days. + # The +options+ parameter takes a hash with any of these keys: :years, + # :months, :weeks, :days, :hours, + # :minutes, :seconds. + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.advance(options) + datetime_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? + datetime_advanced_by_date + else + datetime_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new DateTime representing the time a number of seconds ago. + # Do not use this method in combination with x.months, use months_ago instead! + def ago(seconds) + since(-seconds) + end + + # Returns a new DateTime representing the time a number of seconds since the + # instance time. Do not use this method in combination with x.months, use + # months_since instead! + def since(seconds) + self + Rational(seconds.round, 86400) + end + alias :in :since + + # Returns a new DateTime representing the start of the day (0:00). + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new DateTime representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new DateTime representing the end of the day (23:59:59). + def end_of_day + change(hour: 23, min: 59, sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_day :end_of_day + + # Returns a new DateTime representing the start of the hour (hh:00:00). + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new DateTime representing the end of the hour (hh:59:59). + def end_of_hour + change(min: 59, sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new DateTime representing the start of the minute (hh:mm:00). + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new DateTime representing the end of the minute (hh:mm:59). + def end_of_minute + change(sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_minute :end_of_minute + + # Returns a Time instance of the simultaneous time in the system timezone. 
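
DateTime#change and #advance above mirror the Date versions but cascade the time fields. A short sketch:

    require "active_support/time"

    dt = DateTime.new(2012, 8, 29, 22, 35, 0)
    dt.change(day: 1)                # => DateTime for 2012-08-01 22:35:00
    dt.change(hour: 9)               # => DateTime for 2012-08-29 09:00:00 (min and sec reset to 0)
    dt.advance(months: 1, hours: 2)  # => DateTime for 2012-09-30 00:35:00
    dt.seconds_since_midnight        # => 81300
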
+ def localtime(utc_offset = nil) + utc = new_offset(0) + + Time.utc( + utc.year, utc.month, utc.day, + utc.hour, utc.min, utc.sec + utc.sec_fraction + ).getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns a Time instance of the simultaneous time in the UTC timezone. + # + # DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)) # => Mon, 21 Feb 2005 10:11:12 -0600 + # DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)).utc # => Mon, 21 Feb 2005 16:11:12 UTC + def utc + utc = new_offset(0) + + Time.utc( + utc.year, utc.month, utc.day, + utc.hour, utc.min, utc.sec + utc.sec_fraction + ) + end + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns +true+ if offset == 0. + def utc? + offset == 0 + end + + # Returns the offset value in seconds. + def utc_offset + (offset * 86400).to_i + end + + # Layers additional behavior on DateTime#<=> so that Time and + # ActiveSupport::TimeWithZone instances can be compared with a DateTime. + def <=>(other) + if other.respond_to? :to_datetime + super other.to_datetime rescue nil + else + super + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/compatibility.rb new file mode 100644 index 0000000..7600a06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/compatibility.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class DateTime + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return an instance of +Time+ with the same UTC offset + # as +self+ or an instance of +Time+ representing the same time + # in the local system timezone depending on the setting of + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? getlocal(utc_offset) : getlocal + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/conversions.rb new file mode 100644 index 0000000..29725c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/date_time/conversions.rb @@ -0,0 +1,107 @@ +# frozen_string_literal: true + +require "date" +require "active_support/inflector/methods" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/date_time/calculations" +require "active_support/values/time_zone" + +class DateTime + # Convert to a formatted string. See Time::DATE_FORMATS for predefined formats. + # + # This method is aliased to to_s. 
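
The UTC helpers above, reusing the Rational(-6, 24) offset from the comments:

    require "active_support/time"

    dt = DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24))
    dt.utc         # => 2005-02-21 16:11:12 UTC (a Time instance)
    dt.utc?        # => false
    dt.utc_offset  # => -21600
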
+ # + # === Examples + # datetime = DateTime.civil(2007, 12, 4, 0, 0, 0, 0) # => Tue, 04 Dec 2007 00:00:00 +0000 + # + # datetime.to_formatted_s(:db) # => "2007-12-04 00:00:00" + # datetime.to_s(:db) # => "2007-12-04 00:00:00" + # datetime.to_s(:number) # => "20071204000000" + # datetime.to_formatted_s(:short) # => "04 Dec 00:00" + # datetime.to_formatted_s(:long) # => "December 04, 2007 00:00" + # datetime.to_formatted_s(:long_ordinal) # => "December 4th, 2007 00:00" + # datetime.to_formatted_s(:rfc822) # => "Tue, 04 Dec 2007 00:00:00 +0000" + # datetime.to_formatted_s(:iso8601) # => "2007-12-04T00:00:00+00:00" + # + # == Adding your own datetime formats to to_formatted_s + # DateTime formats are shared with Time. You can add your own to the + # Time::DATE_FORMATS hash. Use the format name as the hash key and + # either a strftime string or Proc instance that takes a time or + # datetime argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = lambda { |time| time.strftime("%B #{time.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + to_default_s + end + end + alias_method :to_default_s, :to_s if instance_methods(false).include?(:to_s) + alias_method :to_s, :to_formatted_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # datetime = DateTime.civil(2000, 1, 1, 0, 0, 0, Rational(-6, 24)) + # datetime.formatted_offset # => "-06:00" + # datetime.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005 14:30:00 +0000". + def readable_inspect + to_s(:rfc822) + end + alias_method :default_inspect, :inspect + alias_method :inspect, :readable_inspect + + # Returns DateTime with local offset for given year if format is local else + # offset is zero. + # + # DateTime.civil_from_format :local, 2012 + # # => Sun, 01 Jan 2012 00:00:00 +0300 + # DateTime.civil_from_format :local, 2012, 12, 17 + # # => Mon, 17 Dec 2012 00:00:00 +0000 + def self.civil_from_format(utc_or_local, year, month = 1, day = 1, hour = 0, min = 0, sec = 0) + if utc_or_local.to_sym == :local + offset = ::Time.local(year, month, day).utc_offset.to_r / 86400 + else + offset = 0 + end + civil(year, month, day, hour, min, sec, offset) + end + + # Converts +self+ to a floating-point number of seconds, including fractional microseconds, since the Unix epoch. + def to_f + seconds_since_unix_epoch.to_f + sec_fraction + end + + # Converts +self+ to an integer number of seconds since the Unix epoch. 
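
And the conversion formats just documented; the outputs below follow the DATE_FORMATS comments:

    require "active_support/time"

    dt = DateTime.civil(2007, 12, 4, 0, 0, 0, Rational(-6, 24))
    dt.to_s(:db)                # => "2007-12-04 00:00:00"
    dt.to_s(:number)            # => "20071204000000"
    dt.formatted_offset         # => "-06:00"
    dt.formatted_offset(false)  # => "-0600"
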
+ def to_i + seconds_since_unix_epoch.to_i + end + + # Returns the fraction of a second as microseconds + def usec + (sec_fraction * 1_000_000).to_i + end + + # Returns the fraction of a second as nanoseconds + def nsec + (sec_fraction * 1_000_000_000).to_i + end + + private + + def offset_in_seconds + (offset * 86400).to_i + end + + def seconds_since_unix_epoch + (jd - 2440588) * 86400 - offset_in_seconds + seconds_since_midnight + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest.rb new file mode 100644 index 0000000..ce1427e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/digest/uuid" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest/uuid.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest/uuid.rb new file mode 100644 index 0000000..6e949a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/digest/uuid.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +require "securerandom" + +module Digest + module UUID + DNS_NAMESPACE = "k\xA7\xB8\x10\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + URL_NAMESPACE = "k\xA7\xB8\x11\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + OID_NAMESPACE = "k\xA7\xB8\x12\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + X500_NAMESPACE = "k\xA7\xB8\x14\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + + # Generates a v5 non-random UUID (Universally Unique IDentifier). + # + # Using Digest::MD5 generates version 3 UUIDs; Digest::SHA1 generates version 5 UUIDs. + # uuid_from_hash always generates the same UUID for a given name and namespace combination. + # + # See RFC 4122 for details of UUID at: https://www.ietf.org/rfc/rfc4122.txt + def self.uuid_from_hash(hash_class, uuid_namespace, name) + if hash_class == Digest::MD5 + version = 3 + elsif hash_class == Digest::SHA1 + version = 5 + else + raise ArgumentError, "Expected Digest::SHA1 or Digest::MD5, got #{hash_class.name}." + end + + hash = hash_class.new + hash.update(uuid_namespace) + hash.update(name) + + ary = hash.digest.unpack("NnnnnN") + ary[2] = (ary[2] & 0x0FFF) | (version << 12) + ary[3] = (ary[3] & 0x3FFF) | 0x8000 + + "%08x-%04x-%04x-%04x-%04x%08x" % ary + end + + # Convenience method for uuid_from_hash using Digest::MD5. + def self.uuid_v3(uuid_namespace, name) + uuid_from_hash(Digest::MD5, uuid_namespace, name) + end + + # Convenience method for uuid_from_hash using Digest::SHA1. + def self.uuid_v5(uuid_namespace, name) + uuid_from_hash(Digest::SHA1, uuid_namespace, name) + end + + # Convenience method for SecureRandom.uuid. + def self.uuid_v4 + SecureRandom.uuid + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/enumerable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/enumerable.rb new file mode 100644 index 0000000..cea6f98 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/enumerable.rb @@ -0,0 +1,164 @@ +# frozen_string_literal: true + +module Enumerable + # Enumerable#sum was added in Ruby 2.4, but it only works with Numeric elements + # when we omit an identity. 
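
Digest::UUID above generates deterministic name-based UUIDs. A sketch using the built-in binary DNS namespace constant; the example value is the standard RFC 4122 result for this name, assuming the predefined constant is used rather than a string namespace:

    require "digest"
    require "active_support/core_ext/digest/uuid"

    Digest::UUID.uuid_v5(Digest::UUID::DNS_NAMESPACE, "www.example.com")
    # => "2ed6657d-e927-568b-95e1-2665a8aea6a2" (same inputs always give the same UUID)

    Digest::UUID.uuid_v4  # => a random UUID, different on every call
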
+ # + # We tried shimming it to attempt the fast native method, rescue TypeError, + # and fall back to the compatible implementation, but that's much slower than + # just calling the compat method in the first place. + if Enumerable.instance_methods(false).include?(:sum) && !((?a..?b).sum rescue false) + # :stopdoc: + + # We can't use Refinements here because Refinements with Module which will be prepended + # doesn't work well https://bugs.ruby-lang.org/issues/13446 + alias :_original_sum_with_required_identity :sum + private :_original_sum_with_required_identity + + # :startdoc: + + # Calculates a sum from the elements. + # + # payments.sum { |p| p.price * p.tax_rate } + # payments.sum(&:price) + # + # The latter is a shortcut for: + # + # payments.inject(0) { |sum, p| sum + p.price } + # + # It can also calculate the sum without the use of a block. + # + # [5, 15, 10].sum # => 30 + # ['foo', 'bar'].sum # => "foobar" + # [[1, 2], [3, 1, 5]].sum # => [1, 2, 3, 1, 5] + # + # The default sum of an empty list is zero. You can override this default: + # + # [].sum(Payment.new(0)) { |i| i.amount } # => Payment.new(0) + def sum(identity = nil, &block) + if identity + _original_sum_with_required_identity(identity, &block) + elsif block_given? + map(&block).sum(identity) + else + inject(:+) || 0 + end + end + else + def sum(identity = nil, &block) + if block_given? + map(&block).sum(identity) + else + sum = identity ? inject(identity, :+) : inject(:+) + sum || identity || 0 + end + end + end + + # Convert an enumerable to a hash. + # + # people.index_by(&:login) + # # => { "nextangle" => , "chade-" => , ...} + # people.index_by { |person| "#{person.first_name} #{person.last_name}" } + # # => { "Chade- Fowlersburg-e" => , "David Heinemeier Hansson" => , ...} + def index_by + if block_given? + result = {} + each { |elem| result[yield(elem)] = elem } + result + else + to_enum(:index_by) { size if respond_to?(:size) } + end + end + + # Returns +true+ if the enumerable has more than 1 element. Functionally + # equivalent to enum.to_a.size > 1. Can be called with a block too, + # much like any?, so people.many? { |p| p.age > 26 } returns +true+ + # if more than one person is over 26. + def many? + cnt = 0 + if block_given? + any? do |element| + cnt += 1 if yield element + cnt > 1 + end + else + any? { (cnt += 1) > 1 } + end + end + + # The negative of the Enumerable#include?. Returns +true+ if the + # collection does not include the object. + def exclude?(object) + !include?(object) + end + + # Returns a copy of the enumerable without the specified elements. + # + # ["David", "Rafael", "Aaron", "Todd"].without "Aaron", "Todd" + # # => ["David", "Rafael"] + # + # {foo: 1, bar: 2, baz: 3}.without :bar + # # => {foo: 1, baz: 3} + def without(*elements) + reject { |element| elements.include?(element) } + end + + # Convert an enumerable to an array based on the given key. + # + # [{ name: "David" }, { name: "Rafael" }, { name: "Aaron" }].pluck(:name) + # # => ["David", "Rafael", "Aaron"] + # + # [{ id: 1, name: "David" }, { id: 2, name: "Rafael" }].pluck(:id, :name) + # # => [[1, "David"], [2, "Rafael"]] + def pluck(*keys) + if keys.many? + map { |element| keys.map { |key| element[key] } } + else + map { |element| element[keys.first] } + end + end +end + +class Range #:nodoc: + # Optimize range sum to use arithmetic progression if a block is not given and + # we have a range of numeric values. + def sum(identity = nil) + if block_given? 
|| !(first.is_a?(Integer) && last.is_a?(Integer)) + super + else + actual_last = exclude_end? ? (last - 1) : last + if actual_last >= first + sum = identity || 0 + sum + (actual_last - first + 1) * (actual_last + first) / 2 + else + identity || 0 + end + end + end +end + +# Array#sum was added in Ruby 2.4 but it only works with Numeric elements. +# +# We tried shimming it to attempt the fast native method, rescue TypeError, +# and fall back to the compatible implementation, but that's much slower than +# just calling the compat method in the first place. +if Array.instance_methods(false).include?(:sum) && !(%w[a].sum rescue false) + # Using Refinements here in order not to expose our internal method + using Module.new { + refine Array do + alias :orig_sum :sum + end + } + + class Array + def sum(init = nil, &block) #:nodoc: + if init.is_a?(Numeric) || first.is_a?(Numeric) + init ||= 0 + orig_sum(init, &block) + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file.rb new file mode 100644 index 0000000..64553bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/file/atomic" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file/atomic.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file/atomic.rb new file mode 100644 index 0000000..9deceb1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/file/atomic.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require "fileutils" + +class File + # Write to a file atomically. Useful for situations where you don't + # want other processes or threads to see half-written files. + # + # File.atomic_write('important.file') do |file| + # file.write('hello') + # end + # + # This method needs to create a temporary file. By default it will create it + # in the same directory as the destination file. If you don't like this + # behavior you can provide a different directory but it must be on the + # same physical filesystem as the file you're trying to write. + # + # File.atomic_write('/data/something.important', '/data/tmp') do |file| + # file.write('hello') + # end + def self.atomic_write(file_name, temp_dir = dirname(file_name)) + require "tempfile" unless defined?(Tempfile) + + Tempfile.open(".#{basename(file_name)}", temp_dir) do |temp_file| + temp_file.binmode + return_val = yield temp_file + temp_file.close + + old_stat = if exist?(file_name) + # Get original file permissions + stat(file_name) + else + # If not possible, probe which are the default permissions in the + # destination directory. + probe_stat_in(dirname(file_name)) + end + + if old_stat + # Set correct permissions on new file + begin + chown(old_stat.uid, old_stat.gid, temp_file.path) + # This operation will affect filesystem ACL's + chmod(old_stat.mode, temp_file.path) + rescue Errno::EPERM, Errno::EACCES + # Changing file ownership failed, moving on. + end + end + + # Overwrite original file with temp file + rename(temp_file.path, file_name) + return_val + end + end + + # Private utility method. 
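
The Enumerable helpers and File.atomic_write vendored above in one short sketch; the people data and the file path are made up for illustration:

    require "active_support/core_ext/enumerable"
    require "active_support/core_ext/file/atomic"

    people = [{ id: 1, name: "David" }, { id: 2, name: "Rafael" }]
    people.index_by { |p| p[:id] }  # => {1=>{:id=>1, :name=>"David"}, 2=>{:id=>2, :name=>"Rafael"}}
    people.pluck(:id, :name)        # => [[1, "David"], [2, "Rafael"]]
    people.many?                    # => true
    [1, 2, 3, 4].without(2, 4)      # => [1, 3]

    # Write a file so readers never observe a half-written version:
    File.atomic_write("tmp/settings.json") do |file|
      file.write('{"ready":true}')
    end
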
+ def self.probe_stat_in(dir) #:nodoc: + basename = [ + ".permissions_check", + Thread.current.object_id, + Process.pid, + rand(1000000) + ].join(".") + + file_name = join(dir, basename) + FileUtils.touch(file_name) + stat(file_name) + ensure + FileUtils.rm_f(file_name) if file_name + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash.rb new file mode 100644 index 0000000..e19aeaa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/compact" +require "active_support/core_ext/hash/conversions" +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/indifferent_access" +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/hash/slice" +require "active_support/core_ext/hash/transform_values" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/compact.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/compact.rb new file mode 100644 index 0000000..d6364dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/compact.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +class Hash + unless Hash.instance_methods(false).include?(:compact) + # Returns a hash with non +nil+ values. + # + # hash = { a: true, b: false, c: nil } + # hash.compact # => { a: true, b: false } + # hash # => { a: true, b: false, c: nil } + # { c: nil }.compact # => {} + # { c: true }.compact # => { c: true } + def compact + select { |_, value| !value.nil? } + end + end + + unless Hash.instance_methods(false).include?(:compact!) + # Replaces current hash with non +nil+ values. + # Returns +nil+ if no changes were made, otherwise returns the hash. + # + # hash = { a: true, b: false, c: nil } + # hash.compact! # => { a: true, b: false } + # hash # => { a: true, b: false } + # { c: true }.compact! # => nil + def compact! + reject! { |_, value| value.nil? } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/conversions.rb new file mode 100644 index 0000000..5b48254 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/conversions.rb @@ -0,0 +1,263 @@ +# frozen_string_literal: true + +require "active_support/xml_mini" +require "active_support/time" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/string/inflections" + +class Hash + # Returns a string containing an XML representation of its receiver: + # + # { foo: 1, bar: 2 }.to_xml + # # => + # # + # # + # # 1 + # # 2 + # # + # + # To do so, the method loops over the pairs and builds nodes that depend on + # the _values_. Given a pair +key+, +value+: + # + # * If +value+ is a hash there's a recursive call with +key+ as :root. 
+ # + # * If +value+ is an array there's a recursive call with +key+ as :root, + # and +key+ singularized as :children. + # + # * If +value+ is a callable object it must expect one or two arguments. Depending + # on the arity, the callable is invoked with the +options+ hash as first argument + # with +key+ as :root, and +key+ singularized as second argument. The + # callable can add nodes by using options[:builder]. + # + # {foo: lambda { |options, key| options[:builder].b(key) }}.to_xml + # # => "foo" + # + # * If +value+ responds to +to_xml+ the method is invoked with +key+ as :root. + # + # class Foo + # def to_xml(options) + # options[:builder].bar 'fooing!' + # end + # end + # + # { foo: Foo.new }.to_xml(skip_instruct: true) + # # => + # # + # # fooing! + # # + # + # * Otherwise, a node with +key+ as tag is created with a string representation of + # +value+ as text node. If +value+ is +nil+ an attribute "nil" set to "true" is added. + # Unless the option :skip_types exists and is true, an attribute "type" is + # added as well according to the following mapping: + # + # XML_TYPE_NAMES = { + # "Symbol" => "symbol", + # "Integer" => "integer", + # "BigDecimal" => "decimal", + # "Float" => "float", + # "TrueClass" => "boolean", + # "FalseClass" => "boolean", + # "Date" => "date", + # "DateTime" => "dateTime", + # "Time" => "dateTime" + # } + # + # By default the root node is "hash", but that's configurable via the :root option. + # + # The default XML builder is a fresh instance of Builder::XmlMarkup. You can + # configure your own builder with the :builder option. The method also accepts + # options like :dasherize and friends, they are forwarded to the builder. + def to_xml(options = {}) + require "active_support/builder" unless defined?(Builder) + + options = options.dup + options[:indent] ||= 2 + options[:root] ||= "hash" + options[:builder] ||= Builder::XmlMarkup.new(indent: options[:indent]) + + builder = options[:builder] + builder.instruct! unless options.delete(:skip_instruct) + + root = ActiveSupport::XmlMini.rename_key(options[:root].to_s, options) + + builder.tag!(root) do + each { |key, value| ActiveSupport::XmlMini.to_tag(key, value, options) } + yield builder if block_given? + end + end + + class << self + # Returns a Hash containing a collection of pairs when the key is the node name and the value is + # its content + # + # xml = <<-XML + # + # + # 1 + # 2 + # + # XML + # + # hash = Hash.from_xml(xml) + # # => {"hash"=>{"foo"=>1, "bar"=>2}} + # + # +DisallowedType+ is raised if the XML contains attributes with type="yaml" or + # type="symbol". Use Hash.from_trusted_xml to + # parse this XML. + # + # Custom +disallowed_types+ can also be passed in the form of an + # array. + # + # xml = <<-XML + # + # + # 1 + # "David" + # + # XML + # + # hash = Hash.from_xml(xml, ['integer']) + # # => ActiveSupport::XMLConverter::DisallowedType: Disallowed type attribute: "integer" + # + # Note that passing custom disallowed types will override the default types, + # which are Symbol and YAML. + def from_xml(xml, disallowed_types = nil) + ActiveSupport::XMLConverter.new(xml, disallowed_types).to_h + end + + # Builds a Hash from XML just like Hash.from_xml, but also allows Symbol and YAML. + def from_trusted_xml(xml) + from_xml xml, [] + end + end +end + +module ActiveSupport + class XMLConverter # :nodoc: + # Raised if the XML contains attributes with type="yaml" or + # type="symbol". Read Hash#from_xml for more details. 
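A minimal round-trip sketch for the Hash#to_xml / Hash.from_xml pair documented above, assuming both activesupport and the builder gem (required lazily by to_xml) are available; the "payload" root name is only an example.

  require "active_support/core_ext/hash/conversions"

  xml = { foo: 1, bar: 2 }.to_xml(root: "payload", skip_instruct: true)
  Hash.from_xml(xml) # => { "payload" => { "foo" => 1, "bar" => 2 } }
  # Keys come back as strings, and typed values (integers here) are restored.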
+ class DisallowedType < StandardError + def initialize(type) + super "Disallowed type attribute: #{type.inspect}" + end + end + + DISALLOWED_TYPES = %w(symbol yaml) + + def initialize(xml, disallowed_types = nil) + @xml = normalize_keys(XmlMini.parse(xml)) + @disallowed_types = disallowed_types || DISALLOWED_TYPES + end + + def to_h + deep_to_h(@xml) + end + + private + def normalize_keys(params) + case params + when Hash + Hash[params.map { |k, v| [k.to_s.tr("-", "_"), normalize_keys(v)] } ] + when Array + params.map { |v| normalize_keys(v) } + else + params + end + end + + def deep_to_h(value) + case value + when Hash + process_hash(value) + when Array + process_array(value) + when String + value + else + raise "can't typecast #{value.class.name} - #{value.inspect}" + end + end + + def process_hash(value) + if value.include?("type") && !value["type"].is_a?(Hash) && @disallowed_types.include?(value["type"]) + raise DisallowedType, value["type"] + end + + if become_array?(value) + _, entries = Array.wrap(value.detect { |k, v| not v.is_a?(String) }) + if entries.nil? || value["__content__"].try(:empty?) + [] + else + case entries + when Array + entries.collect { |v| deep_to_h(v) } + when Hash + [deep_to_h(entries)] + else + raise "can't typecast #{entries.inspect}" + end + end + elsif become_content?(value) + process_content(value) + + elsif become_empty_string?(value) + "" + elsif become_hash?(value) + xml_value = Hash[value.map { |k, v| [k, deep_to_h(v)] }] + + # Turn { files: { file: # } } into { files: # } so it is compatible with + # how multipart uploaded files from HTML appear + xml_value["file"].is_a?(StringIO) ? xml_value["file"] : xml_value + end + end + + def become_content?(value) + value["type"] == "file" || (value["__content__"] && (value.keys.size == 1 || value["__content__"].present?)) + end + + def become_array?(value) + value["type"] == "array" + end + + def become_empty_string?(value) + # { "string" => true } + # No tests fail when the second term is removed. + value["type"] == "string" && value["nil"] != "true" + end + + def become_hash?(value) + !nothing?(value) && !garbage?(value) + end + + def nothing?(value) + # blank or nil parsed values are represented by nil + value.blank? || value["nil"] == "true" + end + + def garbage?(value) + # If the type is the only element which makes it then + # this still makes the value nil, except if type is + # an XML node(where type['value'] is a Hash) + value["type"] && !value["type"].is_a?(::Hash) && value.size == 1 + end + + def process_content(value) + content = value["__content__"] + if parser = ActiveSupport::XmlMini::PARSING[value["type"]] + parser.arity == 1 ? parser.call(content) : parser.call(content, value) + else + content + end + end + + def process_array(value) + value.map! { |i| deep_to_h(i) } + value.length > 1 ? value : value.first + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/deep_merge.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/deep_merge.rb new file mode 100644 index 0000000..9bc50b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/deep_merge.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with +self+ and +other_hash+ merged recursively. 
+ # + # h1 = { a: true, b: { c: [1, 2, 3] } } + # h2 = { a: false, b: { x: [3, 4, 5] } } + # + # h1.deep_merge(h2) # => { a: false, b: { c: [1, 2, 3], x: [3, 4, 5] } } + # + # Like with Hash#merge in the standard library, a block can be provided + # to merge values: + # + # h1 = { a: 100, b: 200, c: { c1: 100 } } + # h2 = { b: 250, c: { c1: 200 } } + # h1.deep_merge(h2) { |key, this_val, other_val| this_val + other_val } + # # => { a: 100, b: 450, c: { c1: 300 } } + def deep_merge(other_hash, &block) + dup.deep_merge!(other_hash, &block) + end + + # Same as +deep_merge+, but modifies +self+. + def deep_merge!(other_hash, &block) + merge!(other_hash) do |key, this_val, other_val| + if this_val.is_a?(Hash) && other_val.is_a?(Hash) + this_val.deep_merge(other_val, &block) + elsif block_given? + block.call(key, this_val, other_val) + else + other_val + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/except.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/except.rb new file mode 100644 index 0000000..6258610 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/except.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +class Hash + # Returns a hash that includes everything except given keys. + # hash = { a: true, b: false, c: nil } + # hash.except(:c) # => { a: true, b: false } + # hash.except(:a, :b) # => { c: nil } + # hash # => { a: true, b: false, c: nil } + # + # This is useful for limiting a set of parameters to everything but a few known toggles: + # @person.update(params[:person].except(:admin)) + def except(*keys) + dup.except!(*keys) + end + + # Removes the given keys from hash and returns it. + # hash = { a: true, b: false, c: nil } + # hash.except!(:c) # => { a: true, b: false } + # hash # => { a: true, b: false } + def except!(*keys) + keys.each { |key| delete(key) } + self + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/indifferent_access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/indifferent_access.rb new file mode 100644 index 0000000..a38f33f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/indifferent_access.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "active_support/hash_with_indifferent_access" + +class Hash + # Returns an ActiveSupport::HashWithIndifferentAccess out of its receiver: + # + # { a: 1 }.with_indifferent_access['a'] # => 1 + def with_indifferent_access + ActiveSupport::HashWithIndifferentAccess.new(self) + end + + # Called when object is nested under an object that receives + # #with_indifferent_access. This method will be called on the current object + # by the enclosing object and is aliased to #with_indifferent_access by + # default. Subclasses of Hash may overwrite this method to return +self+ if + # converting to an ActiveSupport::HashWithIndifferentAccess would not be + # desirable. 
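A short sketch of Hash#deep_merge from the file above, assuming activesupport is loaded; the timeouts/retries keys are illustrative. Only the nested hash is merged recursively, while scalar values on the right-hand side simply win.

  require "active_support/core_ext/hash/deep_merge"

  defaults  = { timeouts: { read: 5, open: 2 }, retries: 1 }
  overrides = { timeouts: { read: 10 } }
  defaults.deep_merge(overrides)
  # => { timeouts: { read: 10, open: 2 }, retries: 1 }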
+ # + # b = { b: 1 } + # { a: b }.with_indifferent_access['a'] # calls b.nested_under_indifferent_access + # # => {"b"=>1} + alias nested_under_indifferent_access with_indifferent_access +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/keys.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/keys.rb new file mode 100644 index 0000000..bdf196e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/keys.rb @@ -0,0 +1,172 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with all keys converted using the +block+ operation. + # + # hash = { name: 'Rob', age: '28' } + # + # hash.transform_keys { |key| key.to_s.upcase } # => {"NAME"=>"Rob", "AGE"=>"28"} + # + # If you do not provide a +block+, it will return an Enumerator + # for chaining with other methods: + # + # hash.transform_keys.with_index { |k, i| [k, i].join } # => {"name0"=>"Rob", "age1"=>"28"} + def transform_keys + return enum_for(:transform_keys) { size } unless block_given? + result = {} + each_key do |key| + result[yield(key)] = self[key] + end + result + end unless method_defined? :transform_keys + + # Destructively converts all keys using the +block+ operations. + # Same as +transform_keys+ but modifies +self+. + def transform_keys! + return enum_for(:transform_keys!) { size } unless block_given? + keys.each do |key| + self[yield(key)] = delete(key) + end + self + end unless method_defined? :transform_keys! + + # Returns a new hash with all keys converted to strings. + # + # hash = { name: 'Rob', age: '28' } + # + # hash.stringify_keys + # # => {"name"=>"Rob", "age"=>"28"} + def stringify_keys + transform_keys(&:to_s) + end + + # Destructively converts all keys to strings. Same as + # +stringify_keys+, but modifies +self+. + def stringify_keys! + transform_keys!(&:to_s) + end + + # Returns a new hash with all keys converted to symbols, as long as + # they respond to +to_sym+. + # + # hash = { 'name' => 'Rob', 'age' => '28' } + # + # hash.symbolize_keys + # # => {:name=>"Rob", :age=>"28"} + def symbolize_keys + transform_keys { |key| key.to_sym rescue key } + end + alias_method :to_options, :symbolize_keys + + # Destructively converts all keys to symbols, as long as they respond + # to +to_sym+. Same as +symbolize_keys+, but modifies +self+. + def symbolize_keys! + transform_keys! { |key| key.to_sym rescue key } + end + alias_method :to_options!, :symbolize_keys! + + # Validates all keys in a hash match *valid_keys, raising + # +ArgumentError+ on a mismatch. + # + # Note that keys are treated differently than HashWithIndifferentAccess, + # meaning that string and symbol keys will not match. + # + # { name: 'Rob', years: '28' }.assert_valid_keys(:name, :age) # => raises "ArgumentError: Unknown key: :years. Valid keys are: :name, :age" + # { name: 'Rob', age: '28' }.assert_valid_keys('name', 'age') # => raises "ArgumentError: Unknown key: :name. Valid keys are: 'name', 'age'" + # { name: 'Rob', age: '28' }.assert_valid_keys(:name, :age) # => passes, raises nothing + def assert_valid_keys(*valid_keys) + valid_keys.flatten! + each_key do |k| + unless valid_keys.include?(k) + raise ArgumentError.new("Unknown key: #{k.inspect}. Valid keys are: #{valid_keys.map(&:inspect).join(', ')}") + end + end + end + + # Returns a new hash with all keys converted by the block operation. + # This includes the keys from the root hash and from all + # nested hashes and arrays. 
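A brief sketch combining two of the key helpers above, assuming activesupport is loaded; the params hash is illustrative. symbolize_keys converts string keys, and assert_valid_keys raises ArgumentError on anything unexpected.

  require "active_support/core_ext/hash/keys"

  params = { "name" => "Rob", "age" => "28" }.symbolize_keys
  # => { name: "Rob", age: "28" }
  params.assert_valid_keys(:name, :age)   # passes silently
  # params.assert_valid_keys(:name)       # would raise ArgumentError: Unknown key: :age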
+ # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_transform_keys{ |key| key.to_s.upcase } + # # => {"PERSON"=>{"NAME"=>"Rob", "AGE"=>"28"}} + def deep_transform_keys(&block) + _deep_transform_keys_in_object(self, &block) + end + + # Destructively converts all keys by using the block operation. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_transform_keys!(&block) + _deep_transform_keys_in_object!(self, &block) + end + + # Returns a new hash with all keys converted to strings. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_stringify_keys + # # => {"person"=>{"name"=>"Rob", "age"=>"28"}} + def deep_stringify_keys + deep_transform_keys(&:to_s) + end + + # Destructively converts all keys to strings. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_stringify_keys! + deep_transform_keys!(&:to_s) + end + + # Returns a new hash with all keys converted to symbols, as long as + # they respond to +to_sym+. This includes the keys from the root hash + # and from all nested hashes and arrays. + # + # hash = { 'person' => { 'name' => 'Rob', 'age' => '28' } } + # + # hash.deep_symbolize_keys + # # => {:person=>{:name=>"Rob", :age=>"28"}} + def deep_symbolize_keys + deep_transform_keys { |key| key.to_sym rescue key } + end + + # Destructively converts all keys to symbols, as long as they respond + # to +to_sym+. This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_symbolize_keys! + deep_transform_keys! { |key| key.to_sym rescue key } + end + + private + # support methods for deep transforming nested hashes and arrays + def _deep_transform_keys_in_object(object, &block) + case object + when Hash + object.each_with_object({}) do |(key, value), result| + result[yield(key)] = _deep_transform_keys_in_object(value, &block) + end + when Array + object.map { |e| _deep_transform_keys_in_object(e, &block) } + else + object + end + end + + def _deep_transform_keys_in_object!(object, &block) + case object + when Hash + object.keys.each do |key| + value = object.delete(key) + object[yield(key)] = _deep_transform_keys_in_object!(value, &block) + end + object + when Array + object.map! { |e| _deep_transform_keys_in_object!(e, &block) } + else + object + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/reverse_merge.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/reverse_merge.rb new file mode 100644 index 0000000..ef8d592 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/reverse_merge.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class Hash + # Merges the caller into +other_hash+. For example, + # + # options = options.reverse_merge(size: 25, velocity: 10) + # + # is equivalent to + # + # options = { size: 25, velocity: 10 }.merge(options) + # + # This is particularly useful for initializing an options hash + # with default values. + def reverse_merge(other_hash) + other_hash.merge(self) + end + alias_method :with_defaults, :reverse_merge + + # Destructive +reverse_merge+. + def reverse_merge!(other_hash) + replace(reverse_merge(other_hash)) + end + alias_method :reverse_update, :reverse_merge! + alias_method :with_defaults!, :reverse_merge! 
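A usage sketch for reverse_merge (also exposed as with_defaults), assuming activesupport is loaded; the paginate method and its option names are illustrative. Caller-supplied options win and the defaults only fill the gaps.

  require "active_support/core_ext/hash/reverse_merge"

  def paginate(options = {})
    options.reverse_merge(page: 1, per_page: 25)
  end

  paginate(per_page: 50) # => { page: 1, per_page: 50 }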
+end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/slice.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/slice.rb new file mode 100644 index 0000000..2bd0a56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/slice.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +class Hash + # Slices a hash to include only the given keys. Returns a hash containing + # the given keys. + # + # { a: 1, b: 2, c: 3, d: 4 }.slice(:a, :b) + # # => {:a=>1, :b=>2} + # + # This is useful for limiting an options hash to valid keys before + # passing to a method: + # + # def search(criteria = {}) + # criteria.assert_valid_keys(:mass, :velocity, :time) + # end + # + # search(options.slice(:mass, :velocity, :time)) + # + # If you have an array of keys you want to limit to, you should splat them: + # + # valid_keys = [:mass, :velocity, :time] + # search(options.slice(*valid_keys)) + def slice(*keys) + keys.each_with_object(Hash.new) { |k, hash| hash[k] = self[k] if has_key?(k) } + end unless method_defined?(:slice) + + # Replaces the hash with only the given keys. + # Returns a hash containing the removed key/value pairs. + # + # { a: 1, b: 2, c: 3, d: 4 }.slice!(:a, :b) + # # => {:c=>3, :d=>4} + def slice!(*keys) + omit = slice(*self.keys - keys) + hash = slice(*keys) + hash.default = default + hash.default_proc = default_proc if default_proc + replace(hash) + omit + end + + # Removes and returns the key/value pairs matching the given keys. + # + # { a: 1, b: 2, c: 3, d: 4 }.extract!(:a, :b) # => {:a=>1, :b=>2} + # { a: 1, b: 2 }.extract!(:a, :x) # => {:a=>1} + def extract!(*keys) + keys.each_with_object(self.class.new) { |key, result| result[key] = delete(key) if has_key?(key) } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/transform_values.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/transform_values.rb new file mode 100644 index 0000000..4b19c9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/hash/transform_values.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with the results of running +block+ once for every value. + # The keys are unchanged. + # + # { a: 1, b: 2, c: 3 }.transform_values { |x| x * 2 } # => { a: 2, b: 4, c: 6 } + # + # If you do not provide a +block+, it will return an Enumerator + # for chaining with other methods: + # + # { a: 1, b: 2 }.transform_values.with_index { |v, i| [v, i].join.to_i } # => { a: 10, b: 21 } + def transform_values + return enum_for(:transform_values) { size } unless block_given? + return {} if empty? + result = self.class.new + each do |key, value| + result[key] = yield(value) + end + result + end unless method_defined? :transform_values + + # Destructively converts all values using the +block+ operations. + # Same as +transform_values+ but modifies +self+. + def transform_values! + return enum_for(:transform_values!) { size } unless block_given? + each do |key, value| + self[key] = yield(value) + end + end unless method_defined? :transform_values! + # TODO: Remove this file when supporting only Ruby 2.4+. 
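A small sketch of the destructive helpers defined above, assuming activesupport is loaded; the opts hash is illustrative. extract! removes the named pairs and returns them, leaving the rest in place.

  require "active_support/core_ext/hash/slice"

  opts = { mass: 10, velocity: 3, color: :red }
  opts.extract!(:color) # => { color: :red }
  opts                  # => { mass: 10, velocity: 3 }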
+end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer.rb new file mode 100644 index 0000000..d227013 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support/core_ext/integer/multiple" +require "active_support/core_ext/integer/inflections" +require "active_support/core_ext/integer/time" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/inflections.rb new file mode 100644 index 0000000..aef3266 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/inflections.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require "active_support/inflector" + +class Integer + # Ordinalize turns a number into an ordinal string used to denote the + # position in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # 1.ordinalize # => "1st" + # 2.ordinalize # => "2nd" + # 1002.ordinalize # => "1002nd" + # 1003.ordinalize # => "1003rd" + # -11.ordinalize # => "-11th" + # -1001.ordinalize # => "-1001st" + def ordinalize + ActiveSupport::Inflector.ordinalize(self) + end + + # Ordinal returns the suffix used to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # 1.ordinal # => "st" + # 2.ordinal # => "nd" + # 1002.ordinal # => "nd" + # 1003.ordinal # => "rd" + # -11.ordinal # => "th" + # -1001.ordinal # => "st" + def ordinal + ActiveSupport::Inflector.ordinal(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/multiple.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/multiple.rb new file mode 100644 index 0000000..e760666 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/multiple.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +class Integer + # Check whether the integer is evenly divisible by the argument. + # + # 0.multiple_of?(0) # => true + # 6.multiple_of?(5) # => false + # 10.multiple_of?(2) # => true + def multiple_of?(number) + number != 0 ? self % number == 0 : zero? + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/time.rb new file mode 100644 index 0000000..5efb89c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/integer/time.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/numeric/time" + +class Integer + # Returns a Duration instance matching the number of months provided. + # + # 2.months # => 2 months + def months + ActiveSupport::Duration.months(self) + end + alias :month :months + + # Returns a Duration instance matching the number of years provided. 
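Two quick examples for the Integer extensions above, assuming activesupport is loaded; the values are arbitrary.

  require "active_support/core_ext/integer/inflections"
  require "active_support/core_ext/integer/multiple"

  21.ordinalize      # => "21st"
  21.multiple_of?(7) # => true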
+ # + # 2.years # => 2 years + def years + ActiveSupport::Duration.years(self) + end + alias :year :years +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel.rb new file mode 100644 index 0000000..0f4356f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/agnostics" +require "active_support/core_ext/kernel/concern" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/kernel/singleton_class" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/agnostics.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/agnostics.rb new file mode 100644 index 0000000..403b5f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/agnostics.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +class Object + # Makes backticks behave (somewhat more) similarly on all platforms. + # On win32 `nonexistent_command` raises Errno::ENOENT; on Unix, the + # spawned shell prints a message to stderr and sets $?. We emulate + # Unix on the former but not the latter. + def `(command) #:nodoc: + super + rescue Errno::ENOENT => e + STDERR.puts "#$0: #{e}" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/concern.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/concern.rb new file mode 100644 index 0000000..0b2baed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/concern.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/concerning" + +module Kernel + module_function + + # A shortcut to define a toplevel concern, not within a module. + # + # See Module::Concerning for more. + def concern(topic, &module_definition) + Object.concern topic, &module_definition + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/reporting.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/reporting.rb new file mode 100644 index 0000000..9155bd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/reporting.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Kernel + module_function + + # Sets $VERBOSE to +nil+ for the duration of the block and back to its original + # value afterwards. + # + # silence_warnings do + # value = noisy_call # no warning voiced + # end + # + # noisy_call # warning voiced + def silence_warnings + with_warnings(nil) { yield } + end + + # Sets $VERBOSE to +true+ for the duration of the block and back to its + # original value afterwards. + def enable_warnings + with_warnings(true) { yield } + end + + # Sets $VERBOSE for the duration of the block and back to its original + # value afterwards. + def with_warnings(flag) + old_verbose, $VERBOSE = $VERBOSE, flag + yield + ensure + $VERBOSE = old_verbose + end + + # Blocks and ignores any exception passed as argument if raised within the block. 
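A sketch of silence_warnings from the reporting extensions above, assuming activesupport is loaded; the API_VERSION constant is illustrative. Reassigning a constant normally prints "already initialized constant", but with $VERBOSE set to nil for the block that warning is muted.

  require "active_support/core_ext/kernel/reporting"

  API_VERSION = 1
  silence_warnings do
    API_VERSION = 2  # no "already initialized constant" warning
  end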
+ # + # suppress(ZeroDivisionError) do + # 1/0 + # puts 'This code is NOT reached' + # end + # + # puts 'This code gets executed and nothing related to ZeroDivisionError was seen' + def suppress(*exception_classes) + yield + rescue *exception_classes + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/singleton_class.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/singleton_class.rb new file mode 100644 index 0000000..6715eba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/kernel/singleton_class.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +module Kernel + # class_eval on an object acts like singleton_class.class_eval. + def class_eval(*args, &block) + singleton_class.class_eval(*args, &block) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/load_error.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/load_error.rb new file mode 100644 index 0000000..750f858 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/load_error.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +class LoadError + # Returns true if the given path name (except perhaps for the ".rb" + # extension) is the missing file which caused the exception to be raised. + def is_missing?(location) + location.sub(/\.rb$/, "".freeze) == path.sub(/\.rb$/, "".freeze) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/marshal.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/marshal.rb new file mode 100644 index 0000000..0c72cd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/marshal.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module ActiveSupport + module MarshalWithAutoloading # :nodoc: + def load(source, proc = nil) + super(source, proc) + rescue ArgumentError, NameError => exc + if exc.message.match(%r|undefined class/module (.+?)(?:::)?\z|) + # try loading the class/module + loaded = $1.constantize + + raise unless $1 == loaded.name + + # if it is an IO we need to go back to read the object + source.rewind if source.respond_to?(:rewind) + retry + else + raise exc + end + end + end +end + +Marshal.singleton_class.prepend(ActiveSupport::MarshalWithAutoloading) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module.rb new file mode 100644 index 0000000..d91e3fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/module/introspection" +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/module/reachable" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/attribute_accessors_per_thread" +require "active_support/core_ext/module/attr_internal" +require "active_support/core_ext/module/concerning" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/module/deprecation" +require "active_support/core_ext/module/redefine_method" +require 
"active_support/core_ext/module/remove_method" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/aliasing.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/aliasing.rb new file mode 100644 index 0000000..6f64d11 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/aliasing.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class Module + # Allows you to make aliases for attributes, which includes + # getter, setter, and a predicate. + # + # class Content < ActiveRecord::Base + # # has a title attribute + # end + # + # class Email < Content + # alias_attribute :subject, :title + # end + # + # e = Email.find(1) + # e.title # => "Superstars" + # e.subject # => "Superstars" + # e.subject? # => true + # e.subject = "Megastars" + # e.title # => "Megastars" + def alias_attribute(new_name, old_name) + # The following reader methods use an explicit `self` receiver in order to + # support aliases that start with an uppercase letter. Otherwise, they would + # be resolved as constants instead. + module_eval <<-STR, __FILE__, __LINE__ + 1 + def #{new_name}; self.#{old_name}; end # def subject; self.title; end + def #{new_name}?; self.#{old_name}?; end # def subject?; self.title?; end + def #{new_name}=(v); self.#{old_name} = v; end # def subject=(v); self.title = v; end + STR + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/anonymous.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/anonymous.rb new file mode 100644 index 0000000..d1c86b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/anonymous.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +class Module + # A module may or may not have a name. + # + # module M; end + # M.name # => "M" + # + # m = Module.new + # m.name # => nil + # + # +anonymous?+ method returns true if module does not have a name, false otherwise: + # + # Module.new.anonymous? # => true + # + # module M; end + # M.anonymous? # => false + # + # A module gets a name when it is first assigned to a constant. Either + # via the +module+ or +class+ keyword or by an explicit assignment: + # + # m = Module.new # creates an anonymous module + # m.anonymous? # => true + # M = m # m gets a name here as a side-effect + # m.name # => "M" + # m.anonymous? # => false + def anonymous? + name.nil? + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attr_internal.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attr_internal.rb new file mode 100644 index 0000000..7801f6d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attr_internal.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +class Module + # Declares an attribute reader backed by an internally-named instance variable. + def attr_internal_reader(*attrs) + attrs.each { |attr_name| attr_internal_define(attr_name, :reader) } + end + + # Declares an attribute writer backed by an internally-named instance variable. + def attr_internal_writer(*attrs) + attrs.each { |attr_name| attr_internal_define(attr_name, :writer) } + end + + # Declares an attribute reader and writer backed by an internally-named instance + # variable. 
+ def attr_internal_accessor(*attrs) + attr_internal_reader(*attrs) + attr_internal_writer(*attrs) + end + alias_method :attr_internal, :attr_internal_accessor + + class << self; attr_accessor :attr_internal_naming_format end + self.attr_internal_naming_format = "@_%s" + + private + def attr_internal_ivar_name(attr) + Module.attr_internal_naming_format % attr + end + + def attr_internal_define(attr_name, type) + internal_name = attr_internal_ivar_name(attr_name).sub(/\A@/, "") + # use native attr_* methods as they are faster on some Ruby implementations + send("attr_#{type}", internal_name) + attr_name, internal_name = "#{attr_name}=", "#{internal_name}=" if type == :writer + alias_method attr_name, internal_name + remove_method internal_name + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors.rb new file mode 100644 index 0000000..580baff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors.rb @@ -0,0 +1,215 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/regexp" + +# Extends the module object with class/module and instance accessors for +# class/module attributes, just like the native attr* accessors for instance +# attributes. +class Module + # Defines a class attribute and creates a class and instance reader methods. + # The underlying class variable is set to +nil+, if it is not previously + # defined. All class and instance methods created will be public, even if + # this method is called with a private or protected access modifier. + # + # module HairColors + # mattr_reader :hair_colors + # end + # + # HairColors.hair_colors # => nil + # HairColors.class_variable_set("@@hair_colors", [:brown, :black]) + # HairColors.hair_colors # => [:brown, :black] + # + # The attribute name must be a valid method name in Ruby. + # + # module Foo + # mattr_reader :"1_Badname" + # end + # # => NameError: invalid attribute name: 1_Badname + # + # If you want to opt out the creation on the instance reader method, pass + # instance_reader: false or instance_accessor: false. + # + # module HairColors + # mattr_reader :hair_colors, instance_reader: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_reader :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors # => [:brown, :black, :blonde, :red] + def mattr_reader(*syms, instance_reader: true, instance_accessor: true, default: nil) + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /\A[_A-Za-z]\w*\z/.match?(sym) + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + @@#{sym} = nil unless defined? @@#{sym} + + def self.#{sym} + @@#{sym} + end + EOS + + if instance_reader && instance_accessor + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym} + @@#{sym} + end + EOS + end + + sym_default_value = (block_given? && default.nil?) ? yield : default + class_variable_set("@@#{sym}", sym_default_value) unless sym_default_value.nil? 
+ end + end + alias :cattr_reader :mattr_reader + + # Defines a class attribute and creates a class and instance writer methods to + # allow assignment to the attribute. All class and instance methods created + # will be public, even if this method is called with a private or protected + # access modifier. + # + # module HairColors + # mattr_writer :hair_colors + # end + # + # class Person + # include HairColors + # end + # + # HairColors.hair_colors = [:brown, :black] + # Person.class_variable_get("@@hair_colors") # => [:brown, :black] + # Person.new.hair_colors = [:blonde, :red] + # HairColors.class_variable_get("@@hair_colors") # => [:blonde, :red] + # + # If you want to opt out the instance writer method, pass + # instance_writer: false or instance_accessor: false. + # + # module HairColors + # mattr_writer :hair_colors, instance_writer: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:blonde, :red] # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_writer :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.class_variable_get("@@hair_colors") # => [:brown, :black, :blonde, :red] + def mattr_writer(*syms, instance_writer: true, instance_accessor: true, default: nil) + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /\A[_A-Za-z]\w*\z/.match?(sym) + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + @@#{sym} = nil unless defined? @@#{sym} + + def self.#{sym}=(obj) + @@#{sym} = obj + end + EOS + + if instance_writer && instance_accessor + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym}=(obj) + @@#{sym} = obj + end + EOS + end + + sym_default_value = (block_given? && default.nil?) ? yield : default + send("#{sym}=", sym_default_value) unless sym_default_value.nil? + end + end + alias :cattr_writer :mattr_writer + + # Defines both class and instance accessors for class attributes. + # All class and instance methods created will be public, even if + # this method is called with a private or protected access modifier. + # + # module HairColors + # mattr_accessor :hair_colors + # end + # + # class Person + # include HairColors + # end + # + # HairColors.hair_colors = [:brown, :black, :blonde, :red] + # HairColors.hair_colors # => [:brown, :black, :blonde, :red] + # Person.new.hair_colors # => [:brown, :black, :blonde, :red] + # + # If a subclass changes the value then that would also change the value for + # parent class. Similarly if parent class changes the value then that would + # change the value of subclasses too. + # + # class Male < Person + # end + # + # Male.new.hair_colors << :blue + # Person.new.hair_colors # => [:brown, :black, :blonde, :red, :blue] + # + # To opt out of the instance writer method, pass instance_writer: false. + # To opt out of the instance reader method, pass instance_reader: false. + # + # module HairColors + # mattr_accessor :hair_colors, instance_writer: false, instance_reader: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:brown] # => NoMethodError + # Person.new.hair_colors # => NoMethodError + # + # Or pass instance_accessor: false, to opt out both instance methods. 
+ # + # module HairColors + # mattr_accessor :hair_colors, instance_accessor: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:brown] # => NoMethodError + # Person.new.hair_colors # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_accessor :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.class_variable_get("@@hair_colors") # => [:brown, :black, :blonde, :red] + def mattr_accessor(*syms, instance_reader: true, instance_writer: true, instance_accessor: true, default: nil, &blk) + mattr_reader(*syms, instance_reader: instance_reader, instance_accessor: instance_accessor, default: default, &blk) + mattr_writer(*syms, instance_writer: instance_writer, instance_accessor: instance_accessor, default: default) + end + alias :cattr_accessor :mattr_accessor +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb new file mode 100644 index 0000000..4b9b6ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb @@ -0,0 +1,150 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/regexp" + +# Extends the module object with class/module and instance accessors for +# class/module attributes, just like the native attr* accessors for instance +# attributes, but does so on a per-thread basis. +# +# So the values are scoped within the Thread.current space under the class name +# of the module. +class Module + # Defines a per-thread class attribute and creates class and instance reader methods. + # The underlying per-thread class variable is set to +nil+, if it is not previously defined. + # + # module Current + # thread_mattr_reader :user + # end + # + # Current.user # => nil + # Thread.current[:attr_Current_user] = "DHH" + # Current.user # => "DHH" + # + # The attribute name must be a valid method name in Ruby. + # + # module Foo + # thread_mattr_reader :"1_Badname" + # end + # # => NameError: invalid attribute name: 1_Badname + # + # If you want to opt out of the creation of the instance reader method, pass + # instance_reader: false or instance_accessor: false. + # + # class Current + # thread_mattr_reader :user, instance_reader: false + # end + # + # Current.new.user # => NoMethodError + def thread_mattr_reader(*syms) # :nodoc: + options = syms.extract_options! + + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /^[_A-Za-z]\w*$/.match?(sym) + + # The following generated method concatenates `name` because we want it + # to work with inheritance via polymorphism. + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def self.#{sym} + Thread.current["attr_" + name + "_#{sym}"] + end + EOS + + unless options[:instance_reader] == false || options[:instance_accessor] == false + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym} + self.class.#{sym} + end + EOS + end + end + end + alias :thread_cattr_reader :thread_mattr_reader + + # Defines a per-thread class attribute and creates a class and instance writer methods to + # allow assignment to the attribute. 
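A compact sketch of mattr_accessor with the default: option documented above, assuming activesupport is loaded; the Settings module and timeout attribute are illustrative.

  require "active_support/core_ext/module/attribute_accessors"

  module Settings
    mattr_accessor :timeout, default: 30
  end

  Settings.timeout      # => 30
  Settings.timeout = 60
  Settings.timeout      # => 60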
+ # + # module Current + # thread_mattr_writer :user + # end + # + # Current.user = "DHH" + # Thread.current[:attr_Current_user] # => "DHH" + # + # If you want to opt out of the creation of the instance writer method, pass + # instance_writer: false or instance_accessor: false. + # + # class Current + # thread_mattr_writer :user, instance_writer: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + def thread_mattr_writer(*syms) # :nodoc: + options = syms.extract_options! + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /^[_A-Za-z]\w*$/.match?(sym) + + # The following generated method concatenates `name` because we want it + # to work with inheritance via polymorphism. + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def self.#{sym}=(obj) + Thread.current["attr_" + name + "_#{sym}"] = obj + end + EOS + + unless options[:instance_writer] == false || options[:instance_accessor] == false + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym}=(obj) + self.class.#{sym} = obj + end + EOS + end + end + end + alias :thread_cattr_writer :thread_mattr_writer + + # Defines both class and instance accessors for class attributes. + # + # class Account + # thread_mattr_accessor :user + # end + # + # Account.user = "DHH" + # Account.user # => "DHH" + # Account.new.user # => "DHH" + # + # If a subclass changes the value, the parent class' value is not changed. + # Similarly, if the parent class changes the value, the value of subclasses + # is not changed. + # + # class Customer < Account + # end + # + # Customer.user = "Rafael" + # Customer.user # => "Rafael" + # Account.user # => "DHH" + # + # To opt out of the instance writer method, pass instance_writer: false. + # To opt out of the instance reader method, pass instance_reader: false. + # + # class Current + # thread_mattr_accessor :user, instance_writer: false, instance_reader: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + # Current.new.user # => NoMethodError + # + # Or pass instance_accessor: false, to opt out both instance methods. + # + # class Current + # mattr_accessor :user, instance_accessor: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + # Current.new.user # => NoMethodError + def thread_mattr_accessor(*syms) + thread_mattr_reader(*syms) + thread_mattr_writer(*syms) + end + alias :thread_cattr_accessor :thread_mattr_accessor +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/concerning.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/concerning.rb new file mode 100644 index 0000000..7bbbf32 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/concerning.rb @@ -0,0 +1,134 @@ +# frozen_string_literal: true + +require "active_support/concern" + +class Module + # = Bite-sized separation of concerns + # + # We often find ourselves with a medium-sized chunk of behavior that we'd + # like to extract, but only mix in to a single class. + # + # Extracting a plain old Ruby object to encapsulate it and collaborate or + # delegate to the original object is often a good choice, but when there's + # no additional state to encapsulate or we're making DSL-style declarations + # about the parent class, introducing new collaborators can obfuscate rather + # than simplify. + # + # The typical route is to just dump everything in a monolithic class, perhaps + # with a comment, as a least-bad alternative. 
Using modules in separate files + # means tedious sifting to get a big-picture view. + # + # = Dissatisfying ways to separate small concerns + # + # == Using comments: + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # ## Event tracking + # has_many :events + # + # before_create :track_creation + # + # private + # def track_creation + # # ... + # end + # end + # + # == With an inline module: + # + # Noisy syntax. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # module EventTracking + # extend ActiveSupport::Concern + # + # included do + # has_many :events + # before_create :track_creation + # end + # + # private + # def track_creation + # # ... + # end + # end + # include EventTracking + # end + # + # == Mix-in noise exiled to its own file: + # + # Once our chunk of behavior starts pushing the scroll-to-understand-it + # boundary, we give in and move it to a separate file. At this size, the + # increased overhead can be a reasonable tradeoff even if it reduces our + # at-a-glance perception of how things work. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # include TodoEventTracking + # end + # + # = Introducing Module#concerning + # + # By quieting the mix-in noise, we arrive at a natural, low-ceremony way to + # separate bite-sized concerns. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # concerning :EventTracking do + # included do + # has_many :events + # before_create :track_creation + # end + # + # private + # def track_creation + # # ... + # end + # end + # end + # + # Todo.ancestors + # # => [Todo, Todo::EventTracking, ApplicationRecord, Object] + # + # This small step has some wonderful ripple effects. We can + # * grok the behavior of our class in one glance, + # * clean up monolithic junk-drawer classes by separating their concerns, and + # * stop leaning on protected/private for crude "this is internal stuff" modularity. + module Concerning + # Define a new concern and mix it in. + def concerning(topic, &block) + include concern(topic, &block) + end + + # A low-cruft shortcut to define a concern. + # + # concern :EventTracking do + # ... + # end + # + # is equivalent to + # + # module EventTracking + # extend ActiveSupport::Concern + # + # ... + # end + def concern(topic, &module_definition) + const_set topic, Module.new { + extend ::ActiveSupport::Concern + module_eval(&module_definition) + } + end + end + include Concerning +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/delegation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/delegation.rb new file mode 100644 index 0000000..4310df3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/delegation.rb @@ -0,0 +1,287 @@ +# frozen_string_literal: true + +require "set" +require "active_support/core_ext/regexp" + +class Module + # Error generated by +delegate+ when a method is called on +nil+ and +allow_nil+ + # option is not used. + class DelegationError < NoMethodError; end + + RUBY_RESERVED_KEYWORDS = %w(alias and BEGIN begin break case class def defined? 
do + else elsif END end ensure false for if in module next nil not or redo rescue retry + return self super then true undef unless until when while yield) + DELEGATION_RESERVED_KEYWORDS = %w(_ arg args block) + DELEGATION_RESERVED_METHOD_NAMES = Set.new( + RUBY_RESERVED_KEYWORDS + DELEGATION_RESERVED_KEYWORDS + ).freeze + + # Provides a +delegate+ class method to easily expose contained objects' + # public methods as your own. + # + # ==== Options + # * :to - Specifies the target object + # * :prefix - Prefixes the new method with the target name or a custom prefix + # * :allow_nil - if set to true, prevents a +Module::DelegationError+ + # from being raised + # + # The macro receives one or more method names (specified as symbols or + # strings) and the name of the target object via the :to option + # (also a symbol or string). + # + # Delegation is particularly useful with Active Record associations: + # + # class Greeter < ActiveRecord::Base + # def hello + # 'hello' + # end + # + # def goodbye + # 'goodbye' + # end + # end + # + # class Foo < ActiveRecord::Base + # belongs_to :greeter + # delegate :hello, to: :greeter + # end + # + # Foo.new.hello # => "hello" + # Foo.new.goodbye # => NoMethodError: undefined method `goodbye' for # + # + # Multiple delegates to the same target are allowed: + # + # class Foo < ActiveRecord::Base + # belongs_to :greeter + # delegate :hello, :goodbye, to: :greeter + # end + # + # Foo.new.goodbye # => "goodbye" + # + # Methods can be delegated to instance variables, class variables, or constants + # by providing them as a symbols: + # + # class Foo + # CONSTANT_ARRAY = [0,1,2,3] + # @@class_array = [4,5,6,7] + # + # def initialize + # @instance_array = [8,9,10,11] + # end + # delegate :sum, to: :CONSTANT_ARRAY + # delegate :min, to: :@@class_array + # delegate :max, to: :@instance_array + # end + # + # Foo.new.sum # => 6 + # Foo.new.min # => 4 + # Foo.new.max # => 11 + # + # It's also possible to delegate a method to the class by using +:class+: + # + # class Foo + # def self.hello + # "world" + # end + # + # delegate :hello, to: :class + # end + # + # Foo.new.hello # => "world" + # + # Delegates can optionally be prefixed using the :prefix option. If the value + # is true, the delegate methods are prefixed with the name of the object being + # delegated to. + # + # Person = Struct.new(:name, :address) + # + # class Invoice < Struct.new(:client) + # delegate :name, :address, to: :client, prefix: true + # end + # + # john_doe = Person.new('John Doe', 'Vimmersvej 13') + # invoice = Invoice.new(john_doe) + # invoice.client_name # => "John Doe" + # invoice.client_address # => "Vimmersvej 13" + # + # It is also possible to supply a custom prefix. + # + # class Invoice < Struct.new(:client) + # delegate :name, :address, to: :client, prefix: :customer + # end + # + # invoice = Invoice.new(john_doe) + # invoice.customer_name # => 'John Doe' + # invoice.customer_address # => 'Vimmersvej 13' + # + # If the target is +nil+ and does not respond to the delegated method a + # +Module::DelegationError+ is raised. If you wish to instead return +nil+, + # use the :allow_nil option. 
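A usage sketch for delegate with prefix: true as documented above, assuming activesupport is loaded; the Owner and Account structs are illustrative stand-ins for the Active Record models in the examples.

  require "active_support/core_ext/module/delegation"

  Owner   = Struct.new(:email)
  Account = Struct.new(:owner) do
    delegate :email, to: :owner, prefix: true
  end

  Account.new(Owner.new("team@example.com")).owner_email # => "team@example.com"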
+ # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :age, to: :profile + # end + # + # User.new.age + # # => Module::DelegationError: User#age delegated to profile.age, but profile is nil + # + # But if not having a profile yet is fine and should not be an error + # condition: + # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :age, to: :profile, allow_nil: true + # end + # + # User.new.age # nil + # + # Note that if the target is not +nil+ then the call is attempted regardless of the + # :allow_nil option, and thus an exception is still raised if said object + # does not respond to the method: + # + # class Foo + # def initialize(bar) + # @bar = bar + # end + # + # delegate :name, to: :@bar, allow_nil: true + # end + # + # Foo.new("Bar").name # raises NoMethodError: undefined method `name' + # + # The target method must be public, otherwise it will raise +NoMethodError+. + def delegate(*methods, to: nil, prefix: nil, allow_nil: nil) + unless to + raise ArgumentError, "Delegation needs a target. Supply an options hash with a :to key as the last argument (e.g. delegate :hello, to: :greeter)." + end + + if prefix == true && /^[^a-z_]/.match?(to) + raise ArgumentError, "Can only automatically set the delegation prefix when delegating to a method." + end + + method_prefix = \ + if prefix + "#{prefix == true ? to : prefix}_" + else + "" + end + + location = caller_locations(1, 1).first + file, line = location.path, location.lineno + + to = to.to_s + to = "self.#{to}" if DELEGATION_RESERVED_METHOD_NAMES.include?(to) + + methods.map do |method| + # Attribute writer methods only accept one argument. Makes sure []= + # methods still accept two arguments. + definition = /[^\]]=$/.match?(method) ? "arg" : "*args, &block" + + # The following generated method calls the target exactly once, storing + # the returned value in a dummy variable. + # + # Reason is twofold: On one hand doing less calls is in general better. + # On the other hand it could be that the target has side-effects, + # whereas conceptually, from the user point of view, the delegator should + # be doing one call. + if allow_nil + method_def = [ + "def #{method_prefix}#{method}(#{definition})", + "_ = #{to}", + "if !_.nil? || nil.respond_to?(:#{method})", + " _.#{method}(#{definition})", + "end", + "end" + ].join ";" + else + exception = %(raise DelegationError, "#{self}##{method_prefix}#{method} delegated to #{to}.#{method}, but #{to} is nil: \#{self.inspect}") + + method_def = [ + "def #{method_prefix}#{method}(#{definition})", + " _ = #{to}", + " _.#{method}(#{definition})", + "rescue NoMethodError => e", + " if _.nil? 
&& e.name == :#{method}", + " #{exception}", + " else", + " raise", + " end", + "end" + ].join ";" + end + + module_eval(method_def, file, line) + end + end + + # When building decorators, a common pattern may emerge: + # + # class Partition + # def initialize(event) + # @event = event + # end + # + # def person + # @event.detail.person || @event.creator + # end + # + # private + # def respond_to_missing?(name, include_private = false) + # @event.respond_to?(name, include_private) + # end + # + # def method_missing(method, *args, &block) + # @event.send(method, *args, &block) + # end + # end + # + # With Module#delegate_missing_to, the above is condensed to: + # + # class Partition + # delegate_missing_to :@event + # + # def initialize(event) + # @event = event + # end + # + # def person + # @event.detail.person || @event.creator + # end + # end + # + # The target can be anything callable within the object, e.g. instance + # variables, methods, constants, etc. + # + # The delegated method must be public on the target, otherwise it will + # raise +NoMethodError+. + def delegate_missing_to(target) + target = target.to_s + target = "self.#{target}" if DELEGATION_RESERVED_METHOD_NAMES.include?(target) + + module_eval <<-RUBY, __FILE__, __LINE__ + 1 + def respond_to_missing?(name, include_private = false) + # It may look like an oversight, but we deliberately do not pass + # +include_private+, because they do not get delegated. + + #{target}.respond_to?(name) || super + end + + def method_missing(method, *args, &block) + if #{target}.respond_to?(method) + #{target}.public_send(method, *args, &block) + else + begin + super + rescue NoMethodError + if #{target}.nil? + raise DelegationError, "\#{method} delegated to #{target}, but #{target} is nil" + else + raise + end + end + end + end + RUBY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/deprecation.rb new file mode 100644 index 0000000..71c42eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/deprecation.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class Module + # deprecate :foo + # deprecate bar: 'message' + # deprecate :foo, :bar, baz: 'warning!', qux: 'gone!' + # + # You can also use custom deprecator instance: + # + # deprecate :foo, deprecator: MyLib::Deprecator.new + # deprecate :foo, bar: "warning!", deprecator: MyLib::Deprecator.new + # + # \Custom deprecators must respond to deprecation_warning(deprecated_method_name, message, caller_backtrace) + # method where you can implement your custom warning behavior. 
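For orientation before the custom-deprecator example that follows, a minimal sketch of the default behaviour (assuming the full activesupport gem is loaded; the exact warning text and deprecation horizon depend on the ActiveSupport version, and the Report class is hypothetical):

require "active_support/all"

class Report
  def totals
    42
  end
  # Keeps the method working but emits a deprecation warning on every call.
  deprecate totals: "use #grand_total instead"
end

Report.new.totals # => 42, after printing roughly:
# DEPRECATION WARNING: totals is deprecated and will be removed from Rails 6.0
#   (use #grand_total instead) (called from ...)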
+ # + # class MyLib::Deprecator + # def deprecation_warning(deprecated_method_name, message, caller_backtrace = nil) + # message = "#{deprecated_method_name} is deprecated and will be removed from MyLibrary | #{message}" + # Kernel.warn message + # end + # end + def deprecate(*method_names) + ActiveSupport::Deprecation.deprecate_methods(self, *method_names) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/introspection.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/introspection.rb new file mode 100644 index 0000000..c5bb598 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/introspection.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +require "active_support/inflector" + +class Module + # Returns the name of the module containing this one. + # + # M::N.parent_name # => "M" + def parent_name + if defined?(@parent_name) + @parent_name + else + parent_name = name =~ /::[^:]+\Z/ ? $`.freeze : nil + @parent_name = parent_name unless frozen? + parent_name + end + end + + # Returns the module which contains this one according to its name. + # + # module M + # module N + # end + # end + # X = M::N + # + # M::N.parent # => M + # X.parent # => M + # + # The parent of top-level and anonymous modules is Object. + # + # M.parent # => Object + # Module.new.parent # => Object + def parent + parent_name ? ActiveSupport::Inflector.constantize(parent_name) : Object + end + + # Returns all the parents of this module according to its name, ordered from + # nested outwards. The receiver is not contained within the result. + # + # module M + # module N + # end + # end + # X = M::N + # + # M.parents # => [Object] + # M::N.parents # => [M, Object] + # X.parents # => [M, Object] + def parents + parents = [] + if parent_name + parts = parent_name.split("::") + until parts.empty? + parents << ActiveSupport::Inflector.constantize(parts * "::") + parts.pop + end + end + parents << Object unless parents.include? Object + parents + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/reachable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/reachable.rb new file mode 100644 index 0000000..e9cbda5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/reachable.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/string/inflections" + +class Module + def reachable? #:nodoc: + !anonymous? && name.safe_constantize.equal?(self) + end + deprecate :reachable? +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/redefine_method.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/redefine_method.rb new file mode 100644 index 0000000..a0a6622 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/redefine_method.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +class Module + if RUBY_VERSION >= "2.3" + # Marks the named method as intended to be redefined, if it exists. + # Suppresses the Ruby method redefinition warning. Prefer + # #redefine_method where possible. 
+ def silence_redefinition_of_method(method) + if method_defined?(method) || private_method_defined?(method) + # This suppresses the "method redefined" warning; the self-alias + # looks odd, but means we don't need to generate a unique name + alias_method method, method + end + end + else + def silence_redefinition_of_method(method) + if method_defined?(method) || private_method_defined?(method) + alias_method :__rails_redefine, method + remove_method :__rails_redefine + end + end + end + + # Replaces the existing method definition, if there is one, with the passed + # block as its body. + def redefine_method(method, &block) + visibility = method_visibility(method) + silence_redefinition_of_method(method) + define_method(method, &block) + send(visibility, method) + end + + # Replaces the existing singleton method definition, if there is one, with + # the passed block as its body. + def redefine_singleton_method(method, &block) + singleton_class.redefine_method(method, &block) + end + + def method_visibility(method) # :nodoc: + case + when private_method_defined?(method) + :private + when protected_method_defined?(method) + :protected + else + :public + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/remove_method.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/remove_method.rb new file mode 100644 index 0000000..97eb5f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/module/remove_method.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" + +class Module + # Removes the named method, if it exists. + def remove_possible_method(method) + if method_defined?(method) || private_method_defined?(method) + undef_method(method) + end + end + + # Removes the named singleton method, if it exists. + def remove_possible_singleton_method(method) + singleton_class.remove_possible_method(method) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/name_error.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/name_error.rb new file mode 100644 index 0000000..6d37cd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/name_error.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +class NameError + # Extract the name of the missing constant from the exception message. + # + # begin + # HelloWorld + # rescue NameError => e + # e.missing_name + # end + # # => "HelloWorld" + def missing_name + # Since ruby v2.3.0 `did_you_mean` gem is loaded by default. + # It extends NameError#message with spell corrections which are SLOW. + # We should use original_message message instead. + message = respond_to?(:original_message) ? original_message : self.message + + if /undefined local variable or method/ !~ message + $1 if /((::)?([A-Z]\w*)(::[A-Z]\w*)*)$/ =~ message + end + end + + # Was this exception raised because the given name was missing? + # + # begin + # HelloWorld + # rescue NameError => e + # e.missing_name?("HelloWorld") + # end + # # => true + def missing_name?(name) + if name.is_a? 
Symbol + self.name == name + else + missing_name == name.to_s + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric.rb new file mode 100644 index 0000000..0b04e35 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/numeric/bytes" +require "active_support/core_ext/numeric/time" +require "active_support/core_ext/numeric/inquiry" +require "active_support/core_ext/numeric/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/bytes.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/bytes.rb new file mode 100644 index 0000000..b002eba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/bytes.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +class Numeric + KILOBYTE = 1024 + MEGABYTE = KILOBYTE * 1024 + GIGABYTE = MEGABYTE * 1024 + TERABYTE = GIGABYTE * 1024 + PETABYTE = TERABYTE * 1024 + EXABYTE = PETABYTE * 1024 + + # Enables the use of byte calculations and declarations, like 45.bytes + 2.6.megabytes + # + # 2.bytes # => 2 + def bytes + self + end + alias :byte :bytes + + # Returns the number of bytes equivalent to the kilobytes provided. + # + # 2.kilobytes # => 2048 + def kilobytes + self * KILOBYTE + end + alias :kilobyte :kilobytes + + # Returns the number of bytes equivalent to the megabytes provided. + # + # 2.megabytes # => 2_097_152 + def megabytes + self * MEGABYTE + end + alias :megabyte :megabytes + + # Returns the number of bytes equivalent to the gigabytes provided. + # + # 2.gigabytes # => 2_147_483_648 + def gigabytes + self * GIGABYTE + end + alias :gigabyte :gigabytes + + # Returns the number of bytes equivalent to the terabytes provided. + # + # 2.terabytes # => 2_199_023_255_552 + def terabytes + self * TERABYTE + end + alias :terabyte :terabytes + + # Returns the number of bytes equivalent to the petabytes provided. + # + # 2.petabytes # => 2_251_799_813_685_248 + def petabytes + self * PETABYTE + end + alias :petabyte :petabytes + + # Returns the number of bytes equivalent to the exabytes provided. + # + # 2.exabytes # => 2_305_843_009_213_693_952 + def exabytes + self * EXABYTE + end + alias :exabyte :exabytes +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/conversions.rb new file mode 100644 index 0000000..f6c2713 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/conversions.rb @@ -0,0 +1,140 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/number_helper" +require "active_support/core_ext/module/deprecation" + +module ActiveSupport::NumericWithFormat + # Provides options for converting numbers into formatted strings. + # Options are provided for phone numbers, currency, percentage, + # precision, positional notation, file size and pretty printing. 
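The byte helpers shown above return plain Integers (or Floats), so they compose with ordinary arithmetic; a quick sketch, assuming only that the bytes extension is required:

require "active_support/core_ext/numeric/bytes"

2.kilobytes                 # => 2048
3.5.gigabytes               # => 3758096384.0
1.terabyte / 1.gigabyte     # => 1024
5.megabytes + 200.kilobytes # => 5447680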
+ # + # ==== Options + # + # For details on which formats use which options, see ActiveSupport::NumberHelper + # + # ==== Examples + # + # Phone Numbers: + # 5551234.to_s(:phone) # => "555-1234" + # 1235551234.to_s(:phone) # => "123-555-1234" + # 1235551234.to_s(:phone, area_code: true) # => "(123) 555-1234" + # 1235551234.to_s(:phone, delimiter: ' ') # => "123 555 1234" + # 1235551234.to_s(:phone, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # 1235551234.to_s(:phone, country_code: 1) # => "+1-123-555-1234" + # 1235551234.to_s(:phone, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # Currency: + # 1234567890.50.to_s(:currency) # => "$1,234,567,890.50" + # 1234567890.506.to_s(:currency) # => "$1,234,567,890.51" + # 1234567890.506.to_s(:currency, precision: 3) # => "$1,234,567,890.506" + # 1234567890.506.to_s(:currency, locale: :fr) # => "1 234 567 890,51 €" + # -1234567890.50.to_s(:currency, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # 1234567890.50.to_s(:currency, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # 1234567890.50.to_s(:currency, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + # + # Percentage: + # 100.to_s(:percentage) # => "100.000%" + # 100.to_s(:percentage, precision: 0) # => "100%" + # 1000.to_s(:percentage, delimiter: '.', separator: ',') # => "1.000,000%" + # 302.24398923423.to_s(:percentage, precision: 5) # => "302.24399%" + # 1000.to_s(:percentage, locale: :fr) # => "1 000,000%" + # 100.to_s(:percentage, format: '%n %') # => "100.000 %" + # + # Delimited: + # 12345678.to_s(:delimited) # => "12,345,678" + # 12345678.05.to_s(:delimited) # => "12,345,678.05" + # 12345678.to_s(:delimited, delimiter: '.') # => "12.345.678" + # 12345678.to_s(:delimited, delimiter: ',') # => "12,345,678" + # 12345678.05.to_s(:delimited, separator: ' ') # => "12,345,678 05" + # 12345678.05.to_s(:delimited, locale: :fr) # => "12 345 678,05" + # 98765432.98.to_s(:delimited, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # + # Rounded: + # 111.2345.to_s(:rounded) # => "111.235" + # 111.2345.to_s(:rounded, precision: 2) # => "111.23" + # 13.to_s(:rounded, precision: 5) # => "13.00000" + # 389.32314.to_s(:rounded, precision: 0) # => "389" + # 111.2345.to_s(:rounded, significant: true) # => "111" + # 111.2345.to_s(:rounded, precision: 1, significant: true) # => "100" + # 13.to_s(:rounded, precision: 5, significant: true) # => "13.000" + # 111.234.to_s(:rounded, locale: :fr) # => "111,234" + # 13.to_s(:rounded, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # 389.32314.to_s(:rounded, precision: 4, significant: true) # => "389.3" + # 1111.2345.to_s(:rounded, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + # + # Human-friendly size in Bytes: + # 123.to_s(:human_size) # => "123 Bytes" + # 1234.to_s(:human_size) # => "1.21 KB" + # 12345.to_s(:human_size) # => "12.1 KB" + # 1234567.to_s(:human_size) # => "1.18 MB" + # 1234567890.to_s(:human_size) # => "1.15 GB" + # 1234567890123.to_s(:human_size) # => "1.12 TB" + # 1234567890123456.to_s(:human_size) # => "1.1 PB" + # 1234567890123456789.to_s(:human_size) # => "1.07 EB" + # 1234567.to_s(:human_size, precision: 2) # => "1.2 MB" + # 483989.to_s(:human_size, precision: 2) # => "470 KB" + # 1234567.to_s(:human_size, precision: 2, separator: ',') # => "1,2 MB" + # 1234567890123.to_s(:human_size, precision: 5) # => "1.1228 TB" + # 524288000.to_s(:human_size, 
precision: 5) # => "500 MB" + # + # Human-friendly format: + # 123.to_s(:human) # => "123" + # 1234.to_s(:human) # => "1.23 Thousand" + # 12345.to_s(:human) # => "12.3 Thousand" + # 1234567.to_s(:human) # => "1.23 Million" + # 1234567890.to_s(:human) # => "1.23 Billion" + # 1234567890123.to_s(:human) # => "1.23 Trillion" + # 1234567890123456.to_s(:human) # => "1.23 Quadrillion" + # 1234567890123456789.to_s(:human) # => "1230 Quadrillion" + # 489939.to_s(:human, precision: 2) # => "490 Thousand" + # 489939.to_s(:human, precision: 4) # => "489.9 Thousand" + # 1234567.to_s(:human, precision: 4, + # significant: false) # => "1.2346 Million" + # 1234567.to_s(:human, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + def to_s(format = nil, options = nil) + case format + when nil + super() + when Integer, String + super(format) + when :phone + ActiveSupport::NumberHelper.number_to_phone(self, options || {}) + when :currency + ActiveSupport::NumberHelper.number_to_currency(self, options || {}) + when :percentage + ActiveSupport::NumberHelper.number_to_percentage(self, options || {}) + when :delimited + ActiveSupport::NumberHelper.number_to_delimited(self, options || {}) + when :rounded + ActiveSupport::NumberHelper.number_to_rounded(self, options || {}) + when :human + ActiveSupport::NumberHelper.number_to_human(self, options || {}) + when :human_size + ActiveSupport::NumberHelper.number_to_human_size(self, options || {}) + when Symbol + super() + else + super(format) + end + end +end + +# Ruby 2.4+ unifies Fixnum & Bignum into Integer. +if 0.class == Integer + Integer.prepend ActiveSupport::NumericWithFormat +else + Fixnum.prepend ActiveSupport::NumericWithFormat + Bignum.prepend ActiveSupport::NumericWithFormat +end +Float.prepend ActiveSupport::NumericWithFormat +BigDecimal.prepend ActiveSupport::NumericWithFormat diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/inquiry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/inquiry.rb new file mode 100644 index 0000000..15334c9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/inquiry.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +unless 1.respond_to?(:positive?) # TODO: Remove this file when we drop support to ruby < 2.3 + class Numeric + # Returns true if the number is positive. + # + # 1.positive? # => true + # 0.positive? # => false + # -1.positive? # => false + def positive? + self > 0 + end + + # Returns true if the number is negative. + # + # -1.negative? # => true + # 0.negative? # => false + # 1.negative? # => false + def negative? + self < 0 + end + end + + class Complex + undef :positive? + undef :negative? + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/time.rb new file mode 100644 index 0000000..bc4627f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/numeric/time.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/date/acts_like" + +class Numeric + # Returns a Duration instance matching the number of seconds provided. 
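These duration helpers return ActiveSupport::Duration objects rather than bare integers, so comparisons and conversions stay consistent; a brief sketch, assuming the activesupport gem is installed:

require "active_support/core_ext/numeric/time"

90.minutes == 1.5.hours # => true (both represent 5400 seconds)
2.weeks.to_i            # => 1209600
1.day.in_milliseconds   # => 86400000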
+ # + # 2.seconds # => 2 seconds + def seconds + ActiveSupport::Duration.seconds(self) + end + alias :second :seconds + + # Returns a Duration instance matching the number of minutes provided. + # + # 2.minutes # => 2 minutes + def minutes + ActiveSupport::Duration.minutes(self) + end + alias :minute :minutes + + # Returns a Duration instance matching the number of hours provided. + # + # 2.hours # => 2 hours + def hours + ActiveSupport::Duration.hours(self) + end + alias :hour :hours + + # Returns a Duration instance matching the number of days provided. + # + # 2.days # => 2 days + def days + ActiveSupport::Duration.days(self) + end + alias :day :days + + # Returns a Duration instance matching the number of weeks provided. + # + # 2.weeks # => 2 weeks + def weeks + ActiveSupport::Duration.weeks(self) + end + alias :week :weeks + + # Returns a Duration instance matching the number of fortnights provided. + # + # 2.fortnights # => 4 weeks + def fortnights + ActiveSupport::Duration.weeks(self * 2) + end + alias :fortnight :fortnights + + # Returns the number of milliseconds equivalent to the seconds provided. + # Used with the standard time durations. + # + # 2.in_milliseconds # => 2000 + # 1.hour.in_milliseconds # => 3600000 + def in_milliseconds + self * 1000 + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object.rb new file mode 100644 index 0000000..efd34cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/object/duplicable" +require "active_support/core_ext/object/deep_dup" +require "active_support/core_ext/object/try" +require "active_support/core_ext/object/inclusion" + +require "active_support/core_ext/object/conversions" +require "active_support/core_ext/object/instance_variables" + +require "active_support/core_ext/object/json" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/object/with_options" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/acts_like.rb new file mode 100644 index 0000000..403ee20 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/acts_like.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +class Object + # A duck-type assistant method. For example, Active Support extends Date + # to define an acts_like_date? method, and extends Time to define + # acts_like_time?. As a result, we can do x.acts_like?(:time) and + # x.acts_like?(:date) to do duck-type-safe comparisons, since classes that + # we want to act like Time simply need to define an acts_like_time? method. + def acts_like?(duck) + case duck + when :time + respond_to? :acts_like_time? + when :date + respond_to? :acts_like_date? + when :string + respond_to? :acts_like_string? + else + respond_to? :"acts_like_#{duck}?" 
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/blank.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/blank.rb new file mode 100644 index 0000000..2ca431a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/blank.rb @@ -0,0 +1,156 @@ +# frozen_string_literal: true + +require "active_support/core_ext/regexp" +require "concurrent/map" + +class Object + # An object is blank if it's false, empty, or a whitespace string. + # For example, +false+, '', ' ', +nil+, [], and {} are all blank. + # + # This simplifies + # + # !address || address.empty? + # + # to + # + # address.blank? + # + # @return [true, false] + def blank? + respond_to?(:empty?) ? !!empty? : !self + end + + # An object is present if it's not blank. + # + # @return [true, false] + def present? + !blank? + end + + # Returns the receiver if it's present otherwise returns +nil+. + # object.presence is equivalent to + # + # object.present? ? object : nil + # + # For example, something like + # + # state = params[:state] if params[:state].present? + # country = params[:country] if params[:country].present? + # region = state || country || 'US' + # + # becomes + # + # region = params[:state].presence || params[:country].presence || 'US' + # + # @return [Object] + def presence + self if present? + end +end + +class NilClass + # +nil+ is blank: + # + # nil.blank? # => true + # + # @return [true] + def blank? + true + end +end + +class FalseClass + # +false+ is blank: + # + # false.blank? # => true + # + # @return [true] + def blank? + true + end +end + +class TrueClass + # +true+ is not blank: + # + # true.blank? # => false + # + # @return [false] + def blank? + false + end +end + +class Array + # An array is blank if it's empty: + # + # [].blank? # => true + # [1,2,3].blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? +end + +class Hash + # A hash is blank if it's empty: + # + # {}.blank? # => true + # { key: 'value' }.blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? +end + +class String + BLANK_RE = /\A[[:space:]]*\z/ + ENCODED_BLANKS = Concurrent::Map.new do |h, enc| + h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) + end + + # A string is blank if it's empty or contains whitespaces only: + # + # ''.blank? # => true + # ' '.blank? # => true + # "\t\n\r".blank? # => true + # ' blah '.blank? # => false + # + # Unicode whitespace is supported: + # + # "\u00a0".blank? # => true + # + # @return [true, false] + def blank? + # The regexp that matches blank strings is expensive. For the case of empty + # strings we can speed up this method (~3.5x) with an empty? call. The + # penalty for the rest of strings is marginal. + empty? || + begin + BLANK_RE.match?(self) + rescue Encoding::CompatibilityError + ENCODED_BLANKS[self.encoding].match?(self) + end + end +end + +class Numeric #:nodoc: + # No number is blank: + # + # 1.blank? # => false + # 0.blank? # => false + # + # @return [false] + def blank? + false + end +end + +class Time #:nodoc: + # No Time is blank: + # + # Time.now.blank? # => false + # + # @return [false] + def blank? 
+ false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/conversions.rb new file mode 100644 index 0000000..624fb8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/conversions.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/hash/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/deep_dup.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/deep_dup.rb new file mode 100644 index 0000000..c66c5eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/deep_dup.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" + +class Object + # Returns a deep copy of object if it's duplicable. If it's + # not duplicable, returns +self+. + # + # object = Object.new + # dup = object.deep_dup + # dup.instance_variable_set(:@a, 1) + # + # object.instance_variable_defined?(:@a) # => false + # dup.instance_variable_defined?(:@a) # => true + def deep_dup + duplicable? ? dup : self + end +end + +class Array + # Returns a deep copy of array. + # + # array = [1, [2, 3]] + # dup = array.deep_dup + # dup[1][2] = 4 + # + # array[1][2] # => nil + # dup[1][2] # => 4 + def deep_dup + map(&:deep_dup) + end +end + +class Hash + # Returns a deep copy of hash. + # + # hash = { a: { b: 'b' } } + # dup = hash.deep_dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => nil + # dup[:a][:c] # => "c" + def deep_dup + hash = dup + each_pair do |key, value| + if key.frozen? && ::String === key + hash[key] = value.deep_dup + else + hash.delete(key) + hash[key.deep_dup] = value.deep_dup + end + end + hash + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/duplicable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/duplicable.rb new file mode 100644 index 0000000..9bb9908 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/duplicable.rb @@ -0,0 +1,156 @@ +# frozen_string_literal: true + +#-- +# Most objects are cloneable, but not all. For example you can't dup methods: +# +# method(:puts).dup # => TypeError: allocator undefined for Method +# +# Classes may signal their instances are not duplicable removing +dup+/+clone+ +# or raising exceptions from them. So, to dup an arbitrary object you normally +# use an optimistic approach and are ready to catch an exception, say: +# +# arbitrary_object.dup rescue object +# +# Rails dups objects in a few critical spots where they are not that arbitrary. +# That rescue is very expensive (like 40 times slower than a predicate), and it +# is often triggered. +# +# That's why we hardcode the following cases and check duplicable? instead of +# using that rescue idiom. +#++ +class Object + # Can you safely dup this object? + # + # False for method objects; + # true otherwise. + def duplicable? + true + end +end + +class NilClass + begin + nil.dup + rescue TypeError + + # +nil+ is not duplicable: + # + # nil.duplicable? 
# => false + # nil.dup # => TypeError: can't dup NilClass + def duplicable? + false + end + end +end + +class FalseClass + begin + false.dup + rescue TypeError + + # +false+ is not duplicable: + # + # false.duplicable? # => false + # false.dup # => TypeError: can't dup FalseClass + def duplicable? + false + end + end +end + +class TrueClass + begin + true.dup + rescue TypeError + + # +true+ is not duplicable: + # + # true.duplicable? # => false + # true.dup # => TypeError: can't dup TrueClass + def duplicable? + false + end + end +end + +class Symbol + begin + :symbol.dup # Ruby 2.4.x. + "symbol_from_string".to_sym.dup # Some symbols can't `dup` in Ruby 2.4.0. + rescue TypeError + + # Symbols are not duplicable: + # + # :my_symbol.duplicable? # => false + # :my_symbol.dup # => TypeError: can't dup Symbol + def duplicable? + false + end + end +end + +class Numeric + begin + 1.dup + rescue TypeError + + # Numbers are not duplicable: + # + # 3.duplicable? # => false + # 3.dup # => TypeError: can't dup Integer + def duplicable? + false + end + end +end + +require "bigdecimal" +class BigDecimal + # BigDecimals are duplicable: + # + # BigDecimal("1.2").duplicable? # => true + # BigDecimal("1.2").dup # => # + def duplicable? + true + end +end + +class Method + # Methods are not duplicable: + # + # method(:puts).duplicable? # => false + # method(:puts).dup # => TypeError: allocator undefined for Method + def duplicable? + false + end +end + +class Complex + begin + Complex(1).dup + rescue TypeError + + # Complexes are not duplicable: + # + # Complex(1).duplicable? # => false + # Complex(1).dup # => TypeError: can't copy Complex + def duplicable? + false + end + end +end + +class Rational + begin + Rational(1).dup + rescue TypeError + + # Rationals are not duplicable: + # + # Rational(1).duplicable? # => false + # Rational(1).dup # => TypeError: can't copy Rational + def duplicable? + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/inclusion.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/inclusion.rb new file mode 100644 index 0000000..6064e92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/inclusion.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +class Object + # Returns true if this object is included in the argument. Argument must be + # any object which responds to +#include?+. Usage: + # + # characters = ["Konata", "Kagami", "Tsukasa"] + # "Konata".in?(characters) # => true + # + # This will throw an +ArgumentError+ if the argument doesn't respond + # to +#include?+. + def in?(another_object) + another_object.include?(self) + rescue NoMethodError + raise ArgumentError.new("The parameter passed to #in? must respond to #include?") + end + + # Returns the receiver if it's included in the argument otherwise returns +nil+. + # Argument must be any object which responds to +#include?+. Usage: + # + # params[:bucket_type].presence_in %w( project calendar ) + # + # This will throw an +ArgumentError+ if the argument doesn't respond to +#include?+. + # + # @return [Object] + def presence_in(another_object) + in?(another_object) ? 
self : nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/instance_variables.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/instance_variables.rb new file mode 100644 index 0000000..12fdf84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/instance_variables.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +class Object + # Returns a hash with string keys that maps instance variable names without "@" to their + # corresponding values. + # + # class C + # def initialize(x, y) + # @x, @y = x, y + # end + # end + # + # C.new(0, 1).instance_values # => {"x" => 0, "y" => 1} + def instance_values + Hash[instance_variables.map { |name| [name[1..-1], instance_variable_get(name)] }] + end + + # Returns an array of instance variable names as strings including "@". + # + # class C + # def initialize(x, y) + # @x, @y = x, y + # end + # end + # + # C.new(0, 1).instance_variable_names # => ["@y", "@x"] + def instance_variable_names + instance_variables.map(&:to_s) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/json.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/json.rb new file mode 100644 index 0000000..f7c623f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/json.rb @@ -0,0 +1,227 @@ +# frozen_string_literal: true + +# Hack to load json gem first so we can overwrite its to_json. +require "json" +require "bigdecimal" +require "uri/generic" +require "pathname" +require "active_support/core_ext/big_decimal/conversions" # for #to_s +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +require "active_support/core_ext/object/instance_variables" +require "time" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/date_time/conversions" +require "active_support/core_ext/date/conversions" + +# The JSON gem adds a few modules to Ruby core classes containing :to_json definition, overwriting +# their default behavior. That said, we need to define the basic to_json method in all of them, +# otherwise they will always use to_json gem implementation, which is backwards incompatible in +# several cases (for instance, the JSON implementation for Hash does not work) with inheritance +# and consequently classes as ActiveSupport::OrderedHash cannot be serialized to json. +# +# On the other hand, we should avoid conflict with ::JSON.{generate,dump}(obj). Unfortunately, the +# JSON gem's encoder relies on its own to_json implementation to encode objects. Since it always +# passes a ::JSON::State object as the only argument to to_json, we can detect that and forward the +# calls to the original to_json method. +# +# It should be noted that when using ::JSON.{generate,dump} directly, ActiveSupport's encoder is +# bypassed completely. This means that as_json won't be invoked and the JSON gem will simply +# ignore any options it does not natively understand. This also means that ::JSON.{generate,dump} +# should give exactly the same results with or without active support. 
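In other words, the prepended module below only reroutes direct to_json calls; ::JSON.generate still bypasses as_json. A minimal sketch of the difference (assuming the full activesupport gem is loaded; the exact timestamp format follows the default use_standard_json_time_format and time_precision settings, so treat the outputs as approximate):

require "json"
require "active_support/all"

t = Time.utc(2021, 1, 1)

{ at: t }.to_json    # ActiveSupport encoder => "{\"at\":\"2021-01-01T00:00:00.000Z\"}"
JSON.generate(at: t) # json gem only         => "{\"at\":\"2021-01-01 00:00:00 UTC\"}"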
+ +module ActiveSupport + module ToJsonWithActiveSupportEncoder # :nodoc: + def to_json(options = nil) + if options.is_a?(::JSON::State) + # Called from JSON.{generate,dump}, forward it to JSON gem's to_json + super(options) + else + # to_json is being invoked directly, use ActiveSupport's encoder + ActiveSupport::JSON.encode(self, options) + end + end + end +end + +[Object, Array, FalseClass, Float, Hash, Integer, NilClass, String, TrueClass, Enumerable].reverse_each do |klass| + klass.prepend(ActiveSupport::ToJsonWithActiveSupportEncoder) +end + +class Object + def as_json(options = nil) #:nodoc: + if respond_to?(:to_hash) + to_hash.as_json(options) + else + instance_values.as_json(options) + end + end +end + +class Struct #:nodoc: + def as_json(options = nil) + Hash[members.zip(values)].as_json(options) + end +end + +class TrueClass + def as_json(options = nil) #:nodoc: + self + end +end + +class FalseClass + def as_json(options = nil) #:nodoc: + self + end +end + +class NilClass + def as_json(options = nil) #:nodoc: + self + end +end + +class String + def as_json(options = nil) #:nodoc: + self + end +end + +class Symbol + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Numeric + def as_json(options = nil) #:nodoc: + self + end +end + +class Float + # Encoding Infinity or NaN to JSON should return "null". The default returns + # "Infinity" or "NaN" which are not valid JSON. + def as_json(options = nil) #:nodoc: + finite? ? self : nil + end +end + +class BigDecimal + # A BigDecimal would be naturally represented as a JSON number. Most libraries, + # however, parse non-integer JSON numbers directly as floats. Clients using + # those libraries would get in general a wrong number and no way to recover + # other than manually inspecting the string with the JSON code itself. + # + # That's why a JSON string is returned. The JSON literal is not numeric, but + # if the other end knows by contract that the data is supposed to be a + # BigDecimal, it still has the chance to post-process the string and get the + # real value. + def as_json(options = nil) #:nodoc: + finite? ? to_s : nil + end +end + +class Regexp + def as_json(options = nil) #:nodoc: + to_s + end +end + +module Enumerable + def as_json(options = nil) #:nodoc: + to_a.as_json(options) + end +end + +class IO + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Range + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Array + def as_json(options = nil) #:nodoc: + map { |v| options ? v.as_json(options.dup) : v.as_json } + end +end + +class Hash + def as_json(options = nil) #:nodoc: + # create a subset of the hash by applying :only or :except + subset = if options + if attrs = options[:only] + slice(*Array(attrs)) + elsif attrs = options[:except] + except(*Array(attrs)) + else + self + end + else + self + end + + Hash[subset.map { |k, v| [k.to_s, options ? 
v.as_json(options.dup) : v.as_json] }] + end +end + +class Time + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end +end + +class Date + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + strftime("%Y-%m-%d") + else + strftime("%Y/%m/%d") + end + end +end + +class DateTime + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + strftime("%Y/%m/%d %H:%M:%S %z") + end + end +end + +class URI::Generic #:nodoc: + def as_json(options = nil) + to_s + end +end + +class Pathname #:nodoc: + def as_json(options = nil) + to_s + end +end + +class Process::Status #:nodoc: + def as_json(options = nil) + { exitstatus: exitstatus, pid: pid } + end +end + +class Exception + def as_json(options = nil) + to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_param.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_param.rb new file mode 100644 index 0000000..6d2bdd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_param.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/to_query" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_query.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_query.rb new file mode 100644 index 0000000..bac6ff9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/to_query.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +require "cgi" + +class Object + # Alias of to_s. + def to_param + to_s + end + + # Converts an object into a string suitable for use as a URL query string, + # using the given key as the param name. + def to_query(key) + "#{CGI.escape(key.to_param)}=#{CGI.escape(to_param.to_s)}" + end +end + +class NilClass + # Returns +self+. + def to_param + self + end +end + +class TrueClass + # Returns +self+. + def to_param + self + end +end + +class FalseClass + # Returns +self+. + def to_param + self + end +end + +class Array + # Calls to_param on all its elements and joins the result with + # slashes. This is used by url_for in Action Pack. + def to_param + collect(&:to_param).join "/" + end + + # Converts an array into a string suitable for use as a URL query string, + # using the given +key+ as the param name. + # + # ['Rails', 'coding'].to_query('hobbies') # => "hobbies%5B%5D=Rails&hobbies%5B%5D=coding" + def to_query(key) + prefix = "#{key}[]" + + if empty? 
+ nil.to_query(prefix) + else + collect { |value| value.to_query(prefix) }.join "&" + end + end +end + +class Hash + # Returns a string representation of the receiver suitable for use as a URL + # query string: + # + # {name: 'David', nationality: 'Danish'}.to_query + # # => "name=David&nationality=Danish" + # + # An optional namespace can be passed to enclose key names: + # + # {name: 'David', nationality: 'Danish'}.to_query('user') + # # => "user%5Bname%5D=David&user%5Bnationality%5D=Danish" + # + # The string pairs "key=value" that conform the query string + # are sorted lexicographically in ascending order. + # + # This method is also aliased as +to_param+. + def to_query(namespace = nil) + query = collect do |key, value| + unless (value.is_a?(Hash) || value.is_a?(Array)) && value.empty? + value.to_query(namespace ? "#{namespace}[#{key}]" : key) + end + end.compact + + query.sort! unless namespace.to_s.include?("[]") + query.join("&") + end + + alias_method :to_param, :to_query +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/try.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/try.rb new file mode 100644 index 0000000..c874691 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/try.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true + +require "delegate" + +module ActiveSupport + module Tryable #:nodoc: + def try(*a, &b) + try!(*a, &b) if a.empty? || respond_to?(a.first) + end + + def try!(*a, &b) + if a.empty? && block_given? + if b.arity == 0 + instance_eval(&b) + else + yield self + end + else + public_send(*a, &b) + end + end + end +end + +class Object + include ActiveSupport::Tryable + + ## + # :method: try + # + # :call-seq: + # try(*a, &b) + # + # Invokes the public method whose name goes as first argument just like + # +public_send+ does, except that if the receiver does not respond to it the + # call returns +nil+ rather than raising an exception. + # + # This method is defined to be able to write + # + # @person.try(:name) + # + # instead of + # + # @person.name if @person + # + # +try+ calls can be chained: + # + # @person.try(:spouse).try(:name) + # + # instead of + # + # @person.spouse.name if @person && @person.spouse + # + # +try+ will also return +nil+ if the receiver does not respond to the method: + # + # @person.try(:non_existing_method) # => nil + # + # instead of + # + # @person.non_existing_method if @person.respond_to?(:non_existing_method) # => nil + # + # +try+ returns +nil+ when called on +nil+ regardless of whether it responds + # to the method: + # + # nil.try(:to_i) # => nil, rather than 0 + # + # Arguments and blocks are forwarded to the method if invoked: + # + # @posts.try(:each_slice, 2) do |a, b| + # ... + # end + # + # The number of arguments in the signature must match. If the object responds + # to the method the call is attempted and +ArgumentError+ is still raised + # in case of argument mismatch. + # + # If +try+ is called without arguments it yields the receiver to a given + # block unless it is +nil+: + # + # @person.try do |p| + # ... + # end + # + # You can also call try with a block without accepting an argument, and the block + # will be instance_eval'ed instead: + # + # @person.try { upcase.truncate(50) } + # + # Please also note that +try+ is defined on +Object+. 
Therefore, it won't work + # with instances of classes that do not have +Object+ among their ancestors, + # like direct subclasses of +BasicObject+. + + ## + # :method: try! + # + # :call-seq: + # try!(*a, &b) + # + # Same as #try, but raises a +NoMethodError+ exception if the receiver is + # not +nil+ and does not implement the tried method. + # + # "a".try!(:upcase) # => "A" + # nil.try!(:upcase) # => nil + # 123.try!(:upcase) # => NoMethodError: undefined method `upcase' for 123:Integer +end + +class Delegator + include ActiveSupport::Tryable + + ## + # :method: try + # + # :call-seq: + # try(a*, &b) + # + # See Object#try + + ## + # :method: try! + # + # :call-seq: + # try!(a*, &b) + # + # See Object#try! +end + +class NilClass + # Calling +try+ on +nil+ always returns +nil+. + # It becomes especially helpful when navigating through associations that may return +nil+. + # + # nil.try(:name) # => nil + # + # Without +try+ + # @person && @person.children.any? && @person.children.first.name + # + # With +try+ + # @person.try(:children).try(:first).try(:name) + def try(*args) + nil + end + + # Calling +try!+ on +nil+ always returns +nil+. + # + # nil.try!(:name) # => nil + def try!(*args) + nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/with_options.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/with_options.rb new file mode 100644 index 0000000..2838fd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/object/with_options.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +require "active_support/option_merger" + +class Object + # An elegant way to factor duplication out of options passed to a series of + # method calls. Each method called in the block, with the block variable as + # the receiver, will have its options merged with the default +options+ hash + # provided. Each method called on the block variable must take an options + # hash as its final argument. + # + # Without with_options, this code contains duplication: + # + # class Account < ActiveRecord::Base + # has_many :customers, dependent: :destroy + # has_many :products, dependent: :destroy + # has_many :invoices, dependent: :destroy + # has_many :expenses, dependent: :destroy + # end + # + # Using with_options, we can remove the duplication: + # + # class Account < ActiveRecord::Base + # with_options dependent: :destroy do |assoc| + # assoc.has_many :customers + # assoc.has_many :products + # assoc.has_many :invoices + # assoc.has_many :expenses + # end + # end + # + # It can also be used with an explicit receiver: + # + # I18n.with_options locale: user.locale, scope: 'newsletter' do |i18n| + # subject i18n.t :subject + # body i18n.t :body, user_name: user.name + # end + # + # When you don't pass an explicit receiver, it executes the whole block + # in merging options context: + # + # class Account < ActiveRecord::Base + # with_options dependent: :destroy do + # has_many :customers + # has_many :products + # has_many :invoices + # has_many :expenses + # end + # end + # + # with_options can also be nested since the call is forwarded to its receiver. + # + # NOTE: Each nesting level will merge inherited defaults in addition to their own. + # + # class Post < ActiveRecord::Base + # with_options if: :persisted?, length: { minimum: 50 } do + # validates :content, if: -> { content.present? 
} + # end + # end + # + # The code is equivalent to: + # + # validates :content, length: { minimum: 50 }, if: -> { content.present? } + # + # Hence the inherited default for +if+ key is ignored. + # + # NOTE: You cannot call class methods implicitly inside of with_options. + # You can access these methods using the class name instead: + # + # class Phone < ActiveRecord::Base + # enum phone_number_type: [home: 0, office: 1, mobile: 2] + # + # with_options presence: true do + # validates :phone_number_type, inclusion: { in: Phone.phone_number_types.keys } + # end + # end + # + def with_options(options, &block) + option_merger = ActiveSupport::OptionMerger.new(self, options) + block.arity.zero? ? option_merger.instance_eval(&block) : block.call(option_merger) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range.rb new file mode 100644 index 0000000..78814fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/range/conversions" +require "active_support/core_ext/range/compare_range" +require "active_support/core_ext/range/include_time_with_zone" +require "active_support/core_ext/range/overlaps" +require "active_support/core_ext/range/each" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/compare_range.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/compare_range.rb new file mode 100644 index 0000000..704041f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/compare_range.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +module ActiveSupport + module CompareWithRange #:nodoc: + # Extends the default Range#=== to support range comparisons. + # (1..5) === (1..5) # => true + # (1..5) === (2..3) # => true + # (1..5) === (2..6) # => false + # + # The native Range#=== behavior is untouched. + # ('a'..'f') === ('c') # => true + # (5..9) === (11) # => false + def ===(value) + if value.is_a?(::Range) + # 1...10 includes 1..9 but it does not include 1..10. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + super(value.first) && value.last.send(operator, last) + else + super + end + end + + # Extends the default Range#include? to support range comparisons. + # (1..5).include?(1..5) # => true + # (1..5).include?(2..3) # => true + # (1..5).include?(2..6) # => false + # + # The native Range#include? behavior is untouched. + # ('a'..'f').include?('c') # => true + # (5..9).include?(11) # => false + def include?(value) + if value.is_a?(::Range) + # 1...10 includes 1..9 but it does not include 1..10. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + super(value.first) && value.last.send(operator, last) + else + super + end + end + + # Extends the default Range#cover? to support range comparisons. + # (1..5).cover?(1..5) # => true + # (1..5).cover?(2..3) # => true + # (1..5).cover?(2..6) # => false + # + # The native Range#cover? behavior is untouched. + # ('a'..'f').cover?('c') # => true + # (5..9).cover?(11) # => false + def cover?(value) + if value.is_a?(::Range) + # 1...10 covers 1..9 but it does not cover 1..10. + operator = exclude_end? && !value.exclude_end? ? 
:< : :<= + super(value.first) && value.last.send(operator, last) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::CompareWithRange) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/conversions.rb new file mode 100644 index 0000000..8832fbc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/conversions.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module ActiveSupport::RangeWithFormat + RANGE_FORMATS = { + db: -> (start, stop) do + case start + when String then "BETWEEN '#{start}' AND '#{stop}'" + else + "BETWEEN '#{start.to_s(:db)}' AND '#{stop.to_s(:db)}'" + end + end + } + + # Convert range to a formatted string. See RANGE_FORMATS for predefined formats. + # + # range = (1..100) # => 1..100 + # + # range.to_s # => "1..100" + # range.to_s(:db) # => "BETWEEN '1' AND '100'" + # + # == Adding your own range formats to to_s + # You can add your own formats to the Range::RANGE_FORMATS hash. + # Use the format name as the hash key and a Proc instance. + # + # # config/initializers/range_formats.rb + # Range::RANGE_FORMATS[:short] = ->(start, stop) { "Between #{start.to_s(:db)} and #{stop.to_s(:db)}" } + def to_s(format = :default) + if formatter = RANGE_FORMATS[format] + formatter.call(first, last) + else + super() + end + end + + alias_method :to_default_s, :to_s + alias_method :to_formatted_s, :to_s +end + +Range.prepend(ActiveSupport::RangeWithFormat) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/each.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/each.rb new file mode 100644 index 0000000..2f22cd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/each.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" + +module ActiveSupport + module EachTimeWithZone #:nodoc: + def each(&block) + ensure_iteration_allowed + super + end + + def step(n = 1, &block) + ensure_iteration_allowed + super + end + + private + + def ensure_iteration_allowed + raise TypeError, "can't iterate from #{first.class}" if first.is_a?(TimeWithZone) + end + end +end + +Range.prepend(ActiveSupport::EachTimeWithZone) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_range.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_range.rb new file mode 100644 index 0000000..4812e27 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_range.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/range/compare_range" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_time_with_zone.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_time_with_zone.rb new file mode 100644 index 0000000..5f80acf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/include_time_with_zone.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" + +module ActiveSupport + module IncludeTimeWithZone #:nodoc: + # Extends the default Range#include? 
to support ActiveSupport::TimeWithZone. + # + # (1.hour.ago..1.hour.from_now).include?(Time.current) # => true + # + def include?(value) + if first.is_a?(TimeWithZone) + cover?(value) + elsif last.is_a?(TimeWithZone) + cover?(value) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::IncludeTimeWithZone) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/overlaps.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/overlaps.rb new file mode 100644 index 0000000..f753607 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/range/overlaps.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +class Range + # Compare two ranges and see if they overlap each other + # (1..5).overlaps?(4..6) # => true + # (1..5).overlaps?(7..9) # => false + def overlaps?(other) + cover?(other.first) || other.cover?(first) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/regexp.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/regexp.rb new file mode 100644 index 0000000..efbd708 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/regexp.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +class Regexp #:nodoc: + def multiline? + options & MULTILINE == MULTILINE + end + + def match?(string, pos = 0) + !!match(string, pos) + end unless //.respond_to?(:match?) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/securerandom.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/securerandom.rb new file mode 100644 index 0000000..b4a491f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/securerandom.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require "securerandom" + +module SecureRandom + BASE58_ALPHABET = ("0".."9").to_a + ("A".."Z").to_a + ("a".."z").to_a - ["0", "O", "I", "l"] + # SecureRandom.base58 generates a random base58 string. + # + # The argument _n_ specifies the length, of the random string to be generated. + # + # If _n_ is not specified or is +nil+, 16 is assumed. It may be larger in the future. 
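A small usage sketch for SecureRandom.base58 (assuming the activesupport gem is installed; generated values are random, so the outputs shown are only representative):

require "active_support/core_ext/securerandom"

token = SecureRandom.base58(20)
token.length           # => 20
token.match?(/[0OIl]/) # => false, the visually ambiguous characters are excluded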
+ # + # The result may contain alphanumeric characters except 0, O, I and l + # + # p SecureRandom.base58 # => "4kUgL2pdQMSCQtjE" + # p SecureRandom.base58(24) # => "77TMHrHJFvFDwodq8w7Ev2m7" + # + def self.base58(n = 16) + SecureRandom.random_bytes(n).unpack("C*").map do |byte| + idx = byte % 64 + idx = SecureRandom.random_number(58) if idx >= 58 + BASE58_ALPHABET[idx] + end.join + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string.rb new file mode 100644 index 0000000..757d15c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/filters" +require "active_support/core_ext/string/multibyte" +require "active_support/core_ext/string/starts_ends_with" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/string/output_safety" +require "active_support/core_ext/string/exclude" +require "active_support/core_ext/string/strip" +require "active_support/core_ext/string/inquiry" +require "active_support/core_ext/string/indent" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/access.rb new file mode 100644 index 0000000..58591bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/access.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +class String + # If you pass a single integer, returns a substring of one character at that + # position. The first character of the string is at position 0, the next at + # position 1, and so on. If a range is supplied, a substring containing + # characters at offsets given by the range is returned. In both cases, if an + # offset is negative, it is counted from the end of the string. Returns +nil+ + # if the initial offset falls outside the string. Returns an empty string if + # the beginning of the range is greater than the end of the string. + # + # str = "hello" + # str.at(0) # => "h" + # str.at(1..3) # => "ell" + # str.at(-2) # => "l" + # str.at(-2..-1) # => "lo" + # str.at(5) # => nil + # str.at(5..-1) # => "" + # + # If a Regexp is given, the matching portion of the string is returned. + # If a String is given, that given string is returned if it occurs in + # the string. In both cases, +nil+ is returned if there is no match. + # + # str = "hello" + # str.at(/lo/) # => "lo" + # str.at(/ol/) # => nil + # str.at("lo") # => "lo" + # str.at("ol") # => nil + def at(position) + self[position] + end + + # Returns a substring from the given position to the end of the string. + # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.from(0) # => "hello" + # str.from(3) # => "lo" + # str.from(-2) # => "lo" + # + # You can mix it with +to+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def from(position) + self[position..-1] + end + + # Returns a substring from the beginning of the string to the given position. 
+ # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.to(0) # => "h" + # str.to(3) # => "hell" + # str.to(-2) # => "hell" + # + # You can mix it with +from+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def to(position) + self[0..position] + end + + # Returns the first character. If a limit is supplied, returns a substring + # from the beginning of the string until it reaches the limit value. If the + # given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.first # => "h" + # str.first(1) # => "h" + # str.first(2) # => "he" + # str.first(0) # => "" + # str.first(6) # => "hello" + def first(limit = 1) + if limit == 0 + "" + elsif limit >= size + dup + else + to(limit - 1) + end + end + + # Returns the last character of the string. If a limit is supplied, returns a substring + # from the end of the string until it reaches the limit value (counting backwards). If + # the given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.last # => "o" + # str.last(1) # => "o" + # str.last(2) # => "lo" + # str.last(0) # => "" + # str.last(6) # => "hello" + def last(limit = 1) + if limit == 0 + "" + elsif limit >= size + dup + else + from(-limit) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/behavior.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/behavior.rb new file mode 100644 index 0000000..35a5aa7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/behavior.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +class String + # Enables more predictable duck-typing on String-like classes. See Object#acts_like?. + def acts_like_string? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/conversions.rb new file mode 100644 index 0000000..29a88b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/conversions.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/time/calculations" + +class String + # Converts a string to a Time value. + # The +form+ can be either :utc or :local (default :local). + # + # The time is parsed using Time.parse method. + # If +form+ is :local, then the time is in the system timezone. + # If the date part is missing then the current date is used and if + # the time part is missing then it is assumed to be 00:00:00. + # + # "13-12-2012".to_time # => 2012-12-13 00:00:00 +0100 + # "06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13 06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time(:utc) # => 2012-12-13 06:12:00 UTC + # "12/13/2012".to_time # => ArgumentError: argument out of range + def to_time(form = :local) + parts = Date._parse(self, false) + used_keys = %i(year mon mday hour min sec sec_fraction offset) + return if (parts.keys & used_keys).empty? 
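# The lines below assemble a Time from whatever Date._parse recognised:
# missing date fields default to today's values, missing time fields default
# to zero, a parsed offset is respected, and when no offset is present a
# :utc form assumes 0 while :local falls back to the system zone.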
+ + now = Time.now + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, form == :utc ? 0 : nil) + ) + + form == :utc ? time.utc : time.to_time + end + + # Converts a string to a Date value. + # + # "1-1-2012".to_date # => Sun, 01 Jan 2012 + # "01/01/2012".to_date # => Sun, 01 Jan 2012 + # "2012-12-13".to_date # => Thu, 13 Dec 2012 + # "12/13/2012".to_date # => ArgumentError: invalid date + def to_date + ::Date.parse(self, false) unless blank? + end + + # Converts a string to a DateTime value. + # + # "1-1-2012".to_datetime # => Sun, 01 Jan 2012 00:00:00 +0000 + # "01/01/2012 23:59:59".to_datetime # => Sun, 01 Jan 2012 23:59:59 +0000 + # "2012-12-13 12:50".to_datetime # => Thu, 13 Dec 2012 12:50:00 +0000 + # "12/13/2012".to_datetime # => ArgumentError: invalid date + def to_datetime + ::DateTime.parse(self, false) unless blank? + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/exclude.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/exclude.rb new file mode 100644 index 0000000..8e46268 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/exclude.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +class String + # The inverse of String#include?. Returns true if the string + # does not include the other string. + # + # "hello".exclude? "lo" # => false + # "hello".exclude? "ol" # => true + # "hello".exclude? ?h # => false + def exclude?(string) + !include?(string) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/filters.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/filters.rb new file mode 100644 index 0000000..66e721e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/filters.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +class String + # Returns the string, first removing all whitespace on both ends of + # the string, and then changing remaining consecutive whitespace + # groups into one space each. + # + # Note that it handles both ASCII and Unicode whitespace. + # + # %{ Multi-line + # string }.squish # => "Multi-line string" + # " foo bar \n \t boo".squish # => "foo bar boo" + def squish + dup.squish! + end + + # Performs a destructive squish. See String#squish. + # str = " foo bar \n \t boo" + # str.squish! # => "foo bar boo" + # str # => "foo bar boo" + def squish! + gsub!(/[[:space:]]+/, " ") + strip! + self + end + + # Returns a new string with all occurrences of the patterns removed. + # str = "foo bar test" + # str.remove(" test") # => "foo bar" + # str.remove(" test", /bar/) # => "foo " + # str # => "foo bar test" + def remove(*patterns) + dup.remove!(*patterns) + end + + # Alters the string by removing all occurrences of the patterns. + # str = "foo bar test" + # str.remove!(" test", /bar/) # => "foo " + # str # => "foo " + def remove!(*patterns) + patterns.each do |pattern| + gsub! pattern, "" + end + + self + end + + # Truncates a given +text+ after a given length if +text+ is longer than length: + # + # 'Once upon a time in a world far far away'.truncate(27) + # # => "Once upon a time in a wo..." 
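# Aside: the 27 above is the total budget, so the kept text is 24 characters
# ("Once upon a time in a wo") plus the 3-character "..." omission.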
+ # + # Pass a string or regexp :separator to truncate +text+ at a natural break: + # + # 'Once upon a time in a world far far away'.truncate(27, separator: ' ') + # # => "Once upon a time in a..." + # + # 'Once upon a time in a world far far away'.truncate(27, separator: /\s/) + # # => "Once upon a time in a..." + # + # The last characters will be replaced with the :omission string (defaults to "...") + # for a total length not exceeding length: + # + # 'And they found that many people were sleeping better.'.truncate(25, omission: '... (continued)') + # # => "And they f... (continued)" + def truncate(truncate_at, options = {}) + return dup unless length > truncate_at + + omission = options[:omission] || "..." + length_with_room_for_omission = truncate_at - omission.length + stop = \ + if options[:separator] + rindex(options[:separator], length_with_room_for_omission) || length_with_room_for_omission + else + length_with_room_for_omission + end + + "#{self[0, stop]}#{omission}" + end + + # Truncates a given +text+ after a given number of words (words_count): + # + # 'Once upon a time in a world far far away'.truncate_words(4) + # # => "Once upon a time..." + # + # Pass a string or regexp :separator to specify a different separator of words: + # + # 'Once
<br>upon<br>a<br>time<br>in<br>a<br>world'.truncate_words(5, separator: '<br>') + # # => "Once<br>upon<br>a<br>time<br>
in..." + # + # The last characters will be replaced with the :omission string (defaults to "..."): + # + # 'And they found that many people were sleeping better.'.truncate_words(5, omission: '... (continued)') + # # => "And they found that many... (continued)" + def truncate_words(words_count, options = {}) + sep = options[:separator] || /\s+/ + sep = Regexp.escape(sep.to_s) unless Regexp === sep + if self =~ /\A((?>.+?#{sep}){#{words_count - 1}}.+?)#{sep}.*/m + $1 + (options[:omission] || "...") + else + dup + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/indent.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/indent.rb new file mode 100644 index 0000000..af9d181 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/indent.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +class String + # Same as +indent+, except it indents the receiver in-place. + # + # Returns the indented string, or +nil+ if there was nothing to indent. + def indent!(amount, indent_string = nil, indent_empty_lines = false) + indent_string = indent_string || self[/^[ \t]/] || " " + re = indent_empty_lines ? /^/ : /^(?!$)/ + gsub!(re, indent_string * amount) + end + + # Indents the lines in the receiver: + # + # < + # def some_method + # some_code + # end + # + # The second argument, +indent_string+, specifies which indent string to + # use. The default is +nil+, which tells the method to make a guess by + # peeking at the first indented line, and fallback to a space if there is + # none. + # + # " foo".indent(2) # => " foo" + # "foo\n\t\tbar".indent(2) # => "\t\tfoo\n\t\t\t\tbar" + # "foo".indent(2, "\t") # => "\t\tfoo" + # + # While +indent_string+ is typically one space or tab, it may be any string. + # + # The third argument, +indent_empty_lines+, is a flag that says whether + # empty lines should be indented. Default is false. + # + # "foo\n\nbar".indent(2) # => " foo\n\n bar" + # "foo\n\nbar".indent(2, nil, true) # => " foo\n \n bar" + # + def indent(amount, indent_string = nil, indent_empty_lines = false) + dup.tap { |_| _.indent!(amount, indent_string, indent_empty_lines) } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inflections.rb new file mode 100644 index 0000000..8af3017 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inflections.rb @@ -0,0 +1,254 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" +require "active_support/inflector/transliterate" + +# String inflections define new methods on the String class to transform names for different purposes. +# For instance, you can figure out the name of a table from the name of a class. +# +# 'ScaleScore'.tableize # => "scale_scores" +# +class String + # Returns the plural form of the word in the string. + # + # If the optional parameter +count+ is specified, + # the singular form will be returned if count == 1. + # For any other value of +count+ the plural will be returned. + # + # If the optional parameter +locale+ is specified, + # the word will be pluralized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. 
+ # + # 'post'.pluralize # => "posts" + # 'octopus'.pluralize # => "octopi" + # 'sheep'.pluralize # => "sheep" + # 'words'.pluralize # => "words" + # 'the blue mailman'.pluralize # => "the blue mailmen" + # 'CamelOctopus'.pluralize # => "CamelOctopi" + # 'apple'.pluralize(1) # => "apple" + # 'apple'.pluralize(2) # => "apples" + # 'ley'.pluralize(:es) # => "leyes" + # 'ley'.pluralize(1, :es) # => "ley" + def pluralize(count = nil, locale = :en) + locale = count if count.is_a?(Symbol) + if count == 1 + dup + else + ActiveSupport::Inflector.pluralize(self, locale) + end + end + + # The reverse of +pluralize+, returns the singular form of a word in a string. + # + # If the optional parameter +locale+ is specified, + # the word will be singularized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. + # + # 'posts'.singularize # => "post" + # 'octopi'.singularize # => "octopus" + # 'sheep'.singularize # => "sheep" + # 'word'.singularize # => "word" + # 'the blue mailmen'.singularize # => "the blue mailman" + # 'CamelOctopi'.singularize # => "CamelOctopus" + # 'leyes'.singularize(:es) # => "ley" + def singularize(locale = :en) + ActiveSupport::Inflector.singularize(self, locale) + end + + # +constantize+ tries to find a declared constant with the name specified + # in the string. It raises a NameError when the name is not in CamelCase + # or is not initialized. See ActiveSupport::Inflector.constantize + # + # 'Module'.constantize # => Module + # 'Class'.constantize # => Class + # 'blargle'.constantize # => NameError: wrong constant name blargle + def constantize + ActiveSupport::Inflector.constantize(self) + end + + # +safe_constantize+ tries to find a declared constant with the name specified + # in the string. It returns +nil+ when the name is not in CamelCase + # or is not initialized. See ActiveSupport::Inflector.safe_constantize + # + # 'Module'.safe_constantize # => Module + # 'Class'.safe_constantize # => Class + # 'blargle'.safe_constantize # => nil + def safe_constantize + ActiveSupport::Inflector.safe_constantize(self) + end + + # By default, +camelize+ converts strings to UpperCamelCase. If the argument to camelize + # is set to :lower then camelize produces lowerCamelCase. + # + # +camelize+ will also convert '/' to '::' which is useful for converting paths to namespaces. + # + # 'active_record'.camelize # => "ActiveRecord" + # 'active_record'.camelize(:lower) # => "activeRecord" + # 'active_record/errors'.camelize # => "ActiveRecord::Errors" + # 'active_record/errors'.camelize(:lower) # => "activeRecord::Errors" + def camelize(first_letter = :upper) + case first_letter + when :upper + ActiveSupport::Inflector.camelize(self, true) + when :lower + ActiveSupport::Inflector.camelize(self, false) + else + raise ArgumentError, "Invalid option, use either :upper or :lower." + end + end + alias_method :camelcase, :camelize + + # Capitalizes all the words and replaces some characters in the string to create + # a nicer looking title. +titleize+ is meant for creating pretty output. It is not + # used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # +titleize+ is also aliased as +titlecase+. 
+ # + # 'man from the boondocks'.titleize # => "Man From The Boondocks" + # 'x-men: the last stand'.titleize # => "X Men: The Last Stand" + # 'string_ending_with_id'.titleize(keep_id_suffix: true) # => "String Ending With Id" + def titleize(keep_id_suffix: false) + ActiveSupport::Inflector.titleize(self, keep_id_suffix: keep_id_suffix) + end + alias_method :titlecase, :titleize + + # The reverse of +camelize+. Makes an underscored, lowercase form from the expression in the string. + # + # +underscore+ will also change '::' to '/' to convert namespaces to paths. + # + # 'ActiveModel'.underscore # => "active_model" + # 'ActiveModel::Errors'.underscore # => "active_model/errors" + def underscore + ActiveSupport::Inflector.underscore(self) + end + + # Replaces underscores with dashes in the string. + # + # 'puni_puni'.dasherize # => "puni-puni" + def dasherize + ActiveSupport::Inflector.dasherize(self) + end + + # Removes the module part from the constant expression in the string. + # + # 'ActiveSupport::Inflector::Inflections'.demodulize # => "Inflections" + # 'Inflections'.demodulize # => "Inflections" + # '::Inflections'.demodulize # => "Inflections" + # ''.demodulize # => '' + # + # See also +deconstantize+. + def demodulize + ActiveSupport::Inflector.demodulize(self) + end + + # Removes the rightmost segment from the constant expression in the string. + # + # 'Net::HTTP'.deconstantize # => "Net" + # '::Net::HTTP'.deconstantize # => "::Net" + # 'String'.deconstantize # => "" + # '::String'.deconstantize # => "" + # ''.deconstantize # => "" + # + # See also +demodulize+. + def deconstantize + ActiveSupport::Inflector.deconstantize(self) + end + + # Replaces special characters in a string so that it may be used as part of a 'pretty' URL. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize}" + # end + # end + # + # @person = Person.find(1) + # # => # + # + # <%= link_to(@person.name, person_path) %> + # # => Donald E. Knuth + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize(preserve_case: true)}" + # end + # end + # + # @person = Person.find(1) + # # => # + # + # <%= link_to(@person.name, person_path) %> + # # => Donald E. Knuth + def parameterize(separator: "-", preserve_case: false) + ActiveSupport::Inflector.parameterize(self, separator: separator, preserve_case: preserve_case) + end + + # Creates the name of a table like Rails does for models to table names. This method + # uses the +pluralize+ method on the last word in the string. + # + # 'RawScaledScorer'.tableize # => "raw_scaled_scorers" + # 'ham_and_egg'.tableize # => "ham_and_eggs" + # 'fancyCategory'.tableize # => "fancy_categories" + def tableize + ActiveSupport::Inflector.tableize(self) + end + + # Creates a class name from a plural table name like Rails does for table names to models. + # Note that this returns a string and not a class. (To convert to an actual class + # follow +classify+ with +constantize+.) + # + # 'ham_and_eggs'.classify # => "HamAndEgg" + # 'posts'.classify # => "Post" + def classify + ActiveSupport::Inflector.classify(self) + end + + # Capitalizes the first word, turns underscores into spaces, and (by default)strips a + # trailing '_id' if present. + # Like +titleize+, this is meant for creating pretty output. + # + # The capitalization of the first word can be turned off by setting the + # optional parameter +capitalize+ to false. + # By default, this parameter is true. 
+ # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # 'employee_salary'.humanize # => "Employee salary" + # 'author_id'.humanize # => "Author" + # 'author_id'.humanize(capitalize: false) # => "author" + # '_id'.humanize # => "Id" + # 'author_id'.humanize(keep_id_suffix: true) # => "Author Id" + def humanize(capitalize: true, keep_id_suffix: false) + ActiveSupport::Inflector.humanize(self, capitalize: capitalize, keep_id_suffix: keep_id_suffix) + end + + # Converts just the first character to uppercase. + # + # 'what a Lovely Day'.upcase_first # => "What a Lovely Day" + # 'w'.upcase_first # => "W" + # ''.upcase_first # => "" + def upcase_first + ActiveSupport::Inflector.upcase_first(self) + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # 'Message'.foreign_key # => "message_id" + # 'Message'.foreign_key(false) # => "messageid" + # 'Admin::Post'.foreign_key # => "post_id" + def foreign_key(separate_class_name_and_id_with_underscore = true) + ActiveSupport::Inflector.foreign_key(self, separate_class_name_and_id_with_underscore) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inquiry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inquiry.rb new file mode 100644 index 0000000..a796d5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/inquiry.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/string_inquirer" + +class String + # Wraps the current string in the ActiveSupport::StringInquirer class, + # which gives you a prettier way to test for equality. + # + # env = 'production'.inquiry + # env.production? # => true + # env.development? # => false + def inquiry + ActiveSupport::StringInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/multibyte.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/multibyte.rb new file mode 100644 index 0000000..07c0d16 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/multibyte.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require "active_support/multibyte" + +class String + # == Multibyte proxy + # + # +mb_chars+ is a multibyte safe proxy for string methods. + # + # It creates and returns an instance of the ActiveSupport::Multibyte::Chars class which + # encapsulates the original string. A Unicode safe version of all the String methods are defined on this proxy + # class. If the proxy class doesn't respond to a certain method, it's forwarded to the encapsulated string. + # + # >> "lj".upcase + # => "lj" + # >> "lj".mb_chars.upcase.to_s + # => "LJ" + # + # NOTE: An above example is useful for pre Ruby 2.4. Ruby 2.4 supports Unicode case mappings. + # + # == Method chaining + # + # All the methods on the Chars proxy which normally return a string will return a Chars object. This allows + # method chaining on the result of any of these methods. 
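# (For the example below, assume name is a multibyte string such as
# "Claus Müller", which is 12 characters long but 13 bytes.)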
+ # + # name.mb_chars.reverse.length # => 12 + # + # == Interoperability and configuration + # + # The Chars object tries to be as interchangeable with String objects as possible: sorting and comparing between + # String and Char work like expected. The bang! methods change the internal string representation in the Chars + # object. Interoperability problems can be resolved easily with a +to_s+ call. + # + # For more information about the methods defined on the Chars proxy see ActiveSupport::Multibyte::Chars. For + # information about how to change the default Multibyte behavior see ActiveSupport::Multibyte. + def mb_chars + ActiveSupport::Multibyte.proxy_class.new(self) + end + + # Returns +true+ if string has utf_8 encoding. + # + # utf_8_str = "some string".encode "UTF-8" + # iso_str = "some string".encode "ISO-8859-1" + # + # utf_8_str.is_utf8? # => true + # iso_str.is_utf8? # => false + def is_utf8? + case encoding + when Encoding::UTF_8 + valid_encoding? + when Encoding::ASCII_8BIT, Encoding::US_ASCII + dup.force_encoding(Encoding::UTF_8).valid_encoding? + else + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/output_safety.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/output_safety.rb new file mode 100644 index 0000000..f3bdc29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/output_safety.rb @@ -0,0 +1,258 @@ +# frozen_string_literal: true + +require "erb" +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/module/redefine_method" +require "active_support/multibyte/unicode" + +class ERB + module Util + HTML_ESCAPE = { "&" => "&", ">" => ">", "<" => "<", '"' => """, "'" => "'" } + JSON_ESCAPE = { "&" => '\u0026', ">" => '\u003e', "<" => '\u003c', "\u2028" => '\u2028', "\u2029" => '\u2029' } + HTML_ESCAPE_ONCE_REGEXP = /["><']|&(?!([a-zA-Z]+|(#\d+)|(#[xX][\dA-Fa-f]+));)/ + JSON_ESCAPE_REGEXP = /[\u2028\u2029&><]/u + + # A utility method for escaping HTML tag characters. + # This method is also aliased as h. + # + # puts html_escape('is a > 0 & a < 10?') + # # => is a > 0 & a < 10? + def html_escape(s) + unwrapped_html_escape(s).html_safe + end + + silence_redefinition_of_method :h + alias h html_escape + + module_function :h + + singleton_class.silence_redefinition_of_method :html_escape + module_function :html_escape + + # HTML escapes strings but doesn't wrap them with an ActiveSupport::SafeBuffer. + # This method is not for public consumption! Seriously! + def unwrapped_html_escape(s) # :nodoc: + s = s.to_s + if s.html_safe? + s + else + CGI.escapeHTML(ActiveSupport::Multibyte::Unicode.tidy_bytes(s)) + end + end + module_function :unwrapped_html_escape + + # A utility method for escaping HTML without affecting existing escaped entities. + # + # html_escape_once('1 < 2 & 3') + # # => "1 < 2 & 3" + # + # html_escape_once('<< Accept & Checkout') + # # => "<< Accept & Checkout" + def html_escape_once(s) + result = ActiveSupport::Multibyte::Unicode.tidy_bytes(s.to_s).gsub(HTML_ESCAPE_ONCE_REGEXP, HTML_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :html_escape_once + + # A utility method for escaping HTML entities in JSON strings. Specifically, the + # &, > and < characters are replaced with their equivalent unicode escaped form - + # \u0026, \u003e, and \u003c. 
The Unicode sequences \u2028 and \u2029 are also + # escaped as they are treated as newline characters in some JavaScript engines. + # These sequences have identical meaning as the original characters inside the + # context of a JSON string, so assuming the input is a valid and well-formed + # JSON value, the output will have equivalent meaning when parsed: + # + # json = JSON.generate({ name: ""}) + # # => "{\"name\":\"\"}" + # + # json_escape(json) + # # => "{\"name\":\"\\u003C/script\\u003E\\u003Cscript\\u003Ealert('PWNED!!!')\\u003C/script\\u003E\"}" + # + # JSON.parse(json) == JSON.parse(json_escape(json)) + # # => true + # + # The intended use case for this method is to escape JSON strings before including + # them inside a script tag to avoid XSS vulnerability: + # + # + # + # It is necessary to +raw+ the result of +json_escape+, so that quotation marks + # don't get converted to " entities. +json_escape+ doesn't + # automatically flag the result as HTML safe, since the raw value is unsafe to + # use inside HTML attributes. + # + # If your JSON is being used downstream for insertion into the DOM, be aware of + # whether or not it is being inserted via +html()+. Most jQuery plugins do this. + # If that is the case, be sure to +html_escape+ or +sanitize+ any user-generated + # content returned by your JSON. + # + # If you need to output JSON elsewhere in your HTML, you can just do something + # like this, as any unsafe characters (including quotation marks) will be + # automatically escaped for you: + # + #
<div data-user-info="<%= current_user.to_json %>">...</div>
+ # + # WARNING: this helper only works with valid JSON. Using this on non-JSON values + # will open up serious XSS vulnerabilities. For example, if you replace the + # +current_user.to_json+ in the example above with user input instead, the browser + # will happily eval() that string as JavaScript. + # + # The escaping performed in this method is identical to those performed in the + # Active Support JSON encoder when +ActiveSupport.escape_html_entities_in_json+ is + # set to true. Because this transformation is idempotent, this helper can be + # applied even if +ActiveSupport.escape_html_entities_in_json+ is already true. + # + # Therefore, when you are unsure if +ActiveSupport.escape_html_entities_in_json+ + # is enabled, or if you are unsure where your JSON string originated from, it + # is recommended that you always apply this helper (other libraries, such as the + # JSON gem, do not provide this kind of protection by default; also some gems + # might override +to_json+ to bypass Active Support's encoder). + def json_escape(s) + result = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :json_escape + end +end + +class Object + def html_safe? + false + end +end + +class Numeric + def html_safe? + true + end +end + +module ActiveSupport #:nodoc: + class SafeBuffer < String + UNSAFE_STRING_METHODS = %w( + capitalize chomp chop delete downcase gsub lstrip next reverse rstrip + slice squeeze strip sub succ swapcase tr tr_s upcase + ) + + alias_method :original_concat, :concat + private :original_concat + + # Raised when ActiveSupport::SafeBuffer#safe_concat is called on unsafe buffers. + class SafeConcatError < StandardError + def initialize + super "Could not concatenate to the buffer because it is not html safe." + end + end + + def [](*args) + if args.size < 2 + super + elsif html_safe? + new_safe_buffer = super + + if new_safe_buffer + new_safe_buffer.instance_variable_set :@html_safe, true + end + + new_safe_buffer + else + to_str[*args] + end + end + + def safe_concat(value) + raise SafeConcatError unless html_safe? + original_concat(value) + end + + def initialize(str = "") + @html_safe = true + super + end + + def initialize_copy(other) + super + @html_safe = other.html_safe? + end + + def clone_empty + self[0, 0] + end + + def concat(value) + super(html_escape_interpolated_argument(value)) + end + alias << concat + + def prepend(value) + super(html_escape_interpolated_argument(value)) + end + + def +(other) + dup.concat(other) + end + + def %(args) + case args + when Hash + escaped_args = Hash[args.map { |k, arg| [k, html_escape_interpolated_argument(arg)] }] + else + escaped_args = Array(args).map { |arg| html_escape_interpolated_argument(arg) } + end + + self.class.new(super(escaped_args)) + end + + def html_safe? + defined?(@html_safe) && @html_safe + end + + def to_s + self + end + + def to_param + to_str + end + + def encode_with(coder) + coder.represent_object nil, to_str + end + + UNSAFE_STRING_METHODS.each do |unsafe_method| + if unsafe_method.respond_to?(unsafe_method) + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{unsafe_method}(*args, &block) # def capitalize(*args, &block) + to_str.#{unsafe_method}(*args, &block) # to_str.capitalize(*args, &block) + end # end + + def #{unsafe_method}!(*args) # def capitalize!(*args) + @html_safe = false # @html_safe = false + super # super + end # end + EOT + end + end + + private + + def html_escape_interpolated_argument(arg) + (!html_safe? || arg.html_safe?) ? 
arg : CGI.escapeHTML(arg.to_s) + end + end +end + +class String + # Marks a string as trusted safe. It will be inserted into HTML with no + # additional escaping performed. It is your responsibility to ensure that the + # string contains no malicious content. This method is equivalent to the + # +raw+ helper in views. It is recommended that you use +sanitize+ instead of + # this method. It should never be called on user input. + def html_safe + ActiveSupport::SafeBuffer.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/starts_ends_with.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/starts_ends_with.rb new file mode 100644 index 0000000..919eb7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/starts_ends_with.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +class String + alias_method :starts_with?, :start_with? + alias_method :ends_with?, :end_with? +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/strip.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/strip.rb new file mode 100644 index 0000000..cc26274 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/strip.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class String + # Strips indentation in heredocs. + # + # For example in + # + # if options[:usage] + # puts <<-USAGE.strip_heredoc + # This command does such and such. + # + # Supported options are: + # -h This message + # ... + # USAGE + # end + # + # the user would see the usage message aligned against the left margin. + # + # Technically, it looks for the least indented non-empty line + # in the whole string, and removes that amount of leading whitespace. 
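# A quick sketch of the effect on a plain string (two spaces of common
# indentation assumed):
#
#   "  one\n    two\n".strip_heredoc # => "one\n  two\n"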
+ def strip_heredoc + gsub(/^#{scan(/^[ \t]*(?=\S)/).min}/, "".freeze) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/zones.rb new file mode 100644 index 0000000..55dc231 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/string/zones.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/time/zones" + +class String + # Converts String to a TimeWithZone in the current zone if Time.zone or Time.zone_default + # is set, otherwise converts String to a Time via String#to_time + def in_time_zone(zone = ::Time.zone) + if zone + ::Time.find_zone!(zone).parse(self) + else + to_time + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time.rb new file mode 100644 index 0000000..c809def --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/compatibility" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/time/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/acts_like.rb new file mode 100644 index 0000000..8572b49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Time + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/calculations.rb new file mode 100644 index 0000000..120768d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/calculations.rb @@ -0,0 +1,315 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/conversions" +require "active_support/time_with_zone" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" +require "active_support/core_ext/date/calculations" + +class Time + include DateAndTime::Calculations + + COMMON_YEAR_DAYS_IN_MONTH = [nil, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + + class << self + # Overriding case equality method so that it returns true for ActiveSupport::TimeWithZone instances + def ===(other) + super || (self == Time && other.is_a?(ActiveSupport::TimeWithZone)) + end + + # Returns the number of days in the given month. + # If no year is specified, it will use the current year. + def days_in_month(month, year = current.year) + if month == 2 && ::Date.gregorian_leap?(year) + 29 + else + COMMON_YEAR_DAYS_IN_MONTH[month] + end + end + + # Returns the number of days in the given year. 
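# (The implementation below adds February's length to 337, the combined
# length of the other eleven months, so only leap years change the result.)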
+ # If no year is specified, it will use the current year. + def days_in_year(year = current.year) + days_in_month(2, year) + 337 + end + + # Returns Time.zone.now when Time.zone or config.time_zone are set, otherwise just returns Time.now. + def current + ::Time.zone ? ::Time.zone.now : ::Time.now + end + + # Layers additional behavior on Time.at so that ActiveSupport::TimeWithZone and DateTime + # instances can be used when called with a single argument + def at_with_coercion(*args) + return at_without_coercion(*args) if args.size != 1 + + # Time.at can be called with a time or numerical value + time_or_number = args.first + + if time_or_number.is_a?(ActiveSupport::TimeWithZone) || time_or_number.is_a?(DateTime) + at_without_coercion(time_or_number.to_f).getlocal + else + at_without_coercion(time_or_number) + end + end + alias_method :at_without_coercion, :at + alias_method :at, :at_with_coercion + + # Creates a +Time+ instance from an RFC 3339 string. + # + # Time.rfc3339('1999-12-31T14:00:00-10:00') # => 2000-01-01 00:00:00 -1000 + # + # If the time or offset components are missing then an +ArgumentError+ will be raised. + # + # Time.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + end + end + + # Returns the number of seconds since 00:00:00. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0.0 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296.0 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399.0 + def seconds_since_midnight + to_i - change(hour: 0).to_i + (usec / 1.0e+6) + end + + # Returns the number of seconds until 23:59:59. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # Time.new(2012, 8, 29, 0, 0, 0.5).sec_fraction # => (1/2) + def sec_fraction + subsec + end + + # Returns a new Time where one or more of the elements have been changed according + # to the +options+ parameter. The time options (:hour, :min, + # :sec, :usec, :nsec) reset cascadingly, so if only + # the hour is passed, then minute, sec, usec and nsec is set to 0. If the hour + # and minute is passed, then sec, usec and nsec is set to 0. The +options+ parameter + # takes a hash with any of these keys: :year, :month, :day, + # :hour, :min, :sec, :usec, :nsec, + # :offset. Pass either :usec or :nsec, not both. + # + # Time.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => Time.new(2012, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => Time.new(1981, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => Time.new(1981, 8, 29, 0, 0, 0) + def change(options) + new_year = options.fetch(:year, year) + new_month = options.fetch(:month, month) + new_day = options.fetch(:day, day) + new_hour = options.fetch(:hour, hour) + new_min = options.fetch(:min, options[:hour] ? 0 : min) + new_sec = options.fetch(:sec, (options[:hour] || options[:min]) ? 
0 : sec) + new_offset = options.fetch(:offset, nil) + + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_usec = Rational(new_nsec, 1000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 0 : Rational(nsec, 1000)) + end + + raise ArgumentError, "argument out of range" if new_usec >= 1000000 + + new_sec += Rational(new_usec, 1000000) + + if new_offset + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, new_offset) + elsif utc? + ::Time.utc(new_year, new_month, new_day, new_hour, new_min, new_sec) + elsif zone + ::Time.local(new_year, new_month, new_day, new_hour, new_min, new_sec) + else + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, utc_offset) + end + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The +options+ parameter + # takes a hash with any of these keys: :years, :months, + # :weeks, :days, :hours, :minutes, + # :seconds. + # + # Time.new(2015, 8, 1, 14, 35, 0).advance(seconds: 1) # => 2015-08-01 14:35:01 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(minutes: 1) # => 2015-08-01 14:36:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(hours: 1) # => 2015-08-01 15:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(days: 1) # => 2015-08-02 14:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(weeks: 1) # => 2015-08-08 14:35:00 -0700 + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.advance(options) + d = d.gregorian if d.julian? + time_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? 
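# Nothing smaller than a day to add, so skip the #since call and return the
# date-advanced time directly.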
+ time_advanced_by_date + else + time_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new Time representing the time a number of seconds ago, this is basically a wrapper around the Numeric extension + def ago(seconds) + since(-seconds) + end + + # Returns a new Time representing the time a number of seconds since the instance time + def since(seconds) + self + seconds + rescue + to_datetime.since(seconds) + end + alias :in :since + + # Returns a new Time representing the start of the day (0:00) + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new Time representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new Time representing the end of the day, 23:59:59.999999 + def end_of_day + change( + hour: 23, + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_day :end_of_day + + # Returns a new Time representing the start of the hour (x:00) + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new Time representing the end of the hour, x:59:59.999999 + def end_of_hour + change( + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new Time representing the start of the minute (x:xx:00) + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new Time representing the end of the minute, x:xx:59.999999 + def end_of_minute + change( + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_minute :end_of_minute + + def plus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.until(self) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Time#- can also be used to determine the number of seconds between two Time instances. + # We're layering on additional behavior so that ActiveSupport::TimeWithZone instances + # are coerced into values that Time#- will recognize + def minus_with_coercion(other) + other = other.comparable_time if other.respond_to?(:comparable_time) + other.is_a?(DateTime) ? to_f - other.to_f : minus_without_coercion(other) + end + alias_method :minus_without_coercion, :- + alias_method :-, :minus_with_coercion + + # Layers additional behavior on Time#<=> so that DateTime and ActiveSupport::TimeWithZone instances + # can be chronologically compared with a Time + def compare_with_coercion(other) + # we're avoiding Time#to_datetime and Time#to_time because they're expensive + if other.class == Time + compare_without_coercion(other) + elsif other.is_a?(Time) + compare_without_coercion(other.to_time) + else + to_datetime <=> other + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion + + # Layers additional behavior on Time#eql? so that ActiveSupport::TimeWithZone instances + # can be eql? 
to an equivalent Time + def eql_with_coercion(other) + # if other is an ActiveSupport::TimeWithZone, coerce a Time instance from it so we can do eql? comparison + other = other.comparable_time if other.respond_to?(:comparable_time) + eql_without_coercion(other) + end + alias_method :eql_without_coercion, :eql? + alias_method :eql?, :eql_with_coercion +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/compatibility.rb new file mode 100644 index 0000000..495e4f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/compatibility.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class Time + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return +self+ or the time in the local system timezone depending + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? self : getlocal + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/conversions.rb new file mode 100644 index 0000000..345cb28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/conversions.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" +require "active_support/values/time_zone" + +class Time + DATE_FORMATS = { + db: "%Y-%m-%d %H:%M:%S", + number: "%Y%m%d%H%M%S", + nsec: "%Y%m%d%H%M%S%9N", + usec: "%Y%m%d%H%M%S%6N", + time: "%H:%M", + short: "%d %b %H:%M", + long: "%B %d, %Y %H:%M", + long_ordinal: lambda { |time| + day_format = ActiveSupport::Inflector.ordinalize(time.day) + time.strftime("%B #{day_format}, %Y %H:%M") + }, + rfc822: lambda { |time| + offset_format = time.formatted_offset(false) + time.strftime("%a, %d %b %Y %H:%M:%S #{offset_format}") + }, + iso8601: lambda { |time| time.iso8601 } + } + + # Converts to a formatted string. See DATE_FORMATS for built-in formats. + # + # This method is aliased to to_s. + # + # time = Time.now # => 2007-01-18 06:10:17 -06:00 + # + # time.to_formatted_s(:time) # => "06:10" + # time.to_s(:time) # => "06:10" + # + # time.to_formatted_s(:db) # => "2007-01-18 06:10:17" + # time.to_formatted_s(:number) # => "20070118061017" + # time.to_formatted_s(:short) # => "18 Jan 06:10" + # time.to_formatted_s(:long) # => "January 18, 2007 06:10" + # time.to_formatted_s(:long_ordinal) # => "January 18th, 2007 06:10" + # time.to_formatted_s(:rfc822) # => "Thu, 18 Jan 2007 06:10:17 -0600" + # time.to_formatted_s(:iso8601) # => "2007-01-18T06:10:17-06:00" + # + # == Adding your own time formats to +to_formatted_s+ + # You can add your own formats to the Time::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a time argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = ->(time) { time.strftime("%B #{time.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = DATE_FORMATS[format] + formatter.respond_to?(:call) ? 
formatter.call(self).to_s : strftime(formatter) + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.local(2000).formatted_offset # => "-06:00" + # Time.local(2000).formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Aliased to +xmlschema+ for compatibility with +DateTime+ + alias_method :rfc3339, :xmlschema +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/zones.rb new file mode 100644 index 0000000..a5588fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/time/zones.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date_and_time/zones" + +class Time + include DateAndTime::Zones + class << self + attr_accessor :zone_default + + # Returns the TimeZone for the current request, if this has been set (via Time.zone=). + # If Time.zone has not been set for the current request, returns the TimeZone specified in config.time_zone. + def zone + Thread.current[:time_zone] || zone_default + end + + # Sets Time.zone to a TimeZone object for the current request/thread. + # + # This method accepts any of the following: + # + # * A Rails TimeZone object. + # * An identifier for a Rails TimeZone object (e.g., "Eastern Time (US & Canada)", -5.hours). + # * A TZInfo::Timezone object. + # * An identifier for a TZInfo::Timezone object (e.g., "America/New_York"). + # + # Here's an example of how you might set Time.zone on a per request basis and reset it when the request is done. + # current_user.time_zone just needs to return a string identifying the user's preferred time zone: + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # def set_time_zone + # if logged_in? + # Time.use_zone(current_user.time_zone) { yield } + # else + # yield + # end + # end + # end + def zone=(time_zone) + Thread.current[:time_zone] = find_zone!(time_zone) + end + + # Allows override of Time.zone locally inside supplied block; + # resets Time.zone to existing value when done. + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # private + # + # def set_time_zone + # Time.use_zone(current_user.timezone) { yield } + # end + # end + # + # NOTE: This won't affect any ActiveSupport::TimeWithZone + # objects that have already been created, e.g. any model timestamp + # attributes that have been read before the block will remain in + # the application's default timezone. + def use_zone(time_zone) + new_zone = find_zone!(time_zone) + begin + old_zone, ::Time.zone = ::Time.zone, new_zone + yield + ensure + ::Time.zone = old_zone + end + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Raises an +ArgumentError+ for invalid time zones. + # + # Time.find_zone! "America/New_York" # => # + # Time.find_zone! "EST" # => # + # Time.find_zone! -5.hours # => # + # Time.find_zone! 
nil # => nil + # Time.find_zone! false # => false + # Time.find_zone! "NOT-A-TIMEZONE" # => ArgumentError: Invalid Timezone: NOT-A-TIMEZONE + def find_zone!(time_zone) + if !time_zone || time_zone.is_a?(ActiveSupport::TimeZone) + time_zone + else + # Look up the timezone based on the identifier (unless we've been + # passed a TZInfo::Timezone) + unless time_zone.respond_to?(:period_for_local) + time_zone = ActiveSupport::TimeZone[time_zone] || TZInfo::Timezone.get(time_zone) + end + + # Return if a TimeZone instance, or wrap in a TimeZone instance if a TZInfo::Timezone + if time_zone.is_a?(ActiveSupport::TimeZone) + time_zone + else + ActiveSupport::TimeZone.create(time_zone.name, nil, time_zone) + end + end + rescue TZInfo::InvalidTimezoneIdentifier + raise ArgumentError, "Invalid Timezone: #{time_zone}" + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Returns +nil+ for invalid time zones. + # + # Time.find_zone "America/New_York" # => # + # Time.find_zone "NOT-A-TIMEZONE" # => nil + def find_zone(time_zone) + find_zone!(time_zone) rescue nil + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/uri.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/uri.rb new file mode 100644 index 0000000..9def947 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/core_ext/uri.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "uri" +if RUBY_VERSION < "2.6.0" + require "active_support/core_ext/module/redefine_method" + URI::Parser.class_eval do + silence_redefinition_of_method :unescape + def unescape(str, escaped = /%[a-fA-F\d]{2}/) + # TODO: Are we actually sure that ASCII == UTF-8? + # YK: My initial experiments say yes, but let's be sure please + enc = str.encoding + enc = Encoding::UTF_8 if enc == Encoding::US_ASCII + str.dup.force_encoding(Encoding::ASCII_8BIT).gsub(escaped) { |match| [match[1, 2].hex].pack("C") }.force_encoding(enc) + end + end +end + +module URI + class << self + def parser + @parser ||= URI::Parser.new + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/current_attributes.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/current_attributes.rb new file mode 100644 index 0000000..4e6d8e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/current_attributes.rb @@ -0,0 +1,195 @@ +# frozen_string_literal: true + +module ActiveSupport + # Abstract super class that provides a thread-isolated attributes singleton, which resets automatically + # before and after each request. This allows you to keep all the per-request attributes easily + # available to the whole system. 
+ # + # The following full app-like example demonstrates how to use a Current class to + # facilitate easy access to the global, per-request attributes without passing them deeply + # around everywhere: + # + # # app/models/current.rb + # class Current < ActiveSupport::CurrentAttributes + # attribute :account, :user + # attribute :request_id, :user_agent, :ip_address + # + # resets { Time.zone = nil } + # + # def user=(user) + # super + # self.account = user.account + # Time.zone = user.time_zone + # end + # end + # + # # app/controllers/concerns/authentication.rb + # module Authentication + # extend ActiveSupport::Concern + # + # included do + # before_action :authenticate + # end + # + # private + # def authenticate + # if authenticated_user = User.find_by(id: cookies.encrypted[:user_id]) + # Current.user = authenticated_user + # else + # redirect_to new_session_url + # end + # end + # end + # + # # app/controllers/concerns/set_current_request_details.rb + # module SetCurrentRequestDetails + # extend ActiveSupport::Concern + # + # included do + # before_action do + # Current.request_id = request.uuid + # Current.user_agent = request.user_agent + # Current.ip_address = request.ip + # end + # end + # end + # + # class ApplicationController < ActionController::Base + # include Authentication + # include SetCurrentRequestDetails + # end + # + # class MessagesController < ApplicationController + # def create + # Current.account.messages.create(message_params) + # end + # end + # + # class Message < ApplicationRecord + # belongs_to :creator, default: -> { Current.user } + # after_create { |message| Event.create(record: message) } + # end + # + # class Event < ApplicationRecord + # before_create do + # self.request_id = Current.request_id + # self.user_agent = Current.user_agent + # self.ip_address = Current.ip_address + # end + # end + # + # A word of caution: It's easy to overdo a global singleton like Current and tangle your model as a result. + # Current should only be used for a few, top-level globals, like account, user, and request details. + # The attributes stuck in Current should be used by more or less all actions on all requests. If you start + # sticking controller-specific attributes in there, you're going to create a mess. + class CurrentAttributes + include ActiveSupport::Callbacks + define_callbacks :reset + + class << self + # Returns singleton instance for this class in this thread. If none exists, one is created. + def instance + current_instances[name] ||= new + end + + # Declares one or more attributes that will be given both class and instance accessor methods. + def attribute(*names) + generated_attribute_methods.module_eval do + names.each do |name| + define_method(name) do + attributes[name.to_sym] + end + + define_method("#{name}=") do |attribute| + attributes[name.to_sym] = attribute + end + end + end + + names.each do |name| + define_singleton_method(name) do + instance.public_send(name) + end + + define_singleton_method("#{name}=") do |attribute| + instance.public_send("#{name}=", attribute) + end + end + end + + # Calls this block after #reset is called on the instance. Used for resetting external collaborators, like Time.zone. 
+ def resets(&block) + set_callback :reset, :after, &block + end + + delegate :set, :reset, to: :instance + + def reset_all # :nodoc: + current_instances.each_value(&:reset) + end + + def clear_all # :nodoc: + reset_all + current_instances.clear + end + + private + def generated_attribute_methods + @generated_attribute_methods ||= Module.new.tap { |mod| include mod } + end + + def current_instances + Thread.current[:current_attributes_instances] ||= {} + end + + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + end + + attr_accessor :attributes + + def initialize + @attributes = {} + end + + # Expose one or more attributes within a block. Old values are returned after the block concludes. + # Example demonstrating the common use of needing to set Current attributes outside the request-cycle: + # + # class Chat::PublicationJob < ApplicationJob + # def perform(attributes, room_number, creator) + # Current.set(person: creator) do + # Chat::Publisher.publish(attributes: attributes, room_number: room_number) + # end + # end + # end + def set(set_attributes) + old_attributes = compute_attributes(set_attributes.keys) + assign_attributes(set_attributes) + yield + ensure + assign_attributes(old_attributes) + end + + # Reset all attributes. Should be called before and after actions, when used as a per-request singleton. + def reset + run_callbacks :reset do + self.attributes = {} + end + end + + private + def assign_attributes(new_attributes) + new_attributes.each { |key, value| public_send("#{key}=", value) } + end + + def compute_attributes(keys) + keys.collect { |key| [ key, public_send(key) ] }.to_h + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies.rb new file mode 100644 index 0000000..3957700 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies.rb @@ -0,0 +1,753 @@ +# frozen_string_literal: true + +require "set" +require "thread" +require "concurrent/map" +require "pathname" +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/introspection" +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/load_error" +require "active_support/core_ext/name_error" +require "active_support/core_ext/string/starts_ends_with" +require "active_support/dependencies/interlock" +require "active_support/inflector" + +module ActiveSupport #:nodoc: + module Dependencies #:nodoc: + extend self + + mattr_accessor :interlock, default: Interlock.new + + # :doc: + + # Execute the supplied block without interference from any + # concurrent loads. + def self.run_interlock + Dependencies.interlock.running { yield } + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. 
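The CurrentAttributes class above is easiest to see end to end in a small script. The Current class name, the :user attribute, and the sample values below are illustrative, not part of the gem.

    require "active_support/all"

    class Current < ActiveSupport::CurrentAttributes
      attribute :user              # generates Current.user / Current.user=, thread-isolated
      resets { Time.zone = nil }   # after-reset callback, as in the example above
    end

    Current.user = "alice"
    puts Current.user              # => "alice"

    Current.set(user: "bob") do    # swap attributes for the duration of the block
      puts Current.user            # => "bob"
    end
    puts Current.user              # => "alice"

    Current.reset
    puts Current.user.inspect      # => nil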
+ def self.load_interlock + Dependencies.interlock.loading { yield } + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. + def self.unload_interlock + Dependencies.interlock.unloading { yield } + end + + # :nodoc: + + # Should we turn on Ruby warnings on the first load of dependent files? + mattr_accessor :warnings_on_first_load, default: false + + # All files ever loaded. + mattr_accessor :history, default: Set.new + + # All files currently loaded. + mattr_accessor :loaded, default: Set.new + + # Stack of files being loaded. + mattr_accessor :loading, default: [] + + # Should we load files or require them? + mattr_accessor :mechanism, default: ENV["NO_RELOAD"] ? :require : :load + + # The set of directories from which we may automatically load files. Files + # under these directories will be reloaded on each request in development mode, + # unless the directory also appears in autoload_once_paths. + mattr_accessor :autoload_paths, default: [] + + # The set of directories from which automatically loaded constants are loaded + # only once. All directories in this set must also be present in +autoload_paths+. + mattr_accessor :autoload_once_paths, default: [] + + # An array of qualified constant names that have been loaded. Adding a name + # to this array will cause it to be unloaded the next time Dependencies are + # cleared. + mattr_accessor :autoloaded_constants, default: [] + + # An array of constant names that need to be unloaded on every request. Used + # to allow arbitrary constants to be marked for unloading. + mattr_accessor :explicitly_unloadable_constants, default: [] + + # The WatchStack keeps a stack of the modules being watched as files are + # loaded. If a file in the process of being loaded (parent.rb) triggers the + # load of another file (child.rb) the stack will ensure that child.rb + # handles the new constants. + # + # If child.rb is being autoloaded, its constants will be added to + # autoloaded_constants. If it was being required, they will be discarded. + # + # This is handled by walking back up the watch stack and adding the constants + # found by child.rb to the list of original constants in parent.rb. + class WatchStack + include Enumerable + + # @watching is a stack of lists of constants being watched. For instance, + # if parent.rb is autoloaded, the stack will look like [[Object]]. If + # parent.rb then requires namespace/child.rb, the stack will look like + # [[Object], [Namespace]]. + + attr_reader :watching + + def initialize + @watching = [] + @stack = Hash.new { |h, k| h[k] = [] } + end + + def each(&block) + @stack.each(&block) + end + + def watching? + !@watching.empty? + end + + # Returns a list of new constants found since the last call to + # watch_namespaces. + def new_constants + constants = [] + + # Grab the list of namespaces that we're looking for new constants under + @watching.last.each do |namespace| + # Retrieve the constants that were present under the namespace when watch_namespaces + # was originally called + original_constants = @stack[namespace].last + + mod = Inflector.constantize(namespace) if Dependencies.qualified_const_defined?(namespace) + next unless mod.is_a?(Module) + + # Get a list of the constants that were added + new_constants = mod.constants(false) - original_constants + + # @stack[namespace] returns an Array of the constants that are being evaluated + # for that namespace. 
For instance, if parent.rb requires child.rb, the first + # element of @stack[Object] will be an Array of the constants that were present + # before parent.rb was required. The second element will be an Array of the + # constants that were present before child.rb was required. + @stack[namespace].each do |namespace_constants| + namespace_constants.concat(new_constants) + end + + # Normalize the list of new constants, and add them to the list we will return + new_constants.each do |suffix| + constants << ([namespace, suffix] - ["Object"]).join("::".freeze) + end + end + constants + ensure + # A call to new_constants is always called after a call to watch_namespaces + pop_modules(@watching.pop) + end + + # Add a set of modules to the watch stack, remembering the initial + # constants. + def watch_namespaces(namespaces) + @watching << namespaces.map do |namespace| + module_name = Dependencies.to_constant_name(namespace) + original_constants = Dependencies.qualified_const_defined?(module_name) ? + Inflector.constantize(module_name).constants(false) : [] + + @stack[module_name] << original_constants + module_name + end + end + + private + def pop_modules(modules) + modules.each { |mod| @stack[mod].pop } + end + end + + # An internal stack used to record which constants are loaded by any block. + mattr_accessor :constant_watch_stack, default: WatchStack.new + + # Module includes this module. + module ModuleConstMissing #:nodoc: + def self.append_features(base) + base.class_eval do + # Emulate #exclude via an ivar + return if defined?(@_const_missing) && @_const_missing + @_const_missing = instance_method(:const_missing) + remove_method(:const_missing) + end + super + end + + def self.exclude_from(base) + base.class_eval do + define_method :const_missing, @_const_missing + @_const_missing = nil + end + end + + def const_missing(const_name) + from_mod = anonymous? ? guess_for_anonymous(const_name) : self + Dependencies.load_missing_constant(from_mod, const_name) + end + + # We assume that the name of the module reflects the nesting + # (unless it can be proven that is not the case) and the path to the file + # that defines the constant. Anonymous modules cannot follow these + # conventions and therefore we assume that the user wants to refer to a + # top-level constant. + def guess_for_anonymous(const_name) + if Object.const_defined?(const_name) + raise NameError.new "#{const_name} cannot be autoloaded from an anonymous class or module", const_name + else + Object + end + end + + def unloadable(const_desc = self) + super(const_desc) + end + end + + # Object includes this module. + module Loadable #:nodoc: + def self.exclude_from(base) + base.class_eval do + define_method(:load, Kernel.instance_method(:load)) + private :load + end + end + + def require_or_load(file_name) + Dependencies.require_or_load(file_name) + end + + # :doc: + + # Interprets a file using mechanism and marks its defined + # constants as autoloaded. file_name can be either a string or + # respond to to_path. + # + # Use this method in code that absolutely needs a certain constant to be + # defined at that point. A typical use case is to make constant name + # resolution deterministic for constants with the same relative name in + # different namespaces whose evaluation would depend on load order + # otherwise. 
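A sketch of how the classic autoloader defined here is typically driven from the outside; the app/models path and the Widget constant are hypothetical.

    require "active_support/all"
    require "active_support/dependencies"   # hooks const_missing via ActiveSupport::Dependencies.hook!

    # Hypothetical layout: ./app/models/widget.rb defines `class Widget; end`.
    ActiveSupport::Dependencies.autoload_paths << File.expand_path("app/models", __dir__)

    Widget.new                              # const_missing -> load_missing_constant loads widget.rb
    require_dependency "widget"             # explicit form; a no-op here since widget.rb is already loaded

    ActiveSupport::Dependencies.clear       # unloads autoloaded constants; Widget reloads on next reference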
+ def require_dependency(file_name, message = "No such file to load -- %s.rb") + file_name = file_name.to_path if file_name.respond_to?(:to_path) + unless file_name.is_a?(String) + raise ArgumentError, "the file name must either be a String or implement #to_path -- you passed #{file_name.inspect}" + end + + Dependencies.depend_on(file_name, message) + end + + # :nodoc: + + def load_dependency(file) + if Dependencies.load? && Dependencies.constant_watch_stack.watching? + descs = Dependencies.constant_watch_stack.watching.flatten.uniq + + Dependencies.new_constants_in(*descs) { yield } + else + yield + end + rescue Exception => exception # errors from loading file + exception.blame_file! file if exception.respond_to? :blame_file! + raise + end + + # Mark the given constant as unloadable. Unloadable constants are removed + # each time dependencies are cleared. + # + # Note that marking a constant for unloading need only be done once. Setup + # or init scripts may list each unloadable constant that may need unloading; + # each constant will be removed for every subsequent clear, as opposed to + # for the first clear. + # + # The provided constant descriptor may be a (non-anonymous) module or class, + # or a qualified constant name as a string or symbol. + # + # Returns +true+ if the constant was not previously marked for unloading, + # +false+ otherwise. + def unloadable(const_desc) + Dependencies.mark_for_unload const_desc + end + + private + + def load(file, wrap = false) + result = false + load_dependency(file) { result = super } + result + end + + def require(file) + result = false + load_dependency(file) { result = super } + result + end + end + + # Exception file-blaming. + module Blamable #:nodoc: + def blame_file!(file) + (@blamed_files ||= []).unshift file + end + + def blamed_files + @blamed_files ||= [] + end + + def describe_blame + return nil if blamed_files.empty? + "This error occurred while loading the following files:\n #{blamed_files.join "\n "}" + end + + def copy_blame!(exc) + @blamed_files = exc.blamed_files.clone + self + end + end + + def hook! + Object.class_eval { include Loadable } + Module.class_eval { include ModuleConstMissing } + Exception.class_eval { include Blamable } + end + + def unhook! + ModuleConstMissing.exclude_from(Module) + Loadable.exclude_from(Object) + end + + def load? + mechanism == :load + end + + def depend_on(file_name, message = "No such file to load -- %s.rb") + path = search_for_file(file_name) + require_or_load(path || file_name) + rescue LoadError => load_error + if file_name = load_error.message[/ -- (.*?)(\.rb)?$/, 1] + load_error.message.replace(message % file_name) + load_error.copy_blame!(load_error) + end + raise + end + + def clear + Dependencies.unload_interlock do + loaded.clear + loading.clear + remove_unloadable_constants! + end + end + + def require_or_load(file_name, const_path = nil) + file_name = $` if file_name =~ /\.rb\z/ + expanded = File.expand_path(file_name) + return if loaded.include?(expanded) + + Dependencies.load_interlock do + # Maybe it got loaded while we were waiting for our lock: + return if loaded.include?(expanded) + + # Record that we've seen this file *before* loading it to avoid an + # infinite loop with mutual dependencies. + loaded << expanded + loading << expanded + + begin + if load? + # Enable warnings if this file has not been loaded before and + # warnings_on_first_load is set. + load_args = ["#{file_name}.rb"] + load_args << const_path unless const_path.nil? 
+ + if !warnings_on_first_load || history.include?(expanded) + result = load_file(*load_args) + else + enable_warnings { result = load_file(*load_args) } + end + else + result = require file_name + end + rescue Exception + loaded.delete expanded + raise + ensure + loading.pop + end + + # Record history *after* loading so first load gets warnings. + history << expanded + result + end + end + + # Is the provided constant path defined? + def qualified_const_defined?(path) + Object.const_defined?(path, false) + end + + # Given +path+, a filesystem path to a ruby file, return an array of + # constant paths which would cause Dependencies to attempt to load this + # file. + def loadable_constants_for_path(path, bases = autoload_paths) + path = $` if path =~ /\.rb\z/ + expanded_path = File.expand_path(path) + paths = [] + + bases.each do |root| + expanded_root = File.expand_path(root) + next unless expanded_path.start_with?(expanded_root) + + root_size = expanded_root.size + next if expanded_path[root_size] != ?/.freeze + + nesting = expanded_path[(root_size + 1)..-1] + paths << nesting.camelize unless nesting.blank? + end + + paths.uniq! + paths + end + + # Search for a file in autoload_paths matching the provided suffix. + def search_for_file(path_suffix) + path_suffix = path_suffix.sub(/(\.rb)?$/, ".rb".freeze) + + autoload_paths.each do |root| + path = File.join(root, path_suffix) + return path if File.file? path + end + nil # Gee, I sure wish we had first_match ;-) + end + + # Does the provided path_suffix correspond to an autoloadable module? + # Instead of returning a boolean, the autoload base for this module is + # returned. + def autoloadable_module?(path_suffix) + autoload_paths.each do |load_path| + return load_path if File.directory? File.join(load_path, path_suffix) + end + nil + end + + def load_once_path?(path) + # to_s works around a ruby issue where String#starts_with?(Pathname) + # will raise a TypeError: no implicit conversion of Pathname into String + autoload_once_paths.any? { |base| path.starts_with? base.to_s } + end + + # Attempt to autoload the provided module name by searching for a directory + # matching the expected path suffix. If found, the module is created and + # assigned to +into+'s constants with the name +const_name+. Provided that + # the directory was loaded from a reloadable base path, it is added to the + # set of constants that are to be unloaded. + def autoload_module!(into, const_name, qualified_name, path_suffix) + return nil unless base_path = autoloadable_module?(path_suffix) + mod = Module.new + into.const_set const_name, mod + autoloaded_constants << qualified_name unless autoload_once_paths.include?(base_path) + autoloaded_constants.uniq! + mod + end + + # Load the file at the provided path. +const_paths+ is a set of qualified + # constant names. When loading the file, Dependencies will watch for the + # addition of these constants. Each that is defined will be marked as + # autoloaded, and will be removed when Dependencies.clear is next called. + # + # If the second parameter is left off, then Dependencies will construct a + # set of names that the file at +path+ may define. See + # +loadable_constants_for_path+ for more details. + def load_file(path, const_paths = loadable_constants_for_path(path)) + const_paths = [const_paths].compact unless const_paths.is_a? 
Array + parent_paths = const_paths.collect { |const_path| const_path[/.*(?=::)/] || ::Object } + + result = nil + newly_defined_paths = new_constants_in(*parent_paths) do + result = Kernel.load path + end + + autoloaded_constants.concat newly_defined_paths unless load_once_path?(path) + autoloaded_constants.uniq! + result + end + + # Returns the constant path for the provided parent and constant name. + def qualified_name_for(mod, name) + mod_name = to_constant_name mod + mod_name == "Object" ? name.to_s : "#{mod_name}::#{name}" + end + + # Load the constant named +const_name+ which is missing from +from_mod+. If + # it is not possible to load the constant into from_mod, try its parent + # module using +const_missing+. + def load_missing_constant(from_mod, const_name) + unless qualified_const_defined?(from_mod.name) && Inflector.constantize(from_mod.name).equal?(from_mod) + raise ArgumentError, "A copy of #{from_mod} has been removed from the module tree but is still active!" + end + + qualified_name = qualified_name_for from_mod, const_name + path_suffix = qualified_name.underscore + + file_path = search_for_file(path_suffix) + + if file_path + expanded = File.expand_path(file_path) + expanded.sub!(/\.rb\z/, "".freeze) + + if loading.include?(expanded) + raise "Circular dependency detected while autoloading constant #{qualified_name}" + else + require_or_load(expanded, qualified_name) + raise LoadError, "Unable to autoload constant #{qualified_name}, expected #{file_path} to define it" unless from_mod.const_defined?(const_name, false) + return from_mod.const_get(const_name) + end + elsif mod = autoload_module!(from_mod, const_name, qualified_name, path_suffix) + return mod + elsif (parent = from_mod.parent) && parent != from_mod && + ! from_mod.parents.any? { |p| p.const_defined?(const_name, false) } + # If our parents do not have a constant named +const_name+ then we are free + # to attempt to load upwards. If they do have such a constant, then this + # const_missing must be due to from_mod::const_name, which should not + # return constants from from_mod's parents. + begin + # Since Ruby does not pass the nesting at the point the unknown + # constant triggered the callback we cannot fully emulate constant + # name lookup and need to make a trade-off: we are going to assume + # that the nesting in the body of Foo::Bar is [Foo::Bar, Foo] even + # though it might not be. Counterexamples are + # + # class Foo::Bar + # Module.nesting # => [Foo::Bar] + # end + # + # or + # + # module M::N + # module S::T + # Module.nesting # => [S::T, M::N] + # end + # end + # + # for example. + return parent.const_missing(const_name) + rescue NameError => e + raise unless e.missing_name? qualified_name_for(parent, const_name) + end + end + + name_error = NameError.new("uninitialized constant #{qualified_name}", const_name) + name_error.set_backtrace(caller.reject { |l| l.starts_with? __FILE__ }) + raise name_error + end + + # Remove the constants that have been autoloaded, and those that have been + # marked for unloading. Before each constant is removed a callback is sent + # to its class/module if it implements +before_remove_const+. + # + # The callback implementation should be restricted to cleaning up caches, etc. + # as the environment will be in an inconsistent state, e.g. other constants + # may have already been unloaded and not accessible. + def remove_unloadable_constants! + autoloaded_constants.each { |const| remove_constant const } + autoloaded_constants.clear + Reference.clear! 
+ explicitly_unloadable_constants.each { |const| remove_constant const } + end + + class ClassCache + def initialize + @store = Concurrent::Map.new + end + + def empty? + @store.empty? + end + + def key?(key) + @store.key?(key) + end + + def get(key) + key = key.name if key.respond_to?(:name) + @store[key] ||= Inflector.constantize(key) + end + alias :[] :get + + def safe_get(key) + key = key.name if key.respond_to?(:name) + @store[key] ||= Inflector.safe_constantize(key) + end + + def store(klass) + return self unless klass.respond_to?(:name) + raise(ArgumentError, "anonymous classes cannot be cached") if klass.name.empty? + @store[klass.name] = klass + self + end + + def clear! + @store.clear + end + end + + Reference = ClassCache.new + + # Store a reference to a class +klass+. + def reference(klass) + Reference.store klass + end + + # Get the reference for class named +name+. + # Raises an exception if referenced class does not exist. + def constantize(name) + Reference.get(name) + end + + # Get the reference for class named +name+ if one exists. + # Otherwise returns +nil+. + def safe_constantize(name) + Reference.safe_get(name) + end + + # Determine if the given constant has been automatically loaded. + def autoloaded?(desc) + return false if desc.is_a?(Module) && desc.anonymous? + name = to_constant_name desc + return false unless qualified_const_defined?(name) + autoloaded_constants.include?(name) + end + + # Will the provided constant descriptor be unloaded? + def will_unload?(const_desc) + autoloaded?(const_desc) || + explicitly_unloadable_constants.include?(to_constant_name(const_desc)) + end + + # Mark the provided constant name for unloading. This constant will be + # unloaded on each request, not just the next one. + def mark_for_unload(const_desc) + name = to_constant_name const_desc + if explicitly_unloadable_constants.include? name + false + else + explicitly_unloadable_constants << name + true + end + end + + # Run the provided block and detect the new constants that were loaded during + # its execution. Constants may only be regarded as 'new' once -- so if the + # block calls +new_constants_in+ again, then the constants defined within the + # inner call will not be reported in this one. + # + # If the provided block does not run to completion, and instead raises an + # exception, any new constants are regarded as being only partially defined + # and will be removed immediately. + def new_constants_in(*descs) + constant_watch_stack.watch_namespaces(descs) + success = false + + begin + yield # Now yield to the code that is to define new constants. + success = true + ensure + new_constants = constant_watch_stack.new_constants + + return new_constants if success + + # Remove partially loaded constants. + new_constants.each { |c| remove_constant(c) } + end + end + + # Convert the provided const desc to a qualified constant name (as a string). + # A module, class, symbol, or string may be provided. + def to_constant_name(desc) #:nodoc: + case desc + when String then desc.sub(/^::/, "") + when Symbol then desc.to_s + when Module + desc.name || + raise(ArgumentError, "Anonymous modules have no name to be referenced by") + else raise TypeError, "Not a valid constant descriptor: #{desc.inspect}" + end + end + + def remove_constant(const) #:nodoc: + # Normalize ::Foo, ::Object::Foo, Object::Foo, Object::Object::Foo, etc. as Foo. 
+ normalized = const.to_s.sub(/\A::/, "") + normalized.sub!(/\A(Object::)+/, "") + + constants = normalized.split("::") + to_remove = constants.pop + + # Remove the file path from the loaded list. + file_path = search_for_file(const.underscore) + if file_path + expanded = File.expand_path(file_path) + expanded.sub!(/\.rb\z/, "") + loaded.delete(expanded) + end + + if constants.empty? + parent = Object + else + # This method is robust to non-reachable constants. + # + # Non-reachable constants may be passed if some of the parents were + # autoloaded and already removed. It is easier to do a sanity check + # here than require the caller to be clever. We check the parent + # rather than the very const argument because we do not want to + # trigger Kernel#autoloads, see the comment below. + parent_name = constants.join("::") + return unless qualified_const_defined?(parent_name) + parent = constantize(parent_name) + end + + # In an autoloaded user.rb like this + # + # autoload :Foo, 'foo' + # + # class User < ActiveRecord::Base + # end + # + # we correctly register "Foo" as being autoloaded. But if the app does + # not use the "Foo" constant we need to be careful not to trigger + # loading "foo.rb" ourselves. While #const_defined? and #const_get? do + # require the file, #autoload? and #remove_const don't. + # + # We are going to remove the constant nonetheless ---which exists as + # far as Ruby is concerned--- because if the user removes the macro + # call from a class or module that were not autoloaded, as in the + # example above with Object, accessing to that constant must err. + unless parent.autoload?(to_remove) + begin + constantized = parent.const_get(to_remove, false) + rescue NameError + # The constant is no longer reachable, just skip it. + return + else + constantized.before_remove_const if constantized.respond_to?(:before_remove_const) + end + end + + begin + parent.instance_eval { remove_const to_remove } + rescue NameError + # The constant is no longer reachable, just skip it. + end + end + end +end + +ActiveSupport::Dependencies.hook! diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/autoload.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/autoload.rb new file mode 100644 index 0000000..1cee85d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/autoload.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" + +module ActiveSupport + # Autoload and eager load conveniences for your library. + # + # This module allows you to define autoloads based on + # Rails conventions (i.e. no need to define the path + # it is automatically guessed based on the filename) + # and also define a set of constants that needs to be + # eager loaded: + # + # module MyLib + # extend ActiveSupport::Autoload + # + # autoload :Model + # + # eager_autoload do + # autoload :Cache + # end + # end + # + # Then your library can be eager loaded by simply calling: + # + # MyLib.eager_load! 
+ module Autoload + def self.extended(base) # :nodoc: + base.class_eval do + @_autoloads = {} + @_under_path = nil + @_at_path = nil + @_eager_autoload = false + end + end + + def autoload(const_name, path = @_at_path) + unless path + full = [name, @_under_path, const_name.to_s].compact.join("::") + path = Inflector.underscore(full) + end + + if @_eager_autoload + @_autoloads[const_name] = path + end + + super const_name, path + end + + def autoload_under(path) + @_under_path, old_path = path, @_under_path + yield + ensure + @_under_path = old_path + end + + def autoload_at(path) + @_at_path, old_path = path, @_at_path + yield + ensure + @_at_path = old_path + end + + def eager_autoload + old_eager, @_eager_autoload = @_eager_autoload, true + yield + ensure + @_eager_autoload = old_eager + end + + def eager_load! + @_autoloads.each_value { |file| require file } + end + + def autoloads + @_autoloads + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/interlock.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/interlock.rb new file mode 100644 index 0000000..948be75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/dependencies/interlock.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require "active_support/concurrency/share_lock" + +module ActiveSupport #:nodoc: + module Dependencies #:nodoc: + class Interlock + def initialize # :nodoc: + @lock = ActiveSupport::Concurrency::ShareLock.new + end + + def loading + @lock.exclusive(purpose: :load, compatible: [:load], after_compatible: [:load]) do + yield + end + end + + def unloading + @lock.exclusive(purpose: :unload, compatible: [:load, :unload], after_compatible: [:load, :unload]) do + yield + end + end + + def start_unloading + @lock.start_exclusive(purpose: :unload, compatible: [:load, :unload]) + end + + def done_unloading + @lock.stop_exclusive(compatible: [:load, :unload]) + end + + def start_running + @lock.start_sharing + end + + def done_running + @lock.stop_sharing + end + + def running + @lock.sharing do + yield + end + end + + def permit_concurrent_loads + @lock.yield_shares(compatible: [:load]) do + yield + end + end + + def raw_state(&block) # :nodoc: + @lock.raw_state(&block) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation.rb new file mode 100644 index 0000000..a1ad2ca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require "singleton" + +module ActiveSupport + # \Deprecation specifies the API used by Rails to deprecate methods, instance + # variables, objects and constants. + class Deprecation + # active_support.rb sets an autoload for ActiveSupport::Deprecation. + # + # If these requires were at the top of the file the constant would not be + # defined by the time their files were loaded. Since some of them reopen + # ActiveSupport::Deprecation its autoload would be triggered, resulting in + # a circular require warning for active_support/deprecation.rb. + # + # So, we define the constant first, and load dependencies later. 
+ require "active_support/deprecation/instance_delegator" + require "active_support/deprecation/behaviors" + require "active_support/deprecation/reporting" + require "active_support/deprecation/constant_accessor" + require "active_support/deprecation/method_wrappers" + require "active_support/deprecation/proxy_wrappers" + require "active_support/core_ext/module/deprecation" + + include Singleton + include InstanceDelegator + include Behavior + include Reporting + include MethodWrapper + + # The version number in which the deprecated behavior will be removed, by default. + attr_accessor :deprecation_horizon + + # It accepts two parameters on initialization. The first is a version of library + # and the second is a library name. + # + # ActiveSupport::Deprecation.new('2.0', 'MyLibrary') + def initialize(deprecation_horizon = "6.0", gem_name = "Rails") + self.gem_name = gem_name + self.deprecation_horizon = deprecation_horizon + # By default, warnings are not silenced and debugging is off. + self.silenced = false + self.debug = false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/behaviors.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/behaviors.rb new file mode 100644 index 0000000..3abd25a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/behaviors.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require "active_support/notifications" + +module ActiveSupport + # Raised when ActiveSupport::Deprecation::Behavior#behavior is set with :raise. + # You would set :raise, as a behavior to raise errors and proactively report exceptions from deprecations. + class DeprecationException < StandardError + end + + class Deprecation + # Default warning behaviors per Rails.env. + DEFAULT_BEHAVIORS = { + raise: ->(message, callstack, deprecation_horizon, gem_name) { + e = DeprecationException.new(message) + e.set_backtrace(callstack.map(&:to_s)) + raise e + }, + + stderr: ->(message, callstack, deprecation_horizon, gem_name) { + $stderr.puts(message) + $stderr.puts callstack.join("\n ") if debug + }, + + log: ->(message, callstack, deprecation_horizon, gem_name) { + logger = + if defined?(Rails.logger) && Rails.logger + Rails.logger + else + require "active_support/logger" + ActiveSupport::Logger.new($stderr) + end + logger.warn message + logger.debug callstack.join("\n ") if debug + }, + + notify: ->(message, callstack, deprecation_horizon, gem_name) { + notification_name = "deprecation.#{gem_name.underscore.tr('/', '_')}" + ActiveSupport::Notifications.instrument(notification_name, + message: message, + callstack: callstack, + gem_name: gem_name, + deprecation_horizon: deprecation_horizon) + }, + + silence: ->(message, callstack, deprecation_horizon, gem_name) {}, + } + + # Behavior module allows to determine how to display deprecation messages. + # You can create a custom behavior or set any from the +DEFAULT_BEHAVIORS+ + # constant. Available behaviors are: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to +$stderr+. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. + # + # Setting behaviors only affects deprecations that happen after boot time. + # For more information you can read the documentation of the +behavior=+ method. 
+ module Behavior + # Whether to print a backtrace along with the warning. + attr_accessor :debug + + # Returns the current behavior or if one isn't set, defaults to +:stderr+. + def behavior + @behavior ||= [DEFAULT_BEHAVIORS[:stderr]] + end + + # Sets the behavior to the specified value. Can be a single value, array, + # or an object that responds to +call+. + # + # Available behaviors: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to +$stderr+. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. + # + # Setting behaviors only affects deprecations that happen after boot time. + # Deprecation warnings raised by gems are not affected by this setting + # because they happen before Rails boots up. + # + # ActiveSupport::Deprecation.behavior = :stderr + # ActiveSupport::Deprecation.behavior = [:stderr, :log] + # ActiveSupport::Deprecation.behavior = MyCustomHandler + # ActiveSupport::Deprecation.behavior = ->(message, callstack, deprecation_horizon, gem_name) { + # # custom stuff + # } + def behavior=(behavior) + @behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || arity_coerce(b) } + end + + private + def arity_coerce(behavior) + unless behavior.respond_to?(:call) + raise ArgumentError, "#{behavior.inspect} is not a valid deprecation behavior." + end + + if behavior.arity == 4 || behavior.arity == -1 + behavior + else + -> message, callstack, _, _ { behavior.call(message, callstack) } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/constant_accessor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/constant_accessor.rb new file mode 100644 index 0000000..1ed0015 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/constant_accessor.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + # DeprecatedConstantAccessor transforms a constant into a deprecated one by + # hooking +const_missing+. + # + # It takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. + # + # The deprecated constant now returns the same object as the new one rather + # than a proxy object, so it can be used transparently in +rescue+ blocks + # etc. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # include ActiveSupport::Deprecation::DeprecatedConstantAccessor + # deprecate_constant 'PLANETS', 'PLANETS_POST_2006' + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. 
+ # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + module DeprecatedConstantAccessor + def self.included(base) + require "active_support/inflector/methods" + + extension = Module.new do + def const_missing(missing_const_name) + if class_variable_defined?(:@@_deprecated_constants) + if (replacement = class_variable_get(:@@_deprecated_constants)[missing_const_name.to_s]) + replacement[:deprecator].warn(replacement[:message] || "#{name}::#{missing_const_name} is deprecated! Use #{replacement[:new]} instead.", caller_locations) + return ActiveSupport::Inflector.constantize(replacement[:new].to_s) + end + end + super + end + + def deprecate_constant(const_name, new_constant, message: nil, deprecator: ActiveSupport::Deprecation.instance) + class_variable_set(:@@_deprecated_constants, {}) unless class_variable_defined?(:@@_deprecated_constants) + class_variable_get(:@@_deprecated_constants)[const_name.to_s] = { new: new_constant, message: message, deprecator: deprecator } + end + end + base.singleton_class.prepend extension + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/instance_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/instance_delegator.rb new file mode 100644 index 0000000..8beda37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/instance_delegator.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/singleton_class" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class Deprecation + module InstanceDelegator # :nodoc: + def self.included(base) + base.extend(ClassMethods) + base.singleton_class.prepend(OverrideDelegators) + base.public_class_method :new + end + + module ClassMethods # :nodoc: + def include(included_module) + included_module.instance_methods.each { |m| method_added(m) } + super + end + + def method_added(method_name) + singleton_class.delegate(method_name, to: :instance) + end + end + + module OverrideDelegators # :nodoc: + def warn(message = nil, callstack = nil) + callstack ||= caller_locations(2) + super + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/method_wrappers.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/method_wrappers.rb new file mode 100644 index 0000000..3c5d9ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/method_wrappers.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/array/extract_options" + +module ActiveSupport + class Deprecation + module MethodWrapper + # Declare that a method has been deprecated. + # + # class Fred + # def aaa; end + # def bbb; end + # def ccc; end + # def ddd; end + # def eee; end + # end + # + # Using the default deprecator: + # ActiveSupport::Deprecation.deprecate_methods(Fred, :aaa, bbb: :zzz, ccc: 'use Bar#ccc instead') + # # => Fred + # + # Fred.new.aaa + # # DEPRECATION WARNING: aaa is deprecated and will be removed from Rails 5.1. 
(called from irb_binding at (irb):10) + # # => nil + # + # Fred.new.bbb + # # DEPRECATION WARNING: bbb is deprecated and will be removed from Rails 5.1 (use zzz instead). (called from irb_binding at (irb):11) + # # => nil + # + # Fred.new.ccc + # # DEPRECATION WARNING: ccc is deprecated and will be removed from Rails 5.1 (use Bar#ccc instead). (called from irb_binding at (irb):12) + # # => nil + # + # Passing in a custom deprecator: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # ActiveSupport::Deprecation.deprecate_methods(Fred, ddd: :zzz, deprecator: custom_deprecator) + # # => [:ddd] + # + # Fred.new.ddd + # DEPRECATION WARNING: ddd is deprecated and will be removed from MyGem next-release (use zzz instead). (called from irb_binding at (irb):15) + # # => nil + # + # Using a custom deprecator directly: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # custom_deprecator.deprecate_methods(Fred, eee: :zzz) + # # => [:eee] + # + # Fred.new.eee + # DEPRECATION WARNING: eee is deprecated and will be removed from MyGem next-release (use zzz instead). (called from irb_binding at (irb):18) + # # => nil + def deprecate_methods(target_module, *method_names) + options = method_names.extract_options! + deprecator = options.delete(:deprecator) || self + method_names += options.keys + mod = Module.new + + method_names.each do |method_name| + if target_module.method_defined?(method_name) || target_module.private_method_defined?(method_name) + aliased_method, punctuation = method_name.to_s.sub(/([?!=])$/, ""), $1 + with_method = "#{aliased_method}_with_deprecation#{punctuation}" + without_method = "#{aliased_method}_without_deprecation#{punctuation}" + + target_module.send(:define_method, with_method) do |*args, &block| + deprecator.deprecation_warning(method_name, options[method_name]) + send(without_method, *args, &block) + end + + target_module.send(:alias_method, without_method, method_name) + target_module.send(:alias_method, method_name, with_method) + + case + when target_module.protected_method_defined?(without_method) + target_module.send(:protected, method_name) + when target_module.private_method_defined?(without_method) + target_module.send(:private, method_name) + end + else + mod.send(:define_method, method_name) do |*args, &block| + deprecator.deprecation_warning(method_name, options[method_name]) + super(*args, &block) + end + end + end + + target_module.prepend(mod) unless mod.instance_methods(false).empty? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/proxy_wrappers.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/proxy_wrappers.rb new file mode 100644 index 0000000..896c0d2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/proxy_wrappers.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +require "active_support/core_ext/regexp" + +module ActiveSupport + class Deprecation + class DeprecationProxy #:nodoc: + def self.new(*args, &block) + object = args.first + + return object unless object + super + end + + instance_methods.each { |m| undef_method m unless /^__|^object_id$/.match?(m) } + + # Don't give a deprecation warning on inspect since test/unit and error + # logs rely on it for diagnostics. 
+ def inspect + target.inspect + end + + private + def method_missing(called, *args, &block) + warn caller_locations, called, args + target.__send__(called, *args, &block) + end + end + + # DeprecatedObjectProxy transforms an object into a deprecated one. It + # takes an object, a deprecation message and optionally a deprecator. The + # deprecator defaults to +ActiveSupport::Deprecator+ if none is specified. + # + # deprecated_object = ActiveSupport::Deprecation::DeprecatedObjectProxy.new(Object.new, "This object is now deprecated") + # # => # + # + # deprecated_object.to_s + # DEPRECATION WARNING: This object is now deprecated. + # (Backtrace) + # # => "#" + class DeprecatedObjectProxy < DeprecationProxy + def initialize(object, message, deprecator = ActiveSupport::Deprecation.instance) + @object = object + @message = message + @deprecator = deprecator + end + + private + def target + @object + end + + def warn(callstack, called, args) + @deprecator.warn(@message, callstack) + end + end + + # DeprecatedInstanceVariableProxy transforms an instance variable into a + # deprecated one. It takes an instance of a class, a method on that class + # and an instance variable. It optionally takes a deprecator as the last + # argument. The deprecator defaults to +ActiveSupport::Deprecator+ if none + # is specified. + # + # class Example + # def initialize + # @request = ActiveSupport::Deprecation::DeprecatedInstanceVariableProxy.new(self, :request, :@request) + # @_request = :special_request + # end + # + # def request + # @_request + # end + # + # def old_request + # @request + # end + # end + # + # example = Example.new + # # => # + # + # example.old_request.to_s + # # => DEPRECATION WARNING: @request is deprecated! Call request.to_s instead of + # @request.to_s + # (Backtrace information…) + # "special_request" + # + # example.request.to_s + # # => "special_request" + class DeprecatedInstanceVariableProxy < DeprecationProxy + def initialize(instance, method, var = "@#{method}", deprecator = ActiveSupport::Deprecation.instance) + @instance = instance + @method = method + @var = var + @deprecator = deprecator + end + + private + def target + @instance.__send__(@method) + end + + def warn(callstack, called, args) + @deprecator.warn("#{@var} is deprecated! Call #{@method}.#{called} instead of #{@var}.#{called}. Args: #{args.inspect}", callstack) + end + end + + # DeprecatedConstantProxy transforms a constant into a deprecated one. It + # takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. The deprecated constant + # now returns the value of the new one. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. + # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + class DeprecatedConstantProxy < DeprecationProxy + def initialize(old_const, new_const, deprecator = ActiveSupport::Deprecation.instance, message: "#{old_const} is deprecated! 
Use #{new_const} instead.") + require "active_support/inflector/methods" + + @old_const = old_const + @new_const = new_const + @deprecator = deprecator + @message = message + end + + # Returns the class of the new constant. + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # PLANETS.class # => Array + def class + target.class + end + + private + def target + ActiveSupport::Inflector.constantize(@new_const.to_s) + end + + def warn(callstack, called, args) + @deprecator.warn(@message, callstack) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/reporting.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/reporting.rb new file mode 100644 index 0000000..7075b5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/deprecation/reporting.rb @@ -0,0 +1,114 @@ +# frozen_string_literal: true + +require "rbconfig" + +module ActiveSupport + class Deprecation + module Reporting + # Whether to print a message (silent mode) + attr_accessor :silenced + # Name of gem where method is deprecated + attr_accessor :gem_name + + # Outputs a deprecation warning to the output configured by + # ActiveSupport::Deprecation.behavior. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + def warn(message = nil, callstack = nil) + return if silenced + + callstack ||= caller_locations(2) + deprecation_message(callstack, message).tap do |m| + behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) } + end + end + + # Silence deprecation warnings within the block. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + # + # ActiveSupport::Deprecation.silence do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => nil + def silence + old_silenced, @silenced = @silenced, true + yield + ensure + @silenced = old_silenced + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + deprecated_method_warning(deprecated_method_name, message).tap do |msg| + warn(msg, caller_backtrace) + end + end + + private + # Outputs a deprecation warning message + # + # deprecated_method_warning(:method_name) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon}" + # deprecated_method_warning(:method_name, :another_method) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (use another_method instead)" + # deprecated_method_warning(:method_name, "Optional message") + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (Optional message)" + def deprecated_method_warning(method_name, message = nil) + warning = "#{method_name} is deprecated and will be removed from #{gem_name} #{deprecation_horizon}" + case message + when Symbol then "#{warning} (use #{message} instead)" + when String then "#{warning} (#{message})" + else warning + end + end + + def deprecation_message(callstack, message = nil) + message ||= "You are using deprecated behavior which will be removed from the next major or minor release." 
+ "DEPRECATION WARNING: #{message} #{deprecation_caller_message(callstack)}" + end + + def deprecation_caller_message(callstack) + file, line, method = extract_callstack(callstack) + if file + if line && method + "(called from #{method} at #{file}:#{line})" + else + "(called from #{file}:#{line})" + end + end + end + + def extract_callstack(callstack) + return _extract_callstack(callstack) if callstack.first.is_a? String + + offending_line = callstack.find { |frame| + frame.absolute_path && !ignored_callstack(frame.absolute_path) + } || callstack.first + + [offending_line.path, offending_line.lineno, offending_line.label] + end + + def _extract_callstack(callstack) + warn "Please pass `caller_locations` to the deprecation API" if $VERBOSE + offending_line = callstack.find { |line| !ignored_callstack(line) } || callstack.first + + if offending_line + if md = offending_line.match(/^(.+?):(\d+)(?::in `(.*?)')?/) + md.captures + else + offending_line + end + end + end + + RAILS_GEM_ROOT = File.expand_path("../../../..", __dir__) + "/" + + def ignored_callstack(path) + path.start_with?(RAILS_GEM_ROOT) || path.start_with?(RbConfig::CONFIG["rubylibdir"]) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/descendants_tracker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/descendants_tracker.rb new file mode 100644 index 0000000..a4cee78 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/descendants_tracker.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +module ActiveSupport + # This module provides an internal implementation to track descendants + # which is faster than iterating through ObjectSpace. + module DescendantsTracker + @@direct_descendants = {} + + class << self + def direct_descendants(klass) + @@direct_descendants[klass] || [] + end + + def descendants(klass) + arr = [] + accumulate_descendants(klass, arr) + arr + end + + def clear + if defined? ActiveSupport::Dependencies + @@direct_descendants.each do |klass, descendants| + if ActiveSupport::Dependencies.autoloaded?(klass) + @@direct_descendants.delete(klass) + else + descendants.reject! { |v| ActiveSupport::Dependencies.autoloaded?(v) } + end + end + else + @@direct_descendants.clear + end + end + + # This is the only method that is not thread safe, but is only ever called + # during the eager loading phase. 
+ def store_inherited(klass, descendant) + (@@direct_descendants[klass] ||= []) << descendant + end + + private + def accumulate_descendants(klass, acc) + if direct_descendants = @@direct_descendants[klass] + acc.concat(direct_descendants) + direct_descendants.each { |direct_descendant| accumulate_descendants(direct_descendant, acc) } + end + end + end + + def inherited(base) + DescendantsTracker.store_inherited(self, base) + super + end + + def direct_descendants + DescendantsTracker.direct_descendants(self) + end + + def descendants + DescendantsTracker.descendants(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/digest.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/digest.rb new file mode 100644 index 0000000..fba10fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/digest.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + class Digest #:nodoc: + class <(other) + if Scalar === other || Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + else + nil + end + end + + def +(other) + if Duration === other + seconds = value + other.parts[:seconds] + new_parts = other.parts.merge(seconds: seconds) + new_value = value + other.value + + Duration.new(new_value, new_parts) + else + calculate(:+, other) + end + end + + def -(other) + if Duration === other + seconds = value - other.parts[:seconds] + new_parts = other.parts.map { |part, other_value| [part, -other_value] }.to_h + new_parts = new_parts.merge(seconds: seconds) + new_value = value - other.value + + Duration.new(new_value, new_parts) + else + calculate(:-, other) + end + end + + def *(other) + if Duration === other + new_parts = other.parts.map { |part, other_value| [part, value * other_value] }.to_h + new_value = value * other.value + + Duration.new(new_value, new_parts) + else + calculate(:*, other) + end + end + + def /(other) + if Duration === other + value / other.value + else + calculate(:/, other) + end + end + + def %(other) + if Duration === other + Duration.build(value % other.value) + else + calculate(:%, other) + end + end + + private + def calculate(op, other) + if Scalar === other + Scalar.new(value.public_send(op, other.value)) + elsif Numeric === other + Scalar.new(value.public_send(op, other)) + else + raise_type_error(other) + end + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end + + SECONDS_PER_MINUTE = 60 + SECONDS_PER_HOUR = 3600 + SECONDS_PER_DAY = 86400 + SECONDS_PER_WEEK = 604800 + SECONDS_PER_MONTH = 2629746 # 1/12 of a gregorian year + SECONDS_PER_YEAR = 31556952 # length of a gregorian year (365.2425 days) + + PARTS_IN_SECONDS = { + seconds: 1, + minutes: SECONDS_PER_MINUTE, + hours: SECONDS_PER_HOUR, + days: SECONDS_PER_DAY, + weeks: SECONDS_PER_WEEK, + months: SECONDS_PER_MONTH, + years: SECONDS_PER_YEAR + }.freeze + + PARTS = [:years, :months, :weeks, :days, :hours, :minutes, :seconds].freeze + + attr_accessor :value, :parts + + autoload :ISO8601Parser, "active_support/duration/iso8601_parser" + autoload :ISO8601Serializer, "active_support/duration/iso8601_serializer" + + class << self + # Creates a new Duration from string formatted according to ISO 8601 Duration. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # This method allows negative parts to be present in pattern. 
+ # If invalid string is provided, it will raise +ActiveSupport::Duration::ISO8601Parser::ParsingError+. + def parse(iso8601duration) + parts = ISO8601Parser.new(iso8601duration).parse! + new(calculate_total_seconds(parts), parts) + end + + def ===(other) #:nodoc: + other.is_a?(Duration) + rescue ::NoMethodError + false + end + + def seconds(value) #:nodoc: + new(value, [[:seconds, value]]) + end + + def minutes(value) #:nodoc: + new(value * SECONDS_PER_MINUTE, [[:minutes, value]]) + end + + def hours(value) #:nodoc: + new(value * SECONDS_PER_HOUR, [[:hours, value]]) + end + + def days(value) #:nodoc: + new(value * SECONDS_PER_DAY, [[:days, value]]) + end + + def weeks(value) #:nodoc: + new(value * SECONDS_PER_WEEK, [[:weeks, value]]) + end + + def months(value) #:nodoc: + new(value * SECONDS_PER_MONTH, [[:months, value]]) + end + + def years(value) #:nodoc: + new(value * SECONDS_PER_YEAR, [[:years, value]]) + end + + # Creates a new Duration from a seconds value that is converted + # to the individual parts: + # + # ActiveSupport::Duration.build(31556952).parts # => {:years=>1} + # ActiveSupport::Duration.build(2716146).parts # => {:months=>1, :days=>1} + # + def build(value) + parts = {} + remainder = value.to_f + + PARTS.each do |part| + unless part == :seconds + part_in_seconds = PARTS_IN_SECONDS[part] + parts[part] = remainder.div(part_in_seconds) + remainder = (remainder % part_in_seconds).round(9) + end + end + + parts[:seconds] = remainder + + new(value, parts) + end + + private + + def calculate_total_seconds(parts) + parts.inject(0) do |total, (part, value)| + total + value * PARTS_IN_SECONDS[part] + end + end + end + + def initialize(value, parts) #:nodoc: + @value, @parts = value, parts.to_h + @parts.default = 0 + @parts.reject! { |k, v| v.zero? } + end + + def coerce(other) #:nodoc: + if Scalar === other + [other, self] + else + [Scalar.new(other), self] + end + end + + # Compares one Duration with another or a Numeric to this Duration. + # Numeric values are treated as seconds. + def <=>(other) + if Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + end + end + + # Adds another Duration or a Numeric to this Duration. Numeric values + # are treated as seconds. + def +(other) + if Duration === other + parts = @parts.dup + other.parts.each do |(key, value)| + parts[key] += value + end + Duration.new(value + other.value, parts) + else + seconds = @parts[:seconds] + other + Duration.new(value + other, @parts.merge(seconds: seconds)) + end + end + + # Subtracts another Duration or a Numeric from this Duration. Numeric + # values are treated as seconds. + def -(other) + self + (-other) + end + + # Multiplies this Duration by a Numeric and returns a new Duration. + def *(other) + if Scalar === other || Duration === other + Duration.new(value * other.value, parts.map { |type, number| [type, number * other.value] }) + elsif Numeric === other + Duration.new(value * other, parts.map { |type, number| [type, number * other] }) + else + raise_type_error(other) + end + end + + # Divides this Duration by a Numeric and returns a new Duration. + def /(other) + if Scalar === other + Duration.new(value / other.value, parts.map { |type, number| [type, number / other.value] }) + elsif Duration === other + value / other.value + elsif Numeric === other + Duration.new(value / other, parts.map { |type, number| [type, number / other] }) + else + raise_type_error(other) + end + end + + # Returns the modulo of this Duration by another Duration or Numeric. 
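# Usage sketch (illustrative; not part of the upstream file) of the
# arithmetic described above: a Numeric operand is treated as seconds,
# while two Durations merge their parts as well as their values.
require "active_support"
require "active_support/core_ext/integer/time"

(1.hour + 30).parts         # => {:hours=>1, :seconds=>30}
(1.hour + 30.minutes).parts # => {:hours=>1, :minutes=>30}
(2.days - 1.day).to_i       # => 86400
(1.hour * 2).to_i           # => 7200
(1.day % 7.hours).parts     # => {:hours=>3}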
+ # Numeric values are treated as seconds. + def %(other) + if Duration === other || Scalar === other + Duration.build(value % other.value) + elsif Numeric === other + Duration.build(value % other) + else + raise_type_error(other) + end + end + + def -@ #:nodoc: + Duration.new(-value, parts.map { |type, number| [type, -number] }) + end + + def is_a?(klass) #:nodoc: + Duration == klass || value.is_a?(klass) + end + alias :kind_of? :is_a? + + def instance_of?(klass) # :nodoc: + Duration == klass || value.instance_of?(klass) + end + + # Returns +true+ if +other+ is also a Duration instance with the + # same +value+, or if other == value. + def ==(other) + if Duration === other + other.value == value + else + other == value + end + end + + # Returns the amount of seconds a duration covers as a string. + # For more information check to_i method. + # + # 1.day.to_s # => "86400" + def to_s + @value.to_s + end + + # Returns the number of seconds that this Duration represents. + # + # 1.minute.to_i # => 60 + # 1.hour.to_i # => 3600 + # 1.day.to_i # => 86400 + # + # Note that this conversion makes some assumptions about the + # duration of some periods, e.g. months are always 1/12 of year + # and years are 365.2425 days: + # + # # equivalent to (1.year / 12).to_i + # 1.month.to_i # => 2629746 + # + # # equivalent to 365.2425.days.to_i + # 1.year.to_i # => 31556952 + # + # In such cases, Ruby's core + # Date[http://ruby-doc.org/stdlib/libdoc/date/rdoc/Date.html] and + # Time[http://ruby-doc.org/stdlib/libdoc/time/rdoc/Time.html] should be used for precision + # date and time arithmetic. + def to_i + @value.to_i + end + + # Returns +true+ if +other+ is also a Duration instance, which has the + # same parts as this one. + def eql?(other) + Duration === other && other.value.eql?(value) + end + + def hash + @value.hash + end + + # Calculates a new Time or Date that is as far in the future + # as this Duration represents. + def since(time = ::Time.current) + sum(1, time) + end + alias :from_now :since + alias :after :since + + # Calculates a new Time or Date that is as far in the past + # as this Duration represents. + def ago(time = ::Time.current) + sum(-1, time) + end + alias :until :ago + alias :before :ago + + def inspect #:nodoc: + return "0 seconds" if parts.empty? + + parts. + reduce(::Hash.new(0)) { |h, (l, r)| h[l] += r; h }. + sort_by { |unit, _ | PARTS.index(unit) }. + map { |unit, val| "#{val} #{val == 1 ? unit.to_s.chop : unit.to_s}" }. + to_sentence(locale: ::I18n.default_locale) + end + + def as_json(options = nil) #:nodoc: + to_i + end + + def init_with(coder) #:nodoc: + initialize(coder["value"], coder["parts"]) + end + + def encode_with(coder) #:nodoc: + coder.map = { "value" => @value, "parts" => @parts } + end + + # Build ISO 8601 Duration string for this duration. + # The +precision+ parameter can be used to limit seconds' precision of duration. 
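# Usage sketch (illustrative; not part of the upstream file) of the
# ISO 8601 round trip: Duration.build splits a number of seconds into
# calendar parts and #iso8601 serializes them back out.
require "active_support"
require "active_support/duration"

d = ActiveSupport::Duration.build(93784)
d.parts   # => {:days=>1, :hours=>2, :minutes=>3, :seconds=>4.0}
d.iso8601 # => "P1DT2H3M4S"
ActiveSupport::Duration.parse("P1DT2H3M4S").to_i # => 93784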
+ def iso8601(precision: nil) + ISO8601Serializer.new(self, precision: precision).serialize + end + + private + + def sum(sign, time = ::Time.current) + parts.inject(time) do |t, (type, number)| + if t.acts_like?(:time) || t.acts_like?(:date) + if type == :seconds + t.since(sign * number) + elsif type == :minutes + t.since(sign * number * 60) + elsif type == :hours + t.since(sign * number * 3600) + else + t.advance(type => sign * number) + end + else + raise ::ArgumentError, "expected a time or date, got #{time.inspect}" + end + end + end + + def respond_to_missing?(method, _) + value.respond_to?(method) + end + + def method_missing(method, *args, &block) + value.public_send(method, *args, &block) + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_parser.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_parser.rb new file mode 100644 index 0000000..1847eea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_parser.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require "strscan" +require "active_support/core_ext/regexp" + +module ActiveSupport + class Duration + # Parses a string formatted according to ISO 8601 Duration into the hash. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # + # This parser allows negative parts to be present in pattern. + class ISO8601Parser # :nodoc: + class ParsingError < ::ArgumentError; end + + PERIOD_OR_COMMA = /\.|,/ + PERIOD = ".".freeze + COMMA = ",".freeze + + SIGN_MARKER = /\A\-|\+|/ + DATE_MARKER = /P/ + TIME_MARKER = /T/ + DATE_COMPONENT = /(\-?\d+(?:[.,]\d+)?)(Y|M|D|W)/ + TIME_COMPONENT = /(\-?\d+(?:[.,]\d+)?)(H|M|S)/ + + DATE_TO_PART = { "Y" => :years, "M" => :months, "W" => :weeks, "D" => :days } + TIME_TO_PART = { "H" => :hours, "M" => :minutes, "S" => :seconds } + + DATE_COMPONENTS = [:years, :months, :days] + TIME_COMPONENTS = [:hours, :minutes, :seconds] + + attr_reader :parts, :scanner + attr_accessor :mode, :sign + + def initialize(string) + @scanner = StringScanner.new(string) + @parts = {} + @mode = :start + @sign = 1 + end + + def parse! + while !finished? + case mode + when :start + if scan(SIGN_MARKER) + self.sign = (scanner.matched == "-") ? -1 : 1 + self.mode = :sign + else + raise_parsing_error + end + + when :sign + if scan(DATE_MARKER) + self.mode = :date + else + raise_parsing_error + end + + when :date + if scan(TIME_MARKER) + self.mode = :time + elsif scan(DATE_COMPONENT) + parts[DATE_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + when :time + if scan(TIME_COMPONENT) + parts[TIME_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + end + end + + validate! + parts + end + + private + + def finished? + scanner.eos? + end + + # Parses number which can be a float with either comma or period. + def number + PERIOD_OR_COMMA.match?(scanner[1]) ? scanner[1].tr(COMMA, PERIOD).to_f : scanner[1].to_i + end + + def scan(pattern) + scanner.scan(pattern) + end + + def raise_parsing_error(reason = nil) + raise ParsingError, "Invalid ISO 8601 duration: #{scanner.string.inspect} #{reason}".strip + end + + # Checks for various semantic errors as stated in ISO 8601 standard. + def validate! + raise_parsing_error("is empty duration") if parts.empty? 
+ + # Mixing any of Y, M, D with W is invalid. + if parts.key?(:weeks) && (parts.keys & DATE_COMPONENTS).any? + raise_parsing_error("mixing weeks with other date parts not allowed") + end + + # Specifying an empty T part is invalid. + if mode == :time && (parts.keys & TIME_COMPONENTS).empty? + raise_parsing_error("time part marker is present but time part is empty") + end + + fractions = parts.values.reject(&:zero?).select { |a| (a % 1) != 0 } + unless fractions.empty? || (fractions.size == 1 && fractions.last == @parts.values.reject(&:zero?).last) + raise_parsing_error "(only last part can be fractional)" + end + + true + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_serializer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_serializer.rb new file mode 100644 index 0000000..bb177ae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/duration/iso8601_serializer.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" +require "active_support/core_ext/hash/transform_values" + +module ActiveSupport + class Duration + # Serializes duration to string according to ISO 8601 Duration format. + class ISO8601Serializer # :nodoc: + def initialize(duration, precision: nil) + @duration = duration + @precision = precision + end + + # Builds and returns output string. + def serialize + parts, sign = normalize + return "PT0S".freeze if parts.empty? + + output = "P".dup + output << "#{parts[:years]}Y" if parts.key?(:years) + output << "#{parts[:months]}M" if parts.key?(:months) + output << "#{parts[:weeks]}W" if parts.key?(:weeks) + output << "#{parts[:days]}D" if parts.key?(:days) + time = "".dup + time << "#{parts[:hours]}H" if parts.key?(:hours) + time << "#{parts[:minutes]}M" if parts.key?(:minutes) + if parts.key?(:seconds) + time << "#{sprintf(@precision ? "%0.0#{@precision}f" : '%g', parts[:seconds])}S" + end + output << "T#{time}" unless time.empty? + "#{sign}#{output}" + end + + private + + # Return pair of duration's parts and whole duration sign. + # Parts are summarized (as they can become repetitive due to addition, etc). + # Zero parts are removed as not significant. + # If all parts are negative it will negate all of them and return minus as a sign. + def normalize + parts = @duration.parts.each_with_object(Hash.new(0)) do |(k, v), p| + p[k] += v unless v.zero? + end + # If all parts are negative - let's make a negative duration + sign = "" + if parts.values.all? 
{ |v| v < 0 } + sign = "-" + parts.transform_values!(&:-@) + end + [parts, sign] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_configuration.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_configuration.rb new file mode 100644 index 0000000..b17695f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_configuration.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +require "yaml" +require "active_support/encrypted_file" +require "active_support/ordered_options" +require "active_support/core_ext/object/inclusion" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class EncryptedConfiguration < EncryptedFile + delegate :[], :fetch, to: :config + delegate_missing_to :options + + def initialize(config_path:, key_path:, env_key:, raise_if_missing_key:) + super content_path: config_path, key_path: key_path, + env_key: env_key, raise_if_missing_key: raise_if_missing_key + end + + # Allow a config to be started without a file present + def read + super + rescue ActiveSupport::EncryptedFile::MissingContentError + "" + end + + def write(contents) + deserialize(contents) + + super + end + + def config + @config ||= deserialize(read).deep_symbolize_keys + end + + private + def options + @options ||= ActiveSupport::InheritableOptions.new(config) + end + + def serialize(config) + config.present? ? YAML.dump(config) : "" + end + + def deserialize(config) + YAML.load(config).presence || {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_file.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_file.rb new file mode 100644 index 0000000..c66f1b5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/encrypted_file.rb @@ -0,0 +1,99 @@ +# frozen_string_literal: true + +require "pathname" +require "active_support/message_encryptor" + +module ActiveSupport + class EncryptedFile + class MissingContentError < RuntimeError + def initialize(content_path) + super "Missing encrypted content file in #{content_path}." + end + end + + class MissingKeyError < RuntimeError + def initialize(key_path:, env_key:) + super \ + "Missing encryption key to decrypt file with. " + + "Ask your team for your master key and write it to #{key_path} or put it in the ENV['#{env_key}']." + end + end + + CIPHER = "aes-128-gcm" + + def self.generate_key + SecureRandom.hex(ActiveSupport::MessageEncryptor.key_len(CIPHER)) + end + + + attr_reader :content_path, :key_path, :env_key, :raise_if_missing_key + + def initialize(content_path:, key_path:, env_key:, raise_if_missing_key:) + @content_path, @key_path = Pathname.new(content_path), Pathname.new(key_path) + @env_key, @raise_if_missing_key = env_key, raise_if_missing_key + end + + def key + read_env_key || read_key_file || handle_missing_key + end + + def read + if !key.nil? && content_path.exist? 
+ decrypt content_path.binread + else + raise MissingContentError, content_path + end + end + + def write(contents) + IO.binwrite "#{content_path}.tmp", encrypt(contents) + FileUtils.mv "#{content_path}.tmp", content_path + end + + def change(&block) + writing read, &block + end + + + private + def writing(contents) + tmp_file = "#{Process.pid}.#{content_path.basename.to_s.chomp('.enc')}" + tmp_path = Pathname.new File.join(Dir.tmpdir, tmp_file) + tmp_path.binwrite contents + + yield tmp_path + + updated_contents = tmp_path.binread + + write(updated_contents) if updated_contents != contents + ensure + FileUtils.rm(tmp_path) if tmp_path.exist? + end + + + def encrypt(contents) + encryptor.encrypt_and_sign contents + end + + def decrypt(contents) + encryptor.decrypt_and_verify contents + end + + def encryptor + @encryptor ||= ActiveSupport::MessageEncryptor.new([ key ].pack("H*"), cipher: CIPHER) + end + + + def read_env_key + ENV[env_key] + end + + def read_key_file + key_path.binread.strip if key_path.exist? + end + + def handle_missing_key + raise MissingKeyError, key_path: key_path, env_key: env_key if raise_if_missing_key + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/evented_file_update_checker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/evented_file_update_checker.rb new file mode 100644 index 0000000..97e982e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/evented_file_update_checker.rb @@ -0,0 +1,205 @@ +# frozen_string_literal: true + +require "set" +require "pathname" +require "concurrent/atomic/atomic_boolean" + +module ActiveSupport + # Allows you to "listen" to changes in a file system. + # The evented file updater does not hit disk when checking for updates + # instead it uses platform specific file system events to trigger a change + # in state. + # + # The file checker takes an array of files to watch or a hash specifying directories + # and file extensions to watch. It also takes a block that is called when + # EventedFileUpdateChecker#execute is run or when EventedFileUpdateChecker#execute_if_updated + # is run and there have been changes to the file system. + # + # Note: Forking will cause the first call to `updated?` to return `true`. + # + # Example: + # + # checker = ActiveSupport::EventedFileUpdateChecker.new(["/tmp/foo"]) { puts "changed" } + # checker.updated? + # # => false + # checker.execute_if_updated + # # => nil + # + # FileUtils.touch("/tmp/foo") + # + # checker.updated? + # # => true + # checker.execute_if_updated + # # => "changed" + # + class EventedFileUpdateChecker #:nodoc: all + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize an EventedFileUpdateChecker" + end + + @ph = PathHelper.new + @files = files.map { |f| @ph.xpath(f) }.to_set + + @dirs = {} + dirs.each do |dir, exts| + @dirs[@ph.xpath(dir)] = Array(exts).map { |ext| @ph.normalize_extension(ext) } + end + + @block = block + @updated = Concurrent::AtomicBoolean.new(false) + @lcsp = @ph.longest_common_subpath(@dirs.keys) + @pid = Process.pid + @boot_mutex = Mutex.new + + if (@dtw = directories_to_watch).any? + # Loading listen triggers warnings. These are originated by a legit + # usage of attr_* macros for private attributes, but adds a lot of noise + # to our test suite. Thus, we lazy load it and disable warnings locally. 
+ silence_warnings do + begin + require "listen" + rescue LoadError => e + raise LoadError, "Could not load the 'listen' gem. Add `gem 'listen'` to the development group of your Gemfile", e.backtrace + end + end + end + boot! + end + + def updated? + @boot_mutex.synchronize do + if @pid != Process.pid + boot! + @pid = Process.pid + @updated.make_true + end + end + @updated.true? + end + + def execute + @updated.make_false + @block.call + end + + def execute_if_updated + if updated? + yield if block_given? + execute + true + end + end + + private + def boot! + Listen.to(*@dtw, &method(:changed)).start + end + + def changed(modified, added, removed) + unless updated? + @updated.make_true if (modified + added + removed).any? { |f| watching?(f) } + end + end + + def watching?(file) + file = @ph.xpath(file) + + if @files.member?(file) + true + elsif file.directory? + false + else + ext = @ph.normalize_extension(file.extname) + + file.dirname.ascend do |dir| + if @dirs.fetch(dir, []).include?(ext) + break true + elsif dir == @lcsp || dir.root? + break false + end + end + end + end + + def directories_to_watch + dtw = (@files + @dirs.keys).map { |f| @ph.existing_parent(f) } + dtw.compact! + dtw.uniq! + + normalized_gem_paths = Gem.path.map { |path| File.join path, "" } + dtw = dtw.reject do |path| + normalized_gem_paths.any? { |gem_path| path.to_s.start_with?(gem_path) } + end + + @ph.filter_out_descendants(dtw) + end + + class PathHelper + def xpath(path) + Pathname.new(path).expand_path + end + + def normalize_extension(ext) + ext.to_s.sub(/\A\./, "") + end + + # Given a collection of Pathname objects returns the longest subpath + # common to all of them, or +nil+ if there is none. + def longest_common_subpath(paths) + return if paths.empty? + + lcsp = Pathname.new(paths[0]) + + paths[1..-1].each do |path| + until ascendant_of?(lcsp, path) + if lcsp.root? + # If we get here a root directory is not an ascendant of path. + # This may happen if there are paths in different drives on + # Windows. + return + else + lcsp = lcsp.parent + end + end + end + + lcsp + end + + # Returns the deepest existing ascendant, which could be the argument itself. + def existing_parent(dir) + dir.ascend do |ascendant| + break ascendant if ascendant.directory? + end + end + + # Filters out directories which are descendants of others in the collection (stable). + def filter_out_descendants(dirs) + return dirs if dirs.length < 2 + + dirs_sorted_by_nparts = dirs.sort_by { |dir| dir.each_filename.to_a.length } + descendants = [] + + until dirs_sorted_by_nparts.empty? + dir = dirs_sorted_by_nparts.shift + + dirs_sorted_by_nparts.reject! do |possible_descendant| + ascendant_of?(dir, possible_descendant) && descendants << possible_descendant + end + end + + # Array#- preserves order. 
+ dirs - descendants + end + + private + + def ascendant_of?(base, other) + base != other && other.ascend do |ascendant| + break true if base == ascendant + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/execution_wrapper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/execution_wrapper.rb new file mode 100644 index 0000000..f48c586 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/execution_wrapper.rb @@ -0,0 +1,128 @@ +# frozen_string_literal: true + +require "active_support/callbacks" + +module ActiveSupport + class ExecutionWrapper + include ActiveSupport::Callbacks + + Null = Object.new # :nodoc: + def Null.complete! # :nodoc: + end + + define_callbacks :run + define_callbacks :complete + + def self.to_run(*args, &block) + set_callback(:run, *args, &block) + end + + def self.to_complete(*args, &block) + set_callback(:complete, *args, &block) + end + + RunHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + hook_state[hook] = hook.run + end + end + + CompleteHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + if hook_state.key?(hook) + hook.complete hook_state[hook] + end + end + alias after before + end + + # Register an object to be invoked during both the +run+ and + # +complete+ steps. + # + # +hook.complete+ will be passed the value returned from +hook.run+, + # and will only be invoked if +run+ has previously been called. + # (Mostly, this means it won't be invoked if an exception occurs in + # a preceding +to_run+ block; all ordinary +to_complete+ blocks are + # invoked in that situation.) + def self.register_hook(hook, outer: false) + if outer + to_run RunHook.new(hook), prepend: true + to_complete :after, CompleteHook.new(hook) + else + to_run RunHook.new(hook) + to_complete CompleteHook.new(hook) + end + end + + # Run this execution. + # + # Returns an instance, whose +complete!+ method *must* be invoked + # after the work has been performed. + # + # Where possible, prefer +wrap+. + def self.run! + if active? + Null + else + new.tap do |instance| + success = nil + begin + instance.run! + success = true + ensure + instance.complete! unless success + end + end + end + end + + # Perform the work in the supplied block as an execution. + def self.wrap + return yield if active? + + instance = run! + begin + yield + ensure + instance.complete! + end + end + + class << self # :nodoc: + attr_accessor :active + end + + def self.inherited(other) # :nodoc: + super + other.active = Concurrent::Hash.new + end + + self.active = Concurrent::Hash.new + + def self.active? # :nodoc: + @active[Thread.current] + end + + def run! # :nodoc: + self.class.active[Thread.current] = true + run_callbacks(:run) + end + + # Complete this in-flight execution. This method *must* be called + # exactly once on the result of any call to +run!+. + # + # Where possible, prefer +wrap+. + def complete! 
+ run_callbacks(:complete) + ensure + self.class.active.delete Thread.current + end + + private + def hook_state + @_hook_state ||= {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/executor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/executor.rb new file mode 100644 index 0000000..ce391b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/executor.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" + +module ActiveSupport + class Executor < ExecutionWrapper + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/file_update_checker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/file_update_checker.rb new file mode 100644 index 0000000..1a0bb10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/file_update_checker.rb @@ -0,0 +1,163 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/calculations" + +module ActiveSupport + # FileUpdateChecker specifies the API used by Rails to watch files + # and control reloading. The API depends on four methods: + # + # * +initialize+ which expects two parameters and one block as + # described below. + # + # * +updated?+ which returns a boolean if there were updates in + # the filesystem or not. + # + # * +execute+ which executes the given block on initialization + # and updates the latest watched files and timestamp. + # + # * +execute_if_updated+ which just executes the block if it was updated. + # + # After initialization, a call to +execute_if_updated+ must execute + # the block only if there was really a change in the filesystem. + # + # This class is used by Rails to reload the I18n framework whenever + # they are changed upon a new request. + # + # i18n_reloader = ActiveSupport::FileUpdateChecker.new(paths) do + # I18n.reload! + # end + # + # ActiveSupport::Reloader.to_prepare do + # i18n_reloader.execute_if_updated + # end + class FileUpdateChecker + # It accepts two parameters on initialization. The first is an array + # of files and the second is an optional hash of directories. The hash must + # have directories as keys and the value is an array of extensions to be + # watched under that directory. + # + # This method must also receive a block that will be called once a path + # changes. The array of files and list of directories cannot be changed + # after FileUpdateChecker has been initialized. + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize a FileUpdateChecker" + end + + @files = files.freeze + @glob = compile_glob(dirs) + @block = block + + @watched = nil + @updated_at = nil + + @last_watched = watched + @last_update_at = updated_at(@last_watched) + end + + # Check if any of the entries were updated. If so, the watched and/or + # updated_at values are cached until the block is executed via +execute+ + # or +execute_if_updated+. + def updated? + current_watched = watched + if @last_watched.size != current_watched.size + @watched = current_watched + true + else + current_updated_at = updated_at(current_watched) + if @last_update_at < current_updated_at + @watched = current_watched + @updated_at = current_updated_at + true + else + false + end + end + end + + # Executes the given block and updates the latest watched files and + # timestamp. 
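# Usage sketch (illustrative; not part of the upstream file): the second
# constructor argument maps directories to the extensions to watch under
# them. "config" is a hypothetical directory here.
require "active_support"
require "active_support/file_update_checker"

checker = ActiveSupport::FileUpdateChecker.new([], "config" => [:rb, :yml]) do
  puts "configuration changed"
end

checker.updated?           # => false until a watched file's mtime changes
checker.execute_if_updated # runs the block only when updated? is true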
+ def execute + @last_watched = watched + @last_update_at = updated_at(@last_watched) + @block.call + ensure + @watched = nil + @updated_at = nil + end + + # Execute the block given if updated. + def execute_if_updated + if updated? + yield if block_given? + execute + true + else + false + end + end + + private + + def watched + @watched || begin + all = @files.select { |f| File.exist?(f) } + all.concat(Dir[@glob]) if @glob + all + end + end + + def updated_at(paths) + @updated_at || max_mtime(paths) || Time.at(0) + end + + # This method returns the maximum mtime of the files in +paths+, or +nil+ + # if the array is empty. + # + # Files with a mtime in the future are ignored. Such abnormal situation + # can happen for example if the user changes the clock by hand. It is + # healthy to consider this edge case because with mtimes in the future + # reloading is not triggered. + def max_mtime(paths) + time_now = Time.now + max_mtime = nil + + # Time comparisons are performed with #compare_without_coercion because + # AS redefines these operators in a way that is much slower and does not + # bring any benefit in this particular code. + # + # Read t1.compare_without_coercion(t2) < 0 as t1 < t2. + paths.each do |path| + mtime = File.mtime(path) + + next if time_now.compare_without_coercion(mtime) < 0 + + if max_mtime.nil? || max_mtime.compare_without_coercion(mtime) < 0 + max_mtime = mtime + end + end + + max_mtime + end + + def compile_glob(hash) + hash.freeze # Freeze so changes aren't accidentally pushed + return if hash.empty? + + globs = hash.map do |key, value| + "#{escape(key)}/**/*#{compile_ext(value)}" + end + "{#{globs.join(",")}}" + end + + def escape(key) + key.gsub(",", '\,') + end + + def compile_ext(array) + array = Array(array) + return if array.empty? + ".{#{array.join(",")}}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/gem_version.rb new file mode 100644 index 0000000..9e53a55 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/gem_version.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module ActiveSupport + # Returns the version of the currently loaded Active Support as a Gem::Version. + def self.gem_version + Gem::Version.new VERSION::STRING + end + + module VERSION + MAJOR = 5 + MINOR = 2 + TINY = 4 + PRE = "4" + + STRING = [MAJOR, MINOR, TINY, PRE].compact.join(".") + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/gzip.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/gzip.rb new file mode 100644 index 0000000..7ffa6d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/gzip.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "zlib" +require "stringio" + +module ActiveSupport + # A convenient wrapper for the zlib standard library that allows + # compression/decompression of strings with gzip. + # + # gzip = ActiveSupport::Gzip.compress('compress me!') + # # => "\x1F\x8B\b\x00o\x8D\xCDO\x00\x03K\xCE\xCF-(J-.V\xC8MU\x04\x00R>n\x83\f\x00\x00\x00" + # + # ActiveSupport::Gzip.decompress(gzip) + # # => "compress me!" + module Gzip + class Stream < StringIO + def initialize(*) + super + set_encoding "BINARY" + end + def close; rewind; end + end + + # Decompresses a gzipped string. 
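# Usage sketch (illustrative; not part of the upstream file): compress
# accepts the optional Zlib level and strategy shown in its signature;
# decompress needs no options because the gzip header carries them.
require "active_support/gzip"

data = "aaaaaaaaaa" * 100
best = ActiveSupport::Gzip.compress(data, Zlib::BEST_COMPRESSION)
fast = ActiveSupport::Gzip.compress(data, Zlib::BEST_SPEED, Zlib::HUFFMAN_ONLY)

ActiveSupport::Gzip.decompress(best) == data # => true
best.bytesize < fast.bytesize                # => true for this repetitive input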
+ def self.decompress(source) + Zlib::GzipReader.wrap(StringIO.new(source), &:read) + end + + # Compresses a string using gzip. + def self.compress(source, level = Zlib::DEFAULT_COMPRESSION, strategy = Zlib::DEFAULT_STRATEGY) + output = Stream.new + gz = Zlib::GzipWriter.new(output, level, strategy) + gz.write(source) + gz.close + output.string + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/hash_with_indifferent_access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/hash_with_indifferent_access.rb new file mode 100644 index 0000000..24d3aed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/hash_with_indifferent_access.rb @@ -0,0 +1,395 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" + +module ActiveSupport + # Implements a hash where keys :foo and "foo" are considered + # to be the same. + # + # rgb = ActiveSupport::HashWithIndifferentAccess.new + # + # rgb[:black] = '#000000' + # rgb[:black] # => '#000000' + # rgb['black'] # => '#000000' + # + # rgb['white'] = '#FFFFFF' + # rgb[:white] # => '#FFFFFF' + # rgb['white'] # => '#FFFFFF' + # + # Internally symbols are mapped to strings when used as keys in the entire + # writing interface (calling []=, merge, etc). This + # mapping belongs to the public interface. For example, given: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # + # You are guaranteed that the key is returned as a string: + # + # hash.keys # => ["a"] + # + # Technically other types of keys are accepted: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # hash[0] = 0 + # hash # => {"a"=>1, 0=>0} + # + # but this class is intended for use cases where strings or symbols are the + # expected keys and it is convenient to understand both as the same. For + # example the +params+ hash in Ruby on Rails. + # + # Note that core extensions define Hash#with_indifferent_access: + # + # rgb = { black: '#000000', white: '#FFFFFF' }.with_indifferent_access + # + # which may be handy. + # + # To access this class outside of Rails, require the core extension with: + # + # require "active_support/core_ext/hash/indifferent_access" + # + # which will, in turn, require this file. + class HashWithIndifferentAccess < Hash + # Returns +true+ so that Array#extract_options! finds members of + # this class. + def extractable_options? + true + end + + def with_indifferent_access + dup + end + + def nested_under_indifferent_access + self + end + + def initialize(constructor = {}) + if constructor.respond_to?(:to_hash) + super() + update(constructor) + + hash = constructor.to_hash + self.default = hash.default if hash.default + self.default_proc = hash.default_proc if hash.default_proc + else + super(constructor) + end + end + + def self.[](*args) + new.merge!(Hash[*args]) + end + + alias_method :regular_writer, :[]= unless method_defined?(:regular_writer) + alias_method :regular_update, :update unless method_defined?(:regular_update) + + # Assigns a new value to the hash: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:key] = 'value' + # + # This value can be later fetched using either +:key+ or 'key'. 
+ def []=(key, value) + regular_writer(convert_key(key), convert_value(value, for: :assignment)) + end + + alias_method :store, :[]= + + # Updates the receiver in-place, merging in the hash passed as argument: + # + # hash_1 = ActiveSupport::HashWithIndifferentAccess.new + # hash_1[:key] = 'value' + # + # hash_2 = ActiveSupport::HashWithIndifferentAccess.new + # hash_2[:key] = 'New Value!' + # + # hash_1.update(hash_2) # => {"key"=>"New Value!"} + # + # The argument can be either an + # ActiveSupport::HashWithIndifferentAccess or a regular +Hash+. + # In either case the merge respects the semantics of indifferent access. + # + # If the argument is a regular hash with keys +:key+ and +"key"+ only one + # of the values end up in the receiver, but which one is unspecified. + # + # When given a block, the value for duplicated keys will be determined + # by the result of invoking the block with the duplicated key, the value + # in the receiver, and the value in +other_hash+. The rules for duplicated + # keys follow the semantics of indifferent access: + # + # hash_1[:key] = 10 + # hash_2['key'] = 12 + # hash_1.update(hash_2) { |key, old, new| old + new } # => {"key"=>22} + def update(other_hash) + if other_hash.is_a? HashWithIndifferentAccess + super(other_hash) + else + other_hash.to_hash.each_pair do |key, value| + if block_given? && key?(key) + value = yield(convert_key(key), self[key], value) + end + regular_writer(convert_key(key), convert_value(value)) + end + self + end + end + + alias_method :merge!, :update + + # Checks the hash for a key matching the argument passed in: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['key'] = 'value' + # hash.key?(:key) # => true + # hash.key?('key') # => true + def key?(key) + super(convert_key(key)) + end + + alias_method :include?, :key? + alias_method :has_key?, :key? + alias_method :member?, :key? 
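# Usage sketch (illustrative; not part of the upstream file) of the
# indifferent read/write behaviour documented above, including the block
# form of update for resolving duplicate keys.
require "active_support/hash_with_indifferent_access"

options = ActiveSupport::HashWithIndifferentAccess.new(font_size: 10)
options[:font_size]      # => 10
options["font_size"]     # => 10
options.key?(:font_size) # => true

options.update("font_size" => 12) { |key, old, new| old + new }
options[:font_size]      # => 22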
+ + # Same as Hash#[] where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters['foo'] # => 1 + # counters[:foo] # => 1 + # counters[:zoo] # => nil + def [](key) + super(convert_key(key)) + end + + # Same as Hash#assoc where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.assoc('foo') # => ["foo", 1] + # counters.assoc(:foo) # => ["foo", 1] + # counters.assoc(:zoo) # => nil + def assoc(key) + super(convert_key(key)) + end + + # Same as Hash#fetch where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.fetch('foo') # => 1 + # counters.fetch(:bar, 0) # => 0 + # counters.fetch(:bar) { |key| 0 } # => 0 + # counters.fetch(:zoo) # => KeyError: key not found: "zoo" + def fetch(key, *extras) + super(convert_key(key), *extras) + end + + if Hash.new.respond_to?(:dig) + # Same as Hash#dig where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = { bar: 1 } + # + # counters.dig('foo', 'bar') # => 1 + # counters.dig(:foo, :bar) # => 1 + # counters.dig(:zoo) # => nil + def dig(*args) + args[0] = convert_key(args[0]) if args.size > 0 + super(*args) + end + end + + # Same as Hash#default where the key passed as argument can be + # either a string or a symbol: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(1) + # hash.default # => 1 + # + # hash = ActiveSupport::HashWithIndifferentAccess.new { |hash, key| key } + # hash.default # => nil + # hash.default('foo') # => 'foo' + # hash.default(:foo) # => 'foo' + def default(*args) + super(*args.map { |arg| convert_key(arg) }) + end + + # Returns an array of the values at the specified indices: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.values_at('a', 'b') # => ["x", "y"] + def values_at(*indices) + indices.collect { |key| self[convert_key(key)] } + end + + # Returns an array of the values at the specified indices, but also + # raises an exception when one of the keys can't be found. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.fetch_values('a', 'b') # => ["x", "y"] + # hash.fetch_values('a', 'c') { |key| 'z' } # => ["x", "z"] + # hash.fetch_values('a', 'c') # => KeyError: key not found: "c" + def fetch_values(*indices, &block) + indices.collect { |key| fetch(key, &block) } + end if Hash.method_defined?(:fetch_values) + + # Returns a shallow copy of the hash. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new({ a: { b: 'b' } }) + # dup = hash.dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => "c" + # dup[:a][:c] # => "c" + def dup + self.class.new(self).tap do |new_hash| + set_defaults(new_hash) + end + end + + # This method has the same semantics of +update+, except it does not + # modify the receiver but rather returns a new hash with indifferent + # access with the result of the merge. 
+ def merge(hash, &block) + dup.update(hash, &block) + end + + # Like +merge+ but the other way around: Merges the receiver into the + # argument and returns a new hash with indifferent access as result: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['a'] = nil + # hash.reverse_merge(a: 0, b: 1) # => {"a"=>nil, "b"=>1} + def reverse_merge(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults, :reverse_merge + + # Same semantics as +reverse_merge+ but modifies the receiver in-place. + def reverse_merge!(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults!, :reverse_merge! + + # Replaces the contents of this hash with other_hash. + # + # h = { "a" => 100, "b" => 200 } + # h.replace({ "c" => 300, "d" => 400 }) # => {"c"=>300, "d"=>400} + def replace(other_hash) + super(self.class.new(other_hash)) + end + + # Removes the specified key from the hash. + def delete(key) + super(convert_key(key)) + end + + def stringify_keys!; self end + def deep_stringify_keys!; self end + def stringify_keys; dup end + def deep_stringify_keys; dup end + undef :symbolize_keys! + undef :deep_symbolize_keys! + def symbolize_keys; to_hash.symbolize_keys! end + alias_method :to_options, :symbolize_keys + def deep_symbolize_keys; to_hash.deep_symbolize_keys! end + def to_options!; self end + + def select(*args, &block) + return to_enum(:select) unless block_given? + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + return to_enum(:reject) unless block_given? + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def transform_values(*args, &block) + return to_enum(:transform_values) unless block_given? + dup.tap { |hash| hash.transform_values!(*args, &block) } + end + + def transform_keys(*args, &block) + return to_enum(:transform_keys) unless block_given? + dup.tap { |hash| hash.transform_keys!(*args, &block) } + end + + def transform_keys! + return enum_for(:transform_keys!) { size } unless block_given? + keys.each do |key| + self[yield(key)] = delete(key) + end + self + end + + def slice(*keys) + keys.map! { |key| convert_key(key) } + self.class.new(super) + end + + def slice!(*keys) + keys.map! { |key| convert_key(key) } + super + end + + def compact + dup.tap(&:compact!) + end + + # Convert to a regular hash with string keys. + def to_hash + _new_hash = Hash.new + set_defaults(_new_hash) + + each do |key, value| + _new_hash[key] = convert_value(value, for: :to_hash) + end + _new_hash + end + + private + def convert_key(key) # :doc: + key.kind_of?(Symbol) ? key.to_s : key + end + + def convert_value(value, options = {}) # :doc: + if value.is_a? Hash + if options[:for] == :to_hash + value.to_hash + else + value.nested_under_indifferent_access + end + elsif value.is_a?(Array) + if options[:for] != :assignment || value.frozen? + value = value.dup + end + value.map! 
{ |e| convert_value(e, options) } + else + value + end + end + + def set_defaults(target) # :doc: + if default_proc + target.default_proc = default_proc.dup + else + target.default = default + end + end + end +end + +# :stopdoc: + +HashWithIndifferentAccess = ActiveSupport::HashWithIndifferentAccess diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/i18n.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/i18n.rb new file mode 100644 index 0000000..d60b3ef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/i18n.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +begin + require "i18n" +rescue LoadError => e + $stderr.puts "The i18n gem is not available. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/lazy_load_hooks" + +ActiveSupport.run_load_hooks(:i18n) +I18n.load_path << File.expand_path("locale/en.yml", __dir__) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/i18n_railtie.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/i18n_railtie.rb new file mode 100644 index 0000000..ee24ebd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/i18n_railtie.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/file_update_checker" +require "active_support/core_ext/array/wrap" + +# :enddoc: + +module I18n + class Railtie < Rails::Railtie + config.i18n = ActiveSupport::OrderedOptions.new + config.i18n.railties_load_path = [] + config.i18n.load_path = [] + config.i18n.fallbacks = ActiveSupport::OrderedOptions.new + + # Set the i18n configuration after initialization since a lot of + # configuration is still usually done in application initializers. + config.after_initialize do |app| + I18n::Railtie.initialize_i18n(app) + end + + # Trigger i18n config before any eager loading has happened + # so it's ready if any classes require it when eager loaded. + config.before_eager_load do |app| + I18n::Railtie.initialize_i18n(app) + end + + @i18n_inited = false + + # Setup i18n configuration. + def self.initialize_i18n(app) + return if @i18n_inited + + fallbacks = app.config.i18n.delete(:fallbacks) + + # Avoid issues with setting the default_locale by disabling available locales + # check while configuring. + enforce_available_locales = app.config.i18n.delete(:enforce_available_locales) + enforce_available_locales = I18n.enforce_available_locales if enforce_available_locales.nil? + I18n.enforce_available_locales = false + + reloadable_paths = [] + app.config.i18n.each do |setting, value| + case setting + when :railties_load_path + reloadable_paths = value + app.config.i18n.load_path.unshift(*value.flat_map(&:existent)) + when :load_path + I18n.load_path += value + else + I18n.send("#{setting}=", value) + end + end + + init_fallbacks(fallbacks) if fallbacks && validate_fallbacks(fallbacks) + + # Restore available locales check so it will take place from now on. + I18n.enforce_available_locales = enforce_available_locales + + directories = watched_dirs_with_extensions(reloadable_paths) + reloader = app.config.file_watcher.new(I18n.load_path.dup, directories) do + I18n.load_path.keep_if { |p| File.exist?(p) } + I18n.load_path |= reloadable_paths.flat_map(&:existent) + + I18n.reload! 
+ end + + app.reloaders << reloader + app.reloader.to_run do + reloader.execute_if_updated { require_unload_lock! } + end + reloader.execute + + @i18n_inited = true + end + + def self.include_fallbacks_module + I18n.backend.class.include(I18n::Backend::Fallbacks) + end + + def self.init_fallbacks(fallbacks) + include_fallbacks_module + + args = \ + case fallbacks + when ActiveSupport::OrderedOptions + [*(fallbacks[:defaults] || []) << fallbacks[:map]].compact + when Hash, Array + Array.wrap(fallbacks) + else # TrueClass + [I18n.default_locale] + end + + if args.empty? || args.first.is_a?(Hash) + args.unshift I18n.default_locale + end + + I18n.fallbacks = I18n::Locale::Fallbacks.new(*args) + end + + def self.validate_fallbacks(fallbacks) + case fallbacks + when ActiveSupport::OrderedOptions + !fallbacks.empty? + when TrueClass, Array, Hash + true + else + raise "Unexpected fallback type #{fallbacks.inspect}" + end + end + + def self.watched_dirs_with_extensions(paths) + paths.each_with_object({}) do |path, result| + result[path.absolute_current] = path.extensions + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflections.rb new file mode 100644 index 0000000..baf1cb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflections.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "active_support/inflector/inflections" + +#-- +# Defines the standard inflection rules. These are the starting point for +# new projects and are not considered complete. The current set of inflection +# rules is frozen. This means, we do not change them to become more complete. +# This is a safety measure to keep existing applications from breaking. 
+#++ +module ActiveSupport + Inflector.inflections(:en) do |inflect| + inflect.plural(/$/, "s") + inflect.plural(/s$/i, "s") + inflect.plural(/^(ax|test)is$/i, '\1es') + inflect.plural(/(octop|vir)us$/i, '\1i') + inflect.plural(/(octop|vir)i$/i, '\1i') + inflect.plural(/(alias|status)$/i, '\1es') + inflect.plural(/(bu)s$/i, '\1ses') + inflect.plural(/(buffal|tomat)o$/i, '\1oes') + inflect.plural(/([ti])um$/i, '\1a') + inflect.plural(/([ti])a$/i, '\1a') + inflect.plural(/sis$/i, "ses") + inflect.plural(/(?:([^f])fe|([lr])f)$/i, '\1\2ves') + inflect.plural(/(hive)$/i, '\1s') + inflect.plural(/([^aeiouy]|qu)y$/i, '\1ies') + inflect.plural(/(x|ch|ss|sh)$/i, '\1es') + inflect.plural(/(matr|vert|ind)(?:ix|ex)$/i, '\1ices') + inflect.plural(/^(m|l)ouse$/i, '\1ice') + inflect.plural(/^(m|l)ice$/i, '\1ice') + inflect.plural(/^(ox)$/i, '\1en') + inflect.plural(/^(oxen)$/i, '\1') + inflect.plural(/(quiz)$/i, '\1zes') + + inflect.singular(/s$/i, "") + inflect.singular(/(ss)$/i, '\1') + inflect.singular(/(n)ews$/i, '\1ews') + inflect.singular(/([ti])a$/i, '\1um') + inflect.singular(/((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$/i, '\1sis') + inflect.singular(/(^analy)(sis|ses)$/i, '\1sis') + inflect.singular(/([^f])ves$/i, '\1fe') + inflect.singular(/(hive)s$/i, '\1') + inflect.singular(/(tive)s$/i, '\1') + inflect.singular(/([lr])ves$/i, '\1f') + inflect.singular(/([^aeiouy]|qu)ies$/i, '\1y') + inflect.singular(/(s)eries$/i, '\1eries') + inflect.singular(/(m)ovies$/i, '\1ovie') + inflect.singular(/(x|ch|ss|sh)es$/i, '\1') + inflect.singular(/^(m|l)ice$/i, '\1ouse') + inflect.singular(/(bus)(es)?$/i, '\1') + inflect.singular(/(o)es$/i, '\1') + inflect.singular(/(shoe)s$/i, '\1') + inflect.singular(/(cris|test)(is|es)$/i, '\1is') + inflect.singular(/^(a)x[ie]s$/i, '\1xis') + inflect.singular(/(octop|vir)(us|i)$/i, '\1us') + inflect.singular(/(alias|status)(es)?$/i, '\1') + inflect.singular(/^(ox)en/i, '\1') + inflect.singular(/(vert|ind)ices$/i, '\1ex') + inflect.singular(/(matr)ices$/i, '\1ix') + inflect.singular(/(quiz)zes$/i, '\1') + inflect.singular(/(database)s$/i, '\1') + + inflect.irregular("person", "people") + inflect.irregular("man", "men") + inflect.irregular("child", "children") + inflect.irregular("sex", "sexes") + inflect.irregular("move", "moves") + inflect.irregular("zombie", "zombies") + + inflect.uncountable(%w(equipment information rice money species series fish sheep jeans police)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector.rb new file mode 100644 index 0000000..d77f04c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +# in case active_support/inflector is required without the rest of active_support +require "active_support/inflector/inflections" +require "active_support/inflector/transliterate" +require "active_support/inflector/methods" + +require "active_support/inflections" +require "active_support/core_ext/string/inflections" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/inflections.rb new file mode 100644 index 0000000..7e5dff1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/inflections.rb @@ -0,0 +1,260 @@ +# 
frozen_string_literal: true + +require "concurrent/map" +require "active_support/core_ext/array/prepend_and_append" +require "active_support/core_ext/regexp" +require "active_support/i18n" +require "active_support/deprecation" + +module ActiveSupport + module Inflector + extend self + + # A singleton instance of this class is yielded by Inflector.inflections, + # which can then be used to specify additional inflection rules. If passed + # an optional locale, rules for other languages can be specified. The + # default locale is :en. Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.plural /^(ox)$/i, '\1\2en' + # inflect.singular /^(ox)en/i, '\1' + # + # inflect.irregular 'octopus', 'octopi' + # + # inflect.uncountable 'equipment' + # end + # + # New rules are added at the top. So in the example above, the irregular + # rule for octopus will now be the first of the pluralization and + # singularization rules that is runs. This guarantees that your rules run + # before any of the rules that may already have been loaded. + class Inflections + @__instance__ = Concurrent::Map.new + + class Uncountables < Array + def initialize + @regex_array = [] + super + end + + def delete(entry) + super entry + @regex_array.delete(to_regex(entry)) + end + + def <<(*word) + add(word) + end + + def add(words) + words = words.flatten.map(&:downcase) + concat(words) + @regex_array += words.map { |word| to_regex(word) } + self + end + + def uncountable?(str) + @regex_array.any? { |regex| regex.match? str } + end + + private + def to_regex(string) + /\b#{::Regexp.escape(string)}\Z/i + end + end + + def self.instance(locale = :en) + @__instance__[locale] ||= new + end + + attr_reader :plurals, :singulars, :uncountables, :humans, :acronyms, :acronym_regex + deprecate :acronym_regex + + attr_reader :acronyms_camelize_regex, :acronyms_underscore_regex # :nodoc: + + def initialize + @plurals, @singulars, @uncountables, @humans, @acronyms = [], [], Uncountables.new, [], {} + define_acronym_regex_patterns + end + + # Private, for the test suite. + def initialize_dup(orig) # :nodoc: + %w(plurals singulars uncountables humans acronyms).each do |scope| + instance_variable_set("@#{scope}", orig.send(scope).dup) + end + define_acronym_regex_patterns + end + + # Specifies a new acronym. An acronym must be specified as it will appear + # in a camelized string. An underscore string that contains the acronym + # will retain the acronym when passed to +camelize+, +humanize+, or + # +titleize+. A camelized string that contains the acronym will maintain + # the acronym when titleized or humanized, and will convert the acronym + # into a non-delimited single lowercase word when passed to +underscore+. + # + # acronym 'HTML' + # titleize 'html' # => 'HTML' + # camelize 'html' # => 'HTML' + # underscore 'MyHTML' # => 'my_html' + # + # The acronym, however, must occur as a delimited unit and not be part of + # another word for conversions to recognize it: + # + # acronym 'HTTP' + # camelize 'my_http_delimited' # => 'MyHTTPDelimited' + # camelize 'https' # => 'Https', not 'HTTPs' + # underscore 'HTTPS' # => 'http_s', not 'https' + # + # acronym 'HTTPS' + # camelize 'https' # => 'HTTPS' + # underscore 'HTTPS' # => 'https' + # + # Note: Acronyms that are passed to +pluralize+ will no longer be + # recognized, since the acronym will not occur as a delimited unit in the + # pluralized result. 
To work around this, you must specify the pluralized + # form as an acronym as well: + # + # acronym 'API' + # camelize(pluralize('api')) # => 'Apis' + # + # acronym 'APIs' + # camelize(pluralize('api')) # => 'APIs' + # + # +acronym+ may be used to specify any word that contains an acronym or + # otherwise needs to maintain a non-standard capitalization. The only + # restriction is that the word must begin with a capital letter. + # + # acronym 'RESTful' + # underscore 'RESTful' # => 'restful' + # underscore 'RESTfulController' # => 'restful_controller' + # titleize 'RESTfulController' # => 'RESTful Controller' + # camelize 'restful' # => 'RESTful' + # camelize 'restful_controller' # => 'RESTfulController' + # + # acronym 'McDonald' + # underscore 'McDonald' # => 'mcdonald' + # camelize 'mcdonald' # => 'McDonald' + def acronym(word) + @acronyms[word.downcase] = word + define_acronym_regex_patterns + end + + # Specifies a new pluralization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def plural(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @plurals.prepend([rule, replacement]) + end + + # Specifies a new singularization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def singular(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @singulars.prepend([rule, replacement]) + end + + # Specifies a new irregular that applies to both pluralization and + # singularization at the same time. This can only be used for strings, not + # regular expressions. You simply pass the irregular in singular and + # plural form. + # + # irregular 'octopus', 'octopi' + # irregular 'person', 'people' + def irregular(singular, plural) + @uncountables.delete(singular) + @uncountables.delete(plural) + + s0 = singular[0] + srest = singular[1..-1] + + p0 = plural[0] + prest = plural[1..-1] + + if s0.upcase == p0.upcase + plural(/(#{s0})#{srest}$/i, '\1' + prest) + plural(/(#{p0})#{prest}$/i, '\1' + prest) + + singular(/(#{s0})#{srest}$/i, '\1' + srest) + singular(/(#{p0})#{prest}$/i, '\1' + srest) + else + plural(/#{s0.upcase}(?i)#{srest}$/, p0.upcase + prest) + plural(/#{s0.downcase}(?i)#{srest}$/, p0.downcase + prest) + plural(/#{p0.upcase}(?i)#{prest}$/, p0.upcase + prest) + plural(/#{p0.downcase}(?i)#{prest}$/, p0.downcase + prest) + + singular(/#{s0.upcase}(?i)#{srest}$/, s0.upcase + srest) + singular(/#{s0.downcase}(?i)#{srest}$/, s0.downcase + srest) + singular(/#{p0.upcase}(?i)#{prest}$/, s0.upcase + srest) + singular(/#{p0.downcase}(?i)#{prest}$/, s0.downcase + srest) + end + end + + # Specifies words that are uncountable and should not be inflected. + # + # uncountable 'money' + # uncountable 'money', 'information' + # uncountable %w( money information rice ) + def uncountable(*words) + @uncountables.add(words) + end + + # Specifies a humanized form of a string by a regular expression rule or + # by a string mapping. When using a regular expression based replacement, + # the normal humanize formatting is called after the replacement. When a + # string is used, the human form should be specified as desired (example: + # 'The name', not 'the_name'). 
+ # + # human /_cnt$/i, '\1_count' + # human 'legacy_col_person_name', 'Name' + def human(rule, replacement) + @humans.prepend([rule, replacement]) + end + + # Clears the loaded inflections within a given scope (default is + # :all). Give the scope as a symbol of the inflection type, the + # options are: :plurals, :singulars, :uncountables, + # :humans. + # + # clear :all + # clear :plurals + def clear(scope = :all) + case scope + when :all + @plurals, @singulars, @uncountables, @humans = [], [], Uncountables.new, [] + else + instance_variable_set "@#{scope}", [] + end + end + + private + + def define_acronym_regex_patterns + @acronym_regex = @acronyms.empty? ? /(?=a)b/ : /#{@acronyms.values.join("|")}/ + @acronyms_camelize_regex = /^(?:#{@acronym_regex}(?=\b|[A-Z_])|\w)/ + @acronyms_underscore_regex = /(?:(?<=([A-Za-z\d]))|\b)(#{@acronym_regex})(?=\b|[^a-z])/ + end + end + + # Yields a singleton instance of Inflector::Inflections so you can specify + # additional inflector rules. If passed an optional locale, rules for other + # languages can be specified. If not specified, defaults to :en. + # Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.uncountable 'rails' + # end + def inflections(locale = :en) + if block_given? + yield Inflections.instance(locale) + else + Inflections.instance(locale) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/methods.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/methods.rb new file mode 100644 index 0000000..ad90a0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/methods.rb @@ -0,0 +1,410 @@ +# frozen_string_literal: true + +require "active_support/inflections" +require "active_support/core_ext/regexp" + +module ActiveSupport + # The Inflector transforms words from singular to plural, class names to table + # names, modularized class names to ones without, and class names to foreign + # keys. The default inflections for pluralization, singularization, and + # uncountable words are kept in inflections.rb. + # + # The Rails core team has stated patches for the inflections library will not + # be accepted in order to avoid breaking legacy applications which may be + # relying on errant inflections. If you discover an incorrect inflection and + # require it for your application or wish to define rules for languages other + # than English, please correct or add them yourself (explained below). + module Inflector + extend self + + # Returns the plural form of the word in the string. + # + # If passed an optional +locale+ parameter, the word will be + # pluralized using rules defined for that language. By default, + # this parameter is set to :en. + # + # pluralize('post') # => "posts" + # pluralize('octopus') # => "octopi" + # pluralize('sheep') # => "sheep" + # pluralize('words') # => "words" + # pluralize('CamelOctopus') # => "CamelOctopi" + # pluralize('ley', :es) # => "leyes" + def pluralize(word, locale = :en) + apply_inflections(word, inflections(locale).plurals, locale) + end + + # The reverse of #pluralize, returns the singular form of a word in a + # string. + # + # If passed an optional +locale+ parameter, the word will be + # singularized using rules defined for that language. By default, + # this parameter is set to :en. 
+ # + # singularize('posts') # => "post" + # singularize('octopi') # => "octopus" + # singularize('sheep') # => "sheep" + # singularize('word') # => "word" + # singularize('CamelOctopi') # => "CamelOctopus" + # singularize('leyes', :es) # => "ley" + def singularize(word, locale = :en) + apply_inflections(word, inflections(locale).singulars, locale) + end + + # Converts strings to UpperCamelCase. + # If the +uppercase_first_letter+ parameter is set to false, then produces + # lowerCamelCase. + # + # Also converts '/' to '::' which is useful for converting + # paths to namespaces. + # + # camelize('active_model') # => "ActiveModel" + # camelize('active_model', false) # => "activeModel" + # camelize('active_model/errors') # => "ActiveModel::Errors" + # camelize('active_model/errors', false) # => "activeModel::Errors" + # + # As a rule of thumb you can think of +camelize+ as the inverse of + # #underscore, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def camelize(term, uppercase_first_letter = true) + string = term.to_s + if uppercase_first_letter + string = string.sub(/^[a-z\d]*/) { |match| inflections.acronyms[match] || match.capitalize } + else + string = string.sub(inflections.acronyms_camelize_regex) { |match| match.downcase } + end + string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{inflections.acronyms[$2] || $2.capitalize}" } + string.gsub!("/".freeze, "::".freeze) + string + end + + # Makes an underscored, lowercase form from the expression in the string. + # + # Changes '::' to '/' to convert namespaces to paths. + # + # underscore('ActiveModel') # => "active_model" + # underscore('ActiveModel::Errors') # => "active_model/errors" + # + # As a rule of thumb you can think of +underscore+ as the inverse of + # #camelize, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def underscore(camel_cased_word) + return camel_cased_word unless /[A-Z-]|::/.match?(camel_cased_word) + word = camel_cased_word.to_s.gsub("::".freeze, "/".freeze) + word.gsub!(inflections.acronyms_underscore_regex) { "#{$1 && '_'.freeze }#{$2.downcase}" } + word.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2'.freeze) + word.gsub!(/([a-z\d])([A-Z])/, '\1_\2'.freeze) + word.tr!("-".freeze, "_".freeze) + word.downcase! + word + end + + # Tweaks an attribute name for display to end users. + # + # Specifically, performs these transformations: + # + # * Applies human inflection rules to the argument. + # * Deletes leading underscores, if any. + # * Removes a "_id" suffix if present. + # * Replaces underscores with spaces, if any. + # * Downcases all words except acronyms. + # * Capitalizes the first word. + # The capitalization of the first word can be turned off by setting the + # +:capitalize+ option to false (default is true). + # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true (default is false). 
+ #
+ # humanize('employee_salary') # => "Employee salary"
+ # humanize('author_id') # => "Author"
+ # humanize('author_id', capitalize: false) # => "author"
+ # humanize('_id') # => "Id"
+ # humanize('author_id', keep_id_suffix: true) # => "Author Id"
+ #
+ # If "SSL" was defined to be an acronym:
+ #
+ # humanize('ssl_error') # => "SSL error"
+ #
+ def humanize(lower_case_and_underscored_word, capitalize: true, keep_id_suffix: false)
+ result = lower_case_and_underscored_word.to_s.dup
+
+ inflections.humans.each { |(rule, replacement)| break if result.sub!(rule, replacement) }
+
+ result.sub!(/\A_+/, "".freeze)
+ unless keep_id_suffix
+ result.sub!(/_id\z/, "".freeze)
+ end
+ result.tr!("_".freeze, " ".freeze)
+
+ result.gsub!(/([a-z\d]*)/i) do |match|
+ "#{inflections.acronyms[match.downcase] || match.downcase}"
+ end
+
+ if capitalize
+ result.sub!(/\A\w/) { |match| match.upcase }
+ end
+
+ result
+ end
+
+ # Converts just the first character to uppercase.
+ #
+ # upcase_first('what a Lovely Day') # => "What a Lovely Day"
+ # upcase_first('w') # => "W"
+ # upcase_first('') # => ""
+ def upcase_first(string)
+ string.length > 0 ? string[0].upcase.concat(string[1..-1]) : ""
+ end
+
+ # Capitalizes all the words and replaces some characters in the string to
+ # create a nicer looking title. +titleize+ is meant for creating pretty
+ # output. It is not used in the Rails internals.
+ #
+ # The trailing '_id','Id'.. can be kept and capitalized by setting the
+ # optional parameter +keep_id_suffix+ to true.
+ # By default, this parameter is false.
+ #
+ # +titleize+ is also aliased as +titlecase+.
+ #
+ # titleize('man from the boondocks') # => "Man From The Boondocks"
+ # titleize('x-men: the last stand') # => "X Men: The Last Stand"
+ # titleize('TheManWithoutAPast') # => "The Man Without A Past"
+ # titleize('raiders_of_the_lost_ark') # => "Raiders Of The Lost Ark"
+ # titleize('string_ending_with_id', keep_id_suffix: true) # => "String Ending With Id"
+ def titleize(word, keep_id_suffix: false)
+ humanize(underscore(word), keep_id_suffix: keep_id_suffix).gsub(/\b(?<!\w['’`])[a-z]/) do |match|
+ match.capitalize
+ end
+ end
+
+ # Creates the name of a table like Rails does for models to table
+ # names. This method uses the #pluralize method on the last word in
+ # the string.
+ #
+ # tableize('RawScaledScorer') # => "raw_scaled_scorers"
+ # tableize('ham_and_egg') # => "ham_and_eggs"
+ # tableize('fancyCategory') # => "fancy_categories"
+ def tableize(class_name)
+ pluralize(underscore(class_name))
+ end
+
+ # Creates a class name from a plural table name like Rails does for table
+ # names to models. Note that this returns a string and not a Class (To
+ # convert to an actual class follow +classify+ with #constantize).
+ #
+ # classify('ham_and_eggs') # => "HamAndEgg"
+ # classify('posts') # => "Post"
+ #
+ # Singular names are not handled correctly:
+ #
+ # classify('calculus') # => "Calculus"
+ def classify(table_name)
+ # strip out any leading schema name
+ camelize(singularize(table_name.to_s.sub(/.*\./, "".freeze)))
+ end
+
+ # Replaces underscores with dashes in the string.
+ #
+ # dasherize('puni_puni') # => "puni-puni"
+ def dasherize(underscored_word)
+ underscored_word.tr("_".freeze, "-".freeze)
+ end
+
+ # Removes the module part from the expression in the string.
+ #
+ # demodulize('ActiveSupport::Inflector::Inflections') # => "Inflections"
+ # demodulize('Inflections') # => "Inflections"
+ # demodulize('::Inflections') # => "Inflections"
+ # demodulize('') # => ""
+ #
+ # See also #deconstantize.
+ def demodulize(path)
+ path = path.to_s
+ if i = path.rindex("::")
+ path[(i + 2)..-1]
+ else
+ path
+ end
+ end
+
+ # Removes the rightmost segment from the constant expression in the string.
+ # + # deconstantize('Net::HTTP') # => "Net" + # deconstantize('::Net::HTTP') # => "::Net" + # deconstantize('String') # => "" + # deconstantize('::String') # => "" + # deconstantize('') # => "" + # + # See also #demodulize. + def deconstantize(path) + path.to_s[0, path.rindex("::") || 0] # implementation based on the one in facets' Module#spacename + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # foreign_key('Message') # => "message_id" + # foreign_key('Message', false) # => "messageid" + # foreign_key('Admin::Post') # => "post_id" + def foreign_key(class_name, separate_class_name_and_id_with_underscore = true) + underscore(demodulize(class_name)) + (separate_class_name_and_id_with_underscore ? "_id" : "id") + end + + # Tries to find a constant with the name specified in the argument string. + # + # constantize('Module') # => Module + # constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # constantize('C') # => 'outside', same as ::C + # end + # + # NameError is raised when the name is not in CamelCase or the constant is + # unknown. + def constantize(camel_cased_word) + names = camel_cased_word.split("::".freeze) + + # Trigger a built-in NameError exception including the ill-formed constant in the message. + Object.const_get(camel_cased_word) if names.empty? + + # Remove the first blank element in case of '::ClassName' notation. + names.shift if names.size > 1 && names.first.empty? + + names.inject(Object) do |constant, name| + if constant == Object + constant.const_get(name) + else + candidate = constant.const_get(name) + next candidate if constant.const_defined?(name, false) + next candidate unless Object.const_defined?(name) + + # Go down the ancestors to check if it is owned directly. The check + # stops when we reach Object or the end of ancestors tree. + constant = constant.ancestors.inject(constant) do |const, ancestor| + break const if ancestor == Object + break ancestor if ancestor.const_defined?(name, false) + const + end + + # owner is in Object, so raise + constant.const_get(name, false) + end + end + end + + # Tries to find a constant with the name specified in the argument string. + # + # safe_constantize('Module') # => Module + # safe_constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # safe_constantize('C') # => 'outside', same as ::C + # end + # + # +nil+ is returned when the name is not in CamelCase or the constant (or + # part of it) is unknown. 
+ # + # safe_constantize('blargle') # => nil + # safe_constantize('UnknownModule') # => nil + # safe_constantize('UnknownModule::Foo::Bar') # => nil + def safe_constantize(camel_cased_word) + constantize(camel_cased_word) + rescue NameError => e + raise if e.name && !(camel_cased_word.to_s.split("::").include?(e.name.to_s) || + e.name.to_s == camel_cased_word.to_s) + rescue ArgumentError => e + raise unless /not missing constant #{const_regexp(camel_cased_word)}!$/.match?(e.message) + rescue LoadError => e + raise unless /Unable to autoload constant #{const_regexp(camel_cased_word)}/.match?(e.message) + end + + # Returns the suffix that should be added to a number to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinal(1) # => "st" + # ordinal(2) # => "nd" + # ordinal(1002) # => "nd" + # ordinal(1003) # => "rd" + # ordinal(-11) # => "th" + # ordinal(-1021) # => "st" + def ordinal(number) + abs_number = number.to_i.abs + + if (11..13).include?(abs_number % 100) + "th" + else + case abs_number % 10 + when 1; "st" + when 2; "nd" + when 3; "rd" + else "th" + end + end + end + + # Turns a number into an ordinal string used to denote the position in an + # ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinalize(1) # => "1st" + # ordinalize(2) # => "2nd" + # ordinalize(1002) # => "1002nd" + # ordinalize(1003) # => "1003rd" + # ordinalize(-11) # => "-11th" + # ordinalize(-1021) # => "-1021st" + def ordinalize(number) + "#{number}#{ordinal(number)}" + end + + private + + # Mounts a regular expression, returned as a string to ease interpolation, + # that will match part by part the given constant. + # + # const_regexp("Foo::Bar::Baz") # => "Foo(::Bar(::Baz)?)?" + # const_regexp("::") # => "::" + def const_regexp(camel_cased_word) + parts = camel_cased_word.split("::".freeze) + + return Regexp.escape(camel_cased_word) if parts.blank? + + last = parts.pop + + parts.reverse.inject(last) do |acc, part| + part.empty? ? acc : "#{part}(::#{acc})?" + end + end + + # Applies inflection rules for +singularize+ and +pluralize+. + # + # If passed an optional +locale+ parameter, the uncountables will be + # found for that locale. + # + # apply_inflections('post', inflections.plurals, :en) # => "posts" + # apply_inflections('posts', inflections.singulars, :en) # => "post" + def apply_inflections(word, rules, locale = :en) + result = word.to_s.dup + + if word.empty? || inflections(locale).uncountables.uncountable?(result) + result + else + rules.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/transliterate.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/transliterate.rb new file mode 100644 index 0000000..6f2ca49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/inflector/transliterate.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/multibyte" +require "active_support/i18n" + +module ActiveSupport + module Inflector + # Replaces non-ASCII characters with an ASCII approximation, or if none + # exists, a replacement character which defaults to "?". + # + # transliterate('Ærøskøbing') + # # => "AEroskobing" + # + # Default approximations are provided for Western/Latin characters, + # e.g, "ø", "ñ", "é", "ß", etc. 
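# --- Editorial aside, not part of the vendored activesupport sources ---
# A minimal sketch of the transliteration behaviour documented above, using
# only the public Inflector API; the sample strings and expected results are
# taken from the gem's own doc comments (default :en approximations).
require "active_support/inflector"

ActiveSupport::Inflector.transliterate("Ærøskøbing") # => "AEroskobing"
ActiveSupport::Inflector.transliterate("Jürgen")     # => "Jurgen"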
+ # + # This method is I18n aware, so you can set up custom approximations for a + # locale. This can be useful, for example, to transliterate German's "ü" + # and "ö" to "ue" and "oe", or to add support for transliterating Russian + # to ASCII. + # + # In order to make your custom transliterations available, you must set + # them as the i18n.transliterate.rule i18n key: + # + # # Store the transliterations in locales/de.yml + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # # Or set them using Ruby + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: { + # 'ü' => 'ue', + # 'ö' => 'oe' + # } + # } + # }) + # + # The value for i18n.transliterate.rule can be a simple Hash that + # maps characters to ASCII approximations as shown above, or, for more + # complex requirements, a Proc: + # + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: ->(string) { MyTransliterator.transliterate(string) } + # } + # }) + # + # Now you can have different transliterations for each locale: + # + # I18n.locale = :en + # transliterate('Jürgen') + # # => "Jurgen" + # + # I18n.locale = :de + # transliterate('Jürgen') + # # => "Juergen" + def transliterate(string, replacement = "?".freeze) + raise ArgumentError, "Can only transliterate strings. Received #{string.class.name}" unless string.is_a?(String) + + I18n.transliterate( + ActiveSupport::Multibyte::Unicode.normalize( + ActiveSupport::Multibyte::Unicode.tidy_bytes(string), :c), + replacement: replacement) + end + + # Replaces special characters in a string so that it may be used as part of + # a 'pretty' URL. + # + # parameterize("Donald E. Knuth") # => "donald-e-knuth" + # parameterize("^très|Jolie-- ") # => "tres-jolie" + # + # To use a custom separator, override the +separator+ argument. + # + # parameterize("Donald E. Knuth", separator: '_') # => "donald_e_knuth" + # parameterize("^très|Jolie__ ", separator: '_') # => "tres_jolie" + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # parameterize("Donald E. Knuth", preserve_case: true) # => "Donald-E-Knuth" + # parameterize("^très|Jolie-- ", preserve_case: true) # => "tres-Jolie" + # + # It preserves dashes and underscores unless they are used as separators: + # + # parameterize("^très|Jolie__ ") # => "tres-jolie__" + # parameterize("^très|Jolie-- ", separator: "_") # => "tres_jolie--" + # parameterize("^très_Jolie-- ", separator: ".") # => "tres_jolie--" + # + def parameterize(string, separator: "-", preserve_case: false) + # Replace accented chars with their ASCII equivalents. + parameterized_string = transliterate(string) + + # Turn unwanted chars into the separator. + parameterized_string.gsub!(/[^a-z0-9\-_]+/i, separator) + + unless separator.nil? || separator.empty? + if separator == "-".freeze + re_duplicate_separator = /-{2,}/ + re_leading_trailing_separator = /^-|-$/i + else + re_sep = Regexp.escape(separator) + re_duplicate_separator = /#{re_sep}{2,}/ + re_leading_trailing_separator = /^#{re_sep}|#{re_sep}$/i + end + # No more than one of the separator in a row. + parameterized_string.gsub!(re_duplicate_separator, separator) + # Remove leading/trailing separator. + parameterized_string.gsub!(re_leading_trailing_separator, "".freeze) + end + + parameterized_string.downcase! 
unless preserve_case + parameterized_string + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json.rb new file mode 100644 index 0000000..d788717 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/json/decoding" +require "active_support/json/encoding" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json/decoding.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json/decoding.rb new file mode 100644 index 0000000..8c0e016 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json/decoding.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/delegation" +require "json" + +module ActiveSupport + # Look for and parse json strings that look like ISO 8601 times. + mattr_accessor :parse_json_times + + module JSON + # matches YAML-formatted dates + DATE_REGEX = /^\d{4}-\d{2}-\d{2}$/ + DATETIME_REGEX = /^(?:\d{4}-\d{2}-\d{2}|\d{4}-\d{1,2}-\d{1,2}[T \t]+\d{1,2}:\d{2}:\d{2}(\.[0-9]*)?(([ \t]*)Z|[-+]\d{2}?(:\d{2})?)?)$/ + + class << self + # Parses a JSON string (JavaScript Object Notation) into a hash. + # See http://www.json.org for more info. + # + # ActiveSupport::JSON.decode("{\"team\":\"rails\",\"players\":\"36\"}") + # => {"team" => "rails", "players" => "36"} + def decode(json) + data = ::JSON.parse(json, quirks_mode: true) + + if ActiveSupport.parse_json_times + convert_dates_from(data) + else + data + end + end + + # Returns the class of the error that will be raised when there is an + # error in decoding JSON. Using this method means you won't directly + # depend on the ActiveSupport's JSON implementation, in case it changes + # in the future. + # + # begin + # obj = ActiveSupport::JSON.decode(some_string) + # rescue ActiveSupport::JSON.parse_error + # Rails.logger.warn("Attempted to decode invalid JSON: #{some_string}") + # end + def parse_error + ::JSON::ParserError + end + + private + + def convert_dates_from(data) + case data + when nil + nil + when DATE_REGEX + begin + Date.parse(data) + rescue ArgumentError + data + end + when DATETIME_REGEX + begin + Time.zone.parse(data) + rescue ArgumentError + data + end + when Array + data.map! { |d| convert_dates_from(d) } + when Hash + data.each do |key, value| + data[key] = convert_dates_from(value) + end + else + data + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json/encoding.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json/encoding.rb new file mode 100644 index 0000000..1339c75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/json/encoding.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/json" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class << self + delegate :use_standard_json_time_format, :use_standard_json_time_format=, + :time_precision, :time_precision=, + :escape_html_entities_in_json, :escape_html_entities_in_json=, + :json_encoder, :json_encoder=, + to: :'ActiveSupport::JSON::Encoding' + end + + module JSON + # Dumps objects in JSON (JavaScript Object Notation). 
+ # See http://www.json.org for more info. + # + # ActiveSupport::JSON.encode({ team: 'rails', players: '36' }) + # # => "{\"team\":\"rails\",\"players\":\"36\"}" + def self.encode(value, options = nil) + Encoding.json_encoder.new(options).encode(value) + end + + module Encoding #:nodoc: + class JSONGemEncoder #:nodoc: + attr_reader :options + + def initialize(options = nil) + @options = options || {} + end + + # Encode the given object into a JSON string + def encode(value) + stringify jsonify value.as_json(options.dup) + end + + private + # Rails does more escaping than the JSON gem natively does (we + # escape \u2028 and \u2029 and optionally >, <, & to work around + # certain browser problems). + ESCAPED_CHARS = { + "\u2028" => '\u2028', + "\u2029" => '\u2029', + ">" => '\u003e', + "<" => '\u003c', + "&" => '\u0026', + } + + ESCAPE_REGEX_WITH_HTML_ENTITIES = /[\u2028\u2029><&]/u + ESCAPE_REGEX_WITHOUT_HTML_ENTITIES = /[\u2028\u2029]/u + + # This class wraps all the strings we see and does the extra escaping + class EscapedString < String #:nodoc: + def to_json(*) + if Encoding.escape_html_entities_in_json + super.gsub ESCAPE_REGEX_WITH_HTML_ENTITIES, ESCAPED_CHARS + else + super.gsub ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, ESCAPED_CHARS + end + end + + def to_s + self + end + end + + # Mark these as private so we don't leak encoding-specific constructs + private_constant :ESCAPED_CHARS, :ESCAPE_REGEX_WITH_HTML_ENTITIES, + :ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, :EscapedString + + # Convert an object into a "JSON-ready" representation composed of + # primitives like Hash, Array, String, Numeric, + # and +true+/+false+/+nil+. + # Recursively calls #as_json to the object to recursively build a + # fully JSON-ready object. + # + # This allows developers to implement #as_json without having to + # worry about what base types of objects they are allowed to return + # or having to remember to call #as_json recursively. + # + # Note: the +options+ hash passed to +object.to_json+ is only passed + # to +object.as_json+, not any of this method's recursive +#as_json+ + # calls. + def jsonify(value) + case value + when String + EscapedString.new(value) + when Numeric, NilClass, TrueClass, FalseClass + value.as_json + when Hash + Hash[value.map { |k, v| [jsonify(k), jsonify(v)] }] + when Array + value.map { |v| jsonify(v) } + else + jsonify value.as_json + end + end + + # Encode a "jsonified" Ruby data structure using the JSON gem + def stringify(jsonified) + ::JSON.generate(jsonified, quirks_mode: true, max_nesting: false) + end + end + + class << self + # If true, use ISO 8601 format for dates and times. Otherwise, fall back + # to the Active Support legacy format. + attr_accessor :use_standard_json_time_format + + # If true, encode >, <, & as escaped unicode sequences (e.g. > as \u003e) + # as a safety measure. + attr_accessor :escape_html_entities_in_json + + # Sets the precision of encoded time values. + # Defaults to 3 (equivalent to millisecond precision) + attr_accessor :time_precision + + # Sets the encoder used by Rails to encode Ruby objects into JSON strings + # in +Object#to_json+ and +ActiveSupport::JSON.encode+. 
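# --- Editorial aside, not part of the vendored activesupport sources ---
# A small, hedged sketch of the encoder behaviour documented above: symbol
# keys are stringified, and because escape_html_entities_in_json defaults to
# true, angle brackets come out as unicode escapes.
require "active_support/json"

ActiveSupport::JSON.encode(team: "rails", players: "36")
# => "{\"team\":\"rails\",\"players\":\"36\"}"
ActiveSupport::JSON.encode("<tag>")
# => "\"\u003ctag\u003e\""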
+ attr_accessor :json_encoder + end + + self.use_standard_json_time_format = true + self.escape_html_entities_in_json = true + self.json_encoder = JSONGemEncoder + self.time_precision = 3 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/key_generator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/key_generator.rb new file mode 100644 index 0000000..78f7d7c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/key_generator.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +require "concurrent/map" +require "openssl" + +module ActiveSupport + # KeyGenerator is a simple wrapper around OpenSSL's implementation of PBKDF2. + # It can be used to derive a number of keys for various purposes from a given secret. + # This lets Rails applications have a single secure secret, but avoid reusing that + # key in multiple incompatible contexts. + class KeyGenerator + def initialize(secret, options = {}) + @secret = secret + # The default iterations are higher than required for our key derivation uses + # on the off chance someone uses this for password storage + @iterations = options[:iterations] || 2**16 + end + + # Returns a derived key suitable for use. The default key_size is chosen + # to be compatible with the default settings of ActiveSupport::MessageVerifier. + # i.e. OpenSSL::Digest::SHA1#block_length + def generate_key(salt, key_size = 64) + OpenSSL::PKCS5.pbkdf2_hmac_sha1(@secret, salt, @iterations, key_size) + end + end + + # CachingKeyGenerator is a wrapper around KeyGenerator which allows users to avoid + # re-executing the key generation process when it's called using the same salt and + # key_size. + class CachingKeyGenerator + def initialize(key_generator) + @key_generator = key_generator + @cache_keys = Concurrent::Map.new + end + + # Returns a derived key suitable for use. + def generate_key(*args) + @cache_keys[args.join] ||= @key_generator.generate_key(*args) + end + end + + class LegacyKeyGenerator # :nodoc: + SECRET_MIN_LENGTH = 30 # Characters + + def initialize(secret) + ensure_secret_secure(secret) + @secret = secret + end + + def generate_key(salt) + @secret + end + + private + + # To prevent users from using something insecure like "Password" we make sure that the + # secret they've provided is at least 30 characters in length. + def ensure_secret_secure(secret) + if secret.blank? + raise ArgumentError, "A secret is required to generate an integrity hash " \ + "for cookie session data. Set a secret_key_base of at least " \ + "#{SECRET_MIN_LENGTH} characters in via `bin/rails credentials:edit`." + end + + if secret.length < SECRET_MIN_LENGTH + raise ArgumentError, "Secret should be something secure, " \ + "like \"#{SecureRandom.hex(16)}\". The value you " \ + "provided, \"#{secret}\", is shorter than the minimum length " \ + "of #{SECRET_MIN_LENGTH} characters." + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/lazy_load_hooks.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/lazy_load_hooks.rb new file mode 100644 index 0000000..dc8080c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/lazy_load_hooks.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +module ActiveSupport + # lazy_load_hooks allows Rails to lazily load a lot of components and thus + # making the app boot faster. 
Because of this feature now there is no need to + # require ActiveRecord::Base at boot time purely to apply + # configuration. Instead a hook is registered that applies configuration once + # ActiveRecord::Base is loaded. Here ActiveRecord::Base is + # used as example but this feature can be applied elsewhere too. + # + # Here is an example where +on_load+ method is called to register a hook. + # + # initializer 'active_record.initialize_timezone' do + # ActiveSupport.on_load(:active_record) do + # self.time_zone_aware_attributes = true + # self.default_timezone = :utc + # end + # end + # + # When the entirety of +ActiveRecord::Base+ has been + # evaluated then +run_load_hooks+ is invoked. The very last line of + # +ActiveRecord::Base+ is: + # + # ActiveSupport.run_load_hooks(:active_record, ActiveRecord::Base) + module LazyLoadHooks + def self.extended(base) # :nodoc: + base.class_eval do + @load_hooks = Hash.new { |h, k| h[k] = [] } + @loaded = Hash.new { |h, k| h[k] = [] } + @run_once = Hash.new { |h, k| h[k] = [] } + end + end + + # Declares a block that will be executed when a Rails component is fully + # loaded. + # + # Options: + # + # * :yield - Yields the object that run_load_hooks to +block+. + # * :run_once - Given +block+ will run only once. + def on_load(name, options = {}, &block) + @loaded[name].each do |base| + execute_hook(name, base, options, block) + end + + @load_hooks[name] << [block, options] + end + + def run_load_hooks(name, base = Object) + @loaded[name] << base + @load_hooks[name].each do |hook, options| + execute_hook(name, base, options, hook) + end + end + + private + + def with_execution_control(name, block, once) + unless @run_once[name].include?(block) + @run_once[name] << block if once + + yield + end + end + + def execute_hook(name, base, options, block) + with_execution_control(name, block, options[:run_once]) do + if options[:yield] + block.call(base) + else + base.instance_eval(&block) + end + end + end + end + + extend LazyLoadHooks +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/locale/en.yml b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/locale/en.yml new file mode 100644 index 0000000..c64b759 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/locale/en.yml @@ -0,0 +1,135 @@ +en: + date: + formats: + # Use the strftime parameters for formats. + # When no format has been given, it uses default. + # You can provide other formats here if you like! + default: "%Y-%m-%d" + short: "%b %d" + long: "%B %d, %Y" + + day_names: [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday] + abbr_day_names: [Sun, Mon, Tue, Wed, Thu, Fri, Sat] + + # Don't forget the nil at the beginning; there's no such thing as a 0th month + month_names: [~, January, February, March, April, May, June, July, August, September, October, November, December] + abbr_month_names: [~, Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec] + # Used in date_select and datetime_select. + order: + - year + - month + - day + + time: + formats: + default: "%a, %d %b %Y %H:%M:%S %z" + short: "%d %b %H:%M" + long: "%B %d, %Y %H:%M" + am: "am" + pm: "pm" + +# Used in array.to_sentence. 
+ support: + array: + words_connector: ", " + two_words_connector: " and " + last_word_connector: ", and " + number: + # Used in NumberHelper.number_to_delimited() + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: "." + # Delimits thousands (e.g. 1,000,000 is a million) (always in groups of three) + delimiter: "," + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3 + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false + # If set, the zeros after the decimal separator will always be stripped (eg.: 1.200 will be 1.2) + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_currency() + currency: + format: + # Where is the currency sign? %u is the currency unit, %n the number (default: $5.00) + format: "%u%n" + unit: "$" + # These five are to override number.format and are optional + separator: "." + delimiter: "," + precision: 2 + significant: false + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_percentage() + percentage: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + format: "%n%" + + # Used in NumberHelper.number_to_rounded() + precision: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_human_size() and NumberHelper.number_to_human() + human: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + precision: 3 + significant: true + strip_insignificant_zeros: true + # Used in number_to_human_size() + storage_units: + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u" + units: + byte: + one: "Byte" + other: "Bytes" + kb: "KB" + mb: "MB" + gb: "GB" + tb: "TB" + pb: "PB" + eb: "EB" + # Used in NumberHelper.number_to_human() + decimal_units: + format: "%n %u" + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. 
+ units: + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "" + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: Thousand + million: Million + billion: Billion + trillion: Trillion + quadrillion: Quadrillion diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber.rb new file mode 100644 index 0000000..0f7be06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber.rb @@ -0,0 +1,112 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/class/attribute" +require "active_support/subscriber" + +module ActiveSupport + # ActiveSupport::LogSubscriber is an object set to consume + # ActiveSupport::Notifications with the sole purpose of logging them. + # The log subscriber dispatches notifications to a registered object based + # on its given namespace. + # + # An example would be Active Record log subscriber responsible for logging + # queries: + # + # module ActiveRecord + # class LogSubscriber < ActiveSupport::LogSubscriber + # def sql(event) + # "#{event.payload[:name]} (#{event.duration}) #{event.payload[:sql]}" + # end + # end + # end + # + # And it's finally registered as: + # + # ActiveRecord::LogSubscriber.attach_to :active_record + # + # Since we need to know all instance methods before attaching the log + # subscriber, the line above should be called after your + # ActiveRecord::LogSubscriber definition. + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event (ActiveSupport::Notifications::Event) to + # the sql method. + # + # Log subscriber also has some helpers to deal with logging and automatically + # flushes all logs when the request finishes (via action_dispatch.callback + # notification) in a Rails environment. + class LogSubscriber < Subscriber + # Embed in a String to clear all previous ANSI sequences. + CLEAR = "\e[0m" + BOLD = "\e[1m" + + # Colors + BLACK = "\e[30m" + RED = "\e[31m" + GREEN = "\e[32m" + YELLOW = "\e[33m" + BLUE = "\e[34m" + MAGENTA = "\e[35m" + CYAN = "\e[36m" + WHITE = "\e[37m" + + mattr_accessor :colorize_logging, default: true + + class << self + def logger + @logger ||= if defined?(Rails) && Rails.respond_to?(:logger) + Rails.logger + end + end + + attr_writer :logger + + def log_subscribers + subscribers + end + + # Flush all log_subscribers' logger. + def flush_all! + logger.flush if logger.respond_to?(:flush) + end + end + + def logger + LogSubscriber.logger + end + + def start(name, id, payload) + super if logger + end + + def finish(name, id, payload) + super if logger + rescue => e + if logger + logger.error "Could not log #{name.inspect} event. #{e.class}: #{e.message} #{e.backtrace}" + end + end + + private + + %w(info debug warn error fatal unknown).each do |level| + class_eval <<-METHOD, __FILE__, __LINE__ + 1 + def #{level}(progname = nil, &block) + logger.#{level}(progname, &block) if logger + end + METHOD + end + + # Set color by using a symbol or one of the defined constants. If a third + # option is set to +true+, it also adds bold to the string. This is based + # on the Highline implementation and will automatically append CLEAR to the + # end of the returned String. 
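# --- Editorial aside, not part of the vendored activesupport sources ---
# A hedged usage sketch for the log subscriber and colorized logging helpers
# documented above. TimingLogSubscriber and the "my_app" namespace are made-up
# names; only the public LogSubscriber / Notifications API is used.
require "logger"
require "active_support/log_subscriber"
require "active_support/notifications"

class TimingLogSubscriber < ActiveSupport::LogSubscriber
  # Handles "render.my_app" events published via ActiveSupport::Notifications.
  def render(event)
    info color("#{event.name} took #{event.duration.round(1)}ms", GREEN, true)
  end
end

ActiveSupport::LogSubscriber.logger = Logger.new($stdout)
TimingLogSubscriber.attach_to :my_app

# Instrumenting an event in the subscribed namespace triggers the handler.
ActiveSupport::Notifications.instrument("render.my_app") { sleep 0.01 }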
+ def color(text, color, bold = false) # :doc: + return text unless colorize_logging + color = self.class.const_get(color.upcase) if color.is_a?(Symbol) + bold = bold ? BOLD : "" + "#{bold}#{color}#{text}#{CLEAR}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber/test_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber/test_helper.rb new file mode 100644 index 0000000..3f19ef5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/log_subscriber/test_helper.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "active_support/log_subscriber" +require "active_support/logger" +require "active_support/notifications" + +module ActiveSupport + class LogSubscriber + # Provides some helpers to deal with testing log subscribers by setting up + # notifications. Take for instance Active Record subscriber tests: + # + # class SyncLogSubscriberTest < ActiveSupport::TestCase + # include ActiveSupport::LogSubscriber::TestHelper + # + # setup do + # ActiveRecord::LogSubscriber.attach_to(:active_record) + # end + # + # def test_basic_query_logging + # Developer.all.to_a + # wait + # assert_equal 1, @logger.logged(:debug).size + # assert_match(/Developer Load/, @logger.logged(:debug).last) + # assert_match(/SELECT \* FROM "developers"/, @logger.logged(:debug).last) + # end + # end + # + # All you need to do is to ensure that your log subscriber is added to + # Rails::Subscriber, as in the second line of the code above. The test + # helpers are responsible for setting up the queue, subscriptions and + # turning colors in logs off. + # + # The messages are available in the @logger instance, which is a logger with + # limited powers (it actually does not send anything to your output), and + # you can collect them doing @logger.logged(level), where level is the level + # used in logging, like info, debug, warn and so on. + module TestHelper + def setup # :nodoc: + @logger = MockLogger.new + @notifier = ActiveSupport::Notifications::Fanout.new + + ActiveSupport::LogSubscriber.colorize_logging = false + + @old_notifier = ActiveSupport::Notifications.notifier + set_logger(@logger) + ActiveSupport::Notifications.notifier = @notifier + end + + def teardown # :nodoc: + set_logger(nil) + ActiveSupport::Notifications.notifier = @old_notifier + end + + class MockLogger + include ActiveSupport::Logger::Severity + + attr_reader :flush_count + attr_accessor :level + + def initialize(level = DEBUG) + @flush_count = 0 + @level = level + @logged = Hash.new { |h, k| h[k] = [] } + end + + def method_missing(level, message = nil) + if block_given? + @logged[level] << yield + else + @logged[level] << message + end + end + + def logged(level) + @logged[level].compact.map { |l| l.to_s.strip } + end + + def flush + @flush_count += 1 + end + + ActiveSupport::Logger::Severity.constants.each do |severity| + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{severity.downcase}? + #{severity} >= @level + end + EOT + end + end + + # Wait notifications to be published. + def wait + @notifier.wait + end + + # Overwrite if you use another logger in your log subscriber. 
+ # + # def logger + # ActiveRecord::Base.logger = @logger + # end + def set_logger(logger) + ActiveSupport::LogSubscriber.logger = logger + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger.rb new file mode 100644 index 0000000..8152a18 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger.rb @@ -0,0 +1,108 @@ +# frozen_string_literal: true + +require "active_support/logger_silence" +require "active_support/logger_thread_safe_level" +require "logger" + +module ActiveSupport + class Logger < ::Logger + include ActiveSupport::LoggerThreadSafeLevel + include LoggerSilence + + # Returns true if the logger destination matches one of the sources + # + # logger = Logger.new(STDOUT) + # ActiveSupport::Logger.logger_outputs_to?(logger, STDOUT) + # # => true + def self.logger_outputs_to?(logger, *sources) + logdev = logger.instance_variable_get("@logdev") + logger_source = logdev.dev if logdev.respond_to?(:dev) + sources.any? { |source| source == logger_source } + end + + # Broadcasts logs to multiple loggers. + def self.broadcast(logger) # :nodoc: + Module.new do + define_method(:add) do |*args, &block| + logger.add(*args, &block) + super(*args, &block) + end + + define_method(:<<) do |x| + logger << x + super(x) + end + + define_method(:close) do + logger.close + super() + end + + define_method(:progname=) do |name| + logger.progname = name + super(name) + end + + define_method(:formatter=) do |formatter| + logger.formatter = formatter + super(formatter) + end + + define_method(:level=) do |level| + logger.level = level + super(level) + end + + define_method(:local_level=) do |level| + logger.local_level = level if logger.respond_to?(:local_level=) + super(level) if respond_to?(:local_level=) + end + + define_method(:silence) do |level = Logger::ERROR, &block| + if logger.respond_to?(:silence) + logger.silence(level) do + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + else + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + end + end + end + + def initialize(*args) + super + @formatter = SimpleFormatter.new + after_initialize if respond_to? :after_initialize + end + + def add(severity, message = nil, progname = nil, &block) + return true if @logdev.nil? || (severity || UNKNOWN) < level + super + end + + Logger::Severity.constants.each do |severity| + class_eval(<<-EOT, __FILE__, __LINE__ + 1) + def #{severity.downcase}? # def debug? + Logger::#{severity} >= level # DEBUG >= level + end # end + EOT + end + + # Simple formatter which only displays the message. + class SimpleFormatter < ::Logger::Formatter + # This method is invoked when a log event occurs + def call(severity, timestamp, progname, msg) + "#{String === msg ? 
msg : msg.inspect}\n" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger_silence.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger_silence.rb new file mode 100644 index 0000000..89f32b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger_silence.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/module/attribute_accessors" +require "concurrent" + +module LoggerSilence + extend ActiveSupport::Concern + + included do + cattr_accessor :silencer, default: true + end + + # Silences the logger for the duration of the block. + def silence(temporary_level = Logger::ERROR) + if silencer + begin + old_local_level = local_level + self.local_level = temporary_level + + yield self + ensure + self.local_level = old_local_level + end + else + yield self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger_thread_safe_level.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger_thread_safe_level.rb new file mode 100644 index 0000000..52295d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/logger_thread_safe_level.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "fiber" + +module ActiveSupport + module LoggerThreadSafeLevel # :nodoc: + extend ActiveSupport::Concern + + def after_initialize + @local_levels = Concurrent::Map.new(initial_capacity: 2) + end + + def local_log_id + Fiber.current.__id__ + end + + def local_level + @local_levels[local_log_id] + end + + def local_level=(level) + if level + @local_levels[local_log_id] = level + else + @local_levels.delete(local_log_id) + end + end + + def level + local_level || super + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/message_encryptor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/message_encryptor.rb new file mode 100644 index 0000000..8b73270 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/message_encryptor.rb @@ -0,0 +1,229 @@ +# frozen_string_literal: true + +require "openssl" +require "base64" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/message_verifier" +require "active_support/messages/metadata" + +module ActiveSupport + # MessageEncryptor is a simple way to encrypt values which get stored + # somewhere you don't trust. + # + # The cipher text and initialization vector are base64 encoded and returned + # to you. + # + # This can be used in situations similar to the MessageVerifier, but + # where you don't want users to be able to determine the value of the payload. + # + # len = ActiveSupport::MessageEncryptor.key_len + # salt = SecureRandom.random_bytes(len) + # key = ActiveSupport::KeyGenerator.new('password').generate_key(salt, len) # => "\x89\xE0\x156\xAC..." + # crypt = ActiveSupport::MessageEncryptor.new(key) # => # + # encrypted_data = crypt.encrypt_and_sign('my secret data') # => "NlFBTTMwOUV5UlA1QlNEN2xkY2d6eThYWWh..." + # crypt.decrypt_and_verify(encrypted_data) # => "my secret data" + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. 
But they can also be + # confined to a specific +:purpose+. + # + # token = crypt.encrypt_and_sign("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # crypt.decrypt_and_verify(token, purpose: :login) # => "this is the chair" + # crypt.decrypt_and_verify(token, purpose: :shipping) # => nil + # crypt.decrypt_and_verify(token) # => nil + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = crypt.encrypt_and_sign("the conversation is lively") + # crypt.decrypt_and_verify(token, purpose: :scare_tactics) # => nil + # crypt.decrypt_and_verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # crypt.encrypt_and_sign(parcel, expires_in: 1.month) + # crypt.encrypt_and_sign(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned upto the expire time. + # Thereafter, verifying returns +nil+. + # + # === Rotating keys + # + # MessageEncryptor also supports rotating out old configurations by falling + # back to a stack of encryptors. Call +rotate+ to build and add an encryptor + # so +decrypt_and_verify+ will also try the fallback. + # + # By default any rotated encryptors use the values of the primary + # encryptor unless specified otherwise. + # + # You'd give your encryptor the new defaults: + # + # crypt = ActiveSupport::MessageEncryptor.new(@secret, cipher: "aes-256-gcm") + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # crypt.rotate old_secret # Fallback to an old secret instead of @secret. + # crypt.rotate cipher: "aes-256-cbc" # Fallback to an old cipher instead of aes-256-gcm. + # + # Though if both the secret and the cipher was changed at the same time, + # the above should be combined into: + # + # crypt.rotate old_secret, cipher: "aes-256-cbc" + class MessageEncryptor + prepend Messages::Rotator::Encryptor + + cattr_accessor :use_authenticated_message_encryption, instance_accessor: false, default: false + + class << self + def default_cipher #:nodoc: + if use_authenticated_message_encryption + "aes-256-gcm" + else + "aes-256-cbc" + end + end + end + + module NullSerializer #:nodoc: + def self.load(value) + value + end + + def self.dump(value) + value + end + end + + module NullVerifier #:nodoc: + def self.verify(value) + value + end + + def self.generate(value) + value + end + end + + class InvalidMessage < StandardError; end + OpenSSLCipherError = OpenSSL::Cipher::CipherError + + # Initialize a new MessageEncryptor. +secret+ must be at least as long as + # the cipher key size. For the default 'aes-256-gcm' cipher, this is 256 + # bits. If you are using a user-entered secret, you can generate a suitable + # key by using ActiveSupport::KeyGenerator or a similar key + # derivation function. + # + # First additional parameter is used as the signature key for +MessageVerifier+. + # This allows you to specify keys to encrypt and sign data. + # + # ActiveSupport::MessageEncryptor.new('secret', 'signature_secret') + # + # Options: + # * :cipher - Cipher to use. Can be any cipher returned by + # OpenSSL::Cipher.ciphers. Default is 'aes-256-gcm'. 
+ # * :digest - String of digest to use for signing. Default is + # +SHA1+. Ignored when using an AEAD cipher like 'aes-256-gcm'. + # * :serializer - Object serializer to use. Default is +Marshal+. + def initialize(secret, *signature_key_or_options) + options = signature_key_or_options.extract_options! + sign_secret = signature_key_or_options.first + @secret = secret + @sign_secret = sign_secret + @cipher = options[:cipher] || self.class.default_cipher + @digest = options[:digest] || "SHA1" unless aead_mode? + @verifier = resolve_verifier + @serializer = options[:serializer] || Marshal + end + + # Encrypt and sign a message. We need to sign the message in order to avoid + # padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def encrypt_and_sign(value, expires_at: nil, expires_in: nil, purpose: nil) + verifier.generate(_encrypt(value, expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + end + + # Decrypt and verify a message. We need to verify the message in order to + # avoid padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def decrypt_and_verify(data, purpose: nil, **) + _decrypt(verifier.verify(data), purpose) + end + + # Given a cipher, returns the key length of the cipher to help generate the key of desired size + def self.key_len(cipher = default_cipher) + OpenSSL::Cipher.new(cipher).key_len + end + + private + def _encrypt(value, **metadata_options) + cipher = new_cipher + cipher.encrypt + cipher.key = @secret + + # Rely on OpenSSL for the initialization vector + iv = cipher.random_iv + cipher.auth_data = "" if aead_mode? + + encrypted_data = cipher.update(Messages::Metadata.wrap(@serializer.dump(value), metadata_options)) + encrypted_data << cipher.final + + blob = "#{::Base64.strict_encode64 encrypted_data}--#{::Base64.strict_encode64 iv}" + blob = "#{blob}--#{::Base64.strict_encode64 cipher.auth_tag}" if aead_mode? + blob + end + + def _decrypt(encrypted_message, purpose) + cipher = new_cipher + encrypted_data, iv, auth_tag = encrypted_message.split("--".freeze).map { |v| ::Base64.strict_decode64(v) } + + # Currently the OpenSSL bindings do not raise an error if auth_tag is + # truncated, which would allow an attacker to easily forge it. See + # https://github.com/ruby/openssl/issues/63 + raise InvalidMessage if aead_mode? && (auth_tag.nil? || auth_tag.bytes.length != 16) + + cipher.decrypt + cipher.key = @secret + cipher.iv = iv + if aead_mode? + cipher.auth_tag = auth_tag + cipher.auth_data = "" + end + + decrypted_data = cipher.update(encrypted_data) + decrypted_data << cipher.final + + message = Messages::Metadata.verify(decrypted_data, purpose) + @serializer.load(message) if message + rescue OpenSSLCipherError, TypeError, ArgumentError + raise InvalidMessage + end + + def new_cipher + OpenSSL::Cipher.new(@cipher) + end + + def verifier + @verifier + end + + def aead_mode? + @aead_mode ||= new_cipher.authenticated? + end + + def resolve_verifier + if aead_mode? 
+ NullVerifier + else + MessageVerifier.new(@sign_secret || @secret, digest: @digest, serializer: NullSerializer) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/message_verifier.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/message_verifier.rb new file mode 100644 index 0000000..83c39c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/message_verifier.rb @@ -0,0 +1,205 @@ +# frozen_string_literal: true + +require "base64" +require "active_support/core_ext/object/blank" +require "active_support/security_utils" +require "active_support/messages/metadata" +require "active_support/messages/rotator" + +module ActiveSupport + # +MessageVerifier+ makes it easy to generate and verify messages which are + # signed to prevent tampering. + # + # This is useful for cases like remember-me tokens and auto-unsubscribe links + # where the session store isn't suitable or available. + # + # Remember Me: + # cookies[:remember_me] = @verifier.generate([@user.id, 2.weeks.from_now]) + # + # In the authentication filter: + # + # id, time = @verifier.verify(cookies[:remember_me]) + # if Time.now < time + # self.current_user = User.find(id) + # end + # + # By default it uses Marshal to serialize the message. If you want to use + # another serialization method, you can set the serializer in the options + # hash upon initialization: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', serializer: YAML) + # + # +MessageVerifier+ creates HMAC signatures using SHA1 hash algorithm by default. + # If you want to use a different hash algorithm, you can change it by providing + # +:digest+ key as an option while initializing the verifier: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', digest: 'SHA256') + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. But they can also be + # confined to a specific +:purpose+. + # + # token = @verifier.generate("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # @verifier.verified(token, purpose: :login) # => "this is the chair" + # @verifier.verified(token, purpose: :shipping) # => nil + # @verifier.verified(token) # => nil + # + # @verifier.verify(token, purpose: :login) # => "this is the chair" + # @verifier.verify(token, purpose: :shipping) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => ActiveSupport::MessageVerifier::InvalidSignature + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = @verifier.generate("the conversation is lively") + # @verifier.verified(token, purpose: :scare_tactics) # => nil + # @verifier.verified(token) # => "the conversation is lively" + # + # @verifier.verify(token, purpose: :scare_tactics) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # @verifier.generate(parcel, expires_in: 1.month) + # @verifier.generate(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned upto the expire time. 
+ # Thereafter, the +verified+ method returns +nil+ while +verify+ raises + # ActiveSupport::MessageVerifier::InvalidSignature. + # + # === Rotating keys + # + # MessageVerifier also supports rotating out old configurations by falling + # back to a stack of verifiers. Call +rotate+ to build and add a verifier to + # so either +verified+ or +verify+ will also try verifying with the fallback. + # + # By default any rotated verifiers use the values of the primary + # verifier unless specified otherwise. + # + # You'd give your verifier the new defaults: + # + # verifier = ActiveSupport::MessageVerifier.new(@secret, digest: "SHA512", serializer: JSON) + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # verifier.rotate old_secret # Fallback to an old secret instead of @secret. + # verifier.rotate digest: "SHA256" # Fallback to an old digest instead of SHA512. + # verifier.rotate serializer: Marshal # Fallback to an old serializer instead of JSON. + # + # Though the above would most likely be combined into one rotation: + # + # verifier.rotate old_secret, digest: "SHA256", serializer: Marshal + class MessageVerifier + prepend Messages::Rotator::Verifier + + class InvalidSignature < StandardError; end + + def initialize(secret, options = {}) + raise ArgumentError, "Secret should not be nil." unless secret + @secret = secret + @digest = options[:digest] || "SHA1" + @serializer = options[:serializer] || Marshal + end + + # Checks if a signed message could have been generated by signing an object + # with the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # verifier.valid_message?(signed_message) # => true + # + # tampered_message = signed_message.chop # editing the message invalidates the signature + # verifier.valid_message?(tampered_message) # => false + def valid_message?(signed_message) + return if signed_message.nil? || !signed_message.valid_encoding? || signed_message.blank? + + data, digest = signed_message.split("--".freeze) + data.present? && digest.present? && ActiveSupport::SecurityUtils.secure_compare(digest, generate_digest(data)) + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # + # signed_message = verifier.generate 'a private message' + # verifier.verified(signed_message) # => 'a private message' + # + # Returns +nil+ if the message was not signed with the same secret. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verified(signed_message) # => nil + # + # Returns +nil+ if the message is not Base64-encoded. + # + # invalid_message = "f--46a0120593880c733a53b6dad75b42ddc1c8996d" + # verifier.verified(invalid_message) # => nil + # + # Raises any error raised while decoding the signed message. 
+ # + # incompatible_message = "test--dad7b06c94abba8d46a15fafaef56c327665d5ff" + # verifier.verified(incompatible_message) # => TypeError: incompatible marshal file format + def verified(signed_message, purpose: nil, **) + if valid_message?(signed_message) + begin + data = signed_message.split("--".freeze)[0] + message = Messages::Metadata.verify(decode(data), purpose) + @serializer.load(message) if message + rescue ArgumentError => argument_error + return if argument_error.message.include?("invalid base64") + raise + end + end + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # + # verifier.verify(signed_message) # => 'a private message' + # + # Raises +InvalidSignature+ if the message was not signed with the same + # secret or was not Base64-encoded. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verify(signed_message) # => ActiveSupport::MessageVerifier::InvalidSignature + def verify(*args) + verified(*args) || raise(InvalidSignature) + end + + # Generates a signed message for the provided value. + # + # The message is signed with the +MessageVerifier+'s secret. Without knowing + # the secret, the original value cannot be extracted from the message. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # verifier.generate 'a private message' # => "BAhJIhRwcml2YXRlLW1lc3NhZ2UGOgZFVA==--e2d724331ebdee96a10fb99b089508d1c72bd772" + def generate(value, expires_at: nil, expires_in: nil, purpose: nil) + data = encode(Messages::Metadata.wrap(@serializer.dump(value), expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + "#{data}--#{generate_digest(data)}" + end + + private + def encode(data) + ::Base64.strict_encode64(data) + end + + def decode(data) + ::Base64.strict_decode64(data) + end + + def generate_digest(data) + require "openssl" unless defined?(OpenSSL) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.const_get(@digest).new, @secret, data) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/metadata.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/metadata.rb new file mode 100644 index 0000000..e97caac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/metadata.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require "time" + +module ActiveSupport + module Messages #:nodoc: + class Metadata #:nodoc: + def initialize(message, expires_at = nil, purpose = nil) + @message, @expires_at, @purpose = message, expires_at, purpose + end + + def as_json(options = {}) + { _rails: { message: @message, exp: @expires_at, pur: @purpose } } + end + + class << self + def wrap(message, expires_at: nil, expires_in: nil, purpose: nil) + if expires_at || expires_in || purpose + JSON.encode new(encode(message), pick_expiry(expires_at, expires_in), purpose) + else + message + end + end + + def verify(message, purpose) + extract_metadata(message).verify(purpose) + end + + private + def pick_expiry(expires_at, expires_in) + if expires_at + expires_at.utc.iso8601(3) + elsif expires_in + Time.now.utc.advance(seconds: expires_in).iso8601(3) + end + end + + def extract_metadata(message) + data = JSON.decode(message) rescue nil + + if data.is_a?(Hash) && data.key?("_rails") + new(decode(data["_rails"]["message"]), data["_rails"]["exp"], 
data["_rails"]["pur"]) + else + new(message) + end + end + + def encode(message) + ::Base64.strict_encode64(message) + end + + def decode(message) + ::Base64.strict_decode64(message) + end + end + + def verify(purpose) + @message if match?(purpose) && fresh? + end + + private + def match?(purpose) + @purpose.to_s == purpose.to_s + end + + def fresh? + @expires_at.nil? || Time.now.utc < Time.iso8601(@expires_at) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotation_configuration.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotation_configuration.rb new file mode 100644 index 0000000..bd50d6d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotation_configuration.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + class RotationConfiguration # :nodoc: + attr_reader :signed, :encrypted + + def initialize + @signed, @encrypted = [], [] + end + + def rotate(kind, *args) + case kind + when :signed + @signed << args + when :encrypted + @encrypted << args + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotator.rb new file mode 100644 index 0000000..823a399 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/messages/rotator.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + module Rotator # :nodoc: + def initialize(*, **options) + super + + @options = options + @rotations = [] + end + + def rotate(*secrets, **options) + @rotations << build_rotation(*secrets, @options.merge(options)) + end + + module Encryptor + include Rotator + + def decrypt_and_verify(*args, on_rotation: nil, **options) + super + rescue MessageEncryptor::InvalidMessage, MessageVerifier::InvalidSignature + run_rotations(on_rotation) { |encryptor| encryptor.decrypt_and_verify(*args, options) } || raise + end + + private + def build_rotation(secret = @secret, sign_secret = @sign_secret, options) + self.class.new(secret, sign_secret, options) + end + end + + module Verifier + include Rotator + + def verified(*args, on_rotation: nil, **options) + super || run_rotations(on_rotation) { |verifier| verifier.verified(*args, options) } + end + + private + def build_rotation(secret = @secret, options) + self.class.new(secret, options) + end + end + + private + def run_rotations(on_rotation) + @rotations.find do |rotation| + if message = yield(rotation) rescue next + on_rotation.call if on_rotation + return message + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte.rb new file mode 100644 index 0000000..3fe3a05 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module ActiveSupport #:nodoc: + module Multibyte + autoload :Chars, "active_support/multibyte/chars" + autoload :Unicode, "active_support/multibyte/unicode" + + # The proxy class returned when calling mb_chars. You can use this accessor + # to configure your own proxy class so you can support other encodings. 
See + # the ActiveSupport::Multibyte::Chars implementation for an example how to + # do this. + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + def self.proxy_class=(klass) + @proxy_class = klass + end + + # Returns the current proxy class. + def self.proxy_class + @proxy_class ||= ActiveSupport::Multibyte::Chars + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/chars.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/chars.rb new file mode 100644 index 0000000..8152b8f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/chars.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +require "active_support/json" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/regexp" + +module ActiveSupport #:nodoc: + module Multibyte #:nodoc: + # Chars enables you to work transparently with UTF-8 encoding in the Ruby + # String class without having extensive knowledge about the encoding. A + # Chars object accepts a string upon initialization and proxies String + # methods in an encoding safe manner. All the normal String methods are also + # implemented on the proxy. + # + # String methods are proxied through the Chars object, and can be accessed + # through the +mb_chars+ method. Methods which would normally return a + # String object now return a Chars object so methods can be chained. + # + # 'The Perfect String '.mb_chars.downcase.strip.normalize + # # => # + # + # Chars objects are perfectly interchangeable with String objects as long as + # no explicit class checks are made. If certain methods do explicitly check + # the class, call +to_s+ before you pass chars objects to them. + # + # bad.explicit_checking_method 'T'.mb_chars.downcase.to_s + # + # The default Chars implementation assumes that the encoding of the string + # is UTF-8, if you want to handle different encodings you can write your own + # multibyte string handler and configure it through + # ActiveSupport::Multibyte.proxy_class. + # + # class CharsForUTF32 + # def size + # @wrapped_string.size / 4 + # end + # + # def self.accepts?(string) + # string.length % 4 == 0 + # end + # end + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + class Chars + include Comparable + attr_reader :wrapped_string + alias to_s wrapped_string + alias to_str wrapped_string + + delegate :<=>, :=~, :acts_like_string?, to: :wrapped_string + + # Creates a new Chars instance by wrapping _string_. + def initialize(string) + @wrapped_string = string + @wrapped_string.force_encoding(Encoding::UTF_8) unless @wrapped_string.frozen? + end + + # Forward all undefined methods to the wrapped string. + def method_missing(method, *args, &block) + result = @wrapped_string.__send__(method, *args, &block) + if /!$/.match?(method) + self if result + else + result.kind_of?(String) ? chars(result) : result + end + end + + # Returns +true+ if _obj_ responds to the given method. Private methods + # are included in the search only if the optional second parameter + # evaluates to +true+. + def respond_to_missing?(method, include_private) + @wrapped_string.respond_to?(method, include_private) + end + + # Returns +true+ when the proxy class can handle the string. Returns + # +false+ otherwise. 
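      #
      #   A sketch of the check (assuming the literal below comes from a UTF-8 source file):
      #
      #     ActiveSupport::Multibyte::Chars.consumes?('Café')                      # => true
      #     ActiveSupport::Multibyte::Chars.consumes?('Café'.encode('ISO-8859-1')) # => false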
+ def self.consumes?(string) + string.encoding == Encoding::UTF_8 + end + + # Works just like String#split, with the exception that the items + # in the resulting list are Chars instances instead of String. This makes + # chaining methods easier. + # + # 'Café périferôl'.mb_chars.split(/é/).map { |part| part.upcase.to_s } # => ["CAF", " P", "RIFERÔL"] + def split(*args) + @wrapped_string.split(*args).map { |i| self.class.new(i) } + end + + # Works like String#slice!, but returns an instance of + # Chars, or +nil+ if the string was not modified. The string will not be + # modified if the range given is out of bounds + # + # string = 'Welcome' + # string.mb_chars.slice!(3) # => # + # string # => 'Welome' + # string.mb_chars.slice!(0..3) # => # + # string # => 'me' + def slice!(*args) + string_sliced = @wrapped_string.slice!(*args) + if string_sliced + chars(string_sliced) + end + end + + # Reverses all characters in the string. + # + # 'Café'.mb_chars.reverse.to_s # => 'éfaC' + def reverse + chars(Unicode.unpack_graphemes(@wrapped_string).reverse.flatten.pack("U*")) + end + + # Limits the byte size of the string to a number of bytes without breaking + # characters. Usable when the storage for a string is limited for some + # reason. + # + # 'こんにちは'.mb_chars.limit(7).to_s # => "こん" + def limit(limit) + slice(0...translate_offset(limit)) + end + + # Converts characters in the string to uppercase. + # + # 'Laurent, où sont les tests ?'.mb_chars.upcase.to_s # => "LAURENT, OÙ SONT LES TESTS ?" + def upcase + chars Unicode.upcase(@wrapped_string) + end + + # Converts characters in the string to lowercase. + # + # 'VĚDA A VÝZKUM'.mb_chars.downcase.to_s # => "věda a výzkum" + def downcase + chars Unicode.downcase(@wrapped_string) + end + + # Converts characters in the string to the opposite case. + # + # 'El Cañón'.mb_chars.swapcase.to_s # => "eL cAÑÓN" + def swapcase + chars Unicode.swapcase(@wrapped_string) + end + + # Converts the first character to uppercase and the remainder to lowercase. + # + # 'über'.mb_chars.capitalize.to_s # => "Über" + def capitalize + (slice(0) || chars("")).upcase + (slice(1..-1) || chars("")).downcase + end + + # Capitalizes the first letter of every word, when possible. + # + # "ÉL QUE SE ENTERÓ".mb_chars.titleize.to_s # => "Él Que Se Enteró" + # "日本語".mb_chars.titleize.to_s # => "日本語" + def titleize + chars(downcase.to_s.gsub(/\b('?\S)/u) { Unicode.upcase($1) }) + end + alias_method :titlecase, :titleize + + # Returns the KC normalization of the string by default. NFKC is + # considered the best normalization form for passing strings to databases + # and validations. + # + # * form - The form you want to normalize in. Should be one of the following: + # :c, :kc, :d, or :kd. Default is + # ActiveSupport::Multibyte::Unicode.default_normalization_form + def normalize(form = nil) + chars(Unicode.normalize(@wrapped_string, form)) + end + + # Performs canonical decomposition on all the characters. + # + # 'é'.length # => 2 + # 'é'.mb_chars.decompose.to_s.length # => 3 + def decompose + chars(Unicode.decompose(:canonical, @wrapped_string.codepoints.to_a).pack("U*")) + end + + # Performs composition on all the characters. + # + # 'é'.length # => 3 + # 'é'.mb_chars.compose.to_s.length # => 2 + def compose + chars(Unicode.compose(@wrapped_string.codepoints.to_a).pack("U*")) + end + + # Returns the number of grapheme clusters in the string. 
+ # + # 'क्षि'.mb_chars.length # => 4 + # 'क्षि'.mb_chars.grapheme_length # => 3 + def grapheme_length + Unicode.unpack_graphemes(@wrapped_string).length + end + + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(force = false) + chars(Unicode.tidy_bytes(@wrapped_string, force)) + end + + def as_json(options = nil) #:nodoc: + to_s.as_json(options) + end + + %w(capitalize downcase reverse tidy_bytes upcase).each do |method| + define_method("#{method}!") do |*args| + @wrapped_string = send(method, *args).to_s + self + end + end + + private + + def translate_offset(byte_offset) + return nil if byte_offset.nil? + return 0 if @wrapped_string == "" + + begin + @wrapped_string.byteslice(0...byte_offset).unpack("U*").length + rescue ArgumentError + byte_offset -= 1 + retry + end + end + + def chars(string) + self.class.new(string) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/unicode.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/unicode.rb new file mode 100644 index 0000000..f923061 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/multibyte/unicode.rb @@ -0,0 +1,394 @@ +# frozen_string_literal: true + +module ActiveSupport + module Multibyte + module Unicode + extend self + + # A list of all available normalization forms. + # See http://www.unicode.org/reports/tr15/tr15-29.html for more + # information about normalization. + NORMALIZATION_FORMS = [:c, :kc, :d, :kd] + + # The Unicode version that is supported by the implementation + UNICODE_VERSION = "9.0.0" + + # The default normalization used for operations that require + # normalization. It can be set to any of the normalizations + # in NORMALIZATION_FORMS. + # + # ActiveSupport::Multibyte::Unicode.default_normalization_form = :c + attr_accessor :default_normalization_form + @default_normalization_form = :kc + + # Hangul character boundaries and properties + HANGUL_SBASE = 0xAC00 + HANGUL_LBASE = 0x1100 + HANGUL_VBASE = 0x1161 + HANGUL_TBASE = 0x11A7 + HANGUL_LCOUNT = 19 + HANGUL_VCOUNT = 21 + HANGUL_TCOUNT = 28 + HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT + HANGUL_SCOUNT = 11172 + HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT + + # Detect whether the codepoint is in a certain character class. Returns + # +true+ when it's in the specified character class and +false+ otherwise. + # Valid character classes are: :cr, :lf, :l, + # :v, :lv, :lvt and :t. + # + # Primarily used by the grapheme cluster support. + def in_char_class?(codepoint, classes) + classes.detect { |c| database.boundary[c] === codepoint } ? true : false + end + + # Unpack the string at grapheme boundaries. Returns a list of character + # lists. + # + # Unicode.unpack_graphemes('क्षि') # => [[2325, 2381], [2359], [2367]] + # Unicode.unpack_graphemes('Café') # => [[67], [97], [102], [233]] + def unpack_graphemes(string) + codepoints = string.codepoints.to_a + unpacked = [] + pos = 0 + marker = 0 + eoc = codepoints.length + while (pos < eoc) + pos += 1 + previous = codepoints[pos - 1] + current = codepoints[pos] + + # See http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundary_Rules + should_break = + if pos == eoc + true + # GB3. 
CR X LF + elsif previous == database.boundary[:cr] && current == database.boundary[:lf] + false + # GB4. (Control|CR|LF) ÷ + elsif previous && in_char_class?(previous, [:control, :cr, :lf]) + true + # GB5. ÷ (Control|CR|LF) + elsif in_char_class?(current, [:control, :cr, :lf]) + true + # GB6. L X (L|V|LV|LVT) + elsif database.boundary[:l] === previous && in_char_class?(current, [:l, :v, :lv, :lvt]) + false + # GB7. (LV|V) X (V|T) + elsif in_char_class?(previous, [:lv, :v]) && in_char_class?(current, [:v, :t]) + false + # GB8. (LVT|T) X (T) + elsif in_char_class?(previous, [:lvt, :t]) && database.boundary[:t] === current + false + # GB9. X (Extend | ZWJ) + elsif in_char_class?(current, [:extend, :zwj]) + false + # GB9a. X SpacingMark + elsif database.boundary[:spacingmark] === current + false + # GB9b. Prepend X + elsif database.boundary[:prepend] === previous + false + # GB10. (E_Base | EBG) Extend* X E_Modifier + elsif (marker...pos).any? { |i| in_char_class?(codepoints[i], [:e_base, :e_base_gaz]) && codepoints[i + 1...pos].all? { |c| database.boundary[:extend] === c } } && database.boundary[:e_modifier] === current + false + # GB11. ZWJ X (Glue_After_Zwj | EBG) + elsif database.boundary[:zwj] === previous && in_char_class?(current, [:glue_after_zwj, :e_base_gaz]) + false + # GB12. ^ (RI RI)* RI X RI + # GB13. [^RI] (RI RI)* RI X RI + elsif codepoints[marker..pos].all? { |c| database.boundary[:regional_indicator] === c } && codepoints[marker..pos].count { |c| database.boundary[:regional_indicator] === c }.even? + false + # GB999. Any ÷ Any + else + true + end + + if should_break + unpacked << codepoints[marker..pos - 1] + marker = pos + end + end + unpacked + end + + # Reverse operation of unpack_graphemes. + # + # Unicode.pack_graphemes(Unicode.unpack_graphemes('क्षि')) # => 'क्षि' + def pack_graphemes(unpacked) + unpacked.flatten.pack("U*") + end + + # Re-order codepoints so the string becomes canonical. + def reorder_characters(codepoints) + length = codepoints.length - 1 + pos = 0 + while pos < length do + cp1, cp2 = database.codepoints[codepoints[pos]], database.codepoints[codepoints[pos + 1]] + if (cp1.combining_class > cp2.combining_class) && (cp2.combining_class > 0) + codepoints[pos..pos + 1] = cp2.code, cp1.code + pos += (pos > 0 ? -1 : 1) + else + pos += 1 + end + end + codepoints + end + + # Decompose composed characters to the decomposed form. + def decompose(type, codepoints) + codepoints.inject([]) do |decomposed, cp| + # if it's a hangul syllable starter character + if HANGUL_SBASE <= cp && cp < HANGUL_SLAST + sindex = cp - HANGUL_SBASE + ncp = [] # new codepoints + ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT + ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT + tindex = sindex % HANGUL_TCOUNT + ncp << (HANGUL_TBASE + tindex) unless tindex == 0 + decomposed.concat ncp + # if the codepoint is decomposable in with the current decomposition type + elsif (ncp = database.codepoints[cp].decomp_mapping) && (!database.codepoints[cp].decomp_type || type == :compatibility) + decomposed.concat decompose(type, ncp.dup) + else + decomposed << cp + end + end + end + + # Compose decomposed characters to the composed form. 
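      #   For example, a sketch using the codepoints for "e" plus a combining acute accent:
      #
      #     Unicode.compose([101, 769]) # => [233]  ("e" + combining acute composes to "é")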
+ def compose(codepoints) + pos = 0 + eoa = codepoints.length - 1 + starter_pos = 0 + starter_char = codepoints[0] + previous_combining_class = -1 + while pos < eoa + pos += 1 + lindex = starter_char - HANGUL_LBASE + # -- Hangul + if 0 <= lindex && lindex < HANGUL_LCOUNT + vindex = codepoints[starter_pos + 1] - HANGUL_VBASE rescue vindex = -1 + if 0 <= vindex && vindex < HANGUL_VCOUNT + tindex = codepoints[starter_pos + 2] - HANGUL_TBASE rescue tindex = -1 + if 0 <= tindex && tindex < HANGUL_TCOUNT + j = starter_pos + 2 + eoa -= 2 + else + tindex = 0 + j = starter_pos + 1 + eoa -= 1 + end + codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE + end + starter_pos += 1 + starter_char = codepoints[starter_pos] + # -- Other characters + else + current_char = codepoints[pos] + current = database.codepoints[current_char] + if current.combining_class > previous_combining_class + if ref = database.composition_map[starter_char] + composition = ref[current_char] + else + composition = nil + end + unless composition.nil? + codepoints[starter_pos] = composition + starter_char = composition + codepoints.delete_at pos + eoa -= 1 + pos -= 1 + previous_combining_class = -1 + else + previous_combining_class = current.combining_class + end + else + previous_combining_class = current.combining_class + end + if current.combining_class == 0 + starter_pos = pos + starter_char = codepoints[pos] + end + end + end + codepoints + end + + # Rubinius' String#scrub, however, doesn't support ASCII-incompatible chars. + if !defined?(Rubinius) + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(string, force = false) + return string if string.empty? + return recode_windows1252_chars(string) if force + string.scrub { |bad| recode_windows1252_chars(bad) } + end + else + def tidy_bytes(string, force = false) + return string if string.empty? + return recode_windows1252_chars(string) if force + + # We can't transcode to the same format, so we choose a nearly-identical encoding. + # We're going to 'transcode' bytes from UTF-8 when possible, then fall back to + # CP1252 when we get errors. The final string will be 'converted' back to UTF-8 + # before returning. + reader = Encoding::Converter.new(Encoding::UTF_8, Encoding::UTF_16LE) + + source = string.dup + out = "".force_encoding(Encoding::UTF_16LE) + + loop do + reader.primitive_convert(source, out) + _, _, _, error_bytes, _ = reader.primitive_errinfo + break if error_bytes.nil? + out << error_bytes.encode(Encoding::UTF_16LE, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + + reader.finish + + out.encode!(Encoding::UTF_8) + end + end + + # Returns the KC normalization of the string by default. NFKC is + # considered the best normalization form for passing strings to databases + # and validations. + # + # * string - The string to perform normalization on. + # * form - The form you want to normalize in. Should be one of + # the following: :c, :kc, :d, or :kd. + # Default is ActiveSupport::Multibyte::Unicode.default_normalization_form. 
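      #
      #   A minimal sketch, composing and then decomposing "é":
      #
      #     Unicode.normalize("e\u0301", :c)  # => "é"
      #     Unicode.normalize("é", :d).length # => 2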
+ def normalize(string, form = nil) + form ||= @default_normalization_form + # See http://www.unicode.org/reports/tr15, Table 1 + codepoints = string.codepoints.to_a + case form + when :d + reorder_characters(decompose(:canonical, codepoints)) + when :c + compose(reorder_characters(decompose(:canonical, codepoints))) + when :kd + reorder_characters(decompose(:compatibility, codepoints)) + when :kc + compose(reorder_characters(decompose(:compatibility, codepoints))) + else + raise ArgumentError, "#{form} is not a valid normalization variant", caller + end.pack("U*".freeze) + end + + def downcase(string) + apply_mapping string, :lowercase_mapping + end + + def upcase(string) + apply_mapping string, :uppercase_mapping + end + + def swapcase(string) + apply_mapping string, :swapcase_mapping + end + + # Holds data about a codepoint in the Unicode database. + class Codepoint + attr_accessor :code, :combining_class, :decomp_type, :decomp_mapping, :uppercase_mapping, :lowercase_mapping + + # Initializing Codepoint object with default values + def initialize + @combining_class = 0 + @uppercase_mapping = 0 + @lowercase_mapping = 0 + end + + def swapcase_mapping + uppercase_mapping > 0 ? uppercase_mapping : lowercase_mapping + end + end + + # Holds static data from the Unicode database. + class UnicodeDatabase + ATTRIBUTES = :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252 + + attr_writer(*ATTRIBUTES) + + def initialize + @codepoints = Hash.new(Codepoint.new) + @composition_exclusion = [] + @composition_map = {} + @boundary = {} + @cp1252 = {} + end + + # Lazy load the Unicode database so it's only loaded when it's actually used + ATTRIBUTES.each do |attr_name| + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{attr_name} # def codepoints + load # load + @#{attr_name} # @codepoints + end # end + EOS + end + + # Loads the Unicode database and returns all the internal objects of + # UnicodeDatabase. + def load + begin + @codepoints, @composition_exclusion, @composition_map, @boundary, @cp1252 = File.open(self.class.filename, "rb") { |f| Marshal.load f.read } + rescue => e + raise IOError.new("Couldn't load the Unicode tables for UTF8Handler (#{e.message}), ActiveSupport::Multibyte is unusable") + end + + # Redefine the === method so we can write shorter rules for grapheme cluster breaks + @boundary.each_key do |k| + @boundary[k].instance_eval do + def ===(other) + detect { |i| i === other } ? true : false + end + end if @boundary[k].kind_of?(Array) + end + + # define attr_reader methods for the instance variables + class << self + attr_reader(*ATTRIBUTES) + end + end + + # Returns the directory in which the data files are stored. + def self.dirname + File.expand_path("../values", __dir__) + end + + # Returns the filename for the data file for this version. 
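        #   For instance (a sketch; the leading part of the path depends on where the gem
        #   is installed):
        #
        #     UnicodeDatabase.filename # => ".../active_support/values/unicode_tables.dat"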
+ def self.filename + File.expand_path File.join(dirname, "unicode_tables.dat") + end + end + + private + + def apply_mapping(string, mapping) + database.codepoints + string.each_codepoint.map do |codepoint| + cp = database.codepoints[codepoint] + if cp && (ncp = cp.send(mapping)) && ncp > 0 + ncp + else + codepoint + end + end.pack("U*") + end + + def recode_windows1252_chars(string) + string.encode(Encoding::UTF_8, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + + def database + @database ||= UnicodeDatabase.new + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications.rb new file mode 100644 index 0000000..6207de8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications.rb @@ -0,0 +1,216 @@ +# frozen_string_literal: true + +require "active_support/notifications/instrumenter" +require "active_support/notifications/fanout" +require "active_support/per_thread_registry" + +module ActiveSupport + # = Notifications + # + # ActiveSupport::Notifications provides an instrumentation API for + # Ruby. + # + # == Instrumenters + # + # To instrument an event you just need to do: + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # That first executes the block and then notifies all subscribers once done. + # + # In the example above +render+ is the name of the event, and the rest is called + # the _payload_. The payload is a mechanism that allows instrumenters to pass + # extra information to subscribers. Payloads consist of a hash whose contents + # are arbitrary and generally depend on the event. + # + # == Subscribers + # + # You can consume those events and the information they provide by registering + # a subscriber. + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for this notification + # payload # => Hash, the payload + # end + # + # For instance, let's store all "render" events in an array: + # + # events = [] + # + # ActiveSupport::Notifications.subscribe('render') do |*args| + # events << ActiveSupport::Notifications::Event.new(*args) + # end + # + # That code returns right away, you are just subscribing to "render" events. + # The block is saved and will be called whenever someone instruments "render": + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # event = events.first + # event.name # => "render" + # event.duration # => 10 (in milliseconds) + # event.payload # => { extra: :information } + # + # The block in the subscribe call gets the name of the event, start + # timestamp, end timestamp, a string with a unique identifier for that event + # (something like "535801666f04d0298cd6"), and a hash with the payload, in + # that order. + # + # If an exception happens during that particular instrumentation the payload will + # have a key :exception with an array of two elements as value: a string with + # the name of the exception class, and the exception message. + # The :exception_object key of the payload will have the exception + # itself as the value. 
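  #   A short sketch of reading those keys in a subscriber (the ArgumentError here is
  #   just an assumed example of whatever the instrumented block raised):
  #
  #     ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload|
  #       payload[:exception]        # => ["ArgumentError", "..."] if the block raised
  #       payload[:exception_object] # => the ArgumentError instance itself
  #     end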
+ # + # As the previous example depicts, the class ActiveSupport::Notifications::Event + # is able to take the arguments as they come and provide an object-oriented + # interface to that data. + # + # It is also possible to pass an object which responds to call method + # as the second parameter to the subscribe method instead of a block: + # + # module ActionController + # class PageRequest + # def call(name, started, finished, unique_id, payload) + # Rails.logger.debug ['notification:', name, started, finished, unique_id, payload].join(' ') + # end + # end + # end + # + # ActiveSupport::Notifications.subscribe('process_action.action_controller', ActionController::PageRequest.new) + # + # resulting in the following output within the logs including a hash with the payload: + # + # notification: process_action.action_controller 2012-04-13 01:08:35 +0300 2012-04-13 01:08:35 +0300 af358ed7fab884532ec7 { + # controller: "Devise::SessionsController", + # action: "new", + # params: {"action"=>"new", "controller"=>"devise/sessions"}, + # format: :html, + # method: "GET", + # path: "/login/sign_in", + # status: 200, + # view_runtime: 279.3080806732178, + # db_runtime: 40.053 + # } + # + # You can also subscribe to all events whose name matches a certain regexp: + # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # ... + # end + # + # and even pass no argument to subscribe, in which case you are subscribing + # to all events. + # + # == Temporary Subscriptions + # + # Sometimes you do not want to subscribe to an event for the entire life of + # the application. There are two ways to unsubscribe. + # + # WARNING: The instrumentation framework is designed for long-running subscribers, + # use this feature sparingly because it wipes some internal caches and that has + # a negative impact on performance. + # + # === Subscribe While a Block Runs + # + # You can subscribe to some event temporarily while some block runs. For + # example, in + # + # callback = lambda {|*args| ... } + # ActiveSupport::Notifications.subscribed(callback, "sql.active_record") do + # ... + # end + # + # the callback will be called for all "sql.active_record" events instrumented + # during the execution of the block. The callback is unsubscribed automatically + # after that. + # + # === Manual Unsubscription + # + # The +subscribe+ method returns a subscriber object: + # + # subscriber = ActiveSupport::Notifications.subscribe("render") do |*args| + # ... + # end + # + # To prevent that block from being called anymore, just unsubscribe passing + # that reference: + # + # ActiveSupport::Notifications.unsubscribe(subscriber) + # + # You can also unsubscribe by passing the name of the subscriber object. Note + # that this will unsubscribe all subscriptions with the given name: + # + # ActiveSupport::Notifications.unsubscribe("render") + # + # == Default Queue + # + # Notifications ships with a queue implementation that consumes and publishes events + # to all log subscribers. You can use any queue implementation you want. + # + module Notifications + class << self + attr_accessor :notifier + + def publish(name, *args) + notifier.publish(name, *args) + end + + def instrument(name, payload = {}) + if notifier.listening?(name) + instrumenter.instrument(name, payload) { yield payload if block_given? } + else + yield payload if block_given? 
+ end + end + + def subscribe(*args, &block) + notifier.subscribe(*args, &block) + end + + def subscribed(callback, *args, &block) + subscriber = subscribe(*args, &callback) + yield + ensure + unsubscribe(subscriber) + end + + def unsubscribe(subscriber_or_name) + notifier.unsubscribe(subscriber_or_name) + end + + def instrumenter + InstrumentationRegistry.instance.instrumenter_for(notifier) + end + end + + # This class is a registry which holds all of the +Instrumenter+ objects + # in a particular thread local. To access the +Instrumenter+ object for a + # particular +notifier+, you can call the following method: + # + # InstrumentationRegistry.instrumenter_for(notifier) + # + # The instrumenters for multiple notifiers are held in a single instance of + # this class. + class InstrumentationRegistry # :nodoc: + extend ActiveSupport::PerThreadRegistry + + def initialize + @registry = {} + end + + def instrumenter_for(notifier) + @registry[notifier] ||= Instrumenter.new(notifier) + end + end + + self.notifier = Fanout.new + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/fanout.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/fanout.rb new file mode 100644 index 0000000..f4a766f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/fanout.rb @@ -0,0 +1,159 @@ +# frozen_string_literal: true + +require "mutex_m" +require "concurrent/map" + +module ActiveSupport + module Notifications + # This is a default queue implementation that ships with Notifications. + # It just pushes events to all registered log subscribers. + # + # This class is thread safe. All methods are reentrant. + class Fanout + include Mutex_m + + def initialize + @subscribers = [] + @listeners_for = Concurrent::Map.new + super + end + + def subscribe(pattern = nil, callable = nil, &block) + subscriber = Subscribers.new(pattern, callable || block) + synchronize do + @subscribers << subscriber + @listeners_for.clear + end + subscriber + end + + def unsubscribe(subscriber_or_name) + synchronize do + case subscriber_or_name + when String + @subscribers.reject! { |s| s.matches?(subscriber_or_name) } + else + @subscribers.delete(subscriber_or_name) + end + + @listeners_for.clear + end + end + + def start(name, id, payload) + listeners_for(name).each { |s| s.start(name, id, payload) } + end + + def finish(name, id, payload, listeners = listeners_for(name)) + listeners.each { |s| s.finish(name, id, payload) } + end + + def publish(name, *args) + listeners_for(name).each { |s| s.publish(name, *args) } + end + + def listeners_for(name) + # this is correctly done double-checked locking (Concurrent::Map's lookups have volatile semantics) + @listeners_for[name] || synchronize do + # use synchronisation when accessing @subscribers + @listeners_for[name] ||= @subscribers.select { |s| s.subscribed_to?(name) } + end + end + + def listening?(name) + listeners_for(name).any? + end + + # This is a sync queue, so there is no waiting. 
+ def wait + end + + module Subscribers # :nodoc: + def self.new(pattern, listener) + if listener.respond_to?(:start) && listener.respond_to?(:finish) + subscriber = Evented.new pattern, listener + else + subscriber = Timed.new pattern, listener + end + + unless pattern + AllMessages.new(subscriber) + else + subscriber + end + end + + class Evented #:nodoc: + def initialize(pattern, delegate) + @pattern = pattern + @delegate = delegate + @can_publish = delegate.respond_to?(:publish) + end + + def publish(name, *args) + if @can_publish + @delegate.publish name, *args + end + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def subscribed_to?(name) + @pattern === name + end + + def matches?(name) + @pattern && @pattern === name + end + end + + class Timed < Evented # :nodoc: + def publish(name, *args) + @delegate.call name, *args + end + + def start(name, id, payload) + timestack = Thread.current[:_timestack] ||= [] + timestack.push Time.now + end + + def finish(name, id, payload) + timestack = Thread.current[:_timestack] + started = timestack.pop + @delegate.call(name, started, Time.now, id, payload) + end + end + + class AllMessages # :nodoc: + def initialize(delegate) + @delegate = delegate + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def publish(name, *args) + @delegate.publish name, *args + end + + def subscribed_to?(name) + true + end + + alias :matches? :=== + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/instrumenter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/instrumenter.rb new file mode 100644 index 0000000..e99f5ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/notifications/instrumenter.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require "securerandom" + +module ActiveSupport + module Notifications + # Instrumenters are stored in a thread local. + class Instrumenter + attr_reader :id + + def initialize(notifier) + @id = unique_id + @notifier = notifier + end + + # Instrument the given block by measuring the time taken to execute it + # and publish it. Notice that events get sent even if an error occurs + # in the passed-in block. + def instrument(name, payload = {}) + # some of the listeners might have state + listeners_state = start name, payload + begin + yield payload + rescue Exception => e + payload[:exception] = [e.class.name, e.message] + payload[:exception_object] = e + raise e + ensure + finish_with_state listeners_state, name, payload + end + end + + # Send a start notification with +name+ and +payload+. + def start(name, payload) + @notifier.start name, @id, payload + end + + # Send a finish notification with +name+ and +payload+. 
+ def finish(name, payload) + @notifier.finish name, @id, payload + end + + def finish_with_state(listeners_state, name, payload) + @notifier.finish name, @id, payload, listeners_state + end + + private + + def unique_id + SecureRandom.hex(10) + end + end + + class Event + attr_reader :name, :time, :transaction_id, :payload, :children + attr_accessor :end + + def initialize(name, start, ending, transaction_id, payload) + @name = name + @payload = payload.dup + @time = start + @transaction_id = transaction_id + @end = ending + @children = [] + @duration = nil + end + + # Returns the difference in milliseconds between when the execution of the + # event started and when it ended. + # + # ActiveSupport::Notifications.subscribe('wait') do |*args| + # @event = ActiveSupport::Notifications::Event.new(*args) + # end + # + # ActiveSupport::Notifications.instrument('wait') do + # sleep 1 + # end + # + # @event.duration # => 1000.138 + def duration + @duration ||= 1000.0 * (self.end - time) + end + + def <<(event) + @children << event + end + + def parent_of?(event) + @children.include? event + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper.rb new file mode 100644 index 0000000..8fd6e93 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper.rb @@ -0,0 +1,371 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + extend ActiveSupport::Autoload + + eager_autoload do + autoload :NumberConverter + autoload :RoundingHelper + autoload :NumberToRoundedConverter + autoload :NumberToDelimitedConverter + autoload :NumberToHumanConverter + autoload :NumberToHumanSizeConverter + autoload :NumberToPhoneConverter + autoload :NumberToCurrencyConverter + autoload :NumberToPercentageConverter + end + + extend self + + # Formats a +number+ into a phone number (US by default e.g., (555) + # 123-9876). You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :area_code - Adds parentheses around the area code. + # * :delimiter - Specifies the delimiter to use + # (defaults to "-"). + # * :extension - Specifies an extension to add to the + # end of the generated number. + # * :country_code - Sets the country code for the phone + # number. + # * :pattern - Specifies how the number is divided into three + # groups with the custom regexp to override the default format. + # ==== Examples + # + # number_to_phone(5551234) # => "555-1234" + # number_to_phone('5551234') # => "555-1234" + # number_to_phone(1235551234) # => "123-555-1234" + # number_to_phone(1235551234, area_code: true) # => "(123) 555-1234" + # number_to_phone(1235551234, delimiter: ' ') # => "123 555 1234" + # number_to_phone(1235551234, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # number_to_phone(1235551234, country_code: 1) # => "+1-123-555-1234" + # number_to_phone('123a456') # => "123a456" + # + # number_to_phone(1235551234, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # number_to_phone(75561234567, pattern: /(\d{1,4})(\d{4})(\d{4})$/, area_code: true) + # # => "(755) 6123-4567" + # number_to_phone(13312345678, pattern: /(\d{3})(\d{4})(\d{4})$/) + # # => "133-1234-5678" + def number_to_phone(number, options = {}) + NumberToPhoneConverter.convert(number, options) + end + + # Formats a +number+ into a currency string (e.g., $13.65). 
You + # can customize the format in the +options+ hash. + # + # The currency unit and number formatting of the current locale will be used + # unless otherwise specified in the provided options. No currency conversion + # is performed. If the user is given a way to change their locale, they will + # also be able to change the relative value of the currency displayed with + # this helper. If your application will ever support multiple locales, you + # may want to specify a constant :locale option or consider + # using a library capable of currency conversion. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the level of precision (defaults + # to 2). + # * :unit - Sets the denomination of the currency + # (defaults to "$"). + # * :separator - Sets the separator between the units + # (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :format - Sets the format for non-negative numbers + # (defaults to "%u%n"). Fields are %u for the + # currency, and %n for the number. + # * :negative_format - Sets the format for negative + # numbers (defaults to prepending a hyphen to the formatted + # number given by :format). Accepts the same fields + # than :format, except %n is here the + # absolute value of the number. + # + # ==== Examples + # + # number_to_currency(1234567890.50) # => "$1,234,567,890.50" + # number_to_currency(1234567890.506) # => "$1,234,567,890.51" + # number_to_currency(1234567890.506, precision: 3) # => "$1,234,567,890.506" + # number_to_currency(1234567890.506, locale: :fr) # => "1 234 567 890,51 €" + # number_to_currency('123a456') # => "$123a456" + # + # number_to_currency(-1234567890.50, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + def number_to_currency(number, options = {}) + NumberToCurrencyConverter.convert(number, options) + end + + # Formats a +number+ as a percentage string (e.g., 65%). You can + # customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). + # * :format - Specifies the format of the percentage + # string The number field is %n (defaults to "%n%"). 
+ # + # ==== Examples + # + # number_to_percentage(100) # => "100.000%" + # number_to_percentage('98') # => "98.000%" + # number_to_percentage(100, precision: 0) # => "100%" + # number_to_percentage(1000, delimiter: '.', separator: ',') # => "1.000,000%" + # number_to_percentage(302.24398923423, precision: 5) # => "302.24399%" + # number_to_percentage(1000, locale: :fr) # => "1000,000%" + # number_to_percentage(1000, precision: nil) # => "1000%" + # number_to_percentage('98a') # => "98a%" + # number_to_percentage(100, format: '%n %') # => "100.000 %" + def number_to_percentage(number, options = {}) + NumberToPercentageConverter.convert(number, options) + end + + # Formats a +number+ with grouped thousands using +delimiter+ + # (e.g., 12,324). You can customize the format in the +options+ + # hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter_pattern - Sets a custom regular expression used for + # deriving the placement of delimiter. Helpful when using currency formats + # like INR. + # + # ==== Examples + # + # number_to_delimited(12345678) # => "12,345,678" + # number_to_delimited('123456') # => "123,456" + # number_to_delimited(12345678.05) # => "12,345,678.05" + # number_to_delimited(12345678, delimiter: '.') # => "12.345.678" + # number_to_delimited(12345678, delimiter: ',') # => "12,345,678" + # number_to_delimited(12345678.05, separator: ' ') # => "12,345,678 05" + # number_to_delimited(12345678.05, locale: :fr) # => "12 345 678,05" + # number_to_delimited('112a') # => "112a" + # number_to_delimited(98765432.98, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # number_to_delimited("123456.78", + # delimiter_pattern: /(\d+?)(?=(\d\d)+(\d)(?!\d))/) + # # => "1,23,456.78" + def number_to_delimited(number, options = {}) + NumberToDelimitedConverter.convert(number, options) + end + + # Formats a +number+ with the specified level of + # :precision (e.g., 112.32 has a precision of 2 if + # +:significant+ is +false+, and 5 if +:significant+ is +true+). + # You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). 
+ # + # ==== Examples + # + # number_to_rounded(111.2345) # => "111.235" + # number_to_rounded(111.2345, precision: 2) # => "111.23" + # number_to_rounded(13, precision: 5) # => "13.00000" + # number_to_rounded(389.32314, precision: 0) # => "389" + # number_to_rounded(111.2345, significant: true) # => "111" + # number_to_rounded(111.2345, precision: 1, significant: true) # => "100" + # number_to_rounded(13, precision: 5, significant: true) # => "13.000" + # number_to_rounded(13, precision: nil) # => "13" + # number_to_rounded(111.234, locale: :fr) # => "111,234" + # + # number_to_rounded(13, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # + # number_to_rounded(389.32314, precision: 4, significant: true) # => "389.3" + # number_to_rounded(1111.2345, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + def number_to_rounded(number, options = {}) + NumberToRoundedConverter.convert(number, options) + end + + # Formats the bytes in +number+ into a more understandable + # representation (e.g., giving it 1500 yields 1.5 KB). This + # method is useful for reporting file sizes to users. You can + # customize the format in the +options+ hash. + # + # See number_to_human if you want to pretty-print a + # generic number. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # + # ==== Examples + # + # number_to_human_size(123) # => "123 Bytes" + # number_to_human_size(1234) # => "1.21 KB" + # number_to_human_size(12345) # => "12.1 KB" + # number_to_human_size(1234567) # => "1.18 MB" + # number_to_human_size(1234567890) # => "1.15 GB" + # number_to_human_size(1234567890123) # => "1.12 TB" + # number_to_human_size(1234567890123456) # => "1.1 PB" + # number_to_human_size(1234567890123456789) # => "1.07 EB" + # number_to_human_size(1234567, precision: 2) # => "1.2 MB" + # number_to_human_size(483989, precision: 2) # => "470 KB" + # number_to_human_size(1234567, precision: 2, separator: ',') # => "1,2 MB" + # number_to_human_size(1234567890123, precision: 5) # => "1.1228 TB" + # number_to_human_size(524288000, precision: 5) # => "500 MB" + def number_to_human_size(number, options = {}) + NumberToHumanSizeConverter.convert(number, options) + end + + # Pretty prints (formats and approximates) a number in a way it + # is more readable by humans (eg.: 1200000000 becomes "1.2 + # Billion"). This is useful for numbers that can get very large + # (and too hard to read). + # + # See number_to_human_size if you want to print a file + # size. + # + # You can also define your own unit-quantifier names if you want + # to use other decimal units (eg.: 1500 becomes "1.5 + # kilometers", 0.150 becomes "150 milliliters", etc). You may + # define a wide range of unit quantifiers, even fractional ones + # (centi, deci, mili, etc). + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). 
+ # * :precision - Sets the precision of the number + # (defaults to 3). + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # * :units - A Hash of unit quantifier names. Or a + # string containing an i18n scope where to find this hash. It + # might have the following keys: + # * *integers*: :unit, :ten, + # :hundred, :thousand, :million, + # :billion, :trillion, + # :quadrillion + # * *fractionals*: :deci, :centi, + # :mili, :micro, :nano, + # :pico, :femto + # * :format - Sets the format of the output string + # (defaults to "%n %u"). The field types are: + # * %u - The quantifier (ex.: 'thousand') + # * %n - The number + # + # ==== Examples + # + # number_to_human(123) # => "123" + # number_to_human(1234) # => "1.23 Thousand" + # number_to_human(12345) # => "12.3 Thousand" + # number_to_human(1234567) # => "1.23 Million" + # number_to_human(1234567890) # => "1.23 Billion" + # number_to_human(1234567890123) # => "1.23 Trillion" + # number_to_human(1234567890123456) # => "1.23 Quadrillion" + # number_to_human(1234567890123456789) # => "1230 Quadrillion" + # number_to_human(489939, precision: 2) # => "490 Thousand" + # number_to_human(489939, precision: 4) # => "489.9 Thousand" + # number_to_human(1234567, precision: 4, + # significant: false) # => "1.2346 Million" + # number_to_human(1234567, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + # + # number_to_human(500000000, precision: 5) # => "500 Million" + # number_to_human(12345012345, significant: false) # => "12.345 Billion" + # + # Non-significant zeros after the decimal separator are stripped + # out by default (set :strip_insignificant_zeros to + # +false+ to change that): + # + # number_to_human(12.00001) # => "12" + # number_to_human(12.00001, strip_insignificant_zeros: false) # => "12.0" + # + # ==== Custom Unit Quantifiers + # + # You can also use your own custom unit quantifiers: + # number_to_human(500000, units: { unit: 'ml', thousand: 'lt' }) # => "500 lt" + # + # If in your I18n locale you have: + # + # distance: + # centi: + # one: "centimeter" + # other: "centimeters" + # unit: + # one: "meter" + # other: "meters" + # thousand: + # one: "kilometer" + # other: "kilometers" + # billion: "gazillion-distance" + # + # Then you could do: + # + # number_to_human(543934, units: :distance) # => "544 kilometers" + # number_to_human(54393498, units: :distance) # => "54400 kilometers" + # number_to_human(54393498000, units: :distance) # => "54.4 gazillion-distance" + # number_to_human(343, units: :distance, precision: 1) # => "300 meters" + # number_to_human(1, units: :distance) # => "1 meter" + # number_to_human(0.34, units: :distance) # => "34 centimeters" + def number_to_human(number, options = {}) + NumberToHumanConverter.convert(number, options) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_converter.rb new file mode 100644 index 0000000..06ba797 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_converter.rb @@ -0,0 +1,184 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/hash/keys" +require "active_support/i18n" +require "active_support/core_ext/class/attribute" + +module ActiveSupport + module NumberHelper + class NumberConverter # :nodoc: + # Default and i18n option namespace per class + class_attribute :namespace + + # Does the object need a number that is a valid float? + class_attribute :validate_float + + attr_reader :number, :opts + + DEFAULTS = { + # Used in number_to_delimited + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: { + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: ".", + # Delimits thousands (e.g. 1,000,000 is a million) (always in groups of three) + delimiter: ",", + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3, + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false, + # If set, the zeros after the decimal separator will always be stripped (eg.: 1.200 will be 1.2) + strip_insignificant_zeros: false + }, + + # Used in number_to_currency + currency: { + format: { + format: "%u%n", + negative_format: "-%u%n", + unit: "$", + # These five are to override number.format and are optional + separator: ".", + delimiter: ",", + precision: 2, + significant: false, + strip_insignificant_zeros: false + } + }, + + # Used in number_to_percentage + percentage: { + format: { + delimiter: "", + format: "%n%" + } + }, + + # Used in number_to_rounded + precision: { + format: { + delimiter: "" + } + }, + + # Used in number_to_human_size and number_to_human + human: { + format: { + # These five are to override number.format and are optional + delimiter: "", + precision: 3, + significant: true, + strip_insignificant_zeros: true + }, + # Used in number_to_human_size + storage_units: { + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u", + units: { + byte: "Bytes", + kb: "KB", + mb: "MB", + gb: "GB", + tb: "TB" + } + }, + # Used in number_to_human + decimal_units: { + format: "%n %u", + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. + units: { + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "", + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: "Thousand", + million: "Million", + billion: "Billion", + trillion: "Trillion", + quadrillion: "Quadrillion" + } + } + } + } + + def self.convert(number, options) + new(number, options).execute + end + + def initialize(number, options) + @number = number + @opts = options.symbolize_keys + end + + def execute + if !number + nil + elsif validate_float? && !valid_float? 
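          # Input that fails float validation is returned unchanged rather
          # than raising; only valid numbers reach the actual conversion.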
+ number + else + convert + end + end + + private + + def options + @options ||= format_options.merge(opts) + end + + def format_options + default_format_options.merge!(i18n_format_options) + end + + def default_format_options + options = DEFAULTS[:format].dup + options.merge!(DEFAULTS[namespace][:format]) if namespace + options + end + + def i18n_format_options + locale = opts[:locale] + options = I18n.translate(:'number.format', locale: locale, default: {}).dup + + if namespace + options.merge!(I18n.translate(:"number.#{namespace}.format", locale: locale, default: {})) + end + + options + end + + def translate_number_value_with_default(key, i18n_options = {}) + I18n.translate(key, { default: default_value(key), scope: :number }.merge!(i18n_options)) + end + + def translate_in_locale(key, i18n_options = {}) + translate_number_value_with_default(key, { locale: options[:locale] }.merge(i18n_options)) + end + + def default_value(key) + key.split(".").reduce(DEFAULTS) { |defaults, k| defaults[k.to_sym] } + end + + def valid_float? + Float(number) + rescue ArgumentError, TypeError + false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_currency_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_currency_converter.rb new file mode 100644 index 0000000..3f037c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_currency_converter.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require "active_support/core_ext/numeric/inquiry" + +module ActiveSupport + module NumberHelper + class NumberToCurrencyConverter < NumberConverter # :nodoc: + self.namespace = :currency + + def convert + number = self.number.to_s.strip + format = options[:format] + + if number.to_f.negative? + format = options[:negative_format] + number = absolute_value(number) + end + + rounded_number = NumberToRoundedConverter.convert(number, options) + format.gsub("%n".freeze, rounded_number).gsub("%u".freeze, options[:unit]) + end + + private + + def absolute_value(number) + number.respond_to?(:abs) ? 
number.abs : number.sub(/\A-/, "") + end + + def options + @options ||= begin + defaults = default_format_options.merge(i18n_opts) + # Override negative format if format options are given + defaults[:negative_format] = "-#{opts[:format]}" if opts[:format] + defaults.merge!(opts) + end + end + + def i18n_opts + # Set International negative format if it does not exist + i18n = i18n_format_options + i18n[:negative_format] ||= "-#{i18n[:format]}" if i18n[:format] + i18n + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_delimited_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_delimited_converter.rb new file mode 100644 index 0000000..d5b5706 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_delimited_converter.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToDelimitedConverter < NumberConverter #:nodoc: + self.validate_float = true + + DEFAULT_DELIMITER_REGEX = /(\d)(?=(\d\d\d)+(?!\d))/ + + def convert + parts.join(options[:separator]) + end + + private + + def parts + left, right = number.to_s.split(".".freeze) + left.gsub!(delimiter_pattern) do |digit_to_delimit| + "#{digit_to_delimit}#{options[:delimiter]}" + end + [left, right].compact + end + + def delimiter_pattern + options.fetch(:delimiter_pattern, DEFAULT_DELIMITER_REGEX) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_converter.rb new file mode 100644 index 0000000..03eb667 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_converter.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToHumanConverter < NumberConverter # :nodoc: + DECIMAL_UNITS = { 0 => :unit, 1 => :ten, 2 => :hundred, 3 => :thousand, 6 => :million, 9 => :billion, 12 => :trillion, 15 => :quadrillion, + -1 => :deci, -2 => :centi, -3 => :mili, -6 => :micro, -9 => :nano, -12 => :pico, -15 => :femto } + INVERTED_DECIMAL_UNITS = DECIMAL_UNITS.invert + + self.namespace = :human + self.validate_float = true + + def convert # :nodoc: + @number = RoundingHelper.new(options).round(number) + @number = Float(number) + + # for backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + units = opts[:units] + exponent = calculate_exponent(units) + @number = number / (10**exponent) + + rounded_number = NumberToRoundedConverter.convert(number, options) + unit = determine_unit(units, exponent) + format.gsub("%n".freeze, rounded_number).gsub("%u".freeze, unit).strip + end + + private + + def format + options[:format] || translate_in_locale("human.decimal_units.format") + end + + def determine_unit(units, exponent) + exp = DECIMAL_UNITS[exponent] + case units + when Hash + units[exp] || "" + when String, Symbol + I18n.translate("#{units}.#{exp}", locale: options[:locale], count: number.to_i) + else + translate_in_locale("human.decimal_units.units.#{exp}", count: number.to_i) + end + end + + def calculate_exponent(units) + exponent = number != 0 ? 
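        # floor(log10(|number|)) for nonzero input, 0 otherwise; the result is
        # then snapped down to the closest exponent that has a named unit.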
Math.log10(number.abs).floor : 0 + unit_exponents(units).find { |e| exponent >= e } || 0 + end + + def unit_exponents(units) + case units + when Hash + units + when String, Symbol + I18n.translate(units.to_s, locale: options[:locale], raise: true) + when nil + translate_in_locale("human.decimal_units.units", raise: true) + else + raise ArgumentError, ":units must be a Hash or String translation scope." + end.keys.map { |e_name| INVERTED_DECIMAL_UNITS[e_name] }.sort_by(&:-@) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_size_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_size_converter.rb new file mode 100644 index 0000000..842f2fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_human_size_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToHumanSizeConverter < NumberConverter #:nodoc: + STORAGE_UNITS = [:byte, :kb, :mb, :gb, :tb, :pb, :eb] + + self.namespace = :human + self.validate_float = true + + def convert + @number = Float(number) + + # for backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + if smaller_than_base? + number_to_format = number.to_i.to_s + else + human_size = number / (base**exponent) + number_to_format = NumberToRoundedConverter.convert(human_size, options) + end + conversion_format.gsub("%n".freeze, number_to_format).gsub("%u".freeze, unit) + end + + private + + def conversion_format + translate_number_value_with_default("human.storage_units.format", locale: options[:locale], raise: true) + end + + def unit + translate_number_value_with_default(storage_unit_key, locale: options[:locale], count: number.to_i, raise: true) + end + + def storage_unit_key + key_end = smaller_than_base? ? "byte" : STORAGE_UNITS[exponent] + "human.storage_units.units.#{key_end}" + end + + def exponent + max = STORAGE_UNITS.size - 1 + exp = (Math.log(number) / Math.log(base)).to_i + exp = max if exp > max # avoid overflow for the highest unit + exp + end + + def smaller_than_base? 
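        # Anything below the 1024-byte base is reported as a whole number of bytes.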
+ number.to_i < base + end + + def base + 1024 + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_percentage_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_percentage_converter.rb new file mode 100644 index 0000000..4dcdad2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_percentage_converter.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToPercentageConverter < NumberConverter # :nodoc: + self.namespace = :percentage + + def convert + rounded_number = NumberToRoundedConverter.convert(number, options) + options[:format].gsub("%n".freeze, rounded_number) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_phone_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_phone_converter.rb new file mode 100644 index 0000000..96410f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_phone_converter.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToPhoneConverter < NumberConverter #:nodoc: + def convert + str = country_code(opts[:country_code]).dup + str << convert_to_phone_number(number.to_s.strip) + str << phone_ext(opts[:extension]) + end + + private + + def convert_to_phone_number(number) + if opts[:area_code] + convert_with_area_code(number) + else + convert_without_area_code(number) + end + end + + def convert_with_area_code(number) + default_pattern = /(\d{1,3})(\d{3})(\d{4}$)/ + number.gsub!(regexp_pattern(default_pattern), + "(\\1) \\2#{delimiter}\\3") + number + end + + def convert_without_area_code(number) + default_pattern = /(\d{0,3})(\d{3})(\d{4})$/ + number.gsub!(regexp_pattern(default_pattern), + "\\1#{delimiter}\\2#{delimiter}\\3") + number.slice!(0, 1) if start_with_delimiter?(number) + number + end + + def start_with_delimiter?(number) + delimiter.present? && number.start_with?(delimiter) + end + + def delimiter + opts[:delimiter] || "-" + end + + def country_code(code) + code.blank? ? "" : "+#{code}#{delimiter}" + end + + def phone_ext(ext) + ext.blank? ? 
"" : " x #{ext}" + end + + def regexp_pattern(default_pattern) + opts.fetch :pattern, default_pattern + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_rounded_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_rounded_converter.rb new file mode 100644 index 0000000..eb528a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/number_to_rounded_converter.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class NumberToRoundedConverter < NumberConverter # :nodoc: + self.namespace = :precision + self.validate_float = true + + def convert + helper = RoundingHelper.new(options) + rounded_number = helper.round(number) + + if precision = options[:precision] + if options[:significant] && precision > 0 + digits = helper.digit_count(rounded_number) + precision -= digits + precision = 0 if precision < 0 # don't let it be negative + end + + formatted_string = + if BigDecimal === rounded_number && rounded_number.finite? + s = rounded_number.to_s("F") + s << "0".freeze * precision + a, b = s.split(".".freeze, 2) + a << ".".freeze + a << b[0, precision] + else + "%00.#{precision}f" % rounded_number + end + else + formatted_string = rounded_number + end + + delimited_number = NumberToDelimitedConverter.convert(formatted_string, options) + format_number(delimited_number) + end + + private + + def strip_insignificant_zeros + options[:strip_insignificant_zeros] + end + + def format_number(number) + if strip_insignificant_zeros + escaped_separator = Regexp.escape(options[:separator]) + number.sub(/(#{escaped_separator})(\d*[1-9])?0+\z/, '\1\2').sub(/#{escaped_separator}\z/, "") + else + number + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/rounding_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/rounding_helper.rb new file mode 100644 index 0000000..2ad8d49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/number_helper/rounding_helper.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class RoundingHelper # :nodoc: + attr_reader :options + + def initialize(options) + @options = options + end + + def round(number) + return number unless precision + number = convert_to_decimal(number) + if significant && precision > 0 + round_significant(number) + else + round_without_significant(number) + end + end + + def digit_count(number) + return 1 if number.zero? + (Math.log10(absolute_number(number)) + 1).floor + end + + private + def round_without_significant(number) + number = number.round(precision) + number = number.to_i if precision == 0 && number.finite? + number = number.abs if number.zero? # prevent showing negative zeros + number + end + + def round_significant(number) + return 0 if number.zero? 
+ digits = digit_count(number) + multiplier = 10**(digits - precision) + (number / BigDecimal(multiplier.to_f.to_s)).round * multiplier + end + + def convert_to_decimal(number) + case number + when Float, String + BigDecimal(number.to_s) + when Rational + BigDecimal(number, digit_count(number.to_i) + precision) + else + number.to_d + end + end + + def precision + options[:precision] + end + + def significant + options[:significant] + end + + def absolute_number(number) + number.respond_to?(:abs) ? number.abs : number.to_d.abs + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/option_merger.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/option_merger.rb new file mode 100644 index 0000000..ab9ca72 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/option_merger.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" + +module ActiveSupport + class OptionMerger #:nodoc: + instance_methods.each do |method| + undef_method(method) if method !~ /^(__|instance_eval|class|object_id)/ + end + + def initialize(context, options) + @context, @options = context, options + end + + private + def method_missing(method, *arguments, &block) + if arguments.first.is_a?(Proc) + proc = arguments.pop + arguments << lambda { |*args| @options.deep_merge(proc.call(*args)) } + else + arguments << (arguments.last.respond_to?(:to_hash) ? @options.deep_merge(arguments.pop) : @options.dup) + end + + @context.__send__(method, *arguments, &block) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_hash.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_hash.rb new file mode 100644 index 0000000..5758513 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_hash.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require "yaml" + +YAML.add_builtin_type("omap") do |type, val| + ActiveSupport::OrderedHash[val.map { |v| v.to_a.first }] +end + +module ActiveSupport + # DEPRECATED: ActiveSupport::OrderedHash implements a hash that preserves + # insertion order. + # + # oh = ActiveSupport::OrderedHash.new + # oh[:a] = 1 + # oh[:b] = 2 + # oh.keys # => [:a, :b], this order is guaranteed + # + # Also, maps the +omap+ feature for YAML files + # (See http://yaml.org/type/omap.html) to support ordered items + # when loading from yaml. + # + # ActiveSupport::OrderedHash is namespaced to prevent conflicts + # with other implementations. + class OrderedHash < ::Hash + def to_yaml_type + "!tag:yaml.org,2002:omap" + end + + def encode_with(coder) + coder.represent_seq "!omap", map { |k, v| { k => v } } + end + + def select(*args, &block) + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def nested_under_indifferent_access + self + end + + # Returns true to make sure that this hash is extractable via Array#extract_options! + def extractable_options? 
+ true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_options.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_options.rb new file mode 100644 index 0000000..6ba1346 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/ordered_options.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" + +module ActiveSupport + # Usually key value pairs are handled something like this: + # + # h = {} + # h[:boy] = 'John' + # h[:girl] = 'Mary' + # h[:boy] # => 'John' + # h[:girl] # => 'Mary' + # h[:dog] # => nil + # + # Using +OrderedOptions+, the above code could be reduced to: + # + # h = ActiveSupport::OrderedOptions.new + # h.boy = 'John' + # h.girl = 'Mary' + # h.boy # => 'John' + # h.girl # => 'Mary' + # h.dog # => nil + # + # To raise an exception when the value is blank, append a + # bang to the key name, like: + # + # h.dog! # => raises KeyError: :dog is blank + # + class OrderedOptions < Hash + alias_method :_get, :[] # preserve the original #[] method + protected :_get # make it protected + + def []=(key, value) + super(key.to_sym, value) + end + + def [](key) + super(key.to_sym) + end + + def method_missing(name, *args) + name_string = name.to_s.dup + if name_string.chomp!("=") + self[name_string] = args.first + else + bangs = name_string.chomp!("!") + + if bangs + self[name_string].presence || raise(KeyError.new(":#{name_string} is blank")) + else + self[name_string] + end + end + end + + def respond_to_missing?(name, include_private) + true + end + end + + # +InheritableOptions+ provides a constructor to build an +OrderedOptions+ + # hash inherited from another hash. + # + # Use this if you already have some hash and you want to create a new one based on it. + # + # h = ActiveSupport::InheritableOptions.new({ girl: 'Mary', boy: 'John' }) + # h.girl # => 'Mary' + # h.boy # => 'John' + class InheritableOptions < OrderedOptions + def initialize(parent = nil) + if parent.kind_of?(OrderedOptions) + # use the faster _get when dealing with OrderedOptions + super() { |h, k| parent._get(k) } + elsif parent + super() { |h, k| parent[k] } + else + super() + end + end + + def inheritable_copy + self.class.new(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/per_thread_registry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/per_thread_registry.rb new file mode 100644 index 0000000..eb92fb4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/per_thread_registry.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" + +module ActiveSupport + # NOTE: This approach has been deprecated for end-user code in favor of {thread_mattr_accessor}[rdoc-ref:Module#thread_mattr_accessor] and friends. + # Please use that approach instead. + # + # This module is used to encapsulate access to thread local variables. + # + # Instead of polluting the thread locals namespace: + # + # Thread.current[:connection_handler] + # + # you define a class that extends this module: + # + # module ActiveRecord + # class RuntimeRegistry + # extend ActiveSupport::PerThreadRegistry + # + # attr_accessor :connection_handler + # end + # end + # + # and invoke the declared instance accessors as class methods. 
So + # + # ActiveRecord::RuntimeRegistry.connection_handler = connection_handler + # + # sets a connection handler local to the current thread, and + # + # ActiveRecord::RuntimeRegistry.connection_handler + # + # returns a connection handler local to the current thread. + # + # This feature is accomplished by instantiating the class and storing the + # instance as a thread local keyed by the class name. In the example above + # a key "ActiveRecord::RuntimeRegistry" is stored in Thread.current. + # The class methods proxy to said thread local instance. + # + # If the class has an initializer, it must accept no arguments. + module PerThreadRegistry + def self.extended(object) + object.instance_variable_set "@per_thread_registry_key", object.name.freeze + end + + def instance + Thread.current[@per_thread_registry_key] ||= new + end + + private + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/proxy_object.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/proxy_object.rb new file mode 100644 index 0000000..0965fcd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/proxy_object.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module ActiveSupport + # A class with no predefined methods that behaves similarly to Builder's + # BlankSlate. Used for proxy classes. + class ProxyObject < ::BasicObject + undef_method :== + undef_method :equal? + + # Let ActiveSupport::ProxyObject at least raise exceptions. + def raise(*args) + ::Object.send(:raise, *args) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/rails.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/rails.rb new file mode 100644 index 0000000..5c34a0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/rails.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +# This is private interface. +# +# Rails components cherry pick from Active Support as needed, but there are a +# few features that are used for sure in some way or another and it is not worth +# putting individual requires absolutely everywhere. Think blank? for example. +# +# This file is loaded by every Rails component except Active Support itself, +# but it does not belong to the Rails public interface. It is internal to +# Rails and can change anytime. + +# Defines Object#blank? and Object#present?. +require "active_support/core_ext/object/blank" + +# Rails own autoload, eager_load, etc. +require "active_support/dependencies/autoload" + +# Support for ClassMethods and the included macro. +require "active_support/concern" + +# Defines Class#class_attribute. +require "active_support/core_ext/class/attribute" + +# Defines Module#delegate. +require "active_support/core_ext/module/delegation" + +# Defines ActiveSupport::Deprecation. +require "active_support/deprecation" + +# Defines Regexp#match?. +# +# This should be removed when Rails needs Ruby 2.4 or later, and the require +# added where other Regexp extensions are being used (easy to grep). 
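# For example, Regexp#match? answers a plain boolean and, unlike =~,
# does not allocate or set MatchData:
#
#   /ab/.match?("abc") # => true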
+require "active_support/core_ext/regexp" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/railtie.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/railtie.rb new file mode 100644 index 0000000..605b50d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/railtie.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/i18n_railtie" + +module ActiveSupport + class Railtie < Rails::Railtie # :nodoc: + config.active_support = ActiveSupport::OrderedOptions.new + + config.eager_load_namespaces << ActiveSupport + + initializer "active_support.set_authenticated_message_encryption" do |app| + config.after_initialize do + unless app.config.active_support.use_authenticated_message_encryption.nil? + ActiveSupport::MessageEncryptor.use_authenticated_message_encryption = + app.config.active_support.use_authenticated_message_encryption + end + end + end + + initializer "active_support.reset_all_current_attributes_instances" do |app| + app.reloader.before_class_unload { ActiveSupport::CurrentAttributes.clear_all } + app.executor.to_run { ActiveSupport::CurrentAttributes.reset_all } + app.executor.to_complete { ActiveSupport::CurrentAttributes.reset_all } + end + + initializer "active_support.deprecation_behavior" do |app| + if deprecation = app.config.active_support.deprecation + ActiveSupport::Deprecation.behavior = deprecation + end + end + + # Sets the default value for Time.zone + # If assigned value cannot be matched to a TimeZone, an exception will be raised. + initializer "active_support.initialize_time_zone" do |app| + begin + TZInfo::DataSource.get + rescue TZInfo::DataSourceNotFound => e + raise e.exception "tzinfo-data is not present. Please add gem 'tzinfo-data' to your Gemfile and run bundle install" + end + require "active_support/core_ext/time/zones" + Time.zone_default = Time.find_zone!(app.config.time_zone) + end + + # Sets the default week start + # If assigned value is not a valid day symbol (e.g. :sunday, :monday, ...), an exception will be raised. + initializer "active_support.initialize_beginning_of_week" do |app| + require "active_support/core_ext/date/calculations" + beginning_of_week_default = Date.find_beginning_of_week!(app.config.beginning_of_week) + + Date.beginning_of_week_default = beginning_of_week_default + end + + initializer "active_support.require_master_key" do |app| + if app.config.respond_to?(:require_master_key) && app.config.require_master_key + begin + app.credentials.key + rescue ActiveSupport::EncryptedFile::MissingKeyError => error + $stderr.puts error.message + exit 1 + end + end + end + + initializer "active_support.set_configs" do |app| + app.config.active_support.each do |k, v| + k = "#{k}=" + ActiveSupport.send(k, v) if ActiveSupport.respond_to? 
k + end + end + + initializer "active_support.set_hash_digest_class" do |app| + config.after_initialize do + if app.config.active_support.use_sha1_digests + ActiveSupport::Digest.hash_digest_class = ::Digest::SHA1 + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/reloader.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/reloader.rb new file mode 100644 index 0000000..b26d9c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/reloader.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" + +module ActiveSupport + #-- + # This class defines several callbacks: + # + # to_prepare -- Run once at application startup, and also from + # +to_run+. + # + # to_run -- Run before a work run that is reloading. If + # +reload_classes_only_on_change+ is true (the default), the class + # unload will have already occurred. + # + # to_complete -- Run after a work run that has reloaded. If + # +reload_classes_only_on_change+ is false, the class unload will + # have occurred after the work run, but before this callback. + # + # before_class_unload -- Run immediately before the classes are + # unloaded. + # + # after_class_unload -- Run immediately after the classes are + # unloaded. + # + class Reloader < ExecutionWrapper + define_callbacks :prepare + + define_callbacks :class_unload + + # Registers a callback that will run once at application startup and every time the code is reloaded. + def self.to_prepare(*args, &block) + set_callback(:prepare, *args, &block) + end + + # Registers a callback that will run immediately before the classes are unloaded. + def self.before_class_unload(*args, &block) + set_callback(:class_unload, *args, &block) + end + + # Registers a callback that will run immediately after the classes are unloaded. + def self.after_class_unload(*args, &block) + set_callback(:class_unload, :after, *args, &block) + end + + to_run(:after) { self.class.prepare! } + + # Initiate a manual reload + def self.reload! + executor.wrap do + new.tap do |instance| + begin + instance.run! + ensure + instance.complete! + end + end + end + prepare! + end + + def self.run! # :nodoc: + if check! + super + else + Null + end + end + + # Run the supplied block as a work unit, reloading code as needed + def self.wrap + executor.wrap do + super + end + end + + class_attribute :executor, default: Executor + class_attribute :check, default: lambda { false } + + def self.check! # :nodoc: + @should_reload ||= check.call + end + + def self.reloaded! # :nodoc: + @should_reload = false + end + + def self.prepare! # :nodoc: + new.run_callbacks(:prepare) + end + + def initialize + super + @locked = false + end + + # Acquire the ActiveSupport::Dependencies::Interlock unload lock, + # ensuring it will be released automatically + def require_unload_lock! + unless @locked + ActiveSupport::Dependencies.interlock.start_unloading + @locked = true + end + end + + # Release the unload lock if it has been previously obtained + def release_unload_lock! + if @locked + @locked = false + ActiveSupport::Dependencies.interlock.done_unloading + end + end + + def run! # :nodoc: + super + release_unload_lock! + end + + def class_unload!(&block) # :nodoc: + require_unload_lock! + run_callbacks(:class_unload, &block) + end + + def complete! # :nodoc: + super + self.class.reloaded! + ensure + release_unload_lock! 
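      # Always release the unload lock, even if a completion callback raised.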
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/rescuable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/rescuable.rb new file mode 100644 index 0000000..e0fa29c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/rescuable.rb @@ -0,0 +1,174 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # Rescuable module adds support for easier exception handling. + module Rescuable + extend Concern + + included do + class_attribute :rescue_handlers, default: [] + end + + module ClassMethods + # Rescue exceptions raised in controller actions. + # + # rescue_from receives a series of exception classes or class + # names, and a trailing :with option with the name of a method + # or a Proc object to be called to handle them. Alternatively a block can + # be given. + # + # Handlers that take one argument will be called with the exception, so + # that the exception can be inspected when dealing with it. + # + # Handlers are inherited. They are searched from right to left, from + # bottom to top, and up the hierarchy. The handler of the first class for + # which exception.is_a?(klass) holds true is the one invoked, if + # any. + # + # class ApplicationController < ActionController::Base + # rescue_from User::NotAuthorized, with: :deny_access # self defined exception + # rescue_from ActiveRecord::RecordInvalid, with: :show_errors + # + # rescue_from 'MyAppError::Base' do |exception| + # render xml: exception, status: 500 + # end + # + # private + # def deny_access + # ... + # end + # + # def show_errors(exception) + # exception.record.new_record? ? ... + # end + # end + # + # Exceptions raised inside exception handlers are not propagated up. + def rescue_from(*klasses, with: nil, &block) + unless with + if block_given? + with = block + else + raise ArgumentError, "Need a handler. Pass the with: keyword argument or provide a block." + end + end + + klasses.each do |klass| + key = if klass.is_a?(Module) && klass.respond_to?(:===) + klass.name + elsif klass.is_a?(String) + klass + else + raise ArgumentError, "#{klass.inspect} must be an Exception class or a String referencing an Exception class" + end + + # Put the new handler at the end because the list is read in reverse. + self.rescue_handlers += [[key, with]] + end + end + + # Matches an exception to a handler based on the exception class. + # + # If no handler matches the exception, check for a handler matching the + # (optional) exception.cause. If no handler matches the exception or its + # cause, this returns +nil+, so you can deal with unhandled exceptions. + # Be sure to re-raise unhandled exceptions if this is what you expect. + # + # begin + # … + # rescue => exception + # rescue_with_handler(exception) || raise + # end + # + # Returns the exception if it was handled and +nil+ if it was not. 
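      # A minimal usage sketch (the JobRunner class and its methods are
      # hypothetical, not part of this gem):
      #
      #   class JobRunner
      #     include ActiveSupport::Rescuable
      #
      #     rescue_from Timeout::Error, with: :handle_timeout
      #
      #     def perform
      #       do_work
      #     rescue => exception
      #       rescue_with_handler(exception) || raise
      #     end
      #
      #     def handle_timeout(exception)
      #       # retry, log, etc.
      #     end
      #   end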
+ def rescue_with_handler(exception, object: self, visited_exceptions: []) + visited_exceptions << exception + + if handler = handler_for_rescue(exception, object: object) + handler.call exception + exception + elsif exception + if visited_exceptions.include?(exception.cause) + nil + else + rescue_with_handler(exception.cause, object: object, visited_exceptions: visited_exceptions) + end + end + end + + def handler_for_rescue(exception, object: self) #:nodoc: + case rescuer = find_rescue_handler(exception) + when Symbol + method = object.method(rescuer) + if method.arity == 0 + -> e { method.call } + else + method + end + when Proc + if rescuer.arity == 0 + -> e { object.instance_exec(&rescuer) } + else + -> e { object.instance_exec(e, &rescuer) } + end + end + end + + private + def find_rescue_handler(exception) + if exception + # Handlers are in order of declaration but the most recently declared + # is the highest priority match, so we search for matching handlers + # in reverse. + _, handler = rescue_handlers.reverse_each.detect do |class_or_name, _| + if klass = constantize_rescue_handler_class(class_or_name) + klass === exception + end + end + + handler + end + end + + def constantize_rescue_handler_class(class_or_name) + case class_or_name + when String, Symbol + begin + # Try a lexical lookup first since we support + # + # class Super + # rescue_from 'Error', with: … + # end + # + # class Sub + # class Error < StandardError; end + # end + # + # so an Error raised in Sub will hit the 'Error' handler. + const_get class_or_name + rescue NameError + class_or_name.safe_constantize + end + else + class_or_name + end + end + end + + # Delegates to the class method, but uses the instance as the subject for + # rescue_from handlers (method calls, instance_exec blocks). + def rescue_with_handler(exception) + self.class.rescue_with_handler exception, object: self + end + + # Internal handler lookup. Delegates to class method. Some libraries call + # this directly, so keeping it around for compatibility. + def handler_for_rescue(exception) #:nodoc: + self.class.handler_for_rescue exception, object: self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/security_utils.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/security_utils.rb new file mode 100644 index 0000000..20b6b9c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/security_utils.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require "digest/sha2" + +module ActiveSupport + module SecurityUtils + # Constant time string comparison, for fixed length strings. + # + # The values compared should be of fixed length, such as strings + # that have already been processed by HMAC. Raises in case of length mismatch. + def fixed_length_secure_compare(a, b) + raise ArgumentError, "string length mismatch." unless a.bytesize == b.bytesize + + l = a.unpack "C#{a.bytesize}" + + res = 0 + b.each_byte { |byte| res |= byte ^ l.shift } + res == 0 + end + module_function :fixed_length_secure_compare + + # Constant time string comparison, for variable length strings. + # + # The values are first processed by SHA256, so that we don't leak length info + # via timing attacks. 
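    # For example (both arguments assumed to be caller-supplied strings):
    #
    #   ActiveSupport::SecurityUtils.secure_compare(submitted_token, stored_token)
    #   # => true only when the two strings are equal; hashing first hides their lengths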
+ def secure_compare(a, b) + fixed_length_secure_compare(::Digest::SHA256.hexdigest(a), ::Digest::SHA256.hexdigest(b)) && a == b + end + module_function :secure_compare + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/string_inquirer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/string_inquirer.rb new file mode 100644 index 0000000..a3af367 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/string_inquirer.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +module ActiveSupport + # Wrapping a string in this class gives you a prettier way to test + # for equality. The value returned by Rails.env is wrapped + # in a StringInquirer object, so instead of calling this: + # + # Rails.env == 'production' + # + # you can call this: + # + # Rails.env.production? + # + # == Instantiating a new StringInquirer + # + # vehicle = ActiveSupport::StringInquirer.new('car') + # vehicle.car? # => true + # vehicle.bike? # => false + class StringInquirer < String + private + + def respond_to_missing?(method_name, include_private = false) + (method_name[-1] == "?") || super + end + + def method_missing(method_name, *arguments) + if method_name[-1] == "?" + self == method_name[0..-2] + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/subscriber.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/subscriber.rb new file mode 100644 index 0000000..9291b48 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/subscriber.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/per_thread_registry" +require "active_support/notifications" + +module ActiveSupport + # ActiveSupport::Subscriber is an object set to consume + # ActiveSupport::Notifications. The subscriber dispatches notifications to + # a registered object based on its given namespace. + # + # An example would be an Active Record subscriber responsible for collecting + # statistics about queries: + # + # module ActiveRecord + # class StatsSubscriber < ActiveSupport::Subscriber + # attach_to :active_record + # + # def sql(event) + # Statsd.timing("sql.#{event.payload[:name]}", event.duration) + # end + # end + # end + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event (ActiveSupport::Notifications::Event) to + # the +sql+ method. + class Subscriber + class << self + # Attach the subscriber to a namespace. + def attach_to(namespace, subscriber = new, notifier = ActiveSupport::Notifications) + @namespace = namespace + @subscriber = subscriber + @notifier = notifier + + subscribers << subscriber + + # Add event subscribers for all existing methods on the class. + subscriber.public_methods(false).each do |event| + add_event_subscriber(event) + end + end + + # Adds event subscribers for all new methods added to the class. + def method_added(event) + # Only public methods are added as subscribers, and only if a notifier + # has been set up. This means that subscribers will only be set up for + # classes that call #attach_to. + if public_method_defined?(event) && notifier + add_event_subscriber(event) + end + end + + def subscribers + @@subscribers ||= [] + end + + # TODO Change this to private once we've dropped Ruby 2.2 support. + # Workaround for Ruby 2.2 "private attribute?" warning. 
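      # Any public instance method defined after attach_to has run is also
      # subscribed automatically, via method_added above.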
+ protected + + attr_reader :subscriber, :notifier, :namespace + + private + + def add_event_subscriber(event) # :doc: + return if %w{ start finish }.include?(event.to_s) + + pattern = "#{event}.#{namespace}" + + # Don't add multiple subscribers (eg. if methods are redefined). + return if subscriber.patterns.include?(pattern) + + subscriber.patterns << pattern + notifier.subscribe(pattern, subscriber) + end + end + + attr_reader :patterns # :nodoc: + + def initialize + @queue_key = [self.class.name, object_id].join "-" + @patterns = [] + super + end + + def start(name, id, payload) + e = ActiveSupport::Notifications::Event.new(name, now, nil, id, payload) + parent = event_stack.last + parent << e if parent + + event_stack.push e + end + + def finish(name, id, payload) + finished = now + event = event_stack.pop + event.end = finished + event.payload.merge!(payload) + + method = name.split(".".freeze).first + send(method, event) + end + + private + + def event_stack + SubscriberQueueRegistry.instance.get_queue(@queue_key) + end + + def now + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + end + + # This is a registry for all the event stacks kept for subscribers. + # + # See the documentation of ActiveSupport::PerThreadRegistry + # for further details. + class SubscriberQueueRegistry # :nodoc: + extend PerThreadRegistry + + def initialize + @registry = {} + end + + def get_queue(queue_key) + @registry[queue_key] ||= [] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/tagged_logging.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/tagged_logging.rb new file mode 100644 index 0000000..8561cba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/tagged_logging.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/object/blank" +require "logger" +require "active_support/logger" + +module ActiveSupport + # Wraps any standard Logger object to provide tagging capabilities. + # + # logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT)) + # logger.tagged('BCX') { logger.info 'Stuff' } # Logs "[BCX] Stuff" + # logger.tagged('BCX', "Jason") { logger.info 'Stuff' } # Logs "[BCX] [Jason] Stuff" + # logger.tagged('BCX') { logger.tagged('Jason') { logger.info 'Stuff' } } # Logs "[BCX] [Jason] Stuff" + # + # This is used by the default Rails.logger as configured by Railties to make + # it easy to stamp log lines with subdomains, request ids, and anything else + # to aid debugging of multi-user production applications. + module TaggedLogging + module Formatter # :nodoc: + # This method is invoked when a log event occurs. + def call(severity, timestamp, progname, msg) + super(severity, timestamp, progname, "#{tags_text}#{msg}") + end + + def tagged(*tags) + new_tags = push_tags(*tags) + yield self + ensure + pop_tags(new_tags.size) + end + + def push_tags(*tags) + tags.flatten.reject(&:blank?).tap do |new_tags| + current_tags.concat new_tags + end + end + + def pop_tags(size = 1) + current_tags.pop size + end + + def clear_tags! + current_tags.clear + end + + def current_tags + # We use our object ID here to avoid conflicting with other instances + thread_key = @thread_key ||= "activesupport_tagged_logging_tags:#{object_id}".freeze + Thread.current[thread_key] ||= [] + end + + def tags_text + tags = current_tags + if tags.any? 
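        # e.g. current tags ["BCX", "Jason"] render as "[BCX] [Jason] ",
        # leaving a trailing space before the log message itself.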
+ tags.collect { |tag| "[#{tag}] " }.join + end + end + end + + def self.new(logger) + # Ensure we set a default formatter so we aren't extending nil! + logger.formatter ||= ActiveSupport::Logger::SimpleFormatter.new + logger.formatter.extend Formatter + logger.extend(self) + end + + delegate :push_tags, :pop_tags, :clear_tags!, to: :formatter + + def tagged(*tags) + formatter.tagged(*tags) { yield self } + end + + def flush + clear_tags! + super if defined?(super) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/test_case.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/test_case.rb new file mode 100644 index 0000000..69d69c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/test_case.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +gem "minitest" # make sure we get the gem, not stdlib +require "minitest" +require "active_support/testing/tagged_logging" +require "active_support/testing/setup_and_teardown" +require "active_support/testing/assertions" +require "active_support/testing/deprecation" +require "active_support/testing/declarative" +require "active_support/testing/isolation" +require "active_support/testing/constant_lookup" +require "active_support/testing/time_helpers" +require "active_support/testing/file_fixtures" + +module ActiveSupport + class TestCase < ::Minitest::Test + Assertion = Minitest::Assertion + + class << self + # Sets the order in which test cases are run. + # + # ActiveSupport::TestCase.test_order = :random # => :random + # + # Valid values are: + # * +:random+ (to run tests in random order) + # * +:parallel+ (to run tests in parallel) + # * +:sorted+ (to run tests alphabetically by method name) + # * +:alpha+ (equivalent to +:sorted+) + def test_order=(new_order) + ActiveSupport.test_order = new_order + end + + # Returns the order in which test cases are run. + # + # ActiveSupport::TestCase.test_order # => :random + # + # Possible values are +:random+, +:parallel+, +:alpha+, +:sorted+. + # Defaults to +:random+. 
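      # For example, a test helper may force deterministic ordering with:
      #
      #   ActiveSupport::TestCase.test_order = :sorted
      #   ActiveSupport::TestCase.test_order # => :sorted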
+ def test_order + ActiveSupport.test_order ||= :random + end + end + + alias_method :method_name, :name + + include ActiveSupport::Testing::TaggedLogging + prepend ActiveSupport::Testing::SetupAndTeardown + include ActiveSupport::Testing::Assertions + include ActiveSupport::Testing::Deprecation + include ActiveSupport::Testing::TimeHelpers + include ActiveSupport::Testing::FileFixtures + extend ActiveSupport::Testing::Declarative + + # test/unit backwards compatibility methods + alias :assert_raise :assert_raises + alias :assert_not_empty :refute_empty + alias :assert_not_equal :refute_equal + alias :assert_not_in_delta :refute_in_delta + alias :assert_not_in_epsilon :refute_in_epsilon + alias :assert_not_includes :refute_includes + alias :assert_not_instance_of :refute_instance_of + alias :assert_not_kind_of :refute_kind_of + alias :assert_no_match :refute_match + alias :assert_not_nil :refute_nil + alias :assert_not_operator :refute_operator + alias :assert_not_predicate :refute_predicate + alias :assert_not_respond_to :refute_respond_to + alias :assert_not_same :refute_same + + ActiveSupport.run_load_hooks(:active_support_test_case, self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/assertions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/assertions.rb new file mode 100644 index 0000000..a891ff6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/assertions.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Assertions + UNTRACKED = Object.new # :nodoc: + + # Asserts that an expression is not truthy. Passes if object is + # +nil+ or +false+. "Truthy" means "considered true in a conditional" + # like if foo. + # + # assert_not nil # => true + # assert_not false # => true + # assert_not 'foo' # => Expected "foo" to be nil or false + # + # An error message can be specified. + # + # assert_not foo, 'foo should be false' + def assert_not(object, message = nil) + message ||= "Expected #{mu_pp(object)} to be nil or false" + assert !object, message + end + + # Assertion that the block should not raise an exception. + # + # Passes if evaluated code in the yielded block raises no exception. + # + # assert_nothing_raised do + # perform_service(param: 'no_exception') + # end + def assert_nothing_raised + yield + end + + # Test numeric difference between the return value of an expression as a + # result of what is evaluated in the yielded block. + # + # assert_difference 'Article.count' do + # post :create, params: { article: {...} } + # end + # + # An arbitrary expression is passed in and evaluated. + # + # assert_difference 'Article.last.comments(:reload).size' do + # post :create, params: { comment: {...} } + # end + # + # An arbitrary positive or negative difference can be specified. + # The default is 1. + # + # assert_difference 'Article.count', -1 do + # post :delete, params: { id: ... } + # end + # + # An array of expressions can also be passed in and evaluated. + # + # assert_difference [ 'Article.count', 'Post.count' ], 2 do + # post :create, params: { article: {...} } + # end + # + # A hash of expressions/numeric differences can also be passed in and evaluated. 
+ # + # assert_difference ->{ Article.count } => 1, ->{ Notification.count } => 2 do + # post :create, params: { article: {...} } + # end + # + # A lambda or a list of lambdas can be passed in and evaluated: + # + # assert_difference ->{ Article.count }, 2 do + # post :create, params: { article: {...} } + # end + # + # assert_difference [->{ Article.count }, ->{ Post.count }], 2 do + # post :create, params: { article: {...} } + # end + # + # An error message can be specified. + # + # assert_difference 'Article.count', -1, 'An Article should be destroyed' do + # post :delete, params: { id: ... } + # end + def assert_difference(expression, *args, &block) + expressions = + if expression.is_a?(Hash) + message = args[0] + expression + else + difference = args[0] || 1 + message = args[1] + Hash[Array(expression).map { |e| [e, difference] }] + end + + exps = expressions.keys.map { |e| + e.respond_to?(:call) ? e : lambda { eval(e, block.binding) } + } + before = exps.map(&:call) + + retval = yield + + expressions.zip(exps, before) do |(code, diff), exp, before_value| + error = "#{code.inspect} didn't change by #{diff}" + error = "#{message}.\n#{error}" if message + assert_equal(before_value + diff, exp.call, error) + end + + retval + end + + # Assertion that the numeric result of evaluating an expression is not + # changed before and after invoking the passed in block. + # + # assert_no_difference 'Article.count' do + # post :create, params: { article: invalid_attributes } + # end + # + # An error message can be specified. + # + # assert_no_difference 'Article.count', 'An Article should not be created' do + # post :create, params: { article: invalid_attributes } + # end + def assert_no_difference(expression, message = nil, &block) + assert_difference expression, 0, message, &block + end + + # Assertion that the result of evaluating an expression is changed before + # and after invoking the passed in block. + # + # assert_changes 'Status.all_good?' do + # post :create, params: { status: { ok: false } } + # end + # + # You can pass the block as a string to be evaluated in the context of + # the block. A lambda can be passed for the block as well. + # + # assert_changes -> { Status.all_good? } do + # post :create, params: { status: { ok: false } } + # end + # + # The assertion is useful to test side effects. The passed block can be + # anything that can be converted to string with #to_s. + # + # assert_changes :@object do + # @object = 42 + # end + # + # The keyword arguments :from and :to can be given to specify the + # expected initial value and the expected value after the block was + # executed. + # + # assert_changes :@object, from: nil, to: :foo do + # @object = :foo + # end + # + # An error message can be specified. + # + # assert_changes -> { Status.all_good? }, 'Expected the status to be bad' do + # post :create, params: { status: { incident: true } } + # end + def assert_changes(expression, message = nil, from: UNTRACKED, to: UNTRACKED, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = yield + + unless from == UNTRACKED + error = "#{expression.inspect} isn't #{from.inspect}" + error = "#{message}.\n#{error}" if message + assert from === before, error + end + + after = exp.call + + error = "#{expression.inspect} didn't change" + error = "#{error}. 
It was already #{to}" if before == to + error = "#{message}.\n#{error}" if message + assert before != after, error + + unless to == UNTRACKED + error = "#{expression.inspect} didn't change to #{to}" + error = "#{message}.\n#{error}" if message + assert to === after, error + end + + retval + end + + # Assertion that the result of evaluating an expression is not changed before + # and after invoking the passed in block. + # + # assert_no_changes 'Status.all_good?' do + # post :create, params: { status: { ok: true } } + # end + # + # An error message can be specified. + # + # assert_no_changes -> { Status.all_good? }, 'Expected the status to be good' do + # post :create, params: { status: { ok: false } } + # end + def assert_no_changes(expression, message = nil, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = yield + after = exp.call + + error = "#{expression.inspect} did change to #{after}" + error = "#{message}.\n#{error}" if message + assert before == after, error + + retval + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/autorun.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/autorun.rb new file mode 100644 index 0000000..889b416 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/autorun.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +gem "minitest" + +require "minitest" + +Minitest.autorun diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/constant_lookup.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/constant_lookup.rb new file mode 100644 index 0000000..51167e9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/constant_lookup.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/inflector" + +module ActiveSupport + module Testing + # Resolves a constant from a minitest spec name. + # + # Given the following spec-style test: + # + # describe WidgetsController, :index do + # describe "authenticated user" do + # describe "returns widgets" do + # it "has a controller that exists" do + # assert_kind_of WidgetsController, @controller + # end + # end + # end + # end + # + # The test will have the following name: + # + # "WidgetsController::index::authenticated user::returns widgets" + # + # The constant WidgetsController can be resolved from the name. 
+ # The following code will resolve the constant: + # + # controller = determine_constant_from_test_name(name) do |constant| + # Class === constant && constant < ::ActionController::Metal + # end + module ConstantLookup + extend ::ActiveSupport::Concern + + module ClassMethods # :nodoc: + def determine_constant_from_test_name(test_name) + names = test_name.split "::" + while names.size > 0 do + names.last.sub!(/Test$/, "") + begin + constant = names.join("::").safe_constantize + break(constant) if yield(constant) + ensure + names.pop + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/declarative.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/declarative.rb new file mode 100644 index 0000000..7c34036 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/declarative.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Declarative + unless defined?(Spec) + # Helper to define a test method using a String. Under the hood, it replaces + # spaces with underscores and defines the test method. + # + # test "verify something" do + # ... + # end + def test(name, &block) + test_name = "test_#{name.gsub(/\s+/, '_')}".to_sym + defined = method_defined? test_name + raise "#{test_name} is already defined in #{self}" if defined + if block_given? + define_method(test_name, &block) + else + define_method(test_name) do + flunk "No implementation provided for #{name}" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/deprecation.rb new file mode 100644 index 0000000..f655435 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/deprecation.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require "active_support/deprecation" +require "active_support/core_ext/regexp" + +module ActiveSupport + module Testing + module Deprecation #:nodoc: + def assert_deprecated(match = nil, deprecator = nil, &block) + result, warnings = collect_deprecations(deprecator, &block) + assert !warnings.empty?, "Expected a deprecation warning within the block but received none" + if match + match = Regexp.new(Regexp.escape(match)) unless match.is_a?(Regexp) + assert warnings.any? 
{ |w| match.match?(w) }, "No deprecation warning matched #{match}: #{warnings.join(', ')}" + end + result + end + + def assert_not_deprecated(deprecator = nil, &block) + result, deprecations = collect_deprecations(deprecator, &block) + assert deprecations.empty?, "Expected no deprecation warning within the block but received #{deprecations.size}: \n #{deprecations * "\n "}" + result + end + + def collect_deprecations(deprecator = nil) + deprecator ||= ActiveSupport::Deprecation + old_behavior = deprecator.behavior + deprecations = [] + deprecator.behavior = Proc.new do |message, callstack| + deprecations << message + end + result = yield + [result, deprecations] + ensure + deprecator.behavior = old_behavior + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/file_fixtures.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/file_fixtures.rb new file mode 100644 index 0000000..ad923d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/file_fixtures.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + # Adds simple access to sample files called file fixtures. + # File fixtures are normal files stored in + # ActiveSupport::TestCase.file_fixture_path. + # + # File fixtures are represented as +Pathname+ objects. + # This makes it easy to extract specific information: + # + # file_fixture("example.txt").read # get the file's content + # file_fixture("example.mp3").size # get the file size + module FileFixtures + extend ActiveSupport::Concern + + included do + class_attribute :file_fixture_path, instance_writer: false + end + + # Returns a +Pathname+ to the fixture file named +fixture_name+. + # + # Raises +ArgumentError+ if +fixture_name+ can't be found. + def file_fixture(fixture_name) + path = Pathname.new(File.join(file_fixture_path, fixture_name)) + + if path.exist? + path + else + msg = "the directory '%s' does not contain a file named '%s'" + raise ArgumentError, msg % [file_fixture_path, fixture_name] + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/isolation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/isolation.rb new file mode 100644 index 0000000..562f985 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/isolation.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Isolation + require "thread" + + def self.included(klass) #:nodoc: + klass.class_eval do + parallelize_me! + end + end + + def self.forking_env? + !ENV["NO_FORK"] && Process.respond_to?(:fork) + end + + def run + serialized = run_in_isolation do + super + end + + Marshal.load(serialized) + end + + module Forking + def run_in_isolation(&blk) + read, write = IO.pipe + read.binmode + write.binmode + + pid = fork do + read.close + yield + begin + if error? + failures.map! { |e| + begin + Marshal.dump e + e + rescue TypeError + ex = Exception.new e.message + ex.set_backtrace e.backtrace + Minitest::UnexpectedError.new ex + end + } + end + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + result = Marshal.dump(test_result) + end + + write.puts [result].pack("m") + exit! 
+ end + + write.close + result = read.read + Process.wait2(pid) + result.unpack("m")[0] + end + end + + module Subprocess + ORIG_ARGV = ARGV.dup unless defined?(ORIG_ARGV) + + # Crazy H4X to get this working in windows / jruby with + # no forking. + def run_in_isolation(&blk) + require "tempfile" + + if ENV["ISOLATION_TEST"] + yield + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + File.open(ENV["ISOLATION_OUTPUT"], "w") do |file| + file.puts [Marshal.dump(test_result)].pack("m") + end + exit! + else + Tempfile.open("isolation") do |tmpfile| + env = { + "ISOLATION_TEST" => self.class.name, + "ISOLATION_OUTPUT" => tmpfile.path + } + + test_opts = "-n#{self.class.name}##{name}" + + load_path_args = [] + $-I.each do |p| + load_path_args << "-I" + load_path_args << File.expand_path(p) + end + + child = IO.popen([env, Gem.ruby, *load_path_args, $0, *ORIG_ARGV, test_opts]) + + begin + Process.wait(child.pid) + rescue Errno::ECHILD # The child process may exit before we wait + nil + end + + return tmpfile.read.unpack("m")[0] + end + end + end + end + + include forking_env? ? Forking : Subprocess + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/method_call_assertions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/method_call_assertions.rb new file mode 100644 index 0000000..c635800 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/method_call_assertions.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +require "minitest/mock" + +module ActiveSupport + module Testing + module MethodCallAssertions # :nodoc: + private + def assert_called(object, method_name, message = nil, times: 1, returns: nil) + times_called = 0 + + object.stub(method_name, proc { times_called += 1; returns }) { yield } + + error = "Expected #{method_name} to be called #{times} times, " \ + "but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + assert_equal times, times_called, error + end + + def assert_called_with(object, method_name, args = [], returns: nil) + mock = Minitest::Mock.new + + if args.all? { |arg| arg.is_a?(Array) } + args.each { |arg| mock.expect(:call, returns, arg) } + else + mock.expect(:call, returns, args) + end + + object.stub(method_name, mock) { yield } + + mock.verify + end + + def assert_not_called(object, method_name, message = nil, &block) + assert_called(object, method_name, message, times: 0, &block) + end + + def stub_any_instance(klass, instance: klass.new) + klass.stub(:new, instance) { yield instance } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/setup_and_teardown.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/setup_and_teardown.rb new file mode 100644 index 0000000..35321cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/setup_and_teardown.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/callbacks" + +module ActiveSupport + module Testing + # Adds support for +setup+ and +teardown+ callbacks. + # These callbacks serve as a replacement to overwriting the + # #setup and #teardown methods of your TestCase. + # + # class ExampleTest < ActiveSupport::TestCase + # setup do + # # ... + # end + # + # teardown do + # # ... 
+ # end + # end + module SetupAndTeardown + def self.prepended(klass) + klass.include ActiveSupport::Callbacks + klass.define_callbacks :setup, :teardown + klass.extend ClassMethods + end + + module ClassMethods + # Add a callback, which runs before TestCase#setup. + def setup(*args, &block) + set_callback(:setup, :before, *args, &block) + end + + # Add a callback, which runs after TestCase#teardown. + def teardown(*args, &block) + set_callback(:teardown, :after, *args, &block) + end + end + + def before_setup # :nodoc: + super + run_callbacks :setup + end + + def after_teardown # :nodoc: + begin + run_callbacks :teardown + rescue => e + self.failures << Minitest::UnexpectedError.new(e) + end + + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/stream.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/stream.rb new file mode 100644 index 0000000..d070a17 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/stream.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Stream #:nodoc: + private + + def silence_stream(stream) + old_stream = stream.dup + stream.reopen(IO::NULL) + stream.sync = true + yield + ensure + stream.reopen(old_stream) + old_stream.close + end + + def quietly + silence_stream(STDOUT) do + silence_stream(STDERR) do + yield + end + end + end + + def capture(stream) + stream = stream.to_s + captured_stream = Tempfile.new(stream) + stream_io = eval("$#{stream}") + origin_stream = stream_io.dup + stream_io.reopen(captured_stream) + + yield + + stream_io.rewind + return captured_stream.read + ensure + captured_stream.close + captured_stream.unlink + stream_io.reopen(origin_stream) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/tagged_logging.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/tagged_logging.rb new file mode 100644 index 0000000..9ca50c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/tagged_logging.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + # Logs a "PostsControllerTest: test name" heading before each test to + # make test.log easier to search and follow along with. + module TaggedLogging #:nodoc: + attr_writer :tagged_logger + + def before_setup + if tagged_logger && tagged_logger.info? 
+ heading = "#{self.class}: #{name}" + divider = "-" * heading.size + tagged_logger.info divider + tagged_logger.info heading + tagged_logger.info divider + end + super + end + + private + def tagged_logger + @tagged_logger ||= (defined?(Rails.logger) && Rails.logger) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/time_helpers.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/time_helpers.rb new file mode 100644 index 0000000..998a51a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/testing/time_helpers.rb @@ -0,0 +1,200 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/string/strip" # for strip_heredoc +require "active_support/core_ext/time/calculations" +require "concurrent/map" + +module ActiveSupport + module Testing + class SimpleStubs # :nodoc: + Stub = Struct.new(:object, :method_name, :original_method) + + def initialize + @stubs = Concurrent::Map.new { |h, k| h[k] = {} } + end + + def stub_object(object, method_name, &block) + if stub = stubbing(object, method_name) + unstub_object(stub) + end + + new_name = "__simple_stub__#{method_name}" + + @stubs[object.object_id][method_name] = Stub.new(object, method_name, new_name) + + object.singleton_class.send :alias_method, new_name, method_name + object.define_singleton_method(method_name, &block) + end + + def unstub_all! + @stubs.each_value do |object_stubs| + object_stubs.each_value do |stub| + unstub_object(stub) + end + end + @stubs.clear + end + + def stubbing(object, method_name) + @stubs[object.object_id][method_name] + end + + private + + def unstub_object(stub) + singleton_class = stub.object.singleton_class + singleton_class.send :silence_redefinition_of_method, stub.method_name + singleton_class.send :alias_method, stub.method_name, stub.original_method + singleton_class.send :undef_method, stub.original_method + end + end + + # Contains helpers that help you test passage of time. + module TimeHelpers + def after_teardown + travel_back + super + end + + # Changes current time to the time in the future or in the past by a given time difference by + # stubbing +Time.now+, +Date.today+, and +DateTime.now+. The stubs are automatically removed + # at the end of the test. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day + # Time.current # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # Date.current # => Sun, 10 Nov 2013 + # DateTime.current # => Sun, 10 Nov 2013 15:34:49 -0500 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day do + # User.create.created_at # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel(duration, &block) + travel_to Time.now + duration, &block + end + + # Changes current time to the given time by stubbing +Time.now+, + # +Date.today+, and +DateTime.now+ to return the time or date passed into this method. + # The stubs are automatically removed at the end of the test. 
+ # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 01, 04, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # Date.current # => Wed, 24 Nov 2004 + # DateTime.current # => Wed, 24 Nov 2004 01:04:44 -0500 + # + # Dates are taken as their timestamp at the beginning of the day in the + # application time zone. Time.current returns said timestamp, + # and Time.now its equivalent in the system time zone. Similarly, + # Date.current returns a date equal to the argument, and + # Date.today the date according to Time.now, which may + # be different. (Note that you rarely want to deal with Time.now, + # or Date.today, in order to honor the application time zone + # please always use Time.current and Date.current.) + # + # Note that the usec for the time passed will be set to 0 to prevent rounding + # errors with external services, like MySQL (which will round instead of floor, + # leading to off-by-one-second errors). + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 01, 04, 44) do + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel_to(date_or_time) + if block_given? && simple_stubs.stubbing(Time, :now) + travel_to_nested_block_call = <<-MSG.strip_heredoc + + Calling `travel_to` with a block, when we have previously already made a call to `travel_to`, can lead to confusing time stubbing. + + Instead of: + + travel_to 2.days.from_now do + # 2 days from today + travel_to 3.days.from_now do + # 5 days from today + end + end + + preferred way to achieve above is: + + travel 2.days do + # 2 days from today + end + + travel 5.days do + # 5 days from today + end + + MSG + raise travel_to_nested_block_call + end + + if date_or_time.is_a?(Date) && !date_or_time.is_a?(DateTime) + now = date_or_time.midnight.to_time + else + now = date_or_time.to_time.change(usec: 0) + end + + simple_stubs.stub_object(Time, :now) { at(now.to_i) } + simple_stubs.stub_object(Date, :today) { jd(now.to_date.jd) } + simple_stubs.stub_object(DateTime, :now) { jd(now.to_date.jd, now.hour, now.min, now.sec, Rational(now.utc_offset, 86400)) } + + if block_given? + begin + yield + ensure + travel_back + end + end + end + + # Returns the current time back to its original state, by removing the stubs added by + # +travel+ and +travel_to+. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 01, 04, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # travel_back + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel_back + simple_stubs.unstub_all! + end + + # Calls +travel_to+ with +Time.now+. 
+ # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time + # sleep(1) + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time do + # sleep(1) + # User.create.created_at # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # end + # Time.current # => Sun, 09 Jul 2017 15:34:50 EST -05:00 + def freeze_time(&block) + travel_to Time.now, &block + end + + private + + def simple_stubs + @simple_stubs ||= SimpleStubs.new + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/time.rb new file mode 100644 index 0000000..5185467 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/time.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + autoload :Duration, "active_support/duration" + autoload :TimeWithZone, "active_support/time_with_zone" + autoload :TimeZone, "active_support/values/time_zone" +end + +require "date" +require "time" + +require "active_support/core_ext/time" +require "active_support/core_ext/date" +require "active_support/core_ext/date_time" + +require "active_support/core_ext/integer/time" +require "active_support/core_ext/numeric/time" + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/time_with_zone.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/time_with_zone.rb new file mode 100644 index 0000000..20650ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/time_with_zone.rb @@ -0,0 +1,551 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/values/time_zone" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + # A Time-like class that can represent a time in any time zone. Necessary + # because standard Ruby Time instances are limited to UTC and the + # system's ENV['TZ'] zone. + # + # You shouldn't ever need to create a TimeWithZone instance directly via +new+. + # Instead use methods +local+, +parse+, +at+ and +now+ on TimeZone instances, + # and +in_time_zone+ on Time and DateTime instances. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.local(2007, 2, 10, 15, 30, 45) # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # Time.zone.parse('2007-02-10 15:30:45') # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # Time.zone.at(1171139445) # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # Time.zone.now # => Sun, 18 May 2008 13:07:55 EDT -04:00 + # Time.utc(2007, 2, 10, 20, 30, 45).in_time_zone # => Sat, 10 Feb 2007 15:30:45 EST -05:00 + # + # See Time and TimeZone for further documentation of these methods. + # + # TimeWithZone instances implement the same API as Ruby Time instances, so + # that Time and TimeWithZone instances are interchangeable. + # + # t = Time.zone.now # => Sun, 18 May 2008 13:27:25 EDT -04:00 + # t.hour # => 13 + # t.dst? 
# => true + # t.utc_offset # => -14400 + # t.zone # => "EDT" + # t.to_s(:rfc822) # => "Sun, 18 May 2008 13:27:25 -0400" + # t + 1.day # => Mon, 19 May 2008 13:27:25 EDT -04:00 + # t.beginning_of_year # => Tue, 01 Jan 2008 00:00:00 EST -05:00 + # t > Time.utc(1999) # => true + # t.is_a?(Time) # => true + # t.is_a?(ActiveSupport::TimeWithZone) # => true + class TimeWithZone + # Report class name as 'Time' to thwart type checking. + def self.name + "Time" + end + + PRECISIONS = Hash.new { |h, n| h[n] = "%FT%T.%#{n}N".freeze } + PRECISIONS[0] = "%FT%T".freeze + + include Comparable, DateAndTime::Compatibility + attr_reader :time_zone + + def initialize(utc_time, time_zone, local_time = nil, period = nil) + @utc = utc_time ? transfer_time_values_to_utc_constructor(utc_time) : nil + @time_zone, @time = time_zone, local_time + @period = @utc ? period : get_period_and_ensure_valid_local_time(period) + end + + # Returns a Time instance that represents the time in +time_zone+. + def time + @time ||= period.to_local(@utc) + end + + # Returns a Time instance of the simultaneous time in the UTC timezone. + def utc + @utc ||= period.to_utc(@time) + end + alias_method :comparable_time, :utc + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns the underlying TZInfo::TimezonePeriod. + def period + @period ||= time_zone.period_for_utc(@utc) + end + + # Returns the simultaneous time in Time.zone, or the specified zone. + def in_time_zone(new_zone = ::Time.zone) + return self if time_zone == new_zone + utc.in_time_zone(new_zone) + end + + # Returns a Time instance of the simultaneous time in the system timezone. + def localtime(utc_offset = nil) + utc.getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns true if the current time is within Daylight Savings Time for the + # specified time zone. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.parse("2012-5-30").dst? # => true + # Time.zone.parse("2012-11-30").dst? # => false + def dst? + period.dst? + end + alias_method :isdst, :dst? + + # Returns true if the current time zone is set to UTC. + # + # Time.zone = 'UTC' # => 'UTC' + # Time.zone.now.utc? # => true + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.now.utc? # => false + def utc? + period.offset.abbreviation == :UTC || period.offset.abbreviation == :UCT + end + alias_method :gmt?, :utc? + + # Returns the offset from current time to UTC time in seconds. + def utc_offset + period.utc_total_offset + end + alias_method :gmt_offset, :utc_offset + alias_method :gmtoff, :utc_offset + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.formatted_offset(true) # => "-05:00" + # Time.zone.now.formatted_offset(false) # => "-0500" + # Time.zone = 'UTC' # => "UTC" + # Time.zone.now.formatted_offset(true, "0") # => "0" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Returns the time zone abbreviation. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.zone # => "EST" + def zone + period.zone_identifier.to_s + end + + # Returns a string of the object's date, time, zone, and offset from UTC. 
+ # + # Time.zone.now.inspect # => "Thu, 04 Dec 2014 11:00:25 EST -05:00" + def inspect + "#{time.strftime('%a, %d %b %Y %H:%M:%S')} #{zone} #{formatted_offset}" + end + + # Returns a string of the object's date and time in the ISO 8601 standard + # format. + # + # Time.zone.now.xmlschema # => "2014-12-04T11:02:37-05:00" + def xmlschema(fraction_digits = 0) + "#{time.strftime(PRECISIONS[fraction_digits.to_i])}#{formatted_offset(true, 'Z'.freeze)}" + end + alias_method :iso8601, :xmlschema + alias_method :rfc3339, :xmlschema + + # Coerces time to a string for JSON encoding. The default format is ISO 8601. + # You can get %Y/%m/%d %H:%M:%S +offset style by setting + # ActiveSupport::JSON::Encoding.use_standard_json_time_format + # to +false+. + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = true + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005-02-01T05:15:10.000-10:00" + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = false + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005/02/01 05:15:10 -1000" + def as_json(options = nil) + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{time.strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end + + def init_with(coder) #:nodoc: + initialize(coder["utc"], coder["zone"], coder["time"]) + end + + def encode_with(coder) #:nodoc: + coder.tag = "!ruby/object:ActiveSupport::TimeWithZone" + coder.map = { "utc" => utc, "zone" => time_zone, "time" => time } + end + + # Returns a string of the object's date and time in the format used by + # HTTP requests. + # + # Time.zone.now.httpdate # => "Tue, 01 Jan 2013 04:39:43 GMT" + def httpdate + utc.httpdate + end + + # Returns a string of the object's date and time in the RFC 2822 standard + # format. + # + # Time.zone.now.rfc2822 # => "Tue, 01 Jan 2013 04:51:39 +0000" + def rfc2822 + to_s(:rfc822) + end + alias_method :rfc822, :rfc2822 + + # Returns a string of the object's date and time. + # Accepts an optional format: + # * :default - default value, mimics Ruby Time#to_s format. + # * :db - format outputs time in UTC :db time. See Time#to_formatted_s(:db). + # * Any key in Time::DATE_FORMATS can be used. See active_support/core_ext/time/conversions.rb. + def to_s(format = :default) + if format == :db + utc.to_s(format) + elsif formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + "#{time.strftime("%Y-%m-%d %H:%M:%S")} #{formatted_offset(false, 'UTC')}" # mimicking Ruby Time#to_s format + end + end + alias_method :to_formatted_s, :to_s + + # Replaces %Z directive with +zone before passing to Time#strftime, + # so that zone information is correct. + def strftime(format) + format = format.gsub(/((?:\A|[^%])(?:%%)*)%Z/, "\\1#{zone}") + getlocal(utc_offset).strftime(format) + end + + # Use the time in UTC for comparisons. + def <=>(other) + utc <=> other + end + + # Returns true if the current object's time is within the specified + # +min+ and +max+ time. + def between?(min, max) + utc.between?(min, max) + end + + # Returns true if the current object's time is in the past. + def past? + utc.past? + end + + # Returns true if the current object's time falls within + # the current day. + def today? + time.today? + end + + # Returns true if the current object's time is in the future. + def future? + utc.future? 
+ end + + # Returns +true+ if +other+ is equal to current object. + def eql?(other) + other.eql?(utc) + end + + def hash + utc.hash + end + + # Adds an interval of time to the current object's time and returns that + # value as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now + 1000 # => Sun, 02 Nov 2014 01:43:08 EDT -04:00 + # + # If we're adding a Duration of variable length (i.e., years, months, days), + # move forward from #time, otherwise move forward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time + 24.hours will advance exactly 24 hours, while a + # time + 1.day will advance 23-25 hours, depending on the day. + # + # now + 24.hours # => Mon, 03 Nov 2014 00:26:28 EST -05:00 + # now + 1.day # => Mon, 03 Nov 2014 01:26:28 EST -05:00 + def +(other) + if duration_of_variable_length?(other) + method_missing(:+, other) + else + result = utc.acts_like?(:date) ? utc.since(other) : utc + other rescue utc.since(other) + result.in_time_zone(time_zone) + end + end + alias_method :since, :+ + alias_method :in, :+ + + # Returns a new TimeWithZone object that represents the difference between + # the current object's time and the +other+ time. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28 EST -05:00 + # now - 1000 # => Mon, 03 Nov 2014 00:09:48 EST -05:00 + # + # If subtracting a Duration of variable length (i.e., years, months, days), + # move backward from #time, otherwise move backward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time - 24.hours will go subtract exactly 24 hours, while a + # time - 1.day will subtract 23-25 hours, depending on the day. + # + # now - 24.hours # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now - 1.day # => Sun, 02 Nov 2014 00:26:28 EDT -04:00 + def -(other) + if other.acts_like?(:time) + to_time - other.to_time + elsif duration_of_variable_length?(other) + method_missing(:-, other) + else + result = utc.acts_like?(:date) ? utc.ago(other) : utc - other rescue utc.ago(other) + result.in_time_zone(time_zone) + end + end + + # Subtracts an interval of time from the current object's time and returns + # the result as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28 EST -05:00 + # now.ago(1000) # => Mon, 03 Nov 2014 00:09:48 EST -05:00 + # + # If we're subtracting a Duration of variable length (i.e., years, months, + # days), move backward from #time, otherwise move backward from #utc, for + # accuracy when moving across DST boundaries. + # + # For instance, time.ago(24.hours) will move back exactly 24 hours, + # while time.ago(1.day) will move back 23-25 hours, depending on + # the day. + # + # now.ago(24.hours) # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now.ago(1.day) # => Sun, 02 Nov 2014 00:26:28 EDT -04:00 + def ago(other) + since(-other) + end + + # Returns a new +ActiveSupport::TimeWithZone+ where one or more of the elements have + # been changed according to the +options+ parameter. The time options (:hour, + # :min, :sec, :usec, :nsec) reset cascadingly, + # so if only the hour is passed, then minute, sec, usec and nsec is set to 0. If the + # hour and minute is passed, then sec, usec and nsec is set to 0. 
The +options+ + # parameter takes a hash with any of these keys: :year, :month, + # :day, :hour, :min, :sec, :usec, + # :nsec, :offset, :zone. Pass either :usec + # or :nsec, not both. Similarly, pass either :zone or + # :offset, not both. + # + # t = Time.zone.now # => Fri, 14 Apr 2017 11:45:15 EST -05:00 + # t.change(year: 2020) # => Tue, 14 Apr 2020 11:45:15 EST -05:00 + # t.change(hour: 12) # => Fri, 14 Apr 2017 12:00:00 EST -05:00 + # t.change(min: 30) # => Fri, 14 Apr 2017 11:30:00 EST -05:00 + # t.change(offset: "-10:00") # => Fri, 14 Apr 2017 11:45:15 HST -10:00 + # t.change(zone: "Hawaii") # => Fri, 14 Apr 2017 11:45:15 HST -10:00 + def change(options) + if options[:zone] && options[:offset] + raise ArgumentError, "Can't change both :offset and :zone at the same time: #{options.inspect}" + end + + new_time = time.change(options) + + if options[:zone] + new_zone = ::Time.find_zone(options[:zone]) + elsif options[:offset] + new_zone = ::Time.find_zone(new_time.utc_offset) + end + + new_zone ||= time_zone + periods = new_zone.periods_for_local(new_time) + + self.class.new(nil, new_zone, new_time, periods.include?(period) ? period : nil) + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The result is returned as a + # new TimeWithZone object. + # + # The +options+ parameter takes a hash with any of these keys: + # :years, :months, :weeks, :days, + # :hours, :minutes, :seconds. + # + # If advancing by a value of variable length (i.e., years, weeks, months, + # days), move forward from #time, otherwise move forward from #utc, for + # accuracy when moving across DST boundaries. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28 EDT -04:00 + # now.advance(seconds: 1) # => Sun, 02 Nov 2014 01:26:29 EDT -04:00 + # now.advance(minutes: 1) # => Sun, 02 Nov 2014 01:27:28 EDT -04:00 + # now.advance(hours: 1) # => Sun, 02 Nov 2014 01:26:28 EST -05:00 + # now.advance(days: 1) # => Mon, 03 Nov 2014 01:26:28 EST -05:00 + # now.advance(weeks: 1) # => Sun, 09 Nov 2014 01:26:28 EST -05:00 + # now.advance(months: 1) # => Tue, 02 Dec 2014 01:26:28 EST -05:00 + # now.advance(years: 1) # => Mon, 02 Nov 2015 01:26:28 EST -05:00 + def advance(options) + # If we're advancing a value of variable length (i.e., years, weeks, months, days), advance from #time, + # otherwise advance from #utc, for accuracy when moving across DST boundaries + if options.values_at(:years, :weeks, :months, :days).any? + method_missing(:advance, options) + else + utc.advance(options).in_time_zone(time_zone) + end + end + + %w(year mon month day mday wday yday hour min sec usec nsec to_date).each do |method_name| + class_eval <<-EOV, __FILE__, __LINE__ + 1 + def #{method_name} # def month + time.#{method_name} # time.month + end # end + EOV + end + + # Returns Array of parts of Time in sequence of + # [seconds, minutes, hours, day, month, year, weekday, yearday, dst?, zone]. + # + # now = Time.zone.now # => Tue, 18 Aug 2015 02:29:27 UTC +00:00 + # now.to_a # => [27, 29, 2, 18, 8, 2015, 2, 230, false, "UTC"] + def to_a + [time.sec, time.min, time.hour, time.day, time.mon, time.year, time.wday, time.yday, dst?, zone] + end + + # Returns the object's date and time as a floating point number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). 
+ # + # Time.zone.now.to_f # => 1417709320.285418 + def to_f + utc.to_f + end + + # Returns the object's date and time as an integer number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_i # => 1417709320 + def to_i + utc.to_i + end + alias_method :tv_sec, :to_i + + # Returns the object's date and time as a rational number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_r # => (708854548642709/500000) + def to_r + utc.to_r + end + + # Returns an instance of DateTime with the timezone's UTC offset + # + # Time.zone.now.to_datetime # => Tue, 18 Aug 2015 02:32:20 +0000 + # Time.current.in_time_zone('Hawaii').to_datetime # => Mon, 17 Aug 2015 16:32:20 -1000 + def to_datetime + @to_datetime ||= utc.to_datetime.new_offset(Rational(utc_offset, 86_400)) + end + + # Returns an instance of +Time+, either with the same UTC offset + # as +self+ or in the local system timezone depending on the setting + # of +ActiveSupport.to_time_preserves_timezone+. + def to_time + if preserve_timezone + @to_time_with_instance_offset ||= getlocal(utc_offset) + else + @to_time_with_system_offset ||= getlocal + end + end + + # So that +self+ acts_like?(:time). + def acts_like_time? + true + end + + # Say we're a Time to thwart type checking. + def is_a?(klass) + klass == ::Time || super + end + alias_method :kind_of?, :is_a? + + # An instance of ActiveSupport::TimeWithZone is never blank + def blank? + false + end + + def freeze + # preload instance variables before freezing + period; utc; time; to_datetime; to_time + super + end + + def marshal_dump + [utc, time_zone.name, time] + end + + def marshal_load(variables) + initialize(variables[0].utc, ::Time.find_zone(variables[1]), variables[2].utc) + end + + # respond_to_missing? is not called in some cases, such as when type conversion is + # performed with Kernel#String + def respond_to?(sym, include_priv = false) + # ensure that we're not going to throw and rescue from NoMethodError in method_missing which is slow + return false if sym.to_sym == :to_str + super + end + + # Ensure proxy class responds to all methods that underlying time instance + # responds to. + def respond_to_missing?(sym, include_priv) + return false if sym.to_sym == :acts_like_date? + time.respond_to?(sym, include_priv) + end + + # Send the missing method to +time+ instance, and wrap result in a new + # TimeWithZone with the existing +time_zone+. + def method_missing(sym, *args, &block) + wrap_with_time_zone time.__send__(sym, *args, &block) + rescue NoMethodError => e + raise e, e.message.sub(time.inspect, inspect), e.backtrace + end + + private + def get_period_and_ensure_valid_local_time(period) + # we don't want a Time.local instance enforcing its own DST rules as well, + # so transfer time values to a utc constructor if necessary + @time = transfer_time_values_to_utc_constructor(@time) unless @time.utc? + begin + period || @time_zone.period_for_local(@time) + rescue ::TZInfo::PeriodNotFound + # time is in the "spring forward" hour gap, so we're moving the time forward one hour and trying again + @time += 1.hour + retry + end + end + + def transfer_time_values_to_utc_constructor(time) + # avoid creating another Time object if possible + return time if time.instance_of?(::Time) && time.utc? + ::Time.utc(time.year, time.month, time.day, time.hour, time.min, time.sec + time.subsec) + end + + def duration_of_variable_length?(obj) + ActiveSupport::Duration === obj && obj.parts.any? 
{ |p| [:years, :months, :weeks, :days].include?(p[0]) } + end + + def wrap_with_time_zone(time) + if time.acts_like?(:time) + periods = time_zone.periods_for_local(time) + self.class.new(nil, time_zone, time, periods.include?(period) ? period : nil) + elsif time.is_a?(Range) + wrap_with_time_zone(time.begin)..wrap_with_time_zone(time.end) + else + time + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/values/time_zone.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/values/time_zone.rb new file mode 100644 index 0000000..90501bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/values/time_zone.rb @@ -0,0 +1,565 @@ +# frozen_string_literal: true + +require "tzinfo" +require "concurrent/map" +require "active_support/core_ext/object/blank" + +module ActiveSupport + # The TimeZone class serves as a wrapper around TZInfo::Timezone instances. + # It allows us to do the following: + # + # * Limit the set of zones provided by TZInfo to a meaningful subset of 134 + # zones. + # * Retrieve and display zones with a friendlier name + # (e.g., "Eastern Time (US & Canada)" instead of "America/New_York"). + # * Lazily load TZInfo::Timezone instances only when they're needed. + # * Create ActiveSupport::TimeWithZone instances via TimeZone's +local+, + # +parse+, +at+ and +now+ methods. + # + # If you set config.time_zone in the Rails Application, you can + # access this TimeZone object via Time.zone: + # + # # application.rb: + # class Application < Rails::Application + # config.time_zone = 'Eastern Time (US & Canada)' + # end + # + # Time.zone # => # + # Time.zone.name # => "Eastern Time (US & Canada)" + # Time.zone.now # => Sun, 18 May 2008 14:30:44 EDT -04:00 + class TimeZone + # Keys are Rails TimeZone names, values are TZInfo identifiers. + MAPPING = { + "International Date Line West" => "Etc/GMT+12", + "Midway Island" => "Pacific/Midway", + "American Samoa" => "Pacific/Pago_Pago", + "Hawaii" => "Pacific/Honolulu", + "Alaska" => "America/Juneau", + "Pacific Time (US & Canada)" => "America/Los_Angeles", + "Tijuana" => "America/Tijuana", + "Mountain Time (US & Canada)" => "America/Denver", + "Arizona" => "America/Phoenix", + "Chihuahua" => "America/Chihuahua", + "Mazatlan" => "America/Mazatlan", + "Central Time (US & Canada)" => "America/Chicago", + "Saskatchewan" => "America/Regina", + "Guadalajara" => "America/Mexico_City", + "Mexico City" => "America/Mexico_City", + "Monterrey" => "America/Monterrey", + "Central America" => "America/Guatemala", + "Eastern Time (US & Canada)" => "America/New_York", + "Indiana (East)" => "America/Indiana/Indianapolis", + "Bogota" => "America/Bogota", + "Lima" => "America/Lima", + "Quito" => "America/Lima", + "Atlantic Time (Canada)" => "America/Halifax", + "Caracas" => "America/Caracas", + "La Paz" => "America/La_Paz", + "Santiago" => "America/Santiago", + "Newfoundland" => "America/St_Johns", + "Brasilia" => "America/Sao_Paulo", + "Buenos Aires" => "America/Argentina/Buenos_Aires", + "Montevideo" => "America/Montevideo", + "Georgetown" => "America/Guyana", + "Puerto Rico" => "America/Puerto_Rico", + "Greenland" => "America/Godthab", + "Mid-Atlantic" => "Atlantic/South_Georgia", + "Azores" => "Atlantic/Azores", + "Cape Verde Is." 
=> "Atlantic/Cape_Verde", + "Dublin" => "Europe/Dublin", + "Edinburgh" => "Europe/London", + "Lisbon" => "Europe/Lisbon", + "London" => "Europe/London", + "Casablanca" => "Africa/Casablanca", + "Monrovia" => "Africa/Monrovia", + "UTC" => "Etc/UTC", + "Belgrade" => "Europe/Belgrade", + "Bratislava" => "Europe/Bratislava", + "Budapest" => "Europe/Budapest", + "Ljubljana" => "Europe/Ljubljana", + "Prague" => "Europe/Prague", + "Sarajevo" => "Europe/Sarajevo", + "Skopje" => "Europe/Skopje", + "Warsaw" => "Europe/Warsaw", + "Zagreb" => "Europe/Zagreb", + "Brussels" => "Europe/Brussels", + "Copenhagen" => "Europe/Copenhagen", + "Madrid" => "Europe/Madrid", + "Paris" => "Europe/Paris", + "Amsterdam" => "Europe/Amsterdam", + "Berlin" => "Europe/Berlin", + "Bern" => "Europe/Zurich", + "Zurich" => "Europe/Zurich", + "Rome" => "Europe/Rome", + "Stockholm" => "Europe/Stockholm", + "Vienna" => "Europe/Vienna", + "West Central Africa" => "Africa/Algiers", + "Bucharest" => "Europe/Bucharest", + "Cairo" => "Africa/Cairo", + "Helsinki" => "Europe/Helsinki", + "Kyiv" => "Europe/Kiev", + "Riga" => "Europe/Riga", + "Sofia" => "Europe/Sofia", + "Tallinn" => "Europe/Tallinn", + "Vilnius" => "Europe/Vilnius", + "Athens" => "Europe/Athens", + "Istanbul" => "Europe/Istanbul", + "Minsk" => "Europe/Minsk", + "Jerusalem" => "Asia/Jerusalem", + "Harare" => "Africa/Harare", + "Pretoria" => "Africa/Johannesburg", + "Kaliningrad" => "Europe/Kaliningrad", + "Moscow" => "Europe/Moscow", + "St. Petersburg" => "Europe/Moscow", + "Volgograd" => "Europe/Volgograd", + "Samara" => "Europe/Samara", + "Kuwait" => "Asia/Kuwait", + "Riyadh" => "Asia/Riyadh", + "Nairobi" => "Africa/Nairobi", + "Baghdad" => "Asia/Baghdad", + "Tehran" => "Asia/Tehran", + "Abu Dhabi" => "Asia/Muscat", + "Muscat" => "Asia/Muscat", + "Baku" => "Asia/Baku", + "Tbilisi" => "Asia/Tbilisi", + "Yerevan" => "Asia/Yerevan", + "Kabul" => "Asia/Kabul", + "Ekaterinburg" => "Asia/Yekaterinburg", + "Islamabad" => "Asia/Karachi", + "Karachi" => "Asia/Karachi", + "Tashkent" => "Asia/Tashkent", + "Chennai" => "Asia/Kolkata", + "Kolkata" => "Asia/Kolkata", + "Mumbai" => "Asia/Kolkata", + "New Delhi" => "Asia/Kolkata", + "Kathmandu" => "Asia/Kathmandu", + "Astana" => "Asia/Dhaka", + "Dhaka" => "Asia/Dhaka", + "Sri Jayawardenepura" => "Asia/Colombo", + "Almaty" => "Asia/Almaty", + "Novosibirsk" => "Asia/Novosibirsk", + "Rangoon" => "Asia/Rangoon", + "Bangkok" => "Asia/Bangkok", + "Hanoi" => "Asia/Bangkok", + "Jakarta" => "Asia/Jakarta", + "Krasnoyarsk" => "Asia/Krasnoyarsk", + "Beijing" => "Asia/Shanghai", + "Chongqing" => "Asia/Chongqing", + "Hong Kong" => "Asia/Hong_Kong", + "Urumqi" => "Asia/Urumqi", + "Kuala Lumpur" => "Asia/Kuala_Lumpur", + "Singapore" => "Asia/Singapore", + "Taipei" => "Asia/Taipei", + "Perth" => "Australia/Perth", + "Irkutsk" => "Asia/Irkutsk", + "Ulaanbaatar" => "Asia/Ulaanbaatar", + "Seoul" => "Asia/Seoul", + "Osaka" => "Asia/Tokyo", + "Sapporo" => "Asia/Tokyo", + "Tokyo" => "Asia/Tokyo", + "Yakutsk" => "Asia/Yakutsk", + "Darwin" => "Australia/Darwin", + "Adelaide" => "Australia/Adelaide", + "Canberra" => "Australia/Melbourne", + "Melbourne" => "Australia/Melbourne", + "Sydney" => "Australia/Sydney", + "Brisbane" => "Australia/Brisbane", + "Hobart" => "Australia/Hobart", + "Vladivostok" => "Asia/Vladivostok", + "Guam" => "Pacific/Guam", + "Port Moresby" => "Pacific/Port_Moresby", + "Magadan" => "Asia/Magadan", + "Srednekolymsk" => "Asia/Srednekolymsk", + "Solomon Is." 
=> "Pacific/Guadalcanal", + "New Caledonia" => "Pacific/Noumea", + "Fiji" => "Pacific/Fiji", + "Kamchatka" => "Asia/Kamchatka", + "Marshall Is." => "Pacific/Majuro", + "Auckland" => "Pacific/Auckland", + "Wellington" => "Pacific/Auckland", + "Nuku'alofa" => "Pacific/Tongatapu", + "Tokelau Is." => "Pacific/Fakaofo", + "Chatham Is." => "Pacific/Chatham", + "Samoa" => "Pacific/Apia" + } + + UTC_OFFSET_WITH_COLON = "%s%02d:%02d" + UTC_OFFSET_WITHOUT_COLON = UTC_OFFSET_WITH_COLON.tr(":", "") + + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + + class << self + # Assumes self represents an offset from UTC in seconds (as returned from + # Time#utc_offset) and turns this into an +HH:MM formatted string. + # + # ActiveSupport::TimeZone.seconds_to_utc_offset(-21_600) # => "-06:00" + def seconds_to_utc_offset(seconds, colon = true) + format = colon ? UTC_OFFSET_WITH_COLON : UTC_OFFSET_WITHOUT_COLON + sign = (seconds < 0 ? "-" : "+") + hours = seconds.abs / 3600 + minutes = (seconds.abs % 3600) / 60 + format % [sign, hours, minutes] + end + + def find_tzinfo(name) + TZInfo::Timezone.new(MAPPING[name] || name) + end + + alias_method :create, :new + + # Returns a TimeZone instance with the given name, or +nil+ if no + # such TimeZone instance exists. (This exists to support the use of + # this class with the +composed_of+ macro.) + def new(name) + self[name] + end + + # Returns an array of all TimeZone objects. There are multiple + # TimeZone objects per time zone, in many cases, to make it easier + # for users to find their own time zone. + def all + @zones ||= zones_map.values.sort + end + + # Locate a specific time zone object. If the argument is a string, it + # is interpreted to mean the name of the timezone to locate. If it is a + # numeric value it is either the hour offset, or the second offset, of the + # timezone to find. (The first one with that offset will be returned.) + # Returns +nil+ if no such time zone is known to the system. + def [](arg) + case arg + when String + begin + @lazy_zones_map[arg] ||= create(arg) + rescue TZInfo::InvalidTimezoneIdentifier + nil + end + when Numeric, ActiveSupport::Duration + arg *= 3600 if arg.abs <= 13 + all.find { |z| z.utc_offset == arg.to_i } + else + raise ArgumentError, "invalid argument to TimeZone[]: #{arg.inspect}" + end + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the USA. + def us_zones + country_zones(:us) + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the country specified by its ISO 3166-1 Alpha2 code. + def country_zones(country_code) + code = country_code.to_s.upcase + @country_zones[code] ||= load_country_zones(code) + end + + def clear #:nodoc: + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + @zones = nil + @zones_map = nil + end + + private + def load_country_zones(code) + country = TZInfo::Country.get(code) + country.zone_identifiers.map do |tz_id| + if MAPPING.value?(tz_id) + MAPPING.inject([]) do |memo, (key, value)| + memo << self[key] if value == tz_id + memo + end + else + create(tz_id, nil, TZInfo::Timezone.new(tz_id)) + end + end.flatten(1).sort! + end + + def zones_map + @zones_map ||= MAPPING.each_with_object({}) do |(name, _), zones| + timezone = self[name] + zones[name] = timezone if timezone + end + end + end + + include Comparable + attr_reader :name + attr_reader :tzinfo + + # Create a new TimeZone object with the given name and offset. 
The + # offset is the number of seconds that this time zone is offset from UTC + # (GMT). Seconds were chosen as the offset unit because that is the unit + # that Ruby uses to represent time zone offsets (see Time#utc_offset). + def initialize(name, utc_offset = nil, tzinfo = nil) + @name = name + @utc_offset = utc_offset + @tzinfo = tzinfo || TimeZone.find_tzinfo(name) + end + + # Returns the offset of this time zone from UTC in seconds. + def utc_offset + if @utc_offset + @utc_offset + else + tzinfo.current_period.utc_offset if tzinfo && tzinfo.current_period + end + end + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # zone = ActiveSupport::TimeZone['Central Time (US & Canada)'] + # zone.formatted_offset # => "-06:00" + # zone.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc_offset == 0 && alternate_utc_string || self.class.seconds_to_utc_offset(utc_offset, colon) + end + + # Compare this time zone to the parameter. The two are compared first on + # their offsets, and then by name. + def <=>(zone) + return unless zone.respond_to? :utc_offset + result = (utc_offset <=> zone.utc_offset) + result = (name <=> zone.name) if result == 0 + result + end + + # Compare #name and TZInfo identifier to a supplied regexp, returning +true+ + # if a match is found. + def =~(re) + re === name || re === MAPPING[name] + end + + # Returns a textual representation of this time zone. + def to_s + "(GMT#{formatted_offset}) #{name}" + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from given values. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.local(2007, 2, 1, 15, 30, 45) # => Thu, 01 Feb 2007 15:30:45 HST -10:00 + def local(*args) + time = Time.utc(*args) + ActiveSupport::TimeWithZone.new(nil, self, time) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from number of seconds since the Unix epoch. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.utc(2000).to_f # => 946684800.0 + # Time.zone.at(946684800.0) # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + def at(secs) + Time.at(secs).utc.in_time_zone(self) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an ISO 8601 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31T14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time components are missing then they will be set to zero. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31') # => Fri, 31 Dec 1999 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ will be raised unlike +parse+ + # which usually returns +nil+ when given an invalid date string. + def iso8601(str) + parts = Date._iso8601(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + + if parts[:offset] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from parsed string. 
+ # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.parse('1999-12-31 14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.parse('22:30:00') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.parse('Mar 2000') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ could be raised. + def parse(str, now = now()) + parts_to_time(Date._parse(str, false), now) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an RFC 3339 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('2000-01-01T00:00:00Z') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time or zone components are missing then an +ArgumentError+ will + # be raised. This is much stricter than either +parse+ or +iso8601+ which + # allow for missing components. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + + TimeWithZone.new(time.utc, self) + end + + # Parses +str+ according to +format+ and returns an ActiveSupport::TimeWithZone. + # + # Assumes that +str+ is a time in the time zone +self+, + # unless +format+ includes an explicit time zone. + # (This is the same behavior as +parse+.) + # In either case, the returned TimeWithZone has the timezone of +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.strptime('1999-12-31 14:00:00', '%Y-%m-%d %H:%M:%S') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.strptime('22:30:00', '%H:%M:%S') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.strptime('Mar 2000', '%b %Y') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + def strptime(str, format, now = now()) + parts_to_time(DateTime._strptime(str, format), now) + end + + # Returns an ActiveSupport::TimeWithZone instance representing the current + # time in the time zone represented by +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.now # => Wed, 23 Jan 2008 20:24:27 HST -10:00 + def now + time_now.utc.in_time_zone(self) + end + + # Returns the current date in this time zone. + def today + tzinfo.now.to_date + end + + # Returns the next date in this time zone. + def tomorrow + today + 1 + end + + # Returns the previous date in this time zone. + def yesterday + today - 1 + end + + # Adjust the given time to the simultaneous time in the time zone + # represented by +self+. Returns a Time.utc() instance -- if you want an + # ActiveSupport::TimeWithZone instance, use Time#in_time_zone() instead. 
+ def utc_to_local(time) + tzinfo.utc_to_local(time) + end + + # Adjust the given time to the simultaneous time in UTC. Returns a + # Time.utc() instance. + def local_to_utc(time, dst = true) + tzinfo.local_to_utc(time, dst) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_utc(time) + tzinfo.period_for_utc(time) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_local(time, dst = true) + tzinfo.period_for_local(time, dst) { |periods| periods.last } + end + + def periods_for_local(time) #:nodoc: + tzinfo.periods_for_local(time) + end + + def init_with(coder) #:nodoc: + initialize(coder["name"]) + end + + def encode_with(coder) #:nodoc: + coder.tag = "!ruby/object:#{self.class}" + coder.map = { "name" => tzinfo.name } + end + + private + def parts_to_time(parts, now) + raise ArgumentError, "invalid date" if parts.nil? + return if parts.empty? + + if parts[:seconds] + time = Time.at(parts[:seconds]) + else + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, parts[:year] || parts[:mon] ? 1 : now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + end + + if parts[:offset] || parts[:seconds] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + def time_now + Time.now + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/values/unicode_tables.dat b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/values/unicode_tables.dat new file mode 100644 index 0000000..f7d9c48 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/values/unicode_tables.dat differ diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/version.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/version.rb new file mode 100644 index 0000000..928838c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/version.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require_relative "gem_version" + +module ActiveSupport + # Returns the version of the currently loaded ActiveSupport as a Gem::Version + def self.version + gem_version + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini.rb new file mode 100644 index 0000000..f087e2c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini.rb @@ -0,0 +1,209 @@ +# frozen_string_literal: true + +require "time" +require "base64" +require "bigdecimal" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/date_time/calculations" + +module ActiveSupport + # = XmlMini + # + # To use the much faster libxml parser: + # gem 'libxml-ruby', '=0.9.7' + # XmlMini.backend = 'LibXML' + module XmlMini + extend self + + # This module decorates files deserialized using Hash.from_xml with + # the original_filename and content_type methods. 
+ module FileLike #:nodoc: + attr_writer :original_filename, :content_type + + def original_filename + @original_filename || "untitled" + end + + def content_type + @content_type || "application/octet-stream" + end + end + + DEFAULT_ENCODINGS = { + "binary" => "base64" + } unless defined?(DEFAULT_ENCODINGS) + + unless defined?(TYPE_NAMES) + TYPE_NAMES = { + "Symbol" => "symbol", + "Integer" => "integer", + "BigDecimal" => "decimal", + "Float" => "float", + "TrueClass" => "boolean", + "FalseClass" => "boolean", + "Date" => "date", + "DateTime" => "dateTime", + "Time" => "dateTime", + "Array" => "array", + "Hash" => "hash" + } + + # No need to map these on Ruby 2.4+ + TYPE_NAMES["Fixnum"] = "integer" unless 0.class == Integer + TYPE_NAMES["Bignum"] = "integer" unless 0.class == Integer + end + + FORMATTING = { + "symbol" => Proc.new { |symbol| symbol.to_s }, + "date" => Proc.new { |date| date.to_s(:db) }, + "dateTime" => Proc.new { |time| time.xmlschema }, + "binary" => Proc.new { |binary| ::Base64.encode64(binary) }, + "yaml" => Proc.new { |yaml| yaml.to_yaml } + } unless defined?(FORMATTING) + + # TODO use regexp instead of Date.parse + unless defined?(PARSING) + PARSING = { + "symbol" => Proc.new { |symbol| symbol.to_s.to_sym }, + "date" => Proc.new { |date| ::Date.parse(date) }, + "datetime" => Proc.new { |time| Time.xmlschema(time).utc rescue ::DateTime.parse(time).utc }, + "integer" => Proc.new { |integer| integer.to_i }, + "float" => Proc.new { |float| float.to_f }, + "decimal" => Proc.new do |number| + if String === number + begin + BigDecimal(number) + rescue ArgumentError + BigDecimal(number.to_f.to_s) + end + else + BigDecimal(number) + end + end, + "boolean" => Proc.new { |boolean| %w(1 true).include?(boolean.to_s.strip) }, + "string" => Proc.new { |string| string.to_s }, + "yaml" => Proc.new { |yaml| YAML.load(yaml) rescue yaml }, + "base64Binary" => Proc.new { |bin| ::Base64.decode64(bin) }, + "binary" => Proc.new { |bin, entity| _parse_binary(bin, entity) }, + "file" => Proc.new { |file, entity| _parse_file(file, entity) } + } + + PARSING.update( + "double" => PARSING["float"], + "dateTime" => PARSING["datetime"] + ) + end + + attr_accessor :depth + self.depth = 100 + + delegate :parse, to: :backend + + def backend + current_thread_backend || @backend + end + + def backend=(name) + backend = name && cast_backend_name_to_module(name) + self.current_thread_backend = backend if current_thread_backend + @backend = backend + end + + def with_backend(name) + old_backend = current_thread_backend + self.current_thread_backend = name && cast_backend_name_to_module(name) + yield + ensure + self.current_thread_backend = old_backend + end + + def to_tag(key, value, options) + type_name = options.delete(:type) + merged_options = options.merge(root: key, skip_instruct: true) + + if value.is_a?(::Method) || value.is_a?(::Proc) + if value.arity == 1 + value.call(merged_options) + else + value.call(merged_options, key.to_s.singularize) + end + elsif value.respond_to?(:to_xml) + value.to_xml(merged_options) + else + type_name ||= TYPE_NAMES[value.class.name] + type_name ||= value.class.name if value && !value.respond_to?(:to_str) + type_name = type_name.to_s if type_name + type_name = "dateTime" if type_name == "datetime" + + key = rename_key(key.to_s, options) + + attributes = options[:skip_types] || type_name.nil? ? {} : { type: type_name } + attributes[:nil] = true if value.nil? 
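+        # Record the transfer encoding (e.g. base64 for binary values) and run
+        # the value through the matching FORMATTING proc, if any, before
+        # handing it to the builder.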
+ + encoding = options[:encoding] || DEFAULT_ENCODINGS[type_name] + attributes[:encoding] = encoding if encoding + + formatted_value = FORMATTING[type_name] && !value.nil? ? + FORMATTING[type_name].call(value) : value + + options[:builder].tag!(key, formatted_value, attributes) + end + end + + def rename_key(key, options = {}) + camelize = options[:camelize] + dasherize = !options.has_key?(:dasherize) || options[:dasherize] + if camelize + key = true == camelize ? key.camelize : key.camelize(camelize) + end + key = _dasherize(key) if dasherize + key + end + + private + + def _dasherize(key) + # $2 must be a non-greedy regex for this to work + left, middle, right = /\A(_*)(.*?)(_*)\Z/.match(key.strip)[1, 3] + "#{left}#{middle.tr('_ ', '--')}#{right}" + end + + # TODO: Add support for other encodings + def _parse_binary(bin, entity) + case entity["encoding"] + when "base64" + ::Base64.decode64(bin) + else + bin + end + end + + def _parse_file(file, entity) + f = StringIO.new(::Base64.decode64(file)) + f.extend(FileLike) + f.original_filename = entity["name"] + f.content_type = entity["content_type"] + f + end + + def current_thread_backend + Thread.current[:xml_mini_backend] + end + + def current_thread_backend=(name) + Thread.current[:xml_mini_backend] = name && cast_backend_name_to_module(name) + end + + def cast_backend_name_to_module(name) + if name.is_a?(Module) + name + else + require "active_support/xml_mini/#{name.downcase}" + ActiveSupport.const_get("XmlMini_#{name}") + end + end + end + + XmlMini.backend = "REXML" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/jdom.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/jdom.rb new file mode 100644 index 0000000..7f94a64 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/jdom.rb @@ -0,0 +1,183 @@ +# frozen_string_literal: true + +raise "JRuby is required to use the JDOM backend for XmlMini" unless RUBY_PLATFORM.include?("java") + +require "jruby" +include Java + +require "active_support/core_ext/object/blank" + +java_import javax.xml.parsers.DocumentBuilder unless defined? DocumentBuilder +java_import javax.xml.parsers.DocumentBuilderFactory unless defined? DocumentBuilderFactory +java_import java.io.StringReader unless defined? StringReader +java_import org.xml.sax.InputSource unless defined? InputSource +java_import org.xml.sax.Attributes unless defined? Attributes +java_import org.w3c.dom.Node unless defined? Node + +module ActiveSupport + module XmlMini_JDOM #:nodoc: + extend self + + CONTENT_KEY = "__content__".freeze + + NODE_TYPE_NAMES = %w{ATTRIBUTE_NODE CDATA_SECTION_NODE COMMENT_NODE DOCUMENT_FRAGMENT_NODE + DOCUMENT_NODE DOCUMENT_TYPE_NODE ELEMENT_NODE ENTITY_NODE ENTITY_REFERENCE_NODE NOTATION_NODE + PROCESSING_INSTRUCTION_NODE TEXT_NODE} + + node_type_map = {} + NODE_TYPE_NAMES.each { |type| node_type_map[Node.send(type)] = type } + + # Parse an XML Document string or IO into a simple hash using Java's jdom. + # data:: + # XML Document string or IO to parse + def parse(data) + if data.respond_to?(:read) + data = data.read + end + + if data.blank? 
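+          # A blank document (or an IO that reads as blank) parses to an empty hash.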
+ {} + else + @dbf = DocumentBuilderFactory.new_instance + # secure processing of java xml + # https://archive.is/9xcQQ + @dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) + @dbf.setFeature("http://xml.org/sax/features/external-general-entities", false) + @dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false) + @dbf.setFeature(javax.xml.XMLConstants::FEATURE_SECURE_PROCESSING, true) + xml_string_reader = StringReader.new(data) + xml_input_source = InputSource.new(xml_string_reader) + doc = @dbf.new_document_builder.parse(xml_input_source) + merge_element!({ CONTENT_KEY => "" }, doc.document_element, XmlMini.depth) + end + end + + private + + # Convert an XML element and merge into the hash + # + # hash:: + # Hash to merge the converted element into. + # element:: + # XML element to merge into hash + def merge_element!(hash, element, depth) + raise "Document too deep!" if depth == 0 + delete_empty(hash) + merge!(hash, element.tag_name, collapse(element, depth)) + end + + def delete_empty(hash) + hash.delete(CONTENT_KEY) if hash[CONTENT_KEY] == "" + end + + # Actually converts an XML document element into a data structure. + # + # element:: + # The document element to be collapsed. + def collapse(element, depth) + hash = get_attributes(element) + + child_nodes = element.child_nodes + if child_nodes.length > 0 + (0...child_nodes.length).each do |i| + child = child_nodes.item(i) + merge_element!(hash, child, depth - 1) unless child.node_type == Node.TEXT_NODE + end + merge_texts!(hash, element) unless empty_content?(element) + hash + else + merge_texts!(hash, element) + end + end + + # Merge all the texts of an element into the hash + # + # hash:: + # Hash to add the converted element to. + # element:: + # XML element whose texts are to me merged into the hash + def merge_texts!(hash, element) + delete_empty(hash) + text_children = texts(element) + if text_children.join.empty? + hash + else + # must use value to prevent double-escaping + merge!(hash, CONTENT_KEY, text_children.join) + end + end + + # Adds a new key/value pair to an existing Hash. If the key to be added + # already exists and the existing value associated with key is not + # an Array, it will be wrapped in an Array. Then the new value is + # appended to that Array. + # + # hash:: + # Hash to add key/value pair to. + # key:: + # Key to be added. + # value:: + # Value to be associated with key. + def merge!(hash, key, value) + if hash.has_key?(key) + if hash[key].instance_of?(Array) + hash[key] << value + else + hash[key] = [hash[key], value] + end + elsif value.instance_of?(Array) + hash[key] = [value] + else + hash[key] = value + end + hash + end + + # Converts the attributes array of an XML element into a hash. + # Returns an empty Hash if node has no attributes. + # + # element:: + # XML element to extract attributes from. + def get_attributes(element) + attribute_hash = {} + attributes = element.attributes + (0...attributes.length).each do |i| + attribute_hash[CONTENT_KEY] ||= "" + attribute_hash[attributes.item(i).name] = attributes.item(i).value + end + attribute_hash + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. 
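+      #
+      # In practice this helper gathers the string data of every TEXT_NODE
+      # child; the emptiness check itself is done by +empty_content?+ below.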
+ def texts(element) + texts = [] + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + texts << item.get_data + end + end + texts + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + text = "".dup + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + text << item.get_data.strip + end + end + text.strip.length == 0 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxml.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxml.rb new file mode 100644 index 0000000..0b000fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxml.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXML #:nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Parser.io(data).parse.to_hash + end + end + end +end + +module LibXML #:nodoc: + module Conversions #:nodoc: + module Document #:nodoc: + def to_hash + root.to_hash + end + end + + module Node #:nodoc: + CONTENT_ROOT = "__content__".freeze + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + each_child do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= "".dup + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? + node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + each_attr { |a| node_hash[a.name] = a.value } + + hash + end + end + end +end + +# :enddoc: + +LibXML::XML::Document.include(LibXML::Conversions::Document) +LibXML::XML::Node.include(LibXML::Conversions::Node) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxmlsax.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxmlsax.rb new file mode 100644 index 0000000..dcf16e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/libxmlsax.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXMLSAX #:nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. 
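+    #
+    # A hypothetical end-to-end sketch (assumes the libxml-ruby gem is available):
+    #
+    #   ActiveSupport::XmlMini.backend = 'LibXMLSAX'
+    #   ActiveSupport::XmlMini.parse('<post><title>Hi</title></post>')
+    #   # => { "post" => { "title" => { "__content__" => "Hi" } } }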
+ class HashBuilder + include LibXML::XML::SaxParser::Callbacks + + CONTENT_KEY = "__content__".freeze + HASH_SIZE_KEY = "__hash_size__".freeze + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def on_start_document + @hash = { CONTENT_KEY => "".dup } + @hash_stack = [@hash] + end + + def on_end_document + @hash = @hash_stack.pop + @hash.delete(CONTENT_KEY) + end + + def on_start_element(name, attrs = {}) + new_hash = { CONTENT_KEY => "".dup }.merge!(attrs) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def on_end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def on_characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :on_cdata_block, :on_characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Error.set_handler(&LibXML::XML::Error::QUIET_HANDLER) + parser = LibXML::XML::SaxParser.io(data) + document = document_class.new + + parser.callbacks = document + parser.parse + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogiri.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogiri.rb new file mode 100644 index 0000000..5ee6fc8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogiri.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_Nokogiri #:nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml / nokogiri. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + doc = Nokogiri::XML(data) + raise doc.errors.first if doc.errors.length > 0 + doc.to_hash + end + end + + module Conversions #:nodoc: + module Document #:nodoc: + def to_hash + root.to_hash + end + end + + module Node #:nodoc: + CONTENT_ROOT = "__content__".freeze + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + children.each do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= "".dup + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank and there are child tags + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? 
+ node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + attribute_nodes.each { |a| node_hash[a.node_name] = a.value } + + hash + end + end + end + + Nokogiri::XML::Document.include(Conversions::Document) + Nokogiri::XML::Node.include(Conversions::Node) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogirisax.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogirisax.rb new file mode 100644 index 0000000..b01ed00 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/nokogirisax.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_NokogiriSAX #:nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. + class HashBuilder < Nokogiri::XML::SAX::Document + CONTENT_KEY = "__content__".freeze + HASH_SIZE_KEY = "__hash_size__".freeze + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def start_document + @hash = {} + @hash_stack = [@hash] + end + + def end_document + raise "Parse stack not empty!" if @hash_stack.size > 1 + end + + def error(error_message) + raise error_message + end + + def start_element(name, attrs = []) + new_hash = { CONTENT_KEY => "".dup }.merge!(Hash[attrs]) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :cdata_block, :characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + document = document_class.new + parser = Nokogiri::XML::SAX::Parser.new(document) + parser.parse(data) + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/rexml.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/rexml.rb new file mode 100644 index 0000000..32458d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-5.2.4.4/lib/active_support/xml_mini/rexml.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_REXML #:nodoc: + extend self + + CONTENT_KEY = "__content__".freeze + + # Parse an XML Document string or IO into a simple hash. + # + # Same as XmlSimple::xml_in but doesn't shoot itself in the foot, + # and uses the defaults from Active Support. + # + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? 
+ {} + else + silence_warnings { require "rexml/document" } unless defined?(REXML::Document) + doc = REXML::Document.new(data) + + if doc.root + merge_element!({}, doc.root, XmlMini.depth) + else + raise REXML::ParseException, + "The document #{doc.to_s.inspect} does not have a valid root" + end + end + end + + private + # Convert an XML element and merge into the hash + # + # hash:: + # Hash to merge the converted element into. + # element:: + # XML element to merge into hash + def merge_element!(hash, element, depth) + raise REXML::ParseException, "The document is too deep" if depth == 0 + merge!(hash, element.name, collapse(element, depth)) + end + + # Actually converts an XML document element into a data structure. + # + # element:: + # The document element to be collapsed. + def collapse(element, depth) + hash = get_attributes(element) + + if element.has_elements? + element.each_element { |child| merge_element!(hash, child, depth - 1) } + merge_texts!(hash, element) unless empty_content?(element) + hash + else + merge_texts!(hash, element) + end + end + + # Merge all the texts of an element into the hash + # + # hash:: + # Hash to add the converted element to. + # element:: + # XML element whose texts are to me merged into the hash + def merge_texts!(hash, element) + unless element.has_text? + hash + else + # must use value to prevent double-escaping + texts = "".dup + element.texts.each { |t| texts << t.value } + merge!(hash, CONTENT_KEY, texts) + end + end + + # Adds a new key/value pair to an existing Hash. If the key to be added + # already exists and the existing value associated with key is not + # an Array, it will be wrapped in an Array. Then the new value is + # appended to that Array. + # + # hash:: + # Hash to add key/value pair to. + # key:: + # Key to be added. + # value:: + # Value to be associated with key. + def merge!(hash, key, value) + if hash.has_key?(key) + if hash[key].instance_of?(Array) + hash[key] << value + else + hash[key] = [hash[key], value] + end + elsif value.instance_of?(Array) + hash[key] = [value] + else + hash[key] = value + end + hash + end + + # Converts the attributes array of an XML element into a hash. + # Returns an empty Hash if node has no attributes. + # + # element:: + # XML element to extract attributes from. + def get_attributes(element) + attributes = {} + element.attributes.each { |n, v| attributes[n] = v } + attributes + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + element.texts.join.blank? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/CHANGELOG.md new file mode 100644 index 0000000..2161d9c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/CHANGELOG.md @@ -0,0 +1,675 @@ +## Rails 6.1.4.6 (February 11, 2022) ## + +* Fix Reloader method signature to work with the new Executor signature + + +## Rails 6.1.4.5 (February 11, 2022) ## + +* No changes. + + +## Rails 6.1.4.4 (December 15, 2021) ## + +* No changes. + + +## Rails 6.1.4.3 (December 14, 2021) ## + +* No changes. + + +## Rails 6.1.4.2 (December 14, 2021) ## + +* No changes. + + +## Rails 6.1.4.1 (August 19, 2021) ## + +* No changes. + + +## Rails 6.1.4 (June 24, 2021) ## + +* MemCacheStore: convert any underlying value (including `false`) to an `Entry`. + + See [#42559](https://github.com/rails/rails/pull/42559). 
+ + *Alex Ghiculescu* + +* Fix bug in `number_with_precision` when using large `BigDecimal` values. + + Fixes #42302. + + *Federico Aldunate*, *Zachary Scott* + +* Check byte size instead of length on `secure_compare`. + + *Tietew* + +* Fix `Time.at` to not lose `:in` option. + + *Ryuta Kamizono* + +* Require a path for `config.cache_store = :file_store`. + + *Alex Ghiculescu* + +* Avoid having to store complex object in the default translation file. + + *Rafael Mendonça França* + + +## Rails 6.1.3.2 (May 05, 2021) ## + +* No changes. + + +## Rails 6.1.3.1 (March 26, 2021) ## + +* No changes. + + +## Rails 6.1.3 (February 17, 2021) ## + +* No changes. + + +## Rails 6.1.2.1 (February 10, 2021) ## + +* No changes. + + +## Rails 6.1.2 (February 09, 2021) ## + +* `ActiveSupport::Cache::MemCacheStore` now accepts an explicit `nil` for its `addresses` argument. + + ```ruby + config.cache_store = :mem_cache_store, nil + + # is now equivalent to + + config.cache_store = :mem_cache_store + + # and is also equivalent to + + config.cache_store = :mem_cache_store, ENV["MEMCACHE_SERVERS"] || "localhost:11211" + + # which is the fallback behavior of Dalli + ``` + + This helps those migrating from `:dalli_store`, where an explicit `nil` was permitted. + + *Michael Overmeyer* + + +## Rails 6.1.1 (January 07, 2021) ## + +* Change `IPAddr#to_json` to match the behavior of the json gem returning the string representation + instead of the instance variables of the object. + + Before: + + ```ruby + IPAddr.new("127.0.0.1").to_json + # => "{\"addr\":2130706433,\"family\":2,\"mask_addr\":4294967295}" + ``` + + After: + + ```ruby + IPAddr.new("127.0.0.1").to_json + # => "\"127.0.0.1\"" + ``` + + +## Rails 6.1.0 (December 09, 2020) ## + +* Ensure `MemoryStore` disables compression by default. Reverts behavior of + `MemoryStore` to its prior rails `5.1` behavior. + + *Max Gurewitz* + +* Calling `iso8601` on negative durations retains the negative sign on individual + digits instead of prepending it. + + This change is required so we can interoperate with PostgreSQL, which prefers + negative signs for each component. + + Compatibility with other iso8601 parsers which support leading negatives as well + as negatives per component is still retained. + + Before: + + (-1.year - 1.day).iso8601 + # => "-P1Y1D" + + After: + + (-1.year - 1.day).iso8601 + # => "P-1Y-1D" + + *Vipul A M* + +* Remove deprecated `ActiveSupport::Notifications::Instrumenter#end=`. + + *Rafael Mendonça França* + +* Deprecate `ActiveSupport::Multibyte::Unicode.default_normalization_form`. + + *Rafael Mendonça França* + +* Remove deprecated `ActiveSupport::Multibyte::Unicode.pack_graphemes`, + `ActiveSupport::Multibyte::Unicode.unpack_graphemes`, + `ActiveSupport::Multibyte::Unicode.normalize`, + `ActiveSupport::Multibyte::Unicode.downcase`, + `ActiveSupport::Multibyte::Unicode.upcase` and `ActiveSupport::Multibyte::Unicode.swapcase`. + + *Rafael Mendonça França* + +* Remove deprecated `ActiveSupport::Multibyte::Chars#consumes?` and `ActiveSupport::Multibyte::Chars#normalize`. + + *Rafael Mendonça França* + +* Remove deprecated file `active_support/core_ext/range/include_range`. + + *Rafael Mendonça França* + +* Remove deprecated file `active_support/core_ext/hash/transform_values`. + + *Rafael Mendonça França* + +* Remove deprecated file `active_support/core_ext/hash/compact`. + + *Rafael Mendonça França* + +* Remove deprecated file `active_support/core_ext/array/prepend_and_append`. 
+ + *Rafael Mendonça França* + +* Remove deprecated file `active_support/core_ext/numeric/inquiry`. + + *Rafael Mendonça França* + +* Remove deprecated file `active_support/core_ext/module/reachable`. + + *Rafael Mendonça França* + +* Remove deprecated `Module#parent_name`, `Module#parent` and `Module#parents`. + + *Rafael Mendonça França* + +* Remove deprecated `ActiveSupport::LoggerThreadSafeLevel#after_initialize`. + + *Rafael Mendonça França* + +* Remove deprecated `LoggerSilence` constant. + + *Rafael Mendonça França* + +* Remove deprecated fallback to `I18n.default_local` when `config.i18n.fallbacks` is empty. + + *Rafael Mendonça França* + +* Remove entries from local cache on `RedisCacheStore#delete_matched` + + Fixes #38627 + + *ojab* + +* Speed up `ActiveSupport::SecurityUtils.fixed_length_secure_compare` by using + `OpenSSL.fixed_length_secure_compare`, if available. + + *Nate Matykiewicz* + +* `ActiveSupport::Cache::MemCacheStore` now checks `ENV["MEMCACHE_SERVERS"]` before falling back to `"localhost:11211"` if configured without any addresses. + + ```ruby + config.cache_store = :mem_cache_store + + # is now equivalent to + + config.cache_store = :mem_cache_store, ENV["MEMCACHE_SERVERS"] || "localhost:11211" + + # instead of + + config.cache_store = :mem_cache_store, "localhost:11211" # ignores ENV["MEMCACHE_SERVERS"] + ``` + + *Sam Bostock* + +* `ActiveSupport::Subscriber#attach_to` now accepts an `inherit_all:` argument. When set to true, + it allows a subscriber to receive events for methods defined in the subscriber's ancestor class(es). + + ```ruby + class ActionControllerSubscriber < ActiveSupport::Subscriber + attach_to :action_controller + + def start_processing(event) + info "Processing by #{event.payload[:controller]}##{event.payload[:action]} as #{format}" + end + + def redirect_to(event) + info { "Redirected to #{event.payload[:location]}" } + end + end + + # We detach ActionControllerSubscriber from the :action_controller namespace so that our CustomActionControllerSubscriber + # can provide its own instrumentation for certain events in the namespace + ActionControllerSubscriber.detach_from(:action_controller) + + class CustomActionControllerSubscriber < ActionControllerSubscriber + attach_to :action_controller, inherit_all: true + + def start_processing(event) + info "A custom response to start_processing events" + end + + # => CustomActionControllerSubscriber will process events for "start_processing.action_controller" notifications + # using its own #start_processing implementation, while retaining ActionControllerSubscriber's instrumentation + # for "redirect_to.action_controller" notifications + end + ``` + + *Adrianna Chang* + +* Allow the digest class used to generate non-sensitive digests to be configured with `config.active_support.hash_digest_class`. + + `config.active_support.use_sha1_digests` is deprecated in favour of `config.active_support.hash_digest_class = ::Digest::SHA1`. + + *Dirkjan Bussink* + +* Fix bug to make memcached write_entry expire correctly with unless_exist + + *Jye Lee* + +* Add `ActiveSupport::Duration` conversion methods + + `in_seconds`, `in_minutes`, `in_hours`, `in_days`, `in_weeks`, `in_months`, and `in_years` return the respective duration covered. 
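+
+  A minimal illustration (the float results shown are assumptions based on the
+  description above):
+
+  ```ruby
+  12.hours.in_days    # => 0.5
+  90.minutes.in_hours # => 1.5
+  ```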
+ + *Jason York* + +* Fixed issue in `ActiveSupport::Cache::RedisCacheStore` not passing options + to `read_multi` causing `fetch_multi` to not work properly + + *Rajesh Sharma* + +* Fixed issue in `ActiveSupport::Cache::MemCacheStore` which caused duplicate compression, + and caused the provided `compression_threshold` to not be respected. + + *Max Gurewitz* + +* Prevent `RedisCacheStore` and `MemCacheStore` from performing compression + when reading entries written with `raw: true`. + + *Max Gurewitz* + +* `URI.parser` is deprecated and will be removed in Rails 6.2. Use + `URI::DEFAULT_PARSER` instead. + + *Jean Boussier* + +* `require_dependency` has been documented to be _obsolete_ in `:zeitwerk` + mode. The method is not deprecated as such (yet), but applications are + encouraged to not use it. + + In `:zeitwerk` mode, semantics match Ruby's and you do not need to be + defensive with load order. Just refer to classes and modules normally. If + the constant name is dynamic, camelize if needed, and constantize. + + *Xavier Noria* + +* Add 3rd person aliases of `Symbol#start_with?` and `Symbol#end_with?`. + + ```ruby + :foo.starts_with?("f") # => true + :foo.ends_with?("o") # => true + ``` + + *Ryuta Kamizono* + +* Add override of unary plus for `ActiveSupport::Duration`. + + `+ 1.second` is now identical to `+1.second` to prevent errors + where a seemingly innocent change of formatting leads to a change in the code behavior. + + Before: + ```ruby + +1.second.class + # => ActiveSupport::Duration + (+ 1.second).class + # => Integer + ``` + + After: + ```ruby + +1.second.class + # => ActiveSupport::Duration + (+ 1.second).class + # => ActiveSupport::Duration + ``` + + Fixes #39079. + + *Roman Kushnir* + +* Add subsec to `ActiveSupport::TimeWithZone#inspect`. + + Before: + + Time.at(1498099140).in_time_zone.inspect + # => "Thu, 22 Jun 2017 02:39:00 UTC +00:00" + Time.at(1498099140, 123456780, :nsec).in_time_zone.inspect + # => "Thu, 22 Jun 2017 02:39:00 UTC +00:00" + Time.at(1498099140 + Rational("1/3")).in_time_zone.inspect + # => "Thu, 22 Jun 2017 02:39:00 UTC +00:00" + + After: + + Time.at(1498099140).in_time_zone.inspect + # => "Thu, 22 Jun 2017 02:39:00.000000000 UTC +00:00" + Time.at(1498099140, 123456780, :nsec).in_time_zone.inspect + # => "Thu, 22 Jun 2017 02:39:00.123456780 UTC +00:00" + Time.at(1498099140 + Rational("1/3")).in_time_zone.inspect + # => "Thu, 22 Jun 2017 02:39:00.333333333 UTC +00:00" + + *akinomaeni* + +* Calling `ActiveSupport::TaggedLogging#tagged` without a block now returns a tagged logger. + + ```ruby + logger.tagged("BCX").info("Funky time!") # => [BCX] Funky time! + ``` + + *Eugene Kenny* + +* Align `Range#cover?` extension behavior with Ruby behavior for backwards ranges. + + `(1..10).cover?(5..3)` now returns `false`, as it does in plain Ruby. + + Also update `#include?` and `#===` behavior to match. + + *Michael Groeneman* + +* Update to TZInfo v2.0.0. + + This changes the output of `ActiveSupport::TimeZone.utc_to_local`, but + can be controlled with the + `ActiveSupport.utc_to_local_returns_utc_offset_times` config. + + New Rails 6.1 apps have it enabled by default, existing apps can upgrade + via the config in config/initializers/new_framework_defaults_6_1.rb + + See the `utc_to_local_returns_utc_offset_times` documentation for details. + + *Phil Ross*, *Jared Beck* + +* Add Date and Time `#yesterday?` and `#tomorrow?` alongside `#today?`. + + Aliased to `#prev_day?` and `#next_day?` to match the existing `#prev/next_day` methods. 
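+
+  For example (illustrative, relative to the current date):
+
+  ```ruby
+  Date.tomorrow.tomorrow? # => true
+  Time.current.yesterday? # => false
+  ```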
+ + *Jatin Dhankhar* + +* Add `Enumerable#pick` to complement `ActiveRecord::Relation#pick`. + + *Eugene Kenny* + +* [Breaking change] `ActiveSupport::Callbacks#halted_callback_hook` now receive a 2nd argument: + + `ActiveSupport::Callbacks#halted_callback_hook` now receive the name of the callback + being halted as second argument. + This change will allow you to differentiate which callbacks halted the chain + and act accordingly. + + ```ruby + class Book < ApplicationRecord + before_save { throw(:abort) } + before_create { throw(:abort) } + + def halted_callback_hook(filter, callback_name) + Rails.logger.info("Book couldn't be #{callback_name}d") + end + + Book.create # => "Book couldn't be created" + book.save # => "Book couldn't be saved" + end + ``` + + *Edouard Chin* + +* Support `prepend` with `ActiveSupport::Concern`. + + Allows a module with `extend ActiveSupport::Concern` to be prepended. + + module Imposter + extend ActiveSupport::Concern + + # Same as `included`, except only run when prepended. + prepended do + end + end + + class Person + prepend Imposter + end + + Class methods are prepended to the base class, concerning is also + updated: `concerning :Imposter, prepend: true do`. + + *Jason Karns*, *Elia Schito* + +* Deprecate using `Range#include?` method to check the inclusion of a value + in a date time range. It is recommended to use `Range#cover?` method + instead of `Range#include?` to check the inclusion of a value + in a date time range. + + *Vishal Telangre* + +* Support added for a `round_mode` parameter, in all number helpers. (See: `BigDecimal::mode`.) + + ```ruby + number_to_currency(1234567890.50, precision: 0, round_mode: :half_down) # => "$1,234,567,890" + number_to_percentage(302.24398923423, precision: 5, round_mode: :down) # => "302.24398%" + number_to_rounded(389.32314, precision: 0, round_mode: :ceil) # => "390" + number_to_human_size(483989, precision: 2, round_mode: :up) # => "480 KB" + number_to_human(489939, precision: 2, round_mode: :floor) # => "480 Thousand" + + 485000.to_s(:human, precision: 2, round_mode: :half_even) # => "480 Thousand" + ``` + + *Tom Lord* + +* `Array#to_sentence` no longer returns a frozen string. + + Before: + + ['one', 'two'].to_sentence.frozen? + # => true + + After: + + ['one', 'two'].to_sentence.frozen? + # => false + + *Nicolas Dular* + +* When an instance of `ActiveSupport::Duration` is converted to an `iso8601` duration string, if `weeks` are mixed with `date` parts, the `week` part will be converted to days. + This keeps the parser and serializer on the same page. + + ```ruby + duration = ActiveSupport::Duration.build(1000000) + # 1 week, 4 days, 13 hours, 46 minutes, and 40.0 seconds + + duration_iso = duration.iso8601 + # P11DT13H46M40S + + ActiveSupport::Duration.parse(duration_iso) + # 11 days, 13 hours, 46 minutes, and 40 seconds + + duration = ActiveSupport::Duration.build(604800) + # 1 week + + duration_iso = duration.iso8601 + # P1W + + ActiveSupport::Duration.parse(duration_iso) + # 1 week + ``` + + *Abhishek Sarkar* + +* Add block support to `ActiveSupport::Testing::TimeHelpers#travel_back`. + + *Tim Masliuchenko* + +* Update `ActiveSupport::Messages::Metadata#fresh?` to work for cookies with expiry set when + `ActiveSupport.parse_json_times = true`. + + *Christian Gregg* + +* Support symbolic links for `content_path` in `ActiveSupport::EncryptedFile`. + + *Takumi Shotoku* + +* Improve `Range#===`, `Range#include?`, and `Range#cover?` to work with beginless (startless) + and endless range targets. 
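+
+  A rough sketch of the intent (results assumed):
+
+  ```ruby
+  (1..).cover?(5..10) # => true
+  (1..10).cover?(5..) # => false
+  ```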
+ + *Allen Hsu*, *Andrew Hodgkinson* + +* Don't use `Process#clock_gettime(CLOCK_THREAD_CPUTIME_ID)` on Solaris. + + *Iain Beeston* + +* Prevent `ActiveSupport::Duration.build(value)` from creating instances of + `ActiveSupport::Duration` unless `value` is of type `Numeric`. + + Addresses the errant set of behaviours described in #37012 where + `ActiveSupport::Duration` comparisons would fail confusingly + or return unexpected results when comparing durations built from instances of `String`. + + Before: + + small_duration_from_string = ActiveSupport::Duration.build('9') + large_duration_from_string = ActiveSupport::Duration.build('100000000000000') + small_duration_from_int = ActiveSupport::Duration.build(9) + + large_duration_from_string > small_duration_from_string + # => false + + small_duration_from_string == small_duration_from_int + # => false + + small_duration_from_int < large_duration_from_string + # => ArgumentError (comparison of ActiveSupport::Duration::Scalar with ActiveSupport::Duration failed) + + large_duration_from_string > small_duration_from_int + # => ArgumentError (comparison of String with ActiveSupport::Duration failed) + + After: + + small_duration_from_string = ActiveSupport::Duration.build('9') + # => TypeError (can't build an ActiveSupport::Duration from a String) + + *Alexei Emam* + +* Add `ActiveSupport::Cache::Store#delete_multi` method to delete multiple keys from the cache store. + + *Peter Zhu* + +* Support multiple arguments in `HashWithIndifferentAccess` for `merge` and `update` methods, to + follow Ruby 2.6 addition. + + *Wojciech Wnętrzak* + +* Allow initializing `thread_mattr_*` attributes via `:default` option. + + class Scraper + thread_mattr_reader :client, default: Api::Client.new + end + + *Guilherme Mansur* + +* Add `compact_blank` for those times when you want to remove #blank? values from + an Enumerable (also `compact_blank!` on Hash, Array, ActionController::Parameters). + + *Dana Sherson* + +* Make ActiveSupport::Logger Fiber-safe. + + Use `Fiber.current.__id__` in `ActiveSupport::Logger#local_level=` in order + to make log level local to Ruby Fibers in addition to Threads. + + Example: + + logger = ActiveSupport::Logger.new(STDOUT) + logger.level = 1 + puts "Main is debug? #{logger.debug?}" + + Fiber.new { + logger.local_level = 0 + puts "Thread is debug? #{logger.debug?}" + }.resume + + puts "Main is debug? #{logger.debug?}" + + Before: + + Main is debug? false + Thread is debug? true + Main is debug? true + + After: + + Main is debug? false + Thread is debug? true + Main is debug? false + + Fixes #36752. + + *Alexander Varnin* + +* Allow the `on_rotation` proc used when decrypting/verifying a message to be + passed at the constructor level. + + Before: + + crypt = ActiveSupport::MessageEncryptor.new('long_secret') + crypt.decrypt_and_verify(encrypted_message, on_rotation: proc { ... }) + crypt.decrypt_and_verify(another_encrypted_message, on_rotation: proc { ... }) + + After: + + crypt = ActiveSupport::MessageEncryptor.new('long_secret', on_rotation: proc { ... }) + crypt.decrypt_and_verify(encrypted_message) + crypt.decrypt_and_verify(another_encrypted_message) + + *Edouard Chin* + +* `delegate_missing_to` would raise a `DelegationError` if the object + delegated to was `nil`. Now the `allow_nil` option has been added to enable + the user to specify they want `nil` returned in this case. 
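+
+  A sketch of the new option (the class names here are made up):
+
+  ```ruby
+  class Partition
+    delegate_missing_to :event, allow_nil: true
+
+    attr_reader :event
+
+    def initialize(event)
+      @event = event
+    end
+  end
+
+  Partition.new(nil).title # => nil instead of raising DelegationError
+  ```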
+ + *Matthew Tanous* + +* `truncate` would return the original string if it was too short to be truncated + and a frozen string if it were long enough to be truncated. Now truncate will + consistently return an unfrozen string regardless. This behavior is consistent + with `gsub` and `strip`. + + Before: + + 'foobar'.truncate(5).frozen? + # => true + 'foobar'.truncate(6).frozen? + # => false + + After: + + 'foobar'.truncate(5).frozen? + # => false + 'foobar'.truncate(6).frozen? + # => false + + *Jordan Thomas* + + +Please check [6-0-stable](https://github.com/rails/rails/blob/6-0-stable/activesupport/CHANGELOG.md) for previous changes. diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/MIT-LICENSE b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/MIT-LICENSE new file mode 100644 index 0000000..dbae085 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2005-2020 David Heinemeier Hansson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/README.rdoc b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/README.rdoc new file mode 100644 index 0000000..c2df6d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/README.rdoc @@ -0,0 +1,40 @@ += Active Support -- Utility classes and Ruby extensions from Rails + +Active Support is a collection of utility classes and standard library +extensions that were found useful for the Rails framework. These additions +reside in this package so they can be loaded as needed in Ruby projects +outside of Rails. + +You can read more about the extensions in the {Active Support Core Extensions}[https://edgeguides.rubyonrails.org/active_support_core_extensions.html] guide. 
+ +== Download and installation + +The latest version of Active Support can be installed with RubyGems: + + $ gem install activesupport + +Source code can be downloaded as part of the Rails project on GitHub: + +* https://github.com/rails/rails/tree/main/activesupport + + +== License + +Active Support is released under the MIT license: + +* https://opensource.org/licenses/MIT + + +== Support + +API documentation is at: + +* https://api.rubyonrails.org + +Bug reports for the Ruby on Rails project can be filed here: + +* https://github.com/rails/rails/issues + +Feature requests should be discussed on the rails-core mailing list here: + +* https://discuss.rubyonrails.org/c/rubyonrails-core diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support.rb new file mode 100644 index 0000000..6cf116a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support.rb @@ -0,0 +1,108 @@ +# frozen_string_literal: true + +#-- +# Copyright (c) 2005-2020 David Heinemeier Hansson +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+#++ + +require "securerandom" +require "active_support/dependencies/autoload" +require "active_support/version" +require "active_support/logger" +require "active_support/lazy_load_hooks" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + extend ActiveSupport::Autoload + + autoload :Concern + autoload :ActionableError + autoload :ConfigurationFile + autoload :CurrentAttributes + autoload :Dependencies + autoload :DescendantsTracker + autoload :ExecutionWrapper + autoload :Executor + autoload :FileUpdateChecker + autoload :EventedFileUpdateChecker + autoload :ForkTracker + autoload :LogSubscriber + autoload :Notifications + autoload :Reloader + autoload :SecureCompareRotator + + eager_autoload do + autoload :BacktraceCleaner + autoload :ProxyObject + autoload :Benchmarkable + autoload :Cache + autoload :Callbacks + autoload :Configurable + autoload :Deprecation + autoload :Digest + autoload :Gzip + autoload :Inflector + autoload :JSON + autoload :KeyGenerator + autoload :MessageEncryptor + autoload :MessageVerifier + autoload :Multibyte + autoload :NumberHelper + autoload :OptionMerger + autoload :OrderedHash + autoload :OrderedOptions + autoload :StringInquirer + autoload :EnvironmentInquirer + autoload :TaggedLogging + autoload :XmlMini + autoload :ArrayInquirer + end + + autoload :Rescuable + autoload :SafeBuffer, "active_support/core_ext/string/output_safety" + autoload :TestCase + + def self.eager_load! + super + + NumberHelper.eager_load! + end + + cattr_accessor :test_order # :nodoc: + + def self.to_time_preserves_timezone + DateAndTime::Compatibility.preserve_timezone + end + + def self.to_time_preserves_timezone=(value) + DateAndTime::Compatibility.preserve_timezone = value + end + + def self.utc_to_local_returns_utc_offset_times + DateAndTime::Compatibility.utc_to_local_returns_utc_offset_times + end + + def self.utc_to_local_returns_utc_offset_times=(value) + DateAndTime::Compatibility.utc_to_local_returns_utc_offset_times = value + end +end + +autoload :I18n, "active_support/i18n" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/actionable_error.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/actionable_error.rb new file mode 100644 index 0000000..7db14cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/actionable_error.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module ActiveSupport + # Actionable errors let's you define actions to resolve an error. + # + # To make an error actionable, include the ActiveSupport::ActionableError + # module and invoke the +action+ class macro to define the action. An action + # needs a name and a block to execute. + module ActionableError + extend Concern + + class NonActionable < StandardError; end + + included do + class_attribute :_actions, default: {} + end + + def self.actions(error) # :nodoc: + case error + when ActionableError, -> it { Class === it && it < ActionableError } + error._actions + else + {} + end + end + + def self.dispatch(error, name) # :nodoc: + actions(error).fetch(name).call + rescue KeyError + raise NonActionable, "Cannot find action \"#{name}\"" + end + + module ClassMethods + # Defines an action that can resolve the error. 
+ # + # class PendingMigrationError < MigrationError + # include ActiveSupport::ActionableError + # + # action "Run pending migrations" do + # ActiveRecord::Tasks::DatabaseTasks.migrate + # end + # end + def action(name, &block) + _actions[name] = block + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/all.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/all.rb new file mode 100644 index 0000000..4adf446 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/all.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/time" +require "active_support/core_ext" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/array_inquirer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/array_inquirer.rb new file mode 100644 index 0000000..0cbba0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/array_inquirer.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require "active_support/core_ext/symbol/starts_ends_with" + +module ActiveSupport + # Wrapping an array in an +ArrayInquirer+ gives a friendlier way to check + # its string-like contents: + # + # variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet]) + # + # variants.phone? # => true + # variants.tablet? # => true + # variants.desktop? # => false + class ArrayInquirer < Array + # Passes each element of +candidates+ collection to ArrayInquirer collection. + # The method returns true if any element from the ArrayInquirer collection + # is equal to the stringified or symbolized form of any element in the +candidates+ collection. + # + # If +candidates+ collection is not given, method returns true. + # + # variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet]) + # + # variants.any? # => true + # variants.any?(:phone, :tablet) # => true + # variants.any?('phone', 'desktop') # => true + # variants.any?(:desktop, :watch) # => false + def any?(*candidates) + if candidates.none? + super + else + candidates.any? do |candidate| + include?(candidate.to_sym) || include?(candidate.to_s) + end + end + end + + private + def respond_to_missing?(name, include_private = false) + name.end_with?("?") || super + end + + def method_missing(name, *args) + if name.end_with?("?") + any?(name[0..-2]) + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/backtrace_cleaner.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/backtrace_cleaner.rb new file mode 100644 index 0000000..03c7dc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/backtrace_cleaner.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +module ActiveSupport + # Backtraces often include many lines that are not relevant for the context + # under review. This makes it hard to find the signal amongst the backtrace + # noise, and adds debugging time. With a BacktraceCleaner, filters and + # silencers are used to remove the noisy lines, so that only the most relevant + # lines remain. + # + # Filters are used to modify lines of data, while silencers are used to remove + # lines entirely. The typical filter use case is to remove lengthy path + # information from the start of each line, and view file paths relevant to the + # app directory instead of the file system root. 
The typical silencer use case + # is to exclude the output of a noisy library from the backtrace, so that you + # can focus on the rest. + # + # bc = ActiveSupport::BacktraceCleaner.new + # bc.add_filter { |line| line.gsub(Rails.root.to_s, '') } # strip the Rails.root prefix + # bc.add_silencer { |line| /puma|rubygems/.match?(line) } # skip any lines from puma or rubygems + # bc.clean(exception.backtrace) # perform the cleanup + # + # To reconfigure an existing BacktraceCleaner (like the default one in Rails) + # and show as much data as possible, you can always call + # BacktraceCleaner#remove_silencers!, which will restore the + # backtrace to a pristine state. If you need to reconfigure an existing + # BacktraceCleaner so that it does not filter or modify the paths of any lines + # of the backtrace, you can call BacktraceCleaner#remove_filters! + # These two methods will give you a completely untouched backtrace. + # + # Inspired by the Quiet Backtrace gem by thoughtbot. + class BacktraceCleaner + def initialize + @filters, @silencers = [], [] + add_gem_filter + add_gem_silencer + add_stdlib_silencer + end + + # Returns the backtrace after all filters and silencers have been run + # against it. Filters run first, then silencers. + def clean(backtrace, kind = :silent) + filtered = filter_backtrace(backtrace) + + case kind + when :silent + silence(filtered) + when :noise + noise(filtered) + else + filtered + end + end + alias :filter :clean + + # Adds a filter from the block provided. Each line in the backtrace will be + # mapped against this filter. + # + # # Will turn "/my/rails/root/app/models/person.rb" into "/app/models/person.rb" + # backtrace_cleaner.add_filter { |line| line.gsub(Rails.root, '') } + def add_filter(&block) + @filters << block + end + + # Adds a silencer from the block provided. If the silencer returns +true+ + # for a given line, it will be excluded from the clean backtrace. + # + # # Will reject all lines that include the word "puma", like "/gems/puma/server.rb" or "/app/my_puma_server/rb" + # backtrace_cleaner.add_silencer { |line| /puma/.match?(line) } + def add_silencer(&block) + @silencers << block + end + + # Removes all silencers, but leaves in the filters. Useful if your + # context of debugging suddenly expands as you suspect a bug in one of + # the libraries you use. + def remove_silencers! + @silencers = [] + end + + # Removes all filters, but leaves in the silencers. Useful if you suddenly + # need to see entire filepaths in the backtrace that you had already + # filtered out. + def remove_filters! + @filters = [] + end + + private + FORMATTED_GEMS_PATTERN = /\A[^\/]+ \([\w.]+\) / + + def add_gem_filter + gems_paths = (Gem.path | [Gem.default_dir]).map { |p| Regexp.escape(p) } + return if gems_paths.empty? + + gems_regexp = %r{\A(#{gems_paths.join('|')})/(bundler/)?gems/([^/]+)-([\w.]+)/(.*)} + gems_result = '\3 (\4) \5' + add_filter { |line| line.sub(gems_regexp, gems_result) } + end + + def add_gem_silencer + add_silencer { |line| FORMATTED_GEMS_PATTERN.match?(line) } + end + + def add_stdlib_silencer + add_silencer { |line| line.start_with?(RbConfig::CONFIG["rubylibdir"]) } + end + + def filter_backtrace(backtrace) + @filters.each do |f| + backtrace = backtrace.map { |line| f.call(line) } + end + + backtrace + end + + def silence(backtrace) + @silencers.each do |s| + backtrace = backtrace.reject { |line| s.call(line) } + end + + backtrace + end + + def noise(backtrace) + backtrace.select do |line| + @silencers.any? 
do |s| + s.call(line) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/benchmarkable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/benchmarkable.rb new file mode 100644 index 0000000..abd0d59 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/benchmarkable.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/core_ext/benchmark" +require "active_support/core_ext/hash/keys" + +module ActiveSupport + module Benchmarkable + # Allows you to measure the execution time of a block in a template and + # records the result to the log. Wrap this block around expensive operations + # or possible bottlenecks to get a time reading for the operation. For + # example, let's say you thought your file processing method was taking too + # long; you could wrap it in a benchmark block. + # + # <% benchmark 'Process data files' do %> + # <%= expensive_files_operation %> + # <% end %> + # + # That would add something like "Process data files (345.2ms)" to the log, + # which you can then use to compare timings when optimizing your code. + # + # You may give an optional logger level (:debug, :info, + # :warn, :error) as the :level option. The + # default logger level value is :info. + # + # <% benchmark 'Low-level files', level: :debug do %> + # <%= lowlevel_files_operation %> + # <% end %> + # + # Finally, you can pass true as the third argument to silence all log + # activity (other than the timing information) from inside the block. This + # is great for boiling down a noisy block to just a single statement that + # produces one log line: + # + # <% benchmark 'Process data files', level: :info, silence: true do %> + # <%= expensive_and_chatty_files_operation %> + # <% end %> + def benchmark(message = "Benchmarking", options = {}) + if logger + options.assert_valid_keys(:level, :silence) + options[:level] ||= :info + + result = nil + ms = Benchmark.ms { result = options[:silence] ? logger.silence { yield } : yield } + logger.public_send(options[:level], "%s (%.1fms)" % [ message, ms ]) + result + else + yield + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/builder.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/builder.rb new file mode 100644 index 0000000..3fa7e6b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/builder.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +begin + require "builder" +rescue LoadError => e + $stderr.puts "You don't have builder installed in your application. 
Please add it to your Gemfile and run bundle install" + raise e +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache.rb new file mode 100644 index 0000000..ecc6085 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache.rb @@ -0,0 +1,878 @@ +# frozen_string_literal: true + +require "zlib" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/enumerable" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/numeric/bytes" +require "active_support/core_ext/numeric/time" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/try" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # See ActiveSupport::Cache::Store for documentation. + module Cache + autoload :FileStore, "active_support/cache/file_store" + autoload :MemoryStore, "active_support/cache/memory_store" + autoload :MemCacheStore, "active_support/cache/mem_cache_store" + autoload :NullStore, "active_support/cache/null_store" + autoload :RedisCacheStore, "active_support/cache/redis_cache_store" + + # These options mean something to all cache implementations. Individual cache + # implementations may support additional options. + UNIVERSAL_OPTIONS = [:namespace, :compress, :compress_threshold, :expires_in, :race_condition_ttl, :coder] + + module Strategy + autoload :LocalCache, "active_support/cache/strategy/local_cache" + end + + class << self + # Creates a new Store object according to the given options. + # + # If no arguments are passed to this method, then a new + # ActiveSupport::Cache::MemoryStore object will be returned. + # + # If you pass a Symbol as the first argument, then a corresponding cache + # store class under the ActiveSupport::Cache namespace will be created. + # For example: + # + # ActiveSupport::Cache.lookup_store(:memory_store) + # # => returns a new ActiveSupport::Cache::MemoryStore object + # + # ActiveSupport::Cache.lookup_store(:mem_cache_store) + # # => returns a new ActiveSupport::Cache::MemCacheStore object + # + # Any additional arguments will be passed to the corresponding cache store + # class's constructor: + # + # ActiveSupport::Cache.lookup_store(:file_store, '/tmp/cache') + # # => same as: ActiveSupport::Cache::FileStore.new('/tmp/cache') + # + # If the first argument is not a Symbol, then it will simply be returned: + # + # ActiveSupport::Cache.lookup_store(MyOwnCacheStore.new) + # # => returns MyOwnCacheStore.new + def lookup_store(store = nil, *parameters) + case store + when Symbol + options = parameters.extract_options! + # clean this up once Ruby 2.7 support is dropped + # see https://github.com/rails/rails/pull/41522#discussion_r581186602 + if options.empty? + retrieve_store_class(store).new(*parameters) + else + retrieve_store_class(store).new(*parameters, **options) + end + when Array + lookup_store(*store) + when nil + ActiveSupport::Cache::MemoryStore.new + else + store + end + end + + # Expands out the +key+ argument into a key that can be used for the + # cache store. Optionally accepts a namespace, and all keys will be + # scoped within that namespace. 
+ # + # If the +key+ argument provided is an array, or responds to +to_a+, then + # each of elements in the array will be turned into parameters/keys and + # concatenated into a single key. For example: + # + # ActiveSupport::Cache.expand_cache_key([:foo, :bar]) # => "foo/bar" + # ActiveSupport::Cache.expand_cache_key([:foo, :bar], "namespace") # => "namespace/foo/bar" + # + # The +key+ argument can also respond to +cache_key+ or +to_param+. + def expand_cache_key(key, namespace = nil) + expanded_cache_key = namespace ? +"#{namespace}/" : +"" + + if prefix = ENV["RAILS_CACHE_ID"] || ENV["RAILS_APP_VERSION"] + expanded_cache_key << "#{prefix}/" + end + + expanded_cache_key << retrieve_cache_key(key) + expanded_cache_key + end + + private + def retrieve_cache_key(key) + case + when key.respond_to?(:cache_key_with_version) then key.cache_key_with_version + when key.respond_to?(:cache_key) then key.cache_key + when key.is_a?(Array) then key.map { |element| retrieve_cache_key(element) }.to_param + when key.respond_to?(:to_a) then retrieve_cache_key(key.to_a) + else key.to_param + end.to_s + end + + # Obtains the specified cache store class, given the name of the +store+. + # Raises an error when the store class cannot be found. + def retrieve_store_class(store) + # require_relative cannot be used here because the class might be + # provided by another gem, like redis-activesupport for example. + require "active_support/cache/#{store}" + rescue LoadError => e + raise "Could not find cache store adapter for #{store} (#{e})" + else + ActiveSupport::Cache.const_get(store.to_s.camelize) + end + end + + # An abstract cache store class. There are multiple cache store + # implementations, each having its own additional features. See the classes + # under the ActiveSupport::Cache module, e.g. + # ActiveSupport::Cache::MemCacheStore. MemCacheStore is currently the most + # popular cache store for large production websites. + # + # Some implementations may not support all methods beyond the basic cache + # methods of +fetch+, +write+, +read+, +exist?+, and +delete+. + # + # ActiveSupport::Cache::Store can store any serializable Ruby object. + # + # cache = ActiveSupport::Cache::MemoryStore.new + # + # cache.read('city') # => nil + # cache.write('city', "Duckburgh") + # cache.read('city') # => "Duckburgh" + # + # Keys are always translated into Strings and are case sensitive. When an + # object is specified as a key and has a +cache_key+ method defined, this + # method will be called to define the key. Otherwise, the +to_param+ + # method will be called. Hashes and Arrays can also be used as keys. The + # elements will be delimited by slashes, and the elements within a Hash + # will be sorted by key so they are consistent. + # + # cache.read('city') == cache.read(:city) # => true + # + # Nil values can be cached. + # + # If your cache is on a shared infrastructure, you can define a namespace + # for your cache entries. If a namespace is defined, it will be prefixed on + # to every key. The namespace can be either a static value or a Proc. If it + # is a Proc, it will be invoked when each key is evaluated so that you can + # use application logic to invalidate keys. + # + # cache.namespace = -> { @last_mod_time } # Set the namespace to a variable + # @last_mod_time = Time.now # Invalidate the entire cache by changing namespace + # + # Cached data larger than 1kB are compressed by default. 
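# --- Editor's illustrative sketch (not part of the vendored activesupport source):
# exercising the compression behaviour described above. The store class, key
# names and payload sizes are arbitrary demo choices.
require "active_support"

cache = ActiveSupport::Cache::MemoryStore.new(compress: true) # MemoryStore skips compression unless asked
cache.write("tiny", "x" * 64)                        # below the 1kB threshold, stored as-is
cache.write("big", "x" * 4096)                       # above the threshold, deflated before storage
cache.write("big_raw", "x" * 4096, compress: false)  # per-call opt-out
cache.fetch("big")                                   # transparently inflated on read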
To turn off + # compression, pass compress: false to the initializer or to + # individual +fetch+ or +write+ method calls. The 1kB compression + # threshold is configurable with the :compress_threshold option, + # specified in bytes. + class Store + DEFAULT_CODER = Marshal + + cattr_accessor :logger, instance_writer: true + + attr_reader :silence, :options + alias :silence? :silence + + class << self + private + def retrieve_pool_options(options) + {}.tap do |pool_options| + pool_options[:size] = options.delete(:pool_size) if options[:pool_size] + pool_options[:timeout] = options.delete(:pool_timeout) if options[:pool_timeout] + end + end + + def ensure_connection_pool_added! + require "connection_pool" + rescue LoadError => e + $stderr.puts "You don't have connection_pool installed in your application. Please add it to your Gemfile and run bundle install" + raise e + end + end + + # Creates a new cache. The options will be passed to any write method calls + # except for :namespace which can be used to set the global + # namespace for the cache. + def initialize(options = nil) + @options = options ? options.dup : {} + @coder = @options.delete(:coder) { self.class::DEFAULT_CODER } || NullCoder + end + + # Silences the logger. + def silence! + @silence = true + self + end + + # Silences the logger within a block. + def mute + previous_silence, @silence = defined?(@silence) && @silence, true + yield + ensure + @silence = previous_silence + end + + # Fetches data from the cache, using the given key. If there is data in + # the cache with the given key, then that data is returned. + # + # If there is no such data in the cache (a cache miss), then +nil+ will be + # returned. However, if a block has been passed, that block will be passed + # the key and executed in the event of a cache miss. The return value of the + # block will be written to the cache under the given cache key, and that + # return value will be returned. + # + # cache.write('today', 'Monday') + # cache.fetch('today') # => "Monday" + # + # cache.fetch('city') # => nil + # cache.fetch('city') do + # 'Duckburgh' + # end + # cache.fetch('city') # => "Duckburgh" + # + # You may also specify additional options via the +options+ argument. + # Setting force: true forces a cache "miss," meaning we treat + # the cache value as missing even if it's present. Passing a block is + # required when +force+ is true so this always results in a cache write. + # + # cache.write('today', 'Monday') + # cache.fetch('today', force: true) { 'Tuesday' } # => 'Tuesday' + # cache.fetch('today', force: true) # => ArgumentError + # + # The +:force+ option is useful when you're calling some other method to + # ask whether you should force a cache write. Otherwise, it's clearer to + # just call Cache#write. + # + # Setting skip_nil: true will not cache nil result: + # + # cache.fetch('foo') { nil } + # cache.fetch('bar', skip_nil: true) { nil } + # cache.exist?('foo') # => true + # cache.exist?('bar') # => false + # + # + # Setting compress: false disables compression of the cache entry. + # + # Setting :expires_in will set an expiration time on the cache. + # All caches support auto-expiring content after a specified number of + # seconds. This value can be specified as an option to the constructor + # (in which case all entries will be affected), or it can be supplied to + # the +fetch+ or +write+ method to effect just one entry. 
+ # + # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 5.minutes) + # cache.write(key, value, expires_in: 1.minute) # Set a lower value for one entry + # + # Setting :version verifies the cache stored under name + # is of the same version. nil is returned on mismatches despite contents. + # This feature is used to support recyclable cache keys. + # + # Setting :race_condition_ttl is very useful in situations where + # a cache entry is used very frequently and is under heavy load. If a + # cache expires and due to heavy load several different processes will try + # to read data natively and then they all will try to write to cache. To + # avoid that case the first process to find an expired cache entry will + # bump the cache expiration time by the value set in :race_condition_ttl. + # Yes, this process is extending the time for a stale value by another few + # seconds. Because of extended life of the previous cache, other processes + # will continue to use slightly stale data for a just a bit longer. In the + # meantime that first process will go ahead and will write into cache the + # new value. After that all the processes will start getting the new value. + # The key is to keep :race_condition_ttl small. + # + # If the process regenerating the entry errors out, the entry will be + # regenerated after the specified number of seconds. Also note that the + # life of stale cache is extended only if it expired recently. Otherwise + # a new value is generated and :race_condition_ttl does not play + # any role. + # + # # Set all values to expire after one minute. + # cache = ActiveSupport::Cache::MemoryStore.new(expires_in: 1.minute) + # + # cache.write('foo', 'original value') + # val_1 = nil + # val_2 = nil + # sleep 60 + # + # Thread.new do + # val_1 = cache.fetch('foo', race_condition_ttl: 10.seconds) do + # sleep 1 + # 'new value 1' + # end + # end + # + # Thread.new do + # val_2 = cache.fetch('foo', race_condition_ttl: 10.seconds) do + # 'new value 2' + # end + # end + # + # cache.fetch('foo') # => "original value" + # sleep 10 # First thread extended the life of cache by another 10 seconds + # cache.fetch('foo') # => "new value 1" + # val_1 # => "new value 1" + # val_2 # => "original value" + # + # Other options will be handled by the specific cache store implementation. + # Internally, #fetch calls #read_entry, and calls #write_entry on a cache + # miss. +options+ will be passed to the #read and #write calls. + # + # For example, MemCacheStore's #write method supports the +:raw+ + # option, which tells the memcached server to store all values as strings. + # We can use this option with #fetch too: + # + # cache = ActiveSupport::Cache::MemCacheStore.new + # cache.fetch("foo", force: true, raw: true) do + # :bar + # end + # cache.fetch('foo') # => "bar" + def fetch(name, options = nil, &block) + if block_given? 
+ options = merged_options(options) + key = normalize_key(name, options) + + entry = nil + instrument(:read, name, options) do |payload| + cached_entry = read_entry(key, **options, event: payload) unless options[:force] + entry = handle_expired_entry(cached_entry, key, options) + entry = nil if entry && entry.mismatched?(normalize_version(name, options)) + payload[:super_operation] = :fetch if payload + payload[:hit] = !!entry if payload + end + + if entry + get_entry_value(entry, name, options) + else + save_block_result_to_cache(name, options, &block) + end + elsif options && options[:force] + raise ArgumentError, "Missing block: Calling `Cache#fetch` with `force: true` requires a block." + else + read(name, options) + end + end + + # Reads data from the cache, using the given key. If there is data in + # the cache with the given key, then that data is returned. Otherwise, + # +nil+ is returned. + # + # Note, if data was written with the :expires_in or + # :version options, both of these conditions are applied before + # the data is returned. + # + # Options are passed to the underlying cache implementation. + def read(name, options = nil) + options = merged_options(options) + key = normalize_key(name, options) + version = normalize_version(name, options) + + instrument(:read, name, options) do |payload| + entry = read_entry(key, **options, event: payload) + + if entry + if entry.expired? + delete_entry(key, **options) + payload[:hit] = false if payload + nil + elsif entry.mismatched?(version) + payload[:hit] = false if payload + nil + else + payload[:hit] = true if payload + entry.value + end + else + payload[:hit] = false if payload + nil + end + end + end + + # Reads multiple values at once from the cache. Options can be passed + # in the last argument. + # + # Some cache implementation may optimize this method. + # + # Returns a hash mapping the names provided to the values found. + def read_multi(*names) + options = names.extract_options! + options = merged_options(options) + + instrument :read_multi, names, options do |payload| + read_multi_entries(names, **options, event: payload).tap do |results| + payload[:hits] = results.keys + end + end + end + + # Cache Storage API to write multiple values at once. + def write_multi(hash, options = nil) + options = merged_options(options) + + instrument :write_multi, hash, options do |payload| + entries = hash.each_with_object({}) do |(name, value), memo| + memo[normalize_key(name, options)] = Entry.new(value, **options.merge(version: normalize_version(name, options))) + end + + write_multi_entries entries, **options + end + end + + # Fetches data from the cache, using the given keys. If there is data in + # the cache with the given keys, then that data is returned. Otherwise, + # the supplied block is called for each key for which there was no data, + # and the result will be written to the cache and returned. + # Therefore, you need to pass a block that returns the data to be written + # to the cache. If you do not want to write the cache when the cache is + # not found, use #read_multi. + # + # Returns a hash with the data for each of the names. For example: + # + # cache.write("bim", "bam") + # cache.fetch_multi("bim", "unknown_key") do |key| + # "Fallback value for key: #{key}" + # end + # # => { "bim" => "bam", + # # "unknown_key" => "Fallback value for key: unknown_key" } + # + # Options are passed to the underlying cache implementation. 
For example: + # + # cache.fetch_multi("fizz", expires_in: 5.seconds) do |key| + # "buzz" + # end + # # => {"fizz"=>"buzz"} + # cache.read("fizz") + # # => "buzz" + # sleep(6) + # cache.read("fizz") + # # => nil + def fetch_multi(*names) + raise ArgumentError, "Missing block: `Cache#fetch_multi` requires a block." unless block_given? + + options = names.extract_options! + options = merged_options(options) + + instrument :read_multi, names, options do |payload| + reads = read_multi_entries(names, **options) + writes = {} + ordered = names.index_with do |name| + reads.fetch(name) { writes[name] = yield(name) } + end + + payload[:hits] = reads.keys + payload[:super_operation] = :fetch_multi + + write_multi(writes, options) + + ordered + end + end + + # Writes the value to the cache, with the key. + # + # Options are passed to the underlying cache implementation. + def write(name, value, options = nil) + options = merged_options(options) + + instrument(:write, name, options) do + entry = Entry.new(value, **options.merge(version: normalize_version(name, options))) + write_entry(normalize_key(name, options), entry, **options) + end + end + + # Deletes an entry in the cache. Returns +true+ if an entry is deleted. + # + # Options are passed to the underlying cache implementation. + def delete(name, options = nil) + options = merged_options(options) + + instrument(:delete, name) do + delete_entry(normalize_key(name, options), **options) + end + end + + # Deletes multiple entries in the cache. + # + # Options are passed to the underlying cache implementation. + def delete_multi(names, options = nil) + options = merged_options(options) + names.map! { |key| normalize_key(key, options) } + + instrument :delete_multi, names do + delete_multi_entries(names, **options) + end + end + + # Returns +true+ if the cache contains an entry for the given key. + # + # Options are passed to the underlying cache implementation. + def exist?(name, options = nil) + options = merged_options(options) + + instrument(:exist?, name) do |payload| + entry = read_entry(normalize_key(name, options), **options, event: payload) + (entry && !entry.expired? && !entry.mismatched?(normalize_version(name, options))) || false + end + end + + # Deletes all entries with keys matching the pattern. + # + # Options are passed to the underlying cache implementation. + # + # Some implementations may not support this method. + def delete_matched(matcher, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support delete_matched") + end + + # Increments an integer value in the cache. + # + # Options are passed to the underlying cache implementation. + # + # Some implementations may not support this method. + def increment(name, amount = 1, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support increment") + end + + # Decrements an integer value in the cache. + # + # Options are passed to the underlying cache implementation. + # + # Some implementations may not support this method. + def decrement(name, amount = 1, options = nil) + raise NotImplementedError.new("#{self.class.name} does not support decrement") + end + + # Cleanups the cache by removing expired entries. + # + # Options are passed to the underlying cache implementation. + # + # Some implementations may not support this method. + def cleanup(options = nil) + raise NotImplementedError.new("#{self.class.name} does not support cleanup") + end + + # Clears the entire cache. 
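# --- Editor's illustrative sketch (not part of the vendored source): the
# single-key and multi-key deletion helpers documented above, shown against a
# MemoryStore. Keys and values are arbitrary.
require "active_support"

cache = ActiveSupport::Cache::MemoryStore.new
cache.write("a", 1)
cache.write_multi("b" => 2, "c" => 3)
cache.exist?("a")            # => true
cache.delete("a")            # => true (an entry was removed)
cache.delete_multi(%w[b c])  # => 2 (number of entries removed)
cache.clear                  # wipes everything (see the caveat that follows)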
Be careful with this method since it could + # affect other processes if shared cache is being used. + # + # The options hash is passed to the underlying cache implementation. + # + # Some implementations may not support this method. + def clear(options = nil) + raise NotImplementedError.new("#{self.class.name} does not support clear") + end + + private + # Adds the namespace defined in the options to a pattern designed to + # match keys. Implementations that support delete_matched should call + # this method to translate a pattern that matches names into one that + # matches namespaced keys. + def key_matcher(pattern, options) # :doc: + prefix = options[:namespace].is_a?(Proc) ? options[:namespace].call : options[:namespace] + if prefix + source = pattern.source + if source.start_with?("^") + source = source[1, source.length] + else + source = ".*#{source[0, source.length]}" + end + Regexp.new("^#{Regexp.escape(prefix)}:#{source}", pattern.options) + else + pattern + end + end + + # Reads an entry from the cache implementation. Subclasses must implement + # this method. + def read_entry(key, **options) + raise NotImplementedError.new + end + + # Writes an entry to the cache implementation. Subclasses must implement + # this method. + def write_entry(key, entry, **options) + raise NotImplementedError.new + end + + def serialize_entry(entry) + @coder.dump(entry) + end + + def deserialize_entry(payload) + payload.nil? ? nil : @coder.load(payload) + end + + # Reads multiple entries from the cache implementation. Subclasses MAY + # implement this method. + def read_multi_entries(names, **options) + names.each_with_object({}) do |name, results| + key = normalize_key(name, options) + entry = read_entry(key, **options) + + next unless entry + + version = normalize_version(name, options) + + if entry.expired? + delete_entry(key, **options) + elsif !entry.mismatched?(version) + results[name] = entry.value + end + end + end + + # Writes multiple entries to the cache implementation. Subclasses MAY + # implement this method. + def write_multi_entries(hash, **options) + hash.each do |key, entry| + write_entry key, entry, **options + end + end + + # Deletes an entry from the cache implementation. Subclasses must + # implement this method. + def delete_entry(key, **options) + raise NotImplementedError.new + end + + # Deletes multiples entries in the cache implementation. Subclasses MAY + # implement this method. + def delete_multi_entries(entries, **options) + entries.count { |key| delete_entry(key, **options) } + end + + # Merges the default options with ones specific to a method call. + def merged_options(call_options) + if call_options + if options.empty? + call_options + else + options.merge(call_options) + end + else + options + end + end + + # Expands and namespaces the cache key. May be overridden by + # cache stores to do additional normalization. 
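# --- Editor's illustrative sketch (not part of the vendored source): a toy
# subclass wiring the abstract read_entry/write_entry/delete_entry hooks
# documented earlier in this file to a plain Hash, which is enough for the
# public #read, #write, #fetch and #delete methods to work. The class name is
# invented; a real store would also handle serialization and thread safety.
require "active_support"

class TinyHashStore < ActiveSupport::Cache::Store
  def initialize(options = nil)
    super
    @data = {}
  end

  private
    def read_entry(key, **options)
      @data[key]
    end

    def write_entry(key, entry, **options)
      @data[key] = entry
      true
    end

    def delete_entry(key, **options)
      !!@data.delete(key)
    end
end

store = TinyHashStore.new
store.fetch("answer") { 42 }  # => 42 (computed once, then cached)
store.read("answer")          # => 42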
+ def normalize_key(key, options = nil) + namespace_key expanded_key(key), options + end + + # Prefix the key with a namespace string: + # + # namespace_key 'foo', namespace: 'cache' + # # => 'cache:foo' + # + # With a namespace block: + # + # namespace_key 'foo', namespace: -> { 'cache' } + # # => 'cache:foo' + def namespace_key(key, options = nil) + options = merged_options(options) + namespace = options[:namespace] + + if namespace.respond_to?(:call) + namespace = namespace.call + end + + if key && key.encoding != Encoding::UTF_8 + key = key.dup.force_encoding(Encoding::UTF_8) + end + + if namespace + "#{namespace}:#{key}" + else + key + end + end + + # Expands key to be a consistent string value. Invokes +cache_key+ if + # object responds to +cache_key+. Otherwise, +to_param+ method will be + # called. If the key is a Hash, then keys will be sorted alphabetically. + def expanded_key(key) + return key.cache_key.to_s if key.respond_to?(:cache_key) + + case key + when Array + if key.size > 1 + key.collect { |element| expanded_key(element) } + else + expanded_key(key.first) + end + when Hash + key.collect { |k, v| "#{k}=#{v}" }.sort! + else + key + end.to_param + end + + def normalize_version(key, options = nil) + (options && options[:version].try(:to_param)) || expanded_version(key) + end + + def expanded_version(key) + case + when key.respond_to?(:cache_version) then key.cache_version.to_param + when key.is_a?(Array) then key.map { |element| expanded_version(element) }.tap(&:compact!).to_param + when key.respond_to?(:to_a) then expanded_version(key.to_a) + end + end + + def instrument(operation, key, options = nil) + if logger && logger.debug? && !silence? + logger.debug "Cache #{operation}: #{normalize_key(key, options)}#{options.blank? ? "" : " (#{options.inspect})"}" + end + + payload = { key: key, store: self.class.name } + payload.merge!(options) if options.is_a?(Hash) + ActiveSupport::Notifications.instrument("cache_#{operation}.active_support", payload) { yield(payload) } + end + + def handle_expired_entry(entry, key, options) + if entry && entry.expired? + race_ttl = options[:race_condition_ttl].to_i + if (race_ttl > 0) && (Time.now.to_f - entry.expires_at <= race_ttl) + # When an entry has a positive :race_condition_ttl defined, put the stale entry back into the cache + # for a brief period while the entry is being recalculated. + entry.expires_at = Time.now + race_ttl + write_entry(key, entry, expires_in: race_ttl * 2) + else + delete_entry(key, **options) + end + entry = nil + end + entry + end + + def get_entry_value(entry, name, options) + instrument(:fetch_hit, name, options) { } + entry.value + end + + def save_block_result_to_cache(name, options) + result = instrument(:generate, name, options) do + yield(name) + end + + write(name, result, options) unless result.nil? && options[:skip_nil] + result + end + end + + module NullCoder # :nodoc: + class << self + def load(payload) + payload + end + + def dump(entry) + entry + end + end + end + + # This class is used to represent cache entries. Cache entries have a value, an optional + # expiration time, and an optional version. The expiration time is used to support the :race_condition_ttl option + # on the cache. The version is used to support the :version option on the cache for rejecting + # mismatches. + # + # Since cache entries in most instances will be serialized, the internals of this class are highly optimized + # using short instance variable names that are lazily defined. 
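# --- Editor's illustrative sketch (not part of the vendored source): poking at
# the Entry value object defined just below. The payload, TTL and versions are
# arbitrary.
require "active_support"

entry = ActiveSupport::Cache::Entry.new("payload", expires_in: 60, version: "v1")
entry.value              # => "payload"
entry.expired?           # => false until 60 seconds have elapsed
entry.mismatched?("v1")  # => false
entry.mismatched?("v2")  # => true (a reader asking for "v2" treats this as a miss)
entry.bytesize           # => 7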
+ class Entry # :nodoc: + attr_reader :version + + DEFAULT_COMPRESS_LIMIT = 1.kilobyte + + # Creates a new cache entry for the specified value. Options supported are + # +:compress+, +:compress_threshold+, +:version+ and +:expires_in+. + def initialize(value, compress: true, compress_threshold: DEFAULT_COMPRESS_LIMIT, version: nil, expires_in: nil, **) + @value = value + @version = version + @created_at = Time.now.to_f + @expires_in = expires_in && expires_in.to_f + + compress!(compress_threshold) if compress + end + + def value + compressed? ? uncompress(@value) : @value + end + + def mismatched?(version) + @version && version && @version != version + end + + # Checks if the entry is expired. The +expires_in+ parameter can override + # the value set when the entry was created. + def expired? + @expires_in && @created_at + @expires_in <= Time.now.to_f + end + + def expires_at + @expires_in ? @created_at + @expires_in : nil + end + + def expires_at=(value) + if value + @expires_in = value.to_f - @created_at + else + @expires_in = nil + end + end + + # Returns the size of the cached value. This could be less than + # value.bytesize if the data is compressed. + def bytesize + case value + when NilClass + 0 + when String + @value.bytesize + else + @s ||= Marshal.dump(@value).bytesize + end + end + + # Duplicates the value in a class. This is used by cache implementations that don't natively + # serialize entries to protect against accidental cache modifications. + def dup_value! + if @value && !compressed? && !(@value.is_a?(Numeric) || @value == true || @value == false) + if @value.is_a?(String) + @value = @value.dup + else + @value = Marshal.load(Marshal.dump(@value)) + end + end + end + + private + def compress!(compress_threshold) + case @value + when nil, true, false, Numeric + uncompressed_size = 0 + when String + uncompressed_size = @value.bytesize + else + serialized = Marshal.dump(@value) + uncompressed_size = serialized.bytesize + end + + if uncompressed_size >= compress_threshold + serialized ||= Marshal.dump(@value) + compressed = Zlib::Deflate.deflate(serialized) + + if compressed.bytesize < uncompressed_size + @value = compressed + @compressed = true + end + end + end + + def compressed? + defined?(@compressed) + end + + def uncompress(value) + Marshal.load(Zlib::Inflate.inflate(value)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/file_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/file_store.rb new file mode 100644 index 0000000..2ab632a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/file_store.rb @@ -0,0 +1,196 @@ +# frozen_string_literal: true + +require "active_support/core_ext/marshal" +require "active_support/core_ext/file/atomic" +require "active_support/core_ext/string/conversions" +require "uri/common" + +module ActiveSupport + module Cache + # A cache store implementation which stores everything on the filesystem. + # + # FileStore implements the Strategy::LocalCache strategy which implements + # an in-memory cache inside of a block. 
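# --- Editor's illustrative sketch (not part of the vendored source): obtaining
# and using a FileStore through the generic lookup helper. The temporary
# directory is a demo value; #clear removes everything under it except
# .keep/.gitkeep files.
require "active_support"
require "tmpdir"

dir = Dir.mktmpdir("cache-demo")
cache = ActiveSupport::Cache.lookup_store(:file_store, dir)
cache.write("greeting", "hello", expires_in: 300)
cache.read("greeting")  # => "hello"
cache.clear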
+ class FileStore < Store + prepend Strategy::LocalCache + attr_reader :cache_path + + DIR_FORMATTER = "%03X" + FILENAME_MAX_SIZE = 226 # max filename size on file system is 255, minus room for timestamp, pid, and random characters appended by Tempfile (used by atomic write) + FILEPATH_MAX_SIZE = 900 # max is 1024, plus some room + GITKEEP_FILES = [".gitkeep", ".keep"].freeze + + def initialize(cache_path, **options) + super(options) + @cache_path = cache_path.to_s + end + + # Advertise cache versioning support. + def self.supports_cache_versioning? + true + end + + # Deletes all items from the cache. In this case it deletes all the entries in the specified + # file store directory except for .keep or .gitkeep. Be careful which directory is specified in your + # config file when using +FileStore+ because everything in that directory will be deleted. + def clear(options = nil) + root_dirs = (Dir.children(cache_path) - GITKEEP_FILES) + FileUtils.rm_r(root_dirs.collect { |f| File.join(cache_path, f) }) + rescue Errno::ENOENT, Errno::ENOTEMPTY + end + + # Preemptively iterates through all stored keys and removes the ones which have expired. + def cleanup(options = nil) + options = merged_options(options) + search_dir(cache_path) do |fname| + entry = read_entry(fname, **options) + delete_entry(fname, **options) if entry && entry.expired? + end + end + + # Increments an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def increment(name, amount = 1, options = nil) + modify_value(name, amount, options) + end + + # Decrements an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def decrement(name, amount = 1, options = nil) + modify_value(name, -amount, options) + end + + def delete_matched(matcher, options = nil) + options = merged_options(options) + instrument(:delete_matched, matcher.inspect) do + matcher = key_matcher(matcher, options) + search_dir(cache_path) do |path| + key = file_path_key(path) + delete_entry(path, **options) if key.match(matcher) + end + end + end + + private + def read_entry(key, **options) + if File.exist?(key) + entry = File.open(key) { |f| deserialize_entry(f.read) } + entry if entry.is_a?(Cache::Entry) + end + rescue => e + logger.error("FileStoreError (#{e}): #{e.message}") if logger + nil + end + + def write_entry(key, entry, **options) + return false if options[:unless_exist] && File.exist?(key) + ensure_cache_path(File.dirname(key)) + File.atomic_write(key, cache_path) { |f| f.write(serialize_entry(entry)) } + true + end + + def delete_entry(key, **options) + if File.exist?(key) + begin + File.delete(key) + delete_empty_directories(File.dirname(key)) + true + rescue => e + # Just in case the error was caused by another process deleting the file first. + raise e if File.exist?(key) + false + end + end + end + + # Lock a file for a block so only one process can modify it at a time. + def lock_file(file_name, &block) + if File.exist?(file_name) + File.open(file_name, "r+") do |f| + f.flock File::LOCK_EX + yield + ensure + f.flock File::LOCK_UN + end + else + yield + end + end + + # Translate a key into a file path. + def normalize_key(key, options) + key = super + fname = URI.encode_www_form_component(key) + + if fname.size > FILEPATH_MAX_SIZE + fname = ActiveSupport::Digest.hexdigest(key) + end + + hash = Zlib.adler32(fname) + hash, dir_1 = hash.divmod(0x1000) + dir_2 = hash.modulo(0x1000) + + # Make sure file name doesn't exceed file system limits. 
+ if fname.length < FILENAME_MAX_SIZE + fname_paths = fname + else + fname_paths = [] + begin + fname_paths << fname[0, FILENAME_MAX_SIZE] + fname = fname[FILENAME_MAX_SIZE..-1] + end until fname.blank? + end + + File.join(cache_path, DIR_FORMATTER % dir_1, DIR_FORMATTER % dir_2, fname_paths) + end + + # Translate a file path into a key. + def file_path_key(path) + fname = path[cache_path.to_s.size..-1].split(File::SEPARATOR, 4).last + URI.decode_www_form_component(fname, Encoding::UTF_8) + end + + # Delete empty directories in the cache. + def delete_empty_directories(dir) + return if File.realpath(dir) == File.realpath(cache_path) + if Dir.children(dir).empty? + Dir.delete(dir) rescue nil + delete_empty_directories(File.dirname(dir)) + end + end + + # Make sure a file path's directories exist. + def ensure_cache_path(path) + FileUtils.makedirs(path) unless File.exist?(path) + end + + def search_dir(dir, &callback) + return if !File.exist?(dir) + Dir.each_child(dir) do |d| + name = File.join(dir, d) + if File.directory?(name) + search_dir(name, &callback) + else + callback.call name + end + end + end + + # Modifies the amount of an already existing integer value that is stored in the cache. + # If the key is not found nothing is done. + def modify_value(name, amount, options) + file_name = normalize_key(name, options) + + lock_file(file_name) do + options = merged_options(options) + + if num = read(name, options) + num = num.to_i + amount + write(name, num, options) + num + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/mem_cache_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/mem_cache_store.rb new file mode 100644 index 0000000..bb04a94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/mem_cache_store.rb @@ -0,0 +1,213 @@ +# frozen_string_literal: true + +begin + require "dalli" +rescue LoadError => e + $stderr.puts "You don't have dalli installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end + +require "active_support/core_ext/enumerable" +require "active_support/core_ext/marshal" +require "active_support/core_ext/array/extract_options" + +module ActiveSupport + module Cache + # A cache store implementation which stores data in Memcached: + # https://memcached.org + # + # This is currently the most popular cache store for production websites. + # + # Special features: + # - Clustering and load balancing. One can specify multiple memcached servers, + # and MemCacheStore will load balance between all available servers. If a + # server goes down, then MemCacheStore will ignore it until it comes back up. + # + # MemCacheStore implements the Strategy::LocalCache strategy which implements + # an in-memory cache inside of a block. + class MemCacheStore < Store + DEFAULT_CODER = NullCoder # Dalli automatically Marshal values + + # Provide support for raw values in the local cache strategy. + module LocalCacheWithRaw # :nodoc: + private + def write_entry(key, entry, **options) + if options[:raw] && local_cache + raw_entry = Entry.new(entry.value.to_s) + raw_entry.expires_at = entry.expires_at + super(key, raw_entry, **options) + else + super + end + end + end + + # Advertise cache versioning support. + def self.supports_cache_versioning? 
+ true + end + + prepend Strategy::LocalCache + prepend LocalCacheWithRaw + + ESCAPE_KEY_CHARS = /[\x00-\x20%\x7F-\xFF]/n + + # Creates a new Dalli::Client instance with specified addresses and options. + # If no addresses are provided, we give nil to Dalli::Client, so it uses its fallbacks: + # - ENV["MEMCACHE_SERVERS"] (if defined) + # - "127.0.0.1:11211" (otherwise) + # + # ActiveSupport::Cache::MemCacheStore.build_mem_cache + # # => # + # ActiveSupport::Cache::MemCacheStore.build_mem_cache('localhost:10290') + # # => # + def self.build_mem_cache(*addresses) # :nodoc: + addresses = addresses.flatten + options = addresses.extract_options! + addresses = nil if addresses.compact.empty? + pool_options = retrieve_pool_options(options) + + if pool_options.empty? + Dalli::Client.new(addresses, options) + else + ensure_connection_pool_added! + ConnectionPool.new(pool_options) { Dalli::Client.new(addresses, options.merge(threadsafe: false)) } + end + end + + # Creates a new MemCacheStore object, with the given memcached server + # addresses. Each address is either a host name, or a host-with-port string + # in the form of "host_name:port". For example: + # + # ActiveSupport::Cache::MemCacheStore.new("localhost", "server-downstairs.localnetwork:8229") + # + # If no addresses are provided, but ENV['MEMCACHE_SERVERS'] is defined, it will be used instead. Otherwise, + # MemCacheStore will connect to localhost:11211 (the default memcached port). + def initialize(*addresses) + addresses = addresses.flatten + options = addresses.extract_options! + super(options) + + unless [String, Dalli::Client, NilClass].include?(addresses.first.class) + raise ArgumentError, "First argument must be an empty array, an array of hosts or a Dalli::Client instance." + end + if addresses.first.is_a?(Dalli::Client) + @data = addresses.first + else + mem_cache_options = options.dup + UNIVERSAL_OPTIONS.each { |name| mem_cache_options.delete(name) } + @data = self.class.build_mem_cache(*(addresses + [mem_cache_options])) + end + end + + # Increment a cached value. This method uses the memcached incr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + def increment(name, amount = 1, options = nil) + options = merged_options(options) + instrument(:increment, name, amount: amount) do + rescue_error_with nil do + @data.with { |c| c.incr(normalize_key(name, options), amount, options[:expires_in]) } + end + end + end + + # Decrement a cached value. This method uses the memcached decr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + def decrement(name, amount = 1, options = nil) + options = merged_options(options) + instrument(:decrement, name, amount: amount) do + rescue_error_with nil do + @data.with { |c| c.decr(normalize_key(name, options), amount, options[:expires_in]) } + end + end + end + + # Clear the entire cache on all memcached servers. This method should + # be used with care when shared cache is being used. + def clear(options = nil) + rescue_error_with(nil) { @data.with { |c| c.flush_all } } + end + + # Get the statistics from the memcached servers. + def stats + @data.with { |c| c.stats } + end + + private + # Read an entry from the cache. 
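# --- Editor's illustrative sketch (not part of the vendored source): the
# raw-value counters documented above. Assumes the dalli gem and a memcached
# server listening on localhost:11211; the key name is arbitrary.
require "active_support"

cache = ActiveSupport::Cache::MemCacheStore.new("localhost:11211")
cache.write("page_views", 0, raw: true)  # counters must be written with raw: true
cache.increment("page_views")            # => 1
cache.increment("page_views", 10)        # => 11
cache.decrement("page_views", 5)         # => 6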
+ def read_entry(key, **options) + rescue_error_with(nil) { deserialize_entry(@data.with { |c| c.get(key, options) }) } + end + + # Write an entry to the cache. + def write_entry(key, entry, **options) + method = options[:unless_exist] ? :add : :set + value = options[:raw] ? entry.value.to_s : serialize_entry(entry) + expires_in = options[:expires_in].to_i + if options[:race_condition_ttl] && expires_in > 0 && !options[:raw] + # Set the memcache expire a few minutes in the future to support race condition ttls on read + expires_in += 5.minutes + end + rescue_error_with false do + # The value "compress: false" prevents duplicate compression within Dalli. + @data.with { |c| c.send(method, key, value, expires_in, **options, compress: false) } + end + end + + # Reads multiple entries from the cache implementation. + def read_multi_entries(names, **options) + keys_to_names = names.index_by { |name| normalize_key(name, options) } + + raw_values = @data.with { |c| c.get_multi(keys_to_names.keys) } + values = {} + + raw_values.each do |key, value| + entry = deserialize_entry(value) + + unless entry.expired? || entry.mismatched?(normalize_version(keys_to_names[key], options)) + values[keys_to_names[key]] = entry.value + end + end + + values + end + + # Delete an entry from the cache. + def delete_entry(key, **options) + rescue_error_with(false) { @data.with { |c| c.delete(key) } } + end + + # Memcache keys are binaries. So we need to force their encoding to binary + # before applying the regular expression to ensure we are escaping all + # characters properly. + def normalize_key(key, options) + key = super + + if key + key = key.dup.force_encoding(Encoding::ASCII_8BIT) + key = key.gsub(ESCAPE_KEY_CHARS) { |match| "%#{match.getbyte(0).to_s(16).upcase}" } + key = "#{key[0, 213]}:md5:#{ActiveSupport::Digest.hexdigest(key)}" if key.size > 250 + end + + key + end + + def deserialize_entry(payload) + entry = super + entry = Entry.new(entry, compress: false) unless entry.nil? || entry.is_a?(Entry) + entry + end + + def rescue_error_with(fallback) + yield + rescue Dalli::DalliError => e + logger.error("DalliError (#{e}): #{e.message}") if logger + fallback + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/memory_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/memory_store.rb new file mode 100644 index 0000000..4bb8993 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/memory_store.rb @@ -0,0 +1,195 @@ +# frozen_string_literal: true + +require "monitor" + +module ActiveSupport + module Cache + # A cache store implementation which stores everything into memory in the + # same process. If you're running multiple Ruby on Rails server processes + # (which is the case if you're using Phusion Passenger or puma clustered mode), + # then this means that Rails server process instances won't be able + # to share cache data with each other and this may not be the most + # appropriate cache in that scenario. + # + # This cache has a bounded size specified by the :size options to the + # initializer (default is 32Mb). When the cache exceeds the allotted size, + # a cleanup will occur which tries to prune the cache down to three quarters + # of the maximum size by removing the least recently used entries. + # + # Unlike other Cache store implementations, MemoryStore does not compress + # values by default. 
MemoryStore does not benefit from compression as much + # as other Store implementations, as it does not send data over a network. + # However, when compression is enabled, it still pays the full cost of + # compression in terms of cpu use. + # + # MemoryStore is thread-safe. + class MemoryStore < Store + module DupCoder # :nodoc: + class << self + def load(entry) + entry = entry.dup + entry.dup_value! + entry + end + + def dump(entry) + entry.dup_value! + entry + end + end + end + + DEFAULT_CODER = DupCoder + + def initialize(options = nil) + options ||= {} + # Disable compression by default. + options[:compress] ||= false + super(options) + @data = {} + @max_size = options[:size] || 32.megabytes + @max_prune_time = options[:max_prune_time] || 2 + @cache_size = 0 + @monitor = Monitor.new + @pruning = false + end + + # Advertise cache versioning support. + def self.supports_cache_versioning? + true + end + + # Delete all data stored in a given cache store. + def clear(options = nil) + synchronize do + @data.clear + @cache_size = 0 + end + end + + # Preemptively iterates through all stored keys and removes the ones which have expired. + def cleanup(options = nil) + options = merged_options(options) + instrument(:cleanup, size: @data.size) do + keys = synchronize { @data.keys } + keys.each do |key| + entry = @data[key] + delete_entry(key, **options) if entry && entry.expired? + end + end + end + + # To ensure entries fit within the specified memory prune the cache by removing the least + # recently accessed entries. + def prune(target_size, max_time = nil) + return if pruning? + @pruning = true + begin + start_time = Concurrent.monotonic_time + cleanup + instrument(:prune, target_size, from: @cache_size) do + keys = synchronize { @data.keys } + keys.each do |key| + delete_entry(key, **options) + return if @cache_size <= target_size || (max_time && Concurrent.monotonic_time - start_time > max_time) + end + end + ensure + @pruning = false + end + end + + # Returns true if the cache is currently being pruned. + def pruning? + @pruning + end + + # Increment an integer value in the cache. + def increment(name, amount = 1, options = nil) + modify_value(name, amount, options) + end + + # Decrement an integer value in the cache. + def decrement(name, amount = 1, options = nil) + modify_value(name, -amount, options) + end + + # Deletes cache entries if the cache key matches a given pattern. + def delete_matched(matcher, options = nil) + options = merged_options(options) + instrument(:delete_matched, matcher.inspect) do + matcher = key_matcher(matcher, options) + keys = synchronize { @data.keys } + keys.each do |key| + delete_entry(key, **options) if key.match(matcher) + end + end + end + + def inspect # :nodoc: + "#<#{self.class.name} entries=#{@data.size}, size=#{@cache_size}, options=#{@options.inspect}>" + end + + # Synchronize calls to the cache. This should be called wherever the underlying cache implementation + # is not thread safe. 
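# --- Editor's illustrative sketch (not part of the vendored source): the
# bounded-size behaviour described in the class comment above. The 1-megabyte
# cap and the entry sizes are arbitrary demo numbers.
require "active_support"

cache = ActiveSupport::Cache::MemoryStore.new(size: 1.megabyte)
200.times { |i| cache.write("blob:#{i}", "x" * 10_000) }
# Once the accounted size crosses the cap, the store prunes itself back toward
# 75% of the maximum, evicting the least recently used entries first.
cache.pruning?  # => false once the prune pass has finished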
+ def synchronize(&block) # :nodoc: + @monitor.synchronize(&block) + end + + private + PER_ENTRY_OVERHEAD = 240 + + def cached_size(key, payload) + key.to_s.bytesize + payload.bytesize + PER_ENTRY_OVERHEAD + end + + def read_entry(key, **options) + entry = nil + synchronize do + payload = @data.delete(key) + if payload + @data[key] = payload + entry = deserialize_entry(payload) + end + end + entry + end + + def write_entry(key, entry, **options) + payload = serialize_entry(entry) + synchronize do + return false if options[:unless_exist] && @data.key?(key) + + old_payload = @data[key] + if old_payload + @cache_size -= (old_payload.bytesize - payload.bytesize) + else + @cache_size += cached_size(key, payload) + end + @data[key] = payload + prune(@max_size * 0.75, @max_prune_time) if @cache_size > @max_size + true + end + end + + def delete_entry(key, **options) + synchronize do + payload = @data.delete(key) + @cache_size -= cached_size(key, payload) if payload + !!payload + end + end + + def modify_value(name, amount, options) + options = merged_options(options) + synchronize do + if num = read(name, options) + num = num.to_i + amount + write(name, num, options) + num + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/null_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/null_store.rb new file mode 100644 index 0000000..5a6b97b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/null_store.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module ActiveSupport + module Cache + # A cache store implementation which doesn't actually store anything. Useful in + # development and test environments where you don't want caching turned on but + # need to go through the caching interface. + # + # This cache does implement the local cache strategy, so values will actually + # be cached inside blocks that utilize this strategy. See + # ActiveSupport::Cache::Strategy::LocalCache for more details. + class NullStore < Store + prepend Strategy::LocalCache + + # Advertise cache versioning support. + def self.supports_cache_versioning? + true + end + + def clear(options = nil) + end + + def cleanup(options = nil) + end + + def increment(name, amount = 1, options = nil) + end + + def decrement(name, amount = 1, options = nil) + end + + def delete_matched(matcher, options = nil) + end + + private + def read_entry(key, **options) + end + + def write_entry(key, entry, **options) + true + end + + def delete_entry(key, **options) + false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/redis_cache_store.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/redis_cache_store.rb new file mode 100644 index 0000000..1afecb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/redis_cache_store.rb @@ -0,0 +1,493 @@ +# frozen_string_literal: true + +begin + gem "redis", ">= 4.0.1" + require "redis" + require "redis/distributed" +rescue LoadError + warn "The Redis cache store requires the redis gem, version 4.0.1 or later. Please add it to your Gemfile: `gem \"redis\", \"~> 4.0\"`" + raise +end + +# Prefer the hiredis driver but don't require it. 
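# --- Editor's illustrative sketch (not part of the vendored source): once the
# redis gem (and optionally hiredis) is installed, the store defined below can
# be constructed directly. The URL and namespace are example values and assume
# a Redis server running locally.
require "active_support"

cache = ActiveSupport::Cache::RedisCacheStore.new(
  url: "redis://localhost:6379/0",
  namespace: "myapp-cache",
  expires_in: 1.hour
)
cache.write("greeting", "hello")
cache.read("greeting")  # => "hello" when the server is reachable; misses are
                        # returned (and errors swallowed by the failsafe) when it is not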
+begin + require "redis/connection/hiredis" +rescue LoadError +end + +require "digest/sha2" +require "active_support/core_ext/marshal" + +module ActiveSupport + module Cache + module ConnectionPoolLike + def with + yield self + end + end + + ::Redis.include(ConnectionPoolLike) + ::Redis::Distributed.include(ConnectionPoolLike) + + # Redis cache store. + # + # Deployment note: Take care to use a *dedicated Redis cache* rather + # than pointing this at your existing Redis server. It won't cope well + # with mixed usage patterns and it won't expire cache entries by default. + # + # Redis cache server setup guide: https://redis.io/topics/lru-cache + # + # * Supports vanilla Redis, hiredis, and Redis::Distributed. + # * Supports Memcached-like sharding across Redises with Redis::Distributed. + # * Fault tolerant. If the Redis server is unavailable, no exceptions are + # raised. Cache fetches are all misses and writes are dropped. + # * Local cache. Hot in-memory primary cache within block/middleware scope. + # * +read_multi+ and +write_multi+ support for Redis mget/mset. Use Redis::Distributed + # 4.0.1+ for distributed mget support. + # * +delete_matched+ support for Redis KEYS globs. + class RedisCacheStore < Store + # Keys are truncated with their own SHA2 digest if they exceed 1kB + MAX_KEY_BYTESIZE = 1024 + + DEFAULT_REDIS_OPTIONS = { + connect_timeout: 20, + read_timeout: 1, + write_timeout: 1, + reconnect_attempts: 0, + } + + DEFAULT_ERROR_HANDLER = -> (method:, returning:, exception:) do + if logger + logger.error { "RedisCacheStore: #{method} failed, returned #{returning.inspect}: #{exception.class}: #{exception.message}" } + end + end + + # The maximum number of entries to receive per SCAN call. + SCAN_BATCH_SIZE = 1000 + private_constant :SCAN_BATCH_SIZE + + # Advertise cache versioning support. + def self.supports_cache_versioning? + true + end + + # Support raw values in the local cache strategy. + module LocalCacheWithRaw # :nodoc: + private + def write_entry(key, entry, **options) + if options[:raw] && local_cache + raw_entry = Entry.new(serialize_entry(entry, raw: true)) + raw_entry.expires_at = entry.expires_at + super(key, raw_entry, **options) + else + super + end + end + + def write_multi_entries(entries, **options) + if options[:raw] && local_cache + raw_entries = entries.map do |key, entry| + raw_entry = Entry.new(serialize_entry(entry, raw: true)) + raw_entry.expires_at = entry.expires_at + end.to_h + + super(raw_entries, **options) + else + super + end + end + end + + prepend Strategy::LocalCache + prepend LocalCacheWithRaw + + class << self + # Factory method to create a new Redis instance. + # + # Handles four options: :redis block, :redis instance, single :url + # string, and multiple :url strings. 
+ # + # Option Class Result + # :redis Proc -> options[:redis].call + # :redis Object -> options[:redis] + # :url String -> Redis.new(url: …) + # :url Array -> Redis::Distributed.new([{ url: … }, { url: … }, …]) + # + def build_redis(redis: nil, url: nil, **redis_options) #:nodoc: + urls = Array(url) + + if redis.is_a?(Proc) + redis.call + elsif redis + redis + elsif urls.size > 1 + build_redis_distributed_client urls: urls, **redis_options + else + build_redis_client url: urls.first, **redis_options + end + end + + private + def build_redis_distributed_client(urls:, **redis_options) + ::Redis::Distributed.new([], DEFAULT_REDIS_OPTIONS.merge(redis_options)).tap do |dist| + urls.each { |u| dist.add_node url: u } + end + end + + def build_redis_client(url:, **redis_options) + ::Redis.new DEFAULT_REDIS_OPTIONS.merge(redis_options.merge(url: url)) + end + end + + attr_reader :redis_options + attr_reader :max_key_bytesize + + # Creates a new Redis cache store. + # + # Handles four options: :redis block, :redis instance, single :url + # string, and multiple :url strings. + # + # Option Class Result + # :redis Proc -> options[:redis].call + # :redis Object -> options[:redis] + # :url String -> Redis.new(url: …) + # :url Array -> Redis::Distributed.new([{ url: … }, { url: … }, …]) + # + # No namespace is set by default. Provide one if the Redis cache + # server is shared with other apps: namespace: 'myapp-cache'. + # + # Compression is enabled by default with a 1kB threshold, so cached + # values larger than 1kB are automatically compressed. Disable by + # passing compress: false or change the threshold by passing + # compress_threshold: 4.kilobytes. + # + # No expiry is set on cache entries by default. Redis is expected to + # be configured with an eviction policy that automatically deletes + # least-recently or -frequently used keys when it reaches max memory. + # See https://redis.io/topics/lru-cache for cache server setup. + # + # Race condition TTL is not set by default. This can be used to avoid + # "thundering herd" cache writes when hot cache entries are expired. + # See ActiveSupport::Cache::Store#fetch for more. + def initialize(namespace: nil, compress: true, compress_threshold: 1.kilobyte, coder: DEFAULT_CODER, expires_in: nil, race_condition_ttl: nil, error_handler: DEFAULT_ERROR_HANDLER, **redis_options) + @redis_options = redis_options + + @max_key_bytesize = MAX_KEY_BYTESIZE + @error_handler = error_handler + + super namespace: namespace, + compress: compress, compress_threshold: compress_threshold, + expires_in: expires_in, race_condition_ttl: race_condition_ttl, + coder: coder + end + + def redis + @redis ||= begin + pool_options = self.class.send(:retrieve_pool_options, redis_options) + + if pool_options.any? + self.class.send(:ensure_connection_pool_added!) + ::ConnectionPool.new(pool_options) { self.class.build_redis(**redis_options) } + else + self.class.build_redis(**redis_options) + end + end + end + + def inspect + instance = @redis || @redis_options + "#<#{self.class} options=#{options.inspect} redis=#{instance.inspect}>" + end + + # Cache Store API implementation. + # + # Read multiple values at once. Returns a hash of requested keys -> + # fetched values. + def read_multi(*names) + if mget_capable? + instrument(:read_multi, names, options) do |payload| + read_multi_mget(*names).tap do |results| + payload[:hits] = results.keys + end + end + else + super + end + end + + # Cache Store API implementation. 
+ # + # Supports Redis KEYS glob patterns: + # + # h?llo matches hello, hallo and hxllo + # h*llo matches hllo and heeeello + # h[ae]llo matches hello and hallo, but not hillo + # h[^e]llo matches hallo, hbllo, ... but not hello + # h[a-b]llo matches hallo and hbllo + # + # Use \ to escape special characters if you want to match them verbatim. + # + # See https://redis.io/commands/KEYS for more. + # + # Failsafe: Raises errors. + def delete_matched(matcher, options = nil) + instrument :delete_matched, matcher do + unless String === matcher + raise ArgumentError, "Only Redis glob strings are supported: #{matcher.inspect}" + end + redis.with do |c| + pattern = namespace_key(matcher, options) + cursor = "0" + # Fetch keys in batches using SCAN to avoid blocking the Redis server. + nodes = c.respond_to?(:nodes) ? c.nodes : [c] + + nodes.each do |node| + begin + cursor, keys = node.scan(cursor, match: pattern, count: SCAN_BATCH_SIZE) + node.del(*keys) unless keys.empty? + end until cursor == "0" + end + end + end + end + + # Cache Store API implementation. + # + # Increment a cached value. This method uses the Redis incr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + # + # Failsafe: Raises errors. + def increment(name, amount = 1, options = nil) + instrument :increment, name, amount: amount do + failsafe :increment do + options = merged_options(options) + key = normalize_key(name, options) + + redis.with do |c| + c.incrby(key, amount).tap do + write_key_expiry(c, key, options) + end + end + end + end + end + + # Cache Store API implementation. + # + # Decrement a cached value. This method uses the Redis decr atomic + # operator and can only be used on values written with the :raw option. + # Calling it on a value not stored with :raw will initialize that value + # to zero. + # + # Failsafe: Raises errors. + def decrement(name, amount = 1, options = nil) + instrument :decrement, name, amount: amount do + failsafe :decrement do + options = merged_options(options) + key = normalize_key(name, options) + + redis.with do |c| + c.decrby(key, amount).tap do + write_key_expiry(c, key, options) + end + end + end + end + end + + # Cache Store API implementation. + # + # Removes expired entries. Handled natively by Redis least-recently-/ + # least-frequently-used expiry, so manual cleanup is not supported. + def cleanup(options = nil) + super + end + + # Clear the entire cache on all Redis servers. Safe to use on + # shared servers if the cache is namespaced. + # + # Failsafe: Raises errors. + def clear(options = nil) + failsafe :clear do + if namespace = merged_options(options)[:namespace] + delete_matched "*", namespace: namespace + else + redis.with { |c| c.flushdb } + end + end + end + + def mget_capable? #:nodoc: + set_redis_capabilities unless defined? @mget_capable + @mget_capable + end + + def mset_capable? #:nodoc: + set_redis_capabilities unless defined? @mset_capable + @mset_capable + end + + private + def set_redis_capabilities + case redis + when Redis::Distributed + @mget_capable = true + @mset_capable = false + else + @mget_capable = true + @mset_capable = true + end + end + + # Store provider interface: + # Read an entry from the cache. + def read_entry(key, **options) + failsafe :read_entry do + raw = options&.fetch(:raw, false) + deserialize_entry(redis.with { |c| c.get(key) }, raw: raw) + end + end + + def read_multi_entries(names, **options) + if mget_capable? 
+ read_multi_mget(*names, **options) + else + super + end + end + + def read_multi_mget(*names) + options = names.extract_options! + options = merged_options(options) + return {} if names == [] + raw = options&.fetch(:raw, false) + + keys = names.map { |name| normalize_key(name, options) } + + values = failsafe(:read_multi_mget, returning: {}) do + redis.with { |c| c.mget(*keys) } + end + + names.zip(values).each_with_object({}) do |(name, value), results| + if value + entry = deserialize_entry(value, raw: raw) + unless entry.nil? || entry.expired? || entry.mismatched?(normalize_version(name, options)) + results[name] = entry.value + end + end + end + end + + # Write an entry to the cache. + # + # Requires Redis 2.6.12+ for extended SET options. + def write_entry(key, entry, unless_exist: false, raw: false, expires_in: nil, race_condition_ttl: nil, **options) + serialized_entry = serialize_entry(entry, raw: raw) + + # If race condition TTL is in use, ensure that cache entries + # stick around a bit longer after they would have expired + # so we can purposefully serve stale entries. + if race_condition_ttl && expires_in && expires_in > 0 && !raw + expires_in += 5.minutes + end + + failsafe :write_entry, returning: false do + if unless_exist || expires_in + modifiers = {} + modifiers[:nx] = unless_exist + modifiers[:px] = (1000 * expires_in.to_f).ceil if expires_in + + redis.with { |c| c.set key, serialized_entry, **modifiers } + else + redis.with { |c| c.set key, serialized_entry } + end + end + end + + def write_key_expiry(client, key, options) + if options[:expires_in] && client.ttl(key).negative? + client.expire key, options[:expires_in].to_i + end + end + + # Delete an entry from the cache. + def delete_entry(key, options) + failsafe :delete_entry, returning: false do + redis.with { |c| c.del key } + end + end + + # Deletes multiple entries in the cache. Returns the number of entries deleted. + def delete_multi_entries(entries, **_options) + redis.with { |c| c.del(entries) } + end + + # Nonstandard store provider API to write multiple values at once. + def write_multi_entries(entries, expires_in: nil, **options) + if entries.any? + if mset_capable? && expires_in.nil? + failsafe :write_multi_entries do + redis.with { |c| c.mapped_mset(serialize_entries(entries, raw: options[:raw])) } + end + else + super + end + end + end + + # Truncate keys that exceed 1kB. 
+ def normalize_key(key, options) + truncate_key super&.b + end + + def truncate_key(key) + if key && key.bytesize > max_key_bytesize + suffix = ":sha2:#{::Digest::SHA2.hexdigest(key)}" + truncate_at = max_key_bytesize - suffix.bytesize + "#{key.byteslice(0, truncate_at)}#{suffix}" + else + key + end + end + + def deserialize_entry(payload, raw:) + if payload && raw + Entry.new(payload, compress: false) + else + super(payload) + end + end + + def serialize_entry(entry, raw: false) + if raw + entry.value.to_s + else + super(entry) + end + end + + def serialize_entries(entries, raw: false) + entries.transform_values do |entry| + serialize_entry entry, raw: raw + end + end + + def failsafe(method, returning: nil) + yield + rescue ::Redis::BaseError => e + handle_exception exception: e, method: method, returning: returning + returning + end + + def handle_exception(exception:, method:, returning:) + if @error_handler + @error_handler.(method: method, exception: exception, returning: returning) + end + rescue => failsafe + warn "RedisCacheStore ignored exception in handle_exception: #{failsafe.class}: #{failsafe.message}\n #{failsafe.backtrace.join("\n ")}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/strategy/local_cache.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/strategy/local_cache.rb new file mode 100644 index 0000000..96b4e1d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/strategy/local_cache.rb @@ -0,0 +1,209 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/inflections" +require "active_support/per_thread_registry" + +module ActiveSupport + module Cache + module Strategy + # Caches that implement LocalCache will be backed by an in-memory cache for the + # duration of a block. Repeated calls to the cache for the same key will hit the + # in-memory cache for faster access. + module LocalCache + autoload :Middleware, "active_support/cache/strategy/local_cache_middleware" + + # Class for storing and registering the local caches. + class LocalCacheRegistry # :nodoc: + extend ActiveSupport::PerThreadRegistry + + def initialize + @registry = {} + end + + def cache_for(local_cache_key) + @registry[local_cache_key] + end + + def set_cache_for(local_cache_key, value) + @registry[local_cache_key] = value + end + + def self.set_cache_for(l, v); instance.set_cache_for l, v; end + def self.cache_for(l); instance.cache_for l; end + end + + # Simple memory backed cache. This cache is not thread safe and is intended only + # for serving as a temporary memory cache for a single thread. + class LocalStore < Store + def initialize + super + @data = {} + end + + # Don't allow synchronizing since it isn't thread safe. + def synchronize # :nodoc: + yield + end + + def clear(options = nil) + @data.clear + end + + def read_entry(key, **options) + @data[key] + end + + def read_multi_entries(keys, **options) + values = {} + + keys.each do |name| + entry = read_entry(name, **options) + values[name] = entry.value if entry + end + + values + end + + def write_entry(key, entry, **options) + entry.dup_value! + @data[key] = entry + true + end + + def delete_entry(key, **options) + !!@data.delete(key) + end + + def fetch_entry(key, options = nil) # :nodoc: + entry = @data.fetch(key) { @data[key] = yield } + dup_entry = entry.dup + dup_entry&.dup_value! + dup_entry + end + end + + # Use a local cache for the duration of block. 
+ def with_local_cache + use_temporary_local_cache(LocalStore.new) { yield } + end + + # Middleware class can be inserted as a Rack handler to be local cache for the + # duration of request. + def middleware + @middleware ||= Middleware.new( + "ActiveSupport::Cache::Strategy::LocalCache", + local_cache_key) + end + + def clear(**options) # :nodoc: + return super unless cache = local_cache + cache.clear(options) + super + end + + def cleanup(**options) # :nodoc: + return super unless cache = local_cache + cache.clear + super + end + + def delete_matched(matcher, options = nil) # :nodoc: + return super unless cache = local_cache + cache.clear + super + end + + def increment(name, amount = 1, **options) # :nodoc: + return super unless local_cache + value = bypass_local_cache { super } + write_cache_value(name, value, **options) + value + end + + def decrement(name, amount = 1, **options) # :nodoc: + return super unless local_cache + value = bypass_local_cache { super } + write_cache_value(name, value, **options) + value + end + + private + def read_entry(key, **options) + if cache = local_cache + hit = true + value = cache.fetch_entry(key) do + hit = false + super + end + options[:event][:store] = cache.class.name if hit && options[:event] + value + else + super + end + end + + def read_multi_entries(keys, **options) + return super unless local_cache + + local_entries = local_cache.read_multi_entries(keys, **options) + missed_keys = keys - local_entries.keys + + if missed_keys.any? + local_entries.merge!(super(missed_keys, **options)) + else + local_entries + end + end + + def write_entry(key, entry, **options) + if options[:unless_exist] + local_cache.delete_entry(key, **options) if local_cache + else + local_cache.write_entry(key, entry, **options) if local_cache + end + + super + end + + def delete_entry(key, **options) + local_cache.delete_entry(key, **options) if local_cache + super + end + + def write_cache_value(name, value, **options) + name = normalize_key(name, options) + cache = local_cache + cache.mute do + if value + cache.write(name, value, options) + else + cache.delete(name, **options) + end + end + end + + def local_cache_key + @local_cache_key ||= "#{self.class.name.underscore}_local_cache_#{object_id}".gsub(/[\/-]/, "_").to_sym + end + + def local_cache + LocalCacheRegistry.cache_for(local_cache_key) + end + + def bypass_local_cache + use_temporary_local_cache(nil) { yield } + end + + def use_temporary_local_cache(temporary_cache) + save_cache = LocalCacheRegistry.cache_for(local_cache_key) + begin + LocalCacheRegistry.set_cache_for(local_cache_key, temporary_cache) + yield + ensure + LocalCacheRegistry.set_cache_for(local_cache_key, save_cache) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/strategy/local_cache_middleware.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/strategy/local_cache_middleware.rb new file mode 100644 index 0000000..62542bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/cache/strategy/local_cache_middleware.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "rack/body_proxy" +require "rack/utils" + +module ActiveSupport + module Cache + module Strategy + module LocalCache + #-- + # This class wraps up local storage for middlewares. Only the middleware method should + # construct them. 
+ class Middleware # :nodoc: + attr_reader :name, :local_cache_key + + def initialize(name, local_cache_key) + @name = name + @local_cache_key = local_cache_key + @app = nil + end + + def new(app) + @app = app + self + end + + def call(env) + LocalCacheRegistry.set_cache_for(local_cache_key, LocalStore.new) + response = @app.call(env) + response[2] = ::Rack::BodyProxy.new(response[2]) do + LocalCacheRegistry.set_cache_for(local_cache_key, nil) + end + cleanup_on_body_close = true + response + rescue Rack::Utils::InvalidParameterError + [400, {}, []] + ensure + LocalCacheRegistry.set_cache_for(local_cache_key, nil) unless + cleanup_on_body_close + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/callbacks.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/callbacks.rb new file mode 100644 index 0000000..1a55f8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/callbacks.rb @@ -0,0 +1,862 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/descendants_tracker" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/string/filters" +require "thread" + +module ActiveSupport + # Callbacks are code hooks that are run at key points in an object's life cycle. + # The typical use case is to have a base class define a set of callbacks + # relevant to the other functionality it supplies, so that subclasses can + # install callbacks that enhance or modify the base functionality without + # needing to override or redefine methods of the base class. + # + # Mixing in this module allows you to define the events in the object's + # life cycle that will support callbacks (via +ClassMethods.define_callbacks+), + # set the instance methods, procs, or callback objects to be called (via + # +ClassMethods.set_callback+), and run the installed callbacks at the + # appropriate times (via +run_callbacks+). + # + # By default callbacks are halted by throwing +:abort+. + # See +ClassMethods.define_callbacks+ for details. + # + # Three kinds of callbacks are supported: before callbacks, run before a + # certain event; after callbacks, run after the event; and around callbacks, + # blocks that surround the event, triggering it when they yield. Callback code + # can be contained in instance methods, procs or lambdas, or callback objects + # that respond to certain predetermined methods. See +ClassMethods.set_callback+ + # for details. + # + # class Record + # include ActiveSupport::Callbacks + # define_callbacks :save + # + # def save + # run_callbacks :save do + # puts "- save" + # end + # end + # end + # + # class PersonRecord < Record + # set_callback :save, :before, :saving_message + # def saving_message + # puts "saving..." + # end + # + # set_callback :save, :after do |object| + # puts "saved" + # end + # end + # + # person = PersonRecord.new + # person.save + # + # Output: + # saving... + # - save + # saved + module Callbacks + extend Concern + + included do + extend ActiveSupport::DescendantsTracker + class_attribute :__callbacks, instance_writer: false, default: {} + end + + CALLBACK_FILTER_TYPES = [:before, :after, :around] + + # Runs the callbacks for the given event. + # + # Calls the before and around callbacks in the order they were set, yields + # the block (if given one), and then runs the after callbacks in reverse + # order. 
+ # + # If the callback chain was halted, returns +false+. Otherwise returns the + # result of the block, +nil+ if no callbacks have been set, or +true+ + # if callbacks have been set but no block is given. + # + # run_callbacks :save do + # save + # end + # + #-- + # + # As this method is used in many places, and often wraps large portions of + # user code, it has an additional design goal of minimizing its impact on + # the visible call stack. An exception from inside a :before or :after + # callback can be as noisy as it likes -- but when control has passed + # smoothly through and into the supplied block, we want as little evidence + # as possible that we were here. + def run_callbacks(kind) + callbacks = __callbacks[kind.to_sym] + + if callbacks.empty? + yield if block_given? + else + env = Filters::Environment.new(self, false, nil) + next_sequence = callbacks.compile + + # Common case: no 'around' callbacks defined + if next_sequence.final? + next_sequence.invoke_before(env) + env.value = !env.halted && (!block_given? || yield) + next_sequence.invoke_after(env) + env.value + else + invoke_sequence = Proc.new do + skipped = nil + + while true + current = next_sequence + current.invoke_before(env) + if current.final? + env.value = !env.halted && (!block_given? || yield) + elsif current.skip?(env) + (skipped ||= []) << current + next_sequence = next_sequence.nested + next + else + next_sequence = next_sequence.nested + begin + target, block, method, *arguments = current.expand_call_template(env, invoke_sequence) + target.send(method, *arguments, &block) + ensure + next_sequence = current + end + end + current.invoke_after(env) + skipped.pop.invoke_after(env) while skipped&.first + break env.value + end + end + + invoke_sequence.call + end + end + end + + private + # A hook invoked every time a before callback is halted. + # This can be overridden in ActiveSupport::Callbacks implementors in order + # to provide better debugging/logging. + def halted_callback_hook(filter, name) + end + + module Conditionals # :nodoc: + class Value + def initialize(&block) + @block = block + end + def call(target, value); @block.call(value); end + end + end + + module Filters + Environment = Struct.new(:target, :halted, :value) + + class Before + def self.build(callback_sequence, user_callback, user_conditions, chain_config, filter, name) + halted_lambda = chain_config[:terminator] + + if user_conditions.any? + halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter, name) + else + halting(callback_sequence, user_callback, halted_lambda, filter, name) + end + end + + def self.halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter, name) + callback_sequence.before do |env| + target = env.target + value = env.value + halted = env.halted + + if !halted && user_conditions.all? 
{ |c| c.call(target, value) } + result_lambda = -> { user_callback.call target, value } + env.halted = halted_lambda.call(target, result_lambda) + if env.halted + target.send :halted_callback_hook, filter, name + end + end + + env + end + end + private_class_method :halting_and_conditional + + def self.halting(callback_sequence, user_callback, halted_lambda, filter, name) + callback_sequence.before do |env| + target = env.target + value = env.value + halted = env.halted + + unless halted + result_lambda = -> { user_callback.call target, value } + env.halted = halted_lambda.call(target, result_lambda) + if env.halted + target.send :halted_callback_hook, filter, name + end + end + + env + end + end + private_class_method :halting + end + + class After + def self.build(callback_sequence, user_callback, user_conditions, chain_config) + if chain_config[:skip_after_callbacks_if_terminated] + if user_conditions.any? + halting_and_conditional(callback_sequence, user_callback, user_conditions) + else + halting(callback_sequence, user_callback) + end + else + if user_conditions.any? + conditional callback_sequence, user_callback, user_conditions + else + simple callback_sequence, user_callback + end + end + end + + def self.halting_and_conditional(callback_sequence, user_callback, user_conditions) + callback_sequence.after do |env| + target = env.target + value = env.value + halted = env.halted + + if !halted && user_conditions.all? { |c| c.call(target, value) } + user_callback.call target, value + end + + env + end + end + private_class_method :halting_and_conditional + + def self.halting(callback_sequence, user_callback) + callback_sequence.after do |env| + unless env.halted + user_callback.call env.target, env.value + end + + env + end + end + private_class_method :halting + + def self.conditional(callback_sequence, user_callback, user_conditions) + callback_sequence.after do |env| + target = env.target + value = env.value + + if user_conditions.all? { |c| c.call(target, value) } + user_callback.call target, value + end + + env + end + end + private_class_method :conditional + + def self.simple(callback_sequence, user_callback) + callback_sequence.after do |env| + user_callback.call env.target, env.value + + env + end + end + private_class_method :simple + end + end + + class Callback #:nodoc:# + def self.build(chain, filter, kind, options) + if filter.is_a?(String) + raise ArgumentError, <<-MSG.squish + Passing string to define a callback is not supported. See the `.set_callback` + documentation to see supported values. 
+ MSG + end + + new chain.name, filter, kind, options, chain.config + end + + attr_accessor :kind, :name + attr_reader :chain_config + + def initialize(name, filter, kind, options, chain_config) + @chain_config = chain_config + @name = name + @kind = kind + @filter = filter + @key = compute_identifier filter + @if = check_conditionals(options[:if]) + @unless = check_conditionals(options[:unless]) + end + + def filter; @key; end + def raw_filter; @filter; end + + def merge_conditional_options(chain, if_option:, unless_option:) + options = { + if: @if.dup, + unless: @unless.dup + } + + options[:if].concat Array(unless_option) + options[:unless].concat Array(if_option) + + self.class.build chain, @filter, @kind, options + end + + def matches?(_kind, _filter) + @kind == _kind && filter == _filter + end + + def duplicates?(other) + case @filter + when Symbol + matches?(other.kind, other.filter) + else + false + end + end + + # Wraps code with filter + def apply(callback_sequence) + user_conditions = conditions_lambdas + user_callback = CallTemplate.build(@filter, self) + + case kind + when :before + Filters::Before.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config, @filter, name) + when :after + Filters::After.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config) + when :around + callback_sequence.around(user_callback, user_conditions) + end + end + + def current_scopes + Array(chain_config[:scope]).map { |s| public_send(s) } + end + + private + EMPTY_ARRAY = [].freeze + private_constant :EMPTY_ARRAY + + def check_conditionals(conditionals) + return EMPTY_ARRAY if conditionals.blank? + + conditionals = Array(conditionals) + if conditionals.any? { |c| c.is_a?(String) } + raise ArgumentError, <<-MSG.squish + Passing string to be evaluated in :if and :unless conditional + options is not supported. Pass a symbol for an instance method, + or a lambda, proc or block, instead. + MSG + end + + conditionals.freeze + end + + def compute_identifier(filter) + case filter + when ::Proc + filter.object_id + else + filter + end + end + + def conditions_lambdas + @if.map { |c| CallTemplate.build(c, self).make_lambda } + + @unless.map { |c| CallTemplate.build(c, self).inverted_lambda } + end + end + + # A future invocation of user-supplied code (either as a callback, + # or a condition filter). + class CallTemplate # :nodoc: + def initialize(target, method, arguments, block) + @override_target = target + @method_name = method + @arguments = arguments + @override_block = block + end + + # Return the parts needed to make this call, with the given + # input values. + # + # Returns an array of the form: + # + # [target, block, method, *arguments] + # + # This array can be used as such: + # + # target.send(method, *arguments, &block) + # + # The actual invocation is left up to the caller to minimize + # call stack pollution. + def expand(target, value, block) + expanded = [@override_target || target, @override_block || block, @method_name] + + @arguments.each do |arg| + case arg + when :value then expanded << value + when :target then expanded << target + when :block then expanded << (block || raise(ArgumentError)) + end + end + + expanded + end + + # Return a lambda that will make this call when given the input + # values. 
+ def make_lambda + lambda do |target, value, &block| + target, block, method, *arguments = expand(target, value, block) + target.send(method, *arguments, &block) + end + end + + # Return a lambda that will make this call when given the input + # values, but then return the boolean inverse of that result. + def inverted_lambda + lambda do |target, value, &block| + target, block, method, *arguments = expand(target, value, block) + ! target.send(method, *arguments, &block) + end + end + + # Filters support: + # + # Symbols:: A method to call. + # Procs:: A proc to call with the object. + # Objects:: An object with a before_foo method on it to call. + # + # All of these objects are converted into a CallTemplate and handled + # the same after this point. + def self.build(filter, callback) + case filter + when Symbol + new(nil, filter, [], nil) + when Conditionals::Value + new(filter, :call, [:target, :value], nil) + when ::Proc + if filter.arity > 1 + new(nil, :instance_exec, [:target, :block], filter) + elsif filter.arity > 0 + new(nil, :instance_exec, [:target], filter) + else + new(nil, :instance_exec, [], filter) + end + else + method_to_call = callback.current_scopes.join("_") + + new(filter, method_to_call, [:target], nil) + end + end + end + + # Execute before and after filters in a sequence instead of + # chaining them with nested lambda calls, see: + # https://github.com/rails/rails/issues/18011 + class CallbackSequence # :nodoc: + def initialize(nested = nil, call_template = nil, user_conditions = nil) + @nested = nested + @call_template = call_template + @user_conditions = user_conditions + + @before = [] + @after = [] + end + + def before(&before) + @before.unshift(before) + self + end + + def after(&after) + @after.push(after) + self + end + + def around(call_template, user_conditions) + CallbackSequence.new(self, call_template, user_conditions) + end + + def skip?(arg) + arg.halted || !@user_conditions.all? { |c| c.call(arg.target, arg.value) } + end + + attr_reader :nested + + def final? 
+ !@call_template + end + + def expand_call_template(arg, block) + @call_template.expand(arg.target, arg.value, block) + end + + def invoke_before(arg) + @before.each { |b| b.call(arg) } + end + + def invoke_after(arg) + @after.each { |a| a.call(arg) } + end + end + + class CallbackChain #:nodoc:# + include Enumerable + + attr_reader :name, :config + + def initialize(name, config) + @name = name + @config = { + scope: [:kind], + terminator: default_terminator + }.merge!(config) + @chain = [] + @callbacks = nil + @mutex = Mutex.new + end + + def each(&block); @chain.each(&block); end + def index(o); @chain.index(o); end + def empty?; @chain.empty?; end + + def insert(index, o) + @callbacks = nil + @chain.insert(index, o) + end + + def delete(o) + @callbacks = nil + @chain.delete(o) + end + + def clear + @callbacks = nil + @chain.clear + self + end + + def initialize_copy(other) + @callbacks = nil + @chain = other.chain.dup + @mutex = Mutex.new + end + + def compile + @callbacks || @mutex.synchronize do + final_sequence = CallbackSequence.new + @callbacks ||= @chain.reverse.inject(final_sequence) do |callback_sequence, callback| + callback.apply callback_sequence + end + end + end + + def append(*callbacks) + callbacks.each { |c| append_one(c) } + end + + def prepend(*callbacks) + callbacks.each { |c| prepend_one(c) } + end + + protected + attr_reader :chain + + private + def append_one(callback) + @callbacks = nil + remove_duplicates(callback) + @chain.push(callback) + end + + def prepend_one(callback) + @callbacks = nil + remove_duplicates(callback) + @chain.unshift(callback) + end + + def remove_duplicates(callback) + @callbacks = nil + @chain.delete_if { |c| callback.duplicates?(c) } + end + + def default_terminator + Proc.new do |target, result_lambda| + terminate = true + catch(:abort) do + result_lambda.call + terminate = false + end + terminate + end + end + end + + module ClassMethods + def normalize_callback_params(filters, block) # :nodoc: + type = CALLBACK_FILTER_TYPES.include?(filters.first) ? filters.shift : :before + options = filters.extract_options! + filters.unshift(block) if block + [type, filters, options.dup] + end + + # This is used internally to append, prepend and skip callbacks to the + # CallbackChain. + def __update_callbacks(name) #:nodoc: + ([self] + ActiveSupport::DescendantsTracker.descendants(self)).reverse_each do |target| + chain = target.get_callbacks name + yield target, chain.dup + end + end + + # Install a callback for the given event. + # + # set_callback :save, :before, :before_method + # set_callback :save, :after, :after_method, if: :condition + # set_callback :save, :around, ->(r, block) { stuff; result = block.call; stuff } + # + # The second argument indicates whether the callback is to be run +:before+, + # +:after+, or +:around+ the event. If omitted, +:before+ is assumed. This + # means the first example above can also be written as: + # + # set_callback :save, :before_method + # + # The callback can be specified as a symbol naming an instance method; as a + # proc, lambda, or block; or as an object that responds to a certain method + # determined by the :scope argument to +define_callbacks+. + # + # If a proc, lambda, or block is given, its body is evaluated in the context + # of the current object. It can also optionally accept the current object as + # an argument. + # + # Before and around callbacks are called in the order that they are set; + # after callbacks are called in the reverse order. 
+ # + # Around callbacks can access the return value from the event, if it + # wasn't halted, from the +yield+ call. + # + # ===== Options + # + # * :if - A symbol or an array of symbols, each naming an instance + # method or a proc; the callback will be called only when they all return + # a true value. + # + # If a proc is given, its body is evaluated in the context of the + # current object. It can also optionally accept the current object as + # an argument. + # * :unless - A symbol or an array of symbols, each naming an + # instance method or a proc; the callback will be called only when they + # all return a false value. + # + # If a proc is given, its body is evaluated in the context of the + # current object. It can also optionally accept the current object as + # an argument. + # * :prepend - If +true+, the callback will be prepended to the + # existing chain rather than appended. + def set_callback(name, *filter_list, &block) + type, filters, options = normalize_callback_params(filter_list, block) + + self_chain = get_callbacks name + mapped = filters.map do |filter| + Callback.build(self_chain, filter, type, options) + end + + __update_callbacks(name) do |target, chain| + options[:prepend] ? chain.prepend(*mapped) : chain.append(*mapped) + target.set_callbacks name, chain + end + end + + # Skip a previously set callback. Like +set_callback+, :if or + # :unless options may be passed in order to control when the + # callback is skipped. + # + # class Writer < Person + # skip_callback :validate, :before, :check_membership, if: -> { age > 18 } + # end + # + # An ArgumentError will be raised if the callback has not + # already been set (unless the :raise option is set to false). + def skip_callback(name, *filter_list, &block) + type, filters, options = normalize_callback_params(filter_list, block) + + options[:raise] = true unless options.key?(:raise) + + __update_callbacks(name) do |target, chain| + filters.each do |filter| + callback = chain.find { |c| c.matches?(type, filter) } + + if !callback && options[:raise] + raise ArgumentError, "#{type.to_s.capitalize} #{name} callback #{filter.inspect} has not been defined" + end + + if callback && (options.key?(:if) || options.key?(:unless)) + new_callback = callback.merge_conditional_options(chain, if_option: options[:if], unless_option: options[:unless]) + chain.insert(chain.index(callback), new_callback) + end + + chain.delete(callback) + end + target.set_callbacks name, chain + end + end + + # Remove all set callbacks for the given event. + def reset_callbacks(name) + callbacks = get_callbacks name + + ActiveSupport::DescendantsTracker.descendants(self).each do |target| + chain = target.get_callbacks(name).dup + callbacks.each { |c| chain.delete(c) } + target.set_callbacks name, chain + end + + set_callbacks(name, callbacks.dup.clear) + end + + # Define sets of events in the object life cycle that support callbacks. + # + # define_callbacks :validate + # define_callbacks :initialize, :save, :destroy + # + # ===== Options + # + # * :terminator - Determines when a before filter will halt the + # callback chain, preventing following before and around callbacks from + # being called and the event from being triggered. + # This should be a lambda to be executed. + # The current object and the result lambda of the callback will be provided + # to the terminator lambda. 
+ # + # define_callbacks :validate, terminator: ->(target, result_lambda) { result_lambda.call == false } + # + # In this example, if any before validate callbacks returns +false+, + # any successive before and around callback is not executed. + # + # The default terminator halts the chain when a callback throws +:abort+. + # + # * :skip_after_callbacks_if_terminated - Determines if after + # callbacks should be terminated by the :terminator option. By + # default after callbacks are executed no matter if callback chain was + # terminated or not. This option has no effect if :terminator + # option is set to +nil+. + # + # * :scope - Indicates which methods should be executed when an + # object is used as a callback. + # + # class Audit + # def before(caller) + # puts 'Audit: before is called' + # end + # + # def before_save(caller) + # puts 'Audit: before_save is called' + # end + # end + # + # class Account + # include ActiveSupport::Callbacks + # + # define_callbacks :save + # set_callback :save, :before, Audit.new + # + # def save + # run_callbacks :save do + # puts 'save in main' + # end + # end + # end + # + # In the above case whenever you save an account the method + # Audit#before will be called. On the other hand + # + # define_callbacks :save, scope: [:kind, :name] + # + # would trigger Audit#before_save instead. That's constructed + # by calling #{kind}_#{name} on the given instance. In this + # case "kind" is "before" and "name" is "save". In this context +:kind+ + # and +:name+ have special meanings: +:kind+ refers to the kind of + # callback (before/after/around) and +:name+ refers to the method on + # which callbacks are being defined. + # + # A declaration like + # + # define_callbacks :save, scope: [:name] + # + # would call Audit#save. + # + # ===== Notes + # + # +names+ passed to +define_callbacks+ must not end with + # !, ? or =. + # + # Calling +define_callbacks+ multiple times with the same +names+ will + # overwrite previous callbacks registered with +set_callback+. + def define_callbacks(*names) + options = names.extract_options! 
+ + names.each do |name| + name = name.to_sym + + ([self] + ActiveSupport::DescendantsTracker.descendants(self)).each do |target| + target.set_callbacks name, CallbackChain.new(name, options) + end + + module_eval <<-RUBY, __FILE__, __LINE__ + 1 + def _run_#{name}_callbacks(&block) + run_callbacks #{name.inspect}, &block + end + + def self._#{name}_callbacks + get_callbacks(#{name.inspect}) + end + + def self._#{name}_callbacks=(value) + set_callbacks(#{name.inspect}, value) + end + + def _#{name}_callbacks + __callbacks[#{name.inspect}] + end + RUBY + end + end + + protected + def get_callbacks(name) # :nodoc: + __callbacks[name.to_sym] + end + + if Module.instance_method(:method_defined?).arity == 1 # Ruby 2.5 and older + def set_callbacks(name, callbacks) # :nodoc: + self.__callbacks = __callbacks.merge(name.to_sym => callbacks) + end + else # Ruby 2.6 and newer + def set_callbacks(name, callbacks) # :nodoc: + unless singleton_class.method_defined?(:__callbacks, false) + self.__callbacks = __callbacks.dup + end + self.__callbacks[name.to_sym] = callbacks + self.__callbacks + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concern.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concern.rb new file mode 100644 index 0000000..e88862b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concern.rb @@ -0,0 +1,215 @@ +# frozen_string_literal: true + +module ActiveSupport + # A typical module looks like this: + # + # module M + # def self.included(base) + # base.extend ClassMethods + # base.class_eval do + # scope :disabled, -> { where(disabled: true) } + # end + # end + # + # module ClassMethods + # ... + # end + # end + # + # By using ActiveSupport::Concern the above module could instead be + # written as: + # + # require "active_support/concern" + # + # module M + # extend ActiveSupport::Concern + # + # included do + # scope :disabled, -> { where(disabled: true) } + # end + # + # class_methods do + # ... + # end + # end + # + # Moreover, it gracefully handles module dependencies. Given a +Foo+ module + # and a +Bar+ module which depends on the former, we would typically write the + # following: + # + # module Foo + # def self.included(base) + # base.class_eval do + # def self.method_injected_by_foo + # ... + # end + # end + # end + # end + # + # module Bar + # def self.included(base) + # base.method_injected_by_foo + # end + # end + # + # class Host + # include Foo # We need to include this dependency for Bar + # include Bar # Bar is the module that Host really needs + # end + # + # But why should +Host+ care about +Bar+'s dependencies, namely +Foo+? We + # could try to hide these from +Host+ directly including +Foo+ in +Bar+: + # + # module Bar + # include Foo + # def self.included(base) + # base.method_injected_by_foo + # end + # end + # + # class Host + # include Bar + # end + # + # Unfortunately this won't work, since when +Foo+ is included, its base + # is the +Bar+ module, not the +Host+ class. With ActiveSupport::Concern, + # module dependencies are properly resolved: + # + # require "active_support/concern" + # + # module Foo + # extend ActiveSupport::Concern + # included do + # def self.method_injected_by_foo + # ... 
+ # end + # end + # end + # + # module Bar + # extend ActiveSupport::Concern + # include Foo + # + # included do + # self.method_injected_by_foo + # end + # end + # + # class Host + # include Bar # It works, now Bar takes care of its dependencies + # end + # + # === Prepending concerns + # + # Just like include, concerns also support prepend with a corresponding + # prepended do callback. module ClassMethods or class_methods do are + # prepended as well. + # + # prepend is also used for any dependencies. + module Concern + class MultipleIncludedBlocks < StandardError #:nodoc: + def initialize + super "Cannot define multiple 'included' blocks for a Concern" + end + end + + class MultiplePrependBlocks < StandardError #:nodoc: + def initialize + super "Cannot define multiple 'prepended' blocks for a Concern" + end + end + + def self.extended(base) #:nodoc: + base.instance_variable_set(:@_dependencies, []) + end + + def append_features(base) #:nodoc: + if base.instance_variable_defined?(:@_dependencies) + base.instance_variable_get(:@_dependencies) << self + false + else + return false if base < self + @_dependencies.each { |dep| base.include(dep) } + super + base.extend const_get(:ClassMethods) if const_defined?(:ClassMethods) + base.class_eval(&@_included_block) if instance_variable_defined?(:@_included_block) + end + end + + def prepend_features(base) #:nodoc: + if base.instance_variable_defined?(:@_dependencies) + base.instance_variable_get(:@_dependencies).unshift self + false + else + return false if base < self + @_dependencies.each { |dep| base.prepend(dep) } + super + base.singleton_class.prepend const_get(:ClassMethods) if const_defined?(:ClassMethods) + base.class_eval(&@_prepended_block) if instance_variable_defined?(:@_prepended_block) + end + end + + # Evaluate given block in context of base class, + # so that you can write class macros here. + # When you define more than one +included+ block, it raises an exception. + def included(base = nil, &block) + if base.nil? + if instance_variable_defined?(:@_included_block) + if @_included_block.source_location != block.source_location + raise MultipleIncludedBlocks + end + else + @_included_block = block + end + else + super + end + end + + # Evaluate given block in context of base class, + # so that you can write class macros here. + # When you define more than one +prepended+ block, it raises an exception. + def prepended(base = nil, &block) + if base.nil? + if instance_variable_defined?(:@_prepended_block) + if @_prepended_block.source_location != block.source_location + raise MultiplePrependBlocks + end + else + @_prepended_block = block + end + else + super + end + end + + # Define class methods from given block. + # You can define private class methods as well. + # + # module Example + # extend ActiveSupport::Concern + # + # class_methods do + # def foo; puts 'foo'; end + # + # private + # def bar; puts 'bar'; end + # end + # end + # + # class Buzz + # include Example + # end + # + # Buzz.foo # => "foo" + # Buzz.bar # => private method 'bar' called for Buzz:Class(NoMethodError) + def class_methods(&class_methods_module_definition) + mod = const_defined?(:ClassMethods, false) ? 
+ const_get(:ClassMethods) : + const_set(:ClassMethods, Module.new) + + mod.module_eval(&class_methods_module_definition) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concurrency/load_interlock_aware_monitor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concurrency/load_interlock_aware_monitor.rb new file mode 100644 index 0000000..480c34c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concurrency/load_interlock_aware_monitor.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require "monitor" + +module ActiveSupport + module Concurrency + # A monitor that will permit dependency loading while blocked waiting for + # the lock. + class LoadInterlockAwareMonitor < Monitor + EXCEPTION_NEVER = { Exception => :never }.freeze + EXCEPTION_IMMEDIATE = { Exception => :immediate }.freeze + private_constant :EXCEPTION_NEVER, :EXCEPTION_IMMEDIATE + + # Enters an exclusive section, but allows dependency loading while blocked + def mon_enter + mon_try_enter || + ActiveSupport::Dependencies.interlock.permit_concurrent_loads { super } + end + + def synchronize + Thread.handle_interrupt(EXCEPTION_NEVER) do + mon_enter + + begin + Thread.handle_interrupt(EXCEPTION_IMMEDIATE) do + yield + end + ensure + mon_exit + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concurrency/share_lock.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concurrency/share_lock.rb new file mode 100644 index 0000000..eae7d44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/concurrency/share_lock.rb @@ -0,0 +1,226 @@ +# frozen_string_literal: true + +require "thread" +require "monitor" + +module ActiveSupport + module Concurrency + # A share/exclusive lock, otherwise known as a read/write lock. + # + # https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock + class ShareLock + include MonitorMixin + + # We track Thread objects, instead of just using counters, because + # we need exclusive locks to be reentrant, and we need to be able + # to upgrade share locks to exclusive. + + def raw_state # :nodoc: + synchronize do + threads = @sleeping.keys | @sharing.keys | @waiting.keys + threads |= [@exclusive_thread] if @exclusive_thread + + data = {} + + threads.each do |thread| + purpose, compatible = @waiting[thread] + + data[thread] = { + thread: thread, + sharing: @sharing[thread], + exclusive: @exclusive_thread == thread, + purpose: purpose, + compatible: compatible, + waiting: !!@waiting[thread], + sleeper: @sleeping[thread], + } + end + + # NB: Yields while holding our *internal* synchronize lock, + # which is supposed to be used only for a few instructions at + # a time. This allows the caller to inspect additional state + # without things changing out from underneath, but would have + # disastrous effects upon normal operation. Fortunately, this + # method is only intended to be called when things have + # already gone wrong. + yield data + end + end + + def initialize + super() + + @cv = new_cond + + @sharing = Hash.new(0) + @waiting = {} + @sleeping = {} + @exclusive_thread = nil + @exclusive_depth = 0 + end + + # Returns false if +no_wait+ is set and the lock is not + # immediately available. Otherwise, returns true after the lock + # has been acquired. 
+ # + # +purpose+ and +compatible+ work together; while this thread is + # waiting for the exclusive lock, it will yield its share (if any) + # to any other attempt whose +purpose+ appears in this attempt's + # +compatible+ list. This allows a "loose" upgrade, which, being + # less strict, prevents some classes of deadlocks. + # + # For many resources, loose upgrades are sufficient: if a thread + # is awaiting a lock, it is not running any other code. With + # +purpose+ matching, it is possible to yield only to other + # threads whose activity will not interfere. + def start_exclusive(purpose: nil, compatible: [], no_wait: false) + synchronize do + unless @exclusive_thread == Thread.current + if busy_for_exclusive?(purpose) + return false if no_wait + + yield_shares(purpose: purpose, compatible: compatible, block_share: true) do + wait_for(:start_exclusive) { busy_for_exclusive?(purpose) } + end + end + @exclusive_thread = Thread.current + end + @exclusive_depth += 1 + + true + end + end + + # Relinquish the exclusive lock. Must only be called by the thread + # that called start_exclusive (and currently holds the lock). + def stop_exclusive(compatible: []) + synchronize do + raise "invalid unlock" if @exclusive_thread != Thread.current + + @exclusive_depth -= 1 + if @exclusive_depth == 0 + @exclusive_thread = nil + + if eligible_waiters?(compatible) + yield_shares(compatible: compatible, block_share: true) do + wait_for(:stop_exclusive) { @exclusive_thread || eligible_waiters?(compatible) } + end + end + @cv.broadcast + end + end + end + + def start_sharing + synchronize do + if @sharing[Thread.current] > 0 || @exclusive_thread == Thread.current + # We already hold a lock; nothing to wait for + elsif @waiting[Thread.current] + # We're nested inside a +yield_shares+ call: we'll resume as + # soon as there isn't an exclusive lock in our way + wait_for(:start_sharing) { @exclusive_thread } + else + # This is an initial / outermost share call: any outstanding + # requests for an exclusive lock get to go first + wait_for(:start_sharing) { busy_for_sharing?(false) } + end + @sharing[Thread.current] += 1 + end + end + + def stop_sharing + synchronize do + if @sharing[Thread.current] > 1 + @sharing[Thread.current] -= 1 + else + @sharing.delete Thread.current + @cv.broadcast + end + end + end + + # Execute the supplied block while holding the Exclusive lock. If + # +no_wait+ is set and the lock is not immediately available, + # returns +nil+ without yielding. Otherwise, returns the result of + # the block. + # + # See +start_exclusive+ for other options. + def exclusive(purpose: nil, compatible: [], after_compatible: [], no_wait: false) + if start_exclusive(purpose: purpose, compatible: compatible, no_wait: no_wait) + begin + yield + ensure + stop_exclusive(compatible: after_compatible) + end + end + end + + # Execute the supplied block while holding the Share lock. + def sharing + start_sharing + begin + yield + ensure + stop_sharing + end + end + + # Temporarily give up all held Share locks while executing the + # supplied block, allowing any +compatible+ exclusive lock request + # to proceed. 
+ def yield_shares(purpose: nil, compatible: [], block_share: false) + loose_shares = previous_wait = nil + synchronize do + if loose_shares = @sharing.delete(Thread.current) + if previous_wait = @waiting[Thread.current] + purpose = nil unless purpose == previous_wait[0] + compatible &= previous_wait[1] + end + compatible |= [false] unless block_share + @waiting[Thread.current] = [purpose, compatible] + end + + @cv.broadcast + end + + begin + yield + ensure + synchronize do + wait_for(:yield_shares) { @exclusive_thread && @exclusive_thread != Thread.current } + + if previous_wait + @waiting[Thread.current] = previous_wait + else + @waiting.delete Thread.current + end + @sharing[Thread.current] = loose_shares if loose_shares + end + end + end + + private + # Must be called within synchronize + def busy_for_exclusive?(purpose) + busy_for_sharing?(purpose) || + @sharing.size > (@sharing[Thread.current] > 0 ? 1 : 0) + end + + def busy_for_sharing?(purpose) + (@exclusive_thread && @exclusive_thread != Thread.current) || + @waiting.any? { |t, (_, c)| t != Thread.current && !c.include?(purpose) } + end + + def eligible_waiters?(compatible) + @waiting.any? { |t, (p, _)| compatible.include?(p) && @waiting.all? { |t2, (_, c2)| t == t2 || c2.include?(p) } } + end + + def wait_for(method) + @sleeping[Thread.current] = method + @cv.wait_while { yield } + ensure + @sleeping.delete Thread.current + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/configurable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/configurable.rb new file mode 100644 index 0000000..92bd411 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/configurable.rb @@ -0,0 +1,146 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/ordered_options" + +module ActiveSupport + # Configurable provides a config method to store and retrieve + # configuration options as an OrderedOptions. + module Configurable + extend ActiveSupport::Concern + + class Configuration < ActiveSupport::InheritableOptions + def compile_methods! + self.class.compile_methods!(keys) + end + + # Compiles reader methods so we don't have to go through method_missing. + def self.compile_methods!(keys) + keys.reject { |m| method_defined?(m) }.each do |key| + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{key}; _get(#{key.inspect}); end + RUBY + end + end + end + + module ClassMethods + def config + @_config ||= if respond_to?(:superclass) && superclass.respond_to?(:config) + superclass.config.inheritable_copy + else + # create a new "anonymous" class that will host the compiled reader methods + Class.new(Configuration).new + end + end + + def configure + yield config + end + + # Allows you to add shortcut so that you don't have to refer to attribute + # through config. Also look at the example for config to contrast. + # + # Defines both class and instance config accessors. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access + # end + # + # User.allowed_access # => nil + # User.allowed_access = false + # User.allowed_access # => false + # + # user = User.new + # user.allowed_access # => false + # user.allowed_access = true + # user.allowed_access # => true + # + # User.allowed_access # => false + # + # The attribute name must be a valid method name in Ruby. 
+ # + # class User + # include ActiveSupport::Configurable + # config_accessor :"1_Badname" + # end + # # => NameError: invalid config attribute name + # + # To omit the instance writer method, pass instance_writer: false. + # To omit the instance reader method, pass instance_reader: false. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access, instance_reader: false, instance_writer: false + # end + # + # User.allowed_access = false + # User.allowed_access # => false + # + # User.new.allowed_access = true # => NoMethodError + # User.new.allowed_access # => NoMethodError + # + # Or pass instance_accessor: false, to omit both instance methods. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :allowed_access, instance_accessor: false + # end + # + # User.allowed_access = false + # User.allowed_access # => false + # + # User.new.allowed_access = true # => NoMethodError + # User.new.allowed_access # => NoMethodError + # + # Also you can pass a block to set up the attribute with a default value. + # + # class User + # include ActiveSupport::Configurable + # config_accessor :hair_colors do + # [:brown, :black, :blonde, :red] + # end + # end + # + # User.hair_colors # => [:brown, :black, :blonde, :red] + def config_accessor(*names, instance_reader: true, instance_writer: true, instance_accessor: true) # :doc: + names.each do |name| + raise NameError.new("invalid config attribute name") unless /\A[_A-Za-z]\w*\z/.match?(name) + + reader, reader_line = "def #{name}; config.#{name}; end", __LINE__ + writer, writer_line = "def #{name}=(value); config.#{name} = value; end", __LINE__ + + singleton_class.class_eval reader, __FILE__, reader_line + singleton_class.class_eval writer, __FILE__, writer_line + + if instance_accessor + class_eval reader, __FILE__, reader_line if instance_reader + class_eval writer, __FILE__, writer_line if instance_writer + end + send("#{name}=", yield) if block_given? + end + end + private :config_accessor + end + + # Reads and writes attributes from a configuration OrderedOptions. + # + # require "active_support/configurable" + # + # class User + # include ActiveSupport::Configurable + # end + # + # user = User.new + # + # user.config.allowed_access = true + # user.config.level = 1 + # + # user.config.allowed_access # => true + # user.config.level # => 1 + def config + @_config ||= self.class.config.inheritable_copy + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/configuration_file.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/configuration_file.rb new file mode 100644 index 0000000..024f0e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/configuration_file.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +module ActiveSupport + # Reads a YAML configuration file, evaluating any ERB, then + # parsing the resulting YAML. + # + # Warns in case of YAML confusing characters, like invisible + # non-breaking spaces. 
+ class ConfigurationFile # :nodoc: + class FormatError < StandardError; end + + def initialize(content_path) + @content_path = content_path.to_s + @content = read content_path + end + + def self.parse(content_path, **options) + new(content_path).parse(**options) + end + + def parse(context: nil, **options) + source = render(context) + if YAML.respond_to?(:unsafe_load) + YAML.unsafe_load(source, **options) || {} + else + YAML.load(source, **options) || {} + end + rescue Psych::SyntaxError => error + raise "YAML syntax error occurred while parsing #{@content_path}. " \ + "Please note that YAML must be consistently indented using spaces. Tabs are not allowed. " \ + "Error: #{error.message}" + end + + private + def read(content_path) + require "yaml" + require "erb" + + File.read(content_path).tap do |content| + if content.include?("\u00A0") + warn "File contains invisible non-breaking spaces, you may want to remove those" + end + end + end + + def render(context) + erb = ERB.new(@content).tap { |e| e.filename = @content_path } + context ? erb.result(context) : erb.result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext.rb new file mode 100644 index 0000000..3f5d081 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +Dir.glob(File.expand_path("core_ext/*.rb", __dir__)).sort.each do |path| + require path +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array.rb new file mode 100644 index 0000000..88b6567 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/array/access" +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/array/extract" +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/array/grouping" +require "active_support/core_ext/array/inquiry" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/access.rb new file mode 100644 index 0000000..ea01e58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/access.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +class Array + # Returns the tail of the array from +position+. + # + # %w( a b c d ).from(0) # => ["a", "b", "c", "d"] + # %w( a b c d ).from(2) # => ["c", "d"] + # %w( a b c d ).from(10) # => [] + # %w().from(0) # => [] + # %w( a b c d ).from(-2) # => ["c", "d"] + # %w( a b c ).from(-10) # => [] + def from(position) + self[position, length] || [] + end + + # Returns the beginning of the array up to +position+. + # + # %w( a b c d ).to(0) # => ["a"] + # %w( a b c d ).to(2) # => ["a", "b", "c"] + # %w( a b c d ).to(10) # => ["a", "b", "c", "d"] + # %w().to(0) # => [] + # %w( a b c d ).to(-2) # => ["a", "b", "c"] + # %w( a b c ).to(-10) # => [] + def to(position) + if position >= 0 + take position + 1 + else + self[0..position] + end + end + + # Returns a new array that includes the passed elements. 
+ # + # [ 1, 2, 3 ].including(4, 5) # => [ 1, 2, 3, 4, 5 ] + # [ [ 0, 1 ] ].including([ [ 1, 0 ] ]) # => [ [ 0, 1 ], [ 1, 0 ] ] + def including(*elements) + self + elements.flatten(1) + end + + # Returns a copy of the Array excluding the specified elements. + # + # ["David", "Rafael", "Aaron", "Todd"].excluding("Aaron", "Todd") # => ["David", "Rafael"] + # [ [ 0, 1 ], [ 1, 0 ] ].excluding([ [ 1, 0 ] ]) # => [ [ 0, 1 ] ] + # + # Note: This is an optimization of Enumerable#excluding that uses Array#- + # instead of Array#reject for performance reasons. + def excluding(*elements) + self - elements.flatten(1) + end + + # Alias for #excluding. + def without(*elements) + excluding(*elements) + end + + # Equal to self[1]. + # + # %w( a b c d e ).second # => "b" + def second + self[1] + end + + # Equal to self[2]. + # + # %w( a b c d e ).third # => "c" + def third + self[2] + end + + # Equal to self[3]. + # + # %w( a b c d e ).fourth # => "d" + def fourth + self[3] + end + + # Equal to self[4]. + # + # %w( a b c d e ).fifth # => "e" + def fifth + self[4] + end + + # Equal to self[41]. Also known as accessing "the reddit". + # + # (1..42).to_a.forty_two # => 42 + def forty_two + self[41] + end + + # Equal to self[-3]. + # + # %w( a b c d e ).third_to_last # => "c" + def third_to_last + self[-3] + end + + # Equal to self[-2]. + # + # %w( a b c d e ).second_to_last # => "d" + def second_to_last + self[-2] + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/conversions.rb new file mode 100644 index 0000000..0780c4e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/conversions.rb @@ -0,0 +1,213 @@ +# frozen_string_literal: true + +require "active_support/xml_mini" +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" + +class Array + # Converts the array to a comma-separated sentence where the last element is + # joined by the connector word. + # + # You can pass the following options to change the default behavior. If you + # pass an option key that doesn't exist in the list below, it will raise an + # ArgumentError. + # + # ==== Options + # + # * :words_connector - The sign or word used to join the elements + # in arrays with two or more elements (default: ", "). + # * :two_words_connector - The sign or word used to join the elements + # in arrays with two elements (default: " and "). + # * :last_word_connector - The sign or word used to join the last element + # in arrays with three or more elements (default: ", and "). + # * :locale - If +i18n+ is available, you can set a locale and use + # the connector options defined on the 'support.array' namespace in the + # corresponding dictionary file. + # + # ==== Examples + # + # [].to_sentence # => "" + # ['one'].to_sentence # => "one" + # ['one', 'two'].to_sentence # => "one and two" + # ['one', 'two', 'three'].to_sentence # => "one, two, and three" + # + # ['one', 'two'].to_sentence(passing: 'invalid option') + # # => ArgumentError: Unknown key: :passing. 
Valid keys are: :words_connector, :two_words_connector, :last_word_connector, :locale + # + # ['one', 'two'].to_sentence(two_words_connector: '-') + # # => "one-two" + # + # ['one', 'two', 'three'].to_sentence(words_connector: ' or ', last_word_connector: ' or at least ') + # # => "one or two or at least three" + # + # Using :locale option: + # + # # Given this locale dictionary: + # # + # # es: + # # support: + # # array: + # # words_connector: " o " + # # two_words_connector: " y " + # # last_word_connector: " o al menos " + # + # ['uno', 'dos'].to_sentence(locale: :es) + # # => "uno y dos" + # + # ['uno', 'dos', 'tres'].to_sentence(locale: :es) + # # => "uno o dos o al menos tres" + def to_sentence(options = {}) + options.assert_valid_keys(:words_connector, :two_words_connector, :last_word_connector, :locale) + + default_connectors = { + words_connector: ", ", + two_words_connector: " and ", + last_word_connector: ", and " + } + if defined?(I18n) + i18n_connectors = I18n.translate(:'support.array', locale: options[:locale], default: {}) + default_connectors.merge!(i18n_connectors) + end + options = default_connectors.merge!(options) + + case length + when 0 + +"" + when 1 + +"#{self[0]}" + when 2 + +"#{self[0]}#{options[:two_words_connector]}#{self[1]}" + else + +"#{self[0...-1].join(options[:words_connector])}#{options[:last_word_connector]}#{self[-1]}" + end + end + + # Extends Array#to_s to convert a collection of elements into a + # comma separated id list if :db argument is given as the format. + # + # Blog.all.to_formatted_s(:db) # => "1,2,3" + # Blog.none.to_formatted_s(:db) # => "null" + # [1,2].to_formatted_s # => "[1, 2]" + def to_formatted_s(format = :default) + case format + when :db + if empty? + "null" + else + collect(&:id).join(",") + end + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Returns a string that represents the array in XML by invoking +to_xml+ + # on each element. Active Record collections delegate their representation + # in XML to this method. + # + # All elements are expected to respond to +to_xml+, if any of them does + # not then an exception is raised. + # + # The root node reflects the class name of the first element in plural + # if all elements belong to the same type and that's not Hash: + # + # customer.projects.to_xml + # + # + # + # + # 20000.0 + # 1567 + # 2008-04-09 + # ... + # + # + # 57230.0 + # 1567 + # 2008-04-15 + # ... + # + # + # + # Otherwise the root element is "objects": + # + # [{ foo: 1, bar: 2}, { baz: 3}].to_xml + # + # + # + # + # 2 + # 1 + # + # + # 3 + # + # + # + # If the collection is empty the root element is "nil-classes" by default: + # + # [].to_xml + # + # + # + # + # To ensure a meaningful root element use the :root option: + # + # customer_with_no_projects.projects.to_xml(root: 'projects') + # + # + # + # + # By default name of the node for the children of root is root.singularize. + # You can change it with the :children option. + # + # The +options+ hash is passed downwards: + # + # Message.all.to_xml(skip_types: true) + # + # + # + # + # 2008-03-07T09:58:18+01:00 + # 1 + # 1 + # 2008-03-07T09:58:18+01:00 + # 1 + # + # + # + def to_xml(options = {}) + require "active_support/builder" unless defined?(Builder::XmlMarkup) + + options = options.dup + options[:indent] ||= 2 + options[:builder] ||= Builder::XmlMarkup.new(indent: options[:indent]) + options[:root] ||= \ + if first.class != Hash && all? 
{ |e| e.is_a?(first.class) } + underscored = ActiveSupport::Inflector.underscore(first.class.name) + ActiveSupport::Inflector.pluralize(underscored).tr("/", "_") + else + "objects" + end + + builder = options[:builder] + builder.instruct! unless options.delete(:skip_instruct) + + root = ActiveSupport::XmlMini.rename_key(options[:root].to_s, options) + children = options.delete(:children) || root.singularize + attributes = options[:skip_types] ? {} : { type: "array" } + + if empty? + builder.tag!(root, attributes) + else + builder.tag!(root, attributes) do + each { |value| ActiveSupport::XmlMini.to_tag(children, value, options) } + yield builder if block_given? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/extract.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/extract.rb new file mode 100644 index 0000000..cc5a8a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/extract.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +class Array + # Removes and returns the elements for which the block returns a true value. + # If no block is given, an Enumerator is returned instead. + # + # numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + # odd_numbers = numbers.extract! { |number| number.odd? } # => [1, 3, 5, 7, 9] + # numbers # => [0, 2, 4, 6, 8] + def extract! + return to_enum(:extract!) { size } unless block_given? + + extracted_elements = [] + + reject! do |element| + extracted_elements << element if yield(element) + end + + extracted_elements + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/extract_options.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/extract_options.rb new file mode 100644 index 0000000..8c7cb2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/extract_options.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class Hash + # By default, only instances of Hash itself are extractable. + # Subclasses of Hash may implement this method and return + # true to declare themselves as extractable. If a Hash + # is extractable, Array#extract_options! pops it from + # the Array when it is the last element of the Array. + def extractable_options? + instance_of?(Hash) + end +end + +class Array + # Extracts options from a set of arguments. Removes and returns the last + # element in the array if it's a hash, otherwise returns a blank hash. + # + # def options(*args) + # args.extract_options! + # end + # + # options(1, 2) # => {} + # options(1, 2, a: :b) # => {:a=>:b} + def extract_options! + if last.is_a?(Hash) && last.extractable_options? + pop + else + {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/grouping.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/grouping.rb new file mode 100644 index 0000000..67e760b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/grouping.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +class Array + # Splits or iterates over the array in groups of size +number+, + # padding any remaining slots with +fill_with+ unless it is +false+. 
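# Usage sketch (illustrative; not taken from the gem's own documentation):
# when no block is given, the padded slices are returned as an array instead
# of being yielded:
#
#   %w(1 2 3 4 5).in_groups_of(2)        # => [["1", "2"], ["3", "4"], ["5", nil]]
#   %w(1 2 3 4 5).in_groups_of(2, false) # => [["1", "2"], ["3", "4"], ["5"]]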
+ # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups_of(3) {|group| p group} + # ["1", "2", "3"] + # ["4", "5", "6"] + # ["7", "8", "9"] + # ["10", nil, nil] + # + # %w(1 2 3 4 5).in_groups_of(2, ' ') {|group| p group} + # ["1", "2"] + # ["3", "4"] + # ["5", " "] + # + # %w(1 2 3 4 5).in_groups_of(2, false) {|group| p group} + # ["1", "2"] + # ["3", "4"] + # ["5"] + def in_groups_of(number, fill_with = nil) + if number.to_i <= 0 + raise ArgumentError, + "Group size must be a positive integer, was #{number.inspect}" + end + + if fill_with == false + collection = self + else + # size % number gives how many extra we have; + # subtracting from number gives how many to add; + # modulo number ensures we don't add group of just fill. + padding = (number - size % number) % number + collection = dup.concat(Array.new(padding, fill_with)) + end + + if block_given? + collection.each_slice(number) { |slice| yield(slice) } + else + collection.each_slice(number).to_a + end + end + + # Splits or iterates over the array in +number+ of groups, padding any + # remaining slots with +fill_with+ unless it is +false+. + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups(3) {|group| p group} + # ["1", "2", "3", "4"] + # ["5", "6", "7", nil] + # ["8", "9", "10", nil] + # + # %w(1 2 3 4 5 6 7 8 9 10).in_groups(3, ' ') {|group| p group} + # ["1", "2", "3", "4"] + # ["5", "6", "7", " "] + # ["8", "9", "10", " "] + # + # %w(1 2 3 4 5 6 7).in_groups(3, false) {|group| p group} + # ["1", "2", "3"] + # ["4", "5"] + # ["6", "7"] + def in_groups(number, fill_with = nil) + # size.div number gives minor group size; + # size % number gives how many objects need extra accommodation; + # each group hold either division or division + 1 items. + division = size.div number + modulo = size % number + + # create a new array avoiding dup + groups = [] + start = 0 + + number.times do |index| + length = division + (modulo > 0 && modulo > index ? 1 : 0) + groups << last_group = slice(start, length) + last_group << fill_with if fill_with != false && + modulo > 0 && length == division + start += length + end + + if block_given? + groups.each { |g| yield(g) } + else + groups + end + end + + # Divides the array into one or more subarrays based on a delimiting +value+ + # or the result of an optional block. + # + # [1, 2, 3, 4, 5].split(3) # => [[1, 2], [4, 5]] + # (1..10).to_a.split { |i| i % 3 == 0 } # => [[1, 2], [4, 5], [7, 8], [10]] + def split(value = nil) + arr = dup + result = [] + if block_given? + while (idx = arr.index { |i| yield i }) + result << arr.shift(idx) + arr.shift + end + else + while (idx = arr.index(value)) + result << arr.shift(idx) + arr.shift + end + end + result << arr + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/inquiry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/inquiry.rb new file mode 100644 index 0000000..92c61bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/inquiry.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +require "active_support/array_inquirer" + +class Array + # Wraps the array in an +ArrayInquirer+ object, which gives a friendlier way + # to check its string-like contents. + # + # pets = [:cat, :dog].inquiry + # + # pets.cat? # => true + # pets.ferret? 
# => false + # + # pets.any?(:cat, :ferret) # => true + # pets.any?(:ferret, :alligator) # => false + def inquiry + ActiveSupport::ArrayInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/wrap.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/wrap.rb new file mode 100644 index 0000000..d62f97e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/array/wrap.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +class Array + # Wraps its argument in an array unless it is already an array (or array-like). + # + # Specifically: + # + # * If the argument is +nil+ an empty array is returned. + # * Otherwise, if the argument responds to +to_ary+ it is invoked, and its result returned. + # * Otherwise, returns an array with the argument as its single element. + # + # Array.wrap(nil) # => [] + # Array.wrap([1, 2, 3]) # => [1, 2, 3] + # Array.wrap(0) # => [0] + # + # This method is similar in purpose to Kernel#Array, but there are some differences: + # + # * If the argument responds to +to_ary+ the method is invoked. Kernel#Array + # moves on to try +to_a+ if the returned value is +nil+, but Array.wrap returns + # an array with the argument as its single element right away. + # * If the returned value from +to_ary+ is neither +nil+ nor an +Array+ object, Kernel#Array + # raises an exception, while Array.wrap does not, it just returns the value. + # * It does not call +to_a+ on the argument, if the argument does not respond to +to_ary+ + # it returns an array with the argument as its single element. + # + # The last point is easily explained with some enumerables: + # + # Array(foo: :bar) # => [[:foo, :bar]] + # Array.wrap(foo: :bar) # => [{:foo=>:bar}] + # + # There's also a related idiom that uses the splat operator: + # + # [*object] + # + # which returns [] for +nil+, but calls to Array(object) otherwise. + # + # The differences with Kernel#Array explained above + # apply to the rest of objects. + def self.wrap(object) + if object.nil? + [] + elsif object.respond_to?(:to_ary) + object.to_ary || [object] + else + [object] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/benchmark.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/benchmark.rb new file mode 100644 index 0000000..f6e1b72 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/benchmark.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "benchmark" + +class << Benchmark + # Benchmark realtime in milliseconds. 
+ # + # Benchmark.realtime { User.all } + # # => 8.0e-05 + # + # Benchmark.ms { User.all } + # # => 0.074 + def ms(&block) + 1000 * realtime(&block) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/big_decimal.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/big_decimal.rb new file mode 100644 index 0000000..9e6a9d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/big_decimal.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/big_decimal/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/big_decimal/conversions.rb new file mode 100644 index 0000000..52bd229 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/big_decimal/conversions.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "bigdecimal" +require "bigdecimal/util" + +module ActiveSupport + module BigDecimalWithDefaultFormat #:nodoc: + def to_s(format = "F") + super(format) + end + end +end + +BigDecimal.prepend(ActiveSupport::BigDecimalWithDefaultFormat) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class.rb new file mode 100644 index 0000000..1c110fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/class/subclasses" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/attribute.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/attribute.rb new file mode 100644 index 0000000..ec78845 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/attribute.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" + +class Class + # Declare a class-level attribute whose value is inheritable by subclasses. + # Subclasses can change their own value and it will not impact parent class. + # + # ==== Options + # + # * :instance_reader - Sets the instance reader method (defaults to true). + # * :instance_writer - Sets the instance writer method (defaults to true). + # * :instance_accessor - Sets both instance methods (defaults to true). + # * :instance_predicate - Sets a predicate method (defaults to true). + # * :default - Sets a default value for the attribute (defaults to nil). + # + # ==== Examples + # + # class Base + # class_attribute :setting + # end + # + # class Subclass < Base + # end + # + # Base.setting = true + # Subclass.setting # => true + # Subclass.setting = false + # Subclass.setting # => false + # Base.setting # => true + # + # In the above case as long as Subclass does not assign a value to setting + # by performing Subclass.setting = _something_, Subclass.setting + # would read value assigned to parent class. Once Subclass assigns a value then + # the value assigned by Subclass would be returned. 
+ # + # This matches normal Ruby method inheritance: think of writing an attribute + # on a subclass as overriding the reader method. However, you need to be aware + # when using +class_attribute+ with mutable structures as +Array+ or +Hash+. + # In such cases, you don't want to do changes in place. Instead use setters: + # + # Base.setting = [] + # Base.setting # => [] + # Subclass.setting # => [] + # + # # Appending in child changes both parent and child because it is the same object: + # Subclass.setting << :foo + # Base.setting # => [:foo] + # Subclass.setting # => [:foo] + # + # # Use setters to not propagate changes: + # Base.setting = [] + # Subclass.setting += [:foo] + # Base.setting # => [] + # Subclass.setting # => [:foo] + # + # For convenience, an instance predicate method is defined as well. + # To skip it, pass instance_predicate: false. + # + # Subclass.setting? # => false + # + # Instances may overwrite the class value in the same way: + # + # Base.setting = true + # object = Base.new + # object.setting # => true + # object.setting = false + # object.setting # => false + # Base.setting # => true + # + # To opt out of the instance reader method, pass instance_reader: false. + # + # object.setting # => NoMethodError + # object.setting? # => NoMethodError + # + # To opt out of the instance writer method, pass instance_writer: false. + # + # object.setting = false # => NoMethodError + # + # To opt out of both instance methods, pass instance_accessor: false. + # + # To set a default value for the attribute, pass default:, like so: + # + # class_attribute :settings, default: {} + def class_attribute(*attrs, instance_accessor: true, + instance_reader: instance_accessor, instance_writer: instance_accessor, instance_predicate: true, default: nil) + + class_methods, methods = [], [] + attrs.each do |name| + unless name.is_a?(Symbol) || name.is_a?(String) + raise TypeError, "#{name.inspect} is not a symbol nor a string" + end + + class_methods << <<~RUBY # In case the method exists and is not public + silence_redefinition_of_method def #{name} + end + RUBY + + methods << <<~RUBY if instance_reader + silence_redefinition_of_method def #{name} + defined?(@#{name}) ? @#{name} : self.class.#{name} + end + RUBY + + class_methods << <<~RUBY + silence_redefinition_of_method def #{name}=(value) + redefine_method(:#{name}) { value } if singleton_class? 
+ redefine_singleton_method(:#{name}) { value } + value + end + RUBY + + methods << <<~RUBY if instance_writer + silence_redefinition_of_method(:#{name}=) + attr_writer :#{name} + RUBY + + if instance_predicate + class_methods << "silence_redefinition_of_method def #{name}?; !!self.#{name}; end" + if instance_reader + methods << "silence_redefinition_of_method def #{name}?; !!self.#{name}; end" + end + end + end + + location = caller_locations(1, 1).first + class_eval(["class << self", *class_methods, "end", *methods].join(";").tr("\n", ";"), location.path, location.lineno) + + attrs.each { |name| public_send("#{name}=", default) } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/attribute_accessors.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/attribute_accessors.rb new file mode 100644 index 0000000..a77354e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/attribute_accessors.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +# cattr_* became mattr_* aliases in 7dfbd91b0780fbd6a1dd9bfbc176e10894871d2d, +# but we keep this around for libraries that directly require it knowing they +# want cattr_*. No need to deprecate. +require "active_support/core_ext/module/attribute_accessors" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/subclasses.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/subclasses.rb new file mode 100644 index 0000000..568b413 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/class/subclasses.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +class Class + # Returns an array with all classes that are < than its receiver. + # + # class C; end + # C.descendants # => [] + # + # class B < C; end + # C.descendants # => [B] + # + # class A < B; end + # C.descendants # => [B, A] + # + # class D < C; end + # C.descendants # => [B, A, D] + def descendants + ObjectSpace.each_object(singleton_class).reject do |k| + k.singleton_class? || k == self + end + end + + # Returns an array with the direct children of +self+. 
+ # + # class Foo; end + # class Bar < Foo; end + # class Baz < Bar; end + # + # Foo.subclasses # => [Bar] + def subclasses + descendants.select { |descendant| descendant.superclass == self } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date.rb new file mode 100644 index 0000000..cce73f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date/acts_like" +require "active_support/core_ext/date/blank" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/date/conversions" +require "active_support/core_ext/date/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/acts_like.rb new file mode 100644 index 0000000..c8077f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Date + # Duck-types as a Date-like class. See Object#acts_like?. + def acts_like_date? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/blank.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/blank.rb new file mode 100644 index 0000000..e6271c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/blank.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "date" + +class Date #:nodoc: + # No Date is blank: + # + # Date.today.blank? # => false + # + # @return [false] + def blank? + false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/calculations.rb new file mode 100644 index 0000000..d03a8d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/calculations.rb @@ -0,0 +1,146 @@ +# frozen_string_literal: true + +require "date" +require "active_support/duration" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date/zones" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" + +class Date + include DateAndTime::Calculations + + class << self + attr_accessor :beginning_of_week_default + + # Returns the week start (e.g. :monday) for the current request, if this has been set (via Date.beginning_of_week=). + # If Date.beginning_of_week has not been set for the current request, returns the week start specified in config.beginning_of_week. + # If no config.beginning_of_week was specified, returns :monday. + def beginning_of_week + Thread.current[:beginning_of_week] || beginning_of_week_default || :monday + end + + # Sets Date.beginning_of_week to a week start (e.g. :monday) for current request/thread. 
+ # + # This method accepts any of the following day symbols: + # :monday, :tuesday, :wednesday, :thursday, :friday, :saturday, :sunday + def beginning_of_week=(week_start) + Thread.current[:beginning_of_week] = find_beginning_of_week!(week_start) + end + + # Returns week start day symbol (e.g. :monday), or raises an +ArgumentError+ for invalid day symbol. + def find_beginning_of_week!(week_start) + raise ArgumentError, "Invalid beginning of week: #{week_start}" unless ::Date::DAYS_INTO_WEEK.key?(week_start) + week_start + end + + # Returns a new Date representing the date 1 day ago (i.e. yesterday's date). + def yesterday + ::Date.current.yesterday + end + + # Returns a new Date representing the date 1 day after today (i.e. tomorrow's date). + def tomorrow + ::Date.current.tomorrow + end + + # Returns Time.zone.today when Time.zone or config.time_zone are set, otherwise just returns Date.today. + def current + ::Time.zone ? ::Time.zone.today : ::Date.today + end + end + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + # and then subtracts the specified number of seconds. + def ago(seconds) + in_time_zone.since(-seconds) + end + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + # and then adds the specified number of seconds + def since(seconds) + in_time_zone.since(seconds) + end + alias :in :since + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the beginning of the day (0:00) + def beginning_of_day + in_time_zone + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the middle of the day (12:00) + def middle_of_day + in_time_zone.middle_of_day + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Converts Date to a Time (or DateTime if necessary) with the time portion set to the end of the day (23:59:59) + def end_of_day + in_time_zone.end_of_day + end + alias :at_end_of_day :end_of_day + + def plus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + plus_with_duration(-other) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Provides precise Date calculations for years, months, and days. The +options+ parameter takes a hash with + # any of these keys: :years, :months, :weeks, :days. + def advance(options) + d = self + + d = d >> options[:years] * 12 if options[:years] + d = d >> options[:months] if options[:months] + d = d + options[:weeks] * 7 if options[:weeks] + d = d + options[:days] if options[:days] + + d + end + + # Returns a new Date where one or more of the elements have been changed according to the +options+ parameter. + # The +options+ parameter is a hash with a combination of these keys: :year, :month, :day. 
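# Worked examples for the +advance+ method defined above (illustrative; not
# part of the gem's own documentation). Options are applied in the order
# :years, :months, :weeks, :days, and month/year arithmetic clamps to the
# last day of shorter months:
#
#   Date.new(2012, 1, 31).advance(months: 1) # => Wed, 29 Feb 2012
#   Date.new(2012, 2, 29).advance(years: 1)  # => Thu, 28 Feb 2013
#
# By contrast, +change+ below replaces the given fields outright instead of
# adding to them: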
+ # + # Date.new(2007, 5, 12).change(day: 1) # => Date.new(2007, 5, 1) + # Date.new(2007, 5, 12).change(year: 2005, month: 1) # => Date.new(2005, 1, 12) + def change(options) + ::Date.new( + options.fetch(:year, year), + options.fetch(:month, month), + options.fetch(:day, day) + ) + end + + # Allow Date to be compared with Time by converting to DateTime and relying on the <=> from there. + def compare_with_coercion(other) + if other.is_a?(Time) + to_datetime <=> other + else + compare_without_coercion(other) + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/conversions.rb new file mode 100644 index 0000000..050a62b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/conversions.rb @@ -0,0 +1,97 @@ +# frozen_string_literal: true + +require "date" +require "active_support/inflector/methods" +require "active_support/core_ext/date/zones" +require "active_support/core_ext/module/redefine_method" + +class Date + DATE_FORMATS = { + short: "%d %b", + long: "%B %d, %Y", + db: "%Y-%m-%d", + inspect: "%Y-%m-%d", + number: "%Y%m%d", + long_ordinal: lambda { |date| + day_format = ActiveSupport::Inflector.ordinalize(date.day) + date.strftime("%B #{day_format}, %Y") # => "April 25th, 2007" + }, + rfc822: "%d %b %Y", + iso8601: lambda { |date| date.iso8601 } + } + + # Convert to a formatted string. See DATE_FORMATS for predefined formats. + # + # This method is aliased to to_s. + # + # date = Date.new(2007, 11, 10) # => Sat, 10 Nov 2007 + # + # date.to_formatted_s(:db) # => "2007-11-10" + # date.to_s(:db) # => "2007-11-10" + # + # date.to_formatted_s(:short) # => "10 Nov" + # date.to_formatted_s(:number) # => "20071110" + # date.to_formatted_s(:long) # => "November 10, 2007" + # date.to_formatted_s(:long_ordinal) # => "November 10th, 2007" + # date.to_formatted_s(:rfc822) # => "10 Nov 2007" + # date.to_formatted_s(:iso8601) # => "2007-11-10" + # + # == Adding your own date formats to to_formatted_s + # You can add your own formats to the Date::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a date argument as the value. + # + # # config/initializers/date_formats.rb + # Date::DATE_FORMATS[:month_and_year] = '%B %Y' + # Date::DATE_FORMATS[:short_ordinal] = ->(date) { date.strftime("%B #{date.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = DATE_FORMATS[format] + if formatter.respond_to?(:call) + formatter.call(self).to_s + else + strftime(formatter) + end + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005" + def readable_inspect + strftime("%a, %d %b %Y") + end + alias_method :default_inspect, :inspect + alias_method :inspect, :readable_inspect + + silence_redefinition_of_method :to_time + + # Converts a Date instance to a Time, where the time is set to the beginning of the day. + # The timezone can be either :local or :utc (default :local). 
+ # + # date = Date.new(2007, 11, 10) # => Sat, 10 Nov 2007 + # + # date.to_time # => 2007-11-10 00:00:00 0800 + # date.to_time(:local) # => 2007-11-10 00:00:00 0800 + # + # date.to_time(:utc) # => 2007-11-10 00:00:00 UTC + # + # NOTE: The :local timezone is Ruby's *process* timezone, i.e. ENV['TZ']. + # If the *application's* timezone is needed, then use +in_time_zone+ instead. + def to_time(form = :local) + raise ArgumentError, "Expected :local or :utc, got #{form.inspect}." unless [:local, :utc].include?(form) + ::Time.public_send(form, year, month, day) + end + + silence_redefinition_of_method :xmlschema + + # Returns a string which represents the time in used time zone as DateTime + # defined by XML Schema: + # + # date = Date.new(2015, 05, 23) # => Sat, 23 May 2015 + # date.xmlschema # => "2015-05-23T00:00:00+04:00" + def xmlschema + in_time_zone.xmlschema + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/zones.rb new file mode 100644 index 0000000..2dcf97c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date/zones.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/date_and_time/zones" + +class Date + include DateAndTime::Zones +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/calculations.rb new file mode 100644 index 0000000..21cfeed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/calculations.rb @@ -0,0 +1,364 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/try" +require "active_support/core_ext/date_time/conversions" + +module DateAndTime + module Calculations + DAYS_INTO_WEEK = { + sunday: 0, + monday: 1, + tuesday: 2, + wednesday: 3, + thursday: 4, + friday: 5, + saturday: 6 + } + WEEKEND_DAYS = [ 6, 0 ] + + # Returns a new date/time representing yesterday. + def yesterday + advance(days: -1) + end + + # Returns a new date/time representing tomorrow. + def tomorrow + advance(days: 1) + end + + # Returns true if the date/time is today. + def today? + to_date == ::Date.current + end + + # Returns true if the date/time is tomorrow. + def tomorrow? + to_date == ::Date.current.tomorrow + end + alias :next_day? :tomorrow? + + # Returns true if the date/time is yesterday. + def yesterday? + to_date == ::Date.current.yesterday + end + alias :prev_day? :yesterday? + + # Returns true if the date/time is in the past. + def past? + self < self.class.current + end + + # Returns true if the date/time is in the future. + def future? + self > self.class.current + end + + # Returns true if the date/time falls on a Saturday or Sunday. + def on_weekend? + WEEKEND_DAYS.include?(wday) + end + + # Returns true if the date/time does not fall on a Saturday or Sunday. + def on_weekday? + !WEEKEND_DAYS.include?(wday) + end + + # Returns true if the date/time falls before date_or_time. + def before?(date_or_time) + self < date_or_time + end + + # Returns true if the date/time falls after date_or_time. + def after?(date_or_time) + self > date_or_time + end + + # Returns a new date/time the specified number of days ago. 
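# Illustrative example (not from the gem's own documentation):
#
#   Date.new(2015, 6, 18).days_ago(7) # => Thu, 11 Jun 2015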
+ def days_ago(days) + advance(days: -days) + end + + # Returns a new date/time the specified number of days in the future. + def days_since(days) + advance(days: days) + end + + # Returns a new date/time the specified number of weeks ago. + def weeks_ago(weeks) + advance(weeks: -weeks) + end + + # Returns a new date/time the specified number of weeks in the future. + def weeks_since(weeks) + advance(weeks: weeks) + end + + # Returns a new date/time the specified number of months ago. + def months_ago(months) + advance(months: -months) + end + + # Returns a new date/time the specified number of months in the future. + def months_since(months) + advance(months: months) + end + + # Returns a new date/time the specified number of years ago. + def years_ago(years) + advance(years: -years) + end + + # Returns a new date/time the specified number of years in the future. + def years_since(years) + advance(years: years) + end + + # Returns a new date/time at the start of the month. + # + # today = Date.today # => Thu, 18 Jun 2015 + # today.beginning_of_month # => Mon, 01 Jun 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Thu, 18 Jun 2015 15:23:13 +0000 + # now.beginning_of_month # => Mon, 01 Jun 2015 00:00:00 +0000 + def beginning_of_month + first_hour(change(day: 1)) + end + alias :at_beginning_of_month :beginning_of_month + + # Returns a new date/time at the start of the quarter. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.beginning_of_quarter # => Wed, 01 Jul 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.beginning_of_quarter # => Wed, 01 Jul 2015 00:00:00 +0000 + def beginning_of_quarter + first_quarter_month = month - (2 + month) % 3 + beginning_of_month.change(month: first_quarter_month) + end + alias :at_beginning_of_quarter :beginning_of_quarter + + # Returns a new date/time at the end of the quarter. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.end_of_quarter # => Wed, 30 Sep 2015 + # + # +DateTime+ objects will have a time set to 23:59:59. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.end_of_quarter # => Wed, 30 Sep 2015 23:59:59 +0000 + def end_of_quarter + last_quarter_month = month + (12 - month) % 3 + beginning_of_month.change(month: last_quarter_month).end_of_month + end + alias :at_end_of_quarter :end_of_quarter + + # Returns a new date/time at the beginning of the year. + # + # today = Date.today # => Fri, 10 Jul 2015 + # today.beginning_of_year # => Thu, 01 Jan 2015 + # + # +DateTime+ objects will have a time set to 0:00. + # + # now = DateTime.current # => Fri, 10 Jul 2015 18:41:29 +0000 + # now.beginning_of_year # => Thu, 01 Jan 2015 00:00:00 +0000 + def beginning_of_year + change(month: 1).beginning_of_month + end + alias :at_beginning_of_year :beginning_of_year + + # Returns a new date/time representing the given day in the next week. + # + # today = Date.today # => Thu, 07 May 2015 + # today.next_week # => Mon, 11 May 2015 + # + # The +given_day_in_next_week+ defaults to the beginning of the week + # which is determined by +Date.beginning_of_week+ or +config.beginning_of_week+ + # when set. + # + # today = Date.today # => Thu, 07 May 2015 + # today.next_week(:friday) # => Fri, 15 May 2015 + # + # +DateTime+ objects have their time set to 0:00 unless +same_time+ is true. 
+ # + # now = DateTime.current # => Thu, 07 May 2015 13:31:16 +0000 + # now.next_week # => Mon, 11 May 2015 00:00:00 +0000 + def next_week(given_day_in_next_week = Date.beginning_of_week, same_time: false) + result = first_hour(weeks_since(1).beginning_of_week.days_since(days_span(given_day_in_next_week))) + same_time ? copy_time_to(result) : result + end + + # Returns a new date/time representing the next weekday. + def next_weekday + if next_day.on_weekend? + next_week(:monday, same_time: true) + else + next_day + end + end + + # Short-hand for months_since(3) + def next_quarter + months_since(3) + end + + # Returns a new date/time representing the given day in the previous week. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # DateTime objects have their time set to 0:00 unless +same_time+ is true. + def prev_week(start_day = Date.beginning_of_week, same_time: false) + result = first_hour(weeks_ago(1).beginning_of_week.days_since(days_span(start_day))) + same_time ? copy_time_to(result) : result + end + alias_method :last_week, :prev_week + + # Returns a new date/time representing the previous weekday. + def prev_weekday + if prev_day.on_weekend? + copy_time_to(beginning_of_week(:friday)) + else + prev_day + end + end + alias_method :last_weekday, :prev_weekday + + # Short-hand for months_ago(1). + def last_month + months_ago(1) + end + + # Short-hand for months_ago(3). + def prev_quarter + months_ago(3) + end + alias_method :last_quarter, :prev_quarter + + # Short-hand for years_ago(1). + def last_year + years_ago(1) + end + + # Returns the number of days to the start of the week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + def days_to_week_start(start_day = Date.beginning_of_week) + start_day_number = DAYS_INTO_WEEK.fetch(start_day) + (wday - start_day_number) % 7 + end + + # Returns a new date/time representing the start of this week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # +DateTime+ objects have their time set to 0:00. + def beginning_of_week(start_day = Date.beginning_of_week) + result = days_ago(days_to_week_start(start_day)) + acts_like?(:time) ? result.midnight : result + end + alias :at_beginning_of_week :beginning_of_week + + # Returns Monday of this week assuming that week starts on Monday. + # +DateTime+ objects have their time set to 0:00. + def monday + beginning_of_week(:monday) + end + + # Returns a new date/time representing the end of this week on the given day. + # Week is assumed to start on +start_day+, default is + # +Date.beginning_of_week+ or +config.beginning_of_week+ when set. + # DateTime objects have their time set to 23:59:59. + def end_of_week(start_day = Date.beginning_of_week) + last_hour(days_since(6 - days_to_week_start(start_day))) + end + alias :at_end_of_week :end_of_week + + # Returns Sunday of this week assuming that week starts on Monday. + # +DateTime+ objects have their time set to 23:59:59. + def sunday + end_of_week(:monday) + end + + # Returns a new date/time representing the end of the month. + # DateTime objects will have a time set to 23:59:59. + def end_of_month + last_day = ::Time.days_in_month(month, year) + last_hour(days_since(last_day - day)) + end + alias :at_end_of_month :end_of_month + + # Returns a new date/time representing the end of the year. 
+ # DateTime objects will have a time set to 23:59:59. + def end_of_year + change(month: 12).end_of_month + end + alias :at_end_of_year :end_of_year + + # Returns a Range representing the whole day of the current date/time. + def all_day + beginning_of_day..end_of_day + end + + # Returns a Range representing the whole week of the current date/time. + # Week starts on start_day, default is Date.beginning_of_week or config.beginning_of_week when set. + def all_week(start_day = Date.beginning_of_week) + beginning_of_week(start_day)..end_of_week(start_day) + end + + # Returns a Range representing the whole month of the current date/time. + def all_month + beginning_of_month..end_of_month + end + + # Returns a Range representing the whole quarter of the current date/time. + def all_quarter + beginning_of_quarter..end_of_quarter + end + + # Returns a Range representing the whole year of the current date/time. + def all_year + beginning_of_year..end_of_year + end + + # Returns a new date/time representing the next occurrence of the specified day of week. + # + # today = Date.today # => Thu, 14 Dec 2017 + # today.next_occurring(:monday) # => Mon, 18 Dec 2017 + # today.next_occurring(:thursday) # => Thu, 21 Dec 2017 + def next_occurring(day_of_week) + from_now = DAYS_INTO_WEEK.fetch(day_of_week) - wday + from_now += 7 unless from_now > 0 + advance(days: from_now) + end + + # Returns a new date/time representing the previous occurrence of the specified day of week. + # + # today = Date.today # => Thu, 14 Dec 2017 + # today.prev_occurring(:monday) # => Mon, 11 Dec 2017 + # today.prev_occurring(:thursday) # => Thu, 07 Dec 2017 + def prev_occurring(day_of_week) + ago = wday - DAYS_INTO_WEEK.fetch(day_of_week) + ago += 7 unless ago > 0 + advance(days: -ago) + end + + private + def first_hour(date_or_time) + date_or_time.acts_like?(:time) ? date_or_time.beginning_of_day : date_or_time + end + + def last_hour(date_or_time) + date_or_time.acts_like?(:time) ? date_or_time.end_of_day : date_or_time + end + + def days_span(day) + (DAYS_INTO_WEEK.fetch(day) - DAYS_INTO_WEEK.fetch(Date.beginning_of_week)) % 7 + end + + def copy_time_to(other) + other.change(hour: hour, min: min, sec: sec, nsec: try(:nsec)) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/compatibility.rb new file mode 100644 index 0000000..b40a0fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/compatibility.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" + +module DateAndTime + module Compatibility + # If true, +to_time+ preserves the timezone offset of receiver. + # + # NOTE: With Ruby 2.4+ the default for +to_time+ changed from + # converting to the local system time, to preserving the offset + # of the receiver. For backwards compatibility we're overriding + # this behavior, but new apps will have an initializer that sets + # this to true, because the new behavior is preferred. + mattr_accessor :preserve_timezone, instance_writer: false, default: false + + # Change the output of ActiveSupport::TimeZone.utc_to_local. + # + # When `true`, it returns local times with an UTC offset, with `false` local + # times are returned as UTC. 
+ # + # # Given this zone: + # zone = ActiveSupport::TimeZone["Eastern Time (US & Canada)"] + # + # # With `utc_to_local_returns_utc_offset_times = false`, local time is converted to UTC: + # zone.utc_to_local(Time.utc(2000, 1)) # => 1999-12-31 19:00:00 UTC + # + # # With `utc_to_local_returns_utc_offset_times = true`, local time is returned with UTC offset: + # zone.utc_to_local(Time.utc(2000, 1)) # => 1999-12-31 19:00:00 -0500 + mattr_accessor :utc_to_local_returns_utc_offset_times, instance_writer: false, default: false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/zones.rb new file mode 100644 index 0000000..fb6a27c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_and_time/zones.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +module DateAndTime + module Zones + # Returns the simultaneous time in Time.zone if a zone is given or + # if Time.zone_default is set. Otherwise, it returns the current time. + # + # Time.zone = 'Hawaii' # => 'Hawaii' + # Time.utc(2000).in_time_zone # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Date.new(2000).in_time_zone # => Sat, 01 Jan 2000 00:00:00 HST -10:00 + # + # This method is similar to Time#localtime, except that it uses Time.zone as the local zone + # instead of the operating system's time zone. + # + # You can also pass in a TimeZone instance or string that identifies a TimeZone as an argument, + # and the conversion will be based on that zone instead of Time.zone. + # + # Time.utc(2000).in_time_zone('Alaska') # => Fri, 31 Dec 1999 15:00:00 AKST -09:00 + # Date.new(2000).in_time_zone('Alaska') # => Sat, 01 Jan 2000 00:00:00 AKST -09:00 + def in_time_zone(zone = ::Time.zone) + time_zone = ::Time.find_zone! zone + time = acts_like?(:time) ? self : nil + + if time_zone + time_with_zone(time, time_zone) + else + time || to_time + end + end + + private + def time_with_zone(time, zone) + if time + ActiveSupport::TimeWithZone.new(time.utc? ? time : time.getutc, zone) + else + ActiveSupport::TimeWithZone.new(nil, zone, to_time(:utc)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time.rb new file mode 100644 index 0000000..790dbee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_time/acts_like" +require "active_support/core_ext/date_time/blank" +require "active_support/core_ext/date_time/calculations" +require "active_support/core_ext/date_time/compatibility" +require "active_support/core_ext/date_time/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/acts_like.rb new file mode 100644 index 0000000..5dccdfe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/acts_like.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/object/acts_like" + +class DateTime + # Duck-types as a Date-like class. See Object#acts_like?. + def acts_like_date? 
+ true + end + + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/blank.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/blank.rb new file mode 100644 index 0000000..a52c8bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/blank.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "date" + +class DateTime #:nodoc: + # No DateTime is ever blank: + # + # DateTime.now.blank? # => false + # + # @return [false] + def blank? + false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/calculations.rb new file mode 100644 index 0000000..bc670c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/calculations.rb @@ -0,0 +1,211 @@ +# frozen_string_literal: true + +require "date" + +class DateTime + class << self + # Returns Time.zone.now.to_datetime when Time.zone or + # config.time_zone are set, otherwise returns + # Time.now.to_datetime. + def current + ::Time.zone ? ::Time.zone.now.to_datetime : ::Time.now.to_datetime + end + end + + # Returns the number of seconds since 00:00:00. + # + # DateTime.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0 + # DateTime.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296 + # DateTime.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399 + def seconds_since_midnight + sec + (min * 60) + (hour * 3600) + end + + # Returns the number of seconds until 23:59:59. + # + # DateTime.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # DateTime.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # DateTime.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # DateTime.new(2012, 8, 29, 0, 0, 0.5).subsec # => (1/2) + def subsec + sec_fraction + end + + # Returns a new DateTime where one or more of the elements have been changed + # according to the +options+ parameter. The time options (:hour, + # :min, :sec) reset cascadingly, so if only the hour is + # passed, then minute and sec is set to 0. If the hour and minute is passed, + # then sec is set to 0. The +options+ parameter takes a hash with any of these + # keys: :year, :month, :day, :hour, + # :min, :sec, :offset, :start. + # + # DateTime.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => DateTime.new(2012, 8, 1, 22, 35, 0) + # DateTime.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => DateTime.new(1981, 8, 1, 22, 35, 0) + # DateTime.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => DateTime.new(1981, 8, 29, 0, 0, 0) + def change(options) + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_fraction = Rational(new_nsec, 1000000000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 
0 : Rational(nsec, 1000)) + new_fraction = Rational(new_usec, 1000000) + end + + raise ArgumentError, "argument out of range" if new_fraction >= 1 + + ::DateTime.civil( + options.fetch(:year, year), + options.fetch(:month, month), + options.fetch(:day, day), + options.fetch(:hour, hour), + options.fetch(:min, options[:hour] ? 0 : min), + options.fetch(:sec, (options[:hour] || options[:min]) ? 0 : sec) + new_fraction, + options.fetch(:offset, offset), + options.fetch(:start, start) + ) + end + + # Uses Date to provide precise Time calculations for years, months, and days. + # The +options+ parameter takes a hash with any of these keys: :years, + # :months, :weeks, :days, :hours, + # :minutes, :seconds. + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.advance(options) + datetime_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? + datetime_advanced_by_date + else + datetime_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new DateTime representing the time a number of seconds ago. + # Do not use this method in combination with x.months, use months_ago instead! + def ago(seconds) + since(-seconds) + end + + # Returns a new DateTime representing the time a number of seconds since the + # instance time. Do not use this method in combination with x.months, use + # months_since instead! + def since(seconds) + self + Rational(seconds, 86400) + end + alias :in :since + + # Returns a new DateTime representing the start of the day (0:00). + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new DateTime representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new DateTime representing the end of the day (23:59:59). + def end_of_day + change(hour: 23, min: 59, sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_day :end_of_day + + # Returns a new DateTime representing the start of the hour (hh:00:00). + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new DateTime representing the end of the hour (hh:59:59). + def end_of_hour + change(min: 59, sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new DateTime representing the start of the minute (hh:mm:00). + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new DateTime representing the end of the minute (hh:mm:59). + def end_of_minute + change(sec: 59, usec: Rational(999999999, 1000)) + end + alias :at_end_of_minute :end_of_minute + + # Returns a Time instance of the simultaneous time in the system timezone. 
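# Illustrative example (not from the gem's own documentation; the output
# shown assumes the process runs with a system timezone of UTC-06:00):
#
#   DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)).localtime
#   # => 2005-02-21 10:11:12 -0600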
+ def localtime(utc_offset = nil) + utc = new_offset(0) + + Time.utc( + utc.year, utc.month, utc.day, + utc.hour, utc.min, utc.sec + utc.sec_fraction + ).getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns a Time instance of the simultaneous time in the UTC timezone. + # + # DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)) # => Mon, 21 Feb 2005 10:11:12 -0600 + # DateTime.civil(2005, 2, 21, 10, 11, 12, Rational(-6, 24)).utc # => Mon, 21 Feb 2005 16:11:12 UTC + def utc + utc = new_offset(0) + + Time.utc( + utc.year, utc.month, utc.day, + utc.hour, utc.min, utc.sec + utc.sec_fraction + ) + end + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns +true+ if offset == 0. + def utc? + offset == 0 + end + + # Returns the offset value in seconds. + def utc_offset + (offset * 86400).to_i + end + + # Layers additional behavior on DateTime#<=> so that Time and + # ActiveSupport::TimeWithZone instances can be compared with a DateTime. + def <=>(other) + if other.respond_to? :to_datetime + super other.to_datetime rescue nil + else + super + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/compatibility.rb new file mode 100644 index 0000000..7600a06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/compatibility.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class DateTime + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return an instance of +Time+ with the same UTC offset + # as +self+ or an instance of +Time+ representing the same time + # in the local system timezone depending on the setting of + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? getlocal(utc_offset) : getlocal + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/conversions.rb new file mode 100644 index 0000000..231bf87 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/date_time/conversions.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "date" +require "active_support/inflector/methods" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/date_time/calculations" +require "active_support/values/time_zone" + +class DateTime + # Convert to a formatted string. See Time::DATE_FORMATS for predefined formats. + # + # This method is aliased to to_s. 
+ # + # === Examples + # datetime = DateTime.civil(2007, 12, 4, 0, 0, 0, 0) # => Tue, 04 Dec 2007 00:00:00 +0000 + # + # datetime.to_formatted_s(:db) # => "2007-12-04 00:00:00" + # datetime.to_s(:db) # => "2007-12-04 00:00:00" + # datetime.to_s(:number) # => "20071204000000" + # datetime.to_formatted_s(:short) # => "04 Dec 00:00" + # datetime.to_formatted_s(:long) # => "December 04, 2007 00:00" + # datetime.to_formatted_s(:long_ordinal) # => "December 4th, 2007 00:00" + # datetime.to_formatted_s(:rfc822) # => "Tue, 04 Dec 2007 00:00:00 +0000" + # datetime.to_formatted_s(:iso8601) # => "2007-12-04T00:00:00+00:00" + # + # == Adding your own datetime formats to to_formatted_s + # DateTime formats are shared with Time. You can add your own to the + # Time::DATE_FORMATS hash. Use the format name as the hash key and + # either a strftime string or Proc instance that takes a time or + # datetime argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = lambda { |time| time.strftime("%B #{time.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + to_default_s + end + end + alias_method :to_default_s, :to_s if instance_methods(false).include?(:to_s) + alias_method :to_s, :to_formatted_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # datetime = DateTime.civil(2000, 1, 1, 0, 0, 0, Rational(-6, 24)) + # datetime.formatted_offset # => "-06:00" + # datetime.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005 14:30:00 +0000". + def readable_inspect + to_s(:rfc822) + end + alias_method :default_inspect, :inspect + alias_method :inspect, :readable_inspect + + # Returns DateTime with local offset for given year if format is local else + # offset is zero. + # + # DateTime.civil_from_format :local, 2012 + # # => Sun, 01 Jan 2012 00:00:00 +0300 + # DateTime.civil_from_format :local, 2012, 12, 17 + # # => Mon, 17 Dec 2012 00:00:00 +0000 + def self.civil_from_format(utc_or_local, year, month = 1, day = 1, hour = 0, min = 0, sec = 0) + if utc_or_local.to_sym == :local + offset = ::Time.local(year, month, day).utc_offset.to_r / 86400 + else + offset = 0 + end + civil(year, month, day, hour, min, sec, offset) + end + + # Converts +self+ to a floating-point number of seconds, including fractional microseconds, since the Unix epoch. + def to_f + seconds_since_unix_epoch.to_f + sec_fraction + end + + # Converts +self+ to an integer number of seconds since the Unix epoch. 
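+  #
+  #   # Illustrative values (added; not from the upstream docs):
+  #   DateTime.civil(1970, 1, 1).to_i  # => 0
+  #   DateTime.civil(1970, 1, 2).to_i  # => 86400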
+ def to_i + seconds_since_unix_epoch.to_i + end + + # Returns the fraction of a second as microseconds + def usec + (sec_fraction * 1_000_000).to_i + end + + # Returns the fraction of a second as nanoseconds + def nsec + (sec_fraction * 1_000_000_000).to_i + end + + private + def offset_in_seconds + (offset * 86400).to_i + end + + def seconds_since_unix_epoch + (jd - 2440588) * 86400 - offset_in_seconds + seconds_since_midnight + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/digest.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/digest.rb new file mode 100644 index 0000000..ce1427e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/digest.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/digest/uuid" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/digest/uuid.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/digest/uuid.rb new file mode 100644 index 0000000..6e949a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/digest/uuid.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +require "securerandom" + +module Digest + module UUID + DNS_NAMESPACE = "k\xA7\xB8\x10\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + URL_NAMESPACE = "k\xA7\xB8\x11\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + OID_NAMESPACE = "k\xA7\xB8\x12\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + X500_NAMESPACE = "k\xA7\xB8\x14\x9D\xAD\x11\xD1\x80\xB4\x00\xC0O\xD40\xC8" #:nodoc: + + # Generates a v5 non-random UUID (Universally Unique IDentifier). + # + # Using Digest::MD5 generates version 3 UUIDs; Digest::SHA1 generates version 5 UUIDs. + # uuid_from_hash always generates the same UUID for a given name and namespace combination. + # + # See RFC 4122 for details of UUID at: https://www.ietf.org/rfc/rfc4122.txt + def self.uuid_from_hash(hash_class, uuid_namespace, name) + if hash_class == Digest::MD5 + version = 3 + elsif hash_class == Digest::SHA1 + version = 5 + else + raise ArgumentError, "Expected Digest::SHA1 or Digest::MD5, got #{hash_class.name}." + end + + hash = hash_class.new + hash.update(uuid_namespace) + hash.update(name) + + ary = hash.digest.unpack("NnnnnN") + ary[2] = (ary[2] & 0x0FFF) | (version << 12) + ary[3] = (ary[3] & 0x3FFF) | 0x8000 + + "%08x-%04x-%04x-%04x-%04x%08x" % ary + end + + # Convenience method for uuid_from_hash using Digest::MD5. + def self.uuid_v3(uuid_namespace, name) + uuid_from_hash(Digest::MD5, uuid_namespace, name) + end + + # Convenience method for uuid_from_hash using Digest::SHA1. + def self.uuid_v5(uuid_namespace, name) + uuid_from_hash(Digest::SHA1, uuid_namespace, name) + end + + # Convenience method for SecureRandom.uuid. 
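+    #
+    #   # Returns a random RFC 4122 version 4 UUID; the value below is only
+    #   # an illustrative shape (added; not from the upstream docs).
+    #   Digest::UUID.uuid_v4 # => "1ca9f2c8-7d09-4ab6-9c26-5abcd1e2f345"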
+ def self.uuid_v4 + SecureRandom.uuid + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/enumerable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/enumerable.rb new file mode 100644 index 0000000..97c918a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/enumerable.rb @@ -0,0 +1,260 @@ +# frozen_string_literal: true + +module Enumerable + INDEX_WITH_DEFAULT = Object.new + private_constant :INDEX_WITH_DEFAULT + + # Enumerable#sum was added in Ruby 2.4, but it only works with Numeric elements + # when we omit an identity. + + # :stopdoc: + + # We can't use Refinements here because Refinements with Module which will be prepended + # doesn't work well https://bugs.ruby-lang.org/issues/13446 + alias :_original_sum_with_required_identity :sum + private :_original_sum_with_required_identity + + # :startdoc: + + # Calculates a sum from the elements. + # + # payments.sum { |p| p.price * p.tax_rate } + # payments.sum(&:price) + # + # The latter is a shortcut for: + # + # payments.inject(0) { |sum, p| sum + p.price } + # + # It can also calculate the sum without the use of a block. + # + # [5, 15, 10].sum # => 30 + # ['foo', 'bar'].sum # => "foobar" + # [[1, 2], [3, 1, 5]].sum # => [1, 2, 3, 1, 5] + # + # The default sum of an empty list is zero. You can override this default: + # + # [].sum(Payment.new(0)) { |i| i.amount } # => Payment.new(0) + def sum(identity = nil, &block) + if identity + _original_sum_with_required_identity(identity, &block) + elsif block_given? + map(&block).sum(identity) + else + inject(:+) || 0 + end + end + + # Convert an enumerable to a hash, using the block result as the key and the + # element as the value. + # + # people.index_by(&:login) + # # => { "nextangle" => , "chade-" => , ...} + # + # people.index_by { |person| "#{person.first_name} #{person.last_name}" } + # # => { "Chade- Fowlersburg-e" => , "David Heinemeier Hansson" => , ...} + def index_by + if block_given? + result = {} + each { |elem| result[yield(elem)] = elem } + result + else + to_enum(:index_by) { size if respond_to?(:size) } + end + end + + # Convert an enumerable to a hash, using the element as the key and the block + # result as the value. + # + # post = Post.new(title: "hey there", body: "what's up?") + # + # %i( title body ).index_with { |attr_name| post.public_send(attr_name) } + # # => { title: "hey there", body: "what's up?" } + # + # If an argument is passed instead of a block, it will be used as the value + # for all elements: + # + # %i( created_at updated_at ).index_with(Time.now) + # # => { created_at: 2020-03-09 22:31:47, updated_at: 2020-03-09 22:31:47 } + def index_with(default = INDEX_WITH_DEFAULT) + if block_given? + result = {} + each { |elem| result[elem] = yield(elem) } + result + elsif default != INDEX_WITH_DEFAULT + result = {} + each { |elem| result[elem] = default } + result + else + to_enum(:index_with) { size if respond_to?(:size) } + end + end + + # Returns +true+ if the enumerable has more than 1 element. Functionally + # equivalent to enum.to_a.size > 1. Can be called with a block too, + # much like any?, so people.many? { |p| p.age > 26 } returns +true+ + # if more than one person is over 26. + def many? + cnt = 0 + if block_given? + any? do |element| + cnt += 1 if yield element + cnt > 1 + end + else + any? { (cnt += 1) > 1 } + end + end + + # Returns a new array that includes the passed elements. 
+ # + # [ 1, 2, 3 ].including(4, 5) + # # => [ 1, 2, 3, 4, 5 ] + # + # ["David", "Rafael"].including %w[ Aaron Todd ] + # # => ["David", "Rafael", "Aaron", "Todd"] + def including(*elements) + to_a.including(*elements) + end + + # The negative of the Enumerable#include?. Returns +true+ if the + # collection does not include the object. + def exclude?(object) + !include?(object) + end + + # Returns a copy of the enumerable excluding the specified elements. + # + # ["David", "Rafael", "Aaron", "Todd"].excluding "Aaron", "Todd" + # # => ["David", "Rafael"] + # + # ["David", "Rafael", "Aaron", "Todd"].excluding %w[ Aaron Todd ] + # # => ["David", "Rafael"] + # + # {foo: 1, bar: 2, baz: 3}.excluding :bar + # # => {foo: 1, baz: 3} + def excluding(*elements) + elements.flatten!(1) + reject { |element| elements.include?(element) } + end + + # Alias for #excluding. + def without(*elements) + excluding(*elements) + end + + # Extract the given key from each element in the enumerable. + # + # [{ name: "David" }, { name: "Rafael" }, { name: "Aaron" }].pluck(:name) + # # => ["David", "Rafael", "Aaron"] + # + # [{ id: 1, name: "David" }, { id: 2, name: "Rafael" }].pluck(:id, :name) + # # => [[1, "David"], [2, "Rafael"]] + def pluck(*keys) + if keys.many? + map { |element| keys.map { |key| element[key] } } + else + key = keys.first + map { |element| element[key] } + end + end + + # Extract the given key from the first element in the enumerable. + # + # [{ name: "David" }, { name: "Rafael" }, { name: "Aaron" }].pick(:name) + # # => "David" + # + # [{ id: 1, name: "David" }, { id: 2, name: "Rafael" }].pick(:id, :name) + # # => [1, "David"] + def pick(*keys) + return if none? + + if keys.many? + keys.map { |key| first[key] } + else + first[keys.first] + end + end + + # Returns a new +Array+ without the blank items. + # Uses Object#blank? for determining if an item is blank. + # + # [1, "", nil, 2, " ", [], {}, false, true].compact_blank + # # => [1, 2, true] + # + # Set.new([nil, "", 1, 2]) + # # => [2, 1] (or [1, 2]) + # + # When called on a +Hash+, returns a new +Hash+ without the blank values. + # + # { a: "", b: 1, c: nil, d: [], e: false, f: true }.compact_blank + # #=> { b: 1, f: true } + def compact_blank + reject(&:blank?) + end +end + +class Hash + # Hash#reject has its own definition, so this needs one too. + def compact_blank #:nodoc: + reject { |_k, v| v.blank? } + end + + # Removes all blank values from the +Hash+ in place and returns self. + # Uses Object#blank? for determining if a value is blank. + # + # h = { a: "", b: 1, c: nil, d: [], e: false, f: true } + # h.compact_blank! + # # => { b: 1, f: true } + def compact_blank! + # use delete_if rather than reject! because it always returns self even if nothing changed + delete_if { |_k, v| v.blank? } + end +end + +class Range #:nodoc: + # Optimize range sum to use arithmetic progression if a block is not given and + # we have a range of numeric values. + def sum(identity = nil) + if block_given? || !(first.is_a?(Integer) && last.is_a?(Integer)) + super + else + actual_last = exclude_end? ? (last - 1) : last + if actual_last >= first + sum = identity || 0 + sum + (actual_last - first + 1) * (actual_last + first) / 2 + else + identity || 0 + end + end + end +end + +# Using Refinements here in order not to expose our internal method +using Module.new { + refine Array do + alias :orig_sum :sum + end +} + +class Array #:nodoc: + # Array#sum was added in Ruby 2.4 but it only works with Numeric elements. 
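+  # Together with the Enumerable#sum override above, this keeps non-numeric
+  # sums working (illustrative examples, added; not from the upstream docs):
+  #
+  #   %w[foo bar].sum        # => "foobar"
+  #   [[1, 2], [3]].sum([])  # => [1, 2, 3]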
+ def sum(init = nil, &block) + if init.is_a?(Numeric) || first.is_a?(Numeric) + init ||= 0 + orig_sum(init, &block) + else + super + end + end + + # Removes all blank elements from the +Array+ in place and returns self. + # Uses Object#blank? for determining if an item is blank. + # + # a = [1, "", nil, 2, " ", [], {}, false, true] + # a.compact_blank! + # # => [1, 2, true] + def compact_blank! + # use delete_if rather than reject! because it always returns self even if nothing changed + delete_if(&:blank?) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/file.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/file.rb new file mode 100644 index 0000000..64553bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/file.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/file/atomic" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/file/atomic.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/file/atomic.rb new file mode 100644 index 0000000..9deceb1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/file/atomic.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require "fileutils" + +class File + # Write to a file atomically. Useful for situations where you don't + # want other processes or threads to see half-written files. + # + # File.atomic_write('important.file') do |file| + # file.write('hello') + # end + # + # This method needs to create a temporary file. By default it will create it + # in the same directory as the destination file. If you don't like this + # behavior you can provide a different directory but it must be on the + # same physical filesystem as the file you're trying to write. + # + # File.atomic_write('/data/something.important', '/data/tmp') do |file| + # file.write('hello') + # end + def self.atomic_write(file_name, temp_dir = dirname(file_name)) + require "tempfile" unless defined?(Tempfile) + + Tempfile.open(".#{basename(file_name)}", temp_dir) do |temp_file| + temp_file.binmode + return_val = yield temp_file + temp_file.close + + old_stat = if exist?(file_name) + # Get original file permissions + stat(file_name) + else + # If not possible, probe which are the default permissions in the + # destination directory. + probe_stat_in(dirname(file_name)) + end + + if old_stat + # Set correct permissions on new file + begin + chown(old_stat.uid, old_stat.gid, temp_file.path) + # This operation will affect filesystem ACL's + chmod(old_stat.mode, temp_file.path) + rescue Errno::EPERM, Errno::EACCES + # Changing file ownership failed, moving on. + end + end + + # Overwrite original file with temp file + rename(temp_file.path, file_name) + return_val + end + end + + # Private utility method. 
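+  # Touches a throwaway ".permissions_check" file in +dir+, stats it to learn
+  # the default permissions new files receive there, and then removes it.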
+ def self.probe_stat_in(dir) #:nodoc: + basename = [ + ".permissions_check", + Thread.current.object_id, + Process.pid, + rand(1000000) + ].join(".") + + file_name = join(dir, basename) + FileUtils.touch(file_name) + stat(file_name) + ensure + FileUtils.rm_f(file_name) if file_name + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash.rb new file mode 100644 index 0000000..2f0901d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/conversions" +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/deep_transform_values" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/indifferent_access" +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/hash/slice" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/conversions.rb new file mode 100644 index 0000000..2b5e484 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/conversions.rb @@ -0,0 +1,263 @@ +# frozen_string_literal: true + +require "active_support/xml_mini" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/object/try" +require "active_support/core_ext/array/wrap" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/string/inflections" + +class Hash + # Returns a string containing an XML representation of its receiver: + # + # { foo: 1, bar: 2 }.to_xml + # # => + # # + # # + # # 1 + # # 2 + # # + # + # To do so, the method loops over the pairs and builds nodes that depend on + # the _values_. Given a pair +key+, +value+: + # + # * If +value+ is a hash there's a recursive call with +key+ as :root. + # + # * If +value+ is an array there's a recursive call with +key+ as :root, + # and +key+ singularized as :children. + # + # * If +value+ is a callable object it must expect one or two arguments. Depending + # on the arity, the callable is invoked with the +options+ hash as first argument + # with +key+ as :root, and +key+ singularized as second argument. The + # callable can add nodes by using options[:builder]. + # + # {foo: lambda { |options, key| options[:builder].b(key) }}.to_xml + # # => "foo" + # + # * If +value+ responds to +to_xml+ the method is invoked with +key+ as :root. + # + # class Foo + # def to_xml(options) + # options[:builder].bar 'fooing!' + # end + # end + # + # { foo: Foo.new }.to_xml(skip_instruct: true) + # # => + # # + # # fooing! + # # + # + # * Otherwise, a node with +key+ as tag is created with a string representation of + # +value+ as text node. If +value+ is +nil+ an attribute "nil" set to "true" is added. 
+ # Unless the option :skip_types exists and is true, an attribute "type" is + # added as well according to the following mapping: + # + # XML_TYPE_NAMES = { + # "Symbol" => "symbol", + # "Integer" => "integer", + # "BigDecimal" => "decimal", + # "Float" => "float", + # "TrueClass" => "boolean", + # "FalseClass" => "boolean", + # "Date" => "date", + # "DateTime" => "dateTime", + # "Time" => "dateTime" + # } + # + # By default the root node is "hash", but that's configurable via the :root option. + # + # The default XML builder is a fresh instance of Builder::XmlMarkup. You can + # configure your own builder with the :builder option. The method also accepts + # options like :dasherize and friends, they are forwarded to the builder. + def to_xml(options = {}) + require "active_support/builder" unless defined?(Builder::XmlMarkup) + + options = options.dup + options[:indent] ||= 2 + options[:root] ||= "hash" + options[:builder] ||= Builder::XmlMarkup.new(indent: options[:indent]) + + builder = options[:builder] + builder.instruct! unless options.delete(:skip_instruct) + + root = ActiveSupport::XmlMini.rename_key(options[:root].to_s, options) + + builder.tag!(root) do + each { |key, value| ActiveSupport::XmlMini.to_tag(key, value, options) } + yield builder if block_given? + end + end + + class << self + # Returns a Hash containing a collection of pairs when the key is the node name and the value is + # its content + # + # xml = <<-XML + # + # + # 1 + # 2 + # + # XML + # + # hash = Hash.from_xml(xml) + # # => {"hash"=>{"foo"=>1, "bar"=>2}} + # + # +DisallowedType+ is raised if the XML contains attributes with type="yaml" or + # type="symbol". Use Hash.from_trusted_xml to + # parse this XML. + # + # Custom +disallowed_types+ can also be passed in the form of an + # array. + # + # xml = <<-XML + # + # + # 1 + # "David" + # + # XML + # + # hash = Hash.from_xml(xml, ['integer']) + # # => ActiveSupport::XMLConverter::DisallowedType: Disallowed type attribute: "integer" + # + # Note that passing custom disallowed types will override the default types, + # which are Symbol and YAML. + def from_xml(xml, disallowed_types = nil) + ActiveSupport::XMLConverter.new(xml, disallowed_types).to_h + end + + # Builds a Hash from XML just like Hash.from_xml, but also allows Symbol and YAML. + def from_trusted_xml(xml) + from_xml xml, [] + end + end +end + +module ActiveSupport + class XMLConverter # :nodoc: + # Raised if the XML contains attributes with type="yaml" or + # type="symbol". Read Hash#from_xml for more details. 
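+    #
+    #   # Illustrative example (added; the XML snippet is made up):
+    #   Hash.from_xml('<data type="symbol">secret</data>')
+    #   # => raises ActiveSupport::XMLConverter::DisallowedType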
+ class DisallowedType < StandardError + def initialize(type) + super "Disallowed type attribute: #{type.inspect}" + end + end + + DISALLOWED_TYPES = %w(symbol yaml) + + def initialize(xml, disallowed_types = nil) + @xml = normalize_keys(XmlMini.parse(xml)) + @disallowed_types = disallowed_types || DISALLOWED_TYPES + end + + def to_h + deep_to_h(@xml) + end + + private + def normalize_keys(params) + case params + when Hash + Hash[params.map { |k, v| [k.to_s.tr("-", "_"), normalize_keys(v)] } ] + when Array + params.map { |v| normalize_keys(v) } + else + params + end + end + + def deep_to_h(value) + case value + when Hash + process_hash(value) + when Array + process_array(value) + when String + value + else + raise "can't typecast #{value.class.name} - #{value.inspect}" + end + end + + def process_hash(value) + if value.include?("type") && !value["type"].is_a?(Hash) && @disallowed_types.include?(value["type"]) + raise DisallowedType, value["type"] + end + + if become_array?(value) + _, entries = Array.wrap(value.detect { |k, v| not v.is_a?(String) }) + if entries.nil? || value["__content__"].try(:empty?) + [] + else + case entries + when Array + entries.collect { |v| deep_to_h(v) } + when Hash + [deep_to_h(entries)] + else + raise "can't typecast #{entries.inspect}" + end + end + elsif become_content?(value) + process_content(value) + + elsif become_empty_string?(value) + "" + elsif become_hash?(value) + xml_value = value.transform_values { |v| deep_to_h(v) } + + # Turn { files: { file: # } } into { files: # } so it is compatible with + # how multipart uploaded files from HTML appear + xml_value["file"].is_a?(StringIO) ? xml_value["file"] : xml_value + end + end + + def become_content?(value) + value["type"] == "file" || (value["__content__"] && (value.keys.size == 1 || value["__content__"].present?)) + end + + def become_array?(value) + value["type"] == "array" + end + + def become_empty_string?(value) + # { "string" => true } + # No tests fail when the second term is removed. + value["type"] == "string" && value["nil"] != "true" + end + + def become_hash?(value) + !nothing?(value) && !garbage?(value) + end + + def nothing?(value) + # blank or nil parsed values are represented by nil + value.blank? || value["nil"] == "true" + end + + def garbage?(value) + # If the type is the only element which makes it then + # this still makes the value nil, except if type is + # an XML node(where type['value'] is a Hash) + value["type"] && !value["type"].is_a?(::Hash) && value.size == 1 + end + + def process_content(value) + content = value["__content__"] + if parser = ActiveSupport::XmlMini::PARSING[value["type"]] + parser.arity == 1 ? parser.call(content) : parser.call(content, value) + else + content + end + end + + def process_array(value) + value.map! { |i| deep_to_h(i) } + value.length > 1 ? value : value.first + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/deep_merge.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/deep_merge.rb new file mode 100644 index 0000000..9bc50b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/deep_merge.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with +self+ and +other_hash+ merged recursively. 
+ # + # h1 = { a: true, b: { c: [1, 2, 3] } } + # h2 = { a: false, b: { x: [3, 4, 5] } } + # + # h1.deep_merge(h2) # => { a: false, b: { c: [1, 2, 3], x: [3, 4, 5] } } + # + # Like with Hash#merge in the standard library, a block can be provided + # to merge values: + # + # h1 = { a: 100, b: 200, c: { c1: 100 } } + # h2 = { b: 250, c: { c1: 200 } } + # h1.deep_merge(h2) { |key, this_val, other_val| this_val + other_val } + # # => { a: 100, b: 450, c: { c1: 300 } } + def deep_merge(other_hash, &block) + dup.deep_merge!(other_hash, &block) + end + + # Same as +deep_merge+, but modifies +self+. + def deep_merge!(other_hash, &block) + merge!(other_hash) do |key, this_val, other_val| + if this_val.is_a?(Hash) && other_val.is_a?(Hash) + this_val.deep_merge(other_val, &block) + elsif block_given? + block.call(key, this_val, other_val) + else + other_val + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/deep_transform_values.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/deep_transform_values.rb new file mode 100644 index 0000000..8ad85c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/deep_transform_values.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with all values converted by the block operation. + # This includes the values from the root hash and from all + # nested hashes and arrays. + # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_transform_values{ |value| value.to_s.upcase } + # # => {person: {name: "ROB", age: "28"}} + def deep_transform_values(&block) + _deep_transform_values_in_object(self, &block) + end + + # Destructively converts all values by using the block operation. + # This includes the values from the root hash and from all + # nested hashes and arrays. + def deep_transform_values!(&block) + _deep_transform_values_in_object!(self, &block) + end + + private + # Support methods for deep transforming nested hashes and arrays. + def _deep_transform_values_in_object(object, &block) + case object + when Hash + object.transform_values { |value| _deep_transform_values_in_object(value, &block) } + when Array + object.map { |e| _deep_transform_values_in_object(e, &block) } + else + yield(object) + end + end + + def _deep_transform_values_in_object!(object, &block) + case object + when Hash + object.transform_values! { |value| _deep_transform_values_in_object!(value, &block) } + when Array + object.map! { |e| _deep_transform_values_in_object!(e, &block) } + else + yield(object) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/except.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/except.rb new file mode 100644 index 0000000..ec96929 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/except.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +class Hash + # Returns a hash that includes everything except given keys. 
+ # hash = { a: true, b: false, c: nil } + # hash.except(:c) # => { a: true, b: false } + # hash.except(:a, :b) # => { c: nil } + # hash # => { a: true, b: false, c: nil } + # + # This is useful for limiting a set of parameters to everything but a few known toggles: + # @person.update(params[:person].except(:admin)) + def except(*keys) + slice(*self.keys - keys) + end unless method_defined?(:except) + + # Removes the given keys from hash and returns it. + # hash = { a: true, b: false, c: nil } + # hash.except!(:c) # => { a: true, b: false } + # hash # => { a: true, b: false } + def except!(*keys) + keys.each { |key| delete(key) } + self + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/indifferent_access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/indifferent_access.rb new file mode 100644 index 0000000..a38f33f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/indifferent_access.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "active_support/hash_with_indifferent_access" + +class Hash + # Returns an ActiveSupport::HashWithIndifferentAccess out of its receiver: + # + # { a: 1 }.with_indifferent_access['a'] # => 1 + def with_indifferent_access + ActiveSupport::HashWithIndifferentAccess.new(self) + end + + # Called when object is nested under an object that receives + # #with_indifferent_access. This method will be called on the current object + # by the enclosing object and is aliased to #with_indifferent_access by + # default. Subclasses of Hash may overwrite this method to return +self+ if + # converting to an ActiveSupport::HashWithIndifferentAccess would not be + # desirable. + # + # b = { b: 1 } + # { a: b }.with_indifferent_access['a'] # calls b.nested_under_indifferent_access + # # => {"b"=>1} + alias nested_under_indifferent_access with_indifferent_access +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/keys.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/keys.rb new file mode 100644 index 0000000..f2db61f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/keys.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true + +class Hash + # Returns a new hash with all keys converted to strings. + # + # hash = { name: 'Rob', age: '28' } + # + # hash.stringify_keys + # # => {"name"=>"Rob", "age"=>"28"} + def stringify_keys + transform_keys(&:to_s) + end + + # Destructively converts all keys to strings. Same as + # +stringify_keys+, but modifies +self+. + def stringify_keys! + transform_keys!(&:to_s) + end + + # Returns a new hash with all keys converted to symbols, as long as + # they respond to +to_sym+. + # + # hash = { 'name' => 'Rob', 'age' => '28' } + # + # hash.symbolize_keys + # # => {:name=>"Rob", :age=>"28"} + def symbolize_keys + transform_keys { |key| key.to_sym rescue key } + end + alias_method :to_options, :symbolize_keys + + # Destructively converts all keys to symbols, as long as they respond + # to +to_sym+. Same as +symbolize_keys+, but modifies +self+. + def symbolize_keys! + transform_keys! { |key| key.to_sym rescue key } + end + alias_method :to_options!, :symbolize_keys! + + # Validates all keys in a hash match *valid_keys, raising + # +ArgumentError+ on a mismatch. 
+ # + # Note that keys are treated differently than HashWithIndifferentAccess, + # meaning that string and symbol keys will not match. + # + # { name: 'Rob', years: '28' }.assert_valid_keys(:name, :age) # => raises "ArgumentError: Unknown key: :years. Valid keys are: :name, :age" + # { name: 'Rob', age: '28' }.assert_valid_keys('name', 'age') # => raises "ArgumentError: Unknown key: :name. Valid keys are: 'name', 'age'" + # { name: 'Rob', age: '28' }.assert_valid_keys(:name, :age) # => passes, raises nothing + def assert_valid_keys(*valid_keys) + valid_keys.flatten! + each_key do |k| + unless valid_keys.include?(k) + raise ArgumentError.new("Unknown key: #{k.inspect}. Valid keys are: #{valid_keys.map(&:inspect).join(', ')}") + end + end + end + + # Returns a new hash with all keys converted by the block operation. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_transform_keys{ |key| key.to_s.upcase } + # # => {"PERSON"=>{"NAME"=>"Rob", "AGE"=>"28"}} + def deep_transform_keys(&block) + _deep_transform_keys_in_object(self, &block) + end + + # Destructively converts all keys by using the block operation. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_transform_keys!(&block) + _deep_transform_keys_in_object!(self, &block) + end + + # Returns a new hash with all keys converted to strings. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + # + # hash = { person: { name: 'Rob', age: '28' } } + # + # hash.deep_stringify_keys + # # => {"person"=>{"name"=>"Rob", "age"=>"28"}} + def deep_stringify_keys + deep_transform_keys(&:to_s) + end + + # Destructively converts all keys to strings. + # This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_stringify_keys! + deep_transform_keys!(&:to_s) + end + + # Returns a new hash with all keys converted to symbols, as long as + # they respond to +to_sym+. This includes the keys from the root hash + # and from all nested hashes and arrays. + # + # hash = { 'person' => { 'name' => 'Rob', 'age' => '28' } } + # + # hash.deep_symbolize_keys + # # => {:person=>{:name=>"Rob", :age=>"28"}} + def deep_symbolize_keys + deep_transform_keys { |key| key.to_sym rescue key } + end + + # Destructively converts all keys to symbols, as long as they respond + # to +to_sym+. This includes the keys from the root hash and from all + # nested hashes and arrays. + def deep_symbolize_keys! + deep_transform_keys! { |key| key.to_sym rescue key } + end + + private + # Support methods for deep transforming nested hashes and arrays. + def _deep_transform_keys_in_object(object, &block) + case object + when Hash + object.each_with_object({}) do |(key, value), result| + result[yield(key)] = _deep_transform_keys_in_object(value, &block) + end + when Array + object.map { |e| _deep_transform_keys_in_object(e, &block) } + else + object + end + end + + def _deep_transform_keys_in_object!(object, &block) + case object + when Hash + object.keys.each do |key| + value = object.delete(key) + object[yield(key)] = _deep_transform_keys_in_object!(value, &block) + end + object + when Array + object.map! 
{ |e| _deep_transform_keys_in_object!(e, &block) } + else + object + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/reverse_merge.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/reverse_merge.rb new file mode 100644 index 0000000..ef8d592 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/reverse_merge.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class Hash + # Merges the caller into +other_hash+. For example, + # + # options = options.reverse_merge(size: 25, velocity: 10) + # + # is equivalent to + # + # options = { size: 25, velocity: 10 }.merge(options) + # + # This is particularly useful for initializing an options hash + # with default values. + def reverse_merge(other_hash) + other_hash.merge(self) + end + alias_method :with_defaults, :reverse_merge + + # Destructive +reverse_merge+. + def reverse_merge!(other_hash) + replace(reverse_merge(other_hash)) + end + alias_method :reverse_update, :reverse_merge! + alias_method :with_defaults!, :reverse_merge! +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/slice.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/slice.rb new file mode 100644 index 0000000..56bc5de --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/hash/slice.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +class Hash + # Replaces the hash with only the given keys. + # Returns a hash containing the removed key/value pairs. + # + # hash = { a: 1, b: 2, c: 3, d: 4 } + # hash.slice!(:a, :b) # => {:c=>3, :d=>4} + # hash # => {:a=>1, :b=>2} + def slice!(*keys) + omit = slice(*self.keys - keys) + hash = slice(*keys) + hash.default = default + hash.default_proc = default_proc if default_proc + replace(hash) + omit + end + + # Removes and returns the key/value pairs matching the given keys. + # + # hash = { a: 1, b: 2, c: 3, d: 4 } + # hash.extract!(:a, :b) # => {:a=>1, :b=>2} + # hash # => {:c=>3, :d=>4} + def extract!(*keys) + keys.each_with_object(self.class.new) { |key, result| result[key] = delete(key) if has_key?(key) } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer.rb new file mode 100644 index 0000000..d227013 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support/core_ext/integer/multiple" +require "active_support/core_ext/integer/inflections" +require "active_support/core_ext/integer/time" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/inflections.rb new file mode 100644 index 0000000..aef3266 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/inflections.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require "active_support/inflector" + +class Integer + # Ordinalize turns a number into an ordinal string used to denote the + # position in an ordered sequence such as 1st, 2nd, 3rd, 4th. 
+ # + # 1.ordinalize # => "1st" + # 2.ordinalize # => "2nd" + # 1002.ordinalize # => "1002nd" + # 1003.ordinalize # => "1003rd" + # -11.ordinalize # => "-11th" + # -1001.ordinalize # => "-1001st" + def ordinalize + ActiveSupport::Inflector.ordinalize(self) + end + + # Ordinal returns the suffix used to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # 1.ordinal # => "st" + # 2.ordinal # => "nd" + # 1002.ordinal # => "nd" + # 1003.ordinal # => "rd" + # -11.ordinal # => "th" + # -1001.ordinal # => "st" + def ordinal + ActiveSupport::Inflector.ordinal(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/multiple.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/multiple.rb new file mode 100644 index 0000000..bd57a90 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/multiple.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +class Integer + # Check whether the integer is evenly divisible by the argument. + # + # 0.multiple_of?(0) # => true + # 6.multiple_of?(5) # => false + # 10.multiple_of?(2) # => true + def multiple_of?(number) + number == 0 ? self == 0 : self % number == 0 + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/time.rb new file mode 100644 index 0000000..5efb89c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/integer/time.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/numeric/time" + +class Integer + # Returns a Duration instance matching the number of months provided. + # + # 2.months # => 2 months + def months + ActiveSupport::Duration.months(self) + end + alias :month :months + + # Returns a Duration instance matching the number of years provided. + # + # 2.years # => 2 years + def years + ActiveSupport::Duration.years(self) + end + alias :year :years +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel.rb new file mode 100644 index 0000000..7708069 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/concern" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/kernel/singleton_class" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/concern.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/concern.rb new file mode 100644 index 0000000..0b2baed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/concern.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/concerning" + +module Kernel + module_function + + # A shortcut to define a toplevel concern, not within a module. + # + # See Module::Concerning for more. 
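+  #
+  #   # A minimal sketch (added); +Loggable+ is a made-up concern name.
+  #   concern :Loggable do
+  #     included { puts "now loggable" }
+  #   end
+  #
+  #   class Order
+  #     include Loggable # prints "now loggable"
+  #   end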
+ def concern(topic, &module_definition) + Object.concern topic, &module_definition + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/reporting.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/reporting.rb new file mode 100644 index 0000000..9155bd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/reporting.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Kernel + module_function + + # Sets $VERBOSE to +nil+ for the duration of the block and back to its original + # value afterwards. + # + # silence_warnings do + # value = noisy_call # no warning voiced + # end + # + # noisy_call # warning voiced + def silence_warnings + with_warnings(nil) { yield } + end + + # Sets $VERBOSE to +true+ for the duration of the block and back to its + # original value afterwards. + def enable_warnings + with_warnings(true) { yield } + end + + # Sets $VERBOSE for the duration of the block and back to its original + # value afterwards. + def with_warnings(flag) + old_verbose, $VERBOSE = $VERBOSE, flag + yield + ensure + $VERBOSE = old_verbose + end + + # Blocks and ignores any exception passed as argument if raised within the block. + # + # suppress(ZeroDivisionError) do + # 1/0 + # puts 'This code is NOT reached' + # end + # + # puts 'This code gets executed and nothing related to ZeroDivisionError was seen' + def suppress(*exception_classes) + yield + rescue *exception_classes + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/singleton_class.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/singleton_class.rb new file mode 100644 index 0000000..6715eba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/kernel/singleton_class.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +module Kernel + # class_eval on an object acts like singleton_class.class_eval. + def class_eval(*args, &block) + singleton_class.class_eval(*args, &block) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/load_error.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/load_error.rb new file mode 100644 index 0000000..03df2dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/load_error.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +class LoadError + # Returns true if the given path name (except perhaps for the ".rb" + # extension) is the missing file which caused the exception to be raised. 
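+  #
+  #   # Illustrative example (added); "some_missing_file" is a made-up name.
+  #   begin
+  #     require "some_missing_file"
+  #   rescue LoadError => e
+  #     e.is_missing?("some_missing_file")    # => true
+  #     e.is_missing?("other_file")           # => false
+  #   end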
+ def is_missing?(location) + location.delete_suffix(".rb") == path.to_s.delete_suffix(".rb") + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/marshal.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/marshal.rb new file mode 100644 index 0000000..5ff0e34 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/marshal.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/inflections" + +module ActiveSupport + module MarshalWithAutoloading # :nodoc: + def load(source, proc = nil) + super(source, proc) + rescue ArgumentError, NameError => exc + if exc.message.match(%r|undefined class/module (.+?)(?:::)?\z|) + # try loading the class/module + loaded = $1.constantize + + raise unless $1 == loaded.name + + # if it is an IO we need to go back to read the object + source.rewind if source.respond_to?(:rewind) + retry + else + raise exc + end + end + end +end + +Marshal.singleton_class.prepend(ActiveSupport::MarshalWithAutoloading) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module.rb new file mode 100644 index 0000000..542af98 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/module/introspection" +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/attribute_accessors_per_thread" +require "active_support/core_ext/module/attr_internal" +require "active_support/core_ext/module/concerning" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/module/deprecation" +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/module/remove_method" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/aliasing.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/aliasing.rb new file mode 100644 index 0000000..6f64d11 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/aliasing.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class Module + # Allows you to make aliases for attributes, which includes + # getter, setter, and a predicate. + # + # class Content < ActiveRecord::Base + # # has a title attribute + # end + # + # class Email < Content + # alias_attribute :subject, :title + # end + # + # e = Email.find(1) + # e.title # => "Superstars" + # e.subject # => "Superstars" + # e.subject? # => true + # e.subject = "Megastars" + # e.title # => "Megastars" + def alias_attribute(new_name, old_name) + # The following reader methods use an explicit `self` receiver in order to + # support aliases that start with an uppercase letter. Otherwise, they would + # be resolved as constants instead. 
+ module_eval <<-STR, __FILE__, __LINE__ + 1 + def #{new_name}; self.#{old_name}; end # def subject; self.title; end + def #{new_name}?; self.#{old_name}?; end # def subject?; self.title?; end + def #{new_name}=(v); self.#{old_name} = v; end # def subject=(v); self.title = v; end + STR + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/anonymous.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/anonymous.rb new file mode 100644 index 0000000..d1c86b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/anonymous.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +class Module + # A module may or may not have a name. + # + # module M; end + # M.name # => "M" + # + # m = Module.new + # m.name # => nil + # + # +anonymous?+ method returns true if module does not have a name, false otherwise: + # + # Module.new.anonymous? # => true + # + # module M; end + # M.anonymous? # => false + # + # A module gets a name when it is first assigned to a constant. Either + # via the +module+ or +class+ keyword or by an explicit assignment: + # + # m = Module.new # creates an anonymous module + # m.anonymous? # => true + # M = m # m gets a name here as a side-effect + # m.name # => "M" + # m.anonymous? # => false + def anonymous? + name.nil? + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attr_internal.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attr_internal.rb new file mode 100644 index 0000000..3bd66ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attr_internal.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +class Module + # Declares an attribute reader backed by an internally-named instance variable. + def attr_internal_reader(*attrs) + attrs.each { |attr_name| attr_internal_define(attr_name, :reader) } + end + + # Declares an attribute writer backed by an internally-named instance variable. + def attr_internal_writer(*attrs) + attrs.each { |attr_name| attr_internal_define(attr_name, :writer) } + end + + # Declares an attribute reader and writer backed by an internally-named instance + # variable. 
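+  #
+  #   # Illustrative example (added); +Request+ is a made-up class.
+  #   class Request
+  #     attr_internal_accessor :env
+  #   end
+  #
+  #   r = Request.new
+  #   r.env = { "HTTP_HOST" => "example.com" }
+  #   r.instance_variable_get(:@_env) # => { "HTTP_HOST" => "example.com" }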
+ def attr_internal_accessor(*attrs) + attr_internal_reader(*attrs) + attr_internal_writer(*attrs) + end + alias_method :attr_internal, :attr_internal_accessor + + class << self; attr_accessor :attr_internal_naming_format end + self.attr_internal_naming_format = "@_%s" + + private + def attr_internal_ivar_name(attr) + Module.attr_internal_naming_format % attr + end + + def attr_internal_define(attr_name, type) + internal_name = attr_internal_ivar_name(attr_name).delete_prefix("@") + # use native attr_* methods as they are faster on some Ruby implementations + public_send("attr_#{type}", internal_name) + attr_name, internal_name = "#{attr_name}=", "#{internal_name}=" if type == :writer + alias_method attr_name, internal_name + remove_method internal_name + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attribute_accessors.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attribute_accessors.rb new file mode 100644 index 0000000..1db905f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attribute_accessors.rb @@ -0,0 +1,206 @@ +# frozen_string_literal: true + +# Extends the module object with class/module and instance accessors for +# class/module attributes, just like the native attr* accessors for instance +# attributes. +class Module + # Defines a class attribute and creates a class and instance reader methods. + # The underlying class variable is set to +nil+, if it is not previously + # defined. All class and instance methods created will be public, even if + # this method is called with a private or protected access modifier. + # + # module HairColors + # mattr_reader :hair_colors + # end + # + # HairColors.hair_colors # => nil + # HairColors.class_variable_set("@@hair_colors", [:brown, :black]) + # HairColors.hair_colors # => [:brown, :black] + # + # The attribute name must be a valid method name in Ruby. + # + # module Foo + # mattr_reader :"1_Badname" + # end + # # => NameError: invalid attribute name: 1_Badname + # + # To omit the instance reader method, pass + # instance_reader: false or instance_accessor: false. + # + # module HairColors + # mattr_reader :hair_colors, instance_reader: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_reader :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors # => [:brown, :black, :blonde, :red] + def mattr_reader(*syms, instance_reader: true, instance_accessor: true, default: nil, location: nil) + raise TypeError, "module attributes should be defined directly on class, not singleton" if singleton_class? + location ||= caller_locations(1, 1).first + + definition = [] + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /\A[_A-Za-z]\w*\z/.match?(sym) + + definition << "def self.#{sym}; @@#{sym}; end" + + if instance_reader && instance_accessor + definition << "def #{sym}; @@#{sym}; end" + end + + sym_default_value = (block_given? && default.nil?) ? yield : default + class_variable_set("@@#{sym}", sym_default_value) unless sym_default_value.nil? 
&& class_variable_defined?("@@#{sym}") + end + + module_eval(definition.join(";"), location.path, location.lineno) + end + alias :cattr_reader :mattr_reader + + # Defines a class attribute and creates a class and instance writer methods to + # allow assignment to the attribute. All class and instance methods created + # will be public, even if this method is called with a private or protected + # access modifier. + # + # module HairColors + # mattr_writer :hair_colors + # end + # + # class Person + # include HairColors + # end + # + # HairColors.hair_colors = [:brown, :black] + # Person.class_variable_get("@@hair_colors") # => [:brown, :black] + # Person.new.hair_colors = [:blonde, :red] + # HairColors.class_variable_get("@@hair_colors") # => [:blonde, :red] + # + # To omit the instance writer method, pass + # instance_writer: false or instance_accessor: false. + # + # module HairColors + # mattr_writer :hair_colors, instance_writer: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:blonde, :red] # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_writer :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.class_variable_get("@@hair_colors") # => [:brown, :black, :blonde, :red] + def mattr_writer(*syms, instance_writer: true, instance_accessor: true, default: nil, location: nil) + raise TypeError, "module attributes should be defined directly on class, not singleton" if singleton_class? + location ||= caller_locations(1, 1).first + + definition = [] + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /\A[_A-Za-z]\w*\z/.match?(sym) + definition << "def self.#{sym}=(val); @@#{sym} = val; end" + + if instance_writer && instance_accessor + definition << "def #{sym}=(val); @@#{sym} = val; end" + end + + sym_default_value = (block_given? && default.nil?) ? yield : default + class_variable_set("@@#{sym}", sym_default_value) unless sym_default_value.nil? && class_variable_defined?("@@#{sym}") + end + + module_eval(definition.join(";"), location.path, location.lineno) + end + alias :cattr_writer :mattr_writer + + # Defines both class and instance accessors for class attributes. + # All class and instance methods created will be public, even if + # this method is called with a private or protected access modifier. + # + # module HairColors + # mattr_accessor :hair_colors + # end + # + # class Person + # include HairColors + # end + # + # HairColors.hair_colors = [:brown, :black, :blonde, :red] + # HairColors.hair_colors # => [:brown, :black, :blonde, :red] + # Person.new.hair_colors # => [:brown, :black, :blonde, :red] + # + # If a subclass changes the value then that would also change the value for + # parent class. Similarly if parent class changes the value then that would + # change the value of subclasses too. + # + # class Citizen < Person + # end + # + # Citizen.new.hair_colors << :blue + # Person.new.hair_colors # => [:brown, :black, :blonde, :red, :blue] + # + # To omit the instance writer method, pass instance_writer: false. + # To omit the instance reader method, pass instance_reader: false. 
+ # + # module HairColors + # mattr_accessor :hair_colors, instance_writer: false, instance_reader: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:brown] # => NoMethodError + # Person.new.hair_colors # => NoMethodError + # + # Or pass instance_accessor: false, to omit both instance methods. + # + # module HairColors + # mattr_accessor :hair_colors, instance_accessor: false + # end + # + # class Person + # include HairColors + # end + # + # Person.new.hair_colors = [:brown] # => NoMethodError + # Person.new.hair_colors # => NoMethodError + # + # You can set a default value for the attribute. + # + # module HairColors + # mattr_accessor :hair_colors, default: [:brown, :black, :blonde, :red] + # end + # + # class Person + # include HairColors + # end + # + # Person.class_variable_get("@@hair_colors") # => [:brown, :black, :blonde, :red] + def mattr_accessor(*syms, instance_reader: true, instance_writer: true, instance_accessor: true, default: nil, &blk) + location = caller_locations(1, 1).first + mattr_reader(*syms, instance_reader: instance_reader, instance_accessor: instance_accessor, default: default, location: location, &blk) + mattr_writer(*syms, instance_writer: instance_writer, instance_accessor: instance_accessor, default: default, location: location) + end + alias :cattr_accessor :mattr_accessor +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb new file mode 100644 index 0000000..ea40343 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/attribute_accessors_per_thread.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true + +# Extends the module object with class/module and instance accessors for +# class/module attributes, just like the native attr* accessors for instance +# attributes, but does so on a per-thread basis. +# +# So the values are scoped within the Thread.current space under the class name +# of the module. +class Module + # Defines a per-thread class attribute and creates class and instance reader methods. + # The underlying per-thread class variable is set to +nil+, if it is not previously defined. + # + # module Current + # thread_mattr_reader :user + # end + # + # Current.user # => nil + # Thread.current[:attr_Current_user] = "DHH" + # Current.user # => "DHH" + # + # The attribute name must be a valid method name in Ruby. + # + # module Foo + # thread_mattr_reader :"1_Badname" + # end + # # => NameError: invalid attribute name: 1_Badname + # + # To omit the instance reader method, pass + # instance_reader: false or instance_accessor: false. + # + # class Current + # thread_mattr_reader :user, instance_reader: false + # end + # + # Current.new.user # => NoMethodError + def thread_mattr_reader(*syms, instance_reader: true, instance_accessor: true, default: nil) # :nodoc: + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /^[_A-Za-z]\w*$/.match?(sym) + + # The following generated method concatenates `name` because we want it + # to work with inheritance via polymorphism. 
+ class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def self.#{sym} + Thread.current["attr_" + name + "_#{sym}"] + end + EOS + + if instance_reader && instance_accessor + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym} + self.class.#{sym} + end + EOS + end + + Thread.current["attr_" + name + "_#{sym}"] = default unless default.nil? + end + end + alias :thread_cattr_reader :thread_mattr_reader + + # Defines a per-thread class attribute and creates a class and instance writer methods to + # allow assignment to the attribute. + # + # module Current + # thread_mattr_writer :user + # end + # + # Current.user = "DHH" + # Thread.current[:attr_Current_user] # => "DHH" + # + # To omit the instance writer method, pass + # instance_writer: false or instance_accessor: false. + # + # class Current + # thread_mattr_writer :user, instance_writer: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + def thread_mattr_writer(*syms, instance_writer: true, instance_accessor: true, default: nil) # :nodoc: + syms.each do |sym| + raise NameError.new("invalid attribute name: #{sym}") unless /^[_A-Za-z]\w*$/.match?(sym) + + # The following generated method concatenates `name` because we want it + # to work with inheritance via polymorphism. + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def self.#{sym}=(obj) + Thread.current["attr_" + name + "_#{sym}"] = obj + end + EOS + + if instance_writer && instance_accessor + class_eval(<<-EOS, __FILE__, __LINE__ + 1) + def #{sym}=(obj) + self.class.#{sym} = obj + end + EOS + end + + public_send("#{sym}=", default) unless default.nil? + end + end + alias :thread_cattr_writer :thread_mattr_writer + + # Defines both class and instance accessors for class attributes. + # + # class Account + # thread_mattr_accessor :user + # end + # + # Account.user = "DHH" + # Account.user # => "DHH" + # Account.new.user # => "DHH" + # + # If a subclass changes the value, the parent class' value is not changed. + # Similarly, if the parent class changes the value, the value of subclasses + # is not changed. + # + # class Customer < Account + # end + # + # Customer.user = "Rafael" + # Customer.user # => "Rafael" + # Account.user # => "DHH" + # + # To omit the instance writer method, pass instance_writer: false. + # To omit the instance reader method, pass instance_reader: false. + # + # class Current + # thread_mattr_accessor :user, instance_writer: false, instance_reader: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + # Current.new.user # => NoMethodError + # + # Or pass instance_accessor: false, to omit both instance methods. 
+ # + # class Current + # thread_mattr_accessor :user, instance_accessor: false + # end + # + # Current.new.user = "DHH" # => NoMethodError + # Current.new.user # => NoMethodError + def thread_mattr_accessor(*syms, instance_reader: true, instance_writer: true, instance_accessor: true, default: nil) + thread_mattr_reader(*syms, instance_reader: instance_reader, instance_accessor: instance_accessor, default: default) + thread_mattr_writer(*syms, instance_writer: instance_writer, instance_accessor: instance_accessor) + end + alias :thread_cattr_accessor :thread_mattr_accessor +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/concerning.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/concerning.rb new file mode 100644 index 0000000..36f5f85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/concerning.rb @@ -0,0 +1,140 @@ +# frozen_string_literal: true + +require "active_support/concern" + +class Module + # = Bite-sized separation of concerns + # + # We often find ourselves with a medium-sized chunk of behavior that we'd + # like to extract, but only mix in to a single class. + # + # Extracting a plain old Ruby object to encapsulate it and collaborate or + # delegate to the original object is often a good choice, but when there's + # no additional state to encapsulate or we're making DSL-style declarations + # about the parent class, introducing new collaborators can obfuscate rather + # than simplify. + # + # The typical route is to just dump everything in a monolithic class, perhaps + # with a comment, as a least-bad alternative. Using modules in separate files + # means tedious sifting to get a big-picture view. + # + # = Dissatisfying ways to separate small concerns + # + # == Using comments: + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # ## Event tracking + # has_many :events + # + # before_create :track_creation + # + # private + # def track_creation + # # ... + # end + # end + # + # == With an inline module: + # + # Noisy syntax. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # module EventTracking + # extend ActiveSupport::Concern + # + # included do + # has_many :events + # before_create :track_creation + # end + # + # private + # def track_creation + # # ... + # end + # end + # include EventTracking + # end + # + # == Mix-in noise exiled to its own file: + # + # Once our chunk of behavior starts pushing the scroll-to-understand-it + # boundary, we give in and move it to a separate file. At this size, the + # increased overhead can be a reasonable tradeoff even if it reduces our + # at-a-glance perception of how things work. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # include TodoEventTracking + # end + # + # = Introducing Module#concerning + # + # By quieting the mix-in noise, we arrive at a natural, low-ceremony way to + # separate bite-sized concerns. + # + # class Todo < ApplicationRecord + # # Other todo implementation + # # ... + # + # concerning :EventTracking do + # included do + # has_many :events + # before_create :track_creation + # end + # + # private + # def track_creation + # # ... + # end + # end + # end + # + # Todo.ancestors + # # => [Todo, Todo::EventTracking, ApplicationRecord, Object] + # + # This small step has some wonderful ripple effects. 
We can + # * grok the behavior of our class in one glance, + # * clean up monolithic junk-drawer classes by separating their concerns, and + # * stop leaning on protected/private for crude "this is internal stuff" modularity. + # + # === Prepending concerning + # + # concerning supports a prepend: true argument which will prepend the + # concern instead of using include for it. + module Concerning + # Define a new concern and mix it in. + def concerning(topic, prepend: false, &block) + method = prepend ? :prepend : :include + __send__(method, concern(topic, &block)) + end + + # A low-cruft shortcut to define a concern. + # + # concern :EventTracking do + # ... + # end + # + # is equivalent to + # + # module EventTracking + # extend ActiveSupport::Concern + # + # ... + # end + def concern(topic, &module_definition) + const_set topic, Module.new { + extend ::ActiveSupport::Concern + module_eval(&module_definition) + } + end + end + include Concerning +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/delegation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/delegation.rb new file mode 100644 index 0000000..a44a3e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/delegation.rb @@ -0,0 +1,330 @@ +# frozen_string_literal: true + +require "set" + +class Module + # Error generated by +delegate+ when a method is called on +nil+ and +allow_nil+ + # option is not used. + class DelegationError < NoMethodError; end + + RUBY_RESERVED_KEYWORDS = %w(alias and BEGIN begin break case class def defined? do + else elsif END end ensure false for if in module next nil not or redo rescue retry + return self super then true undef unless until when while yield) + DELEGATION_RESERVED_KEYWORDS = %w(_ arg args block) + DELEGATION_RESERVED_METHOD_NAMES = Set.new( + RUBY_RESERVED_KEYWORDS + DELEGATION_RESERVED_KEYWORDS + ).freeze + + # Provides a +delegate+ class method to easily expose contained objects' + # public methods as your own. + # + # ==== Options + # * :to - Specifies the target object name as a symbol or string + # * :prefix - Prefixes the new method with the target name or a custom prefix + # * :allow_nil - If set to true, prevents a +Module::DelegationError+ + # from being raised + # * :private - If set to true, changes method visibility to private + # + # The macro receives one or more method names (specified as symbols or + # strings) and the name of the target object via the :to option + # (also a symbol or string). 
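+  #
+  # As a minimal sketch with a plain Ruby object (+Wrapper+ is a made-up class):
+  #
+  #   class Wrapper
+  #     attr_reader :target
+  #     delegate :upcase, :size, to: :target
+  #
+  #     def initialize(target)
+  #       @target = target
+  #     end
+  #   end
+  #
+  #   Wrapper.new("hi").upcase # => "HI"
+  #   Wrapper.new("hi").size   # => 2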
+ # + # Delegation is particularly useful with Active Record associations: + # + # class Greeter < ActiveRecord::Base + # def hello + # 'hello' + # end + # + # def goodbye + # 'goodbye' + # end + # end + # + # class Foo < ActiveRecord::Base + # belongs_to :greeter + # delegate :hello, to: :greeter + # end + # + # Foo.new.hello # => "hello" + # Foo.new.goodbye # => NoMethodError: undefined method `goodbye' for # + # + # Multiple delegates to the same target are allowed: + # + # class Foo < ActiveRecord::Base + # belongs_to :greeter + # delegate :hello, :goodbye, to: :greeter + # end + # + # Foo.new.goodbye # => "goodbye" + # + # Methods can be delegated to instance variables, class variables, or constants + # by providing them as a symbols: + # + # class Foo + # CONSTANT_ARRAY = [0,1,2,3] + # @@class_array = [4,5,6,7] + # + # def initialize + # @instance_array = [8,9,10,11] + # end + # delegate :sum, to: :CONSTANT_ARRAY + # delegate :min, to: :@@class_array + # delegate :max, to: :@instance_array + # end + # + # Foo.new.sum # => 6 + # Foo.new.min # => 4 + # Foo.new.max # => 11 + # + # It's also possible to delegate a method to the class by using +:class+: + # + # class Foo + # def self.hello + # "world" + # end + # + # delegate :hello, to: :class + # end + # + # Foo.new.hello # => "world" + # + # Delegates can optionally be prefixed using the :prefix option. If the value + # is true, the delegate methods are prefixed with the name of the object being + # delegated to. + # + # Person = Struct.new(:name, :address) + # + # class Invoice < Struct.new(:client) + # delegate :name, :address, to: :client, prefix: true + # end + # + # john_doe = Person.new('John Doe', 'Vimmersvej 13') + # invoice = Invoice.new(john_doe) + # invoice.client_name # => "John Doe" + # invoice.client_address # => "Vimmersvej 13" + # + # It is also possible to supply a custom prefix. + # + # class Invoice < Struct.new(:client) + # delegate :name, :address, to: :client, prefix: :customer + # end + # + # invoice = Invoice.new(john_doe) + # invoice.customer_name # => 'John Doe' + # invoice.customer_address # => 'Vimmersvej 13' + # + # The delegated methods are public by default. + # Pass private: true to change that. + # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :first_name, to: :profile + # delegate :date_of_birth, to: :profile, private: true + # + # def age + # Date.today.year - date_of_birth.year + # end + # end + # + # User.new.first_name # => "Tomas" + # User.new.date_of_birth # => NoMethodError: private method `date_of_birth' called for # + # User.new.age # => 2 + # + # If the target is +nil+ and does not respond to the delegated method a + # +Module::DelegationError+ is raised. If you wish to instead return +nil+, + # use the :allow_nil option. 
+ # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :age, to: :profile + # end + # + # User.new.age + # # => Module::DelegationError: User#age delegated to profile.age, but profile is nil + # + # But if not having a profile yet is fine and should not be an error + # condition: + # + # class User < ActiveRecord::Base + # has_one :profile + # delegate :age, to: :profile, allow_nil: true + # end + # + # User.new.age # nil + # + # Note that if the target is not +nil+ then the call is attempted regardless of the + # :allow_nil option, and thus an exception is still raised if said object + # does not respond to the method: + # + # class Foo + # def initialize(bar) + # @bar = bar + # end + # + # delegate :name, to: :@bar, allow_nil: true + # end + # + # Foo.new("Bar").name # raises NoMethodError: undefined method `name' + # + # The target method must be public, otherwise it will raise +NoMethodError+. + def delegate(*methods, to: nil, prefix: nil, allow_nil: nil, private: nil) + unless to + raise ArgumentError, "Delegation needs a target. Supply a keyword argument 'to' (e.g. delegate :hello, to: :greeter)." + end + + if prefix == true && /^[^a-z_]/.match?(to) + raise ArgumentError, "Can only automatically set the delegation prefix when delegating to a method." + end + + method_prefix = \ + if prefix + "#{prefix == true ? to : prefix}_" + else + "" + end + + location = caller_locations(1, 1).first + file, line = location.path, location.lineno + + to = to.to_s + to = "self.#{to}" if DELEGATION_RESERVED_METHOD_NAMES.include?(to) + + method_def = [] + method_names = [] + + methods.map do |method| + method_name = prefix ? "#{method_prefix}#{method}" : method + method_names << method_name.to_sym + + # Attribute writer methods only accept one argument. Makes sure []= + # methods still accept two arguments. + definition = if /[^\]]=$/.match?(method) + "arg" + elsif RUBY_VERSION >= "2.7" + "..." + else + "*args, &block" + end + + # The following generated method calls the target exactly once, storing + # the returned value in a dummy variable. + # + # Reason is twofold: On one hand doing less calls is in general better. + # On the other hand it could be that the target has side-effects, + # whereas conceptually, from the user point of view, the delegator should + # be doing one call. + if allow_nil + method = method.to_s + + method_def << + "def #{method_name}(#{definition})" << + " _ = #{to}" << + " if !_.nil? || nil.respond_to?(:#{method})" << + " _.#{method}(#{definition})" << + " end" << + "end" + else + method = method.to_s + method_name = method_name.to_s + + method_def << + "def #{method_name}(#{definition})" << + " _ = #{to}" << + " _.#{method}(#{definition})" << + "rescue NoMethodError => e" << + " if _.nil? 
&& e.name == :#{method}" << + %( raise DelegationError, "#{self}##{method_name} delegated to #{to}.#{method}, but #{to} is nil: \#{self.inspect}") << + " else" << + " raise" << + " end" << + "end" + end + end + module_eval(method_def.join(";"), file, line) + private(*method_names) if private + method_names + end + + # When building decorators, a common pattern may emerge: + # + # class Partition + # def initialize(event) + # @event = event + # end + # + # def person + # detail.person || creator + # end + # + # private + # def respond_to_missing?(name, include_private = false) + # @event.respond_to?(name, include_private) + # end + # + # def method_missing(method, *args, &block) + # @event.send(method, *args, &block) + # end + # end + # + # With Module#delegate_missing_to, the above is condensed to: + # + # class Partition + # delegate_missing_to :@event + # + # def initialize(event) + # @event = event + # end + # + # def person + # detail.person || creator + # end + # end + # + # The target can be anything callable within the object, e.g. instance + # variables, methods, constants, etc. + # + # The delegated method must be public on the target, otherwise it will + # raise +DelegationError+. If you wish to instead return +nil+, + # use the :allow_nil option. + # + # The marshal_dump and _dump methods are exempt from + # delegation due to possible interference when calling + # Marshal.dump(object), should the delegation target method + # of object add or remove instance variables. + def delegate_missing_to(target, allow_nil: nil) + target = target.to_s + target = "self.#{target}" if DELEGATION_RESERVED_METHOD_NAMES.include?(target) + + module_eval <<-RUBY, __FILE__, __LINE__ + 1 + def respond_to_missing?(name, include_private = false) + # It may look like an oversight, but we deliberately do not pass + # +include_private+, because they do not get delegated. + + return false if name == :marshal_dump || name == :_dump + #{target}.respond_to?(name) || super + end + + def method_missing(method, *args, &block) + if #{target}.respond_to?(method) + #{target}.public_send(method, *args, &block) + else + begin + super + rescue NoMethodError + if #{target}.nil? + if #{allow_nil == true} + nil + else + raise DelegationError, "\#{method} delegated to #{target}, but #{target} is nil" + end + else + raise + end + end + end + end + ruby2_keywords(:method_missing) if respond_to?(:ruby2_keywords, true) + RUBY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/deprecation.rb new file mode 100644 index 0000000..71c42eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/deprecation.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +class Module + # deprecate :foo + # deprecate bar: 'message' + # deprecate :foo, :bar, baz: 'warning!', qux: 'gone!' + # + # You can also use custom deprecator instance: + # + # deprecate :foo, deprecator: MyLib::Deprecator.new + # deprecate :foo, bar: "warning!", deprecator: MyLib::Deprecator.new + # + # \Custom deprecators must respond to deprecation_warning(deprecated_method_name, message, caller_backtrace) + # method where you can implement your custom warning behavior. 
+ # + # class MyLib::Deprecator + # def deprecation_warning(deprecated_method_name, message, caller_backtrace = nil) + # message = "#{deprecated_method_name} is deprecated and will be removed from MyLibrary | #{message}" + # Kernel.warn message + # end + # end + def deprecate(*method_names) + ActiveSupport::Deprecation.deprecate_methods(self, *method_names) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/introspection.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/introspection.rb new file mode 100644 index 0000000..7cdcab5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/introspection.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/filters" +require "active_support/inflector" + +class Module + # Returns the name of the module containing this one. + # + # M::N.module_parent_name # => "M" + def module_parent_name + if defined?(@parent_name) + @parent_name + else + parent_name = name =~ /::[^:]+\z/ ? -$` : nil + @parent_name = parent_name unless frozen? + parent_name + end + end + + # Returns the module which contains this one according to its name. + # + # module M + # module N + # end + # end + # X = M::N + # + # M::N.module_parent # => M + # X.module_parent # => M + # + # The parent of top-level and anonymous modules is Object. + # + # M.module_parent # => Object + # Module.new.module_parent # => Object + def module_parent + module_parent_name ? ActiveSupport::Inflector.constantize(module_parent_name) : Object + end + + # Returns all the parents of this module according to its name, ordered from + # nested outwards. The receiver is not contained within the result. + # + # module M + # module N + # end + # end + # X = M::N + # + # M.module_parents # => [Object] + # M::N.module_parents # => [M, Object] + # X.module_parents # => [M, Object] + def module_parents + parents = [] + if module_parent_name + parts = module_parent_name.split("::") + until parts.empty? + parents << ActiveSupport::Inflector.constantize(parts * "::") + parts.pop + end + end + parents << Object unless parents.include? Object + parents + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/redefine_method.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/redefine_method.rb new file mode 100644 index 0000000..5bd8e6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/redefine_method.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +class Module + # Marks the named method as intended to be redefined, if it exists. + # Suppresses the Ruby method redefinition warning. Prefer + # #redefine_method where possible. + def silence_redefinition_of_method(method) + if method_defined?(method) || private_method_defined?(method) + # This suppresses the "method redefined" warning; the self-alias + # looks odd, but means we don't need to generate a unique name + alias_method method, method + end + end + + # Replaces the existing method definition, if there is one, with the passed + # block as its body. 
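+  #
+  # A small usage sketch (+Report+ is a made-up class):
+  #
+  #   class Report
+  #     def title
+  #       "plain"
+  #     end
+  #   end
+  #
+  #   Report.redefine_method(:title) { "pretty" }
+  #   Report.new.title # => "pretty"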
+ def redefine_method(method, &block) + visibility = method_visibility(method) + silence_redefinition_of_method(method) + define_method(method, &block) + send(visibility, method) + end + + # Replaces the existing singleton method definition, if there is one, with + # the passed block as its body. + def redefine_singleton_method(method, &block) + singleton_class.redefine_method(method, &block) + end + + def method_visibility(method) # :nodoc: + case + when private_method_defined?(method) + :private + when protected_method_defined?(method) + :protected + else + :public + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/remove_method.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/remove_method.rb new file mode 100644 index 0000000..97eb5f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/module/remove_method.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" + +class Module + # Removes the named method, if it exists. + def remove_possible_method(method) + if method_defined?(method) || private_method_defined?(method) + undef_method(method) + end + end + + # Removes the named singleton method, if it exists. + def remove_possible_singleton_method(method) + singleton_class.remove_possible_method(method) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/name_error.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/name_error.rb new file mode 100644 index 0000000..15255d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/name_error.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +class NameError + # Extract the name of the missing constant from the exception message. + # + # begin + # HelloWorld + # rescue NameError => e + # e.missing_name + # end + # # => "HelloWorld" + def missing_name + # Since ruby v2.3.0 `did_you_mean` gem is loaded by default. + # It extends NameError#message with spell corrections which are SLOW. + # We should use original_message message instead. + message = respond_to?(:original_message) ? original_message : self.message + return unless message.start_with?("uninitialized constant ") + + receiver = begin + self.receiver + rescue ArgumentError + nil + end + + if receiver == Object + name.to_s + elsif receiver + "#{real_mod_name(receiver)}::#{self.name}" + else + if match = message.match(/((::)?([A-Z]\w*)(::[A-Z]\w*)*)$/) + match[1] + end + end + end + + # Was this exception raised because the given name was missing? + # + # begin + # HelloWorld + # rescue NameError => e + # e.missing_name?("HelloWorld") + # end + # # => true + def missing_name?(name) + if name.is_a? 
Symbol + self.name == name + else + missing_name == name.to_s + end + end + + private + UNBOUND_METHOD_MODULE_NAME = Module.instance_method(:name) + private_constant :UNBOUND_METHOD_MODULE_NAME + + if UnboundMethod.method_defined?(:bind_call) + def real_mod_name(mod) + UNBOUND_METHOD_MODULE_NAME.bind_call(mod) + end + else + def real_mod_name(mod) + UNBOUND_METHOD_MODULE_NAME.bind(mod).call + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric.rb new file mode 100644 index 0000000..fe77847 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require "active_support/core_ext/numeric/bytes" +require "active_support/core_ext/numeric/time" +require "active_support/core_ext/numeric/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/bytes.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/bytes.rb new file mode 100644 index 0000000..b002eba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/bytes.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +class Numeric + KILOBYTE = 1024 + MEGABYTE = KILOBYTE * 1024 + GIGABYTE = MEGABYTE * 1024 + TERABYTE = GIGABYTE * 1024 + PETABYTE = TERABYTE * 1024 + EXABYTE = PETABYTE * 1024 + + # Enables the use of byte calculations and declarations, like 45.bytes + 2.6.megabytes + # + # 2.bytes # => 2 + def bytes + self + end + alias :byte :bytes + + # Returns the number of bytes equivalent to the kilobytes provided. + # + # 2.kilobytes # => 2048 + def kilobytes + self * KILOBYTE + end + alias :kilobyte :kilobytes + + # Returns the number of bytes equivalent to the megabytes provided. + # + # 2.megabytes # => 2_097_152 + def megabytes + self * MEGABYTE + end + alias :megabyte :megabytes + + # Returns the number of bytes equivalent to the gigabytes provided. + # + # 2.gigabytes # => 2_147_483_648 + def gigabytes + self * GIGABYTE + end + alias :gigabyte :gigabytes + + # Returns the number of bytes equivalent to the terabytes provided. + # + # 2.terabytes # => 2_199_023_255_552 + def terabytes + self * TERABYTE + end + alias :terabyte :terabytes + + # Returns the number of bytes equivalent to the petabytes provided. + # + # 2.petabytes # => 2_251_799_813_685_248 + def petabytes + self * PETABYTE + end + alias :petabyte :petabytes + + # Returns the number of bytes equivalent to the exabytes provided. + # + # 2.exabytes # => 2_305_843_009_213_693_952 + def exabytes + self * EXABYTE + end + alias :exabyte :exabytes +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/conversions.rb new file mode 100644 index 0000000..3e623e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/conversions.rb @@ -0,0 +1,140 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/number_helper" + +module ActiveSupport + module NumericWithFormat + # Provides options for converting numbers into formatted strings. 
+ # Options are provided for phone numbers, currency, percentage, + # precision, positional notation, file size and pretty printing. + # + # ==== Options + # + # For details on which formats use which options, see ActiveSupport::NumberHelper + # + # ==== Examples + # + # Phone Numbers: + # 5551234.to_s(:phone) # => "555-1234" + # 1235551234.to_s(:phone) # => "123-555-1234" + # 1235551234.to_s(:phone, area_code: true) # => "(123) 555-1234" + # 1235551234.to_s(:phone, delimiter: ' ') # => "123 555 1234" + # 1235551234.to_s(:phone, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # 1235551234.to_s(:phone, country_code: 1) # => "+1-123-555-1234" + # 1235551234.to_s(:phone, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # Currency: + # 1234567890.50.to_s(:currency) # => "$1,234,567,890.50" + # 1234567890.506.to_s(:currency) # => "$1,234,567,890.51" + # 1234567890.506.to_s(:currency, precision: 3) # => "$1,234,567,890.506" + # 1234567890.506.to_s(:currency, round_mode: :down) # => "$1,234,567,890.50" + # 1234567890.506.to_s(:currency, locale: :fr) # => "1 234 567 890,51 €" + # -1234567890.50.to_s(:currency, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # 1234567890.50.to_s(:currency, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # 1234567890.50.to_s(:currency, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + # + # Percentage: + # 100.to_s(:percentage) # => "100.000%" + # 100.to_s(:percentage, precision: 0) # => "100%" + # 1000.to_s(:percentage, delimiter: '.', separator: ',') # => "1.000,000%" + # 302.24398923423.to_s(:percentage, precision: 5) # => "302.24399%" + # 302.24398923423.to_s(:percentage, round_mode: :down) # => "302.243%" + # 1000.to_s(:percentage, locale: :fr) # => "1 000,000%" + # 100.to_s(:percentage, format: '%n %') # => "100.000 %" + # + # Delimited: + # 12345678.to_s(:delimited) # => "12,345,678" + # 12345678.05.to_s(:delimited) # => "12,345,678.05" + # 12345678.to_s(:delimited, delimiter: '.') # => "12.345.678" + # 12345678.to_s(:delimited, delimiter: ',') # => "12,345,678" + # 12345678.05.to_s(:delimited, separator: ' ') # => "12,345,678 05" + # 12345678.05.to_s(:delimited, locale: :fr) # => "12 345 678,05" + # 98765432.98.to_s(:delimited, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # + # Rounded: + # 111.2345.to_s(:rounded) # => "111.235" + # 111.2345.to_s(:rounded, precision: 2) # => "111.23" + # 111.2345.to_s(:rounded, precision: 2, round_mode: :up) # => "111.24" + # 13.to_s(:rounded, precision: 5) # => "13.00000" + # 389.32314.to_s(:rounded, precision: 0) # => "389" + # 111.2345.to_s(:rounded, significant: true) # => "111" + # 111.2345.to_s(:rounded, precision: 1, significant: true) # => "100" + # 13.to_s(:rounded, precision: 5, significant: true) # => "13.000" + # 111.234.to_s(:rounded, locale: :fr) # => "111,234" + # 13.to_s(:rounded, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # 389.32314.to_s(:rounded, precision: 4, significant: true) # => "389.3" + # 1111.2345.to_s(:rounded, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + # + # Human-friendly size in Bytes: + # 123.to_s(:human_size) # => "123 Bytes" + # 1234.to_s(:human_size) # => "1.21 KB" + # 12345.to_s(:human_size) # => "12.1 KB" + # 1234567.to_s(:human_size) # => "1.18 MB" + # 1234567890.to_s(:human_size) # => "1.15 GB" + # 1234567890123.to_s(:human_size) # => "1.12 TB" + # 1234567890123456.to_s(:human_size) # 
=> "1.1 PB" + # 1234567890123456789.to_s(:human_size) # => "1.07 EB" + # 1234567.to_s(:human_size, precision: 2) # => "1.2 MB" + # 1234567.to_s(:human_size, precision: 2, round_mode: :up) # => "1.3 MB" + # 483989.to_s(:human_size, precision: 2) # => "470 KB" + # 1234567.to_s(:human_size, precision: 2, separator: ',') # => "1,2 MB" + # 1234567890123.to_s(:human_size, precision: 5) # => "1.1228 TB" + # 524288000.to_s(:human_size, precision: 5) # => "500 MB" + # + # Human-friendly format: + # 123.to_s(:human) # => "123" + # 1234.to_s(:human) # => "1.23 Thousand" + # 12345.to_s(:human) # => "12.3 Thousand" + # 1234567.to_s(:human) # => "1.23 Million" + # 1234567890.to_s(:human) # => "1.23 Billion" + # 1234567890123.to_s(:human) # => "1.23 Trillion" + # 1234567890123456.to_s(:human) # => "1.23 Quadrillion" + # 1234567890123456789.to_s(:human) # => "1230 Quadrillion" + # 489939.to_s(:human, precision: 2) # => "490 Thousand" + # 489939.to_s(:human, precision: 2, round_mode: :down) # => "480 Thousand" + # 489939.to_s(:human, precision: 4) # => "489.9 Thousand" + # 1234567.to_s(:human, precision: 4, + # significant: false) # => "1.2346 Million" + # 1234567.to_s(:human, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + def to_s(format = nil, options = nil) + case format + when nil + super() + when Integer, String + super(format) + when :phone + ActiveSupport::NumberHelper.number_to_phone(self, options || {}) + when :currency + ActiveSupport::NumberHelper.number_to_currency(self, options || {}) + when :percentage + ActiveSupport::NumberHelper.number_to_percentage(self, options || {}) + when :delimited + ActiveSupport::NumberHelper.number_to_delimited(self, options || {}) + when :rounded + ActiveSupport::NumberHelper.number_to_rounded(self, options || {}) + when :human + ActiveSupport::NumberHelper.number_to_human(self, options || {}) + when :human_size + ActiveSupport::NumberHelper.number_to_human_size(self, options || {}) + when Symbol + super() + else + super(format) + end + end + end +end + +Integer.prepend ActiveSupport::NumericWithFormat +Float.prepend ActiveSupport::NumericWithFormat +BigDecimal.prepend ActiveSupport::NumericWithFormat diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/time.rb new file mode 100644 index 0000000..bc4627f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/numeric/time.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/date/acts_like" + +class Numeric + # Returns a Duration instance matching the number of seconds provided. + # + # 2.seconds # => 2 seconds + def seconds + ActiveSupport::Duration.seconds(self) + end + alias :second :seconds + + # Returns a Duration instance matching the number of minutes provided. + # + # 2.minutes # => 2 minutes + def minutes + ActiveSupport::Duration.minutes(self) + end + alias :minute :minutes + + # Returns a Duration instance matching the number of hours provided. + # + # 2.hours # => 2 hours + def hours + ActiveSupport::Duration.hours(self) + end + alias :hour :hours + + # Returns a Duration instance matching the number of days provided. 
+ # + # 2.days # => 2 days + def days + ActiveSupport::Duration.days(self) + end + alias :day :days + + # Returns a Duration instance matching the number of weeks provided. + # + # 2.weeks # => 2 weeks + def weeks + ActiveSupport::Duration.weeks(self) + end + alias :week :weeks + + # Returns a Duration instance matching the number of fortnights provided. + # + # 2.fortnights # => 4 weeks + def fortnights + ActiveSupport::Duration.weeks(self * 2) + end + alias :fortnight :fortnights + + # Returns the number of milliseconds equivalent to the seconds provided. + # Used with the standard time durations. + # + # 2.in_milliseconds # => 2000 + # 1.hour.in_milliseconds # => 3600000 + def in_milliseconds + self * 1000 + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object.rb new file mode 100644 index 0000000..efd34cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/object/duplicable" +require "active_support/core_ext/object/deep_dup" +require "active_support/core_ext/object/try" +require "active_support/core_ext/object/inclusion" + +require "active_support/core_ext/object/conversions" +require "active_support/core_ext/object/instance_variables" + +require "active_support/core_ext/object/json" +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/object/with_options" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/acts_like.rb new file mode 100644 index 0000000..403ee20 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/acts_like.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +class Object + # A duck-type assistant method. For example, Active Support extends Date + # to define an acts_like_date? method, and extends Time to define + # acts_like_time?. As a result, we can do x.acts_like?(:time) and + # x.acts_like?(:date) to do duck-type-safe comparisons, since classes that + # we want to act like Time simply need to define an acts_like_time? method. + def acts_like?(duck) + case duck + when :time + respond_to? :acts_like_time? + when :date + respond_to? :acts_like_date? + when :string + respond_to? :acts_like_string? + else + respond_to? :"acts_like_#{duck}?" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/blank.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/blank.rb new file mode 100644 index 0000000..f36fef6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/blank.rb @@ -0,0 +1,155 @@ +# frozen_string_literal: true + +require "concurrent/map" + +class Object + # An object is blank if it's false, empty, or a whitespace string. + # For example, +nil+, '', ' ', [], {}, and +false+ are all blank. + # + # This simplifies + # + # !address || address.empty? + # + # to + # + # address.blank? + # + # @return [true, false] + def blank? + respond_to?(:empty?) ? 
!!empty? : !self + end + + # An object is present if it's not blank. + # + # @return [true, false] + def present? + !blank? + end + + # Returns the receiver if it's present otherwise returns +nil+. + # object.presence is equivalent to + # + # object.present? ? object : nil + # + # For example, something like + # + # state = params[:state] if params[:state].present? + # country = params[:country] if params[:country].present? + # region = state || country || 'US' + # + # becomes + # + # region = params[:state].presence || params[:country].presence || 'US' + # + # @return [Object] + def presence + self if present? + end +end + +class NilClass + # +nil+ is blank: + # + # nil.blank? # => true + # + # @return [true] + def blank? + true + end +end + +class FalseClass + # +false+ is blank: + # + # false.blank? # => true + # + # @return [true] + def blank? + true + end +end + +class TrueClass + # +true+ is not blank: + # + # true.blank? # => false + # + # @return [false] + def blank? + false + end +end + +class Array + # An array is blank if it's empty: + # + # [].blank? # => true + # [1,2,3].blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? +end + +class Hash + # A hash is blank if it's empty: + # + # {}.blank? # => true + # { key: 'value' }.blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? +end + +class String + BLANK_RE = /\A[[:space:]]*\z/ + ENCODED_BLANKS = Concurrent::Map.new do |h, enc| + h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) + end + + # A string is blank if it's empty or contains whitespaces only: + # + # ''.blank? # => true + # ' '.blank? # => true + # "\t\n\r".blank? # => true + # ' blah '.blank? # => false + # + # Unicode whitespace is supported: + # + # "\u00a0".blank? # => true + # + # @return [true, false] + def blank? + # The regexp that matches blank strings is expensive. For the case of empty + # strings we can speed up this method (~3.5x) with an empty? call. The + # penalty for the rest of strings is marginal. + empty? || + begin + BLANK_RE.match?(self) + rescue Encoding::CompatibilityError + ENCODED_BLANKS[self.encoding].match?(self) + end + end +end + +class Numeric #:nodoc: + # No number is blank: + # + # 1.blank? # => false + # 0.blank? # => false + # + # @return [false] + def blank? + false + end +end + +class Time #:nodoc: + # No Time is blank: + # + # Time.now.blank? # => false + # + # @return [false] + def blank? 
+ false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/conversions.rb new file mode 100644 index 0000000..624fb8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/conversions.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/to_param" +require "active_support/core_ext/object/to_query" +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/hash/conversions" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/deep_dup.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/deep_dup.rb new file mode 100644 index 0000000..5c90391 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/deep_dup.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" + +class Object + # Returns a deep copy of object if it's duplicable. If it's + # not duplicable, returns +self+. + # + # object = Object.new + # dup = object.deep_dup + # dup.instance_variable_set(:@a, 1) + # + # object.instance_variable_defined?(:@a) # => false + # dup.instance_variable_defined?(:@a) # => true + def deep_dup + duplicable? ? dup : self + end +end + +class Array + # Returns a deep copy of array. + # + # array = [1, [2, 3]] + # dup = array.deep_dup + # dup[1][2] = 4 + # + # array[1][2] # => nil + # dup[1][2] # => 4 + def deep_dup + map(&:deep_dup) + end +end + +class Hash + # Returns a deep copy of hash. + # + # hash = { a: { b: 'b' } } + # dup = hash.deep_dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => nil + # dup[:a][:c] # => "c" + def deep_dup + hash = dup + each_pair do |key, value| + if (::String === key && key.frozen?) || ::Symbol === key + hash[key] = value.deep_dup + else + hash.delete(key) + hash[key.deep_dup] = value.deep_dup + end + end + hash + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/duplicable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/duplicable.rb new file mode 100644 index 0000000..3ebcdca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/duplicable.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +#-- +# Most objects are cloneable, but not all. For example you can't dup methods: +# +# method(:puts).dup # => TypeError: allocator undefined for Method +# +# Classes may signal their instances are not duplicable removing +dup+/+clone+ +# or raising exceptions from them. So, to dup an arbitrary object you normally +# use an optimistic approach and are ready to catch an exception, say: +# +# arbitrary_object.dup rescue object +# +# Rails dups objects in a few critical spots where they are not that arbitrary. +# That rescue is very expensive (like 40 times slower than a predicate), and it +# is often triggered. +# +# That's why we hardcode the following cases and check duplicable? instead of +# using that rescue idiom. +#++ +class Object + # Can you safely dup this object? + # + # False for method objects; + # true otherwise. + def duplicable? + true + end +end + +class Method + # Methods are not duplicable: + # + # method(:puts).duplicable? 
# => false + # method(:puts).dup # => TypeError: allocator undefined for Method + def duplicable? + false + end +end + +class UnboundMethod + # Unbound methods are not duplicable: + # + # method(:puts).unbind.duplicable? # => false + # method(:puts).unbind.dup # => TypeError: allocator undefined for UnboundMethod + def duplicable? + false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/inclusion.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/inclusion.rb new file mode 100644 index 0000000..6064e92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/inclusion.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +class Object + # Returns true if this object is included in the argument. Argument must be + # any object which responds to +#include?+. Usage: + # + # characters = ["Konata", "Kagami", "Tsukasa"] + # "Konata".in?(characters) # => true + # + # This will throw an +ArgumentError+ if the argument doesn't respond + # to +#include?+. + def in?(another_object) + another_object.include?(self) + rescue NoMethodError + raise ArgumentError.new("The parameter passed to #in? must respond to #include?") + end + + # Returns the receiver if it's included in the argument otherwise returns +nil+. + # Argument must be any object which responds to +#include?+. Usage: + # + # params[:bucket_type].presence_in %w( project calendar ) + # + # This will throw an +ArgumentError+ if the argument doesn't respond to +#include?+. + # + # @return [Object] + def presence_in(another_object) + in?(another_object) ? self : nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/instance_variables.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/instance_variables.rb new file mode 100644 index 0000000..12fdf84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/instance_variables.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +class Object + # Returns a hash with string keys that maps instance variable names without "@" to their + # corresponding values. + # + # class C + # def initialize(x, y) + # @x, @y = x, y + # end + # end + # + # C.new(0, 1).instance_values # => {"x" => 0, "y" => 1} + def instance_values + Hash[instance_variables.map { |name| [name[1..-1], instance_variable_get(name)] }] + end + + # Returns an array of instance variable names as strings including "@". + # + # class C + # def initialize(x, y) + # @x, @y = x, y + # end + # end + # + # C.new(0, 1).instance_variable_names # => ["@y", "@x"] + def instance_variable_names + instance_variables.map(&:to_s) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/json.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/json.rb new file mode 100644 index 0000000..a65e273 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/json.rb @@ -0,0 +1,239 @@ +# frozen_string_literal: true + +# Hack to load json gem first so we can overwrite its to_json. 
+require "json" +require "bigdecimal" +require "ipaddr" +require "uri/generic" +require "pathname" +require "active_support/core_ext/big_decimal/conversions" # for #to_s +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +require "active_support/core_ext/object/instance_variables" +require "time" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/date_time/conversions" +require "active_support/core_ext/date/conversions" + +#-- +# The JSON gem adds a few modules to Ruby core classes containing :to_json definition, overwriting +# their default behavior. That said, we need to define the basic to_json method in all of them, +# otherwise they will always use to_json gem implementation, which is backwards incompatible in +# several cases (for instance, the JSON implementation for Hash does not work) with inheritance +# and consequently classes as ActiveSupport::OrderedHash cannot be serialized to json. +# +# On the other hand, we should avoid conflict with ::JSON.{generate,dump}(obj). Unfortunately, the +# JSON gem's encoder relies on its own to_json implementation to encode objects. Since it always +# passes a ::JSON::State object as the only argument to to_json, we can detect that and forward the +# calls to the original to_json method. +# +# It should be noted that when using ::JSON.{generate,dump} directly, ActiveSupport's encoder is +# bypassed completely. This means that as_json won't be invoked and the JSON gem will simply +# ignore any options it does not natively understand. This also means that ::JSON.{generate,dump} +# should give exactly the same results with or without active support. + +module ActiveSupport + module ToJsonWithActiveSupportEncoder # :nodoc: + def to_json(options = nil) + if options.is_a?(::JSON::State) + # Called from JSON.{generate,dump}, forward it to JSON gem's to_json + super(options) + else + # to_json is being invoked directly, use ActiveSupport's encoder + ActiveSupport::JSON.encode(self, options) + end + end + end +end + +[Enumerable, Object, Array, FalseClass, Float, Hash, Integer, NilClass, String, TrueClass].reverse_each do |klass| + klass.prepend(ActiveSupport::ToJsonWithActiveSupportEncoder) +end + +class Object + def as_json(options = nil) #:nodoc: + if respond_to?(:to_hash) + to_hash.as_json(options) + else + instance_values.as_json(options) + end + end +end + +class Struct #:nodoc: + def as_json(options = nil) + Hash[members.zip(values)].as_json(options) + end +end + +class TrueClass + def as_json(options = nil) #:nodoc: + self + end +end + +class FalseClass + def as_json(options = nil) #:nodoc: + self + end +end + +class NilClass + def as_json(options = nil) #:nodoc: + self + end +end + +class String + def as_json(options = nil) #:nodoc: + self + end +end + +class Symbol + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Numeric + def as_json(options = nil) #:nodoc: + self + end +end + +class Float + # Encoding Infinity or NaN to JSON should return "null". The default returns + # "Infinity" or "NaN" which are not valid JSON. + def as_json(options = nil) #:nodoc: + finite? ? self : nil + end +end + +class BigDecimal + # A BigDecimal would be naturally represented as a JSON number. Most libraries, + # however, parse non-integer JSON numbers directly as floats. Clients using + # those libraries would get in general a wrong number and no way to recover + # other than manually inspecting the string with the JSON code itself. 
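+  #
+  # For instance, parsing such a value with the JSON gem directly silently
+  # loses the extra digits:
+  #
+  #   require "json"
+  #   JSON.parse('{"amount": 0.30000000000000000000001}')["amount"]
+  #   # => 0.3 (a Float)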
+ # + # That's why a JSON string is returned. The JSON literal is not numeric, but + # if the other end knows by contract that the data is supposed to be a + # BigDecimal, it still has the chance to post-process the string and get the + # real value. + def as_json(options = nil) #:nodoc: + finite? ? to_s : nil + end +end + +class Regexp + def as_json(options = nil) #:nodoc: + to_s + end +end + +module Enumerable + def as_json(options = nil) #:nodoc: + to_a.as_json(options) + end +end + +class IO + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Range + def as_json(options = nil) #:nodoc: + to_s + end +end + +class Array + def as_json(options = nil) #:nodoc: + map { |v| options ? v.as_json(options.dup) : v.as_json } + end +end + +class Hash + def as_json(options = nil) #:nodoc: + # create a subset of the hash by applying :only or :except + subset = if options + if attrs = options[:only] + slice(*Array(attrs)) + elsif attrs = options[:except] + except(*Array(attrs)) + else + self + end + else + self + end + + result = {} + subset.each do |k, v| + result[k.to_s] = options ? v.as_json(options.dup) : v.as_json + end + result + end +end + +class Time + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end +end + +class Date + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + strftime("%Y-%m-%d") + else + strftime("%Y/%m/%d") + end + end +end + +class DateTime + def as_json(options = nil) #:nodoc: + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + strftime("%Y/%m/%d %H:%M:%S %z") + end + end +end + +class URI::Generic #:nodoc: + def as_json(options = nil) + to_s + end +end + +class Pathname #:nodoc: + def as_json(options = nil) + to_s + end +end + +class IPAddr # :nodoc: + def as_json(options = nil) + to_s + end +end + +class Process::Status #:nodoc: + def as_json(options = nil) + { exitstatus: exitstatus, pid: pid } + end +end + +class Exception + def as_json(options = nil) + to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/to_param.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/to_param.rb new file mode 100644 index 0000000..6d2bdd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/to_param.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/to_query" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/to_query.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/to_query.rb new file mode 100644 index 0000000..bac6ff9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/to_query.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +require "cgi" + +class Object + # Alias of to_s. + def to_param + to_s + end + + # Converts an object into a string suitable for use as a URL query string, + # using the given key as the param name. + def to_query(key) + "#{CGI.escape(key.to_param)}=#{CGI.escape(to_param.to_s)}" + end +end + +class NilClass + # Returns +self+. 
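+  #
+  # For example:
+  #
+  #   nil.to_param # => nil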
+ def to_param + self + end +end + +class TrueClass + # Returns +self+. + def to_param + self + end +end + +class FalseClass + # Returns +self+. + def to_param + self + end +end + +class Array + # Calls to_param on all its elements and joins the result with + # slashes. This is used by url_for in Action Pack. + def to_param + collect(&:to_param).join "/" + end + + # Converts an array into a string suitable for use as a URL query string, + # using the given +key+ as the param name. + # + # ['Rails', 'coding'].to_query('hobbies') # => "hobbies%5B%5D=Rails&hobbies%5B%5D=coding" + def to_query(key) + prefix = "#{key}[]" + + if empty? + nil.to_query(prefix) + else + collect { |value| value.to_query(prefix) }.join "&" + end + end +end + +class Hash + # Returns a string representation of the receiver suitable for use as a URL + # query string: + # + # {name: 'David', nationality: 'Danish'}.to_query + # # => "name=David&nationality=Danish" + # + # An optional namespace can be passed to enclose key names: + # + # {name: 'David', nationality: 'Danish'}.to_query('user') + # # => "user%5Bname%5D=David&user%5Bnationality%5D=Danish" + # + # The string pairs "key=value" that conform the query string + # are sorted lexicographically in ascending order. + # + # This method is also aliased as +to_param+. + def to_query(namespace = nil) + query = collect do |key, value| + unless (value.is_a?(Hash) || value.is_a?(Array)) && value.empty? + value.to_query(namespace ? "#{namespace}[#{key}]" : key) + end + end.compact + + query.sort! unless namespace.to_s.include?("[]") + query.join("&") + end + + alias_method :to_param, :to_query +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/try.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/try.rb new file mode 100644 index 0000000..999141a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/try.rb @@ -0,0 +1,158 @@ +# frozen_string_literal: true + +require "delegate" + +module ActiveSupport + module Tryable #:nodoc: + def try(method_name = nil, *args, &b) + if method_name.nil? && block_given? + if b.arity == 0 + instance_eval(&b) + else + yield self + end + elsif respond_to?(method_name) + public_send(method_name, *args, &b) + end + end + ruby2_keywords(:try) if respond_to?(:ruby2_keywords, true) + + def try!(method_name = nil, *args, &b) + if method_name.nil? && block_given? + if b.arity == 0 + instance_eval(&b) + else + yield self + end + else + public_send(method_name, *args, &b) + end + end + ruby2_keywords(:try!) if respond_to?(:ruby2_keywords, true) + end +end + +class Object + include ActiveSupport::Tryable + + ## + # :method: try + # + # :call-seq: + # try(*a, &b) + # + # Invokes the public method whose name goes as first argument just like + # +public_send+ does, except that if the receiver does not respond to it the + # call returns +nil+ rather than raising an exception. 
+ # + # This method is defined to be able to write + # + # @person.try(:name) + # + # instead of + # + # @person.name if @person + # + # +try+ calls can be chained: + # + # @person.try(:spouse).try(:name) + # + # instead of + # + # @person.spouse.name if @person && @person.spouse + # + # +try+ will also return +nil+ if the receiver does not respond to the method: + # + # @person.try(:non_existing_method) # => nil + # + # instead of + # + # @person.non_existing_method if @person.respond_to?(:non_existing_method) # => nil + # + # +try+ returns +nil+ when called on +nil+ regardless of whether it responds + # to the method: + # + # nil.try(:to_i) # => nil, rather than 0 + # + # Arguments and blocks are forwarded to the method if invoked: + # + # @posts.try(:each_slice, 2) do |a, b| + # ... + # end + # + # The number of arguments in the signature must match. If the object responds + # to the method the call is attempted and +ArgumentError+ is still raised + # in case of argument mismatch. + # + # If +try+ is called without arguments it yields the receiver to a given + # block unless it is +nil+: + # + # @person.try do |p| + # ... + # end + # + # You can also call try with a block without accepting an argument, and the block + # will be instance_eval'ed instead: + # + # @person.try { upcase.truncate(50) } + # + # Please also note that +try+ is defined on +Object+. Therefore, it won't work + # with instances of classes that do not have +Object+ among their ancestors, + # like direct subclasses of +BasicObject+. + + ## + # :method: try! + # + # :call-seq: + # try!(*a, &b) + # + # Same as #try, but raises a +NoMethodError+ exception if the receiver is + # not +nil+ and does not implement the tried method. + # + # "a".try!(:upcase) # => "A" + # nil.try!(:upcase) # => nil + # 123.try!(:upcase) # => NoMethodError: undefined method `upcase' for 123:Integer +end + +class Delegator + include ActiveSupport::Tryable + + ## + # :method: try + # + # :call-seq: + # try(a*, &b) + # + # See Object#try + + ## + # :method: try! + # + # :call-seq: + # try!(a*, &b) + # + # See Object#try! +end + +class NilClass + # Calling +try+ on +nil+ always returns +nil+. + # It becomes especially helpful when navigating through associations that may return +nil+. + # + # nil.try(:name) # => nil + # + # Without +try+ + # @person && @person.children.any? && @person.children.first.name + # + # With +try+ + # @person.try(:children).try(:first).try(:name) + def try(_method_name = nil, *) + nil + end + + # Calling +try!+ on +nil+ always returns +nil+. + # + # nil.try!(:name) # => nil + def try!(_method_name = nil, *) + nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/with_options.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/with_options.rb new file mode 100644 index 0000000..1d46add --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/object/with_options.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +require "active_support/option_merger" + +class Object + # An elegant way to factor duplication out of options passed to a series of + # method calls. Each method called in the block, with the block variable as + # the receiver, will have its options merged with the default +options+ hash + # provided. Each method called on the block variable must take an options + # hash as its final argument. 
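A self-contained sketch of that contract, using an illustrative +Notifier+ class instead of Active Record (the comments show what the option merging should produce):

  require "active_support/core_ext/object/with_options"

  class Notifier
    def self.sent
      @sent ||= []
    end

    # Takes a trailing options hash, as with_options expects.
    def self.notify(user, options = {})
      sent << [user, options]
    end
  end

  Notifier.with_options(channel: :email, urgent: true) do |n|
    n.notify "alice"
    n.notify "bob"
  end

  Notifier.sent
  # => [["alice", { channel: :email, urgent: true }],
  #     ["bob",   { channel: :email, urgent: true }]]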
+ # + # Without with_options, this code contains duplication: + # + # class Account < ActiveRecord::Base + # has_many :customers, dependent: :destroy + # has_many :products, dependent: :destroy + # has_many :invoices, dependent: :destroy + # has_many :expenses, dependent: :destroy + # end + # + # Using with_options, we can remove the duplication: + # + # class Account < ActiveRecord::Base + # with_options dependent: :destroy do |assoc| + # assoc.has_many :customers + # assoc.has_many :products + # assoc.has_many :invoices + # assoc.has_many :expenses + # end + # end + # + # It can also be used with an explicit receiver: + # + # I18n.with_options locale: user.locale, scope: 'newsletter' do |i18n| + # subject i18n.t :subject + # body i18n.t :body, user_name: user.name + # end + # + # When you don't pass an explicit receiver, it executes the whole block + # in merging options context: + # + # class Account < ActiveRecord::Base + # with_options dependent: :destroy do + # has_many :customers + # has_many :products + # has_many :invoices + # has_many :expenses + # end + # end + # + # with_options can also be nested since the call is forwarded to its receiver. + # + # NOTE: Each nesting level will merge inherited defaults in addition to their own. + # + # class Post < ActiveRecord::Base + # with_options if: :persisted?, length: { minimum: 50 } do + # validates :content, if: -> { content.present? } + # end + # end + # + # The code is equivalent to: + # + # validates :content, length: { minimum: 50 }, if: -> { content.present? } + # + # Hence the inherited default for +if+ key is ignored. + # + # NOTE: You cannot call class methods implicitly inside of with_options. + # You can access these methods using the class name instead: + # + # class Phone < ActiveRecord::Base + # enum phone_number_type: { home: 0, office: 1, mobile: 2 } + # + # with_options presence: true do + # validates :phone_number_type, inclusion: { in: Phone.phone_number_types.keys } + # end + # end + # + def with_options(options, &block) + option_merger = ActiveSupport::OptionMerger.new(self, options) + block.arity.zero? ? option_merger.instance_eval(&block) : block.call(option_merger) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range.rb new file mode 100644 index 0000000..78814fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/range/conversions" +require "active_support/core_ext/range/compare_range" +require "active_support/core_ext/range/include_time_with_zone" +require "active_support/core_ext/range/overlaps" +require "active_support/core_ext/range/each" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/compare_range.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/compare_range.rb new file mode 100644 index 0000000..c6d5b77 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/compare_range.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +module ActiveSupport + module CompareWithRange + # Extends the default Range#=== to support range comparisons. 
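Because +===+ is what +case+/+when+ dispatches on, this extension lets a +when+ clause match an entire sub-range; a small sketch:

  require "active_support/core_ext/range/compare_range"

  case (2..3)
  when 1..5 then "fully inside 1..5"
  else "not contained"
  end
  # => "fully inside 1..5"   ((1..5) === (2..3) is true with this extension)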
+ # (1..5) === (1..5) # => true + # (1..5) === (2..3) # => true + # (1..5) === (1...6) # => true + # (1..5) === (2..6) # => false + # + # The native Range#=== behavior is untouched. + # ('a'..'f') === ('c') # => true + # (5..9) === (11) # => false + # + # The given range must be fully bounded, with both start and end. + def ===(value) + if value.is_a?(::Range) + is_backwards_op = value.exclude_end? ? :>= : :> + return false if value.begin && value.end && value.begin.public_send(is_backwards_op, value.end) + # 1...10 includes 1..9 but it does not include 1..10. + # 1..10 includes 1...11 but it does not include 1...12. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + value_max = !exclude_end? && value.exclude_end? ? value.max : value.last + super(value.first) && (self.end.nil? || value_max.public_send(operator, last)) + else + super + end + end + + # Extends the default Range#include? to support range comparisons. + # (1..5).include?(1..5) # => true + # (1..5).include?(2..3) # => true + # (1..5).include?(1...6) # => true + # (1..5).include?(2..6) # => false + # + # The native Range#include? behavior is untouched. + # ('a'..'f').include?('c') # => true + # (5..9).include?(11) # => false + # + # The given range must be fully bounded, with both start and end. + def include?(value) + if value.is_a?(::Range) + is_backwards_op = value.exclude_end? ? :>= : :> + return false if value.begin && value.end && value.begin.public_send(is_backwards_op, value.end) + # 1...10 includes 1..9 but it does not include 1..10. + # 1..10 includes 1...11 but it does not include 1...12. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + value_max = !exclude_end? && value.exclude_end? ? value.max : value.last + super(value.first) && (self.end.nil? || value_max.public_send(operator, last)) + else + super + end + end + + # Extends the default Range#cover? to support range comparisons. + # (1..5).cover?(1..5) # => true + # (1..5).cover?(2..3) # => true + # (1..5).cover?(1...6) # => true + # (1..5).cover?(2..6) # => false + # + # The native Range#cover? behavior is untouched. + # ('a'..'f').cover?('c') # => true + # (5..9).cover?(11) # => false + # + # The given range must be fully bounded, with both start and end. + def cover?(value) + if value.is_a?(::Range) + is_backwards_op = value.exclude_end? ? :>= : :> + return false if value.begin && value.end && value.begin.public_send(is_backwards_op, value.end) + # 1...10 covers 1..9 but it does not cover 1..10. + # 1..10 covers 1...11 but it does not cover 1...12. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + value_max = !exclude_end? && value.exclude_end? ? value.max : value.last + super(value.first) && (self.end.nil? 
|| value_max.public_send(operator, last)) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::CompareWithRange) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/conversions.rb new file mode 100644 index 0000000..024e32d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/conversions.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module ActiveSupport + module RangeWithFormat + RANGE_FORMATS = { + db: -> (start, stop) do + case start + when String then "BETWEEN '#{start}' AND '#{stop}'" + else + "BETWEEN '#{start.to_s(:db)}' AND '#{stop.to_s(:db)}'" + end + end + } + + # Convert range to a formatted string. See RANGE_FORMATS for predefined formats. + # + # range = (1..100) # => 1..100 + # + # range.to_s # => "1..100" + # range.to_s(:db) # => "BETWEEN '1' AND '100'" + # + # == Adding your own range formats to to_s + # You can add your own formats to the Range::RANGE_FORMATS hash. + # Use the format name as the hash key and a Proc instance. + # + # # config/initializers/range_formats.rb + # Range::RANGE_FORMATS[:short] = ->(start, stop) { "Between #{start.to_s(:db)} and #{stop.to_s(:db)}" } + def to_s(format = :default) + if formatter = RANGE_FORMATS[format] + formatter.call(first, last) + else + super() + end + end + + alias_method :to_default_s, :to_s + alias_method :to_formatted_s, :to_s + end +end + +Range.prepend(ActiveSupport::RangeWithFormat) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/each.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/each.rb new file mode 100644 index 0000000..2d86997 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/each.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" + +module ActiveSupport + module EachTimeWithZone #:nodoc: + def each(&block) + ensure_iteration_allowed + super + end + + def step(n = 1, &block) + ensure_iteration_allowed + super + end + + private + def ensure_iteration_allowed + raise TypeError, "can't iterate from #{first.class}" if first.is_a?(TimeWithZone) + end + end +end + +Range.prepend(ActiveSupport::EachTimeWithZone) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/include_time_with_zone.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/include_time_with_zone.rb new file mode 100644 index 0000000..89e911b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/include_time_with_zone.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" +require "active_support/deprecation" + +module ActiveSupport + module IncludeTimeWithZone #:nodoc: + # Extends the default Range#include? to support ActiveSupport::TimeWithZone. + # + # (1.hour.ago..1.hour.from_now).include?(Time.current) # => true + # + def include?(value) + if self.begin.is_a?(TimeWithZone) || self.end.is_a?(TimeWithZone) + ActiveSupport::Deprecation.warn(<<-MSG.squish) + Using `Range#include?` to check the inclusion of a value in + a date time range is deprecated. 
+ It is recommended to use `Range#cover?` instead of `Range#include?` to + check the inclusion of a value in a date time range. + MSG + cover?(value) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::IncludeTimeWithZone) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/overlaps.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/overlaps.rb new file mode 100644 index 0000000..f753607 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/range/overlaps.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +class Range + # Compare two ranges and see if they overlap each other + # (1..5).overlaps?(4..6) # => true + # (1..5).overlaps?(7..9) # => false + def overlaps?(other) + cover?(other.first) || other.cover?(first) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/regexp.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/regexp.rb new file mode 100644 index 0000000..15534ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/regexp.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +class Regexp + # Returns +true+ if the regexp has the multiline flag set. + # + # (/./).multiline? # => false + # (/./m).multiline? # => true + # + # Regexp.new(".").multiline? # => false + # Regexp.new(".", Regexp::MULTILINE).multiline? # => true + def multiline? + options & MULTILINE == MULTILINE + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/securerandom.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/securerandom.rb new file mode 100644 index 0000000..ef812f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/securerandom.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "securerandom" + +module SecureRandom + BASE58_ALPHABET = ("0".."9").to_a + ("A".."Z").to_a + ("a".."z").to_a - ["0", "O", "I", "l"] + BASE36_ALPHABET = ("0".."9").to_a + ("a".."z").to_a + + # SecureRandom.base58 generates a random base58 string. + # + # The argument _n_ specifies the length of the random string to be generated. + # + # If _n_ is not specified or is +nil+, 16 is assumed. It may be larger in the future. + # + # The result may contain alphanumeric characters except 0, O, I and l. + # + # p SecureRandom.base58 # => "4kUgL2pdQMSCQtjE" + # p SecureRandom.base58(24) # => "77TMHrHJFvFDwodq8w7Ev2m7" + def self.base58(n = 16) + SecureRandom.random_bytes(n).unpack("C*").map do |byte| + idx = byte % 64 + idx = SecureRandom.random_number(58) if idx >= 58 + BASE58_ALPHABET[idx] + end.join + end + + # SecureRandom.base36 generates a random base36 string in lowercase. + # + # The argument _n_ specifies the length of the random string to be generated. + # + # If _n_ is not specified or is +nil+, 16 is assumed. It may be larger in the future. + # This method can be used over +base58+ if a deterministic case key is necessary. + # + # The result will contain alphanumeric characters in lowercase. 
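Both generators draw a byte, take it modulo 64, and redraw whenever the index falls outside the alphabet, which keeps the character distribution uniform rather than biased toward the low indexes. A quick sketch (the outputs are, of course, random):

  require "active_support/core_ext/securerandom"

  SecureRandom.base58(12)   # => e.g. "2qyeMLCzq4vJ"  (never contains 0, O, I or l)
  SecureRandom.base36(8)    # => e.g. "x2v0kq3n"      (lowercase letters and digits only)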
+ # + # p SecureRandom.base36 # => "4kugl2pdqmscqtje" + # p SecureRandom.base36(24) # => "77tmhrhjfvfdwodq8w7ev2m7" + def self.base36(n = 16) + SecureRandom.random_bytes(n).unpack("C*").map do |byte| + idx = byte % 64 + idx = SecureRandom.random_number(36) if idx >= 36 + BASE36_ALPHABET[idx] + end.join + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string.rb new file mode 100644 index 0000000..757d15c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/filters" +require "active_support/core_ext/string/multibyte" +require "active_support/core_ext/string/starts_ends_with" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/string/output_safety" +require "active_support/core_ext/string/exclude" +require "active_support/core_ext/string/strip" +require "active_support/core_ext/string/inquiry" +require "active_support/core_ext/string/indent" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/access.rb new file mode 100644 index 0000000..f6a14c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/access.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +class String + # If you pass a single integer, returns a substring of one character at that + # position. The first character of the string is at position 0, the next at + # position 1, and so on. If a range is supplied, a substring containing + # characters at offsets given by the range is returned. In both cases, if an + # offset is negative, it is counted from the end of the string. Returns +nil+ + # if the initial offset falls outside the string. Returns an empty string if + # the beginning of the range is greater than the end of the string. + # + # str = "hello" + # str.at(0) # => "h" + # str.at(1..3) # => "ell" + # str.at(-2) # => "l" + # str.at(-2..-1) # => "lo" + # str.at(5) # => nil + # str.at(5..-1) # => "" + # + # If a Regexp is given, the matching portion of the string is returned. + # If a String is given, that given string is returned if it occurs in + # the string. In both cases, +nil+ is returned if there is no match. + # + # str = "hello" + # str.at(/lo/) # => "lo" + # str.at(/ol/) # => nil + # str.at("lo") # => "lo" + # str.at("ol") # => nil + def at(position) + self[position] + end + + # Returns a substring from the given position to the end of the string. + # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.from(0) # => "hello" + # str.from(3) # => "lo" + # str.from(-2) # => "lo" + # + # You can mix it with +to+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def from(position) + self[position, length] + end + + # Returns a substring from the beginning of the string to the given position. + # If the position is negative, it is counted from the end of the string. 
+ # + # str = "hello" + # str.to(0) # => "h" + # str.to(3) # => "hell" + # str.to(-2) # => "hell" + # + # You can mix it with +from+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def to(position) + position += size if position < 0 + self[0, position + 1] || +"" + end + + # Returns the first character. If a limit is supplied, returns a substring + # from the beginning of the string until it reaches the limit value. If the + # given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.first # => "h" + # str.first(1) # => "h" + # str.first(2) # => "he" + # str.first(0) # => "" + # str.first(6) # => "hello" + def first(limit = 1) + self[0, limit] || raise(ArgumentError, "negative limit") + end + + # Returns the last character of the string. If a limit is supplied, returns a substring + # from the end of the string until it reaches the limit value (counting backwards). If + # the given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.last # => "o" + # str.last(1) # => "o" + # str.last(2) # => "lo" + # str.last(0) # => "" + # str.last(6) # => "hello" + def last(limit = 1) + self[[length - limit, 0].max, limit] || raise(ArgumentError, "negative limit") + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/behavior.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/behavior.rb new file mode 100644 index 0000000..35a5aa7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/behavior.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +class String + # Enables more predictable duck-typing on String-like classes. See Object#acts_like?. + def acts_like_string? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/conversions.rb new file mode 100644 index 0000000..e84a390 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/conversions.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/time/calculations" + +class String + # Converts a string to a Time value. + # The +form+ can be either :utc or :local (default :local). + # + # The time is parsed using Time.parse method. + # If +form+ is :local, then the time is in the system timezone. + # If the date part is missing then the current date is used and if + # the time part is missing then it is assumed to be 00:00:00. + # + # "13-12-2012".to_time # => 2012-12-13 00:00:00 +0100 + # "06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13 06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time(:utc) # => 2012-12-13 06:12:00 UTC + # "12/13/2012".to_time # => ArgumentError: argument out of range + # "1604326192".to_time # => ArgumentError: argument out of range + def to_time(form = :local) + parts = Date._parse(self, false) + used_keys = %i(year mon mday hour min sec sec_fraction offset) + return if (parts.keys & used_keys).empty? 
+ + now = Time.now + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, form == :utc ? 0 : nil) + ) + + form == :utc ? time.utc : time.to_time + end + + # Converts a string to a Date value. + # + # "1-1-2012".to_date # => Sun, 01 Jan 2012 + # "01/01/2012".to_date # => Sun, 01 Jan 2012 + # "2012-12-13".to_date # => Thu, 13 Dec 2012 + # "12/13/2012".to_date # => ArgumentError: invalid date + def to_date + ::Date.parse(self, false) unless blank? + end + + # Converts a string to a DateTime value. + # + # "1-1-2012".to_datetime # => Sun, 01 Jan 2012 00:00:00 +0000 + # "01/01/2012 23:59:59".to_datetime # => Sun, 01 Jan 2012 23:59:59 +0000 + # "2012-12-13 12:50".to_datetime # => Thu, 13 Dec 2012 12:50:00 +0000 + # "12/13/2012".to_datetime # => ArgumentError: invalid date + def to_datetime + ::DateTime.parse(self, false) unless blank? + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/exclude.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/exclude.rb new file mode 100644 index 0000000..8e46268 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/exclude.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +class String + # The inverse of String#include?. Returns true if the string + # does not include the other string. + # + # "hello".exclude? "lo" # => false + # "hello".exclude? "ol" # => true + # "hello".exclude? ?h # => false + def exclude?(string) + !include?(string) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/filters.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/filters.rb new file mode 100644 index 0000000..7f28bd5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/filters.rb @@ -0,0 +1,145 @@ +# frozen_string_literal: true + +class String + # Returns the string, first removing all whitespace on both ends of + # the string, and then changing remaining consecutive whitespace + # groups into one space each. + # + # Note that it handles both ASCII and Unicode whitespace. + # + # %{ Multi-line + # string }.squish # => "Multi-line string" + # " foo bar \n \t boo".squish # => "foo bar boo" + def squish + dup.squish! + end + + # Performs a destructive squish. See String#squish. + # str = " foo bar \n \t boo" + # str.squish! # => "foo bar boo" + # str # => "foo bar boo" + def squish! + gsub!(/[[:space:]]+/, " ") + strip! + self + end + + # Returns a new string with all occurrences of the patterns removed. + # str = "foo bar test" + # str.remove(" test") # => "foo bar" + # str.remove(" test", /bar/) # => "foo " + # str # => "foo bar test" + def remove(*patterns) + dup.remove!(*patterns) + end + + # Alters the string by removing all occurrences of the patterns. + # str = "foo bar test" + # str.remove!(" test", /bar/) # => "foo " + # str # => "foo " + def remove!(*patterns) + patterns.each do |pattern| + gsub! pattern, "" + end + + self + end + + # Truncates a given +text+ after a given length if +text+ is longer than length: + # + # 'Once upon a time in a world far far away'.truncate(27) + # # => "Once upon a time in a wo..." 
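These filters compose naturally; a brief sketch with illustrative strings:

  require "active_support/core_ext/string/filters"

  " A   very \n long  headline ".squish    # => "A very long headline"
  "A very long headline".truncate(12)      # => "A very lo..."
  "draft: launch plan".remove("draft: ")   # => "launch plan"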
+ # + # Pass a string or regexp :separator to truncate +text+ at a natural break: + # + # 'Once upon a time in a world far far away'.truncate(27, separator: ' ') + # # => "Once upon a time in a..." + # + # 'Once upon a time in a world far far away'.truncate(27, separator: /\s/) + # # => "Once upon a time in a..." + # + # The last characters will be replaced with the :omission string (defaults to "...") + # for a total length not exceeding length: + # + # 'And they found that many people were sleeping better.'.truncate(25, omission: '... (continued)') + # # => "And they f... (continued)" + def truncate(truncate_at, options = {}) + return dup unless length > truncate_at + + omission = options[:omission] || "..." + length_with_room_for_omission = truncate_at - omission.length + stop = \ + if options[:separator] + rindex(options[:separator], length_with_room_for_omission) || length_with_room_for_omission + else + length_with_room_for_omission + end + + +"#{self[0, stop]}#{omission}" + end + + # Truncates +text+ to at most bytesize bytes in length without + # breaking string encoding by splitting multibyte characters or breaking + # grapheme clusters ("perceptual characters") by truncating at combining + # characters. + # + # >> "🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪".size + # => 20 + # >> "🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪".bytesize + # => 80 + # >> "🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪".truncate_bytes(20) + # => "🔪🔪🔪🔪…" + # + # The truncated text ends with the :omission string, defaulting + # to "…", for a total length not exceeding bytesize. + def truncate_bytes(truncate_at, omission: "…") + omission ||= "" + + case + when bytesize <= truncate_at + dup + when omission.bytesize > truncate_at + raise ArgumentError, "Omission #{omission.inspect} is #{omission.bytesize}, larger than the truncation length of #{truncate_at} bytes" + when omission.bytesize == truncate_at + omission.dup + else + self.class.new.tap do |cut| + cut_at = truncate_at - omission.bytesize + + scan(/\X/) do |grapheme| + if cut.bytesize + grapheme.bytesize <= cut_at + cut << grapheme + else + break + end + end + + cut << omission + end + end + end + + # Truncates a given +text+ after a given number of words (words_count): + # + # 'Once upon a time in a world far far away'.truncate_words(4) + # # => "Once upon a time..." + # + # Pass a string or regexp :separator to specify a different separator of words: + # + # 'Once
<br>upon<br>a<br>time<br>in<br>a<br>world'.truncate_words(5, separator: '<br>') + # # => "Once<br>upon<br>a<br>time<br>
in..." + # + # The last characters will be replaced with the :omission string (defaults to "..."): + # + # 'And they found that many people were sleeping better.'.truncate_words(5, omission: '... (continued)') + # # => "And they found that many... (continued)" + def truncate_words(words_count, options = {}) + sep = options[:separator] || /\s+/ + sep = Regexp.escape(sep.to_s) unless Regexp === sep + if self =~ /\A((?>.+?#{sep}){#{words_count - 1}}.+?)#{sep}.*/m + $1 + (options[:omission] || "...") + else + dup + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/indent.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/indent.rb new file mode 100644 index 0000000..af9d181 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/indent.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +class String + # Same as +indent+, except it indents the receiver in-place. + # + # Returns the indented string, or +nil+ if there was nothing to indent. + def indent!(amount, indent_string = nil, indent_empty_lines = false) + indent_string = indent_string || self[/^[ \t]/] || " " + re = indent_empty_lines ? /^/ : /^(?!$)/ + gsub!(re, indent_string * amount) + end + + # Indents the lines in the receiver: + # + # < + # def some_method + # some_code + # end + # + # The second argument, +indent_string+, specifies which indent string to + # use. The default is +nil+, which tells the method to make a guess by + # peeking at the first indented line, and fallback to a space if there is + # none. + # + # " foo".indent(2) # => " foo" + # "foo\n\t\tbar".indent(2) # => "\t\tfoo\n\t\t\t\tbar" + # "foo".indent(2, "\t") # => "\t\tfoo" + # + # While +indent_string+ is typically one space or tab, it may be any string. + # + # The third argument, +indent_empty_lines+, is a flag that says whether + # empty lines should be indented. Default is false. + # + # "foo\n\nbar".indent(2) # => " foo\n\n bar" + # "foo\n\nbar".indent(2, nil, true) # => " foo\n \n bar" + # + def indent(amount, indent_string = nil, indent_empty_lines = false) + dup.tap { |_| _.indent!(amount, indent_string, indent_empty_lines) } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/inflections.rb new file mode 100644 index 0000000..9bf465b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/inflections.rb @@ -0,0 +1,293 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" +require "active_support/inflector/transliterate" + +# String inflections define new methods on the String class to transform names for different purposes. +# For instance, you can figure out the name of a table from the name of a class. +# +# 'ScaleScore'.tableize # => "scale_scores" +# +class String + # Returns the plural form of the word in the string. + # + # If the optional parameter +count+ is specified, + # the singular form will be returned if count == 1. + # For any other value of +count+ the plural will be returned. + # + # If the optional parameter +locale+ is specified, + # the word will be pluralized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. 
+ # + # 'post'.pluralize # => "posts" + # 'octopus'.pluralize # => "octopi" + # 'sheep'.pluralize # => "sheep" + # 'words'.pluralize # => "words" + # 'the blue mailman'.pluralize # => "the blue mailmen" + # 'CamelOctopus'.pluralize # => "CamelOctopi" + # 'apple'.pluralize(1) # => "apple" + # 'apple'.pluralize(2) # => "apples" + # 'ley'.pluralize(:es) # => "leyes" + # 'ley'.pluralize(1, :es) # => "ley" + # + # See ActiveSupport::Inflector.pluralize. + def pluralize(count = nil, locale = :en) + locale = count if count.is_a?(Symbol) + if count == 1 + dup + else + ActiveSupport::Inflector.pluralize(self, locale) + end + end + + # The reverse of +pluralize+, returns the singular form of a word in a string. + # + # If the optional parameter +locale+ is specified, + # the word will be singularized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. + # + # 'posts'.singularize # => "post" + # 'octopi'.singularize # => "octopus" + # 'sheep'.singularize # => "sheep" + # 'word'.singularize # => "word" + # 'the blue mailmen'.singularize # => "the blue mailman" + # 'CamelOctopi'.singularize # => "CamelOctopus" + # 'leyes'.singularize(:es) # => "ley" + # + # See ActiveSupport::Inflector.singularize. + def singularize(locale = :en) + ActiveSupport::Inflector.singularize(self, locale) + end + + # +constantize+ tries to find a declared constant with the name specified + # in the string. It raises a NameError when the name is not in CamelCase + # or is not initialized. + # + # 'Module'.constantize # => Module + # 'Class'.constantize # => Class + # 'blargle'.constantize # => NameError: wrong constant name blargle + # + # See ActiveSupport::Inflector.constantize. + def constantize + ActiveSupport::Inflector.constantize(self) + end + + # +safe_constantize+ tries to find a declared constant with the name specified + # in the string. It returns +nil+ when the name is not in CamelCase + # or is not initialized. + # + # 'Module'.safe_constantize # => Module + # 'Class'.safe_constantize # => Class + # 'blargle'.safe_constantize # => nil + # + # See ActiveSupport::Inflector.safe_constantize. + def safe_constantize + ActiveSupport::Inflector.safe_constantize(self) + end + + # By default, +camelize+ converts strings to UpperCamelCase. If the argument to camelize + # is set to :lower then camelize produces lowerCamelCase. + # + # +camelize+ will also convert '/' to '::' which is useful for converting paths to namespaces. + # + # 'active_record'.camelize # => "ActiveRecord" + # 'active_record'.camelize(:lower) # => "activeRecord" + # 'active_record/errors'.camelize # => "ActiveRecord::Errors" + # 'active_record/errors'.camelize(:lower) # => "activeRecord::Errors" + # + # +camelize+ is also aliased as +camelcase+. + # + # See ActiveSupport::Inflector.camelize. + def camelize(first_letter = :upper) + case first_letter + when :upper + ActiveSupport::Inflector.camelize(self, true) + when :lower + ActiveSupport::Inflector.camelize(self, false) + else + raise ArgumentError, "Invalid option, use either :upper or :lower." + end + end + alias_method :camelcase, :camelize + + # Capitalizes all the words and replaces some characters in the string to create + # a nicer looking title. +titleize+ is meant for creating pretty output. It is not + # used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. 
+ # By default, this parameter is false. + # + # 'man from the boondocks'.titleize # => "Man From The Boondocks" + # 'x-men: the last stand'.titleize # => "X Men: The Last Stand" + # 'string_ending_with_id'.titleize(keep_id_suffix: true) # => "String Ending With Id" + # + # +titleize+ is also aliased as +titlecase+. + # + # See ActiveSupport::Inflector.titleize. + def titleize(keep_id_suffix: false) + ActiveSupport::Inflector.titleize(self, keep_id_suffix: keep_id_suffix) + end + alias_method :titlecase, :titleize + + # The reverse of +camelize+. Makes an underscored, lowercase form from the expression in the string. + # + # +underscore+ will also change '::' to '/' to convert namespaces to paths. + # + # 'ActiveModel'.underscore # => "active_model" + # 'ActiveModel::Errors'.underscore # => "active_model/errors" + # + # See ActiveSupport::Inflector.underscore. + def underscore + ActiveSupport::Inflector.underscore(self) + end + + # Replaces underscores with dashes in the string. + # + # 'puni_puni'.dasherize # => "puni-puni" + # + # See ActiveSupport::Inflector.dasherize. + def dasherize + ActiveSupport::Inflector.dasherize(self) + end + + # Removes the module part from the constant expression in the string. + # + # 'ActiveSupport::Inflector::Inflections'.demodulize # => "Inflections" + # 'Inflections'.demodulize # => "Inflections" + # '::Inflections'.demodulize # => "Inflections" + # ''.demodulize # => '' + # + # See ActiveSupport::Inflector.demodulize. + # + # See also +deconstantize+. + def demodulize + ActiveSupport::Inflector.demodulize(self) + end + + # Removes the rightmost segment from the constant expression in the string. + # + # 'Net::HTTP'.deconstantize # => "Net" + # '::Net::HTTP'.deconstantize # => "::Net" + # 'String'.deconstantize # => "" + # '::String'.deconstantize # => "" + # ''.deconstantize # => "" + # + # See ActiveSupport::Inflector.deconstantize. + # + # See also +demodulize+. + def deconstantize + ActiveSupport::Inflector.deconstantize(self) + end + + # Replaces special characters in a string so that it may be used as part of a 'pretty' URL. + # + # If the optional parameter +locale+ is specified, + # the word will be parameterized as a word of that language. + # By default, this parameter is set to nil and it will use + # the configured I18n.locale. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize}" + # end + # end + # + # @person = Person.find(1) + # # => # + # + # <%= link_to(@person.name, person_path) %> + # # => Donald E. Knuth + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize(preserve_case: true)}" + # end + # end + # + # @person = Person.find(1) + # # => # + # + # <%= link_to(@person.name, person_path) %> + # # => Donald E. Knuth + # + # See ActiveSupport::Inflector.parameterize. + def parameterize(separator: "-", preserve_case: false, locale: nil) + ActiveSupport::Inflector.parameterize(self, separator: separator, preserve_case: preserve_case, locale: locale) + end + + # Creates the name of a table like Rails does for models to table names. This method + # uses the +pluralize+ method on the last word in the string. + # + # 'RawScaledScorer'.tableize # => "raw_scaled_scorers" + # 'ham_and_egg'.tableize # => "ham_and_eggs" + # 'fancyCategory'.tableize # => "fancy_categories" + # + # See ActiveSupport::Inflector.tableize. 
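The inflections above chain naturally, since each returns a plain String; a short sketch:

  require "active_support/core_ext/string/inflections"

  "ActiveSupport::Inflector".demodulize.underscore   # => "inflector"
  "Admin::UserProfile".underscore                    # => "admin/user_profile"
  "Donald E. Knuth".parameterize                     # => "donald-e-knuth"
  "raw_scaled_scorer".camelize(:lower)               # => "rawScaledScorer"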
+ def tableize + ActiveSupport::Inflector.tableize(self) + end + + # Creates a class name from a plural table name like Rails does for table names to models. + # Note that this returns a string and not a class. (To convert to an actual class + # follow +classify+ with +constantize+.) + # + # 'ham_and_eggs'.classify # => "HamAndEgg" + # 'posts'.classify # => "Post" + # + # See ActiveSupport::Inflector.classify. + def classify + ActiveSupport::Inflector.classify(self) + end + + # Capitalizes the first word, turns underscores into spaces, and (by default)strips a + # trailing '_id' if present. + # Like +titleize+, this is meant for creating pretty output. + # + # The capitalization of the first word can be turned off by setting the + # optional parameter +capitalize+ to false. + # By default, this parameter is true. + # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # 'employee_salary'.humanize # => "Employee salary" + # 'author_id'.humanize # => "Author" + # 'author_id'.humanize(capitalize: false) # => "author" + # '_id'.humanize # => "Id" + # 'author_id'.humanize(keep_id_suffix: true) # => "Author Id" + # + # See ActiveSupport::Inflector.humanize. + def humanize(capitalize: true, keep_id_suffix: false) + ActiveSupport::Inflector.humanize(self, capitalize: capitalize, keep_id_suffix: keep_id_suffix) + end + + # Converts just the first character to uppercase. + # + # 'what a Lovely Day'.upcase_first # => "What a Lovely Day" + # 'w'.upcase_first # => "W" + # ''.upcase_first # => "" + # + # See ActiveSupport::Inflector.upcase_first. + def upcase_first + ActiveSupport::Inflector.upcase_first(self) + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # 'Message'.foreign_key # => "message_id" + # 'Message'.foreign_key(false) # => "messageid" + # 'Admin::Post'.foreign_key # => "post_id" + # + # See ActiveSupport::Inflector.foreign_key. + def foreign_key(separate_class_name_and_id_with_underscore = true) + ActiveSupport::Inflector.foreign_key(self, separate_class_name_and_id_with_underscore) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/inquiry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/inquiry.rb new file mode 100644 index 0000000..d78ad9b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/inquiry.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/string_inquirer" +require "active_support/environment_inquirer" + +class String + # Wraps the current string in the ActiveSupport::StringInquirer class, + # which gives you a prettier way to test for equality. + # + # env = 'production'.inquiry + # env.production? # => true + # env.development? 
# => false + def inquiry + ActiveSupport::StringInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/multibyte.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/multibyte.rb new file mode 100644 index 0000000..0542121 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/multibyte.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +require "active_support/multibyte" + +class String + # == Multibyte proxy + # + # +mb_chars+ is a multibyte safe proxy for string methods. + # + # It creates and returns an instance of the ActiveSupport::Multibyte::Chars class which + # encapsulates the original string. A Unicode safe version of all the String methods are defined on this proxy + # class. If the proxy class doesn't respond to a certain method, it's forwarded to the encapsulated string. + # + # >> "lj".mb_chars.upcase.to_s + # => "LJ" + # + # NOTE: Ruby 2.4 and later support native Unicode case mappings: + # + # >> "lj".upcase + # => "LJ" + # + # == Method chaining + # + # All the methods on the Chars proxy which normally return a string will return a Chars object. This allows + # method chaining on the result of any of these methods. + # + # name.mb_chars.reverse.length # => 12 + # + # == Interoperability and configuration + # + # The Chars object tries to be as interchangeable with String objects as possible: sorting and comparing between + # String and Char work like expected. The bang! methods change the internal string representation in the Chars + # object. Interoperability problems can be resolved easily with a +to_s+ call. + # + # For more information about the methods defined on the Chars proxy see ActiveSupport::Multibyte::Chars. For + # information about how to change the default Multibyte behavior see ActiveSupport::Multibyte. + def mb_chars + ActiveSupport::Multibyte.proxy_class.new(self) + end + + # Returns +true+ if string has utf_8 encoding. + # + # utf_8_str = "some string".encode "UTF-8" + # iso_str = "some string".encode "ISO-8859-1" + # + # utf_8_str.is_utf8? # => true + # iso_str.is_utf8? # => false + def is_utf8? + case encoding + when Encoding::UTF_8, Encoding::US_ASCII + valid_encoding? + when Encoding::ASCII_8BIT + dup.force_encoding(Encoding::UTF_8).valid_encoding? + else + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/output_safety.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/output_safety.rb new file mode 100644 index 0000000..b01cd78 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/output_safety.rb @@ -0,0 +1,315 @@ +# frozen_string_literal: true + +require "erb" +require "active_support/core_ext/module/redefine_method" +require "active_support/multibyte/unicode" + +class ERB + module Util + HTML_ESCAPE = { "&" => "&", ">" => ">", "<" => "<", '"' => """, "'" => "'" } + JSON_ESCAPE = { "&" => '\u0026', ">" => '\u003e', "<" => '\u003c', "\u2028" => '\u2028', "\u2029" => '\u2029' } + HTML_ESCAPE_ONCE_REGEXP = /["><']|&(?!([a-zA-Z]+|(#\d+)|(#[xX][\dA-Fa-f]+));)/ + JSON_ESCAPE_REGEXP = /[\u2028\u2029&><]/u + + # A utility method for escaping HTML tag characters. + # This method is also aliased as h. + # + # puts html_escape('is a > 0 & a < 10?') + # # => is a > 0 & a < 10? 
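A compact sketch of the escaping helper and the SafeBuffer concatenation rules defined in this file (the input strings are illustrative):

  require "active_support/core_ext/string/output_safety"

  ERB::Util.html_escape("is a > 0 & a < 10?")
  # => "is a &gt; 0 &amp; a &lt; 10?"   (returned marked as html_safe)

  buffer = "<p>".html_safe
  buffer << "Tom & Jerry <script>"   # unsafe input is escaped on concatenation
  buffer << "</p>".html_safe         # already-safe input is appended verbatim
  buffer                             # => "<p>Tom &amp; Jerry &lt;script&gt;</p>"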
+ def html_escape(s) + unwrapped_html_escape(s).html_safe + end + + silence_redefinition_of_method :h + alias h html_escape + + module_function :h + + singleton_class.silence_redefinition_of_method :html_escape + module_function :html_escape + + # HTML escapes strings but doesn't wrap them with an ActiveSupport::SafeBuffer. + # This method is not for public consumption! Seriously! + def unwrapped_html_escape(s) # :nodoc: + s = s.to_s + if s.html_safe? + s + else + CGI.escapeHTML(ActiveSupport::Multibyte::Unicode.tidy_bytes(s)) + end + end + module_function :unwrapped_html_escape + + # A utility method for escaping HTML without affecting existing escaped entities. + # + # html_escape_once('1 < 2 & 3') + # # => "1 < 2 & 3" + # + # html_escape_once('<< Accept & Checkout') + # # => "<< Accept & Checkout" + def html_escape_once(s) + result = ActiveSupport::Multibyte::Unicode.tidy_bytes(s.to_s).gsub(HTML_ESCAPE_ONCE_REGEXP, HTML_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :html_escape_once + + # A utility method for escaping HTML entities in JSON strings. Specifically, the + # &, > and < characters are replaced with their equivalent unicode escaped form - + # \u0026, \u003e, and \u003c. The Unicode sequences \u2028 and \u2029 are also + # escaped as they are treated as newline characters in some JavaScript engines. + # These sequences have identical meaning as the original characters inside the + # context of a JSON string, so assuming the input is a valid and well-formed + # JSON value, the output will have equivalent meaning when parsed: + # + # json = JSON.generate({ name: ""}) + # # => "{\"name\":\"\"}" + # + # json_escape(json) + # # => "{\"name\":\"\\u003C/script\\u003E\\u003Cscript\\u003Ealert('PWNED!!!')\\u003C/script\\u003E\"}" + # + # JSON.parse(json) == JSON.parse(json_escape(json)) + # # => true + # + # The intended use case for this method is to escape JSON strings before including + # them inside a script tag to avoid XSS vulnerability: + # + # + # + # It is necessary to +raw+ the result of +json_escape+, so that quotation marks + # don't get converted to " entities. +json_escape+ doesn't + # automatically flag the result as HTML safe, since the raw value is unsafe to + # use inside HTML attributes. + # + # If your JSON is being used downstream for insertion into the DOM, be aware of + # whether or not it is being inserted via html(). Most jQuery plugins do this. + # If that is the case, be sure to +html_escape+ or +sanitize+ any user-generated + # content returned by your JSON. + # + # If you need to output JSON elsewhere in your HTML, you can just do something + # like this, as any unsafe characters (including quotation marks) will be + # automatically escaped for you: + # + #
<div data-user-info="<%= current_user.to_json %>">...</div>
+ # + # WARNING: this helper only works with valid JSON. Using this on non-JSON values + # will open up serious XSS vulnerabilities. For example, if you replace the + # +current_user.to_json+ in the example above with user input instead, the browser + # will happily eval() that string as JavaScript. + # + # The escaping performed in this method is identical to those performed in the + # Active Support JSON encoder when +ActiveSupport.escape_html_entities_in_json+ is + # set to true. Because this transformation is idempotent, this helper can be + # applied even if +ActiveSupport.escape_html_entities_in_json+ is already true. + # + # Therefore, when you are unsure if +ActiveSupport.escape_html_entities_in_json+ + # is enabled, or if you are unsure where your JSON string originated from, it + # is recommended that you always apply this helper (other libraries, such as the + # JSON gem, do not provide this kind of protection by default; also some gems + # might override +to_json+ to bypass Active Support's encoder). + def json_escape(s) + result = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :json_escape + end +end + +class Object + def html_safe? + false + end +end + +class Numeric + def html_safe? + true + end +end + +module ActiveSupport #:nodoc: + class SafeBuffer < String + UNSAFE_STRING_METHODS = %w( + capitalize chomp chop delete delete_prefix delete_suffix + downcase lstrip next reverse rstrip scrub slice squeeze strip + succ swapcase tr tr_s unicode_normalize upcase + ) + + UNSAFE_STRING_METHODS_WITH_BACKREF = %w(gsub sub) + + alias_method :original_concat, :concat + private :original_concat + + # Raised when ActiveSupport::SafeBuffer#safe_concat is called on unsafe buffers. + class SafeConcatError < StandardError + def initialize + super "Could not concatenate to the buffer because it is not html safe." + end + end + + def [](*args) + if html_safe? + new_string = super + + return unless new_string + + new_safe_buffer = new_string.is_a?(SafeBuffer) ? new_string : SafeBuffer.new(new_string) + new_safe_buffer.instance_variable_set :@html_safe, true + new_safe_buffer + else + to_str[*args] + end + end + + def safe_concat(value) + raise SafeConcatError unless html_safe? + original_concat(value) + end + + def initialize(str = "") + @html_safe = true + super + end + + def initialize_copy(other) + super + @html_safe = other.html_safe? + end + + def clone_empty + self[0, 0] + end + + def concat(value) + super(html_escape_interpolated_argument(value)) + end + alias << concat + + def insert(index, value) + super(index, html_escape_interpolated_argument(value)) + end + + def prepend(value) + super(html_escape_interpolated_argument(value)) + end + + def replace(value) + super(html_escape_interpolated_argument(value)) + end + + def []=(*args) + if args.length == 3 + super(args[0], args[1], html_escape_interpolated_argument(args[2])) + else + super(args[0], html_escape_interpolated_argument(args[1])) + end + end + + def +(other) + dup.concat(other) + end + + def *(*) + new_string = super + new_safe_buffer = new_string.is_a?(SafeBuffer) ? 
new_string : SafeBuffer.new(new_string) + new_safe_buffer.instance_variable_set(:@html_safe, @html_safe) + new_safe_buffer + end + + def %(args) + case args + when Hash + escaped_args = args.transform_values { |arg| html_escape_interpolated_argument(arg) } + else + escaped_args = Array(args).map { |arg| html_escape_interpolated_argument(arg) } + end + + self.class.new(super(escaped_args)) + end + + def html_safe? + defined?(@html_safe) && @html_safe + end + + def to_s + self + end + + def to_param + to_str + end + + def encode_with(coder) + coder.represent_object nil, to_str + end + + UNSAFE_STRING_METHODS.each do |unsafe_method| + if unsafe_method.respond_to?(unsafe_method) + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{unsafe_method}(*args, &block) # def capitalize(*args, &block) + to_str.#{unsafe_method}(*args, &block) # to_str.capitalize(*args, &block) + end # end + + def #{unsafe_method}!(*args) # def capitalize!(*args) + @html_safe = false # @html_safe = false + super # super + end # end + EOT + end + end + + UNSAFE_STRING_METHODS_WITH_BACKREF.each do |unsafe_method| + if unsafe_method.respond_to?(unsafe_method) + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{unsafe_method}(*args, &block) # def gsub(*args, &block) + if block # if block + to_str.#{unsafe_method}(*args) { |*params| # to_str.gsub(*args) { |*params| + set_block_back_references(block, $~) # set_block_back_references(block, $~) + block.call(*params) # block.call(*params) + } # } + else # else + to_str.#{unsafe_method}(*args) # to_str.gsub(*args) + end # end + end # end + + def #{unsafe_method}!(*args, &block) # def gsub!(*args, &block) + @html_safe = false # @html_safe = false + if block # if block + super(*args) { |*params| # super(*args) { |*params| + set_block_back_references(block, $~) # set_block_back_references(block, $~) + block.call(*params) # block.call(*params) + } # } + else # else + super # super + end # end + end # end + EOT + end + end + + private + def html_escape_interpolated_argument(arg) + (!html_safe? || arg.html_safe?) ? arg : CGI.escapeHTML(arg.to_s) + end + + def set_block_back_references(block, match_data) + block.binding.eval("proc { |m| $~ = m }").call(match_data) + rescue ArgumentError + # Can't create binding from C level Proc + end + end +end + +class String + # Marks a string as trusted safe. It will be inserted into HTML with no + # additional escaping performed. It is your responsibility to ensure that the + # string contains no malicious content. This method is equivalent to the + # +raw+ helper in views. It is recommended that you use +sanitize+ instead of + # this method. It should never be called on user input. + def html_safe + ActiveSupport::SafeBuffer.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/starts_ends_with.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/starts_ends_with.rb new file mode 100644 index 0000000..1e21637 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/starts_ends_with.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +class String + alias :starts_with? :start_with? + alias :ends_with? :end_with? 
+end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/strip.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/strip.rb new file mode 100644 index 0000000..60e9952 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/strip.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +class String + # Strips indentation in heredocs. + # + # For example in + # + # if options[:usage] + # puts <<-USAGE.strip_heredoc + # This command does such and such. + # + # Supported options are: + # -h This message + # ... + # USAGE + # end + # + # the user would see the usage message aligned against the left margin. + # + # Technically, it looks for the least indented non-empty line + # in the whole string, and removes that amount of leading whitespace. + def strip_heredoc + gsub(/^#{scan(/^[ \t]*(?=\S)/).min}/, "").tap do |stripped| + stripped.freeze if frozen? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/zones.rb new file mode 100644 index 0000000..55dc231 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/string/zones.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/time/zones" + +class String + # Converts String to a TimeWithZone in the current zone if Time.zone or Time.zone_default + # is set, otherwise converts String to a Time via String#to_time + def in_time_zone(zone = ::Time.zone) + if zone + ::Time.find_zone!(zone).parse(self) + else + to_time + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/symbol.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/symbol.rb new file mode 100644 index 0000000..709fed2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/symbol.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/symbol/starts_ends_with" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/symbol/starts_ends_with.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/symbol/starts_ends_with.rb new file mode 100644 index 0000000..655a539 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/symbol/starts_ends_with.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +class Symbol + def start_with?(*prefixes) + to_s.start_with?(*prefixes) + end unless method_defined?(:start_with?) + + def end_with?(*suffixes) + to_s.end_with?(*suffixes) + end unless method_defined?(:end_with?) + + alias :starts_with? :start_with? + alias :ends_with? :end_with? 
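A brief sketch combining the String and Symbol extensions above (the zone name and timestamp are illustrative, and tzinfo data must be available):

  require "active_support/core_ext/string/zones"
  require "active_support/core_ext/symbol/starts_ends_with"

  t = "2012-12-13 06:12".in_time_zone("Eastern Time (US & Canada)")
  t.class                                 # => ActiveSupport::TimeWithZone
  t.zone                                  # => "EST"
  :configuration.starts_with?("config")   # => true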
+end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time.rb new file mode 100644 index 0000000..c809def --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/compatibility" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/time/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/acts_like.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/acts_like.rb new file mode 100644 index 0000000..8572b49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Time + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? + true + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/calculations.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/calculations.rb new file mode 100644 index 0000000..d9a130d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/calculations.rb @@ -0,0 +1,363 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/conversions" +require "active_support/time_with_zone" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/module/remove_method" + +class Time + include DateAndTime::Calculations + + COMMON_YEAR_DAYS_IN_MONTH = [nil, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + + class << self + # Overriding case equality method so that it returns true for ActiveSupport::TimeWithZone instances + def ===(other) + super || (self == Time && other.is_a?(ActiveSupport::TimeWithZone)) + end + + # Returns the number of days in the given month. + # If no year is specified, it will use the current year. + def days_in_month(month, year = current.year) + if month == 2 && ::Date.gregorian_leap?(year) + 29 + else + COMMON_YEAR_DAYS_IN_MONTH[month] + end + end + + # Returns the number of days in the given year. + # If no year is specified, it will use the current year. + def days_in_year(year = current.year) + days_in_month(2, year) + 337 + end + + # Returns Time.zone.now when Time.zone or config.time_zone are set, otherwise just returns Time.now. + def current + ::Time.zone ? 
::Time.zone.now : ::Time.now + end + + # Layers additional behavior on Time.at so that ActiveSupport::TimeWithZone and DateTime + # instances can be used when called with a single argument + def at_with_coercion(*args) + return at_without_coercion(*args) if args.size != 1 + + # Time.at can be called with a time or numerical value + time_or_number = args.first + + if time_or_number.is_a?(ActiveSupport::TimeWithZone) + at_without_coercion(time_or_number.to_r).getlocal + elsif time_or_number.is_a?(DateTime) + at_without_coercion(time_or_number.to_f).getlocal + else + at_without_coercion(time_or_number) + end + end + ruby2_keywords(:at_with_coercion) if respond_to?(:ruby2_keywords, true) + alias_method :at_without_coercion, :at + alias_method :at, :at_with_coercion + + # Creates a +Time+ instance from an RFC 3339 string. + # + # Time.rfc3339('1999-12-31T14:00:00-10:00') # => 2000-01-01 00:00:00 -1000 + # + # If the time or offset components are missing then an +ArgumentError+ will be raised. + # + # Time.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + end + end + + # Returns the number of seconds since 00:00:00. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0.0 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296.0 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399.0 + def seconds_since_midnight + to_i - change(hour: 0).to_i + (usec / 1.0e+6) + end + + # Returns the number of seconds until 23:59:59. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # Time.new(2012, 8, 29, 0, 0, 0.5).sec_fraction # => (1/2) + def sec_fraction + subsec + end + + unless Time.method_defined?(:floor) + def floor(precision = 0) + change(nsec: 0) + subsec.floor(precision) + end + end + + # Restricted Ruby version due to a bug in `Time#ceil` + # See https://bugs.ruby-lang.org/issues/17025 for more details + if RUBY_VERSION <= "2.8" + remove_possible_method :ceil + def ceil(precision = 0) + change(nsec: 0) + subsec.ceil(precision) + end + end + + # Returns a new Time where one or more of the elements have been changed according + # to the +options+ parameter. The time options (:hour, :min, + # :sec, :usec, :nsec) reset cascadingly, so if only + # the hour is passed, then minute, sec, usec and nsec is set to 0. If the hour + # and minute is passed, then sec, usec and nsec is set to 0. The +options+ parameter + # takes a hash with any of these keys: :year, :month, :day, + # :hour, :min, :sec, :usec, :nsec, + # :offset. Pass either :usec or :nsec, not both. 
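  # (Illustrative aside, not from the gem's own docs: the cascading reset described
  # above, plus the calendar-aware arithmetic of +advance+ defined further below.)
  #
  #   t = Time.new(2012, 8, 29, 22, 35, 15)
  #   t.change(hour: 10)                        # => 2012-08-29 10:00:00 (min, sec, usec reset)
  #   t.advance(months: 1)                      # => 2012-09-29 22:35:15
  #   Time.new(2012, 1, 31).advance(months: 1)  # => 2012-02-29 00:00:00 (clamped to month end)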
+ # + # Time.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => Time.new(2012, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => Time.new(1981, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => Time.new(1981, 8, 29, 0, 0, 0) + def change(options) + new_year = options.fetch(:year, year) + new_month = options.fetch(:month, month) + new_day = options.fetch(:day, day) + new_hour = options.fetch(:hour, hour) + new_min = options.fetch(:min, options[:hour] ? 0 : min) + new_sec = options.fetch(:sec, (options[:hour] || options[:min]) ? 0 : sec) + new_offset = options.fetch(:offset, nil) + + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_usec = Rational(new_nsec, 1000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 0 : Rational(nsec, 1000)) + end + + raise ArgumentError, "argument out of range" if new_usec >= 1000000 + + new_sec += Rational(new_usec, 1000000) + + if new_offset + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, new_offset) + elsif utc? + ::Time.utc(new_year, new_month, new_day, new_hour, new_min, new_sec) + elsif zone + ::Time.local(new_year, new_month, new_day, new_hour, new_min, new_sec) + else + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, utc_offset) + end + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The +options+ parameter + # takes a hash with any of these keys: :years, :months, + # :weeks, :days, :hours, :minutes, + # :seconds. + # + # Time.new(2015, 8, 1, 14, 35, 0).advance(seconds: 1) # => 2015-08-01 14:35:01 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(minutes: 1) # => 2015-08-01 14:36:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(hours: 1) # => 2015-08-01 15:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(days: 1) # => 2015-08-02 14:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(weeks: 1) # => 2015-08-08 14:35:00 -0700 + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.gregorian.advance(options) + time_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? 
+ time_advanced_by_date + else + time_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new Time representing the time a number of seconds ago, this is basically a wrapper around the Numeric extension + def ago(seconds) + since(-seconds) + end + + # Returns a new Time representing the time a number of seconds since the instance time + def since(seconds) + self + seconds + rescue + to_datetime.since(seconds) + end + alias :in :since + + # Returns a new Time representing the start of the day (0:00) + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new Time representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new Time representing the end of the day, 23:59:59.999999 + def end_of_day + change( + hour: 23, + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_day :end_of_day + + # Returns a new Time representing the start of the hour (x:00) + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new Time representing the end of the hour, x:59:59.999999 + def end_of_hour + change( + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new Time representing the start of the minute (x:xx:00) + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new Time representing the end of the minute, x:xx:59.999999 + def end_of_minute + change( + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_minute :end_of_minute + + def plus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) #:nodoc: + if ActiveSupport::Duration === other + other.until(self) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Time#- can also be used to determine the number of seconds between two Time instances. + # We're layering on additional behavior so that ActiveSupport::TimeWithZone instances + # are coerced into values that Time#- will recognize + def minus_with_coercion(other) + other = other.comparable_time if other.respond_to?(:comparable_time) + other.is_a?(DateTime) ? to_f - other.to_f : minus_without_coercion(other) + end + alias_method :minus_without_coercion, :- + alias_method :-, :minus_with_coercion + + # Layers additional behavior on Time#<=> so that DateTime and ActiveSupport::TimeWithZone instances + # can be chronologically compared with a Time + def compare_with_coercion(other) + # we're avoiding Time#to_datetime and Time#to_time because they're expensive + if other.class == Time + compare_without_coercion(other) + elsif other.is_a?(Time) + compare_without_coercion(other.to_time) + else + to_datetime <=> other + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion + + # Layers additional behavior on Time#eql? so that ActiveSupport::TimeWithZone instances + # can be eql? 
to an equivalent Time + def eql_with_coercion(other) + # if other is an ActiveSupport::TimeWithZone, coerce a Time instance from it so we can do eql? comparison + other = other.comparable_time if other.respond_to?(:comparable_time) + eql_without_coercion(other) + end + alias_method :eql_without_coercion, :eql? + alias_method :eql?, :eql_with_coercion + + # Returns a new time the specified number of days ago. + def prev_day(days = 1) + advance(days: -days) + end + + # Returns a new time the specified number of days in the future. + def next_day(days = 1) + advance(days: days) + end + + # Returns a new time the specified number of months ago. + def prev_month(months = 1) + advance(months: -months) + end + + # Returns a new time the specified number of months in the future. + def next_month(months = 1) + advance(months: months) + end + + # Returns a new time the specified number of years ago. + def prev_year(years = 1) + advance(years: -years) + end + + # Returns a new time the specified number of years in the future. + def next_year(years = 1) + advance(years: years) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/compatibility.rb new file mode 100644 index 0000000..495e4f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/compatibility.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class Time + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return +self+ or the time in the local system timezone depending + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? self : getlocal + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/conversions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/conversions.rb new file mode 100644 index 0000000..d61a191 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/conversions.rb @@ -0,0 +1,74 @@ +# frozen_string_literal: true + +require "time" +require "active_support/inflector/methods" +require "active_support/values/time_zone" + +class Time + DATE_FORMATS = { + db: "%Y-%m-%d %H:%M:%S", + inspect: "%Y-%m-%d %H:%M:%S.%9N %z", + number: "%Y%m%d%H%M%S", + nsec: "%Y%m%d%H%M%S%9N", + usec: "%Y%m%d%H%M%S%6N", + time: "%H:%M", + short: "%d %b %H:%M", + long: "%B %d, %Y %H:%M", + long_ordinal: lambda { |time| + day_format = ActiveSupport::Inflector.ordinalize(time.day) + time.strftime("%B #{day_format}, %Y %H:%M") + }, + rfc822: lambda { |time| + offset_format = time.formatted_offset(false) + time.strftime("%a, %d %b %Y %H:%M:%S #{offset_format}") + }, + iso8601: lambda { |time| time.iso8601 } + } + + # Converts to a formatted string. See DATE_FORMATS for built-in formats. + # + # This method is aliased to to_s. 
+ # + # time = Time.now # => 2007-01-18 06:10:17 -06:00 + # + # time.to_formatted_s(:time) # => "06:10" + # time.to_s(:time) # => "06:10" + # + # time.to_formatted_s(:db) # => "2007-01-18 06:10:17" + # time.to_formatted_s(:number) # => "20070118061017" + # time.to_formatted_s(:short) # => "18 Jan 06:10" + # time.to_formatted_s(:long) # => "January 18, 2007 06:10" + # time.to_formatted_s(:long_ordinal) # => "January 18th, 2007 06:10" + # time.to_formatted_s(:rfc822) # => "Thu, 18 Jan 2007 06:10:17 -0600" + # time.to_formatted_s(:iso8601) # => "2007-01-18T06:10:17-06:00" + # + # == Adding your own time formats to +to_formatted_s+ + # You can add your own formats to the Time::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a time argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = ->(time) { time.strftime("%B #{time.day.ordinalize}") } + def to_formatted_s(format = :default) + if formatter = DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + to_default_s + end + end + alias_method :to_default_s, :to_s + alias_method :to_s, :to_formatted_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.local(2000).formatted_offset # => "-06:00" + # Time.local(2000).formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Aliased to +xmlschema+ for compatibility with +DateTime+ + alias_method :rfc3339, :xmlschema +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/zones.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/zones.rb new file mode 100644 index 0000000..a5588fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/time/zones.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date_and_time/zones" + +class Time + include DateAndTime::Zones + class << self + attr_accessor :zone_default + + # Returns the TimeZone for the current request, if this has been set (via Time.zone=). + # If Time.zone has not been set for the current request, returns the TimeZone specified in config.time_zone. + def zone + Thread.current[:time_zone] || zone_default + end + + # Sets Time.zone to a TimeZone object for the current request/thread. + # + # This method accepts any of the following: + # + # * A Rails TimeZone object. + # * An identifier for a Rails TimeZone object (e.g., "Eastern Time (US & Canada)", -5.hours). + # * A TZInfo::Timezone object. + # * An identifier for a TZInfo::Timezone object (e.g., "America/New_York"). + # + # Here's an example of how you might set Time.zone on a per request basis and reset it when the request is done. + # current_user.time_zone just needs to return a string identifying the user's preferred time zone: + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # def set_time_zone + # if logged_in? 
+ # Time.use_zone(current_user.time_zone) { yield } + # else + # yield + # end + # end + # end + def zone=(time_zone) + Thread.current[:time_zone] = find_zone!(time_zone) + end + + # Allows override of Time.zone locally inside supplied block; + # resets Time.zone to existing value when done. + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # private + # + # def set_time_zone + # Time.use_zone(current_user.timezone) { yield } + # end + # end + # + # NOTE: This won't affect any ActiveSupport::TimeWithZone + # objects that have already been created, e.g. any model timestamp + # attributes that have been read before the block will remain in + # the application's default timezone. + def use_zone(time_zone) + new_zone = find_zone!(time_zone) + begin + old_zone, ::Time.zone = ::Time.zone, new_zone + yield + ensure + ::Time.zone = old_zone + end + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Raises an +ArgumentError+ for invalid time zones. + # + # Time.find_zone! "America/New_York" # => # + # Time.find_zone! "EST" # => # + # Time.find_zone! -5.hours # => # + # Time.find_zone! nil # => nil + # Time.find_zone! false # => false + # Time.find_zone! "NOT-A-TIMEZONE" # => ArgumentError: Invalid Timezone: NOT-A-TIMEZONE + def find_zone!(time_zone) + if !time_zone || time_zone.is_a?(ActiveSupport::TimeZone) + time_zone + else + # Look up the timezone based on the identifier (unless we've been + # passed a TZInfo::Timezone) + unless time_zone.respond_to?(:period_for_local) + time_zone = ActiveSupport::TimeZone[time_zone] || TZInfo::Timezone.get(time_zone) + end + + # Return if a TimeZone instance, or wrap in a TimeZone instance if a TZInfo::Timezone + if time_zone.is_a?(ActiveSupport::TimeZone) + time_zone + else + ActiveSupport::TimeZone.create(time_zone.name, nil, time_zone) + end + end + rescue TZInfo::InvalidTimezoneIdentifier + raise ArgumentError, "Invalid Timezone: #{time_zone}" + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Returns +nil+ for invalid time zones. + # + # Time.find_zone "America/New_York" # => # + # Time.find_zone "NOT-A-TIMEZONE" # => nil + def find_zone(time_zone) + find_zone!(time_zone) rescue nil + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/uri.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/uri.rb new file mode 100644 index 0000000..1cb36f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/core_ext/uri.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require "uri" + +if RUBY_VERSION < "2.6.0" + require "active_support/core_ext/module/redefine_method" + URI::Parser.class_eval do + silence_redefinition_of_method :unescape + def unescape(str, escaped = /%[a-fA-F\d]{2}/) + # TODO: Are we actually sure that ASCII == UTF-8? + # YK: My initial experiments say yes, but let's be sure please + enc = str.encoding + enc = Encoding::UTF_8 if enc == Encoding::US_ASCII + str.dup.force_encoding(Encoding::ASCII_8BIT).gsub(escaped) { |match| [match[1, 2].hex].pack("C") }.force_encoding(enc) + end + end +end + +module URI + class << self + def parser + ActiveSupport::Deprecation.warn(<<-MSG.squish) + URI.parser is deprecated and will be removed in Rails 6.2. + Use `URI::DEFAULT_PARSER` instead. 
+ MSG + URI::DEFAULT_PARSER + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/current_attributes.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/current_attributes.rb new file mode 100644 index 0000000..9be8f88 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/current_attributes.rb @@ -0,0 +1,209 @@ +# frozen_string_literal: true + +require "active_support/callbacks" +require "active_support/core_ext/enumerable" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + # Abstract super class that provides a thread-isolated attributes singleton, which resets automatically + # before and after each request. This allows you to keep all the per-request attributes easily + # available to the whole system. + # + # The following full app-like example demonstrates how to use a Current class to + # facilitate easy access to the global, per-request attributes without passing them deeply + # around everywhere: + # + # # app/models/current.rb + # class Current < ActiveSupport::CurrentAttributes + # attribute :account, :user + # attribute :request_id, :user_agent, :ip_address + # + # resets { Time.zone = nil } + # + # def user=(user) + # super + # self.account = user.account + # Time.zone = user.time_zone + # end + # end + # + # # app/controllers/concerns/authentication.rb + # module Authentication + # extend ActiveSupport::Concern + # + # included do + # before_action :authenticate + # end + # + # private + # def authenticate + # if authenticated_user = User.find_by(id: cookies.encrypted[:user_id]) + # Current.user = authenticated_user + # else + # redirect_to new_session_url + # end + # end + # end + # + # # app/controllers/concerns/set_current_request_details.rb + # module SetCurrentRequestDetails + # extend ActiveSupport::Concern + # + # included do + # before_action do + # Current.request_id = request.uuid + # Current.user_agent = request.user_agent + # Current.ip_address = request.ip + # end + # end + # end + # + # class ApplicationController < ActionController::Base + # include Authentication + # include SetCurrentRequestDetails + # end + # + # class MessagesController < ApplicationController + # def create + # Current.account.messages.create(message_params) + # end + # end + # + # class Message < ApplicationRecord + # belongs_to :creator, default: -> { Current.user } + # after_create { |message| Event.create(record: message) } + # end + # + # class Event < ApplicationRecord + # before_create do + # self.request_id = Current.request_id + # self.user_agent = Current.user_agent + # self.ip_address = Current.ip_address + # end + # end + # + # A word of caution: It's easy to overdo a global singleton like Current and tangle your model as a result. + # Current should only be used for a few, top-level globals, like account, user, and request details. + # The attributes stuck in Current should be used by more or less all actions on all requests. If you start + # sticking controller-specific attributes in there, you're going to create a mess. + class CurrentAttributes + include ActiveSupport::Callbacks + define_callbacks :reset + + class << self + # Returns singleton instance for this class in this thread. If none exists, one is created. + def instance + current_instances[current_instances_key] ||= new + end + + # Declares one or more attributes that will be given both class and instance accessor methods. 
+ def attribute(*names) + generated_attribute_methods.module_eval do + names.each do |name| + define_method(name) do + attributes[name.to_sym] + end + + define_method("#{name}=") do |attribute| + attributes[name.to_sym] = attribute + end + end + end + + names.each do |name| + define_singleton_method(name) do + instance.public_send(name) + end + + define_singleton_method("#{name}=") do |attribute| + instance.public_send("#{name}=", attribute) + end + end + end + + # Calls this block before #reset is called on the instance. Used for resetting external collaborators that depend on current values. + def before_reset(&block) + set_callback :reset, :before, &block + end + + # Calls this block after #reset is called on the instance. Used for resetting external collaborators, like Time.zone. + def resets(&block) + set_callback :reset, :after, &block + end + alias_method :after_reset, :resets + + delegate :set, :reset, to: :instance + + def reset_all # :nodoc: + current_instances.each_value(&:reset) + end + + def clear_all # :nodoc: + reset_all + current_instances.clear + end + + private + def generated_attribute_methods + @generated_attribute_methods ||= Module.new.tap { |mod| include mod } + end + + def current_instances + Thread.current[:current_attributes_instances] ||= {} + end + + def current_instances_key + @current_instances_key ||= name.to_sym + end + + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + end + + attr_accessor :attributes + + def initialize + @attributes = {} + end + + # Expose one or more attributes within a block. Old values are returned after the block concludes. + # Example demonstrating the common use of needing to set Current attributes outside the request-cycle: + # + # class Chat::PublicationJob < ApplicationJob + # def perform(attributes, room_number, creator) + # Current.set(person: creator) do + # Chat::Publisher.publish(attributes: attributes, room_number: room_number) + # end + # end + # end + def set(set_attributes) + old_attributes = compute_attributes(set_attributes.keys) + assign_attributes(set_attributes) + yield + ensure + assign_attributes(old_attributes) + end + + # Reset all attributes. Should be called before and after actions, when used as a per-request singleton. 
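    # (Illustrative sketch, not from the gem's own docs: a minimal standalone use of
    # the class defined above.)
    #
    #   class Current < ActiveSupport::CurrentAttributes
    #     attribute :user, :request_id
    #   end
    #
    #   Current.user = "alice"
    #   Current.user                               # => "alice"
    #   Current.set(user: "bob") { Current.user }  # => "bob"
    #   Current.user                               # => "alice" (restored after the block)
    #   Current.reset
    #   Current.user                               # => nil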
+ def reset + run_callbacks :reset do + self.attributes = {} + end + end + + private + def assign_attributes(new_attributes) + new_attributes.each { |key, value| public_send("#{key}=", value) } + end + + def compute_attributes(keys) + keys.index_with { |key| public_send(key) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/current_attributes/test_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/current_attributes/test_helper.rb new file mode 100644 index 0000000..2016384 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/current_attributes/test_helper.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +module ActiveSupport::CurrentAttributes::TestHelper # :nodoc: + def before_setup + ActiveSupport::CurrentAttributes.reset_all + super + end + + def after_teardown + super + ActiveSupport::CurrentAttributes.reset_all + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies.rb new file mode 100644 index 0000000..6ab9d1e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies.rb @@ -0,0 +1,828 @@ +# frozen_string_literal: true + +require "set" +require "thread" +require "concurrent/map" +require "pathname" +require "active_support/core_ext/module/aliasing" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/introspection" +require "active_support/core_ext/module/anonymous" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/load_error" +require "active_support/core_ext/name_error" +require "active_support/dependencies/interlock" +require "active_support/inflector" + +module ActiveSupport #:nodoc: + module Dependencies #:nodoc: + extend self + + UNBOUND_METHOD_MODULE_NAME = Module.instance_method(:name) + private_constant :UNBOUND_METHOD_MODULE_NAME + + mattr_accessor :interlock, default: Interlock.new + + # :doc: + + # Execute the supplied block without interference from any + # concurrent loads. + def self.run_interlock + Dependencies.interlock.running { yield } + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. + def self.load_interlock + Dependencies.interlock.loading { yield } + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. + def self.unload_interlock + Dependencies.interlock.unloading { yield } + end + + # :nodoc: + + # Should we turn on Ruby warnings on the first load of dependent files? + mattr_accessor :warnings_on_first_load, default: false + + # All files ever loaded. + mattr_accessor :history, default: Set.new + + # All files currently loaded. + mattr_accessor :loaded, default: Set.new + + # Stack of files being loaded. + mattr_accessor :loading, default: [] + + # Should we load files or require them? + mattr_accessor :mechanism, default: ENV["NO_RELOAD"] ? :require : :load + + # The set of directories from which we may automatically load files. Files + # under these directories will be reloaded on each request in development mode, + # unless the directory also appears in autoload_once_paths. 
+ mattr_accessor :autoload_paths, default: [] + + # The set of directories from which automatically loaded constants are loaded + # only once. All directories in this set must also be present in +autoload_paths+. + mattr_accessor :autoload_once_paths, default: [] + + # This is a private set that collects all eager load paths during bootstrap. + # Useful for Zeitwerk integration. Its public interface is the config.* path + # accessors of each engine. + mattr_accessor :_eager_load_paths, default: Set.new + + # An array of qualified constant names that have been loaded. Adding a name + # to this array will cause it to be unloaded the next time Dependencies are + # cleared. + mattr_accessor :autoloaded_constants, default: [] + + # An array of constant names that need to be unloaded on every request. Used + # to allow arbitrary constants to be marked for unloading. + mattr_accessor :explicitly_unloadable_constants, default: [] + + # The logger used when tracing autoloads. + mattr_accessor :logger + + # If true, trace autoloads with +logger.debug+. + mattr_accessor :verbose, default: false + + # The WatchStack keeps a stack of the modules being watched as files are + # loaded. If a file in the process of being loaded (parent.rb) triggers the + # load of another file (child.rb) the stack will ensure that child.rb + # handles the new constants. + # + # If child.rb is being autoloaded, its constants will be added to + # autoloaded_constants. If it was being required, they will be discarded. + # + # This is handled by walking back up the watch stack and adding the constants + # found by child.rb to the list of original constants in parent.rb. + class WatchStack + include Enumerable + + # @watching is a stack of lists of constants being watched. For instance, + # if parent.rb is autoloaded, the stack will look like [[Object]]. If + # parent.rb then requires namespace/child.rb, the stack will look like + # [[Object], [Namespace]]. + + attr_reader :watching + + def initialize + @watching = [] + @stack = Hash.new { |h, k| h[k] = [] } + end + + def each(&block) + @stack.each(&block) + end + + def watching? + !@watching.empty? + end + + # Returns a list of new constants found since the last call to + # watch_namespaces. + def new_constants + constants = [] + + # Grab the list of namespaces that we're looking for new constants under + @watching.last.each do |namespace| + # Retrieve the constants that were present under the namespace when watch_namespaces + # was originally called + original_constants = @stack[namespace].last + + mod = Inflector.constantize(namespace) if Dependencies.qualified_const_defined?(namespace) + next unless mod.is_a?(Module) + + # Get a list of the constants that were added + new_constants = mod.constants(false) - original_constants + + # @stack[namespace] returns an Array of the constants that are being evaluated + # for that namespace. For instance, if parent.rb requires child.rb, the first + # element of @stack[Object] will be an Array of the constants that were present + # before parent.rb was required. The second element will be an Array of the + # constants that were present before child.rb was required. 
+ @stack[namespace].each do |namespace_constants| + namespace_constants.concat(new_constants) + end + + # Normalize the list of new constants, and add them to the list we will return + new_constants.each do |suffix| + constants << ([namespace, suffix] - ["Object"]).join("::") + end + end + constants + ensure + # A call to new_constants is always called after a call to watch_namespaces + pop_modules(@watching.pop) + end + + # Add a set of modules to the watch stack, remembering the initial + # constants. + def watch_namespaces(namespaces) + @watching << namespaces.map do |namespace| + module_name = Dependencies.to_constant_name(namespace) + original_constants = Dependencies.qualified_const_defined?(module_name) ? + Inflector.constantize(module_name).constants(false) : [] + + @stack[module_name] << original_constants + module_name + end + end + + private + def pop_modules(modules) + modules.each { |mod| @stack[mod].pop } + end + end + + # An internal stack used to record which constants are loaded by any block. + mattr_accessor :constant_watch_stack, default: WatchStack.new + + # Module includes this module. + module ModuleConstMissing #:nodoc: + def self.append_features(base) + base.class_eval do + # Emulate #exclude via an ivar + return if defined?(@_const_missing) && @_const_missing + @_const_missing = instance_method(:const_missing) + remove_method(:const_missing) + end + super + end + + def self.exclude_from(base) + base.class_eval do + define_method :const_missing, @_const_missing + @_const_missing = nil + end + end + + def self.include_into(base) + base.include(self) + append_features(base) + end + + def const_missing(const_name) + from_mod = anonymous? ? guess_for_anonymous(const_name) : self + Dependencies.load_missing_constant(from_mod, const_name) + end + + # We assume that the name of the module reflects the nesting + # (unless it can be proven that is not the case) and the path to the file + # that defines the constant. Anonymous modules cannot follow these + # conventions and therefore we assume that the user wants to refer to a + # top-level constant. + def guess_for_anonymous(const_name) + if Object.const_defined?(const_name) + raise NameError.new "#{const_name} cannot be autoloaded from an anonymous class or module", const_name + else + Object + end + end + + def unloadable(const_desc = self) + super(const_desc) + end + end + + # Object includes this module. + module Loadable #:nodoc: + def self.exclude_from(base) + base.class_eval do + define_method(:load, Kernel.instance_method(:load)) + private :load + + define_method(:require, Kernel.instance_method(:require)) + private :require + end + end + + def self.include_into(base) + base.include(self) + + if base.instance_method(:load).owner == base + base.remove_method(:load) + end + + if base.instance_method(:require).owner == base + base.remove_method(:require) + end + end + + def require_or_load(file_name) + Dependencies.require_or_load(file_name) + end + + # :doc: + + # Warning: This method is obsolete in +:zeitwerk+ mode. In + # +:zeitwerk+ mode semantics match Ruby's and you do not need to be + # defensive with load order. Just refer to classes and modules normally. + # If the constant name is dynamic, camelize if needed, and constantize. + # + # In +:classic+ mode, interprets a file using +mechanism+ and marks its + # defined constants as autoloaded. +file_name+ can be either a string or + # respond to to_path. 
+ # + # In +:classic+ mode, use this method in code that absolutely needs a + # certain constant to be defined at that point. A typical use case is to + # make constant name resolution deterministic for constants with the same + # relative name in different namespaces whose evaluation would depend on + # load order otherwise. + # + # Engines that do not control the mode in which their parent application + # runs should call +require_dependency+ where needed in case the runtime + # mode is +:classic+. + def require_dependency(file_name, message = "No such file to load -- %s.rb") + file_name = file_name.to_path if file_name.respond_to?(:to_path) + unless file_name.is_a?(String) + raise ArgumentError, "the file name must either be a String or implement #to_path -- you passed #{file_name.inspect}" + end + + Dependencies.depend_on(file_name, message) + end + + # :nodoc: + + def load_dependency(file) + if Dependencies.load? && Dependencies.constant_watch_stack.watching? + descs = Dependencies.constant_watch_stack.watching.flatten.uniq + + Dependencies.new_constants_in(*descs) { yield } + else + yield + end + rescue Exception => exception # errors from loading file + exception.blame_file! file if exception.respond_to? :blame_file! + raise + end + + # Mark the given constant as unloadable. Unloadable constants are removed + # each time dependencies are cleared. + # + # Note that marking a constant for unloading need only be done once. Setup + # or init scripts may list each unloadable constant that may need unloading; + # each constant will be removed for every subsequent clear, as opposed to + # for the first clear. + # + # The provided constant descriptor may be a (non-anonymous) module or class, + # or a qualified constant name as a string or symbol. + # + # Returns +true+ if the constant was not previously marked for unloading, + # +false+ otherwise. + def unloadable(const_desc) + Dependencies.mark_for_unload const_desc + end + + private + def load(file, wrap = false) + result = false + load_dependency(file) { result = super } + result + end + + def require(file) + result = false + load_dependency(file) { result = super } + result + end + end + + # Exception file-blaming. + module Blamable #:nodoc: + def blame_file!(file) + (@blamed_files ||= []).unshift file + end + + def blamed_files + @blamed_files ||= [] + end + + def describe_blame + return nil if blamed_files.empty? + "This error occurred while loading the following files:\n #{blamed_files.join "\n "}" + end + + def copy_blame!(exc) + @blamed_files = exc.blamed_files.clone + self + end + end + + def hook! + Loadable.include_into(Object) + ModuleConstMissing.include_into(Module) + Exception.include(Blamable) + end + + def unhook! + ModuleConstMissing.exclude_from(Module) + Loadable.exclude_from(Object) + end + + def load? + mechanism == :load + end + + def depend_on(file_name, message = "No such file to load -- %s.rb") + path = search_for_file(file_name) + require_or_load(path || file_name) + rescue LoadError => load_error + if file_name = load_error.message[/ -- (.*?)(\.rb)?$/, 1] + load_error_message = if load_error.respond_to?(:original_message) + load_error.original_message + else + load_error.message + end + load_error_message.replace(message % file_name) + load_error.copy_blame!(load_error) + end + raise + end + + def clear + Dependencies.unload_interlock do + loaded.clear + loading.clear + remove_unloadable_constants! 
+ end + end + + def require_or_load(file_name, const_path = nil) + file_name = file_name.chomp(".rb") + expanded = File.expand_path(file_name) + return if loaded.include?(expanded) + + Dependencies.load_interlock do + # Maybe it got loaded while we were waiting for our lock: + return if loaded.include?(expanded) + + # Record that we've seen this file *before* loading it to avoid an + # infinite loop with mutual dependencies. + loaded << expanded + loading << expanded + + begin + if load? + # Enable warnings if this file has not been loaded before and + # warnings_on_first_load is set. + load_args = ["#{file_name}.rb"] + load_args << const_path unless const_path.nil? + + if !warnings_on_first_load || history.include?(expanded) + result = load_file(*load_args) + else + enable_warnings { result = load_file(*load_args) } + end + else + result = require file_name + end + rescue Exception + loaded.delete expanded + raise + ensure + loading.pop + end + + # Record history *after* loading so first load gets warnings. + history << expanded + result + end + end + + # Is the provided constant path defined? + def qualified_const_defined?(path) + Object.const_defined?(path, false) + end + + # Given +path+, a filesystem path to a ruby file, return an array of + # constant paths which would cause Dependencies to attempt to load this + # file. + def loadable_constants_for_path(path, bases = autoload_paths) + path = path.chomp(".rb") + expanded_path = File.expand_path(path) + paths = [] + + bases.each do |root| + expanded_root = File.expand_path(root) + next unless expanded_path.start_with?(expanded_root) + + root_size = expanded_root.size + next if expanded_path[root_size] != ?/ + + nesting = expanded_path[(root_size + 1)..-1] + paths << nesting.camelize unless nesting.blank? + end + + paths.uniq! + paths + end + + # Search for a file in autoload_paths matching the provided suffix. + def search_for_file(path_suffix) + path_suffix += ".rb" unless path_suffix.end_with?(".rb") + + autoload_paths.each do |root| + path = File.join(root, path_suffix) + return path if File.file? path + end + nil # Gee, I sure wish we had first_match ;-) + end + + # Does the provided path_suffix correspond to an autoloadable module? + # Instead of returning a boolean, the autoload base for this module is + # returned. + def autoloadable_module?(path_suffix) + autoload_paths.each do |load_path| + return load_path if File.directory? File.join(load_path, path_suffix) + end + nil + end + + def load_once_path?(path) + # to_s works around a ruby issue where String#start_with?(Pathname) + # will raise a TypeError: no implicit conversion of Pathname into String + autoload_once_paths.any? { |base| path.start_with?(base.to_s) } + end + + # Attempt to autoload the provided module name by searching for a directory + # matching the expected path suffix. If found, the module is created and + # assigned to +into+'s constants with the name +const_name+. Provided that + # the directory was loaded from a reloadable base path, it is added to the + # set of constants that are to be unloaded. + def autoload_module!(into, const_name, qualified_name, path_suffix) + return nil unless base_path = autoloadable_module?(path_suffix) + mod = Module.new + into.const_set const_name, mod + log("constant #{qualified_name} autoloaded (module autovivified from #{File.join(base_path, path_suffix)})") + autoloaded_constants << qualified_name unless autoload_once_paths.include?(base_path) + autoloaded_constants.uniq! 
+ mod + end + + # Load the file at the provided path. +const_paths+ is a set of qualified + # constant names. When loading the file, Dependencies will watch for the + # addition of these constants. Each that is defined will be marked as + # autoloaded, and will be removed when Dependencies.clear is next called. + # + # If the second parameter is left off, then Dependencies will construct a + # set of names that the file at +path+ may define. See + # +loadable_constants_for_path+ for more details. + def load_file(path, const_paths = loadable_constants_for_path(path)) + const_paths = [const_paths].compact unless const_paths.is_a? Array + parent_paths = const_paths.collect { |const_path| const_path[/.*(?=::)/] || ::Object } + + result = nil + newly_defined_paths = new_constants_in(*parent_paths) do + result = Kernel.load path + end + + autoloaded_constants.concat newly_defined_paths unless load_once_path?(path) + autoloaded_constants.uniq! + result + end + + # Returns the constant path for the provided parent and constant name. + def qualified_name_for(mod, name) + mod_name = to_constant_name mod + mod_name == "Object" ? name.to_s : "#{mod_name}::#{name}" + end + + # Load the constant named +const_name+ which is missing from +from_mod+. If + # it is not possible to load the constant into from_mod, try its parent + # module using +const_missing+. + def load_missing_constant(from_mod, const_name) + from_mod_name = real_mod_name(from_mod) + unless qualified_const_defined?(from_mod_name) && Inflector.constantize(from_mod_name).equal?(from_mod) + raise ArgumentError, "A copy of #{from_mod} has been removed from the module tree but is still active!" + end + + qualified_name = qualified_name_for(from_mod, const_name) + path_suffix = qualified_name.underscore + + file_path = search_for_file(path_suffix) + + if file_path + expanded = File.expand_path(file_path) + expanded.delete_suffix!(".rb") + + if loading.include?(expanded) + raise "Circular dependency detected while autoloading constant #{qualified_name}" + else + require_or_load(expanded, qualified_name) + + if from_mod.const_defined?(const_name, false) + log("constant #{qualified_name} autoloaded from #{expanded}.rb") + return from_mod.const_get(const_name) + else + raise LoadError, "Unable to autoload constant #{qualified_name}, expected #{file_path} to define it" + end + end + elsif mod = autoload_module!(from_mod, const_name, qualified_name, path_suffix) + return mod + elsif (parent = from_mod.module_parent) && parent != from_mod && + ! from_mod.module_parents.any? { |p| p.const_defined?(const_name, false) } + # If our parents do not have a constant named +const_name+ then we are free + # to attempt to load upwards. If they do have such a constant, then this + # const_missing must be due to from_mod::const_name, which should not + # return constants from from_mod's parents. + begin + # Since Ruby does not pass the nesting at the point the unknown + # constant triggered the callback we cannot fully emulate constant + # name lookup and need to make a trade-off: we are going to assume + # that the nesting in the body of Foo::Bar is [Foo::Bar, Foo] even + # though it might not be. Counterexamples are + # + # class Foo::Bar + # Module.nesting # => [Foo::Bar] + # end + # + # or + # + # module M::N + # module S::T + # Module.nesting # => [S::T, M::N] + # end + # end + # + # for example. + return parent.const_missing(const_name) + rescue NameError => e + raise unless e.missing_name? 
qualified_name_for(parent, const_name) + end + end + + name_error = uninitialized_constant(qualified_name, const_name, receiver: from_mod) + name_error.set_backtrace(caller.reject { |l| l.start_with? __FILE__ }) + raise name_error + end + + # Remove the constants that have been autoloaded, and those that have been + # marked for unloading. Before each constant is removed a callback is sent + # to its class/module if it implements +before_remove_const+. + # + # The callback implementation should be restricted to cleaning up caches, etc. + # as the environment will be in an inconsistent state, e.g. other constants + # may have already been unloaded and not accessible. + def remove_unloadable_constants! + log("removing unloadable constants") + autoloaded_constants.each { |const| remove_constant const } + autoloaded_constants.clear + Reference.clear! + explicitly_unloadable_constants.each { |const| remove_constant const } + end + + class ClassCache + def initialize + @store = Concurrent::Map.new + end + + def empty? + @store.empty? + end + + def key?(key) + @store.key?(key) + end + + def get(key) + key = key.name if key.respond_to?(:name) + @store[key] ||= Inflector.constantize(key) + end + alias :[] :get + + def safe_get(key) + key = key.name if key.respond_to?(:name) + @store[key] ||= Inflector.safe_constantize(key) + end + + def store(klass) + return self unless klass.respond_to?(:name) + raise(ArgumentError, "anonymous classes cannot be cached") if klass.name.empty? + @store[klass.name] = klass + self + end + + def clear! + @store.clear + end + end + + Reference = ClassCache.new + + # Store a reference to a class +klass+. + def reference(klass) + Reference.store klass + end + + # Get the reference for class named +name+. + # Raises an exception if referenced class does not exist. + def constantize(name) + Reference.get(name) + end + + # Get the reference for class named +name+ if one exists. + # Otherwise returns +nil+. + def safe_constantize(name) + Reference.safe_get(name) + end + + # Determine if the given constant has been automatically loaded. + def autoloaded?(desc) + return false if desc.is_a?(Module) && real_mod_name(desc).nil? + name = to_constant_name desc + return false unless qualified_const_defined?(name) + autoloaded_constants.include?(name) + end + + # Will the provided constant descriptor be unloaded? + def will_unload?(const_desc) + autoloaded?(const_desc) || + explicitly_unloadable_constants.include?(to_constant_name(const_desc)) + end + + # Mark the provided constant name for unloading. This constant will be + # unloaded on each request, not just the next one. + def mark_for_unload(const_desc) + name = to_constant_name const_desc + if explicitly_unloadable_constants.include? name + false + else + explicitly_unloadable_constants << name + true + end + end + + # Run the provided block and detect the new constants that were loaded during + # its execution. Constants may only be regarded as 'new' once -- so if the + # block calls +new_constants_in+ again, then the constants defined within the + # inner call will not be reported in this one. + # + # If the provided block does not run to completion, and instead raises an + # exception, any new constants are regarded as being only partially defined + # and will be removed immediately. + def new_constants_in(*descs) + constant_watch_stack.watch_namespaces(descs) + success = false + + begin + yield # Now yield to the code that is to define new constants. 
+ success = true + ensure + new_constants = constant_watch_stack.new_constants + + return new_constants if success + + # Remove partially loaded constants. + new_constants.each { |c| remove_constant(c) } + end + end + + # Convert the provided const desc to a qualified constant name (as a string). + # A module, class, symbol, or string may be provided. + def to_constant_name(desc) #:nodoc: + case desc + when String then desc.delete_prefix("::") + when Symbol then desc.to_s + when Module + real_mod_name(desc) || + raise(ArgumentError, "Anonymous modules have no name to be referenced by") + else raise TypeError, "Not a valid constant descriptor: #{desc.inspect}" + end + end + + def remove_constant(const) #:nodoc: + # Normalize ::Foo, ::Object::Foo, Object::Foo, Object::Object::Foo, etc. as Foo. + normalized = const.to_s.delete_prefix("::") + normalized.sub!(/\A(Object::)+/, "") + + constants = normalized.split("::") + to_remove = constants.pop + + # Remove the file path from the loaded list. + file_path = search_for_file(const.underscore) + if file_path + expanded = File.expand_path(file_path) + expanded.delete_suffix!(".rb") + loaded.delete(expanded) + end + + if constants.empty? + parent = Object + else + # This method is robust to non-reachable constants. + # + # Non-reachable constants may be passed if some of the parents were + # autoloaded and already removed. It is easier to do a sanity check + # here than require the caller to be clever. We check the parent + # rather than the very const argument because we do not want to + # trigger Kernel#autoloads, see the comment below. + parent_name = constants.join("::") + return unless qualified_const_defined?(parent_name) + parent = constantize(parent_name) + end + + # In an autoloaded user.rb like this + # + # autoload :Foo, 'foo' + # + # class User < ActiveRecord::Base + # end + # + # we correctly register "Foo" as being autoloaded. But if the app does + # not use the "Foo" constant we need to be careful not to trigger + # loading "foo.rb" ourselves. While #const_defined? and #const_get? do + # require the file, #autoload? and #remove_const don't. + # + # We are going to remove the constant nonetheless ---which exists as + # far as Ruby is concerned--- because if the user removes the macro + # call from a class or module that were not autoloaded, as in the + # example above with Object, accessing to that constant must err. + unless parent.autoload?(to_remove) + begin + constantized = parent.const_get(to_remove, false) + rescue NameError + # The constant is no longer reachable, just skip it. + return + else + constantized.before_remove_const if constantized.respond_to?(:before_remove_const) + end + end + + begin + parent.instance_eval { remove_const to_remove } + rescue NameError + # The constant is no longer reachable, just skip it. + end + end + + def log(message) + logger.debug("autoloading: #{message}") if logger && verbose + end + + private + if RUBY_VERSION < "2.6" + def uninitialized_constant(qualified_name, const_name, receiver:) + NameError.new("uninitialized constant #{qualified_name}", const_name) + end + else + def uninitialized_constant(qualified_name, const_name, receiver:) + NameError.new("uninitialized constant #{qualified_name}", const_name, receiver: receiver) + end + end + + # Returns the original name of a class or module even if `name` has been + # overridden. + def real_mod_name(mod) + UNBOUND_METHOD_MODULE_NAME.bind(mod).call + end + end +end + +ActiveSupport::Dependencies.hook! 
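# A minimal sketch (not part of the vendored gem) of the classic-mode autoloading
# implemented above; the app/models directory and the Widget class are assumed to
# exist purely for illustration.
require "active_support/dependencies"

ActiveSupport::Dependencies.autoload_paths << File.expand_path("app/models", __dir__)

Widget                                             # Object#const_missing triggers
                                                   # load_missing_constant, which finds and
                                                   # loads app/models/widget.rb
ActiveSupport::Dependencies.autoloaded?("Widget")  # => true
ActiveSupport::Dependencies.clear                  # removes Widget; the next reference reloads it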
diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/autoload.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/autoload.rb new file mode 100644 index 0000000..1cee85d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/autoload.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" + +module ActiveSupport + # Autoload and eager load conveniences for your library. + # + # This module allows you to define autoloads based on + # Rails conventions (i.e. no need to define the path + # it is automatically guessed based on the filename) + # and also define a set of constants that needs to be + # eager loaded: + # + # module MyLib + # extend ActiveSupport::Autoload + # + # autoload :Model + # + # eager_autoload do + # autoload :Cache + # end + # end + # + # Then your library can be eager loaded by simply calling: + # + # MyLib.eager_load! + module Autoload + def self.extended(base) # :nodoc: + base.class_eval do + @_autoloads = {} + @_under_path = nil + @_at_path = nil + @_eager_autoload = false + end + end + + def autoload(const_name, path = @_at_path) + unless path + full = [name, @_under_path, const_name.to_s].compact.join("::") + path = Inflector.underscore(full) + end + + if @_eager_autoload + @_autoloads[const_name] = path + end + + super const_name, path + end + + def autoload_under(path) + @_under_path, old_path = path, @_under_path + yield + ensure + @_under_path = old_path + end + + def autoload_at(path) + @_at_path, old_path = path, @_at_path + yield + ensure + @_at_path = old_path + end + + def eager_autoload + old_eager, @_eager_autoload = @_eager_autoload, true + yield + ensure + @_eager_autoload = old_eager + end + + def eager_load! 
+ @_autoloads.each_value { |file| require file } + end + + def autoloads + @_autoloads + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/interlock.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/interlock.rb new file mode 100644 index 0000000..948be75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/interlock.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require "active_support/concurrency/share_lock" + +module ActiveSupport #:nodoc: + module Dependencies #:nodoc: + class Interlock + def initialize # :nodoc: + @lock = ActiveSupport::Concurrency::ShareLock.new + end + + def loading + @lock.exclusive(purpose: :load, compatible: [:load], after_compatible: [:load]) do + yield + end + end + + def unloading + @lock.exclusive(purpose: :unload, compatible: [:load, :unload], after_compatible: [:load, :unload]) do + yield + end + end + + def start_unloading + @lock.start_exclusive(purpose: :unload, compatible: [:load, :unload]) + end + + def done_unloading + @lock.stop_exclusive(compatible: [:load, :unload]) + end + + def start_running + @lock.start_sharing + end + + def done_running + @lock.stop_sharing + end + + def running + @lock.sharing do + yield + end + end + + def permit_concurrent_loads + @lock.yield_shares(compatible: [:load]) do + yield + end + end + + def raw_state(&block) # :nodoc: + @lock.raw_state(&block) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/zeitwerk_integration.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/zeitwerk_integration.rb new file mode 100644 index 0000000..2155fba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/dependencies/zeitwerk_integration.rb @@ -0,0 +1,117 @@ +# frozen_string_literal: true + +require "set" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + module Dependencies + module ZeitwerkIntegration # :nodoc: all + module Decorations + def clear + Dependencies.unload_interlock do + Rails.autoloaders.main.reload + rescue Zeitwerk::ReloadingDisabledError + raise "reloading is disabled because config.cache_classes is true" + end + end + + def constantize(cpath) + ActiveSupport::Inflector.constantize(cpath) + end + + def safe_constantize(cpath) + ActiveSupport::Inflector.safe_constantize(cpath) + end + + def autoloaded_constants + Rails.autoloaders.main.unloadable_cpaths + end + + def autoloaded?(object) + cpath = object.is_a?(Module) ? real_mod_name(object) : object.to_s + Rails.autoloaders.main.unloadable_cpath?(cpath) + end + + def verbose=(verbose) + l = verbose ? logger || Rails.logger : nil + Rails.autoloaders.each { |autoloader| autoloader.logger = l } + end + + def unhook! + :no_op + end + end + + module RequireDependency + def require_dependency(filename) + filename = filename.to_path if filename.respond_to?(:to_path) + if abspath = ActiveSupport::Dependencies.search_for_file(filename) + require abspath + else + require filename + end + end + end + + module Inflector + # Concurrent::Map is not needed. This is a private class, and overrides + # must be defined while the application boots. 
+ @overrides = {} + + def self.camelize(basename, _abspath) + @overrides[basename] || basename.camelize + end + + def self.inflect(overrides) + @overrides.merge!(overrides) + end + end + + class << self + def take_over(enable_reloading:) + setup_autoloaders(enable_reloading) + freeze_paths + decorate_dependencies + end + + private + def setup_autoloaders(enable_reloading) + Dependencies.autoload_paths.each do |autoload_path| + # Zeitwerk only accepts existing directories in `push_dir` to + # prevent misconfigurations. + next unless File.directory?(autoload_path) + + autoloader = \ + autoload_once?(autoload_path) ? Rails.autoloaders.once : Rails.autoloaders.main + + autoloader.push_dir(autoload_path) + autoloader.do_not_eager_load(autoload_path) unless eager_load?(autoload_path) + end + + Rails.autoloaders.main.enable_reloading if enable_reloading + Rails.autoloaders.each(&:setup) + end + + def autoload_once?(autoload_path) + Dependencies.autoload_once_paths.include?(autoload_path) + end + + def eager_load?(autoload_path) + Dependencies._eager_load_paths.member?(autoload_path) + end + + def freeze_paths + Dependencies.autoload_paths.freeze + Dependencies.autoload_once_paths.freeze + Dependencies._eager_load_paths.freeze + end + + def decorate_dependencies + Dependencies.unhook! + Dependencies.singleton_class.prepend(Decorations) + Object.prepend(RequireDependency) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation.rb new file mode 100644 index 0000000..006e404 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "singleton" + +module ActiveSupport + # \Deprecation specifies the API used by Rails to deprecate methods, instance + # variables, objects and constants. + class Deprecation + # active_support.rb sets an autoload for ActiveSupport::Deprecation. + # + # If these requires were at the top of the file the constant would not be + # defined by the time their files were loaded. Since some of them reopen + # ActiveSupport::Deprecation its autoload would be triggered, resulting in + # a circular require warning for active_support/deprecation.rb. + # + # So, we define the constant first, and load dependencies later. + require "active_support/deprecation/instance_delegator" + require "active_support/deprecation/behaviors" + require "active_support/deprecation/reporting" + require "active_support/deprecation/disallowed" + require "active_support/deprecation/constant_accessor" + require "active_support/deprecation/method_wrappers" + require "active_support/deprecation/proxy_wrappers" + require "active_support/core_ext/module/deprecation" + require "concurrent/atomic/thread_local_var" + + include Singleton + include InstanceDelegator + include Behavior + include Reporting + include Disallowed + include MethodWrapper + + # The version number in which the deprecated behavior will be removed, by default. + attr_accessor :deprecation_horizon + + # It accepts two parameters on initialization. The first is a version of library + # and the second is a library name. + # + # ActiveSupport::Deprecation.new('2.0', 'MyLibrary') + def initialize(deprecation_horizon = "6.2", gem_name = "Rails") + self.gem_name = gem_name + self.deprecation_horizon = deprecation_horizon + # By default, warnings are not silenced and debugging is off. 
+ self.silenced = false + self.debug = false + @silenced_thread = Concurrent::ThreadLocalVar.new(false) + @explicitly_allowed_warnings = Concurrent::ThreadLocalVar.new(nil) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/behaviors.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/behaviors.rb new file mode 100644 index 0000000..9d1fc78 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/behaviors.rb @@ -0,0 +1,122 @@ +# frozen_string_literal: true + +require "active_support/notifications" + +module ActiveSupport + # Raised when ActiveSupport::Deprecation::Behavior#behavior is set with :raise. + # You would set :raise, as a behavior to raise errors and proactively report exceptions from deprecations. + class DeprecationException < StandardError + end + + class Deprecation + # Default warning behaviors per Rails.env. + DEFAULT_BEHAVIORS = { + raise: ->(message, callstack, deprecation_horizon, gem_name) { + e = DeprecationException.new(message) + e.set_backtrace(callstack.map(&:to_s)) + raise e + }, + + stderr: ->(message, callstack, deprecation_horizon, gem_name) { + $stderr.puts(message) + $stderr.puts callstack.join("\n ") if debug + }, + + log: ->(message, callstack, deprecation_horizon, gem_name) { + logger = + if defined?(Rails.logger) && Rails.logger + Rails.logger + else + require "active_support/logger" + ActiveSupport::Logger.new($stderr) + end + logger.warn message + logger.debug callstack.join("\n ") if debug + }, + + notify: ->(message, callstack, deprecation_horizon, gem_name) { + notification_name = "deprecation.#{gem_name.underscore.tr('/', '_')}" + ActiveSupport::Notifications.instrument(notification_name, + message: message, + callstack: callstack, + gem_name: gem_name, + deprecation_horizon: deprecation_horizon) + }, + + silence: ->(message, callstack, deprecation_horizon, gem_name) { }, + } + + # Behavior module allows to determine how to display deprecation messages. + # You can create a custom behavior or set any from the +DEFAULT_BEHAVIORS+ + # constant. Available behaviors are: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to $stderr. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. + # + # Setting behaviors only affects deprecations that happen after boot time. + # For more information you can read the documentation of the +behavior=+ method. + module Behavior + # Whether to print a backtrace along with the warning. + attr_accessor :debug + + # Returns the current behavior or if one isn't set, defaults to +:stderr+. + def behavior + @behavior ||= [DEFAULT_BEHAVIORS[:stderr]] + end + + # Returns the current behavior for disallowed deprecations or if one isn't set, defaults to +:raise+. + def disallowed_behavior + @disallowed_behavior ||= [DEFAULT_BEHAVIORS[:raise]] + end + + # Sets the behavior to the specified value. Can be a single value, array, + # or an object that responds to +call+. + # + # Available behaviors: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to $stderr. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. 
+ # + # Setting behaviors only affects deprecations that happen after boot time. + # Deprecation warnings raised by gems are not affected by this setting + # because they happen before Rails boots up. + # + # ActiveSupport::Deprecation.behavior = :stderr + # ActiveSupport::Deprecation.behavior = [:stderr, :log] + # ActiveSupport::Deprecation.behavior = MyCustomHandler + # ActiveSupport::Deprecation.behavior = ->(message, callstack, deprecation_horizon, gem_name) { + # # custom stuff + # } + def behavior=(behavior) + @behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || arity_coerce(b) } + end + + # Sets the behavior for disallowed deprecations (those configured by + # ActiveSupport::Deprecation.disallowed_warnings=) to the specified + # value. As with +behavior=+, this can be a single value, array, or an + # object that responds to +call+. + def disallowed_behavior=(behavior) + @disallowed_behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || arity_coerce(b) } + end + + private + def arity_coerce(behavior) + unless behavior.respond_to?(:call) + raise ArgumentError, "#{behavior.inspect} is not a valid deprecation behavior." + end + + if behavior.arity == 4 || behavior.arity == -1 + behavior + else + -> message, callstack, _, _ { behavior.call(message, callstack) } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/constant_accessor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/constant_accessor.rb new file mode 100644 index 0000000..1ed0015 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/constant_accessor.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + # DeprecatedConstantAccessor transforms a constant into a deprecated one by + # hooking +const_missing+. + # + # It takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. + # + # The deprecated constant now returns the same object as the new one rather + # than a proxy object, so it can be used transparently in +rescue+ blocks + # etc. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # include ActiveSupport::Deprecation::DeprecatedConstantAccessor + # deprecate_constant 'PLANETS', 'PLANETS_POST_2006' + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. + # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + module DeprecatedConstantAccessor + def self.included(base) + require "active_support/inflector/methods" + + extension = Module.new do + def const_missing(missing_const_name) + if class_variable_defined?(:@@_deprecated_constants) + if (replacement = class_variable_get(:@@_deprecated_constants)[missing_const_name.to_s]) + replacement[:deprecator].warn(replacement[:message] || "#{name}::#{missing_const_name} is deprecated! 
Use #{replacement[:new]} instead.", caller_locations) + return ActiveSupport::Inflector.constantize(replacement[:new].to_s) + end + end + super + end + + def deprecate_constant(const_name, new_constant, message: nil, deprecator: ActiveSupport::Deprecation.instance) + class_variable_set(:@@_deprecated_constants, {}) unless class_variable_defined?(:@@_deprecated_constants) + class_variable_get(:@@_deprecated_constants)[const_name.to_s] = { new: new_constant, message: message, deprecator: deprecator } + end + end + base.singleton_class.prepend extension + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/disallowed.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/disallowed.rb new file mode 100644 index 0000000..096ecaa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/disallowed.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + module Disallowed + # Sets the criteria used to identify deprecation messages which should be + # disallowed. Can be an array containing strings, symbols, or regular + # expressions. (Symbols are treated as strings). These are compared against + # the text of the generated deprecation warning. + # + # Additionally the scalar symbol +:all+ may be used to treat all + # deprecations as disallowed. + # + # Deprecations matching a substring or regular expression will be handled + # using the configured +ActiveSupport::Deprecation.disallowed_behavior+ + # rather than +ActiveSupport::Deprecation.behavior+ + attr_writer :disallowed_warnings + + # Returns the configured criteria used to identify deprecation messages + # which should be treated as disallowed. + def disallowed_warnings + @disallowed_warnings ||= [] + end + + private + def deprecation_disallowed?(message) + disallowed = ActiveSupport::Deprecation.disallowed_warnings + return false if explicitly_allowed?(message) + return true if disallowed == :all + disallowed.any? do |rule| + case rule + when String, Symbol + message.include?(rule.to_s) + when Regexp + rule.match?(message) + end + end + end + + def explicitly_allowed?(message) + allowances = @explicitly_allowed_warnings.value + return false unless allowances + return true if allowances == :all + allowances = [allowances] unless allowances.kind_of?(Array) + allowances.any? 
do |rule| + case rule + when String, Symbol + message.include?(rule.to_s) + when Regexp + rule.match?(message) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/instance_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/instance_delegator.rb new file mode 100644 index 0000000..59dd30a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/instance_delegator.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class Deprecation + module InstanceDelegator # :nodoc: + def self.included(base) + base.extend(ClassMethods) + base.singleton_class.prepend(OverrideDelegators) + base.public_class_method :new + end + + module ClassMethods # :nodoc: + def include(included_module) + included_module.instance_methods.each { |m| method_added(m) } + super + end + + def method_added(method_name) + singleton_class.delegate(method_name, to: :instance) + end + end + + module OverrideDelegators # :nodoc: + def warn(message = nil, callstack = nil) + callstack ||= caller_locations(2) + super + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/method_wrappers.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/method_wrappers.rb new file mode 100644 index 0000000..e6cf28a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/method_wrappers.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/module/redefine_method" + +module ActiveSupport + class Deprecation + module MethodWrapper + # Declare that a method has been deprecated. + # + # class Fred + # def aaa; end + # def bbb; end + # def ccc; end + # def ddd; end + # def eee; end + # end + # + # Using the default deprecator: + # ActiveSupport::Deprecation.deprecate_methods(Fred, :aaa, bbb: :zzz, ccc: 'use Bar#ccc instead') + # # => Fred + # + # Fred.new.aaa + # # DEPRECATION WARNING: aaa is deprecated and will be removed from Rails 5.1. (called from irb_binding at (irb):10) + # # => nil + # + # Fred.new.bbb + # # DEPRECATION WARNING: bbb is deprecated and will be removed from Rails 5.1 (use zzz instead). (called from irb_binding at (irb):11) + # # => nil + # + # Fred.new.ccc + # # DEPRECATION WARNING: ccc is deprecated and will be removed from Rails 5.1 (use Bar#ccc instead). (called from irb_binding at (irb):12) + # # => nil + # + # Passing in a custom deprecator: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # ActiveSupport::Deprecation.deprecate_methods(Fred, ddd: :zzz, deprecator: custom_deprecator) + # # => [:ddd] + # + # Fred.new.ddd + # DEPRECATION WARNING: ddd is deprecated and will be removed from MyGem next-release (use zzz instead). 
(called from irb_binding at (irb):15) + # # => nil + # + # Using a custom deprecator directly: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # custom_deprecator.deprecate_methods(Fred, eee: :zzz) + # # => [:eee] + # + # Fred.new.eee + # DEPRECATION WARNING: eee is deprecated and will be removed from MyGem next-release (use zzz instead). (called from irb_binding at (irb):18) + # # => nil + def deprecate_methods(target_module, *method_names) + options = method_names.extract_options! + deprecator = options.delete(:deprecator) || self + method_names += options.keys + mod = nil + + method_names.each do |method_name| + message = options[method_name] + if target_module.method_defined?(method_name) || target_module.private_method_defined?(method_name) + method = target_module.instance_method(method_name) + target_module.module_eval do + redefine_method(method_name) do |*args, &block| + deprecator.deprecation_warning(method_name, message) + method.bind(self).call(*args, &block) + end + ruby2_keywords(method_name) if respond_to?(:ruby2_keywords, true) + end + else + mod ||= Module.new + mod.module_eval do + define_method(method_name) do |*args, &block| + deprecator.deprecation_warning(method_name, message) + super(*args, &block) + end + ruby2_keywords(method_name) if respond_to?(:ruby2_keywords, true) + end + end + end + + target_module.prepend(mod) if mod + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/proxy_wrappers.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/proxy_wrappers.rb new file mode 100644 index 0000000..4bc112d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/proxy_wrappers.rb @@ -0,0 +1,177 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + class DeprecationProxy #:nodoc: + def self.new(*args, &block) + object = args.first + + return object unless object + super + end + + instance_methods.each { |m| undef_method m unless /^__|^object_id$/.match?(m) } + + # Don't give a deprecation warning on inspect since test/unit and error + # logs rely on it for diagnostics. + def inspect + target.inspect + end + + private + def method_missing(called, *args, &block) + warn caller_locations, called, args + target.__send__(called, *args, &block) + end + end + + # DeprecatedObjectProxy transforms an object into a deprecated one. It + # takes an object, a deprecation message and optionally a deprecator. The + # deprecator defaults to +ActiveSupport::Deprecator+ if none is specified. + # + # deprecated_object = ActiveSupport::Deprecation::DeprecatedObjectProxy.new(Object.new, "This object is now deprecated") + # # => # + # + # deprecated_object.to_s + # DEPRECATION WARNING: This object is now deprecated. + # (Backtrace) + # # => "#" + class DeprecatedObjectProxy < DeprecationProxy + def initialize(object, message, deprecator = ActiveSupport::Deprecation.instance) + @object = object + @message = message + @deprecator = deprecator + end + + private + def target + @object + end + + def warn(callstack, called, args) + @deprecator.warn(@message, callstack) + end + end + + # DeprecatedInstanceVariableProxy transforms an instance variable into a + # deprecated one. It takes an instance of a class, a method on that class + # and an instance variable. It optionally takes a deprecator as the last + # argument. 
The deprecator defaults to +ActiveSupport::Deprecator+ if none + # is specified. + # + # class Example + # def initialize + # @request = ActiveSupport::Deprecation::DeprecatedInstanceVariableProxy.new(self, :request, :@request) + # @_request = :special_request + # end + # + # def request + # @_request + # end + # + # def old_request + # @request + # end + # end + # + # example = Example.new + # # => # + # + # example.old_request.to_s + # # => DEPRECATION WARNING: @request is deprecated! Call request.to_s instead of + # @request.to_s + # (Backtrace information…) + # "special_request" + # + # example.request.to_s + # # => "special_request" + class DeprecatedInstanceVariableProxy < DeprecationProxy + def initialize(instance, method, var = "@#{method}", deprecator = ActiveSupport::Deprecation.instance) + @instance = instance + @method = method + @var = var + @deprecator = deprecator + end + + private + def target + @instance.__send__(@method) + end + + def warn(callstack, called, args) + @deprecator.warn("#{@var} is deprecated! Call #{@method}.#{called} instead of #{@var}.#{called}. Args: #{args.inspect}", callstack) + end + end + + # DeprecatedConstantProxy transforms a constant into a deprecated one. It + # takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. The deprecated constant + # now returns the value of the new one. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. + # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + class DeprecatedConstantProxy < Module + def self.new(*args, **options, &block) + object = args.first + + return object unless object + super + end + + def initialize(old_const, new_const, deprecator = ActiveSupport::Deprecation.instance, message: "#{old_const} is deprecated! Use #{new_const} instead.") + Kernel.require "active_support/inflector/methods" + + @old_const = old_const + @new_const = new_const + @deprecator = deprecator + @message = message + end + + instance_methods.each { |m| undef_method m unless /^__|^object_id$/.match?(m) } + + # Don't give a deprecation warning on inspect since test/unit and error + # logs rely on it for diagnostics. + def inspect + target.inspect + end + + # Don't give a deprecation warning on methods that IRB may invoke + # during tab-completion. + delegate :hash, :instance_methods, :name, :respond_to?, to: :target + + # Returns the class of the new constant. 
+ # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # PLANETS.class # => Array + def class + target.class + end + + private + def target + ActiveSupport::Inflector.constantize(@new_const.to_s) + end + + def const_missing(name) + @deprecator.warn(@message, caller_locations) + target.const_get(name) + end + + def method_missing(called, *args, &block) + @deprecator.warn(@message, caller_locations) + target.__send__(called, *args, &block) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/reporting.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/reporting.rb new file mode 100644 index 0000000..51514eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/deprecation/reporting.rb @@ -0,0 +1,157 @@ +# frozen_string_literal: true + +require "rbconfig" + +module ActiveSupport + class Deprecation + module Reporting + # Whether to print a message (silent mode) + attr_writer :silenced + # Name of gem where method is deprecated + attr_accessor :gem_name + + # Outputs a deprecation warning to the output configured by + # ActiveSupport::Deprecation.behavior. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + def warn(message = nil, callstack = nil) + return if silenced + + callstack ||= caller_locations(2) + deprecation_message(callstack, message).tap do |m| + if deprecation_disallowed?(message) + disallowed_behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) } + else + behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) } + end + end + end + + # Silence deprecation warnings within the block. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + # + # ActiveSupport::Deprecation.silence do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => nil + def silence(&block) + @silenced_thread.bind(true, &block) + end + + # Allow previously disallowed deprecation warnings within the block. + # allowed_warnings can be an array containing strings, symbols, or regular + # expressions. (Symbols are treated as strings). These are compared against + # the text of deprecation warning messages generated within the block. + # Matching warnings will be exempt from the rules set by + # +ActiveSupport::Deprecation.disallowed_warnings+ + # + # The optional if: argument accepts a truthy/falsy value or an object that + # responds to .call. If truthy, then matching warnings will be allowed. + # If falsey then the method yields to the block without allowing the warning. + # + # ActiveSupport::Deprecation.disallowed_behavior = :raise + # ActiveSupport::Deprecation.disallowed_warnings = [ + # "something broke" + # ] + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => ActiveSupport::DeprecationException + # + # ActiveSupport::Deprecation.allow ['something broke'] do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => nil + # + # ActiveSupport::Deprecation.allow ['something broke'], if: Rails.env.production? 
do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => ActiveSupport::DeprecationException for dev/test, nil for production + def allow(allowed_warnings = :all, if: true, &block) + conditional = binding.local_variable_get(:if) + conditional = conditional.call if conditional.respond_to?(:call) + if conditional + @explicitly_allowed_warnings.bind(allowed_warnings, &block) + else + yield + end + end + + def silenced + @silenced || @silenced_thread.value + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + deprecated_method_warning(deprecated_method_name, message).tap do |msg| + warn(msg, caller_backtrace) + end + end + + private + # Outputs a deprecation warning message + # + # deprecated_method_warning(:method_name) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon}" + # deprecated_method_warning(:method_name, :another_method) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (use another_method instead)" + # deprecated_method_warning(:method_name, "Optional message") + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (Optional message)" + def deprecated_method_warning(method_name, message = nil) + warning = "#{method_name} is deprecated and will be removed from #{gem_name} #{deprecation_horizon}" + case message + when Symbol then "#{warning} (use #{message} instead)" + when String then "#{warning} (#{message})" + else warning + end + end + + def deprecation_message(callstack, message = nil) + message ||= "You are using deprecated behavior which will be removed from the next major or minor release." + "DEPRECATION WARNING: #{message} #{deprecation_caller_message(callstack)}" + end + + def deprecation_caller_message(callstack) + file, line, method = extract_callstack(callstack) + if file + if line && method + "(called from #{method} at #{file}:#{line})" + else + "(called from #{file}:#{line})" + end + end + end + + def extract_callstack(callstack) + return _extract_callstack(callstack) if callstack.first.is_a? String + + offending_line = callstack.find { |frame| + frame.absolute_path && !ignored_callstack(frame.absolute_path) + } || callstack.first + + [offending_line.path, offending_line.lineno, offending_line.label] + end + + def _extract_callstack(callstack) + warn "Please pass `caller_locations` to the deprecation API" if $VERBOSE + offending_line = callstack.find { |line| !ignored_callstack(line) } || callstack.first + + if offending_line + if md = offending_line.match(/^(.+?):(\d+)(?::in `(.*?)')?/) + md.captures + else + offending_line + end + end + end + + RAILS_GEM_ROOT = File.expand_path("../../../..", __dir__) + "/" + + def ignored_callstack(path) + path.start_with?(RAILS_GEM_ROOT) || path.start_with?(RbConfig::CONFIG["rubylibdir"]) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/descendants_tracker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/descendants_tracker.rb new file mode 100644 index 0000000..2362914 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/descendants_tracker.rb @@ -0,0 +1,112 @@ +# frozen_string_literal: true + +require "weakref" + +module ActiveSupport + # This module provides an internal implementation to track descendants + # which is faster than iterating through ObjectSpace. 
+ module DescendantsTracker + @@direct_descendants = {} + + class << self + def direct_descendants(klass) + descendants = @@direct_descendants[klass] + descendants ? descendants.to_a : [] + end + alias_method :subclasses, :direct_descendants + + def descendants(klass) + arr = [] + accumulate_descendants(klass, arr) + arr + end + + def clear + if defined? ActiveSupport::Dependencies + @@direct_descendants.each do |klass, descendants| + if Dependencies.autoloaded?(klass) + @@direct_descendants.delete(klass) + else + descendants.reject! { |v| Dependencies.autoloaded?(v) } + end + end + else + @@direct_descendants.clear + end + end + + # This is the only method that is not thread safe, but is only ever called + # during the eager loading phase. + def store_inherited(klass, descendant) + (@@direct_descendants[klass] ||= DescendantsArray.new) << descendant + end + + private + def accumulate_descendants(klass, acc) + if direct_descendants = @@direct_descendants[klass] + direct_descendants.each do |direct_descendant| + acc << direct_descendant + accumulate_descendants(direct_descendant, acc) + end + end + end + end + + def inherited(base) + DescendantsTracker.store_inherited(self, base) + super + end + + def direct_descendants + DescendantsTracker.direct_descendants(self) + end + alias_method :subclasses, :direct_descendants + + def descendants + DescendantsTracker.descendants(self) + end + + # DescendantsArray is an array that contains weak references to classes. + class DescendantsArray # :nodoc: + include Enumerable + + def initialize + @refs = [] + end + + def initialize_copy(orig) + @refs = @refs.dup + end + + def <<(klass) + @refs << WeakRef.new(klass) + end + + def each + @refs.reject! do |ref| + yield ref.__getobj__ + false + rescue WeakRef::RefError + true + end + self + end + + def refs_size + @refs.size + end + + def cleanup! + @refs.delete_if { |ref| !ref.weakref_alive? } + end + + def reject! + @refs.reject! 
do |ref| + yield ref.__getobj__ + rescue WeakRef::RefError + true + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/digest.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/digest.rb new file mode 100644 index 0000000..fba10fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/digest.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + class Digest #:nodoc: + class <(other) + if Scalar === other || Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + else + nil + end + end + + def +(other) + if Duration === other + seconds = value + other.parts.fetch(:seconds, 0) + new_parts = other.parts.merge(seconds: seconds) + new_value = value + other.value + + Duration.new(new_value, new_parts) + else + calculate(:+, other) + end + end + + def -(other) + if Duration === other + seconds = value - other.parts.fetch(:seconds, 0) + new_parts = other.parts.transform_values(&:-@) + new_parts = new_parts.merge(seconds: seconds) + new_value = value - other.value + + Duration.new(new_value, new_parts) + else + calculate(:-, other) + end + end + + def *(other) + if Duration === other + new_parts = other.parts.transform_values { |other_value| value * other_value } + new_value = value * other.value + + Duration.new(new_value, new_parts) + else + calculate(:*, other) + end + end + + def /(other) + if Duration === other + value / other.value + else + calculate(:/, other) + end + end + + def %(other) + if Duration === other + Duration.build(value % other.value) + else + calculate(:%, other) + end + end + + private + def calculate(op, other) + if Scalar === other + Scalar.new(value.public_send(op, other.value)) + elsif Numeric === other + Scalar.new(value.public_send(op, other)) + else + raise_type_error(other) + end + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end + + SECONDS_PER_MINUTE = 60 + SECONDS_PER_HOUR = 3600 + SECONDS_PER_DAY = 86400 + SECONDS_PER_WEEK = 604800 + SECONDS_PER_MONTH = 2629746 # 1/12 of a gregorian year + SECONDS_PER_YEAR = 31556952 # length of a gregorian year (365.2425 days) + + PARTS_IN_SECONDS = { + seconds: 1, + minutes: SECONDS_PER_MINUTE, + hours: SECONDS_PER_HOUR, + days: SECONDS_PER_DAY, + weeks: SECONDS_PER_WEEK, + months: SECONDS_PER_MONTH, + years: SECONDS_PER_YEAR + }.freeze + + PARTS = [:years, :months, :weeks, :days, :hours, :minutes, :seconds].freeze + + attr_accessor :value, :parts + + autoload :ISO8601Parser, "active_support/duration/iso8601_parser" + autoload :ISO8601Serializer, "active_support/duration/iso8601_serializer" + + class << self + # Creates a new Duration from string formatted according to ISO 8601 Duration. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # This method allows negative parts to be present in pattern. + # If invalid string is provided, it will raise +ActiveSupport::Duration::ISO8601Parser::ParsingError+. + def parse(iso8601duration) + parts = ISO8601Parser.new(iso8601duration).parse! 
+ new(calculate_total_seconds(parts), parts) + end + + def ===(other) #:nodoc: + other.is_a?(Duration) + rescue ::NoMethodError + false + end + + def seconds(value) #:nodoc: + new(value, seconds: value) + end + + def minutes(value) #:nodoc: + new(value * SECONDS_PER_MINUTE, minutes: value) + end + + def hours(value) #:nodoc: + new(value * SECONDS_PER_HOUR, hours: value) + end + + def days(value) #:nodoc: + new(value * SECONDS_PER_DAY, days: value) + end + + def weeks(value) #:nodoc: + new(value * SECONDS_PER_WEEK, weeks: value) + end + + def months(value) #:nodoc: + new(value * SECONDS_PER_MONTH, months: value) + end + + def years(value) #:nodoc: + new(value * SECONDS_PER_YEAR, years: value) + end + + # Creates a new Duration from a seconds value that is converted + # to the individual parts: + # + # ActiveSupport::Duration.build(31556952).parts # => {:years=>1} + # ActiveSupport::Duration.build(2716146).parts # => {:months=>1, :days=>1} + # + def build(value) + unless value.is_a?(::Numeric) + raise TypeError, "can't build an #{self.name} from a #{value.class.name}" + end + + parts = {} + remainder = value.round(9) + + PARTS.each do |part| + unless part == :seconds + part_in_seconds = PARTS_IN_SECONDS[part] + parts[part] = remainder.div(part_in_seconds) + remainder %= part_in_seconds + end + end unless value == 0 + + parts[:seconds] = remainder + + new(value, parts) + end + + private + def calculate_total_seconds(parts) + parts.inject(0) do |total, (part, value)| + total + value * PARTS_IN_SECONDS[part] + end + end + end + + def initialize(value, parts) #:nodoc: + @value, @parts = value, parts + @parts.reject! { |k, v| v.zero? } unless value == 0 + end + + def coerce(other) #:nodoc: + case other + when Scalar + [other, self] + when Duration + [Scalar.new(other.value), self] + else + [Scalar.new(other), self] + end + end + + # Compares one Duration with another or a Numeric to this Duration. + # Numeric values are treated as seconds. + def <=>(other) + if Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + end + end + + # Adds another Duration or a Numeric to this Duration. Numeric values + # are treated as seconds. + def +(other) + if Duration === other + parts = @parts.merge(other.parts) do |_key, value, other_value| + value + other_value + end + Duration.new(value + other.value, parts) + else + seconds = @parts.fetch(:seconds, 0) + other + Duration.new(value + other, @parts.merge(seconds: seconds)) + end + end + + # Subtracts another Duration or a Numeric from this Duration. Numeric + # values are treated as seconds. + def -(other) + self + (-other) + end + + # Multiplies this Duration by a Numeric and returns a new Duration. + def *(other) + if Scalar === other || Duration === other + Duration.new(value * other.value, parts.transform_values { |number| number * other.value }) + elsif Numeric === other + Duration.new(value * other, parts.transform_values { |number| number * other }) + else + raise_type_error(other) + end + end + + # Divides this Duration by a Numeric and returns a new Duration. + def /(other) + if Scalar === other + Duration.new(value / other.value, parts.transform_values { |number| number / other.value }) + elsif Duration === other + value / other.value + elsif Numeric === other + Duration.new(value / other, parts.transform_values { |number| number / other }) + else + raise_type_error(other) + end + end + + # Returns the modulo of this Duration by another Duration or Numeric. + # Numeric values are treated as seconds. 
+ def %(other) + if Duration === other || Scalar === other + Duration.build(value % other.value) + elsif Numeric === other + Duration.build(value % other) + else + raise_type_error(other) + end + end + + def -@ #:nodoc: + Duration.new(-value, parts.transform_values(&:-@)) + end + + def +@ #:nodoc: + self + end + + def is_a?(klass) #:nodoc: + Duration == klass || value.is_a?(klass) + end + alias :kind_of? :is_a? + + def instance_of?(klass) # :nodoc: + Duration == klass || value.instance_of?(klass) + end + + # Returns +true+ if +other+ is also a Duration instance with the + # same +value+, or if other == value. + def ==(other) + if Duration === other + other.value == value + else + other == value + end + end + + # Returns the amount of seconds a duration covers as a string. + # For more information check to_i method. + # + # 1.day.to_s # => "86400" + def to_s + @value.to_s + end + + # Returns the number of seconds that this Duration represents. + # + # 1.minute.to_i # => 60 + # 1.hour.to_i # => 3600 + # 1.day.to_i # => 86400 + # + # Note that this conversion makes some assumptions about the + # duration of some periods, e.g. months are always 1/12 of year + # and years are 365.2425 days: + # + # # equivalent to (1.year / 12).to_i + # 1.month.to_i # => 2629746 + # + # # equivalent to 365.2425.days.to_i + # 1.year.to_i # => 31556952 + # + # In such cases, Ruby's core + # Date[https://ruby-doc.org/stdlib/libdoc/date/rdoc/Date.html] and + # Time[https://ruby-doc.org/stdlib/libdoc/time/rdoc/Time.html] should be used for precision + # date and time arithmetic. + def to_i + @value.to_i + end + alias :in_seconds :to_i + + # Returns the amount of minutes a duration covers as a float + # + # 1.day.in_minutes # => 1440.0 + def in_minutes + in_seconds / SECONDS_PER_MINUTE.to_f + end + + # Returns the amount of hours a duration covers as a float + # + # 1.day.in_hours # => 24.0 + def in_hours + in_seconds / SECONDS_PER_HOUR.to_f + end + + # Returns the amount of days a duration covers as a float + # + # 12.hours.in_days # => 0.5 + def in_days + in_seconds / SECONDS_PER_DAY.to_f + end + + # Returns the amount of weeks a duration covers as a float + # + # 2.months.in_weeks # => 8.696 + def in_weeks + in_seconds / SECONDS_PER_WEEK.to_f + end + + # Returns the amount of months a duration covers as a float + # + # 9.weeks.in_months # => 2.07 + def in_months + in_seconds / SECONDS_PER_MONTH.to_f + end + + # Returns the amount of years a duration covers as a float + # + # 30.days.in_years # => 0.082 + def in_years + in_seconds / SECONDS_PER_YEAR.to_f + end + + # Returns +true+ if +other+ is also a Duration instance, which has the + # same parts as this one. + def eql?(other) + Duration === other && other.value.eql?(value) + end + + def hash + @value.hash + end + + # Calculates a new Time or Date that is as far in the future + # as this Duration represents. + def since(time = ::Time.current) + sum(1, time) + end + alias :from_now :since + alias :after :since + + # Calculates a new Time or Date that is as far in the past + # as this Duration represents. + def ago(time = ::Time.current) + sum(-1, time) + end + alias :until :ago + alias :before :ago + + def inspect #:nodoc: + return "#{value} seconds" if parts.empty? + + parts. + sort_by { |unit, _ | PARTS.index(unit) }. + map { |unit, val| "#{val} #{val == 1 ? unit.to_s.chop : unit.to_s}" }. 
+ to_sentence(locale: ::I18n.default_locale) + end + + def as_json(options = nil) #:nodoc: + to_i + end + + def init_with(coder) #:nodoc: + initialize(coder["value"], coder["parts"]) + end + + def encode_with(coder) #:nodoc: + coder.map = { "value" => @value, "parts" => @parts } + end + + # Build ISO 8601 Duration string for this duration. + # The +precision+ parameter can be used to limit seconds' precision of duration. + def iso8601(precision: nil) + ISO8601Serializer.new(self, precision: precision).serialize + end + + private + def sum(sign, time = ::Time.current) + unless time.acts_like?(:time) || time.acts_like?(:date) + raise ::ArgumentError, "expected a time or date, got #{time.inspect}" + end + + if parts.empty? + time.since(sign * value) + else + parts.inject(time) do |t, (type, number)| + if type == :seconds + t.since(sign * number) + elsif type == :minutes + t.since(sign * number * 60) + elsif type == :hours + t.since(sign * number * 3600) + else + t.advance(type => sign * number) + end + end + end + end + + def respond_to_missing?(method, _) + value.respond_to?(method) + end + + def method_missing(method, *args, &block) + value.public_send(method, *args, &block) + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/duration/iso8601_parser.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/duration/iso8601_parser.rb new file mode 100644 index 0000000..83f3b28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/duration/iso8601_parser.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +require "strscan" + +module ActiveSupport + class Duration + # Parses a string formatted according to ISO 8601 Duration into the hash. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # + # This parser allows negative parts to be present in pattern. + class ISO8601Parser # :nodoc: + class ParsingError < ::ArgumentError; end + + PERIOD_OR_COMMA = /\.|,/ + PERIOD = "." + COMMA = "," + + SIGN_MARKER = /\A\-|\+|/ + DATE_MARKER = /P/ + TIME_MARKER = /T/ + DATE_COMPONENT = /(\-?\d+(?:[.,]\d+)?)(Y|M|D|W)/ + TIME_COMPONENT = /(\-?\d+(?:[.,]\d+)?)(H|M|S)/ + + DATE_TO_PART = { "Y" => :years, "M" => :months, "W" => :weeks, "D" => :days } + TIME_TO_PART = { "H" => :hours, "M" => :minutes, "S" => :seconds } + + DATE_COMPONENTS = [:years, :months, :days] + TIME_COMPONENTS = [:hours, :minutes, :seconds] + + attr_reader :parts, :scanner + attr_accessor :mode, :sign + + def initialize(string) + @scanner = StringScanner.new(string) + @parts = {} + @mode = :start + @sign = 1 + end + + def parse! + while !finished? + case mode + when :start + if scan(SIGN_MARKER) + self.sign = (scanner.matched == "-") ? -1 : 1 + self.mode = :sign + else + raise_parsing_error + end + + when :sign + if scan(DATE_MARKER) + self.mode = :date + else + raise_parsing_error + end + + when :date + if scan(TIME_MARKER) + self.mode = :time + elsif scan(DATE_COMPONENT) + parts[DATE_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + when :time + if scan(TIME_COMPONENT) + parts[TIME_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + end + end + + validate! + parts + end + + private + def finished? + scanner.eos? + end + + # Parses number which can be a float with either comma or period. 
+ def number + PERIOD_OR_COMMA.match?(scanner[1]) ? scanner[1].tr(COMMA, PERIOD).to_f : scanner[1].to_i + end + + def scan(pattern) + scanner.scan(pattern) + end + + def raise_parsing_error(reason = nil) + raise ParsingError, "Invalid ISO 8601 duration: #{scanner.string.inspect} #{reason}".strip + end + + # Checks for various semantic errors as stated in ISO 8601 standard. + def validate! + raise_parsing_error("is empty duration") if parts.empty? + + # Mixing any of Y, M, D with W is invalid. + if parts.key?(:weeks) && (parts.keys & DATE_COMPONENTS).any? + raise_parsing_error("mixing weeks with other date parts not allowed") + end + + # Specifying an empty T part is invalid. + if mode == :time && (parts.keys & TIME_COMPONENTS).empty? + raise_parsing_error("time part marker is present but time part is empty") + end + + fractions = parts.values.reject(&:zero?).select { |a| (a % 1) != 0 } + unless fractions.empty? || (fractions.size == 1 && fractions.last == @parts.values.reject(&:zero?).last) + raise_parsing_error "(only last part can be fractional)" + end + + true + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/duration/iso8601_serializer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/duration/iso8601_serializer.rb new file mode 100644 index 0000000..9629627 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/duration/iso8601_serializer.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" + +module ActiveSupport + class Duration + # Serializes duration to string according to ISO 8601 Duration format. + class ISO8601Serializer # :nodoc: + DATE_COMPONENTS = %i(years months days) + + def initialize(duration, precision: nil) + @duration = duration + @precision = precision + end + + # Builds and returns output string. + def serialize + parts = normalize + return "PT0S" if parts.empty? + + output = +"P" + output << "#{parts[:years]}Y" if parts.key?(:years) + output << "#{parts[:months]}M" if parts.key?(:months) + output << "#{parts[:days]}D" if parts.key?(:days) + output << "#{parts[:weeks]}W" if parts.key?(:weeks) + time = +"" + time << "#{parts[:hours]}H" if parts.key?(:hours) + time << "#{parts[:minutes]}M" if parts.key?(:minutes) + if parts.key?(:seconds) + time << "#{sprintf(@precision ? "%0.0#{@precision}f" : '%g', parts[:seconds])}S" + end + output << "T#{time}" unless time.empty? + output + end + + private + # Return pair of duration's parts and whole duration sign. + # Parts are summarized (as they can become repetitive due to addition, etc). + # Zero parts are removed as not significant. + # If all parts are negative it will negate all of them and return minus as a sign. + def normalize + parts = @duration.parts.each_with_object(Hash.new(0)) do |(k, v), p| + p[k] += v unless v.zero? + end + + # Convert weeks to days and remove weeks if mixed with date parts + if week_mixed_with_date?(parts) + parts[:days] += parts.delete(:weeks) * SECONDS_PER_WEEK / SECONDS_PER_DAY + end + + parts + end + + def week_mixed_with_date?(parts) + parts.key?(:weeks) && (parts.keys & DATE_COMPONENTS).any? 
+ end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/encrypted_configuration.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/encrypted_configuration.rb new file mode 100644 index 0000000..cc1d026 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/encrypted_configuration.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "yaml" +require "active_support/encrypted_file" +require "active_support/ordered_options" +require "active_support/core_ext/object/inclusion" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class EncryptedConfiguration < EncryptedFile + delegate :[], :fetch, to: :config + delegate_missing_to :options + + def initialize(config_path:, key_path:, env_key:, raise_if_missing_key:) + super content_path: config_path, key_path: key_path, + env_key: env_key, raise_if_missing_key: raise_if_missing_key + end + + # Allow a config to be started without a file present + def read + super + rescue ActiveSupport::EncryptedFile::MissingContentError + "" + end + + def write(contents) + deserialize(contents) + + super + end + + def config + @config ||= deserialize(read).deep_symbolize_keys + end + + private + def options + @options ||= ActiveSupport::InheritableOptions.new(config) + end + + def deserialize(config) + YAML.load(config).presence || {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/encrypted_file.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/encrypted_file.rb new file mode 100644 index 0000000..a35cc54 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/encrypted_file.rb @@ -0,0 +1,117 @@ +# frozen_string_literal: true + +require "pathname" +require "tmpdir" +require "active_support/message_encryptor" + +module ActiveSupport + class EncryptedFile + class MissingContentError < RuntimeError + def initialize(content_path) + super "Missing encrypted content file in #{content_path}." + end + end + + class MissingKeyError < RuntimeError + def initialize(key_path:, env_key:) + super \ + "Missing encryption key to decrypt file with. " + + "Ask your team for your master key and write it to #{key_path} or put it in the ENV['#{env_key}']." + end + end + + class InvalidKeyLengthError < RuntimeError + def initialize + super "Encryption key must be exactly #{EncryptedFile.expected_key_length} characters." + end + end + + CIPHER = "aes-128-gcm" + + def self.generate_key + SecureRandom.hex(ActiveSupport::MessageEncryptor.key_len(CIPHER)) + end + + def self.expected_key_length # :nodoc: + @expected_key_length ||= generate_key.length + end + + + attr_reader :content_path, :key_path, :env_key, :raise_if_missing_key + + def initialize(content_path:, key_path:, env_key:, raise_if_missing_key:) + @content_path = Pathname.new(content_path).yield_self { |path| path.symlink? ? path.realpath : path } + @key_path = Pathname.new(key_path) + @env_key, @raise_if_missing_key = env_key, raise_if_missing_key + end + + def key + read_env_key || read_key_file || handle_missing_key + end + + def read + if !key.nil? && content_path.exist? 
+ decrypt content_path.binread + else + raise MissingContentError, content_path + end + end + + def write(contents) + IO.binwrite "#{content_path}.tmp", encrypt(contents) + FileUtils.mv "#{content_path}.tmp", content_path + end + + def change(&block) + writing read, &block + end + + + private + def writing(contents) + tmp_file = "#{Process.pid}.#{content_path.basename.to_s.chomp('.enc')}" + tmp_path = Pathname.new File.join(Dir.tmpdir, tmp_file) + tmp_path.binwrite contents + + yield tmp_path + + updated_contents = tmp_path.binread + + write(updated_contents) if updated_contents != contents + ensure + FileUtils.rm(tmp_path) if tmp_path&.exist? + end + + + def encrypt(contents) + check_key_length + encryptor.encrypt_and_sign contents + end + + def decrypt(contents) + encryptor.decrypt_and_verify contents + end + + def encryptor + @encryptor ||= ActiveSupport::MessageEncryptor.new([ key ].pack("H*"), cipher: CIPHER) + end + + + def read_env_key + ENV[env_key] + end + + def read_key_file + return @key_file_contents if defined?(@key_file_contents) + @key_file_contents = (key_path.binread.strip if key_path.exist?) + end + + def handle_missing_key + raise MissingKeyError.new(key_path: key_path, env_key: env_key) if raise_if_missing_key + end + + def check_key_length + raise InvalidKeyLengthError if key&.length != self.class.expected_key_length + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/environment_inquirer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/environment_inquirer.rb new file mode 100644 index 0000000..05361d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/environment_inquirer.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +require "active_support/string_inquirer" + +module ActiveSupport + class EnvironmentInquirer < StringInquirer #:nodoc: + DEFAULT_ENVIRONMENTS = ["development", "test", "production"] + def initialize(env) + super(env) + + DEFAULT_ENVIRONMENTS.each do |default| + instance_variable_set :"@#{default}", env == default + end + end + + DEFAULT_ENVIRONMENTS.each do |env| + class_eval "def #{env}?; @#{env}; end" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/evented_file_update_checker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/evented_file_update_checker.rb new file mode 100644 index 0000000..f9bc3be --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/evented_file_update_checker.rb @@ -0,0 +1,170 @@ +# frozen_string_literal: true + +require "set" +require "pathname" +require "concurrent/atomic/atomic_boolean" +require "listen" +require "active_support/fork_tracker" + +module ActiveSupport + # Allows you to "listen" to changes in a file system. + # The evented file updater does not hit disk when checking for updates + # instead it uses platform specific file system events to trigger a change + # in state. + # + # The file checker takes an array of files to watch or a hash specifying directories + # and file extensions to watch. It also takes a block that is called when + # EventedFileUpdateChecker#execute is run or when EventedFileUpdateChecker#execute_if_updated + # is run and there have been changes to the file system. + # + # Note: Forking will cause the first call to `updated?` to return `true`. 
+ # + # Example: + # + # checker = ActiveSupport::EventedFileUpdateChecker.new(["/tmp/foo"]) { puts "changed" } + # checker.updated? + # # => false + # checker.execute_if_updated + # # => nil + # + # FileUtils.touch("/tmp/foo") + # + # checker.updated? + # # => true + # checker.execute_if_updated + # # => "changed" + # + class EventedFileUpdateChecker #:nodoc: all + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize an EventedFileUpdateChecker" + end + + @block = block + @core = Core.new(files, dirs) + ObjectSpace.define_finalizer(self, @core.finalizer) + end + + def updated? + if @core.restart? + @core.thread_safely(&:restart) + @core.updated.make_true + end + + @core.updated.true? + end + + def execute + @core.updated.make_false + @block.call + end + + def execute_if_updated + if updated? + yield if block_given? + execute + true + end + end + + class Core + attr_reader :updated + + def initialize(files, dirs) + @files = files.map { |file| Pathname(file).expand_path }.to_set + + @dirs = dirs.each_with_object({}) do |(dir, exts), hash| + hash[Pathname(dir).expand_path] = Array(exts).map { |ext| ext.to_s.sub(/\A\.?/, ".") }.to_set + end + + @common_path = common_path(@dirs.keys) + + @dtw = directories_to_watch + @missing = [] + + @updated = Concurrent::AtomicBoolean.new(false) + @mutex = Mutex.new + + start + @after_fork = ActiveSupport::ForkTracker.after_fork { start } + end + + def finalizer + proc do + stop + ActiveSupport::ForkTracker.unregister(@after_fork) + end + end + + def thread_safely + @mutex.synchronize do + yield self + end + end + + def start + normalize_dirs! + @dtw, @missing = [*@dtw, *@missing].partition(&:exist?) + @listener = @dtw.any? ? Listen.to(*@dtw, &method(:changed)) : nil + @listener&.start + end + + def stop + @listener&.stop + end + + def restart + stop + start + end + + def restart? + @missing.any?(&:exist?) + end + + def normalize_dirs! + @dirs.transform_keys! do |dir| + dir.exist? ? dir.realpath : dir + end + end + + def changed(modified, added, removed) + unless @updated.true? + @updated.make_true if (modified + added + removed).any? { |f| watching?(f) } + end + end + + def watching?(file) + file = Pathname(file) + + if @files.member?(file) + true + elsif file.directory? + false + else + ext = file.extname + + file.dirname.ascend do |dir| + matching = @dirs[dir] + + if matching && (matching.empty? || matching.include?(ext)) + break true + elsif dir == @common_path || dir.root? + break false + end + end + end + end + + def directories_to_watch + dtw = @dirs.keys | @files.map(&:dirname) + accounted_for = dtw.to_set + Gem.path.map { |path| Pathname(path) } + dtw.reject { |dir| dir.ascend.drop(1).any? { |parent| accounted_for.include?(parent) } } + end + + def common_path(paths) + paths.map { |path| path.ascend.to_a }.reduce(&:&)&.first + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/execution_wrapper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/execution_wrapper.rb new file mode 100644 index 0000000..07c4f43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/execution_wrapper.rb @@ -0,0 +1,132 @@ +# frozen_string_literal: true + +require "active_support/callbacks" +require "concurrent/hash" + +module ActiveSupport + class ExecutionWrapper + include ActiveSupport::Callbacks + + Null = Object.new # :nodoc: + def Null.complete! 
# :nodoc: + end + + define_callbacks :run + define_callbacks :complete + + def self.to_run(*args, &block) + set_callback(:run, *args, &block) + end + + def self.to_complete(*args, &block) + set_callback(:complete, *args, &block) + end + + RunHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + hook_state[hook] = hook.run + end + end + + CompleteHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + if hook_state.key?(hook) + hook.complete hook_state[hook] + end + end + alias after before + end + + # Register an object to be invoked during both the +run+ and + # +complete+ steps. + # + # +hook.complete+ will be passed the value returned from +hook.run+, + # and will only be invoked if +run+ has previously been called. + # (Mostly, this means it won't be invoked if an exception occurs in + # a preceding +to_run+ block; all ordinary +to_complete+ blocks are + # invoked in that situation.) + def self.register_hook(hook, outer: false) + if outer + to_run RunHook.new(hook), prepend: true + to_complete :after, CompleteHook.new(hook) + else + to_run RunHook.new(hook) + to_complete CompleteHook.new(hook) + end + end + + # Run this execution. + # + # Returns an instance, whose +complete!+ method *must* be invoked + # after the work has been performed. + # + # Where possible, prefer +wrap+. + def self.run!(reset: false) + if reset + lost_instance = active.delete(Thread.current) + lost_instance&.complete! + else + return Null if active? + end + + new.tap do |instance| + success = nil + begin + instance.run! + success = true + ensure + instance.complete! unless success + end + end + end + + # Perform the work in the supplied block as an execution. + def self.wrap + return yield if active? + + instance = run! + begin + yield + ensure + instance.complete! + end + end + + class << self # :nodoc: + attr_accessor :active + end + + def self.inherited(other) # :nodoc: + super + other.active = Concurrent::Hash.new + end + + self.active = Concurrent::Hash.new + + def self.active? # :nodoc: + @active.key?(Thread.current) + end + + def run! # :nodoc: + self.class.active[Thread.current] = self + run_callbacks(:run) + end + + # Complete this in-flight execution. This method *must* be called + # exactly once on the result of any call to +run!+. + # + # Where possible, prefer +wrap+. + def complete! 
+ run_callbacks(:complete) + ensure + self.class.active.delete Thread.current + end + + private + def hook_state + @_hook_state ||= {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/executor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/executor.rb new file mode 100644 index 0000000..ce391b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/executor.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" + +module ActiveSupport + class Executor < ExecutionWrapper + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/file_update_checker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/file_update_checker.rb new file mode 100644 index 0000000..9b665e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/file_update_checker.rb @@ -0,0 +1,162 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/calculations" + +module ActiveSupport + # FileUpdateChecker specifies the API used by Rails to watch files + # and control reloading. The API depends on four methods: + # + # * +initialize+ which expects two parameters and one block as + # described below. + # + # * +updated?+ which returns a boolean if there were updates in + # the filesystem or not. + # + # * +execute+ which executes the given block on initialization + # and updates the latest watched files and timestamp. + # + # * +execute_if_updated+ which just executes the block if it was updated. + # + # After initialization, a call to +execute_if_updated+ must execute + # the block only if there was really a change in the filesystem. + # + # This class is used by Rails to reload the I18n framework whenever + # they are changed upon a new request. + # + # i18n_reloader = ActiveSupport::FileUpdateChecker.new(paths) do + # I18n.reload! + # end + # + # ActiveSupport::Reloader.to_prepare do + # i18n_reloader.execute_if_updated + # end + class FileUpdateChecker + # It accepts two parameters on initialization. The first is an array + # of files and the second is an optional hash of directories. The hash must + # have directories as keys and the value is an array of extensions to be + # watched under that directory. + # + # This method must also receive a block that will be called once a path + # changes. The array of files and list of directories cannot be changed + # after FileUpdateChecker has been initialized. + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize a FileUpdateChecker" + end + + @files = files.freeze + @glob = compile_glob(dirs) + @block = block + + @watched = nil + @updated_at = nil + + @last_watched = watched + @last_update_at = updated_at(@last_watched) + end + + # Check if any of the entries were updated. If so, the watched and/or + # updated_at values are cached until the block is executed via +execute+ + # or +execute_if_updated+. + def updated? + current_watched = watched + if @last_watched.size != current_watched.size + @watched = current_watched + true + else + current_updated_at = updated_at(current_watched) + if @last_update_at < current_updated_at + @watched = current_watched + @updated_at = current_updated_at + true + else + false + end + end + end + + # Executes the given block and updates the latest watched files and + # timestamp. 
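 + # + # A minimal sketch (the watched path and block are illustrative only): + # + # checker = ActiveSupport::FileUpdateChecker.new(["config/routes.rb"]) { puts "routes changed" } + # checker.execute # runs the block unconditionally and records the current mtimes + # checker.execute_if_updated # => false until the watched file is touched again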
+ def execute + @last_watched = watched + @last_update_at = updated_at(@last_watched) + @block.call + ensure + @watched = nil + @updated_at = nil + end + + # Execute the block given if updated. + def execute_if_updated + if updated? + yield if block_given? + execute + true + else + false + end + end + + private + def watched + @watched || begin + all = @files.select { |f| File.exist?(f) } + all.concat(Dir[@glob]) if @glob + all + end + end + + def updated_at(paths) + @updated_at || max_mtime(paths) || Time.at(0) + end + + # This method returns the maximum mtime of the files in +paths+, or +nil+ + # if the array is empty. + # + # Files with a mtime in the future are ignored. Such abnormal situation + # can happen for example if the user changes the clock by hand. It is + # healthy to consider this edge case because with mtimes in the future + # reloading is not triggered. + def max_mtime(paths) + time_now = Time.now + max_mtime = nil + + # Time comparisons are performed with #compare_without_coercion because + # AS redefines these operators in a way that is much slower and does not + # bring any benefit in this particular code. + # + # Read t1.compare_without_coercion(t2) < 0 as t1 < t2. + paths.each do |path| + mtime = File.mtime(path) + + next if time_now.compare_without_coercion(mtime) < 0 + + if max_mtime.nil? || max_mtime.compare_without_coercion(mtime) < 0 + max_mtime = mtime + end + end + + max_mtime + end + + def compile_glob(hash) + hash.freeze # Freeze so changes aren't accidentally pushed + return if hash.empty? + + globs = hash.map do |key, value| + "#{escape(key)}/**/*#{compile_ext(value)}" + end + "{#{globs.join(",")}}" + end + + def escape(key) + key.gsub(",", '\,') + end + + def compile_ext(array) + array = Array(array) + return if array.empty? + ".{#{array.join(",")}}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/fork_tracker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/fork_tracker.rb new file mode 100644 index 0000000..2f25037 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/fork_tracker.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module ActiveSupport + module ForkTracker # :nodoc: + module CoreExt + def fork(*) + if block_given? + super do + ForkTracker.check! + yield + end + else + unless pid = super + ForkTracker.check! + end + pid + end + end + ruby2_keywords(:fork) if respond_to?(:ruby2_keywords, true) + end + + module CoreExtPrivate + include CoreExt + + private + def fork(*) + super + end + ruby2_keywords(:fork) if respond_to?(:ruby2_keywords, true) + end + + @pid = Process.pid + @callbacks = [] + + class << self + def check! + if @pid != Process.pid + @callbacks.each(&:call) + @pid = Process.pid + end + end + + def hook! + if Process.respond_to?(:fork) + ::Object.prepend(CoreExtPrivate) + ::Kernel.prepend(CoreExtPrivate) + ::Kernel.singleton_class.prepend(CoreExt) + ::Process.singleton_class.prepend(CoreExt) + end + end + + def after_fork(&block) + @callbacks << block + block + end + + def unregister(callback) + @callbacks.delete(callback) + end + end + end +end + +ActiveSupport::ForkTracker.hook! 
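As a rough usage sketch of the ForkTracker hook wired up above (a minimal example assuming a fork-capable platform; the callback body is illustrative, not taken from the diff):

    require "active_support/fork_tracker"

    callback = ActiveSupport::ForkTracker.after_fork { puts "forked into #{Process.pid}" }
    fork { } # the child runs the callback via the prepended fork wrappers
    ActiveSupport::ForkTracker.unregister(callback) # drop the hook when it is no longer needed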
diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/gem_version.rb new file mode 100644 index 0000000..80ef04e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/gem_version.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module ActiveSupport + # Returns the version of the currently loaded Active Support as a Gem::Version. + def self.gem_version + Gem::Version.new VERSION::STRING + end + + module VERSION + MAJOR = 6 + MINOR = 1 + TINY = 4 + PRE = "6" + + STRING = [MAJOR, MINOR, TINY, PRE].compact.join(".") + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/gzip.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/gzip.rb new file mode 100644 index 0000000..7ffa6d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/gzip.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "zlib" +require "stringio" + +module ActiveSupport + # A convenient wrapper for the zlib standard library that allows + # compression/decompression of strings with gzip. + # + # gzip = ActiveSupport::Gzip.compress('compress me!') + # # => "\x1F\x8B\b\x00o\x8D\xCDO\x00\x03K\xCE\xCF-(J-.V\xC8MU\x04\x00R>n\x83\f\x00\x00\x00" + # + # ActiveSupport::Gzip.decompress(gzip) + # # => "compress me!" + module Gzip + class Stream < StringIO + def initialize(*) + super + set_encoding "BINARY" + end + def close; rewind; end + end + + # Decompresses a gzipped string. + def self.decompress(source) + Zlib::GzipReader.wrap(StringIO.new(source), &:read) + end + + # Compresses a string using gzip. + def self.compress(source, level = Zlib::DEFAULT_COMPRESSION, strategy = Zlib::DEFAULT_STRATEGY) + output = Stream.new + gz = Zlib::GzipWriter.new(output, level, strategy) + gz.write(source) + gz.close + output.string + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/hash_with_indifferent_access.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/hash_with_indifferent_access.rb new file mode 100644 index 0000000..4e574cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/hash_with_indifferent_access.rb @@ -0,0 +1,423 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" + +module ActiveSupport + # Implements a hash where keys :foo and "foo" are considered + # to be the same. + # + # rgb = ActiveSupport::HashWithIndifferentAccess.new + # + # rgb[:black] = '#000000' + # rgb[:black] # => '#000000' + # rgb['black'] # => '#000000' + # + # rgb['white'] = '#FFFFFF' + # rgb[:white] # => '#FFFFFF' + # rgb['white'] # => '#FFFFFF' + # + # Internally symbols are mapped to strings when used as keys in the entire + # writing interface (calling []=, merge, etc). This + # mapping belongs to the public interface. 
For example, given: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # + # You are guaranteed that the key is returned as a string: + # + # hash.keys # => ["a"] + # + # Technically other types of keys are accepted: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # hash[0] = 0 + # hash # => {"a"=>1, 0=>0} + # + # but this class is intended for use cases where strings or symbols are the + # expected keys and it is convenient to understand both as the same. For + # example the +params+ hash in Ruby on Rails. + # + # Note that core extensions define Hash#with_indifferent_access: + # + # rgb = { black: '#000000', white: '#FFFFFF' }.with_indifferent_access + # + # which may be handy. + # + # To access this class outside of Rails, require the core extension with: + # + # require "active_support/core_ext/hash/indifferent_access" + # + # which will, in turn, require this file. + class HashWithIndifferentAccess < Hash + # Returns +true+ so that Array#extract_options! finds members of + # this class. + def extractable_options? + true + end + + def with_indifferent_access + dup + end + + def nested_under_indifferent_access + self + end + + def initialize(constructor = {}) + if constructor.respond_to?(:to_hash) + super() + update(constructor) + + hash = constructor.is_a?(Hash) ? constructor : constructor.to_hash + self.default = hash.default if hash.default + self.default_proc = hash.default_proc if hash.default_proc + else + super(constructor) + end + end + + def self.[](*args) + new.merge!(Hash[*args]) + end + + alias_method :regular_writer, :[]= unless method_defined?(:regular_writer) + alias_method :regular_update, :update unless method_defined?(:regular_update) + + # Assigns a new value to the hash: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:key] = 'value' + # + # This value can be later fetched using either +:key+ or 'key'. + def []=(key, value) + regular_writer(convert_key(key), convert_value(value, conversion: :assignment)) + end + + alias_method :store, :[]= + + # Updates the receiver in-place, merging in the hashes passed as arguments: + # + # hash_1 = ActiveSupport::HashWithIndifferentAccess.new + # hash_1[:key] = 'value' + # + # hash_2 = ActiveSupport::HashWithIndifferentAccess.new + # hash_2[:key] = 'New Value!' + # + # hash_1.update(hash_2) # => {"key"=>"New Value!"} + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash.update({ "a" => 1 }, { "b" => 2 }) # => { "a" => 1, "b" => 2 } + # + # The arguments can be either an + # ActiveSupport::HashWithIndifferentAccess or a regular +Hash+. + # In either case the merge respects the semantics of indifferent access. + # + # If the argument is a regular hash with keys +:key+ and "key" only one + # of the values end up in the receiver, but which one is unspecified. + # + # When given a block, the value for duplicated keys will be determined + # by the result of invoking the block with the duplicated key, the value + # in the receiver, and the value in +other_hash+. 
The rules for duplicated + # keys follow the semantics of indifferent access: + # + # hash_1[:key] = 10 + # hash_2['key'] = 12 + # hash_1.update(hash_2) { |key, old, new| old + new } # => {"key"=>22} + def update(*other_hashes, &block) + if other_hashes.size == 1 + update_with_single_argument(other_hashes.first, block) + else + other_hashes.each do |other_hash| + update_with_single_argument(other_hash, block) + end + end + self + end + + alias_method :merge!, :update + + # Checks the hash for a key matching the argument passed in: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['key'] = 'value' + # hash.key?(:key) # => true + # hash.key?('key') # => true + def key?(key) + super(convert_key(key)) + end + + alias_method :include?, :key? + alias_method :has_key?, :key? + alias_method :member?, :key? + + # Same as Hash#[] where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters['foo'] # => 1 + # counters[:foo] # => 1 + # counters[:zoo] # => nil + def [](key) + super(convert_key(key)) + end + + # Same as Hash#assoc where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.assoc('foo') # => ["foo", 1] + # counters.assoc(:foo) # => ["foo", 1] + # counters.assoc(:zoo) # => nil + def assoc(key) + super(convert_key(key)) + end + + # Same as Hash#fetch where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.fetch('foo') # => 1 + # counters.fetch(:bar, 0) # => 0 + # counters.fetch(:bar) { |key| 0 } # => 0 + # counters.fetch(:zoo) # => KeyError: key not found: "zoo" + def fetch(key, *extras) + super(convert_key(key), *extras) + end + + # Same as Hash#dig where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = { bar: 1 } + # + # counters.dig('foo', 'bar') # => 1 + # counters.dig(:foo, :bar) # => 1 + # counters.dig(:zoo) # => nil + def dig(*args) + args[0] = convert_key(args[0]) if args.size > 0 + super(*args) + end + + # Same as Hash#default where the key passed as argument can be + # either a string or a symbol: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(1) + # hash.default # => 1 + # + # hash = ActiveSupport::HashWithIndifferentAccess.new { |hash, key| key } + # hash.default # => nil + # hash.default('foo') # => 'foo' + # hash.default(:foo) # => 'foo' + def default(*args) + super(*args.map { |arg| convert_key(arg) }) + end + + # Returns an array of the values at the specified indices: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.values_at('a', 'b') # => ["x", "y"] + def values_at(*keys) + super(*keys.map { |key| convert_key(key) }) + end + + # Returns an array of the values at the specified indices, but also + # raises an exception when one of the keys can't be found. 
+ # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.fetch_values('a', 'b') # => ["x", "y"] + # hash.fetch_values('a', 'c') { |key| 'z' } # => ["x", "z"] + # hash.fetch_values('a', 'c') # => KeyError: key not found: "c" + def fetch_values(*indices, &block) + super(*indices.map { |key| convert_key(key) }, &block) + end + + # Returns a shallow copy of the hash. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new({ a: { b: 'b' } }) + # dup = hash.dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => "c" + # dup[:a][:c] # => "c" + def dup + self.class.new(self).tap do |new_hash| + set_defaults(new_hash) + end + end + + # This method has the same semantics of +update+, except it does not + # modify the receiver but rather returns a new hash with indifferent + # access with the result of the merge. + def merge(*hashes, &block) + dup.update(*hashes, &block) + end + + # Like +merge+ but the other way around: Merges the receiver into the + # argument and returns a new hash with indifferent access as result: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['a'] = nil + # hash.reverse_merge(a: 0, b: 1) # => {"a"=>nil, "b"=>1} + def reverse_merge(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults, :reverse_merge + + # Same semantics as +reverse_merge+ but modifies the receiver in-place. + def reverse_merge!(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults!, :reverse_merge! + + # Replaces the contents of this hash with other_hash. + # + # h = { "a" => 100, "b" => 200 } + # h.replace({ "c" => 300, "d" => 400 }) # => {"c"=>300, "d"=>400} + def replace(other_hash) + super(self.class.new(other_hash)) + end + + # Removes the specified key from the hash. + def delete(key) + super(convert_key(key)) + end + + # Returns a hash with indifferent access that includes everything except given keys. + # hash = { a: "x", b: "y", c: 10 }.with_indifferent_access + # hash.except(:a, "b") # => {c: 10}.with_indifferent_access + # hash # => { a: "x", b: "y", c: 10 }.with_indifferent_access + def except(*keys) + slice(*self.keys - keys.map { |key| convert_key(key) }) + end + alias_method :without, :except + + def stringify_keys!; self end + def deep_stringify_keys!; self end + def stringify_keys; dup end + def deep_stringify_keys; dup end + undef :symbolize_keys! + undef :deep_symbolize_keys! + def symbolize_keys; to_hash.symbolize_keys! end + alias_method :to_options, :symbolize_keys + def deep_symbolize_keys; to_hash.deep_symbolize_keys! end + def to_options!; self end + + def select(*args, &block) + return to_enum(:select) unless block_given? + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + return to_enum(:reject) unless block_given? + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def transform_values(*args, &block) + return to_enum(:transform_values) unless block_given? + dup.tap { |hash| hash.transform_values!(*args, &block) } + end + + def transform_keys(*args, &block) + return to_enum(:transform_keys) unless block_given? + dup.tap { |hash| hash.transform_keys!(*args, &block) } + end + + def transform_keys! + return enum_for(:transform_keys!) { size } unless block_given? + keys.each do |key| + self[yield(key)] = delete(key) + end + self + end + + def slice(*keys) + keys.map! { |key| convert_key(key) } + self.class.new(super) + end + + def slice!(*keys) + keys.map! 
{ |key| convert_key(key) } + super + end + + def compact + dup.tap(&:compact!) + end + + # Convert to a regular hash with string keys. + def to_hash + _new_hash = Hash.new + set_defaults(_new_hash) + + each do |key, value| + _new_hash[key] = convert_value(value, conversion: :to_hash) + end + _new_hash + end + + private + if Symbol.method_defined?(:name) + def convert_key(key) + key.kind_of?(Symbol) ? key.name : key + end + else + def convert_key(key) + key.kind_of?(Symbol) ? key.to_s : key + end + end + + def convert_value(value, conversion: nil) + if value.is_a? Hash + if conversion == :to_hash + value.to_hash + else + value.nested_under_indifferent_access + end + elsif value.is_a?(Array) + if conversion != :assignment || value.frozen? + value = value.dup + end + value.map! { |e| convert_value(e, conversion: conversion) } + else + value + end + end + + def set_defaults(target) + if default_proc + target.default_proc = default_proc.dup + else + target.default = default + end + end + + def update_with_single_argument(other_hash, block) + if other_hash.is_a? HashWithIndifferentAccess + regular_update(other_hash, &block) + else + other_hash.to_hash.each_pair do |key, value| + if block && key?(key) + value = block.call(convert_key(key), self[key], value) + end + regular_writer(convert_key(key), convert_value(value)) + end + end + end + end +end + +# :stopdoc: + +HashWithIndifferentAccess = ActiveSupport::HashWithIndifferentAccess diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/i18n.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/i18n.rb new file mode 100644 index 0000000..39dab1c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/i18n.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +begin + require "i18n" +rescue LoadError => e + $stderr.puts "The i18n gem is not available. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/lazy_load_hooks" + +ActiveSupport.run_load_hooks(:i18n) +I18n.load_path << File.expand_path("locale/en.yml", __dir__) +I18n.load_path << File.expand_path("locale/en.rb", __dir__) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/i18n_railtie.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/i18n_railtie.rb new file mode 100644 index 0000000..094f65a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/i18n_railtie.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/core_ext/array/wrap" + +# :enddoc: + +module I18n + class Railtie < Rails::Railtie + config.i18n = ActiveSupport::OrderedOptions.new + config.i18n.railties_load_path = [] + config.i18n.load_path = [] + config.i18n.fallbacks = ActiveSupport::OrderedOptions.new + + config.eager_load_namespaces << I18n + + # Set the i18n configuration after initialization since a lot of + # configuration is still usually done in application initializers. + config.after_initialize do |app| + I18n::Railtie.initialize_i18n(app) + end + + # Trigger i18n config before any eager loading has happened + # so it's ready if any classes require it when eager loaded. + config.before_eager_load do |app| + I18n::Railtie.initialize_i18n(app) + end + + @i18n_inited = false + + # Setup i18n configuration. 
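 + # The settings consumed here come from the application's config.i18n options; + # for example (an illustrative sketch, not part of the upstream docs): + # + # # config/application.rb + # config.i18n.default_locale = :de + # config.i18n.fallbacks = [I18n.default_locale] + # config.i18n.load_path += Dir[Rails.root.join("config", "locales", "**", "*.yml")]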
+ def self.initialize_i18n(app) + return if @i18n_inited + + fallbacks = app.config.i18n.delete(:fallbacks) + + # Avoid issues with setting the default_locale by disabling available locales + # check while configuring. + enforce_available_locales = app.config.i18n.delete(:enforce_available_locales) + enforce_available_locales = I18n.enforce_available_locales if enforce_available_locales.nil? + I18n.enforce_available_locales = false + + reloadable_paths = [] + app.config.i18n.each do |setting, value| + case setting + when :railties_load_path + reloadable_paths = value + app.config.i18n.load_path.unshift(*value.flat_map(&:existent)) + when :load_path + I18n.load_path += value + when :raise_on_missing_translations + forward_raise_on_missing_translations_config(app) + else + I18n.public_send("#{setting}=", value) + end + end + + init_fallbacks(fallbacks) if fallbacks && validate_fallbacks(fallbacks) + + # Restore available locales check so it will take place from now on. + I18n.enforce_available_locales = enforce_available_locales + + directories = watched_dirs_with_extensions(reloadable_paths) + reloader = app.config.file_watcher.new(I18n.load_path.dup, directories) do + I18n.load_path.keep_if { |p| File.exist?(p) } + I18n.load_path |= reloadable_paths.flat_map(&:existent) + end + + app.reloaders << reloader + app.reloader.to_run do + reloader.execute_if_updated { require_unload_lock! } + end + reloader.execute + + @i18n_inited = true + end + + def self.forward_raise_on_missing_translations_config(app) + ActiveSupport.on_load(:action_view) do + self.raise_on_missing_translations = app.config.i18n.raise_on_missing_translations + end + + ActiveSupport.on_load(:action_controller) do + AbstractController::Translation.raise_on_missing_translations = app.config.i18n.raise_on_missing_translations + end + end + + def self.include_fallbacks_module + I18n.backend.class.include(I18n::Backend::Fallbacks) + end + + def self.init_fallbacks(fallbacks) + include_fallbacks_module + + args = \ + case fallbacks + when ActiveSupport::OrderedOptions + [*(fallbacks[:defaults] || []) << fallbacks[:map]].compact + when Hash, Array + Array.wrap(fallbacks) + else # TrueClass + [I18n.default_locale] + end + + I18n.fallbacks = I18n::Locale::Fallbacks.new(*args) + end + + def self.validate_fallbacks(fallbacks) + case fallbacks + when ActiveSupport::OrderedOptions + !fallbacks.empty? + when TrueClass, Array, Hash + true + else + raise "Unexpected fallback type #{fallbacks.inspect}" + end + end + + def self.watched_dirs_with_extensions(paths) + paths.each_with_object({}) do |path, result| + result[path.absolute_current] = path.extensions + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflections.rb new file mode 100644 index 0000000..baf1cb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflections.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "active_support/inflector/inflections" + +#-- +# Defines the standard inflection rules. These are the starting point for +# new projects and are not considered complete. The current set of inflection +# rules is frozen. This means, we do not change them to become more complete. +# This is a safety measure to keep existing applications from breaking. 
+#++ +module ActiveSupport + Inflector.inflections(:en) do |inflect| + inflect.plural(/$/, "s") + inflect.plural(/s$/i, "s") + inflect.plural(/^(ax|test)is$/i, '\1es') + inflect.plural(/(octop|vir)us$/i, '\1i') + inflect.plural(/(octop|vir)i$/i, '\1i') + inflect.plural(/(alias|status)$/i, '\1es') + inflect.plural(/(bu)s$/i, '\1ses') + inflect.plural(/(buffal|tomat)o$/i, '\1oes') + inflect.plural(/([ti])um$/i, '\1a') + inflect.plural(/([ti])a$/i, '\1a') + inflect.plural(/sis$/i, "ses") + inflect.plural(/(?:([^f])fe|([lr])f)$/i, '\1\2ves') + inflect.plural(/(hive)$/i, '\1s') + inflect.plural(/([^aeiouy]|qu)y$/i, '\1ies') + inflect.plural(/(x|ch|ss|sh)$/i, '\1es') + inflect.plural(/(matr|vert|ind)(?:ix|ex)$/i, '\1ices') + inflect.plural(/^(m|l)ouse$/i, '\1ice') + inflect.plural(/^(m|l)ice$/i, '\1ice') + inflect.plural(/^(ox)$/i, '\1en') + inflect.plural(/^(oxen)$/i, '\1') + inflect.plural(/(quiz)$/i, '\1zes') + + inflect.singular(/s$/i, "") + inflect.singular(/(ss)$/i, '\1') + inflect.singular(/(n)ews$/i, '\1ews') + inflect.singular(/([ti])a$/i, '\1um') + inflect.singular(/((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$/i, '\1sis') + inflect.singular(/(^analy)(sis|ses)$/i, '\1sis') + inflect.singular(/([^f])ves$/i, '\1fe') + inflect.singular(/(hive)s$/i, '\1') + inflect.singular(/(tive)s$/i, '\1') + inflect.singular(/([lr])ves$/i, '\1f') + inflect.singular(/([^aeiouy]|qu)ies$/i, '\1y') + inflect.singular(/(s)eries$/i, '\1eries') + inflect.singular(/(m)ovies$/i, '\1ovie') + inflect.singular(/(x|ch|ss|sh)es$/i, '\1') + inflect.singular(/^(m|l)ice$/i, '\1ouse') + inflect.singular(/(bus)(es)?$/i, '\1') + inflect.singular(/(o)es$/i, '\1') + inflect.singular(/(shoe)s$/i, '\1') + inflect.singular(/(cris|test)(is|es)$/i, '\1is') + inflect.singular(/^(a)x[ie]s$/i, '\1xis') + inflect.singular(/(octop|vir)(us|i)$/i, '\1us') + inflect.singular(/(alias|status)(es)?$/i, '\1') + inflect.singular(/^(ox)en/i, '\1') + inflect.singular(/(vert|ind)ices$/i, '\1ex') + inflect.singular(/(matr)ices$/i, '\1ix') + inflect.singular(/(quiz)zes$/i, '\1') + inflect.singular(/(database)s$/i, '\1') + + inflect.irregular("person", "people") + inflect.irregular("man", "men") + inflect.irregular("child", "children") + inflect.irregular("sex", "sexes") + inflect.irregular("move", "moves") + inflect.irregular("zombie", "zombies") + + inflect.uncountable(%w(equipment information rice money species series fish sheep jeans police)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector.rb new file mode 100644 index 0000000..d77f04c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +# in case active_support/inflector is required without the rest of active_support +require "active_support/inflector/inflections" +require "active_support/inflector/transliterate" +require "active_support/inflector/methods" + +require "active_support/inflections" +require "active_support/core_ext/string/inflections" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/inflections.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/inflections.rb new file mode 100644 index 0000000..99228a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/inflections.rb @@ -0,0 +1,255 @@ +# 
frozen_string_literal: true + +require "concurrent/map" +require "active_support/i18n" + +module ActiveSupport + module Inflector + extend self + + # A singleton instance of this class is yielded by Inflector.inflections, + # which can then be used to specify additional inflection rules. If passed + # an optional locale, rules for other languages can be specified. The + # default locale is :en. Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.plural /^(ox)$/i, '\1\2en' + # inflect.singular /^(ox)en/i, '\1' + # + # inflect.irregular 'octopus', 'octopi' + # + # inflect.uncountable 'equipment' + # end + # + # New rules are added at the top. So in the example above, the irregular + # rule for octopus will now be the first of the pluralization and + # singularization rules that is runs. This guarantees that your rules run + # before any of the rules that may already have been loaded. + class Inflections + @__instance__ = Concurrent::Map.new + + class Uncountables < Array + def initialize + @regex_array = [] + super + end + + def delete(entry) + super entry + @regex_array.delete(to_regex(entry)) + end + + def <<(*word) + add(word) + end + + def add(words) + words = words.flatten.map(&:downcase) + concat(words) + @regex_array += words.map { |word| to_regex(word) } + self + end + + def uncountable?(str) + @regex_array.any? { |regex| regex.match? str } + end + + private + def to_regex(string) + /\b#{::Regexp.escape(string)}\Z/i + end + end + + def self.instance(locale = :en) + @__instance__[locale] ||= new + end + + attr_reader :plurals, :singulars, :uncountables, :humans, :acronyms + + attr_reader :acronyms_camelize_regex, :acronyms_underscore_regex # :nodoc: + + def initialize + @plurals, @singulars, @uncountables, @humans, @acronyms = [], [], Uncountables.new, [], {} + define_acronym_regex_patterns + end + + # Private, for the test suite. + def initialize_dup(orig) # :nodoc: + %w(plurals singulars uncountables humans acronyms).each do |scope| + instance_variable_set("@#{scope}", orig.public_send(scope).dup) + end + define_acronym_regex_patterns + end + + # Specifies a new acronym. An acronym must be specified as it will appear + # in a camelized string. An underscore string that contains the acronym + # will retain the acronym when passed to +camelize+, +humanize+, or + # +titleize+. A camelized string that contains the acronym will maintain + # the acronym when titleized or humanized, and will convert the acronym + # into a non-delimited single lowercase word when passed to +underscore+. + # + # acronym 'HTML' + # titleize 'html' # => 'HTML' + # camelize 'html' # => 'HTML' + # underscore 'MyHTML' # => 'my_html' + # + # The acronym, however, must occur as a delimited unit and not be part of + # another word for conversions to recognize it: + # + # acronym 'HTTP' + # camelize 'my_http_delimited' # => 'MyHTTPDelimited' + # camelize 'https' # => 'Https', not 'HTTPs' + # underscore 'HTTPS' # => 'http_s', not 'https' + # + # acronym 'HTTPS' + # camelize 'https' # => 'HTTPS' + # underscore 'HTTPS' # => 'https' + # + # Note: Acronyms that are passed to +pluralize+ will no longer be + # recognized, since the acronym will not occur as a delimited unit in the + # pluralized result. 
To work around this, you must specify the pluralized + # form as an acronym as well: + # + # acronym 'API' + # camelize(pluralize('api')) # => 'Apis' + # + # acronym 'APIs' + # camelize(pluralize('api')) # => 'APIs' + # + # +acronym+ may be used to specify any word that contains an acronym or + # otherwise needs to maintain a non-standard capitalization. The only + # restriction is that the word must begin with a capital letter. + # + # acronym 'RESTful' + # underscore 'RESTful' # => 'restful' + # underscore 'RESTfulController' # => 'restful_controller' + # titleize 'RESTfulController' # => 'RESTful Controller' + # camelize 'restful' # => 'RESTful' + # camelize 'restful_controller' # => 'RESTfulController' + # + # acronym 'McDonald' + # underscore 'McDonald' # => 'mcdonald' + # camelize 'mcdonald' # => 'McDonald' + def acronym(word) + @acronyms[word.downcase] = word + define_acronym_regex_patterns + end + + # Specifies a new pluralization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def plural(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @plurals.prepend([rule, replacement]) + end + + # Specifies a new singularization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def singular(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @singulars.prepend([rule, replacement]) + end + + # Specifies a new irregular that applies to both pluralization and + # singularization at the same time. This can only be used for strings, not + # regular expressions. You simply pass the irregular in singular and + # plural form. + # + # irregular 'octopus', 'octopi' + # irregular 'person', 'people' + def irregular(singular, plural) + @uncountables.delete(singular) + @uncountables.delete(plural) + + s0 = singular[0] + srest = singular[1..-1] + + p0 = plural[0] + prest = plural[1..-1] + + if s0.upcase == p0.upcase + plural(/(#{s0})#{srest}$/i, '\1' + prest) + plural(/(#{p0})#{prest}$/i, '\1' + prest) + + singular(/(#{s0})#{srest}$/i, '\1' + srest) + singular(/(#{p0})#{prest}$/i, '\1' + srest) + else + plural(/#{s0.upcase}(?i)#{srest}$/, p0.upcase + prest) + plural(/#{s0.downcase}(?i)#{srest}$/, p0.downcase + prest) + plural(/#{p0.upcase}(?i)#{prest}$/, p0.upcase + prest) + plural(/#{p0.downcase}(?i)#{prest}$/, p0.downcase + prest) + + singular(/#{s0.upcase}(?i)#{srest}$/, s0.upcase + srest) + singular(/#{s0.downcase}(?i)#{srest}$/, s0.downcase + srest) + singular(/#{p0.upcase}(?i)#{prest}$/, s0.upcase + srest) + singular(/#{p0.downcase}(?i)#{prest}$/, s0.downcase + srest) + end + end + + # Specifies words that are uncountable and should not be inflected. + # + # uncountable 'money' + # uncountable 'money', 'information' + # uncountable %w( money information rice ) + def uncountable(*words) + @uncountables.add(words) + end + + # Specifies a humanized form of a string by a regular expression rule or + # by a string mapping. When using a regular expression based replacement, + # the normal humanize formatting is called after the replacement. When a + # string is used, the human form should be specified as desired (example: + # 'The name', not 'the_name'). 
+ # + # human /_cnt$/i, '\1_count' + # human 'legacy_col_person_name', 'Name' + def human(rule, replacement) + @humans.prepend([rule, replacement]) + end + + # Clears the loaded inflections within a given scope (default is + # :all). Give the scope as a symbol of the inflection type, the + # options are: :plurals, :singulars, :uncountables, + # :humans. + # + # clear :all + # clear :plurals + def clear(scope = :all) + case scope + when :all + @plurals, @singulars, @uncountables, @humans = [], [], Uncountables.new, [] + else + instance_variable_set "@#{scope}", [] + end + end + + private + def define_acronym_regex_patterns + @acronym_regex = @acronyms.empty? ? /(?=a)b/ : /#{@acronyms.values.join("|")}/ + @acronyms_camelize_regex = /^(?:#{@acronym_regex}(?=\b|[A-Z_])|\w)/ + @acronyms_underscore_regex = /(?:(?<=([A-Za-z\d]))|\b)(#{@acronym_regex})(?=\b|[^a-z])/ + end + end + + # Yields a singleton instance of Inflector::Inflections so you can specify + # additional inflector rules. If passed an optional locale, rules for other + # languages can be specified. If not specified, defaults to :en. + # Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.uncountable 'rails' + # end + def inflections(locale = :en) + if block_given? + yield Inflections.instance(locale) + else + Inflections.instance(locale) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/methods.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/methods.rb new file mode 100644 index 0000000..ad13653 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/methods.rb @@ -0,0 +1,401 @@ +# frozen_string_literal: true + +require "active_support/inflections" +require "active_support/core_ext/object/blank" + +module ActiveSupport + # The Inflector transforms words from singular to plural, class names to table + # names, modularized class names to ones without, and class names to foreign + # keys. The default inflections for pluralization, singularization, and + # uncountable words are kept in inflections.rb. + # + # The Rails core team has stated patches for the inflections library will not + # be accepted in order to avoid breaking legacy applications which may be + # relying on errant inflections. If you discover an incorrect inflection and + # require it for your application or wish to define rules for languages other + # than English, please correct or add them yourself (explained below). + module Inflector + extend self + + # Returns the plural form of the word in the string. + # + # If passed an optional +locale+ parameter, the word will be + # pluralized using rules defined for that language. By default, + # this parameter is set to :en. + # + # pluralize('post') # => "posts" + # pluralize('octopus') # => "octopi" + # pluralize('sheep') # => "sheep" + # pluralize('words') # => "words" + # pluralize('CamelOctopus') # => "CamelOctopi" + # pluralize('ley', :es) # => "leyes" + def pluralize(word, locale = :en) + apply_inflections(word, inflections(locale).plurals, locale) + end + + # The reverse of #pluralize, returns the singular form of a word in a + # string. + # + # If passed an optional +locale+ parameter, the word will be + # singularized using rules defined for that language. By default, + # this parameter is set to :en. 
+ # + # singularize('posts') # => "post" + # singularize('octopi') # => "octopus" + # singularize('sheep') # => "sheep" + # singularize('word') # => "word" + # singularize('CamelOctopi') # => "CamelOctopus" + # singularize('leyes', :es) # => "ley" + def singularize(word, locale = :en) + apply_inflections(word, inflections(locale).singulars, locale) + end + + # Converts strings to UpperCamelCase. + # If the +uppercase_first_letter+ parameter is set to false, then produces + # lowerCamelCase. + # + # Also converts '/' to '::' which is useful for converting + # paths to namespaces. + # + # camelize('active_model') # => "ActiveModel" + # camelize('active_model', false) # => "activeModel" + # camelize('active_model/errors') # => "ActiveModel::Errors" + # camelize('active_model/errors', false) # => "activeModel::Errors" + # + # As a rule of thumb you can think of +camelize+ as the inverse of + # #underscore, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def camelize(term, uppercase_first_letter = true) + string = term.to_s + if uppercase_first_letter + string = string.sub(/^[a-z\d]*/) { |match| inflections.acronyms[match] || match.capitalize } + else + string = string.sub(inflections.acronyms_camelize_regex) { |match| match.downcase } + end + string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{inflections.acronyms[$2] || $2.capitalize}" } + string.gsub!("/", "::") + string + end + + # Makes an underscored, lowercase form from the expression in the string. + # + # Changes '::' to '/' to convert namespaces to paths. + # + # underscore('ActiveModel') # => "active_model" + # underscore('ActiveModel::Errors') # => "active_model/errors" + # + # As a rule of thumb you can think of +underscore+ as the inverse of + # #camelize, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def underscore(camel_cased_word) + return camel_cased_word unless /[A-Z-]|::/.match?(camel_cased_word) + word = camel_cased_word.to_s.gsub("::", "/") + word.gsub!(inflections.acronyms_underscore_regex) { "#{$1 && '_' }#{$2.downcase}" } + word.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2') + word.gsub!(/([a-z\d])([A-Z])/, '\1_\2') + word.tr!("-", "_") + word.downcase! + word + end + + # Tweaks an attribute name for display to end users. + # + # Specifically, performs these transformations: + # + # * Applies human inflection rules to the argument. + # * Deletes leading underscores, if any. + # * Removes a "_id" suffix if present. + # * Replaces underscores with spaces, if any. + # * Downcases all words except acronyms. + # * Capitalizes the first word. + # The capitalization of the first word can be turned off by setting the + # +:capitalize+ option to false (default is true). + # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true (default is false). 
+ # + # humanize('employee_salary') # => "Employee salary" + # humanize('author_id') # => "Author" + # humanize('author_id', capitalize: false) # => "author" + # humanize('_id') # => "Id" + # humanize('author_id', keep_id_suffix: true) # => "Author Id" + # + # If "SSL" was defined to be an acronym: + # + # humanize('ssl_error') # => "SSL error" + # + def humanize(lower_case_and_underscored_word, capitalize: true, keep_id_suffix: false) + result = lower_case_and_underscored_word.to_s.dup + + inflections.humans.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + + result.sub!(/\A_+/, "") + unless keep_id_suffix + result.delete_suffix!("_id") + end + result.tr!("_", " ") + + result.gsub!(/([a-z\d]*)/i) do |match| + "#{inflections.acronyms[match.downcase] || match.downcase}" + end + + if capitalize + result.sub!(/\A\w/) { |match| match.upcase } + end + + result + end + + # Converts just the first character to uppercase. + # + # upcase_first('what a Lovely Day') # => "What a Lovely Day" + # upcase_first('w') # => "W" + # upcase_first('') # => "" + def upcase_first(string) + string.length > 0 ? string[0].upcase.concat(string[1..-1]) : "" + end + + # Capitalizes all the words and replaces some characters in the string to + # create a nicer looking title. +titleize+ is meant for creating pretty + # output. It is not used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # +titleize+ is also aliased as +titlecase+. + # + # titleize('man from the boondocks') # => "Man From The Boondocks" + # titleize('x-men: the last stand') # => "X Men: The Last Stand" + # titleize('TheManWithoutAPast') # => "The Man Without A Past" + # titleize('raiders_of_the_lost_ark') # => "Raiders Of The Lost Ark" + # titleize('string_ending_with_id', keep_id_suffix: true) # => "String Ending With Id" + def titleize(word, keep_id_suffix: false) + humanize(underscore(word), keep_id_suffix: keep_id_suffix).gsub(/\b(?<!\w['’`()])[a-z]/) do |match| + match.capitalize + end + end + + # Creates the name of a table like Rails does for models to table + # names. This method uses the #pluralize method on the last word in the + # string. + # + # tableize('RawScaledScorer') # => "raw_scaled_scorers" + # tableize('ham_and_egg') # => "ham_and_eggs" + # tableize('fancyCategory') # => "fancy_categories" + def tableize(class_name) + pluralize(underscore(class_name)) + end + + # Creates a class name from a plural table name like Rails does for table + # names to models. Note that this returns a string and not a Class (To + # convert to an actual class follow +classify+ with #constantize). + # + # classify('ham_and_eggs') # => "HamAndEgg" + # classify('posts') # => "Post" + # + # Singular names are not handled correctly: + # + # classify('calculus') # => "Calculu" + def classify(table_name) + # strip out any leading schema name + camelize(singularize(table_name.to_s.sub(/.*\./, ""))) + end + + # Replaces underscores with dashes in the string. + # + # dasherize('puni_puni') # => "puni-puni" + def dasherize(underscored_word) + underscored_word.tr("_", "-") + end + + # Removes the module part from the expression in the string. + # + # demodulize('ActiveSupport::Inflector::Inflections') # => "Inflections" + # demodulize('Inflections') # => "Inflections" + # demodulize('::Inflections') # => "Inflections" + # demodulize('') # => "" + # + # See also #deconstantize. + def demodulize(path) + path = path.to_s + if i = path.rindex("::") + path[(i + 2)..-1] + else + path + end + end + + # Removes the rightmost segment from the constant expression in the string.
+ # + # deconstantize('Net::HTTP') # => "Net" + # deconstantize('::Net::HTTP') # => "::Net" + # deconstantize('String') # => "" + # deconstantize('::String') # => "" + # deconstantize('') # => "" + # + # See also #demodulize. + def deconstantize(path) + path.to_s[0, path.rindex("::") || 0] # implementation based on the one in facets' Module#spacename + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # foreign_key('Message') # => "message_id" + # foreign_key('Message', false) # => "messageid" + # foreign_key('Admin::Post') # => "post_id" + def foreign_key(class_name, separate_class_name_and_id_with_underscore = true) + underscore(demodulize(class_name)) + (separate_class_name_and_id_with_underscore ? "_id" : "id") + end + + # Tries to find a constant with the name specified in the argument string. + # + # constantize('Module') # => Module + # constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # constantize('C') # => 'outside', same as ::C + # end + # + # NameError is raised when the name is not in CamelCase or the constant is + # unknown. + def constantize(camel_cased_word) + if camel_cased_word.blank? || !camel_cased_word.include?("::") + Object.const_get(camel_cased_word) + else + names = camel_cased_word.split("::") + + # Trigger a built-in NameError exception including the ill-formed constant in the message. + Object.const_get(camel_cased_word) if names.empty? + + # Remove the first blank element in case of '::ClassName' notation. + names.shift if names.size > 1 && names.first.empty? + + names.inject(Object) do |constant, name| + if constant == Object + constant.const_get(name) + else + candidate = constant.const_get(name) + next candidate if constant.const_defined?(name, false) + next candidate unless Object.const_defined?(name) + + # Go down the ancestors to check if it is owned directly. The check + # stops when we reach Object or the end of ancestors tree. + constant = constant.ancestors.inject(constant) do |const, ancestor| + break const if ancestor == Object + break ancestor if ancestor.const_defined?(name, false) + const + end + + # owner is in Object, so raise + constant.const_get(name, false) + end + end + end + end + + # Tries to find a constant with the name specified in the argument string. + # + # safe_constantize('Module') # => Module + # safe_constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # safe_constantize('C') # => 'outside', same as ::C + # end + # + # +nil+ is returned when the name is not in CamelCase or the constant (or + # part of it) is unknown. + # + # safe_constantize('blargle') # => nil + # safe_constantize('UnknownModule') # => nil + # safe_constantize('UnknownModule::Foo::Bar') # => nil + def safe_constantize(camel_cased_word) + constantize(camel_cased_word) + rescue NameError => e + raise if e.name && !(camel_cased_word.to_s.split("::").include?(e.name.to_s) || + e.name.to_s == camel_cased_word.to_s) + rescue LoadError => e + message = e.respond_to?(:original_message) ? 
e.original_message : e.message + raise unless /Unable to autoload constant #{const_regexp(camel_cased_word)}/.match?(message) + end + + # Returns the suffix that should be added to a number to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinal(1) # => "st" + # ordinal(2) # => "nd" + # ordinal(1002) # => "nd" + # ordinal(1003) # => "rd" + # ordinal(-11) # => "th" + # ordinal(-1021) # => "st" + def ordinal(number) + I18n.translate("number.nth.ordinals", number: number) + end + + # Turns a number into an ordinal string used to denote the position in an + # ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinalize(1) # => "1st" + # ordinalize(2) # => "2nd" + # ordinalize(1002) # => "1002nd" + # ordinalize(1003) # => "1003rd" + # ordinalize(-11) # => "-11th" + # ordinalize(-1021) # => "-1021st" + def ordinalize(number) + I18n.translate("number.nth.ordinalized", number: number) + end + + private + # Mounts a regular expression, returned as a string to ease interpolation, + # that will match part by part the given constant. + # + # const_regexp("Foo::Bar::Baz") # => "Foo(::Bar(::Baz)?)?" + # const_regexp("::") # => "::" + def const_regexp(camel_cased_word) + parts = camel_cased_word.split("::") + + return Regexp.escape(camel_cased_word) if parts.blank? + + last = parts.pop + + parts.reverse!.inject(last) do |acc, part| + part.empty? ? acc : "#{part}(::#{acc})?" + end + end + + # Applies inflection rules for +singularize+ and +pluralize+. + # + # If passed an optional +locale+ parameter, the uncountables will be + # found for that locale. + # + # apply_inflections('post', inflections.plurals, :en) # => "posts" + # apply_inflections('posts', inflections.singulars, :en) # => "post" + def apply_inflections(word, rules, locale = :en) + result = word.to_s.dup + + if word.empty? || inflections(locale).uncountables.uncountable?(result) + result + else + rules.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/transliterate.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/transliterate.rb new file mode 100644 index 0000000..c398b25 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/inflector/transliterate.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/multibyte" +require "active_support/i18n" + +module ActiveSupport + module Inflector + ALLOWED_ENCODINGS_FOR_TRANSLITERATE = [Encoding::UTF_8, Encoding::US_ASCII, Encoding::GB18030].freeze + + # Replaces non-ASCII characters with an ASCII approximation, or if none + # exists, a replacement character which defaults to "?". + # + # transliterate('Ærøskøbing') + # # => "AEroskobing" + # + # Default approximations are provided for Western/Latin characters, + # e.g, "ø", "ñ", "é", "ß", etc. + # + # This method is I18n aware, so you can set up custom approximations for a + # locale. This can be useful, for example, to transliterate German's "ü" + # and "ö" to "ue" and "oe", or to add support for transliterating Russian + # to ASCII. 
+ # + # In order to make your custom transliterations available, you must set + # them as the i18n.transliterate.rule i18n key: + # + # # Store the transliterations in locales/de.yml + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # # Or set them using Ruby + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: { + # 'ü' => 'ue', + # 'ö' => 'oe' + # } + # } + # }) + # + # The value for i18n.transliterate.rule can be a simple Hash that + # maps characters to ASCII approximations as shown above, or, for more + # complex requirements, a Proc: + # + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: ->(string) { MyTransliterator.transliterate(string) } + # } + # }) + # + # Now you can have different transliterations for each locale: + # + # transliterate('Jürgen', locale: :en) + # # => "Jurgen" + # + # transliterate('Jürgen', locale: :de) + # # => "Juergen" + # + # Transliteration is restricted to UTF-8, US-ASCII and GB18030 strings + # Other encodings will raise an ArgumentError. + def transliterate(string, replacement = "?", locale: nil) + string = string.dup if string.frozen? + raise ArgumentError, "Can only transliterate strings. Received #{string.class.name}" unless string.is_a?(String) + raise ArgumentError, "Cannot transliterate strings with #{string.encoding} encoding" unless ALLOWED_ENCODINGS_FOR_TRANSLITERATE.include?(string.encoding) + + input_encoding = string.encoding + + # US-ASCII is a subset of UTF-8 so we'll force encoding as UTF-8 if + # US-ASCII is given. This way we can let tidy_bytes handle the string + # in the same way as we do for UTF-8 + string.force_encoding(Encoding::UTF_8) if string.encoding == Encoding::US_ASCII + + # GB18030 is Unicode compatible but is not a direct mapping so needs to be + # transcoded. Using invalid/undef :replace will result in loss of data in + # the event of invalid characters, but since tidy_bytes will replace + # invalid/undef with a "?" we're safe to do the same beforehand + string.encode!(Encoding::UTF_8, invalid: :replace, undef: :replace) if string.encoding == Encoding::GB18030 + + transliterated = I18n.transliterate( + ActiveSupport::Multibyte::Unicode.tidy_bytes(string).unicode_normalize(:nfc), + replacement: replacement, + locale: locale + ) + + # Restore the string encoding of the input if it was not UTF-8. + # Apply invalid/undef :replace as tidy_bytes does + transliterated.encode!(input_encoding, invalid: :replace, undef: :replace) if input_encoding != transliterated.encoding + + transliterated + end + + # Replaces special characters in a string so that it may be used as part of + # a 'pretty' URL. + # + # parameterize("Donald E. Knuth") # => "donald-e-knuth" + # parameterize("^très|Jolie-- ") # => "tres-jolie" + # + # To use a custom separator, override the +separator+ argument. + # + # parameterize("Donald E. Knuth", separator: '_') # => "donald_e_knuth" + # parameterize("^très|Jolie__ ", separator: '_') # => "tres_jolie" + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # parameterize("Donald E. 
Knuth", preserve_case: true) # => "Donald-E-Knuth" + # parameterize("^très|Jolie-- ", preserve_case: true) # => "tres-Jolie" + # + # It preserves dashes and underscores unless they are used as separators: + # + # parameterize("^très|Jolie__ ") # => "tres-jolie__" + # parameterize("^très|Jolie-- ", separator: "_") # => "tres_jolie--" + # parameterize("^très_Jolie-- ", separator: ".") # => "tres_jolie--" + # + # If the optional parameter +locale+ is specified, + # the word will be parameterized as a word of that language. + # By default, this parameter is set to nil and it will use + # the configured I18n.locale. + def parameterize(string, separator: "-", preserve_case: false, locale: nil) + # Replace accented chars with their ASCII equivalents. + parameterized_string = transliterate(string, locale: locale) + + # Turn unwanted chars into the separator. + parameterized_string.gsub!(/[^a-z0-9\-_]+/i, separator) + + unless separator.nil? || separator.empty? + if separator == "-" + re_duplicate_separator = /-{2,}/ + re_leading_trailing_separator = /^-|-$/i + else + re_sep = Regexp.escape(separator) + re_duplicate_separator = /#{re_sep}{2,}/ + re_leading_trailing_separator = /^#{re_sep}|#{re_sep}$/i + end + # No more than one of the separator in a row. + parameterized_string.gsub!(re_duplicate_separator, separator) + # Remove leading/trailing separator. + parameterized_string.gsub!(re_leading_trailing_separator, "") + end + + parameterized_string.downcase! unless preserve_case + parameterized_string + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json.rb new file mode 100644 index 0000000..d788717 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/json/decoding" +require "active_support/json/encoding" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json/decoding.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json/decoding.rb new file mode 100644 index 0000000..e40957e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json/decoding.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/delegation" +require "json" + +module ActiveSupport + # Look for and parse json strings that look like ISO 8601 times. + mattr_accessor :parse_json_times + + module JSON + # matches YAML-formatted dates + DATE_REGEX = /\A\d{4}-\d{2}-\d{2}\z/ + DATETIME_REGEX = /\A(?:\d{4}-\d{2}-\d{2}|\d{4}-\d{1,2}-\d{1,2}[T \t]+\d{1,2}:\d{2}:\d{2}(\.[0-9]*)?(([ \t]*)Z|[-+]\d{2}?(:\d{2})?)?)\z/ + + class << self + # Parses a JSON string (JavaScript Object Notation) into a hash. + # See http://www.json.org for more info. + # + # ActiveSupport::JSON.decode("{\"team\":\"rails\",\"players\":\"36\"}") + # => {"team" => "rails", "players" => "36"} + def decode(json) + data = ::JSON.parse(json, quirks_mode: true) + + if ActiveSupport.parse_json_times + convert_dates_from(data) + else + data + end + end + + # Returns the class of the error that will be raised when there is an + # error in decoding JSON. Using this method means you won't directly + # depend on the ActiveSupport's JSON implementation, in case it changes + # in the future. 
+ # + # begin + # obj = ActiveSupport::JSON.decode(some_string) + # rescue ActiveSupport::JSON.parse_error + # Rails.logger.warn("Attempted to decode invalid JSON: #{some_string}") + # end + def parse_error + ::JSON::ParserError + end + + private + def convert_dates_from(data) + case data + when nil + nil + when DATE_REGEX + begin + Date.parse(data) + rescue ArgumentError + data + end + when DATETIME_REGEX + begin + Time.zone.parse(data) + rescue ArgumentError + data + end + when Array + data.map! { |d| convert_dates_from(d) } + when Hash + data.transform_values! do |value| + convert_dates_from(value) + end + else + data + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json/encoding.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json/encoding.rb new file mode 100644 index 0000000..04de632 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/json/encoding.rb @@ -0,0 +1,138 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/json" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class << self + delegate :use_standard_json_time_format, :use_standard_json_time_format=, + :time_precision, :time_precision=, + :escape_html_entities_in_json, :escape_html_entities_in_json=, + :json_encoder, :json_encoder=, + to: :'ActiveSupport::JSON::Encoding' + end + + module JSON + # Dumps objects in JSON (JavaScript Object Notation). + # See http://www.json.org for more info. + # + # ActiveSupport::JSON.encode({ team: 'rails', players: '36' }) + # # => "{\"team\":\"rails\",\"players\":\"36\"}" + def self.encode(value, options = nil) + Encoding.json_encoder.new(options).encode(value) + end + + module Encoding #:nodoc: + class JSONGemEncoder #:nodoc: + attr_reader :options + + def initialize(options = nil) + @options = options || {} + end + + # Encode the given object into a JSON string + def encode(value) + stringify jsonify value.as_json(options.dup) + end + + private + # Rails does more escaping than the JSON gem natively does (we + # escape \u2028 and \u2029 and optionally >, <, & to work around + # certain browser problems). + ESCAPED_CHARS = { + "\u2028" => '\u2028', + "\u2029" => '\u2029', + ">" => '\u003e', + "<" => '\u003c', + "&" => '\u0026', + } + + ESCAPE_REGEX_WITH_HTML_ENTITIES = /[\u2028\u2029><&]/u + ESCAPE_REGEX_WITHOUT_HTML_ENTITIES = /[\u2028\u2029]/u + + # This class wraps all the strings we see and does the extra escaping + class EscapedString < String #:nodoc: + def to_json(*) + if Encoding.escape_html_entities_in_json + s = super + s.gsub! ESCAPE_REGEX_WITH_HTML_ENTITIES, ESCAPED_CHARS + s + else + s = super + s.gsub! ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, ESCAPED_CHARS + s + end + end + + def to_s + self + end + end + + # Mark these as private so we don't leak encoding-specific constructs + private_constant :ESCAPED_CHARS, :ESCAPE_REGEX_WITH_HTML_ENTITIES, + :ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, :EscapedString + + # Convert an object into a "JSON-ready" representation composed of + # primitives like Hash, Array, String, Numeric, + # and +true+/+false+/+nil+. + # Recursively calls #as_json to the object to recursively build a + # fully JSON-ready object. + # + # This allows developers to implement #as_json without having to + # worry about what base types of objects they are allowed to return + # or having to remember to call #as_json recursively. 
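+ # + # A minimal sketch using a hypothetical +Point+ class (illustrative only, not part of this library): + # + # class Point + # def initialize(x, y) + # @x, @y = x, y + # end + # + # # Returning plain Hash and Numeric values is enough; #jsonify walks + # # them recursively into a JSON-ready structure. + # def as_json(options = nil) + # { "x" => @x, "y" => @y } + # end + # end + # + # ActiveSupport::JSON.encode(Point.new(1, 2)) # => "{\"x\":1,\"y\":2}"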
+ # + # Note: the +options+ hash passed to +object.to_json+ is only passed + # to +object.as_json+, not any of this method's recursive +#as_json+ + # calls. + def jsonify(value) + case value + when String + EscapedString.new(value) + when Numeric, NilClass, TrueClass, FalseClass + value.as_json + when Hash + result = {} + value.each do |k, v| + result[jsonify(k)] = jsonify(v) + end + result + when Array + value.map { |v| jsonify(v) } + else + jsonify value.as_json + end + end + + # Encode a "jsonified" Ruby data structure using the JSON gem + def stringify(jsonified) + ::JSON.generate(jsonified, quirks_mode: true, max_nesting: false) + end + end + + class << self + # If true, use ISO 8601 format for dates and times. Otherwise, fall back + # to the Active Support legacy format. + attr_accessor :use_standard_json_time_format + + # If true, encode >, <, & as escaped unicode sequences (e.g. > as \u003e) + # as a safety measure. + attr_accessor :escape_html_entities_in_json + + # Sets the precision of encoded time values. + # Defaults to 3 (equivalent to millisecond precision) + attr_accessor :time_precision + + # Sets the encoder used by Rails to encode Ruby objects into JSON strings + # in +Object#to_json+ and +ActiveSupport::JSON.encode+. + attr_accessor :json_encoder + end + + self.use_standard_json_time_format = true + self.escape_html_entities_in_json = true + self.json_encoder = JSONGemEncoder + self.time_precision = 3 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/key_generator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/key_generator.rb new file mode 100644 index 0000000..21f7ab1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/key_generator.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +require "concurrent/map" +require "openssl" + +module ActiveSupport + # KeyGenerator is a simple wrapper around OpenSSL's implementation of PBKDF2. + # It can be used to derive a number of keys for various purposes from a given secret. + # This lets Rails applications have a single secure secret, but avoid reusing that + # key in multiple incompatible contexts. + class KeyGenerator + def initialize(secret, options = {}) + @secret = secret + # The default iterations are higher than required for our key derivation uses + # on the off chance someone uses this for password storage + @iterations = options[:iterations] || 2**16 + end + + # Returns a derived key suitable for use. The default key_size is chosen + # to be compatible with the default settings of ActiveSupport::MessageVerifier. + # i.e. OpenSSL::Digest::SHA1#block_length + def generate_key(salt, key_size = 64) + OpenSSL::PKCS5.pbkdf2_hmac_sha1(@secret, salt, @iterations, key_size) + end + end + + # CachingKeyGenerator is a wrapper around KeyGenerator which allows users to avoid + # re-executing the key generation process when it's called using the same salt and + # key_size. + class CachingKeyGenerator + def initialize(key_generator) + @key_generator = key_generator + @cache_keys = Concurrent::Map.new + end + + # Returns a derived key suitable for use. 
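+ # + # A minimal usage sketch (the secret and salt values are illustrative): + # + # base = ActiveSupport::KeyGenerator.new("s3cr3t") + # caching = ActiveSupport::CachingKeyGenerator.new(base) + # caching.generate_key("salt", 32) # derived via PBKDF2 on the first call + # caching.generate_key("salt", 32) # served from the cache afterwards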
+ def generate_key(*args) + @cache_keys[args.join("|")] ||= @key_generator.generate_key(*args) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/lazy_load_hooks.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/lazy_load_hooks.rb new file mode 100644 index 0000000..c6f7ccf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/lazy_load_hooks.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +module ActiveSupport + # lazy_load_hooks allows Rails to lazily load a lot of components and thus + # making the app boot faster. Because of this feature now there is no need to + # require ActiveRecord::Base at boot time purely to apply + # configuration. Instead a hook is registered that applies configuration once + # ActiveRecord::Base is loaded. Here ActiveRecord::Base is + # used as example but this feature can be applied elsewhere too. + # + # Here is an example where +on_load+ method is called to register a hook. + # + # initializer 'active_record.initialize_timezone' do + # ActiveSupport.on_load(:active_record) do + # self.time_zone_aware_attributes = true + # self.default_timezone = :utc + # end + # end + # + # When the entirety of +ActiveRecord::Base+ has been + # evaluated then +run_load_hooks+ is invoked. The very last line of + # +ActiveRecord::Base+ is: + # + # ActiveSupport.run_load_hooks(:active_record, ActiveRecord::Base) + module LazyLoadHooks + def self.extended(base) # :nodoc: + base.class_eval do + @load_hooks = Hash.new { |h, k| h[k] = [] } + @loaded = Hash.new { |h, k| h[k] = [] } + @run_once = Hash.new { |h, k| h[k] = [] } + end + end + + # Declares a block that will be executed when a Rails component is fully + # loaded. + # + # Options: + # + # * :yield - Yields the object that run_load_hooks to +block+. + # * :run_once - Given +block+ will run only once. 
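+ # + # A short sketch of both options (the :active_record hook name is just illustrative): + # + # ActiveSupport.on_load(:active_record, run_once: true) do + # # runs at most once, even if run_load_hooks(:active_record) fires again + # end + # + # ActiveSupport.on_load(:active_record, yield: true) do |base| + # base.logger = Logger.new(STDOUT) # +base+ is yielded instead of being class_eval'd + # end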
+ def on_load(name, options = {}, &block) + @loaded[name].each do |base| + execute_hook(name, base, options, block) + end + + @load_hooks[name] << [block, options] + end + + def run_load_hooks(name, base = Object) + @loaded[name] << base + @load_hooks[name].each do |hook, options| + execute_hook(name, base, options, hook) + end + end + + private + def with_execution_control(name, block, once) + unless @run_once[name].include?(block) + @run_once[name] << block if once + + yield + end + end + + def execute_hook(name, base, options, block) + with_execution_control(name, block, options[:run_once]) do + if options[:yield] + block.call(base) + else + if base.is_a?(Module) + base.class_eval(&block) + else + base.instance_eval(&block) + end + end + end + end + end + + extend LazyLoadHooks +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/locale/en.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/locale/en.rb new file mode 100644 index 0000000..29eb9de --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/locale/en.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +{ + en: { + number: { + nth: { + ordinals: lambda do |_key, options| + number = options[:number] + case number + when 1; "st" + when 2; "nd" + when 3; "rd" + when 4, 5, 6, 7, 8, 9, 10, 11, 12, 13; "th" + else + num_modulo = number.to_i.abs % 100 + num_modulo %= 10 if num_modulo > 13 + case num_modulo + when 1; "st" + when 2; "nd" + when 3; "rd" + else "th" + end + end + end, + + ordinalized: lambda do |_key, options| + number = options[:number] + "#{number}#{ActiveSupport::Inflector.ordinal(number)}" + end + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/locale/en.yml b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/locale/en.yml new file mode 100644 index 0000000..725ec35 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/locale/en.yml @@ -0,0 +1,139 @@ +en: + date: + formats: + # Use the strftime parameters for formats. + # When no format has been given, it uses default. + # You can provide other formats here if you like! + default: "%Y-%m-%d" + short: "%b %d" + long: "%B %d, %Y" + + day_names: [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday] + abbr_day_names: [Sun, Mon, Tue, Wed, Thu, Fri, Sat] + + # Don't forget the nil at the beginning; there's no such thing as a 0th month + month_names: [~, January, February, March, April, May, June, July, August, September, October, November, December] + abbr_month_names: [~, Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec] + # Used in date_select and datetime_select. + order: + - year + - month + - day + + time: + formats: + default: "%a, %d %b %Y %H:%M:%S %z" + short: "%d %b %H:%M" + long: "%B %d, %Y %H:%M" + am: "am" + pm: "pm" + +# Used in array.to_sentence. + support: + array: + words_connector: ", " + two_words_connector: " and " + last_word_connector: ", and " + number: + # Used in NumberHelper.number_to_delimited() + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: "." + # Delimits thousands (e.g. 
1,000,000 is a million) (always in groups of three) + delimiter: "," + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3 + # Determine how rounding is performed (see BigDecimal::mode) + round_mode: default + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false + # If set, the zeros after the decimal separator will always be stripped (e.g.: 1.200 will be 1.2) + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_currency() + currency: + format: + # Where is the currency sign? %u is the currency unit, %n the number (default: $5.00) + format: "%u%n" + unit: "$" + # These six are to override number.format and are optional + separator: "." + delimiter: "," + precision: 2 + # round_mode: + significant: false + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_percentage() + percentage: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + format: "%n%" + + # Used in NumberHelper.number_to_rounded() + precision: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_human_size() and NumberHelper.number_to_human() + human: + format: + # These six are to override number.format and are optional + # separator: + delimiter: "" + precision: 3 + # round_mode: + significant: true + strip_insignificant_zeros: true + # Used in number_to_human_size() + storage_units: + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u" + units: + byte: + one: "Byte" + other: "Bytes" + kb: "KB" + mb: "MB" + gb: "GB" + tb: "TB" + pb: "PB" + eb: "EB" + # Used in NumberHelper.number_to_human() + decimal_units: + format: "%n %u" + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. + units: + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "" + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: Thousand + million: Million + billion: Billion + trillion: Trillion + quadrillion: Quadrillion diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/log_subscriber.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/log_subscriber.rb new file mode 100644 index 0000000..af90bcc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/log_subscriber.rb @@ -0,0 +1,142 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/class/attribute" +require "active_support/subscriber" + +module ActiveSupport + # ActiveSupport::LogSubscriber is an object set to consume + # ActiveSupport::Notifications with the sole purpose of logging them. + # The log subscriber dispatches notifications to a registered object based + # on its given namespace. 
+ # + # An example would be Active Record log subscriber responsible for logging + # queries: + # + # module ActiveRecord + # class LogSubscriber < ActiveSupport::LogSubscriber + # def sql(event) + # info "#{event.payload[:name]} (#{event.duration}) #{event.payload[:sql]}" + # end + # end + # end + # + # And it's finally registered as: + # + # ActiveRecord::LogSubscriber.attach_to :active_record + # + # Since we need to know all instance methods before attaching the log + # subscriber, the line above should be called after your + # ActiveRecord::LogSubscriber definition. + # + # A logger also needs to be set with ActiveRecord::LogSubscriber.logger=. + # This is assigned automatically in a Rails environment. + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event + # (ActiveSupport::Notifications::Event) to the sql method. + # + # Being an ActiveSupport::Notifications consumer, + # ActiveSupport::LogSubscriber exposes a simple interface to check if + # instrumented code raises an exception. It is common to log a different + # message in case of an error, and this can be achieved by extending + # the previous example: + # + # module ActiveRecord + # class LogSubscriber < ActiveSupport::LogSubscriber + # def sql(event) + # exception = event.payload[:exception] + # + # if exception + # exception_object = event.payload[:exception_object] + # + # error "[ERROR] #{event.payload[:name]}: #{exception.join(', ')} " \ + # "(#{exception_object.backtrace.first})" + # else + # # standard logger code + # end + # end + # end + # end + # + # Log subscriber also has some helpers to deal with logging and automatically + # flushes all logs when the request finishes + # (via action_dispatch.callback notification) in a Rails environment. + class LogSubscriber < Subscriber + # Embed in a String to clear all previous ANSI sequences. + CLEAR = "\e[0m" + BOLD = "\e[1m" + + # Colors + BLACK = "\e[30m" + RED = "\e[31m" + GREEN = "\e[32m" + YELLOW = "\e[33m" + BLUE = "\e[34m" + MAGENTA = "\e[35m" + CYAN = "\e[36m" + WHITE = "\e[37m" + + mattr_accessor :colorize_logging, default: true + + class << self + def logger + @logger ||= if defined?(Rails) && Rails.respond_to?(:logger) + Rails.logger + end + end + + attr_writer :logger + + def log_subscribers + subscribers + end + + # Flush all log_subscribers' logger. + def flush_all! + logger.flush if logger.respond_to?(:flush) + end + + private + def fetch_public_methods(subscriber, inherit_all) + subscriber.public_methods(inherit_all) - LogSubscriber.public_instance_methods(true) + end + end + + def logger + LogSubscriber.logger + end + + def start(name, id, payload) + super if logger + end + + def finish(name, id, payload) + super if logger + rescue => e + if logger + logger.error "Could not log #{name.inspect} event. #{e.class}: #{e.message} #{e.backtrace}" + end + end + + private + %w(info debug warn error fatal unknown).each do |level| + class_eval <<-METHOD, __FILE__, __LINE__ + 1 + def #{level}(progname = nil, &block) + logger.#{level}(progname, &block) if logger + end + METHOD + end + + # Set color by using a symbol or one of the defined constants. If a third + # option is set to +true+, it also adds bold to the string. This is based + # on the Highline implementation and will automatically append CLEAR to the + # end of the returned String. 
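+ # + # For instance (assuming colorize_logging is left at its default of +true+): + # + # color("Model Load", YELLOW) # => "\e[33mModel Load\e[0m" + # color("SQL (2.1ms)", :red, true) # => "\e[1m\e[31mSQL (2.1ms)\e[0m"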
+ def color(text, color, bold = false) # :doc: + return text unless colorize_logging + color = self.class.const_get(color.upcase) if color.is_a?(Symbol) + bold = bold ? BOLD : "" + "#{bold}#{color}#{text}#{CLEAR}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/log_subscriber/test_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/log_subscriber/test_helper.rb new file mode 100644 index 0000000..3f19ef5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/log_subscriber/test_helper.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "active_support/log_subscriber" +require "active_support/logger" +require "active_support/notifications" + +module ActiveSupport + class LogSubscriber + # Provides some helpers to deal with testing log subscribers by setting up + # notifications. Take for instance Active Record subscriber tests: + # + # class SyncLogSubscriberTest < ActiveSupport::TestCase + # include ActiveSupport::LogSubscriber::TestHelper + # + # setup do + # ActiveRecord::LogSubscriber.attach_to(:active_record) + # end + # + # def test_basic_query_logging + # Developer.all.to_a + # wait + # assert_equal 1, @logger.logged(:debug).size + # assert_match(/Developer Load/, @logger.logged(:debug).last) + # assert_match(/SELECT \* FROM "developers"/, @logger.logged(:debug).last) + # end + # end + # + # All you need to do is to ensure that your log subscriber is added to + # Rails::Subscriber, as in the second line of the code above. The test + # helpers are responsible for setting up the queue, subscriptions and + # turning colors in logs off. + # + # The messages are available in the @logger instance, which is a logger with + # limited powers (it actually does not send anything to your output), and + # you can collect them doing @logger.logged(level), where level is the level + # used in logging, like info, debug, warn and so on. + module TestHelper + def setup # :nodoc: + @logger = MockLogger.new + @notifier = ActiveSupport::Notifications::Fanout.new + + ActiveSupport::LogSubscriber.colorize_logging = false + + @old_notifier = ActiveSupport::Notifications.notifier + set_logger(@logger) + ActiveSupport::Notifications.notifier = @notifier + end + + def teardown # :nodoc: + set_logger(nil) + ActiveSupport::Notifications.notifier = @old_notifier + end + + class MockLogger + include ActiveSupport::Logger::Severity + + attr_reader :flush_count + attr_accessor :level + + def initialize(level = DEBUG) + @flush_count = 0 + @level = level + @logged = Hash.new { |h, k| h[k] = [] } + end + + def method_missing(level, message = nil) + if block_given? + @logged[level] << yield + else + @logged[level] << message + end + end + + def logged(level) + @logged[level].compact.map { |l| l.to_s.strip } + end + + def flush + @flush_count += 1 + end + + ActiveSupport::Logger::Severity.constants.each do |severity| + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{severity.downcase}? + #{severity} >= @level + end + EOT + end + end + + # Wait notifications to be published. + def wait + @notifier.wait + end + + # Overwrite if you use another logger in your log subscriber. 
+ # + # def logger + # ActiveRecord::Base.logger = @logger + # end + def set_logger(logger) + ActiveSupport::LogSubscriber.logger = logger + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger.rb new file mode 100644 index 0000000..1e241c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require "active_support/logger_silence" +require "active_support/logger_thread_safe_level" +require "logger" + +module ActiveSupport + class Logger < ::Logger + include LoggerSilence + + # Returns true if the logger destination matches one of the sources + # + # logger = Logger.new(STDOUT) + # ActiveSupport::Logger.logger_outputs_to?(logger, STDOUT) + # # => true + def self.logger_outputs_to?(logger, *sources) + logdev = logger.instance_variable_get(:@logdev) + logger_source = logdev.dev if logdev.respond_to?(:dev) + sources.any? { |source| source == logger_source } + end + + # Broadcasts logs to multiple loggers. + def self.broadcast(logger) # :nodoc: + Module.new do + define_method(:add) do |*args, &block| + logger.add(*args, &block) + super(*args, &block) + end + + define_method(:<<) do |x| + logger << x + super(x) + end + + define_method(:close) do + logger.close + super() + end + + define_method(:progname=) do |name| + logger.progname = name + super(name) + end + + define_method(:formatter=) do |formatter| + logger.formatter = formatter + super(formatter) + end + + define_method(:level=) do |level| + logger.level = level + super(level) + end + + define_method(:local_level=) do |level| + logger.local_level = level if logger.respond_to?(:local_level=) + super(level) if respond_to?(:local_level=) + end + + define_method(:silence) do |level = Logger::ERROR, &block| + if logger.respond_to?(:silence) + logger.silence(level) do + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + else + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + end + end + end + + def initialize(*args, **kwargs) + super + @formatter = SimpleFormatter.new + end + + # Simple formatter which only displays the message. + class SimpleFormatter < ::Logger::Formatter + # This method is invoked when a log event occurs + def call(severity, timestamp, progname, msg) + "#{String === msg ? msg : msg.inspect}\n" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger_silence.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger_silence.rb new file mode 100644 index 0000000..8567eff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger_silence.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/logger_thread_safe_level" + +module ActiveSupport + module LoggerSilence + extend ActiveSupport::Concern + + included do + cattr_accessor :silencer, default: true + include ActiveSupport::LoggerThreadSafeLevel + end + + # Silences the logger for the duration of the block. + def silence(severity = Logger::ERROR) + silencer ? 
log_at(severity) { yield self } : yield(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger_thread_safe_level.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger_thread_safe_level.rb new file mode 100644 index 0000000..1de9ecd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/logger_thread_safe_level.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/module/attribute_accessors" +require "concurrent" +require "fiber" + +module ActiveSupport + module LoggerThreadSafeLevel # :nodoc: + extend ActiveSupport::Concern + + included do + cattr_accessor :local_levels, default: Concurrent::Map.new(initial_capacity: 2), instance_accessor: false + end + + Logger::Severity.constants.each do |severity| + class_eval(<<-EOT, __FILE__, __LINE__ + 1) + def #{severity.downcase}? # def debug? + Logger::#{severity} >= level # DEBUG >= level + end # end + EOT + end + + def local_log_id + Fiber.current.__id__ + end + + def local_level + self.class.local_levels[local_log_id] + end + + def local_level=(level) + case level + when Integer + self.class.local_levels[local_log_id] = level + when Symbol + self.class.local_levels[local_log_id] = Logger::Severity.const_get(level.to_s.upcase) + when nil + self.class.local_levels.delete(local_log_id) + else + raise ArgumentError, "Invalid log level: #{level.inspect}" + end + end + + def level + local_level || super + end + + # Change the thread-local level for the duration of the given block. + def log_at(level) + old_local_level, self.local_level = local_level, level + yield + ensure + self.local_level = old_local_level + end + + # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+. + # FIXME: Remove when the minimum Ruby version supports overriding Logger#level. + def add(severity, message = nil, progname = nil, &block) #:nodoc: + severity ||= UNKNOWN + progname ||= @progname + + return true if @logdev.nil? || severity < level + + if message.nil? + if block_given? + message = yield + else + message = progname + progname = @progname + end + end + + @logdev.write \ + format_message(format_severity(severity), Time.now, progname, message) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/message_encryptor.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/message_encryptor.rb new file mode 100644 index 0000000..4cf4a44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/message_encryptor.rb @@ -0,0 +1,224 @@ +# frozen_string_literal: true + +require "openssl" +require "base64" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/message_verifier" +require "active_support/messages/metadata" + +module ActiveSupport + # MessageEncryptor is a simple way to encrypt values which get stored + # somewhere you don't trust. + # + # The cipher text and initialization vector are base64 encoded and returned + # to you. + # + # This can be used in situations similar to the MessageVerifier, but + # where you don't want users to be able to determine the value of the payload. + # + # len = ActiveSupport::MessageEncryptor.key_len + # salt = SecureRandom.random_bytes(len) + # key = ActiveSupport::KeyGenerator.new('password').generate_key(salt, len) # => "\x89\xE0\x156\xAC..." 
+ # crypt = ActiveSupport::MessageEncryptor.new(key) # => # + # encrypted_data = crypt.encrypt_and_sign('my secret data') # => "NlFBTTMwOUV5UlA1QlNEN2xkY2d6eThYWWh..." + # crypt.decrypt_and_verify(encrypted_data) # => "my secret data" + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. But they can also be + # confined to a specific +:purpose+. + # + # token = crypt.encrypt_and_sign("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # crypt.decrypt_and_verify(token, purpose: :login) # => "this is the chair" + # crypt.decrypt_and_verify(token, purpose: :shipping) # => nil + # crypt.decrypt_and_verify(token) # => nil + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = crypt.encrypt_and_sign("the conversation is lively") + # crypt.decrypt_and_verify(token, purpose: :scare_tactics) # => nil + # crypt.decrypt_and_verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # crypt.encrypt_and_sign(parcel, expires_in: 1.month) + # crypt.encrypt_and_sign(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned up to the expire time. + # Thereafter, verifying returns +nil+. + # + # === Rotating keys + # + # MessageEncryptor also supports rotating out old configurations by falling + # back to a stack of encryptors. Call +rotate+ to build and add an encryptor + # so +decrypt_and_verify+ will also try the fallback. + # + # By default any rotated encryptors use the values of the primary + # encryptor unless specified otherwise. + # + # You'd give your encryptor the new defaults: + # + # crypt = ActiveSupport::MessageEncryptor.new(@secret, cipher: "aes-256-gcm") + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # crypt.rotate old_secret # Fallback to an old secret instead of @secret. + # crypt.rotate cipher: "aes-256-cbc" # Fallback to an old cipher instead of aes-256-gcm. + # + # Though if both the secret and the cipher was changed at the same time, + # the above should be combined into: + # + # crypt.rotate old_secret, cipher: "aes-256-cbc" + class MessageEncryptor + prepend Messages::Rotator::Encryptor + + cattr_accessor :use_authenticated_message_encryption, instance_accessor: false, default: false + + class << self + def default_cipher #:nodoc: + if use_authenticated_message_encryption + "aes-256-gcm" + else + "aes-256-cbc" + end + end + end + + module NullSerializer #:nodoc: + def self.load(value) + value + end + + def self.dump(value) + value + end + end + + module NullVerifier #:nodoc: + def self.verify(value) + value + end + + def self.generate(value) + value + end + end + + class InvalidMessage < StandardError; end + OpenSSLCipherError = OpenSSL::Cipher::CipherError + + # Initialize a new MessageEncryptor. +secret+ must be at least as long as + # the cipher key size. For the default 'aes-256-gcm' cipher, this is 256 + # bits. If you are using a user-entered secret, you can generate a suitable + # key by using ActiveSupport::KeyGenerator or a similar key + # derivation function. 
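+ # + # For a randomly generated secret (rather than one derived from a password), a + # key of the right length can be produced directly; a minimal sketch: + # + # key = SecureRandom.random_bytes(ActiveSupport::MessageEncryptor.key_len) + # crypt = ActiveSupport::MessageEncryptor.new(key)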
+ # + # First additional parameter is used as the signature key for +MessageVerifier+. + # This allows you to specify keys to encrypt and sign data. + # + # ActiveSupport::MessageEncryptor.new('secret', 'signature_secret') + # + # Options: + # * :cipher - Cipher to use. Can be any cipher returned by + # OpenSSL::Cipher.ciphers. Default is 'aes-256-gcm'. + # * :digest - String of digest to use for signing. Default is + # +SHA1+. Ignored when using an AEAD cipher like 'aes-256-gcm'. + # * :serializer - Object serializer to use. Default is +Marshal+. + def initialize(secret, sign_secret = nil, cipher: nil, digest: nil, serializer: nil) + @secret = secret + @sign_secret = sign_secret + @cipher = cipher || self.class.default_cipher + @digest = digest || "SHA1" unless aead_mode? + @verifier = resolve_verifier + @serializer = serializer || Marshal + end + + # Encrypt and sign a message. We need to sign the message in order to avoid + # padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def encrypt_and_sign(value, expires_at: nil, expires_in: nil, purpose: nil) + verifier.generate(_encrypt(value, expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + end + + # Decrypt and verify a message. We need to verify the message in order to + # avoid padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def decrypt_and_verify(data, purpose: nil, **) + _decrypt(verifier.verify(data), purpose) + end + + # Given a cipher, returns the key length of the cipher to help generate the key of desired size + def self.key_len(cipher = default_cipher) + OpenSSL::Cipher.new(cipher).key_len + end + + private + def _encrypt(value, **metadata_options) + cipher = new_cipher + cipher.encrypt + cipher.key = @secret + + # Rely on OpenSSL for the initialization vector + iv = cipher.random_iv + cipher.auth_data = "" if aead_mode? + + encrypted_data = cipher.update(Messages::Metadata.wrap(@serializer.dump(value), **metadata_options)) + encrypted_data << cipher.final + + blob = "#{::Base64.strict_encode64 encrypted_data}--#{::Base64.strict_encode64 iv}" + blob = "#{blob}--#{::Base64.strict_encode64 cipher.auth_tag}" if aead_mode? + blob + end + + def _decrypt(encrypted_message, purpose) + cipher = new_cipher + encrypted_data, iv, auth_tag = encrypted_message.split("--").map { |v| ::Base64.strict_decode64(v) } + + # Currently the OpenSSL bindings do not raise an error if auth_tag is + # truncated, which would allow an attacker to easily forge it. See + # https://github.com/ruby/openssl/issues/63 + raise InvalidMessage if aead_mode? && (auth_tag.nil? || auth_tag.bytes.length != 16) + + cipher.decrypt + cipher.key = @secret + cipher.iv = iv + if aead_mode? + cipher.auth_tag = auth_tag + cipher.auth_data = "" + end + + decrypted_data = cipher.update(encrypted_data) + decrypted_data << cipher.final + + message = Messages::Metadata.verify(decrypted_data, purpose) + @serializer.load(message) if message + rescue OpenSSLCipherError, TypeError, ArgumentError + raise InvalidMessage + end + + def new_cipher + OpenSSL::Cipher.new(@cipher) + end + + attr_reader :verifier + + def aead_mode? + @aead_mode ||= new_cipher.authenticated? + end + + def resolve_verifier + if aead_mode? 
+ NullVerifier + else + MessageVerifier.new(@sign_secret || @secret, digest: @digest, serializer: NullSerializer) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/message_verifier.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/message_verifier.rb new file mode 100644 index 0000000..ba992a1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/message_verifier.rb @@ -0,0 +1,205 @@ +# frozen_string_literal: true + +require "base64" +require "active_support/core_ext/object/blank" +require "active_support/security_utils" +require "active_support/messages/metadata" +require "active_support/messages/rotator" + +module ActiveSupport + # +MessageVerifier+ makes it easy to generate and verify messages which are + # signed to prevent tampering. + # + # This is useful for cases like remember-me tokens and auto-unsubscribe links + # where the session store isn't suitable or available. + # + # Remember Me: + # cookies[:remember_me] = @verifier.generate([@user.id, 2.weeks.from_now]) + # + # In the authentication filter: + # + # id, time = @verifier.verify(cookies[:remember_me]) + # if Time.now < time + # self.current_user = User.find(id) + # end + # + # By default it uses Marshal to serialize the message. If you want to use + # another serialization method, you can set the serializer in the options + # hash upon initialization: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', serializer: YAML) + # + # +MessageVerifier+ creates HMAC signatures using SHA1 hash algorithm by default. + # If you want to use a different hash algorithm, you can change it by providing + # +:digest+ key as an option while initializing the verifier: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', digest: 'SHA256') + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. But they can also be + # confined to a specific +:purpose+. + # + # token = @verifier.generate("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # @verifier.verified(token, purpose: :login) # => "this is the chair" + # @verifier.verified(token, purpose: :shipping) # => nil + # @verifier.verified(token) # => nil + # + # @verifier.verify(token, purpose: :login) # => "this is the chair" + # @verifier.verify(token, purpose: :shipping) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => ActiveSupport::MessageVerifier::InvalidSignature + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = @verifier.generate("the conversation is lively") + # @verifier.verified(token, purpose: :scare_tactics) # => nil + # @verifier.verified(token) # => "the conversation is lively" + # + # @verifier.verify(token, purpose: :scare_tactics) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # @verifier.generate(parcel, expires_in: 1.month) + # @verifier.generate(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned up to the expire time. 
+ # Thereafter, the +verified+ method returns +nil+ while +verify+ raises + # ActiveSupport::MessageVerifier::InvalidSignature. + # + # === Rotating keys + # + # MessageVerifier also supports rotating out old configurations by falling + # back to a stack of verifiers. Call +rotate+ to build and add a verifier to + # so either +verified+ or +verify+ will also try verifying with the fallback. + # + # By default any rotated verifiers use the values of the primary + # verifier unless specified otherwise. + # + # You'd give your verifier the new defaults: + # + # verifier = ActiveSupport::MessageVerifier.new(@secret, digest: "SHA512", serializer: JSON) + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # verifier.rotate old_secret # Fallback to an old secret instead of @secret. + # verifier.rotate digest: "SHA256" # Fallback to an old digest instead of SHA512. + # verifier.rotate serializer: Marshal # Fallback to an old serializer instead of JSON. + # + # Though the above would most likely be combined into one rotation: + # + # verifier.rotate old_secret, digest: "SHA256", serializer: Marshal + class MessageVerifier + prepend Messages::Rotator::Verifier + + class InvalidSignature < StandardError; end + + def initialize(secret, digest: nil, serializer: nil) + raise ArgumentError, "Secret should not be nil." unless secret + @secret = secret + @digest = digest || "SHA1" + @serializer = serializer || Marshal + end + + # Checks if a signed message could have been generated by signing an object + # with the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # verifier.valid_message?(signed_message) # => true + # + # tampered_message = signed_message.chop # editing the message invalidates the signature + # verifier.valid_message?(tampered_message) # => false + def valid_message?(signed_message) + return if signed_message.nil? || !signed_message.valid_encoding? || signed_message.blank? + + data, digest = signed_message.split("--") + data.present? && digest.present? && ActiveSupport::SecurityUtils.secure_compare(digest, generate_digest(data)) + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # + # signed_message = verifier.generate 'a private message' + # verifier.verified(signed_message) # => 'a private message' + # + # Returns +nil+ if the message was not signed with the same secret. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verified(signed_message) # => nil + # + # Returns +nil+ if the message is not Base64-encoded. + # + # invalid_message = "f--46a0120593880c733a53b6dad75b42ddc1c8996d" + # verifier.verified(invalid_message) # => nil + # + # Raises any error raised while decoding the signed message. 
+ # + # incompatible_message = "test--dad7b06c94abba8d46a15fafaef56c327665d5ff" + # verifier.verified(incompatible_message) # => TypeError: incompatible marshal file format + def verified(signed_message, purpose: nil, **) + if valid_message?(signed_message) + begin + data = signed_message.split("--")[0] + message = Messages::Metadata.verify(decode(data), purpose) + @serializer.load(message) if message + rescue ArgumentError => argument_error + return if argument_error.message.include?("invalid base64") + raise + end + end + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # + # verifier.verify(signed_message) # => 'a private message' + # + # Raises +InvalidSignature+ if the message was not signed with the same + # secret or was not Base64-encoded. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verify(signed_message) # => ActiveSupport::MessageVerifier::InvalidSignature + def verify(*args, **options) + verified(*args, **options) || raise(InvalidSignature) + end + + # Generates a signed message for the provided value. + # + # The message is signed with the +MessageVerifier+'s secret. + # Returns Base64-encoded message joined with the generated signature. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # verifier.generate 'a private message' # => "BAhJIhRwcml2YXRlLW1lc3NhZ2UGOgZFVA==--e2d724331ebdee96a10fb99b089508d1c72bd772" + def generate(value, expires_at: nil, expires_in: nil, purpose: nil) + data = encode(Messages::Metadata.wrap(@serializer.dump(value), expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + "#{data}--#{generate_digest(data)}" + end + + private + def encode(data) + ::Base64.strict_encode64(data) + end + + def decode(data) + ::Base64.strict_decode64(data) + end + + def generate_digest(data) + require "openssl" unless defined?(OpenSSL) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.const_get(@digest).new, @secret, data) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/metadata.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/metadata.rb new file mode 100644 index 0000000..4734cce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/metadata.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "time" + +module ActiveSupport + module Messages #:nodoc: + class Metadata #:nodoc: + def initialize(message, expires_at = nil, purpose = nil) + @message, @purpose = message, purpose + @expires_at = expires_at.is_a?(String) ? 
parse_expires_at(expires_at) : expires_at + end + + def as_json(options = {}) + { _rails: { message: @message, exp: @expires_at, pur: @purpose } } + end + + class << self + def wrap(message, expires_at: nil, expires_in: nil, purpose: nil) + if expires_at || expires_in || purpose + JSON.encode new(encode(message), pick_expiry(expires_at, expires_in), purpose) + else + message + end + end + + def verify(message, purpose) + extract_metadata(message).verify(purpose) + end + + private + def pick_expiry(expires_at, expires_in) + if expires_at + expires_at.utc.iso8601(3) + elsif expires_in + Time.now.utc.advance(seconds: expires_in).iso8601(3) + end + end + + def extract_metadata(message) + data = JSON.decode(message) rescue nil + + if data.is_a?(Hash) && data.key?("_rails") + new(decode(data["_rails"]["message"]), data["_rails"]["exp"], data["_rails"]["pur"]) + else + new(message) + end + end + + def encode(message) + ::Base64.strict_encode64(message) + end + + def decode(message) + ::Base64.strict_decode64(message) + end + end + + def verify(purpose) + @message if match?(purpose) && fresh? + end + + private + def match?(purpose) + @purpose.to_s == purpose.to_s + end + + def fresh? + @expires_at.nil? || Time.now.utc < @expires_at + end + + def parse_expires_at(expires_at) + if ActiveSupport.use_standard_json_time_format + Time.iso8601(expires_at) + else + Time.parse(expires_at) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/rotation_configuration.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/rotation_configuration.rb new file mode 100644 index 0000000..eef05fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/rotation_configuration.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + class RotationConfiguration # :nodoc: + attr_reader :signed, :encrypted + + def initialize + @signed, @encrypted = [], [] + end + + def rotate(kind, *args, **options) + args << options unless options.empty? 
+ case kind + when :signed + @signed << args + when :encrypted + @encrypted << args + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/rotator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/rotator.rb new file mode 100644 index 0000000..b19e185 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/messages/rotator.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + module Rotator # :nodoc: + def initialize(*secrets, on_rotation: nil, **options) + super(*secrets, **options) + + @options = options + @rotations = [] + @on_rotation = on_rotation + end + + def rotate(*secrets, **options) + @rotations << build_rotation(*secrets, @options.merge(options)) + end + + module Encryptor + include Rotator + + def decrypt_and_verify(*args, on_rotation: @on_rotation, **options) + super + rescue MessageEncryptor::InvalidMessage, MessageVerifier::InvalidSignature + run_rotations(on_rotation) { |encryptor| encryptor.decrypt_and_verify(*args, **options) } || raise + end + + private + def build_rotation(secret = @secret, sign_secret = @sign_secret, options) + self.class.new(secret, sign_secret, **options) + end + end + + module Verifier + include Rotator + + def verified(*args, on_rotation: @on_rotation, **options) + super || run_rotations(on_rotation) { |verifier| verifier.verified(*args, **options) } + end + + private + def build_rotation(secret = @secret, options) + self.class.new(secret, **options) + end + end + + private + def run_rotations(on_rotation) + @rotations.find do |rotation| + if message = yield(rotation) rescue next + on_rotation&.call + return message + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte.rb new file mode 100644 index 0000000..3fe3a05 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module ActiveSupport #:nodoc: + module Multibyte + autoload :Chars, "active_support/multibyte/chars" + autoload :Unicode, "active_support/multibyte/unicode" + + # The proxy class returned when calling mb_chars. You can use this accessor + # to configure your own proxy class so you can support other encodings. See + # the ActiveSupport::Multibyte::Chars implementation for an example how to + # do this. + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + def self.proxy_class=(klass) + @proxy_class = klass + end + + # Returns the current proxy class. 
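+ # + # For example, when no custom proxy class has been configured: + # + # ActiveSupport::Multibyte.proxy_class # => ActiveSupport::Multibyte::Chars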
+ def self.proxy_class + @proxy_class ||= ActiveSupport::Multibyte::Chars + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte/chars.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte/chars.rb new file mode 100644 index 0000000..d58ab1b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte/chars.rb @@ -0,0 +1,177 @@ +# frozen_string_literal: true + +require "active_support/json" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/symbol/starts_ends_with" +require "active_support/core_ext/module/delegation" + +module ActiveSupport #:nodoc: + module Multibyte #:nodoc: + # Chars enables you to work transparently with UTF-8 encoding in the Ruby + # String class without having extensive knowledge about the encoding. A + # Chars object accepts a string upon initialization and proxies String + # methods in an encoding safe manner. All the normal String methods are also + # implemented on the proxy. + # + # String methods are proxied through the Chars object, and can be accessed + # through the +mb_chars+ method. Methods which would normally return a + # String object now return a Chars object so methods can be chained. + # + # 'The Perfect String '.mb_chars.downcase.strip + # # => # + # + # Chars objects are perfectly interchangeable with String objects as long as + # no explicit class checks are made. If certain methods do explicitly check + # the class, call +to_s+ before you pass chars objects to them. + # + # bad.explicit_checking_method 'T'.mb_chars.downcase.to_s + # + # The default Chars implementation assumes that the encoding of the string + # is UTF-8, if you want to handle different encodings you can write your own + # multibyte string handler and configure it through + # ActiveSupport::Multibyte.proxy_class. + # + # class CharsForUTF32 + # def size + # @wrapped_string.size / 4 + # end + # + # def self.accepts?(string) + # string.length % 4 == 0 + # end + # end + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + class Chars + include Comparable + attr_reader :wrapped_string + alias to_s wrapped_string + alias to_str wrapped_string + + delegate :<=>, :=~, :match?, :acts_like_string?, to: :wrapped_string + + # Creates a new Chars instance by wrapping _string_. + def initialize(string) + @wrapped_string = string + @wrapped_string.force_encoding(Encoding::UTF_8) unless @wrapped_string.frozen? + end + + # Forward all undefined methods to the wrapped string. + def method_missing(method, *args, &block) + result = @wrapped_string.__send__(method, *args, &block) + if method.end_with?("!") + self if result + else + result.kind_of?(String) ? chars(result) : result + end + end + + # Returns +true+ if _obj_ responds to the given method. Private methods + # are included in the search only if the optional second parameter + # evaluates to +true+. + def respond_to_missing?(method, include_private) + @wrapped_string.respond_to?(method, include_private) + end + + # Works just like String#split, with the exception that the items + # in the resulting list are Chars instances instead of String. This makes + # chaining methods easier. 
+ # + # 'Café périferôl'.mb_chars.split(/é/).map { |part| part.upcase.to_s } # => ["CAF", " P", "RIFERÔL"] + def split(*args) + @wrapped_string.split(*args).map { |i| self.class.new(i) } + end + + # Works like String#slice!, but returns an instance of + # Chars, or +nil+ if the string was not modified. The string will not be + # modified if the range given is out of bounds + # + # string = 'Welcome' + # string.mb_chars.slice!(3) # => # + # string # => 'Welome' + # string.mb_chars.slice!(0..3) # => # + # string # => 'me' + def slice!(*args) + string_sliced = @wrapped_string.slice!(*args) + if string_sliced + chars(string_sliced) + end + end + + # Reverses all characters in the string. + # + # 'Café'.mb_chars.reverse.to_s # => 'éfaC' + def reverse + chars(@wrapped_string.scan(/\X/).reverse.join) + end + + # Limits the byte size of the string to a number of bytes without breaking + # characters. Usable when the storage for a string is limited for some + # reason. + # + # 'こんにちは'.mb_chars.limit(7).to_s # => "こん" + def limit(limit) + chars(@wrapped_string.truncate_bytes(limit, omission: nil)) + end + + # Capitalizes the first letter of every word, when possible. + # + # "ÉL QUE SE ENTERÓ".mb_chars.titleize.to_s # => "Él Que Se Enteró" + # "日本語".mb_chars.titleize.to_s # => "日本語" + def titleize + chars(downcase.to_s.gsub(/\b('?\S)/u) { $1.upcase }) + end + alias_method :titlecase, :titleize + + # Performs canonical decomposition on all the characters. + # + # 'é'.length # => 2 + # 'é'.mb_chars.decompose.to_s.length # => 3 + def decompose + chars(Unicode.decompose(:canonical, @wrapped_string.codepoints.to_a).pack("U*")) + end + + # Performs composition on all the characters. + # + # 'é'.length # => 3 + # 'é'.mb_chars.compose.to_s.length # => 2 + def compose + chars(Unicode.compose(@wrapped_string.codepoints.to_a).pack("U*")) + end + + # Returns the number of grapheme clusters in the string. + # + # 'क्षि'.mb_chars.length # => 4 + # 'क्षि'.mb_chars.grapheme_length # => 3 + def grapheme_length + @wrapped_string.scan(/\X/).length + end + + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(force = false) + chars(Unicode.tidy_bytes(@wrapped_string, force)) + end + + def as_json(options = nil) #:nodoc: + to_s.as_json(options) + end + + %w(reverse tidy_bytes).each do |method| + define_method("#{method}!") do |*args| + @wrapped_string = public_send(method, *args).to_s + self + end + end + + private + def chars(string) + self.class.new(string) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte/unicode.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte/unicode.rb new file mode 100644 index 0000000..552790b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/multibyte/unicode.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +module ActiveSupport + module Multibyte + module Unicode + extend self + + # The Unicode version that is supported by the implementation + UNICODE_VERSION = RbConfig::CONFIG["UNICODE_VERSION"] + + def default_normalization_form + ActiveSupport::Deprecation.warn( + "ActiveSupport::Multibyte::Unicode.default_normalization_form is deprecated and will be removed in Rails 6.2." 
+ ) + end + + def default_normalization_form=(_) + ActiveSupport::Deprecation.warn( + "ActiveSupport::Multibyte::Unicode.default_normalization_form= is deprecated and will be removed in Rails 6.2." + ) + end + + # Decompose composed characters to the decomposed form. + def decompose(type, codepoints) + if type == :compatibility + codepoints.pack("U*").unicode_normalize(:nfkd).codepoints + else + codepoints.pack("U*").unicode_normalize(:nfd).codepoints + end + end + + # Compose decomposed characters to the composed form. + def compose(codepoints) + codepoints.pack("U*").unicode_normalize(:nfc).codepoints + end + + # Rubinius' String#scrub, however, doesn't support ASCII-incompatible chars. + if !defined?(Rubinius) + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(string, force = false) + return string if string.empty? || string.ascii_only? + return recode_windows1252_chars(string) if force + string.scrub { |bad| recode_windows1252_chars(bad) } + end + else + def tidy_bytes(string, force = false) + return string if string.empty? + return recode_windows1252_chars(string) if force + + # We can't transcode to the same format, so we choose a nearly-identical encoding. + # We're going to 'transcode' bytes from UTF-8 when possible, then fall back to + # CP1252 when we get errors. The final string will be 'converted' back to UTF-8 + # before returning. + reader = Encoding::Converter.new(Encoding::UTF_8, Encoding::UTF_16LE) + + source = string.dup + out = "".force_encoding(Encoding::UTF_16LE) + + loop do + reader.primitive_convert(source, out) + _, _, _, error_bytes, _ = reader.primitive_errinfo + break if error_bytes.nil? + out << error_bytes.encode(Encoding::UTF_16LE, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + + reader.finish + + out.encode!(Encoding::UTF_8) + end + end + + private + def recode_windows1252_chars(string) + string.encode(Encoding::UTF_8, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications.rb new file mode 100644 index 0000000..d1227c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications.rb @@ -0,0 +1,280 @@ +# frozen_string_literal: true + +require "active_support/notifications/instrumenter" +require "active_support/notifications/fanout" +require "active_support/per_thread_registry" + +module ActiveSupport + # = Notifications + # + # ActiveSupport::Notifications provides an instrumentation API for + # Ruby. + # + # == Instrumenters + # + # To instrument an event you just need to do: + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # That first executes the block and then notifies all subscribers once done. + # + # In the example above +render+ is the name of the event, and the rest is called + # the _payload_. The payload is a mechanism that allows instrumenters to pass + # extra information to subscribers. Payloads consist of a hash whose contents + # are arbitrary and generally depend on the event. 
+ # + # == Subscribers + # + # You can consume those events and the information they provide by registering + # a subscriber. + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end + # + # Here, the +start+ and +finish+ values represent wall-clock time. If you are + # concerned about accuracy, you can register a monotonic subscriber. + # + # ActiveSupport::Notifications.monotonic_subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Monotonic time, when the instrumented block started execution + # finish # => Monotonic time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end + # + # The +start+ and +finish+ values above represent monotonic time. + # + # For instance, let's store all "render" events in an array: + # + # events = [] + # + # ActiveSupport::Notifications.subscribe('render') do |*args| + # events << ActiveSupport::Notifications::Event.new(*args) + # end + # + # That code returns right away, you are just subscribing to "render" events. + # The block is saved and will be called whenever someone instruments "render": + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # event = events.first + # event.name # => "render" + # event.duration # => 10 (in milliseconds) + # event.payload # => { extra: :information } + # + # The block in the subscribe call gets the name of the event, start + # timestamp, end timestamp, a string with a unique identifier for that event's instrumenter + # (something like "535801666f04d0298cd6"), and a hash with the payload, in + # that order. + # + # If an exception happens during that particular instrumentation the payload will + # have a key :exception with an array of two elements as value: a string with + # the name of the exception class, and the exception message. + # The :exception_object key of the payload will have the exception + # itself as the value: + # + # event.payload[:exception] # => ["ArgumentError", "Invalid value"] + # event.payload[:exception_object] # => # + # + # As the earlier example depicts, the class ActiveSupport::Notifications::Event + # is able to take the arguments as they come and provide an object-oriented + # interface to that data. 
+ # + # It is also possible to pass an object which responds to call method + # as the second parameter to the subscribe method instead of a block: + # + # module ActionController + # class PageRequest + # def call(name, started, finished, unique_id, payload) + # Rails.logger.debug ['notification:', name, started, finished, unique_id, payload].join(' ') + # end + # end + # end + # + # ActiveSupport::Notifications.subscribe('process_action.action_controller', ActionController::PageRequest.new) + # + # resulting in the following output within the logs including a hash with the payload: + # + # notification: process_action.action_controller 2012-04-13 01:08:35 +0300 2012-04-13 01:08:35 +0300 af358ed7fab884532ec7 { + # controller: "Devise::SessionsController", + # action: "new", + # params: {"action"=>"new", "controller"=>"devise/sessions"}, + # format: :html, + # method: "GET", + # path: "/login/sign_in", + # status: 200, + # view_runtime: 279.3080806732178, + # db_runtime: 40.053 + # } + # + # You can also subscribe to all events whose name matches a certain regexp: + # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # ... + # end + # + # and even pass no argument to subscribe, in which case you are subscribing + # to all events. + # + # == Temporary Subscriptions + # + # Sometimes you do not want to subscribe to an event for the entire life of + # the application. There are two ways to unsubscribe. + # + # WARNING: The instrumentation framework is designed for long-running subscribers, + # use this feature sparingly because it wipes some internal caches and that has + # a negative impact on performance. + # + # === Subscribe While a Block Runs + # + # You can subscribe to some event temporarily while some block runs. For + # example, in + # + # callback = lambda {|*args| ... } + # ActiveSupport::Notifications.subscribed(callback, "sql.active_record") do + # ... + # end + # + # the callback will be called for all "sql.active_record" events instrumented + # during the execution of the block. The callback is unsubscribed automatically + # after that. + # + # To record +started+ and +finished+ values with monotonic time, + # specify the optional :monotonic option to the + # subscribed method. The :monotonic option is set + # to +false+ by default. + # + # callback = lambda {|name, started, finished, unique_id, payload| ... } + # ActiveSupport::Notifications.subscribed(callback, "sql.active_record", monotonic: true) do + # ... + # end + # + # === Manual Unsubscription + # + # The +subscribe+ method returns a subscriber object: + # + # subscriber = ActiveSupport::Notifications.subscribe("render") do |*args| + # ... + # end + # + # To prevent that block from being called anymore, just unsubscribe passing + # that reference: + # + # ActiveSupport::Notifications.unsubscribe(subscriber) + # + # You can also unsubscribe by passing the name of the subscriber object. 
Note + # that this will unsubscribe all subscriptions with the given name: + # + # ActiveSupport::Notifications.unsubscribe("render") + # + # Subscribers using a regexp or other pattern-matching object will remain subscribed + # to all events that match their original pattern, unless those events match a string + # passed to +unsubscribe+: + # + # subscriber = ActiveSupport::Notifications.subscribe(/render/) { } + # ActiveSupport::Notifications.unsubscribe('render_template.action_view') + # subscriber.matches?('render_template.action_view') # => false + # subscriber.matches?('render_partial.action_view') # => true + # + # == Default Queue + # + # Notifications ships with a queue implementation that consumes and publishes events + # to all log subscribers. You can use any queue implementation you want. + # + module Notifications + class << self + attr_accessor :notifier + + def publish(name, *args) + notifier.publish(name, *args) + end + + def instrument(name, payload = {}) + if notifier.listening?(name) + instrumenter.instrument(name, payload) { yield payload if block_given? } + else + yield payload if block_given? + end + end + + # Subscribe to a given event name with the passed +block+. + # + # You can subscribe to events by passing a String to match exact event + # names, or by passing a Regexp to match all events that match a pattern. + # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # @event = ActiveSupport::Notifications::Event.new(*args) + # end + # + # The +block+ will receive five parameters with information about the event: + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end + # + # If the block passed to the method only takes one parameter, + # it will yield an event object to the block: + # + # ActiveSupport::Notifications.subscribe(/render/) do |event| + # @event = event + # end + def subscribe(pattern = nil, callback = nil, &block) + notifier.subscribe(pattern, callback, monotonic: false, &block) + end + + def monotonic_subscribe(pattern = nil, callback = nil, &block) + notifier.subscribe(pattern, callback, monotonic: true, &block) + end + + def subscribed(callback, pattern = nil, monotonic: false, &block) + subscriber = notifier.subscribe(pattern, callback, monotonic: monotonic) + yield + ensure + unsubscribe(subscriber) + end + + def unsubscribe(subscriber_or_name) + notifier.unsubscribe(subscriber_or_name) + end + + def instrumenter + InstrumentationRegistry.instance.instrumenter_for(notifier) + end + end + + # This class is a registry which holds all of the +Instrumenter+ objects + # in a particular thread local. To access the +Instrumenter+ object for a + # particular +notifier+, you can call the following method: + # + # InstrumentationRegistry.instrumenter_for(notifier) + # + # The instrumenters for multiple notifiers are held in a single instance of + # this class. 
+ class InstrumentationRegistry # :nodoc: + extend ActiveSupport::PerThreadRegistry + + def initialize + @registry = {} + end + + def instrumenter_for(notifier) + @registry[notifier] ||= Instrumenter.new(notifier) + end + end + + self.notifier = Fanout.new + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications/fanout.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications/fanout.rb new file mode 100644 index 0000000..5fba834 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications/fanout.rb @@ -0,0 +1,259 @@ +# frozen_string_literal: true + +require "mutex_m" +require "concurrent/map" +require "set" +require "active_support/core_ext/object/try" + +module ActiveSupport + module Notifications + # This is a default queue implementation that ships with Notifications. + # It just pushes events to all registered log subscribers. + # + # This class is thread safe. All methods are reentrant. + class Fanout + include Mutex_m + + def initialize + @string_subscribers = Hash.new { |h, k| h[k] = [] } + @other_subscribers = [] + @listeners_for = Concurrent::Map.new + super + end + + def subscribe(pattern = nil, callable = nil, monotonic: false, &block) + subscriber = Subscribers.new(pattern, callable || block, monotonic) + synchronize do + if String === pattern + @string_subscribers[pattern] << subscriber + @listeners_for.delete(pattern) + else + @other_subscribers << subscriber + @listeners_for.clear + end + end + subscriber + end + + def unsubscribe(subscriber_or_name) + synchronize do + case subscriber_or_name + when String + @string_subscribers[subscriber_or_name].clear + @listeners_for.delete(subscriber_or_name) + @other_subscribers.each { |sub| sub.unsubscribe!(subscriber_or_name) } + else + pattern = subscriber_or_name.try(:pattern) + if String === pattern + @string_subscribers[pattern].delete(subscriber_or_name) + @listeners_for.delete(pattern) + else + @other_subscribers.delete(subscriber_or_name) + @listeners_for.clear + end + end + end + end + + def start(name, id, payload) + listeners_for(name).each { |s| s.start(name, id, payload) } + end + + def finish(name, id, payload, listeners = listeners_for(name)) + listeners.each { |s| s.finish(name, id, payload) } + end + + def publish(name, *args) + listeners_for(name).each { |s| s.publish(name, *args) } + end + + def listeners_for(name) + # this is correctly done double-checked locking (Concurrent::Map's lookups have volatile semantics) + @listeners_for[name] || synchronize do + # use synchronisation when accessing @subscribers + @listeners_for[name] ||= + @string_subscribers[name] + @other_subscribers.select { |s| s.subscribed_to?(name) } + end + end + + def listening?(name) + listeners_for(name).any? + end + + # This is a sync queue, so there is no waiting. + def wait + end + + module Subscribers # :nodoc: + def self.new(pattern, listener, monotonic) + subscriber_class = monotonic ? 
MonotonicTimed : Timed + + if listener.respond_to?(:start) && listener.respond_to?(:finish) + subscriber_class = Evented + else + # Doing all this to detect a block like `proc { |x| }` vs + # `proc { |*x| }` or `proc { |**x| }` + if listener.respond_to?(:parameters) + params = listener.parameters + if params.length == 1 && params.first.first == :opt + subscriber_class = EventObject + end + end + end + + wrap_all pattern, subscriber_class.new(pattern, listener) + end + + def self.wrap_all(pattern, subscriber) + unless pattern + AllMessages.new(subscriber) + else + subscriber + end + end + + class Matcher #:nodoc: + attr_reader :pattern, :exclusions + + def self.wrap(pattern) + return pattern if String === pattern + new(pattern) + end + + def initialize(pattern) + @pattern = pattern + @exclusions = Set.new + end + + def unsubscribe!(name) + exclusions << -name if pattern === name + end + + def ===(name) + pattern === name && !exclusions.include?(name) + end + end + + class Evented #:nodoc: + attr_reader :pattern + + def initialize(pattern, delegate) + @pattern = Matcher.wrap(pattern) + @delegate = delegate + @can_publish = delegate.respond_to?(:publish) + end + + def publish(name, *args) + if @can_publish + @delegate.publish name, *args + end + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def subscribed_to?(name) + pattern === name + end + + def matches?(name) + pattern && pattern === name + end + + def unsubscribe!(name) + pattern.unsubscribe!(name) + end + end + + class Timed < Evented # :nodoc: + def publish(name, *args) + @delegate.call name, *args + end + + def start(name, id, payload) + timestack = Thread.current[:_timestack] ||= [] + timestack.push Time.now + end + + def finish(name, id, payload) + timestack = Thread.current[:_timestack] + started = timestack.pop + @delegate.call(name, started, Time.now, id, payload) + end + end + + class MonotonicTimed < Evented # :nodoc: + def publish(name, *args) + @delegate.call name, *args + end + + def start(name, id, payload) + timestack = Thread.current[:_timestack_monotonic] ||= [] + timestack.push Concurrent.monotonic_time + end + + def finish(name, id, payload) + timestack = Thread.current[:_timestack_monotonic] + started = timestack.pop + @delegate.call(name, started, Concurrent.monotonic_time, id, payload) + end + end + + class EventObject < Evented + def start(name, id, payload) + stack = Thread.current[:_event_stack] ||= [] + event = build_event name, id, payload + event.start! + stack.push event + end + + def finish(name, id, payload) + stack = Thread.current[:_event_stack] + event = stack.pop + event.payload = payload + event.finish! + @delegate.call event + end + + private + def build_event(name, id, payload) + ActiveSupport::Notifications::Event.new name, nil, nil, id, payload + end + end + + class AllMessages # :nodoc: + def initialize(delegate) + @delegate = delegate + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def publish(name, *args) + @delegate.publish name, *args + end + + def subscribed_to?(name) + true + end + + def unsubscribe!(*) + false + end + + alias :matches? 
:=== + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications/instrumenter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications/instrumenter.rb new file mode 100644 index 0000000..e1a9fe3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/notifications/instrumenter.rb @@ -0,0 +1,155 @@ +# frozen_string_literal: true + +require "securerandom" + +module ActiveSupport + module Notifications + # Instrumenters are stored in a thread local. + class Instrumenter + attr_reader :id + + def initialize(notifier) + @id = unique_id + @notifier = notifier + end + + # Given a block, instrument it by measuring the time taken to execute + # and publish it. Without a block, simply send a message via the + # notifier. Notice that events get sent even if an error occurs in the + # passed-in block. + def instrument(name, payload = {}) + # some of the listeners might have state + listeners_state = start name, payload + begin + yield payload if block_given? + rescue Exception => e + payload[:exception] = [e.class.name, e.message] + payload[:exception_object] = e + raise e + ensure + finish_with_state listeners_state, name, payload + end + end + + # Send a start notification with +name+ and +payload+. + def start(name, payload) + @notifier.start name, @id, payload + end + + # Send a finish notification with +name+ and +payload+. + def finish(name, payload) + @notifier.finish name, @id, payload + end + + def finish_with_state(listeners_state, name, payload) + @notifier.finish name, @id, payload, listeners_state + end + + private + def unique_id + SecureRandom.hex(10) + end + end + + class Event + attr_reader :name, :time, :end, :transaction_id, :children + attr_accessor :payload + + def initialize(name, start, ending, transaction_id, payload) + @name = name + @payload = payload.dup + @time = start + @transaction_id = transaction_id + @end = ending + @children = [] + @cpu_time_start = 0 + @cpu_time_finish = 0 + @allocation_count_start = 0 + @allocation_count_finish = 0 + end + + # Record information at the time this event starts + def start! + @time = now + @cpu_time_start = now_cpu + @allocation_count_start = now_allocations + end + + # Record information at the time this event finishes + def finish! + @cpu_time_finish = now_cpu + @end = now + @allocation_count_finish = now_allocations + end + + # Returns the CPU time (in milliseconds) passed since the call to + # +start!+ and the call to +finish!+ + def cpu_time + (@cpu_time_finish - @cpu_time_start) * 1000 + end + + # Returns the idle time time (in milliseconds) passed since the call to + # +start!+ and the call to +finish!+ + def idle_time + duration - cpu_time + end + + # Returns the number of allocations made since the call to +start!+ and + # the call to +finish!+ + def allocations + @allocation_count_finish - @allocation_count_start + end + + # Returns the difference in milliseconds between when the execution of the + # event started and when it ended. + # + # ActiveSupport::Notifications.subscribe('wait') do |*args| + # @event = ActiveSupport::Notifications::Event.new(*args) + # end + # + # ActiveSupport::Notifications.instrument('wait') do + # sleep 1 + # end + # + # @event.duration # => 1000.138 + def duration + 1000.0 * (self.end - time) + end + + def <<(event) + @children << event + end + + def parent_of?(event) + @children.include? 
event + end + + private + def now + Concurrent.monotonic_time + end + + begin + Process.clock_gettime(Process::CLOCK_THREAD_CPUTIME_ID) + + def now_cpu + Process.clock_gettime(Process::CLOCK_THREAD_CPUTIME_ID) + end + rescue + def now_cpu + 0 + end + end + + if defined?(JRUBY_VERSION) + def now_allocations + 0 + end + else + def now_allocations + GC.stat :total_allocated_objects + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper.rb new file mode 100644 index 0000000..93d4791 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper.rb @@ -0,0 +1,397 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + extend ActiveSupport::Autoload + + eager_autoload do + autoload :NumberConverter + autoload :RoundingHelper + autoload :NumberToRoundedConverter + autoload :NumberToDelimitedConverter + autoload :NumberToHumanConverter + autoload :NumberToHumanSizeConverter + autoload :NumberToPhoneConverter + autoload :NumberToCurrencyConverter + autoload :NumberToPercentageConverter + end + + extend self + + # Formats a +number+ into a phone number (US by default e.g., (555) + # 123-9876). You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :area_code - Adds parentheses around the area code. + # * :delimiter - Specifies the delimiter to use + # (defaults to "-"). + # * :extension - Specifies an extension to add to the + # end of the generated number. + # * :country_code - Sets the country code for the phone + # number. + # * :pattern - Specifies how the number is divided into three + # groups with the custom regexp to override the default format. + # ==== Examples + # + # number_to_phone(5551234) # => "555-1234" + # number_to_phone('5551234') # => "555-1234" + # number_to_phone(1235551234) # => "123-555-1234" + # number_to_phone(1235551234, area_code: true) # => "(123) 555-1234" + # number_to_phone(1235551234, delimiter: ' ') # => "123 555 1234" + # number_to_phone(1235551234, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # number_to_phone(1235551234, country_code: 1) # => "+1-123-555-1234" + # number_to_phone('123a456') # => "123a456" + # + # number_to_phone(1235551234, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # number_to_phone(75561234567, pattern: /(\d{1,4})(\d{4})(\d{4})$/, area_code: true) + # # => "(755) 6123-4567" + # number_to_phone(13312345678, pattern: /(\d{3})(\d{4})(\d{4})$/) + # # => "133-1234-5678" + def number_to_phone(number, options = {}) + NumberToPhoneConverter.convert(number, options) + end + + # Formats a +number+ into a currency string (e.g., $13.65). You + # can customize the format in the +options+ hash. + # + # The currency unit and number formatting of the current locale will be used + # unless otherwise specified in the provided options. No currency conversion + # is performed. If the user is given a way to change their locale, they will + # also be able to change the relative value of the currency displayed with + # this helper. If your application will ever support multiple locales, you + # may want to specify a constant :locale option or consider + # using a library capable of currency conversion. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). 
+ # * :precision - Sets the level of precision (defaults + # to 2). + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :unit - Sets the denomination of the currency + # (defaults to "$"). + # * :separator - Sets the separator between the units + # (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :format - Sets the format for non-negative numbers + # (defaults to "%u%n"). Fields are %u for the + # currency, and %n for the number. + # * :negative_format - Sets the format for negative + # numbers (defaults to prepending a hyphen to the formatted + # number given by :format). Accepts the same fields + # than :format, except %n is here the + # absolute value of the number. + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). + # + # ==== Examples + # + # number_to_currency(1234567890.50) # => "$1,234,567,890.50" + # number_to_currency(1234567890.506) # => "$1,234,567,890.51" + # number_to_currency(1234567890.506, precision: 3) # => "$1,234,567,890.506" + # number_to_currency(1234567890.506, locale: :fr) # => "1 234 567 890,51 €" + # number_to_currency('123a456') # => "$123a456" + # + # number_to_currency("123a456", raise: true) # => InvalidNumberError + # + # number_to_currency(-0.456789, precision: 0) + # # => "$0" + # number_to_currency(-1234567890.50, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + # number_to_currency(1234567890.50, strip_insignificant_zeros: true) + # # => "$1,234,567,890.5" + # number_to_currency(1234567890.50, precision: 0, round_mode: :up) + # # => "$1,234,567,891" + def number_to_currency(number, options = {}) + NumberToCurrencyConverter.convert(number, options) + end + + # Formats a +number+ as a percentage string (e.g., 65%). You can + # customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). + # * :format - Specifies the format of the percentage + # string The number field is %n (defaults to "%n%"). 
+ # + # ==== Examples + # + # number_to_percentage(100) # => "100.000%" + # number_to_percentage('98') # => "98.000%" + # number_to_percentage(100, precision: 0) # => "100%" + # number_to_percentage(1000, delimiter: '.', separator: ',') # => "1.000,000%" + # number_to_percentage(302.24398923423, precision: 5) # => "302.24399%" + # number_to_percentage(1000, locale: :fr) # => "1000,000%" + # number_to_percentage(1000, precision: nil) # => "1000%" + # number_to_percentage('98a') # => "98a%" + # number_to_percentage(100, format: '%n %') # => "100.000 %" + # number_to_percentage(302.24398923423, precision: 5, round_mode: :down) # => "302.24398%" + def number_to_percentage(number, options = {}) + NumberToPercentageConverter.convert(number, options) + end + + # Formats a +number+ with grouped thousands using +delimiter+ + # (e.g., 12,324). You can customize the format in the +options+ + # hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter_pattern - Sets a custom regular expression used for + # deriving the placement of delimiter. Helpful when using currency formats + # like INR. + # + # ==== Examples + # + # number_to_delimited(12345678) # => "12,345,678" + # number_to_delimited('123456') # => "123,456" + # number_to_delimited(12345678.05) # => "12,345,678.05" + # number_to_delimited(12345678, delimiter: '.') # => "12.345.678" + # number_to_delimited(12345678, delimiter: ',') # => "12,345,678" + # number_to_delimited(12345678.05, separator: ' ') # => "12,345,678 05" + # number_to_delimited(12345678.05, locale: :fr) # => "12 345 678,05" + # number_to_delimited('112a') # => "112a" + # number_to_delimited(98765432.98, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # number_to_delimited("123456.78", + # delimiter_pattern: /(\d+?)(?=(\d\d)+(\d)(?!\d))/) + # # => "1,23,456.78" + def number_to_delimited(number, options = {}) + NumberToDelimitedConverter.convert(number, options) + end + + # Formats a +number+ with the specified level of + # :precision (e.g., 112.32 has a precision of 2 if + # +:significant+ is +false+, and 5 if +:significant+ is +true+). + # You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). 
+ # + # ==== Examples + # + # number_to_rounded(111.2345) # => "111.235" + # number_to_rounded(111.2345, precision: 2) # => "111.23" + # number_to_rounded(13, precision: 5) # => "13.00000" + # number_to_rounded(389.32314, precision: 0) # => "389" + # number_to_rounded(111.2345, significant: true) # => "111" + # number_to_rounded(111.2345, precision: 1, significant: true) # => "100" + # number_to_rounded(13, precision: 5, significant: true) # => "13.000" + # number_to_rounded(13, precision: nil) # => "13" + # number_to_rounded(389.32314, precision: 0, round_mode: :up) # => "390" + # number_to_rounded(111.234, locale: :fr) # => "111,234" + # + # number_to_rounded(13, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # + # number_to_rounded(389.32314, precision: 4, significant: true) # => "389.3" + # number_to_rounded(1111.2345, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + def number_to_rounded(number, options = {}) + NumberToRoundedConverter.convert(number, options) + end + + # Formats the bytes in +number+ into a more understandable + # representation (e.g., giving it 1500 yields 1.46 KB). This + # method is useful for reporting file sizes to users. You can + # customize the format in the +options+ hash. + # + # See number_to_human if you want to pretty-print a + # generic number. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # + # ==== Examples + # + # number_to_human_size(123) # => "123 Bytes" + # number_to_human_size(1234) # => "1.21 KB" + # number_to_human_size(12345) # => "12.1 KB" + # number_to_human_size(1234567) # => "1.18 MB" + # number_to_human_size(1234567890) # => "1.15 GB" + # number_to_human_size(1234567890123) # => "1.12 TB" + # number_to_human_size(1234567890123456) # => "1.1 PB" + # number_to_human_size(1234567890123456789) # => "1.07 EB" + # number_to_human_size(1234567, precision: 2) # => "1.2 MB" + # number_to_human_size(483989, precision: 2) # => "470 KB" + # number_to_human_size(483989, precision: 2, round_mode: :up) # => "480 KB" + # number_to_human_size(1234567, precision: 2, separator: ',') # => "1,2 MB" + # number_to_human_size(1234567890123, precision: 5) # => "1.1228 TB" + # number_to_human_size(524288000, precision: 5) # => "500 MB" + def number_to_human_size(number, options = {}) + NumberToHumanSizeConverter.convert(number, options) + end + + # Pretty prints (formats and approximates) a number in a way it + # is more readable by humans (e.g.: 1200000000 becomes "1.2 + # Billion"). This is useful for numbers that can get very large + # (and too hard to read). + # + # See number_to_human_size if you want to print a file + # size. + # + # You can also define your own unit-quantifier names if you want + # to use other decimal units (e.g.: 1500 becomes "1.5 + # kilometers", 0.150 becomes "150 milliliters", etc). 
You may + # define a wide range of unit quantifiers, even fractional ones + # (centi, deci, mili, etc). + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # * :units - A Hash of unit quantifier names. Or a + # string containing an i18n scope where to find this hash. It + # might have the following keys: + # * *integers*: :unit, :ten, + # :hundred, :thousand, :million, + # :billion, :trillion, + # :quadrillion + # * *fractionals*: :deci, :centi, + # :mili, :micro, :nano, + # :pico, :femto + # * :format - Sets the format of the output string + # (defaults to "%n %u"). The field types are: + # * %u - The quantifier (ex.: 'thousand') + # * %n - The number + # + # ==== Examples + # + # number_to_human(123) # => "123" + # number_to_human(1234) # => "1.23 Thousand" + # number_to_human(12345) # => "12.3 Thousand" + # number_to_human(1234567) # => "1.23 Million" + # number_to_human(1234567890) # => "1.23 Billion" + # number_to_human(1234567890123) # => "1.23 Trillion" + # number_to_human(1234567890123456) # => "1.23 Quadrillion" + # number_to_human(1234567890123456789) # => "1230 Quadrillion" + # number_to_human(489939, precision: 2) # => "490 Thousand" + # number_to_human(489939, precision: 4) # => "489.9 Thousand" + # number_to_human(489939, precision: 2 + # , round_mode: :down) # => "480 Thousand" + # number_to_human(1234567, precision: 4, + # significant: false) # => "1.2346 Million" + # number_to_human(1234567, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + # + # number_to_human(500000000, precision: 5) # => "500 Million" + # number_to_human(12345012345, significant: false) # => "12.345 Billion" + # + # Non-significant zeros after the decimal separator are stripped + # out by default (set :strip_insignificant_zeros to + # +false+ to change that): + # + # number_to_human(12.00001) # => "12" + # number_to_human(12.00001, strip_insignificant_zeros: false) # => "12.0" + # + # ==== Custom Unit Quantifiers + # + # You can also use your own custom unit quantifiers: + # number_to_human(500000, units: { unit: 'ml', thousand: 'lt' }) # => "500 lt" + # + # If in your I18n locale you have: + # + # distance: + # centi: + # one: "centimeter" + # other: "centimeters" + # unit: + # one: "meter" + # other: "meters" + # thousand: + # one: "kilometer" + # other: "kilometers" + # billion: "gazillion-distance" + # + # Then you could do: + # + # number_to_human(543934, units: :distance) # => "544 kilometers" + # number_to_human(54393498, units: :distance) # => "54400 kilometers" + # number_to_human(54393498000, units: :distance) # => "54.4 gazillion-distance" + # number_to_human(343, units: :distance, precision: 1) # => "300 meters" + # number_to_human(1, units: :distance) # => "1 meter" + # number_to_human(0.34, units: :distance) # => "34 centimeters" + def number_to_human(number, options = 
{}) + NumberToHumanConverter.convert(number, options) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_converter.rb new file mode 100644 index 0000000..75509f1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_converter.rb @@ -0,0 +1,183 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/hash/keys" +require "active_support/i18n" +require "active_support/core_ext/class/attribute" + +module ActiveSupport + module NumberHelper + class NumberConverter # :nodoc: + # Default and i18n option namespace per class + class_attribute :namespace + + # Does the object need a number that is a valid float? + class_attribute :validate_float + + attr_reader :number, :opts + + DEFAULTS = { + # Used in number_to_delimited + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: { + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: ".", + # Delimits thousands (e.g. 1,000,000 is a million) (always in groups of three) + delimiter: ",", + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3, + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false, + # If set, the zeros after the decimal separator will always be stripped (e.g.: 1.200 will be 1.2) + strip_insignificant_zeros: false + }, + + # Used in number_to_currency + currency: { + format: { + format: "%u%n", + negative_format: "-%u%n", + unit: "$", + # These five are to override number.format and are optional + separator: ".", + delimiter: ",", + precision: 2, + significant: false, + strip_insignificant_zeros: false + } + }, + + # Used in number_to_percentage + percentage: { + format: { + delimiter: "", + format: "%n%" + } + }, + + # Used in number_to_rounded + precision: { + format: { + delimiter: "" + } + }, + + # Used in number_to_human_size and number_to_human + human: { + format: { + # These five are to override number.format and are optional + delimiter: "", + precision: 3, + significant: true, + strip_insignificant_zeros: true + }, + # Used in number_to_human_size + storage_units: { + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u", + units: { + byte: "Bytes", + kb: "KB", + mb: "MB", + gb: "GB", + tb: "TB" + } + }, + # Used in number_to_human + decimal_units: { + format: "%n %u", + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. 
+ units: { + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "", + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: "Thousand", + million: "Million", + billion: "Billion", + trillion: "Trillion", + quadrillion: "Quadrillion" + } + } + } + } + + def self.convert(number, options) + new(number, options).execute + end + + def initialize(number, options) + @number = number + @opts = options.symbolize_keys + end + + def execute + if !number + nil + elsif validate_float? && !valid_float? + number + else + convert + end + end + + private + def options + @options ||= format_options.merge(opts) + end + + def format_options + default_format_options.merge!(i18n_format_options) + end + + def default_format_options + options = DEFAULTS[:format].dup + options.merge!(DEFAULTS[namespace][:format]) if namespace + options + end + + def i18n_format_options + locale = opts[:locale] + options = I18n.translate(:'number.format', locale: locale, default: {}).dup + + if namespace + options.merge!(I18n.translate(:"number.#{namespace}.format", locale: locale, default: {})) + end + + options + end + + def translate_number_value_with_default(key, **i18n_options) + I18n.translate(key, **{ default: default_value(key), scope: :number }.merge!(i18n_options)) + end + + def translate_in_locale(key, **i18n_options) + translate_number_value_with_default(key, **{ locale: options[:locale] }.merge(i18n_options)) + end + + def default_value(key) + key.split(".").reduce(DEFAULTS) { |defaults, k| defaults[k.to_sym] } + end + + def valid_float? + Float(number) + rescue ArgumentError, TypeError + false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_currency_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_currency_converter.rb new file mode 100644 index 0000000..c0efaba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_currency_converter.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToCurrencyConverter < NumberConverter # :nodoc: + self.namespace = :currency + + def convert + number = self.number.to_s.strip + format = options[:format] + + if number.sub!(/^-/, "") && + (options[:precision] != 0 || number.to_f > 0.5) + format = options[:negative_format] + end + + rounded_number = NumberToRoundedConverter.convert(number, options) + format.gsub("%n", rounded_number).gsub("%u", options[:unit]) + end + + private + def options + @options ||= begin + defaults = default_format_options.merge(i18n_opts) + # Override negative format if format options are given + defaults[:negative_format] = "-#{opts[:format]}" if opts[:format] + defaults.merge!(opts) + end + end + + def i18n_opts + # Set International negative format if it does not exist + i18n = i18n_format_options + i18n[:negative_format] ||= "-#{i18n[:format]}" if i18n[:format] + i18n + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_delimited_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_delimited_converter.rb new file mode 100644 index 0000000..3514442 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_delimited_converter.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToDelimitedConverter < NumberConverter #:nodoc: + self.validate_float = true + + DEFAULT_DELIMITER_REGEX = /(\d)(?=(\d\d\d)+(?!\d))/ + + def convert + parts.join(options[:separator]) + end + + private + def parts + left, right = number.to_s.split(".") + left.gsub!(delimiter_pattern) do |digit_to_delimit| + "#{digit_to_delimit}#{options[:delimiter]}" + end + [left, right].compact + end + + def delimiter_pattern + options.fetch(:delimiter_pattern, DEFAULT_DELIMITER_REGEX) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_human_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_human_converter.rb new file mode 100644 index 0000000..3f92628 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_human_converter.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToHumanConverter < NumberConverter # :nodoc: + DECIMAL_UNITS = { 0 => :unit, 1 => :ten, 2 => :hundred, 3 => :thousand, 6 => :million, 9 => :billion, 12 => :trillion, 15 => :quadrillion, + -1 => :deci, -2 => :centi, -3 => :mili, -6 => :micro, -9 => :nano, -12 => :pico, -15 => :femto } + INVERTED_DECIMAL_UNITS = DECIMAL_UNITS.invert + + self.namespace = :human + self.validate_float = true + + def convert # :nodoc: + @number = RoundingHelper.new(options).round(number) + @number = Float(number) + + # For backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files. + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + units = opts[:units] + exponent = calculate_exponent(units) + @number = number / (10**exponent) + + rounded_number = NumberToRoundedConverter.convert(number, options) + unit = determine_unit(units, exponent) + format.gsub("%n", rounded_number).gsub("%u", unit).strip + end + + private + def format + options[:format] || translate_in_locale("human.decimal_units.format") + end + + def determine_unit(units, exponent) + exp = DECIMAL_UNITS[exponent] + case units + when Hash + units[exp] || "" + when String, Symbol + I18n.translate("#{units}.#{exp}", locale: options[:locale], count: number.to_i) + else + translate_in_locale("human.decimal_units.units.#{exp}", count: number.to_i) + end + end + + def calculate_exponent(units) + exponent = number != 0 ? Math.log10(number.abs).floor : 0 + unit_exponents(units).find { |e| exponent >= e } || 0 + end + + def unit_exponents(units) + case units + when Hash + units + when String, Symbol + I18n.translate(units.to_s, locale: options[:locale], raise: true) + when nil + translate_in_locale("human.decimal_units.units", raise: true) + else + raise ArgumentError, ":units must be a Hash or String translation scope." 
+ end.keys.map { |e_name| INVERTED_DECIMAL_UNITS[e_name] }.sort_by(&:-@) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_human_size_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_human_size_converter.rb new file mode 100644 index 0000000..a9b7ce2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_human_size_converter.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToHumanSizeConverter < NumberConverter #:nodoc: + STORAGE_UNITS = [:byte, :kb, :mb, :gb, :tb, :pb, :eb] + + self.namespace = :human + self.validate_float = true + + def convert + @number = Float(number) + + # For backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files. + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + if smaller_than_base? + number_to_format = number.to_i.to_s + else + human_size = number / (base**exponent) + number_to_format = NumberToRoundedConverter.convert(human_size, options) + end + conversion_format.gsub("%n", number_to_format).gsub("%u", unit) + end + + private + def conversion_format + translate_number_value_with_default("human.storage_units.format", locale: options[:locale], raise: true) + end + + def unit + translate_number_value_with_default(storage_unit_key, locale: options[:locale], count: number.to_i, raise: true) + end + + def storage_unit_key + key_end = smaller_than_base? ? "byte" : STORAGE_UNITS[exponent] + "human.storage_units.units.#{key_end}" + end + + def exponent + max = STORAGE_UNITS.size - 1 + exp = (Math.log(number) / Math.log(base)).to_i + exp = max if exp > max # avoid overflow for the highest unit + exp + end + + def smaller_than_base? 
+ number.to_i < base + end + + def base + 1024 + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_percentage_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_percentage_converter.rb new file mode 100644 index 0000000..0c2e190 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_percentage_converter.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToPercentageConverter < NumberConverter # :nodoc: + self.namespace = :percentage + + def convert + rounded_number = NumberToRoundedConverter.convert(number, options) + options[:format].gsub("%n", rounded_number) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_phone_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_phone_converter.rb new file mode 100644 index 0000000..21eadfd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_phone_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToPhoneConverter < NumberConverter #:nodoc: + def convert + str = country_code(opts[:country_code]).dup + str << convert_to_phone_number(number.to_s.strip) + str << phone_ext(opts[:extension]) + end + + private + def convert_to_phone_number(number) + if opts[:area_code] + convert_with_area_code(number) + else + convert_without_area_code(number) + end + end + + def convert_with_area_code(number) + default_pattern = /(\d{1,3})(\d{3})(\d{4}$)/ + number.gsub!(regexp_pattern(default_pattern), + "(\\1) \\2#{delimiter}\\3") + number + end + + def convert_without_area_code(number) + default_pattern = /(\d{0,3})(\d{3})(\d{4})$/ + number.gsub!(regexp_pattern(default_pattern), + "\\1#{delimiter}\\2#{delimiter}\\3") + number.slice!(0, 1) if start_with_delimiter?(number) + number + end + + def start_with_delimiter?(number) + delimiter.present? && number.start_with?(delimiter) + end + + def delimiter + opts[:delimiter] || "-" + end + + def country_code(code) + code.blank? ? "" : "+#{code}#{delimiter}" + end + + def phone_ext(ext) + ext.blank? ? 
"" : " x #{ext}" + end + + def regexp_pattern(default_pattern) + opts.fetch :pattern, default_pattern + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_rounded_converter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_rounded_converter.rb new file mode 100644 index 0000000..f48a515 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/number_to_rounded_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToRoundedConverter < NumberConverter # :nodoc: + self.namespace = :precision + self.validate_float = true + + def convert + helper = RoundingHelper.new(options) + rounded_number = helper.round(number) + + if precision = options[:precision] + if options[:significant] && precision > 0 + digits = helper.digit_count(rounded_number) + precision -= digits + precision = 0 if precision < 0 # don't let it be negative + end + + formatted_string = + if rounded_number.finite? + s = rounded_number.to_s("F") + a, b = s.split(".", 2) + if precision != 0 + b << "0" * precision + a << "." + a << b[0, precision] + end + a + else + # Infinity/NaN + "%f" % rounded_number + end + else + formatted_string = rounded_number + end + + delimited_number = NumberToDelimitedConverter.convert(formatted_string, options) + format_number(delimited_number) + end + + private + def strip_insignificant_zeros + options[:strip_insignificant_zeros] + end + + def format_number(number) + if strip_insignificant_zeros + escaped_separator = Regexp.escape(options[:separator]) + number.sub(/(#{escaped_separator})(\d*[1-9])?0+\z/, '\1\2').sub(/#{escaped_separator}\z/, "") + else + number + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/rounding_helper.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/rounding_helper.rb new file mode 100644 index 0000000..56b996a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/number_helper/rounding_helper.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class RoundingHelper # :nodoc: + attr_reader :options + + def initialize(options) + @options = options + end + + def round(number) + precision = absolute_precision(number) + return number unless precision + + rounded_number = convert_to_decimal(number).round(precision, options.fetch(:round_mode, :default).to_sym) + rounded_number.zero? ? rounded_number.abs : rounded_number # prevent showing negative zeros + end + + def digit_count(number) + return 1 if number.zero? 
+ (Math.log10(number.abs) + 1).floor + end + + private + def convert_to_decimal(number) + case number + when Float, String + BigDecimal(number.to_s) + when Rational + BigDecimal(number, digit_count(number.to_i) + options[:precision]) + else + number.to_d + end + end + + def absolute_precision(number) + if significant && options[:precision] > 0 + options[:precision] - digit_count(convert_to_decimal(number)) + else + options[:precision] + end + end + + def significant + options[:significant] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/option_merger.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/option_merger.rb new file mode 100644 index 0000000..c7f7c0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/option_merger.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/symbol/starts_ends_with" + +module ActiveSupport + class OptionMerger #:nodoc: + instance_methods.each do |method| + undef_method(method) unless method.start_with?("__", "instance_eval", "class", "object_id") + end + + def initialize(context, options) + @context, @options = context, options + end + + private + def method_missing(method, *arguments, &block) + options = nil + if arguments.first.is_a?(Proc) + proc = arguments.pop + arguments << lambda { |*args| @options.deep_merge(proc.call(*args)) } + elsif arguments.last.respond_to?(:to_hash) + options = @options.deep_merge(arguments.pop) + else + options = @options + end + + invoke_method(method, arguments, options, &block) + end + + if RUBY_VERSION >= "2.7" + def invoke_method(method, arguments, options, &block) + if options + @context.__send__(method, *arguments, **options, &block) + else + @context.__send__(method, *arguments, &block) + end + end + else + def invoke_method(method, arguments, options, &block) + arguments << options.dup if options + @context.__send__(method, *arguments, &block) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/ordered_hash.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/ordered_hash.rb new file mode 100644 index 0000000..ad11524 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/ordered_hash.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require "yaml" + +YAML.add_builtin_type("omap") do |type, val| + ActiveSupport::OrderedHash[val.map { |v| v.to_a.first }] +end + +module ActiveSupport + # DEPRECATED: ActiveSupport::OrderedHash implements a hash that preserves + # insertion order. + # + # oh = ActiveSupport::OrderedHash.new + # oh[:a] = 1 + # oh[:b] = 2 + # oh.keys # => [:a, :b], this order is guaranteed + # + # Also, maps the +omap+ feature for YAML files + # (See https://yaml.org/type/omap.html) to support ordered items + # when loading from yaml. + # + # ActiveSupport::OrderedHash is namespaced to prevent conflicts + # with other implementations. 
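# A small runnable sketch of the behaviour documented above; the variable name
# and values are illustrative, not part of the vendored file:
require "active_support"
require "active_support/ordered_hash"

oh = ActiveSupport::OrderedHash.new
oh[:a] = 1
oh[:b] = 2
oh.keys                       # => [:a, :b]
oh.select { |k, _| k == :a }  # => {:a=>1}, still an ActiveSupport::OrderedHash
oh.extractable_options?       # => true, so Array#extract_options! will pull it out
oh.to_yaml                    # emitted as a YAML !omap sequence of one-pair hashes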
+ class OrderedHash < ::Hash + def to_yaml_type + "!tag:yaml.org,2002:omap" + end + + def encode_with(coder) + coder.represent_seq "!omap", map { |k, v| { k => v } } + end + + def select(*args, &block) + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def nested_under_indifferent_access + self + end + + # Returns true to make sure that this hash is extractable via Array#extract_options! + def extractable_options? + true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/ordered_options.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/ordered_options.rb new file mode 100644 index 0000000..ba14907 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/ordered_options.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" + +module ActiveSupport + # +OrderedOptions+ inherits from +Hash+ and provides dynamic accessor methods. + # + # With a +Hash+, key-value pairs are typically managed like this: + # + # h = {} + # h[:boy] = 'John' + # h[:girl] = 'Mary' + # h[:boy] # => 'John' + # h[:girl] # => 'Mary' + # h[:dog] # => nil + # + # Using +OrderedOptions+, the above code can be written as: + # + # h = ActiveSupport::OrderedOptions.new + # h.boy = 'John' + # h.girl = 'Mary' + # h.boy # => 'John' + # h.girl # => 'Mary' + # h.dog # => nil + # + # To raise an exception when the value is blank, append a + # bang to the key name, like: + # + # h.dog! # => raises KeyError: :dog is blank + # + class OrderedOptions < Hash + alias_method :_get, :[] # preserve the original #[] method + protected :_get # make it protected + + def []=(key, value) + super(key.to_sym, value) + end + + def [](key) + super(key.to_sym) + end + + def method_missing(name, *args) + name_string = +name.to_s + if name_string.chomp!("=") + self[name_string] = args.first + else + bangs = name_string.chomp!("!") + + if bangs + self[name_string].presence || raise(KeyError.new(":#{name_string} is blank")) + else + self[name_string] + end + end + end + + def respond_to_missing?(name, include_private) + true + end + + def extractable_options? + true + end + + def inspect + "#<#{self.class.name} #{super}>" + end + end + + # +InheritableOptions+ provides a constructor to build an +OrderedOptions+ + # hash inherited from another hash. + # + # Use this if you already have some hash and you want to create a new one based on it. 
+ # + # h = ActiveSupport::InheritableOptions.new({ girl: 'Mary', boy: 'John' }) + # h.girl # => 'Mary' + # h.boy # => 'John' + class InheritableOptions < OrderedOptions + def initialize(parent = nil) + if parent.kind_of?(OrderedOptions) + # use the faster _get when dealing with OrderedOptions + super() { |h, k| parent._get(k) } + elsif parent + super() { |h, k| parent[k] } + else + super() + end + end + + def inheritable_copy + self.class.new(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/parameter_filter.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/parameter_filter.rb new file mode 100644 index 0000000..1347782 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/parameter_filter.rb @@ -0,0 +1,133 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" + +module ActiveSupport + # +ParameterFilter+ allows you to specify keys for sensitive data from + # hash-like object and replace corresponding value. Filtering only certain + # sub-keys from a hash is possible by using the dot notation: + # 'credit_card.number'. If a proc is given, each key and value of a hash and + # all sub-hashes are passed to it, where the value or the key can be replaced + # using String#replace or similar methods. + # + # ActiveSupport::ParameterFilter.new([:password]) + # => replaces the value to all keys matching /password/i with "[FILTERED]" + # + # ActiveSupport::ParameterFilter.new([:foo, "bar"]) + # => replaces the value to all keys matching /foo|bar/i with "[FILTERED]" + # + # ActiveSupport::ParameterFilter.new(["credit_card.code"]) + # => replaces { credit_card: {code: "xxxx"} } with "[FILTERED]", does not + # change { file: { code: "xxxx"} } + # + # ActiveSupport::ParameterFilter.new([-> (k, v) do + # v.reverse! if /secret/i.match?(k) + # end]) + # => reverses the value to all keys matching /secret/i + class ParameterFilter + FILTERED = "[FILTERED]" # :nodoc: + + # Create instance with given filters. Supported type of filters are +String+, +Regexp+, and +Proc+. + # Other types of filters are treated as +String+ using +to_s+. + # For +Proc+ filters, key, value, and optional original hash is passed to block arguments. + # + # ==== Options + # + # * :mask - A replaced object when filtered. Defaults to "[FILTERED]". + def initialize(filters = [], mask: FILTERED) + @filters = filters + @mask = mask + end + + # Mask value of +params+ if key matches one of filters. + def filter(params) + compiled_filter.call(params) + end + + # Returns filtered value for given key. For +Proc+ filters, third block argument is not populated. + def filter_param(key, value) + @filters.empty? ? value : compiled_filter.value_for_key(key, value) + end + + private + def compiled_filter + @compiled_filter ||= CompiledFilter.compile(@filters, mask: @mask) + end + + class CompiledFilter # :nodoc: + def self.compile(filters, mask:) + return lambda { |params| params.dup } if filters.empty? + + strings, regexps, blocks, deep_regexps, deep_strings = [], [], [], nil, nil + + filters.each do |item| + case item + when Proc + blocks << item + when Regexp + if item.to_s.include?("\\.") + (deep_regexps ||= []) << item + else + regexps << item + end + else + s = Regexp.escape(item.to_s) + if s.include?("\\.") + (deep_strings ||= []) << s + else + strings << s + end + end + end + + regexps << Regexp.new(strings.join("|"), true) unless strings.empty? 
+ (deep_regexps ||= []) << Regexp.new(deep_strings.join("|"), true) if deep_strings&.any? + + new regexps, deep_regexps, blocks, mask: mask + end + + attr_reader :regexps, :deep_regexps, :blocks + + def initialize(regexps, deep_regexps, blocks, mask:) + @regexps = regexps + @deep_regexps = deep_regexps&.any? ? deep_regexps : nil + @blocks = blocks + @mask = mask + end + + def call(params, parents = [], original_params = params) + filtered_params = params.class.new + + params.each do |key, value| + filtered_params[key] = value_for_key(key, value, parents, original_params) + end + + filtered_params + end + + def value_for_key(key, value, parents = [], original_params = nil) + parents.push(key) if deep_regexps + if regexps.any? { |r| r.match?(key.to_s) } + value = @mask + elsif deep_regexps && (joined = parents.join(".")) && deep_regexps.any? { |r| r.match?(joined) } + value = @mask + elsif value.is_a?(Hash) + value = call(value, parents, original_params) + elsif value.is_a?(Array) + # If we don't pop the current parent it will be duplicated as we + # process each array value. + parents.pop if deep_regexps + value = value.map { |v| value_for_key(key, v, parents, original_params) } + # Restore the parent stack after processing the array. + parents.push(key) if deep_regexps + elsif blocks.any? + key = key.dup if key.duplicable? + value = value.dup if value.duplicable? + blocks.each { |b| b.arity == 2 ? b.call(key, value) : b.call(key, value, original_params) } + end + parents.pop if deep_regexps + value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/per_thread_registry.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/per_thread_registry.rb new file mode 100644 index 0000000..bf5a3b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/per_thread_registry.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" + +module ActiveSupport + # NOTE: This approach has been deprecated for end-user code in favor of {thread_mattr_accessor}[rdoc-ref:Module#thread_mattr_accessor] and friends. + # Please use that approach instead. + # + # This module is used to encapsulate access to thread local variables. + # + # Instead of polluting the thread locals namespace: + # + # Thread.current[:connection_handler] + # + # you define a class that extends this module: + # + # module ActiveRecord + # class RuntimeRegistry + # extend ActiveSupport::PerThreadRegistry + # + # attr_accessor :connection_handler + # end + # end + # + # and invoke the declared instance accessors as class methods. So + # + # ActiveRecord::RuntimeRegistry.connection_handler = connection_handler + # + # sets a connection handler local to the current thread, and + # + # ActiveRecord::RuntimeRegistry.connection_handler + # + # returns a connection handler local to the current thread. + # + # This feature is accomplished by instantiating the class and storing the + # instance as a thread local keyed by the class name. In the example above + # a key "ActiveRecord::RuntimeRegistry" is stored in Thread.current. + # The class methods proxy to said thread local instance. + # + # If the class has an initializer, it must accept no arguments. 
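# A runnable sketch of the per-thread pattern documented above; CounterRegistry
# is a made-up example class, not part of Active Support:
require "active_support"
require "active_support/per_thread_registry"

class CounterRegistry
  extend ActiveSupport::PerThreadRegistry
  attr_accessor :count
end

CounterRegistry.count = 7
CounterRegistry.count                       # => 7, stored in Thread.current["CounterRegistry"]
Thread.new { CounterRegistry.count }.value  # => nil, each thread gets its own instance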
+ module PerThreadRegistry + def self.extended(object) + object.instance_variable_set :@per_thread_registry_key, object.name.freeze + end + + def instance + Thread.current[@per_thread_registry_key] ||= new + end + + private + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/proxy_object.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/proxy_object.rb new file mode 100644 index 0000000..0965fcd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/proxy_object.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module ActiveSupport + # A class with no predefined methods that behaves similarly to Builder's + # BlankSlate. Used for proxy classes. + class ProxyObject < ::BasicObject + undef_method :== + undef_method :equal? + + # Let ActiveSupport::ProxyObject at least raise exceptions. + def raise(*args) + ::Object.send(:raise, *args) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/rails.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/rails.rb new file mode 100644 index 0000000..75676a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/rails.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# This is a private interface. +# +# Rails components cherry pick from Active Support as needed, but there are a +# few features that are used for sure in some way or another and it is not worth +# putting individual requires absolutely everywhere. Think blank? for example. +# +# This file is loaded by every Rails component except Active Support itself, +# but it does not belong to the Rails public interface. It is internal to +# Rails and can change anytime. + +# Defines Object#blank? and Object#present?. +require "active_support/core_ext/object/blank" + +# Support for ClassMethods and the included macro. +require "active_support/concern" + +# Defines Class#class_attribute. +require "active_support/core_ext/class/attribute" + +# Defines Module#delegate. +require "active_support/core_ext/module/delegation" + +# Defines ActiveSupport::Deprecation. +require "active_support/deprecation" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/railtie.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/railtie.rb new file mode 100644 index 0000000..c36453f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/railtie.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/i18n_railtie" + +module ActiveSupport + class Railtie < Rails::Railtie # :nodoc: + config.active_support = ActiveSupport::OrderedOptions.new + + config.eager_load_namespaces << ActiveSupport + + initializer "active_support.set_authenticated_message_encryption" do |app| + config.after_initialize do + unless app.config.active_support.use_authenticated_message_encryption.nil? 
+ ActiveSupport::MessageEncryptor.use_authenticated_message_encryption = + app.config.active_support.use_authenticated_message_encryption + end + end + end + + initializer "active_support.reset_all_current_attributes_instances" do |app| + app.reloader.before_class_unload { ActiveSupport::CurrentAttributes.clear_all } + app.executor.to_run { ActiveSupport::CurrentAttributes.reset_all } + app.executor.to_complete { ActiveSupport::CurrentAttributes.reset_all } + + ActiveSupport.on_load(:active_support_test_case) do + require "active_support/current_attributes/test_helper" + include ActiveSupport::CurrentAttributes::TestHelper + end + end + + initializer "active_support.deprecation_behavior" do |app| + if deprecation = app.config.active_support.deprecation + ActiveSupport::Deprecation.behavior = deprecation + end + + if disallowed_deprecation = app.config.active_support.disallowed_deprecation + ActiveSupport::Deprecation.disallowed_behavior = disallowed_deprecation + end + + if disallowed_warnings = app.config.active_support.disallowed_deprecation_warnings + ActiveSupport::Deprecation.disallowed_warnings = disallowed_warnings + end + end + + # Sets the default value for Time.zone + # If assigned value cannot be matched to a TimeZone, an exception will be raised. + initializer "active_support.initialize_time_zone" do |app| + begin + TZInfo::DataSource.get + rescue TZInfo::DataSourceNotFound => e + raise e.exception "tzinfo-data is not present. Please add gem 'tzinfo-data' to your Gemfile and run bundle install" + end + require "active_support/core_ext/time/zones" + Time.zone_default = Time.find_zone!(app.config.time_zone) + end + + # Sets the default week start + # If assigned value is not a valid day symbol (e.g. :sunday, :monday, ...), an exception will be raised. + initializer "active_support.initialize_beginning_of_week" do |app| + require "active_support/core_ext/date/calculations" + beginning_of_week_default = Date.find_beginning_of_week!(app.config.beginning_of_week) + + Date.beginning_of_week_default = beginning_of_week_default + end + + initializer "active_support.require_master_key" do |app| + if app.config.respond_to?(:require_master_key) && app.config.require_master_key + begin + app.credentials.key + rescue ActiveSupport::EncryptedFile::MissingKeyError => error + $stderr.puts error.message + exit 1 + end + end + end + + initializer "active_support.set_configs" do |app| + app.config.active_support.each do |k, v| + k = "#{k}=" + ActiveSupport.public_send(k, v) if ActiveSupport.respond_to? k + end + end + + initializer "active_support.set_hash_digest_class" do |app| + config.after_initialize do + if app.config.active_support.use_sha1_digests + ActiveSupport::Deprecation.warn(<<-MSG.squish) + config.active_support.use_sha1_digests is deprecated and will + be removed from Rails 6.2. Use + config.active_support.hash_digest_class = ::Digest::SHA1 instead. 
+ MSG + ActiveSupport::Digest.hash_digest_class = ::Digest::SHA1 + end + + if klass = app.config.active_support.hash_digest_class + ActiveSupport::Digest.hash_digest_class = klass + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/reloader.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/reloader.rb new file mode 100644 index 0000000..e751866 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/reloader.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" +require "active_support/executor" + +module ActiveSupport + #-- + # This class defines several callbacks: + # + # to_prepare -- Run once at application startup, and also from + # +to_run+. + # + # to_run -- Run before a work run that is reloading. If + # +reload_classes_only_on_change+ is true (the default), the class + # unload will have already occurred. + # + # to_complete -- Run after a work run that has reloaded. If + # +reload_classes_only_on_change+ is false, the class unload will + # have occurred after the work run, but before this callback. + # + # before_class_unload -- Run immediately before the classes are + # unloaded. + # + # after_class_unload -- Run immediately after the classes are + # unloaded. + # + class Reloader < ExecutionWrapper + define_callbacks :prepare + + define_callbacks :class_unload + + # Registers a callback that will run once at application startup and every time the code is reloaded. + def self.to_prepare(*args, &block) + set_callback(:prepare, *args, &block) + end + + # Registers a callback that will run immediately before the classes are unloaded. + def self.before_class_unload(*args, &block) + set_callback(:class_unload, *args, &block) + end + + # Registers a callback that will run immediately after the classes are unloaded. + def self.after_class_unload(*args, &block) + set_callback(:class_unload, :after, *args, &block) + end + + to_run(:after) { self.class.prepare! } + + # Initiate a manual reload + def self.reload! + executor.wrap do + new.tap do |instance| + instance.run! + ensure + instance.complete! + end + end + prepare! + end + + def self.run!(reset: false) # :nodoc: + if check! + super + else + Null + end + end + + # Run the supplied block as a work unit, reloading code as needed + def self.wrap + executor.wrap do + super + end + end + + class_attribute :executor, default: Executor + class_attribute :check, default: lambda { false } + + def self.check! # :nodoc: + @should_reload ||= check.call + end + + def self.reloaded! # :nodoc: + @should_reload = false + end + + def self.prepare! # :nodoc: + new.run_callbacks(:prepare) + end + + def initialize + super + @locked = false + end + + # Acquire the ActiveSupport::Dependencies::Interlock unload lock, + # ensuring it will be released automatically + def require_unload_lock! + unless @locked + ActiveSupport::Dependencies.interlock.start_unloading + @locked = true + end + end + + # Release the unload lock if it has been previously obtained + def release_unload_lock! + if @locked + @locked = false + ActiveSupport::Dependencies.interlock.done_unloading + end + end + + def run! # :nodoc: + super + release_unload_lock! + end + + def class_unload!(&block) # :nodoc: + require_unload_lock! + run_callbacks(:class_unload, &block) + end + + def complete! # :nodoc: + super + self.class.reloaded! + ensure + release_unload_lock! 
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/rescuable.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/rescuable.rb new file mode 100644 index 0000000..2b96567 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/rescuable.rb @@ -0,0 +1,174 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # Rescuable module adds support for easier exception handling. + module Rescuable + extend Concern + + included do + class_attribute :rescue_handlers, default: [] + end + + module ClassMethods + # Registers exception classes with a handler to be called by rescue_with_handler. + # + # rescue_from receives a series of exception classes or class + # names, and an exception handler specified by a trailing :with + # option containing the name of a method or a Proc object. Alternatively, a block + # can be given as the handler. + # + # Handlers that take one argument will be called with the exception, so + # that the exception can be inspected when dealing with it. + # + # Handlers are inherited. They are searched from right to left, from + # bottom to top, and up the hierarchy. The handler of the first class for + # which exception.is_a?(klass) holds true is the one invoked, if + # any. + # + # class ApplicationController < ActionController::Base + # rescue_from User::NotAuthorized, with: :deny_access # self defined exception + # rescue_from ActiveRecord::RecordInvalid, with: :show_errors + # + # rescue_from 'MyAppError::Base' do |exception| + # render xml: exception, status: 500 + # end + # + # private + # def deny_access + # ... + # end + # + # def show_errors(exception) + # exception.record.new_record? ? ... + # end + # end + # + # Exceptions raised inside exception handlers are not propagated up. + def rescue_from(*klasses, with: nil, &block) + unless with + if block_given? + with = block + else + raise ArgumentError, "Need a handler. Pass the with: keyword argument or provide a block." + end + end + + klasses.each do |klass| + key = if klass.is_a?(Module) && klass.respond_to?(:===) + klass.name + elsif klass.is_a?(String) + klass + else + raise ArgumentError, "#{klass.inspect} must be an Exception class or a String referencing an Exception class" + end + + # Put the new handler at the end because the list is read in reverse. + self.rescue_handlers += [[key, with]] + end + end + + # Matches an exception to a handler based on the exception class. + # + # If no handler matches the exception, check for a handler matching the + # (optional) exception.cause. If no handler matches the exception or its + # cause, this returns +nil+, so you can deal with unhandled exceptions. + # Be sure to re-raise unhandled exceptions if this is what you expect. + # + # begin + # … + # rescue => exception + # rescue_with_handler(exception) || raise + # end + # + # Returns the exception if it was handled and +nil+ if it was not. 
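# A runnable sketch of rescue_from plus rescue_with_handler as documented above;
# PaymentJob and CardDeclined are made-up names for illustration:
require "active_support"
require "active_support/rescuable"

class PaymentJob
  include ActiveSupport::Rescuable

  class CardDeclined < StandardError; end

  rescue_from CardDeclined do |exception|
    warn "declined: #{exception.message}"
  end

  def perform
    raise CardDeclined, "insufficient funds"
  rescue => e
    rescue_with_handler(e) || raise   # re-raise anything without a registered handler
  end
end

PaymentJob.new.perform   # prints "declined: insufficient funds" to $stderr instead of raising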
+ def rescue_with_handler(exception, object: self, visited_exceptions: []) + visited_exceptions << exception + + if handler = handler_for_rescue(exception, object: object) + handler.call exception + exception + elsif exception + if visited_exceptions.include?(exception.cause) + nil + else + rescue_with_handler(exception.cause, object: object, visited_exceptions: visited_exceptions) + end + end + end + + def handler_for_rescue(exception, object: self) #:nodoc: + case rescuer = find_rescue_handler(exception) + when Symbol + method = object.method(rescuer) + if method.arity == 0 + -> e { method.call } + else + method + end + when Proc + if rescuer.arity == 0 + -> e { object.instance_exec(&rescuer) } + else + -> e { object.instance_exec(e, &rescuer) } + end + end + end + + private + def find_rescue_handler(exception) + if exception + # Handlers are in order of declaration but the most recently declared + # is the highest priority match, so we search for matching handlers + # in reverse. + _, handler = rescue_handlers.reverse_each.detect do |class_or_name, _| + if klass = constantize_rescue_handler_class(class_or_name) + klass === exception + end + end + + handler + end + end + + def constantize_rescue_handler_class(class_or_name) + case class_or_name + when String, Symbol + begin + # Try a lexical lookup first since we support + # + # class Super + # rescue_from 'Error', with: … + # end + # + # class Sub + # class Error < StandardError; end + # end + # + # so an Error raised in Sub will hit the 'Error' handler. + const_get class_or_name + rescue NameError + class_or_name.safe_constantize + end + else + class_or_name + end + end + end + + # Delegates to the class method, but uses the instance as the subject for + # rescue_from handlers (method calls, instance_exec blocks). + def rescue_with_handler(exception) + self.class.rescue_with_handler exception, object: self + end + + # Internal handler lookup. Delegates to class method. Some libraries call + # this directly, so keeping it around for compatibility. + def handler_for_rescue(exception) #:nodoc: + self.class.handler_for_rescue exception, object: self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/secure_compare_rotator.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/secure_compare_rotator.rb new file mode 100644 index 0000000..269703c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/secure_compare_rotator.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/security_utils" +require "active_support/messages/rotator" + +module ActiveSupport + # The ActiveSupport::SecureCompareRotator is a wrapper around +ActiveSupport::SecurityUtils.secure_compare+ + # and allows you to rotate a previously defined value to a new one. 
+ # + # It can be used as follow: + # + # rotator = ActiveSupport::SecureCompareRotator.new('new_production_value') + # rotator.rotate('previous_production_value') + # rotator.secure_compare!('previous_production_value') + # + # One real use case example would be to rotate a basic auth credentials: + # + # class MyController < ApplicationController + # def authenticate_request + # rotator = ActiveSupport::SecureComparerotator.new('new_password') + # rotator.rotate('old_password') + # + # authenticate_or_request_with_http_basic do |username, password| + # rotator.secure_compare!(password) + # rescue ActiveSupport::SecureCompareRotator::InvalidMatch + # false + # end + # end + # end + class SecureCompareRotator + include SecurityUtils + prepend Messages::Rotator + + InvalidMatch = Class.new(StandardError) + + def initialize(value, **_options) + @value = value + end + + def secure_compare!(other_value, on_rotation: @on_rotation) + secure_compare(@value, other_value) || + run_rotations(on_rotation) { |wrapper| wrapper.secure_compare!(other_value) } || + raise(InvalidMatch) + end + + private + def build_rotation(previous_value, _options) + self.class.new(previous_value) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/security_utils.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/security_utils.rb new file mode 100644 index 0000000..aa00474 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/security_utils.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module ActiveSupport + module SecurityUtils + # Constant time string comparison, for fixed length strings. + # + # The values compared should be of fixed length, such as strings + # that have already been processed by HMAC. Raises in case of length mismatch. + + if defined?(OpenSSL.fixed_length_secure_compare) + def fixed_length_secure_compare(a, b) + OpenSSL.fixed_length_secure_compare(a, b) + end + else + def fixed_length_secure_compare(a, b) + raise ArgumentError, "string length mismatch." unless a.bytesize == b.bytesize + + l = a.unpack "C#{a.bytesize}" + + res = 0 + b.each_byte { |byte| res |= byte ^ l.shift } + res == 0 + end + end + module_function :fixed_length_secure_compare + + # Secure string comparison for strings of variable length. + # + # While a timing attack would not be able to discern the content of + # a secret compared via secure_compare, it is possible to determine + # the secret length. This should be considered when using secure_compare + # to compare weak, short secrets to user input. + def secure_compare(a, b) + a.bytesize == b.bytesize && fixed_length_secure_compare(a, b) + end + module_function :secure_compare + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/string_inquirer.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/string_inquirer.rb new file mode 100644 index 0000000..5ff3f4b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/string_inquirer.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require "active_support/core_ext/symbol/starts_ends_with" + +module ActiveSupport + # Wrapping a string in this class gives you a prettier way to test + # for equality. The value returned by Rails.env is wrapped + # in a StringInquirer object, so instead of calling this: + # + # Rails.env == 'production' + # + # you can call this: + # + # Rails.env.production? 
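# A short sketch of the ActiveSupport::SecurityUtils helpers defined just above;
# the token strings are illustrative:
require "active_support"
require "active_support/security_utils"

ActiveSupport::SecurityUtils.secure_compare("tok-a", "tok-a")  # => true
ActiveSupport::SecurityUtils.secure_compare("tok-a", "tok-b")  # => false
ActiveSupport::SecurityUtils.secure_compare("tok-a", "tok")    # => false; only the length can leak
ActiveSupport::SecurityUtils.fixed_length_secure_compare("a" * 32, "b" * 31)
# raises ArgumentError because the byte sizes differ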
+ # + # == Instantiating a new StringInquirer + # + # vehicle = ActiveSupport::StringInquirer.new('car') + # vehicle.car? # => true + # vehicle.bike? # => false + class StringInquirer < String + private + def respond_to_missing?(method_name, include_private = false) + method_name.end_with?("?") || super + end + + def method_missing(method_name, *arguments) + if method_name.end_with?("?") + self == method_name[0..-2] + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/subscriber.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/subscriber.rb new file mode 100644 index 0000000..24f8681 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/subscriber.rb @@ -0,0 +1,174 @@ +# frozen_string_literal: true + +require "active_support/per_thread_registry" +require "active_support/notifications" + +module ActiveSupport + # ActiveSupport::Subscriber is an object set to consume + # ActiveSupport::Notifications. The subscriber dispatches notifications to + # a registered object based on its given namespace. + # + # An example would be an Active Record subscriber responsible for collecting + # statistics about queries: + # + # module ActiveRecord + # class StatsSubscriber < ActiveSupport::Subscriber + # attach_to :active_record + # + # def sql(event) + # Statsd.timing("sql.#{event.payload[:name]}", event.duration) + # end + # end + # end + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event (ActiveSupport::Notifications::Event) to + # the +sql+ method. + # + # We can detach a subscriber as well: + # + # ActiveRecord::StatsSubscriber.detach_from(:active_record) + class Subscriber + class << self + # Attach the subscriber to a namespace. + def attach_to(namespace, subscriber = new, notifier = ActiveSupport::Notifications, inherit_all: false) + @namespace = namespace + @subscriber = subscriber + @notifier = notifier + @inherit_all = inherit_all + + subscribers << subscriber + + # Add event subscribers for all existing methods on the class. + fetch_public_methods(subscriber, inherit_all).each do |event| + add_event_subscriber(event) + end + end + + # Detach the subscriber from a namespace. + def detach_from(namespace, notifier = ActiveSupport::Notifications) + @namespace = namespace + @subscriber = find_attached_subscriber + @notifier = notifier + + return unless subscriber + + subscribers.delete(subscriber) + + # Remove event subscribers of all existing methods on the class. + fetch_public_methods(subscriber, true).each do |event| + remove_event_subscriber(event) + end + + # Reset notifier so that event subscribers will not add for new methods added to the class. + @notifier = nil + end + + # Adds event subscribers for all new methods added to the class. + def method_added(event) + # Only public methods are added as subscribers, and only if a notifier + # has been set up. This means that subscribers will only be set up for + # classes that call #attach_to. + if public_method_defined?(event) && notifier + add_event_subscriber(event) + end + end + + def subscribers + @@subscribers ||= [] + end + + private + attr_reader :subscriber, :notifier, :namespace + + def add_event_subscriber(event) # :doc: + return if invalid_event?(event) + + pattern = prepare_pattern(event) + + # Don't add multiple subscribers (e.g. if methods are redefined). 
+ return if pattern_subscribed?(pattern) + + subscriber.patterns[pattern] = notifier.subscribe(pattern, subscriber) + end + + def remove_event_subscriber(event) # :doc: + return if invalid_event?(event) + + pattern = prepare_pattern(event) + + return unless pattern_subscribed?(pattern) + + notifier.unsubscribe(subscriber.patterns[pattern]) + subscriber.patterns.delete(pattern) + end + + def find_attached_subscriber + subscribers.find { |attached_subscriber| attached_subscriber.instance_of?(self) } + end + + def invalid_event?(event) + %i{ start finish }.include?(event.to_sym) + end + + def prepare_pattern(event) + "#{event}.#{namespace}" + end + + def pattern_subscribed?(pattern) + subscriber.patterns.key?(pattern) + end + + def fetch_public_methods(subscriber, inherit_all) + subscriber.public_methods(inherit_all) - Subscriber.public_instance_methods(true) + end + end + + attr_reader :patterns # :nodoc: + + def initialize + @queue_key = [self.class.name, object_id].join "-" + @patterns = {} + super + end + + def start(name, id, payload) + event = ActiveSupport::Notifications::Event.new(name, nil, nil, id, payload) + event.start! + parent = event_stack.last + parent << event if parent + + event_stack.push event + end + + def finish(name, id, payload) + event = event_stack.pop + event.finish! + event.payload.merge!(payload) + + method = name.split(".").first + send(method, event) + end + + private + def event_stack + SubscriberQueueRegistry.instance.get_queue(@queue_key) + end + end + + # This is a registry for all the event stacks kept for subscribers. + # + # See the documentation of ActiveSupport::PerThreadRegistry + # for further details. + class SubscriberQueueRegistry # :nodoc: + extend PerThreadRegistry + + def initialize + @registry = {} + end + + def get_queue(queue_key) + @registry[queue_key] ||= [] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/tagged_logging.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/tagged_logging.rb new file mode 100644 index 0000000..ed55101 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/tagged_logging.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/object/blank" +require "logger" +require "active_support/logger" + +module ActiveSupport + # Wraps any standard Logger object to provide tagging capabilities. + # + # May be called with a block: + # + # logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT)) + # logger.tagged('BCX') { logger.info 'Stuff' } # Logs "[BCX] Stuff" + # logger.tagged('BCX', "Jason") { logger.info 'Stuff' } # Logs "[BCX] [Jason] Stuff" + # logger.tagged('BCX') { logger.tagged('Jason') { logger.info 'Stuff' } } # Logs "[BCX] [Jason] Stuff" + # + # If called without a block, a new logger will be returned with applied tags: + # + # logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT)) + # logger.tagged("BCX").info "Stuff" # Logs "[BCX] Stuff" + # logger.tagged("BCX", "Jason").info "Stuff" # Logs "[BCX] [Jason] Stuff" + # logger.tagged("BCX").tagged("Jason").info "Stuff" # Logs "[BCX] [Jason] Stuff" + # + # This is used by the default Rails.logger as configured by Railties to make + # it easy to stamp log lines with subdomains, request ids, and anything else + # to aid debugging of multi-user production applications. 
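# A runnable sketch wiring the ActiveSupport::Subscriber above to Notifications;
# the :my_component namespace and TimingSubscriber name are made up for illustration:
require "active_support"
require "active_support/subscriber"

class TimingSubscriber < ActiveSupport::Subscriber
  attach_to :my_component

  # Invoked for every "render.my_component" event, per the dispatch rules above.
  def render(event)
    puts "render took #{event.duration.round(1)}ms"
  end
end

ActiveSupport::Notifications.instrument("render.my_component") { sleep 0.01 }
# prints something like "render took 10.3ms"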
+ module TaggedLogging + module Formatter # :nodoc: + # This method is invoked when a log event occurs. + def call(severity, timestamp, progname, msg) + super(severity, timestamp, progname, "#{tags_text}#{msg}") + end + + def tagged(*tags) + new_tags = push_tags(*tags) + yield self + ensure + pop_tags(new_tags.size) + end + + def push_tags(*tags) + tags.flatten! + tags.reject!(&:blank?) + current_tags.concat tags + tags + end + + def pop_tags(size = 1) + current_tags.pop size + end + + def clear_tags! + current_tags.clear + end + + def current_tags + # We use our object ID here to avoid conflicting with other instances + thread_key = @thread_key ||= "activesupport_tagged_logging_tags:#{object_id}" + Thread.current[thread_key] ||= [] + end + + def tags_text + tags = current_tags + if tags.one? + "[#{tags[0]}] " + elsif tags.any? + tags.collect { |tag| "[#{tag}] " }.join + end + end + end + + module LocalTagStorage # :nodoc: + attr_accessor :current_tags + + def self.extended(base) + base.current_tags = [] + end + end + + def self.new(logger) + logger = logger.dup + + if logger.formatter + logger.formatter = logger.formatter.dup + else + # Ensure we set a default formatter so we aren't extending nil! + logger.formatter = ActiveSupport::Logger::SimpleFormatter.new + end + + logger.formatter.extend Formatter + logger.extend(self) + end + + delegate :push_tags, :pop_tags, :clear_tags!, to: :formatter + + def tagged(*tags) + if block_given? + formatter.tagged(*tags) { yield self } + else + logger = ActiveSupport::TaggedLogging.new(self) + logger.formatter.extend LocalTagStorage + logger.push_tags(*formatter.current_tags, *tags) + logger + end + end + + def flush + clear_tags! + super if defined?(super) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/test_case.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/test_case.rb new file mode 100644 index 0000000..7be4108 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/test_case.rb @@ -0,0 +1,163 @@ +# frozen_string_literal: true + +gem "minitest" # make sure we get the gem, not stdlib +require "minitest" +require "active_support/testing/tagged_logging" +require "active_support/testing/setup_and_teardown" +require "active_support/testing/assertions" +require "active_support/testing/deprecation" +require "active_support/testing/declarative" +require "active_support/testing/isolation" +require "active_support/testing/constant_lookup" +require "active_support/testing/time_helpers" +require "active_support/testing/file_fixtures" +require "active_support/testing/parallelization" +require "concurrent/utility/processor_counter" + +module ActiveSupport + class TestCase < ::Minitest::Test + Assertion = Minitest::Assertion + + class << self + # Sets the order in which test cases are run. + # + # ActiveSupport::TestCase.test_order = :random # => :random + # + # Valid values are: + # * +:random+ (to run tests in random order) + # * +:parallel+ (to run tests in parallel) + # * +:sorted+ (to run tests alphabetically by method name) + # * +:alpha+ (equivalent to +:sorted+) + def test_order=(new_order) + ActiveSupport.test_order = new_order + end + + # Returns the order in which test cases are run. + # + # ActiveSupport::TestCase.test_order # => :random + # + # Possible values are +:random+, +:parallel+, +:alpha+, +:sorted+. + # Defaults to +:random+. + def test_order + ActiveSupport.test_order ||= :random + end + + # Parallelizes the test suite. 
+ # + # Takes a +workers+ argument that controls how many times the process + # is forked. For each process a new database will be created suffixed + # with the worker number. + # + # test-database-0 + # test-database-1 + # + # If ENV["PARALLEL_WORKERS"] is set the workers argument will be ignored + # and the environment variable will be used instead. This is useful for CI + # environments, or other environments where you may need more workers than + # you do for local testing. + # + # If the number of workers is set to +1+ or fewer, the tests will not be + # parallelized. + # + # If +workers+ is set to +:number_of_processors+, the number of workers will be + # set to the actual core count on the machine you are on. + # + # The default parallelization method is to fork processes. If you'd like to + # use threads instead you can pass with: :threads to the +parallelize+ + # method. Note the threaded parallelization does not create multiple + # database and will not work with system tests at this time. + # + # parallelize(workers: :number_of_processors, with: :threads) + # + # The threaded parallelization uses minitest's parallel executor directly. + # The processes parallelization uses a Ruby DRb server. + def parallelize(workers: :number_of_processors, with: :processes) + workers = Concurrent.physical_processor_count if workers == :number_of_processors + workers = ENV["PARALLEL_WORKERS"].to_i if ENV["PARALLEL_WORKERS"] + + return if workers <= 1 + + executor = case with + when :processes + Testing::Parallelization.new(workers) + when :threads + Minitest::Parallel::Executor.new(workers) + else + raise ArgumentError, "#{with} is not a supported parallelization executor." + end + + self.lock_threads = false if defined?(self.lock_threads) && with == :threads + + Minitest.parallel_executor = executor + + parallelize_me! + end + + # Set up hook for parallel testing. This can be used if you have multiple + # databases or any behavior that needs to be run after the process is forked + # but before the tests run. + # + # Note: this feature is not available with the threaded parallelization. + # + # In your +test_helper.rb+ add the following: + # + # class ActiveSupport::TestCase + # parallelize_setup do + # # create databases + # end + # end + def parallelize_setup(&block) + ActiveSupport::Testing::Parallelization.after_fork_hook do |worker| + yield worker + end + end + + # Clean up hook for parallel testing. This can be used to drop databases + # if your app uses multiple write/read databases or other clean up before + # the tests finish. This runs before the forked process is closed. + # + # Note: this feature is not available with the threaded parallelization. 
+ # + # In your +test_helper.rb+ add the following: + # + # class ActiveSupport::TestCase + # parallelize_teardown do + # # drop databases + # end + # end + def parallelize_teardown(&block) + ActiveSupport::Testing::Parallelization.run_cleanup_hook do |worker| + yield worker + end + end + end + + alias_method :method_name, :name + + include ActiveSupport::Testing::TaggedLogging + prepend ActiveSupport::Testing::SetupAndTeardown + include ActiveSupport::Testing::Assertions + include ActiveSupport::Testing::Deprecation + include ActiveSupport::Testing::TimeHelpers + include ActiveSupport::Testing::FileFixtures + extend ActiveSupport::Testing::Declarative + + # test/unit backwards compatibility methods + alias :assert_raise :assert_raises + alias :assert_not_empty :refute_empty + alias :assert_not_equal :refute_equal + alias :assert_not_in_delta :refute_in_delta + alias :assert_not_in_epsilon :refute_in_epsilon + alias :assert_not_includes :refute_includes + alias :assert_not_instance_of :refute_instance_of + alias :assert_not_kind_of :refute_kind_of + alias :assert_no_match :refute_match + alias :assert_not_nil :refute_nil + alias :assert_not_operator :refute_operator + alias :assert_not_predicate :refute_predicate + alias :assert_not_respond_to :refute_respond_to + alias :assert_not_same :refute_same + + ActiveSupport.run_load_hooks(:active_support_test_case, self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/assertions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/assertions.rb new file mode 100644 index 0000000..d564cad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/assertions.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +require "active_support/core_ext/enumerable" + +module ActiveSupport + module Testing + module Assertions + UNTRACKED = Object.new # :nodoc: + + # Asserts that an expression is not truthy. Passes if object is + # +nil+ or +false+. "Truthy" means "considered true in a conditional" + # like if foo. + # + # assert_not nil # => true + # assert_not false # => true + # assert_not 'foo' # => Expected "foo" to be nil or false + # + # An error message can be specified. + # + # assert_not foo, 'foo should be false' + def assert_not(object, message = nil) + message ||= "Expected #{mu_pp(object)} to be nil or false" + assert !object, message + end + + # Assertion that the block should not raise an exception. + # + # Passes if evaluated code in the yielded block raises no exception. + # + # assert_nothing_raised do + # perform_service(param: 'no_exception') + # end + def assert_nothing_raised + yield + rescue => error + raise Minitest::UnexpectedError.new(error) + end + + # Test numeric difference between the return value of an expression as a + # result of what is evaluated in the yielded block. + # + # assert_difference 'Article.count' do + # post :create, params: { article: {...} } + # end + # + # An arbitrary expression is passed in and evaluated. + # + # assert_difference 'Article.last.comments(:reload).size' do + # post :create, params: { comment: {...} } + # end + # + # An arbitrary positive or negative difference can be specified. + # The default is 1. + # + # assert_difference 'Article.count', -1 do + # post :delete, params: { id: ... } + # end + # + # An array of expressions can also be passed in and evaluated. 
+ # + # assert_difference [ 'Article.count', 'Post.count' ], 2 do + # post :create, params: { article: {...} } + # end + # + # A hash of expressions/numeric differences can also be passed in and evaluated. + # + # assert_difference ->{ Article.count } => 1, ->{ Notification.count } => 2 do + # post :create, params: { article: {...} } + # end + # + # A lambda or a list of lambdas can be passed in and evaluated: + # + # assert_difference ->{ Article.count }, 2 do + # post :create, params: { article: {...} } + # end + # + # assert_difference [->{ Article.count }, ->{ Post.count }], 2 do + # post :create, params: { article: {...} } + # end + # + # An error message can be specified. + # + # assert_difference 'Article.count', -1, 'An Article should be destroyed' do + # post :delete, params: { id: ... } + # end + def assert_difference(expression, *args, &block) + expressions = + if expression.is_a?(Hash) + message = args[0] + expression + else + difference = args[0] || 1 + message = args[1] + Array(expression).index_with(difference) + end + + exps = expressions.keys.map { |e| + e.respond_to?(:call) ? e : lambda { eval(e, block.binding) } + } + before = exps.map(&:call) + + retval = assert_nothing_raised(&block) + + expressions.zip(exps, before) do |(code, diff), exp, before_value| + error = "#{code.inspect} didn't change by #{diff}" + error = "#{message}.\n#{error}" if message + assert_equal(before_value + diff, exp.call, error) + end + + retval + end + + # Assertion that the numeric result of evaluating an expression is not + # changed before and after invoking the passed in block. + # + # assert_no_difference 'Article.count' do + # post :create, params: { article: invalid_attributes } + # end + # + # A lambda can be passed in and evaluated. + # + # assert_no_difference -> { Article.count } do + # post :create, params: { article: invalid_attributes } + # end + # + # An error message can be specified. + # + # assert_no_difference 'Article.count', 'An Article should not be created' do + # post :create, params: { article: invalid_attributes } + # end + # + # An array of expressions can also be passed in and evaluated. + # + # assert_no_difference [ 'Article.count', -> { Post.count } ] do + # post :create, params: { article: invalid_attributes } + # end + def assert_no_difference(expression, message = nil, &block) + assert_difference expression, 0, message, &block + end + + # Assertion that the result of evaluating an expression is changed before + # and after invoking the passed in block. + # + # assert_changes 'Status.all_good?' do + # post :create, params: { status: { ok: false } } + # end + # + # You can pass the block as a string to be evaluated in the context of + # the block. A lambda can be passed for the block as well. + # + # assert_changes -> { Status.all_good? } do + # post :create, params: { status: { ok: false } } + # end + # + # The assertion is useful to test side effects. The passed block can be + # anything that can be converted to string with #to_s. + # + # assert_changes :@object do + # @object = 42 + # end + # + # The keyword arguments :from and :to can be given to specify the + # expected initial value and the expected value after the block was + # executed. + # + # assert_changes :@object, from: nil, to: :foo do + # @object = :foo + # end + # + # An error message can be specified. + # + # assert_changes -> { Status.all_good? 
}, 'Expected the status to be bad' do + # post :create, params: { status: { incident: true } } + # end + def assert_changes(expression, message = nil, from: UNTRACKED, to: UNTRACKED, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = assert_nothing_raised(&block) + + unless from == UNTRACKED + error = "Expected change from #{from.inspect}" + error = "#{message}.\n#{error}" if message + assert from === before, error + end + + after = exp.call + + error = "#{expression.inspect} didn't change" + error = "#{error}. It was already #{to}" if before == to + error = "#{message}.\n#{error}" if message + assert_not_equal before, after, error + + unless to == UNTRACKED + error = "Expected change to #{to}\n" + error = "#{message}.\n#{error}" if message + assert to === after, error + end + + retval + end + + # Assertion that the result of evaluating an expression is not changed before + # and after invoking the passed in block. + # + # assert_no_changes 'Status.all_good?' do + # post :create, params: { status: { ok: true } } + # end + # + # An error message can be specified. + # + # assert_no_changes -> { Status.all_good? }, 'Expected the status to be good' do + # post :create, params: { status: { ok: false } } + # end + def assert_no_changes(expression, message = nil, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = assert_nothing_raised(&block) + after = exp.call + + error = "#{expression.inspect} changed" + error = "#{message}.\n#{error}" if message + + if before.nil? + assert_nil after, error + else + assert_equal before, after, error + end + + retval + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/autorun.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/autorun.rb new file mode 100644 index 0000000..889b416 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/autorun.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +gem "minitest" + +require "minitest" + +Minitest.autorun diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/constant_lookup.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/constant_lookup.rb new file mode 100644 index 0000000..51167e9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/constant_lookup.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/inflector" + +module ActiveSupport + module Testing + # Resolves a constant from a minitest spec name. + # + # Given the following spec-style test: + # + # describe WidgetsController, :index do + # describe "authenticated user" do + # describe "returns widgets" do + # it "has a controller that exists" do + # assert_kind_of WidgetsController, @controller + # end + # end + # end + # end + # + # The test will have the following name: + # + # "WidgetsController::index::authenticated user::returns widgets" + # + # The constant WidgetsController can be resolved from the name. 
+ # The following code will resolve the constant: + # + # controller = determine_constant_from_test_name(name) do |constant| + # Class === constant && constant < ::ActionController::Metal + # end + module ConstantLookup + extend ::ActiveSupport::Concern + + module ClassMethods # :nodoc: + def determine_constant_from_test_name(test_name) + names = test_name.split "::" + while names.size > 0 do + names.last.sub!(/Test$/, "") + begin + constant = names.join("::").safe_constantize + break(constant) if yield(constant) + ensure + names.pop + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/declarative.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/declarative.rb new file mode 100644 index 0000000..7c34036 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/declarative.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Declarative + unless defined?(Spec) + # Helper to define a test method using a String. Under the hood, it replaces + # spaces with underscores and defines the test method. + # + # test "verify something" do + # ... + # end + def test(name, &block) + test_name = "test_#{name.gsub(/\s+/, '_')}".to_sym + defined = method_defined? test_name + raise "#{test_name} is already defined in #{self}" if defined + if block_given? + define_method(test_name, &block) + else + define_method(test_name) do + flunk "No implementation provided for #{name}" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/deprecation.rb new file mode 100644 index 0000000..18d63d2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/deprecation.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "active_support/deprecation" + +module ActiveSupport + module Testing + module Deprecation #:nodoc: + def assert_deprecated(match = nil, deprecator = nil, &block) + result, warnings = collect_deprecations(deprecator, &block) + assert !warnings.empty?, "Expected a deprecation warning within the block but received none" + if match + match = Regexp.new(Regexp.escape(match)) unless match.is_a?(Regexp) + assert warnings.any? 
{ |w| match.match?(w) }, "No deprecation warning matched #{match}: #{warnings.join(', ')}" + end + result + end + + def assert_not_deprecated(deprecator = nil, &block) + result, deprecations = collect_deprecations(deprecator, &block) + assert deprecations.empty?, "Expected no deprecation warning within the block but received #{deprecations.size}: \n #{deprecations * "\n "}" + result + end + + def collect_deprecations(deprecator = nil) + deprecator ||= ActiveSupport::Deprecation + old_behavior = deprecator.behavior + deprecations = [] + deprecator.behavior = Proc.new do |message, callstack| + deprecations << message + end + result = yield + [result, deprecations] + ensure + deprecator.behavior = old_behavior + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/file_fixtures.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/file_fixtures.rb new file mode 100644 index 0000000..4eb7a88 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/file_fixtures.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "active_support/concern" + +module ActiveSupport + module Testing + # Adds simple access to sample files called file fixtures. + # File fixtures are normal files stored in + # ActiveSupport::TestCase.file_fixture_path. + # + # File fixtures are represented as +Pathname+ objects. + # This makes it easy to extract specific information: + # + # file_fixture("example.txt").read # get the file's content + # file_fixture("example.mp3").size # get the file size + module FileFixtures + extend ActiveSupport::Concern + + included do + class_attribute :file_fixture_path, instance_writer: false + end + + # Returns a +Pathname+ to the fixture file named +fixture_name+. + # + # Raises +ArgumentError+ if +fixture_name+ can't be found. + def file_fixture(fixture_name) + path = Pathname.new(File.join(file_fixture_path, fixture_name)) + + if path.exist? + path + else + msg = "the directory '%s' does not contain a file named '%s'" + raise ArgumentError, msg % [file_fixture_path, fixture_name] + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/isolation.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/isolation.rb new file mode 100644 index 0000000..652a10d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/isolation.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Isolation + require "thread" + + def self.included(klass) #:nodoc: + klass.class_eval do + parallelize_me! + end + end + + def self.forking_env? + !ENV["NO_FORK"] && Process.respond_to?(:fork) + end + + def run + serialized = run_in_isolation do + super + end + + Marshal.load(serialized) + end + + module Forking + def run_in_isolation(&blk) + read, write = IO.pipe + read.binmode + write.binmode + + pid = fork do + read.close + yield + begin + if error? + failures.map! { |e| + begin + Marshal.dump e + e + rescue TypeError + ex = Exception.new e.message + ex.set_backtrace e.backtrace + Minitest::UnexpectedError.new ex + end + } + end + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + result = Marshal.dump(test_result) + end + + write.puts [result].pack("m") + exit! 
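+        # exit! above bypasses at_exit handlers (such as Minitest's autorun hook),
+        # so the forked child stops as soon as the marshalled result reaches the pipe.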
+ end + + write.close + result = read.read + Process.wait2(pid) + result.unpack1("m") + end + end + + module Subprocess + ORIG_ARGV = ARGV.dup unless defined?(ORIG_ARGV) + + # Crazy H4X to get this working in windows / jruby with + # no forking. + def run_in_isolation(&blk) + require "tempfile" + + if ENV["ISOLATION_TEST"] + yield + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + File.open(ENV["ISOLATION_OUTPUT"], "w") do |file| + file.puts [Marshal.dump(test_result)].pack("m") + end + exit! + else + Tempfile.open("isolation") do |tmpfile| + env = { + "ISOLATION_TEST" => self.class.name, + "ISOLATION_OUTPUT" => tmpfile.path + } + + test_opts = "-n#{self.class.name}##{name}" + + load_path_args = [] + $-I.each do |p| + load_path_args << "-I" + load_path_args << File.expand_path(p) + end + + child = IO.popen([env, Gem.ruby, *load_path_args, $0, *ORIG_ARGV, test_opts]) + + begin + Process.wait(child.pid) + rescue Errno::ECHILD # The child process may exit before we wait + nil + end + + return tmpfile.read.unpack1("m") + end + end + end + end + + include forking_env? ? Forking : Subprocess + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/method_call_assertions.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/method_call_assertions.rb new file mode 100644 index 0000000..03c38be --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/method_call_assertions.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require "minitest/mock" + +module ActiveSupport + module Testing + module MethodCallAssertions # :nodoc: + private + def assert_called(object, method_name, message = nil, times: 1, returns: nil) + times_called = 0 + + object.stub(method_name, proc { times_called += 1; returns }) { yield } + + error = "Expected #{method_name} to be called #{times} times, " \ + "but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + assert_equal times, times_called, error + end + + def assert_called_with(object, method_name, args, returns: nil) + mock = Minitest::Mock.new + + if args.all? 
{ |arg| arg.is_a?(Array) } + args.each { |arg| mock.expect(:call, returns, arg) } + else + mock.expect(:call, returns, args) + end + + object.stub(method_name, mock) { yield } + + mock.verify + end + + def assert_not_called(object, method_name, message = nil, &block) + assert_called(object, method_name, message, times: 0, &block) + end + + def assert_called_on_instance_of(klass, method_name, message = nil, times: 1, returns: nil) + times_called = 0 + klass.define_method("stubbed_#{method_name}") do |*| + times_called += 1 + + returns + end + + klass.alias_method "original_#{method_name}", method_name + klass.alias_method method_name, "stubbed_#{method_name}" + + yield + + error = "Expected #{method_name} to be called #{times} times, but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + + assert_equal times, times_called, error + ensure + klass.alias_method method_name, "original_#{method_name}" + klass.undef_method "original_#{method_name}" + klass.undef_method "stubbed_#{method_name}" + end + + def assert_not_called_on_instance_of(klass, method_name, message = nil, &block) + assert_called_on_instance_of(klass, method_name, message, times: 0, &block) + end + + def stub_any_instance(klass, instance: klass.new) + klass.stub(:new, instance) { yield instance } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization.rb new file mode 100644 index 0000000..fc6e689 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "drb" +require "drb/unix" unless Gem.win_platform? +require "active_support/core_ext/module/attribute_accessors" +require "active_support/testing/parallelization/server" +require "active_support/testing/parallelization/worker" + +module ActiveSupport + module Testing + class Parallelization # :nodoc: + @@after_fork_hooks = [] + + def self.after_fork_hook(&blk) + @@after_fork_hooks << blk + end + + cattr_reader :after_fork_hooks + + @@run_cleanup_hooks = [] + + def self.run_cleanup_hook(&blk) + @@run_cleanup_hooks << blk + end + + cattr_reader :run_cleanup_hooks + + def initialize(worker_count) + @worker_count = worker_count + @queue_server = Server.new + @worker_pool = [] + @url = DRb.start_service("drbunix:", @queue_server).uri + end + + def start + @worker_pool = @worker_count.times.map do |worker| + Worker.new(worker, @url).start + end + end + + def <<(work) + @queue_server << work + end + + def shutdown + @queue_server.shutdown + @worker_pool.each { |pid| Process.waitpid pid } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization/server.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization/server.rb new file mode 100644 index 0000000..492101e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization/server.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +require "drb" +require "drb/unix" unless Gem.win_platform? 
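+# The Server runs in the parent process: work items of the form
+# [test_class, method_name, reporter] are pushed in with #<<, workers pull them
+# with #pop, and finished results come back through #record, which hands them
+# to the shared reporter.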
+ +module ActiveSupport + module Testing + class Parallelization # :nodoc: + class Server + include DRb::DRbUndumped + + def initialize + @queue = Queue.new + @active_workers = Concurrent::Map.new + @in_flight = Concurrent::Map.new + end + + def record(reporter, result) + raise DRb::DRbConnError if result.is_a?(DRb::DRbUnknown) + + @in_flight.delete([result.klass, result.name]) + + reporter.synchronize do + reporter.record(result) + end + end + + def <<(o) + o[2] = DRbObject.new(o[2]) if o + @queue << o + end + + def pop + if test = @queue.pop + @in_flight[[test[0].to_s, test[1]]] = test + test + end + end + + def start_worker(worker_id) + @active_workers[worker_id] = true + end + + def stop_worker(worker_id) + @active_workers.delete(worker_id) + end + + def active_workers? + @active_workers.size > 0 + end + + def shutdown + # Wait for initial queue to drain + while @queue.length != 0 + sleep 0.1 + end + + @queue.close + + # Wait until all workers have finished + while active_workers? + sleep 0.1 + end + + @in_flight.values.each do |(klass, name, reporter)| + result = Minitest::Result.from(klass.new(name)) + error = RuntimeError.new("result not reported") + error.set_backtrace([""]) + result.failures << Minitest::UnexpectedError.new(error) + reporter.synchronize do + reporter.record(result) + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization/worker.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization/worker.rb new file mode 100644 index 0000000..60c504a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/parallelization/worker.rb @@ -0,0 +1,100 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + class Parallelization # :nodoc: + class Worker + def initialize(number, url) + @id = SecureRandom.uuid + @number = number + @url = url + @setup_exception = nil + end + + def start + fork do + set_process_title("(starting)") + + DRb.stop_service + + @queue = DRbObject.new_with_uri(@url) + @queue.start_worker(@id) + + begin + after_fork + rescue => @setup_exception; end + + work_from_queue + ensure + set_process_title("(stopping)") + + run_cleanup + @queue.stop_worker(@id) + end + end + + def work_from_queue + while job = @queue.pop + perform_job(job) + end + end + + def perform_job(job) + klass = job[0] + method = job[1] + reporter = job[2] + + set_process_title("#{klass}##{method}") + + result = klass.with_info_handler reporter do + Minitest.run_one_method(klass, method) + end + + safe_record(reporter, result) + end + + def safe_record(reporter, result) + add_setup_exception(result) if @setup_exception + + begin + @queue.record(reporter, result) + rescue DRb::DRbConnError + result.failures.map! 
do |failure| + if failure.respond_to?(:error) + # minitest >5.14.0 + error = DRb::DRbRemoteError.new(failure.error) + else + error = DRb::DRbRemoteError.new(failure.exception) + end + Minitest::UnexpectedError.new(error) + end + @queue.record(reporter, result) + end + + set_process_title("(idle)") + end + + def after_fork + Parallelization.after_fork_hooks.each do |cb| + cb.call(@number) + end + end + + def run_cleanup + Parallelization.run_cleanup_hooks.each do |cb| + cb.call(@number) + end + end + + private + def add_setup_exception(result) + result.failures.prepend Minitest::UnexpectedError.new(@setup_exception) + end + + def set_process_title(status) + Process.setproctitle("Rails test worker #{@number} - #{status}") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/setup_and_teardown.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/setup_and_teardown.rb new file mode 100644 index 0000000..35321cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/setup_and_teardown.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/callbacks" + +module ActiveSupport + module Testing + # Adds support for +setup+ and +teardown+ callbacks. + # These callbacks serve as a replacement to overwriting the + # #setup and #teardown methods of your TestCase. + # + # class ExampleTest < ActiveSupport::TestCase + # setup do + # # ... + # end + # + # teardown do + # # ... + # end + # end + module SetupAndTeardown + def self.prepended(klass) + klass.include ActiveSupport::Callbacks + klass.define_callbacks :setup, :teardown + klass.extend ClassMethods + end + + module ClassMethods + # Add a callback, which runs before TestCase#setup. + def setup(*args, &block) + set_callback(:setup, :before, *args, &block) + end + + # Add a callback, which runs after TestCase#teardown. 
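+        # A callback can also be registered by method name instead of a block
+        # (+log_out_user+ here is a hypothetical helper defined on the test case):
+        #
+        #   teardown :log_out_user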
+ def teardown(*args, &block) + set_callback(:teardown, :after, *args, &block) + end + end + + def before_setup # :nodoc: + super + run_callbacks :setup + end + + def after_teardown # :nodoc: + begin + run_callbacks :teardown + rescue => e + self.failures << Minitest::UnexpectedError.new(e) + end + + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/stream.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/stream.rb new file mode 100644 index 0000000..f895a74 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/stream.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Stream #:nodoc: + private + def silence_stream(stream) + old_stream = stream.dup + stream.reopen(IO::NULL) + stream.sync = true + yield + ensure + stream.reopen(old_stream) + old_stream.close + end + + def quietly + silence_stream(STDOUT) do + silence_stream(STDERR) do + yield + end + end + end + + def capture(stream) + stream = stream.to_s + captured_stream = Tempfile.new(stream) + stream_io = eval("$#{stream}") + origin_stream = stream_io.dup + stream_io.reopen(captured_stream) + + yield + + stream_io.rewind + captured_stream.read + ensure + captured_stream.close + captured_stream.unlink + stream_io.reopen(origin_stream) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/tagged_logging.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/tagged_logging.rb new file mode 100644 index 0000000..9ca50c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/tagged_logging.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + # Logs a "PostsControllerTest: test name" heading before each test to + # make test.log easier to search and follow along with. + module TaggedLogging #:nodoc: + attr_writer :tagged_logger + + def before_setup + if tagged_logger && tagged_logger.info? + heading = "#{self.class}: #{name}" + divider = "-" * heading.size + tagged_logger.info divider + tagged_logger.info heading + tagged_logger.info divider + end + super + end + + private + def tagged_logger + @tagged_logger ||= (defined?(Rails.logger) && Rails.logger) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/time_helpers.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/time_helpers.rb new file mode 100644 index 0000000..ec97381 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/testing/time_helpers.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/time/calculations" +require "concurrent/map" + +module ActiveSupport + module Testing + # Manages stubs for TimeHelpers + class SimpleStubs # :nodoc: + Stub = Struct.new(:object, :method_name, :original_method) + + def initialize + @stubs = Concurrent::Map.new { |h, k| h[k] = {} } + end + + # Stubs object.method_name with the given block + # If the method is already stubbed, remove that stub + # so that removing this stub will restore the original implementation. 
+ # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # target = Time.zone.local(2004, 11, 24, 1, 4, 44) + # simple_stubs.stub_object(Time, :now) { at(target.to_i) } + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + def stub_object(object, method_name, &block) + if stub = stubbing(object, method_name) + unstub_object(stub) + end + + new_name = "__simple_stub__#{method_name}" + + @stubs[object.object_id][method_name] = Stub.new(object, method_name, new_name) + + object.singleton_class.alias_method new_name, method_name + object.define_singleton_method(method_name, &block) + end + + # Remove all object-method stubs held by this instance + def unstub_all! + @stubs.each_value do |object_stubs| + object_stubs.each_value do |stub| + unstub_object(stub) + end + end + @stubs.clear + end + + # Returns the Stub for object#method_name + # (nil if it is not stubbed) + def stubbing(object, method_name) + @stubs[object.object_id][method_name] + end + + # Returns true if any stubs are set, false if there are none + def stubbed? + !@stubs.empty? + end + + private + # Restores the original object.method described by the Stub + def unstub_object(stub) + singleton_class = stub.object.singleton_class + singleton_class.silence_redefinition_of_method stub.method_name + singleton_class.alias_method stub.method_name, stub.original_method + singleton_class.undef_method stub.original_method + end + end + + # Contains helpers that help you test passage of time. + module TimeHelpers + def after_teardown + travel_back + super + end + + # Changes current time to the time in the future or in the past by a given time difference by + # stubbing +Time.now+, +Date.today+, and +DateTime.now+. The stubs are automatically removed + # at the end of the test. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day + # Time.current # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # Date.current # => Sun, 10 Nov 2013 + # DateTime.current # => Sun, 10 Nov 2013 15:34:49 -0500 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day do + # User.create.created_at # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel(duration, &block) + travel_to Time.now + duration, &block + end + + # Changes current time to the given time by stubbing +Time.now+, + # +Date.today+, and +DateTime.now+ to return the time or date passed into this method. + # The stubs are automatically removed at the end of the test. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # Date.current # => Wed, 24 Nov 2004 + # DateTime.current # => Wed, 24 Nov 2004 01:04:44 -0500 + # + # Dates are taken as their timestamp at the beginning of the day in the + # application time zone. Time.current returns said timestamp, + # and Time.now its equivalent in the system time zone. Similarly, + # Date.current returns a date equal to the argument, and + # Date.today the date according to Time.now, which may + # be different. (Note that you rarely want to deal with Time.now, + # or Date.today, in order to honor the application time zone + # please always use Time.current and Date.current.) 
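+      #
+      # For example, passing a Date travels to midnight of that day in the
+      # application time zone (illustrative values):
+      #
+      #   Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)'
+      #   travel_to Date.new(2004, 11, 24)
+      #   Time.current # => Wed, 24 Nov 2004 00:00:00 EST -05:00
+      #   Date.current # => Wed, 24 Nov 2004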
+ # + # Note that the usec for the time passed will be set to 0 to prevent rounding + # errors with external services, like MySQL (which will round instead of floor, + # leading to off-by-one-second errors). + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) do + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel_to(date_or_time) + if block_given? && simple_stubs.stubbing(Time, :now) + travel_to_nested_block_call = <<~MSG + + Calling `travel_to` with a block, when we have previously already made a call to `travel_to`, can lead to confusing time stubbing. + + Instead of: + + travel_to 2.days.from_now do + # 2 days from today + travel_to 3.days.from_now do + # 5 days from today + end + end + + preferred way to achieve above is: + + travel 2.days do + # 2 days from today + end + + travel 5.days do + # 5 days from today + end + + MSG + raise travel_to_nested_block_call + end + + if date_or_time.is_a?(Date) && !date_or_time.is_a?(DateTime) + now = date_or_time.midnight.to_time + else + now = date_or_time.to_time.change(usec: 0) + end + + simple_stubs.stub_object(Time, :now) { at(now.to_i) } + simple_stubs.stub_object(Date, :today) { jd(now.to_date.jd) } + simple_stubs.stub_object(DateTime, :now) { jd(now.to_date.jd, now.hour, now.min, now.sec, Rational(now.utc_offset, 86400)) } + + if block_given? + begin + yield + ensure + travel_back + end + end + end + + # Returns the current time back to its original state, by removing the stubs added by + # +travel+, +travel_to+, and +freeze_time+. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # + # travel_back + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # + # This method also accepts a block, which brings the stubs back at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # + # travel_back do + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # end + # + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + def travel_back + stubbed_time = Time.current if block_given? && simple_stubs.stubbed? + + simple_stubs.unstub_all! + yield if block_given? + ensure + travel_to stubbed_time if stubbed_time + end + alias_method :unfreeze_time, :travel_back + + # Calls +travel_to+ with +Time.now+. 
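+      # The stubs stay in place until +travel_back+ (also available as
+      # +unfreeze_time+) is called, which happens automatically at the end of the test.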
+ # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time + # sleep(1) + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time do + # sleep(1) + # User.create.created_at # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # end + # Time.current # => Sun, 09 Jul 2017 15:34:50 EST -05:00 + def freeze_time(&block) + travel_to Time.now, &block + end + + private + def simple_stubs + @simple_stubs ||= SimpleStubs.new + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/time.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/time.rb new file mode 100644 index 0000000..5185467 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/time.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + autoload :Duration, "active_support/duration" + autoload :TimeWithZone, "active_support/time_with_zone" + autoload :TimeZone, "active_support/values/time_zone" +end + +require "date" +require "time" + +require "active_support/core_ext/time" +require "active_support/core_ext/date" +require "active_support/core_ext/date_time" + +require "active_support/core_ext/integer/time" +require "active_support/core_ext/numeric/time" + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/time_with_zone.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/time_with_zone.rb new file mode 100644 index 0000000..03d49d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/time_with_zone.rb @@ -0,0 +1,585 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/values/time_zone" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + # A Time-like class that can represent a time in any time zone. Necessary + # because standard Ruby Time instances are limited to UTC and the + # system's ENV['TZ'] zone. + # + # You shouldn't ever need to create a TimeWithZone instance directly via +new+. + # Instead use methods +local+, +parse+, +at+ and +now+ on TimeZone instances, + # and +in_time_zone+ on Time and DateTime instances. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.local(2007, 2, 10, 15, 30, 45) # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # Time.zone.parse('2007-02-10 15:30:45') # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # Time.zone.at(1171139445) # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # Time.zone.now # => Sun, 18 May 2008 13:07:55.754107581 EDT -04:00 + # Time.utc(2007, 2, 10, 20, 30, 45).in_time_zone # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # + # See Time and TimeZone for further documentation of these methods. + # + # TimeWithZone instances implement the same API as Ruby Time instances, so + # that Time and TimeWithZone instances are interchangeable. + # + # t = Time.zone.now # => Sun, 18 May 2008 13:27:25.031505668 EDT -04:00 + # t.hour # => 13 + # t.dst? 
# => true + # t.utc_offset # => -14400 + # t.zone # => "EDT" + # t.to_s(:rfc822) # => "Sun, 18 May 2008 13:27:25 -0400" + # t + 1.day # => Mon, 19 May 2008 13:27:25.031505668 EDT -04:00 + # t.beginning_of_year # => Tue, 01 Jan 2008 00:00:00.000000000 EST -05:00 + # t > Time.utc(1999) # => true + # t.is_a?(Time) # => true + # t.is_a?(ActiveSupport::TimeWithZone) # => true + class TimeWithZone + # Report class name as 'Time' to thwart type checking. + def self.name + "Time" + end + + PRECISIONS = Hash.new { |h, n| h[n] = "%FT%T.%#{n}N" } + PRECISIONS[0] = "%FT%T" + + include Comparable, DateAndTime::Compatibility + attr_reader :time_zone + + def initialize(utc_time, time_zone, local_time = nil, period = nil) + @utc = utc_time ? transfer_time_values_to_utc_constructor(utc_time) : nil + @time_zone, @time = time_zone, local_time + @period = @utc ? period : get_period_and_ensure_valid_local_time(period) + end + + # Returns a Time instance that represents the time in +time_zone+. + def time + @time ||= incorporate_utc_offset(@utc, utc_offset) + end + + # Returns a Time instance of the simultaneous time in the UTC timezone. + def utc + @utc ||= incorporate_utc_offset(@time, -utc_offset) + end + alias_method :comparable_time, :utc + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns the underlying TZInfo::TimezonePeriod. + def period + @period ||= time_zone.period_for_utc(@utc) + end + + # Returns the simultaneous time in Time.zone, or the specified zone. + def in_time_zone(new_zone = ::Time.zone) + return self if time_zone == new_zone + utc.in_time_zone(new_zone) + end + + # Returns a Time instance of the simultaneous time in the system timezone. + def localtime(utc_offset = nil) + utc.getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns true if the current time is within Daylight Savings Time for the + # specified time zone. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.parse("2012-5-30").dst? # => true + # Time.zone.parse("2012-11-30").dst? # => false + def dst? + period.dst? + end + alias_method :isdst, :dst? + + # Returns true if the current time zone is set to UTC. + # + # Time.zone = 'UTC' # => 'UTC' + # Time.zone.now.utc? # => true + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.now.utc? # => false + def utc? + zone == "UTC" || zone == "UCT" + end + alias_method :gmt?, :utc? + + # Returns the offset from current time to UTC time in seconds. + def utc_offset + period.observed_utc_offset + end + alias_method :gmt_offset, :utc_offset + alias_method :gmtoff, :utc_offset + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.formatted_offset(true) # => "-05:00" + # Time.zone.now.formatted_offset(false) # => "-0500" + # Time.zone = 'UTC' # => "UTC" + # Time.zone.now.formatted_offset(true, "0") # => "0" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? && alternate_utc_string || TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Returns the time zone abbreviation. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.zone # => "EST" + def zone + period.abbreviation + end + + # Returns a string of the object's date, time, zone, and offset from UTC. 
+ # + # Time.zone.now.inspect # => "Thu, 04 Dec 2014 11:00:25.624541392 EST -05:00" + def inspect + "#{time.strftime('%a, %d %b %Y %H:%M:%S.%9N')} #{zone} #{formatted_offset}" + end + + # Returns a string of the object's date and time in the ISO 8601 standard + # format. + # + # Time.zone.now.xmlschema # => "2014-12-04T11:02:37-05:00" + def xmlschema(fraction_digits = 0) + "#{time.strftime(PRECISIONS[fraction_digits.to_i])}#{formatted_offset(true, 'Z')}" + end + alias_method :iso8601, :xmlschema + alias_method :rfc3339, :xmlschema + + # Coerces time to a string for JSON encoding. The default format is ISO 8601. + # You can get %Y/%m/%d %H:%M:%S +offset style by setting + # ActiveSupport::JSON::Encoding.use_standard_json_time_format + # to +false+. + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = true + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005-02-01T05:15:10.000-10:00" + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = false + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005/02/01 05:15:10 -1000" + def as_json(options = nil) + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{time.strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end + + def init_with(coder) #:nodoc: + initialize(coder["utc"], coder["zone"], coder["time"]) + end + + def encode_with(coder) #:nodoc: + coder.tag = "!ruby/object:ActiveSupport::TimeWithZone" + coder.map = { "utc" => utc, "zone" => time_zone, "time" => time } + end + + # Returns a string of the object's date and time in the format used by + # HTTP requests. + # + # Time.zone.now.httpdate # => "Tue, 01 Jan 2013 04:39:43 GMT" + def httpdate + utc.httpdate + end + + # Returns a string of the object's date and time in the RFC 2822 standard + # format. + # + # Time.zone.now.rfc2822 # => "Tue, 01 Jan 2013 04:51:39 +0000" + def rfc2822 + to_s(:rfc822) + end + alias_method :rfc822, :rfc2822 + + # Returns a string of the object's date and time. + # Accepts an optional format: + # * :default - default value, mimics Ruby Time#to_s format. + # * :db - format outputs time in UTC :db time. See Time#to_formatted_s(:db). + # * Any key in Time::DATE_FORMATS can be used. See active_support/core_ext/time/conversions.rb. + def to_s(format = :default) + if format == :db + utc.to_s(format) + elsif formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + "#{time.strftime("%Y-%m-%d %H:%M:%S")} #{formatted_offset(false, 'UTC')}" # mimicking Ruby Time#to_s format + end + end + alias_method :to_formatted_s, :to_s + + # Replaces %Z directive with +zone before passing to Time#strftime, + # so that zone information is correct. + def strftime(format) + format = format.gsub(/((?:\A|[^%])(?:%%)*)%Z/, "\\1#{zone}") + getlocal(utc_offset).strftime(format) + end + + # Use the time in UTC for comparisons. + def <=>(other) + utc <=> other + end + alias_method :before?, :< + alias_method :after?, :> + + # Returns true if the current object's time is within the specified + # +min+ and +max+ time. + def between?(min, max) + utc.between?(min, max) + end + + # Returns true if the current object's time is in the past. + def past? + utc.past? + end + + # Returns true if the current object's time falls within + # the current day. + def today? + time.today? 
+ end + + # Returns true if the current object's time falls within + # the next day (tomorrow). + def tomorrow? + time.tomorrow? + end + alias :next_day? :tomorrow? + + # Returns true if the current object's time falls within + # the previous day (yesterday). + def yesterday? + time.yesterday? + end + alias :prev_day? :yesterday? + + # Returns true if the current object's time is in the future. + def future? + utc.future? + end + + # Returns +true+ if +other+ is equal to current object. + def eql?(other) + other.eql?(utc) + end + + def hash + utc.hash + end + + # Adds an interval of time to the current object's time and returns that + # value as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28.725182881 EDT -04:00 + # now + 1000 # => Sun, 02 Nov 2014 01:43:08.725182881 EDT -04:00 + # + # If we're adding a Duration of variable length (i.e., years, months, days), + # move forward from #time, otherwise move forward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time + 24.hours will advance exactly 24 hours, while a + # time + 1.day will advance 23-25 hours, depending on the day. + # + # now + 24.hours # => Mon, 03 Nov 2014 00:26:28.725182881 EST -05:00 + # now + 1.day # => Mon, 03 Nov 2014 01:26:28.725182881 EST -05:00 + def +(other) + if duration_of_variable_length?(other) + method_missing(:+, other) + else + result = utc.acts_like?(:date) ? utc.since(other) : utc + other rescue utc.since(other) + result.in_time_zone(time_zone) + end + end + alias_method :since, :+ + alias_method :in, :+ + + # Subtracts an interval of time and returns a new TimeWithZone object unless + # the other value +acts_like?+ time. Then it will return a Float of the difference + # between the two times that represents the difference between the current + # object's time and the +other+ time. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28.725182881 EST -05:00 + # now - 1000 # => Mon, 03 Nov 2014 00:09:48.725182881 EST -05:00 + # + # If subtracting a Duration of variable length (i.e., years, months, days), + # move backward from #time, otherwise move backward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time - 24.hours will go subtract exactly 24 hours, while a + # time - 1.day will subtract 23-25 hours, depending on the day. + # + # now - 24.hours # => Sun, 02 Nov 2014 01:26:28.725182881 EDT -04:00 + # now - 1.day # => Sun, 02 Nov 2014 00:26:28.725182881 EDT -04:00 + # + # If both the TimeWithZone object and the other value act like Time, a Float + # will be returned. + # + # Time.zone.now - 1.day.ago # => 86399.999967 + # + def -(other) + if other.acts_like?(:time) + to_time - other.to_time + elsif duration_of_variable_length?(other) + method_missing(:-, other) + else + result = utc.acts_like?(:date) ? utc.ago(other) : utc - other rescue utc.ago(other) + result.in_time_zone(time_zone) + end + end + + # Subtracts an interval of time from the current object's time and returns + # the result as a new TimeWithZone object. 
+ # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28.725182881 EST -05:00 + # now.ago(1000) # => Mon, 03 Nov 2014 00:09:48.725182881 EST -05:00 + # + # If we're subtracting a Duration of variable length (i.e., years, months, + # days), move backward from #time, otherwise move backward from #utc, for + # accuracy when moving across DST boundaries. + # + # For instance, time.ago(24.hours) will move back exactly 24 hours, + # while time.ago(1.day) will move back 23-25 hours, depending on + # the day. + # + # now.ago(24.hours) # => Sun, 02 Nov 2014 01:26:28.725182881 EDT -04:00 + # now.ago(1.day) # => Sun, 02 Nov 2014 00:26:28.725182881 EDT -04:00 + def ago(other) + since(-other) + end + + # Returns a new +ActiveSupport::TimeWithZone+ where one or more of the elements have + # been changed according to the +options+ parameter. The time options (:hour, + # :min, :sec, :usec, :nsec) reset cascadingly, + # so if only the hour is passed, then minute, sec, usec and nsec is set to 0. If the + # hour and minute is passed, then sec, usec and nsec is set to 0. The +options+ + # parameter takes a hash with any of these keys: :year, :month, + # :day, :hour, :min, :sec, :usec, + # :nsec, :offset, :zone. Pass either :usec + # or :nsec, not both. Similarly, pass either :zone or + # :offset, not both. + # + # t = Time.zone.now # => Fri, 14 Apr 2017 11:45:15.116992711 EST -05:00 + # t.change(year: 2020) # => Tue, 14 Apr 2020 11:45:15.116992711 EST -05:00 + # t.change(hour: 12) # => Fri, 14 Apr 2017 12:00:00.116992711 EST -05:00 + # t.change(min: 30) # => Fri, 14 Apr 2017 11:30:00.116992711 EST -05:00 + # t.change(offset: "-10:00") # => Fri, 14 Apr 2017 11:45:15.116992711 HST -10:00 + # t.change(zone: "Hawaii") # => Fri, 14 Apr 2017 11:45:15.116992711 HST -10:00 + def change(options) + if options[:zone] && options[:offset] + raise ArgumentError, "Can't change both :offset and :zone at the same time: #{options.inspect}" + end + + new_time = time.change(options) + + if options[:zone] + new_zone = ::Time.find_zone(options[:zone]) + elsif options[:offset] + new_zone = ::Time.find_zone(new_time.utc_offset) + end + + new_zone ||= time_zone + periods = new_zone.periods_for_local(new_time) + + self.class.new(nil, new_zone, new_time, periods.include?(period) ? period : nil) + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The result is returned as a + # new TimeWithZone object. + # + # The +options+ parameter takes a hash with any of these keys: + # :years, :months, :weeks, :days, + # :hours, :minutes, :seconds. + # + # If advancing by a value of variable length (i.e., years, weeks, months, + # days), move forward from #time, otherwise move forward from #utc, for + # accuracy when moving across DST boundaries. 
+ # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28.558049687 EDT -04:00 + # now.advance(seconds: 1) # => Sun, 02 Nov 2014 01:26:29.558049687 EDT -04:00 + # now.advance(minutes: 1) # => Sun, 02 Nov 2014 01:27:28.558049687 EDT -04:00 + # now.advance(hours: 1) # => Sun, 02 Nov 2014 01:26:28.558049687 EST -05:00 + # now.advance(days: 1) # => Mon, 03 Nov 2014 01:26:28.558049687 EST -05:00 + # now.advance(weeks: 1) # => Sun, 09 Nov 2014 01:26:28.558049687 EST -05:00 + # now.advance(months: 1) # => Tue, 02 Dec 2014 01:26:28.558049687 EST -05:00 + # now.advance(years: 1) # => Mon, 02 Nov 2015 01:26:28.558049687 EST -05:00 + def advance(options) + # If we're advancing a value of variable length (i.e., years, weeks, months, days), advance from #time, + # otherwise advance from #utc, for accuracy when moving across DST boundaries + if options.values_at(:years, :weeks, :months, :days).any? + method_missing(:advance, options) + else + utc.advance(options).in_time_zone(time_zone) + end + end + + %w(year mon month day mday wday yday hour min sec usec nsec to_date).each do |method_name| + class_eval <<-EOV, __FILE__, __LINE__ + 1 + def #{method_name} # def month + time.#{method_name} # time.month + end # end + EOV + end + + # Returns Array of parts of Time in sequence of + # [seconds, minutes, hours, day, month, year, weekday, yearday, dst?, zone]. + # + # now = Time.zone.now # => Tue, 18 Aug 2015 02:29:27.485278555 UTC +00:00 + # now.to_a # => [27, 29, 2, 18, 8, 2015, 2, 230, false, "UTC"] + def to_a + [time.sec, time.min, time.hour, time.day, time.mon, time.year, time.wday, time.yday, dst?, zone] + end + + # Returns the object's date and time as a floating point number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_f # => 1417709320.285418 + def to_f + utc.to_f + end + + # Returns the object's date and time as an integer number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_i # => 1417709320 + def to_i + utc.to_i + end + alias_method :tv_sec, :to_i + + # Returns the object's date and time as a rational number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_r # => (708854548642709/500000) + def to_r + utc.to_r + end + + # Returns an instance of DateTime with the timezone's UTC offset + # + # Time.zone.now.to_datetime # => Tue, 18 Aug 2015 02:32:20 +0000 + # Time.current.in_time_zone('Hawaii').to_datetime # => Mon, 17 Aug 2015 16:32:20 -1000 + def to_datetime + @to_datetime ||= utc.to_datetime.new_offset(Rational(utc_offset, 86_400)) + end + + # Returns an instance of +Time+, either with the same UTC offset + # as +self+ or in the local system timezone depending on the setting + # of +ActiveSupport.to_time_preserves_timezone+. + def to_time + if preserve_timezone + @to_time_with_instance_offset ||= getlocal(utc_offset) + else + @to_time_with_system_offset ||= getlocal + end + end + + # So that +self+ acts_like?(:time). + def acts_like_time? + true + end + + # Say we're a Time to thwart type checking. + def is_a?(klass) + klass == ::Time || super + end + alias_method :kind_of?, :is_a? + + # An instance of ActiveSupport::TimeWithZone is never blank + def blank? 
+ false + end + + def freeze + # preload instance variables before freezing + period; utc; time; to_datetime; to_time + super + end + + def marshal_dump + [utc, time_zone.name, time] + end + + def marshal_load(variables) + initialize(variables[0].utc, ::Time.find_zone(variables[1]), variables[2].utc) + end + + # respond_to_missing? is not called in some cases, such as when type conversion is + # performed with Kernel#String + def respond_to?(sym, include_priv = false) + # ensure that we're not going to throw and rescue from NoMethodError in method_missing which is slow + return false if sym.to_sym == :to_str + super + end + + # Ensure proxy class responds to all methods that underlying time instance + # responds to. + def respond_to_missing?(sym, include_priv) + return false if sym.to_sym == :acts_like_date? + time.respond_to?(sym, include_priv) + end + + # Send the missing method to +time+ instance, and wrap result in a new + # TimeWithZone with the existing +time_zone+. + def method_missing(sym, *args, &block) + wrap_with_time_zone time.__send__(sym, *args, &block) + rescue NoMethodError => e + raise e, e.message.sub(time.inspect, inspect), e.backtrace + end + + private + SECONDS_PER_DAY = 86400 + + def incorporate_utc_offset(time, offset) + if time.kind_of?(Date) + time + Rational(offset, SECONDS_PER_DAY) + else + time + offset + end + end + + def get_period_and_ensure_valid_local_time(period) + # we don't want a Time.local instance enforcing its own DST rules as well, + # so transfer time values to a utc constructor if necessary + @time = transfer_time_values_to_utc_constructor(@time) unless @time.utc? + begin + period || @time_zone.period_for_local(@time) + rescue ::TZInfo::PeriodNotFound + # time is in the "spring forward" hour gap, so we're moving the time forward one hour and trying again + @time += 1.hour + retry + end + end + + def transfer_time_values_to_utc_constructor(time) + # avoid creating another Time object if possible + return time if time.instance_of?(::Time) && time.utc? + ::Time.utc(time.year, time.month, time.day, time.hour, time.min, time.sec + time.subsec) + end + + def duration_of_variable_length?(obj) + ActiveSupport::Duration === obj && obj.parts.any? { |p| [:years, :months, :weeks, :days].include?(p[0]) } + end + + def wrap_with_time_zone(time) + if time.acts_like?(:time) + periods = time_zone.periods_for_local(time) + self.class.new(nil, time_zone, time, periods.include?(period) ? period : nil) + elsif time.is_a?(Range) + wrap_with_time_zone(time.begin)..wrap_with_time_zone(time.end) + else + time + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/values/time_zone.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/values/time_zone.rb new file mode 100644 index 0000000..4c289a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/values/time_zone.rb @@ -0,0 +1,580 @@ +# frozen_string_literal: true + +require "tzinfo" +require "concurrent/map" + +module ActiveSupport + # The TimeZone class serves as a wrapper around TZInfo::Timezone instances. + # It allows us to do the following: + # + # * Limit the set of zones provided by TZInfo to a meaningful subset of 134 + # zones. + # * Retrieve and display zones with a friendlier name + # (e.g., "Eastern Time (US & Canada)" instead of "America/New_York"). + # * Lazily load TZInfo::Timezone instances only when they're needed. 
+ # * Create ActiveSupport::TimeWithZone instances via TimeZone's +local+, + # +parse+, +at+ and +now+ methods. + # + # If you set config.time_zone in the Rails Application, you can + # access this TimeZone object via Time.zone: + # + # # application.rb: + # class Application < Rails::Application + # config.time_zone = 'Eastern Time (US & Canada)' + # end + # + # Time.zone # => # + # Time.zone.name # => "Eastern Time (US & Canada)" + # Time.zone.now # => Sun, 18 May 2008 14:30:44 EDT -04:00 + class TimeZone + # Keys are Rails TimeZone names, values are TZInfo identifiers. + MAPPING = { + "International Date Line West" => "Etc/GMT+12", + "Midway Island" => "Pacific/Midway", + "American Samoa" => "Pacific/Pago_Pago", + "Hawaii" => "Pacific/Honolulu", + "Alaska" => "America/Juneau", + "Pacific Time (US & Canada)" => "America/Los_Angeles", + "Tijuana" => "America/Tijuana", + "Mountain Time (US & Canada)" => "America/Denver", + "Arizona" => "America/Phoenix", + "Chihuahua" => "America/Chihuahua", + "Mazatlan" => "America/Mazatlan", + "Central Time (US & Canada)" => "America/Chicago", + "Saskatchewan" => "America/Regina", + "Guadalajara" => "America/Mexico_City", + "Mexico City" => "America/Mexico_City", + "Monterrey" => "America/Monterrey", + "Central America" => "America/Guatemala", + "Eastern Time (US & Canada)" => "America/New_York", + "Indiana (East)" => "America/Indiana/Indianapolis", + "Bogota" => "America/Bogota", + "Lima" => "America/Lima", + "Quito" => "America/Lima", + "Atlantic Time (Canada)" => "America/Halifax", + "Caracas" => "America/Caracas", + "La Paz" => "America/La_Paz", + "Santiago" => "America/Santiago", + "Newfoundland" => "America/St_Johns", + "Brasilia" => "America/Sao_Paulo", + "Buenos Aires" => "America/Argentina/Buenos_Aires", + "Montevideo" => "America/Montevideo", + "Georgetown" => "America/Guyana", + "Puerto Rico" => "America/Puerto_Rico", + "Greenland" => "America/Godthab", + "Mid-Atlantic" => "Atlantic/South_Georgia", + "Azores" => "Atlantic/Azores", + "Cape Verde Is." => "Atlantic/Cape_Verde", + "Dublin" => "Europe/Dublin", + "Edinburgh" => "Europe/London", + "Lisbon" => "Europe/Lisbon", + "London" => "Europe/London", + "Casablanca" => "Africa/Casablanca", + "Monrovia" => "Africa/Monrovia", + "UTC" => "Etc/UTC", + "Belgrade" => "Europe/Belgrade", + "Bratislava" => "Europe/Bratislava", + "Budapest" => "Europe/Budapest", + "Ljubljana" => "Europe/Ljubljana", + "Prague" => "Europe/Prague", + "Sarajevo" => "Europe/Sarajevo", + "Skopje" => "Europe/Skopje", + "Warsaw" => "Europe/Warsaw", + "Zagreb" => "Europe/Zagreb", + "Brussels" => "Europe/Brussels", + "Copenhagen" => "Europe/Copenhagen", + "Madrid" => "Europe/Madrid", + "Paris" => "Europe/Paris", + "Amsterdam" => "Europe/Amsterdam", + "Berlin" => "Europe/Berlin", + "Bern" => "Europe/Zurich", + "Zurich" => "Europe/Zurich", + "Rome" => "Europe/Rome", + "Stockholm" => "Europe/Stockholm", + "Vienna" => "Europe/Vienna", + "West Central Africa" => "Africa/Algiers", + "Bucharest" => "Europe/Bucharest", + "Cairo" => "Africa/Cairo", + "Helsinki" => "Europe/Helsinki", + "Kyiv" => "Europe/Kiev", + "Riga" => "Europe/Riga", + "Sofia" => "Europe/Sofia", + "Tallinn" => "Europe/Tallinn", + "Vilnius" => "Europe/Vilnius", + "Athens" => "Europe/Athens", + "Istanbul" => "Europe/Istanbul", + "Minsk" => "Europe/Minsk", + "Jerusalem" => "Asia/Jerusalem", + "Harare" => "Africa/Harare", + "Pretoria" => "Africa/Johannesburg", + "Kaliningrad" => "Europe/Kaliningrad", + "Moscow" => "Europe/Moscow", + "St. 
Petersburg" => "Europe/Moscow", + "Volgograd" => "Europe/Volgograd", + "Samara" => "Europe/Samara", + "Kuwait" => "Asia/Kuwait", + "Riyadh" => "Asia/Riyadh", + "Nairobi" => "Africa/Nairobi", + "Baghdad" => "Asia/Baghdad", + "Tehran" => "Asia/Tehran", + "Abu Dhabi" => "Asia/Muscat", + "Muscat" => "Asia/Muscat", + "Baku" => "Asia/Baku", + "Tbilisi" => "Asia/Tbilisi", + "Yerevan" => "Asia/Yerevan", + "Kabul" => "Asia/Kabul", + "Ekaterinburg" => "Asia/Yekaterinburg", + "Islamabad" => "Asia/Karachi", + "Karachi" => "Asia/Karachi", + "Tashkent" => "Asia/Tashkent", + "Chennai" => "Asia/Kolkata", + "Kolkata" => "Asia/Kolkata", + "Mumbai" => "Asia/Kolkata", + "New Delhi" => "Asia/Kolkata", + "Kathmandu" => "Asia/Kathmandu", + "Astana" => "Asia/Dhaka", + "Dhaka" => "Asia/Dhaka", + "Sri Jayawardenepura" => "Asia/Colombo", + "Almaty" => "Asia/Almaty", + "Novosibirsk" => "Asia/Novosibirsk", + "Rangoon" => "Asia/Rangoon", + "Bangkok" => "Asia/Bangkok", + "Hanoi" => "Asia/Bangkok", + "Jakarta" => "Asia/Jakarta", + "Krasnoyarsk" => "Asia/Krasnoyarsk", + "Beijing" => "Asia/Shanghai", + "Chongqing" => "Asia/Chongqing", + "Hong Kong" => "Asia/Hong_Kong", + "Urumqi" => "Asia/Urumqi", + "Kuala Lumpur" => "Asia/Kuala_Lumpur", + "Singapore" => "Asia/Singapore", + "Taipei" => "Asia/Taipei", + "Perth" => "Australia/Perth", + "Irkutsk" => "Asia/Irkutsk", + "Ulaanbaatar" => "Asia/Ulaanbaatar", + "Seoul" => "Asia/Seoul", + "Osaka" => "Asia/Tokyo", + "Sapporo" => "Asia/Tokyo", + "Tokyo" => "Asia/Tokyo", + "Yakutsk" => "Asia/Yakutsk", + "Darwin" => "Australia/Darwin", + "Adelaide" => "Australia/Adelaide", + "Canberra" => "Australia/Melbourne", + "Melbourne" => "Australia/Melbourne", + "Sydney" => "Australia/Sydney", + "Brisbane" => "Australia/Brisbane", + "Hobart" => "Australia/Hobart", + "Vladivostok" => "Asia/Vladivostok", + "Guam" => "Pacific/Guam", + "Port Moresby" => "Pacific/Port_Moresby", + "Magadan" => "Asia/Magadan", + "Srednekolymsk" => "Asia/Srednekolymsk", + "Solomon Is." => "Pacific/Guadalcanal", + "New Caledonia" => "Pacific/Noumea", + "Fiji" => "Pacific/Fiji", + "Kamchatka" => "Asia/Kamchatka", + "Marshall Is." => "Pacific/Majuro", + "Auckland" => "Pacific/Auckland", + "Wellington" => "Pacific/Auckland", + "Nuku'alofa" => "Pacific/Tongatapu", + "Tokelau Is." => "Pacific/Fakaofo", + "Chatham Is." => "Pacific/Chatham", + "Samoa" => "Pacific/Apia" + } + + UTC_OFFSET_WITH_COLON = "%s%02d:%02d" # :nodoc: + UTC_OFFSET_WITHOUT_COLON = UTC_OFFSET_WITH_COLON.tr(":", "") # :nodoc: + private_constant :UTC_OFFSET_WITH_COLON, :UTC_OFFSET_WITHOUT_COLON + + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + + class << self + # Assumes self represents an offset from UTC in seconds (as returned from + # Time#utc_offset) and turns this into an +HH:MM formatted string. + # + # ActiveSupport::TimeZone.seconds_to_utc_offset(-21_600) # => "-06:00" + def seconds_to_utc_offset(seconds, colon = true) + format = colon ? UTC_OFFSET_WITH_COLON : UTC_OFFSET_WITHOUT_COLON + sign = (seconds < 0 ? "-" : "+") + hours = seconds.abs / 3600 + minutes = (seconds.abs % 3600) / 60 + format % [sign, hours, minutes] + end + + def find_tzinfo(name) + TZInfo::Timezone.get(MAPPING[name] || name) + end + + alias_method :create, :new + + # Returns a TimeZone instance with the given name, or +nil+ if no + # such TimeZone instance exists. (This exists to support the use of + # this class with the +composed_of+ macro.) + def new(name) + self[name] + end + + # Returns an array of all TimeZone objects. 
There are multiple + # TimeZone objects per time zone, in many cases, to make it easier + # for users to find their own time zone. + def all + @zones ||= zones_map.values.sort + end + + # Locate a specific time zone object. If the argument is a string, it + # is interpreted to mean the name of the timezone to locate. If it is a + # numeric value it is either the hour offset, or the second offset, of the + # timezone to find. (The first one with that offset will be returned.) + # Returns +nil+ if no such time zone is known to the system. + def [](arg) + case arg + when String + begin + @lazy_zones_map[arg] ||= create(arg) + rescue TZInfo::InvalidTimezoneIdentifier + nil + end + when Numeric, ActiveSupport::Duration + arg *= 3600 if arg.abs <= 13 + all.find { |z| z.utc_offset == arg.to_i } + else + raise ArgumentError, "invalid argument to TimeZone[]: #{arg.inspect}" + end + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the USA. + def us_zones + country_zones(:us) + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the country specified by its ISO 3166-1 Alpha2 code. + def country_zones(country_code) + code = country_code.to_s.upcase + @country_zones[code] ||= load_country_zones(code) + end + + def clear #:nodoc: + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + @zones = nil + @zones_map = nil + end + + private + def load_country_zones(code) + country = TZInfo::Country.get(code) + country.zone_identifiers.flat_map do |tz_id| + if MAPPING.value?(tz_id) + MAPPING.inject([]) do |memo, (key, value)| + memo << self[key] if value == tz_id + memo + end + else + create(tz_id, nil, TZInfo::Timezone.get(tz_id)) + end + end.sort! + end + + def zones_map + @zones_map ||= MAPPING.each_with_object({}) do |(name, _), zones| + timezone = self[name] + zones[name] = timezone if timezone + end + end + end + + include Comparable + attr_reader :name + attr_reader :tzinfo + + # Create a new TimeZone object with the given name and offset. The + # offset is the number of seconds that this time zone is offset from UTC + # (GMT). Seconds were chosen as the offset unit because that is the unit + # that Ruby uses to represent time zone offsets (see Time#utc_offset). + def initialize(name, utc_offset = nil, tzinfo = nil) + @name = name + @utc_offset = utc_offset + @tzinfo = tzinfo || TimeZone.find_tzinfo(name) + end + + # Returns the offset of this time zone from UTC in seconds. + def utc_offset + @utc_offset || tzinfo&.current_period&.base_utc_offset + end + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # zone = ActiveSupport::TimeZone['Central Time (US & Canada)'] + # zone.formatted_offset # => "-06:00" + # zone.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc_offset == 0 && alternate_utc_string || self.class.seconds_to_utc_offset(utc_offset, colon) + end + + # Compare this time zone to the parameter. The two are compared first on + # their offsets, and then by name. + def <=>(zone) + return unless zone.respond_to? :utc_offset + result = (utc_offset <=> zone.utc_offset) + result = (name <=> zone.name) if result == 0 + result + end + + # Compare #name and TZInfo identifier to a supplied regexp, returning +true+ + # if a match is found. 
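+    #
+    # For example, given the built-in mapping of "Eastern Time (US & Canada)"
+    # to "America/New_York":
+    #
+    #   zone = ActiveSupport::TimeZone['Eastern Time (US & Canada)']
+    #   zone =~ /Eastern/  # => true
+    #   zone =~ /New_York/ # => true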
+ def =~(re) + re === name || re === MAPPING[name] + end + + # Compare #name and TZInfo identifier to a supplied regexp, returning +true+ + # if a match is found. + def match?(re) + (re == name) || (re == MAPPING[name]) || + ((Regexp === re) && (re.match?(name) || re.match?(MAPPING[name]))) + end + + # Returns a textual representation of this time zone. + def to_s + "(GMT#{formatted_offset}) #{name}" + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from given values. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.local(2007, 2, 1, 15, 30, 45) # => Thu, 01 Feb 2007 15:30:45 HST -10:00 + def local(*args) + time = Time.utc(*args) + ActiveSupport::TimeWithZone.new(nil, self, time) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from number of seconds since the Unix epoch. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.utc(2000).to_f # => 946684800.0 + # Time.zone.at(946684800.0) # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # A second argument can be supplied to specify sub-second precision. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.at(946684800, 123456.789).nsec # => 123456789 + def at(*args) + Time.at(*args).utc.in_time_zone(self) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an ISO 8601 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31T14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time components are missing then they will be set to zero. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31') # => Fri, 31 Dec 1999 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ will be raised unlike +parse+ + # which usually returns +nil+ when given an invalid date string. + def iso8601(str) + parts = Date._iso8601(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + + if parts[:offset] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from parsed string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.parse('1999-12-31 14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.parse('22:30:00') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.parse('Mar 2000') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ could be raised. + def parse(str, now = now()) + parts_to_time(Date._parse(str, false), now) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an RFC 3339 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('2000-01-01T00:00:00Z') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time or zone components are missing then an +ArgumentError+ will + # be raised. 
This is much stricter than either +parse+ or +iso8601+ which + # allow for missing components. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + + TimeWithZone.new(time.utc, self) + end + + # Parses +str+ according to +format+ and returns an ActiveSupport::TimeWithZone. + # + # Assumes that +str+ is a time in the time zone +self+, + # unless +format+ includes an explicit time zone. + # (This is the same behavior as +parse+.) + # In either case, the returned TimeWithZone has the timezone of +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.strptime('1999-12-31 14:00:00', '%Y-%m-%d %H:%M:%S') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.strptime('22:30:00', '%H:%M:%S') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.strptime('Mar 2000', '%b %Y') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + def strptime(str, format, now = now()) + parts_to_time(DateTime._strptime(str, format), now) + end + + # Returns an ActiveSupport::TimeWithZone instance representing the current + # time in the time zone represented by +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.now # => Wed, 23 Jan 2008 20:24:27 HST -10:00 + def now + time_now.utc.in_time_zone(self) + end + + # Returns the current date in this time zone. + def today + tzinfo.now.to_date + end + + # Returns the next date in this time zone. + def tomorrow + today + 1 + end + + # Returns the previous date in this time zone. + def yesterday + today - 1 + end + + # Adjust the given time to the simultaneous time in the time zone + # represented by +self+. Returns a local time with the appropriate offset + # -- if you want an ActiveSupport::TimeWithZone instance, use + # Time#in_time_zone() instead. + # + # As of tzinfo 2, utc_to_local returns a Time with a non-zero utc_offset. + # See the +utc_to_local_returns_utc_offset_times+ config for more info. + def utc_to_local(time) + tzinfo.utc_to_local(time).yield_self do |t| + ActiveSupport.utc_to_local_returns_utc_offset_times ? + t : Time.utc(t.year, t.month, t.day, t.hour, t.min, t.sec, t.sec_fraction) + end + end + + # Adjust the given time to the simultaneous time in UTC. Returns a + # Time.utc() instance. + def local_to_utc(time, dst = true) + tzinfo.local_to_utc(time, dst) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_utc(time) + tzinfo.period_for_utc(time) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. 
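A minimal sketch of the `utc_to_local` / `local_to_utc` pair described above, added here for illustration rather than taken from the vendored source. The offset-carrying result assumes the `utc_to_local_returns_utc_offset_times` setting mentioned in the comment; with it disabled, the same wall-clock values come back tagged as UTC.

```ruby
zone = ActiveSupport::TimeZone["Hawaii"] # UTC-10, no DST

# UTC instant -> Hawaii wall-clock time.
zone.utc_to_local(Time.utc(2000, 1, 1, 12)).to_s
# => "2000-01-01 02:00:00 -1000" (or the same wall clock tagged UTC with the legacy setting)

# Hawaii wall-clock time -> the equivalent UTC instant.
zone.local_to_utc(Time.new(2000, 1, 1, 2)).to_s
# => "2000-01-01 12:00:00 UTC"
```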
+ def period_for_local(time, dst = true) + tzinfo.period_for_local(time, dst) { |periods| periods.last } + end + + def periods_for_local(time) #:nodoc: + tzinfo.periods_for_local(time) + end + + def init_with(coder) #:nodoc: + initialize(coder["name"]) + end + + def encode_with(coder) #:nodoc: + coder.tag = "!ruby/object:#{self.class}" + coder.map = { "name" => tzinfo.name } + end + + private + def parts_to_time(parts, now) + raise ArgumentError, "invalid date" if parts.nil? + return if parts.empty? + + if parts[:seconds] + time = Time.at(parts[:seconds]) + else + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, parts[:year] || parts[:mon] ? 1 : now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + end + + if parts[:offset] || parts[:seconds] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + def time_now + Time.now + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/version.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/version.rb new file mode 100644 index 0000000..928838c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/version.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require_relative "gem_version" + +module ActiveSupport + # Returns the version of the currently loaded ActiveSupport as a Gem::Version + def self.version + gem_version + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini.rb new file mode 100644 index 0000000..f6ae08b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini.rb @@ -0,0 +1,201 @@ +# frozen_string_literal: true + +require "time" +require "base64" +require "bigdecimal" +require "bigdecimal/util" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/date_time/calculations" + +module ActiveSupport + # = XmlMini + # + # To use the much faster libxml parser: + # gem 'libxml-ruby', '=0.9.7' + # XmlMini.backend = 'LibXML' + module XmlMini + extend self + + # This module decorates files deserialized using Hash.from_xml with + # the original_filename and content_type methods. 
+ module FileLike #:nodoc: + attr_writer :original_filename, :content_type + + def original_filename + @original_filename || "untitled" + end + + def content_type + @content_type || "application/octet-stream" + end + end + + DEFAULT_ENCODINGS = { + "binary" => "base64" + } unless defined?(DEFAULT_ENCODINGS) + + unless defined?(TYPE_NAMES) + TYPE_NAMES = { + "Symbol" => "symbol", + "Integer" => "integer", + "BigDecimal" => "decimal", + "Float" => "float", + "TrueClass" => "boolean", + "FalseClass" => "boolean", + "Date" => "date", + "DateTime" => "dateTime", + "Time" => "dateTime", + "Array" => "array", + "Hash" => "hash" + } + end + + FORMATTING = { + "symbol" => Proc.new { |symbol| symbol.to_s }, + "date" => Proc.new { |date| date.to_s(:db) }, + "dateTime" => Proc.new { |time| time.xmlschema }, + "binary" => Proc.new { |binary| ::Base64.encode64(binary) }, + "yaml" => Proc.new { |yaml| yaml.to_yaml } + } unless defined?(FORMATTING) + + # TODO use regexp instead of Date.parse + unless defined?(PARSING) + PARSING = { + "symbol" => Proc.new { |symbol| symbol.to_s.to_sym }, + "date" => Proc.new { |date| ::Date.parse(date) }, + "datetime" => Proc.new { |time| Time.xmlschema(time).utc rescue ::DateTime.parse(time).utc }, + "integer" => Proc.new { |integer| integer.to_i }, + "float" => Proc.new { |float| float.to_f }, + "decimal" => Proc.new do |number| + if String === number + number.to_d + else + BigDecimal(number) + end + end, + "boolean" => Proc.new { |boolean| %w(1 true).include?(boolean.to_s.strip) }, + "string" => Proc.new { |string| string.to_s }, + "yaml" => Proc.new { |yaml| YAML.load(yaml) rescue yaml }, + "base64Binary" => Proc.new { |bin| ::Base64.decode64(bin) }, + "binary" => Proc.new { |bin, entity| _parse_binary(bin, entity) }, + "file" => Proc.new { |file, entity| _parse_file(file, entity) } + } + + PARSING.update( + "double" => PARSING["float"], + "dateTime" => PARSING["datetime"] + ) + end + + attr_accessor :depth + self.depth = 100 + + delegate :parse, to: :backend + + def backend + current_thread_backend || @backend + end + + def backend=(name) + backend = name && cast_backend_name_to_module(name) + self.current_thread_backend = backend if current_thread_backend + @backend = backend + end + + def with_backend(name) + old_backend = current_thread_backend + self.current_thread_backend = name && cast_backend_name_to_module(name) + yield + ensure + self.current_thread_backend = old_backend + end + + def to_tag(key, value, options) + type_name = options.delete(:type) + merged_options = options.merge(root: key, skip_instruct: true) + + if value.is_a?(::Method) || value.is_a?(::Proc) + if value.arity == 1 + value.call(merged_options) + else + value.call(merged_options, key.to_s.singularize) + end + elsif value.respond_to?(:to_xml) + value.to_xml(merged_options) + else + type_name ||= TYPE_NAMES[value.class.name] + type_name ||= value.class.name if value && !value.respond_to?(:to_str) + type_name = type_name.to_s if type_name + type_name = "dateTime" if type_name == "datetime" + + key = rename_key(key.to_s, options) + + attributes = options[:skip_types] || type_name.nil? ? {} : { type: type_name } + attributes[:nil] = true if value.nil? + + encoding = options[:encoding] || DEFAULT_ENCODINGS[type_name] + attributes[:encoding] = encoding if encoding + + formatted_value = FORMATTING[type_name] && !value.nil? ? 
+ FORMATTING[type_name].call(value) : value + + options[:builder].tag!(key, formatted_value, attributes) + end + end + + def rename_key(key, options = {}) + camelize = options[:camelize] + dasherize = !options.has_key?(:dasherize) || options[:dasherize] + if camelize + key = true == camelize ? key.camelize : key.camelize(camelize) + end + key = _dasherize(key) if dasherize + key + end + + private + def _dasherize(key) + # $2 must be a non-greedy regex for this to work + left, middle, right = /\A(_*)(.*?)(_*)\Z/.match(key.strip)[1, 3] + "#{left}#{middle.tr('_ ', '--')}#{right}" + end + + # TODO: Add support for other encodings + def _parse_binary(bin, entity) + case entity["encoding"] + when "base64" + ::Base64.decode64(bin) + else + bin + end + end + + def _parse_file(file, entity) + f = StringIO.new(::Base64.decode64(file)) + f.extend(FileLike) + f.original_filename = entity["name"] + f.content_type = entity["content_type"] + f + end + + def current_thread_backend + Thread.current[:xml_mini_backend] + end + + def current_thread_backend=(name) + Thread.current[:xml_mini_backend] = name && cast_backend_name_to_module(name) + end + + def cast_backend_name_to_module(name) + if name.is_a?(Module) + name + else + require "active_support/xml_mini/#{name.downcase}" + ActiveSupport.const_get("XmlMini_#{name}") + end + end + end + + XmlMini.backend = "REXML" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/jdom.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/jdom.rb new file mode 100644 index 0000000..12ca19a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/jdom.rb @@ -0,0 +1,182 @@ +# frozen_string_literal: true + +raise "JRuby is required to use the JDOM backend for XmlMini" unless RUBY_PLATFORM.include?("java") + +require "jruby" +include Java + +require "active_support/core_ext/object/blank" + +java_import javax.xml.parsers.DocumentBuilder unless defined? DocumentBuilder +java_import javax.xml.parsers.DocumentBuilderFactory unless defined? DocumentBuilderFactory +java_import java.io.StringReader unless defined? StringReader +java_import org.xml.sax.InputSource unless defined? InputSource +java_import org.xml.sax.Attributes unless defined? Attributes +java_import org.w3c.dom.Node unless defined? Node + +module ActiveSupport + module XmlMini_JDOM #:nodoc: + extend self + + CONTENT_KEY = "__content__" + + NODE_TYPE_NAMES = %w{ATTRIBUTE_NODE CDATA_SECTION_NODE COMMENT_NODE DOCUMENT_FRAGMENT_NODE + DOCUMENT_NODE DOCUMENT_TYPE_NODE ELEMENT_NODE ENTITY_NODE ENTITY_REFERENCE_NODE NOTATION_NODE + PROCESSING_INSTRUCTION_NODE TEXT_NODE} + + node_type_map = {} + NODE_TYPE_NAMES.each { |type| node_type_map[Node.send(type)] = type } + + # Parse an XML Document string or IO into a simple hash using Java's jdom. + # data:: + # XML Document string or IO to parse + def parse(data) + if data.respond_to?(:read) + data = data.read + end + + if data.blank? 
+ {} + else + @dbf = DocumentBuilderFactory.new_instance + # secure processing of java xml + # https://archive.is/9xcQQ + @dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) + @dbf.setFeature("http://xml.org/sax/features/external-general-entities", false) + @dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false) + @dbf.setFeature(javax.xml.XMLConstants::FEATURE_SECURE_PROCESSING, true) + xml_string_reader = StringReader.new(data) + xml_input_source = InputSource.new(xml_string_reader) + doc = @dbf.new_document_builder.parse(xml_input_source) + merge_element!({ CONTENT_KEY => "" }, doc.document_element, XmlMini.depth) + end + end + + private + # Convert an XML element and merge into the hash + # + # hash:: + # Hash to merge the converted element into. + # element:: + # XML element to merge into hash + def merge_element!(hash, element, depth) + raise "Document too deep!" if depth == 0 + delete_empty(hash) + merge!(hash, element.tag_name, collapse(element, depth)) + end + + def delete_empty(hash) + hash.delete(CONTENT_KEY) if hash[CONTENT_KEY] == "" + end + + # Actually converts an XML document element into a data structure. + # + # element:: + # The document element to be collapsed. + def collapse(element, depth) + hash = get_attributes(element) + + child_nodes = element.child_nodes + if child_nodes.length > 0 + (0...child_nodes.length).each do |i| + child = child_nodes.item(i) + merge_element!(hash, child, depth - 1) unless child.node_type == Node.TEXT_NODE + end + merge_texts!(hash, element) unless empty_content?(element) + hash + else + merge_texts!(hash, element) + end + end + + # Merge all the texts of an element into the hash + # + # hash:: + # Hash to add the converted element to. + # element:: + # XML element whose texts are to me merged into the hash + def merge_texts!(hash, element) + delete_empty(hash) + text_children = texts(element) + if text_children.join.empty? + hash + else + # must use value to prevent double-escaping + merge!(hash, CONTENT_KEY, text_children.join) + end + end + + # Adds a new key/value pair to an existing Hash. If the key to be added + # already exists and the existing value associated with key is not + # an Array, it will be wrapped in an Array. Then the new value is + # appended to that Array. + # + # hash:: + # Hash to add key/value pair to. + # key:: + # Key to be added. + # value:: + # Value to be associated with key. + def merge!(hash, key, value) + if hash.has_key?(key) + if hash[key].instance_of?(Array) + hash[key] << value + else + hash[key] = [hash[key], value] + end + elsif value.instance_of?(Array) + hash[key] = [value] + else + hash[key] = value + end + hash + end + + # Converts the attributes array of an XML element into a hash. + # Returns an empty Hash if node has no attributes. + # + # element:: + # XML element to extract attributes from. + def get_attributes(element) + attribute_hash = {} + attributes = element.attributes + (0...attributes.length).each do |i| + attribute_hash[CONTENT_KEY] ||= "" + attribute_hash[attributes.item(i).name] = attributes.item(i).value + end + attribute_hash + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. 
+ def texts(element) + texts = [] + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + texts << item.get_data + end + end + texts + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + text = +"" + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + text << item.get_data.strip + end + end + text.strip.length == 0 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/libxml.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/libxml.rb new file mode 100644 index 0000000..c2e999e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/libxml.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXML #:nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Parser.io(data).parse.to_hash + end + end + end +end + +module LibXML #:nodoc: + module Conversions #:nodoc: + module Document #:nodoc: + def to_hash + root.to_hash + end + end + + module Node #:nodoc: + CONTENT_ROOT = "__content__" + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + each_child do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= +"" + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? + node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + each_attr { |a| node_hash[a.name] = a.value } + + hash + end + end + end +end + +# :enddoc: + +LibXML::XML::Document.include(LibXML::Conversions::Document) +LibXML::XML::Node.include(LibXML::Conversions::Node) diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/libxmlsax.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/libxmlsax.rb new file mode 100644 index 0000000..ac8acdf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/libxmlsax.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXMLSAX #:nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. 
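The backend plumbing shown earlier (the `backend=` / `with_backend` accessors and the PARSING type table) and the SAX `HashBuilder` defined just below are easiest to see end to end through `Hash.from_xml`. This is a hypothetical sketch, assuming the `libxml-ruby` gem is installed for the non-default backend; the hashes in the comments indicate the shape of the result rather than byte-exact output.

```ruby
require "active_support"
require "active_support/core_ext/hash/conversions" # provides Hash.from_xml

xml = '<person><name>Ada</name><age type="integer">36</age></person>'

# Default REXML backend, with type casting driven by the PARSING table above.
Hash.from_xml(xml) # => {"person"=>{"name"=>"Ada", "age"=>36}}

# Temporarily parse with the SAX-based libxml backend built by HashBuilder.
ActiveSupport::XmlMini.with_backend("LibXMLSAX") do
  ActiveSupport::XmlMini.parse(xml)
  # => {"person"=>{"name"=>{"__content__"=>"Ada"},
  #                "age"=>{"__content__"=>"36", "type"=>"integer"}}}
end
```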
+ class HashBuilder + include LibXML::XML::SaxParser::Callbacks + + CONTENT_KEY = "__content__" + HASH_SIZE_KEY = "__hash_size__" + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def on_start_document + @hash = { CONTENT_KEY => +"" } + @hash_stack = [@hash] + end + + def on_end_document + @hash = @hash_stack.pop + @hash.delete(CONTENT_KEY) + end + + def on_start_element(name, attrs = {}) + new_hash = { CONTENT_KEY => +"" }.merge!(attrs) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def on_end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def on_characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :on_cdata_block, :on_characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Error.set_handler(&LibXML::XML::Error::QUIET_HANDLER) + parser = LibXML::XML::SaxParser.io(data) + document = document_class.new + + parser.callbacks = document + parser.parse + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/nokogiri.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/nokogiri.rb new file mode 100644 index 0000000..f76513f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/nokogiri.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_Nokogiri #:nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml / nokogiri. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + doc = Nokogiri::XML(data) + raise doc.errors.first if doc.errors.length > 0 + doc.to_hash + end + end + + module Conversions #:nodoc: + module Document #:nodoc: + def to_hash + root.to_hash + end + end + + module Node #:nodoc: + CONTENT_ROOT = "__content__" + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + children.each do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= +"" + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank and there are child tags + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? 
+ node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + attribute_nodes.each { |a| node_hash[a.node_name] = a.value } + + hash + end + end + end + + Nokogiri::XML::Document.include(Conversions::Document) + Nokogiri::XML::Node.include(Conversions::Node) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/nokogirisax.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/nokogirisax.rb new file mode 100644 index 0000000..55cd72e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/nokogirisax.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_NokogiriSAX #:nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. + class HashBuilder < Nokogiri::XML::SAX::Document + CONTENT_KEY = "__content__" + HASH_SIZE_KEY = "__hash_size__" + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def start_document + @hash = {} + @hash_stack = [@hash] + end + + def end_document + raise "Parse stack not empty!" if @hash_stack.size > 1 + end + + def error(error_message) + raise error_message + end + + def start_element(name, attrs = []) + new_hash = { CONTENT_KEY => +"" }.merge!(Hash[attrs]) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :cdata_block, :characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + document = document_class.new + parser = Nokogiri::XML::SAX::Parser.new(document) + parser.parse(data) + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/rexml.rb b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/rexml.rb new file mode 100644 index 0000000..c700959 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/activesupport-6.1.4.6/lib/active_support/xml_mini/rexml.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_REXML #:nodoc: + extend self + + CONTENT_KEY = "__content__" + + # Parse an XML Document string or IO into a simple hash. + # + # Same as XmlSimple::xml_in but doesn't shoot itself in the foot, + # and uses the defaults from Active Support. + # + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? 
+ {} + else + require_rexml unless defined?(REXML::Document) + doc = REXML::Document.new(data) + + if doc.root + merge_element!({}, doc.root, XmlMini.depth) + else + raise REXML::ParseException, + "The document #{doc.to_s.inspect} does not have a valid root" + end + end + end + + private + def require_rexml + silence_warnings { require "rexml/document" } + rescue LoadError => e + $stderr.puts "You don't have rexml installed in your application. Please add it to your Gemfile and run bundle install" + raise e + end + + # Convert an XML element and merge into the hash + # + # hash:: + # Hash to merge the converted element into. + # element:: + # XML element to merge into hash + def merge_element!(hash, element, depth) + raise REXML::ParseException, "The document is too deep" if depth == 0 + merge!(hash, element.name, collapse(element, depth)) + end + + # Actually converts an XML document element into a data structure. + # + # element:: + # The document element to be collapsed. + def collapse(element, depth) + hash = get_attributes(element) + + if element.has_elements? + element.each_element { |child| merge_element!(hash, child, depth - 1) } + merge_texts!(hash, element) unless empty_content?(element) + hash + else + merge_texts!(hash, element) + end + end + + # Merge all the texts of an element into the hash + # + # hash:: + # Hash to add the converted element to. + # element:: + # XML element whose texts are to me merged into the hash + def merge_texts!(hash, element) + unless element.has_text? + hash + else + # must use value to prevent double-escaping + texts = +"" + element.texts.each { |t| texts << t.value } + merge!(hash, CONTENT_KEY, texts) + end + end + + # Adds a new key/value pair to an existing Hash. If the key to be added + # already exists and the existing value associated with key is not + # an Array, it will be wrapped in an Array. Then the new value is + # appended to that Array. + # + # hash:: + # Hash to add key/value pair to. + # key:: + # Key to be added. + # value:: + # Value to be associated with key. + def merge!(hash, key, value) + if hash.has_key?(key) + if hash[key].instance_of?(Array) + hash[key] << value + else + hash[key] = [hash[key], value] + end + elsif value.instance_of?(Array) + hash[key] = [value] + else + hash[key] = value + end + hash + end + + # Converts the attributes array of an XML element into a hash. + # Returns an empty Hash if node has no attributes. + # + # element:: + # XML element to extract attributes from. + def get_attributes(element) + attributes = {} + element.attributes.each { |n, v| attributes[n] = v } + attributes + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + element.texts.join.blank? 
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/CHANGELOG.md new file mode 100644 index 0000000..4a9f866 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/CHANGELOG.md @@ -0,0 +1,246 @@ +# Addressable 2.8.0 +- fixes ReDoS vulnerability in Addressable::Template#match +- no longer replaces `+` with spaces in queries for non-http(s) schemes +- fixed encoding ipv6 literals +- the `:compacted` flag for `normalized_query` now dedupes parameters +- fix broken `escape_component` alias +- dropping support for Ruby 2.0 and 2.1 +- adding Ruby 3.0 compatibility for development tasks +- drop support for `rack-mount` and remove Addressable::Template#generate +- performance improvements +- switch CI/CD to GitHub Actions + +# Addressable 2.7.0 +- added `:compacted` flag to `normalized_query` +- `heuristic_parse` handles `mailto:` more intuitively +- dropped explicit support for JRuby 9.0.5.0 +- compatibility w/ public_suffix 4.x +- performance improvements + +# Addressable 2.6.0 +- added `tld=` method to allow assignment to the public suffix +- most `heuristic_parse` patterns are now case-insensitive +- `heuristic_parse` handles more `file://` URI variations +- fixes bug in `heuristic_parse` when uri starts with digit +- fixes bug in `request_uri=` with query strings +- fixes template issues with `nil` and `?` operator +- `frozen_string_literal` pragmas added +- minor performance improvements in regexps +- fixes to eliminate warnings + +# Addressable 2.5.2 +- better support for frozen string literals +- fixed bug w/ uppercase characters in scheme +- IDNA errors w/ emoji URLs +- compatibility w/ public_suffix 3.x + +# Addressable 2.5.1 +- allow unicode normalization to be disabled for URI Template expansion +- removed duplicate test + +# Addressable 2.5.0 +- dropping support for Ruby 1.9 +- adding support for Ruby 2.4 preview +- add support for public suffixes and tld; first runtime dependency +- hostname escaping should match RFC; underscores in hostnames no longer escaped +- paths beginning with // and missing an authority are now considered invalid +- validation now also takes place after setting a path +- handle backslashes in authority more like a browser for `heuristic_parse` +- unescaped backslashes in host now raise an `InvalidURIError` +- `merge!`, `join!`, `omit!` and `normalize!` don't disable deferred validation +- `heuristic_parse` now trims whitespace before parsing +- host parts longer than 63 bytes will be ignored and not passed to libidn +- normalized values always encoded as UTF-8 + +# Addressable 2.4.0 +- support for 1.8.x dropped +- double quotes in a host now raises an error +- newlines in host will no longer get unescaped during normalization +- stricter handling of bogus scheme values +- stricter handling of encoded port values +- calling `require 'addressable'` will now load both the URI and Template files +- assigning to the `hostname` component with an `IPAddr` object is now supported +- assigning to the `origin` component is now supported +- fixed minor bug where an exception would be thrown for a missing ACE suffix +- better partial expansion of URI templates + +# Addressable 2.3.8 +- fix warnings +- update dependency gems +- support for 1.8.x officially deprecated + +# Addressable 2.3.7 +- fix scenario in which invalid URIs don't get an exception until inspected +- handle hostnames with two adjacent periods correctly +- upgrade of RSpec + +# Addressable 2.3.6 +- 
normalization drops empty query string +- better handling in template extract for missing values +- template modifier for `'?'` now treated as optional +- fixed issue where character class parameters were modified +- templates can now be tested for equality +- added `:sorted` option to normalization of query strings +- fixed issue with normalization of hosts given in `'example.com.'` form + +# Addressable 2.3.5 +- added Addressable::URI#empty? method +- Addressable::URI#hostname methods now strip square brackets from IPv6 hosts +- compatibility with Net::HTTP in Ruby 2.0.0 +- Addressable::URI#route_from should always give relative URIs + +# Addressable 2.3.4 +- fixed issue with encoding altering its inputs +- query string normalization now leaves ';' characters alone +- FakeFS is detected before attempting to load unicode tables +- additional testing to ensure frozen objects don't cause problems + +# Addressable 2.3.3 +- fixed issue with converting common primitives during template expansion +- fixed port encoding issue +- removed a few warnings +- normalize should now ignore %2B in query strings +- the IDNA logic should now be handled by libidn in Ruby 1.9 +- no template match should now result in nil instead of an empty MatchData +- added license information to gemspec + +# Addressable 2.3.2 +- added Addressable::URI#default_port method +- fixed issue with Marshalling Unicode data on Windows +- improved heuristic parsing to better handle IPv4 addresses + +# Addressable 2.3.1 +- fixed missing unicode data file + +# Addressable 2.3.0 +- updated Addressable::Template to use RFC 6570, level 4 +- fixed compatibility problems with some versions of Ruby +- moved unicode tables into a data file for performance reasons +- removing support for multiple query value notations + +# Addressable 2.2.8 +- fixed issues with dot segment removal code +- form encoding can now handle multiple values per key +- updated development environment + +# Addressable 2.2.7 +- fixed issues related to Addressable::URI#query_values= +- the Addressable::URI.parse method is now polymorphic + +# Addressable 2.2.6 +- changed the way ambiguous paths are handled +- fixed bug with frozen URIs +- https supported in heuristic parsing + +# Addressable 2.2.5 +- 'parsing' a pre-parsed URI object is now a dup operation +- introduced conditional support for libidn +- fixed normalization issue on ampersands in query strings +- added additional tests around handling of query strings + +# Addressable 2.2.4 +- added origin support from draft-ietf-websec-origin-00 +- resolved issue with attempting to navigate below root +- fixed bug with string splitting in query strings + +# Addressable 2.2.3 +- added :flat_array notation for query strings + +# Addressable 2.2.2 +- fixed issue with percent escaping of '+' character in query strings + +# Addressable 2.2.1 +- added support for application/x-www-form-urlencoded. 
+ +# Addressable 2.2.0 +- added site methods +- improved documentation + +# Addressable 2.1.2 +- added HTTP request URI methods +- better handling of Windows file paths +- validation_deferred boolean replaced with defer_validation block +- normalization of percent-encoded paths should now be correct +- fixed issue with constructing URIs with relative paths +- fixed warnings + +# Addressable 2.1.1 +- more type checking changes +- fixed issue with unicode normalization +- added method to find template defaults +- symbolic keys are now allowed in template mappings +- numeric values and symbolic values are now allowed in template mappings + +# Addressable 2.1.0 +- refactored URI template support out into its own class +- removed extract method due to being useless and unreliable +- removed Addressable::URI.expand_template +- removed Addressable::URI#extract_mapping +- added partial template expansion +- fixed minor bugs in the parse and heuristic_parse methods +- fixed incompatibility with Ruby 1.9.1 +- fixed bottleneck in Addressable::URI#hash and Addressable::URI#to_s +- fixed unicode normalization exception +- updated query_values methods to better handle subscript notation +- worked around issue with freezing URIs +- improved specs + +# Addressable 2.0.2 +- fixed issue with URI template expansion +- fixed issue with percent escaping characters 0-15 + +# Addressable 2.0.1 +- fixed issue with query string assignment +- fixed issue with improperly encoded components + +# Addressable 2.0.0 +- the initialize method now takes an options hash as its only parameter +- added query_values method to URI class +- completely replaced IDNA implementation with pure Ruby +- renamed Addressable::ADDRESSABLE_VERSION to Addressable::VERSION +- completely reworked the Rakefile +- changed the behavior of the port method significantly +- Addressable::URI.encode_segment, Addressable::URI.unencode_segment renamed +- documentation is now in YARD format +- more rigorous type checking +- to_str method implemented, implicit conversion to Strings now allowed +- Addressable::URI#omit method added, Addressable::URI#merge method replaced +- updated URI Template code to match v 03 of the draft spec +- added a bunch of new specifications + +# Addressable 1.0.4 +- switched to using RSpec's pending system for specs that rely on IDN +- fixed issue with creating URIs with paths that are not prefixed with '/' + +# Addressable 1.0.3 +- implemented a hash method + +# Addressable 1.0.2 +- fixed minor bug with the extract_mapping method + +# Addressable 1.0.1 +- fixed minor bug with the extract_mapping method + +# Addressable 1.0.0 +- heuristic parse method added +- parsing is slightly more strict +- replaced to_h with to_hash +- fixed routing methods +- improved specifications +- improved heckle rake task +- no surviving heckle mutations + +# Addressable 0.1.2 +- improved normalization +- fixed bug in joining algorithm +- updated specifications + +# Addressable 0.1.1 +- updated documentation +- added URI Template variable extraction + +# Addressable 0.1.0 +- initial release +- implementation based on RFC 3986, 3987 +- support for IRIs via libidn +- support for the URI Template draft spec diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/Gemfile new file mode 100644 index 0000000..2684c15 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/Gemfile @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +gemspec(path: 
__FILE__ == "(eval)" ? ".." : ".") + +group :test do + gem 'rspec', '~> 3.8' + gem 'rspec-its', '~> 1.3' +end + +group :coverage do + gem "coveralls", "> 0.7", require: false, platforms: :mri + gem "simplecov", require: false +end + +group :development do + gem 'launchy', '~> 2.4', '>= 2.4.3' + gem 'redcarpet', :platform => :mri_19 + gem 'yard' +end + +group :test, :development do + gem 'memory_profiler' + gem "rake", ">= 12.3.3" +end + +gem "idn-ruby", platform: :mri diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/LICENSE.txt new file mode 100644 index 0000000..ef51da2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/README.md b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/README.md new file mode 100644 index 0000000..9892f61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/README.md @@ -0,0 +1,121 @@ +# Addressable + +
+- Homepage: [github.com/sporkmonger/addressable](https://github.com/sporkmonger/addressable)
+- Author: Bob Aman
+- Copyright: Copyright © Bob Aman
+- License: Apache 2.0
+ +[![Gem Version](https://img.shields.io/gem/dt/addressable.svg)][gem] +[![Build Status](https://github.com/sporkmonger/addressable/workflows/CI/badge.svg)][actions] +[![Test Coverage Status](https://img.shields.io/coveralls/sporkmonger/addressable.svg)][coveralls] +[![Documentation Coverage Status](https://inch-ci.org/github/sporkmonger/addressable.svg?branch=master)][inch] + +[gem]: https://rubygems.org/gems/addressable +[actions]: https://github.com/sporkmonger/addressable/actions +[coveralls]: https://coveralls.io/r/sporkmonger/addressable +[inch]: https://inch-ci.org/github/sporkmonger/addressable + +# Description + +Addressable is an alternative implementation to the URI implementation +that is part of Ruby's standard library. It is flexible, offers heuristic +parsing, and additionally provides extensive support for IRIs and URI templates. + +Addressable closely conforms to RFC 3986, RFC 3987, and RFC 6570 (level 4). + +# Reference + +- {Addressable::URI} +- {Addressable::Template} + +# Example usage + +```ruby +require "addressable/uri" + +uri = Addressable::URI.parse("http://example.com/path/to/resource/") +uri.scheme +#=> "http" +uri.host +#=> "example.com" +uri.path +#=> "/path/to/resource/" + +uri = Addressable::URI.parse("http://www.詹姆斯.com/") +uri.normalize +#=> # +``` + + +# URI Templates + +For more details, see [RFC 6570](https://www.rfc-editor.org/rfc/rfc6570.txt). + + +```ruby + +require "addressable/template" + +template = Addressable::Template.new("http://example.com/{?query*}") +template.expand({ + "query" => { + 'foo' => 'bar', + 'color' => 'red' + } +}) +#=> # + +template = Addressable::Template.new("http://example.com/{?one,two,three}") +template.partial_expand({"one" => "1", "three" => 3}).pattern +#=> "http://example.com/?one=1{&two}&three=3" + +template = Addressable::Template.new( + "http://{host}{/segments*}/{?one,two,bogus}{#fragment}" +) +uri = Addressable::URI.parse( + "http://example.com/a/b/c/?one=1&two=2#foo" +) +template.extract(uri) +#=> +# { +# "host" => "example.com", +# "segments" => ["a", "b", "c"], +# "one" => "1", +# "two" => "2", +# "fragment" => "foo" +# } +``` + +# Install + +```console +$ gem install addressable +``` + +You may optionally turn on native IDN support by installing libidn and the +idn gem: + +```console +$ sudo apt-get install libidn11-dev # Debian/Ubuntu +$ brew install libidn # OS X +$ gem install idn-ruby +``` + +# Semantic Versioning + +This project uses [Semantic Versioning](https://semver.org/). 
You can (and should) specify your +dependency using a pessimistic version constraint covering the major and minor +values: + +```ruby +spec.add_dependency 'addressable', '~> 2.7' +``` + +If you need a specific bug fix, you can also specify minimum tiny versions +without preventing updates to the latest minor release: + +```ruby +spec.add_dependency 'addressable', '~> 2.3', '>= 2.3.7' +``` diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/Rakefile new file mode 100644 index 0000000..b7e0ff3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/Rakefile @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +require 'rubygems' +require 'rake' + +require File.join(File.dirname(__FILE__), 'lib', 'addressable', 'version') + +PKG_DISPLAY_NAME = 'Addressable' +PKG_NAME = PKG_DISPLAY_NAME.downcase +PKG_VERSION = Addressable::VERSION::STRING +PKG_FILE_NAME = "#{PKG_NAME}-#{PKG_VERSION}" + +RELEASE_NAME = "REL #{PKG_VERSION}" + +PKG_SUMMARY = "URI Implementation" +PKG_DESCRIPTION = <<-TEXT +Addressable is an alternative implementation to the URI implementation that is +part of Ruby's standard library. It is flexible, offers heuristic parsing, and +additionally provides extensive support for IRIs and URI templates. +TEXT + +PKG_FILES = FileList[ + "lib/**/*", "spec/**/*", "vendor/**/*", "data/**/*", + "tasks/**/*", + "[A-Z]*", "Rakefile" +].exclude(/pkg/).exclude(/database\.yml/). + exclude(/Gemfile\.lock/).exclude(/[_\.]git$/) + +task :default => "spec" + +WINDOWS = (RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/) rescue false +SUDO = WINDOWS ? '' : ('sudo' unless ENV['SUDOLESS']) + +Dir['tasks/**/*.rake'].each { |rake| load rake } diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/addressable.gemspec b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/addressable.gemspec new file mode 100644 index 0000000..12666a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/addressable.gemspec @@ -0,0 +1,37 @@ +# -*- encoding: utf-8 -*- +# stub: addressable 2.8.0 ruby lib + +Gem::Specification.new do |s| + s.name = "addressable".freeze + s.version = "2.8.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Bob Aman".freeze] + s.date = "2021-07-03" + s.description = "Addressable is an alternative implementation to the URI implementation that is\npart of Ruby's standard library. 
It is flexible, offers heuristic parsing, and\nadditionally provides extensive support for IRIs and URI templates.\n".freeze + s.email = "bob@sporkmonger.com".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["CHANGELOG.md".freeze, "Gemfile".freeze, "LICENSE.txt".freeze, "README.md".freeze, "Rakefile".freeze, "addressable.gemspec".freeze, "data/unicode.data".freeze, "lib/addressable.rb".freeze, "lib/addressable/idna.rb".freeze, "lib/addressable/idna/native.rb".freeze, "lib/addressable/idna/pure.rb".freeze, "lib/addressable/template.rb".freeze, "lib/addressable/uri.rb".freeze, "lib/addressable/version.rb".freeze, "spec/addressable/idna_spec.rb".freeze, "spec/addressable/net_http_compat_spec.rb".freeze, "spec/addressable/security_spec.rb".freeze, "spec/addressable/template_spec.rb".freeze, "spec/addressable/uri_spec.rb".freeze, "spec/spec_helper.rb".freeze, "tasks/clobber.rake".freeze, "tasks/gem.rake".freeze, "tasks/git.rake".freeze, "tasks/metrics.rake".freeze, "tasks/profile.rake".freeze, "tasks/rspec.rake".freeze, "tasks/yard.rake".freeze] + s.homepage = "https://github.com/sporkmonger/addressable".freeze + s.licenses = ["Apache-2.0".freeze] + s.rdoc_options = ["--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.0.3".freeze + s.summary = "URI Implementation".freeze + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_runtime_dependency(%q.freeze, [">= 2.0.2", "< 5.0"]) + s.add_development_dependency(%q.freeze, [">= 1.0", "< 3.0"]) + else + s.add_dependency(%q.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q.freeze, [">= 1.0", "< 3.0"]) + end + else + s.add_dependency(%q.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q.freeze, [">= 1.0", "< 3.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/data/unicode.data b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/data/unicode.data new file mode 100644 index 0000000..cdfc224 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/data/unicode.data differ diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable.rb new file mode 100644 index 0000000..b4e98b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require 'addressable/uri' +require 'addressable/template' diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna.rb new file mode 100644 index 0000000..e41c1f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#++ + + +begin + require "addressable/idna/native" +rescue LoadError + # libidn or the idn gem was not available, fall back on a pure-Ruby + # implementation... + require "addressable/idna/pure" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna/native.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna/native.rb new file mode 100644 index 0000000..84de8e8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna/native.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "idn" + +module Addressable + module IDNA + def self.punycode_encode(value) + IDN::Punycode.encode(value.to_s) + end + + def self.punycode_decode(value) + IDN::Punycode.decode(value.to_s) + end + + def self.unicode_normalize_kc(value) + IDN::Stringprep.nfkc_normalize(value.to_s) + end + + def self.to_ascii(value) + value.to_s.split('.', -1).map do |segment| + if segment.size > 0 && segment.size < 64 + IDN::Idna.toASCII(segment, IDN::Idna::ALLOW_UNASSIGNED) + elsif segment.size >= 64 + segment + else + '' + end + end.join('.') + end + + def self.to_unicode(value) + value.to_s.split('.', -1).map do |segment| + if segment.size > 0 && segment.size < 64 + IDN::Idna.toUnicode(segment, IDN::Idna::ALLOW_UNASSIGNED) + elsif segment.size >= 64 + segment + else + '' + end + end.join('.') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna/pure.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna/pure.rb new file mode 100644 index 0000000..7a0c1fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/idna/pure.rb @@ -0,0 +1,678 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +module Addressable + module IDNA + # This module is loosely based on idn_actionmailer by Mick Staugaard, + # the unicode library by Yoshida Masato, and the punycode implementation + # by Kazuhiro Nishiyama. Most of the code was copied verbatim, but + # some reformatting was done, and some translation from C was done. + # + # Without their code to work from as a base, we'd all still be relying + # on the presence of libidn. Which nobody ever seems to have installed. 
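Whichever backend gets loaded, the public surface is the same pair of conversion methods. A minimal usage sketch of that surface, using the sample hostname from the gem's own README (output shown as expected for addressable 2.8.0):

```ruby
require "addressable/idna"

# Behaves the same with the native (idn-ruby) backend or the pure-Ruby fallback.
ascii = Addressable::IDNA.to_ascii("www.詹姆斯.com")
#=> "www.xn--8ws00zhy3a.com"

Addressable::IDNA.to_unicode(ascii)
#=> "www.詹姆斯.com"
```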
+ # + # Original sources: + # http://github.com/staugaard/idn_actionmailer + # http://www.yoshidam.net/Ruby.html#unicode + # http://rubyforge.org/frs/?group_id=2550 + + + UNICODE_TABLE = File.expand_path( + File.join(File.dirname(__FILE__), '../../..', 'data/unicode.data') + ) + + ACE_PREFIX = "xn--" + + UTF8_REGEX = /\A(?: + [\x09\x0A\x0D\x20-\x7E] # ASCII + | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4nil5 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )*\z/mnx + + UTF8_REGEX_MULTIBYTE = /(?: + [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4nil5 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )/mnx + + # :startdoc: + + # Converts from a Unicode internationalized domain name to an ASCII + # domain name as described in RFC 3490. + def self.to_ascii(input) + input = input.to_s unless input.is_a?(String) + input = input.dup + if input.respond_to?(:force_encoding) + input.force_encoding(Encoding::ASCII_8BIT) + end + if input =~ UTF8_REGEX && input =~ UTF8_REGEX_MULTIBYTE + parts = unicode_downcase(input).split('.') + parts.map! do |part| + if part.respond_to?(:force_encoding) + part.force_encoding(Encoding::ASCII_8BIT) + end + if part =~ UTF8_REGEX && part =~ UTF8_REGEX_MULTIBYTE + ACE_PREFIX + punycode_encode(unicode_normalize_kc(part)) + else + part + end + end + parts.join('.') + else + input + end + end + + # Converts from an ASCII domain name to a Unicode internationalized + # domain name as described in RFC 3490. + def self.to_unicode(input) + input = input.to_s unless input.is_a?(String) + parts = input.split('.') + parts.map! do |part| + if part =~ /^#{ACE_PREFIX}(.+)/ + begin + punycode_decode(part[/^#{ACE_PREFIX}(.+)/, 1]) + rescue Addressable::IDNA::PunycodeBadInput + # toUnicode is explicitly defined as never-fails by the spec + part + end + else + part + end + end + output = parts.join('.') + if output.respond_to?(:force_encoding) + output.force_encoding(Encoding::UTF_8) + end + output + end + + # Unicode normalization form KC. + def self.unicode_normalize_kc(input) + input = input.to_s unless input.is_a?(String) + unpacked = input.unpack("U*") + unpacked = + unicode_compose(unicode_sort_canonical(unicode_decompose(unpacked))) + return unpacked.pack("U*") + end + + ## + # Unicode aware downcase method. + # + # @api private + # @param [String] input + # The input string. + # @return [String] The downcased result. + def self.unicode_downcase(input) + input = input.to_s unless input.is_a?(String) + unpacked = input.unpack("U*") + unpacked.map! 
{ |codepoint| lookup_unicode_lowercase(codepoint) } + return unpacked.pack("U*") + end + private_class_method :unicode_downcase + + def self.unicode_compose(unpacked) + unpacked_result = [] + length = unpacked.length + + return unpacked if length == 0 + + starter = unpacked[0] + starter_cc = lookup_unicode_combining_class(starter) + starter_cc = 256 if starter_cc != 0 + for i in 1...length + ch = unpacked[i] + + if (starter_cc == 0 && + (composite = unicode_compose_pair(starter, ch)) != nil) + starter = composite + else + unpacked_result << starter + starter = ch + end + end + unpacked_result << starter + return unpacked_result + end + private_class_method :unicode_compose + + def self.unicode_compose_pair(ch_one, ch_two) + if ch_one >= HANGUL_LBASE && ch_one < HANGUL_LBASE + HANGUL_LCOUNT && + ch_two >= HANGUL_VBASE && ch_two < HANGUL_VBASE + HANGUL_VCOUNT + # Hangul L + V + return HANGUL_SBASE + ( + (ch_one - HANGUL_LBASE) * HANGUL_VCOUNT + (ch_two - HANGUL_VBASE) + ) * HANGUL_TCOUNT + elsif ch_one >= HANGUL_SBASE && + ch_one < HANGUL_SBASE + HANGUL_SCOUNT && + (ch_one - HANGUL_SBASE) % HANGUL_TCOUNT == 0 && + ch_two >= HANGUL_TBASE && ch_two < HANGUL_TBASE + HANGUL_TCOUNT + # Hangul LV + T + return ch_one + (ch_two - HANGUL_TBASE) + end + + p = [] + + ucs4_to_utf8(ch_one, p) + ucs4_to_utf8(ch_two, p) + + return lookup_unicode_composition(p) + end + private_class_method :unicode_compose_pair + + def self.ucs4_to_utf8(char, buffer) + if char < 128 + buffer << char + elsif char < 2048 + buffer << (char >> 6 | 192) + buffer << (char & 63 | 128) + elsif char < 0x10000 + buffer << (char >> 12 | 224) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + elsif char < 0x200000 + buffer << (char >> 18 | 240) + buffer << (char >> 12 & 63 | 128) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + elsif char < 0x4000000 + buffer << (char >> 24 | 248) + buffer << (char >> 18 & 63 | 128) + buffer << (char >> 12 & 63 | 128) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + elsif char < 0x80000000 + buffer << (char >> 30 | 252) + buffer << (char >> 24 & 63 | 128) + buffer << (char >> 18 & 63 | 128) + buffer << (char >> 12 & 63 | 128) + buffer << (char >> 6 & 63 | 128) + buffer << (char & 63 | 128) + end + end + private_class_method :ucs4_to_utf8 + + def self.unicode_sort_canonical(unpacked) + unpacked = unpacked.dup + i = 1 + length = unpacked.length + + return unpacked if length < 2 + + while i < length + last = unpacked[i-1] + ch = unpacked[i] + last_cc = lookup_unicode_combining_class(last) + cc = lookup_unicode_combining_class(ch) + if cc != 0 && last_cc != 0 && last_cc > cc + unpacked[i] = last + unpacked[i-1] = ch + i -= 1 if i > 1 + else + i += 1 + end + end + return unpacked + end + private_class_method :unicode_sort_canonical + + def self.unicode_decompose(unpacked) + unpacked_result = [] + for cp in unpacked + if cp >= HANGUL_SBASE && cp < HANGUL_SBASE + HANGUL_SCOUNT + l, v, t = unicode_decompose_hangul(cp) + unpacked_result << l + unpacked_result << v if v + unpacked_result << t if t + else + dc = lookup_unicode_compatibility(cp) + unless dc + unpacked_result << cp + else + unpacked_result.concat(unicode_decompose(dc.unpack("U*"))) + end + end + end + return unpacked_result + end + private_class_method :unicode_decompose + + def self.unicode_decompose_hangul(codepoint) + sindex = codepoint - HANGUL_SBASE; + if sindex < 0 || sindex >= HANGUL_SCOUNT + l = codepoint + v = t = nil + return l, v, t + end + l = HANGUL_LBASE + sindex / HANGUL_NCOUNT + v 
= HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT + t = HANGUL_TBASE + sindex % HANGUL_TCOUNT + if t == HANGUL_TBASE + t = nil + end + return l, v, t + end + private_class_method :unicode_decompose_hangul + + def self.lookup_unicode_combining_class(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ? + (codepoint_data[UNICODE_DATA_COMBINING_CLASS] || 0) : + 0) + end + private_class_method :lookup_unicode_combining_class + + def self.lookup_unicode_compatibility(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ? + codepoint_data[UNICODE_DATA_COMPATIBILITY] : nil) + end + private_class_method :lookup_unicode_compatibility + + def self.lookup_unicode_lowercase(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ? + (codepoint_data[UNICODE_DATA_LOWERCASE] || codepoint) : + codepoint) + end + private_class_method :lookup_unicode_lowercase + + def self.lookup_unicode_composition(unpacked) + return COMPOSITION_TABLE[unpacked] + end + private_class_method :lookup_unicode_composition + + HANGUL_SBASE = 0xac00 + HANGUL_LBASE = 0x1100 + HANGUL_LCOUNT = 19 + HANGUL_VBASE = 0x1161 + HANGUL_VCOUNT = 21 + HANGUL_TBASE = 0x11a7 + HANGUL_TCOUNT = 28 + HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT # 588 + HANGUL_SCOUNT = HANGUL_LCOUNT * HANGUL_NCOUNT # 11172 + + UNICODE_DATA_COMBINING_CLASS = 0 + UNICODE_DATA_EXCLUSION = 1 + UNICODE_DATA_CANONICAL = 2 + UNICODE_DATA_COMPATIBILITY = 3 + UNICODE_DATA_UPPERCASE = 4 + UNICODE_DATA_LOWERCASE = 5 + UNICODE_DATA_TITLECASE = 6 + + begin + if defined?(FakeFS) + fakefs_state = FakeFS.activated? + FakeFS.deactivate! + end + # This is a sparse Unicode table. Codepoints without entries are + # assumed to have the value: [0, 0, nil, nil, nil, nil, nil] + UNICODE_DATA = File.open(UNICODE_TABLE, "rb") do |file| + Marshal.load(file.read) + end + ensure + if defined?(FakeFS) + FakeFS.activate! if fakefs_state + end + end + + COMPOSITION_TABLE = {} + UNICODE_DATA.each do |codepoint, data| + canonical = data[UNICODE_DATA_CANONICAL] + exclusion = data[UNICODE_DATA_EXCLUSION] + + if canonical && exclusion == 0 + COMPOSITION_TABLE[canonical.unpack("C*")] = codepoint + end + end + + UNICODE_MAX_LENGTH = 256 + ACE_MAX_LENGTH = 256 + + PUNYCODE_BASE = 36 + PUNYCODE_TMIN = 1 + PUNYCODE_TMAX = 26 + PUNYCODE_SKEW = 38 + PUNYCODE_DAMP = 700 + PUNYCODE_INITIAL_BIAS = 72 + PUNYCODE_INITIAL_N = 0x80 + PUNYCODE_DELIMITER = 0x2D + + PUNYCODE_MAXINT = 1 << 64 + + PUNYCODE_PRINT_ASCII = + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" + + " !\"\#$%&'()*+,-./" + + "0123456789:;<=>?" + + "@ABCDEFGHIJKLMNO" + + "PQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmno" + + "pqrstuvwxyz{|}~\n" + + # Input is invalid. + class PunycodeBadInput < StandardError; end + # Output would exceed the space provided. + class PunycodeBigOutput < StandardError; end + # Input needs wider integers to process. + class PunycodeOverflow < StandardError; end + + def self.punycode_encode(unicode) + unicode = unicode.to_s unless unicode.is_a?(String) + input = unicode.unpack("U*") + output = [0] * (ACE_MAX_LENGTH + 1) + input_length = input.size + output_length = [ACE_MAX_LENGTH] + + # Initialize the state + n = PUNYCODE_INITIAL_N + delta = out = 0 + max_out = output_length[0] + bias = PUNYCODE_INITIAL_BIAS + + # Handle the basic code points: + input_length.times do |j| + if punycode_basic?(input[j]) + if max_out - out < 2 + raise PunycodeBigOutput, + "Output would exceed the space provided." 
+ end + output[out] = input[j] + out += 1 + end + end + + h = b = out + + # h is the number of code points that have been handled, b is the + # number of basic code points, and out is the number of characters + # that have been output. + + if b > 0 + output[out] = PUNYCODE_DELIMITER + out += 1 + end + + # Main encoding loop: + + while h < input_length + # All non-basic code points < n have been + # handled already. Find the next larger one: + + m = PUNYCODE_MAXINT + input_length.times do |j| + m = input[j] if (n...m) === input[j] + end + + # Increase delta enough to advance the decoder's + # state to , but guard against overflow: + + if m - n > (PUNYCODE_MAXINT - delta) / (h + 1) + raise PunycodeOverflow, "Input needs wider integers to process." + end + delta += (m - n) * (h + 1) + n = m + + input_length.times do |j| + # Punycode does not need to check whether input[j] is basic: + if input[j] < n + delta += 1 + if delta == 0 + raise PunycodeOverflow, + "Input needs wider integers to process." + end + end + + if input[j] == n + # Represent delta as a generalized variable-length integer: + + q = delta; k = PUNYCODE_BASE + while true + if out >= max_out + raise PunycodeBigOutput, + "Output would exceed the space provided." + end + t = ( + if k <= bias + PUNYCODE_TMIN + elsif k >= bias + PUNYCODE_TMAX + PUNYCODE_TMAX + else + k - bias + end + ) + break if q < t + output[out] = + punycode_encode_digit(t + (q - t) % (PUNYCODE_BASE - t)) + out += 1 + q = (q - t) / (PUNYCODE_BASE - t) + k += PUNYCODE_BASE + end + + output[out] = punycode_encode_digit(q) + out += 1 + bias = punycode_adapt(delta, h + 1, h == b) + delta = 0 + h += 1 + end + end + + delta += 1 + n += 1 + end + + output_length[0] = out + + outlen = out + outlen.times do |j| + c = output[j] + unless c >= 0 && c <= 127 + raise StandardError, "Invalid output char." + end + unless PUNYCODE_PRINT_ASCII[c] + raise PunycodeBadInput, "Input is invalid." + end + end + + output[0..outlen].map { |x| x.chr }.join("").sub(/\0+\z/, "") + end + private_class_method :punycode_encode + + def self.punycode_decode(punycode) + input = [] + output = [] + + if ACE_MAX_LENGTH * 2 < punycode.size + raise PunycodeBigOutput, "Output would exceed the space provided." + end + punycode.each_byte do |c| + unless c >= 0 && c <= 127 + raise PunycodeBadInput, "Input is invalid." + end + input.push(c) + end + + input_length = input.length + output_length = [UNICODE_MAX_LENGTH] + + # Initialize the state + n = PUNYCODE_INITIAL_N + + out = i = 0 + max_out = output_length[0] + bias = PUNYCODE_INITIAL_BIAS + + # Handle the basic code points: Let b be the number of input code + # points before the last delimiter, or 0 if there is none, then + # copy the first b code points to the output. + + b = 0 + input_length.times do |j| + b = j if punycode_delimiter?(input[j]) + end + if b > max_out + raise PunycodeBigOutput, "Output would exceed the space provided." + end + + b.times do |j| + unless punycode_basic?(input[j]) + raise PunycodeBadInput, "Input is invalid." + end + output[out] = input[j] + out+=1 + end + + # Main decoding loop: Start just after the last delimiter if any + # basic code points were copied; start at the beginning otherwise. + + in_ = b > 0 ? b + 1 : 0 + while in_ < input_length + + # in_ is the index of the next character to be consumed, and + # out is the number of code points in the output array. + + # Decode a generalized variable-length integer into delta, + # which gets added to i. 
The overflow checking is easier + # if we increase i as we go, then subtract off its starting + # value at the end to obtain delta. + + oldi = i; w = 1; k = PUNYCODE_BASE + while true + if in_ >= input_length + raise PunycodeBadInput, "Input is invalid." + end + digit = punycode_decode_digit(input[in_]) + in_+=1 + if digit >= PUNYCODE_BASE + raise PunycodeBadInput, "Input is invalid." + end + if digit > (PUNYCODE_MAXINT - i) / w + raise PunycodeOverflow, "Input needs wider integers to process." + end + i += digit * w + t = ( + if k <= bias + PUNYCODE_TMIN + elsif k >= bias + PUNYCODE_TMAX + PUNYCODE_TMAX + else + k - bias + end + ) + break if digit < t + if w > PUNYCODE_MAXINT / (PUNYCODE_BASE - t) + raise PunycodeOverflow, "Input needs wider integers to process." + end + w *= PUNYCODE_BASE - t + k += PUNYCODE_BASE + end + + bias = punycode_adapt(i - oldi, out + 1, oldi == 0) + + # I was supposed to wrap around from out + 1 to 0, + # incrementing n each time, so we'll fix that now: + + if i / (out + 1) > PUNYCODE_MAXINT - n + raise PunycodeOverflow, "Input needs wider integers to process." + end + n += i / (out + 1) + i %= out + 1 + + # Insert n at position i of the output: + + # not needed for Punycode: + # raise PUNYCODE_INVALID_INPUT if decode_digit(n) <= base + if out >= max_out + raise PunycodeBigOutput, "Output would exceed the space provided." + end + + #memmove(output + i + 1, output + i, (out - i) * sizeof *output) + output[i + 1, out - i] = output[i, out - i] + output[i] = n + i += 1 + + out += 1 + end + + output_length[0] = out + + output.pack("U*") + end + private_class_method :punycode_decode + + def self.punycode_basic?(codepoint) + codepoint < 0x80 + end + private_class_method :punycode_basic? + + def self.punycode_delimiter?(codepoint) + codepoint == PUNYCODE_DELIMITER + end + private_class_method :punycode_delimiter? + + def self.punycode_encode_digit(d) + d + 22 + 75 * ((d < 26) ? 1 : 0) + end + private_class_method :punycode_encode_digit + + # Returns the numeric value of a basic codepoint + # (for use in representing integers) in the range 0 to + # base - 1, or PUNYCODE_BASE if codepoint does not represent a value. + def self.punycode_decode_digit(codepoint) + if codepoint - 48 < 10 + codepoint - 22 + elsif codepoint - 65 < 26 + codepoint - 65 + elsif codepoint - 97 < 26 + codepoint - 97 + else + PUNYCODE_BASE + end + end + private_class_method :punycode_decode_digit + + # Bias adaptation method + def self.punycode_adapt(delta, numpoints, firsttime) + delta = firsttime ? delta / PUNYCODE_DAMP : delta >> 1 + # delta >> 1 is a faster way of doing delta / 2 + delta += delta / numpoints + difference = PUNYCODE_BASE - PUNYCODE_TMIN + + k = 0 + while delta > (difference * PUNYCODE_TMAX) / 2 + delta /= difference + k += PUNYCODE_BASE + end + + k + (difference + 1) * delta / (delta + PUNYCODE_SKEW) + end + private_class_method :punycode_adapt + end + # :startdoc: +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/template.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/template.rb new file mode 100644 index 0000000..45f6ae6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/template.rb @@ -0,0 +1,1031 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "addressable/version" +require "addressable/uri" + +module Addressable + ## + # This is an implementation of a URI template based on + # RFC 6570 (http://tools.ietf.org/html/rfc6570). + class Template + # Constants used throughout the template code. + anything = + Addressable::URI::CharacterClasses::RESERVED + + Addressable::URI::CharacterClasses::UNRESERVED + + + variable_char_class = + Addressable::URI::CharacterClasses::ALPHA + + Addressable::URI::CharacterClasses::DIGIT + '_' + + var_char = + "(?>(?:[#{variable_char_class}]|%[a-fA-F0-9][a-fA-F0-9])+)" + RESERVED = + "(?:[#{anything}]|%[a-fA-F0-9][a-fA-F0-9])" + UNRESERVED = + "(?:[#{ + Addressable::URI::CharacterClasses::UNRESERVED + }]|%[a-fA-F0-9][a-fA-F0-9])" + variable = + "(?:#{var_char}(?:\\.?#{var_char})*)" + varspec = + "(?:(#{variable})(\\*|:\\d+)?)" + VARNAME = + /^#{variable}$/ + VARSPEC = + /^#{varspec}$/ + VARIABLE_LIST = + /^#{varspec}(?:,#{varspec})*$/ + operator = + "+#./;?&=,!@|" + EXPRESSION = + /\{([#{operator}])?(#{varspec}(?:,#{varspec})*)\}/ + + + LEADERS = { + '?' => '?', + '/' => '/', + '#' => '#', + '.' => '.', + ';' => ';', + '&' => '&' + } + JOINERS = { + '?' => '&', + '.' => '.', + ';' => ';', + '&' => '&', + '/' => '/' + } + + ## + # Raised if an invalid template value is supplied. + class InvalidTemplateValueError < StandardError + end + + ## + # Raised if an invalid template operator is used in a pattern. + class InvalidTemplateOperatorError < StandardError + end + + ## + # Raised if an invalid template operator is used in a pattern. + class TemplateOperatorAbortedError < StandardError + end + + ## + # This class represents the data that is extracted when a Template + # is matched against a URI. + class MatchData + ## + # Creates a new MatchData object. + # MatchData objects should never be instantiated directly. + # + # @param [Addressable::URI] uri + # The URI that the template was matched against. + def initialize(uri, template, mapping) + @uri = uri.dup.freeze + @template = template + @mapping = mapping.dup.freeze + end + + ## + # @return [Addressable::URI] + # The URI that the Template was matched against. + attr_reader :uri + + ## + # @return [Addressable::Template] + # The Template used for the match. + attr_reader :template + + ## + # @return [Hash] + # The mapping that resulted from the match. + # Note that this mapping does not include keys or values for + # variables that appear in the Template, but are not present + # in the URI. + attr_reader :mapping + + ## + # @return [Array] + # The list of variables that were present in the Template. + # Note that this list will include variables which do not appear + # in the mapping because they were not present in URI. + def variables + self.template.variables + end + alias_method :keys, :variables + alias_method :names, :variables + + ## + # @return [Array] + # The list of values that were captured by the Template. + # Note that this list will include nils for any variables which + # were in the Template, but did not appear in the URI. 
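As a quick illustration of these accessors, a minimal sketch (the template and URI are invented for the example; the accessor names are the ones documented above):

```ruby
require "addressable/template"

match = Addressable::Template.new(
  "http://example.com/{first}/{second}/"
).match("http://example.com/a/b/")

match.variables  #=> ["first", "second"]
match.captures   #=> ["a", "b"]
match.mapping    #=> {"first" => "a", "second" => "b"}
match["second"]  #=> "b"
```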
+ def values + @values ||= self.variables.inject([]) do |accu, key| + accu << self.mapping[key] + accu + end + end + alias_method :captures, :values + + ## + # Accesses captured values by name or by index. + # + # @param [String, Symbol, Fixnum] key + # Capture index or name. Note that when accessing by with index + # of 0, the full URI will be returned. The intention is to mimic + # the ::MatchData#[] behavior. + # + # @param [#to_int, nil] len + # If provided, an array of values will be returend with the given + # parameter used as length. + # + # @return [Array, String, nil] + # The captured value corresponding to the index or name. If the + # value was not provided or the key is unknown, nil will be + # returned. + # + # If the second parameter is provided, an array of that length will + # be returned instead. + def [](key, len = nil) + if len + to_a[key, len] + elsif String === key or Symbol === key + mapping[key.to_s] + else + to_a[key] + end + end + + ## + # @return [Array] + # Array with the matched URI as first element followed by the captured + # values. + def to_a + [to_s, *values] + end + + ## + # @return [String] + # The matched URI as String. + def to_s + uri.to_s + end + alias_method :string, :to_s + + # Returns multiple captured values at once. + # + # @param [String, Symbol, Fixnum] *indexes + # Indices of the captures to be returned + # + # @return [Array] + # Values corresponding to given indices. + # + # @see Addressable::Template::MatchData#[] + def values_at(*indexes) + indexes.map { |i| self[i] } + end + + ## + # Returns a String representation of the MatchData's state. + # + # @return [String] The MatchData's state, as a String. + def inspect + sprintf("#<%s:%#0x RESULT:%s>", + self.class.to_s, self.object_id, self.mapping.inspect) + end + + ## + # Dummy method for code expecting a ::MatchData instance + # + # @return [String] An empty string. + def pre_match + "" + end + alias_method :post_match, :pre_match + end + + ## + # Creates a new Addressable::Template object. + # + # @param [#to_str] pattern The URI Template pattern. + # + # @return [Addressable::Template] The initialized Template object. + def initialize(pattern) + if !pattern.respond_to?(:to_str) + raise TypeError, "Can't convert #{pattern.class} into String." + end + @pattern = pattern.to_str.dup.freeze + end + + ## + # Freeze URI, initializing instance variables. + # + # @return [Addressable::URI] The frozen URI object. + def freeze + self.variables + self.variable_defaults + self.named_captures + super + end + + ## + # @return [String] The Template object's pattern. + attr_reader :pattern + + ## + # Returns a String representation of the Template object's state. + # + # @return [String] The Template object's state, as a String. + def inspect + sprintf("#<%s:%#0x PATTERN:%s>", + self.class.to_s, self.object_id, self.pattern) + end + + ## + # Returns true if the Template objects are equal. This method + # does NOT normalize either Template before doing the comparison. + # + # @param [Object] template The Template to compare. + # + # @return [TrueClass, FalseClass] + # true if the Templates are equivalent, false + # otherwise. + def ==(template) + return false unless template.kind_of?(Template) + return self.pattern == template.pattern + end + + ## + # Addressable::Template makes no distinction between `==` and `eql?`. + # + # @see #== + alias_method :eql?, :== + + ## + # Extracts a mapping from the URI using a URI Template pattern. 
+ # + # @param [Addressable::URI, #to_str] uri + # The URI to extract from. + # + # @param [#restore, #match] processor + # A template processor object may optionally be supplied. + # + # The object should respond to either the restore or + # match messages or both. The restore method should + # take two parameters: `[String] name` and `[String] value`. + # The restore method should reverse any transformations that + # have been performed on the value to ensure a valid URI. + # The match method should take a single + # parameter: `[String] name`. The match method should return + # a String containing a regular expression capture group for + # matching on that particular variable. The default value is `".*?"`. + # The match method has no effect on multivariate operator + # expansions. + # + # @return [Hash, NilClass] + # The Hash mapping that was extracted from the URI, or + # nil if the URI didn't match the template. + # + # @example + # class ExampleProcessor + # def self.restore(name, value) + # return value.gsub(/\+/, " ") if name == "query" + # return value + # end + # + # def self.match(name) + # return ".*?" if name == "first" + # return ".*" + # end + # end + # + # uri = Addressable::URI.parse( + # "http://example.com/search/an+example+search+query/" + # ) + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).extract(uri, ExampleProcessor) + # #=> {"query" => "an example search query"} + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # Addressable::Template.new( + # "http://example.com/{first}/{second}/" + # ).extract(uri, ExampleProcessor) + # #=> {"first" => "a", "second" => "b/c"} + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # Addressable::Template.new( + # "http://example.com/{first}/{-list|/|second}/" + # ).extract(uri) + # #=> {"first" => "a", "second" => ["b", "c"]} + def extract(uri, processor=nil) + match_data = self.match(uri, processor) + return (match_data ? match_data.mapping : nil) + end + + ## + # Extracts match data from the URI using a URI Template pattern. + # + # @param [Addressable::URI, #to_str] uri + # The URI to extract from. + # + # @param [#restore, #match] processor + # A template processor object may optionally be supplied. + # + # The object should respond to either the restore or + # match messages or both. The restore method should + # take two parameters: `[String] name` and `[String] value`. + # The restore method should reverse any transformations that + # have been performed on the value to ensure a valid URI. + # The match method should take a single + # parameter: `[String] name`. The match method should return + # a String containing a regular expression capture group for + # matching on that particular variable. The default value is `".*?"`. + # The match method has no effect on multivariate operator + # expansions. + # + # @return [Hash, NilClass] + # The Hash mapping that was extracted from the URI, or + # nil if the URI didn't match the template. + # + # @example + # class ExampleProcessor + # def self.restore(name, value) + # return value.gsub(/\+/, " ") if name == "query" + # return value + # end + # + # def self.match(name) + # return ".*?" 
if name == "first" + # return ".*" + # end + # end + # + # uri = Addressable::URI.parse( + # "http://example.com/search/an+example+search+query/" + # ) + # match = Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).match(uri, ExampleProcessor) + # match.variables + # #=> ["query"] + # match.captures + # #=> ["an example search query"] + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # match = Addressable::Template.new( + # "http://example.com/{first}/{+second}/" + # ).match(uri, ExampleProcessor) + # match.variables + # #=> ["first", "second"] + # match.captures + # #=> ["a", "b/c"] + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # match = Addressable::Template.new( + # "http://example.com/{first}{/second*}/" + # ).match(uri) + # match.variables + # #=> ["first", "second"] + # match.captures + # #=> ["a", ["b", "c"]] + def match(uri, processor=nil) + uri = Addressable::URI.parse(uri) unless uri.is_a?(Addressable::URI) + mapping = {} + + # First, we need to process the pattern, and extract the values. + expansions, expansion_regexp = + parse_template_pattern(pattern, processor) + + return nil unless uri.to_str.match(expansion_regexp) + unparsed_values = uri.to_str.scan(expansion_regexp).flatten + + if uri.to_str == pattern + return Addressable::Template::MatchData.new(uri, self, mapping) + elsif expansions.size > 0 + index = 0 + expansions.each do |expansion| + _, operator, varlist = *expansion.match(EXPRESSION) + varlist.split(',').each do |varspec| + _, name, modifier = *varspec.match(VARSPEC) + mapping[name] ||= nil + case operator + when nil, '+', '#', '/', '.' + unparsed_value = unparsed_values[index] + name = varspec[VARSPEC, 1] + value = unparsed_value + value = value.split(JOINERS[operator]) if value && modifier == '*' + when ';', '?', '&' + if modifier == '*' + if unparsed_values[index] + value = unparsed_values[index].split(JOINERS[operator]) + value = value.inject({}) do |acc, v| + key, val = v.split('=') + val = "" if val.nil? + acc[key] = val + acc + end + end + else + if (unparsed_values[index]) + name, value = unparsed_values[index].split('=') + value = "" if value.nil? + end + end + end + if processor != nil && processor.respond_to?(:restore) + value = processor.restore(name, value) + end + if processor == nil + if value.is_a?(Hash) + value = value.inject({}){|acc, (k, v)| + acc[Addressable::URI.unencode_component(k)] = + Addressable::URI.unencode_component(v) + acc + } + elsif value.is_a?(Array) + value = value.map{|v| Addressable::URI.unencode_component(v) } + else + value = Addressable::URI.unencode_component(value) + end + end + if !mapping.has_key?(name) || mapping[name].nil? + # Doesn't exist, set to value (even if value is nil) + mapping[name] = value + end + index = index + 1 + end + end + return Addressable::Template::MatchData.new(uri, self, mapping) + else + return nil + end + end + + ## + # Expands a URI template into another URI template. + # + # @param [Hash] mapping The mapping that corresponds to the pattern. + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. 
The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError + # exception will be raised if the value is invalid. The transform + # method should return the transformed variable value as a String. + # If a transform method is used, the value will not be percent + # encoded automatically. Unicode normalization will be performed both + # before and after sending the value to the transform method. + # + # @return [Addressable::Template] The partially expanded URI template. + # + # @example + # Addressable::Template.new( + # "http://example.com/{one}/{two}/" + # ).partial_expand({"one" => "1"}).pattern + # #=> "http://example.com/1/{two}/" + # + # Addressable::Template.new( + # "http://example.com/{?one,two}/" + # ).partial_expand({"one" => "1"}).pattern + # #=> "http://example.com/?one=1{&two}/" + # + # Addressable::Template.new( + # "http://example.com/{?one,two,three}/" + # ).partial_expand({"one" => "1", "three" => 3}).pattern + # #=> "http://example.com/?one=1{&two}&three=3" + def partial_expand(mapping, processor=nil, normalize_values=true) + result = self.pattern.dup + mapping = normalize_keys(mapping) + result.gsub!( EXPRESSION ) do |capture| + transform_partial_capture(mapping, capture, processor, normalize_values) + end + return Addressable::Template.new(result) + end + + ## + # Expands a URI template into a full URI. + # + # @param [Hash] mapping The mapping that corresponds to the pattern. + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError + # exception will be raised if the value is invalid. The transform + # method should return the transformed variable value as a String. + # If a transform method is used, the value will not be percent + # encoded automatically. Unicode normalization will be performed both + # before and after sending the value to the transform method. + # + # @return [Addressable::URI] The expanded URI template. 
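A small sketch of the simplest case, with no processor. The URL and values are illustrative; symbol keys are assumed to be acceptable because the template stringifies mapping keys before expansion:

```ruby
require "addressable/template"

template = Addressable::Template.new("http://example.com/{path}{?q}")

# String and Symbol keys are both accepted; values are percent-encoded.
template.expand({ path: "search", q: "ruby uri" }).to_s
#=> "http://example.com/search?q=ruby%20uri"
```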
+ # + # @example + # class ExampleProcessor + # def self.validate(name, value) + # return !!(value =~ /^[\w ]+$/) if name == "query" + # return true + # end + # + # def self.transform(name, value) + # return value.gsub(/ /, "+") if name == "query" + # return value + # end + # end + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "an example search query"}, + # ExampleProcessor + # ).to_str + # #=> "http://example.com/search/an+example+search+query/" + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "an example search query"} + # ).to_str + # #=> "http://example.com/search/an%20example%20search%20query/" + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "bogus!"}, + # ExampleProcessor + # ).to_str + # #=> Addressable::Template::InvalidTemplateValueError + def expand(mapping, processor=nil, normalize_values=true) + result = self.pattern.dup + mapping = normalize_keys(mapping) + result.gsub!( EXPRESSION ) do |capture| + transform_capture(mapping, capture, processor, normalize_values) + end + return Addressable::URI.parse(result) + end + + ## + # Returns an Array of variables used within the template pattern. + # The variables are listed in the Array in the order they appear within + # the pattern. Multiple occurrences of a variable within a pattern are + # not represented in this Array. + # + # @return [Array] The variables present in the template's pattern. + def variables + @variables ||= ordered_variable_defaults.map { |var, val| var }.uniq + end + alias_method :keys, :variables + alias_method :names, :variables + + ## + # Returns a mapping of variables to their default values specified + # in the template. Variables without defaults are not returned. + # + # @return [Hash] Mapping of template variables to their defaults + def variable_defaults + @variable_defaults ||= + Hash[*ordered_variable_defaults.reject { |k, v| v.nil? }.flatten] + end + + ## + # Coerces a template into a `Regexp` object. This regular expression will + # behave very similarly to the actual template, and should match the same + # URI values, but it cannot fully handle, for example, values that would + # extract to an `Array`. + # + # @return [Regexp] A regular expression which should match the template. + def to_regexp + _, source = parse_template_pattern(pattern) + Regexp.new(source) + end + + ## + # Returns the source of the coerced `Regexp`. + # + # @return [String] The source of the `Regexp` given by {#to_regexp}. + # + # @api private + def source + self.to_regexp.source + end + + ## + # Returns the named captures of the coerced `Regexp`. + # + # @return [Hash] The named captures of the `Regexp` given by {#to_regexp}. + # + # @api private + def named_captures + self.to_regexp.named_captures + end + + private + def ordered_variable_defaults + @ordered_variable_defaults ||= begin + expansions, _ = parse_template_pattern(pattern) + expansions.map do |capture| + _, _, varlist = *capture.match(EXPRESSION) + varlist.split(',').map do |varspec| + varspec[VARSPEC, 1] + end + end.flatten + end + end + + + ## + # Loops through each capture and expands any values available in mapping + # + # @param [Hash] mapping + # Set of keys to expand + # @param [String] capture + # The expression to expand + # @param [#validate, #transform] processor + # An optional processor object may be supplied. 
+ # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError exception + # will be raised if the value is invalid. The transform method + # should return the transformed variable value as a String. If a + # transform method is used, the value will not be percent encoded + # automatically. Unicode normalization will be performed both before and + # after sending the value to the transform method. + # + # @return [String] The expanded expression + def transform_partial_capture(mapping, capture, processor = nil, + normalize_values = true) + _, operator, varlist = *capture.match(EXPRESSION) + + vars = varlist.split(",") + + if operator == "?" + # partial expansion of form style query variables sometimes requires a + # slight reordering of the variables to produce a valid url. + first_to_expand = vars.find { |varspec| + _, name, _ = *varspec.match(VARSPEC) + mapping.key?(name) && !mapping[name].nil? + } + + vars = [first_to_expand] + vars.reject {|varspec| varspec == first_to_expand} if first_to_expand + end + + vars. + inject("".dup) do |acc, varspec| + _, name, _ = *varspec.match(VARSPEC) + next_val = if mapping.key? name + transform_capture(mapping, "{#{operator}#{varspec}}", + processor, normalize_values) + else + "{#{operator}#{varspec}}" + end + # If we've already expanded at least one '?' operator with non-empty + # value, change to '&' + operator = "&" if (operator == "?") && (next_val != "") + acc << next_val + end + end + + ## + # Transforms a mapped value so that values can be substituted into the + # template. + # + # @param [Hash] mapping The mapping to replace captures + # @param [String] capture + # The expression to replace + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError exception + # will be raised if the value is invalid. The transform method + # should return the transformed variable value as a String. If a + # transform method is used, the value will not be percent encoded + # automatically. Unicode normalization will be performed both before and + # after sending the value to the transform method. 
+ # + # @return [String] The expanded expression + def transform_capture(mapping, capture, processor=nil, + normalize_values=true) + _, operator, varlist = *capture.match(EXPRESSION) + return_value = varlist.split(',').inject([]) do |acc, varspec| + _, name, modifier = *varspec.match(VARSPEC) + value = mapping[name] + unless value == nil || value == {} + allow_reserved = %w(+ #).include?(operator) + # Common primitives where the .to_s output is well-defined + if Numeric === value || Symbol === value || + value == true || value == false + value = value.to_s + end + length = modifier.gsub(':', '').to_i if modifier =~ /^:\d+/ + + unless (Hash === value) || + value.respond_to?(:to_ary) || value.respond_to?(:to_str) + raise TypeError, + "Can't convert #{value.class} into String or Array." + end + + value = normalize_value(value) if normalize_values + + if processor == nil || !processor.respond_to?(:transform) + # Handle percent escaping + if allow_reserved + encode_map = + Addressable::URI::CharacterClasses::RESERVED + + Addressable::URI::CharacterClasses::UNRESERVED + else + encode_map = Addressable::URI::CharacterClasses::UNRESERVED + end + if value.kind_of?(Array) + transformed_value = value.map do |val| + if length + Addressable::URI.encode_component(val[0...length], encode_map) + else + Addressable::URI.encode_component(val, encode_map) + end + end + unless modifier == "*" + transformed_value = transformed_value.join(',') + end + elsif value.kind_of?(Hash) + transformed_value = value.map do |key, val| + if modifier == "*" + "#{ + Addressable::URI.encode_component( key, encode_map) + }=#{ + Addressable::URI.encode_component( val, encode_map) + }" + else + "#{ + Addressable::URI.encode_component( key, encode_map) + },#{ + Addressable::URI.encode_component( val, encode_map) + }" + end + end + unless modifier == "*" + transformed_value = transformed_value.join(',') + end + else + if length + transformed_value = Addressable::URI.encode_component( + value[0...length], encode_map) + else + transformed_value = Addressable::URI.encode_component( + value, encode_map) + end + end + end + + # Process, if we've got a processor + if processor != nil + if processor.respond_to?(:validate) + if !processor.validate(name, value) + display_value = value.kind_of?(Array) ? value.inspect : value + raise InvalidTemplateValueError, + "#{name}=#{display_value} is an invalid template value." + end + end + if processor.respond_to?(:transform) + transformed_value = processor.transform(name, value) + if normalize_values + transformed_value = normalize_value(transformed_value) + end + end + end + acc << [name, transformed_value] + end + acc + end + return "" if return_value.empty? + join_values(operator, return_value) + end + + ## + # Takes a set of values, and joins them together based on the + # operator. + # + # @param [String, Nil] operator One of the operators from the set + # (?,&,+,#,;,/,.), or nil if there wasn't one. + # @param [Array] return_value + # The set of return values (as [variable_name, value] tuples) that will + # be joined together. + # + # @return [String] The transformed mapped value + def join_values(operator, return_value) + leader = LEADERS.fetch(operator, '') + joiner = JOINERS.fetch(operator, ',') + case operator + when '&', '?' 
+ leader + return_value.map{|k,v| + if v.is_a?(Array) && v.first =~ /=/ + v.join(joiner) + elsif v.is_a?(Array) + v.map{|inner_value| "#{k}=#{inner_value}"}.join(joiner) + else + "#{k}=#{v}" + end + }.join(joiner) + when ';' + return_value.map{|k,v| + if v.is_a?(Array) && v.first =~ /=/ + ';' + v.join(";") + elsif v.is_a?(Array) + ';' + v.map{|inner_value| "#{k}=#{inner_value}"}.join(";") + else + v && v != '' ? ";#{k}=#{v}" : ";#{k}" + end + }.join + else + leader + return_value.map{|k,v| v}.join(joiner) + end + end + + ## + # Takes a set of values, and joins them together based on the + # operator. + # + # @param [Hash, Array, String] value + # Normalizes keys and values with IDNA#unicode_normalize_kc + # + # @return [Hash, Array, String] The normalized values + def normalize_value(value) + unless value.is_a?(Hash) + value = value.respond_to?(:to_ary) ? value.to_ary : value.to_str + end + + # Handle unicode normalization + if value.kind_of?(Array) + value.map! { |val| Addressable::IDNA.unicode_normalize_kc(val) } + elsif value.kind_of?(Hash) + value = value.inject({}) { |acc, (k, v)| + acc[Addressable::IDNA.unicode_normalize_kc(k)] = + Addressable::IDNA.unicode_normalize_kc(v) + acc + } + else + value = Addressable::IDNA.unicode_normalize_kc(value) + end + value + end + + ## + # Generates a hash with string keys + # + # @param [Hash] mapping A mapping hash to normalize + # + # @return [Hash] + # A hash with stringified keys + def normalize_keys(mapping) + return mapping.inject({}) do |accu, pair| + name, value = pair + if Symbol === name + name = name.to_s + elsif name.respond_to?(:to_str) + name = name.to_str + else + raise TypeError, + "Can't convert #{name.class} into String." + end + accu[name] = value + accu + end + end + + ## + # Generates the Regexp that parses a template pattern. Memoizes the + # value if template processor not set (processors may not be deterministic) + # + # @param [String] pattern The URI template pattern. + # @param [#match] processor The template processor to use. + # + # @return [Array, Regexp] + # An array of expansion variables nad a regular expression which may be + # used to parse a template pattern + def parse_template_pattern(pattern, processor = nil) + if processor.nil? && pattern == @pattern + @cached_template_parse ||= + parse_new_template_pattern(pattern, processor) + else + parse_new_template_pattern(pattern, processor) + end + end + + ## + # Generates the Regexp that parses a template pattern. + # + # @param [String] pattern The URI template pattern. + # @param [#match] processor The template processor to use. + # + # @return [Array, Regexp] + # An array of expansion variables nad a regular expression which may be + # used to parse a template pattern + def parse_new_template_pattern(pattern, processor = nil) + # Escape the pattern. The two gsubs restore the escaped curly braces + # back to their original form. Basically, escape everything that isn't + # within an expansion. + escaped_pattern = Regexp.escape( + pattern + ).gsub(/\\\{(.*?)\\\}/) do |escaped| + escaped.gsub(/\\(.)/, "\\1") + end + + expansions = [] + + # Create a regular expression that captures the values of the + # variables in the URI. 
+ regexp_string = escaped_pattern.gsub( EXPRESSION ) do |expansion| + + expansions << expansion + _, operator, varlist = *expansion.match(EXPRESSION) + leader = Regexp.escape(LEADERS.fetch(operator, '')) + joiner = Regexp.escape(JOINERS.fetch(operator, ',')) + combined = varlist.split(',').map do |varspec| + _, name, modifier = *varspec.match(VARSPEC) + + result = processor && processor.respond_to?(:match) ? processor.match(name) : nil + if result + "(?<#{name}>#{ result })" + else + group = case operator + when '+' + "#{ RESERVED }*?" + when '#' + "#{ RESERVED }*?" + when '/' + "#{ UNRESERVED }*?" + when '.' + "#{ UNRESERVED.gsub('\.', '') }*?" + when ';' + "#{ UNRESERVED }*=?#{ UNRESERVED }*?" + when '?' + "#{ UNRESERVED }*=#{ UNRESERVED }*?" + when '&' + "#{ UNRESERVED }*=#{ UNRESERVED }*?" + else + "#{ UNRESERVED }*?" + end + if modifier == '*' + "(?<#{name}>#{group}(?:#{joiner}?#{group})*)?" + else + "(?<#{name}>#{group})?" + end + end + end.join("#{joiner}?") + "(?:|#{leader}#{combined})" + end + + # Ensure that the regular expression matches the whole URI. + regexp_string = "^#{regexp_string}$" + return expansions, Regexp.new(regexp_string) + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/uri.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/uri.rb new file mode 100644 index 0000000..6b5f4fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/uri.rb @@ -0,0 +1,2556 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "addressable/version" +require "addressable/idna" +require "public_suffix" + +## +# Addressable is a library for processing links and URIs. +module Addressable + ## + # This is an implementation of a URI parser based on + # RFC 3986, + # RFC 3987. + class URI + ## + # Raised if something other than a uri is supplied. + class InvalidURIError < StandardError + end + + ## + # Container for the character classes specified in + # RFC 3986. + module CharacterClasses + ALPHA = "a-zA-Z" + DIGIT = "0-9" + GEN_DELIMS = "\\:\\/\\?\\#\\[\\]\\@" + SUB_DELIMS = "\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=" + RESERVED = GEN_DELIMS + SUB_DELIMS + UNRESERVED = ALPHA + DIGIT + "\\-\\.\\_\\~" + PCHAR = UNRESERVED + SUB_DELIMS + "\\:\\@" + SCHEME = ALPHA + DIGIT + "\\-\\+\\." + HOST = UNRESERVED + SUB_DELIMS + "\\[\\:\\]" + AUTHORITY = PCHAR + "\\[\\:\\]" + PATH = PCHAR + "\\/" + QUERY = PCHAR + "\\/\\?" + FRAGMENT = PCHAR + "\\/\\?" 
+ end + + module NormalizeCharacterClasses + HOST = /[^#{CharacterClasses::HOST}]/ + UNRESERVED = /[^#{CharacterClasses::UNRESERVED}]/ + PCHAR = /[^#{CharacterClasses::PCHAR}]/ + SCHEME = /[^#{CharacterClasses::SCHEME}]/ + FRAGMENT = /[^#{CharacterClasses::FRAGMENT}]/ + QUERY = %r{[^a-zA-Z0-9\-\.\_\~\!\$\'\(\)\*\+\,\=\:\@\/\?%]|%(?!2B|2b)} + end + + SLASH = '/' + EMPTY_STR = '' + + URIREGEX = /^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$/ + + PORT_MAPPING = { + "http" => 80, + "https" => 443, + "ftp" => 21, + "tftp" => 69, + "sftp" => 22, + "ssh" => 22, + "svn+ssh" => 22, + "telnet" => 23, + "nntp" => 119, + "gopher" => 70, + "wais" => 210, + "ldap" => 389, + "prospero" => 1525 + }.freeze + + ## + # Returns a URI object based on the parsed string. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI string to parse. + # No parsing is performed if the object is already an + # Addressable::URI. + # + # @return [Addressable::URI] The parsed URI. + def self.parse(uri) + # If we were given nil, return nil. + return nil unless uri + # If a URI object is passed, just return itself. + return uri.dup if uri.kind_of?(self) + + # If a URI object of the Ruby standard library variety is passed, + # convert it to a string, then parse the string. + # We do the check this way because we don't want to accidentally + # cause a missing constant exception to be thrown. + if uri.class.name =~ /^URI\b/ + uri = uri.to_s + end + + # Otherwise, convert to a String + begin + uri = uri.to_str + rescue TypeError, NoMethodError + raise TypeError, "Can't convert #{uri.class} into String." + end if not uri.is_a? String + + # This Regexp supplied as an example in RFC 3986, and it works great. + scan = uri.scan(URIREGEX) + fragments = scan[0] + scheme = fragments[1] + authority = fragments[3] + path = fragments[4] + query = fragments[6] + fragment = fragments[8] + user = nil + password = nil + host = nil + port = nil + if authority != nil + # The Regexp above doesn't split apart the authority. + userinfo = authority[/^([^\[\]]*)@/, 1] + if userinfo != nil + user = userinfo.strip[/^([^:]*):?/, 1] + password = userinfo.strip[/:(.*)$/, 1] + end + host = authority.sub( + /^([^\[\]]*)@/, EMPTY_STR + ).sub( + /:([^:@\[\]]*?)$/, EMPTY_STR + ) + port = authority[/:([^:@\[\]]*?)$/, 1] + end + if port == EMPTY_STR + port = nil + end + + return new( + :scheme => scheme, + :user => user, + :password => password, + :host => host, + :port => port, + :path => path, + :query => query, + :fragment => fragment + ) + end + + ## + # Converts an input to a URI. The input does not have to be a valid + # URI — the method will use heuristics to guess what URI was intended. + # This is not standards-compliant, merely user-friendly. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI string to parse. + # No parsing is performed if the object is already an + # Addressable::URI. + # @param [Hash] hints + # A Hash of hints to the heuristic parser. + # Defaults to {:scheme => "http"}. + # + # @return [Addressable::URI] The parsed URI. + def self.heuristic_parse(uri, hints={}) + # If we were given nil, return nil. + return nil unless uri + # If a URI object is passed, just return itself. + return uri.dup if uri.kind_of?(self) + + # If a URI object of the Ruby standard library variety is passed, + # convert it to a string, then parse the string. + # We do the check this way because we don't want to accidentally + # cause a missing constant exception to be thrown. 
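A brief sketch of the heuristic behaviour described above (the host and hint values are invented for the example):

```ruby
require "addressable/uri"

# A scheme-less string is coerced into a full URI using the :scheme hint,
# which defaults to "http".
Addressable::URI.heuristic_parse("example.com/articles?id=5").to_s
#=> "http://example.com/articles?id=5"

Addressable::URI.heuristic_parse("example.com", { :scheme => "https" }).to_s
#=> "https://example.com"
```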
+ if uri.class.name =~ /^URI\b/ + uri = uri.to_s + end + + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + # Otherwise, convert to a String + uri = uri.to_str.dup.strip + hints = { + :scheme => "http" + }.merge(hints) + case uri + when /^http:\//i + uri.sub!(/^http:\/+/i, "http://") + when /^https:\//i + uri.sub!(/^https:\/+/i, "https://") + when /^feed:\/+http:\//i + uri.sub!(/^feed:\/+http:\/+/i, "feed:http://") + when /^feed:\//i + uri.sub!(/^feed:\/+/i, "feed://") + when %r[^file:/{4}]i + uri.sub!(%r[^file:/+]i, "file:////") + when %r[^file://localhost/]i + uri.sub!(%r[^file://localhost/+]i, "file:///") + when %r[^file:/+]i + uri.sub!(%r[^file:/+]i, "file:///") + when /^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/ + uri.sub!(/^/, hints[:scheme] + "://") + when /\A\d+\..*:\d+\z/ + uri = "#{hints[:scheme]}://#{uri}" + end + match = uri.match(URIREGEX) + fragments = match.captures + authority = fragments[3] + if authority && authority.length > 0 + new_authority = authority.tr("\\", "/").gsub(" ", "%20") + # NOTE: We want offset 4, not 3! + offset = match.offset(4) + uri = uri.dup + uri[offset[0]...offset[1]] = new_authority + end + parsed = self.parse(uri) + if parsed.scheme =~ /^[^\/?#\.]+\.[^\/?#]+$/ + parsed = self.parse(hints[:scheme] + "://" + uri) + end + if parsed.path.include?(".") + if parsed.path[/\b@\b/] + parsed.scheme = "mailto" unless parsed.scheme + elsif new_host = parsed.path[/^([^\/]+\.[^\/]*)/, 1] + parsed.defer_validation do + new_path = parsed.path.sub( + Regexp.new("^" + Regexp.escape(new_host)), EMPTY_STR) + parsed.host = new_host + parsed.path = new_path + parsed.scheme = hints[:scheme] unless parsed.scheme + end + end + end + return parsed + end + + ## + # Converts a path to a file scheme URI. If the path supplied is + # relative, it will be returned as a relative URI. If the path supplied + # is actually a non-file URI, it will parse the URI as if it had been + # parsed with Addressable::URI.parse. Handles all of the + # various Microsoft-specific formats for specifying paths. + # + # @param [String, Addressable::URI, #to_str] path + # Typically a String path to a file or directory, but + # will return a sensible return value if an absolute URI is supplied + # instead. + # + # @return [Addressable::URI] + # The parsed file scheme URI or the original URI if some other URI + # scheme was provided. + # + # @example + # base = Addressable::URI.convert_path("/absolute/path/") + # uri = Addressable::URI.convert_path("relative/path") + # (base + uri).to_s + # #=> "file:///absolute/path/relative/path" + # + # Addressable::URI.convert_path( + # "c:\\windows\\My Documents 100%20\\foo.txt" + # ).to_s + # #=> "file:///c:/windows/My%20Documents%20100%20/foo.txt" + # + # Addressable::URI.convert_path("http://example.com/").to_s + # #=> "http://example.com/" + def self.convert_path(path) + # If we were given nil, return nil. + return nil unless path + # If a URI object is passed, just return itself. + return path if path.kind_of?(self) + if !path.respond_to?(:to_str) + raise TypeError, "Can't convert #{path.class} into String." 
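heuristic_parse, defined above, guesses a usable URI from sloppy input rather than strictly following RFC 3986. A sketch under assumed inputs:

    require "addressable/uri"

    Addressable::URI.heuristic_parse("example.com/about").to_s
    # => "http://example.com/about"
    Addressable::URI.heuristic_parse("example.com", :scheme => "https").to_s
    # => "https://example.com"

The :scheme hint only applies when the input does not already carry a scheme of its own.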
+ end + # Otherwise, convert to a String + path = path.to_str.strip + + path.sub!(/^file:\/?\/?/, EMPTY_STR) if path =~ /^file:\/?\/?/ + path = SLASH + path if path =~ /^([a-zA-Z])[\|:]/ + uri = self.parse(path) + + if uri.scheme == nil + # Adjust windows-style uris + uri.path.sub!(/^\/?([a-zA-Z])[\|:][\\\/]/) do + "/#{$1.downcase}:/" + end + uri.path.tr!("\\", SLASH) + if File.exist?(uri.path) && + File.stat(uri.path).directory? + uri.path.chomp!(SLASH) + uri.path = uri.path + '/' + end + + # If the path is absolute, set the scheme and host. + if uri.path.start_with?(SLASH) + uri.scheme = "file" + uri.host = EMPTY_STR + end + uri.normalize! + end + + return uri + end + + ## + # Joins several URIs together. + # + # @param [String, Addressable::URI, #to_str] *uris + # The URIs to join. + # + # @return [Addressable::URI] The joined URI. + # + # @example + # base = "http://example.com/" + # uri = Addressable::URI.parse("relative/path") + # Addressable::URI.join(base, uri) + # #=> # + def self.join(*uris) + uri_objects = uris.collect do |uri| + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + uri.kind_of?(self) ? uri : self.parse(uri.to_str) + end + result = uri_objects.shift.dup + for uri in uri_objects + result.join!(uri) + end + return result + end + + ## + # Tables used to optimize encoding operations in `self.encode_component` + # and `self.normalize_component` + SEQUENCE_ENCODING_TABLE = Hash.new do |hash, sequence| + hash[sequence] = sequence.unpack("C*").map do |c| + format("%02x", c) + end.join + end + + SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE = Hash.new do |hash, sequence| + hash[sequence] = sequence.unpack("C*").map do |c| + format("%%%02X", c) + end.join + end + + ## + # Percent encodes a URI component. + # + # @param [String, #to_str] component The URI component to encode. + # + # @param [String, Regexp] character_class + # The characters which are not percent encoded. If a String + # is passed, the String must be formatted as a regular + # expression character class. (Do not include the surrounding square + # brackets.) For example, "b-zB-Z0-9" would cause + # everything but the letters 'b' through 'z' and the numbers '0' through + # '9' to be percent encoded. If a Regexp is passed, the + # value /[^b-zB-Z0-9]/ would have the same effect. A set of + # useful String values may be found in the + # Addressable::URI::CharacterClasses module. The default + # value is the reserved plus unreserved character classes specified in + # RFC 3986. + # + # @param [Regexp] upcase_encoded + # A string of characters that may already be percent encoded, and whose + # encodings should be upcased. This allows normalization of percent + # encodings for characters not included in the + # character_class. + # + # @return [String] The encoded component. + # + # @example + # Addressable::URI.encode_component("simple/example", "b-zB-Z0-9") + # => "simple%2Fex%61mple" + # Addressable::URI.encode_component("simple/example", /[^b-zB-Z0-9]/) + # => "simple%2Fex%61mple" + # Addressable::URI.encode_component( + # "simple/example", Addressable::URI::CharacterClasses::UNRESERVED + # ) + # => "simple%2Fexample" + def self.encode_component(component, character_class= + CharacterClasses::RESERVED + CharacterClasses::UNRESERVED, + upcase_encoded='') + return nil if component.nil? 
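convert_path and the class-level join shown above turn filesystem paths into file: URIs and resolve references against a base. An illustrative sketch (the path and URLs are made up for the example, and the path is assumed not to exist locally):

    require "addressable/uri"

    Addressable::URI.convert_path("/opt/data/report.txt").to_s
    # => "file:///opt/data/report.txt"
    Addressable::URI.join("http://example.com/docs/", "guide", "#intro").to_s
    # => "http://example.com/docs/guide#intro"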
+ + begin + if component.kind_of?(Symbol) || + component.kind_of?(Numeric) || + component.kind_of?(TrueClass) || + component.kind_of?(FalseClass) + component = component.to_s + else + component = component.to_str + end + rescue TypeError, NoMethodError + raise TypeError, "Can't convert #{component.class} into String." + end if !component.is_a? String + + if ![String, Regexp].include?(character_class.class) + raise TypeError, + "Expected String or Regexp, got #{character_class.inspect}" + end + if character_class.kind_of?(String) + character_class = /[^#{character_class}]/ + end + # We can't perform regexps on invalid UTF sequences, but + # here we need to, so switch to ASCII. + component = component.dup + component.force_encoding(Encoding::ASCII_8BIT) + # Avoiding gsub! because there are edge cases with frozen strings + component = component.gsub(character_class) do |sequence| + SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE[sequence] + end + if upcase_encoded.length > 0 + upcase_encoded_chars = upcase_encoded.chars.map do |char| + SEQUENCE_ENCODING_TABLE[char] + end + component = component.gsub(/%(#{upcase_encoded_chars.join('|')})/, + &:upcase) + end + return component + end + + class << self + alias_method :escape_component, :encode_component + end + + ## + # Unencodes any percent encoded characters within a URI component. + # This method may be used for unencoding either components or full URIs, + # however, it is recommended to use the unencode_component + # alias when unencoding components. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI or component to unencode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @param [String] leave_encoded + # A string of characters to leave encoded. If a percent encoded character + # in this list is encountered then it will remain percent encoded. + # + # @return [String, Addressable::URI] + # The unencoded component or URI. + # The return type is determined by the return_type + # parameter. + def self.unencode(uri, return_type=String, leave_encoded='') + return nil if uri.nil? + + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri = uri.dup + # Seriously, only use UTF-8. I'm really not kidding! + uri.force_encoding("utf-8") + + unless leave_encoded.empty? + leave_encoded = leave_encoded.dup.force_encoding("utf-8") + end + + result = uri.gsub(/%[0-9a-f]{2}/iu) do |sequence| + c = sequence[1..3].to_i(16).chr + c.force_encoding("utf-8") + leave_encoded.include?(c) ? sequence : c + end + result.force_encoding("utf-8") + if return_type == String + return result + elsif return_type == ::Addressable::URI + return ::Addressable::URI.parse(result) + end + end + + class << self + alias_method :unescape, :unencode + alias_method :unencode_component, :unencode + alias_method :unescape_component, :unencode + end + + + ## + # Normalizes the encoding of a URI component. + # + # @param [String, #to_str] component The URI component to encode. + # + # @param [String, Regexp] character_class + # The characters which are not percent encoded. If a String + # is passed, the String must be formatted as a regular + # expression character class. 
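encode_component and unencode (aliased as unencode_component), defined above, percent-encode a component against a character class and reverse the encoding. A short sketch:

    require "addressable/uri"

    Addressable::URI.encode_component(
      "a b/c", Addressable::URI::CharacterClasses::UNRESERVED
    )
    # => "a%20b%2Fc"
    Addressable::URI.unencode_component("a%20b%2Fc")
    # => "a b/c"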
(Do not include the surrounding square + # brackets.) For example, "b-zB-Z0-9" would cause + # everything but the letters 'b' through 'z' and the numbers '0' + # through '9' to be percent encoded. If a Regexp is passed, + # the value /[^b-zB-Z0-9]/ would have the same effect. A + # set of useful String values may be found in the + # Addressable::URI::CharacterClasses module. The default + # value is the reserved plus unreserved character classes specified in + # RFC 3986. + # + # @param [String] leave_encoded + # When character_class is a String then + # leave_encoded is a string of characters that should remain + # percent encoded while normalizing the component; if they appear percent + # encoded in the original component, then they will be upcased ("%2f" + # normalized to "%2F") but otherwise left alone. + # + # @return [String] The normalized component. + # + # @example + # Addressable::URI.normalize_component("simpl%65/%65xampl%65", "b-zB-Z") + # => "simple%2Fex%61mple" + # Addressable::URI.normalize_component( + # "simpl%65/%65xampl%65", /[^b-zB-Z]/ + # ) + # => "simple%2Fex%61mple" + # Addressable::URI.normalize_component( + # "simpl%65/%65xampl%65", + # Addressable::URI::CharacterClasses::UNRESERVED + # ) + # => "simple%2Fexample" + # Addressable::URI.normalize_component( + # "one%20two%2fthree%26four", + # "0-9a-zA-Z &/", + # "/" + # ) + # => "one two%2Fthree&four" + def self.normalize_component(component, character_class= + CharacterClasses::RESERVED + CharacterClasses::UNRESERVED, + leave_encoded='') + return nil if component.nil? + + begin + component = component.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{component.class} into String." + end if !component.is_a? String + + if ![String, Regexp].include?(character_class.class) + raise TypeError, + "Expected String or Regexp, got #{character_class.inspect}" + end + if character_class.kind_of?(String) + leave_re = if leave_encoded.length > 0 + character_class = "#{character_class}%" unless character_class.include?('%') + + "|%(?!#{leave_encoded.chars.map do |char| + seq = SEQUENCE_ENCODING_TABLE[char] + [seq.upcase, seq.downcase] + end.flatten.join('|')})" + end + + character_class = if leave_re + /[^#{character_class}]#{leave_re}/ + else + /[^#{character_class}]/ + end + end + # We can't perform regexps on invalid UTF sequences, but + # here we need to, so switch to ASCII. + component = component.dup + component.force_encoding(Encoding::ASCII_8BIT) + unencoded = self.unencode_component(component, String, leave_encoded) + begin + encoded = self.encode_component( + Addressable::IDNA.unicode_normalize_kc(unencoded), + character_class, + leave_encoded + ) + rescue ArgumentError + encoded = self.encode_component(unencoded) + end + encoded.force_encoding(Encoding::UTF_8) + return encoded + end + + ## + # Percent encodes any special characters in the URI. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI to encode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @return [String, Addressable::URI] + # The encoded URI. + # The return type is determined by the return_type + # parameter. + def self.encode(uri, return_type=String) + return nil if uri.nil? + + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? 
String + + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri_object = uri.kind_of?(self) ? uri : self.parse(uri) + encoded_uri = Addressable::URI.new( + :scheme => self.encode_component(uri_object.scheme, + Addressable::URI::CharacterClasses::SCHEME), + :authority => self.encode_component(uri_object.authority, + Addressable::URI::CharacterClasses::AUTHORITY), + :path => self.encode_component(uri_object.path, + Addressable::URI::CharacterClasses::PATH), + :query => self.encode_component(uri_object.query, + Addressable::URI::CharacterClasses::QUERY), + :fragment => self.encode_component(uri_object.fragment, + Addressable::URI::CharacterClasses::FRAGMENT) + ) + if return_type == String + return encoded_uri.to_s + elsif return_type == ::Addressable::URI + return encoded_uri + end + end + + class << self + alias_method :escape, :encode + end + + ## + # Normalizes the encoding of a URI. Characters within a hostname are + # not percent encoded to allow for internationalized domain names. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI to encode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @return [String, Addressable::URI] + # The encoded URI. + # The return type is determined by the return_type + # parameter. + def self.normalized_encode(uri, return_type=String) + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri_object = uri.kind_of?(self) ? uri : self.parse(uri) + components = { + :scheme => self.unencode_component(uri_object.scheme), + :user => self.unencode_component(uri_object.user), + :password => self.unencode_component(uri_object.password), + :host => self.unencode_component(uri_object.host), + :port => (uri_object.port.nil? ? 
nil : uri_object.port.to_s), + :path => self.unencode_component(uri_object.path), + :query => self.unencode_component(uri_object.query), + :fragment => self.unencode_component(uri_object.fragment) + } + components.each do |key, value| + if value != nil + begin + components[key] = + Addressable::IDNA.unicode_normalize_kc(value.to_str) + rescue ArgumentError + # Likely a malformed UTF-8 character, skip unicode normalization + components[key] = value.to_str + end + end + end + encoded_uri = Addressable::URI.new( + :scheme => self.encode_component(components[:scheme], + Addressable::URI::CharacterClasses::SCHEME), + :user => self.encode_component(components[:user], + Addressable::URI::CharacterClasses::UNRESERVED), + :password => self.encode_component(components[:password], + Addressable::URI::CharacterClasses::UNRESERVED), + :host => components[:host], + :port => components[:port], + :path => self.encode_component(components[:path], + Addressable::URI::CharacterClasses::PATH), + :query => self.encode_component(components[:query], + Addressable::URI::CharacterClasses::QUERY), + :fragment => self.encode_component(components[:fragment], + Addressable::URI::CharacterClasses::FRAGMENT) + ) + if return_type == String + return encoded_uri.to_s + elsif return_type == ::Addressable::URI + return encoded_uri + end + end + + ## + # Encodes a set of key/value pairs according to the rules for the + # application/x-www-form-urlencoded MIME type. + # + # @param [#to_hash, #to_ary] form_values + # The form values to encode. + # + # @param [TrueClass, FalseClass] sort + # Sort the key/value pairs prior to encoding. + # Defaults to false. + # + # @return [String] + # The encoded value. + def self.form_encode(form_values, sort=false) + if form_values.respond_to?(:to_hash) + form_values = form_values.to_hash.to_a + elsif form_values.respond_to?(:to_ary) + form_values = form_values.to_ary + else + raise TypeError, "Can't convert #{form_values.class} into Array." + end + + form_values = form_values.inject([]) do |accu, (key, value)| + if value.kind_of?(Array) + value.each do |v| + accu << [key.to_s, v.to_s] + end + else + accu << [key.to_s, value.to_s] + end + accu + end + + if sort + # Useful for OAuth and optimizing caching systems + form_values = form_values.sort + end + escaped_form_values = form_values.map do |(key, value)| + # Line breaks are CRLF pairs + [ + self.encode_component( + key.gsub(/(\r\n|\n|\r)/, "\r\n"), + CharacterClasses::UNRESERVED + ).gsub("%20", "+"), + self.encode_component( + value.gsub(/(\r\n|\n|\r)/, "\r\n"), + CharacterClasses::UNRESERVED + ).gsub("%20", "+") + ] + end + return escaped_form_values.map do |(key, value)| + "#{key}=#{value}" + end.join("&") + end + + ## + # Decodes a String according to the rules for the + # application/x-www-form-urlencoded MIME type. + # + # @param [String, #to_str] encoded_value + # The form values to decode. + # + # @return [Array] + # The decoded values. + # This is not a Hash because of the possibility for + # duplicate keys. + def self.form_unencode(encoded_value) + if !encoded_value.respond_to?(:to_str) + raise TypeError, "Can't convert #{encoded_value.class} into String." + end + encoded_value = encoded_value.to_str + split_values = encoded_value.split("&").map do |pair| + pair.split("=", 2) + end + return split_values.map do |(key, value)| + [ + key ? self.unencode_component( + key.gsub("+", "%20")).gsub(/(\r\n|\n|\r)/, "\n") : nil, + value ? 
(self.unencode_component( + value.gsub("+", "%20")).gsub(/(\r\n|\n|\r)/, "\n")) : nil + ] + end + end + + ## + # Creates a new uri object from component parts. + # + # @option [String, #to_str] scheme The scheme component. + # @option [String, #to_str] user The user component. + # @option [String, #to_str] password The password component. + # @option [String, #to_str] userinfo + # The userinfo component. If this is supplied, the user and password + # components must be omitted. + # @option [String, #to_str] host The host component. + # @option [String, #to_str] port The port component. + # @option [String, #to_str] authority + # The authority component. If this is supplied, the user, password, + # userinfo, host, and port components must be omitted. + # @option [String, #to_str] path The path component. + # @option [String, #to_str] query The query component. + # @option [String, #to_str] fragment The fragment component. + # + # @return [Addressable::URI] The constructed URI object. + def initialize(options={}) + if options.has_key?(:authority) + if (options.keys & [:userinfo, :user, :password, :host, :port]).any? + raise ArgumentError, + "Cannot specify both an authority and any of the components " + + "within the authority." + end + end + if options.has_key?(:userinfo) + if (options.keys & [:user, :password]).any? + raise ArgumentError, + "Cannot specify both a userinfo and either the user or password." + end + end + + self.defer_validation do + # Bunch of crazy logic required because of the composite components + # like userinfo and authority. + self.scheme = options[:scheme] if options[:scheme] + self.user = options[:user] if options[:user] + self.password = options[:password] if options[:password] + self.userinfo = options[:userinfo] if options[:userinfo] + self.host = options[:host] if options[:host] + self.port = options[:port] if options[:port] + self.authority = options[:authority] if options[:authority] + self.path = options[:path] if options[:path] + self.query = options[:query] if options[:query] + self.query_values = options[:query_values] if options[:query_values] + self.fragment = options[:fragment] if options[:fragment] + end + self.to_s + end + + ## + # Freeze URI, initializing instance variables. + # + # @return [Addressable::URI] The frozen URI object. + def freeze + self.normalized_scheme + self.normalized_user + self.normalized_password + self.normalized_userinfo + self.normalized_host + self.normalized_port + self.normalized_authority + self.normalized_site + self.normalized_path + self.normalized_query + self.normalized_fragment + self.hash + super + end + + ## + # The scheme component for this URI. + # + # @return [String] The scheme component. + def scheme + return defined?(@scheme) ? @scheme : nil + end + + ## + # The scheme component for this URI, normalized. + # + # @return [String] The scheme component, normalized. + def normalized_scheme + return nil unless self.scheme + @normalized_scheme ||= begin + if self.scheme =~ /^\s*ssh\+svn\s*$/i + "svn+ssh".dup + else + Addressable::URI.normalize_component( + self.scheme.strip.downcase, + Addressable::URI::NormalizeCharacterClasses::SCHEME + ) + end + end + # All normalized values should be UTF-8 + @normalized_scheme.force_encoding(Encoding::UTF_8) if @normalized_scheme + @normalized_scheme + end + + ## + # Sets the scheme component for this URI. + # + # @param [String, #to_str] new_scheme The new scheme component. 
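form_encode and form_unencode, defined above, implement application/x-www-form-urlencoded serialization. A sketch with assumed form data:

    require "addressable/uri"

    Addressable::URI.form_encode({"q" => "ruby gems", "page" => 1})
    # => "q=ruby+gems&page=1"
    Addressable::URI.form_unencode("q=ruby+gems&page=1")
    # => [["q", "ruby gems"], ["page", "1"]]

form_unencode returns an array of pairs rather than a Hash so that duplicate keys survive the round trip.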
+ def scheme=(new_scheme) + if new_scheme && !new_scheme.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_scheme.class} into String." + elsif new_scheme + new_scheme = new_scheme.to_str + end + if new_scheme && new_scheme !~ /\A[a-z][a-z0-9\.\+\-]*\z/i + raise InvalidURIError, "Invalid scheme format: '#{new_scheme}'" + end + @scheme = new_scheme + @scheme = nil if @scheme.to_s.strip.empty? + + # Reset dependent values + remove_instance_variable(:@normalized_scheme) if defined?(@normalized_scheme) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The user component for this URI. + # + # @return [String] The user component. + def user + return defined?(@user) ? @user : nil + end + + ## + # The user component for this URI, normalized. + # + # @return [String] The user component, normalized. + def normalized_user + return nil unless self.user + return @normalized_user if defined?(@normalized_user) + @normalized_user ||= begin + if normalized_scheme =~ /https?/ && self.user.strip.empty? && + (!self.password || self.password.strip.empty?) + nil + else + Addressable::URI.normalize_component( + self.user.strip, + Addressable::URI::NormalizeCharacterClasses::UNRESERVED + ) + end + end + # All normalized values should be UTF-8 + @normalized_user.force_encoding(Encoding::UTF_8) if @normalized_user + @normalized_user + end + + ## + # Sets the user component for this URI. + # + # @param [String, #to_str] new_user The new user component. + def user=(new_user) + if new_user && !new_user.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_user.class} into String." + end + @user = new_user ? new_user.to_str : nil + + # You can't have a nil user with a non-nil password + if password != nil + @user = EMPTY_STR if @user.nil? + end + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_user) if defined?(@normalized_user) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The password component for this URI. + # + # @return [String] The password component. + def password + return defined?(@password) ? @password : nil + end + + ## + # The password component for this URI, normalized. + # + # @return [String] The password component, normalized. + def normalized_password + return nil unless self.password + return @normalized_password if defined?(@normalized_password) + @normalized_password ||= begin + if self.normalized_scheme =~ /https?/ && self.password.strip.empty? && + (!self.user || self.user.strip.empty?) + nil + else + Addressable::URI.normalize_component( + self.password.strip, + Addressable::URI::NormalizeCharacterClasses::UNRESERVED + ) + end + end + # All normalized values should be UTF-8 + if @normalized_password + @normalized_password.force_encoding(Encoding::UTF_8) + end + @normalized_password + end + + ## + # Sets the password component for this URI. + # + # @param [String, #to_str] new_password The new password component. + def password=(new_password) + if new_password && !new_password.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_password.class} into String." + end + @password = new_password ? 
new_password.to_str : nil + + # You can't have a nil user with a non-nil password + @password ||= nil + @user ||= nil + if @password != nil + @user = EMPTY_STR if @user.nil? + end + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_password) if defined?(@normalized_password) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The userinfo component for this URI. + # Combines the user and password components. + # + # @return [String] The userinfo component. + def userinfo + current_user = self.user + current_password = self.password + (current_user || current_password) && @userinfo ||= begin + if current_user && current_password + "#{current_user}:#{current_password}" + elsif current_user && !current_password + "#{current_user}" + end + end + end + + ## + # The userinfo component for this URI, normalized. + # + # @return [String] The userinfo component, normalized. + def normalized_userinfo + return nil unless self.userinfo + return @normalized_userinfo if defined?(@normalized_userinfo) + @normalized_userinfo ||= begin + current_user = self.normalized_user + current_password = self.normalized_password + if !current_user && !current_password + nil + elsif current_user && current_password + "#{current_user}:#{current_password}".dup + elsif current_user && !current_password + "#{current_user}".dup + end + end + # All normalized values should be UTF-8 + if @normalized_userinfo + @normalized_userinfo.force_encoding(Encoding::UTF_8) + end + @normalized_userinfo + end + + ## + # Sets the userinfo component for this URI. + # + # @param [String, #to_str] new_userinfo The new userinfo component. + def userinfo=(new_userinfo) + if new_userinfo && !new_userinfo.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_userinfo.class} into String." + end + new_user, new_password = if new_userinfo + [ + new_userinfo.to_str.strip[/^(.*):/, 1], + new_userinfo.to_str.strip[/:(.*)$/, 1] + ] + else + [nil, nil] + end + + # Password assigned first to ensure validity in case of nil + self.password = new_password + self.user = new_user + + # Reset dependent values + remove_instance_variable(:@authority) if defined?(@authority) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The host component for this URI. + # + # @return [String] The host component. + def host + return defined?(@host) ? @host : nil + end + + ## + # The host component for this URI, normalized. + # + # @return [String] The host component, normalized. + def normalized_host + return nil unless self.host + + @normalized_host ||= begin + if !self.host.strip.empty? + result = ::Addressable::IDNA.to_ascii( + URI.unencode_component(self.host.strip.downcase) + ) + if result =~ /[^\.]\.$/ + # Single trailing dots are unnecessary. + result = result[0...-1] + end + result = Addressable::URI.normalize_component( + result, + NormalizeCharacterClasses::HOST + ) + result + else + EMPTY_STR.dup + end + end + # All normalized values should be UTF-8 + if @normalized_host && !@normalized_host.empty? + @normalized_host.force_encoding(Encoding::UTF_8) + end + @normalized_host + end + + ## + # Sets the host component for this URI. + # + # @param [String, #to_str] new_host The new host component. 
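The accessors above manage the userinfo and host components: assigning userinfo splits it into user and password, and normalized_host downcases the host and drops a single trailing dot. A sketch with placeholder credentials (hypothetical values):

    require "addressable/uri"

    uri = Addressable::URI.parse("http://example.com/")
    uri.userinfo = "deploy:s3cret"   # hypothetical credentials
    uri.user                         # => "deploy"
    uri.password                     # => "s3cret"
    uri.host = "WWW.EXAMPLE.COM."
    uri.normalized_host              # => "www.example.com"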
+ def host=(new_host) + if new_host && !new_host.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_host.class} into String." + end + @host = new_host ? new_host.to_str : nil + + # Reset dependent values + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_host) if defined?(@normalized_host) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # This method is same as URI::Generic#host except + # brackets for IPv6 (and 'IPvFuture') addresses are removed. + # + # @see Addressable::URI#host + # + # @return [String] The hostname for this URI. + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + + ## + # This method is same as URI::Generic#host= except + # the argument can be a bare IPv6 address (or 'IPvFuture'). + # + # @see Addressable::URI#host= + # + # @param [String, #to_str] new_hostname The new hostname for this URI. + def hostname=(new_hostname) + if new_hostname && + (new_hostname.respond_to?(:ipv4?) || new_hostname.respond_to?(:ipv6?)) + new_hostname = new_hostname.to_s + elsif new_hostname && !new_hostname.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_hostname.class} into String." + end + v = new_hostname ? new_hostname.to_str : nil + v = "[#{v}]" if /\A\[.*\]\z/ !~ v && /:/ =~ v + self.host = v + end + + ## + # Returns the top-level domain for this host. + # + # @example + # Addressable::URI.parse("http://www.example.co.uk").tld # => "co.uk" + def tld + PublicSuffix.parse(self.host, ignore_private: true).tld + end + + ## + # Sets the top-level domain for this URI. + # + # @param [String, #to_str] new_tld The new top-level domain. + def tld=(new_tld) + replaced_tld = host.sub(/#{tld}\z/, new_tld) + self.host = PublicSuffix::Domain.new(replaced_tld).to_s + end + + ## + # Returns the public suffix domain for this host. + # + # @example + # Addressable::URI.parse("http://www.example.co.uk").domain # => "example.co.uk" + def domain + PublicSuffix.domain(self.host, ignore_private: true) + end + + ## + # The authority component for this URI. + # Combines the user, password, host, and port components. + # + # @return [String] The authority component. + def authority + self.host && @authority ||= begin + authority = String.new + if self.userinfo != nil + authority << "#{self.userinfo}@" + end + authority << self.host + if self.port != nil + authority << ":#{self.port}" + end + authority + end + end + + ## + # The authority component for this URI, normalized. + # + # @return [String] The authority component, normalized. + def normalized_authority + return nil unless self.authority + @normalized_authority ||= begin + authority = String.new + if self.normalized_userinfo != nil + authority << "#{self.normalized_userinfo}@" + end + authority << self.normalized_host + if self.normalized_port != nil + authority << ":#{self.normalized_port}" + end + authority + end + # All normalized values should be UTF-8 + if @normalized_authority + @normalized_authority.force_encoding(Encoding::UTF_8) + end + @normalized_authority + end + + ## + # Sets the authority component for this URI. + # + # @param [String, #to_str] new_authority The new authority component. + def authority=(new_authority) + if new_authority + if !new_authority.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_authority.class} into String." 
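hostname, tld and domain, defined above, expose the host with IPv6 brackets stripped and the PublicSuffix-derived pieces. A sketch reusing the documented example domain:

    require "addressable/uri"

    uri = Addressable::URI.parse("http://www.example.co.uk/path")
    uri.hostname  # => "www.example.co.uk"
    uri.tld       # => "co.uk"
    uri.domain    # => "example.co.uk"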
+ end + new_authority = new_authority.to_str + new_userinfo = new_authority[/^([^\[\]]*)@/, 1] + if new_userinfo + new_user = new_userinfo.strip[/^([^:]*):?/, 1] + new_password = new_userinfo.strip[/:(.*)$/, 1] + end + new_host = new_authority.sub( + /^([^\[\]]*)@/, EMPTY_STR + ).sub( + /:([^:@\[\]]*?)$/, EMPTY_STR + ) + new_port = + new_authority[/:([^:@\[\]]*?)$/, 1] + end + + # Password assigned first to ensure validity in case of nil + self.password = defined?(new_password) ? new_password : nil + self.user = defined?(new_user) ? new_user : nil + self.host = defined?(new_host) ? new_host : nil + self.port = defined?(new_port) ? new_port : nil + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The origin for this URI, serialized to ASCII, as per + # RFC 6454, section 6.2. + # + # @return [String] The serialized origin. + def origin + if self.scheme && self.authority + if self.normalized_port + "#{self.normalized_scheme}://#{self.normalized_host}" + + ":#{self.normalized_port}" + else + "#{self.normalized_scheme}://#{self.normalized_host}" + end + else + "null" + end + end + + ## + # Sets the origin for this URI, serialized to ASCII, as per + # RFC 6454, section 6.2. This assignment will reset the `userinfo` + # component. + # + # @param [String, #to_str] new_origin The new origin component. + def origin=(new_origin) + if new_origin + if !new_origin.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_origin.class} into String." + end + new_origin = new_origin.to_str + new_scheme = new_origin[/^([^:\/?#]+):\/\//, 1] + unless new_scheme + raise InvalidURIError, 'An origin cannot omit the scheme.' + end + new_host = new_origin[/:\/\/([^\/?#:]+)/, 1] + unless new_host + raise InvalidURIError, 'An origin cannot omit the host.' + end + new_port = new_origin[/:([^:@\[\]\/]*?)$/, 1] + end + + self.scheme = defined?(new_scheme) ? new_scheme : nil + self.host = defined?(new_host) ? new_host : nil + self.port = defined?(new_port) ? new_port : nil + self.userinfo = nil + + # Reset dependent values + remove_instance_variable(:@userinfo) if defined?(@userinfo) + remove_instance_variable(:@normalized_userinfo) if defined?(@normalized_userinfo) + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_authority) if defined?(@normalized_authority) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + # Returns an array of known ip-based schemes. These schemes typically + # use a similar URI form: + # //:@:/ + def self.ip_based_schemes + return self.port_mapping.keys + end + + # Returns a hash of common IP-based schemes and their default port + # numbers. Adding new schemes to this hash, as necessary, will allow + # for better URI normalization. + def self.port_mapping + PORT_MAPPING + end + + ## + # The port component for this URI. + # This is the port number actually given in the URI. This does not + # infer port numbers from default values. + # + # @return [Integer] The port component. + def port + return defined?(@port) ? @port : nil + end + + ## + # The port component for this URI, normalized. + # + # @return [Integer] The port component, normalized. 
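origin and port_mapping, shown above, implement RFC 6454 origin serialization and the default-port table used during normalization. A sketch:

    require "addressable/uri"

    Addressable::URI.port_mapping["https"]                        # => 443
    Addressable::URI.parse("https://example.com/x").origin        # => "https://example.com"
    Addressable::URI.parse("https://example.com:8443/x").origin   # => "https://example.com:8443"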
+ def normalized_port + return nil unless self.port + return @normalized_port if defined?(@normalized_port) + @normalized_port ||= begin + if URI.port_mapping[self.normalized_scheme] == self.port + nil + else + self.port + end + end + end + + ## + # Sets the port component for this URI. + # + # @param [String, Integer, #to_s] new_port The new port component. + def port=(new_port) + if new_port != nil && new_port.respond_to?(:to_str) + new_port = Addressable::URI.unencode_component(new_port.to_str) + end + + if new_port.respond_to?(:valid_encoding?) && !new_port.valid_encoding? + raise InvalidURIError, "Invalid encoding in port" + end + + if new_port != nil && !(new_port.to_s =~ /^\d+$/) + raise InvalidURIError, + "Invalid port number: #{new_port.inspect}" + end + + @port = new_port.to_s.to_i + @port = nil if @port == 0 + + # Reset dependent values + remove_instance_variable(:@authority) if defined?(@authority) + remove_instance_variable(:@normalized_port) if defined?(@normalized_port) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The inferred port component for this URI. + # This method will normalize to the default port for the URI's scheme if + # the port isn't explicitly specified in the URI. + # + # @return [Integer] The inferred port component. + def inferred_port + if self.port.to_i == 0 + self.default_port + else + self.port.to_i + end + end + + ## + # The default port for this URI's scheme. + # This method will always returns the default port for the URI's scheme + # regardless of the presence of an explicit port in the URI. + # + # @return [Integer] The default port. + def default_port + URI.port_mapping[self.scheme.strip.downcase] if self.scheme + end + + ## + # The combination of components that represent a site. + # Combines the scheme, user, password, host, and port components. + # Primarily useful for HTTP and HTTPS. + # + # For example, "http://example.com/path?query" would have a + # site value of "http://example.com". + # + # @return [String] The components that identify a site. + def site + (self.scheme || self.authority) && @site ||= begin + site_string = "".dup + site_string << "#{self.scheme}:" if self.scheme != nil + site_string << "//#{self.authority}" if self.authority != nil + site_string + end + end + + ## + # The normalized combination of components that represent a site. + # Combines the scheme, user, password, host, and port components. + # Primarily useful for HTTP and HTTPS. + # + # For example, "http://example.com/path?query" would have a + # site value of "http://example.com". + # + # @return [String] The normalized components that identify a site. + def normalized_site + return nil unless self.site + @normalized_site ||= begin + site_string = "".dup + if self.normalized_scheme != nil + site_string << "#{self.normalized_scheme}:" + end + if self.normalized_authority != nil + site_string << "//#{self.normalized_authority}" + end + site_string + end + # All normalized values should be UTF-8 + @normalized_site.force_encoding(Encoding::UTF_8) if @normalized_site + @normalized_site + end + + ## + # Sets the site value for this URI. + # + # @param [String, #to_str] new_site The new site value. + def site=(new_site) + if new_site + if !new_site.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_site.class} into String." 
+ end + new_site = new_site.to_str + # These two regular expressions derived from the primary parsing + # expression + self.scheme = new_site[/^(?:([^:\/?#]+):)?(?:\/\/(?:[^\/?#]*))?$/, 1] + self.authority = new_site[ + /^(?:(?:[^:\/?#]+):)?(?:\/\/([^\/?#]*))?$/, 1 + ] + else + self.scheme = nil + self.authority = nil + end + end + + ## + # The path component for this URI. + # + # @return [String] The path component. + def path + return defined?(@path) ? @path : EMPTY_STR + end + + NORMPATH = /^(?!\/)[^\/:]*:.*$/ + ## + # The path component for this URI, normalized. + # + # @return [String] The path component, normalized. + def normalized_path + @normalized_path ||= begin + path = self.path.to_s + if self.scheme == nil && path =~ NORMPATH + # Relative paths with colons in the first segment are ambiguous. + path = path.sub(":", "%2F") + end + # String#split(delimeter, -1) uses the more strict splitting behavior + # found by default in Python. + result = path.strip.split(SLASH, -1).map do |segment| + Addressable::URI.normalize_component( + segment, + Addressable::URI::NormalizeCharacterClasses::PCHAR + ) + end.join(SLASH) + + result = URI.normalize_path(result) + if result.empty? && + ["http", "https", "ftp", "tftp"].include?(self.normalized_scheme) + result = SLASH.dup + end + result + end + # All normalized values should be UTF-8 + @normalized_path.force_encoding(Encoding::UTF_8) if @normalized_path + @normalized_path + end + + ## + # Sets the path component for this URI. + # + # @param [String, #to_str] new_path The new path component. + def path=(new_path) + if new_path && !new_path.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_path.class} into String." + end + @path = (new_path || EMPTY_STR).to_str + if !@path.empty? && @path[0..0] != SLASH && host != nil + @path = "/#{@path}" + end + + # Reset dependent values + remove_instance_variable(:@normalized_path) if defined?(@normalized_path) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The basename, if any, of the file in the path component. + # + # @return [String] The path's basename. + def basename + # Path cannot be nil + return File.basename(self.path).sub(/;[^\/]*$/, EMPTY_STR) + end + + ## + # The extname, if any, of the file in the path component. + # Empty string if there is no extension. + # + # @return [String] The path's extname. + def extname + return nil unless self.path + return File.extname(self.basename) + end + + ## + # The query component for this URI. + # + # @return [String] The query component. + def query + return defined?(@query) ? @query : nil + end + + ## + # The query component for this URI, normalized. + # + # @return [String] The query component, normalized. + def normalized_query(*flags) + return nil unless self.query + return @normalized_query if defined?(@normalized_query) + @normalized_query ||= begin + modified_query_class = Addressable::URI::CharacterClasses::QUERY.dup + # Make sure possible key-value pair delimiters are escaped. + modified_query_class.sub!("\\&", "").sub!("\\;", "") + pairs = (query || "").split("&", -1) + pairs.delete_if(&:empty?).uniq! if flags.include?(:compacted) + pairs.sort! if flags.include?(:sorted) + component = pairs.map do |pair| + Addressable::URI.normalize_component( + pair, + Addressable::URI::NormalizeCharacterClasses::QUERY, + "+" + ) + end.join("&") + component == "" ? 
nil : component + end + # All normalized values should be UTF-8 + @normalized_query.force_encoding(Encoding::UTF_8) if @normalized_query + @normalized_query + end + + ## + # Sets the query component for this URI. + # + # @param [String, #to_str] new_query The new query component. + def query=(new_query) + if new_query && !new_query.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_query.class} into String." + end + @query = new_query ? new_query.to_str : nil + + # Reset dependent values + remove_instance_variable(:@normalized_query) if defined?(@normalized_query) + remove_composite_values + end + + ## + # Converts the query component to a Hash value. + # + # @param [Class] return_type The return type desired. Value must be either + # `Hash` or `Array`. + # + # @return [Hash, Array, nil] The query string parsed as a Hash or Array + # or nil if the query string is blank. + # + # @example + # Addressable::URI.parse("?one=1&two=2&three=3").query_values + # #=> {"one" => "1", "two" => "2", "three" => "3"} + # Addressable::URI.parse("?one=two&one=three").query_values(Array) + # #=> [["one", "two"], ["one", "three"]] + # Addressable::URI.parse("?one=two&one=three").query_values(Hash) + # #=> {"one" => "three"} + # Addressable::URI.parse("?").query_values + # #=> {} + # Addressable::URI.parse("").query_values + # #=> nil + def query_values(return_type=Hash) + empty_accumulator = Array == return_type ? [] : {} + if return_type != Hash && return_type != Array + raise ArgumentError, "Invalid return type. Must be Hash or Array." + end + return nil if self.query == nil + split_query = self.query.split("&").map do |pair| + pair.split("=", 2) if pair && !pair.empty? + end.compact + return split_query.inject(empty_accumulator.dup) do |accu, pair| + # I'd rather use key/value identifiers instead of array lookups, + # but in this case I really want to maintain the exact pair structure, + # so it's best to make all changes in-place. + pair[0] = URI.unencode_component(pair[0]) + if pair[1].respond_to?(:to_str) + value = pair[1].to_str + # I loathe the fact that I have to do this. Stupid HTML 4.01. + # Treating '+' as a space was just an unbelievably bad idea. + # There was nothing wrong with '%20'! + # If it ain't broke, don't fix it! + value = value.tr("+", " ") if ["http", "https", nil].include?(scheme) + pair[1] = URI.unencode_component(value) + end + if return_type == Hash + accu[pair[0]] = pair[1] + else + accu << pair + end + accu + end + end + + ## + # Sets the query component for this URI from a Hash object. + # An empty Hash or Array will result in an empty query string. + # + # @param [Hash, #to_hash, Array] new_query_values The new query values. + # + # @example + # uri.query_values = {:a => "a", :b => ["c", "d", "e"]} + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['a', 'a'], ['b', 'c'], ['b', 'd'], ['b', 'e']] + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['a', 'a'], ['b', ['c', 'd', 'e']]] + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['flag'], ['key', 'value']] + # uri.query + # # => "flag&key=value" + def query_values=(new_query_values) + if new_query_values == nil + self.query = nil + return nil + end + + if !new_query_values.is_a?(Array) + if !new_query_values.respond_to?(:to_hash) + raise TypeError, + "Can't convert #{new_query_values.class} into Hash." 
+ end + new_query_values = new_query_values.to_hash + new_query_values = new_query_values.map do |key, value| + key = key.to_s if key.kind_of?(Symbol) + [key, value] + end + # Useful default for OAuth and caching. + # Only to be used for non-Array inputs. Arrays should preserve order. + new_query_values.sort! + end + + # new_query_values have form [['key1', 'value1'], ['key2', 'value2']] + buffer = "".dup + new_query_values.each do |key, value| + encoded_key = URI.encode_component( + key, CharacterClasses::UNRESERVED + ) + if value == nil + buffer << "#{encoded_key}&" + elsif value.kind_of?(Array) + value.each do |sub_value| + encoded_value = URI.encode_component( + sub_value, CharacterClasses::UNRESERVED + ) + buffer << "#{encoded_key}=#{encoded_value}&" + end + else + encoded_value = URI.encode_component( + value, CharacterClasses::UNRESERVED + ) + buffer << "#{encoded_key}=#{encoded_value}&" + end + end + self.query = buffer.chop + end + + ## + # The HTTP request URI for this URI. This is the path and the + # query string. + # + # @return [String] The request URI required for an HTTP request. + def request_uri + return nil if self.absolute? && self.scheme !~ /^https?$/i + return ( + (!self.path.empty? ? self.path : SLASH) + + (self.query ? "?#{self.query}" : EMPTY_STR) + ) + end + + ## + # Sets the HTTP request URI for this URI. + # + # @param [String, #to_str] new_request_uri The new HTTP request URI. + def request_uri=(new_request_uri) + if !new_request_uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_request_uri.class} into String." + end + if self.absolute? && self.scheme !~ /^https?$/i + raise InvalidURIError, + "Cannot set an HTTP request URI for a non-HTTP URI." + end + new_request_uri = new_request_uri.to_str + path_component = new_request_uri[/^([^\?]*)\??(?:.*)$/, 1] + query_component = new_request_uri[/^(?:[^\?]*)\?(.*)$/, 1] + path_component = path_component.to_s + path_component = (!path_component.empty? ? path_component : SLASH) + self.path = path_component + self.query = query_component + + # Reset dependent values + remove_composite_values + end + + ## + # The fragment component for this URI. + # + # @return [String] The fragment component. + def fragment + return defined?(@fragment) ? @fragment : nil + end + + ## + # The fragment component for this URI, normalized. + # + # @return [String] The fragment component, normalized. + def normalized_fragment + return nil unless self.fragment + return @normalized_fragment if defined?(@normalized_fragment) + @normalized_fragment ||= begin + component = Addressable::URI.normalize_component( + self.fragment, + Addressable::URI::NormalizeCharacterClasses::FRAGMENT + ) + component == "" ? nil : component + end + # All normalized values should be UTF-8 + if @normalized_fragment + @normalized_fragment.force_encoding(Encoding::UTF_8) + end + @normalized_fragment + end + + ## + # Sets the fragment component for this URI. + # + # @param [String, #to_str] new_fragment The new fragment component. + def fragment=(new_fragment) + if new_fragment && !new_fragment.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_fragment.class} into String." + end + @fragment = new_fragment ? new_fragment.to_str : nil + + # Reset dependent values + remove_instance_variable(:@normalized_fragment) if defined?(@normalized_fragment) + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # Determines if the scheme indicates an IP-based protocol. 
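query_values and query_values=, defined above, convert the query string to and from Ruby structures; Hash input is sorted by key before serialization, while Array input preserves order. A sketch:

    require "addressable/uri"

    uri = Addressable::URI.parse("http://example.com/search?q=ruby&lang=en")
    uri.query_values                 # => {"q" => "ruby", "lang" => "en"}
    uri.query_values = {"q" => "uri parsing", "page" => "2"}
    uri.query                        # => "page=2&q=uri%20parsing"
    uri.request_uri                  # => "/search?page=2&q=uri%20parsing"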
+ # + # @return [TrueClass, FalseClass] + # true if the scheme indicates an IP-based protocol. + # false otherwise. + def ip_based? + if self.scheme + return URI.ip_based_schemes.include?( + self.scheme.strip.downcase) + end + return false + end + + ## + # Determines if the URI is relative. + # + # @return [TrueClass, FalseClass] + # true if the URI is relative. false + # otherwise. + def relative? + return self.scheme.nil? + end + + ## + # Determines if the URI is absolute. + # + # @return [TrueClass, FalseClass] + # true if the URI is absolute. false + # otherwise. + def absolute? + return !relative? + end + + ## + # Joins two URIs together. + # + # @param [String, Addressable::URI, #to_str] The URI to join with. + # + # @return [Addressable::URI] The joined URI. + def join(uri) + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + if !uri.kind_of?(URI) + # Otherwise, convert to a String, then parse. + uri = URI.parse(uri.to_str) + end + if uri.to_s.empty? + return self.dup + end + + joined_scheme = nil + joined_user = nil + joined_password = nil + joined_host = nil + joined_port = nil + joined_path = nil + joined_query = nil + joined_fragment = nil + + # Section 5.2.2 of RFC 3986 + if uri.scheme != nil + joined_scheme = uri.scheme + joined_user = uri.user + joined_password = uri.password + joined_host = uri.host + joined_port = uri.port + joined_path = URI.normalize_path(uri.path) + joined_query = uri.query + else + if uri.authority != nil + joined_user = uri.user + joined_password = uri.password + joined_host = uri.host + joined_port = uri.port + joined_path = URI.normalize_path(uri.path) + joined_query = uri.query + else + if uri.path == nil || uri.path.empty? + joined_path = self.path + if uri.query != nil + joined_query = uri.query + else + joined_query = self.query + end + else + if uri.path[0..0] == SLASH + joined_path = URI.normalize_path(uri.path) + else + base_path = self.path.dup + base_path = EMPTY_STR if base_path == nil + base_path = URI.normalize_path(base_path) + + # Section 5.2.3 of RFC 3986 + # + # Removes the right-most path segment from the base path. + if base_path.include?(SLASH) + base_path.sub!(/\/[^\/]+$/, SLASH) + else + base_path = EMPTY_STR + end + + # If the base path is empty and an authority segment has been + # defined, use a base path of SLASH + if base_path.empty? && self.authority != nil + base_path = SLASH + end + + joined_path = URI.normalize_path(base_path + uri.path) + end + joined_query = uri.query + end + joined_user = self.user + joined_password = self.password + joined_host = self.host + joined_port = self.port + end + joined_scheme = self.scheme + end + joined_fragment = uri.fragment + + return self.class.new( + :scheme => joined_scheme, + :user => joined_user, + :password => joined_password, + :host => joined_host, + :port => joined_port, + :path => joined_path, + :query => joined_query, + :fragment => joined_fragment + ) + end + alias_method :+, :join + + ## + # Destructive form of join. + # + # @param [String, Addressable::URI, #to_str] The URI to join with. + # + # @return [Addressable::URI] The joined URI. + # + # @see Addressable::URI#join + def join!(uri) + replace_self(self.join(uri)) + end + + ## + # Merges a URI with a Hash of components. + # This method has different behavior from join. Any + # components present in the hash parameter will override the + # original components. The path component is not treated specially. 
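The instance-level join above implements RFC 3986 section 5.2 reference resolution and is also available as the + operator. A sketch with made-up URLs (cdn.example.net is a hypothetical host):

    require "addressable/uri"

    base = Addressable::URI.parse("http://example.com/a/b/c")
    base.absolute?                          # => true
    (base + "../d").to_s                    # => "http://example.com/a/d"
    base.join("//cdn.example.net/x").to_s   # => "http://cdn.example.net/x"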
+ # + # @param [Hash, Addressable::URI, #to_hash] The components to merge with. + # + # @return [Addressable::URI] The merged URI. + # + # @see Hash#merge + def merge(hash) + if !hash.respond_to?(:to_hash) + raise TypeError, "Can't convert #{hash.class} into Hash." + end + hash = hash.to_hash + + if hash.has_key?(:authority) + if (hash.keys & [:userinfo, :user, :password, :host, :port]).any? + raise ArgumentError, + "Cannot specify both an authority and any of the components " + + "within the authority." + end + end + if hash.has_key?(:userinfo) + if (hash.keys & [:user, :password]).any? + raise ArgumentError, + "Cannot specify both a userinfo and either the user or password." + end + end + + uri = self.class.new + uri.defer_validation do + # Bunch of crazy logic required because of the composite components + # like userinfo and authority. + uri.scheme = + hash.has_key?(:scheme) ? hash[:scheme] : self.scheme + if hash.has_key?(:authority) + uri.authority = + hash.has_key?(:authority) ? hash[:authority] : self.authority + end + if hash.has_key?(:userinfo) + uri.userinfo = + hash.has_key?(:userinfo) ? hash[:userinfo] : self.userinfo + end + if !hash.has_key?(:userinfo) && !hash.has_key?(:authority) + uri.user = + hash.has_key?(:user) ? hash[:user] : self.user + uri.password = + hash.has_key?(:password) ? hash[:password] : self.password + end + if !hash.has_key?(:authority) + uri.host = + hash.has_key?(:host) ? hash[:host] : self.host + uri.port = + hash.has_key?(:port) ? hash[:port] : self.port + end + uri.path = + hash.has_key?(:path) ? hash[:path] : self.path + uri.query = + hash.has_key?(:query) ? hash[:query] : self.query + uri.fragment = + hash.has_key?(:fragment) ? hash[:fragment] : self.fragment + end + + return uri + end + + ## + # Destructive form of merge. + # + # @param [Hash, Addressable::URI, #to_hash] The components to merge with. + # + # @return [Addressable::URI] The merged URI. + # + # @see Addressable::URI#merge + def merge!(uri) + replace_self(self.merge(uri)) + end + + ## + # Returns the shortest normalized relative form of this URI that uses the + # supplied URI as a base for resolution. Returns an absolute URI if + # necessary. This is effectively the opposite of route_to. + # + # @param [String, Addressable::URI, #to_str] uri The URI to route from. + # + # @return [Addressable::URI] + # The normalized relative URI that is equivalent to the original URI. + def route_from(uri) + uri = URI.parse(uri).normalize + normalized_self = self.normalize + if normalized_self.relative? + raise ArgumentError, "Expected absolute URI, got: #{self.to_s}" + end + if uri.relative? + raise ArgumentError, "Expected absolute URI, got: #{uri.to_s}" + end + if normalized_self == uri + return Addressable::URI.parse("##{normalized_self.fragment}") + end + components = normalized_self.to_hash + if normalized_self.scheme == uri.scheme + components[:scheme] = nil + if normalized_self.authority == uri.authority + components[:user] = nil + components[:password] = nil + components[:host] = nil + components[:port] = nil + if normalized_self.path == uri.path + components[:path] = nil + if normalized_self.query == uri.query + components[:query] = nil + end + else + if uri.path != SLASH and components[:path] + self_splitted_path = split_path(components[:path]) + uri_splitted_path = split_path(uri.path) + self_dir = self_splitted_path.shift + uri_dir = uri_splitted_path.shift + while !self_splitted_path.empty? && !uri_splitted_path.empty? 
and self_dir == uri_dir + self_dir = self_splitted_path.shift + uri_dir = uri_splitted_path.shift + end + components[:path] = (uri_splitted_path.fill('..') + [self_dir] + self_splitted_path).join(SLASH) + end + end + end + end + # Avoid network-path references. + if components[:host] != nil + components[:scheme] = normalized_self.scheme + end + return Addressable::URI.new( + :scheme => components[:scheme], + :user => components[:user], + :password => components[:password], + :host => components[:host], + :port => components[:port], + :path => components[:path], + :query => components[:query], + :fragment => components[:fragment] + ) + end + + ## + # Returns the shortest normalized relative form of the supplied URI that + # uses this URI as a base for resolution. Returns an absolute URI if + # necessary. This is effectively the opposite of route_from. + # + # @param [String, Addressable::URI, #to_str] uri The URI to route to. + # + # @return [Addressable::URI] + # The normalized relative URI that is equivalent to the supplied URI. + def route_to(uri) + return URI.parse(uri).route_from(self) + end + + ## + # Returns a normalized URI object. + # + # NOTE: This method does not attempt to fully conform to specifications. + # It exists largely to correct other people's failures to read the + # specifications, and also to deal with caching issues since several + # different URIs may represent the same resource and should not be + # cached multiple times. + # + # @return [Addressable::URI] The normalized URI. + def normalize + # This is a special exception for the frequently misused feed + # URI scheme. + if normalized_scheme == "feed" + if self.to_s =~ /^feed:\/*http:\/*/ + return URI.parse( + self.to_s[/^feed:\/*(http:\/*.*)/, 1] + ).normalize + end + end + + return self.class.new( + :scheme => normalized_scheme, + :authority => normalized_authority, + :path => normalized_path, + :query => normalized_query, + :fragment => normalized_fragment + ) + end + + ## + # Destructively normalizes this URI object. + # + # @return [Addressable::URI] The normalized URI. + # + # @see Addressable::URI#normalize + def normalize! + replace_self(self.normalize) + end + + ## + # Creates a URI suitable for display to users. If semantic attacks are + # likely, the application should try to detect these and warn the user. + # See RFC 3986, + # section 7.6 for more information. + # + # @return [Addressable::URI] A URI suitable for display purposes. + def display_uri + display_uri = self.normalize + display_uri.host = ::Addressable::IDNA.to_unicode(display_uri.host) + return display_uri + end + + ## + # Returns true if the URI objects are equal. This method + # normalizes both URIs before doing the comparison, and allows comparison + # against Strings. + # + # @param [Object] uri The URI to compare. + # + # @return [TrueClass, FalseClass] + # true if the URIs are equivalent, false + # otherwise. + def ===(uri) + if uri.respond_to?(:normalize) + uri_string = uri.normalize.to_s + else + begin + uri_string = ::Addressable::URI.parse(uri).normalize.to_s + rescue InvalidURIError, TypeError + return false + end + end + return self.normalize.to_s == uri_string + end + + ## + # Returns true if the URI objects are equal. This method + # normalizes both URIs before doing the comparison. + # + # @param [Object] uri The URI to compare. + # + # @return [TrueClass, FalseClass] + # true if the URIs are equivalent, false + # otherwise. 
+ def ==(uri) + return false unless uri.kind_of?(URI) + return self.normalize.to_s == uri.normalize.to_s + end + + ## + # Returns true if the URI objects are equal. This method + # does NOT normalize either URI before doing the comparison. + # + # @param [Object] uri The URI to compare. + # + # @return [TrueClass, FalseClass] + # true if the URIs are equivalent, false + # otherwise. + def eql?(uri) + return false unless uri.kind_of?(URI) + return self.to_s == uri.to_s + end + + ## + # A hash value that will make a URI equivalent to its normalized + # form. + # + # @return [Integer] A hash of the URI. + def hash + @hash ||= self.to_s.hash * -1 + end + + ## + # Clones the URI object. + # + # @return [Addressable::URI] The cloned URI. + def dup + duplicated_uri = self.class.new( + :scheme => self.scheme ? self.scheme.dup : nil, + :user => self.user ? self.user.dup : nil, + :password => self.password ? self.password.dup : nil, + :host => self.host ? self.host.dup : nil, + :port => self.port, + :path => self.path ? self.path.dup : nil, + :query => self.query ? self.query.dup : nil, + :fragment => self.fragment ? self.fragment.dup : nil + ) + return duplicated_uri + end + + ## + # Omits components from a URI. + # + # @param [Symbol] *components The components to be omitted. + # + # @return [Addressable::URI] The URI with components omitted. + # + # @example + # uri = Addressable::URI.parse("http://example.com/path?query") + # #=> # + # uri.omit(:scheme, :authority) + # #=> # + def omit(*components) + invalid_components = components - [ + :scheme, :user, :password, :userinfo, :host, :port, :authority, + :path, :query, :fragment + ] + unless invalid_components.empty? + raise ArgumentError, + "Invalid component names: #{invalid_components.inspect}." + end + duplicated_uri = self.dup + duplicated_uri.defer_validation do + components.each do |component| + duplicated_uri.send((component.to_s + "=").to_sym, nil) + end + duplicated_uri.user = duplicated_uri.normalized_user + end + duplicated_uri + end + + ## + # Destructive form of omit. + # + # @param [Symbol] *components The components to be omitted. + # + # @return [Addressable::URI] The URI with components omitted. + # + # @see Addressable::URI#omit + def omit!(*components) + replace_self(self.omit(*components)) + end + + ## + # Determines if the URI is an empty string. + # + # @return [TrueClass, FalseClass] + # Returns true if empty, false otherwise. + def empty? + return self.to_s.empty? + end + + ## + # Converts the URI to a String. + # + # @return [String] The URI's String representation. + def to_s + if self.scheme == nil && self.path != nil && !self.path.empty? && + self.path =~ NORMPATH + raise InvalidURIError, + "Cannot assemble URI string with ambiguous path: '#{self.path}'" + end + @uri_string ||= begin + uri_string = String.new + uri_string << "#{self.scheme}:" if self.scheme != nil + uri_string << "//#{self.authority}" if self.authority != nil + uri_string << self.path.to_s + uri_string << "?#{self.query}" if self.query != nil + uri_string << "##{self.fragment}" if self.fragment != nil + uri_string.force_encoding(Encoding::UTF_8) + uri_string + end + end + + ## + # URI's are glorified Strings. Allow implicit conversion. + alias_method :to_str, :to_s + + ## + # Returns a Hash of the URI components. + # + # @return [Hash] The URI as a Hash of components. 
+ def to_hash + return { + :scheme => self.scheme, + :user => self.user, + :password => self.password, + :host => self.host, + :port => self.port, + :path => self.path, + :query => self.query, + :fragment => self.fragment + } + end + + ## + # Returns a String representation of the URI object's state. + # + # @return [String] The URI object's state, as a String. + def inspect + sprintf("#<%s:%#0x URI:%s>", URI.to_s, self.object_id, self.to_s) + end + + ## + # This method allows you to make several changes to a URI simultaneously, + # which separately would cause validation errors, but in conjunction, + # are valid. The URI will be revalidated as soon as the entire block has + # been executed. + # + # @param [Proc] block + # A set of operations to perform on a given URI. + def defer_validation + raise LocalJumpError, "No block given." unless block_given? + @validation_deferred = true + yield + @validation_deferred = false + validate + return nil + end + + protected + SELF_REF = '.' + PARENT = '..' + + RULE_2A = /\/\.\/|\/\.$/ + RULE_2B_2C = /\/([^\/]*)\/\.\.\/|\/([^\/]*)\/\.\.$/ + RULE_2D = /^\.\.?\/?/ + RULE_PREFIXED_PARENT = /^\/\.\.?\/|^(\/\.\.?)+\/?$/ + + ## + # Resolves paths to their simplest form. + # + # @param [String] path The path to normalize. + # + # @return [String] The normalized path. + def self.normalize_path(path) + # Section 5.2.4 of RFC 3986 + + return nil if path.nil? + normalized_path = path.dup + begin + mod = nil + mod ||= normalized_path.gsub!(RULE_2A, SLASH) + + pair = normalized_path.match(RULE_2B_2C) + parent, current = pair[1], pair[2] if pair + if pair && ((parent != SELF_REF && parent != PARENT) || + (current != SELF_REF && current != PARENT)) + mod ||= normalized_path.gsub!( + Regexp.new( + "/#{Regexp.escape(parent.to_s)}/\\.\\./|" + + "(/#{Regexp.escape(current.to_s)}/\\.\\.$)" + ), SLASH + ) + end + + mod ||= normalized_path.gsub!(RULE_2D, EMPTY_STR) + # Non-standard, removes prefixed dotted segments from path. + mod ||= normalized_path.gsub!(RULE_PREFIXED_PARENT, SLASH) + end until mod.nil? + + return normalized_path + end + + ## + # Ensures that the URI is valid. + def validate + return if !!@validation_deferred + if self.scheme != nil && self.ip_based? && + (self.host == nil || self.host.empty?) && + (self.path == nil || self.path.empty?) + raise InvalidURIError, + "Absolute URI missing hierarchical segment: '#{self.to_s}'" + end + if self.host == nil + if self.port != nil || + self.user != nil || + self.password != nil + raise InvalidURIError, "Hostname not supplied: '#{self.to_s}'" + end + end + if self.path != nil && !self.path.empty? && self.path[0..0] != SLASH && + self.authority != nil + raise InvalidURIError, + "Cannot have a relative path with an authority set: '#{self.to_s}'" + end + if self.path != nil && !self.path.empty? && + self.path[0..1] == SLASH + SLASH && self.authority == nil + raise InvalidURIError, + "Cannot have a path with two leading slashes " + + "without an authority set: '#{self.to_s}'" + end + unreserved = CharacterClasses::UNRESERVED + sub_delims = CharacterClasses::SUB_DELIMS + if !self.host.nil? && (self.host =~ /[<>{}\/\\\?\#\@"[[:space:]]]/ || + (self.host[/^\[(.*)\]$/, 1] != nil && self.host[/^\[(.*)\]$/, 1] !~ + Regexp.new("^[#{unreserved}#{sub_delims}:]*$"))) + raise InvalidURIError, "Invalid character in host: '#{self.host.to_s}'" + end + return nil + end + + ## + # Replaces the internal state of self with the specified URI's state. + # Used in destructive operations to avoid massive code repetition. 
+ # + # @param [Addressable::URI] uri The URI to replace self with. + # + # @return [Addressable::URI] self. + def replace_self(uri) + # Reset dependent values + instance_variables.each do |var| + if instance_variable_defined?(var) && var != :@validation_deferred + remove_instance_variable(var) + end + end + + @scheme = uri.scheme + @user = uri.user + @password = uri.password + @host = uri.host + @port = uri.port + @path = uri.path + @query = uri.query + @fragment = uri.fragment + return self + end + + ## + # Splits path string with "/" (slash). + # It is considered that there is empty string after last slash when + # path ends with slash. + # + # @param [String] path The path to split. + # + # @return [Array] An array of parts of path. + def split_path(path) + splitted = path.split(SLASH) + splitted << EMPTY_STR if path.end_with? SLASH + splitted + end + + ## + # Resets composite values for the entire URI + # + # @api private + def remove_composite_values + remove_instance_variable(:@uri_string) if defined?(@uri_string) + remove_instance_variable(:@hash) if defined?(@hash) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/version.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/version.rb new file mode 100644 index 0000000..2efe434 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/lib/addressable/version.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +# encoding:utf-8 +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +# Used to prevent the class/module from being loaded more than once +if !defined?(Addressable::VERSION) + module Addressable + module VERSION + MAJOR = 2 + MINOR = 8 + TINY = 0 + + STRING = [MAJOR, MINOR, TINY].join('.') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/idna_spec.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/idna_spec.rb new file mode 100644 index 0000000..4104b37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/idna_spec.rb @@ -0,0 +1,302 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +# Have to use RubyGems to load the idn gem. 
+require "rubygems" + +require "addressable/idna" + +shared_examples_for "converting from unicode to ASCII" do + it "should convert 'www.google.com' correctly" do + expect(Addressable::IDNA.to_ascii("www.google.com")).to eq("www.google.com") + end + + long = 'AcinusFallumTrompetumNullunCreditumVisumEstAtCuadLongumEtCefallum.com' + it "should convert '#{long}' correctly" do + expect(Addressable::IDNA.to_ascii(long)).to eq(long) + end + + it "should convert 'www.詹姆斯.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "www.詹姆斯.com" + )).to eq("www.xn--8ws00zhy3a.com") + end + + it "should convert 'www.Iñtërnâtiônàlizætiøn.com' correctly" do + "www.Iñtërnâtiônàlizætiøn.com" + expect(Addressable::IDNA.to_ascii( + "www.I\xC3\xB1t\xC3\xABrn\xC3\xA2ti\xC3\xB4" + + "n\xC3\xA0liz\xC3\xA6ti\xC3\xB8n.com" + )).to eq("www.xn--itrntinliztin-vdb0a5exd8ewcye.com") + end + + it "should convert 'www.Iñtërnâtiônàlizætiøn.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "www.In\xCC\x83te\xCC\x88rna\xCC\x82tio\xCC\x82n" + + "a\xCC\x80liz\xC3\xA6ti\xC3\xB8n.com" + )).to eq("www.xn--itrntinliztin-vdb0a5exd8ewcye.com") + end + + it "should convert " + + "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " + + "correctly" do + expect(Addressable::IDNA.to_ascii( + "www.\343\201\273\343\202\223\343\201\250\343\201\206\343\201\253\343" + + "\201\252\343\201\214\343\201\204\343\202\217\343\201\221\343\201\256" + + "\343\202\217\343\201\213\343\202\211\343\201\252\343\201\204\343\201" + + "\251\343\202\201\343\201\204\343\202\223\343\202\201\343\201\204\343" + + "\201\256\343\202\211\343\201\271\343\202\213\343\201\276\343\201\240" + + "\343\201\252\343\201\214\343\201\217\343\201\227\343\201\252\343\201" + + "\204\343\201\250\343\201\237\343\202\212\343\201\252\343\201\204." + + "w3.mag.keio.ac.jp" + )).to eq( + "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" + + "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end + + it "should convert " + + "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " + + "correctly" do + expect(Addressable::IDNA.to_ascii( + "www.\343\201\273\343\202\223\343\201\250\343\201\206\343\201\253\343" + + "\201\252\343\201\213\343\202\231\343\201\204\343\202\217\343\201\221" + + "\343\201\256\343\202\217\343\201\213\343\202\211\343\201\252\343\201" + + "\204\343\201\250\343\202\231\343\202\201\343\201\204\343\202\223\343" + + "\202\201\343\201\204\343\201\256\343\202\211\343\201\270\343\202\231" + + "\343\202\213\343\201\276\343\201\237\343\202\231\343\201\252\343\201" + + "\213\343\202\231\343\201\217\343\201\227\343\201\252\343\201\204\343" + + "\201\250\343\201\237\343\202\212\343\201\252\343\201\204." 
+ + "w3.mag.keio.ac.jp" + )).to eq( + "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" + + "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end + + it "should convert '点心和烤鸭.w3.mag.keio.ac.jp' correctly" do + expect(Addressable::IDNA.to_ascii( + "点心和烤鸭.w3.mag.keio.ac.jp" + )).to eq("xn--0trv4xfvn8el34t.w3.mag.keio.ac.jp") + end + + it "should convert '가각갂갃간갅갆갇갈갉힢힣.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "가각갂갃간갅갆갇갈갉힢힣.com" + )).to eq("xn--o39acdefghijk5883jma.com") + end + + it "should convert " + + "'\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com" + )).to eq("xn--9cs565brid46mda086o.com") + end + + it "should convert 'リ宠퐱〹.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "\357\276\230\345\256\240\355\220\261\343\200\271.com" + )).to eq("xn--eek174hoxfpr4k.com") + end + + it "should convert 'リ宠퐱卄.com' correctly" do + expect(Addressable::IDNA.to_ascii( + "\343\203\252\345\256\240\355\220\261\345\215\204.com" + )).to eq("xn--eek174hoxfpr4k.com") + end + + it "should convert 'ᆵ' correctly" do + expect(Addressable::IDNA.to_ascii( + "\341\206\265" + )).to eq("xn--4ud") + end + + it "should convert 'ᆵ' correctly" do + expect(Addressable::IDNA.to_ascii( + "\357\276\257" + )).to eq("xn--4ud") + end + + it "should convert '🌹🌹🌹.ws' correctly" do + expect(Addressable::IDNA.to_ascii( + "\360\237\214\271\360\237\214\271\360\237\214\271.ws" + )).to eq("xn--2h8haa.ws") + end + + it "should handle two adjacent '.'s correctly" do + expect(Addressable::IDNA.to_ascii( + "example..host" + )).to eq("example..host") + end +end + +shared_examples_for "converting from ASCII to unicode" do + long = 'AcinusFallumTrompetumNullunCreditumVisumEstAtCuadLongumEtCefallum.com' + it "should convert '#{long}' correctly" do + expect(Addressable::IDNA.to_unicode(long)).to eq(long) + end + + it "should return the identity conversion when punycode decode fails" do + expect(Addressable::IDNA.to_unicode("xn--zckp1cyg1.sblo.jp")).to eq( + "xn--zckp1cyg1.sblo.jp") + end + + it "should return the identity conversion when the ACE prefix has no suffix" do + expect(Addressable::IDNA.to_unicode("xn--...-")).to eq("xn--...-") + end + + it "should convert 'www.google.com' correctly" do + expect(Addressable::IDNA.to_unicode("www.google.com")).to eq( + "www.google.com") + end + + it "should convert 'www.詹姆斯.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "www.xn--8ws00zhy3a.com" + )).to eq("www.詹姆斯.com") + end + + it "should convert '詹姆斯.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--8ws00zhy3a.com" + )).to eq("詹姆斯.com") + end + + it "should convert 'www.iñtërnâtiônàlizætiøn.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "www.xn--itrntinliztin-vdb0a5exd8ewcye.com" + )).to eq("www.iñtërnâtiônàlizætiøn.com") + end + + it "should convert 'iñtërnâtiônàlizætiøn.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--itrntinliztin-vdb0a5exd8ewcye.com" + )).to eq("iñtërnâtiônàlizætiøn.com") + end + + it "should convert " + + "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " + + "correctly" do + expect(Addressable::IDNA.to_unicode( + "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" + + "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + )).to eq( + "www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp" + ) + end + + it "should convert '点心和烤鸭.w3.mag.keio.ac.jp' correctly" do + 
expect(Addressable::IDNA.to_unicode( + "xn--0trv4xfvn8el34t.w3.mag.keio.ac.jp" + )).to eq("点心和烤鸭.w3.mag.keio.ac.jp") + end + + it "should convert '가각갂갃간갅갆갇갈갉힢힣.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--o39acdefghijk5883jma.com" + )).to eq("가각갂갃간갅갆갇갈갉힢힣.com") + end + + it "should convert " + + "'\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--9cs565brid46mda086o.com" + )).to eq( + "\347\242\274\346\250\231\346\272\226\350" + + "\220\254\345\234\213\347\242\274.com" + ) + end + + it "should convert 'リ宠퐱卄.com' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--eek174hoxfpr4k.com" + )).to eq("\343\203\252\345\256\240\355\220\261\345\215\204.com") + end + + it "should convert 'ᆵ' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--4ud" + )).to eq("\341\206\265") + end + + it "should convert '🌹🌹🌹.ws' correctly" do + expect(Addressable::IDNA.to_unicode( + "xn--2h8haa.ws" + )).to eq("\360\237\214\271\360\237\214\271\360\237\214\271.ws") + end + + it "should handle two adjacent '.'s correctly" do + expect(Addressable::IDNA.to_unicode( + "example..host" + )).to eq("example..host") + end + + it "should normalize 'string' correctly" do + expect(Addressable::IDNA.unicode_normalize_kc(:'string')).to eq("string") + expect(Addressable::IDNA.unicode_normalize_kc("string")).to eq("string") + end +end + +describe Addressable::IDNA, "when using the pure-Ruby implementation" do + before do + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + + it_should_behave_like "converting from unicode to ASCII" + it_should_behave_like "converting from ASCII to unicode" + + begin + require "fiber" + + it "should not blow up inside fibers" do + f = Fiber.new do + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + f.resume + end + rescue LoadError + # Fibers aren't supported in this version of Ruby, skip this test. + warn('Fibers unsupported.') + end +end + +begin + require "idn" + + describe Addressable::IDNA, "when using the native-code implementation" do + before do + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/native.rb" + end + + it_should_behave_like "converting from unicode to ASCII" + it_should_behave_like "converting from ASCII to unicode" + end +rescue LoadError => error + raise error if ENV["CI"] && TestHelper.native_supported? + + # Cannot test the native implementation without libidn support. + warn('Could not load native IDN implementation.') +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/net_http_compat_spec.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/net_http_compat_spec.rb new file mode 100644 index 0000000..8663a86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/net_http_compat_spec.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "addressable/uri" +require "net/http" + +describe Net::HTTP do + it "should be compatible with Addressable" do + response_body = + Net::HTTP.get(Addressable::URI.parse('http://www.google.com/')) + expect(response_body).not_to be_nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/security_spec.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/security_spec.rb new file mode 100644 index 0000000..601e808 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/security_spec.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "addressable/uri" + +describe Addressable::URI, "when created with a URI known to cause crashes " + + "in certain browsers" do + it "should parse correctly" do + uri = Addressable::URI.parse('%%30%30') + expect(uri.path).to eq('%%30%30') + expect(uri.normalize.path).to eq('%2500') + end + + it "should parse correctly as a full URI" do + uri = Addressable::URI.parse('http://www.example.com/%%30%30') + expect(uri.path).to eq('/%%30%30') + expect(uri.normalize.path).to eq('/%2500') + end +end + +describe Addressable::URI, "when created with a URI known to cause crashes " + + "in certain browsers" do + it "should parse correctly" do + uri = Addressable::URI.parse('لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.path).to eq('لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.normalize.path).to eq( + '%D9%84%D9%8F%D8%B5%D9%91%D8%A8%D9%8F%D9%84%D9%8F%D9%84%D8%B5%D9%91' + + '%D8%A8%D9%8F%D8%B1%D8%B1%D9%8B%20%E0%A5%A3%20%E0%A5%A3h%20%E0%A5' + + '%A3%20%E0%A5%A3%20%E5%86%97' + ) + end + + it "should parse correctly as a full URI" do + uri = Addressable::URI.parse('http://www.example.com/لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.path).to eq('/لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗') + expect(uri.normalize.path).to eq( + '/%D9%84%D9%8F%D8%B5%D9%91%D8%A8%D9%8F%D9%84%D9%8F%D9%84%D8%B5%D9%91' + + '%D8%A8%D9%8F%D8%B1%D8%B1%D9%8B%20%E0%A5%A3%20%E0%A5%A3h%20%E0%A5' + + '%A3%20%E0%A5%A3%20%E5%86%97' + ) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/template_spec.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/template_spec.rb new file mode 100644 index 0000000..d47589a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/template_spec.rb @@ -0,0 +1,1460 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "bigdecimal" +require "timeout" +require "addressable/template" + +shared_examples_for 'expands' do |tests| + tests.each do |template, expansion| + exp = expansion.is_a?(Array) ? expansion.first : expansion + it "#{template} to #{exp}" do + tmpl = Addressable::Template.new(template).expand(subject) + if expansion.is_a?(Array) + expect(expansion.any?{|i| i == tmpl.to_str}).to be true + else + expect(tmpl.to_str).to eq(expansion) + end + end + end +end + +describe "eql?" do + let(:template) { Addressable::Template.new('https://www.example.com/{foo}') } + it 'is equal when the pattern matches' do + other_template = Addressable::Template.new('https://www.example.com/{foo}') + expect(template).to be_eql(other_template) + expect(other_template).to be_eql(template) + end + it 'is not equal when the pattern differs' do + other_template = Addressable::Template.new('https://www.example.com/{bar}') + expect(template).to_not be_eql(other_template) + expect(other_template).to_not be_eql(template) + end + it 'is not equal to non-templates' do + uri = 'https://www.example.com/foo/bar' + addressable_template = Addressable::Template.new uri + addressable_uri = Addressable::URI.parse uri + expect(addressable_template).to_not be_eql(addressable_uri) + expect(addressable_uri).to_not be_eql(addressable_template) + end +end + +describe "==" do + let(:template) { Addressable::Template.new('https://www.example.com/{foo}') } + it 'is equal when the pattern matches' do + other_template = Addressable::Template.new('https://www.example.com/{foo}') + expect(template).to eq other_template + expect(other_template).to eq template + end + it 'is not equal when the pattern differs' do + other_template = Addressable::Template.new('https://www.example.com/{bar}') + expect(template).not_to eq other_template + expect(other_template).not_to eq template + end + it 'is not equal to non-templates' do + uri = 'https://www.example.com/foo/bar' + addressable_template = Addressable::Template.new uri + addressable_uri = Addressable::URI.parse uri + expect(addressable_template).not_to eq addressable_uri + expect(addressable_uri).not_to eq addressable_template + end +end + +describe "Type conversion" do + subject { + { + :var => true, + :hello => 1234, + :nothing => nil, + :sym => :symbolic, + :decimal => BigDecimal('1') + } + } + + it_behaves_like 'expands', { + '{var}' => 'true', + '{hello}' => '1234', + '{nothing}' => '', + '{sym}' => 'symbolic', + '{decimal}' => RUBY_VERSION < '2.4.0' ? 
'0.1E1' : '0.1e1' + } +end + +describe "Level 1:" do + subject { + {:var => "value", :hello => "Hello World!"} + } + it_behaves_like 'expands', { + '{var}' => 'value', + '{hello}' => 'Hello%20World%21' + } +end + +describe "Level 2" do + subject { + { + :var => "value", + :hello => "Hello World!", + :path => "/foo/bar" + } + } + context "Operator +:" do + it_behaves_like 'expands', { + '{+var}' => 'value', + '{+hello}' => 'Hello%20World!', + '{+path}/here' => '/foo/bar/here', + 'here?ref={+path}' => 'here?ref=/foo/bar' + } + end + context "Operator #:" do + it_behaves_like 'expands', { + 'X{#var}' => 'X#value', + 'X{#hello}' => 'X#Hello%20World!' + } + end +end + +describe "Level 3" do + subject { + { + :var => "value", + :hello => "Hello World!", + :empty => "", + :path => "/foo/bar", + :x => "1024", + :y => "768" + } + } + context "Operator nil (multiple vars):" do + it_behaves_like 'expands', { + 'map?{x,y}' => 'map?1024,768', + '{x,hello,y}' => '1024,Hello%20World%21,768' + } + end + context "Operator + (multiple vars):" do + it_behaves_like 'expands', { + '{+x,hello,y}' => '1024,Hello%20World!,768', + '{+path,x}/here' => '/foo/bar,1024/here' + } + end + context "Operator # (multiple vars):" do + it_behaves_like 'expands', { + '{#x,hello,y}' => '#1024,Hello%20World!,768', + '{#path,x}/here' => '#/foo/bar,1024/here' + } + end + context "Operator ." do + it_behaves_like 'expands', { + 'X{.var}' => 'X.value', + 'X{.x,y}' => 'X.1024.768' + } + end + context "Operator /" do + it_behaves_like 'expands', { + '{/var}' => '/value', + '{/var,x}/here' => '/value/1024/here' + } + end + context "Operator ;" do + it_behaves_like 'expands', { + '{;x,y}' => ';x=1024;y=768', + '{;x,y,empty}' => ';x=1024;y=768;empty' + } + end + context "Operator ?" do + it_behaves_like 'expands', { + '{?x,y}' => '?x=1024&y=768', + '{?x,y,empty}' => '?x=1024&y=768&empty=' + } + end + context "Operator &" do + it_behaves_like 'expands', { + '?fixed=yes{&x}' => '?fixed=yes&x=1024', + '{&x,y,empty}' => '&x=1024&y=768&empty=' + } + end +end + +describe "Level 4" do + subject { + { + :var => "value", + :hello => "Hello World!", + :path => "/foo/bar", + :semi => ";", + :list => %w(red green blue), + :keys => {"semi" => ';', "dot" => '.', "comma" => ','} + } + } + context "Expansion with value modifiers" do + it_behaves_like 'expands', { + '{var:3}' => 'val', + '{var:30}' => 'value', + '{list}' => 'red,green,blue', + '{list*}' => 'red,green,blue', + '{keys}' => [ + 'semi,%3B,dot,.,comma,%2C', + 'dot,.,semi,%3B,comma,%2C', + 'comma,%2C,semi,%3B,dot,.', + 'semi,%3B,comma,%2C,dot,.', + 'dot,.,comma,%2C,semi,%3B', + 'comma,%2C,dot,.,semi,%3B' + ], + '{keys*}' => [ + 'semi=%3B,dot=.,comma=%2C', + 'dot=.,semi=%3B,comma=%2C', + 'comma=%2C,semi=%3B,dot=.', + 'semi=%3B,comma=%2C,dot=.', + 'dot=.,comma=%2C,semi=%3B', + 'comma=%2C,dot=.,semi=%3B' + ] + } + end + context "Operator + with value modifiers" do + it_behaves_like 'expands', { + '{+path:6}/here' => '/foo/b/here', + '{+list}' => 'red,green,blue', + '{+list*}' => 'red,green,blue', + '{+keys}' => [ + 'semi,;,dot,.,comma,,', + 'dot,.,semi,;,comma,,', + 'comma,,,semi,;,dot,.', + 'semi,;,comma,,,dot,.', + 'dot,.,comma,,,semi,;', + 'comma,,,dot,.,semi,;' + ], + '{+keys*}' => [ + 'semi=;,dot=.,comma=,', + 'dot=.,semi=;,comma=,', + 'comma=,,semi=;,dot=.', + 'semi=;,comma=,,dot=.', + 'dot=.,comma=,,semi=;', + 'comma=,,dot=.,semi=;' + ] + } + end + context "Operator # with value modifiers" do + it_behaves_like 'expands', { + '{#path:6}/here' => '#/foo/b/here', + '{#list}' => 
'#red,green,blue', + '{#list*}' => '#red,green,blue', + '{#keys}' => [ + '#semi,;,dot,.,comma,,', + '#dot,.,semi,;,comma,,', + '#comma,,,semi,;,dot,.', + '#semi,;,comma,,,dot,.', + '#dot,.,comma,,,semi,;', + '#comma,,,dot,.,semi,;' + ], + '{#keys*}' => [ + '#semi=;,dot=.,comma=,', + '#dot=.,semi=;,comma=,', + '#comma=,,semi=;,dot=.', + '#semi=;,comma=,,dot=.', + '#dot=.,comma=,,semi=;', + '#comma=,,dot=.,semi=;' + ] + } + end + context "Operator . with value modifiers" do + it_behaves_like 'expands', { + 'X{.var:3}' => 'X.val', + 'X{.list}' => 'X.red,green,blue', + 'X{.list*}' => 'X.red.green.blue', + 'X{.keys}' => [ + 'X.semi,%3B,dot,.,comma,%2C', + 'X.dot,.,semi,%3B,comma,%2C', + 'X.comma,%2C,semi,%3B,dot,.', + 'X.semi,%3B,comma,%2C,dot,.', + 'X.dot,.,comma,%2C,semi,%3B', + 'X.comma,%2C,dot,.,semi,%3B' + ], + 'X{.keys*}' => [ + 'X.semi=%3B.dot=..comma=%2C', + 'X.dot=..semi=%3B.comma=%2C', + 'X.comma=%2C.semi=%3B.dot=.', + 'X.semi=%3B.comma=%2C.dot=.', + 'X.dot=..comma=%2C.semi=%3B', + 'X.comma=%2C.dot=..semi=%3B' + ] + } + end + context "Operator / with value modifiers" do + it_behaves_like 'expands', { + '{/var:1,var}' => '/v/value', + '{/list}' => '/red,green,blue', + '{/list*}' => '/red/green/blue', + '{/list*,path:4}' => '/red/green/blue/%2Ffoo', + '{/keys}' => [ + '/semi,%3B,dot,.,comma,%2C', + '/dot,.,semi,%3B,comma,%2C', + '/comma,%2C,semi,%3B,dot,.', + '/semi,%3B,comma,%2C,dot,.', + '/dot,.,comma,%2C,semi,%3B', + '/comma,%2C,dot,.,semi,%3B' + ], + '{/keys*}' => [ + '/semi=%3B/dot=./comma=%2C', + '/dot=./semi=%3B/comma=%2C', + '/comma=%2C/semi=%3B/dot=.', + '/semi=%3B/comma=%2C/dot=.', + '/dot=./comma=%2C/semi=%3B', + '/comma=%2C/dot=./semi=%3B' + ] + } + end + context "Operator ; with value modifiers" do + it_behaves_like 'expands', { + '{;hello:5}' => ';hello=Hello', + '{;list}' => ';list=red,green,blue', + '{;list*}' => ';list=red;list=green;list=blue', + '{;keys}' => [ + ';keys=semi,%3B,dot,.,comma,%2C', + ';keys=dot,.,semi,%3B,comma,%2C', + ';keys=comma,%2C,semi,%3B,dot,.', + ';keys=semi,%3B,comma,%2C,dot,.', + ';keys=dot,.,comma,%2C,semi,%3B', + ';keys=comma,%2C,dot,.,semi,%3B' + ], + '{;keys*}' => [ + ';semi=%3B;dot=.;comma=%2C', + ';dot=.;semi=%3B;comma=%2C', + ';comma=%2C;semi=%3B;dot=.', + ';semi=%3B;comma=%2C;dot=.', + ';dot=.;comma=%2C;semi=%3B', + ';comma=%2C;dot=.;semi=%3B' + ] + } + end + context "Operator ? 
with value modifiers" do + it_behaves_like 'expands', { + '{?var:3}' => '?var=val', + '{?list}' => '?list=red,green,blue', + '{?list*}' => '?list=red&list=green&list=blue', + '{?keys}' => [ + '?keys=semi,%3B,dot,.,comma,%2C', + '?keys=dot,.,semi,%3B,comma,%2C', + '?keys=comma,%2C,semi,%3B,dot,.', + '?keys=semi,%3B,comma,%2C,dot,.', + '?keys=dot,.,comma,%2C,semi,%3B', + '?keys=comma,%2C,dot,.,semi,%3B' + ], + '{?keys*}' => [ + '?semi=%3B&dot=.&comma=%2C', + '?dot=.&semi=%3B&comma=%2C', + '?comma=%2C&semi=%3B&dot=.', + '?semi=%3B&comma=%2C&dot=.', + '?dot=.&comma=%2C&semi=%3B', + '?comma=%2C&dot=.&semi=%3B' + ] + } + end + context "Operator & with value modifiers" do + it_behaves_like 'expands', { + '{&var:3}' => '&var=val', + '{&list}' => '&list=red,green,blue', + '{&list*}' => '&list=red&list=green&list=blue', + '{&keys}' => [ + '&keys=semi,%3B,dot,.,comma,%2C', + '&keys=dot,.,semi,%3B,comma,%2C', + '&keys=comma,%2C,semi,%3B,dot,.', + '&keys=semi,%3B,comma,%2C,dot,.', + '&keys=dot,.,comma,%2C,semi,%3B', + '&keys=comma,%2C,dot,.,semi,%3B' + ], + '{&keys*}' => [ + '&semi=%3B&dot=.&comma=%2C', + '&dot=.&semi=%3B&comma=%2C', + '&comma=%2C&semi=%3B&dot=.', + '&semi=%3B&comma=%2C&dot=.', + '&dot=.&comma=%2C&semi=%3B', + '&comma=%2C&dot=.&semi=%3B' + ] + } + end +end +describe "Modifiers" do + subject { + { + :var => "value", + :semi => ";", + :year => %w(1965 2000 2012), + :dom => %w(example com) + } + } + context "length" do + it_behaves_like 'expands', { + '{var:3}' => 'val', + '{var:30}' => 'value', + '{var}' => 'value', + '{semi}' => '%3B', + '{semi:2}' => '%3B' + } + end + context "explode" do + it_behaves_like 'expands', { + 'find{?year*}' => 'find?year=1965&year=2000&year=2012', + 'www{.dom*}' => 'www.example.com', + } + end +end +describe "Expansion" do + subject { + { + :count => ["one", "two", "three"], + :dom => ["example", "com"], + :dub => "me/too", + :hello => "Hello World!", + :half => "50%", + :var => "value", + :who => "fred", + :base => "http://example.com/home/", + :path => "/foo/bar", + :list => ["red", "green", "blue"], + :keys => {"semi" => ";","dot" => ".","comma" => ","}, + :v => "6", + :x => "1024", + :y => "768", + :empty => "", + :empty_keys => {}, + :undef => nil + } + } + context "concatenation" do + it_behaves_like 'expands', { + '{count}' => 'one,two,three', + '{count*}' => 'one,two,three', + '{/count}' => '/one,two,three', + '{/count*}' => '/one/two/three', + '{;count}' => ';count=one,two,three', + '{;count*}' => ';count=one;count=two;count=three', + '{?count}' => '?count=one,two,three', + '{?count*}' => '?count=one&count=two&count=three', + '{&count*}' => '&count=one&count=two&count=three' + } + end + context "simple expansion" do + it_behaves_like 'expands', { + '{var}' => 'value', + '{hello}' => 'Hello%20World%21', + '{half}' => '50%25', + 'O{empty}X' => 'OX', + 'O{undef}X' => 'OX', + '{x,y}' => '1024,768', + '{x,hello,y}' => '1024,Hello%20World%21,768', + '?{x,empty}' => '?1024,', + '?{x,undef}' => '?1024', + '?{undef,y}' => '?768', + '{var:3}' => 'val', + '{var:30}' => 'value', + '{list}' => 'red,green,blue', + '{list*}' => 'red,green,blue', + '{keys}' => [ + 'semi,%3B,dot,.,comma,%2C', + 'dot,.,semi,%3B,comma,%2C', + 'comma,%2C,semi,%3B,dot,.', + 'semi,%3B,comma,%2C,dot,.', + 'dot,.,comma,%2C,semi,%3B', + 'comma,%2C,dot,.,semi,%3B' + ], + '{keys*}' => [ + 'semi=%3B,dot=.,comma=%2C', + 'dot=.,semi=%3B,comma=%2C', + 'comma=%2C,semi=%3B,dot=.', + 'semi=%3B,comma=%2C,dot=.', + 'dot=.,comma=%2C,semi=%3B', + 'comma=%2C,dot=.,semi=%3B' + ] + } + end + context 
"reserved expansion (+)" do + it_behaves_like 'expands', { + '{+var}' => 'value', + '{+hello}' => 'Hello%20World!', + '{+half}' => '50%25', + '{base}index' => 'http%3A%2F%2Fexample.com%2Fhome%2Findex', + '{+base}index' => 'http://example.com/home/index', + 'O{+empty}X' => 'OX', + 'O{+undef}X' => 'OX', + '{+path}/here' => '/foo/bar/here', + 'here?ref={+path}' => 'here?ref=/foo/bar', + 'up{+path}{var}/here' => 'up/foo/barvalue/here', + '{+x,hello,y}' => '1024,Hello%20World!,768', + '{+path,x}/here' => '/foo/bar,1024/here', + '{+path:6}/here' => '/foo/b/here', + '{+list}' => 'red,green,blue', + '{+list*}' => 'red,green,blue', + '{+keys}' => [ + 'semi,;,dot,.,comma,,', + 'dot,.,semi,;,comma,,', + 'comma,,,semi,;,dot,.', + 'semi,;,comma,,,dot,.', + 'dot,.,comma,,,semi,;', + 'comma,,,dot,.,semi,;' + ], + '{+keys*}' => [ + 'semi=;,dot=.,comma=,', + 'dot=.,semi=;,comma=,', + 'comma=,,semi=;,dot=.', + 'semi=;,comma=,,dot=.', + 'dot=.,comma=,,semi=;', + 'comma=,,dot=.,semi=;' + ] + } + end + context "fragment expansion (#)" do + it_behaves_like 'expands', { + '{#var}' => '#value', + '{#hello}' => '#Hello%20World!', + '{#half}' => '#50%25', + 'foo{#empty}' => 'foo#', + 'foo{#undef}' => 'foo', + '{#x,hello,y}' => '#1024,Hello%20World!,768', + '{#path,x}/here' => '#/foo/bar,1024/here', + '{#path:6}/here' => '#/foo/b/here', + '{#list}' => '#red,green,blue', + '{#list*}' => '#red,green,blue', + '{#keys}' => [ + '#semi,;,dot,.,comma,,', + '#dot,.,semi,;,comma,,', + '#comma,,,semi,;,dot,.', + '#semi,;,comma,,,dot,.', + '#dot,.,comma,,,semi,;', + '#comma,,,dot,.,semi,;' + ], + '{#keys*}' => [ + '#semi=;,dot=.,comma=,', + '#dot=.,semi=;,comma=,', + '#comma=,,semi=;,dot=.', + '#semi=;,comma=,,dot=.', + '#dot=.,comma=,,semi=;', + '#comma=,,dot=.,semi=;' + ] + } + end + context "label expansion (.)" do + it_behaves_like 'expands', { + '{.who}' => '.fred', + '{.who,who}' => '.fred.fred', + '{.half,who}' => '.50%25.fred', + 'www{.dom*}' => 'www.example.com', + 'X{.var}' => 'X.value', + 'X{.empty}' => 'X.', + 'X{.undef}' => 'X', + 'X{.var:3}' => 'X.val', + 'X{.list}' => 'X.red,green,blue', + 'X{.list*}' => 'X.red.green.blue', + 'X{.keys}' => [ + 'X.semi,%3B,dot,.,comma,%2C', + 'X.dot,.,semi,%3B,comma,%2C', + 'X.comma,%2C,semi,%3B,dot,.', + 'X.semi,%3B,comma,%2C,dot,.', + 'X.dot,.,comma,%2C,semi,%3B', + 'X.comma,%2C,dot,.,semi,%3B' + ], + 'X{.keys*}' => [ + 'X.semi=%3B.dot=..comma=%2C', + 'X.dot=..semi=%3B.comma=%2C', + 'X.comma=%2C.semi=%3B.dot=.', + 'X.semi=%3B.comma=%2C.dot=.', + 'X.dot=..comma=%2C.semi=%3B', + 'X.comma=%2C.dot=..semi=%3B' + ], + 'X{.empty_keys}' => 'X', + 'X{.empty_keys*}' => 'X' + } + end + context "path expansion (/)" do + it_behaves_like 'expands', { + '{/who}' => '/fred', + '{/who,who}' => '/fred/fred', + '{/half,who}' => '/50%25/fred', + '{/who,dub}' => '/fred/me%2Ftoo', + '{/var}' => '/value', + '{/var,empty}' => '/value/', + '{/var,undef}' => '/value', + '{/var,x}/here' => '/value/1024/here', + '{/var:1,var}' => '/v/value', + '{/list}' => '/red,green,blue', + '{/list*}' => '/red/green/blue', + '{/list*,path:4}' => '/red/green/blue/%2Ffoo', + '{/keys}' => [ + '/semi,%3B,dot,.,comma,%2C', + '/dot,.,semi,%3B,comma,%2C', + '/comma,%2C,semi,%3B,dot,.', + '/semi,%3B,comma,%2C,dot,.', + '/dot,.,comma,%2C,semi,%3B', + '/comma,%2C,dot,.,semi,%3B' + ], + '{/keys*}' => [ + '/semi=%3B/dot=./comma=%2C', + '/dot=./semi=%3B/comma=%2C', + '/comma=%2C/semi=%3B/dot=.', + '/semi=%3B/comma=%2C/dot=.', + '/dot=./comma=%2C/semi=%3B', + '/comma=%2C/dot=./semi=%3B' + ] + } + end + context "path-style expansion 
(;)" do + it_behaves_like 'expands', { + '{;who}' => ';who=fred', + '{;half}' => ';half=50%25', + '{;empty}' => ';empty', + '{;v,empty,who}' => ';v=6;empty;who=fred', + '{;v,bar,who}' => ';v=6;who=fred', + '{;x,y}' => ';x=1024;y=768', + '{;x,y,empty}' => ';x=1024;y=768;empty', + '{;x,y,undef}' => ';x=1024;y=768', + '{;hello:5}' => ';hello=Hello', + '{;list}' => ';list=red,green,blue', + '{;list*}' => ';list=red;list=green;list=blue', + '{;keys}' => [ + ';keys=semi,%3B,dot,.,comma,%2C', + ';keys=dot,.,semi,%3B,comma,%2C', + ';keys=comma,%2C,semi,%3B,dot,.', + ';keys=semi,%3B,comma,%2C,dot,.', + ';keys=dot,.,comma,%2C,semi,%3B', + ';keys=comma,%2C,dot,.,semi,%3B' + ], + '{;keys*}' => [ + ';semi=%3B;dot=.;comma=%2C', + ';dot=.;semi=%3B;comma=%2C', + ';comma=%2C;semi=%3B;dot=.', + ';semi=%3B;comma=%2C;dot=.', + ';dot=.;comma=%2C;semi=%3B', + ';comma=%2C;dot=.;semi=%3B' + ] + } + end + context "form query expansion (?)" do + it_behaves_like 'expands', { + '{?who}' => '?who=fred', + '{?half}' => '?half=50%25', + '{?x,y}' => '?x=1024&y=768', + '{?x,y,empty}' => '?x=1024&y=768&empty=', + '{?x,y,undef}' => '?x=1024&y=768', + '{?var:3}' => '?var=val', + '{?list}' => '?list=red,green,blue', + '{?list*}' => '?list=red&list=green&list=blue', + '{?keys}' => [ + '?keys=semi,%3B,dot,.,comma,%2C', + '?keys=dot,.,semi,%3B,comma,%2C', + '?keys=comma,%2C,semi,%3B,dot,.', + '?keys=semi,%3B,comma,%2C,dot,.', + '?keys=dot,.,comma,%2C,semi,%3B', + '?keys=comma,%2C,dot,.,semi,%3B' + ], + '{?keys*}' => [ + '?semi=%3B&dot=.&comma=%2C', + '?dot=.&semi=%3B&comma=%2C', + '?comma=%2C&semi=%3B&dot=.', + '?semi=%3B&comma=%2C&dot=.', + '?dot=.&comma=%2C&semi=%3B', + '?comma=%2C&dot=.&semi=%3B' + ] + } + end + context "form query expansion (&)" do + it_behaves_like 'expands', { + '{&who}' => '&who=fred', + '{&half}' => '&half=50%25', + '?fixed=yes{&x}' => '?fixed=yes&x=1024', + '{&x,y,empty}' => '&x=1024&y=768&empty=', + '{&x,y,undef}' => '&x=1024&y=768', + '{&var:3}' => '&var=val', + '{&list}' => '&list=red,green,blue', + '{&list*}' => '&list=red&list=green&list=blue', + '{&keys}' => [ + '&keys=semi,%3B,dot,.,comma,%2C', + '&keys=dot,.,semi,%3B,comma,%2C', + '&keys=comma,%2C,semi,%3B,dot,.', + '&keys=semi,%3B,comma,%2C,dot,.', + '&keys=dot,.,comma,%2C,semi,%3B', + '&keys=comma,%2C,dot,.,semi,%3B' + ], + '{&keys*}' => [ + '&semi=%3B&dot=.&comma=%2C', + '&dot=.&semi=%3B&comma=%2C', + '&comma=%2C&semi=%3B&dot=.', + '&semi=%3B&comma=%2C&dot=.', + '&dot=.&comma=%2C&semi=%3B', + '&comma=%2C&dot=.&semi=%3B' + ] + } + end + context "non-string key in match data" do + subject {Addressable::Template.new("http://example.com/{one}")} + + it "raises TypeError" do + expect { subject.expand(Object.new => "1") }.to raise_error TypeError + end + end +end + +class ExampleTwoProcessor + def self.restore(name, value) + return value.gsub(/-/, " ") if name == "query" + return value + end + + def self.match(name) + return ".*?" if name == "first" + return ".*" + end + def self.validate(name, value) + return !!(value =~ /^[\w ]+$/) if name == "query" + return true + end + + def self.transform(name, value) + return value.gsub(/ /, "+") if name == "query" + return value + end +end + +class DumbProcessor + def self.match(name) + return ".*?" 
if name == "first" + end +end + +describe Addressable::Template do + describe 'initialize' do + context 'with a non-string' do + it 'raises a TypeError' do + expect { Addressable::Template.new(nil) }.to raise_error(TypeError) + end + end + end + + describe 'freeze' do + subject { Addressable::Template.new("http://example.com/{first}/{+second}/") } + it 'freezes the template' do + expect(subject.freeze).to be_frozen + end + end + + describe "Matching" do + let(:uri){ + Addressable::URI.parse( + "http://example.com/search/an-example-search-query/" + ) + } + let(:uri2){ + Addressable::URI.parse("http://example.com/a/b/c/") + } + let(:uri3){ + Addressable::URI.parse("http://example.com/;a=1;b=2;c=3;first=foo") + } + let(:uri4){ + Addressable::URI.parse("http://example.com/?a=1&b=2&c=3&first=foo") + } + let(:uri5){ + "http://example.com/foo" + } + context "first uri with ExampleTwoProcessor" do + subject { + Addressable::Template.new( + "http://example.com/search/{query}/" + ).match(uri, ExampleTwoProcessor) + } + its(:variables){ should == ["query"] } + its(:captures){ should == ["an example search query"] } + end + + context "second uri with ExampleTwoProcessor" do + subject { + Addressable::Template.new( + "http://example.com/{first}/{+second}/" + ).match(uri2, ExampleTwoProcessor) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", "b/c"] } + end + + context "second uri with DumbProcessor" do + subject { + Addressable::Template.new( + "http://example.com/{first}/{+second}/" + ).match(uri2, DumbProcessor) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", "b/c"] } + end + + context "second uri" do + subject { + Addressable::Template.new( + "http://example.com/{first}{/second*}/" + ).match(uri2) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", ["b","c"]] } + end + context "third uri" do + subject { + Addressable::Template.new( + "http://example.com/{;hash*,first}" + ).match(uri3) + } + its(:variables){ should == ["hash", "first"] } + its(:captures){ should == [ + {"a" => "1", "b" => "2", "c" => "3", "first" => "foo"}, nil] } + end + # Note that this expansion is impossible to revert deterministically - the + # * operator means first could have been a key of hash or a separate key. + # Semantically, a separate key is more likely, but both are possible. 
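+ # For instance (an illustrative sketch mirroring the fourth URI below), both
+ # of these mappings expand "{?hash*,first}" to "?a=1&b=2&c=3&first=foo":
+ #   {"hash" => {"a" => "1", "b" => "2", "c" => "3", "first" => "foo"}, "first" => nil}
+ #   {"hash" => {"a" => "1", "b" => "2", "c" => "3"}, "first" => "foo"}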
+ context "fourth uri" do + subject { + Addressable::Template.new( + "http://example.com/{?hash*,first}" + ).match(uri4) + } + its(:variables){ should == ["hash", "first"] } + its(:captures){ should == [ + {"a" => "1", "b" => "2", "c" => "3", "first"=> "foo"}, nil] } + end + context "fifth uri" do + subject { + Addressable::Template.new( + "http://example.com/{path}{?hash*,first}" + ).match(uri5) + } + its(:variables){ should == ["path", "hash", "first"] } + its(:captures){ should == ["foo", nil, nil] } + end + end + + describe 'match' do + subject { Addressable::Template.new('http://example.com/first/second/') } + context 'when the URI is the same as the template' do + it 'returns the match data itself with an empty mapping' do + uri = Addressable::URI.parse('http://example.com/first/second/') + match_data = subject.match(uri) + expect(match_data).to be_an Addressable::Template::MatchData + expect(match_data.uri).to eq(uri) + expect(match_data.template).to eq(subject) + expect(match_data.mapping).to be_empty + expect(match_data.inspect).to be_an String + end + end + end + + describe "extract" do + let(:template) { + Addressable::Template.new( + "http://{host}{/segments*}/{?one,two,bogus}{#fragment}" + ) + } + let(:uri){ "http://example.com/a/b/c/?one=1&two=2#foo" } + let(:uri2){ "http://example.com/a/b/c/#foo" } + it "should be able to extract with queries" do + expect(template.extract(uri)).to eq({ + "host" => "example.com", + "segments" => %w(a b c), + "one" => "1", + "bogus" => nil, + "two" => "2", + "fragment" => "foo" + }) + end + it "should be able to extract without queries" do + expect(template.extract(uri2)).to eq({ + "host" => "example.com", + "segments" => %w(a b c), + "one" => nil, + "bogus" => nil, + "two" => nil, + "fragment" => "foo" + }) + end + + context "issue #137" do + subject { Addressable::Template.new('/path{?page,per_page}') } + + it "can match empty" do + data = subject.extract("/path") + expect(data["page"]).to eq(nil) + expect(data["per_page"]).to eq(nil) + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match first var" do + data = subject.extract("/path?page=1") + expect(data["page"]).to eq("1") + expect(data["per_page"]).to eq(nil) + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match second var" do + data = subject.extract("/path?per_page=1") + expect(data["page"]).to eq(nil) + expect(data["per_page"]).to eq("1") + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match both vars" do + data = subject.extract("/path?page=2&per_page=1") + expect(data["page"]).to eq("2") + expect(data["per_page"]).to eq("1") + expect(data.keys.sort).to eq(['page', 'per_page']) + end + end + end + + describe "Partial expand with symbols" do + context "partial_expand with two simple values" do + subject { + Addressable::Template.new("http://example.com/{one}/{two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/1/{two}/" + ) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1", :three => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand form style query with missing param at beginning" do + subject { + Addressable::Template.new("http://example.com/{?one,two}/") + } + it "builds a new pattern" do + 
expect(subject.partial_expand(:two => "2").pattern).to eq( + "http://example.com/?two=2{&one}/" + ) + end + end + context "issue #307 - partial_expand form query with nil params" do + subject do + Addressable::Template.new("http://example.com/{?one,two,three}/") + end + it "builds a new pattern with two=nil" do + expect(subject.partial_expand(two: nil).pattern).to eq( + "http://example.com/{?one}{&three}/" + ) + end + it "builds a new pattern with one=nil and two=nil" do + expect(subject.partial_expand(one: nil, two: nil).pattern).to eq( + "http://example.com/{?three}/" + ) + end + it "builds a new pattern with one=1 and two=nil" do + expect(subject.partial_expand(one: 1, two: nil).pattern).to eq( + "http://example.com/?one=1{&three}/" + ) + end + it "builds a new pattern with one=nil and two=2" do + expect(subject.partial_expand(one: nil, two: 2).pattern).to eq( + "http://example.com/?two=2{&three}/" + ) + end + it "builds a new pattern with one=nil" do + expect(subject.partial_expand(one: nil).pattern).to eq( + "http://example.com/{?two}{&three}/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + context "partial expand with unicode values" do + subject do + Addressable::Template.new("http://example.com/{resource}/{query}/") + end + it "normalizes unicode by default" do + template = subject.partial_expand("query" => "Cafe\u0301") + expect(template.pattern).to eq( + "http://example.com/{resource}/Caf%C3%A9/" + ) + end + + it "does not normalize unicode when byte semantics requested" do + template = subject.partial_expand({"query" => "Cafe\u0301"}, nil, false) + expect(template.pattern).to eq( + "http://example.com/{resource}/Cafe%CC%81/" + ) + end + end + end + describe "Partial expand with strings" do + context "partial_expand with two simple values" do + subject { + Addressable::Template.new("http://example.com/{one}/{two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1/{two}/" + ) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1", "three" => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + end + describe "Expand" do + context "expand with unicode values" do + subject do + Addressable::Template.new("http://example.com/search/{query}/") + end + it "normalizes unicode by 
default" do + uri = subject.expand("query" => "Cafe\u0301").to_str + expect(uri).to eq("http://example.com/search/Caf%C3%A9/") + end + + it "does not normalize unicode when byte semantics requested" do + uri = subject.expand({ "query" => "Cafe\u0301" }, nil, false).to_str + expect(uri).to eq("http://example.com/search/Cafe%CC%81/") + end + end + context "expand with a processor" do + subject { + Addressable::Template.new("http://example.com/search/{query}/") + } + it "processes spaces" do + expect(subject.expand({"query" => "an example search query"}, + ExampleTwoProcessor).to_str).to eq( + "http://example.com/search/an+example+search+query/" + ) + end + it "validates" do + expect{ + subject.expand({"query" => "Bogus!"}, + ExampleTwoProcessor).to_str + }.to raise_error(Addressable::Template::InvalidTemplateValueError) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1", "three" => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + end + context "Matching with operators" do + describe "Level 1:" do + subject { Addressable::Template.new("foo{foo}/{bar}baz") } + it "can match" do + data = subject.match("foofoo/bananabaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("banana") + end + it "can fail" do + expect(subject.match("bar/foo")).to be_nil + expect(subject.match("foobaz")).to be_nil + end + it "can match empty" do + data = subject.match("foo/baz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq(nil) + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + + describe "Level 2:" do + subject { Addressable::Template.new("foo{+foo}{#bar}baz") } + it "can match" do + data = subject.match("foo/test/banana#bazbaz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq("baz") + end + it "can match empty level 2 #" do + data = subject.match("foo/test/bananabaz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq(nil) + data = subject.match("foo/test/banana#baz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq("") + end + it "can match empty level 2 +" do + data = subject.match("foobaz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq(nil) + data = subject.match("foo#barbaz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + + describe "Level 3:" do + context "no operator" do + subject { Addressable::Template.new("foo{foo,bar}baz") } + it "can match" do + data = subject.match("foofoo,barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" 
do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "+ operator" do + subject { Addressable::Template.new("foo{+foo,bar}baz") } + it "can match" do + data = subject.match("foofoo/bar,barbaz") + expect(data.mapping["bar"]).to eq("foo/bar,bar") + expect(data.mapping["foo"]).to eq("") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context ". operator" do + subject { Addressable::Template.new("foo{.foo,bar}baz") } + it "can match" do + data = subject.match("foo.foo.barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "/ operator" do + subject { Addressable::Template.new("foo{/foo,bar}baz") } + it "can match" do + data = subject.match("foo/foo/barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "; operator" do + subject { Addressable::Template.new("foo{;foo,bar,baz}baz") } + it "can match" do + data = subject.match("foo;foo=bar%20baz;bar=foo;bazbaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + expect(data.mapping["baz"]).to eq("") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar baz)) + end + end + context "? operator" do + context "test" do + subject { Addressable::Template.new("foo{?foo,bar}baz") } + it "can match" do + data = subject.match("foo?foo=bar%20baz&bar=foobaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar)) + end + end + + context "issue #137" do + subject { Addressable::Template.new('/path{?page,per_page}') } + + it "can match empty" do + data = subject.match("/path") + expect(data.mapping["page"]).to eq(nil) + expect(data.mapping["per_page"]).to eq(nil) + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match first var" do + data = subject.match("/path?page=1") + expect(data.mapping["page"]).to eq("1") + expect(data.mapping["per_page"]).to eq(nil) + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match second var" do + data = subject.match("/path?per_page=1") + expect(data.mapping["page"]).to eq(nil) + expect(data.mapping["per_page"]).to eq("1") + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match both vars" do + data = subject.match("/path?page=2&per_page=1") + expect(data.mapping["page"]).to eq("2") + expect(data.mapping["per_page"]).to eq("1") + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + end + + context "issue #71" do + subject { Addressable::Template.new("http://cyberscore.dev/api/users{?username}") } + it "can match" do + data = subject.match("http://cyberscore.dev/api/users?username=foobaz") + expect(data.mapping["username"]).to eq("foobaz") + end + it "lists vars" do + expect(subject.variables).to eq(%w(username)) + expect(subject.keys).to eq(%w(username)) + end + end + end + context "& operator" do + subject { Addressable::Template.new("foo{&foo,bar}baz") } + it "can match" do + data = subject.match("foo&foo=bar%20baz&bar=foobaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar)) + end + end + end + end + + context 
"support regexes:" do + context "EXPRESSION" do + subject { Addressable::Template::EXPRESSION } + it "should be able to match an expression" do + expect(subject).to match("{foo}") + expect(subject).to match("{foo,9}") + expect(subject).to match("{foo.bar,baz}") + expect(subject).to match("{+foo.bar,baz}") + expect(subject).to match("{foo,foo%20bar}") + expect(subject).to match("{#foo:20,baz*}") + expect(subject).to match("stuff{#foo:20,baz*}things") + end + it "should fail on non vars" do + expect(subject).not_to match("!{foo") + expect(subject).not_to match("{foo.bar.}") + expect(subject).not_to match("!{}") + end + end + context "VARNAME" do + subject { Addressable::Template::VARNAME } + it "should be able to match a variable" do + expect(subject).to match("foo") + expect(subject).to match("9") + expect(subject).to match("foo.bar") + expect(subject).to match("foo_bar") + expect(subject).to match("foo_bar.baz") + expect(subject).to match("foo%20bar") + expect(subject).to match("foo%20bar.baz") + end + it "should fail on non vars" do + expect(subject).not_to match("!foo") + expect(subject).not_to match("foo.bar.") + expect(subject).not_to match("foo%2%00bar") + expect(subject).not_to match("foo_ba%r") + expect(subject).not_to match("foo_bar*") + expect(subject).not_to match("foo_bar:20") + end + + it 'should parse in a reasonable time' do + expect do + Timeout.timeout(0.1) do + expect(subject).not_to match("0"*25 + "!") + end + end.not_to raise_error + end + end + context "VARIABLE_LIST" do + subject { Addressable::Template::VARIABLE_LIST } + it "should be able to match a variable list" do + expect(subject).to match("foo,bar") + expect(subject).to match("foo") + expect(subject).to match("foo,bar*,baz") + expect(subject).to match("foo.bar,bar_baz*,baz:12") + end + it "should fail on non vars" do + expect(subject).not_to match(",foo,bar*,baz") + expect(subject).not_to match("foo,*bar,baz") + expect(subject).not_to match("foo,,bar*,baz") + end + end + context "VARSPEC" do + subject { Addressable::Template::VARSPEC } + it "should be able to match a variable with modifier" do + expect(subject).to match("9:8") + expect(subject).to match("foo.bar*") + expect(subject).to match("foo_bar:12") + expect(subject).to match("foo_bar.baz*") + expect(subject).to match("foo%20bar:12") + expect(subject).to match("foo%20bar.baz*") + end + it "should fail on non vars" do + expect(subject).not_to match("!foo") + expect(subject).not_to match("*foo") + expect(subject).not_to match("fo*o") + expect(subject).not_to match("fo:o") + expect(subject).not_to match("foo:") + end + end + end +end + +describe Addressable::Template::MatchData do + let(:template) { Addressable::Template.new('{foo}/{bar}') } + subject(:its) { template.match('ab/cd') } + its(:uri) { should == Addressable::URI.parse('ab/cd') } + its(:template) { should == template } + its(:mapping) { should == { 'foo' => 'ab', 'bar' => 'cd' } } + its(:variables) { should == ['foo', 'bar'] } + its(:keys) { should == ['foo', 'bar'] } + its(:names) { should == ['foo', 'bar'] } + its(:values) { should == ['ab', 'cd'] } + its(:captures) { should == ['ab', 'cd'] } + its(:to_a) { should == ['ab/cd', 'ab', 'cd'] } + its(:to_s) { should == 'ab/cd' } + its(:string) { should == its.to_s } + its(:pre_match) { should == "" } + its(:post_match) { should == "" } + + describe 'values_at' do + it 'returns an array with the values' do + expect(its.values_at(0, 2)).to eq(['ab/cd', 'cd']) + end + it 'allows mixing integer an string keys' do + expect(its.values_at('foo', 1)).to 
eq(['ab', 'ab']) + end + it 'accepts unknown keys' do + expect(its.values_at('baz', 'foo')).to eq([nil, 'ab']) + end + end + + describe '[]' do + context 'string key' do + it 'returns the corresponding capture' do + expect(its['foo']).to eq('ab') + expect(its['bar']).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its['baz']).to be_nil + end + end + context 'symbol key' do + it 'returns the corresponding capture' do + expect(its[:foo]).to eq('ab') + expect(its[:bar]).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its[:baz]).to be_nil + end + end + context 'integer key' do + it 'returns the full URI for index 0' do + expect(its[0]).to eq('ab/cd') + end + it 'returns the corresponding capture' do + expect(its[1]).to eq('ab') + expect(its[2]).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its[3]).to be_nil + end + end + context 'other key' do + it 'raises an exception' do + expect { its[Object.new] }.to raise_error(TypeError) + end + end + context 'with length' do + it 'returns an array starting at index with given length' do + expect(its[0, 2]).to eq(['ab/cd', 'ab']) + expect(its[2, 1]).to eq(['cd']) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/uri_spec.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/uri_spec.rb new file mode 100644 index 0000000..00baaac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/addressable/uri_spec.rb @@ -0,0 +1,6665 @@ +# frozen_string_literal: true + +# coding: utf-8 +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +require "spec_helper" + +require "addressable/uri" +require "uri" +require "ipaddr" + +if !"".respond_to?("force_encoding") + class String + def force_encoding(encoding) + @encoding = encoding + end + + def encoding + @encoding ||= Encoding::ASCII_8BIT + end + end + + class Encoding + def initialize(name) + @name = name + end + + def to_s + return @name + end + + UTF_8 = Encoding.new("UTF-8") + ASCII_8BIT = Encoding.new("US-ASCII") + end +end + +module Fake + module URI + class HTTP + def initialize(uri) + @uri = uri + end + + def to_s + return @uri.to_s + end + + alias :to_str :to_s + end + end +end + +describe Addressable::URI, "when created with a non-numeric port number" do + it "should raise an error" do + expect do + Addressable::URI.new(:port => "bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a invalid encoded port number" do + it "should raise an error" do + expect do + Addressable::URI.new(:port => "%eb") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a non-string scheme" do + it "should raise an error" do + expect do + Addressable::URI.new(:scheme => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string user" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string password" do + it "should raise an error" do + expect do + Addressable::URI.new(:password => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string userinfo" do + it "should raise an error" do + expect do + Addressable::URI.new(:userinfo => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string host" do + it "should raise an error" do + expect do + Addressable::URI.new(:host => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string authority" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string path" do + it "should raise an error" do + expect do + Addressable::URI.new(:path => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string query" do + it "should raise an error" do + expect do + Addressable::URI.new(:query => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string fragment" do + it "should raise an error" do + expect do + Addressable::URI.new(:fragment => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a scheme but no hierarchical " + + "segment" do + it "should raise an error" do + expect do + Addressable::URI.parse("http:") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "quote handling" do + describe 'in host name' do + it "should raise an error for single quote" do + expect do + Addressable::URI.parse("http://local\"host/") + end.to raise_error(Addressable::URI::InvalidURIError) + end + end +end + +describe Addressable::URI, "newline normalization" do + it "should not accept newlines in scheme" do + expect do + 
Addressable::URI.parse("ht%0atp://localhost/") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not unescape newline in path" do + uri = Addressable::URI.parse("http://localhost/%0a").normalize + expect(uri.to_s).to eq("http://localhost/%0A") + end + + it "should not unescape newline in hostname" do + uri = Addressable::URI.parse("http://local%0ahost/").normalize + expect(uri.to_s).to eq("http://local%0Ahost/") + end + + it "should not unescape newline in username" do + uri = Addressable::URI.parse("http://foo%0abar@localhost/").normalize + expect(uri.to_s).to eq("http://foo%0Abar@localhost/") + end + + it "should not unescape newline in username" do + uri = Addressable::URI.parse("http://example:foo%0abar@example/").normalize + expect(uri.to_s).to eq("http://example:foo%0Abar@example/") + end + + it "should not accept newline in hostname" do + uri = Addressable::URI.parse("http://localhost/") + expect do + uri.host = "local\nhost" + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with ambiguous path" do + it "should raise an error" do + expect do + Addressable::URI.parse("::http") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with an invalid host" do + it "should raise an error" do + expect do + Addressable::URI.new(:host => "") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host consisting of " + + "sub-delims characters" do + it "should not raise an error" do + expect do + Addressable::URI.new( + :host => Addressable::URI::CharacterClasses::SUB_DELIMS.gsub(/\\/, '') + ) + end.not_to raise_error + end +end + +describe Addressable::URI, "when created with a host consisting of " + + "unreserved characters" do + it "should not raise an error" do + expect do + Addressable::URI.new( + :host => Addressable::URI::CharacterClasses::UNRESERVED.gsub(/\\/, '') + ) + end.not_to raise_error + end +end + +describe Addressable::URI, "when created from nil components" do + before do + @uri = Addressable::URI.new + end + + it "should have a nil site value" do + expect(@uri.site).to eq(nil) + end + + it "should have an empty path" do + expect(@uri.path).to eq("") + end + + it "should be an empty uri" do + expect(@uri.to_s).to eq("") + end + + it "should have a nil default port" do + expect(@uri.default_port).to eq(nil) + end + + it "should be empty" do + expect(@uri).to be_empty + end + + it "should raise an error if the scheme is set to whitespace" do + expect do + @uri.scheme = "\t \n" + end.to raise_error(Addressable::URI::InvalidURIError, /'\t \n'/) + end + + it "should raise an error if the scheme is set to all digits" do + expect do + @uri.scheme = "123" + end.to raise_error(Addressable::URI::InvalidURIError, /'123'/) + end + + it "should raise an error if the scheme begins with a digit" do + expect do + @uri.scheme = "1scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'1scheme'/) + end + + it "should raise an error if the scheme begins with a plus" do + expect do + @uri.scheme = "+scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'\+scheme'/) + end + + it "should raise an error if the scheme begins with a dot" do + expect do + @uri.scheme = ".scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'\.scheme'/) + end + + it "should raise an error if the scheme begins with a dash" do + expect do + @uri.scheme = "-scheme" + end.to 
raise_error(Addressable::URI::InvalidURIError, /'-scheme'/) + end + + it "should raise an error if the scheme contains an illegal character" do + expect do + @uri.scheme = "scheme!" + end.to raise_error(Addressable::URI::InvalidURIError, /'scheme!'/) + end + + it "should raise an error if the scheme contains whitespace" do + expect do + @uri.scheme = "sch eme" + end.to raise_error(Addressable::URI::InvalidURIError, /'sch eme'/) + end + + it "should raise an error if the scheme contains a newline" do + expect do + @uri.scheme = "sch\neme" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.user = "user" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.password = "pass" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.scheme = "http" + @uri.fragment = "fragment" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.fragment = "fragment" + @uri.scheme = "http" + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when initialized from individual components" do + before do + @uri = Addressable::URI.new( + :scheme => "http", + :user => "user", + :password => "password", + :host => "example.com", + :port => 8080, + :path => "/path", + :query => "query=value", + :fragment => "fragment" + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'com' for #tld" do + expect(@uri.tld).to eq("com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + 
end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when initialized from " + + "frozen individual components" do + before do + @uri = Addressable::URI.new( + :scheme => "http".freeze, + :user => "user".freeze, + :password => "password".freeze, + :host => "example.com".freeze, + :port => "8080".freeze, + :path => "/path".freeze, + :query => "query=value".freeze, + :fragment => "fragment".freeze + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to 
eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from a frozen string" do + before do + @uri = Addressable::URI.parse( + "http://user:password@example.com:8080/path?query=value#fragment".freeze + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + 
expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when frozen" do + before do + @uri = Addressable::URI.new.freeze + end + + it "returns nil for #scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "returns nil for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq(nil) + end + + it "returns nil for #user" do + expect(@uri.user).to eq(nil) + end + + it "returns nil for #normalized_user" do + expect(@uri.normalized_user).to eq(nil) + end + + it "returns nil for #password" do + expect(@uri.password).to eq(nil) + end + + it "returns nil for #normalized_password" do + expect(@uri.normalized_password).to eq(nil) + end + + it "returns nil for #userinfo" do + expect(@uri.userinfo).to eq(nil) + end + + it "returns nil for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "returns nil for #host" do + expect(@uri.host).to eq(nil) + end + + it "returns nil for #normalized_host" do + expect(@uri.normalized_host).to eq(nil) + end + + it "returns nil for #authority" do + expect(@uri.authority).to eq(nil) + end + + it "returns nil for #normalized_authority" do + expect(@uri.normalized_authority).to eq(nil) + end + + it "returns nil for #port" do + expect(@uri.port).to eq(nil) + end + + it "returns nil for #normalized_port" do + expect(@uri.normalized_port).to eq(nil) + end + + it "returns nil for #default_port" do + expect(@uri.default_port).to eq(nil) + end + + it "returns nil for #site" do + expect(@uri.site).to eq(nil) + end + + it "returns nil for #normalized_site" do + expect(@uri.normalized_site).to eq(nil) + end + + it "returns '' for #path" do + expect(@uri.path).to eq('') + end + + it "returns '' for #normalized_path" do + expect(@uri.normalized_path).to eq('') + end + + it "returns nil for #query" do + expect(@uri.query).to eq(nil) + end + + it "returns nil for #normalized_query" do + expect(@uri.normalized_query).to eq(nil) + end + + it "returns nil for #fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "returns nil for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq(nil) + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq('') + end + + it "should be empty" do + expect(@uri).to be_empty + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not be frozen after duping" do + expect(@uri.dup).not_to be_frozen + end + + it "should not allow destructive operations" do + expect { @uri.normalize! 
}.to raise_error { |error| + expect(error.message).to match(/can't modify frozen/) + expect(error).to satisfy { |e| RuntimeError === e || TypeError === e } + } + end +end + +describe Addressable::URI, "when frozen" do + before do + @uri = Addressable::URI.parse( + "HTTP://example.com.:%38%30/%70a%74%68?a=%31#1%323" + ).freeze + end + + it "returns 'HTTP' for #scheme" do + expect(@uri.scheme).to eq("HTTP") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + expect(@uri.normalize.scheme).to eq("http") + end + + it "returns nil for #user" do + expect(@uri.user).to eq(nil) + end + + it "returns nil for #normalized_user" do + expect(@uri.normalized_user).to eq(nil) + end + + it "returns nil for #password" do + expect(@uri.password).to eq(nil) + end + + it "returns nil for #normalized_password" do + expect(@uri.normalized_password).to eq(nil) + end + + it "returns nil for #userinfo" do + expect(@uri.userinfo).to eq(nil) + end + + it "returns nil for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "returns 'example.com.' for #host" do + expect(@uri.host).to eq("example.com.") + end + + it "returns nil for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + expect(@uri.normalize.host).to eq("example.com") + end + + it "returns 'example.com.:80' for #authority" do + expect(@uri.authority).to eq("example.com.:80") + end + + it "returns 'example.com:80' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("example.com") + expect(@uri.normalize.authority).to eq("example.com") + end + + it "returns 80 for #port" do + expect(@uri.port).to eq(80) + end + + it "returns nil for #normalized_port" do + expect(@uri.normalized_port).to eq(nil) + expect(@uri.normalize.port).to eq(nil) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'HTTP://example.com.:80' for #site" do + expect(@uri.site).to eq("HTTP://example.com.:80") + end + + it "returns 'http://example.com' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://example.com") + expect(@uri.normalize.site).to eq("http://example.com") + end + + it "returns '/%70a%74%68' for #path" do + expect(@uri.path).to eq("/%70a%74%68") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + expect(@uri.normalize.path).to eq("/path") + end + + it "returns 'a=%31' for #query" do + expect(@uri.query).to eq("a=%31") + end + + it "returns 'a=1' for #normalized_query" do + expect(@uri.normalized_query).to eq("a=1") + expect(@uri.normalize.query).to eq("a=1") + end + + it "returns '/%70a%74%68?a=%31' for #request_uri" do + expect(@uri.request_uri).to eq("/%70a%74%68?a=%31") + end + + it "returns '1%323' for #fragment" do + expect(@uri.fragment).to eq("1%323") + end + + it "returns '123' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("123") + expect(@uri.normalize.fragment).to eq("123") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq('HTTP://example.com.:80/%70a%74%68?a=%31#1%323') + expect(@uri.normalize.to_s).to eq('http://example.com/path?a=1#123') + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not be frozen after duping" do + expect(@uri.dup).not_to be_frozen + end + + it "should not allow destructive operations" do + expect 
{ @uri.normalize! }.to raise_error { |error| + expect(error.message).to match(/can't modify frozen/) + expect(error).to satisfy { |e| RuntimeError === e || TypeError === e } + } + end +end + +describe Addressable::URI, "when created from string components" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :host => "example.com" + ) + end + + it "should have a site value of 'http://example.com'" do + expect(@uri.site).to eq("http://example.com") + end + + it "should be equal to the equivalent parsed URI" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should raise an error if invalid components omitted" do + expect do + @uri.omit(:bogus) + end.to raise_error(ArgumentError) + expect do + @uri.omit(:scheme, :bogus, :path) + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with a nil host but " + + "non-nil authority components" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => "user", :password => "pass", :port => 80) + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with both an authority and a user" do + it "should raise an error" do + expect do + Addressable::URI.new( + :user => "user", :authority => "user@example.com:80" + ) + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with an authority and no port" do + before do + @uri = Addressable::URI.new(:authority => "user@example.com") + end + + it "should not infer a port" do + expect(@uri.port).to eq(nil) + expect(@uri.default_port).to eq(nil) + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a site value of '//user@example.com'" do + expect(@uri.site).to eq("//user@example.com") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when created with a host with trailing dots" do + before do + @uri = Addressable::URI.new(:authority => "example...") + end + + it "should have a stable normalized form" do + expect(@uri.normalize.normalize.normalize.host).to eq( + @uri.normalize.host + ) + end +end + +describe Addressable::URI, "when created with a host with a backslash" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example\\example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host with a slash" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example/example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host with a space" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with both a userinfo and a user" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => "user", :userinfo => "user:pass") + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with a path that hasn't been " + + "prefixed with a '/' but a host specified" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :host => "example.com", :path => "path" + ) + end + + it "should prefix a '/' to the path" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/path")) + end + + it "should have a site value of 
'http://example.com'" do + expect(@uri.site).to eq("http://example.com") + end + + it "should have an origin of 'http://example.com" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when created with a path that hasn't been " + + "prefixed with a '/' but no host specified" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :path => "path" + ) + end + + it "should not prefix a '/' to the path" do + expect(@uri).to eq(Addressable::URI.parse("http:path")) + end + + it "should have a site value of 'http:'" do + expect(@uri.site).to eq("http:") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from an Addressable::URI object" do + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.parse(original_uri) + new_uri.host = 'www.example.com' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('http://www.example.com/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.heuristic_parse(original_uri) + new_uri.host = 'www.example.com' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('http://www.example.com/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.parse(original_uri) + new_uri.origin = 'https://www.example.com:8080' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('https://www.example.com:8080/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.heuristic_parse(original_uri) + new_uri.origin = 'https://www.example.com:8080' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('https://www.example.com:8080/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end +end + +describe Addressable::URI, "when parsed from something that looks " + + "like a URI object" do + it "should parse without error" do + uri = Addressable::URI.parse(Fake::URI::HTTP.new("http://example.com/")) + expect do + Addressable::URI.parse(uri) + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from a standard library URI object" do + it "should parse without error" do + uri = Addressable::URI.parse(URI.parse("http://example.com/")) + expect do + Addressable::URI.parse(uri) + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from ''" do + before do + @uri = Addressable::URI.parse("") + end + + it "should have no scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should be considered relative" do + expect(@uri).to 
be_relative + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'ftp://ftp.is.co.za/rfc/rfc1808.txt'" do + before do + @uri = Addressable::URI.parse("ftp://ftp.is.co.za/rfc/rfc1808.txt") + end + + it "should use the 'ftp' scheme" do + expect(@uri.scheme).to eq("ftp") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of 'ftp.is.co.za'" do + expect(@uri.host).to eq("ftp.is.co.za") + end + + it "should have inferred_port of 21" do + expect(@uri.inferred_port).to eq(21) + end + + it "should have a path of '/rfc/rfc1808.txt'" do + expect(@uri.path).to eq("/rfc/rfc1808.txt") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'ftp://ftp.is.co.za'" do + expect(@uri.origin).to eq('ftp://ftp.is.co.za') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'http://www.ietf.org/rfc/rfc2396.txt'" do + before do + @uri = Addressable::URI.parse("http://www.ietf.org/rfc/rfc2396.txt") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of 'www.ietf.org'" do + expect(@uri.host).to eq("www.ietf.org") + end + + it "should have inferred_port of 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/rfc/rfc2396.txt'" do + expect(@uri.path).to eq("/rfc/rfc2396.txt") + end + + it "should have a request URI of '/rfc/rfc2396.txt'" do + expect(@uri.request_uri).to eq("/rfc/rfc2396.txt") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should correctly omit components" do + expect(@uri.omit(:scheme).to_s).to eq("//www.ietf.org/rfc/rfc2396.txt") + expect(@uri.omit(:path).to_s).to eq("http://www.ietf.org") + end + + it "should correctly omit components destructively" do + @uri.omit!(:scheme) + expect(@uri.to_s).to eq("//www.ietf.org/rfc/rfc2396.txt") + end + + it "should have an origin of 'http://www.ietf.org'" do + expect(@uri.origin).to eq('http://www.ietf.org') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'ldap://[2001:db8::7]/c=GB?objectClass?one'" do + before do + @uri = Addressable::URI.parse("ldap://[2001:db8::7]/c=GB?objectClass?one") + end + + it "should use the 'ldap' scheme" do + expect(@uri.scheme).to eq("ldap") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of '[2001:db8::7]'" do + expect(@uri.host).to eq("[2001:db8::7]") + end + + it "should have inferred_port of 389" do + expect(@uri.inferred_port).to eq(389) + end + + it "should have a path of '/c=GB'" do + expect(@uri.path).to eq("/c=GB") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should not allow request URI assignment" do + expect do + @uri.request_uri = "/" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should have a query of 'objectClass?one'" do + expect(@uri.query).to eq("objectClass?one") + end + + it "should be 
considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should correctly omit components" do + expect(@uri.omit(:scheme, :authority).to_s).to eq("/c=GB?objectClass?one") + expect(@uri.omit(:path).to_s).to eq("ldap://[2001:db8::7]?objectClass?one") + end + + it "should correctly omit components destructively" do + @uri.omit!(:scheme, :authority) + expect(@uri.to_s).to eq("/c=GB?objectClass?one") + end + + it "should raise an error if omission would create an invalid URI" do + expect do + @uri.omit(:authority, :path) + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should have an origin of 'ldap://[2001:db8::7]'" do + expect(@uri.origin).to eq('ldap://[2001:db8::7]') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'mailto:John.Doe@example.com'" do + before do + @uri = Addressable::URI.parse("mailto:John.Doe@example.com") + end + + it "should use the 'mailto' scheme" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of 'John.Doe@example.com'" do + expect(@uri.path).to eq("John.Doe@example.com") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 2 of RFC 6068 +describe Addressable::URI, "when parsed from " + + "'mailto:?to=addr1@an.example,addr2@an.example'" do + before do + @uri = Addressable::URI.parse( + "mailto:?to=addr1@an.example,addr2@an.example" + ) + end + + it "should use the 'mailto' scheme" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should have the To: field value parameterized" do + expect(@uri.query_values(Hash)["to"]).to eq( + "addr1@an.example,addr2@an.example" + ) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'news:comp.infosystems.www.servers.unix'" do + before do + @uri = Addressable::URI.parse("news:comp.infosystems.www.servers.unix") + end + + it "should use the 'news' scheme" do + expect(@uri.scheme).to eq("news") + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of 'comp.infosystems.www.servers.unix'" do + expect(@uri.path).to eq("comp.infosystems.www.servers.unix") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe 
Addressable::URI, "when parsed from " + + "'tel:+1-816-555-1212'" do + before do + @uri = Addressable::URI.parse("tel:+1-816-555-1212") + end + + it "should use the 'tel' scheme" do + expect(@uri.scheme).to eq("tel") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of '+1-816-555-1212'" do + expect(@uri.path).to eq("+1-816-555-1212") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'telnet://192.0.2.16:80/'" do + before do + @uri = Addressable::URI.parse("telnet://192.0.2.16:80/") + end + + it "should use the 'telnet' scheme" do + expect(@uri.scheme).to eq("telnet") + end + + it "should have a host of '192.0.2.16'" do + expect(@uri.host).to eq("192.0.2.16") + end + + it "should have a port of 80" do + expect(@uri.port).to eq(80) + end + + it "should have a inferred_port of 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a default_port of 23" do + expect(@uri.default_port).to eq(23) + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'telnet://192.0.2.16:80'" do + expect(@uri.origin).to eq('telnet://192.0.2.16:80') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'urn:oasis:names:specification:docbook:dtd:xml:4.1.2'" do + before do + @uri = Addressable::URI.parse( + "urn:oasis:names:specification:docbook:dtd:xml:4.1.2") + end + + it "should use the 'urn' scheme" do + expect(@uri.scheme).to eq("urn") + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of " + + "'oasis:names:specification:docbook:dtd:xml:4.1.2'" do + expect(@uri.path).to eq("oasis:names:specification:docbook:dtd:xml:4.1.2") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when heuristically parsed from " + + "'192.0.2.16:8000/path'" do + before do + @uri = Addressable::URI.heuristic_parse("192.0.2.16:8000/path") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a host of '192.0.2.16'" do + expect(@uri.host).to eq("192.0.2.16") + end + + it "should have a port of '8000'" do + expect(@uri.port).to eq(8000) + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/path'" do + expect(@uri.path).to eq("/path") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it 
"should have an origin of 'http://192.0.2.16:8000'" do + expect(@uri.origin).to eq('http://192.0.2.16:8000') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com'" do + before do + @uri = Addressable::URI.parse("http://example.com") + end + + it "when inspected, should have the correct URI" do + expect(@uri.inspect).to include("http://example.com") + end + + it "when inspected, should have the correct class name" do + expect(@uri.inspect).to include("Addressable::URI") + end + + it "when inspected, should have the correct object id" do + expect(@uri.inspect).to include("%#0x" % @uri.object_id) + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should be considered ip-based" do + expect(@uri).to be_ip_based + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not have a specified port" do + expect(@uri.port).to eq(nil) + end + + it "should have an empty path" do + expect(@uri.path).to eq("") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + expect(@uri.query_values).to eq(nil) + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should not be exactly equal to 42" do + expect(@uri.eql?(42)).to eq(false) + end + + it "should not be equal to 42" do + expect(@uri == 42).to eq(false) + end + + it "should not be roughly equal to 42" do + expect(@uri === 42).to eq(false) + end + + it "should be exactly equal to http://example.com" do + expect(@uri.eql?(Addressable::URI.parse("http://example.com"))).to eq(true) + end + + it "should be roughly equal to http://example.com/" do + expect(@uri === Addressable::URI.parse("http://example.com/")).to eq(true) + end + + it "should be roughly equal to the string 'http://example.com/'" do + expect(@uri === "http://example.com/").to eq(true) + end + + it "should not be roughly equal to the string " + + "'http://example.com:bogus/'" do + expect do + expect(@uri === "http://example.com:bogus/").to eq(false) + end.not_to raise_error + end + + it "should result in itself when joined with itself" do + expect(@uri.join(@uri).to_s).to eq("http://example.com") + expect(@uri.join!(@uri).to_s).to eq("http://example.com") + end + + it "should be equivalent to http://EXAMPLE.com" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com")) + end + + it "should be equivalent to http://EXAMPLE.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com:80/")) + end + + it "should have the same hash as http://example.com" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have the same hash as http://EXAMPLE.com after assignment" do + @uri.origin = "http://EXAMPLE.com" + expect(@uri.hash).to 
eq(Addressable::URI.parse("http://EXAMPLE.com").hash) + end + + it "should have a different hash from http://EXAMPLE.com" do + expect(@uri.hash).not_to eq(Addressable::URI.parse("http://EXAMPLE.com").hash) + end + + it "should not allow origin assignment without scheme" do + expect do + @uri.origin = "example.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow origin assignment without host" do + expect do + @uri.origin = "http://" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow origin assignment with bogus type" do + expect do + @uri.origin = :bogus + end.to raise_error(TypeError) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Section 6.2.2.1 of RFC 3986 + it "should be equivalent to http://EXAMPLE.COM/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.COM/")) + end + + it "should have a route of '/path/' to 'http://example.com/path/'" do + expect(@uri.route_to("http://example.com/path/")).to eq( + Addressable::URI.parse("/path/") + ) + end + + it "should have a route of '..' from 'http://example.com/path/'" do + expect(@uri.route_from("http://example.com/path/")).to eq( + Addressable::URI.parse("..") + ) + end + + it "should have a route of '#' to 'http://example.com/'" do + expect(@uri.route_to("http://example.com/")).to eq( + Addressable::URI.parse("#") + ) + end + + it "should have a route of 'http://elsewhere.com/' to " + + "'http://elsewhere.com/'" do + expect(@uri.route_to("http://elsewhere.com/")).to eq( + Addressable::URI.parse("http://elsewhere.com/") + ) + end + + it "when joined with 'relative/path' should be " + + "'http://example.com/relative/path'" do + expect(@uri.join('relative/path')).to eq( + Addressable::URI.parse("http://example.com/relative/path") + ) + end + + it "when joined with a bogus object a TypeError should be raised" do + expect do + @uri.join(42) + end.to raise_error(TypeError) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should have the correct username after assignment" do + @uri.user = "user@123!" + expect(@uri.user).to eq("user@123!") + expect(@uri.normalized_user).to eq("user%40123%21") + expect(@uri.password).to eq(nil) + expect(@uri.normalize.to_s).to eq("http://user%40123%21@example.com/") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "#secret@123!" 
+ expect(@uri.password).to eq("#secret@123!") + expect(@uri.normalized_password).to eq("%23secret%40123%21") + expect(@uri.user).to eq("") + expect(@uri.normalize.to_s).to eq("http://:%23secret%40123%21@example.com/") + expect(@uri.omit(:password).to_s).to eq("http://example.com") + end + + it "should have the correct user/pass after repeated assignment" do + @uri.user = nil + expect(@uri.user).to eq(nil) + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + # Username cannot be nil if the password is set + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = nil + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = "" + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + @uri.user = nil + # Username cannot be nil if the password is set + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct user/pass after userinfo assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + @uri.userinfo = nil + expect(@uri.userinfo).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +# Section 5.1.2 of RFC 2616 +describe Addressable::URI, "when parsed from " + + "'HTTP://www.w3.org/pub/WWW/TheProject.html'" do + before do + @uri = Addressable::URI.parse("HTTP://www.w3.org/pub/WWW/TheProject.html") + end + + it "should have the correct request URI" do + expect(@uri.request_uri).to eq("/pub/WWW/TheProject.html") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/pub/WWW/TheProject.html?" 
+ expect(@uri.request_uri).to eq("/pub/WWW/TheProject.html?") + expect(@uri.path).to eq("/pub/WWW/TheProject.html") + expect(@uri.query).to eq("") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/some/where/else.html" + expect(@uri.request_uri).to eq("/some/where/else.html") + expect(@uri.path).to eq("/some/where/else.html") + expect(@uri.query).to eq(nil) + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/some/where/else.html?query?string" + expect(@uri.request_uri).to eq("/some/where/else.html?query?string") + expect(@uri.path).to eq("/some/where/else.html") + expect(@uri.query).to eq("query?string") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "?x=y" + expect(@uri.request_uri).to eq("/?x=y") + expect(@uri.path).to eq("/") + expect(@uri.query).to eq("x=y") + end + + it "should raise an error if the site value is set to something bogus" do + expect do + @uri.site = 42 + end.to raise_error(TypeError) + end + + it "should raise an error if the request URI is set to something bogus" do + expect do + @uri.request_uri = 42 + end.to raise_error(TypeError) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "HTTP", + :user => nil, + :password => nil, + :host => "www.w3.org", + :port => nil, + :path => "/pub/WWW/TheProject.html", + :query => nil, + :fragment => nil + }) + end + + it "should have an origin of 'http://www.w3.org'" do + expect(@uri.origin).to eq('http://www.w3.org') + end +end + +describe Addressable::URI, "when parsing IPv6 addresses" do + it "should not raise an error for " + + "'http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[fe80:0:0:0:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[fe80:0:0:0:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[fe80::200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[fe80::200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[::1]/'" do + Addressable::URI.parse("http://[::1]/") + end + + it "should not raise an error for " + + "'http://[fe80::1]/'" do + Addressable::URI.parse("http://[fe80::1]/") + end + + it "should raise an error for " + + "'http://[]/'" do + expect do + Addressable::URI.parse("http://[]/") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsing IPv6 address" do + subject { Addressable::URI.parse("http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") } + its(:host) { should == '[3ffe:1900:4545:3:200:f8ff:fe21:67cf]' } + its(:hostname) { should == '3ffe:1900:4545:3:200:f8ff:fe21:67cf' } +end + +describe Addressable::URI, "when assigning IPv6 address" do + it "should allow to set bare IPv6 address as hostname" do + uri = Addressable::URI.parse("http://[::1]/") + uri.hostname = '3ffe:1900:4545:3:200:f8ff:fe21:67cf' + expect(uri.to_s).to eq('http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/') + end + + it "should allow to set bare IPv6 address as hostname with IPAddr object" do + uri = Addressable::URI.parse("http://[::1]/") + uri.hostname = IPAddr.new('3ffe:1900:4545:3:200:f8ff:fe21:67cf') + expect(uri.to_s).to eq('http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/') + end + + it "should not allow to set bare IPv6 address as host" do + uri = Addressable::URI.parse("http://[::1]/") + skip "not 
checked" + expect do + uri.host = '3ffe:1900:4545:3:200:f8ff:fe21:67cf' + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsing IPvFuture addresses" do + it "should not raise an error for " + + "'http://[v9.3ffe:1900:4545:3:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[v9.3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[vff.fe80:0:0:0:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[vff.fe80:0:0:0:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[v12.fe80::200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[v12.fe80::200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[va0.::1]/'" do + Addressable::URI.parse("http://[va0.::1]/") + end + + it "should not raise an error for " + + "'http://[v255.fe80::1]/'" do + Addressable::URI.parse("http://[v255.fe80::1]/") + end + + it "should raise an error for " + + "'http://[v0.]/'" do + expect do + Addressable::URI.parse("http://[v0.]/") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/'" do + before do + @uri = Addressable::URI.parse("http://example.com/") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to HTTP://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("HTTP://example.com/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://Example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://Example.com/")) + end + + it "should have the correct username after assignment" do + @uri.user = nil + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should have the correct password after assignment" do + @uri.password = nil + expect(@uri.password).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have the same hash as its duplicate" do + expect(@uri.hash).to eq(@uri.dup.hash) + end + + it "should have a different hash from its equivalent String value" do + expect(@uri.hash).not_to eq(@uri.to_s.hash) + end + + it "should have the same hash as an equal URI" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/").hash) + end + + it "should be equivalent to http://EXAMPLE.com" do 
+ expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com")) + end + + it "should be equivalent to http://EXAMPLE.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com:80/")) + end + + it "should have the same hash as http://example.com/" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/").hash) + end + + it "should have the same hash as http://example.com after assignment" do + @uri.path = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have the same hash as http://example.com/? after assignment" do + @uri.query = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/?").hash) + end + + it "should have the same hash as http://example.com/? after assignment" do + @uri.query_values = {} + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/?").hash) + end + + it "should have the same hash as http://example.com/# after assignment" do + @uri.fragment = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/#").hash) + end + + it "should have a different hash from http://example.com" do + expect(@uri.hash).not_to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com?#'" do + before do + @uri = Addressable::URI.parse("http://example.com?#") + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "", + :query => "", + :fragment => "" + }) + end + + it "should have a request URI of '/?'" do + expect(@uri.request_uri).to eq("/?") + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq("http://example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://@example.com/'" do + before do + @uri = Addressable::URI.parse("http://@example.com/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => "", + :password => nil, + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com./'" do + before do + @uri = Addressable::URI.parse("http://example.com./") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:@example.com/'" do + before do + @uri = 
Addressable::URI.parse("http://:@example.com/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => "", + :password => "", + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'HTTP://EXAMPLE.COM/'" do + before do + @uri = Addressable::URI.parse("HTTP://EXAMPLE.COM/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "HTTP", + :user => nil, + :password => nil, + :host => "EXAMPLE.COM", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end + + it "should have a tld of 'com'" do + expect(@uri.tld).to eq('com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.example.co.uk/'" do + before do + @uri = Addressable::URI.parse("http://www.example.co.uk/") + end + + it "should have an origin of 'http://www.example.co.uk'" do + expect(@uri.origin).to eq('http://www.example.co.uk') + end + + it "should have a tld of 'co.uk'" do + expect(@uri.tld).to eq('co.uk') + end + + it "should have a domain of 'example.co.uk'" do + expect(@uri.domain).to eq('example.co.uk') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://sub_domain.blogspot.com/'" do + before do + @uri = Addressable::URI.parse("http://sub_domain.blogspot.com/") + end + + it "should have an origin of 'http://sub_domain.blogspot.com'" do + expect(@uri.origin).to eq('http://sub_domain.blogspot.com') + end + + it "should have a tld of 'com'" do + expect(@uri.tld).to eq('com') + end + + it "should have a domain of 'blogspot.com'" do + expect(@uri.domain).to eq('blogspot.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/~smith/'" do + before do + @uri = Addressable::URI.parse("http://example.com/~smith/") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com/%7Esmith/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/%7Esmith/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com/%7esmith/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/%7esmith/")) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%E8'" do + before do + @uri = Addressable::URI.parse("http://example.com/%E8") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + 
expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/%E8" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/%E8" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path%2Fsegment/'" do + before do + @uri = Addressable::URI.parse("http://example.com/path%2Fsegment/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should be equal to 'http://example.com/path%2Fsegment/'" do + expect(@uri.normalize).to be_eql( + Addressable::URI.parse("http://example.com/path%2Fsegment/") + ) + end + + it "should not be equal to 'http://example.com/path/segment/'" do + expect(@uri).not_to eq( + Addressable::URI.parse("http://example.com/path/segment/") + ) + end + + it "should not be equal to 'http://example.com/path/segment/'" do + expect(@uri.normalize).not_to be_eql( + Addressable::URI.parse("http://example.com/path/segment/") + ) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?%F6'" do + before do + @uri = Addressable::URI.parse("http://example.com/?%F6") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/?%F6" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/?%F6" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/#%F6'" do + before do + @uri = Addressable::URI.parse("http://example.com/#%F6") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/#%F6" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/#%F6" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%C3%87'" do + before do + @uri = Addressable::URI.parse("http://example.com/%C3%87") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to 'http://example.com/C%CC%A7'" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/C%CC%A7")) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/%C3%87" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/%C3%87" + end + + it "should raise an error if encoding with an unexpected return type" do + expect do + Addressable::URI.normalized_encode(@uri, Integer) + end.to raise_error(TypeError) + end + + it "if percent encoded should be 'http://example.com/C%25CC%25A7'" do + expect(Addressable::URI.encode(@uri).to_s).to eq( + "http://example.com/%25C3%2587" + ) + end + + it "if percent encoded should be 'http://example.com/C%25CC%25A7'" do + expect(Addressable::URI.encode(@uri, Addressable::URI)).to eq( + 
Addressable::URI.parse("http://example.com/%25C3%2587") + ) + end + + it "should raise an error if encoding with an unexpected return type" do + expect do + Addressable::URI.encode(@uri, Integer) + end.to raise_error(TypeError) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=string'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=string") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have a query string of 'q=string'" do + expect(@uri.query).to eq("q=string") + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:80/'" do + before do + @uri = Addressable::URI.parse("http://example.com:80/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com:80'" do + expect(@uri.authority).to eq("example.com:80") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have explicit port 80" do + expect(@uri.port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:80/" do + expect(@uri.eql?(Addressable::URI.parse("http://example.com:80/"))).to eq(true) + end + + it "should be roughly equal to http://example.com/" do + expect(@uri === Addressable::URI.parse("http://example.com/")).to eq(true) + end + + it "should be roughly equal to the string 'http://example.com/'" do + expect(@uri === "http://example.com/").to eq(true) + end + + it "should not be roughly equal to the string " + + "'http://example.com:bogus/'" do + expect do + expect(@uri === "http://example.com:bogus/").to eq(false) + end.not_to raise_error + end + + it "should result in itself when joined with itself" do + expect(@uri.join(@uri).to_s).to eq("http://example.com:80/") + expect(@uri.join!(@uri).to_s).to 
eq("http://example.com:80/") + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Section 6.2.2.1 of RFC 3986 + it "should be equal to http://EXAMPLE.COM/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.COM/")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => 80, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com:80/" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com:80/" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:8080/'" do + before do + @uri = Addressable::URI.parse("http://example.com:8080/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com:8080'" do + expect(@uri.authority).to eq("example.com:8080") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 8080" do + expect(@uri.inferred_port).to eq(8080) + end + + it "should have explicit port 8080" do + expect(@uri.port).to eq(8080) + end + + it "should have default port 80" do + expect(@uri.default_port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:8080/" do + expect(@uri.eql?(Addressable::URI.parse( + "http://example.com:8080/"))).to eq(true) + end + + it "should have a route of 'http://example.com:8080/' from " + + "'http://example.com/path/to/'" do + expect(@uri.route_from("http://example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should have a route of 'http://example.com:8080/' from " + + "'http://example.com:80/path/to/'" do + expect(@uri.route_from("http://example.com:80/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should have a route of '../../' from " + + "'http://example.com:8080/path/to/'" do + expect(@uri.route_from("http://example.com:8080/path/to/")).to eq( + Addressable::URI.parse("../../") + ) + end + + it "should have a route of 'http://example.com:8080/' from " + + 
"'http://user:pass@example.com/path/to/'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => 8080, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com:8080'" do + expect(@uri.origin).to eq('http://example.com:8080') + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com:8080/" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com:8080/" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:%38%30/'" do + before do + @uri = Addressable::URI.parse("http://example.com:%38%30/") + end + + it "should have the correct port" do + expect(@uri.port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%2E/'" do + before do + @uri = Addressable::URI.parse("http://example.com/%2E/") + end + + it "should be considered to be in normal form" do + skip( + 'path segment normalization should happen before ' + + 'percent escaping normalization' + ) + @uri.normalize.should be_eql(@uri) + end + + it "should normalize to 'http://example.com/%2E/'" do + skip( + 'path segment normalization should happen before ' + + 'percent escaping normalization' + ) + expect(@uri.normalize).to eq("http://example.com/%2E/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/../..'" do + before do + @uri = Addressable::URI.parse("http://example.com/../..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path(/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/path(/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end 
+end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/(path)/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/(path)/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path(/../'" do + before do + @uri = Addressable::URI.parse("http://example.com/path(/../") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/(path)/../'" do + before do + @uri = Addressable::URI.parse("http://example.com/(path)/../") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'/..//example.com'" do + before do + @uri = Addressable::URI.parse("/..//example.com") + end + + it "should become invalid when normalized" do + expect do + @uri.normalize + end.to raise_error(Addressable::URI::InvalidURIError, /authority/) + end + + it "should have a path of '/..//example.com'" do + expect(@uri.path).to eq("/..//example.com") + end +end + +describe Addressable::URI, "when parsed from '/a/b/c/./../../g'" do + before do + @uri = Addressable::URI.parse("/a/b/c/./../../g") + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + # Section 5.2.4 of RFC 3986 + it "should normalize to '/a/g'" do + expect(@uri.normalize.to_s).to eq("/a/g") + end +end + +describe Addressable::URI, "when parsed from 'mid/content=5/../6'" do + before do + @uri = Addressable::URI.parse("mid/content=5/../6") + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + # Section 5.2.4 of RFC 3986 + it "should normalize to 'mid/6'" do + expect(@uri.normalize.to_s).to eq("mid/6") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.example.com///../'" do + before do + @uri = Addressable::URI.parse('http://www.example.com///../') + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://www.example.com//'" do + expect(@uri.normalize.to_s).to eq("http://www.example.com//") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path/to/resource/'" do + before do + @uri = Addressable::URI.parse("http://example.com/path/to/resource/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it 
"should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/path/to/resource/'" do + expect(@uri.path).to eq("/path/to/resource/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:8080/" do + expect(@uri.eql?(Addressable::URI.parse( + "http://example.com/path/to/resource/"))).to eq(true) + end + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/'" do + expect(@uri.route_from("http://example.com/path/to/")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of '../' from " + + "'http://example.com/path/to/resource/sub'" do + expect(@uri.route_from("http://example.com/path/to/resource/sub")).to eq( + Addressable::URI.parse("../") + ) + end + + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/another'" do + expect(@uri.route_from("http://example.com/path/to/another")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/res'" do + expect(@uri.route_from("http://example.com/path/to/res")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'resource/' from " + + "'http://example.com:80/path/to/'" do + expect(@uri.route_from("http://example.com:80/path/to/")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'http://example.com/path/to/' from " + + "'http://example.com:8080/path/to/'" do + expect(@uri.route_from("http://example.com:8080/path/to/")).to eq( + Addressable::URI.parse("http://example.com/path/to/resource/") + ) + end + + it "should have a route of 'http://example.com/path/to/' from " + + "'http://user:pass@example.com/path/to/'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com/path/to/resource/") + ) + end + + it "should have a route of '../../path/to/resource/' from " + + "'http://example.com/to/resource/'" do + expect(@uri.route_from("http://example.com/to/resource/")).to eq( + Addressable::URI.parse("../../path/to/resource/") + ) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "/path/to/resource/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'relative/path/to/resource'" do + before do + @uri = Addressable::URI.parse("relative/path/to/resource") + end + + it "should not have a scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an authority segment" do + expect(@uri.authority).to eq(nil) + end + + it "should not have a host" do + expect(@uri.host).to eq(nil) + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no 
password" do + expect(@uri.password).to eq(nil) + end + + it "should not have a port" do + expect(@uri.port).to eq(nil) + end + + it "should have a path of 'relative/path/to/resource'" do + expect(@uri.path).to eq("relative/path/to/resource") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should not be considered absolute" do + expect(@uri).not_to be_absolute + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should raise an error if routing is attempted" do + expect do + @uri.route_to("http://example.com/") + end.to raise_error(ArgumentError, /relative\/path\/to\/resource/) + expect do + @uri.route_from("http://example.com/") + end.to raise_error(ArgumentError, /relative\/path\/to\/resource/) + end + + it "when joined with 'another/relative/path' should be " + + "'relative/path/to/another/relative/path'" do + expect(@uri.join('another/relative/path')).to eq( + Addressable::URI.parse("relative/path/to/another/relative/path") + ) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'relative_path_with_no_slashes'" do + before do + @uri = Addressable::URI.parse("relative_path_with_no_slashes") + end + + it "should not have a scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an authority segment" do + expect(@uri.authority).to eq(nil) + end + + it "should not have a host" do + expect(@uri.host).to eq(nil) + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not have a port" do + expect(@uri.port).to eq(nil) + end + + it "should have a path of 'relative_path_with_no_slashes'" do + expect(@uri.path).to eq("relative_path_with_no_slashes") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should not be considered absolute" do + expect(@uri).not_to be_absolute + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "when joined with 'another_relative_path' should be " + + "'another_relative_path'" do + expect(@uri.join('another_relative_path')).to eq( + Addressable::URI.parse("another_relative_path") + ) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt'" do + expect(@uri.path).to eq("/file.txt") + end + + it "should have a basename of 'file.txt'" do + expect(@uri.basename).to eq("file.txt") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + 
it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt;parameter'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt;parameter") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt;parameter'" do + expect(@uri.path).to eq("/file.txt;parameter") + end + + it "should have a basename of 'file.txt'" do + expect(@uri.basename).to eq("file.txt") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt;x=y'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt;x=y") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt;x=y'" do + expect(@uri.path).to eq("/file.txt;x=y") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'svn+ssh://developername@rubyforge.org/var/svn/project'" do + before do + @uri = Addressable::URI.parse( + "svn+ssh://developername@rubyforge.org/var/svn/project" + ) + end + + it "should have a scheme of 'svn+ssh'" do + expect(@uri.scheme).to eq("svn+ssh") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/var/svn/project'" do + expect(@uri.path).to eq("/var/svn/project") + end + + it "should have a username of 'developername'" do + expect(@uri.user).to eq("developername") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'ssh+svn://developername@RUBYFORGE.ORG/var/svn/project'" do + before do + @uri = Addressable::URI.parse( + 
"ssh+svn://developername@RUBYFORGE.ORG/var/svn/project" + ) + end + + it "should have a scheme of 'ssh+svn'" do + expect(@uri.scheme).to eq("ssh+svn") + end + + it "should have a normalized scheme of 'svn+ssh'" do + expect(@uri.normalized_scheme).to eq("svn+ssh") + end + + it "should have a normalized site of 'svn+ssh'" do + expect(@uri.normalized_site).to eq("svn+ssh://developername@rubyforge.org") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of '/var/svn/project'" do + expect(@uri.path).to eq("/var/svn/project") + end + + it "should have a username of 'developername'" do + expect(@uri.user).to eq("developername") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'mailto:user@example.com'" do + before do + @uri = Addressable::URI.parse("mailto:user@example.com") + end + + it "should have a scheme of 'mailto'" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of 'user@example.com'" do + expect(@uri.path).to eq("user@example.com") + end + + it "should have no user" do + expect(@uri.user).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'tag:example.com,2006-08-18:/path/to/something'" do + before do + @uri = Addressable::URI.parse( + "tag:example.com,2006-08-18:/path/to/something") + end + + it "should have a scheme of 'tag'" do + expect(@uri.scheme).to eq("tag") + end + + it "should be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of " + + "'example.com,2006-08-18:/path/to/something'" do + expect(@uri.path).to eq("example.com,2006-08-18:/path/to/something") + end + + it "should have no user" do + expect(@uri.user).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/x;y/'" do + before do + @uri = Addressable::URI.parse("http://example.com/x;y/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?x=1&y=2'" do + before do + @uri = Addressable::URI.parse("http://example.com/?x=1&y=2") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'view-source:http://example.com/'" do + before do + @uri = Addressable::URI.parse("view-source:http://example.com/") + end + + it "should have a scheme of 'view-source'" do + expect(@uri.scheme).to eq("view-source") + end + + it "should have a path of 'http://example.com/'" do + expect(@uri.path).to eq("http://example.com/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + 
"'http://user:pass@example.com/path/to/resource?query=x#fragment'" do + before do + @uri = Addressable::URI.parse( + "http://user:pass@example.com/path/to/resource?query=x#fragment") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'user:pass@example.com'" do + expect(@uri.authority).to eq("user:pass@example.com") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have a password of 'pass'" do + expect(@uri.password).to eq("pass") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/path/to/resource'" do + expect(@uri.path).to eq("/path/to/resource") + end + + it "should have a query string of 'query=x'" do + expect(@uri.query).to eq("query=x") + end + + it "should have a fragment of 'fragment'" do + expect(@uri.fragment).to eq("fragment") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a route of '../../' to " + + "'http://user:pass@example.com/path/'" do + expect(@uri.route_to("http://user:pass@example.com/path/")).to eq( + Addressable::URI.parse("../../") + ) + end + + it "should have a route of 'to/resource?query=x#fragment' " + + "from 'http://user:pass@example.com/path/'" do + expect(@uri.route_from("http://user:pass@example.com/path/")).to eq( + Addressable::URI.parse("to/resource?query=x#fragment") + ) + end + + it "should have a route of '?query=x#fragment' " + + "from 'http://user:pass@example.com/path/to/resource'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/resource")).to eq( + Addressable::URI.parse("?query=x#fragment") + ) + end + + it "should have a route of '#fragment' " + + "from 'http://user:pass@example.com/path/to/resource?query=x'" do + expect(@uri.route_from( + "http://user:pass@example.com/path/to/resource?query=x")).to eq( + Addressable::URI.parse("#fragment") + ) + end + + it "should have a route of '#fragment' from " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment'" do + expect(@uri.route_from( + "http://user:pass@example.com/path/to/resource?query=x#fragment" + )).to eq(Addressable::URI.parse("#fragment")) + end + + it "should have a route of 'http://elsewhere.com/' to " + + "'http://elsewhere.com/'" do + expect(@uri.route_to("http://elsewhere.com/")).to eq( + Addressable::URI.parse("http://elsewhere.com/") + ) + end + + it "should have a route of " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment' " + + "from 'http://example.com/path/to/'" do + expect(@uri.route_from("http://elsewhere.com/path/to/")).to eq( + Addressable::URI.parse( + "http://user:pass@example.com/path/to/resource?query=x#fragment") + ) + end + + it "should have the correct scheme after assignment" do + @uri.scheme = "ftp" + expect(@uri.scheme).to eq("ftp") + expect(@uri.to_s).to eq( + "ftp://user:pass@example.com/path/to/resource?query=x#fragment" + ) + expect(@uri.to_str).to eq( + "ftp://user:pass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct site segment after assignment" do + @uri.site = "https://newuser:newpass@example.com:443" + expect(@uri.scheme).to eq("https") + expect(@uri.authority).to eq("newuser:newpass@example.com:443") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + 
expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.normalized_userinfo).to eq("newuser:newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(443) + expect(@uri.inferred_port).to eq(443) + expect(@uri.to_s).to eq( + "https://newuser:newpass@example.com:443" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser:newpass@example.com:80" + expect(@uri.authority).to eq("newuser:newpass@example.com:80") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.normalized_userinfo).to eq("newuser:newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(80) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq( + "http://newuser:newpass@example.com:80" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct userinfo segment after assignment" do + @uri.userinfo = "newuser:newpass" + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.authority).to eq("newuser:newpass@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq( + "http://newuser:newpass@example.com" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.authority).to eq("newuser:pass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.authority).to eq("user:newpass@example.com") + end + + it "should have the correct host after assignment" do + @uri.host = "newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.authority).to eq("user:pass@newexample.com") + end + + it "should have the correct host after assignment" do + @uri.hostname = "newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.hostname).to eq("newexample.com") + expect(@uri.authority).to eq("user:pass@newexample.com") + end + + it "should raise an error if assigning a bogus object to the hostname" do + expect do + @uri.hostname = Object.new + end.to raise_error(TypeError) + end + + it "should have the correct port after assignment" do + @uri.port = 8080 + expect(@uri.port).to eq(8080) + expect(@uri.authority).to eq("user:pass@example.com:8080") + end + + it "should have the correct origin after assignment" do + @uri.origin = "http://newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.authority).to eq("newexample.com") + end + + it "should have the correct path after assignment" do + @uri.path = "/newpath/to/resource" + expect(@uri.path).to eq("/newpath/to/resource") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/newpath/to/resource?query=x#fragment" + ) + end + + it "should have the correct scheme and authority after nil assignment" do + @uri.site = nil + expect(@uri.scheme).to eq(nil) + expect(@uri.authority).to eq(nil) + expect(@uri.to_s).to eq("/path/to/resource?query=x#fragment") + end + + it "should have the correct scheme and authority after assignment" do + @uri.site = "file://" + expect(@uri.scheme).to eq("file") + expect(@uri.authority).to eq("") + expect(@uri.to_s).to 
eq("file:///path/to/resource?query=x#fragment") + end + + it "should have the correct path after nil assignment" do + @uri.path = nil + expect(@uri.path).to eq("") + expect(@uri.to_s).to eq( + "http://user:pass@example.com?query=x#fragment" + ) + end + + it "should have the correct query string after assignment" do + @uri.query = "newquery=x" + expect(@uri.query).to eq("newquery=x") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?newquery=x#fragment" + ) + @uri.query = nil + expect(@uri.query).to eq(nil) + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource#fragment" + ) + end + + it "should have the correct query string after hash assignment" do + @uri.query_values = {"?uestion mark" => "=sign", "hello" => "g\xC3\xBCnther"} + expect(@uri.query.split("&")).to include("%3Fuestion%20mark=%3Dsign") + expect(@uri.query.split("&")).to include("hello=g%C3%BCnther") + expect(@uri.query_values).to eq({ + "?uestion mark" => "=sign", "hello" => "g\xC3\xBCnther" + }) + end + + it "should have the correct query string after flag hash assignment" do + @uri.query_values = {'flag?1' => nil, 'fl=ag2' => nil, 'flag3' => nil} + expect(@uri.query.split("&")).to include("flag%3F1") + expect(@uri.query.split("&")).to include("fl%3Dag2") + expect(@uri.query.split("&")).to include("flag3") + expect(@uri.query_values(Array).sort).to eq([["fl=ag2"], ["flag3"], ["flag?1"]]) + expect(@uri.query_values(Hash)).to eq({ + 'flag?1' => nil, 'fl=ag2' => nil, 'flag3' => nil + }) + end + + it "should raise an error if query values are set to a bogus type" do + expect do + @uri.query_values = "bogus" + end.to raise_error(TypeError) + end + + it "should have the correct fragment after assignment" do + @uri.fragment = "newfragment" + expect(@uri.fragment).to eq("newfragment") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#newfragment" + ) + + @uri.fragment = nil + expect(@uri.fragment).to eq(nil) + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:fragment => "newfragment").to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#newfragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:fragment => nil).to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:userinfo => "newuser:newpass").to_s).to eq( + "http://newuser:newpass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:userinfo => nil).to_s).to eq( + "http://example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:path => "newpath").to_s).to eq( + "http://user:pass@example.com/newpath?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:port => "42", :path => "newpath", :query => "").to_s).to eq( + "http://user:pass@example.com:42/newpath?#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:authority => "foo:bar@baz:42").to_s).to eq( + "http://foo:bar@baz:42/path/to/resource?query=x#fragment" + ) + # Ensure the operation was not destructive + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#fragment" + ) + end + + it 
"should have the correct values after a destructive merge" do + @uri.merge!(:authority => "foo:bar@baz:42") + # Ensure the operation was destructive + expect(@uri.to_s).to eq( + "http://foo:bar@baz:42/path/to/resource?query=x#fragment" + ) + end + + it "should fail to merge with bogus values" do + expect do + @uri.merge(:port => "bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should fail to merge with bogus values" do + expect do + @uri.merge(:authority => "bar@baz:bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should fail to merge with bogus parameters" do + expect do + @uri.merge(42) + end.to raise_error(TypeError) + end + + it "should fail to merge with bogus parameters" do + expect do + @uri.merge("http://example.com/") + end.to raise_error(TypeError) + end + + it "should fail to merge with both authority and subcomponents" do + expect do + @uri.merge(:authority => "foo:bar@baz:42", :port => "42") + end.to raise_error(ArgumentError) + end + + it "should fail to merge with both userinfo and subcomponents" do + expect do + @uri.merge(:userinfo => "foo:bar", :user => "foo") + end.to raise_error(ArgumentError) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/search?q=Q%26A'" do + + before do + @uri = Addressable::URI.parse("http://example.com/search?q=Q%26A") + end + + it "should have a query of 'q=Q%26A'" do + expect(@uri.query).to eq("q=Q%26A") + end + + it "should have query_values of {'q' => 'Q&A'}" do + expect(@uri.query_values).to eq({ 'q' => 'Q&A' }) + end + + it "should normalize to the original uri " + + "(with the ampersand properly percent-encoded)" do + expect(@uri.normalize.to_s).to eq("http://example.com/search?q=Q%26A") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?&x=b") + end + + it "should have a query of '&x=b'" do + expect(@uri.query).to eq("&x=b") + end + + it "should have query_values of {'x' => 'b'}" do + expect(@uri.query_values).to eq({'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q='one;two'&x=1'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q='one;two'&x=1") + end + + it "should have a query of 'q='one;two'&x=1'" do + expect(@uri.query).to eq("q='one;two'&x=1") + end + + it "should have query_values of {\"q\" => \"'one;two'\", \"x\" => \"1\"}" do + expect(@uri.query_values).to eq({"q" => "'one;two'", "x" => "1"}) + end + + it "should escape the ';' character when normalizing to avoid ambiguity " + + "with the W3C HTML 4.01 specification" do + # HTML 4.01 Section B.2.2 + expect(@uri.normalize.query).to eq("q='one%3Btwo'&x=1") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?&&x=b") + end + + it "should have a query of '&&x=b'" do + expect(@uri.query).to eq("&&x=b") + end + + it "should have query_values of {'x' => 'b'}" do + expect(@uri.query_values).to eq({'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a&&x=b") + end + + it "should have 
a query of 'q=a&&x=b'" do + expect(@uri.query).to eq("q=a&&x=b") + end + + it "should have query_values of {'q' => 'a, 'x' => 'b'}" do + expect(@uri.query_values).to eq({'q' => 'a', 'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q&&x=b") + end + + it "should have a query of 'q&&x=b'" do + expect(@uri.query).to eq("q&&x=b") + end + + it "should have query_values of {'q' => true, 'x' => 'b'}" do + expect(@uri.query_values).to eq({'q' => nil, 'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a+b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a+b") + end + + it "should have a query of 'q=a+b'" do + expect(@uri.query).to eq("q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq({'q' => 'a b'}) + end + + it "should have a normalized query of 'q=a+b'" do + expect(@uri.normalized_query).to eq("q=a+b") + end +end + +describe Addressable::URI, "when parsed from 'https://example.com/?q=a+b'" do + before do + @uri = Addressable::URI.parse("https://example.com/?q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq("q" => "a b") + end +end + +describe Addressable::URI, "when parsed from 'example.com?q=a+b'" do + before do + @uri = Addressable::URI.parse("example.com?q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq("q" => "a b") + end +end + +describe Addressable::URI, "when parsed from 'mailto:?q=a+b'" do + before do + @uri = Addressable::URI.parse("mailto:?q=a+b") + end + + it "should have query_values of {'q' => 'a+b'}" do + expect(@uri.query_values).to eq("q" => "a+b") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a%2bb'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a%2bb") + end + + it "should have a query of 'q=a+b'" do + expect(@uri.query).to eq("q=a%2bb") + end + + it "should have query_values of {'q' => 'a+b'}" do + expect(@uri.query_values).to eq({'q' => 'a+b'}) + end + + it "should have a normalized query of 'q=a%2Bb'" do + expect(@uri.normalized_query).to eq("q=a%2Bb") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?v=%7E&w=%&x=%25&y=%2B&z=C%CC%A7'" do + before do + @uri = Addressable::URI.parse("http://example.com/?v=%7E&w=%&x=%25&y=%2B&z=C%CC%A7") + end + + it "should have a normalized query of 'v=~&w=%25&x=%25&y=%2B&z=%C3%87'" do + expect(@uri.normalized_query).to eq("v=~&w=%25&x=%25&y=%2B&z=%C3%87") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?v=%7E&w=%&x=%25&y=+&z=C%CC%A7'" do + before do + @uri = Addressable::URI.parse("http://example.com/?v=%7E&w=%&x=%25&y=+&z=C%CC%A7") + end + + it "should have a normalized query of 'v=~&w=%25&x=%25&y=+&z=%C3%87'" do + expect(@uri.normalized_query).to eq("v=~&w=%25&x=%25&y=+&z=%C3%87") + end +end + +describe Addressable::URI, "when parsed from 'http://example/?b=1&a=2&c=3'" do + before do + @uri = Addressable::URI.parse("http://example/?b=1&a=2&c=3") + end + + it "should have a sorted normalized query of 'a=2&b=1&c=3'" do + expect(@uri.normalized_query(:sorted)).to eq("a=2&b=1&c=3") + end +end + +describe Addressable::URI, "when parsed from 'http://example/?&a&&c&'" do + before do + @uri = Addressable::URI.parse("http://example/?&a&&c&") + end + + it 
"should have a compacted normalized query of 'a&c'" do + expect(@uri.normalized_query(:compacted)).to eq("a&c") + end +end + +describe Addressable::URI, "when parsed from 'http://example.com/?a=1&a=1'" do + before do + @uri = Addressable::URI.parse("http://example.com/?a=1&a=1") + end + + it "should have a compacted normalized query of 'a=1'" do + expect(@uri.normalized_query(:compacted)).to eq("a=1") + end +end + +describe Addressable::URI, "when parsed from 'http://example.com/?a=1&a=2'" do + before do + @uri = Addressable::URI.parse("http://example.com/?a=1&a=2") + end + + it "should have a compacted normalized query of 'a=1&a=2'" do + expect(@uri.normalized_query(:compacted)).to eq("a=1&a=2") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/sound%2bvision'" do + before do + @uri = Addressable::URI.parse("http://example.com/sound%2bvision") + end + + it "should have a normalized path of '/sound+vision'" do + expect(@uri.normalized_path).to eq('/sound+vision') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q='" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=") + end + + it "should have a query of 'q='" do + expect(@uri.query).to eq("q=") + end + + it "should have query_values of {'q' => ''}" do + expect(@uri.query_values).to eq({'q' => ''}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user@example.com'" do + before do + @uri = Addressable::URI.parse("http://user@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should have a userinfo of 'user'" do + expect(@uri.userinfo).to eq("user") + end + + it "should have a normalized userinfo of 'user'" do + expect(@uri.normalized_userinfo).to eq("user") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have default_port 80" do + expect(@uri.default_port).to eq(80) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.to_s).to eq("http://user:newpass@example.com") + end + + it "should have the correct userinfo segment after assignment" do + @uri.userinfo = "newuser:newpass" + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser:newpass@example.com") + end + + it "should have the correct userinfo segment after nil assignment" do + @uri.userinfo = nil + expect(@uri.userinfo).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser@example.com" + 
expect(@uri.authority).to eq("newuser@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should raise an error after nil assignment of authority segment" do + expect do + # This would create an invalid URI + @uri.authority = nil + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user:@example.com'" do + before do + @uri = Addressable::URI.parse("http://user:@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have a password of ''" do + expect(@uri.password).to eq("") + end + + it "should have a normalized userinfo of 'user:'" do + expect(@uri.normalized_userinfo).to eq("user:") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.to_s).to eq("http://user:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser:@example.com" + expect(@uri.authority).to eq("newuser:@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser:@example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:pass@example.com'" do + before do + @uri = Addressable::URI.parse("http://:pass@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of ''" do + expect(@uri.user).to eq("") + end + + it "should have a password of 'pass'" do + expect(@uri.password).to eq("pass") + end + + it "should have a userinfo of ':pass'" do + expect(@uri.userinfo).to eq(":pass") + end + + it "should have a normalized userinfo of ':pass'" do + expect(@uri.normalized_userinfo).to eq(":pass") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("pass") + expect(@uri.to_s).to eq("http://newuser:pass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = ":newpass@example.com" + expect(@uri.authority).to eq(":newpass@example.com") + expect(@uri.user).to eq("") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to 
eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://:newpass@example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:@example.com'" do + before do + @uri = Addressable::URI.parse("http://:@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of ''" do + expect(@uri.user).to eq("") + end + + it "should have a password of ''" do + expect(@uri.password).to eq("") + end + + it "should have a normalized userinfo of nil" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = ":@newexample.com" + expect(@uri.authority).to eq(":@newexample.com") + expect(@uri.user).to eq("") + expect(@uri.password).to eq("") + expect(@uri.host).to eq("newexample.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://:@newexample.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'#example'" do + before do + @uri = Addressable::URI.parse("#example") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have a host of nil" do + expect(@uri.host).to eq(nil) + end + + it "should have a site of nil" do + expect(@uri.site).to eq(nil) + end + + it "should have a normalized_site of nil" do + expect(@uri.normalized_site).to eq(nil) + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should have a query string of nil" do + expect(@uri.query).to eq(nil) + end + + it "should have a fragment of 'example'" do + expect(@uri.fragment).to eq("example") + end +end + +describe Addressable::URI, "when parsed from " + + "the network-path reference '//example.com/'" do + before do + @uri = Addressable::URI.parse("//example.com/") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should raise an error if routing is attempted" do + expect do + @uri.route_to("http://example.com/") + end.to raise_error(ArgumentError, /\/\/example.com\//) + expect do + @uri.route_from("http://example.com/") + end.to raise_error(ArgumentError, /\/\/example.com\//) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'feed://http://example.com/'" do + before do + @uri = Addressable::URI.parse("feed://http://example.com/") + end + + it "should have a host of 'http'" do + expect(@uri.host).to eq("http") + end + + it "should have a path of '//example.com/'" do + expect(@uri.path).to eq("//example.com/") + end +end + +describe 
Addressable::URI, "when parsed from " + + "'feed:http://example.com/'" do + before do + @uri = Addressable::URI.parse("feed:http://example.com/") + end + + it "should have a path of 'http://example.com/'" do + expect(@uri.path).to eq("http://example.com/") + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + expect(@uri.normalize!.to_s).to eq("http://example.com/") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'example://a/b/c/%7Bfoo%7D'" do + before do + @uri = Addressable::URI.parse("example://a/b/c/%7Bfoo%7D") + end + + # Section 6.2.2 of RFC 3986 + it "should be equivalent to eXAMPLE://a/./b/../b/%63/%7bfoo%7d" do + expect(@uri).to eq( + Addressable::URI.parse("eXAMPLE://a/./b/../b/%63/%7bfoo%7d") + ) + end + + it "should have an origin of 'example://a'" do + expect(@uri.origin).to eq('example://a') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/indirect/path/./to/../resource/'" do + before do + @uri = Addressable::URI.parse( + "http://example.com/indirect/path/./to/../resource/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/indirect/path/./to/../resource/'" do + expect(@uri.path).to eq("/indirect/path/./to/../resource/") + end + + # Section 6.2.2.3 of RFC 3986 + it "should have a normalized path of '/indirect/path/resource/'" do + expect(@uri.normalize.path).to eq("/indirect/path/resource/") + expect(@uri.normalize!.path).to eq("/indirect/path/resource/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://under_score.example.com/'" do + it "should not cause an error" do + expect do + Addressable::URI.parse("http://under_score.example.com/") + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from " + + "'./this:that'" do + before do + @uri = Addressable::URI.parse("./this:that") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have no scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'this:that'" do + before do + @uri = Addressable::URI.parse("this:that") + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should have a scheme of 'this'" do + expect(@uri.scheme).to eq("this") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?'" do + before do + @uri = Addressable::URI.parse("?") + end + + it "should normalize to ''" do + expect(@uri.normalize.to_s).to eq("") + end + + it "should have the correct return type" do + expect(@uri.query_values).to eq({}) + expect(@uri.query_values(Hash)).to eq({}) + expect(@uri.query_values(Array)).to eq([]) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?one=1&two=2&three=3'" do + before do + @uri = Addressable::URI.parse("?one=1&two=2&three=3") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one" => "1", 
"two" => "2", "three" => "3"}) + end + + it "should raise an error for invalid return type values" do + expect do + @uri.query_values(Integer) + end.to raise_error(ArgumentError) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one", "1"], ["two", "2"], ["three", "3"] + ]) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?one=1=uno&two=2=dos'" do + before do + @uri = Addressable::URI.parse("?one=1=uno&two=2=dos") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one" => "1=uno", "two" => "2=dos"}) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one", "1=uno"], ["two", "2=dos"] + ]) + end +end + +describe Addressable::URI, "when parsed from '?one[two][three]=four'" do + before do + @uri = Addressable::URI.parse("?one[two][three]=four") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one[two][three]" => "four"}) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three]", "four"] + ]) + end +end + +describe Addressable::URI, "when parsed from '?one.two.three=four'" do + before do + @uri = Addressable::URI.parse("?one.two.three=four") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one.two.three" => "four" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one.two.three", "four"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three]=four&one[two][five]=six'" do + before do + @uri = Addressable::URI.parse("?one[two][three]=four&one[two][five]=six") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three]" => "four", "one[two][five]" => "six" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three]", "four"], ["one[two][five]", "six"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one.two.three=four&one.two.five=six'" do + before do + @uri = Addressable::URI.parse("?one.two.three=four&one.two.five=six") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one.two.three" => "four", "one.two.five" => "six" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one.two.three", "four"], ["one.two.five", "six"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one=two&one=three'" do + before do + @uri = Addressable::URI.parse( + "?one=two&one=three&one=four" + ) + end + + it "should have correct array query values" do + expect(@uri.query_values(Array)).to eq( + [['one', 'two'], ['one', 'three'], ['one', 'four']] + ) + end + + it "should have correct hash query values" do + skip("This is probably more desirable behavior.") + expect(@uri.query_values(Hash)).to eq( + {'one' => ['two', 'three', 'four']} + ) + end + + it "should handle assignment with keys of mixed type" do + @uri.query_values = @uri.query_values(Hash).merge({:one => 'three'}) + expect(@uri.query_values(Hash)).to eq({'one' => 'three'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][]=four&one[two][three][]=five'" do + before do + @uri = 
Addressable::URI.parse( + "?one[two][three][]=four&one[two][three][]=five" + ) + end + + it "should have correct query values" do + expect(@uri.query_values(Hash)).to eq({"one[two][three][]" => "five"}) + end + + it "should have correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three][]", "four"], ["one[two][three][]", "five"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][0]=four&one[two][three][1]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][0]=four&one[two][three][1]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][0]" => "four", "one[two][three][1]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][1]=four&one[two][three][0]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][1]=four&one[two][three][0]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][1]" => "four", "one[two][three][0]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][2]=four&one[two][three][1]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][2]=four&one[two][three][1]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][2]" => "four", "one[two][three][1]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/") + end + + it "should be equivalent to 'http://www.xn--8ws00zhy3a.com/'" do + expect(@uri).to eq( + Addressable::URI.parse("http://www.xn--8ws00zhy3a.com/") + ) + end + + it "should not have domain name encoded during normalization" do + expect(Addressable::URI.normalized_encode(@uri.to_s)).to eq( + "http://www.詹姆斯.com/" + ) + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/ some spaces /'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/ some spaces /") + end + + it "should be equivalent to " + + "'http://www.xn--8ws00zhy3a.com/%20some%20spaces%20/'" do + expect(@uri).to eq( + Addressable::URI.parse( + "http://www.xn--8ws00zhy3a.com/%20some%20spaces%20/") + ) + end + + it "should not have domain name encoded during normalization" do + expect(Addressable::URI.normalized_encode(@uri.to_s)).to eq( + "http://www.詹姆斯.com/%20some%20spaces%20/" + ) + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.xn--8ws00zhy3a.com/'" do + before do + @uri = Addressable::URI.parse("http://www.xn--8ws00zhy3a.com/") + end + + it "should be displayed as http://www.詹姆斯.com/" do + expect(@uri.display_uri.to_s).to eq("http://www.詹姆斯.com/") + end + + it "should properly force the encoding" do + display_string = @uri.display_uri.to_str + expect(display_string).to eq("http://www.詹姆斯.com/") + if display_string.respond_to?(:encoding) + expect(display_string.encoding.to_s).to eq(Encoding::UTF_8.to_s) + end + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to 
eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/atomtests/iri/詹.html'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/atomtests/iri/詹.html") + end + + it "should normalize to " + + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" do + expect(@uri.normalize.to_s).to eq( + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" + ) + expect(@uri.normalize!.to_s).to eq( + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" + ) + end +end + +describe Addressable::URI, "when parsed from a percent-encoded IRI" do + before do + @uri = Addressable::URI.parse( + "http://www.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA" + + "%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3" + + "%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82" + + "%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0" + + "%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3" + + "%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp" + ) + end + + it "should normalize to something sane" do + expect(@uri.normalize.to_s).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/" + ) + expect(@uri.normalize!.to_s).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/" + ) + end + + it "should have the correct origin" do + expect(@uri.origin).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end +end + +describe Addressable::URI, "with a base uri of 'http://a/b/c/d;p?q'" do + before do + @uri = Addressable::URI.parse("http://a/b/c/d;p?q") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g:h' should resolve to g:h" do + expect((@uri + "g:h").to_s).to eq("g:h") + expect(Addressable::URI.join(@uri, "g:h").to_s).to eq("g:h") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g' should resolve to http://a/b/c/g" do + expect((@uri + "g").to_s).to eq("http://a/b/c/g") + expect(Addressable::URI.join(@uri.to_s, "g").to_s).to eq("http://a/b/c/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with './g' should resolve to http://a/b/c/g" do + expect((@uri + "./g").to_s).to eq("http://a/b/c/g") + expect(Addressable::URI.join(@uri.to_s, "./g").to_s).to eq("http://a/b/c/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g/' should resolve to http://a/b/c/g/" do + expect((@uri + "g/").to_s).to eq("http://a/b/c/g/") + expect(Addressable::URI.join(@uri.to_s, "g/").to_s).to eq("http://a/b/c/g/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '/g' should resolve to http://a/g" do + expect((@uri + "/g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/g").to_s).to eq("http://a/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '//g' should resolve to http://g" do + expect((@uri + "//g").to_s).to eq("http://g") + expect(Addressable::URI.join(@uri.to_s, "//g").to_s).to eq("http://g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '?y' should resolve to http://a/b/c/d;p?y" do + expect((@uri + "?y").to_s).to eq("http://a/b/c/d;p?y") + expect(Addressable::URI.join(@uri.to_s, "?y").to_s).to eq("http://a/b/c/d;p?y") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g?y' should resolve to http://a/b/c/g?y" do + expect((@uri + "g?y").to_s).to eq("http://a/b/c/g?y") + 
expect(Addressable::URI.join(@uri.to_s, "g?y").to_s).to eq("http://a/b/c/g?y") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '#s' should resolve to http://a/b/c/d;p?q#s" do + expect((@uri + "#s").to_s).to eq("http://a/b/c/d;p?q#s") + expect(Addressable::URI.join(@uri.to_s, "#s").to_s).to eq( + "http://a/b/c/d;p?q#s" + ) + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g#s' should resolve to http://a/b/c/g#s" do + expect((@uri + "g#s").to_s).to eq("http://a/b/c/g#s") + expect(Addressable::URI.join(@uri.to_s, "g#s").to_s).to eq("http://a/b/c/g#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g?y#s' should resolve to http://a/b/c/g?y#s" do + expect((@uri + "g?y#s").to_s).to eq("http://a/b/c/g?y#s") + expect(Addressable::URI.join( + @uri.to_s, "g?y#s").to_s).to eq("http://a/b/c/g?y#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with ';x' should resolve to http://a/b/c/;x" do + expect((@uri + ";x").to_s).to eq("http://a/b/c/;x") + expect(Addressable::URI.join(@uri.to_s, ";x").to_s).to eq("http://a/b/c/;x") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g;x' should resolve to http://a/b/c/g;x" do + expect((@uri + "g;x").to_s).to eq("http://a/b/c/g;x") + expect(Addressable::URI.join(@uri.to_s, "g;x").to_s).to eq("http://a/b/c/g;x") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g;x?y#s' should resolve to http://a/b/c/g;x?y#s" do + expect((@uri + "g;x?y#s").to_s).to eq("http://a/b/c/g;x?y#s") + expect(Addressable::URI.join( + @uri.to_s, "g;x?y#s").to_s).to eq("http://a/b/c/g;x?y#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '' should resolve to http://a/b/c/d;p?q" do + expect((@uri + "").to_s).to eq("http://a/b/c/d;p?q") + expect(Addressable::URI.join(@uri.to_s, "").to_s).to eq("http://a/b/c/d;p?q") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '.' should resolve to http://a/b/c/" do + expect((@uri + ".").to_s).to eq("http://a/b/c/") + expect(Addressable::URI.join(@uri.to_s, ".").to_s).to eq("http://a/b/c/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with './' should resolve to http://a/b/c/" do + expect((@uri + "./").to_s).to eq("http://a/b/c/") + expect(Addressable::URI.join(@uri.to_s, "./").to_s).to eq("http://a/b/c/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '..' should resolve to http://a/b/" do + expect((@uri + "..").to_s).to eq("http://a/b/") + expect(Addressable::URI.join(@uri.to_s, "..").to_s).to eq("http://a/b/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../' should resolve to http://a/b/" do + expect((@uri + "../").to_s).to eq("http://a/b/") + expect(Addressable::URI.join(@uri.to_s, "../").to_s).to eq("http://a/b/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../g' should resolve to http://a/b/g" do + expect((@uri + "../g").to_s).to eq("http://a/b/g") + expect(Addressable::URI.join(@uri.to_s, "../g").to_s).to eq("http://a/b/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../..' 
should resolve to http://a/" do + expect((@uri + "../..").to_s).to eq("http://a/") + expect(Addressable::URI.join(@uri.to_s, "../..").to_s).to eq("http://a/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../../' should resolve to http://a/" do + expect((@uri + "../../").to_s).to eq("http://a/") + expect(Addressable::URI.join(@uri.to_s, "../../").to_s).to eq("http://a/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../../g' should resolve to http://a/g" do + expect((@uri + "../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '../../../g' should resolve to http://a/g" do + expect((@uri + "../../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../../../g").to_s).to eq("http://a/g") + end + + it "when joined with '../.././../g' should resolve to http://a/g" do + expect((@uri + "../.././../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../.././../g").to_s).to eq( + "http://a/g" + ) + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '../../../../g' should resolve to http://a/g" do + expect((@uri + "../../../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join( + @uri.to_s, "../../../../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '/./g' should resolve to http://a/g" do + expect((@uri + "/./g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/./g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '/../g' should resolve to http://a/g" do + expect((@uri + "/../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g.' should resolve to http://a/b/c/g." do + expect((@uri + "g.").to_s).to eq("http://a/b/c/g.") + expect(Addressable::URI.join(@uri.to_s, "g.").to_s).to eq("http://a/b/c/g.") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '.g' should resolve to http://a/b/c/.g" do + expect((@uri + ".g").to_s).to eq("http://a/b/c/.g") + expect(Addressable::URI.join(@uri.to_s, ".g").to_s).to eq("http://a/b/c/.g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g..' should resolve to http://a/b/c/g.." do + expect((@uri + "g..").to_s).to eq("http://a/b/c/g..") + expect(Addressable::URI.join(@uri.to_s, "g..").to_s).to eq("http://a/b/c/g..") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '..g' should resolve to http://a/b/c/..g" do + expect((@uri + "..g").to_s).to eq("http://a/b/c/..g") + expect(Addressable::URI.join(@uri.to_s, "..g").to_s).to eq("http://a/b/c/..g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with './../g' should resolve to http://a/b/g" do + expect((@uri + "./../g").to_s).to eq("http://a/b/g") + expect(Addressable::URI.join(@uri.to_s, "./../g").to_s).to eq("http://a/b/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with './g/.' 
should resolve to http://a/b/c/g/" do + expect((@uri + "./g/.").to_s).to eq("http://a/b/c/g/") + expect(Addressable::URI.join(@uri.to_s, "./g/.").to_s).to eq("http://a/b/c/g/") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g/./h' should resolve to http://a/b/c/g/h" do + expect((@uri + "g/./h").to_s).to eq("http://a/b/c/g/h") + expect(Addressable::URI.join(@uri.to_s, "g/./h").to_s).to eq("http://a/b/c/g/h") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g/../h' should resolve to http://a/b/c/h" do + expect((@uri + "g/../h").to_s).to eq("http://a/b/c/h") + expect(Addressable::URI.join(@uri.to_s, "g/../h").to_s).to eq("http://a/b/c/h") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g;x=1/./y' " + + "should resolve to http://a/b/c/g;x=1/y" do + expect((@uri + "g;x=1/./y").to_s).to eq("http://a/b/c/g;x=1/y") + expect(Addressable::URI.join( + @uri.to_s, "g;x=1/./y").to_s).to eq("http://a/b/c/g;x=1/y") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g;x=1/../y' should resolve to http://a/b/c/y" do + expect((@uri + "g;x=1/../y").to_s).to eq("http://a/b/c/y") + expect(Addressable::URI.join( + @uri.to_s, "g;x=1/../y").to_s).to eq("http://a/b/c/y") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g?y/./x' " + + "should resolve to http://a/b/c/g?y/./x" do + expect((@uri + "g?y/./x").to_s).to eq("http://a/b/c/g?y/./x") + expect(Addressable::URI.join( + @uri.to_s, "g?y/./x").to_s).to eq("http://a/b/c/g?y/./x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g?y/../x' " + + "should resolve to http://a/b/c/g?y/../x" do + expect((@uri + "g?y/../x").to_s).to eq("http://a/b/c/g?y/../x") + expect(Addressable::URI.join( + @uri.to_s, "g?y/../x").to_s).to eq("http://a/b/c/g?y/../x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g#s/./x' " + + "should resolve to http://a/b/c/g#s/./x" do + expect((@uri + "g#s/./x").to_s).to eq("http://a/b/c/g#s/./x") + expect(Addressable::URI.join( + @uri.to_s, "g#s/./x").to_s).to eq("http://a/b/c/g#s/./x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g#s/../x' " + + "should resolve to http://a/b/c/g#s/../x" do + expect((@uri + "g#s/../x").to_s).to eq("http://a/b/c/g#s/../x") + expect(Addressable::URI.join( + @uri.to_s, "g#s/../x").to_s).to eq("http://a/b/c/g#s/../x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'http:g' should resolve to http:g" do + expect((@uri + "http:g").to_s).to eq("http:g") + expect(Addressable::URI.join(@uri.to_s, "http:g").to_s).to eq("http:g") + end + + # Edge case to be sure + it "when joined with '//example.com/' should " + + "resolve to http://example.com/" do + expect((@uri + "//example.com/").to_s).to eq("http://example.com/") + expect(Addressable::URI.join( + @uri.to_s, "//example.com/").to_s).to eq("http://example.com/") + end + + it "when joined with a bogus object a TypeError should be raised" do + expect do + Addressable::URI.join(@uri, 42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when converting the path " + + "'relative/path/to/something'" do + before do + @path = 'relative/path/to/something' + end + + it "should convert to " + + "\'relative/path/to/something\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("relative/path/to/something") + end + + it "should join with an absolute file path correctly" do + @base = Addressable::URI.convert_path("/absolute/path/") + @uri = Addressable::URI.convert_path(@path) + expect((@base + @uri).to_str).to eq( + 
"file:///absolute/path/relative/path/to/something" + ) + end +end + +describe Addressable::URI, "when converting a bogus path" do + it "should raise a TypeError" do + expect do + Addressable::URI.convert_path(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when given a UNIX root directory" do + before do + @path = "/" + end + + it "should convert to \'file:///\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given a Windows root directory" do + before do + @path = "C:\\" + end + + it "should convert to \'file:///c:/\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path '/one/two/'" do + before do + @path = '/one/two/' + end + + it "should convert to " + + "\'file:///one/two/\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///one/two/") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the tld " do + it "'uk' should have a tld of 'uk'" do + uri = Addressable::URI.parse("http://example.com") + uri.tld = "uk" + + expect(uri.tld).to eq("uk") + end + + context "which " do + let (:uri) { Addressable::URI.parse("http://www.comrade.net/path/to/source/") } + + it "contains a subdomain" do + uri.tld = "co.uk" + + expect(uri.to_s).to eq("http://www.comrade.co.uk/path/to/source/") + end + + it "is part of the domain" do + uri.tld = "com" + + expect(uri.to_s).to eq("http://www.comrade.com/path/to/source/") + end + end +end + +describe Addressable::URI, "when given the path " + + "'c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file://c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file://c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file:c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an 
origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:/c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file:/c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:///c|/windows/My%20Documents%20100%20/foo.txt'" do + before do + @path = "file:///c|/windows/My%20Documents%20100%20/foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given an http protocol URI" do + before do + @path = "http://example.com/" + end + + it "should not do any conversion at all" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("http://example.com/") + end +end + +class SuperString + def initialize(string) + @string = string.to_s + end + + def to_str + return @string + end +end + +describe Addressable::URI, "when parsing a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.parse(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.parse(42) + end.to raise_error(TypeError) + end + + it "should correctly parse heuristically anything with a 'to_str' method" do + Addressable::URI.heuristic_parse(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.heuristic_parse(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when form encoding a hash" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + [["&one", "/1"], ["=two", "?2"], [":three", "#3"]] + )).to eq("%26one=%2F1&%3Dtwo=%3F2&%3Athree=%233") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"q" => "one two three"} + )).to eq("q=one+two+three") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"key" => nil} + )).to eq("key=") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"q" => ["one", "two", "three"]} + )).to eq("q=one&q=two&q=three") + end + + it "should result in correctly encoded newlines" do + expect(Addressable::URI.form_encode( + {"text" => "one\ntwo\rthree\r\nfour\n\r"} + )).to eq("text=one%0D%0Atwo%0D%0Athree%0D%0Afour%0D%0A%0D%0A") + end + + it "should result in a sorted percent encoded sequence" do + expect(Addressable::URI.form_encode( + [["a", "1"], ["dup", "3"], ["dup", "2"]], true + )).to eq("a=1&dup=2&dup=3") + end +end + +describe Addressable::URI, "when form encoding a non-Array object" do + it "should raise a TypeError for 
objects than cannot be converted" do + expect do + Addressable::URI.form_encode(42) + end.to raise_error(TypeError) + end +end + +# See https://tools.ietf.org/html/rfc6749#appendix-B +describe Addressable::URI, "when form encoding the example value from OAuth 2" do + it "should result in correct values" do + expect(Addressable::URI.form_encode( + {"value" => " %&+£€"} + )).to eq("value=+%25%26%2B%C2%A3%E2%82%AC") + end +end + +# See https://tools.ietf.org/html/rfc6749#appendix-B +describe Addressable::URI, "when form unencoding the example value from OAuth 2" do + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "value=+%25%26%2B%C2%A3%E2%82%AC" + )).to eq([["value", " %&+£€"]]) + end +end + +describe Addressable::URI, "when form unencoding a string" do + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "%26one=%2F1&%3Dtwo=%3F2&%3Athree=%233" + )).to eq([["&one", "/1"], ["=two", "?2"], [":three", "#3"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "q=one+two+three" + )).to eq([["q", "one two three"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "text=one%0D%0Atwo%0D%0Athree%0D%0Afour%0D%0A%0D%0A" + )).to eq([["text", "one\ntwo\nthree\nfour\n\n"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "a=1&dup=2&dup=3" + )).to eq([["a", "1"], ["dup", "2"], ["dup", "3"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "key" + )).to eq([["key", nil]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode("GivenName=Ren%C3%A9")).to eq( + [["GivenName", "René"]] + ) + end +end + +describe Addressable::URI, "when form unencoding a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.form_unencode(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.form_unencode(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when normalizing a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.normalize_component(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.normalize_component(42) + end.to raise_error(TypeError) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.normalize_component("component", 42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when normalizing a path with an encoded slash" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.parse("/path%2Fsegment/").normalize.path).to eq( + "/path%2Fsegment/" + ) + end +end + +describe Addressable::URI, "when normalizing a partially encoded string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "partially % encoded%21" + )).to eq("partially%20%25%20encoded!") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "partially %25 encoded!" 
+ )).to eq("partially%20%25%20encoded!") + end +end + +describe Addressable::URI, "when normalizing a unicode sequence" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "/C%CC%A7" + )).to eq("/%C3%87") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "/%C3%87" + )).to eq("/%C3%87") + end +end + +describe Addressable::URI, "when normalizing a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("günther")).to eq( + "g%C3%BCnther" + ) + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("g%C3%BCnther")).to eq( + "g%C3%BCnther" + ) + end +end + +describe Addressable::URI, "when normalizing a string but leaving some characters encoded" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("%58X%59Y%5AZ", "0-9a-zXY", "Y")).to eq( + "XX%59Y%5A%5A" + ) + end + + it "should not modify the character class" do + character_class = "0-9a-zXY" + + character_class_copy = character_class.dup + + Addressable::URI.normalize_component("%58X%59Y%5AZ", character_class, "Y") + + expect(character_class).to eq(character_class_copy) + end +end + +describe Addressable::URI, "when encoding IP literals" do + it "should work for IPv4" do + input = "http://127.0.0.1/" + expect(Addressable::URI.encode(input)).to eq(input) + end + + it "should work for IPv6" do + input = "http://[fe80::200:f8ff:fe21:67cf]/" + expect(Addressable::URI.encode(input)).to eq(input) + end +end + +describe Addressable::URI, "when encoding a string with existing encodings to upcase" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("JK%4c", "0-9A-IKM-Za-z%", "L")).to eq("%4AK%4C") + end +end + +describe Addressable::URI, "when encoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("günther")).to eq("g%C3%BCnther") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component( + "günther", /[^a-zA-Z0-9\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\-\.\_\~]/ + )).to eq("g%C3%BCnther") + end +end + +describe Addressable::URI, "when form encoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode({"GivenName" => "René"})).to eq( + "GivenName=Ren%C3%A9" + ) + end +end + +describe Addressable::URI, "when encoding a string with ASCII chars 0-15" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("one\ntwo")).to eq("one%0Atwo") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component( + "one\ntwo", /[^a-zA-Z0-9\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\-\.\_\~]/ + )).to eq("one%0Atwo") + end +end + +describe Addressable::URI, "when unencoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.unencode_component("g%C3%BCnther")).to eq("günther") + end + + it "should consistently use UTF-8 internally" do + expect(Addressable::URI.unencode_component("ski=%BA%DAɫ")).to eq("ski=\xBA\xDAɫ") + end + + it "should result in correct percent encoded sequence as a URI" do + expect(Addressable::URI.unencode( + "/path?g%C3%BCnther", ::Addressable::URI + 
)).to eq(Addressable::URI.new( + :path => "/path", :query => "günther" + )) + end +end + +describe Addressable::URI, "when partially unencoding a string" do + it "should unencode all characters by default" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String)).to eq('%%~~++') + end + + it "should unencode characters not in leave_encoded" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String, '~')).to eq('%%~%7e++') + end + + it "should leave characters in leave_encoded alone" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String, '%~+')).to eq('%%25~%7e+%2b') + end +end + +describe Addressable::URI, "when unencoding a bogus object" do + it "should raise a TypeError" do + expect do + Addressable::URI.unencode_component(42) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.unencode("/path?g%C3%BCnther", Integer) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when encoding a bogus object" do + it "should raise a TypeError" do + expect do + Addressable::URI.encode(Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.normalized_encode(Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.encode_component("günther", Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.encode_component(Object.new) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when given the input " + + "'http://example.com/'" do + before do + @input = "http://example.com/" + end + + it "should heuristically parse to 'http://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should not raise error when frozen" do + expect do + Addressable::URI.heuristic_parse(@input).freeze.to_s + end.not_to raise_error + end +end + +describe Addressable::URI, "when given the input " + + "'https://example.com/'" do + before do + @input = "https://example.com/" + end + + it "should heuristically parse to 'https://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("https://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http:example.com/'" do + before do + @input = "http:example.com/" + end + + it "should heuristically parse to 'http://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should heuristically parse to 'http://example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'https:example.com/'" do + before do + @input = "https:example.com/" + end + + it "should heuristically parse to 'https://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("https://example.com/") + end + + it "should heuristically parse to 'https://example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("https://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://example.com/example.com/'" do + before do + @input = "http://example.com/example.com/" + 
end + + it "should heuristically parse to 'http://example.com/example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix\\.example.com/'" do + before do + @input = "http://prefix\\.example.com/" + end + + it "should heuristically parse to 'http://prefix/.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix") + expect(@uri.to_s).to eq("http://prefix/.example.com/") + end + + it "should heuristically parse to 'http://prefix/.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix/.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p:\\/'" do + before do + @input = "http://p:\\/" + end + + it "should heuristically parse to 'http://p//'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//") + end + + it "should heuristically parse to 'http://p//' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p://'" do + before do + @input = "http://p://" + end + + it "should heuristically parse to 'http://p//'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//") + end + + it "should heuristically parse to 'http://p//' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p://p'" do + before do + @input = "http://p://p" + end + + it "should heuristically parse to 'http://p//p'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//p") + end + + it "should heuristically parse to 'http://p//p' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//p") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix .example.com/'" do + before do + @input = "http://prefix .example.com/" + end + + # Justification here being that no browser actually tries to resolve this. + # They all treat this as a web search. 
+ it "should heuristically parse to 'http://prefix%20.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix%20.example.com") + expect(@uri.to_s).to eq("http://prefix%20.example.com/") + end + + it "should heuristically parse to 'http://prefix%20.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix%20.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "' http://www.example.com/ '" do + before do + @input = " http://www.example.com/ " + end + + it "should heuristically parse to 'http://prefix%20.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.scheme).to eq("http") + expect(@uri.path).to eq("/") + expect(@uri.to_s).to eq("http://www.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix%2F.example.com/'" do + before do + @input = "http://prefix%2F.example.com/" + end + + it "should heuristically parse to 'http://prefix%2F.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix%2F.example.com") + expect(@uri.to_s).to eq("http://prefix%2F.example.com/") + end + + it "should heuristically parse to 'http://prefix%2F.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix%2F.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'/path/to/resource'" do + before do + @input = "/path/to/resource" + end + + it "should heuristically parse to '/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'relative/path/to/resource'" do + before do + @input = "relative/path/to/resource" + end + + it "should heuristically parse to 'relative/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("relative/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com'" do + before do + @input = "example.com" + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com' and a scheme hint of 'ftp'" do + before do + @input = "example.com" + @hints = {:scheme => 'ftp'} + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input, @hints) + expect(@uri.to_s).to eq("ftp://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com:21' and a scheme hint of 'ftp'" do + before do + @input = "example.com:21" + @hints = {:scheme => 'ftp'} + end + + it "should heuristically parse to 'http://example.com:21'" do + @uri = Addressable::URI.heuristic_parse(@input, @hints) + expect(@uri.to_s).to eq("ftp://example.com:21") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com/path/to/resource'" do + before do + @input = "example.com/path/to/resource" + end + + it "should heuristically parse to 'http://example.com/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to 
eq("http://example.com/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'http:///example.com'" do + before do + @input = "http:///example.com" + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input which "\ + "start with digits and has specified port" do + before do + @input = "7777.example.org:8089" + end + + it "should heuristically parse to 'http://7777.example.org:8089'" do + uri = Addressable::URI.heuristic_parse(@input) + expect(uri.to_s).to eq("http://7777.example.org:8089") + end +end + +describe Addressable::URI, "when given the input " + + "'feed:///example.com'" do + before do + @input = "feed:///example.com" + end + + it "should heuristically parse to 'feed://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("feed://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'file://localhost/path/to/resource/'" do + before do + @input = "file://localhost/path/to/resource/" + end + + it "should heuristically parse to 'file:///path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:///path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'file://path/to/resource/'" do + before do + @input = "file://path/to/resource/" + end + + it "should heuristically parse to 'file:///path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:///path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'file://///path/to/resource/'" do + before do + @input = "file:///////path/to/resource/" + end + + it "should heuristically parse to 'file:////path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:////path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'feed://http://example.com'" do + before do + @input = "feed://http://example.com" + end + + it "should heuristically parse to 'feed:http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("feed:http://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "::URI.parse('http://example.com')" do + before do + @input = ::URI.parse('http://example.com') + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input: 'user@domain.com'" do + before do + @input = "user@domain.com" + end + + context "for heuristic parse" do + it "should remain 'mailto:user@domain.com'" do + uri = Addressable::URI.heuristic_parse("mailto:#{@input}") + expect(uri.to_s).to eq("mailto:user@domain.com") + end + + it "should have a scheme of 'mailto'" do + uri = Addressable::URI.heuristic_parse(@input) + expect(uri.to_s).to eq("mailto:user@domain.com") + expect(uri.scheme).to eq("mailto") + end + + it "should remain 'acct:user@domain.com'" do + uri = Addressable::URI.heuristic_parse("acct:#{@input}") + expect(uri.to_s).to eq("acct:user@domain.com") + end + + context "HTTP" do + before do + @uri = Addressable::URI.heuristic_parse("http://#{@input}/") + end + + it "should remain 'http://user@domain.com/'" do + 
expect(@uri.to_s).to eq("http://user@domain.com/") + end + + it "should have the username 'user' for HTTP basic authentication" do + expect(@uri.user).to eq("user") + end + end + end +end + +describe Addressable::URI, "when assigning query values" do + before do + @uri = Addressable::URI.new + end + + it "should correctly assign {:a => 'a', :b => ['c', 'd', 'e']}" do + @uri.query_values = {:a => "a", :b => ["c", "d", "e"]} + expect(@uri.query).to eq("a=a&b=c&b=d&b=e") + end + + it "should raise an error attempting to assign {'a' => {'b' => ['c']}}" do + expect do + @uri.query_values = { 'a' => {'b' => ['c'] } } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:b => '2', :a => {:c => '1'}}" do + expect do + @uri.query_values = {:b => '2', :a => {:c => '1'}} + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => [{:c => 'c', :d => 'd'}, " + + "{:e => 'e', :f => 'f'}]}" do + expect do + @uri.query_values = { + :a => "a", :b => [{:c => "c", :d => "d"}, {:e => "e", :f => "f"}] + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => [{:c => true, :d => 'd'}, " + + "{:e => 'e', :f => 'f'}]}" do + expect do + @uri.query_values = { + :a => 'a', :b => [{:c => true, :d => 'd'}, {:e => 'e', :f => 'f'}] + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => {:c => true, :d => 'd'}}" do + expect do + @uri.query_values = { + :a => 'a', :b => {:c => true, :d => 'd'} + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => {:c => true, :d => 'd'}}" do + expect do + @uri.query_values = { + :a => 'a', :b => {:c => true, :d => 'd'} + } + end.to raise_error(TypeError) + end + + it "should correctly assign {:a => 1, :b => 1.5}" do + @uri.query_values = { :a => 1, :b => 1.5 } + expect(@uri.query).to eq("a=1&b=1.5") + end + + it "should raise an error attempting to assign " + + "{:z => 1, :f => [2, {999.1 => [3,'4']}, ['h', 'i']], " + + ":a => {:b => ['c', 'd'], :e => true, :y => 0.5}}" do + expect do + @uri.query_values = { + :z => 1, + :f => [ 2, {999.1 => [3,'4']}, ['h', 'i'] ], + :a => { :b => ['c', 'd'], :e => true, :y => 0.5 } + } + end.to raise_error(TypeError) + end + + it "should correctly assign {}" do + @uri.query_values = {} + expect(@uri.query).to eq('') + end + + it "should correctly assign nil" do + @uri.query_values = nil + expect(@uri.query).to eq(nil) + end + + it "should correctly sort {'ab' => 'c', :ab => 'a', :a => 'x'}" do + @uri.query_values = {'ab' => 'c', :ab => 'a', :a => 'x'} + expect(@uri.query).to eq("a=x&ab=a&ab=c") + end + + it "should correctly assign " + + "[['b', 'c'], ['b', 'a'], ['a', 'a']]" do + # Order can be guaranteed in this format, so preserve it. 
+ @uri.query_values = [['b', 'c'], ['b', 'a'], ['a', 'a']] + expect(@uri.query).to eq("b=c&b=a&a=a") + end + + it "should preserve query string order" do + query_string = (('a'..'z').to_a.reverse.map { |e| "#{e}=#{e}" }).join("&") + @uri.query = query_string + original_uri = @uri.to_s + @uri.query_values = @uri.query_values(Array) + expect(@uri.to_s).to eq(original_uri) + end + + describe 'when a hash with mixed types is assigned to query_values' do + it 'should not raise an error' do + skip 'Issue #94' + expect { subject.query_values = { "page" => "1", :page => 2 } }.to_not raise_error + end + end +end + +describe Addressable::URI, "when assigning path values" do + before do + @uri = Addressable::URI.new + end + + it "should correctly assign paths containing colons" do + @uri.path = "acct:bob@sporkmonger.com" + expect(@uri.path).to eq("acct:bob@sporkmonger.com") + expect(@uri.normalize.to_str).to eq("acct%2Fbob@sporkmonger.com") + expect { @uri.to_s }.to raise_error( + Addressable::URI::InvalidURIError + ) + end + + it "should correctly assign paths containing colons" do + @uri.path = "/acct:bob@sporkmonger.com" + @uri.authority = "example.com" + expect(@uri.normalize.to_str).to eq("//example.com/acct:bob@sporkmonger.com") + end + + it "should correctly assign paths containing colons" do + @uri.path = "acct:bob@sporkmonger.com" + @uri.scheme = "something" + expect(@uri.normalize.to_str).to eq("something:acct:bob@sporkmonger.com") + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.scheme = "http" + @uri.host = "example.com" + @uri.path = "acct:bob@sporkmonger.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.path = "acct:bob@sporkmonger.com" + @uri.scheme = "http" + @uri.host = "example.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.path = "uuid:0b3ecf60-3f93-11df-a9c3-001f5bfffe12" + @uri.scheme = "urn" + end.not_to raise_error + end +end + +describe Addressable::URI, "when initializing a subclass of Addressable::URI" do + before do + @uri = Class.new(Addressable::URI).new + end + + it "should have the same class after being parsed" do + expect(@uri.class).to eq(Addressable::URI.parse(@uri).class) + end + + it "should have the same class as its duplicate" do + expect(@uri.class).to eq(@uri.dup.class) + end + + it "should have the same class after being normalized" do + expect(@uri.class).to eq(@uri.normalize.class) + end + + it "should have the same class after being merged" do + expect(@uri.class).to eq(@uri.merge(:path => 'path').class) + end + + it "should have the same class after being joined" do + expect(@uri.class).to eq(@uri.join('path').class) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/spec_helper.rb new file mode 100644 index 0000000..bd8e395 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/spec/spec_helper.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'bundler/setup' +require 'rspec/its' + +begin + require 'coveralls' + Coveralls.wear! 
do + add_filter "spec/" + add_filter "vendor/" + end +rescue LoadError + warn "warning: coveralls gem not found; skipping Coveralls" + require 'simplecov' + SimpleCov.start do + add_filter "spec/" + add_filter "vendor/" + end +end if Gem.loaded_specs.key?("simplecov") + +class TestHelper + def self.native_supported? + mri = RUBY_ENGINE == "ruby" + windows = RUBY_PLATFORM.include?("mingw") + + mri && !windows + end +end + +RSpec.configure do |config| + config.warnings = true + config.filter_run_when_matching :focus +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/clobber.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/clobber.rake new file mode 100644 index 0000000..a9e32b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/clobber.rake @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +desc "Remove all build products" +task "clobber" diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/gem.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/gem.rake new file mode 100644 index 0000000..1f793ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/gem.rake @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +require "rubygems/package_task" + +namespace :gem do + GEM_SPEC = Gem::Specification.new do |s| + s.name = PKG_NAME + s.version = PKG_VERSION + s.summary = PKG_SUMMARY + s.description = PKG_DESCRIPTION + + s.files = PKG_FILES.to_a + + s.extra_rdoc_files = %w( README.md ) + s.rdoc_options.concat ["--main", "README.md"] + + if !s.respond_to?(:add_development_dependency) + puts "Cannot build Gem with this version of RubyGems." + exit(1) + end + + s.required_ruby_version = ">= 2.0" + + s.add_runtime_dependency "public_suffix", ">= 2.0.2", "< 5.0" + s.add_development_dependency "bundler", ">= 1.0", "< 3.0" + + s.require_path = "lib" + + s.author = "Bob Aman" + s.email = "bob@sporkmonger.com" + s.homepage = "https://github.com/sporkmonger/addressable" + s.license = "Apache-2.0" + end + + Gem::PackageTask.new(GEM_SPEC) do |p| + p.gem_spec = GEM_SPEC + p.need_tar = true + p.need_zip = true + end + + desc "Generates .gemspec file" + task :gemspec do + spec_string = GEM_SPEC.to_ruby + File.open("#{GEM_SPEC.name}.gemspec", "w") do |file| + file.write spec_string + end + end + + desc "Show information about the gem" + task :debug do + puts GEM_SPEC.to_ruby + end + + desc "Install the gem" + task :install => ["clobber", "gem:package"] do + sh "#{SUDO} gem install --local pkg/#{GEM_SPEC.full_name}" + end + + desc "Uninstall the gem" + task :uninstall do + installed_list = Gem.source_index.find_name(PKG_NAME) + if installed_list && + (installed_list.collect { |s| s.version.to_s}.include?(PKG_VERSION)) + sh( + "#{SUDO} gem uninstall --version '#{PKG_VERSION}' " + + "--ignore-dependencies --executables #{PKG_NAME}" + ) + end + end + + desc "Reinstall the gem" + task :reinstall => [:uninstall, :install] + + desc "Package for release" + task :release => ["gem:package", "gem:gemspec"] do |t| + v = ENV["VERSION"] or abort "Must supply VERSION=x.y.z" + abort "Versions don't match #{v} vs #{PROJ.version}" if v != PKG_VERSION + pkg = "pkg/#{GEM_SPEC.full_name}" + + changelog = File.open("CHANGELOG.md") { |file| file.read } + + puts "Releasing #{PKG_NAME} v. 
#{PKG_VERSION}" + Rake::Task["git:tag:create"].invoke + end +end + +desc "Alias to gem:package" +task "gem" => "gem:package" + +task "gem:release" => "gem:gemspec" + +task "clobber" => ["gem:clobber_package"] diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/git.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/git.rake new file mode 100644 index 0000000..1238c8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/git.rake @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +namespace :git do + namespace :tag do + desc "List tags from the Git repository" + task :list do + tags = `git tag -l` + tags.gsub!("\r", "") + tags = tags.split("\n").sort {|a, b| b <=> a } + puts tags.join("\n") + end + + desc "Create a new tag in the Git repository" + task :create do + changelog = File.open("CHANGELOG.md", "r") { |file| file.read } + puts "-" * 80 + puts changelog + puts "-" * 80 + puts + + v = ENV["VERSION"] or abort "Must supply VERSION=x.y.z" + abort "Versions don't match #{v} vs #{PKG_VERSION}" if v != PKG_VERSION + + git_status = `git status` + if git_status !~ /^nothing to commit/ + abort "Working directory isn't clean." + end + + tag = "#{PKG_NAME}-#{PKG_VERSION}" + msg = "Release #{PKG_NAME}-#{PKG_VERSION}" + + existing_tags = `git tag -l #{PKG_NAME}-*`.split('\n') + if existing_tags.include?(tag) + warn("Tag already exists, deleting...") + unless system "git tag -d #{tag}" + abort "Tag deletion failed." + end + end + puts "Creating git tag '#{tag}'..." + unless system "git tag -a -m \"#{msg}\" #{tag}" + abort "Tag creation failed." + end + end + end +end + +task "gem:release" => "git:tag:create" diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/metrics.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/metrics.rake new file mode 100644 index 0000000..107cc24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/metrics.rake @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +namespace :metrics do + task :lines do + lines, codelines, total_lines, total_codelines = 0, 0, 0, 0 + for file_name in FileList["lib/**/*.rb"] + f = File.open(file_name) + while line = f.gets + lines += 1 + next if line =~ /^\s*$/ + next if line =~ /^\s*#/ + codelines += 1 + end + puts "L: #{sprintf("%4d", lines)}, " + + "LOC #{sprintf("%4d", codelines)} | #{file_name}" + total_lines += lines + total_codelines += codelines + + lines, codelines = 0, 0 + end + + puts "Total: Lines #{total_lines}, LOC #{total_codelines}" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/profile.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/profile.rake new file mode 100644 index 0000000..b697d48 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/profile.rake @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +namespace :profile do + desc "Profile Template match memory allocations" + task :template_match_memory do + require "memory_profiler" + require "addressable/template" + + start_at = Time.now.to_f + template = Addressable::Template.new("http://example.com/{?one,two,three}") + report = MemoryProfiler.report do + 30_000.times do + template.match( + "http://example.com/?one=one&two=floo&three=me" + ) + end + end + end_at = Time.now.to_f + print_options = { scale_bytes: true, normalize_paths: true } + puts "\n\n" + + if ENV["CI"] + report.pretty_print(print_options) + else + t_allocated = report.scale_bytes(report.total_allocated_memsize) + t_retained = 
report.scale_bytes(report.total_retained_memsize) + + puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)" + puts "Total retained: #{t_retained} (#{report.total_retained} objects)" + puts "Took #{end_at - start_at} seconds" + + FileUtils.mkdir_p("tmp") + report.pretty_print(to_file: "tmp/memprof.txt", **print_options) + end + end + + desc "Profile URI parse memory allocations" + task :memory do + require "memory_profiler" + require "addressable/uri" + if ENV["IDNA_MODE"] == "pure" + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + + start_at = Time.now.to_f + report = MemoryProfiler.report do + 30_000.times do + Addressable::URI.parse( + "http://google.com/stuff/../?with_lots=of¶ms=asdff#!stuff" + ).normalize + end + end + end_at = Time.now.to_f + print_options = { scale_bytes: true, normalize_paths: true } + puts "\n\n" + + if ENV["CI"] + report.pretty_print(**print_options) + else + t_allocated = report.scale_bytes(report.total_allocated_memsize) + t_retained = report.scale_bytes(report.total_retained_memsize) + + puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)" + puts "Total retained: #{t_retained} (#{report.total_retained} objects)" + puts "Took #{end_at - start_at} seconds" + + FileUtils.mkdir_p("tmp") + report.pretty_print(to_file: "tmp/memprof.txt", **print_options) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/rspec.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/rspec.rake new file mode 100644 index 0000000..e3d9f01 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/rspec.rake @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +require "rspec/core/rake_task" + +namespace :spec do + RSpec::Core::RakeTask.new(:simplecov) do |t| + t.pattern = FileList['spec/**/*_spec.rb'] + t.rspec_opts = %w[--color --format documentation] unless ENV["CI"] + end + + namespace :simplecov do + desc "Browse the code coverage report." 
+ task :browse => "spec:simplecov" do + require "launchy" + Launchy.open("coverage/index.html") + end + end +end + +desc "Alias to spec:simplecov" +task "spec" => "spec:simplecov" + +task "clobber" => ["spec:clobber_simplecov"] diff --git a/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/yard.rake b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/yard.rake new file mode 100644 index 0000000..515f960 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/addressable-2.8.0/tasks/yard.rake @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require "rake" + +begin + require "yard" + require "yard/rake/yardoc_task" + + namespace :doc do + desc "Generate Yardoc documentation" + YARD::Rake::YardocTask.new do |yardoc| + yardoc.name = "yard" + yardoc.options = ["--verbose", "--markup", "markdown"] + yardoc.files = FileList[ + "lib/**/*.rb", "ext/**/*.c", + "README.md", "CHANGELOG.md", "LICENSE.txt" + ].exclude(/idna/) + end + end + + task "clobber" => ["doc:clobber_yard"] + + desc "Alias to doc:yard" + task "doc" => "doc:yard" +rescue LoadError + # If yard isn't available, it's not the end of the world + desc "Alias to doc:rdoc" + task "doc" => "doc:rdoc" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/.rspec b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/.rspec new file mode 100644 index 0000000..397921f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/.rspec @@ -0,0 +1,2 @@ +--color +--format d diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/.travis.yml new file mode 100644 index 0000000..4c090ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/.travis.yml @@ -0,0 +1,33 @@ +language: ruby + +branches: + only: + - master + +rvm: +- 1.9.3 +- 2.0 +- 2.1 +- 2.2 +- 2.3 +- 2.4 +- 2.5 +- jruby +- rbx-3 + +matrix: + allow_failures: + - rvm: rbx-3 + - rvm: jruby + include: + - rvm: 1.8.7 + dist: precise + + +cache: bundler + +before_script: + - wget https://alg.li/algolia-keys && chmod +x algolia-keys + +script: + - if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [[ ! "$TRAVIS_PULL_REQUEST_SLUG" =~ ^algolia\/ ]]; then eval $(./algolia-keys export) && bundle exec rspec --tag ~maintainers_only; else bundle exec rspec; fi diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/CHANGELOG.md new file mode 100755 index 0000000..26969f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/CHANGELOG.md @@ -0,0 +1,454 @@ +# ChangeLog + +## Unreleased + +## [1.27.4](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.3...1.27.4) (2020-09-16) + +**Fixed** + +* Retrieve all objects when using `copy_index` from `AccountClient` class ([399](https://github.com/algolia/algoliasearch-client-ruby/pull/399)) + +## [1.27.3](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.2...1.27.3) (2020-06-03) + +**Fixed** + +* Replace expired certificate within embedded certificate chain ([9087dd1](https://github.com/algolia/algoliasearch-client-ruby/commit/9087dd14a97bf77c9391a3360c4803edf686086d)) + +## [1.27.2](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.1...1.27.2) (2020-04-28) + +**Fixed** + +* In `search_user_id`, retrieve param `cluster` instead of `clusterName`. 
[368](https://github.com/algolia/algoliasearch-client-ruby/issues/368) + +## [1.27.1](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.0...1.27.1) (2019-09-26) + +**Fixed** + +* Update `Algolia::Index.exists` method to `Algolia::Index.exists?`. [364](https://github.com/algolia/algoliasearch-client-ruby/issues/364) + +## [1.27.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.27.0) (2019-09-16) + +**Added** + +* Introduce `Algolia::Index.exists` method. [358](https://github.com/algolia/algoliasearch-client-ruby/issues/358) + + Check whether an index exists or not. + +* Introduce `Algolia::Index.find_object` method. [359](https://github.com/algolia/algoliasearch-client-ruby/issues/359) + + Find object by the given condition. + +* Introduce `Algolia::Index.get_object_position` method. [359](https://github.com/algolia/algoliasearch-client-ruby/issues/359) + + Retrieve the given object position in a set of results. + +* Introduce `Algolia.get_secured_api_key_remaining_validity` method. [361](https://github.com/algolia/algoliasearch-client-ruby/issues/361) + + Returns the remaining validity time for the given API key in seconds. + + +## [1.26.1](https://github.com/algolia/algoliasearch-client-ruby/compare/1.26.0...1.26.1) (2019-07-31) + +### Chore + +- stop using coveralls because of a GPL-licensed transitive dep ([d2fbe8c](https://github.com/algolia/algoliasearch-client-ruby/commit/d2fbe8c)) + + +[1.26.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.26.0) (2019-02-12) + +**Added** + +* Introduce `Algolia.restore_api_key` method. + + If you delete your API key by mistake, you can now restore it via + this new method. This especially useful if this key is used in a + mobile app or somewhere you can't update easily. 
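The 1.26.0–1.27.1 entries above describe new index- and key-level calls. A minimal usage sketch follows; it is not taken from the gem itself, and it assumes the v1 `algoliasearch` client initialized as in this gem's README. The credentials, index name, and key values are placeholders, and the argument shapes for the two key helpers are inferred from the entry descriptions rather than verified against the gem.

```ruby
require 'algoliasearch'

# Placeholder credentials and index name, for illustration only.
Algolia.init(application_id: 'YourApplicationID', api_key: 'YourAdminAPIKey')
index = Algolia::Index.new('contacts')

# 1.27.0 / 1.27.1: check whether the index exists (renamed to `exists?` in 1.27.1).
puts index.exists?

# 1.27.0: remaining validity, in seconds, of a secured API key (key value is a placeholder).
puts Algolia.get_secured_api_key_remaining_validity('YourSecuredSearchKey')

# 1.26.0: restore an API key that was deleted by mistake (key value is a placeholder).
Algolia.restore_api_key('YourDeletedAPIKey')
```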
+ + +## [1.25.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.2) (2018-12-19) + +## [1.25.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.1) (2018-12-19) + +**Fixed** + +* Missing `insights.rb` in gemspec - [7d2f3ab](https://github.com/algolia/algoliasearch-client-ruby/commit/7d2f3abe6e4338f0f7364f6f52ac1d371f066464) + +## [1.25.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.0) (2018-12-19) + +**Added** + +* Introduce Insights client to send events to Algolia Insights API - [326](https://github.com/algolia/algoliasearch-client-ruby/issues/326) + +* Add `multiple_get_objects` to retrieve objects by objectID across multiple indices - [329](https://github.com/algolia/algoliasearch-client-ruby/issues/329) + +**Modified** + +* Use the correct `hitsPerPage` key when exporting index resources - [319](https://github.com/algolia/algoliasearch-client-ruby/issues/319) + +## [1.24.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.24.0) (2018-11-28) + +* Feat(adds-account-client-copy-index): adds `copy_index` to account client ([#324](https://github.com/algolia/algoliasearch-client-ruby/pull/324)) +* Feat(replace-all-methods): adds `replace_all_rules`, `replace_all_objects` and `replace_all_synonyms` to search index ([#323](https://github.com/algolia/algoliasearch-client-ruby/pull/323)) +* Feat(scoped-copy-methods): adds `copy_settings`, `copy_synonyms` and `copy_rules` to search client ([#322](https://github.com/algolia/algoliasearch-client-ruby/pull/322)) + +## [1.23.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.23.2) (2018-06-19) + +* Fix(analytics): gem without new analytics class (#306) + +## [1.23.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.23.0) (2018-06-19) + +* Feat(analytics): introduce new analytics class +* Chore(rake): use unshift to keep compat with older ruby versions +* Ruby version must be after client version in ua +* Fix ua tests with new format +* Rewrite ua +* Feat(ua): add ruby version +* Fix(syntax): this isn't php +* Tests(mcm): use unique userid everytime +* Tests(mcm): introduce auto_retry for more stable tests +* Feat(client): expose waittask in the client (#302) +* Fix(travis): always test on the latest patches (#295) + +## [1.22.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.22.0) (2018-05-30) + +* Rename license file (#297) +* Fix release task (#294) +* Introduce multi cluster management (#285) +* Fix(browse): ensure cursor is passed in the body (#288) +* Chore(md): update contribution-related files (#293) + +## [1.21.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.21.0) (2018-05-24) + +* Fix(tests): fix warning for unspecified exception (#287) +* Fix release task missing github link (#291) +* Api review (#292) + +## [1.20.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.20.1) (2018-05-15) + +* Fix changelog link in gemspec (#290) +* Utils: move to changelog.md and add rake task for release (#289) + +## [1.20.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.20.0) (2018-05-07) + +* Feat: deprecate api keys methods on index in favor of client ones (#286) +* Chore(gemfile): remove useless dependencies (#280) +* Fix(env): adding default env var (#279) +* Chore(travis): test against Rubinius 3 (#281) +* Fix: prevent saving a rule with an empty `objectID` (#283) + +## 
[1.19.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.2) (2018-04-03) + +* Fix `Algolia.delete_index` wrong number of arguments (#277) + +## [1.19.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.1) (2017-12-18) + +* Fix hard dependency on `hashdiff` (#262) + +## [1.19.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.0) (2017-12-15) + +* Add request options to any method using API calls (#213) +* Add `export_synonyms` index method (#260) +* Add `export_rules` index method (#261) + +## [1.18.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.5) (2017-12-07) + +* Fix missing requirement + +## [1.18.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.4) (2017-12-06) + +* Remove remaining unnecessary requirements (#256) +* Remove Gemfile.lock (#257) + +## [1.18.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.3) (2017-12-04) + +* Remove Bundler and RubyGems requirements (#253) + +## [1.18.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.2) (2017-11-28) + +* Add (undocumented) gzip option to disable gzip (#240) + +## [1.18.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.1) (2017-11-15) + +* Fix `get_logs` always returning type `all` (#244) +* New scopes to `copy_index` method (#243) + +## [1.18.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.0) (2017-11-02) + +* Allow to reuse the webmocks using `Algolia::WebMock.mock!` (#256) + +## [1.17.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.17.0) (2017-10-10) + +* New `delete_by` method + +## [1.16.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.16.0) (2017-09-14) + +* New Query Rules API + +## [1.15.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.15.1) (2017-08-17) + +* Fixed regression introduced in 1.15.0 + +## [1.15.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.15.0) (2017-08-17) + +* Make `delete_by_query` not `wait_task` by default (also, make it mockable) +* Add a new `delete_by_query!` doing the last `wait_task` + +## [1.14.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.14.0) (2017-07-31) + +* Ability to override the underlying user-agent + +## [1.13.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.13.0) (2017-03-17) + +* Add a `index.get_task_status(taskID)` method (#199) + +## [1.12.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.7) (2017-03-01) + +* Renamed all `*_user_key` methods to `*_api_key` + +## [1.12.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.6) (2017-01-25) + +* Upgraded `httpclient` to 2.8.3 + +## [1.12.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.5) (2016-12-07) + +* Fixed retry strategy not keeping the `current_host` first (#163) + +## [1.12.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.4) (2016-12-07) + +* Fix DNS tests + +## [1.12.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.3) (2016-12-06) + +* Allow for multiple clients on different app ids on the same thread + +## [1.12.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.2) (2016-12-05) + +* Fix client scoped methods + +## [1.12.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.1) (2016-11-25) + +* Rename `search_facet` to 
`search_for_facet_values` + +## [1.12.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.0) (2016-10-31) + +* Add `search_facet` + +## [1.11.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.11.0) (2016-08-21) + +* Upgraded to httpclient 2.8.1 to avoid reseting the keep-alive while changing timeouts + +## [1.10.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.10.0) (2016-07-11) + +* `{get,set}_settings` now take optional custom query parameters + +## [1.9.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.9.0) (2016-06-17) + +* New synonyms API + +## [1.8.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.8.1) (2016-04-14) + +* Ensure we're using an absolute path for the `ca-bundle.crt` file (could fix some `OpenSSL::X509::StoreError: system lib` errors) + +## [1.8.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.8.0) (2016-04-06) + +* Upgraded to `httpclient` 2.7.1 (includes ruby 2.3.0 deprecation fixes) +* Upgraded WebMock URLs + +## [1.7.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.7.0) (2016-01-09) + +* New `generate_secured_api_key` embedding the filters in the resulting key + +## [1.6.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.6.1) (2015-08-01) + +* Search queries are now using POST requests + +## [1.6.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.6.0) (2015-07-19) + +* Ability to instantiate multiple API clients in the same process (was using a class variable before). + +## [1.5.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.5.1) (2015-07-14) + +* Ability to retrieve a single page from a cursor with `browse_from` + +## [1.5.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.5.0) (2015-06-05) + +* New cursor-based `browse()` implementation taking query parameters + +## [1.4.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.3) (2015-05-27) + +* Do not call `WebMock.disable!` in the helper + +## [1.4.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4,2) (2015-05-04) + +* Add new methods to add/update api key +* Add batch method to target multiple indices +* Add strategy parameter for the multipleQueries +* Add new method to generate secured api key from query parameters + +## [1.4.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.1) (2015-04-10) + +* Force the default connect/read/write/search/batch timeouts to Algolia-specific values + +## [1.4.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.0) (2015-03-17) + +* High-available DNS: search queries are now targeting `APPID-DSN.algolia.net` first, then the main cluster using NSOne, then the main cluster using Route53. +* High-available DNS: Indexing queries are targeting `APPID.algolia.net` first, then the main cluster using NSOne, then the main cluster using Route53. 
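Entries 1.7.0, 1.10.0, and 1.12.1 above concern the search-side API surface (filter-embedding secured keys, settings calls, facet-value search). The sketch below is likewise not from the changelog; it assumes the same v1 client conventions as the README, with all key values, settings, and facet names chosen purely for illustration, and it takes the hash form of `generate_secured_api_key` on the strength of the 1.7.0 entry rather than a verified signature.

```ruby
require 'algoliasearch'

Algolia.init(application_id: 'YourApplicationID', api_key: 'YourAdminAPIKey')
index = Algolia::Index.new('contacts') # placeholder index name

# 1.10.0: settings calls accept optional custom query parameters.
index.set_settings('attributesForFaceting' => ['state'])

# 1.12.1: facet-value search, renamed from `search_facet`.
index.search_for_facet_values('state', 'A')

# 1.7.0: derive a secured key that embeds filters from a search-only key (values are placeholders).
secured_key = Algolia.generate_secured_api_key('YourSearchOnlyAPIKey', filters: 'state:AK')
```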
+ +## [1.3.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.3.1) (2014-11-29) + +* Fixed wrong deployed version (1.3.0 was based on 1.2.13 instead of 1.2.14) + +## [1.3.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.3.0) (2014-11-29) + +* Use `algolia.net` domain instead of `algolia.io` + +## [1.2.14](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.14) (2014-11-10) + +* Force the underlying `httpclient` dependency to be >= 2.4 in the gemspec as well +* Ability to force the SSL version + +## [1.2.13](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.13) (2014-10-22) + +* Fix the loop on hosts to retry when the http code is different than 200, 201, 400, 403, 404 + +## [1.2.12](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.12) (2014-10-08) + +* Upgrade to `httpclient` 2.4 +* Do not reset the timeout on each requests + +## [1.2.11](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.11) (2014-09-14) + +* Ability to update API keys + +## [1.2.10](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.10) (2014-08-22) + +* Using Digest to remove "Digest::Digest is deprecated; Use Digest" warning (author: @dglancy) + +## [1.2.9](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.9) (2014-07-10) + +* Expose `connect_timeout`, `receive_timeout` and `send_timeout` +* Add new `delete_by_query` method to delete all objects matching a specific query +* Add new `get_objects` method to retrieve a list of objects from a single API call +* Add a helper to perform disjunctive faceting + +## [1.2.8](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.8) (2014-03-27) + +* Catch all exceptions before retrying with another host + +## [1.2.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.7) (2014-03-24) + +* Ruby 1.8 compatibility + +## [1.2.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.6) (2014-03-19) + +* Raise an exception if no `APPLICATION_ID` is provided +* Ability to get last API call errors +* Ability to send multiple queries using a single API call +* Secured API keys generation is now based on secured HMAC-SHA256 + +## [1.2.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.5) (2014-02-24) + +* Ability to generate secured API key from a list of tags + optional `user_token` +* Ability to specify a list of indexes targeted by the user key + +## [1.2.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.4) (2014-02-21) + +* Add `delete_objects` + +## [1.2.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.3) (2014-02-10) + +* `add_object`: POST request if `objectID` is `nil` OR `empty` + +## [1.2.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.2) (2014-01-11) + +* Expose `batch` requests + +## [1.2.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.1) (2014-01-07) + +* Removed `jeweler` since it doesn't support platform specific deps (see https://github.com/technicalpickles/jeweler/issues/170) + +## [1.2.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.0) (2014-01-07) + +* Removed `curb` dependency and switched on `httpclient` to avoid fork-safety issue (see issue #5) + +## [1.1.18](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.18) (2014-01-06) + +* Fixed batch request builder (broken since last refactoring) + +## 
[1.1.17](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.17) (2014-01-02) + +* Ability to use IP rate limit behind a proxy forwarding the end-user's IP +* Add documentation for the new `attributeForDistinct` setting and `distinct` search parameter + +## [1.1.16](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.16) (2013-12-16) + +* Add arguments type-checking +* Normalize save_object/partial_update/add_object signatures +* Force dependencies versions + +## [1.1.15](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.15) (2013-12-16) + +* Embed ca-bundle.crt + +## [1.1.14](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.14) (2013-12-11) + +* Added `index.add_user_key(acls, validity, rate_limit, maxApiCalls)`` + +## [1.1.13](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.13) (2013-12-10) + +* WebMock integration + +## [1.1.12](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.12) (2013-12-05) + +* Add `browse` command + +## [1.1.11](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.11) (2013-11-29) + +* Remove rubysl (rbx required dependencies) + +## [1.1.10](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.10) (2013-11-29) + +* Fixed gzip handling bug + +## [1.1.9](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.9) (2013-11-28) + +* Added gzip support + +## [1.1.8](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.8) (2013-11-26) + +* Add `partial_update_objects` method + +## [1.1.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.7) (2013-11-08) + +* Search params: encode array-based parameters (`tagFilters`, `facetFilters`, ...) 
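The 1.1.8 and 1.1.12 entries above add `partial_update_objects` and `browse`. One more short sketch, again not taken from the gem, using the same placeholder setup; the block form of `browse` reflects the cursor-based iteration described further up (1.5.0) and is an assumption, not a verified signature.

```ruby
require 'algoliasearch'

Algolia.init(application_id: 'YourApplicationID', api_key: 'YourAdminAPIKey')
index = Algolia::Index.new('contacts') # placeholder index name

# 1.1.8: partially update existing records by objectID (attribute values are placeholders).
index.partial_update_objects([
  { 'objectID' => '1', 'followers' => 3575 }
])

# 1.1.12 / 1.5.0: walk the whole index, one hit at a time.
index.browse { |hit| puts hit['objectID'] }
```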
+ +## [1.1.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.6) (2013-11-07) + +* Index: `clear` and `clear!` methods can now be used the delete the whole content of an index +* User keys: plug new `maxQueriesPerIPPerHour` and `maxHitsPerQuery` parameters + +## [1.1.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.5) (2013-10-17) + +* Version is now part of the user-agent + +## [1.1.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.4) (2013-10-17) + +* Fixed `wait_task` not sleeping at all + +## [1.1.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.3) (2013-10-15) + +* Fixed thread-safety issues +* Curl sessions are now thread-local + +## [1.1.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.2) (2013-10-02) + +* Fixed instance/class method conflict + +## [1.1.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.1) (2013-10-01) + +* Updated documentation +* Plug copy/move index + +## [1.1.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.0) (2013-09-17) + +* Initial import diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Gemfile b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Gemfile new file mode 100644 index 0000000..e05662e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Gemfile @@ -0,0 +1,28 @@ +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# Load algoliasearch.gemspec dependencies +gemspec + +# See https://github.com/algolia/algoliasearch-client-ruby/pull/257/files/36bcd0b1c4d05776dcbdb362c15a609c81f41cde +if Gem::Version.new(RUBY_VERSION) <= Gem::Version.new('1.9.3') + gem 'hashdiff', '< 0.3.6' # Hashdiff 0.3.6 no longer supports Ruby 1.8 + gem 'highline', '< 1.7.0' + gem 'mime-types', '< 2.0' + gem 'rubysl', '~> 2.0', :platform => :rbx +else + gem 'rubysl', '~> 2.2', :platform => :rbx +end + +group :development do + gem 'rake' + gem 'rdoc' + gem 'travis' +end + +group :test do + gem 'rspec', '>= 2.5.0' + gem 'webmock' + gem 'simplecov' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Gemfile.lock new file mode 100644 index 0000000..998dc7c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Gemfile.lock @@ -0,0 +1,99 @@ +PATH + remote: . 
+ specs: + algoliasearch (1.27.4) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + +GEM + remote: https://rubygems.org/ + specs: + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + backports (3.18.2) + connection_pool (2.2.3) + crack (0.4.3) + safe_yaml (~> 1.0.0) + diff-lcs (1.4.4) + docile (1.3.2) + ethon (0.12.0) + ffi (>= 1.3.0) + faraday (0.17.3) + multipart-post (>= 1.2, < 3) + faraday_middleware (0.14.0) + faraday (>= 0.7.4, < 1.0) + ffi (1.13.1) + gh (0.14.0) + addressable + backports + faraday (~> 0.8) + multi_json (~> 1.0) + net-http-persistent (>= 2.7) + net-http-pipeline + hashdiff (1.0.1) + highline (1.7.10) + httpclient (2.8.3) + json (2.3.1) + launchy (2.5.0) + addressable (~> 2.7) + multi_json (1.15.0) + multipart-post (2.1.1) + net-http-persistent (4.0.0) + connection_pool (~> 2.2) + net-http-pipeline (1.0.1) + public_suffix (4.0.6) + pusher-client (0.6.2) + json + websocket (~> 1.0) + rake (13.0.1) + rdoc (6.2.1) + rspec (3.9.0) + rspec-core (~> 3.9.0) + rspec-expectations (~> 3.9.0) + rspec-mocks (~> 3.9.0) + rspec-core (3.9.2) + rspec-support (~> 3.9.3) + rspec-expectations (3.9.2) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-mocks (3.9.1) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-support (3.9.3) + safe_yaml (1.0.5) + simplecov (0.19.0) + docile (~> 1.1) + simplecov-html (~> 0.11) + simplecov-html (0.12.2) + travis (1.8.13) + backports + faraday (~> 0.9) + faraday_middleware (~> 0.9, >= 0.9.1) + gh (~> 0.13) + highline (~> 1.6) + launchy (~> 2.1) + pusher-client (~> 0.4) + typhoeus (~> 0.6, >= 0.6.8) + typhoeus (0.8.0) + ethon (>= 0.8.0) + webmock (3.8.3) + addressable (>= 2.3.6) + crack (>= 0.3.2) + hashdiff (>= 0.4.0, < 2.0.0) + websocket (1.2.8) + +PLATFORMS + ruby + +DEPENDENCIES + algoliasearch! + rake + rdoc + rspec (>= 2.5.0) + rubysl (~> 2.2) + simplecov + travis + webmock + +BUNDLED WITH + 1.17.2 diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/LICENSE b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/LICENSE new file mode 100644 index 0000000..fddf416 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-Present Algolia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/README.md b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/README.md new file mode 100644 index 0000000..a19b944 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/README.md @@ -0,0 +1,61 @@ +

+Algolia for Ruby
+
+The perfect starting point to integrate Algolia within your Ruby project
+
+Build Status • Gem Version • License
+
+Documentation • Rails • Community Forum • Stack Overflow • Report a bug • FAQ • Support

+ +## ✨ Features + +- Thin & minimal low-level HTTP client to interact with Algolia's API +- Supports Ruby `^1.8.7`. + +## 💡 Getting Started + +First, install Algolia Ruby API Client via the [RubyGems](https://rubygems.org/) package manager: +```bash +gem install algoliasearch +``` + +Then, create objects on your index: + + +```ruby +Algolia.init(application_id: 'YourApplicationID', + api_key: 'YourAPIKey') +index = Algolia::Index.new('your_index_name') + +index.save_objects([objectID: 1, name: 'Foo']) +``` + +Finally, you may begin searching a object using the `search` method: +```ruby +objects = index.search('Fo') +``` + +For full documentation, visit the **[Algolia Ruby API Client](https://www.algolia.com/doc/api-client/getting-started/install/ruby/)**. + +## ❓ Troubleshooting + +Encountering an issue? Before reaching out to support, we recommend heading to our [FAQ](https://www.algolia.com/doc/api-client/troubleshooting/faq/ruby/) where you will find answers for the most common issues and gotchas with the client. + +## 📄 License + +Algolia Ruby API Client is an open-sourced software licensed under the [MIT license](LICENSE.md). diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Rakefile b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Rakefile new file mode 100644 index 0000000..b328a8b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/Rakefile @@ -0,0 +1,111 @@ +# encoding: utf-8 + +require 'bundler/gem_tasks' + +begin + Bundler.setup(:default, :development) +rescue Bundler::BundlerError => e + $stderr.puts e.message + $stderr.puts "Run `bundle install` to install missing gems" + exit e.status_code +end +require 'rake' + +require File.expand_path('../lib/algolia/version', __FILE__) + +require 'rake/testtask' +Rake::TestTask.new(:test) do |test| + test.libs << 'lib' << 'test' + test.pattern = 'test/**/test_*.rb' + test.verbose = true +end + +require 'rdoc/task' +Rake::RDocTask.new do |rdoc| + version = Algolia::VERSION + rdoc.rdoc_dir = 'rdoc' + rdoc.title = "algoliasearch #{version}" + rdoc.rdoc_files.include('README*') + rdoc.rdoc_files.include('lib/**/*.rb') +end + +require 'rspec/core/rake_task' +RSpec::Core::RakeTask.new(:spec) + +task :default => :spec + +namespace :algolia do + GEM_VERSION_FILE = File.expand_path('../lib/algolia/version.rb', __FILE__) + GIT_TAG_URL = 'https://github.com/algolia/algoliasearch-client-ruby/releases/tag/' + + def last_commit_date + `git log -1 --date=short --format=%cd`.chomp + end + + def latest_tag + `git describe --tags --abbrev=0`.chomp + end + + def changelog(git_start = latest_tag(), git_end = 'HEAD', format = '%s') + `git log --no-decorate --no-merges --pretty=format:#{format} #{git_start}..#{git_end}` + end + + desc 'Write latest changes to CHANGELOG.md' + task :changelog, [:version] do |t, args| + # Filters-out commits containing some keywords and adds header + exceptions_regexp = Regexp.union(['README']) + title = "## [%s](%s%s) (%s)\n\n" % [args[:version], GIT_TAG_URL, args[:version], last_commit_date] + changes = changelog.each_line + .map { |line| (exceptions_regexp === line) ? nil : "* #{line.capitalize}" } + .unshift(title) + .append("\n\n") + .join + + puts changes + puts "\n\e[31mDo you want to update the CHANGELOG.md with the text above? 
[y/N]\e[0m" + exit if STDIN.gets.chomp.downcase != 'y' + + # Rewrite CHANGELOG.md + old_changes = File.readlines('CHANGELOG.md', 'r').join + File.open('CHANGELOG.md', 'w') { |file| file.write(changes, old_changes) } + + puts 'CHANGELOG.md successfully updated' + end + + desc 'Bump gem version' + task :semver, [:version] do |t, args| + + File.open(GEM_VERSION_FILE, 'w') do |file| + file.write <<~SEMVER + module Algolia + VERSION = "#{args[:version]}" + end + SEMVER + end + + # This force to reload the file with the newest version. + # Hence, `gemspec.version` invoked by Bundler later on will be correct. + load GEM_VERSION_FILE + + puts "Bumped gem version from #{latest_tag} to #{args[:version]}" + end + + desc 'Release a new version of this gem' + task :release, [:version] => [:changelog, :semver] do |t, args| + `git add #{File.expand_path('../lib/algolia/version.rb', __FILE__)} CHANGELOG.md` + `git commit -m "Bump to version #{args[:version]}"` + + # Invoke Bundler :release task + # https://github.com/bundler/bundler/blob/master/lib/bundler/gem_helper.rb + # + Rake::Task[:release].invoke + end +end + +module Bundler + class GemHelper + def version_tag + "#{version}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec new file mode 100644 index 0000000..eeb620c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec @@ -0,0 +1,86 @@ +# -*- encoding: utf-8 -*- + +require 'date' +require File.join(File.dirname(__FILE__), 'lib', 'algolia', 'version') + +Gem::Specification.new do |s| + s.name = "algoliasearch" + s.version = Algolia::VERSION + s.authors = ["Algolia"] + s.email = "contact@algolia.com" + + s.date = Date.today + s.licenses = ["MIT"] + s.summary = "A simple Ruby client for the algolia.com REST API" + s.description = "A simple Ruby client for the algolia.com REST API" + s.homepage = "https://github.com/algolia/algoliasearch-client-ruby" + + s.metadata = { + "bug_tracker_uri" => "https://github.com/algolia/algoliasearch-client-ruby/issues", + "changelog_uri" => "https://github.com/algolia/algoliasearch-client-ruby/blob/master/CHANGELOG.md", + "documentation_uri" => "http://www.rubydoc.info/gems/algoliasearch", + "homepage_uri" => "https://www.algolia.com/doc/api-client/ruby/getting-started/", + "source_code_uri" => "https://github.com/algolia/algoliasearch-client-ruby" + } + + s.post_install_message = "A new major version is available for Algolia! Please now use the https://rubygems.org/gems/algolia gem to get the latest features." + + s.require_paths = ["lib"] + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + + s.extra_rdoc_files = [ + "CHANGELOG.md", + "LICENSE", + "README.md" + ] + s.files = [ + ".rspec", + ".travis.yml", + "CHANGELOG.md", + "Gemfile", + "Gemfile.lock", + "LICENSE", + "README.md", + "Rakefile", + "algoliasearch.gemspec", + "contacts.json", + "lib/algolia/analytics.rb", + "lib/algolia/account_client.rb", + "lib/algolia/client.rb", + "lib/algolia/error.rb", + "lib/algolia/index.rb", + "lib/algolia/insights.rb", + "lib/algolia/protocol.rb", + "lib/algolia/version.rb", + "lib/algolia/webmock.rb", + "lib/algoliasearch.rb", + "resources/ca-bundle.crt", + "spec/account_client_spec.rb", + "spec/client_spec.rb", + "spec/mock_spec.rb", + "spec/spec_helper.rb", + "spec/stub_spec.rb" + ] + + if s.respond_to? 
:specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + if defined?(RUBY_VERSION) && RUBY_VERSION < '2.0' + s.add_runtime_dependency 'json', '>= 1.5.1', '< 2.3' + else + s.add_runtime_dependency 'json', '>= 1.5.1' + end + s.add_runtime_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_development_dependency 'travis', '~> 0' + s.add_development_dependency 'rake', '~> 0' + s.add_development_dependency 'rdoc', '~> 0' + else + s.add_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_dependency 'json', '>= 1.5.1', '< 2.3' + end + else + s.add_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_dependency 'json', '>= 1.5.1', '< 2.3' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/contacts.json b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/contacts.json new file mode 100644 index 0000000..8ba54fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/contacts.json @@ -0,0 +1,7504 @@ +[ + { + "firstname": "Essie", + "lastname": "Vaill", + "company": "Litronic Industries", + "address": "14225 Hancock Dr", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99515", + "phone": "907-345-0962", + "fax": "907-345-1215", + "email": "essie@vaill.com", + "web": "http://www.essievaill.com", + "followers": 3574 + }, + { + "firstname": "Cruz", + "lastname": "Roudabush", + "company": "Meridian Products", + "address": "2202 S Central Ave", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85004", + "phone": "602-252-4827", + "fax": "602-252-4009", + "email": "cruz@roudabush.com", + "web": "http://www.cruzroudabush.com", + "followers": 6548 + }, + { + "firstname": "Billie", + "lastname": "Tinnes", + "company": "D & M Plywood Inc", + "address": "28 W 27th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10001", + "phone": "212-889-5775", + "fax": "212-889-5764", + "email": "billie@tinnes.com", + "web": "http://www.billietinnes.com", + "followers": 3536 + }, + { + "firstname": "Zackary", + "lastname": "Mockus", + "company": "Metropolitan Elevator Co", + "address": "286 State St", + "city": "Perth Amboy", + "county": "Middlesex", + "state": "NJ", + "zip": "08861", + "phone": "732-442-0638", + "fax": "732-442-5218", + "email": "zackary@mockus.com", + "web": "http://www.zackarymockus.com", + "followers": 1497 + }, + { + "firstname": "Rosemarie", + "lastname": "Fifield", + "company": "Technology Services", + "address": "3131 N Nimitz Hwy #-105", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-836-8966", + "fax": "808-836-6008", + "email": "rosemarie@fifield.com", + "web": "http://www.rosemariefifield.com", + "followers": 4812 + }, + { + "firstname": "Bernard", + "lastname": "Laboy", + "company": "Century 21 Keewaydin Prop", + "address": "22661 S Frontage Rd", + "city": "Channahon", + "county": "Will", + "state": "IL", + "zip": "60410", + "phone": "815-467-0487", + "fax": "815-467-1244", + "email": "bernard@laboy.com", + "web": "http://www.bernardlaboy.com", + "followers": 6891 + }, + { + "firstname": "Sue", + "lastname": "Haakinson", + "company": "Kim Peacock Beringhause", + "address": "9617 N Metro Pky W", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85051", + "phone": "602-953-2753", + "fax": "602-953-0355", + "email": "sue@haakinson.com", + "web": "http://www.suehaakinson.com", + "followers": 5787 + }, + { + "firstname": "Valerie", + 
"lastname": "Pou", + "company": "Sea Port Record One Stop Inc", + "address": "7475 Hamilton Blvd", + "city": "Trexlertown", + "county": "Lehigh", + "state": "PA", + "zip": "18087", + "phone": "610-395-8743", + "fax": "610-395-6995", + "email": "valerie@pou.com", + "web": "http://www.valeriepou.com", + "followers": 8990 + }, + { + "firstname": "Lashawn", + "lastname": "Hasty", + "company": "Kpff Consulting Engineers", + "address": "815 S Glendora Ave", + "city": "West Covina", + "county": "Los Angeles", + "state": "CA", + "zip": "91790", + "phone": "626-960-6738", + "fax": "626-960-1503", + "email": "lashawn@hasty.com", + "web": "http://www.lashawnhasty.com", + "followers": 2131 + }, + { + "firstname": "Marianne", + "lastname": "Earman", + "company": "Albers Technologies Corp", + "address": "6220 S Orange Blossom Trl", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32809", + "phone": "407-857-0431", + "fax": "407-857-2506", + "email": "marianne@earman.com", + "web": "http://www.marianneearman.com", + "followers": 1992 + }, + { + "firstname": "Justina", + "lastname": "Dragaj", + "company": "Uchner, David D Esq", + "address": "2552 Poplar Ave", + "city": "Memphis", + "county": "Shelby", + "state": "TN", + "zip": "38112", + "phone": "901-327-5336", + "fax": "901-327-2911", + "email": "justina@dragaj.com", + "web": "http://www.justinadragaj.com", + "followers": 7149 + }, + { + "firstname": "Mandy", + "lastname": "Mcdonnell", + "company": "Southern Vermont Surveys", + "address": "343 Bush St Se", + "city": "Salem", + "county": "Marion", + "state": "OR", + "zip": "97302", + "phone": "503-371-8219", + "fax": "503-371-1118", + "email": "mandy@mcdonnell.com", + "web": "http://www.mandymcdonnell.com", + "followers": 1329 + }, + { + "firstname": "Conrad", + "lastname": "Lanfear", + "company": "Kahler, Karen T Esq", + "address": "49 Roche Way", + "city": "Youngstown", + "county": "Mahoning", + "state": "OH", + "zip": "44512", + "phone": "330-758-0314", + "fax": "330-758-3536", + "email": "conrad@lanfear.com", + "web": "http://www.conradlanfear.com", + "followers": 2906 + }, + { + "firstname": "Cyril", + "lastname": "Behen", + "company": "National Paper & Envelope Corp", + "address": "1650 S Harbor Blvd", + "city": "Anaheim", + "county": "Orange", + "state": "CA", + "zip": "92802", + "phone": "714-772-5050", + "fax": "714-772-3859", + "email": "cyril@behen.com", + "web": "http://www.cyrilbehen.com", + "followers": 7784 + }, + { + "firstname": "Shelley", + "lastname": "Groden", + "company": "Norton, Robert L Esq", + "address": "110 Broadway St", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78205", + "phone": "210-229-3017", + "fax": "210-229-9757", + "email": "shelley@groden.com", + "web": "http://www.shelleygroden.com", + "followers": 2012 + }, + { + "firstname": "Rosalind", + "lastname": "Krenzke", + "company": "Waldein Manufacturing", + "address": "7000 Bass Lake Rd #-200", + "city": "Minneapolis", + "county": "Hennepin", + "state": "MN", + "zip": "55428", + "phone": "763-537-4194", + "fax": "763-537-3885", + "email": "rosalind@krenzke.com", + "web": "http://www.rosalindkrenzke.com", + "followers": 5547 + }, + { + "firstname": "Davis", + "lastname": "Brevard", + "company": "Transit Trading Corp", + "address": "6715 Tippecanoe Rd", + "city": "Canfield", + "county": "Mahoning", + "state": "OH", + "zip": "44406", + "phone": "330-533-6346", + "fax": "330-533-8211", + "email": "davis@brevard.com", + "web": "http://www.davisbrevard.com", + "followers": 4259 
+ }, + { + "firstname": "Winnie", + "lastname": "Reich", + "company": "Pacific Seating Co", + "address": "1535 Hawkins Blvd", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79925", + "phone": "915-771-2730", + "fax": "915-771-5729", + "email": "winnie@reich.com", + "web": "http://www.winniereich.com", + "followers": 6621 + }, + { + "firstname": "Trudy", + "lastname": "Worlds", + "company": "Shapiro, Mark R Esq", + "address": "24907 Tibbitts Aven #-b", + "city": "Valencia", + "county": "Los Angeles", + "state": "CA", + "zip": "91355", + "phone": "661-257-3083", + "fax": "661-257-0924", + "email": "trudy@worlds.com", + "web": "http://www.trudyworlds.com", + "followers": 5782 + }, + { + "firstname": "Deshawn", + "lastname": "Inafuku", + "company": "Telair Div Of Teleflex Inc", + "address": "3508 Leopard St", + "city": "Corpus Christi", + "county": "Nueces", + "state": "TX", + "zip": "78408", + "phone": "361-884-8433", + "fax": "361-884-3985", + "email": "deshawn@inafuku.com", + "web": "http://www.deshawninafuku.com", + "followers": 1195 + }, + { + "firstname": "Claudio", + "lastname": "Loose", + "company": "Audiotek Electronics", + "address": "286 State St", + "city": "Perth Amboy", + "county": "Middlesex", + "state": "NJ", + "zip": "08861", + "phone": "732-442-8514", + "fax": "732-442-1775", + "email": "claudio@loose.com", + "web": "http://www.claudioloose.com", + "followers": 6043 + }, + { + "firstname": "Sal", + "lastname": "Pindell", + "company": "Wrigley, Robert I Esq", + "address": "1112 Se 1st St", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47713", + "phone": "812-421-4804", + "fax": "812-421-7625", + "email": "sal@pindell.com", + "web": "http://www.salpindell.com", + "followers": 4359 + }, + { + "firstname": "Cristina", + "lastname": "Sharper", + "company": "Tax Office", + "address": "111 W 40th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10018", + "phone": "212-719-3952", + "fax": "212-719-0754", + "email": "cristina@sharper.com", + "web": "http://www.cristinasharper.com", + "followers": 4823 + }, + { + "firstname": "Betty Jane", + "lastname": "Mccamey", + "company": "Vita Foods Inc", + "address": "100 E Broad St", + "city": "Columbus", + "county": "Franklin", + "state": "OH", + "zip": "43215", + "phone": "614-225-0900", + "fax": "614-225-1612", + "email": "cary@mccamey.com", + "web": "http://www.carymccamey.com", + "followers": 8863 + }, + { + "firstname": "Haley", + "lastname": "Rocheford", + "company": "Davis, Robert L Esq", + "address": "6030 Greenwood Plaza Blvd", + "city": "Englewood", + "county": "Arapahoe", + "state": "CO", + "zip": "80111", + "phone": "303-689-7729", + "fax": "303-689-5219", + "email": "haley@rocheford.com", + "web": "http://www.haleyrocheford.com", + "followers": 9872 + }, + { + "firstname": "Dorothea", + "lastname": "Sweem", + "company": "Ehrmann, Rolfe F Esq", + "address": "100 Thanet Circ", + "city": "Trenton", + "county": "Mercer", + "state": "NJ", + "zip": "08648", + "phone": "609-896-5871", + "fax": "609-896-2099", + "email": "dorothea@sweem.com", + "web": "http://www.dorotheasweem.com", + "followers": 8897 + }, + { + "firstname": "Fannie", + "lastname": "Steese", + "company": "Chiapete, W Richard Esq", + "address": "926 E Park Ave", + "city": "Tallahassee", + "county": "Leon", + "state": "FL", + "zip": "32301", + "phone": "850-222-8103", + "fax": "850-222-0105", + "email": "fannie@steese.com", + "web": "http://www.fanniesteese.com", + "followers": 7140 + }, + 
{ + "firstname": "Allyson", + "lastname": "Gillispie", + "company": "De Friese Theo & Sons", + "address": "1722 White Horse Mercerville R", + "city": "Trenton", + "county": "Mercer", + "state": "NJ", + "zip": "08619", + "phone": "609-584-1794", + "fax": "609-584-0645", + "email": "allyson@gillispie.com", + "web": "http://www.allysongillispie.com", + "followers": 1414 + }, + { + "firstname": "Roger", + "lastname": "Seid", + "company": "Yoshida, Gerald C Esq", + "address": "3738 N Monroe St", + "city": "Tallahassee", + "county": "Leon", + "state": "FL", + "zip": "32303", + "phone": "850-422-1535", + "fax": "850-422-8152", + "email": "roger@seid.com", + "web": "http://www.rogerseid.com", + "followers": 6661 + }, + { + "firstname": "Dollie", + "lastname": "Daquino", + "company": "Jd Edwards & Co", + "address": "1005 Congress Ave", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78701", + "phone": "512-478-9636", + "fax": "512-478-9874", + "email": "dollie@daquino.com", + "web": "http://www.dolliedaquino.com", + "followers": 5262 + }, + { + "firstname": "Eva", + "lastname": "Seahorn", + "company": "Saunders Appraisal Inc", + "address": "3 Northern Blvd", + "city": "Amherst", + "county": "Hillsborough", + "state": "NH", + "zip": "03031", + "phone": "603-673-6072", + "fax": "603-673-5009", + "email": "eva@seahorn.com", + "web": "http://www.evaseahorn.com", + "followers": 9192 + }, + { + "firstname": "Mamie", + "lastname": "Mcintee", + "company": "Jacobs, Brian Realtor", + "address": "2810 Jacobs Ave", + "city": "Eureka", + "county": "Humboldt", + "state": "CA", + "zip": "95501", + "phone": "707-443-0621", + "fax": "707-443-9147", + "email": "mamie@mcintee.com", + "web": "http://www.mamiemcintee.com", + "followers": 6954 + }, + { + "firstname": "Lyndon", + "lastname": "Bellerdine", + "company": "A B C Lock & Key", + "address": "200 California St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94111", + "phone": "415-705-1956", + "fax": "415-705-2887", + "email": "lyndon@bellerdine.com", + "web": "http://www.lyndonbellerdine.com", + "followers": 146 + }, + { + "firstname": "Lashonda", + "lastname": "Derouen", + "company": "Travel Agent Magazine", + "address": "101 Royal St", + "city": "Alexandria", + "county": "Alexandria City", + "state": "VA", + "zip": "22314", + "phone": "703-684-3394", + "fax": "703-684-0307", + "email": "lashonda@derouen.com", + "web": "http://www.lashondaderouen.com", + "followers": 3792 + }, + { + "firstname": "Jacklyn", + "lastname": "Emayo", + "company": "Super 8 Motel Temple", + "address": "101 Us Highway 46", + "city": "Fairfield", + "county": "Essex", + "state": "NJ", + "zip": "07004", + "phone": "973-882-3960", + "fax": "973-882-1908", + "email": "jacklyn@emayo.com", + "web": "http://www.jacklynemayo.com", + "followers": 4575 + }, + { + "firstname": "Rubin", + "lastname": "Crotts", + "company": "Misko, Charles G Esq", + "address": "303 N Indian Canyon Dr", + "city": "Palm Springs", + "county": "Riverside", + "state": "CA", + "zip": "92262", + "phone": "760-327-0337", + "fax": "760-327-0929", + "email": "rubin@crotts.com", + "web": "http://www.rubincrotts.com", + "followers": 4736 + }, + { + "firstname": "Boris", + "lastname": "Catino", + "company": "Dream Homes Usa Inc", + "address": "645 Church St", + "city": "Norfolk", + "county": "Norfolk City", + "state": "VA", + "zip": "23510", + "phone": "757-627-8408", + "fax": "757-627-6195", + "email": "boris@catino.com", + "web": "http://www.boriscatino.com", + 
"followers": 2330 + }, + { + "firstname": "Jannie", + "lastname": "Bowditch", + "company": "Lindsays Landing Rv Pk & Mrna", + "address": "1102 Main St", + "city": "Grandview", + "county": "Jackson", + "state": "MO", + "zip": "64030", + "phone": "816-765-0961", + "fax": "816-765-3469", + "email": "jannie@bowditch.com", + "web": "http://www.janniebowditch.com", + "followers": 7302 + }, + { + "firstname": "Colin", + "lastname": "Altonen", + "company": "Smith, Arthur D Esq", + "address": "1201 18th St", + "city": "Denver", + "county": "Denver", + "state": "CO", + "zip": "80202", + "phone": "303-292-5477", + "fax": "303-292-4239", + "email": "colin@altonen.com", + "web": "http://www.colinaltonen.com", + "followers": 2587 + }, + { + "firstname": "Kerry", + "lastname": "Evertt", + "company": "Washington Crossing Inn", + "address": "337 S North Lake Blvd", + "city": "Altamonte Springs", + "county": "Seminole", + "state": "FL", + "zip": "32701", + "phone": "407-332-9851", + "fax": "407-332-1718", + "email": "kerry@evertt.com", + "web": "http://www.kerryevertt.com", + "followers": 6663 + }, + { + "firstname": "Kathie", + "lastname": "Argenti", + "company": "Microtel Franchise & Dev Corp", + "address": "410 Front St", + "city": "Brainerd", + "county": "Crow Wing", + "state": "MN", + "zip": "56401", + "phone": "218-828-9253", + "fax": "218-828-1401", + "email": "kathie@argenti.com", + "web": "http://www.kathieargenti.com", + "followers": 6260 + }, + { + "firstname": "Henrietta", + "lastname": "Cintora", + "company": "Keyes, Judith Droz Esq", + "address": "1063 Fm Wzzw", + "city": "Milton", + "county": "Cabell", + "state": "WV", + "zip": "25541", + "phone": "304-743-5440", + "fax": "304-743-7475", + "email": "henrietta@cintora.com", + "web": "http://www.henriettacintora.com", + "followers": 9622 + }, + { + "firstname": "Mariano", + "lastname": "Maury", + "company": "Donut & Sandwich Shop", + "address": "1092 Saint Georges Ave", + "city": "Rahway", + "county": "Union", + "state": "NJ", + "zip": "07065", + "phone": "732-388-1579", + "fax": "732-388-9355", + "email": "mariano@maury.com", + "web": "http://www.marianomaury.com", + "followers": 6254 + }, + { + "firstname": "Lottie", + "lastname": "Voll", + "company": "Mason, Joseph G Esq", + "address": "55 E 10th Ave", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97401", + "phone": "541-342-7282", + "fax": "541-342-4657", + "email": "lottie@voll.com", + "web": "http://www.lottievoll.com", + "followers": 1092 + }, + { + "firstname": "Ofelia", + "lastname": "Sheffler", + "company": "Rimpsey Agency", + "address": "628 Pacific Ave", + "city": "Oxnard", + "county": "Ventura", + "state": "CA", + "zip": "93030", + "phone": "805-483-7161", + "fax": "805-483-5693", + "email": "ofelia@sheffler.com", + "web": "http://www.ofeliasheffler.com", + "followers": 1096 + }, + { + "firstname": "Gaston", + "lastname": "Cieloszyk", + "company": "Center For Hope Hospice Inc", + "address": "1160 Wccs", + "city": "Homer City", + "county": "Indiana", + "state": "PA", + "zip": "15748", + "phone": "724-479-0355", + "fax": "724-479-7077", + "email": "gaston@cieloszyk.com", + "web": "http://www.gastoncieloszyk.com", + "followers": 7409 + }, + { + "firstname": "Karla", + "lastname": "Ken", + "company": "Nicollet Process Engineering", + "address": "2135 11th St", + "city": "Rockford", + "county": "Winnebago", + "state": "IL", + "zip": "61104", + "phone": "815-968-0369", + "fax": "815-968-7904", + "email": "karla@ken.com", + "web": "http://www.karlaken.com", + 
"followers": 1296 + }, + { + "firstname": "Avery", + "lastname": "Parbol", + "company": "Schlackman, William H", + "address": "22343 Se Stark St", + "city": "Gresham", + "county": "Multnomah", + "state": "OR", + "zip": "97030", + "phone": "503-666-1948", + "fax": "503-666-9241", + "email": "avery@parbol.com", + "web": "http://www.averyparbol.com", + "followers": 3515 + }, + { + "firstname": "John", + "lastname": "Chipley", + "company": "Manpower Temporary Services", + "address": "2 Horizon Rd #-2", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-224-7741", + "fax": "201-224-7282", + "email": "john@chipley.com", + "web": "http://www.johnchipley.com", + "followers": 7710 + }, + { + "firstname": "Luella", + "lastname": "Pliner", + "company": "United Waste Systems", + "address": "3437 N 12th Ave", + "city": "Pensacola", + "county": "Escambia", + "state": "FL", + "zip": "32503", + "phone": "850-434-2521", + "fax": "850-434-5228", + "email": "luella@pliner.com", + "web": "http://www.luellapliner.com", + "followers": 5191 + }, + { + "firstname": "Elvira", + "lastname": "Blumenthal", + "company": "Stell Mortgage Investors", + "address": "108 Washington St", + "city": "Keokuk", + "county": "Lee", + "state": "IA", + "zip": "52632", + "phone": "319-524-6237", + "fax": "319-524-9435", + "email": "elvira@blumenthal.com", + "web": "http://www.elvirablumenthal.com", + "followers": 6616 + }, + { + "firstname": "Tyree", + "lastname": "Courrege", + "company": "Stitch Craft", + "address": "13201 Northwest Fwy", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77040", + "phone": "713-690-9216", + "fax": "713-690-4043", + "email": "tyree@courrege.com", + "web": "http://www.tyreecourrege.com", + "followers": 5210 + }, + { + "firstname": "Ramon", + "lastname": "Amaral", + "company": "Air Academy Federal Credit Un", + "address": "700 W Highway 287", + "city": "Lander", + "county": "Fremont", + "state": "WY", + "zip": "82520", + "phone": "307-332-2667", + "fax": "307-332-3893", + "email": "ramon@amaral.com", + "web": "http://www.ramonamaral.com", + "followers": 8659 + }, + { + "firstname": "Wilfredo", + "lastname": "Gidley", + "company": "Mclaughlin, John F Esq", + "address": "2255 Kuhio Ave #-1203", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-924-2610", + "fax": "808-924-7666", + "email": "wilfredo@gidley.com", + "web": "http://www.wilfredogidley.com", + "followers": 8860 + }, + { + "firstname": "Gracie", + "lastname": "Ehn", + "company": "P C Systems", + "address": "Kahala", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96816", + "phone": "808-247-8624", + "fax": "808-247-7982", + "email": "gracie@ehn.com", + "web": "http://www.gracieehn.com", + "followers": 2870 + }, + { + "firstname": "Dorthy", + "lastname": "Alexy", + "company": "Frank Siviglia & Co", + "address": "Pearlridge", + "city": "Aiea", + "county": "Honolulu", + "state": "HI", + "zip": "96701", + "phone": "808-247-4421", + "fax": "808-247-7192", + "email": "dorthy@alexy.com", + "web": "http://www.dorthyalexy.com", + "followers": 1029 + }, + { + "firstname": "Bertie", + "lastname": "Luby", + "company": "Puckett, Dennis L Esq", + "address": "Windward", + "city": "Kaneohe", + "county": "Honolulu", + "state": "HI", + "zip": "96744", + "phone": "808-247-8062", + "fax": "808-247-2529", + "email": "bertie@luby.com", + "web": "http://www.bertieluby.com", + "followers": 2660 + }, + { + "firstname": "Rudy", + 
"lastname": "Kuhle", + "company": "General Insurcorp Inc", + "address": "1418 3rd Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10028", + "phone": "212-628-9987", + "fax": "212-628-1234", + "email": "rudy@kuhle.com", + "web": "http://www.rudykuhle.com", + "followers": 7201 + }, + { + "firstname": "Gale", + "lastname": "Nolau", + "company": "Lust, C James Esq", + "address": "104 N Aurora St", + "city": "Ithaca", + "county": "Tompkins", + "state": "NY", + "zip": "14850", + "phone": "607-277-1567", + "fax": "607-277-6524", + "email": "gale@nolau.com", + "web": "http://www.galenolau.com", + "followers": 6842 + }, + { + "firstname": "Kenya", + "lastname": "Bruni", + "company": "Hurley, Thomas J Jr", + "address": "280 N Midland Ave", + "city": "Saddle Brook", + "county": "Bergen", + "state": "NJ", + "zip": "07663", + "phone": "201-646-9077", + "fax": "201-646-8526", + "email": "kenya@bruni.com", + "web": "http://www.kenyabruni.com", + "followers": 9368 + }, + { + "firstname": "Tricia", + "lastname": "Kruss", + "company": "Edwards, Elwood L", + "address": "4685 Ne 14th St", + "city": "Des Moines", + "county": "Polk", + "state": "IA", + "zip": "50313", + "phone": "515-262-3267", + "fax": "515-262-6264", + "email": "tricia@kruss.com", + "web": "http://www.triciakruss.com", + "followers": 9671 + }, + { + "firstname": "Mack", + "lastname": "Jurasin", + "company": "Sherman, Michael D Esq", + "address": "1180 Dora Hwy", + "city": "Pulaski", + "county": "Pulaski", + "state": "VA", + "zip": "24301", + "phone": "540-980-4958", + "fax": "540-980-2978", + "email": "mack@jurasin.com", + "web": "http://www.mackjurasin.com", + "followers": 4557 + }, + { + "firstname": "Margarito", + "lastname": "Kornbau", + "company": "Acker Knitting Mills Inc", + "address": "303 W 15th St", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78701", + "phone": "512-478-0371", + "fax": "512-478-4449", + "email": "margarito@kornbau.com", + "web": "http://www.margaritokornbau.com", + "followers": 2072 + }, + { + "firstname": "Lucien", + "lastname": "Iurato", + "company": "Anderson Consulting", + "address": "3918 16th Ave", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11218", + "phone": "718-871-7952", + "fax": "718-871-3483", + "email": "lucien@iurato.com", + "web": "http://www.lucieniurato.com", + "followers": 9434 + }, + { + "firstname": "Jarvis", + "lastname": "Galas", + "company": "Younghans & Burke", + "address": "307 E President St", + "city": "Savannah", + "county": "Chatham", + "state": "GA", + "zip": "31401", + "phone": "912-236-8524", + "fax": "912-236-8705", + "email": "jarvis@galas.com", + "web": "http://www.jarvisgalas.com", + "followers": 2359 + }, + { + "firstname": "Billie", + "lastname": "Cowley", + "company": "Spears, Robert M Esq", + "address": "1700 Street Rd", + "city": "Warrington", + "county": "Bucks", + "state": "PA", + "zip": "18976", + "phone": "215-548-0842", + "fax": "215-548-4706", + "email": "billie@cowley.com", + "web": "http://www.billiecowley.com", + "followers": 2416 + }, + { + "firstname": "Jacinto", + "lastname": "Gawron", + "company": "Matt Kokkonen Insurance Agency", + "address": "1740 House", + "city": "Lumberville", + "county": "Bucks", + "state": "PA", + "zip": "18933", + "phone": "215-297-0120", + "fax": "215-297-5442", + "email": "jacinto@gawron.com", + "web": "http://www.jacintogawron.com", + "followers": 310 + }, + { + "firstname": "Randall", + "lastname": "Kluemper", + "company": "Lifestyles Organization", + 
"address": "Rt 16", + "city": "North Conway", + "county": "Carroll", + "state": "NH", + "zip": "03860", + "phone": "603-356-3217", + "fax": "603-356-6174", + "email": "randall@kluemper.com", + "web": "http://www.randallkluemper.com", + "followers": 5669 + }, + { + "firstname": "Enrique", + "lastname": "Oroark", + "company": "Callaghan, Kathleen M Esq", + "address": "34 W 17th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10011", + "phone": "212-366-5568", + "fax": "212-366-6877", + "email": "enrique@oroark.com", + "web": "http://www.enriqueoroark.com", + "followers": 3911 + }, + { + "firstname": "Alva", + "lastname": "Pennigton", + "company": "Citizens Savings Bank", + "address": "1275 County Road 210 W", + "city": "Jacksonville", + "county": "Saint Johns", + "state": "FL", + "zip": "32259", + "phone": "904-260-2345", + "fax": "904-260-3735", + "email": "alva@pennigton.com", + "web": "http://www.alvapennigton.com", + "followers": 7564 + }, + { + "firstname": "Socorro", + "lastname": "Balandran", + "company": "Mooring", + "address": "401 S Main St", + "city": "Greensburg", + "county": "Westmoreland", + "state": "PA", + "zip": "15601", + "phone": "724-834-6908", + "fax": "724-834-8831", + "email": "socorro@balandran.com", + "web": "http://www.socorrobalandran.com", + "followers": 7056 + }, + { + "firstname": "Nadia", + "lastname": "Wilshire", + "company": "Midwest Undercar Distributors", + "address": "1801 West Ave S", + "city": "La Crosse", + "county": "La Crosse", + "state": "WI", + "zip": "54601", + "phone": "608-788-4965", + "fax": "608-788-5946", + "email": "nadia@wilshire.com", + "web": "http://www.nadiawilshire.com", + "followers": 9311 + }, + { + "firstname": "Reginald", + "lastname": "Humes", + "company": "Cowley & Chidester", + "address": "44 N Main St", + "city": "Wolfeboro", + "county": "Carroll", + "state": "NH", + "zip": "03894", + "phone": "603-569-7730", + "fax": "603-569-8142", + "email": "reginald@humes.com", + "web": "http://www.reginaldhumes.com", + "followers": 8347 + }, + { + "firstname": "Lynda", + "lastname": "Caraway", + "company": "Lowe Art Museum", + "address": "1822 Spring Garden St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19130", + "phone": "215-564-3171", + "fax": "215-564-2241", + "email": "lynda@caraway.com", + "web": "http://www.lyndacaraway.com", + "followers": 3853 + }, + { + "firstname": "Saundra", + "lastname": "Mcaulay", + "company": "Rcf Inc", + "address": "2401 Cleveland Rd W", + "city": "Huron", + "county": "Erie", + "state": "OH", + "zip": "44839", + "phone": "419-433-5558", + "fax": "419-433-9756", + "email": "saundra@mcaulay.com", + "web": "http://www.saundramcaulay.com", + "followers": 1620 + }, + { + "firstname": "Allan", + "lastname": "Schwantd", + "company": "Micro Wire Products", + "address": "406 Ne 3rd St", + "city": "McMinnville", + "county": "Yamhill", + "state": "OR", + "zip": "97128", + "phone": "503-434-9666", + "fax": "503-434-3863", + "email": "allan@schwantd.com", + "web": "http://www.allanschwantd.com", + "followers": 6069 + }, + { + "firstname": "Wilmer", + "lastname": "Constantineau", + "company": "Nutra Source", + "address": "1745 W 18th Ave", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97402", + "phone": "541-345-4729", + "fax": "541-345-4884", + "email": "wilmer@constantineau.com", + "web": "http://www.wilmerconstantineau.com", + "followers": 1648 + }, + { + "firstname": "Savannah", + "lastname": "Kesich", + "company": "Wbnd Am", + 
"address": "221 Main", + "city": "Park City", + "county": "Summit", + "state": "UT", + "zip": "84060", + "phone": "435-645-0986", + "fax": "435-645-9504", + "email": "savannah@kesich.com", + "web": "http://www.savannahkesich.com", + "followers": 7325 + }, + { + "firstname": "Dwain", + "lastname": "Cuttitta", + "company": "Kintech Stamping Inc", + "address": "1919 Connecticut Ave Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20009", + "phone": "202-265-7854", + "fax": "202-265-9475", + "email": "dwain@cuttitta.com", + "web": "http://www.dwaincuttitta.com", + "followers": 8300 + }, + { + "firstname": "Krystle", + "lastname": "Stika", + "company": "Signature Inn", + "address": "3730 Fm", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77068", + "phone": "281-537-5324", + "fax": "281-537-3235", + "email": "krystle@stika.com", + "web": "http://www.krystlestika.com", + "followers": 2603 + }, + { + "firstname": "Felipe", + "lastname": "Gould", + "company": "Black, Ronald H", + "address": "2308 Bienville Blvd", + "city": "Ocean Springs", + "county": "Jackson", + "state": "MS", + "zip": "39564", + "phone": "228-875-2811", + "fax": "228-875-6402", + "email": "felipe@gould.com", + "web": "http://www.felipegould.com", + "followers": 9237 + }, + { + "firstname": "Steve", + "lastname": "Schorr", + "company": "Midwest Fire Protection Inc", + "address": "1810 N King St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-842-7045", + "fax": "808-842-7338", + "email": "steve@schorr.com", + "web": "http://www.steveschorr.com", + "followers": 1468 + }, + { + "firstname": "Naomi", + "lastname": "Caetano", + "company": "Bashlin Industries Inc", + "address": "50 Spring St #-1", + "city": "Cresskill", + "county": "Bergen", + "state": "NJ", + "zip": "07626", + "phone": "201-569-3572", + "fax": "201-569-5795", + "email": "naomi@caetano.com", + "web": "http://www.naomicaetano.com", + "followers": 1743 + }, + { + "firstname": "Melody", + "lastname": "Saddat", + "company": "Richards, Edward W Esq", + "address": "3540 S 84th St", + "city": "Omaha", + "county": "Douglas", + "state": "NE", + "zip": "68124", + "phone": "402-397-0581", + "fax": "402-397-8391", + "email": "melody@saddat.com", + "web": "http://www.melodysaddat.com", + "followers": 2442 + }, + { + "firstname": "Mitchel", + "lastname": "Harnar", + "company": "Copycat Quick Print", + "address": "1810 Pioneer Ave", + "city": "Cheyenne", + "county": "Laramie", + "state": "WY", + "zip": "82001", + "phone": "307-632-0256", + "fax": "307-632-2516", + "email": "mitchel@harnar.com", + "web": "http://www.mitchelharnar.com", + "followers": 4662 + }, + { + "firstname": "Sharlene", + "lastname": "Circelli", + "company": "Calibron Systems Inc", + "address": "4018 W Clearwater Ave", + "city": "Kennewick", + "county": "Benton", + "state": "WA", + "zip": "99336", + "phone": "509-783-5167", + "fax": "509-783-7346", + "email": "sharlene@circelli.com", + "web": "http://www.sharlenecircelli.com", + "followers": 6539 + }, + { + "firstname": "Sean", + "lastname": "Bonnet", + "company": "Corporate Alternatives Inc", + "address": "3043 Ridge Rd", + "city": "Lansing", + "county": "Cook", + "state": "IL", + "zip": "60438", + "phone": "708-474-4766", + "fax": "708-474-0011", + "email": "sean@bonnet.com", + "web": "http://www.seanbonnet.com", + "followers": 867 + }, + { + "firstname": "Travis", + "lastname": "Brockert", + "company": "Santa Cruz Title Co", + "address": "7828 
N 19th Ave", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85021", + "phone": "602-995-1362", + "fax": "602-995-0966", + "email": "travis@brockert.com", + "web": "http://www.travisbrockert.com", + "followers": 7421 + }, + { + "firstname": "Candice", + "lastname": "Bruckman", + "company": "Fernando Foods Inc", + "address": "611 1st Ave N", + "city": "Humboldt", + "county": "Humboldt", + "state": "IA", + "zip": "50548", + "phone": "515-332-0809", + "fax": "515-332-9083", + "email": "candice@bruckman.com", + "web": "http://www.candicebruckman.com", + "followers": 7084 + }, + { + "firstname": "Mabel", + "lastname": "Weeden", + "company": "Pepsi Cola Gen Bottlers Inc", + "address": "300 E Phillips St", + "city": "Richardson", + "county": "Dallas", + "state": "TX", + "zip": "75081", + "phone": "972-235-5619", + "fax": "972-235-1843", + "email": "mabel@weeden.com", + "web": "http://www.mabelweeden.com", + "followers": 2674 + }, + { + "firstname": "Armando", + "lastname": "Papik", + "company": "Cryogenic Society Of America", + "address": "615 W Markham St", + "city": "Little Rock", + "county": "Pulaski", + "state": "AR", + "zip": "72201", + "phone": "501-376-4154", + "fax": "501-376-0608", + "email": "armando@papik.com", + "web": "http://www.armandopapik.com", + "followers": 7152 + }, + { + "firstname": "Kevin", + "lastname": "Edd", + "company": "Peebles, William J Esq", + "address": "64 Dyerville Ave", + "city": "Johnston", + "county": "Providence", + "state": "RI", + "zip": "02919", + "phone": "401-453-8514", + "fax": "401-453-7085", + "email": "kevin@edd.com", + "web": "http://www.kevinedd.com", + "followers": 3568 + }, + { + "firstname": "Raphael", + "lastname": "Bickel", + "company": "S Shamash & Sons Inc", + "address": "550 N Brand Blvd #-800", + "city": "Glendale", + "county": "Los Angeles", + "state": "CA", + "zip": "91203", + "phone": "818-246-1195", + "fax": "818-246-4734", + "email": "raphael@bickel.com", + "web": "http://www.raphaelbickel.com", + "followers": 1365 + }, + { + "firstname": "Darren", + "lastname": "Merlin", + "company": "Pozzuolo & Perkiss Pc", + "address": "550 N Edward St", + "city": "Decatur", + "county": "Macon", + "state": "IL", + "zip": "62522", + "phone": "217-428-0453", + "fax": "217-428-1491", + "email": "darren@merlin.com", + "web": "http://www.darrenmerlin.com", + "followers": 7653 + }, + { + "firstname": "Francis", + "lastname": "Soo", + "company": "Allen Industrial Supply", + "address": "218 W Main St", + "city": "Sparta", + "county": "Monroe", + "state": "WI", + "zip": "54656", + "phone": "608-269-7306", + "fax": "608-269-3359", + "email": "francis@soo.com", + "web": "http://www.francissoo.com", + "followers": 2482 + }, + { + "firstname": "Nelly", + "lastname": "Jakuboski", + "company": "Hammerman, Stanley M Esq", + "address": "103 Main St", + "city": "Ridgefield", + "county": "Fairfield", + "state": "CT", + "zip": "06877", + "phone": "203-438-9250", + "fax": "203-438-5109", + "email": "nelly@jakuboski.com", + "web": "http://www.nellyjakuboski.com", + "followers": 5338 + }, + { + "firstname": "Mitzi", + "lastname": "Ihenyen", + "company": "Helm, Norman O", + "address": "979 3rd Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-838-8303", + "fax": "212-838-3221", + "email": "mitzi@ihenyen.com", + "web": "http://www.mitziihenyen.com", + "followers": 9264 + }, + { + "firstname": "Kathleen", + "lastname": "Beresnyak", + "company": "R & E Associates", + "address": "100 W 25th Ave", + 
"city": "San Mateo", + "county": "San Mateo", + "state": "CA", + "zip": "94403", + "phone": "650-349-6809", + "fax": "650-349-5962", + "email": "kathleen@beresnyak.com", + "web": "http://www.kathleenberesnyak.com", + "followers": 2853 + }, + { + "firstname": "Adela", + "lastname": "Cervantsz", + "company": "Arizona Awards", + "address": "102 5th St N", + "city": "Clanton", + "county": "Chilton", + "state": "AL", + "zip": "35045", + "phone": "205-755-4137", + "fax": "205-755-1034", + "email": "adela@cervantsz.com", + "web": "http://www.adelacervantsz.com", + "followers": 9876 + }, + { + "firstname": "Randal", + "lastname": "Gansen", + "company": "Quik Print", + "address": "1 First Federal Plz", + "city": "Rochester", + "county": "Monroe", + "state": "NY", + "zip": "14614", + "phone": "585-238-8558", + "fax": "585-238-7764", + "email": "randal@gansen.com", + "web": "http://www.randalgansen.com", + "followers": 4019 + }, + { + "firstname": "Alyssa", + "lastname": "Biasotti", + "company": "Johnson Hardware Co", + "address": "22 James St", + "city": "Middletown", + "county": "Orange", + "state": "NY", + "zip": "10940", + "phone": "845-343-1878", + "fax": "845-343-5354", + "email": "alyssa@biasotti.com", + "web": "http://www.alyssabiasotti.com", + "followers": 3684 + }, + { + "firstname": "Janet", + "lastname": "Schaffter", + "company": "Hall, Camden M Esq", + "address": "131 Rimbach St", + "city": "Hammond", + "county": "Lake", + "state": "IN", + "zip": "46320", + "phone": "219-853-9283", + "fax": "219-853-9329", + "email": "janet@schaffter.com", + "web": "http://www.janetschaffter.com", + "followers": 2431 + }, + { + "firstname": "Armando", + "lastname": "Kolm", + "company": "Cooper & Cooper Cpas", + "address": "201 N Main St", + "city": "Anderson", + "county": "Anderson", + "state": "SC", + "zip": "29621", + "phone": "864-260-3642", + "fax": "864-260-9205", + "email": "armando@kolm.com", + "web": "http://www.armandokolm.com", + "followers": 4357 + }, + { + "firstname": "Gil", + "lastname": "Scarpa", + "company": "Hughes, James D Esq", + "address": "12 E Broad St", + "city": "Hazleton", + "county": "Luzerne", + "state": "PA", + "zip": "18201", + "phone": "570-459-9281", + "fax": "570-459-5191", + "email": "gil@scarpa.com", + "web": "http://www.gilscarpa.com", + "followers": 7691 + }, + { + "firstname": "Vanessa", + "lastname": "Lewallen", + "company": "Fargo Glass & Paint Co", + "address": "5 E Main", + "city": "Centerburg", + "county": "Knox", + "state": "OH", + "zip": "43011", + "phone": "740-625-8098", + "fax": "740-625-1696", + "email": "vanessa@lewallen.com", + "web": "http://www.vanessalewallen.com", + "followers": 2710 + }, + { + "firstname": "Burton", + "lastname": "Brining", + "company": "Corcoran Machine Company", + "address": "135 E Liberty St", + "city": "Wooster", + "county": "Wayne", + "state": "OH", + "zip": "44691", + "phone": "330-262-5481", + "fax": "330-262-7555", + "email": "burton@brining.com", + "web": "http://www.burtonbrining.com", + "followers": 8158 + }, + { + "firstname": "Rosalie", + "lastname": "Krigger", + "company": "Aaron, William Esq", + "address": "330 Route 211 E", + "city": "Middletown", + "county": "Orange", + "state": "NY", + "zip": "10940", + "phone": "845-343-2313", + "fax": "845-343-2979", + "email": "rosalie@krigger.com", + "web": "http://www.rosaliekrigger.com", + "followers": 1411 + }, + { + "firstname": "Tammie", + "lastname": "Schwartzwalde", + "company": "Cox, Thomas E", + "address": "415 Center St", + "city": "Ironton", + "county": "Lawrence", + 
"state": "OH", + "zip": "45638", + "phone": "740-532-5488", + "fax": "740-532-0319", + "email": "tammie@schwartzwalde.com", + "web": "http://www.tammieschwartzwalde.com", + "followers": 1367 + }, + { + "firstname": "Darrin", + "lastname": "Neiss", + "company": "Delaney, James J Jr", + "address": "101 W Central Blvd", + "city": "Kewanee", + "county": "Henry", + "state": "IL", + "zip": "61443", + "phone": "309-852-5127", + "fax": "309-852-8638", + "email": "darrin@neiss.com", + "web": "http://www.darrinneiss.com", + "followers": 5748 + }, + { + "firstname": "Rosalia", + "lastname": "Kennemur", + "company": "Reagan, Thomas J Esq", + "address": "222 S 10th St", + "city": "Oakdale", + "county": "Allen", + "state": "LA", + "zip": "71463", + "phone": "318-335-5586", + "fax": "318-335-1873", + "email": "rosalia@kennemur.com", + "web": "http://www.rosaliakennemur.com", + "followers": 5984 + }, + { + "firstname": "Callie", + "lastname": "Leboeuf", + "company": "Town Motors", + "address": "100 S 2nd Ave", + "city": "Alpena", + "county": "Alpena", + "state": "MI", + "zip": "49707", + "phone": "989-354-3344", + "fax": "989-354-3712", + "email": "callie@leboeuf.com", + "web": "http://www.callieleboeuf.com", + "followers": 3607 + }, + { + "firstname": "Patty", + "lastname": "Bernasconi", + "company": "Porter Wright Morris & Arthur", + "address": "851 Fort Street Mall", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96813", + "phone": "808-531-2621", + "fax": "808-531-6234", + "email": "patty@bernasconi.com", + "web": "http://www.pattybernasconi.com", + "followers": 3012 + }, + { + "firstname": "Elmo", + "lastname": "Gabouer", + "company": "Conduit & Foundation Corp", + "address": "275 W Bridge St", + "city": "New Hope", + "county": "Bucks", + "state": "PA", + "zip": "18938", + "phone": "215-862-6538", + "fax": "215-862-7006", + "email": "elmo@gabouer.com", + "web": "http://www.elmogabouer.com", + "followers": 9593 + }, + { + "firstname": "Logan", + "lastname": "Muhl", + "company": "Brown, Phillip C Esq", + "address": "126 S Bellevue Ave", + "city": "Langhorne", + "county": "Bucks", + "state": "PA", + "zip": "19047", + "phone": "215-757-6124", + "fax": "215-757-2785", + "email": "logan@muhl.com", + "web": "http://www.loganmuhl.com", + "followers": 741 + }, + { + "firstname": "Vivian", + "lastname": "Brzostowski", + "company": "Savage, Philip M Iii", + "address": "118 Mill St", + "city": "Bristol", + "county": "Bucks", + "state": "PA", + "zip": "19007", + "phone": "215-788-2791", + "fax": "215-788-3902", + "email": "vivian@brzostowski.com", + "web": "http://www.vivianbrzostowski.com", + "followers": 1121 + }, + { + "firstname": "Efren", + "lastname": "Baucher", + "company": "R O Binson Inc", + "address": "Rts 232 & 413", + "city": "Newtown", + "county": "Bucks", + "state": "PA", + "zip": "18940", + "phone": "215-598-4644", + "fax": "215-598-5929", + "email": "efren@baucher.com", + "web": "http://www.efrenbaucher.com", + "followers": 8199 + }, + { + "firstname": "Kurtis", + "lastname": "Mcbay", + "company": "P C Enterprises Ltd", + "address": "737 Levittown Ctr", + "city": "Levittown", + "county": "Bucks", + "state": "PA", + "zip": "19055", + "phone": "215-946-6048", + "fax": "215-946-6458", + "email": "kurtis@mcbay.com", + "web": "http://www.kurtismcbay.com", + "followers": 8315 + }, + { + "firstname": "Guillermo", + "lastname": "Tsang", + "company": "Gillis, Donald W Esq", + "address": "16 Highland Park Way", + "city": "Levittown", + "county": "Bucks", + "state": "PA", + "zip": 
"19056", + "phone": "215-949-7912", + "fax": "215-949-8919", + "email": "guillermo@tsang.com", + "web": "http://www.guillermotsang.com", + "followers": 4007 + }, + { + "firstname": "Milton", + "lastname": "Kuhlman", + "company": "Imag Corp", + "address": "237 Jackson St Sw", + "city": "Camden", + "county": "Ouachita", + "state": "AR", + "zip": "71701", + "phone": "870-836-9021", + "fax": "870-836-2283", + "email": "milton@kuhlman.com", + "web": "http://www.miltonkuhlman.com", + "followers": 7034 + }, + { + "firstname": "Naomi", + "lastname": "Greenly", + "company": "Kpmg Peat Marwick Llp", + "address": "1400 Gault Ave N", + "city": "Fort Payne", + "county": "De Kalb", + "state": "AL", + "zip": "35967", + "phone": "256-845-1216", + "fax": "256-845-2469", + "email": "naomi@greenly.com", + "web": "http://www.naomigreenly.com", + "followers": 916 + }, + { + "firstname": "Mary", + "lastname": "Maurizio", + "company": "Carey Filter White & Boland", + "address": "404 Main St", + "city": "Delta", + "county": "Fulton", + "state": "OH", + "zip": "43515", + "phone": "419-822-7176", + "fax": "419-822-0591", + "email": "mary@maurizio.com", + "web": "http://www.marymaurizio.com", + "followers": 6083 + }, + { + "firstname": "Caitlin", + "lastname": "Reiniger", + "company": "White, Lawrence R Esq", + "address": "140 N Columbus St", + "city": "Galion", + "county": "Crawford", + "state": "OH", + "zip": "44833", + "phone": "419-468-6910", + "fax": "419-468-9018", + "email": "caitlin@reiniger.com", + "web": "http://www.caitlinreiniger.com", + "followers": 641 + }, + { + "firstname": "Coleman", + "lastname": "Cuneo", + "company": "M & M Mars", + "address": "25 E High St", + "city": "Waynesburg", + "county": "Greene", + "state": "PA", + "zip": "15370", + "phone": "724-627-4378", + "fax": "724-627-2305", + "email": "coleman@cuneo.com", + "web": "http://www.colemancuneo.com", + "followers": 8657 + }, + { + "firstname": "Rachel", + "lastname": "Larrison", + "company": "Ipa The Editing House", + "address": "3721 Oberlin Ave", + "city": "Lorain", + "county": "Lorain", + "state": "OH", + "zip": "44053", + "phone": "440-282-3729", + "fax": "440-282-6918", + "email": "rachel@larrison.com", + "web": "http://www.rachellarrison.com", + "followers": 4562 + }, + { + "firstname": "Dwayne", + "lastname": "Maddalena", + "company": "Ebbeson, James O Esq", + "address": "532 Court St", + "city": "Pekin", + "county": "Tazewell", + "state": "IL", + "zip": "61554", + "phone": "309-347-1137", + "fax": "309-347-9282", + "email": "dwayne@maddalena.com", + "web": "http://www.dwaynemaddalena.com", + "followers": 7384 + }, + { + "firstname": "Angelique", + "lastname": "Schermerhorn", + "company": "Safety Direct Inc", + "address": "511 Saint Johns Ave", + "city": "Palatka", + "county": "Putnam", + "state": "FL", + "zip": "32177", + "phone": "386-328-7869", + "fax": "386-328-1499", + "email": "angelique@schermerhorn.com", + "web": "http://www.angeliqueschermerhorn.com", + "followers": 6181 + }, + { + "firstname": "Junior", + "lastname": "Wadlinger", + "company": "Sonos Music", + "address": "185 E Market St", + "city": "Warren", + "county": "Trumbull", + "state": "OH", + "zip": "44481", + "phone": "330-393-9794", + "fax": "330-393-6808", + "email": "junior@wadlinger.com", + "web": "http://www.juniorwadlinger.com", + "followers": 7690 + }, + { + "firstname": "Darrel", + "lastname": "Tork", + "company": "S & T Machining", + "address": "2121 S Mannheim Rd", + "city": "Westchester", + "county": "Cook", + "state": "IL", + "zip": "60154", + 
"phone": "708-865-8091", + "fax": "708-865-8984", + "email": "darrel@tork.com", + "web": "http://www.darreltork.com", + "followers": 9708 + }, + { + "firstname": "Lana", + "lastname": "Garrigus", + "company": "Russell Builders & Hardware", + "address": "118 Ne 3rd St", + "city": "McMinnville", + "county": "Yamhill", + "state": "OR", + "zip": "97128", + "phone": "503-434-2642", + "fax": "503-434-8121", + "email": "lana@garrigus.com", + "web": "http://www.lanagarrigus.com", + "followers": 3048 + }, + { + "firstname": "Jonathon", + "lastname": "Waldall", + "company": "Mission Hills Escrow", + "address": "300 Hampton St", + "city": "Walterboro", + "county": "Colleton", + "state": "SC", + "zip": "29488", + "phone": "843-549-9461", + "fax": "843-549-0125", + "email": "jonathon@waldall.com", + "web": "http://www.jonathonwaldall.com", + "followers": 8039 + }, + { + "firstname": "Kristine", + "lastname": "Paker", + "company": "Chagrin Valley Massotherapy", + "address": "301 N Pine St", + "city": "Creston", + "county": "Union", + "state": "IA", + "zip": "50801", + "phone": "641-782-7169", + "fax": "641-782-7962", + "email": "kristine@paker.com", + "web": "http://www.kristinepaker.com", + "followers": 7977 + }, + { + "firstname": "Dwain", + "lastname": "Agricola", + "company": "Beatty Satchell Everngam & Co", + "address": "211 N Main St", + "city": "Leitchfield", + "county": "Grayson", + "state": "KY", + "zip": "42754", + "phone": "270-259-5194", + "fax": "270-259-0821", + "email": "dwain@agricola.com", + "web": "http://www.dwainagricola.com", + "followers": 8410 + }, + { + "firstname": "Jewel", + "lastname": "Agresta", + "company": "Md Assn Cert Pub Accts Inc", + "address": "4565 Harrison St", + "city": "Hillside", + "county": "Cook", + "state": "IL", + "zip": "60162", + "phone": "708-449-7139", + "fax": "708-449-2963", + "email": "jewel@agresta.com", + "web": "http://www.jewelagresta.com", + "followers": 293 + }, + { + "firstname": "Georgette", + "lastname": "Bandyk", + "company": "Specialty Alumn Castings Inc", + "address": "1965 Wakefield Ave", + "city": "Petersburg", + "county": "Petersburg City", + "state": "VA", + "zip": "23805", + "phone": "804-796-2746", + "fax": "804-796-5351", + "email": "georgette@bandyk.com", + "web": "http://www.georgettebandyk.com", + "followers": 9865 + }, + { + "firstname": "Geri", + "lastname": "Forness", + "company": "Quality Dynamics Group", + "address": "Capitol Ave", + "city": "Corydon", + "county": "Harrison", + "state": "IN", + "zip": "47112", + "phone": "812-738-9416", + "fax": "812-738-4816", + "email": "geri@forness.com", + "web": "http://www.geriforness.com", + "followers": 7788 + }, + { + "firstname": "Modesto", + "lastname": "Scroggie", + "company": "Bulloch, Bruce Cpa", + "address": "300 Orlando Dr", + "city": "Raritan", + "county": "Somerset", + "state": "NJ", + "zip": "08869", + "phone": "908-980-5621", + "fax": "908-980-9842", + "email": "modesto@scroggie.com", + "web": "http://www.modestoscroggie.com", + "followers": 5671 + }, + { + "firstname": "Curt", + "lastname": "Menedez", + "company": "J & J Machinery Repair Inc", + "address": "207 Yukon", + "city": "Tampa", + "county": "Hillsborough", + "state": "FL", + "zip": "33604", + "phone": "813-932-8602", + "fax": "813-932-4548", + "email": "curt@menedez.com", + "web": "http://www.curtmenedez.com", + "followers": 1311 + }, + { + "firstname": "Karen", + "lastname": "Zombo", + "company": "Healthcare Family Credit Union", + "address": "3112 W Kennedy Blvd", + "city": "Tampa", + "county": "Hillsborough", + 
"state": "FL", + "zip": "33609", + "phone": "813-872-4288", + "fax": "813-872-8262", + "email": "karen@zombo.com", + "web": "http://www.karenzombo.com", + "followers": 2543 + }, + { + "firstname": "Lora", + "lastname": "Lendor", + "company": "Advanced Electromagnetics Inc", + "address": "7 W Darlington Ave", + "city": "Kissimmee", + "county": "Osceola", + "state": "FL", + "zip": "34741", + "phone": "407-870-0382", + "fax": "407-870-6229", + "email": "lora@lendor.com", + "web": "http://www.loralendor.com", + "followers": 5947 + }, + { + "firstname": "Felipe", + "lastname": "Mahone", + "company": "Apartment Mart", + "address": "1001 Bishop St #-2850", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96813", + "phone": "808-536-3239", + "fax": "808-536-1231", + "email": "felipe@mahone.com", + "web": "http://www.felipemahone.com", + "followers": 4427 + }, + { + "firstname": "Rosalyn", + "lastname": "Daulton", + "company": "Rodgard Corp", + "address": "300 Broadacres Dr", + "city": "Bloomfield", + "county": "Essex", + "state": "NJ", + "zip": "07003", + "phone": "973-338-8552", + "fax": "973-338-1603", + "email": "rosalyn@daulton.com", + "web": "http://www.rosalyndaulton.com", + "followers": 2667 + }, + { + "firstname": "Marquita", + "lastname": "Bousman", + "company": "Constantine, Katherine A Esq", + "address": "30 Highland Ave", + "city": "Warwick", + "county": "Orange", + "state": "NY", + "zip": "10990", + "phone": "845-986-0909", + "fax": "845-986-2447", + "email": "marquita@bousman.com", + "web": "http://www.marquitabousman.com", + "followers": 4315 + }, + { + "firstname": "Carla", + "lastname": "Sirbaugh", + "company": "Urso, Natale L Esq", + "address": "110 S La Brea Ave #-22", + "city": "Inglewood", + "county": "Los Angeles", + "state": "CA", + "zip": "90301", + "phone": "310-412-6653", + "fax": "310-412-1067", + "email": "carla@sirbaugh.com", + "web": "http://www.carlasirbaugh.com", + "followers": 9701 + }, + { + "firstname": "Wes", + "lastname": "Fontanella", + "company": "Woodside Travel Trust", + "address": "1369 W Redondo Beach Blvd", + "city": "Gardena", + "county": "Los Angeles", + "state": "CA", + "zip": "90247", + "phone": "310-515-3065", + "fax": "310-515-2515", + "email": "wes@fontanella.com", + "web": "http://www.wesfontanella.com", + "followers": 1717 + }, + { + "firstname": "Meredith", + "lastname": "Ivrin", + "company": "Hamilton Financial Corp", + "address": "323 N Gilbert St", + "city": "Danville", + "county": "Vermilion", + "state": "IL", + "zip": "61832", + "phone": "217-446-7172", + "fax": "217-446-2369", + "email": "meredith@ivrin.com", + "web": "http://www.meredithivrin.com", + "followers": 7827 + }, + { + "firstname": "Laurie", + "lastname": "Bigg", + "company": "Essc Inc", + "address": "14500 Lakeside Cir", + "city": "Sterling Heights", + "county": "Macomb", + "state": "MI", + "zip": "48313", + "phone": "586-247-6171", + "fax": "586-247-9791", + "email": "laurie@bigg.com", + "web": "http://www.lauriebigg.com", + "followers": 8684 + }, + { + "firstname": "Barton", + "lastname": "Friesner", + "company": "Optical Supply", + "address": "1 Summit Ct", + "city": "Fishkill", + "county": "Dutchess", + "state": "NY", + "zip": "12524", + "phone": "845-896-6652", + "fax": "845-896-1692", + "email": "barton@friesner.com", + "web": "http://www.bartonfriesner.com", + "followers": 4889 + }, + { + "firstname": "Sophie", + "lastname": "Langner", + "company": "Kapetanakis, Alexander Esq", + "address": "535 Ward Ave #-204", + "city": "Honolulu", + "county": 
"Honolulu", + "state": "HI", + "zip": "96814", + "phone": "808-545-7695", + "fax": "808-545-8636", + "email": "sophie@langner.com", + "web": "http://www.sophielangner.com", + "followers": 1596 + }, + { + "firstname": "Garfield", + "lastname": "Lijewski", + "company": "Denker, Aaron Esq", + "address": "6401 N Lincoln Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60645", + "phone": "773-976-3827", + "fax": "773-976-5586", + "email": "garfield@lijewski.com", + "web": "http://www.garfieldlijewski.com", + "followers": 5955 + }, + { + "firstname": "Warren", + "lastname": "Speach", + "company": "E Norwalk Crmc Tile & Mrbl Co", + "address": "361 Park Ave", + "city": "Scotch Plains", + "county": "Union", + "state": "NJ", + "zip": "07076", + "phone": "908-322-3846", + "fax": "908-322-6744", + "email": "warren@speach.com", + "web": "http://www.warrenspeach.com", + "followers": 6741 + }, + { + "firstname": "Madonna", + "lastname": "Cosby", + "company": "Emanuel Reider Architects Inc", + "address": "135 Main St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94105", + "phone": "415-956-4437", + "fax": "415-956-5134", + "email": "madonna@cosby.com", + "web": "http://www.madonnacosby.com", + "followers": 3985 + }, + { + "firstname": "Valeria", + "lastname": "Lingbeek", + "company": "Recreation Director", + "address": "state", + "city": "Newtown", + "county": "Bucks", + "state": "PA", + "zip": "18940", + "phone": "215-968-8421", + "fax": "215-968-1567", + "email": "valeria@lingbeek.com", + "web": "http://www.valerialingbeek.com", + "followers": 8824 + }, + { + "firstname": "Heath", + "lastname": "Vanalphen", + "company": "California Stat Min & Mnrl Mus", + "address": "227 Commercial St", + "city": "Provincetown", + "county": "Barnstable", + "state": "MA", + "zip": "02657", + "phone": "508-487-6010", + "fax": "508-487-0597", + "email": "heath@vanalphen.com", + "web": "http://www.heathvanalphen.com", + "followers": 6846 + }, + { + "firstname": "Marisa", + "lastname": "Woldridge", + "company": "Wegner, Tim Esq", + "address": "153 Baltimore St", + "city": "Cumberland", + "county": "Allegany", + "state": "MD", + "zip": "21502", + "phone": "301-759-7421", + "fax": "301-759-9676", + "email": "marisa@woldridge.com", + "web": "http://www.marisawoldridge.com", + "followers": 6009 + }, + { + "firstname": "Rene", + "lastname": "Dummermuth", + "company": "Super 8 Motel", + "address": "2 Ridgedale Ave", + "city": "Cedar Knolls", + "county": "Morris", + "state": "NJ", + "zip": "07927", + "phone": "973-292-7918", + "fax": "973-292-5898", + "email": "rene@dummermuth.com", + "web": "http://www.renedummermuth.com", + "followers": 1687 + }, + { + "firstname": "Helga", + "lastname": "Windle", + "company": "Loew, Andrea H Esq", + "address": "99185 Moanalua Rd #-101", + "city": "Aiea", + "county": "Honolulu", + "state": "HI", + "zip": "96701", + "phone": "808-487-7779", + "fax": "808-487-6258", + "email": "helga@windle.com", + "web": "http://www.helgawindle.com", + "followers": 56 + }, + { + "firstname": "Margot", + "lastname": "Arenburg", + "company": "Mcivor, Carolyn Md", + "address": "736 N Mills Ave", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32803", + "phone": "407-896-1593", + "fax": "407-896-6679", + "email": "margot@arenburg.com", + "web": "http://www.margotarenburg.com", + "followers": 7445 + }, + { + "firstname": "Sheila", + "lastname": "Holloran", + "company": "Warehouse On Wheels", + "address": "126 S Main St", + "city": 
"Clyde", + "county": "Sandusky", + "state": "OH", + "zip": "43410", + "phone": "419-547-9428", + "fax": "419-547-4835", + "email": "sheila@holloran.com", + "web": "http://www.sheilaholloran.com", + "followers": 9682 + }, + { + "firstname": "Melinda", + "lastname": "Carleton", + "company": "Cenol Co", + "address": "395 Revilo Ave", + "city": "Shirley", + "county": "Suffolk", + "state": "NY", + "zip": "11967", + "phone": "631-399-1636", + "fax": "631-399-6025", + "email": "melinda@carleton.com", + "web": "http://www.melindacarleton.com", + "followers": 7154 + }, + { + "firstname": "Ike", + "lastname": "Zeolla", + "company": "Halpin, Irene A Esq", + "address": "1900 L St Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20036", + "phone": "202-331-1409", + "fax": "202-331-7781", + "email": "ike@zeolla.com", + "web": "http://www.ikezeolla.com", + "followers": 7418 + }, + { + "firstname": "Elmo", + "lastname": "Dagenais", + "company": "P C Routing Inc", + "address": "12914 Old Stage Rd", + "city": "Chester", + "county": "Chesterfield", + "state": "VA", + "zip": "23831", + "phone": "804-796-5647", + "fax": "804-796-9493", + "email": "elmo@dagenais.com", + "web": "http://www.elmodagenais.com", + "followers": 7355 + }, + { + "firstname": "Valentine", + "lastname": "Granberry", + "company": "Sunnyvale Travel", + "address": "1019 Shadick Dr", + "city": "Orange City", + "county": "Volusia", + "state": "FL", + "zip": "32763", + "phone": "407-775-4269", + "fax": "407-775-0598", + "email": "valentine@granberry.com", + "web": "http://www.valentinegranberry.com", + "followers": 7021 + }, + { + "firstname": "Waldo", + "lastname": "Sisk", + "company": "Muller Drugs Inc", + "address": "2211 Us Highway 19", + "city": "Holiday", + "county": "Pasco", + "state": "FL", + "zip": "34691", + "phone": "727-934-3827", + "fax": "727-934-7181", + "email": "waldo@sisk.com", + "web": "http://www.waldosisk.com", + "followers": 2109 + }, + { + "firstname": "Robt", + "lastname": "Braithwaite", + "company": "Meyer, Janet Md", + "address": "320 W Mclane St", + "city": "Osceola", + "county": "Clarke", + "state": "IA", + "zip": "50213", + "phone": "641-342-1276", + "fax": "641-342-6031", + "email": "robt@braithwaite.com", + "web": "http://www.robtbraithwaite.com", + "followers": 1336 + }, + { + "firstname": "Corinne", + "lastname": "Cowan", + "company": "Ward Equipment Co", + "address": "20 Montana Ave", + "city": "Laurel", + "county": "Yellowstone", + "state": "MT", + "zip": "59044", + "phone": "406-628-4030", + "fax": "406-628-9418", + "email": "corinne@cowan.com", + "web": "http://www.corinnecowan.com", + "followers": 7049 + }, + { + "firstname": "Rebeca", + "lastname": "Brumet", + "company": "Kingston Office Supplies Inc", + "address": "936 N Western Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60622", + "phone": "773-772-4015", + "fax": "773-772-1603", + "email": "rebeca@brumet.com", + "web": "http://www.rebecabrumet.com", + "followers": 202 + }, + { + "firstname": "Lynn", + "lastname": "Saulsberry", + "company": "Printing Factory Inc", + "address": "2725 W Mcdowell Rd", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85009", + "phone": "602-272-8326", + "fax": "602-272-3143", + "email": "lynn@saulsberry.com", + "web": "http://www.lynnsaulsberry.com", + "followers": 5265 + }, + { + "firstname": "Hannah", + "lastname": "Facio", + "company": "Cmptr Pros For Scl", + "address": "115 E Church St", + "city": "Elberton", + "county": 
"Elbert", + "state": "GA", + "zip": "30635", + "phone": "706-283-8280", + "fax": "706-283-6916", + "email": "hannah@facio.com", + "web": "http://www.hannahfacio.com", + "followers": 4321 + }, + { + "firstname": "Benjamin", + "lastname": "Schkade", + "company": "Port Brownsville Pub Scale Inc", + "address": "1636 E 1st Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-278-8687", + "fax": "907-278-7166", + "email": "benjamin@schkade.com", + "web": "http://www.benjaminschkade.com", + "followers": 5846 + }, + { + "firstname": "Athena", + "lastname": "Fontanilla", + "company": "Willamette Hobbies", + "address": "5020 Germantown Ave", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19144", + "phone": "215-438-9675", + "fax": "215-438-1716", + "email": "athena@fontanilla.com", + "web": "http://www.athenafontanilla.com", + "followers": 5342 + }, + { + "firstname": "Alene", + "lastname": "Rabeck", + "company": "Bucks County Of", + "address": "475 E 162nd St", + "city": "South Holland", + "county": "Cook", + "state": "IL", + "zip": "60473", + "phone": "708-333-8056", + "fax": "708-333-2125", + "email": "alene@rabeck.com", + "web": "http://www.alenerabeck.com", + "followers": 2815 + }, + { + "firstname": "Yvette", + "lastname": "Kokoska", + "company": "Automation Products Inc", + "address": "200 Valley Dr", + "city": "Brisbane", + "county": "San Mateo", + "state": "CA", + "zip": "94005", + "phone": "650-468-3592", + "fax": "650-468-7716", + "email": "yvette@kokoska.com", + "web": "http://www.yvettekokoska.com", + "followers": 6175 + }, + { + "firstname": "Petra", + "lastname": "Clemmens", + "company": "Belton Industries Inc", + "address": "980 N Federal Hwy", + "city": "Boca Raton", + "county": "Palm Beach", + "state": "FL", + "zip": "33432", + "phone": "561-394-2152", + "fax": "561-394-1574", + "email": "petra@clemmens.com", + "web": "http://www.petraclemmens.com", + "followers": 5263 + }, + { + "firstname": "Carmel", + "lastname": "Overfelt", + "company": "Woodworkers Supply Inc", + "address": "6801 Lake Worth Rd", + "city": "Lake Worth", + "county": "Palm Beach", + "state": "FL", + "zip": "33467", + "phone": "561-965-5167", + "fax": "561-965-1433", + "email": "carmel@overfelt.com", + "web": "http://www.carmeloverfelt.com", + "followers": 5868 + }, + { + "firstname": "Danette", + "lastname": "Fostervold", + "company": "Flach, Douglas Esq", + "address": "6920 Santa Teresa Blvd", + "city": "San Jose", + "county": "Santa Clara", + "state": "CA", + "zip": "95119", + "phone": "408-225-1319", + "fax": "408-225-5205", + "email": "danette@fostervold.com", + "web": "http://www.danettefostervold.com", + "followers": 1315 + }, + { + "firstname": "Vince", + "lastname": "Ettel", + "company": "Breen Trucking", + "address": "408 Main St", + "city": "Springfield", + "county": "Sarpy", + "state": "NE", + "zip": "68059", + "phone": "402-399-6999", + "fax": "402-399-6478", + "email": "vince@ettel.com", + "web": "http://www.vinceettel.com", + "followers": 7780 + }, + { + "firstname": "Davis", + "lastname": "Heideman", + "company": "Dennis J Wall Atty At Law Pa", + "address": "801 W 5th St", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76102", + "phone": "817-332-7902", + "fax": "817-332-5439", + "email": "davis@heideman.com", + "web": "http://www.davisheideman.com", + "followers": 4778 + }, + { + "firstname": "Bradly", + "lastname": "Hasselvander", + "company": "Public Works Department Office", + 
"address": "2302 Artesia Blvd", + "city": "Redondo Beach", + "county": "Los Angeles", + "state": "CA", + "zip": "90278", + "phone": "310-374-2374", + "fax": "310-374-2363", + "email": "bradly@hasselvander.com", + "web": "http://www.bradlyhasselvander.com", + "followers": 7831 + }, + { + "firstname": "Nathanial", + "lastname": "Phoenix", + "company": "Precision Steel Rule Die Co", + "address": "1000 Nw 105th St", + "city": "Oklahoma City", + "county": "Oklahoma", + "state": "OK", + "zip": "73114", + "phone": "405-748-7637", + "fax": "405-748-1856", + "email": "nathanial@phoenix.com", + "web": "http://www.nathanialphoenix.com", + "followers": 8308 + }, + { + "firstname": "Lamar", + "lastname": "Mckibben", + "company": "Battaglia, Jack M Esq", + "address": "1620 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-864-7338", + "fax": "415-864-7623", + "email": "lamar@mckibben.com", + "web": "http://www.lamarmckibben.com", + "followers": 4193 + }, + { + "firstname": "Shanna", + "lastname": "Numkena", + "company": "Anderson Independent Mail", + "address": "1426 5th Pl Nw", + "city": "Rochester", + "county": "Olmsted", + "state": "MN", + "zip": "55901", + "phone": "507-280-1856", + "fax": "507-280-6844", + "email": "shanna@numkena.com", + "web": "http://www.shannanumkena.com", + "followers": 1364 + }, + { + "firstname": "Helena", + "lastname": "Suermann", + "company": "Stubenberge, James A Esq", + "address": "897 Independence Ave", + "city": "Mountain View", + "county": "Santa Clara", + "state": "CA", + "zip": "94043", + "phone": "650-965-0255", + "fax": "650-965-3368", + "email": "helena@suermann.com", + "web": "http://www.helenasuermann.com", + "followers": 4536 + }, + { + "firstname": "Delphine", + "lastname": "Helmich", + "company": "Friends Hospital", + "address": "50 Aviation Way", + "city": "Watsonville", + "county": "Santa Cruz", + "state": "CA", + "zip": "95076", + "phone": "831-763-4348", + "fax": "831-763-0923", + "email": "delphine@helmich.com", + "web": "http://www.delphinehelmich.com", + "followers": 7383 + }, + { + "firstname": "Barbara", + "lastname": "Hindley", + "company": "Kirin Amgen", + "address": "904 N Lake St", + "city": "Burbank", + "county": "Los Angeles", + "state": "CA", + "zip": "91502", + "phone": "818-841-8886", + "fax": "818-841-8221", + "email": "barbara@hindley.com", + "web": "http://www.barbarahindley.com", + "followers": 9155 + }, + { + "firstname": "Sheryl", + "lastname": "Sisofo", + "company": "Thrifty Sign Stop", + "address": "1049 S Mccord Rd", + "city": "Holland", + "county": "Lucas", + "state": "OH", + "zip": "43528", + "phone": "419-865-8702", + "fax": "419-865-1836", + "email": "sheryl@sisofo.com", + "web": "http://www.sherylsisofo.com", + "followers": 5693 + }, + { + "firstname": "Robyn", + "lastname": "Christophel", + "company": "Woodward, John C Esq", + "address": "3420 E Flamingo Rd", + "city": "Las Vegas", + "county": "Clark", + "state": "NV", + "zip": "89121", + "phone": "702-458-1072", + "fax": "702-458-2093", + "email": "robyn@christophel.com", + "web": "http://www.robynchristophel.com", + "followers": 3971 + }, + { + "firstname": "Gayla", + "lastname": "Geimer", + "company": "Ortman Mccain Co", + "address": "1280 Price Ave", + "city": "Pomona", + "county": "Los Angeles", + "state": "CA", + "zip": "91767", + "phone": "909-620-6453", + "fax": "909-620-2768", + "email": "gayla@geimer.com", + "web": "http://www.gaylageimer.com", + "followers": 8969 + }, + { + "firstname": "Evan", + 
"lastname": "Pyfrom", + "company": "Nevada Baking Co", + "address": "5430 Alpha Rd", + "city": "Dallas", + "county": "Dallas", + "state": "TX", + "zip": "75240", + "phone": "214-661-4625", + "fax": "214-661-8804", + "email": "evan@pyfrom.com", + "web": "http://www.evanpyfrom.com", + "followers": 2516 + }, + { + "firstname": "Chad", + "lastname": "Miklas", + "company": "Red Carpet Inn", + "address": "31 S Grove St", + "city": "East Aurora", + "county": "Erie", + "state": "NY", + "zip": "14052", + "phone": "716-655-2736", + "fax": "716-655-2749", + "email": "chad@miklas.com", + "web": "http://www.chadmiklas.com", + "followers": 5357 + }, + { + "firstname": "Trey", + "lastname": "Tout", + "company": "Breen, Sean E Esq", + "address": "100 Mbc Dr", + "city": "Shawano", + "county": "Shawano", + "state": "WI", + "zip": "54166", + "phone": "715-526-6806", + "fax": "715-526-2421", + "email": "trey@tout.com", + "web": "http://www.treytout.com", + "followers": 205 + }, + { + "firstname": "Isabell", + "lastname": "Armout", + "company": "True Electric Corp", + "address": "7895 S Cessna Ave", + "city": "Gaithersburg", + "county": "Montgomery", + "state": "MD", + "zip": "20879", + "phone": "301-921-0406", + "fax": "301-921-1251", + "email": "isabell@armout.com", + "web": "http://www.isabellarmout.com", + "followers": 4878 + }, + { + "firstname": "Alejandro", + "lastname": "Mascall", + "company": "Railway Educational Bureau", + "address": "2350 Duke St", + "city": "Alexandria", + "county": "Alexandria City", + "state": "VA", + "zip": "22314", + "phone": "703-684-2882", + "fax": "703-684-8561", + "email": "alejandro@mascall.com", + "web": "http://www.alejandromascall.com", + "followers": 3512 + }, + { + "firstname": "Kennith", + "lastname": "Kirklin", + "company": "Sears Roebuck And Co", + "address": "2303 21st Ave S", + "city": "Nashville", + "county": "Davidson", + "state": "TN", + "zip": "37212", + "phone": "615-385-1598", + "fax": "615-385-6946", + "email": "kennith@kirklin.com", + "web": "http://www.kennithkirklin.com", + "followers": 5087 + }, + { + "firstname": "Ike", + "lastname": "Benthin", + "company": "Lee, Harry Esq", + "address": "1062 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-255-5277", + "fax": "415-255-6543", + "email": "ike@benthin.com", + "web": "http://www.ikebenthin.com", + "followers": 8473 + }, + { + "firstname": "Donald", + "lastname": "Sherretts", + "company": "Nylonge Corporation", + "address": "1062 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-255-7718", + "fax": "415-255-7088", + "email": "donald@sherretts.com", + "web": "http://www.donaldsherretts.com", + "followers": 2332 + }, + { + "firstname": "Lina", + "lastname": "Hybarger", + "company": "L & H Central Office", + "address": "1828 Jefferson Pl Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20036", + "phone": "202-833-4983", + "fax": "202-833-3174", + "email": "lina@hybarger.com", + "web": "http://www.linahybarger.com", + "followers": 9793 + }, + { + "firstname": "Rebekah", + "lastname": "Padley", + "company": "Reed Engineering Inc", + "address": "200 E Delawr Pl", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60611", + "phone": "312-944-1877", + "fax": "312-944-1477", + "email": "rebekah@padley.com", + "web": "http://www.rebekahpadley.com", + "followers": 3839 + }, + { + "firstname": "Marion", + "lastname": 
"Gaulden", + "company": "Madden, John H Jr", + "address": "200 W South St", + "city": "Charlottesville", + "county": "Charlottesville City", + "state": "VA", + "zip": "22902", + "phone": "434-979-9335", + "fax": "434-979-2694", + "email": "marion@gaulden.com", + "web": "http://www.mariongaulden.com", + "followers": 5625 + }, + { + "firstname": "Maurine", + "lastname": "Monroy", + "company": "Central Distribution System", + "address": "2000 Linwood Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-947-8922", + "fax": "201-947-4235", + "email": "maurine@monroy.com", + "web": "http://www.maurinemonroy.com", + "followers": 5828 + }, + { + "firstname": "Rosanna", + "lastname": "Sandrock", + "company": "Computer X Consulting", + "address": "1797 Lakewood Ter Se", + "city": "Atlanta", + "county": "Fulton", + "state": "GA", + "zip": "30315", + "phone": "404-627-4604", + "fax": "404-627-4276", + "email": "rosanna@sandrock.com", + "web": "http://www.rosannasandrock.com", + "followers": 3044 + }, + { + "firstname": "Marcelino", + "lastname": "Maggs", + "company": "Rascher & Betzold Inc", + "address": "201 E Pine St", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32801", + "phone": "407-420-1152", + "fax": "407-420-7195", + "email": "marcelino@maggs.com", + "web": "http://www.marcelinomaggs.com", + "followers": 5320 + }, + { + "firstname": "Florine", + "lastname": "Willardson", + "company": "Lunt, Donald C Esq", + "address": "5605 Ne 105th Ave", + "city": "Portland", + "county": "Multnomah", + "state": "OR", + "zip": "97220", + "phone": "503-256-6559", + "fax": "503-256-8982", + "email": "florine@willardson.com", + "web": "http://www.florinewillardson.com", + "followers": 2336 + }, + { + "firstname": "Jude", + "lastname": "Haza", + "company": "Howard Fabrication", + "address": "1348 Liberty Pike", + "city": "Franklin", + "county": "Williamson", + "state": "TN", + "zip": "37067", + "phone": "615-790-3984", + "fax": "615-790-3042", + "email": "jude@haza.com", + "web": "http://www.judehaza.com", + "followers": 7311 + }, + { + "firstname": "Eldon", + "lastname": "Sutch", + "company": "Friesen And Kane Public Accts", + "address": "1818 E Atlantic St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19134", + "phone": "215-743-2414", + "fax": "215-743-2529", + "email": "eldon@sutch.com", + "web": "http://www.eldonsutch.com", + "followers": 6895 + }, + { + "firstname": "Lashonda", + "lastname": "Enote", + "company": "Nichols Village The Inn", + "address": "6301 Owensmouth Ave", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91367", + "phone": "818-704-8490", + "fax": "818-704-7539", + "email": "lashonda@enote.com", + "web": "http://www.lashondaenote.com", + "Note": "Ancien Dailymotion, recontré à LeWeb London 2012", + "followers": 6383 + }, + { + "firstname": "Marla", + "lastname": "Folz", + "company": "Odonoghue C Kevin", + "address": "201 Electronics Blvd Sw", + "city": "Huntsville", + "county": "Madison", + "state": "AL", + "zip": "35824", + "phone": "256-464-3329", + "fax": "256-464-6964", + "email": "marla@folz.com", + "web": "http://www.marlafolz.com", + "Note": "Product Manager at Sage France & WebMaster of ConseilsMarketing.Fr Interview at LEWeb", + "followers": 5861 + }, + { + "firstname": "Reginald", + "lastname": "Lunan", + "company": "Healey Chevy Olds Buick Geo", + "address": "985 Parker Ct", + "city": "Santa Clara", + "county": "Santa Clara", + "state": 
"CA", + "zip": "95050", + "phone": "408-727-1747", + "fax": "408-727-0884", + "email": "reginald@lunan.com", + "web": "http://www.reginaldlunan.com", + "followers": 7075 + }, + { + "firstname": "Kyle", + "lastname": "Lindauer", + "company": "Gem Tec Inc", + "address": "2000 E Jefferson St", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85034", + "phone": "602-258-5196", + "fax": "602-258-8609", + "email": "kyle@lindauer.com", + "web": "http://www.kylelindauer.com", + "followers": 6277 + }, + { + "firstname": "Son", + "lastname": "Marschke", + "company": "Evenings Dlght Fireplaces", + "address": "1119 Wheeler Ave", + "city": "Scranton", + "county": "Lackawanna", + "state": "PA", + "zip": "18510", + "phone": "570-969-0886", + "fax": "570-969-8176", + "email": "son@marschke.com", + "web": "http://www.sonmarschke.com", + "followers": 3481 + }, + { + "firstname": "Johnie", + "lastname": "Minaai", + "company": "Darling, Pamela E", + "address": "2100 Linwood Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-592-4771", + "fax": "201-592-8423", + "email": "johnie@minaai.com", + "web": "http://www.johnieminaai.com", + "followers": 5903 + }, + { + "firstname": "Kelli", + "lastname": "Varrato", + "company": "Frances Meyer Inc", + "address": "2505 Congress St", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92110", + "phone": "858-298-3969", + "fax": "858-298-6695", + "email": "kelli@varrato.com", + "web": "http://www.kellivarrato.com", + "followers": 9891 + }, + { + "firstname": "Neva", + "lastname": "Marsell", + "company": "Comfort Inn Wilshire", + "address": "1312 W Lincoln Ave", + "city": "Olivia", + "county": "Renville", + "state": "MN", + "zip": "56277", + "phone": "320-523-4975", + "fax": "320-523-8378", + "email": "neva@marsell.com", + "web": "http://www.nevamarsell.com", + "followers": 4114 + }, + { + "firstname": "Brice", + "lastname": "Hedglin", + "company": "Cupkovic, Walter D Esq", + "address": "2809 Granny White Pike", + "city": "Nashville", + "county": "Davidson", + "state": "TN", + "zip": "37204", + "phone": "615-292-9016", + "fax": "615-292-9027", + "email": "brice@hedglin.com", + "web": "http://www.bricehedglin.com", + "followers": 7730 + }, + { + "firstname": "Terrance", + "lastname": "Nimmer", + "company": "C D Short Foods Inc", + "address": "1400 N Woodward Ave", + "city": "Bloomfield Hills", + "county": "Oakland", + "state": "MI", + "zip": "48304", + "phone": "248-647-0653", + "fax": "248-647-1999", + "email": "terrance@nimmer.com", + "web": "http://www.terrancenimmer.com", + "followers": 7388 + }, + { + "firstname": "Carol", + "lastname": "Krisman", + "company": "Uniglobe Transeas Travel", + "address": "100 E 85th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10028", + "phone": "212-472-7877", + "fax": "212-472-9579", + "email": "carol@krisman.com", + "web": "http://www.carolkrisman.com", + "followers": 5985 + }, + { + "firstname": "Dollie", + "lastname": "Pillitteri", + "company": "Jiffy Moving & Storage Company", + "address": "4024 Merchant Rd", + "city": "Fort Wayne", + "county": "Allen", + "state": "IN", + "zip": "46818", + "phone": "260-489-3094", + "fax": "260-489-4697", + "email": "dollie@pillitteri.com", + "web": "http://www.dolliepillitteri.com", + "followers": 2624 + }, + { + "firstname": "Mellissa", + "lastname": "Sule", + "company": "Dowse, Geoffrey Esq", + "address": "92 Argonaut #-270", + "city": "Aliso Viejo", + "county": 
"Orange", + "state": "CA", + "zip": "92656", + "phone": "949-768-6176", + "fax": "949-768-8107", + "email": "mellissa@sule.com", + "web": "http://www.mellissasule.com", + "followers": 2709 + }, + { + "firstname": "Antony", + "lastname": "Thierauf", + "company": "Gutzwiller, Robert H Esq", + "address": "4915 Industrial Way", + "city": "Coeur d Alene", + "county": "Kootenai", + "state": "ID", + "zip": "83814", + "phone": "208-667-5252", + "fax": "208-667-5935", + "email": "antony@thierauf.com", + "web": "http://www.antonythierauf.com", + "followers": 1044 + }, + { + "firstname": "Reina", + "lastname": "Reisenauer", + "company": "Terrance Fox", + "address": "207 N Main St", + "city": "Hutchins", + "county": "Dallas", + "state": "TX", + "zip": "75141", + "phone": "972-225-9930", + "fax": "972-225-9569", + "email": "reina@reisenauer.com", + "web": "http://www.reinareisenauer.com", + "followers": 2953 + }, + { + "firstname": "Zane", + "lastname": "Sulikowski", + "company": "Meijer Associates Credit Union", + "address": "2375 3rd St", + "city": "Riverside", + "county": "Riverside", + "state": "CA", + "zip": "92507", + "phone": "951-683-4479", + "fax": "951-683-9932", + "email": "zane@sulikowski.com", + "web": "http://www.zanesulikowski.com", + "followers": 7275 + }, + { + "firstname": "Hilario", + "lastname": "Cassa", + "company": "Independence Assocaites Inc", + "address": "2222 Santa Monica Blvd", + "city": "Santa Monica", + "county": "Los Angeles", + "state": "CA", + "zip": "90404", + "phone": "310-828-6710", + "fax": "310-828-1895", + "email": "hilario@cassa.com", + "web": "http://www.hilariocassa.com", + "followers": 994 + }, + { + "firstname": "Veronica", + "lastname": "Radman", + "company": "Martin, Anthony D Esq", + "address": "235 W Main St", + "city": "Charlottesville", + "county": "Charlottesville City", + "state": "VA", + "zip": "22902", + "phone": "434-979-3306", + "fax": "434-979-9777", + "email": "veronica@radman.com", + "web": "http://www.veronicaradman.com", + "followers": 7568 + }, + { + "firstname": "Teri", + "lastname": "Erlewine", + "company": "League Of Kans Municipalities", + "address": "370 34th St St", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33711", + "phone": "727-327-3850", + "fax": "727-327-8494", + "email": "teri@erlewine.com", + "web": "http://www.terierlewine.com", + "followers": 6077 + }, + { + "firstname": "Alissa", + "lastname": "Mountjoy", + "company": "Technical & Mgmt Svc Corp", + "address": "6585 Commerce Blvd", + "city": "Rohnert Park", + "county": "Sonoma", + "state": "CA", + "zip": "94928", + "phone": "707-585-9715", + "fax": "707-585-7011", + "email": "alissa@mountjoy.com", + "web": "http://www.alissamountjoy.com", + "followers": 4886 + }, + { + "firstname": "Helene", + "lastname": "Iberg", + "company": "Spec Check Inc", + "address": "24800 Rockside Rd", + "city": "Bedford", + "county": "Cuyahoga", + "state": "OH", + "zip": "44146", + "phone": "440-786-6052", + "fax": "440-786-9246", + "email": "helene@iberg.com", + "web": "http://www.heleneiberg.com", + "followers": 716 + }, + { + "firstname": "Lona", + "lastname": "Scronce", + "company": "L & L Builders", + "address": "Rte 6 & 209", + "city": "Matamoras", + "county": "Pike", + "state": "PA", + "zip": "18336", + "phone": "570-296-4820", + "fax": "570-296-2054", + "email": "lona@scronce.com", + "web": "http://www.lonascronce.com", + "followers": 4687 + }, + { + "firstname": "Jeremy", + "lastname": "Lampi", + "company": "E Henderson Inc", + "address": "150 Sawkill 
Ave", + "city": "Milford", + "county": "Pike", + "state": "PA", + "zip": "18337", + "phone": "570-296-7797", + "fax": "570-296-4647", + "email": "jeremy@lampi.com", + "web": "http://www.jeremylampi.com", + "followers": 4714 + }, + { + "firstname": "Mitch", + "lastname": "Schattner", + "company": "Cosgrove Eisenberg & Kiley Pc", + "address": "3001 Geary Blvd", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94118", + "phone": "415-668-8105", + "fax": "415-668-5841", + "email": "mitch@schattner.com", + "web": "http://www.mitchschattner.com", + "followers": 4388 + }, + { + "firstname": "Hans", + "lastname": "Carlan", + "company": "Midlen & Guillot Chartered", + "address": "509 W 4th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-276-2956", + "fax": "907-276-6002", + "email": "hans@carlan.com", + "web": "http://www.hanscarlan.com", + "followers": 985 + }, + { + "firstname": "Concetta", + "lastname": "Sarchett", + "company": "Barco/chromatics Inc", + "address": "2405 Grand Blvd", + "city": "Kansas City", + "county": "Jackson", + "state": "MO", + "zip": "64108", + "phone": "816-274-3833", + "fax": "816-274-6897", + "email": "concetta@sarchett.com", + "web": "http://www.concettasarchett.com", + "followers": 3086 + }, + { + "firstname": "Isaac", + "lastname": "Zackery", + "company": "Holiday Inn Of Issaquah", + "address": "321 Palmer Rd", + "city": "Denville", + "county": "Morris", + "state": "NJ", + "zip": "07834", + "phone": "973-328-5943", + "fax": "973-328-1903", + "email": "isaac@zackery.com", + "web": "http://www.isaaczackery.com", + "followers": 501 + }, + { + "firstname": "Doug", + "lastname": "Matrisciano", + "company": "Cefpi", + "address": "12500 Ne 10th Pl", + "city": "Bellevue", + "county": "King", + "state": "WA", + "zip": "98005", + "phone": "425-451-5906", + "fax": "425-451-1273", + "email": "doug@matrisciano.com", + "web": "http://www.dougmatrisciano.com", + "followers": 9054 + }, + { + "firstname": "Devon", + "lastname": "Samrah", + "company": "Software Pursuits Inc", + "address": "1219 Pine Ave", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32824", + "phone": "407-240-2401", + "fax": "407-240-8312", + "email": "devon@samrah.com", + "web": "http://www.devonsamrah.com", + "followers": 6795 + }, + { + "firstname": "Amos", + "lastname": "Linnan", + "company": "Quincy, Jim", + "address": "3960 W 26th St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60623", + "phone": "773-277-8332", + "fax": "773-277-4756", + "email": "amos@linnan.com", + "web": "http://www.amoslinnan.com", + "followers": 6297 + }, + { + "firstname": "Manuel", + "lastname": "Dienhart", + "company": "Bohning Co Ltd", + "address": "40 E Mcmicken Ave", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45210", + "phone": "513-357-4669", + "fax": "513-357-7989", + "email": "manuel@dienhart.com", + "web": "http://www.manueldienhart.com", + "followers": 6771 + }, + { + "firstname": "Audra", + "lastname": "Cantu", + "company": "Chesapeake Telephone Systems", + "address": "935 S 2nd St", + "city": "Plainfield", + "county": "Union", + "state": "NJ", + "zip": "07063", + "phone": "908-756-1816", + "fax": "908-756-5441", + "email": "audra@cantu.com", + "web": "http://www.audracantu.com", + "followers": 3088 + }, + { + "firstname": "Keisha", + "lastname": "Ransonet", + "company": "Hsk Decker", + "address": "1049 Lakloey Dr", + "city": "North Pole", + "county": 
"Fairbanks North Star", + "state": "AK", + "zip": "99705", + "phone": "907-488-6897", + "fax": "907-488-2093", + "email": "keisha@ransonet.com", + "web": "http://www.keisharansonet.com", + "followers": 5340 + }, + { + "firstname": "Rolando", + "lastname": "Baumann", + "company": "Erie Brush Co", + "address": "921 Sw Washington St #-321", + "city": "Portland", + "county": "Multnomah", + "state": "OR", + "zip": "97205", + "phone": "503-241-6723", + "fax": "503-241-7691", + "email": "rolando@baumann.com", + "web": "http://www.rolandobaumann.com", + "followers": 9754 + }, + { + "firstname": "Maryanne", + "lastname": "Whyman", + "company": "Walls, Robert E Esq", + "address": "1008 S San Pedro St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-748-1137", + "fax": "213-748-0447", + "email": "maryanne@whyman.com", + "web": "http://www.maryannewhyman.com", + "followers": 844 + }, + { + "firstname": "Kurtis", + "lastname": "Asberry", + "company": "Budgetel Inns", + "address": "Box #-37223", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79937", + "phone": "915-591-1621", + "fax": "915-591-3614", + "email": "kurtis@asberry.com", + "web": "http://www.kurtisasberry.com", + "followers": 8502 + }, + { + "firstname": "Ed", + "lastname": "Gompf", + "company": "Corro Therm Inc", + "address": "3 B Floor Care & Hskpg Svc", + "city": "Catawissa", + "county": "Columbia", + "state": "PA", + "zip": "17820", + "phone": "570-799-2838", + "fax": "570-799-4583", + "email": "ed@gompf.com", + "web": "http://www.edgompf.com", + "followers": 8705 + }, + { + "firstname": "Norman", + "lastname": "Betance", + "company": "Precision Electric Co Inc", + "address": "7949 E Acoma Dr", + "city": "Scottsdale", + "county": "Maricopa", + "state": "AZ", + "zip": "85260", + "phone": "480-991-7884", + "fax": "480-991-6547", + "email": "norman@betance.com", + "web": "http://www.normanbetance.com", + "followers": 4602 + }, + { + "firstname": "Berta", + "lastname": "Karczewski", + "company": "Sather Eng Inc", + "address": "1035 N Mcqueen Rd #-133", + "city": "Gilbert", + "county": "Maricopa", + "state": "AZ", + "zip": "85233", + "phone": "480-926-0770", + "fax": "480-926-7533", + "email": "berta@karczewski.com", + "web": "http://www.bertakarczewski.com", + "followers": 1093 + }, + { + "firstname": "Mac", + "lastname": "Marksberry", + "company": "Pursell, David B Esq", + "address": "112 W Plum", + "city": "Doniphan", + "county": "Hall", + "state": "NE", + "zip": "68832", + "phone": "402-845-4275", + "fax": "402-845-8229", + "email": "mac@marksberry.com", + "web": "http://www.macmarksberry.com", + "followers": 6081 + }, + { + "firstname": "Sandra", + "lastname": "Graen", + "company": "Action Remediation Co", + "address": "106 Erie St", + "city": "Hutchinson", + "county": "McLeod", + "state": "MN", + "zip": "55350", + "phone": "320-587-3844", + "fax": "320-587-7201", + "email": "sandra@graen.com", + "web": "http://www.sandragraen.com", + "followers": 4844 + }, + { + "firstname": "Lee", + "lastname": "Javens", + "company": "Dyer, James R Esq", + "address": "6086 N Lyons Rd", + "city": "Burlington", + "county": "Racine", + "state": "WI", + "zip": "53105", + "phone": "262-763-9582", + "fax": "262-763-3845", + "email": "lee@javens.com", + "web": "http://www.leejavens.com", + "followers": 2954 + }, + { + "firstname": "Fran", + "lastname": "Zanders", + "company": "River City Body Co", + "address": "6312 S Yellowstone Hwy", + "city": "Idaho Falls", + "county": 
"Bonneville", + "state": "ID", + "zip": "83402", + "phone": "208-525-6418", + "fax": "208-525-5501", + "email": "fran@zanders.com", + "web": "http://www.franzanders.com", + "followers": 9590 + }, + { + "firstname": "Lane", + "lastname": "Brantz", + "company": "Kings Inn", + "address": "106 Erie St", + "city": "Hutchinson", + "county": "McLeod", + "state": "MN", + "zip": "55350", + "phone": "320-587-2903", + "fax": "320-587-3448", + "email": "lane@brantz.com", + "web": "http://www.lanebrantz.com", + "followers": 361 + }, + { + "firstname": "Bess", + "lastname": "Marso", + "company": "Lesher Printers Inc", + "address": "15542 Chemical Ln", + "city": "Huntington Beach", + "county": "Orange", + "state": "CA", + "zip": "92649", + "phone": "714-895-4582", + "fax": "714-895-8188", + "email": "bess@marso.com", + "web": "http://www.bessmarso.com", + "followers": 9552 + }, + { + "firstname": "Tamara", + "lastname": "Declue", + "company": "Glen Burnie The Bank Of", + "address": "1900 W Loop S #-600", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77027", + "phone": "713-871-3958", + "fax": "713-871-6355", + "email": "tamara@declue.com", + "web": "http://www.tamaradeclue.com", + "followers": 3229 + }, + { + "firstname": "Denise", + "lastname": "Speegle", + "company": "Shipley Oil Company", + "address": "2078 Foster Ave", + "city": "Wheeling", + "county": "Cook", + "state": "IL", + "zip": "60090", + "phone": "847-870-8743", + "fax": "847-870-6026", + "email": "denise@speegle.com", + "web": "http://www.denisespeegle.com", + "followers": 1139 + }, + { + "firstname": "Lynda", + "lastname": "Youtsey", + "company": "Accurate Color Inc", + "address": "1370 S Bertelsen Rd", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97402", + "phone": "541-342-0606", + "fax": "541-342-0655", + "email": "lynda@youtsey.com", + "web": "http://www.lyndayoutsey.com", + "followers": 378 + }, + { + "firstname": "Diann", + "lastname": "Burigsay", + "company": "Snyder, Stephen E Esq", + "address": "13026 S Normandie Ave", + "city": "Gardena", + "county": "Los Angeles", + "state": "CA", + "zip": "90249", + "phone": "310-321-8278", + "fax": "310-321-0564", + "email": "diann@burigsay.com", + "web": "http://www.diannburigsay.com", + "followers": 6193 + }, + { + "firstname": "Mari", + "lastname": "Hwang", + "company": "Play Craft Pontoon Co", + "address": "23352 El Toro Rd", + "city": "Lake Forest", + "county": "Orange", + "state": "CA", + "zip": "92630", + "phone": "949-583-6901", + "fax": "949-583-7758", + "email": "mari@hwang.com", + "web": "http://www.marihwang.com", + "followers": 6719 + }, + { + "firstname": "Shanna", + "lastname": "Neundorfer", + "company": "Door Systems", + "address": "833 E Allegheny Ave", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19134", + "phone": "215-426-9722", + "fax": "215-426-8416", + "email": "shanna@neundorfer.com", + "web": "http://www.shannaneundorfer.com", + "followers": 9759 + }, + { + "firstname": "Sherwood", + "lastname": "Detillier", + "company": "Minteq International", + "address": "Box #-851", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91365", + "phone": "818-703-9160", + "fax": "818-703-0447", + "email": "sherwood@detillier.com", + "web": "http://www.sherwooddetillier.com", + "followers": 6816 + }, + { + "firstname": "Walton", + "lastname": "Schwallie", + "company": "Nielsen, Laura W Md", + "address": "154 Main St", + "city": "Upton", + "county": "Worcester", + "state": 
"MA", + "zip": "01568", + "phone": "508-529-8783", + "fax": "508-529-8368", + "email": "walton@schwallie.com", + "web": "http://www.waltonschwallie.com", + "followers": 4498 + }, + { + "firstname": "Judy", + "lastname": "Gartenmayer", + "company": "Pro Infusion Pharm Inc", + "address": "3260 W New Haven Ave", + "city": "Melbourne", + "county": "Brevard", + "state": "FL", + "zip": "32904", + "phone": "321-676-3091", + "fax": "321-676-3378", + "email": "judy@gartenmayer.com", + "web": "http://www.judygartenmayer.com", + "followers": 5730 + }, + { + "firstname": "Antione", + "lastname": "Mccleary", + "company": "Cerberus Pyrotronics", + "address": "1110 25th Ave N", + "city": "Fargo", + "county": "Cass", + "state": "ND", + "zip": "58102", + "phone": "701-293-8410", + "fax": "701-293-7439", + "email": "antione@mccleary.com", + "web": "http://www.antionemccleary.com", + "followers": 5273 + }, + { + "firstname": "Kay", + "lastname": "Ganguli", + "company": "Hanson, Bruce Esq", + "address": "3763 Scripps Dr", + "city": "Las Vegas", + "county": "Clark", + "state": "NV", + "zip": "89103", + "phone": "702-876-3089", + "fax": "702-876-9367", + "email": "kay@ganguli.com", + "web": "http://www.kayganguli.com", + "followers": 6368 + }, + { + "firstname": "Oma", + "lastname": "Duffy", + "company": "Laun Law Offices", + "address": "255 Industrial Dr", + "city": "Franklin", + "county": "Warren", + "state": "OH", + "zip": "45005", + "phone": "937-746-7537", + "fax": "937-746-4129", + "email": "oma@duffy.com", + "web": "http://www.omaduffy.com", + "followers": 9375 + }, + { + "firstname": "Devon", + "lastname": "Teston", + "company": "Bilton, Dean H Esq", + "address": "1900 W Loop S", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77027", + "phone": "713-871-9773", + "fax": "713-871-0838", + "email": "devon@teston.com", + "web": "http://www.devonteston.com", + "followers": 3426 + }, + { + "firstname": "Jade", + "lastname": "Erlebach", + "company": "Vickery Tape & Label Co Inc", + "address": "975 Flynn Rd", + "city": "Camarillo", + "county": "Ventura", + "state": "CA", + "zip": "93012", + "phone": "805-445-8331", + "fax": "805-445-9961", + "email": "jade@erlebach.com", + "web": "http://www.jadeerlebach.com", + "followers": 4178 + }, + { + "firstname": "Roseann", + "lastname": "Jerko", + "company": "Larry Farmer Appraisal Co Inc", + "address": "850 Glen Ave", + "city": "Moorestown", + "county": "Burlington", + "state": "NJ", + "zip": "08057", + "phone": "856-866-4945", + "fax": "856-866-1542", + "email": "roseann@jerko.com", + "web": "http://www.roseannjerko.com", + "followers": 5397 + }, + { + "firstname": "Ruthie", + "lastname": "Zortman", + "company": "Review Monterey Peninsula", + "address": "4508 Enterprise St", + "city": "Fremont", + "county": "Alameda", + "state": "CA", + "zip": "94538", + "phone": "510-651-1410", + "fax": "510-651-1242", + "email": "ruthie@zortman.com", + "web": "http://www.ruthiezortman.com", + "followers": 6079 + }, + { + "firstname": "Leif", + "lastname": "Arguin", + "company": "Cmplt Cmptg Solutions Ne", + "address": "4508 Enterprise St", + "city": "Fremont", + "county": "Alameda", + "state": "CA", + "zip": "94538", + "phone": "510-651-4937", + "fax": "510-651-8302", + "email": "leif@arguin.com", + "web": "http://www.leifarguin.com", + "followers": 6571 + }, + { + "firstname": "Millicent", + "lastname": "Ekstrom", + "company": "Personal Creations Inc", + "address": "151 Brown St #-b", + "city": "Lawrenceburg", + "county": "Dearborn", + "state": "IN", + "zip": 
"47025", + "phone": "812-537-7287", + "fax": "812-537-5442", + "email": "millicent@ekstrom.com", + "web": "http://www.millicentekstrom.com", + "followers": 5739 + }, + { + "firstname": "Val", + "lastname": "Oborne", + "company": "Stone Container Corporation", + "address": "7846 Clybourn Ave", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-767-1347", + "fax": "818-767-5123", + "email": "val@oborne.com", + "web": "http://www.valoborne.com", + "followers": 6746 + }, + { + "firstname": "Bridgett", + "lastname": "Retort", + "company": "Womens Resource & Refrl Ntwrk", + "address": "6747 Signat Dr", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77041", + "phone": "713-466-7259", + "fax": "713-466-3278", + "email": "bridgett@retort.com", + "web": "http://www.bridgettretort.com", + "followers": 3060 + }, + { + "firstname": "Tia", + "lastname": "Lino", + "company": "Superior Gundrilling", + "address": "35375 Highway 228", + "city": "Brownsville", + "county": "Linn", + "state": "OR", + "zip": "97327", + "phone": "541-466-2483", + "fax": "541-466-1661", + "email": "tia@lino.com", + "web": "http://www.tialino.com", + "followers": 8942 + }, + { + "firstname": "Jarrett", + "lastname": "Kenzie", + "company": "Young Men Christian Assn Cnty", + "address": "11551 Riverpark Way", + "city": "Chesterfield", + "county": "Chesterfield", + "state": "VA", + "zip": "23838", + "phone": "804-739-3007", + "fax": "804-739-7905", + "email": "jarrett@kenzie.com", + "web": "http://www.jarrettkenzie.com", + "followers": 9459 + }, + { + "firstname": "Mara", + "lastname": "Vanderzwaag", + "company": "Brown & Brown Law Office", + "address": "2550 E Lucas Dr", + "city": "Beaumont", + "county": "Jefferson", + "state": "TX", + "zip": "77703", + "phone": "409-892-1231", + "fax": "409-892-8492", + "email": "mara@vanderzwaag.com", + "web": "http://www.maravanderzwaag.com", + "followers": 2331 + }, + { + "firstname": "Tiffany", + "lastname": "Knust", + "company": "Vantage Products", + "address": "1425 Koll Cir #-107", + "city": "San Jose", + "county": "Santa Clara", + "state": "CA", + "zip": "95112", + "phone": "408-453-0357", + "fax": "408-453-1525", + "email": "tiffany@knust.com", + "web": "http://www.tiffanyknust.com", + "followers": 2896 + }, + { + "firstname": "Fabian", + "lastname": "Mcshaw", + "company": "Bodik, Michael G Esq", + "address": "6023 Garfield Ave", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90040", + "phone": "323-726-5319", + "fax": "323-726-8499", + "email": "fabian@mcshaw.com", + "web": "http://www.fabianmcshaw.com", + "followers": 4892 + }, + { + "firstname": "Annabelle", + "lastname": "Coger", + "company": "Center For Resource Management", + "address": "1200 Shreveport Barksdale Hwy", + "city": "Shreveport", + "county": "Caddo", + "state": "LA", + "zip": "71105", + "phone": "318-865-8418", + "fax": "318-865-7381", + "email": "annabelle@coger.com", + "web": "http://www.annabellecoger.com", + "followers": 2623 + }, + { + "firstname": "Marisa", + "lastname": "Smiler", + "company": "Star Systems Inc", + "address": "200 Broadhollow Rd", + "city": "Melville", + "county": "Suffolk", + "state": "NY", + "zip": "11747", + "phone": "631-673-3339", + "fax": "631-673-1556", + "email": "marisa@smiler.com", + "web": "http://www.marisasmiler.com", + "followers": 4693 + }, + { + "firstname": "Samantha", + "lastname": "Bordwell", + "company": "Interwest Freight System Inc", + "address": "23405 Sw 152nd Ct", + 
"city": "Homestead", + "county": "Miami-Dade", + "state": "FL", + "zip": "33032", + "phone": "305-247-8402", + "fax": "305-247-4599", + "email": "samantha@bordwell.com", + "web": "http://www.samanthabordwell.com", + "followers": 6109 + }, + { + "firstname": "Felecia", + "lastname": "Riedl", + "company": "Benson, John S", + "address": "333 Andrew Ave", + "city": "Salt Lake City", + "county": "Salt Lake", + "state": "UT", + "zip": "84115", + "phone": "801-486-6484", + "fax": "801-486-6755", + "email": "felecia@riedl.com", + "web": "http://www.feleciariedl.com", + "followers": 7849 + }, + { + "firstname": "Kris", + "lastname": "Persson", + "company": "Tweel, Ronald R Esq", + "address": "1765 Sw Highway 97", + "city": "Madras", + "county": "Jefferson", + "state": "OR", + "zip": "97741", + "phone": "541-475-8404", + "fax": "541-475-0021", + "email": "kris@persson.com", + "web": "http://www.krispersson.com", + "followers": 232 + }, + { + "firstname": "Kylie", + "lastname": "Bridgeman", + "company": "Thomas & Libowitz Pa", + "address": "118 Lenzner Ct", + "city": "Sewickley", + "county": "Allegheny", + "state": "PA", + "zip": "15143", + "phone": "412-741-4604", + "fax": "412-741-4236", + "email": "kylie@bridgeman.com", + "web": "http://www.kyliebridgeman.com", + "followers": 9868 + }, + { + "firstname": "Eduardo", + "lastname": "Bellendir", + "company": "Powers & Assocs", + "address": "1800 Pine Run Rd", + "city": "Wilkes Barre", + "county": "Luzerne", + "state": "PA", + "zip": "18702", + "phone": "570-822-0721", + "fax": "570-822-6267", + "email": "eduardo@bellendir.com", + "web": "http://www.eduardobellendir.com", + "followers": 597 + }, + { + "firstname": "Waldo", + "lastname": "Edberg", + "company": "Bush Building Corporation", + "address": "2017 W Jackson St", + "city": "Tupelo", + "county": "Lee", + "state": "MS", + "zip": "38801", + "phone": "662-842-4133", + "fax": "662-842-8645", + "email": "waldo@edberg.com", + "web": "http://www.waldoedberg.com", + "followers": 6609 + }, + { + "firstname": "Brent", + "lastname": "Vaidya", + "company": "Crain Industries", + "address": "45 Church St", + "city": "Stamford", + "county": "Fairfield", + "state": "CT", + "zip": "06906", + "phone": "203-359-2824", + "fax": "203-359-6466", + "email": "brent@vaidya.com", + "web": "http://www.brentvaidya.com", + "followers": 3729 + }, + { + "firstname": "Bette", + "lastname": "Barcelona", + "company": "Fischer, William R Esq", + "address": "432 Lignite Ave", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-6738", + "fax": "907-456-4144", + "email": "bette@barcelona.com", + "web": "http://www.bettebarcelona.com", + "followers": 5857 + }, + { + "firstname": "Rich", + "lastname": "Gleave", + "company": "Finkelstein, Bernard A Cpa", + "address": "827 E 10th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-277-9294", + "fax": "907-277-2227", + "email": "rich@gleave.com", + "web": "http://www.richgleave.com", + "followers": 488 + }, + { + "firstname": "Lyman", + "lastname": "Whittley", + "company": "Berry, Robert A Esq", + "address": "3030 Bridgeway", + "city": "Sausalito", + "county": "Marin", + "state": "CA", + "zip": "94965", + "phone": "415-332-9570", + "fax": "415-332-7303", + "email": "lyman@whittley.com", + "web": "http://www.lymanwhittley.com", + "followers": 7950 + }, + { + "firstname": "Maryann", + "lastname": "Garnette", + "company": "Catholic University Of", + "address": "582 Centerville 
Rd", + "city": "Lancaster", + "county": "Lancaster", + "state": "PA", + "zip": "17601", + "phone": "717-560-6671", + "fax": "717-560-5625", + "email": "maryann@garnette.com", + "web": "http://www.maryanngarnette.com", + "followers": 5412 + }, + { + "firstname": "Jimmie", + "lastname": "Zarzycki", + "company": "John C Auth", + "address": "215 E Pikes Peak Ave", + "city": "Colorado Springs", + "county": "El Paso", + "state": "CO", + "zip": "80903", + "phone": "719-632-0667", + "fax": "719-632-5612", + "email": "jimmie@zarzycki.com", + "web": "http://www.jimmiezarzycki.com", + "followers": 4291 + }, + { + "firstname": "Gisela", + "lastname": "Kosicki", + "company": "Lisher, John L Esq", + "address": "22140 Ventura Blvd #-4", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91364", + "phone": "818-713-6306", + "fax": "818-713-8346", + "email": "gisela@kosicki.com", + "web": "http://www.giselakosicki.com", + "followers": 377 + }, + { + "firstname": "Marlene", + "lastname": "Hammeren", + "company": "Chamot, Philip S Esq", + "address": "1000 Monte Sano Blvd Se", + "city": "Huntsville", + "county": "Madison", + "state": "AL", + "zip": "35801", + "phone": "256-533-8674", + "fax": "256-533-1176", + "email": "marlene@hammeren.com", + "web": "http://www.marlenehammeren.com", + "followers": 428 + }, + { + "firstname": "Kris", + "lastname": "Stanzak", + "company": "Cenref Labs", + "address": "3100 Dodge St", + "city": "Dubuque", + "county": "Dubuque", + "state": "IA", + "zip": "52003", + "phone": "563-557-2588", + "fax": "563-557-6308", + "email": "kris@stanzak.com", + "web": "http://www.krisstanzak.com", + "followers": 368 + }, + { + "firstname": "Roman", + "lastname": "Simone", + "company": "Spottswood, William B Esq", + "address": "610 W Main St", + "city": "Batavia", + "county": "Clermont", + "state": "OH", + "zip": "45103", + "phone": "513-732-3089", + "fax": "513-732-2547", + "email": "roman@simone.com", + "web": "http://www.romansimone.com", + "followers": 3460 + }, + { + "firstname": "Cathryn", + "lastname": "Nicolaus", + "company": "Perkins Photo/graphics Inc", + "address": "2575 State Highway 32", + "city": "Chico", + "county": "Butte", + "state": "CA", + "zip": "95973", + "phone": "530-345-4627", + "fax": "530-345-8372", + "email": "cathryn@nicolaus.com", + "web": "http://www.cathrynnicolaus.com", + "followers": 9494 + }, + { + "firstname": "Lana", + "lastname": "Keels", + "company": "Veron, J Michael Esq", + "address": "711 Park Ave", + "city": "Freehold", + "county": "Monmouth", + "state": "NJ", + "zip": "07728", + "phone": "732-462-1106", + "fax": "732-462-3575", + "email": "lana@keels.com", + "web": "http://www.lanakeels.com", + "followers": 2796 + }, + { + "firstname": "Malissa", + "lastname": "Ziesemer", + "company": "Electron Rentals Inc", + "address": "330 S Ocean Blvd", + "city": "Palm Beach", + "county": "Palm Beach", + "state": "FL", + "zip": "33480", + "phone": "561-655-6443", + "fax": "561-655-9129", + "email": "malissa@ziesemer.com", + "web": "http://www.malissaziesemer.com", + "followers": 7525 + }, + { + "firstname": "Pamala", + "lastname": "Brodtmann", + "company": "Gagen, William E Jr", + "address": "342 Seaside Ave", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-926-0776", + "fax": "808-926-6173", + "email": "pamala@brodtmann.com", + "web": "http://www.pamalabrodtmann.com", + "followers": 134 + }, + { + "firstname": "Heriberto", + "lastname": "Tivis", + "company": "Newood", + "address": 
"1135 Kildaire Farm Rd", + "city": "Cary", + "county": "Wake", + "state": "NC", + "zip": "27511", + "phone": "919-460-8104", + "fax": "919-460-4304", + "email": "heriberto@tivis.com", + "web": "http://www.heribertotivis.com", + "followers": 2418 + }, + { + "firstname": "Edgardo", + "lastname": "Prudente", + "company": "R A Hamilton Corp", + "address": "1110 N Highway 360", + "city": "Grand Prairie", + "county": "Dallas", + "state": "TX", + "zip": "75050", + "phone": "972-660-3960", + "fax": "972-660-0934", + "email": "edgardo@prudente.com", + "web": "http://www.edgardoprudente.com", + "followers": 3269 + }, + { + "firstname": "Fred", + "lastname": "Kunde", + "company": "Swanson, Victoria C Esq", + "address": "5321 Sterling Center Dr", + "city": "Westlake Village", + "county": "Ventura", + "state": "CA", + "zip": "91361", + "phone": "805-991-9740", + "fax": "805-991-4665", + "email": "fred@kunde.com", + "web": "http://www.fredkunde.com", + "followers": 7741 + }, + { + "firstname": "Pilar", + "lastname": "Suddeth", + "company": "Slant Fin Corp", + "address": "3700 Campus Dr", + "city": "Newport Beach", + "county": "Orange", + "state": "CA", + "zip": "92660", + "phone": "949-852-5463", + "fax": "949-852-9027", + "email": "pilar@suddeth.com", + "web": "http://www.pilarsuddeth.com", + "followers": 4094 + }, + { + "firstname": "Eliseo", + "lastname": "Wice", + "company": "A Limousine Service", + "address": "711 W 38th St", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78705", + "phone": "512-458-0034", + "fax": "512-458-7226", + "email": "eliseo@wice.com", + "web": "http://www.eliseowice.com", + "followers": 7508 + }, + { + "firstname": "Bridget", + "lastname": "Knightly", + "company": "Teamsters Union Local 20", + "address": "66 Flint St", + "city": "Asheville", + "county": "Buncombe", + "state": "NC", + "zip": "28801", + "phone": "828-251-0817", + "fax": "828-251-4242", + "email": "bridget@knightly.com", + "web": "http://www.bridgetknightly.com", + "followers": 1212 + }, + { + "firstname": "Tori", + "lastname": "Villaescusa", + "company": "Church Point Whol Gr Co Inc", + "address": "1006 S San Pedro St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-748-7617", + "fax": "213-748-5668", + "email": "tori@villaescusa.com", + "web": "http://www.torivillaescusa.com", + "followers": 9093 + }, + { + "firstname": "Claire", + "lastname": "Moyerman", + "company": "Grt Nthrn Shoe Rpr & Dry Clnrs", + "address": "2280 S Xanadu Way #-300", + "city": "Aurora", + "county": "Arapahoe", + "state": "CO", + "zip": "80014", + "phone": "303-337-5701", + "fax": "303-337-5796", + "email": "claire@moyerman.com", + "web": "http://www.clairemoyerman.com", + "followers": 5959 + }, + { + "firstname": "Judi", + "lastname": "Kivel", + "company": "Postal Place At 111th Square", + "address": "125 N Emporia St", + "city": "Wichita", + "county": "Sedgwick", + "state": "KS", + "zip": "67202", + "phone": "316-267-2178", + "fax": "316-267-5183", + "email": "judi@kivel.com", + "web": "http://www.judikivel.com", + "followers": 6139 + }, + { + "firstname": "Terrell", + "lastname": "Rodda", + "company": "Woodland Village Nursing Home", + "address": "200 Cottontail Ln", + "city": "Somerset", + "county": "Somerset", + "state": "NJ", + "zip": "08873", + "phone": "732-563-5361", + "fax": "732-563-1293", + "email": "terrell@rodda.com", + "web": "http://www.terrellrodda.com", + "followers": 3803 + }, + { + "firstname": "Jasmin", + "lastname": "Gum", + "company": 
"White, Samuel I Esq", + "address": "43 Parmenter Rd", + "city": "Hudson", + "county": "Middlesex", + "state": "MA", + "zip": "01749", + "phone": "978-562-2524", + "fax": "978-562-5062", + "email": "jasmin@gum.com", + "web": "http://www.jasmingum.com", + "followers": 9074 + }, + { + "firstname": "Bridget", + "lastname": "Bottella", + "company": "Mid State Construction Prod", + "address": "112 E Pecan St", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78205", + "phone": "210-224-9708", + "fax": "210-224-4881", + "email": "bridget@bottella.com", + "web": "http://www.bridgetbottella.com", + "followers": 5951 + }, + { + "firstname": "Tami", + "lastname": "Trybus", + "company": "Seawright, Rodney", + "address": "3963 Virginia Ave", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45227", + "phone": "513-561-1096", + "fax": "513-561-7531", + "email": "tami@trybus.com", + "web": "http://www.tamitrybus.com", + "followers": 370 + }, + { + "firstname": "Maynard", + "lastname": "Kaewprasert", + "company": "Decatur Studio Inc", + "address": "1224 S Hope St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-747-6026", + "fax": "213-747-3088", + "email": "maynard@kaewprasert.com", + "web": "http://www.maynardkaewprasert.com", + "followers": 711 + }, + { + "firstname": "Viola", + "lastname": "Mcsorley", + "company": "First Bank Of Brunswick", + "address": "6850 S Harlem Ave", + "city": "Summit Argo", + "county": "Cook", + "state": "IL", + "zip": "60501", + "phone": "708-496-7428", + "fax": "708-496-8587", + "email": "viola@mcsorley.com", + "web": "http://www.violamcsorley.com", + "followers": 7687 + }, + { + "firstname": "Reggie", + "lastname": "Streu", + "company": "Als Motors Inc", + "address": "15 Henderson Dr", + "city": "Caldwell", + "county": "Essex", + "state": "NJ", + "zip": "07006", + "phone": "973-575-5898", + "fax": "973-575-9328", + "email": "reggie@streu.com", + "web": "http://www.reggiestreu.com", + "followers": 971 + }, + { + "firstname": "Rena", + "lastname": "Griffeth", + "company": "Reliable Optical", + "address": "19901 Nordhoff St", + "city": "Northridge", + "county": "Los Angeles", + "state": "CA", + "zip": "91324", + "phone": "818-709-9165", + "fax": "818-709-2649", + "email": "rena@griffeth.com", + "web": "http://www.renagriffeth.com", + "followers": 6336 + }, + { + "firstname": "Pierre", + "lastname": "Salera", + "company": "Ridley Ridley & Burnette", + "address": "1601 S Shamrock Ave", + "city": "Monrovia", + "county": "Los Angeles", + "state": "CA", + "zip": "91016", + "phone": "626-303-9233", + "fax": "626-303-2569", + "email": "pierre@salera.com", + "web": "http://www.pierresalera.com", + "followers": 6542 + }, + { + "firstname": "Carolina", + "lastname": "Kinlaw", + "company": "Loftus, Daniel B Esq", + "address": "3m County", + "city": "Belle Mead", + "county": "Somerset", + "state": "NJ", + "zip": "08502", + "phone": "908-874-0864", + "fax": "908-874-4873", + "email": "carolina@kinlaw.com", + "web": "http://www.carolinakinlaw.com", + "followers": 8944 + }, + { + "firstname": "Alejandra", + "lastname": "Prenatt", + "company": "Mitchell De Burring Co", + "address": "6850 S Harlem Ave", + "city": "Summit Argo", + "county": "Cook", + "state": "IL", + "zip": "60501", + "phone": "708-496-6958", + "fax": "708-496-4617", + "email": "alejandra@prenatt.com", + "web": "http://www.alejandraprenatt.com", + "followers": 4118 + }, + { + "firstname": "Quintin", + "lastname": "Isacson", 
+ "company": "Spectrum Constrctn & Dev Corp", + "address": "1015 N Cahuenga Blvd", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90038", + "phone": "323-469-0643", + "fax": "323-469-3082", + "email": "quintin@isacson.com", + "web": "http://www.quintinisacson.com", + "followers": 94 + }, + { + "firstname": "Robin", + "lastname": "Grotz", + "company": "Hackett, Peter J", + "address": "2601 Summerhill Rd", + "city": "Texarkana", + "county": "Bowie", + "state": "TX", + "zip": "75503", + "phone": "903-792-2081", + "fax": "903-792-5309", + "email": "robin@grotz.com", + "web": "http://www.robingrotz.com", + "followers": 2532 + }, + { + "firstname": "Lacy", + "lastname": "Woodfin", + "company": "Enviro Dynamics", + "address": "130 Wyoming Ave", + "city": "Scranton", + "county": "Lackawanna", + "state": "PA", + "zip": "18503", + "phone": "570-348-3754", + "fax": "570-348-4204", + "email": "lacy@woodfin.com", + "web": "http://www.lacywoodfin.com", + "followers": 4644 + }, + { + "firstname": "Daniel", + "lastname": "Zill", + "company": "Safety Team Inc", + "address": "550 N Brand Blvd #-1940", + "city": "Glendale", + "county": "Los Angeles", + "state": "CA", + "zip": "91203", + "phone": "818-507-7207", + "fax": "818-507-1925", + "email": "daniel@zill.com", + "web": "http://www.danielzill.com", + "followers": 2051 + }, + { + "firstname": "Reina", + "lastname": "Wolchesky", + "company": "Highland Management Group", + "address": "305 W Washington St", + "city": "Brainerd", + "county": "Crow Wing", + "state": "MN", + "zip": "56401", + "phone": "218-828-7281", + "fax": "218-828-3231", + "email": "reina@wolchesky.com", + "web": "http://www.reinawolchesky.com", + "followers": 9240 + }, + { + "firstname": "Marc", + "lastname": "Wanger", + "company": "Welsh Company", + "address": "33 Harrison Ave", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02111", + "phone": "617-426-6393", + "fax": "617-426-1114", + "email": "marc@wanger.com", + "web": "http://www.marcwanger.com", + "followers": 2846 + }, + { + "firstname": "Damion", + "lastname": "Matkin", + "company": "Data Ware Development Inc", + "address": "5830 Downing St #-d", + "city": "Denver", + "county": "Denver", + "state": "CO", + "zip": "80216", + "phone": "303-295-4797", + "fax": "303-295-3867", + "email": "damion@matkin.com", + "web": "http://www.damionmatkin.com", + "followers": 5752 + }, + { + "firstname": "Lucius", + "lastname": "Winchester", + "company": "Barrett Paving Materials Inc", + "address": "670 S Barrington Rd", + "city": "Streamwood", + "county": "Cook", + "state": "IL", + "zip": "60107", + "phone": "630-289-9458", + "fax": "630-289-9033", + "email": "lucius@winchester.com", + "web": "http://www.luciuswinchester.com", + "followers": 8815 + }, + { + "firstname": "Petra", + "lastname": "Mcnichol", + "company": "Hyacinth Foundation Aids Proj", + "address": "670 S Barrington Rd", + "city": "Streamwood", + "county": "Cook", + "state": "IL", + "zip": "60107", + "phone": "630-289-8190", + "fax": "630-289-8985", + "email": "petra@mcnichol.com", + "web": "http://www.petramcnichol.com", + "followers": 9459 + }, + { + "firstname": "Katina", + "lastname": "Ramano", + "company": "Fuhrmann, Chris C Esq", + "address": "580 Fountain Ave", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11208", + "phone": "718-272-2553", + "fax": "718-272-8498", + "email": "katina@ramano.com", + "web": "http://www.katinaramano.com", + "followers": 924 + }, + { + "firstname": "Leslie", + 
"lastname": "Cackowski", + "company": "Re, Matthew R Esq", + "address": "2103 W Main St", + "city": "Farmington", + "county": "San Juan", + "state": "NM", + "zip": "87401", + "phone": "505-325-3933", + "fax": "505-325-9042", + "email": "leslie@cackowski.com", + "web": "http://www.lesliecackowski.com", + "followers": 3028 + }, + { + "firstname": "Cristopher", + "lastname": "Wiget", + "company": "Roche, Patrick Esq", + "address": "4354 Highway 64", + "city": "Kirtland", + "county": "San Juan", + "state": "NM", + "zip": "87417", + "phone": "505-598-9742", + "fax": "505-598-3063", + "email": "cristopher@wiget.com", + "web": "http://www.cristopherwiget.com", + "followers": 2445 + }, + { + "firstname": "Garth", + "lastname": "Skiffington", + "company": "Fox & Killbride", + "address": "22 Mill St", + "city": "Paterson", + "county": "Passaic", + "state": "NJ", + "zip": "07501", + "phone": "973-684-7654", + "fax": "973-684-6309", + "email": "garth@skiffington.com", + "web": "http://www.garthskiffington.com", + "followers": 1725 + }, + { + "firstname": "Brendan", + "lastname": "Qin", + "company": "Allegro Copy & Print", + "address": "305 Griffin Ave Sw", + "city": "Eastman", + "county": "Dodge", + "state": "GA", + "zip": "31023", + "phone": "478-374-5686", + "fax": "478-374-6992", + "email": "brendan@qin.com", + "web": "http://www.brendanqin.com", + "followers": 8035 + }, + { + "firstname": "Chase", + "lastname": "Furler", + "company": "Hostetler & Kowalik", + "address": "3333 Se 21st St", + "city": "Topeka", + "county": "Shawnee", + "state": "KS", + "zip": "66607", + "phone": "785-354-7091", + "fax": "785-354-3042", + "email": "chase@furler.com", + "web": "http://www.chasefurler.com", + "followers": 2977 + }, + { + "firstname": "Marietta", + "lastname": "Bjornberg", + "company": "Consolidated Mechanical Inc", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-0501", + "fax": "406-728-5507", + "email": "marietta@bjornberg.com", + "web": "http://www.mariettabjornberg.com", + "followers": 54 + }, + { + "firstname": "Carmella", + "lastname": "Wishman", + "company": "Meicher Cpa", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-6772", + "fax": "406-728-7668", + "email": "carmella@wishman.com", + "web": "http://www.carmellawishman.com", + "followers": 624 + }, + { + "firstname": "Erica", + "lastname": "Eyrich", + "company": "C & I Computer Services Inc", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-7293", + "fax": "406-728-4789", + "email": "erica@eyrich.com", + "web": "http://www.ericaeyrich.com", + "followers": 8217 + }, + { + "firstname": "Lucius", + "lastname": "Bagnoli", + "company": "American Music Teacher", + "address": "404 W Boonville New Harmony Rd", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47711", + "phone": "812-867-2916", + "fax": "812-867-2593", + "email": "lucius@bagnoli.com", + "web": "http://www.luciusbagnoli.com", + "followers": 8666 + }, + { + "firstname": "Bart", + "lastname": "Hachey", + "company": "Pip Printing", + "address": "490 S Broad St", + "city": "Canfield", + "county": "Mahoning", + "state": "OH", + "zip": "44406", + "phone": "330-533-9769", + "fax": "330-533-6543", + "email": "bart@hachey.com", + "web": "http://www.barthachey.com", + "followers": 8457 + }, + { + "firstname": "Isiah", + 
"lastname": "Phernetton", + "company": "Wyckoff Florist & Gifts", + "address": "246 Griffing Ave", + "city": "Riverhead", + "county": "Suffolk", + "state": "NY", + "zip": "11901", + "phone": "631-727-0917", + "fax": "631-727-2147", + "email": "isiah@phernetton.com", + "web": "http://www.isiahphernetton.com", + "followers": 6662 + }, + { + "firstname": "Morton", + "lastname": "Crummell", + "company": "Boykin Management Co", + "address": "5485 Conestoga Ct", + "city": "Boulder", + "county": "Boulder", + "state": "CO", + "zip": "80301", + "phone": "303-546-3698", + "fax": "303-546-1589", + "email": "morton@crummell.com", + "web": "http://www.mortoncrummell.com", + "followers": 8143 + }, + { + "firstname": "Prince", + "lastname": "Kauk", + "company": "Mckesson Drug Co", + "address": "2320 W Louise Dr", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85027", + "phone": "623-581-7435", + "fax": "623-581-2472", + "email": "prince@kauk.com", + "web": "http://www.princekauk.com", + "followers": 177 + }, + { + "firstname": "Marta", + "lastname": "Horner", + "company": "Scisci, Pasquale M", + "address": "4694 Alvarado Canyon Rd #-f", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92120", + "phone": "858-265-2270", + "fax": "858-265-6604", + "email": "marta@horner.com", + "web": "http://www.martahorner.com", + "followers": 5749 + }, + { + "firstname": "Teodoro", + "lastname": "Gaboury", + "company": "Bostek, Eva M Dvm", + "address": "4251 Glenwood Ave", + "city": "Youngstown", + "county": "Mahoning", + "state": "OH", + "zip": "44512", + "phone": "330-783-8123", + "fax": "330-783-9674", + "email": "teodoro@gaboury.com", + "web": "http://www.teodorogaboury.com", + "followers": 3663 + }, + { + "firstname": "Jess", + "lastname": "Assad", + "company": "Livingston City", + "address": "9100 F St", + "city": "Omaha", + "county": "Douglas", + "state": "NE", + "zip": "68127", + "phone": "402-331-0470", + "fax": "402-331-4974", + "email": "jess@assad.com", + "web": "http://www.jessassad.com", + "followers": 6985 + }, + { + "firstname": "Freeman", + "lastname": "Soula", + "company": "Henson, Claudia", + "address": "Hwy 8e E", + "city": "Mena", + "county": "Polk", + "state": "AR", + "zip": "71953", + "phone": "479-394-6308", + "fax": "479-394-7509", + "email": "freeman@soula.com", + "web": "http://www.freemansoula.com", + "followers": 7262 + }, + { + "firstname": "Rita", + "lastname": "Center", + "company": "R D Playman Co", + "address": "4013 Cameron St #-b", + "city": "Lafayette", + "county": "Lafayette", + "state": "LA", + "zip": "70506", + "phone": "337-269-8825", + "fax": "337-269-7011", + "email": "rita@center.com", + "web": "http://www.ritacenter.com", + "followers": 4484 + }, + { + "firstname": "Kira", + "lastname": "Papen", + "company": "Tile City & Carpet Of Pa", + "address": "8387 University Ave", + "city": "La Mesa", + "county": "San Diego", + "state": "CA", + "zip": "91941", + "phone": "619-464-3649", + "fax": "619-464-8499", + "email": "kira@papen.com", + "web": "http://www.kirapapen.com", + "followers": 5711 + }, + { + "firstname": "Miquel", + "lastname": "Demicco", + "company": "Welk Resort Center", + "address": "5921 S Middlefield Rd", + "city": "Littleton", + "county": "Jefferson", + "state": "CO", + "zip": "80123", + "phone": "303-730-8080", + "fax": "303-730-8087", + "email": "miquel@demicco.com", + "web": "http://www.miqueldemicco.com", + "followers": 6863 + }, + { + "firstname": "William", + "lastname": "Mahmud", + "company": "Campbell Inn", 
+ "address": "1003 Northern Blvd", + "city": "Manhasset", + "county": "Nassau", + "state": "NY", + "zip": "11030", + "phone": "516-365-3496", + "fax": "516-365-0493", + "email": "william@mahmud.com", + "web": "http://www.williammahmud.com", + "followers": 3591 + }, + { + "firstname": "Lacy", + "lastname": "Belmont", + "company": "House Boat Rentals Inc", + "address": "585 Bedford Rd", + "city": "Bedford Hills", + "county": "Westchester", + "state": "NY", + "zip": "10507", + "phone": "914-241-8888", + "fax": "914-241-2272", + "email": "lacy@belmont.com", + "web": "http://www.lacybelmont.com", + "followers": 1399 + }, + { + "firstname": "Van", + "lastname": "Leanen", + "company": "Mail Boxes Etc", + "address": "Bus Rt 54 & Hh", + "city": "Lake Ozark", + "county": "Camden", + "state": "MO", + "zip": "65049", + "phone": "573-365-0319", + "fax": "573-365-1055", + "email": "van@leanen.com", + "web": "http://www.vanleanen.com", + "followers": 1949 + }, + { + "firstname": "Mayme", + "lastname": "Staub", + "company": "River House Hotel", + "address": "2470 Lamington Rd", + "city": "Bedminster", + "county": "Somerset", + "state": "NJ", + "zip": "07921", + "phone": "908-234-9338", + "fax": "908-234-9433", + "email": "mayme@staub.com", + "web": "http://www.maymestaub.com", + "followers": 2216 + }, + { + "firstname": "Gregg", + "lastname": "Guevarra", + "company": "Arbor Center", + "address": "221 S Kerr Ave", + "city": "Wilmington", + "county": "New Hanover", + "state": "NC", + "zip": "28403", + "phone": "910-799-9811", + "fax": "910-799-1965", + "email": "gregg@guevarra.com", + "web": "http://www.greggguevarra.com", + "followers": 2704 + }, + { + "firstname": "Minh", + "lastname": "Leclare", + "company": "Osach, Ronald C Esq", + "address": "106 S 4th St", + "city": "Forest City", + "county": "Winnebago", + "state": "IA", + "zip": "50436", + "phone": "641-582-0973", + "fax": "641-582-0424", + "email": "minh@leclare.com", + "web": "http://www.minhleclare.com", + "followers": 7681 + }, + { + "firstname": "Joey", + "lastname": "Sedore", + "company": "Goodwill Speciality Co", + "address": "1647 E Palmdale Blvd", + "city": "Palmdale", + "county": "Los Angeles", + "state": "CA", + "zip": "93550", + "phone": "661-273-4188", + "fax": "661-273-6263", + "email": "joey@sedore.com", + "web": "http://www.joeysedore.com", + "followers": 273 + }, + { + "firstname": "Jeanie", + "lastname": "Dalen", + "company": "Ivs Media Inc", + "address": "2900 Ford Rd", + "city": "Bristol", + "county": "Bucks", + "state": "PA", + "zip": "19007", + "phone": "215-788-5062", + "fax": "215-788-7666", + "email": "jeanie@dalen.com", + "web": "http://www.jeaniedalen.com", + "followers": 290 + }, + { + "firstname": "Eddie", + "lastname": "Gauer", + "company": "Easy Mail", + "address": "506 Kellogg Ave", + "city": "Ames", + "county": "Story", + "state": "IA", + "zip": "50010", + "phone": "515-233-2381", + "fax": "515-233-6551", + "email": "eddie@gauer.com", + "web": "http://www.eddiegauer.com", + "followers": 2350 + }, + { + "firstname": "Jessie", + "lastname": "Barkle", + "company": "Days Inn Airport By Mall Amer", + "address": "1002 S Treadaway Blvd", + "city": "Abilene", + "county": "Taylor", + "state": "TX", + "zip": "79602", + "phone": "325-677-1190", + "fax": "325-677-2343", + "email": "jessie@barkle.com", + "web": "http://www.jessiebarkle.com", + "followers": 5427 + }, + { + "firstname": "Deandre", + "lastname": "Resendiz", + "company": "Sav Mart", + "address": "4 Trvl Svc Carlson Trvl Ways", + "city": "Twin Falls", + "county": 
"Twin Falls", + "state": "ID", + "zip": "83301", + "phone": "208-733-8306", + "fax": "208-733-3476", + "email": "deandre@resendiz.com", + "web": "http://www.deandreresendiz.com", + "followers": 1120 + }, + { + "firstname": "Janet", + "lastname": "Rathrock", + "company": "Ryder, Edward A Esq", + "address": "2181 Harlem Rd", + "city": "Loves Park", + "county": "Winnebago", + "state": "IL", + "zip": "61111", + "phone": "815-877-4376", + "fax": "815-877-9538", + "email": "janet@rathrock.com", + "web": "http://www.janetrathrock.com", + "followers": 2725 + }, + { + "firstname": "Denice", + "lastname": "Nordlinger", + "company": "Bickel, Daniel R Cpa", + "address": "7210 Gateway Blvd E", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79915", + "phone": "915-593-2344", + "fax": "915-593-8069", + "email": "denice@nordlinger.com", + "web": "http://www.denicenordlinger.com", + "followers": 2003 + }, + { + "firstname": "Danny", + "lastname": "Dales", + "company": "Barrett Bindery Co", + "address": "3530 E Washington St", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85034", + "phone": "602-225-9543", + "fax": "602-225-9028", + "email": "danny@dales.com", + "web": "http://www.dannydales.com", + "followers": 2843 + }, + { + "firstname": "Robbie", + "lastname": "Deshay", + "company": "Cornhusker Press", + "address": "1700 Terminal St", + "city": "West Sacramento", + "county": "Yolo", + "state": "CA", + "zip": "95691", + "phone": "916-372-5032", + "fax": "916-372-1333", + "email": "robbie@deshay.com", + "web": "http://www.robbiedeshay.com", + "followers": 3460 + }, + { + "firstname": "Carla", + "lastname": "Humble", + "company": "Express Printing Center", + "address": "11796 Sheldon St", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-768-0662", + "fax": "818-768-1832", + "email": "carla@humble.com", + "web": "http://www.carlahumble.com", + "followers": 4639 + }, + { + "firstname": "Ashley", + "lastname": "Leonesio", + "company": "Martinique Resort Hotel", + "address": "15 Park Row", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10038", + "phone": "212-227-3681", + "fax": "212-227-4343", + "email": "ashley@leonesio.com", + "web": "http://www.ashleyleonesio.com", + "followers": 4801 + }, + { + "firstname": "Josephine", + "lastname": "Sotlar", + "company": "Shipp Storage", + "address": "400 1st St Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20001", + "phone": "202-783-2772", + "fax": "202-783-8805", + "email": "josephine@sotlar.com", + "web": "http://www.josephinesotlar.com", + "followers": 515 + }, + { + "firstname": "Derek", + "lastname": "Kreutzbender", + "company": "Recycle Metals Corp", + "address": "411 E Wisconsin Ave", + "city": "Milwaukee", + "county": "Milwaukee", + "state": "WI", + "zip": "53202", + "phone": "414-271-5253", + "fax": "414-271-6234", + "email": "derek@kreutzbender.com", + "web": "http://www.derekkreutzbender.com", + "followers": 2430 + }, + { + "firstname": "Kira", + "lastname": "Staffon", + "company": "International Management Assoc", + "address": "933 Wiliwili St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96826", + "phone": "808-949-0941", + "fax": "808-949-8257", + "email": "kira@staffon.com", + "web": "http://www.kirastaffon.com", + "followers": 5769 + }, + { + "firstname": "Isaac", + "lastname": "Davensizer", + "company": "Dillon Measurement Instruments", + "address": 
"444 Nahua St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-544-5794", + "fax": "808-544-6357", + "email": "isaac@davensizer.com", + "web": "http://www.isaacdavensizer.com", + "followers": 3453 + }, + { + "firstname": "Reva", + "lastname": "Bayer", + "company": "Hirt, Stanley Esq", + "address": "4011 Wallingford Ave N", + "city": "Seattle", + "county": "King", + "state": "WA", + "zip": "98103", + "phone": "206-634-9998", + "fax": "206-634-2589", + "email": "reva@bayer.com", + "web": "http://www.revabayer.com", + "followers": 7415 + }, + { + "firstname": "Melvin", + "lastname": "Auteri", + "company": "Interstate Unltd Fed Crdt Un", + "address": "463 Beacon St", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02115", + "phone": "617-247-8022", + "fax": "617-247-6002", + "email": "melvin@auteri.com", + "web": "http://www.melvinauteri.com", + "followers": 2705 + }, + { + "firstname": "Stephen", + "lastname": "Seiters", + "company": "Us Mortgage Corp", + "address": "814 Blue Mound Rd", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76131", + "phone": "817-947-3102", + "fax": "817-947-6272", + "email": "stephen@seiters.com", + "web": "http://www.stephenseiters.com", + "followers": 9568 + }, + { + "firstname": "Lucas", + "lastname": "Santellana", + "company": "Constantino, James P Esq", + "address": "4820 E Mcdowell Rd #-300", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85008", + "phone": "602-225-3469", + "fax": "602-225-8897", + "email": "lucas@santellana.com", + "web": "http://www.lucassantellana.com", + "followers": 8420 + }, + { + "firstname": "Traci", + "lastname": "Toomey", + "company": "Pea Press", + "address": "Box #-1948", + "city": "Jackson", + "county": "Teton", + "state": "WY", + "zip": "83001", + "phone": "307-733-9708", + "fax": "307-733-6525", + "email": "traci@toomey.com", + "web": "http://www.tracitoomey.com", + "followers": 7718 + }, + { + "firstname": "Vernice", + "lastname": "Resendes", + "company": "Sorrento Cheese Co Inc", + "address": "11796 Sheldon St", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-768-6234", + "fax": "818-768-7585", + "email": "vernice@resendes.com", + "web": "http://www.verniceresendes.com", + "followers": 912 + }, + { + "firstname": "Hillary", + "lastname": "Holmes", + "company": "Dennis N Brager Law Offices Of", + "address": "7565 Green Valley Rd", + "city": "Placerville", + "county": "El Dorado", + "state": "CA", + "zip": "95667", + "phone": "530-626-1934", + "fax": "530-626-6128", + "email": "hillary@holmes.com", + "web": "http://www.hillaryholmes.com", + "followers": 7918 + }, + { + "firstname": "Robin", + "lastname": "Schartz", + "company": "Fairmount Country Club", + "address": "10175 Joerschke Dr", + "city": "Grass Valley", + "county": "Nevada", + "state": "CA", + "zip": "95945", + "phone": "530-477-9983", + "fax": "530-477-7396", + "email": "robin@schartz.com", + "web": "http://www.robinschartz.com", + "followers": 2169 + }, + { + "firstname": "Sabrina", + "lastname": "Deppert", + "company": "Americold", + "address": "2701 E Thomas Rd #-j", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85016", + "phone": "602-954-4343", + "fax": "602-954-6266", + "email": "sabrina@deppert.com", + "web": "http://www.sabrinadeppert.com", + "followers": 5488 + }, + { + "firstname": "Luciano", + "lastname": "Truiolo", + "company": "Currier Gallery Of Art", 
+ "address": "435 Mira Vista Ter", + "city": "Pasadena", + "county": "Los Angeles", + "state": "CA", + "zip": "91105", + "phone": "626-792-6850", + "fax": "626-792-5166", + "email": "luciano@truiolo.com", + "web": "http://www.lucianotruiolo.com", + "followers": 9047 + }, + { + "firstname": "Ezekiel", + "lastname": "Mildon", + "company": "Ace Pro Pest Cntrl Inc", + "address": "10 Rogers St", + "city": "Cambridge", + "county": "Middlesex", + "state": "MA", + "zip": "02142", + "phone": "617-494-5618", + "fax": "617-494-3365", + "email": "ezekiel@mildon.com", + "web": "http://www.ezekielmildon.com", + "followers": 399 + }, + { + "firstname": "Hanna", + "lastname": "Cinkan", + "company": "Northbrook Flowers", + "address": "1150 Nw 72nd Ave #-333", + "city": "Miami", + "county": "Miami-Dade", + "state": "FL", + "zip": "33126", + "phone": "305-477-7869", + "fax": "305-477-7089", + "email": "hanna@cinkan.com", + "web": "http://www.hannacinkan.com", + "followers": 4636 + }, + { + "firstname": "Kory", + "lastname": "Wooldridge", + "company": "Paper Stock Dealers Inc", + "address": "1525 E 53rd St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60615", + "phone": "312-753-6734", + "fax": "312-753-2693", + "email": "kory@wooldridge.com", + "web": "http://www.korywooldridge.com", + "followers": 4528 + }, + { + "firstname": "Darrel", + "lastname": "Ruffins", + "company": "Brickman, Arthur Cpa", + "address": "2800 4th St N", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33704", + "phone": "727-821-8502", + "fax": "727-821-5104", + "email": "darrel@ruffins.com", + "web": "http://www.darrelruffins.com", + "followers": 6468 + }, + { + "firstname": "Miranda", + "lastname": "Hammitt", + "company": "Steindel, Carl R Md", + "address": "964 E Saint Francis St", + "city": "Brownsville", + "county": "Cameron", + "state": "TX", + "zip": "78520", + "phone": "956-541-5918", + "fax": "956-541-9021", + "email": "miranda@hammitt.com", + "web": "http://www.mirandahammitt.com", + "followers": 8847 + }, + { + "firstname": "Sadie", + "lastname": "Rowlett", + "company": "Port City Taxi Inc", + "address": "2659 Webster Ave", + "city": "Bronx", + "county": "Bronx", + "state": "NY", + "zip": "10458", + "phone": "718-365-1753", + "fax": "718-365-5353", + "email": "sadie@rowlett.com", + "web": "http://www.sadierowlett.com", + "followers": 9836 + }, + { + "firstname": "Deanna", + "lastname": "Gerbi", + "company": "Centerville Historical Society", + "address": "264 Broadway", + "city": "Jersey City", + "county": "Hudson", + "state": "NJ", + "zip": "07306", + "phone": "201-433-0391", + "fax": "201-433-3619", + "email": "deanna@gerbi.com", + "web": "http://www.deannagerbi.com", + "followers": 9112 + }, + { + "firstname": "Alfonso", + "lastname": "Griglen", + "company": "Paul D Friedman", + "address": "577 Township Road #-30s", + "city": "Ada", + "county": "Hardin", + "state": "OH", + "zip": "45810", + "phone": "419-634-3513", + "fax": "419-634-5733", + "email": "alfonso@griglen.com", + "web": "http://www.alfonsogriglen.com", + "followers": 7361 + }, + { + "firstname": "Vernon", + "lastname": "Engelman", + "company": "Atlas Metal Cutting Inc", + "address": "321 Watson St", + "city": "Ripon", + "county": "Fond du Lac", + "state": "WI", + "zip": "54971", + "phone": "920-748-1387", + "fax": "920-748-3703", + "email": "vernon@engelman.com", + "web": "http://www.vernonengelman.com", + "followers": 656 + }, + { + "firstname": "Johnnie", + "lastname": "Rheaves", + "company": "Roberts, 
James A Cpa", + "address": "2910 E La Cresta Ave", + "city": "Anaheim", + "county": "Orange", + "state": "CA", + "zip": "92806", + "phone": "714-632-1291", + "fax": "714-632-7337", + "email": "johnnie@rheaves.com", + "web": "http://www.johnnierheaves.com", + "followers": 2812 + }, + { + "firstname": "Ella", + "lastname": "Pahnke", + "company": "Davis, Randle S Esq", + "address": "2640 Junction Hwy", + "city": "Kerrville", + "county": "Kerr", + "state": "TX", + "zip": "78028", + "phone": "830-367-8513", + "fax": "830-367-9231", + "email": "ella@pahnke.com", + "web": "http://www.ellapahnke.com", + "followers": 4762 + }, + { + "firstname": "Veronica", + "lastname": "Achorn", + "company": "Guy Spradling", + "address": "5201 Hanawalt Dr", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79903", + "phone": "915-772-3217", + "fax": "915-772-2346", + "email": "veronica@achorn.com", + "web": "http://www.veronicaachorn.com", + "followers": 9145 + }, + { + "firstname": "Kasey", + "lastname": "Nguyen", + "company": "Pettine, Paul A Iii", + "address": "5603 Arapahoe Ave", + "city": "Boulder", + "county": "Boulder", + "state": "CO", + "zip": "80303", + "phone": "303-440-3916", + "fax": "303-440-2452", + "email": "kasey@nguyen.com", + "web": "http://www.kaseynguyen.com", + "followers": 2867 + }, + { + "firstname": "Frankie", + "lastname": "Morein", + "company": "Oliver, Jerrold B Esq", + "address": "104 North St", + "city": "Stamford", + "county": "Fairfield", + "state": "CT", + "zip": "06902", + "phone": "203-975-3712", + "fax": "203-975-4688", + "email": "frankie@morein.com", + "web": "http://www.frankiemorein.com", + "followers": 5663 + }, + { + "firstname": "Elaine", + "lastname": "Renzi", + "company": "Delvel Chem Co", + "address": "221 E 59th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-826-7966", + "fax": "212-826-2043", + "email": "elaine@renzi.com", + "web": "http://www.elainerenzi.com", + "followers": 9525 + }, + { + "firstname": "Timothy", + "lastname": "Janski", + "company": "Snyder Chevrolet Olds Geo Co", + "address": "800 E Dimond Blvd", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99515", + "phone": "907-344-4330", + "fax": "907-344-4086", + "email": "timothy@janski.com", + "web": "http://www.timothyjanski.com", + "followers": 1000 + }, + { + "firstname": "Warren", + "lastname": "Hacher", + "company": "Josel, Stephen C Esq", + "address": "4600 S 1st St", + "city": "Abilene", + "county": "Taylor", + "state": "TX", + "zip": "79605", + "phone": "325-691-7220", + "fax": "325-691-7394", + "email": "warren@hacher.com", + "web": "http://www.warrenhacher.com", + "followers": 6287 + }, + { + "firstname": "Brant", + "lastname": "Darnel", + "company": "Cody, Daniel S Esq", + "address": "8250 Tyler Blvd", + "city": "Mentor", + "county": "Lake", + "state": "OH", + "zip": "44060", + "phone": "440-974-8416", + "fax": "440-974-7476", + "email": "brant@darnel.com", + "web": "http://www.brantdarnel.com", + "followers": 5217 + }, + { + "firstname": "Mara", + "lastname": "Rineheart", + "company": "Berry Naturipe Growers", + "address": "39 W 21st St", + "city": "Northampton", + "county": "Northampton", + "state": "PA", + "zip": "18067", + "phone": "610-262-2444", + "fax": "610-262-6836", + "email": "mara@rineheart.com", + "web": "http://www.mararineheart.com", + "followers": 9213 + }, + { + "firstname": "Karen", + "lastname": "Flierl", + "company": "Cook Vetter Doerhoff", + "address": "501 N I #-h35", + 
"city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78702", + "phone": "512-477-1826", + "fax": "512-477-1407", + "email": "karen@flierl.com", + "web": "http://www.karenflierl.com", + "followers": 754 + }, + { + "firstname": "Virgil", + "lastname": "Chinni", + "company": "Dorfman Abrams Music & Co", + "address": "248 Libby St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-841-9811", + "fax": "808-841-9646", + "email": "virgil@chinni.com", + "web": "http://www.virgilchinni.com", + "followers": 6740 + }, + { + "firstname": "Jimmie", + "lastname": "Kertzman", + "company": "Oregon Pacific Trading Co", + "address": "279 Puuhale Rd", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-841-2883", + "fax": "808-841-1772", + "email": "jimmie@kertzman.com", + "web": "http://www.jimmiekertzman.com", + "followers": 4795 + }, + { + "firstname": "Leif", + "lastname": "Bachta", + "company": "Hislop, Lorna Brumfield Esq", + "address": "91246 Oihana St", + "city": "Kapolei", + "county": "Honolulu", + "state": "HI", + "zip": "96707", + "phone": "808-682-8942", + "fax": "808-682-2789", + "email": "leif@bachta.com", + "web": "http://www.leifbachta.com", + "followers": 6016 + }, + { + "firstname": "Ione", + "lastname": "Kucera", + "company": "Tweedy Penney & Crawford", + "address": "94210 Pupukahi St #-201a", + "city": "Waipahu", + "county": "Honolulu", + "state": "HI", + "zip": "96797", + "phone": "808-671-5253", + "fax": "808-671-3048", + "email": "ione@kucera.com", + "web": "http://www.ionekucera.com", + "followers": 17 + }, + { + "firstname": "Doreen", + "lastname": "Sakurai", + "company": "Airlifter", + "address": "211 E 50th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-759-4757", + "fax": "212-759-7548", + "email": "doreen@sakurai.com", + "web": "http://www.doreensakurai.com", + "followers": 8051 + }, + { + "firstname": "Joel", + "lastname": "Nardo", + "company": "New Era Canning Co", + "address": "5150 Town Center Cir", + "city": "Boca Raton", + "county": "Palm Beach", + "state": "FL", + "zip": "33486", + "phone": "561-395-2277", + "fax": "561-395-7825", + "email": "joel@nardo.com", + "web": "http://www.joelnardo.com", + "followers": 9041 + }, + { + "firstname": "Neil", + "lastname": "Backus", + "company": "Smith Capital Management", + "address": "521 5th Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10175", + "phone": "212-537-4955", + "fax": "212-537-1181", + "email": "neil@backus.com", + "web": "http://www.neilbackus.com", + "followers": 9529 + }, + { + "firstname": "Fausto", + "lastname": "Marks", + "company": "Donohue, Brian C Esq", + "address": "200 Stuart St", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02116", + "phone": "617-451-9353", + "fax": "617-451-2136", + "email": "fausto@marks.com", + "web": "http://www.faustomarks.com", + "followers": 9763 + }, + { + "firstname": "Christa", + "lastname": "Bodenschatz", + "company": "Stationers Inc", + "address": "Tilton Rd", + "city": "Danville", + "county": "Vermilion", + "state": "IL", + "zip": "61832", + "phone": "217-443-6280", + "fax": "217-443-6382", + "email": "christa@bodenschatz.com", + "web": "http://www.christabodenschatz.com", + "followers": 1744 + }, + { + "firstname": "Chi", + "lastname": "Greenlaw", + "company": "E & T Screw Machine Products", + "address": "4300 N Miller Rd #-143", + "city": "Scottsdale", + 
"county": "Maricopa", + "state": "AZ", + "zip": "85251", + "phone": "480-946-1537", + "fax": "480-946-1657", + "email": "chi@greenlaw.com", + "web": "http://www.chigreenlaw.com", + "followers": 2707 + }, + { + "firstname": "Kyle", + "lastname": "Ferri", + "company": "Ames Plumbing & Heating", + "address": "1801 E 5th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-272-2216", + "fax": "907-272-7109", + "email": "kyle@ferri.com", + "web": "http://www.kyleferri.com", + "followers": 7533 + }, + { + "firstname": "Freida", + "lastname": "Michelfelder", + "company": "Fun Discovery Inc", + "address": "117 Martin Luther King Jr Dr S", + "city": "Atlanta", + "county": "Fulton", + "state": "GA", + "zip": "30303", + "phone": "404-521-3372", + "fax": "404-521-3223", + "email": "freida@michelfelder.com", + "web": "http://www.freidamichelfelder.com", + "followers": 4298 + }, + { + "firstname": "Bryant", + "lastname": "Bouliouris", + "company": "Medlin, Charles K Jr", + "address": "480 W Pearl Ave", + "city": "Jackson", + "county": "Teton", + "state": "WY", + "zip": "83001", + "phone": "307-733-8286", + "fax": "307-733-6041", + "email": "bryant@bouliouris.com", + "web": "http://www.bryantbouliouris.com", + "followers": 9520 + }, + { + "firstname": "Emilia", + "lastname": "Oxley", + "company": "Buckeye Reserve Title", + "address": "14651 Dallas Pky", + "city": "Dallas", + "county": "Dallas", + "state": "TX", + "zip": "75240", + "phone": "214-702-8125", + "fax": "214-702-4766", + "email": "emilia@oxley.com", + "web": "http://www.emiliaoxley.com", + "followers": 6873 + }, + { + "firstname": "Naomi", + "lastname": "Mcraven", + "company": "Oak Brook Capital Corp", + "address": "13456 Se 27th Pl", + "city": "Bellevue", + "county": "King", + "state": "WA", + "zip": "98005", + "phone": "425-641-5463", + "fax": "425-641-5923", + "email": "naomi@mcraven.com", + "web": "http://www.naomimcraven.com", + "followers": 7029 + }, + { + "firstname": "Dionne", + "lastname": "Borycz", + "company": "Pepsi Cola Dr Pepper Bottling", + "address": "77 S Washington St #-207", + "city": "Rockville", + "county": "Montgomery", + "state": "MD", + "zip": "20850", + "phone": "301-294-0154", + "fax": "301-294-7523", + "email": "dionne@borycz.com", + "web": "http://www.dionneborycz.com", + "followers": 6973 + }, + { + "firstname": "Jimmy", + "lastname": "Hrobsky", + "company": "Howard Johnson", + "address": "8253 Ronson Rd", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92111", + "phone": "858-268-4663", + "fax": "858-268-4964", + "email": "jimmy@hrobsky.com", + "web": "http://www.jimmyhrobsky.com", + "followers": 544 + }, + { + "firstname": "Peggy", + "lastname": "Hohlstein", + "company": "Steritek Inc", + "address": "Rt 20", + "city": "Westfield", + "county": "Hampden", + "state": "MA", + "zip": "01085", + "phone": "413-543-2933", + "fax": "413-543-3805", + "email": "peggy@hohlstein.com", + "web": "http://www.peggyhohlstein.com", + "followers": 8885 + }, + { + "firstname": "Genevieve", + "lastname": "Kekiwi", + "company": "Lawson, John F Esq", + "address": "8300 Bell Ter", + "city": "Newburgh", + "county": "Warrick", + "state": "IN", + "zip": "47630", + "phone": "812-477-3620", + "fax": "812-477-3646", + "email": "genevieve@kekiwi.com", + "web": "http://www.genevievekekiwi.com", + "followers": 8134 + }, + { + "firstname": "Terra", + "lastname": "Plagge", + "company": "Beach, Jeffrey E", + "address": "60 Minute Photo Colormax", + "city": "Evansville", + 
"county": "Vanderburgh", + "state": "IN", + "zip": "47715", + "phone": "812-477-9524", + "fax": "812-477-9617", + "email": "terra@plagge.com", + "web": "http://www.terraplagge.com", + "followers": 4323 + }, + { + "firstname": "Allie", + "lastname": "Pumphrey", + "company": "Asher, Ronald L Md", + "address": "501 N Weinbach Ave", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47711", + "phone": "812-477-0753", + "fax": "812-477-4604", + "email": "allie@pumphrey.com", + "web": "http://www.alliepumphrey.com", + "followers": 3985 + }, + { + "firstname": "Katina", + "lastname": "Survant", + "company": "Kgtv Channel 10", + "address": "590 N 2nd E", + "city": "Mountain Home", + "county": "Elmore", + "state": "ID", + "zip": "83647", + "phone": "208-587-3734", + "fax": "208-587-5574", + "email": "katina@survant.com", + "web": "http://www.katinasurvant.com", + "followers": 3440 + }, + { + "firstname": "Marta", + "lastname": "Warran", + "company": "Wirth, John T Esq", + "address": "2929 W Kennewick Ave", + "city": "Kennewick", + "county": "Benton", + "state": "WA", + "zip": "99336", + "phone": "509-735-8388", + "fax": "509-735-9193", + "email": "marta@warran.com", + "web": "http://www.martawarran.com", + "followers": 9891 + }, + { + "firstname": "Rebekah", + "lastname": "Lindboe", + "company": "Granite Corporation", + "address": "600 Las Colinas Blvd E", + "city": "Irving", + "county": "Dallas", + "state": "TX", + "zip": "75039", + "phone": "972-556-1121", + "fax": "972-556-0801", + "email": "rebekah@lindboe.com", + "web": "http://www.rebekahlindboe.com", + "followers": 180 + }, + { + "firstname": "Roxie", + "lastname": "Varenhorst", + "company": "Good Neighbor Real Estate", + "address": "1301 Dublin Rd", + "city": "Columbus", + "county": "Franklin", + "state": "OH", + "zip": "43215", + "phone": "614-487-2917", + "fax": "614-487-4227", + "email": "roxie@varenhorst.com", + "web": "http://www.roxievarenhorst.com", + "followers": 1352 + }, + { + "firstname": "Kennith", + "lastname": "Peto", + "company": "Kirschbaum, Thomas A Esq", + "address": "2080 Peachtree Industrial Ct", + "city": "Atlanta", + "county": "Dekalb", + "state": "GA", + "zip": "30341", + "phone": "770-455-4277", + "fax": "770-455-6746", + "email": "kennith@peto.com", + "web": "http://www.kennithpeto.com", + "followers": 8164 + }, + { + "firstname": "Darrell", + "lastname": "Amrich", + "company": "Harris, James P Iii", + "address": "64 W Convenient", + "city": "Apex", + "county": "Wake", + "state": "NC", + "zip": "27502", + "phone": "919-362-8201", + "fax": "919-362-7475", + "email": "darrell@amrich.com", + "web": "http://www.darrellamrich.com", + "followers": 9098 + }, + { + "firstname": "Savannah", + "lastname": "Loffier", + "company": "Saint Charles Catv", + "address": "501 S Johnstone Ave", + "city": "Bartlesville", + "county": "Washington", + "state": "OK", + "zip": "74003", + "phone": "918-337-3201", + "fax": "918-337-4947", + "email": "savannah@loffier.com", + "web": "http://www.savannahloffier.com", + "followers": 7227 + }, + { + "firstname": "Martin", + "lastname": "Carley", + "company": "Heil, John P Esq", + "address": "680 Country W", + "city": "Sylva", + "county": "Jackson", + "state": "NC", + "zip": "28779", + "phone": "828-586-3914", + "fax": "828-586-8059", + "email": "martin@carley.com", + "web": "http://www.martincarley.com", + "followers": 7412 + }, + { + "firstname": "Lacy", + "lastname": "Hyten", + "company": "Buy & Sell Press", + "address": "1 Palmer Sq", + "city": "Princeton", + 
"county": "Mercer", + "state": "NJ", + "zip": "08542", + "phone": "609-683-3558", + "fax": "609-683-0649", + "email": "lacy@hyten.com", + "web": "http://www.lacyhyten.com", + "followers": 2184 + }, + { + "firstname": "Forest", + "lastname": "Orea", + "company": "Clark, Francis J", + "address": "6858 S Ashland Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60636", + "phone": "773-436-4531", + "fax": "773-436-2636", + "email": "forest@orea.com", + "web": "http://www.forestorea.com", + "followers": 8308 + }, + { + "firstname": "Courtney", + "lastname": "Shishido", + "company": "Beymers Jewelry", + "address": "145 W 6th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-279-2737", + "fax": "907-279-2025", + "email": "courtney@shishido.com", + "web": "http://www.courtneyshishido.com", + "followers": 6286 + }, + { + "firstname": "Annette", + "lastname": "Frietas", + "company": "Monsanto Chemical Company", + "address": "45 W 46th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10036", + "phone": "212-944-8670", + "fax": "212-944-9464", + "email": "annette@frietas.com", + "web": "http://www.annettefrietas.com", + "followers": 9185 + }, + { + "firstname": "Karyn", + "lastname": "Jinks", + "company": "Maslen, David Esq", + "address": "2318 N Galloway Ave", + "city": "Mesquite", + "county": "Dallas", + "state": "TX", + "zip": "75150", + "phone": "972-289-4090", + "fax": "972-289-3319", + "email": "karyn@jinks.com", + "web": "http://www.karynjinks.com", + "followers": 3549 + }, + { + "firstname": "Edwin", + "lastname": "Lavelli", + "company": "Letterguide Co", + "address": "W 1st St", + "city": "East Liverpool", + "county": "Columbiana", + "state": "OH", + "zip": "43920", + "phone": "330-385-4581", + "fax": "330-385-9959", + "email": "edwin@lavelli.com", + "web": "http://www.edwinlavelli.com", + "followers": 1645 + }, + { + "firstname": "Jimmie", + "lastname": "Barninger", + "company": "California Paint & Wlpaper Str", + "address": "Box #-4038", + "city": "Modesto", + "county": "Stanislaus", + "state": "CA", + "zip": "95352", + "phone": "209-525-7568", + "fax": "209-525-4389", + "email": "jimmie@barninger.com", + "web": "http://www.jimmiebarninger.com", + "followers": 3947 + }, + { + "firstname": "Merle", + "lastname": "Wyrosdick", + "company": "Keil, James J Esq", + "address": "1350 Campus Pky", + "city": "Neptune", + "county": "Monmouth", + "state": "NJ", + "zip": "07753", + "phone": "732-938-7301", + "fax": "732-938-7237", + "email": "merle@wyrosdick.com", + "web": "http://www.merlewyrosdick.com", + "followers": 5762 + }, + { + "firstname": "Amelia", + "lastname": "Caputo", + "company": "Security Marketing Agency", + "address": "1800 Airport Way", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-1748", + "fax": "907-456-7535", + "email": "amelia@caputo.com", + "web": "http://www.ameliacaputo.com", + "followers": 9583 + }, + { + "firstname": "Germaine", + "lastname": "Bruski", + "company": "Alloy Founders", + "address": "2301 S Cushman St", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-8225", + "fax": "907-456-9261", + "email": "germaine@bruski.com", + "web": "http://www.germainebruski.com", + "followers": 5075 + }, + { + "firstname": "Willa", + "lastname": "Dutt", + "company": "Gutmann Leather Co Inc", + "address": "2110 Peger Rd", + "city": "Fairbanks", + 
"county": "Fairbanks North Star", + "state": "AK", + "zip": "99709", + "phone": "907-456-2885", + "fax": "907-456-2187", + "email": "willa@dutt.com", + "web": "http://www.willadutt.com", + "followers": 2775 + }, + { + "firstname": "Cherie", + "lastname": "Fuhri", + "company": "Continental Baking Co", + "address": "3679 College Rd", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99709", + "phone": "907-456-9072", + "fax": "907-456-8467", + "email": "cherie@fuhri.com", + "web": "http://www.cheriefuhri.com", + "followers": 5839 + }, + { + "firstname": "Tyron", + "lastname": "Quillman", + "company": "Analysts International Corp", + "address": "5300 Shawnee Rd", + "city": "Alexandria", + "county": "Fairfax", + "state": "VA", + "zip": "22312", + "phone": "703-354-9266", + "fax": "703-354-2554", + "email": "tyron@quillman.com", + "web": "http://www.tyronquillman.com", + "followers": 9439 + }, + { + "firstname": "Charity", + "lastname": "Dyckman", + "company": "Marriott, Frank Jr", + "address": "6927 Old Seward Hwy", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99518", + "phone": "907-349-9880", + "fax": "907-349-4449", + "email": "charity@dyckman.com", + "web": "http://www.charitydyckman.com", + "followers": 2058 + }, + { + "firstname": "Nanette", + "lastname": "Turansky", + "company": "Sheraton Society Hill Hotel", + "address": "52 S 2nd St", + "city": "Easton", + "county": "Northampton", + "state": "PA", + "zip": "18042", + "phone": "610-250-6188", + "fax": "610-250-4334", + "email": "nanette@turansky.com", + "web": "http://www.nanetteturansky.com", + "followers": 5905 + }, + { + "firstname": "Cherie", + "lastname": "Schronce", + "company": "Varda, John Duncan Esq", + "address": "21603 Devonshire St", + "city": "Chatsworth", + "county": "Los Angeles", + "state": "CA", + "zip": "91311", + "phone": "818-718-2001", + "fax": "818-718-7339", + "email": "cherie@schronce.com", + "web": "http://www.cherieschronce.com", + "followers": 9409 + }, + { + "firstname": "Theron", + "lastname": "Hambright", + "company": "Sensor Oil And Gas Inc", + "address": "905 Brooks Ave", + "city": "Holland", + "county": "Ottawa", + "state": "MI", + "zip": "49423", + "phone": "616-392-2074", + "fax": "616-392-0226", + "email": "theron@hambright.com", + "web": "http://www.theronhambright.com", + "followers": 6532 + }, + { + "firstname": "Laurie", + "lastname": "Bibbs", + "company": "Action Nursing Care Llc", + "address": "2101 Claremont Ave Ne", + "city": "Albuquerque", + "county": "Bernalillo", + "state": "NM", + "zip": "87107", + "phone": "505-881-2899", + "fax": "505-881-3771", + "email": "laurie@bibbs.com", + "web": "http://www.lauriebibbs.com", + "followers": 5422 + }, + { + "firstname": "Angelo", + "lastname": "Ferentz", + "company": "Reagan, William L Esq", + "address": "3220 E 26th St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90023", + "phone": "323-262-5047", + "fax": "323-262-8693", + "email": "angelo@ferentz.com", + "web": "http://www.angeloferentz.com", + "followers": 6580 + }, + { + "firstname": "Denver", + "lastname": "Topete", + "company": "Insight Cablevision", + "address": "5770 Morehouse Dr", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92121", + "phone": "858-457-3538", + "fax": "858-457-0465", + "email": "denver@topete.com", + "web": "http://www.denvertopete.com", + "followers": 6542 + }, + { + "firstname": "Tommie", + "lastname": "Reuland", + "company": "Schaff, 
Michael D Esq", + "address": "1166 Arroyo St", + "city": "San Fernando", + "county": "Los Angeles", + "state": "CA", + "zip": "91340", + "phone": "818-361-4035", + "fax": "818-361-7493", + "email": "tommie@reuland.com", + "web": "http://www.tommiereuland.com", + "followers": 7735 + }, + { + "firstname": "Delmer", + "lastname": "Delucas", + "company": "American Processing Co Inc", + "address": "2770 Walden Ave", + "city": "Buffalo", + "county": "Erie", + "state": "NY", + "zip": "14225", + "phone": "716-874-1439", + "fax": "716-874-3467", + "email": "delmer@delucas.com", + "web": "http://www.delmerdelucas.com", + "followers": 8605 + }, + { + "firstname": "Latisha", + "lastname": "Bahls", + "company": "Search South Inc", + "address": "3 E 4th St", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45202", + "phone": "513-784-5007", + "fax": "513-784-5275", + "email": "latisha@bahls.com", + "web": "http://www.latishabahls.com", + "followers": 8504 + }, + { + "firstname": "Simone", + "lastname": "Lundie", + "company": "Casebolt, Victor S Esq", + "address": "701 S 17th St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19146", + "phone": "215-732-9026", + "fax": "215-732-8257", + "email": "simone@lundie.com", + "web": "http://www.simonelundie.com", + "followers": 2165 + }, + { + "firstname": "Ross", + "lastname": "Spurger", + "company": "Hoover Group Inc", + "address": "710 S Illinois Ave", + "city": "Carbondale", + "county": "Jackson", + "state": "IL", + "zip": "62901", + "phone": "618-453-9968", + "fax": "618-453-6144", + "email": "ross@spurger.com", + "web": "http://www.rossspurger.com", + "followers": 361 + }, + { + "firstname": "Abel", + "lastname": "Tuter", + "company": "Wernsing Plumbing & Heating", + "address": "2457 Perkiomen Ave", + "city": "Reading", + "county": "Berks", + "state": "PA", + "zip": "19606", + "phone": "610-370-6549", + "fax": "610-370-0856", + "email": "abel@tuter.com", + "web": "http://www.abeltuter.com", + "followers": 6215 + }, + { + "firstname": "Beverley", + "lastname": "Bunche", + "company": "Nurses Organization Vets Affrs", + "address": "1727 Nw 79th Ave", + "city": "Miami", + "county": "Miami-Dade", + "state": "FL", + "zip": "33126", + "phone": "305-591-4141", + "fax": "305-591-7751", + "email": "beverley@bunche.com", + "web": "http://www.beverleybunche.com", + "followers": 8121 + }, + { + "firstname": "Lizzie", + "lastname": "Torregrossa", + "company": "Salerno & Son", + "address": "78 Faunce Corner Rd", + "city": "North Dartmouth", + "county": "Bristol", + "state": "MA", + "zip": "02747", + "phone": "508-997-1409", + "fax": "508-997-9846", + "email": "lizzie@torregrossa.com", + "web": "http://www.lizzietorregrossa.com", + "followers": 1134 + }, + { + "firstname": "Tia", + "lastname": "Neumaier", + "company": "Carterville Mini Storage", + "address": "2500 Maitland Center Pky", + "city": "Maitland", + "county": "Orange", + "state": "FL", + "zip": "32751", + "phone": "407-660-7426", + "fax": "407-660-7628", + "email": "tia@neumaier.com", + "web": "http://www.tianeumaier.com", + "followers": 3010 + }, + { + "firstname": "Lesa", + "lastname": "Chantry", + "company": "Lutz Cichy Selig & Zeronda", + "address": "1119 N Bodine St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19123", + "phone": "215-923-0136", + "fax": "215-923-9492", + "email": "lesa@chantry.com", + "web": "http://www.lesachantry.com", + "followers": 5201 + }, + { + "firstname": "Marcelo", + "lastname": 
"Arostegui", + "company": "Moraschs Quality Meats", + "address": "76 Mall Comp", + "city": "Branson", + "county": "Taney", + "state": "MO", + "zip": "65616", + "phone": "417-336-9702", + "fax": "417-336-2664", + "email": "marcelo@arostegui.com", + "web": "http://www.marceloarostegui.com", + "followers": 1195 + }, + { + "firstname": "Jimmie", + "lastname": "Hardgrove", + "company": "Prosthodontic Associates", + "address": "305 E 47th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10017", + "phone": "212-980-0445", + "fax": "212-980-6914", + "email": "jimmie@hardgrove.com", + "web": "http://www.jimmiehardgrove.com", + "followers": 1002 + }, + { + "firstname": "Renae", + "lastname": "Eldrige", + "company": "Pexco Packaging Corp", + "address": "1716 Rt 77", + "city": "Attica", + "county": "Wyoming", + "state": "NY", + "zip": "14011", + "phone": "585-591-3118", + "fax": "585-591-9104", + "email": "renae@eldrige.com", + "web": "http://www.renaeeldrige.com", + "followers": 4105 + }, + { + "firstname": "Tisha", + "lastname": "Gorder", + "company": "Paroly Rampart Sec Systems", + "address": "77 W Huron St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60610", + "phone": "312-642-2897", + "fax": "312-642-2664", + "email": "tisha@gorder.com", + "web": "http://www.tishagorder.com", + "followers": 3305 + }, + { + "firstname": "Clarice", + "lastname": "Knower", + "company": "Yaffa, Andrew B Esq", + "address": "210 W 79th St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60620", + "phone": "773-846-1489", + "fax": "773-846-1462", + "email": "clarice@knower.com", + "web": "http://www.clariceknower.com", + "followers": 5497 + }, + { + "firstname": "Sybil", + "lastname": "Marmerchant", + "company": "Blvd Distillers & Importers", + "address": "12 E 7th Ave", + "city": "York", + "county": "York", + "state": "PA", + "zip": "17404", + "phone": "717-845-4718", + "fax": "717-845-4736", + "email": "sybil@marmerchant.com", + "web": "http://www.sybilmarmerchant.com", + "followers": 4656 + }, + { + "firstname": "Floyd", + "lastname": "Veazey", + "company": "Ramada Inn San Fran Arprt N", + "address": "2877 E Florence Ave", + "city": "Huntington Park", + "county": "Los Angeles", + "state": "CA", + "zip": "90255", + "phone": "323-862-9133", + "fax": "323-862-5589", + "email": "floyd@veazey.com", + "web": "http://www.floydveazey.com", + "followers": 4208 + }, + { + "firstname": "Reyna", + "lastname": "Bangle", + "company": "C & R Contractors Inc", + "address": "85 Long Island Expy", + "city": "New Hyde Park", + "county": "Nassau", + "state": "NY", + "zip": "11040", + "phone": "516-627-8715", + "fax": "516-627-9033", + "email": "reyna@bangle.com", + "web": "http://www.reynabangle.com", + "followers": 9261 + }, + { + "firstname": "Owen", + "lastname": "Sparacino", + "company": "Murtha, Thomas D Esq", + "address": "2865 Poplar Ave", + "city": "Memphis", + "county": "Shelby", + "state": "TN", + "zip": "38111", + "phone": "901-324-9274", + "fax": "901-324-4381", + "email": "owen@sparacino.com", + "web": "http://www.owensparacino.com", + "followers": 154 + }, + { + "firstname": "Eli", + "lastname": "Bettner", + "company": "Shea, David J Esq", + "address": "825 W Main Ave", + "city": "Brewster", + "county": "Okanogan", + "state": "WA", + "zip": "98812", + "phone": "509-689-7964", + "fax": "509-689-1394", + "email": "eli@bettner.com", + "web": "http://www.elibettner.com", + "followers": 9176 + }, + { + "firstname": "Taylor", + "lastname": "Fogerty", + 
"company": "Khoury Factory Outlet", + "address": "2000 W 120th Ave", + "city": "Denver", + "county": "Adams", + "state": "CO", + "zip": "80234", + "phone": "303-465-0070", + "fax": "303-465-2109", + "email": "taylor@fogerty.com", + "web": "http://www.taylorfogerty.com", + "followers": 2464 + }, + { + "firstname": "Reva", + "lastname": "Lecates", + "company": "First American Rlty Assoc Inc", + "address": "829 S 14th St", + "city": "Fernandina Beach", + "county": "Nassau", + "state": "FL", + "zip": "32034", + "phone": "904-261-0604", + "fax": "904-261-2123", + "email": "reva@lecates.com", + "web": "http://www.revalecates.com", + "followers": 8963 + }, + { + "firstname": "Rodrigo", + "lastname": "Wildrick", + "company": "Midwest Wrecking Company Inc", + "address": "1201 34th St N", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33713", + "phone": "727-323-5060", + "fax": "727-323-5982", + "email": "rodrigo@wildrick.com", + "web": "http://www.rodrigowildrick.com", + "followers": 1406 + }, + { + "firstname": "George", + "lastname": "Tukis", + "company": "Andow Personnel Services", + "address": "8 Inn Of Americus", + "city": "Americus", + "county": "Sumter", + "state": "GA", + "zip": "31709", + "phone": "229-924-0263", + "fax": "229-924-4251", + "email": "george@tukis.com", + "web": "http://www.georgetukis.com", + "followers": 5243 + }, + { + "firstname": "Titus", + "lastname": "Rodreguez", + "company": "Vasquez & Co", + "address": "7677 Engineer Rd", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92111", + "phone": "858-571-0819", + "fax": "858-571-9047", + "email": "titus@rodreguez.com", + "web": "http://www.titusrodreguez.com", + "followers": 9258 + }, + { + "firstname": "Emilio", + "lastname": "Lampkin", + "company": "Hendricks, Kenneth J Cpa", + "address": "834 Lois Dr", + "city": "Williamstown", + "county": "Gloucester", + "state": "NJ", + "zip": "08094", + "phone": "856-629-8933", + "fax": "856-629-3337", + "email": "emilio@lampkin.com", + "web": "http://www.emiliolampkin.com", + "followers": 5810 + }, + { + "firstname": "Maryjane", + "lastname": "Arata", + "company": "Larson, John R Esq", + "address": "342 Wolverine Way", + "city": "Sparks", + "county": "Washoe", + "state": "NV", + "zip": "89431", + "phone": "775-352-5822", + "fax": "775-352-1557", + "email": "maryjane@arata.com", + "web": "http://www.maryjanearata.com", + "followers": 5044 + }, + { + "firstname": "Marcie", + "lastname": "Shulz", + "company": "United Printers Inc", + "address": "Blanco Rd", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78216", + "phone": "210-524-6711", + "fax": "210-524-1693", + "email": "marcie@shulz.com", + "web": "http://www.marcieshulz.com", + "followers": 4807 + }, + { + "firstname": "Celia", + "lastname": "Slavin", + "company": "Acra Aerospace Inc", + "address": "8750 W Bryn Mawr Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60631", + "phone": "773-256-3550", + "fax": "773-256-2162", + "email": "celia@slavin.com", + "web": "http://www.celiaslavin.com", + "followers": 3493 + }, + { + "firstname": "Suzette", + "lastname": "Devaughan", + "company": "Anchor Graphics Inc", + "address": "385 Prospect Ave", + "city": "Hackensack", + "county": "Bergen", + "state": "NJ", + "zip": "07601", + "phone": "201-342-8964", + "fax": "201-342-9862", + "email": "suzette@devaughan.com", + "web": "http://www.suzettedevaughan.com", + "followers": 6410 + }, + { + "firstname": "Christian", + "lastname": 
"Marnell", + "company": "Wenick, George D Esq", + "address": "2750 Springboro Rd", + "city": "Dayton", + "county": "Montgomery", + "state": "OH", + "zip": "45439", + "phone": "937-293-9728", + "fax": "937-293-6782", + "email": "christian@marnell.com", + "web": "http://www.christianmarnell.com", + "followers": 3071 + }, + { + "firstname": "Misty", + "lastname": "Ericksen", + "company": "Graham & Associates Inc", + "address": "1808 2nd", + "city": "Cheney", + "county": "Spokane", + "state": "WA", + "zip": "99004", + "phone": "509-235-8873", + "fax": "509-235-1856", + "email": "misty@ericksen.com", + "web": "http://www.mistyericksen.com", + "followers": 1779 + }, + { + "firstname": "Bert", + "lastname": "Schadle", + "company": "Guaranty Chevrolet Geo", + "address": "500 Sw Loop #-820", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76115", + "phone": "817-921-5560", + "fax": "817-921-5913", + "email": "bert@schadle.com", + "web": "http://www.bertschadle.com", + "followers": 9753 + }, + { + "firstname": "Bertram", + "lastname": "Quertermous", + "company": "Florida Mining & Materials", + "address": "222 Delaware Ave", + "city": "Wilmington", + "county": "New Castle", + "state": "DE", + "zip": "19801", + "phone": "302-655-8039", + "fax": "302-655-4522", + "email": "bertram@quertermous.com", + "web": "http://www.bertramquertermous.com", + "followers": 4349 + }, + { + "firstname": "Buster", + "lastname": "Wubbel", + "company": "Collins, Joseph B Esq", + "address": "315 Us Rt 1", + "city": "Fairless Hills", + "county": "Bucks", + "state": "PA", + "zip": "19030", + "phone": "215-943-3689", + "fax": "215-943-6049", + "email": "buster@wubbel.com", + "web": "http://www.busterwubbel.com", + "followers": 6911 + }, + { + "firstname": "Mildred", + "lastname": "Gallegas", + "company": "Rogers, William A Jr", + "address": "310 Ridge Rd", + "city": "Claymont", + "county": "New Castle", + "state": "DE", + "zip": "19703", + "phone": "302-792-8044", + "fax": "302-792-1282", + "email": "mildred@gallegas.com", + "web": "http://www.mildredgallegas.com", + "followers": 8618 + }, + { + "firstname": "Pat", + "lastname": "Hoshaw", + "company": "Jorgensen, James L Esq", + "address": "30 Matthews Rd", + "city": "Malvern", + "county": "Chester", + "state": "PA", + "zip": "19355", + "phone": "610-644-7836", + "fax": "610-644-3252", + "email": "pat@hoshaw.com", + "web": "http://www.pathoshaw.com", + "followers": 6030 + }, + { + "firstname": "Marshall", + "lastname": "Hutch", + "company": "Nako, Joy Y", + "address": "Route 519", + "city": "Eighty Four", + "county": "Washington", + "state": "PA", + "zip": "15330", + "phone": "724-225-1729", + "fax": "724-225-7064", + "email": "marshall@hutch.com", + "web": "http://www.marshallhutch.com", + "followers": 6225 + }, + { + "firstname": "Don", + "lastname": "Mestler", + "company": "Coldwell Bnkr Hearthside Rltrs", + "address": "State Hwy #-31", + "city": "Pennington", + "county": "Mercer", + "state": "NJ", + "zip": "08534", + "phone": "609-737-2033", + "fax": "609-737-2374", + "email": "don@mestler.com", + "web": "http://www.donmestler.com", + "followers": 6967 + }, + { + "firstname": "Emery", + "lastname": "Reek", + "company": "Metri Tech Engineering Inc", + "address": "85 S Beachview Dr", + "city": "Jekyll Island", + "county": "Glynn", + "state": "GA", + "zip": "31527", + "phone": "912-635-3866", + "fax": "912-635-4039", + "email": "emery@reek.com", + "web": "http://www.emeryreek.com", + "followers": 2476 + }, + { + "firstname": "Ray", + "lastname": 
"Srock", + "company": "Tilt Lock Inc", + "address": "8700 E Pinnacle Peak Rd", + "city": "Scottsdale", + "county": "Maricopa", + "state": "AZ", + "zip": "85255", + "phone": "480-585-6138", + "fax": "480-585-4983", + "email": "ray@srock.com", + "web": "http://www.raysrock.com", + "followers": 2387 + }, + { + "firstname": "Nickolas", + "lastname": "Khosravi", + "company": "Brennan, Mary V Esq", + "address": "120 Tustin Ave", + "city": "Newport Beach", + "county": "Orange", + "state": "CA", + "zip": "92663", + "phone": "949-646-6578", + "fax": "949-646-0043", + "email": "nickolas@khosravi.com", + "web": "http://www.nickolaskhosravi.com", + "followers": 3074 + }, + { + "firstname": "Aileen", + "lastname": "Mottern", + "company": "Bennett Hallmark Cards", + "address": "2053 Lemoine Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-944-1664", + "fax": "201-944-3382", + "email": "aileen@mottern.com", + "web": "http://www.aileenmottern.com", + "followers": 6519 + }, + { + "firstname": "Chad", + "lastname": "Araiza", + "company": "Christiansen, David L Cpa", + "address": "536 Grand Ave", + "city": "Schofield", + "county": "Marathon", + "state": "WI", + "zip": "54476", + "phone": "715-359-8700", + "fax": "715-359-3579", + "email": "chad@araiza.com", + "web": "http://www.chadaraiza.com", + "followers": 4865 + }, + { + "firstname": "Beverly", + "lastname": "Cambel", + "company": "Mcintyre Mcintyre & Mcintyre", + "address": "630 W 8th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-272-3953", + "fax": "907-272-3618", + "email": "beverly@cambel.com", + "web": "http://www.beverlycambel.com", + "followers": 2393 + }, + { + "firstname": "Janice", + "lastname": "Twiet", + "company": "Henley, Cal Pa", + "address": "136 S Riverside Ave", + "city": "Rialto", + "county": "San Bernardino", + "state": "CA", + "zip": "92376", + "phone": "909-874-6739", + "fax": "909-874-2594", + "email": "janice@twiet.com", + "web": "http://www.janicetwiet.com", + "followers": 7340 + }, + { + "firstname": "Byron", + "lastname": "Fortuna", + "company": "Jackson & Collins Pa", + "address": "700 Sw Higgins Ave", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59803", + "phone": "406-549-8320", + "fax": "406-549-4641", + "email": "byron@fortuna.com", + "web": "http://www.byronfortuna.com", + "followers": 8913 + }, + { + "firstname": "Lynette", + "lastname": "Setlock", + "company": "George S Olive & Co", + "address": "614 W Superior Ave", + "city": "Cleveland", + "county": "Cuyahoga", + "state": "OH", + "zip": "44113", + "phone": "216-566-2265", + "fax": "216-566-2299", + "email": "lynette@setlock.com", + "web": "http://www.lynettesetlock.com", + "followers": 8002 + }, + { + "firstname": "Willard", + "lastname": "Roughen", + "company": "Nakamura Oyama & Assocs Inc", + "address": "9 5officce Product Cent", + "city": "Arlington", + "county": "Tarrant", + "state": "TX", + "zip": "76012", + "phone": "817-265-1847", + "fax": "817-265-0322", + "email": "willard@roughen.com", + "web": "http://www.willardroughen.com", + "followers": 861 + }, + { + "firstname": "Elisa", + "lastname": "Gracely", + "company": "Alexander, Christine T Esq", + "address": "1805 Kings Hwy", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11229", + "phone": "718-627-1421", + "fax": "718-627-9346", + "email": "elisa@gracely.com", + "web": "http://www.elisagracely.com", + "followers": 5321 + }, + { + "firstname": 
"Jeri", + "lastname": "Farstvedt", + "company": "Regan, Denis J Esq", + "address": "16133 Ventura Blvd #-700", + "city": "Encino", + "county": "Los Angeles", + "state": "CA", + "zip": "91436", + "phone": "818-986-8843", + "fax": "818-986-6786", + "email": "jeri@farstvedt.com", + "web": "http://www.jerifarstvedt.com", + "followers": 9529 + }, + { + "firstname": "Stacey", + "lastname": "Blow", + "company": "Schechter, Jeffrey S Esq", + "address": "136 S Riverside Ave", + "city": "Rialto", + "county": "San Bernardino", + "state": "CA", + "zip": "92376", + "phone": "909-874-0274", + "fax": "909-874-8538", + "email": "stacey@blow.com", + "web": "http://www.staceyblow.com", + "followers": 6685 + }, + { + "firstname": "Bryan", + "lastname": "Rovell", + "company": "All N All Shop", + "address": "90 Hackensack St", + "city": "East Rutherford", + "county": "Bergen", + "state": "NJ", + "zip": "07073", + "phone": "201-939-2788", + "fax": "201-939-9079", + "email": "bryan@rovell.com", + "web": "http://www.bryanrovell.com", + "followers": 2687 + }, + { + "firstname": "Joey", + "lastname": "Bolick", + "company": "Utility Trailer Sales", + "address": "7700 N Council Rd", + "city": "Oklahoma City", + "county": "Oklahoma", + "state": "OK", + "zip": "73132", + "phone": "405-728-5972", + "fax": "405-728-5244", + "email": "joey@bolick.com", + "web": "http://www.joeybolick.com", + "followers": 8465 + } +] diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb new file mode 100644 index 0000000..2039b59 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb @@ -0,0 +1,99 @@ +require 'algolia/error' + +module Algolia + # + # A class which encapsulates the HTTPS communication with the Algolia + # API server for cross-app operations. + # + class AccountClient + class << self + # + # Copies settings, synonyms, rules and objects from the source index to the + # destination index. The replicas of the source index should not be copied. + # + # Throw an exception if the destination index already exists + # Throw an exception if the indices are on the same application + # + # @param source_index the source index object + # @param destination_index the destination index object + # @param request_options contains extra parameters to send with your query + # + def copy_index(source_index, destination_index, request_options = {}) + raise AlgoliaError.new('The indexes are in the same application. Use Algolia::Client.copy_index instead.') if source_index.client.application_id == destination_index.client.application_id + + begin + settings = destination_index.get_settings() + rescue AlgoliaError + # Destination index does not exists. We can proceed. + else + raise AlgoliaError.new("Destination index already exists. 
Please delete it before copying index across applications.") + end + + responses = [] + + settings = source_index.get_settings() + responses << destination_index.set_settings(settings, {}, request_options) + + synonyms = [] + source_index.export_synonyms(100, request_options) do |synonym| + synonym.delete('_highlightResult') + synonyms << synonym + end + + responses << destination_index.batch_synonyms(synonyms, false, false, request_options) + + rules = [] + source_index.export_rules(100, request_options) do |rule| + rule.delete('_highlightResult') + rules << rule + end + responses << destination_index.batch_rules(rules, false, false, request_options) + + # Copy objects + responses = [] + batch = [] + batch_size = 1000 + count = 0 + + source_index.browse do |obj| + batch << obj + count += 1 + + if count == batch_size + responses << destination_index.save_objects(batch, request_options) + batch = [] + count = 0 + end + end + + if batch.any? + responses << destination_index.save_objects(batch, request_options) + end + + responses + end + + # + # The method copy settings, synonyms, rules and objects from the source index + # to the destination index and wait end of indexing. The replicas of the + # source index should not be copied + # + # Throw an exception if the destination index already exists + # Throw an exception if the indices are on the same application + # + # @param source_index the source index object + # @param destination_index the destination index object + # @param request_options contains extra parameters to send with your query + # + def copy_index!(source_index, destination_index, request_options = {}) + responses = self.copy_index(source_index, destination_index, request_options) + + responses.each do |res| + destination_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + responses + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb new file mode 100644 index 0000000..5c9c8b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb @@ -0,0 +1,75 @@ +module Algolia + + class Analytics + attr_reader :ssl, :ssl_version, :headers + API_URL='https://analytics.algolia.com' + + def initialize(client, params) + @client = client + @headers = params[:headers] + end + + def get_ab_tests(params = {}) + params = { + :offset => 0, + :limit => 10, + }.merge(params) + + perform_request(:GET, Protocol.ab_tests_uri, params) + end + + def get_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == '' + + perform_request(:GET, Protocol.ab_tests_uri(ab_test_id)) + end + + def add_ab_test(ab_test) + perform_request(:POST, Protocol.ab_tests_uri, {}, ab_test.to_json) + end + + def stop_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == '' + + perform_request(:POST, Protocol.ab_tests_stop_uri(ab_test_id)) + end + + def delete_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? 
|| ab_test_id == '' + + perform_request(:DELETE, Protocol.ab_tests_uri(ab_test_id)) + end + + def wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + @client.wait_task(index_name, taskID, time_before_retry, request_options) + end + + private + + def perform_request(method, url, params = {}, data = {}) + http = HTTPClient.new + + url = API_URL + url + + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + url << "?" + Protocol.to_query(encoded_params) + + response = case method + when :GET + http.get(url, { :header => @headers }) + when :POST + http.post(url, { :body => data, :header => @headers }) + when :DELETE + http.delete(url, { :header => @headers }) + end + + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content}") + end + + JSON.parse(response.content) + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb new file mode 100644 index 0000000..08c72f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb @@ -0,0 +1,1131 @@ +require 'algolia/protocol' +require 'algolia/error' +require 'algolia/version' +require 'json' +require 'zlib' +require 'openssl' +require 'base64' + +module Algolia + WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY = 100 + + # + # A class which encapsulates the HTTPS communication with the Algolia + # API server. Uses the HTTPClient library for low-level HTTP communication. + # + class Client + attr_reader :ssl, :ssl_version, :hosts, :search_hosts, :application_id, :api_key, :headers, :connect_timeout, :send_timeout, :receive_timeout, :search_timeout, :batch_timeout + + DEFAULT_CONNECT_TIMEOUT = 2 + DEFAULT_RECEIVE_TIMEOUT = 30 + DEFAULT_SEND_TIMEOUT = 30 + DEFAULT_BATCH_TIMEOUT = 120 + DEFAULT_SEARCH_TIMEOUT = 5 + DEFAULT_USER_AGENT = ["Algolia for Ruby (#{::Algolia::VERSION})", "Ruby (#{RUBY_VERSION})"] + + def initialize(data = {}) + raise ArgumentError.new('No APPLICATION_ID provided, please set :application_id') if data[:application_id].nil? + + @ssl = data[:ssl].nil? ? true : data[:ssl] + @ssl_version = data[:ssl_version].nil? ? nil : data[:ssl_version] + @gzip = data[:gzip].nil? ? 
true : data[:gzip] + @application_id = data[:application_id] + @api_key = data[:api_key] + @hosts = data[:hosts] || (["#{@application_id}.algolia.net"] + 1.upto(3).map { |i| "#{@application_id}-#{i}.algolianet.com" }.shuffle) + @search_hosts = data[:search_hosts] || data[:hosts] || (["#{@application_id}-dsn.algolia.net"] + 1.upto(3).map { |i| "#{@application_id}-#{i}.algolianet.com" }.shuffle) + @connect_timeout = data[:connect_timeout] || DEFAULT_CONNECT_TIMEOUT + @send_timeout = data[:send_timeout] || DEFAULT_SEND_TIMEOUT + @batch_timeout = data[:batch_timeout] || DEFAULT_BATCH_TIMEOUT + @receive_timeout = data[:receive_timeout] || DEFAULT_RECEIVE_TIMEOUT + @search_timeout = data[:search_timeout] || DEFAULT_SEARCH_TIMEOUT + @headers = { + Protocol::HEADER_API_KEY => api_key, + Protocol::HEADER_APP_ID => application_id, + 'Content-Type' => 'application/json; charset=utf-8', + 'User-Agent' => DEFAULT_USER_AGENT.push(data[:user_agent]).compact.join('; ') + } + end + + def destroy + Thread.current["algolia_search_hosts_#{application_id}"] = nil + Thread.current["algolia_hosts_#{application_id}"] = nil + Thread.current["algolia_host_index_#{application_id}"] = nil + Thread.current["algolia_search_host_index_#{application_id}"] = nil + end + + # + # Initialize a new index + # + def init_index(name) + Index.new(name, self) + end + + # + # Initialize analytics helper + # + def init_analytics() + Analytics.new(self, { :headers => @headers }) + end + + # + # Allow to set custom headers + # + def set_extra_header(key, value) + headers[key] = value + end + + # + # Allow to use IP rate limit when you have a proxy between end-user and Algolia. + # This option will set the X-Forwarded-For HTTP header with the client IP and the X-Forwarded-API-Key with the API Key having rate limits. 
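+ #
+ # Illustrative usage (editor's sketch, not part of the upstream gem, showing how
+ # enable/disable_rate_limit_forward bracket requests made on behalf of an end user;
+ # the application id, keys, IP address and index name below are placeholders):
+ #
+ #   client = Algolia::Client.new(:application_id => 'YourAppID', :api_key => 'YourAdminAPIKey')
+ #   client.enable_rate_limit_forward('YourAdminAPIKey', '203.0.113.42', 'RateLimitedSearchKey')
+ #   client.init_index('contacts').search('jeri')   # counted against the forwarded end-user IP
+ #   client.disable_rate_limit_forward
+ #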
+ # + # @param admin_api_key the admin API Key you can find in your dashboard + # @param end_user_ip the end user IP (you can use both IPV4 or IPV6 syntax) + # @param rate_limit_api_key the API key on which you have a rate limit + # + def enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + headers[Protocol::HEADER_API_KEY] = admin_api_key + headers[Protocol::HEADER_FORWARDED_IP] = end_user_ip + headers[Protocol::HEADER_FORWARDED_API_KEY] = rate_limit_api_key + end + + # + # Disable IP rate limit enabled with enableRateLimitForward() function + # + def disable_rate_limit_forward + headers[Protocol::HEADER_API_KEY] = api_key + headers.delete(Protocol::HEADER_FORWARDED_IP) + headers.delete(Protocol::HEADER_FORWARDED_API_KEY) + end + + # + # Convenience method thats wraps enable_rate_limit_forward/disable_rate_limit_forward + # + def with_rate_limits(end_user_ip, rate_limit_api_key, &block) + enable_rate_limit_forward(api_key, end_user_ip, rate_limit_api_key) + begin + yield + ensure + disable_rate_limit_forward + end + end + + # + # This method allows to query multiple indexes with one API call + # + # @param queries the array of hash representing the query and associated index name + # @param options - accepts those keys: + # - index_name_key the name of the key used to fetch the index_name (:index_name by default) + # - strategy define the strategy applied on the sequential searches (none by default) + # - request_options contains extra parameters to send with your query + # + def multiple_queries(queries, options = nil, strategy = nil) + if options.is_a?(Hash) + index_name_key = options.delete(:index_name_key) || options.delete('index_name_key') + strategy = options.delete(:strategy) || options.delete('strategy') + request_options = options.delete(:request_options) || options.delete('request_options') + else + # Deprecated def multiple_queries(queries, index_name_key, strategy) + index_name_key = options + end + index_name_key ||= :index_name + strategy ||= 'none' + request_options ||= {} + + requests = { + :requests => queries.map do |query| + query = query.dup + index_name = query.delete(index_name_key) || query.delete(index_name_key.to_s) + raise ArgumentError.new("Missing '#{index_name_key}' option") if index_name.nil? + encoded_params = Hash[query.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + { :indexName => index_name, :params => Protocol.to_query(encoded_params) } + end + } + post(Protocol.multiple_queries_uri(strategy), requests.to_json, :search, request_options) + end + + # + # Get objects by objectID across multiple indexes + # + # @param requests [ + # { "indexName" => index_name_1, "objectID" => "obj1" }, + # { "indexName" => index_name_2, "objectID" => "obj2" } + # ] + # + def multiple_get_objects(requests, request_options = {}) + post(Protocol.objects_uri, {:requests => requests}.to_json, :search, request_options) + end + + # + # List all existing indexes + # return an Answer object with answer in the form + # {"items": [{ "name": "contacts", "createdAt": "2013-01-18T15:33:13.556Z"}, + # {"name": "notes", "createdAt": "2013-01-18T15:33:13.556Z"}]} + # + # @param request_options contains extra parameters to send with your query + # + def list_indexes(request_options = {}) + get(Protocol.indexes_uri, :read, request_options) + end + + # + # Move an existing index. + # + # @param src_index the name of index to copy. 
+ # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def move_index(src_index, dst_index, request_options = {}) + request = { 'operation' => 'move', 'destination' => dst_index } + post(Protocol.index_operation_uri(src_index), request.to_json, :write, request_options) + end + + # + # Move an existing index and wait until the move has been processed + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def move_index!(src_index, dst_index, request_options = {}) + res = move_index(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def copy_index(src_index, dst_index, scope = nil, request_options = {}) + request = { 'operation' => 'copy', 'destination' => dst_index } + request['scope'] = scope unless scope.nil? + post(Protocol.index_operation_uri(src_index), request.to_json, :write, request_options) + end + + # + # Copy an existing index and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def copy_index!(src_index, dst_index, scope = nil, request_options = {}) + res = copy_index(src_index, dst_index, scope, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index settings. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName's settings (destination's settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_settings(src_index, dst_index, request_options = {}) + copy_index(src_index, dst_index, ['settings'], request_options) + end + + # + # Copy an existing index settings and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_settings!(src_index, dst_index, request_options = {}) + res = copy_settings(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index synonyms. + # + # @param src_index the name of index to copy. 
+ # @param dst_index the new index name that will contains a copy of srcIndexName's synonyms (destination's synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_synonyms(src_index, dst_index, request_options = {}) + copy_index(src_index, dst_index, ['synonyms'], request_options) + end + + # + # Copy an existing index synonyms and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName synonyms (destination synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_synonyms!(src_index, dst_index, request_options = {}) + res = copy_synonyms(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Copy an existing index rules. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName's rules (destination's rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_rules(src_index, dst_index, request_options = {}) + copy_index(src_index, dst_index, ['rules'], request_options) + end + + # + # Copy an existing index rules and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of srcIndexName rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def copy_rules!(src_index, dst_index, request_options = {}) + res = copy_rules(src_index, dst_index, request_options) + wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete an index + # @param name the name of the index to delete + # @param request_options contains extra parameters to send with your query + # + def delete_index(name, request_options = {}) + init_index(name).delete(request_options) + end + + # + # Delete an index and wait until the deletion has been processed. + # @param name the name of the index to delete + # @param request_options contains extra parameters to send with your query + # + def delete_index!(name, request_options = {}) + init_index(name).delete!(request_options) + end + + # + # Return last logs entries. + # + # @param options - accepts those keys: + # - offset Specify the first entry to retrieve (0-based, 0 is the most recent log entry) - Default = 0 + # - length Specify the maximum number of entries to retrieve starting at offset. Maximum allowed value: 1000 - Default = 10 + # - type Type of log entries to retrieve ("all", "query", "build" or "error") - Default = 'all' + # - request_options contains extra parameters to send with your query + # + def get_logs(options = nil, length = nil, type = nil) + if options.is_a?(Hash) + offset = options.delete('offset') || options.delete(:offset) + length = options.delete('length') || options.delete(:length) + type = options.delete('type') || options.delete(:type) + request_options = options.delete('request_options') || options.delete(:request_options) + else + # Deprecated def get_logs(offset, length, type) + offset = options + end + length ||= 10 + type = 'all' if type.nil? + type = type ? 
'error' : 'all' if type.is_a?(true.class) + request_options ||= {} + + get(Protocol.logs(offset, length, type), :write, request_options) + end + + # + # List all existing user keys with their associated ACLs + # + # @param request_options contains extra parameters to send with your query + # + def list_api_keys(request_options = {}) + get(Protocol.keys_uri, :read, request_options) + end + + # + # Get ACL of a user key + # + # @param request_options contains extra parameters to send with your query + # + def get_api_key(key, request_options = {}) + get(Protocol.key_uri(key), :read, request_options) + end + + # + # Create a new user key + # + # Deprecated call was add_api_key(acl, validity, maxQueriesPerIPPerHour, maxHitsPerQuery, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param object The list of parameters for this key. + # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def add_api_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + validity = 0 + unless request_options.is_a?(Hash) + validity = request_options + request_options = {} + end + + params[:indexes] = indexes if indexes + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + post(Protocol.keys_uri, params.to_json, :write, request_options) + end + + # + # Update a user key + # + # Deprecated call was update_api_key(key, acl, validity, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param key API Key to update + # @param object The list of parameters for this key. 
+ # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def update_api_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + validity = 0 + unless request_options.is_a?(Hash) + validity = request_options + request_options = {} + end + + params[:indexes] = indexes if indexes + params['validity'] = validity.to_i if validity != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + + put(Protocol.key_uri(key), params.to_json, :write, request_options) + end + + # + # Delete an existing user key + # + def delete_api_key(key, request_options = {}) + delete(Protocol.key_uri(key), :write, request_options) + end + + # + # Restore a deleted api key + # + def restore_api_key(key, request_options = {}) + post(Protocol.restore_key_uri(key), :write, request_options) + end + + # + # Send a batch request targeting multiple indices + # + def batch(operations, request_options = {}) + post(Protocol.batch_uri, { 'requests' => operations }.to_json, :batch, request_options) + end + + # + # Send a batch request targeting multiple indices and wait the end of the indexing + # + def batch!(operations, request_options = {}) + res = batch(operations, request_options) + res['taskID'].each do |index, taskID| + wait_task(index, taskID, WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + end + + # + # Check the status of a task on the server. + # All server task are asynchronous and you can check the status of a task with this method. + # + # @param index_name the index name owning the taskID + # @param taskID the id of the task returned by server + # @param request_options contains extra parameters to send with your query + # + def get_task_status(index_name, taskID, request_options = {}) + get(Protocol.task_uri(index_name, taskID), :read, request_options)['status'] + end + + # + # Wait the publication of a task on the server. + # All server task are asynchronous and you can check with this method that the task is published. 
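+ #
+ # Illustrative usage (editor's sketch, not part of the upstream gem; the index
+ # name and record are placeholders):
+ #
+ #   res = client.init_index('contacts').add_object({ 'firstname' => 'Jeri', 'company' => 'Algolia' })
+ #   client.wait_task('contacts', res['taskID'])   # polls until the engine reports 'published'
+ #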
+ # + # @param index_name the index name owning the taskID + # @param taskID the id of the task returned by server + # @param time_before_retry the time in milliseconds before retry (default = 100ms) + # @param request_options contains extra parameters to send with your query + # + def wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + loop do + status = get_task_status(index_name, taskID, request_options) + if status == 'published' + return + end + sleep(time_before_retry.to_f / 1000) + end + end + + def get_personalization_strategy(request_options = {}) + get(Protocol.personalization_strategy_uri, :read, request_options) + end + + def set_personalization_strategy(strategy, request_options = {}) + post(Protocol.personalization_strategy_uri, strategy.to_json, :write, request_options) + end + + # + # Multicluster management + # + def list_clusters(request_options = {}) + get(Protocol.clusters_uri, :read, request_options) + end + + def list_user_ids(page = 0, hits_per_page = 20, request_options = {}) + get(Protocol.list_ids_uri(page, hits_per_page), :read, request_options) + end + + def get_top_user_ids(request_options = {}) + get(Protocol.cluster_top_user_uri, :read, request_options) + end + + def assign_user_id(user_id, cluster_name, request_options = {}) + request_options = add_header_to_request_options(request_options, { :'X-Algolia-User-ID' => user_id}) + + body = { :cluster => cluster_name } + post(Protocol.cluster_mapping_uri, body.to_json, :write, request_options) + end + + def get_user_id(user_id, request_options = {}) + get(Protocol.cluster_mapping_uri(user_id), :read, request_options) + end + + def remove_user_id(user_id, request_options = {}) + request_options = add_header_to_request_options(request_options, { :'X-Algolia-User-ID' => user_id}) + + delete(Protocol.cluster_mapping_uri, :write, request_options) + end + + def search_user_id(query, cluster_name = nil, page = nil, hits_per_page = nil, request_options = {}) + body = { :query => query } + body[:cluster] = cluster_name unless cluster_name.nil? + body[:page] = page unless page.nil? + body[:hitsPerPage] = hits_per_page unless hits_per_page.nil? + post(Protocol.search_user_id_uri, body.to_json, :read, request_options) + end + + # Perform an HTTP request for the given uri and method + # with common basic response handling. Will raise a + # AlgoliaProtocolError if the response has an error status code, + # and will return the parsed JSON body on success, if there is one. + # + def request(uri, method, data = nil, type = :write, request_options = {}) + exceptions = [] + + connect_timeout = @connect_timeout + send_timeout = if type == :search + @search_timeout + elsif type == :batch + type = :write + @batch_timeout + else + @send_timeout + end + receive_timeout = type == :search ? @search_timeout : @receive_timeout + + thread_local_hosts(type != :write).each_with_index do |host, i| + connect_timeout += 2 if i == 2 + send_timeout += 10 if i == 2 + receive_timeout += 10 if i == 2 + + thread_index_key = type != :write ? 
"algolia_search_host_index_#{application_id}" : "algolia_host_index_#{application_id}" + Thread.current[thread_index_key] = host[:index] + host[:last_call] = Time.now.to_i + + host[:session].connect_timeout = connect_timeout + host[:session].send_timeout = send_timeout + host[:session].receive_timeout = receive_timeout + begin + return perform_request(host[:session], host[:base_url] + uri, method, data, request_options) + rescue AlgoliaProtocolError => e + raise if e.code / 100 == 4 + exceptions << e + rescue => e + exceptions << e + end + host[:session].reset_all + end + raise AlgoliaProtocolError.new(0, "Cannot reach any host: #{exceptions.map { |e| e.to_s }.join(', ')}") + end + + def get(uri, type = :write, request_options = {}) + request(uri, :GET, nil, type, request_options) + end + + def post(uri, body = {}, type = :write, request_options = {}) + request(uri, :POST, body, type, request_options) + end + + def put(uri, body = {}, type = :write, request_options = {}) + request(uri, :PUT, body, type, request_options) + end + + def delete(uri, type = :write, request_options = {}) + request(uri, :DELETE, nil, type, request_options) + end + + private + + # + # This method returns a thread-local array of sessions + # + def thread_local_hosts(read) + thread_hosts_key = read ? "algolia_search_hosts_#{application_id}" : "algolia_hosts_#{application_id}" + Thread.current[thread_hosts_key] ||= (read ? search_hosts : hosts).each_with_index.map do |host, i| + client = HTTPClient.new + client.ssl_config.ssl_version = @ssl_version if @ssl && @ssl_version + client.transparent_gzip_decompression = @gzip + client.ssl_config.add_trust_ca File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'resources', 'ca-bundle.crt')) + { + :index => i, + :base_url => "http#{@ssl ? 's' : ''}://#{host}", + :session => client, + :last_call => nil + } + end + hosts = Thread.current[thread_hosts_key] + thread_index_key = read ? "algolia_search_host_index_#{application_id}" : "algolia_host_index_#{application_id}" + current_host = Thread.current[thread_index_key].to_i # `to_i` to ensure first call is 0 + # we want to always target host 0 first + # if the current host is not 0, then we want to use it first only if (we never used it OR we're using it since less than 1 minute) + if current_host != 0 && (hosts[current_host][:last_call].nil? 
|| hosts[current_host][:last_call] > Time.now.to_i - 60) + # first host will be `current_host` + first = hosts[current_host] + [first] + hosts.reject { |h| h[:index] == 0 || h == first } + hosts.select { |h| h[:index] == 0 } + else + # first host will be `0` + hosts + end + end + + def perform_request(session, url, method, data, request_options) + hs = {} + extra_headers = request_options[:headers] || request_options['headers'] || {} + @headers.each { |key, val| hs[key.to_s] = val } + extra_headers.each { |key, val| hs[key.to_s] = val } + response = case method + when :GET + session.get(url, { :header => hs }) + when :POST + session.post(url, { :body => data, :header => hs }) + when :PUT + session.put(url, { :body => data, :header => hs }) + when :DELETE + session.delete(url, { :header => hs }) + end + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content} (#{response.code})") + end + return JSON.parse(response.content) + end + + def add_header_to_request_options(request_options, headers_to_add) + if !request_options['headers'].is_a?(Hash) + if request_options[:headers].is_a?(Hash) + request_options['headers'] = request_options[:headers] + request_options.delete(:headers) + else + request_options['headers'] = {} + end + end + + request_options['headers'].merge!(headers_to_add) + request_options + end + + # Deprecated + alias_method :list_user_keys, :list_api_keys + alias_method :get_user_key, :get_api_key + alias_method :add_user_key, :add_api_key + alias_method :update_user_key, :update_api_key + alias_method :delete_user_key, :delete_api_key + end + + # Module methods + # ------------------------------------------------------------ + + # A singleton client + # Always use Algolia.client to retrieve the client object. + @@client = nil + + # + # Initialize the singleton instance of Client which is used by all API methods + # + def Algolia.init(options = {}) + application_id = ENV['ALGOLIA_APP_ID'] || ENV['ALGOLIA_API_ID'] || ENV['ALGOLIA_APPLICATION_ID'] + api_key = ENV['ALGOLIA_REST_API_KEY'] || ENV['ALGOLIA_API_KEY'] + + defaulted = { :application_id => application_id, :api_key => api_key } + defaulted.merge!(options) + + @@client = Client.new(defaulted) + end + + # + # Allow to set custom headers + # + def Algolia.set_extra_header(key, value) + Algolia.client.set_extra_header(key, value) + end + + # + # Allow to use IP rate limit when you have a proxy between end-user and Algolia. + # This option will set the X-Forwarded-For HTTP header with the client IP and the + # X-Forwarded-API-Key with the API Key having rate limits. 
+ # + # @param admin_api_key the admin API Key you can find in your dashboard + # @param end_user_ip the end user IP (you can use both IPV4 or IPV6 syntax) + # @param rate_limit_api_key the API key on which you have a rate limit + # + def Algolia.enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + Algolia.client.enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + end + + # + # Disable IP rate limit enabled with enableRateLimitForward() function + # + def Algolia.disable_rate_limit_forward + Algolia.client.disable_rate_limit_forward + end + + # + # Convenience method thats wraps enable_rate_limit_forward/disable_rate_limit_forward + # + def Algolia.with_rate_limits(end_user_ip, rate_limit_api_key, &block) + Algolia.client.with_rate_limits(end_user_ip, rate_limit_api_key, &block) + end + + # + # Generate a secured and public API Key from a list of tagFilters and an + # optional user token identifying the current user + # + # @param private_api_key your private API Key + # @param tag_filters the list of tags applied to the query (used as security) + # @param user_token an optional token identifying the current user + # + def Algolia.generate_secured_api_key(private_api_key, tag_filters_or_params, user_token = nil) + if tag_filters_or_params.is_a?(Hash) && user_token.nil? + encoded_params = Hash[tag_filters_or_params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + query_str = Protocol.to_query(encoded_params) + hmac = OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), private_api_key, query_str) + Base64.encode64("#{hmac}#{query_str}").gsub("\n", '') + else + tag_filters = if tag_filters_or_params.is_a?(Array) + tag_filters = tag_filters_or_params.map { |t| t.is_a?(Array) ? "(#{t.join(',')})" : t }.join(',') + else + tag_filters_or_params + end + raise ArgumentError.new('Attribute "tag_filters" must be a list of tags') if !tag_filters.is_a?(String) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), private_api_key, "#{tag_filters}#{user_token.to_s}") + end + end + + # + # Returns the remaining validity time for the given API key in seconds + # + # @param [String] secured_api_key the secured API key to check + # + # @return [Integer] remaining validity in seconds + # + def Algolia.get_secured_api_key_remaining_validity(secured_api_key) + now = Time.now.to_i + decoded_key = Base64.decode64(secured_api_key) + regex = 'validUntil=(\d+)' + matches = decoded_key.match(regex) + + if matches === nil + raise ValidUntilNotFoundError.new('The SecuredAPIKey doesn\'t have a validUntil parameter.') + end + + valid_until = matches[1].to_i + + valid_until - now + end + + # + # This method allows to query multiple indexes with one API call + # + def Algolia.multiple_queries(queries, options = nil, strategy = nil) + Algolia.client.multiple_queries(queries, options, strategy) + end + + # + # This method allows to get objects (records) via objectID across + # multiple indexes with one API call + # + def Algolia.multiple_get_objects(requests, request_options = {}) + Algolia.client.multiple_get_objects(requests, request_options) + end + + # + # List all existing indexes + # return an Answer object with answer in the form + # {"items": [{ "name": "contacts", "createdAt": "2013-01-18T15:33:13.556Z"}, + # {"name": "notes", "createdAt": "2013-01-18T15:33:13.556Z"}]} + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.list_indexes(request_options = {}) + Algolia.client.list_indexes(request_options) + end + 
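+  #
+  # Illustrative usage of the module-level helpers above (editor's sketch, not part
+  # of the upstream gem; the credentials, index names and filter values are placeholders):
+  #
+  #   Algolia.init(:application_id => 'YourAppID', :api_key => 'YourAPIKey')
+  #   Algolia.list_indexes
+  #   Algolia.multiple_queries([
+  #     { :index_name => 'contacts',  'query' => 'jeri' },
+  #     { :index_name => 'companies', 'query' => 'algolia' }
+  #   ])
+  #   secured = Algolia.generate_secured_api_key('SearchOnlyKey',
+  #                                              :tagFilters => ['user_42'],
+  #                                              :validUntil => Time.now.to_i + 3600)
+  #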
+ # + # Move an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.move_index(src_index, dst_index, request_options = {}) + Algolia.client.move_index(src_index, dst_index, request_options) + end + + # + # Move an existing index and wait until the move has been processed + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.move_index!(src_index, dst_index, request_options = {}) + Algolia.client.move_index!(src_index, dst_index, request_options) + end + + # + # Copy an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_index(src_index, dst_index, scope = nil, request_options = {}) + Algolia.client.copy_index(src_index, dst_index, scope, request_options) + end + + # + # Copy an existing index and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_index!(src_index, dst_index, scope = nil, request_options = {}) + Algolia.client.copy_index!(src_index, dst_index, scope, request_options) + end + + # + # Copy an existing index settings. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_settings(src_index, dst_index, request_options = {}) + Algolia.client.copy_settings(src_index, dst_index, request_options) + end + + # + # Copy an existing index settings and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_settings!(src_index, dst_index, request_options = {}) + Algolia.client.copy_settings!(src_index, dst_index, request_options) + end + + # + # Copy an existing index synonyms. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name synonyms (destination synonyms will be overriten if it already exist). 
+ # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_synonyms(src_index, dst_index, request_options = {}) + Algolia.client.copy_synonyms(src_index, dst_index, request_options) + end + + # + # Copy an existing index synonyms and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name synonyms (destination synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_synonyms!(src_index, dst_index, request_options = {}) + Algolia.client.copy_synonyms!(src_index, dst_index, request_options) + end + + # + # Copy an existing index rules. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_rules(src_index, dst_index, request_options = {}) + Algolia.client.copy_rules(src_index, dst_index, request_options) + end + + # + # Copy an existing index rules and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_rules!(src_index, dst_index, request_options = {}) + Algolia.client.copy_rules!(src_index, dst_index, request_options) + end + + # + # Delete an index + # + def Algolia.delete_index(name, request_options = {}) + Algolia.client.delete_index(name, request_options) + end + + # + # Delete an index and wait until the deletion has been processed. + # + def Algolia.delete_index!(name, request_options = {}) + Algolia.client.delete_index!(name, request_options) + end + + # + # Return last logs entries. + # + # @param offset Specify the first entry to retrieve (0-based, 0 is the most recent log entry). + # @param length Specify the maximum number of entries to retrieve starting at offset. Maximum allowed value: 1000. 
+ # @param type Specify the type of entries you want to retrieve - default: "all" + # @param request_options contains extra parameters to send with your query + # + def Algolia.get_logs(options = nil, length = nil, type = nil) + Algolia.client.get_logs(options, length, type) + end + + # + # List all existing user keys with their associated ACLs + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.list_api_keys(request_options = {}) + Algolia.client.list_api_keys(request_options) + end + + # + # Deprecated + # + def Algolia.list_user_keys(request_options = {}) + Algolia.client.list_api_keys(request_options) + end + + # + # Get ACL of a user key + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.get_api_key(key, request_options = {}) + Algolia.client.get_api_key(key, request_options) + end + + # + # Deprecated + # + def Algolia.get_user_key(key, request_options = {}) + Algolia.client.get_user_key(key, request_options) + end + + # + # Create a new user key + # + # Deprecated call was add_api_key(acl, validity, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a NSDictionary that + # can contains the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - max_hits_per_query: integer + # - queryParameters: string + # - max_queries_per_IP_per_hour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def Algolia.add_api_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.add_api_key(object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Deprecated + # + def Algolia.add_user_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.add_api_key(object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Update a user key + # + # Deprecated call was update_api_key(key, acl, validity, maxQueriesPerIPPerHour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param key API Key to update + # @param object The list of parameters for this key. 
+ # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - max_hits_per_query: integer + # - queryParameters: string + # - max_queries_per_IP_per_hour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def Algolia.update_api_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.update_api_key(key, object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Deprecated + # + def Algolia.update_user_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.update_api_key(key, object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Delete an existing user key + # + def Algolia.delete_api_key(key, request_options = {}) + Algolia.client.delete_api_key(key, request_options) + end + + # + # Restore an existing api key + # + def Algolia.restore_api_key(key, request_options = {}) + Algolia.client.restore_api_key(key, request_options) + end + + # + # Deprecated + # + def Algolia.delete_user_key(key, request_options = {}) + Algolia.client.delete_api_key(key, request_options) + end + + # + # Send a batch request targeting multiple indices + # + def Algolia.batch(requests, request_options = {}) + Algolia.client.batch(requests, request_options) + end + + # + # Send a batch request targeting multiple indices and wait the end of the indexing + # + def Algolia.batch!(requests, request_options = {}) + Algolia.client.batch!(requests, request_options) + end + + # + # Wait until task is completed by the engine + # + def Algolia.wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + Algolia.client.wait_task(index_name, taskID, time_before_retry, request_options) + end + + def Algolia.get_task_status(index_name, taskID, request_options = {}) + Algolia.client.get_task_status(index_name, taskID, request_options = {}) + end + # + # Used mostly for testing. Lets you delete the api key global vars. + # + def Algolia.destroy + @@client.destroy unless @@client.nil? + @@client = nil + self + end + + def Algolia.client + if !@@client + raise AlgoliaError, 'API not initialized' + end + @@client + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb new file mode 100644 index 0000000..4b76b7b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb @@ -0,0 +1,31 @@ +module Algolia + + # Base exception class for errors thrown by the Algolia + # client library. AlgoliaError will be raised by any + # network operation if Algolia.init() has not been called. + class AlgoliaError < StandardError #Exception ... why? A:http://www.skorks.com/2009/09/ruby-exceptions-and-exception-handling/ + end + + # An exception class raised when the REST API returns an error. + # The error code and message will be parsed out of the HTTP response, + # which is also included in the response attribute. 
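+  #
+  # Illustrative handling (editor's sketch, not part of the upstream gem; assumes
+  # Algolia.init has already been called):
+  #
+  #   begin
+  #     Algolia.client.list_indexes
+  #   rescue Algolia::AlgoliaProtocolError => e
+  #     puts "Algolia returned HTTP #{e.code}: #{e.message}"
+  #   end
+  #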
+ class AlgoliaProtocolError < AlgoliaError + attr_accessor :code + attr_accessor :message + + def initialize(code, message) + self.code = code + self.message = message + super("#{self.code}: #{self.message}") + end + end + + # An exception class raised when the given object was not found. + class AlgoliaObjectNotFoundError < AlgoliaError + end + + # An exception class raised when the validUntil parameter is not found + class ValidUntilNotFoundError < AlgoliaError + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb new file mode 100644 index 0000000..dbc64d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb @@ -0,0 +1,1355 @@ +require 'algolia/client' +require 'algolia/error' + +module Algolia + + class Index + attr_accessor :name, :client + + def initialize(name, client = nil) + self.name = name + self.client = client || Algolia.client + end + + # + # Delete an index + # + # @param request_options contains extra parameters to send with your query + # + # return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" } + # + def delete(request_options = {}) + client.delete(Protocol.index_uri(name), :write, request_options) + end + alias_method :delete_index, :delete + + # + # Delete an index and wait until the deletion has been processed + # + # @param request_options contains extra parameters to send with your query + # + # return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" } + # + def delete!(request_options = {}) + res = delete(request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + alias_method :delete_index!, :delete! + + # + # Add an object in this index + # + # @param object the object to add to the index. + # The object is represented by an associative array + # @param objectID (optional) an objectID you want to attribute to this object + # (if the attribute already exist the old object will be overridden) + # @param request_options contains extra parameters to send with your query + # + def add_object(object, objectID = nil, request_options = {}) + check_object(object) + if objectID.nil? || objectID.to_s.empty? + client.post(Protocol.index_uri(name), object.to_json, :write, request_options) + else + client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options) + end + end + + # + # Add an object in this index and wait end of indexing + # + # @param object the object to add to the index. + # The object is represented by an associative array + # @param objectID (optional) an objectID you want to attribute to this object + # (if the attribute already exist the old object will be overridden) + # @param Request options object. Contains extra URL parameters or headers + # + def add_object!(object, objectID = nil, request_options = {}) + res = add_object(object, objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Add several objects in this index + # + # @param objects the array of objects to add inside the index. 
+ # Each object is represented by an associative array + # @param request_options contains extra parameters to send with your query + # + def add_objects(objects, request_options = {}) + batch(build_batch('addObject', objects, false), request_options) + end + + # + # Add several objects in this index and wait end of indexing + # + # @param objects the array of objects to add inside the index. + # Each object is represented by an associative array + # @param request_options contains extra parameters to send with your query + # + def add_objects!(objects, request_options = {}) + res = add_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Search inside the index + # + # @param query the full text query + # @param args (optional) if set, contains an associative array with query parameters: + # - page: (integer) Pagination parameter used to select the page to retrieve. + # Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9 + # - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20. + # - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size). + # Attributes are separated with a comma (for example "name,address"). + # You can also use a string array encoding (for example ["name","address"]). + # By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index. + # - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query. + # Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]). + # If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted. + # You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted. + # A matchLevel is returned for each highlighted attribute and can contain: + # - full: if all the query terms were found in the attribute, + # - partial: if only some of the query terms were found, + # - none: if none of the query terms were found. + # - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`). + # Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10). + # You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed. + # - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3. + # - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7. + # - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute. + # - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma). + # For example aroundLatLng=47.316669,5.016670). 
+ # You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision + # (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter). + # At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}}) + # - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng). + # For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201). + # At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}}) + # - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma. + # The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`. + # You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000. + # You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]). + # - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas. + # To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3). + # You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3). + # At indexing, tags should be added in the _tags** attribute of objects (for example {"_tags":["tag1","tag2"]}). + # - facetFilters: filter the query by a list of facets. + # Facets are separated by commas and each facet is encoded as `attributeName:value`. + # For example: `facetFilters=category:Book,author:John%20Doe`. + # You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`). + # - facets: List of object attributes that you want to use for faceting. + # Attributes are separated with a comma (for example `"category,author"` ). + # You can also use a JSON string array encoding (for example ["category","author"]). + # Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter. + # You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**. + # - queryType: select how the query words are interpreted, it can be one of the following value: + # - prefixAll: all query words are interpreted as prefixes, + # - prefixLast: only the last word is interpreted as a prefix (default behavior), + # - prefixNone: no query word is interpreted as a prefix. This option is not recommended. + # - optionalWords: a string that contains the list of words that should be considered as optional when found in the query. + # The list of words is comma separated. + # - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set. + # This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter, + # all hits containing a duplicate value for the attributeForDistinct attribute are removed from results. + # For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best + # one is kept and others are removed. 
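+ #
+ # Illustrative call combining a few of the parameters above (editor's sketch, not
+ # part of the upstream gem; the index name, query and filter values are placeholders):
+ #
+ #   index = Algolia::Index.new('contacts')
+ #   index.search('jeri', { :hitsPerPage => 5,
+ #                          :attributesToRetrieve => ['firstname', 'company'],
+ #                          :facetFilters => ['state:CA'] })
+ #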
+ # @param request_options contains extra parameters to send with your query + # + def search(query, params = {}, request_options = {}) + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + encoded_params[:query] = query + client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options) + end + + class IndexBrowser + def initialize(client, name, params) + @client = client + @name = name + @params = params + @cursor = params[:cursor] || params['cursor'] || nil + end + + def browse(request_options = {}, &block) + loop do + answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options) + answer['hits'].each do |hit| + if block.arity == 2 + yield hit, @cursor + else + yield hit + end + end + @cursor = answer['cursor'] + break if @cursor.nil? + end + end + end + + # + # Browse all index content + # + # @param queryParameters The hash of query parameters to use to browse + # To browse from a specific cursor, just add a ":cursor" parameters + # @param queryParameters An optional second parameters hash here for backward-compatibility (which will be merged with the first) + # @param request_options contains extra parameters to send with your query + # + # @DEPRECATED: + # @param page Pagination parameter used to select the page to retrieve. + # @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000. + # + def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block) + params = {} + if page_or_query_parameters.is_a?(Hash) + params.merge!(page_or_query_parameters) + else + params[:page] = page_or_query_parameters unless page_or_query_parameters.nil? + end + if hits_per_page.is_a?(Hash) + params.merge!(hits_per_page) + else + params[:hitsPerPage] = hits_per_page unless hits_per_page.nil? + end + + if block_given? + IndexBrowser.new(client, name, params).browse(request_options, &block) + else + params[:page] ||= 0 + params[:hitsPerPage] ||= 1000 + client.get(Protocol.browse_uri(name, params), :read, request_options) + end + end + + # + # Browse a single page from a specific cursor + # + # @param request_options contains extra parameters to send with your query + # + def browse_from(cursor, hits_per_page = 1000, request_options = {}) + client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options) + end + + # + # Get an object from this index + # + # @param objectID the unique identifier of the object to retrieve + # @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by "," + # @param request_options contains extra parameters to send with your query + # + def get_object(objectID, attributes_to_retrieve = nil, request_options = {}) + attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array) + if attributes_to_retrieve.nil? 
+ client.get(Protocol.object_uri(name, objectID, nil), :read, request_options) + else + client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options) + end + end + + # + # Get a list of objects from this index + # + # @param objectIDs the array of unique identifier of the objects to retrieve + # @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by "," + # @param request_options contains extra parameters to send with your query + # + def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {}) + attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array) + requests = objectIDs.map do |objectID| + req = { :indexName => name, :objectID => objectID.to_s } + req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil? + req + end + client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results'] + end + + # + # Find object by the given condition. + # + # Options can be passed in request_options body: + # - query (string): pass a query + # - paginate (bool): choose if you want to iterate through all the + # documents (true) or only the first page (false). Default is true. + # The function takes a block to filter the results from search query + # Usage example: + # index.find_object({'query' => '', 'paginate' => true}) {|obj| obj.key?('company') and obj['company'] == 'Apple'} + # + # @param request_options contains extra parameters to send with your query + # + # @return [Hash] the matching object and its position in the result set + # + def find_object(request_options = {}) + paginate = true + page = 0 + + query = request_options[:query] || request_options['query'] || '' + request_options.delete(:query) + request_options.delete('query') + + if request_options.has_key? :paginate + paginate = request_options[:paginate] + end + + if request_options.has_key? 'paginate' + paginate = request_options['paginate'] + end + + request_options.delete(:paginate) + request_options.delete('paginate') + + while true + request_options['page'] = page + res = search(query, request_options) + + res['hits'].each_with_index do |hit, i| + if yield(hit) + return { + 'object' => hit, + 'position' => i, + 'page' => page, + } + end + end if block_given? + + has_next_page = page + 1 < res['nbPages'] + if !paginate || !has_next_page + raise AlgoliaObjectNotFoundError.new('Object not found') + end + + page += 1 + end + end + + # + # Retrieve the given object position in a set of results. + # + # @param [Array] objects the result set to browse + # @param [String] object_id the object to look for + # + # @return [Integer] position of the object, or -1 if it's not in the array + # + def self.get_object_position(objects, object_id) + objects['hits'].find_index { |hit| hit['objectID'] == object_id } || -1 + end + + # + # Check the status of a task on the server. + # All server task are asynchronous and you can check the status of a task with this method. + # + # @param taskID the id of the task returned by server + # @param request_options contains extra parameters to send with your query + # + def get_task_status(taskID, request_options = {}) + client.get_task_status(name, taskID, request_options) + end + + # + # Wait the publication of a task on the server. + # All server task are asynchronous and you can check with this method that the task is published. 
+ # + # @param taskID the id of the task returned by server + # @param time_before_retry the time in milliseconds before retry (default = 100ms) + # @param request_options contains extra parameters to send with your query + # + def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + client.wait_task(name, taskID, time_before_retry, request_options) + end + + # + # Override the content of an object + # + # @param object the object to save + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_object(object, objectID = nil, request_options = {}) + client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options) + end + + # + # Override the content of object and wait end of indexing + # + # @param object the object to save + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_object!(object, objectID = nil, request_options = {}) + res = save_object(object, objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Override the content of several objects + # + # @param objects the array of objects to save, each object must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_objects(objects, request_options = {}) + batch(build_batch('updateObject', objects, true), request_options) + end + + # + # Override the content of several objects and wait end of indexing + # + # @param objects the array of objects to save, each object must contain an objectID attribute + # @param request_options contains extra parameters to send with your query + # + def save_objects!(objects, request_options = {}) + res = save_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Override the current objects by the given array of objects and wait end of indexing. Settings, + # synonyms and query rules are untouched. The objects are replaced without any downtime. + # + # @param objects the array of objects to save + # @param request_options contains extra parameters to send with your query + # + def replace_all_objects(objects, request_options = {}) + safe = request_options[:safe] || request_options['safe'] || false + request_options.delete(:safe) + request_options.delete('safe') + + tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s) + + responses = [] + + scope = ['settings', 'synonyms', 'rules'] + res = @client.copy_index(@name, tmp_index.name, scope, request_options) + responses << res + + if safe + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + batch = [] + batch_size = 1000 + count = 0 + + objects.each do |object| + batch << object + count += 1 + if count == batch_size + res = tmp_index.add_objects(batch, request_options) + responses << res + batch = [] + count = 0 + end + end + + if batch.any? 
+ res = tmp_index.add_objects(batch, request_options) + responses << res + end + + if safe + responses.each do |res| + tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + end + + res = @client.move_index(tmp_index.name, @name, request_options) + responses << res + + if safe + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + responses + end + + # + # Override the current objects by the given array of objects and wait end of indexing + # + # @param objects the array of objects to save + # @param request_options contains extra parameters to send with your query + # + def replace_all_objects!(objects, request_options = {}) + replace_all_objects(objects, request_options.merge(:safe => true)) + end + + # + # Update partially an object (only update attributes passed in argument) + # + # @param object the object attributes to override + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {}) + client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options) + end + + # + # Partially override the content of several objects + # + # @param objects an array of objects to update (each object must contains a objectID attribute) + # @param create_if_not_exits a boolean, if true create the objects if they don't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_objects(objects, create_if_not_exits = true, request_options = {}) + if create_if_not_exits + batch(build_batch('partialUpdateObject', objects, true), request_options) + else + batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options) + end + end + + # + # Partially override the content of several objects and wait end of indexing + # + # @param objects an array of objects to update (each object must contains a objectID attribute) + # @param create_if_not_exits a boolean, if true create the objects if they don't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_objects!(objects, create_if_not_exits = true, request_options = {}) + res = partial_update_objects(objects, create_if_not_exits, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Update partially an object (only update attributes passed in argument) and wait indexing + # + # @param object the attributes to override + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {}) + res = partial_update_object(object, objectID, create_if_not_exits, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete an object from the index + # + # @param objectID the unique identifier of object to delete + # @param request_options contains extra parameters to send 
with your query + # + def delete_object(objectID, request_options = {}) + raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == '' + client.delete(Protocol.object_uri(name, objectID), :write, request_options) + end + + # + # Delete an object from the index and wait end of indexing + # + # @param objectID the unique identifier of object to delete + # @param request_options contains extra parameters to send with your query + # + def delete_object!(objectID, request_options = {}) + res = delete_object(objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete several objects + # + # @param objects an array of objectIDs + # @param request_options contains extra parameters to send with your query + # + def delete_objects(objects, request_options = {}) + check_array(objects) + batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options) + end + + # + # Delete several objects and wait end of indexing + # + # @param objects an array of objectIDs + # @param request_options contains extra parameters to send with your query + # + def delete_objects!(objects, request_options = {}) + res = delete_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete all objects matching a query + # This method retrieves all objects synchronously but deletes in batch + # asynchronously + # + # @param query the query string + # @param params the optional query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by_query(query, params = nil, request_options = {}) + raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil? + params = sanitized_delete_by_query_params(params) + + params[:query] = query + params[:hitsPerPage] = 1000 + params[:distinct] = false + params[:attributesToRetrieve] = ['objectID'] + params[:cursor] = '' + ids = [] + + while params[:cursor] != nil + result = browse(params, nil, request_options) + + params[:cursor] = result['cursor'] + + hits = result['hits'] + break if hits.empty? + + ids += hits.map { |hit| hit['objectID'] } + end + + delete_objects(ids, request_options) + end + + # + # Delete all objects matching a query and wait end of indexing + # + # @param query the query string + # @param params the optional query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by_query!(query, params = nil, request_options = {}) + res = delete_by_query(query, params, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res + res + end + + # + # Delete all objects matching a query (doesn't work with actual text queries) + # This method deletes every record matching the filters provided + # + # @param params query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by(params, request_options = {}) + raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil? 
+ params = sanitized_delete_by_query_params(params) + client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options) + end + + # + # Delete all objects matching a query (doesn't work with actual text queries) + # This method deletes every record matching the filters provided and waits for the end of indexing + # @param params query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by!(params, request_options = {}) + res = delete_by(params, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res + res + end + + # + # Delete the index content + # + # @param request_options contains extra parameters to send with your query + # + def clear(request_options = {}) + client.post(Protocol.clear_uri(name), {}, :write, request_options) + end + alias_method :clear_index, :clear + + # + # Delete the index content and wait end of indexing + # + def clear!(request_options = {}) + res = clear(request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + alias_method :clear_index!, :clear! + + # + # Set settings for this index + # + def set_settings(new_settings, options = {}, request_options = {}) + client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options) + end + + # + # Set settings for this index and wait end of indexing + # + def set_settings!(new_settings, options = {}, request_options = {}) + res = set_settings(new_settings, options, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Get settings of this index + # + def get_settings(options = {}, request_options = {}) + options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion'] + client.get(Protocol.settings_uri(name, options).to_s, :read, request_options) + end + + # + # List all existing user keys with their associated ACLs + # + # Deprecated: Please us `client.list_api_keys` instead. + def list_api_keys(request_options = {}) + client.get(Protocol.index_keys_uri(name), :read, request_options) + end + + # + # Get ACL of a user key + # + # Deprecated: Please us `client.get_api_key` instead. + def get_api_key(key, request_options = {}) + client.get(Protocol.index_key_uri(name, key), :read, request_options) + end + + # + # Create a new user key + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a Hash that can + # contains the following values: + # - acl: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # Or the list of ACL for this key. 
Defined by an array of String that + # can contains the following values: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key) + # @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited) + # @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited) + # @param request_options contains extra parameters to send with your query + #
 # Deprecated: Please use `client.add_api_key` instead + def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {}) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options) + end + + # + # Update a user key + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a Hash that + # can contains the following values: + # - acl: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # Or the list of ACL for this key. Defined by an array of String that + # can contains the following values: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key) + # @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited) + # @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited) + # @param request_options contains extra parameters to send with your query + # + # Deprecated: Please use `client.update_api_key` instead + def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {}) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options) + end + + # + # Delete an existing user key + # + # Deprecated: Please use `client.delete_api_key` instead + def delete_api_key(key, request_options = {}) + client.delete(Protocol.index_key_uri(name, key), :write, request_options) + end + + # + # Send a batch request + # + def batch(request, request_options = {}) + client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options) + end + + # + # Send a batch request and wait the end of the indexing + # + def batch!(request, request_options = {}) + res = batch(request, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Search for facet values + # + # @param facet_name Name of the facet to search. It must have been declared in the + # index's`attributesForFaceting` setting with the `searchable()` modifier. + # @param facet_query Text to search for in the facet's values + # @param search_parameters An optional query to take extra search parameters into account. 
+ # These parameters apply to index objects like in a regular search query. + # Only facet values contained in the matched objects will be returned. + # @param request_options contains extra parameters to send with your query + # + def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {}) + params = search_parameters.clone + params['facetQuery'] = facet_query + client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options) + end + + # deprecated + alias_method :search_facet, :search_for_facet_values + + # + # Perform a search with disjunctive facets generating as many queries as number of disjunctive facets + # + # @param query the query + # @param disjunctive_facets the array of disjunctive facets + # @param params a hash representing the regular query parameters + # @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements + # ex: { "my_facet1" => ["my_value1", ["my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] } + # @param request_options contains extra parameters to send with your query + # + def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {}) + raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array) + raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty? + + # extract disjunctive facets & associated refinements + disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String) + disjunctive_refinements = {} + refinements.each do |k, v| + disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s) + end + + # build queries + queries = [] + ## hits + regular facets query + filters = [] + refinements.to_a.each do |k, values| + r = values.map { |v| "#{k}:#{v}" } + if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym] + # disjunctive refinements are ORed + filters << r + else + # regular refinements are ANDed + filters += r + end + end + queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters }) + ## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet) + disjunctive_facets.each do |disjunctive_facet| + filters = [] + refinements.each do |k, values| + if k.to_s != disjunctive_facet.to_s + r = values.map { |v| "#{k}:#{v}" } + if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym] + # disjunctive refinements are ORed + filters << r + else + # regular refinements are ANDed + filters += r + end + end + end + queries << params.merge({ + :index_name => self.name, + :query => query, + :page => 0, + :hitsPerPage => 1, + :attributesToRetrieve => [], + :attributesToHighlight => [], + :attributesToSnippet => [], + :facets => disjunctive_facet, + :facetFilters => filters, + :analytics => false + }) + end + answers = client.multiple_queries(queries, { :request_options => request_options }) + + # aggregate answers + ## first answer stores the hits + regular facets + aggregated_answer = answers['results'][0] + ## others store the disjunctive facets + aggregated_answer['disjunctiveFacets'] = {} + answers['results'].each_with_index do |a, i| + next if i == 0 + a['facets'].each do |facet, values| + ## add the 
facet to the disjunctive facet hash + aggregated_answer['disjunctiveFacets'][facet] = values + ## concatenate missing refinements + (disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r| + if aggregated_answer['disjunctiveFacets'][facet][r].nil? + aggregated_answer['disjunctiveFacets'][facet][r] = 0 + end + end + end + end + + aggregated_answer + end + + # + # Alias of Algolia.list_indexes + # + # @param request_options contains extra parameters to send with your query + # + def Index.all(request_options = {}) + Algolia.list_indexes(request_options) + end + + # + # Search synonyms + # + # @param query the query + # @param params an optional hash of :type, :page, :hitsPerPage + # @param request_options contains extra parameters to send with your query + # + def search_synonyms(query, params = {}, request_options = {}) + type = params[:type] || params['type'] + type = type.join(',') if type.is_a?(Array) + page = params[:page] || params['page'] || 0 + hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20 + params = { + :query => query, + :type => type.to_s, + :page => page, + :hitsPerPage => hits_per_page + } + client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options) + end + + # + # Get a synonym + # + # @param objectID the synonym objectID + # @param request_options contains extra parameters to send with your query + def get_synonym(objectID, request_options = {}) + client.get(Protocol.synonym_uri(name, objectID), :read, request_options) + end + + # + # Delete a synonym + # + # @param objectID the synonym objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_synonym(objectID, forward_to_replicas = false, request_options = {}) + client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options) + end + + # + # Delete a synonym and wait the end of indexing + # + # @param objectID the synonym objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_synonym!(objectID, forward_to_replicas = false, request_options = {}) + res = delete_synonym(objectID, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Save a synonym + # + # @param objectID the synonym objectID + # @param synonym the synonym + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {}) + client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options) + end + + # + # Save a synonym and wait the end of indexing + # + # @param objectID the synonym objectID + # @param synonym the synonym + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {}) + res = save_synonym(objectID, synonym, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + 
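The synonym helpers above (save_synonym!, search_synonyms and friends) wrap the Synonyms REST endpoints, taking an objectID, the synonym record and an optional forwardToReplicas flag. A minimal usage sketch of that API follows; the credentials, index name and synonym record are placeholders for illustration, not values taken from this diff:

require 'algoliasearch'

Algolia.init(:application_id => 'YourApplicationID', :api_key => 'YourAdminAPIKey')  # placeholder credentials
index = Algolia::Index.new('products')                                               # hypothetical index name

# Save a regular (multi-way) synonym record and wait for the end of indexing,
# forwarding the change to replica indices (forward_to_replicas = true).
index.save_synonym!('tv-synonyms', {
  :objectID => 'tv-synonyms',
  :type     => 'synonym',
  :synonyms => ['tv', 'television', 'telly']
}, true)

# Paginated search over the stored synonyms, as implemented by search_synonyms above.
puts index.search_synonyms('tv', :hitsPerPage => 10)['hits']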
+ # + # Clear all synonyms + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_synonyms(forward_to_replicas = false, request_options = {}) + client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options) + end + + # + # Clear all synonyms and wait the end of indexing + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_synonyms!(forward_to_replicas = false, request_options = {}) + res = clear_synonyms(forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Add/Update an array of synonyms + # + # @param synonyms the array of synonyms to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {}) + client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options) + end + + # + # Add/Update an array of synonyms and wait the end of indexing + # + # @param synonyms the array of synonyms to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {}) + res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Replace synonyms in the index by the given array of synonyms + # + # @param synonyms the array of synonyms to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_synonyms(synonyms, request_options = {}) + forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false + batch_synonyms(synonyms, forward_to_replicas, true, request_options) + end + + # + # Replace synonyms in the index by the given array of synonyms and wait the end of indexing + # + # @param synonyms the array of synonyms to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_synonyms!(synonyms, request_options = {}) + res = replace_all_synonyms(synonyms, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Export the full list of synonyms + # Accepts an optional block to which it will pass each synonym + # Also returns an array with all the synonyms + # + # @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100 + # @param request_options contains extra parameters to send with your query - Optional + # + def export_synonyms(hits_per_page = 100, request_options = {}, &_block) + 
res = [] + page = 0 + loop do + curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] + curr.each do |synonym| + res << synonym + yield synonym if block_given? + end + break if curr.size < hits_per_page + page += 1 + end + res + end + + # + # Search rules + # + # @param query the query + # @param params an optional hash of :anchoring, :context, :page, :hitsPerPage + # @param request_options contains extra parameters to send with your query + # + def search_rules(query, params = {}, request_options = {}) + anchoring = params[:anchoring] + context = params[:context] + page = params[:page] || params['page'] || 0 + hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20 + params = { + :query => query, + :page => page, + :hitsPerPage => hits_per_page + } + params[:anchoring] = anchoring unless anchoring.nil? + params[:context] = context unless context.nil? + client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options) + end + + # + # Get a rule + # + # @param objectID the rule objectID + # @param request_options contains extra parameters to send with your query + # + def get_rule(objectID, request_options = {}) + client.get(Protocol.rule_uri(name, objectID), :read, request_options) + end + + # + # Delete a rule + # + # @param objectID the rule objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_rule(objectID, forward_to_replicas = false, request_options = {}) + client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options) + end + + # + # Delete a rule and wait the end of indexing + # + # @param objectID the rule objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_rule!(objectID, forward_to_replicas = false, request_options = {}) + res = delete_rule(objectID, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Save a rule + # + # @param objectID the rule objectID + # @param rule the rule + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_rule(objectID, rule, forward_to_replicas = false, request_options = {}) + raise ArgumentError.new('objectID must not be blank') if objectID.nil? 
|| objectID == '' + client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options) + end + + # + # Save a rule and wait the end of indexing + # + # @param objectID the rule objectID + # @param rule the rule + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {}) + res = save_rule(objectID, rule, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Clear all rules + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_rules(forward_to_replicas = false, request_options = {}) + client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options) + end + + # + # Clear all rules and wait the end of indexing + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_rules!(forward_to_replicas = false, request_options = {}) + res = clear_rules(forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Add/Update an array of rules + # + # @param rules the array of rules to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param clear_existing_rules should we clear the existing rules before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {}) + client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options) + end + + # + # Add/Update an array of rules and wait the end of indexing + # + # @param rules the array of rules to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param clear_existing_rules should we clear the existing rules before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {}) + res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Replace rules in the index by the given array of rules + # + # @param rules the array of rules to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_rules(rules, request_options = {}) + forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false + batch_rules(rules, forward_to_replicas, true, request_options) + end + + # + # Replace rules in the index by the given array of rules and wait the end of indexing + # + # @param rules the array of rules to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_rules!(rules, request_options = {}) + res = 
replace_all_rules(rules, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Export the full list of rules + # Accepts an optional block to which it will pass each rule + # Also returns an array with all the rules + # + # @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100 + # @param request_options contains extra parameters to send with your query - Optional + # + def export_rules(hits_per_page = 100, request_options = {}, &_block) + res = [] + page = 0 + loop do + curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] + curr.each do |rule| + res << rule + yield rule if block_given? + end + break if curr.size < hits_per_page + page += 1 + end + res + end + + # + # Check whether an index exists or not + # + # @return [Boolean] + # + def exists + begin + get_settings + rescue AlgoliaProtocolError => e + if e.code === 404 + return false + end + + raise e + end + return true + end + + # + # Aliases the exists method + # + alias :exists? :exists + + # Deprecated + alias_method :get_user_key, :get_api_key + alias_method :list_user_keys, :list_api_keys + alias_method :add_user_key, :add_api_key + alias_method :update_user_key, :update_api_key + alias_method :delete_user_key, :delete_api_key + + private + + def check_array(object) + raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array) + end + + def check_object(object, in_array = false) + case object + when Array + raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array') + when String, Integer, Float, TrueClass, FalseClass, NilClass + raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}") + else + # ok + end + end + + def get_objectID(object, objectID = nil) + check_object(object) + objectID ||= object[:objectID] || object['objectID'] + raise ArgumentError.new("Missing 'objectID'") if objectID.nil? 
+ return objectID + end + + def build_batch(action, objects, with_object_id = false) + check_array(objects) + { + :requests => objects.map { |object| + check_object(object, true) + h = { :action => action, :body => object } + h[:objectID] = get_objectID(object).to_s if with_object_id + h + } + } + end + + def sanitized_delete_by_query_params(params) + params ||= {} + params.delete(:hitsPerPage) + params.delete('hitsPerPage') + params.delete(:attributesToRetrieve) + params.delete('attributesToRetrieve') + params + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb new file mode 100644 index 0000000..a3d5883 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb @@ -0,0 +1,131 @@ +module Algolia + + class Insights + MIN_RUBY_VERSION = '1.9.0' + + def initialize(app_id, api_key, region = 'us', params = {}) + headers = params[:headers] || {} + @app_id = app_id + @api_key = api_key + @url = "https://insights.#{region}.algolia.io" + @headers = headers.merge({ + Protocol::HEADER_APP_ID => app_id, + Protocol::HEADER_API_KEY => api_key, + 'Content-Type' => 'application/json; charset=utf-8', + 'User-Agent' => ["Algolia for Ruby (#{::Algolia::VERSION})", "Ruby (#{RUBY_VERSION})"].join('; ') + }) + end + + def user(user_token) + UserInsights.new(self, user_token) + end + + def send_event(event) + send_events([event]) + end + + def send_events(events) + perform_request(:POST, '/1/events', {}, { 'events' => events }.to_json) + end + + private + + def perform_request(method, path, params = {}, data = {}) + http = HTTPClient.new + + url = @url + path + + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + url << "?" 
+ Protocol.to_query(encoded_params) + + response = case method + when :POST + http.post(url, { :body => data, :header => @headers }) + end + + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content}") + end + + JSON.parse(response.content) + end + end + + class UserInsights + def initialize(insights, user_token) + @insights = insights + @user_token = user_token + end + + def clicked_object_ids(event_name, index_name, object_ids, request_options = {}) + clicked({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def clicked_object_ids_after_search(event_name, index_name, object_ids, positions, query_id, request_options = {}) + clicked({ + 'objectIDs' => object_ids, + 'positions' => positions, + 'queryID' => query_id, + }, event_name, index_name, request_options) + end + + def clicked_filters(event_name, index_name, filters, request_options = {}) + clicked({ 'filters' => filters }, event_name, index_name, request_options) + end + + def converted_object_ids(event_name, index_name, object_ids, request_options = {}) + converted({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def converted_object_ids_after_search(event_name, index_name, object_ids, query_id, request_options = {}) + converted({ + 'objectIDs' => object_ids, + 'queryID' => query_id, + }, event_name, index_name, request_options) + end + + def converted_filters(event_name, index_name, filters, request_options = {}) + converted({ 'filters' => filters }, event_name, index_name, request_options) + end + + def viewed_object_ids(event_name, index_name, object_ids, request_options = {}) + viewed({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def viewed_filters(event_name, index_name, filters, request_options = {}) + viewed({ 'filters' => filters }, event_name, index_name, request_options) + end + + private + + def clicked(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'click', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def converted(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'conversion', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def viewed(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'view', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def send_event(event) + @insights.send_event(event.merge({ 'userToken' => @user_token})) + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb new file mode 100644 index 0000000..849ee17 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb @@ -0,0 +1,211 @@ +require 'cgi' + +module Algolia + # A module which encapsulates the specifics of Algolia's REST API. + module Protocol + + # Basics + + # The version of the REST API implemented by this module. + VERSION = 1 + + # HTTP Headers + # ---------------------------------------- + + # The HTTP header used for passing your application ID to the Algolia API. + HEADER_APP_ID = "X-Algolia-Application-Id" + + # The HTTP header used for passing your API key to the Algolia API. 
+ HEADER_API_KEY = "X-Algolia-API-Key" + HEADER_FORWARDED_IP = "X-Forwarded-For" + HEADER_FORWARDED_API_KEY = "X-Forwarded-API-Key" + + # HTTP ERROR CODES + # ---------------------------------------- + + ERROR_BAD_REQUEST = 400 + ERROR_FORBIDDEN = 403 + ERROR_NOT_FOUND = 404 + + # URI Helpers + # ---------------------------------------- + + # Construct a uri to list available indexes + def Protocol.indexes_uri + "/#{VERSION}/indexes" + end + + def Protocol.multiple_queries_uri(strategy = 'none') + "/#{VERSION}/indexes/*/queries?strategy=#{strategy}" + end + + def Protocol.objects_uri + "/#{VERSION}/indexes/*/objects" + end + + # Construct a uri referencing a given Algolia index + def Protocol.index_uri(index) + "/#{VERSION}/indexes/#{CGI.escape(index)}" + end + + def Protocol.batch_uri(index = nil) + "#{index.nil? ? "/#{VERSION}/indexes/*" : index_uri(index)}/batch" + end + + def Protocol.index_operation_uri(index) + "#{index_uri(index)}/operation" + end + + def Protocol.task_uri(index, task_id) + "#{index_uri(index)}/task/#{task_id}" + end + + def Protocol.object_uri(index, object_id, params = {}) + params = params.nil? || params.size == 0 ? '' : "?#{to_query(params)}" + "#{index_uri(index)}/#{CGI.escape(object_id.to_s)}#{params}" + end + + def Protocol.search_uri(index, query, params = {}) + params = params.nil? || params.size == 0 ? '' : "&#{to_query(params)}" + "#{index_uri(index)}?query=#{CGI.escape(query)}&#{params}" + end + + def Protocol.search_post_uri(index) + "#{index_uri(index)}/query" + end + + def Protocol.browse_uri(index, params = {}) + params = params.nil? || params.size == 0 ? '' : "?#{to_query(params)}" + "#{index_uri(index)}/browse#{params}" + end + + def Protocol.search_facet_uri(index, facet) + "#{index_uri(index)}/facets/#{CGI.escape(facet)}/query" + end + + def Protocol.partial_object_uri(index, object_id, create_if_not_exits = true) + params = create_if_not_exits ? '' : '?createIfNotExists=false' + "#{index_uri(index)}/#{CGI.escape(object_id.to_s)}/partial#{params}" + end + + def Protocol.settings_uri(index, params = {}) + params = params.nil? || params.size == 0 ? 
'' : "?#{to_query(params)}" + "#{index_uri(index)}/settings#{params}" + end + + def Protocol.clear_uri(index) + "#{index_uri(index)}/clear" + end + + def Protocol.logs(offset, length, type) + "/#{VERSION}/logs?offset=#{offset}&length=#{length}&type=#{type}" + end + + def Protocol.keys_uri + "/#{VERSION}/keys" + end + + def Protocol.key_uri(key) + "/#{VERSION}/keys/#{key}" + end + + def Protocol.restore_key_uri(key) + "/#{VERSION}/keys/#{key}/restore" + end + + def Protocol.index_key_uri(index, key) + "#{index_uri(index)}/keys/#{key}" + end + + def Protocol.index_keys_uri(index) + "#{index_uri(index)}/keys" + end + + def Protocol.to_query(params) + params.map do |k, v| + "#{CGI.escape(k.to_s)}=#{CGI.escape(v.to_s)}" + end.join('&') + end + + def Protocol.synonyms_uri(index) + "#{index_uri(index)}/synonyms" + end + + def Protocol.synonym_uri(index, object_id) + "#{synonyms_uri(index)}/#{CGI.escape(object_id.to_s)}" + end + + def Protocol.search_synonyms_uri(index) + "#{synonyms_uri(index)}/search" + end + + def Protocol.clear_synonyms_uri(index) + "#{synonyms_uri(index)}/clear" + end + + def Protocol.batch_synonyms_uri(index) + "#{synonyms_uri(index)}/batch" + end + + def Protocol.rules_uri(index) + "#{index_uri(index)}/rules" + end + + def Protocol.rule_uri(index, object_id) + "#{rules_uri(index)}/#{CGI.escape(object_id.to_s)}" + end + + def Protocol.search_rules_uri(index) + "#{rules_uri(index)}/search" + end + + def Protocol.clear_rules_uri(index) + "#{rules_uri(index)}/clear" + end + + def Protocol.batch_rules_uri(index) + "#{rules_uri(index)}/batch" + end + + def Protocol.delete_by_uri(index) + "#{index_uri(index)}/deleteByQuery" + end + + def Protocol.personalization_strategy_uri + "/1/recommendation/personalization/strategy" + end + + def Protocol.clusters_uri + "/#{VERSION}/clusters" + end + + def Protocol.cluster_mapping_uri(user_id = nil) + user_id = "/#{CGI.escape(user_id)}" if user_id + + "/#{VERSION}/clusters/mapping" + user_id.to_s + end + + def Protocol.list_ids_uri(page, hits_per_page) + Protocol.cluster_mapping_uri+"?page=#{CGI.escape(page.to_s)}&hitsPerPage=#{CGI.escape(hits_per_page.to_s)}" + end + + def Protocol.cluster_top_user_uri + "/#{VERSION}/clusters/mapping/top" + end + + def Protocol.search_user_id_uri + "/#{VERSION}/clusters/mapping/search" + end + + def Protocol.ab_tests_uri(ab_test = nil) + ab_test = "/#{ab_test}" if ab_test + + "/2/abtests" + ab_test.to_s + end + + def Protocol.ab_tests_stop_uri(ab_test) + "/2/abtests/#{ab_test}/stop" + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb new file mode 100644 index 0000000..3926b62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb @@ -0,0 +1,3 @@ +module Algolia + VERSION = "1.27.5" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb new file mode 100644 index 0000000..e3d112f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb @@ -0,0 +1,54 @@ +begin + require 'webmock' +rescue LoadError + puts 'WebMock was not found, please add "gem \'webmock\'" to your Gemfile.' + exit 1 +end + +module Algolia + class WebMock + def self.mock! 
+ # list indexes + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes/).to_return(:body => '{ "items": [] }') + # query index + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "hits": [ { "objectID": 42 } ], "page": 1, "hitsPerPage": 1, "nbHits": 1, "nbPages": 1 }') + # delete index + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # clear index + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/clear/).to_return(:body => '{ "taskID": 42 }') + # add object + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # save object + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # partial update + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+\/partial/).to_return(:body => '{ "taskID": 42 }') + # get object + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{}') + # delete object + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # batch + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/batch/).to_return(:body => '{ "taskID": 42 }') + # settings + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/settings/).to_return(:body => '{}') + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/settings/).to_return(:body => '{ "taskID": 42 }') + # browse + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/browse/).to_return(:body => '{ "hits": [] }') + # operations + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/operation/).to_return(:body => '{ "taskID": 42 }') + # tasks + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/task\/[^\/]+/).to_return(:body => '{ "status": "published" }') + # index keys + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/keys/).to_return(:body => '{ }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/keys/).to_return(:body => '{ "keys": [] }') + # global keys + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/keys/).to_return(:body => '{ }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/keys/).to_return(:body => '{ "keys": [] }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/keys\/[^\/]+/).to_return(:body => '{ }') + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/keys\/[^\/]+/).to_return(:body => '{ }') + # query POST + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/query/).to_return(:body => '{ "hits": [ { "objectID": 42 } ], "page": 1, "hitsPerPage": 1, "nbHits": 1, "nbPages": 1 }') + end + end +end + +Algolia::WebMock.mock! 
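Because requiring algolia/webmock registers the WebMock stubs above and calls Algolia::WebMock.mock! on load, a test suite can exercise this client without real network traffic. A minimal sketch under that assumption; the credentials, index name and record are invented for illustration, and the disable_net_connect! call is optional but makes any unstubbed request fail fast:

require 'webmock'
require 'algoliasearch'
require 'algolia/webmock'     # registers the stubs above and calls Algolia::WebMock.mock!

WebMock.disable_net_connect!  # optional: raise on any request that is not stubbed

Algolia.init(:application_id => 'test-app-id', :api_key => 'test-api-key')  # placeholder credentials
index = Algolia::Index.new('friends')                                       # hypothetical index name

# The batch stub answers { "taskID" => 42 } and the task stub reports "published",
# so the bang variant returns without polling a real engine.
res = index.add_objects!([{ :objectID => '1', :name => 'Jane' }])
puts res['taskID']                 # => 42

# The POST .../query stub returns a single canned hit with objectID 42.
puts index.search('jane')['hits']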
diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb new file mode 100644 index 0000000..f000b23 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb @@ -0,0 +1,26 @@ +## ---------------------------------------------------------------------- +## +## Ruby client for algolia.com +## A quick library for playing with algolia.com's REST API for object storage. +## Thanks to Sylvain Utard for the initial version of the library +## ---------------------------------------------------------------------- +require 'json' +if !defined?(RUBY_ENGINE) && defined?(RUBY_VERSION) && RUBY_VERSION == '1.8.7' + # work-around a limitation from nahi/httpclient, using the undefined RUBY_ENGINE constant + RUBY_ENGINE = 'ruby1.8' + require 'httpclient' + Object.send(:remove_const, :RUBY_ENGINE) +else + require 'httpclient' +end +require 'date' +require 'cgi' +require 'pathname' + +cwd = Pathname(__FILE__).dirname +$:.unshift(cwd.to_s) unless $:.include?(cwd.to_s) || $:.include?(cwd.expand_path.to_s) + +require 'algolia/index' +require 'algolia/analytics' +require 'algolia/insights' +require 'algolia/account_client' diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt new file mode 100644 index 0000000..52e0c4e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt @@ -0,0 +1,3908 @@ +## +## ca-bundle.crt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Sat Dec 29 20:03:40 2012 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1 +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. 
+## + +# @(#) $RCSfile: certdata.txt,v $ $Revision: 1.87 $ $Date: 2012/12/29 16:32:45 $ + +USERTrust RSA root CA +===================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE 
+AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm +SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Digital Signature Trust Co. 
Global CA 1 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENnAVljANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMTAeFw05ODEy +MTAxODEwMjNaFw0xODEyMTAxODQwMjNaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUxMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQCgbIGpzzQeJN3+hijM3oMv+V7UQtLodGBmE5gGHKlREmlvMVW5SXIACH7TpWJE +NySZj9mDSI+ZbZUTu0M7LklOiDfBu1h//uG9+LthzfNHwJmm8fOR6Hh8AMthyUQncWlVSn5JTe2i +o74CTADKAqjuAQIxZA9SLRN0dja1erQtcQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTExDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMTAxODEwMjNagQ8yMDE4MTIxMDE4MTAyM1owCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFGp5fpFpRhgTCgJ3pVlbYJglDqL4MB0GA1UdDgQWBBRqeX6RaUYYEwoCd6VZW2CYJQ6i+DAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +ACIS2Hod3IEGtgllsofIH160L+nEHvI8wbsEkBFKg05+k7lNQseSJqBcNJo4cvj9axY+IO6CizEq +kzaFI4iKPANo08kJD038bKTaKHKTDomAsH3+gG9lbRgzl4vCa4nuYD3Im+9/KzJic5PLPON74nZ4 +RbyhkwS7hp86W0N6w4pl +-----END CERTIFICATE----- + +Digital Signature Trust Co. Global CA 3 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENm7TzjANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMjAeFw05ODEy +MDkxOTE3MjZaFw0xODEyMDkxOTQ3MjZaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUyMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQC/k48Xku8zExjrEH9OFr//Bo8qhbxe+SSmJIi2A7fBw18DW9Fvrn5C6mYjuGOD +VvsoLeE4i7TuqAHhzhy2iCoiRoX7n6dwqUcUP87eZfCocfdPJmyMvMa1795JJ/9IKn3oTQPMx7JS +xhcxEzu1TdvIxPbDDyQq2gyd55FbgM2UnQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTIxDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMDkxOTE3MjZagQ8yMDE4MTIwOTE5MTcyNlowCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFB6CTShlgDzJQW6sNS5ay97u+DlbMB0GA1UdDgQWBBQegk0oZYA8yUFurDUuWsve7vg5WzAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +AEeNg61i8tuwnkUiBbmi1gMOOHLnnvx75pO2mqWilMg0HZHRxdf0CiUPPXiBng+xZ8SQTGPdXqfi +up/1902lMXucKS1M/mQ+7LZT/uqb7YLbdHVLB3luHtgZg3Pe9T7Qtd7nS2h9Qy4qIOF+oHhEngj1 +mPnHfxsb1gYgAlihw6ID +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G2 
+============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEEzH6qqYPnHTkxD4PTqJkZIwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCq0Lq+Fi24g9TK0g+8djHKlNgd +k4xWArzZbxpvUjZudVYKVdPfQ4chEWWKfo+9Id5rMj8bhDSVBZ1BNeuS65bdqlk/AVNtmU/t5eIq +WpDBucSmFc/IReumXY6cPvBkJHalzasab7bYe1FhbqZ/h8jit+U03EGI6glAvnOSPWvndQIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAKlPww3HZ74sy9mozS11534Vnjty637rXC0Jh9ZrbWB85a7FkCMM +XErQr7Fd88e2CtvgFZMN3QO8x3aKtd1Pw5sTdbgBwObJW2uluIncrKTdcu1OofdPvAbT6shkdHvC +lUGcZXNY8ZCaPGqxmMnEh7zPRW1F4m4iP/68DzFc6PLZ +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAzCCAmwCEQC5L2DMiJ+hekYJuFtwbIqvMA0GCSqGSIb3DQEBBQUAMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazAeFw05ODA1MTgwMDAwMDBaFw0yODA4MDEyMzU5NTlaMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAp4gBIXQs5xoD8JjhlzwPIQjx +nNuX6Zr8wgQGE75fUsjMHiwSViy4AWkszJkfrbCWrnkE8hM5wXuYuggs6MKEEyyqaekJ9MepAqRC +wiNPStjwDqL7MWzJ5m+ZJwf15vRMeJ5t60aG+rmGyVTyssSv1EYcWskVMP8NbPUtDm3Of3cCAwEA +ATANBgkqhkiG9w0BAQUFAAOBgQByLvl/0fFx+8Se9sVeUYpAmLho+Jscg9jinb3/7aHmZuovCfTK +1+qlK5X2JGCGTUQug6XELaDTrnhpb3LabK4I8GOSN+a7xDAXrXfMSTWqz9iP0b63GJZHc2pUIjRk +LbYWm1lbtFFZOrMLFPQS32eg9K0yZF6xRnInjBJ7xUS0rg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT 
+1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD +Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 
+LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkg 
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAN2E1Lm0+afY8wR4nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/E +bRrsC+MO8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjVojYJ +rKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjbPG7PoBMAGrgnoeS+ +Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP26KbqxzcSXKMpHgLZ2x87tNcPVkeB +FQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vrn5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +q2aN17O6x5q25lXQBfGfMY1aqtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/N +y9Sn2WCVhDr4wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrspSCAaWihT37h +a88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4E1Z5T21Q6huwtVexN2ZYI/Pc +D98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29y +azE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ug +b25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9y +aXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArwoNwtUs22e5LeWUJ92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6 +tW8UvxDOJxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUYwZF7 +C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9okoqQHgiBVrKtaaNS +0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjNqWm6o+sdDZykIKbBoMXRRkwXbdKs +Zj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/ESrg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0 +JhU8wI1NQ0kdvekhktdmnLfexbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf +0xwLRtxyID+u7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RIsH/7NiXaldDx +JBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTPcjnhsUPgKM+351psE2tJs//j +GHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc 
+cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l +dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl 
+cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0xOTEyMjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo3QwcjARBglghkgBhvhC +AQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGAvtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdER +gL7YibkIozH5oSQJFrlwMB0GCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0B +AQUFAAOCAQEAWUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQh7A6tcOdBTcS +o8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18f3v/rxzP5tsHrV7bhZ3QKw0z +2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfNB/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjX +OP/swNlQ8C5LWK5Gb9Auw2DaclVyvUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp 
+-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 2 +============================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEXMBUGA1UE +ChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0y +MB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoT +DkVxdWlmYXggU2VjdXJlMSYwJAYDVQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn +2Z0GvxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/BPO3QSQ5 +BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0CAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUx +JjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTkwNjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9e +uSBIplBqy/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAAyGgq3oThr1 +jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia +78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUm +V+GRMOrN +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU 
+cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 
+KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E 
+FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t 
+JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + 
+UTN-USER First-Network Applications +=================================== +-----BEGIN CERTIFICATE----- +MIIEZDCCA0ygAwIBAgIQRL4Mi1AAJLQR0zYwS8AzdzANBgkqhkiG9w0BAQUFADCBozELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzAp +BgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBBcHBsaWNhdGlvbnMwHhcNOTkwNzA5MTg0ODM5 +WhcNMTkwNzA5MTg1NzQ5WjCBozELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5T +YWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzApBgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBB +cHBsaWNhdGlvbnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCz+5Gh5DZVhawGNFug +mliy+LUPBXeDrjKxdpJo7CNKyXY/45y2N3kDuatpjQclthln5LAbGHNhSuh+zdMvZOOmfAz6F4Cj +DUeJT1FxL+78P/m4FoCHiZMlIJpDgmkkdihZNaEdwH+DBmQWICzTSaSFtMBhf1EI+GgVkYDLpdXu +Ozr0hAReYFmnjDRy7rh4xdE7EkpvfmUnuaRVxblvQ6TFHSyZwFKkeEwVs0CYCGtDxgGwenv1axwi +P8vv/6jQOkt2FZ7S0cYu49tXGzKiuG/ohqY/cKvlcJKrRB5AUPuco2LkbG6gyN7igEL66S/ozjIE +j3yNtxyjNTwV3Z7DrpelAgMBAAGjgZEwgY4wCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8w +HQYDVR0OBBYEFPqGydvguul49Uuo1hXf8NPhahQ8ME8GA1UdHwRIMEYwRKBCoECGPmh0dHA6Ly9j +cmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LU5ldHdvcmtBcHBsaWNhdGlvbnMuY3JsMA0G +CSqGSIb3DQEBBQUAA4IBAQCk8yXM0dSRgyLQzDKrm5ZONJFUICU0YV8qAhXhi6r/fWRRzwr/vH3Y +IWp4yy9Rb/hCHTO967V7lMPDqaAt39EpHx3+jz+7qEUqf9FuVSTiuwL7MT++6LzsQCv4AdRWOOTK +RIK1YSAhZ2X28AvnNPilwpyjXEAfhZOVBt5P1CeptqX8Fs1zMT+4ZSfP1FMa8Kxun08FDAOBp4Qp +xFq9ZFdyrTvPNximmMatBrTcCKME1SmklpoSZ0qMYEWd8SOasACcaLWYUNPvji6SZbFIPiG+FTAq +DbUMo2s/rn9X9R+WfN9v3YIwLGUbQErNaLly7HF27FSOH4UMAWr6pjisH8SE +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg 
+T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg +JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL 
+kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm 
+aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu 
+BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp 
+p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 1 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBJDANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MxIENBMB4XDTAxMDQwNjEwNDkxM1oXDTIxMDQw +NjEwNDkxM1owOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALWJHytPZwp5/8Ue+H88 +7dF+2rDNbS82rDTG29lkFwhjMDMiikzujrsPDUJVyZ0upe/3p4zDq7mXy47vPxVnqIJyY1MPQYx9 +EJUkoVqlBvqSV536pQHydekfvFYmUk54GWVYVQNYwBSujHxVX3BbdyMGNpfzJLWaRpXk3w0LBUXl 
+0fIdgrvGE+D+qnr9aTCU89JFhfzyMlsy3uhsXR/LpCJ0sICOXZT3BgBLqdReLjVQCfOAl/QMF645 +2F/NM8EcyonCIvdFEu1eEpOdY6uCLrnrQkFEy0oaAIINnvmLVz5MxxftLItyM19yejhW1ebZrgUa +HXVFsculJRwSVzb9IjcCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQIR+IMi/ZT +iFIwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCLGrLJXWG04bkruVPRsoWdd44W7hE9 +28Jj2VuXZfsSZ9gqXLar5V7DtxYvyOirHYr9qxp81V9jz9yw3Xe5qObSIjiHBxTZ/75Wtf0HDjxV +yhbMp6Z3N/vbXB9OWQaHowND9Rart4S9Tu+fMTfwRvFAttEMpWT4Y14h21VOTzF2nBBhjrZTOqMR +vq9tfB69ri3iDGnHhVNoomG6xT60eVR4ngrHAr5i0RGCS2UvkVrCqIexVmiUefkl98HVrhq4uz2P +qYo4Ffdz0Fpg0YCw8NzVUM1O7pJIae2yIx4wzMiUyLb1O4Z/P6Yun/Y+LLWSlj7fLJOK/4GMDw9Z +IRlXvVWa +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx 
+NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +TDC OCES Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFGTCCBAGgAwIBAgIEPki9xDANBgkqhkiG9w0BAQUFADAxMQswCQYDVQQGEwJESzEMMAoGA1UE +ChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTAeFw0wMzAyMTEwODM5MzBaFw0zNzAyMTEwOTA5 +MzBaMDExCzAJBgNVBAYTAkRLMQwwCgYDVQQKEwNUREMxFDASBgNVBAMTC1REQyBPQ0VTIENBMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGL2YSCyz8DGhdfjeebM7fI5kqSXLmSjhFuH +nEz9pPPEXyG9VhDr2y5h7JNp46PMvZnDBfwGuMo2HP6QjklMxFaaL1a8z3sM8W9Hpg1DTeLpHTk0 +zY0s2RKY+ePhwUp8hjjEqcRhiNJerxomTdXkoCJHhNlktxmW/OwZ5LKXJk5KTMuPJItUGBxIYXvV +iGjaXbXqzRowwYCDdlCqT9HU3Tjw7xb04QxQBr/q+3pJoSgrHPb8FTKjdGqPqcNiKXEx5TukYBde +dObaE+3pHx8b0bJoc8YQNHVGEBDjkAB2QMuLt0MJIf+rTpPGWOmlgtt3xDqZsXKVSQTwtyv6e1mO +3QIDAQABo4ICNzCCAjMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwgewGA1UdIASB +5DCB4TCB3gYIKoFQgSkBAQEwgdEwLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuY2VydGlmaWthdC5k +ay9yZXBvc2l0b3J5MIGdBggrBgEFBQcCAjCBkDAKFgNUREMwAwIBARqBgUNlcnRpZmlrYXRlciBm +cmEgZGVubmUgQ0EgdWRzdGVkZXMgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4xLiBDZXJ0aWZp +Y2F0ZXMgZnJvbSB0aGlzIENBIGFyZSBpc3N1ZWQgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4x +LjARBglghkgBhvhCAQEEBAMCAAcwgYEGA1UdHwR6MHgwSKBGoESkQjBAMQswCQYDVQQGEwJESzEM +MAoGA1UEChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTENMAsGA1UEAxMEQ1JMMTAsoCqgKIYm +aHR0cDovL2NybC5vY2VzLmNlcnRpZmlrYXQuZGsvb2Nlcy5jcmwwKwYDVR0QBCQwIoAPMjAwMzAy +MTEwODM5MzBagQ8yMDM3MDIxMTA5MDkzMFowHwYDVR0jBBgwFoAUYLWF7FZkfhIZJ2cdUBVLc647 ++RIwHQYDVR0OBBYEFGC1hexWZH4SGSdnHVAVS3OuO/kSMB0GCSqGSIb2fQdBAAQQMA4bCFY2LjA6 +NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEACromJkbTc6gJ82sLMJn9iuFXehHTuJTXCRBuo7E4 +A9G28kNBKWKnctj7fAXmMXAnVBhOinxO5dHKjHiIzxvTkIvmI/gLDjNDfZziChmPyQE+dF10yYsc +A+UYyAFMP8uXBV2YcaaYb7Z8vTd/vuGTJW1v8AqtFxjhA7wHKcitJuj4YfD9IQl+mo6paH1IYnK9 +AOoBmbgGglGBTvH1tJFUuSN6AJqfXY3gPGS5GhKSKseCRHI53OI8xthV9RVOyAUO28bQYqbsFbS1 +AoLbrIyigfCbmTH1ICCoiGEKB5+U/NDXG8wuF/MEJ3Zn61SD/aSQfgY9BKNDLdr8C2LqL19iUw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ 
+BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Email Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0 +BgNVBAMTLVVUTi1VU0VSRmlyc3QtQ2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05 +OTA3MDkxNzI4NTBaFw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQx +FzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsx +ITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UEAxMtVVROLVVTRVJGaXJz +dC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWlsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3BYHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIx +B8dOtINknS4p1aJkxIW9hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8 +om+rWV6lL8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLmSGHG +TPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM1tZUOt4KpLoDd7Nl +yP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws6wIDAQABo4G5MIG2MAsGA1UdDwQE +AwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNV +HR8EUTBPME2gS6BJhkdodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGll +bnRBdXRoZW50aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u7mFVbwQ+zzne +xRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0xtcgBEXkzYABurorbs6q15L+ +5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQrfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarV +NZ1yQAOJujEdxRBoUp7fooXFXAimeOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZ +w7JHpsIyYdfHb0gkUSeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx 
+OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +UTN USERFirst Object Root CA +============================ +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAb +BgNVBAMTFFVUTi1VU0VSRmlyc3QtT2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAz +NlowgZUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkx +HjAcBgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3dy51c2Vy +dHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicPHxzfOpuCaDDASmEd8S8O+r5596Uj71VR +loTN2+O5bj4x2AogZ8f02b+U60cEPgLOKqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQ +w5ujm9M89RKZd7G3CeBo5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vu +lBe3/IW+pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehbkkj7 +RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUCAwEAAaOBrzCBrDAL +BgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU2u1kdBScFDyr3ZmpvVsoTYs8 +ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtT2JqZWN0LmNybDApBgNVHSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQw +DQYJKoZIhvcNAQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXBmMiKVl0+7kNO +PmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU4U3GDZlDAQ0Slox4nb9QorFE +qmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK581OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCG +hU3IfdeLA/5u1fedFqySLKAj5ZyRUh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC 
+AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Qualified (Class QA) Root +================================= +-----BEGIN CERTIFICATE----- +MIIG0TCCBbmgAwIBAgIBezANBgkqhkiG9w0BAQUFADCByTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMUIwQAYDVQQDEzlOZXRMb2NrIE1pbm9zaXRldHQgS296amVn +eXpvaSAoQ2xhc3MgUUEpIFRhbnVzaXR2YW55a2lhZG8xHjAcBgkqhkiG9w0BCQEWD2luZm9AbmV0 +bG9jay5odTAeFw0wMzAzMzAwMTQ3MTFaFw0yMjEyMTUwMTQ3MTFaMIHJMQswCQYDVQQGEwJIVTER 
+MA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRvbnNhZ2kgS2Z0 +LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxQjBABgNVBAMTOU5ldExvY2sgTWlub3NpdGV0 +dCBLb3pqZWd5em9pIChDbGFzcyBRQSkgVGFudXNpdHZhbnlraWFkbzEeMBwGCSqGSIb3DQEJARYP +aW5mb0BuZXRsb2NrLmh1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx1Ilstg91IRV +CacbvWy5FPSKAtt2/GoqeKvld/Bu4IwjZ9ulZJm53QE+b+8tmjwi8F3JV6BVQX/yQ15YglMxZc4e +8ia6AFQer7C8HORSjKAyr7c3sVNnaHRnUPYtLmTeriZ539+Zhqurf4XsoPuAzPS4DB6TRWO53Lhb +m+1bOdRfYrCnjnxmOCyqsQhjF2d9zL2z8cM/z1A57dEZgxXbhxInlrfa6uWdvLrqOU+L73Sa58XQ +0uqGURzk/mQIKAR5BevKxXEOC++r6uwSEaEYBTJp0QwsGj0lmT+1fMptsK6ZmfoIYOcZwvK9UdPM +0wKswREMgM6r3JSda6M5UzrWhQIDAMV9o4ICwDCCArwwEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAQYwggJ1BglghkgBhvhCAQ0EggJmFoICYkZJR1lFTEVNISBFemVuIHRhbnVzaXR2 +YW55IGEgTmV0TG9jayBLZnQuIE1pbm9zaXRldHQgU3pvbGdhbHRhdGFzaSBTemFiYWx5emF0YWJh +biBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBBIG1pbm9zaXRldHQgZWxla3Ryb25p +a3VzIGFsYWlyYXMgam9naGF0YXMgZXJ2ZW55ZXN1bGVzZW5laywgdmFsYW1pbnQgZWxmb2dhZGFz +YW5hayBmZWx0ZXRlbGUgYSBNaW5vc2l0ZXR0IFN6b2xnYWx0YXRhc2kgU3phYmFseXphdGJhbiwg +YXogQWx0YWxhbm9zIFN6ZXJ6b2Rlc2kgRmVsdGV0ZWxla2JlbiBlbG9pcnQgZWxsZW5vcnplc2kg +ZWxqYXJhcyBtZWd0ZXRlbGUuIEEgZG9rdW1lbnR1bW9rIG1lZ3RhbGFsaGF0b2sgYSBodHRwczov +L3d3dy5uZXRsb2NrLmh1L2RvY3MvIGNpbWVuIHZhZ3kga2VyaGV0b2sgYXogaW5mb0BuZXRsb2Nr +Lm5ldCBlLW1haWwgY2ltZW4uIFdBUk5JTkchIFRoZSBpc3N1YW5jZSBhbmQgdGhlIHVzZSBvZiB0 +aGlzIGNlcnRpZmljYXRlIGFyZSBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIFF1YWxpZmllZCBDUFMg +YXZhaWxhYmxlIGF0IGh0dHBzOi8vd3d3Lm5ldGxvY2suaHUvZG9jcy8gb3IgYnkgZS1tYWlsIGF0 +IGluZm9AbmV0bG9jay5uZXQwHQYDVR0OBBYEFAlqYhaSsFq7VQ7LdTI6MuWyIckoMA0GCSqGSIb3 +DQEBBQUAA4IBAQCRalCc23iBmz+LQuM7/KbD7kPgz/PigDVJRXYC4uMvBcXxKufAQTPGtpvQMznN +wNuhrWw3AkxYQTvyl5LGSKjN5Yo5iWH5Upfpvfb5lHTocQ68d4bDBsxafEp+NFAwLvt/MpqNPfMg +W/hqyobzMUwsWYACff44yTB1HLdV47yfuqhthCgFdbOLDcCRVCHnpgu0mfVRQdzNo0ci2ccBgcTc +R08m6h/t280NmPSjnLRzMkqWmf68f8glWPhY83ZmiVSkpj7EUFy6iRiCdUgh0k8T6GB+B3bbELVR +5qq5aKrN9p2QdRLqOBrKROi3macqaJVmlaut74nLYKkGEsaUR+ko +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 
+ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD +KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB 
+jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 
+qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ 
+untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk 
+7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Firmaprofesional Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEVzCCAz+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBnTELMAkGA1UEBhMCRVMxIjAgBgNVBAcT +GUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMTOUF1dG9yaWRhZCBkZSBDZXJ0aWZp +Y2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODEmMCQGCSqGSIb3DQEJARYXY2FA +ZmlybWFwcm9mZXNpb25hbC5jb20wHhcNMDExMDI0MjIwMDAwWhcNMTMxMDI0MjIwMDAwWjCBnTEL +MAkGA1UEBhMCRVMxIjAgBgNVBAcTGUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMT +OUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2 +ODEmMCQGCSqGSIb3DQEJARYXY2FAZmlybWFwcm9mZXNpb25hbC5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDnIwNvbyOlXnjOlSztlB5uCp4Bx+ow0Syd3Tfom5h5VtP8c9/Qit5V +j1H5WuretXDE7aTt/6MNbg9kUDGvASdYrv5sp0ovFy3Tc9UTHI9ZpTQsHVQERc1ouKDAA6XPhUJH +lShbz++AbOCQl4oBPB3zhxAwJkh91/zpnZFx/0GaqUC1N5wpIE8fUuOgfRNtVLcK3ulqTgesrBlf +3H5idPayBQC6haD9HThuy1q7hryUZzM1gywfI834yJFxzJeL764P3CkDG8A563DtwW4O2GcLiam8 +NeTvtjS0pbbELaW+0MOUJEjb35bTALVmGotmBQ/dPz/LP6pemkr4tErvlTcbAgMBAAGjgZ8wgZww +KgYDVR0RBCMwIYYfaHR0cDovL3d3dy5maXJtYXByb2Zlc2lvbmFsLmNvbTASBgNVHRMBAf8ECDAG +AQH/AgEBMCsGA1UdEAQkMCKADzIwMDExMDI0MjIwMDAwWoEPMjAxMzEwMjQyMjAwMDBaMA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUMwugZtHq2s7eYpMEKFK1FH84aLcwDQYJKoZIhvcNAQEFBQAD +ggEBAEdz/o0nVPD11HecJ3lXV7cVVuzH2Fi3AQL0M+2TUIiefEaxvT8Ub/GzR0iLjJcG1+p+o1wq +u00vR+L4OQbJnC4xGgN49Lw4xiKLMzHwFgQEffl25EvXwOaD7FnMP97/T2u3Z36mhoEyIwOdyPdf +wUpgpZKpsaSgYMN4h7Mi8yrrW6ntBas3D7Hi05V2Y1Z0jFhyGzflZKG+TQyTmAyX9odtsz/ny4Cm +7YjHX1BiAuiZdBbQ5rQ58SfLyEDW44YQqSMSkuBpQWOnryULwMWSyx6Yo1q6xTMPoJcB3X/ge9YG +VM+h4k0460tQtcsm9MracEpqoeJ5quGnM/b9Sh/22WA= +-----END CERTIFICATE----- + +Wells Fargo Root CA +=================== +-----BEGIN CERTIFICATE----- +MIID5TCCAs2gAwIBAgIEOeSXnjANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC1dlbGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTEvMC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDAxMDExMTY0MTI4WhcNMjEwMTE0MTY0MTI4WjCBgjELMAkGA1UEBhMCVVMxFDASBgNVBAoTC1dl +bGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEv +MC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVqDM7Jvk0/82bfuUER84A4n135zHCLielTWi5MbqNQ1mX +x3Oqfz1cQJ4F5aHiidlMuD+b+Qy0yGIZLEWukR5zcUHESxP9cMIlrCL1dQu3U+SlK93OvRw6esP3 +E48mVJwWa2uv+9iWsWCaSOAlIiR5NM4OJgALTqv9i86C1y8IcGjBqAr5dE8Hq6T54oN+J3N0Prj5 +OEL8pahbSCOz6+MlsoCultQKnMJ4msZoGK43YjdeUXWoWGPAUe5AeH6orxqg4bB4nVCMe+ez/I4j +sNtlAHCEAQgAFG5Uhpq6zPk3EPbg3oQtnaSFN9OH4xXQwReQfhkhahKpdv0SAulPIV4XAgMBAAGj +YTBfMA8GA1UdEwEB/wQFMAMBAf8wTAYDVR0gBEUwQzBBBgtghkgBhvt7hwcBCzAyMDAGCCsGAQUF +BwIBFiRodHRwOi8vd3d3LndlbGxzZmFyZ28uY29tL2NlcnRwb2xpY3kwDQYJKoZIhvcNAQEFBQAD +ggEBANIn3ZwKdyu7IvICtUpKkfnRLb7kuxpo7w6kAOnu5+/u9vnldKTC2FJYxHT7zmu1Oyl5GFrv +m+0fazbuSCUlFLZWohDo7qd/0D+j0MNdJu4HzMPBJCGHHt8qElNvQRbn7a6U+oxy+hNH8Dx+rn0R +OhPs7fpvcmR7nX1/Jv16+yWt6j4pf0zjAFcysLPp7VMX2YuyFA4w6OXVE8Zkr8QA1dhYJPz1j+zx +x32l2w8n0cbyQIjmH/ZhqPRCyLk306m+LFZ4wnKbWV01QIroTmMatukgalHizqSQ33ZwmVxwQ023 +tqcZZE6St8WRPH9IFmV7Fv3L/PvZ1dZPIWU7Sn9Ho/s= +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG 
+EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- 
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e 
+e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- 
+MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Platinum CA - G2 +========================== +-----BEGIN CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWduIFBsYXRpbnVtIENBIC0gRzIw +HhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAwWjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMM +U3dpc3NTaWduIEFHMSMwIQYDVQQDExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu 
+669yIIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2HtnIuJpX+UF +eNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+6ixuEFGSzH7VozPY1kne +WCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5objM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIo +j5+saCB9bzuohTEJfwvH6GXp43gOCWcwizSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/6 +8++QHkwFix7qepF6w9fl+zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34T +aNhxKFrYzt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaPpZjy +domyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtFKwH3HBqi7Ri6Cr2D ++m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuWae5ogObnmLo2t/5u7Su9IPhlGdpV +CX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMBAAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCv +zAeHFUdvOMW0ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW +IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUAA4ICAQAIhab1 +Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0uMoI3LQwnkAHFmtllXcBrqS3 +NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4 +U99REJNi54Av4tHgvI42Rncz7Lj7jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8 +KV2LwUvJ4ooTHbG/u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl +9x8DYSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1puEa+S1B +aYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXaicYwu+uPyyIIoK6q8QNs +OktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbGDI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSY +Mdp08YSTcU1f+2BY0fvEwW2JorsgH51xkcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAci +IfNAChs0B0QTwoRqjt8ZWr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf 
+KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN 
+NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC 
+Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB 
+AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc 
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +S-TRUST Authentication and Encryption Root CA 2005 PN +===================================================== +-----BEGIN CERTIFICATE----- +MIIEezCCA2OgAwIBAgIQNxkY5lNUfBq1uMtZWts1tzANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCREUxIDAeBgNVBAgTF0JhZGVuLVd1ZXJ0dGVtYmVyZyAoQlcpMRIwEAYDVQQHEwlTdHV0dGdh +cnQxKTAnBgNVBAoTIERldXRzY2hlciBTcGFya2Fzc2VuIFZlcmxhZyBHbWJIMT4wPAYDVQQDEzVT +LVRSVVNUIEF1dGhlbnRpY2F0aW9uIGFuZCBFbmNyeXB0aW9uIFJvb3QgQ0EgMjAwNTpQTjAeFw0w +NTA2MjIwMDAwMDBaFw0zMDA2MjEyMzU5NTlaMIGuMQswCQYDVQQGEwJERTEgMB4GA1UECBMXQmFk +ZW4tV3VlcnR0ZW1iZXJnIChCVykxEjAQBgNVBAcTCVN0dXR0Z2FydDEpMCcGA1UEChMgRGV1dHNj +aGVyIFNwYXJrYXNzZW4gVmVybGFnIEdtYkgxPjA8BgNVBAMTNVMtVFJVU1QgQXV0aGVudGljYXRp +b24gYW5kIEVuY3J5cHRpb24gUm9vdCBDQSAyMDA1OlBOMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA2bVKwdMz6tNGs9HiTNL1toPQb9UY6ZOvJ44TzbUlNlA0EmQpoVXhOmCTnijJ4/Ob 
+4QSwI7+Vio5bG0F/WsPoTUzVJBY+h0jUJ67m91MduwwA7z5hca2/OnpYH5Q9XIHV1W/fuJvS9eXL +g3KSwlOyggLrra1fFi2SU3bxibYs9cEv4KdKb6AwajLrmnQDaHgTncovmwsdvs91DSaXm8f1Xgqf +eN+zvOyauu9VjxuapgdjKRdZYgkqeQd3peDRF2npW932kKvimAoA0SVtnteFhy+S8dF2g08LOlk3 +KC8zpxdQ1iALCvQm+Z845y2kuJuJja2tyWp9iRe79n+Ag3rm7QIDAQABo4GSMIGPMBIGA1UdEwEB +/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgEGMCkGA1UdEQQiMCCkHjAcMRowGAYDVQQDExFTVFJv +bmxpbmUxLTIwNDgtNTAdBgNVHQ4EFgQUD8oeXHngovMpttKFswtKtWXsa1IwHwYDVR0jBBgwFoAU +D8oeXHngovMpttKFswtKtWXsa1IwDQYJKoZIhvcNAQEFBQADggEBAK8B8O0ZPCjoTVy7pWMciDMD +pwCHpB8gq9Yc4wYfl35UvbfRssnV2oDsF9eK9XvCAPbpEW+EoFolMeKJ+aQAPzFoLtU96G7m1R08 +P7K9n3frndOMusDXtk3sU5wPBG7qNWdX4wple5A64U8+wwCSersFiXOMy6ZNwPv2AtawB6MDwidA +nwzkhYItr5pCHdDHjfhA7p0GVxzZotiAFP7hYy0yh9WUUpY6RsZxlj33mA6ykaqP2vROJAA5Veit +F7nTNCtKqUDMFypVZUF0Qn71wK/Ik63yGFs9iQzbRzkk+OBM8h+wPQrKBU6JIRrjKpms/H+h8Q8b +Hz2eBIPdltkdOpQ= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a 
+86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. +====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 
+MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i 
+SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy +vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign CA +========== +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0MRMwEQYDVQQD 
+EwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTMy +MThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMTCkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNp +Z24xCzAJBgNVBAYTAklMMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49q +ROR+WCf4C9DklBKK8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTy +P2Q298CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb2CEJKHxN +GGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxCejVb7Us6eva1jsz/D3zk +YDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7KpiXd3DTKaCQeQzC6zJMw9kglcq/QytNuEM +rkvF7zuZ2SOzW120V+x0cAwqTwIDAQABo4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAy +oDCgLoYsaHR0cDovL2ZlZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0P +AQH/BAQDAgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRLAZs+ +VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWdfoPPbrxHbvUanlR2 +QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0McXS6hMTXcpuEfDhOZAYnKuGntewI +mbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb +/627HOkthIDYIb6FUtnUdLlphbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VG +zT2ouvDzuFYkRes3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G 
+A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF 
+n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 
+htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt 
+vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI 
+hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 
+============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC +VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU 
+cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil +e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 
+qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh 
+AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD 
+T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCED9pHoGc8JpK83P/uUii5N0wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAxIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDlGb9to1ZhLZlIcfZn3rmN67eehoAKkQ76OCWvRoiC5XOooJskXQ0fzGVuDLDQ +VoQYh5oGmxChc9+0WDlrbsH2FdWoqD+qEgaNMax/sDTXjzRniAnNFBHiTkVWaR94AoDa3EeRKbs2 +yWNcxeDXLYd7obcysHswuiovMaruo2fa2wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFgVKTk8d6Pa +XCUDfGD67gmZPCcQcMgMCeazh88K4hiWNWLMv5sneYlfycQJ9M61Hd8qveXbhpxoJeUwfLaJFf5n +0a3hUKw8fGJLj7qE1xIVGx/KXQ/BUpQqEZnae88MNhPVNdwQGVnqlMEAv3WP2fr9dgTbYruQagPZ +RjXZ+Hxb +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 
+f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv 
+YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +TC TrustCenter Universal CA III +=============================== +-----BEGIN CERTIFICATE----- +MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAe +Fw0wOTA5MDkwODE1MjdaFw0yOTEyMzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNU +QyBUcnVzdENlbnRlciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0Ex +KDAmBgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF5+cvAqBNLaT6hdqbJYUt +QCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYvDIRlzg9uwliT6CwLOunBjvvya8o84pxO +juT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8vzArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+Eut +CHnNaYlAJ/Uqwa1D7KRTyGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1 +M4BDj5yjdipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBhMB8G +A1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI4jANBgkqhkiG9w0BAQUFAAOCAQEA +g8ev6n9NCjw5sWi+e22JLumzCecYV42FmhfzdkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+ +KGwWaODIl0YgoGhnYIg5IFHYaAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhK +BgePxLcHsU0GDeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV +CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPHLQNjO9Po5KIq +woIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI 
+plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z 
+WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf 
+VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- 
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV 
+C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK 
++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q 
+c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH 
+iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ 
++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI 
+1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ 
+RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib 
+D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI 
+osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA 
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb new file mode 100644 index 0000000..26a1f58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb @@ -0,0 +1,89 @@ +# encoding: UTF-8 +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) +require 'securerandom' + +describe 'Account client' do + before(:all) do + client_1 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_1'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_1'] + }) + + @index_1 = client_1.init_index(index_name('account_client_1')) + + client_2 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_1'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_1'] + }) + + @index_2 = client_2.init_index(index_name('account_client_2')) + + client_3 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_2'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_2'] + }) + + @index_3 = client_3.init_index(index_name('account_client_3')) + + @index_1.delete_index rescue 'not fatal' + @index_2.delete_index rescue 'not fatal' + @index_3.delete_index rescue 'not fatal' + end + + after(:all) do + @index_1.delete_index rescue 'not fatal' + @index_2.delete_index rescue 'not fatal' + @index_3.delete_index rescue 'not fatal' + end + + it 'should not allow operations in the same application' do + expect { + 
Algolia::AccountClient.copy_index!(@index_1, @index_2) + }.to raise_exception( + Algolia::AlgoliaError, + 'The indexes are in the same application. Use Algolia::Client.copy_index instead.' + ) + end + + it 'should perform a cross app copy index and assert that destination must not exist' do + + @index_1.save_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + + @index_1.batch_rules! ([ + { + :objectID => 'one', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + } + ]) + + @index_1.batch_synonyms! ([ + {:objectID => 'one', :type => 'synonym', :synonyms => ['San Francisco', 'SF']} + ]) + + @index_1.set_settings! ({:searchableAttributes => ['objectID']}) + + Algolia::AccountClient.copy_index!(@index_1, @index_3) + + res = @index_3.search('') + res['nbHits'].should eq(1500) + + res = @index_3.search_rules('')['hits'] + res.size.should eq(1) + res[0]['objectID'].should eq('one') + + res = @index_3.search_synonyms('')['hits'] + res.size.should eq(1) + res[0]['objectID'].should eq('one') + + @index_3.get_settings['searchableAttributes'].should eq(['objectID']) + + expect { + Algolia::AccountClient.copy_index!(@index_1, @index_3) + }.to raise_exception( + Algolia::AlgoliaError, + 'Destination index already exists. Please delete it before copying index across applications.' + ) + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/client_spec.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/client_spec.rb new file mode 100644 index 0000000..fb664ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/client_spec.rb @@ -0,0 +1,1426 @@ +# encoding: UTF-8 +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) +require 'base64' + +def is_include(array, attr, value) + array.each do |elt| + if elt[attr] == value + return true + end + end + return false +end + +describe 'API keys', :maintainers_only => true do + before(:all) do + @index = Algolia::Index.new(safe_index_name("àlgol?a")) + @index.delete_index rescue "not fatal" + end + + after(:all) do + @index.delete_index rescue "not fatal" + end + + def wait_key(index, key, &block) + 1.upto(60) do # do not wait too long + begin + k = index.get_api_key(key) + if block_given? + return if yield k + # not found + sleep 1 + next + end + return + rescue + # not found + sleep 1 + end + end + end + + def wait_key_missing(index, key) + 1.upto(60) do # do not wait too long + begin + k = index.get_api_key(key) + sleep 1 + rescue + # not found + return + end + end + end + + def wait_global_key(key, &block) + 1.upto(60) do # do not wait too long + begin + k = Algolia.get_api_key(key) + if block_given? 
+ return if yield k + # not found + sleep 1 + next + end + return + rescue + # not found + sleep 1 + end + end + end + + def wait_global_key_missing(key) + 1.upto(60) do # do not wait too long + begin + k = Algolia.get_api_key(key) + sleep 1 + rescue + # not found + return + end + end + end + + it "should test index keys" do + @index.set_settings!({}) # ensure the index exists + + resIndex = @index.list_api_keys + newIndexKey = @index.add_api_key(['search']) + newIndexKey['key'].should_not eq("") + wait_key(@index, newIndexKey['key']) + resIndexAfter = @index.list_api_keys + is_include(resIndex['keys'], 'value', newIndexKey['key']).should eq(false) + is_include(resIndexAfter['keys'], 'value', newIndexKey['key']).should eq(true) + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey['acl'][0].should eq('search') + @index.update_api_key(newIndexKey['key'], ['addObject']) + wait_key(@index, newIndexKey['key']) do |key| + key['acl'] == ['addObject'] + end + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey['acl'][0].should eq('addObject') + @index.delete_api_key(newIndexKey['key']) + wait_key_missing(@index, newIndexKey['key']) + resIndexEnd = @index.list_api_keys + is_include(resIndexEnd['keys'], 'value', newIndexKey['key']).should eq(false) + end + + it "should test global keys" do + res = Algolia.list_api_keys + newKey = Algolia.add_api_key(['search']) + newKey['key'].should_not eq("") + wait_global_key(newKey['key']) + resAfter = Algolia.list_api_keys + is_include(res['keys'], 'value', newKey['key']).should eq(false) + is_include(resAfter['keys'], 'value', newKey['key']).should eq(true) + key = Algolia.get_api_key(newKey['key']) + key['acl'][0].should eq('search') + Algolia.update_api_key(newKey['key'], ['addObject']) + wait_global_key(newKey['key']) do |key| + key['acl'] == ['addObject'] + end + key = Algolia.get_api_key(newKey['key']) + key['acl'][0].should eq('addObject') + Algolia.delete_api_key(newKey['key']) + wait_global_key_missing(newKey['key']) + resEnd = Algolia.list_api_keys + is_include(resEnd['keys'], 'value', newKey['key']).should eq(false) + + # Restore the deleted key + Algolia.restore_api_key(newKey['key']) + wait_global_key(newKey['key']) + key_end = Algolia.list_api_keys + is_include(key_end['keys'], 'value', newKey['key']).should eq(true) + + # Re-delete the key + Algolia.delete_api_key(newKey['key']) + end + + it "Check add keys" do + newIndexKey = @index.add_api_key(['search']) + newIndexKey.should have_key('key') + newIndexKey['key'].should be_a(String) + newIndexKey.should have_key('createdAt') + newIndexKey['createdAt'].should be_a(String) + sleep 5 # no task ID here + resIndex = @index.list_api_keys + resIndex.should have_key('keys') + resIndex['keys'].should be_a(Array) + resIndex['keys'][0].should have_key('value') + resIndex['keys'][0]['value'].should be_a(String) + resIndex['keys'][0].should have_key('acl') + resIndex['keys'][0]['acl'].should be_a(Array) + resIndex['keys'][0].should have_key('validity') + resIndex['keys'][0]['validity'].should be_a(Integer) + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey.should have_key('value') + indexKey['value'].should be_a(String) + indexKey.should have_key('acl') + indexKey['acl'].should be_a(Array) + indexKey.should have_key('validity') + indexKey['validity'].should be_a(Integer) + task = @index.delete_api_key(newIndexKey['key']) + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + end +end + +describe 'Client' do + before(:all) do + @index = 
Algolia::Index.new(safe_index_name("àlgol?a")) + @index.delete_index rescue "not fatal" + end + + after(:all) do + @index.delete_index rescue "not fatal" + end + + it "should tell if index exists" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + expect(@index.exists?).to be true + end + + it "should tell if index does not exist" do + index = Algolia::Index.new('nonexistent_index') + expect(index.exists?).to be false + end + + it "should add a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = @index.search("john") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = @index.search("john") + res["hits"].length.should eq(1) + @index.partial_update_object!({ :name => "Robert Doe"}, "1") + res = @index.search("robert") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object, or add it if it doesn't exist" do + res = @index.search("tonny@parker.org") + res["hits"].length.should eq(0) + @index.partial_update_object!({ :email => "tonny@parker.org" }, "1") + res = @index.search("tonny@parker.org") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object, but don't add it if it doesn't exist" do + @index.partial_update_object!({ :email => "alex@boom.org" }, "51", false) + res = @index.search("alex@boom.org") + res["hits"].length.should eq(0) + end + + it "should partial update a batch of objects, and add them if they don't exist" do + batch = [ + { :objectID => "1", :email => "john@wanna.org" }, + { :objectID => "2", :email => "robert@wanna.org" } + ] + @index.partial_update_objects!(batch) + res = @index.search("@wanna.org") + res["hits"].length.should eq(2) + end + + it "should partial update a batch of objects, but don't add them if they don't exist" do + create_if_not_exits = false + batch = [ + { :objectID => "11", :email => "john@be.org" }, + { :objectID => "22", :email => "robert@be.org" } + ] + @index.partial_update_objects!(batch, create_if_not_exits) + res = @index.search("@be.org") + res["hits"].length.should eq(0) + end + + it "should add a set of objects" do + @index.add_objects!([ + { :name => "Another", :email => "another1@example.org" }, + { :name => "Another", :email => "another2@example.org" } + ]) + res = @index.search("another") + res["hits"].length.should eq(2) + end + + it "should partial update a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "2") + res = @index.search("john") + res["hits"].length.should eq(2) + @index.partial_update_objects!([{ :name => "Robert Doe", :objectID => "1"}, { :name => "Robert Doe", :objectID => "2"}]) + res = @index.search("robert") + res["hits"].length.should eq(2) + end + + it "should save a set of objects with their ids" do + @index.save_object!({ :name => "objectid", :email => "objectid1@example.org", :objectID => 101 }) + res = @index.search("objectid") + res["hits"].length.should eq(1) + end + + it "should save a set of objects with their ids" do + @index.save_objects!([ + { :name => "objectid", :email => "objectid1@example.org", :objectID => 101 }, + { :name => "objectid", :email => "objectid2@example.org", :objectID => 102 } + ]) + res = @index.search("objectid") + res["hits"].length.should eq(2) + end + + it "should replace all objects" do + 
@index.save_objects!([{:objectID => '1'}, {:objectID => '2'}]) + @index.replace_all_objects!([{'color' => 'black'}, {:objectID => '4', 'color' => 'green'}]) + + res = @index.search('') + res["hits"].length.should eq(2) + res = @index.search('black') + res["hits"][0]['color'].should eq('black') + @index.get_object('4')['color'].should eq('green') + end + + it "should throw an exception if invalid argument" do + expect { @index.add_object!([ {:name => "test"} ]) }.to raise_error(ArgumentError) + expect { @index.add_objects!([ [ {:name => "test"} ] ]) }.to raise_error(ArgumentError) + expect { @index.save_object(1) }.to raise_error(ArgumentError) + expect { @index.save_object("test") }.to raise_error(ArgumentError) + expect { @index.save_object({ :objectID => 42 }.to_json) }.to raise_error(ArgumentError) + expect { @index.save_objects([{}, ""]) }.to raise_error(ArgumentError) + expect { @index.save_objects([1]) }.to raise_error(ArgumentError) + expect { @index.save_objects!([1]) }.to raise_error(ArgumentError) + expect { @index.save_object({ :foo => 42 }) }.to raise_error(ArgumentError) # missing objectID + end + + it "should be thread safe" do + @index.clear! + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + + threads = [] + 64.times do + t = Thread.new do + 10.times do + res = @index.search("john") + res["hits"].length.should eq(2) + end + end + threads << t + end + threads.each { |t| t.join } + end + + if !defined?(RUBY_ENGINE) || RUBY_ENGINE != 'jruby' + it "should be fork safe" do + 8.times do + Process.fork do + 10.times do + res = @index.search("john") + res["hits"].length.should eq(2) + end + end + end + Process.waitall + end + end + + it "should clear the index" do + @index.clear! + @index.search("")["hits"].length.should eq(0) + end + + it "should have another index after" do + index = Algolia::Index.new(safe_index_name("àlgol?a")) + begin + index.delete_index! 
+ rescue + # friends_2 does not exist + end + res = Algolia.list_indexes + is_include(res['items'], 'name', safe_index_name('àlgol?a')).should eq(false) + index.add_object!({ :name => "Robert" }) + resAfter = Algolia.list_indexes + is_include(resAfter['items'], 'name', safe_index_name('àlgol?a')).should eq(true) + end + + it "should get a object" do + @index.clear_index + @index.add_object!({:firstname => "Robert"}) + @index.add_object!({:firstname => "Robert2"}) + res = @index.search('') + res["nbHits"].should eq(2) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq(res['hits'][0]['firstname']) + + object = @index.get_object(res['hits'][0]['objectID'], 'firstname') + object['firstname'].should eq(res['hits'][0]['firstname']) + + objects = @index.get_objects([ res['hits'][0]['objectID'], res['hits'][1]['objectID'] ]) + objects.size.should eq(2) + end + + it "should restrict attributesToRetrieve" do + @index.clear_index + @index.add_object({:firstname => "Robert", :lastname => "foo", :objectID => 1}) + @index.add_object!({:firstname => "Robert2", :lastname => "bar", :objectID => 2}) + objects = @index.get_objects([1, 2], ['firstname']) + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "objectID"=>"2"}) + + objects = @index.get_objects([1, 2], [:firstname]) + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "objectID"=>"2"}) + + objects = @index.get_objects(["1", "2"], 'firstname,lastname') + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "lastname"=>"foo", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "lastname"=>"bar", "objectID"=>"2"}) + end + + it "should delete the object" do + @index.clear + @index.add_object!({:firstname => "Robert"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(1) + @index.delete_object!(res['hits'][0]['objectID']) + @index.search('')['nbHits'].should eq(0) + end + + it "should not delete the index because the objectID is blank" do + @index.clear + @index.add_object!({:firstname => "Robert"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(1) + expect { @index.delete_object('') }.to raise_error(ArgumentError) + expect { @index.delete_object!(nil) }.to raise_error(ArgumentError) + @index.search('')['nbHits'].should eq(1) + end + + it "should delete several objects" do + @index.clear + @index.add_object!({:firstname => "Robert1"}) + @index.add_object!({:firstname => "Robert2"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(2) + @index.delete_objects!(res['hits'].map { |h| h['objectID'] }) + @index.search('')['nbHits'].should eq(0) + end + + it "should delete several objects by query" do + @index.clear + @index.add_object({:firstname => "Robert1"}) + @index.add_object!({:firstname => "Robert2"}) + @index.search('')['nbHits'].should eq(2) + @index.delete_by_query!('rob') + @index.search('')['nbHits'].should eq(0) + end + + it "should not wipe the entire index with delete_by_query" do + expect { @index.delete_by_query(nil) }.to raise_error(ArgumentError) + end + + context 'delete_by' do + it 'should not wipe the entire index' do + expect { @index.delete_by(nil) }.to raise_error(ArgumentError) + end + + it 'should fail with query passed' do + @index.clear + @index.add_object({:firstname => 'Robert1'}) + @index.add_object!({:firstname => 'Robert2'}) + 
@index.search('')['nbHits'].should eq(2) + expect { @index.delete_by({ 'query' => 'abc' }) }.to raise_error(Algolia::AlgoliaProtocolError) + @index.search('')['nbHits'].should eq(2) + end + + it 'should work with filters' do + @index.clear + @index.set_settings!({:attributesForFaceting => ['firstname']}) + @index.add_object({:firstname => 'Robert1'}) + @index.add_object!({:firstname => 'Robert2'}) + @index.search('')['nbHits'].should eq(2) + @index.delete_by!({ 'filters' => 'firstname:Robert1' }) + @index.search('')['nbHits'].should eq(1) + end + end + + it 'should find objects when needed' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + + index.save_objects!([ + {:company => 'Algolia', :name => 'Julien Lemoine', :objectID => 'julien-lemoine'}, + {:company => 'Algolia', :name => 'Nicolas Dessaigne', :objectID => 'nicolas-dessaigne'}, + {:company => 'Amazon', :name =>' "Jeff Bezos', :objectID => '162590850'}, + {:company => 'Apple', :name => 'Steve Jobs', :objectID => '162590860'}, + {:company => 'Apple', :name => 'Steve Wozniak', :objectID => '162590870'}, + {:company => 'Arista Networks', :name => 'Jayshree Ullal', :objectID => '162590880'}, + {:company => 'Google', :name => 'Larry Page', :objectID => '162590890'}, + {:company => 'Google', :name => 'Rob Pike', :objectID => '162590900'}, + {:company => 'Google', :name => 'Sergueï Brin', :objectID => '162590910'}, + {:company => 'Microsoft', :name => 'Bill Gates', :objectID => '162590920'}, + {:company => 'SpaceX', :name => 'Elon Musk', :objectID => '162590930'}, + {:company => 'Tesla', :name => 'Elon Musk', :objectID => '162590940'}, + {:company => 'Yahoo', :name => 'Marissa Mayer', :objectID => '162590950'}, + ]) + + res = index.search('algolia') + Algolia::Index.get_object_position(res, 'nicolas-dessaigne').should eq(0) + Algolia::Index.get_object_position(res, 'julien-lemoine').should eq(1) + Algolia::Index.get_object_position(res, '').should eq(-1) + + expect { + index.find_object({'query' => '', 'paginate' => true}) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + expect { + index.find_object({'query' => '', 'paginate' => true}) { false } + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + obj = index.find_object({'query' => '', 'paginate' => true}) { true } + obj['position'].should eq(0) + obj['page'].should eq(0) + + # we use a lambda and convert it to a block with `&` + # so as not to repeat the condition + condition = lambda do |obj| + obj.key?('company') and obj['company'] == 'Apple' + end + + expect { + index.find_object({'query' => 'algolia', 'paginate' => true}, &condition) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + expect { + index.find_object({'query' => '', 'paginate' => false, 'hitsPerPage' => 5}, &condition) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + obj = index.find_object({'query' => '', 'paginate' => true, 'hitsPerPage' => 5}, &condition) + obj['position'].should eq(0) + obj['page'].should eq(2) + end + + it "should copy the index" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.search('')['nbHits'].should eq(1) + + Algolia.copy_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à")) + @index.delete_index! 
+ + index.search('')['nbHits'].should eq(1) + index.delete_index! + end + + it "should copy only settings" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + res = @index.set_settings!({ + 'searchableAttributes' => ['one'], + }) + + @index.wait_task(res['taskID']) + Algolia.copy_settings!(@index.name, index.name) + @index.delete_index! + + index.get_settings['searchableAttributes'].should eq(['one']) + index.delete_index! + end + + it "should copy only synonyms" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + @index.save_synonym!('foo', { + :objectID => 'foo', :synonyms => ['car', 'vehicle', 'auto'], :type => 'synonym', + }) + + Algolia.copy_synonyms!(@index.name, index.name) + @index.delete_index! + + index.get_synonym('foo')['objectID'].should eq('foo') + index.delete_index! + end + + it "should copy only rules" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + @index.save_rule!('bar', { + :objectID => 'bar', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + }) + + Algolia.copy_rules!(@index.name, index.name) + @index.delete_index! + + index.get_rule('bar')['objectID'].should eq('bar') + index.delete_index! + end + + it "should copy parts of the index only" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index! index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.batch_synonyms! [ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + @index.search('')['nbHits'].should eq(1) + @index.search_synonyms('')['nbHits'].should eq(2) + + res = Algolia.copy_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à"), ["synonyms"]) + + @index.delete_index! + + index.search_synonyms('')['nbHits'].should eq(2) + index.delete_index! + end + + it "should move the index" do + @index.clear_index rescue "friends does not exist" + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + Algolia.delete_index! index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.search('')['nbHits'].should eq(1) + + Algolia.move_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à")) + + index.search('')['nbHits'].should eq(1) + index.delete_index + end + + it "should retrieve the object" do + @index.clear_index rescue "friends does not exist" + @index.add_object!({:firstname => "Robert"}) + + res = @index.browse + + res['hits'].size.should eq(1) + res['hits'][0]['firstname'].should eq("Robert") + end + + it "should get logs" do + + expect { + Algolia::Index.new(safe_index_name('thisdefinitelyshouldntexist')).get_settings + }.to raise_error(Algolia::AlgoliaProtocolError) + res = Algolia.get_logs(0, 20, true) + + res['logs'].size.should > 0 + (res['logs'][0]['answer_code'].to_i / 100).should eq(4) + end + + it "should search on multipleIndex" do + @index.clear_index! 
rescue "Not fatal" + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = Algolia.multiple_queries([{:index_name => safe_index_name("àlgol?a"), "query" => ""}]) + res["results"][0]["hits"].length.should eq(1) + + res = Algolia.multiple_queries([{"indexName" => safe_index_name("àlgol?a"), "query" => ""}], "indexName") + res["results"][0]["hits"].length.should eq(1) + end + + it "should get multiple objectIDs" do + index_name_1 = safe_index_name("àlgol?a-multi") + index_1 = Algolia::Index.new(index_name_1) + index_1.save_object!({:objectID => "obj1-multi-get", :name => 'test'}) + + index_name_2 = safe_index_name("àlgol?a-multi") + index_2 = Algolia::Index.new(index_name_2) + index_2.save_object!({:objectID => "obj2-multi-get", :name => 'another index'}) + + requests = [ + { "indexName" => index_name_1, "objectID" => "obj1-multi-get" }, + { "indexName" => index_name_2, "objectID" => "obj2-multi-get" } + ] + + response = Algolia.multiple_get_objects(requests) + + response['results'].count.should eq(2) + + index_1.delete_index rescue "not fatal" + index_2.delete_index rescue "not fatal" + end + + it "should throw if the index_name is missing in multiple_queries" do + expect { Algolia.multiple_queries([{"query" => ""}]) }.to raise_error(ArgumentError) + end + + it "should accept custom batch" do + @index.clear_index! rescue "Not fatal" + request = { "requests" => [ + { + "action" => "addObject", + "body" => {"firstname" => "Jimmie", + "lastname" => "Barninger"} + }, + { + "action" => "addObject", + "body" => {"firstname" => "Warren", + "lastname" => "Speach"} + }, + { + "action" => "updateObject", + "body" => {"firstname" => "Jimmie", + "lastname" => "Barninger", + "objectID" => "43"} + }, + { + "action" => "updateObject", + "body" => {"firstname" => "Warren", + "lastname" => "Speach"}, + "objectID" => "42" + } + ]} + res = @index.batch!(request) + @index.search('')['nbHits'].should eq(4) + end + + it "should allow an array of tags" do + @index.add_object!({ :name => "P1", :_tags => "t1" }) + @index.add_object!({ :name => "P2", :_tags => "t1" }) + @index.add_object!({ :name => "P3", :_tags => "t2" }) + @index.add_object!({ :name => "P4", :_tags => "t3" }) + @index.add_object!({ :name => "P5", :_tags => ["t3", "t4"] }) + + @index.search("", { :tagFilters => ["t1"] })['hits'].length.should eq(2) # t1 + @index.search("", { :tagFilters => ["t1", "t2"] })['hits'].length.should eq(0) # t1 AND t2 + @index.search("", { :tagFilters => ["t3", "t4"] })['hits'].length.should eq(1) # t3 AND t4 + @index.search("", { :tagFilters => [["t1", "t2"]] })['hits'].length.should eq(3) # t1 OR t2 + end + + it "should be facetable" do + @index.clear! 
+ @index.set_settings( { :attributesForFacetting => ["f", "g"] }) + @index.add_object!({ :name => "P1", :f => "f1", :g => "g1" }) + @index.add_object!({ :name => "P2", :f => "f1", :g => "g2" }) + @index.add_object!({ :name => "P3", :f => "f2", :g => "g2" }) + @index.add_object!({ :name => "P4", :f => "f3", :g => "g2" }) + + res = @index.search("", { :facets => "f" }) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should eq(1) + res['facets']['f']['f3'].should eq(1) + + res = @index.search("", { :facets => "f", :facetFilters => ["f:f1"] }) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + + res = @index.search("", { :facets => "f", :facetFilters => ["f:f1", "g:g2"] }) + res['facets']['f']['f1'].should eq(1) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + + res = @index.search("", { :facets => "f,g", :facetFilters => [["f:f1", "g:g2"]] }) + res['nbHits'].should eq(4) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should eq(1) + res['facets']['f']['f3'].should eq(1) + + res = @index.search("", { :facets => "f,g", :facetFilters => [["f:f1", "g:g2"], "g:g1"] }) + res['nbHits'].should eq(1) + res['facets']['f']['f1'].should eq(1) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + res['facets']['g']['g1'].should eq(1) + res['facets']['g']['g2'].should be_nil + end + + it "should handle slash in objectId" do + @index.clear_index!() + @index.add_object!({:firstname => "Robert", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('Robert') + object = @index.get_object(res['hits'][0]['objectID'], 'firstname') + object['firstname'].should eq('Robert') + + @index.save_object!({:firstname => "George", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('George') + + @index.partial_update_object!({:firstname => "Sylvain", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('Sylvain') + + end + + it "Check attributes list_indexes:" do + res = Algolia::Index.all + res.should have_key('items') + res['items'][0].should have_key('name') + res['items'][0]['name'].should be_a(String) + res['items'][0].should have_key('createdAt') + res['items'][0]['createdAt'].should be_a(String) + res['items'][0].should have_key('updatedAt') + res['items'][0]['updatedAt'].should be_a(String) + res['items'][0].should have_key('entries') + res['items'][0]['entries'].should be_a(Integer) + res['items'][0].should have_key('pendingTask') + [true, false].should include(res['items'][0]['pendingTask']) + end + + it 'Check attributes search : ' do + res = @index.search('') + res.should have_key('hits') + res['hits'].should be_a(Array) + res.should have_key('page') + res['page'].should be_a(Integer) + res.should have_key('nbHits') + res['nbHits'].should be_a(Integer) + res.should have_key('nbPages') + res['nbPages'].should be_a(Integer) + res.should have_key('hitsPerPage') + res['hitsPerPage'].should be_a(Integer) + res.should have_key('processingTimeMS') + res['processingTimeMS'].should be_a(Integer) + res.should have_key('query') + res['query'].should be_a(String) + 
res.should have_key('params') + res['params'].should be_a(String) + end + + it 'Check attributes delete_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à2")) + index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = index.delete_index() + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes clear_index : ' do + task = @index.clear_index + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes add object : ' do + task = @index.add_object({ :name => "John Doe", :email => "john@doe.org" }) + task.should have_key('createdAt') + task['createdAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectID') + task['objectID'].should be_a(String) + end + + it 'Check attributes add object id: ' do + task = @index.add_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + #task.to_s.should eq("") + task.should have_key('objectID') + task['objectID'].should be_a(String) + task['objectID'].should eq("1") + end + + it 'Check attributes partial update: ' do + task = @index.partial_update_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectID') + task['objectID'].should be_a(String) + task['objectID'].should eq("1") + end + + it 'Check attributes delete object: ' do + @index.add_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = @index.delete_object("1") + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes add objects: ' do + task = @index.add_objects([{ :name => "John Doe", :email => "john@doe.org", :objectID => "1" }]) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectIDs') + task['objectIDs'].should be_a(Array) + end + + it 'Check attributes browse: ' do + res = @index.browse() + res.should have_key('hits') + res['hits'].should be_a(Array) + res.should have_key('page') + res['page'].should be_a(Integer) + res.should have_key('nbHits') + res['nbHits'].should be_a(Integer) + res.should have_key('nbPages') + res['nbPages'].should be_a(Integer) + res.should have_key('hitsPerPage') + res['hitsPerPage'].should be_a(Integer) + res.should have_key('processingTimeMS') + res['processingTimeMS'].should be_a(Integer) + res.should have_key('query') + res['query'].should be_a(String) + res.should have_key('params') + res['params'].should be_a(String) + end + + it 'Check attributes get settings: ' do + task = @index.set_settings({}) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + end + + it 'Check attributes move_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + index2 = Algolia::Index.new(safe_index_name("àlgol?à2")) + index2.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = 
Algolia.move_index!(safe_index_name("àlgol?à2"), safe_index_name("àlgol?à")) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + index.delete_index + end + + it 'Check attributes copy_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + index2 = Algolia::Index.new(safe_index_name("àlgol?à2")) + index2.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = Algolia.copy_index!(safe_index_name("àlgol?à2"), safe_index_name("àlgol?à")) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + index.delete_index + index2.delete_index + end + + it 'Check attributes wait_task : ' do + task = @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = Algolia.client.get(Algolia::Protocol.task_uri(safe_index_name("àlgol?a"), task['objectID'])) + task.should have_key('status') + task['status'].should be_a(String) + task.should have_key('pendingTask') + [true, false].should include(task['pendingTask']) + end + + it 'Check attributes get_task_status' do + task = @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + status = @index.get_task_status(task["taskID"]) + status.should be_a(String) + end + + it 'Check attributes log : ' do + logs = Algolia.get_logs() + logs.should have_key('logs') + logs['logs'].should be_a(Array) + logs['logs'][0].should have_key('timestamp') + logs['logs'][0]['timestamp'].should be_a(String) + logs['logs'][0].should have_key('method') + logs['logs'][0]['method'].should be_a(String) + logs['logs'][0].should have_key('answer_code') + logs['logs'][0]['answer_code'].should be_a(String) + logs['logs'][0].should have_key('query_body') + logs['logs'][0]['query_body'].should be_a(String) + logs['logs'][0].should have_key('answer') + logs['logs'][0]['answer'].should be_a(String) + logs['logs'][0].should have_key('url') + logs['logs'][0]['url'].should be_a(String) + logs['logs'][0].should have_key('ip') + logs['logs'][0]['ip'].should be_a(String) + logs['logs'][0].should have_key('query_headers') + logs['logs'][0]['query_headers'].should be_a(String) + logs['logs'][0].should have_key('sha1') + logs['logs'][0]['sha1'].should be_a(String) + end + + it 'should generate secured api keys (old syntax)' do + key = Algolia.generate_secured_api_key('my_api_key', '(public,user1)') + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', '(public,user1)')) + key = Algolia.generate_secured_api_key('my_api_key', '(public,user1)', 42) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', '(public,user1)42')) + key = Algolia.generate_secured_api_key('my_api_key', ['public']) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'public')) + key = Algolia.generate_secured_api_key('my_api_key', ['public', ['premium','vip']]) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'public,(premium,vip)')) + end + + it 'should generate secured api keys (new syntax)' do + key = Algolia.generate_secured_api_key('my_api_key', :tagFilters => '(public,user1)') + key.should eq(Base64.encode64("#{OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'tagFilters=%28public%2Cuser1%29')}tagFilters=%28public%2Cuser1%29").gsub("\n", '')) + key = Algolia.generate_secured_api_key('182634d8894831d5dbce3b3185c50881', 
:tagFilters => '(public,user1)', :userToken => 42) + # in ruby 1.8.7, the map iteration doesn't have the same ordering, + # making the hash slightly different + expected_keys = [ + 'ZDU0N2YzZjA3NGZkZGM2OTUxNzY3NzhkZDI3YWFkMjhhNzU5OTBiOGIyYTgyYzFmMjFjZTY4NTA0ODNiN2I1ZnVzZXJUb2tlbj00MiZ0YWdGaWx0ZXJzPSUyOHB1YmxpYyUyQ3VzZXIxJTI5', + 'OGYwN2NlNTdlOGM2ZmM4MjA5NGM0ZmYwNTk3MDBkNzMzZjQ0MDI3MWZjNTNjM2Y3YTAzMWM4NTBkMzRiNTM5YnRhZ0ZpbHRlcnM9JTI4cHVibGljJTJDdXNlcjElMjkmdXNlclRva2VuPTQy' + ] + expected_keys.include?(key).should eq(true) + end + + it 'Check attributes multipleQueries' do + res = Algolia.multiple_queries([{:index_name => safe_index_name("àlgol?a"), "query" => ""}]) + res.should have_key('results') + res['results'].should be_a(Array) + res['results'][0].should have_key('hits') + res['results'][0]['hits'].should be_a(Array) + res['results'][0].should have_key('page') + res['results'][0]['page'].should be_a(Integer) + res['results'][0].should have_key('nbHits') + res['results'][0]['nbHits'].should be_a(Integer) + res['results'][0].should have_key('nbPages') + res['results'][0]['nbPages'].should be_a(Integer) + res['results'][0].should have_key('hitsPerPage') + res['results'][0]['hitsPerPage'].should be_a(Integer) + res['results'][0].should have_key('processingTimeMS') + res['results'][0]['processingTimeMS'].should be_a(Integer) + res['results'][0].should have_key('query') + res['results'][0]['query'].should be_a(String) + res['results'][0].should have_key('params') + res['results'][0]['params'].should be_a(String) + end + + it 'should handle facet search' do + objects = { + :snoopy => { + :objectID => '1', + 'name' => 'Snoopy', + :kind => ['dog', 'animal'], + :born => 1950, + :series => 'Peanuts' + }, + :woodstock => { + :objectID => '2', + :name => 'Woodstock', + :kind => ['bird', 'animal'], + :born => 1960, + :series => 'Peanuts' + }, + :charlie => { + :objectID => '3', + :name => 'Charlie Brown', + :kind => ['human'], + :born => 1950, + :series => 'Peanuts' + }, + :hobbes => { + :objectID => '4', + :name => 'Hobbes', + :kind => ['tiger', 'animal', 'teddy'], + :born => 1985, + :series => 'Calvin & Hobbes' + }, + :calvin => { + :objectID => '5', + :name => 'Calvin', + :kind => ['human'], + :born => 1985, + :series => 'Calvin & Hobbes' + } + } + + index = Algolia::Index.new(safe_index_name('test_facet_search')) + index.set_settings({ + :attributesForFaceting => [ + 'searchable(series)', + 'kind' + ] + }) + index.add_objects! objects.values + + query = { + :facetFilters => ['kind:animal'], + :numericFilters => ['born >= 1955'] + } + answer = index.search_for_facet_values 'series', 'Peanutz', query + expect(answer['facetHits'].size).to eq(1) + expect(answer['facetHits'].first['value']).to eq('Peanuts') + expect(answer['facetHits'].first['count']).to eq(1) + end + + it 'should handle disjunctive faceting' do + index = Algolia::Index.new(safe_index_name("test_hotels")) + index.set_settings :attributesForFacetting => ['city', 'stars', 'facilities'] + index.clear_index rescue nil + index.add_objects! 
[ + { :name => 'Hotel A', :stars => '*', :facilities => ['wifi', 'bath', 'spa'], :city => 'Paris' }, + { :name => 'Hotel B', :stars => '*', :facilities => ['wifi'], :city => 'Paris' }, + { :name => 'Hotel C', :stars => '**', :facilities => ['bath'], :city => 'San Francisco' }, + { :name => 'Hotel D', :stars => '****', :facilities => ['spa'], :city => 'Paris' }, + { :name => 'Hotel E', :stars => '****', :facilities => ['spa'], :city => 'New York' }, + ] + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }) + answer['nbHits'].should eq(5) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*'] }) + answer['nbHits'].should eq(2) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['**'].should eq(1) + answer['disjunctiveFacets']['stars']['****'].should eq(2) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*'], :city => ['Paris'] }) + answer['nbHits'].should eq(2) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['****'].should eq(1) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*', '****'], :city => ['Paris'] }) + answer['nbHits'].should eq(3) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['****'].should eq(1) + end + + it 'should apply jobs one after another if synchronous' do + index = Algolia::Index.new(safe_index_name("sync")) + begin + index.add_object! :objectID => 1 + answer = index.search('') + answer['nbHits'].should eq(1) + answer['hits'][0]['objectID'].to_i.should eq(1) + index.clear_index! + index.add_object! :objectID => 2 + index.add_object! 
:objectID => 3 + answer = index.search('') + answer['nbHits'].should eq(2) + answer['hits'][0]['objectID'].to_i.should_not eq(1) + ensure + index.delete_index + end + end + + it "should send a custom batch" do + batch = [ + {:action => "addObject", :indexName => @index.name, :body => { :objectID => "11", :email => "john@be.org" }}, + {:action => "addObject", :indexName => @index.name, :body => { :objectID => "22", :email => "robert@be.org" }} + ] + Algolia.batch!(batch) + res = @index.search("@be.org") + res["hits"].length.should eq(2) + end + + def test_browse(expected, *args) + @index.clear + @index.add_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + hits = {} + @index.browse(*args) do |hit| + hits[hit['objectID']] = true + end + hits.size.should eq(expected) + end + + it "should browse the index using cursors" do + test_browse(1500) + test_browse(500, 1, 1000) + test_browse(0, 2, 1000) + end + + it "should browse the index using cursors specifying hitsPerPage" do + test_browse(1500, { :hitsPerPage => 500 }) + end + + it "should browse the index using cursors specifying params" do + test_browse(1, { :hitsPerPage => 500, :numericFilters => 'i=42' }) + test_browse(42, { :numericFilters => 'i<=42' }) + end + + it "should browse the index using cursors from a cursor" do + @index.clear + @index.add_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + answer = @index.browse(0, 1000) + + hits = {} + @index.browse(:cursor => answer['cursor']) do |hit, cursor| + hits[hit['objectID']] = true + cursor.should eq(answer['cursor']) + end + hits.size.should eq(500) + + @index.browse_from(answer['cursor'])['hits'].size.should eq(500) + end + + it "should test synonyms" do + @index.add_object! :name => '589 Howard St., San Francisco' + @index.search('Howard St San Francisco')['nbHits'].should eq(1) + @index.batch_synonyms! [ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + synonyms_search = @index.search_synonyms('')['hits'] + synonyms_search.size.should eq(2) + @index.search('Howard St SF')['nbHits'].should eq(1) + + synonym = @index.get_synonym('city') + synonym['objectID'].should eq('city') + synonym['type'].should eq('synonym') + + @index.search('Howard Street')['nbHits'].should eq(1) + + synonyms_block = [] + synonyms_ret = @index.export_synonyms(1) do |s| + synonyms_block << s + end + + s0 = synonyms_search.map { |s| s['objectID'] }.sort + s1 = synonyms_block.map { |s| s['objectID'] }.sort + s2 = synonyms_ret.map { |s| s['objectID'] }.sort + + s0.should eq(s1) + s1.should eq(s2) + + @index.delete_synonym! 'city' + @index.search('Howard Street SF')['nbHits'].should eq(0) + + @index.clear_synonyms! + @index.search_synonyms('')['nbHits'].should eq(0) + end + + it "should replace all synonyms" do + @index.batch_synonyms! ([ + {:objectID => '1', :type => 'synonym', :synonyms => ['San Francisco', 'SF']}, + {:objectID => '2', :type => 'altCorrection1', :word => 'foo', :corrections => ['st']} + ]) + + @index.replace_all_synonyms! ([ + {:objectID => '3', :type => 'synonym', :synonyms => ['San Francisco', 'SF']}, + {:objectID => '4', :type => 'altCorrection1', :word => 'bar', :corrections => ['st']} + ]) + + synonym = @index.get_synonym('4')['objectID'].should eq('4') + synonyms_search = @index.search_synonyms('')['hits'] + synonyms_search.size.should eq(2) + end + + it 'should test synonyms Export Query' do + @index.batch_synonyms! 
[ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'us', :type => 'synonym', :synonyms => ['US', 'USA', 'Untied States of America'] }, + { :objectID => 'ie', :type => 'synonym', :synonyms => ['IE', 'IRL', 'Ireland'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + + expect(@index).to receive(:search_synonyms).and_call_original.at_least(4) + @index.export_synonyms(1) + + @index.clear_synonyms! + end + + it 'should test Query Rules' do + rule_1 = { + :objectID => '42', + :condition => { :pattern => 'test', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'this is better' } } + } + rule_2 = { + :objectID => '2', + :condition => { :pattern => 'Pura', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'Pura Vida' } } + } + + result = @index.save_rule!(rule_1[:objectID], rule_1) + result.should have_key('taskID') + result.should have_key('updatedAt') + + @index.get_rule(rule_1[:objectID])['objectID'].should eq(rule_1[:objectID]) + + @index.search_rules('better')['nbHits'].should eq(1) + @index.search_rules('', { :anchoring => 'contains' })['nbHits'].should eq(1) + + @index.delete_rule!(rule_1[:objectID]) + @index.search_rules('')['nbHits'].should eq(0) + + @index.batch_rules!([rule_1, rule_2]) + rules_search = @index.search_rules('')['hits'] + rules_search.size.should eq(2) + + rules_block = [] + rules_ret = @index.export_rules(1) do |r| + rules_block << r + end + + r0 = rules_search.map { |r| r['objectID'] }.sort + r1 = rules_block.map { |r| r['objectID'] }.sort + r2 = rules_ret.map { |r| r['objectID'] }.sort + + r0.should eq(r1) + r1.should eq(r2) + + @index.clear_rules! + @index.search_rules('')['nbHits'].should eq(0) + end + + it "should replace all rules" do + rule_1 = { + :objectID => '1', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + } + rule_2 = { + :objectID => '2', + :condition => {:pattern => 'Pura', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'Pura Vida'}} + } + + @index.batch_rules! 
[rule_1, rule_2] + + rule_1[:objectID] = '3' + rule_2[:objectID] = '4' + @index.replace_all_rules!([rule_1, rule_2]) + + @index.get_rule('4')['objectID'].should eq('4') + rules_search = @index.search_rules('')['hits'] + rules_search.size.should eq(2) + end + + it 'should not save a query rule with an empty objectID' do + rule = { + :objectID => '', + :condition => { :pattern => 'test', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'this is better' } } + } + + expect { @index.save_rule!(nil, rule) }.to raise_error(ArgumentError) + expect { @index.save_rule!(rule[:objectID], rule) }.to raise_error(ArgumentError) + end + + it "should use request options" do + expect{Algolia.list_indexes}.to_not raise_error + + expect{Algolia.list_indexes('headers' => { 'X-Algolia-API-Key' => 'NotExistentAPIKey' })}.to raise_error(Algolia::AlgoliaProtocolError) + end + + it 'should retrieve the remaining validity time in seconds' do + now = Time.now.to_i + + key = Algolia.generate_secured_api_key('foo', :validUntil => now - (10 * 60)) + expect(Algolia.get_secured_api_key_remaining_validity(key)).to be < 0 + + key = Algolia.generate_secured_api_key('foo', :validUntil => now + (10 * 60)) + expect(Algolia.get_secured_api_key_remaining_validity(key)).to be > 0 + + key = Algolia.generate_secured_api_key('foo', []) + expect { Algolia.get_secured_api_key_remaining_validity(key) }.to raise_error(Algolia::ValidUntilNotFoundError) + end + + context 'DNS timeout' do + before(:each) do + @client = Algolia::Client.new :application_id => ENV['ALGOLIA_APPLICATION_ID'], :api_key => ENV['ALGOLIA_API_KEY'], + :hosts => [ + "10.0.0.1", # this will timeout + "#{ENV['ALGOLIA_APPLICATION_ID']}.algolia.net", + "#{ENV['ALGOLIA_APPLICATION_ID']}-1.algolianet.com", + "#{ENV['ALGOLIA_APPLICATION_ID']}-2.algolianet.com", + "#{ENV['ALGOLIA_APPLICATION_ID']}-3.algolianet.com" + ], + :connect_timeout => 5 + @client.destroy # make sure the thread-local vars are reseted + end + + it "should fallback to the 2nd host after a few seconds" do + start_time = Time.now + @client.list_indexes # fallback on the second host after 5 sec (connection timeout) + expect(start_time.to_i + 5).to be <= Time.now.to_i + 1 + end + + it "should re-use the working (2nd) host after the 1st one failed" do + start_time = Time.now + @client.list_indexes # fallback on the second host after 5 sec (connection timeout) + expect(start_time.to_i + 5).to be <= Time.now.to_i + 1 + start_time = Time.now + @client.list_indexes # re-use the 2nd one + expect(start_time.to_i).to be <= Time.now.to_i + 1 + end + end + + context 'Custom User Agent' do + before(:all) do + WebMock.enable! + end + + before(:each) do + @client = Algolia::Client.new( + :application_id => ENV['ALGOLIA_APPLICATION_ID'], + :api_key => ENV['ALGOLIA_API_KEY'], + :user_agent => 'test agent' + ) + @client.destroy # make sure the thread-local vars are reseted + end + + it "should use a custom user-agent" do + WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes/). + to_return(:status => 200, :body => '{}') + @client.list_indexes + expect(WebMock).to have_requested(:get, /https:\/\/.+-dsn.algolia(net\.com|\.net)\/1\/indexes/). + with(:headers => { 'User-Agent' => "Algolia for Ruby (#{::Algolia::VERSION}); Ruby (#{RUBY_VERSION}); test agent" }) + end + + after(:all) do + WebMock.disable! 
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb new file mode 100644 index 0000000..8dae58a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb @@ -0,0 +1,31 @@ +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) + +describe 'With a mocked client' do + + before(:all) do + WebMock.enable! + Algolia::WebMock.mock! + # reset session objects + app_id = Algolia.client.application_id + Thread.current["algolia_hosts_#{app_id}"] = nil + Thread.current["algolia_search_hosts_#{app_id}"] = nil + Thread.current["algolia_host_index_#{app_id}"] = nil + Thread.current["algolia_search_host_index_#{app_id}"] = nil + end + + it "should add a simple object" do + index = Algolia::Index.new("friends") + index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + index.search('').should == { "hits" => [ { "objectID" => 42 } ], "page" => 1, "hitsPerPage" => 1, "nbHits"=>1, "nbPages"=>1 } # mocked + index.list_api_keys + index.browse + index.clear + index.delete + index.delete_by_query 'test' + end + + after(:all) do + WebMock.disable! + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb new file mode 100644 index 0000000..e317c1e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb @@ -0,0 +1,69 @@ + +if ENV['COVERAGE'] + require 'simplecov' + SimpleCov.start +end + +require 'bundler/setup' + +Bundler.setup :test + +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) + +require 'algoliasearch' +require 'rspec' +require 'webmock/rspec' +require 'algolia/webmock' +require 'time' + +raise 'missing ALGOLIA_APPLICATION_ID or ALGOLIA_API_KEY environment variables' if ENV['ALGOLIA_APPLICATION_ID'].nil? || ENV['ALGOLIA_API_KEY'].nil? +Algolia.init :application_id => ENV['ALGOLIA_APPLICATION_ID'], :api_key => ENV['ALGOLIA_API_KEY'] + +RSpec.configure do |config| + config.mock_with :rspec + + config.before(:suite) do + WebMock.disable! + end + + config.after(:suite) do + WebMock.disable! + end +end + +# avoid concurrent access to the same index +def safe_index_name(name) + return name if ENV['TRAVIS'].to_s != "true" + id = ENV['TRAVIS_JOB_NUMBER'] + "TRAVIS_RUBY_#{name}-#{id}" +end + +# avoid concurrent access to the same index and follows the CTS standards. +def index_name(name) + date = DateTime.now.strftime('%Y-%m-%d_%H:%M:%S') + + instance = ENV['TRAVIS'].to_s == 'true' ? ENV['TRAVIS_JOB_NUMBER'] : 'unknown' + + 'ruby_%s_%s_%s' % [date, instance, name] +end + +def auto_retry(options = {}) + return if !block_given? + + max_retry = options[:max_retry] || 10 + retry_count = 0 + + loop do + begin + return yield + rescue => e + retry_count += 1 + if retry_count >= max_retry + raise e + else + sleep retry_count + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb new file mode 100644 index 0000000..9c92efa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb @@ -0,0 +1,51 @@ +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) + +require 'webmock' + +describe 'With a rate limited client' do + + before(:each) do + WebMock.enable! 
+ # reset session objects + app_id = Algolia.client.application_id + Thread.current["algolia_hosts_#{app_id}"] = nil + Thread.current["algolia_search_hosts_#{app_id}"] = nil + Thread.current["algolia_host_index_#{app_id}"] = nil + Thread.current["algolia_search_host_index_#{app_id}"] = nil + end + + it "should pass the right headers" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'], 'X-Forwarded-Api-Key'=>'ratelimitapikey', 'X-Forwarded-For'=>'1.2.3.4'}). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 1 }", :headers => {}) + Algolia.enable_rate_limit_forward ENV['ALGOLIA_API_KEY'], "1.2.3.4", "ratelimitapikey" + index = Algolia::Index.new("friends") + index.search('foo')['fakeAttribute'].should == 1 + index.search('bar')['fakeAttribute'].should == 1 + end + + it "should use original headers" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'] }). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 2 }", :headers => {}) + Algolia.disable_rate_limit_forward + index = Algolia::Index.new("friends") + index.search('bar')['fakeAttribute'].should == 2 + end + + it "should pass the right headers in the scope" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'], 'X-Forwarded-Api-Key'=>'ratelimitapikey', 'X-Forwarded-For'=>'1.2.3.4'}). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 1 }", :headers => {}) + Algolia.with_rate_limits "1.2.3.4", "ratelimitapikey" do + index = Algolia::Index.new("friends") + index.search('foo')['fakeAttribute'].should == 1 + index.search('bar')['fakeAttribute'].should == 1 + end + end + + after(:each) do + WebMock.disable! 
+ end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.gitignore b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.gitignore new file mode 100644 index 0000000..b04a8c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.gitignore @@ -0,0 +1,11 @@ +/.bundle/ +/.yardoc +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ + +# rspec failure tracking +.rspec_status diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rspec b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rspec new file mode 100644 index 0000000..34c5164 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rspec @@ -0,0 +1,3 @@ +--format documentation +--color +--require spec_helper diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rubocop.yml new file mode 100644 index 0000000..3ffe2b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rubocop.yml @@ -0,0 +1,2 @@ +inherit_from: .rubocop_todo.yml + diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rubocop_todo.yml new file mode 100644 index 0000000..826a7a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.rubocop_todo.yml @@ -0,0 +1,32 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2018-02-02 08:32:23 -0800 using RuboCop version 0.52.1. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +# Configuration parameters: Include. +# Include: **/*.gemspec +Gemspec/RequiredRubyVersion: + Exclude: + - 'atomos.gemspec' + +# Offense count: 1 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 14 + +# Offense count: 1 +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + - 'lib/atomos.rb' + +# Offense count: 7 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 97 diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.travis.yml new file mode 100644 index 0000000..6a8e36f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/.travis.yml @@ -0,0 +1,5 @@ +sudo: false +language: ruby +rvm: + - 2.5.0 +before_install: gem install bundler -v 1.16.1 diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..3399e24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at segiddins@squareup.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Gemfile b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Gemfile new file mode 100644 index 0000000..2d1d7e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Gemfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# Specify your gem's dependencies in atomos.gemspec +gemspec diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Gemfile.lock new file mode 100644 index 0000000..edfb2e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Gemfile.lock @@ -0,0 +1,51 @@ +PATH + remote: . + specs: + atomos (0.1.3) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.3.0) + diff-lcs (1.3) + parallel (1.12.1) + parser (2.4.0.2) + ast (~> 2.3) + powerpack (0.1.1) + rainbow (3.0.0) + rake (10.5.0) + rspec (3.7.0) + rspec-core (~> 3.7.0) + rspec-expectations (~> 3.7.0) + rspec-mocks (~> 3.7.0) + rspec-core (3.7.1) + rspec-support (~> 3.7.0) + rspec-expectations (3.7.0) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.7.0) + rspec-mocks (3.7.0) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.7.0) + rspec-support (3.7.0) + rubocop (0.52.1) + parallel (~> 1.10) + parser (>= 2.4.0.2, < 3.0) + powerpack (~> 0.1) + rainbow (>= 2.2.2, < 4.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-progressbar (1.9.0) + unicode-display_width (1.3.0) + +PLATFORMS + ruby + +DEPENDENCIES + atomos! + bundler (~> 1.16) + rake (~> 10.0) + rspec (~> 3.0) + rubocop + +BUNDLED WITH + 1.16.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/LICENSE.txt new file mode 100644 index 0000000..7a54c62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Samuel Giddins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/README.md b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/README.md new file mode 100644 index 0000000..de832a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/README.md @@ -0,0 +1,43 @@ +# Atomos + +Welcome to your new gem! In this directory, you'll find the files you need to be able to package up your Ruby library into a gem. Put your Ruby code in the file `lib/atomos`. To experiment with that code, run `bin/console` for an interactive prompt. + +TODO: Delete this and the text above, and describe your gem + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'atomos' +``` + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install atomos + +## Usage + +TODO: Write usage instructions here + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/atomos. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct. + +## License + +The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT). + +## Code of Conduct + +Everyone interacting in the Atomos project’s codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/[USERNAME]/atomos/blob/master/CODE_OF_CONDUCT.md). 
diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Rakefile b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Rakefile new file mode 100644 index 0000000..8ce173e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/Rakefile @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require 'bundler/gem_tasks' + +require 'rspec/core/rake_task' +require 'rubocop/rake_task' + +RSpec::Core::RakeTask.new +RuboCop::RakeTask.new + +task default: %i[rubocop spec] diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/VERSION b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/VERSION new file mode 100644 index 0000000..b1e80bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/VERSION @@ -0,0 +1 @@ +0.1.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/atomos.gemspec b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/atomos.gemspec new file mode 100644 index 0000000..7ad4922 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/atomos.gemspec @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +Gem::Specification.new do |spec| + spec.name = 'atomos' + spec.version = File.read(File.expand_path('../VERSION', __FILE__)) + spec.authors = ['Samuel Giddins'] + spec.email = ['segiddins@segiddins.me'] + + spec.summary = 'A simple gem to atomically write files' + spec.homepage = 'https://github.com/segiddins/atomos' + spec.license = 'MIT' + + spec.files = `git ls-files -z`.split("\x0").reject do |f| + f.match(%r{^(test|spec|features)/}) + end + spec.bindir = 'exe' + spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } + spec.require_paths = ['lib'] + + spec.required_ruby_version = '>= 2.0' + + spec.add_development_dependency 'bundler', '~> 1.16' + spec.add_development_dependency 'rake', '~> 10.0' + spec.add_development_dependency 'rspec', '~> 3.0' + spec.add_development_dependency 'rubocop' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/console b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/console new file mode 100755 index 0000000..535613d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/console @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require 'bundler/setup' +require 'atomos' + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require 'irb' +IRB.start(__FILE__) diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rake b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rake new file mode 100755 index 0000000..8226b57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rake @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rake' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. 
+Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rake', 'rake') diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rspec b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rspec new file mode 100755 index 0000000..d086973 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rspec @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rspec' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rspec-core', 'rspec') diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rubocop b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rubocop new file mode 100755 index 0000000..8424d87 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/rubocop @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rubocop' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rubocop', 'rubocop') diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/setup b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/setup new file mode 100755 index 0000000..dce67d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/bin/setup @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' +set -vx + +bundle install + +# Do any other automated setup that you need to do here diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/lib/atomos.rb b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/lib/atomos.rb new file mode 100644 index 0000000..4b56d05 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/lib/atomos.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +require 'atomos/version' + +module Atomos + module_function + + # rubocop:disable Metrics/MethodLength + def atomic_write(dest, contents = nil, tmpdir: nil, &block) + unless contents.nil? ^ block.nil? 
+ raise ArgumentError, 'must provide either contents or a block' + end + + tmpdir = Atomos.default_tmpdir_for_file(dest, tmpdir) + + require 'tempfile' + Tempfile.open(".atomos.#{File.basename(dest)}", tmpdir) do |tmpfile| + if contents + tmpfile << contents + else + retval = yield tmpfile + end + + tmpfile.close + + File.rename(tmpfile.path, dest) + + retval + end + end + # rubocop:enable Metrics/MethodLength + + def self.default_tmpdir_for_file(dest, tmpdir) + tmpdir ||= begin + require 'tmpdir' + Dir.tmpdir + end + + # Ensure the destination is on the same device as tmpdir + if File.stat(tmpdir).dev != File.stat(File.dirname(dest)).dev + # If not, use the directory of the destination as the tmpdir. + tmpdir = File.dirname(dest) + end + + tmpdir + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/lib/atomos/version.rb b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/lib/atomos/version.rb new file mode 100644 index 0000000..f52f703 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/atomos-0.1.3/lib/atomos/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Atomos + VERSION = File.read(File.expand_path('../../../VERSION', __FILE__)) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.gitignore b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.kick b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.kick new file mode 100644 index 0000000..0686cce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.kick @@ -0,0 +1,30 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bundle exec bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/[^/]*/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. +process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) +ignore(/^tmp/) + diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop.yml new file mode 100644 index 0000000..e3dc640 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop_cocoapods.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop_cocoapods.yml new file mode 100644 index 0000000..2da3d10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop_cocoapods.yml @@ -0,0 +1,133 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. 
+SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop_todo.yml new file mode 100644 index 0000000..e5a31c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.rubocop_todo.yml @@ -0,0 +1,70 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2016-03-09 18:40:14 -0600 using RuboCop version 0.38.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 3 +Lint/IneffectiveAccessModifier: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. 
+Lint/UnneededDisable: + Exclude: + - 'spec/command/banner_spec.rb' + +# Offense count: 1 +Performance/FixedSize: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Performance/StringReplacement: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: prefer_alias, prefer_alias_method +Style/Alias: + Exclude: + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: SingleLineConditionsOnly. +Style/ConditionalAssignment: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +Style/IfInsideElse: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 9 +# Cop supports --auto-correct. +Style/MutableConstant: + Exclude: + - 'lib/claide/ansi.rb' + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ParallelAssignment: + Exclude: + - 'lib/claide/command/argument_suggester.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/UnneededInterpolation: + Exclude: + - 'lib/claide/command/argument_suggester.rb' diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.travis.yml new file mode 100644 index 0000000..562ab72 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.travis.yml @@ -0,0 +1,27 @@ +language: ruby + +dist: trusty + +addons: + code_climate: + repo_token: 46c8b29dd6711f35704e7c5a541486cbbf2cff8b2df8ce755bfc09917d3c1cbb +branches: + only: + - master + - /.+-stable$/ +rvm: + - 2.0.0-p647 + - 2.1.10 + - 2.2.9 + - 2.3.8 + - 2.4.5 + - 2.5.3 + - 2.6.2 +bundler_args: --without development +before_install: + # There is a bug in travis. When using system ruby, bundler is not + # installed and causes the default install action to fail. + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem install "bundler:~> 1.15.0"; else gem install "bundler:~> 1.15.0"; fi + # RubyGems 2.0.14 isn't a fun time on 2.0.0p648 + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem update --system; fi +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.yardopts b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.yardopts new file mode 100644 index 0000000..a647564 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/.yardopts @@ -0,0 +1 @@ +--markup markdown --protected --charset=utf-8 lib diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/CHANGELOG.md new file mode 100644 index 0000000..8a63e0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/CHANGELOG.md @@ -0,0 +1,254 @@ +# CLAide Changelog + +## 1.0.3 (2019-08-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Correctly handle `--help` flags when using `argv.remainder!` after initialization + [Eric Amorde](https://github.com/amorde), + [tripleCC](https://github.com/tripleCC) + [#87](https://github.com/CocoaPods/CLAide/pull/87) + + +## 1.0.2 (2017-06-06) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Avoid a method redefinition warning when requiring `claide`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.1 (2016-10-10) + +##### Bug Fixes + +* Adds a fix for older versions of Rubygems when CLAide crashes. 
+ [Samuel Giddins](https://github.com/segiddins) + [#73](https://github.com/CocoaPods/CLAide/issues/73) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix circular require of `claide/ansi` in `claide/ansi/string_escaper`. + [bootstraponline](https://github.com/bootstraponline) + [#66](https://github.com/CocoaPods/CLAide/issues/66) + + +## 1.0.0.beta.3 (2016-03-15) + +##### Enhancements + +* Added `Command.option` to easily add a single option to a command class. + [Samuel Giddins](https://github.com/segiddins) + [#64](https://github.com/CocoaPods/CLAide/issues/64) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-03-08) + +##### Bug Fixes + +* Attempt to get the terminal width without shelling out to `tput`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Bug Fixes + +* The plugin manager will now properly activate plugin gems, ensuring all of + their files are requirable. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.1 (2015-07-05) + +##### Bug Fixes + +* Fix a regression when contradictory flags were given in `ARGV` -- the last + flag given will once again be the value returned, and all entries for that key + are removed. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.0 (2015-07-02) + +##### Enhancements + +* Properly parse everything in `ARGV` after `--` as an argument. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/CLAide/issues/48) + +* Allow parsing an option that occurs multiple times. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.2 (2015-06-27) + +##### Enhancements + +* Add `ARGV#remainder!`, which returns all the remaining arguments, deleting + them from the receiver. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.1 (2015-02-25) + +###### Bug Fixes + +* Silence errors while loading plugins. + [Clément Beffa](https://github.com/cl3m) + [#44](https://github.com/CocoaPods/CLAide/issues/44) + + +## 0.8.0 (2014-12-25) + +###### Breaking + +* Removes the `ShellCompletionHelper` along with completion script for ZSH. This is out of the scope of CLAide. + [Eloy Durán](https://github.com/alloy) + [#43](https://github.com/CocoaPods/CLAide/issues/43) + +* Various refactoring replacing “Helper” API’s which specialised classes such as ArgumentSuggester, TextWrapper and PluginManager. + [Eloy Durán](https://github.com/alloy) + +###### Enhancements + +* Added convenience method to invoke commands more easily. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/40) + +* Changes to the PluginManager to handle multiple plugin prefixes, which by default adds the `clad` plugin prefix. + [Eloy Durán](https://github.com/alloy) + +## 0.7.0 (2014-09-11) + +###### Breaking + +* Plugins are now expected to include the `cocoapods-plugin.rb` file in + `./lib`. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +###### Enhancements + +* Improved messages for exceptions generated by plugins. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +* Use the Argument class to describe arguments. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* Support for argument alternatives and repeatable arguments (ellipsis). 
+ [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* No stack trace if --help and --vebose are combined. + [Marius Rackwitz](https://github.com/mrackwitz) + [#36](https://github.com/CocoaPods/CLAide/issues/36) + + +## 0.6.1 (2014-05-20) + +###### Bug Fixes + +* Respect the ANSI flag for the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#34](https://github.com/CocoaPods/CLAide/issues/34) + +* Underline the colon of the titles of the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + +## 0.6.0 (2014-05-19) + +###### Enhancements + +* Use an array to describe arguments. + [Fabio Pelosin][fabiopelosin] + [#26](https://github.com/CocoaPods/CLAide/issues/26) + +* Improved layout and contents of help banner + [Fabio Pelosin](https://github.com/fabiopelosin) + [#25](https://github.com/CocoaPods/CLAide/pull/25) + +* Colorize option, arguments, and example commands in the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#12](https://github.com/CocoaPods/CLAide/issues/12) + +* Add support for ANSI escape sequences. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#17](https://github.com/CocoaPods/CLAide/issues/17) + [#20](https://github.com/CocoaPods/CLAide/pull/20) + [#24](https://github.com/CocoaPods/CLAide/pull/24) + +* Add support for completion script + [Fabio Pelosin](https://github.com/fabiopelosin) + [#19](https://github.com/CocoaPods/CLAide/pull/19) + +* Add support for version logic via the introduction of the `version` class + attribute to the `CLAide::Commmand` class. If a value for the attribute is + specified the `--version` flag is added. The `--version --verbose` flags + include the version of the plugins in the output. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#13](https://github.com/CocoaPods/CLAide/issues/13) + [#14](https://github.com/CocoaPods/CLAide/issues/14) + +## 0.5.0 (2014-03-26) + +###### Enhancements + +* Add a `ignore_in_command_lookup` option to commands, which makes it possible + to have anonymous command classes that are or only meant to provide common + functionality, but are otherwise completely ignored during parsing, command + lookup, and help banner printing. + [Eloy Durán](https://github.com/alloy) + +* Deprecate the `color` option in favor of `ansi`. This is more abstract and + can be used for commands that only prettify output by using, for instance, + the bold ANSI code. This applies to the `CLAide` APIs as well. + [Eloy Durán](https://github.com/alloy) + +* Add more hooks that allow the user to customize how to prettify output. + [Eloy Durán](https://github.com/alloy) + +* Word wrap option descriptions to terminal width. + [Eloy Durán](https://github.com/alloy) + [#6](https://github.com/CocoaPods/CLAide/issues/6) + + +## 0.4.0 (2013-11-14) + +###### Enhancements + +* Added support for plugins. 
+ [Les Hill](https://github.com/leshill) + [#1](https://github.com/CocoaPods/CLAide/pull/1) diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Gemfile b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Gemfile new file mode 100644 index 0000000..6cc443e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Gemfile @@ -0,0 +1,23 @@ +source 'https://rubygems.org' + +gemspec + +gem 'rake' + +group :development do + gem 'kicker' + gem 'colored' # for examples +end + +group :spec do + gem 'bacon' + gem 'json', '< 2' + gem 'mocha-on-bacon' + gem 'prettybacon' + + install_if RUBY_VERSION >= '1.9.3' do + gem 'rubocop' + gem 'codeclimate-test-reporter', :require => nil + gem 'simplecov' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Gemfile.lock new file mode 100644 index 0000000..e8a9c08 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Gemfile.lock @@ -0,0 +1,74 @@ +PATH + remote: . + specs: + claide (1.0.3) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.2.0) + bacon (1.2.0) + codeclimate-test-reporter (0.4.1) + simplecov (>= 0.7.1, < 1.0.0) + colored (1.2) + docile (1.1.5) + ffi (1.9.6) + json (1.8.6) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + multi_json (1.10.1) + notify (0.5.2) + parser (2.3.0.6) + ast (~> 2.2) + powerpack (0.1.1) + prettybacon (0.0.2) + bacon (~> 1.2) + rainbow (2.1.0) + rake (10.3.2) + rb-fsevent (0.9.4) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.3) + ffi (>= 0.5.0) + rubocop (0.38.0) + parser (>= 2.3.0.6, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-progressbar (1.7.5) + simplecov (0.9.1) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.8.0) + simplecov-html (0.8.0) + unicode-display_width (1.0.1) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + claide! + codeclimate-test-reporter + colored + json (< 2) + kicker + mocha-on-bacon + prettybacon + rake + rubocop + simplecov + +BUNDLED WITH + 1.14.6 diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/LICENSE b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/README.markdown b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/README.markdown new file mode 100644 index 0000000..5ffd237 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/README.markdown @@ -0,0 +1,118 @@ +# Hi, I’m Claide, your command-line tool aide. + +[![Build Status](https://img.shields.io/travis/CocoaPods/CLAide/master.svg?style=flat)](https://travis-ci.org/CocoaPods/CLAide) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/CLAide.svg?style=flat)](https://codeclimate.com/github/CocoaPods/CLAide) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/CLAide.svg?style=flat)](https://codeclimate.com/github/CocoaPods/CLAide) + +I was born out of a need for a _simple_ option and command parser, while still +providing an API that allows you to quickly create a full featured command-line +interface. + +## Install + +``` +$ [sudo] gem install claide +``` + + +## Usage + +For full documentation, on the API of CLAide, visit [rubydoc.info][docs]. + + +### Argument handling + +At its core, a library, such as myself, needs to parse the parameters specified +by the user. + +Working with parameters is done through the `CLAide::ARGV` class. It takes an +array of parameters and parses them as either flags, options, or arguments. + +| Parameter | Description | +| :---: | :---: | +| `--milk`, `--no-milk` | A boolean ‘flag’, which may be negated. | +| `--sweetner=honey` | A ‘option’ consists of a key, a ‘=’, and a value. | +| `tea` | A ‘argument’ is just a value. | + + +Accessing flags, options, and arguments, with the following methods, will also +remove the parameter from the remaining unprocessed parameters. + +```ruby +argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) +argv.shift_argument # => 'tea' +argv.shift_argument # => nil +argv.flag?('milk') # => false +argv.flag?('milk') # => nil +argv.option('sweetner') # => 'honey' +argv.option('sweetner') # => nil +``` + + +In case the requested flag or option is not present, `nil` is returned. You can +specify a default value to be used as the optional second method parameter: + +```ruby +argv = CLAide::ARGV.new(['tea']) +argv.flag?('milk', true) # => true +argv.option('sweetner', 'sugar') # => 'sugar' +``` + + +Unlike flags and options, accessing all of the arguments can be done in either +a preserving or mutating way: + +```ruby +argv = CLAide::ARGV.new(['tea', 'coffee']) +argv.arguments # => ['tea', 'coffee'] +argv.arguments! # => ['tea', 'coffee'] +argv.arguments # => [] +``` + + +### Command handling + +Commands are actions that a tool can perform. Every command is represented by +its own command class. + +Commands may be nested, in which case they inherit from the ‘super command’ +class. Some of these nested commands may not actually perform any work +themselves, but are rather used as ‘super commands’ _only_, in which case they +are ‘abtract commands’. + +Running commands is typically done through the `CLAide::Command.run(argv)` +method, which performs the following three steps: + +1. Parses the given parameters, finds the command class matching the parameters, + and instantiates it with the remaining parameters. 
It’s each nested command + class’ responsibility to remove the parameters it handles from the remaining + parameters, _before_ calling the `super` implementation. + +2. Asks the command instance to validate its parameters, but only _after_ + calling the `super` implementation. The `super` implementation will show a + help banner in case the `--help` flag is specified, not all parameters were + removed from the parameter list, or the command is an abstract command. + +3. Calls the `run` method on the command instance, where it may do its work. + +4. Catches _any_ uncaught exception and shows it to user in a meaningful way. + * A `Help` exception triggers a help banner to be shown for the command. + * A exception that includes the `InformativeError` module will show _only_ + the message, unless disabled with the `--verbose` flag; and in red, + depending on the color configuration. + * Any other type of exception will be passed to `Command.report_error(error)` + for custom error reporting (such as the one in [CocoaPods][report-error]). + +In case you want to call commands from _inside_ other commands, you should use +the `CLAide::Command.parse(argv)` method to retrieve an instance of the command +and call `run` on it. Unless you are using user-supplied parameters, there +should not be a need to validate the parameters. + +See the [example][example] for a illustration of how to define commands. + + +[travis]: https://secure.travis-ci.org/CocoaPods/CLAide +[travis-status]: https://secure.travis-ci.org/CocoaPods/CLAide.png +[docs]: http://www.rubydoc.info/github/CocoaPods/CLAide/index +[example]: https://github.com/CocoaPods/CLAide/blob/master/examples/make.rb +[report-error]: https://github.com/CocoaPods/CocoaPods/blob/054fe5c861d932219ec40a91c0439a7cfc3a420c/lib/cocoapods/command.rb#L36 diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Rakefile b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Rakefile new file mode 100644 index 0000000..6aba28a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/Rakefile @@ -0,0 +1,57 @@ +# encoding: utf-8 + +#-- Bootstrap --------------------------------------------------------------# + +desc 'Initializes your working copy to run the specs' +task :bootstrap do + if system('which bundle') + title 'Installing gems' + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + require 'bundler/gem_tasks' + task :default => :spec + + #-- Specs ------------------------------------------------------------------# + + desc 'Run specs' + task :spec do + title 'Running Unit Tests' + files = FileList['spec/**/*_spec.rb'].shuffle.join(' ') + sh "bundle exec bacon #{files}" + + Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3' + end + + #-- Rubocop ----------------------------------------------------------------# + + desc 'Check code against RuboCop rules' + task :rubocop do + sh 'bundle exec rubocop' + end + +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' 
\ + "\e[0m" +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/claide.gemspec b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/claide.gemspec new file mode 100644 index 0000000..f40343c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/claide.gemspec @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +$:.unshift File.expand_path('../lib', __FILE__) +require 'claide' + +Gem::Specification.new do |s| + s.name = "claide" + s.version = CLAide::VERSION + s.license = "MIT" + s.email = ["eloy.de.enige@gmail.com", "fabiopelosin@gmail.com"] + s.homepage = "https://github.com/CocoaPods/CLAide" + s.authors = ["Eloy Duran", "Fabio Pelosin"] + + s.summary = "A small command-line interface framework." + + s.files = `git ls-files -z`.split("\0").reject { |f| f =~ /\A(spec|examples)/i } + + ## Make sure you can build the gem on older versions of RubyGems too: + s.rubygems_version = "1.6.2" + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.specification_version = 3 if s.respond_to? :specification_version +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide.rb new file mode 100644 index 0000000..fd56e7c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide.rb @@ -0,0 +1,19 @@ +# encoding: utf-8 + +# The mods of interest are {CLAide::ARGV}, {CLAide::Command}, and +# {CLAide::InformativeError} +# +module CLAide + # @return [String] + # + # CLAide’s version, following [semver](http://semver.org). + # + VERSION = '1.0.3'.freeze + + require 'claide/ansi' + require 'claide/argument' + require 'claide/argv' + require 'claide/command' + require 'claide/help' + require 'claide/informative_error' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi.rb new file mode 100644 index 0000000..0839ed6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi.rb @@ -0,0 +1,126 @@ +# encoding: utf-8 + +require 'claide/ansi/cursor' +require 'claide/ansi/graphics' + +module CLAide + # Provides support for ANSI Escape sequences + # + # For more information see: + # + # - http://ascii-table.com/ansi-escape-sequences.php + # - http://en.wikipedia.org/wiki/ANSI_escape_code + # + # This functionality has been inspired and derived from the following gems: + # + # - colored + # - colorize + # + class ANSI + extend Cursor + extend Graphics + + class << self + # @return [Bool] Wether the string mixin should be disabled to return the + # original string. This method is intended to offer a central location + # where to disable ANSI logic without needed to implement conditionals + # across the code base of clients. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # ANSI.disabled = true + # "example".ansi.yellow #=> "example" + # + attr_accessor :disabled + end + + # @return [Hash{Symbol => Fixnum}] The text attributes codes by their + # English name. + # + TEXT_ATTRIBUTES = { + :bold => 1, + :underline => 4, + :blink => 5, + :reverse => 7, + :hidden => 8, + } + + # @return [Hash{Symbol => Fixnum}] The codes to disable a text attribute by + # their name. 
+ # + TEXT_DISABLE_ATTRIBUTES = { + :bold => 21, + :underline => 24, + :blink => 25, + :reverse => 27, + :hidden => 28, + } + + # Return [String] The escape sequence to reset the graphics. + # + RESET_SEQUENCE = "\e[0m" + + # @return [Hash{Symbol => Fixnum}] The colors codes by their English name. + # + COLORS = { + :black => 0, + :red => 1, + :green => 2, + :yellow => 3, + :blue => 4, + :magenta => 5, + :cyan => 6, + :white => 7, + } + + # Return [String] The escape sequence for the default foreground color. + # + DEFAULT_FOREGROUND_COLOR = "\e[39m" + + # Return [String] The escape sequence for the default background color. + # + DEFAULT_BACKGROUND_COLOR = "\e[49m" + + # @return [Fixnum] The code of a key given the map. + # + # @param [Symbol] key + # The key for which the code is needed. + # + # @param [Hash{Symbol => Fixnum}] map + # A hash which associates each code to each key. + # + # @raise If the key is not provided. + # @raise If the key is not present in the map. + # + def self.code_for_key(key, map) + unless key + raise ArgumentError, 'A key must be provided' + end + code = map[key] + unless code + raise ArgumentError, "Unsupported key: `#{key}`" + end + code + end + end +end + +#-- String mixin -------------------------------------------------------------# + +require 'claide/ansi/string_escaper' + +class String + # @return [StringEscaper] An object which provides convenience methods to + # wrap the receiver in ANSI sequences. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # "example".ansi.on_red #=> "\e[41mexample\e[49m" + # "example".ansi.bold #=> "\e[1mexample\e[21m" + # + def ansi + CLAide::ANSI::StringEscaper.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/cursor.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/cursor.rb new file mode 100644 index 0000000..acfd5b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/cursor.rb @@ -0,0 +1,69 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the position + # of the cursor and to erase parts of text. + # + module Cursor + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] line + # The line where to place the cursor. + # + # @param [Fixnum] column + # The column where to place the cursor. + # + def self.set_cursor_position(line = 0, column = 0) + "\e[#{line};#{column}H" + end + + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] lines + # The amount of lines the cursor should be moved to. + # Negative values indicate up direction and positive ones + # down direction. + # + # @param [Fixnum] columns + # The amount of columns the cursor should be moved to. + # Negative values indicate left direction and positive ones + # right direction. + # + def self.move_cursor(lines, columns = 0) + lines_code = lines < 0 ? 'A' : 'B' + columns_code = columns > 0 ? 'C' : 'D' + "\e[#{lines.abs}#{lines_code};#{columns.abs}#{columns_code}" + end + + # @return [String] The escape sequence to save the cursor position. + # + def self.save_cursor_position + "\e[s" + end + + # @return [String] The escape sequence to restore the cursor to the + # previously saved position. This sequence also clears all the + # output after the position. + # + def self.restore_cursor_position + "\e[u" + end + + # @return [String] The escape sequence to erase the display. 
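The `String#ansi` mixin defined above wraps strings in escape sequences built from these constants; a small usage sketch, with the return values following from the codes above when `ANSI.disabled` is left at its default:

```ruby
require 'claide'

'warning'.ansi.yellow        # => "\e[33mwarning\e[39m"
'OK'.ansi.green.ansi.bold    # => "\e[1m\e[32mOK\e[39m\e[21m"
'error'.ansi.apply(:red, :underline)

# Central switch to strip all styling, e.g. when output is piped:
CLAide::ANSI.disabled = true
'warning'.ansi.yellow        # => "warning"
```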
+ # + def self.erase_display + "\e[2J" + end + + # @return [String] The escape sequence to erase a line form the + # cursor position to then end. + # + def self.erase_line + "\e[K" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/graphics.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/graphics.rb new file mode 100644 index 0000000..e5c2d15 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/graphics.rb @@ -0,0 +1,72 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the graphic + # mode. + # + module Graphics + # @return [String] The escape sequence for a text attribute. + # + # @param [Symbol] key + # The name of the text attribute. + # + def self.text_attribute(key) + code = ANSI.code_for_key(key, TEXT_ATTRIBUTES) + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color. + # + # @param [Symbol] key + # The name of the color. + # + def self.foreground_color(key) + code = ANSI.code_for_key(key, COLORS) + 30 + graphics_mode(code) + end + + # @return [String] The escape sequence for a background color. + # + # @param [Symbol] key + # The name of the color. + # + def self.background_color(key) + code = ANSI.code_for_key(key, COLORS) + 40 + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color using the + # xterm-256 format. + # + # @param [Fixnum] color + # The value of the color. + # + def self.foreground_color_256(color) + code = [38, 5, color] + graphics_mode(code) + end + + # @return [String] The escape sequence for a background color using the + # xterm-256 format. + # + # @param [Fixnum] color + # The value of the color. + # + def self.background_color_256(color) + code = [48, 5, color] + graphics_mode(code) + end + + # @return [String] The escape sequence for a single or a list of codes. + # + # @param [Fixnum, Array] codes + # The code(s). + # + def self.graphics_mode(codes) + codes = Array(codes) + "\e[#{codes.join(';')}m" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/string_escaper.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/string_escaper.rb new file mode 100644 index 0000000..b6f461c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/ansi/string_escaper.rb @@ -0,0 +1,79 @@ +module CLAide + class ANSI + # Provides support to wrap strings in ANSI sequences according to the + # `ANSI.disabled` setting. + # + class StringEscaper < String + # @param [String] string The string to wrap. + # + def initialize(string) + super + end + + # @return [StringEscaper] Wraps a string in the given ANSI sequences, + # taking care of handling existing sequences for the same + # family of attributes (i.e. attributes terminated by the + # same sequence). + # + def wrap_in_ansi_sequence(open, close) + if ANSI.disabled + self + else + gsub!(close, open) + insert(0, open).insert(-1, close) + end + end + + # @return [StringEscaper] + # + # @param [Array] keys + # One or more keys corresponding to ANSI codes to apply to the + # string. + # + def apply(*keys) + keys.flatten.each do |key| + send(key) + end + self + end + + ANSI::COLORS.each_key do |key| + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each foreground color (e.g. #blue). + # + # The methods handle nesting of ANSI sequences. 
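A rough sketch of the lower-level `Cursor` and `Graphics` helpers shown above; the comments describe the intent of each call, and how a given terminal renders the sequences may vary:

```ruby
require 'claide'

# Build the sequence to move the cursor up two lines, ...
print CLAide::ANSI::Cursor.move_cursor(-2)
# ... erase from the cursor position to the end of the line, ...
print CLAide::ANSI::Cursor.erase_line
# ... switch to color 202 of the xterm-256 palette, ...
print CLAide::ANSI::Graphics.foreground_color_256(202)
print 'Rebrewing...'
# ... and reset the graphics mode afterwards.
print CLAide::ANSI::RESET_SEQUENCE
```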
+ # + define_method key do + open = Graphics.foreground_color(key) + close = ANSI::DEFAULT_FOREGROUND_COLOR + wrap_in_ansi_sequence(open, close) + end + + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each background color (e.g. #on_blue). + # + # The methods handle nesting of ANSI sequences. + # + define_method "on_#{key}" do + open = Graphics.background_color(key) + close = ANSI::DEFAULT_BACKGROUND_COLOR + wrap_in_ansi_sequence(open, close) + end + end + + ANSI::TEXT_ATTRIBUTES.each_key do |key| + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each text attribute (e.g. #bold). + # + # The methods handle nesting of ANSI sequences. + # + define_method key do + open = Graphics.text_attribute(key) + close_code = TEXT_DISABLE_ATTRIBUTES[key] + close = Graphics.graphics_mode(close_code) + wrap_in_ansi_sequence(open, close) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/argument.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/argument.rb new file mode 100644 index 0000000..4d54f29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/argument.rb @@ -0,0 +1,62 @@ +# encoding: utf-8 + +module CLAide + # This class is used to represent individual arguments to present to + # the command help banner + # + class Argument + # The string used for ellipsis / repeatable arguments in the banner + # + ELLIPSIS = '...' + + # @return [Array] + # List of alternate names for the parameters + attr_reader :names + + # @return [Boolean] + # Indicates if the argument is required (not optional) + # + attr_accessor :required + alias_method :required?, :required + + # @return [Boolean] + # Indicates if the argument is repeatable (= can appear multiple + # times in the command, which is indicated by '...' in the banner) + # + attr_accessor :repeatable + alias_method :repeatable?, :repeatable + + # @param [String,Array] names + # List of the names of each parameter alternatives. + # For convenience, if there is only one alternative for that + # parameter, we can use a String instead of a 1-item Array + # + # @param [Boolean] required + # true if the parameter is required, false if it is optional + # + # @param [Boolean] repeatable + # If true, the argument can appear multiple times in the command. + # In that case, an ellipsis will be appended after the argument + # in the help banner. + # + # @example + # + # # A required parameter that can be either a NAME or URL + # Argument.new(%(NAME URL), true) + # + def initialize(names, required, repeatable = false) + @names = Array(names) + @required = required + @repeatable = repeatable + end + + # @return [Boolean] true on equality + # + # @param [Argument] other the Argument compared against + # + def ==(other) + other.is_a?(Argument) && + names == other.names && required == other.required + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/argv.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/argv.rb new file mode 100644 index 0000000..35374f0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/argv.rb @@ -0,0 +1,329 @@ +# encoding: utf-8 + +module CLAide + # This class is responsible for parsing the parameters specified by the user, + # accessing individual parameters, and keep state by removing handled + # parameters. + # + class ARGV + # @return [ARGV] Coerces an object to the ARGV class if needed. 
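The `Argument` class above drives how parameters are rendered in help banners; a brief sketch using hypothetical `NAME|URL` and `FILE` placeholders:

```ruby
require 'claide'

# A required argument with two alternative spellings, shown as `NAME|URL`
# in the banner, and an optional repeatable one, shown as `[FILE ...]`.
name_or_url = CLAide::Argument.new(%w(NAME URL), true)
extra_files = CLAide::Argument.new('FILE', false, true)

name_or_url.required?    # => true
name_or_url.names        # => ["NAME", "URL"]
extra_files.repeatable?  # => true
```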
+ # + # @param [Object] argv + # The object which should be converted to the ARGV class. + # + def self.coerce(argv) + if argv.is_a?(ARGV) + argv + else + ARGV.new(argv) + end + end + + # @param [Array<#to_s>] argv + # A list of parameters. + # + def initialize(argv) + @entries = Parser.parse(argv) + end + + # @return [Boolean] Whether or not there are any remaining unhandled + # parameters. + # + def empty? + @entries.empty? + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format a user specifies it in. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder # => ['--no-milk', '--sweetner=honey'] + # + def remainder + @entries.map do |type, (key, value)| + case type + when :arg + key + when :flag + "--#{'no-' if value == false}#{key}" + when :option + "--#{key}=#{value}" + end + end + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format the user specified them. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder! # => ['--no-milk', '--sweetner=honey'] + # argv.remainder # => [] + # + def remainder! + remainder.tap { @entries.clear } + end + + # @return [Hash] A hash that consists of the remaining flags and options + # and their values. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.options # => { 'milk' => false, 'sweetner' => 'honey' } + # + def options + options = {} + @entries.each do |type, (key, value)| + options[key] = value unless type == :arg + end + options + end + + # @return [Array] A list of the remaining arguments. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white', 'biscuit'] + # + def arguments + @entries.map { |type, value| value if type == :arg }.compact + end + + # @return [Array] A list of the remaining arguments. + # + # @note This version also removes the arguments from the remaining + # parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.arguments # => ['tea', 'white', 'biscuit'] + # argv.arguments! # => ['tea', 'white', 'biscuit'] + # argv.arguments # => [] + # + def arguments! + arguments = [] + while arg = shift_argument + arguments << arg + end + arguments + end + + # @return [String] The first argument in the remaining parameters. + # + # @note This will remove the argument from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white'] + # + def shift_argument + if index = @entries.find_index { |type, _| type == :arg } + entry = @entries[index] + @entries.delete_at(index) + entry.last + end + end + + # @return [Boolean, nil] Returns `true` if the flag by the specified `name` + # is among the remaining parameters and is not negated. + # + # @param [String] name + # The name of the flag to look for among the remaining parameters. + # + # @param [Boolean] default + # The value that is returned in case the flag is not among the + # remaining parameters. + # + # @note This will remove the flag from the remaining parameters. 
+ # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.flag?('milk') # => false + # argv.flag?('milk') # => nil + # argv.flag?('milk', true) # => true + # argv.remainder # => ['tea', '--sweetner=honey'] + # + def flag?(name, default = nil) + delete_entry(:flag, name, default, true) + end + + # @return [String, nil] Returns the value of the option by the specified + # `name` is among the remaining parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @param [String] default + # The value that is returned in case the option is not among the + # remaining parameters. + # + # @note This will remove the option from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetner=honey']) + # argv.option('sweetner') # => 'honey' + # argv.option('sweetner') # => nil + # argv.option('sweetner', 'sugar') # => 'sugar' + # argv.remainder # => ['tea', '--no-milk'] + # + def option(name, default = nil) + delete_entry(:option, name, default) + end + + # @return [Array] Returns an array of all the values of the option + # with the specified `name` among the remaining + # parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @note This will remove the option from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['--ignore=foo', '--ignore=bar']) + # argv.all_options('include') # => [] + # argv.all_options('ignore') # => ['bar', 'foo'] + # argv.remainder # => [] + # + def all_options(name) + options = [] + while entry = option(name) + options << entry + end + options + end + + private + + # @return [Array>] A list of tuples for each + # non consumed parameter, where the first entry is the `type` and + # the second entry the actual parsed parameter. + # + attr_reader :entries + + # @return [Bool, String, Nil] Removes an entry from the entries list and + # returns its value or the default value if the entry was not + # present. + # + # @param [Symbol] requested_type + # The type of the entry. + # + # @param [String] requested_key + # The key of the entry. + # + # @param [Bool, String, Nil] default + # The value which should be returned if the entry is not present. + # + # @param [Bool] delete_all + # Whether all values matching `requested_type` and `requested_key` + # should be deleted. + # + def delete_entry(requested_type, requested_key, default, delete_all = false) + pred = proc do |type, (key, _value)| + requested_key == key && requested_type == type + end + entry = entries.reverse_each.find(&pred) + delete_all ? entries.delete_if(&pred) : entries.delete(entry) + + entry.nil? ? default : entry.last.last + end + + module Parser + # @return [Array>] A list of tuples for each + # parameter, where the first entry is the `type` and the second + # entry the actual parsed parameter. + # + # @example + # + # list = parse(['tea', '--no-milk', '--sweetner=honey']) + # list # => [[:arg, "tea"], + # [:flag, ["milk", false]], + # [:option, ["sweetner", "honey"]]] + # + def self.parse(argv) + entries = [] + copy = argv.map(&:to_s) + double_dash = false + while argument = copy.shift + next if !double_dash && double_dash = (argument == '--') + type = double_dash ? :arg : argument_type(argument) + parsed_argument = parse_argument(type, argument) + entries << [type, parsed_argument] + end + entries + end + + # @return [Symbol] Returns the type of an argument. 
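Pulling the documented `flag?`, `option`, `all_options`, and `--` behaviour together, a short sketch with hypothetical parameters:

```ruby
require 'claide'

argv = CLAide::ARGV.new(%w(install --verbose --include=foo --include=bar -- --raw))
argv.flag?('verbose')        # => true
argv.all_options('include')  # => ["bar", "foo"]
argv.shift_argument          # => "install"
# Everything after `--` is treated as a plain argument rather than a flag:
argv.remainder               # => ["--raw"]
```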
The types can be + # either: `:arg`, `:flag`, `:option`. + # + # @param [String] argument + # The argument to check. + # + def self.argument_type(argument) + if argument.start_with?('--') + if argument.include?('=') + :option + else + :flag + end + else + :arg + end + end + + # @return [String, Array] Returns the argument itself for + # normal arguments (like commands) and a tuple with the key and + # the value for options and flags. + # + # @param [Symbol] type + # The type of the argument. + # + # @param [String] argument + # The argument to check. + # + def self.parse_argument(type, argument) + case type + when :arg + return argument + when :flag + return parse_flag(argument) + when :option + return argument[2..-1].split('=', 2) + end + end + + # @return [String, Array] Returns the parameter + # describing a flag arguments. + # + # @param [String] argument + # The flag argument to check. + # + def self.parse_flag(argument) + if argument.start_with?('--no-') + key = argument[5..-1] + value = false + else + key = argument[2..-1] + value = true + end + [key, value] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command.rb new file mode 100644 index 0000000..6414b5c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command.rb @@ -0,0 +1,669 @@ +# encoding: utf-8 + +require 'claide/command/banner' +require 'claide/command/plugin_manager' +require 'claide/command/argument_suggester' + +module CLAide + # This class is used to build a command-line interface + # + # Each command is represented by a subclass of this class, which may be + # nested to create more granular commands. + # + # Following is an overview of the types of commands and what they should do. + # + # ### Any command type + # + # * Inherit from the command class under which the command should be nested. + # * Set {Command.summary} to a brief description of the command. + # * Override {Command.options} to return the options it handles and their + # descriptions and prepending them to the results of calling `super`. + # * Override {Command#initialize} if it handles any parameters. + # * Override {Command#validate!} to check if the required parameters the + # command handles are valid, or call {Command#help!} in case they’re not. + # + # ### Abstract command + # + # The following is needed for an abstract command: + # + # * Set {Command.abstract_command} to `true`. + # * Subclass the command. + # + # When the optional {Command.description} is specified, it will be shown at + # the top of the command’s help banner. + # + # ### Normal command + # + # The following is needed for a normal command: + # + # * Set {Command.arguments} to the description of the arguments this command + # handles. + # * Override {Command#run} to perform the actual work. + # + # When the optional {Command.description} is specified, it will be shown + # underneath the usage section of the command’s help banner. Otherwise this + # defaults to {Command.summary}. + # + class Command + class << self + # @return [Boolean] Indicates whether or not this command can actually + # perform work of itself, or that it only contains subcommands. + # + attr_accessor :abstract_command + alias_method :abstract_command?, :abstract_command + + # @return [Boolean] Indicates whether or not this command is used during + # command parsing and whether or not it should be shown in the + # help banner or to show its subcommands instead. 
+ # + # Setting this to `true` implies it’s an abstract command. + # + attr_reader :ignore_in_command_lookup + alias_method :ignore_in_command_lookup?, :ignore_in_command_lookup + def ignore_in_command_lookup=(flag) + @ignore_in_command_lookup = self.abstract_command = flag + end + + # @return [String] The subcommand which an abstract command should invoke + # by default. + # + attr_accessor :default_subcommand + + # @return [String] A brief description of the command, which is shown + # next to the command in the help banner of a parent command. + # + attr_accessor :summary + + # @return [String] A longer description of the command, which is shown + # underneath the usage section of the command’s help banner. Any + # indentation in this value will be ignored. + # + attr_accessor :description + + # @return [Array] The prefixes used to search for CLAide plugins. + # Plugins are loaded via their `_plugin.rb` file. + # Defaults to search for `claide` plugins. + # + def plugin_prefixes + @plugin_prefixes ||= ['claide'] + end + attr_writer :plugin_prefixes + + # @return [Array] + # A list of arguments the command handles. This is shown + # in the usage section of the command’s help banner. + # Each Argument in the array represents an argument by its name + # (or list of alternatives) and whether it's required or optional + # + def arguments + @arguments ||= [] + end + + # @param [Array] arguments + # An array listing the command arguments. + # Each Argument object describe the argument by its name + # (or list of alternatives) and whether it's required or optional + # + # @todo Remove deprecation + # + def arguments=(arguments) + if arguments.is_a?(Array) + if arguments.empty? || arguments[0].is_a?(Argument) + @arguments = arguments + else + self.arguments_array = arguments + end + else + self.arguments_string = arguments + end + end + + # @return [Boolean] The default value for {Command#ansi_output}. This + # defaults to `true` if `STDOUT` is connected to a TTY and + # `String` has the instance methods `#red`, `#green`, and + # `#yellow` (which are defined by, for instance, the + # [colored](https://github.com/defunkt/colored) gem). + # + def ansi_output + if @ansi_output.nil? + @ansi_output = STDOUT.tty? + end + @ansi_output + end + attr_writer :ansi_output + alias_method :ansi_output?, :ansi_output + + # @return [String] The name of the command. Defaults to a snake-cased + # version of the class’ name. + # + def command + @command ||= name.split('::').last.gsub(/[A-Z]+[a-z]*/) do |part| + part.downcase << '-' + end[0..-2] + end + attr_writer :command + + # @return [String] The version of the command. This value will be printed + # by the `--version` flag if used for the root command. + # + attr_accessor :version + end + + #-------------------------------------------------------------------------# + + # @return [String] The full command up-to this command, as it would be + # looked up during parsing. + # + # @note (see #ignore_in_command_lookup) + # + # @example + # + # BevarageMaker::Tea.full_command # => "beverage-maker tea" + # + def self.full_command + if superclass == Command + ignore_in_command_lookup? ? '' : command + else + if ignore_in_command_lookup? + superclass.full_command + else + "#{superclass.full_command} #{command}" + end + end + end + + # @return [Bool] Whether this is the root command class + # + def self.root_command? + superclass == CLAide::Command + end + + # @return [Array] A list of all command classes that are nested + # under this command. 
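As the `command` and `full_command` documentation above describes, command names default to a snake-cased version of the class name; a quick sketch with hypothetical `CoffeeMaker` classes:

```ruby
require 'claide'

class CoffeeMaker < CLAide::Command
  class ColdBrew < CoffeeMaker
  end
end

CoffeeMaker.command                   # => "coffee-maker"
CoffeeMaker::ColdBrew.command         # => "cold-brew"
CoffeeMaker::ColdBrew.full_command    # => "coffee-maker cold-brew"
```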
+ # + def self.subcommands + @subcommands ||= [] + end + + # @return [Array] A list of command classes that are nested under + # this command _or_ the subcommands of those command classes in + # case the command class should be ignored in command lookup. + # + def self.subcommands_for_command_lookup + subcommands.map do |subcommand| + if subcommand.ignore_in_command_lookup? + subcommand.subcommands_for_command_lookup + else + subcommand + end + end.flatten + end + + # Searches the list of subcommands that should not be ignored for command + # lookup for a subcommand with the given `name`. + # + # @param [String] name + # The name of the subcommand to be found. + # + # @return [CLAide::Command, nil] The subcommand, if found. + # + def self.find_subcommand(name) + subcommands_for_command_lookup.find { |sc| sc.command == name } + end + + # @visibility private + # + # Automatically registers a subclass as a subcommand. + # + def self.inherited(subcommand) + subcommands << subcommand + end + + DEFAULT_ROOT_OPTIONS = [ + ['--version', 'Show the version of the tool'], + ] + + DEFAULT_OPTIONS = [ + ['--verbose', 'Show more debugging information'], + ['--no-ansi', 'Show output without ANSI codes'], + ['--help', 'Show help banner of specified command'], + ] + + # Should be overridden by a subclass if it handles any options. + # + # The subclass has to combine the result of calling `super` and its own + # list of options. The recommended way of doing this is by concatenating + # to this classes’ own options. + # + # @return [Array] + # + # A list of option name and description tuples. + # + # @example + # + # def self.options + # [ + # ['--verbose', 'Print more info'], + # ['--help', 'Print help banner'], + # ].concat(super) + # end + # + def self.options + if root_command? + DEFAULT_ROOT_OPTIONS + DEFAULT_OPTIONS + else + DEFAULT_OPTIONS + end + end + + # Adds a new option for the current command. + # + # This method can be used in conjunction with overriding `options`. + # + # @return [void] + # + # @example + # + # option '--help', 'Print help banner ' + # + def self.option(name, description) + mod = Module.new do + define_method(:options) do + [ + [name, description], + ].concat(super()) + end + end + extend(mod) + end + private_class_method :option + + # Handles root commands options if appropriate. + # + # @param [ARGV] argv + # The parameters of the command. + # + # @return [Bool] Whether any root command option was handled. + # + def handle_root_options(argv) + return false unless self.class.root_command? + if argv.flag?('version') + print_version + return true + end + false + end + + # Prints the version of the command optionally including plugins. + # + def print_version + puts self.class.version + if verbose? + PluginManager.specifications.each do |spec| + puts "#{spec.name}: #{spec.version}" + end + end + end + + # Instantiates the command class matching the parameters through + # {Command.parse}, validates it through {Command#validate!}, and runs it + # through {Command#run}. + # + # @note The ANSI support is configured before running a command to allow + # the same process to run multiple commands with different + # settings. For example a process with ANSI output enabled might + # want to programmatically invoke another command with the output + # enabled. + # + # @param [Array, ARGV] argv + # A list of parameters. For instance, the standard `ARGV` constant, + # which contains the parameters passed to the program. 
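Following the `self.options` example above, a sketch of a root command that declares its own option and consumes it in `initialize`; `Tea` and `--sweetener` are hypothetical:

```ruby
require 'claide'

class Tea < CLAide::Command
  self.summary = 'Brews tea'

  def self.options
    [
      ['--sweetener=[sugar|honey]', 'Sweetener to add to the cup'],
    ].concat(super)
  end

  def initialize(argv)
    @sweetener = argv.option('sweetener')
    super
  end

  def run
    puts(@sweetener ? "Sweetened with #{@sweetener}." : 'Unsweetened.')
  end
end

Tea.run(%w(--sweetener=honey))   # prints "Sweetened with honey."
```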
+ # + # @return [void] + # + def self.run(argv = []) + plugin_prefixes.each do |plugin_prefix| + PluginManager.load_plugins(plugin_prefix) + end + + argv = ARGV.coerce(argv) + command = parse(argv) + ANSI.disabled = !command.ansi_output? + unless command.handle_root_options(argv) + command.validate! + command.run + end + rescue Object => exception + handle_exception(command, exception) + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] An instance of the command class that was matched by + # going through the arguments in the parameters and drilling down + # command classes. + # + def self.parse(argv) + argv = ARGV.coerce(argv) + cmd = argv.arguments.first + if cmd && subcommand = find_subcommand(cmd) + argv.shift_argument + subcommand.parse(argv) + elsif abstract_command? && default_subcommand + load_default_subcommand(argv) + else + new(argv) + end + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] Returns the default subcommand initialized with the + # given arguments. + # + def self.load_default_subcommand(argv) + unless subcommand = find_subcommand(default_subcommand) + raise 'Unable to find the default subcommand ' \ + "`#{default_subcommand}` for command `#{self}`." + end + result = subcommand.parse(argv) + result.invoked_as_default = true + result + end + + # Presents an exception to the user in a short manner in case of an + # `InformativeError` or in long form in other cases, + # + # @param [Command, nil] command + # The command from where the exception originated. + # + # @param [Object] exception + # The exception to present. + # + # @return [void] + # + def self.handle_exception(command, exception) + if exception.is_a?(InformativeError) + puts exception.message + if command.nil? || command.verbose? + puts + puts(*exception.backtrace) + end + exit exception.exit_status + else + report_error(exception) + end + end + + # Allows the application to perform custom error reporting, by overriding + # this method. + # + # @param [Exception] exception + # + # An exception that occurred while running a command through + # {Command.run}. + # + # @raise + # + # By default re-raises the specified exception. + # + # @return [void] + # + def self.report_error(exception) + plugins = PluginManager.plugins_involved_in_exception(exception) + unless plugins.empty? + puts '[!] The exception involves the following plugins:' \ + "\n - #{plugins.join("\n - ")}\n".ansi.yellow + end + raise exception + end + + # @visibility private + # + # @param [String] error_message + # The error message to show to the user. + # + # @param [Class] help_class + # The class to use to raise a ‘help’ error. + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def self.help!(error_message = nil, help_class = Help) + raise help_class.new(banner, error_message) + end + + # @visibility private + # + # Returns the banner for the command. + # + # @param [Class] banner_class + # The class to use to format help banners. + # + # @return [String] The banner for the command. + # + def self.banner(banner_class = Banner) + banner_class.new(self).formatted_banner + end + + # @visibility private + # + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def self.banner! 
+ puts banner + exit 0 + end + + #-------------------------------------------------------------------------# + + # Set to `true` if the user specifies the `--verbose` option. + # + # @note + # + # If you want to make use of this value for your own configuration, you + # should check the value _after_ calling the `super` {Command#initialize} + # implementation. + # + # @return [Boolean] + # + # Wether or not backtraces should be included when presenting the user an + # exception that includes the {InformativeError} module. + # + attr_accessor :verbose + alias_method :verbose?, :verbose + + # Set to `true` if {Command.ansi_output} returns `true` and the user + # did **not** specify the `--no-ansi` option. + # + # @note (see #verbose) + # + # @return [Boolean] + # + # Whether or not to use ANSI codes to prettify output. For instance, by + # default {InformativeError} exception messages will be colored red and + # subcommands in help banners green. + # + attr_accessor :ansi_output + alias_method :ansi_output?, :ansi_output + + # Set to `true` if initialized with a `--help` flag + # + # @return [Boolean] + # + # Whether the command was initialized with argv containing --help + # + attr_accessor :help_arg + alias_method :help?, :help_arg + + # Subclasses should override this method to remove the arguments/options + # they support from `argv` _before_ calling `super`. + # + # The `super` implementation sets the {#verbose} attribute based on whether + # or not the `--verbose` option is specified; and the {#ansi_output} + # attribute to `false` if {Command.ansi_output} returns `true`, but the + # user specified the `--no-ansi` option. + # + # @param [ARGV, Array] argv + # + # A list of (user-supplied) params that should be handled. + # + def initialize(argv) + argv = ARGV.coerce(argv) + @verbose = argv.flag?('verbose') + @ansi_output = argv.flag?('ansi', Command.ansi_output?) + @argv = argv + @help_arg = argv.flag?('help') + end + + # Convenience method. + # Instantiate the command and run it with the provided arguments at once. + # + # @note This method validate! the command before running it, but contrary to + # CLAide::Command::run, it does not load plugins nor exit on failure. + # It is up to the caller to rescue any possible exception raised. + # + # @param [String..., Array] args + # The arguments to initialize the command with + # + # @raise [Help] If validate! fails + # + def self.invoke(*args) + command = new(ARGV.new(args.flatten)) + command.validate! + command.run + end + + # @return [Bool] Whether the command was invoked by an abstract command by + # default. + # + attr_accessor :invoked_as_default + alias_method :invoked_as_default?, :invoked_as_default + + # Raises a Help exception if the `--help` option is specified, if `argv` + # still contains remaining arguments/options by the time it reaches this + # implementation, or when called on an ‘abstract command’. + # + # Subclasses should call `super` _before_ doing their own validation. This + # way when the user specifies the `--help` flag a help banner is shown, + # instead of possible actual validation errors. + # + # @raise [Help] + # + # @return [void] + # + def validate! + banner! if help? + unless @argv.empty? + argument = @argv.remainder.first + help! ArgumentSuggester.new(argument, self.class).suggestion + end + help! if self.class.abstract_command? + end + + # This method should be overridden by the command class to perform its + # work. 
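A sketch of the `invoke` and `validate!`/`help!` flow described above, using a hypothetical `Greet` command:

```ruby
require 'claide'

class Greet < CLAide::Command
  self.summary = 'Prints a greeting'
  self.arguments = [CLAide::Argument.new('NAME', true)]

  def initialize(argv)
    @name = argv.shift_argument
    super
  end

  def validate!
    super
    help! 'A NAME is required.' unless @name
  end

  def run
    puts "Hello, #{@name}!"
  end
end

Greet.invoke('world')   # prints "Hello, world!"
Greet.invoke            # raises CLAide::Help; invoke does not rescue or exit
```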
+ # + # @return [void] + # + def run + raise 'A subclass should override the `CLAide::Command#run` method to ' \ + 'actually perform some work.' + end + + protected + + # Returns the class of the invoked command + # + # @return [Command] + # + def invoked_command_class + if invoked_as_default? + self.class.superclass + else + self.class + end + end + + # @param [String] error_message + # A custom optional error message + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def help!(error_message = nil) + invoked_command_class.help!(error_message) + end + + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def banner! + invoked_command_class.banner! + end + + #-------------------------------------------------------------------------# + + # Handle deprecated form of self.arguments as an + # Array> like in: + # + # self.arguments = [ ['NAME', :required], ['QUERY', :optional] ] + # + # @todo Remove deprecated format support + # + def self.arguments_array=(arguments) + warn '[!] The signature of CLAide#arguments has changed. ' \ + "Use CLAide::Argument (#{self}: `#{arguments}`)".ansi.yellow + @arguments = arguments.map do |(name_str, type)| + names = name_str.split('|') + required = (type == :required) + Argument.new(names, required) + end + end + + # Handle deprecated form of self.arguments as a String, like in: + # + # self.arguments = 'NAME [QUERY]' + # + # @todo Remove deprecated format support + # + def self.arguments_string=(arguments) + warn '[!] The specification of arguments as a string has been' \ + " deprecated #{self}: `#{arguments}`".ansi.yellow + @arguments = arguments.split(' ').map do |argument| + if argument.start_with?('[') + Argument.new(argument.sub(/\[(.*)\]/, '\1').split('|'), false) + else + Argument.new(argument.split('|'), true) + end + end + end + + # Handle depracted form of assigning a plugin prefix. + # + # @todo Remove deprecated form. + # + def self.plugin_prefix=(prefix) + warn '[!] The specification of a singular plugin prefix has been ' \ + "deprecated. Use `#{self}::plugin_prefixes` instead." + plugin_prefixes << prefix + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/argument_suggester.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/argument_suggester.rb new file mode 100644 index 0000000..a13575c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/argument_suggester.rb @@ -0,0 +1,99 @@ +# encoding: utf-8 + +module CLAide + class Command + class ArgumentSuggester + # @param [String] argument + # The unrecognized argument for which to make a suggestion. + # + # @param [Class] command_class + # The class of the command which encountered the unrecognized + # arguments. + # + def initialize(argument, command_class) + @argument, @command_class = argument, command_class + @argument_type = ARGV::Parser.argument_type(@argument) + end + + # @return [Array] The list of the valid arguments for a command + # according to the type of the argument. + # + def possibilities + case @argument_type + when :option, :flag + @command_class.options.map(&:first) + when :arg + @command_class.subcommands_for_command_lookup.map(&:command) + end + end + + # @return [String] Returns a suggested argument from `possibilities` based + # on the `levenshtein_distance` score. 
+ # + def suggested_argument + possibilities.sort_by do |element| + self.class.levenshtein_distance(@argument, element) + end.first + end + + # @return [String] Returns a message including a suggestion for the given + # suggestion. + # + def suggestion + argument_description = @argument_type == :arg ? 'command' : 'option' + if suggestion = suggested_argument + pretty_suggestion = self.class.prettify_suggestion(suggestion, + @argument_type) + "Unknown #{argument_description}: `#{@argument}`\n" \ + "Did you mean: #{pretty_suggestion}?" + else + "Unknown #{argument_description}: `#{@argument}`" + end + end + + # Prettifies the given validation suggestion according to the type. + # + # @param [String] suggestion + # The suggestion to prettify. + # + # @param [Type] argument_type + # The type of the suggestion: either `:command` or `:option`. + # + # @return [String] A handsome suggestion. + # + def self.prettify_suggestion(suggestion, argument_type) + case argument_type + when :option, :flag + suggestion = suggestion.to_s + suggestion.ansi.blue + when :arg + suggestion.ansi.green + end + end + + # Returns the Levenshtein distance between the given strings. + # From: http://rosettacode.org/wiki/Levenshtein_distance#Ruby + # + # @param [String] a + # The first string to compare. + # + # @param [String] b + # The second string to compare. + # + # @return [Fixnum] The distance between the strings. + def self.levenshtein_distance(a, b) + a, b = a.downcase, b.downcase + costs = Array(0..b.length) + (1..a.length).each do |i| + costs[0], nw = i, i - 1 + (1..b.length).each do |j| + costs[j], nw = [ + costs[j] + 1, costs[j - 1] + 1, a[i - 1] == b[j - 1] ? nw : nw + 1 + ].min, costs[j] + end + end + costs[b.length] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/banner.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/banner.rb new file mode 100644 index 0000000..d87c699 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/banner.rb @@ -0,0 +1,307 @@ +# encoding: utf-8 + +module CLAide + class Command + # Creates the formatted banner to present as help of the provided command + # class. + # + class Banner + # @return [Class] The command for which the banner should be created. + # + attr_accessor :command + + # @param [Class] command @see command + # + def initialize(command) + @command = command + end + + # @return [String] The banner for the command. + # + def formatted_banner + sections = [ + ['Usage', formatted_usage_description], + ['Commands', formatted_subcommand_summaries], + ['Options', formatted_options_description], + ] + banner = sections.map do |(title, body)| + [prettify_title("#{title}:"), body] unless body.empty? + end.compact.join("\n\n") + banner + end + + private + + # @!group Banner sections + #-----------------------------------------------------------------------# + + # @return [String] The indentation of the text. + # + TEXT_INDENT = 6 + + # @return [Fixnum] The maximum width of the text. + # + MAX_WIDTH = TEXT_INDENT + 80 + + # @return [Fixnum] The minimum between a name and its description. + # + DESCRIPTION_SPACES = 3 + + # @return [Fixnum] The minimum between a name and its description. + # + SUBCOMMAND_BULLET_SIZE = 2 + + # @return [String] The section describing the usage of the command. 
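The suggester above ranks candidates by Levenshtein distance; a small sketch (the suggested option is colored when ANSI output is enabled):

```ruby
require 'claide'

CLAide::Command::ArgumentSuggester.levenshtein_distance('sweetner', 'sweetener') # => 1
CLAide::Command::ArgumentSuggester.levenshtein_distance('tae', 'tea')            # => 2

suggester = CLAide::Command::ArgumentSuggester.new('--verbos', CLAide::Command)
puts suggester.suggestion
# Unknown option: `--verbos`
# Did you mean: --verbose?
```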
+ # + def formatted_usage_description + message = command.description || command.summary || '' + message = TextWrapper.wrap_formatted_text(message, + TEXT_INDENT, + MAX_WIDTH) + message = prettify_message(command, message) + "#{signature}\n\n#{message}" + end + + # @return [String] The signature of the command. + # + def signature + full_command = command.full_command + sub_command = signature_sub_command + arguments = signature_arguments + result = prettify_signature(full_command, sub_command, arguments) + result.insert(0, '$ ') + result.insert(0, ' ' * (TEXT_INDENT - '$ '.size)) + end + + # @return [String] The subcommand indicator of the signature. + # + def signature_sub_command + return '[COMMAND]' if command.default_subcommand + return 'COMMAND' if command.subcommands.any? + end + + # @return [String] The arguments of the signature. + # + def signature_arguments + command.arguments.map do |arg| + names = arg.names.join('|') + names.concat(' ' + Argument::ELLIPSIS) if arg.repeatable? + arg.required? ? names : "[#{names}]" + end.join(' ') + end + + # @return [String] The section describing the subcommands of the command. + # + # @note The plus sign emphasizes the that the subcommands are added to + # the command. The square brackets conveys a sense of direction + # and indicates the gravitational force towards the default + # command. + # + def formatted_subcommand_summaries + subcommands = subcommands_for_banner + subcommands.map do |subcommand| + name = subcommand.command + bullet = (name == command.default_subcommand) ? '>' : '+' + name = "#{bullet} #{name}" + pretty_name = prettify_subcommand(name) + entry_description(pretty_name, subcommand.summary, name.size) + end.join("\n") + end + + # @return [String] The section describing the options of the command. + # + def formatted_options_description + options = command.options + options.map do |name, description| + pretty_name = prettify_option_name(name) + entry_description(pretty_name, description, name.size) + end.join("\n") + end + + # @return [String] The line describing a single entry (subcommand or + # option). + # + def entry_description(name, description, name_width) + max_name_width = compute_max_name_width + desc_start = max_name_width + (TEXT_INDENT - 2) + DESCRIPTION_SPACES + result = ' ' * (TEXT_INDENT - 2) + result << name + result << ' ' * DESCRIPTION_SPACES + result << ' ' * (max_name_width - name_width) + result << TextWrapper.wrap_with_indent(description, + desc_start, + MAX_WIDTH) + end + + # @!group Overrides + #-----------------------------------------------------------------------# + + # @return [String] A decorated title. + # + def prettify_title(title) + title.ansi.underline + end + + # @return [String] A decorated textual representation of the subcommand + # name. + # + def prettify_subcommand(name) + name.chomp.ansi.green + end + + # @return [String] A decorated textual representation of the option name. + # + # + def prettify_option_name(name) + name.chomp.ansi.blue + end + + # @return [String] A decorated textual representation of the command. + # + def prettify_signature(command, subcommand, argument) + components = [ + [command, :green], + [subcommand, :green], + [argument, :magenta], + ] + components.reduce('') do |memo, (string, ansi_key)| + next memo if !string || string.empty? + memo << ' ' << string.ansi.apply(ansi_key) + end.lstrip + end + + # @return [String] A decorated command description. 
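A sketch of generating a help banner for a hypothetical `Steep` command via `Command.banner`, which assembles the sections above; the Commands section is omitted here because the command has no subcommands:

```ruby
require 'claide'

class Steep < CLAide::Command
  self.summary = 'Steeps a beverage'
  self.arguments = [CLAide::Argument.new('KIND', true)]
end

# Prints the Usage and Options sections, roughly:
#
#   Usage:
#
#     $ steep KIND
#     ...
puts Steep.banner
```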
+ # + def prettify_message(command, message) + message = message.dup + command.arguments.each do |arg| + arg.names.each do |name| + message.gsub!("`#{name.gsub(/\.{3}$/, '')}`", '\0'.ansi.magenta) + end + end + command.options.each do |(name, _description)| + message.gsub!("`#{name}`", '\0'.ansi.blue) + end + message + end + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # @return [Array] The list of the subcommands to use in the + # banner. + # + def subcommands_for_banner + command.subcommands_for_command_lookup.reject do |subcommand| + subcommand.summary.nil? + end.sort_by(&:command) + end + + # @return [Fixnum] The width of the largest command name or of the + # largest option name. Used to align all the descriptions. + # + def compute_max_name_width + widths = [] + widths << command.options.map { |option| option.first.size } + widths << subcommands_for_banner.map do |cmd| + cmd.command.size + SUBCOMMAND_BULLET_SIZE + end.max + widths.flatten.compact.max || 1 + end + + module TextWrapper + # @return [String] Wraps a formatted string (e.g. markdown) by stripping + # heredoc indentation and wrapping by word to the terminal width + # taking into account a maximum one, and indenting the string. + # Code lines (i.e. indented by four spaces) are not wrapped. + # + # @param [String] string + # The string to format. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_formatted_text(string, indent = 0, max_width = 80) + paragraphs = strip_heredoc(string).split("\n\n") + paragraphs = paragraphs.map do |paragraph| + if paragraph.start_with?(' ' * 4) + paragraph.gsub!(/\n/, "\n#{' ' * indent}") + else + paragraph = wrap_with_indent(paragraph, indent, max_width) + end + paragraph.insert(0, ' ' * indent).rstrip + end + paragraphs.join("\n\n") + end + + # @return [String] Wraps a string to the terminal width taking into + # account the given indentation. + # + # @param [String] string + # The string to indent. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_with_indent(string, indent = 0, max_width = 80) + if terminal_width == 0 + width = max_width + else + width = [terminal_width, max_width].min + end + + full_line = string.gsub("\n", ' ') + available_width = width - indent + space = ' ' * indent + word_wrap(full_line, available_width).split("\n").join("\n#{space}") + end + + # @return [String] Lifted straight from ActionView. Thanks guys! + # + def self.word_wrap(line, line_width) + line.gsub(/(.{1,#{line_width}})(\s+|$)/, "\\1\n").strip + end + + # @return [String] Lifted straight from ActiveSupport. Thanks guys! + # + def self.strip_heredoc(string) + if min = string.scan(/^[ \t]*(?=\S)/).min + string.gsub(/^[ \t]{#{min.size}}/, '') + else + string + end + end + + # @!group Private helpers + #---------------------------------------------------------------------# + + # @return [Fixnum] The width of the current terminal unless being piped. + # + def self.terminal_width + @terminal_width ||= + (!ENV['CLAIDE_DISABLE_AUTO_WRAP'] && + STDOUT.tty? 
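A rough sketch of the `TextWrapper` helper the banner uses for descriptions; the effective width depends on the terminal, so the 40-column figure here is only an assumption:

```ruby
require 'claide'

text = 'Brews the specified kind of tea and prints short, ' \
       'step-by-step instructions for it.'

# Wraps to at most 40 columns and indents continuation lines by six spaces
# (the first line is indented by the caller, e.g. Banner#entry_description).
puts CLAide::Command::Banner::TextWrapper.wrap_with_indent(text, 6, 40)
```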
&& + calculate_terminal_width) || 0 + end + + def self.calculate_terminal_width + require 'io/console' + STDOUT.winsize.last + rescue LoadError + (system('which tput > /dev/null 2>&1') && `tput cols`.to_i) || 0 + rescue + 0 + end + private_class_method :calculate_terminal_width + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/plugin_manager.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/plugin_manager.rb new file mode 100644 index 0000000..fd16047 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/command/plugin_manager.rb @@ -0,0 +1,123 @@ +# encoding: utf-8 + +module CLAide + class Command + # Handles plugin related logic logic for the `Command` class. + # + # Plugins are loaded the first time a command run and are identified by the + # prefix specified in the command class. Plugins must adopt the following + # conventions: + # + # - Support being loaded by a file located under the + # `lib/#{plugin_prefix}_plugin` relative path. + # - Be stored in a folder named after the plugin. + # + class PluginManager + # @return [Hash] The loaded plugins, + # grouped by plugin prefix. + # + def self.loaded_plugins + @loaded_plugins ||= {} + end + + # @return [Array] Loads plugins via RubyGems looking + # for files named after the `PLUGIN_PREFIX_plugin` and returns the + # specifications of the gems loaded successfully. + # Plugins are required safely. + # + def self.load_plugins(plugin_prefix) + loaded_plugins[plugin_prefix] ||= + plugin_gems_for_prefix(plugin_prefix).map do |spec, paths| + spec if safe_activate_and_require(spec, paths) + end.compact + end + + # @return [Array] The RubyGems specifications for the + # loaded plugins. + # + def self.specifications + loaded_plugins.values.flatten.uniq + end + + # @return [Array] The RubyGems specifications for the + # installed plugins that match the given `plugin_prefix`. + # + def self.installed_specifications_for_prefix(plugin_prefix) + loaded_plugins[plugin_prefix] || + plugin_gems_for_prefix(plugin_prefix).map(&:first) + end + + # @return [Array] The list of the plugins whose root path appears + # in the backtrace of an exception. + # + # @param [Exception] exception + # The exception to analyze. + # + def self.plugins_involved_in_exception(exception) + specifications.select do |gemspec| + exception.backtrace.any? do |line| + full_require_paths_for(gemspec).any? do |plugin_path| + line.include?(plugin_path) + end + end + end.map(&:name) + end + + # @group Helper Methods + + # @return [Array<[Gem::Specification, Array]>] + # Returns an array of tuples containing the specifications and + # plugin files to require for a given plugin prefix. + # + def self.plugin_gems_for_prefix(prefix) + glob = "#{prefix}_plugin#{Gem.suffix_pattern}" + Gem::Specification.latest_specs(true).map do |spec| + matches = spec.matches_for_glob(glob) + [spec, matches] unless matches.empty? + end.compact + end + + # Activates the given spec and requires the given paths. + # If any exception occurs it is caught and an + # informative message is printed. + # + # @param [Gem::Specification] spec + # The spec to be activated. + # + # @param [String] paths + # The paths to require. + # + # @return [Bool] Whether activation and requiring succeeded. 
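A sketch of loading plugins with the `PluginManager` above; `demo` is a hypothetical plugin prefix, so gems that ship a `demo_plugin.rb` file on their load path would be activated and listed:

```ruby
require 'claide'

# Discover, activate, and require any installed `demo` plugins.
CLAide::Command::PluginManager.load_plugins('demo')

# List the specifications of the plugins that loaded successfully.
CLAide::Command::PluginManager.specifications.each do |spec|
  puts "#{spec.name} (#{spec.version})"
end
```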
+ # + def self.safe_activate_and_require(spec, paths) + spec.activate + paths.each { |path| require(path) } + true + rescue Exception => exception # rubocop:disable RescueException + message = "\n---------------------------------------------" + message << "\nError loading the plugin `#{spec.full_name}`.\n" + message << "\n#{exception.class} - #{exception.message}" + message << "\n#{exception.backtrace.join("\n")}" + message << "\n---------------------------------------------\n" + warn message.ansi.yellow + false + end + + def self.full_require_paths_for(gemspec) + if gemspec.respond_to?(:full_require_paths) + return gemspec.full_require_paths + end + + # RubyGems < 2.2 + gemspec.require_paths.map do |require_path| + if require_path.include?(gemspec.full_gem_path) + require_path + else + File.join(gemspec.full_gem_path, require_path) + end + end + end + private_class_method :full_require_paths_for + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/help.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/help.rb new file mode 100644 index 0000000..7d3b183 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/help.rb @@ -0,0 +1,58 @@ +# encoding: utf-8 + +module CLAide + require 'claide/informative_error' + + # The exception class that is raised to indicate a help banner should be + # shown while running {Command.run}. + # + class Help < StandardError + include InformativeError + + # @return [String] The banner containing the usage instructions of the + # command to show in the help. + # + attr_reader :banner + + # @return [String] An optional error message that will be shown before the + # help banner. + # + attr_reader :error_message + + # @param [String] banner @see banner + # @param [String] error_message @see error_message + # + # @note If an error message is provided, the exit status, used to + # terminate the program with, will be set to `1`, otherwise a {Help} + # exception is treated as not being a real error and exits with `0`. + # + def initialize(banner, error_message = nil) + @banner = banner + @error_message = error_message + @exit_status = @error_message.nil? ? 0 : 1 + end + + # @return [String] The optional error message, colored in red if + # {Command.ansi_output} is set to `true`. + # + def formatted_error_message + if error_message + message = "[!] #{error_message}" + prettify_error_message(message) + end + end + + # @return [String] + # + def prettify_error_message(message) + message.ansi.red + end + + # @return [String] The optional error message, combined with the help + # banner of the command. + # + def message + [formatted_error_message, banner].compact.join("\n\n") + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/informative_error.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/informative_error.rb new file mode 100644 index 0000000..8a92bd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.0.3/lib/claide/informative_error.rb @@ -0,0 +1,21 @@ +# encoding: utf-8 + +module CLAide + # Including this module into an exception class will ensure that when raised, + # while running {Command.run}, only the message of the exception will be + # shown to the user. Unless disabled with the `--verbose` flag. + # + # In addition, the message will be colored red, if {Command.ansi_output} + # is set to `true`. + # + module InformativeError + # @return [Numeric] The exist status code that should be used to terminate + # the program with. Defaults to `1`. 
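A sketch of the `InformativeError` behaviour described above: when raised while running through `Command.run`, only the message is printed (no backtrace unless `--verbose` was given) and the process exits with the default status of 1; `KettleError` and `Kettle` are hypothetical:

```ruby
require 'claide'

class KettleError < StandardError
  include CLAide::InformativeError
end

class Kettle < CLAide::Command
  self.summary = 'Boils water'

  def run
    raise KettleError, 'The kettle is empty.'
  end
end

# Handled by Command.handle_exception: prints only the message, then exits 1.
Kettle.run([])
```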
+ # + attr_writer :exit_status + + def exit_status + @exit_status ||= 1 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.github/workflows/ci.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.github/workflows/ci.yml new file mode 100644 index 0000000..54e1f05 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.github/workflows/ci.yml @@ -0,0 +1,45 @@ +name: ci + +on: + pull_request: + + push: + branches: + - master + - '*-stable' + +jobs: + ci: + name: Ruby ${{ matrix.ruby.name }} + + runs-on: ubuntu-20.04 + + strategy: + fail-fast: false + + matrix: + ruby: + - { name: "2.3", value: 2.3.8 } + - { name: "2.4", value: 2.4.10 } + - { name: "2.5", value: 2.5.9 } + - { name: "2.6", value: 2.6.9 } + - { name: "2.7", value: 2.7.5 } + - { name: "3.0", value: 3.0.3 } + - { name: "3.1", value: 3.1.0 } + + steps: + - uses: actions/checkout@v2 + + - name: Setup ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby.value }} + bundler-cache: true + + - name: Run Test + run: bundle exec rake spec + + - name: Test & publish code coverage + uses: paambaati/codeclimate-action@v3.0.0 + env: + CC_TEST_REPORTER_ID: 46c8b29dd6711f35704e7c5a541486cbbf2cff8b2df8ce755bfc09917d3c1cbb diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.kick b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.kick new file mode 100644 index 0000000..0686cce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.kick @@ -0,0 +1,30 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bundle exec bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/[^/]*/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. +process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) +ignore(/^tmp/) + diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop.yml new file mode 100644 index 0000000..3ef9f69 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop.yml @@ -0,0 +1,6 @@ +require: + - rubocop-performance + +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..4702a3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop_cocoapods.yml @@ -0,0 +1,151 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Style/Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. 
+Style/SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +Lint/AssignmentInCondition: + Enabled: false + +# Allow backticks +Style/AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +Style/IfUnlessModifier: + Enabled: false + +# No enforced convention here. +Style/SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Style/Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInArrayLiteral: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInHashLiteral: + EnforcedStyleForMultiline: comma + +Layout/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +Style/GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Style/Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +Style/HashSyntax: + EnforcedStyle: hash_rockets + +Style/Lambda: + Enabled: false + +Layout/DotPosition: + EnforcedStyle: trailing + +Style/EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +Lint/AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Lint/Void: + Exclude: + - spec/**/* + +Style/ClassAndModuleChildren: + Exclude: + - spec/**/* + +Lint/UselessComparison: + Exclude: + - spec/**/* + +Lint/RaiseException: + Enabled: false + +Lint/StructNewOverride: + Enabled: false + +Style/HashEachMethods: + Enabled: false + +Style/HashTransformKeys: + Enabled: false + +Style/HashTransformValues: + Enabled: false diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop_todo.yml new file mode 100644 index 0000000..926b32c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.rubocop_todo.yml @@ -0,0 +1,70 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2016-03-09 18:40:14 -0600 using RuboCop version 0.38.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. 
+# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 3 +Lint/IneffectiveAccessModifier: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Lint/RedundantCopDisableDirective: + Exclude: + - 'spec/command/banner_spec.rb' + +# Offense count: 1 +Performance/FixedSize: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Performance/StringReplacement: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: prefer_alias, prefer_alias_method +Style/Alias: + Exclude: + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: SingleLineConditionsOnly. +Style/ConditionalAssignment: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +Style/IfInsideElse: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 9 +# Cop supports --auto-correct. +Style/MutableConstant: + Exclude: + - 'lib/claide/ansi.rb' + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ParallelAssignment: + Exclude: + - 'lib/claide/command/argument_suggester.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/RedundantInterpolation: + Exclude: + - 'lib/claide/command/argument_suggester.rb' diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.yardopts b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.yardopts new file mode 100644 index 0000000..a647564 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/.yardopts @@ -0,0 +1 @@ +--markup markdown --protected --charset=utf-8 lib diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/CHANGELOG.md new file mode 100644 index 0000000..2fd0d62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/CHANGELOG.md @@ -0,0 +1,265 @@ +# CLAide Changelog + +## 1.1.0 (2022-01-12) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.3 (2019-08-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Correctly handle `--help` flags when using `argv.remainder!` after initialization + [Eric Amorde](https://github.com/amorde), + [tripleCC](https://github.com/tripleCC) + [#87](https://github.com/CocoaPods/CLAide/pull/87) + + +## 1.0.2 (2017-06-06) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Avoid a method redefinition warning when requiring `claide`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.1 (2016-10-10) + +##### Bug Fixes + +* Adds a fix for older versions of Rubygems when CLAide crashes. + [Samuel Giddins](https://github.com/segiddins) + [#73](https://github.com/CocoaPods/CLAide/issues/73) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix circular require of `claide/ansi` in `claide/ansi/string_escaper`. + [bootstraponline](https://github.com/bootstraponline) + [#66](https://github.com/CocoaPods/CLAide/issues/66) + + +## 1.0.0.beta.3 (2016-03-15) + +##### Enhancements + +* Added `Command.option` to easily add a single option to a command class. 
+ [Samuel Giddins](https://github.com/segiddins) + [#64](https://github.com/CocoaPods/CLAide/issues/64) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-03-08) + +##### Bug Fixes + +* Attempt to get the terminal width without shelling out to `tput`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Bug Fixes + +* The plugin manager will now properly activate plugin gems, ensuring all of + their files are requirable. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.1 (2015-07-05) + +##### Bug Fixes + +* Fix a regression when contradictory flags were given in `ARGV` -- the last + flag given will once again be the value returned, and all entries for that key + are removed. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.0 (2015-07-02) + +##### Enhancements + +* Properly parse everything in `ARGV` after `--` as an argument. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/CLAide/issues/48) + +* Allow parsing an option that occurs multiple times. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.2 (2015-06-27) + +##### Enhancements + +* Add `ARGV#remainder!`, which returns all the remaining arguments, deleting + them from the receiver. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.1 (2015-02-25) + +###### Bug Fixes + +* Silence errors while loading plugins. + [Clément Beffa](https://github.com/cl3m) + [#44](https://github.com/CocoaPods/CLAide/issues/44) + + +## 0.8.0 (2014-12-25) + +###### Breaking + +* Removes the `ShellCompletionHelper` along with completion script for ZSH. This is out of the scope of CLAide. + [Eloy Durán](https://github.com/alloy) + [#43](https://github.com/CocoaPods/CLAide/issues/43) + +* Various refactoring replacing “Helper” API’s which specialised classes such as ArgumentSuggester, TextWrapper and PluginManager. + [Eloy Durán](https://github.com/alloy) + +###### Enhancements + +* Added convenience method to invoke commands more easily. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/40) + +* Changes to the PluginManager to handle multiple plugin prefixes, which by default adds the `clad` plugin prefix. + [Eloy Durán](https://github.com/alloy) + +## 0.7.0 (2014-09-11) + +###### Breaking + +* Plugins are now expected to include the `cocoapods-plugin.rb` file in + `./lib`. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +###### Enhancements + +* Improved messages for exceptions generated by plugins. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +* Use the Argument class to describe arguments. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* Support for argument alternatives and repeatable arguments (ellipsis). + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* No stack trace if --help and --vebose are combined. + [Marius Rackwitz](https://github.com/mrackwitz) + [#36](https://github.com/CocoaPods/CLAide/issues/36) + + +## 0.6.1 (2014-05-20) + +###### Bug Fixes + +* Respect the ANSI flag for the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#34](https://github.com/CocoaPods/CLAide/issues/34) + +* Underline the colon of the titles of the help banner. 
+ [Fabio Pelosin](https://github.com/fabiopelosin) + +## 0.6.0 (2014-05-19) + +###### Enhancements + +* Use an array to describe arguments. + [Fabio Pelosin][fabiopelosin] + [#26](https://github.com/CocoaPods/CLAide/issues/26) + +* Improved layout and contents of help banner + [Fabio Pelosin](https://github.com/fabiopelosin) + [#25](https://github.com/CocoaPods/CLAide/pull/25) + +* Colorize option, arguments, and example commands in the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#12](https://github.com/CocoaPods/CLAide/issues/12) + +* Add support for ANSI escape sequences. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#17](https://github.com/CocoaPods/CLAide/issues/17) + [#20](https://github.com/CocoaPods/CLAide/pull/20) + [#24](https://github.com/CocoaPods/CLAide/pull/24) + +* Add support for completion script + [Fabio Pelosin](https://github.com/fabiopelosin) + [#19](https://github.com/CocoaPods/CLAide/pull/19) + +* Add support for version logic via the introduction of the `version` class + attribute to the `CLAide::Commmand` class. If a value for the attribute is + specified the `--version` flag is added. The `--version --verbose` flags + include the version of the plugins in the output. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#13](https://github.com/CocoaPods/CLAide/issues/13) + [#14](https://github.com/CocoaPods/CLAide/issues/14) + +## 0.5.0 (2014-03-26) + +###### Enhancements + +* Add a `ignore_in_command_lookup` option to commands, which makes it possible + to have anonymous command classes that are or only meant to provide common + functionality, but are otherwise completely ignored during parsing, command + lookup, and help banner printing. + [Eloy Durán](https://github.com/alloy) + +* Deprecate the `color` option in favor of `ansi`. This is more abstract and + can be used for commands that only prettify output by using, for instance, + the bold ANSI code. This applies to the `CLAide` APIs as well. + [Eloy Durán](https://github.com/alloy) + +* Add more hooks that allow the user to customize how to prettify output. + [Eloy Durán](https://github.com/alloy) + +* Word wrap option descriptions to terminal width. + [Eloy Durán](https://github.com/alloy) + [#6](https://github.com/CocoaPods/CLAide/issues/6) + + +## 0.4.0 (2013-11-14) + +###### Enhancements + +* Added support for plugins. + [Les Hill](https://github.com/leshill) + [#1](https://github.com/CocoaPods/CLAide/pull/1) diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Gemfile new file mode 100644 index 0000000..c33e9bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Gemfile @@ -0,0 +1,22 @@ +source 'https://rubygems.org' + +gemspec + +gem 'rake' + +group :development do + gem 'kicker' + gem 'colored' # for examples +end + +group :spec do + gem 'bacon' + gem 'json', '< 3' + gem 'mocha-on-bacon' + gem 'prettybacon' + + gem 'parallel', '<= 1.19.2' + gem 'rubocop', '<= 0.81.0' + gem 'rubocop-performance', '<= 1.5.2', :require => nil + gem 'simplecov' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Gemfile.lock new file mode 100644 index 0000000..22fe1a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Gemfile.lock @@ -0,0 +1,79 @@ +PATH + remote: . 
+ specs: + claide (1.1.0) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.4.2) + bacon (1.2.0) + colored (1.2) + docile (1.1.5) + ffi (1.14.2) + jaro_winkler (1.5.4) + json (2.5.1) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + multi_json (1.10.1) + notify (0.5.2) + parallel (1.19.2) + parser (3.1.0.0) + ast (~> 2.4.1) + prettybacon (0.0.2) + bacon (~> 1.2) + rainbow (3.0.0) + rake (10.3.2) + rb-fsevent (0.9.4) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.3) + ffi (>= 0.5.0) + rexml (3.2.5) + rubocop (0.81.0) + jaro_winkler (~> 1.5.1) + parallel (~> 1.10) + parser (>= 2.7.0.1) + rainbow (>= 2.2.2, < 4.0) + rexml + ruby-progressbar (~> 1.7) + unicode-display_width (>= 1.4.0, < 2.0) + rubocop-performance (1.5.2) + rubocop (>= 0.71.0) + ruby-progressbar (1.11.0) + simplecov (0.9.1) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.8.0) + simplecov-html (0.8.0) + unicode-display_width (1.8.0) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + claide! + colored + json (< 3) + kicker + mocha-on-bacon + parallel (<= 1.19.2) + prettybacon + rake + rubocop (<= 0.81.0) + rubocop-performance (<= 1.5.2) + simplecov + +BUNDLED WITH + 2.3.4 diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/README.md b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/README.md new file mode 100644 index 0000000..f4ed63d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/README.md @@ -0,0 +1,115 @@ +# Hi, I’m Claide, your command-line tool aide. + +[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/claide/ci)](https://github.com/CocoaPods/claide/actions) +[![Gem Version](https://img.shields.io/gem/v/claide)](https://rubygems.org/gems/claide) + +I was born out of a need for a _simple_ option and command parser, while still +providing an API that allows you to quickly create a full featured command-line +interface. 
+ +## Install + +``` +$ [sudo] gem install claide +``` + + +## Usage + +For full documentation, on the API of CLAide, visit [rubydoc.info][docs]. + + +### Argument handling + +At its core, a library, such as myself, needs to parse the parameters specified +by the user. + +Working with parameters is done through the `CLAide::ARGV` class. It takes an +array of parameters and parses them as either flags, options, or arguments. + +| Parameter | Description | +| :---: | :---: | +| `--milk`, `--no-milk` | A boolean ‘flag’, which may be negated. | +| `--sweetener=honey` | An ‘option’ consists of a key, a ‘=’, and a value. | +| `tea` | An ‘argument’ is just a value. | + + +Accessing flags, options, and arguments, with the following methods, will also +remove the parameter from the remaining unprocessed parameters. + +```ruby +argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) +argv.shift_argument # => 'tea' +argv.shift_argument # => nil +argv.flag?('milk') # => false +argv.flag?('milk') # => nil +argv.option('sweetener') # => 'honey' +argv.option('sweetener') # => nil +``` + + +In case the requested flag or option is not present, `nil` is returned. You can +specify a default value to be used as the optional second method parameter: + +```ruby +argv = CLAide::ARGV.new(['tea']) +argv.flag?('milk', true) # => true +argv.option('sweetener', 'sugar') # => 'sugar' +``` + + +Unlike flags and options, accessing all of the arguments can be done in either +a preserving or mutating way: + +```ruby +argv = CLAide::ARGV.new(['tea', 'coffee']) +argv.arguments # => ['tea', 'coffee'] +argv.arguments! # => ['tea', 'coffee'] +argv.arguments # => [] +``` + + +### Command handling + +Commands are actions that a tool can perform. Every command is represented by +its own command class. + +Commands may be nested, in which case they inherit from the ‘super command’ +class. Some of these nested commands may not actually perform any work +themselves, but are rather used as ‘super commands’ _only_, in which case they +are ‘abtract commands’. + +Running commands is typically done through the `CLAide::Command.run(argv)` +method, which performs the following three steps: + +1. Parses the given parameters, finds the command class matching the parameters, + and instantiates it with the remaining parameters. It’s each nested command + class’ responsibility to remove the parameters it handles from the remaining + parameters, _before_ calling the `super` implementation. + +2. Asks the command instance to validate its parameters, but only _after_ + calling the `super` implementation. The `super` implementation will show a + help banner in case the `--help` flag is specified, not all parameters were + removed from the parameter list, or the command is an abstract command. + +3. Calls the `run` method on the command instance, where it may do its work. + +4. Catches _any_ uncaught exception and shows it to user in a meaningful way. + * A `Help` exception triggers a help banner to be shown for the command. + * A exception that includes the `InformativeError` module will show _only_ + the message, unless disabled with the `--verbose` flag; and in red, + depending on the color configuration. + * Any other type of exception will be passed to `Command.report_error(error)` + for custom error reporting (such as the one in [CocoaPods][report-error]). 
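+
+As a rough sketch of the pieces described above (the `BeverageMaker` tool and
+its `tea` subcommand are purely illustrative, not part of CLAide itself), a
+small tool could be laid out like this:
+
+```ruby
+require 'claide'
+
+class BeverageMaker < CLAide::Command
+  self.abstract_command = true
+  self.command = 'beverage-maker'
+  self.summary = 'Makes delicious beverages from your terminal.'
+
+  class Tea < BeverageMaker
+    self.summary = 'Brews some tea'
+    self.arguments = [CLAide::Argument.new('FLAVOR', false)]
+
+    # Consume the parameters this command handles before calling `super`.
+    def initialize(argv)
+      @flavor = argv.shift_argument
+      super
+    end
+
+    def run
+      puts "* Brewing #{@flavor || 'plain'} tea"
+    end
+  end
+end
+
+BeverageMaker.run(ARGV)
+```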
+ +In case you want to call commands from _inside_ other commands, you should use +the `CLAide::Command.parse(argv)` method to retrieve an instance of the command +and call `run` on it. Unless you are using user-supplied parameters, there +should not be a need to validate the parameters. + +See the [example][example] for a illustration of how to define commands. + + +[docs]: http://www.rubydoc.info/github/CocoaPods/CLAide/index +[example]: https://github.com/CocoaPods/CLAide/blob/master/examples/make.rb +[report-error]: https://github.com/CocoaPods/CocoaPods/blob/054fe5c861d932219ec40a91c0439a7cfc3a420c/lib/cocoapods/command.rb#L36 diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Rakefile new file mode 100644 index 0000000..dc22070 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/Rakefile @@ -0,0 +1,57 @@ +# encoding: utf-8 + +#-- Bootstrap --------------------------------------------------------------# + +desc 'Initializes your working copy to run the specs' +task :bootstrap do + if system('which bundle') + title 'Installing gems' + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + require 'bundler/gem_tasks' + task :default => :spec + + #-- Specs ------------------------------------------------------------------# + + desc 'Run specs' + task :spec do + title 'Running Unit Tests' + files = FileList['spec/**/*_spec.rb'].shuffle.join(' ') + sh "bundle exec bacon #{files}" + + Rake::Task['rubocop'].invoke + end + + #-- Rubocop ----------------------------------------------------------------# + + desc 'Check code against RuboCop rules' + task :rubocop do + sh 'bundle exec rubocop' + end + +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' \ + "\e[0m" +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/claide.gemspec b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/claide.gemspec new file mode 100644 index 0000000..1f298d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/claide.gemspec @@ -0,0 +1,23 @@ +# -*- encoding: utf-8 -*- +$:.unshift File.expand_path('../lib', __FILE__) +require File.expand_path('../lib/claide/gem_version', __FILE__) + +Gem::Specification.new do |s| + s.name = "claide" + s.version = CLAide::VERSION + s.license = "MIT" + s.email = ["eloy.de.enige@gmail.com", "fabiopelosin@gmail.com"] + s.homepage = "https://github.com/CocoaPods/CLAide" + s.authors = ["Eloy Duran", "Fabio Pelosin"] + + s.summary = "A small command-line interface framework." + + s.files = `git ls-files -z`.split("\0").reject { |f| f =~ /\A(spec|examples)/i } + + ## Make sure you can build the gem on older versions of RubyGems too: + s.rubygems_version = "1.6.2" + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.specification_version = 3 if s.respond_to? 
:specification_version + + s.required_ruby_version = ">= 2.3.0" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide.rb new file mode 100644 index 0000000..25d2c75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide.rb @@ -0,0 +1,13 @@ +# encoding: utf-8 + +# The mods of interest are {CLAide::ARGV}, {CLAide::Command}, and +# {CLAide::InformativeError} +# +module CLAide + require 'claide/ansi' + require 'claide/argument' + require 'claide/argv' + require 'claide/command' + require 'claide/help' + require 'claide/informative_error' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi.rb new file mode 100644 index 0000000..0839ed6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi.rb @@ -0,0 +1,126 @@ +# encoding: utf-8 + +require 'claide/ansi/cursor' +require 'claide/ansi/graphics' + +module CLAide + # Provides support for ANSI Escape sequences + # + # For more information see: + # + # - http://ascii-table.com/ansi-escape-sequences.php + # - http://en.wikipedia.org/wiki/ANSI_escape_code + # + # This functionality has been inspired and derived from the following gems: + # + # - colored + # - colorize + # + class ANSI + extend Cursor + extend Graphics + + class << self + # @return [Bool] Wether the string mixin should be disabled to return the + # original string. This method is intended to offer a central location + # where to disable ANSI logic without needed to implement conditionals + # across the code base of clients. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # ANSI.disabled = true + # "example".ansi.yellow #=> "example" + # + attr_accessor :disabled + end + + # @return [Hash{Symbol => Fixnum}] The text attributes codes by their + # English name. + # + TEXT_ATTRIBUTES = { + :bold => 1, + :underline => 4, + :blink => 5, + :reverse => 7, + :hidden => 8, + } + + # @return [Hash{Symbol => Fixnum}] The codes to disable a text attribute by + # their name. + # + TEXT_DISABLE_ATTRIBUTES = { + :bold => 21, + :underline => 24, + :blink => 25, + :reverse => 27, + :hidden => 28, + } + + # Return [String] The escape sequence to reset the graphics. + # + RESET_SEQUENCE = "\e[0m" + + # @return [Hash{Symbol => Fixnum}] The colors codes by their English name. + # + COLORS = { + :black => 0, + :red => 1, + :green => 2, + :yellow => 3, + :blue => 4, + :magenta => 5, + :cyan => 6, + :white => 7, + } + + # Return [String] The escape sequence for the default foreground color. + # + DEFAULT_FOREGROUND_COLOR = "\e[39m" + + # Return [String] The escape sequence for the default background color. + # + DEFAULT_BACKGROUND_COLOR = "\e[49m" + + # @return [Fixnum] The code of a key given the map. + # + # @param [Symbol] key + # The key for which the code is needed. + # + # @param [Hash{Symbol => Fixnum}] map + # A hash which associates each code to each key. + # + # @raise If the key is not provided. + # @raise If the key is not present in the map. 
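+    #
+    # @example A quick illustration using the maps defined above:
+    #
+    #   ANSI.code_for_key(:red,  ANSI::COLORS)          # => 1
+    #   ANSI.code_for_key(:bold, ANSI::TEXT_ATTRIBUTES) # => 1
+    #   ANSI.code_for_key(:pink, ANSI::COLORS)          # raises ArgumentError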
+ # + def self.code_for_key(key, map) + unless key + raise ArgumentError, 'A key must be provided' + end + code = map[key] + unless code + raise ArgumentError, "Unsupported key: `#{key}`" + end + code + end + end +end + +#-- String mixin -------------------------------------------------------------# + +require 'claide/ansi/string_escaper' + +class String + # @return [StringEscaper] An object which provides convenience methods to + # wrap the receiver in ANSI sequences. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # "example".ansi.on_red #=> "\e[41mexample\e[49m" + # "example".ansi.bold #=> "\e[1mexample\e[21m" + # + def ansi + CLAide::ANSI::StringEscaper.new(self) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/cursor.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/cursor.rb new file mode 100644 index 0000000..acfd5b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/cursor.rb @@ -0,0 +1,69 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the position + # of the cursor and to erase parts of text. + # + module Cursor + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] line + # The line where to place the cursor. + # + # @param [Fixnum] column + # The column where to place the cursor. + # + def self.set_cursor_position(line = 0, column = 0) + "\e[#{line};#{column}H" + end + + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] lines + # The amount of lines the cursor should be moved to. + # Negative values indicate up direction and positive ones + # down direction. + # + # @param [Fixnum] columns + # The amount of columns the cursor should be moved to. + # Negative values indicate left direction and positive ones + # right direction. + # + def self.move_cursor(lines, columns = 0) + lines_code = lines < 0 ? 'A' : 'B' + columns_code = columns > 0 ? 'C' : 'D' + "\e[#{lines.abs}#{lines_code};#{columns.abs}#{columns_code}" + end + + # @return [String] The escape sequence to save the cursor position. + # + def self.save_cursor_position + "\e[s" + end + + # @return [String] The escape sequence to restore the cursor to the + # previously saved position. This sequence also clears all the + # output after the position. + # + def self.restore_cursor_position + "\e[u" + end + + # @return [String] The escape sequence to erase the display. + # + def self.erase_display + "\e[2J" + end + + # @return [String] The escape sequence to erase a line form the + # cursor position to then end. + # + def self.erase_line + "\e[K" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/graphics.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/graphics.rb new file mode 100644 index 0000000..e5c2d15 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/graphics.rb @@ -0,0 +1,72 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the graphic + # mode. + # + module Graphics + # @return [String] The escape sequence for a text attribute. + # + # @param [Symbol] key + # The name of the text attribute. + # + def self.text_attribute(key) + code = ANSI.code_for_key(key, TEXT_ATTRIBUTES) + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color. 
+ # + # @param [Symbol] key + # The name of the color. + # + def self.foreground_color(key) + code = ANSI.code_for_key(key, COLORS) + 30 + graphics_mode(code) + end + + # @return [String] The escape sequence for a background color. + # + # @param [Symbol] key + # The name of the color. + # + def self.background_color(key) + code = ANSI.code_for_key(key, COLORS) + 40 + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color using the + # xterm-256 format. + # + # @param [Fixnum] color + # The value of the color. + # + def self.foreground_color_256(color) + code = [38, 5, color] + graphics_mode(code) + end + + # @return [String] The escape sequence for a background color using the + # xterm-256 format. + # + # @param [Fixnum] color + # The value of the color. + # + def self.background_color_256(color) + code = [48, 5, color] + graphics_mode(code) + end + + # @return [String] The escape sequence for a single or a list of codes. + # + # @param [Fixnum, Array] codes + # The code(s). + # + def self.graphics_mode(codes) + codes = Array(codes) + "\e[#{codes.join(';')}m" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/string_escaper.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/string_escaper.rb new file mode 100644 index 0000000..b6f461c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/ansi/string_escaper.rb @@ -0,0 +1,79 @@ +module CLAide + class ANSI + # Provides support to wrap strings in ANSI sequences according to the + # `ANSI.disabled` setting. + # + class StringEscaper < String + # @param [String] string The string to wrap. + # + def initialize(string) + super + end + + # @return [StringEscaper] Wraps a string in the given ANSI sequences, + # taking care of handling existing sequences for the same + # family of attributes (i.e. attributes terminated by the + # same sequence). + # + def wrap_in_ansi_sequence(open, close) + if ANSI.disabled + self + else + gsub!(close, open) + insert(0, open).insert(-1, close) + end + end + + # @return [StringEscaper] + # + # @param [Array] keys + # One or more keys corresponding to ANSI codes to apply to the + # string. + # + def apply(*keys) + keys.flatten.each do |key| + send(key) + end + self + end + + ANSI::COLORS.each_key do |key| + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each foreground color (e.g. #blue). + # + # The methods handle nesting of ANSI sequences. + # + define_method key do + open = Graphics.foreground_color(key) + close = ANSI::DEFAULT_FOREGROUND_COLOR + wrap_in_ansi_sequence(open, close) + end + + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each background color (e.g. #on_blue). + # + # The methods handle nesting of ANSI sequences. + # + define_method "on_#{key}" do + open = Graphics.background_color(key) + close = ANSI::DEFAULT_BACKGROUND_COLOR + wrap_in_ansi_sequence(open, close) + end + end + + ANSI::TEXT_ATTRIBUTES.each_key do |key| + # Defines a method returns a copy of the receiver wrapped in an ANSI + # sequence for each text attribute (e.g. #bold). + # + # The methods handle nesting of ANSI sequences. 
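+        #
+        # @example A quick illustration of nesting a color inside a text
+        #          attribute (assuming `ANSI.disabled` is false):
+        #
+        #   "warning".ansi.yellow.bold
+        #   # => "\e[1m\e[33mwarning\e[39m\e[21m"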
+ # + define_method key do + open = Graphics.text_attribute(key) + close_code = TEXT_DISABLE_ATTRIBUTES[key] + close = Graphics.graphics_mode(close_code) + wrap_in_ansi_sequence(open, close) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/argument.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/argument.rb new file mode 100644 index 0000000..4d54f29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/argument.rb @@ -0,0 +1,62 @@ +# encoding: utf-8 + +module CLAide + # This class is used to represent individual arguments to present to + # the command help banner + # + class Argument + # The string used for ellipsis / repeatable arguments in the banner + # + ELLIPSIS = '...' + + # @return [Array] + # List of alternate names for the parameters + attr_reader :names + + # @return [Boolean] + # Indicates if the argument is required (not optional) + # + attr_accessor :required + alias_method :required?, :required + + # @return [Boolean] + # Indicates if the argument is repeatable (= can appear multiple + # times in the command, which is indicated by '...' in the banner) + # + attr_accessor :repeatable + alias_method :repeatable?, :repeatable + + # @param [String,Array] names + # List of the names of each parameter alternatives. + # For convenience, if there is only one alternative for that + # parameter, we can use a String instead of a 1-item Array + # + # @param [Boolean] required + # true if the parameter is required, false if it is optional + # + # @param [Boolean] repeatable + # If true, the argument can appear multiple times in the command. + # In that case, an ellipsis will be appended after the argument + # in the help banner. + # + # @example + # + # # A required parameter that can be either a NAME or URL + # Argument.new(%(NAME URL), true) + # + def initialize(names, required, repeatable = false) + @names = Array(names) + @required = required + @repeatable = repeatable + end + + # @return [Boolean] true on equality + # + # @param [Argument] other the Argument compared against + # + def ==(other) + other.is_a?(Argument) && + names == other.names && required == other.required + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/argv.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/argv.rb new file mode 100644 index 0000000..ecadfaf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/argv.rb @@ -0,0 +1,329 @@ +# encoding: utf-8 + +module CLAide + # This class is responsible for parsing the parameters specified by the user, + # accessing individual parameters, and keep state by removing handled + # parameters. + # + class ARGV + # @return [ARGV] Coerces an object to the ARGV class if needed. + # + # @param [Object] argv + # The object which should be converted to the ARGV class. + # + def self.coerce(argv) + if argv.is_a?(ARGV) + argv + else + ARGV.new(argv) + end + end + + # @param [Array<#to_s>] argv + # A list of parameters. + # + def initialize(argv) + @entries = Parser.parse(argv) + end + + # @return [Boolean] Whether or not there are any remaining unhandled + # parameters. + # + def empty? + @entries.empty? + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format a user specifies it in. 
+ # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder # => ['--no-milk', '--sweetener=honey'] + # + def remainder + @entries.map do |type, (key, value)| + case type + when :arg + key + when :flag + "--#{'no-' if value == false}#{key}" + when :option + "--#{key}=#{value}" + end + end + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format the user specified them. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder! # => ['--no-milk', '--sweetener=honey'] + # argv.remainder # => [] + # + def remainder! + remainder.tap { @entries.clear } + end + + # @return [Hash] A hash that consists of the remaining flags and options + # and their values. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.options # => { 'milk' => false, 'sweetener' => 'honey' } + # + def options + options = {} + @entries.each do |type, (key, value)| + options[key] = value unless type == :arg + end + options + end + + # @return [Array] A list of the remaining arguments. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white', 'biscuit'] + # + def arguments + @entries.map { |type, value| value if type == :arg }.compact + end + + # @return [Array] A list of the remaining arguments. + # + # @note This version also removes the arguments from the remaining + # parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.arguments # => ['tea', 'white', 'biscuit'] + # argv.arguments! # => ['tea', 'white', 'biscuit'] + # argv.arguments # => [] + # + def arguments! + arguments = [] + while arg = shift_argument + arguments << arg + end + arguments + end + + # @return [String] The first argument in the remaining parameters. + # + # @note This will remove the argument from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white'] + # + def shift_argument + if index = @entries.find_index { |type, _| type == :arg } + entry = @entries[index] + @entries.delete_at(index) + entry.last + end + end + + # @return [Boolean, nil] Returns `true` if the flag by the specified `name` + # is among the remaining parameters and is not negated. + # + # @param [String] name + # The name of the flag to look for among the remaining parameters. + # + # @param [Boolean] default + # The value that is returned in case the flag is not among the + # remaining parameters. + # + # @note This will remove the flag from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.flag?('milk') # => false + # argv.flag?('milk') # => nil + # argv.flag?('milk', true) # => true + # argv.remainder # => ['tea', '--sweetener=honey'] + # + def flag?(name, default = nil) + delete_entry(:flag, name, default, true) + end + + # @return [String, nil] Returns the value of the option by the specified + # `name` is among the remaining parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @param [String] default + # The value that is returned in case the option is not among the + # remaining parameters. 
+ # + # @note This will remove the option from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.option('sweetener') # => 'honey' + # argv.option('sweetener') # => nil + # argv.option('sweetener', 'sugar') # => 'sugar' + # argv.remainder # => ['tea', '--no-milk'] + # + def option(name, default = nil) + delete_entry(:option, name, default) + end + + # @return [Array] Returns an array of all the values of the option + # with the specified `name` among the remaining + # parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @note This will remove the option from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['--ignore=foo', '--ignore=bar']) + # argv.all_options('include') # => [] + # argv.all_options('ignore') # => ['bar', 'foo'] + # argv.remainder # => [] + # + def all_options(name) + options = [] + while entry = option(name) + options << entry + end + options + end + + private + + # @return [Array>] A list of tuples for each + # non consumed parameter, where the first entry is the `type` and + # the second entry the actual parsed parameter. + # + attr_reader :entries + + # @return [Bool, String, Nil] Removes an entry from the entries list and + # returns its value or the default value if the entry was not + # present. + # + # @param [Symbol] requested_type + # The type of the entry. + # + # @param [String] requested_key + # The key of the entry. + # + # @param [Bool, String, Nil] default + # The value which should be returned if the entry is not present. + # + # @param [Bool] delete_all + # Whether all values matching `requested_type` and `requested_key` + # should be deleted. + # + def delete_entry(requested_type, requested_key, default, delete_all = false) + pred = proc do |type, (key, _value)| + requested_key == key && requested_type == type + end + entry = entries.reverse_each.find(&pred) + delete_all ? entries.delete_if(&pred) : entries.delete(entry) + + entry.nil? ? default : entry.last.last + end + + module Parser + # @return [Array>] A list of tuples for each + # parameter, where the first entry is the `type` and the second + # entry the actual parsed parameter. + # + # @example + # + # list = parse(['tea', '--no-milk', '--sweetener=honey']) + # list # => [[:arg, "tea"], + # [:flag, ["milk", false]], + # [:option, ["sweetener", "honey"]]] + # + def self.parse(argv) + entries = [] + copy = argv.map(&:to_s) + double_dash = false + while argument = copy.shift + next if !double_dash && double_dash = (argument == '--') + type = double_dash ? :arg : argument_type(argument) + parsed_argument = parse_argument(type, argument) + entries << [type, parsed_argument] + end + entries + end + + # @return [Symbol] Returns the type of an argument. The types can be + # either: `:arg`, `:flag`, `:option`. + # + # @param [String] argument + # The argument to check. + # + def self.argument_type(argument) + if argument.start_with?('--') + if argument.include?('=') + :option + else + :flag + end + else + :arg + end + end + + # @return [String, Array] Returns the argument itself for + # normal arguments (like commands) and a tuple with the key and + # the value for options and flags. + # + # @param [Symbol] type + # The type of the argument. + # + # @param [String] argument + # The argument to check. 
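+      #
+      # @example A quick illustration of the three argument types:
+      #
+      #   Parser.parse_argument(:arg, 'tea')                  # => 'tea'
+      #   Parser.parse_argument(:flag, '--no-milk')           # => ['milk', false]
+      #   Parser.parse_argument(:option, '--sweetener=honey') # => ['sweetener', 'honey']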
+ # + def self.parse_argument(type, argument) + case type + when :arg + return argument + when :flag + return parse_flag(argument) + when :option + return argument[2..-1].split('=', 2) + end + end + + # @return [String, Array] Returns the parameter + # describing a flag arguments. + # + # @param [String] argument + # The flag argument to check. + # + def self.parse_flag(argument) + if argument.start_with?('--no-') + key = argument[5..-1] + value = false + else + key = argument[2..-1] + value = true + end + [key, value] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command.rb new file mode 100644 index 0000000..6414b5c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command.rb @@ -0,0 +1,669 @@ +# encoding: utf-8 + +require 'claide/command/banner' +require 'claide/command/plugin_manager' +require 'claide/command/argument_suggester' + +module CLAide + # This class is used to build a command-line interface + # + # Each command is represented by a subclass of this class, which may be + # nested to create more granular commands. + # + # Following is an overview of the types of commands and what they should do. + # + # ### Any command type + # + # * Inherit from the command class under which the command should be nested. + # * Set {Command.summary} to a brief description of the command. + # * Override {Command.options} to return the options it handles and their + # descriptions and prepending them to the results of calling `super`. + # * Override {Command#initialize} if it handles any parameters. + # * Override {Command#validate!} to check if the required parameters the + # command handles are valid, or call {Command#help!} in case they’re not. + # + # ### Abstract command + # + # The following is needed for an abstract command: + # + # * Set {Command.abstract_command} to `true`. + # * Subclass the command. + # + # When the optional {Command.description} is specified, it will be shown at + # the top of the command’s help banner. + # + # ### Normal command + # + # The following is needed for a normal command: + # + # * Set {Command.arguments} to the description of the arguments this command + # handles. + # * Override {Command#run} to perform the actual work. + # + # When the optional {Command.description} is specified, it will be shown + # underneath the usage section of the command’s help banner. Otherwise this + # defaults to {Command.summary}. + # + class Command + class << self + # @return [Boolean] Indicates whether or not this command can actually + # perform work of itself, or that it only contains subcommands. + # + attr_accessor :abstract_command + alias_method :abstract_command?, :abstract_command + + # @return [Boolean] Indicates whether or not this command is used during + # command parsing and whether or not it should be shown in the + # help banner or to show its subcommands instead. + # + # Setting this to `true` implies it’s an abstract command. + # + attr_reader :ignore_in_command_lookup + alias_method :ignore_in_command_lookup?, :ignore_in_command_lookup + def ignore_in_command_lookup=(flag) + @ignore_in_command_lookup = self.abstract_command = flag + end + + # @return [String] The subcommand which an abstract command should invoke + # by default. + # + attr_accessor :default_subcommand + + # @return [String] A brief description of the command, which is shown + # next to the command in the help banner of a parent command. 
+ # + attr_accessor :summary + + # @return [String] A longer description of the command, which is shown + # underneath the usage section of the command’s help banner. Any + # indentation in this value will be ignored. + # + attr_accessor :description + + # @return [Array] The prefixes used to search for CLAide plugins. + # Plugins are loaded via their `_plugin.rb` file. + # Defaults to search for `claide` plugins. + # + def plugin_prefixes + @plugin_prefixes ||= ['claide'] + end + attr_writer :plugin_prefixes + + # @return [Array] + # A list of arguments the command handles. This is shown + # in the usage section of the command’s help banner. + # Each Argument in the array represents an argument by its name + # (or list of alternatives) and whether it's required or optional + # + def arguments + @arguments ||= [] + end + + # @param [Array] arguments + # An array listing the command arguments. + # Each Argument object describe the argument by its name + # (or list of alternatives) and whether it's required or optional + # + # @todo Remove deprecation + # + def arguments=(arguments) + if arguments.is_a?(Array) + if arguments.empty? || arguments[0].is_a?(Argument) + @arguments = arguments + else + self.arguments_array = arguments + end + else + self.arguments_string = arguments + end + end + + # @return [Boolean] The default value for {Command#ansi_output}. This + # defaults to `true` if `STDOUT` is connected to a TTY and + # `String` has the instance methods `#red`, `#green`, and + # `#yellow` (which are defined by, for instance, the + # [colored](https://github.com/defunkt/colored) gem). + # + def ansi_output + if @ansi_output.nil? + @ansi_output = STDOUT.tty? + end + @ansi_output + end + attr_writer :ansi_output + alias_method :ansi_output?, :ansi_output + + # @return [String] The name of the command. Defaults to a snake-cased + # version of the class’ name. + # + def command + @command ||= name.split('::').last.gsub(/[A-Z]+[a-z]*/) do |part| + part.downcase << '-' + end[0..-2] + end + attr_writer :command + + # @return [String] The version of the command. This value will be printed + # by the `--version` flag if used for the root command. + # + attr_accessor :version + end + + #-------------------------------------------------------------------------# + + # @return [String] The full command up-to this command, as it would be + # looked up during parsing. + # + # @note (see #ignore_in_command_lookup) + # + # @example + # + # BevarageMaker::Tea.full_command # => "beverage-maker tea" + # + def self.full_command + if superclass == Command + ignore_in_command_lookup? ? '' : command + else + if ignore_in_command_lookup? + superclass.full_command + else + "#{superclass.full_command} #{command}" + end + end + end + + # @return [Bool] Whether this is the root command class + # + def self.root_command? + superclass == CLAide::Command + end + + # @return [Array] A list of all command classes that are nested + # under this command. + # + def self.subcommands + @subcommands ||= [] + end + + # @return [Array] A list of command classes that are nested under + # this command _or_ the subcommands of those command classes in + # case the command class should be ignored in command lookup. + # + def self.subcommands_for_command_lookup + subcommands.map do |subcommand| + if subcommand.ignore_in_command_lookup? 
+ subcommand.subcommands_for_command_lookup + else + subcommand + end + end.flatten + end + + # Searches the list of subcommands that should not be ignored for command + # lookup for a subcommand with the given `name`. + # + # @param [String] name + # The name of the subcommand to be found. + # + # @return [CLAide::Command, nil] The subcommand, if found. + # + def self.find_subcommand(name) + subcommands_for_command_lookup.find { |sc| sc.command == name } + end + + # @visibility private + # + # Automatically registers a subclass as a subcommand. + # + def self.inherited(subcommand) + subcommands << subcommand + end + + DEFAULT_ROOT_OPTIONS = [ + ['--version', 'Show the version of the tool'], + ] + + DEFAULT_OPTIONS = [ + ['--verbose', 'Show more debugging information'], + ['--no-ansi', 'Show output without ANSI codes'], + ['--help', 'Show help banner of specified command'], + ] + + # Should be overridden by a subclass if it handles any options. + # + # The subclass has to combine the result of calling `super` and its own + # list of options. The recommended way of doing this is by concatenating + # to this classes’ own options. + # + # @return [Array] + # + # A list of option name and description tuples. + # + # @example + # + # def self.options + # [ + # ['--verbose', 'Print more info'], + # ['--help', 'Print help banner'], + # ].concat(super) + # end + # + def self.options + if root_command? + DEFAULT_ROOT_OPTIONS + DEFAULT_OPTIONS + else + DEFAULT_OPTIONS + end + end + + # Adds a new option for the current command. + # + # This method can be used in conjunction with overriding `options`. + # + # @return [void] + # + # @example + # + # option '--help', 'Print help banner ' + # + def self.option(name, description) + mod = Module.new do + define_method(:options) do + [ + [name, description], + ].concat(super()) + end + end + extend(mod) + end + private_class_method :option + + # Handles root commands options if appropriate. + # + # @param [ARGV] argv + # The parameters of the command. + # + # @return [Bool] Whether any root command option was handled. + # + def handle_root_options(argv) + return false unless self.class.root_command? + if argv.flag?('version') + print_version + return true + end + false + end + + # Prints the version of the command optionally including plugins. + # + def print_version + puts self.class.version + if verbose? + PluginManager.specifications.each do |spec| + puts "#{spec.name}: #{spec.version}" + end + end + end + + # Instantiates the command class matching the parameters through + # {Command.parse}, validates it through {Command#validate!}, and runs it + # through {Command#run}. + # + # @note The ANSI support is configured before running a command to allow + # the same process to run multiple commands with different + # settings. For example a process with ANSI output enabled might + # want to programmatically invoke another command with the output + # enabled. + # + # @param [Array, ARGV] argv + # A list of parameters. For instance, the standard `ARGV` constant, + # which contains the parameters passed to the program. + # + # @return [void] + # + def self.run(argv = []) + plugin_prefixes.each do |plugin_prefix| + PluginManager.load_plugins(plugin_prefix) + end + + argv = ARGV.coerce(argv) + command = parse(argv) + ANSI.disabled = !command.ansi_output? + unless command.handle_root_options(argv) + command.validate! 
+ command.run + end + rescue Object => exception + handle_exception(command, exception) + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] An instance of the command class that was matched by + # going through the arguments in the parameters and drilling down + # command classes. + # + def self.parse(argv) + argv = ARGV.coerce(argv) + cmd = argv.arguments.first + if cmd && subcommand = find_subcommand(cmd) + argv.shift_argument + subcommand.parse(argv) + elsif abstract_command? && default_subcommand + load_default_subcommand(argv) + else + new(argv) + end + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] Returns the default subcommand initialized with the + # given arguments. + # + def self.load_default_subcommand(argv) + unless subcommand = find_subcommand(default_subcommand) + raise 'Unable to find the default subcommand ' \ + "`#{default_subcommand}` for command `#{self}`." + end + result = subcommand.parse(argv) + result.invoked_as_default = true + result + end + + # Presents an exception to the user in a short manner in case of an + # `InformativeError` or in long form in other cases, + # + # @param [Command, nil] command + # The command from where the exception originated. + # + # @param [Object] exception + # The exception to present. + # + # @return [void] + # + def self.handle_exception(command, exception) + if exception.is_a?(InformativeError) + puts exception.message + if command.nil? || command.verbose? + puts + puts(*exception.backtrace) + end + exit exception.exit_status + else + report_error(exception) + end + end + + # Allows the application to perform custom error reporting, by overriding + # this method. + # + # @param [Exception] exception + # + # An exception that occurred while running a command through + # {Command.run}. + # + # @raise + # + # By default re-raises the specified exception. + # + # @return [void] + # + def self.report_error(exception) + plugins = PluginManager.plugins_involved_in_exception(exception) + unless plugins.empty? + puts '[!] The exception involves the following plugins:' \ + "\n - #{plugins.join("\n - ")}\n".ansi.yellow + end + raise exception + end + + # @visibility private + # + # @param [String] error_message + # The error message to show to the user. + # + # @param [Class] help_class + # The class to use to raise a ‘help’ error. + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def self.help!(error_message = nil, help_class = Help) + raise help_class.new(banner, error_message) + end + + # @visibility private + # + # Returns the banner for the command. + # + # @param [Class] banner_class + # The class to use to format help banners. + # + # @return [String] The banner for the command. + # + def self.banner(banner_class = Banner) + banner_class.new(self).formatted_banner + end + + # @visibility private + # + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def self.banner! + puts banner + exit 0 + end + + #-------------------------------------------------------------------------# + + # Set to `true` if the user specifies the `--verbose` option. + # + # @note + # + # If you want to make use of this value for your own configuration, you + # should check the value _after_ calling the `super` {Command#initialize} + # implementation. 
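As an illustration of how `parse`, `run`, and the class attributes above fit together, here is a minimal sketch of a tool built on this API. The `BeverageMaker` names are hypothetical (echoing the `full_command` example in the doc comment), and `abstract_command` is assumed to be one of the class-level attributes declared earlier in this file alongside `summary`; everything else uses calls visible in the vendored source:

    #!/usr/bin/env ruby
    # Minimal sketch of a CLAide-based tool; all names are illustrative only.
    require 'claide'

    class BeverageMaker < CLAide::Command
      self.abstract_command = true # root command only dispatches to subcommands
      self.summary = 'Makes delicious beverages from the terminal'

      # Subclasses are registered automatically via Command.inherited above.
      class Tea < BeverageMaker
        self.summary = 'Brews a cup of tea'
        self.arguments = [
          CLAide::Argument.new(%w(FLAVOR), false), # optional, rendered as [FLAVOR]
        ]

        def self.options
          [['--sugar', 'Add a lump of sugar']].concat(super)
        end

        def initialize(argv)
          @flavor = argv.shift_argument || 'earl grey'
          @sugar  = argv.flag?('sugar', false)
          super
        end

        def run
          puts "Brewing #{@flavor}#{' with sugar' if @sugar}"
          puts 'Being chatty about it.' if verbose?
        end
      end
    end

    # Loads plugins, parses ARGV into the matching subcommand, validates it and
    # runs it, so `beverage-maker tea chai --sugar` ends up in Tea#run.
    BeverageMaker.run(ARGV)
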
+ # + # @return [Boolean] + # + # Wether or not backtraces should be included when presenting the user an + # exception that includes the {InformativeError} module. + # + attr_accessor :verbose + alias_method :verbose?, :verbose + + # Set to `true` if {Command.ansi_output} returns `true` and the user + # did **not** specify the `--no-ansi` option. + # + # @note (see #verbose) + # + # @return [Boolean] + # + # Whether or not to use ANSI codes to prettify output. For instance, by + # default {InformativeError} exception messages will be colored red and + # subcommands in help banners green. + # + attr_accessor :ansi_output + alias_method :ansi_output?, :ansi_output + + # Set to `true` if initialized with a `--help` flag + # + # @return [Boolean] + # + # Whether the command was initialized with argv containing --help + # + attr_accessor :help_arg + alias_method :help?, :help_arg + + # Subclasses should override this method to remove the arguments/options + # they support from `argv` _before_ calling `super`. + # + # The `super` implementation sets the {#verbose} attribute based on whether + # or not the `--verbose` option is specified; and the {#ansi_output} + # attribute to `false` if {Command.ansi_output} returns `true`, but the + # user specified the `--no-ansi` option. + # + # @param [ARGV, Array] argv + # + # A list of (user-supplied) params that should be handled. + # + def initialize(argv) + argv = ARGV.coerce(argv) + @verbose = argv.flag?('verbose') + @ansi_output = argv.flag?('ansi', Command.ansi_output?) + @argv = argv + @help_arg = argv.flag?('help') + end + + # Convenience method. + # Instantiate the command and run it with the provided arguments at once. + # + # @note This method validate! the command before running it, but contrary to + # CLAide::Command::run, it does not load plugins nor exit on failure. + # It is up to the caller to rescue any possible exception raised. + # + # @param [String..., Array] args + # The arguments to initialize the command with + # + # @raise [Help] If validate! fails + # + def self.invoke(*args) + command = new(ARGV.new(args.flatten)) + command.validate! + command.run + end + + # @return [Bool] Whether the command was invoked by an abstract command by + # default. + # + attr_accessor :invoked_as_default + alias_method :invoked_as_default?, :invoked_as_default + + # Raises a Help exception if the `--help` option is specified, if `argv` + # still contains remaining arguments/options by the time it reaches this + # implementation, or when called on an ‘abstract command’. + # + # Subclasses should call `super` _before_ doing their own validation. This + # way when the user specifies the `--help` flag a help banner is shown, + # instead of possible actual validation errors. + # + # @raise [Help] + # + # @return [void] + # + def validate! + banner! if help? + unless @argv.empty? + argument = @argv.remainder.first + help! ArgumentSuggester.new(argument, self.class).suggestion + end + help! if self.class.abstract_command? + end + + # This method should be overridden by the command class to perform its + # work. + # + # @return [void] + # + def run + raise 'A subclass should override the `CLAide::Command#run` method to ' \ + 'actually perform some work.' + end + + protected + + # Returns the class of the invoked command + # + # @return [Command] + # + def invoked_command_class + if invoked_as_default? 
+ self.class.superclass + else + self.class + end + end + + # @param [String] error_message + # A custom optional error message + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def help!(error_message = nil) + invoked_command_class.help!(error_message) + end + + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def banner! + invoked_command_class.banner! + end + + #-------------------------------------------------------------------------# + + # Handle deprecated form of self.arguments as an + # Array> like in: + # + # self.arguments = [ ['NAME', :required], ['QUERY', :optional] ] + # + # @todo Remove deprecated format support + # + def self.arguments_array=(arguments) + warn '[!] The signature of CLAide#arguments has changed. ' \ + "Use CLAide::Argument (#{self}: `#{arguments}`)".ansi.yellow + @arguments = arguments.map do |(name_str, type)| + names = name_str.split('|') + required = (type == :required) + Argument.new(names, required) + end + end + + # Handle deprecated form of self.arguments as a String, like in: + # + # self.arguments = 'NAME [QUERY]' + # + # @todo Remove deprecated format support + # + def self.arguments_string=(arguments) + warn '[!] The specification of arguments as a string has been' \ + " deprecated #{self}: `#{arguments}`".ansi.yellow + @arguments = arguments.split(' ').map do |argument| + if argument.start_with?('[') + Argument.new(argument.sub(/\[(.*)\]/, '\1').split('|'), false) + else + Argument.new(argument.split('|'), true) + end + end + end + + # Handle depracted form of assigning a plugin prefix. + # + # @todo Remove deprecated form. + # + def self.plugin_prefix=(prefix) + warn '[!] The specification of a singular plugin prefix has been ' \ + "deprecated. Use `#{self}::plugin_prefixes` instead." + plugin_prefixes << prefix + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/argument_suggester.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/argument_suggester.rb new file mode 100644 index 0000000..a13575c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/argument_suggester.rb @@ -0,0 +1,99 @@ +# encoding: utf-8 + +module CLAide + class Command + class ArgumentSuggester + # @param [String] argument + # The unrecognized argument for which to make a suggestion. + # + # @param [Class] command_class + # The class of the command which encountered the unrecognized + # arguments. + # + def initialize(argument, command_class) + @argument, @command_class = argument, command_class + @argument_type = ARGV::Parser.argument_type(@argument) + end + + # @return [Array] The list of the valid arguments for a command + # according to the type of the argument. + # + def possibilities + case @argument_type + when :option, :flag + @command_class.options.map(&:first) + when :arg + @command_class.subcommands_for_command_lookup.map(&:command) + end + end + + # @return [String] Returns a suggested argument from `possibilities` based + # on the `levenshtein_distance` score. + # + def suggested_argument + possibilities.sort_by do |element| + self.class.levenshtein_distance(@argument, element) + end.first + end + + # @return [String] Returns a message including a suggestion for the given + # suggestion. + # + def suggestion + argument_description = @argument_type == :arg ? 
'command' : 'option' + if suggestion = suggested_argument + pretty_suggestion = self.class.prettify_suggestion(suggestion, + @argument_type) + "Unknown #{argument_description}: `#{@argument}`\n" \ + "Did you mean: #{pretty_suggestion}?" + else + "Unknown #{argument_description}: `#{@argument}`" + end + end + + # Prettifies the given validation suggestion according to the type. + # + # @param [String] suggestion + # The suggestion to prettify. + # + # @param [Type] argument_type + # The type of the suggestion: either `:command` or `:option`. + # + # @return [String] A handsome suggestion. + # + def self.prettify_suggestion(suggestion, argument_type) + case argument_type + when :option, :flag + suggestion = suggestion.to_s + suggestion.ansi.blue + when :arg + suggestion.ansi.green + end + end + + # Returns the Levenshtein distance between the given strings. + # From: http://rosettacode.org/wiki/Levenshtein_distance#Ruby + # + # @param [String] a + # The first string to compare. + # + # @param [String] b + # The second string to compare. + # + # @return [Fixnum] The distance between the strings. + def self.levenshtein_distance(a, b) + a, b = a.downcase, b.downcase + costs = Array(0..b.length) + (1..a.length).each do |i| + costs[0], nw = i, i - 1 + (1..b.length).each do |j| + costs[j], nw = [ + costs[j] + 1, costs[j - 1] + 1, a[i - 1] == b[j - 1] ? nw : nw + 1 + ].min, costs[j] + end + end + costs[b.length] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/banner.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/banner.rb new file mode 100644 index 0000000..d87c699 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/banner.rb @@ -0,0 +1,307 @@ +# encoding: utf-8 + +module CLAide + class Command + # Creates the formatted banner to present as help of the provided command + # class. + # + class Banner + # @return [Class] The command for which the banner should be created. + # + attr_accessor :command + + # @param [Class] command @see command + # + def initialize(command) + @command = command + end + + # @return [String] The banner for the command. + # + def formatted_banner + sections = [ + ['Usage', formatted_usage_description], + ['Commands', formatted_subcommand_summaries], + ['Options', formatted_options_description], + ] + banner = sections.map do |(title, body)| + [prettify_title("#{title}:"), body] unless body.empty? + end.compact.join("\n\n") + banner + end + + private + + # @!group Banner sections + #-----------------------------------------------------------------------# + + # @return [String] The indentation of the text. + # + TEXT_INDENT = 6 + + # @return [Fixnum] The maximum width of the text. + # + MAX_WIDTH = TEXT_INDENT + 80 + + # @return [Fixnum] The minimum between a name and its description. + # + DESCRIPTION_SPACES = 3 + + # @return [Fixnum] The minimum between a name and its description. + # + SUBCOMMAND_BULLET_SIZE = 2 + + # @return [String] The section describing the usage of the command. + # + def formatted_usage_description + message = command.description || command.summary || '' + message = TextWrapper.wrap_formatted_text(message, + TEXT_INDENT, + MAX_WIDTH) + message = prettify_message(command, message) + "#{signature}\n\n#{message}" + end + + # @return [String] The signature of the command. 
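A quick check of the distance scoring used by the suggester above, run against the vendored gem (the sample strings are arbitrary; the results follow from the implementation shown here):

    require 'claide'

    suggester = CLAide::Command::ArgumentSuggester

    # One inserted character separates the typo from the real name:
    suggester.levenshtein_distance('instal', 'install') # => 1
    # Comparison is case-insensitive, so only the trailing 's' counts here:
    suggester.levenshtein_distance('SPEC', 'specs')     # => 1

    # #suggested_argument effectively sorts the known candidates by this score:
    %w(install update outdated).min_by do |candidate|
      suggester.levenshtein_distance('instal', candidate)
    end # => "install"
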
+ # + def signature + full_command = command.full_command + sub_command = signature_sub_command + arguments = signature_arguments + result = prettify_signature(full_command, sub_command, arguments) + result.insert(0, '$ ') + result.insert(0, ' ' * (TEXT_INDENT - '$ '.size)) + end + + # @return [String] The subcommand indicator of the signature. + # + def signature_sub_command + return '[COMMAND]' if command.default_subcommand + return 'COMMAND' if command.subcommands.any? + end + + # @return [String] The arguments of the signature. + # + def signature_arguments + command.arguments.map do |arg| + names = arg.names.join('|') + names.concat(' ' + Argument::ELLIPSIS) if arg.repeatable? + arg.required? ? names : "[#{names}]" + end.join(' ') + end + + # @return [String] The section describing the subcommands of the command. + # + # @note The plus sign emphasizes the that the subcommands are added to + # the command. The square brackets conveys a sense of direction + # and indicates the gravitational force towards the default + # command. + # + def formatted_subcommand_summaries + subcommands = subcommands_for_banner + subcommands.map do |subcommand| + name = subcommand.command + bullet = (name == command.default_subcommand) ? '>' : '+' + name = "#{bullet} #{name}" + pretty_name = prettify_subcommand(name) + entry_description(pretty_name, subcommand.summary, name.size) + end.join("\n") + end + + # @return [String] The section describing the options of the command. + # + def formatted_options_description + options = command.options + options.map do |name, description| + pretty_name = prettify_option_name(name) + entry_description(pretty_name, description, name.size) + end.join("\n") + end + + # @return [String] The line describing a single entry (subcommand or + # option). + # + def entry_description(name, description, name_width) + max_name_width = compute_max_name_width + desc_start = max_name_width + (TEXT_INDENT - 2) + DESCRIPTION_SPACES + result = ' ' * (TEXT_INDENT - 2) + result << name + result << ' ' * DESCRIPTION_SPACES + result << ' ' * (max_name_width - name_width) + result << TextWrapper.wrap_with_indent(description, + desc_start, + MAX_WIDTH) + end + + # @!group Overrides + #-----------------------------------------------------------------------# + + # @return [String] A decorated title. + # + def prettify_title(title) + title.ansi.underline + end + + # @return [String] A decorated textual representation of the subcommand + # name. + # + def prettify_subcommand(name) + name.chomp.ansi.green + end + + # @return [String] A decorated textual representation of the option name. + # + # + def prettify_option_name(name) + name.chomp.ansi.blue + end + + # @return [String] A decorated textual representation of the command. + # + def prettify_signature(command, subcommand, argument) + components = [ + [command, :green], + [subcommand, :green], + [argument, :magenta], + ] + components.reduce('') do |memo, (string, ansi_key)| + next memo if !string || string.empty? + memo << ' ' << string.ansi.apply(ansi_key) + end.lstrip + end + + # @return [String] A decorated command description. 
+ # + def prettify_message(command, message) + message = message.dup + command.arguments.each do |arg| + arg.names.each do |name| + message.gsub!("`#{name.gsub(/\.{3}$/, '')}`", '\0'.ansi.magenta) + end + end + command.options.each do |(name, _description)| + message.gsub!("`#{name}`", '\0'.ansi.blue) + end + message + end + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # @return [Array] The list of the subcommands to use in the + # banner. + # + def subcommands_for_banner + command.subcommands_for_command_lookup.reject do |subcommand| + subcommand.summary.nil? + end.sort_by(&:command) + end + + # @return [Fixnum] The width of the largest command name or of the + # largest option name. Used to align all the descriptions. + # + def compute_max_name_width + widths = [] + widths << command.options.map { |option| option.first.size } + widths << subcommands_for_banner.map do |cmd| + cmd.command.size + SUBCOMMAND_BULLET_SIZE + end.max + widths.flatten.compact.max || 1 + end + + module TextWrapper + # @return [String] Wraps a formatted string (e.g. markdown) by stripping + # heredoc indentation and wrapping by word to the terminal width + # taking into account a maximum one, and indenting the string. + # Code lines (i.e. indented by four spaces) are not wrapped. + # + # @param [String] string + # The string to format. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_formatted_text(string, indent = 0, max_width = 80) + paragraphs = strip_heredoc(string).split("\n\n") + paragraphs = paragraphs.map do |paragraph| + if paragraph.start_with?(' ' * 4) + paragraph.gsub!(/\n/, "\n#{' ' * indent}") + else + paragraph = wrap_with_indent(paragraph, indent, max_width) + end + paragraph.insert(0, ' ' * indent).rstrip + end + paragraphs.join("\n\n") + end + + # @return [String] Wraps a string to the terminal width taking into + # account the given indentation. + # + # @param [String] string + # The string to indent. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_with_indent(string, indent = 0, max_width = 80) + if terminal_width == 0 + width = max_width + else + width = [terminal_width, max_width].min + end + + full_line = string.gsub("\n", ' ') + available_width = width - indent + space = ' ' * indent + word_wrap(full_line, available_width).split("\n").join("\n#{space}") + end + + # @return [String] Lifted straight from ActionView. Thanks guys! + # + def self.word_wrap(line, line_width) + line.gsub(/(.{1,#{line_width}})(\s+|$)/, "\\1\n").strip + end + + # @return [String] Lifted straight from ActiveSupport. Thanks guys! + # + def self.strip_heredoc(string) + if min = string.scan(/^[ \t]*(?=\S)/).min + string.gsub(/^[ \t]{#{min.size}}/, '') + else + string + end + end + + # @!group Private helpers + #---------------------------------------------------------------------# + + # @return [Fixnum] The width of the current terminal unless being piped. + # + def self.terminal_width + @terminal_width ||= + (!ENV['CLAIDE_DISABLE_AUTO_WRAP'] && + STDOUT.tty? 
&& + calculate_terminal_width) || 0 + end + + def self.calculate_terminal_width + require 'io/console' + STDOUT.winsize.last + rescue LoadError + (system('which tput > /dev/null 2>&1') && `tput cols`.to_i) || 0 + rescue + 0 + end + private_class_method :calculate_terminal_width + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/plugin_manager.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/plugin_manager.rb new file mode 100644 index 0000000..184a5ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/command/plugin_manager.rb @@ -0,0 +1,124 @@ +# encoding: utf-8 + +module CLAide + class Command + # Handles plugin related logic logic for the `Command` class. + # + # Plugins are loaded the first time a command run and are identified by the + # prefix specified in the command class. Plugins must adopt the following + # conventions: + # + # - Support being loaded by a file located under the + # `lib/#{plugin_prefix}_plugin` relative path. + # - Be stored in a folder named after the plugin. + # + class PluginManager + # @return [Hash] The loaded plugins, + # grouped by plugin prefix. + # + def self.loaded_plugins + @loaded_plugins ||= {} + end + + # @return [Array] Loads plugins via RubyGems looking + # for files named after the `PLUGIN_PREFIX_plugin` and returns the + # specifications of the gems loaded successfully. + # Plugins are required safely. + # + def self.load_plugins(plugin_prefix) + loaded_plugins[plugin_prefix] ||= + plugin_gems_for_prefix(plugin_prefix).map do |spec, paths| + spec if safe_require(paths) + end.compact + end + + # @return [Array] The RubyGems specifications for the + # loaded plugins. + # + def self.specifications + loaded_plugins.values.flatten.uniq + end + + # @return [Array] The RubyGems specifications for the + # installed plugins that match the given `plugin_prefix`. + # + def self.installed_specifications_for_prefix(plugin_prefix) + loaded_plugins[plugin_prefix] || + plugin_gems_for_prefix(plugin_prefix).map(&:first) + end + + # @return [Array] The list of the plugins whose root path appears + # in the backtrace of an exception. + # + # @param [Exception] exception + # The exception to analyze. + # + def self.plugins_involved_in_exception(exception) + specifications.select do |gemspec| + exception.backtrace.any? do |line| + full_require_paths_for(gemspec).any? do |plugin_path| + line.include?(plugin_path) + end + end + end.map(&:name) + end + + # @group Helper Methods + + # @return [Array<[Gem::Specification, Array]>] + # Returns an array of tuples containing the specifications and + # plugin files to require for a given plugin prefix. + # + def self.plugin_gems_for_prefix(prefix) + glob = "#{prefix}_plugin#{Gem.suffix_pattern}" + Gem::Specification.latest_specs(true).map do |spec| + matches = spec.matches_for_glob(glob) + [spec, matches] unless matches.empty? + end.compact + end + + # Requires the given paths. + # If any exception occurs it is caught and an + # informative message is printed. + # + # @param [String] paths + # The paths to require. + # + # @return [Bool] Whether requiring succeeded. 
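To make the conventions in the `PluginManager` header concrete, a hypothetical plugin gem for a tool whose `plugin_prefixes` include `beverage_maker` could be laid out as sketched below. The gem and file names are invented; only the glob and loading behaviour shown above are relied upon:

    # Layout of the hypothetical plugin gem `beverage_maker-cocoa`:
    #
    #   beverage_maker-cocoa/
    #     beverage_maker-cocoa.gemspec
    #     lib/
    #       beverage_maker_plugin.rb   # matched by "#{prefix}_plugin#{Gem.suffix_pattern}"
    #       beverage_maker/cocoa.rb
    #
    # lib/beverage_maker_plugin.rb is the file PluginManager requires safely and
    # typically just loads code that defines extra CLAide::Command subclasses:
    require 'beverage_maker/cocoa'

    # In the host tool, Command.run triggers discovery for each prefix:
    specs = CLAide::Command::PluginManager.load_plugins('beverage_maker')
    specs.map(&:name)                              # plugins that loaded cleanly
    CLAide::Command::PluginManager.specifications  # all loaded plugins, any prefix
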
+ # + def self.safe_require(paths) + paths.each do |path| + begin + require(path) + rescue Exception => exception # rubocop:disable RescueException + message = "\n---------------------------------------------" + message << "\nError loading plugin file `#{path}`.\n" + message << "\n#{exception.class} - #{exception.message}" + message << "\n#{exception.backtrace.join("\n")}" + message << "\n---------------------------------------------\n" + warn message.ansi.yellow + return false + end + end + + true + end + + def self.full_require_paths_for(gemspec) + if gemspec.respond_to?(:full_require_paths) + return gemspec.full_require_paths + end + + # RubyGems < 2.2 + gemspec.require_paths.map do |require_path| + if require_path.include?(gemspec.full_gem_path) + require_path + else + File.join(gemspec.full_gem_path, require_path) + end + end + end + private_class_method :full_require_paths_for + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/gem_version.rb new file mode 100644 index 0000000..cdb020c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/gem_version.rb @@ -0,0 +1,7 @@ +module CLAide + # @return [String] + # + # CLAide’s version, following [semver](http://semver.org). + # + VERSION = '1.1.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/help.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/help.rb new file mode 100644 index 0000000..7d3b183 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/help.rb @@ -0,0 +1,58 @@ +# encoding: utf-8 + +module CLAide + require 'claide/informative_error' + + # The exception class that is raised to indicate a help banner should be + # shown while running {Command.run}. + # + class Help < StandardError + include InformativeError + + # @return [String] The banner containing the usage instructions of the + # command to show in the help. + # + attr_reader :banner + + # @return [String] An optional error message that will be shown before the + # help banner. + # + attr_reader :error_message + + # @param [String] banner @see banner + # @param [String] error_message @see error_message + # + # @note If an error message is provided, the exit status, used to + # terminate the program with, will be set to `1`, otherwise a {Help} + # exception is treated as not being a real error and exits with `0`. + # + def initialize(banner, error_message = nil) + @banner = banner + @error_message = error_message + @exit_status = @error_message.nil? ? 0 : 1 + end + + # @return [String] The optional error message, colored in red if + # {Command.ansi_output} is set to `true`. + # + def formatted_error_message + if error_message + message = "[!] #{error_message}" + prettify_error_message(message) + end + end + + # @return [String] + # + def prettify_error_message(message) + message.ansi.red + end + + # @return [String] The optional error message, combined with the help + # banner of the command. 
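The exit-status convention implemented above can be seen directly. The banner string is made up; everything else follows the `Help` implementation in this file:

    require 'claide'

    banner = '$ beverage-maker tea [FLAVOR]' # hypothetical banner text

    # With an error message, Help behaves like a real failure (exit status 1):
    begin
      raise CLAide::Help.new(banner, 'A FLAVOR argument is required.')
    rescue CLAide::Help => help
      puts help.message  # "[!] A FLAVOR argument is required." (red if ANSI), blank line, banner
      help.exit_status   # => 1
    end

    # Without an error message (a plain `--help`, say), the same exception is benign:
    CLAide::Help.new(banner).exit_status # => 0
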
+ # + def message + [formatted_error_message, banner].compact.join("\n\n") + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/informative_error.rb b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/informative_error.rb new file mode 100644 index 0000000..8a92bd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/claide-1.1.0/lib/claide/informative_error.rb @@ -0,0 +1,21 @@ +# encoding: utf-8 + +module CLAide + # Including this module into an exception class will ensure that when raised, + # while running {Command.run}, only the message of the exception will be + # shown to the user. Unless disabled with the `--verbose` flag. + # + # In addition, the message will be colored red, if {Command.ansi_output} + # is set to `true`. + # + module InformativeError + # @return [Numeric] The exist status code that should be used to terminate + # the program with. Defaults to `1`. + # + attr_writer :exit_status + + def exit_status + @exit_status ||= 1 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/CHANGELOG.md new file mode 100644 index 0000000..0b85a54 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/CHANGELOG.md @@ -0,0 +1,7794 @@ +# Installation & Update + +To install or update CocoaPods see this [guide](https://guides.cocoapods.org/using/index.html). + +To install release candidates run `[sudo] gem install cocoapods --pre` + +## 1.11.2 (2021-09-13) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Do not validate modular header dependencies for pre-built Swift pods. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10912](https://github.com/CocoaPods/CocoaPods/issues/10912) + + +## 1.11.1 (2021-09-13) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Handle spec repo urls with user info when determining if they are CDN. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10941](https://github.com/CocoaPods/CocoaPods/issues/10941) + +* Set `INFOPLIST_FILE` build setting to `$(SRCROOT)/App/App-Info.plist` during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10927](https://github.com/CocoaPods/CocoaPods/issues/10927) + +* Set `PRODUCT_BUNDLE_IDENTIFIER` for generated app during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10933](https://github.com/CocoaPods/CocoaPods/issues/10933) + + +## 1.11.0 (2021-09-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.11.0.rc.1 (2021-08-25) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Correctly process multiple `xcframeworks` a pod provides. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10378](https://github.com/CocoaPods/CocoaPods/issues/10378) + + +## 1.11.0.beta.2 (2021-08-11) + +##### Enhancements + +* Integrate ODR categories into projects. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10855](https://github.com/CocoaPods/CocoaPods/pull/10855) + +##### Bug Fixes + +* Pass correct paths for `select_slice` method. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10430](https://github.com/CocoaPods/CocoaPods/issues/10430) + + +## 1.11.0.beta.1 (2021-08-09) + +##### Enhancements + +* Add support for integrating on demand resources. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [JunyiXie](https://github.com/JunyiXie) + [#9606](https://github.com/CocoaPods/CocoaPods/issues/9606) + [#10845](https://github.com/CocoaPods/CocoaPods/pull/10845) + +* Integrate `project_header_files` specified by specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9820](https://github.com/CocoaPods/CocoaPods/issues/9820) + +* Mark RealityComposer-projects (`.rcproject`) files defined in resources for compilation. + [Hendrik von Prince](https://github.com/parallaxe) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10793](https://github.com/CocoaPods/CocoaPods/pull/10793) + +* Integrate test specs and app specs of pre-built pods. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10795](https://github.com/CocoaPods/CocoaPods/pull/10795) + +* Add support for `before_headers` and `after_headers` script phase DSL. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10770](https://github.com/CocoaPods/CocoaPods/issues/10770) + +* Fix touch on a missing directory for dSYM copy phase script. + [alvarollmenezes](https://github.com/alvarollmenezes) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10488](https://github.com/CocoaPods/CocoaPods/issues/10488) + +* Check the podfile sources and plugin sources when printing warnings without explicitly using the master source. + [gonghonglou](https://github.com/gonghonglou) + [#10764](https://github.com/CocoaPods/CocoaPods/pull/10764) + +* Use relative paths in copy dsyms script. + [Mickey Knox](https://github.com/knox) + [#10583](https://github.com/CocoaPods/CocoaPods/pull/10583) + +* Use `OpenURI.open_uri` instead. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10597](https://github.com/CocoaPods/CocoaPods/issues/10597) + +* Set minimum supported Ruby version to 2.6. + [Igor Makarov](https://github.com/igor-makarov) + [#10412](https://github.com/CocoaPods/CocoaPods/pull/10412) + +* Improve compatibility with ActiveSupport 6 + [Jun Jiang](https://github.com/jasl) + [#10364](https://github.com/CocoaPods/CocoaPods/pull/10364) + +* Add a `pre_integrate_hook` API + [dcvz](https://github.com/dcvz) + [#9935](https://github.com/CocoaPods/CocoaPods/pull/9935) + +* Rewrite the only place dependent on `typhoeus`. + [Jun Jiang](https://github.com/jasl), [Igor Makarov](https://github.com/igor-makarov) + [#10346](https://github.com/CocoaPods/CocoaPods/pull/10346) + +* Add a `--update-sources` option to `pod repo push` so one can ensure sources are up-to-date. + [Elton Gao](https://github.com/gyfelton) + [Justin Martin](https://github.com/justinseanmartin) + +* Installing a local (`:path`) pod that defines script phases will no longer + produce warnings. + [Samuel Giddins](https://github.com/segiddins) + +* Allow building app & test spec targets that depend on a library that uses + Swift without requiring an empty Swift file be present. + [Samuel Giddins](https://github.com/segiddins) + +* Add flag to ignore prerelease versions when reporting latest version for outdated pods. 
+ [cltnschlosser](https://github.com/cltnschlosser) + [#9916](https://github.com/CocoaPods/CocoaPods/pull/9916) + +* Add possibility to skip modulemap generation + [till0xff](https://github.com/till0xff) + [#10235](https://github.com/CocoaPods/CocoaPods/issues/10235) + +* Add a `--version` option to `pod spec cat` and `pod spec which` for listing the podspec of a specific version + [pietbrauer](https://github.com/pietbrauer) + [#10609](https://github.com/CocoaPods/CocoaPods/pull/10609) + +##### Bug Fixes + +* Fix resource variant groups in static frameworks + [Igor Makarov](https://github.com/igor-makarov) + [#10834](https://github.com/CocoaPods/CocoaPods/pull/10834) + [#10605](https://github.com/CocoaPods/CocoaPods/issues/10605) + +* Fix adding embed frameworks script phase to unit test targets if xcframeworks are present. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10652](https://github.com/CocoaPods/CocoaPods/issues/10652) + +* Remove unused `install_xcframework_library` code. + [Gio Lodi](https://github.com/mokagio) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10831](https://github.com/CocoaPods/CocoaPods/pull/10831) + +* Validate vendored library names after they have been expanded. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10832](https://github.com/CocoaPods/CocoaPods/pull/10832) + +* Place frameworks from xcframeworks into a unique folder name to avoid duplicate outputs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10106](https://github.com/CocoaPods/CocoaPods/issues/10106) + +* Update pod in Pods folder when changing the pod from branch to version in Podfie. + [gonghonglou](https://github.com/gonghonglou) + [#10825](https://github.com/CocoaPods/CocoaPods/pull/10825) + +* Bump addressable dependency to 2.8. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10802](https://github.com/CocoaPods/CocoaPods/issues/10802) + +* Dedup bcsymbolmap paths found from multiple vendored frameworks. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10373](https://github.com/CocoaPods/CocoaPods/issues/10373) + +* Correctly filter dependencies for pod variants across different platforms. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10777](https://github.com/CocoaPods/CocoaPods/issues/10777) + +* Generate default `Info.plist` for consumer app during validation. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8570](https://github.com/CocoaPods/CocoaPods/issues/8570) + +* Fix lint subspec error when the name of subspec start with the pod name. + [XianpuMeng](https://github.com/XianpuMeng) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9906](https://github.com/CocoaPods/CocoaPods/issues/9906) + +* Update `ruby-macho` gem version to support 1.x and 2.x. + [Eric Chamberlain](https://github.com/PeqNP) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10390](https://github.com/CocoaPods/CocoaPods/issues/10390) + +* Respect `--configuration` option when analyzing via `pod lib lint --analyze`. + [Jenn Magder](https://github.com/jmagman) + [#10476](https://github.com/CocoaPods/CocoaPods/issues/10476) + +* Do not add dependencies to 'Link Binary With Libraries' phase. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10133](https://github.com/CocoaPods/CocoaPods/pull/10133) + +* Ensure cache integrity on concurrent installations. 
+ [Erik Blomqvist](https://github.com/codiophile) + [#10013](https://github.com/CocoaPods/CocoaPods/issues/10013) + +* Force a clean install if installation options change. + [Sebastian Shanus](https://github.com/sebastianv1) + [#10016](https://github.com/CocoaPods/CocoaPods/pull/10016) + +* Correctly detect that a prebuilt pod uses Swift. + [Elton Gao](https://github.com/gyfelton) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8649](https://github.com/CocoaPods/CocoaPods/issues/8649) + +* fix: ensure cached spec path uniq + [SolaWing](https://github.com/SolaWing) + [#10231](https://github.com/CocoaPods/CocoaPods/issues/10231) + +* Set `knownRegions` on generated projects with localized resources to prevent Xcode from re-saving projects to disk. + [Eric Amorde](https://github.com/amorde) + [#10290](https://github.com/CocoaPods/CocoaPods/pull/10290) + +* Serialize schemes that do not need to be rewritten by Xcode. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.10.2 (2021-07-28) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix errors when archiving a Catalyst app which depends on a pod which uses `header_mappings_dir`. + [Thomas Goyne](https://github.com/tgoyne) + [#10224](https://github.com/CocoaPods/CocoaPods/pull/10224) + +* Fix missing `-ObjC` for static XCFrameworks - take 2 + [Paul Beusterien](https://github.com/paulb777) + [#10459](https://github.com/CocoaPods/CocoaPods/issuess/10459) + +* Change URL validation failure to a note + [Paul Beusterien](https://github.com/paulb777) + [#10291](https://github.com/CocoaPods/CocoaPods/issues/10291) + + +## 1.10.1 (2021-01-07) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix library name in LD `-l` flags for XCFrameworks containing libraries + [Wes Campaigne](https://github.com/Westacular) + [#10165](https://github.com/CocoaPods/CocoaPods/issues/10165) + +* Fix file extension replacement for resource paths when using static frameworks. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#10206](https://github.com/CocoaPods/CocoaPods/issues/10206) + +* Fix processing of xcassets resources when pod target is static framework + [Federico Trimboli](https://github.com/fedetrim) + [#10175](https://github.com/CocoaPods/CocoaPods/pull/10175) + [#10170](https://github.com/CocoaPods/CocoaPods/issues/10170) + +* Fix missing `-ObjC` for static XCFrameworks + [Paul Beusterien](https://github.com/paulb777) + [#10234](https://github.com/CocoaPods/CocoaPods/pull/10234) + + +## 1.10.0 (2020-10-20) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Generate the correct LD `-l` flags for XCFrameworks containing libraries + [Wes Campaigne](https://github.com/Westacular) + [#10071](https://github.com/CocoaPods/CocoaPods/issues/10071) + +* Add support for automatically embedding XCFramework debug symbols for XCFrameworks generated with Xcode 12 + [johntmcintosh](https://github.com/johntmcintosh) + [#10111](https://github.com/CocoaPods/CocoaPods/issues/10111) + +## 1.10.0.rc.1 (2020-09-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix XCFramework slice selection + [lowip](https://github.com/lowip) + [#10026](https://github.com/CocoaPods/CocoaPods/issues/10026) + +* Honor test spec deployment target during validation. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9999](https://github.com/CocoaPods/CocoaPods/pull/9999) + +* Ensure that incremental installation is able to set target dependencies for a + test spec that uses a custom `app_host_name` that is in a project that is not + regenerated. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.10.0.beta.2 (2020-08-12) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Ensure that static frameworks are not embedded + [Bernard Gatt](https://github.com/BernardGatt) + [#9943](https://github.com/CocoaPods/CocoaPods/issues/9943) + +* Ensure that the non-compilable resource skipping in static frameworks happens only for the pod itself + [Igor Makarov](https://github.com/igor-makarov) + [#9922](https://github.com/CocoaPods/CocoaPods/pull/9922) + [#9920](https://github.com/CocoaPods/CocoaPods/issues/9920) + + +## 1.10.0.beta.1 (2020-07-17) + +##### Breaking + +* Bump minimum Ruby version to 2.3.3 (included with macOS High Sierra) + [Eric Amorde](https://github.com/amorde) + [#9821](https://github.com/CocoaPods/CocoaPods/issues/9821) + +##### Enhancements + +* Add the App Clip product symbol to the list of products that need embedding. + [Igor Makarov](https://github.com/igor-makarov) + [#9882](https://github.com/CocoaPods/CocoaPods/pull/9882) + +* Allow gem to run as root when passing argument flag `--allow-root` + [Sean Reinhardt](https://github.com/seanreinhardtapps) + [#8929](https://github.com/CocoaPods/CocoaPods/issues/8929) + +* Warn users to delete the master specs repo if its not explicitly used. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9871](https://github.com/CocoaPods/CocoaPods/pull/9871) + +* Use User Project's compatibilityVersion instead of objectVersion when + deciding when to use xcfilelists. + [Sean Reinhardt](https://github.com/seanreinhardtapps) + [#9140](https://github.com/CocoaPods/CocoaPods/issues/9140) + +* add a `--configuration` option to `pod lib lint` and `pod spec lint`. + [Gereon Steffens](https://github.com/gereons) + [#9686](https://github.com/CocoaPods/CocoaPods/issues/9686) + +* Add a `post_integrate_hook` API + [lucasmpaim](https://github.com/lucasmpaim) + [#7432](https://github.com/CocoaPods/CocoaPods/issues/7432) + +* Set the `BUILD_LIBRARY_FOR_DISTRIBUTION` build setting if integrating with a target that has the setting set to `YES`. + [Juanjo López](https://github.com/juanjonol) + [#9232](https://github.com/CocoaPods/CocoaPods/issues/9232) + +* Option to lint a specified set of test_specs + [Paul Beusterien](https://github.com/paulb777) + [#9392](https://github.com/CocoaPods/CocoaPods/pull/9392) + +* Add `--use-static-frameworks` lint option + [Paul Beusterien](https://github.com/paulb777) + [#9632](https://github.com/CocoaPods/CocoaPods/pull/9632) + +* Exclude the local spec-repos directory from Time Machine Backups. + [Jakob Krigovsky](https://github.com/sonicdoe) + [#8308](https://github.com/CocoaPods/CocoaPods/issues/8308) + +##### Bug Fixes + +* Override Xcode 12 default for erroring on quoted imports in umbrellas. 
+ [Paul Beusterien](https://github.com/paulb777) + [#9902](https://github.com/CocoaPods/CocoaPods/issues/9902) + +* Remove bitcode symbol maps from embedded framework bundles + [Eric Amorde](https://github.com/amorde) + [#9681](https://github.com/CocoaPods/CocoaPods/issues/9681) + +* Prevent "source changed" message for every version change when using trunk source + [cltnschlosser](https://github.com/cltnschlosser) + [#9865](https://github.com/CocoaPods/CocoaPods/issues/9865) + +* When pod target is a static framework, save time by copying compiled resources + [Igor Makarov](https://github.com/igor-makarov) + [#9441](https://github.com/CocoaPods/CocoaPods/pull/9441) + +* Re-implement `bcsymbolmap` copying to avoid duplicate outputs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [mplorentz](https://github.com/mplorentz) + [#9734](https://github.com/CocoaPods/CocoaPods/pull/9734) + +* Fix Xcode 11 warning when setting Bundle Identifier in `info_plist` + [Sean Reinhardt](https://github.com/seanreinhardtapps) + [#9536](https://github.com/CocoaPods/CocoaPods/issues/9536) + +* Fix `incompatible encoding regexp match` for linting non-ascii pod name + [banjun](https://github.com/banjun) + [#9765](https://github.com/CocoaPods/CocoaPods/issues/9765) + [#9776](https://github.com/CocoaPods/CocoaPods/pull/9776) + +* Fix crash when targets missing in Podfile + [Paul Beusterien](https://github.com/paulb777) + [#9745](https://github.com/CocoaPods/CocoaPods/pull/9745) + +* Fix adding developer library search paths during pod validation. + [Nick Entin](https://github.com/NickEntin) + [#9736](https://github.com/CocoaPods/CocoaPods/pull/9736) + +* Fix an issue that caused multiple xcframework scripts to produce the same output files + [Eric Amorde](https://github.com/amorde) + [#9670](https://github.com/CocoaPods/CocoaPods/issues/9670) + [#9720](https://github.com/CocoaPods/CocoaPods/pull/9720) + +* Fix an issue preventing framework user targets with an xcframework dependency from building successfully + [Eric Amorde](https://github.com/amorde) + [#9525](https://github.com/CocoaPods/CocoaPods/issues/9525) + [#9720](https://github.com/CocoaPods/CocoaPods/pull/9720) + +* Fix an issue preventing xcframeworks that wrapped static libraries from linking successfully + [Eric Amorde](https://github.com/amorde) + [#9528](https://github.com/CocoaPods/CocoaPods/issues/9528) + [#9720](https://github.com/CocoaPods/CocoaPods/pull/9720) + +* Fix setting `swift_version` when deduplicate targets is turned off. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9689](https://github.com/CocoaPods/CocoaPods/pull/9689) + +* Honor prefix_header_file=false for subspecs + [Paul Beusterien](https://github.com/paulb777) + [#9687](https://github.com/CocoaPods/CocoaPods/pull/9687) + +* Do not clean user projects from sandbox. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9683](https://github.com/CocoaPods/CocoaPods/pull/9683) + +* Fix mapping of resource paths for app specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9676](https://github.com/CocoaPods/CocoaPods/pull/9676) + +* When preserving pod paths, preserve ALL the paths + [Igor Makarov](https://github.com/igor-makarov) + [#9483](https://github.com/CocoaPods/CocoaPods/pull/9483) + +* Re-implement `dSYM` copying and stripping to avoid duplicate outputs. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9185](https://github.com/CocoaPods/CocoaPods/issues/9185) + +* Add support for running tests through the scheme of the app spec host of a test spec + [Eric Amorde](https://github.com/amorde) + [#9332](https://github.com/CocoaPods/CocoaPods/issues/9332) + +* Fix an issue that prevented variables in test bundle scheme settings from expanding + [Eric Amorde](https://github.com/amorde) + [#9539](https://github.com/CocoaPods/CocoaPods/pull/9539) + +* Fix project path handling issue that caused cmake projects to be incorrect + [Paul Beusterien](https://github.com/paulb777) + [Andrew](https://github.com/mad-rain) + [#6268](https://github.com/CocoaPods/CocoaPods/pull/6268) + +* Set `Missing Localizability` setting to `'YES'` to prevent warnings in Xcode 11 + [Eric Amorde](https://github.com/amorde) + [#9612](https://github.com/CocoaPods/CocoaPods/pull/9612) + +* Don't crash on non UTF-8 error message + [Kenji KATO](https://github.com/katoken-0215) + [#9706](https://github.com/CocoaPods/CocoaPods/pull/9706) + +* Fix XCFramework slice selection when having more archs in slice than requested with $ARCHS + [jerbob92](https://github.com/jerbob92) + [#9790](https://github.com/CocoaPods/CocoaPods/pull/9790) + +* Don't add app spec dependencies to the parent library's target in Xcode, + which was happening when the dependency's project was not being regenerated + due to incremental installation. + [segiddins][https://github.com/segiddins] + +* Add the trunk repo to the default `sources` for the `repo push` command + [Elf Sundae](https://github.com/ElfSundae) + [#9840](https://github.com/CocoaPods/CocoaPods/pull/9840) + +## 1.9.3 (2020-05-29) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.9.2 (2020-05-22) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.9.1 (2020-03-09) + +##### Enhancements + +##### Bug Fixes + +* Apply correct `SYSTEM_FRAMEWORK_SEARCH_PATHS` for `XCTUnwrap` fix. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9579](https://github.com/CocoaPods/CocoaPods/pull/9579) + +* Fix an issue that caused a build failure with vendored XCFrameworks on macOS + [Eric Amorde](https://github.com/amorde) + [#9572](https://github.com/CocoaPods/CocoaPods/issues/9572) + +* Fix an issue that prevented the correct XCFramework slice from being selected for watchOS extensions + [Eric Amorde](https://github.com/amorde) + [#9569](https://github.com/CocoaPods/CocoaPods/issues/9569) + + +## 1.9.0 (2020-02-25) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Also apply Xcode 11 `XCTUnwrap` fix to library and framework targets that weakly link `XCTest`. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9518](https://github.com/CocoaPods/CocoaPods/pull/9518) + +* Fix dSYM handling for XCFrameworks. + [Eric Amorde](https://github.com/amorde) + [#9530](https://github.com/CocoaPods/CocoaPods/issues/9530) + +## 1.9.0.beta.3 (2020-02-04) + +##### Enhancements + +* PathList optimizations related to file system reads. + [manuyavuz](https://github.com/manuyavuz) + [#9428](https://github.com/CocoaPods/CocoaPods/pull/9428) + +##### Bug Fixes + +* Apply Xcode 11 `XCTUnwrap` fix to library and framework targets. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9500](https://github.com/CocoaPods/CocoaPods/pull/9500) + +* Fix resources script when building a project from a symlink. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9423](https://github.com/CocoaPods/CocoaPods/issues/9423) + +* Fix `pod install` crash on projects with atypical configuration names. + [Paul Beusterien](https://github.com/paulb777) + [#9465](https://github.com/CocoaPods/CocoaPods/pull/9465) + +* Fix an issue that caused iOS archives to be invalid when including a vendored XCFramework + [Eric Amorde](https://github.com/amorde) + [#9458](https://github.com/CocoaPods/CocoaPods/issues/9458) + +* Fix a bug where an incremental install missed library resources. + [Igor Makarov](https://github.com/igor-makarov) + [#9431](https://github.com/CocoaPods/CocoaPods/pull/9431) + +* Fix an issue that caused an incorrect warning to be emitted for CLI targets with static libraries + [Eric Amorde](https://github.com/amorde) + [#9498](https://github.com/CocoaPods/CocoaPods/issues/9498) + +## 1.9.0.beta.2 (2019-12-17) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix validator to properly integration project during `lint`. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9416](https://github.com/CocoaPods/CocoaPods/pull/9416) + +## 1.9.0.beta.1 (2019-12-16) + +##### Enhancements + +* Support for scheme code coverage. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8921](https://github.com/CocoaPods/CocoaPods/issues/8921) + +* Support Swift version variants. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9230](https://github.com/CocoaPods/CocoaPods/pull/9230) + +* Configure dependencies per configuration. + [Samuel Giddins](https://github.com/segiddins) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9149](https://github.com/CocoaPods/CocoaPods/pull/9149) + +* Include Podfile Plugin changes for incremental installation. + [Sebastian Shanus](https://github.com/sebastianv1) + [#9147](https://github.com/CocoaPods/CocoaPods/pull/9147) + +* Integrate `use_frameworks!` linkage DSL. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9099](https://github.com/CocoaPods/CocoaPods/issues/9099) + +* Add support for integrating dependency file in user script phases. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9082](https://github.com/CocoaPods/CocoaPods/issues/9082) + +* Add support for XCFrameworks using the `vendored_frameworks` Podspec DSL. + [Eric Amorde](https://github.com/amorde) + [#9148](https://github.com/CocoaPods/CocoaPods/issues/9148) + +##### Bug Fixes + +* Move `run_podfile_post_install_hooks` call to execute right before projects are saved. + [Yusuf Sobh](https://github.com/yusufoos) + [#9379](https://github.com/CocoaPods/CocoaPods/issues/9379) + +* Do not apply header mapping copy if the spec does not provide a header mappings dir. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9308](https://github.com/CocoaPods/CocoaPods/issues/9308) + +* Fix issue where workspace was missing user project references during incremental installation. + [Sebastian Shanus](https://github.com/sebastianv1) + [#9237](https://github.com/CocoaPods/CocoaPods/issues/9237) + +* Search in users xcconfig's for figuring out when to set `APPLICATION_EXTENSION_API_ONLY`. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9233](https://github.com/CocoaPods/CocoaPods/issues/9233) + +* Always generate a lockfile even if project integration is disabled. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9288](https://github.com/CocoaPods/CocoaPods/issues/9288) + +* Fix incremental installation with plugins that include arguments with different ordering. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9170](https://github.com/CocoaPods/CocoaPods/pull/9170) + +* Move custom `Copy Headers` script phase for header mappings before `Compile Sources`. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9131](https://github.com/CocoaPods/CocoaPods/pull/9131) + +* Don't create a conflicting `LaunchScreen.storyboard` when an app spec contains a file + with that name in its `resources`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.8.4 (2019-10-16) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Do not crash if the repos dir is not setup. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9216](https://github.com/CocoaPods/CocoaPods/issues/9216) + +## 1.8.3 (2019-10-04) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix crash when running on mounted filesystems + [Paul Beusterien](https://github.com/paulb777) + [#9200](https://github.com/CocoaPods/CocoaPods/pull/9200) + + +## 1.8.1 (2019-09-27) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.8.0 (2019-09-23) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Include dependent vendored frameworks in linker flags + [Alex Coomans](https://github.com/drcapulet) + [#9045](https://github.com/CocoaPods/CocoaPods/pull/9045) + +* Correctly set deployment target for non library specs even if the root spec does not specify one. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9153](https://github.com/CocoaPods/CocoaPods/pull/9153) + +* Make `APPLICATION_EXTENSION_API_ONLY` build setting not break when performing a cached incremental install. + [Igor Makarov](https://github.com/igor-makarov) + [#8967](https://github.com/CocoaPods/CocoaPods/issues/8967) + [#9141](https://github.com/CocoaPods/CocoaPods/issues/9141) + [#9142](https://github.com/CocoaPods/CocoaPods/pull/9142) + +## 1.8.0.beta.2 (2019-08-27) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Do not verify deployment target version during resolution for non library specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9105](https://github.com/CocoaPods/CocoaPods/issues/9105) + +* Add `USE_RECURSIVE_SCRIPT_INPUTS_IN_SCRIPT_PHASES = YES` to all `.xcconfig`s + [Igor Makarov](https://github.com/igor-makarov) + [#8073](https://github.com/CocoaPods/CocoaPods/issues/8073) + [#9125](https://github.com/CocoaPods/CocoaPods/pull/9125) + [cocoapods-integration-specs#248](https://github.com/CocoaPods/cocoapods-integration-specs/pull/248) + +* Fix iOS app spec code signing. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9110](https://github.com/CocoaPods/CocoaPods/issues/9110) + +* Add Apple watch device family to resource bundles built for WatchOS + [Aaron McDaniel](https://github.com/Spilly) + [#9075](https://github.com/CocoaPods/CocoaPods/issues/9075) + +## 1.8.0.beta.1 (2019-08-05) + +##### Enhancements + +* Allow Algolia search for CDNSource + [Igor Makarov](https://github.com/igor-makarov) + [#9015](https://github.com/CocoaPods/CocoaPods/issues/9015) + [#9046](https://github.com/CocoaPods/CocoaPods/pull/9046) + [Core#569](https://github.com/CocoaPods/Core/pull/569) + +* Using `repo push` now pushes to the current repo branch (`HEAD`) instead of `master` + [Jhonatan Avalos](https://github.com/baguio) + [#8630](https://github.com/CocoaPods/CocoaPods/pull/8630) + +* Add support for UI test specs with `test_type` value `:ui` + [Yavuz Nuzumlali](https://github.com/manuyavuz) + [#9002](https://github.com/CocoaPods/CocoaPods/pull/9002) + [Core#562](https://github.com/CocoaPods/Core/pull/562) + +* Replace git-based `MasterSource` with CDN-based `TrunkSource` + [Igor Makarov](https://github.com/igor-makarov) + [#8923](https://github.com/CocoaPods/CocoaPods/pull/8923) + [Core#552](https://github.com/CocoaPods/Core/pull/552) + +* Integrate a pod into a custom project name if specified. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) & [Sebastian Shanus](https://github.com/sebastianv1) + [#8939](https://github.com/CocoaPods/CocoaPods/pull/8939) + +* Performance optimization for large number of files related to cleaning sandbox directory during installation + [hovox](https://github.com/hovox) + [#8797](https://github.com/CocoaPods/CocoaPods/issues/8797) + +* Add support for Specification Info.plist DSL + [Eric Amorde](https://github.com/amorde) + [#8753](https://github.com/CocoaPods/CocoaPods/issues/8753) + [#3032](https://github.com/CocoaPods/CocoaPods/issues/3032) + +* Fix target definition display name for inhibit warnings message. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8935](https://github.com/CocoaPods/CocoaPods/pull/8935) + +* Allow using an application defined by an app spec as the app host for a test spec. + [jkap](https://github.com/jkap) + [Samuel Giddins](https://github.com/segiddins) + [#8654](https://github.com/CocoaPods/CocoaPods/pull/8654) + +* Speed up dependency resolution when there are many requirements for the same pod + or many versions that do not satisfy the given requirements. + [Samuel Giddins](https://github.com/segiddins) + +* Add support for pods in abstract-only targets to be installed. + [Samuel Giddins](https://github.com/segiddins) + +* Emit a warning when attempting to integrate dynamic frameworks into command line tool targets + [Eric Amorde](https://github.com/amorde) + [#6493](https://github.com/CocoaPods/CocoaPods/issues/6493) + +* Always suggest `pod repo update` on dependency resolution conflict, + unless repo update was specifically requested. + [Artem Sheremet](https://github.com/dotdoom) + [#8768](https://github.com/CocoaPods/CocoaPods/pull/8768) + +* Set Default Module for Storyboards in resource bundle targets. + [James Treanor](https://github.com/jtreanor) + [#8890](https://github.com/CocoaPods/CocoaPods/pull/8890) + +* Print correct platform name when inferring target platform. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8916](https://github.com/CocoaPods/CocoaPods/pull/8916) + +* Do not re-write sandbox files if they have not changed. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8983](https://github.com/CocoaPods/CocoaPods/pull/8983) + +* Added option to skip Pods.xcodeproj generation + [Itay Brenner](https://github.com/itaybre) + [8872](https://github.com/CocoaPods/CocoaPods/pull/8872) + +##### Bug Fixes + +* Update symlink script to prevent duplicate files + [Alex Coomans](https://github.com/drcapulet) + [#9035](https://github.com/CocoaPods/CocoaPods/pull/9035) + +* Use correct `header_mappings_dir` for subspecs + [Alex Coomans](https://github.com/drcapulet) + [#9019](https://github.com/CocoaPods/CocoaPods/pull/9019) + +* Make CDNSource show up in `pod repo env` + [Igor Makarov](https://github.com/igor-makarov) + [#9016](https://github.com/CocoaPods/CocoaPods/pull/9016) + +* Fix regenerating aggregate targets for incremental installation. + [Sebastian Shanus](https://github.com/sebastianv1) + [#9009](https://github.com/CocoaPods/CocoaPods/pull/9009) + +* Fix heuristic for determining whether the source URL to be added is CDN + [Igor Makarov](https://github.com/igor-makarov) + [#9000](https://github.com/CocoaPods/CocoaPods/issues/9000) + [#8999](https://github.com/CocoaPods/CocoaPods/issues/8999) + +* Fix set `cache_root` from config file error + [tripleCC](https://github.com/tripleCC) + [#8515](https://github.com/CocoaPods/CocoaPods/issues/8515) + +* Set default build configurations for app / test specs when installing with + `integrate_targets: false`, ensuring the `Embed Frameworks` and + `Copy Resources` scripts will copy the necessary build artifacts. + [Samuel Giddins](https://github.com/segiddins) + +* No longer show a warning when using an optional include (`#include?`) to + include the Pods .xcconfig from the base .xcconfig file + [Rob Hudson](https://github.com/robtimp) + +* Remove stale podspecs from 'Local Podspecs' when installing non-local counterparts. + [Pär Strindevall](https://github.com/parski) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8914](https://github.com/CocoaPods/CocoaPods/pull/8914) + +* Fix inheriting search paths for test targets in `init` command. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8868](https://github.com/CocoaPods/CocoaPods/issues/8868) + +* Allow detecting `SWIFT_VERSION` build settings from user targets when some + xcconfig files are missing. + [Samuel Giddins](https://github.com/segiddins) + +* Only return library itself as a framework path for library specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9029](https://github.com/CocoaPods/CocoaPods/pull/9029) + +* Fix a bug that prevented dependencies in a plugin source from resolving + [Eric Amorde](https://github.com/amorde) + [#8540](https://github.com/CocoaPods/CocoaPods/issues/8540) + +* Store relative project and file paths in the incremental cache. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9041](https://github.com/CocoaPods/CocoaPods/pull/9041) + +* Use correct deployment target for test specs and app specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9040](https://github.com/CocoaPods/CocoaPods/pull/9040) + +* Allow overriding custom xcconfig entries set for compiling a library when + specifying an app or test spec. 
+ [Samuel Giddins](https://github.com/segiddins) + +* Pass a non-browser user agent for social media validation + [Dov Frankel](https://github.com/abbeycode) + [CocoaPods/Core#571](https://github.com/CocoaPods/Core/pull/571) + [#9053](https://github.com/CocoaPods/Cocoapods/pull/9053) + [#9049](https://github.com/CocoaPods/CocoaPods/issues/9049) + +* Do not add CocoaPods script phases to targets that have no paths to embed. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9069](https://github.com/CocoaPods/CocoaPods/pull/9069) + + +## 1.7.5 (2019-07-19) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Do not pass inhibit warnings compiler flags for Swift source files. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#9013](https://github.com/CocoaPods/CocoaPods/issues/9013) + + +## 1.7.4 (2019-07-09) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Handle scheme configuration for specs with unsupported platforms. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8850](https://github.com/CocoaPods/CocoaPods/issues/8850) + + +## 1.7.3 (2019-06-28) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Honor the Swift version set by a dependency pod during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8940](https://github.com/CocoaPods/CocoaPods/issues/8940) + + +## 1.7.2 (2019-06-13) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Prevent crash when configuring schemes for subspecs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8880](https://github.com/CocoaPods/CocoaPods/issues/8880) + +* Attempt to use Swift version for dependencies during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8764](https://github.com/CocoaPods/CocoaPods/issues/8764) + + +## 1.7.1 (2019-05-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Stabilize product reference UUIDs to fix Xcode crashing with incremental installation. + [Sebastian Shanus](https://github.com/sebastianv1) + [#8845](https://github.com/CocoaPods/CocoaPods/pull/8845) + +* Fix a 1.7.0 regression in header directory paths when using static libraries + [Eric Amorde](https://github.com/amorde) + [#8836](https://github.com/CocoaPods/CocoaPods/issues/8836) + + +## 1.7.0 (2019-05-22) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix 1.7.0.rc.2 regression - Resources need to be added for test specs in library builds + [Paul Beusterien](https://github.com/paulb777) + [#8812](https://github.com/CocoaPods/CocoaPods/pull/8812) + +* Configure schemes regardless if they are being shared or not. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8815](https://github.com/CocoaPods/CocoaPods/pull/8815) + +* Update dSYM stripping string matcher for 64-bit only dSYMs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8827](https://github.com/CocoaPods/CocoaPods/issues/8827) + + +## 1.7.0.rc.2 (2019-05-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Don't add resources to a second test_spec pod target build phase + [Paul Beusterien](https://github.com/paulb777) + [#8173](https://github.com/CocoaPods/CocoaPods/issues/8173) + +* Fix 1.7.0.rc.1 static library regression for pods with `header_dir` attribute + [Paul Beusterien](https://github.com/paulb777) + [#8765](https://github.com/CocoaPods/CocoaPods/issues/8765) + +* Scope app spec dependent targets when no dedup'ing targets. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8770](https://github.com/CocoaPods/CocoaPods/pull/8770) + +* Fix embedding static frameworks in extensions while using `use_frameworks!` + [Martin Fiebig](https://github.com/mfiebig) + [#8798](https://github.com/CocoaPods/CocoaPods/pull/8798) + + +## 1.7.0.rc.1 (2019-05-02) + +##### Enhancements + +* Replace Pods project `Dependencies` group with `Development Pods` and `Pods` groups. + [Sebastian Shanus](https://github.com/sebastianv1) + [#8659](https://github.com/CocoaPods/CocoaPods/issues/8659) + +* Add an error message for :podspec pods not matching the version in Pods and on disk + [orta](https://github.com/orta) + [#8676](https://github.com/CocoaPods/CocoaPods/issues/8676) + +##### Bug Fixes + +* Allow insecure loads in requires_app_host's Info.plist + [Paul Beusterien](https://github.com/paulb777) + [#8747](https://github.com/CocoaPods/CocoaPods/pull/8747) + +* Fix a regression for static libraries with a custom module name + [Eric Amorde](https://github.com/amorde) + [#8695](https://github.com/CocoaPods/CocoaPods/issues/8695) + +* Fix target cache key SPECS key ordering. + [Sebastian Shanus](https://github.com/sebastianv1) + [#8657](https://github.com/CocoaPods/CocoaPods/issues/8657) + +* Fix regression not compiling xcdatamodeld files in static frameworks + [Paul Beusterien](https://github.com/paulb777) + [#8702](https://github.com/CocoaPods/CocoaPods/issues/8702) + +* Fix: AppIcon not found when executing `pod lib lint` + [Itay Brenner](https://github.com/itaybre) + [#8648](https://github.com/CocoaPods/CocoaPods/issues/8648) + + +## 1.7.0.beta.3 (2019-03-28) + +##### Enhancements + +* Adds support for referring to other podspecs during validation + [Orta Therox](https://github.com/orta) + [#8536](https://github.com/CocoaPods/CocoaPods/pull/8536) + +##### Bug Fixes + +* Deintegrate deleted targets even if `incremental_installation` is turned on. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) & [Doug Mead](https://github.com/dmead28) + [#8638](https://github.com/CocoaPods/CocoaPods/pull/8638) + +* Reduce the probability of multiple project UUID collisions. + [Sebastian Shanus](https://github.com/sebastianv1) + [#8636](https://github.com/CocoaPods/CocoaPods/pull/8636) + +* Resolved an issue that could cause spec repo updates to fail on CI servers. + [rpecka](https://github.com/rpecka) + [#7317](https://github.com/CocoaPods/CocoaPods/issues/7317) + + +## 1.7.0.beta.2 (2019-03-08) + +##### Enhancements + +* Integrate `xcfilelist` input/output paths for script phases. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8585](https://github.com/CocoaPods/CocoaPods/pull/8585) + +##### Bug Fixes + +* Do not warn of `.swift-version` file being deprecated if pod does not use Swift. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8593](https://github.com/CocoaPods/CocoaPods/pull/8593) + +* Generate sibling targets for incremental installation. + [Sebastian Shanus](https://github.com/sebastianv1) & [Igor Makarov](https://github.com/igor-makarov) + [#8577](https://github.com/CocoaPods/CocoaPods/issues/8577) + +* Validator should filter out app specs from subspec analysis. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8592](https://github.com/CocoaPods/CocoaPods/pull/8592) + +* Do not warn that `--swift-version` parameter is deprecated.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8586](https://github.com/CocoaPods/CocoaPods/pull/8586) + +* Include `bcsymbolmap` file output paths into script phase. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8563](https://github.com/CocoaPods/CocoaPods/pull/8563) + +* Copy `bcsymbolmap` files into correct destination to avoid invalid app archives + [florianbuerger](https://github.com/florianbuerger) + [#8558](https://github.com/CocoaPods/CocoaPods/pull/8558) + +* Fix: unset GIT_DIR and GIT_WORK_TREE for git operations + [tripleCC](https://github.com/tripleCC) + [#7958](https://github.com/CocoaPods/CocoaPods/issues/7958) + +* Fix crash when running `pod update` with `--sources` and `--project-directory` + [tripleCC](https://github.com/tripleCC) + [#8565](https://github.com/CocoaPods/CocoaPods/issues/8565) + +* Do not use spaces around variable assignment in generated embed framework script + [florianbuerger](https://github.com/florianbuerger) + [#8548](https://github.com/CocoaPods/CocoaPods/pull/8548) + +* Do not link specs into user targets that are only used by app specs. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.7.0.beta.1 (2019-02-22) + +##### Enhancements + +* Copy `bcsymbolmap` files of a vendored framework. + [dacaiguoguogmail](https://github.com/dacaiguoguogmail) + [#8461](https://github.com/CocoaPods/CocoaPods/issues/8461) + +* Set the path of development pod groups to root directory of the Pod + [Eric Amorde](https://github.com/amorde) + [#8445](https://github.com/CocoaPods/CocoaPods/pull/8445) + [#8503](https://github.com/CocoaPods/CocoaPods/pull/8503) + +* Incremental Pod Installation + Enables only regenerating projects for pod targets that have changed since the previous installation. + This feature is gated by the `incremental_installation` option. + [Sebastian Shanus](https://github.com/sebastianv1) + [#8319](https://github.com/CocoaPods/CocoaPods/issues/8319) + +* Podfile: Add a CDNSource automatically if it's not present, just like git source. + Convenience for CDNSource when specified as `source 'https://cdn.jsdelivr.net/cocoa/'`. + If source doesn't exist, it will be created. + [igor-makarov](https://github.com/igor-makarov) + [#8362](https://github.com/CocoaPods/CocoaPods/pull/8362) + +* Scheme configuration support. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7577](https://github.com/CocoaPods/CocoaPods/issues/7577) + +* Add support for `.rb` extension for Podfiles + [Eric Amorde](https://github.com/amorde) + [#8171](https://github.com/CocoaPods/CocoaPods/issues/8171) + +* Add CDN repo Source to allow retrieving specs from a web URL. + [igor-makarov](https://github.com/igor-makarov) + [#8268](https://github.com/CocoaPods/CocoaPods/issues/8268) (partial beta solution) + +* Multi Pod Project Generation Support. + Support for splitting the pods project into a subproject per pod target, gated by the `generate_multiple_pod_projects` installation option. + [Sebastian Shanus](https://github.com/sebastianv1) + [#8253](https://github.com/CocoaPods/CocoaPods/issues/8253) + +* Don't add main for app specs. + [Derek Ostrander](https://github.com/dostrander) + [#8235](https://github.com/CocoaPods/CocoaPods/pull/8235) + +* Multiple Swift versions support + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8191](https://github.com/CocoaPods/CocoaPods/issues/8191) + +* Adds app spec project integration. 
+ [Derek Ostrander](https://github.com/dostrander) + [#8158](https://github.com/CocoaPods/CocoaPods/pull/8158) + +* Add documentation for the Podfile installation options + [Eric Amorde](https://github.com/amorde) + [#8198](https://github.com/CocoaPods/CocoaPods/issues/8198) + [guides.cocoapods.org #142](https://github.com/CocoaPods/guides.cocoapods.org/issues/142) + +##### Bug Fixes + +* Clean up old integrated framework references. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8296](https://github.com/CocoaPods/CocoaPods/issues/8296) + +* Always update sources specified with the `:source` option when `--repo-update` is specified + [Eric Amorde](https://github.com/amorde) + [#8421](https://github.com/CocoaPods/CocoaPods/issues/8421) + +* Set `showEnvVarsInLog` for script phases only when it's disabled. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8400](https://github.com/CocoaPods/CocoaPods/pull/8400) + +* Fix error when executing `pod list --update --verbose` command + [tripleCC](https://github.com/tripleCC) + [#8404](https://github.com/CocoaPods/CocoaPods/pull/8404) + +* Remove `manifest` attribute from sandbox. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8260](https://github.com/CocoaPods/CocoaPods/pull/8260) + +* Don't have libraries build the app spec. + [Derek Ostrander](https://github.com/dostrander) + [#8244](https://github.com/CocoaPods/CocoaPods/pull/8244) + +* Fix HTTPs -> HTTPS in warning message + [CydeWeys](https://github.com/CydeWeys) + [#8354](https://github.com/CocoaPods/CocoaPods/issues/8354) + +* Add the `FRAMEWORK_SEARCH_PATHS` necessary to import `XCTest` when it is + linked as a weak framework. + [Samuel Giddins](https://github.com/segiddins) + +* Treat `USER_HEADER_SEARCH_PATHS` as a plural build setting. + [Samuel Giddins](https://github.com/segiddins) + [#8451](https://github.com/CocoaPods/CocoaPods/issues/8451) + +* Trying to add a spec repo with a `file://` URL on Ruby 2.6 won't fail with a + git unknown option error. + [Samuel Giddins](https://github.com/segiddins) + +* Fixed test host delegate methods to not warn about unused arguments. + [Jacek Suliga](https://github.com/jmkk) + [#8521](https://github.com/CocoaPods/CocoaPods/pull/8521) + + +## 1.6.2 (2019-05-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Ensure all embedded pod targets are copied over to the host target. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8608](https://github.com/CocoaPods/CocoaPods/issues/8608) + + +## 1.6.1 (2019-02-21) + +##### Enhancements + +* Add `--analyze` option for the linters. + [Paul Beusterien](https://github.com/paulb777) + [#8792](https://github.com/CocoaPods/CocoaPods/issues/8792) + +##### Bug Fixes + +* Properly link system frameworks and weak frameworks into dynamic framework targets. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8493](https://github.com/CocoaPods/CocoaPods/issues/8493) + + +## 1.6.0 (2019-02-07) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.6.0.rc.2 (2019-01-29) + +##### Enhancements + +* None.
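+ As a quick reference for the 1.7.0.beta.1 additions above (the CDN `source` and the `incremental_installation` / `generate_multiple_pod_projects` installation options), a minimal Podfile sketch; the target, pod name and platform are hypothetical placeholders:
+
+ ```ruby
+ # Resolve specs from the CDN source introduced in 1.7.0.beta.1.
+ source 'https://cdn.jsdelivr.net/cocoa/'
+
+ # Opt in to per-pod project generation and incremental installation.
+ install! 'cocoapods',
+          :generate_multiple_pod_projects => true,
+          :incremental_installation => true
+
+ platform :ios, '12.0'
+
+ target 'SomeApp' do
+   use_frameworks!
+   pod 'SomePod' # hypothetical dependency
+ end
+ ```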
+ +##### Bug Fixes + +* Fix linking of vendored libraries and frameworks in pod targets + [Wes Campaigne](https://github.com/Westacular) + [#8453](https://github.com/CocoaPods/CocoaPods/issues/8453) + + +## 1.6.0.rc.1 (2019-01-25) + +##### Enhancements + +* Generate Info.plist files for static frameworks + [Paul Beusterien](https://github.com/paulb777) + [#8287](https://github.com/CocoaPods/CocoaPods/issues/8287) + +##### Bug Fixes + +* Do not force 64-bit architectures on Xcode 10 + [Eric Amorde](https://github.com/amorde) + [#8242](https://github.com/CocoaPods/CocoaPods/issues/8242) + +* Fix running test specs that support iOS 8. + [Jeff Kelley](https://github.com/SlaunchaMan) + [#8286](https://github.com/CocoaPods/CocoaPods/pull/8286) + +* Remove linker flags that linked dynamic libraries & frameworks from the build + settings for pod targets. + [Samuel Giddins](https://github.com/segiddins) + [#8314](https://github.com/CocoaPods/CocoaPods/pull/8314) + +## 1.6.0.beta.2 (2018-10-17) + +##### Enhancements + +* Remove contraction from docs to fix rendering on the website. + [stevemoser](https://github.com/stevemoser) + [#8131](https://github.com/CocoaPods/CocoaPods/pull/8131) + +* Provide an installation option to preserve folder structure + [dacaiguoguogmail](https://github.com/dacaiguoguogmail) + [#8097](https://github.com/CocoaPods/CocoaPods/pull/8097) + +* Nests test specs host apps inside that Pod's directory for cleaner project + navigators. + [Derek Ostrander](https://github.com/dostrander) + +* mark_ruby_file_ref add indent width and tab width config + [dacaiguoguogmail](https://github.com/dacaiguoguogmail) + +* Print an error that will show up in Xcode's issue navigator upon unexpected + failures in the copy resources and embed frameworks script phases. + [Samuel Giddins](https://github.com/segiddins) + +* Validate that all generated `PBXNativeTarget`s contain source files to build, + so specs (including test specs) with no source files won't fail at runtime + due to the lack of a generated executable. + [Samuel Giddins](https://github.com/segiddins) + +* Print better promote message when unable to find a specification. + [Xinyu Zhao](https://github.com/X140Yu) + [#8064](https://github.com/CocoaPods/CocoaPods/issues/8064) + +* Silence warnings in headers for Pods with `inhibit_warnings => true` + [Guillaume Algis](https://github.com/guillaumealgis) + [#6401](https://github.com/CocoaPods/CocoaPods/pull/6401) + +* When resolving a locked dependency, source the spec from the locked + specs repository. + [Samuel Giddins](https://github.com/segiddins) + +* Slightly improve resolution speed for Podfiles that contain multiple targets + with the same dependencies. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Don't generate unencrypted source warnings for localhost. + [Paul Beusterien](https://github.com/paulb777) + [#8156](https://github.com/CocoaPods/CocoaPods/issues/8156) + +* Fix linting when armv7 is included but i386 isn't. + [Paul Beusterien](https://github.com/paulb777) + [#8129](https://github.com/CocoaPods/CocoaPods/issues/8129) + +* Provide an installation option to disable usage of input/output paths. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8073](https://github.com/CocoaPods/CocoaPods/issues/8073) + +* Scope prefix header setting to each test spec. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8046](https://github.com/CocoaPods/CocoaPods/pull/8046) + +* Don't add incomplete subspec subset targets for extensions. 
+ [Paul Beusterien](https://github.com/paulb777) + [#7850](https://github.com/CocoaPods/CocoaPods/issues/7850) + +* Clear out `MACH_O_TYPE` for unit test bundles that use static frameworks. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8031](https://github.com/CocoaPods/CocoaPods/issues/8031) + +* Fix `weak_frameworks` missing regression. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7872](https://github.com/CocoaPods/CocoaPods/issues/7872) + +* Fix line spacing for Swift error message. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8024](https://github.com/CocoaPods/CocoaPods/pull/8024) + +* Improve validation for test_specs on specific platforms + [icecrystal23](https://github.com/icecrystal23) + [#7009](https://github.com/CocoaPods/CocoaPods/issues/7009) + +* Fix running `pod outdated` with externally-sourced pods. + [Samuel Giddins](https://github.com/segiddins) + [#8025](https://github.com/CocoaPods/CocoaPods/issues/8025) + +* Remove codesign suppression + [Jaehong Kang](https://github.com/sinoru) + [#7606](https://github.com/CocoaPods/CocoaPods/issues/7606) + + +## 1.6.0.beta.1 (2018-08-16) + +##### Enhancements + +* Every test spec will have its own xctest bundle. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [Jenn Kaplan](https://github.com/jkap) + [#7908](https://github.com/CocoaPods/CocoaPods/pull/7908) + +* Generate a separate app host per pod. + [Samuel Giddins](https://github.com/segiddins) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8005](https://github.com/CocoaPods/CocoaPods/pull/8005) + +* Add default launch screen storyboard to test app hosts. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7971](https://github.com/CocoaPods/CocoaPods/pull/7971) + +* Always display downloader error message. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7625](https://github.com/CocoaPods/CocoaPods/issues/7625) + +* Warn instead of error when linting if `public_header_files` or + `private_header_files` do not match any files. + [Eric Amorde](https://github.com/amorde) + [#7427](https://github.com/CocoaPods/CocoaPods/issues/7427) + +* Add `--platforms` parameter to `pod spec lint` and `pod lib lint` to specify + which platforms to lint. + [Eric Amorde](https://github.com/amorde) + [#7783](https://github.com/CocoaPods/CocoaPods/issues/7783) + +* Warn if the `git://` protocol is used as the source of a pod. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7705](https://github.com/CocoaPods/CocoaPods/issues/7705) + +* Remove all xcode project state from target objects, + improving project generation performance. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7610](https://github.com/CocoaPods/CocoaPods/pull/7610) + +* Improve performance of Pods project generation by skipping native targets + for which dependent targets have already been added. + [Jacek Suliga](https://github.com/jmkk) + +* Refactor build settings generation to perform much better on large projects. + [Samuel Giddins](https://github.com/segiddins) + +* Make sure the temporary directory used to download a pod is removed, + even if an error is raised. + [augustorsouza](https://github.com/augustorsouza) + +* Avoid unlocking sources on every `pod install` when there are no + plugin post-install hooks for performance reasons. + [Samuel Giddins](https://github.com/segiddins) + +* Change shell script relative paths to use `${PODS_ROOT}` instead of + `${SRCROOT}/Pods`. 
+ [Whirlwind](https://github.com/Whirlwind) + [#7878](https://github.com/CocoaPods/CocoaPods/pull/7878) + +* Set the path of the Pods group in the user project. + [Whirlwind](https://github.com/Whirlwind) + [#7886](https://github.com/CocoaPods/CocoaPods/pull/7886) + [#6194](https://github.com/CocoaPods/CocoaPods/issues/6194) + +* Add a `--deployment` flag to `pod install` that errors if there are any + changes to the Podfile or Lockfile. + [Samuel Giddins](https://github.com/segiddins) + +* Add `--use-modular-headers` flag to the `pod spec lint`, `pod lib lint`, + and `pod repo push` commands. + [Eric Amorde](https://github.com/amorde) + [#7683](https://github.com/CocoaPods/CocoaPods/issues/7683) + +##### Bug Fixes + +* Scope embedded pods to their host targets by their configuration. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#8011](https://github.com/CocoaPods/CocoaPods/issues/8011) + +* Set the `SWIFT_VERSION` on resource bundle targets that contain compiled + sources and use Swift. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7950](https://github.com/CocoaPods/CocoaPods/issues/7950) + +* Do not ignore `--no-overwrite` parameter if a commit message is specified. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7926](https://github.com/CocoaPods/CocoaPods/issues/7926) + +* Generate `-ObjC` in `OTHER_LDFLAGS` for apps with static frameworks. + [Paul Beusterien](https://github.com/paulb777) + [#7946](https://github.com/CocoaPods/CocoaPods/pull/7946) + +* Do not display that a source was changed if it uses different casing. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7883](https://github.com/CocoaPods/CocoaPods/pull/7883) + +* Set `CURRENT_PROJECT_VERSION` for generated app host targets. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7825](https://github.com/CocoaPods/CocoaPods/pull/7825) + +* Properly follow symlinks within macOS universal frameworks. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7587](https://github.com/CocoaPods/CocoaPods/issues/7587) + +* Validator adds a Swift file if any of the pod targets use Swift. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7738](https://github.com/CocoaPods/CocoaPods/issues/7738) + +* Fix `INFOPLIST_FILE` being overridden when set in a podspec's `pod_target_xcconfig`. + [Eric Amorde](https://github.com/amorde) + [#7530](https://github.com/CocoaPods/CocoaPods/issues/7530) + +* Raise an error if user target `SWIFT_VERSION` is missing. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7770](https://github.com/CocoaPods/CocoaPods/issues/7770) + +* Fix the umbrella header import path when `header_dir` is specified in the + podspec and building a static library with modular headers enabled. + [chuganzy](https://github.com/chuganzy) + [#7724](https://github.com/CocoaPods/CocoaPods/pull/7724) + +* Do not symlink headers that belong to test specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7762](https://github.com/CocoaPods/CocoaPods/pull/7762) + +* Do not build pod target if it only contains script phases. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7746](https://github.com/CocoaPods/CocoaPods/issues/7746) + +* Do not try to integrate uncreated test native targets. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7394](https://github.com/CocoaPods/CocoaPods/issues/7394) + +* Attempt to parse `SWIFT_VERSION` from xcconfig during target inspection. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7731](https://github.com/CocoaPods/CocoaPods/issues/7731) + +* Do not crash when creating build settings for a missing user build configuration. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7698](https://github.com/CocoaPods/CocoaPods/pull/7698) + +* Do not overwrite App host info plist when using multiple test specs. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7695](https://github.com/CocoaPods/CocoaPods/pull/7695) + +* Do not include test dependencies' input and output paths. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7688](https://github.com/CocoaPods/CocoaPods/pull/7688) + +* Skip test file accessors for `uses_swift?` and `should_build?` methods. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7671](https://github.com/CocoaPods/CocoaPods/pull/7671) + +* When integrating a vendored framework while building pods as static + libraries, public headers will be found via `FRAMEWORK_SEARCH_PATHS` + instead of via the sandbox headers store. + [Samuel Giddins](https://github.com/segiddins) + +* Improve performance of grouping pods by configuration. + [Samuel Giddins](https://github.com/segiddins) + +* Stop linking frameworks to static libraries to avoid warnings with the new build system. + [Samuel Giddins](https://github.com/segiddins) + [#7570](https://github.com/CocoaPods/CocoaPods/pull/7570) + +* Allow `EXPANDED_CODE_SIGN_IDENTITY` to be unset. + [Keith Smiley](https://github.com/keith) + [#7708](https://github.com/CocoaPods/CocoaPods/issues/7708) + +* Running `pod install` with static library modules no longer causes pods to + be recompiled. + [Samuel Giddins](https://github.com/segiddins) + +* A pod built as a static library linked into multiple targets will only build + as a module when all of the targets it is linked into have opted into it. + [Samuel Giddins](https://github.com/segiddins) + +* Use `CP_HOME_DIR` as the base for all default directories. + [mcfedr](https://github.com/mcfedr) + [#7917](https://github.com/CocoaPods/CocoaPods/pull/7917) + +* Exclude 32-bit architectures from Pod targets when the deployment target is + iOS 11.0 or higher. + [Eric Amorde](https://github.com/amorde) + [#7148](https://github.com/CocoaPods/CocoaPods/issues/7148) + +* Fail gracefully when the analyzer has dependencies to fetch, but has been + told not to fetch them. + [Samuel Giddins](https://github.com/segiddins) + +* Don't generate framework or resource scripts if they will not be used. + [Eric Amorde](https://github.com/amorde) + +* Fix a crash when loading the `macho` gem in certain environments. + [Eric Amorde](https://github.com/amorde) + [#7867](https://github.com/CocoaPods/CocoaPods/issues/7867) + + +## 1.5.3 (2018-05-25) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix compatibility with RubyGems 2.7.7. + [Samuel Giddins](https://github.com/segiddins) + [#7765](https://github.com/CocoaPods/CocoaPods/issues/7765) + [#7766](https://github.com/CocoaPods/CocoaPods/issues/7766) + [#7763](https://github.com/CocoaPods/CocoaPods/issues/7763) + + +## 1.5.2 (2018-05-09) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.5.1 (2018-05-07) + +##### Enhancements + +* Improve performance of the dependency resolver by removing duplicates for dependency nodes. + [Jacek Suliga](https://github.com/jmkk) + +##### Bug Fixes + +* Do not include test dependencies input and output paths. 
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7688](https://github.com/CocoaPods/CocoaPods/pull/7688) + +* Remove [system] declaration attribute from generated module maps. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7589](https://github.com/CocoaPods/CocoaPods/issues/7589) + +* Properly namespace Info.plist names during target installation. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7611](https://github.com/CocoaPods/CocoaPods/pull/7611) + +* Always generate FRAMEWORK_SEARCH_PATHS for vendored_frameworks. + [Paul Beusterien](https://github.com/paulb777) + [#7591](https://github.com/CocoaPods/CocoaPods/issues/7591) + +* Fix modular header access to header_dir's. + [Paul Beusterien](https://github.com/paulb777) + [#7597](https://github.com/CocoaPods/CocoaPods/issues/7597) + +* Fix static framework dependent target double linking without `use_frameworks`. + [Paul Beusterien](https://github.com/paulb777) + [#7592](https://github.com/CocoaPods/CocoaPods/issues/7592) + +* Make modular header private header access consistent with frameworks and static libraries. + [Paul Beusterien](https://github.com/paulb777) + [#7596](https://github.com/CocoaPods/CocoaPods/issues/7596) + +* Inhibit warnings for all dependencies during validation except for the one being validated. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7434](https://github.com/CocoaPods/CocoaPods/issues/7434) + +* Prevent duplicated targets from being stripped out from the framework search paths. + [Liquidsoul](https://github.com/liquidsoul) + [#7644](https://github.com/CocoaPods/CocoaPods/pull/7644) + +* Fix `assetcatalog_generated_info.plist` path in copy resources phase. + [Maxime Le Moine](https://github.com/MaximeLM) + [#7590](https://github.com/CocoaPods/CocoaPods/issues/7590) + +## 1.5.0 (2018-04-04) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Escape double quotes for module map contents + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7549](https://github.com/CocoaPods/CocoaPods/pull/7549) + +* Fix building Swift static library test specs. + [Samuel Giddins](https://github.com/segiddins) + +* Swift static libraries can be used in targets whose search paths are inherited. + [Samuel Giddins](https://github.com/segiddins) + +## 1.5.0.beta.1 (2018-03-23) + +##### Enhancements + +* Add `--exclude-pods` option to `pod update` to allow excluding specific pods from update + [Oleksandr Kruk](https://github.com/0mega) + [#7334](https://github.com/CocoaPods/CocoaPods/issues/7334) + +* Add support for mixed Objective-C and Swift static frameworks + [Paul Beusterien](https://github.com/paulb777) + [#7213](https://github.com/CocoaPods/CocoaPods/issues/7213) + +* Improve `pod install` performance for pods with exact file paths rather than glob patterns + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#7473](https://github.com/CocoaPods/CocoaPods/pull/7473) + +* Display a message when a pod's source has changed during installation + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7464](https://github.com/CocoaPods/CocoaPods/pull/7464) + +* Add support for modular header search paths, including "legacy" support.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7412](https://github.com/CocoaPods/CocoaPods/pull/7412) + +* Set direct and transitive dependency header search paths for pod targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7116](https://github.com/CocoaPods/CocoaPods/pull/7116) + +* Log target names missing host for libraries + [Keith Smiley](https://github.com/keith) + [#7346](https://github.com/CocoaPods/CocoaPods/pull/7346) + +* Add a `--no-overwrite` flag to `pod repo push` to disable overwriting + existing specs that have already been pushed. + [Samuel Giddins](https://github.com/segiddins) + +* Store which specs repo a pod comes from in the lockfile. + [Samuel Giddins](https://github.com/segiddins) + +* Add `set -u` to the copy frameworks and copy resources scripts. + [Keith Smiley](https://github.com/keith) + [#7180](https://github.com/CocoaPods/CocoaPods/pull/7180) + +* Allow integrating into static library targets without attempting to copy + resources or embed frameworks unless `UNLOCALIZED_RESOURCES_FOLDER_PATH` + or `FRAMEWORKS_FOLDER_PATH` is set. + [Samuel Giddins](https://github.com/segiddins) + +* Change color scheme of `pod outdated` from red-yellow-green to red-blue-green to be more colorblind friendly + [iv-mexx](https://github.com/iv-mexx) + [#7372](https://github.com/CocoaPods/CocoaPods/issues/7372) + +* Add support for integrating swift pods as static libraries. + [Danielle Tomlinson](https://github.com/dantoml) + [Samuel Giddins](https://github.com/segiddins) + [#6899](https://github.com/CocoaPods/CocoaPods/issues/6899) + +* Document format of POD_NAMES in pod update + [mrh-is](https://github.com/mrh-is) + +* Update validator to stream output as xcodebuild runs + [abbeycode](https://github.com/abbeycode) + [#7040](https://github.com/CocoaPods/CocoaPods/issues/7040) + +##### Bug Fixes + +* Create a generic Info.plist file for test targets + Use xcode default `PRODUCT_MODULE_NAME` for generated test targets + [Paul Zabelin](https://github.com/paulz) + [#7506](https://github.com/CocoaPods/CocoaPods/issues/7506) + +* Prevent `xcassets` compilation from stomping over the apps `xcassets` + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7003](https://github.com/CocoaPods/CocoaPods/issues/7003) + +* Fix script phase output path for `.xcasset` resources + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7511](https://github.com/CocoaPods/CocoaPods/issues/7511) + +* Fix `PRODUCT_MODULE_NAME` for generated test targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7507](https://github.com/CocoaPods/CocoaPods/issues/7507) + +* Ensure `SWIFT_VERSION` is set for test only pod targets during validation + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7498](https://github.com/CocoaPods/CocoaPods/issues/7498) + +* Fix iOS test native target signing settings + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7504](https://github.com/CocoaPods/CocoaPods/pull/7504) + +* Clear input/output paths if they exceed an arbitrary limit + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7362](https://github.com/CocoaPods/CocoaPods/issues/7362) + +* Warn instead of throwing an exception when a development pod specifies an invalid license file path + [Eric Amorde](https://github.com/amorde) + [#7377](https://github.com/CocoaPods/CocoaPods/issues/7377) + +* Better static frameworks transitive dependency error checking + [Paul Beusterien](https://github.com/paulb777) + 
[#7352](https://github.com/CocoaPods/CocoaPods/issues/7352) + +* Always update input/output paths even if they are empty + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7368](https://github.com/CocoaPods/CocoaPods/pull/7368) + +* Unique all available pre-release versions when displaying + [Samuel Giddins](https://github.com/segiddins) + [#7353](https://github.com/CocoaPods/CocoaPods/pull/7353) + +* Do not attempt compilation for pods with no sources and skipping import validation + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7336](https://github.com/CocoaPods/CocoaPods/issues/7336) + +* Avoid adding copy resources and frameworks script phases when those phases + would not copy anything. + [Keith Smiley](https://github.com/keith) + [Samuel Giddins](https://github.com/segiddins) + +* Speed up `pod install` times by up to 50% for very large project. + [Samuel Giddins](https://github.com/segiddins) + +* Avoid dependency resolution conflicts when a pod depends upon a local pod. + [Samuel Giddins](https://github.com/segiddins) + +* Fix legacy header search paths that broke due to #7116 and #7412. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7445](https://github.com/CocoaPods/CocoaPods/pull/7445) + +* Stop adding header search paths that do not contain any headers. + [Samuel Giddins](https://github.com/segiddins) + +* Do not warn when http source uses `file:///` URI scheme + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7460](https://github.com/CocoaPods/CocoaPods/issues/7460) + +* Remove bogus `PROVISIONING_PROFILE_SPECIFIER` value from Pods project. + [Ruenzuo](https://github.com/Ruenzuo) + [#6964](https://github.com/CocoaPods/CocoaPods/issues/6964) + +* Fix returning absolute paths from glob, fixes issue with static framework and public headers. + [Morgan McKenzie](https://github.com/rmtmckenzie) + [#7463](https://github.com/CocoaPods/CocoaPods/issues/7463) + +* Improve messages when integrating Swift pods as static libraries. + [Marcelo Fabri](https://github.com/marcelofabri) + [#7495](https://github.com/CocoaPods/CocoaPods/issues/7495) + +## 1.4.0 (2018-01-18) + +##### Enhancements + +* Show warning when Pod source uses unencrypted HTTP + [KrauseFx](https://github.com/KrauseFx) + [#7293](https://github.com/CocoaPods/CocoaPods/issues/7293) + +##### Bug Fixes + +* Do not include test spec resources and framework paths of dependent targets into test scripts + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7318](https://github.com/CocoaPods/CocoaPods/pull/7318) + +* Restore `development_pod_targets` public method in installer + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7292](https://github.com/CocoaPods/CocoaPods/pull/7292) + +* Fix resolution when multiple sources provide the same pods, and there are + (potential) dependencies between the sources. + [Samuel Giddins](https://github.com/segiddins) + [#7031](https://github.com/CocoaPods/CocoaPods/issues/7031) + +* Ensure that externally-sourced (e.g. local & git) pods are allowed to resolve + to prerelease versions. 
+ [segiddins](https://github.com/segiddins) + +## 1.4.0.rc.1 (2017-12-16) + +##### Enhancements + +* Integrate `swift_version` DSL support into pod targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7134](https://github.com/CocoaPods/CocoaPods/issues/7134) + +* Add color indication to output of `pod outdated` + [iv-mexx](https://github.com/iv-mexx) + [#7204](https://github.com/CocoaPods/CocoaPods/pull/7204) + +* Set syntax of podspecs from development pods to Ruby when appropriate + [Eric Amorde](https://github.com/amorde) + [#7278](https://github.com/CocoaPods/CocoaPods/pull/7278) + +* Add support for editing the podspec, license, README, license, and docs of local development pods + [Eric Amorde](https://github.com/amorde) + [#7093](https://github.com/CocoaPods/CocoaPods/pull/7093) + +* Show warning when SDK provider tries to push a version with an unencrypted HTTP source + [KrauseFx](https://github.com/KrauseFx) + [#7250](https://github.com/CocoaPods/CocoaPods/pull/7250) + +##### Bug Fixes + +* Deduplicate output path file names for resources and frameworks + [Eric Amorde](https://github.com/amorde) + [#7259](https://github.com/CocoaPods/CocoaPods/issues/7259) + +* Allow installation of a pod with its own Swift version on multiple targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7261](https://github.com/CocoaPods/CocoaPods/pull/7261) + +* Quote framework names in OTHER_LDFLAGS + [Tyler Stromberg](https://github.com/AquaGeek) + [#7185](https://github.com/CocoaPods/CocoaPods/issues/7185) + +* Fix static framework archive regression from #7187 + [Paul Beusterien](https://github.com/paulb777) + [#7225](https://github.com/CocoaPods/CocoaPods/issues/7225) + +* Install resource bundles and embed frameworks for every test target's configuration + [Nickolay Tarbayev](https://github.com/tarbayev) + [#7012](https://github.com/CocoaPods/CocoaPods/issues/7012) + +* Set `SWIFT_VERSION` to test native targets during validation + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7216](https://github.com/CocoaPods/CocoaPods/pull/7216) + +* Add copied resources' paths to "Copy Pods Resources" output file list + [igor-makarov](https://github.com/igor-makarov) + [#6936](https://github.com/CocoaPods/CocoaPods/issues/6936) + +* Do not link system frameworks of test specs to library targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7205](https://github.com/CocoaPods/CocoaPods/pull/7205) + +* Be more lenient when stripping frameworks and dSYMs for non fat binaries + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7196](https://github.com/CocoaPods/CocoaPods/issues/7196) + [#5854](https://github.com/CocoaPods/CocoaPods/issues/5854) + +* Do not display script phases warnings multiple times per platform + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7193](https://github.com/CocoaPods/CocoaPods/pull/7193) + +* Fix unnecessary whole project recompilation with static frameworks + [Vladimir Gorbenko](https://github.com/volodg) + [#7187](https://github.com/CocoaPods/CocoaPods/issues/7187) + +* Prevent passing empty string to git when running `pod repo update --silent` + [Jon Sorrells](https://github.com/jonsorrells) + [#7176](https://github.com/CocoaPods/CocoaPods/issues/7176) + +* Do not propagate test spec frameworks and libraries into pod target xcconfig + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7172](https://github.com/CocoaPods/CocoaPods/issues/7172) + +* Set language to Swift for test native targets if any 
dependencies use Swift + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7170](https://github.com/CocoaPods/CocoaPods/issues/7170) + +* Prevent multiple script phases from stripping vendored dSYM + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7166](https://github.com/CocoaPods/CocoaPods/pull/7166) + +* Static library headers should all be `Project` in Xcode header build phase + [Paul Beusterien](https://github.com/paulb777) + [#4496](https://github.com/CocoaPods/CocoaPods/issues/4496) + +* Fix archiving apps with static frameworks + [Paul Beusterien](https://github.com/paulb777) + [#7158](https://github.com/CocoaPods/CocoaPods/issues/7158) + +## 1.4.0.beta.2 (2017-10-24) + +##### Enhancements + +* Integrate execution position for shell script phases + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7101](https://github.com/CocoaPods/CocoaPods/pull/7101) + +* Add support to integrate script phases from podspecs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7092](https://github.com/CocoaPods/CocoaPods/pull/7092) + +* Add support for preventing pch file generation with the skip_pch podspec attribute + [Paul Beusterien](https://github.com/paulb777) + [#7044](https://github.com/CocoaPods/CocoaPods/pull/7044) + +* Add app host support for test specs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6953](https://github.com/CocoaPods/CocoaPods/issues/6953) + +* Add support for resources in source static library frameworks + [Paul Beusterien](https://github.com/paulb777) + [#7100](https://github.com/CocoaPods/CocoaPods/pull/7100) + +##### Bug Fixes + +* Copy .swiftmodule into static_frameworks to enable access to Swift static frameworks + [Paul Beusterien](https://github.com/paulb777) + [#7140](https://github.com/CocoaPods/CocoaPods/issues/7140) + +* Fix docs for prefix header paths + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7149](https://github.com/CocoaPods/CocoaPods/pull/7149) + +* Fix integration `prefix_header_file` with test specs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7147](https://github.com/CocoaPods/CocoaPods/pull/7147) + +* Set the default Swift version to 3.2 during validation + [Victor Hugo Barros](https://github.com/heyzooi) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7136](https://github.com/CocoaPods/CocoaPods/pull/7136) + +* Better warning message for which Swift version was used during validation + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7121](https://github.com/CocoaPods/CocoaPods/issues/7121) + +* Fix static_framework Swift pod dependencies and implement pod access to dependent vendored_framework modules + [Paul Beusterien](https://github.com/paulb777) + [#7117](https://github.com/CocoaPods/CocoaPods/issues/7117) + +* Strip vendored dSYMs during embed script phase + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7111](https://github.com/CocoaPods/CocoaPods/issues/7111) + +* Warn when a pod that was added or changed includes script phases + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7110](https://github.com/CocoaPods/CocoaPods/pull/7110) + +* Build pod targets with script phases and integrate them properly + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7104](https://github.com/CocoaPods/CocoaPods/pull/7104) + +* Do not set a `CODE_SIGN_IDENTITY` for macOS app hosts or xctest bundles + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7103](https://github.com/CocoaPods/CocoaPods/pull/7103) + +* Fix 
framework and resources paths caching + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7068](https://github.com/CocoaPods/CocoaPods/pull/7068) + +* Build subspecs in static frameworks without error + [Paul Beusterien](https://github.com/paulb777) + [#7058](https://github.com/CocoaPods/CocoaPods/pull/7058) + +* Ensure `SYMROOT` is properly set for all user configurations + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7081](https://github.com/CocoaPods/CocoaPods/issues/7081) + +## 1.4.0.beta.1 (2017-09-24) + +##### Enhancements + +* Do not force include the master spec repo if plugins provide sources + [Eric Amorde](https://github.com/amorde) + [#7033](https://github.com/CocoaPods/CocoaPods/pull/7033) + +* Add custom shell script integration from Podfile + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6820](https://github.com/CocoaPods/CocoaPods/pull/6820) + +* Show full requirement trees when a version conflict is encountered during + dependency resolution. + [Samuel Giddins](https://github.com/segiddins) + +* Add support for source static library frameworks + [Paul Beusterien](https://github.com/paulb777) + [#6811](https://github.com/CocoaPods/CocoaPods/pull/6811) + +* Add Private Header support to static frameworks + [Paul Beusterien](https://github.com/paulb777) + [#6969](https://github.com/CocoaPods/CocoaPods/pull/6969) + +* For source static frameworks, include frameworks from dependent targets and libraries in OTHER_LDFLAGS + [Paul Beusterien](https://github.com/paulb777) + [#6988](https://github.com/CocoaPods/CocoaPods/pull/6988) + +##### Bug Fixes + +* Deduplicate test specs correctly from pod variants and targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7036](https://github.com/CocoaPods/CocoaPods/pull/7036) + +* Do not merge `pod_target_xcconfig` from test specs into non test xcconfigs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7037](https://github.com/CocoaPods/CocoaPods/pull/7037) + +* Wrap `$PODS_CONFIGURATION_BUILD_DIR` and `$PODS_BUILD_DIR` with curlies + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7048](https://github.com/CocoaPods/CocoaPods/pull/7048) + +* Fix common paths sometimes calculating incorrectly + [amorde](https://github.com/amorde) + [#7028](https://github.com/CocoaPods/CocoaPods/pull/7028) + +* Do not code sign OSX targets for testing bundles + [Justin Martin](https://github.com/justinseanmartin) + [#7027](https://github.com/CocoaPods/CocoaPods/pull/7027) + +* Ensure a unique ID is generated for each resource bundle + [Justin Martin](https://github.com/justinseanmartin) + [#7015](https://github.com/CocoaPods/CocoaPods/pull/7015) + +* Do not include settings from file accessors of test specs into aggregate xcconfigs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7019](https://github.com/CocoaPods/CocoaPods/pull/7019) + +* Use the resolver to identify which pod targets are test only + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [Justin Martin](https://github.com/justinseanmartin) + [#7014](https://github.com/CocoaPods/CocoaPods/pull/7014) + +* Perform code signing on xctest bundles in the Pods project generated by a test spec + [Justin Martin](https://github.com/justinseanmartin) + [#7013](https://github.com/CocoaPods/CocoaPods/pull/7013) + +* Exclude test resource and framework paths from aggregate targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#7000](https://github.com/CocoaPods/CocoaPods/pull/7000) + +* Wrap platform warning 
message with quotes + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6968](https://github.com/CocoaPods/CocoaPods/pull/6968) + +* Wire dependencies for pod targets not part of any aggregate target + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6948](https://github.com/CocoaPods/CocoaPods/pull/6948) + +* Fix validation warnings when using --swift-version + [Danielle Tomlinson](https://github.com/dantoml) + [#6971](https://github.com/CocoaPods/CocoaPods/pull/6971) + +* Fix xcconfig boolean merging when substrings include yes or no + [Paul Beusterien](https://github.com/paulb777) + [#6997](https://github.com/CocoaPods/CocoaPods/pull/6997) + +* Filter out subset dependent targets from FRAMEWORK_SEARCH_PATHS + [Paul Beusterien](https://github.com/paulb777) + [#7002](https://github.com/CocoaPods/CocoaPods/pull/7002) + +* Propagate HEADER_SEARCH_PATHS settings from search paths + [Paul Beusterien](https://github.com/paulb777) + [#7006](https://github.com/CocoaPods/CocoaPods/pull/7006) + +## 1.3.1 (2017-08-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Do not use `--delete` when copying resources to app target folder + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6927](https://github.com/CocoaPods/CocoaPods/issues/6927) + +## 1.3.0 (2017-08-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Ensure transitive dependencies are linked to test targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6917](https://github.com/CocoaPods/CocoaPods/pull/6917) + +* Properly install pod targets with test specs within subspecs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6915](https://github.com/CocoaPods/CocoaPods/pull/6915) + +* Add `--skip-tests` support to the `push` command + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6893](https://github.com/CocoaPods/CocoaPods/pull/6893) + +## 1.3.0.rc.1 (2017-07-27) + +##### Enhancements + +* None.
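+ A short sketch of the script phase integration noted under 1.4.0.beta.1 and 1.4.0.beta.2 above (Podfile `script_phase`, podspec `script_phases`, and the execution position option); target, pod and phase names and the echoed commands are placeholders:
+
+ ```ruby
+ # Podfile: attach a custom shell script phase to a user target.
+ target 'SomeApp' do
+   pod 'SomePod'
+   script_phase :name => 'Log Build', :script => 'echo "building ${TARGET_NAME}"'
+ end
+ ```
+
+ ```ruby
+ # Podspec fragment: ship a script phase with the pod itself, ordered before
+ # compilation via the execution position integration (other required podspec
+ # attributes are omitted for brevity).
+ Pod::Spec.new do |s|
+   s.name          = 'SomePod'
+   s.version       = '1.0.0'
+   s.source_files  = 'Sources/**/*.{h,m,swift}'
+   s.script_phases = [
+     { :name => 'Generate Constants',
+       :script => 'echo "generating" > /dev/null',
+       :execution_position => :before_compile }
+   ]
+ end
+ ```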
+ +##### Bug Fixes + +* Cache result of resource and framework paths + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6893](https://github.com/CocoaPods/CocoaPods/pull/6893) + +* Ensure source urls are set when spec has subspecs with dependencies + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6888](https://github.com/CocoaPods/CocoaPods/pull/6888) + +## 1.3.0.beta.3 (2017-07-19) + +##### Enhancements + +* Protect rsync tmp files from being deleted if two targets sync at the same time + [Justin Martin](https://github.com/justinseanmartin) + [#6873](https://github.com/CocoaPods/CocoaPods/pull/6873) + +* Include test schemes within library schemes + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6765](https://github.com/CocoaPods/CocoaPods/issues/6765) + +* Truncate extra groups in Development Pods when they are parents of all files + [Eric Amorde](https://github.com/amorde) + [#6814](https://github.com/CocoaPods/CocoaPods/pull/6814) + +* Do not re-write generated files that have not changed + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [dingjingpisces2015](https://github.com/dingjingpisces2015) + [#6825](https://github.com/CocoaPods/CocoaPods/pull/6825) + +##### Bug Fixes + +* Set the test xcconfig file to resource bundles used only by tests + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6886](https://github.com/CocoaPods/CocoaPods/pull/6886) + +* Integrate test targets to embed frameworks and resources + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6828](https://github.com/CocoaPods/CocoaPods/pull/6828) + +* Ensure resource bundle and test dependencies are set for test native targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6829](https://github.com/CocoaPods/CocoaPods/pull/6829) + +* Provide a better error message when references are missing for non-source files + [David Airapetyan](https://github.com/davidair) + [#4887](https://github.com/CocoaPods/CocoaPods/issues/4887) + +* Select unique module_name(s) across host target's and embedded targets' pod targets + [Anand Biligiri](https://github.com/abiligiri) + [#6711](https://github.com/CocoaPods/CocoaPods/issues/6711) + +## 1.3.0.beta.2 (2017-06-22) + +##### Enhancements + +* Add inputs and outputs for resources script phase + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6806](https://github.com/CocoaPods/CocoaPods/pull/6806) + +* Simplify logic around framework input and output paths + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6803](https://github.com/CocoaPods/CocoaPods/pull/6803) + +* Add inputs and outputs to check manifest lock and embed framework script phases + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6797](https://github.com/CocoaPods/CocoaPods/issues/6797) + +##### Bug Fixes + +* Remove 0.34 migration for a small boost in `pod install` time + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6783](https://github.com/CocoaPods/CocoaPods/pull/6783) + +* Use a cache when figuring out if a pod target is test only + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6787](https://github.com/CocoaPods/CocoaPods/pull/6787) + +## 1.3.0.beta.1 (2017-06-06) + +##### Enhancements + +* Add validator support to run test specs during lint + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6753](https://github.com/CocoaPods/CocoaPods/pull/6753) + +* Fix to include proper runtime search paths for test native targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) +
[#6727](https://github.com/CocoaPods/CocoaPods/pull/6727) + +* Aggregate targets should not include pod targets only used by tests + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6726](https://github.com/CocoaPods/CocoaPods/pull/6726) + +* Add support for test target creation in the pods project generator (see the `test_spec` sketch below) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6703](https://github.com/CocoaPods/CocoaPods/pull/6703) + +* Copy dSYM for vendored frameworks. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#1698](https://github.com/CocoaPods/CocoaPods/issues/1698) + +* Prevents the need for a .swift-version file in Objective-C pods + [Austin Emmons](https://github.com/atreat) + [#6742](https://github.com/CocoaPods/CocoaPods/issues/6742) + +* Add an ipc command `podfile_json` that converts a Podfile to JSON + [Dacaiguoguo](https://github.com/dacaiguoguogmail) + [#6779](https://github.com/CocoaPods/CocoaPods/pull/6779) + +##### Bug Fixes + +* Link `swiftSwiftOnoneSupport` for test xcconfigs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6769](https://github.com/CocoaPods/CocoaPods/pull/6769) + +* Do not double add search paths to test xcconfig from parent + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6768](https://github.com/CocoaPods/CocoaPods/pull/6768) + +* Ensure product name for tests is not overridden by custom build settings + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6766](https://github.com/CocoaPods/CocoaPods/pull/6766) + +* Do not use the same product name for test targets + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6762](https://github.com/CocoaPods/CocoaPods/pull/6762) + +* Use unique temp folder during lint for parallel execution + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5117](https://github.com/CocoaPods/CocoaPods/issues/5117) + +* Stop adding `$(inherited)` for every static library linked + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6756](https://github.com/CocoaPods/CocoaPods/pull/6756) + +* Settings for dependent targets should include the parent target for test xcconfigs + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6755](https://github.com/CocoaPods/CocoaPods/pull/6755) + +* Only check for valid Swift version for pod targets that use Swift + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6733](https://github.com/CocoaPods/CocoaPods/pull/6733) + +* Fix pod install error from 1.2.1 when working with static lib-only projects. + [Ben Asher](https://github.com/benasher44) + [#6673](https://github.com/CocoaPods/CocoaPods/issues/6673) + +* Use `git!` when executing the `push` command in order to raise an informative error and set the exit code. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6700](https://github.com/CocoaPods/CocoaPods/pull/6700) + +* Make copy resources echoes always return true to work around issue where Xcode stops handling build script output greater than \~440 characters (rdar://30607704). + [postmechanical](https://github.com/postmechanical) + [#6595](https://github.com/CocoaPods/CocoaPods/issues/6595) + +* Inherit pod defined values for `SWIFT_ACTIVE_COMPILATION_CONDITIONS`. + [Louis D'hauwe](https://github.com/louisdh) + [#6629](https://github.com/CocoaPods/CocoaPods/pull/6629) + +* Delete extraneous files in rsync destination. + [jgavris](https://github.com/jgavris) + [#6694](https://github.com/CocoaPods/CocoaPods/pull/6694) + +## 1.2.1 (2017-04-11) + +##### Enhancements + +* None.
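+ A minimal podspec sketch for the test spec support referenced in 1.3.0.beta.1 above (validator lint support and test target creation); the pod name and file globs are placeholders, and other required podspec attributes are omitted:
+
+ ```ruby
+ Pod::Spec.new do |s|
+   s.name         = 'SomePod'
+   s.version      = '1.0.0'
+   s.source_files = 'Sources/**/*.swift'
+
+   # Declares a unit test spec; a dedicated xctest target is generated for it.
+   s.test_spec 'Tests' do |test_spec|
+     test_spec.source_files = 'Tests/**/*.swift'
+   end
+ end
+ ```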
+ +##### Bug Fixes + +* No master specs cloning when not needed for `pod lib lint`. + [Alfredo Delli Bovi](https://github.com/adellibovi) + [#6154](https://github.com/CocoaPods/CocoaPods/issues/6154) + + +## 1.2.1.rc.1 (2017-04-05) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix generating `LD_RUNPATH_SEARCH_PATHS` without `use_frameworks!` but consuming a vendored dynamic artifact. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6596](https://github.com/CocoaPods/CocoaPods/issues/6596) + +* Fix building with static lib subprojects (previously only supported framework subprojects). + [Ben Asher](https://github.com/benasher44) + [#5830](https://github.com/CocoaPods/CocoaPods/issues/5830) + [#6306](https://github.com/CocoaPods/CocoaPods/issues/6306) + +* Fix regression from #6457 to ensure a correct error message is given when a spec is not found. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6457](https://github.com/CocoaPods/CocoaPods/issues/6457) + +* Provide a better error message if a podspec is found but cannot be parsed. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6457](https://github.com/CocoaPods/CocoaPods/issues/6457) + +* Only share pod target xcscheme if present during validation. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6558](https://github.com/CocoaPods/CocoaPods/pull/6558) + +* Properly compile storyboard for watch device family. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6516](https://github.com/CocoaPods/CocoaPods/issues/6516) + +* Support git progress for `pod repo update` and `pod install --repo-update` + [Alfredo Delli Bovi](https://github.com/adellibovi) + [#6525](https://github.com/CocoaPods/CocoaPods/issues/6525) + +* Return new exit code (31) when spec not found + [Alfredo Delli Bovi](https://github.com/adellibovi) + [#6033](https://github.com/CocoaPods/CocoaPods/issues/6033) + +* Provide better error message when spec not found + [Alfredo Delli Bovi](https://github.com/adellibovi) + [#6033](https://github.com/CocoaPods/CocoaPods/issues/6033) + + +## 1.2.1.beta.1 (2017-03-08) + +##### Enhancements + +* Use red text when pod installation fails + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6534](https://github.com/CocoaPods/CocoaPods/issues/6534) + +* Provide installation option to disable multiple pod sources warnings. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6497](https://github.com/CocoaPods/CocoaPods/pull/6497) + +* Use the colored2 gem instead of colored. + [Orta Therox](https://github.com/orta) + [xcodeproj#463](https://github.com/CocoaPods/Xcodeproj/pull/463) + +* Cache results of dynamic_binary? + [Ken Wigginton](https://github.com/hailstorm350) + [#6434](https://github.com/CocoaPods/CocoaPods/pull/6434) + +* Created `NOMENCLATURE.md` to keep a glossary of the most common terms used in cocoapods. + [Rob Contreras](https://github.com/robcontreras) + [#2379](https://github.com/CocoaPods/CocoaPods/pull/2379) + +##### Bug Fixes + +* Ensure Core Data models get added to the compile sources phase for header generation. + [Ben Asher](https://github.com/benasher44) + [#6259](https://github.com/CocoaPods/CocoaPods/issues/6259) + +* Do not crash when attempting to install pod with no supported targets. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6465](https://github.com/CocoaPods/CocoaPods/issues/6465) + +* Correctly handle `OTHER_LDFLAGS` for targets with inherit search paths and source pods. 
+ [Justin Martin](https://github.com/justinseanmartin) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6481](https://github.com/CocoaPods/CocoaPods/pull/6481) + +* Uses `${PODS_PODFILE_DIR_PATH}` for generated manifest lock script phase. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5499](https://github.com/CocoaPods/CocoaPods/issues/5499) + +* Do not generate `UIRequiredDeviceCapabilities` for `tvOS` Info.plists. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6193](https://github.com/CocoaPods/CocoaPods/issues/6193) + +* Fix integration with vendored static frameworks and libraries. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6477](https://github.com/CocoaPods/CocoaPods/pull/6477) + +* Use `${SRCROOT}` rather than `${PODS_ROOT}` in the generated manifest lock script phase. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5499](https://github.com/CocoaPods/CocoaPods/issues/5499) + +* Fix build phase resource references to point at PBXVariantGroups where relevant. + [Wes Campaigne](https://github.com/Westacular) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6373](https://github.com/CocoaPods/CocoaPods/issues/6373) + +* Correctly set runtime search paths for OSX unit test bundles when using frameworks. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6435](https://github.com/CocoaPods/CocoaPods/pull/6435) + +* Add `--skip-import-validation` to skip linking a pod during lint. + [Samuel Giddins](https://github.com/segiddins) + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5670](https://github.com/CocoaPods/CocoaPods/issues/5670) + +* Updated the colored2 gem (previous version removed from rubygems.org). + [Ben Asher](https://github.com/benasher44) + [#6533](https://github.com/CocoaPods/CocoaPods/pull/6533) + +## 1.2.0 (2017-01-28) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Do not link static frameworks to targets that use `inherit! search_paths`. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6065](https://github.com/CocoaPods/CocoaPods/issues/6065) + + +## 1.2.0.rc.1 (2017-01-13) + +##### Enhancements + +* Show git progress when downloading the CocoaPods Specs repo. + [Danielle Tomlinson](https://github.com/dantoml) + [#5937](https://github.com/CocoaPods/CocoaPods/issues/5937) + +* Move Installer target verification into the Xcode namespace + [Danielle Tomlinson](https://github.com/DanToml) + [#5607](https://github.com/CocoaPods/CocoaPods/pull/5607) + +##### Bug Fixes + +* None. + + +## 1.2.0.beta.3 (2016-12-28) + +##### Enhancements + +* `pod repo push` now accepts the `--swift-version` argument. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6217](https://github.com/CocoaPods/CocoaPods/issues/6217) + +* Output Swift targets when multiple versions of Swift are detected. + [Justin Martin](https://github.com/justinseanmartin) & [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6191](https://github.com/CocoaPods/CocoaPods/issues/6191) + +* [update] adding --sources to specify to only update pods from a repo + [Mark Schall](https://github.com/maschall) + [#5809](https://github.com/CocoaPods/CocoaPods/pull/5809) + +* Add aggregated search paths targets to vendored build settings + [Chris Ortman](https://github.com/chrisortman) + [Johannes Plunien](https://github.com/plu) + [#5512](https://github.com/CocoaPods/CocoaPods/issues/5512) + +* Use fetch and reset rather than a pull when updating specs repos. 
+ [Danielle Tomlinson](https://github.com/dantoml) + [#6206](https://github.com/CocoaPods/CocoaPods/pull/6206) + +##### Bug Fixes + +* Fix default LD_RUNPATH_SEARCH_PATHS for host targets. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6006](https://github.com/CocoaPods/CocoaPods/issues/6006) + +* Fix codesigning issues when targets have spaces. + [Sam Gammon](https://github.com/sgammon) + [#6153](https://github.com/CocoaPods/CocoaPods/issues/6153) + +* Raise an exception if unable to find a reference for a path and handle symlink references. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5427](https://github.com/CocoaPods/CocoaPods/issues/5427) + +* Re-escaped backslashes in embed_frameworks generator + [Harlan Haskins](https://github.com/harlanhaskins) + [#6121](https://github.com/CocoaPods/CocoaPods/issues/6121) + +* Escape spaces in CONFIGURATION_BUILD_DIR when creating header folders symlink + [Dmitry Obukhov](https://github.com/stel) + [#6146](https://github.com/CocoaPods/CocoaPods/pull/6146) + +* Fail gracefully when downloading a podspec in `pod spec lint` fails. + [Samuel Giddins](https://github.com/segiddins) + +* Remove the `const_missing` hack for `Pod::SourcesManager`. + [Samuel Giddins](https://github.com/segiddins) + +* Fixed code signing issue causing lint failure on macOS. + [Paul Cantrell](https://github.com/pcantrell) + [#5645](https://github.com/CocoaPods/CocoaPods/issues/5645) + +* Raise an exception when using a git version prior to 1.8.5. + [Danielle Tomlinson](https://github.com/dantoml) + [#6078](https://github.com/CocoaPods/CocoaPods/issues/6078) + +* Fix framework support for frameworks in sub-projects. + [Ben Asher](https://github.com/benasher44) + [#6123](https://github.com/CocoaPods/CocoaPods/issues/6123) + +* Remove errors that prevent host/extension target mismatches, which Xcode will warn about. + [Ben Asher](https://github.com/benasher44) + [#6173](https://github.com/CocoaPods/CocoaPods/issues/6173) + + +## 1.2.0.beta.1 (2016-10-28) + +##### Enhancements + +* Generate `PODS_TARGET_SRCROOT` build setting for each pod target. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5375](https://github.com/CocoaPods/CocoaPods/issues/5375) + +* Add support for running CocoaPods on Linux. + [Samuel Giddins](https://github.com/segiddins) + +* Use native Ruby ASCII plist parsing and serialization, removing dependencies + on FFI, Xcode, and macOS. + [Samuel Giddins](https://github.com/segiddins) + +* Run codesigning in parallel in the embed frameworks build phase when + `COCOAPODS_PARALLEL_CODE_SIGN` is set to `true`. + [Ben Asher](https://github.com/benasher44) + [#6088](https://github.com/CocoaPods/CocoaPods/pull/6088) + +##### Bug Fixes + +* Add target-device tvOS in copy_resources generator. + [Konrad Feiler](https://github.com/Bersaelor) + [#6052](https://github.com/CocoaPods/CocoaPods/issues/6052) + +* Read the correct `SWIFT_VERSION` when generating target XCConfigs. + [Ben Asher](https://github.com/benasher44) + [#6067](https://github.com/CocoaPods/CocoaPods/issues/6067) + +* Don't explicitly set `ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES` to NO. + [Ben Asher](https://github.com/benasher44) + [#6064](https://github.com/CocoaPods/CocoaPods/issues/6064) + +* Redefine FOUNDATION_EXPORT for C-only pods in umbrella header. + [Chris Ballinger](https://github.com/chrisballinger) + [#6024](https://github.com/CocoaPods/CocoaPods/issues/6024) + + +## 1.1.1 (2016-10-20) + +##### Enhancements + +* None. 
+ +##### Bug Fixes + +* Strip newlines from .swift-version files. + [Danielle Tomlinson](https://github.com/dantoml) + [#6059](https://github.com/CocoaPods/CocoaPods/pull/6059) + + +## 1.1.0 (2016-10-19) + +##### Enhancements + +* Use host target for frameworks of XPC services. + [Ingmar Stein](https://github.com/IngmarStein) + [#6029](https://github.com/CocoaPods/CocoaPods/pull/6029) + +* Use Swift 3.0 by default during validation. + [Danielle Tomlinson](https://github.com/dantoml) + [#6042](https://github.com/CocoaPods/CocoaPods/pull/6042) + +* Exit with non-zero exit status if pod repo update fails + [Uku Loskit](https://github.com/UkuLoskit) + [#6037](https://github.com/CocoaPods/CocoaPods/issues/6037) + +* The validator has an API for accessing which version of Swift was used. + [Orta Therox](https://github.com/orta) + [#6049](https://github.com/CocoaPods/CocoaPods/pull/6049) + +##### Bug Fixes + +* Redefine FOUNDATION_EXPORT for C-only pods in umbrella header. + [Chris Ballinger](https://github.com/chrisballinger) + [#6024](https://github.com/CocoaPods/CocoaPods/issues/6024) + +## 1.1.0.rc.3 (2016-10-11) + +##### Enhancements + +* Cache result of inhibit_warnings and include_in_build_config to speed up pod install. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5934](https://github.com/CocoaPods/CocoaPods/pull/5934) + +* Tell users about the .swift-version file on validation failures. + [Danielle Tomlinson](https://github.com/dantoml) + [#5951](https://github.com/CocoaPods/CocoaPods/pull/5951) + +* Improve performance of PathList.read_file_system + [Heath Borders](https://github.com/hborders) + [#5890](https://github.com/CocoaPods/CocoaPods/issues/5890) + +* Cache result of uses_swift and should_build to speed up pod install. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5837](https://github.com/CocoaPods/CocoaPods/pull/5837) + +* Remove uses of `cd` in generated scripts + [Ben Asher](https://github.com/benasher44) + [#5959](https://github.com/CocoaPods/CocoaPods/pull/5959) + +* Error with helpful message when integrating a pod into targets that have mismatched Swift versions. + [Ben Asher](https://github.com/benasher44) + [#5984](https://github.com/CocoaPods/CocoaPods/pull/5984) + +* Allow users to share pods between Objective-C and Swift targets. + [Danielle Tomlinson](https://github.com/dantoml) + [#5984](https://github.com/CocoaPods/CocoaPods/pull/5984) + +* Allow setting the linting Swift version via `--swift-version=VERSION` + [Danielle Tomlinson](https://github.com/dantoml) + [#5989](https://github.com/CocoaPods/CocoaPods/pull/5989) + +* Greenify pod install success message + [Stephen Hayes](https://github.com/schayes04) + [#5713](https://github.com/CocoaPods/CocoaPods/issues/5713) + +* Update EMBEDDED_CONTENT_CONTAINS_SWIFT flag behaviour based on Xcode version. + [codymoorhouse](https://github.com/codymoorhouse) + [#5732](https://github.com/CocoaPods/CocoaPods/issues/5732) + +##### Bug Fixes + +* Remove special handling for messages apps + [Ben Asher](https://github.com/benasher44) + [#5860](https://github.com/CocoaPods/CocoaPods/issues/5860) + +* Ensure messages apps have an embed frameworks build phase + [Ben Asher](https://github.com/benasher44) + [#5860](https://github.com/CocoaPods/CocoaPods/issues/5860) + +* Fix linting of private pods when using libraries.
+ [Stefan Pühringer](https://github.com/b-ray) + [#5891](https://github.com/CocoaPods/CocoaPods/issues/5891) + + +## 1.1.0.rc.2 (2016-09-13) + +##### Enhancements + +* Use the SWIFT_VERSION when linting pods. To lint with Swift 3.0 + add a Swift Version file. `echo "3.0" >> .swift-version`. + [Danielle Tomlinson](https://github.com/dantoml) + [#5841](https://github.com/CocoaPods/CocoaPods/pull/5841) + +##### Bug Fixes + +* Correctly pass Pod:VERSION in `pod lib create`. + [Danielle Tomlinson](https://github.com/dantoml) + [#5840](https://github.com/CocoaPods/CocoaPods/issues/5840) + + +## 1.1.0.rc.1 (2016-09-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Wrap generated import headers with __OBJC__ to fix C only pods. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#5291](https://github.com/CocoaPods/CocoaPods/issues/5291) + +* Prevent crash when generating acknowledgements when license type is not specified. + [Marcelo Fabri](https://github.com/marcelofabri) + [#5826](https://github.com/CocoaPods/CocoaPods/issues/5826) + +* Pass full path to App.xcworkspace for spec validation, and use `git -C` for `pod repo push` git ops. + [Ben Asher](https://github.com/benasher44) + [#5805](https://github.com/CocoaPods/CocoaPods/issues/5805) + + +## 1.1.0.beta.2 (2016-09-03) + +##### Enhancements + +* Remove references to the pre-1.0 Migrator. + [Danielle Tomlinson](https://github.com/dantoml) + [#5635](https://github.com/CocoaPods/CocoaPods/pull/5635) + +* Improve performance of dependency resolution. + [yanzhiwei147](https://github.com/yanzhiwei147) + [#5510](https://github.com/CocoaPods/CocoaPods/pull/5510) + +* Add support for building Messages applications. + [Ben Asher](https://github.com/benasher44) + [#5726](https://github.com/CocoaPods/CocoaPods/pull/5726) + +* Improved messaging when missing host targets for embedded targets. + Improved support for framework-only projects. + [Ben Asher](https://github.com/benasher44) + [#5733](https://github.com/CocoaPods/CocoaPods/pull/5733) + +* Set ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES when appropriate. + [Ben Asher](https://github.com/benasher44) + [#5732](https://github.com/CocoaPods/CocoaPods/pull/5732) + +* Verify that the embedded target platform and Swift version match the host. + [Ben Asher](https://github.com/benasher44) + [#5747](https://github.com/CocoaPods/CocoaPods/pull/5747) + +* Pass the version of CocoaPods to `pod lib create`'s configure script. + [orta](https://github.com/orta) + [#5787](https://github.com/CocoaPods/CocoaPods/pull/5787) + +* Improve host target detection for embedded targets + in sub-projects. + [Ben Asher](https://github.com/benasher44) + [#5622](https://github.com/CocoaPods/CocoaPods/issues/5622) + +##### Bug Fixes + +* Hash scope suffixes if they are over 50 characters to prevent file paths from being too long. + [Danielle Tomlinson](https://github.com/dantoml) + [#5491](https://github.com/CocoaPods/CocoaPods/issues/5491) + +* Fix codesigning identity on watchOS and tvOS targets. + [Danielle Tomlinson](https://github.com/dantoml) + [#5686](https://github.com/CocoaPods/CocoaPods/issues/5686) + +* Fix SWIFT_VERSION not being read when only defined at the project level. + [Ben Asher](https://github.com/benasher44) + [#5700](https://github.com/CocoaPods/CocoaPods/issues/5700) and [#5737](https://github.com/CocoaPods/CocoaPods/issues/5737) + +* Fix analyzer checking the compatibility of an embedded target with a host that has not been added to the Podfile.
+ [Ben Asher](https://github.com/benasher44) + [#5783](https://github.com/CocoaPods/CocoaPods/issues/5783) + +## 1.1.0.beta.1 (2016-07-11) + +##### Enhancements + +* Move Pods Project generation to an `Xcode` Namespace. + [Daniel Tomlinson](https://github.com/dantoml) + [#5480](https://github.com/CocoaPods/CocoaPods/pull/5480) + +* Add the ability to inhibit swift warnings. + [Peter Ryszkiewicz](https://github.com/pRizz) + [#5414](https://github.com/CocoaPods/CocoaPods/pull/5414) + +* Use `git ls-remote` to skip full clones for branch dependencies. + [Juan Civile](https://github.com/champo) + [#5376](https://github.com/CocoaPods/CocoaPods/issues/5376) + +* [repo/push] --use-json to convert podspecs to JSON format when pushing. + [Mark Schall](https://github.com/maschall) + [#5568](https://github.com/CocoaPods/CocoaPods/pull/5568) + +* Set 'Allow app extension API only' for Messages extensions. + [Boris Bügling](https://github.com/neonichu) + [#5558](https://github.com/CocoaPods/CocoaPods/issues/5558) + +* Accept `pod repo push` with URL instead of only repo name. + [Mark Schall](https://github.com/maschall) + [#5572](https://github.com/CocoaPods/CocoaPods/pull/5572) + +* [Installer] Set the SWIFT_VERSION for CocoaPods generated targets. + [Danielle Tomlinson](https://github.com/DanToml) + [#5540](https://github.com/CocoaPods/CocoaPods/pull/5540) + +* Print message when skipping user project integration. + [Danielle Tomlinson](https://github.com/dantoml) + [#5517](https://github.com/CocoaPods/CocoaPods/issues/5517) + +* Show GitHub Issues that could be related to exceptions. + [Orta Therox](https://github.com/orta) + [#4817](https://github.com/CocoaPods/CocoaPods/issues/4817) + +* Improve handling of app extensions, watch os 1 extensions + and framework targets. + [Ben Asher](https://github.com/benasher44) + [#4203](https://github.com/CocoaPods/CocoaPods/issues/4203) + +* Add a license type to generated acknowledgements file in plist. + [Naoto Kaneko](https://github.com/naoty) + [#5436](https://github.com/CocoaPods/CocoaPods/pull/5436) + +##### Bug Fixes + +* Fix local pod platform conflict error message. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#5052](https://github.com/CocoaPods/CocoaPods/issues/5052) + +* Avoid use of `activesupport` version 5 to stay compatible with macOS system + Ruby. + [Boris Bügling](https://github.com/neonichu) + [#5602](https://github.com/CocoaPods/CocoaPods/issues/5602) + +* Fix installing pods with `use_frameworks` when deduplication is disabled. + [Samuel Giddins](https://github.com/segiddins) + [#5481](https://github.com/CocoaPods/CocoaPods/issues/5481) + +* Running `pod setup --silent` will now properly silence git output while + updating the repository. + [Samuel Giddins](https://github.com/segiddins) + +* Fix linting pods that depend upon `XCTest`. + [Samuel Giddins](https://github.com/segiddins) + [#5321](https://github.com/CocoaPods/CocoaPods/issues/5321) + +* Use `require` instead of `autoload` to solve an issue with loading + `fourflusher`. + [Boris Bügling](https://github.com/neonichu) + [#5445](https://github.com/CocoaPods/CocoaPods/issues/5445) + +* Resolve cyclic dependencies when creating pod targets. + [Juan Civile](https://github.com/champo) + [#5362](https://github.com/CocoaPods/CocoaPods/issues/5362) + +* Fix embedding frameworks in UI Testing bundles. 
+ [Daniel Tomlinson](https://github.com/dantoml) + [#5250](https://github.com/CocoaPods/CocoaPods/issues/5250) + +* Ensure attempting to print a path in the error report doesn't itself error. + [Samuel Giddins](https://github.com/) + [#5541](https://github.com/CocoaPods/CocoaPods/issues/5541) + +* Fix linting with Xcode 8. + [Boris Bügling](https://github.com/neonichu) + [#5529](https://github.com/CocoaPods/CocoaPods/issues/5529) + +* Fix linting with Xcode 8 by disabling it entirely. + [Boris Bügling](https://github.com/neonichu) + [#5528](https://github.com/CocoaPods/CocoaPods/issues/5528) + +* Error during install when there are duplicate library names. + [Daniel Tomlinson](https://github.com/dantoml) + [#4014](https://github.com/CocoaPods/CocoaPods/issues/4014) + +* Make the `Check Pods Manifest.lock` script write errors to STDERR and improve + POSIX shell compatibility. + [Simon Warta](https://github.com/webmaster128) + [#5595](https://github.com/CocoaPods/CocoaPods/pull/5595) + + +## 1.0.1 (2016-06-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Symlink the header folders in the framework bundle's root directory + by a new shell script build phase if `header_mappings_dir` is used + with frameworks. + [Marius Rackwitz](https://github.com/mrackwitz) + [#5313](https://github.com/CocoaPods/CocoaPods/issues/5313) + +* Removed emojis in Build Phases names — as it seems that some third party tools have trouble with them. + [Olivier Halligon](https://github.com/AliSoftware) + [#5382](https://github.com/CocoaPods/CocoaPods/pull/5382) + +* Ensure `Set` is defined before using it. + [Samuel Giddins](https://github.com/segiddins) + [#5287](https://github.com/CocoaPods/CocoaPods/issues/5287) + +* Add --target-device to ibtool invocation for XIBs + [Juan Civile](https://github.com/champo) + [#5282](https://github.com/CocoaPods/CocoaPods/issues/5282) + +* Fix error when executables cannot be found. + [Jan Berkel](https://github.com/jberkel) + [#5319](https://github.com/CocoaPods/CocoaPods/pull/5319) + +* Avoid removing all files when root directory contains unicode characters. + [Marc Boquet](https://github.com/marcboquet) + [#5294](https://github.com/CocoaPods/CocoaPods/issues/5294) + +* Guarding from crash if pod lib create has a + character in the name. + [William Entriken](https://github.com/fulldecent) + [CocoaPods/pod-template#69](https://github.com/CocoaPods/pod-template/issues/69) + +* Use target product types to determine whether a target is a test target when + running `pod init`. + [Samuel Giddins](https://github.com/segiddins) + [#5378](https://github.com/CocoaPods/CocoaPods/issues/5378) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* Validate that resource bundles declared in the podspec contain resources. + [Samuel Giddins](https://github.com/segiddins) + [#5218](https://github.com/CocoaPods/CocoaPods/issues/5218) + +* Improvements to the error messaging around missing dependencies. + [Orta Therox](https://github.com/orta) + [#5260](https://github.com/CocoaPods/CocoaPods/issues/5260) + +* Make sharing schemes for development pods an installation option + (`share_schemes_for_development_pods`) and disable sharing schemes + by default. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Fix search paths inheritance when there are transitive dependencies. + [Samuel Giddins](https://github.com/segiddins) + [#5264](https://github.com/CocoaPods/CocoaPods/issues/5264) + + +## 1.0.0.rc.2 (2016-05-04) + +##### Enhancements + +* None. 
+ +##### Bug Fixes + +* Handle when an abstract target has no declared platform without crashing. + [Samuel Giddins](https://github.com/segiddins) + [#5236](https://github.com/CocoaPods/CocoaPods/issues/5236) + +* Don't recurse into child directories to find podspecs when running + `pod spec lint`. + [Samuel Giddins](https://github.com/segiddins) + [#5244](https://github.com/CocoaPods/CocoaPods/issues/5244) + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* The `pod init` command now uses target inheritance for test targets + in the generated Podfile. + [Orta Therox](https://github.com/orta) + [#4714](https://github.com/CocoaPods/CocoaPods/issues/4714) + +* Support customized build directories by letting user xcconfig definitions + rely on the new overridable alias build variable `PODS_BUILD_DIR`. + [Marius Rackwitz](https://github.com/mrackwitz) + [#5217](https://github.com/CocoaPods/CocoaPods/issues/5217) + +##### Bug Fixes + +* Fix for `pod repo push --help` throwing an error. + [Boris Bügling](https://github.com/neonichu) + [#5214](https://github.com/CocoaPods/CocoaPods/pull/5214) + +* The warning for not having utf-8 set as the default encoding for a + terminal now properly respects the `--no-ansi` argument. + [Joshua Kalpin](https://github.com/Kapin) + [#5199](https://github.com/CocoaPods/CocoaPods/pull/5199) + + +## 1.0.0.beta.8 (2016-04-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Headers from vendored frameworks no longer end up in the `HEADER_SEARCH_PATH` + when using frameworks. They are now assumed to be already present as modular + headers in the framework itself. + [Mark Spanbroek](https://github.com/markspanbroek) + [#5146](https://github.com/CocoaPods/CocoaPods/pull/5146) + +* Access to the `Pod::SourcesManager` constant has been restored, though its use + is considered deprecated and subject to removal at any time. Migrate to use + `Pod::Config.instance.sources_manager` in some manner as soon as possible. + [Samuel Giddins](https://github.com/segiddins) + +* Running `pod repo update --silent` will now properly silence git output while + updating the repository. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.7 (2016-04-15) + +##### Enhancements + +* When an unknown build configuration is mentioned in the Podfile, CocoaPods + will suggest the build configurations found in the user project. + [Samuel Giddins](https://github.com/segiddins) + [#5113](https://github.com/CocoaPods/CocoaPods/issues/5113) + +* Improved the error message when a matching spec cannot be found, + mentioning that now `pod repo update` is not implicit when running `pod + install`. + [Orta Therox](https://github.com/orta) + [#5135](https://github.com/CocoaPods/CocoaPods/issues/5135) + +* Add support for sharded specs directories. + [Samuel Giddins](https://github.com/segiddins) + [#5002](https://github.com/CocoaPods/CocoaPods/issues/5002) + +* Pass the build setting `OTHER_CODE_SIGN_FLAGS` to codesign for the generated + embed frameworks build phase's script, as Xcode does when signing natively. + [Václav Slavík](https://github.com/vslavik) + [#5087](https://github.com/CocoaPods/CocoaPods/pull/5087) + +##### Bug Fixes + +* Sort files from `Dir.glob` explicitly to produce same result on case sensitive + file system as result on case insensitive file system. + [Soutaro Matsumoto](https://github.com/soutaro) + +* Fix build path for resource bundles. 
+ [Marius Rackwitz](https://github.com/mrackwitz) + [#5034](https://github.com/CocoaPods/CocoaPods/issues/5034) + +* Rely on `TARGET_BUILD_DIR` instead of `CONFIGURATION_BUILD_DIR` in the + generated embed resources build phase's script, so that UI test targets can + be run. + [seaders](https://github.com/seaders) + [#5133](https://github.com/CocoaPods/CocoaPods/issues/5133) + +* Ensure that a `CFBundleVersion` is set for resource bundles' Info.plist + files. + [Samuel Giddins](https://github.com/segiddins) + [#4897](https://github.com/CocoaPods/CocoaPods/issues/4897) + + +## 1.0.0.beta.6 (2016-03-15) + +##### Breaking + +* Running `pod install` doesn't imply an automatic spec repo update. + The old behavior can be achieved by passing in the option `--repo-update` + or running `pod repo update`. + [Marius Rackwitz](https://github.com/mrackwitz) + [#5004](https://github.com/CocoaPods/CocoaPods/issues/5004) + +* Remove the configuration variable `skip_repo_update` as the default behavior + varies now between `pod install` and `pod (update|outdated)`. + [Marius Rackwitz](https://github.com/mrackwitz) + [#5017](https://github.com/CocoaPods/CocoaPods/issues/5017) + +##### Enhancements + +* The master specs repo will no longer perform 'no-op' git fetches. This should + help to reduce the load on GitHub's servers. + [Daniel Tomlinson](https://github.com/DanielTomlinson) + [#5005](https://github.com/CocoaPods/CocoaPods/issues/5005) + [#4989](https://github.com/CocoaPods/CocoaPods/issues/4989) + +* The specs repos will no longer support shallow clones to reduce CPU load + on git servers. Pre-existing shallow clones of the `master` repo will + automatically be upgraded to deep clones when the repo is updated. + [Samuel Giddins](https://github.com/segiddins) + [#5016](https://github.com/CocoaPods/CocoaPods/issues/5016) + +* The validator will check that all `public_header_files` and + `private_header_files` are also present in `source_files`. + [Samuel Giddins](https://github.com/segiddins) + [#4936](https://github.com/CocoaPods/CocoaPods/issues/4936) + +##### Bug Fixes + +* The master specs repository can no longer be added via `pod repo add`, but + instead must be done via `pod setup`. + [Samuel Giddins](https://github.com/segiddins) + +* Print a friendly error message when the platform for a target cannot be + inferred. + [Samuel Giddins](https://github.com/segiddins) + [#4790](https://github.com/CocoaPods/CocoaPods/issues/4790) + +* Rely on `TARGET_BUILD_DIR` instead of `CONFIGURATION_BUILD_DIR` in the + generated embed frameworks build phase's script, so that UI test targets can + be run. + [Marius Rackwitz](https://github.com/mrackwitz) + [#5022](https://github.com/CocoaPods/CocoaPods/issues/5022) + +* Fix build paths for resources bundles. + [Marius Rackwitz](https://github.com/mrackwitz) + [#5028](https://github.com/CocoaPods/CocoaPods/pull/5028) + +* Validate that a Podfile does not declare the same target twice. + [Samuel Giddins](https://github.com/segiddins) + [#5029](https://github.com/CocoaPods/CocoaPods/issues/5029) + + +## 1.0.0.beta.5 (2016-03-08) + +##### Breaking + +* Development pods will no longer be implicitly unlocked. This makes CocoaPods respect + constraints related to dependencies of development pods in the lockfile. + + If you change the constraints of a dependency of your development pod and want to + override the locked version, you will have to use + `pod update ${DEPENDENCY_NAME}` manually. 
+ [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#4211](https://github.com/CocoaPods/CocoaPods/issues/4211) + [#4577](https://github.com/CocoaPods/CocoaPods/issues/4577) + [#4580](https://github.com/CocoaPods/CocoaPods/issues/4580) + +##### Enhancements + +* Add the :package: emoji in front of CocoaPods Script Build Phases + to quickly and visually differentiate them from other phases. + [Olivier Halligon](https://github.com/AliSoftware) + [#4985](https://github.com/CocoaPods/CocoaPods/issues/4985) + +* Enable syntax highlighting on the Podfile in the generated + `Pods.xcodeproj`. + [Samuel Giddins](https://github.com/segiddins) + [#4962](https://github.com/CocoaPods/CocoaPods/issues/4962) + +##### Bug Fixes + +* Fix paths passed for resource bundles in the copy resources script. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4954](https://github.com/CocoaPods/CocoaPods/pull/4954) + +* Fix saying the `master` specs repo exists when it has not been set up. + [Samuel Giddins](https://github.com/segiddins) + [#4955](https://github.com/CocoaPods/CocoaPods/issues/4955) + +* Move `${TARGET_DEVICE_ARGS}` out of the quotations for `--sdk` in the + `Copy Pods Resources` build phase. + [seaders](https://github.com/seaders) [#4940](https://github.com/CocoaPods/CocoaPods/issues/4940) + +* Handle when `$PATH` isn't set. + [Samuel Giddins](https://github.com/segiddins) + +* Module maps that are set per-platform will be installed for the correct + platform. + [Samuel Giddins](https://github.com/segiddins) + [#4968](https://github.com/CocoaPods/CocoaPods/issues/4968) + + +## 1.0.0.beta.4 (2016-02-24) + +##### Enhancements + +* Allow deduplication to take effect even when the same pod is used with + different sets of subspecs across different platforms. + This changes the general naming scheme for scoped pod targets: they are + now suffixed based on what makes them different from others for the + same root spec, instead of being prefixed by the dependent target. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4146](https://github.com/CocoaPods/CocoaPods/pull/4146) + +* Pass `COCOAPODS_VERSION` as environment variable when invoking the + `prepare_command`. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4933](https://github.com/CocoaPods/CocoaPods/pull/4933) + +##### Bug Fixes + +* Pods are built by default in another scoping level of the build products + directory identified by their name to prevent name clashes among + dependencies. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4146](https://github.com/CocoaPods/CocoaPods/pull/4146) + +* Fix mixed integrations where static libraries are used alongside frameworks + from different target definitions in one Podfile. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4146](https://github.com/CocoaPods/CocoaPods/pull/4146) + +* Pass target device arguments to `ibtool` in the copy resources script, fixing + compilation of storyboards when targeting versions of iOS prior to iOS 8. + [seaders](https://github.com/seaders) + [#4913](https://github.com/CocoaPods/CocoaPods/issues/4913) + +* Fix `pod repo lint` when passed a path argument. + [Boris Bügling](https://github.com/neonichu) + [#4883](https://github.com/CocoaPods/CocoaPods/issues/4883) + + +## 1.0.0.beta.3 (2016-02-03) + +##### Breaking + +* Rename the `xcodeproj` Podfile directive to `project`. + [Marius Rackwitz](https://github.com/mrackwitz) + [Core#298](https://github.com/CocoaPods/Core/issues/298) + +##### Enhancements + +* None.
+ +##### Bug Fixes + +* Don't try to embed project headers into frameworks. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4819](https://github.com/CocoaPods/CocoaPods/issues/4819) + +* Fix a crash in the analyzer when target deduplication is deactivated. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4751](https://github.com/CocoaPods/CocoaPods/issues/4751) + +* Handle CoreData mapping models with recursive resource globs. + [Eric Firestone](https://github.com/efirestone) + [#4809](https://github.com/CocoaPods/CocoaPods/pull/4809) + +* Generate valid xcconfig when target name includes spaces. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#4783](https://github.com/CocoaPods/CocoaPods/issues/4783) + +* Properly add resource files to resources build phase. + [Eric Firestone](https://github.com/efirestone) + [#4762](https://github.com/CocoaPods/CocoaPods/issues/4762) + +* Fix suggestion of sudo when it actually isn't needed. + [Marcel Jackwerth](https://github.com/sirlantis) + +* Set the `TARGET_DEVICE_FAMILY` to support both iPhone and iPad for iOS + resource bundle targets. + [Andy Rifken](https://github.com/arifken) + +* Share user schemes of `Pods.xcodeproj` after generating deterministic UUIDS. + [Samuel Giddins](https://github.com/segiddins) + +* Only attempt to `import` a framework during linting if the pod has source + files, and is thus being built by CocoaPods. + [Samuel Giddins](https://github.com/segiddins) + [#4823](https://github.com/CocoaPods/CocoaPods/issues/4823) + +* Determine whether an external source needs to be fetched when updating a + dependency regardless of subspec names. + [Samuel Giddins](https://github.com/segiddins) + [#4821](https://github.com/CocoaPods/CocoaPods/issues/4821) + + +## 1.0.0.beta.2 (2016-01-05) + +##### Enhancements + +* Present a friendly error suggesting running `pod install` when there are + missing local podspecs when running `pod outdated`. + [Samuel Giddins](https://github.com/segiddins) + [#4716](https://github.com/CocoaPods/CocoaPods/issues/4716) + +* Don't warn about setting base config when identical to current config. + [Jed Lewison](https://github.com/jedlewison) + [#4722](https://github.com/CocoaPods/CocoaPods/issues/4722) + +* Add `user_targets` method to the `UmbrellaTargetDescription` in the + post-install hooks context. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Always fetch a `:podspec` dependency's podspec when it is missing in the + `Pods` directory. + [Samuel Giddins](https://github.com/segiddins) + [#4717](https://github.com/CocoaPods/CocoaPods/issues/4717) + +* The `Info.plist` file will now be generated properly for resource bundles, + setting the proper `CFBundlePackageType` and omitting the `CFBundleExecutable` + key. + [Samuel Giddins](https://github.com/segiddins) + [Xcodeproj#259](https://github.com/CocoaPods/Xcodeproj/issues/259) + +* Fix crash when deintegrating due to major version change and there are + multiple root-level Xcode projects. + [Samuel Giddins](https://github.com/segiddins) + +* Ensure the `sandbox_root` attribute is set on the pre-install hooks context. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Breaking + +* The `link_with` Podfile DSL method has been removed in favor of target + inheritance. + [Samuel Giddins](https://github.com/segiddins) + +* The `:exclusive => true` Podfile DSL target option has been removed in favor + of the `inherit! :search_paths` directive. 
+ [Samuel Giddins](https://github.com/segiddins) + +* The specification of `:head` dependencies has been removed. + [Samuel Giddins](https://github.com/segiddins) + [#4673](https://github.com/CocoaPods/CocoaPods/issues/4673) + +* The deprecated `:local` dependency option has been removed in favor of the + equivalent `:path` option. + [Samuel Giddins](https://github.com/segiddins) + +* The deprecated `dependency` method in the Podfile DSL has been removed in + favor of the equivalent `pod` method. + [Samuel Giddins](https://github.com/segiddins) + +* The deprecated `preferred_dependency` method in the Specification DSL has been + removed in favor of the equivalent `default_subspecs` method. + [Samuel Giddins](https://github.com/segiddins) + +* The `docset_url` Specification attribute has been removed. + [Samuel Giddins](https://github.com/segiddins) + [Core#284](https://github.com/CocoaPods/Core/issues/284) + +* Build configuration names are no longer set as pre-processor defines, but + rather `POD_CONFIGURATION_$CONFIGURATION_NAME` is defined in order to lessen + conflicts with pod code. + [#4143](https://github.com/CocoaPods/CocoaPods/issues/4143) + +##### Highlighted Enhancements That Need Testing + +* The Podfile DSL has been cleaned up, with the removal of confusing options and + the introduction of abstract targets, search paths-only inheritance, the + specification of installation options, and the removal of head dependencies. + [Samuel Giddins](https://github.com/segiddins) + [#840](https://github.com/CocoaPods/CocoaPods/issues/840) + +##### Enhancements + +* Add the ability to add a custom commit message when pushing a spec. + [Bart Jacobs](https://github.com/bartjacobs) + [#4583](https://github.com/CocoaPods/CocoaPods/issues/4583) + +* Added support for `pod env` to print the pod environment without having to crash. + [Hemal Shah](https://github.com/hemal) + [#3660](https://github.com/CocoaPods/CocoaPods/issues/3660) + +* Add support for specifying :source with a pod dependency. + [Eric Firestone](https://github.com/efirestone) + [#4486](https://github.com/CocoaPods/CocoaPods/pull/4486) + +* Ask the user to run `pod install` when a resource is not found during the copy resources script. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + +* Add support to track `.def` sources. +* Add support to track `.def` files as headers. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#338](https://github.com/CocoaPods/Xcodeproj/pull/338) + +* `Pod::Installer::PostInstallHooksContext` now offers access to the `sandbox` + object. + [Marcelo Fabri](https://github.com/marcelofabri) + [#4487](https://github.com/CocoaPods/CocoaPods/pull/4487) + +* Improve sorting algorithm for `pod search`. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [cocoapods-search#12](https://github.com/CocoaPods/cocoapods-search/issues/12) + +* Improve `pod search` performance while using _`--full`_ flag. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [cocoapods-search#8](https://github.com/CocoaPods/cocoapods-search/issues/8) + +* Improve message when there is no spec in repos for dependency set in Podfile. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#4430](https://github.com/CocoaPods/CocoaPods/issues/4430) + +* Reduce the number of times the user's Xcode project is opened, speeding up + installation.
+ [Samuel Giddins](https://github.com/segiddins) + [#4374](https://github.com/CocoaPods/CocoaPods/issues/4374) + +* Improve the performance of Pod::Installer::Analyzer#generate_pod_targets + [Daniel Ribeiro](https://github.com/danielribeiro) + [#4399](https://github.com/CocoaPods/CocoaPods/pull/4399) + +* Framework pods that have a `header_mappings_dirs` set will now produce + frameworks with headers that respect the nesting. + [Samuel Giddins](https://github.com/segiddins) + +* The validator will now ensure that pods with a `header_mappings_dirs` have all + of their headers inside that directory. + [Samuel Giddins](https://github.com/segiddins) + +* Pods will be validated with the `-Wincomplete-umbrella` compiler flag to + ensure module maps are valid. + [Samuel Giddins](https://github.com/segiddins) + [#3428](https://github.com/CocoaPods/CocoaPods/issues/3428) + +* The validator will now attempt to build an app that imports the pod. + [Samuel Giddins](https://github.com/segiddins) + [#2095](https://github.com/CocoaPods/CocoaPods/issues/2095) + [#2134](https://github.com/CocoaPods/CocoaPods/issues/2134) + +* The `Info.plist` file's `CFBundleIdentifier` is now set via the + `PRODUCT_BUNDLE_IDENTIFIER` build setting, consistent with Xcode 7. + [Samuel Giddins](https://github.com/segiddins) + [#4426](https://github.com/CocoaPods/CocoaPods/issues/4426) + +* Externally-sourced pods will now have their specifications quickly linted. + [Samuel Giddins](https://github.com/segiddins) + +* Set the deployment target on pods to be that which is defined in the + podspec. + [Samuel Giddins](https://github.com/segiddins) + [#4354](https://github.com/CocoaPods/CocoaPods/issues/3454) + +* Set a deployment target for resource bundle targets. + [Samuel Giddins](https://github.com/segiddins) + [#3347](https://github.com/CocoaPods/CocoaPods/issues/3347) + +* Targets that are no longer integrated with CocoaPods will be properly + de-integrated when installation occurs. + [Samuel Giddins](https://github.com/segiddins) + +* Targets that are integrated will be ensured that they have all + CocoaPods-related settings and phases properly installed. + [Samuel Giddins](https://github.com/segiddins) + +* Total de-integration will happen whenever the major version of CocoaPods + changes, ensuring backwards-incompatible changes are properly applied. + [Samuel Giddins](https://github.com/segiddins) + +* The Podfile now allows specifying installation options via the `install!` + directive. + [Samuel Giddins](https://github.com/segiddins) + [Core#151](https://github.com/CocoaPods/Core/issues/151) + +* The Podfile now allows marking targets as `abstract` and specifying the pod + inheritance mode via the `inherit!` directive. + [Samuel Giddins](https://github.com/segiddins) + [#1249](https://github.com/CocoaPods/CocoaPods/issues/1249) + [#1626](https://github.com/CocoaPods/CocoaPods/issues/1626) + [#4001](https://github.com/CocoaPods/CocoaPods/issues/4001) + +##### Bug Fixes + +* Fix compiling of localized resources. + [Eric Firestone](https://github.com/efirestone) + [#1653](https://github.com/CocoaPods/CocoaPods/issues/1653) + +* Fix compiling of asset catalog files inside resource bundles. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#4501](https://github.com/CocoaPods/CocoaPods/issues/4501) + +* Prevent the installer from being run inside the sandbox directory. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + +* Improve repo lint error message when no repo is found with the given name.
+ [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#4142](https://github.com/CocoaPods/CocoaPods/issues/4142) + +* Fix a crash in dependency resolution when running Ruby 2.3. + [Samuel Giddins](https://github.com/segiddins) + [#4345](https://github.com/CocoaPods/CocoaPods/issues/4345) + +* Fix handling of localized files in Pods installed as frameworks. + [Tim Bodeit](https://github.com/timbodeit) + [#2597](https://github.com/CocoaPods/CocoaPods/issues/2597) + +* Only include native targets when generating the Podfile in `pod init`. + [Samuel Giddins](https://github.com/segiddins) + [#2169](https://github.com/CocoaPods/CocoaPods/issues/2169) + +* Ensure that generated `Info.plist` files have a `CFBundleShortVersionString` + that is precisely three dot-separated numbers. + [Samuel Giddins](https://github.com/segiddins) + [#4421](https://github.com/CocoaPods/CocoaPods/issues/4421) + +* Set the `APPLICATION_EXTENSION_API_ONLY` build setting if integrating with a + tvOS extension target, or a target that has the setting set to `YES`. + [Samuel Giddins](https://github.com/segiddins) + [#3644](https://github.com/CocoaPods/CocoaPods/issues/3644) + [#4393](https://github.com/CocoaPods/CocoaPods/issues/4393) + +* Only the root directory of externally-sourced pods will be searched for + podspecs. + [Samuel Giddins](https://github.com/segiddins) + [#3683](https://github.com/CocoaPods/CocoaPods/issues/3683) + +* Remove the library name's extension when adding it in the "linker flags" build + setting to support dynamic libraries. + [Andrea Cremaschi](https://github.com/andreacremaschi) + [#4468](https://github.com/CocoaPods/CocoaPods/issues/4468) + +* Specifying relative subspec names to the linter is now supported. + [Samuel Giddins](https://github.com/segiddins) + [#1917](https://github.com/CocoaPods/CocoaPods/issues/1917) + +* Headers used to build a pod will no longer be duplicated for frameworks. + [Samuel Giddins](https://github.com/segiddins) + [#4420](https://github.com/CocoaPods/CocoaPods/issues/4420) + +* The `UIRequiredDeviceCapabilities` key is now specified in the `Info.plist` + file for tvOS pods built as frameworks. + [Samuel Giddins](https://github.com/segiddins) + [#4514](https://github.com/CocoaPods/CocoaPods/issues/4514) + +* Fix Swift code completion for Development Pods by using `realpath` for + symlinked source files. + [Boris Bügling](https://github.com/neonichu) + [#3777](https://github.com/CocoaPods/CocoaPods/issues/3777) + +* Avoid the duplicate UUID warning when a Pod is installed for multiple + platforms. + [Samuel Giddins](https://github.com/segiddins) + [#4521](https://github.com/CocoaPods/CocoaPods/issues/4521) + +* Changing the name of a target in a Podfile will no longer cause warnings about + being unable to set the base configuration XCConfig. + [Samuel Giddins](https://github.com/segiddins) + +* Ensure that linking multiple times against the same framework does not trigger + the duplicate module name check for frameworks. + [Boris Bügling](https://github.com/neonichu) + [Samuel Giddins](https://github.com/segiddins) + [#4550](https://github.com/CocoaPods/CocoaPods/issues/4550) + +* Fix lint in Xcode 7.2, it requires `-destination`. + [Boris Bügling](https://github.com/neonichu) + [#4652](https://github.com/CocoaPods/CocoaPods/pull/4652) + +* Empty podfiles / target blocks no longer break the user's Xcode project. 
+ [Samuel Giddins](https://github.com/segiddins) + [#3617](https://github.com/CocoaPods/CocoaPods/issues/3617) + +* The pre-processor define for `DEBUG` will be set for all debug-based build + configurations when building pods. + [Samuel Giddins](https://github.com/segiddins) + [#4148](https://github.com/CocoaPods/CocoaPods/issues/4148) + + +## 0.39.0 (2015-10-09) + +##### Enhancements + +* Podfile-specified options are passed to plugins as hashes that treat string + and symbol keys identically. + [Samuel Giddins](https://github.com/segiddins) + [#3354](https://github.com/CocoaPods/CocoaPods/issues/3354) + +##### Bug Fixes + +* Only link dynamic vendored frameworks and libraries of pod dependencies. + [Kevin Coleman](https://github.com/kcoleman731) + [#4336](https://github.com/CocoaPods/CocoaPods/issues/4336) + + +## 0.39.0.rc.1 (2015-10-05) + +##### Enhancements + +* Support for adding dependency target vendored libraries and frameworks to build settings. + [Kevin Coleman](https://github.com/kcoleman731) + [#4278](https://github.com/CocoaPods/CocoaPods/pull/4278) + +* Always link the aggregate target as static to the user project. + [Marius Rackwitz](https://github.com/mrackwitz) + [#4137](https://github.com/CocoaPods/CocoaPods/pull/4137) + + +## 0.39.0.beta.5 (2015-10-01) + +##### Breaking + +* Activesupport 4 is now required, breaking compatibility with applications + locked to `3.x.y`. + +##### Enhancements + +* The `EMBEDDED_CONTENT_CONTAINS_SWIFT` build setting will now be set when + appropriate. + [Samuel Giddins](https://github.com/segiddins) + +* The embed frameworks script will no longer manually copy over the Swift + runtime libraries on Xcode 7 and later. + [Samuel Giddins](https://github.com/segiddins) + [earltedly](https://github.com/segiddins) + [DJ Tarazona](https://github.com/djtarazona) + [#4188](https://github.com/CocoaPods/CocoaPods/issues/4188) + +* A post-install summary of the pods installed is now printed. + [Samuel Giddins](https://github.com/segiddins) + [#4124](https://github.com/CocoaPods/CocoaPods/issues/4124) + +##### Bug Fixes + +* Give a meaningful message for the case where there is no available stable + version for a pod, and there is no explicit version requirement. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#4197](https://github.com/CocoaPods/CocoaPods/issues/4197) + +* Use `watchsimulator` when validating pods with the watchOS platform. + [Thomas Kollbach](https://github.com/toto) + [#4130](https://github.com/CocoaPods/CocoaPods/issues/4130) + +* C or C++ preprocessor output files with `.i` extension now have their compiler + flags set correctly. + [Andrea Aresu](https://github.com/aaresu/) + +* Remove SDKROOT relative search path as it isn't needed anymore since XCTest. + [Boris Bügling](https://github.com/neonichu) + [#4219](https://github.com/CocoaPods/CocoaPods/issues/4219) + +* Podfile generated by `pod init` now specifies iOS 8.0 as the default platform + and includes `use_frameworks!` for Swift projects. + [Jamie Evans](https://github.com/JamieREvans) + +* Support for the new `tvos` platform. + [Boris Bügling](https://github.com/neonichu) + [#4152](https://github.com/CocoaPods/CocoaPods/pull/4152) + +* Either generate just one pod target or generate it once for each target + definition. 
+ [Marius Rackwitz](https://github.com/mrackwitz) + [#4034](https://github.com/CocoaPods/CocoaPods/issues/4034) + +* Stop setting `DYLIB_CURRENT_VERSION`, `CURRENT_PROJECT_VERSION`, and + `DYLIB_COMPATIBILITY_VERSION` for pods integrated as dynamic frameworks. + [Samuel Giddins](https://github.com/segiddins) + [#4083](https://github.com/CocoaPods/CocoaPods/issues/4083) + +* The headers folders paths for static library pods will be unset, fixing + validation when archives are uploaded to iTunes Connect. + [Boris Bügling](https://github.com/neonichu) + [Samuel Giddins](https://github.com/segiddins) + [#4119](https://github.com/CocoaPods/CocoaPods/issues/4119) + +* Don't require the `platform` attribute for targets without any declared pods + when running `pod install --no-integrate`. + [Sylvain Guillopé](https://github.com/sguillope) + [#3151](https://github.com/CocoaPods/CocoaPods/issues/3151) + +* Gracefully handle exception if creating the repos directory fails due to a + system error like a permission issue. + [Sylvain Guillopé](https://github.com/sguillope) + [#4177](https://github.com/CocoaPods/CocoaPods/issues/4177) + +## 0.39.0.beta.4 (2015-09-02) + +##### Bug Fixes + +* Using vendored frameworks without a `Headers` directory will no longer cause a + crash. + [Samuel Giddins](https://github.com/segiddins) + [#3967](https://github.com/CocoaPods/CocoaPods/issues/3967) + +* Computing the set of transitive dependencies for a pod target, + even if the target is scoped, will no longer smash the stack. + [Samuel Giddins](https://github.com/segiddins) + [#4092](https://github.com/CocoaPods/CocoaPods/issues/4092) + +* Take into account a specification's `exclude_files` when constructing resource + bundles. + [Samuel Giddins](https://github.com/segiddins) + [#4065](https://github.com/CocoaPods/CocoaPods/issues/4065) + +* Fix resolving to platform-compatible versions of transitive dependencies. + [Samuel Giddins](https://github.com/segiddins) + [#4084](https://github.com/CocoaPods/CocoaPods/issues/4084) + + +## 0.39.0.beta.3 (2015-08-28) + +##### Bug Fixes + +* This release fixes a file permissions error when using the RubyGem. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.39.0.beta.2 (2015-08-27) + +##### Bug Fixes + +* Ensure all gem files are readable. + [Samuel Giddins](https://github.com/segiddins) + [#4085](https://github.com/CocoaPods/CocoaPods/issues/4085) + + +## 0.39.0.beta.1 (2015-08-26) + +##### Breaking + +* The `HEADER_SEARCH_PATHS` will no longer be constructed recursively. + [Samuel Giddins](https://github.com/segiddins) + [twoboxen](https://github.com/twoboxen) + [#1437](https://github.com/CocoaPods/CocoaPods/issues/1437) + [#3760](https://github.com/CocoaPods/CocoaPods/issues/3760) + +##### Enhancements + +* Collapse the namespaced public and private pod xcconfig into one single + xcconfig file. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3916](https://github.com/CocoaPods/CocoaPods/pull/3916) + +* Add `--sources` option to `push` command. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#3912](https://github.com/CocoaPods/CocoaPods/issues/3912) + +* Implicitly unlock all local dependencies when installing. + [Samuel Giddins](https://github.com/segiddins) + [#3764](https://github.com/CocoaPods/CocoaPods/issues/3764) + +* The resolver error message when a conflict occurred due to platform deployment + target mismatches will now explain that. 
+ [Samuel Giddins](https://github.com/segiddins) + [#3926](https://github.com/CocoaPods/CocoaPods/issues/3926) + +* Add `:source_provider` hook to allow plugins to provide sources. + [Eric Amorde](https://github.com/amorde) + [#3190](https://github.com/CocoaPods/CocoaPods/issues/3190) + [#3792](https://github.com/CocoaPods/CocoaPods/pull/3792) + +* Remove embed frameworks build phase from target types, where it is not required. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3905](https://github.com/CocoaPods/CocoaPods/issues/3905) + [#4028](https://github.com/CocoaPods/CocoaPods/pull/4028) + +* Add a `--private` option to `pod spec lint`, `pod lib lint`, and + `pod repo push` that will ignore warnings that only apply to public + specifications and sources. + [Samuel Giddins](https://github.com/segiddins) + [Core#190](https://github.com/CocoaPods/Core/issues/190) + [#2682](https://github.com/CocoaPods/CocoaPods/issues/2682) + +* Add support for dynamic `vendored_frameworks` and `vendored_libraries`. + [Samuel Giddins](https://github.com/segiddins) + [#1993](https://github.com/CocoaPods/CocoaPods/issues/1993) + +##### Bug Fixes + +* Build settings specified in `pod_target_xcconfig` of a spec are also for + library targets only applied to the pod target. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3906](https://github.com/CocoaPods/CocoaPods/issues/3906) + +* Use APPLICATION_EXTENSION_API_ONLY for watchOS 2 extensions. + [Boris Bügling](https://github.com/neonichu) + [#3920](https://github.com/CocoaPods/CocoaPods/pull/3920) + +* Prevent copying resources to installation directory when `SKIP_INSTALL` is enabled. + [Dominique d'Argent](https://github.com/nubbel) + [#3971](https://github.com/CocoaPods/CocoaPods/pull/3971) + +* Embed frameworks into app and watch extensions. + [Boris Bügling](https://github.com/neonichu) + [#4004](https://github.com/CocoaPods/CocoaPods/pull/4004) + +* Fix missing `$(inherited)` for generated xcconfig `LIBRARY_SEARCH_PATHS` + and `HEADER_SEARCH_PATHS` build settings. + [Tyler Fox](https://github.com/smileyborg) + [#3908](https://github.com/CocoaPods/CocoaPods/issues/3908) + +* Fix source locking/unlocking. + [Samuel Giddins](https://github.com/segiddins) + [#4059](https://github.com/CocoaPods/CocoaPods/issues/4059) + +* Include the `-ObjC` linker flag when static `vendored_frameworks` are present. + [Samuel Giddins](https://github.com/segiddins) + [#3870](https://github.com/CocoaPods/CocoaPods/issues/3870) + [#3992](https://github.com/CocoaPods/CocoaPods/issues/3992) + + +## 0.38.2 (2015-07-25) + +##### Bug Fixes + +* Fix generation of xcconfig files that specify both `-iquote` and `-isystem` + headers. + [Russ Bishop](https://github.com/russbishop) + [#3893](https://github.com/CocoaPods/CocoaPods/issues/3893) + +* Pods integrated as static libraries can no longer be imported as + modules, as that change had unexpected side-effects. + [Boris Bügling](https://github.com/neonichu) + [#3898](https://github.com/CocoaPods/CocoaPods/pull/3898) + [#3879](https://github.com/CocoaPods/CocoaPods/issues/3879) + [#3888](https://github.com/CocoaPods/CocoaPods/issues/3888) + [#3886](https://github.com/CocoaPods/CocoaPods/issues/3886) + [#3889](https://github.com/CocoaPods/CocoaPods/issues/3889) + [#3884](https://github.com/CocoaPods/CocoaPods/issues/3884) + +* Source file locking now happens after plugin and podfile post-install hooks + have run. 
+ [Samuel Giddins](https://github.com/segiddins) + [#3529](https://github.com/CocoaPods/CocoaPods/issues/3529) + +* Only set project, dylib, and compatibility versions to valid, three integer + values. + [Samuel Giddins](https://github.com/segiddins) + [#3887](https://github.com/CocoaPods/CocoaPods/issues/3887) + + +## 0.38.1 (2015-07-23) + +##### Enhancements + +* Set project, dylib, and compatibility versions when building pods as + frameworks. + [Marius Rackwitz](https://github.com/mrackwitz) + +* Pods integrated as static libraries can now be imported as modules. + [Tomas Linhart](https://github.com/TomasLinhart) + [#3874](https://github.com/CocoaPods/CocoaPods/issues/3874) + +##### Bug Fixes + +* Ensure the aggregate `.xcconfig` file only has the settings for the + appropriate build configuration. + [Samuel Giddins](https://github.com/segiddins) + [#3842](https://github.com/CocoaPods/CocoaPods/issues/3842) + +* Show the correct error when `pod spec lint` finds multiple podspecs, and at + least one of them fails linting. + [Samuel Giddins](https://github.com/segiddins) + [#3869](https://github.com/CocoaPods/CocoaPods/issues/3869) + +* Set header search paths properly on the user target when `vendored_libraries` + Pods are used while integrating Pods as frameworks. + [Jonathan MacMillan](https://github.com/perotinus) + [#3857](https://github.com/CocoaPods/CocoaPods/issues/3857) + +* Only link public headers in the sandbox for Pods that are not being built + into dynamic frameworks, when integrating Pods as frameworks. + [Jonathan MacMillan](https://github.com/perotinus) + [#3867](https://github.com/CocoaPods/CocoaPods/issues/3867) + +* Don't lock resource files, only source files. + [Mason Glidden](https://github.com/mglidden) + [#3557](https://github.com/CocoaPods/CocoaPods/issues/3557) + +* Fix copying frameworks when integrating with today extensions. + [Samuel Giddins](https://github.com/segiddins) + [#3819](https://github.com/CocoaPods/CocoaPods/issues/3819) + + +## 0.38.0 (2015-07-18) + +##### Enhancements + +* Improve the message shown when trying to use Swift Pods without frameworks. + Now it includes the offending Pods so that the user can take action to remove + the Pods, if they don’t want to move to frameworks yet. + [Eloy Durán](https://github.com/alloy) + [#3830](https://github.com/CocoaPods/CocoaPods/pull/3830) + +##### Bug Fixes + +* Properly merge the `user_target_xcconfig`s of multiple subspecs. + [Samuel Giddins](https://github.com/segiddins) + [#3813](https://github.com/CocoaPods/CocoaPods/issues/3813) + + +## 0.38.0.beta.2 (2015-07-05) + +##### Enhancements + +* The resolver will now take supported platform deployment targets into account + when resolving dependencies. + [Samuel Giddins](https://github.com/segiddins) + [#2443](https://github.com/CocoaPods/CocoaPods/issues/2443) + +* `Pods.xcodeproj` will now be written with deterministic UUIDs, vastly reducing + project churn and merge conflicts. This behavior can be disabled via the new + `COCOAPODS_DISABLE_DETERMINISTIC_UUIDS` environment variable. + [Samuel Giddins](https://github.com/segiddins) + +* [`cocoapods-stats`](https://github.com/CocoaPods/cocoapods-stats) + is now a default plugin. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Ensure that the `prepare_command` is run even when skipping the download + cache. 
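+  As context, a podspec's `prepare_command` is the shell snippet run once after
+  the pod has been downloaded; a minimal, hypothetical fragment (the pod name and
+  script are illustrative only):
+
+      Pod::Spec.new do |s|
+        s.name            = 'MyPod'   # hypothetical
+        s.version         = '1.0.0'
+        # Runs from the root of the downloaded pod; with this fix it is also run
+        # when the download cache is skipped.
+        s.prepare_command = 'ruby generate_sources.rb'
+        s.source_files    = 'Sources/**/*.{h,m}'
+      end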
+ [Samuel Giddins](https://github.com/segiddins) + [#3674](https://github.com/CocoaPods/CocoaPods/issues/3674) + +* Public headers inside a directory named `framework` should be linked in the + sandbox. + [Vincent Isambart](https://github.com/vincentisambart) + [#3751](https://github.com/CocoaPods/CocoaPods/issues/3751) + +* Properly support targets with spaces in their name in the embed frameworks + script. + [Samuel Giddins](https://github.com/segiddins) + [#3754](https://github.com/CocoaPods/CocoaPods/issues/3754) + +* Don't add the `-ObjC` linker flag if it's unnecessary. + [Samuel Giddins](https://github.com/segiddins) + [#3537](https://github.com/CocoaPods/CocoaPods/issues/3537) + +* Ensure that no duplicate framework or target dependencies are created. + [Samuel Giddins](https://github.com/segiddins) + [#3763](https://github.com/CocoaPods/CocoaPods/issues/3763) + + +## 0.38.0.beta.1 (2015-06-26) + +##### Highlighted Enhancement That Needs Testing + +* De-duplicate Pod Targets: CocoaPods now recognizes when a dependency is used + multiple times across different user targets, but needs to be built only once. + The targets in `Pods.xcodeproj` need to be duplicated when one of the following + applies: + * They are used on different platforms. + * They are used with differents sets of subspecs. + * They have any dependency which needs to be duplicated. + + You can opt-out of this behavior installation-wise, by setting the following + option in your `~/.cocoapods/config.yaml`: + ```yaml + deduplicate_targets: false + ``` + + [Marius Rackwitz](https://github.com/mrackwitz) + [#3550](https://github.com/CocoaPods/CocoaPods/issues/3550) + +##### Breaking + +* The CocoaPods environment header has been removed. + [Samuel Giddins](https://github.com/segiddins) + [#2390](https://github.com/CocoaPods/CocoaPods/issues/2390) + +* The `Installer` is passed directly to the `pre_install` and `post_install` + hooks defined in the Podfile, instead of the previously used + `Hooks::InstallerRepresentation`. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3648](https://github.com/CocoaPods/CocoaPods/issues/3648) + +* Deprecate the `xcconfig` attribute in the Podspec DSL, which is replaced by + the new attributes `pod_target_xcconfig` and `user_target_xcconfig`. + [Marius Rackwitz](https://github.com/mrackwitz) + [CocoaPods#3465](https://github.com/CocoaPods/CocoaPods/issues/3465) + +##### Enhancements + +* The notice about a new version being available will now include our + recommendation of using the latest stable version. + [Hugo Tunius](https://github.com/k0nserv) + [#3667](https://github.com/CocoaPods/CocoaPods/pull/3667) + +* New commands `pod cache list` and `pod cache clean` allows you to see the + contents of the cache and clean it. + [Olivier Halligon](https://github.com/AliSoftware) + [#3508](https://github.com/CocoaPods/CocoaPods/issues/3508) + +* The download cache will automatically be reset when changing CocoaPods + versions. + [Samuel Giddins](https://github.com/segiddins) + [#3542](https://github.com/CocoaPods/CocoaPods/issues/3542) + +* Supports running pre-install hooks in plugins. This happens before the resolver + does its work, and offers easy access to the sandbox, podfile and lockfile via a + `PreInstallHooksContext` object. This also renames the post-install hooks from `HooksContext` + to `PostInstallHooksContext`. 
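+  For plugin authors, a minimal sketch of registering such a hook (the plugin
+  name is hypothetical; the context accessors follow the description above):
+
+      # lib/cocoapods_plugin.rb of a hypothetical 'cocoapods-myplugin'
+      Pod::HooksManager.register('cocoapods-myplugin', :pre_install) do |context|
+        # Runs before the resolver does its work; the context exposes the
+        # sandbox, podfile and lockfile.
+        Pod::UI.puts "Installing from #{context.podfile.defined_in_file}"
+      end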
+ [Orta Therox](https://github.com/orta) + [#3540](https://github.com/CocoaPods/cocoapods/issues/3409) + +* Allow passing additional arguments to `pod lib create`, which then get passed + as-is to the `configure` scripts. + [Samuel Giddins](https://github.com/segiddins) + [#2160](https://github.com/CocoaPods/CocoaPods/issues/2160) + +* Use `-analyzer-disable-all-checks` to disable static analyzer for + pods with `inhibit_warnings` enabled (requires Xcode >= 6.1). + [Dieter Komendera](https://github.com/kommen) + [#2402](https://github.com/CocoaPods/CocoaPods/issues/2402) + +* Cache globbing in `PathList` to speed up `pod install`. + [Vincent Isambart](https://github.com/vincentisambart) + [#3699](https://github.com/CocoaPods/CocoaPods/pull/3699) + +* CocoaPods will validate your podfile and try to identify problems + and conflicts in how you've specified the dependencies. + [Hugo Tunius](https://github.com/k0nserv) + [#995](https://github.com/CocoaPods/CocoaPods/issues/995) + +* `pod update` will now accept root pod names, even when only subspecs are + installed. + [Samuel Giddins](https://github.com/segiddins) + [#3689](https://github.com/CocoaPods/CocoaPods/issues/3689) + +* Support for the new `watchos` platform. + [Boris Bügling](https://github.com/neonichu) + [#3681](https://github.com/CocoaPods/CocoaPods/pull/3681) + +##### Bug Fixes + +* Added recursive support to the public headers of vendored frameworks + that are automatically linked in the sandbox. This fixes and issue + for framework header directories that contain sub-directories. + [Todd Casey](https://github.com/vhariable) + [#3161](https://github.com/CocoaPods/CocoaPods/issues/3161) + +* Public headers of vendored frameworks are now automatically linked in + the sandbox. That allows transitive inclusion of headers from other pods. + [Vincent Isambart](https://github.com/vincentisambart) + [#3161](https://github.com/CocoaPods/CocoaPods/issues/3161) + +* Fixes an issue that prevented static libraries from building. `OTHER_LIBTOOLFLAGS` + is no longer set to the value of `OTHER_LDFLAGS`. If you want to create a static + library that includes all dependencies for (internal/external) distribution then + you should use a tool like `cocoapods-packager`. + [Michael Moscardini](https://github.com/themackworth) + [#2747](https://github.com/CocoaPods/CocoaPods/issues/2747) + [#2704](https://github.com/CocoaPods/CocoaPods/issues/2704) + +* The embed frameworks script will now properly filter out symlinks to the + directories that are filtered, which fixes an issue when submitting to the + Mac App Store. + [Samuel Giddins](https://github.com/segiddins) + +* The error report template is now more robust against missing executables. + [Samuel Giddins](https://github.com/segiddins) + [#3719](https://github.com/CocoaPods/CocoaPods/issues/3719) + +* Attempting to specify a `git` source where a Podspec for the requested pod is + not found will have a more helpful error message. + [Samuel Giddins](https://github.com/segiddins) + +* `pod outdated` will now accept the `--no-repo-update` and `--no-integrate` + options. + [Samuel Giddins](https://github.com/segiddins) + +* Fixes an issue which prevented using a custom `CONFIGURATION_BUILD_DIR` when + integrating CocoaPods via dynamic frameworks. + [Tim Rosenblatt](https://github.com/timrosenblatt) + [#3675](https://github.com/CocoaPods/CocoaPods/pull/3675) + +* Pods frameworks in codesigned Mac apps are now signed. 
+  [Nikolaj Schumacher](https://github.com/nschum)
+  [#3646](https://github.com/CocoaPods/CocoaPods/issues/3646)
+
+
+## 0.37.2 (2015-05-27)
+
+##### Enhancements
+
+* Schemes of development pods will now be shared.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3600](https://github.com/CocoaPods/CocoaPods/issues/3600)
+
+* Recognize an incomplete cache when the original download of a pod was
+  interrupted.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3561](https://github.com/CocoaPods/CocoaPods/issues/3561)
+
+* Allow opting out of pod source locking, meaning `pod try` yields editable
+  projects.
+  [Samuel Giddins](https://github.com/segiddins)
+  [cocoapods-try#31](https://github.com/CocoaPods/cocoapods-try/issues/31)
+
+##### Bug Fixes
+
+* `pod repo push` will now find and push JSON podspecs.
+  [#3494](https://github.com/CocoaPods/CocoaPods/issues/3494)
+  [Kyle Fuller](https://github.com/kylef)
+
+* Flush stdin/stderr and wait a bit in `executable`.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3500](https://github.com/CocoaPods/CocoaPods/issues/3500)
+
+## 0.37.1 (2015-05-06)
+
+##### Bug Fixes
+
+* [Cache] Fixes a bug that caused a pod which had been cached once to not be
+  updated correctly when needed, e.g. for `pod spec lint`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3498](https://github.com/CocoaPods/CocoaPods/issues/3498)
+
+* Only add the "Embed Pods Frameworks" script for application and unit test targets.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3440](https://github.com/CocoaPods/CocoaPods/issues/3440)
+
+* C++ source files with `.cc`, `.cxx` and `.c++` extensions now have their
+  compiler flags set correctly.
+  [Chongyu Zhu](https://github.com/lembacon)
+  [Kyle Fuller](https://github.com/kylef)
+
+* Handle broken symlinks when installing a Pod.
+  [Daniel Barden](https://github.com/dbarden)
+  [#3515](https://github.com/cocoapods/cocoapods/issues/3515)
+
+* Only remove write permissions from files, so executables are unaffected.
+  [Mason Glidden](https://github.com/mglidden)
+  [#3501](https://github.com/CocoaPods/CocoaPods/issues/3501)
+
+* Always copy the generated `Podfile.lock` to `Pods/Manifest.lock` so they are
+  guaranteed to match, character-by-character, after installation.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3502](https://github.com/CocoaPods/CocoaPods/issues/3502)
+
+* Don't generate an umbrella header when a custom module map is specified. This
+  avoids an incomplete module map warning.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Actually allow skipping the download cache by downloading directly to the
+  download target when requested.
+  [Samuel Giddins](https://github.com/segiddins)
+
+
+## 0.37.0 (2015-05-03)
+
+For more details, see 📝 [CocoaPods 0.37](https://blog.cocoapods.org/CocoaPods-0.37/) on our blog.
+
+##### Bug Fixes
+
+* Print the UTF-8 warning to STDERR.
+  [Matt Holgate](https://github.com/mjholgate)
+
+
+## 0.37.0.rc.2 (2015-04-30)
+
+##### Bug Fixes
+
+* Handle caching specs that have subspecs with higher minimum deployment targets
+  without deleting needed source files.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3471](https://github.com/CocoaPods/CocoaPods/issues/3471)
+
+* Automatically detect JSON podspecs in `pod lib lint`.
+ [Samuel Giddins](https://github.com/segiddins) + [#3477](https://github.com/CocoaPods/CocoaPods/issues/3477) + + +## 0.37.0.rc.1 (2015-04-27) + +[Core](https://github.com/CocoaPods/Core/compare/0.37.0.beta.1...0.37.0.rc.1) +[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.24.0...0.24.1) + +##### Enhancements + +* Add environment variable `COCOAPODS_SKIP_UPDATE_MESSAGE` to disable new + version message. + [Andrea Mazzini](https://github.com/andreamazz) + [#3364](https://github.com/CocoaPods/CocoaPods/issues/3364) + +* Use user project's object version for pods project. + [Boris Bügling](https://github.com/neonichu) + [#253](https://github.com/CocoaPods/Xcodeproj/issues/253) + +##### Bug Fixes + +* Adding `$(inherited)` to `FRAMEWORK_SEARCH_PATHS` build setting in xcconfig for aggregate. + [Tomohiro Kumagai](https://github.com/EZ-NET) + [#3429](https://github.com/CocoaPods/CocoaPods/pull/3429) + +* Don't crash when the downloader can't find an appropriate podspec in a `git` + pod. + [Samuel Giddins](https://github.com/segiddins) + [#3433](https://github.com/CocoaPods/CocoaPods/issues/3433) + +* Automatically lock Pod source files after installing. + [Mason Glidden](https://github.com/mglidden) + [#1154](https://github.com/CocoaPods/CocoaPods/issues/1154) + +* Handle subprocesses leaking STDOUT/STDERR pipes by more strictly managing + process lifetime and not allowing I/O to block completion of the task. + [Samuel Giddins](https://github.com/segiddins) + [#3101](https://github.com/CocoaPods/CocoaPods/issues/3101) + +* Do not create pod target if `source_files` only contains headers. + [Boris Bügling](https://github.com/neonichu) + [#3106](https://github.com/CocoaPods/CocoaPods/issues/3106) + +* Run a pod's `prepare_command` (if it has one) before it is cleaned in the + download cache. + [Marius Rackwitz](https://github.com/mrackwitz) + [Samuel Giddins](https://github.com/segiddins) + [#3436](https://github.com/CocoaPods/CocoaPods/issues/3436) + +* Don't set the `-fno-objc-arc` compiler flags for files for which the flag + makes no sense. + [Samuel Giddins](https://github.com/segiddins) + [#2559](https://github.com/CocoaPods/CocoaPods/issues/2559) + +* Also apply a pod's configuration to any resource targets defined by the pod. + [Tom Adriaenssen](https://github.com/inferis) + [#3463](https://github.com/CocoaPods/CocoaPods/issues/3463) + + +## 0.37.0.beta.1 (2015-04-18) + +##### Enhancements + +* Allow the specification of custom module map files. + [Samuel Giddins](https://github.com/segiddins) + [Marius Rackwitz](https://github.com/mrackwitz) + [#3145](https://github.com/CocoaPods/CocoaPods/issues/3145) + +* Show the source URI for local Pod specification repositories in + `pod repo list`. + [Kyle Fuller](https://github.com/kylef) + +* Only show a warning when there is a minimum deployment target mismatch + between target and spec, instead of throwing a hard error. + [Samuel Giddins](https://github.com/segiddins) + [#1241](https://github.com/CocoaPods/CocoaPods/issues/1241) + +* Add download caching for pods, which speeds up `pod install` and linting, + potentially by several orders of magnitude. + [Samuel Giddins](https://github.com/segiddins) + [#2863](https://github.com/CocoaPods/CocoaPods/issues/2863) + [#3172](https://github.com/CocoaPods/CocoaPods/issues/3172) + +* Add a `--fail-fast` option to both `pod spec lint` and `pod lib lint` that + causes the linter to exit as soon as a single subspec or platform fails + linting. 
+ [Marius Rackwitz](https://github.com/mrackwitz) + +* Naïvely prevent base xcconfig warnings for targets that have custom + config files set. + [Chris Brauchli](https://github.com/cbrauchli) + [#2633](https://github.com/CocoaPods/CocoaPods/issues/2633) + +* Ensure private headers are declared as such in a framework's generated module + map. + [Samuel Giddins](https://github.com/segiddins) + [#2974](https://github.com/CocoaPods/CocoaPods/issues/2974) + +##### Bug Fixes + +* Do not pass code-sign arguments to xcodebuild when linting OS X targets. + [Boris Bügling](https://github.com/neonichu) + [#3310](https://github.com/CocoaPods/CocoaPods/issues/3310) + +* Fixes an issue showing the URL to remote resources in `pod repo list`. + [Kyle Fuller](https://github.com/kylef) + +* Fixes a problem with code signing when integrating CocoaPods + into a Today Widget extension. + [Christian Sampaio](https://github.com/chrisfsampaio) + [#3390](https://github.com/CocoaPods/CocoaPods/pull/3390) + + +## 0.36.4 (2015-04-16) + +##### Bug Fixes + +* Fixes various problems with Pods that use xcasset bundles. Pods that + use xcassets can now be used with the `pod :path` option. + [Kyle Fuller](https://github.com/kylef) + [#1549](https://github.com/CocoaPods/CocoaPods/issues/1549) + [#3384](https://github.com/CocoaPods/CocoaPods/pull/3383) + [#3358](https://github.com/CocoaPods/CocoaPods/pull/3358) + + +## 0.36.3 (2015-03-31) + +##### Bug Fixes + +* Fix using the downloader. + [Samuel Giddins](https://github.com/segiddins) + [#3344](https://github.com/CocoaPods/CocoaPods/issues/3344) + [#3345](https://github.com/CocoaPods/CocoaPods/issues/3345) + + +## 0.36.2 (2015-03-31) + +[Core](https://github.com/CocoaPods/Core/compare/0.36.1...0.36.2) + +##### Bug Fixes + +* Unique resources passed to the script generator. + [Diego Torres](https://github.com/dtorres) + [#3315](https://github.com/CocoaPods/CocoaPods/issues/3315) + [#3327](https://github.com/CocoaPods/CocoaPods/issues/3327) + +* Update the `Manifest.lock` when migrating local podspecs to JSON. This fixes + running `pod install` after upgrading to `0.36`. + [Samuel Giddins](https://github.com/segiddins) + [#3292](https://github.com/CocoaPods/CocoaPods/issues/3292) + [#3299](https://github.com/CocoaPods/CocoaPods/issues/3299) + + +## 0.36.1 (2015-03-27) + +[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.23.0...0.23.1) + +##### Bug Fixes + +* Workarounds(✻) for the resource script's handling of `.xcasset` files. + [sodas](https://github.com/sodastsai) + [Tony Li](https://github.com/crazytonyli) + [Chongyu Zhu](https://github.com/lembacon) + [#3247](https://github.com/CocoaPods/CocoaPods/issues/3247) + [#3303](https://github.com/CocoaPods/CocoaPods/issues/3303) + +* Fix the sanitization of configuration names in the generated target + environment header. + [Samuel Giddins](https://github.com/segiddins) + [#3301](https://github.com/CocoaPods/CocoaPods/issues/3301) + +> _(✻) Note: these fixes are only temporary to avoid overriding the user project's `xcassets`. 
+ We are aware that these workarounds are "too greedy" and thus user projects having different + `xcassets` for different targets will still have issues; we ([@AliSoftware](https://github.com/AliSoftware)) + are working on a deeper fix ([#3263](https://github.com/CocoaPods/CocoaPods/issues/3263)) for the next release._ + +## 0.36.0 (2015-03-11) + +[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.22.0...0.23.0) + +For more details, see 📝 [CocoaPods 0.36](https://blog.cocoapods.org/CocoaPods-0.36/) on our blog. + +##### Enhancements + +* Allows Swift pods to have a deployment target under iOS 8.0 if they use + XCTest. + [Samuel Giddins](https://github.com/segiddins) + [#3225](https://github.com/CocoaPods/CocoaPods/issues/3225) + +##### Bug Fixes + +* Include Swift-specific build settings on target creation, i.e. disable optimizations + for debug configuration. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3238](https://github.com/CocoaPods/CocoaPods/issues/3238) + +* Only copy explicitly specified xcasset files into the bundle of the integrated target. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3219](https://github.com/CocoaPods/CocoaPods/issues/3219) + +* Correctly filter Xcode warnings about the use of dynamic frameworks. + [Boris Bügling](https://github.com/neonichu) + +* Fixes warnings, when the aggregate target doesn't contain any pod target, which is build, + because `PODS_FRAMEWORK_BUILD_PATH` was added to `FRAMEWORK_SEARCH_PATHS`, but never created. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3217](https://github.com/CocoaPods/CocoaPods/issues/3217) + +* Allows the usage of `:head` dependencies even when the most recent published + version was a pre-release. + [Samuel Giddins](https://github.com/segiddins) + [#3212](https://github.com/CocoaPods/CocoaPods/issues/3212) + +* Limit the check for transitive static binaries to those which are directly linked to the user target. + [Boris Bügling](https://github.com/neonichu) + [#3194](https://github.com/CocoaPods/CocoaPods/issues/3194) + +* Lint to prevent dynamic libraries and frameworks from passing with iOS 7. + [Boris Bügling](https://github.com/neonichu) + [#3193](https://github.com/CocoaPods/CocoaPods/issues/3193) + +* Shows an informative error message when there is no base specification found + for a `:head` dependency. + [Samuel Giddins](https://github.com/segiddins) + [#3230](https://github.com/CocoaPods/CocoaPods/issues/3230) + +* Fix the `OTHER_SWIFT_FLAGS` generated, so it inherits previous definitions. + [Daniel Thorpe](https://github.com/danthorpe) + [#2983](https://github.com/CocoaPods/CocoaPods/issues/2983) + + +## 0.36.0.rc.1 (2015-02-24) + +##### Enhancements + +* Set the `APPLICATION_EXTENSION_API_ONLY` build setting if integrating with a watch extension target. + [Boris Bügling](https://github.com/neonichu) + [#3153](https://github.com/CocoaPods/CocoaPods/issues/3153) + +* Build for iOS simulator only during validation. This allows validation without having + provisioning profiles set up. + [Boris Bügling](https://github.com/neonichu) + [#3083](https://github.com/CocoaPods/CocoaPods/issues/3083) + [Swift#13](https://github.com/CocoaPods/swift/issues/13) + +* Explicitly inform the user to close existing project when switching to + a workspace for the first time. + [Kyle Fuller](https://github.com/kylef) + [#2996](https://github.com/CocoaPods/CocoaPods/issues/2996) + +* Automatically detect conflicts between framework names. 
+ [Samuel Giddins](https://github.com/segiddins) + [#2943](https://github.com/CocoaPods/CocoaPods/issues/2943) + +* Use the proper `TMPDIR` for the CocoaPods process, instead of blindly using + `/tmp`. + [Samuel Giddins](https://github.com/segiddins) + +* Let lint fail for Swift pods supporting deployment targets below iOS 8.0. + [Boris Bügling](https://github.com/neonichu) + [#2963](https://github.com/CocoaPods/CocoaPods/issues/2963) + +* Reject installation if a static library is used as a transitive dependency + while integrating Pods as frameworks. + [Samuel Giddins](https://github.com/segiddins) + [#2926](https://github.com/CocoaPods/CocoaPods/issues/2926) + +* Do not copy Swift standard libraries multiple times. + [Boris Bügling](https://github.com/neonichu) + [#3131](https://github.com/CocoaPods/CocoaPods/issues/3131) + +* Check for Xcode License Agreement before running commands. + [Xavi Matos](https://github.com/CalQL8ed-K-OS) + [#3002](https://github.com/CocoaPods/CocoaPods/issues/3002) + +* `pod update PODNAME` will update pods in a case-insensitive manner. + [Samuel Giddins](https://github.com/segiddins) + [#2992](https://github.com/CocoaPods/CocoaPods/issues/2992) + +* Allow specifying repo names to `pod {spec,lib} lint --sources`. + [Samuel Giddins](https://github.com/segiddins) + [#2685](https://github.com/CocoaPods/CocoaPods/issues/2685) + +* Require explicit use of `use_frameworks!` for Pods written in Swift. + [Boris Bügling](https://github.com/neonichu) + [#3029](https://github.com/CocoaPods/CocoaPods/issues/3029) + +* Lint as framework automatically. If needed, `--use-libraries` option + allows linting as a static library. + [Boris Bügling](https://github.com/neonichu) + [#2912](https://github.com/CocoaPods/CocoaPods/issues/2912) + +* Adding Xcode Legacy build location support for default Pods.xcodeproj. + It defaults to `${SRCROOT}/../build` but can be changed in a `post_install` + hook by using the `Project#symroot=` writer. + [Sam Marshall](https://github.com/samdmarshall) + +##### Bug Fixes + +* Set `SKIP_INSTALL=YES` for all generated targets to avoid producing + *Generic Xcode Archives* on Archive. + [Marius Rackwitz](https://github.com/mrackwitz) + [#3188](https://github.com/CocoaPods/CocoaPods/issues/3188) + +* Added support for .tpp C++ header files in specs (previously were getting + filtered out and symlinks wouldn't get created in the Pods/Headers folder.) + [Honza Dvorsky](https://github.com/czechboy0) + [#3129](https://github.com/CocoaPods/CocoaPods/pull/3129) + +* Fixed installation for app-extension targets which had no dependencies + configured in the Podfile. + [Boris Bügling](https://github.com/neonichu) + [#3102](https://github.com/CocoaPods/CocoaPods/issues/3102) + +* Correct escaping of resource bundles in 'Copy Pods Resources' script. + [Seán Labastille](https://github.com/flufff42) + [#3082](https://github.com/CocoaPods/CocoaPods/issues/3082) + +* Correctly update sources when calling `pod outdated`, and also respect the + `--[no-]repo-update` flag. + [Samuel Giddins](https://github.com/segiddins) + [#3137](https://github.com/CocoaPods/CocoaPods/issues/3137) + +* Fix the `OTHER_SWIFT_FLAGS` generated, so `#if COCOAPODS` works in Swift. + [Samuel Giddins](https://github.com/segiddins) + [#2983](https://github.com/CocoaPods/CocoaPods/issues/2983) + +* Output a properly-formed `Podfile` when running `pod init` with a target that + contains a `'` in its name. 
+ [Samuel Giddins](https://github.com/segiddins) + [#3136](https://github.com/CocoaPods/CocoaPods/issues/3136) + +* Remove the stored lockfile checkout source when switching to a development + pod. + [Samuel Giddins](https://github.com/segiddins) + [#3141](https://github.com/CocoaPods/CocoaPods/issues/3141) + +* Migrate local Ruby podspecs to JSON, allowing updating those pods to work. + [Samuel Giddins](https://github.com/segiddins) + [#3038](https://github.com/CocoaPods/CocoaPods/issues/3038) + +* Removing grep color markup in the embed frameworks script. + [Adriano Bonat](https://github.com/tanob) + [#3117](https://github.com/CocoaPods/CocoaPods/issues/3117) + +* Fixes an issue where `pod ipc list` and `pod ipc podfile` was returning an + error. + [Kyle Fuller](https://github.com/kylef) + [#3134](https://github.com/CocoaPods/CocoaPods/issues/3134) + +* Fixes an issue with spaces in the path to the user's developer tools. + [Boris Bügling](https://github.com/neonichu) + [#3181](https://github.com/CocoaPods/CocoaPods/issues/3181) + + +## 0.36.0.beta.2 (2015-01-28) + +[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.21.0...0.21.2) + +##### Breaking + +* Changes the default spec repositories used from all configured spec + repositories, to the master spec repository when no spec repositories + are explicitly configured in a Podfile. + [Kyle Fuller](https://github.com/kylef) + [#2946](https://github.com/CocoaPods/CocoaPods/issues/2946) + +##### Enhancements + +* Set the APPLICATION_EXTENSION_API_ONLY build setting if integrating with an app extension target. + [Boris Bügling](https://github.com/neonichu) + [#2980](https://github.com/CocoaPods/CocoaPods/issues/2980) + +* Xcodebuild warnings will now be reported as `warning` during linting + instead of `note`. + [Hugo Tunius](https://github.com/K0nserv) + +* Copy only the resources required for the current build configuration. + [Samuel Giddins](https://github.com/segiddins) + [#2391](https://github.com/CocoaPods/CocoaPods/issues/2391) + +##### Bug Fixes + +* Ensure that linting fails if xcodebuild doesn't successfully build your Pod. + [Kyle Fuller](https://github.com/kylef) + [#2981](https://github.com/CocoaPods/CocoaPods/issues/2981) + [cocoapods-trunk#33](https://github.com/CocoaPods/cocoapods-trunk/issues/33) + +* Clone the master spec repository when no spec repositories are explicitly + defined in the Podfile. This fixes problems using CocoaPods for the first + time without any explicit spec repositories. + [Kyle Fuller](https://github.com/kylef) + [#2946](https://github.com/CocoaPods/CocoaPods/issues/2946) + +* Xcodebuild warnings with the string `error` in them will no longer be + linted as errors if they are in fact warnings. + [Hugo Tunius](https://github.com/K0nserv) + [#2579](https://github.com/CocoaPods/CocoaPods/issues/2579) + +* Any errors which occur during fetching of external podspecs over HTTP + will now be gracefully handled. + [Hugo Tunius](https://github.com/K0nserv) + [#2823](https://github.com/CocoaPods/CocoaPods/issues/2823) + +* When updating spec repositories only update the git sourced repos. + [Dustin Clark](https://github.com/clarkda) + [#2558](https://github.com/CocoaPods/CocoaPods/issues/2558) + +* Pods referenced via the `:podspec` option will have their podspecs properly + parsed in the local directory if the path points to a local file. 
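+  For reference, this concerns Podfile entries of the following form (the pod
+  name and path are hypothetical):
+
+      # The podspec is read from the local file rather than from a spec repo.
+      pod 'MyPod', :podspec => '../MyPod/MyPod.podspec'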
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Fix an issue where using Swift frameworks in an Objective-C host application
+  causes an error because the Swift frameworks were not code signed.
+  [Joseph Ross](https://github.com/jrosssavant)
+  [#3008](https://github.com/CocoaPods/CocoaPods/issues/3008)
+
+
+## 0.36.0.beta.1 (2014-12-25)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.35.0...0.36.0.beta.1)
+• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.35.0...0.36.0.beta.1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.20.2...0.21.0)
+• [CLAide](https://github.com/CocoaPods/CLAide/compare/v0.7.0...0.8.0)
+• [Molinillo](https://github.com/CocoaPods/Molinillo/compare/0.1.2...0.2.0)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.8.0...0.8.1)
+• [cocoapods-try](https://github.com/CocoaPods/cocoapods-try/compare/0.4.2...0.4.3)
+• [cocoapods-trunk](https://github.com/CocoaPods/cocoapods-trunk/compare/0.4.1...0.5.0)
+• [cocoapods-plugins](https://github.com/CocoaPods/cocoapods-plugins/compare/0.3.2...0.4.0)
+
+##### Highlighted Enhancement That Needs Testing
+
+* Support Frameworks & Swift: CocoaPods now recognizes Swift source files and
+  builds dynamic frameworks when necessary. A project can explicitly
+  opt in via `use_frameworks!` in the Podfile, or if any dependency contains
+  Swift code, all pods for that target will be integrated as frameworks.
+
+  As a pod author, you can change the module name of the built framework by
+  specifying a `module_name` in the podspec. The built frameworks are embedded into
+  the host application with a new shell script build phase in the user's
+  project allowing configuration-dependent pods.
+
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#2835](https://github.com/CocoaPods/CocoaPods/issues/2835)
+
+##### Breaking
+
+* Bundle Resources into Frameworks: Previously all resources were compiled and
+  copied into the `mainBundle`. Now Pods have to use
+  `[NSBundle bundleForClass:<#Class from Pod#>]` to access provided resources
+  relative to the bundle.
+
+  [Boris Bügling](https://github.com/neonichu)
+  [#2835](https://github.com/CocoaPods/CocoaPods/issues/2730)
+
+* Only the hooks specified by usage of the `plugin` directive of the `Podfile`
+  will be run. Additionally, plugins that depend on hooks will have to update to
+  specify their 'plugin name' when registering the hooks in order to make it
+  possible for those hooks to be run.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2640](https://github.com/CocoaPods/CocoaPods/issues/2640)
+
+##### Enhancements
+
+* Do not generate targets for Pods without sources.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2918](https://github.com/CocoaPods/CocoaPods/issues/2918)
+
+* Show the name of the source for each hook that is run in verbose mode.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2639](https://github.com/CocoaPods/CocoaPods/issues/2639)
+
+* Move pods' private headers to `Headers/Private` from `Headers/Build`.
+  Since some SCM ignore templates include `build` by default, this makes it
+  easier to check in the `Pods/` directory.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2623](https://github.com/CocoaPods/CocoaPods/issues/2623)
+
+* Validate that a specification's `public_header_files` and
+  `private_header_files` file patterns only match header files.
+  Also, validate that all file patterns, if given, match at least one file.
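+  A hypothetical podspec fragment showing the attributes this validation covers
+  (names and globs are illustrative only):
+
+      Pod::Spec.new do |s|
+        s.name                 = 'MyPod'   # hypothetical
+        s.source_files         = 'Sources/**/*.{h,m}'
+        # Each pattern must match only header files, and must match at least one file.
+        s.public_header_files  = 'Sources/Public/*.h'
+        s.private_header_files = 'Sources/Internal/*.h'
+      end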
+ [Samuel Giddins](https://github.com/segiddins) + [#2914](https://github.com/CocoaPods/CocoaPods/issues/2914) + +* Installer changed to organize a development pod's source and resource files + into subgroups reflecting their organization in the filesystem. + [Imre mihaly](https://github.com/imihaly) + +##### Bug Fixes + +* Fix updating a pod that has subspec dependencies. + [Samuel Giddins](https://github.com/segiddins) + [#2879](https://github.com/CocoaPods/CocoaPods/issues/2879) + +* Restore the `#define`s in the environment header when the `--no-integrate` + installation option is used. + [Samuel Giddins](https://github.com/segiddins) + [#2876](https://github.com/CocoaPods/CocoaPods/issues/2876) + +* Fix issues when trying to discover the xcodeproj automatically + but the current path contains special characters (`[`,`]`,`{`,`}`,`*`,`?`). + [Olivier Halligon](https://github.com/AliSoftware) + [#2852](https://github.com/CocoaPods/CocoaPods/issues/2852) + +* Fix linting subspecs that have a higher deployment target than the root + spec. + [Samuel Giddins](https://github.com/segiddins) + [#1919](https://github.com/CocoaPods/CocoaPods/issues/1919) + +* Fix the reading of podspecs that come from the `:git`, `:svn`, `:http`, or + `:hg` options in your `Podfile` that used context-dependent ruby code, such as + reading a file to determine the specification version. + [Samuel Giddins](https://github.com/segiddins) + [#2875](https://github.com/CocoaPods/CocoaPods/issues/2875) + +* Fix the updating of `:git`, `:svn`, and `:hg` dependencies when updating all + pods. + [Samuel Giddins](https://github.com/CocoaPods/CocoaPods/issues/2859) + [#2859](https://github.com/CocoaPods/CocoaPods/issues/2859) + +* Fix an issue when a user doesn't yet have any spec repositories configured. + [Boris Bügling](https://github.com/neonichu) + +* Fix an issue updating repositories when another spec repository doesn't + have a remote. + [Kyle Fuller](https://github.com/kylef) + [#2965](https://github.com/CocoaPods/CocoaPods/issues/2965) + + +## 0.35.0 (2014-11-19) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.34.4...0.35.0) +• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.34.4...0.35.0) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.19.4...0.20.2) +• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.7.2...0.8.0) + +For more details, see 📝 [CocoaPods 0.35](https://blog.cocoapods.org/CocoaPods-0.35/) on our blog. + +##### Enhancements + +* Allow the specification of file patterns for the Podspec's `requires_arc` + attribute. + [Kyle Fuller](https://github.com/kylef) + [Samuel Giddins](https://github.com/segiddins) + [#532](https://github.com/CocoaPods/CocoaPods/issues/532) + +* From now on, pods installed directly from their repositories will be recorded + in the `Podfile.lock` file and will be guaranteed to be checked-out using the + same revision on subsequent installations. Examples of this are when using + the `:git`, `:svn`, or `:hg` options in your `Podfile`. + [Samuel Giddins](https://github.com/segiddins) + [#1058](https://github.com/CocoaPods/CocoaPods/issues/1058) + +##### Bug Fixes + +* Fix an output formatting issue with various commands like `pod search` + and `pod trunk`. + [Olivier Halligon](https://github.com/AliSoftware) + [#2603](https://github.com/CocoaPods/CocoaPods/issues/2603) + +* Show a helpful error message if the old resolver incorrectly activated a + pre-release version that now leads to a version conflict. 
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Provides a user-friendly message when using `pod spec create` with a
+  repository that doesn't yet have any commits.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2803](https://github.com/CocoaPods/CocoaPods/issues/2803)
+
+* Fixes an issue with integrating into projects where there is a slash in the
+  build configuration name.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2767](https://github.com/CocoaPods/CocoaPods/issues/2767)
+
+* Pods will use `CLANG_ENABLE_OBJC_ARC = 'YES'` instead of
+  `CLANG_ENABLE_OBJC_ARC = 'NO'`. For pods with `requires_arc = false` the
+  `-fno-objc-arc` flag will be specified for all source files.
+  [Hugo Tunius](https://github.com/K0nserv)
+  [#2262](https://github.com/CocoaPods/CocoaPods/issues/2262)
+
+* Fixed an issue where Core Data mapping models were not compiled when
+  copying resources to the main application bundle.
+  [Yan Rabovik](https://github.com/rabovik)
+
+* Fix an `uninitialized constant Class::YAML` crash in some cases.
+  [Tim Shadel](https://github.com/timshadel)
+
+##### Enhancements
+
+* `pod search`, `pod spec which`, `pod spec cat` and `pod spec edit`
+  now use plain text search by default instead of a regex. For example,
+  `pod search UIView+UI` now searches for pods containing exactly `UIView+UI`
+  in their name, not trying to interpret the `+` as a regular expression.
+  _Note: You can still use a regular expression with the new `--regex` flag that has
+  been added to these commands, e.g. `pod search --regex "(NS|UI)Color"`._
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [Core#188](https://github.com/CocoaPods/Core/issues/188)
+
+* Use `--allow-warnings` rather than `--error-only` for pod spec validation.
+  [Daniel Tomlinson](https://github.com/DanielTomlinson)
+  [#2820](https://github.com/CocoaPods/CocoaPods/issues/2820)
+
+## 0.35.0.rc2 (2014-11-06)
+
+##### Enhancements
+
+* Allow the resolver to fail faster when there are unresolvable conflicts
+  involving the Lockfile.
+  [Samuel Giddins](https://github.com/segiddins)
+
+##### Bug Fixes
+
+* Allows pre-release spec versions when a requirement has an external source
+  specified.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2768](https://github.com/CocoaPods/CocoaPods/issues/2768)
+
+* We no longer require git version 1.7.5 or greater.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Fix the usage of `:head` pods.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2789](https://github.com/CocoaPods/CocoaPods/issues/2789)
+
+* Show a more informative message when attempting to lint a spec whose
+  source could not be downloaded.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2667](https://github.com/CocoaPods/CocoaPods/issues/2667)
+  [#2759](https://github.com/CocoaPods/CocoaPods/issues/2759)
+
+## 0.35.0.rc1 (2014-11-02)
+
+##### Highlighted Enhancements That Need Testing
+
+* The `Resolver` has been completely rewritten to use
+  [Molinillo](https://github.com/CocoaPods/Molinillo), an iterative dependency
+  resolution algorithm that automatically resolves version conflicts.
+  The order in which dependencies are declared in the `Podfile` no longer has
+  any effect on the resolution process.
+
+  You should ensure that `pod install`, `pod update` and `pod update [NAME]`
+  work as expected and install the correct versions of your pods during
+  this RC1 release.
+ [Samuel Giddins](https://github.com/segiddins) + [#978](https://github.com/CocoaPods/CocoaPods/issues/978) + [#2002](https://github.com/CocoaPods/CocoaPods/issues/2002) + +##### Breaking + +* Support for older versions of Ruby has been dropped and CocoaPods now depends + on Ruby 2.0.0 or greater. This is due to the release of Xcode 6.0 which has + dropped support for OS X 10.8, which results in the minimum version of + Ruby pre-installed on OS X now being 2.0.0. + + If you are using a custom installation of Ruby older than 2.0.0, you + will need to update. Or even better, migrate to system Ruby. + [Kyle Fuller](https://github.com/kylef) + +* Attempts to resolve circular dependencies will now raise an exception. + [Samuel Giddins](https://github.com/segiddins) + [Molinillo#6](https://github.com/CocoaPods/Molinillo/issues/6) + +##### Enhancements + +* The use of implicit sources has been un-deprecated. By default, all available + spec-repos will be used. There should only be a need to specify explicit + sources if you want to specifically _exclude_ certain spec-repos, such as the + `master` spec-repo, if you want to declare the order of spec look-up + precedence, or if you want other users of a Podfile to automatically have a + spec-repo cloned on `pod install`. + [Eloy Durán](https://github.com/alloy) + +* The `pod push` command has been removed as it has been deprecated in favour of + `pod repo push` in CocoaPods 0.33. + [Fabio Pelosin](https://github.com/fabiopelosin) + +* Refactorings in preparation to framework support, which could break usage + of the Hooks API. + [Marius Rackwitz](https://github.com/mrackwitz) + [#2461](https://github.com/CocoaPods/CocoaPods/issues/2461) + +* Implicit dependencies are now locked, so simply running `pod install` will not + cause them to be updated when they shouldn't be. + [Samuel Giddins](https://github.com/segiddins) + [#2318](https://github.com/CocoaPods/CocoaPods/issues/2318) + [#2506](https://github.com/CocoaPods/CocoaPods/issues/2506) + +* Pre-release versions are only considered in the resolution process when there + are dependencies that explicitly reference pre-release requirements. + [Samuel Giddins](https://github.com/segiddins) + [#1489](https://github.com/CocoaPods/CocoaPods/issues/1489) + +* Only setup the master specs repo if required. + [Daniel Tomlinson](https://github.com/DanielTomlinson) + [#2562](https://github.com/CocoaPods/CocoaPods/issues/2562) + +* `Sandbox::FileAccessor` now optionally includes expanded paths of headers of + vendored frameworks in `public_headers`. + [Eloy Durán](https://github.com/alloy) + [#2722](https://github.com/CocoaPods/CocoaPods/pull/2722) + +* Analysis is now halted and the user informed when there are multiple different + external sources for dependencies with the same root name. + The user is also now warned when there are duplicate dependencies in the + Podfile. + [Samuel Giddins](https://github.com/segiddins) + [#2738](https://github.com/CocoaPods/CocoaPods/issues/2738) + +* Multiple subspecs that point to the same external dependency will now only + cause that external source to be fetched once. + [Samuel Giddins](https://github.com/segiddins) + [#2743](https://github.com/CocoaPods/CocoaPods/issues/2743) + +##### Bug Fixes + +* Fixes an issue in the `XCConfigIntegrator` where not all targets that need + integration were being integrated, but were getting incorrect warnings about + the user having specified a custom base configuration. 
+ [Eloy Durán](https://github.com/alloy) + [2752](https://github.com/CocoaPods/CocoaPods/issues/2752) + +* Do not try to clone spec-repos in `/`. + [Eloy Durán](https://github.com/alloy) + [#2723](https://github.com/CocoaPods/CocoaPods/issues/2723) + +* Improved sanitizing of configuration names which have a numeric prefix. + [Steffen Matthischke](https://github.com/HeEAaD) + [#2700](https://github.com/CocoaPods/CocoaPods/pull/2700) + +* Fixes an issues where headers from a podspec with one platform are exposed to + targets with a different platform. The headers are now only exposed to the + targets with the same platform. + [Michael Melanson](https://github.com/michaelmelanson) + [Kyle Fuller](https://github.com/kylef) + [#1249](https://github.com/CocoaPods/CocoaPods/issues/1249) + + +## 0.34.4 (2014-10-18) + +##### Bug Fixes + +* Fixes a crash when running `pod outdated`. + [Samuel Giddins](https://github.com/segiddins) + [#2624](https://github.com/CocoaPods/CocoaPods/issues/2624) + +* Ensure that external sources (as specified in the `Podfile`) are downloaded + when their source is missing, even if their specification is present. + [Samuel Giddins](https://github.com/segiddins) + [#2494](https://github.com/CocoaPods/CocoaPods/issues/2494) + +* Fixes an issue where running `pod install/update` while the Xcode project + is open can cause the open project to have build failures until Xcode + is restarted. + [Kyle Fuller](https://github.com/kylef) + [#2627](https://github.com/CocoaPods/CocoaPods/issues/2627) + [#2665](https://github.com/CocoaPods/CocoaPods/issues/2665) + +* Fixes a crash when using file URLs as a source. + [Kurry Tran](https://github.com/kurry) + [#2683](https://github.com/CocoaPods/CocoaPods/issues/2683) + +* Fixes an issue when using pods in static library targets and building with + Xcode 6 which requires `OTHER_LIBTOOLFLAGS` instead of `OTHER_LDFLAGS`, thus + basically reverting to the previous Xcode behaviour, for now at least. + [Kyle Fuller](https://github.com/kylef) + [Eloy Durán](https://github.com/alloy) + [#2666](https://github.com/CocoaPods/CocoaPods/issues/2666) + +* Fixes an issue running the resources script when Xcode is installed to a + directory with a space when compiling xcassets. + [Kyle Fuller](https://github.com/kylef) + [#2684](https://github.com/CocoaPods/CocoaPods/issues/2684) + +* Fixes an issue when installing Pods with resources to a target which + doesn't have any resources. + [Kyle Fuller](https://github.com/kylef) + [#2083](https://github.com/CocoaPods/CocoaPods/issues/2083) + +* Ensure that git 1.7.5 or newer is installed when running pod. + [Kyle Fuller](https://github.com/kylef) + [#2651](https://github.com/CocoaPods/CocoaPods/issues/2651) + + +## 0.34.2 (2014-10-08) + +##### Enhancements + +* Make the output of `pod outdated` show what running `pod update` will do. + Takes into account the sources specified in the `Podfile`. + [Samuel Giddins](https://github.com/segiddins) + [#2470](https://github.com/CocoaPods/CocoaPods/issues/2470) + +* Allows the use of the `GCC_PREPROCESSOR_DEFINITION` flag `${inherited}` + without emitting a warning. + [Samuel Giddins](https://github.com/segiddins) + [#2577](https://github.com/CocoaPods/CocoaPods/issues/2577) + +* Integration with user project will no longer replace an existing + base build configuration. 
+ [Robert Jones](https://github.com/redshirtrob) + [#1736](https://github.com/CocoaPods/CocoaPods/issues/1736) + +##### Bug Fixes + +* Improved sanitizing of configuration names to avoid generating invalid + preprocessor definitions. + [Boris Bügling](https://github.com/neonichu) + [#2542](https://github.com/CocoaPods/CocoaPods/issues/2542) + +* More robust generation of source names from URLs. + [Samuel Giddins](https://github.com/segiddins) + [#2534](https://github.com/CocoaPods/CocoaPods/issues/2534) + +* Allow the `Validator` to only use specific sources. + Allows customizable source for `pod spec lint` and `pod lib lint`, + with both defaulting to `master`. + [Samuel Giddins](https://github.com/segiddins) + [#2543](https://github.com/CocoaPods/CocoaPods/issues/2543) + [cocoapods-trunk#28](https://github.com/CocoaPods/cocoapods-trunk/issues/28) + +* Takes into account the sources specified in `Podfile` running + `pod outdated`. + [Samuel Giddins](https://github.com/segiddins) + [#2553](https://github.com/CocoaPods/CocoaPods/issues/2553) + +* Ensures that the master repo is shallow cloned when added via a Podfile + `source` directive. + [Samuel Giddins](https://github.com/segiddins) + [#3586](https://github.com/CocoaPods/CocoaPods/issues/2586) + +* Ensures that the user project is not saved when there are no + user targets integrated. + [Samuel Giddins](https://github.com/segiddins) + [#2561](https://github.com/CocoaPods/CocoaPods/issues/2561) + [#2593](https://github.com/CocoaPods/CocoaPods/issues/2593) + +* Fix a crash when running `pod install` with an empty target that inherits a + pod from a parent target. + [Kyle Fuller](https://github.com/kylef) + [#2591](https://github.com/CocoaPods/CocoaPods/issues/2591) + +* Take into account versions of a Pod from all specified sources when + resolving dependencies. + [Thomas Visser](https://github.com/Thomvis) + [#2556](https://github.com/CocoaPods/CocoaPods/issues/2556) + +* Sanitize build configuration names in target environment header macros. + [Kra Larivain](https://github.com/olarivain) + [#2532](https://github.com/CocoaPods/CocoaPods/pull/2532) + + +## 0.34.1 (2014-09-26) + +##### Bug Fixes + +* Doesn't take into account the trailing `.git` in repository URLs when + trying to find a matching specs repo. + [Samuel Giddins](https://github.com/segiddins) + [#2526](https://github.com/CocoaPods/CocoaPods/issues/2526) + + +## 0.34.0 (2014-09-26) + +For more details, see 📝 [CocoaPods 0.34](https://blog.cocoapods.org/CocoaPods-0.34/) on our blog. + +##### Breaking + +* Add support for loading podspecs from *only* specific spec-repos via + `sources`. By default, when there are no sources specified in a Podfile all + source repos will be used. This has always been the case. However, this + implicit use of sources is now deprecated. Once you specify specific sources, + **no** repos will be included by default. For example: + + source 'https://github.com/artsy/Specs.git' + source 'https://github.com/CocoaPods/Specs.git' + + Any source URLs specified that have not yet been added will be cloned before + resolution begins. 
+ [François Benaiteau](https://github.com/netbe) + [Fabio Pelosin](https://github.com/fabiopelosin) + [Samuel Giddins](https://github.com/segiddins) + [#1143](https://github.com/CocoaPods/CocoaPods/pull/1143) + [Core#19](https://github.com/CocoaPods/Core/pull/19) + [Core#170](https://github.com/CocoaPods/Core/issues/170) + [#2515](https://github.com/CocoaPods/CocoaPods/issues/2515) + +##### Enhancements + +* Added the `pod repo list` command which lists all the repositories. + [Luis Ascorbe](https://github.com/lascorbe) + [#1455](https://github.com/CocoaPods/CocoaPods/issues/1455) + +##### Bug Fixes + +* Works around an Xcode issue where linting would fail even though `xcodebuild` + actually succeeds. Xcode.app also doesn't fail when this issue occurs, so it's + safe for us to do the same. + [Kra Larivain](https://github.com/olarivain) + [Boris Bügling](https://github.com/neonichu) + [Eloy Durán](https://github.com/alloy) + [Samuel E. Giddins](https://github.com/segiddins) + [#2394](https://github.com/CocoaPods/CocoaPods/issues/2394) + [#2395](https://github.com/CocoaPods/CocoaPods/pull/2395) + +* Fixes the detection of JSON podspecs included via `:path`. + [laiso](https://github.com/laiso) + [#2489](https://github.com/CocoaPods/CocoaPods/pull/2489) + +* Fixes an issue where `pod install` would crash during Plist building if any + pod has invalid UTF-8 characters in their title or description. + [Ladislav Martincik](https://github.com/martincik) + [#2482](https://github.com/CocoaPods/CocoaPods/issues/2482) + +* Fix crash when the URL of a private GitHub repo is passed to `pod spec + create` as an argument. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1543](https://github.com/CocoaPods/CocoaPods/issues/1543) + + +## 0.34.0.rc2 (2014-09-16) + +##### Bug Fixes + +* Fixes an issue where `pod lib lint` would crash if a podspec couldn't be + loaded. + [Kyle Fuller](https://github.com/kylef) + [#2147](https://github.com/CocoaPods/CocoaPods/issues/2147) + +* Fixes an issue where `pod init` would not add `source 'master'` to newly + created Podfiles. + [Ash Furrow](https://github.com/AshFurrow) + [#2473](https://github.com/CocoaPods/CocoaPods/issues/2473) + + +## 0.34.0.rc1 (2014-09-13) + +##### Breaking + +* The use of the `$PODS_ROOT` environment variable has been deprecated and + should not be used. It will be removed in future versions of CocoaPods. + [#2449](https://github.com/CocoaPods/CocoaPods/issues/2449) + +* Add support for loading podspecs from specific spec-repos _only_, a.k.a. ‘sources’. + By default, when not specifying any specific sources in your Podfile, the ‘master’ + spec-repo will be used, as was always the case. However, once you specify specific + sources the ‘master’ spec-repo will **not** be included by default. For example: + + source 'private-spec-repo' + source 'master' + + [François Benaiteau](https://github.com/netbe) + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1143](https://github.com/CocoaPods/CocoaPods/pull/1143) + [Core#19](https://github.com/CocoaPods/Core/pull/19) + +* The `Pods` directory has been reorganized. This might require manual + intervention in projects where files generated by CocoaPods have manually been + imported into the user's project (common with the acknowledgements files). + [#1055](https://github.com/CocoaPods/CocoaPods/pull/1055) + [Fabio Pelosin](https://github.com/fabiopelosin) + [Michele Titolo](https://github.com/mtitolo) + +* Plugins are now expected to include the `cocoapods-plugin.rb` file in + `./lib`. 
+ [Fabio Pelosin](https://github.com/fabiopelosin) + [CLAide#28](https://github.com/CocoaPods/CLAide/pull/28) + +* The specification `requires_arc` attribute now defaults to true. + [Fabio Pelosin](https://github.com/fabiopelosin) + [CocoaPods#267](https://github.com/CocoaPods/CocoaPods/issues/267) + +##### Enhancements + +* Add support to specify dependencies per build configuration: + + pod 'Lookback', :configurations => ['Debug'] + + Currently configurations can only be specified per single Pod. + [Joachim Bengtsson](https://github.com/nevyn) + [Eloy Durán](https://github.com/alloy) + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1791](https://github.com/CocoaPods/CocoaPods/pull/1791) + [#1668](https://github.com/CocoaPods/CocoaPods/pull/1668) + [#731](https://github.com/CocoaPods/CocoaPods/pull/731) + +* Improved performance of git downloads using shallow clone. + [Marin Usalj](https://github.com/supermarin) + [Fabio Pelosin](https://github.com/fabiopelosin) + [cocoapods-downloader#29](https://github.com/CocoaPods/cocoapods-downloader/pull/29) + +* Simplify installation: CocoaPods no longer requires the + compilation of the troublesome native extensions. + [Fabio Pelosin](https://github.com/fabiopelosin) + [Xcodeproj#168](https://github.com/CocoaPods/Xcodeproj/pull/168) + [Xcodeproj#167](https://github.com/CocoaPods/Xcodeproj/issues/167) + +* Add hooks for plugins. Currently only the installer hook is supported. + A plugin can register itself to be activated after the installation with the + following syntax: + + Pod::HooksManager.register(:post_install) do |installer_context| + # implementation + end + + The `installer_context` is an instance of the `Pod::Installer:HooksContext` + class which provides the information about the installation. + [Fabio Pelosin](https://github.com/fabiopelosin) + [Core#132](https://github.com/CocoaPods/Core/pull/1755) + +* Add a support for migrating the sandbox to new versions of CocoaPods. + [Fabio Pelosin](https://github.com/fabiopelosin) + +* Display an indication for deprecated Pods in the command line search. + [Hugo Tunius](https://github.com/k0nserv) + [#2180](https://github.com/CocoaPods/CocoaPods/issues/2180) + +* Use the CLIntegracon gem for the integration tests. + [Marius Rackwitz](https://github.com/mrackwitz) + [#2371](https://github.com/CocoaPods/CocoaPods/issues/2371) + +* Include configurations that a user explicitly specifies, in their Podfile, + when the `--no-integrate` option is specified. + [Eloy Durán](https://github.com/alloy) + +* Properly quote the `-isystem` values in the xcconfig files. + [Eloy Durán](https://github.com/alloy) + +* Remove the installation post install message which presents the CHANGELOG. + [Fabio Pelosin](https://github.com/fabiopelosin) + [Eloy Durán](https://github.com/alloy) + +* Add support for user-specified project directories with the + `--project-directory` option. + [Samuel E. Giddins](https://github.com/segiddins) + [#2183](https://github.com/CocoaPods/CocoaPods/issues/2183) + +* Now the `plutil` tool is used when available to produce + output consistent with Xcode. + [Fabio Pelosin](https://github.com/fabiopelosin) + +* Indicate the name of the pod whose requirements cannot be satisfied. + [Seivan Heidari](https://github.com/seivan) + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1938](https://github.com/CocoaPods/CocoaPods/issues/1938) + +* Add support for JSON specs to external sources (`:path`, `:git`, etc) + options. 
+ [Kyle Fuller](https://github.com/kylef) + [#2320](https://github.com/CocoaPods/CocoaPods/issues/2320) + +* Generate the workspaces using the same output of Xcode. + [Fabio Pelosin](https://github.com/fabiopelosin) + + +##### Bug Fixes + +* Fix `pod repo push` to first check if a Specs directory exists and if so + push there. + [Edward Valentini](edwardvalentini) + [#2060](https://github.com/CocoaPods/CocoaPods/issues/2060) + +* Fix `pod outdated` to not include subspecs. + [Ash Furrow](ashfurrow) + [#2136](https://github.com/CocoaPods/CocoaPods/issues/2136) + +* Always evaluate podspecs from the original podspec directory. This fixes + an issue when depending on a pod via `:path` and that pod's podspec uses + relative paths. + [Kyle Fuller](kylef) + [pod-template#50](https://github.com/CocoaPods/pod-template/issues/50) + +* Fix spec linting to not warn for missing license file in subspecs. + [Fabio Pelosin](https://github.com/fabiopelosin) + [Core#132](https://github.com/CocoaPods/Core/issues/132) + +* Fix `pod init` so that it doesn't recurse when checking for Podfiles. + [Paddy O'Brien](https://github.com/tapi) + [#2181](https://github.com/CocoaPods/CocoaPods/issues/2181) + +* Fix missing XCTest framework in Xcode 6. + [Paul Williamson](squarefrog) + [#2296](https://github.com/CocoaPods/CocoaPods/issues/2296) + +* Support multiple values in `ARCHS`. + [Robert Zuber](https://github.com/z00b) + [#1904](https://github.com/CocoaPods/CocoaPods/issues/1904) + +* Fix static analysis in Xcode 6. + [Samuel E. Giddins](https://github.com/segiddins) + [#2402](https://github.com/CocoaPods/CocoaPods/issues/2402) + +* Fix an issue where a version of a spec will not be locked when using + multiple subspecs of a podspec. + [Kyle Fuller](https://github.com/kylef) + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2135](https://github.com/CocoaPods/CocoaPods/issues/2135) + +* Fix an issue using JSON podspecs installed directly from a lib's + repository. + [Kyle Fuller](https://github.com/kylef) + [#2320](https://github.com/CocoaPods/CocoaPods/issues/2320) + +* Support and use quotes in the `OTHER_LDFLAGS` of xcconfigs to avoid + issues with targets containing a space character in their name. + [Fabio Pelosin](https://github.com/fabiopelosin) + + +## 0.33.1 (2014-05-20) + +##### Bug Fixes + +* Fix `pod spec lint` for `json` podspecs. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2157](https://github.com/CocoaPods/CocoaPods/issues/2157) + +* Fixed downloader issues related to `json` podspecs. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2158](https://github.com/CocoaPods/CocoaPods/issues/2158) + +* Fixed `--no-ansi` flag in help banners. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#34](https://github.com/CocoaPods/CLAide/issues/34) + + +## 0.33.0 (2014-05-20) + +For more details, see 📝 [CocoaPods 0.33](https://blog.cocoapods.org/CocoaPods-0.33/) on our blog. + +##### Breaking + +* The deprecated `pre_install` and the `pod_install` hooks of the specification + class have been removed. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2151](https://github.com/CocoaPods/CocoaPods/issues/2151) + [#2153](https://github.com/CocoaPods/CocoaPods/pull/2153) + +##### Enhancements + +* Added the `cocoapods-trunk` plugin which introduces the `trunk` subcommand. 
+ [Fabio Pelosin](https://github.com/fabiopelosin) + [#2151](https://github.com/CocoaPods/CocoaPods/issues/2151) + [#2153](https://github.com/CocoaPods/CocoaPods/pull/2153) + +* The `pod push` sub-command has been moved to the `pod repo push` sub-command. + Moreover pushing to the master repo from it has been disabled. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2151](https://github.com/CocoaPods/CocoaPods/issues/2151) + [#2153](https://github.com/CocoaPods/CocoaPods/pull/2153) + +* Overhauled command line interface. Add support for auto-completion script + (d). If auto-completion is enabled for your shell you can configure it for + CocoaPods with the following command: + + rm -f /usr/local/share/zsh/site-functions/\_pod + dpod --completion-script > /usr/local/share/zsh/site-functions/\_pod + exec zsh + + Currently only the Z shell is supported. + [Fabio Pelosin](https://github.com/fabiopelosin) + [CLAide#25](https://github.com/CocoaPods/CLAide/issues/25) + [CLAide#20](https://github.com/CocoaPods/CLAide/issues/20) + [CLAide#19](https://github.com/CocoaPods/CLAide/issues/19) + [CLAide#17](https://github.com/CocoaPods/CLAide/issues/17) + [CLAide#12](https://github.com/CocoaPods/CLAide/issues/12) + +* The `--version` flag is now only supported for the root `pod` command. If + used in conjunction with the `--verbose` flag the version of the detected + plugins will be printed as well. + [Fabio Pelosin](https://github.com/fabiopelosin) + [CLAide#13](https://github.com/CocoaPods/CLAide/issues/13) + [CLAide#14](https://github.com/CocoaPods/CLAide/issues/14) + +* The extremely meta `cocoaPods-plugins` is now installed by default providing + information about the available and the installed plug-ins. + [David Grandinetti](https://github.com/dbgrandi) + [Olivier Halligon](https://github.com/AliSoftware) + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2092](https://github.com/CocoaPods/CocoaPods/issues/2092) + +* Validate the reachability of `social_media_url`, `documentation_url` and + `docset_url` in podspecs we while linting a specification. + [Kyle Fuller](https://github.com/kylef) + [#2025](https://github.com/CocoaPods/CocoaPods/issues/2025) + +* Print the current version when the repo/lockfile requires a higher version. + [Samuel E. Giddins](https://github.com/segiddins) + [#2049](https://github.com/CocoaPods/CocoaPods/issues/2049) + +* Show `help` when running the `pod` command instead of defaulting to `pod + install`. + [Kyle Fuller](https://github.com/kylef) + [#1771](https://github.com/CocoaPods/CocoaPods/issues/1771) + +##### Bug Fixes + +* Show the actual executable when external commands fail. + [Boris Bügling](https://github.com/neonichu) + [#2102](https://github.com/CocoaPods/CocoaPods/issues/2102) + +* Fixed support for file references in the workspace generated by CocoaPods. + [Kyle Fuller](https://github.com/kylef) + [Fabio Pelosin](https://github.com/fabiopelosin) + [Xcodeproj#105](https://github.com/CocoaPods/Xcodeproj/pull/150) + +* Show a helpful error message when reading version information with merge + conflict. + [Samuel E. Giddins](https://github.com/segiddins) + [#1853](https://github.com/CocoaPods/CocoaPods/issues/1853) + +* Show deprecated specs when invoking `pod outdated`. + [Samuel E. Giddins](https://github.com/segiddins) + [#2003](https://github.com/CocoaPods/CocoaPods/issues/2003) + +* Fixes an issue where `pod repo update` may start an un-committed merge. 
+ [Kyle Fuller](https://github.com/kylef) + [#2024](https://github.com/CocoaPods/CocoaPods/issues/2024) + +## 0.32.1 (2014-04-15) + +##### Bug Fixes + +* Fixed the Podfile `default_subspec` attribute in nested subspecs. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#2050](https://github.com/CocoaPods/CocoaPods/issues/2050) + +## 0.32.0 (2014-04-15) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.31.1...0.32.0) +• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.31.1...0.32.0) + +For more details, see 📝 [CocoaPods 0.32](https://blog.cocoapods.org/CocoaPods-0.32/) on our blog. + +##### Enhancements + +* Allow to update only a list of given pods with `pod update [POD_NAMES...]`. + [Marius Rackwitz](https://github.com/mrackwitz) + [CocoaPods#760](https://github.com/CocoaPods/CocoaPods/issues/760) + +* `pod update` prints the previous version of the updated pods. + [Andrea Mazzini](https://github.com/andreamazz) + [#2008](https://github.com/CocoaPods/CocoaPods/issues/2008) + +* `pod update` falls back to `pod install` if no Lockfile is present. + [Marius Rackwitz](https://github.com/mrackwitz) + +* File references in the Pods project for development Pods now are absolute if + the dependency is specified with an absolute paths. + [Samuel Ford](https://github.com/samuelwford) + [#1042](https://github.com/CocoaPods/CocoaPods/issues/1042) + +* Added `deprecated` and `deprecated_in_favor_of` attributes to Specification + DSL. + [Paul Young](https://github.com/paulyoung) + [Core#87](https://github.com/CocoaPods/Core/pull/87) + +* Numerous improvements to the validator and to the linter. + * Validate the reachability of screenshot URLs in podspecs while linting a + specification. + [Kyle Fuller](https://github.com/kylef) + [#2010](https://github.com/CocoaPods/CocoaPods/issues/2010) + * Support HTTP redirects when linting homepage and screenshots. + [Boris Bügling](https://github.com/neonichu) + [#2027](https://github.com/CocoaPods/CocoaPods/pull/2027) + * The linter now checks `framework` and `library` attributes for invalid + strings. + [Paul Williamson](https://github.com/squarefrog) + [Fabio Pelosin](fabiopelosin) + [Core#66](https://github.com/CocoaPods/Core/issues/66) + [Core#96](https://github.com/CocoaPods/Core/pull/96) + [Core#105](https://github.com/CocoaPods/Core/issues/105) + * The Linter will not check for comments anymore. + [Fabio Pelosin](https://github.com/fabiopelosin) + [Core#108](https://github.com/CocoaPods/Core/issues/108) + * Removed legacy checks from the linter. + [Fabio Pelosin](https://github.com/fabiopelosin) + [Core#108](https://github.com/CocoaPods/Core/issues/108) + * Added logic to handle subspecs and platform scopes to linter check of + the `requries_arc` attribute. + [Fabio Pelosin](https://github.com/fabiopelosin) + [CocoaPods#2005](https://github.com/CocoaPods/CocoaPods/issues/2005) + * The linter no longer considers empty a Specification if it only specifies the + `resource_bundle` attribute. + [Joshua Kalpin](https://github.com/Kapin) + [#63](https://github.com/CocoaPods/Core/issues/63) + [#95](https://github.com/CocoaPods/Core/pull/95) + +* `pod lib create` is now using the `configure` file instead of the + `_CONFIGURE.rb` file. 
+ [Piet Brauer](https://github.com/pietbrauer) + [Orta Therox](https://github.com/orta) + +* `pod lib create` now disallows any pod name that begins with a `.` + [Dustin Clark](https://github.com/clarkda) + [#2026](https://github.com/CocoaPods/CocoaPods/pull/2026) + [Core#97](https://github.com/CocoaPods/Core/pull/97) + [Core#98](https://github.com/CocoaPods/Core/issues/98) + +* Prevent the user from using `pod` commands as root. + [Kyle Fuller](https://github.com/kylef) + [#1815](https://github.com/CocoaPods/CocoaPods/issues/1815) + +* Dependencies declared with external sources now support HTTP downloads and + have improved support for all the options supported by the downloader. + [Fabio Pelosin](https://github.com/fabiopelosin) + +* An informative error message is presented when merge conflict is detected in + a YAML file. + [Luis de la Rosa](https://github.com/luisdelarosa) + [#69](https://github.com/CocoaPods/Core/issues/69) + [#100](https://github.com/CocoaPods/Core/pull/100) + +##### Bug Fixes + +* Fixed the Podfile `default_subspec` attribute in nested subspecs. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1021](https://github.com/CocoaPods/CocoaPods/issues/1021) + +* Warn when including deprecated pods + [Samuel E. Giddins](https://github.com/segiddins) + [#2003](https://github.com/CocoaPods/CocoaPods/issues/2003) + + +## 0.31.1 (2014-04-01) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.31.0...0.31.1) +• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.31.0...0.31.1) + +##### Minor Enhancements + +* The specification now strips the indentation of the `prefix_header` and + `prepare_command` to aide their declaration as a here document (similarly to + what it already does with the description). + [Fabio Pelosin](https://github.com/fabiopelosin) + [Core#51](https://github.com/CocoaPods/Core/issues/51) + +##### Bug Fixes + +* Fix linting for Pods which declare a private repo as the source. + [Boris Bügling](https://github.com/neonichu) + [Core#82](https://github.com/CocoaPods/Core/issues/82) + + +## 0.31.0 (2014-03-31) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.30.0...0.31.0) +• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.30.0...0.31.0) + +For more details, see 📝 [CocoaPods 0.31](https://blog.cocoapods.org/CocoaPods-0.31/) on our blog. + +##### Enhancements + +* Warnings are not promoted to errors anymore to maximise compatibility with + existing libraries. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1629](https://github.com/CocoaPods/CocoaPods/issues/1629) + +* Include the versions of the Pods to the output of `pod list`. + [Stefan Damm](https://github.com/StefanDamm) + [Robert Zuber](https://github.com/z00b) + [#1617](https://github.com/CocoaPods/CocoaPods/issues/1617) + +* Generated prefix header file will now have unique prefix_header_contents for + Pods with subspecs. + [Luis de la Rosa](https://github.com/luisdelarosa) + [#1449](https://github.com/CocoaPods/CocoaPods/issues/1449) + +* The linter will now check the reachability of the homepage of Podspecs during + a full lint. + [Richard Lee](https://github.com/dlackty) + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1704](https://github.com/CocoaPods/CocoaPods/issues/1704) + [Core#70](https://github.com/CocoaPods/Core/pull/70) + +* Improved detection of the last version of a specification in `pod spec` + subcommands. 
+ [Laurent Sansonetti](https://github.com/lrz) + [#1953](https://github.com/CocoaPods/CocoaPods/pull/1953) + +* Display advised settings for Travis CI in the warning related presented when + the terminal encoding is not set to UTF-8. + [Richard Lee](https://github.com/dlackty) + [#1933](https://github.com/CocoaPods/CocoaPods/issues/1933) + [#1941](https://github.com/CocoaPods/CocoaPods/pull/1941) + +* Unset the `CDPATH` env variable before shelling-out to `prepare_command`. + [Marc Boquet](https://github.com/apalancat) + [#1943](https://github.com/CocoaPods/CocoaPods/pull/1943) + +##### Bug Fixes + +* Resolve crash related to the I18n deprecation warning. + [Eloy Durán](https://github.com/alloy) + [#1950](https://github.com/CocoaPods/CocoaPods/issues/1950) + +* Fix compilation issues related to the native Extension of Xcodeproj. + [Eloy Durán](https://github.com/alloy) + +* Robustness against user Git configuration and against merge commits in `pod + repo` subcommands. + [Boris Bügling](https://github.com/neonichu) + [#1949](https://github.com/CocoaPods/CocoaPods/issues/1949) + [#1978](https://github.com/CocoaPods/CocoaPods/pull/1978) + +* Gracefully inform the user if the `:head` option is not supported for a given + download strategy. + [Boris Bügling](https://github.com/neonichu) + [#1947](https://github.com/CocoaPods/CocoaPods/issues/1947) + [#1958](https://github.com/CocoaPods/CocoaPods/pull/1958) + +* Cleanup a pod directory if error occurs while downloading. + [Alex Rothenberg](https://github.com/alexrothenberg) + [#1842](https://github.com/CocoaPods/CocoaPods/issues/1842) + [#1960](https://github.com/CocoaPods/CocoaPods/pull/1960) + +* No longer warn for Github repositories with OAuth authentication. + [Boris Bügling](https://github.com/neonichu) + [#1928](https://github.com/CocoaPods/CocoaPods/issues/1928) + [Core#77](https://github.com/CocoaPods/Core/pull/77) + +* Fix for when using `s.version` as the `:tag` for a git repository in a + Podspec. + [Joel Parsons](https://github.com/joelparsons) + [#1721](https://github.com/CocoaPods/CocoaPods/issues/1721) + [Core#72](https://github.com/CocoaPods/Core/pull/72) + +* Improved escaping of paths in Git downloader. + [Vladimir Burdukov](https://github.com/chipp) + [cocoapods-downloader#14](https://github.com/CocoaPods/cocoapods-downloader/pull/14) + +* Podspec without explicitly set `requires_arc` attribute no longer passes the + lint. + [Richard Lee](https://github.com/dlackty) + [#1840](https://github.com/CocoaPods/CocoaPods/issues/1840) + [Core#71](https://github.com/CocoaPods/Core/pull/71) + +* Properly quote headers in the `-isystem` compiler flag of the aggregate + targets. + [Eloy Durán](https://github.com/alloy) + [#1862](https://github.com/CocoaPods/CocoaPods/issues/1862) + [#1894](https://github.com/CocoaPods/CocoaPods/pull/1894) + +## 0.30.0 (2014-03-29) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.29.0...0.30.0) + +For more details, see 📝 [CocoaPods 0.30](https://blog.cocoapods.org/CocoaPods-0.30/) on our blog. + +###### Enhancements + +* Radically reduce first run pod setup bandwidth by creating a shallow clone of + the ‘master’ repo by default. Use the `--no-shallow` option to perform a full + clone instead. + [Jeff Verkoeyen](https://github.com/jverkoey) + [#1803](https://github.com/CocoaPods/CocoaPods/pull/1803) + +* Improves the error message when searching with an invalid regular expression. 
+ [Kyle Fuller](https://github.com/kylef) + +* Improves `pod init` to save Xcode project file in Podfile when one was supplied. + [Kyle Fuller](https://github.com/kylef) + +* Adds functionality to specify a template URL for the `pod lib create` command. + [Piet Brauer](https://github.com/pietbrauer) + +###### Bug Fixes + +* Fixes a bug with `pod repo remove` silently handling permission errors. + [Kyle Fuller](https://github.com/kylef) + [#1778](https://github.com/CocoaPods/CocoaPods/issues/1778) + +* `pod push` now properly checks that the repo has changed before attempting + to commit. This only affected pods with special characters (such as `+`) in + their names. + [Gordon Fontenot](https://github.com/gfontenot) + [#1739](https://github.com/CocoaPods/CocoaPods/pull/1739) + + +## 0.29.0 (2013-12-25) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.28.0...0.29.0) +• [CocoaPods-core](https://github.com/CocoaPods/Core/compare/0.28.0...0.29.0) +• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.2.0...0.3.0) + +For more details, see 📝 [CocoaPods 0.29](https://blog.cocoapods.org/CocoaPods-0.29/) on our blog. + +###### Breaking + +* The command `podfile_info` is now a plugin offered by CocoaPods. + As a result, the command has been removed from CocoaPods. + [Joshua Kalpin](https://github.com/Kapin) + [#1589](https://github.com/CocoaPods/CocoaPods/issues/1589) + +* JSON has been adopted as the format to store specifications. As a result + the `pod ipc spec` command returns a JSON representation and the YAML + specifications are not supported anymore. JSON specifications adopt the + `.podspec.json` extension. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1568](https://github.com/CocoaPods/CocoaPods/pull/1568) + +###### Enhancements + +* Introduced `pod try` the easiest way to test the example project of a pod. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1568](https://github.com/CocoaPods/CocoaPods/pull/1568) + +* Pod headers are now provided to the user target as a system + header. This means that any warnings in a Pod's code will show + under its target in Xcode's build navigator, and never under the + user target. + [Swizzlr](https://github.com/swizzlr) + [#1596](https://github.com/CocoaPods/CocoaPods/pull/1596) + +* Support LZMA2 compressed tarballs in the downloader. + [Kyle Fuller](https://github.com/kylef) + [cocoapods-downloader#5](https://github.com/CocoaPods/cocoapods-downloader/pull/5) + +* Add Bazaar support for installing directly from a repo. + [Fred McCann](https://github.com/fmccann) + [#1632](https://github.com/CocoaPods/CocoaPods/pull/1632) + +* The `pod search ` command now supports regular expressions + for the query parameter when searching using the option `--full`. + [Florian Hanke](https://github.com/floere) + [#1643](https://github.com/CocoaPods/CocoaPods/pull/1643) + +* Pod lib lint now accepts multiple podspecs in the same folder. + [kra Larivain/OpenTable](https://github.com/opentable) + [#1635](https://github.com/CocoaPods/CocoaPods/pull/1635) + +* The `pod push` command will now silently test the upcoming CocoaPods trunk + service. The service is only tested when pushing to the master repo and the + test doesn't affect the normal workflow. + [Fabio Pelosin](https://github.com/fabiopelosin) + +* The `pod search ` command now supports searching on cocoapods.org + when searching using the option `--web`. Options `--ios` and `--osx` are + fully supported. 
+ [Florian Hanke](https://github.com/floere) + [#1643](https://github.com/CocoaPods/CocoaPods/pull/1682) + +* The `pod search ` command now supports multiword queries when using + the `--web` option. + [Florian Hanke](https://github.com/floere) + [#1643](https://github.com/CocoaPods/CocoaPods/pull/1682) + +###### Bug Fixes + +* Fixed a bug which resulted in `pod lib lint` not being able to find the + headers. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1566](https://github.com/CocoaPods/CocoaPods/issues/1566) + +* Fixed the developer frameworks search paths so that + `$(SDKROOT)/Developer/Library/Frameworks` is used for iOS and + `$(DEVELOPER_LIBRARY_DIR)/Frameworks` is used for OS X. + [Kevin Wales](https://github.com/kwales) + [#1562](https://github.com/CocoaPods/CocoaPods/pull/1562) + +* When updating the pod repos, repositories with unreachable remotes + are now ignored. This fixes an issue with certain private repositories. + [Joshua Kalpin](https://github.com/Kapin) + [#1595](https://github.com/CocoaPods/CocoaPods/pull/1595) + [#1571](https://github.com/CocoaPods/CocoaPods/issues/1571) + +* The linter will now display an error if a Pod's name contains whitespace. + [Joshua Kalpin](https://github.com/Kapin) + [Core#39](https://github.com/CocoaPods/Core/pull/39) + [#1610](https://github.com/CocoaPods/CocoaPods/issues/1610) + +* Having the silent flag enabled in the config will no longer cause issues + with `pod search`. In addition, the flag `--silent` is no longer supported + for the command. + [Joshua Kalpin](https://github.com/Kapin) + [#1627](https://github.com/CocoaPods/CocoaPods/pull/1627) + +* The linter will now display an error if a framework ends with `.framework` + (i.e. `QuartzCore.framework`). + [Joshua Kalpin](https://github.com/Kapin) + [#1331](https://github.com/CocoaPods/CocoaPods/issues/1336) + [Core#45](https://github.com/CocoaPods/Core/pull/45) + +* The linter will now display an error if a library ends with `.a` or `.dylib` + (i.e. `z.dylib`). It will also display an error if it begins with `lib` + (i.e. `libxml`). + [Joshua Kalpin](https://github.com/Kapin) + [Core#44](https://github.com/CocoaPods/Core/issues/44) + +* The ARCHS build setting can come back as an array when more than one + architecture is specified. + [Carson McDonald](https://github.com/carsonmcdonald) + [#1628](https://github.com/CocoaPods/CocoaPods/issues/1628) + +* Fixed all issues caused by `/tmp` being a symlink to `/private/tmp`. + This affected mostly `pod lib lint`, causing it to fail when the + Pod used `prefix_header_*` or when the pod headers imported headers + using the namespaced syntax (e.g. `#import `). + [kra Larivain/OpenTable](https://github.com/opentable) + [#1514](https://github.com/CocoaPods/CocoaPods/pull/1514) + +* Fixed an incorrect path being used in the example app Podfile generated by + `pod lib create`. + [Eloy Durán](https://github.com/alloy) + [cocoapods-try#5](https://github.com/CocoaPods/cocoapods-try/issues/5) + + +## 0.28.0 (2013-11-14) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.27.1...0.28.0) +• [CocoaPods-core](https://github.com/CocoaPods/Core/compare/0.27.1...0.28.0) +• [CLAide](https://github.com/CocoaPods/CLAide/compare/0.3.2...0.4.0) + +For more details, see 📝 [CocoaPods 0.28](https://blog.cocoapods.org/CocoaPods-0.28/) on our blog. + +###### Enhancements + +* CLAide now supports gem plugins. An example CocoaPods plugin can be found at + [open\_pod\_bay](https://github.com/leshill/open_pod_bay). 
+ + As of yet there are no promises made yet on the APIs, so try to fail as + gracefully as possible in case a CocoaPods update breaks your usage. In these + cases, also please let us know what you would need, so we can take this into + account when we do finalize APIs. + + [Les Hill](https://github.com/leshill) + [CLAide#1](https://github.com/CocoaPods/CLAide/pull/1) + [#959](https://github.com/CocoaPods/CocoaPods/issues/959) + +###### Bug Fixes + +* Compiling `xcassets` with `actool` now uses `UNLOCALIZED_RESOURCES_FOLDER_PATH` + instead of `PRODUCT_NAME.WRAPPER_EXTENSION` as output directory as it is more + accurate and allows the project to overwrite `WRAPPER_NAME`. + [Marc Knaup](https://github.com/fluidsonic) + [#1556](https://github.com/CocoaPods/CocoaPods/pull/1556) + +* Added a condition to avoid compiling xcassets when `WRAPPER_EXTENSION` + is undefined, as it would be in the case of static libraries. This prevents + trying to copy the compiled files to a directory that does not exist. + [Noah McCann](https://github.com/nmccann) + [#1521](https://github.com/CocoaPods/CocoaPods/pull/1521) + +* Added additional condition to check if `actool` is available when compiling + `xcassets`. This prevents build failures of Xcode 5 projects on Travis CI (or + lower Xcode versions). + [Michal Konturek](https://github.com/michalkonturek) + [#1511](https://github.com/CocoaPods/CocoaPods/pull/1511) + +* Added a condition to properly handle universal or mac apps when compiling + xcassets. This prevents build errors in the xcassets compilation stage + particularly when using xctool to build. + [Ryan Marsh](https://github.com/ryanwmarsh) + [#1594](https://github.com/CocoaPods/CocoaPods/pull/1594) + +* Vendored Libraries now correctly affect whether a podspec is considered empty. + [Joshua Kalpin](https://github.com/Kapin) + [Core#38](https://github.com/CocoaPods/Core/pull/38) + +* Vendored Libraries and Vendored Frameworks now have their paths validated correctly. + [Joshua Kalpin](https://github.com/Kapin) + [#1567](https://github.com/CocoaPods/CocoaPods/pull/1567) + +* Gists are now correctly accepted with https. + [Joshua Kalpin](https://github.com/Kapin) + [Core#38](https://github.com/CocoaPods/Core/pull/38) + +* The `pod push` command is now more specific about the branch it pushes to. + [orta](http://orta.github.io) + [#1561](https://github.com/CocoaPods/CocoaPods/pull/1561) + +* Dtrace files are now properly left unflagged when installing, regardless of configuration. + [Swizzlr](https://github.com/swizzlr) + [#1560](https://github.com/CocoaPods/CocoaPods/pull/1560) + +* Users are now warned if their terminal encoding is not UTF-8. This fixes an issue + with a small percentage of pod names that are incompatible with ASCII. + [Joshua Kalpin](https://github.com/Kapin) + [#1570](https://github.com/CocoaPods/CocoaPods/pull/1570) + + +## 0.27.1 (2013-10-24) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.26.2...0.27.1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.26.2...0.27.1) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.13.0...0.14.0) + +For more details, see 📝 [CocoaPods 0.27 and improved installation UX](https://blog.cocoapods.org/CocoaPods-0.27-and-improved-installation-UX/) on our blog. + +###### Enhancements + +* The xcodeproj gem now comes bundled with prebuilt binaries for the Ruby + versions that come with OS X 10.8 and 10.9. Users now no longer need to + install the Xcode Command Line Tools or deal with the Ruby C header location. 
+ [Eloy Durán](https://github.com/alloy) + [Xcodeproj#88](https://github.com/CocoaPods/Xcodeproj/issues/88) + +* Targets passed to the `link_with` method of the Podfile DSL no longer need + to be explicitly passed as an array. `link_with ['target1', 'target2']` can + now be written as `link_with 'target1', 'target2'`. + [Adam Sharp](https://github.com/sharplet) + [Core#30](https://github.com/CocoaPods/Core/pull/30) + +* The copy resources script now compiles xcassets resources. + [Ulrik Damm](https://github.com/ulrikdamm) + [#1427](https://github.com/CocoaPods/CocoaPods/pull/1427) + +* `pod repo` now support a `remove ['repo_name']` command. + [Joshua Kalpin](https://github.com/Kapin) + [#1493](https://github.com/CocoaPods/CocoaPods/issues/1493) + [#1484](https://github.com/CocoaPods/CocoaPods/issues/1484) + +###### Bug Fixes + +* The architecture is now set in the build settings of the user build + configurations. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1450](https://github.com/CocoaPods/CocoaPods/issues/1462) + [#1462](https://github.com/CocoaPods/CocoaPods/issues/1462) + +* Fixed a crash related to CocoaPods being unable to resolve an unique build + setting of an user target with custom build configurations. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1462](https://github.com/CocoaPods/CocoaPods/issues/1462) + [#1463](https://github.com/CocoaPods/CocoaPods/issues/1463) + [#1457](https://github.com/CocoaPods/CocoaPods/issues/1457) + +* Fixed a defect which prevented subspecs from being dependant on a pod with a + name closely matching the name of one of the subspec's parents. + [Noah McCann](https://github.com/nmccann) + [#29](https://github.com/CocoaPods/Core/pull/29) + +* The developer dir relative to the SDK is not added anymore if testing + frameworks are detected in OS X targets, as it doesn't exists, avoiding the + presentation of the relative warning in Xcode. + [Fabio Pelosin](https://github.com/fabiopelosin) + + +## 0.26.2 (2013-10-09) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.26.1...0.26.2) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.26.1...0.26.2) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.11.1...0.13.0) + +###### Bug Fixes + +* Fixed a crash which was causing a failure in `pod lib create` if the name of + the Pod included spaces. As spaces are not supported now this is gracefully + handled with an informative message. + [Kyle Fuller](https://github.com/kylef) + [#1456](https://github.com/CocoaPods/CocoaPods/issues/1456) + +* If an user target doesn't specify an architecture the value specified for the + project is used in CocoaPods targets. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1450](https://github.com/CocoaPods/CocoaPods/issues/1450) + +* The Pods project now properly configures ARC on all build configurations. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1454](https://github.com/CocoaPods/CocoaPods/issues/1454) + + +## 0.26.1 (2013-10-08) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.25.0...0.26.1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.25.0...0.26.1) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.11.1...0.12.0) + +For more details, see 📝 [CocoaPods 0.26](https://blog.cocoapods.org/CocoaPods-0.26/) on our blog. + +###### Enhancements + +* CocoaPods now creates and hides the schemes of its targets after every + installation. 
The schemes are not shared because the flag which keeps track + whether they should be visible is a user only flag. The schemes are still + present and to debug a single Pod it is possible to make its scheme visible + in the Schemes manager of Xcode. This is rarely needed though because the + user targets trigger the compilation of the Pod targets. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1185](https://github.com/CocoaPods/CocoaPods/pull/1185) + +* Installations which don't integrate a user target (lint subcommands and + `--no-integrate` option) now set the architecture of OS X Pod targets to + `$(ARCHS_STANDARD_64_BIT)` (Xcode 4 default value for new targets). This + fixes lint issues with Xcode 4. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1185](https://github.com/CocoaPods/CocoaPods/pull/1185) + +* Further improvements to the organization of the Pods project + + - The project is now is sorted by name with groups at the bottom. + - Source files are now stored in the root group of the spec, subspecs are not + stored in a `Subspec` group anymore and the products of the Pods all are + stored in the products group of the project. + - The frameworks are referenced relative to the Developer directory and + namespaced per platform. + + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1389](https://github.com/CocoaPods/CocoaPods/pull/1389) + [#1420](https://github.com/CocoaPods/CocoaPods/pull/1420) + +* Added the `documentation_url` DSL attribute to the specifications. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1273](https://github.com/CocoaPods/CocoaPods/pull/1273) + +###### Bug Fixes + +* The search paths of vendored frameworks and libraries now are always + specified relatively. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1405](https://github.com/CocoaPods/CocoaPods/pull/1405) + +* Fix an issue where CocoaPods would fail to work when used with an older + version of the Active Support gem. This fix raises the dependency version to + the earliest compatible version of Active Support. + [Kyle Fuller](https://github.com/kylef) + [#1407](https://github.com/CocoaPods/CocoaPods/issues/1407) + +* CocoaPods will not attempt to load anymore all the version of a specification + preventing crashes if those are incompatible. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#1272](https://github.com/CocoaPods/CocoaPods/pull/1272) + + +## 0.25.0 (2013-09-20) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.24.0...0.25.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.24.0...0.25.0) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.10.1...0.11.0) + +###### Enhancements + +* Added support for Xcode 5. + + The generated Pods Xcode project is now compatible with `arm64` projects and + is updated to use Xcode 5’s default settings removing all warnings. + + **NOTE to users migrating projects from Xcode 4, or are still using Xcode 4:** + 1. The Pods Xcode project now sets the `ONLY_ACTIVE_ARCH` build setting to + `YES` in the `Debug` configuration. You _will_ have to set the same on your + project/target, otherwise the build _will_ fail. + 2. Ensure your project/target has an `ARCHS` value set, otherwise the build + _will_ fail. + 3. When building a **iOS** project from the command-line, with the `xcodebuild` + tool that comes with Xcode 4, you’ll need to completely disable this setting + by appending to your build command: `ONLY_ACTIVE_ARCH=NO`. 
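+
+      For instance (workspace and scheme names are placeholders):
+
+          xcodebuild -workspace MyApp.xcworkspace -scheme MyApp -sdk iphoneos ONLY_ACTIVE_ARCH=NO
+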
+ + [#1352](https://github.com/CocoaPods/CocoaPods/pull/1352) + +* Speed up project generation in `pod install` and `pod update`. + +* The pre and post install hooks that have been deprecated now include the name + and version of the spec that’s using them. + +###### Bug Fixes + +* Only create a single resource bundle for all targets. Prior to this change a + resource bundle included into multiple targets within the project would create + duplicately named targets in the Pods Xcode project, causing duplicately named + Schemes to be created on each invocation of `pod install`. All targets that + reference a given resource bundle now have dependencies on a single common + target. + + [Blake Watters](https://github.com/blakewatters) + [#1338](https://github.com/CocoaPods/CocoaPods/issues/1338) + +* Solved outstanding issues with CocoaPods resource bundles and Archive builds: + 1. The rsync task copies symlinks into the App Bundle, producing an invalid + app. This change add `--copy-links` to the rsync invocation to ensure the + target files are copied rather than the symlink. + 2. The Copy Resources script uses `TARGET_BUILD_DIR` which points to the App + Archiving folder during an Archive action. Switching to + `BUILT_PRODUCTS_DIR` instead ensures that the path is correct for all + actions and configurations. + + [Blake Watters](https://github.com/blakewatters) + [#1309](https://github.com/CocoaPods/CocoaPods/issues/1309) + [#1329](https://github.com/CocoaPods/CocoaPods/issues/1329) + +* Ensure resource bundles are copied to installation location on install actions + [Chris Gummer](https://github.com/chrisgummer) + [#1364](https://github.com/CocoaPods/CocoaPods/issues/1364) + +* Various bugfixes in Xcodeproj, refer to its [CHANGELOG](https://github.com/CocoaPods/Xcodeproj/blob/0.11.0/CHANGELOG.md) + for details. + + +## 0.24.0 (2013-09-04) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.23.0...0.24.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.23.0...0.24.0) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.8.1...0.9.0) +• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.1.1...0.2.0) + +###### Enhancements + +* Added `pod init` command which generates a Podfile according to the + targets of the project stored in the working directory and to the templates + stored in the `~/.cocoapods/templates` folder. Two templates are supported: + - the `Podfile.default` template for regular targets. + - and the `Podfile.test` template for test targets. + [Ian Ynda-Hummel](https://github.com/ianyh) + [#1106](https://github.com/CocoaPods/CocoaPods/issues/1106) + [#1045](https://github.com/CocoaPods/CocoaPods/issues/1045) + +* CocoaPods will now leverage the [xcproj](https://github.com/0xced/xcproj) + command line tool if available in the path of the user to touch saved + projects. This will result in projects being serialized in the exact format + used by Xcode eliminating merge conflicts and other related issues. To learn + more about how to install xcproj see its + [readme](https://github.com/0xced/xcproj). + [Cédric Luthi](https://github.com/0xced) + [#1275](https://github.com/CocoaPods/CocoaPods/issues/1275) + +* Rationalized and cleaned up Pods project group structure and path specification. + +* Create all necessary build configurations for *Pods.xcodeproj* at the project level. 
If the user’s project has more than just *Debug* and *Release* build configurations, they may be explicitly specified in the Podfile: +`xcodeproj 'MyApp', 'App Store' => :release, 'Debug' => :debug, 'Release' => :release` + If build configurations aren’t specified in the Podfile then they will be automatically picked from the user’s project in *Release* mode. + These changes will ensure that the `libPods.a` static library is not stripped for all configurations, as explained in [#1217](https://github.com/CocoaPods/CocoaPods/pull/1217). + [Cédric Luthi](https://github.com/0xced) + [#1294](https://github.com/CocoaPods/CocoaPods/issues/1294) + +* Added basic support for Bazaar repositories. + [Fred McCann](https://github.com/fmccann) + [cocoapods-downloader#4](https://github.com/CocoaPods/cocoapods-downloader/pull/4) + +###### Bug Fixes + +* Fixed crash in `pod spec cat`. + +* Use the `TARGET_BUILD_DIR` environment variable for installing resource bundles. + [Cédric Luthi](https://github.com/0xced) + [#1268](https://github.com/CocoaPods/CocoaPods/issues/1268) + +* CoreData versioned models are now properly handled respecting the contents of + the `.xccurrentversion` file. + [Ashton-W](https://github.com/Ashton-W) + [#1288](https://github.com/CocoaPods/CocoaPods/issues/1288), + [Xcodeproj#83](https://github.com/CocoaPods/Xcodeproj/pull/83) + +* OS X frameworks are now copied to the Resources folder using rsync to + properly overwrite existing files. + [Nikolaj Schumacher](https://github.com/nschum) + [#1063](https://github.com/CocoaPods/CocoaPods/issues/1063) + +* User defined build configurations are now added to the resource bundle + targets. + [#1309](https://github.com/CocoaPods/CocoaPods/issues/1309) + + +## 0.23.0 (2013-08-08) + + +## 0.23.0.rc1 (2013-08-02) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.3...0.23.0.rc1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.22.3...0.23.0.rc1) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.8.1...0.9.0) +• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.1.1...0.1.2) + +###### Enhancements + +* Added `prepare_command` attribute to Specification DSL. The prepare command + will replace the `pre_install` hook. The `post_install` hook has also been + deprecated. + [#1247](https://github.com/CocoaPods/CocoaPods/issues/1247) + + The reason we provided Ruby hooks at first, was because we wanted to offer + the option to make any required configuration possible. By now, however, we + have a pretty good idea of the use-cases and are therefore locking down the + freedom that was once available. In turn, we’re adding attributes that can + replace the most common use-cases. _(See the enhancements directly following + this entry for more info)._ + + The second reason we need to lock this down is because this is the last + remaining obstacle to fully serialize specifications, which we need in order + to move to a ‘spec push’ web-service in the future. + +* Added `resource_bundles` attribute to the Specification DSL. + [#743](https://github.com/CocoaPods/CocoaPods/issues/743) + [#1186](https://github.com/CocoaPods/CocoaPods/issues/1186) + +* Added `vendored_frameworks` attribute to the Specification DSL. + [#809](https://github.com/CocoaPods/CocoaPods/issues/809) + [#1075](https://github.com/CocoaPods/CocoaPods/issues/1075) + +* Added `vendored_libraries` attribute to the Specification DSL. 
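+
+  Taken together, the new attributes above can be sketched in a hypothetical
+  podspec excerpt like this (all names and paths are illustrative):
+
+      Pod::Spec.new do |s|
+        s.name                = 'ExamplePod'
+        s.version             = '1.0.0'
+        s.prepare_command     = 'sh generate_headers.sh'
+        s.resource_bundles    = { 'ExamplePodResources' => ['Assets/**/*.png'] }
+        s.vendored_frameworks = 'Vendor/Example.framework'
+        s.vendored_libraries  = 'Vendor/libExample.a'
+      end
+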
+ [#809](https://github.com/CocoaPods/CocoaPods/issues/809) + [#1075](https://github.com/CocoaPods/CocoaPods/issues/1075) + +* Restructured `.cocoapods` folder to contain repos in a subdirectory. + [Ian Ynda-Hummel](https://github.com/ianyh) + [#1150](https://github.com/CocoaPods/CocoaPods/issues/1150) + +* Improved `pod spec create` template. + [#1223](https://github.com/CocoaPods/CocoaPods/issues/1223) + +* Added copy&paste-friendly dependency to `pod search`. + [#1073](https://github.com/CocoaPods/CocoaPods/issues/1073) + +* Improved performance of the installation of Pods with git + sources which specify a tag. + [#1077](https://github.com/CocoaPods/CocoaPods/issues/1077) + +* Core Data `xcdatamodeld` files are now properly referenced from the Pods + project. + [#1155](https://github.com/CocoaPods/CocoaPods/issues/1155) + +* Removed punctuation check from the specification validations. + [#1242](https://github.com/CocoaPods/CocoaPods/issues/1242) + +* Deprecated the `documentation` attribute of the Specification DSL. + [Core#20](https://github.com/CocoaPods/Core/issues/20) + +###### Bug Fixes + +* Fix copy resource script issue related to filenames with spaces. + [Denis Hennessy](https://github.com/dhennessy) + [#1231](https://github.com/CocoaPods/CocoaPods/issues/1231) + + + +## 0.22.3 (2013-07-23) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.2...0.22.3) + +###### Enhancements + +* Add support for .xcdatamodel resource files (in addition to .xcdatamodeld). + [#1201](https://github.com/CocoaPods/CocoaPods/pull/1201) + +###### Bug Fixes + +* Always exlude `USE_HEADERMAP` from the user’s project. + [#1216](https://github.com/CocoaPods/CocoaPods/issues/1216) + +* Use correct template repo when using the `pod lib create` command. + [#1214](https://github.com/CocoaPods/CocoaPods/issues/1214) + +* Fixed issue with `pod push` failing when the podspec is unchanged. It will now + report `[No change] ExamplePod (0.1.0)` and continue to push other podspecs if + they exist. [#1199](https://github.com/CocoaPods/CocoaPods/pull/1199) + +* Set STRIP_INSTALLED_PRODUCT = NO in the generated Pods project. This allows + Xcode to include symbols from CocoaPods in dSYMs during Archive builds. + [#1217](https://github.com/CocoaPods/CocoaPods/pull/1217) + +* Ensure the resource script doesn’t fail due to the resources list file not + existing when trying to delete it. + [#1198](https://github.com/CocoaPods/CocoaPods/pull/1198) + +* Fix handling of spaces in paths when compiling xcdatamodel(d) files. + [#1201](https://github.com/CocoaPods/CocoaPods/pull/1201) + + + +## 0.22.2 (2013-07-11) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.1...0.22.2) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.22.1...0.22.2) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.8.0...0.8.1) + +###### Enhancements + +* The build settings of the Pods project and of its target have been updated to + be in line with the new defaults of the future versions of Xcode. + +###### Bug fixes + +* Specifications defining build setting with the `[*]` syntax are now properly + handled. + [#1171](https://github.com/CocoaPods/CocoaPods/issues/1171) + +* The name of the files references are now properly set fixing a minor + regression introduced by CocoaPods 0.22.1 and matching more closely Xcode + behaviour. + +* The validator now builds the Pods target instead of the first target actually + performing the validation. 
+ +* Build settings defined through the `xcconfig` attribute of a `podspec` are now + stripped of duplicate values when merged in an aggregate target. + [#1189](https://github.com/CocoaPods/CocoaPods/issues/1189) + + +## 0.22.1 (2013-07-03) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.0...0.22.1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.22.0...0.22.1) + +###### Bug fixes + +* Fixed a crash related to target dependencies and subspecs. + [#1168](https://github.com/CocoaPods/CocoaPods/issues/1168) + + +## 0.22.0 (2013-07-03) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.21.0...0.22.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.21.0...0.22.0) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.7.1...0.8.0) + +###### Enhancements + +* Added the `pod lib create` subcommand which allows to create a new Pod + adhering to the best practices. The template is still a bit primitive + and we encourage users to provide feedback by submitting patches and issues + to https://github.com/CocoaPods/CocoaPods. + [#850](https://github.com/CocoaPods/CocoaPods/issues/850) + +* Added the `pod lib lint` subcommand which allows to lint the Pod stored + in the working directory (a pod spec in the root is needed). This subcommand + is equivalent to the deprecated `pod spec lint --local`. + [#850](https://github.com/CocoaPods/CocoaPods/issues/850) + +* The dependencies of the targets of the Pods project are now made explicit. + [#1165](https://github.com/CocoaPods/CocoaPods/issues/1165) + +* The size of the cache used for the git repos is now configurable. For more + details see + https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/config.rb#L7-L25 + [#1159](https://github.com/CocoaPods/CocoaPods/issues/1159) + +* The copy resources shell script now aborts if any error occurs. + [#1098](https://github.com/CocoaPods/CocoaPods/issues/1098) + +* The output of shell script build phases no longer includes environment + variables to reduce noise. + [#1122](https://github.com/CocoaPods/CocoaPods/issues/1122) + +* CocoaPods no longer sets the deprecated `ALWAYS_SEARCH_USER_PATHS` build + setting. + +###### Bug fixes + +* Pods whose head state changes now are correctly detected and reinstalled. + [#1160](https://github.com/CocoaPods/CocoaPods/issues/1160) + +* Fixed the library reppresentation of the hooks which caused issues with the + `#copy_resources_script_path` method. + [#1157](https://github.com/CocoaPods/CocoaPods/issues/1157) + +* Frameworks symlinks are not properly preserved by the copy resources script. + Thanks to Thomas Dohmke (ashtom) for the fix. + [#1063](https://github.com/CocoaPods/CocoaPods/issues/1063) + +## 0.21.0 (2013-07-01) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.21.0.rc1...0.21.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.21.0.rc1...0.21.0) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.7.0...0.7.1) + +###### Bug fixes + +* Fixed a linter issue related to the dedicated targets change. + [#1130](https://github.com/CocoaPods/CocoaPods/issues/1130) + +* Fixed xcconfig issues related to Pods including a dot in the name. 
+ [#1152](https://github.com/CocoaPods/CocoaPods/issues/1152) + + +## 0.21.0.rc1 (2013-06-18) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.20.2...0.21.0.rc1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.20.2...0.21.0.rc1) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.6.0...0.7.0) + +###### Enhancements + +* Pods are now built in dedicated targets. This enhancement isolates the build + environment of each Pod from other ones eliminating pollution issues. It also + introduces an important architectural improvement which lays the foundation + for the upcoming CocoaPods features. Stay tuned! This feature has been + implemented by [Jeremy Slater](https://github.com/jasl8r). + [#1011](https://github.com/CocoaPods/CocoaPods/issues/1011) + [#983](https://github.com/CocoaPods/CocoaPods/issues/983) + [#841](https://github.com/CocoaPods/CocoaPods/issues/841) + +* Reduced external dependencies and deprecation of Rake::FileList. + [#1080](https://github.com/CocoaPods/CocoaPods/issues/1080) + +###### Bug fixes + +* Fixed crash due to Podfile.lock containing multiple version requirements for + a Pod. [#1076](https://github.com/CocoaPods/CocoaPods/issues/1076) + +* Fixed a build error due to the copy resources script using the same temporary + file for multiple targets. + [#1099](https://github.com/CocoaPods/CocoaPods/issues/1099) + +## 0.20.2 (2013-05-26) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.20.1...0.20.2) + +###### Bug fixes + +* Ensure that, in a sandbox-pod env, RubyGems loads the CocoaPods gem on system + Ruby (1.8.7). + [#939](https://github.com/CocoaPods/CocoaPods/issues/939#issuecomment-18396063) +* Allow sandbox-pod to execute any tool inside the Xcode.app bundle. +* Allow sandbox-pod to execute any tool inside a rbenv prefix. + +## 0.20.1 (2013-05-23) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.20.0...0.20.1) +• [CLAide](https://github.com/CocoaPods/CLAide/compare/0.3.0...0.3.2) + +###### Bug fixes + +* Made sandbox-pod executable visible as it wasn't correctly configured in the + gemspec. +* Made sandbox-pod executable actually work when installed as a gem. (In which + case every executable is wrapped in a wrapper bin script and the DATA constant + can no longer be used.) +* Required CLAide 0.3.2 as 0.3.0 didn't include all the files in the gemspec + and 0.3.1 was not correctly processed by RubyGems. + +## 0.20.0 (2013-05-23) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.19.1...0.20.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.19.1...0.20.0) +• [cocoapods-downloader](https://github.com/CocoaPods/CLAide/compare/0.1.0...0.1.1) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.5.5...0.6.0) +• [CLAide](https://github.com/CocoaPods/CLAide/compare/0.2.0...0.3.0) + +###### Enhancements + +* Introduces an experimental sandbox feature. + [#939](https://github.com/CocoaPods/CocoaPods/issues/939) + + Let’s face it, even though we have a great community that spends an amazing + amount of time on curating the specifications, the internet can be a hostile + place and the community is growing too large to take a naive approach any + longer. + + As such, we have started leveraging OS X’s sandbox facilities to disallow + unsanctioned operations. This is still very experimental and therefore has to + be used explicitely, for now, but that does **not** mean we don’t want you to + start using it and **report issues**. 
+ + To use the sandbox, simply use the `sandbox-pod` command instead. E.g.: + + $ sandbox-pod install + + In case of issues, be sure to check `/var/log/system.log` for ‘deny’ messages. + For instance, here’s an example where the sandbox denies read access to `/`: + + May 16 00:23:35 Khaos kernel[0]: Sandbox: ruby(98430) deny file-read-data / + + **NOTE**: _The above example is actually one that we know of. We’re not sure + yet which process causes this, but there shouldn’t be a need for any process + to read data from the root path anyways._ + + **NOTE 2**: _At the moment the sandbox is not compatible with the `:path` option + when referencing Pods that are not stored within the directory of the Podfile._ + +* The naked `pod` command now defaults to `pod install`. + [#958](https://github.com/CocoaPods/CocoaPods/issues/958) + +* CocoaPods will look for the Podfile in the ancestors paths if one is + not available in the working directory. + [#940](https://github.com/CocoaPods/CocoaPods/issues/940) + +* Documentation generation has been removed from CocoaPods as it graduated + to CocoaDocs. This decision was taken because CocoaDocs is a much better + solution which doesn't clutter Xcode's docsets while still allowing + access to the docsets with Xcode and with Dash. Removing this feature + keeps the installer leaner and easier to develop and paves the way for the + upcoming sandbox. Private pods can use pre install hook to generate the + documentation. If there will be enough demand this feature might be + reintegrated as plugin (see + [#1037](https://github.com/CocoaPods/CocoaPods/issues/1037)). + +* Improved performance of the copy resources script and thus build time of + the integrated targets. Contribution by [@onato](https://github.com/onato) + [#1050](https://github.com/CocoaPods/CocoaPods/issues/1050). + +* The changelog for the current version is printed after CocoaPods is + installed/updated. + [#853](https://github.com/CocoaPods/CocoaPods/issues/853). + + +###### Bug fixes + +* Inheriting `inhibit_warnings` per pod is now working + [#1032](https://github.com/CocoaPods/CocoaPods/issues/1032) +* Fix copy resources script for iOS < 6 and OS X < 10.8 by removing the + `--reference-external-strings-file` + flag. [#1030](https://github.com/CocoaPods/CocoaPods/pull/1030) +* Fixed issues with the `:head` option of the Podfile. + [#1046](https://github.com/CocoaPods/CocoaPods/issues/1046) + [#1039](https://github.com/CocoaPods/CocoaPods/issues/1039) + +## 0.19.1 (2013-04-30) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.19.0...0.19.1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.19.0...0.19.1) + +###### Bug fixes + +* Project-level preprocessor macros are not overwritten anymore. + [#903](https://github.com/CocoaPods/CocoaPods/issues/903) +* A Unique hash instances for the build settings of the Pods target is now + created resolving interferences in the hooks. + [#1014](https://github.com/CocoaPods/CocoaPods/issues/1014) + +## 0.19.0 (2013-04-30) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.18.1...0.19.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.18.1...0.19.0) + +###### Enhancements + +* Compile time introspection. Macro definitions which allow to inspect the + installed Pods and their version have been introduced in the build + environment of the Pod libraries + ([example](https://gist.github.com/fabiopelosin/5348551)). +* CocoaPods now defines the `COCOAPODS=1` macro in the Pod and the Client + targets. 
This is useful for libraries which conditionally expose interfaces. + [#903](https://github.com/CocoaPods/CocoaPods/issues/903) +* Added support for the `private_header_files` attribute of the Specification + DSL. + [#998](https://github.com/CocoaPods/CocoaPods/issues/998) +* CocoaPods now defines the deployment target of the Pods project computed as + the minimum deployment target of the Pods libraries. + [#556](https://github.com/CocoaPods/CocoaPods/issues/556) +* Added `pod podfile-info` command. Shows list of used Pods and their info + in a project or supplied Podfile. + Options: `--all` - with dependencies. `--md` - in Markdown. + [#855](https://github.com/CocoaPods/CocoaPods/issues/855) +* Added `pod help` command. You can still use the old format + with --help flag. + [#957](https://github.com/CocoaPods/CocoaPods/pull/957) +* Restored support for Podfiles named `CocoaPods.podfile`. Moreover, the + experimental YAML format of the Podfile now is associated with files named + `CocoaPods.podfile.yaml`. + [#1004](https://github.com/CocoaPods/CocoaPods/pull/1004) + +###### Deprecations + +* The `:local` flag in Podfile has been renamed to `:path` and the old syntax + has been deprecated. + [#971](https://github.com/CocoaPods/CocoaPods/issues/971) + +###### Bug fixes + +* Fixed issue related to `pod outdated` and external sources. + [#954](https://github.com/CocoaPods/CocoaPods/issues/954) +* Fixed issue with .svn folders in copy resources script. + [#972](https://github.com/CocoaPods/CocoaPods/issues/972) + +## 0.18.1 (2013-04-10) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.18.0...0.18.1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.18.0...0.18.) + +###### Bug fixes + +* Fixed a bug introduced in 0.18 which cause compilation issue due to the + quoting of the inherited value in the xcconfigs. + [#956](https://github.com/CocoaPods/CocoaPods/issues/956) +* Robustness against user targets including build files with missing file + references. + [#938](https://github.com/CocoaPods/CocoaPods/issues/938) +* Partially fixed slow performance from the command line + [#919](https://github.com/CocoaPods/CocoaPods/issues/919) + + +## 0.18.0 (2013-04-08) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.2...0.18.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.2...0.18.0) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.5.2...0.5.5) + +###### Enhancements + +* Added the ability to inhibit warnings per pod. + Just pass `:inhibit_warnings => true` inline. + This feature has been implemented by Marin Usalj (@mneorr). + [#10](https://github.com/CocoaPods/Core/pull/10) + [#934](https://github.com/CocoaPods/CocoaPods/pull/934) +* Inhibiting warnings will also suppress the warnings of the static analyzer. +* A new build phase has been added to check that your + installation is in sync with the `Podfile.lock` and fail the build otherwise. + The new build phase will not be added automatically to targets already + integrated with CocoaPods, for integrating targets manually see [this + comment](https://github.com/CocoaPods/CocoaPods/pull/946#issuecomment-16042419). + This feature has been implemented by Ullrich Schäfer (@stigi). + [#946](https://github.com/CocoaPods/CocoaPods/pull/946) +* The `pod search` commands now accepts the `--ios` and the `--osx` arguments + to filter the results by platform. 
+ [#625](https://github.com/CocoaPods/CocoaPods/issues/625) +* The developer frameworks are automatically added if `SenTestingKit` is + detected. There is no need to specify them in specifications anymore. + [#771](https://github.com/CocoaPods/CocoaPods/issues/771) +* The `--no-update` argument of the `install`, `update`, `outdated` subcommands + has been renamed to `--no-repo-update`. + [#913](https://github.com/CocoaPods/CocoaPods/issues/913) + +###### Bug fixes + +* Improved handling for Xcode projects containing non ASCII characters. + Special thanks to Cédric Luthi (@0xced), Vincent Isambart (@vincentisambart), + and Manfred Stienstra (@Manfred) for helping to develop the workaround. + [#926](https://github.com/CocoaPods/CocoaPods/issues/926) +* Corrected improper configuration of the PODS_ROOT xcconfig variable in + non-integrating installations. + [#918](https://github.com/CocoaPods/CocoaPods/issues/918) +* Improved support for pre-release versions using dashes. + [#935](https://github.com/CocoaPods/CocoaPods/issues/935) +* Documentation sets are now namespaced by pod solving improper attribution. + [#659](https://github.com/CocoaPods/CocoaPods/issues/659) + + +## 0.17.2 (2013-04-03) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.1...0.17.2) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.1...0.17.2) + +###### Bug fixes + +* Fix crash related to the specification of the workspace as a relative path. + [#920](https://github.com/CocoaPods/CocoaPods/issues/920) +* Fix an issue related to the `podspec` dsl directive of the Podfile for + specifications with internal dependencies. + [#928](https://github.com/CocoaPods/CocoaPods/issues/928) +* Fix crash related to search from the command line. + [#929](https://github.com/CocoaPods/CocoaPods/issues/929) + +###### Ancillary enhancements + +* Enabled the FileList deprecation warning in the Linter. +* CocoaPods will raise if versions requirements are specified for dependencies + with external sources. +* The exclude patterns now handle folders automatically. + + +## 0.17.1 (2013-03-30) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0...0.17.1) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0...0.17.1) + +###### Bug fixes + +* Always create the CACHE_ROOT directory when performing a search. + [#917](https://github.com/CocoaPods/CocoaPods/issues/917) + +## 0.17.0 (2013-03-29) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc7...0.17.0) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc7...0.17.0) + +#### GM + +###### Bug fixes + +* Don’t break when specifying doc options, but not appledoc ones. + [#906](https://github.com/CocoaPods/CocoaPods/issues/906) +* Sort resolved specifications. + [#907](https://github.com/CocoaPods/CocoaPods/issues/907) +* Subspecs do not need to include HEAD information. + [#905](https://github.com/CocoaPods/CocoaPods/issues/905) + +###### Ancillary enhancements + +* Allow the analyzer to do its work without updating sources. + [motion-cocoapods#50](https://github.com/HipByte/motion-cocoapods/pull/50) + +#### rc7 + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc6...0.17.0.rc7) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc6...0.17.0.rc7) + +###### Bug fixes + +- Fixed an issue which lead to the missing declaration of the plural directives + of the Specification DSL. 
+  [#816](https://github.com/CocoaPods/CocoaPods/issues/816)
+- The resolver now respects the order of specification of the target
+  definitions.
+- Restored usage of the cache file to store a cache for expensive stats.
+- Moved the declaration of `Pod::FileList` to CocoaPods-core.
+
+###### Ancillary enhancements
+
+- Fine-tuned the Specification linter and the health reporter of repositories.
+- Search results are sorted.
+
+#### rc6
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc5...0.17.0.rc6)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc5...0.17.0.rc6)
+
+###### Bug fixes
+
+- CocoaPods updates the repositories by default.
+  [#872](https://github.com/CocoaPods/CocoaPods/issues/872)
+- Fixed a crash which was present when the Podfile specifies a workspace.
+  [#871](https://github.com/CocoaPods/CocoaPods/issues/871)
+- Fix for a bug which led to a broken installation in paths containing
+  brackets and other glob metacharacters.
+  [#862](https://github.com/CocoaPods/CocoaPods/issues/862)
+- Fix for a bug related to the case of the paths which led to cleaning all the
+  files in the directories of the Pods.
+
+
+###### Ancillary enhancements
+
+- CocoaPods now maintains a search index which is updated incrementally instead
+  of analyzing all the specs every time. The search index can be updated
+  manually with the `pod ipc update-search-index` command.
+- Enhancements to the `pod repo lint` command.
+- CocoaPods no longer creates the pre-commit hook in the master repo
+  during setup. If it was already created, it can be removed by deleting the
+  `~/.cocoapods/master/.git/hooks/pre-commit` path.
+- Improved support for linting and validating the specs repo.
+
+#### rc5
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc4...0.17.0.rc5)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc4...0.17.0.rc5)
+
+###### Bug fixes
+
+- The `--no-clean` argument is not ignored anymore by the installer.
+- Proper handling of file patterns ending with a slash.
+- More user errors are raised as informative errors.
+
+#### rc4
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc3...0.17.0.rc4)
+
+###### Bug fixes
+
+- Restored compatibility with `Podfile::TargetDefinition#copy_resources_script_name`
+  in the Podfile hooks.
+- Updated the copy resources script so that it will use base internationalization.
+  [#846](https://github.com/CocoaPods/CocoaPods/issues/846)
+- Robustness against an empty configuration file.
+- Fixed a crash with `pod push`.
+  [#848](https://github.com/CocoaPods/CocoaPods/issues/848)
+- Fixed an issue which led to the creation of a Pods project which would
+  crash Xcode.
+  [#854](https://github.com/CocoaPods/CocoaPods/issues/854)
+- Fixed a crash related to a `PBXVariantGroup` present in the frameworks build
+  phase of client targets.
+  [#859](https://github.com/CocoaPods/CocoaPods/issues/859)
+
+
+###### Ancillary enhancements
+
+- The `podspec` option of the `pod` directive of the Podfile DSL now accepts
+  folders.
+
+#### rc3
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc2...0.17.0.rc3)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.5.0...0.5.1)
+
+###### Bug fixes
+
+- CocoaPods will not crash anymore if the license file indicated in the spec
+  doesn't exist.
+- Pre install hooks are called before the Pods are cleaned.
+- Fixed an issue which prevented the inclusion of OTHER_CFLAGS and
+  OTHER_CPLUSPLUSFLAGS in the release builds of the Pods project.
+- Fixed `pod lint --local` +- Fixed the `--allow-warnings` of `pod push` + [#835](https://github.com/CocoaPods/CocoaPods/issues/835) +- Added `copy_resources_script_name` to the library representation used in the + hooks. + [#837](https://github.com/CocoaPods/CocoaPods/issues/837) + +###### Ancillary enhancements + +- General improvements to `pod ipc`. +- Added `pod ipc repl` subcommand. + +#### rc2 + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc1...0.17.0.rc2) +• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc1...0.17.0.rc2) + +###### Bug fixes + +- Restored output coloring. +- Fixed a crash related to subspecs + [#819](https://github.com/CocoaPods/CocoaPods/issues/819) +- Git repos were not cached for dependencies with external sources. + [#820](https://github.com/CocoaPods/CocoaPods/issues/820) +- Restored support for directories for the preserve_patterns specification + attribute. + [#823](https://github.com/CocoaPods/CocoaPods/issues/823) + +#### rc1 + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.4...0.17.0.rc1) +• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.3...0.5.0) +• [cocoapods-core](https://github.com/CocoaPods/Core) +• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader) + +###### __Notice__ + +At some point in future the master repo will be switched to the YAML format of +specifications. This means that specifications with hooks (or any other kind of +dynamic logic) will not be accepted. Please let us know if there is need for +other DSL attributes or any other kind of support. + +Currently the following specifications fail to load as they depended on the +CocoaPods internals and need to be updated: + +- LibComponentLogging-pods/0.0.1/LibComponentLogging-pods.podspec +- RestKit/0.9.3/RestKit.podspec +- Three20/1.0.11/Three20.podspec +- ARAnalytics/1.1/ARAnalytics.podspec + +Other specifications, might present compatibility issues for the reasons +presented below. + +###### __Breaking__ + +- Subspecs do **not** inherit the files patterns from the parent spec anymore. + This feature made the implementation more complicated and was not easy to + explain to podspecs maintainers. Compatibility can be easily fixed by adding + a 'Core' subspec. +- Support for inline podspecs has been removed. +- The support for Rake::FileList is being deprecated, in favor of a more + consistent DSL. Rake::FileList also presented issues because it would access + the file system as soon as it was converted to an array. +- The hooks architecture has been re-factored and might present + incompatibilities (please open an issue if appropriate). +- The `requires_arc` attribute default value is transitioning from `false` to + `true`. In the meanwhile a value is needed to pass the lint. +- Deprecated `copy_header_mapping` hook. +- Deprecated `exclude_header_search_paths` attribute. +- External sources are not supported in the dependencies of specifications + anymore. Actually they never have been supported, they just happened to work. + +###### DSL + +- Podfile: + - It is not needed to specify the platform anymore (unless not integrating) + as CocoaPods now can infer the platform from the integrated targets. +- Specification: + - `preferred_dependency` has been renamed to `default_subspec`. + - Added `exclude_files` attribute. + - Added `screenshots` attribute. + - Added default values for attributes like `source_files`. 
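+
+A minimal sketch of how the Specification DSL changes listed above might look
+in a podspec (the pod name, URL, and file patterns are purely illustrative):
+
+```ruby
+Pod::Spec.new do |s|
+  s.name            = 'MyLib'
+  s.version         = '1.0.0'
+  s.source          = { :git => 'https://example.com/MyLib.git', :tag => '1.0.0' }
+  s.screenshots     = 'https://example.com/screenshot.png'  # new attribute
+  s.exclude_files   = 'Classes/Experimental'                # new attribute
+  s.default_subspec = 'Core'  # replaces `preferred_dependency`
+
+  s.subspec 'Core' do |cs|
+    cs.source_files = 'Classes/Core/**/*.{h,m}'
+  end
+end
+```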
+ +###### Enhancements + +- Released preview [documentation](http://docs.cocoapods.org). +- CocoaPods now has support for working in teams and not committing the Pods + folder, as it will keep track of the status of the Pods folder. + [#552](https://github.com/CocoaPods/CocoaPods/issues/552) +- Simplified installation: no specific version of ruby gems is required anymore. +- The workspace is written only if needed greatly reducing the occasions in + which Xcode asks to revert. +- The Lockfile is sorted reducing the SCM noise. + [#591](https://github.com/CocoaPods/CocoaPods/issues/591) +- Added Podfile, Frameworks, and Resources to the Pods project. + [#647](https://github.com/CocoaPods/CocoaPods/issues/647) + [#588](https://github.com/CocoaPods/CocoaPods/issues/588) +- Adds new subcommand `pod spec cat NAME` to print a spec file to standard output. +- Specification hooks are only called when the specification is installed. +- The `--no-clean` option of the `pod spec lint` command now displays the Pods + project for inspection. +- It is now possible to specify default values for the configuration in + `~/.cocoapods/config.yaml` ([default values](https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/config.rb#L17)). +- CocoaPods now checks the checksums of the installed specifications and + reinstalls them if needed. +- Support for YAML formats of the Podfile and the Specification. +- Added new command `pod ipc` to provide support for inter process + communication through YAML formats. +- CocoaPods now detects if the folder of a Pod is empty and reinstalls it. + [#534](https://github.com/CocoaPods/CocoaPods/issues/534) +- Install hooks and the `prefix_header_contents` attribute are supported in subspecs. + [#617](https://github.com/CocoaPods/CocoaPods/issues/617) +- Dashes are now supported in the versions of the Pods. + [#293](https://github.com/CocoaPods/CocoaPods/issues/293) + +###### Bug fixes + +- CocoaPods is not confused anymore by target definitions with different activated subspec. + [#535](https://github.com/CocoaPods/CocoaPods/issues/535) +- CocoaPods is not confused anymore by to dependencies from external sources. + [#548](https://github.com/CocoaPods/CocoaPods/issues/548) +- The git cache will always update against the remote if a tag is requested, + resolving issues where library maintainers where updating the tag after a + lint and would be confused by CocoaPods using the cached commit for the tag. + [#407](https://github.com/CocoaPods/CocoaPods/issues/407) + [#596](https://github.com/CocoaPods/CocoaPods/issues/596) + +###### Codebase + +- Major clean up and refactor of the whole code base. +- Extracted the core classes into + [cocoapods-core](https://github.com/CocoaPods/Core) gem. +- Extracted downloader into + [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader). +- Extracted command-line command & option handling into + [CLAide](https://github.com/CocoaPods/CLAide). + +## 0.16.4 (2013-02-25) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.3...0.16.4) + +###### Enhancements + +- Add explicit flattening option to `Downloader:Http`: `:flatten => true`. + [#814](https://github.com/CocoaPods/CocoaPods/pull/814) + [#812](https://github.com/CocoaPods/CocoaPods/issues/812) + [#1314](https://github.com/CocoaPods/Specs/pull/1314) + +###### Bug fixes + +- Explicitely require `date` in the gemspec for Ruby 2.0.0. 
+ [34da3f7](https://github.com/CocoaPods/CocoaPods/commit/34da3f792b2a36fafacd4122e29025c9cf2ff38d) + +## 0.16.3 (2013-02-20) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.2...0.16.3) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.3...0.5.0) + +###### Bug fixes + +- Only flatten tarballs, **not** zipballs, from HTTP sources. A zipball can + contain single directories in the root that should be preserved, for instance + a framework bundle. This reverts part of the change in 0.16.1. + **NOTE** This will break some podspecs that were changed after 0.16.1. + [#783](https://github.com/CocoaPods/CocoaPods/pull/783) + [#727](https://github.com/CocoaPods/CocoaPods/issues/727) +- Never consider aggregate targets in the user’s project for integration. + [#729](https://github.com/CocoaPods/CocoaPods/issues/729) + [#784](https://github.com/CocoaPods/CocoaPods/issues/784) +- Support comments on all build phases, groups and targets in Xcode projects. + [#51](https://github.com/CocoaPods/Xcodeproj/pull/51) +- Ensure default Xcode project values are copied before being used. + [b43087c](https://github.com/CocoaPods/Xcodeproj/commit/b43087cb342d8d44b491e702faddf54a222b23c3) +- Block assertions in Release builds. + [#53](https://github.com/CocoaPods/Xcodeproj/pull/53) + [#803](https://github.com/CocoaPods/CocoaPods/pull/803) + [#802](https://github.com/CocoaPods/CocoaPods/issues/802) + + +###### Enhancements + +- Compile Core Data model files. + [#795](https://github.com/CocoaPods/CocoaPods/pull/795) +- Add `Xcodeproj::Differ`, which shows differences between Xcode projects. + [308941e](https://github.com/CocoaPods/Xcodeproj/commit/308941eeaa3bca817742c774fd584cc5ab1c8f84) + + +## 0.16.2 (2013-02-02) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.1...0.16.2) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.1...0.4.3) + +###### Bug fixes + +- Quote storyboard and xib paths in ‘copy resource’ script. + [#740](https://github.com/CocoaPods/CocoaPods/pull/740) +- Fix use of `podspec` directive in Podfile with no options specified. + [#768](https://github.com/CocoaPods/CocoaPods/pull/768) +- Generate Mac OS X Pods target with the specified deployment target. + [#757](https://github.com/CocoaPods/CocoaPods/issues/757) +- Disable libSystem objects for ARC libs that target older platforms. + This applies when the deployment target is set to < iOS 6.0 or OS X 10.8, + or not specified at all. + [#352](https://github.com/CocoaPods/Specs/issues/352) + [#1161](https://github.com/CocoaPods/Specs/pull/1161) +- Mark header source files as ‘Project’ not ‘Public’. + [#747](https://github.com/CocoaPods/CocoaPods/issues/747) +- Add `PBXGroup` as acceptable `PBXFileReference` value. + [#49](https://github.com/CocoaPods/Xcodeproj/pull/49) +- Make `xcodeproj show` without further arguments actually work. + [#45](https://github.com/CocoaPods/Xcodeproj/issues/45) + +###### Enhancements + +- Added support for pre-download over Mercurial. + [#750](https://github.com/CocoaPods/CocoaPods/pull/750) + +## 0.16.1 (2013-01-13) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0...0.16.1) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.0...0.4.1) + +###### Bug fixes + +- After unpacking source from a HTTP location, move the source into the parent + dir if the archive contained only one child. This is done to make it + consistent with how source from other types of locations are described in a + podspec. 
+ **NOTE** This might break some podspecs that assumed the incorrect layout. + [#727](https://github.com/CocoaPods/CocoaPods/issues/727) + [#728](https://github.com/CocoaPods/CocoaPods/pull/728) +- Remove duplicate option in `pod update` command. + [#725](https://github.com/CocoaPods/CocoaPods/issues/725) +- Memory fixes in Xcodeproj. + [#43](https://github.com/CocoaPods/Xcodeproj/pull/43) + +###### Xcodeproj Enhancements + +- Sort contents of xcconfig files by setting name. + [#591](https://github.com/CocoaPods/CocoaPods/issues/591) +- Add helpers to get platform name, deployment target, and frameworks build phases +- Take SDKROOT into account when adding frameworks. + +## 0.16.0 (2012-11-22) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc5...master) + +###### Enhancements + +- Use Rake 0.9.4 + [#657](https://github.com/CocoaPods/CocoaPods/issues/657) + +## 0.16.0.rc5 (2012-11-14) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc4...0.16.0.rc5) + +###### Deprecated + +- The usage of specifications defined in a Podfile is deprecated. Use the + `:podspec` option with a file path instead. Complete removal will most + probably happen in 0.17.0. + [#549](https://github.com/CocoaPods/CocoaPods/issues/549) + [#616](https://github.com/CocoaPods/CocoaPods/issues/616) + [#525](https://github.com/CocoaPods/CocoaPods/issues/525) + +###### Bug fixes + +- Always consider inline podspecs as needing installation. +- Fix detection when the lib has already been integrated with the user’s target. + [#643](https://github.com/CocoaPods/CocoaPods/issues/643) + [#614](https://github.com/CocoaPods/CocoaPods/issues/614) + [#613](https://github.com/CocoaPods/CocoaPods/issues/613) + +## 0.16.0.rc4 (2012-11-14) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc3...0.16.0.rc4) + +###### Bug fixes + +- Fix for Rake 0.9.3 + [#657](https://github.com/CocoaPods/CocoaPods/issues/657) + +## 0.16.0.rc3 (2012-11-02) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc2...0.16.0.rc3) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.0.rc1...0.4.0.rc6) + +###### Enhancements + +- Added support for copying frameworks to the app bundle. + [#597](https://github.com/CocoaPods/CocoaPods/pull/597) + +###### Bug fixes + +- Ignore PBXReferenceProxy while integrating into user project. + [#626](https://github.com/CocoaPods/CocoaPods/issues/626) +- Added support for PBXAggregateTarget and PBXLegacyTarget. + [#615](https://github.com/CocoaPods/CocoaPods/issues/615) +- Added support for PBXReferenceProxy. + [#612](https://github.com/CocoaPods/CocoaPods/issues/612) + +## 0.16.0.rc2 (2012-10-21) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc1...0.16.0.rc2) + +###### Bug fixes + +- Fix for uninitialized constant Xcodeproj::Constants error. + +## 0.16.0.rc1 (2012-10-21) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.15.2...0.16.0.rc1) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.5...0.4.0.rc1) + +###### Enhancements + +- Xcodeproj partial rewrite. + [#565](https://github.com/CocoaPods/CocoaPods/issues/565) + [#561](https://github.com/CocoaPods/CocoaPods/pull/561) + - Performance improvements in the `Generating support files` phase. + - Better support for editing existing projects and sorting groups. + +## 0.15.2 (2012-10-19) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.15.1...0.15.2) + +###### Enhancements + +- Added support for `.hh` headers. 
+ [#576](https://github.com/CocoaPods/CocoaPods/pull/576) + +###### Bug fixes + +- Restored support for running CocoaPods without a terminal. + [#575](https://github.com/CocoaPods/CocoaPods/issues/575) + [#577](https://github.com/CocoaPods/CocoaPods/issues/577) +- The git cache now always uses a barebones repo preventing a number of related issues. + [#581](https://github.com/CocoaPods/CocoaPods/issues/581) + [#569](https://github.com/CocoaPods/CocoaPods/issues/569) +- Improved fix for the issue that lead to empty directories for Pods. + [#572](https://github.com/CocoaPods/CocoaPods/issues/572) + [#602](https://github.com/CocoaPods/CocoaPods/issues/602) +- Xcodeproj robustness against invalid values, such as malformed UTF8. + [#592](https://github.com/CocoaPods/CocoaPods/issues/592) + +## 0.15.1 (2012-10-04) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.15.0...0.15.1) + +###### Enhancements + +- Show error if syntax error in Podfile or Podfile.lock. + +###### Bug fixes + +- Fixed an issue that lead to empty directories for Pods. + [#519](https://github.com/CocoaPods/CocoaPods/issues/519) + [#568](https://github.com/CocoaPods/CocoaPods/issues/568) +- Fixed a crash related to the RubyGems version informative. + [#570](https://github.com/CocoaPods/CocoaPods/issues/570) +- Fixed a crash for `pod outdated`. + [#567](https://github.com/CocoaPods/CocoaPods/issues/567) +- Fixed an issue that lead to excessively slow sets computation. + +## 0.15.0 (2012-10-02) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.14.0...0.15.0) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.3...0.3.4) + +###### Enhancements + +- Pod `install` will update the specs repo only if needed. + [#533](https://github.com/CocoaPods/CocoaPods/issues/533) +- CocoaPods now searches for the highest version of a Pod on all the repos. + [#85](https://github.com/CocoaPods/CocoaPods/issues/85) +- Added a pre install hook to the Podfile and to root specifications. + [#486](https://github.com/CocoaPods/CocoaPods/issues/486) +- Support for `header_mappings_dir` attribute in subspecs. +- Added support for linting a Podspec using the files from its folder `pod spec + lint --local` +- Refactored UI. +- Added support for Podfiles named `CocoaPods.podfile` which allows to + associate an editor application in Mac OS X. + [#528](https://github.com/CocoaPods/CocoaPods/issues/528) +- Added config option to disable the new version available message. + [#448](https://github.com/CocoaPods/CocoaPods/issues/448) +- Added support for extracting `.tar.bz2` files + [#522](https://github.com/CocoaPods/CocoaPods/issues/522) +- Improved feedback for errors of repo subcommands. + [#505](https://github.com/CocoaPods/CocoaPods/issues/505) + + +###### Bug fixes + +- Subspecs namespacing has been restored. + [#541](https://github.com/CocoaPods/CocoaPods/issues/541) +- Improvements to the git cache that should be more robust. + [#517](https://github.com/CocoaPods/CocoaPods/issues/517) + - In certain conditions pod setup would execute twice. +- The git cache now is updated if a branch is not found + [#514](https://github.com/CocoaPods/CocoaPods/issues/514) +- Forcing UTF-8 encoding on licenses generation in Ruby 1.9. + [#530](https://github.com/CocoaPods/CocoaPods/issues/530) +- Added support for `.hpp` headers. 
+  [#244](https://github.com/CocoaPods/CocoaPods/issues/244)
+
+## 0.14.0 (2012-09-10)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.14.0.rc2...0.14.0) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.2...0.3.3)
+
+###### Bug fixes
+
+- In certain conditions the spec of an external source would have been
+  overridden by the spec in the root of a Pod.
+  [#489](https://github.com/CocoaPods/CocoaPods/issues/489)
+- CocoaPods now uses a recent version of Octokit.
+  [#490](https://github.com/CocoaPods/CocoaPods/issues/490)
+- Fixed a bug that caused Pods with preferred dependencies to always be
+  installed.
+  [Specs#464](https://github.com/CocoaPods/CocoaPods/issues/464)
+- Fixed Xcode 4.4+ artwork warning.
+  [Specs#508](https://github.com/CocoaPods/CocoaPods/issues/508)
+
+## 0.14.0.rc2 (2012-08-30)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.14.0.rc1...0.14.0.rc2)
+
+###### Bug fixes
+
+- Fix incorrect name for Pods from external sources with preferred subspecs.
+  [#485](https://github.com/CocoaPods/CocoaPods/issues/485)
+- Prevent duplication of a Pod with a local source and multiple activated specs.
+  [#485](https://github.com/CocoaPods/CocoaPods/issues/485)
+- Fixed the `uninitialized constant Pod::Lockfile::Digest` error.
+  [#484](https://github.com/CocoaPods/CocoaPods/issues/484)
+
+## 0.14.0.rc1 (2012-08-28)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.13.0...0.14.0.rc1) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.1...0.3.2)
+
+###### Enhancements
+
+- Improve the installation process by preserving the installed versions of Pods
+  across installations and machines. A Pod is reinstalled if:
+  - the version required in the Podfile changes and becomes incompatible with
+    the installed one.
+    [#191](https://github.com/CocoaPods/CocoaPods/issues/191)
+  - the external source changes.
+  - the head status changes (from disabled to enabled or vice-versa).
+- Introduce the `pod update` command that installs the dependencies of the
+  Podfile **ignoring** the lockfile `Podfile.lock`.
+  [#131](https://github.com/CocoaPods/CocoaPods/issues/131)
+- Introduce the `pod outdated` command that shows the pods with known updates.
+- Add a `:local` option for dependencies which will use the source files
+  directly from a local directory. This is usually used for libraries that are
+  being developed in parallel to the end product (application/library); a short
+  example follows this list.
+  [#458](https://github.com/CocoaPods/CocoaPods/issues/458),
+  [#415](https://github.com/CocoaPods/CocoaPods/issues/415),
+  [#156](https://github.com/CocoaPods/CocoaPods/issues/156).
+- Folders of Pods which are no longer required are removed during installation.
+  [#298](https://github.com/CocoaPods/CocoaPods/issues/298)
+- Add meaningful error messages when:
+  - a podspec can’t be found in the root of an external source.
+    [#385](https://github.com/CocoaPods/CocoaPods/issues/385),
+    [#338](https://github.com/CocoaPods/CocoaPods/issues/338),
+    [#337](https://github.com/CocoaPods/CocoaPods/issues/337).
+  - a subspec name is misspelled.
+    [#327](https://github.com/CocoaPods/CocoaPods/issues/327)
+  - an unrecognized command and/or argument is provided.
+- The subversion downloader now does an export instead of a checkout, which
+  makes it play nicer with SCMs that store metadata in each directory.
+  [#245](https://github.com/CocoaPods/CocoaPods/issues/245)
+- Now the Podfile is added to the Pods project for convenient editing.
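+
+A rough sketch of the new `:local` option mentioned above (the pod name and
+path are purely illustrative):
+
+```ruby
+platform :ios
+
+# Use the sources of a library checked out next to the application,
+# instead of fetching it from a remote source.
+pod 'MyLib', :local => '../MyLib'
+```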
+ +###### Bug fixes + +- The git cache now fetches the tags from the remote if it can’t find the + reference. +- Xcodeproj now builds on 10.6.8 and Travis CI without symlinking headers. +- Only try to install, add source files to the project, and clean a Pod once. + [#376](https://github.com/CocoaPods/CocoaPods/issues/376) + +###### Notes + +- External Pods might be reinstalled due to the migration to the new + `Podfile.lock`. +- The SCM reference of head Pods is not preserved across machines. +- Pods whose inline specification changed are not detected as modified. As a + workaround, remove their folder stored in `Pods`. +- Pods whose specification changed are not detected as modified. As a + workaround, remove their folder stored in `Pods`. + + +## 0.13.0 (2012-08-22) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.12.0...0.13.0) + +###### Enhancements + +- Add Podfile `podspec` which allows to use the dependencies of a podspec file. + [#162](https://github.com/CocoaPods/CocoaPods/issues/162) +- Check if any of the build settings defined in the xcconfig files is + overridden. [#92](https://github.com/CocoaPods/CocoaPods/issues/92) +- The Linter now checks that there are no compiler flags that disable warnings. + +###### Bug fixes + +- The final project isn’t affected anymore by the `inhibit_all_warnings!` + option. +- Support for redirects while using podspec from an url. + [#462](https://github.com/CocoaPods/CocoaPods/issues/462) + + +## 0.12.0 (2012-08-21) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.11.1...0.12.0) + +###### Enhancements + +- The documentation is generated using the public headers if they are + specified. +- In case of a download failure the installation is aborted and the error + message is shown. +- Git submodules are initialized only if requested. +- Don’t impose a certain structure of the user’s project by raising if no + ‘Frameworks’ group exists. + [#431](https://github.com/CocoaPods/CocoaPods/pull/431) +- Support for GitHub Gists in the linter. +- Allow specifying ARC settings in subspecs. +- Add Podfile `inhibit_all_warnings!` which will inhibit all warnings from the + Pods library. [#209](https://github.com/CocoaPods/CocoaPods/issues/209) +- Make the Pods Xcode project prettier by namespacing subspecs in nested + groups. [#466](https://github.com/CocoaPods/CocoaPods/pull/466) + + +## 0.11.1 (2012-08-09) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.11.0...0.11.1) + +###### Bug fixes + +- Fixed a crash related to subspecs without header files. [#449] +- Git submodules are loaded after the appropriate referenced is checked out and + will be not loaded anymore in the cache. [#451] +- Fixed SVN support for the head version. [#432] + + +## 0.11.0 (2012-08-08) + +[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.10.0...0.11.0) + +###### Enhancements + +- Added support for public headers. [#440] +- Added `pod repo lint`. [#423] +- Improved support for `:head` option and SVN repositories. +- When integrating Pods with a project without "Frameworks" group in root of + the project, raise an informative message. + [#431](https://github.com/CocoaPods/CocoaPods/pull/431) +- Dropped support for legacy `config.ios?` and `config.osx?` + +###### Bug fixes + +- Version message now correctly terminates with a 0 exit status. +- Resolved an issue that lead to git error messages in the error report. 
+
+
+## 0.10.0 (2012-07-29)
+
+[CocoaPods](http://git.io/4i75YA)
+
+###### Enhancements
+
+- Added a `--local-only` option to `pod push` so that developers can push
+  locally and test before pushing to a remote. [#405](http://git.io/0ILJEw)
+- Added line number information for errors generated in the Podfile.
+  [#408](http://git.io/fWQvMg)
+- Pods stored in git repositories now initialize submodules.
+  [#406](http://git.io/L9ssSw)
+
+###### Bug fixes
+
+- Removed the note about the post install hook from the linter.
+- Improved xcodebuild error detection in the linter.
+- Ensure the git cache exists, before updating it, when trying to install the
+  ‘bleeding edge’ of a pod. [#426](http://git.io/d4eqRA)
+- Clean downloaded external pods **after** resolving and activating (sub)specs.
+  [#414](http://git.io/i77q_w)
+- Support `tar.gz` as a filename in an HTTP source. [#428](http://git.io/qhwKkA)
+
+
+## 0.9.2 (2012-07-16)
+
+[CocoaPods](http://git.io/AVlRKg) • [Xcodeproj](http://git.io/xHbc0w)
+
+###### Bug fixes
+
+- When generating the PodsDummy class, make that class unique to each target. [#402](http://git.io/NntYiQ)
+- Raise an informative error message when the platform in the `Podfile` is omitted or incorrect. [#403](http://git.io/k5EcUQ)
+
+
+## 0.9.1 (2012-07-14)
+
+[CocoaPods](http://git.io/_kqAbw)
+
+###### Bug fixes
+
+- CocoaPods 0.9.x needs Xcodeproj 0.3.0.
+
+
+## 0.9.0 (2012-07-14)
+
+[CocoaPods](http://git.io/kucJQw) • [Xcodeproj](http://git.io/5eLL8g)
+
+###### Enhancements
+
+- Force downloading the ‘bleeding edge’ version of a pod with the `:head` flag. [#392](http://git.io/t_NVRQ)
+- Support for weak frameworks. [#263](http://git.io/XZDuog)
+- Use double quotes when shelling out. This makes a URL like `$HOME/local/lib` work. [#396](http://git.io/DnBzhA)
+
+###### Bug fixes
+
+- Relaxed the linter to accept pods that only specify paths to preserve (like TuneupJS).
+- Gender neutralization of the Podfile documentation. [#384](http://git.io/MAsHXg)
+- Exit early when using an old RubyGems version (< 1.4.0). These versions contain subtle bugs
+  related to prerelease version comparisons. Unfortunately, OS X >= 10.7 ships with 1.3.6. [#398](http://git.io/Lr7DoA)
+
+
+## 0.8.0 (2012-07-09)
+
+[CocoaPods](http://git.io/RgMF3w) • [Xcodeproj](http://git.io/KBKE_Q)
+
+###### Breaking change
+
+Syntax change in Podfile: `dependency` has been replaced by `pod`.
+
+```ruby
+platform :ios
+pod 'JSONKit', '~> 1.4'
+pod 'Reachability', '~> 2.0.4'
+```
+
+###### Bug fixes
+
+- Properly quote all paths given to Git.
+
+
+## 0.7.0 (2012-07-06)
+
+[CocoaPods](http://git.io/Agia6A) • [Xcodeproj](http://git.io/mlqquw)
+
+###### Features
+
+- Added support for branches in git repos.
+- Added support for linting remote files, i.e. `pod spec lint http://raw/file.podspec`.
+- Improved `Spec create template`.
+- The indentation is automatically stripped for podspec strings.
+
+###### Bug fixes
+
+- The default warnings of Xcode are not overridden anymore.
+- Improvements to the detection of the license files.
+- Improvements to `pod spec lint`.
+- CocoaPods is now case insensitive.
+
+
+## 0.6.1 (2012-07-01)
+
+[CocoaPods](http://git.io/45wFjw) • [Xcodeproj](http://git.io/rRA4XQ)
+
+###### Bug fixes
+
+- Switched to the master branch for the specs repo.
+- Fixed a crash with `pod spec lint` related to `preserve_paths`.
+- Fixed a bug that caused subspecs to not inherit the compiler flags of the top level specification.
+- Fixed a bug that caused duplication of system frameworks.
+ + +## 0.6.0 (2012-07-01) + +A full list of all the changes since 0.5.1 can be found [here][6]. + + +### Link with specific targets + +CocoaPods can now integrate all the targets specified in your `Podfile`. + +To specify which target, in your Xcode project, a Pods target should be linked +with, use the `link_with` method like so: + +```ruby +platform :ios + +workspace 'MyWorkspace' + +link_with ['MyAppTarget', 'MyOtherAppTarget'] +dependency 'JSONKit' + +target :test, :exclusive => true do + xcodeproj 'TestProject', 'Test' => :debug + link_with 'TestRunnerTarget' + dependency 'Kiwi' +end +``` + +_NOTE: As you can see it can take either one target name, or an array of names._ + +* If no explicit Xcode workspace is specified and only **one** project exists in +the same directory as the Podfile, then the name of that project is used as the +workspace’s name. + +* If no explicit Xcode project is specified for a target, it will use the Xcode +project of the parent target. If no target specifies an expicit Xcode project +and there is only **one** project in the same directory as the Podfile then that +project will be used. + +* If no explicit target is specified, then the Pods target will be linked with +the first target in your project. So if you only have one target you do not +need to specify the target to link with. + +See [#76](https://github.com/CocoaPods/CocoaPods/issues/76) for more info. + +Finally, CocoaPods will add build configurations to the Pods project for all +configurations in the other projects in the workspace. By default the +configurations are based on the `Release` configuration, to base them on the +`Debug` configuration you will have to explicitely specify them as can be seen +above in the following line: + +```ruby +xcodeproj 'TestProject', 'Test' => :debug +``` + + +### Documentation + +CocoaPods will now generate documentation for every library with the +[`appledoc`][5] tool and install it into Xcode’s documentation viewer. + +You can customize the settings used like so: + +```ruby +s.documentation = { :appledoc => ['--product-name', 'My awesome project!'] } +``` + +Alternatively, you can specify a URL where an HTML version of the documentation +can be found: + +```ruby +s.documentation = { :html => 'http://example.com/docs/index.html' } +``` + +See [#149](https://github.com/CocoaPods/CocoaPods/issues/149) and +[#151](https://github.com/CocoaPods/CocoaPods/issues/151) for more info. + + +### Licenses & Documentation + +CocoaPods will now generate two 'Acknowledgements' files for each target specified +in your Podfile which contain the License details for each Pod used in that target +(assuming details have been specified in the Pod spec). + +There is a markdown file, for general consumption, as well as a property list file +that can be added to a settings bundle for an iOS application. + +You don't need to do anything for this to happen, it should just work. 
+ +If you're not happy with the default boilerplate text generated for the title, header +and footnotes in the files, it's possible to customise these by overriding the methods +that generate the text in your `Podfile` like this: + +```ruby +class ::Pod::Generator::Acknowledgements + def header_text + "My custom header text" + end +end +``` + +You can even go one step further and customise the text on a per target basis by +checking against the target name, like this: + +```ruby +class ::Pod::Generator::Acknowledgements + def header_text + if @target_definition.label.end_with?("MyTargetName") + "Custom header text for MyTargetName" + else + "Custom header text for other targets" + end + end +end +``` + +Finally, here's a list of the methods that are available to override: + +```ruby +header_title +header_text +footnote_title +footnote_text +``` + + +### Introduced two new classes: LocalPod and Sandbox. + +The Sandbox represents the entire contents of the `POD_ROOT` (normally +`SOURCE_ROOT/Pods`). A LocalPod represents a pod that has been installed within +the Sandbox. + +These two classes can be used as better homes for various pieces of logic +currently spread throughout the installation process and provide a better API +for working with the contents of this directory. + + +### Xcodeproj API + +All Xcodeproj APIs are now in `snake_case`, instead of `camelCase`. If you are +manipulating the project from your Podfile's `post_install` hook, or from a +podspec, then update these method calls. + + +### Enhancements + +* [#188](https://github.com/CocoaPods/CocoaPods/pull/188): `list` command now + displays the specifications introduced in the master repo if it is given as an + option the number of days to take into account. + +* [#188](https://github.com/CocoaPods/CocoaPods/pull/188): Transferred search + layout improvements and options to `list` command. + +* [#166](https://github.com/CocoaPods/CocoaPods/issues/166): Added printing + of homepage and source to search results. + +* [#177](https://github.com/CocoaPods/CocoaPods/issues/177): Added `--stat` + option to display watchers and forks for pods hosted on GitHub. + +* [#177](https://github.com/CocoaPods/CocoaPods/issues/177): Introduced colors + and tuned layout of search. + +* [#112](https://github.com/CocoaPods/CocoaPods/issues/112): Introduced `--push` + option to `$ pod setup`. It configures the master spec repository to use the private + push URL. The change is preserved in future calls to `$ pod setup`. + +* [#153](https://github.com/CocoaPods/CocoaPods/issues/153): It is no longer + required to call `$ pod setup`. + +* [#163](https://github.com/CocoaPods/CocoaPods/issues/163): Print a template + for a new ticket when an error occurs. + +* Added a new Github-specific downloader that can download repositories as a + gzipped tarball. + +* No more global state is kept during resolving of dependencies. + +* Updated Xcodeproj to have a friendlier API. + + +### Fixes + +* [#142](https://github.com/CocoaPods/CocoaPods/issues/142): Xcode 4.3.2 no longer + supports passing the -fobj-arc flag to the linker and will fail to build. The + addition of this flag was a workaround for a compiler bug in previous versions. + This flag is no longer included by default - to keep using this flag, you need to + add `set_arc_compatibility_flag!` to your Podfile. + +* [#183](https://github.com/CocoaPods/CocoaPods/issues/183): Fix for + `.DS_Store` file in `~/.cocoapods` prevents `$ pod install` from running. 
+ +* [#134](https://github.com/CocoaPods/CocoaPods/issues/134): Match + `IPHONEOS_DEPLOYMENT_TARGET` build setting with `deployment_target` option in + generated Pods project file. + +* [#142](https://github.com/CocoaPods/CocoaPods/issues/): Add `-fobjc-arc` to + `OTHER_LDFLAGS` if _any_ pods require ARC. + +* [#148](https://github.com/CocoaPods/CocoaPods/issues/148): External encoding + set to UTF-8 on Ruby 1.9 to fix crash caused by non-ascii characters in pod + description. + +* Ensure all header search paths are quoted in the xcconfig file. + +* Added weak quoting to `ibtool` input paths. + + +## 0.5.0 (2011-11-22) + +No longer requires MacRuby. Runs on MRI 1.8.7 (OS X system version) and 1.9.3. + +A full list of all the changes since 0.3.0 can be found [here][7]. + + +## 0.4.0 + +Oops, accidentally skipped this version. + + +## 0.3.0 (2011-11-12) + +### Multiple targets + +Add support for multiple static library targets in the Pods Xcode project with +different sets of depedencies. This means that you can create a separate +library which contains all dependencies, including extra ones that you only use +in, for instance, a debug or test build. [[docs][1]] + +```ruby +# This Podfile will build three static libraries: +# * libPods.a +# * libPods-debug.a +# * libPods-test.a + +# This dependency is included in the `default` target, which generates the +# `libPods.a` library, and all non-exclusive targets. +dependency 'SSCatalog' + +target :debug do + # This dependency is only included in the `debug` target, which generates + # the `libPods-debug.a` library. + dependency 'CocoaLumberjack' +end + +target :test, :exclusive => true do + # This dependency is *only* included in the `test` target, which generates + # the `libPods-test.a` library. + dependency 'Kiwi' +end +``` + +### Install libraries from anywhere + +A dependency can take a git url if the repo contains a podspec file in its +root, or a podspec can be loaded from a file or HTTP location. If no podspec is +available, a specification can be defined inline in the Podfile. [[docs][2]] + +```ruby +# From a spec repo. +dependency 'SSToolkit' + +# Directly from the Pod’s repo (if it contains a podspec). +dependency 'SSToolkit', :git => 'https://github.com/samsoffes/sstoolkit.git' + +# Directly from the Pod’s repo (if it contains a podspec) with a specific commit (or tag). +dependency 'SSToolkit', :git => 'https://github.com/samsoffes/sstoolkit.git', + :commit => '2adcd0f81740d6b0cd4589af98790eee3bd1ae7b' + +# From a podspec that's outside a spec repo _and_ the library’s repo. This can be a file or http url. +dependency 'SSToolkit', :podspec => 'https://raw.github.com/gist/1353347/ef1800da9c5f5d267a642b8d3950b41174f2a6d7/SSToolkit-0.1.1.podspec' + +# If no podspec is available anywhere, you can define one right in your Podfile. 
+dependency do |s| + s.name = 'SSToolkit' + s.version = '0.1.3' + s.platform = :ios + s.source = { :git => 'https://github.com/samsoffes/sstoolkit.git', :commit => '2adcd0f81740d6b0cd4589af98790eee3bd1ae7b' } + s.resources = 'Resources' + s.source_files = 'SSToolkit/**/*.{h,m}' + s.frameworks = 'QuartzCore', 'CoreGraphics' + + def s.post_install(target) + prefix_header = config.project_pods_root + target.prefix_header_filename + prefix_header.open('a') do |file| + file.puts(%{#ifdef __OBJC__\n#import "SSToolkitDefines.h"\n#endif}) + end + end +end +``` + +### Add a `post_install` hook to the Podfile class + +This allows the user to customize, for instance, the generated Xcode project +_before_ it’s written to disk. [[docs][3]] + +```ruby +# Enable garbage collection support for MacRuby applications. +post_install do |installer| + installer.project.targets.each do |target| + target.build_configurations.each do |config| + config.build_settings['GCC_ENABLE_OBJC_GC'] = 'supported' + end + end +end +``` + +### Manifest + +Generate a Podfile.lock file next to the Podfile, which contains a manifest of +your application’s dependencies and their dependencies. + +``` +PODS: + - JSONKit (1.4) + - LibComponentLogging-Core (1.1.4) + - LibComponentLogging-NSLog (1.0.2): + - LibComponentLogging-Core (>= 1.1.4) + - RestKit-JSON-JSONKit (0.9.3): + - JSONKit + - RestKit (= 0.9.3) + - RestKit-Network (0.9.3): + - LibComponentLogging-NSLog + - RestKit (= 0.9.3) + - RestKit-ObjectMapping (0.9.3): + - RestKit (= 0.9.3) + - RestKit-Network (= 0.9.3) + +DOWNLOAD_ONLY: + - RestKit (0.9.3) + +DEPENDENCIES: + - RestKit-JSON-JSONKit + - RestKit-ObjectMapping +``` + +### Generate Xcode projects from scratch + +We no longer ship template projects with the gem, but instead generate them +programmatically. This code has moved out into its own [Xcodeproj gem][4], +allowing you to automate Xcode related tasks. + + + + +[1]: https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/podfile.rb#L151 +[2]: https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/podfile.rb#L82 +[3]: https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/podfile.rb#L185 +[4]: https://github.com/CocoaPods/Xcodeproj +[5]: https://github.com/tomaz/appledoc +[6]: https://github.com/CocoaPods/CocoaPods/compare/0.5.1...0.6.0 +[7]: https://github.com/CocoaPods/CocoaPods/compare/0.3.10...0.5.0 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/LICENSE new file mode 100644 index 0000000..6e8a60d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/LICENSE @@ -0,0 +1,33 @@ +This project is licensed under the MIT license. + +Copyright (c) 2011 Eloy Durán , + Fabio Pelosin , + Samuel Giddins , + Marius Rackwitz , + Kyle Fuller , + Boris Bügling , + Orta Therox , + Olivier Halligon , + Danielle Tomlinson , + Dimitris Koutsogiorgas , + Paul Beusterien , and + Eric Amorde . + + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/README.md new file mode 100644 index 0000000..5bc194b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/README.md @@ -0,0 +1,82 @@ +![CocoaPods Logo](https://raw.github.com/CocoaPods/shared_resources/master/assets/cocoapods-banner-readme.png) + +### CocoaPods: The Cocoa dependency manager + +[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/CocoaPods/Specs)](https://github.com/CocoaPods/CocoaPods/actions) +[![Gem Version](https://img.shields.io/gem/v/cocoapods)](https://rubygems.org/gems/cocoapods) +[![Maintainability](https://api.codeclimate.com/v1/badges/8f0fe544baf2ae1acc2b/maintainability)](https://codeclimate.com/github/CocoaPods/CocoaPods/maintainability) +[![Test Coverage](https://api.codeclimate.com/v1/badges/8f0fe544baf2ae1acc2b/test_coverage)](https://codeclimate.com/github/CocoaPods/CocoaPods/test_coverage) + +CocoaPods manages dependencies for your Xcode projects. + +You specify the dependencies for your project in a simple text file: your `Podfile`. +CocoaPods recursively resolves dependencies between libraries, fetches +source code for all dependencies, and creates and maintains an Xcode +workspace to build your project. The latest released Xcode versions and the +prior version are supported. + +Installing and updating CocoaPods is very easy. Don't miss the [Installation +guide](https://guides.cocoapods.org/using/getting-started.html#installation) and the +[Getting Started guide](https://guides.cocoapods.org/using/getting-started.html). + +## Project Goals + +CocoaPods aims to improve the engagement with, and discoverability +of, third party open-source Cocoa libraries. These +project goals influence and drive the design of CocoaPods: + +- Create and share libraries, and use them in your own projects, + without creating extra work for library authors. Integrate + non-CocoaPods libraries and hack on your own fork of any + CocoaPods library with a simple transparent `Podspec` standard. +- Allow library authors to structure their libraries however they like. +- Save time for library authors by automating a lot of Xcode work not + related to their libraries' functionality. +- Support any source management system. (Currently supported are `git`, + `svn`, `mercurial`, `bazaar`, and various types of archives downloaded over HTTP.) +- Promote a culture of distributed collaboration on pods, but also provide + features only possible with a centralised solution to foster a community. +- Build tools on top of the core Cocoa development system, including those + typically deployed to other operating systems, such as web-services. +- Provide opinionated and automated integration, but make it completely + optional. You may manually integrate your CocoaPods dependencies + into your Xcode project as you see fit, with or without a workspace. +- Solve everyday problems for Cocoa and Xcode developers. 
+ +## Sponsors + +Lovingly sponsored by a collection of companies, see the footer of [CocoaPods.org](https://cocoapods.org) for an up-to-date list. + +## Collaborate + +All CocoaPods development happens on GitHub. Contributions make for good karma and +we [welcome new](https://blog.cocoapods.org/starting-open-source/) contributors with joy. We take contributors seriously, and thus have a +contributor [code of conduct](CODE_OF_CONDUCT.md). + +## Links + +| Link | Description | +| :----- | :------ | +[CocoaPods.org](https://cocoapods.org/) | Homepage and search for Pods. +[@CocoaPods](https://twitter.com/CocoaPods) | Follow CocoaPods on Twitter to stay up to date. +[Blog](https://blog.cocoapods.org) | The CocoaPods blog. +[Mailing List](https://groups.google.com/group/cocoapods) | Feel free to ask any kind of question. +[Guides](https://guides.cocoapods.org) | Everything you want to know about CocoaPods. +[Changelog](https://github.com/CocoaPods/CocoaPods/blob/master/CHANGELOG.md) | See the changes introduced in each CocoaPods version. +[New Pods RSS](https://feeds.cocoapods.org/new-pods.rss) | Don't miss any new Pods. +[Code of Conduct](CODE_OF_CONDUCT.md) | Find out the standards we hold ourselves to. + +## Projects + +CocoaPods is composed of the following projects: + +| Status | Project | Description | Info | +| :-------- | :------ | :--- | :--- | +| [![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/CocoaPods/Specs)](https://github.com/CocoaPods/CocoaPods/actions) | [CocoaPods](https://github.com/CocoaPods/CocoaPods) | The CocoaPods command line tool. | [guides](https://guides.cocoapods.org) +| [![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/Core/Specs)](https://github.com/CocoaPods/Core/actions) | [CocoaPods Core](https://github.com/CocoaPods/Core) | Support for working with specifications and podfiles. | [docs](https://guides.cocoapods.org/contributing/components.html) +| [![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-downloader/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-downloader) |[CocoaPods Downloader](https://github.com/CocoaPods/cocoapods-downloader) | Downloaders for various source types. | [docs](https://www.rubydoc.info/gems/cocoapods-downloader) +| [![Build Status](https://img.shields.io/travis/CocoaPods/Xcodeproj/master.svg?style=flat)](https://travis-ci.org/CocoaPods/Xcodeproj) | [Xcodeproj](https://github.com/CocoaPods/Xcodeproj) | Create and modify Xcode projects from Ruby. | [docs](https://www.rubydoc.info/gems/xcodeproj) +| [![Build Status](https://img.shields.io/travis/CocoaPods/CLAide/master.svg?style=flat)](https://travis-ci.org/CocoaPods/CLAide) | [CLAide](https://github.com/CocoaPods/CLAide) | A small command-line interface framework. | [docs](https://www.rubydoc.info/gems/claide) +| [![Build Status](https://img.shields.io/travis/CocoaPods/Molinillo/master.svg?style=flat)](https://travis-ci.org/CocoaPods/Molinillo) | [Molinillo](https://github.com/CocoaPods/Molinillo) | A powerful generic dependency resolver. | [docs](https://www.rubydoc.info/gems/molinillo) +| [![Build Status](https://img.shields.io/travis/CocoaPods/CocoaPods-app/master.svg?style=flat)](https://travis-ci.org/CocoaPods/CocoaPods-app) | [CocoaPods.app](https://github.com/CocoaPods/CocoaPods-app) | A full-featured and standalone installation of CocoaPods. | [info](https://cocoapods.org/app) +| | [Master Repo ](https://github.com/CocoaPods/Specs) | Master repository of specifications. 
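+
+As mentioned above, dependencies are declared in a `Podfile`; a minimal one
+might look like this (the target and pod names below are illustrative):
+
+```ruby
+platform :ios, '12.0'
+
+target 'MyApp' do
+  use_frameworks!
+
+  # Declare the libraries the app depends on.
+  pod 'Alamofire', '~> 5.4'
+end
+```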
| [guides](https://guides.cocoapods.org/making/specs-and-specs-repo.html) diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/bin/pod b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/bin/pod new file mode 100755 index 0000000..47f6a27 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/bin/pod @@ -0,0 +1,56 @@ +#!/usr/bin/env ruby + +if Encoding.default_external != Encoding::UTF_8 + + if ARGV.include? '--no-ansi' + STDERR.puts <<-DOC + WARNING: CocoaPods requires your terminal to be using UTF-8 encoding. + Consider adding the following to ~/.profile: + + export LANG=en_US.UTF-8 + DOC + else + STDERR.puts <<-DOC + \e[33mWARNING: CocoaPods requires your terminal to be using UTF-8 encoding. + Consider adding the following to ~/.profile: + + export LANG=en_US.UTF-8 + \e[0m + DOC + end + +end + +if $PROGRAM_NAME == __FILE__ && !ENV['COCOAPODS_NO_BUNDLER'] + ENV['BUNDLE_GEMFILE'] = File.expand_path('../../Gemfile', __FILE__) + require 'rubygems' + require 'bundler/setup' + $LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) +elsif ENV['COCOAPODS_NO_BUNDLER'] + require 'rubygems' + gem 'cocoapods' +end + +STDOUT.sync = true if ENV['CP_STDOUT_SYNC'] == 'TRUE' + +require 'cocoapods' + +if profile_filename = ENV['COCOAPODS_PROFILE'] + require 'ruby-prof' + reporter = + case (profile_extname = File.extname(profile_filename)) + when '.txt' + RubyProf::FlatPrinterWithLineNumbers + when '.html' + RubyProf::GraphHtmlPrinter + when '.callgrind' + RubyProf::CallTreePrinter + else + raise "Unknown profiler format indicated by extension: #{profile_extname}" + end + File.open(profile_filename, 'w') do |io| + reporter.new(RubyProf.profile { Pod::Command.run(ARGV) }).print(io) + end +else + Pod::Command.run(ARGV) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/bin/sandbox-pod b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/bin/sandbox-pod new file mode 100755 index 0000000..2752885 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/bin/sandbox-pod @@ -0,0 +1,168 @@ +#!/usr/bin/env ruby +# encoding: utf-8 + +# This bin wrapper runs the `pod` command in a OS X sandbox. The reason for this +# is to ensure that people can’t use malicious code from pod specifications. +# +# It does this by creating a ‘seatbelt’ profile on the fly and executing the +# given command through `/usr/bin/sandbox-exec`. This profile format is an +# undocumented format, which uses TinyScheme to implement its DSL. +# +# Even though it uses a undocumented format, it’s actually very self-explanatory. +# Because we use a whitelist approach, `(deny default)`, any action that is +# denied is logged to `/var/log/system.log`. So tailing that should provide +# enough information on steps that need to be take to get something to work. 
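+#
+# For example (illustrative only), one way to watch for denials while running
+# a sandboxed install is:
+#
+#     $ sandbox-pod install
+#     $ tail -f /var/log/system.log | grep deny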
+# +# For more information see: +# +# * https://github.com/CocoaPods/CocoaPods/issues/939 +# * http://reverse.put.as/wp-content/uploads/2011/08/The-Apple-Sandbox-BHDC2011-Slides.pdf +# * http://reverse.put.as/wp-content/uploads/2011/08/The-Apple-Sandbox-BHDC2011-Paper.pdf +# * https://github.com/s7ephen/OSX-Sandbox--Seatbelt--Profiles +# * `$ man sandbox-exec` +# * `$ ls /usr/share/sandbox` + +if $0 == __FILE__ + $:.unshift File.expand_path('../../lib', __FILE__) +end + +require 'pathname' +require 'cocoapods/config' +require 'rbconfig' +require 'erb' + +PROFILE_ERB_TEMPLATE = <<-EOS +(version 1) +(debug allow) + +(import "mDNSResponder.sb") + +(allow file-ioctl) +(allow sysctl-read) +(allow mach-lookup) +(allow ipc-posix-shm) +(allow process-fork) +(allow system-socket) + +; TODO make this stricter if possible +(allow network-outbound) + +(allow process-exec + (literal + "<%= pod_bin %>" + "<%= ruby_bin %>" + ) + (regex +<% prefixes.each do |prefix| %> + #"^<%= prefix %>/*" +<% end %> + ) +) + +(allow file-read-metadata) +(allow file-read* + ; This is currenly only added because using `xcodebuild` to build a resource + ; bundle target starts a FSEvents stream on `/`. No idea why that would be + ; needed, but for now it doesn’t seem like a real problem. + (literal "/") + (regex + ; TODO see if we can restrict this more, but it's going to be hard + #"^/Users/[^.]+/*" + ;#"^/Users/[^.]+/.netrc" + ;#"^/Users/[^.]+/.gemrc" + ;#"^/Users/[^.]+/.gem/*" + ;#"^/Users/[^.]+/Library/.*" + #"^/Library/*" + #"^/System/Library/*" + #"^/usr/lib/*" + #"^/usr/share/*" + #"^/private/*" + #"^/dev/*" + #"^<%= ruby_prefix %>" + #"^<%= pod_prefix %>" + #"^<%= xcode_app_path %>" + #"^<%= Pod::Config.instance.repos_dir %>" +<% prefixes.each do |prefix| %> + #"^<%= prefix %>/*" +<% end %> + ) +) + +(allow file-write* + (literal + "/dev/dtracehelper" + "/dev/null" + ) + (regex + #"^<%= Pod::Config.instance.project_root %>" + #"^<%= Pod::Config.instance.repos_dir %>" + #"^/Users/[^.]+/Library/Caches/CocoaPods/*" + #"^/dev/tty" + #"^/private/var" + ) +) + +(deny default) +EOS + +class Profile + def pod_bin + File.expand_path('../pod', __FILE__) + end + + def pod_prefix + File.expand_path('../..', pod_bin) + end + + def ruby_bin + File.join(RbConfig::CONFIG['bindir'], RbConfig::CONFIG['ruby_install_name']) + end + + def ruby_prefix + RbConfig::CONFIG['prefix'] + end + + def prefix_from_bin(bin_name) + unless (path = `which #{bin_name}`.strip).empty? + File.dirname(File.dirname(path)) + end + end + + def prefixes + prefixes = ['/bin', '/usr/bin', '/usr/libexec', xcode_app_path] + prefixes << `brew --prefix`.strip unless `which brew`.strip.empty? + + # From asking people, it seems MacPorts does not have a `prefix` command, like + # Homebrew does, so make an educated guess: + if port_prefix = prefix_from_bin('port') + prefixes << port_prefix + end + + if rbenv_prefix = prefix_from_bin('rbenv') + prefixes << rbenv_prefix + end + + prefixes + end + + def developer_prefix + `xcode-select --print-path`.strip + end + + def xcode_app_path + File.expand_path('../..', developer_prefix) + end + + # TODO: raise SAFE level (0) to 4 if possible. + def generate + ERB.new(PROFILE_ERB_TEMPLATE, 0, '>').result(binding) + end +end + +# Ensure the `pod` bin doesn’t think it needs to use Bundler. 
+ENV['COCOAPODS_NO_BUNDLER'] = '1' + +profile = Profile.new +# puts profile.generate +command = ['/usr/bin/sandbox-exec', '-p', profile.generate, profile.pod_bin, *ARGV] +exec(*command) diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods.rb new file mode 100644 index 0000000..cd7728c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods.rb @@ -0,0 +1,78 @@ +require 'rubygems' +require 'xcodeproj' + +# It is very likely that we'll need these and as some of those paths will atm +# result in a I18n deprecation warning, we load those here now so that we can +# get rid of that warning. +require 'active_support/core_ext/string/strip' +require 'active_support/core_ext/string/inflections' +require 'active_support/core_ext/array/conversions' +# TODO: check what this actually does by the time we're going to add support for +# other locales. +require 'i18n' +if I18n.respond_to?(:enforce_available_locales=) + I18n.enforce_available_locales = false +end + +module Pod + require 'pathname' + require 'tmpdir' + + require 'cocoapods/gem_version' + require 'cocoapods/version_metadata' + require 'cocoapods-core' + require 'cocoapods/config' + require 'cocoapods/downloader' + + # Loaded immediately after dependencies to ensure proper override of their + # UI methods. + # + require 'cocoapods/user_interface' + + # Indicates an user error. This is defined in cocoapods-core. + # + class Informative < PlainInformative + def message + "[!] #{super}".red + end + end + + Xcodeproj::PlainInformative.send(:include, CLAide::InformativeError) + + autoload :AggregateTarget, 'cocoapods/target/aggregate_target' + autoload :Command, 'cocoapods/command' + autoload :Deintegrator, 'cocoapods_deintegrate' + autoload :Executable, 'cocoapods/executable' + autoload :ExternalSources, 'cocoapods/external_sources' + autoload :Installer, 'cocoapods/installer' + autoload :HooksManager, 'cocoapods/hooks_manager' + autoload :PodTarget, 'cocoapods/target/pod_target' + autoload :Project, 'cocoapods/project' + autoload :Resolver, 'cocoapods/resolver' + autoload :Sandbox, 'cocoapods/sandbox' + autoload :Target, 'cocoapods/target' + autoload :Validator, 'cocoapods/validator' + + module Generator + autoload :Acknowledgements, 'cocoapods/generator/acknowledgements' + autoload :Markdown, 'cocoapods/generator/acknowledgements/markdown' + autoload :Plist, 'cocoapods/generator/acknowledgements/plist' + autoload :BridgeSupport, 'cocoapods/generator/bridge_support' + autoload :Constant, 'cocoapods/generator/constant' + autoload :ScriptPhaseConstants, 'cocoapods/generator/script_phase_constants' + autoload :CopyResourcesScript, 'cocoapods/generator/copy_resources_script' + autoload :CopydSYMsScript, 'cocoapods/generator/copy_dsyms_script' + autoload :DummySource, 'cocoapods/generator/dummy_source' + autoload :EmbedFrameworksScript, 'cocoapods/generator/embed_frameworks_script' + autoload :CopyXCFrameworksScript, 'cocoapods/generator/copy_xcframework_script' + autoload :FileList, 'cocoapods/generator/file_list' + autoload :Header, 'cocoapods/generator/header' + autoload :InfoPlistFile, 'cocoapods/generator/info_plist_file' + autoload :ModuleMap, 'cocoapods/generator/module_map' + autoload :PrefixHeader, 'cocoapods/generator/prefix_header' + autoload :UmbrellaHeader, 'cocoapods/generator/umbrella_header' + autoload :AppTargetHelper, 'cocoapods/generator/app_target_helper' + end + + require 'cocoapods/core_overrides' +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command.rb new file mode 100644 index 0000000..8c993fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command.rb @@ -0,0 +1,185 @@ +require 'colored2' +require 'claide' +require 'molinillo/errors' + +module Molinillo + class ResolverError + include CLAide::InformativeError + end +end + +module Pod + class PlainInformative + include CLAide::InformativeError + end + + class Command < CLAide::Command + require 'cocoapods/command/options/repo_update' + require 'cocoapods/command/options/project_directory' + include Options + + require 'cocoapods/command/cache' + require 'cocoapods/command/env' + require 'cocoapods/command/init' + require 'cocoapods/command/install' + require 'cocoapods/command/ipc' + require 'cocoapods/command/lib' + require 'cocoapods/command/list' + require 'cocoapods/command/outdated' + require 'cocoapods/command/repo' + require 'cocoapods/command/setup' + require 'cocoapods/command/spec' + require 'cocoapods/command/update' + + self.abstract_command = true + self.command = 'pod' + self.version = VERSION + self.description = 'CocoaPods, the Cocoa library package manager.' + self.plugin_prefixes = %w(claide cocoapods) + + def self.options + [ + ['--allow-root', 'Allows CocoaPods to run as root'], + ['--silent', 'Show nothing'], + ].concat(super) + end + + def self.run(argv) + ensure_not_root_or_allowed! argv + verify_minimum_git_version! + verify_xcode_license_approved! + + super(argv) + ensure + UI.print_warnings + end + + def self.report_error(exception) + case exception + when Interrupt + puts '[!] Cancelled'.red + Config.instance.verbose? ? raise : exit(1) + when SystemExit + raise + else + if ENV['COCOA_PODS_ENV'] != 'development' + puts UI::ErrorReport.report(exception) + UI::ErrorReport.search_for_exceptions(exception) + exit 1 + else + raise exception + end + end + end + + # @todo If a command is run inside another one some settings which where + # true might return false. + # + # @todo We should probably not even load colored unless needed. + # + # @todo Move silent flag to CLAide. + # + # @note It is important that the commands don't override the default + # settings if their flag is missing (i.e. their value is nil) + # + def initialize(argv) + super + config.silent = argv.flag?('silent', config.silent) + config.allow_root = argv.flag?('allow-root', config.allow_root) + config.verbose = self.verbose? unless verbose.nil? + unless self.ansi_output? + Colored2.disable! + String.send(:define_method, :colorize) { |string, _| string } + end + end + + # Ensure root user + # + # @return [void] + # + def self.ensure_not_root_or_allowed!(argv, uid = Process.uid, is_windows = Gem.win_platform?) + root_allowed = argv.include?('--allow-root') || !ENV['COCOAPODS_ALLOW_ROOT'].nil? + help! 'You cannot run CocoaPods as root.' unless root_allowed || uid != 0 || is_windows + end + + # Ensure that the master spec repo exists + # + # @return [void] + # + def ensure_master_spec_repo_exists! + unless config.sources_manager.master_repo_functional? + Setup.new(CLAide::ARGV.new([])).run + end + end + + #-------------------------------------------------------------------------# + + include Config::Mixin + + private + + # Returns a new {Gem::Version} based on the systems `git` version. 
+ # + # @return [Gem::Version] + # + def self.git_version + raw_version = Executable.capture_command('git', ['--version']).first + unless match = raw_version.scan(/\d+\.\d+\.\d+/).first + raise "Failed to extract git version from `git --version` (#{raw_version.inspect})" + end + Gem::Version.new(match) + end + + # Checks that the git version is at least 1.8.5 + # + # @raise If the git version is older than 1.8.5 + # + # @return [void] + # + def self.verify_minimum_git_version! + if git_version < Gem::Version.new('1.8.5') + raise Informative, 'You need at least git version 1.8.5 to use CocoaPods' + end + end + + # Returns a new {Installer} parametrized from the {Config}. + # + # @return [Installer] + # + def installer_for_config + Installer.new(config.sandbox, config.podfile, config.lockfile) + end + + # Checks that the podfile exists. + # + # @raise If the podfile does not exists. + # + # @return [void] + # + def verify_podfile_exists! + unless config.podfile + raise Informative, "No `Podfile' found in the project directory." + end + end + + # Checks that the lockfile exists. + # + # @raise If the lockfile does not exists. + # + # @return [void] + # + def verify_lockfile_exists! + unless config.lockfile + raise Informative, "No `Podfile.lock' found in the project directory, run `pod install'." + end + end + + def self.verify_xcode_license_approved! + if `/usr/bin/xcrun clang 2>&1` =~ /license/ && !$?.success? + raise Informative, 'You have not agreed to the Xcode license, which ' \ + 'you must do to use CocoaPods. Agree to the license by running: ' \ + '`xcodebuild -license`.' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache.rb new file mode 100644 index 0000000..fd62ae1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache.rb @@ -0,0 +1,28 @@ +require 'cocoapods/downloader' +require 'cocoapods/command/cache/list' +require 'cocoapods/command/cache/clean' + +module Pod + class Command + class Cache < Command + self.abstract_command = true + self.summary = 'Manipulate the CocoaPods cache' + + self.description = <<-DESC + Manipulate the download cache for pods, like printing the cache content + or cleaning the pods cache. + DESC + + def initialize(argv) + @cache = Downloader::Cache.new(Config.instance.cache_root + 'Pods') + super + end + + private + + def pod_type(pod_cache_descriptor) + pod_cache_descriptor[:release] ? 'Release' : 'External' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache/clean.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache/clean.rb new file mode 100644 index 0000000..aa3563f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache/clean.rb @@ -0,0 +1,90 @@ +module Pod + class Command + class Cache < Command + class Clean < Cache + self.summary = 'Remove the cache for pods' + + self.description = <<-DESC + Remove the cache for a given pod, or clear the cache completely. + + If there is multiple cache for various versions of the requested pod, + you will be asked which one to clean. Use `--all` to clean them all. + + If you do not give a pod `NAME`, you need to specify the `--all` + flag (this is to avoid cleaning all the cache by mistake). 
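Concretely, the two cases read something like the sketch below; `SomePod` is a made-up name, and the calls simply shell out to a `pod` executable assumed to be on `PATH`.

```ruby
# Illustrative invocations of the cache-clean behaviour described above
# ("SomePod" is a placeholder pod name, not a real dependency of this project).
system('pod', 'cache', 'clean', 'SomePod', '--all') # drop every cached copy of one pod
system('pod', 'cache', 'clean', '--all')            # wipe the whole cache
```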
+ DESC + + self.arguments = [ + CLAide::Argument.new('NAME', false), + ] + + def self.options + [[ + '--all', 'Remove all the cached pods without asking' + ]].concat(super) + end + + def initialize(argv) + @pod_name = argv.shift_argument + @wipe_all = argv.flag?('all') + super + end + + def run + if @pod_name.nil? + # Note: at that point, @wipe_all is always true (thanks to `validate!`) + # Remove all + clear_cache + else + # Remove only cache for this pod + cache_descriptors = @cache.cache_descriptors_per_pod[@pod_name] + if cache_descriptors.nil? + UI.notice("No cache for pod named #{@pod_name} found") + elsif cache_descriptors.count > 1 && !@wipe_all + # Ask which to remove + choices = cache_descriptors.map { |c| "#{@pod_name} v#{c[:version]} (#{pod_type(c)})" } + index = UI.choose_from_array(choices, 'Which pod cache do you want to remove?') + remove_caches([cache_descriptors[index]]) + else + # Remove all found cache of this pod + remove_caches(cache_descriptors) + end + end + end + + def validate! + super + if @pod_name.nil? && !@wipe_all + # Security measure, to avoid removing the pod cache too agressively by mistake + help! 'You should either specify a pod name or use the --all flag' + end + end + + private + + # Removes the specified cache + # + # @param [Array] cache_descriptors + # An array of caches to remove, each specified with the same + # hash as cache_descriptors_per_pod especially :spec_file and :slug + # + def remove_caches(cache_descriptors) + cache_descriptors.each do |desc| + UI.message("Removing spec #{desc[:spec_file]} (v#{desc[:version]})") do + FileUtils.rm(desc[:spec_file]) + end + UI.message("Removing cache #{desc[:slug]}") do + FileUtils.rm_rf(desc[:slug]) + end + end + end + + def clear_cache + UI.message("Removing the whole cache dir #{@cache.root}") do + FileUtils.rm_rf(@cache.root) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache/list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache/list.rb new file mode 100644 index 0000000..7c6fffd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/cache/list.rb @@ -0,0 +1,69 @@ +module Pod + class Command + class Cache < Command + class List < Cache + self.summary = 'List the paths of pod caches for each known pod' + + self.description = <<-DESC + Shows the content of the pods cache as a YAML tree output, organized by pod. + If `NAME` is given, only the caches for that pod will be included in the output. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', false), + ] + + def self.options + [[ + '--short', 'Only print the path relative to the cache root' + ]].concat(super) + end + + def initialize(argv) + @pod_name = argv.shift_argument + @short_output = argv.flag?('short') + super + end + + def run + UI.puts("$CACHE_ROOT: #{@cache.root}") if @short_output + if @pod_name.nil? # Print all + @cache.cache_descriptors_per_pod.each do |pod_name, cache_descriptors| + print_pod_cache_infos(pod_name, cache_descriptors) + end + else # Print only for the requested pod + cache_descriptors = @cache.cache_descriptors_per_pod[@pod_name] + if cache_descriptors.nil? + UI.notice("No cache for pod named #{@pod_name} found") + else + print_pod_cache_infos(@pod_name, cache_descriptors) + end + end + end + + private + + # Prints the list of specs & pod cache dirs for a single pod name. 
+ # + # This output is valid YAML so it can be parsed with 3rd party tools + # + # @param [Array] cache_descriptors + # The various infos about a pod cache. Keys are + # :spec_file, :version, :release and :slug + # + def print_pod_cache_infos(pod_name, cache_descriptors) + UI.puts "#{pod_name}:" + cache_descriptors.each do |desc| + if @short_output + [:spec_file, :slug].each { |k| desc[k] = desc[k].relative_path_from(@cache.root) } + end + UI.puts(" - Version: #{desc[:version]}") + UI.puts(" Type: #{pod_type(desc)}") + UI.puts(" Spec: #{desc[:spec_file]}") + UI.puts(" Pod: #{desc[:slug]}") + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/env.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/env.rb new file mode 100644 index 0000000..fac03b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/env.rb @@ -0,0 +1,66 @@ +require 'cocoapods/user_interface/error_report' + +module Pod + class Command + class Env < Command + self.summary = 'Display pod environment' + self.description = 'Display pod environment.' + + def self.options + options = [] + options.concat(super.reject { |option, _| option == '--silent' }) + end + + def initialize(argv) + super + config.silent = false + end + + def run + UI.puts report + end + + def report + <<-EOS + +#{stack} +#{executable_path} +### Plugins + +``` +#{plugins_string} +``` +#{markdown_podfile} +EOS + end + + def stack + UI::ErrorReport.stack + end + + def markdown_podfile + UI::ErrorReport.markdown_podfile + end + + def plugins_string + UI::ErrorReport.plugins_string + end + + private + + def executable_path + <<-EOS +### Installation Source + +``` +Executable Path: #{actual_path} +``` +EOS + end + + def actual_path + $PROGRAM_NAME + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/init.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/init.rb new file mode 100644 index 0000000..1f27c9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/init.rb @@ -0,0 +1,122 @@ +require 'xcodeproj' +require 'active_support/core_ext/string/strip' + +module Pod + class Command + class Init < Command + self.summary = 'Generate a Podfile for the current directory' + self.description = <<-DESC + Creates a Podfile for the current directory if none currently exists. If + an `XCODEPROJ` project file is specified or if there is only a single + project file in the current directory, targets will be automatically + generated based on targets defined in the project. + + It is possible to specify a list of dependencies which will be used by + the template in the `Podfile.default` (normal targets) `Podfile.test` + (test targets) files which should be stored in the + `#{Config.instance.templates_dir}` folder. + DESC + self.arguments = [ + CLAide::Argument.new('XCODEPROJ', :false), + ] + + def initialize(argv) + @podfile_path = Pathname.pwd + 'Podfile' + @project_path = argv.shift_argument + @project_paths = Pathname.pwd.children.select { |pn| pn.extname == '.xcodeproj' } + super + end + + def validate! + super + raise Informative, 'Existing Podfile found in directory' unless config.podfile_path_in_dir(Pathname.pwd).nil? + if @project_path + help! "Xcode project at #{@project_path} does not exist" unless File.exist? 
@project_path + project_path = @project_path + else + raise Informative, 'No Xcode project found, please specify one' unless @project_paths.length > 0 + raise Informative, 'Multiple Xcode projects found, please specify one' unless @project_paths.length == 1 + project_path = @project_paths.first + end + @xcode_project = Xcodeproj::Project.open(project_path) + end + + def run + @podfile_path.open('w') { |f| f << podfile_template(@xcode_project) } + end + + private + + # @param [Xcodeproj::Project] project + # The Xcode project to generate a podfile for. + # + # @return [String] the text of the Podfile for the provided project + # + def podfile_template(project) + podfile = '' + podfile << "project '#{@project_path}'\n\n" if @project_path + podfile << <<-PLATFORM.strip_heredoc + # Uncomment the next line to define a global platform for your project + # platform :ios, '9.0' + PLATFORM + + # Split out the targets into app and test targets + test_targets, app_targets = project.native_targets.sort_by { |t| t.name.downcase }.partition(&:test_target_type?) + + app_targets.each do |app_target| + test_targets_for_app = test_targets.select do |target| + target.name.downcase.start_with?(app_target.name.downcase) + end + podfile << target_module(app_target, test_targets_for_app) + end + + podfile + end + + # @param [PBXNativeTarget] host the native host target for the module. + # + # @param [Array] tests the native test targets for the module. + # + # @return [String] the text for the target module. + # + def target_module(host, tests) + target_module = "\ntarget '#{host.name.gsub(/'/, "\\\\\'")}' do\n" + + target_module << <<-RUBY + # Comment the next line if you don't want to use dynamic frameworks + use_frameworks! + + RUBY + + target_module << template_contents(config.default_podfile_path, ' ', "Pods for #{host.name}\n") + + tests.each do |test| + target_module << "\n target '#{test.name.gsub(/'/, "\\\\\'")}' do\n" + unless Pod::AggregateTarget::EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES.include?(host.symbol_type) || test.symbol_type == :ui_test_bundle + target_module << " inherit! :search_paths\n" + end + target_module << template_contents(config.default_test_podfile_path, ' ', 'Pods for testing') + target_module << "\n end\n" + end + + target_module << "\nend\n" + end + + # @param [Pathname] path the path of the template to load contents from. + # + # @param [String] prefix the prefix to use for each line. + # + # @param [String] fallback the fallback contents to use if the path for the template does not exist. + # + # @return [String] the template contents for the given path. + # + def template_contents(path, prefix, fallback) + if path.exist? + path.read.chomp.lines.map { |line| "#{prefix}#{line}" }.join("\n") + else + "#{prefix}# #{fallback}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/install.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/install.rb new file mode 100644 index 0000000..27c7086 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/install.rb @@ -0,0 +1,56 @@ +module Pod + class Command + class Install < Command + include RepoUpdate + include ProjectDirectory + + self.summary = 'Install project dependencies according to versions from a Podfile.lock' + + self.description = <<-DESC + Downloads all dependencies defined in `Podfile` and creates an Xcode + Pods library project in `./Pods`. 
+ + The Xcode project file should be specified in your `Podfile` like this: + + project 'path/to/XcodeProject.xcodeproj' + + If no project is specified, then a search for an Xcode project will + be made. If more than one Xcode project is found, the command will + raise an error. + + This will configure the project to reference the Pods static library, + add a build configuration file, and add a post build script to copy + Pod resources. + + This may return one of several error codes if it encounters problems. + * `1` Generic error code + * `31` Spec not found (i.e out-of-date source repos, mistyped Pod name etc...) + DESC + + def self.options + [ + ['--repo-update', 'Force running `pod repo update` before install'], + ['--deployment', 'Disallow any changes to the Podfile or the Podfile.lock during installation'], + ['--clean-install', 'Ignore the contents of the project cache and force a full pod installation. This only ' \ + 'applies to projects that have enabled incremental installation'], + ].concat(super).reject { |(name, _)| name == '--no-repo-update' } + end + + def initialize(argv) + super + @deployment = argv.flag?('deployment', false) + @clean_install = argv.flag?('clean-install', false) + end + + def run + verify_podfile_exists! + installer = installer_for_config + installer.repo_update = repo_update?(:default => false) + installer.update = false + installer.deployment = @deployment + installer.clean_install = @clean_install + installer.install! + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc.rb new file mode 100644 index 0000000..88744dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc.rb @@ -0,0 +1,19 @@ +require 'cocoapods/command/ipc/list' +require 'cocoapods/command/ipc/podfile' +require 'cocoapods/command/ipc/podfile_json' +require 'cocoapods/command/ipc/repl' +require 'cocoapods/command/ipc/spec' +require 'cocoapods/command/ipc/update_search_index' + +module Pod + class Command + class IPC < Command + self.abstract_command = true + self.summary = 'Inter-process communication' + + def output_pipe + STDOUT + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/list.rb new file mode 100644 index 0000000..18fb3d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/list.rb @@ -0,0 +1,40 @@ +module Pod + class Command + class IPC < Command + class List < IPC + self.summary = 'Lists the specifications known to CocoaPods' + self.description = <<-DESC + Prints to STDOUT a YAML dictionary where the keys are the name of the + specifications and each corresponding value is a dictionary with + the following keys: + - defined_in_file + - version + - authors + - summary + - description + - platforms + DESC + + def run + require 'yaml' + sets = config.sources_manager.aggregate.all_sets + result = {} + sets.each do |set| + begin + spec = set.specification + result[spec.name] = { + 'authors' => spec.authors.keys, + 'summary' => spec.summary, + 'description' => spec.description, + 'platforms' => spec.available_platforms.map { |p| p.name.to_s }, + } + rescue DSLError + next + end + end + output_pipe.puts result.to_yaml + end + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/podfile.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/podfile.rb new file mode 100644 index 0000000..5cbf9bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/podfile.rb @@ -0,0 +1,31 @@ +module Pod + class Command + class IPC < Command + class Podfile < IPC + include ProjectDirectory + + self.summary = 'Converts a Podfile to YAML' + self.description = 'Converts a Podfile to YAML and prints it to STDOUT.' + self.arguments = [ + CLAide::Argument.new('PATH', true), + ] + + def initialize(argv) + @path = argv.shift_argument + super + end + + def validate! + super + help! 'A Podfile path is required.' unless @path + end + + def run + require 'yaml' + podfile = Pod::Podfile.from_file(@path) + output_pipe.puts podfile.to_yaml + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/podfile_json.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/podfile_json.rb new file mode 100644 index 0000000..a6aca6a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/podfile_json.rb @@ -0,0 +1,30 @@ +module Pod + class Command + class IPC < Command + class PodfileJSON < IPC + include ProjectDirectory + + self.summary = 'Converts a Podfile to JSON' + self.description = 'Converts a Podfile to JSON and prints it to STDOUT.' + self.arguments = [ + CLAide::Argument.new('PATH', true), + ] + + def initialize(argv) + @path = argv.shift_argument + super + end + + def validate! + super + help! 'A Podfile path is required.' unless @path + end + + def run + podfile = Pod::Podfile.from_file(@path) + output_pipe.puts podfile.to_hash.to_json + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/repl.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/repl.rb new file mode 100644 index 0000000..75eee94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/repl.rb @@ -0,0 +1,51 @@ +module Pod + class Command + class IPC < Command + class Repl < IPC + include ProjectDirectory + + END_OF_OUTPUT_SIGNAL = "\n\r".freeze + + self.summary = 'The repl listens to commands on standard input' + self.description = <<-DESC + The repl listens to commands on standard input and prints their + result to standard output. + It accepts all the other ipc subcommands. The repl will signal the + end of output with the the ASCII CR+LF `\\n\\r`. 
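A minimal client for that protocol might look like the sketch below. It assumes a `pod` executable on `PATH` and a `Podfile` in the working directory, sends a single `podfile` subcommand, and reads until the CR+LF end-of-output marker described above.

```ruby
# Rough sketch of a repl client (assumes `pod` is on PATH and ./Podfile exists).
END_MARKER = "\n\r".freeze

IO.popen(['pod', 'ipc', 'repl'], 'r+') do |repl|
  read_reply = lambda do
    buffer = +''
    buffer << repl.readpartial(4096) until buffer.include?(END_MARKER)
    buffer
  end

  read_reply.call                  # initial banner: version line + end-of-output marker
  repl.puts 'podfile ./Podfile'    # any other ipc subcommand could be sent here
  puts read_reply.call             # the Podfile converted to YAML
end
```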
+ DESC + + def run + print_version + signal_end_of_output + listen + end + + def print_version + output_pipe.puts "version: '#{Pod::VERSION}'" + end + + def signal_end_of_output + output_pipe.puts(END_OF_OUTPUT_SIGNAL) + STDOUT.flush + end + + def listen + while repl_command = STDIN.gets + execute_repl_command(repl_command) + end + end + + def execute_repl_command(repl_command) + unless repl_command == '\n' + repl_commands = repl_command.split + subcommand = repl_commands.shift.capitalize + arguments = repl_commands + subcommand_class = Pod::Command::IPC.const_get(subcommand) + subcommand_class.new(CLAide::ARGV.new(arguments)).run + signal_end_of_output + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/spec.rb new file mode 100644 index 0000000..9bab8b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/spec.rb @@ -0,0 +1,29 @@ +module Pod + class Command + class IPC < Command + class Spec < IPC + self.summary = 'Converts a podspec to JSON' + self.description = 'Converts a podspec to JSON and prints it to STDOUT.' + self.arguments = [ + CLAide::Argument.new('PATH', true), + ] + + def initialize(argv) + @path = argv.shift_argument + super + end + + def validate! + super + help! 'A specification path is required.' unless @path + end + + def run + require 'json' + spec = Specification.from_file(@path) + output_pipe.puts(spec.to_pretty_json) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/update_search_index.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/update_search_index.rb new file mode 100644 index 0000000..31c0ae5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/ipc/update_search_index.rb @@ -0,0 +1,24 @@ +module Pod + class Command + class IPC < Command + class UpdateSearchIndex < IPC + self.summary = 'Updates the search index' + self.description = <<-DESC + Updates the search index and prints its path to standard output. 
+ The search index is a YAML encoded dictionary where the keys + are the names of the Pods and the values are a dictionary containing + the following information: + - version + - summary + - description + - authors + DESC + + def run + config.sources_manager.updated_search_index + output_pipe.puts(config.sources_manager.search_index_path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib.rb new file mode 100644 index 0000000..72450ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib.rb @@ -0,0 +1,11 @@ +require 'cocoapods/command/lib/create' +require 'cocoapods/command/lib/lint' + +module Pod + class Command + class Lib < Command + self.abstract_command = true + self.summary = 'Develop pods' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib/create.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib/create.rb new file mode 100644 index 0000000..038f842 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib/create.rb @@ -0,0 +1,104 @@ +module Pod + class Command + class Lib < Command + class Create < Lib + self.summary = 'Creates a new Pod' + + self.description = <<-DESC + Creates a scaffold for the development of a new Pod named `NAME` + according to the CocoaPods best practices. + If a `TEMPLATE_URL`, pointing to a git repo containing a compatible + template, is specified, it will be used in place of the default one. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def self.options + [ + ['--template-url=URL', 'The URL of the git repo containing a compatible template'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @template_url = argv.option('template-url', TEMPLATE_REPO) + super + @additional_args = argv.remainder! + end + + def validate! + super + help! 'A name for the Pod is required.' unless @name + help! 'The Pod name cannot contain spaces.' if @name =~ /\s/ + help! 'The Pod name cannot contain plusses.' if @name =~ /\+/ + help! "The Pod name cannot begin with a '.'" if @name[0, 1] == '.' + end + + def run + clone_template + configure_template + print_info + end + + private + + #----------------------------------------# + + # !@group Private helpers + + extend Executable + executable :git + + TEMPLATE_REPO = 'https://github.com/CocoaPods/pod-template.git'.freeze + TEMPLATE_INFO_URL = 'https://github.com/CocoaPods/pod-template'.freeze + CREATE_NEW_POD_INFO_URL = 'https://guides.cocoapods.org/making/making-a-cocoapod'.freeze + + # Clones the template from the remote in the working directory using + # the name of the Pod. + # + # @return [void] + # + def clone_template + UI.section("Cloning `#{template_repo_url}` into `#{@name}`.") do + git! ['clone', template_repo_url, @name] + end + end + + # Runs the template configuration utilities. + # + # @return [void] + # + def configure_template + UI.section("Configuring #{@name} template.") do + Dir.chdir(@name) do + if File.exist?('configure') + system({ 'COCOAPODS_VERSION' => Pod::VERSION }, './configure', @name, *@additional_args) + else + UI.warn 'Template does not have a configure file.' + end + end + end + end + + # Runs the template configuration utilities. + # + # @return [void] + # + def print_info + UI.puts "\nTo learn more about the template see `#{template_repo_url}`." 
+ UI.puts "To learn more about creating a new pod, see `#{CREATE_NEW_POD_INFO_URL}`." + end + + # Checks if a template URL is given else returns the TEMPLATE_REPO URL + # + # @return String + # + def template_repo_url + @template_url || TEMPLATE_REPO + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib/lint.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib/lint.rb new file mode 100644 index 0000000..83963a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/lib/lint.rb @@ -0,0 +1,145 @@ +module Pod + class Command + class Lib < Command + class Lint < Lib + self.summary = 'Validates a Pod' + + self.description = <<-DESC + Validates the Pod using the files in the working directory. + DESC + + self.arguments = [ + CLAide::Argument.new('PODSPEC_PATHS', false, true), + ] + + def self.options + [ + ['--quick', 'Lint skips checks that would require to download and build the spec'], + ['--allow-warnings', 'Lint validates even if warnings are present'], + ['--subspec=NAME', 'Lint validates only the given subspec'], + ['--no-subspecs', 'Lint skips validation of subspecs'], + ['--no-clean', 'Lint leaves the build directory intact for inspection'], + ['--fail-fast', 'Lint stops on the first failing platform or subspec'], + ['--use-libraries', 'Lint uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--use-static-frameworks', 'Lint uses static frameworks during installation'], + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to pull dependent pods ' \ + "(defaults to #{Pod::TrunkSource::TRUNK_REPO_URL}). Multiple sources must be comma-delimited"], + ['--platforms=ios,macos', 'Lint against specific platforms (defaults to all platforms supported by the ' \ + 'podspec). Multiple platforms must be comma-delimited'], + ['--private', 'Lint skips checks that apply only to public specs'], + ['--swift-version=VERSION', 'The `SWIFT_VERSION` that should be used to lint the spec. ' \ + 'This takes precedence over the Swift versions specified by the spec or a `.swift-version` file'], + ['--include-podspecs=**/*.podspec', 'Additional ancillary podspecs which are used for linting via :path'], + ['--external-podspecs=**/*.podspec', 'Additional ancillary podspecs which are used for linting '\ + 'via :podspec. 
If there are --include-podspecs, then these are removed from them'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--test-specs=test-spec1,test-spec2,etc', 'List of test specs to run'], + ['--analyze', 'Validate with the Xcode Static Analysis tool'], + ['--configuration=CONFIGURATION', 'Build using the given configuration (defaults to Release)'], + ].concat(super) + end + + def initialize(argv) + @quick = argv.flag?('quick') + @allow_warnings = argv.flag?('allow-warnings') + @clean = argv.flag?('clean', true) + @fail_fast = argv.flag?('fail-fast', false) + @subspecs = argv.flag?('subspecs', true) + @only_subspec = argv.option('subspec') + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @use_static_frameworks = argv.flag?('use-static-frameworks') + @source_urls = argv.option('sources', Pod::TrunkSource::TRUNK_REPO_URL).split(',') + @platforms = argv.option('platforms', '').split(',') + @private = argv.flag?('private', false) + @swift_version = argv.option('swift-version', nil) + @include_podspecs = argv.option('include-podspecs', nil) + @external_podspecs = argv.option('external-podspecs', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @test_specs = argv.option('test-specs', nil)&.split(',') + @analyze = argv.flag?('analyze', false) + @podspecs_paths = argv.arguments! + @configuration = argv.option('configuration', nil) + super + end + + def validate! + super + end + + def run + UI.puts + podspecs_to_lint.each do |podspec| + validator = Validator.new(podspec, @source_urls, @platforms) + validator.local = true + validator.quick = @quick + validator.no_clean = !@clean + validator.fail_fast = @fail_fast + validator.allow_warnings = @allow_warnings + validator.no_subspecs = !@subspecs || @only_subspec + validator.only_subspec = @only_subspec + validator.use_frameworks = @use_frameworks + validator.use_modular_headers = @use_modular_headers + validator.use_static_frameworks = @use_static_frameworks + validator.ignore_public_only_results = @private + validator.swift_version = @swift_version + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.test_specs = @test_specs + validator.analyze = @analyze + validator.include_podspecs = @include_podspecs + validator.external_podspecs = @external_podspecs + validator.configuration = @configuration + validator.validate + + unless @clean + UI.puts "Pods workspace available at `#{validator.validation_dir}/App.xcworkspace` for inspection." + UI.puts + end + if validator.validated? + UI.puts "#{validator.spec.name} passed validation.".green + else + spec_name = podspec + spec_name = validator.spec.name if validator.spec + message = "#{spec_name} did not pass validation, due to #{validator.failure_reason}." + + if @clean + message << "\nYou can use the `--no-clean` option to inspect " \ + 'any issue.' + end + raise Informative, message + end + end + end + + private + + #----------------------------------------# + + # !@group Private helpers + + # @return [Pathname] The path of the podspec found in the current + # working directory. + # + # @raise If no podspec is found. + # @raise If multiple podspecs are found. + # + def podspecs_to_lint + if !@podspecs_paths.empty? 
+ Array(@podspecs_paths) + else + podspecs = Pathname.glob(Pathname.pwd + '*.podspec{.json,}') + if podspecs.count.zero? + raise Informative, 'Unable to find a podspec in the working ' \ + 'directory' + end + podspecs + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/list.rb new file mode 100644 index 0000000..1f5f494 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/list.rb @@ -0,0 +1,37 @@ +module Pod + class Command + class List < Command + self.summary = 'List pods' + self.description = 'Lists all available pods.' + + def self.options + [ + ['--update', 'Run `pod repo update` before listing'], + ['--stats', 'Show additional stats (like GitHub watchers and forks)'], + ].concat(super) + end + + def initialize(argv) + @update = argv.flag?('update') + @stats = argv.flag?('stats') + super + end + + def run + update_if_necessary! + + sets = config.sources_manager.aggregate.all_sets + sets.each { |set| UI.pod(set, :name_and_version) } + UI.puts "\n#{sets.count} pods were found" + end + + def update_if_necessary! + UI.section("\nUpdating Spec Repositories\n".yellow) do + Repo::Update.new(CLAide::ARGV.new([])).run + end if @update + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/options/project_directory.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/options/project_directory.rb new file mode 100644 index 0000000..d12ab33 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/options/project_directory.rb @@ -0,0 +1,36 @@ +module Pod + class Command + module Options + # Provides support for commands to take a user-specified `project directory` + # + module ProjectDirectory + module Options + def options + [ + ['--project-directory=/project/dir/', 'The path to the root of the project directory'], + ].concat(super) + end + end + + def self.included(base) + base.extend(Options) + end + + def initialize(argv) + if project_directory = argv.option('project-directory') + @project_directory = Pathname.new(project_directory).expand_path + end + config.installation_root = @project_directory + super + end + + def validate! + super + if @project_directory && !@project_directory.directory? + raise Informative, "`#{@project_directory}` is not a valid directory." + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/options/repo_update.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/options/repo_update.rb new file mode 100644 index 0000000..c6143a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/options/repo_update.rb @@ -0,0 +1,34 @@ +module Pod + class Command + module Options + # Provides support for commands to skip updating the spec repositories. + # + module RepoUpdate + module Options + def options + [ + ['--no-repo-update', 'Skip running `pod repo update` before install'], + ].concat(super) + end + end + + def self.included(base) + base.extend(Options) + end + + def repo_update?(default: false) + if @repo_update.nil? 
+ default + else + @repo_update + end + end + + def initialize(argv) + @repo_update = argv.flag?('repo-update') + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/outdated.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/outdated.rb new file mode 100644 index 0000000..3730862 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/outdated.rb @@ -0,0 +1,151 @@ +module Pod + class Command + class Outdated < Command + include RepoUpdate + include ProjectDirectory + + self.summary = 'Show outdated project dependencies' + + self.description = <<-DESC + Shows the outdated pods in the current Podfile.lock, but only those from + spec repos, not those from local/external sources. + DESC + + def self.options + [ + ['--ignore-prerelease', "Don't consider prerelease versions to be updates"], + ].concat(super) + end + + def initialize(argv) + @ignore_prerelease = argv.flag?('ignore-prerelease') + super + end + + # Run the command + # + def run + if updates.empty? + UI.puts 'No pod updates are available.'.yellow + else + UI.section 'The color indicates what happens when you run `pod update`' do + UI.puts "#{''.green}\t - Will be updated to the newest version" + UI.puts "#{''.blue}\t - Will be updated, but not to the newest version because of specified version in Podfile" + UI.puts "#{''.red}\t - Will not be updated because of specified version in Podfile" + UI.puts '' + end if ansi_output? + UI.section 'The following pod updates are available:' do + updates.each do |(name, from_version, matching_version, to_version)| + color = :blue + if matching_version == to_version + color = :green + elsif from_version == matching_version + color = :red + end + # For the specs, its necessary that to_s is called here even though it is redundant + # https://github.com/CocoaPods/CocoaPods/pull/7204#issuecomment-342512015 + UI.puts "- #{name} #{from_version.to_s.send(color)} -> #{matching_version.to_s.send(color)} " \ + "(latest version #{to_version.to_s})" # rubocop:disable Lint/StringConversionInInterpolation + end + end + end + + if deprecated_pods.any? + UI.section 'The following pods are deprecated:' do + deprecated_pods.each do |spec| + if spec.deprecated_in_favor_of + UI.puts "- #{spec.name}" \ + " (in favor of #{spec.deprecated_in_favor_of})" + else + UI.puts "- #{spec.name}" + end + end + end + end + end + + private + + def analyzer + @analyzer ||= begin + verify_podfile_exists! + Installer::Analyzer.new(config.sandbox, config.podfile, config.lockfile) + end + end + + def updates + @updates ||= begin + ensure_external_podspecs_present! + spec_sets.map do |set| + spec = set.specification + source_version = set.versions.find { |version| !@ignore_prerelease || !version.prerelease? } + pod_name = spec.root.name + lockfile_version = lockfile.version(pod_name) + if source_version > lockfile_version + matching_spec = unlocked_pods.find { |s| s.name == pod_name } + matching_version = + matching_spec ? matching_spec.version : '(unused)' + [pod_name, lockfile_version, matching_version, source_version] + end + end.compact.uniq + end + end + + def unlocked_pods + @unlocked_pods ||= begin + pods = [] + UI.titled_section('Analyzing dependencies') do + pods = Installer::Analyzer.new(config.sandbox, config.podfile). + analyze(:outdated). 
+ specs_by_target.values.flatten.uniq + end + pods + end + end + + def deprecated_pods + @deprecated_pods ||= begin + spec_sets.map(&:specification).select do |spec| + spec.deprecated || spec.deprecated_in_favor_of + end.compact.uniq + end + end + + def spec_sets + @spec_sets ||= begin + analyzer.send(:update_repositories) if repo_update?(:default => true) + aggregate = Source::Aggregate.new(analyzer.sources) + installed_pods.map do |pod_name| + aggregate.search(Dependency.new(pod_name)) + end.compact.uniq + end + end + + def installed_pods + @installed_pods ||= begin + verify_podfile_exists! + + lockfile.pod_names + end + end + + def lockfile + @lockfile ||= begin + verify_lockfile_exists! + config.lockfile + end + end + + def ensure_external_podspecs_present! + return unless config.podfile + config.podfile.dependencies.each do |dep| + next if dep.external_source.nil? + unless config.sandbox.specification(dep.root_name) + raise Informative, 'You must run `pod install` first to ensure that the ' \ + "podspec for `#{dep.root_name}` has been fetched." + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo.rb new file mode 100644 index 0000000..89078f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo.rb @@ -0,0 +1,30 @@ +require 'fileutils' +require 'cocoapods/command/repo/add' +require 'cocoapods/command/repo/add_cdn' +require 'cocoapods/command/repo/lint' +require 'cocoapods/command/repo/list' +require 'cocoapods/command/repo/push' +require 'cocoapods/command/repo/remove' +require 'cocoapods/command/repo/update' + +module Pod + class Command + class Repo < Command + self.abstract_command = true + + # @todo should not show a usage banner! + # + self.summary = 'Manage spec-repositories' + self.default_subcommand = 'list' + + #-----------------------------------------------------------------------# + + extend Executable + executable :git + + def dir + config.repos_dir + @name + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/add.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/add.rb new file mode 100644 index 0000000..5f667a6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/add.rb @@ -0,0 +1,102 @@ +module Pod + class Command + class Repo < Command + class Add < Repo + self.summary = 'Add a spec repo' + + self.description = <<-DESC + Clones `URL` in the local spec-repos directory at `#{Config.instance.repos_dir}`. The + remote can later be referred to by `NAME`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('URL', true), + CLAide::Argument.new('BRANCH', false), + ] + + def self.options + [ + ['--progress', 'Show the progress of cloning the spec repository'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @url = argv.shift_argument + @branch = argv.shift_argument + @progress = argv.flag?('progress') + super + end + + def validate! + super + unless @name && @url + help! 'Adding a repo needs a `NAME` and a `URL`.' + end + if @name == 'trunk' + raise Informative, + "Repo name `trunk` is reserved for CocoaPods' main spec repo accessed via CDN." 
+ end + end + + def run + section = "Cloning spec repo `#{@name}` from `#{@url}`" + section << " (branch `#{@branch}`)" if @branch + UI.section(section) do + create_repos_dir + clone_repo + checkout_branch + config.sources_manager.sources([dir.basename.to_s]).each(&:verify_compatibility!) + end + end + + private + + # Creates the repos directory specified in the configuration by + # `config.repos_dir`. + # + # @return [void] + # + # @raise If the directory cannot be created due to a system error. + # + def create_repos_dir + config.repos_dir.mkpath + rescue => e + raise Informative, "Could not create '#{config.repos_dir}', the CocoaPods repo cache directory.\n" \ + "#{e.class.name}: #{e.message}" + end + + # Clones the git spec-repo according to parameters passed to the + # command. + # + # @return [void] + # + def clone_repo + changes = if @progress + { :verbose => true } + else + {} + end + + config.with_changes(changes) do + Dir.chdir(config.repos_dir) do + command = ['clone', @url] + command << '--progress' if @progress + command << '--' << @name + git!(command) + end + end + end + + # Checks out the branch of the git spec-repo if provided. + # + # @return [void] + # + def checkout_branch + Dir.chdir(dir) { git!('checkout', @branch) } if @branch + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/add_cdn.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/add_cdn.rb new file mode 100644 index 0000000..53d249b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/add_cdn.rb @@ -0,0 +1,58 @@ +module Pod + class Command + class Repo < Command + class AddCDN < Repo + self.summary = 'Add a spec repo backed by a CDN' + + self.description = <<-DESC + Add `URL` to the local spec-repos directory at `#{Config.instance.repos_dir}`. The + remote can later be referred to by `NAME`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('URL', true), + ] + + def initialize(argv) + @name = argv.shift_argument + @url = argv.shift_argument + super + end + + def validate! + super + unless @name && @url + help! 'Adding a repo needs a `NAME` and a `URL`.' + end + if @name == 'master' + raise Informative, + 'To setup the master specs repo, please run `pod setup`.' + end + end + + def run + section = "Adding spec repo `#{@name}` with CDN `#{@url}`" + UI.section(section) do + save_url + config.sources_manager.sources([dir.basename.to_s]).each(&:verify_compatibility!) + end + end + + private + + # Saves the spec-repo URL to a '.url' file. + # + # @return [void] + # + def save_url + dir.mkpath + File.open(dir + '.url', 'w') { |file| file.write(@url) } + rescue => e + raise Informative, "Could not create '#{config.repos_dir}', the CocoaPods repo cache directory.\n" \ + "#{e.class.name}: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/lint.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/lint.rb new file mode 100644 index 0000000..c8763cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/lint.rb @@ -0,0 +1,82 @@ +module Pod + class Command + class Repo < Command + class Lint < Repo + self.summary = 'Validates all specs in a repo' + + self.description = <<-DESC + Lints the spec-repo `NAME`. If a directory is provided it is assumed + to be the root of a repo. 
Finally, if `NAME` is not provided this + will lint all the spec-repos known to CocoaPods. + DESC + + self.arguments = [ + CLAide::Argument.new(%w(NAME DIRECTORY), false), + ] + + def self.options + [ + ['--only-errors', 'Lint presents only the errors'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @only_errors = argv.flag?('only-errors') + super + end + + # Run the command + # + # @todo Part of this logic needs to be ported to cocoapods-core so web + # services can validate the repo. + # + # @todo add UI.print and enable print statements again. + # + def run + sources = if @name + if File.exist?(@name) + [Source.new(Pathname(@name))] + else + config.sources_manager.sources([@name]) + end + else + config.sources_manager.all + end + + sources.each do |source| + source.verify_compatibility! + UI.puts "\nLinting spec repo `#{source.name}`\n".yellow + + validator = Source::HealthReporter.new(source.repo) + validator.pre_check do |_name, _version| + UI.print '.' + end + report = validator.analyze + UI.puts + UI.puts + + report.pods_by_warning.each do |message, versions_by_name| + UI.puts "-> #{message}".yellow + versions_by_name.each { |name, versions| UI.puts " - #{name} (#{versions * ', '})" } + UI.puts + end + + report.pods_by_error.each do |message, versions_by_name| + UI.puts "-> #{message}".red + versions_by_name.each { |name, versions| UI.puts " - #{name} (#{versions * ', '})" } + UI.puts + end + + UI.puts "Analyzed #{report.analyzed_paths.count} podspecs files.\n\n" + if report.pods_by_error.count.zero? + UI.puts 'All the specs passed validation.'.green << "\n\n" + else + raise Informative, "#{report.pods_by_error.count} podspecs failed validation." + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/list.rb new file mode 100644 index 0000000..26d487e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/list.rb @@ -0,0 +1,94 @@ +module Pod + class Command + class Repo < Command + class List < Repo + self.summary = 'List repos' + + self.description = <<-DESC + List the repos from the local spec-repos directory at `#{Config.instance.repos_dir}`. + DESC + + def self.options + [['--count-only', 'Show the total number of repos']].concat(super) + end + + def initialize(argv) + @count_only = argv.flag?('count-only') + super + end + + # @output Examples: + # + # trunk + # - type: CDN + # - URL: https://cdn.cocoapods.org/ + # - path: /Users/lascorbe/.cocoapods/repos/trunk + # + # test + # - type: local copy + # - URL: file:///Users/lascorbe/.cocoapods/repos/test + # - path: /Users/lascorbe/.cocoapods/repos/test + # + def run + sources = config.sources_manager.all + print_sources(sources) unless @count_only + print_count_of_sources(sources) + end + + private + + # Pretty-prints the source at the given path. + # + # @param [Source] source + # The source repository to be printed. + # + # @return [void] + # + def print_source(source) + if source.is_a?(Pod::CDNSource) + UI.puts '- Type: CDN' + elsif source.git? + branch_name, = Executable.capture_command('git', %w(name-rev --name-only HEAD), :capture => :out, :chdir => source.repo) + branch_name.strip! + branch_name = 'unknown' if branch_name.empty? 
+ UI.puts "- Type: git (#{branch_name})" + else + UI.puts "- Type: #{source.type}" + end + + UI.puts "- URL: #{source.url}" + UI.puts "- Path: #{source.repo}" + end + + # Pretty-prints the given sources. + # + # @param [Array] sources + # The sources that should be printed. + # + # @return [void] + # + def print_sources(sources) + sources.each do |source| + UI.title source.name do + print_source(source) + end + end + UI.puts "\n" + end + + # Pretty-prints the number of sources. + # + # @param [Array] sources + # The sources whose count should be printed. + # + # @return [void] + # + def print_count_of_sources(sources) + number_of_repos = sources.length + repo_string = number_of_repos != 1 ? 'repos' : 'repo' + UI.puts "#{number_of_repos} #{repo_string}".green + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/push.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/push.rb new file mode 100644 index 0000000..f1dc0b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/push.rb @@ -0,0 +1,307 @@ +require 'tempfile' +require 'fileutils' +require 'active_support/core_ext/string/inflections' + +module Pod + class Command + class Repo < Command + class Push < Repo + self.summary = 'Push new specifications to a spec-repo' + + self.description = <<-DESC + Validates `NAME.podspec` or `*.podspec` in the current working dir, + creates a directory and version folder for the pod in the local copy of + `REPO` (#{Config.instance.repos_dir}/[REPO]), copies the podspec file into the + version directory, and finally it pushes `REPO` to its remote. + DESC + + self.arguments = [ + CLAide::Argument.new('REPO', true), + CLAide::Argument.new('NAME.podspec', false), + ] + + def self.options + [ + ['--allow-warnings', 'Allows pushing even if there are warnings'], + ['--use-libraries', 'Linter uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to pull dependent pods ' \ + '(defaults to all available repos). Multiple sources must be comma-delimited'], + ['--local-only', 'Does not perform the step of pushing REPO to its remote'], + ['--no-private', 'Lint includes checks that apply only to public repos'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--commit-message="Fix bug in pod"', 'Add custom commit message. Opens default editor if no commit ' \ + 'message is specified'], + ['--use-json', 'Convert the podspec to JSON before pushing it to the repo'], + ['--swift-version=VERSION', 'The `SWIFT_VERSION` that should be used when linting the spec. 
' \ + 'This takes precedence over the Swift versions specified by the spec or a `.swift-version` file'], + ['--no-overwrite', 'Disallow pushing that would overwrite an existing spec'], + ['--update-sources', 'Make sure sources are up-to-date before a push'], + ].concat(super) + end + + def initialize(argv) + @allow_warnings = argv.flag?('allow-warnings') + @local_only = argv.flag?('local-only') + @repo = argv.shift_argument + @source = source_for_repo + @source_urls = argv.option('sources', config.sources_manager.all.map(&:url).append(Pod::TrunkSource::TRUNK_REPO_URL).uniq.join(',')).split(',') + @update_sources = argv.flag?('update-sources') + @podspec = argv.shift_argument + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers', false) + @private = argv.flag?('private', true) + @message = argv.option('commit-message') + @commit_message = argv.flag?('commit-message', false) + @use_json = argv.flag?('use-json') + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @allow_overwrite = argv.flag?('overwrite', true) + super + end + + def validate! + super + help! 'A spec-repo name or url is required.' unless @repo + unless @source && @source.repo.directory? + raise Informative, + "Unable to find the `#{@repo}` repo. " \ + 'If it has not yet been cloned, add it via `pod repo add`.' + end + end + + def run + open_editor if @commit_message && @message.nil? + check_if_push_allowed + update_sources if @update_sources + validate_podspec_files + check_repo_status + update_repo + add_specs_to_repo + push_repo unless @local_only + end + + #---------------------------------------------------------------------# + + private + + # @!group Push sub-steps + + extend Executable + executable :git + + # Open default editor to allow users to enter commit message + # + def open_editor + return if ENV['EDITOR'].nil? + + file = Tempfile.new('cocoapods') + File.chmod(0777, file.path) + file.close + + system("#{ENV['EDITOR']} #{file.path}") + @message = File.read file.path + end + + # Temporary check to ensure that users do not push accidentally private + # specs to the master repo. + # + def check_if_push_allowed + if @source.is_a?(CDNSource) + raise Informative, 'Cannot push to a CDN source, as it is read-only.' + end + + remotes, = Executable.capture_command('git', %w(remote --verbose), :capture => :merge, :chdir => repo_dir) + master_repo_urls = [ + 'git@github.com:CocoaPods/Specs.git', + 'https://github.com/CocoaPods/Specs.git', + ] + is_master_repo = master_repo_urls.any? do |url| + remotes.include?(url) + end + + if is_master_repo + raise Informative, 'To push to the CocoaPods master repo use ' \ + "the `pod trunk push` command.\n\nIf you are using a fork of " \ + 'the master repo for private purposes we recommend to migrate ' \ + 'to a clean private repo. To disable this check remove the ' \ + 'remote pointing to the CocoaPods master repo.' + end + end + + # Performs a full lint against the podspecs. 
+ # + def validate_podspec_files + UI.puts "\nValidating #{'spec'.pluralize(count)}".yellow + podspec_files.each do |podspec| + validator = Validator.new(podspec, @source_urls) + validator.allow_warnings = @allow_warnings + validator.use_frameworks = @use_frameworks + validator.use_modular_headers = @use_modular_headers + validator.ignore_public_only_results = @private + validator.swift_version = @swift_version + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + begin + validator.validate + rescue => e + raise Informative, "The `#{podspec}` specification does not validate." \ + "\n\n#{e.message}" + end + raise Informative, "The `#{podspec}` specification does not validate." unless validator.validated? + end + end + + # Checks that the repo is clean. + # + # @raise If the repo is not clean. + # + # @todo Add specs for staged and unstaged files. + # + # @todo Gracefully handle the case where source is not under git + # source control. + # + # @return [void] + # + def check_repo_status + porcelain_status, = Executable.capture_command('git', %w(status --porcelain), :capture => :merge, :chdir => repo_dir) + clean = porcelain_status == '' + raise Informative, "The repo `#{@repo}` at #{UI.path repo_dir} is not clean" unless clean + end + + # Updates the git repo against the remote. + # + # @return [void] + # + def update_repo + UI.puts "Updating the `#{@repo}' repo\n".yellow + git!(%W(-C #{repo_dir} pull)) + end + + # Update sources if present + # + # @return [void] + # + def update_sources + return if @source_urls.nil? + @source_urls.each do |source_url| + source = config.sources_manager.source_with_name_or_url(source_url) + dir = source.specs_dir + UI.puts "Updating a source at #{dir} for #{source}" + git!(%W(-C #{dir} pull)) + end + end + + # Commits the podspecs to the source, which should be a git repo. + # + # @note The pre commit hook of the repo is skipped as the podspecs have + # already been linted. + # + # @return [void] + # + def add_specs_to_repo + UI.puts "\nAdding the #{'spec'.pluralize(count)} to the `#{@repo}' repo\n".yellow + podspec_files.each do |spec_file| + spec = Pod::Specification.from_file(spec_file) + output_path = @source.pod_path(spec.name) + spec.version.to_s + message = if @message && !@message.empty? + @message + elsif output_path.exist? + "[Fix] #{spec}" + elsif output_path.dirname.directory? + "[Update] #{spec}" + else + "[Add] #{spec}" + end + + if output_path.exist? && !@allow_overwrite + raise Informative, "#{spec} already exists and overwriting has been disabled." + end + + FileUtils.mkdir_p(output_path) + + if @use_json + json_file_name = "#{spec.name}.podspec.json" + json_file = File.join(output_path, json_file_name) + File.open(json_file, 'w') { |file| file.write(spec.to_pretty_json) } + else + FileUtils.cp(spec_file, output_path) + end + + # only commit if modified + if repo_git('status', '--porcelain').include?(spec.name) + UI.puts " - #{message}" + repo_git('add', spec.name) + repo_git('commit', '--no-verify', '-m', message) + else + UI.puts " - [No change] #{spec}" + end + end + end + + # Pushes the git repo against the remote. + # + # @return [void] + # + def push_repo + UI.puts "\nPushing the `#{@repo}' repo\n".yellow + repo_git('push', 'origin', 'HEAD') + end + + #---------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return result of calling the git! 
with args in repo_dir + # + def repo_git(*args) + git!(['-C', repo_dir] + args) + end + + # @return [Pathname] The directory of the repository. + # + def repo_dir + @source.specs_dir + end + + # @return [Array] The path of the specifications to push. + # + def podspec_files + if @podspec + path = Pathname(@podspec) + raise Informative, "Couldn't find #{@podspec}" unless path.exist? + [path] + else + files = Pathname.glob('*.podspec{,.json}') + raise Informative, "Couldn't find any podspec files in current directory" if files.empty? + files + end + end + + # @return [Integer] The number of the podspec files to push. + # + def count + podspec_files.count + end + + # Returns source for @repo + # + # @note If URL is invalid or repo doesn't exist, validate! will throw the error + # + # @return [Source] + # + def source_for_repo + config.sources_manager.source_with_name_or_url(@repo) unless @repo.nil? + rescue + nil + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/remove.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/remove.rb new file mode 100644 index 0000000..c8bb550 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/remove.rb @@ -0,0 +1,36 @@ +module Pod + class Command + class Repo < Command + class Remove < Repo + self.summary = 'Remove a spec repo' + + self.description = <<-DESC + Deletes the remote named `NAME` from the local spec-repos directory at `#{Config.instance.repos_dir}`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def validate! + super + help! 'Deleting a repo needs a `NAME`.' unless @name + help! "repo #{@name} does not exist" unless File.directory?(dir) + help! "You do not have permission to delete the #{@name} repository." \ + 'Perhaps try prefixing this command with sudo.' unless File.writable?(dir) + end + + def run + UI.section("Removing spec repo `#{@name}`") do + FileUtils.rm_rf(dir) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/update.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/update.rb new file mode 100644 index 0000000..3a68777 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/repo/update.rb @@ -0,0 +1,39 @@ +module Pod + class Command + class Repo < Command + class Update < Repo + self.summary = 'Update a spec repo' + + self.description = <<-DESC + Updates the local clone of the spec-repo `NAME`. If `NAME` is omitted + this will update all spec-repos in `#{Config.instance.repos_dir}`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', false), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def run + show_output = !config.silent? + config.sources_manager.update(@name, show_output) + exclude_repos_dir_from_backup + end + + private + + # Excludes the repos directory from backups. 
+ # + # @return [void] + # + def exclude_repos_dir_from_backup + config.exclude_from_backup(config.repos_dir) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/setup.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/setup.rb new file mode 100644 index 0000000..26f454f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/setup.rb @@ -0,0 +1,18 @@ +require 'fileutils' + +module Pod + class Command + class Setup < Command + self.summary = 'Setup the CocoaPods environment' + + self.description = <<-DESC + Setup the CocoaPods environment + DESC + + def run + # Right now, no setup is needed + UI.puts 'Setup completed'.green + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec.rb new file mode 100644 index 0000000..26d1961 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec.rb @@ -0,0 +1,121 @@ +# encoding: utf-8 + +require 'active_support/core_ext/string/inflections' +require 'cocoapods/command/spec/create' +require 'cocoapods/command/spec/lint' +require 'cocoapods/command/spec/which' +require 'cocoapods/command/spec/cat' +require 'cocoapods/command/spec/edit' + +module Pod + class Command + class Spec < Command + self.abstract_command = true + self.summary = 'Manage pod specs' + + #-----------------------------------------------------------------------# + + # @todo some of the following methods can probably move to one of the + # subclasses. + + private + + # @param [String] query the regular expression string to validate + # + # @raise if the query is not a valid regular expression + # + def validate_regex!(query) + /#{query}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + + # @param [String] spec + # The name of the specification. + # + # @param [Bool,String] version_filter + # - If set to false, will return only the spec path for the latest version (the default). + # - If set to true, will return a list of all paths of all the versions of that spec. + # - If set to a String, will return only the spec path for the version specified by that string. + # + # @return [Pathname] the absolute path or paths of the given podspec + # + def get_path_of_spec(spec, version_filter = false) + sets = config.sources_manager.search_by_name(spec) + + if sets.count == 1 + set = sets.first + elsif sets.map(&:name).include?(spec) + set = sets.find { |s| s.name == spec } + else + names = sets.map(&:name) * ', ' + raise Informative, "More than one spec found for '#{spec}':\n#{names}" + end + + if version_filter.is_a? 
String + all_paths_from_set(set, version_filter).split(/\n/).first + elsif version_filter == true + all_paths_from_set(set) + else + best_spec, spec_source = spec_and_source_from_set(set) + pathname_from_spec(best_spec, spec_source) + end + end + + # @return [Pathname] the absolute path of the given spec and source + # + def pathname_from_spec(spec, _source) + Pathname(spec.defined_in_file) + end + + # @return [String] of spec paths one on each line + # + def all_paths_from_set(set, specific_version = nil) + paths = '' + + sources = set.sources + + sources.each do |source| + versions = source.versions(set.name) + + if specific_version + versions = versions.select { |v| v.version == specific_version } + end + + versions.each do |version| + spec = source.specification(set.name, version) + paths += "#{pathname_from_spec(spec, source)}\n" + end + end + + raise Informative, "Can't find spec for #{set.name}." if paths.empty? + + paths + end + + # @return [Specification, Source] the highest known specification with it's source of the given + # set. + # + def spec_and_source_from_set(set) + sources = set.sources + + best_source = best_version = nil + sources.each do |source| + versions = source.versions(set.name) + versions.each do |version| + if !best_version || version > best_version + best_source = source + best_version = version + end + end + end + + if !best_source || !best_version + raise Informative, "Unable to locate highest known specification for `#{set.name}'" + end + + [best_source.specification(set.name, best_version), best_source] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/cat.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/cat.rb new file mode 100644 index 0000000..c3a42c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/cat.rb @@ -0,0 +1,53 @@ +module Pod + class Command + class Spec < Command + class Cat < Spec + self.summary = 'Prints a spec file' + + self.description = <<-DESC + Prints the content of the podspec(s) whose name matches `QUERY` to standard output. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', false), + ] + + def self.options + [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--show-all', 'Pick from all versions of the given podspec'], + ['--version', 'Print a specific version of the given podspec'], + ].concat(super) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @show_all = argv.flag?('show-all') + @query = argv.shift_argument + @query = @query.gsub('.podspec', '') unless @query.nil? + @version = argv.option('version') + super + end + + def validate! + super + help! 'A podspec name is required.' unless @query + validate_regex!(@query) if @use_regex + end + + def run + query = @use_regex ? @query : Regexp.escape(@query) + filepath = if @show_all + specs = get_path_of_spec(query, @show_all).split(/\n/) + index = UI.choose_from_array(specs, "Which spec would you like to print [1-#{specs.count}]? 
") + specs[index] + else + get_path_of_spec(query, @version) + end + + UI.puts File.read(filepath) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/create.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/create.rb new file mode 100644 index 0000000..35298b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/create.rb @@ -0,0 +1,283 @@ + +module Pod + class Command + class Spec < Command + class Create < Spec + self.summary = 'Create spec file stub.' + + self.description = <<-DESC + Creates a PodSpec, in the current working dir, called `NAME.podspec'. + If a GitHub url is passed the spec is prepopulated. + DESC + + self.arguments = [ + CLAide::Argument.new(%w(NAME https://github.com/USER/REPO), false), + ] + + def initialize(argv) + @name_or_url = argv.shift_argument + @url = argv.shift_argument + super + end + + def validate! + super + help! 'A pod name or repo URL is required.' unless @name_or_url + end + + def run + if repo_id_match = (@url || @name_or_url).match(%r{github.com/([^/\.]*\/[^/\.]*)\.*}) + repo_id = repo_id_match[1] + data = github_data_for_template(repo_id) + data[:name] = @name_or_url if @url + UI.puts semantic_versioning_notice(repo_id, data[:name]) if data[:version] == '0.0.1' + else + data = default_data_for_template(@name_or_url) + end + + spec = spec_template(data) + (Pathname.pwd + "#{data[:name]}.podspec").open('w') { |f| f << spec } + UI.puts "\nSpecification created at #{data[:name]}.podspec".green + end + + private + + #--------------------------------------# + + # Templates and GitHub information retrieval for spec create + # + # @todo It would be nice to have a template class that accepts options + # and uses the default ones if not provided. + # @todo The template is outdated. + + def default_data_for_template(name) + { + :name => name, + :version => '0.0.1', + :summary => "A short description of #{name}.", + :homepage => "http://EXAMPLE/#{name}", + :author_name => Executable.capture_command('git', %w(config --get user.name), :capture => :out).first.strip, + :author_email => Executable.capture_command('git', %w(config --get user.email), :capture => :out).first.strip, + :source_url => "http://EXAMPLE/#{name}.git", + :ref_type => ':tag', + :ref => '#{spec.version}', + } + end + + def github_data_for_template(repo_id) + repo = GitHub.repo(repo_id) + raise Informative, "Unable to fetch data for `#{repo_id}`" unless repo + user = GitHub.user(repo['owner']['login']) + raise Informative, "Unable to fetch data for `#{repo['owner']['login']}`" unless user + data = {} + + data[:name] = repo['name'] + data[:summary] = (repo['description'] || '').gsub(/["]/, '\"') + data[:homepage] = (repo['homepage'] && !repo['homepage'].empty?) ? repo['homepage'] : repo['html_url'] + data[:author_name] = user['name'] || user['login'] + data[:author_email] = user['email'] || 'email@address.com' + data[:source_url] = repo['clone_url'] + + data.merge suggested_ref_and_version(repo) + end + + def suggested_ref_and_version(repo) + tags = GitHub.tags(repo['html_url']).map { |tag| tag['name'] } + versions_tags = {} + tags.each do |tag| + clean_tag = tag.gsub(/^v(er)? 
?/, '') + versions_tags[Gem::Version.new(clean_tag)] = tag if Gem::Version.correct?(clean_tag) + end + version = versions_tags.keys.sort.last || '0.0.1' + data = { :version => version } + if version == '0.0.1' + branches = GitHub.branches(repo['html_url']) + master_name = repo['master_branch'] || 'master' + master = branches.find { |branch| branch['name'] == master_name } + raise Informative, "Unable to find any commits on the master branch for the repository `#{repo['html_url']}`" unless master + data[:ref_type] = ':commit' + data[:ref] = master['commit']['sha'] + else + data[:ref_type] = ':tag' + data[:ref] = versions_tags[version] + data[:ref] = '#{spec.version}' if "#{version}" == versions_tags[version] + data[:ref] = 'v#{spec.version}' if "v#{version}" == versions_tags[version] + end + data + end + + def spec_template(data) + <<-SPEC +# +# Be sure to run `pod spec lint #{data[:name]}.podspec' to ensure this is a +# valid spec and to remove all comments including this before submitting the spec. +# +# To learn more about Podspec attributes see https://guides.cocoapods.org/syntax/podspec.html +# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/ +# + +Pod::Spec.new do |spec| + + # ――― Spec Metadata ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # These will help people to find your library, and whilst it + # can feel like a chore to fill in it's definitely to your advantage. The + # summary should be tweet-length, and the description more in depth. + # + + spec.name = "#{data[:name]}" + spec.version = "#{data[:version]}" + spec.summary = "#{data[:summary]}" + + # This description is used to generate tags and improve search results. + # * Think: What does it do? Why did you write it? What is the focus? + # * Try to keep it short, snappy and to the point. + # * Write the description between the DESC delimiters below. + # * Finally, don't worry about the indent, CocoaPods strips it! + spec.description = <<-DESC + DESC + + spec.homepage = "#{data[:homepage]}" + # spec.screenshots = "www.example.com/screenshots_1.gif", "www.example.com/screenshots_2.gif" + + + # ――― Spec License ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Licensing your code is important. See https://choosealicense.com for more info. + # CocoaPods will detect a license file if there is a named LICENSE* + # Popular ones are 'MIT', 'BSD' and 'Apache License, Version 2.0'. + # + + spec.license = "MIT (example)" + # spec.license = { :type => "MIT", :file => "FILE_LICENSE" } + + + # ――― Author Metadata ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Specify the authors of the library, with email addresses. Email addresses + # of the authors are extracted from the SCM log. E.g. $ git log. CocoaPods also + # accepts just a name if you'd rather not provide an email address. + # + # Specify a social_media_url where others can refer to, for example a twitter + # profile URL. + # + + spec.author = { "#{data[:author_name]}" => "#{data[:author_email]}" } + # Or just: spec.author = "#{data[:author_name]}" + # spec.authors = { "#{data[:author_name]}" => "#{data[:author_email]}" } + # spec.social_media_url = "https://twitter.com/#{data[:author_name]}" + + # ――― Platform Specifics ――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # If this Pod runs only on iOS or OS X, then specify the platform and + # the deployment target. You can optionally include the target after the platform. 
+ # + + # spec.platform = :ios + # spec.platform = :ios, "5.0" + + # When using multiple platforms + # spec.ios.deployment_target = "5.0" + # spec.osx.deployment_target = "10.7" + # spec.watchos.deployment_target = "2.0" + # spec.tvos.deployment_target = "9.0" + + + # ――― Source Location ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Specify the location from where the source should be retrieved. + # Supports git, hg, bzr, svn and HTTP. + # + + spec.source = { :git => "#{data[:source_url]}", #{data[:ref_type]} => "#{data[:ref]}" } + + + # ――― Source Code ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # CocoaPods is smart about how it includes source code. For source files + # giving a folder will include any swift, h, m, mm, c & cpp files. + # For header files it will include any header in the folder. + # Not including the public_header_files will make all headers public. + # + + spec.source_files = "Classes", "Classes/**/*.{h,m}" + spec.exclude_files = "Classes/Exclude" + + # spec.public_header_files = "Classes/**/*.h" + + + # ――― Resources ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # A list of resources included with the Pod. These are copied into the + # target bundle with a build phase script. Anything else will be cleaned. + # You can preserve files from being cleaned, please don't preserve + # non-essential files like tests, examples and documentation. + # + + # spec.resource = "icon.png" + # spec.resources = "Resources/*.png" + + # spec.preserve_paths = "FilesToSave", "MoreFilesToSave" + + + # ――― Project Linking ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Link your library with frameworks, or libraries. Libraries do not include + # the lib prefix of their name. + # + + # spec.framework = "SomeFramework" + # spec.frameworks = "SomeFramework", "AnotherFramework" + + # spec.library = "iconv" + # spec.libraries = "iconv", "xml2" + + + # ――― Project Settings ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # If your library depends on compiler flags you can set them in the xcconfig hash + # where they will only apply to your library. If you depend on other Podspecs + # you can include multiple dependencies to ensure it works. + + # spec.requires_arc = true + + # spec.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" } + # spec.dependency "JSONKit", "~> 1.4" + +end + SPEC + end + + def semantic_versioning_notice(repo_id, repo) + <<-EOS + +#{'――― MARKDOWN TEMPLATE ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +I’ve recently added [#{repo}](https://github.com/CocoaPods/Specs/tree/master/#{repo}) to the [CocoaPods](https://github.com/CocoaPods/CocoaPods) package manager repo. + +CocoaPods is a tool for managing dependencies for OSX and iOS Xcode projects and provides a central repository for iOS/OSX libraries. This makes adding libraries to a project and updating them extremely easy and it will help users to resolve dependencies of the libraries they use. + +However, #{repo} doesn't have any version tags. I’ve added the current HEAD as version 0.0.1, but a version tag will make dependency resolution much easier. + +[Semantic version](https://semver.org) tags (instead of plain commit hashes/revisions) allow for [resolution of cross-dependencies](https://github.com/CocoaPods/Specs/wiki/Cross-dependencies-resolution-example). 
+ +In case you didn’t know this yet; you can tag the current HEAD as, for instance, version 1.0.0, like so: + +``` +$ git tag -a 1.0.0 -m "Tag release 1.0.0" +$ git push --tags +``` + +#{'――― TEMPLATE END ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +#{'[!] This repo does not appear to have semantic version tags.'.yellow} + +After commiting the specification, consider opening a ticket with the template displayed above: + - link: https://github.com/#{repo_id}/issues/new + - title: Please add semantic version tags + EOS + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/edit.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/edit.rb new file mode 100644 index 0000000..1261929 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/edit.rb @@ -0,0 +1,87 @@ +module Pod + class Command + class Spec < Command + class Edit < Spec + self.summary = 'Edit a spec file' + + self.description = <<-DESC + Opens the podspec matching `QUERY` to be edited. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', false), + ] + + def self.options + [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--show-all', 'Pick from all versions of the given podspec'], + ].concat(super) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @show_all = argv.flag?('show-all') + @query = argv.shift_argument + @query = @query.gsub('.podspec', '') unless @query.nil? + super + end + + def validate! + super + help! 'A podspec name is required.' unless @query + validate_regex!(@query) if @use_regex + end + + def run + query = @use_regex ? @query : Regexp.escape(@query) + if @show_all + specs = get_path_of_spec(query, @show_all).split(/\n/) + message = "Which spec would you like to edit [1-#{specs.count}]? " + index = UI.choose_from_array(specs, message) + filepath = specs[index] + else + filepath = get_path_of_spec(query) + end + + exec_editor(filepath.to_s) if File.exist? filepath + raise Informative, "#{filepath} doesn't exist." + end + + def which_editor + editor = ENV['EDITOR'] + # If an editor wasn't set, try to pick a sane default + return editor unless editor.nil? + + editors = [ + # Find Sublime Text 2 + 'subl', + # Find Textmate + 'mate', + # Find BBEdit / TextWrangler + 'edit', + # Find Atom + 'atom', + # Default to vim + 'vim', + ] + editor = editors.find { |e| Pod::Executable.which(e) } + return editor if editor + + raise Informative, "Failed to open editor. Set your 'EDITOR' environment variable." + end + + def exec_editor(*args) + return if args.to_s.empty? + safe_exec(which_editor, *args) + end + + def safe_exec(cmd, *args) + # This buys us proper argument quoting and evaluation + # of environment variables in the cmd parameter. + exec('/bin/sh', '-i', '-c', cmd + ' "$@"', '--', *args) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/lint.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/lint.rb new file mode 100644 index 0000000..17d587c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/lint.rb @@ -0,0 +1,150 @@ +module Pod + class Command + class Spec < Command + class Lint < Spec + self.summary = 'Validates a spec file' + + self.description = <<-DESC + Validates `NAME.podspec`. If a `DIRECTORY` is provided, it validates + the podspec files found, including subfolders. 
In case + the argument is omitted, it defaults to the current working dir. + DESC + + self.arguments = [ + CLAide::Argument.new(%w(NAME.podspec DIRECTORY http://PATH/NAME.podspec), false, true), + ] + + def self.options + [ + ['--quick', 'Lint skips checks that would require to download and build the spec'], + ['--allow-warnings', 'Lint validates even if warnings are present'], + ['--subspec=NAME', 'Lint validates only the given subspec'], + ['--no-subspecs', 'Lint skips validation of subspecs'], + ['--no-clean', 'Lint leaves the build directory intact for inspection'], + ['--fail-fast', 'Lint stops on the first failing platform or subspec'], + ['--use-libraries', 'Lint uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--use-static-frameworks', 'Lint uses static frameworks during installation'], + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to pull dependent pods ' \ + "(defaults to #{Pod::TrunkSource::TRUNK_REPO_URL}). Multiple sources must be comma-delimited"], + ['--platforms=ios,macos', 'Lint against specific platforms (defaults to all platforms supported by the ' \ + 'podspec). Multiple platforms must be comma-delimited'], + ['--private', 'Lint skips checks that apply only to public specs'], + ['--swift-version=VERSION', 'The `SWIFT_VERSION` that should be used to lint the spec. ' \ + 'This takes precedence over the Swift versions specified by the spec or a `.swift-version` file'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--test-specs=test-spec1,test-spec2,etc', 'List of test specs to run'], + ['--analyze', 'Validate with the Xcode Static Analysis tool'], + ['--configuration=CONFIGURATION', 'Build using the given configuration (defaults to Release)'], + ].concat(super) + end + + def initialize(argv) + @quick = argv.flag?('quick') + @allow_warnings = argv.flag?('allow-warnings') + @clean = argv.flag?('clean', true) + @fail_fast = argv.flag?('fail-fast', false) + @subspecs = argv.flag?('subspecs', true) + @only_subspec = argv.option('subspec') + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @use_static_frameworks = argv.flag?('use-static-frameworks') + @source_urls = argv.option('sources', Pod::TrunkSource::TRUNK_REPO_URL).split(',') + @platforms = argv.option('platforms', '').split(',') + @private = argv.flag?('private', false) + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @test_specs = argv.option('test-specs', nil)&.split(',') + @analyze = argv.flag?('analyze', false) + @podspecs_paths = argv.arguments! 
+ @configuration = argv.option('configuration', nil) + super + end + + def run + UI.puts + failure_reasons = [] + podspecs_to_lint.each do |podspec| + validator = Validator.new(podspec, @source_urls, @platforms) + validator.quick = @quick + validator.no_clean = !@clean + validator.fail_fast = @fail_fast + validator.allow_warnings = @allow_warnings + validator.no_subspecs = !@subspecs || @only_subspec + validator.only_subspec = @only_subspec + validator.use_frameworks = @use_frameworks + validator.use_modular_headers = @use_modular_headers + validator.use_static_frameworks = @use_static_frameworks + validator.ignore_public_only_results = @private + validator.swift_version = @swift_version + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.test_specs = @test_specs + validator.analyze = @analyze + validator.configuration = @configuration + validator.validate + failure_reasons << validator.failure_reason + + unless @clean + UI.puts "Pods workspace available at `#{validator.validation_dir}/App.xcworkspace` for inspection." + UI.puts + end + end + + count = podspecs_to_lint.count + UI.puts "Analyzed #{count} #{'podspec'.pluralize(count)}.\n\n" + + failure_reasons.compact! + if failure_reasons.empty? + lint_passed_message = count == 1 ? "#{podspecs_to_lint.first.basename} passed validation." : 'All the specs passed validation.' + UI.puts lint_passed_message.green << "\n\n" + else + raise Informative, if count == 1 + "The spec did not pass validation, due to #{failure_reasons.first}." + else + "#{failure_reasons.count} out of #{count} specs failed validation." + end + end + podspecs_tmp_dir.rmtree if podspecs_tmp_dir.exist? + end + + private + + def podspecs_to_lint + @podspecs_to_lint ||= begin + files = [] + @podspecs_paths << '.' if @podspecs_paths.empty? + @podspecs_paths.each do |path| + if path =~ %r{https?://} + require 'cocoapods/open-uri' + output_path = podspecs_tmp_dir + File.basename(path) + output_path.dirname.mkpath + begin + OpenURI.open_uri(path) do |io| + output_path.open('w') { |f| f << io.read } + end + rescue => e + raise Informative, "Downloading a podspec from `#{path}` failed: #{e}" + end + files << output_path + elsif (pathname = Pathname.new(path)).directory? + files += Pathname.glob(pathname + '*.podspec{.json,}') + raise Informative, 'No specs found in the current directory.' if files.empty? + else + files << (pathname = Pathname.new(path)) + raise Informative, "Unable to find a spec named `#{path}'." unless pathname.exist? 
&& path.include?('.podspec') + end + end + files + end + end + + def podspecs_tmp_dir + Pathname.new(Dir.tmpdir) + 'CocoaPods/Lint_podspec' + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/which.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/which.rb new file mode 100644 index 0000000..d52ef44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/spec/which.rb @@ -0,0 +1,45 @@ +module Pod + class Command + class Spec < Command + class Which < Spec + self.summary = 'Prints the path of the given spec' + + self.description = <<-DESC + Prints the path of the .podspec file(s) whose name matches `QUERY` + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', false), + ] + + def self.options + [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--show-all', 'Print all versions of the given podspec'], + ['--version', 'Print a specific version of the given podspec'], + ].concat(super) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @show_all = argv.flag?('show-all') + @version = argv.option('version') + @query = argv.shift_argument + @query = @query.gsub('.podspec', '') unless @query.nil? + super + end + + def validate! + super + help! 'A podspec name is required.' unless @query + validate_regex!(@query) if @use_regex + end + + def run + query = @use_regex ? @query : Regexp.escape(@query) + UI.puts get_path_of_spec(query, @show_all || @version) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/update.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/update.rb new file mode 100644 index 0000000..d1d1940 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/command/update.rb @@ -0,0 +1,104 @@ +module Pod + class Command + class Update < Command + include RepoUpdate + include ProjectDirectory + + self.summary = 'Update outdated project dependencies and create new ' \ + 'Podfile.lock' + + self.description = <<-DESC + Updates the Pods identified by the specified `POD_NAMES`, which is a + space-delimited list of pod names. If no `POD_NAMES` are specified, it + updates all the Pods, ignoring the contents of the Podfile.lock. This + command is reserved for the update of dependencies; pod install should + be used to install changes to the Podfile. + DESC + + self.arguments = [ + CLAide::Argument.new('POD_NAMES', false, true), + ] + + def self.options + [ + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to update dependent pods. ' \ + 'Multiple sources must be comma-delimited'], + ['--exclude-pods=podName', 'Pods to exclude during update. Multiple pods must be comma-delimited'], + ['--clean-install', 'Ignore the contents of the project cache and force a full pod installation. This only ' \ + 'applies to projects that have enabled incremental installation'], + ].concat(super) + end + + def initialize(argv) + @pods = argv.arguments! + + @source_urls = argv.option('sources', '').split(',') + @excluded_pods = argv.option('exclude-pods', '').split(',') + @clean_install = argv.flag?('clean-install', false) + @source_pods = @source_urls.flat_map { |url| config.sources_manager.source_with_name_or_url(url).pods } + + super + end + + def run + verify_podfile_exists! 
+ + installer = installer_for_config + installer.repo_update = repo_update?(:default => true) + installer.clean_install = @clean_install + if @pods.any? || @excluded_pods.any? || @source_pods.any? + verify_lockfile_exists! + verify_pods_are_installed! + verify_excluded_pods_are_installed! + + @pods += @source_pods.select { |pod| config.lockfile.pod_names.include?(pod) } + @pods = config.lockfile.pod_names.dup if @pods.empty? + @pods -= @excluded_pods + + installer.update = { :pods => @pods } + else + UI.puts 'Update all pods'.yellow + installer.update = true + end + installer.install! + end + + private + + # Check if all given pods are installed + # + def verify_pods_are_installed! + missing_pods = lockfile_missing_pods(@pods) + + unless missing_pods.empty? + message = if missing_pods.length > 1 + "Pods `#{missing_pods.join('`, `')}` are not " \ + 'installed and cannot be updated' + else + "The `#{missing_pods.first}` Pod is not installed " \ + 'and cannot be updated' + end + raise Informative, message + end + end + + # Check if excluded pods are installed + # + def verify_excluded_pods_are_installed! + missing_pods = lockfile_missing_pods(@excluded_pods) + + unless missing_pods.empty? + pluralized_words = missing_pods.length > 1 ? %w(Pods are) : %w(Pod is) + message = "Trying to skip `#{missing_pods.join('`, `')}` #{pluralized_words.first} " \ + "which #{pluralized_words.last} not installed" + raise Informative, message + end + end + + def lockfile_missing_pods(pods) + lockfile_roots = config.lockfile.pod_names.map { |pod| Specification.root_name(pod) } + pods.map { |pod| Specification.root_name(pod) }.uniq - lockfile_roots + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/config.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/config.rb new file mode 100644 index 0000000..2bb7f7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/config.rb @@ -0,0 +1,366 @@ +require 'active_support/multibyte/unicode' + +module Pod + # Stores the global configuration of CocoaPods. + # + class Config + # The default settings for the configuration. + # + # Users can specify custom settings in `~/.cocoapods/config.yaml`. + # An example of the contents of this file might look like: + # + # --- + # skip_repo_update: true + # new_version_message: false + # + DEFAULTS = { + :verbose => false, + :silent => false, + :skip_download_cache => !ENV['COCOAPODS_SKIP_CACHE'].nil?, + + :new_version_message => ENV['COCOAPODS_SKIP_UPDATE_MESSAGE'].nil?, + + :cache_root => Pathname.new(Dir.home) + 'Library/Caches/CocoaPods', + } + + # Applies the given changes to the config for the duration of the given + # block. + # + # @param [Hash<#to_sym,Object>] changes + # the changes to merge temporarily with the current config + # + # @yield [] is called while the changes are applied + # + def with_changes(changes) + old = {} + changes.keys.each do |key| + key = key.to_sym + old[key] = send(key) if respond_to?(key) + end + configure_with(changes) + yield if block_given? + ensure + configure_with(old) + end + + public + + #-------------------------------------------------------------------------# + + # @!group UI + + # @return [Bool] Whether CocoaPods should provide detailed output about the + # performed actions. + # + attr_accessor :verbose + alias_method :verbose?, :verbose + + # @return [Bool] Whether CocoaPods should produce not output. 
+ # + attr_accessor :silent + alias_method :silent?, :silent + + # @return [Bool] Whether CocoaPods is allowed to run as root. + # + attr_accessor :allow_root + alias_method :allow_root?, :allow_root + + # @return [Bool] Whether a message should be printed when a new version of + # CocoaPods is available. + # + attr_accessor :new_version_message + alias_method :new_version_message?, :new_version_message + + #-------------------------------------------------------------------------# + + # @!group Installation + + # @return [Bool] Whether the installer should skip the download cache. + # + attr_accessor :skip_download_cache + alias_method :skip_download_cache?, :skip_download_cache + + public + + #-------------------------------------------------------------------------# + + # @!group Cache + + # @return [Pathname] The directory where CocoaPods should cache remote data + # and other expensive to compute information. + # + attr_accessor :cache_root + + def cache_root + @cache_root.mkpath unless @cache_root.exist? + @cache_root + end + + public + + #-------------------------------------------------------------------------# + + # @!group Initialization + + def initialize(use_user_settings = true) + configure_with(DEFAULTS) + + unless ENV['CP_HOME_DIR'].nil? + @cache_root = home_dir + 'cache' + end + + if use_user_settings && user_settings_file.exist? + require 'yaml' + user_settings = YAML.load_file(user_settings_file) + configure_with(user_settings) + end + + unless ENV['CP_CACHE_DIR'].nil? + @cache_root = Pathname.new(ENV['CP_CACHE_DIR']).expand_path + end + end + + def verbose + @verbose && !silent + end + + public + + #-------------------------------------------------------------------------# + + # @!group Paths + + # @return [Pathname] the directory where repos, templates and configuration + # files are stored. + # + def home_dir + @home_dir ||= Pathname.new(ENV['CP_HOME_DIR'] || '~/.cocoapods').expand_path + end + + # @return [Pathname] the directory where the CocoaPods sources are stored. + # + def repos_dir + @repos_dir ||= Pathname.new(ENV['CP_REPOS_DIR'] || (home_dir + 'repos')).expand_path + end + + attr_writer :repos_dir + + # @return [Source::Manager] the source manager for the spec repos in `repos_dir` + # + def sources_manager + return @sources_manager if @sources_manager && @sources_manager.repos_dir == repos_dir + @sources_manager = Source::Manager.new(repos_dir) + end + + # @return [Pathname] the directory where the CocoaPods templates are stored. + # + def templates_dir + @templates_dir ||= Pathname.new(ENV['CP_TEMPLATES_DIR'] || (home_dir + 'templates')).expand_path + end + + # @return [Pathname] the root of the CocoaPods installation where the + # Podfile is located. + # + def installation_root + @installation_root ||= begin + current_dir = Pathname.new(Dir.pwd.unicode_normalize(:nfkc)) + current_path = current_dir + until current_path.root? + if podfile_path_in_dir(current_path) + installation_root = current_path + unless current_path == current_dir + UI.puts("[in #{current_path}]") + end + break + else + current_path = current_path.parent + end + end + installation_root || current_dir + end + end + + attr_writer :installation_root + alias_method :project_root, :installation_root + + # @return [Pathname] The root of the sandbox. + # + def sandbox_root + @sandbox_root ||= installation_root + 'Pods' + end + + attr_writer :sandbox_root + alias_method :project_pods_root, :sandbox_root + + # @return [Sandbox] The sandbox of the current project. 
+ # + def sandbox + @sandbox ||= Sandbox.new(sandbox_root) + end + + # @return [Podfile] The Podfile to use for the current execution. + # @return [Nil] If no Podfile is available. + # + def podfile + @podfile ||= Podfile.from_file(podfile_path) if podfile_path + end + attr_writer :podfile + + # @return [Lockfile] The Lockfile to use for the current execution. + # @return [Nil] If no Lockfile is available. + # + def lockfile + @lockfile ||= Lockfile.from_file(lockfile_path) if lockfile_path + end + + # Returns the path of the Podfile. + # + # @note The Podfile can be named either `CocoaPods.podfile.yaml`, + # `CocoaPods.podfile` or `Podfile`. The first two are preferred as + # they allow to specify an OS X UTI. + # + # @return [Pathname] + # @return [Nil] + # + def podfile_path + @podfile_path ||= podfile_path_in_dir(installation_root) + end + + # Returns the path of the Lockfile. + # + # @note The Lockfile is named `Podfile.lock`. + # + def lockfile_path + @lockfile_path ||= installation_root + 'Podfile.lock' + end + + # Returns the path of the default Podfile pods. + # + # @note The file is expected to be named Podfile.default + # + # @return [Pathname] + # + def default_podfile_path + @default_podfile_path ||= templates_dir + 'Podfile.default' + end + + # Returns the path of the default Podfile test pods. + # + # @note The file is expected to be named Podfile.test + # + # @return [Pathname] + # + def default_test_podfile_path + @default_test_podfile_path ||= templates_dir + 'Podfile.test' + end + + # @return [Pathname] The file to use to cache the search data. + # + def search_index_file + cache_root + 'search_index.json' + end + + private + + #-------------------------------------------------------------------------# + + # @!group Private helpers + + # @return [Pathname] The path of the file which contains the user settings. + # + def user_settings_file + home_dir + 'config.yaml' + end + + # Sets the values of the attributes with the given hash. + # + # @param [Hash{String,Symbol => Object}] values_by_key + # The values of the attributes grouped by key. + # + # @return [void] + # + def configure_with(values_by_key) + return unless values_by_key + values_by_key.each do |key, value| + if key.to_sym == :cache_root + value = Pathname.new(value).expand_path + end + instance_variable_set("@#{key}", value) + end + end + + # @return [Array] The filenames that the Podfile can have ordered + # by priority. + # + PODFILE_NAMES = [ + 'CocoaPods.podfile.yaml', + 'CocoaPods.podfile', + 'Podfile', + 'Podfile.rb', + ].freeze + + public + + # Returns the path of the Podfile in the given dir if any exists. + # + # @param [Pathname] dir + # The directory where to look for the Podfile. + # + # @return [Pathname] The path of the Podfile. + # @return [Nil] If not Podfile was found in the given dir + # + def podfile_path_in_dir(dir) + PODFILE_NAMES.each do |filename| + candidate = dir + filename + if candidate.file? + return candidate + end + end + nil + end + + # Excludes the given dir from Time Machine backups. + # + # @param [Pathname] dir + # The directory to exclude from Time Machine backups. + # + # @return [void] + # + def exclude_from_backup(dir) + return if Gem.win_platform? + system('tmutil', 'addexclusion', dir.to_s, %i(out err) => File::NULL) + end + + public + + #-------------------------------------------------------------------------# + + # @!group Singleton + + # @return [Config] the current config instance creating one if needed. 
+ # + def self.instance + @instance ||= new + end + + # Sets the current config instance. If set to nil the config will be + # recreated when needed. + # + # @param [Config, Nil] the instance. + # + # @return [void] + # + class << self + attr_writer :instance + end + + # Provides support for accessing the configuration instance in other + # scopes. + # + module Mixin + def config + Config.instance + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/core_overrides.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/core_overrides.rb new file mode 100644 index 0000000..3daebdf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/core_overrides.rb @@ -0,0 +1 @@ +require 'cocoapods/sources_manager' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader.rb new file mode 100644 index 0000000..c36619f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader.rb @@ -0,0 +1,192 @@ +require 'cocoapods-downloader' +require 'claide/informative_error' +require 'fileutils' +require 'tmpdir' + +module Pod + module Downloader + require 'cocoapods/downloader/cache' + require 'cocoapods/downloader/request' + require 'cocoapods/downloader/response' + + # Downloads a pod from the given `request` to the given `target` location. + # + # @return [Response] The download response for this download. + # + # @param [Request] request + # the request that describes this pod download. + # + # @param [Pathname,Nil] target + # the location to which this pod should be downloaded. If `nil`, + # then the pod will only be cached. + # + # @param [Boolean] can_cache + # whether caching is allowed. + # + # @param [Pathname,Nil] cache_path + # the path used to cache pod downloads. + # + def self.download( + request, + target, + can_cache: true, + cache_path: Config.instance.cache_root + 'Pods' + ) + can_cache &&= !Config.instance.skip_download_cache + + request = preprocess_request(request) + + if can_cache + raise ArgumentError, 'Must provide a `cache_path` when caching.' unless cache_path + cache = Cache.new(cache_path) + result = cache.download_pod(request) + else + raise ArgumentError, 'Must provide a `target` when caching is disabled.' unless target + + require 'cocoapods/installer/pod_source_preparer' + result, = download_request(request, target) + Installer::PodSourcePreparer.new(result.spec, result.location).prepare! + end + + if target && result.location && target != result.location + UI.message "Copying #{request.name} from `#{result.location}` to #{UI.path target}", '> ' do + Cache.read_lock(result.location) do + FileUtils.rm_rf target + FileUtils.cp_r(result.location, target) + end + end + end + result + end + + # Performs the download from the given `request` to the given `target` location. + # + # @return [Response, Hash] + # The download response for this download, and the specifications + # for this download grouped by name. + # + # @param [Request] request + # the request that describes this pod download. + # + # @param [Pathname,Nil] target + # the location to which this pod should be downloaded. If `nil`, + # then the pod will only be cached. + # + def self.download_request(request, target) + result = Response.new + result.checkout_options = download_source(target, request.params) + result.location = target + + if request.released_pod? 
+ result.spec = request.spec + podspecs = { request.name => request.spec } + else + podspecs = Sandbox::PodspecFinder.new(target).podspecs + podspecs[request.name] = request.spec if request.spec + podspecs.each do |name, spec| + if request.name == name + result.spec = spec + end + end + end + + [result, podspecs] + end + + private + + # Downloads a pod with the given `params` to `target`. + # + # @param [Pathname] target + # + # @param [Hash] params + # + # @return [Hash] The checkout options required to re-download this exact + # same source. + # + def self.download_source(target, params) + FileUtils.rm_rf(target) + downloader = Downloader.for_target(target, params) + downloader.download + target.mkpath + + if downloader.options_specific? + params + else + downloader.checkout_options + end + end + + # Return a new request after preprocessing by the downloader + # + # @param [Request] request + # the request that needs preprocessing + # + # @return [Request] the preprocessed request + # + def self.preprocess_request(request) + Request.new( + :spec => request.spec, + :released => request.released_pod?, + :name => request.name, + :params => Downloader.preprocess_options(request.params)) + end + + public + + class DownloaderError; include CLAide::InformativeError; end + + class Base + override_api do + def execute_command(executable, command, raise_on_failure = false) + Executable.execute_command(executable, command, raise_on_failure) + rescue CLAide::InformativeError => e + raise DownloaderError, e.message + end + + # Indicates that an action will be performed. The action is passed as a + # block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_action(message) + UI.section(" > #{message}", '', 1) do + yield + end + end + + # Indicates that a minor action will be performed. The action is passed + # as a block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_sub_action(message) + UI.section(" > #{message}", '', 2) do + yield + end + end + + # Prints an UI message. + # + # @param [String] message + # The message associated with the action. + # + # @return [void] + # + def ui_message(message) + UI.puts message + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/cache.rb new file mode 100644 index 0000000..5d990e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/cache.rb @@ -0,0 +1,322 @@ +require 'fileutils' +require 'tmpdir' + +module Pod + module Downloader + # The class responsible for managing Pod downloads, transparently caching + # them in a cache directory. + # + class Cache + # @return [Pathname] The root directory where this cache store its + # downloads. + # + attr_reader :root + + # Initialize a new instance + # + # @param [Pathname,String] root + # see {#root} + # + def initialize(root) + @root = Pathname(root) + ensure_matching_version + end + + # Downloads the Pod from the given `request` + # + # @param [Request] request + # the request to be downloaded. + # + # @return [Response] the response from downloading `request` + # + def download_pod(request) + cached_pod(request) || uncached_pod(request) + rescue Informative + raise + rescue + UI.puts("\n[!] 
Error installing #{request.name}".red) + raise + end + + # @return [Hash>] + # A hash whose keys are the pod name + # And values are a hash with the following keys: + # :spec_file : path to the spec file + # :name : name of the pod + # :version : pod version + # :release : boolean to tell if that's a release pod + # :slug : the slug path where the pod cache is located + # + def cache_descriptors_per_pod + specs_dir = root + 'Specs' + release_specs_dir = specs_dir + 'Release' + return {} unless specs_dir.exist? + + spec_paths = specs_dir.find.select { |f| f.fnmatch('*.podspec.json') } + spec_paths.reduce({}) do |hash, spec_path| + spec = Specification.from_file(spec_path) + hash[spec.name] ||= [] + is_release = spec_path.to_s.start_with?(release_specs_dir.to_s) + request = Downloader::Request.new(:spec => spec, :released => is_release) + hash[spec.name] << { + :spec_file => spec_path, + :name => spec.name, + :version => spec.version, + :release => is_release, + :slug => root + request.slug, + } + hash + end + end + + # Convenience method for acquiring a shared lock to safely read from the + # cache. See `Cache.lock` for more details. + # + # @param [Pathname] location + # the path to require a lock for. + # + # @param [block] &block + # the block to execute inside the lock. + # + # @return [void] + # + def self.read_lock(location, &block) + Cache.lock(location, File::LOCK_SH, &block) + end + + # Convenience method for acquiring an exclusive lock to safely write to + # the cache. See `Cache.lock` for more details. + # + # @param [Pathname] location + # the path to require a lock for. + # + # @param [block] &block + # the block to execute inside the lock. + # + # @return [void] + # + def self.write_lock(location, &block) + Cache.lock(location, File::LOCK_EX, &block) + end + + # Creates a .lock file at `location`, aquires a lock of type + # `lock_type`, checks that it is valid, and executes passed block while + # holding on to that lock. Afterwards, the .lock file is deleted, which is + # why validation of the lock is necessary, as you might have a lock on a + # file that doesn't exist on the filesystem anymore. + # + # @param [Pathname] location + # the path to require a lock for. + # + # @param [locking_constant] lock_type + # the type of lock, either exclusive (File::LOCK_EX) or shared + # (File::LOCK_SH). + # + # @return [void] + # + def self.lock(location, lock_type) + raise ArgumentError, 'no block given' unless block_given? + lockfile = "#{location}.lock" + f = nil + loop do + f.close if f + f = File.open(lockfile, File::CREAT, 0o644) + f.flock(lock_type) + break if Cache.valid_lock?(f, lockfile) + end + begin + yield location + ensure + if lock_type == File::LOCK_SH + f.flock(File::LOCK_EX) + File.delete(lockfile) if Cache.valid_lock?(f, lockfile) + else + File.delete(lockfile) + end + f.close + end + end + + # Checks that the lock is on a file that still exists on the filesystem. + # + # @param [File] file + # the actual file that we have a lock for. + # + # @param [String] filename + # the filename of the file that we have a lock for. + # + # @return [Boolean] + # true if `filename` still exists and is the same file as `file` + # + def self.valid_lock?(file, filename) + file.stat.ino == File.stat(filename).ino + rescue Errno::ENOENT + false + end + + private + + # Ensures the cache on disk was created with the same CocoaPods version as + # is currently running. 
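
Cache.lock above combines flock with an inode check so that a .lock file that was deleted and recreated by another process is never trusted. A simplified standalone sketch of that protocol (paths are illustrative, and the shared/exclusive cleanup distinction is omitted):

require 'fileutils'

def with_file_lock(location, lock_type)
  lockfile = "#{location}.lock"
  f = nil
  loop do
    f.close if f
    f = File.open(lockfile, File::CREAT, 0o644)
    f.flock(lock_type)
    # Only trust the lock if the path still points at the file we locked.
    valid = begin
      f.stat.ino == File.stat(lockfile).ino
    rescue Errno::ENOENT
      false
    end
    break if valid
  end
  yield location
ensure
  if f
    File.delete(lockfile) if File.exist?(lockfile)
    f.close
  end
end

FileUtils.mkdir_p('/tmp/demo-cache')
with_file_lock('/tmp/demo-cache/Pods', File::LOCK_EX) do |path|
  puts "exclusive access to #{path}"
end
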
+ # + # @return [Void] + # + def ensure_matching_version + version_file = root + 'VERSION' + version = version_file.read.strip if version_file.file? + + root.rmtree if version != Pod::VERSION && root.exist? + root.mkpath + + version_file.open('w') { |f| f << Pod::VERSION } + end + + # @param [Request] request + # the request to be downloaded. + # + # @param [Hash] slug_opts + # the download options that should be used in constructing the + # cache slug for this request. + # + # @return [Pathname] The path for the Pod downloaded from the given + # `request`. + # + def path_for_pod(request, slug_opts = {}) + root + request.slug(**slug_opts) + end + + # @param [Request] request + # the request to be downloaded. + # + # @param [Hash] slug_opts + # the download options that should be used in constructing the + # cache slug for this request. + # + # @return [Pathname] The path for the podspec downloaded from the given + # `request`. + # + def path_for_spec(request, slug_opts = {}) + path = root + 'Specs' + request.slug(**slug_opts) + Pathname.new(path.to_path + '.podspec.json') + end + + # @param [Request] request + # the request to be downloaded. + # + # @return [Response] The download response for the given `request` that + # was found in the download cache. + # + def cached_pod(request) + cached_spec = cached_spec(request) + path = path_for_pod(request) + + return unless cached_spec && path.directory? + spec = request.spec || cached_spec + Response.new(path, spec, request.params) + end + + # @param [Request] request + # the request to be downloaded. + # + # @return [Specification] The cached specification for the given + # `request`. + # + def cached_spec(request) + path = path_for_spec(request) + path.file? && Specification.from_file(path) + rescue JSON::ParserError + nil + end + + # @param [Request] request + # the request to be downloaded. + # + # @return [Response] The download response for the given `request` that + # was not found in the download cache. + # + def uncached_pod(request) + in_tmpdir do |target| + result, podspecs = download(request, target) + result.location = nil + + podspecs.each do |name, spec| + destination = path_for_pod(request, :name => name, :params => result.checkout_options) + copy_and_clean(target, destination, spec) + write_spec(spec, path_for_spec(request, :name => name, :params => result.checkout_options)) + if request.name == name + result.location = destination + end + end + + result + end + end + + def download(request, target) + Downloader.download_request(request, target) + end + + # Performs the given block inside a temporary directory, + # which is removed at the end of the block's scope. + # + # @return [Object] The return value of the given block + # + def in_tmpdir(&blk) + tmpdir = Pathname(Dir.mktmpdir) + blk.call(tmpdir) + ensure + FileUtils.remove_entry(tmpdir, :force => true) if tmpdir && tmpdir.exist? + end + + # Copies the `source` directory to `destination`, cleaning the directory + # of any files unused by `spec`. + # + # @param [Pathname] source + # + # @param [Pathname] destination + # + # @param [Specification] spec + # + # @return [Void] + # + def copy_and_clean(source, destination, spec) + specs_by_platform = group_subspecs_by_platform(spec) + destination.parent.mkpath + Cache.write_lock(destination) do + FileUtils.rm_rf(destination) + FileUtils.cp_r(source, destination) + Pod::Installer::PodSourcePreparer.new(spec, destination).prepare! + Sandbox::PodDirCleaner.new(destination, specs_by_platform).clean! 
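
ensure_matching_version above wipes the cache whenever it was written by a different CocoaPods version, using a VERSION stamp file. A standalone sketch of that idea, with an illustrative cache path:

require 'pathname'

def ensure_cache_version(root, current_version)
  root = Pathname(root)
  version_file = root + 'VERSION'
  on_disk = version_file.read.strip if version_file.file?

  # Throw the whole cache away if it was produced by another version.
  root.rmtree if on_disk != current_version && root.exist?
  root.mkpath
  version_file.open('w') { |f| f << current_version }
end

ensure_cache_version('/tmp/demo-pod-cache', '1.11.2')
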
+ end + end + + def group_subspecs_by_platform(spec) + specs_by_platform = {} + [spec, *spec.recursive_subspecs].each do |ss| + ss.available_platforms.each do |platform| + specs_by_platform[platform] ||= [] + specs_by_platform[platform] << ss + end + end + specs_by_platform + end + + # Writes the given `spec` to the given `path`. + # + # @param [Specification] spec + # the specification to be written. + # + # @param [Pathname] path + # the path the specification is to be written to. + # + # @return [Void] + # + def write_spec(spec, path) + path.dirname.mkpath + Cache.write_lock(path) do + path.open('w') { |f| f.write spec.to_pretty_json } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/request.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/request.rb new file mode 100644 index 0000000..10229aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/request.rb @@ -0,0 +1,86 @@ +require 'digest' + +module Pod + module Downloader + # This class represents a download request for a given Pod. + # + class Request + # @return [Specification,Nil] The specification for the pod whose download + # is being requested. + # + attr_reader :spec + + # @return [Boolean] Whether this download request is for a released pod. + # + attr_reader :released_pod + alias_method :released_pod?, :released_pod + + # @return [String] The name of the pod whose dowload is being requested. + # + attr_reader :name + + # @return [Hash] The download parameters for this request. + # + attr_reader :params + + # Initialize a new instance + # + # @param [Specification,Nil] spec + # see {#spec} + # + # @param [Boolean] released + # see {#released_pod} + # + # @param [String,Nil] name + # see {#name} + # + # @param [Hash,Nil] params + # see {#params} + # + def initialize(spec: nil, released: false, name: nil, params: false) + @released_pod = released + @spec = spec + @params = spec ? (spec.source && spec.source.dup) : params + @name = spec ? spec.name : name + + validate! + end + + # @param [String] name + # the name of the pod being downloaded. + # + # @param [Hash<#to_s, #to_s>] params + # the download parameters of the pod being downloaded. + # + # @param [Specification] spec + # the specification of the pod being downloaded. + # + # @return [String] The slug used to store the files resulting from this + # download request. + # + def slug(name: self.name, params: self.params, spec: self.spec) + checksum = spec && spec.checksum && '-' << spec.checksum[0, 5] + if released_pod? + "Release/#{name}/#{spec.version}#{checksum}" + else + opts = params.to_a.sort_by(&:first).map { |k, v| "#{k}=#{v}" }.join('-') + digest = Digest::MD5.hexdigest(opts) + "External/#{name}/#{digest}#{checksum}" + end + end + + private + + # Validates that the given request is well-formed. + # + # @return [Void] + # + def validate! + raise ArgumentError, 'Requires a name' unless name + raise ArgumentError, 'Must give a spec for a released download request' if released_pod? && !spec + raise ArgumentError, 'Requires a version if released' if released_pod? 
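
Request#slug above derives a stable cache location for an external-source download by hashing the sorted download parameters. A standalone sketch of that digest computation (the pod name and git parameters are illustrative):

require 'digest'

name   = 'Zephyr'
params = { :git => 'https://github.com/ArtSabintsev/Zephyr.git', :tag => '3.6.3' }

# Sorting the params first means equivalent hashes always map to the same slug.
opts   = params.to_a.sort_by(&:first).map { |k, v| "#{k}=#{v}" }.join('-')
digest = Digest::MD5.hexdigest(opts)
puts "External/#{name}/#{digest}"   # e.g. External/Zephyr/<32-char hex digest>
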
&& !spec.version + raise ArgumentError, 'Requires params' unless params + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/response.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/response.rb new file mode 100644 index 0000000..9319b8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/downloader/response.rb @@ -0,0 +1,16 @@ +module Pod + module Downloader + # A response to a download request. + # + # @attr [Pathname] location + # the location where this downloaded pod is stored on disk. + # + # @attr [Specification] spec + # the specification that describes this downloaded pod. + # + # @attr [Hash] checkout_options + # the downloader parameters necessary to recreate this exact download. + # + Response = Struct.new(:location, :spec, :checkout_options) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/executable.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/executable.rb new file mode 100644 index 0000000..49cd27e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/executable.rb @@ -0,0 +1,247 @@ +module Pod + # Module which provides support for running executables. + # + # In a class it can be used as: + # + # extend Executable + # executable :git + # + # This will create two methods `git` and `git!` both accept a command but + # the later will raise on non successful executions. The methods return the + # output of the command. + # + module Executable + # Creates the methods for the executable with the given name. + # + # @param [Symbol] name + # the name of the executable. + # + # @return [void] + # + def executable(name) + define_method(name) do |*command| + Executable.execute_command(name, Array(command).flatten, false) + end + + define_method(name.to_s + '!') do |*command| + Executable.execute_command(name, Array(command).flatten, true) + end + end + + # Executes the given command displaying it if in verbose mode. + # + # @param [String] executable + # The binary to use. + # + # @param [Array<#to_s>] command + # The command to send to the binary. + # + # @param [Bool] raise_on_failure + # Whether it should raise if the command fails. + # + # @raise If the executable could not be located. + # + # @raise If the command fails and the `raise_on_failure` is set to true. + # + # @return [String] the output of the command (STDOUT and STDERR). + # + def self.execute_command(executable, command, raise_on_failure = true) + bin = which!(executable) + + command = command.map(&:to_s) + if File.basename(bin) == 'tar.exe' + # Tar on Windows needs --force-local + command.push('--force-local') + end + full_command = "#{bin} #{command.join(' ')}" + + if Config.instance.verbose? + UI.message("$ #{full_command}") + stdout = Indenter.new(STDOUT) + stderr = Indenter.new(STDERR) + else + stdout = Indenter.new + stderr = Indenter.new + end + + status = popen3(bin, command, stdout, stderr) + stdout = stdout.join + stderr = stderr.join + output = stdout + stderr + unless status.success? + if raise_on_failure + raise Informative, "#{full_command}\n\n#{output}" + else + UI.message("[!] Failed: #{full_command}".red) + end + end + + output + end + + # Returns the absolute path to the binary with the given name on the current + # `PATH`, or `nil` if none is found. + # + # @param [String] program + # The name of the program being searched for. 
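
The Executable module above turns `executable :git` into a pair of instance methods, one lenient and one raising. A minimal standalone sketch of that class-macro idea (TinyExecutable and Repo are illustrative names, and `system` stands in for the richer execute_command):

module TinyExecutable
  def executable(name)
    define_method(name) do |*args|
      system(name.to_s, *args.flatten.map(&:to_s))
    end
    define_method("#{name}!") do |*args|
      system(name.to_s, *args.flatten.map(&:to_s)) or
        raise "Command failed: #{name} #{args.flatten.join(' ')}"
    end
  end
end

class Repo
  extend TinyExecutable
  executable :git
end

Repo.new.git('--version')   # => true/false
Repo.new.git!('--version')  # raises if git exits non-zero
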
+ # + # @return [String,Nil] The absolute path to the given program, or `nil` if + # it wasn't found in the current `PATH`. + # + def self.which(program) + program = program.to_s + paths = ENV.fetch('PATH') { '' }.split(File::PATH_SEPARATOR) + paths.unshift('./') + paths.uniq! + paths.each do |path| + bin = File.expand_path(program, path) + if Gem.win_platform? + bin += '.exe' + end + if File.file?(bin) && File.executable?(bin) + return bin + end + end + nil + end + + # Returns the absolute path to the binary with the given name on the current + # `PATH`, or raises if none is found. + # + # @param [String] program + # The name of the program being searched for. + # + # @return [String] The absolute path to the given program. + # + def self.which!(program) + which(program).tap do |bin| + raise Informative, "Unable to locate the executable `#{program}`" unless bin + end + end + + # Runs the given command, capturing the desired output. + # + # @param [String] executable + # The binary to use. + # + # @param [Array<#to_s>] command + # The command to send to the binary. + # + # @param [Symbol] capture + # Whether it should raise if the command fails. + # + # @param [Hash] env + # Environment variables to be set for the command. + # + # @raise If the executable could not be located. + # + # @return [(String, Process::Status)] + # The desired captured output from the command, and the status from + # running the command. + # + def self.capture_command(executable, command, capture: :merge, env: {}, **kwargs) + bin = which!(executable) + + require 'open3' + command = command.map(&:to_s) + case capture + when :merge then Open3.capture2e(env, [bin, bin], *command, **kwargs) + when :both then Open3.capture3(env, [bin, bin], *command, **kwargs) + when :out then Open3.capture3(env, [bin, bin], *command, **kwargs).values_at(0, -1) + when :err then Open3.capture3(env, [bin, bin], *command, **kwargs).drop(1) + when :none then Open3.capture3(env, [bin, bin], *command, **kwargs).last + end + end + + # (see Executable.capture_command) + # + # @raise If running the command fails + # + def self.capture_command!(executable, command, **kwargs) + capture_command(executable, command, **kwargs).tap do |result| + result = Array(result) + status = result.last + unless status.success? + output = result[0..-2].join + raise Informative, "#{executable} #{command.join(' ')}\n\n#{output}".strip + end + end + end + + private + + def self.popen3(bin, command, stdout, stderr) + require 'open3' + Open3.popen3(bin, *command) do |i, o, e, t| + reader(o, stdout) + reader(e, stderr) + i.close + + status = t.value + + o.flush + e.flush + sleep(0.01) + + status + end + end + + def self.reader(input, output) + Thread.new do + buf = '' + begin + loop do + buf << input.readpartial(4096) + loop do + string, separator, buf = buf.partition(/[\r\n]/) + if separator.empty? + buf = string + break + end + output << (string << separator) + end + end + rescue EOFError, IOError + output << (buf << $/) unless buf.empty? + end + end + end + + #-------------------------------------------------------------------------# + + # Helper class that allows to write to an {IO} instance taking into account + # the UI indentation level. + # + class Indenter < ::Array + # @return [Fixnum] The indentation level of the UI. + # + attr_reader :indent + + # @return [IO] the {IO} to which the output should be printed. 
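
Executable.which above performs a plain scan of the directories on PATH. A standalone sketch of the same lookup:

require 'rubygems'

def which(program)
  suffix = Gem.win_platform? ? '.exe' : ''
  ENV.fetch('PATH', '').split(File::PATH_SEPARATOR).each do |dir|
    candidate = File.expand_path(program + suffix, dir)
    return candidate if File.file?(candidate) && File.executable?(candidate)
  end
  nil
end

puts which('ruby') # e.g. "/usr/bin/ruby", or a blank line if not found
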
+ # + attr_reader :io + + # Init a new Indenter + # + # @param [IO] io @see io + # + def initialize(io = nil) + @io = io + @indent = ' ' * UI.indentation_level + end + + # Stores a portion of the output and prints it to the {IO} instance. + # + # @param [String] value + # the output to print. + # + # @return [void] + # + def <<(value) + super + io << "#{indent}#{value}" if io + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources.rb new file mode 100644 index 0000000..389c139 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources.rb @@ -0,0 +1,57 @@ +require 'cocoapods/external_sources/abstract_external_source' +require 'cocoapods/external_sources/downloader_source' +require 'cocoapods/external_sources/path_source' +require 'cocoapods/external_sources/podspec_source' + +module Pod + # Provides support for initializing the correct concrete class of an external + # source. + # + module ExternalSources + # Instantiate a matching {AbstractExternalSource} for a given dependency. + # + # @param [Dependency] dependency + # the dependency + # + # @param [String] podfile_path + # @see AbstractExternalSource#podfile_path + # + # @param [Boolean] can_cache + # @see AbstractExternalSource#can_cache + # + # @return [AbstractExternalSource] an initialized instance of the concrete + # external source class associated with the option specified in the + # hash. + # + def self.from_dependency(dependency, podfile_path, can_cache) + from_params(dependency.external_source, dependency, podfile_path, can_cache) + end + + def self.from_params(params, dependency, podfile_path, can_cache) + name = dependency.root_name + if klass = concrete_class_from_params(params) + klass.new(name, params, podfile_path, can_cache) + else + msg = "Unknown external source parameters for `#{name}`: `#{params}`" + raise Informative, msg + end + end + + # Get the class to represent the defined source type of a dependency + # + # @param [Array] params + # the source params of the dependency + # + # @return [Class] + # + def self.concrete_class_from_params(params) + if params.key?(:podspec) + PodspecSource + elsif params.key?(:path) + PathSource + elsif Downloader.strategy_from_options(params) + DownloaderSource + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/abstract_external_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/abstract_external_source.rb new file mode 100644 index 0000000..dea7000 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/abstract_external_source.rb @@ -0,0 +1,205 @@ +module Pod + module ExternalSources + # Abstract class that defines the common behaviour of external sources. + # + class AbstractExternalSource + # @return [String] the name of the Pod described by this external source. + # + attr_reader :name + + # @return [Hash{Symbol => String}] the hash representation of the + # external source. + # + attr_reader :params + + # @return [String] the path where the podfile is defined to resolve + # relative paths. + # + attr_reader :podfile_path + + # @return [Boolean] Whether the source is allowed to touch the cache. 
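
ExternalSources.concrete_class_from_params above picks a concrete source class based on which key the dependency declares. A standalone sketch of that dispatch (class names are reduced to strings, and the :git/:http handling is simplified; the real code defers to Downloader.strategy_from_options):

STRATEGIES = {
  :podspec => 'PodspecSource',
  :path    => 'PathSource',
  :git     => 'DownloaderSource',
  :http    => 'DownloaderSource',
}.freeze

def strategy_for(params)
  key = STRATEGIES.keys.find { |k| params.key?(k) }
  raise ArgumentError, "Unknown external source: #{params.inspect}" unless key
  STRATEGIES[key]
end

puts strategy_for(:git  => 'https://example.com/repo.git') # => DownloaderSource
puts strategy_for(:path => '../Zephyr')                    # => PathSource
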
+ # + attr_reader :can_cache + alias_method :can_cache?, :can_cache + + # Initialize a new instance + # + # @param [String] name @see #name + # @param [Hash] params @see #params + # @param [String] podfile_path @see #podfile_path + # @param [Boolean] can_cache @see #can_cache + # + def initialize(name, params, podfile_path, can_cache = true) + @name = name + @params = params + @podfile_path = podfile_path + @can_cache = can_cache + end + + # @return [Bool] whether an external source source is equal to another + # according to the {#name} and to the {#params}. + # + def ==(other) + return false if other.nil? + name == other.name && params == other.params + end + + public + + # @!group Subclasses hooks + + # Fetches the external source from the remote according to the params. + # + # @param [Sandbox] _sandbox + # the sandbox where the specification should be stored. + # + # @return [void] + # + def fetch(_sandbox) + raise 'Abstract method' + end + + # @return [String] a string representation of the source suitable for UI. + # + def description + raise 'Abstract method' + end + + protected + + # Return the normalized path for a podspec for a relative declared path. + # + # @param [String] declared_path + # The path declared in the podfile. + # + # @return [String] The uri of the podspec appending the name of the file + # and expanding it if necessary. + # + # @note If the declared path is expanded only if the represents a path + # relative to the file system. + # + def normalized_podspec_path(declared_path) + extension = File.extname(declared_path) + if extension == '.podspec' || extension == '.json' + path_with_ext = declared_path + else + path_with_ext = "#{declared_path}/#{name}.podspec" + end + podfile_dir = File.dirname(podfile_path || '') + File.expand_path(path_with_ext, podfile_dir) + end + + private + + # @! Subclasses helpers + + # Pre-downloads a Pod passing the options to the downloader and informing + # the sandbox. + # + # @param [Sandbox] sandbox + # The sandbox where the Pod should be downloaded. + # + # @note To prevent a double download of the repository the pod is + # marked as pre-downloaded indicating to the installer that only + # clean operations are needed. + # + # @todo The downloader configuration is the same of the + # #{PodSourceInstaller} and it needs to be kept in sync. + # + # @return [void] + # + def pre_download(sandbox) + title = "Pre-downloading: `#{name}` #{description}" + UI.titled_section(title, :verbose_prefix => '-> ') do + target = sandbox.pod_dir(name) + begin + download_result = Downloader.download(download_request, target, :can_cache => can_cache) + rescue Pod::DSLError => e + raise Informative, "Failed to load '#{name}' podspec: #{e.message}" + rescue => e + raise Informative, "Failed to download '#{name}': #{e.message}" + end + + spec = download_result.spec + raise Informative, "Unable to find a specification for '#{name}'." unless spec + + # since the podspec might be cleaned, we want the checksum to refer + # to the json in the sandbox + spec.defined_in_file = nil + + store_podspec(sandbox, spec) + sandbox.store_pre_downloaded_pod(name) + sandbox.store_checkout_source(name, download_result.checkout_options) + end + end + + def download_request + Downloader::Request.new( + :name => name, + :params => params, + ) + end + + # Stores the podspec in the sandbox and marks it as from an external + # source. + # + # @param [Sandbox] sandbox + # The sandbox where the specification should be stored. 
+ # + # @param [Pathname, String, Specification] spec + # The path of the specification or its contents. + # + # @note All the concrete implementations of #{fetch} should invoke this + # method. + # + # @note The sandbox ensures that the podspec exists and that the names + # match. + # + # @return [void] + # + def store_podspec(sandbox, spec, json = false) + begin + spec = case spec + when Pathname + Specification.from_file(spec) + when String + path = "#{name}.podspec" + path << '.json' if json + Specification.from_string(spec, path).tap { |s| s.defined_in_file = nil } + when Specification + spec.dup + else + raise "Unknown spec type: #{spec}" + end + rescue Pod::DSLError => e + raise Informative, "Failed to load '#{name}' podspec: #{e.message}" + end + + validate_podspec(spec) + sandbox.store_podspec(name, spec, true, true) + end + + def validate_podspec(podspec) + defined_in_file = podspec.defined_in_file + podspec.defined_in_file = nil + + validator = validator_for_podspec(podspec) + validator.quick = true + validator.allow_warnings = true + validator.ignore_public_only_results = true + Config.instance.with_changes(:silent => true) do + validator.validate + end + unless validator.validated? + raise Informative, "The `#{name}` pod failed to validate due to #{validator.failure_reason}:\n#{validator.results_message}" + end + ensure + podspec.defined_in_file = defined_in_file + end + + def validator_for_podspec(podspec) + Validator.new(podspec, [], []) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/downloader_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/downloader_source.rb new file mode 100644 index 0000000..0d26b62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/downloader_source.rb @@ -0,0 +1,30 @@ +module Pod + module ExternalSources + # Provides support for fetching a specification file from a source handled + # by the downloader. Supports all the options of the downloader + # + # @note The podspec must be in the root of the repository and should have a + # name matching the one of the dependency. + # + class DownloaderSource < AbstractExternalSource + # @see AbstractExternalSource#fetch + # + def fetch(sandbox) + pre_download(sandbox) + end + + # @see AbstractExternalSource#description + # + def description + strategy = Downloader.strategy_from_options(params) + options = params.dup + url = options.delete(strategy) + result = "from `#{url}`" + options.each do |key, value| + result << ", #{key} `#{value}`" + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/path_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/path_source.rb new file mode 100644 index 0000000..fb7b013 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/path_source.rb @@ -0,0 +1,55 @@ +module Pod + module ExternalSources + # Provides support for fetching a specification file from a path local to + # the machine running the installation. + # + class PathSource < AbstractExternalSource + # @see AbstractExternalSource#fetch + # + def fetch(sandbox) + title = "Fetching podspec for `#{name}` #{description}" + UI.section(title, '-> ') do + podspec = podspec_path + unless podspec.exist? 
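
For reference alongside the fetch implementations above and below, these are the three Podfile forms the external-source classes handle; paths and URLs are placeholders, and in practice only one form would be used for a given pod:

target 'MyApp' do
  # Handled by PathSource
  pod 'Zephyr', :path => '../Zephyr'

  # Handled by PodspecSource
  pod 'Zephyr', :podspec => 'https://example.com/Zephyr.podspec.json'

  # Handled by DownloaderSource
  pod 'Zephyr', :git => 'https://github.com/ArtSabintsev/Zephyr.git', :tag => '3.6.3'
end
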
+ raise Informative, "No podspec found for `#{name}` in " \ + "`#{declared_path}`" + end + store_podspec(sandbox, podspec, podspec.extname == '.json') + is_absolute = absolute?(declared_path) + sandbox.store_local_path(name, podspec, is_absolute) + sandbox.remove_checkout_source(name) + end + end + + # @see AbstractExternalSource#description + # + def description + "from `#{declared_path}`" + end + + private + + # @!group Helpers + + # @return [String] The path as declared by the user. + # + def declared_path + result = params[:path] + result.to_s if result + end + + # @return [Pathname] The absolute path of the podspec. + # + def podspec_path + path = Pathname(normalized_podspec_path(declared_path)) + path.exist? ? path : Pathname("#{path}.json") + end + + # @return [Bool] + # + def absolute?(path) + Pathname(path).absolute? || path.to_s.start_with?('~') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/podspec_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/podspec_source.rb new file mode 100644 index 0000000..02b11a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/external_sources/podspec_source.rb @@ -0,0 +1,54 @@ +module Pod + module ExternalSources + # Provides support for fetching a specification file from an URL. Can be + # http, file, etc. + # + class PodspecSource < AbstractExternalSource + # @see AbstractExternalSource#fetch + # + def fetch(sandbox) + title = "Fetching podspec for `#{name}` #{description}" + UI.titled_section(title, :verbose_prefix => '-> ') do + podspec_path = Pathname(podspec_uri) + is_json = podspec_path.extname == '.json' + if podspec_path.exist? + store_podspec(sandbox, podspec_path, is_json) + else + require 'cocoapods/open-uri' + begin + OpenURI.open_uri(podspec_uri) { |io| store_podspec(sandbox, io.read, is_json) } + rescue OpenURI::HTTPError => e + status = e.io.status.join(' ') + raise Informative, "Failed to fetch podspec for `#{name}` at `#{podspec_uri}`.\n Error: #{status}" + end + end + end + end + + # @see AbstractExternalSource#description + # + def description + "from `#{params[:podspec]}`" + end + + private + + # @!group Helpers + + # @return [String] The uri of the podspec appending the name of the file + # and expanding it if necessary. + # + # @note If the declared path is expanded only if the represents a path + # relative to the file system. + # + def podspec_uri + declared_path = params[:podspec].to_s + if declared_path =~ %r{^.+://} + declared_path + else + normalized_podspec_path(declared_path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/gem_version.rb new file mode 100644 index 0000000..62a87ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/gem_version.rb @@ -0,0 +1,5 @@ +module Pod + # The version of the CocoaPods command line tool. + # + VERSION = '1.11.2'.freeze unless defined? 
Pod::VERSION +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements.rb new file mode 100644 index 0000000..15f810f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements.rb @@ -0,0 +1,107 @@ +module Pod + module Generator + class Acknowledgements + # @return [Array] The classes of the acknowledgements generator + # subclasses. + # + def self.generators + [Plist, Markdown] + end + + # @return [Array] the list of the file accessors + # for the specs of the target that needs to generate the + # acknowledgements. + # + attr_reader :file_accessors + + # @param [Array] @see file_accessors. + # + def initialize(file_accessors) + @file_accessors = file_accessors + end + + #-----------------------------------------------------------------------# + + # !@group Configuration + + # @return [String] The title of the acknowledgements file. + # + def header_title + 'Acknowledgements' + end + + # @return [String] A text to present before listing the acknowledgements. + # + def header_text + 'This application makes use of the following third party libraries:' + end + + # @return [String] The title of the foot notes. + # + def footnote_title + '' + end + + # @return [String] the foot notes. + # + def footnote_text + 'Generated by CocoaPods - https://cocoapods.org' + end + + #-----------------------------------------------------------------------# + + private + + # !@group Private methods + + # @return [Array] The root specifications for which the + # acknowledgements should be generated. + # + def specs + file_accessors.map { |accessor| accessor.spec.root }.uniq + end + + # Returns the text of the license for the given spec. + # + # @param [Specification] spec + # the specification for which license is needed. + # + # @return [String] The text of the license. + # @return [Nil] If no license text could be found. + # + def license_text(spec) + return nil unless spec.license + text = spec.license[:text] + unless text + if license_file = spec.license[:file] + license_path = file_accessor(spec).root + license_file + if File.exist?(license_path) + text = IO.read(license_path) + else + UI.warn "Unable to read the license file `#{license_file}` " \ + "for the spec `#{spec}`" + end + elsif license_file = file_accessor(spec).license + text = IO.read(license_file) + end + end + text + end + + protected + + # Returns the file accessor for the given spec. + # + # @param [Specification] spec + # the specification for which the file accessor is needed. + # + # @return [Sandbox::FileAccessor] The file accessor. 
+ # + def file_accessor(spec) + file_accessors.find { |accessor| accessor.spec.root == spec } + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements/markdown.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements/markdown.rb new file mode 100644 index 0000000..6d7d63b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements/markdown.rb @@ -0,0 +1,44 @@ +module Pod + module Generator + class Markdown < Acknowledgements + def self.path_from_basepath(path) + Pathname.new(path.dirname + "#{path.basename}.markdown") + end + + def save_as(path) + file = File.new(path, 'w') + file.write(licenses) + file.close + end + + # @return [String] The contents of the acknowledgements in Markdown format. + # + def generate + licenses + end + + def title_from_string(string, level) + unless string.empty? + '#' * level << " #{string}" + end + end + + def string_for_spec(spec) + if (license_text = license_text(spec)) + "\n" << title_from_string(spec.name, 2) << "\n\n" << license_text << "\n" + end + end + + def licenses + licenses_string = "#{title_from_string(header_title, 1)}\n#{header_text}\n" + specs.each do |spec| + if (license = string_for_spec(spec)) + license = license.force_encoding('UTF-8') if license.respond_to?(:force_encoding) + licenses_string += license + end + end + licenses_string += "#{title_from_string(footnote_title, 2)}#{footnote_text}\n" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements/plist.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements/plist.rb new file mode 100644 index 0000000..5bc1c34 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/acknowledgements/plist.rb @@ -0,0 +1,94 @@ +require 'stringio' + +module Pod + module Generator + class Plist < Acknowledgements + def self.path_from_basepath(path) + Pathname.new(path.dirname + "#{path.basename}.plist") + end + + def save_as(path) + Xcodeproj::Plist.write_to_path(plist_hash, path) + end + + # @return [String] The contents of the plist + # + def generate + plist = Nanaimo::Plist.new(plist_hash, :xml) + contents = StringIO.new + Nanaimo::Writer::XMLWriter.new(plist, :pretty => true, :output => contents, :strict => false).write + contents.string + end + + def plist_hash + { + :Title => plist_title, + :StringsTable => plist_title, + :PreferenceSpecifiers => licenses, + } + end + + def plist_title + 'Acknowledgements' + end + + def licenses + licences_array = [header_hash] + specs.each do |spec| + if (hash = hash_for_spec(spec)) + licences_array << hash + end + end + licences_array << footnote_hash + end + + def hash_for_spec(spec) + if (license = license_text(spec)) + hash = { + :Type => 'PSGroupSpecifier', + :Title => sanitize_encoding(spec.name), + :FooterText => sanitize_encoding(license), + } + hash[:License] = sanitize_encoding(spec.license[:type]) if spec.license[:type] + + hash + end + end + + def header_hash + { + :Type => 'PSGroupSpecifier', + :Title => sanitize_encoding(header_title), + :FooterText => sanitize_encoding(header_text), + } + end + + def footnote_hash + { + :Type => 'PSGroupSpecifier', + :Title => sanitize_encoding(footnote_title), + :FooterText => sanitize_encoding(footnote_text), + } + end + + 
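
Generator::Markdown above assembles the acknowledgements by concatenating a header, one titled section per license, and a footer. A standalone sketch of that assembly with made-up pod names and license text:

def md_title(string, level)
  '#' * level + " #{string}" unless string.empty?
end

licenses = {
  'Zephyr'    => 'MIT License text ...',
  'SomeOther' => 'Apache 2.0 License text ...',
}

markdown = "#{md_title('Acknowledgements', 1)}\n"
markdown << "This application makes use of the following third party libraries:\n"
licenses.each do |name, text|
  markdown << "\n#{md_title(name, 2)}\n\n#{text}\n"
end
markdown << "Generated by CocoaPods - https://cocoapods.org\n"
puts markdown
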
#-----------------------------------------------------------------------# + + private + + # !@group Private methods + + # Returns the sanitized text with UTF-8 invalid characters eliminated. + # + # @param [String] text + # the text we want to sanitize. + # + # @return [String] The sanitized UTF-8 text. + # + def sanitize_encoding(text) + text.encode('UTF-8', :invalid => :replace, :undef => :replace, :replace => '') + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/app_target_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/app_target_helper.rb new file mode 100644 index 0000000..5b10e55 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/app_target_helper.rb @@ -0,0 +1,363 @@ +module Pod + module Generator + # Stores the common logic for creating app targets within projects including + # generating standard import and main files for app hosts. + # + module AppTargetHelper + # Adds a single app target to the given project with the provided name. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [Symbol] platform_name + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @param [String] product_basename + # The product basename to use for the target, defaults to `name`. + # + # @return [PBXNativeTarget] the new target that was created. + # + def self.add_app_target(project, platform_name, deployment_target, name = 'App', product_basename = nil) + project.new_target(:application, name, platform_name, deployment_target, nil, + nil, product_basename) + end + + # Creates and links an import file for the given pod target and into the given native target. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated import file into. + # + # @param [PodTarget] pod_target + # the pod target to use for when generating the contents of the import file. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [Array] the created build file references. + # + def self.add_app_project_import(project, target, pod_target, platform, name = 'App') + source_file = AppTargetHelper.create_app_import_source_file(project, pod_target, platform, name) + group = project[name] || project.new_group(name, name) + source_file_ref = group.new_file(source_file) + target.add_file_references([source_file_ref]) + end + + # Creates and links an empty Swift file for the given target. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated import file into. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [Array] the created build file references. 
+ # + def self.add_empty_swift_file(project, target, name = 'App') + swift_file = project.path.dirname.+("#{name}/dummy.swift") + swift_file.parent.mkpath + File.write(swift_file, '') + group = project[name] || project.new_group(name, name) + swift_file_ref = group.new_file(swift_file) + target.add_file_references([swift_file_ref]) + end + + # Creates and links a default app host 'main.m' file. + # + # @param [Project] project + # the Xcodeproj to generate the main file into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated main file into. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [Array] the created build file references. + # + def self.add_app_host_main_file(project, target, platform, group, name = 'App') + source_file = AppTargetHelper.create_app_host_main_file(project, platform, name) + source_file_ref = group.new_file(source_file) + target.add_file_references([source_file_ref]) + end + + # Creates a default launchscreen storyboard. + # + # @param [Project] project + # the Xcodeproj to generate the launchscreen storyboard into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated launchscreen storyboard into. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [PBXFileReference] the created file reference of the launchscreen storyboard. + # + def self.add_launchscreen_storyboard(project, target, group, deployment_target, name = 'App') + launch_storyboard_file = AppTargetHelper.create_launchscreen_storyboard_file(project, deployment_target, name) + launch_storyboard_ref = group.new_file(launch_storyboard_file) + target.resources_build_phase.add_file_reference(launch_storyboard_ref) + end + + # Adds the xctest framework search paths into the given target. + # + # @param [PBXNativeTarget] target + # the native target to add XCTest into. + # + # @return [void] + # + def self.add_xctest_search_paths(target) + requires_libs = target.platform_name == :ios && + Version.new(target.deployment_target) < Version.new('12.2') + + target.build_configurations.each do |configuration| + framework_search_paths = configuration.build_settings['FRAMEWORK_SEARCH_PATHS'] ||= '$(inherited)' + framework_search_paths << ' "$(PLATFORM_DIR)/Developer/Library/Frameworks"' + + if requires_libs + library_search_paths = configuration.build_settings['LIBRARY_SEARCH_PATHS'] ||= '$(inherited)' + library_search_paths << ' "$(PLATFORM_DIR)/Developer/usr/lib"' + end + end + end + + # Adds the provided swift version into the given target. + # + # @param [PBXNativeTarget] target + # the native target to add the swift version into. + # + # @param [String] swift_version + # the swift version to set to. + # + # @return [void] + # + def self.add_swift_version(target, swift_version) + raise 'Cannot set empty Swift version to target.' if swift_version.blank? + target.build_configurations.each do |configuration| + configuration.build_settings['SWIFT_VERSION'] = swift_version + end + end + + # Creates a default import file for the given pod target. + # + # @param [Project] project + # the Xcodeproj to generate the target into. 
+ # + # @param [PodTarget] pod_target + # the pod target to use for when generating the contents of the import file. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] name + # The name of the folder to use and save the generated main file. + # + # @return [Pathname] the new source file that was generated. + # + def self.create_app_import_source_file(project, pod_target, platform, name = 'App') + language = pod_target.uses_swift? ? :swift : :objc + + if language == :swift + source_file = project.path.dirname.+("#{name}/main.swift") + source_file.parent.mkpath + import_statement = pod_target.should_build? && pod_target.defines_module? ? "import #{pod_target.product_module_name}\n" : '' + source_file.open('w') { |f| f << import_statement } + else + source_file = project.path.dirname.+("#{name}/main.m") + source_file.parent.mkpath + import_statement = if pod_target.should_build? && pod_target.defines_module? + "@import #{pod_target.product_module_name};\n" + else + header_name = "#{pod_target.product_module_name}/#{pod_target.product_module_name}.h" + if pod_target.sandbox.public_headers.root.+(header_name).file? + "#import <#{header_name}>\n" + else + '' + end + end + source_file.open('w') do |f| + f << "@import Foundation;\n" + f << "@import UIKit;\n" if platform == :ios || platform == :tvos + f << "@import Cocoa;\n" if platform == :osx + f << "#{import_statement}int main() {}\n" + end + end + source_file + end + + # Creates a default launchscreen storyboard file. + # + # @param [Project] project + # the Xcodeproj to generate the launchscreen storyboard into. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [String] name + # The name of the folder to use and save the generated launchscreen storyboard file. + # + # @return [Pathname] the new launchscreen storyboard file that was generated. + # + def self.create_launchscreen_storyboard_file(project, deployment_target, name = 'App') + launch_storyboard_file = project.path.dirname.+("#{name}/LaunchScreen.storyboard") + launch_storyboard_file.parent.mkpath + if Version.new(deployment_target) >= Version.new('9.0') + File.write(launch_storyboard_file, LAUNCHSCREEN_STORYBOARD_CONTENTS) + else + File.write(launch_storyboard_file, LAUNCHSCREEN_STORYBOARD_CONTENTS_IOS_8) + end + launch_storyboard_file + end + + # Creates a default app host 'main.m' file. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`. + # + # @param [String] name + # The name of the folder to use and save the generated main file. + # + # @return [Pathname] the new source file that was generated. 
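
AppTargetHelper above is a thin wrapper over the Xcodeproj gem. A hedged sketch of the underlying Xcodeproj calls it relies on; it assumes the xcodeproj gem is installed, and the project path, file path, and settings are placeholders:

require 'xcodeproj'

project = Xcodeproj::Project.new('/tmp/Demo/App.xcodeproj')

# Roughly what add_app_target does: one application target per platform.
target = project.new_target(:application, 'App', :ios, '12.0')

# Roughly what add_app_project_import / add_empty_swift_file do.
group    = project.new_group('App', 'App')
file_ref = group.new_file('/tmp/Demo/App/main.swift')
target.add_file_references([file_ref])

# Roughly what add_swift_version does.
target.build_configurations.each do |config|
  config.build_settings['SWIFT_VERSION'] = '5.0'
end

project.save
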
+ # + def self.create_app_host_main_file(project, platform, name = 'App') + source_file = project.path.dirname.+("#{name}/main.m") + source_file.parent.mkpath + source_file.open('w') do |f| + case platform + when :ios, :tvos + f << IOS_APP_HOST_MAIN_CONTENTS + when :osx + f << MACOS_APP_HOST_MAIN_CONTENTS + end + end + source_file + end + + IOS_APP_HOST_MAIN_CONTENTS = < +#import + +@interface CPTestAppHostAppDelegate : UIResponder + +@property (nonatomic, strong) UIWindow *window; + +@end + +@implementation CPTestAppHostAppDelegate + +- (BOOL)application:(UIApplication *)__unused application didFinishLaunchingWithOptions:(NSDictionary *)__unused launchOptions +{ + self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]]; + self.window.rootViewController = [UIViewController new]; + + [self.window makeKeyAndVisible]; + + return YES; +} + +@end + +int main(int argc, char *argv[]) +{ + @autoreleasepool + { + return UIApplicationMain(argc, argv, nil, NSStringFromClass([CPTestAppHostAppDelegate class])); + } +} +EOS + + MACOS_APP_HOST_MAIN_CONTENTS = < + +int main(int argc, const char * argv[]) { + return NSApplicationMain(argc, argv); +} +EOS + + LAUNCHSCREEN_STORYBOARD_CONTENTS_IOS_8 = <<-XML.strip_heredoc.freeze + + + + + + + + + + + + + + + + + + + + + + + + + + + + XML + + LAUNCHSCREEN_STORYBOARD_CONTENTS = <<-XML.strip_heredoc.freeze + + + + + + + + + + + + + + + + + + + + + + + + + + XML + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/bridge_support.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/bridge_support.rb new file mode 100644 index 0000000..fc77aba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/bridge_support.rb @@ -0,0 +1,22 @@ +module Pod + module Generator + class BridgeSupport + extend Executable + executable :gen_bridge_metadata + + attr_reader :headers + + def initialize(headers) + @headers = headers + end + + def search_paths + @headers.map { |header| "-I '#{header.dirname}'" }.uniq + end + + def save_as(pathname) + gen_bridge_metadata('-c', search_paths.join(' '), '-o', pathname, *headers) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/constant.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/constant.rb new file mode 100644 index 0000000..f7f96b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/constant.rb @@ -0,0 +1,19 @@ +module Pod + module Generator + # Generates a constant file. + # + class Constant + def initialize(contents) + @generate = contents + end + + attr_reader :generate + + def save_as(path) + path.open('w') do |f| + f.write(generate) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/copy_dsyms_script.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/copy_dsyms_script.rb new file mode 100644 index 0000000..6dd7797 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/copy_dsyms_script.rb @@ -0,0 +1,56 @@ +module Pod + module Generator + class CopydSYMsScript + # @return [Array] dsym_paths the dSYM paths to include in the script contents. + # + attr_reader :dsym_paths + + # @return [Array] bcsymbolmap_paths the bcsymbolmap paths to include in the script contents. 
+ # + attr_reader :bcsymbolmap_paths + + # Initialize a new instance + # + # @param [Array] dsym_paths @see dsym_paths + # @param [Array] bcsymbolmap_paths @see bcsymbolmap_paths + # + def initialize(dsym_paths, bcsymbolmap_paths) + @dsym_paths = Array(dsym_paths) + @bcsymbolmap_paths = Array(bcsymbolmap_paths) + end + + # Saves the copy dSYMs script to the given pathname. + # + # @param [Pathname] pathname + # The path where the copy dSYMs script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(generate) + end + File.chmod(0755, pathname.to_s) + end + + # @return [String] The generated of the copy dSYMs script. + # + def generate + script = <<-SH.strip_heredoc +#{Pod::Generator::ScriptPhaseConstants::DEFAULT_SCRIPT_PHASE_HEADER} +#{Pod::Generator::ScriptPhaseConstants::STRIP_INVALID_ARCHITECTURES_METHOD} +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_DSYM_METHOD} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_BCSYMBOLMAP_METHOD} + SH + dsym_paths.each do |dsym_path| + script << %(install_dsym "#{dsym_path}"\n) + end + bcsymbolmap_paths.each do |bcsymbolmap_path| + script << %(install_bcsymbolmap "#{bcsymbolmap_path}"\n) + end + script + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/copy_resources_script.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/copy_resources_script.rb new file mode 100644 index 0000000..d81c59d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/copy_resources_script.rb @@ -0,0 +1,223 @@ +module Pod + module Generator + class CopyResourcesScript + # @return [Hash{String, Array{String}] A list of files relative to the + # project pods root, keyed by build configuration. + # + attr_reader :resources_by_config + + # @return [Platform] The platform of the library for which the copy + # resources script is needed. + # + attr_reader :platform + + # Initialize a new instance + # + # @param [Hash>] resources_by_config + # @see resources_by_config + # + # @param [Platform] platform + # @see platform + # + def initialize(resources_by_config, platform) + @resources_by_config = resources_by_config + @platform = platform + end + + # Saves the resource script to the given pathname. + # + # @param [Pathname] pathname + # The path where the copy resources script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(script) + end + File.chmod(0755, pathname.to_s) + end + + # @return [String] The contents of the copy resources script. + # + def generate + script + end + + private + + # @!group Private Helpers + + # @return [Hash{Symbol=>Version}] The minimum deployment target which + # supports the `--reference-external-strings-file` option for + # the `ibtool` command. + # + EXTERNAL_STRINGS_FILE_MIMINUM_DEPLOYMENT_TARGET = { + :ios => Version.new('6.0'), + :osx => Version.new('10.8'), + :watchos => Version.new('2.0'), + :tvos => Version.new('9.0'), + } + + # @return [Bool] Whether the external strings file is supported by the + # `ibtool` according to the deployment target of the platform. + # + def use_external_strings_file? + minimum_deployment_target = EXTERNAL_STRINGS_FILE_MIMINUM_DEPLOYMENT_TARGET[platform.name] + platform.deployment_target >= minimum_deployment_target + end + + # @return [String] The install resources shell function. 
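
The script generators above all share the same generate-then-save_as pattern: build a shell script string, write it to disk, and mark it executable. A standalone sketch of that pattern with an illustrative script and output path:

require 'pathname'

def save_script(pathname, contents)
  pathname = Pathname(pathname)
  pathname.dirname.mkpath
  pathname.open('w') { |f| f.puts(contents) }
  File.chmod(0o755, pathname.to_s)
end

script = <<~SH
  #!/bin/sh
  set -e
  echo "Copying resources for ${CONFIGURATION:-Debug}"
SH

save_script('/tmp/demo-copy-resources.sh', script)
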
+ # + def install_resources_function + if use_external_strings_file? + INSTALL_RESOURCES_FUNCTION + else + INSTALL_RESOURCES_FUNCTION.gsub(' --reference-external-strings-file', '') + end + end + + # @return [String] The contents of the copy resources script. + # + def script + # Define install function + script = install_resources_function + + # Call function for each configuration-dependent resource + resources_by_config.each do |config, resources| + unless resources.empty? + script += %(if [[ "$CONFIGURATION" == "#{config}" ]]; then\n) + resources.each do |resource| + script += %( install_resource "#{resource}"\n) + end + script += "fi\n" + end + end + + script += RSYNC_CALL + script += XCASSETS_COMPILE + script + end + + INSTALL_RESOURCES_FUNCTION = < "$RESOURCES_TO_COPY" + +XCASSET_FILES=() + +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} +case "${TARGETED_DEVICE_FAMILY:-}" in + 1,2) + TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone" + ;; + 1) + TARGET_DEVICE_ARGS="--target-device iphone" + ;; + 2) + TARGET_DEVICE_ARGS="--target-device ipad" + ;; + 3) + TARGET_DEVICE_ARGS="--target-device tv" + ;; + 4) + TARGET_DEVICE_ARGS="--target-device watch" + ;; + *) + TARGET_DEVICE_ARGS="--target-device mac" + ;; +esac + +install_resource() +{ + if [[ "$1" = /* ]] ; then + RESOURCE_PATH="$1" + else + RESOURCE_PATH="${PODS_ROOT}/$1" + fi + if [[ ! -e "$RESOURCE_PATH" ]] ; then + cat << EOM +error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script. +EOM + exit 1 + fi + case $RESOURCE_PATH in + *\.storyboard) + echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true + ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS} + ;; + *\.xib) + echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true + ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS} + ;; + *.framework) + echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true + mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + ;; + *.xcdatamodel) + echo "xcrun momc \\"$RESOURCE_PATH\\" \\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename 
"$RESOURCE_PATH"`.mom\\"" || true + xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom" + ;; + *.xcdatamodeld) + echo "xcrun momc \\"$RESOURCE_PATH\\" \\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\\"" || true + xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd" + ;; + *.xcmappingmodel) + echo "xcrun mapc \\"$RESOURCE_PATH\\" \\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\\"" || true + xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm" + ;; + *.xcassets) + ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH" + XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE") + ;; + *) + echo "$RESOURCE_PATH" || true + echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY" + ;; + esac +} +EOS + + RSYNC_CALL = <] List of xcframeworks to copy + # + attr_reader :xcframeworks + + # @return [Pathname] the root directory of the sandbox + # + attr_reader :sandbox_root + + # @return [Platform] the platform of the target for which this script will run + # + attr_reader :platform + + # Creates a script for copying XCFramework slcies into an intermediate build directory + # + # @param [Array] xcframeworks + # the list of xcframeworks to copy + # + # @param [Pathname] sandbox_root + # the root of the Sandbox into which this script will be installed + # + # @param [Platform] platform + # the platform of the target for which this script will be run + # + def initialize(xcframeworks, sandbox_root, platform) + @xcframeworks = xcframeworks + @sandbox_root = sandbox_root + @platform = platform + end + + # Saves the resource script to the given pathname. + # + # @param [Pathname] pathname + # The path where the embed frameworks script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(script) + end + File.chmod(0o755, pathname.to_s) + end + + # @return [String] The contents of the embed frameworks script. + # + def generate + script + end + + private + + # @!group Private Helpers + + # @return [String] The contents of the prepare artifacts script. + # + def script + script = <<-SH.strip_heredoc +#{Pod::Generator::ScriptPhaseConstants::DEFAULT_SCRIPT_PHASE_HEADER} + +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} + +copy_dir() +{ + local source="$1" + local destination="$2" + + # Use filter instead of exclude so missing patterns don't throw errors. + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" \\"${source}*\\" \\"${destination}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" "${source}"/* "${destination}" +} + +SELECT_SLICE_RETVAL="" + +select_slice() { + local paths=("$@") + # Locate the correct slice of the .xcframework for the current architectures + local target_path="" + + # Split archs on space so we can find a slice that has all the needed archs + local target_archs=$(echo $ARCHS | tr " " "\\n") + + local target_variant="" + if [[ "$PLATFORM_NAME" == *"simulator" ]]; then + target_variant="simulator" + fi + if [[ ! 
-z ${EFFECTIVE_PLATFORM_NAME+x} && "$EFFECTIVE_PLATFORM_NAME" == *"maccatalyst" ]]; then + target_variant="maccatalyst" + fi + for i in ${!paths[@]}; do + local matched_all_archs="1" + for target_arch in $target_archs + do + if ! [[ "${paths[$i]}" == *"$target_variant"* ]]; then + matched_all_archs="0" + break + fi + + # Verifies that the path contains the variant string (simulator or maccatalyst) if the variant is set. + if [[ -z "$target_variant" && ("${paths[$i]}" == *"simulator"* || "${paths[$i]}" == *"maccatalyst"*) ]]; then + matched_all_archs="0" + break + fi + + # This regex matches all possible variants of the arch in the folder name: + # Let's say the folder name is: ios-armv7_armv7s_arm64_arm64e/CoconutLib.framework + # We match the following: -armv7_, _armv7s_, _arm64_ and _arm64e/. + # If we have a specific variant: ios-i386_x86_64-simulator/CoconutLib.framework + # We match the following: -i386_ and _x86_64- + # When the .xcframework wraps a static library, the folder name does not include + # any .framework. In that case, the folder name can be: ios-arm64_armv7 + # We also match _armv7$ to handle that case. + local target_arch_regex="[_\\-]${target_arch}([\\/_\\-]|$)" + if ! [[ "${paths[$i]}" =~ $target_arch_regex ]]; then + matched_all_archs="0" + break + fi + done + + if [[ "$matched_all_archs" == "1" ]]; then + # Found a matching slice + echo "Selected xcframework slice ${paths[$i]}" + SELECT_SLICE_RETVAL=${paths[$i]} + break + fi + done +} + +install_xcframework() { + local basepath="$1" + local name="$2" + local package_type="$3" + local paths=("${@:4}") + + # Locate the correct slice of the .xcframework for the current architectures + select_slice "${paths[@]}" + local target_path="$SELECT_SLICE_RETVAL" + if [[ -z "$target_path" ]]; then + echo "warning: [CP] Unable to find matching .xcframework slice in '${paths[@]}' for the current build architectures ($ARCHS)." + return + fi + local source="$basepath/$target_path" + + local destination="#{Pod::Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE}/${name}" + + if [ ! -d "$destination" ]; then + mkdir -p "$destination" + fi + + copy_dir "$source/" "$destination" + echo "Copied $source to $destination" +} + + SH + xcframeworks.each do |xcframework| + slices = xcframework.slices.select { |f| f.platform.symbolic_name == platform.symbolic_name } + next if slices.empty? + args = install_xcframework_args(xcframework, slices) + script << "install_xcframework #{args}\n" + end + + script << "\n" unless xcframeworks.empty? + script + end + + def shell_escape(value) + "\"#{value}\"" + end + + def install_xcframework_args(xcframework, slices) + root = xcframework.path + args = [shell_escape("${PODS_ROOT}/#{root.relative_path_from(sandbox_root)}")] + args << shell_escape(xcframework.target_name) + is_framework = xcframework.build_type.framework? + args << shell_escape(is_framework ? 
'framework' : 'library') + slices.each do |slice| + args << shell_escape(slice.path.dirname.relative_path_from(root)) + end + args.join(' ') + end + + class << self + # @param [Pathname] xcframework_path + # the base path of the .xcframework bundle + # + # @return [Array] all found .dSYM paths + # + def dsym_folder(xcframework_path) + basename = File.basename(xcframework_path, '.xcframework') + dsym_basename = basename + '.dSYMs' + path = xcframework_path.dirname + dsym_basename + Pathname.new(path) if File.directory?(path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/dummy_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/dummy_source.rb new file mode 100644 index 0000000..d04261d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/dummy_source.rb @@ -0,0 +1,31 @@ +module Pod + module Generator + class DummySource + attr_reader :class_name + + def initialize(class_name_identifier) + validated_class_name_identifier = class_name_identifier.gsub(/[^0-9a-z_]/i, '_') + @class_name = "PodsDummy_#{validated_class_name_identifier}" + end + + # @return [String] the string contents of the dummy source file. + # + def generate + result = <<-source.strip_heredoc + #import + @interface #{class_name} : NSObject + @end + @implementation #{class_name} + @end + source + result + end + + def save_as(pathname) + pathname.open('w') do |source| + source.write(generate) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/embed_frameworks_script.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/embed_frameworks_script.rb new file mode 100644 index 0000000..d1772eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/embed_frameworks_script.rb @@ -0,0 +1,196 @@ +require 'cocoapods/xcode' + +module Pod + module Generator + class EmbedFrameworksScript + # @return [Hash{String => Array}] Multiple lists of frameworks per + # configuration. + # + attr_reader :frameworks_by_config + + # @return [Hash{String => Array}] Multiple lists of frameworks per + # configuration. + # + attr_reader :xcframeworks_by_config + + # @param [Hash{String => Array] frameworks_by_config + # @see #frameworks_by_config + # + # @param [Hash{String => Array] xcframeworks_by_config + # @see #xcframeworks_by_config + # + def initialize(frameworks_by_config, xcframeworks_by_config) + @frameworks_by_config = frameworks_by_config + @xcframeworks_by_config = xcframeworks_by_config + end + + # Saves the resource script to the given pathname. + # + # @param [Pathname] pathname + # The path where the embed frameworks script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(script) + end + File.chmod(0755, pathname.to_s) + end + + # @return [String] The contents of the embed frameworks script. + # + def generate + script + end + + private + + # @!group Private Helpers + + # @return [String] The contents of the embed frameworks script. + # + def script + script = <<-SH.strip_heredoc +#{Pod::Generator::ScriptPhaseConstants::DEFAULT_SCRIPT_PHASE_HEADER} +if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then + # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy + # frameworks to, so exit 0 (signalling the script phase was successful). 
+ exit 0 +fi + +echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" +mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + +COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}" +SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}" +BCSYMBOLMAP_DIR="BCSymbolMaps" + + +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} +# Copies and strips a vendored framework +install_framework() +{ + if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then + local source="${BUILT_PRODUCTS_DIR}/$1" + elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then + local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")" + elif [ -r "$1" ]; then + local source="$1" + fi + + local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + + if [ -L "${source}" ]; then + echo "Symlinked..." + source="$(readlink "${source}")" + fi + + if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then + # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied + find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do + echo "Installing $f" + install_bcsymbolmap "$f" "$destination" + rm "$f" + done + rmdir "${source}/${BCSYMBOLMAP_DIR}" + fi + + # Use filter instead of exclude so missing patterns don't throw errors. + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" --filter \\"- Headers\\" --filter \\"- PrivateHeaders\\" --filter \\"- Modules\\" \\"${source}\\" \\"${destination}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}" + + local basename + basename="$(basename -s .framework "$1")" + binary="${destination}/${basename}.framework/${basename}" + + if ! [ -r "$binary" ]; then + binary="${destination}/${basename}" + elif [ -L "${binary}" ]; then + echo "Destination binary is symlinked..." + dirname="$(dirname "${binary}")" + binary="${dirname}/$(readlink "${binary}")" + fi + + # Strip invalid architectures so "fat" simulator / device frameworks work on device + if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then + strip_invalid_archs "$binary" + fi + + # Resign the code if required by the build settings to avoid unstable apps + code_sign_if_enabled "${destination}/$(basename "$1")" + + # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7. 
+ if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then + local swift_runtime_libs + swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\\\/\\(.+dylib\\).*/\\\\1/g | uniq -u) + for lib in $swift_runtime_libs; do + echo "rsync -auv \\"${SWIFT_STDLIB_PATH}/${lib}\\" \\"${destination}\\"" + rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}" + code_sign_if_enabled "${destination}/${lib}" + done + fi +} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_DSYM_METHOD} +#{Pod::Generator::ScriptPhaseConstants::STRIP_INVALID_ARCHITECTURES_METHOD} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_BCSYMBOLMAP_METHOD} +# Signs a framework with the provided identity +code_sign_if_enabled() { + if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then + # Use the current code_sign_identity + echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}" + local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'" + + if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then + code_sign_cmd="$code_sign_cmd &" + fi + echo "$code_sign_cmd" + eval "$code_sign_cmd" + fi +} + SH + contents_by_config = Hash.new do |hash, key| + hash[key] = '' + end + frameworks_by_config.each do |config, frameworks| + frameworks.each do |framework| + contents_by_config[config] << %( install_framework "#{framework.source_path}"\n) + end + end + xcframeworks_by_config.each do |config, xcframeworks| + xcframeworks.select { |xcf| xcf.build_type.dynamic_framework? }.each do |xcframework| + target_name = xcframework.target_name + name = xcframework.name + contents_by_config[config] << %( install_framework "#{Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE}/#{target_name}/#{name}.framework"\n) + end + end + script << "\n" unless contents_by_config.empty? + contents_by_config.keys.sort.each do |config| + contents = contents_by_config[config] + next if contents.empty? + script << %(if [[ "$CONFIGURATION" == "#{config}" ]]; then\n) + script << contents + script << "fi\n" + end + script << <<-SH.strip_heredoc + if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then + wait + fi + SH + script + end + + # @param [Xcode::FrameworkPaths] framework_path + # the framework path containing the dSYM + # + # @return [String, Nil] the name of the dSYM binary, if found + # + def dsym_binary_name(framework_path) + return nil if framework_path.dsym_path.nil? + if (path = Pathname.glob(framework_path.dsym_path.join('Contents/Resources/DWARF', '**/*')).first) + File.basename(path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/file_list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/file_list.rb new file mode 100644 index 0000000..3e617f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/file_list.rb @@ -0,0 +1,39 @@ +module Pod + module Generator + # Generates an xcfilelist file. + # + class FileList + # @return [Array] The paths of the files in the file list. + # + attr_reader :paths + + # Initialize a new instance + # + # @param [Array] paths + # @see paths + # + def initialize(paths) + @paths = paths + end + + # Generates the contents of the file list. + # + # @return [String] + # + def generate + paths.join("\n") + end + + # Generates and saves the file list to the given path. 
+ # + # @param [Pathname] path + # The path where the file list should be stored. + # + # @return [void] + # + def save_as(path) + path.open('w') { |file_list| file_list.write(generate) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/header.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/header.rb new file mode 100644 index 0000000..fd4fab8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/header.rb @@ -0,0 +1,103 @@ +module Pod + module Generator + # Generates a header file. + # + # According to the platform the header imports `UIKit/UIKit.h` or + # `Cocoa/Cocoa.h`. + # + class Header + # @return [Symbol] the platform for which the prefix header will be + # generated. + # + attr_reader :platform + + # @return [Array] The list of the headers to import. + # + attr_accessor :imports + + # @return [Array] The list of the modules to import. + # + attr_reader :module_imports + + # Initialize a new instance + # + # @param [Symbol] platform + # @see platform + # + def initialize(platform) + @platform = platform + @imports = [] + @module_imports = [] + end + + # Generates the contents of the header according to the platform. + # + # @note If the platform is iOS an import call to `UIKit/UIKit.h` is + # added to the top of the prefix header. For OS X `Cocoa/Cocoa.h` + # is imported. + # + # @return [String] + # + def generate + result = '' + result << "#ifdef __OBJC__\n" + result << generate_platform_import_header + result << "#else\n" + result << "#ifndef FOUNDATION_EXPORT\n" + result << "#if defined(__cplusplus)\n" + result << "#define FOUNDATION_EXPORT extern \"C\"\n" + result << "#else\n" + result << "#define FOUNDATION_EXPORT extern\n" + result << "#endif\n" + result << "#endif\n" + result << "#endif\n" + result << "\n" + + imports.each do |import| + result << %(#import "#{import}"\n) + end + + unless module_imports.empty? + module_imports.each do |import| + result << %(\n@import #{import}) + end + result << "\n" + end + + result + end + + # Generates and saves the header to the given path. + # + # @param [Pathname] path + # The path where the header should be stored. + # + # @return [void] + # + def save_as(path) + path.open('w') { |header| header.write(generate) } + end + + #-----------------------------------------------------------------------# + + protected + + # Generates the contents of the header according to the platform. + # + # @note If the platform is iOS an import call to `UIKit/UIKit.h` is + # added to the top of the header. For OS X `Cocoa/Cocoa.h` is + # imported. + # + # @return [String] + # + def generate_platform_import_header + case platform.name + when :ios then "#import \n" + when :tvos then "#import \n" + when :osx then "#import \n" + else "#import \n" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/info_plist_file.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/info_plist_file.rb new file mode 100644 index 0000000..810f1c5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/info_plist_file.rb @@ -0,0 +1,128 @@ +module Pod + module Generator + # Generates Info.plist files. A Info.plist file is generated for each + # Pod and for each Pod target definition, that requires to be built as + # framework. It states public attributes. 
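# --- Illustrative aside (not part of the vendored gem source) ---
# A minimal sketch of driving the generator defined just below: build an
# Info.plist for a hypothetical framework pod and write it to the working
# directory. The version, platform and output file name are illustrative only.
require 'cocoapods'
require 'cocoapods/generator/info_plist_file'

plist = Pod::Generator::InfoPlistFile.new('1.2.3', Pod::Platform.ios, :fmwk)
plist.save_as(Pathname.new('Foo-Info.plist')) # writes the serialized plist XML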
+ # + class InfoPlistFile + # @return [String] version The version to use for when generating this Info.plist file. + # + attr_reader :version + + # @return [Platform] The platform to use for when generating this Info.plist file. + # + attr_reader :platform + + # @return [Symbol] the CFBundlePackageType of the target this Info.plist + # file is for. + # + attr_reader :bundle_package_type + + # @return [Hash] any additional entries to include in this Info.plist + # + attr_reader :additional_entries + + # Initialize a new instance + # + # @param [String] version @see #version + # @param [Platform] platform @see #platform + # @param [Symbol] bundle_package_type @see #bundle_package_type + # @param [Hash] additional_entries @see #additional_entries + # + def initialize(version, platform, bundle_package_type = :fmwk, additional_entries = {}) + @version = version + @platform = platform + @bundle_package_type = bundle_package_type + @additional_entries = additional_entries + end + + # Generates and saves the Info.plist to the given path. + # + # @param [Pathname] path + # the path where the prefix header should be stored. + # + # @return [void] + # + def save_as(path) + contents = generate + path.open('w') do |f| + f.write(contents) + end + end + + # Generates the contents of the Info.plist + # + # @return [String] + # + def generate + to_plist(info) + end + + private + + def header + <<-PLIST + + + + PLIST + end + + def footer + <<-PLIST + + PLIST + end + + def to_plist(root) + serialize(root, header) << footer + end + + def serialize(value, output, indentation = 0) + indent = ' ' * indentation + case value + when Array + output << indent << "\n" + value.each { |v| serialize(v, output, indentation + 2) } + output << indent << "\n" + when Hash + output << indent << "\n" + value.to_a.sort_by(&:first).each do |key, v| + output << indent << ' ' << "#{key}\n" + serialize(v, output, indentation + 2) + end + output << indent << "\n" + when String + output << indent << "#{value}\n" + when true + output << indent << "\n" + when false + output << indent << "\n" + end + output + end + + def info + info = { + 'CFBundleIdentifier' => '${PRODUCT_BUNDLE_IDENTIFIER}', + 'CFBundleInfoDictionaryVersion' => '6.0', + 'CFBundleName' => '${PRODUCT_NAME}', + 'CFBundlePackageType' => bundle_package_type.to_s.upcase, + 'CFBundleShortVersionString' => version, + 'CFBundleSignature' => '????', + 'CFBundleVersion' => '${CURRENT_PROJECT_VERSION}', + 'NSPrincipalClass' => '', + 'CFBundleDevelopmentRegion' => 'en', + } + + info['CFBundleExecutable'] = '${EXECUTABLE_NAME}' if bundle_package_type != :bndl + info['CFBundleVersion'] = '1' if bundle_package_type == :bndl + info['NSPrincipalClass'] = 'NSApplication' if bundle_package_type == :appl && platform == :osx + + info.merge!(additional_entries) + + info + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/module_map.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/module_map.rb new file mode 100644 index 0000000..e3a9ea5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/module_map.rb @@ -0,0 +1,99 @@ +module Pod + module Generator + # Generates LLVM module map files. A module map file is generated for each + # Pod and for each Pod target definition that is built as a framework. It + # specifies a different umbrella header than usual to avoid name conflicts + # with existing headers of the podspec. 
+ # + class ModuleMap + # @return [PodTarget, AggregateTarget] the target the module map is generated for. + # + attr_reader :target + + attr_reader :headers + + Header = Struct.new(:path, :umbrella, :private, :textual, :exclude, :size, :mtime) do + alias_method :private?, :private + def to_s + [ + (:private if private?), + (:textual if textual), + (:umbrella if umbrella), + (:exclude if exclude), + 'header', + %("#{path.to_s.gsub('"', '\"')}"), + attrs, + ].compact.join(' ') + end + + def attrs + attrs = { + 'size' => size, + 'mtime' => mtime, + }.reject { |_k, v| v.nil? } + return nil if attrs.empty? + attrs.to_s + end + end + + # Initialize a new instance + # + # @param [PodTarget, AggregateTarget] target @see target + # + def initialize(target) + @target = target + @headers = [ + Header.new(target.umbrella_header_path.basename, true), + ] + end + + # Generates and saves the Info.plist to the given path. + # + # @param [Pathname] path + # the path where the prefix header should be stored. + # + # @return [void] + # + def save_as(path) + contents = generate + path.open('w') do |f| + f.write(contents) + end + end + + # Generates the contents of the module.modulemap file. + # + # @return [String] + # + def generate + <<-MODULE_MAP.strip_heredoc +#{module_specifier_prefix}module #{target.product_module_name}#{module_declaration_attributes} { + #{headers.join("\n ")} + + export * + module * { export * } +} + MODULE_MAP + end + + private + + # The prefix to `module` to prepend in the module map. + # Ensures that only framework targets have `framework` prepended. + # + def module_specifier_prefix + if target.build_as_framework? + 'framework ' + else + '' + end + end + + # The suffix attributes to `module`. + # + def module_declaration_attributes + '' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/prefix_header.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/prefix_header.rb new file mode 100644 index 0000000..34036e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/prefix_header.rb @@ -0,0 +1,60 @@ +module Pod + module Generator + # Generates a prefix header file for a Pods library. The prefix header is + # generated according to the platform of the target and the pods. + # + # According to the platform the prefix header imports `UIKit/UIKit.h` or + # `Cocoa/Cocoa.h`. + # + class PrefixHeader < Header + # @return [Array] The file accessors for which to generate + # the prefix header. + # + attr_reader :file_accessors + + # Initialize a new instance + # + # @param [Array] file_accessors + # @see #file_accessors + # + # @param [Platform] platform + # @see Header#platform + # + def initialize(file_accessors, platform) + @file_accessors = file_accessors + super platform + end + + # Generates the contents of the prefix header according to the platform + # and the pods. + # + # @note Only unique prefix_header_contents are added to the prefix + # header. + # + # @return [String] + # + # @todo Subspecs can specify prefix header information too. + # @todo Check to see if we have a similar duplication issue with + # file_accessor.prefix_header. 
+ # + def generate + result = super + + unique_prefix_header_contents = file_accessors.map do |file_accessor| + file_accessor.spec_consumer.prefix_header_contents + end.compact.uniq + + unique_prefix_header_contents.each do |prefix_header_contents| + result << prefix_header_contents + result << "\n" + end + + file_accessors.map(&:prefix_header).compact.uniq.each do |prefix_header| + result << Pathname(prefix_header).read + end + + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/script_phase_constants.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/script_phase_constants.rb new file mode 100644 index 0000000..ff95605 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/script_phase_constants.rb @@ -0,0 +1,100 @@ +module Pod + module Generator + module ScriptPhaseConstants + DEFAULT_SCRIPT_PHASE_HEADER = <<-SH.strip_heredoc.freeze +#!/bin/sh +set -e +set -u +set -o pipefail + +function on_error { + echo "$(realpath -mq "${0}"):$1: error: Unexpected failure" +} +trap 'on_error $LINENO' ERR + SH + + RSYNC_PROTECT_TMP_FILES = <<-SH.strip_heredoc.freeze +# This protects against multiple targets copying the same framework dependency at the same time. The solution +# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html +RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????") + SH + + STRIP_INVALID_ARCHITECTURES_METHOD = <<-SH.strip_heredoc.freeze +# Used as a return value for each invocation of `strip_invalid_archs` function. +STRIP_BINARY_RETVAL=0 + +# Strip invalid architectures +strip_invalid_archs() { + binary="$1" + warn_missing_arch=${2:-true} + # Get architectures for current target binary + binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)" + # Intersect them with the architectures we are building for + intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\\n' | sort | uniq -d)" + # If there are no archs supported by this binary then warn the user + if [[ -z "$intersected_archs" ]]; then + if [[ "$warn_missing_arch" == "true" ]]; then + echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)." + fi + STRIP_BINARY_RETVAL=1 + return + fi + stripped="" + for arch in $binary_archs; do + if ! [[ "${ARCHS}" == *"$arch"* ]]; then + # Strip non-valid architectures in-place + lipo -remove "$arch" -output "$binary" "$binary" + stripped="$stripped $arch" + fi + done + if [[ "$stripped" ]]; then + echo "Stripped $binary of architectures:$stripped" + fi + STRIP_BINARY_RETVAL=0 +} + SH + + INSTALL_DSYM_METHOD = <<-SH.strip_heredoc.freeze +# Copies and strips a vendored dSYM +install_dsym() { + local source="$1" + warn_missing_arch=${2:-true} + if [ -r "$source" ]; then + # Copy the dSYM into the targets temp dir. 
+ echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" --filter \\"- Headers\\" --filter \\"- PrivateHeaders\\" --filter \\"- Modules\\" \\"${source}\\" \\"${DERIVED_FILES_DIR}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}" + + local basename + basename="$(basename -s .dSYM "$source")" + binary_name="$(ls "$source/Contents/Resources/DWARF")" + binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}" + + # Strip invalid architectures from the dSYM. + if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then + strip_invalid_archs "$binary" "$warn_missing_arch" + fi + if [[ $STRIP_BINARY_RETVAL == 0 ]]; then + # Move the stripped file into its final destination. + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" --filter \\"- Headers\\" --filter \\"- PrivateHeaders\\" --filter \\"- Modules\\" \\"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\\" \\"${DWARF_DSYM_FOLDER_PATH}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}" + else + # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing. + mkdir -p "${DWARF_DSYM_FOLDER_PATH}" + touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM" + fi + fi +} + SH + + INSTALL_BCSYMBOLMAP_METHOD = <<-SH.strip_heredoc.freeze +# Copies the bcsymbolmap files of a vendored framework +install_bcsymbolmap() { + local bcsymbolmap_path="$1" + local destination="${BUILT_PRODUCTS_DIR}" + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}" +} + SH + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/umbrella_header.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/umbrella_header.rb new file mode 100644 index 0000000..a3e4a0f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/generator/umbrella_header.rb @@ -0,0 +1,46 @@ +module Pod + module Generator + # Generates an umbrella header file for clang modules, which are used by + # dynamic frameworks on iOS 8 and OSX 10.10 under the hood. + # + # If the target is a +PodTarget+, then the umbrella header is required + # to make all public headers in a convenient manner available without the + # need to write out header declarations for every library header. 
+ # + class UmbrellaHeader < Header + # @return [Target] + # the target, which provides the product name + attr_reader :target + + # Initialize a new instance + # + # @param [Target] target + # @see target + # + def initialize(target) + super(target.platform) + @target = target + end + + # Generates the contents of the umbrella header according to the included + # pods. + # + # @return [String] + # + def generate + result = super + + result << "\n" + + result << <<-eos.strip_heredoc + FOUNDATION_EXPORT double #{target.product_module_name}VersionNumber; + FOUNDATION_EXPORT const unsigned char #{target.product_module_name}VersionString[]; + eos + + result << "\n" + + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/hooks_manager.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/hooks_manager.rb new file mode 100644 index 0000000..6cb5788 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/hooks_manager.rb @@ -0,0 +1,132 @@ +require 'active_support/core_ext/hash/indifferent_access' + +module Pod + # Provides support for the hook system of CocoaPods. The system is designed + # especially for plugins. Interested clients can register to notifications by + # name. + # + # The blocks, to prevent compatibility issues, will receive + # one and only one argument: a context object. This object should be simple + # storage of information (a typed hash). Notifications senders are + # responsible to indicate the class of the object associated with their + # notification name. + # + # Context object should not remove attribute accessors to not break + # compatibility with the plugins (this promise will be honoured strictly + # from CocoaPods 1.0). + # + module HooksManager + # Represents a single registered hook. + # + class Hook + # @return [String] + # The name of the plugin that registered the hook. + # + attr_reader :plugin_name + + # @return [String] + # The name of the hook. + # + attr_reader :name + + # @return [Proc] + # The block. + # + attr_reader :block + + # Initialize a new instance + # + # @param [String] name @see {#name}. + # + # @param [String] plugin_name @see {#plugin_name}. + # + # @param [Proc] block @see {#block}. + # + def initialize(name, plugin_name, block) + raise ArgumentError, 'Missing name' unless name + raise ArgumentError, 'Missing plugin_name' unless plugin_name + raise ArgumentError, 'Missing block' unless block + + @name = name + @plugin_name = plugin_name + @block = block + end + end + + class << self + # @return [Hash{Symbol => Array}] The list of the hooks that are + # registered for each hook name. + # + attr_reader :registrations + + # Registers a block for the hook with the given name. + # + # @param [String] plugin_name + # The name of the plugin the hook comes from. + # + # @param [Symbol] hook_name + # The name of the notification. + # + # @param [Proc] block + # The block. 
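# --- Illustrative aside (not part of the vendored gem source) ---
# A typical registration, as a plugin's entry point would perform it;
# 'cocoapods-myplugin' is a hypothetical plugin name:
#
#   Pod::HooksManager.register('cocoapods-myplugin', :post_install) do |context|
#     Pod::UI.message 'cocoapods-myplugin: post_install hook ran'
#   end
#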
+ # + def register(plugin_name, hook_name, &block) + @registrations ||= {} + @registrations[hook_name] ||= [] + @registrations[hook_name] << Hook.new(hook_name, plugin_name, block) + end + + # Returns all the hooks to run for the given event name + # and set of whitelisted plugins + # + # @see #run + # + # @return [Array] the hooks to run + # + def hooks_to_run(name, whitelisted_plugins = nil) + return [] unless registrations + hooks = registrations.fetch(name, []) + return hooks unless whitelisted_plugins + hooks.select { |hook| whitelisted_plugins.key?(hook.plugin_name) } + end + + # Runs all the registered blocks for the hook with the given name. + # + # @param [Symbol] name + # The name of the hook. + # + # @param [Object] context + # The context object which should be passed to the blocks. + # + # @param [Hash] whitelisted_plugins + # The plugins that should be run, in the form of a hash keyed by + # plugin name, where the values are the custom options that should + # be passed to the hook's block if it supports taking a second + # argument. + # + def run(name, context, whitelisted_plugins = nil) + raise ArgumentError, 'Missing name' unless name + raise ArgumentError, 'Missing options' unless context + + hooks = hooks_to_run(name, whitelisted_plugins) + return if hooks.empty? + + UI.message "- Running #{name.to_s.tr('_', ' ')} hooks" do + hooks.each do |hook| + UI.message "- #{hook.plugin_name} from " \ + "`#{hook.block.source_location.first}`" do + block = hook.block + if block.arity > 1 + user_options = whitelisted_plugins[hook.plugin_name] + user_options = user_options.with_indifferent_access if user_options + block.call(context, user_options) + else + block.call(context) + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer.rb new file mode 100644 index 0000000..067b5fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer.rb @@ -0,0 +1,1044 @@ +require 'active_support/core_ext/string/inflections' +require 'fileutils' +require 'cocoapods/podfile' + +module Pod + # The Installer is responsible of taking a Podfile and transform it in the + # Pods libraries. It also integrates the user project so the Pods + # libraries can be used out of the box. + # + # The Installer is capable of doing incremental updates to an existing Pod + # installation. + # + # The Installer gets the information that it needs mainly from 3 files: + # + # - Podfile: The specification written by the user that contains + # information about targets and Pods. + # - Podfile.lock: Contains information about the pods that were previously + # installed and in concert with the Podfile provides information about + # which specific version of a Pod should be installed. This file is + # ignored in update mode. + # - Manifest.lock: A file contained in the Pods folder that keeps track of + # the pods installed in the local machine. This files is used once the + # exact versions of the Pods has been computed to detect if that version + # is already installed. This file is not intended to be kept under source + # control and is a copy of the Podfile.lock. + # + # The Installer is designed to work in environments where the Podfile folder + # is under source control and environments where it is not. The rest of the + # files, like the user project and the workspace are assumed to be under + # source control. 
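# --- Illustrative aside (not part of the vendored gem source) ---
# A minimal sketch of driving the Installer programmatically, mirroring what
# `pod install` does. It assumes a Podfile (and optionally a Podfile.lock)
# exists in the current working directory.
require 'cocoapods'

config    = Pod::Config.instance
installer = Pod::Installer.new(config.sandbox, config.podfile, config.lockfile)
installer.repo_update = false # do not refresh the spec repos first
installer.update      = false # honor the versions pinned in Podfile.lock
installer.install!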
+ # + class Installer + autoload :Analyzer, 'cocoapods/installer/analyzer' + autoload :InstallationOptions, 'cocoapods/installer/installation_options' + autoload :PostInstallHooksContext, 'cocoapods/installer/post_install_hooks_context' + autoload :PreInstallHooksContext, 'cocoapods/installer/pre_install_hooks_context' + autoload :BaseInstallHooksContext, 'cocoapods/installer/base_install_hooks_context' + autoload :PostIntegrateHooksContext, 'cocoapods/installer/post_integrate_hooks_context' + autoload :PreIntegrateHooksContext, 'cocoapods/installer/pre_integrate_hooks_context' + autoload :SourceProviderHooksContext, 'cocoapods/installer/source_provider_hooks_context' + autoload :PodfileValidator, 'cocoapods/installer/podfile_validator' + autoload :PodSourceInstaller, 'cocoapods/installer/pod_source_installer' + autoload :PodSourcePreparer, 'cocoapods/installer/pod_source_preparer' + autoload :UserProjectIntegrator, 'cocoapods/installer/user_project_integrator' + autoload :Xcode, 'cocoapods/installer/xcode' + autoload :SandboxHeaderPathsInstaller, 'cocoapods/installer/sandbox_header_paths_installer' + autoload :SandboxDirCleaner, 'cocoapods/installer/sandbox_dir_cleaner' + autoload :ProjectCache, 'cocoapods/installer/project_cache/project_cache' + autoload :TargetUUIDGenerator, 'cocoapods/installer/target_uuid_generator' + + include Config::Mixin + + MASTER_SPECS_REPO_GIT_URL = 'https://github.com/CocoaPods/Specs.git'.freeze + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Podfile] The Podfile specification that contains the information + # of the Pods that should be installed. + # + attr_reader :podfile + + # @return [Lockfile] The Lockfile that stores the information about the + # Pods previously installed on any machine. + # + attr_reader :lockfile + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Podfile] podfile @see #podfile + # @param [Lockfile] lockfile @see #lockfile + # + def initialize(sandbox, podfile, lockfile = nil) + @sandbox = sandbox || raise(ArgumentError, 'Missing required argument `sandbox`') + @podfile = podfile || raise(ArgumentError, 'Missing required argument `podfile`') + @lockfile = lockfile + + @use_default_plugins = true + @has_dependencies = true + @pod_installers = [] + end + + # @return [Hash, Boolean, nil] Pods that have been requested to be + # updated or true if all Pods should be updated. + # If all Pods should been updated the contents of the Lockfile are + # not taken into account for deciding what Pods to install. + # + attr_accessor :update + + # @return [Boolean] Whether it has dependencies. Defaults to true. + # + attr_accessor :has_dependencies + alias_method :has_dependencies?, :has_dependencies + + # @return [Boolean] Whether the spec repos should be updated. + # + attr_accessor :repo_update + alias_method :repo_update?, :repo_update + + # @return [Boolean] Whether default plugins should be used during + # installation. Defaults to true. + # + attr_accessor :use_default_plugins + alias_method :use_default_plugins?, :use_default_plugins + + # @return [Boolean] Whether installation should verify that there are no + # Podfile or Lockfile changes. Defaults to false. + # + attr_accessor :deployment + alias_method :deployment?, :deployment + + # @return [Boolean] Whether installation should ignore the contents of the project cache + # when incremental installation is enabled. 
+ # + attr_accessor :clean_install + alias_method :clean_install?, :clean_install + + #-------------------------------------------------------------------------# + + private + + # @return [Array] the pod installers created + # while installing pod targets + # + attr_reader :pod_installers + + # @return [ProjectInstallationCache] The installation cache stored in Pods/.project_cache/installation_cache + # + attr_reader :installation_cache + + # @return [ProjectMetadataCache] The metadata cache stored in Pods/.project_cache/metadata_cache + # + attr_reader :metadata_cache + + # @return [ProjectCacheVersion] The version of the project cache stored in Pods/.project_cache/version + # + attr_reader :project_cache_version + + #-------------------------------------------------------------------------# + + public + + # Installs the Pods. + # + # The installation process is mostly linear with a few minor complications + # to keep in mind: + # + # - The stored podspecs need to be cleaned before the resolution step + # otherwise the sandbox might return an old podspec and not download + # the new one from an external source. + # - The resolver might trigger the download of Pods from external sources + # necessary to retrieve their podspec (unless it is instructed not to + # do it). + # + # @return [void] + # + def install! + prepare + resolve_dependencies + download_dependencies + validate_targets + if installation_options.skip_pods_project_generation? + show_skip_pods_project_generation_message + else + integrate + end + write_lockfiles + perform_post_install_actions + end + + def show_skip_pods_project_generation_message + UI.section 'Skipping Pods Project Creation' + UI.section 'Skipping User Project Integration' + end + + def integrate + run_podfile_pre_integrate_hooks + generate_pods_project + if installation_options.integrate_targets? + integrate_user_project + else + UI.section 'Skipping User Project Integration' + end + end + + def analyze_project_cache + user_projects = aggregate_targets.map(&:user_project).compact.uniq + object_version = user_projects.min_by { |p| p.object_version.to_i }.object_version.to_i unless user_projects.empty? + + if !installation_options.incremental_installation + # Run entire installation. 
+ ProjectCache::ProjectCacheAnalysisResult.new(pod_targets, aggregate_targets, {}, + analysis_result.all_user_build_configurations, object_version) + else + UI.message 'Analyzing Project Cache' do + @installation_cache = ProjectCache::ProjectInstallationCache.from_file(sandbox, sandbox.project_installation_cache_path) + @metadata_cache = ProjectCache::ProjectMetadataCache.from_file(sandbox, sandbox.project_metadata_cache_path) + @project_cache_version = ProjectCache::ProjectCacheVersion.from_file(sandbox.project_version_cache_path) + + force_clean_install = clean_install || project_cache_version.version != Version.create(VersionMetadata.project_cache_version) + cache_result = ProjectCache::ProjectCacheAnalyzer.new(sandbox, installation_cache, analysis_result.all_user_build_configurations, + object_version, plugins, pod_targets, aggregate_targets, installation_options.to_h, :clean_install => force_clean_install).analyze + aggregate_targets_to_generate = cache_result.aggregate_targets_to_generate || [] + pod_targets_to_generate = cache_result.pod_targets_to_generate + (aggregate_targets_to_generate + pod_targets_to_generate).each do |target| + UI.message "- Regenerating #{target.label}" + end + cache_result + end + end + end + + def prepare + # Raise if pwd is inside Pods + if Dir.pwd.start_with?(sandbox.root.to_path) + message = 'Command should be run from a directory outside Pods directory.' + message << "\n\n\tCurrent directory is #{UI.path(Pathname.pwd)}\n" + raise Informative, message + end + UI.message 'Preparing' do + deintegrate_if_different_major_version + sandbox.prepare + ensure_plugins_are_installed! + run_plugins_pre_install_hooks + end + end + + # @return [Analyzer] The analyzer used to resolve dependencies + # + def resolve_dependencies + plugin_sources = run_source_provider_hooks + analyzer = create_analyzer(plugin_sources) + + UI.section 'Updating local specs repositories' do + analyzer.update_repositories + end if repo_update? + + UI.section 'Analyzing dependencies' do + analyze(analyzer) + validate_build_configurations + end + + UI.section 'Verifying no changes' do + verify_no_podfile_changes! + verify_no_lockfile_changes! + end if deployment? + + analyzer + end + + def download_dependencies + UI.section 'Downloading dependencies' do + install_pod_sources + run_podfile_pre_install_hooks + clean_pod_sources + end + end + + # Stages the sandbox after analysis. + # + # @param [Sandbox] sandbox + # The sandbox to stage. + # + # @param [Array] pod_targets + # The list of all pod targets. + # + # @return [void] + # + def stage_sandbox(sandbox, pod_targets) + SandboxHeaderPathsInstaller.new(sandbox, pod_targets).install! + end + + #-------------------------------------------------------------------------# + + # @!group Pods Project Generation + + private + + def create_generator(pod_targets_to_generate, aggregate_targets_to_generate, build_configurations, project_object_version, generate_multiple_pod_projects = false) + if generate_multiple_pod_projects + Xcode::MultiPodsProjectGenerator.new(sandbox, aggregate_targets_to_generate, pod_targets_to_generate, + build_configurations, installation_options, config, project_object_version, metadata_cache) + else + Xcode::SinglePodsProjectGenerator.new(sandbox, aggregate_targets_to_generate, pod_targets_to_generate, build_configurations, installation_options, config, project_object_version) + end + end + + # Generates the Xcode project(s) that go inside the `Pods/` directory. 
+ # + def generate_pods_project + stage_sandbox(sandbox, pod_targets) + + cache_analysis_result = analyze_project_cache + pod_targets_to_generate = cache_analysis_result.pod_targets_to_generate + aggregate_targets_to_generate = cache_analysis_result.aggregate_targets_to_generate + + clean_sandbox(pod_targets_to_generate) + + create_and_save_projects(pod_targets_to_generate, aggregate_targets_to_generate, + cache_analysis_result.build_configurations, cache_analysis_result.project_object_version) + SandboxDirCleaner.new(sandbox, pod_targets, aggregate_targets).clean! + + update_project_cache(cache_analysis_result, target_installation_results) + end + + def create_and_save_projects(pod_targets_to_generate, aggregate_targets_to_generate, build_configurations, project_object_version) + UI.section 'Generating Pods project' do + generator = create_generator(pod_targets_to_generate, aggregate_targets_to_generate, + build_configurations, project_object_version, + installation_options.generate_multiple_pod_projects) + + pod_project_generation_result = generator.generate! + @target_installation_results = pod_project_generation_result.target_installation_results + @pods_project = pod_project_generation_result.project + # The `pod_target_subprojects` is used for backwards compatibility so that consumers can iterate over + # all pod targets across projects without needing to open each one. + @pod_target_subprojects = pod_project_generation_result.projects_by_pod_targets.keys + @generated_projects = ([pods_project] + pod_target_subprojects || []).compact + @generated_pod_targets = pod_targets_to_generate + @generated_aggregate_targets = aggregate_targets_to_generate || [] + projects_by_pod_targets = pod_project_generation_result.projects_by_pod_targets + + predictabilize_uuids(generated_projects) if installation_options.deterministic_uuids? + stabilize_target_uuids(generated_projects) + + projects_writer = Xcode::PodsProjectWriter.new(sandbox, generated_projects, + target_installation_results.pod_target_installation_results, installation_options) + projects_writer.write! do + run_podfile_post_install_hooks + end + + pods_project_pod_targets = pod_targets_to_generate - projects_by_pod_targets.values.flatten + all_projects_by_pod_targets = {} + pods_project_by_targets = { pods_project => pods_project_pod_targets } if pods_project + all_projects_by_pod_targets.merge!(pods_project_by_targets) if pods_project_by_targets + all_projects_by_pod_targets.merge!(projects_by_pod_targets) if projects_by_pod_targets + all_projects_by_pod_targets.each do |project, pod_targets| + generator.configure_schemes(project, pod_targets, pod_project_generation_result) + end + end + end + + def predictabilize_uuids(projects) + UI.message('- Generating deterministic UUIDs') { Xcodeproj::Project.predictabilize_uuids(projects) } + end + + def stabilize_target_uuids(projects) + UI.message('- Stabilizing target UUIDs') { TargetUUIDGenerator.new(projects).generate! } + end + + #-------------------------------------------------------------------------# + + public + + # @!group Installation results + + # @return [Analyzer::AnalysisResult] the result of the analysis performed during installation + # + attr_reader :analysis_result + + # @return [Array] the installation results produced by the pods project + # generator + # + attr_reader :target_installation_results + + # @return [Pod::Project] the `Pods/Pods.xcodeproj` project. + # + attr_reader :pods_project + + # @return [Array] the subprojects nested under pods_project. 
+ # + attr_reader :pod_target_subprojects + + # @return [Array] The model representations of an + # aggregation of pod targets generated for a target definition + # in the Podfile as result of the analyzer. + # + attr_reader :aggregate_targets + + # @return [Array] The model representations of pod targets + # generated as result of the analyzer. + # + attr_reader :pod_targets + + # @return [Array] The list of projects generated from the installation. + # + attr_reader :generated_projects + + # @return [Array] The list of pod targets that were generated from the installation. + # + attr_reader :generated_pod_targets + + # @return [Array] The list of aggregate targets that were generated from the installation. + # + attr_reader :generated_aggregate_targets + + # @return [Array] The specifications that were installed. + # + attr_accessor :installed_specs + + #-------------------------------------------------------------------------# + + private + + # @!group Installation steps + + # Performs the analysis. + # + # @param [Analyzer] analyzer the analyzer to use for analysis + # + # @return [void] + # + def analyze(analyzer = create_analyzer) + @analysis_result = analyzer.analyze + @aggregate_targets = @analysis_result.targets + @pod_targets = @analysis_result.pod_targets + end + + def create_analyzer(plugin_sources = nil) + Analyzer.new(sandbox, podfile, lockfile, plugin_sources, has_dependencies?, update) + end + + # Ensures that the white-listed build configurations are known to prevent + # silent typos. + # + # @raise If an unknown user configuration is found. + # + def validate_build_configurations + whitelisted_configs = pod_targets. + flat_map(&:target_definitions). + flat_map(&:all_whitelisted_configurations). + map(&:downcase). + uniq + all_user_configurations = analysis_result.all_user_build_configurations.keys.map(&:downcase) + + remainder = whitelisted_configs - all_user_configurations + unless remainder.empty? + raise Informative, + "Unknown #{'configuration'.pluralize(remainder.size)} whitelisted: #{remainder.sort.to_sentence}. " \ + "CocoaPods found #{all_user_configurations.sort.to_sentence}, did you mean one of these?" + end + end + + # @return [void] In this step we clean all the header folders for pod targets that will be + # regenerated from scratch and cleanup any pods that have been removed. + # + def clean_sandbox(pod_targets) + pod_targets.each do |pod_target| + pod_target.build_headers.implode_path!(pod_target.headers_sandbox) + sandbox.public_headers.implode_path!(pod_target.headers_sandbox) + end + + unless sandbox_state.deleted.empty? + title_options = { :verbose_prefix => '-> '.red } + sandbox_state.deleted.each do |pod_name| + UI.titled_section("Removing #{pod_name}".red, title_options) do + sandbox.clean_pod(pod_name) + end + end + end + end + + # @raise [Informative] If there are any Podfile changes + # + def verify_no_podfile_changes! + return unless analysis_result.podfile_needs_install? + + changed_state = analysis_result.podfile_state.to_s(:states => %i(added deleted changed)) + raise Informative, "There were changes to the podfile in deployment mode:\n#{changed_state}" + end + + # @raise [Informative] If there are any Lockfile changes + # + def verify_no_lockfile_changes! 
+ new_lockfile = generate_lockfile + return if new_lockfile == lockfile + + return unless diff = Xcodeproj::Differ.hash_diff(lockfile.to_hash, new_lockfile.to_hash, :key_1 => 'Old Lockfile', :key_2 => 'New Lockfile') + pretty_diff = YAMLHelper.convert_hash(diff, Lockfile::HASH_KEY_ORDER, "\n\n") + pretty_diff.gsub!(':diff:', 'diff:'.yellow) + + raise Informative, "There were changes to the lockfile in deployment mode:\n#{pretty_diff}" + end + + # Downloads, installs the documentation and cleans the sources of the Pods + # which need to be installed. + # + # @return [void] + # + def install_pod_sources + @installed_specs = [] + pods_to_install = sandbox_state.added | sandbox_state.changed + title_options = { :verbose_prefix => '-> '.green } + root_specs.sort_by(&:name).each do |spec| + if pods_to_install.include?(spec.name) + if sandbox_state.changed.include?(spec.name) && sandbox.manifest + current_version = spec.version + previous_version = sandbox.manifest.version(spec.name) + has_changed_version = current_version != previous_version + current_repo = analysis_result.specs_by_source.detect { |key, values| break key if values.map(&:name).include?(spec.name) } + current_repo &&= (Pod::TrunkSource::TRUNK_REPO_NAME if current_repo.name == Pod::TrunkSource::TRUNK_REPO_NAME) || current_repo.url || current_repo.name + previous_spec_repo = sandbox.manifest.spec_repo(spec.name) + has_changed_repo = !previous_spec_repo.nil? && current_repo && !current_repo.casecmp(previous_spec_repo).zero? + title = "Installing #{spec.name} #{spec.version}" + title << " (was #{previous_version} and source changed to `#{current_repo}` from `#{previous_spec_repo}`)" if has_changed_version && has_changed_repo + title << " (was #{previous_version})" if has_changed_version && !has_changed_repo + title << " (source changed to `#{current_repo}` from `#{previous_spec_repo}`)" if !has_changed_version && has_changed_repo + else + title = "Installing #{spec}" + end + UI.titled_section(title.green, title_options) do + install_source_of_pod(spec.name) + end + else + UI.section("Using #{spec}", title_options[:verbose_prefix]) do + create_pod_installer(spec.name) + end + end + end + end + + def create_pod_installer(pod_name) + specs_by_platform = specs_for_pod(pod_name) + + if specs_by_platform.empty? + requiring_targets = pod_targets.select { |pt| pt.recursive_dependent_targets.any? { |dt| dt.pod_name == pod_name } } + message = "Could not install '#{pod_name}' pod" + message += ", dependended upon by #{requiring_targets.to_sentence}" unless requiring_targets.empty? + message += '. There is either no platform to build for, or no target to build.' + raise StandardError, message + end + + pod_installer = PodSourceInstaller.new(sandbox, podfile, specs_by_platform, :can_cache => installation_options.clean?) + pod_installers << pod_installer + pod_installer + end + + # The specifications matching the specified pod name + # + # @param [String] pod_name the name of the pod + # + # @return [Hash{Platform => Array}] the specifications grouped by platform + # + def specs_for_pod(pod_name) + pod_targets.each_with_object({}) do |pod_target, hash| + if pod_target.root_spec.name == pod_name + hash[pod_target.platform] ||= [] + hash[pod_target.platform].concat(pod_target.specs) + end + end + end + + # Install the Pods. If the resolver indicated that a Pod should be + # installed and it exits, it is removed and then reinstalled. In any case if + # the Pod doesn't exits it is installed. 
+ # + # @return [void] + # + def install_source_of_pod(pod_name) + pod_installer = create_pod_installer(pod_name) + pod_installer.install! + @installed_specs.concat(pod_installer.specs_by_platform.values.flatten.uniq) + end + + # Cleans the sources of the Pods if the config instructs to do so. + # + # + def clean_pod_sources + return unless installation_options.clean? + return if installed_specs.empty? + pod_installers.each(&:clean!) + end + + # Unlocks the sources of the Pods. + # + def unlock_pod_sources + pod_installers.each do |installer| + pod_target = pod_targets.find { |target| target.pod_name == installer.name } + installer.unlock_files!(pod_target.file_accessors) + end + end + + # Locks the sources of the Pods if the config instructs to do so. + # + def lock_pod_sources + return unless installation_options.lock_pod_sources? + pod_installers.each do |installer| + pod_target = pod_targets.find { |target| target.pod_name == installer.name } + installer.lock_files!(pod_target.file_accessors) + end + end + + def validate_targets + validator = Xcode::TargetValidator.new(aggregate_targets, pod_targets, installation_options) + validator.validate! + end + + # Runs the registered callbacks for the plugins pre install hooks. + # + # @return [void] + # + def run_plugins_pre_install_hooks + context = PreInstallHooksContext.generate(sandbox, podfile, lockfile) + HooksManager.run(:pre_install, context, plugins) + end + + # Performs any post-installation actions + # + # @return [void] + # + def perform_post_install_actions + run_plugins_post_install_hooks + warn_for_deprecations + warn_for_installed_script_phases + warn_for_removing_git_master_specs_repo + print_post_install_message + end + + def print_post_install_message + podfile_dependencies = analysis_result.podfile_dependency_cache.podfile_dependencies.size + pods_installed = root_specs.size + title_options = { :verbose_prefix => '-> '.green } + UI.titled_section('Pod installation complete! ' \ + "There #{podfile_dependencies == 1 ? 'is' : 'are'} #{podfile_dependencies} " \ + "#{'dependency'.pluralize(podfile_dependencies)} from the Podfile " \ + "and #{pods_installed} total #{'pod'.pluralize(pods_installed)} installed.".green, + title_options) + end + + # Runs the registered callbacks for the plugins pre integrate hooks. + # + def run_plugins_pre_integrate_hooks + if any_plugin_pre_integrate_hooks? + context = PreIntegrateHooksContext.generate(sandbox, pods_project, aggregate_targets) + HooksManager.run(:pre_integrate, context, plugins) + end + end + + # Runs the registered callbacks for the plugins post install hooks. + # + def run_plugins_post_install_hooks + # This short-circuits because unlocking pod sources is expensive + if any_plugin_post_install_hooks? + unlock_pod_sources + + context = PostInstallHooksContext.generate(sandbox, pods_project, aggregate_targets) + HooksManager.run(:post_install, context, plugins) + end + + lock_pod_sources + end + + # Runs the registered callbacks for the plugins post integrate hooks. + # + def run_plugins_post_integrate_hooks + if any_plugin_post_integrate_hooks? + context = PostIntegrateHooksContext.generate(sandbox, pods_project, aggregate_targets) + HooksManager.run(:post_integrate, context, plugins) + end + end + + # @return [Boolean] whether there are any plugin pre-integrate hooks to run + # + def any_plugin_pre_integrate_hooks? + HooksManager.hooks_to_run(:pre_integrate, plugins).any? 
+ end + + # @return [Boolean] whether there are any plugin post-install hooks to run + # + def any_plugin_post_install_hooks? + HooksManager.hooks_to_run(:post_install, plugins).any? + end + + # @return [Boolean] whether there are any plugin post-integrate hooks to run + # + def any_plugin_post_integrate_hooks? + HooksManager.hooks_to_run(:post_integrate, plugins).any? + end + + # Runs the registered callbacks for the source provider plugin hooks. + # + # @return [Array] the plugin sources + # + def run_source_provider_hooks + context = SourceProviderHooksContext.generate + HooksManager.run(:source_provider, context, plugins) + context.sources + end + + # Run the deintegrator against all projects in the installation root if the + # current CocoaPods major version part is different than the one in the + # lockfile. + # + # @return [void] + # + def deintegrate_if_different_major_version + return unless lockfile + return if lockfile.cocoapods_version.major == Version.create(VERSION).major + UI.section('Re-creating CocoaPods due to major version update.') do + projects = Pathname.glob(config.installation_root + '*.xcodeproj').map { |path| Xcodeproj::Project.open(path) } + deintegrator = Deintegrator.new + projects.each do |project| + config.with_changes(:silent => true) { deintegrator.deintegrate_project(project) } + project.save if project.dirty? + end + end + end + + # Ensures that all plugins specified in the {#podfile} are loaded. + # + # @return [void] + # + def ensure_plugins_are_installed! + require 'claide/command/plugin_manager' + + loaded_plugins = Command::PluginManager.specifications.map(&:name) + + podfile.plugins.keys.each do |plugin| + unless loaded_plugins.include? plugin + raise Informative, "Your Podfile requires that the plugin `#{plugin}` be installed. Please install it and try installation again." + end + end + end + + DEFAULT_PLUGINS = {} + + # Returns the plugins that should be run, as indicated by the default + # plugins and the podfile's plugins + # + # @return [Hash] The plugins to be used + # + def plugins + if use_default_plugins? + DEFAULT_PLUGINS.merge(podfile.plugins) + else + podfile.plugins + end + end + + # Prints a warning for any pods that are deprecated + # + # @return [void] + # + def warn_for_deprecations + deprecated_pods = root_specs.select do |spec| + spec.deprecated || spec.deprecated_in_favor_of + end + deprecated_pods.each do |spec| + if spec.deprecated_in_favor_of + UI.warn "#{spec.name} has been deprecated in " \ + "favor of #{spec.deprecated_in_favor_of}" + else + UI.warn "#{spec.name} has been deprecated" + end + end + end + + # Prints a warning for any pods that included script phases + # + # @return [void] + # + def warn_for_installed_script_phases + pods_to_install = sandbox_state.added | sandbox_state.changed + pod_targets.group_by(&:pod_name).each do |name, pod_targets| + if pods_to_install.include?(name) && !sandbox.local?(name) + script_phase_count = pod_targets.inject(0) { |sum, target| sum + target.script_phases.count } + unless script_phase_count.zero? + UI.warn "#{name} has added #{script_phase_count} #{'script phase'.pluralize(script_phase_count)}. " \ + 'Please inspect before executing a build. See `https://guides.cocoapods.org/syntax/podspec.html#script_phases` for more information.' + end + end + end + end + + # Prints a warning if the project is not explicitly using the git based master specs repo. 
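+      # The check can be opted out of from the Podfile via the corresponding
+      # installation option, e.g. (illustrative):
+      #
+      #   install! 'cocoapods', :warn_for_unused_master_specs_repo => false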
+ # + # Helps users to delete the git based master specs repo from the repos directory which reduces `--repo-update` + # speed and hopefully reduces Github workload. + # + # @return [void] + # + def warn_for_removing_git_master_specs_repo + return unless installation_options.warn_for_unused_master_specs_repo? + plugin_sources = run_source_provider_hooks + all_sources = podfile.sources + plugin_sources.map(&:url) + master_source = all_sources.find { |source| source == MASTER_SPECS_REPO_GIT_URL } + master_repo = config.sources_manager.all.find { |s| s.url == MASTER_SPECS_REPO_GIT_URL } + if master_source.nil? && !master_repo.nil? + UI.warn 'Your project does not explicitly specify the CocoaPods master specs repo. Since CDN is now used as the' \ + ' default, you may safely remove it from your repos directory via `pod repo remove master`. To suppress this warning' \ + ' please add `warn_for_unused_master_specs_repo => false` to your Podfile.' + end + end + + # @return [Lockfile] The lockfile to write to disk. + # + def generate_lockfile + external_source_pods = analysis_result.podfile_dependency_cache.podfile_dependencies.select(&:external_source).map(&:root_name).uniq + checkout_options = sandbox.checkout_sources.select { |root_name, _| external_source_pods.include? root_name } + Lockfile.generate(podfile, analysis_result.specifications, checkout_options, analysis_result.specs_by_source) + end + + # Writes the Podfile and the lock files. + # + # @return [void] + # + def write_lockfiles + @lockfile = generate_lockfile + + UI.message "- Writing Lockfile in #{UI.path config.lockfile_path}" do + # No need to invoke Sandbox#update_changed_file here since this logic already handles checking if the + # contents of the file are the same. + @lockfile.write_to_disk(config.lockfile_path) + end + + UI.message "- Writing Manifest in #{UI.path sandbox.manifest_path}" do + # No need to invoke Sandbox#update_changed_file here since this logic already handles checking if the + # contents of the file are the same. + @lockfile.write_to_disk(sandbox.manifest_path) + end + end + + # @param [ProjectCacheAnalysisResult] cache_analysis_result + # The cache analysis result for the current installation. + # + # @param [Hash{String => TargetInstallationResult}] target_installation_results + # The installation results for pod targets installed. + # + def update_project_cache(cache_analysis_result, target_installation_results) + return unless installation_cache || metadata_cache + installation_cache.update_cache_key_by_target_label!(cache_analysis_result.cache_key_by_target_label) + installation_cache.update_project_object_version!(cache_analysis_result.project_object_version) + installation_cache.update_build_configurations!(cache_analysis_result.build_configurations) + installation_cache.update_podfile_plugins!(plugins) + installation_cache.update_installation_options!(installation_options.to_h) + installation_cache.save_as(sandbox.project_installation_cache_path) + + metadata_cache.update_metadata!(target_installation_results.pod_target_installation_results || {}, + target_installation_results.aggregate_target_installation_results || {}) + metadata_cache.save_as(sandbox.project_metadata_cache_path) + + cache_version = ProjectCache::ProjectCacheVersion.new(VersionMetadata.project_cache_version) + cache_version.save_as(sandbox.project_version_cache_path) + end + + # Integrates the user projects adding the dependencies on the CocoaPods + # libraries, setting them up to use the xcconfigs and performing other + # actions. 
This step is also responsible of creating the workspace if + # needed. + # + # @return [void] + # + def integrate_user_project + UI.section "Integrating client #{'project'.pluralize(aggregate_targets.map(&:user_project_path).uniq.count)}" do + installation_root = config.installation_root + integrator = UserProjectIntegrator.new(podfile, sandbox, installation_root, aggregate_targets, generated_aggregate_targets, + :use_input_output_paths => !installation_options.disable_input_output_paths?) + integrator.integrate! + run_podfile_post_integrate_hooks + end + end + + #-------------------------------------------------------------------------# + + private + + # @!group Hooks + + # Runs the pre install hooks of the installed specs and of the Podfile. + # + # @return [void] + # + def run_podfile_pre_install_hooks + UI.message '- Running pre install hooks' do + executed = run_podfile_pre_install_hook + UI.message '- Podfile' if executed + end + end + + # Runs the pre install hook of the Podfile + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_pre_install_hook + podfile.pre_install!(self) + rescue => e + raise Informative, 'An error occurred while processing the pre-install ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + + # Runs the post integrate hooks of the installed specs and of the Podfile. + # + # @note Post integrate hooks run _after_ saving of project, so that they + # can alter it after it is written to the disk. + # + # @return [void] + # + def run_podfile_pre_integrate_hooks + UI.message '- Running pre integrate hooks' do + executed = run_podfile_pre_integrate_hook + UI.message '- Podfile' if executed + end + end + + # Runs the pre integrate hook of the Podfile. + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_pre_integrate_hook + podfile.pre_integrate!(self) + rescue => e + raise Informative, 'An error occurred while processing the pre-integrate ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + + # Runs the post install hooks of the installed specs and of the Podfile. + # + # @note Post install hooks run _before_ saving of project, so that they + # can alter it before it is written to the disk. + # + # @return [void] + # + def run_podfile_post_install_hooks + UI.message '- Running post install hooks' do + executed = run_podfile_post_install_hook + UI.message '- Podfile' if executed + end + end + + # Runs the post install hook of the Podfile + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_post_install_hook + podfile.post_install!(self) + rescue => e + raise Informative, 'An error occurred while processing the post-install ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + + # Runs the post integrate hooks of the installed specs and of the Podfile. + # + # @note Post integrate hooks run _after_ saving of project, so that they + # can alter it after it is written to the disk. + # + # @return [void] + # + def run_podfile_post_integrate_hooks + UI.message '- Running post integrate hooks' do + executed = run_podfile_post_integrate_hook + UI.message '- Podfile' if executed + end + end + + # Runs the post integrate hook of the Podfile. + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. 
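+    #
+    # A Podfile opts into this hook with the `post_integrate` DSL method,
+    # for example (illustrative sketch):
+    #
+    #   post_integrate do |installer|
+    #     puts "Integrated #{installer.pod_targets.count} pod targets"
+    #   end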
+ # + def run_podfile_post_integrate_hook + podfile.post_integrate!(self) + rescue => e + raise Informative, 'An error occurred while processing the post-integrate ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + #-------------------------------------------------------------------------# + + public + + # @param [Array] targets + # + # @return [Array] The targets of the development pods generated by + # the installation process. This can be used as a convenience method for external scripts. + # + def development_pod_targets(targets = pod_targets) + targets.select do |pod_target| + sandbox.local?(pod_target.pod_name) + end + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Array] All the root specifications of the + # installation. + # + def root_specs + analysis_result.specifications.map(&:root).uniq + end + + # @return [SpecsState] The state of the sandbox returned by the analyzer. + # + def sandbox_state + analysis_result.sandbox_state + end + + # @return [InstallationOptions] the installation options to use during install + # + def installation_options + podfile.installation_options + end + + #-------------------------------------------------------------------------# + + public + + # @!group Convenience Methods + + def self.targets_from_sandbox(sandbox, podfile, lockfile) + raise Informative, 'You must run `pod install` to be able to generate target information' unless lockfile + + new(sandbox, podfile, lockfile).instance_exec do + plugin_sources = run_source_provider_hooks + analyzer = create_analyzer(plugin_sources) + analyze(analyzer) + if analysis_result.podfile_needs_install? + raise Pod::Informative, 'The Podfile has changed, you must run `pod install`' + elsif analysis_result.sandbox_needs_install? + raise Pod::Informative, 'The `Pods` directory is out-of-date, you must run `pod install`' + end + + aggregate_targets + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer.rb new file mode 100644 index 0000000..9702736 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer.rb @@ -0,0 +1,1204 @@ +require 'cocoapods/podfile' + +module Pod + class Installer + # Analyzes the Podfile, the Lockfile, and the sandbox manifest to generate + # the information relative to a CocoaPods installation. 
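+    #
+    # Typical use (illustrative sketch):
+    #
+    #   analyzer = Pod::Installer::Analyzer.new(sandbox, podfile, lockfile)
+    #   result   = analyzer.analyze
+    #   result.specifications.map(&:name)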
+ # + class Analyzer + include Config::Mixin + + autoload :AnalysisResult, 'cocoapods/installer/analyzer/analysis_result' + autoload :LockingDependencyAnalyzer, 'cocoapods/installer/analyzer/locking_dependency_analyzer' + autoload :PodfileDependencyCache, 'cocoapods/installer/analyzer/podfile_dependency_cache' + autoload :PodVariant, 'cocoapods/installer/analyzer/pod_variant' + autoload :PodVariantSet, 'cocoapods/installer/analyzer/pod_variant_set' + autoload :SandboxAnalyzer, 'cocoapods/installer/analyzer/sandbox_analyzer' + autoload :SpecsState, 'cocoapods/installer/analyzer/specs_state' + autoload :TargetInspectionResult, 'cocoapods/installer/analyzer/target_inspection_result' + autoload :TargetInspector, 'cocoapods/installer/analyzer/target_inspector' + + # @return [String] The version of iOS which requires binaries with only 64-bit architectures + # + IOS_64_BIT_ONLY_VERSION = Version.new('11.0') + + # @return [Integer] The Xcode object version until which 64-bit architectures should be manually specified + # + # Xcode 10 will automatically select the correct architectures based on deployment target + IOS_64_BIT_ONLY_PROJECT_VERSION = 50 + + # @return [Sandbox] The sandbox to use for this analysis. + # + attr_reader :sandbox + + # @return [Podfile] The Podfile specification that contains the information of the Pods that should be installed. + # + attr_reader :podfile + + # @return [Lockfile, nil] The Lockfile, if available, that stores the information about the Pods previously installed. + # + attr_reader :lockfile + + # @return [Array] Sources provided by plugins or `nil`. + # + attr_reader :plugin_sources + + # @return [Bool] Whether the analysis has dependencies and thus sources must be configured. + # + # @note This is used by the `pod lib lint` command to prevent update of specs when not needed. + # + attr_reader :has_dependencies + alias_method :has_dependencies?, :has_dependencies + + # @return [Hash, Boolean, nil] Pods that have been requested to be updated or true if all Pods should be updated. + # This can be false if no pods should be updated. + # + attr_reader :pods_to_update + + # @return [InstallationOptions] the installation options specified by the Podfile + # + attr_reader :installation_options + + # @return [Source::Manager] the sources manager to use when resolving dependencies + # + attr_reader :sources_manager + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Podfile] podfile @see #podfile + # @param [Lockfile, nil] lockfile @see #lockfile + # @param [Array] plugin_sources @see #plugin_sources + # @param [Boolean] has_dependencies @see #has_dependencies + # @param [Hash, Boolean, nil] pods_to_update @see #pods_to_update + # @param [Source::Manager] sources_manager @see #sources_manager + # + def initialize(sandbox, podfile, lockfile = nil, plugin_sources = nil, has_dependencies = true, + pods_to_update = false, sources_manager = Source::Manager.new(config.repos_dir)) + @sandbox = sandbox + @podfile = podfile + @lockfile = lockfile + @plugin_sources = plugin_sources + @has_dependencies = has_dependencies + @pods_to_update = pods_to_update + @installation_options = podfile.installation_options + @podfile_dependency_cache = PodfileDependencyCache.from_podfile(podfile) + @sources_manager = sources_manager + @result = nil + end + + # Performs the analysis. + # + # The Podfile and the Lockfile provide the information necessary to + # compute which specification should be installed. 
The manifest of the + # sandbox returns which specifications are installed. + # + # @param [Bool] allow_fetches + # whether external sources may be fetched + # + # @return [AnalysisResult] + # + def analyze(allow_fetches = true) + return @result if @result + validate_podfile! + validate_lockfile_version! + if installation_options.integrate_targets? + target_inspections = inspect_targets_to_integrate + else + verify_platforms_specified! + target_inspections = {} + end + podfile_state = generate_podfile_state + + store_existing_checkout_options + if allow_fetches == :outdated + # special-cased -- we're only really resolving for outdated, rather than doing a full analysis + elsif allow_fetches == true + fetch_external_sources(podfile_state) + elsif !dependencies_to_fetch(podfile_state).all?(&:local?) + raise Informative, 'Cannot analyze without fetching dependencies since the sandbox is not up-to-date. Run `pod install` to ensure all dependencies have been fetched.' \ + "\n The missing dependencies are:\n \t#{dependencies_to_fetch(podfile_state).reject(&:local?).join("\n \t")}" + end + + locked_dependencies = generate_version_locking_dependencies(podfile_state) + resolver_specs_by_target = resolve_dependencies(locked_dependencies) + validate_platforms(resolver_specs_by_target) + specifications = generate_specifications(resolver_specs_by_target) + aggregate_targets, pod_targets = generate_targets(resolver_specs_by_target, target_inspections) + sandbox_state = generate_sandbox_state(specifications) + specs_by_target = resolver_specs_by_target.each_with_object({}) do |rspecs_by_target, hash| + hash[rspecs_by_target[0]] = rspecs_by_target[1].map(&:spec) + end + specs_by_source = Hash[resolver_specs_by_target.values.flatten(1).group_by(&:source).map do |source, specs| + [source, specs.map(&:spec).uniq] + end] + sources.each { |s| specs_by_source[s] ||= [] } + @result = AnalysisResult.new(podfile_state, specs_by_target, specs_by_source, specifications, sandbox_state, + aggregate_targets, pod_targets, @podfile_dependency_cache) + end + + # Updates the git source repositories. + # + def update_repositories + sources.each do |source| + if source.updateable? + sources_manager.update(source.name, true) + else + UI.message "Skipping `#{source.name}` update because the repository is not an updateable repository." + end + end + @specs_updated = true + end + + # Returns the sources used to query for specifications. + # + # When no explicit Podfile sources or plugin sources are defined, this defaults to the master spec repository. + # + # @return [Array] the sources to be used in finding specifications, as specified by the podfile or all + # sources. + # + def sources + @sources ||= begin + sources = podfile.sources + plugin_sources = @plugin_sources || [] + + # Add any sources specified using the :source flag on individual dependencies. + dependency_sources = podfile_dependencies.map(&:podspec_repo).compact + all_dependencies_have_sources = dependency_sources.count == podfile_dependencies.count + + if all_dependencies_have_sources + sources = dependency_sources + elsif has_dependencies? && sources.empty? && plugin_sources.empty? + sources = [Pod::TrunkSource::TRUNK_REPO_URL] + dependency_sources + else + sources += dependency_sources + end + + result = sources.uniq.map do |source_url| + sources_manager.find_or_create_source_with_url(source_url) + end + unless plugin_sources.empty? 
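+            # Plugin-provided sources take priority: prepend them to the resolved
+            # list and register each one with the sources manager.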
+ result.insert(0, *plugin_sources) + plugin_sources.each do |source| + sources_manager.add_source(source) + end + end + result + end + end + + #-----------------------------------------------------------------------# + + private + + # @!group Configuration + + # @return [Bool] Whether the version of the dependencies which did not + # change in the Podfile should be locked. + # + def update_mode? + pods_to_update != nil + end + + # @return [Symbol] Whether and how the dependencies in the Podfile + # should be updated. + # + def update_mode + if !pods_to_update + :none + elsif pods_to_update == true + :all + elsif !pods_to_update[:pods].nil? + :selected + end + end + + def podfile_dependencies + @podfile_dependency_cache.podfile_dependencies + end + + #-----------------------------------------------------------------------# + + def validate_podfile! + validator = Installer::PodfileValidator.new(podfile, @podfile_dependency_cache) + validator.validate + + unless validator.valid? + raise Informative, validator.message + end + validator.warnings.uniq.each { |w| UI.warn(w) } + end + + # @!group Analysis steps + + # @note The warning about the version of the Lockfile doesn't use the + # `UI.warn` method because it prints the output only at the end + # of the installation. At that time CocoaPods could have crashed. + # + def validate_lockfile_version! + if lockfile && lockfile.cocoapods_version > Version.new(VERSION) + STDERR.puts '[!] The version of CocoaPods used to generate ' \ + "the lockfile (#{lockfile.cocoapods_version}) is "\ + "higher than the version of the current executable (#{VERSION}). " \ + 'Incompatibility issues may arise.'.yellow + end + end + + # Compares the {Podfile} with the {Lockfile} in order to detect which + # dependencies should be locked. + # + # @return [SpecsState] the states of the Podfile specs. + # + # @note As the target definitions share the same sandbox they should have + # the same version of a Pod. For this reason this method returns + # the name of the Pod (root name of the dependencies) and doesn't + # group them by target definition. + # + # @return [SpecState] + # + def generate_podfile_state + if lockfile + pods_state = nil + UI.section 'Finding Podfile changes' do + pods_by_state = lockfile.detect_changes_with_podfile(podfile) + pods_state = SpecsState.new(pods_by_state) + pods_state.print if config.verbose? + end + pods_state + else + state = SpecsState.new + state.added.merge(podfile_dependencies.map(&:root_name)) + state + end + end + + # Copies the pod targets of any of the app embedded aggregate targets into + # their potential host aggregate target, if that potential host aggregate target's + # user_target hosts any of the app embedded aggregate targets' user_targets + # + # @param [AggregateTarget] aggregate_target the aggregate target whose user_target + # might host one or more of the embedded aggregate targets' user_targets + # + # @param [Array] embedded_aggregate_targets the aggregate targets + # representing the embedded targets to be integrated + # + # @param [Boolean] libraries_only if true, only library-type embedded + # targets are considered, otherwise, all other types are have + # their pods copied to their host targets as well (extensions, etc.) + # + # @return [Hash{String=>Array}] the additional pod targets to include to the host + # keyed by their configuration. + # + def embedded_target_pod_targets_by_host(aggregate_target, embedded_aggregate_targets, libraries_only) + return {} if aggregate_target.requires_host_target? 
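+        # Collect the UUIDs of the host aggregate target's user targets so they can
+        # be matched against the host targets of each embedded user target below.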
+ aggregate_user_target_uuids = Set.new(aggregate_target.user_targets.map(&:uuid)) + embedded_pod_targets_by_build_config = Hash.new([].freeze) + embedded_aggregate_targets.each do |embedded_aggregate_target| + # Skip non libraries in library-only mode + next if libraries_only && !embedded_aggregate_target.library? + next if aggregate_target.search_paths_aggregate_targets.include?(embedded_aggregate_target) + next unless embedded_aggregate_target.user_targets.any? do |embedded_user_target| + # You have to ask the host target's project for the host targets of + # the embedded target, as opposed to asking user_project for the + # embedded targets of the host target. The latter doesn't work when + # the embedded target lives in a sub-project. The lines below get + # the host target uuids for the embedded target and checks to see if + # those match to any of the user_target uuids in the aggregate_target. + host_target_uuids = Set.new(aggregate_target.user_project.host_targets_for_embedded_target(embedded_user_target).map(&:uuid)) + !aggregate_user_target_uuids.intersection(host_target_uuids).empty? + end + embedded_aggregate_target.user_build_configurations.each_key do |configuration_name| + pod_target_names = Set.new(aggregate_target.pod_targets_for_build_configuration(configuration_name).map(&:name)) + embedded_pod_targets = embedded_aggregate_target.pod_targets_for_build_configuration(configuration_name).select do |pod_target| + if !pod_target_names.include?(pod_target.name) && + aggregate_target.pod_targets.none? { |aggregate_pod_target| (pod_target.specs - aggregate_pod_target.specs).empty? } && + (libraries_only || pod_target.build_as_dynamic?) + pod_target.name + end + end + embedded_pod_targets_by_build_config[configuration_name] += embedded_pod_targets + end + end + embedded_pod_targets_by_build_config + end + + # Raises an error if there are embedded targets in the Podfile, but + # their host targets have not been declared in the Podfile. As it + # finds host targets, it collection information on host target types. 
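+      # (A host target is a "parent" target which embeds a "child" target such as
+      # an app extension or a watch extension.)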
+ # + # @param [Array] aggregate_targets the generated + # aggregate targets + # + # @param [Array] embedded_aggregate_targets the aggregate targets + # representing the embedded targets to be integrated + # + def analyze_host_targets_in_podfile(aggregate_targets, embedded_aggregate_targets) + target_definitions_by_uuid = {} + cli_host_with_dynamic_linkage = [] + cli_product_type = 'com.apple.product-type.tool' + # Collect aggregate target definitions by uuid to later lookup host target + # definitions and verify their compatibility with their embedded targets + aggregate_targets.each do |target| + target.user_targets.each do |user_target| + target_definition = target.target_definition + target_definitions_by_uuid[user_target.uuid] = target_definition + if user_target.product_type == cli_product_type && target_definition.build_type.linkage == :dynamic + cli_host_with_dynamic_linkage << user_target + end + end + end + aggregate_target_user_projects = aggregate_targets.map(&:user_project) + embedded_targets_missing_hosts = [] + host_uuid_to_embedded_target_definitions = {} + # Search all of the known user projects for each embedded target's hosts + embedded_aggregate_targets.each do |target| + host_uuids = aggregate_target_user_projects.product(target.user_targets).flat_map do |user_project, user_target| + user_project.host_targets_for_embedded_target(user_target).map(&:uuid) + end + # For each host, keep track of its embedded target definitions + # to later verify each embedded target's compatiblity with its host, + # ignoring the hosts that aren't known to CocoaPods (no target + # definitions in the Podfile) + host_uuids.each do |uuid| + (host_uuid_to_embedded_target_definitions[uuid] ||= []) << target.target_definition if target_definitions_by_uuid.key? uuid + end + # If none of the hosts are known to CocoaPods (no target definitions + # in the Podfile), add it to the list of targets missing hosts + embedded_targets_missing_hosts << target unless host_uuids.any? do |uuid| + target_definitions_by_uuid.key? uuid + end + end + + unless cli_host_with_dynamic_linkage.empty? + UI.warn "The Podfile contains command line tool target(s) (#{cli_host_with_dynamic_linkage.map(&:name).to_sentence}) which are attempting to integrate dynamic frameworks or libraries." \ + "\n" \ + 'This may not behave as expected, because command line tools are usually distributed as a single binary and cannot contain their own dynamic dependencies.' + end + + unless embedded_targets_missing_hosts.empty? + embedded_targets_missing_hosts_product_types = Set.new embedded_targets_missing_hosts.flat_map(&:user_targets).map(&:symbol_type) + target_names = embedded_targets_missing_hosts.map do |target| + target.name.sub('Pods-', '') # Make the target names more recognizable to the user + end.join ', ' + # If the targets missing hosts are only frameworks, then this is likely + # a project for doing framework development. In that case, just warn that + # the frameworks that these targets depend on won't be integrated anywhere + if embedded_targets_missing_hosts_product_types.subset?(Set.new([:framework, :static_library])) + UI.warn "The Podfile contains framework or static library targets (#{target_names}), for which the Podfile does not contain host targets (targets which embed the framework)." \ + "\n" \ + 'If this project is for doing framework development, you can ignore this message. Otherwise, add a target to the Podfile that embeds these frameworks to make this message go away (e.g. a test target).' 
+ else + raise Informative, "Unable to find host target(s) for #{target_names}. Please add the host targets for the embedded targets to the Podfile." \ + "\n" \ + 'Certain kinds of targets require a host target. A host target is a "parent" target which embeds a "child" target. These are example types of targets that need a host target:' \ + "\n- Framework" \ + "\n- App Extension" \ + "\n- Watch OS 1 Extension" \ + "\n- Messages Extension (except when used with a Messages Application)" + end + end + + target_mismatches = [] + host_uuid_to_embedded_target_definitions.each do |uuid, target_definitions| + host_target_definition = target_definitions_by_uuid[uuid] + target_definitions.each do |target_definition| + unless host_target_definition.uses_frameworks? == target_definition.uses_frameworks? + target_mismatches << "- #{host_target_definition.name} (#{host_target_definition.uses_frameworks?}) and #{target_definition.name} (#{target_definition.uses_frameworks?}) do not both set use_frameworks!." + end + end + end + + unless target_mismatches.empty? + heading = 'Unable to integrate the following embedded targets with their respective host targets (a host target is a "parent" target which embeds a "child" target like a framework or extension):' + raise Informative, heading + "\n\n" + target_mismatches.sort.uniq.join("\n") + end + end + + # Creates the models that represent the targets generated by CocoaPods. + # + # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target + # mapping of targets to resolved specs (containing information about test usage) + # aggregate targets + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @return [(Array, Array)] the list of aggregate targets generated, + # and the list of pod targets generated. + # + def generate_targets(resolver_specs_by_target, target_inspections) + resolver_specs_by_target = resolver_specs_by_target.reject { |td, _| td.abstract? && !td.platform } + pod_targets = generate_pod_targets(resolver_specs_by_target, target_inspections) + pod_targets_by_target_definition = group_pod_targets_by_target_definition(pod_targets, resolver_specs_by_target) + aggregate_targets = resolver_specs_by_target.keys.reject(&:abstract?).map do |target_definition| + generate_aggregate_target(target_definition, target_inspections, pod_targets_by_target_definition) + end + aggregate_targets.each do |target| + search_paths_aggregate_targets = aggregate_targets.select do |aggregate_target| + target.target_definition.targets_to_inherit_search_paths.include?(aggregate_target.target_definition) + end + target.search_paths_aggregate_targets.concat(search_paths_aggregate_targets).freeze + end + + aggregate_targets.each do |aggregate_target| + is_app_extension = !(aggregate_target.user_targets.map(&:symbol_type) & + [:app_extension, :watch_extension, :watch2_extension, :tv_extension, :messages_extension]).empty? + is_app_extension ||= aggregate_target.user_targets.any? do |user_target| + user_target.common_resolved_build_setting('APPLICATION_EXTENSION_API_ONLY', :resolve_against_xcconfig => true) == 'YES' + end + if is_app_extension + aggregate_target.mark_application_extension_api_only + aggregate_target.pod_targets.each(&:mark_application_extension_api_only) + end + + build_library_for_distribution = aggregate_target.user_targets.any? 
do |user_target| + user_target.common_resolved_build_setting('BUILD_LIBRARY_FOR_DISTRIBUTION', :resolve_against_xcconfig => true) == 'YES' + end + if build_library_for_distribution + aggregate_target.mark_build_library_for_distribution + aggregate_target.pod_targets.each(&:mark_build_library_for_distribution) + end + end + + if installation_options.integrate_targets? + # Copy embedded target pods that cannot have their pods embedded as frameworks to + # their host targets, and ensure we properly link library pods to their host targets + embedded_targets = aggregate_targets.select(&:requires_host_target?) + analyze_host_targets_in_podfile(aggregate_targets, embedded_targets) + + use_frameworks_embedded_targets, non_use_frameworks_embedded_targets = embedded_targets.partition(&:build_as_framework?) + aggregate_targets = aggregate_targets.map do |aggregate_target| + # For targets that require dynamic frameworks, we always have to copy their pods to their + # host targets because those frameworks will all be loaded from the host target's bundle + embedded_pod_targets = embedded_target_pod_targets_by_host(aggregate_target, use_frameworks_embedded_targets, false) + + # For targets that don't require dynamic frameworks, we only have to consider library-type + # targets because their host targets will still need to link their pods + embedded_pod_targets.merge!(embedded_target_pod_targets_by_host(aggregate_target, non_use_frameworks_embedded_targets, true)) + + next aggregate_target if embedded_pod_targets.empty? + aggregate_target.merge_embedded_pod_targets(embedded_pod_targets) + end + end + [aggregate_targets, pod_targets] + end + + # Setup the aggregate target for a single user target + # + # @param [TargetDefinition] target_definition + # the target definition for the user target. + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @param [Hash{TargetDefinition => Array}] pod_targets_by_target_definition + # the pod targets grouped by target. + # + # @return [AggregateTarget] + # + def generate_aggregate_target(target_definition, target_inspections, pod_targets_by_target_definition) + if installation_options.integrate_targets? + target_inspection = target_inspections[target_definition] + raise "missing inspection for #{target_definition.inspect}" unless target_inspection + target_requires_64_bit = Analyzer.requires_64_bit_archs?(target_definition.platform, target_inspection.project.object_version) + user_project = target_inspection.project + client_root = target_inspection.client_root + user_target_uuids = target_inspection.project_target_uuids + user_build_configurations = target_inspection.build_configurations + archs = target_requires_64_bit ? ['$(ARCHS_STANDARD_64_BIT)'] : target_inspection.archs + else + target_requires_64_bit = Analyzer.requires_64_bit_archs?(target_definition.platform, nil) + user_project = nil + client_root = config.installation_root.realpath + user_target_uuids = [] + user_build_configurations = target_definition.build_configurations || Target::DEFAULT_BUILD_CONFIGURATIONS + archs = target_requires_64_bit ? 
['$(ARCHS_STANDARD_64_BIT)'] : [] + end + platform = target_definition.platform + build_configurations = user_build_configurations.keys.concat(target_definition.all_whitelisted_configurations).uniq + pod_targets_for_build_configuration = filter_pod_targets_for_target_definition(target_definition, + pod_targets_by_target_definition, + build_configurations) + build_type = target_definition.uses_frameworks? ? BuildType.static_framework : BuildType.static_library + AggregateTarget.new(sandbox, build_type, user_build_configurations, archs, platform, target_definition, + client_root, user_project, user_target_uuids, pod_targets_for_build_configuration) + end + + # Returns a filtered list of pod targets that should or should not be part of the target definition. Pod targets + # used by tests only are filtered. + # + # @return [Hash{TargetDefinition => Array}] + # + def group_pod_targets_by_target_definition(pod_targets, resolver_specs_by_target) + pod_targets_by_target_definition = Hash.new { |h, td| h[td] = [] } + pod_targets.each do |pod_target| + pod_target.target_definitions.each do |td| + pod_targets_by_target_definition[td] << pod_target + end + end + resolver_specs_by_target.each do |td, resolver_specs| + specs_by_pod_name = resolver_specs.group_by { |s| s.root.name } + specs_by_pod_name.reject! { |_, specs| specs.all?(&:used_by_non_library_targets_only?) } + pod_targets_by_target_definition[td].keep_if { |pod_target| specs_by_pod_name.key?(pod_target.pod_name) } + end + + pod_targets_by_target_definition + end + + # Returns a filtered list of pod targets that should or should not be part of the target definition. Pod targets + # used by tests only are filtered. + # + # @param [TargetDefinition] target_definition + # the target definition to use as the base for filtering + # + # @param [Hash{TargetDefinition => Array}] pod_targets_by_target_definition + # the pod targets grouped by target. + # + # @param [Array] build_configurations + # The list of all build configurations the targets will be built for. + # + # @return [Hash{String => Array}] + # the filtered list of pod targets, grouped by build configuration. + # + def filter_pod_targets_for_target_definition(target_definition, pod_targets_by_target_definition, + build_configurations) + pod_targets_by_build_config = Hash.new([].freeze) + build_configurations.each { |config| pod_targets_by_build_config[config] = [] } + + dependencies_by_root_name = @podfile_dependency_cache.target_definition_dependencies(target_definition).group_by(&:root_name) + + pod_targets_by_target_definition[target_definition].each do |pod_target| + pod_name = pod_target.pod_name + dependencies = dependencies_by_root_name[pod_name] || [] + + build_configurations.each do |configuration_name| + whitelists = dependencies.map do |dependency| + target_definition.pod_whitelisted_for_configuration?(dependency.name, configuration_name) + end.uniq + + case whitelists + when [], [true] then nil + when [false] then next + else + raise Informative, "The subspecs of `#{pod_name}` are linked to " \ + "different build configurations for the `#{target_definition}` " \ + 'target. CocoaPods does not currently support subspecs across ' \ + 'different build configurations.' + end + + pod_targets_by_build_config[configuration_name] << pod_target + end + end + + pod_targets_by_build_config + end + + # Setup the pod targets for an aggregate target. Deduplicates resulting + # targets by grouping by platform and subspec by their root + # to create a {PodTarget} for each spec. 
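+      # For example (illustrative), two target definitions on the same platform that
+      # resolve the same root spec with the same subspecs share a single
+      # deduplicated {PodTarget}.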
+ # + # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target + # the resolved specifications grouped by target. + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @return [Array] + # + def generate_pod_targets(resolver_specs_by_target, target_inspections) + if installation_options.deduplicate_targets? + distinct_targets = resolver_specs_by_target.each_with_object({}) do |dependency, hash| + target_definition, dependent_specs = *dependency + dependent_specs.group_by(&:root).each do |root_spec, resolver_specs| + all_specs = resolver_specs.map(&:spec) + all_specs_by_type = all_specs.group_by(&:spec_type) + library_specs = all_specs_by_type[:library] || [] + test_specs = all_specs_by_type[:test] || [] + app_specs = all_specs_by_type[:app] || [] + build_type = determine_build_type(root_spec, target_definition.build_type) + pod_variant = PodVariant.new(library_specs, test_specs, app_specs, target_definition.platform, build_type) + hash[root_spec] ||= {} + (hash[root_spec][pod_variant] ||= []) << target_definition + pod_variant_spec = hash[root_spec].keys.find { |k| k == pod_variant } + pod_variant_spec.test_specs.concat(test_specs).uniq! + pod_variant_spec.app_specs.concat(app_specs).uniq! + end + end + + # Remap pod variants to a new instance that includes the Swift version since we now have the full set + # of target definitions. + distinct_targets = Hash[distinct_targets.map do |root, target_definitions_by_variant| + variants = Hash[target_definitions_by_variant.map do |variant, target_definitions| + swift_version = determine_swift_version(variant.root_spec, target_definitions) + [variant.scoped_with_swift_version(swift_version), target_definitions] + end] + [root, variants] + end] + + pod_targets = distinct_targets.flat_map do |_root, target_definitions_by_variant| + target_definitions_by_variant.each_value do |target_definitions| + target_definitions.reject!(&:abstract?) unless target_definitions.all?(&:abstract?) + end + suffixes = PodVariantSet.new(target_definitions_by_variant.keys).scope_suffixes + target_definitions_by_variant.map do |variant, target_definitions| + all_specs = variant.specs + variant.test_specs + variant.app_specs + generate_pod_target(target_definitions, variant.build_type, target_inspections, all_specs, + :scope_suffix => suffixes[variant], :swift_version => variant.swift_version) + end + end + + all_specs = resolver_specs_by_target.values.flatten.map(&:spec).uniq.group_by(&:name) + compute_pod_target_dependencies(pod_targets, all_specs) + else + dedupe_cache = {} + resolver_specs_by_target.flat_map do |target_definition, specs| + grouped_specs = specs.group_by(&:root).values.uniq + pod_targets = grouped_specs.flat_map do |pod_specs| + build_type = determine_build_type(pod_specs.first.spec, target_definition.build_type) + swift_version = determine_swift_version(pod_specs.first.spec, [target_definition]) + generate_pod_target([target_definition], build_type, target_inspections, pod_specs.map(&:spec), + :swift_version => swift_version).scoped(dedupe_cache) + end + + compute_pod_target_dependencies(pod_targets, specs.map(&:spec).group_by(&:name)) + end + end + end + + # Compute the dependencies for the set of pod targets. + # + # @param [Array] pod_targets + # pod targets. + # + # @param [Hash{String => Array}] all_specs + # specifications grouped by name. 
+ # + # @return [Array] + # + def compute_pod_target_dependencies(pod_targets, all_specs) + pod_targets_by_name = pod_targets.group_by(&:pod_name).each_with_object({}) do |(name, values), hash| + # Sort the target by the number of activated subspecs, so that + # we prefer a minimal target as transitive dependency. + hash[name] = values.sort_by { |pt| pt.specs.count } + end + + pod_targets.each do |target| + dependencies_by_config = dependencies_for_specs(target.library_specs, target.platform, all_specs) + target.dependent_targets_by_config = Hash[dependencies_by_config.map { |k, v| [k, filter_dependencies(v, pod_targets_by_name, target)] }] + + target.test_dependent_targets_by_spec_name_by_config = target.test_specs.each_with_object({}) do |test_spec, hash| + test_dependencies_by_config = dependencies_for_specs([test_spec], target.platform, all_specs) + test_dependencies_by_config.each { |config, deps| deps.delete_if { |k, _| dependencies_by_config[config].key? k } } + hash[test_spec.name] = Hash[test_dependencies_by_config.map { |k, v| [k, filter_dependencies(v, pod_targets_by_name, target)] }] + end + + target.app_dependent_targets_by_spec_name_by_config = target.app_specs.each_with_object({}) do |app_spec, hash| + app_dependencies_by_config = dependencies_for_specs([app_spec], target.platform, all_specs) + app_dependencies_by_config.each { |config, deps| deps.delete_if { |k, _| dependencies_by_config[config].key? k } } + hash[app_spec.name] = Hash[app_dependencies_by_config.map { |k, v| [k, filter_dependencies(v, pod_targets_by_name, target)] }] + end + + target.test_app_hosts_by_spec = target.test_specs.each_with_object({}) do |test_spec, hash| + next unless app_host_name = test_spec.consumer(target.platform).app_host_name + app_host_spec = pod_targets_by_name[Specification.root_name(app_host_name)].flat_map(&:app_specs).find do |pt| + pt.name == app_host_name + end + app_host_dependencies = { app_host_spec.root => [app_host_spec] } + hash[test_spec] = [app_host_spec, filter_dependencies(app_host_dependencies, pod_targets_by_name, target).first] + end + end + end + + def filter_dependencies(dependencies, pod_targets_by_name, target) + dependencies.map do |root_spec, deps| + pod_targets_by_name[root_spec.name].find do |t| + next false if t.platform.symbolic_name != target.platform.symbolic_name || + # In the case of variants we must ensure that the platform this target is meant for is the same + # as the one we are interested in. + t.target_definitions.first.platform != target.target_definitions.first.platform || + # rather than target type or requires_frameworks? since we want to group by what was specified in that + # _target definition_. + t.build_as_framework? != target.build_as_framework? + spec_names = t.specs.map(&:name) + deps.all? { |dep| spec_names.include?(dep.name) } + end + end + end + + # Returns the specs upon which the given specs _directly_ depend. + # + # @note: This is implemented in the analyzer, because we don't have to + # care about the requirements after dependency resolution. + # + # @param [Array] specs + # The specs, whose dependencies should be returned. + # + # @param [Platform] platform + # The platform for which the dependencies should be returned. + # + # @param [Hash{String => Array}] all_specs + # All specifications which are installed alongside. + # + # @return [Hash{Symbol => Set}] + # + def dependencies_for_specs(specs, platform, all_specs) + dependent_specs = { + :debug => Set.new, + :release => Set.new, + } + + if !specs.empty? && !all_specs.empty? 
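+          # Walk each spec's direct dependencies and bucket every matching installed
+          # spec into the :debug / :release sets, honoring per-configuration whitelists.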
+ specs.each do |s| + s.dependencies(platform).each do |dep| + all_specs[dep.name].each do |spec| + if spec.non_library_specification? + if s.test_specification? && spec.name == s.consumer(platform).app_host_name && spec.app_specification? + # This needs to be handled separately, since we _don't_ want to treat this as a "normal" dependency + next + end + raise Informative, "`#{s}` depends upon `#{spec}`, which is a `#{spec.spec_type}` spec." + end + + dependent_specs.each do |config, set| + next unless s.dependency_whitelisted_for_configuration?(dep, config) + set << spec + end + end + end + end + end + + Hash[dependent_specs.map { |k, v| [k, (v - specs).group_by(&:root)] }].freeze + end + + # Create a target for each spec group + # + # @param [Array] target_definitions + # the target definitions of the pod target + # + # @param [BuildType] build_type + # the BuildType to use for this pod target. + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @param [Array] specs + # the specifications of an equal root. + # + # @param [String] scope_suffix + # @see PodTarget#scope_suffix + # + # @param [String] swift_version + # @see PodTarget#swift_version + # + # @return [PodTarget] + # + def generate_pod_target(target_definitions, build_type, target_inspections, specs, scope_suffix: nil, + swift_version: nil) + target_inspections = target_inspections.select { |t, _| target_definitions.include?(t) }.values + object_version = target_inspections.map { |ti| ti.project.object_version }.min + target_requires_64_bit = target_definitions.all? { |td| Analyzer.requires_64_bit_archs?(td.platform, object_version) } + if !target_inspections.empty? + user_build_configurations = target_inspections.map(&:build_configurations).reduce({}, &:merge) + archs = if target_requires_64_bit + ['$(ARCHS_STANDARD_64_BIT)'] + else + target_inspections.flat_map(&:archs).compact.uniq.sort + end + else + user_build_configurations = Target::DEFAULT_BUILD_CONFIGURATIONS.merge( + target_definitions.map { |td| td.build_configurations || {} }.reduce({}, &:merge), + ) + archs = target_requires_64_bit ? ['$(ARCHS_STANDARD_64_BIT)'] : [] + end + platform = determine_platform(specs, target_definitions, build_type) + file_accessors = create_file_accessors(specs, platform) + PodTarget.new(sandbox, build_type, user_build_configurations, archs, platform, specs, target_definitions, + file_accessors, scope_suffix, swift_version) + end + + # Creates the file accessors for a given pod. + # + # @param [Array] specs + # the specs to map each file accessor to. + # + # @param [Platform] platform + # the platform to use when generating each file accessor. + # + # @return [Array] + # + def create_file_accessors(specs, platform) + name = specs.first.name + pod_root = sandbox.pod_dir(name) + path_list = Sandbox::PathList.new(pod_root) + specs.map do |spec| + Sandbox::FileAccessor.new(path_list, spec.consumer(platform)) + end + end + + # Calculates and returns the platform to use for the given list specs and target definitions. + # + # @note The platform is only determined by all library specs and ignores non library ones. Subspecs are always + # integrated in the same target as the root spec therefore the max deployment target is always returned + # across the specs passed. + # + # @param [Array] specs + # the specs to inspect and calculate the platform for. 
+ # + # @param [Array] target_definitions + # the target definitions these specs are part of. + # + # @param [BuildType] build_type + # the #BuildType used for calculating the platform. + # + # @return [Platform] + # + def determine_platform(specs, target_definitions, build_type) + library_specs = specs.select(&:library_specification?) + platform_name = target_definitions.first.platform.name + default = Podfile::TargetDefinition::PLATFORM_DEFAULTS[platform_name] + deployment_target = library_specs.map do |library_spec| + Version.new(library_spec.deployment_target(platform_name) || default) + end.max + if platform_name == :ios && build_type.framework? + minimum = Version.new('8.0') + deployment_target = [deployment_target, minimum].max + end + Platform.new(platform_name, deployment_target) + end + + # Determines the Swift version for the given spec within a list of target definitions. If the pod author has + # provided a set of Swift versions supported by their pod then the max Swift version is chosen, unless the target + # definitions specify explicit requirements for supported Swift versions. Otherwise the Swift version is derived + # by the target definitions that integrate this pod. + # + # @param [Specification] spec + # the specs to inspect and determine what Swift version to use. + # + # @param [Array] target_definitions + # the target definitions the spec is part of. + # + # @return [String, nil] the computed Swift version or `nil` if the Swift version could not be determined. + # + def determine_swift_version(spec, target_definitions) + if spec.swift_versions.empty? + target_definitions.map(&:swift_version).compact.uniq.first + else + spec.swift_versions.sort.reverse_each.find do |swift_version| + target_definitions.all? do |td| + td.supports_swift_version?(swift_version) + end + end.to_s + end + end + + # Calculates and returns the #BuildType to use for the given spec. If the spec specifies `static_framework` then + # it is honored as long as the host #BuildType also requires its pods to be integrated as frameworks. + # + # @param [Specification] spec + # the spec to determine the #BuildType for. + # + # @param [BuildType] target_definition_build_type + # The desired #BuildType by the target definition that integrates this target. If the pod target spec does + # not specify explicitly a `static_framework` #BuildType then the one from the target definition is used. + # + # @return [BuildType] + # + def determine_build_type(spec, target_definition_build_type) + if target_definition_build_type.framework? + root_spec = spec.root + root_spec.static_framework ? BuildType.static_framework : target_definition_build_type + else + BuildType.static_library + end + end + + # Generates dependencies that require the specific version of the Pods + # that haven't changed in the {Lockfile}. + # + # These dependencies are passed to the {Resolver}, unless the installer + # is in update mode, to prevent it from upgrading the Pods that weren't + # changed in the {Podfile}. + # + # @param [SpecState] podfile_state + # the state of the podfile for which dependencies have or have not changed, added, deleted or updated. + # + # @return [Molinillo::DependencyGraph] the dependencies + # generated by the lockfile that prevent the resolver to update + # a Pod. 
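+      #
+      # For example (illustrative, `SomePod` being a placeholder name), running
+      # `pod update SomePod` selects the :selected update mode, so every pod other
+      # than `SomePod` stays locked to the version recorded in the Lockfile.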
+ # + def generate_version_locking_dependencies(podfile_state) + if update_mode == :all || !lockfile + LockingDependencyAnalyzer.unlocked_dependency_graph + else + deleted_and_changed = podfile_state.changed + podfile_state.deleted + deleted_and_changed += pods_to_update[:pods] if update_mode == :selected + local_pod_names = podfile_dependencies.select(&:local?).map(&:root_name) + pods_to_unlock = local_pod_names.to_set.delete_if do |pod_name| + next unless sandbox_specification = sandbox.specification(pod_name) + sandbox_specification.checksum == lockfile.checksum(pod_name) + end + LockingDependencyAnalyzer.generate_version_locking_dependencies(lockfile, deleted_and_changed, pods_to_unlock) + end + end + + # Fetches the podspecs of external sources if modifications to the + # sandbox are allowed. + # + # @note In update mode all the external sources are refreshed while in + # normal mode they are refreshed only if added or changed in the + # Podfile. Moreover, in normal specifications for unchanged Pods + # which are missing or are generated from an local source are + # fetched as well. + # + # @note It is possible to perform this step before the resolution + # process because external sources identify a single specific + # version (checkout). If the other dependencies are not + # compatible with the version reported by the podspec of the + # external source the resolver will raise. + # + # @param [SpecState] podfile_state + # the state of the podfile for which dependencies have or have not changed, added, deleted or updated. + # + # @return [void] + # + def fetch_external_sources(podfile_state) + verify_no_pods_with_different_sources! + deps = dependencies_to_fetch(podfile_state) + pods = pods_to_fetch(podfile_state) + return if deps.empty? + UI.section 'Fetching external sources' do + deps.sort.each do |dependency| + fetch_external_source(dependency, !pods.include?(dependency.root_name)) + end + end + end + + def verify_no_pods_with_different_sources! + deps_with_different_sources = podfile_dependencies.group_by(&:root_name). + select { |_root_name, dependencies| dependencies.map(&:external_source).uniq.count > 1 } + deps_with_different_sources.each do |root_name, dependencies| + raise Informative, 'There are multiple dependencies with different ' \ + "sources for `#{root_name}` in #{UI.path podfile.defined_in_file}:" \ + "\n\n- #{dependencies.map(&:to_s).join("\n- ")}" + end + end + + def fetch_external_source(dependency, use_lockfile_options) + source = if use_lockfile_options && lockfile && checkout_options = lockfile.checkout_options_for_pod_named(dependency.root_name) + ExternalSources.from_params(checkout_options, dependency, podfile.defined_in_file, installation_options.clean?) + else + ExternalSources.from_dependency(dependency, podfile.defined_in_file, installation_options.clean?) + end + source.fetch(sandbox) + end + + def dependencies_to_fetch(podfile_state) + @deps_to_fetch ||= begin + deps_to_fetch = [] + deps_with_external_source = podfile_dependencies.select(&:external_source) + + if update_mode == :all + deps_to_fetch = deps_with_external_source + else + deps_to_fetch = deps_with_external_source.select { |dep| pods_to_fetch(podfile_state).include?(dep.root_name) } + deps_to_fetch_if_needed = deps_with_external_source.select { |dep| podfile_state.unchanged.include?(dep.root_name) } + deps_to_fetch += deps_to_fetch_if_needed.select do |dep| + sandbox.specification_path(dep.root_name).nil? || + !dep.external_source[:path].nil? || + !sandbox.pod_dir(dep.root_name).directory? 
|| + checkout_requires_update?(dep) + end + end + deps_to_fetch.uniq(&:root_name) + end + end + + def checkout_requires_update?(dependency) + return true unless lockfile && sandbox.manifest + locked_checkout_options = lockfile.checkout_options_for_pod_named(dependency.root_name) + sandbox_checkout_options = sandbox.manifest.checkout_options_for_pod_named(dependency.root_name) + locked_checkout_options != sandbox_checkout_options + end + + def pods_to_fetch(podfile_state) + @pods_to_fetch ||= begin + pods_to_fetch = podfile_state.added + podfile_state.changed + if update_mode == :selected + pods_to_fetch += pods_to_update[:pods] + elsif update_mode == :all + pods_to_fetch += podfile_state.unchanged + podfile_state.deleted + end + pods_to_fetch += podfile_dependencies. + select { |dep| Hash(dep.external_source).key?(:podspec) && sandbox.specification_path(dep.root_name).nil? }. + map(&:root_name) + pods_to_fetch + end + end + + def store_existing_checkout_options + return unless lockfile + podfile_dependencies.select(&:external_source).each do |dep| + if checkout_options = lockfile.checkout_options_for_pod_named(dep.root_name) + sandbox.store_checkout_source(dep.root_name, checkout_options) + end + end + end + + # Converts the Podfile in a list of specifications grouped by target. + # + # @note As some dependencies might have external sources the resolver + # is aware of the {Sandbox} and interacts with it to download the + # podspecs of the external sources. This is necessary because the + # resolver needs their specifications to analyze their + # dependencies. + # + # @note The specifications of the external sources which are added, + # modified or removed need to deleted from the sandbox before the + # resolution process. Otherwise the resolver might use an + # incorrect specification instead of pre-downloading it. + # + # @note In update mode the resolver is set to always update the specs + # from external sources. + # + # @return [Hash{TargetDefinition => Array}] the specifications + # grouped by target. + # + def resolve_dependencies(locked_dependencies) + duplicate_dependencies = podfile_dependencies.group_by(&:name). + select { |_name, dependencies| dependencies.count > 1 } + duplicate_dependencies.each do |name, dependencies| + UI.warn "There are duplicate dependencies on `#{name}` in #{UI.path podfile.defined_in_file}:\n\n" \ + "- #{dependencies.map(&:to_s).join("\n- ")}" + end + + resolver_specs_by_target = nil + UI.section "Resolving dependencies of #{UI.path(podfile.defined_in_file) || 'Podfile'}" do + resolver = Pod::Resolver.new(sandbox, podfile, locked_dependencies, sources, @specs_updated, :sources_manager => sources_manager) + resolver_specs_by_target = resolver.resolve + resolver_specs_by_target.values.flatten(1).map(&:spec).each(&:validate_cocoapods_version) + end + resolver_specs_by_target + end + + # Warns for any specification that is incompatible with its target. + # + # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target + # the resolved specifications grouped by target. + # + def validate_platforms(resolver_specs_by_target) + resolver_specs_by_target.each do |target, specs| + specs.map(&:spec).each do |spec| + next unless target_platform = target.platform + unless spec.available_platforms.any? { |p| target_platform.supports?(p) } + UI.warn "The platform of the target `#{target.name}` " \ + "(#{target.platform}) may not be compatible with `#{spec}` which has " \ + "a minimum requirement of #{spec.available_platforms.join(' - ')}." 
+ end + end + end + end + + # Returns the list of all the resolved specifications. + # + # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target + # the resolved specifications grouped by target. + # + # @return [Array] the list of the specifications. + # + def generate_specifications(resolver_specs_by_target) + resolver_specs_by_target.values.flatten.map(&:spec).uniq + end + + # Computes the state of the sandbox respect to the resolved + # specifications. + # + # @return [SpecsState] the representation of the state of the manifest + # specifications. + # + def generate_sandbox_state(specifications) + sandbox_state = nil + UI.section 'Comparing resolved specification to the sandbox manifest' do + sandbox_analyzer = SandboxAnalyzer.new(sandbox, podfile, specifications, update_mode?) + sandbox_state = sandbox_analyzer.analyze + sandbox_state.print + end + sandbox_state + end + + class << self + # @param [Platform] platform + # The platform to build against + # + # @param [String, Nil] object_version + # The user project's object version, or nil if not available + # + # @return [Boolean] Whether the platform requires 64-bit architectures + # + def requires_64_bit_archs?(platform, object_version) + return false unless platform + case platform.name + when :osx + true + when :ios + if (version = object_version) + platform.deployment_target >= IOS_64_BIT_ONLY_VERSION && version.to_i < IOS_64_BIT_ONLY_PROJECT_VERSION + else + platform.deployment_target >= IOS_64_BIT_ONLY_VERSION + end + when :watchos + false + when :tvos + false + end + end + end + + #-----------------------------------------------------------------------# + + # @!group Analysis sub-steps + + # Checks whether the platform is specified if not integrating + # + # @return [void] + # + def verify_platforms_specified! + return if installation_options.integrate_targets? + @podfile_dependency_cache.target_definition_list.each do |target_definition| + if !target_definition.empty? && target_definition.platform.nil? + raise Informative, 'It is necessary to specify the platform in the Podfile if not integrating.' + end + end + end + + # Precompute information for each target_definition in the Podfile + # + # @note The platforms are computed and added to each target_definition + # because it might be necessary to infer the platform from the + # user targets. + # + # @return [Hash{TargetDefinition => TargetInspectionResult}] + # + def inspect_targets_to_integrate + inspection_result = {} + UI.section 'Inspecting targets to integrate' do + inspectors = @podfile_dependency_cache.target_definition_list.map do |target_definition| + next if target_definition.abstract? 
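# Illustrative sketch of the class-level helper defined above: the platform alone decides
# for macOS/watchOS/tvOS, while iOS also consults the user project's object version.
# The deployment targets below are arbitrary example values.
require 'cocoapods'

analyzer = Pod::Installer::Analyzer
analyzer.requires_64_bit_archs?(Pod::Platform.new(:osx, '10.13'), nil)   # => true
analyzer.requires_64_bit_archs?(Pod::Platform.new(:watchos, '4.0'), nil) # => false
analyzer.requires_64_bit_archs?(nil, nil)                                # => false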
+ TargetInspector.new(target_definition, config.installation_root) + end.compact + inspectors.group_by(&:compute_project_path).each do |project_path, target_inspectors| + project = Xcodeproj::Project.open(project_path) + target_inspectors.each do |inspector| + target_definition = inspector.target_definition + results = inspector.compute_results(project) + inspection_result[target_definition] = results + UI.message('Using `ARCHS` setting to build architectures of ' \ + "target `#{target_definition.label}`: (`#{results.archs.join('`, `')}`)") + end + end + end + inspection_result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/analysis_result.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/analysis_result.rb new file mode 100644 index 0000000..7da9fd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/analysis_result.rb @@ -0,0 +1,87 @@ +module Pod + class Installer + class Analyzer + # A simple container produced after a analysis is completed by the {Analyzer}. + # + class AnalysisResult + # @return [SpecsState] the states of the Podfile specs. + # + attr_reader :podfile_state + + # @return [Hash{TargetDefinition => Array}] the specifications grouped by target. + # + attr_reader :specs_by_target + + # @return [Hash{Source => Array}] the specifications grouped by spec repo source. + # + attr_reader :specs_by_source + + # @return [Array] the specifications of the resolved version of Pods that should be installed. + # + attr_reader :specifications + + # @return [SpecsState] the states of the {Sandbox} respect the resolved specifications. + # + attr_reader :sandbox_state + + # @return [Array] The aggregate targets created for each {TargetDefinition} from the {Podfile}. + # + attr_reader :targets + + # @return [Array] The pod targets created for all the aggregate targets. + # + attr_reader :pod_targets + + # @return [PodfileDependencyCache] the cache of all dependencies in the podfile. + # + attr_reader :podfile_dependency_cache + + def initialize(podfile_state, specs_by_target, specs_by_source, specifications, sandbox_state, targets, pod_targets, + podfile_dependency_cache) + @podfile_state = podfile_state + @specs_by_target = specs_by_target + @specs_by_source = specs_by_source + @specifications = specifications + @sandbox_state = sandbox_state + @targets = targets + @pod_targets = pod_targets + @podfile_dependency_cache = podfile_dependency_cache + end + + # @return [Hash{String=>Symbol}] A hash representing all the user build + # configurations across all integration targets. Each key + # corresponds to the name of a configuration and its value to + # its type (`:debug` or `:release`). + # + def all_user_build_configurations + targets.reduce({}) do |result, target| + result.merge(target.user_build_configurations) + end + end + + # @return [Bool] Whether an installation should be performed or this + # CocoaPods project is already up to date. + # + def needs_install? + podfile_needs_install? || sandbox_needs_install? + end + + # @return [Bool] Whether the podfile has changes respect to the lockfile. + # + def podfile_needs_install? + state = podfile_state + needing_install = state.added.length + state.changed.length + state.deleted.length + needing_install > 0 + end + + # @return [Bool] Whether the sandbox is in synch with the lockfile. + # + def sandbox_needs_install? 
+ state = sandbox_state + needing_install = state.added.length + state.changed.length + state.deleted.length + needing_install > 0 + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/locking_dependency_analyzer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/locking_dependency_analyzer.rb new file mode 100644 index 0000000..7cd419d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/locking_dependency_analyzer.rb @@ -0,0 +1,103 @@ +require 'molinillo/dependency_graph' + +module Pod + class Installer + class Analyzer + # Generates dependencies that require the specific version of the Pods + # that haven't changed in the {Lockfile}. + module LockingDependencyAnalyzer + # Generates dependencies that require the specific version of the Pods + # that haven't changed in the {Lockfile}. + # + # These dependencies are passed to the {Resolver}, unless the installer + # is in update mode, to prevent it from upgrading the Pods that weren't + # changed in the {Podfile}. + # + # @param [Lockfile] lockfile the lockfile containing dependency constraints + # + # @param [Array] pods_to_update + # List of pod names which needs to be updated because installer is + # in update mode for these pods. Pods in this list and all their recursive dependencies + # will not be included in generated dependency graph + # + # @param [Array] pods_to_unlock + # List of pod names whose version constraints will be removed from the generated dependency graph. + # Recursive dependencies of the pods won't be affected. This is currently used to force local pods + # to be evaluated again whenever checksum of the specification of the local pods changes. + # + # @return [Molinillo::DependencyGraph] the dependencies + # generated by the lockfile that prevent the resolver to update + # a Pod. + # + def self.generate_version_locking_dependencies(lockfile, pods_to_update, pods_to_unlock = []) + dependency_graph = Molinillo::DependencyGraph.new + + if lockfile + added_dependency_strings = Set.new + + explicit_dependencies = lockfile.dependencies + explicit_dependencies.each do |dependency| + dependency_graph.add_vertex(dependency.name, dependency, true) + end + + pods = lockfile.to_hash['PODS'] || [] + pods.each do |pod| + add_to_dependency_graph(pod, [], dependency_graph, pods_to_unlock, added_dependency_strings) + end + + pods_to_update = pods_to_update.flat_map do |u| + root_name = Specification.root_name(u).downcase + dependency_graph.vertices.each_key.select { |n| Specification.root_name(n).downcase == root_name } + end + + pods_to_update.each do |u| + dependency_graph.detach_vertex_named(u) + end + + dependency_graph.each do |vertex| + next unless dep = vertex.payload + dep.podspec_repo ||= lockfile.spec_repo(dep.root_name) + end + end + + dependency_graph + end + + # Generates a completely 'unlocked' dependency graph. 
+ # + # @return [Molinillo::DependencyGraph] an empty dependency + # graph + # + def self.unlocked_dependency_graph + Molinillo::DependencyGraph.new + end + + private + + def self.add_child_vertex_to_graph(dependency_string, parents, dependency_graph, pods_to_unlock, added_dependency_strings) + return unless added_dependency_strings.add?(dependency_string) + dependency = Dependency.from_string(dependency_string) + if pods_to_unlock.include?(dependency.root_name) + dependency = Dependency.new(dependency.name) + end + vertex = dependency_graph.add_child_vertex(dependency.name, nil, parents, nil) + dependency = vertex.payload.merge(dependency) if vertex.payload + vertex.payload = dependency + dependency + end + + def self.add_to_dependency_graph(object, parents, dependency_graph, pods_to_unlock, added_dependency_strings) + case object + when String + add_child_vertex_to_graph(object, parents, dependency_graph, pods_to_unlock, added_dependency_strings) + when Hash + object.each do |key, value| + dependency = add_child_vertex_to_graph(key, parents, dependency_graph, pods_to_unlock, added_dependency_strings) + value.each { |v| add_to_dependency_graph(v, [dependency.name], dependency_graph, pods_to_unlock, added_dependency_strings) } + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/pod_variant.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/pod_variant.rb new file mode 100644 index 0000000..3ede165 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/pod_variant.rb @@ -0,0 +1,87 @@ +module Pod + class Installer + class Analyzer + # Bundles the information needed to setup a {PodTarget}. + class PodVariant + # @return [Array] the spec and subspecs for the target + # + attr_reader :specs + + # @return [Array] the test specs for the target + # + attr_reader :test_specs + + # @return [Array] the app specs for the target + # + attr_reader :app_specs + + # @return [Platform] the platform + # + attr_reader :platform + + # @return [BuildType] the build type of the target + # + attr_reader :build_type + + # @return [String] the Swift version of the target. + # + attr_reader :swift_version + + # Initialize a new instance from its attributes. + # + # @param [Array] specs @see #specs + # @param [Array] test_specs @see #test_specs + # @param [Array] app_specs @see #app_specs + # @param [Platform] platform @see #platform + # @param [BuildType] build_type @see #build_type + # @param [String] swift_version @see #swift_version + # + def initialize(specs, test_specs, app_specs, platform, build_type = BuildType.static_library, + swift_version = nil) + @specs = specs + @test_specs = test_specs + @app_specs = app_specs + @platform = platform + @build_type = build_type + @swift_version = swift_version + @hash = [specs, platform, build_type, swift_version].hash + end + + # @return [Specification] the root specification + # + def root_spec + specs.first.root + end + + # @note Non library specs are intentionally not included as part of the equality for pod variants since a pod + # variant should not be affected by the number of test nor app specs included. 
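# Illustrative sketch of the LockingDependencyAnalyzer module defined above, used on its
# own: load an existing Podfile.lock and build the locking graph while releasing one pod
# for update. The lockfile path and the pod name are assumptions for the example.
require 'cocoapods'

lockfile = Pod::Lockfile.from_file(Pathname.new('Podfile.lock'))
graph = Pod::Installer::Analyzer::LockingDependencyAnalyzer.
  generate_version_locking_dependencies(lockfile, %w(Alamofire), [])

graph.vertices.keys # names that remain pinned to their locked requirements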
+ # + # @return [Bool] whether the {PodVariant} is equal to another taking all all their attributes into account + # + def ==(other) + self.class == other.class && + build_type == other.build_type && + swift_version == other.swift_version && + platform == other.platform && + specs == other.specs + end + alias_method :eql?, :== + + # Hashes the instance by all its attributes. + # + # This adds support to make instances usable as Hash keys. + # + # @!visibility private + attr_reader :hash + + # @param [String] swift_version The swift version to use for this variant. + # + # @return [PodVariant] A copy of this pod variant with the specified Swift version. + # + def scoped_with_swift_version(swift_version) + PodVariant.new(specs, test_specs, app_specs, platform, build_type, swift_version) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/pod_variant_set.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/pod_variant_set.rb new file mode 100644 index 0000000..c2f3ebd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/pod_variant_set.rb @@ -0,0 +1,175 @@ +require 'set' + +module Pod + class Installer + class Analyzer + # Collects all {PodVariant}. + class PodVariantSet + # @return [Array] the different variants within this set. + # + attr_reader :variants + + # Initialize a new instance. + # + # @param [Array] variants @see #variants + # + def initialize(variants) + @variants = variants + end + + # Describes what makes each {PodVariant} distinct among the others. + # + # @return [Hash] + # + def scope_suffixes + return { variants.first => nil } if variants.count == 1 + Hash[scope_by_specs.map do |variant, scope| + require 'digest' + scope = Digest::MD5.hexdigest(scope)[0..7] if !scope.nil? && scope.length >= 50 + [variant, scope] + end] + end + + # Groups the collection by result of the block. + # + # @param [Block] block + # @return [Array] + # + def group_by(&block) + variants.group_by(&block).map { |_, v| PodVariantSet.new(v) } + end + + # @private + # + # Prepends the given scoped {PodVariant}s with another scoping label, if there + # was more than one group of {PodVariant}s given. + # + # @param [Array>] scoped_variants + # {PodVariant}s, which where grouped on base of a criteria, which is used + # in the block argument to generate a descriptive label. + # + # @param [Block] block + # takes a {PodVariant} and returns a scope suffix which is prepended, if + # necessary. + # + # @return [Hash] + # + def scope_if_necessary(scoped_variants, &block) + if scoped_variants.count == 1 + return scoped_variants.first + end + Hash[scoped_variants.flat_map do |variants| + variants.map do |variant, suffix| + prefix = block.call(variant) + scope = [prefix, suffix].compact.join('-') + [variant, !scope.empty? ? scope : nil] + end + end] + end + + # @private + # @return [Hash] + # + def scope_by_build_type + scope_if_necessary(group_by { |v| v.build_type.packaging }.map(&:scope_by_linkage)) do |variant| + variant.build_type.packaging + end + end + + # @private + # @return [Hash] + # + def scope_by_linkage + scope_if_necessary(group_by { |v| v.build_type.linkage }.map(&:scope_by_platform)) do |variant| + variant.build_type.linkage + end + end + + # @private + # @return [Hash] + # + def scope_by_platform + grouped_variants = group_by { |v| v.platform.name } + if grouped_variants.all? 
{ |set| set.variants.count == 1 } + # => Platform name + platform_name_proc = proc { |v| Platform.string_name(v.platform.symbolic_name).tr(' ', '') } + else + grouped_variants = group_by(&:platform) + # => Platform name + SDK version + platform_name_proc = proc { |v| v.platform.to_s.tr(' ', '') } + end + scope_if_necessary(grouped_variants.map(&:scope_by_swift_version), &platform_name_proc) + end + + # @private + # @return [Hash] + # + def scope_by_swift_version + scope_if_necessary(group_by(&:swift_version).map(&:scope_without_suffix)) do |variant| + variant.swift_version ? "Swift#{variant.swift_version}" : '' + end + end + + # @private + # @return [Hash] + # + def scope_by_specs + root_spec = variants.first.root_spec + specs = [root_spec] + specs += if root_spec.default_subspecs.empty? + root_spec.subspecs.compact + else + root_spec.default_subspecs.map do |subspec_name| + root_spec.subspec_by_name("#{root_spec.name}/#{subspec_name}") + end + end + default_specs = Set.new(specs) + grouped_variants = group_by(&:specs) + all_spec_variants = grouped_variants.map { |set| set.variants.first.specs } + common_specs = all_spec_variants.map(&:to_set).flatten.inject(&:&) + omit_common_specs = common_specs.any? && common_specs.proper_superset?(default_specs) + scope_if_necessary(grouped_variants.map(&:scope_by_build_type)) do |variant| + specs = variant.specs.to_set + + # The current variant contains all default specs + omit_default_specs = default_specs.any? && default_specs.subset?(specs) + if omit_default_specs + specs -= default_specs + end + + # There are common specs, which are different from the default specs + if omit_common_specs + specs -= common_specs + end + + spec_names = specs.map do |spec| + spec.root? ? '.root' : spec.name.split('/')[1..-1].join('_') + end.sort + if spec_names.empty? + omit_common_specs ? '.common' : nil + else + if omit_common_specs + spec_names.unshift('.common') + elsif omit_default_specs + spec_names.unshift('.default') + end + spec_names.reduce('') do |acc, name| + "#{acc}#{acc.empty? || name[0] == '.' ? '' : '-'}#{name}" + end + end + end + end + + # @private + # + # Helps to define scope suffixes recursively. 
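# Illustrative sketch of PodVariant and PodVariantSet from above. The specification is a
# throwaway example; equality deliberately ignores test and app specs, while platform,
# build type and Swift version all participate.
require 'cocoapods'

spec = Pod::Specification.new do |s|
  s.name    = 'Demo'
  s.version = '1.0'
end

plain = Pod::Installer::Analyzer::PodVariant.new([spec], [], [], Pod::Platform.ios)
swift = plain.scoped_with_swift_version('5.0')

plain == Pod::Installer::Analyzer::PodVariant.new([spec], [], [], Pod::Platform.ios) # => true
plain == swift                                                                       # => false

set = Pod::Installer::Analyzer::PodVariantSet.new([plain, swift])
set.scope_suffixes # per-variant suffixes, here expected to reflect the Swift version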
+ # + # @return [Hash] + # + def scope_without_suffix + Hash[variants.map { |v| [v, nil] }] + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/podfile_dependency_cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/podfile_dependency_cache.rb new file mode 100644 index 0000000..095e3bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/podfile_dependency_cache.rb @@ -0,0 +1,55 @@ +module Pod + class Installer + class Analyzer + # Caches podfile & target definition dependencies, so they do not need to be re-computed + # from the internal hash on each access + # + class PodfileDependencyCache + # @return [Array] + # All the dependencies in the podfile + # + attr_reader :podfile_dependencies + + def initialize(podfile_dependencies, dependencies_by_target_definition) + @podfile_dependencies = podfile_dependencies + @dependencies_by_target_definition = dependencies_by_target_definition + end + + # Returns the dependencies for the given target definition + # + def target_definition_dependencies(target_definition) + @dependencies_by_target_definition[target_definition] || + raise(ArgumentError, "dependencies for #{target_definition.inspect} do not exist in the cache") + end + + # Returns a list of all of the target definitions in the Podfile + # + def target_definition_list + @dependencies_by_target_definition.keys + end + + # Creates a {PodfileDependencyCache} from the given {Podfile} + # + # @param [Podfile] podfile + # The {Podfile} from which dependencies should be cached + # + # @return [PodfileDependencyCache] + # A warmed, immutable cache of all the dependencies in the {Podfile} + # + def self.from_podfile(podfile) + raise ArgumentError, 'Must be initialized with a podfile' unless podfile + podfile_dependencies = [] + dependencies_by_target_definition = {} + podfile.target_definition_list.each do |target_definition| + deps = target_definition.dependencies.freeze + podfile_dependencies.concat deps + dependencies_by_target_definition[target_definition] = deps + end + podfile_dependencies.uniq! + + new(podfile_dependencies.freeze, dependencies_by_target_definition.freeze) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/sandbox_analyzer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/sandbox_analyzer.rb new file mode 100644 index 0000000..d1df698 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/sandbox_analyzer.rb @@ -0,0 +1,267 @@ +module Pod + class Installer + class Analyzer + # Analyze the sandbox to detect which Pods should be removed, and which + # ones should be reinstalled. + # + # The logic is the following: + # + # Added + # - If not present in the sandbox lockfile. + # - The directory of the Pod doesn't exits. + # + # Changed + # - The version of the Pod changed. + # - The SHA of the specification file changed. + # - The specific installed (sub)specs of the same Pod changed. + # - The specification is from an external source and the + # installation process is in update mode. + # - The directory of the Pod is empty. + # - The Pod has been pre-downloaded. + # + # Removed + # - If a specification is present in the lockfile but not in the resolved + # specs. + # + # Unchanged + # - If none of the above conditions match. 
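# Illustrative sketch of the PodfileDependencyCache defined above, built from an existing
# Podfile on disk (the path is an assumption for the example).
require 'cocoapods'

podfile = Pod::Podfile.from_file(Pathname.new('Podfile'))
cache   = Pod::Installer::Analyzer::PodfileDependencyCache.from_podfile(podfile)

cache.podfile_dependencies.map(&:name) # every pod mentioned in the Podfile
cache.target_definition_list.each do |target_definition|
  cache.target_definition_dependencies(target_definition)
end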
+ # + class SandboxAnalyzer + # @return [Sandbox] The sandbox to analyze. + # + attr_reader :sandbox + + # @return [Podfile] The Podfile to analyze dependencies. + # + attr_reader :podfile + + # @return [Array] The specifications returned by the + # resolver. + # + attr_reader :specs + + # @return [Bool] Whether the installation is performed in update mode. + # + attr_reader :update_mode + + alias_method :update_mode?, :update_mode + + # Init a new SandboxAnalyzer + # + # @param [Sandbox] sandbox @see sandbox + # @param [Podfile] podfile @see podfile + # @param [Array] specs @see specs + # @param [Bool] update_mode @see update_mode + # + def initialize(sandbox, podfile, specs, update_mode) + @sandbox = sandbox + @podfile = podfile + @specs = specs + @update_mode = update_mode + end + + # Performs the analysis to the detect the state of the sandbox respect + # to the resolved specifications. + # + # @return [void] + # + def analyze + state = SpecsState.new + if sandbox_manifest + all_names = (resolved_pods + sandbox_pods).uniq.sort + all_names.sort.each do |name| + state.add_name(name, pod_state(name)) + end + else + state.added.merge(resolved_pods) + end + state + end + + #---------------------------------------------------------------------# + + private + + # @!group Pod state + + # Returns the state of the Pod with the given name. + # + # @param [String] pod + # the name of the Pod. + # + # @return [Symbol] The state + # + def pod_state(pod) + return :added if pod_added?(pod) + return :deleted if pod_deleted?(pod) + return :changed if pod_changed?(pod) + :unchanged + end + + # Returns whether the Pod with the given name should be installed. + # + # @note A Pod whose folder doesn't exists is considered added. + # + # @param [String] pod + # the name of the Pod. + # + # @return [Bool] Whether the Pod is added. + # + def pod_added?(pod) + return true if resolved_pods.include?(pod) && !sandbox_pods.include?(pod) + return true if !sandbox.local?(pod) && !folder_exist?(pod) + false + end + + # Returns whether the Pod with the given name should be removed from + # the installation. + # + # @param [String] pod + # the name of the Pod. + # + # @return [Bool] Whether the Pod is deleted. + # + def pod_deleted?(pod) + return true if !resolved_pods.include?(pod) && sandbox_pods.include?(pod) + false + end + + # Returns whether the Pod with the given name should be considered + # changed and thus should be reinstalled. + # + # @note In update mode, as there is no way to know if a remote source + # hash changed the Pods from external + # sources are always marked as changed. + # + # @note A Pod whose folder is empty is considered changed. + # + # @param [String] pod + # the name of the Pod. + # + # @return [Bool] Whether the Pod is changed. + # + def pod_changed?(pod) + spec = root_spec(pod) + return true if spec.version != sandbox_version(pod) + return true if spec.checksum != sandbox_checksum(pod) + return true if resolved_spec_names(pod) != sandbox_spec_names(pod) + return true if podfile_dependency(pod) != sandbox_dependency(pod) + return true if sandbox.predownloaded?(pod) + return true if folder_empty?(pod) + false + end + + #---------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Lockfile] The manifest to use for the sandbox. + # + def sandbox_manifest + sandbox.manifest + end + + #--------------------------------------# + + # @return [Array] The name of the resolved Pods. 
+ # + def resolved_pods + @resolved_pods ||= specs.map { |spec| spec.root.name }.uniq + end + + # @return [Array] The name of the Pods stored in the sandbox + # manifest. + # + def sandbox_pods + @sandbox_pods ||= sandbox_manifest.pod_names.map { |name| Specification.root_name(name) }.uniq + end + + # @return [Array] The name of the resolved specifications + # (includes subspecs). + # + # @param [String] pod + # the name of the Pod. + # + def resolved_spec_names(pod) + specs.select { |s| s.root.name == pod }.map(&:name).uniq.sort + end + + # @return [Array] The name of the specifications stored in the + # sandbox manifest (includes subspecs). + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_spec_names(pod) + sandbox_manifest.pod_names.select { |name| Specification.root_name(name) == pod }.uniq.sort + end + + # @return [Specification] The root specification for the Pod with the + # given name. + # + # @param [String] pod + # the name of the Pod. + # + def root_spec(pod) + specs.find { |s| s.root.name == pod }.root + end + + #--------------------------------------# + + # @return [Version] The version of Pod with the given name stored in + # the sandbox. + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_version(pod) + sandbox_manifest.version(pod) + end + + # @return [String] The checksum of the specification of the Pod with + # the given name stored in the sandbox. + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_checksum(pod) + sandbox_manifest.checksum(pod) + end + + # @return [Dependency, nil] The dependency with the given name stored in the sandbox. + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_dependency(pod) + sandbox_manifest.dependencies.find { |d| d.name == pod } + end + + #--------------------------------------# + + # @return [Dependency, nil] The dependency with the given name from the podfile. + # + # @param [String] pod + # the name of the Pod. + # + def podfile_dependency(pod) + podfile.dependencies.find { |d| d.name == pod } + end + + #--------------------------------------# + + def folder_exist?(pod) + sandbox.pod_dir(pod).exist? + end + + def folder_empty?(pod) + Dir.glob(sandbox.pod_dir(pod) + '*').empty? + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/specs_state.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/specs_state.rb new file mode 100644 index 0000000..b632d6b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/specs_state.rb @@ -0,0 +1,108 @@ +require 'set' + +module Pod + class Installer + class Analyzer + # This class represents the state of a collection of Pods. + # + # @note The names of the pods stored by this class are always the **root** + # name of the specification. + # + # @note The motivation for this class is to ensure that the names of the + # subspecs are added instead of the name of the Pods. + # + class SpecsState + # @return [Set] the names of the pods that were added. + # + attr_reader :added + + # @return [Set] the names of the pods that were changed. + # + attr_reader :changed + + # @return [Set] the names of the pods that were deleted. + # + attr_reader :deleted + + # @return [Set] the names of the pods that were unchanged. 
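# Illustrative sketch of the SandboxAnalyzer from above, run against an existing Pods/
# sandbox. Passing an empty spec list (instead of the resolver output) makes every pod
# recorded in the sandbox manifest show up as deleted when a manifest is present; the
# paths are assumptions for the example.
require 'cocoapods'

sandbox = Pod::Sandbox.new('Pods')
podfile = Pod::Podfile.from_file(Pathname.new('Podfile'))

analyzer = Pod::Installer::Analyzer::SandboxAnalyzer.new(sandbox, podfile, [], false)
state    = analyzer.analyze # => SpecsState
state.print                 # prints one line per pod with its state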
+ # + attr_reader :unchanged + + # Initialize a new instance + # + # @param [Hash{Symbol=>String}] pods_by_state + # The name of the pods grouped by their state + # (`:added`, `:removed`, `:changed` or `:unchanged`). + # + def initialize(pods_by_state = nil) + @added = Set.new + @deleted = Set.new + @changed = Set.new + @unchanged = Set.new + + if pods_by_state + { + :added => :added, + :changed => :changed, + :removed => :deleted, + :unchanged => :unchanged, + }.each do |state, spec_state| + Array(pods_by_state[state]).each do |name| + add_name(name, spec_state) + end + end + end + end + + # Displays the state of each pod. + # + # @return [void] + # + def print + states = %i(added deleted changed unchanged) + lines(states).each do |line| + UI.message(line, '', 2) + end + end + + def to_s(states: %i(added deleted changed unchanged)) + lines(states).join("\n") + end + + # Adds the name of a Pod to the give state. + # + # @param [String] name + # the name of the Pod. + # + # @param [Symbol] state + # the state of the Pod. + # + # @return [void] + # + def add_name(name, state) + send(state) << Specification.root_name(name) + end + + private + + # @return [Array] A description of changes for the given states, + # one per line + # + def lines(states) + prefixes = { + :added => 'A'.green, + :deleted => 'R'.red, + :changed => 'M'.yellow, + :unchanged => '-', + } + + states.flat_map do |state| + send(state).sort.map do |pod| + prefixes[state.to_sym] + " #{pod}" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/target_inspection_result.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/target_inspection_result.rb new file mode 100644 index 0000000..21c72d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/target_inspection_result.rb @@ -0,0 +1,58 @@ +module Pod + class Installer + class Analyzer + class TargetInspectionResult + # @return [TargetDefinition] the target definition, whose project was + # inspected + # + attr_reader :target_definition + + # @return [Xcodeproj::Project] the user's Xcode project + # + attr_reader :project + + # @return [Array] the uuid of the user's targets + # + attr_reader :project_target_uuids + + # @return [Hash{String=>Symbol}] A hash representing the user build + # configurations where each key corresponds to the name of a + # configuration and its value to its type (`:debug` or + # `:release`). 
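# Illustrative sketch of SpecsState from above, constructed directly from a hash of pod
# names grouped by state (the names are arbitrary examples). Note that `:removed` keys
# are mapped onto the `deleted` set.
require 'cocoapods'

state = Pod::Installer::Analyzer::SpecsState.new(
  :added     => %w(Alamofire),
  :changed   => %w(SnapKit),
  :removed   => %w(OldPod),
  :unchanged => %w(SomePod)
)

state.deleted.include?('OldPod') # => true
puts state.to_s                  # one line per pod: A/R/M/- followed by its name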
+ # + attr_reader :build_configurations + + # @return [Platform] the platform of the user targets + # + attr_reader :platform + + # @return [Array] the architectures used by user's targets + # + attr_reader :archs + + # @return [Pathname] the path to the root of the project containing the user target + # + attr_reader :client_root + + # Initialize a new instance + # + # @param [TargetDefinition] target_definition @see #target_definition + # @param [Xcodeproj::Project] project @see #project + # @param [Array] project_target_uuids @see #project_target_uuids + # @param [Hash{String=>Symbol}] build_configurations @see #build_configurations + # @param [Platform] platform @see #platform + # @param [Array] archs @see #archs + # + def initialize(target_definition, project, project_target_uuids, build_configurations, platform, archs) + @target_definition = target_definition + @project = project + @project_target_uuids = project_target_uuids + @build_configurations = build_configurations + @platform = platform + @archs = archs + @client_root = Pathname.new(project.project_dir + project.root_object.project_dir_path).realpath + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/target_inspector.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/target_inspector.rb new file mode 100644 index 0000000..717bd44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/analyzer/target_inspector.rb @@ -0,0 +1,258 @@ +require 'active_support/core_ext/array/conversions' + +module Pod + class Installer + class Analyzer + class TargetInspector + PLATFORM_INFO_URL = 'https://guides.cocoapods.org/syntax/podfile.html#platform'.freeze + + # @return [TargetDefinition] the target definition to inspect + # + attr_reader :target_definition + + # @return [Pathname] the root of the CocoaPods installation where the + # Podfile is located + # + attr_reader :installation_root + + # Initialize a new instance + # + # @param [TargetDefinition] target_definition + # @see #target_definition + # + # @param [Pathname] installation_root + # @see #installation_root + # + def initialize(target_definition, installation_root) + @target_definition = target_definition + @installation_root = installation_root + end + + # Inspect the #target_definition + # + # @raise If no `user_project` is set + # + # @return [TargetInspectionResult] the result of the inspection of the target definition within the user project + # + def compute_results(user_project) + raise ArgumentError, 'Cannot compute results without a user project set' unless user_project + + targets = compute_targets(user_project) + project_target_uuids = targets.map(&:uuid) + build_configurations = compute_build_configurations(targets) + platform = compute_platform(targets) + archs = compute_archs(targets) + swift_version = compute_swift_version_from_targets(targets) + + result = TargetInspectionResult.new(target_definition, user_project, project_target_uuids, + build_configurations, platform, archs) + result.target_definition.swift_version = swift_version + result + end + + # Returns the path of the user project that the #target_definition + # should integrate. + # + # @raise If the project is implicit and there are multiple projects. + # + # @raise If the path doesn't exits. + # + # @return [Pathname] the path of the user project. 
+ # + def compute_project_path + if target_definition.user_project_path + path = installation_root + target_definition.user_project_path + path = "#{path}.xcodeproj" unless File.extname(path) == '.xcodeproj' + path = Pathname.new(path) + unless path.exist? + raise Informative, 'Unable to find the Xcode project ' \ + "`#{path}` for the target `#{target_definition.label}`." + end + else + xcodeprojs = installation_root.children.select { |e| e.fnmatch('*.xcodeproj') } + if xcodeprojs.size == 1 + path = xcodeprojs.first + else + raise Informative, 'Could not automatically select an Xcode project. ' \ + "Specify one in your Podfile like so:\n\n" \ + " project 'path/to/Project.xcodeproj'\n" + end + end + path + end + + #-----------------------------------------------------------------------# + + private + + # Returns a list of the targets from the project of #target_definition + # that needs to be integrated. + # + # @note The method first looks if there is a target specified with + # the `link_with` option of the {TargetDefinition}. Otherwise + # it looks for the target that has the same name of the target + # definition. Finally if no target was found the first + # encountered target is returned (it is assumed to be the one + # to integrate in simple projects). + # + # @param [Xcodeproj::Project] user_project + # the user project + # + # @return [Array] + # + def compute_targets(user_project) + native_targets = user_project.native_targets + target = native_targets.find { |t| t.name == target_definition.name.to_s } + unless target + found = native_targets.map { |t| "`#{t.name}`" }.to_sentence + raise Informative, "Unable to find a target named `#{target_definition.name}` in project `#{Pathname(user_project.path).basename}`, did find #{found}." + end + [target] + end + + # @param [ArraySymbol}] A hash representing the user build + # configurations where each key corresponds to the name of a + # configuration and its value to its type (`:debug` or `:release`). + # + def compute_build_configurations(user_targets) + if user_targets + user_targets.flat_map { |t| t.build_configurations.map(&:name) }.each_with_object({}) do |name, hash| + hash[name] = name == 'Debug' ? :debug : :release + end.merge(target_definition.build_configurations || {}) + else + target_definition.build_configurations || {} + end + end + + # @param [Array Version.new(target.deployment_target) + deployment_target = Version.new(target.deployment_target) + end + end + + unless name + raise Informative, + "Unable to determine the platform for the `#{target_definition.name}` target." + end + + UI.warn "Automatically assigning platform `#{Platform.string_name(name)}` with version `#{deployment_target}` " \ + "on target `#{target_definition.name}` because no platform was specified. " \ + "Please specify a platform for this target in your Podfile. See `#{PLATFORM_INFO_URL}`." + + target_definition.set_platform(name, deployment_target) + Platform.new(name, deployment_target) + end + + # Computes the architectures relevant for the user's targets. + # + # @param [Array] + # + def compute_archs(user_targets) + user_targets.flat_map do |target| + Array(target.common_resolved_build_setting('ARCHS')) + end.compact.uniq.sort + end + + # Checks if any of the targets for the {TargetDefinition} computed before + # by #compute_user_project_targets is recommended to be build as a framework + # due the presence of Swift source code in any of the source build phases. 
+ # + # @param [TargetDefinition] target_definition + # the target definition + # + # @param [Array] native_targets + # the targets which are checked for presence of Swift source code + # + # @return [Boolean] Whether the user project targets to integrate into + # uses Swift + # + def compute_recommends_frameworks(target_definition, native_targets) + file_predicate = nil + file_predicate = proc do |file_ref| + if file_ref.respond_to?(:last_known_file_type) + file_ref.last_known_file_type == 'sourcecode.swift' + elsif file_ref.respond_to?(:files) + file_ref.files.any?(&file_predicate) + else + false + end + end + target_definition.platform.supports_dynamic_frameworks? || native_targets.any? do |target| + target.source_build_phase.files.any? do |build_file| + file_predicate.call(build_file.file_ref) + end + end + end + + # Compute the Swift version for the target build configurations. If more + # than one Swift version is defined for a given target, then it will raise. + # + # @param [Array] targets + # the targets that are checked for Swift versions. + # + # @return [String] the targets Swift version or nil + # + def compute_swift_version_from_targets(targets) + versions_to_targets = targets.inject({}) do |memo, target| + # User project may have an xcconfig that specifies the `SWIFT_VERSION`. + # Xcodeproj handles that xcconfig either not being set or the file not being present on disk. + # After the first integration the xcconfig set is most probably + # the one that was generated from CocoaPods. See https://github.com/CocoaPods/CocoaPods/issues/7731 for + # more details. + versions = target.resolved_build_setting('SWIFT_VERSION', true).values + versions.each do |version| + memo[version] = [] if memo[version].nil? + memo[version] << target.name unless memo[version].include? target.name + end + memo + end + + case versions_to_targets.count + when 0 + nil + when 1 + versions_to_targets.keys.first + else + target_version_pairs = versions_to_targets.map do |version_names, target_names| + target_names.map { |target_name| [target_name, version_names] } + end + + sorted_pairs = target_version_pairs.flat_map { |i| i }.sort_by do |target_name, version_name| + "#{target_name} #{version_name}" + end + + formatted_output = sorted_pairs.map do |target, version_name| + "#{target}: Swift #{version_name}" + end.join("\n") + + raise Informative, "There may only be up to 1 unique SWIFT_VERSION per target. Found target(s) with multiple Swift versions:\n#{formatted_output}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/base_install_hooks_context.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/base_install_hooks_context.rb new file mode 100644 index 0000000..2e2dbd5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/base_install_hooks_context.rb @@ -0,0 +1,135 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class BaseInstallHooksContext + # @return [Sandbox] The Sandbox for the project. + # + attr_reader :sandbox + + # @return [String] The path to the sandbox root (`Pods` directory). + # + attr_reader :sandbox_root + + # @return [Xcodeproj::Project] The Pods Xcode project. + # + attr_reader :pods_project + + # @return [Array] The list of + # the CocoaPods umbrella targets generated by the installer. 
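# Illustrative sketch of TargetInspector from above, inspecting a single user target
# outside of a full analysis. The Podfile path, the installation root and the 'MyApp'
# target name are assumptions; a matching native target must exist in the user project.
require 'cocoapods'

podfile = Pod::Podfile.from_file(Pathname.new('Podfile'))
target_definition = podfile.target_definition_list.find { |td| td.name == 'MyApp' }

inspector = Pod::Installer::Analyzer::TargetInspector.new(target_definition, Pathname.new('.'))
project   = Xcodeproj::Project.open(inspector.compute_project_path)
result    = inspector.compute_results(project)

result.platform # platform of the target definition, inferred from the user target if needed
result.archs    # resolved ARCHS of the user target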
+ # + attr_reader :umbrella_targets + + # Initialize a new instance + # + # @param [Sandbox] sandbox see #sandbox + # @param [String] sandbox_root see #sandbox_root + # @param [Xcodeproj::Project] pods_project see #pods_project + # @param [Array] umbrella_targets see #umbrella_targets + # + def initialize(sandbox, sandbox_root, pods_project, umbrella_targets) + @sandbox = sandbox + @sandbox_root = sandbox_root + @pods_project = pods_project + @umbrella_targets = umbrella_targets + end + + # @return [PostInstallHooksContext] Convenience class generator method + # + # @param [Sandbox] sandbox + # The sandbox + # + # @param [Project] pods_project + # The pods project. + # + # @param [Array] aggregate_targets + # The aggregate targets, which will been presented by an adequate + # {UmbrellaTargetDescription} in the generated context. + # + # @return [HooksContext] Convenience class method to generate the + # static context. + # + def self.generate(sandbox, pods_project, aggregate_targets) + umbrella_targets_descriptions = aggregate_targets.map do |umbrella| + user_project = umbrella.user_project + user_targets = umbrella.user_targets + specs = umbrella.specs + platform_name = umbrella.platform.name + platform_deployment_target = umbrella.platform.deployment_target.to_s + cocoapods_target_label = umbrella.label + UmbrellaTargetDescription.new(user_project, user_targets, specs, platform_name, platform_deployment_target, cocoapods_target_label) + end + + new(sandbox, sandbox.root.to_s, pods_project, umbrella_targets_descriptions) + end + + # Pure data class which describes an umbrella target. + # + class UmbrellaTargetDescription + # @return [Xcodeproj::Project] The user project into which this target + # is integrated. + # + attr_reader :user_project + + # @return [Array] + # The list of user targets integrated by this umbrella target. + # + attr_reader :user_targets + + # @return [Array] The list of the + # specifications of the target. + # + attr_reader :specs + + # @return [Symbol] The platform (either `:ios`, `:watchos`, `:tvos`, or `:osx`). + # + attr_reader :platform_name + + # @return [String] The deployment target. + # + attr_reader :platform_deployment_target + + # @return [String] The label for the target. + # + attr_reader :cocoapods_target_label + + # Initialize a new instance + # + # @param [Xcodeproj::Project] user_project see #user_project + # @param [Array] user_targets see #user_targets + # @param [Array] specs see #specs + # @param [Symbol] platform_name see #platform_name + # @param [String] platform_deployment_target see #platform_deployment_target + # @param [String] cocoapods_target_label see #cocoapods_target_label + # + def initialize(user_project, user_targets, specs, platform_name, platform_deployment_target, cocoapods_target_label) + @user_project = user_project + @user_targets = user_targets + @specs = specs + @platform_name = platform_name + @platform_deployment_target = platform_deployment_target + @cocoapods_target_label = cocoapods_target_label + end + + # @return [String] The path of the user project + # integrated by this target. + # + def user_project_path + user_project.path if user_project + end + + # @return [Array] The list of the UUIDs of the + # user targets integrated by this umbrella + # target. They can be used to find the + # targets opening the project They can be used + # to find the targets opening the project with + # Xcodeproj. 
+ # + def user_target_uuids + user_targets.map(&:uuid) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/installation_options.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/installation_options.rb new file mode 100644 index 0000000..dd22909 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/installation_options.rb @@ -0,0 +1,195 @@ +require 'active_support/hash_with_indifferent_access' + +module Pod + class Installer + # Represents the installation options the user can customize via a + # `Podfile`. + # + class InstallationOptions + # Parses installation options from a podfile. + # + # @param [Podfile] podfile the podfile to parse installation options + # from. + # + # @raise [Informative] if `podfile` does not specify a `CocoaPods` + # install. + # + # @return [Self] + # + def self.from_podfile(podfile) + name, options = podfile.installation_method + unless name.downcase == 'cocoapods' + raise Informative, "Currently need to specify a `cocoapods` install, you chose `#{name}`." + end + new(options) + end + + # Defines a new installation option. + # + # @param [#to_s] name the name of the option. + # + # @param default the default value for the option. + # + # @param [Boolean] boolean whether the option has a boolean value. + # + # @return [void] + # + # @!macro [attach] option + # + # @note this option defaults to $2. + # + # @return [Boolean] the $1 $0 for installation. + # + def self.option(name, default, boolean: true) + name = name.to_s + raise ArgumentError, "The `#{name}` option is already defined" if defaults.key?(name) + defaults[name] = default + attr_accessor name + alias_method "#{name}?", name if boolean + end + + # @return [Hash] all known installation options and their + # default values. + # + def self.defaults + @defaults ||= {} + end + + # @return [Array] the names of all known installation options. + # + def self.all_options + defaults.keys + end + + # Initializes the installation options with a hash of options from a + # Podfile. + # + # @param [Hash] options the options to parse. + # + # @raise [Informative] if `options` contains any unknown keys. + # + def initialize(options = {}) + options = ActiveSupport::HashWithIndifferentAccess.new(options) + unknown_keys = options.keys - self.class.all_options.map(&:to_s) + raise Informative, "Unknown installation options: #{unknown_keys.to_sentence}." unless unknown_keys.empty? + self.class.defaults.each do |key, default| + value = options.fetch(key, default) + send("#{key}=", value) + end + end + + # @param [Boolean] include_defaults whether values that match the default + # for their option should be included. Defaults to `true`. + # + # @return [Hash] the options, keyed by option name. 
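# Illustrative sketch of how a plugin consumes a context built on the class above: the
# post-install hook receives the sandbox root and one umbrella target description per
# aggregate target. The plugin name is hypothetical.
require 'cocoapods'

Pod::HooksManager.register('cocoapods-example-plugin', :post_install) do |context|
  Pod::UI.puts "Pods installed under #{context.sandbox_root}"
  context.umbrella_targets.each do |umbrella|
    user_targets = umbrella.user_targets.map(&:name).join(', ')
    Pod::UI.puts "#{umbrella.cocoapods_target_label} integrates #{user_targets}"
  end
end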
+ # + def to_h(include_defaults: true) + self.class.defaults.reduce(ActiveSupport::HashWithIndifferentAccess.new) do |hash, (option, default)| + value = send(option) + hash[option] = value if include_defaults || value != default + hash + end + end + + def ==(other) + other.is_a?(self.class) && to_h == other.to_h + end + + alias_method :eql, :== + + def hash + to_h.hash + end + + # Whether to clean the sources of the pods during installation + # + # Cleaning removes any files not used by the pod as specified by the podspec and the platforms + # that the project supports + # + # @see {PodSourceInstaller#clean!} + # + option :clean, true + + # Whether to deduplicate pod targets + # + # Target deduplication adds suffixes to pod targets for the cases where a pod is included + # in multiple targets that have different requirements. For example, a pod named 'MyPod' with a subspec 'SubA' + # that is included in two targets as follows: + # + # target 'MyTargetA' do + # pod 'MyPod/SubA' + # end + # + # target 'MyTargetB' do + # pod 'MyPod' + # end + # + # will result in two Pod targets: `MyPod` and `MyPod-SubA` + # + option :deduplicate_targets, true + + # Whether to generate deterministic UUIDs when creating the Pods project + # + # @see {Xcodeproj#generate_uuid} + # + option :deterministic_uuids, true + + # Whether to integrate the installed pods into the user project + # + # If set to false, Pods will be downloaded and installed to the `Pods/` directory + # but they will not be integrated into your project. + # + option :integrate_targets, true + + # Whether to lock the source files of pods. Xcode will prompt to unlock the files when attempting to modify + # their contents + # + # @note There is a performance penalty to locking the pods during installation. If this is significantly + # impacting the duration of `pod install` for your project, you can try setting this to `false` + # + option :lock_pod_sources, true + + # Whether to emit a warning when multiple sources contain a Pod with the same name and version + # + option :warn_for_multiple_pod_sources, true + + # Whether to emit a warning if a project is not explicitly specifying the git based master specs repo and can + # instead use CDN which is the default. + # + option :warn_for_unused_master_specs_repo, true + + # Whether to share Xcode schemes for development pods. + # + # Schemes for development pods are created automatically but are not shared by default. + # + option :share_schemes_for_development_pods, false + + # Whether to disable the input & output paths of the CocoaPods script phases (Copy Frameworks & Copy Resources) + # + # @see https://github.com/CocoaPods/CocoaPods/issues/8073 + # + option :disable_input_output_paths, false + + # Whether to preserve the file structure of all Pods, including externally sourced pods. + # + # By default, the file structure of Pod sources is preserved only for development pods. Setting + # `:preserve_pod_file_structure` to `true` will _always_ preserve the file structure. + # + option :preserve_pod_file_structure, false + + # Whether to generate a project per pod target. Instead of creating 1 `Pods.xcodeproj`, this option will generate + # a project for every pod target that will be nested under the `Pods.xcodeproj`. + # + option :generate_multiple_pod_projects, false + + # Whether to enable only regenerating targets and their associate projects that have changed + # since the previous installation. 
+ # + option :incremental_installation, false + + # Whether to skip generating the `Pods.xcodeproj` and perform only dependency resolution and downloading. + # + option :skip_pods_project_generation, false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pod_source_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pod_source_installer.rb new file mode 100644 index 0000000..d750a76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pod_source_installer.rb @@ -0,0 +1,224 @@ +require 'active_support/core_ext/string/strip' + +module Pod + class Installer + # Controller class responsible of installing the activated specifications + # of a single Pod. + # + # @note This class needs to consider all the activated specs of a Pod. + # + class PodSourceInstaller + UNENCRYPTED_PROTOCOLS = %w(http git).freeze + + # @return [Sandbox] The installation target. + # + attr_reader :sandbox + + # @return [Podfile] the podfile that should be integrated with the user + # projects. + # + attr_reader :podfile + + # @return [Hash{Symbol=>Array}] The specifications that need to be + # installed grouped by platform. + # + attr_reader :specs_by_platform + + # @return [Boolean] Whether the installer is allowed to touch the cache. + # + attr_reader :can_cache + alias_method :can_cache?, :can_cache + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Podfile] podfile @see #podfile + # @param [Hash{Symbol=>Array}] specs_by_platform @see #specs_by_platform + # @param [Boolean] can_cache @see #can_cache + # + def initialize(sandbox, podfile, specs_by_platform, can_cache: true) + @sandbox = sandbox + @podfile = podfile + @specs_by_platform = specs_by_platform + @can_cache = can_cache + end + + # @return [String] A string suitable for debugging. + # + def inspect + "<#{self.class} sandbox=#{sandbox.root} pod=#{root_spec.name}" + end + + # @return [String] The name of the pod this installer is installing. + # + def name + root_spec.name + end + + #-----------------------------------------------------------------------# + + public + + # @!group Installation + + # Creates the target in the Pods project and the relative support files. + # + # @return [void] + # + def install! + download_source unless predownloaded? || local? + PodSourcePreparer.new(root_spec, root).prepare! if local? + sandbox.remove_local_podspec(name) unless predownloaded? || local? || external? + end + + # Cleans the installations if appropriate. + # + # Cleaning the installation will remove any files that are not used during the build process, based on + # the podspec and platforms of the target that the pod is integrated into. + # + # @see {#clean_installation} + # + # @return [void] + # + def clean! + clean_installation unless local? + end + + # Locks the source files if appropriate. + # + # @return [void] + # + def lock_files!(file_accessors) + return if local? + unlocked_files = source_files(file_accessors).reject { |f| (File.stat(f).mode & 0o200).zero? } + FileUtils.chmod('u-w', unlocked_files) + end + + # Unlocks the source files if appropriate. + # + # @return [void] + # + def unlock_files!(file_accessors) + return if local? + FileUtils.chmod('u+w', source_files(file_accessors)) + end + + #-----------------------------------------------------------------------# + + private + + # @!group Installation Steps + + # Downloads the source of the Pod. 
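# Illustrative sketch of InstallationOptions from above, parsed from an in-memory Podfile
# that uses the `install!` directive (the platform value is an arbitrary example).
require 'cocoapods'

podfile = Pod::Podfile.new do
  install! 'cocoapods',
           :generate_multiple_pod_projects => true,
           :incremental_installation => true
  platform :ios, '13.0'
end

options = Pod::Installer::InstallationOptions.from_podfile(podfile)
options.incremental_installation?        # => true
options.to_h(include_defaults: false)    # only the options that differ from the defaults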
+ # + # @return [void] + # + def download_source + verify_source_is_secure(root_spec) + download_result = Downloader.download(download_request, root, :can_cache => can_cache?) + + if (specific_source = download_result.checkout_options) && specific_source != root_spec.source + sandbox.store_checkout_source(root_spec.name, specific_source) + end + end + + # Verify the source of the spec is secure, which is used to show a warning to the user if that isn't the case + # This method doesn't verify all protocols, but currently only prohibits unencrypted 'http://' and 'git://'' + # connections. + # + # @return [void] + # + def verify_source_is_secure(root_spec) + return if root_spec.source.nil? || (root_spec.source[:http].nil? && root_spec.source[:git].nil?) + source = if !root_spec.source[:http].nil? + URI(root_spec.source[:http].to_s) + elsif !root_spec.source[:git].nil? + git_source = root_spec.source[:git].to_s + return unless git_source =~ /^#{URI.regexp}$/ + URI(git_source) + end + if UNENCRYPTED_PROTOCOLS.include?(source.scheme) && source.host != 'localhost' + UI.warn "'#{root_spec.name}' uses the unencrypted '#{source.scheme}' protocol to transfer the Pod. " \ + 'Please be sure you\'re in a safe network with only trusted hosts. ' \ + 'Otherwise, please reach out to the library author to notify them of this security issue.' + end + end + + def download_request + Downloader::Request.new( + :spec => root_spec, + :released => released?, + ) + end + + #-----------------------------------------------------------------------# + + private + + # Removes all the files not needed for the installation according to the + # specs by platform. + # + # @return [void] + # + def clean_installation + cleaner = Sandbox::PodDirCleaner.new(root, specs_by_platform) + cleaner.clean! + end + + # @!group Convenience methods. + + # @return [Array] the specification of the Pod used in + # this installation. + # + def specs + specs_by_platform.values.flatten + end + + # @return [Specification] the root specification of the Pod. + # + def root_spec + specs.first.root + end + + # @return [Pathname] the folder where the source of the Pod is located. + # + def root + sandbox.pod_dir(root_spec.name) + end + + # @return [Boolean] whether the source has been pre downloaded in the + # resolution process to retrieve its podspec. + # + def predownloaded? + sandbox.predownloaded_pods.include?(root_spec.name) + end + + # @return [Boolean] whether the pod uses the local option and thus + # CocoaPods should not interfere with the files of the user. + # + def local? + sandbox.local?(root_spec.name) + end + + # @return [Boolean] whether the pod uses an external source (e.g. :podspec) in the + # resolution process to retrieve its podspec. + # + def external? + @dependencies ||= podfile.dependencies.select(&:external?).map(&:name) + @dependencies.include?(root_spec.name) + end + + def released? + !local? && !predownloaded? 
&& sandbox.specification(root_spec.name) != root_spec + end + + # @return [Array] The paths of the source files + # + def source_files(file_accessors) + file_accessors.flat_map(&:source_files) + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pod_source_preparer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pod_source_preparer.rb new file mode 100644 index 0000000..c97d645 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pod_source_preparer.rb @@ -0,0 +1,77 @@ +module Pod + class Installer + # Controller class responsible of executing the prepare command + # of a single Pod. + # + class PodSourcePreparer + # @return [Specification] the root specification of the Pod. + # + attr_reader :spec + + # @return [Pathname] the folder where the source of the Pod is located. + # + attr_reader :path + + # Initialize a new instance + # + # @param [Specification] spec the root specification of the Pod. + # @param [Pathname] path the folder where the source of the Pod is located. + # + def initialize(spec, path) + raise "Given spec isn't a root spec, but must be." unless spec.root? + @spec = spec + @path = path + end + + #-----------------------------------------------------------------------# + + public + + # @!group Preparation + + # Executes the prepare command if there is one. + # + # @return [void] + # + def prepare! + run_prepare_command + end + + #-----------------------------------------------------------------------# + + private + + # @!group Preparation Steps + + extend Executable + executable :bash + + # Runs the prepare command bash script of the spec. + # + # @note Unsets the `CDPATH` env variable before running the + # shell script to avoid issues with relative paths + # (issue #1694). 
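verify_source_is_secure above only warns about the unencrypted http:// and git:// transports and skips localhost. A standalone sketch of that check, assuming a plain Hash in place of a Specification source (the URLs are illustrative):

    require 'uri'

    UNENCRYPTED = %w(http git).freeze

    # +source+ is a hash such as { :git => 'git://example.com/Foo.git' } or
    # { :http => 'https://example.com/Foo.zip' } (hypothetical values).
    def insecure_source_warning(name, source)
      url = (source[:http] || source[:git]).to_s
      return nil if url.empty?

      uri = begin
        URI(url)
      rescue URI::InvalidURIError
        return nil
      end

      return nil unless UNENCRYPTED.include?(uri.scheme) && uri.host != 'localhost'
      "'#{name}' uses the unencrypted '#{uri.scheme}' protocol to transfer the Pod."
    end

    insecure_source_warning('Foo', :git => 'git://example.com/Foo.git')
    # => "'Foo' uses the unencrypted 'git' protocol to transfer the Pod."
    insecure_source_warning('Bar', :http => 'https://example.com/Bar.zip')
    # => nil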
+ # + # @return [void] + # + def run_prepare_command + return unless spec.prepare_command + UI.section(' > Running prepare command', '', 1) do + Dir.chdir(path) do + begin + ENV.delete('CDPATH') + ENV['COCOAPODS_VERSION'] = Pod::VERSION + prepare_command = spec.prepare_command.strip_heredoc.chomp + full_command = "\nset -e\n" + prepare_command + bash!('-c', full_command) + ensure + ENV.delete('COCOAPODS_VERSION') + end + end + end + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/podfile_validator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/podfile_validator.rb new file mode 100644 index 0000000..e324c7f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/podfile_validator.rb @@ -0,0 +1,168 @@ +module Pod + class Installer + # Validate the podfile before installing to catch errors and + # problems + # + class PodfileValidator + # @return [Podfile] The podfile being validated + # + attr_reader :podfile + + # @return [Array] any errors that have occurred during the validation + # + attr_reader :errors + + # @return [Array] any warnings that have occurred during the validation + # + attr_reader :warnings + + # Initialize a new instance + # + # @param [Podfile] podfile + # The podfile to validate + # + # @param [Analyzer::PodfileDependencyCache] podfile_dependency_cache + # An (optional) cache of all the dependencies in the podfile + # + def initialize(podfile, podfile_dependency_cache = Analyzer::PodfileDependencyCache.from_podfile(podfile)) + @podfile = podfile + @podfile_dependency_cache = podfile_dependency_cache + @errors = [] + @warnings = [] + @validated = false + end + + # Validate the podfile + # Errors are added to the errors array + # + def validate + validate_installation_options + validate_pod_directives + validate_no_abstract_only_pods! + validate_dependencies_are_present! + validate_no_duplicate_targets! + + @validated = true + end + + # Wether the podfile is valid is not + # NOTE: Will execute `validate` if the podfile + # has not yet been validated + # + def valid? + validate unless @validated + + @validated && errors.empty? + end + + # A message describing any errors in the + # validation + # + def message + errors.join("\n") + end + + private + + def add_error(error) + errors << error + end + + def add_warning(warning) + warnings << warning + end + + def validate_installation_options + installation_options = podfile.installation_options + + # Validate `incremental_installation` depends on `generate_multiple_pod_projects` + invalid = installation_options.incremental_installation? && installation_options.incremental_installation != installation_options.generate_multiple_pod_projects + add_error 'The installation option `incremental_installation` requires the option `generate_multiple_pod_projects` to also be enabled.' if invalid + end + + def validate_pod_directives + @podfile_dependency_cache.podfile_dependencies.each do |dependency| + validate_conflicting_external_sources!(dependency) + end + end + + def validate_conflicting_external_sources!(dependency) + external_source = dependency.external_source + return false if external_source.nil? 
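run_prepare_command above prefixes the script with `set -e`, strips heredoc indentation, and clears CDPATH so relative `cd` calls inside the script behave. A reduced sketch of that pattern using Open3 instead of CocoaPods' Executable helper; the script content and directory are hypothetical:

    require 'open3'

    # Runs +script+ with bash inside +dir+, failing fast on the first error,
    # and keeps a user's CDPATH from leaking into the script.
    def run_prepare_command(script, dir)
      Dir.chdir(dir) do
        old_cdpath = ENV.delete('CDPATH')
        begin
          stdout, status = Open3.capture2('bash', '-c', "set -e\n" + script)
          raise "prepare_command failed (#{status.exitstatus})" unless status.success?
          stdout
        ensure
          ENV['CDPATH'] = old_cdpath if old_cdpath
        end
      end
    end

    run_prepare_command(<<~SH, '.')
      echo "generating sources"
      ruby -e 'puts RUBY_VERSION'
    SH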
+ + available_downloaders = Downloader.downloader_class_by_key.keys + specified_downloaders = external_source.select { |key| available_downloaders.include?(key) } + if specified_downloaders.size > 1 + add_error "The dependency `#{dependency.name}` specifies more than one download strategy(#{specified_downloaders.keys.join(',')})." \ + 'Only one is allowed' + end + + pod_spec_or_path = external_source[:podspec].present? || external_source[:path].present? + if pod_spec_or_path && specified_downloaders.size > 0 + add_error "The dependency `#{dependency.name}` specifies `podspec` or `path` in combination with other" \ + ' download strategies. This is not allowed' + end + end + + # Warns the user if the podfile is empty. + # + # @note The workspace is created in any case and all the user projects + # are added to it, however the projects are not integrated as + # there is no way to discern between target definitions which are + # empty and target definitions which just serve the purpose to + # wrap other ones. This is not an issue because empty target + # definitions generate empty libraries. + # + # @return [void] + # + def validate_dependencies_are_present! + if @podfile_dependency_cache.target_definition_list.all?(&:empty?) + add_warning 'The Podfile does not contain any dependencies.' + end + end + + # Verifies that no dependencies in the Podfile will end up not being built + # at all. In other words, all dependencies should belong to a non-abstract + # target, or be inherited by a target where `inheritance == complete`. + # + def validate_no_abstract_only_pods! + @podfile_dependency_cache.target_definition_list.each do |target_definition| + dependencies = @podfile_dependency_cache.target_definition_dependencies(target_definition) + next if dependencies.empty? + next unless target_definition.abstract? + + children = target_definition.recursive_children + next if children.any? { |child_target_definition| target_definition_inherits?(:parent => target_definition, :child => child_target_definition) } + + add_warning "The abstract target #{target_definition.name} is not inherited by a concrete target, " \ + "so the following dependencies won't make it into any targets in your project:" \ + "\n - #{dependencies.map(&:to_s).sort.join("\n - ")}" + + next if target_definition.platform + + add_error "The abstract target #{target_definition.name} must specify a platform since its dependencies are not inherited by a concrete target." + end + end + + def target_definition_inherits?(parent: nil, child: nil) + if parent == child + true + elsif child.exclusive? + false + else + target_definition_inherits?(:parent => parent, :child => child.parent) + end + end + + def validate_no_duplicate_targets! + @podfile_dependency_cache.target_definition_list.group_by { |td| [td.name, td.user_project_path] }. 
+ each do |(name, project), definitions| + next unless definitions.size > 1 + error = "The target `#{name}` is declared multiple times" + error << " for the project `#{project}`" if project + add_error(error << '.') + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/post_install_hooks_context.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/post_install_hooks_context.rb new file mode 100644 index 0000000..9f128d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/post_install_hooks_context.rb @@ -0,0 +1,9 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class PostInstallHooksContext < BaseInstallHooksContext + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/post_integrate_hooks_context.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/post_integrate_hooks_context.rb new file mode 100644 index 0000000..17e1c3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/post_integrate_hooks_context.rb @@ -0,0 +1,9 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class PostIntegrateHooksContext < BaseInstallHooksContext + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pre_install_hooks_context.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pre_install_hooks_context.rb new file mode 100644 index 0000000..7753abd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pre_install_hooks_context.rb @@ -0,0 +1,51 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer before analysis has been completed. + # + class PreInstallHooksContext + # @return [Podfile] The Podfile for the project. + # + attr_reader :podfile + + # @return [Sandbox] The Sandbox for the project. + # + attr_reader :sandbox + + # @return [String] The path to the sandbox root (`Pods` directory). + # + attr_reader :sandbox_root + + # @return [Lockfile] The Lockfile for the project. + # + attr_reader :lockfile + + # Initialize a new instance + # + # @param [Sandbox] sandbox see #sandbox + # @param [String] sandbox_root see #sandbox_root + # @param [Podfile] podfile see #podfile + # @param [Lockfile] lockfile see #lockfile + # + def initialize(podfile, sandbox, sandbox_root, lockfile) + @podfile = podfile + @sandbox = sandbox + @sandbox_root = sandbox_root + @lockfile = lockfile + end + + # @param [Sandbox] sandbox see {#sandbox} + # + # @param [Podfile] podfile see {#podfile} + # + # @param [Lockfile] lockfile see {#lockfile} + # + # @return [PreInstallHooksContext] Convenience class method to generate the + # static context. 
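validate_no_duplicate_targets! above groups target definitions by name and user project so that a target declared twice for the same project is reported once. The same grouping in a standalone sketch, with a simple Struct standing in for TargetDefinition:

    # Hypothetical stand-in for Podfile target definitions.
    TargetDef = Struct.new(:name, :user_project_path)

    def duplicate_target_errors(definitions)
      definitions.group_by { |td| [td.name, td.user_project_path] }.
        each_with_object([]) do |((name, project), defs), errors|
          next unless defs.size > 1
          error = "The target `#{name}` is declared multiple times"
          error << " for the project `#{project}`" if project
          errors << (error << '.')
        end
    end

    defs = [
      TargetDef.new('App', 'App.xcodeproj'),
      TargetDef.new('App', 'App.xcodeproj'),
      TargetDef.new('AppTests', 'App.xcodeproj'),
    ]
    duplicate_target_errors(defs)
    # => ["The target `App` is declared multiple times for the project `App.xcodeproj`."]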
+ # + def self.generate(sandbox, podfile, lockfile) + new(podfile, sandbox, sandbox.root.to_s, lockfile) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pre_integrate_hooks_context.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pre_integrate_hooks_context.rb new file mode 100644 index 0000000..7393613 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/pre_integrate_hooks_context.rb @@ -0,0 +1,9 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class PreIntegrateHooksContext < BaseInstallHooksContext + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache.rb new file mode 100644 index 0000000..eac058c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache.rb @@ -0,0 +1,11 @@ +module Pod + class Installer + module ProjectCache + autoload :ProjectCacheAnalyzer, 'cocoapods/installer/project_cache/project_cache_analyzer' + autoload :ProjectInstallationCache, 'cocoapods/installer/project_cache/project_installation_cache' + autoload :ProjectMetadataCache, 'cocoapods/installer/project_cache/project_metadata_cache' + autoload :ProjectCacheAnalysisResult, 'cocoapods/installer/project_cache/project_cache_analysis_result' + autoload :ProjectCacheVersion, 'cocoapods/installer/project_cache/project_cache_version' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_analysis_result.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_analysis_result.rb new file mode 100644 index 0000000..50de9cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_analysis_result.rb @@ -0,0 +1,53 @@ +module Pod + class Installer + module ProjectCache + # The result object from analyzing the project cache. + # + class ProjectCacheAnalysisResult + # @return [Array] + # The list of pod targets that need to be regenerated. + # + attr_reader :pod_targets_to_generate + + # @return [Array] + # The list of aggregate targets that need to be regenerated. This can be nil if we don't want to + # generate ANY aggregate targets since we still want to be able to generate an empty list of aggregate + # targets. + # + attr_reader :aggregate_targets_to_generate + + # @return [Hash{String => TargetCacheKey}] + # Updated hash of target cache key by target label for all targets. + # + attr_reader :cache_key_by_target_label + + # @return [Hash{String => Symbol}] + # The build configurations to install with each target. + # + attr_reader :build_configurations + + # @return [Integer] + # The project object version to install with each target. + # + attr_reader :project_object_version + + # Initialize a new instance. 
+ # + # @param [Array] pod_targets_to_generate @see #pod_targets_to_generate + # @param [Array TargetCacheKey}] cache_key_by_target_label @see #cache_key_by_target_label + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [Integer] project_object_version @see #project_object_version + # + def initialize(pod_targets_to_generate, aggregate_targets_to_generate, cache_key_by_target_label, + build_configurations, project_object_version) + @pod_targets_to_generate = pod_targets_to_generate + @aggregate_targets_to_generate = aggregate_targets_to_generate + @cache_key_by_target_label = cache_key_by_target_label + @build_configurations = build_configurations + @project_object_version = project_object_version + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_analyzer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_analyzer.rb new file mode 100644 index 0000000..b3ac5f0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_analyzer.rb @@ -0,0 +1,200 @@ +module Pod + class Installer + module ProjectCache + # Analyzes the project cache and computes which pod targets need to be generated. + # + class ProjectCacheAnalyzer + require 'cocoapods/installer/project_cache/project_cache_analysis_result' + + # @return [Sandbox] Project sandbox. + # + attr_reader :sandbox + + # @return [ProjectInstallationCache] The cache of targets that were previously installed. + # + attr_reader :cache + + # @return [Hash{String => Symbol}] The hash of user build configurations. + # + attr_reader :build_configurations + + # @return [Integer] The object version from the user project. + # + attr_reader :project_object_version + + # @return [Hash] The podfile plugins to be run for the installation. + # + attr_reader :podfile_plugins + + # @return [Array] The list of pod targets. + # + attr_reader :pod_targets + + # @return [Array] The list of aggregate targets. + # + attr_reader :aggregate_targets + + # @return [Hash] Hash of installation options. + # + attr_reader :installation_options + + # @return [Bool] Flag indicating if we want to ignore the cache and force a clean installation. + # + attr_reader :clean_install + + # Initialize a new instance. 
+ # + # @param [Sandbox] sandbox @see #sandbox + # @param [ProjectInstallationCache] cache @see #cache + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [Integer] project_object_version @see #project_object_version + # @param [Hash] podfile_plugins @see #podfile_plugins + # @param [Array] pod_targets @see #pod_targets + # @param [Array] aggregate_targets @see #aggregate_targets + # @param [Hash] installation_options @see #installation_options + # @param [Bool] clean_install @see #clean_install + # + def initialize(sandbox, cache, build_configurations, project_object_version, podfile_plugins, pod_targets, aggregate_targets, installation_options, + clean_install: false) + @sandbox = sandbox + @cache = cache + @build_configurations = build_configurations + @podfile_plugins = podfile_plugins + @pod_targets = pod_targets + @aggregate_targets = aggregate_targets + @project_object_version = project_object_version + @installation_options = installation_options + @clean_install = clean_install + end + + # @return [ProjectCacheAnalysisResult] + # Compares all targets stored against the cache and computes which targets need to be regenerated. + # + def analyze + target_by_label = Hash[(pod_targets + aggregate_targets).map { |target| [target.label, target] }] + cache_key_by_target_label = create_cache_key_mappings(target_by_label) + + full_install_results = ProjectCacheAnalysisResult.new(pod_targets, aggregate_targets, cache_key_by_target_label, + build_configurations, project_object_version) + if clean_install + UI.message 'Ignoring project cache from the provided `--clean-install` flag.' + return full_install_results + end + + # Bail out early since these properties affect all targets and their associate projects. + if cache.build_configurations != build_configurations || + cache.project_object_version != project_object_version || + YAMLHelper.convert(cache.podfile_plugins) != YAMLHelper.convert(podfile_plugins) || + YAMLHelper.convert(cache.installation_options) != YAMLHelper.convert(installation_options) + UI.message 'Ignoring project cache due to project configuration changes.' + return full_install_results + end + + if project_names_changed?(pod_targets, cache) + UI.message 'Ignoring project cache due to project name changes.' 
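analyze above falls back to a full regeneration whenever a property that affects every target changes (build configurations, project object version, podfile plugins, installation options) or when a clean install is forced, and only otherwise computes fine-grained target diffs. A reduced sketch of that early-out shape, with plain hashes standing in for the cache and the analyzer state:

    # +cache+ and +current+ are hashes of the global, project-wide inputs
    # (hypothetical keys mirroring the comparisons above).
    def needs_full_install?(cache, current, clean_install: false)
      return true if clean_install
      %i[build_configurations project_object_version podfile_plugins installation_options].any? do |key|
        cache[key] != current[key]
      end
    end

    cache   = { :build_configurations => { 'Debug' => :debug }, :project_object_version => 50 }
    current = { :build_configurations => { 'Debug' => :debug }, :project_object_version => 55 }
    needs_full_install?(cache, current)                    # => true (object version changed)
    needs_full_install?(cache, cache, clean_install: true) # => true (cache ignored entirely)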
+ return full_install_results + end + + pod_targets_to_generate = Set[] + aggregate_targets_to_generate = Set[] + added_targets = compute_added_targets(target_by_label, cache_key_by_target_label.keys, cache.cache_key_by_target_label.keys) + added_pod_targets, added_aggregate_targets = added_targets.partition { |target| target.is_a?(PodTarget) } + pod_targets_to_generate.merge(added_pod_targets) + aggregate_targets_to_generate.merge(added_aggregate_targets) + + removed_aggregate_target_labels = compute_removed_targets(cache_key_by_target_label.keys, cache.cache_key_by_target_label.keys) + + changed_targets = compute_changed_targets_from_cache(cache_key_by_target_label, target_by_label, cache) + changed_pod_targets, changed_aggregate_targets = changed_targets.partition { |target| target.is_a?(PodTarget) } + pod_targets_to_generate.merge(changed_pod_targets) + aggregate_targets_to_generate.merge(changed_aggregate_targets) + + dirty_targets = compute_dirty_targets(pod_targets + aggregate_targets) + dirty_pod_targets, dirty_aggregate_targets = dirty_targets.partition { |target| target.is_a?(PodTarget) } + pod_targets_to_generate.merge(dirty_pod_targets) + aggregate_targets_to_generate.merge(dirty_aggregate_targets) + + # Since multi xcodeproj will group targets by PodTarget#project_name into individual projects, we need to + # append these "sibling" targets to the list of targets we need to generate before finalizing the total list, + # otherwise we will end up with missing targets. + # + sibling_pod_targets = compute_sibling_pod_targets(pod_targets, pod_targets_to_generate) + pod_targets_to_generate.merge(sibling_pod_targets) + + # We either return the full list of aggregate targets or none since the aggregate targets go into the + # Pods.xcodeproj and so we need to regenerate all aggregate targets when regenerating Pods.xcodeproj. + total_aggregate_targets_to_generate = unless aggregate_targets_to_generate.empty? && removed_aggregate_target_labels.empty? + aggregate_targets + end + + ProjectCacheAnalysisResult.new(pod_targets_to_generate.to_a, total_aggregate_targets_to_generate, + cache_key_by_target_label, build_configurations, project_object_version) + end + + private + + def create_cache_key_mappings(target_by_label) + Hash[target_by_label.map do |label, target| + case target + when PodTarget + local = sandbox.local?(target.pod_name) + checkout_options = sandbox.checkout_sources[target.pod_name] + [label, TargetCacheKey.from_pod_target(sandbox, target, :is_local_pod => local, + :checkout_options => checkout_options)] + when AggregateTarget + [label, TargetCacheKey.from_aggregate_target(sandbox, target)] + else + raise "[BUG] Unknown target type #{target}" + end + end] + end + + def compute_added_targets(target_by_label, target_labels, cached_target_labels) + (target_labels - cached_target_labels).map do |label| + target_by_label[label] + end + end + + def compute_removed_targets(target_labels, cached_target_labels) + cached_target_labels - target_labels + end + + def compute_changed_targets_from_cache(cache_key_by_target_label, target_by_label, cache) + cache_key_by_target_label.each_with_object([]) do |(label, cache_key), changed_targets| + next unless cache.cache_key_by_target_label[label] + if cache_key.key_difference(cache.cache_key_by_target_label[label]) == :project + changed_targets << target_by_label[label] + end + end + end + + def compute_dirty_targets(targets) + targets.reject do |target| + support_files_dir_exists = File.exist? 
target.support_files_dir + xcodeproj_exists = case target + when PodTarget + File.exist? sandbox.pod_target_project_path(target.project_name) + when AggregateTarget + File.exist? sandbox.project_path + else + raise "[BUG] Unknown target type #{target}" + end + support_files_dir_exists && xcodeproj_exists + end + end + + def compute_sibling_pod_targets(pod_targets, pod_targets_to_generate) + pod_targets_by_project_name = pod_targets.group_by(&:project_name) + pod_targets_to_generate.flat_map { |t| pod_targets_by_project_name[t.project_name] } + end + + def project_names_changed?(pod_targets, cache) + pod_targets.any? do |pod_target| + next unless (target_cache_key = cache.cache_key_by_target_label[pod_target.label]) + target_cache_key.project_name != pod_target.project_name + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_version.rb new file mode 100644 index 0000000..65957a8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_cache_version.rb @@ -0,0 +1,43 @@ +module Pod + class Installer + module ProjectCache + # Object that stores, loads, and holds the version of the project cache. + # + class ProjectCacheVersion + # @return [Version] The version of the project cache. + # + attr_reader :version + + # Initialize a new instance. + # + # @param [Version] version @see #version + # + def initialize(version = Version.create('0')) + @version = version + end + + # Constructs a ProjectCacheVersion from a file. + # + # @param [String] path + # The path of the project cache + # + # @return [ProjectCacheVersion] + # + def self.from_file(path) + return ProjectCacheVersion.new unless File.exist?(path) + cached_version = Version.create(File.read(path)) + ProjectCacheVersion.new(cached_version) + end + + # @return [void] + # + # @param [String] path + # The path of the project cache to save. + # + def save_as(path) + Sandbox.update_changed_file(path, version.to_s) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_installation_cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_installation_cache.rb new file mode 100644 index 0000000..daa6e04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_installation_cache.rb @@ -0,0 +1,103 @@ +module Pod + class Installer + module ProjectCache + # Represents the cache stored at Pods/.project/installation_cache + # + class ProjectInstallationCache + require 'cocoapods/installer/project_cache/target_cache_key' + + # @return [Hash{String => TargetCacheKey}] + # Stored hash of target cache key objects for every pod target. + # + attr_reader :cache_key_by_target_label + + # @return [Hash{String => Symbol}] + # Build configurations stored in the cache. + # + attr_reader :build_configurations + + # @return [Integer] + # Project object stored in the cache. + # + attr_reader :project_object_version + + # @return [Hash] + # Podfile plugins used with a particular install. + # + attr_reader :podfile_plugins + + # @return [Hash] + # Configured installation options + # + attr_reader :installation_options + + # Initializes a new instance. 
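ProjectCacheVersion above is just a version number persisted next to the cache; reading it back lets the installer discard caches written by an incompatible release. A minimal sketch of that pattern using Gem::Version and a plain file (the path constant is a hypothetical location, not the real cache layout):

    require 'fileutils'
    require 'rubygems' # Gem::Version

    CACHE_VERSION_PATH = 'Pods/.project_cache/version'.freeze # hypothetical location

    def stored_cache_version(path)
      return Gem::Version.new('0') unless File.exist?(path)
      Gem::Version.new(File.read(path).strip)
    end

    def save_cache_version(path, version)
      FileUtils.mkdir_p(File.dirname(path))
      File.write(path, version.to_s)
    end

    def cache_usable?(path, current_version)
      stored_cache_version(path) == Gem::Version.new(current_version)
    end

    save_cache_version(CACHE_VERSION_PATH, '3')
    cache_usable?(CACHE_VERSION_PATH, '3') # => true
    cache_usable?(CACHE_VERSION_PATH, '4') # => false, so the cache would be discarded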
+ # + # @param [Hash{String => TargetCacheKey}] cache_key_by_target_label @see #cache_key_by_target_label + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [Integer] project_object_version @see #project_object_version + # @param [Hash] podfile_plugins @see #podfile_plugins + # @param [Hash] installation_options @see #installation_options + # + def initialize(cache_key_by_target_label = {}, build_configurations = nil, project_object_version = nil, podfile_plugins = {}, installation_options = {}) + @cache_key_by_target_label = cache_key_by_target_label + @build_configurations = build_configurations + @project_object_version = project_object_version + @podfile_plugins = podfile_plugins + @installation_options = installation_options + end + + def update_cache_key_by_target_label!(cache_key_by_target_label) + @cache_key_by_target_label = cache_key_by_target_label + end + + def update_build_configurations!(build_configurations) + @build_configurations = build_configurations + end + + def update_project_object_version!(project_object_version) + @project_object_version = project_object_version + end + + def update_podfile_plugins!(podfile_plugins) + @podfile_plugins = podfile_plugins + end + + def update_installation_options!(installation_options) + @installation_options = installation_options + end + + def save_as(path) + Pathname(path).dirname.mkpath + Sandbox.update_changed_file(path, YAMLHelper.convert(to_hash)) + end + + def self.from_file(sandbox, path) + return ProjectInstallationCache.new unless File.exist?(path) + contents = YAMLHelper.load_file(path) + cache_keys = contents.fetch('CACHE_KEYS', {}) + cache_key_by_target_label = Hash[cache_keys.map do |name, key_hash| + [name, TargetCacheKey.from_cache_hash(sandbox, key_hash)] + end] + project_object_version = contents['OBJECT_VERSION'] + build_configurations = contents['BUILD_CONFIGURATIONS'] + podfile_plugins = contents['PLUGINS'] + installation_options = contents['INSTALLATION_OPTIONS'] + ProjectInstallationCache.new(cache_key_by_target_label, build_configurations, project_object_version, podfile_plugins, installation_options) + end + + def to_hash + cache_key_contents = Hash[cache_key_by_target_label.map do |label, key| + [label, key.to_h] + end] + contents = { 'CACHE_KEYS' => cache_key_contents } + contents['BUILD_CONFIGURATIONS'] = build_configurations if build_configurations + contents['OBJECT_VERSION'] = project_object_version if project_object_version + contents['PLUGINS'] = podfile_plugins if podfile_plugins + contents['INSTALLATION_OPTIONS'] = installation_options if installation_options + contents + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_metadata_cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_metadata_cache.rb new file mode 100644 index 0000000..1fa8fe6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/project_metadata_cache.rb @@ -0,0 +1,73 @@ +module Pod + class Installer + module ProjectCache + # Represents the metadata cache + # + class ProjectMetadataCache + require 'cocoapods/installer/project_cache/target_metadata.rb' + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Hash{String => TargetMetadata}] + # Hash of string by target metadata. + # + attr_reader :target_label_by_metadata + + # Initialize a new instance. 
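ProjectInstallationCache above serializes its state to a small YAML document with top-level keys such as CACHE_KEYS, BUILD_CONFIGURATIONS, OBJECT_VERSION, PLUGINS, and INSTALLATION_OPTIONS, emitting only the keys that have values. A reduced round-trip sketch using the YAML standard library directly rather than CocoaPods' YAMLHelper; the path and the subset of keys shown are illustrative:

    require 'yaml'
    require 'fileutils'

    def save_installation_cache(path, build_configurations:, object_version: nil, plugins: {})
      contents = { 'BUILD_CONFIGURATIONS' => build_configurations }
      contents['OBJECT_VERSION'] = object_version if object_version
      contents['PLUGINS'] = plugins unless plugins.empty?
      FileUtils.mkdir_p(File.dirname(path))
      File.write(path, contents.to_yaml)
    end

    def load_installation_cache(path)
      return {} unless File.exist?(path)
      YAML.safe_load(File.read(path)) || {}
    end

    save_installation_cache('Pods/.project_cache/installation_cache',
                            :build_configurations => { 'Debug' => 'debug' },
                            :object_version => 55)
    load_installation_cache('Pods/.project_cache/installation_cache')
    # => {"BUILD_CONFIGURATIONS"=>{"Debug"=>"debug"}, "OBJECT_VERSION"=>55}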
+ # + # @param [Sandbox] sandbox see #sandbox + # @param [Hash{String => TargetMetadata}] target_label_by_metadata @see #target_label_by_metadata + # + def initialize(sandbox, target_label_by_metadata = {}) + @sandbox = sandbox + @target_label_by_metadata = target_label_by_metadata + end + + def to_hash + Hash[target_label_by_metadata.map do |target_label, metdata| + [target_label, metdata.to_hash] + end] + end + + # Rewrites the entire cache to the given path. + # + # @param [String] path + # + # @return [void] + # + def save_as(path) + Sandbox.update_changed_file(path, YAMLHelper.convert_hash(to_hash, nil)) + end + + # Updates the metadata cache based on installation results. + # + # @param [Hash{String => TargetInstallationResult}] pod_target_installation_results + # The installation results for pod targets installed. + # + # @param [Hash{String => TargetInstallationResult}] aggregate_target_installation_results + # The installation results for aggregate targets installed. + # + def update_metadata!(pod_target_installation_results, aggregate_target_installation_results) + installation_results = pod_target_installation_results.values + aggregate_target_installation_results.values + installation_results.each do |installation_result| + native_target = installation_result.native_target + target_label_by_metadata[native_target.name] = TargetMetadata.from_native_target(sandbox, native_target) + # app targets need to be added to the cache because they can be used as app hosts for test targets, even if those test targets live inside a different pod (and thus project) + installation_result.app_native_targets.each_value do |app_target| + target_label_by_metadata[app_target.name] = TargetMetadata.from_native_target(sandbox, app_target) + end + end + end + + def self.from_file(sandbox, path) + return ProjectMetadataCache.new(sandbox) unless File.exist?(path) + contents = YAMLHelper.load_file(path) + target_by_label_metadata = Hash[contents.map { |target_label, hash| [target_label, TargetMetadata.from_hash(hash)] }] + ProjectMetadataCache.new(sandbox, target_by_label_metadata) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/target_cache_key.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/target_cache_key.rb new file mode 100644 index 0000000..71969f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/target_cache_key.rb @@ -0,0 +1,176 @@ +module Pod + class Installer + module ProjectCache + # Uniquely identifies a Target. + # + class TargetCacheKey + require 'cocoapods/target/pod_target.rb' + require 'cocoapods/target/aggregate_target.rb' + require 'digest' + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Symbol] + # The type of target. Either aggregate or pod target. + # + attr_reader :type + + # @return [Hash{String => Object}] + # The hash containing key-value pairs that identify the target. + # + attr_reader :key_hash + + # Initialize a new instance. + # + # @param [Sandbox] sandbox see #sandbox + # @param [Symbol] type @see #type + # @param [Hash{String => Object}] key_hash @see #key_hash + # + def initialize(sandbox, type, key_hash) + @sandbox = sandbox + @type = type + @key_hash = key_hash + end + + # Equality function used to compare TargetCacheKey objects to each other. + # + # @param [TargetCacheKey] other + # Other object to compare itself against. 
+ # + # @return [Symbol] The difference between this and another TargetCacheKey object. + # # Symbol :none means no difference. + # + def key_difference(other) + if other.type != type + :project + else + case type + when :pod_target + return :project if (other.key_hash.keys - key_hash.keys).any? + return :project if other.key_hash['CHECKSUM'] != key_hash['CHECKSUM'] + return :project if other.key_hash['SPECS'] != key_hash['SPECS'] + return :project if other.key_hash['PROJECT_NAME'] != key_hash['PROJECT_NAME'] + end + + this_files = key_hash['FILES'] + other_files = other.key_hash['FILES'] + return :project if this_files != other_files + + this_build_settings = key_hash['BUILD_SETTINGS_CHECKSUM'] + other_build_settings = other.key_hash['BUILD_SETTINGS_CHECKSUM'] + return :project if this_build_settings != other_build_settings + + this_checkout_options = key_hash['CHECKOUT_OPTIONS'] + other_checkout_options = other.key_hash['CHECKOUT_OPTIONS'] + return :project if this_checkout_options != other_checkout_options + + :none + end + end + + def to_h + key_hash + end + + # @return [String] + # The name of the project the target belongs to. + # + def project_name + key_hash['PROJECT_NAME'] + end + + # Creates a TargetCacheKey instance from the given hash. + # + # @param [Sandbox] sandbox The sandbox to use to construct a TargetCacheKey object. + # + # @param [Hash{String => Object}] key_hash + # The hash used to construct a TargetCacheKey object. + # + # @return [TargetCacheKey] + # + def self.from_cache_hash(sandbox, key_hash) + cache_hash = key_hash.dup + if files = cache_hash['FILES'] + cache_hash['FILES'] = files.sort_by(&:downcase) + end + if specs = cache_hash['SPECS'] + cache_hash['SPECS'] = specs.sort_by(&:downcase) + end + type = cache_hash['CHECKSUM'] ? :pod_target : :aggregate + TargetCacheKey.new(sandbox, type, cache_hash) + end + + # Constructs a TargetCacheKey instance from a PodTarget. + # + # @param [Sandbox] sandbox The sandbox to use to construct a TargetCacheKey object. + # + # @param [PodTarget] pod_target + # The pod target used to construct a TargetCacheKey object. + # + # @param [Bool] is_local_pod + # Used to also include its local files in the cache key. + # + # @param [Hash] checkout_options + # The checkout options for this pod target. 
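key_difference above compares two cache keys field by field (checksum, specs, project name, file list, build-settings checksums, checkout options) and collapses the answer to either :project (regenerate) or :none. A standalone sketch of that comparison over plain hashes:

    COMPARED_FIELDS = %w(CHECKSUM SPECS PROJECT_NAME FILES
                         BUILD_SETTINGS_CHECKSUM CHECKOUT_OPTIONS).freeze

    # Returns :none when nothing relevant changed, :project otherwise,
    # mirroring the coarse-grained answer consumed by the cache analyzer.
    def key_difference(old_key, new_key)
      return :project if (new_key.keys - old_key.keys).any?
      changed = COMPARED_FIELDS.any? { |field| old_key[field] != new_key[field] }
      changed ? :project : :none
    end

    old_key = { 'CHECKSUM' => 'abc', 'PROJECT_NAME' => 'Pods' }
    key_difference(old_key, old_key.merge('CHECKSUM' => 'def')) # => :project
    key_difference(old_key, old_key)                            # => :none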
+ # + # @return [TargetCacheKey] + # + def self.from_pod_target(sandbox, pod_target, is_local_pod: false, checkout_options: nil) + build_settings = {} + build_settings[pod_target.label.to_s] = Hash[pod_target.build_settings.map do |k, v| + [k, Digest::MD5.hexdigest(v.xcconfig.to_s)] + end] + pod_target.test_spec_build_settings_by_config.each do |name, settings_by_config| + build_settings[name] = Hash[settings_by_config.map { |k, v| [k, Digest::MD5.hexdigest(v.xcconfig.to_s)] }] + end + pod_target.app_spec_build_settings_by_config.each do |name, settings_by_config| + build_settings[name] = Hash[settings_by_config.map { |k, v| [k, Digest::MD5.hexdigest(v.xcconfig.to_s)] }] + end + + contents = { + 'CHECKSUM' => pod_target.root_spec.checksum, + 'SPECS' => pod_target.specs.map(&:to_s).sort_by(&:downcase), + 'BUILD_SETTINGS_CHECKSUM' => build_settings, + 'PROJECT_NAME' => pod_target.project_name, + } + if is_local_pod + relative_file_paths = pod_target.all_files.map { |f| f.relative_path_from(sandbox.root).to_s } + contents['FILES'] = relative_file_paths.sort_by(&:downcase) + end + contents['CHECKOUT_OPTIONS'] = checkout_options if checkout_options + TargetCacheKey.new(sandbox, :pod_target, contents) + end + + # Construct a TargetCacheKey instance from an AggregateTarget. + # + # @param [Sandbox] sandbox The sandbox to use to construct a TargetCacheKey object. + # + # @param [AggregateTarget] aggregate_target + # The aggregate target used to construct a TargetCacheKey object. + # + # @return [TargetCacheKey] + # + def self.from_aggregate_target(sandbox, aggregate_target) + build_settings = {} + aggregate_target.user_build_configurations.keys.each do |configuration| + build_settings[configuration] = Digest::MD5.hexdigest(aggregate_target.build_settings(configuration).xcconfig.to_s) + end + + contents = { + 'BUILD_SETTINGS_CHECKSUM' => build_settings, + } + if aggregate_target.includes_resources? || aggregate_target.includes_on_demand_resources? + relative_resource_file_paths = aggregate_target.resource_paths_by_config.values.flatten.uniq + relative_on_demand_resource_file_paths = aggregate_target.on_demand_resources.map do |res| + res.relative_path_from(sandbox.project_path.dirname).to_s + end + contents['FILES'] = (relative_resource_file_paths + relative_on_demand_resource_file_paths).sort_by(&:downcase) + end + TargetCacheKey.new(sandbox, :aggregate, contents) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/target_metadata.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/target_metadata.rb new file mode 100644 index 0000000..62ea254 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/project_cache/target_metadata.rb @@ -0,0 +1,74 @@ +module Pod + class Installer + module ProjectCache + # Metadata used to reconstruct a PBXTargetDependency. + # + class TargetMetadata + # @return [String] + # The label of the native target. + # + attr_reader :target_label + + # @return [String] + # The UUID of the native target installed. + # + attr_reader :native_target_uuid + + # @return [String] + # The path of the container project the native target was installed into. + # + attr_reader :container_project_path + + # Initialize a new instance. 
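from_pod_target and from_aggregate_target above avoid storing whole xcconfigs in the cache by hashing each generated xcconfig with MD5 and comparing the digests instead. A minimal sketch of that digesting step, assuming a hash of configuration name to rendered xcconfig text:

    require 'digest'

    # +xcconfigs_by_config+ maps a configuration name to the rendered
    # xcconfig contents (the strings below are hypothetical).
    def build_settings_checksum(xcconfigs_by_config)
      Hash[xcconfigs_by_config.map { |name, xcconfig| [name, Digest::MD5.hexdigest(xcconfig)] }]
    end

    a = build_settings_checksum('Debug' => "OTHER_LDFLAGS = -ObjC\n")
    b = build_settings_checksum('Debug' => "OTHER_LDFLAGS = -ObjC -lz\n")
    a == b # => false, so the target would be regenerated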
+ # + # @param [String] target_label @see #target_label + # @param [String] native_target_uuid @see #native_target_uuid + # @param [String] container_project_path @see #container_project_path + # + def initialize(target_label, native_target_uuid, container_project_path) + @target_label = target_label + @native_target_uuid = native_target_uuid + @container_project_path = container_project_path + end + + def to_hash + { + 'LABEL' => target_label, + 'UUID' => native_target_uuid, + 'PROJECT_PATH' => container_project_path, + } + end + + def to_s + "#{target_label} : #{native_target_uuid} : #{container_project_path}" + end + + # Constructs a TargetMetadata instance from a hash. + # + # @param [Hash] hash + # The hash used to construct a new TargetMetadata instance. + # + # @return [TargetMetadata] + # + def self.from_hash(hash) + TargetMetadata.new(hash['LABEL'], hash['UUID'], hash['PROJECT_PATH']) + end + + # Constructs a TargetMetadata instance from a native target. + # + # @param [Sandbox] sandbox + # The sandbox used for this installation. + # + # @param [PBXNativeTarget] native_target + # The native target used to construct a TargetMetadata instance. + # + # @return [TargetMetadata] + # + def self.from_native_target(sandbox, native_target) + TargetMetadata.new(native_target.name, native_target.uuid, + native_target.project.path.relative_path_from(sandbox.root).to_s) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/sandbox_dir_cleaner.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/sandbox_dir_cleaner.rb new file mode 100644 index 0000000..7152d50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/sandbox_dir_cleaner.rb @@ -0,0 +1,105 @@ +module Pod + class Installer + # Cleans up the sandbox directory by removing stale target support files and headers. + # + class SandboxDirCleaner + # @return [Sandbox] The sandbox directory that will be cleaned. + # + attr_reader :sandbox + + # @return [Array] + # The list of all pod targets that will be installed into the Sandbox. + # + attr_reader :pod_targets + + # @return [Array] + # The list of all aggregate targets that will be installed into the Sandbox. + # + attr_reader :aggregate_targets + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] pod_targets @see #pod_targets + # @param [Array] aggregate_targets @see #aggregate_targets + # + def initialize(sandbox, pod_targets, aggregate_targets) + @sandbox = sandbox + @pod_targets = pod_targets + @aggregate_targets = aggregate_targets + end + + def clean! + UI.message('Cleaning up sandbox directory') do + # Clean up Target Support Files Directory + target_support_dirs_to_install = (pod_targets + aggregate_targets).map(&:support_files_dir) + target_support_dirs = sandbox_target_support_dirs + + removed_target_support_dirs = target_support_dirs - target_support_dirs_to_install + removed_target_support_dirs.each { |dir| remove_dir(dir) } + + # Clean up Sandbox Headers Directory + sandbox_private_headers_to_install = pod_targets.flat_map do |pod_target| + if pod_target.header_mappings_by_file_accessor.empty? + [] + else + [pod_target.build_headers.root.join(pod_target.headers_sandbox)] + end + end + sandbox_public_headers_to_install = pod_targets.flat_map do |pod_target| + if pod_target.public_header_mappings_by_file_accessor.empty? 
+ [] + else + [ + sandbox.public_headers.root.join(pod_target.headers_sandbox), + pod_target.module_map_path.dirname, + ].uniq + end + end + + removed_sandbox_public_headers = sandbox_public_headers - sandbox_public_headers_to_install + removed_sandbox_public_headers.each { |path| remove_dir(path) } + + removed_sandbox_private_headers = sandbox_private_headers(pod_targets) - sandbox_private_headers_to_install + removed_sandbox_private_headers.each { |path| remove_dir(path) } + + project_dir_names_to_install = pod_targets.map do |pod_target| + sandbox.pod_target_project_path(pod_target.project_name) + end + project_dir_names = sandbox_project_dir_names - [sandbox.project_path] + user_project_dir_names = aggregate_targets.map(&:user_project_path).uniq + + removed_project_dir_names = project_dir_names - user_project_dir_names - project_dir_names_to_install + removed_project_dir_names.each { |dir| remove_dir(dir) } + end + end + + private + + def sandbox_target_support_dirs + child_directories_of(sandbox.target_support_files_root) + end + + def sandbox_private_headers(pod_targets) + pod_targets.flat_map { |pod_target| child_directories_of(pod_target.build_headers.root) }.uniq + end + + def sandbox_project_dir_names + child_directories_of(sandbox.root).select { |d| d.extname == '.xcodeproj' } + end + + def sandbox_public_headers + child_directories_of(sandbox.public_headers.root) + end + + def child_directories_of(dir) + return [] unless dir.exist? + dir.children.select(&:directory?) + end + + def remove_dir(path) + FileUtils.rm_rf(path) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/sandbox_header_paths_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/sandbox_header_paths_installer.rb new file mode 100644 index 0000000..976afb5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/sandbox_header_paths_installer.rb @@ -0,0 +1,45 @@ +module Pod + class Installer + # Adds all the search paths into the sandbox HeaderStore and each pod target's HeaderStore. + # + class SandboxHeaderPathsInstaller + # @return [Sandbox] The sandbox to use for this analysis. + # + attr_reader :sandbox + + # @return [Array] The list of pod targets to analyze. + # + attr_reader :pod_targets + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] pod_targets @see #pod_targets + # + def initialize(sandbox, pod_targets) + @pod_targets = pod_targets + @sandbox = sandbox + end + + def install! + # Link all pod target header search paths into the HeaderStore. + pod_targets.each do |pod_target| + next if pod_target.build_as_framework? && pod_target.should_build? + install_target(pod_target) + end + end + + private + + def install_target(pod_target) + pod_target_header_mappings = pod_target.header_mappings_by_file_accessor.values + public_header_mappings = pod_target.public_header_mappings_by_file_accessor.values + added_build_headers = !pod_target_header_mappings.all?(&:empty?) + added_public_headers = !public_header_mappings.all?(&:empty?) 
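SandboxDirCleaner above works by building the set of directories the new installation will own (support files, public and private headers, generated projects) and deleting any existing sibling directory that is not in that set. A reduced sketch of that "expected versus existing" sweep over a single root, with hypothetical paths:

    require 'pathname'
    require 'fileutils'

    # Removes children of +root+ that are directories and are not listed
    # in +expected_dirs+; returns the removed paths.
    def clean_stale_dirs(root, expected_dirs)
      root = Pathname(root)
      return [] unless root.exist?
      existing = root.children.select(&:directory?)
      stale = existing - expected_dirs.map { |d| Pathname(d) }
      stale.each { |dir| FileUtils.rm_rf(dir) }
    end

    clean_stale_dirs('Pods/Target Support Files',
                     ['Pods/Target Support Files/Pods-App',
                      'Pods/Target Support Files/Alamofire'])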
+ + pod_target.build_headers.add_search_path(pod_target.headers_sandbox, pod_target.platform) if added_build_headers + sandbox.public_headers.add_search_path(pod_target.headers_sandbox, pod_target.platform) if added_public_headers + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/source_provider_hooks_context.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/source_provider_hooks_context.rb new file mode 100644 index 0000000..b312dc0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/source_provider_hooks_context.rb @@ -0,0 +1,34 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer before spec sources have been created + # + class SourceProviderHooksContext + # @return [Array] The source objects to send to the installer + # + attr_reader :sources + + # @return [SourceProviderHooksContext] Convenience class method to generate the + # static context. + # + def self.generate + result = new + result + end + + def initialize + @sources = [] + end + + # @param [Source] source object to be added to the installer + # + # @return [void] + # + def add_source(source) + unless source.nil? + @sources << source + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/target_uuid_generator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/target_uuid_generator.rb new file mode 100644 index 0000000..28badbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/target_uuid_generator.rb @@ -0,0 +1,34 @@ +module Pod + class Installer + # Generates stable UUIDs for Native Targets. + # + class TargetUUIDGenerator < Xcodeproj::Project::UUIDGenerator + # This method override is used to ONLY generate stable UUIDs for PBXNativeTarget instances and their sibling PBXFileReference + # product reference in the project. Stable native target UUIDs are necessary for incremental installation + # because other projects reference the target and product reference by its UUID in the remoteGlobalIDString field. + # + # @param [Array] projects + # The list of projects used to generate stabe target UUIDs. + # + def generate_all_paths_by_objects(projects) + @paths_by_object = {} + projects.each do |project| + project_basename = project.path.basename.to_s + project.objects.each do |object| + @paths_by_object[object] = object.uuid + end + project.targets.each do |target| + @paths_by_object[target] = Digest::MD5.hexdigest(project_basename + target.name).upcase + if target.is_a? 
Xcodeproj::Project::Object::PBXNativeTarget + @paths_by_object[target.product_reference] = Digest::MD5.hexdigest(project_basename + 'product_reference' + target.name).upcase + end + end + end + end + + def uuid_for_path(path) + path + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator.rb new file mode 100644 index 0000000..b487c7f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator.rb @@ -0,0 +1,280 @@ +require 'xcodeproj/workspace' +require 'xcodeproj/project' + +require 'active_support/core_ext/string/inflections' +require 'active_support/core_ext/array/conversions' + +module Pod + class Installer + # The {UserProjectIntegrator} integrates the libraries generated by + # TargetDefinitions of the {Podfile} with their correspondent user + # projects. + # + class UserProjectIntegrator + autoload :TargetIntegrator, 'cocoapods/installer/user_project_integrator/target_integrator' + + # @return [Podfile] the podfile that should be integrated with the user + # projects. + # + attr_reader :podfile + + # @return [Sandbox] The sandbox used for this installation. + # + attr_reader :sandbox + + # @return [Pathname] the path of the installation. + # + # @todo This is only used to compute the workspace path in case that it + # should be inferred by the project. If the workspace should be in + # the same dir of the project, this could be removed. + # + attr_reader :installation_root + + # @return [Array] the targets represented in the Podfile. + # + attr_reader :targets + + # @return [Array] the targets that require integration. This will always be equal or a smaller + # subset of #targets. + # + attr_reader :targets_to_integrate + + # @return [Boolean] whether to use input/output paths for build phase scripts + # + attr_reader :use_input_output_paths + alias use_input_output_paths? use_input_output_paths + + # Initialize a new instance + # + # @param [Podfile] podfile @see #podfile + # @param [Sandbox] sandbox @see #sandbox + # @param [Pathname] installation_root @see #installation_root + # @param [Array] targets @see #targets + # @param [Array] targets_to_integrate @see #targets_to_integrate + # @param [Boolean] use_input_output_paths @see #use_input_output_paths + # + def initialize(podfile, sandbox, installation_root, targets, targets_to_integrate, use_input_output_paths: true) + @podfile = podfile + @sandbox = sandbox + @installation_root = installation_root + @targets = targets + @targets_to_integrate = targets_to_integrate + @use_input_output_paths = use_input_output_paths + end + + # Integrates the user projects associated with the {TargetDefinitions} + # with the Pods project and its products. + # + # @return [void] + # + def integrate! + create_workspace + deintegrated_projects = deintegrate_removed_targets + integrate_user_targets + warn_about_xcconfig_overrides + projects_to_save = (user_projects_to_integrate + deintegrated_projects).uniq + save_projects(projects_to_save) + end + + #-----------------------------------------------------------------------# + + private + + # @!group Integration steps + + # Creates and saved the workspace containing the Pods project and the + # user projects, if needed. 
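TargetUUIDGenerator above keeps most object UUIDs as-is and only derives deterministic identifiers for native targets and their product references, so incremental installs do not churn the remoteGlobalIDString values that other projects point at. The derivation is simply an uppercased MD5 of the project basename plus the target name; a standalone sketch:

    require 'digest'

    def stable_target_uuid(project_basename, target_name)
      Digest::MD5.hexdigest(project_basename + target_name).upcase
    end

    def stable_product_reference_uuid(project_basename, target_name)
      Digest::MD5.hexdigest(project_basename + 'product_reference' + target_name).upcase
    end

    stable_target_uuid('Pods.xcodeproj', 'Alamofire')
    # => the same 32-character hex string on every run for this input pair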
+ # + # @note If the workspace already contains the projects it is not saved + # to avoid Xcode from displaying the revert dialog: `Do you want to + # keep the Xcode version or revert to the version on disk?` + # + # @return [void] + # + def create_workspace + all_projects = user_project_paths.sort.push(sandbox.project_path).uniq + file_references = all_projects.map do |path| + relative_path = path.relative_path_from(workspace_path.dirname).to_s + Xcodeproj::Workspace::FileReference.new(relative_path, 'group') + end + + if workspace_path.exist? + workspace = Xcodeproj::Workspace.new_from_xcworkspace(workspace_path) + new_file_references = file_references - workspace.file_references + unless new_file_references.empty? + new_file_references.each { |fr| workspace << fr } + workspace.save_as(workspace_path) + end + + else + UI.notice "Please close any current Xcode sessions and use `#{workspace_path.basename}` for this project from now on." + workspace = Xcodeproj::Workspace.new(*file_references) + workspace.save_as(workspace_path) + end + end + + # Deintegrates the targets of the user projects that are no longer part of the installation. + # + # @return [Array] The list of projects that were deintegrated. + # + def deintegrate_removed_targets + Config.instance.with_changes(:silent => true) do + deintegrator = Deintegrator.new + all_project_targets = user_projects.flat_map(&:native_targets).uniq + all_native_targets = targets.flat_map(&:user_targets).uniq + targets_to_deintegrate = all_project_targets - all_native_targets + targets_to_deintegrate.each do |target| + deintegrator.deintegrate_target(target) + end + return targets_to_deintegrate.map(&:project).select(&:dirty?).uniq + end + end + + # Integrates the targets of the user projects with the libraries + # generated from the {Podfile}. + # + # @note {TargetDefinition} without dependencies are skipped prevent + # creating empty libraries for targets definitions which are only + # wrappers for others. + # + # @return [void] + # + def integrate_user_targets + target_integrators = targets_to_integrate.sort_by(&:name).map do |target| + TargetIntegrator.new(target, :use_input_output_paths => use_input_output_paths?) + end + target_integrators.each(&:integrate!) + end + + # Save all user projects. + # + # @param [Array] projects The projects to save. + # + # @return [void] + # + def save_projects(projects) + projects.each do |project| + if project.dirty? + project.save + else + # There is a bug in Xcode where the process of deleting and + # re-creating the xcconfig files used in the build + # configuration cause building the user project to fail until + # Xcode is relaunched. + # + # Touching/saving the project causes Xcode to reload these. + # + # https://github.com/CocoaPods/CocoaPods/issues/2665 + FileUtils.touch(project.path + 'project.pbxproj') + end + end + end + + IGNORED_KEYS = %w(CODE_SIGN_IDENTITY).freeze + INHERITED_FLAGS = %w($(inherited) ${inherited}).freeze + + # Checks whether the settings of the CocoaPods generated xcconfig are + # overridden by the build configuration of a target and prints a + # warning to inform the user if needed. 
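create_workspace above only rewrites the .xcworkspace when a project reference is actually missing, which avoids Xcode's "revert to the version on disk?" prompt. The same idea in a reduced sketch that treats the workspace as a plain list of paths persisted to YAML; the file format here is hypothetical, whereas the real code goes through Xcodeproj::Workspace:

    require 'yaml'

    def ensure_workspace_references(workspace_path, project_paths)
      existing = File.exist?(workspace_path) ? YAML.safe_load(File.read(workspace_path)) || [] : []
      missing = project_paths - existing
      return :unchanged if File.exist?(workspace_path) && missing.empty?
      File.write(workspace_path, (existing + missing).to_yaml)
      :saved
    end

    ensure_workspace_references('App.workspace.yml', ['App.xcodeproj', 'Pods/Pods.xcodeproj'])
    # => :saved on the first run, :unchanged on subsequent identical runs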
+ # + def warn_about_xcconfig_overrides + targets_to_integrate.each do |aggregate_target| + aggregate_target.user_targets.each do |user_target| + user_target.build_configurations.each do |config| + xcconfig = aggregate_target.xcconfigs[config.name] + if xcconfig + (xcconfig.to_hash.keys - IGNORED_KEYS).each do |key| + target_values = config.build_settings[key] + if target_values && + !INHERITED_FLAGS.any? { |flag| target_values.include?(flag) } + print_override_warning(aggregate_target, user_target, config, key) + end + end + end + end + end + end + end + + private + + # @!group Private Helpers + #-----------------------------------------------------------------------# + + # @return [Pathname] the path where the workspace containing the Pods + # project and the user projects should be saved. + # + def workspace_path + if podfile.workspace_path + declared_path = podfile.workspace_path + path_with_ext = File.extname(declared_path) == '.xcworkspace' ? declared_path : "#{declared_path}.xcworkspace" + podfile_dir = File.dirname(podfile.defined_in_file || '') + absolute_path = File.expand_path(path_with_ext, podfile_dir) + Pathname.new(absolute_path) + elsif user_project_paths.count == 1 + project = user_project_paths.first.basename('.xcodeproj') + installation_root + "#{project}.xcworkspace" + else + raise Informative, 'Could not automatically select an Xcode ' \ + "workspace. Specify one in your Podfile like so:\n\n" \ + " workspace 'path/to/Workspace.xcworkspace'\n" + end + end + + # @return [Array] the projects of all the targets that require integration. + # + # @note Empty target definitions are ignored. + # + def user_projects_to_integrate + targets_to_integrate.map(&:user_project).compact.uniq + end + + # @return [Array] the projects of all the targets regardless of whether they are integrated + # or not. + # + # @note Empty target definitions are ignored. + # + def user_projects + targets.map(&:user_project).compact.uniq + end + + # @return [Array] the paths of all the user projects from all targets regardless of whether they are + # integrated or not. + # + # @note Empty target definitions are ignored. + # + def user_project_paths + targets.map(&:user_project_path).compact.uniq + end + + # Prints a warning informing the user that a build configuration of + # the integrated target is overriding the CocoaPods build settings. + # + # @param [Target::AggregateTarget] aggregate_target + # The umbrella target. + # + # @param [Xcodeproj::PBXNativeTarget] user_target + # The native target. + # + # @param [Xcodeproj::XCBuildConfiguration] config + # The build configuration. + # + # @param [String] key + # The key of the overridden build setting. + # + def print_override_warning(aggregate_target, user_target, config, key) + actions = [ + 'Use the `$(inherited)` flag, or', + 'Remove the build settings from the target.', + ] + message = "The `#{user_target.name} [#{config.name}]` " \ + "target overrides the `#{key}` build setting defined in " \ + "`#{aggregate_target.xcconfig_relative_path(config.name)}'. 
" \ + 'This can lead to problems with the CocoaPods installation' + UI.warn(message, actions) + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator/target_integrator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator/target_integrator.rb new file mode 100644 index 0000000..caec8dc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator/target_integrator.rb @@ -0,0 +1,815 @@ +require 'active_support/core_ext/string/inflections' +require 'cocoapods/xcode/framework_paths' +require 'cocoapods/target/build_settings' + +module Pod + class Installer + class UserProjectIntegrator + # This class is responsible for integrating the library generated by a + # {TargetDefinition} with its destination project. + # + class TargetIntegrator + autoload :XCConfigIntegrator, 'cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator' + + # @return [String] the string to use as prefix for every build phase added to the user project + # + BUILD_PHASE_PREFIX = '[CP] '.freeze + + # @return [String] the string to use as prefix for every build phase declared by the user within a podfile + # or podspec. + # + USER_BUILD_PHASE_PREFIX = '[CP-User] '.freeze + + # @return [String] the name of the check manifest phase + # + CHECK_MANIFEST_PHASE_NAME = 'Check Pods Manifest.lock'.freeze + + # @return [Array] the symbol types, which require that the pod + # frameworks are embedded in the output directory / product bundle. + # + # @note This does not include :app_extension or :watch_extension because + # these types must have their frameworks embedded in their host targets. + # For messages extensions, this only applies if it's embedded in a messages + # application. + # + EMBED_FRAMEWORK_TARGET_TYPES = [:application, :application_on_demand_install_capable, :unit_test_bundle, + :ui_test_bundle, :watch2_extension, :messages_application].freeze + + # @return [String] the name of the embed frameworks phase + # + EMBED_FRAMEWORK_PHASE_NAME = 'Embed Pods Frameworks'.freeze + + # @return [String] the name of the copy xcframeworks phase + # + COPY_XCFRAMEWORKS_PHASE_NAME = 'Copy XCFrameworks'.freeze + + # @return [String] the name of the copy resources phase + # + COPY_PODS_RESOURCES_PHASE_NAME = 'Copy Pods Resources'.freeze + + # @return [String] the name of the copy dSYM files phase + # + COPY_DSYM_FILES_PHASE_NAME = 'Copy dSYMs'.freeze + + # @return [Integer] the maximum number of input and output paths to use for a script phase + # + MAX_INPUT_OUTPUT_PATHS = 1000 + + # @return [Array] names of script phases that existed in previous versions of CocoaPods + # + REMOVED_SCRIPT_PHASE_NAMES = [ + 'Prepare Artifacts'.freeze, + ].freeze + + # @return [float] Returns Minimum Xcode Compatibility version for FileLists + # + MIN_FILE_LIST_COMPATIBILITY_VERSION = 9.3 + + # @return [String] Returns Minimum Xcode Object version for FileLists + # + MIN_FILE_LIST_OBJECT_VERSION = 50 + + # @return [AggregateTarget] the target that should be integrated. + # + attr_reader :target + + # @return [Boolean] whether to use input/output paths for build phase scripts + # + attr_reader :use_input_output_paths + alias use_input_output_paths? 
use_input_output_paths + + # Init a new TargetIntegrator + # + # @param [AggregateTarget] target @see #target + # @param [Boolean] use_input_output_paths @see #use_input_output_paths + # + def initialize(target, use_input_output_paths: true) + @target = target + @use_input_output_paths = use_input_output_paths + end + + # @private + # + XCFileListConfigKey = Struct.new(:file_list_path, :file_list_relative_path) + + class << self + # @param [Xcodeproj::Project::Object::AbstractObject] object + # + # @return [Boolean] Whether input & output paths for the given object + # should be stored in a file list file. + # + def input_output_paths_use_filelist?(object) + unless object.project.root_object.compatibility_version.nil? + version_match = object.project.root_object.compatibility_version.match(/Xcode ([0-9]*\.[0-9]*)/).to_a + end + if version_match&.at(1).nil? + object.project.object_version.to_i >= MIN_FILE_LIST_OBJECT_VERSION + else + Pod::Version.new(version_match[1]) >= Pod::Version.new(MIN_FILE_LIST_COMPATIBILITY_VERSION) + end + end + + # Sets the input & output paths for the given script build phase. + # + # @param [Xcodeproj::Project::Object::PBXShellScriptBuildPhase] phase + # The phase to set input & output paths on. + # + # @param [Hash] input_paths_by_config + # + # @return [Void] + def set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + if input_output_paths_use_filelist?(phase) + [input_paths_by_config, output_paths_by_config].each do |hash| + hash.each do |file_list, files| + generator = Generator::FileList.new(files) + Xcode::PodsProjectGenerator::TargetInstallerHelper.update_changed_file(generator, file_list.file_list_path) + end + end + + phase.input_paths = nil + phase.output_paths = nil + phase.input_file_list_paths = input_paths_by_config.each_key.map(&:file_list_relative_path).uniq + phase.output_file_list_paths = output_paths_by_config.each_key.map(&:file_list_relative_path).uniq + else + input_paths = input_paths_by_config.values.flatten(1).uniq + output_paths = output_paths_by_config.values.flatten(1).uniq + TargetIntegrator.validate_input_output_path_limit(input_paths, output_paths) + + phase.input_paths = input_paths + phase.output_paths = output_paths + phase.input_file_list_paths = nil + phase.output_file_list_paths = nil + end + end + + # Adds a shell script build phase responsible to copy (embed) the frameworks + # generated by the TargetDefinition to the bundle of the product of the + # targets. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_path + # The script path to execute as part of this script phase. + # + # @param [Hash] input_paths_by_config + # The input paths (if any) to include for this script phase. + # + # @param [Hash] output_paths_by_config + # The output paths (if any) to include for this script phase. + # + # @return [void] + # + def create_or_update_embed_frameworks_script_phase_to_target(native_target, script_path, input_paths_by_config = {}, output_paths_by_config = {}) + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + EMBED_FRAMEWORK_PHASE_NAME) + phase.shell_script = %("#{script_path}"\n) + TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + end + + # Delete a 'Embed Pods Frameworks' Script Build Phase if present + # + # @param [PBXNativeTarget] native_target + # The native target to remove the script phase from. 
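The file-list decision made by `input_output_paths_use_filelist?` above can be pictured with concrete (invented) project values: a compatibility version of at least Xcode 9.3, or an object version of at least 50, switches the script phase to `.xcfilelist` inputs and outputs instead of plain path arrays. A rough sketch, using Gem::Version in place of Pod::Version:

    compatibility_version = 'Xcode 9.3'   # hypothetical root object value
    object_version        = 50

    version = compatibility_version.match(/Xcode ([0-9]*\.[0-9]*)/).to_a[1]
    use_filelist =
      if version.nil?
        object_version.to_i >= 50                              # MIN_FILE_LIST_OBJECT_VERSION
      else
        Gem::Version.new(version) >= Gem::Version.new('9.3')   # MIN_FILE_LIST_COMPATIBILITY_VERSION
      end
    # => true, so input_file_list_paths / output_file_list_paths are used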
+ # + def remove_embed_frameworks_script_phase_from_target(native_target) + remove_script_phase_from_target(native_target, EMBED_FRAMEWORK_PHASE_NAME) + end + + # Adds a shell script build phase responsible to copy the xcframework slice + # to the intermediate build directory. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_path + # The script path to execute as part of this script phase. + # + # @param [Hash] input_paths_by_config + # The input paths (if any) to include for this script phase. + # + # @param [Hash] output_paths_by_config + # The output paths (if any) to include for this script phase. + # + # @return [void] + # + def create_or_update_copy_xcframeworks_script_phase_to_target(native_target, script_path, input_paths_by_config = {}, output_paths_by_config = {}) + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + COPY_XCFRAMEWORKS_PHASE_NAME) + phase.shell_script = %("#{script_path}"\n) + TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + reorder_script_phase(native_target, phase, :before_compile) + end + + # Delete a 'Copy XCFrameworks' Script Build Phase if present + # + # @param [PBXNativeTarget] native_target + # The native target to remove the script phase from. + # + def remove_copy_xcframeworks_script_phase_from_target(native_target) + remove_script_phase_from_target(native_target, COPY_XCFRAMEWORKS_PHASE_NAME) + end + + # Removes a script phase from a native target by name + # + # @param [PBXNativeTarget] native_target + # The target from which the script phased should be removed + # + # @param [String] phase_name + # The name of the script phase to remove + # + def remove_script_phase_from_target(native_target, phase_name) + build_phase = native_target.shell_script_build_phases.find { |bp| bp.name && bp.name.end_with?(phase_name) } + return unless build_phase.present? + native_target.build_phases.delete(build_phase) + end + + # Adds a shell script build phase responsible to copy the resources + # generated by the TargetDefinition to the bundle of the product of the + # targets. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_path + # The script path to execute as part of this script phase. + # + # @param [Hash] input_paths_by_config + # The input paths (if any) to include for this script phase. + # + # @param [Hash] output_paths_by_config + # The output paths (if any) to include for this script phase. + # + # @return [void] + # + def create_or_update_copy_resources_script_phase_to_target(native_target, script_path, input_paths_by_config = {}, output_paths_by_config = {}) + phase_name = COPY_PODS_RESOURCES_PHASE_NAME + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + phase_name) + phase.shell_script = %("#{script_path}"\n) + TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + end + + # Delete a 'Copy Pods Resources' script phase if present + # + # @param [PBXNativeTarget] native_target + # The native target to remove the script phase from. + # + def remove_copy_resources_script_phase_from_target(native_target) + build_phase = native_target.shell_script_build_phases.find { |bp| bp.name && bp.name.end_with?(COPY_PODS_RESOURCES_PHASE_NAME) } + return unless build_phase.present? 
+ native_target.build_phases.delete(build_phase) + end + + # Creates or update a shell script build phase for the given target. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_phase_name + # The name of the script phase to use. + # + # @param [String] show_env_vars_in_log + # The value to set for show environment variables in the log during execution of this script phase or + # `nil` for not setting the value at all. + # + # @return [PBXShellScriptBuildPhase] The existing or newly created shell script build phase. + # + def create_or_update_shell_script_build_phase(native_target, script_phase_name, show_env_vars_in_log = '0') + build_phases = native_target.build_phases.grep(Xcodeproj::Project::Object::PBXShellScriptBuildPhase) + build_phases.find { |phase| phase.name && phase.name.end_with?(script_phase_name) }.tap { |p| p.name = script_phase_name if p } || + native_target.project.new(Xcodeproj::Project::Object::PBXShellScriptBuildPhase).tap do |phase| + UI.message("Adding Build Phase '#{script_phase_name}' to project.") do + phase.name = script_phase_name + unless show_env_vars_in_log.nil? + phase.show_env_vars_in_log = show_env_vars_in_log + end + native_target.build_phases << phase + end + end + end + + # Updates all target script phases for the current target, including creating or updating, deleting + # and re-ordering. + # + # @return [void] + # + def create_or_update_user_script_phases(script_phases, native_target) + script_phase_names = script_phases.map { |k| k[:name] } + # Delete script phases no longer present in the target. + native_target_script_phases = native_target.shell_script_build_phases.select do |bp| + !bp.name.nil? && bp.name.start_with?(USER_BUILD_PHASE_PREFIX) + end + native_target_script_phases.each do |script_phase| + script_phase_name_without_prefix = script_phase.name.sub(USER_BUILD_PHASE_PREFIX, '') + unless script_phase_names.include?(script_phase_name_without_prefix) + native_target.build_phases.delete(script_phase) + end + end + # Create or update the ones that are expected to be. + script_phases.each do |script_phase| + name_with_prefix = USER_BUILD_PHASE_PREFIX + script_phase[:name] + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, name_with_prefix, nil) + phase.shell_script = script_phase[:script] + phase.shell_path = script_phase[:shell_path] || '/bin/sh' + phase.input_paths = script_phase[:input_files] + phase.output_paths = script_phase[:output_files] + phase.input_file_list_paths = script_phase[:input_file_lists] + phase.output_file_list_paths = script_phase[:output_file_lists] + phase.dependency_file = script_phase[:dependency_file] + # At least with Xcode 10 `showEnvVarsInLog` is *NOT* set to any value even if it's checked and it only + # gets set to '0' if the user has explicitly disabled this. + if (show_env_vars_in_log = script_phase.fetch(:show_env_vars_in_log, '1')) == '0' + phase.show_env_vars_in_log = show_env_vars_in_log + end + + execution_position = script_phase[:execution_position] + reorder_script_phase(native_target, phase, execution_position) + end + end + + def reorder_script_phase(native_target, script_phase, execution_position) + return if execution_position == :any || execution_position.to_s.empty? 
+ target_phase_type = case execution_position + when :before_compile, :after_compile + Xcodeproj::Project::Object::PBXSourcesBuildPhase + when :before_headers, :after_headers + Xcodeproj::Project::Object::PBXHeadersBuildPhase + else + raise ArgumentError, "Unknown execution position `#{execution_position}`" + end + order_before = case execution_position + when :before_compile, :before_headers + true + when :after_compile, :after_headers + false + else + raise ArgumentError, "Unknown execution position `#{execution_position}`" + end + + target_phase_index = native_target.build_phases.index do |bp| + bp.is_a?(target_phase_type) + end + return if target_phase_index.nil? + script_phase_index = native_target.build_phases.index do |bp| + bp.is_a?(Xcodeproj::Project::Object::PBXShellScriptBuildPhase) && !bp.name.nil? && bp.name == script_phase.name + end + if (order_before && script_phase_index > target_phase_index) || + (!order_before && script_phase_index < target_phase_index) + native_target.build_phases.move_from(script_phase_index, target_phase_index) + end + end + + # Script phases can have a limited number of input and output paths due to each one being exported to `env`. + # A large number can cause a build failure because of limitations in `env`. See issue + # https://github.com/CocoaPods/CocoaPods/issues/7362. + # + # @param [Array] input_paths + # The input paths to trim. + # + # @param [Array] output_paths + # The output paths to trim. + # + # @return [void] + # + def validate_input_output_path_limit(input_paths, output_paths) + if (input_paths.count + output_paths.count) > MAX_INPUT_OUTPUT_PATHS + input_paths.clear + output_paths.clear + end + end + + # Returns the resource output paths for all given input paths. + # + # @param [Array] resource_input_paths + # The input paths to map to. + # + # @return [Array] The resource output paths. + # + def resource_output_paths(resource_input_paths) + resource_input_paths.map do |resource_input_path| + base_path = '${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}' + extname = File.extname(resource_input_path) + basename = extname == '.xcassets' ? 'Assets' : File.basename(resource_input_path) + output_extension = Target.output_extension_for_resource(extname) + File.join(base_path, File.basename(basename, extname) + output_extension) + end.uniq + end + + # Returns the framework input paths for the given framework paths + # + # @param [Array] framework_paths + # The target's framework paths to map to input paths. + # + # @param [Array] xcframeworks + # The target's xcframeworks to map to input paths. + # + # @return [Array] The embed frameworks script input paths + # + def embed_frameworks_input_paths(framework_paths, xcframeworks) + input_paths = framework_paths.map(&:source_path) + # Only include dynamic xcframeworks as the input since we will not be copying static xcframework slices + xcframeworks.select { |xcf| xcf.build_type.dynamic_framework? }.each do |xcframework| + name = xcframework.name + input_paths << "#{Pod::Target::BuildSettings.xcframework_intermediate_dir(xcframework)}/#{name}.framework/#{name}" + end + input_paths + end + + # Returns the framework output paths for the given framework paths + # + # @param [Array] framework_paths + # The framework input paths to map to output paths. + # + # @param [Array] xcframeworks + # The installed xcframeworks. 
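For the embed-frameworks phase, the mapping implemented by `embed_frameworks_input_paths` above and `embed_frameworks_output_paths` just below pairs each framework's source path with its destination under the product's frameworks folder. A tiny sketch with an invented framework name:

    # Hypothetical source path of one dynamic framework built by CocoaPods.
    input_paths  = ['${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework']
    output_paths = input_paths.map do |source_path|
      "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/#{File.basename(source_path)}"
    end
    # => ["${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Alamofire.framework"]
    # Static xcframework slices are skipped because they are never copied to the build dir.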
+ # + # @return [Array] The embed framework script output paths + # + def embed_frameworks_output_paths(framework_paths, xcframeworks) + paths = framework_paths.map do |framework_path| + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/#{File.basename(framework_path.source_path)}" + end.uniq + # Static xcframeworks are not copied to the build dir + # so only include dynamic artifacts that will be copied to the build folder + xcframework_paths = xcframeworks.select { |xcf| xcf.build_type.dynamic_framework? }.map do |xcframework| + "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/#{xcframework.name}.framework" + end + paths + xcframework_paths + end + + # Updates a projects native targets to include on demand resources specified by the supplied parameters. + # Note that currently, only app level targets are allowed to include on demand resources. + # + # @param [Sandbox] sandbox + # The sandbox to use for calculating ODR file references. + # + # @param [Xcodeproj::Project] project + # The project to update known asset tags as well as add the ODR group. + # + # @param [Xcodeproj::PBXNativeTarget, Array] native_targets + # The native targets to integrate on demand resources into. + # + # @param [Sandbox::FileAccessor, Array] file_accessors + # The file accessors that that provide the ODRs to integrate. + # + # @param [Xcodeproj::PBXGroup] parent_odr_group + # The group to use as the parent to add ODR file references into. + # + # @param [String] target_odr_group_name + # The name to use for the group created that contains the ODR file references. + # + # @return [void] + # + def update_on_demand_resources(sandbox, project, native_targets, file_accessors, parent_odr_group, + target_odr_group_name) + category_to_tags = {} + file_accessors = Array(file_accessors) + native_targets = Array(native_targets) + + # Target no longer provides ODR references so remove everything related to this target. + if file_accessors.all? { |fa| fa.on_demand_resources.empty? } + old_target_odr_group = parent_odr_group[target_odr_group_name] + old_odr_file_refs = old_target_odr_group&.recursive_children_groups&.each_with_object({}) do |group, hash| + hash[group.name] = group.files + end || {} + native_targets.each do |native_target| + native_target.remove_on_demand_resources(old_odr_file_refs) + update_on_demand_resources_build_settings(native_target, nil => old_odr_file_refs.keys) + end + old_target_odr_group&.remove_from_project + return + end + + target_odr_group = parent_odr_group[target_odr_group_name] || parent_odr_group.new_group(target_odr_group_name) + current_file_refs = target_odr_group.recursive_children_groups.flat_map(&:files) + + added_file_refs = file_accessors.flat_map do |file_accessor| + target_odr_files_refs = Hash[file_accessor.on_demand_resources.map do |tag, value| + tag_group = target_odr_group[tag] || target_odr_group.new_group(tag) + category_to_tags[value[:category]] ||= [] + category_to_tags[value[:category]] << tag + resources_file_refs = value[:paths].map do |resource| + odr_resource_file_ref = Pathname.new(resource).relative_path_from(sandbox.root) + tag_group.find_file_by_path(odr_resource_file_ref.to_s) || tag_group.new_file(odr_resource_file_ref) + end + [tag, resources_file_refs] + end] + native_targets.each do |native_target| + native_target.add_on_demand_resources(target_odr_files_refs) + end + target_odr_files_refs.values.flatten + end + + # if the target ODR file references were updated, make sure we remove the ones that are no longer present + # for the target. 
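The on-demand-resource bookkeeping in this method can be illustrated with invented tags: each resource is declared under a tag with a category, the tags are grouped by category, and the union of all tags ends up in the project's KnownAssetTags attribute.

    # Hypothetical per-tag on-demand resource data, shaped like the file accessor's hash.
    on_demand_resources = {
      'Level1' => { :paths => ['Resources/level1.png'], :category => :initial_install },
      'Level2' => { :paths => ['Resources/level2.png'], :category => :download_on_demand },
    }

    category_to_tags = {}
    on_demand_resources.each do |tag, value|
      (category_to_tags[value[:category]] ||= []) << tag
    end

    known_asset_tags = [] | category_to_tags.values.flatten
    # => ["Level1", "Level2"], merged into the root object's KnownAssetTags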
+ remaining_refs = current_file_refs - added_file_refs + remaining_refs.each do |ref| + native_targets.each do |user_target| + user_target.resources_build_phase.remove_file_reference(ref) + end + ref.remove_from_project + end + target_odr_group.recursive_children_groups.each { |g| g.remove_from_project if g.empty? } + + attributes = project.root_object.attributes + attributes['KnownAssetTags'] = (attributes['KnownAssetTags'] ||= []) | category_to_tags.values.flatten + project.root_object.attributes = attributes + + native_targets.each do |native_target| + update_on_demand_resources_build_settings(native_target, category_to_tags) + end + end + + def update_on_demand_resources_build_settings(native_target, category_to_tags) + %w[ON_DEMAND_RESOURCES_INITIAL_INSTALL_TAGS ON_DEMAND_RESOURCES_PREFETCH_ORDER].each do |category_key| + native_target.build_configurations.each do |c| + key = case category_key + when 'ON_DEMAND_RESOURCES_INITIAL_INSTALL_TAGS' + :initial_install + when 'ON_DEMAND_RESOURCES_PREFETCH_ORDER' + :prefetched + else + :download_on_demand + end + tags_for_category = (c.build_settings[category_key] || '').split + category_to_tags_dup = category_to_tags.dup + tags_to_add = category_to_tags_dup.delete(key) || [] + tags_to_delete = category_to_tags_dup.values.flatten + tags_for_category = (tags_for_category + tags_to_add - tags_to_delete).flatten.compact.uniq + if tags_for_category.empty? + val = c.build_settings.delete(category_key) + native_target.project.mark_dirty! unless val.nil? + else + tags = tags_for_category.join(' ') + unless c.build_settings[category_key] == tags + c.build_settings[category_key] = tags + native_target.project.mark_dirty! + end + end + end + end + end + end + + # Integrates the user project targets. Only the targets that do **not** + # already have the Pods library in their frameworks build phase are + # processed. + # + # @return [void] + # + def integrate! + UI.section(integration_message) do + XCConfigIntegrator.integrate(target, native_targets) + + remove_obsolete_script_phases + add_pods_library + add_embed_frameworks_script_phase + remove_embed_frameworks_script_phase_from_embedded_targets + add_copy_resources_script_phase + add_check_manifest_lock_script_phase + add_user_script_phases + add_on_demand_resources + end + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "#<#{self.class} for target `#{target.label}'>" + end + + private + + # @!group Integration steps + #---------------------------------------------------------------------# + + # Adds spec product reference to the frameworks build phase of the + # {TargetDefinition} integration libraries. Adds a file reference to + # the frameworks group of the project and adds it to the frameworks + # build phase of the targets. + # + # @return [void] + # + def add_pods_library + frameworks = user_project.frameworks_group + native_targets.each do |native_target| + build_phase = native_target.frameworks_build_phase + product_name = target.product_name + + # Delete previously integrated references. 
+ product_build_files = build_phase.files.select do |build_file| + build_file.display_name =~ Pod::Deintegrator::FRAMEWORK_NAMES + end + + product_build_files.each do |product_file| + next unless product_name != product_file.display_name + UI.message("Removing old product reference `#{product_file.display_name}` from project.") + frameworks.remove_reference(product_file.file_ref) + build_phase.remove_build_file(product_file) + end + + # Find or create and add a reference for the current product type + new_product_ref = frameworks.files.find { |f| f.path == product_name } || + frameworks.new_product_ref_for_target(target.product_basename, target.product_type) + build_phase.build_file(new_product_ref) || + build_phase.add_file_reference(new_product_ref, true) + end + end + + # Find or create a 'Copy Pods Resources' build phase + # + # @return [void] + # + def add_copy_resources_script_phase + unless target.includes_resources? + native_targets.each do |native_target| + TargetIntegrator.remove_copy_resources_script_phase_from_target(native_target) + end + return + end + + script_path = target.copy_resources_script_relative_path + input_paths_by_config = {} + output_paths_by_config = {} + if use_input_output_paths + target.resource_paths_by_config.each do |config, resource_paths| + input_paths_key = XCFileListConfigKey.new(target.copy_resources_script_input_files_path(config), + target.copy_resources_script_input_files_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + resource_paths + + output_paths_key = XCFileListConfigKey.new(target.copy_resources_script_output_files_path(config), + target.copy_resources_script_output_files_relative_path) + output_paths_by_config[output_paths_key] = TargetIntegrator.resource_output_paths(resource_paths) + end + end + + native_targets.each do |native_target| + # Static library targets cannot include resources. Skip this phase from being added instead. + next if native_target.symbol_type == :static_library + TargetIntegrator.create_or_update_copy_resources_script_phase_to_target(native_target, script_path, + input_paths_by_config, + output_paths_by_config) + end + end + + # Removes the embed frameworks build phase from embedded targets + # + # @note Older versions of CocoaPods would add this build phase to embedded + # targets. They should be removed on upgrade because embedded targets + # will have their frameworks embedded in their host targets. + # + def remove_embed_frameworks_script_phase_from_embedded_targets + return unless target.requires_host_target? + native_targets.each do |native_target| + if AggregateTarget::EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES.include? native_target.symbol_type + TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + end + end + end + + # Find or create a 'Embed Pods Frameworks' Copy Files Build Phase + # + # @return [void] + # + def add_embed_frameworks_script_phase + unless target.includes_frameworks? || (target.xcframeworks_by_config.values.flatten.any? { |xcf| xcf.build_type.dynamic_framework? }) + native_targets_to_embed_in.each do |native_target| + TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + end + return + end + + script_path = target.embed_frameworks_script_relative_path + input_paths_by_config = {} + output_paths_by_config = {} + if use_input_output_paths? 
+ configs = Set.new(target.framework_paths_by_config.keys + target.xcframeworks_by_config.keys).sort + configs.each do |config| + framework_paths = target.framework_paths_by_config[config] || [] + xcframeworks = target.xcframeworks_by_config[config] || [] + + input_paths_key = XCFileListConfigKey.new(target.embed_frameworks_script_input_files_path(config), target.embed_frameworks_script_input_files_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + TargetIntegrator.embed_frameworks_input_paths(framework_paths, xcframeworks) + + output_paths_key = XCFileListConfigKey.new(target.embed_frameworks_script_output_files_path(config), target.embed_frameworks_script_output_files_relative_path) + output_paths_by_config[output_paths_key] = TargetIntegrator.embed_frameworks_output_paths(framework_paths, xcframeworks) + end + end + + native_targets_to_embed_in.each do |native_target| + TargetIntegrator.create_or_update_embed_frameworks_script_phase_to_target(native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Updates all target script phases for the current target, including creating or updating, deleting + # and re-ordering. + # + # @return [void] + # + def add_user_script_phases + native_targets.each do |native_target| + TargetIntegrator.create_or_update_user_script_phases(target.target_definition.script_phases, native_target) + end + end + + # Adds a shell script build phase responsible for checking if the Pods + # locked in the Pods/Manifest.lock file are in sync with the Pods defined + # in the Podfile.lock. + # + # @note The build phase is appended to the front because to fail + # fast. + # + # @return [void] + # + def add_check_manifest_lock_script_phase + phase_name = CHECK_MANIFEST_PHASE_NAME + native_targets.each do |native_target| + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + phase_name) + native_target.build_phases.unshift(phase).uniq! unless native_target.build_phases.first == phase + phase.shell_script = <<-SH.strip_heredoc + diff "${PODS_PODFILE_DIR_PATH}/Podfile.lock" "${PODS_ROOT}/Manifest.lock" > /dev/null + if [ $? != 0 ] ; then + # print error to STDERR + echo "error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation." >&2 + exit 1 + fi + # This output is used by Xcode 'outputs' to avoid re-running this script phase. + echo "SUCCESS" > "${SCRIPT_OUTPUT_FILE_0}" + SH + phase.input_paths = %w(${PODS_PODFILE_DIR_PATH}/Podfile.lock ${PODS_ROOT}/Manifest.lock) + phase.output_paths = [target.check_manifest_lock_script_output_file_path] + end + end + + # @param [Array] removed_phase_names + # The names of the script phases that should be removed + # + def remove_obsolete_script_phases(removed_phase_names = REMOVED_SCRIPT_PHASE_NAMES) + native_targets.each do |native_target| + removed_phase_names.each do |phase_name| + TargetIntegrator.remove_script_phase_from_target(native_target, phase_name) + end + end + end + + def add_on_demand_resources + target.pod_targets.each do |pod_target| + # When integrating with the user's project we are only interested in integrating ODRs from library specs + # and not test specs or app specs. + library_file_accessors = pod_target.file_accessors.select { |fa| fa.spec.library_specification? } + target_odr_group_name = "#{pod_target.label}-OnDemandResources" + # The 'Pods' group would always be there for production code however for tests its sometimes not added. 
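The hashes handed to `create_or_update_user_script_phases` by `add_user_script_phases` above originate from the `script_phase` DSL; a small invented example of the shape the integrator expects, limited to keys the vendored code actually reads:

    script_phases = [
      {
        :name => 'Lint Sources',                 # stored as '[CP-User] Lint Sources'
        :script => 'echo "linting..."',
        :shell_path => '/bin/sh',
        :execution_position => :before_compile,  # reordered ahead of the sources build phase
        :show_env_vars_in_log => '0',
      },
    ]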
+ # This ensures its always present and makes it easier for existing and new tests. + parent_odr_group = target.user_project.main_group['Pods'] || target.user_project.new_group('Pods') + TargetIntegrator.update_on_demand_resources(target.sandbox, target.user_project, target.user_targets, + library_file_accessors, parent_odr_group, target_odr_group_name) + end + end + + private + + # @!group Private Helpers + #---------------------------------------------------------------------# + + # @return [Array] The list of all the targets that + # match the given target. + # + def native_targets + @native_targets ||= target.user_targets + end + + # @return [Array] The list of all the targets that + # require that the pod frameworks are embedded in the output + # directory / product bundle. + # + def native_targets_to_embed_in + return [] if target.requires_host_target? + native_targets.select do |target| + EMBED_FRAMEWORK_TARGET_TYPES.include?(target.symbol_type) + end + end + + # Read the project from the disk to ensure that it is up to date as + # other TargetIntegrators might have modified it. + # + # @return [Project] + # + def user_project + target.user_project + end + + # @return [Specification::Consumer] the consumer for the specifications. + # + def spec_consumers + @spec_consumers ||= target.pod_targets.map(&:file_accessors).flatten.map(&:spec_consumer) + end + + # @return [String] the message that should be displayed for the target + # integration. + # + def integration_message + "Integrating target `#{target.name}` " \ + "(#{UI.path target.user_project_path} project)" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator.rb new file mode 100644 index 0000000..f68acbe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator.rb @@ -0,0 +1,179 @@ +module Pod + class Installer + class UserProjectIntegrator + class TargetIntegrator + # Configures an user target to use the CocoaPods xcconfigs which allow + # lo link against the Pods. + # + class XCConfigIntegrator + # Integrates the user target. + # + # @param [Target::AggregateTarget] pod_bundle + # The Pods bundle. + # + # @param [Array] targets + # The native targets associated which should be integrated + # with the Pod bundle. + # + def self.integrate(pod_bundle, targets) + targets.each do |target| + target.build_configurations.each do |config| + set_target_xcconfig(pod_bundle, target, config) + end + end + end + + private + + # @!group Integration steps + #-------------------------------------------------------------------# + + # Creates a file reference to the xcconfig generated by + # CocoaPods (if needed) and sets it as the base configuration of + # build configuration of the user target. + # + # @param [Target::AggregateTarget] pod_bundle + # The Pods bundle. + # + # @param [PBXNativeTarget] target + # The native target. + # + # @param [Xcodeproj::XCBuildConfiguration] config + # The build configuration. 
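Before `set_target_xcconfig` below decides whether to replace an existing base configuration, `xcconfig_includes_target_xcconfig?` checks whether the user's own xcconfig already includes the CocoaPods-generated one. A rough, self-contained sketch of that check with invented paths (the real method reads the file from disk and also honors a silence token):

    target_config_path = 'Pods/Target Support Files/Pods-App/Pods-App.debug.xcconfig' # hypothetical
    user_xcconfig = <<-XCCONFIG
      #include "Pods/Target Support Files/Pods-App/Pods-App.debug.xcconfig"
      OTHER_SWIFT_FLAGS = $(inherited) -DDEBUG
    XCCONFIG

    includes_pods_xcconfig = user_xcconfig.lines.any? do |line|
      line =~ /^\s*#include(\?)?\s+['"](.*\/)?#{Regexp.quote(target_config_path)}['"]/
    end
    # => true, so CocoaPods leaves the custom base configuration alone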
+ # + def self.set_target_xcconfig(pod_bundle, target, config) + file_ref = create_xcconfig_ref(pod_bundle, config) + path = file_ref.path + + existing = config.base_configuration_reference + + if existing && existing != file_ref + if existing.real_path.to_path.start_with?(pod_bundle.sandbox.root.to_path << '/') + config.base_configuration_reference = file_ref + elsif !xcconfig_includes_target_xcconfig?(config.base_configuration_reference, path) + unless existing_config_is_identical_to_pod_config?(existing.real_path, pod_bundle.xcconfig_path(config.name)) + UI.warn 'CocoaPods did not set the base configuration of your ' \ + 'project because your project already has a custom ' \ + 'config set. In order for CocoaPods integration to work at ' \ + 'all, please either set the base configurations of the target ' \ + "`#{target.name}` to `#{path}` or include the `#{path}` in your " \ + "build configuration (#{UI.path(existing.real_path)})." + end + end + elsif config.base_configuration_reference.nil? || file_ref.nil? + config.base_configuration_reference = file_ref + end + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------# + + # Prints a warning informing the user that a build configuration of + # the integrated target is overriding the CocoaPods build settings. + # + # @param [Target::AggregateTarget] pod_bundle + # The Pods bundle. + # + # @param [XcodeProj::PBXNativeTarget] target + # The native target. + # + # @param [Xcodeproj::XCBuildConfiguration] config + # The build configuration. + # + # @param [String] key + # The key of the overridden build setting. + # + def self.print_override_warning(pod_bundle, target, config, key) + actions = [ + 'Use the `$(inherited)` flag, or', + 'Remove the build settings from the target.', + ] + message = "The `#{target.name} [#{config.name}]` " \ + "target overrides the `#{key}` build setting defined in " \ + "`#{pod_bundle.pod_bundle.xcconfig_relative_path(config.name)}'. " \ + 'This can lead to problems with the CocoaPods installation' + UI.warn(message, actions) + end + + # Naively checks to see if a given PBXFileReference imports a given + # path. + # + # @param [PBXFileReference] base_config_ref + # A file reference to an `.xcconfig` file. + # + # @param [String] target_config_path + # The path to check for. + # + SILENCE_WARNINGS_STRING = '// @COCOAPODS_SILENCE_WARNINGS@ //' + def self.xcconfig_includes_target_xcconfig?(base_config_ref, target_config_path) + return unless base_config_ref && base_config_ref.real_path.file? + regex = %r{ + ^( + (\s* # Possible, but unlikely, space before include statement + \#include(\?)?\s+ # Include statement + ['"] # Open quote + (.*\/)? # Possible prefix to path + #{Regexp.quote(target_config_path)} # The path should end in the target_config_path + ['"] # Close quote + ) + | + (#{Regexp.quote(SILENCE_WARNINGS_STRING)}) # Token to treat xcconfig as good and silence pod install warnings + ) + }x + base_config_ref.real_path.readlines.find { |line| line =~ regex } + end + + # Checks to see if the config files at two paths exist and are identical + # + # @param The existing config path + # + # @param The pod config path + # + def self.existing_config_is_identical_to_pod_config?(existing_config_path, pod_config_path) + existing_config_path.file? && (!pod_config_path.file? || FileUtils.compare_file(existing_config_path, pod_config_path)) + end + + # Creates a file reference to the xcconfig generated by + # CocoaPods (if needed). 
+ # If the Pods group not exists, create the group and set + # the location to the `Pods` directory. + # If the file reference exists, the location is different + # with the xcconfig's path and the symlink target paths + # are different, we will update the location. + # + # @param [Target::AggregateTarget] pod_bundle + # The Pods bundle. + # + # @param [Xcodeproj::XCBuildConfiguration] config + # The build configuration. + # + # @return [PBXFileReference] the xcconfig reference. + # + def self.create_xcconfig_ref(pod_bundle, config) + # Xcode root group's path is absolute, we must get the relative path of the sandbox to the user project + group_path = pod_bundle.relative_pods_root_path + group = config.project['Pods'] || config.project.new_group('Pods', group_path) + + # support user custom paths of Pods group and xcconfigs files. + group_path = Pathname.new(group.real_path) + xcconfig_path = Pathname.new(pod_bundle.xcconfig_path(config.name)) + path = xcconfig_path.relative_path_from(group_path) + + filename = path.basename.to_s + file_ref = group.files.find { |f| f.display_name == filename } + if file_ref && file_ref.path != path + file_ref_path = Pathname.new(file_ref.real_path) + if !file_ref_path.exist? || !xcconfig_path.exist? || file_ref_path.realpath != xcconfig_path.realpath + file_ref.path = path.to_s + end + end + + file_ref || group.new_file(path.to_s) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode.rb new file mode 100644 index 0000000..51da34c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode.rb @@ -0,0 +1,11 @@ +module Pod + class Installer + class Xcode + autoload :PodsProjectGenerator, 'cocoapods/installer/xcode/pods_project_generator' + autoload :SinglePodsProjectGenerator, 'cocoapods/installer/xcode/single_pods_project_generator' + autoload :MultiPodsProjectGenerator, 'cocoapods/installer/xcode/multi_pods_project_generator' + autoload :PodsProjectWriter, 'cocoapods/installer/xcode/pods_project_generator/pods_project_writer' + autoload :TargetValidator, 'cocoapods/installer/xcode/target_validator' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/multi_pods_project_generator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/multi_pods_project_generator.rb new file mode 100644 index 0000000..613f19d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/multi_pods_project_generator.rb @@ -0,0 +1,82 @@ +module Pod + class Installer + class Xcode + # The {MultiPodsProjectGenerator} handles generation of the 'Pods/Pods.xcodeproj' and Xcode projects + # for every {PodTarget}. All Pod Target projects are nested under the 'Pods.xcodeproj'. + # + class MultiPodsProjectGenerator < PodsProjectGenerator + # Generates `Pods/Pods.xcodeproj` and all pod target subprojects. + # + # @return [PodsProjectGeneratorResult] + # + def generate! + # Generate container Pods.xcodeproj. 
+ container_project = create_container_project(aggregate_targets, sandbox.project_path) + + project_paths_by_pod_targets = pod_targets.group_by do |pod_target| + sandbox.pod_target_project_path(pod_target.project_name) + end + projects_by_pod_targets = Hash[project_paths_by_pod_targets.map do |project_path, pod_targets| + project = create_pods_project(pod_targets, project_path, container_project) + [project, pod_targets] + end] + + # Note: We must call `install_file_references` on all pod targets before installing them. + pod_target_installation_results = install_all_pod_targets(projects_by_pod_targets) + aggregate_target_installation_results = install_aggregate_targets_into_project(container_project, aggregate_targets) + target_installation_results = InstallationResults.new(pod_target_installation_results, aggregate_target_installation_results) + + integrate_targets(target_installation_results.pod_target_installation_results) + wire_target_dependencies(target_installation_results) + PodsProjectGeneratorResult.new(container_project, projects_by_pod_targets, target_installation_results) + end + + private + + def create_container_project(aggregate_targets, path) + return unless aggregate_targets + platforms = aggregate_targets.map(&:platform) + ProjectGenerator.new(sandbox, path, [], build_configurations, platforms, + project_object_version, config.podfile_path).generate! + end + + def create_pods_project(pod_targets, path, parent_project) + platforms = pod_targets.map(&:platform) + project = ProjectGenerator.new(sandbox, path, pod_targets, build_configurations, platforms, + project_object_version, false, :pod_target_subproject => true).generate! + # Instead of saving every subproject to disk, we can optimize this by creating a temporary folder + # the file reference can use so that we only have to call `save` once for all projects. + project.path.mkpath + if parent_project + pod_name = pod_name_from_grouping(pod_targets) + is_local = sandbox.local?(pod_name) + parent_project.add_pod_subproject(project, is_local) + end + + install_file_references(project, pod_targets) + project + end + + def install_all_pod_targets(projects_by_pod_targets) + UI.message '- Installing Pod Targets' do + projects_by_pod_targets.each_with_object({}) do |(project, pod_targets), target_installation_results| + target_installation_results.merge!(install_pod_targets(project, pod_targets)) + end + end + end + + def install_aggregate_targets_into_project(project, aggregate_targets) + return {} unless project + install_aggregate_targets(project, aggregate_targets) + end + + def pod_name_from_grouping(pod_targets) + # The presumption here for multi pods project is that we group by `pod_name`, thus the grouping of `pod_targets` + # should share the same `pod_name`. + raise '[BUG] Expected at least 1 pod target' if pod_targets.empty? + pod_targets.first.pod_name + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator.rb new file mode 100644 index 0000000..8e8dcfd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator.rb @@ -0,0 +1,291 @@ +module Pod + class Installer + class Xcode + # The {PodsProjectGenerator} handles generation of CocoaPods Xcode projects. 
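The grouping step in `MultiPodsProjectGenerator#generate!` above is the heart of the multi-project layout, the one selected by the `generate_multiple_pod_projects` installation option: pod targets are bucketed by the subproject path they will live in, and each bucket becomes one nested project. Illustrative (invented) names and paths:

    # Podfile (hypothetical):
    #   install! 'cocoapods', :generate_multiple_pod_projects => true
    pod_target_names_by_project_path = {
      'Pods/Alamofire.xcodeproj' => ['Alamofire-iOS', 'Alamofire-macOS'],
      'Pods/SnapKit.xcodeproj'   => ['SnapKit'],
    }
    # Each entry is generated as its own project and then nested under Pods/Pods.xcodeproj.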
+ # + class PodsProjectGenerator + require 'cocoapods/installer/xcode/pods_project_generator/target_installer_helper' + require 'cocoapods/installer/xcode/pods_project_generator/pod_target_integrator' + require 'cocoapods/installer/xcode/pods_project_generator/target_installer' + require 'cocoapods/installer/xcode/pods_project_generator/target_installation_result' + require 'cocoapods/installer/xcode/pods_project_generator/pod_target_installer' + require 'cocoapods/installer/xcode/pods_project_generator/file_references_installer' + require 'cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer' + require 'cocoapods/installer/xcode/pods_project_generator/project_generator' + require 'cocoapods/installer/xcode/pods_project_generator_result' + require 'cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer' + require 'cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer' + require 'cocoapods/native_target_extension.rb' + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Array] The model representations of an + # aggregation of pod targets generated for a target definition + # in the Podfile. + # + attr_reader :aggregate_targets + + # @return [Array] The model representations of pod targets. + # + attr_reader :pod_targets + + # @return [Hash{String => Symbol}] The build configurations that need to be installed. + # + attr_reader :build_configurations + + # @return [InstallationOptions] the installation options from the Podfile. + # + attr_reader :installation_options + + # @return [Config] the global CocoaPods configuration. + # + attr_reader :config + + # @return [Integer] the object version for the projects we will generate. + # + attr_reader :project_object_version + + # @return [ProjectMetadataCache] the metadata cache used to reconstruct target dependencies. + # + attr_reader :metadata_cache + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] aggregate_targets @see #aggregate_targets + # @param [Array] pod_targets @see #pod_targets + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [InstallationOptions] installation_options @see #installation_options + # @param [Config] config @see #config + # @param [Integer] project_object_version @see #project_object_version + # @param [ProjectMetadataCache] metadata_cache @see #metadata_cache + # + def initialize(sandbox, aggregate_targets, pod_targets, build_configurations, installation_options, config, + project_object_version, metadata_cache = nil) + @sandbox = sandbox + @aggregate_targets = aggregate_targets + @pod_targets = pod_targets + @build_configurations = build_configurations + @installation_options = installation_options + @config = config + @project_object_version = project_object_version + @metadata_cache = metadata_cache + end + + # Configure schemes for the specified project and pod targets. Schemes for development pods will be shared + # if requested by the integration. + # + # @param [PBXProject] project The project to configure schemes for. + # @param [Array] pod_targets The pod targets within that project to configure their schemes. + # @param [PodsProjectGeneratorResult] generator_result the result of the project generation + # + # @return [void] + # + def configure_schemes(project, pod_targets, generator_result) + pod_targets.each do |pod_target| + share_scheme = pod_target.should_build? 
&& share_scheme_for_development_pod?(pod_target.pod_name) && sandbox.local?(pod_target.pod_name) + configure_schemes_for_pod_target(project, pod_target, share_scheme, generator_result) + end + end + + # @!attribute [Hash{String => TargetInstallationResult}] pod_target_installation_results + # @!attribute [Hash{String => TargetInstallationResult}] aggregate_target_installation_results + InstallationResults = Struct.new(:pod_target_installation_results, :aggregate_target_installation_results) + + private + + def install_file_references(project, pod_targets) + UI.message "- Installing files into #{project.project_name} project" do + installer = FileReferencesInstaller.new(sandbox, pod_targets, project, installation_options.preserve_pod_file_structure) + installer.install! + end + end + + def install_pod_targets(project, pod_targets) + umbrella_headers_by_dir = pod_targets.map do |pod_target| + next unless pod_target.should_build? && pod_target.defines_module? + pod_target.umbrella_header_path + end.compact.group_by(&:dirname) + + pod_target_installation_results = Hash[pod_targets.sort_by(&:name).map do |pod_target| + umbrella_headers_in_header_dir = umbrella_headers_by_dir[pod_target.module_map_path.dirname] + target_installer = PodTargetInstaller.new(sandbox, project, pod_target, umbrella_headers_in_header_dir) + [pod_target.name, target_installer.install!] + end] + + # Hook up system framework dependencies for the pod targets that were just installed. + pod_target_installation_result_values = pod_target_installation_results.values.compact + unless pod_target_installation_result_values.empty? + add_system_framework_dependencies(pod_target_installation_result_values) + end + + pod_target_installation_results + end + + def install_aggregate_targets(project, aggregate_targets) + UI.message '- Installing Aggregate Targets' do + aggregate_target_installation_results = Hash[aggregate_targets.sort_by(&:name).map do |target| + target_installer = AggregateTargetInstaller.new(sandbox, project, target) + [target.name, target_installer.install!] + end] + + aggregate_target_installation_results + end + end + + # @param [Hash{String => InstallationResult}] pod_target_installation_results + # the installations to integrate + # + # @return [void] + # + def integrate_targets(pod_target_installation_results) + pod_installations_to_integrate = pod_target_installation_results.values.select do |pod_target_installation_result| + pod_target = pod_target_installation_result.target + !pod_target_installation_result.test_native_targets.empty? || + !pod_target_installation_result.app_native_targets.empty? || + pod_target.contains_script_phases? || + pod_target.framework_paths.values.flatten.any? { |paths| !paths.dsym_path.nil? } || + pod_target.xcframeworks.values.any? { |xcframeworks| !xcframeworks.empty? } + end + return if pod_installations_to_integrate.empty? + + UI.message '- Integrating targets' do + use_input_output_paths = !installation_options.disable_input_output_paths + pod_installations_to_integrate.each do |pod_target_installation_result| + PodTargetIntegrator.new(pod_target_installation_result, :use_input_output_paths => use_input_output_paths).integrate! 
+ end + end + end + + def add_system_framework_dependencies(pod_target_installation_results) + sorted_installation_results = pod_target_installation_results.sort_by do |pod_target_installation_result| + pod_target_installation_result.target.name + end + sorted_installation_results.each do |target_installation_result| + pod_target = target_installation_result.target + next unless pod_target.should_build? + next if pod_target.build_as_static? + pod_target.file_accessors.each do |file_accessor| + native_target = target_installation_result.native_target_for_spec(file_accessor.spec) + add_system_frameworks_to_native_target(native_target, file_accessor) + end + end + end + + # Adds a target dependency for each pod spec to each aggregate target and + # links the pod targets among each other. + # + # @param [Array[Hash{String=>TargetInstallationResult}]] target_installation_results + # the installation results that were produced when all targets were installed. This includes + # pod target installation results and aggregate target installation results. + # + # @return [void] + # + def wire_target_dependencies(target_installation_results) + pod_target_installation_results_hash = target_installation_results.pod_target_installation_results + aggregate_target_installation_results_hash = target_installation_results.aggregate_target_installation_results + + AggregateTargetDependencyInstaller.new(sandbox, aggregate_target_installation_results_hash, + pod_target_installation_results_hash, metadata_cache).install! + + PodTargetDependencyInstaller.new(sandbox, pod_target_installation_results_hash, metadata_cache).install! + end + + # @param [String] pod The root name of the development pod. + # + # @return [Bool] whether the scheme for the given development pod should be + # shared. + # + def share_scheme_for_development_pod?(pod) + case dev_pods_to_share = installation_options.share_schemes_for_development_pods + when TrueClass, FalseClass, NilClass + dev_pods_to_share + when Array + dev_pods_to_share.any? { |dev_pod| dev_pod === pod } # rubocop:disable Style/CaseEquality + else + raise Informative, 'Unable to handle share_schemes_for_development_pods ' \ + "being set to #{dev_pods_to_share.inspect} -- please set it to true, " \ + 'false, or an array of pods to share schemes for.' + end + end + + #------------------------------------------------------------------------# + + # @! group Private Helpers + + def add_system_frameworks_to_native_target(native_target, file_accessor) + file_accessor.spec_consumer.frameworks.each do |framework| + native_target.add_system_framework(framework) + end + end + + # @param [Project] project + # the project of the pod target + # + # @param [Pod::PodTarget] pod_target + # the pod target for which to configure schemes + # + # @param [Boolean] share_scheme + # whether the created schemes should be shared + # + # @param [PodsProjectGeneratorResult] generator_result + # the project generation result + # + def configure_schemes_for_pod_target(project, pod_target, share_scheme, generator_result) + # Ignore subspecs because they do not provide a scheme configuration due to the fact that they are always + # merged with the root spec scheme. 
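The `scheme_configuration` consumed in the body below comes from the spec's `scheme` DSL attribute; a hypothetical test spec showing the keys this code reads (launch arguments, environment variables, code coverage):

    # Hypothetical podspec excerpt; names and values are illustrative only.
    Pod::Spec.new do |s|
      s.name    = 'DemoKit'
      s.version = '1.0.0'
      s.test_spec 'Tests' do |test_spec|
        test_spec.scheme = {
          :launch_arguments      => ['-com.apple.CoreData.SQLDebug 1'],
          :environment_variables => { 'API_ENV' => 'staging' },
          :code_coverage         => true,
        }
      end
    end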
+ specs = [pod_target.root_spec] + pod_target.test_specs + pod_target.app_specs + hosted_test_specs_by_host = Hash.new do |hash, key| + hash[key] = [] + end + pod_target.test_app_hosts_by_spec.each do |spec, (host_spec, host_target)| + if host_target == pod_target + hosted_test_specs_by_host[host_spec] << spec + end + end + is_custom_host = !hosted_test_specs_by_host.empty? + specs.each do |spec| + scheme_name = pod_target.spec_label(spec) + scheme_configuration = pod_target.scheme_for_spec(spec) + if !scheme_configuration.empty? || is_custom_host + scheme_path = Xcodeproj::XCScheme.user_data_dir(project.path) + "#{scheme_name}.xcscheme" + scheme = Xcodeproj::XCScheme.new(scheme_path) + command_line_arguments = scheme.launch_action.command_line_arguments + scheme_configuration.fetch(:launch_arguments, []).each do |launch_arg| + command_line_arguments.assign_argument(:argument => launch_arg, :enabled => true) + end + scheme.launch_action.command_line_arguments = command_line_arguments + environment_variables = scheme.launch_action.environment_variables + scheme_configuration.fetch(:environment_variables, {}).each do |k, v| + environment_variables.assign_variable(:key => k, :value => v) + end + scheme.launch_action.environment_variables = environment_variables + if scheme_configuration.key?(:code_coverage) + scheme.test_action.code_coverage_enabled = scheme_configuration[:code_coverage] + end + + hosted_test_specs_by_host[spec].each do |hosted_spec| + # We are an app spec which hosts this test spec. + # Include the test specs's test bundle within our scheme's test action + native_target = generator_result.native_target_for_spec(hosted_spec) + testable = Xcodeproj::XCScheme::TestAction::TestableReference.new(native_target) + scheme.test_action.add_testable(testable) + end + + if spec.test_specification? + # Default to using the test bundle to expand variables + native_target_for_expansion = generator_result.native_target_for_spec(spec) + macro_expansion = Xcodeproj::XCScheme::MacroExpansion.new(native_target_for_expansion) + scheme.launch_action.add_macro_expansion(macro_expansion) + end + scheme.save! + end + Xcodeproj::XCScheme.share_scheme(project.path, scheme_name) if share_scheme + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer.rb new file mode 100644 index 0000000..7e26e9e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer.rb @@ -0,0 +1,66 @@ +module Pod + class Installer + class Xcode + # Wires up the dependencies for aggregate targets from the target installation results + # + class AggregateTargetDependencyInstaller + require 'cocoapods/native_target_extension.rb' + + # @return [Hash{String => TargetInstallationResult}] The target installation results for pod targets. + # + attr_reader :pod_target_installation_results + + # @return [Hash{String => TargetInstallationResult}] The target installation results for aggregate targets. + # + attr_reader :aggregate_target_installation_results + + # @return [ProjectMetadataCache] The project metadata cache. + # + attr_reader :metadata_cache + + # @return [Sandbox] The sandbox used for this installation. + # + attr_reader :sandbox + + # Initialize a new instance. 
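As a side note on the scheme sharing handled a little earlier by `share_scheme_for_development_pod?`: the option it interprets accepts true, false, or an array of pod names, set from the Podfile roughly like this (pod name invented):

    # Podfile (hypothetical):
    install! 'cocoapods', :share_schemes_for_development_pods => ['MyDevPod'] # or true / false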
+ # + # @param [Sandbox] sandbox @see #sandbox + # @param [Hash{String => TargetInstallationResult}] aggregate_target_installation_results @see #aggregate_target_installation_results + # @param [Hash{String => TargetInstallationResult}] pod_target_installation_results @see #pod_target_installation_results + # @param [ProjectMetadataCache] metadata_cache @see #metadata_cache + # + def initialize(sandbox, aggregate_target_installation_results, pod_target_installation_results, metadata_cache) + @sandbox = sandbox + @aggregate_target_installation_results = aggregate_target_installation_results + @pod_target_installation_results = pod_target_installation_results + @metadata_cache = metadata_cache + end + + def install! + aggregate_target_installation_results.values.each do |aggregate_target_installation_result| + aggregate_target = aggregate_target_installation_result.target + aggregate_native_target = aggregate_target_installation_result.native_target + project = aggregate_native_target.project + # Wire up dependencies that are part of inherit search paths for this aggregate target. + aggregate_target.search_paths_aggregate_targets.each do |search_paths_target| + aggregate_native_target.add_dependency(aggregate_target_installation_results[search_paths_target.name].native_target) + end + # Wire up all pod target dependencies to aggregate target. + aggregate_target.pod_targets.each do |pod_target| + if pod_target_installation_result = pod_target_installation_results[pod_target.name] + pod_target_native_target = pod_target_installation_result.native_target + aggregate_native_target.add_dependency(pod_target_native_target) + else + # Hit the cache + is_local = sandbox.local?(pod_target.pod_name) + cached_dependency = metadata_cache.target_label_by_metadata[pod_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, aggregate_native_target, cached_dependency) + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer.rb new file mode 100644 index 0000000..93ff4a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer.rb @@ -0,0 +1,192 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Creates the targets which aggregate the Pods libraries in the Pods + # project and the relative support files. + # + class AggregateTargetInstaller < TargetInstaller + # @return [AggregateTarget] @see TargetInstaller#target + # + attr_reader :target + + # Creates the target in the Pods project and the relative support files. + # + # @return [TargetInstallationResult] the result of the installation of this target. + # + def install! + UI.message "- Installing target `#{target.name}` #{target.platform}" do + native_target = add_target + create_support_files_dir + create_support_files_group + create_xcconfig_file(native_target) + if target.build_as_framework? + create_info_plist_file(target.info_plist_path, native_target, target.version, target.platform) + create_module_map(native_target) + create_umbrella_header(native_target) + elsif target.uses_swift? 
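+            # Hedged note: even when the aggregate target links statically, a module map and
+            # umbrella header are still produced whenever any pod target uses Swift, presumably
+            # so that module-style imports of the `Pods_<Target>` module keep working for user
+            # targets that consume the statically linked pods.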
+ create_module_map(native_target) + create_umbrella_header(native_target) + end + # Because embedded targets live in their host target, CocoaPods + # copies all of the embedded target's pod_targets to its host + # targets. Having this script for the embedded target would + # cause an App Store rejection because frameworks cannot be + # embedded in embedded targets. + # + create_embed_frameworks_script if embed_frameworks_script_required? + create_bridge_support_file(native_target) + create_copy_resources_script if target.includes_resources? + create_acknowledgements + create_dummy_source(native_target) + clean_support_files_temp_dir + TargetInstallationResult.new(target, native_target) + end + end + + #-----------------------------------------------------------------------# + + private + + # @return [TargetDefinition] the target definition of the library. + # + def target_definition + target.target_definition + end + + # Ensure that vendored static frameworks and libraries are not linked + # twice to the aggregate target, which shares the xcconfig of the user + # target. + # + def custom_build_settings + settings = { + 'CODE_SIGN_IDENTITY[sdk=appletvos*]' => '', + 'CODE_SIGN_IDENTITY[sdk=iphoneos*]' => '', + 'CODE_SIGN_IDENTITY[sdk=watchos*]' => '', + 'MACH_O_TYPE' => 'staticlib', + 'OTHER_LDFLAGS' => '', + 'OTHER_LIBTOOLFLAGS' => '', + 'PODS_ROOT' => '$(SRCROOT)', + 'PRODUCT_BUNDLE_IDENTIFIER' => 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}', + 'SKIP_INSTALL' => 'YES', + + # Needed to ensure that static libraries won't try to embed the swift stdlib, + # since there's no where to embed in for a static library. + # Not necessary for dynamic frameworks either, since the aggregate targets are never shipped + # on their own, and are always further embedded into an app target. + 'ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES' => 'NO', + } + super.merge(settings) + end + + # @return [Boolean] whether this target requires an `Embed Frameworks` script phase + # + def embed_frameworks_script_required? + includes_dynamic_xcframeworks = target.xcframeworks_by_config.values.flatten.map(&:build_type).any?(&:dynamic_framework?) + (target.includes_frameworks? || includes_dynamic_xcframeworks) && !target.requires_host_target? + end + + # Creates the group that holds the references to the support files + # generated by this installer. + # + # @return [void] + # + def create_support_files_group + parent = project.support_files_group + name = target.name + dir = target.support_files_dir + @support_files_group = parent.new_group(name, dir) + end + + # Generates the contents of the xcconfig file and saves it to disk. + # + # @param [PBXNativeTarget] native_target + # the native target to link the module map file into. + # + # @return [void] + # + def create_xcconfig_file(native_target) + native_target.build_configurations.each do |configuration| + next unless target.user_build_configurations.key?(configuration.name) + path = target.xcconfig_path(configuration.name) + build_settings = target.build_settings(configuration.name) + update_changed_file(build_settings, path) + target.xcconfigs[configuration.name] = build_settings.xcconfig + xcconfig_file_ref = add_file_to_support_group(path) + configuration.base_configuration_reference = xcconfig_file_ref + end + end + + # Generates the bridge support metadata if requested by the {Podfile}. + # + # @note The bridge support metadata is added to the resources of the + # target because it is needed for environments interpreted at + # runtime. 
+ # + # @param [PBXNativeTarget] native_target + # the native target to add the bridge support file into. + # + # @return [void] + # + def create_bridge_support_file(native_target) + if target.podfile.generate_bridge_support? + path = target.bridge_support_path + headers = native_target.headers_build_phase.files.map { |bf| sandbox.root + bf.file_ref.path } + generator = Generator::BridgeSupport.new(headers) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that copies the resources to the bundle of the client + # target. + # + # @note The bridge support file needs to be created before the prefix + # header, otherwise it will not be added to the resources script. + # + # @return [void] + # + def create_copy_resources_script + path = target.copy_resources_script_path + generator = Generator::CopyResourcesScript.new(target.resource_paths_by_config, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + + # Creates a script that embeds the frameworks to the bundle of the client + # target. + # + # @note We can't use Xcode default link libraries phase, because + # we need to ensure that we only copy the frameworks which are + # relevant for the current build configuration. + # + # @return [void] + # + def create_embed_frameworks_script + path = target.embed_frameworks_script_path + generator = Generator::EmbedFrameworksScript.new(target.framework_paths_by_config, target.xcframeworks_by_config) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + + # Generates the acknowledgement files (markdown and plist) for the target. + # + # @return [void] + # + def create_acknowledgements + basepath = target.acknowledgements_basepath + Generator::Acknowledgements.generators.each do |generator_class| + path = generator_class.path_from_basepath(basepath) + file_accessors = target.pod_targets.map(&:file_accessors).flatten + generator = generator_class.new(file_accessors) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + #-----------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/app_host_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/app_host_installer.rb new file mode 100644 index 0000000..dfffa0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/app_host_installer.rb @@ -0,0 +1,154 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Installs an app host target to a given project. + # + class AppHostInstaller + include TargetInstallerHelper + + # @return [Sandbox] + # The sandbox used for this installation. + # + attr_reader :sandbox + + # @return [Pod::Project] + # The project to install the app host into. + # + attr_reader :project + + # @return [Platform] the platform to use for this app host. + # + attr_reader :platform + + # @return [String] the name of the sub group. + # + attr_reader :subgroup_name + + # @return [String] the name of the group the app host installer will be installing within. + # + attr_reader :group_name + + # @return [String] the name of the app target label that will be used. 
+ # + attr_reader :app_target_label + + # @return [Boolean] whether the app host installer should add main.m + # + attr_reader :add_main + + # @return [Boolean] whether the app host installer should add a launch screen storyboard + # + attr_reader :add_launchscreen_storyboard + + # @return [Hash] Info.plist entries for the app host + # + attr_reader :info_plist_entries + + # @return [String] product_basename + # The product basename to use for the target. + # + attr_reader :product_basename + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Pod::Project] project @see #project + # @param [Platform] platform @see #platform + # @param [String] subgroup_name @see #subgroup_name + # @param [String] group_name @see #group_name + # @param [String] app_target_label see #app_target_label + # @param [Boolean] add_main see #add_main + # @param [Hash] info_plist_entries see #info_plist_entries + # @param [String] product_basename see #product_basename + # + def initialize(sandbox, project, platform, subgroup_name, group_name, app_target_label, add_main: true, + add_launchscreen_storyboard: platform == :ios, info_plist_entries: {}, product_basename: nil) + @sandbox = sandbox + @project = project + @platform = platform + @subgroup_name = subgroup_name + @group_name = group_name + @app_target_label = app_target_label + @add_main = add_main + @add_launchscreen_storyboard = add_launchscreen_storyboard + @info_plist_entries = info_plist_entries + @product_basename = product_basename || app_target_label + target_group = project.pod_group(group_name) + @group = target_group[subgroup_name] || target_group.new_group(subgroup_name) + end + + # @return [PBXNativeTarget] the app host native target that was installed. + # + def install! 
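+          # Typical invocation, sketched from PodTargetInstaller#add_test_app_host_targets later
+          # in this diff (the `AppHost-FooKit-Unit-Tests` / `FooKit` labels are illustrative):
+          #
+          #   AppHostInstaller.new(sandbox, project, Platform.ios,
+          #                        'AppHost-FooKit-Unit-Tests',  # subgroup_name
+          #                        'FooKit',                     # group_name (root pod name)
+          #                        'AppHost-FooKit-Unit-Tests').install!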
+ platform_name = platform.name + app_host_target = Pod::Generator::AppTargetHelper.add_app_target(project, platform_name, deployment_target, + app_target_label, product_basename) + app_host_target.build_configurations.each do |configuration| + configuration.build_settings['PRODUCT_NAME'] = product_basename + configuration.build_settings['PRODUCT_BUNDLE_IDENTIFIER'] = 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}' + if platform == :osx + configuration.build_settings['CODE_SIGN_IDENTITY'] = '' + elsif platform == :ios + configuration.build_settings['CODE_SIGN_IDENTITY'] = 'iPhone Developer' + end + configuration.build_settings['CURRENT_PROJECT_VERSION'] = '1' + end + + Pod::Generator::AppTargetHelper.add_app_host_main_file(project, app_host_target, platform_name, @group, app_target_label) if add_main + Pod::Generator::AppTargetHelper.add_launchscreen_storyboard(project, app_host_target, @group, deployment_target, app_target_label) if add_launchscreen_storyboard + create_info_plist_file_with_sandbox(sandbox, app_host_info_plist_path, app_host_target, '1.0.0', platform, + :appl, :additional_entries => additional_info_plist_entries) + @group.new_file(app_host_info_plist_path) + app_host_target + end + + private + + ADDITIONAL_INFO_PLIST_ENTRIES = { + 'NSAppTransportSecurity' => { + 'NSAllowsArbitraryLoads' => true, + }, + }.freeze + + ADDITIONAL_IOS_INFO_PLIST_ENTRIES = { + 'UILaunchStoryboardName' => 'LaunchScreen', + 'UISupportedInterfaceOrientations' => %w( + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + ), + 'UISupportedInterfaceOrientations~ipad' => %w( + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + ), + }.freeze + + # @return [Hash] the additional Info.plist entries to be included + # + def additional_info_plist_entries + result = {} + result.merge!(ADDITIONAL_INFO_PLIST_ENTRIES) + result.merge!(ADDITIONAL_IOS_INFO_PLIST_ENTRIES) if platform == :ios + result.merge!(info_plist_entries) if info_plist_entries + result + end + + # @return [Pathname] The absolute path of the Info.plist to use for an app host. + # + def app_host_info_plist_path + project.path.dirname.+(subgroup_name).+("#{app_target_label}-Info.plist") + end + + # @return [String] The deployment target. + # + def deployment_target + platform.deployment_target.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/file_references_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/file_references_installer.rb new file mode 100644 index 0000000..0287213 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/file_references_installer.rb @@ -0,0 +1,329 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Controller class responsible for installing the file references of the + # specifications in the Pods project. + # + class FileReferencesInstaller + # Regex for extracting the region portion of a localized file path. Ex. `Resources/en.lproj` --> `en` + LOCALIZATION_REGION_FILEPATTERN_REGEX = /(\/|^)(?<region>[^\/]*?)\.lproj(\/|$)/ + + # @return [Sandbox] The sandbox of the installation. + # + attr_reader :sandbox + + # @return [Array] The pod targets of the installation. 
+ # + attr_reader :pod_targets + + # @return [Project] The project to install the file references into. + # + attr_reader :pods_project + + # @return [Bool] add support for preserving the file structure of externally sourced pods, in addition to local pods. + # + attr_reader :preserve_pod_file_structure + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] pod_targets @see #pod_targets + # @param [Project] pods_project @see #pods_project + # @param [Bool] preserve_pod_file_structure @see #preserve_pod_file_structure + # + def initialize(sandbox, pod_targets, pods_project, preserve_pod_file_structure = false) + @sandbox = sandbox + @pod_targets = pod_targets + @pods_project = pods_project + @preserve_pod_file_structure = preserve_pod_file_structure + end + + # Installs the file references. + # + # @return [void] + # + def install! + refresh_file_accessors + prepare_pod_groups + add_source_files_references + add_frameworks_bundles + add_vendored_libraries + add_resources + add_developer_files unless sandbox.development_pods.empty? + link_headers + end + + #-----------------------------------------------------------------------# + + private + + # @!group Installation Steps + + # Reads the file accessors contents from the file system. + # + # @note The contents of the file accessors are modified by the clean + # step of the #{PodSourceInstaller} and by the pre install hooks. + # + # @return [void] + # + def refresh_file_accessors + file_accessors.reject do |file_accessor| + pod_name = file_accessor.spec.name + sandbox.local?(pod_name) + end.map(&:path_list).uniq.each(&:read_file_system) + end + + # Prepares the main groups to which all files will be added for the respective target + # + def prepare_pod_groups + file_accessors.each do |file_accessor| + pod_name = file_accessor.spec.name + next unless sandbox.local?(pod_name) + root_name = Specification.root_name(pod_name) + path = file_accessor.root + group = pods_project.group_for_spec(root_name) + group.set_path(path) unless group.path == path + end + end + + # Adds the source files of the Pods to the Pods project. + # + # @note The source files are grouped by Pod and in turn by subspec + # (recursively). + # + # @return [void] + # + def add_source_files_references + UI.message '- Adding source files' do + add_file_accessors_paths_to_pods_group(:source_files, nil, true) + end + end + + # Adds the bundled frameworks to the Pods project + # + # @return [void] + # + def add_frameworks_bundles + UI.message '- Adding frameworks' do + add_file_accessors_paths_to_pods_group(:vendored_frameworks, :frameworks) + end + end + + # Adds the bundled libraries to the Pods project + # + # @return [void] + # + def add_vendored_libraries + UI.message '- Adding libraries' do + add_file_accessors_paths_to_pods_group(:vendored_libraries, :frameworks) + end + end + + # Adds the resources of the Pods to the Pods project. + # + # @note The source files are grouped by Pod and in turn by subspec + # (recursively) in the resources group. 
+ # + # @return [void] + # + def add_resources + UI.message '- Adding resources' do + refs = add_file_accessors_paths_to_pods_group(:resources, :resources, true) + refs.concat add_file_accessors_paths_to_pods_group(:resource_bundle_files, :resources, true) + add_known_regions(refs) + end + end + + def add_developer_files + UI.message '- Adding development pod helper files' do + file_accessors.each do |file_accessor| + pod_name = file_accessor.spec.name + next unless sandbox.local?(pod_name) + root_name = Specification.root_name(pod_name) + paths = file_accessor.developer_files + next if paths.empty? + group = pods_project.group_for_spec(root_name, :developer) + paths.each do |path| + ref = pods_project.add_file_reference(path, group, false) + if path.extname == '.podspec' + pods_project.mark_ruby_file_ref(ref) + end + end + end + end + end + + # Creates the link to the headers of the Pod in the sandbox. + # + # @return [void] + # + def link_headers + UI.message '- Linking headers' do + pod_targets.each do |pod_target| + # When integrating Pod as frameworks, built Pods are built into + # frameworks, whose headers are included inside the built + # framework. Those headers do not need to be linked from the + # sandbox. + next if pod_target.build_as_framework? && pod_target.should_build? + + pod_target_header_mappings = pod_target.header_mappings_by_file_accessor.values + pod_target_header_mappings.each do |header_mappings| + header_mappings.each do |namespaced_path, files| + pod_target.build_headers.add_files(namespaced_path, files) + end + end + + public_header_mappings = pod_target.public_header_mappings_by_file_accessor.values + public_header_mappings.each do |header_mappings| + header_mappings.each do |namespaced_path, files| + sandbox.public_headers.add_files(namespaced_path, files) + end + end + end + end + end + + #-----------------------------------------------------------------------# + + private + + # @!group Private Helpers + + # @return [Array] The file accessors for all the + # specs platform combinations. + # + def file_accessors + @file_accessors ||= pod_targets.flat_map(&:file_accessors).compact + end + + # Adds file references to the list of the paths returned by the file + # accessor with the given key to the given group of the Pods project. + # + # @param [Symbol] file_accessor_key + # The method of the file accessor which would return the list of + # the paths. + # + # @param [Symbol] group_key + # The key of the group of the Pods project. + # + # @param [Bool] reflect_file_system_structure + # Whether organizing a local pod's files in subgroups inside + # the pod's group is allowed. + # + # @return [Array] the added file references + # + def add_file_accessors_paths_to_pods_group(file_accessor_key, group_key = nil, reflect_file_system_structure = false) + file_accessors.flat_map do |file_accessor| + paths = file_accessor.send(file_accessor_key) + paths = allowable_project_paths(paths) + next [] if paths.empty? + + pod_name = file_accessor.spec.name + preserve_pod_file_structure_flag = (sandbox.local?(pod_name) || preserve_pod_file_structure) && reflect_file_system_structure + base_path = preserve_pod_file_structure_flag ? common_path(paths) : nil + actual_group_key = preserve_pod_file_structure_flag ? 
nil : group_key + group = pods_project.group_for_spec(pod_name, actual_group_key) + paths.map do |path| + pods_project.add_file_reference(path, group, preserve_pod_file_structure_flag, base_path) + end + end + end + + # Filters a list of paths down to those paths which can be added to + # the Xcode project. Some paths are intermediates and only their children + # should be added, while some paths are treated as bundles and their + # children should not be added directly. + # + # @param [Array] paths + # The paths to files or directories on disk. + # + # @return [Array] The paths which can be added to the Xcode project + # + def allowable_project_paths(paths) + lproj_paths = Set.new + lproj_paths_with_files = Set.new + allowable_paths = paths.select do |path| + path_str = path.to_s + + # We add the directory for a Core Data model, but not the items in it. + next if path_str =~ /.*\.xcdatamodeld\/.+/i + + # We add the directory for a Core Data migration mapping, but not the items in it. + next if path_str =~ /.*\.xcmappingmodel\/.+/i + + # We add the directory for an asset catalog, but not the items in it. + next if path_str =~ /.*\.xcassets\/.+/i + + if path_str =~ /\.lproj(\/|$)/i + # If the element is an .lproj directory then save it and potentially + # add it later if we don't find any contained items. + if path_str =~ /\.lproj$/i && path.directory? + lproj_paths << path + next + end + + # Collect the paths for the .lproj directories that contain files. + lproj_path = /(^.*\.lproj)\/.*/i.match(path_str)[1] + lproj_paths_with_files << Pathname(lproj_path) + + # Directories nested within an .lproj directory are added as file + # system references so their contained items are not added directly. + next if path.dirname.dirname == lproj_path + end + + true + end + + # Only add the path for the .lproj directories that do not have anything + # within them added as well. This generally happens if the glob within the + # resources directory was not a recursive glob. + allowable_paths + lproj_paths.subtract(lproj_paths_with_files).to_a + end + + # Returns a Pathname of the nearest parent from which all the given paths descend. + # Converts each Pathname to a list of path components and finds the longest common prefix + # + # @param [Array] paths + # The paths to files or directories on disk. Must be absolute paths + # + # @return [Pathname] Pathname of the nearest parent shared by paths, or nil if none exists + # + def common_path(paths) + return nil if paths.empty? + strs = paths.map do |path| + unless path.absolute? 
+ raise ArgumentError, "Paths must be absolute #{path}" + end + path.dirname.to_s + end + min, max = strs.minmax + min = min.split('/') + max = max.split('/') + idx = min.size.times { |i| break i if min[i] != max[i] } + result = Pathname.new(min[0...idx].join('/')) + # Don't consider "/" a common path + return result unless result.to_s == '' || result.to_s == '/' + end + + # Adds the known localization regions to the root of the project + # + # @param [Array] file_references the resource file references + # + def add_known_regions(file_references) + pattern = LOCALIZATION_REGION_FILEPATTERN_REGEX + regions = file_references.map do |ref| + if (match = ref.path.to_s.match(pattern)) + match[:region] + end + end.compact + + pods_project.root_object.known_regions = (pods_project.root_object.known_regions | regions).sort + end + + #-----------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer.rb new file mode 100644 index 0000000..90ae39c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer.rb @@ -0,0 +1,195 @@ +module Pod + class Installer + class Xcode + # Wires up the dependencies between targets from the target installation results + # + class PodTargetDependencyInstaller + require 'cocoapods/native_target_extension.rb' + + # @return [Sandbox] The sandbox used for this installation. + # + attr_reader :sandbox + + # @return [TargetInstallationResults] The target installation results for pod targets. + # + attr_reader :pod_target_installation_results + + # @return [ProjectMetadataCache] The metadata cache for targets. + # + attr_reader :metadata_cache + + # Initialize a new instance. + # + # @param [Sandbox] sandbox @see #sandbox + # @param [TargetInstallationResults] pod_target_installation_results @see #pod_target_installation_results + # @param [ProjectMetadataCache] metadata_cache @see #metadata_cache + # + def initialize(sandbox, pod_target_installation_results, metadata_cache) + @sandbox = sandbox + @pod_target_installation_results = pod_target_installation_results + @metadata_cache = metadata_cache + end + + def install! + # Wire up pod targets + pod_target_installation_results.values.each do |pod_target_installation_result| + pod_target = pod_target_installation_result.target + native_target = pod_target_installation_result.native_target + project = native_target.project + + # First, wire up all resource bundles. + wire_resource_bundle_targets(pod_target_installation_result.resource_bundle_targets, + native_target, pod_target) + # Wire up all dependencies to this pod target, if any. + wire_target_dependencies(pod_target, native_target, project, pod_target_installation_results, + metadata_cache) + + # Wire up test native targets. + unless pod_target_installation_result.test_native_targets.empty? + wire_test_native_targets(pod_target, pod_target_installation_result, pod_target_installation_results, + project, metadata_cache) + end + + # Wire up app native targets. + unless pod_target_installation_result.app_native_targets.empty? 
+ wire_app_native_targets(pod_target, pod_target_installation_result, pod_target_installation_results, + project, metadata_cache) + end + end + end + + private + + def wire_resource_bundle_targets(resource_bundle_targets, native_target, pod_target) + resource_bundle_targets.each do |resource_bundle_target| + native_target.add_dependency(resource_bundle_target) + if pod_target.build_as_dynamic_framework? && pod_target.should_build? + native_target.add_resources([resource_bundle_target.product_reference]) + end + end + end + + def wire_target_dependencies(pod_target, native_target, project, pod_target_installation_results, metadata_cache) + dependent_targets = pod_target.dependent_targets + dependent_targets.each do |dependent_target| + is_local = sandbox.local?(dependent_target.pod_name) + if installation_result = pod_target_installation_results[dependent_target.name] + dependent_project = installation_result.native_target.project + if dependent_project != project + project.add_pod_subproject(dependent_project, is_local) + end + native_target.add_dependency(installation_result.native_target) + else + # Hit the cache + cached_dependency = metadata_cache.target_label_by_metadata[dependent_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, native_target, cached_dependency) + end + end + end + + def wire_test_native_targets(pod_target, installation_result, pod_target_installation_results, project, metadata_cache) + installation_result.test_specs_by_native_target.each do |test_native_target, test_spec| + resource_bundle_native_targets = installation_result.test_resource_bundle_targets[test_spec.name] || [] + resource_bundle_native_targets.each do |test_resource_bundle_target| + test_native_target.add_dependency(test_resource_bundle_target) + end + + test_dependent_targets = pod_target.test_dependent_targets_by_spec_name.fetch(test_spec.name, []).+([pod_target]).uniq + test_dependent_targets.each do |test_dependent_target| + is_local = sandbox.local?(test_dependent_target.pod_name) + if dependency_installation_result = pod_target_installation_results[test_dependent_target.name] + dependent_test_project = dependency_installation_result.native_target.project + if dependent_test_project != project + project.add_pod_subproject(dependent_test_project, is_local) + end + test_native_target.add_dependency(dependency_installation_result.native_target) + else + # Hit the cache + cached_dependency = metadata_cache.target_label_by_metadata[test_dependent_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, test_native_target, cached_dependency) + end + end + + if app_host_target_label = pod_target.app_host_target_label(test_spec) + app_host_pod_target_label, app_host_target_label = *app_host_target_label + wire_test_native_target_app_host(test_native_target, pod_target, pod_target_installation_results, project, metadata_cache, app_host_pod_target_label, app_host_target_label) + end + end + end + + def wire_test_native_target_app_host(test_native_target, pod_target, pod_target_installation_results, project, metadata_cache, app_host_pod_target_label, app_host_target_label) + if dependency_installation_result = pod_target_installation_results[app_host_pod_target_label] + unless app_native_target = dependency_installation_result.app_host_target_labelled(app_host_target_label) + raise Informative, "Did not find target with label #{app_host_target_label} in the 
set of targets installed for #{app_host_pod_target_label}." + end + + dependent_test_project = app_native_target.project + if dependent_test_project != project + project.add_subproject_reference(dependent_test_project, project.dependencies_group) + end + + app_host_target_names = app_native_target.resolved_build_setting('PRODUCT_NAME', true) + test_native_target.build_configurations.each do |configuration| + app_host_target_name = app_host_target_names[configuration.name] || target.name + case test_native_target.symbol_type + when :unit_test_bundle + test_host = "$(BUILT_PRODUCTS_DIR)/#{app_host_target_name}.app/" + test_host << 'Contents/MacOS/' if pod_target.platform == :osx + test_host << app_host_target_name.to_s + configuration.build_settings['BUNDLE_LOADER'] = '$(TEST_HOST)' + configuration.build_settings['TEST_HOST'] = test_host + when :ui_test_bundle + configuration.build_settings['TEST_TARGET_NAME'] = app_host_target_name + end + end + target_attributes = project.root_object.attributes['TargetAttributes'] || {} + target_attributes[test_native_target.uuid.to_s] = { 'TestTargetID' => app_native_target.uuid.to_s } + project.root_object.attributes['TargetAttributes'] = target_attributes + test_native_target.add_dependency(app_native_target) + elsif cached_dependency = metadata_cache.target_label_by_metadata[app_host_target_label] + # Hit the cache + project.add_cached_subproject_reference(sandbox, cached_dependency, project.dependencies_group) + Project.add_cached_dependency(sandbox, test_native_target, cached_dependency) + else + raise "Expected to either have an installation or cache result for #{app_host_target_label} (from pod #{app_host_pod_target_label}) " \ + "for target #{test_native_target.name} in project #{project.project_name}" + end + end + + def wire_app_native_targets(pod_target, installation_result, pod_target_installation_results, project, metadata_cache) + installation_result.app_specs_by_native_target.each do |app_native_target, app_spec| + resource_bundle_native_targets = installation_result.app_resource_bundle_targets[app_spec.name] || [] + resource_bundle_native_targets.each do |app_resource_bundle_target| + app_native_target.add_dependency(app_resource_bundle_target) + end + + app_dependent_targets = pod_target.app_dependent_targets_by_spec_name.fetch(app_spec.name, []).unshift(pod_target).uniq + app_dependent_targets.each do |app_dependent_target| + is_local = sandbox.local?(app_dependent_target.pod_name) + if dependency_installation_result = pod_target_installation_results[app_dependent_target.name] + resource_bundle_native_targets = dependency_installation_result.app_resource_bundle_targets[app_spec.name] + unless resource_bundle_native_targets.nil? 
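+              # Descriptive note: resource bundles that a dependent target registered for this
+              # app spec are wired up as dependencies of the app native target as well, so they
+              # are built before the app spec target itself.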
+ resource_bundle_native_targets.each do |app_resource_bundle_target| + app_native_target.add_dependency(app_resource_bundle_target) + end + end + dependency_project = dependency_installation_result.native_target.project + if dependency_project != project + project.add_pod_subproject(dependency_project, is_local) + end + app_native_target.add_dependency(dependency_installation_result.native_target) + else + # Hit the cache + cached_dependency = metadata_cache.target_label_by_metadata[app_dependent_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, app_native_target, cached_dependency) + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_installer.rb new file mode 100644 index 0000000..9ae38b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_installer.rb @@ -0,0 +1,1239 @@ +require 'active_support/core_ext/array' +require 'active_support/core_ext/string/inflections' +require 'cocoapods/xcode' + +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Creates the target for the Pods libraries in the Pods project and the + # relative support files. + # + class PodTargetInstaller < TargetInstaller + require 'cocoapods/installer/xcode/pods_project_generator/app_host_installer' + + # @return [Array] Array of umbrella header paths in the headers directory + # + attr_reader :umbrella_header_paths + + # @return [PodTarget] @see TargetInstaller#target + # + attr_reader :target + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see TargetInstaller#sandbox + # @param [Pod::Project] project @see TargetInstaller#project + # @param [PodTarget] target @see TargetInstaller#target + # @param [Array] umbrella_header_paths @see #umbrella_header_paths + # + def initialize(sandbox, project, target, umbrella_header_paths = nil) + super(sandbox, project, target) + @umbrella_header_paths = umbrella_header_paths + end + + # Creates the target in the Pods project and the relative support files. + # + # @return [TargetInstallationResult] the result of the installation of this target. + # + def install! + UI.message "- Installing target `#{target.name}` #{target.platform}" do + create_support_files_dir + library_file_accessors = target.file_accessors.select { |fa| fa.spec.library_specification? } + test_file_accessors = target.file_accessors.select { |fa| fa.spec.test_specification? } + app_file_accessors = target.file_accessors.select { |fa| fa.spec.app_specification? } + + native_target = if target.should_build? + add_target + else + # For targets that should not be built (e.g. pre-built vendored frameworks etc), we add a placeholder + # PBXAggregateTarget that will be used to wire up dependencies later. 
+ add_placeholder_target + end + + resource_bundle_targets = add_resources_bundle_targets(library_file_accessors).values.flatten + + test_native_targets = add_test_targets + test_app_host_targets = add_test_app_host_targets + test_resource_bundle_targets = add_resources_bundle_targets(test_file_accessors) + + app_native_targets = add_app_targets + app_resource_bundle_targets = add_resources_bundle_targets(app_file_accessors) + + add_files_to_build_phases(native_target, test_native_targets, app_native_targets) + targets_to_validate = test_native_targets + app_native_targets.values + targets_to_validate << native_target if target.should_build? + validate_targets_contain_sources(targets_to_validate) + validate_xcframeworks if target.should_build? + + create_copy_xcframeworks_script unless target.xcframeworks.values.all?(&:empty?) + + create_xcconfig_file(native_target, resource_bundle_targets) + create_test_xcconfig_files(test_native_targets, test_resource_bundle_targets) + create_app_xcconfig_files(app_native_targets, app_resource_bundle_targets) + + if target.should_build? && target.defines_module? && !skip_modulemap?(target.library_specs) + create_module_map(native_target) do |generator| + generator.headers.concat module_map_additional_headers + end + create_umbrella_header(native_target) do |generator| + generator.imports += library_file_accessors.flat_map do |file_accessor| + header_dir = if !target.build_as_framework? && dir = file_accessor.spec_consumer.header_dir + Pathname.new(dir) + end + + file_accessor.public_headers.map do |public_header| + public_header = if header_mappings_dir(file_accessor) + public_header.relative_path_from(header_mappings_dir(file_accessor)) + else + public_header.basename + end + if header_dir + public_header = header_dir.join(public_header) + end + public_header + end + end + end + end + + if target.should_build? && target.build_as_framework? + unless skip_info_plist?(native_target) + create_info_plist_file(target.info_plist_path, native_target, target.version, target.platform, + :additional_entries => target.info_plist_entries) + end + create_build_phase_to_symlink_header_folders(native_target) + end + + if target.should_build? && target.build_as_library? && target.uses_swift? + add_swift_library_compatibility_header_phase(native_target) + end + + project_directory = project.path.dirname + + if target.should_build? && !skip_pch?(target.library_specs) + path = target.prefix_header_path + create_prefix_header(path, library_file_accessors, target.platform, native_target, project_directory) + add_file_to_support_group(path) + end + unless skip_pch?(target.test_specs) + target.test_specs.each do |test_spec| + path = target.prefix_header_path_for_spec(test_spec) + test_spec_consumer = test_spec.consumer(target.platform) + test_native_target = test_native_target_from_spec(test_spec_consumer.spec, test_native_targets) + create_prefix_header(path, test_file_accessors, target.platform, test_native_target, project_directory) + add_file_to_support_group(path) + end + end + unless skip_pch?(target.app_specs) + target.app_specs.each do |app_spec| + path = target.prefix_header_path_for_spec(app_spec) + app_spec_consumer = app_spec.consumer(target.platform) + app_native_target = app_native_targets[app_spec_consumer.spec] + create_prefix_header(path, app_file_accessors, target.platform, app_native_target, project_directory) + add_file_to_support_group(path) + end + end + create_dummy_source(native_target) if target.should_build? 
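+            # Descriptive note (behaviour implemented in create_copy_dsyms_script): this emits
+            # the script phase that copies the dSYM and, where present, BCSymbolMap files shipped
+            # next to prebuilt vendored frameworks, so they end up alongside the build products.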
+ create_copy_dsyms_script + clean_support_files_temp_dir + TargetInstallationResult.new(target, native_target, resource_bundle_targets, + test_native_targets, test_resource_bundle_targets, test_app_host_targets, + app_native_targets, app_resource_bundle_targets) + end + end + + private + + # Adds the target for the library to the Pods project with the + # appropriate build configurations. + # + # @note Overrides the superclass implementation to remove settings that are set in the pod target xcconfig + # + # @return [PBXNativeTarget] the native target that was added. + # + def add_target + super.tap do |native_target| + remove_pod_target_xcconfig_overrides_from_target(target.build_settings, native_target) + end + end + + # Removes overrides of the `pod_target_xcconfig` settings from the target's + # build configurations. + # + # @param [Hash{Symbol => Pod::Target::BuildSettings}] build_settings_by_config the build settings by config + # of the target. + # + # @param [PBXNativeTarget] native_target + # the native target to remove pod target xcconfig overrides from. + # + # @return [Void] + # + # + def remove_pod_target_xcconfig_overrides_from_target(build_settings_by_config, native_target) + native_target.build_configurations.each do |configuration| + build_settings = build_settings_by_config[target.user_build_configurations[configuration.name]] + unless build_settings.nil? + build_settings.merged_pod_target_xcconfigs.each_key do |setting| + configuration.build_settings.delete(setting) + end + end + end + end + + # @param [Array] specs + # the specs to check against whether `.pch` generation should be skipped or not. + # + # @return [Boolean] Whether the target should build a pch file. + # + def skip_pch?(specs) + specs.any? { |spec| spec.root.prefix_header_file.is_a?(FalseClass) } + end + + def skip_modulemap?(specs) + specs.any? { |spec| spec.module_map.is_a?(FalseClass) } + end + + # True if info.plist generation should be skipped + # + # @param [PXNativeTarget] native_target + # + # @return [Boolean] Whether the target should build an Info.plist file + # + def skip_info_plist?(native_target) + existing_setting = native_target.resolved_build_setting('INFOPLIST_FILE', true).values.compact + !existing_setting.empty? + end + + # Remove the default headers folder path settings for static library pod + # targets. + # + # @return [Hash{String => String}] + # + def custom_build_settings + settings = super + unless target.build_as_framework? + settings['PRIVATE_HEADERS_FOLDER_PATH'] = '' + settings['PUBLIC_HEADERS_FOLDER_PATH'] = '' + end + + settings['PRODUCT_NAME'] = target.product_basename + settings['PRODUCT_MODULE_NAME'] = target.product_module_name + + settings['CODE_SIGN_IDENTITY[sdk=appletvos*]'] = '' + settings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'] = '' + settings['CODE_SIGN_IDENTITY[sdk=watchos*]'] = '' + + settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS'] = '$(inherited) ' + + if target.swift_version + settings['SWIFT_VERSION'] = target.swift_version + end + + if info_plist_bundle_id + settings['PRODUCT_BUNDLE_IDENTIFIER'] = info_plist_bundle_id + end + + settings + end + + # @return [String] Bundle Identifier found in the custom Info.plist entries + # + def info_plist_bundle_id + return @plist_bundle_id if defined?(@plist_bundle_id) + unless target.info_plist_entries.nil? + @plist_bundle_id = target.info_plist_entries['CFBundleIdentifier'] + unless @plist_bundle_id.nil? 
+ message = "The `#{target.name}` target " \ + "sets a Bundle Identifier of `#{@plist_bundle_id}` in it's info.plist file. " \ + 'The Bundle Identifier should be set using pod_target_xcconfig: ' \ + "s.pod_target_xcconfig = { 'PRODUCT_BUNDLE_IDENTIFIER': '#{@plist_bundle_id}' }`." + UI.warn message + end + @plist_bundle_id + end + end + + # Filters the given resource file references discarding empty paths which are + # added by their parent directory. This will also include references to the parent [PBXVariantGroup] + # for all resources underneath it. + # + # @param [Array] resource_file_references + # The array of all resource file references to filter. + # + # @yield_param [Array} The filtered resource file references to be installed + # in the copy resources phase. + # + # @yield_param [Array} The filtered resource file references to be installed + # in the compile sources phase. + # + # @note Core Data model directories (.xcdatamodeld) and RealityKit projects (.rcproject) + # used to be added to the `Copy Resources` build phase like all other resources, + # since they would compile correctly in either the resources or compile phase. In + # recent versions of xcode, there's an exception for data models that generate + # headers. These need to be added to the compile sources phase of a real + # target for the headers to be built in time for code in the target to + # use them. These kinds of models generally break when added to resource + # bundles. + # + def filter_resource_file_references(resource_file_references) + file_references = resource_file_references.map do |resource_file_reference| + ref = project.reference_for_path(resource_file_reference) + + # Some nested files are not directly present in the Xcode project, such as the contents + # of an .xcdatamodeld directory. These files are implicitly included by including their + # parent directory. + next if ref.nil? + + # For variant groups, the variant group itself is added, not its members. + next ref.parent if ref.parent.is_a?(Xcodeproj::Project::Object::PBXVariantGroup) + + ref + end.compact.uniq + compile_phase_matcher = lambda { |ref| !(ref.path =~ /.*\.(xcdatamodeld|rcproject)/i).nil? } + compile_phase_refs, resources_phase_refs = file_references.partition(&compile_phase_matcher) + yield compile_phase_refs, resources_phase_refs + end + + #-----------------------------------------------------------------------# + + # Adds the build files of the pods to the target and adds a reference to + # the frameworks of the Pods. + # + # @note The Frameworks are used only for presentation purposes as the + # xcconfig is the authoritative source about their information. + # + # @note Core Data model directories (.xcdatamodeld) defined in the `resources` + # property are currently added to the `Copy Resources` build phase like + # all other resources. The Xcode UI adds these to the `Compile Sources` + # build phase, but they will compile correctly either way. + # + # @return [void] + # + def add_files_to_build_phases(library_native_target, test_native_targets, app_native_targets) + target.file_accessors.each do |file_accessor| + consumer = file_accessor.spec_consumer + + native_target = case consumer.spec.spec_type + when :library + library_native_target + when :test + test_native_target_from_spec(consumer.spec, test_native_targets) + when :app + app_native_targets[consumer.spec] + else + raise ArgumentError, "Unknown spec type #{consumer.spec.spec_type}." 
+ end + + next if native_target.is_a?(Xcodeproj::Project::Object::PBXAggregateTarget) + + headers = file_accessor.headers + public_headers = file_accessor.public_headers.map(&:realpath) + project_headers = file_accessor.project_headers.map(&:realpath) + private_headers = file_accessor.private_headers.map(&:realpath) + other_source_files = file_accessor.other_source_files + + { + true => file_accessor.arc_source_files, + false => file_accessor.non_arc_source_files, + }.each do |arc, source_files| + next if source_files.empty? + source_files = source_files - headers - other_source_files + swift_source_files, non_swift_source_files = source_files.partition { |file| file.extname == '.swift' } + { + :objc => non_swift_source_files, + :swift => swift_source_files, + }.each do |language, files| + compiler_flags = compiler_flags_for_consumer(consumer, arc, language) + file_refs = project_file_references_array(files, 'source') + native_target.add_file_references(file_refs, compiler_flags) + end + end + + header_file_refs = project_file_references_array(headers, 'header') + native_target.add_file_references(header_file_refs) do |build_file| + add_header(file_accessor, build_file, public_headers, project_headers, private_headers, native_target) + end + + other_file_refs = project_file_references_array(other_source_files, 'other source') + native_target.add_file_references(other_file_refs, nil) + + next unless target.build_as_framework? + + filter_resource_file_references(file_accessor.resources.flatten) do |compile_phase_refs, resource_phase_refs| + native_target.add_file_references(compile_phase_refs, nil) + + if target.build_as_static_framework? && consumer.spec.library_specification? + resource_phase_refs = resource_phase_refs.select do |ref| + filename = ref.name || ref.path + Target.resource_extension_compilable?(File.extname(filename)) + end + end + + native_target.add_resources(resource_phase_refs) + end + end + end + + # Adds the test targets for the library to the Pods project with the + # appropriate build configurations. + # + # @return [Array] the test native targets created. + # + def add_test_targets + target.test_specs.map do |test_spec| + spec_consumer = test_spec.consumer(target.platform) + test_type = spec_consumer.test_type + product_type = target.product_type_for_test_type(test_type) + name = target.test_target_label(test_spec) + platform_name = target.platform.name + language = target.uses_swift_for_spec?(test_spec) ? :swift : :objc + product_basename = target.product_basename_for_spec(test_spec) + embedded_content_contains_swift = target.dependent_targets_for_test_spec(test_spec).any?(&:uses_swift?) + test_native_target = project.new_target(product_type, name, platform_name, + target.deployment_target_for_non_library_spec(test_spec), nil, + language, product_basename) + test_native_target.product_reference.name = name + + target.user_build_configurations.each do |bc_name, type| + test_native_target.add_build_configuration(bc_name, type) + end + + test_native_target.build_configurations.each do |configuration| + configuration.build_settings.merge!(custom_build_settings) + # target_installer will automatically add an empty `OTHER_LDFLAGS`. For test + # targets those are set via a test xcconfig file instead. + configuration.build_settings.delete('OTHER_LDFLAGS') + # target_installer will automatically set the product name to the module name if the target + # requires frameworks. 
For tests we always use the test target name as the product name + # irrelevant to whether we use frameworks or not. + configuration.build_settings['PRODUCT_NAME'] = name + # target_installer sets 'MACH_O_TYPE' for static frameworks ensure this does not propagate + # to test target. + configuration.build_settings.delete('MACH_O_TYPE') + # Use xcode default product module name, which is $(PRODUCT_NAME:c99extidentifier) + # this gives us always valid name that is distinct from the parent spec module name + # which allow tests to use either import or @testable import to access the parent framework + configuration.build_settings.delete('PRODUCT_MODULE_NAME') + # We must codesign iOS XCTest bundles that contain binary frameworks to allow them to be launchable in the simulator + unless target.platform == :osx + configuration.build_settings['CODE_SIGNING_REQUIRED'] = 'YES' + configuration.build_settings['CODE_SIGNING_ALLOWED'] = 'YES' + end + # For macOS we do not code sign the XCTest bundle because we do not code sign the frameworks either. + if target.platform == :osx + configuration.build_settings['CODE_SIGN_IDENTITY'] = '' + elsif target.platform == :ios + configuration.build_settings['CODE_SIGN_IDENTITY'] = 'iPhone Developer' + end + # Ensure swift stdlib gets copied in if needed, even when the target contains no swift files, + # because a dependency uses swift + configuration.build_settings['ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES'] = 'YES' if embedded_content_contains_swift + end + + remove_pod_target_xcconfig_overrides_from_target(target.test_spec_build_settings_by_config[test_spec.name], test_native_target) + + # Test native targets also need frameworks and resources to be copied over to their xctest bundle. + create_test_target_embed_frameworks_script(test_spec) + create_test_target_copy_resources_script(test_spec) + + # Generate vanilla Info.plist for test target similar to the one Xcode generates for new test target. + # This creates valid test bundle accessible at the runtime, allowing tests to load bundle resources + # defined in podspec. + additional_entries = spec_consumer.info_plist + path = target.info_plist_path_for_spec(test_spec) + create_info_plist_file(path, test_native_target, '1.0', target.platform, :bndl, :additional_entries => additional_entries) + + test_native_target + end + end + + # Adds the test app host targets for the library to the Pods project with the + # appropriate build configurations. + # + # @return [Array] the app host targets created. + # + def add_test_app_host_targets + target.test_spec_consumers.reject(&:requires_app_host?).select(&:app_host_name).each do |test_spec_consumer| + raise Informative, "`#{target.label}-#{test_spec_consumer.test_type}-Tests` manually specifies an app host but has not specified `requires_app_host = true`." + end + + target.test_spec_consumers.select(&:requires_app_host?).reject(&:app_host_name).group_by { |consumer| target.app_host_target_label(consumer.spec) }. + map do |(_, target_name), _| + AppHostInstaller.new(sandbox, project, target.platform, target_name, target.pod_name, target_name).install! + end + end + + # Adds the app targets for the library to the Pods project with the + # appropriate build configurations. 
+ # + # @return [Hash{Specification => PBXNativeTarget}] the app native targets created, keyed by their app spec + # + def add_app_targets + target.app_specs.each_with_object({}) do |app_spec, hash| + spec_consumer = app_spec.consumer(target.platform) + spec_name = app_spec.parent.name + subspec_name = target.subspec_label(app_spec) + app_target_label = target.app_target_label(app_spec) + platform = Platform.new(target.platform.symbolic_name, target.deployment_target_for_non_library_spec(app_spec)) + info_plist_entries = spec_consumer.info_plist + resources = target.file_accessors.find { |fa| fa.spec == app_spec }.resources + add_launchscreen_storyboard = resources.none? { |resource| resource.basename.to_s == 'LaunchScreen.storyboard' } && platform.name == :ios + embedded_content_contains_swift = target.dependent_targets_for_app_spec(app_spec).any?(&:uses_swift?) + app_native_target = AppHostInstaller.new(sandbox, project, platform, subspec_name, spec_name, + app_target_label, :add_main => false, + :add_launchscreen_storyboard => add_launchscreen_storyboard, + :info_plist_entries => info_plist_entries, + :product_basename => target.product_basename_for_spec(app_spec)).install! + + app_native_target.product_reference.name = app_target_label + target.user_build_configurations.each do |bc_name, type| + app_native_target.add_build_configuration(bc_name, type) + end + + app_native_target.build_configurations.each do |configuration| + configuration.build_settings.merge!(custom_build_settings) + + # target_installer will automatically add an empty `OTHER_LDFLAGS`. For app + # targets those are set via an app xcconfig file instead. + configuration.build_settings.delete('OTHER_LDFLAGS') + # target_installer will automatically set the product name to the module name if the target + # requires frameworks. For apps we always use the app target name as the product name + # irrelevant to whether we use frameworks or not. + configuration.build_settings['PRODUCT_NAME'] = app_target_label + # target_installer sets 'MACH_O_TYPE' for static frameworks ensure this does not propagate + # to app target. + configuration.build_settings.delete('MACH_O_TYPE') + # Use xcode default product module name, which is $(PRODUCT_NAME:c99extidentifier) + # this gives us always valid name that is distinct from the parent spec module name + # which allow the app to use import to access the parent framework + configuration.build_settings.delete('PRODUCT_MODULE_NAME') + + # We must codesign iOS app bundles that contain binary frameworks to allow them to be launchable in the simulator + unless target.platform == :osx + configuration.build_settings['CODE_SIGNING_REQUIRED'] = 'YES' + configuration.build_settings['CODE_SIGNING_ALLOWED'] = 'YES' + end + # For macOS we do not code sign the appbundle because we do not code sign the frameworks either. + configuration.build_settings['CODE_SIGN_IDENTITY'] = '' if target.platform == :osx + # For iOS, we delete the target_installer empty values that get set for libraries since CocoaPods will + # code sign the libraries manually but for apps this is not true. 
+ if target.platform == :ios + configuration.build_settings.delete('CODE_SIGN_IDENTITY[sdk=appletvos*]') + configuration.build_settings.delete('CODE_SIGN_IDENTITY[sdk=iphoneos*]') + configuration.build_settings.delete('CODE_SIGN_IDENTITY[sdk=watchos*]') + end + # Ensure swift stdlib gets copied in if needed, even when the target contains no swift files, + # because a dependency uses swift + configuration.build_settings['ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES'] = 'YES' if embedded_content_contains_swift + end + + remove_pod_target_xcconfig_overrides_from_target(target.app_spec_build_settings_by_config[app_spec.name], app_native_target) + + create_app_target_embed_frameworks_script(app_spec) + create_app_target_copy_resources_script(app_spec) + add_resources_to_target(resources, app_native_target) + + hash[app_spec] = app_native_target + end + end + + # Adds the resources to the compile resources phase of the target. + # + # @param [Array] paths the paths to add to the target. + # + # @param [PBXNativeTarget] target the target resources are added to. + # + # @return [Boolean] whether any compile phase references were added. + # + def add_resources_to_target(paths, target) + filter_resource_file_references(paths) do |compile_phase_refs, resource_phase_refs| + # Resource bundles are only meant to have resources, so install everything + # into the resources phase. See note in filter_resource_file_references. + target.add_resources(resource_phase_refs + compile_phase_refs) + !compile_phase_refs.empty? + end + end + + # Adds the resources of the Pods to the Pods project. + # + # @note The source files are grouped by Pod and in turn by subspec + # (recursively) in the resources group. + # + # @param [Array] file_accessors + # the file accessors list to generate resource bundles for. + # + # @return [Hash{String=>Array}] the resource bundle native targets created. + # + def add_resources_bundle_targets(file_accessors) + file_accessors.each_with_object({}) do |file_accessor, hash| + hash[file_accessor.spec.name] = file_accessor.resource_bundles.map do |bundle_name, paths| + label = target.resources_bundle_target_label(bundle_name) + resource_bundle_target = project.new_resources_bundle(label, file_accessor.spec_consumer.platform_name, nil, bundle_name) + resource_bundle_target.product_reference.name = label + contains_compile_phase_refs = add_resources_to_target(paths, resource_bundle_target) + + target.user_build_configurations.each do |bc_name, type| + resource_bundle_target.add_build_configuration(bc_name, type) + end + resource_bundle_target.deployment_target = if file_accessor.spec.non_library_specification? + target.deployment_target_for_non_library_spec(file_accessor.spec) + else + deployment_target + end + # Create Info.plist file for bundle + path = target.info_plist_path + path.dirname.mkdir unless path.dirname.exist? + info_plist_path = path.dirname + "ResourceBundle-#{bundle_name}-#{path.basename}" + create_info_plist_file(info_plist_path, resource_bundle_target, target.version, target.platform, :bndl) + + resource_bundle_target.build_configurations.each do |configuration| + configuration.build_settings['PRODUCT_NAME'] = bundle_name + # Do not set the CONFIGURATION_BUILD_DIR for resource bundles that are only meant for test targets. + # This is because the test target itself also does not set this configuration build dir and it expects + # all bundles to be copied from the default path. + unless file_accessor.spec.test_specification? 
+ configuration.build_settings['CONFIGURATION_BUILD_DIR'] = target.configuration_build_dir('$(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)') + end + + # Set the 'IBSC_MODULE' build settings for resource bundles so that Storyboards and Xibs can load + # classes from the parent module. + configuration.build_settings['IBSC_MODULE'] = target.product_module_name + + # Set the `SWIFT_VERSION` build setting for resource bundles that could have resources that get + # compiled such as an `xcdatamodeld` file which has 'Swift' as its code generation language. + if contains_compile_phase_refs && file_accessors.any? { |fa| target.uses_swift_for_spec?(fa.spec) } + configuration.build_settings['SWIFT_VERSION'] = target.swift_version + end + + # Set the correct device family for this bundle, based on the platform + device_family_by_platform = { + :ios => '1,2', + :tvos => '3', + :watchos => '1,2,4', + } + + if (family = device_family_by_platform[target.platform.name]) + configuration.build_settings['TARGETED_DEVICE_FAMILY'] = family + end + end + + remove_pod_target_xcconfig_overrides_from_target(target.build_settings_by_config_for_spec(file_accessor.spec), resource_bundle_target) + + resource_bundle_target + end + end + end + + # Generates the contents of the xcconfig file and saves it to disk. + # + # @param [PBXNativeTarget] native_target + # the native target to link the xcconfig file into. + # + # @param [Array] resource_bundle_targets + # the additional resource bundle targets to link the xcconfig file into. + # + # @return [void] + # + def create_xcconfig_file(native_target, resource_bundle_targets) + target.user_config_names_by_config_type.each do |config, names| + path = target.xcconfig_path(config) + update_changed_file(target.build_settings[config], path) + xcconfig_file_ref = add_file_to_support_group(path) + + # also apply the private config to resource bundle targets. + apply_xcconfig_file_ref_to_targets([native_target] + resource_bundle_targets, xcconfig_file_ref, names) + end + end + + # Generates the contents of the xcconfig file used for each test target type and saves it to disk. + # + # @param [Array] test_native_targets + # the test native target to link the xcconfig file into. + # + # @param [Hash{String=>Array}] test_resource_bundle_targets + # the additional test resource bundle targets to link the xcconfig file into. + # + # @return [void] + # + def create_test_xcconfig_files(test_native_targets, test_resource_bundle_targets) + target.test_specs.each do |test_spec| + spec_consumer = test_spec.consumer(target.platform) + test_type = spec_consumer.test_type + test_native_target = test_native_target_from_spec(spec_consumer.spec, test_native_targets) + + target.user_config_names_by_config_type.each do |config, names| + path = target.xcconfig_path("#{test_type.capitalize}-#{target.subspec_label(test_spec)}.#{config}") + test_spec_build_settings = target.build_settings_for_spec(test_spec, :configuration => config) + update_changed_file(test_spec_build_settings, path) + test_xcconfig_file_ref = add_file_to_support_group(path) + + # also apply the private config to resource bundle test targets related to this test spec. + scoped_test_resource_bundle_targets = test_resource_bundle_targets[test_spec.name] + apply_xcconfig_file_ref_to_targets([test_native_target] + scoped_test_resource_bundle_targets, test_xcconfig_file_ref, names) + end + end + end + + # Creates a script that copies the resources to the bundle of the test target. 
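The xcconfig wiring done by apply_xcconfig_file_ref_to_targets (used above for the library target and its resource-bundle targets alike) reduces to setting base_configuration_reference on the matching build configurations. A hedged, standalone sketch with the xcodeproj gem; the project path, target names, and the 'Debug' configuration filter are illustrative assumptions, not values taken from this installer.

    require 'xcodeproj'

    project = Xcodeproj::Project.open('Pods/Pods.xcodeproj')
    xcconfig_ref = project.new_file('Target Support Files/DemoPod/DemoPod.debug.xcconfig')

    config_targets = project.targets.select { |t| ['DemoPod', 'DemoPod-Resources'].include?(t.name) }
    config_targets.each do |config_target|
      config_target.build_configurations.each do |configuration|
        # Only configurations belonging to this config type (here 'Debug') get the file.
        next unless configuration.name == 'Debug'
        configuration.base_configuration_reference = xcconfig_ref
      end
    end

    project.save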
+ # + # @param [Specification] test_spec + # The test spec to create the copy resources script for. + # + # @return [void] + # + def create_test_target_copy_resources_script(test_spec) + path = target.copy_resources_script_path_for_spec(test_spec) + host_target_spec_names = target.app_host_dependent_targets_for_spec(test_spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + resource_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), resources_by_config| + resources_by_config[config_name] = target.dependent_targets_for_test_spec(test_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << test_spec.name if pod_target == target + pod_target.resource_paths.values_at(*spec_paths_to_include).flatten.compact + end + end + unless resource_paths_by_config.each_value.all?(&:empty?) + generator = Generator::CopyResourcesScript.new(resource_paths_by_config, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that embeds the frameworks to the bundle of the test target. + # + # @param [Specification] test_spec + # The test spec to create the embed frameworks script for. + # + # @return [void] + # + def create_test_target_embed_frameworks_script(test_spec) + path = target.embed_frameworks_script_path_for_spec(test_spec) + host_target_spec_names = target.app_host_dependent_targets_for_spec(test_spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + framework_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_test_spec(test_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << test_spec.name if pod_target == target + pod_target.framework_paths.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + xcframeworks_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_test_spec(test_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << test_spec.name if pod_target == target + pod_target.xcframeworks.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + unless framework_paths_by_config.each_value.all?(&:empty?) && xcframeworks_by_config.each_value.all?(&:empty?) + generator = Generator::EmbedFrameworksScript.new(framework_paths_by_config, xcframeworks_by_config) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Generates the contents of the xcconfig file used for each app target type and saves it to disk. + # + # @param [Hash{Specification => PBXNativeTarget}] app_native_targets + # the app native targets to link the xcconfig file into. + # + # @param [Hash{String=>Array}] app_resource_bundle_targets + # the additional app resource bundle targets to link the xcconfig file into. 
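Both script generators above assemble a hash of paths keyed by user build configuration, pulling each dependent target's paths for the spec names it should contribute. A minimal, plain-Ruby sketch of that shape with dummy data (all names are hypothetical; no CocoaPods objects are involved):

    # Stand-ins for pod targets: each knows its library spec names and a
    # resource-path map keyed by spec name.
    DummyTarget = Struct.new(:library_spec_names, :resource_paths)
    foo = DummyTarget.new(['Foo'], 'Foo' => ['${PODS_ROOT}/Foo/Assets.xcassets'])
    bar = DummyTarget.new(['Bar'], 'Bar' => ['${PODS_ROOT}/Bar/Strings.bundle'])

    user_build_configurations = { 'Debug' => :debug, 'Release' => :release }

    resource_paths_by_config = user_build_configurations.each_with_object({}) do |(config_name, _type), by_config|
      by_config[config_name] = [foo, bar].flat_map do |pod_target|
        # Mirrors the values_at(*names).flatten.compact pattern in the installer above.
        pod_target.resource_paths.values_at(*pod_target.library_spec_names).flatten.compact
      end
    end

    p resource_paths_by_config
    # => {"Debug"=>["${PODS_ROOT}/Foo/Assets.xcassets", "${PODS_ROOT}/Bar/Strings.bundle"], "Release"=>[...]}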
+ # + # @return [void] + # + def create_app_xcconfig_files(app_native_targets, app_resource_bundle_targets) + target.app_specs.each do |app_spec| + spec_consumer = app_spec.consumer(target.platform) + app_native_target = app_native_targets[spec_consumer.spec] + + target.user_config_names_by_config_type.each do |config, names| + path = target.xcconfig_path("#{target.subspec_label(app_spec)}.#{config}") + app_spec_build_settings = target.build_settings_for_spec(app_spec, :configuration => config) + update_changed_file(app_spec_build_settings, path) + app_xcconfig_file_ref = add_file_to_support_group(path) + + # also apply the private config to resource bundle app targets related to this app spec. + scoped_app_resource_bundle_targets = app_resource_bundle_targets[app_spec.name] + apply_xcconfig_file_ref_to_targets([app_native_target] + scoped_app_resource_bundle_targets, app_xcconfig_file_ref, names) + end + end + end + + # Creates a script that copies the resources to the bundle of the app target. + # + # @param [Specification] app_spec + # The app spec to create the copy resources script for. + # + # @return [void] + # + def create_app_target_copy_resources_script(app_spec) + path = target.copy_resources_script_path_for_spec(app_spec) + resource_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), resources_by_config| + pod_targets = target.dependent_targets_for_app_spec(app_spec, :configuration => config) + resources_by_config[config_name] = pod_targets.flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include << app_spec.name if pod_target == target + pod_target.resource_paths.values_at(*spec_paths_to_include).flatten.compact + end + end + unless resource_paths_by_config.each_value.all?(&:empty?) + generator = Generator::CopyResourcesScript.new(resource_paths_by_config, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that embeds the frameworks to the bundle of the app target. + # + # @param [Specification] app_spec + # The app spec to create the embed frameworks script for. + # + # @return [void] + # + def create_app_target_embed_frameworks_script(app_spec) + path = target.embed_frameworks_script_path_for_spec(app_spec) + framework_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_app_spec(app_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include << app_spec.name if pod_target == target + pod_target.framework_paths.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + xcframeworks_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_app_spec(app_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include << app_spec.name if pod_target == target + pod_target.xcframeworks.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + + unless framework_paths_by_config.each_value.all?(&:empty?) && xcframeworks_by_config.each_value.all?(&:empty?) 
+ generator = Generator::EmbedFrameworksScript.new(framework_paths_by_config, xcframeworks_by_config) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that copies and strips vendored dSYMs and bcsymbolmaps. + # + # @return [void] + # + def create_copy_dsyms_script + dsym_paths = PodTargetInstaller.dsym_paths(target) + bcsymbolmap_paths = PodTargetInstaller.bcsymbolmap_paths(target) + path = target.copy_dsyms_script_path + unless dsym_paths.empty? && bcsymbolmap_paths.empty? + generator = Generator::CopydSYMsScript.new(dsym_paths, bcsymbolmap_paths) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that copies the appropriate xcframework slice to the build dir. + # + # @note We can't use Xcode default link libraries phase, because + # we need to ensure that we only copy the frameworks which are + # relevant for the current build configuration. + # + # @return [void] + # + def create_copy_xcframeworks_script + path = target.copy_xcframeworks_script_path + generator = Generator::CopyXCFrameworksScript.new(target.xcframeworks.values.flatten, sandbox.root, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + + # Creates a build phase which links the versioned header folders + # of the OS X framework into the framework bundle's root directory. + # This is only necessary because the way how headers are copied + # via custom copy file build phases in combination with + # header_mappings_dir interferes with xcodebuild's expectations + # about the existence of private or public headers. + # + # @param [PBXNativeTarget] native_target + # the native target to add the script phase into. + # + # @return [void] + # + def create_build_phase_to_symlink_header_folders(native_target) + # This is required on iOS for Catalyst, which uses macOS framework layouts + return unless (target.platform.name == :osx || target.platform.name == :ios) && any_header_mapping_dirs? + + build_phase = native_target.new_shell_script_build_phase('Create Symlinks to Header Folders') + build_phase.shell_script = <<-eos.strip_heredoc + cd "$CONFIGURATION_BUILD_DIR/$WRAPPER_NAME" || exit 1 + if [ ! -d Versions ]; then + # Not a versioned framework, so no need to do anything + exit 0 + fi + + public_path="${PUBLIC_HEADERS_FOLDER_PATH\#\$CONTENTS_FOLDER_PATH/}" + if [ ! -f "$public_path" ]; then + ln -fs "${PUBLIC_HEADERS_FOLDER_PATH\#$WRAPPER_NAME/}" "$public_path" + fi + + private_path="${PRIVATE_HEADERS_FOLDER_PATH\#\$CONTENTS_FOLDER_PATH/}" + if [ ! -f "$private_path" ]; then + ln -fs "${PRIVATE_HEADERS_FOLDER_PATH\#\$WRAPPER_NAME/}" "$private_path" + fi + eos + end + + ENABLE_OBJECT_USE_OBJC_FROM = { + :ios => Version.new('6'), + :osx => Version.new('10.8'), + :watchos => Version.new('2.0'), + :tvos => Version.new('9.0'), + }.freeze + + # Returns the compiler flags for the source files of the given specification. + # + # The following behavior is regarding the `OS_OBJECT_USE_OBJC` flag. When + # set to `0`, it will allow code to use `dispatch_release()` on >= iOS 6.0 + # and OS X 10.8. + # + # * New libraries that do *not* require ARC don’t need to care about this + # issue at all. + # + # * New libraries that *do* require ARC _and_ have a deployment target of + # >= iOS 6.0 or OS X 10.8: + # + # These no longer use `dispatch_release()` and should *not* have the + # `OS_OBJECT_USE_OBJC` flag set to `0`. 
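The header-folder symlink phase above is an ordinary shell-script build phase attached to the native target. A trimmed sketch of adding such a phase with the xcodeproj gem; the project path and the 'DemoPod' target are assumptions, and the script body is shortened to the guard clauses only.

    require 'xcodeproj'

    project = Xcodeproj::Project.open('Pods/Pods.xcodeproj')
    native_target = project.targets.find { |t| t.name == 'DemoPod' } # hypothetical target

    phase = native_target.new_shell_script_build_phase('Create Symlinks to Header Folders')
    phase.shell_script = <<-SH
      cd "$CONFIGURATION_BUILD_DIR/$WRAPPER_NAME" || exit 1
      # Only versioned (macOS-style) framework bundles need the symlinks.
      [ -d Versions ] || exit 0
    SH

    project.save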
+ # + # **Note:** this means that these libraries *have* to specify the + # deployment target in order to function well. + # + # * New libraries that *do* require ARC, but have a deployment target of + # < iOS 6.0 or OS X 10.8: + # + # These contain `dispatch_release()` calls and as such need the + # `OS_OBJECT_USE_OBJC` flag set to `1`. + # + # **Note:** libraries that do *not* specify a platform version are + # assumed to have a deployment target of < iOS 6.0 or OS X 10.8. + # + # For more information, see: https://opensource.apple.com/source/libdispatch/libdispatch-228.18/os/object.h + # + # @param [Specification::Consumer] consumer + # The consumer for the specification for which the compiler flags + # are needed. + # + # @param [Boolean] arc + # Whether the arc is enabled or not. + # + # @param [Symbol] language + # The language these compiler warnings are for. Can be either :objc or :swift. + # + # @return [String] The compiler flags. + # + def compiler_flags_for_consumer(consumer, arc, language) + flags = consumer.compiler_flags.dup + if !arc && language == :objc + flags << '-fno-objc-arc' + else + platform_name = consumer.platform_name + spec_deployment_target = consumer.spec.deployment_target(platform_name) + if spec_deployment_target.nil? || Version.new(spec_deployment_target) < ENABLE_OBJECT_USE_OBJC_FROM[platform_name] + flags << '-DOS_OBJECT_USE_OBJC=0' + end + end + if target.inhibit_warnings? && language == :objc + flags << '-w -Xanalyzer -analyzer-disable-all-checks' + end + flags * ' ' + end + + def apply_xcconfig_file_ref_to_targets(targets, xcconfig_file_ref, configurations) + targets.each do |config_target| + config_target.build_configurations.each do |configuration| + next unless configurations.include?(configuration.name) + configuration.base_configuration_reference = xcconfig_file_ref + end + end + end + + def create_module_map(native_target) + return super(native_target) unless custom_module_map + + path = target.module_map_path_to_write + UI.message "- Copying module map file to #{UI.path(path)}" do + contents = custom_module_map.read + unless target.build_as_framework? + contents.gsub!(/^(\s*)framework\s+(module[^{}]+){/, '\1\2{') + end + generator = Generator::Constant.new(contents) + update_changed_file(generator, path) + add_file_to_support_group(path) + + linked_path = target.module_map_path + if path != linked_path + linked_path.dirname.mkpath + source = path.relative_path_from(linked_path.dirname) + FileUtils.ln_sf(source, linked_path) + end + + relative_path = target.module_map_path.relative_path_from(sandbox.root).to_s + native_target.build_configurations.each do |c| + c.build_settings['MODULEMAP_FILE'] = relative_path.to_s + end + end + end + + def module_map_additional_headers + return [] unless umbrella_header_paths + + other_paths = umbrella_header_paths - [target.umbrella_header_path] + other_paths.map do |module_map_path| + # exclude other targets umbrella headers, to avoid + # incomplete umbrella warnings + Generator::ModuleMap::Header.new(module_map_path.basename, nil, nil, nil, true) + end + end + + def create_umbrella_header(native_target) + super(native_target) unless custom_module_map + end + + def custom_module_map + @custom_module_map ||= target.file_accessors.first.module_map + end + + def project_file_references_array(files, file_type) + error_message_for_missing_reference = lambda do |sf, target| + "Unable to find #{file_type} ref for `#{sf.basename}` for target `#{target.name}`." 
+ end + files.map do |sf| + begin + project.reference_for_path(sf).tap do |ref| + raise Informative, error_message_for_missing_reference.call(sf, target) unless ref + end + rescue Errno::ENOENT + # Normalize the error for Ruby < 2.7. Ruby 2.7 can crash on a different call of real path compared + # to older versions. This ensures that the error message is consistent. + raise Informative, error_message_for_missing_reference.call(sf, target) + end + end + end + + def any_header_mapping_dirs? + return @any_header_mapping_dirs if defined?(@any_header_mapping_dirs) + @any_header_mapping_dirs = target.file_accessors.any? { |fa| fa.spec_consumer.header_mappings_dir } + end + + def header_mappings_dir(file_accessor) + @header_mappings_dirs ||= {} + return @header_mappings_dirs[file_accessor] if @header_mappings_dirs.key?(file_accessor) + @header_mappings_dirs[file_accessor] = if dir = file_accessor.spec_consumer.header_mappings_dir + file_accessor.path_list.root + dir + end + end + + def add_header(file_accessor, build_file, public_headers, project_headers, private_headers, native_target) + file_ref = build_file.file_ref + acl = if !target.build_as_framework? # Headers are already rooted at ${PODS_ROOT}/Headers/P*/[pod]/... + 'Project' + elsif public_headers.include?(file_ref.real_path) + 'Public' + elsif project_headers.include?(file_ref.real_path) + 'Project' + elsif private_headers.include?(file_ref.real_path) + 'Private' + else + 'Project' + end + + if target.build_as_framework? && !header_mappings_dir(file_accessor).nil? && acl != 'Project' + relative_path = if mapping_dir = header_mappings_dir(file_accessor) + file_ref.real_path.relative_path_from(mapping_dir) + else + file_ref.real_path.relative_path_from(file_accessor.path_list.root) + end + compile_build_phase_index = native_target.build_phases.index do |bp| + bp.is_a?(Xcodeproj::Project::Object::PBXSourcesBuildPhase) + end + sub_dir = relative_path.dirname + copy_phase_name = "Copy #{sub_dir} #{acl} Headers" + copy_phase = native_target.copy_files_build_phases.find { |bp| bp.name == copy_phase_name } || + native_target.new_copy_files_build_phase(copy_phase_name) + native_target.build_phases.move(copy_phase, compile_build_phase_index - 1) unless compile_build_phase_index.nil? + copy_phase.symbol_dst_subfolder_spec = :products_directory + copy_phase.dst_path = "$(#{acl.upcase}_HEADERS_FOLDER_PATH)/#{sub_dir}" + copy_phase.add_file_reference(file_ref, true) + else + build_file.settings ||= {} + build_file.settings['ATTRIBUTES'] = [acl] + end + end + + def support_files_group + pod_name = target.pod_name + dir = target.support_files_dir + project.pod_support_files_group(pod_name, dir) + end + + def test_native_target_from_spec(spec, test_native_targets) + test_target_label = target.test_target_label(spec) + test_native_targets.find do |test_native_target| + test_native_target.name == test_target_label + end + end + + # Adds a placeholder native target for the library to the Pods project with the + # appropriate build configurations. + # + # @return [PBXAggregateTarget] the native target that was added. + # + def add_placeholder_target + native_target = project.new_aggregate_target(target.label, [], target.platform.name, deployment_target) + target.user_build_configurations.each do |bc_name, type| + native_target.add_build_configuration(bc_name, type) + end + unless target.archs.empty? 
+ native_target.build_configurations.each do |configuration| + configuration.build_settings['ARCHS'] = target.archs + end + end + native_target + end + + # Adds a shell script phase, intended only for library targets that contain swift, + # to copy the ObjC compatibility header (the -Swift.h file that the swift compiler generates) + # to the built products directory. Additionally, the script phase copies the module map, appending a `.Swift` + # submodule that references the (moved) compatibility header. Since the module map has been moved, the umbrella header + # is _also_ copied, so that it is sitting next to the module map. This is necessary for a successful archive build. + # + # @param [PBXNativeTarget] native_target + # the native target to add the Swift static library script phase into. + # + # @return [Void] + # + def add_swift_library_compatibility_header_phase(native_target) + if custom_module_map + raise Informative, 'Using Swift static libraries with custom module maps is currently not supported. ' \ + "Please build `#{target.label}` as a framework or remove the custom module map." + end + + build_phase = native_target.new_shell_script_build_phase('Copy generated compatibility header') + + relative_module_map_path = target.module_map_path.relative_path_from(target.sandbox.root) + relative_umbrella_header_path = target.umbrella_header_path.relative_path_from(target.sandbox.root) + + build_phase.shell_script = <<-SH.strip_heredoc + COMPATIBILITY_HEADER_PATH="${BUILT_PRODUCTS_DIR}/Swift Compatibility Header/${PRODUCT_MODULE_NAME}-Swift.h" + MODULE_MAP_PATH="${BUILT_PRODUCTS_DIR}/${PRODUCT_MODULE_NAME}.modulemap" + + ditto "${DERIVED_SOURCES_DIR}/${PRODUCT_MODULE_NAME}-Swift.h" "${COMPATIBILITY_HEADER_PATH}" + ditto "${PODS_ROOT}/#{relative_module_map_path}" "${MODULE_MAP_PATH}" + ditto "${PODS_ROOT}/#{relative_umbrella_header_path}" "${BUILT_PRODUCTS_DIR}" + printf "\\n\\nmodule ${PRODUCT_MODULE_NAME}.Swift {\\n header \\"${COMPATIBILITY_HEADER_PATH}\\"\\n requires objc\\n}\\n" >> "${MODULE_MAP_PATH}" + SH + build_phase.input_paths = %W( + ${DERIVED_SOURCES_DIR}/${PRODUCT_MODULE_NAME}-Swift.h + ${PODS_ROOT}/#{relative_module_map_path} + ${PODS_ROOT}/#{relative_umbrella_header_path} + ) + build_phase.output_paths = %W( + ${BUILT_PRODUCTS_DIR}/${PRODUCT_MODULE_NAME}.modulemap + ${BUILT_PRODUCTS_DIR}/#{relative_umbrella_header_path.basename} + ${BUILT_PRODUCTS_DIR}/Swift\ Compatibility\ Header/${PRODUCT_MODULE_NAME}-Swift.h + ) + end + + def validate_targets_contain_sources(native_targets) + native_targets.each do |native_target| + next unless native_target.source_build_phase.files.empty? + raise Informative, "Unable to install the `#{target.label}` pod, because the `#{native_target}` target in Xcode would have no sources to compile." + end + end + + # Raises if a vendored xcframework contains frameworks of mixed linkage or mixed packaging + # + def validate_xcframeworks + target.xcframeworks.each_value do |xcframeworks| + xcframeworks.each do |xcframework| + if xcframework.slices.empty? + raise Informative, "Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}` because it does not contain any binaries." + end + if xcframework.build_type.dynamic_library? + raise Informative, <<-MSG.strip_heredoc + Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}` because it contains dynamic libraries which are not supported. + Use dynamic frameworks for dynamic linking instead. + MSG + end + if xcframework.build_type.static_library? 
+ binary_names = xcframework.slices.map { |slice| File.basename(slice.binary_path, File.extname(slice.binary_path)) }.uniq + if binary_names.size > 1 + raise Informative, <<-MSG.strip_heredoc + Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}` because it contains static libraries + with differing binary names: #{binary_names.to_sentence}. + MSG + end + end + dynamic_slices, static_slices = xcframework.slices.partition(&:dynamic?) + if !dynamic_slices.empty? && !static_slices.empty? + raise Informative, "Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}`, because it contains both static and dynamic frameworks." + end + library_slices, framework_slices = xcframework.slices.partition(&:library?) + if !library_slices.empty? && !framework_slices.empty? + raise Informative, "Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}`, because it contains both libraries and frameworks." + end + end + end + end + + #-----------------------------------------------------------------------# + + class << self + # @param [PodTarget] target the target to be installed + # + # @return [Array] the dSYM paths for the given target + # + def dsym_paths(target) + dsym_paths = target.framework_paths.values.flatten.reject { |fmwk_path| fmwk_path.dsym_path.nil? }.map(&:dsym_path) + dsym_paths.concat(target.xcframeworks.values.flatten.flat_map { |xcframework| xcframework_dsyms(xcframework.path) }) + dsym_paths.map do |dsym_path| + dsym_pathname = Pathname(dsym_path) + dsym_path = "${PODS_ROOT}/#{dsym_pathname.relative_path_from(target.sandbox.root)}" unless dsym_pathname.relative? + dsym_path + end + end + + # @param [PodTarget] target the target to be installed + # + # @return [Array] the bcsymbolmap paths for the given target + # + def bcsymbolmap_paths(target) + target.framework_paths.values.flatten.reject do |fmwk_path| + fmwk_path.bcsymbolmap_paths.nil? + end.flat_map(&:bcsymbolmap_paths).uniq + end + + # @param [Pathname] xcframework_path + # the base path of the .xcframework bundle + # + # @return [Array] all found .dSYM paths + # + def xcframework_dsyms(xcframework_path) + basename = File.basename(xcframework_path, '.xcframework') + dsym_basename = basename + '.dSYMs' + path = xcframework_path.dirname + dsym_basename + if File.directory?(path) + Dir.glob(path + '*.dSYM') + else + [] + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_integrator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_integrator.rb new file mode 100644 index 0000000..a1df9bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_integrator.rb @@ -0,0 +1,312 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # This class is responsible for integrating a pod target. This includes integrating + # the test targets included by each pod target. + # + class PodTargetIntegrator + # @return [TargetInstallationResult] the installation result of the target that should be integrated. + # + attr_reader :target_installation_result + + # @return [Boolean] whether to use input/output paths for build phase scripts + # + attr_reader :use_input_output_paths + alias use_input_output_paths? 
use_input_output_paths + + # Initialize a new instance + # + # @param [TargetInstallationResult] target_installation_result @see #target_installation_result + # @param [Boolean] use_input_output_paths @see #use_input_output_paths + # + def initialize(target_installation_result, use_input_output_paths: true) + @target_installation_result = target_installation_result + @use_input_output_paths = use_input_output_paths + end + + # Integrates the pod target. + # + # @return [void] + # + def integrate! + UI.section(integration_message) do + target_installation_result.non_library_specs_by_native_target.each do |native_target, spec| + add_embed_frameworks_script_phase(native_target, spec) + add_copy_resources_script_phase(native_target, spec) + add_on_demand_resources(native_target, spec) if spec.app_specification? + UserProjectIntegrator::TargetIntegrator.create_or_update_user_script_phases(script_phases_for_specs(spec), native_target) + end + add_copy_dsyms_script_phase(target_installation_result.native_target) + add_copy_xcframeworks_script_phase(target_installation_result.native_target) + UserProjectIntegrator::TargetIntegrator.create_or_update_user_script_phases(script_phases_for_specs(target.library_specs), target_installation_result.native_target) + end + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "#<#{self.class} for target `#{target.label}'>" + end + + private + + # @!group Integration steps + #---------------------------------------------------------------------# + + # Find or create a 'Copy Pods Resources' build phase + # + # @param [PBXNativeTarget] native_target + # the native target for which to add the copy resources script + # + # @param [Pod::Specification] spec + # the specification to integrate + # + # @return [void] + # + def add_copy_resources_script_phase(native_target, spec) + script_path = "${PODS_ROOT}/#{target.copy_resources_script_path_for_spec(spec).relative_path_from(target.sandbox.root)}" + + input_paths_by_config = {} + output_paths_by_config = {} + + dependent_targets = if spec.test_specification? + target.dependent_targets_for_test_spec(spec) + else + target.dependent_targets_for_app_spec(spec) + end + host_target_spec_names = target.app_host_dependent_targets_for_spec(spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + resource_paths = dependent_targets.flat_map do |dependent_target| + spec_paths_to_include = dependent_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << spec.name if dependent_target == target + dependent_target.resource_paths.values_at(*spec_paths_to_include).flatten.compact + end.uniq + + if use_input_output_paths? && !resource_paths.empty? 
+ input_file_list_path = target.copy_resources_script_input_files_path_for_spec(spec) + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + resource_paths + + output_file_list_path = target.copy_resources_script_output_files_path_for_spec(spec) + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths_by_config[output_paths_key] = UserProjectIntegrator::TargetIntegrator.resource_output_paths(resource_paths) + end + + if resource_paths.empty? + UserProjectIntegrator::TargetIntegrator.remove_copy_resources_script_phase_from_target(native_target) + else + UserProjectIntegrator::TargetIntegrator.create_or_update_copy_resources_script_phase_to_target( + native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Find or create a 'Embed Pods Frameworks' Run Script Build Phase + # + # @param [PBXNativeTarget] native_target + # the native target for which to add the embed frameworks script + # + # @param [Pod::Specification] spec + # the specification to integrate + # + # @return [void] + # + def add_embed_frameworks_script_phase(native_target, spec) + script_path = "${PODS_ROOT}/#{target.embed_frameworks_script_path_for_spec(spec).relative_path_from(target.sandbox.root)}" + + input_paths_by_config = {} + output_paths_by_config = {} + + dependent_targets = if spec.test_specification? + target.dependent_targets_for_test_spec(spec) + else + target.dependent_targets_for_app_spec(spec) + end + host_target_spec_names = target.app_host_dependent_targets_for_spec(spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + framework_paths = dependent_targets.flat_map do |dependent_target| + spec_paths_to_include = dependent_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << spec.name if dependent_target == target + dependent_target.framework_paths.values_at(*spec_paths_to_include).flatten.compact + end.uniq + xcframework_paths = dependent_targets.flat_map do |dependent_target| + spec_paths_to_include = dependent_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << spec.name if dependent_target == target + dependent_target.xcframeworks.values_at(*spec_paths_to_include).flatten.compact + end.uniq + + if use_input_output_paths? && !framework_paths.empty? || !xcframework_paths.empty? 
+ input_file_list_path = target.embed_frameworks_script_input_files_path_for_spec(spec) + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + UserProjectIntegrator::TargetIntegrator.embed_frameworks_input_paths(framework_paths, xcframework_paths) + + output_file_list_path = target.embed_frameworks_script_output_files_path_for_spec(spec) + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths_by_config[output_paths_key] = UserProjectIntegrator::TargetIntegrator.embed_frameworks_output_paths(framework_paths, xcframework_paths) + end + + if framework_paths.empty? && xcframework_paths.empty? + UserProjectIntegrator::TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + else + UserProjectIntegrator::TargetIntegrator.create_or_update_embed_frameworks_script_phase_to_target( + native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Find or create a 'Prepare Artifacts' Run Script Build Phase + # + # @param [PBXNativeTarget] native_target + # the native target for which to add the prepare artifacts script + # + # @return [void] + # + def add_copy_xcframeworks_script_phase(native_target) + script_path = "${PODS_ROOT}/#{target.copy_xcframeworks_script_path.relative_path_from(target.sandbox.root)}" + + input_paths_by_config = {} + output_paths_by_config = {} + + xcframeworks = target.xcframeworks.values.flatten + + if use_input_output_paths? && !xcframeworks.empty? + input_file_list_path = target.copy_xcframeworks_script_input_files_path + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths = input_paths_by_config[input_paths_key] = [script_path] + + framework_paths = xcframeworks.map { |xcf| "${PODS_ROOT}/#{xcf.path.relative_path_from(target.sandbox.root)}" } + input_paths.concat framework_paths + + output_file_list_path = target.copy_xcframeworks_script_output_files_path + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths_by_config[output_paths_key] = xcframeworks.map do |xcf| + "#{Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE}/#{xcf.target_name}/#{xcf.name}.framework" + end + end + + if xcframeworks.empty? + UserProjectIntegrator::TargetIntegrator.remove_copy_xcframeworks_script_phase_from_target(native_target) + else + UserProjectIntegrator::TargetIntegrator.create_or_update_copy_xcframeworks_script_phase_to_target( + native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Adds a script phase that copies and strips dSYMs that are part of this target. Note this only deals with + # vendored dSYMs. 
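The output paths registered for the copy-xcframeworks phase above point at one copied slice per XCFramework, grouped under the owning target's folder inside the XCFrameworks build dir. A plain-Ruby sketch of that mapping with hypothetical names (the build-dir string is assumed to resolve the same way as the XCFRAMEWORKS_BUILD_DIR_VARIABLE used above):

    xcframework_names = ['DemoKit', 'DemoCore']          # hypothetical vendored XCFrameworks
    xcframeworks_build_dir = '${PODS_XCFRAMEWORKS_BUILD_DIR}'
    owning_target = 'DemoPod'

    output_paths = xcframework_names.map do |name|
      # One copied framework per XCFramework, namespaced by the owning target.
      "#{xcframeworks_build_dir}/#{owning_target}/#{name}.framework"
    end

    p output_paths
    # => ["${PODS_XCFRAMEWORKS_BUILD_DIR}/DemoPod/DemoKit.framework",
    #     "${PODS_XCFRAMEWORKS_BUILD_DIR}/DemoPod/DemoCore.framework"]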
+ # + # @param [PBXNativeTarget] native_target + # the native target for which to add the copy dSYM files build phase. + # + # @return [void] + # + def add_copy_dsyms_script_phase(native_target) + script_path = "${PODS_ROOT}/#{target.copy_dsyms_script_path.relative_path_from(target.sandbox.root)}" + dsym_paths = PodTargetInstaller.dsym_paths(target) + bcsymbolmap_paths = PodTargetInstaller.bcsymbolmap_paths(target) + + if dsym_paths.empty? && bcsymbolmap_paths.empty? + script_phase = native_target.shell_script_build_phases.find do |bp| + bp.name && bp.name.end_with?(UserProjectIntegrator::TargetIntegrator::COPY_DSYM_FILES_PHASE_NAME) + end + native_target.build_phases.delete(script_phase) if script_phase.present? + return + end + + phase_name = UserProjectIntegrator::TargetIntegrator::BUILD_PHASE_PREFIX + UserProjectIntegrator::TargetIntegrator::COPY_DSYM_FILES_PHASE_NAME + phase = UserProjectIntegrator::TargetIntegrator.create_or_update_shell_script_build_phase(native_target, phase_name) + phase.shell_script = %("#{script_path}"\n) + + input_paths_by_config = {} + output_paths_by_config = {} + if use_input_output_paths? + input_file_list_path = target.copy_dsyms_script_input_files_path + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths = input_paths_by_config[input_paths_key] = [] + input_paths.concat([dsym_paths, *bcsymbolmap_paths].flatten.compact) + + output_file_list_path = target.copy_dsyms_script_output_files_path + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths = output_paths_by_config[output_paths_key] = [] + + dsym_output_paths = dsym_paths.map { |dsym_path| "${DWARF_DSYM_FOLDER_PATH}/#{File.basename(dsym_path)}" } + bcsymbolmap_output_paths = bcsymbolmap_paths.map { |bcsymbolmap_path| "${DWARF_DSYM_FOLDER_PATH}/#{File.basename(bcsymbolmap_path)}" } + output_paths.concat([dsym_output_paths, *bcsymbolmap_output_paths].flatten.compact) + end + + UserProjectIntegrator::TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + end + + # Adds the ODRs that are related to this app spec. This includes the app spec dependencies as well as the ODRs + # coming from the app spec itself. + # + # @param [Xcodeproj::PBXNativeTarget] native_target + # the native target for which to add the ODR file references into. + # + # @param [Specification] app_spec + # the app spec to integrate ODRs for. + # + # @return [void] + # + def add_on_demand_resources(native_target, app_spec) + dependent_targets = target.dependent_targets_for_app_spec(app_spec) + parent_odr_group = native_target.project.group_for_spec(app_spec.name) + + # Add ODRs of the app spec dependencies first. + dependent_targets.each do |pod_target| + file_accessors = pod_target.file_accessors.select do |fa| + fa.spec.library_specification? || + fa.spec.test_specification? 
&& pod_target.test_app_hosts_by_spec[fa.spec]&.first == app_spec + end + target_odr_group_name = "#{pod_target.label}-OnDemandResources" + UserProjectIntegrator::TargetIntegrator.update_on_demand_resources(target.sandbox, native_target.project, + native_target, file_accessors, + parent_odr_group, target_odr_group_name) + end + + # Now add the ODRs of our own app spec declaration. + file_accessor = target.file_accessors.find { |fa| fa.spec == app_spec } + target_odr_group_name = "#{target.subspec_label(app_spec)}-OnDemandResources" + UserProjectIntegrator::TargetIntegrator.update_on_demand_resources(target.sandbox, native_target.project, + native_target, file_accessor, + parent_odr_group, target_odr_group_name) + end + + # @return [String] the message that should be displayed for the target + # integration. + # + def integration_message + "Integrating target `#{target.name}`" + end + + # @return [PodTarget] the target part of the installation result. + # + def target + target_installation_result.target + end + + # @param [Specification, Array] specs + # the specs to return script phrases from. + # + # @return [ArrayString>] an array of all combined script phases from the specs. + # + def script_phases_for_specs(specs) + Array(specs).flat_map { |spec| spec.consumer(target.platform).script_phases } + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pods_project_writer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pods_project_writer.rb new file mode 100644 index 0000000..e190631 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/pods_project_writer.rb @@ -0,0 +1,90 @@ +module Pod + class Installer + class Xcode + class PodsProjectWriter + # @return [Sandbox] sandbox + # The Pods sandbox instance. + # + attr_reader :sandbox + + # @return [Array] projects + # The list project to write. + # + attr_reader :projects + + # @return [Hash] pod_target_installation_results + # Hash of pod target name to installation results. + # + attr_reader :pod_target_installation_results + + # @return [InstallationOptions] installation_options + # + attr_reader :installation_options + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Project] projects @see #project + # @param [Hash] pod_target_installation_results @see #pod_target_installation_results + # @param [InstallationOptions] installation_options @see #installation_options + # + def initialize(sandbox, projects, pod_target_installation_results, installation_options) + @sandbox = sandbox + @projects = projects + @pod_target_installation_results = pod_target_installation_results + @installation_options = installation_options + end + + # Writes projects to disk. + # + # @yield If provided, this block will execute right before writing the projects to disk. + # + def write! + cleanup_projects(projects) + + projects.each do |project| + library_product_types = [:framework, :dynamic_library, :static_library] + results_by_native_target = Hash[pod_target_installation_results.map do |_, result| + [result.native_target, result] + end] + project.recreate_user_schemes(false) do |scheme, target| + next unless target.respond_to?(:symbol_type) + next unless library_product_types.include? 
target.symbol_type + installation_result = results_by_native_target[target] + next unless installation_result + installation_result.test_native_targets.each do |test_native_target| + scheme.add_test_target(test_native_target) + end + end + end + + yield if block_given? + + save_projects(projects) + end + + private + + # Cleans up projects before writing. + # + def cleanup_projects(projects) + projects.each do |project| + [project.pods, project.support_files_group, + project.development_pods, project.dependencies_group].each { |group| group.remove_from_project if group.empty? } + end + end + + # Sorts and then saves projects which writes them to disk. + # + def save_projects(projects) + projects.each do |project| + project.sort(:groups_position => :below) + UI.message "- Writing Xcode project file to #{UI.path project.path}" do + project.save + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/project_generator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/project_generator.rb new file mode 100644 index 0000000..4c52c51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/project_generator.rb @@ -0,0 +1,120 @@ +module Pod + class Installer + class Xcode + # Responsible for creating and preparing a Pod::Project instance + # + class ProjectGenerator + # @return [Sandbox] sandbox + # The Pods sandbox instance. + # + attr_reader :sandbox + + # @return [String] path + # Path of the project. + # + attr_reader :path + + # @return [Array] pod_targets + # Array of pod targets this project includes. + # + attr_reader :pod_targets + + # @return [Hash{String=>Symbol}] A hash representing all the user build + # configurations across all integration targets. Each key + # corresponds to the name of a configuration and its value to + # its type (`:debug` or `:release`). + # + attr_reader :build_configurations + + # @return [Array] The list of all platforms this project supports. + # + attr_reader :platforms + + # @return [Integer] Object version for the Xcode project. + # + attr_reader :object_version + + # @return [String] Path to the Podfile included in the project. + # + attr_reader :podfile_path + + # @return [Bool] Bool indicating if this project is a pod target subproject. + # Used by `generate_multiple_pod_projects` installation option. + # + attr_reader :pod_target_subproject + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [String] path @see #path + # @param [Array] pod_targets @see #pod_targets + # @param [Hash{String=>Symbol}] build_configurations @see #build_configurations + # @param [Array] platforms @see #platforms + # @param [Integer] object_version @see #object_version + # @param [String] podfile_path @see #podfile_path + # + def initialize(sandbox, path, pod_targets, build_configurations, platforms, + object_version, podfile_path = nil, pod_target_subproject: false) + @sandbox = sandbox + @path = path + @pod_targets = pod_targets + @build_configurations = build_configurations + @platforms = platforms + @object_version = object_version + @podfile_path = podfile_path + @pod_target_subproject = pod_target_subproject + end + + public + + # @return [Project] Generated and prepared project. + # + def generate! 
+ project = create_project(path, object_version, pod_target_subproject) + prepare(sandbox, project, pod_targets, build_configurations, platforms, podfile_path) + project + end + + private + + def create_project(path, object_version, pod_target_subproject) + object_version ||= Xcodeproj::Constants::DEFAULT_OBJECT_VERSION + Pod::Project.new(path, false, object_version, :pod_target_subproject => pod_target_subproject) + end + + def prepare(sandbox, project, pod_targets, build_configurations, platforms, podfile_path) + UI.message "- Creating #{project.project_name} project" do + build_configurations.each do |name, type| + project.add_build_configuration(name, type) + end + # Reset symroot just in case the user has added a new build configuration other than 'Debug' or 'Release'. + project.symroot = Pod::Project::LEGACY_BUILD_ROOT + pod_names = pod_targets.map(&:pod_name).uniq + pod_names.each do |pod_name| + local = sandbox.local?(pod_name) + path = sandbox.pod_dir(pod_name) + was_absolute = sandbox.local_path_was_absolute?(pod_name) + project.add_pod_group(pod_name, path, local, was_absolute) + end + if podfile_path + project.add_podfile(podfile_path) + end + osx_deployment_target = platforms.select { |p| p.name == :osx }.map(&:deployment_target).min + ios_deployment_target = platforms.select { |p| p.name == :ios }.map(&:deployment_target).min + watchos_deployment_target = platforms.select { |p| p.name == :watchos }.map(&:deployment_target).min + tvos_deployment_target = platforms.select { |p| p.name == :tvos }.map(&:deployment_target).min + project.build_configurations.each do |build_configuration| + build_configuration.build_settings['MACOSX_DEPLOYMENT_TARGET'] = osx_deployment_target.to_s if osx_deployment_target + build_configuration.build_settings['IPHONEOS_DEPLOYMENT_TARGET'] = ios_deployment_target.to_s if ios_deployment_target + build_configuration.build_settings['WATCHOS_DEPLOYMENT_TARGET'] = watchos_deployment_target.to_s if watchos_deployment_target + build_configuration.build_settings['TVOS_DEPLOYMENT_TARGET'] = tvos_deployment_target.to_s if tvos_deployment_target + build_configuration.build_settings['STRIP_INSTALLED_PRODUCT'] = 'NO' + build_configuration.build_settings['CLANG_ENABLE_OBJC_ARC'] = 'YES' + build_configuration.build_settings['CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED'] = 'YES' + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installation_result.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installation_result.rb new file mode 100644 index 0000000..3e1d55e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installation_result.rb @@ -0,0 +1,140 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # A simple container produced after a target installation is completed. + # + class TargetInstallationResult + # @return [Target] target + # The target this installation result is for. + # + attr_reader :target + + # @return [PBXNativeTarget] native_target + # The native target that was produced for this target. + # + attr_reader :native_target + + # @return [Array] resource_bundle_targets + # The resource bundle targets that were produced for this target. Can be empty if the target had + # no resource bundles. 
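The prepare step above derives one project-wide deployment target per platform by taking the minimum across all pod targets' platforms. A self-contained sketch of that selection with stand-in platform objects (names and versions are illustrative only):

    DummyPlatform = Struct.new(:name, :deployment_target)
    platforms = [
      DummyPlatform.new(:ios, Gem::Version.new('11.0')),
      DummyPlatform.new(:ios, Gem::Version.new('9.0')),
      DummyPlatform.new(:osx, Gem::Version.new('10.13')),
    ]

    # The lowest deployment target per platform becomes the project-level setting.
    ios_deployment_target = platforms.select { |p| p.name == :ios }.map(&:deployment_target).min
    osx_deployment_target = platforms.select { |p| p.name == :osx }.map(&:deployment_target).min

    puts ios_deployment_target # => 9.0
    puts osx_deployment_target # => 10.13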
+ # + attr_reader :resource_bundle_targets + + # @return [Array] test_native_targets + # The test native targets that were produced for this target. Can be empty if there were no test + # native targets created (e.g. no test specs present). + # + attr_reader :test_native_targets + + # @return [Hash{String=>Array}] test_resource_bundle_targets + # The test resource bundle targets that were produced for this target keyed by test spec name. + # Can be empty if the target had no resource bundles for any tests. + # + attr_reader :test_resource_bundle_targets + + # @return [Array] test_app_host_targets + # The test app host native targets that were produced for this target. Can be empty. + # + attr_reader :test_app_host_targets + + # @return [Hash{Specification => PBXNativeTarget}] app_native_targets + # The app native targets that were produced for this target. Can be empty if there were no app + # native targets created (e.g. no app specs present). + # + attr_reader :app_native_targets + + # @return [Hash{String=>Array}] app_resource_bundle_targets + # The app resource bundle targets that were produced for this target keyed by app spec name. + # Can be empty if the target had no resource bundles for any apps. + # + attr_reader :app_resource_bundle_targets + + # Initialize a new instance + # + # @param [Target] target @see #target + # @param [PBXNativeTarget] native_target @see #native_target + # @param [Array] resource_bundle_targets @see #resource_bundle_targets + # @param [Array] test_native_targets @see #test_native_targets + # @param [Hash{String=>Array}] test_resource_bundle_targets @see #test_resource_bundle_targets + # @param [Array] test_app_host_targets @see #test_app_host_targets + # @param [Hash{Specification => PBXNativeTarget}] app_native_targets @see #app_native_targets + # @param [Hash{String=>Array}] app_resource_bundle_targets @see #app_resource_bundle_targets + # + def initialize(target, native_target, resource_bundle_targets = [], test_native_targets = [], + test_resource_bundle_targets = {}, test_app_host_targets = [], + app_native_targets = {}, app_resource_bundle_targets = {}) + @target = target + @native_target = native_target + @resource_bundle_targets = resource_bundle_targets + @test_native_targets = test_native_targets + @test_resource_bundle_targets = test_resource_bundle_targets + @test_app_host_targets = test_app_host_targets + @app_native_targets = app_native_targets + @app_resource_bundle_targets = app_resource_bundle_targets + end + + # Returns the corresponding native target to use based on the provided specification. + # + # @param [Specification] spec + # The specification to base from in order to find the native target. + # + # @return [PBXNativeTarget, Nil] the native target to use or `nil` if none is found. + # + def native_target_for_spec(spec) + return native_target if spec.library_specification? + return test_native_target_from_spec(spec) if spec.test_specification? + app_native_target_from_spec(spec) if spec.app_specification? + end + + # @return [Hash{PBXNativeTarget => Specification}] a hash where the keys are the test native targets and the value + # is the test spec associated with this native target. + # + def test_specs_by_native_target + test_specs_by_native_target = Hash[target.test_specs.map { |spec| [test_native_target_from_spec(spec), spec] }] + test_specs_by_native_target.delete_if { |k, _| k.nil? 
} + end + + # @return [Hash{PBXNativeTarget => Specification}] a hash where the keys are the app native targets and the value + # is the app spec associated with this native target. + # + def app_specs_by_native_target + app_specs_by_native_target = Hash[target.app_specs.map { |spec| [app_native_target_from_spec(spec), spec] }] + app_specs_by_native_target.delete_if { |k, _| k.nil? } + end + + # @return [Hash{PBXNativeTarget => Specification}] a hash where the keys are the native targets and the value + # is the non-library spec associated with this native target. + # + def non_library_specs_by_native_target + test_specs_by_native_target.merge(app_specs_by_native_target) + end + + # @param label [String] the label of the app host target. + # + # @return [PBXNativeTarget] the app host target with the given target label. + # + def app_host_target_labelled(label) + app_native_targets.values.find do |app_native_target| + app_native_target.name == label + end || test_app_host_targets.find do |app_native_target| + app_native_target.name == label + end + end + + private + + def test_native_target_from_spec(spec) + test_native_targets.find do |test_native_target| + test_native_target.name == target.test_target_label(spec) + end + end + + def app_native_target_from_spec(spec) + app_native_targets[spec] + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installer.rb new file mode 100644 index 0000000..353e3e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installer.rb @@ -0,0 +1,257 @@ +require 'stringio' + +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Controller class responsible of creating and configuring the static + # library target in Pods project. It also creates the support file needed + # by the target. + # + class TargetInstaller + include TargetInstallerHelper + + # @return [Sandbox] sandbox + # The sandbox where the support files should be generated. + # + attr_reader :sandbox + + # @return [Pod::Project] + # The project to install the target into. + # + attr_reader :project + + # @return [Target] target + # The library whose target needs to be generated. + # + attr_reader :target + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Pod::Project] project @see #project + # @param [Target] target @see #target + # + def initialize(sandbox, project, target) + @sandbox = sandbox + @project = project + @target = target + end + + private + + #-----------------------------------------------------------------------# + + # @!group Installation steps + + # Adds the target for the library to the Pods project with the + # appropriate build configurations. + # + # @note The `PODS_HEADERS_SEARCH_PATHS` overrides the xcconfig. + # + # @return [PBXNativeTarget] the native target that was added. + # + def add_target + product_type = target.product_type + name = target.label + platform = target.platform.name + language = target.uses_swift? ? 
:swift : :objc + native_target = project.new_target(product_type, name, platform, deployment_target, nil, language, target.product_basename) + native_target.product_reference.name = name + + target.user_build_configurations.each do |bc_name, type| + native_target.add_build_configuration(bc_name, type) + end + + native_target.build_configurations.each do |configuration| + configuration.build_settings.merge!(custom_build_settings) + end + + native_target + end + + # @return [String] The deployment target. + # + def deployment_target + target.platform.deployment_target.to_s + end + + # Returns the customized build settings which are overridden in the build + # settings of the user target. + # + # @return [Hash{String => String}] + # + def custom_build_settings + settings = {} + + unless target.archs.empty? + settings['ARCHS'] = target.archs + end + + if target.build_as_static_framework? + settings['MACH_O_TYPE'] = 'staticlib' + elsif target.build_as_static_library? + settings.merge!('OTHER_LDFLAGS' => '', 'OTHER_LIBTOOLFLAGS' => '') + end + + settings + end + + # Creates the directory where to store the support files of the target. + # + def create_support_files_dir + target.support_files_dir.mkpath + end + + # Remove temp file whose store .prefix/config/dummy file. + # + def clean_support_files_temp_dir + support_files_temp_dir.rmtree if support_files_temp_dir.exist? + end + + # @return [String] The temp file path to store temporary files. + # + def support_files_temp_dir + sandbox.target_support_files_dir('generated_files_tmp') + end + + # Creates the Info.plist file which sets public framework attributes + # + # @param [Pathname] path + # the path to save the generated Info.plist file. + # + # @param [PBXNativeTarget] native_target + # the native target to link the generated Info.plist file into. + # + # @param [Version] version + # the version to use for when generating this Info.plist file. + # + # @param [Platform] platform + # the platform to use for when generating this Info.plist file. + # + # @param [Symbol] bundle_package_type + # the CFBundlePackageType of the target this Info.plist file is for. + # + # @param [Hash] additional_entries + # additional entries for the generated Info.plist + # + # @return [void] + # + def create_info_plist_file(path, native_target, version, platform, bundle_package_type = :fmwk, additional_entries: {}) + create_info_plist_file_with_sandbox(@sandbox, path, native_target, version, platform, bundle_package_type, + :additional_entries => additional_entries) + add_file_to_support_group(path) + end + + # Creates the module map file which ensures that the umbrella header is + # recognized with a customized path + # + # @param [PBXNativeTarget] native_target + # the native target to link the module map file into. + # + # @return [void] + # + def create_module_map(native_target) + path = target.module_map_path_to_write + UI.message "- Generating module map file at #{UI.path(path)}" do + generator = Generator::ModuleMap.new(target) + yield generator if block_given? 
+ update_changed_file(generator, path) + add_file_to_support_group(path) + + linked_path = target.module_map_path + if path != linked_path + linked_path.dirname.mkpath + source = path.relative_path_from(linked_path.dirname) + FileUtils.ln_sf(source, linked_path) + end + + relative_path_string = target.module_map_path.relative_path_from(sandbox.root).to_s + native_target.build_configurations.each do |c| + c.build_settings['MODULEMAP_FILE'] = relative_path_string + end + end + end + + # Generates a header which ensures that all header files are exported + # in the module map + # + # @param [PBXNativeTarget] native_target + # the native target to link the umbrella header file into. + # + # @yield_param [Generator::UmbrellaHeader] + # yielded once to configure the imports + # + # @return [void] + # + def create_umbrella_header(native_target) + path = target.umbrella_header_path_to_write + UI.message "- Generating umbrella header at #{UI.path(path)}" do + generator = Generator::UmbrellaHeader.new(target) + yield generator if block_given? + update_changed_file(generator, path) + + # Add the file to the support group and the native target, + # so it will been added to the header build phase + file_ref = add_file_to_support_group(path) + build_file = native_target.headers_build_phase.add_file_reference(file_ref) + + linked_path = target.umbrella_header_path + if path != linked_path + linked_path.dirname.mkpath + source = path.relative_path_from(linked_path.dirname) + FileUtils.ln_sf(source, linked_path) + end + + acl = target.build_as_framework? ? 'Public' : 'Project' + build_file.settings ||= {} + build_file.settings['ATTRIBUTES'] = [acl] + end + end + + # Generates a dummy source file for each target so libraries that contain + # only categories build. + # + # @param [PBXNativeTarget] native_target + # the native target to link the dummy source file into. + # + # @return [void] + # + def create_dummy_source(native_target) + path = target.dummy_source_path + UI.message "- Generating dummy source at #{UI.path(path)}" do + generator = Generator::DummySource.new(target.label) + update_changed_file(generator, path) + file_reference = add_file_to_support_group(path) + native_target.source_build_phase.add_file_reference(file_reference) + end + end + + private + + #-----------------------------------------------------------------------# + + # @!group Private helpers. + + # @return [PBXGroup] the group where the file references to the support + # files should be stored. + # + attr_reader :support_files_group + + # Adds a reference to the given file in the support group of this target. + # + # @param [Pathname] path + # The path of the file to which the reference should be added. + # + # @return [PBXFileReference] the file reference of the added file. 
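A minimal sketch, assuming the TargetInstaller and TargetInstallationResult classes introduced above, of how a hypothetical subclass could sequence these private installation steps. The real PodTargetInstaller/AggregateTargetInstaller subclasses are not part of this excerpt, and the Info.plist location and version below are illustrative only:

    require 'cocoapods'

    class ExampleTargetInstaller < Pod::Installer::Xcode::PodsProjectGenerator::TargetInstaller
      def install!
        native_target = add_target                    # adds the PBXNativeTarget with the user build configurations
        create_support_files_dir
        if target.build_as_framework?
          info_plist_path = target.support_files_dir + 'Info.plist'   # illustrative location
          create_info_plist_file(info_plist_path, native_target, Pod::Version.new('1.0.0'), target.platform)
          create_module_map(native_target)            # writes and symlinks the .modulemap, sets MODULEMAP_FILE
          create_umbrella_header(native_target)       # writes the umbrella header and adds it to the headers phase
        end
        create_dummy_source(native_target)            # so targets containing only categories still build
        Pod::Installer::Xcode::PodsProjectGenerator::TargetInstallationResult.new(target, native_target)
      end
    end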
+ # + def add_file_to_support_group(path) + support_files_group.new_file(path) + end + + #-----------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installer_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installer_helper.rb new file mode 100644 index 0000000..72b16b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator/target_installer_helper.rb @@ -0,0 +1,110 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + module TargetInstallerHelper + # @param [Generator] generator + # the generator to use for generating the content. + # + # @param [Pathname] path + # the pathname to save the content into. + # + # Saves the content the provided path unless the path exists and the contents are exactly the same. + # + def update_changed_file(generator, path) + if path.exist? + contents = generator.generate.to_s + content_stream = StringIO.new(contents) + identical = File.open(path, 'rb') { |f| FileUtils.compare_stream(f, content_stream) } + return if identical + + File.open(path, 'w') { |f| f.write(contents) } + else + path.dirname.mkpath + generator.save_as(path) + end + end + + # Creates the Info.plist file which sets public framework attributes + # + # @param [Sandbox] sandbox @see #sandbox + # The sandbox where the generated Info.plist file should be saved. + # + # @param [Pathname] path + # the path to save the generated Info.plist file. + # + # @param [PBXNativeTarget] native_target + # the native target to link the generated Info.plist file into. + # + # @param [String] version + # the version to use for when generating this Info.plist file. + # + # @param [Platform] platform + # the platform to use for when generating this Info.plist file. + # + # @param [Symbol] bundle_package_type + # the CFBundlePackageType of the target this Info.plist file is for. + # + # @param [Hash] additional_entries + # any additional entries to include in this Info.plist file. + # + # @param [String] build_setting_value + # an optional value to set for the `INFOPLIST_FILE` build setting on the + # native target. If none is specified then the value is calculated from the + # Info.plist path relative to the sandbox root. + # + # @return [void] + # + def create_info_plist_file_with_sandbox(sandbox, path, native_target, version, platform, + bundle_package_type = :fmwk, additional_entries: {}, + build_setting_value: nil) + UI.message "- Generating Info.plist file at #{UI.path(path)}" do + generator = Generator::InfoPlistFile.new(version, platform, bundle_package_type, additional_entries) + update_changed_file(generator, path) + + build_setting_value ||= path.relative_path_from(sandbox.root).to_s + native_target.build_configurations.each do |c| + c.build_settings['INFOPLIST_FILE'] = build_setting_value + end + end + end + + # Creates a prefix header file which imports `UIKit` or `Cocoa` according + # to the platform of the target. This file also include any prefix header + # content reported by the specification of the pods. + # + # @param [Pathname] path + # the path to generate the prefix header for. + # + # @param [Array] file_accessors + # the file accessors to use for this prefix header that point to a path of a prefix header. 
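A minimal usage sketch of the `update_changed_file` helper defined above (it is exposed as a module function); the pod name and output path are illustrative, and `Generator::DummySource` is used simply because it provides the `#generate`/`#save_as` interface the helper expects:

    require 'cocoapods'
    require 'pathname'

    generator = Pod::Generator::DummySource.new('Pods-Example')
    path      = Pathname('Pods/Target Support Files/Pods-Example/Pods-Example-dummy.m')

    # Rewrites the file only when the generated content differs from what is on
    # disk, so unchanged support files keep their modification times.
    Pod::Installer::Xcode::PodsProjectGenerator::TargetInstallerHelper.update_changed_file(generator, path)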
+ # + # @param [Platform] platform + # the platform to use for this prefix header. + # + # @param [PBXNativeTarget] native_target + # the native target on which the prefix header should be configured for. + # + # @param [Pathname] project_directory + # the directory containing the project of the target + # + # @return [void] + # + def create_prefix_header(path, file_accessors, platform, native_target, project_directory) + generator = Generator::PrefixHeader.new(file_accessors, platform) + update_changed_file(generator, path) + + relative_path = path.relative_path_from(project_directory).to_s + native_target.build_configurations.each do |c| + c.build_settings['GCC_PREFIX_HEADER'] = relative_path + end + end + + module_function :update_changed_file + module_function :create_info_plist_file_with_sandbox + module_function :create_prefix_header + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator_result.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator_result.rb new file mode 100644 index 0000000..7280d02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/pods_project_generator_result.rb @@ -0,0 +1,54 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # A simple container produced after a pod project generation is completed. + # + class PodsProjectGeneratorResult + # @return [Project] project + # + attr_reader :project + + # @return [Hash{Project => Array}] Project by pod targets map + # + attr_reader :projects_by_pod_targets + + # @return [InstallationResults] target installation results + # + attr_reader :target_installation_results + + # Initialize a new instance + # + # @param [Project] project @see #project + # @param [Hash{Project => Array}] projects_by_pod_targets @see #projects_by_pod_targets + # @param [InstallationResults] target_installation_results @see #target_installation_results + # + def initialize(project, projects_by_pod_targets, target_installation_results) + @project = project + @projects_by_pod_targets = projects_by_pod_targets + @target_installation_results = target_installation_results + end + + # @param [Pod::Specification] spec + # A spec which was included in the generated project + # + # @return [Xcodeproj::PBXNativeTarget] the native target for the spec + # + def native_target_for_spec(spec) + installation_results_by_spec[spec.root].native_target_for_spec(spec) + end + + private + + def installation_results_by_spec + @target_installation_results_by_spec ||= begin + target_installation_results.pod_target_installation_results.values.each_with_object({}) do |installation_results, hash| + hash[installation_results.target.root_spec] = installation_results + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/single_pods_project_generator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/single_pods_project_generator.rb new file mode 100644 index 0000000..1287727 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/single_pods_project_generator.rb @@ -0,0 +1,38 @@ +module Pod + class Installer + class Xcode + # The {SinglePodsProjectGenerator} handles generation of the 'Pods/Pods.xcodeproj' + # + class SinglePodsProjectGenerator < PodsProjectGenerator + # Generates single `Pods/Pods.xcodeproj`. 
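A minimal sketch of consuming the PodsProjectGeneratorResult container defined above, assuming `result` is the value returned by a generator's `generate!` and `spec` is one of the installed Pod::Specification objects:

    native_target = result.native_target_for_spec(spec)   # library, test, or app target depending on the spec type
    native_target.name
    result.project.path                                    # the generated Pods.xcodeproj (an Xcodeproj::Project subclass)
    result.target_installation_results                     # InstallationResults for the pod and aggregate targets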
+ # + # @return [PodsProjectGeneratorResult] + # + def generate! + project_path = sandbox.project_path + platforms = aggregate_targets.map(&:platform) + project_generator = ProjectGenerator.new(sandbox, project_path, pod_targets, build_configurations, + platforms, project_object_version, config.podfile_path) + project = project_generator.generate! + install_file_references(project, pod_targets) + + pod_target_installation_results = install_all_pod_targets(project, pod_targets) + aggregate_target_installation_results = install_aggregate_targets(project, aggregate_targets) + target_installation_results = InstallationResults.new(pod_target_installation_results, aggregate_target_installation_results) + + integrate_targets(target_installation_results.pod_target_installation_results) + wire_target_dependencies(target_installation_results) + PodsProjectGeneratorResult.new(project, {}, target_installation_results) + end + + private + + def install_all_pod_targets(project, pod_targets) + UI.message '- Installing Pod Targets' do + install_pod_targets(project, pod_targets) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/target_validator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/target_validator.rb new file mode 100644 index 0000000..738e3b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/installer/xcode/target_validator.rb @@ -0,0 +1,170 @@ +module Pod + class Installer + class Xcode + # The {Xcode::TargetValidator} ensures that the pod and aggregate target + # configuration is valid for installation. + # + class TargetValidator + # @return [Array] The aggregate targets that should be validated. + # + attr_reader :aggregate_targets + + # @return [Array] The pod targets that should be validated. + # + attr_reader :pod_targets + + # @return [InstallationOptions] The installation options used during this installation. + # + attr_reader :installation_options + + # Create a new TargetValidator with aggregate and pod targets to + # validate. + # + # @param [Array] aggregate_targets #see #aggregate_targets + # @param [Array] pod_targets see #pod_targets + # @param [InstallationOptions] installation_options see #installation_options + # + def initialize(aggregate_targets, pod_targets, installation_options) + @aggregate_targets = aggregate_targets + @pod_targets = pod_targets + @installation_options = installation_options + end + + # Perform the validation steps for the provided aggregate and pod + # targets. + # + def validate! + verify_no_duplicate_framework_and_library_names + verify_no_static_framework_transitive_dependencies + verify_swift_pods_swift_version + verify_swift_pods_have_module_dependencies + verify_no_multiple_project_names if installation_options.generate_multiple_pod_projects? + end + + private + + def verify_no_duplicate_framework_and_library_names + aggregate_targets.each do |aggregate_target| + aggregate_target.user_build_configurations.each_key do |config| + pod_targets = aggregate_target.pod_targets_for_build_configuration(config) + file_accessors = pod_targets.flat_map(&:file_accessors).select { |fa| fa.spec.library_specification? } + + frameworks = file_accessors.flat_map(&:vendored_frameworks).uniq.map(&:basename) + frameworks += pod_targets.select { |pt| pt.should_build? && pt.build_as_framework? 
}.map(&:product_module_name).uniq + verify_no_duplicate_names(frameworks, aggregate_target.label, 'frameworks') + + libraries = file_accessors.flat_map(&:vendored_libraries).uniq.map(&:basename) + libraries += pod_targets.select { |pt| pt.should_build? && pt.build_as_library? }.map(&:product_name) + verify_no_duplicate_names(libraries, aggregate_target.label, 'libraries') + end + end + end + + def verify_no_duplicate_names(names, label, type) + duplicates = names.group_by { |n| n.to_s.downcase }.select { |_, v| v.size > 1 }.keys + + unless duplicates.empty? + raise Informative, "The '#{label}' target has " \ + "#{type} with conflicting names: #{duplicates.to_sentence}." + end + end + + def verify_no_static_framework_transitive_dependencies + aggregate_targets.each do |aggregate_target| + aggregate_target.user_build_configurations.each_key do |config| + pod_targets = aggregate_target.pod_targets_for_build_configuration(config) + built_targets, unbuilt_targets = pod_targets.partition(&:should_build?) + dynamic_pod_targets = built_targets.select(&:build_as_dynamic?) + + dependencies = dynamic_pod_targets.flat_map(&:dependent_targets).uniq + depended_upon_targets = unbuilt_targets & dependencies + + static_libs = depended_upon_targets.flat_map(&:file_accessors).flat_map(&:vendored_static_artifacts) + unless static_libs.empty? + raise Informative, "The '#{aggregate_target.label}' target has " \ + "transitive dependencies that include statically linked binaries: (#{static_libs.to_sentence})" + end + + static_deps = dynamic_pod_targets.flat_map(&:recursive_dependent_targets).uniq.select(&:build_as_static?) + unless static_deps.empty? + raise Informative, "The '#{aggregate_target.label}' target has " \ + "transitive dependencies that include statically linked binaries: (#{static_deps.flat_map(&:name).to_sentence})" + end + end + end + end + + def verify_swift_pods_swift_version + error_message_for_target_definition = lambda do |target_definition| + "`#{target_definition.name}` (Swift #{target_definition.swift_version})" + end + swift_pod_targets = pod_targets.select(&:uses_swift?) + error_messages = swift_pod_targets.map do |swift_pod_target| + # Legacy targets that do not specify Swift versions derive their Swift version from the target definitions + # they are integrated with. An error is displayed if the target definition Swift versions collide or none + # of target definitions specify the `SWIFT_VERSION` attribute. + if swift_pod_target.spec_swift_versions.empty? + swift_target_definitions = swift_pod_target.target_definitions.reject { |target| target.swift_version.blank? } + next if swift_target_definitions.uniq(&:swift_version).count == 1 + if swift_target_definitions.empty? + "- `#{swift_pod_target.name}` does not specify a Swift version and none of the targets " \ + "(#{swift_pod_target.target_definitions.map { |td| "`#{td.name}`" }.to_sentence}) integrating it have the " \ + '`SWIFT_VERSION` attribute set. Please contact the author or set the `SWIFT_VERSION` attribute in at ' \ + 'least one of the targets that integrate this pod.' + else + target_errors = swift_target_definitions.map(&error_message_for_target_definition).to_sentence + "- `#{swift_pod_target.name}` is integrated by multiple targets that use a different Swift version: #{target_errors}." + end + elsif !swift_pod_target.swift_version.nil? && swift_pod_target.swift_version.empty? 
+ "- `#{swift_pod_target.name}` does not specify a Swift version (#{swift_pod_target.spec_swift_versions.map { |v| "`#{v}`" }.to_sentence}) " \ + "that is satisfied by any of targets (#{swift_pod_target.target_definitions.map { |td| "`#{td.name}`" }.to_sentence}) integrating it." + end + end.compact + + unless error_messages.empty? + raise Informative, "Unable to determine Swift version for the following pods:\n\n#{error_messages.join("\n")}" + end + end + + def verify_swift_pods_have_module_dependencies + error_messages = [] + pod_targets.each do |pod_target| + next unless pod_target.uses_swift? && pod_target.should_build? + + non_module_dependencies = [] + pod_target.dependent_targets.each do |dependent_target| + next if !dependent_target.should_build? || dependent_target.defines_module? + non_module_dependencies << dependent_target.name + end + + next if non_module_dependencies.empty? + + error_messages << "The Swift pod `#{pod_target.name}` depends upon #{non_module_dependencies.map { |d| "`#{d}`" }.to_sentence}, " \ + "which #{non_module_dependencies.count == 1 ? 'does' : 'do'} not define modules. " \ + 'To opt into those targets generating module maps '\ + '(which is necessary to import them from Swift when building as static libraries), ' \ + 'you may set `use_modular_headers!` globally in your Podfile, '\ + 'or specify `:modular_headers => true` for particular dependencies.' + end + return if error_messages.empty? + + raise Informative, 'The following Swift pods cannot yet be integrated '\ + "as static libraries:\n\n#{error_messages.join("\n\n")}" + end + + def verify_no_multiple_project_names + error_messages = pod_targets.map do |pod_target| + project_names = pod_target.target_definitions.map { |td| td.project_name_for_pod(pod_target.pod_name) }.compact.uniq + next unless project_names.count > 1 + "- `#{pod_target.name}` specifies multiple project names (#{project_names.map { |pn| "`#{pn}`" }.to_sentence}) " \ + "in different targets (#{pod_target.target_definitions.map { |td| "`#{td.name}`" }.to_sentence})." + end.compact + return if error_messages.empty? + + raise Informative, 'The following pods cannot be integrated:' \ + "\n\n#{error_messages.join("\n\n")}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/native_target_extension.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/native_target_extension.rb new file mode 100644 index 0000000..49c2a79 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/native_target_extension.rb @@ -0,0 +1,60 @@ +module Pod + class Project + # Adds a dependency on the given metadata cache. + # + # @param [Sandbox] sandbox + # The sandbox used for this installation. + # + # @param [AbstractTarget] target + # The parent target used to add a cached dependency. + # + # @param [MetadataCache] metadata + # The metadata holding all the required metadata to construct a target as a dependency. 
+ # + # @return [void] + # + def self.add_cached_dependency(sandbox, target, metadata) + return if dependency_for_cached_target?(sandbox, target, metadata) + container_proxy = target.project.new(Xcodeproj::Project::PBXContainerItemProxy) + + subproject_reference = target.project.reference_for_path(sandbox.root + metadata.container_project_path) + raise ArgumentError, "add_dependency received target (#{target}) that belongs to a project that is not this project (#{self}) and is not a subproject of this project" unless subproject_reference + container_proxy.container_portal = subproject_reference.uuid + + container_proxy.proxy_type = Xcodeproj::Constants::PROXY_TYPES[:native_target] + container_proxy.remote_global_id_string = metadata.native_target_uuid + container_proxy.remote_info = metadata.target_label + + dependency = target.project.new(Xcodeproj::Project::PBXTargetDependency) + dependency.name = metadata.target_label + dependency.target_proxy = container_proxy + + target.dependencies << dependency + end + + # Checks whether this target has a dependency on the given target. + # + # @param [Sandbox] sandbox + # The sandbox used for this installation. + # + # @param [AbstractTarget] target + # The parent target used to add a cached dependency. + # + # @param [TargetMetadata] cached_target + # the target to search for. + # + # @return [Bool] + # + def self.dependency_for_cached_target?(sandbox, target, cached_target) + target.dependencies.find do |dep| + if dep.target_proxy.remote? + subproject_reference = target.project.reference_for_path(sandbox.root + cached_target.container_project_path) + uuid = subproject_reference.uuid if subproject_reference + dep.target_proxy.remote_global_id_string == cached_target.native_target_uuid && dep.target_proxy.container_portal == uuid + else + dep.target.uuid == cached_target.native_target_uuid + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/open-uri.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/open-uri.rb new file mode 100644 index 0000000..f85ce27 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/open-uri.rb @@ -0,0 +1,33 @@ +# rubocop:disable Naming/FileName + +require 'open-uri' + +# Allow OpenURI to follow http to https redirects. +# +module OpenURI + # Whether {#open} should follow a redirect. + # + # Inspiration from: https://gist.github.com/1271420 + # Relevant issue: https://bugs.ruby-lang.org/issues/3719 + # Source here: https://github.com/ruby/ruby/blob/trunk/lib/open-uri.rb + # + # This test is intended to forbid a redirection from http://... to + # file:///etc/passwd, file:///dev/zero, etc. CVE-2011-1521 + # https to http redirect is also forbidden intentionally. + # It avoids sending secure cookie or referrer by non-secure HTTP protocol. + # (RFC 2109 4.3.1, RFC 2965 3.3, RFC 2616 15.1.3) + # However this is ad hoc. It should be extensible/configurable. 
+ # + # @param [URI::Generic] uri1 + # the origin uri from where the redirect origins + # + # @param [URI::Generic] uri2 + # the target uri where to where the redirect points to + # + # @return [Bool] + # + def self.redirectable?(uri1, uri2) + uri1.scheme.downcase == uri2.scheme.downcase || + (/\A(?:http|ftp)\z/i =~ uri1.scheme && /\A(?:https?|ftp)\z/i =~ uri2.scheme) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/podfile.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/podfile.rb new file mode 100644 index 0000000..2282a4c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/podfile.rb @@ -0,0 +1,13 @@ +require 'cocoapods-core/podfile' + +module Pod + class Podfile + autoload :InstallationOptions, 'cocoapods/installer/installation_options' + + # @return [Pod::Installer::InstallationOptions] the installation options specified in the Podfile + # + def installation_options + @installation_options ||= Pod::Installer::InstallationOptions.from_podfile(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/project.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/project.rb new file mode 100644 index 0000000..cd89042 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/project.rb @@ -0,0 +1,544 @@ +require 'xcodeproj' +require 'active_support/core_ext/string/inflections' + +module Pod + # The Pods project. + # + # Model class which provides helpers for working with the Pods project + # through the installation process. + # + class Project < Xcodeproj::Project + # @return [PBXGroup] The group for the support files of the aggregate + # targets. + # + attr_reader :support_files_group + + # @return [PBXGroup] The group for the Pods. + # + attr_reader :pods + + # @return [PBXGroup] The group for Development Pods. + # + attr_reader :development_pods + + # @return [PBXGroup] The group for dependencies. + # Used by #generate_multiple_pod_projects installation option. + # + attr_reader :dependencies_group + + # @return [Bool] Bool indicating if this project is a pod target subproject. + # Used by `generate_multiple_pod_projects` installation option. + # + attr_reader :pod_target_subproject + alias pod_target_subproject? pod_target_subproject + + # @return [String] The basename of the project path without .xcodeproj extension. + # + attr_reader :project_name + + # Initialize a new instance + # + # @param [Pathname, String] path @see Xcodeproj::Project#path + # @param [Bool] skip_initialization Whether the project should be initialized from scratch. + # @param [Int] object_version Object version to use for serialization, defaults to Xcode 3.2 compatible. + # + def initialize(path, skip_initialization = false, + object_version = Xcodeproj::Constants::DEFAULT_OBJECT_VERSION, pod_target_subproject: false) + @uuid_prefix = Digest('SHA256').hexdigest(File.basename(path)).upcase + super(path, skip_initialization, object_version) + @support_files_group = new_group('Targets Support Files') + @refs_by_absolute_path = {} + @variant_groups_by_path_and_name = {} + @pods = new_group('Pods') + @development_pods = new_group('Development Pods') + @dependencies_group = new_group('Dependencies') + @pod_target_subproject = pod_target_subproject + @project_name = Pathname(path).basename('.*').to_s + self.symroot = LEGACY_BUILD_ROOT + end + + # Generates a list of new UUIDs that created objects can be assigned. 
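A minimal illustration of the redirect policy implemented by the `OpenURI.redirectable?` patch above; the hosts are placeholders, and the require assumes the cocoapods gem is on the load path:

    require 'uri'
    require 'cocoapods/open-uri'

    OpenURI.redirectable?(URI('http://example.org'),  URI('https://example.org'))   # => true,  http -> https is allowed
    OpenURI.redirectable?(URI('https://example.org'), URI('http://example.org'))    # => false, https -> http is refused
    OpenURI.redirectable?(URI('https://example.org'), URI('https://cdn.example'))   # => true,  same scheme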
+ # + # @note Overridden to generate UUIDs in a much faster way, since we don't need to check for collisions + # (as the Pods project is regenerated each time, and thus all UUIDs will have come from this method) + # + # @param [Integer] count + # The number of UUIDs to generate + # + # @return [Void] + # + def generate_available_uuid_list(count = 100) + start = @generated_uuids.size + uniques = Array.new(count) { |i| format('%.6s%07X0', @uuid_prefix, start + i) } + @generated_uuids += uniques + @available_uuids += uniques + end + + public + + # @!group Legacy Xcode build root + #-------------------------------------------------------------------------# + + LEGACY_BUILD_ROOT = '${SRCROOT}/../build' + + # @param [String] symroot + # The build root that is used when Xcode is configured to not use the + # workspace’s build root. Defaults to `${SRCROOT}/../build`. + # + # @return [void] + # + def symroot=(symroot) + root_object.build_configuration_list.build_configurations.each do |config| + config.build_settings['SYMROOT'] = symroot + end + end + + public + + # @!group Pod Groups + #-------------------------------------------------------------------------# + + # Creates a new group for the Pod with the given name and configures its + # path. + # + # @param [String] pod_name + # The name of the Pod. + # + # @param [#to_s] path + # The path to the root of the Pod. + # + # @param [Bool] development + # Whether the group should be added to the Development Pods group. + # + # @param [Bool] absolute + # Whether the path of the group should be set as absolute. + # + # @return [PBXGroup] The new group. + # + def add_pod_group(pod_name, path, development = false, absolute = false) + raise '[BUG]' if pod_group(pod_name) + + parent_group = + if pod_target_subproject + main_group + else + development ? development_pods : pods + end + source_tree = absolute ? :absolute : :group + + group = parent_group.new_group(pod_name, path, source_tree) + group + end + + # Creates a new subproject reference for the given project and configures its + # group location. + # + # @param [Project] project + # The subproject to be added. + # + # @param [Bool] development + # Whether the project should be added to the Development Pods group. + # For projects where `pod_target_subproject` is enabled, all subprojects are added into the Dependencies group. + # + # @return [PBXFileReference] The new file reference. + # + def add_pod_subproject(project, development = false) + parent_group = group_for_subproject_reference(development) + add_subproject_reference(project, parent_group) + end + + # Creates a new subproject reference for the given cached metadata and configures its + # group location. + # + # @param [Sandbox] sandbox + # The sandbox used for installation. + # + # @param [TargetMetadata] metadata + # The project metadata to be added. + # + # @param [Bool] development + # Whether the project should be added to the Development Pods group. + # For projects where `pod_target_subproject` is enabled, all subprojects are added into the Dependencies group. + # + # @return [PBXFileReference] The new file reference. + # + def add_cached_pod_subproject(sandbox, metadata, development = false) + parent_group = group_for_subproject_reference(development) + add_cached_subproject_reference(sandbox, metadata, parent_group) + end + + # @return [Array] Returns all the group of the Pods. 
+ # + def pod_groups + if pod_target_subproject + main_group.children.objects + else + pods.children.objects + development_pods.children.objects + end + end + + # Returns the group for the Pod with the given name. + # + # @param [String] pod_name + # The name of the Pod. + # + # @return [PBXGroup] The group. + # + def pod_group(pod_name) + pod_groups.find { |group| group.name == pod_name } + end + + # @return [Hash] The names of the specification subgroups by key. + # + SPEC_SUBGROUPS = { + :resources => 'Resources', + :frameworks => 'Frameworks', + :developer => 'Pod', + } + + # Returns the group for the specification with the give name creating it if + # needed. + # + # @param [String] spec_name + # The full name of the specification. + # + # @return [PBXGroup] The group. + # + def group_for_spec(spec_name, subgroup_key = nil) + pod_name = Specification.root_name(spec_name) + group = pod_group(pod_name) + raise "[Bug] Unable to locate group for Pod named `#{pod_name}`" unless group + if spec_name != pod_name + subspecs_names = spec_name.gsub(pod_name + '/', '').split('/') + subspecs_names.each do |name| + group = group[name] || group.new_group(name) + end + end + + if subgroup_key + subgroup_name = SPEC_SUBGROUPS[subgroup_key] + raise ArgumentError, "Unrecognized subgroup key `#{subgroup_key}`" unless subgroup_name + group = group[subgroup_name] || group.new_group(subgroup_name) + end + + group + end + + # Returns the support files group for the Pod with the given name. + # + # @param [String] pod_name + # The name of the Pod. + # + # @return [PBXGroup] The group. + # + def pod_support_files_group(pod_name, dir) + group = pod_group(pod_name) + support_files_group = group['Support Files'] + unless support_files_group + support_files_group = group.new_group('Support Files', dir) + end + support_files_group + end + + public + + # @!group File references + #-------------------------------------------------------------------------# + + # Adds a file reference to given path as a child of the given group. + # + # @param [Array] absolute_path + # The path of the file. + # + # @param [PBXGroup] group + # The group for the new file reference. + # + # @param [Bool] reflect_file_system_structure + # Whether group structure should reflect the file system structure. + # If yes, where needed, intermediate groups are created, similar to + # how mkdir -p operates. + # + # @param [Pathname] base_path + # The base path for newly created groups when reflect_file_system_structure is true. + # If nil, the provided group's real_path is used. + # + # @return [PBXFileReference] The new file reference. + # + def add_file_reference(absolute_path, group, reflect_file_system_structure = false, base_path = nil) + file_path_name = absolute_path.is_a?(Pathname) ? absolute_path : Pathname(absolute_path) + if ref = reference_for_path(file_path_name) + return ref + end + + group = group_for_path_in_group(file_path_name, group, reflect_file_system_structure, base_path) + ref = group.new_file(file_path_name.realpath) + @refs_by_absolute_path[file_path_name.to_s] = ref + end + + # @!group File references + #-------------------------------------------------------------------------# + + # Adds a file reference for a project as a child of the given group. + # + # @param [Project] project + # The project to add as a subproject reference. + # + # @param [PBXGroup] group + # The group for the new subproject reference. + # + # @return [PBXFileReference] The new file reference. 
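A minimal sketch of the pod-group helpers shown above, using illustrative paths and `Zephyr` purely as an example pod name:

    require 'cocoapods'

    project = Pod::Project.new(Pathname('Pods/Pods.xcodeproj').expand_path)
    project.add_pod_group('Zephyr', Pathname('Pods/Zephyr').expand_path)
    project.group_for_spec('Zephyr', :resources)       # creates the "Zephyr > Resources" subgroup on demand
    project.add_podfile(Pathname('Podfile').expand_path)
    project.save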
+ # + def add_subproject_reference(project, group) + new_subproject_file_reference(project.path, group) + end + + # Adds a file reference for a cached project as a child of the given group. + # + # @param [Sandbox] sandbox + # The sandbox used for installation. + # + # @param [MetadataCache] metadata + # The metadata holding the required properties to create a subproject reference. + # + # @param [PBXGroup] group + # The group for the new subproject reference. + # + # @return [PBXFileReference] The new file reference. + # + def add_cached_subproject_reference(sandbox, metadata, group) + new_subproject_file_reference(sandbox.root + metadata.container_project_path, group) + end + + # Returns the file reference for the given absolute path. + # + # @param [#to_s] absolute_path + # The absolute path of the file whose reference is needed. + # + # @return [PBXFileReference] The file reference. + # @return [Nil] If no file reference could be found. + # + def reference_for_path(absolute_path) + absolute_path = absolute_path.is_a?(Pathname) ? absolute_path : Pathname(absolute_path) + unless absolute_path.absolute? + raise ArgumentError, "Paths must be absolute #{absolute_path}" + end + + refs_by_absolute_path[absolute_path.to_s] ||= refs_by_absolute_path[absolute_path.realpath.to_s] + end + + # Adds a file reference to the Podfile. + # + # @param [#to_s] podfile_path + # The path of the Podfile. + # + # @return [PBXFileReference] The new file reference. + # + def add_podfile(podfile_path) + new_file(podfile_path, :project).tap do |podfile_ref| + mark_ruby_file_ref(podfile_ref) + end + end + + # Sets the syntax of the provided file reference to be Ruby, in the case that + # the file does not already have a ".rb" file extension (ex. the Podfile) + # + # @param [PBXFileReference] file_ref + # The file reference to change + # + def mark_ruby_file_ref(file_ref) + file_ref.xc_language_specification_identifier = 'xcode.lang.ruby' + file_ref.explicit_file_type = 'text.script.ruby' + file_ref.last_known_file_type = 'text' + file_ref.tab_width = '2' + file_ref.indent_width = '2' + end + + # Adds a new build configuration to the project and populates it with + # default settings according to the provided type. + # + # @note This method extends the original Xcodeproj implementation to + # include a preprocessor definition named after the build + # setting. This is done to support the TargetEnvironmentHeader + # specification of Pods available only on certain build + # configurations. + # + # @param [String] name + # The name of the build configuration. + # + # @param [Symbol] type + # The type of the build configuration used to populate the build + # settings, must be :debug or :release. + # + # @return [XCBuildConfiguration] The new build configuration. + # + def add_build_configuration(name, type) + build_configuration = super + settings = build_configuration.build_settings + definitions = settings['GCC_PREPROCESSOR_DEFINITIONS'] || ['$(inherited)'] + defines = [defininition_for_build_configuration(name)] + defines << 'DEBUG' if type == :debug + defines.each do |define| + value = "#{define}=1" + unless definitions.include?(value) + definitions.unshift(value) + end + end + settings['GCC_PREPROCESSOR_DEFINITIONS'] = definitions + + if type == :debug + settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS'] = 'DEBUG' + end + + build_configuration + end + + # @param [String] name + # The name of the build configuration. + # + # @return [String] The preprocessor definition to set for the configuration. 
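A minimal sketch of what `add_build_configuration` layers on top of the Xcodeproj implementation, per the documentation above; `project` is assumed to be a Pod::Project instance and the configuration name is illustrative:

    config = project.add_build_configuration('App Store', :release)
    config.build_settings['GCC_PREPROCESSOR_DEFINITIONS']
    # includes "POD_CONFIGURATION_APP_STORE=1" alongside "$(inherited)"

    debug = project.add_build_configuration('Debug', :debug)
    debug.build_settings['GCC_PREPROCESSOR_DEFINITIONS']
    # additionally includes "DEBUG=1"
    debug.build_settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS']   # => "DEBUG"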
+ # + def defininition_for_build_configuration(name) + "POD_CONFIGURATION_#{name.underscore}".gsub(/[^a-zA-Z0-9_]/, '_').upcase + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------------# + + # @return [Hash{String => PBXFileReference}] The file references grouped + # by absolute path. + # + attr_reader :refs_by_absolute_path + + # @return [Hash{[Pathname, String] => PBXVariantGroup}] The variant groups + # grouped by absolute path of parent dir and name. + # + attr_reader :variant_groups_by_path_and_name + + # Returns the group for an absolute file path in another group. + # Creates subgroups to reflect the file system structure if + # reflect_file_system_structure is set to true. + # Makes a variant group if the path points to a localized file inside a + # *.lproj directory. To support Apple Base Internationalization, the same + # variant group is returned for interface files and strings files with + # the same name. + # + # @param [Pathname] absolute_pathname + # The pathname of the file to get the group for. + # + # @param [PBXGroup] group + # The parent group used as the base of the relative path. + # + # @param [Bool] reflect_file_system_structure + # Whether group structure should reflect the file system structure. + # If yes, where needed, intermediate groups are created, similar to + # how mkdir -p operates. + # + # @param [Pathname] base_path + # The base path for the newly created group. If nil, the provided group's real_path is used. + # + # @return [PBXGroup] The appropriate group for the filepath. + # Can be PBXVariantGroup, if the file is localized. + # + def group_for_path_in_group(absolute_pathname, group, reflect_file_system_structure, base_path = nil) + unless absolute_pathname.absolute? + raise ArgumentError, "Paths must be absolute #{absolute_pathname}" + end + unless base_path.nil? || base_path.absolute? + raise ArgumentError, "Paths must be absolute #{base_path}" + end + + relative_base = base_path.nil? ? group.real_path : base_path.realdirpath + relative_pathname = absolute_pathname.relative_path_from(relative_base) + relative_dir = relative_pathname.dirname + + # Add subgroups for directories, but treat .lproj as a file + if reflect_file_system_structure + path = relative_base + relative_dir.each_filename do |name| + break if name.to_s.downcase.include? '.lproj' + next if name == '.' + # Make sure groups have the correct absolute path set, as intermittent + # directories may not be included in the group structure + path += name + group = group.children.find { |c| c.display_name == name } || group.new_group(name, path) + end + end + + # Turn files inside .lproj directories into a variant group + if relative_dir.basename.to_s.downcase.include? '.lproj' + group_name = variant_group_name(absolute_pathname) + lproj_parent_dir = absolute_pathname.dirname.dirname + group = @variant_groups_by_path_and_name[[lproj_parent_dir, group_name]] ||= + group.new_variant_group(group_name, lproj_parent_dir) + end + + group + end + + # Returns the name to be used for a the variant group for a file at a given path. + # The path must be localized (within an *.lproj directory). + # + # @param [Pathname] path The localized path to get a variant group name for. + # + # @return [String] The variant group name. + # + def variant_group_name(path) + unless path.to_s.downcase.include?('.lproj/') + raise ArgumentError, 'Only localized resources can be added to variant groups.' 
+ end + + # When using Base Internationalization for XIBs and Storyboards a strings + # file is generated with the same name as the XIB/Storyboard in each .lproj + # directory: + # Base.lproj/MyViewController.xib + # fr.lproj/MyViewController.strings + # + # In this scenario we want the variant group to be the same as the XIB or Storyboard. + # + # Base Internationalization: https://developer.apple.com/library/ios/documentation/MacOSX/Conceptual/BPInternational/InternationalizingYourUserInterface/InternationalizingYourUserInterface.html + if path.extname.downcase == '.strings' + %w(.xib .storyboard).each do |extension| + possible_interface_file = path.dirname.dirname + 'Base.lproj' + path.basename.sub_ext(extension) + return possible_interface_file.basename.to_s if possible_interface_file.exist? + end + end + + path.basename.to_s + end + + def new_subproject_file_reference(project_path, group) + if ref = reference_for_path(project_path) + return ref + end + + # We call into the private function `FileReferencesFactory.new_file_reference` instead of `FileReferencesFactory.new_reference` + # because it delegates into `FileReferencesFactory.new_subproject` which has the extra behavior of opening the Project which + # is an expensive operation for large projects. + # + ref = Xcodeproj::Project::FileReferencesFactory.send(:new_file_reference, group, project_path, :group) + ref.name = Pathname(project_path).basename('.*').to_s + ref.include_in_index = nil + + attribute = PBXProject.references_by_keys_attributes.find { |attrb| attrb.name == :project_references } + project_reference = ObjectDictionary.new(attribute, group.project.root_object) + project_reference[:project_ref] = ref + root_object.project_references << project_reference + refs_by_absolute_path[project_path.to_s] = ref + ref + end + + # Returns the parent group a new subproject reference should belong to. + # + def group_for_subproject_reference(development) + if pod_target_subproject + dependencies_group + else + development ? development_pods : pods + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver.rb new file mode 100644 index 0000000..35835aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver.rb @@ -0,0 +1,600 @@ +require 'molinillo' +require 'cocoapods/podfile' + +module Pod + class NoSpecFoundError < Informative + def exit_status + @exit_status ||= 31 + end + end + + # The resolver is responsible of generating a list of specifications grouped + # by target for a given Podfile. + # + class Resolver + require 'cocoapods/resolver/lazy_specification' + require 'cocoapods/resolver/resolver_specification' + + # @return [Sandbox] the Sandbox used by the resolver to find external + # dependencies. + # + attr_reader :sandbox + + # @return [Podfile] the Podfile used by the resolver. + # + attr_reader :podfile + + # @return [Array] the list of dependencies locked to a specific + # version. + # + attr_reader :locked_dependencies + + # @return [Array] The list of the sources which will be used for + # the resolution. + # + attr_reader :sources + + # @return [Bool] Whether the resolver has sources repositories up-to-date. + # + attr_reader :specs_updated + alias specs_updated? 
specs_updated + + # @return [Source::Manager] the manager to use for dependency resolution + # + attr_reader :sources_manager + + # Init a new Resolver + # + # @param [Sandbox] sandbox @see sandbox + # @param [Podfile] podfile @see podfile + # @param [Array] locked_dependencies @see locked_dependencies + # @param [Array, Source] sources @see sources + # @param [Boolean] specs_updated @see specs_updated + # @param [PodfileDependencyCache] podfile_dependency_cache the podfile dependency cache to use + # within this Resolver. + # + def initialize(sandbox, podfile, locked_dependencies, sources, specs_updated, + podfile_dependency_cache: Installer::Analyzer::PodfileDependencyCache.from_podfile(podfile), + sources_manager: Config.instance.sources_manager) + @sandbox = sandbox + @podfile = podfile + @locked_dependencies = locked_dependencies + @sources = Array(sources) + @specs_updated = specs_updated + @podfile_dependency_cache = podfile_dependency_cache + @sources_manager = sources_manager + @platforms_by_dependency = Hash.new { |h, k| h[k] = [] } + + @cached_sets = {} + @podfile_requirements_by_root_name = @podfile_dependency_cache.podfile_dependencies.group_by(&:root_name).each_value { |a| a.map!(&:requirement).freeze }.freeze + @search = {} + @validated_platforms = Set.new + end + + #-------------------------------------------------------------------------# + + public + + # @!group Resolution + + # Identifies the specifications that should be installed. + # + # @return [Hash{TargetDefinition => Array}] resolver_specs_by_target + # the resolved specifications that need to be installed grouped by target + # definition. + # + def resolve + dependencies = @podfile_dependency_cache.target_definition_list.flat_map do |target| + @podfile_dependency_cache.target_definition_dependencies(target).each do |dep| + next unless target.platform + @platforms_by_dependency[dep].push(target.platform) + end + end.uniq + @platforms_by_dependency.each_value(&:uniq!) + @activated = Molinillo::Resolver.new(self, self).resolve(dependencies, locked_dependencies) + resolver_specs_by_target + rescue Molinillo::ResolverError => e + handle_resolver_error(e) + end + + # @return [Hash{Podfile::TargetDefinition => Array}] + # returns the resolved specifications grouped by target. + # + # @note The returned specifications can be subspecs. + # + def resolver_specs_by_target + @resolver_specs_by_target ||= {}.tap do |resolver_specs_by_target| + @podfile_dependency_cache.target_definition_list.each do |target| + next if target.abstract? && !target.platform + + # can't use vertex.root? since that considers _all_ targets + explicit_dependencies = @podfile_dependency_cache.target_definition_dependencies(target).map(&:name).to_set + + used_by_aggregate_target_by_spec_name = {} + used_vertices_by_spec_name = {} + + # it's safe to make a single pass here since we iterate in topological order, + # so all of the predecessors have been visited before we get to a node. + # #tsort returns no-children vertices first, and we want them last (i.e. we want no-parent vertices first) + @activated.tsort.reverse_each do |vertex| + spec_name = vertex.name + explicitly_included = explicit_dependencies.include?(spec_name) + if explicitly_included || vertex.incoming_edges.any? 
{ |edge| used_vertices_by_spec_name.key?(edge.origin.name) && edge_is_valid_for_target_platform?(edge, target.platform) } + validate_platform(vertex.payload, target) + used_vertices_by_spec_name[spec_name] = vertex + used_by_aggregate_target_by_spec_name[spec_name] = vertex.payload.library_specification? && + (explicitly_included || vertex.predecessors.any? { |predecessor| used_by_aggregate_target_by_spec_name.fetch(predecessor.name, false) }) + end + end + + resolver_specs_by_target[target] = used_vertices_by_spec_name.each_value. + map do |vertex| + payload = vertex.payload + non_library = !used_by_aggregate_target_by_spec_name.fetch(vertex.name) + spec_source = payload.respond_to?(:spec_source) && payload.spec_source + ResolverSpecification.new(payload, non_library, spec_source) + end. + sort_by(&:name) + end + end + end + + #-------------------------------------------------------------------------# + + public + + # @!group Specification Provider + + include Molinillo::SpecificationProvider + + # Returns (and caches) the specification that satisfy the given dependency. + # + # @return [Array] the specifications that satisfy the given + # `dependency`. + # + # @param [Dependency] dependency the dependency that is being searched for. + # + def search_for(dependency) + @search[dependency] ||= begin + additional_requirements = if locked_requirement = requirement_for_locked_pod_named(dependency.name) + [locked_requirement] + else + Array(@podfile_requirements_by_root_name[dependency.root_name]) + end + + specifications_for_dependency(dependency, additional_requirements).freeze + end + end + + # Returns the dependencies of `specification`. + # + # @return [Array] all dependencies of `specification`. + # + # @param [Specification] specification the specification whose own + # dependencies are being asked for. + # + def dependencies_for(specification) + root_name = Specification.root_name(specification.name) + specification.all_dependencies.map do |dependency| + if dependency.root_name == root_name + dependency.dup.tap { |d| d.specific_version = specification.version } + else + dependency + end + end + end + + # Returns the name for the given `dependency`. + # + # @return [String] the name for the given `dependency`. + # + # @param [Dependency] dependency the dependency whose name is being + # queried. + # + def name_for(dependency) + dependency.name + end + + # @return [String] the user-facing name for a {Podfile}. + # + def name_for_explicit_dependency_source + 'Podfile' + end + + # @return [String] the user-facing name for a {Lockfile}. + # + def name_for_locking_dependency_source + 'Podfile.lock' + end + + # Determines whether the given `requirement` is satisfied by the given + # `spec`, in the context of the current `activated` dependency graph. + # + # @return [Boolean] whether `requirement` is satisfied by `spec` in the + # context of the current `activated` dependency graph. + # + # @param [Dependency] requirement the dependency in question. + # + # @param [Molinillo::DependencyGraph] activated the current dependency + # graph in the resolution process. + # + # @param [Specification] spec the specification in question. 
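A minimal sketch of driving the Resolver directly, as documented above; in practice the installer's analyzer constructs these arguments, so `sandbox`, `podfile`, and `sources` are assumed to be in scope, and an empty Molinillo dependency graph stands in for the absence of locked dependencies:

    require 'cocoapods'

    locked   = Molinillo::DependencyGraph.new
    resolver = Pod::Resolver.new(sandbox, podfile, locked, sources, false)

    begin
      specs_by_target = resolver.resolve   # Hash{TargetDefinition => Array<ResolverSpecification>}
      specs_by_target.each do |target_definition, resolver_specs|
        puts "#{target_definition.name}: #{resolver_specs.map(&:name).join(', ')}"
      end
    rescue Pod::NoSpecFoundError => e      # no source contains a matching podspec (exit status 31)
      abort e.message
    rescue Pod::Informative => e           # version conflicts, platform mismatches, etc.
      abort e.message
    end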
+ # + def requirement_satisfied_by?(requirement, activated, spec) + version = spec.version + return false unless requirement.requirement.satisfied_by?(version) + return false unless valid_possibility_version_for_root_name?(requirement, activated, spec) + return false unless spec_is_platform_compatible?(activated, requirement, spec) + true + end + + def valid_possibility_version_for_root_name?(requirement, activated, spec) + return true if prerelease_requirement = requirement.prerelease? || requirement.external_source || !spec.version.prerelease? + + activated.each do |vertex| + next unless vertex.payload + next unless Specification.root_name(vertex.name) == requirement.root_name + + prerelease_requirement ||= vertex.requirements.any? { |r| r.prerelease? || r.external_source } + + if vertex.payload.respond_to?(:version) + return true if vertex.payload.version == spec.version + break + end + end + + prerelease_requirement + end + private :valid_possibility_version_for_root_name? + + # Sort dependencies so that the ones that are easiest to resolve are first. + # Easiest to resolve is (usually) defined by: + # 1) Is this dependency already activated? + # 2) How relaxed are the requirements? + # 3) Are there any conflicts for this dependency? + # 4) How many possibilities are there to satisfy this dependency? + # + # @return [Array] the sorted dependencies. + # + # @param [Array] dependencies the unsorted dependencies. + # + # @param [Molinillo::DependencyGraph] activated the dependency graph of + # currently activated specs. + # + # @param [{String => Array}] conflicts the current conflicts. + # + def sort_dependencies(dependencies, activated, conflicts) + dependencies.sort_by! do |dependency| + name = name_for(dependency) + [ + activated.vertex_named(name).payload ? 0 : 1, + dependency.external_source ? 0 : 1, + dependency.prerelease? ? 0 : 1, + conflicts[name] ? 0 : 1, + search_for(dependency).count, + ] + end + end + + #-------------------------------------------------------------------------# + + public + + # @!group Resolver UI + + include Molinillo::UI + + # The UI object the resolver should use for displaying user-facing output. + # + # @return [UserInterface] the normal CocoaPods UI object. + # + def output + UI + end + + # Called before resolution starts. + # + # Completely silence this, as we show nothing. + # + # @return [Void] + # + def before_resolution + end + + # Called after resolution ends. + # + # Completely silence this, as we show nothing. + # + # @return [Void] + # + def after_resolution + end + + # Called during resolution to indicate progress. + # + # Completely silence this, as we show nothing. + # + # @return [Void] + # + def indicate_progress + end + + #-------------------------------------------------------------------------# + + private + + # !@ Resolution context + + # @return [Hash Set>] A cache that keeps tracks of the sets + # loaded by the resolution process. + # + # @note Sets store the resolved dependencies and return the highest + # available specification found in the sources. This is done + # globally and not per target definition because there can be just + # one Pod installation, so different version of the same Pods for + # target definitions are not allowed. + # + attr_reader :cached_sets + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # Returns available specifications which satisfy requirements of given dependency + # and additional requirements. 
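A minimal sketch of the specification-provider queries Molinillo drives through the Resolver, continuing from the previous sketch; the pod name and version requirement are illustrative:

    dep        = Pod::Dependency.new('Zephyr', '~> 3.6')
    candidates = resolver.search_for(dep)          # cached per dependency; merges Podfile and lockfile requirements
    candidates.map(&:version)                      # every satisfying version found across the configured sources
    resolver.dependencies_for(candidates.last)     # the dependencies Molinillo will resolve next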
+ # + # @param [Dependency] dependency + # The dependency whose requirements will be satisfied. + # + # @param [Array] additional_requirements + # List of additional requirements which should also be satisfied. + # + # @return [Array] List of specifications satisfying given requirements. + # + def specifications_for_dependency(dependency, additional_requirements = []) + requirement_list = dependency.requirement.as_list + additional_requirements.flat_map(&:as_list) + requirement_list.uniq! + requirement = Requirement.new(requirement_list) + find_cached_set(dependency). + all_specifications(warn_for_multiple_pod_sources, requirement). + map { |s| s.subspec_by_name(dependency.name, false, true) }. + compact + end + + # @return [Set] Loads or returns a previously initialized set for the Pod + # of the given dependency. + # + # @param [Dependency] dependency + # The dependency for which the set is needed. + # + # @return [Set] the cached set for a given dependency. + # + def find_cached_set(dependency) + name = dependency.root_name + cached_sets[name] ||= begin + if dependency.external_source + spec = sandbox.specification(name) + unless spec + raise StandardError, '[Bug] Unable to find the specification ' \ + "for `#{dependency}`." + end + set = Specification::Set::External.new(spec) + else + set = create_set_from_sources(dependency) + end + + unless set + raise Molinillo::NoSuchDependencyError.new(dependency) # rubocop:disable Style/RaiseArgs + end + + set + end + end + + # @return [Requirement, Nil] + # The {Requirement} that locks the dependency with name `name` in + # {#locked_dependencies}. + # + def requirement_for_locked_pod_named(name) + if vertex = locked_dependencies.vertex_named(name) + if dependency = vertex.payload + dependency.requirement + end + end + end + + # @return [Set] Creates a set for the Pod of the given dependency from the + # sources. The set will contain all versions from all sources that + # include the Pod. + # + # @param [Dependency] dependency + # The dependency for which the set is needed. + # + def create_set_from_sources(dependency) + aggregate_for_dependency(dependency).search(dependency) + end + + # @return [Source::Aggregate] The aggregate of the {#sources}. + # + def aggregate_for_dependency(dependency) + if dependency && dependency.podspec_repo + sources_manager.aggregate_for_dependency(dependency) + elsif (locked_vertex = @locked_dependencies.vertex_named(dependency.name)) && (locked_dependency = locked_vertex.payload) && locked_dependency.podspec_repo + sources_manager.aggregate_for_dependency(locked_dependency) + else + @aggregate ||= Source::Aggregate.new(sources) + end + end + + # Ensures that a specification is compatible with the platform of a target. + # + # @raise If the specification is not supported by the target. + # + # @return [void] + # + def validate_platform(spec, target) + return unless target_platform = target.platform + return unless @validated_platforms.add?([spec.object_id, target_platform]) + unless spec.available_platforms.any? { |p| target_platform.to_sym == p.to_sym } + raise Informative, "The platform of the target `#{target.name}` " \ + "(#{target.platform}) is not compatible with `#{spec}`, which does " \ + "not support `#{target.platform.string_name}`." + end + end + + # Handles errors that come out of a {Molinillo::Resolver}. + # + # @return [void] + # + # @param [Molinillo::ResolverError] error + # + def handle_resolver_error(error) + message = error.message + type = Informative + unless specs_updated? 
+ specs_update_message = "\n * out-of-date source repos which you can update with `pod repo update` or with `pod install --repo-update`." + end + case error + when Molinillo::VersionConflict + message = error.message_with_trees( + :solver_name => 'CocoaPods', + :possibility_type => 'pod', + :version_for_spec => lambda(&:version), + :additional_message_for_conflict => lambda do |o, name, conflict| + local_pod_parent = conflict.requirement_trees.flatten.reverse.find(&:local?) + if local_pod_parent && !specifications_for_dependency(conflict.requirement).empty? && !conflict.possibility && conflict.locked_requirement + # Conflict was caused by a requirement from a local dependency. + # Tell user to use `pod update`. + o << "\n\nYou have either:#{specs_update_message}" \ + "\n * changed the constraints of dependency `#{name}` inside your development pod `#{local_pod_parent.name}`." \ + "\n You should run `pod update #{name}` to apply changes you've made." + elsif !conflict.possibility && conflict.locked_requirement && conflict.locked_requirement.external_source && conflict.locked_requirement.external_source[:podspec] && + conflict.requirement && conflict.requirement.external_source && conflict.requirement.external_source[:podspec] + # The internal version of the Podspec doesn't match the external definition of a podspec + o << "\nIt seems like you've changed the version of the dependency `#{name}` " \ + "and it differs from the version stored in `Pods/Local Podspecs`.\nYou should run `pod update #{name} --no-repo-update` to apply " \ + 'changes made locally.' + elsif (conflict.possibility && conflict.possibility.version.prerelease?) && + (conflict.requirement && !( + conflict.requirement.prerelease? || + conflict.requirement.external_source) + ) + # Conflict was caused by not specifying an explicit version for the requirement #[name], + # and there is no available stable version satisfying constraints for the requirement. + o << "\nThere are only pre-release versions available satisfying the following requirements:\n" + conflict.requirements.values.flatten.uniq.each do |r| + unless search_for(r).empty? + o << "\n\t'#{name}', '#{r.requirement}'\n" + end + end + o << "\nYou should explicitly specify the version in order to install a pre-release version" + elsif !conflict.existing + conflicts = conflict.requirements.values.flatten.uniq + found_conflicted_specs = conflicts.reject { |c| search_for(c).empty? } + if found_conflicted_specs.empty? + # There are no existing specification inside any of the spec repos with given requirements. + type = NoSpecFoundError + dependencies = conflicts.count == 1 ? 'dependency' : 'dependencies' + o << "\nNone of your spec sources contain a spec satisfying "\ + "the #{dependencies}: `#{conflicts.join(', ')}`." \ + "\n\nYou have either:#{specs_update_message}" \ + "\n * mistyped the name or version." \ + "\n * not added the source repo that hosts the Podspec to your Podfile." + + else + o << "\nSpecs satisfying the `#{conflicts.join(', ')}` dependency were found, " \ + 'but they required a higher minimum deployment target.' + end + end + end, + ) + when Molinillo::NoSuchDependencyError + message += <<-EOS + + +You have either:#{specs_update_message} + * mistyped the name or version. + * not added the source repo that hosts the Podspec to your Podfile. 
+ EOS + end + raise type.new(message).tap { |e| e.set_backtrace(error.backtrace) } + end + + # Returns whether the given spec is platform-compatible with the dependency + # graph, taking into account the dependency that has required the spec. + # + # @param [Molinillo::DependencyGraph] dependency_graph + # + # @param [Dependency] dependency + # + # @param [Specification] spec + # + # @return [Bool] + # + def spec_is_platform_compatible?(dependency_graph, dependency, spec) + # This is safe since a pod will only be in locked dependencies if we're + # using the same exact version + return true if locked_dependencies.vertex_named(spec.name) + + vertex = dependency_graph.vertex_named(dependency.name) + predecessors = vertex.recursive_predecessors.select(&:root?) + predecessors << vertex if vertex.root? + platforms_to_satisfy = predecessors.flat_map(&:explicit_requirements).flat_map { |r| @platforms_by_dependency[r] }.uniq + + available_platforms = spec.available_platforms + + platforms_to_satisfy.all? do |platform_to_satisfy| + available_platforms.all? do |spec_platform| + next true unless spec_platform.name == platform_to_satisfy.name + # For non library specs all we care is to match by the platform name, not to satisfy the version. + next true if spec.non_library_specification? + platform_to_satisfy.supports?(spec_platform) + end + end + end + + class EdgeAndPlatform + def initialize(edge, target_platform) + @edge = edge + @target_platform = target_platform + end + attr_reader :edge, :target_platform + + def eql?(other) + edge.equal?(other.edge) && target_platform.eql?(other.target_platform) + end + + def hash + edge.object_id ^ target_platform.hash + end + end + private_constant :EdgeAndPlatform + + # Whether the given `edge` should be followed to find dependencies for the + # given `target_platform`. + # + # @return [Bool] + # + def edge_is_valid_for_target_platform?(edge, target_platform) + @edge_validity ||= Hash.new do |hash, edge_and_platform| + e = edge_and_platform.edge + platform = edge_and_platform.target_platform + requirement_name = e.requirement.name + + hash[edge_and_platform] = e.origin.payload.all_dependencies(platform).any? do |dep| + dep.name == requirement_name + end + end + + @edge_validity[EdgeAndPlatform.new(edge, target_platform)] + end + + # @return [Boolean] whether to emit a warning when a pod is found in multiple sources + # + def warn_for_multiple_pod_sources + podfile.installation_options.warn_for_multiple_pod_sources + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver/lazy_specification.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver/lazy_specification.rb new file mode 100644 index 0000000..474e6a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver/lazy_specification.rb @@ -0,0 +1,88 @@ +require 'delegate' +module Pod + class Specification + class Set + class SpecWithSource < DelegateClass(Specification) + attr_reader :spec_source + def initialize(spec, source) + super(spec) + @spec_source = source + end + + undef is_a? 
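An aside on the caching in `edge_is_valid_for_target_platform?` above: the answer for each (edge, platform) pair is memoised in a `Hash` whose default block computes and stores the result, keyed by a small value object implementing `#eql?` and `#hash`. A minimal, self-contained sketch of that idiom follows; the `Key` struct and the `:ios` stand-in check are hypothetical.

Key = Struct.new(:edge_id, :platform) do
  def eql?(other)
    edge_id == other.edge_id && platform == other.platform
  end

  def hash
    edge_id ^ platform.hash # same shape as EdgeAndPlatform#hash above
  end
end

calls = 0
cache = Hash.new do |hash, key|
  calls += 1                          # count how often the expensive path runs
  hash[key] = (key.platform == :ios)  # stand-in for the real dependency check
end

3.times { cache[Key.new(42, :ios)] }
p calls                    # => 1, the block ran once for this key
p cache[Key.new(42, :ios)] # => true, served from the cache afterwards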
+ end + + class LazySpecification < DelegateClass(Specification) + attr_reader :name, :version, :spec_source + + def initialize(name, version, spec_source) + @name = name + @version = version + @spec_source = spec_source + end + + def subspec_by_name(name = nil, raise_if_missing = true, include_non_library_specifications = false) + subspec = + if !name || name == self.name + self + else + specification.subspec_by_name(name, raise_if_missing, include_non_library_specifications) + end + return unless subspec + + SpecWithSource.new subspec, spec_source + end + + def specification + @specification ||= spec_source.specification(name, version.version) + end + alias __getobj__ specification + + undef is_a? + end + + class External + def all_specifications(_warn_for_multiple_pod_sources, requirement) + if requirement.satisfied_by? specification.version + [specification] + else + [] + end + end + end + + # returns the highest versioned spec last + def all_specifications(warn_for_multiple_pod_sources, requirement) + @all_specifications ||= {} + @all_specifications[requirement] ||= begin + sources_by_version = {} + versions_by_source.each do |source, versions| + versions.each do |v| + next unless requirement.satisfied_by?(v) + + (sources_by_version[v] ||= []) << source + end + end + + if warn_for_multiple_pod_sources + duplicate_versions = sources_by_version.select { |_version, sources| sources.count > 1 } + + duplicate_versions.each do |version, sources| + UI.warn "Found multiple specifications for `#{name} (#{version})`:\n" + + sources. + map { |s| s.specification_path(name, version) }. + map { |v| "- #{v}" }.join("\n") + end + end + + # sort versions from high to low + sources_by_version.sort_by(&:first).flat_map do |version, sources| + # within each version, we want the prefered (first-specified) source + # to be the _last_ one + sources.reverse_each.map { |source| LazySpecification.new(name, version, source) } + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver/resolver_specification.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver/resolver_specification.rb new file mode 100644 index 0000000..49d4eda --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/resolver/resolver_specification.rb @@ -0,0 +1,41 @@ +module Pod + class Resolver + # A small container that wraps a resolved specification for a given target definition. Additional metadata + # is included here such as if the specification is only used by tests. + # + class ResolverSpecification + # @return [Specification] the specification that was resolved + # + attr_reader :spec + + # @return [Source] the spec repo source the specification came from + # + attr_reader :source + + # @return [Bool] whether this resolved specification is used by non-library targets. + # + attr_reader :used_by_non_library_targets_only + alias used_by_non_library_targets_only? used_by_non_library_targets_only + + def initialize(spec, used_by_non_library_targets_only, source) + @spec = spec + @used_by_non_library_targets_only = used_by_non_library_targets_only + @source = source + end + + def name + spec.name + end + + def root + spec.root + end + + def ==(other) + self.class == other.class && + spec == other.spec && + used_by_non_library_targets_only? == other.used_by_non_library_targets_only? 
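The `LazySpecification` above is built on Ruby's `DelegateClass`: construction only records a name, version and source, and the full podspec is loaded the first time a delegated call forces `__getobj__`. A minimal sketch of the same pattern with a hypothetical `Report` class (not a CocoaPods type):

require 'delegate'

# Hypothetical heavyweight object standing in for Pod::Specification.
class Report
  def initialize(name)
    @name = name
    puts "loading #{name}" # visible side effect to show when loading happens
  end

  def summary
    "summary of #{@name}"
  end
end

# Same shape as LazySpecification: cheap to build, the delegate is only
# materialised on first use because __getobj__ is aliased to a memoised reader.
class LazyReport < DelegateClass(Report)
  def initialize(name)
    @name = name
  end

  def report
    @report ||= Report.new(@name)
  end
  alias __getobj__ report
end

lazy = LazyReport.new('Q1') # prints nothing yet
puts lazy.summary           # "loading Q1", then "summary of Q1"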
+ end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox.rb new file mode 100644 index 0000000..d7030a1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox.rb @@ -0,0 +1,470 @@ +require 'fileutils' + +module Pod + # The sandbox provides support for the directory that CocoaPods uses for an + # installation. In this directory the Pods projects, the support files and + # the sources of the Pods are stored. + # + # CocoaPods assumes to have control of the sandbox. + # + # Once completed the sandbox will have the following file structure: + # + # Pods + # | + # +-- Headers + # | +-- Private + # | | +-- [Pod Name] + # | +-- Public + # | +-- [Pod Name] + # | + # +-- Local Podspecs + # | +-- External Sources + # | +-- Normal Sources + # | + # +-- Target Support Files + # | +-- [Target Name] + # | +-- Pods-acknowledgements.markdown + # | +-- Pods-acknowledgements.plist + # | +-- Pods-dummy.m + # | +-- Pods-prefix.pch + # | +-- Pods.xcconfig + # | + # +-- [Pod Name] + # | + # +-- Manifest.lock + # | + # +-- Pods.xcodeproj + # (if installation option 'generate_multiple_pod_projects' is enabled) + # | + # +-- PodTarget1.xcodeproj + # | + # ... + # | + # +-- PodTargetN.xcodeproj + # + # + class Sandbox + autoload :FileAccessor, 'cocoapods/sandbox/file_accessor' + autoload :HeadersStore, 'cocoapods/sandbox/headers_store' + autoload :PathList, 'cocoapods/sandbox/path_list' + autoload :PodDirCleaner, 'cocoapods/sandbox/pod_dir_cleaner' + autoload :PodspecFinder, 'cocoapods/sandbox/podspec_finder' + + # @return [Pathname] the root of the sandbox. + # + attr_reader :root + + # @return [HeadersStore] the header directory for the user targets. + # + attr_reader :public_headers + + # Initialize a new instance + # + # @param [String, Pathname] root @see #root + # + def initialize(root) + FileUtils.mkdir_p(root) + @root = Pathname.new(root).realpath + @public_headers = HeadersStore.new(self, 'Public', :public) + @predownloaded_pods = [] + @checkout_sources = {} + @development_pods = {} + @pods_with_absolute_path = [] + @stored_podspecs = {} + end + + # @return [Lockfile] the manifest which contains the information about the + # installed pods or `nil` if one is not present. + # + def manifest + @manifest ||= begin + Lockfile.from_file(manifest_path) if manifest_path.exist? + end + end + + # Removes the files of the Pod with the given name from the sandbox. + # + # @return [void] + # + def clean_pod(name) + root_name = Specification.root_name(name) + unless local?(root_name) + path = pod_dir(name) + path.rmtree if path.exist? + end + podspec_path = specification_path(name) + podspec_path.rmtree if podspec_path + pod_target_project_path = pod_target_project_path(name) + pod_target_project_path.rmtree if pod_target_project_path.exist? + end + + # Prepares the sandbox for a new installation removing any file that will + # be regenerated and ensuring that the directories exists. + # + def prepare + FileUtils.mkdir_p(headers_root) + FileUtils.mkdir_p(sources_root) + FileUtils.mkdir_p(specifications_root) + FileUtils.mkdir_p(target_support_files_root) + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "#<#{self.class}> with root #{root}" + end + + #-------------------------------------------------------------------------# + + public + + # @!group Paths + + # @return [Pathname] the path of the manifest. 
+ # + def manifest_path + root + 'Manifest.lock' + end + + # @return [Pathname] the path of the Pods project. + # + def project_path + root + 'Pods.xcodeproj' + end + + # @return [Pathname] the path of the installation cache. + # + def project_installation_cache_path + root.join('.project_cache', 'installation_cache.yaml') + end + + # @return [Pathname] the path of the metadata cache. + # + def project_metadata_cache_path + root.join('.project_cache', 'metadata_cache.yaml') + end + + # @return [Pathname] the path of the version cache. + # + def project_version_cache_path + root.join('.project_cache', 'version') + end + + # @param [String] pod_target_name + # Name of the pod target used to generate the path of its Xcode project. + # + # @return [Pathname] the path of the project for a pod target. + # + def pod_target_project_path(pod_target_name) + root + "#{pod_target_name}.xcodeproj" + end + + # Returns the path for the directory where the support files of + # a target are stored. + # + # @param [String] name + # The name of the target. + # + # @return [Pathname] the path of the support files. + # + def target_support_files_dir(name) + target_support_files_root + name + end + + # Returns the path where the Pod with the given name is stored, taking into + # account whether the Pod is locally sourced. + # + # @param [String] name + # The name of the Pod. + # + # @return [Pathname] the path of the Pod. + # + def pod_dir(name) + root_name = Specification.root_name(name) + if local?(root_name) + Pathname.new(development_pods[root_name].dirname) + else + sources_root + root_name + end + end + + # Returns true if the path as originally specified was absolute. + # + # @param [String] name + # + # @return [Bool] true if originally absolute + # + def local_path_was_absolute?(name) + @pods_with_absolute_path.include? name + end + + # @return [Pathname] The directory where headers are stored. + # + def headers_root + root + 'Headers' + end + + # @return [Pathname] The directory where the downloaded sources of + # the Pods are stored. + # + def sources_root + root + end + + # @return [Pathname] the path for the directory where the + # specifications are stored. + # + def specifications_root + root + 'Local Podspecs' + end + + # @return [Pathname] The directory where the files generated by + # CocoaPods to support the umbrella targets are stored. + # + def target_support_files_root + root + 'Target Support Files' + end + + #-------------------------------------------------------------------------# + + public + + # @!group Specification store + + # Returns the specification for the Pod with the given name. + # + # @param [String] name + # the name of the Pod for which the specification is requested. + # + # @return [Specification] the specification if the file is found. + # + def specification(name) + @stored_podspecs[name] ||= if file = specification_path(name) + original_path = development_pods[name] + Specification.from_file(original_path || file) + end + end + + # Returns the path of the specification for the Pod with the + # given name, if one is stored. + # + # @param [String] name + # the name of the Pod for which the podspec file is requested. + # + # @return [Pathname] the path or nil. + # @return [Nil] if the podspec is not stored. + # + def specification_path(name) + name = Specification.root_name(name) + path = specifications_root + "#{name}.podspec" + if path.exist? + path + else + path = specifications_root + "#{name}.podspec.json" + if path.exist? 
+ path + end + end + end + + # Stores a specification in the `Local Podspecs` folder. + # + # @param [String] name + # the name of the pod + # + # @param [String, Pathname, Specification] podspec + # The contents of the specification (String) or the path to a + # podspec file (Pathname). + # + # @return [void] + # + # + def store_podspec(name, podspec, _external_source = false, json = false) + file_name = json ? "#{name}.podspec.json" : "#{name}.podspec" + output_path = specifications_root + file_name + + spec = + case podspec + when String + Sandbox.update_changed_file(output_path, podspec) + Specification.from_file(output_path) + when Pathname + unless podspec.exist? + raise Informative, "No podspec found for `#{name}` in #{podspec}" + end + FileUtils.copy(podspec, output_path) + Specification.from_file(podspec) + when Specification + raise ArgumentError, 'can only store Specification objects as json' unless json + Sandbox.update_changed_file(output_path, podspec.to_pretty_json) + podspec.dup + else + raise ArgumentError, "Unknown type for podspec: #{podspec.inspect}" + end + + # we force the file to be the file in the sandbox, so specs that have been serialized to + # json maintain a consistent checksum. + # this is safe to do because `spec` is always a clean instance + spec.defined_in_file = output_path + + unless spec.name == name + raise Informative, "The name of the given podspec `#{spec.name}` doesn't match the expected one `#{name}`" + end + @stored_podspecs[spec.name] = spec + end + + #-------------------------------------------------------------------------# + + public + + # @!group Pods information + + # Marks a Pod as pre-downloaded + # + # @param [String] name + # The name of the Pod. + # + # @return [void] + # + def store_pre_downloaded_pod(name) + root_name = Specification.root_name(name) + predownloaded_pods << root_name + end + + # @return [Array] The names of the pods that have been + # pre-downloaded from an external source. + # + attr_reader :predownloaded_pods + + # Checks if a Pod has been pre-downloaded by the resolver in order to fetch + # the podspec. + # + # @param [String] name + # The name of the Pod. + # + # @return [Bool] Whether the Pod has been pre-downloaded. + # + def predownloaded?(name) + root_name = Specification.root_name(name) + predownloaded_pods.include?(root_name) + end + + #--------------------------------------# + + # Stores the local path of a Pod. + # + # @param [String] name + # The name of the Pod. + # + # @param [Hash] source + # The hash which contains the options as returned by the + # downloader. + # + # @return [void] + # + def store_checkout_source(name, source) + root_name = Specification.root_name(name) + checkout_sources[root_name] = source + end + + # Removes the checkout source of a Pod. + # + # @param [String] name + # The name of the Pod. + # + # @return [void] + # + def remove_checkout_source(name) + root_name = Specification.root_name(name) + checkout_sources.delete(root_name) + end + + # Removes local podspec a Pod. + # + # @param [String] name + # The name of the Pod. + # + # @return [void] + # + def remove_local_podspec(name) + local_podspec = specification_path(name) + FileUtils.rm(local_podspec) if local_podspec + end + + # @return [Hash{String=>Hash}] The options necessary to recreate the exact + # checkout of a given Pod grouped by its name. + # + attr_reader :checkout_sources + + #--------------------------------------# + + # Stores the local path of a Pod. + # + # @param [String] name + # The name of the Pod. 
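A hedged usage sketch of the `Sandbox` API shown above, assuming the `cocoapods` gem is loaded; the directory name and pod names are hypothetical examples, not anything the code requires.

require 'cocoapods'

sandbox = Pod::Sandbox.new('Pods')      # creates ./Pods if needed

sandbox.manifest_path                   # => <sandbox root>/Manifest.lock
sandbox.pod_dir('Alamofire')            # => <sandbox root>/Alamofire for a non-local pod
sandbox.specification_path('Alamofire') # => nil until a podspec has been stored

# Pre-downloaded pods are tracked by their root name, so a subspec marks the
# whole pod as already fetched.
sandbox.store_pre_downloaded_pod('Alamofire/Core')
sandbox.predownloaded?('Alamofire')     # => true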
+ # + # @param [Pathname, String] path + # The path to the local Podspec + # + # @param [Bool] was_absolute + # True if the specified local path was absolute. + # + # @return [void] + # + def store_local_path(name, path, was_absolute = false) + root_name = Specification.root_name(name) + path = Pathname.new(path) unless path.is_a?(Pathname) + development_pods[root_name] = path + @pods_with_absolute_path << root_name if was_absolute + end + + # @return [Hash{String=>Pathname}] The path of the Pods' podspecs with a local source + # grouped by their root name. + # + attr_reader :development_pods + + # Checks if a Pod is locally sourced? + # + # @param [String] name + # The name of the Pod. + # + # @return [Bool] Whether the Pod is locally sourced. + # + def local?(name) + !local_podspec(name).nil? + end + + # @param [String] name + # The name of a locally specified Pod + # + # @return [Pathname] Path to the local Podspec of the Pod + # + def local_podspec(name) + root_name = Specification.root_name(name) + development_pods[root_name] + end + + # @!group Convenience Methods + + # Writes a file if it does not exist or if its contents have changed. + # + # @param [Pathname] path + # The path to read from and write to. + # + # @param [String] contents + # The contents to write if they do not match or the file does not exist. + # + # @return [void] + # + def self.update_changed_file(path, contents) + if path.exist? + content_stream = StringIO.new(contents) + identical = File.open(path, 'rb') { |f| FileUtils.compare_stream(f, content_stream) } + return if identical + end + File.open(path, 'w') { |f| f.write(contents) } + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/file_accessor.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/file_accessor.rb new file mode 100644 index 0000000..b7771a1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/file_accessor.rb @@ -0,0 +1,523 @@ +require 'cocoapods/xcode/linkage_analyzer' + +module Pod + class Sandbox + # Resolves the file patterns of a specification against its root directory, + # taking into account any exclude pattern and the default extensions to use + # for directories. + # + # @note The FileAccessor always returns absolute paths. + # + class FileAccessor + HEADER_EXTENSIONS = Xcodeproj::Constants::HEADER_FILES_EXTENSIONS + SOURCE_FILE_EXTENSIONS = (%w(.m .mm .i .c .cc .cxx .cpp .c++ .swift) + HEADER_EXTENSIONS).uniq.freeze + + GLOB_PATTERNS = { + :readme => 'readme{*,.*}'.freeze, + :license => 'licen{c,s}e{*,.*}'.freeze, + :source_files => "*{#{SOURCE_FILE_EXTENSIONS.join(',')}}".freeze, + :public_header_files => "*{#{HEADER_EXTENSIONS.join(',')}}".freeze, + :podspecs => '*.{podspec,podspec.json}'.freeze, + :docs => 'doc{s}{*,.*}/**/*'.freeze, + }.freeze + + # @return [Sandbox::PathList] the directory where the source of the Pod + # is located. + # + attr_reader :path_list + + # @return [Specification::Consumer] the consumer of the specification for + # which the file patterns should be resolved. 
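`Sandbox.update_changed_file` above only rewrites a file when its contents actually differ, which keeps modification times stable. A small sketch, assuming the `cocoapods` gem is loaded; the temporary directory and file name are hypothetical.

require 'cocoapods'
require 'tmpdir'

Dir.mktmpdir do |dir|
  path = Pathname.new(dir) + 'Example.xcconfig'

  Pod::Sandbox.update_changed_file(path, "A=1\n") # file absent: written
  first_mtime = path.mtime

  sleep 1
  Pod::Sandbox.update_changed_file(path, "A=1\n") # identical contents: untouched
  path.mtime == first_mtime                       # => true

  Pod::Sandbox.update_changed_file(path, "A=2\n") # changed contents: rewritten
end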
+ # + attr_reader :spec_consumer + + # Initialize a new instance + # + # @param [Sandbox::PathList, Pathname] path_list @see #path_list + # @param [Specification::Consumer] spec_consumer @see #spec_consumer + # + def initialize(path_list, spec_consumer) + if path_list.is_a?(PathList) + @path_list = path_list + else + @path_list = PathList.new(path_list) + end + @spec_consumer = spec_consumer + + unless @spec_consumer + raise Informative, 'Attempt to initialize File Accessor without a specification consumer.' + end + end + + # @return [Pathname] the directory which contains the files of the Pod. + # + def root + path_list.root if path_list + end + + # @return [Specification] the specification. + # + def spec + spec_consumer.spec + end + + # @return [Specification] the platform used to consume the specification. + # + def platform_name + spec_consumer.platform_name + end + + # @return [String] A string suitable for debugging. + # + def inspect + "<#{self.class} spec=#{spec.name} platform=#{platform_name} root=#{root}>" + end + + #-----------------------------------------------------------------------# + + public + + # @!group Paths + + # @return [Array] the source files of the specification. + # + def source_files + paths_for_attribute(:source_files) + end + + # @return [Array] the source files of the specification that + # use ARC. + # + def arc_source_files + case spec_consumer.requires_arc + when TrueClass + source_files + when FalseClass + [] + else + paths_for_attribute(:requires_arc) & source_files + end + end + + # @return [Array] the source files of the specification that + # do not use ARC. + # + def non_arc_source_files + source_files - arc_source_files + end + + # @return [Array] the headers of the specification. + # + def headers + extensions = HEADER_EXTENSIONS + source_files.select { |f| extensions.include?(f.extname) } + end + + # @param [Boolean] include_frameworks + # Whether or not to include the headers of the vendored frameworks. + # Defaults to not include them. + # + # @return [Array] the public headers of the specification. + # + def public_headers(include_frameworks = false) + public_headers = public_header_files + project_headers = project_header_files + private_headers = private_header_files + if public_headers.nil? || public_headers.empty? + header_files = headers + else + header_files = public_headers + end + header_files += vendored_frameworks_headers if include_frameworks + header_files - project_headers - private_headers + end + + # @return [Array] The project headers of the specification. + # + def project_headers + project_header_files + end + + # @return [Array] The private headers of the specification. + # + def private_headers + private_header_files + end + + # @return [Array] the resources of the specification. + # + def resources + paths_for_attribute(:resources, true) + end + + # @return [Array] the files of the specification to preserve. + # + def preserve_paths + paths_for_attribute(:preserve_paths, true) + end + + # @return [Array] The paths of the framework bundles that come + # shipped with the Pod. + # + def vendored_frameworks + paths_for_attribute(:vendored_frameworks, true) + end + + # @return [Array] The paths of the dynamic framework bundles + # that come shipped with the Pod. 
+ # + def vendored_dynamic_frameworks + (vendored_frameworks - vendored_xcframeworks).select do |framework| + Xcode::LinkageAnalyzer.dynamic_binary?(framework + framework.basename('.*')) + end + end + + # @return [Array] The paths of the dynamic xcframework bundles + # that come shipped with the Pod. + # + def vendored_static_xcframeworks + vendored_xcframeworks.select do |path| + Xcode::XCFramework.new(spec.name, path).build_type == BuildType.static_framework + end + end + + # @return [Array] The paths of the static (fake) framework + # bundles that come shipped with the Pod. + # + def vendored_static_frameworks + vendored_frameworks - vendored_dynamic_frameworks - vendored_xcframeworks + end + + # @return [Array] The paths of vendored .xcframework bundles + # that come shipped with the Pod. + # + def vendored_xcframeworks + vendored_frameworks.select do |framework| + File.extname(framework) == '.xcframework' + end + end + + # @param [Array] file_accessors + # The list of all file accessors to compute. + # + # @return [Array] The list of all file accessors that a target will integrate into the project. + # + def self.all_files(file_accessors) + files = [ + file_accessors.map(&:vendored_frameworks), + file_accessors.map(&:vendored_libraries), + file_accessors.map(&:resource_bundle_files), + file_accessors.map(&:license), + file_accessors.map(&:prefix_header), + file_accessors.map(&:preserve_paths), + file_accessors.map(&:readme), + file_accessors.map(&:resources), + file_accessors.map(&:on_demand_resources_files), + file_accessors.map(&:source_files), + file_accessors.map(&:module_map), + ] + files.flatten.compact.uniq + end + + # @param [Pathname] framework + # The vendored framework to search into. + # @return [Pathname] The path of the header directory of the + # vendored framework. + # + def self.vendored_frameworks_headers_dir(framework) + dir = framework + 'Headers' + dir.directory? ? dir.realpath : dir + end + + # @param [Pathname] framework + # The vendored framework to search into. + # @return [Array] The paths of the headers included in the + # vendored framework. + # + def self.vendored_frameworks_headers(framework) + headers_dir = vendored_frameworks_headers_dir(framework) + Pathname.glob(headers_dir + '**/' + GLOB_PATTERNS[:public_header_files]) + end + + # @param [String] target_name + # The target name this .xcframework belongs to + # + # @param [Pathname] framework_path + # The path to the .xcframework + # + # @return [Array] The paths to all the headers included in the + # vendored xcframework + # + def self.vendored_xcframework_headers(target_name, framework_path) + xcframework = Xcode::XCFramework.new(target_name, framework_path) + xcframework.slices.flat_map do |slice| + vendored_frameworks_headers(slice.path) + end + end + + # @return [Array] The paths of the framework headers that come + # shipped with the Pod. + # + def vendored_frameworks_headers + paths = (vendored_frameworks - vendored_xcframeworks).flat_map do |framework| + self.class.vendored_frameworks_headers(framework) + end.uniq + paths.concat Array.new(vendored_xcframeworks.flat_map do |framework| + self.class.vendored_xcframework_headers(spec.name, framework) + end) + paths + end + + # @return [Array] The paths of the library bundles that come + # shipped with the Pod. + # + def vendored_libraries + paths_for_attribute(:vendored_libraries) + end + + # @return [Array] The paths of the dynamic libraries + # that come shipped with the Pod. 
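The `vendored_*` readers above partition vendored frameworks by file extension and by whether the binary is dynamic. A pure-Ruby sketch of that set arithmetic with hypothetical file names; the real code asks `Xcode::LinkageAnalyzer`, which is replaced by a lookup table here.

vendored = %w(A.framework B.framework C.xcframework)
dynamic  = { 'A.framework' => true, 'B.framework' => false } # hypothetical linkage info

xcframeworks       = vendored.select { |f| File.extname(f) == '.xcframework' }
dynamic_frameworks = (vendored - xcframeworks).select { |f| dynamic[f] }
static_frameworks  = vendored - dynamic_frameworks - xcframeworks

p xcframeworks       # => ["C.xcframework"]
p dynamic_frameworks # => ["A.framework"]
p static_frameworks  # => ["B.framework"]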
+ # + def vendored_dynamic_libraries + vendored_libraries.select do |library| + Xcode::LinkageAnalyzer.dynamic_binary?(library) + end + end + + # @return [Array] The paths of the static libraries + # that come shipped with the Pod. + # + def vendored_static_libraries + vendored_libraries - vendored_dynamic_libraries + end + + # @return [Array] The paths of the dynamic binary artifacts + # that come shipped with the Pod. + # + def vendored_dynamic_artifacts + vendored_dynamic_libraries + vendored_dynamic_frameworks + end + + # @return [Array] The paths of the static binary artifacts + # that come shipped with the Pod. + # + def vendored_static_artifacts + vendored_static_libraries + vendored_static_frameworks + vendored_static_xcframeworks + end + + # @return [Hash{String => Array}] A hash that describes the + # resource bundles of the Pod. The keys represent the name of + # the bundle while the values the path of the resources. + # + def resource_bundles + result = {} + spec_consumer.resource_bundles.each do |name, file_patterns| + paths = expanded_paths(file_patterns, + :exclude_patterns => spec_consumer.exclude_files, + :include_dirs => true) + result[name] = paths + end + result + end + + # @return [Array] The paths of the files which should be + # included in resources bundles by the Pod. + # + def resource_bundle_files + resource_bundles.values.flatten + end + + # @return [Hash{String => Hash] The expanded paths of the on demand resources specified + # keyed by their tag including their category. + # + def on_demand_resources + result = {} + spec_consumer.on_demand_resources.each do |tag_name, file_patterns| + paths = expanded_paths(file_patterns[:paths], + :exclude_patterns => spec_consumer.exclude_files, + :include_dirs => true) + result[tag_name] = { :paths => paths, :category => file_patterns[:category] } + end + result + end + + # @return [Array] The expanded paths of the on demand resources. + # + def on_demand_resources_files + on_demand_resources.values.flat_map { |v| v[:paths] } + end + + # @return [Pathname] The of the prefix header file of the specification. + # + def prefix_header + if file = spec_consumer.prefix_header_file + path_list.root + file + end + end + + # @return [Pathname, nil] The path of the auto-detected README file. + # + def readme + path_list.glob([GLOB_PATTERNS[:readme]]).first + end + + # @return [Pathname] The path of the license file as indicated in the + # specification or auto-detected. + # + def license + spec_license || path_list.glob([GLOB_PATTERNS[:license]]).first + end + + # @return [Pathname, Nil] The path of the custom module map file of the + # specification, if specified. 
+ def module_map + if module_map = spec_consumer.module_map + path_list.root + module_map + end + end + + # @return [Array] The paths of auto-detected podspecs + # + def specs + path_list.glob([GLOB_PATTERNS[:podspecs]]) + end + + # @return [Array] The paths of auto-detected docs + # + def docs + path_list.glob([GLOB_PATTERNS[:docs]]) + end + + # @return [Pathname] The path of the license file specified in the + # specification, if it exists + # + def spec_license + if file = spec_consumer.license[:file] + absolute_path = root + file + absolute_path if File.exist?(absolute_path) + end + end + + # @return [Array] Paths to include for local pods to assist in development + # + def developer_files + podspecs = specs + result = [module_map, prefix_header] + + if license_path = spec_consumer.license[:file] + license_path = root + license_path + unless File.exist?(license_path) + UI.warn "A license was specified in podspec `#{spec.name}` but the file does not exist - #{license_path}" + end + end + + if podspecs.size <= 1 + result += [license, readme, podspecs, docs] + else + # Manually add non-globbing files since there are multiple podspecs in the same folder + result << podspec_file + if license_file = spec_license + absolute_path = root + license_file + result << absolute_path if File.exist?(absolute_path) + end + end + result.compact.flatten.sort + end + + #-----------------------------------------------------------------------# + + private + + # @!group Private paths + + # @return [Array] The paths of the user-specified public header + # files. + # + def public_header_files + paths_for_attribute(:public_header_files) + end + + # @return [Array] The paths of the user-specified project header + # files. + # + def project_header_files + paths_for_attribute(:project_header_files) + end + + # @return [Array] The paths of the user-specified private header + # files. + # + def private_header_files + paths_for_attribute(:private_header_files) + end + + # @return [Pathname] The path of the podspec matching @spec + # + def podspec_file + specs.lazy.select { |p| File.basename(p.to_s, '.*') == spec.name }.first + end + + #-----------------------------------------------------------------------# + + private + + # @!group Private helpers + + # Returns the list of the paths founds in the file system for the + # attribute with given name. It takes into account any dir pattern and + # any file excluded in the specification. + # + # @param [Symbol] attribute + # the name of the attribute. + # + # @return [Array] the paths. + # + def paths_for_attribute(attribute, include_dirs = false) + file_patterns = spec_consumer.send(attribute) + options = { + :exclude_patterns => spec_consumer.exclude_files, + :dir_pattern => GLOB_PATTERNS[attribute], + :include_dirs => include_dirs, + } + expanded_paths(file_patterns, options) + end + + # Matches the given patterns to the file present in the root of the path + # list. + # + # @param [Array] patterns + # The patterns to expand. + # + # @param [Hash] options + # The options to use to expand the patterns to file paths. + # + # @option options [String] :dir_pattern + # The pattern to add to directories. + # + # @option options [Array] :exclude_patterns + # The exclude patterns to pass to the PathList. + # + # @option options [Bool] :include_dirs + # Whether directories should be also included or just plain + # files. + # + # @raise [Informative] If the pod does not exists. + # + # @return [Array] A list of the paths. 
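A hedged sketch of driving the `FileAccessor` shown above against a local pod; the paths, pod name and platform are hypothetical, and the podspec is assumed to exist on disk.

require 'cocoapods'

spec     = Pod::Specification.from_file('development/MyPod/MyPod.podspec')
consumer = spec.consumer(Pod::Platform.ios)

# A Pathname is accepted and wrapped in a PathList internally.
accessor = Pod::Sandbox::FileAccessor.new(Pathname.new('development/MyPod'), consumer)

accessor.source_files   # absolute paths matched by s.source_files
accessor.public_headers # headers minus project and private headers
accessor.resources      # resources, with directories included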
+ # + def expanded_paths(patterns, options = {}) + return [] if patterns.empty? + path_list.glob(patterns, options).flatten.compact.uniq + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/headers_store.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/headers_store.rb new file mode 100644 index 0000000..9442419 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/headers_store.rb @@ -0,0 +1,163 @@ +module Pod + class Sandbox + # Provides support for managing a header directory. It also keeps track of + # the header search paths. + # + class HeadersStore + SEARCH_PATHS_KEY = Struct.new(:platform_name, :target_name, :use_modular_headers) + + # @return [Pathname] the absolute path of this header directory. + # + def root + sandbox.headers_root + @relative_path + end + + # @return [Sandbox] the sandbox where this header directory is stored. + # + attr_reader :sandbox + + # @param [Sandbox] @see #sandbox + # + # @param [String] relative_path + # the relative path to the sandbox root and hence to the Pods + # project. + # + # @param [Symbol] visibility_scope + # the header visibility scope to use in this store. Can be `:private` or `:public`. + # + def initialize(sandbox, relative_path, visibility_scope) + @sandbox = sandbox + @relative_path = relative_path + @search_paths = [] + @search_paths_cache = {} + @visibility_scope = visibility_scope + end + + # @param [Platform] platform + # the platform for which the header search paths should be + # returned. + # + # @param [String] target_name + # the target for which the header search paths should be + # returned. Can be `nil` in which case all headers that match the platform + # will be returned. + # + # @param [Boolean] use_modular_headers + # whether the search paths generated should use modular (stricter) style. + # + # @return [Array] All the search paths of the header directory in + # xcconfig format. The paths are specified relative to the pods + # root with the `${PODS_ROOT}` variable. + # + def search_paths(platform, target_name = nil, use_modular_headers = false) + key = SEARCH_PATHS_KEY.new(platform.name, target_name, use_modular_headers) + if (cached = @search_paths_cache[key]) + return cached + end + search_paths = @search_paths.select do |entry| + matches_platform = entry[:platform] == platform.name + matches_target = target_name.nil? || (File.basename(entry[:path]) == target_name) + matches_platform && matches_target + end + headers_dir = root.relative_path_from(sandbox.root).dirname + @search_paths_cache[key] = search_paths.flat_map do |entry| + paths = [] + paths << "${PODS_ROOT}/#{headers_dir}/#{@relative_path}" if !use_modular_headers || @visibility_scope == :public + paths << "${PODS_ROOT}/#{headers_dir}/#{entry[:path]}" if !use_modular_headers || @visibility_scope == :private + paths + end.tap(&:uniq!).freeze + end + + # Removes the entire root directory. + # + # @return [void] + # + def implode! + root.rmtree if root.exist? + end + + # Removes the directory at the given path relative to the root. + # + # @param [Pathname] path + # The path used to join with #root and remove. + # + # @return [void] + # + def implode_path!(path) + path = root.join(path) + path.rmtree if path.exist? + end + + #-----------------------------------------------------------------------# + + public + + # @!group Adding headers + + # Adds headers to the directory. 
+ # + # @param [Pathname] namespace + # the path where the header file should be stored relative to the + # headers directory. + # + # @param [Array] relative_header_paths + # the path of the header file relative to the Pods project + # (`PODS_ROOT` variable of the xcconfigs). + # + # @note This method does _not_ add the files to the search paths. + # + # @return [Array] + # + def add_files(namespace, relative_header_paths) + root.join(namespace).mkpath unless relative_header_paths.empty? + relative_header_paths.map do |relative_header_path| + add_file(namespace, relative_header_path, :mkdir => false) + end + end + + # Adds a header to the directory. + # + # @param [Pathname] namespace + # the path where the header file should be stored relative to the + # headers directory. + # + # @param [Pathname] relative_header_path + # the path of the header file relative to the Pods project + # (`PODS_ROOT` variable of the xcconfigs). + # + # @note This method does _not_ add the file to the search paths. + # + # @return [Pathname] + # + def add_file(namespace, relative_header_path, mkdir: true) + namespaced_path = root + namespace + namespaced_path.mkpath if mkdir + + absolute_source = (sandbox.root + relative_header_path) + source = absolute_source.relative_path_from(namespaced_path) + if Gem.win_platform? + FileUtils.ln(absolute_source, namespaced_path, :force => true) + else + FileUtils.ln_sf(source, namespaced_path) + end + namespaced_path + relative_header_path.basename + end + + # Adds an header search path to the sandbox. + # + # @param [Pathname] path + # the path to add. + # + # @param [String] platform + # the platform the search path applies to + # + # @return [void] + # + def add_search_path(path, platform) + @search_paths << { :platform => platform.name, :path => File.join(@relative_path, path) } + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/path_list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/path_list.rb new file mode 100644 index 0000000..33a3257 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/path_list.rb @@ -0,0 +1,242 @@ +require 'active_support/multibyte/unicode' +require 'find' + +module Pod + class Sandbox + # The PathList class is designed to perform multiple glob matches against + # a given directory. Basically, it generates a list of all the children + # paths and matches the globs patterns against them, resulting in just one + # access to the file system. + # + # @note A PathList once it has generated the list of the paths this is + # updated only if explicitly requested by calling + # {#read_file_system} + # + class PathList + # @return [Pathname] The root of the list whose files and directories + # are used to perform the matching operations. + # + attr_reader :root + + # Initialize a new instance + # + # @param [Pathname] root @see #root + # + def initialize(root) + root_dir = root.to_s.unicode_normalize(:nfkc) + @root = Pathname.new(root_dir) + @glob_cache = {} + end + + # @return [Array] The list of absolute the path of all the files + # contained in {root}. + # + def files + read_file_system unless @files + @files + end + + # @return [Array] The list of absolute the path of all the + # directories contained in {root}. 
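A hedged sketch of the `HeadersStore` API shown above: link a header under a namespace, register a search path, and read back the xcconfig-style paths. The pod name and header path are hypothetical, and the header is assumed to exist under the sandbox root.

require 'cocoapods'

sandbox = Pod::Sandbox.new('Pods')
store   = sandbox.public_headers # HeadersStore.new(sandbox, 'Public', :public)

# Paths are relative to the sandbox root; a symlink is created under
# Headers/Public/MyPod.
store.add_files('MyPod', [Pathname.new('MyPod/Sources/MyPod.h')])
store.add_search_path('MyPod', Pod::Platform.ios)

store.search_paths(Pod::Platform.ios)
# => ["${PODS_ROOT}/Headers/Public", "${PODS_ROOT}/Headers/Public/MyPod"]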
+ # + def dirs + read_file_system unless @dirs + @dirs + end + + # @return [void] Reads the file system and populates the files and paths + # lists. + # + def read_file_system + unless root.exist? + raise Informative, "Attempt to read non existent folder `#{root}`." + end + dirs = [] + files = [] + root_length = root.cleanpath.to_s.length + File::SEPARATOR.length + escaped_root = escape_path_for_glob(root) + Dir.glob(escaped_root + '**/*', File::FNM_DOTMATCH).each do |f| + directory = File.directory?(f) + # Ignore `.` and `..` directories + next if directory && f =~ /\.\.?$/ + + f = f.slice(root_length, f.length - root_length) + next if f.nil? + + (directory ? dirs : files) << f + end + + dirs.sort_by!(&:upcase) + files.sort_by!(&:upcase) + + @dirs = dirs + @files = files + @glob_cache = {} + end + + #-----------------------------------------------------------------------# + + public + + # @!group Globbing + + # Similar to {glob} but returns the absolute paths. + # + # @param [String,Array] patterns + # @see #relative_glob + # + # @param [Hash] options + # @see #relative_glob + # + # @return [Array] + # + def glob(patterns, options = {}) + cache_key = options.merge(:patterns => patterns) + @glob_cache[cache_key] ||= relative_glob(patterns, options).map { |p| root.join(p) } + end + + # The list of relative paths that are case insensitively matched by a + # given pattern. This method emulates {Dir#glob} with the + # {File::FNM_CASEFOLD} option. + # + # @param [String,Array] patterns + # A single {Dir#glob} like pattern, or a list of patterns. + # + # @param [Hash] options + # + # @option options [String] :dir_pattern + # An optional pattern to append to a pattern, if it is the path + # to a directory. + # + # @option options [Array] :exclude_patterns + # Exclude specific paths given by those patterns. + # + # @option options [Array] :include_dirs + # Additional paths to take into account for matching. + # + # @return [Array] + # + def relative_glob(patterns, options = {}) + return [] if patterns.empty? + + dir_pattern = options[:dir_pattern] + exclude_patterns = options[:exclude_patterns] + include_dirs = options[:include_dirs] + + if include_dirs + full_list = files + dirs + else + full_list = files + end + patterns_array = Array(patterns) + exact_matches = (full_list & patterns_array).to_set + + unless patterns_array.empty? + list = patterns_array.flat_map do |pattern| + if exact_matches.include?(pattern) + pattern + else + if directory?(pattern) && dir_pattern + pattern += '/' unless pattern.end_with?('/') + pattern += dir_pattern + end + expanded_patterns = dir_glob_equivalent_patterns(pattern) + full_list.select do |path| + expanded_patterns.any? do |p| + File.fnmatch(p, path, File::FNM_CASEFOLD | File::FNM_PATHNAME) + end + end + end + end + end + + list = list.map { |path| Pathname.new(path) } + if exclude_patterns + exclude_options = { :dir_pattern => '**/*', :include_dirs => include_dirs } + list -= relative_glob(exclude_patterns, exclude_options) + end + list + end + + #-----------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Bool] Wether a path is a directory. The result of this method + # computed without accessing the file system and is case + # insensitive. + # + # @param [String, Pathname] sub_path The path that could be a directory. + # + def directory?(sub_path) + sub_path = sub_path.to_s.downcase.sub(/\/$/, '') + dirs.any? 
{ |dir| dir.downcase == sub_path } + end + + # @return [Array] An array of patterns converted from a + # {Dir.glob} pattern to patterns that {File.fnmatch} can handle. + # This is used by the {#relative_glob} method to emulate + # {Dir.glob}. + # + # The expansion provides support for: + # + # - Literals + # + # dir_glob_equivalent_patterns('{file1,file2}.{h,m}') + # => ["file1.h", "file1.m", "file2.h", "file2.m"] + # + # - Matching the direct children of a directory with `**` + # + # dir_glob_equivalent_patterns('Classes/**/file.m') + # => ["Classes/**/file.m", "Classes/file.m"] + # + # @param [String] pattern A {Dir#glob} like pattern. + # + def dir_glob_equivalent_patterns(pattern) + pattern = pattern.gsub('/**/', '{/**/,/}') + values_by_set = {} + pattern.scan(/\{[^}]*\}/) do |set| + values = set.gsub(/[{}]/, '').split(',') + values_by_set[set] = values + end + + if values_by_set.empty? + [pattern] + else + patterns = [pattern] + values_by_set.each do |set, values| + patterns = patterns.flat_map do |old_pattern| + values.map do |value| + old_pattern.gsub(set, value) + end + end + end + patterns + end + end + + # Escapes the glob metacharacters from a given path so it can used in + # Dir#glob and similar methods. + # + # @note See CocoaPods/CocoaPods#862. + # + # @param [String, Pathname] path + # The path to escape. + # + # @return [Pathname] The escaped path. + # + def escape_path_for_glob(path) + result = path.to_s + characters_to_escape = ['[', ']', '{', '}', '?', '*'] + characters_to_escape.each do |character| + result.gsub!(character, "\\#{character}") + end + Pathname.new(result) + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/pod_dir_cleaner.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/pod_dir_cleaner.rb new file mode 100644 index 0000000..c47cf96 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/pod_dir_cleaner.rb @@ -0,0 +1,71 @@ +module Pod + class Sandbox + class PodDirCleaner + attr_reader :root + attr_reader :specs_by_platform + + def initialize(root, specs_by_platform) + @root = root + @specs_by_platform = specs_by_platform + end + + # Removes all the files not needed for the installation according to the + # specs by platform. + # + # @return [void] + # + def clean! + clean_paths.each { |path| FileUtils.rm_rf(path) } if root.exist? + end + + private + + # @return [Array] the file accessors for all the + # specifications on their respective platform. + # + def file_accessors + @file_accessors ||= specs_by_platform.flat_map do |platform, specs| + specs.flat_map { |spec| Sandbox::FileAccessor.new(path_list, spec.consumer(platform)) } + end + end + + # @return [Sandbox::PathList] The path list for this Pod. + # + def path_list + @path_list ||= Sandbox::PathList.new(root) + end + + # Finds the absolute paths, including hidden ones, of the files + # that are not used by the pod and thus can be safely deleted. + # + # @note Implementation detail: Don't use `Dir#glob` as there is an + # unexplained issue (#568, #572 and #602). + # + # @todo The paths are down-cased for the comparison as issues similar + # to #602 lead the files not being matched and so cleaning all + # the files. This solution might create side effects. + # + # @return [Array] The paths that can be deleted. 
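A hedged sketch of the `PathList` API shown above: the directory tree is read once and cached, and later globs are answered case-insensitively from memory. The root and patterns are hypothetical, and the directory is assumed to exist.

require 'cocoapods'

path_list = Pod::Sandbox::PathList.new(Pathname.new('development/MyPod'))

path_list.files.length # one Dir.glob over the tree, then cached

path_list.glob(
  ['Sources/**/*.{h,m,swift}'],
  :exclude_patterns => ['Sources/**/*Tests*'],
) # => absolute Pathnames matching the patterns, minus the exclusions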
+ # + def clean_paths + cached_used = used_files.map(&:downcase) + glob_options = File::FNM_DOTMATCH | File::FNM_CASEFOLD + files = Pathname.glob(root + '**/*', glob_options).map(&:to_s) + cached_used_set = cached_used.to_set + files.reject do |candidate| + candidate = candidate.downcase + candidate.end_with?('.', '..') || cached_used_set.include?(candidate) || cached_used.any? do |path| + path.include?(candidate) || candidate.include?(path) + end + end + end + + # @return [Array] The absolute path of all the files used by the + # specifications (according to their platform) of this Pod. + # + def used_files + FileAccessor.all_files(file_accessors).map(&:to_s) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/podspec_finder.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/podspec_finder.rb new file mode 100644 index 0000000..6a23cfe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sandbox/podspec_finder.rb @@ -0,0 +1,23 @@ +module Pod + class Sandbox + class PodspecFinder + attr_reader :root + + def initialize(root) + @root = root + end + + def podspecs + return @specs_by_name if @specs_by_name + @specs_by_name = {} + spec_files = Pathname.glob(root + '{,*}.podspec{,.json}') + spec_files.sort_by { |p| -p.to_path.split(File::SEPARATOR).size }.each do |file| + spec = Specification.from_file(file) + spec.validate_cocoapods_version + @specs_by_name[spec.name] = spec + end + @specs_by_name + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sources_manager.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sources_manager.rb new file mode 100644 index 0000000..540915d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/sources_manager.rb @@ -0,0 +1,221 @@ +require 'cocoapods-core/source' +require 'cocoapods/open-uri' +require 'netrc' +require 'set' +require 'rest' +require 'yaml' + +module Pod + class Source + class Manager + # Returns the source whose {Source#url} is equal to `url`, adding the repo + # in a manner similarly to `pod repo add` if it is not found. + # + # @raise If no source with the given `url` could be created, + # + # @return [Source] The source whose {Source#url} is equal to `url`, + # + # @param [String] url + # The URL of the source. + # + def find_or_create_source_with_url(url) + source_with_url(url) || create_source_with_url(url) + end + + # Adds the source whose {Source#url} is equal to `url`, + # in a manner similarly to `pod repo add` if it is not found. + # + # @raise If no source with the given `url` could be created, + # + # @return [Source] The source whose {Source#url} is equal to `url`, + # + # @param [String] url + # The URL of the source. + # + def create_source_with_url(url) + name = name_for_url(url) + is_cdn = cdn_url?(url) + + # Hack to ensure that `repo add` output is shown. + previous_title_level = UI.title_level + UI.title_level = 0 + + begin + if is_cdn + Command::Repo::AddCDN.parse([name, url]).run + else + Command::Repo::Add.parse([name, url]).run + end + rescue Informative => e + message = "Unable to add a source with url `#{url}` " \ + "named `#{name}`.\n" + message << "(#{e})\n" if Config.instance.verbose? + message << 'You can try adding it manually in ' \ + "`#{Config.instance.repos_dir}` or via `pod repo add`." 
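A hedged sketch of the `PodspecFinder` shown above, which collects every `*.podspec`/`*.podspec.json` at the root of a checkout; the directory is a hypothetical example.

require 'cocoapods'

finder = Pod::Sandbox::PodspecFinder.new(Pathname.new('development/MyPods'))

finder.podspecs.each do |name, spec|
  puts "#{name} #{spec.version}" # e.g. "MyPod 1.2.0"
end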
+ raise Informative, message + ensure + UI.title_level = previous_title_level + end + source = source_with_url(url) + + raise "Unable to create a source with URL #{url}" unless source + + source + end + + # Determines whether `url` is a CocoaPods CDN URL. + # + # @return [Boolean] whether `url` is a CocoaPods CDN URL, + # + # @param [String] url + # The URL of the source. + # + def cdn_url?(url) + return false unless url =~ %r{^https?:\/\/} + + uri_options = {} + + netrc_info = Netrc.read + uri = URI.parse(url) + return false unless uri.userinfo.nil? + + netrc_host = uri.host + credentials = netrc_info[netrc_host] + uri_options[:http_basic_authentication] = credentials if credentials + + response = OpenURI.open_uri(url.chomp('/') + '/CocoaPods-version.yml', uri_options) + response_hash = YAML.load(response.read) # rubocop:disable Security/YAMLLoad + response_hash.is_a?(Hash) && !Source::Metadata.new(response_hash).latest_cocoapods_version.nil? + rescue ::OpenURI::HTTPError, SocketError + return false + rescue => e + raise Informative, "Couldn't determine repo type for URL: `#{url}`: #{e}" + end + + # Returns the source whose {Source#name} or {Source#url} is equal to the + # given `name_or_url`. + # + # @return [Source] The source whose {Source#name} or {Source#url} is equal to the + # given `name_or_url`. + # + # @param [String] name_or_url + # The name or the URL of the source. + # + def source_with_name_or_url(name_or_url) + all.find { |s| s.name == name_or_url } || + find_or_create_source_with_url(name_or_url) + end + + # @return [Pathname] The path where the search index should be stored. + # + def search_index_path + @search_index_path ||= Config.instance.search_index_file + end + + # @!group Updating Sources + + # Updates the local clone of the spec-repo with the given name or of all + # the git repos if the name is omitted. + # + # @param [String] source_name + # + # @param [Bool] show_output + # + # @return [void] + # + def update(source_name = nil, show_output = false) + if source_name + sources = [updateable_source_named(source_name)] + else + sources = updateable_sources + end + + changed_spec_paths = {} + + # Do not perform an update if the repos dir has not been setup yet. + return unless repos_dir.exist? + + # Create the Spec_Lock file if needed and lock it so that concurrent + # repo updates do not cause each other to fail + File.open("#{repos_dir}/Spec_Lock", File::CREAT) do |f| + f.flock(File::LOCK_EX) + sources.each do |source| + UI.section "Updating spec repo `#{source.name}`" do + changed_source_paths = source.update(show_output) + changed_spec_paths[source] = changed_source_paths if changed_source_paths.count > 0 + source.verify_compatibility! + end + end + end + # Perform search index update operation in background. + update_search_index_if_needed_in_background(changed_spec_paths) + end + + # Adds the provided source to the list of sources + # + # @param [Source] source the source to add + # + def add_source(source) + all << source unless all.any? { |s| s.url == source || s.name == source.name } + end + end + + extend Executable + executable :git + + def repo_git(args, include_error: false) + Executable.capture_command('git', ['-C', repo] + args, + :capture => include_error ? :merge : :out, + :env => { + 'GIT_CONFIG' => nil, + 'GIT_DIR' => nil, + 'GIT_WORK_TREE' => nil, + } + ). 
+ first.strip + end + + def update_git_repo(show_output = false) + Config.instance.with_changes(:verbose => show_output) do + args = %W(-C #{repo} fetch origin) + args.push('--progress') if show_output + git!(args) + current_branch = git!(%W(-C #{repo} rev-parse --abbrev-ref HEAD)).strip + git!(%W(-C #{repo} reset --hard origin/#{current_branch})) + end + rescue + raise Informative, 'CocoaPods was not able to update the ' \ + "`#{name}` repo. If this is an unexpected issue " \ + 'and persists you can inspect it running ' \ + '`pod repo update --verbose`' + end + end + + class TrunkSource + def verify_compatibility! + super + latest_cocoapods_version = metadata.latest_cocoapods_version && Gem::Version.create(metadata.latest_cocoapods_version) + return unless Config.instance.new_version_message? && + latest_cocoapods_version && + latest_cocoapods_version > Gem::Version.new(Pod::VERSION) + + rc = latest_cocoapods_version.prerelease? + install_message = !Pathname(__FILE__).dirname.writable? ? 'sudo ' : '' + install_message << 'gem install cocoapods' + install_message << ' --pre' if rc + message = [ + '', + "CocoaPods #{latest_cocoapods_version} is available.".green, + "To update use: `#{install_message}`".green, + ("[!] This is a test version we'd love you to try.".yellow if rc), + '', + 'For more information, see https://blog.cocoapods.org ' \ + 'and the CHANGELOG for this version at ' \ + "https://github.com/CocoaPods/CocoaPods/releases/tag/#{latest_cocoapods_version}".green, + '', + '', + ].compact.join("\n") + UI.puts(message) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target.rb new file mode 100644 index 0000000..237f915 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target.rb @@ -0,0 +1,378 @@ +require 'cocoapods/target/build_settings' + +module Pod + # Model class which describes a Pods target. + # + # The Target class stores and provides the information necessary for + # working with a target in the Podfile and its dependent libraries. + # This class is used to represent both the targets and their libraries. + # + class Target + DEFAULT_VERSION = '1.0.0'.freeze + DEFAULT_NAME = 'Default'.freeze + DEFAULT_BUILD_CONFIGURATIONS = { 'Release' => :release, 'Debug' => :debug }.freeze + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Hash{String=>Symbol}] A hash representing the user build + # configurations where each key corresponds to the name of a + # configuration and its value to its type (`:debug` or `:release`). + # + attr_reader :user_build_configurations + + # @return [Array] The value for the ARCHS build setting. + # + attr_reader :archs + + # @return [Platform] the platform of this target. + # + attr_reader :platform + + # @return [BuildSettings] the build settings for this target. + # + attr_reader :build_settings + + # @return [BuildType] the build type for this target. + # + attr_reader :build_type + private :build_type + + # @return [Boolean] whether the target can be linked to app extensions only. + # + attr_reader :application_extension_api_only + + # @return [Boolean] whether the target must be compiled with Swift's library + # evolution support, necessary for XCFrameworks. 
+ # + attr_reader :build_library_for_distribution + + # Initialize a new target + # + # @param [Sandbox] sandbox @see #sandbox + # @param [BuildType] build_type @see #build_type + # @param [Hash{String=>Symbol}] user_build_configurations @see #user_build_configurations + # @param [Array] archs @see #archs + # @param [Platform] platform @see #platform + # + def initialize(sandbox, build_type, user_build_configurations, archs, platform) + @sandbox = sandbox + @user_build_configurations = user_build_configurations + @archs = archs + @platform = platform + @build_type = build_type + + @application_extension_api_only = false + @build_library_for_distribution = false + @build_settings = create_build_settings + end + + # @return [String] the name of the library. + # + def name + label + end + + alias to_s name + + # @return [String] the label for the target. + # + def label + DEFAULT_NAME + end + + # @return [String] The version associated with this target + # + def version + DEFAULT_VERSION + end + + # @return [Boolean] Whether the target uses Swift code + # + def uses_swift? + false + end + + # @return [Boolean] whether the target is built dynamically + # + def build_as_dynamic? + build_type.dynamic? + end + + # @return [Boolean] whether the target is built as a dynamic framework + # + def build_as_dynamic_framework? + build_type.dynamic_framework? + end + + # @return [Boolean] whether the target is built as a dynamic library + # + def build_as_dynamic_library? + build_type.dynamic_library? + end + + # @return [Boolean] whether the target is built as a framework + # + def build_as_framework? + build_type.framework? + end + + # @return [Boolean] whether the target is built as a library + # + def build_as_library? + build_type.library? + end + + # @return [Boolean] whether the target is built statically + # + def build_as_static? + build_type.static? + end + + # @return [Boolean] whether the target is built as a static framework + # + def build_as_static_framework? + build_type.static_framework? + end + + # @return [Boolean] whether the target is built as a static library + # + def build_as_static_library? + build_type.static_library? + end + + # @deprecated Prefer {build_as_static_framework?}. + # + # @return [Boolean] Whether the target should build a static framework. + # + def static_framework? + build_as_static_framework? + end + + # @return [String] the name to use for the source code module constructed + # for this target, and which will be used to import the module in + # implementation source files. + # + def product_module_name + c99ext_identifier(label) + end + + # @return [String] the name of the product. + # + def product_name + if build_as_framework? + framework_name + else + static_library_name + end + end + + # @return [String] the name of the product excluding the file extension or + # a product type specific prefix, depends on #requires_frameworks? + # and #product_module_name or #label. + # + def product_basename + if build_as_framework? + product_module_name + else + label + end + end + + # @return [String] the name of the framework, depends on #label. + # + # @note This may not depend on #requires_frameworks? indirectly as it is + # used for migration. + # + def framework_name + "#{product_module_name}.framework" + end + + # @return [String] the name of the library, depends on #label. + # + # @note This may not depend on #requires_frameworks? indirectly as it is + # used for migration. 
+ # + def static_library_name + "lib#{label}.a" + end + + # @return [Symbol] either :framework or :static_library, depends on + # #build_as_framework?. + # + def product_type + build_as_framework? ? :framework : :static_library + end + + # @return [String] A string suitable for debugging. + # + def inspect + "#<#{self.class} name=#{name}>" + end + + #-------------------------------------------------------------------------# + + # @!group Framework support + + # @deprecated Prefer {build_as_framework?}. + # + # @return [Boolean] whether the generated target needs to be implemented + # as a framework + # + def requires_frameworks? + build_as_framework? + end + + #-------------------------------------------------------------------------# + + # @!group Support files + + # @return [Pathname] the folder where to store the support files of this + # library. + # + def support_files_dir + sandbox.target_support_files_dir(name) + end + + # @param [String] variant + # The variant of the xcconfig. Used to differentiate build + # configurations. + # + # @return [Pathname] the absolute path of the xcconfig file. + # + def xcconfig_path(variant = nil) + if variant + support_files_dir + "#{label}.#{variant.to_s.gsub(File::SEPARATOR, '-').downcase}.xcconfig" + else + support_files_dir + "#{label}.xcconfig" + end + end + + # @return [Pathname] the absolute path of the header file which contains + # the exported foundation constants with framework version + # information and all headers, which should been exported in the + # module map. + # + def umbrella_header_path + module_map_path.parent + "#{label}-umbrella.h" + end + + def umbrella_header_path_to_write + module_map_path_to_write.parent + "#{label}-umbrella.h" + end + + # @return [Pathname] the absolute path of the LLVM module map file that + # defines the module structure for the compiler. + # + def module_map_path + module_map_path_to_write + end + + # @!private + # + # @return [Pathname] the absolute path of the module map file that + # CocoaPods writes. This can be different from `module_map_path` + # if the module map gets symlinked. + # + def module_map_path_to_write + basename = "#{label}.modulemap" + support_files_dir + basename + end + + # @return [Pathname] the absolute path of the bridge support file. + # + def bridge_support_path + support_files_dir + "#{label}.bridgesupport" + end + + # @return [Pathname] the absolute path of the Info.plist file. + # + def info_plist_path + support_files_dir + "#{label}-Info.plist" + end + + # @return [Hash] additional entries for the generated Info.plist + # + def info_plist_entries + {} + end + + # @return [Pathname] the path of the dummy source generated by CocoaPods + # + def dummy_source_path + support_files_dir + "#{label}-dummy.m" + end + + # Mark the target as extension-only. + # Translates to APPLICATION_EXTENSION_API_ONLY = YES in the build settings. + # + def mark_application_extension_api_only + @application_extension_api_only = true + end + + # Compiles the target with Swift's library evolution support, necessary to + # build XCFrameworks. + # Translates to BUILD_LIBRARY_FOR_DISTRIBUTION = YES in the build settings. + # + def mark_build_library_for_distribution + @build_library_for_distribution = true + end + + # @return [Pathname] The absolute path of the prepare artifacts script. 
+ # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_path + support_files_dir + "#{label}-artifacts.sh" + end + + # Returns an extension in the target that corresponds to the + # resource's input extension. + # + # @param [String] input_extension + # The input extension to map to. + # + # @return [String] The output extension. + # + def self.output_extension_for_resource(input_extension) + case input_extension + when '.storyboard' then '.storyboardc' + when '.xib' then '.nib' + when '.xcdatamodel' then '.mom' + when '.xcdatamodeld' then '.momd' + when '.xcmappingmodel' then '.cdm' + when '.xcassets' then '.car' + else input_extension + end + end + + def self.resource_extension_compilable?(input_extension) + output_extension_for_resource(input_extension) != input_extension && input_extension != '.xcassets' + end + + #-------------------------------------------------------------------------# + + private + + # Transforms the given string into a valid +identifier+ after C99ext + # standard, so that it can be used in source code where escaping of + # ambiguous characters is not applicable. + # + # @param [String] name + # any name, which may contain leading numbers, spaces or invalid + # characters. + # + # @return [String] + # + def c99ext_identifier(name) + name.gsub(/^([0-9])/, '_\1').gsub(/[^a-zA-Z0-9_]/, '_') + end + + def create_build_settings + BuildSettings.new(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/aggregate_target.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/aggregate_target.rb new file mode 100644 index 0000000..081b642 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/aggregate_target.rb @@ -0,0 +1,558 @@ +require 'cocoapods/xcode/framework_paths' +require 'cocoapods/xcode/xcframework' + +module Pod + # Stores the information relative to the target used to cluster the targets + # of the single Pods. The client targets will then depend on this one. + # + class AggregateTarget < Target + # Product types where the product's frameworks must be embedded in a host target + # + EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES = [:app_extension, :framework, :static_library, :messages_extension, + :watch_extension, :xpc_service].freeze + + # @return [TargetDefinition] the target definition of the Podfile that + # generated this target. + # + attr_reader :target_definition + + # @return [Pathname] the folder where the client is stored used for + # computing the relative paths. If integrating it should be the + # folder where the user project is stored, otherwise it should + # be the installation root. + # + attr_reader :client_root + + # @return [Xcodeproj::Project] the user project that this target will + # integrate as identified by the analyzer. + # + attr_reader :user_project + + # @return [Array] the list of the UUIDs of the user targets that + # will be integrated by this target as identified by the analyzer. + # + # @note The target instances are not stored to prevent editing different + # instances. + # + attr_reader :user_target_uuids + + # @return [Hash] Map from configuration name to + # configuration file for the target + # + # @note The configurations are generated by the {TargetInstaller} and + # used by {UserProjectIntegrator} to check for any overridden + # values. + # + attr_reader :xcconfigs + + # @return [Array] The dependencies for this target. 
+ # + attr_reader :pod_targets + + # @return [Array] The aggregate targets whose pods this + # target must be able to import, but will not directly link against. + # + attr_reader :search_paths_aggregate_targets + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see Target#sandbox + # @param [BuildType] build_type @see Target#build_type + # @param [Hash{String=>Symbol}] user_build_configurations @see Target#user_build_configurations + # @param [Array] archs @see Target#archs + # @param [Platform] platform @see #Target#platform + # @param [TargetDefinition] target_definition @see #target_definition + # @param [Pathname] client_root @see #client_root + # @param [Xcodeproj::Project] user_project @see #user_project + # @param [Array] user_target_uuids @see #user_target_uuids + # @param [Hash{String=>Array}] pod_targets_for_build_configuration @see #pod_targets_for_build_configuration + # + def initialize(sandbox, build_type, user_build_configurations, archs, platform, target_definition, client_root, + user_project, user_target_uuids, pod_targets_for_build_configuration) + super(sandbox, build_type, user_build_configurations, archs, platform) + raise "Can't initialize an AggregateTarget without a TargetDefinition!" if target_definition.nil? + raise "Can't initialize an AggregateTarget with an abstract TargetDefinition!" if target_definition.abstract? + @target_definition = target_definition + @client_root = client_root + @user_project = user_project + @user_target_uuids = user_target_uuids + @pod_targets_for_build_configuration = pod_targets_for_build_configuration + @pod_targets = pod_targets_for_build_configuration.values.flatten.uniq + @search_paths_aggregate_targets = [] + @xcconfigs = {} + end + + # Merges this aggregate target with additional pod targets that are part of embedded aggregate targets. + # + # @param [Hash{String=>Array}] embedded_pod_targets_for_build_configuration + # The pod targets to merge with. + # + # @return [AggregateTarget] a new instance of this aggregate target with additional pod targets to be used from + # pod targets of embedded aggregate targets. + # + def merge_embedded_pod_targets(embedded_pod_targets_for_build_configuration) + merged = @pod_targets_for_build_configuration.merge(embedded_pod_targets_for_build_configuration) do |_, before, after| + (before + after).uniq + end + AggregateTarget.new(sandbox, build_type, user_build_configurations, archs, platform, + target_definition, client_root, user_project, user_target_uuids, merged).tap do |aggregate_target| + aggregate_target.search_paths_aggregate_targets.concat(search_paths_aggregate_targets).freeze + aggregate_target.mark_application_extension_api_only if application_extension_api_only + aggregate_target.mark_build_library_for_distribution if build_library_for_distribution + end + end + + def build_settings(configuration_name = nil) + if configuration_name + @build_settings[configuration_name] || + raise(ArgumentError, "#{self} does not contain a build setting for the #{configuration_name.inspect} configuration, only #{@build_settings.keys.inspect}") + else + @build_settings.each_value.first || + raise(ArgumentError, "#{self} does not contain any build settings") + end + end + + # @return [Boolean] True if the user_target refers to a + # library (framework, static or dynamic lib). + # + def library? + # Without a user_project, we can't say for sure + # that this is a library + return false if user_project.nil? 
+ symbol_types = user_targets.map(&:symbol_type).uniq + unless symbol_types.count == 1 + raise ArgumentError, "Expected single kind of user_target for #{name}. Found #{symbol_types.join(', ')}." + end + [:framework, :dynamic_library, :static_library].include? symbol_types.first + end + + # @return [Boolean] True if the user_target's pods are + # for an extension and must be embedded in a host, + # target, otherwise false. + # + def requires_host_target? + # If we don't have a user_project, then we can't + # glean any info about how this target is going to + # be integrated, so return false since we can't know + # for sure that this target refers to an extension + # target that would require a host target + return false if user_project.nil? + symbol_types = user_targets.map(&:symbol_type).uniq + unless symbol_types.count == 1 + raise ArgumentError, "Expected single kind of user_target for #{name}. Found #{symbol_types.join(', ')}." + end + EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES.include?(symbol_types[0]) + end + + # @return [String] the label for the target. + # + def label + target_definition.label.to_s + end + + # @return [Podfile] The podfile which declares the dependency + # + def podfile + target_definition.podfile + end + + # @return [Pathname] the path of the user project that this target will + # integrate as identified by the analyzer. + # + def user_project_path + user_project.path if user_project + end + + # List all user targets that will be integrated by this #target. + # + # @return [Array] + # + def user_targets + return [] unless user_project + user_target_uuids.map do |uuid| + native_target = user_project.objects_by_uuid[uuid] + unless native_target + raise Informative, '[Bug] Unable to find the target with ' \ + "the `#{uuid}` UUID for the `#{self}` integration library" + end + native_target + end + end + + # @param [String] build_configuration The build configuration for which the + # the pod targets should be returned. + # + # @return [Array] the pod targets for the given build + # configuration. + # + def pod_targets_for_build_configuration(build_configuration) + @pod_targets_for_build_configuration[build_configuration] || [] + end + + # @return [Array] The specifications used by this aggregate target. + # + def specs + pod_targets.flat_map(&:specs) + end + + # @return [Hash{Symbol => Array}] The pod targets for each + # build configuration. + # + def specs_by_build_configuration + result = {} + user_build_configurations.each_key do |build_configuration| + result[build_configuration] = pod_targets_for_build_configuration(build_configuration). + flat_map(&:specs) + end + result + end + + # @return [Array] The consumers of the Pod. + # + def spec_consumers + specs.map { |spec| spec.consumer(platform) } + end + + # @return [Boolean] Whether the target uses Swift code + # + def uses_swift? + pod_targets.any?(&:uses_swift?) + end + + # @return [Boolean] Whether the target contains any resources + # + def includes_resources? + !resource_paths_by_config.each_value.all?(&:empty?) + end + + # @return [Boolean] Whether the target contains any on demand resources + # + def includes_on_demand_resources? + !on_demand_resources.empty? + end + + # @return [Boolean] Whether the target contains frameworks to be embedded into + # the user target + # + def includes_frameworks? + !framework_paths_by_config.each_value.all?(&:empty?) + end + + # @return [Boolean] Whether the target contains xcframeworks to be embedded into + # the user target + # + def includes_xcframeworks? 
+ !xcframeworks_by_config.each_value.all?(&:empty?) + end + + # @return [Hash{String => Array}] The vendored dynamic artifacts and framework target + # input and output paths grouped by config + # + def framework_paths_by_config + @framework_paths_by_config ||= begin + framework_paths_by_config = {} + user_build_configurations.each_key do |config| + relevant_pod_targets = pod_targets_for_build_configuration(config) + framework_paths_by_config[config] = relevant_pod_targets.flat_map do |pod_target| + library_specs = pod_target.library_specs.map(&:name) + pod_target.framework_paths.values_at(*library_specs).flatten.compact.uniq + end + end + framework_paths_by_config + end + end + + # @return [Hash{String => Array}] The vendored dynamic artifacts and framework target + # input and output paths grouped by config + # + def xcframeworks_by_config + @xcframeworks_by_config ||= begin + xcframeworks_by_config = {} + user_build_configurations.each_key do |config| + relevant_pod_targets = pod_targets_for_build_configuration(config) + xcframeworks_by_config[config] = relevant_pod_targets.flat_map do |pod_target| + library_specs = pod_target.library_specs.map(&:name) + pod_target.xcframeworks.values_at(*library_specs).flatten.compact.uniq + end + end + xcframeworks_by_config + end + end + + # @return [Array] Uniqued On Demand Resources for this target. + # + # @note On Demand Resources are not separated by config as they are integrated directly into the users target via + # the resources build phase. + # + def on_demand_resources + @on_demand_resources ||= begin + pod_targets.flat_map do |pod_target| + library_file_accessors = pod_target.file_accessors.select { |fa| fa.spec.library_specification? } + library_file_accessors.flat_map(&:on_demand_resources_files) + end.uniq + end + end + + # @return [Hash{String => Array}] Uniqued Resources grouped by config + # + def resource_paths_by_config + @resource_paths_by_config ||= begin + relevant_pod_targets = pod_targets.reject do |pod_target| + pod_target.should_build? && pod_target.build_as_dynamic_framework? + end + user_build_configurations.each_key.each_with_object({}) do |config, resources_by_config| + targets = relevant_pod_targets & pod_targets_for_build_configuration(config) + resources_by_config[config] = targets.flat_map do |pod_target| + library_specs = pod_target.library_specs.map(&:name) + resource_paths = pod_target.resource_paths.values_at(*library_specs).flatten + + if pod_target.build_as_static_framework? + built_product_dir = Pathname.new(pod_target.build_product_path('${BUILT_PRODUCTS_DIR}')) + resource_paths = resource_paths.map do |resource_path| + extname = File.extname(resource_path) + if self.class.resource_extension_compilable?(extname) + output_extname = self.class.output_extension_for_resource(extname) + output_path_components = Pathname(resource_path).each_filename.select { |component| File.extname(component) == '.lproj' } + output_path_components << File.basename(resource_path) + built_product_dir.join(*output_path_components).sub_ext(output_extname).to_s + else + resource_path + end + end + end + + resource_paths << bridge_support_file + resource_paths.compact.uniq + end + end + end + end + + # @return [Pathname] the path of the bridge support file relative to the + # sandbox or `nil` if bridge support is disabled. + # + def bridge_support_file + bridge_support_path.relative_path_from(sandbox.root) if podfile.generate_bridge_support? 
+ end + + #-------------------------------------------------------------------------# + + # @!group Support files + + # @return [Pathname] The absolute path of acknowledgements file. + # + # @note The acknowledgements generators add the extension according to + # the file type. + # + def acknowledgements_basepath + support_files_dir + "#{label}-acknowledgements" + end + + # @return [Pathname] The absolute path of the copy resources script. + # + def copy_resources_script_path + support_files_dir + "#{label}-resources.sh" + end + + # @return [Pathname] The absolute path of the embed frameworks script. + # + def embed_frameworks_script_path + support_files_dir + "#{label}-frameworks.sh" + end + + # @param [String] configuration the configuration this path is for. + # + # @return [Pathname] The absolute path of the copy resources script input file list. + # + def copy_resources_script_input_files_path(configuration) + support_files_dir + "#{label}-resources-#{configuration}-input-files.xcfilelist" + end + + # @param [String] configuration the configuration this path is for. + # + # @return [Pathname] The absolute path of the copy resources script output file list. + # + def copy_resources_script_output_files_path(configuration) + support_files_dir + "#{label}-resources-#{configuration}-output-files.xcfilelist" + end + + # @param [String] configuration the configuration this path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script input file list. + # + def embed_frameworks_script_input_files_path(configuration) + support_files_dir + "#{label}-frameworks-#{configuration}-input-files.xcfilelist" + end + + # @param [String] configuration the configuration this path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script output file list. + # + def embed_frameworks_script_output_files_path(configuration) + support_files_dir + "#{label}-frameworks-#{configuration}-output-files.xcfilelist" + end + + # @param [String] configuration the configuration this path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script input file list. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_input_files_path(configuration) + support_files_dir + "#{label}-artifacts-#{configuration}-input-files.xcfilelist" + end + + # @param [String] configuration the configuration this path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script output file list. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_output_files_path(configuration) + support_files_dir + "#{label}-artifacts-#{configuration}-output-files.xcfilelist" + end + + # @return [String] The output file path fo the check manifest lock script. + # + def check_manifest_lock_script_output_file_path + "$(DERIVED_FILE_DIR)/#{label}-checkManifestLockResult.txt" + end + + # @return [Pathname] The relative path of the Pods directory from user project's directory. + # + def relative_pods_root_path + sandbox.root.relative_path_from(client_root) + end + + # @return [String] The xcconfig path of the root from the `$(SRCROOT)` + # variable of the user's project. + # + def relative_pods_root + "${SRCROOT}/#{relative_pods_root_path}" + end + + # @return [String] The path of the Podfile directory relative to the + # root of the user project. 
+ # + def podfile_dir_relative_path + podfile_path = target_definition.podfile.defined_in_file + return "${SRCROOT}/#{podfile_path.relative_path_from(client_root).dirname}" unless podfile_path.nil? + # Fallback to the standard path if the Podfile is not represented by a file. + '${PODS_ROOT}/..' + end + + # @param [String] config_name The build configuration name to get the xcconfig for + # @return [String] The path of the xcconfig file relative to the root of + # the user project. + # + def xcconfig_relative_path(config_name) + xcconfig_path(config_name).relative_path_from(client_root).to_s + end + + # @return [String] The path of the copy resources script relative to the + # root of the Pods project. + # + def copy_resources_script_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(copy_resources_script_path)}" + end + + # @return [String] The path of the copy resources script input file list + # relative to the root of the Pods project. + # + def copy_resources_script_input_files_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(copy_resources_script_input_files_path('${CONFIGURATION}'))}" + end + + # @return [String] The path of the copy resources script output file list + # relative to the root of the Pods project. + # + def copy_resources_script_output_files_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(copy_resources_script_output_files_path('${CONFIGURATION}'))}" + end + + # @return [String] The path of the embed frameworks relative to the + # root of the Pods project. + # + def embed_frameworks_script_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(embed_frameworks_script_path)}" + end + + # @return [String] The path of the embed frameworks script input file list + # relative to the root of the Pods project. + # + def embed_frameworks_script_input_files_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(embed_frameworks_script_input_files_path('${CONFIGURATION}'))}" + end + + # @return [String] The path of the embed frameworks script output file list + # relative to the root of the Pods project. + # + def embed_frameworks_script_output_files_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(embed_frameworks_script_output_files_path('${CONFIGURATION}'))}" + end + + # @return [String] The path of the prepare artifacts script relative to the + # root of the Pods project. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(prepare_artifacts_script_path)}" + end + + # @return [String] The path of the prepare artifacts script input file list + # relative to the root of the Pods project. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_input_files_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(prepare_artifacts_script_input_files_path('${CONFIGURATION}'))}" + end + + # @return [String] The path of the prepare artifacts script output file list + # relative to the root of the Pods project. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_output_files_relative_path + "${PODS_ROOT}/#{relative_to_pods_root(prepare_artifacts_script_output_files_path('${CONFIGURATION}'))}" + end + + private + + # @!group Private Helpers + #-------------------------------------------------------------------------# + + # Computes the relative path of a sandboxed file from the `$(PODS_ROOT)` + # variable of the Pods's project. + # + # @param [Pathname] path + # A relative path from the root of the sandbox. 
+ # + # @return [String] The computed path. + # + def relative_to_pods_root(path) + path.relative_path_from(sandbox.root).to_s + end + + def create_build_settings + settings = {} + + user_build_configurations.each do |configuration_name, configuration| + settings[configuration_name] = BuildSettings::AggregateTargetSettings.new(self, configuration_name, :configuration => configuration) + end + + settings + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/build_settings.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/build_settings.rb new file mode 100644 index 0000000..6c2d44c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/build_settings.rb @@ -0,0 +1,1383 @@ +# frozen_string_literal: true + +module Pod + class Target + # @since 1.5.0 + class BuildSettings + #-------------------------------------------------------------------------# + + # @!group Constants + + # @return [Set] + # The build settings that should be treated as arrays, rather than strings. + # + PLURAL_SETTINGS = %w( + ALTERNATE_PERMISSIONS_FILES + ARCHS + BUILD_VARIANTS + EXCLUDED_SOURCE_FILE_NAMES + FRAMEWORK_SEARCH_PATHS + GCC_PREPROCESSOR_DEFINITIONS + GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS + HEADER_SEARCH_PATHS + INCLUDED_SOURCE_FILE_NAMES + INFOPLIST_PREPROCESSOR_DEFINITIONS + LD_RUNPATH_SEARCH_PATHS + LIBRARY_SEARCH_PATHS + LOCALIZED_STRING_MACRO_NAMES + OTHER_CFLAGS + OTHER_CPLUSPLUSFLAGS + OTHER_LDFLAGS + OTHER_SWIFT_FLAGS + REZ_SEARCH_PATHS + SECTORDER_FLAGS + SWIFT_ACTIVE_COMPILATION_CONDITIONS + SWIFT_INCLUDE_PATHS + SYSTEM_FRAMEWORK_SEARCH_PATHS + SYSTEM_HEADER_SEARCH_PATHS + USER_HEADER_SEARCH_PATHS + WARNING_CFLAGS + WARNING_LDFLAGS + ).to_set.freeze + + # @return [String] + # The variable for the configuration build directory used when building pod targets. + # + CONFIGURATION_BUILD_DIR_VARIABLE = '${PODS_CONFIGURATION_BUILD_DIR}' + + # @return [String] + # The variable for the configuration intermediate frameworks directory used for building pod targets + # that contain vendored xcframeworks. + # + XCFRAMEWORKS_BUILD_DIR_VARIABLE = '${PODS_XCFRAMEWORKS_BUILD_DIR}' + + #-------------------------------------------------------------------------# + + # @!group DSL + + # Creates a method that calculates a part of the build settings for the {#target}. + # + # @!visibility private + # + # @param [Symbol,String] method_name + # The name of the method to define + # + # @param [Boolean] build_setting + # Whether the method name should be added (upcased) to {.build_setting_names} + # + # @param [Boolean] memoized + # Whether the method should be memoized + # + # @param [Boolean] sorted + # Whether the return value should be sorted + # + # @param [Boolean] uniqued + # Whether the return value should be uniqued + # + # @param [Boolean] compacted + # Whether the return value should be compacted + # + # @param [Boolean] frozen + # Whether the return value should be frozen + # + # @param [Boolean, Symbol] from_search_paths_aggregate_targets + # If truthy, the method from {Aggregate} that should be used to concatenate build settings from + # {::Pod::AggregateTarget#search_paths_aggregate_target} + # + # @param [Symbol] from_pod_targets_to_link + # If truthy, the `_to_import` values from `BuildSettings#pod_targets_to_link` will be concatenated + # + # @param [Block] implementation + # + # @macro [attach] define_build_settings_method + # @!method $1 + # + # The `$1` build setting for the {#target}. 
+ # + # The return value from this method will be: `${1--1}`. + # + def self.define_build_settings_method(method_name, build_setting: false, + memoized: false, sorted: false, uniqued: false, compacted: false, frozen: true, + from_search_paths_aggregate_targets: false, from_pod_targets_to_link: false, + &implementation) + + memoized_key = "#{self}##{method_name}" + + (@build_settings_names ||= Set.new) << method_name.to_s.upcase if build_setting + + raw_method_name = :"_raw_#{method_name}" + define_method(raw_method_name, &implementation) + private(raw_method_name) + + dup_before_freeze = frozen && (from_pod_targets_to_link || from_search_paths_aggregate_targets || uniqued || sorted) + + define_method(method_name) do + if memoized + retval = @__memoized.fetch(memoized_key, :not_found) + return retval if :not_found != retval + end + + retval = send(raw_method_name) + if retval.nil? + @__memoized[memoized_key] = retval if memoized + return + end + + retval = retval.dup if dup_before_freeze && retval.frozen? + + retval.concat(pod_targets_to_link.flat_map { |pod_target| pod_target.build_settings_for_spec(pod_target.root_spec, :configuration => configuration_name).public_send("#{method_name}_to_import") }) if from_pod_targets_to_link + retval.concat(search_paths_aggregate_target_pod_target_build_settings.flat_map(&from_search_paths_aggregate_targets)) if from_search_paths_aggregate_targets + + retval.compact! if compacted + retval.uniq! if uniqued + retval.sort! if sorted + retval.freeze if frozen + + @__memoized[memoized_key] = retval if memoized + + retval + end + end + private_class_method :define_build_settings_method + + # @param [XCFramework] xcframework the xcframework slice that will be copied to the intermediates dir + # + # @return [String] the path to the directory containing the xcframework slice + # + def self.xcframework_intermediate_dir(xcframework) + "#{XCFRAMEWORKS_BUILD_DIR_VARIABLE}/#{xcframework.target_name}" + end + + class << self + #-------------------------------------------------------------------------# + + # @!group Public API + + # @return [Set] a set of all the build settings names that will + # be present in the #xcconfig + # + attr_reader :build_settings_names + end + + #-------------------------------------------------------------------------# + + # @!group Public API + + # @return [Target] + # The target this build settings object is generating build settings for + # + attr_reader :target + + # Initialize a new instance + # + # @param [Target] target + # see {#target} + # + def initialize(target) + @target = target + @__memoized = {} + end + + def initialize_copy(other) + super + @__memoized = {} + end + + # @return [Xcodeproj::Config] + define_build_settings_method :xcconfig, :memoized => true do + settings = add_inherited_to_plural(to_h) + Xcodeproj::Config.new(settings) + end + + alias generate xcconfig + + # Saves the generated xcconfig to the given path + # + # @return [Xcodeproj::Config] + # + # @see #xcconfig + # + # @param [String,Pathname] path + # The path the xcconfig will be saved to + # + def save_as(path) + xcconfig.save_as(path) + end + + #-------------------------------------------------------------------------# + + # @!group Build System + + # @return [String] + define_build_settings_method :pods_build_dir, :build_setting => true do + '${BUILD_DIR}' + end + + # @return [String] + define_build_settings_method :pods_configuration_build_dir, :build_setting => true do + '${PODS_BUILD_DIR}/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)' + end + + 
define_build_settings_method :pods_xcframeworks_build_dir, :build_setting => true do + '$(PODS_CONFIGURATION_BUILD_DIR)/XCFrameworkIntermediates' + end + + # @return [String] + define_build_settings_method :use_recursive_script_inputs_in_script_phases, :build_setting => true do + 'YES' + end + + #-------------------------------------------------------------------------# + + # @!group Code Signing + + # @return [String] + define_build_settings_method :code_sign_identity, :build_setting => true do + return unless target.build_as_dynamic? + return unless target.platform.to_sym == :osx + '' + end + + #-------------------------------------------------------------------------# + + # @!group Frameworks + + # @return [Array] + define_build_settings_method :frameworks do + [] + end + + # @return [Array] + define_build_settings_method :weak_frameworks do + [] + end + + # @return [Array] + define_build_settings_method :framework_search_paths, :build_setting => true, :memoized => true do + framework_search_paths_to_import_developer_frameworks(frameworks + weak_frameworks) + end + + # @param [Array] frameworks + # The list of framework names + # + # @return [Array] + # the `FRAMEWORK_SEARCH_PATHS` needed to import developer frameworks + def framework_search_paths_to_import_developer_frameworks(frameworks) + if frameworks.include?('XCTest') || frameworks.include?('SenTestingKit') + %w[ $(PLATFORM_DIR)/Developer/Library/Frameworks ] + else + [] + end + end + + #-------------------------------------------------------------------------# + + # @!group Libraries + + # @return [Array] + define_build_settings_method :libraries do + [] + end + + #-------------------------------------------------------------------------# + + # @!group Clang + + # @return [Array] + define_build_settings_method :gcc_preprocessor_definitions, :build_setting => true do + %w( COCOAPODS=1 ) + end + + # @return [Array] + define_build_settings_method :other_cflags, :build_setting => true, :memoized => true do + module_map_files.map { |f| "-fmodule-map-file=#{f}" } + end + + # @return [Array] + define_build_settings_method :module_map_files do + [] + end + + #-------------------------------------------------------------------------# + + # @!group Swift + + # @return [Boolean] + # Whether `OTHER_SWIFT_FLAGS` should be generated when the target + # does not use swift. + # + def other_swift_flags_without_swift? + false + end + + # @return [Array] + define_build_settings_method :other_swift_flags, :build_setting => true, :memoized => true do + return unless target.uses_swift? || other_swift_flags_without_swift? + flags = %w(-D COCOAPODS) + flags.concat module_map_files.flat_map { |f| ['-Xcc', "-fmodule-map-file=#{f}"] } + flags + end + + #-------------------------------------------------------------------------# + + # @!group Linking + + # @return [Boolean] + define_build_settings_method :requires_objc_linker_flag? do + false + end + + # @return [Boolean] + define_build_settings_method :requires_fobjc_arc? do + false + end + + # Xcode 12 turns on this warning by default which is problematic for CocoaPods-generated + # imports which use double-quoted paths. 
+ # @return [Boolean] + define_build_settings_method :clang_warn_quoted_include_in_framework_header, :build_setting => true do + 'NO' + end + + # @return [Array] + # the `LD_RUNPATH_SEARCH_PATHS` needed for dynamically linking the {#target} + # + # @param [Boolean] requires_host_target + # + # @param [Boolean] test_bundle + # + def _ld_runpath_search_paths(requires_host_target: false, test_bundle: false, uses_swift: false) + paths = [] + if uses_swift + paths << '/usr/lib/swift' + paths << '$(PLATFORM_DIR)/Developer/Library/Frameworks' if test_bundle + end + if target.platform.symbolic_name == :osx + paths << "'@executable_path/../Frameworks'" + paths << if test_bundle + "'@loader_path/../Frameworks'" + else + "'@loader_path/Frameworks'" + end + paths << '${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}' if uses_swift + else + paths << "'@executable_path/Frameworks'" + paths << "'@loader_path/Frameworks'" + paths << "'@executable_path/../../Frameworks'" if requires_host_target + end + paths + end + private :_ld_runpath_search_paths + + # @return [Array] + define_build_settings_method :other_ldflags, :build_setting => true, :memoized => true do + ld_flags = [] + ld_flags << '-ObjC' if requires_objc_linker_flag? + if requires_fobjc_arc? + ld_flags << '-fobjc-arc' + end + libraries.each { |l| ld_flags << %(-l"#{l}") } + frameworks.each { |f| ld_flags << '-framework' << %("#{f}") } + weak_frameworks.each { |f| ld_flags << '-weak_framework' << %("#{f}") } + ld_flags + end + + #-------------------------------------------------------------------------# + + # @!group Private Methods + + private + + # @return [Hash String|Array>] + def to_h + hash = {} + self.class.build_settings_names.sort.each do |setting| + hash[setting] = public_send(setting.downcase) + end + hash + end + + # @return [Hash String>] + def add_inherited_to_plural(hash) + Hash[hash.map do |key, value| + next [key, '$(inherited)'] if value.nil? + if PLURAL_SETTINGS.include?(key) + raise ArgumentError, "#{key} is a plural setting, cannot have #{value.inspect} as its value" unless value.is_a? Array + + value = "$(inherited) #{quote_array(value)}" + else + raise ArgumentError, "#{key} is not a plural setting, cannot have #{value.inspect} as its value" unless value.is_a? String + end + + [key, value] + end] + end + + # @return [Array] + # + # @param [Array] array + # + def quote_array(array) + array.map do |element| + case element + when /\A([\w-]+?)=(.+)\z/ + key = Regexp.last_match(1) + value = Regexp.last_match(2) + value = %("#{value}") if value =~ /[^\w\d]/ + %(#{key}=#{value}) + when /[\$\[\]\ ]/ + %("#{element}") + else + element + end + end.join(' ') + end + + # @param [Hash] xcconfig_values_by_consumer_by_key + # + # @param [#to_s] attribute + # The name of the attribute being merged + # + # @return [Hash] + # + def merged_xcconfigs(xcconfig_values_by_consumer_by_key, attribute, overriding: {}) + xcconfig_values_by_consumer_by_key.each_with_object(overriding.dup) do |(key, values_by_consumer), xcconfig| + uniq_values = values_by_consumer.values.uniq + values_are_bools = uniq_values.all? { |v| v =~ /\A(yes|no)\z/i } + if values_are_bools + # Boolean build settings + if uniq_values.count > 1 + UI.warn "Can't merge #{attribute} for pod targets: " \ + "#{values_by_consumer.keys.map(&:name)}. Boolean build " \ + "setting #{key} has different values." + else + xcconfig[key] = uniq_values.first + end + elsif PLURAL_SETTINGS.include? 
key + # Plural build settings + if xcconfig.key?(key) + overridden = xcconfig[key] + uniq_values.prepend(overridden) + end + xcconfig[key] = uniq_values.uniq.join(' ') + elsif uniq_values.count > 1 + # Singular build settings + UI.warn "Can't merge #{attribute} for pod targets: " \ + "#{values_by_consumer.keys.map(&:name)}. Singular build " \ + "setting #{key} has different values." + else + xcconfig[key] = uniq_values.first + end + end + end + + # Merges the spec-defined xcconfig into the derived xcconfig, + # overriding any singular settings and merging plural settings. + # + # @param [Hash] spec_xcconfig_hash the merged xcconfig defined in the spec. + # + # @param [Xcodeproj::Config] xcconfig the config to merge into. + # + # @return [Xcodeproj::Config] the merged config. + # + def merge_spec_xcconfig_into_xcconfig(spec_xcconfig_hash, xcconfig) + plural_configs, singlular_configs = spec_xcconfig_hash.partition { |k, _v| PLURAL_SETTINGS.include?(k) }.map { |a| Hash[a] } + xcconfig.attributes.merge!(singlular_configs) + xcconfig.merge!(plural_configs) + xcconfig + end + + # Filters out pod targets whose `specs` are a subset of + # another target's. + # + # @param [Array] pod_targets + # + # @return [Array] + # + def select_maximal_pod_targets(pod_targets) + subset_targets = [] + pod_targets.uniq.group_by(&:pod_name).each do |_pod_name, targets| + targets.combination(2) do |a, b| + if (a.specs - b.specs).empty? + subset_targets << a + elsif (b.specs - a.specs).empty? + subset_targets << b + end + end + end + pod_targets - subset_targets + end + + # @param [String] target_name the name of the target this xcframework belongs to + # + # @param [Pathname,String] path the path to the xcframework bundle + # + # @return [Xcode::XCFramework] the xcframework at the given path + # + def load_xcframework(target_name, path) + Xcode::XCFramework.new(target_name, path) + end + + # A subclass that generates build settings for a {PodTarget} + class PodTargetSettings < BuildSettings + #-------------------------------------------------------------------------# + + # @!group Public API + + # @see BuildSettings.build_settings_names + def self.build_settings_names + @build_settings_names | BuildSettings.build_settings_names + end + + # @return [Boolean] + # whether settings are being generated for a test bundle + # + attr_reader :test_xcconfig + alias test_xcconfig? test_xcconfig + + # @return [Boolean] + # whether settings are being generated for an application bundle + # + attr_reader :app_xcconfig + alias app_xcconfig? app_xcconfig + + # @return [Boolean] + # whether settings are being generated for an library bundle + # + attr_reader :library_xcconfig + alias library_xcconfig? library_xcconfig + + def non_library_xcconfig? + !library_xcconfig? + end + + # @return [Specification] + # The non-library specification these build settings are for or `nil`. + # + attr_reader :non_library_spec + + # Initializes a new instance + # + # @param [PodTarget] target + # see {#target} + # + # @param [Specification] non_library_spec + # see {#non_library_spec} + # + # @param [Symbol] configuration + # see {#configuration} + # + def initialize(target, non_library_spec = nil, configuration: nil) + super(target) + if @non_library_spec = non_library_spec + @test_xcconfig = non_library_spec.test_specification? + @app_xcconfig = non_library_spec.app_specification? 
+ @xcconfig_spec_type = non_library_spec.spec_type + @library_xcconfig = false + else + @test_xcconfig = @app_xcconfig = false + @xcconfig_spec_type = :library + @library_xcconfig = true + end + (@configuration = configuration) || raise("No configuration for #{self}.") + end + + # @return [Xcodeproj::Xconfig] + define_build_settings_method :xcconfig, :memoized => true do + xcconfig = super() + merge_spec_xcconfig_into_xcconfig(merged_pod_target_xcconfigs, xcconfig) + end + + #-------------------------------------------------------------------------# + + # @!group Paths + + # @return [String] + define_build_settings_method :pods_root, :build_setting => true do + '${SRCROOT}' + end + + # @return [String] + define_build_settings_method :pods_target_srcroot, :build_setting => true do + target.pod_target_srcroot + end + + #-------------------------------------------------------------------------# + + # @!group Frameworks + + # @return [Array] + define_build_settings_method :consumer_frameworks, :memoized => true do + spec_consumers.flat_map(&:frameworks) + end + + # @return [Array] + define_build_settings_method :frameworks, :memoized => true, :sorted => true, :uniqued => true do + return [] if target.build_as_static? && library_xcconfig? + + frameworks = [] + frameworks.concat consumer_frameworks + if library_xcconfig? + # We know that this library target is being built dynamically based + # on the guard above, so include any vendored static frameworks and vendored xcframeworks. + if target.should_build? + frameworks.concat vendored_static_frameworks.map { |l| File.basename(l, '.framework') } + frameworks.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.static_framework? }. + map(&:name). + uniq + + # Include direct dynamic dependencies to the linker flags. We used to add those in the 'Link Binary With Libraries' + # phase but we no longer do since we cannot differentiate between debug or release configurations within + # that phase. + frameworks.concat target.dependent_targets_by_config[@configuration].flat_map { |pt| pt.build_settings[@configuration].dynamic_frameworks_to_import } + else + # Also include any vendored dynamic frameworks of dependencies. + frameworks.concat dependent_targets.reject(&:should_build?).flat_map { |pt| pt.build_settings[@configuration].dynamic_frameworks_to_import } + end + else + frameworks.concat dependent_targets_to_link.flat_map { |pt| pt.build_settings[@configuration].frameworks_to_import } + end + + frameworks + end + + # @return [Array] + define_build_settings_method :static_frameworks_to_import, :memoized => true do + static_frameworks_to_import = [] + static_frameworks_to_import.concat vendored_static_frameworks.map { |f| File.basename(f, '.framework') } unless target.should_build? && target.build_as_dynamic? + unless target.should_build? && target.build_as_dynamic? + static_frameworks_to_import.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.static_framework? }. + map(&:name). + uniq + end + static_frameworks_to_import << target.product_basename if target.should_build? && target.build_as_static_framework? + static_frameworks_to_import + end + + # @return [Array] + define_build_settings_method :dynamic_frameworks_to_import, :memoized => true do + dynamic_frameworks_to_import = vendored_dynamic_frameworks.map { |f| File.basename(f, '.framework') } + dynamic_frameworks_to_import.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.dynamic_framework? }. + map(&:name). 
+ uniq + dynamic_frameworks_to_import << target.product_basename if target.should_build? && target.build_as_dynamic_framework? + dynamic_frameworks_to_import.concat consumer_frameworks + dynamic_frameworks_to_import + end + + # @return [Array] + define_build_settings_method :weak_frameworks, :memoized => true do + return [] if target.build_as_static? && library_xcconfig? + + weak_frameworks = spec_consumers.flat_map(&:weak_frameworks) + weak_frameworks.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].weak_frameworks_to_import } + weak_frameworks + end + + # @return [Array] + define_build_settings_method :frameworks_to_import, :memoized => true, :sorted => true, :uniqued => true do + static_frameworks_to_import + dynamic_frameworks_to_import + end + + # @return [Array] + define_build_settings_method :weak_frameworks_to_import, :memoized => true, :sorted => true, :uniqued => true do + spec_consumers.flat_map(&:weak_frameworks) + end + + # @return [Array] + define_build_settings_method :framework_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + paths = super().dup + paths.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].framework_search_paths_to_import } + paths.concat framework_search_paths_to_import + paths.delete(target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)) if library_xcconfig? + paths + end + + # @return [String] + define_build_settings_method :framework_header_search_path, :memoized => true do + return unless target.build_as_framework? + "#{target.build_product_path}/Headers" + end + + # @return [Array] + define_build_settings_method :vendored_framework_search_paths, :memoized => true do + search_paths = [] + search_paths.concat file_accessors. + flat_map(&:vendored_frameworks). + map { |f| File.join '${PODS_ROOT}', f.dirname.relative_path_from(target.sandbox.root) } + xcframework_intermediates = vendored_xcframeworks. + select { |xcf| xcf.build_type.framework? }. + map { |xcf| BuildSettings.xcframework_intermediate_dir(xcf) }. + uniq + search_paths.concat xcframework_intermediates + search_paths + end + + # @return [Array] + define_build_settings_method :framework_search_paths_to_import, :memoized => true do + paths = framework_search_paths_to_import_developer_frameworks(consumer_frameworks) + paths.concat vendored_framework_search_paths + return paths unless target.build_as_framework? && target.should_build? + + paths + [target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)] + end + + # @return [Array] + define_build_settings_method :vendored_static_frameworks, :memoized => true do + file_accessors.flat_map(&:vendored_static_frameworks) + end + + # @return [Array] + define_build_settings_method :vendored_dynamic_frameworks, :memoized => true do + file_accessors.flat_map(&:vendored_dynamic_frameworks) + end + + # @return [Array] + define_build_settings_method :vendored_xcframeworks, :memoized => true do + file_accessors.flat_map do |file_accessor| + file_accessor.vendored_xcframeworks.map { |path| load_xcframework(file_accessor.spec.name, path) } + end + end + + # @return [Array] + define_build_settings_method :system_framework_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + return ['$(PLATFORM_DIR)/Developer/Library/Frameworks'] if should_apply_xctunwrap_fix? 
+ [] + end + + #-------------------------------------------------------------------------# + + # @!group Libraries + + # Converts array of library path references to just the names to use + # link each library, e.g. from '/path/to/libSomething.a' to 'Something' + # + # @param [Array] libraries + # + # @return [Array] + # + def linker_names_from_libraries(libraries) + libraries.map { |l| File.basename(l, File.extname(l)).sub(/\Alib/, '') } + end + + # @return [Array] + define_build_settings_method :libraries, :memoized => true, :sorted => true, :uniqued => true do + return [] if library_xcconfig? && target.build_as_static? + + libraries = [] + if non_library_xcconfig? || target.build_as_dynamic? + libraries.concat linker_names_from_libraries(vendored_static_libraries) + libraries.concat libraries_to_import + xcframework_libraries = vendored_xcframeworks. + select { |xcf| xcf.build_type.static_library? }. + flat_map { |xcf| linker_names_from_libraries([xcf.slices.first.binary_path]) }. + uniq + libraries.concat xcframework_libraries + end + if non_library_xcconfig? + libraries.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].dynamic_libraries_to_import } + libraries.concat dependent_targets_to_link.flat_map { |pt| pt.build_settings[@configuration].static_libraries_to_import } + end + libraries + end + + # @return [Array] + define_build_settings_method :static_libraries_to_import, :memoized => true do + static_libraries_to_import = [] + unless target.should_build? && target.build_as_dynamic? + static_libraries_to_import.concat linker_names_from_libraries(vendored_static_libraries) + xcframework_libraries = vendored_xcframeworks. + select { |xcf| xcf.build_type.static_library? }. + flat_map { |xcf| linker_names_from_libraries([xcf.slices.first.binary_path]) }. + uniq + static_libraries_to_import.concat linker_names_from_libraries(xcframework_libraries) + end + static_libraries_to_import << target.product_basename if target.should_build? && target.build_as_static_library? + static_libraries_to_import + end + + # @return [Array] + define_build_settings_method :dynamic_libraries_to_import, :memoized => true do + dynamic_libraries_to_import = linker_names_from_libraries(vendored_dynamic_libraries) + dynamic_libraries_to_import.concat spec_consumers.flat_map(&:libraries) + dynamic_libraries_to_import << target.product_basename if target.should_build? && target.build_as_dynamic_library? + dynamic_libraries_to_import + end + + # @return [Array] + define_build_settings_method :libraries_to_import, :memoized => true, :sorted => true, :uniqued => true do + static_libraries_to_import + dynamic_libraries_to_import + end + + # @return [Array] + define_build_settings_method :library_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + library_search_paths = should_apply_xctunwrap_fix? ? ['$(PLATFORM_DIR)/Developer/usr/lib'] : [] + return library_search_paths if library_xcconfig? && target.build_as_static? + + library_search_paths.concat library_search_paths_to_import.dup + library_search_paths.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_dynamic_library_search_paths } + if library_xcconfig? 
+ library_search_paths.delete(target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)) + else + library_search_paths.concat(dependent_targets.flat_map { |pt| pt.build_settings[@configuration].library_search_paths_to_import }) + end + + library_search_paths + end + + # @return [Array] + define_build_settings_method :vendored_static_libraries, :memoized => true do + file_accessors.flat_map(&:vendored_static_libraries) + end + + # @return [Array] + define_build_settings_method :vendored_dynamic_libraries, :memoized => true do + file_accessors.flat_map(&:vendored_dynamic_libraries) + end + + # @return [Array] + define_build_settings_method :vendored_static_library_search_paths, :memoized => true do + paths = vendored_static_libraries.map { |f| File.join '${PODS_ROOT}', f.dirname.relative_path_from(target.sandbox.root) } + paths.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.static_library? }. + map { |xcf| BuildSettings.xcframework_intermediate_dir(xcf) } + paths + end + + # @return [Array] + define_build_settings_method :vendored_dynamic_library_search_paths, :memoized => true do + paths = vendored_dynamic_libraries.map { |f| File.join '${PODS_ROOT}', f.dirname.relative_path_from(target.sandbox.root) } + paths.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.dynamic_library? }. + map { |xcf| BuildSettings.xcframework_intermediate_dir(xcf) } + paths + end + + # @return [Array] + define_build_settings_method :library_search_paths_to_import, :memoized => true do + search_paths = vendored_static_library_search_paths + vendored_dynamic_library_search_paths + if target.uses_swift? || other_swift_flags_without_swift? + search_paths << '/usr/lib/swift' + search_paths << '${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}' + search_paths << '$(PLATFORM_DIR)/Developer/Library/Frameworks' if test_xcconfig? + end + return search_paths if target.build_as_framework? || !target.should_build? + + search_paths << target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE) + end + + #-------------------------------------------------------------------------# + + # @!group Clang + + # @return [Array] + define_build_settings_method :module_map_files, :memoized => true do + dependent_targets.map { |pt| pt.build_settings[@configuration].module_map_file_to_import }.compact.sort + end + + # @return [Array] + define_build_settings_method :module_map_file_to_import, :memoized => true do + return unless target.should_build? + return if target.build_as_framework? # framework module maps are automatically discovered + return unless target.defines_module? + + if target.uses_swift? + # for swift, we have a custom build phase that copies in the module map, appending the .Swift module + "${PODS_CONFIGURATION_BUILD_DIR}/#{target.label}/#{target.product_module_name}.modulemap" + else + "${PODS_ROOT}/#{target.module_map_path.relative_path_from(target.sandbox.root)}" + end + end + + # @return [Array] + define_build_settings_method :header_search_paths, :build_setting => true, :memoized => true, :sorted => true do + paths = target.header_search_paths(:include_dependent_targets_for_test_spec => test_xcconfig? && non_library_spec, :include_dependent_targets_for_app_spec => app_xcconfig? 
&& non_library_spec, :configuration => @configuration) + + dependent_vendored_xcframeworks = [] + dependent_vendored_xcframeworks.concat vendored_xcframeworks + dependent_vendored_xcframeworks.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_xcframeworks } + paths.concat dependent_vendored_xcframeworks. + select { |xcf| xcf.build_type.static_library? }. + map { |xcf| "#{BuildSettings.xcframework_intermediate_dir(xcf)}/Headers" }. + compact + paths + end + + # @return [Array] + define_build_settings_method :public_header_search_paths, :memoized => true, :sorted => true do + target.header_search_paths(:include_dependent_targets_for_test_spec => test_xcconfig? && non_library_spec, :include_dependent_targets_for_app_spec => app_xcconfig? && non_library_spec, :include_private_headers => false, :configuration => @configuration) + end + + #-------------------------------------------------------------------------# + + # @!group Swift + + # @see BuildSettings#other_swift_flags_without_swift? + def other_swift_flags_without_swift? + return false if library_xcconfig? + + target.uses_swift_for_spec?(non_library_spec) + end + + # @return [Array] + define_build_settings_method :other_swift_flags, :build_setting => true, :memoized => true do + return unless target.uses_swift? || other_swift_flags_without_swift? + + flags = super() + flags << '-suppress-warnings' if target.inhibit_warnings? && library_xcconfig? + if !target.build_as_framework? && target.defines_module? && library_xcconfig? + flags.concat %w( -import-underlying-module -Xcc -fmodule-map-file=${SRCROOT}/${MODULEMAP_FILE} ) + end + flags + end + + # @return [Array] + define_build_settings_method :swift_include_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + paths = dependent_targets.flat_map { |pt| pt.build_settings[@configuration].swift_include_paths_to_import } + paths.concat swift_include_paths_to_import if non_library_xcconfig? + paths.concat ['$(PLATFORM_DIR)/Developer/usr/lib'] if should_apply_xctunwrap_fix? + paths + end + + # @return [Array] + define_build_settings_method :swift_include_paths_to_import, :memoized => true do + return [] unless target.uses_swift? && !target.build_as_framework? + + [target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)] + end + + #-------------------------------------------------------------------------# + + # @!group Linking + + # @return [Boolean] whether the `-ObjC` linker flag is required. + # + # @note this is only true when generating build settings for a test bundle + # + def requires_objc_linker_flag? + test_xcconfig? || app_xcconfig? + end + + # @return [Boolean] whether the `-fobjc-arc` linker flag is required. + # + define_build_settings_method :requires_fobjc_arc?, :memoized => true do + target.podfile.set_arc_compatibility_flag? && + file_accessors.any? { |fa| fa.spec_consumer.requires_arc? } + end + + # @return [Array] + define_build_settings_method :ld_runpath_search_paths, :build_setting => true, :memoized => true do + return if library_xcconfig? + _ld_runpath_search_paths(:test_bundle => test_xcconfig?, + :uses_swift => other_swift_flags_without_swift? 
|| dependent_targets.any?(&:uses_swift?)) + end + + #-------------------------------------------------------------------------# + + # @!group Packaging + + # @return [String] + define_build_settings_method :skip_install, :build_setting => true do + 'YES' + end + + # @return [String] + define_build_settings_method :product_bundle_identifier, :build_setting => true do + 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}' + end + + # @return [String] + define_build_settings_method :configuration_build_dir, :build_setting => true, :memoized => true do + return if non_library_xcconfig? + target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE) + end + + # @return [String] + define_build_settings_method :application_extension_api_only, :build_setting => true, :memoized => true do + target.application_extension_api_only ? 'YES' : nil + end + + # @return [String] + define_build_settings_method :build_library_for_distribution, :build_setting => true, :memoized => true do + target.build_library_for_distribution ? 'YES' : nil + end + + #-------------------------------------------------------------------------# + + # @!group Target Properties + + # @return [Array] + define_build_settings_method :dependent_targets, :memoized => true do + select_maximal_pod_targets( + if test_xcconfig? + target.dependent_targets_for_test_spec(non_library_spec, :configuration => @configuration) + elsif app_xcconfig? + target.dependent_targets_for_app_spec(non_library_spec, :configuration => @configuration) + else + target.recursive_dependent_targets(:configuration => @configuration) + end, + ) + end + + # @return [Array] + define_build_settings_method :dependent_targets_to_link, :memoized => true do + if test_xcconfig? + # we're embedding into an app defined by an app spec + host_targets = target.app_host_dependent_targets_for_spec(non_library_spec, :configuration => @configuration) + dependent_targets - host_targets + else + dependent_targets + end + end + + # Returns the +pod_target_xcconfig+ for the pod target and its spec + # consumers grouped by keys + # + # @return [Hash{String,Hash{Target,String}] + # + def pod_target_xcconfig_values_by_consumer_by_key + spec_consumers.each_with_object({}) do |spec_consumer, hash| + spec_consumer.pod_target_xcconfig.each do |k, v| + (hash[k] ||= {})[spec_consumer] = v + end + end + end + + # Merges the +pod_target_xcconfig+ for all pod targets into a + # single hash and warns on conflicting definitions. + # + # @return [Hash{String, String}] + # + define_build_settings_method :merged_pod_target_xcconfigs, :memoized => true do + merged_xcconfigs(pod_target_xcconfig_values_by_consumer_by_key, :pod_target_xcconfig, + :overriding => non_library_xcconfig? ? target.build_settings[@configuration].merged_pod_target_xcconfigs : {}) + end + + # @return [Array] + define_build_settings_method :file_accessors, :memoized => true do + if non_library_xcconfig? + target.file_accessors.select { |fa| non_library_spec == fa.spec } + else + target.file_accessors.select { |fa| fa.spec.spec_type == @xcconfig_spec_type } + end + end + + # @return [Array] + define_build_settings_method :spec_consumers, :memoized => true do + if non_library_xcconfig? + target.spec_consumers.select { |sc| non_library_spec == sc.spec } + else + target.spec_consumers.select { |sc| sc.spec.spec_type == @xcconfig_spec_type } + end + end + + # Xcode 11 causes an issue with frameworks or libraries before 12.2 deployment target that link or are part of + # test bundles that use XCTUnwrap. 
Apple has provided an official work around for this. + # + # @see https://developer.apple.com/documentation/xcode_release_notes/xcode_11_release_notes + # + # @return [Boolean] Whether to apply the fix or not. + # + define_build_settings_method :should_apply_xctunwrap_fix?, :memoized => true do + library_xcconfig? && + target.platform.name == :ios && + Version.new(target.platform.deployment_target) < Version.new('12.2') && + (frameworks_to_import + weak_frameworks_to_import).uniq.include?('XCTest') + end + + #-------------------------------------------------------------------------# + end + + # A subclass that generates build settings for a `PodTarget` + class AggregateTargetSettings < BuildSettings + #-------------------------------------------------------------------------# + + # @!group Public API + + # @see BuildSettings.build_settings_names + def self.build_settings_names + @build_settings_names | BuildSettings.build_settings_names + end + + # @return [Symbol] + # The build configuration these settings will be used for + attr_reader :configuration_name + + # Initializes a new instance + # + # @param [AggregateTarget] target + # see {#target} + # + # @param [Symbol] configuration_name + # see {#configuration_name} + # + def initialize(target, configuration_name, configuration: nil) + super(target) + @configuration_name = configuration_name + (@configuration = configuration) || raise("No configuration for #{self}.") + end + + # @return [Xcodeproj::Config] xcconfig + define_build_settings_method :xcconfig, :memoized => true do + xcconfig = super() + merge_spec_xcconfig_into_xcconfig(merged_user_target_xcconfigs, xcconfig) + end + + #-------------------------------------------------------------------------# + + # @!group Paths + + # @return [String] + define_build_settings_method :pods_podfile_dir_path, :build_setting => true, :memoized => true do + target.podfile_dir_relative_path + end + + # @return [String] + define_build_settings_method :pods_root, :build_setting => true, :memoized => true do + target.relative_pods_root + end + + #-------------------------------------------------------------------------# + + # @!group Frameworks + + # @return [Array] + define_build_settings_method :frameworks, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :dynamic_frameworks_to_import do + [] + end + + # @return [Array] + define_build_settings_method :weak_frameworks, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :weak_frameworks do + [] + end + + # @return [Array] + define_build_settings_method :framework_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :framework_search_paths_to_import do + [] + end + + #-------------------------------------------------------------------------# + + # @!group Libraries + + # @return [Array] + define_build_settings_method :libraries, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :dynamic_libraries_to_import do + [] + end + + # @return [Array] + define_build_settings_method :library_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :vendored_dynamic_library_search_paths do + [] + end + + 
#-------------------------------------------------------------------------# + + # @!group Clang + + # @return [Array] + define_build_settings_method :header_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + paths = [] + + if !target.build_as_framework? || !pod_targets.all?(&:should_build?) + paths.concat target.sandbox.public_headers.search_paths(target.platform) + end + + # Make frameworks headers discoverable with any syntax (quotes, + # brackets, @import, etc.) + paths.concat pod_targets. + select { |pt| pt.build_as_framework? && pt.should_build? }. + map { |pt| pt.build_settings[@configuration].framework_header_search_path } + + xcframework_library_headers = pod_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_xcframeworks }. + select { |xcf| xcf.build_type.static_library? }. + map { |xcf| "#{BuildSettings.xcframework_intermediate_dir(xcf)}/Headers" }. + compact + + paths.concat xcframework_library_headers + + paths.concat target.search_paths_aggregate_targets.flat_map { |at| at.build_settings(configuration_name).header_search_paths } + + paths + end + + # @return [Array] + define_build_settings_method :other_cflags, :build_setting => true, :memoized => true do + flags = super() + + pod_targets_inhibiting_warnings = pod_targets.select(&:inhibit_warnings?) + + silenced_headers = [] + silenced_frameworks = [] + pod_targets_inhibiting_warnings.each do |pt| + if pt.build_as_framework? && pt.should_build? + silenced_headers.append pt.build_settings[@configuration].framework_header_search_path + else + silenced_headers.concat pt.build_settings[@configuration].public_header_search_paths + end + silenced_frameworks.concat pt.build_settings[@configuration].framework_search_paths_to_import + end + + flags += silenced_headers.uniq.flat_map { |p| ['-isystem', p] } + flags += silenced_frameworks.uniq.flat_map { |p| ['-iframework', p] } + + flags + end + + # @return [Array] + define_build_settings_method :module_map_files, :memoized => true, :sorted => true, :uniqued => true, :compacted => true, :from_search_paths_aggregate_targets => :module_map_file_to_import do + pod_targets.map { |pt| pt.build_settings[@configuration].module_map_file_to_import } + end + + #-------------------------------------------------------------------------# + + # @!group Swift + + # @see BuildSettings#other_swift_flags_without_swift? + def other_swift_flags_without_swift? + module_map_files.any? + end + + # @return [Array] + define_build_settings_method :swift_include_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :swift_include_paths_to_import do + [] + end + + # @return [String] + define_build_settings_method :always_embed_swift_standard_libraries, :build_setting => true, :memoized => true do + return unless must_embed_swift? + return if target_swift_version < EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION + + 'YES' + end + + # @return [String] + define_build_settings_method :embedded_content_contains_swift, :build_setting => true, :memoized => true do + return unless must_embed_swift? + return if target_swift_version >= EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION + + 'YES' + end + + # @return [Boolean] + define_build_settings_method :must_embed_swift?, :memoized => true do + !target.requires_host_target? && pod_targets.any?(&:uses_swift?) 
+ end + + #-------------------------------------------------------------------------# + + # @!group Linking + + # @return [Array] + define_build_settings_method :ld_runpath_search_paths, :build_setting => true, :memoized => true, :uniqued => true do + return unless pod_targets.any?(&:build_as_dynamic?) || any_vendored_dynamic_artifacts? + symbol_type = target.user_targets.map(&:symbol_type).uniq.first + test_bundle = symbol_type == :octest_bundle || symbol_type == :unit_test_bundle || symbol_type == :ui_test_bundle + _ld_runpath_search_paths(:requires_host_target => target.requires_host_target?, :test_bundle => test_bundle, + :uses_swift => pod_targets.any?(&:uses_swift?)) + end + + # @return [Boolean] + define_build_settings_method :any_vendored_dynamic_artifacts?, :memoized => true do + pod_targets.any? do |pt| + pt.file_accessors.any? do |fa| + !fa.vendored_dynamic_artifacts.empty? + end + end + end + + # @return [Boolean] + define_build_settings_method :any_vendored_static_artifacts?, :memoized => true do + pod_targets.any? do |pt| + pt.file_accessors.any? do |fa| + !fa.vendored_static_artifacts.empty? + end + end + end + + # @return [Boolean] + define_build_settings_method :requires_objc_linker_flag?, :memoized => true do + pod_targets.any?(&:build_as_static?) || + any_vendored_static_artifacts? + end + + # @return [Boolean] + define_build_settings_method :requires_fobjc_arc?, :memoized => true do + target.podfile.set_arc_compatibility_flag? && + target.spec_consumers.any?(&:requires_arc?) + end + + #-------------------------------------------------------------------------# + + # @!group Target Properties + + # @return [Version] the SWIFT_VERSION of the target being integrated + # + define_build_settings_method :target_swift_version, :memoized => true, :frozen => false do + swift_version = target.target_definition.swift_version + swift_version = nil if swift_version.blank? + Version.new(swift_version) + end + + EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION = Version.new('2.3') + private_constant :EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION + + # Returns the {PodTarget}s which are active for the current + # configuration name. + # + # @return [Array] + # + define_build_settings_method :pod_targets, :memoized => true do + target.pod_targets_for_build_configuration(configuration_name) + end + + # @return [Array] + define_build_settings_method :pod_targets_to_link, :memoized => true do + pod_targets - + target.search_paths_aggregate_targets.flat_map { |at| at.build_settings(configuration_name).pod_targets_to_link } + end + + # @return [Array] + define_build_settings_method :search_paths_aggregate_target_pod_target_build_settings, :memoized => true, :uniqued => true do + pod_targets = target.search_paths_aggregate_targets.flat_map { |at| at.build_settings(configuration_name).pod_targets } + pod_targets = select_maximal_pod_targets(pod_targets) + pod_targets.map { |pt| pt.build_settings[@configuration] } + end + + # Returns the +user_target_xcconfig+ for all pod targets and their spec + # consumers grouped by keys + # + # @return [Hash{String,Hash{Target,String}] + # + def user_target_xcconfig_values_by_consumer_by_key + targets = (pod_targets + target.search_paths_aggregate_targets.flat_map(&:pod_targets)).uniq + targets.each_with_object({}) do |target, hash| + target.spec_consumers.each do |spec_consumer| + spec_consumer.user_target_xcconfig.each do |k, v| + # TODO: Need to decide how we are going to ensure settings like these + # are always excluded from the user's project. 
+ # + # See https://github.com/CocoaPods/CocoaPods/issues/1216 + next if k == 'USE_HEADERMAP' + (hash[k] ||= {})[spec_consumer] = v + end + end + end + end + + # Merges the +user_target_xcconfig+ for all pod targets into a + # single hash and warns on conflicting definitions. + # + # @return [Hash{String, String}] + # + define_build_settings_method :merged_user_target_xcconfigs, :memoized => true do + merged_xcconfigs(user_target_xcconfig_values_by_consumer_by_key, :user_target_xcconfig) + end + + #-------------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/pod_target.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/pod_target.rb new file mode 100644 index 0000000..503e143 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/target/pod_target.rb @@ -0,0 +1,1168 @@ +require 'cocoapods/xcode/framework_paths' +require 'cocoapods/xcode/xcframework' + +module Pod + # Stores the information relative to the target used to compile a single Pod. + # A pod can have one or more activated spec, subspecs and test specs. + # + class PodTarget < Target + # @return [Array] the spec, subspecs and test specs of the target. + # + attr_reader :specs + + # @return [Array] All of the test specs within this target. + # Subset of #specs. + # + attr_reader :test_specs + + # @return [Array] All of the specs within this target that are library specs. + # Subset of #specs. + # + attr_reader :library_specs + + # @return [Array] All of the specs within this target that are app specs. + # Subset of #specs. + # + attr_reader :app_specs + + # @return [Array] the target definitions of the Podfile + # that generated this target. + # + attr_reader :target_definitions + + # @return [Array] the file accessors for the + # specifications of this target. + # + attr_reader :file_accessors + + # @return [String] the suffix used for this target when deduplicated. May be `nil`. + # + # @note This affects the value returned by #configuration_build_dir + # and accessors relying on this as #build_product_path. + # + attr_reader :scope_suffix + + # @return [HeadersStore] the header directory for the target. + # + attr_reader :build_headers + + # @return [Array] the targets that this target has a dependency + # upon. + # + attr_reader :dependent_targets + attr_reader :dependent_targets_by_config + + # @deprecated + def dependent_targets=(dependent_targets) + @dependent_targets = dependent_targets + @dependent_targets_by_config = { :debug => dependent_targets, :release => dependent_targets } + end + + def dependent_targets_by_config=(dependent_targets_by_config) + @dependent_targets_by_config = dependent_targets_by_config + @dependent_targets = dependent_targets_by_config.each_value.reduce([], &:|) + end + + # @return [Hash{String=>Array}] all target dependencies by test spec name. 
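+    #         The `*_by_config` variants below are additionally keyed by configuration
+    #         (`:debug` / `:release`); the deprecated non-config writers populate both
+    #         configurations with the same list.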
+ # + attr_reader :test_dependent_targets_by_spec_name + attr_reader :test_dependent_targets_by_spec_name_by_config + + # @deprecated + def test_dependent_targets_by_spec_name=(test_dependent_targets_by_spec_name) + @test_dependent_targets_by_spec_name = test_dependent_targets_by_spec_name + @test_dependent_targets_by_spec_name_by_config = Hash[test_dependent_targets_by_spec_name.map do |k, v| + [k, { :debug => v, :release => v }] + end] + end + + def test_dependent_targets_by_spec_name_by_config=(test_dependent_targets_by_spec_name_by_config) + @test_dependent_targets_by_spec_name_by_config = test_dependent_targets_by_spec_name_by_config + @test_dependent_targets_by_spec_name = Hash[test_dependent_targets_by_spec_name_by_config.map do |k, v| + [k, v.each_value.reduce(Set.new, &:|).to_a] + end] + end + + # @return [Hash{String=>Array}] all target dependencies by app spec name. + # + attr_reader :app_dependent_targets_by_spec_name + attr_reader :app_dependent_targets_by_spec_name_by_config + + # @deprecated + def app_dependent_targets_by_spec_name=(app_dependent_targets_by_spec_name) + @app_dependent_targets_by_spec_name = app_dependent_targets_by_spec_name + @app_dependent_targets_by_spec_name_by_config = Hash[app_dependent_targets_by_spec_name.map do |k, v| + [k, { :debug => v, :release => v }] + end] + end + + def app_dependent_targets_by_spec_name_by_config=(app_dependent_targets_by_spec_name_by_config) + @app_dependent_targets_by_spec_name_by_config = app_dependent_targets_by_spec_name_by_config + @app_dependent_targets_by_spec_name = Hash[app_dependent_targets_by_spec_name_by_config.map do |k, v| + [k, v.each_value.reduce(Set.new, &:|).to_a] + end] + end + + # @return [Hash{Specification => (Specification,PodTarget)}] tuples of app specs and pod targets by test spec. + # + attr_accessor :test_app_hosts_by_spec + + # @return [Hash{String => BuildSettings}] the test spec build settings for this target. + # + attr_reader :test_spec_build_settings + attr_reader :test_spec_build_settings_by_config + + # @return [Hash{String => BuildSettings}] the app spec build settings for this target. + # + attr_reader :app_spec_build_settings + attr_reader :app_spec_build_settings_by_config + + # @return [String] the Swift version for this target. + # + attr_reader :swift_version + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see Target#sandbox + # @param [BuildType] build_type @see Target#build_type + # @param [Hash{String=>Symbol}] user_build_configurations @see Target#user_build_configurations + # @param [Array] archs @see Target#archs + # @param [Platform] platform @see Target#platform + # @param [Array] specs @see #specs + # @param [Array] target_definitions @see #target_definitions + # @param [Array] file_accessors @see #file_accessors + # @param [String] scope_suffix @see #scope_suffix + # @param [String] swift_version @see #swift_version + # + def initialize(sandbox, build_type, user_build_configurations, archs, platform, specs, target_definitions, + file_accessors = [], scope_suffix = nil, swift_version = nil) + super(sandbox, build_type, user_build_configurations, archs, platform) + raise "Can't initialize a PodTarget without specs!" if specs.nil? || specs.empty? + raise "Can't initialize a PodTarget without TargetDefinition!" if target_definitions.nil? || target_definitions.empty? + raise "Can't initialize a PodTarget with an empty string scope suffix!" 
if scope_suffix == '' + @specs = specs.dup.freeze + @target_definitions = target_definitions + @file_accessors = file_accessors + @scope_suffix = scope_suffix + @swift_version = swift_version + all_specs_by_type = @specs.group_by(&:spec_type) + @library_specs = all_specs_by_type[:library] || [] + @test_specs = all_specs_by_type[:test] || [] + @app_specs = all_specs_by_type[:app] || [] + @build_headers = Sandbox::HeadersStore.new(sandbox, 'Private', :private) + self.dependent_targets = [] + self.test_dependent_targets_by_spec_name = Hash[test_specs.map { |ts| [ts.name, []] }] + self.app_dependent_targets_by_spec_name = Hash[app_specs.map { |as| [as.name, []] }] + @test_app_hosts_by_spec = {} + @build_config_cache = {} + @test_spec_build_settings_by_config = create_test_build_settings_by_config + @app_spec_build_settings_by_config = create_app_build_settings_by_config + end + + # Scopes the current target based on the existing pod targets within the cache. + # + # @param [Hash{Array => PodTarget}] cache + # the cached target for a previously scoped target. + # + # @return [Array] a scoped copy for each target definition. + # + def scoped(cache = {}) + target_definitions.map do |target_definition| + cache_key = [specs, target_definition] + cache[cache_key] ||= begin + target = PodTarget.new(sandbox, build_type, user_build_configurations, archs, platform, specs, + [target_definition], file_accessors, target_definition.label, swift_version) + scope_dependent_targets = ->(dependent_targets) do + dependent_targets.flat_map do |pod_target| + pod_target.scoped(cache).select { |pt| pt.target_definitions == [target_definition] } + end + end + + target.dependent_targets_by_config = Hash[dependent_targets_by_config.map { |k, v| [k, scope_dependent_targets[v]] }] + target.test_dependent_targets_by_spec_name_by_config = Hash[test_dependent_targets_by_spec_name_by_config.map do |spec_name, test_pod_targets_by_config| + [spec_name, Hash[test_pod_targets_by_config.map { |k, v| [k, scope_dependent_targets[v]] }]] + end] + target.app_dependent_targets_by_spec_name_by_config = Hash[app_dependent_targets_by_spec_name_by_config.map do |spec_name, app_pod_targets_by_config| + [spec_name, Hash[app_pod_targets_by_config.map { |k, v| [k, scope_dependent_targets[v]] }]] + end] + target.test_app_hosts_by_spec = Hash[test_app_hosts_by_spec.map do |spec, (app_host_spec, app_pod_target)| + [spec, [app_host_spec, app_pod_target.scoped(cache).find { |pt| pt.target_definitions == [target_definition] }]] + end] + target + end + end + end + + # @return [String] the label for the target. + # + def label + if scope_suffix.nil? || scope_suffix[0] == '.' + "#{root_spec.name}#{scope_suffix}" + else + "#{root_spec.name}-#{scope_suffix}" + end + end + + # @return [Array] The list of all files tracked. + # + def all_files + Sandbox::FileAccessor.all_files(file_accessors) + end + + # @return [Pathname] the pathname for headers in the sandbox. + # + def headers_sandbox + Pathname.new(pod_name) + end + + # @return [Hash{FileAccessor => Hash}] Hash of file accessors by header mappings. + # + def header_mappings_by_file_accessor + valid_accessors = file_accessors.reject { |fa| fa.spec.non_library_specification? } + Hash[valid_accessors.map do |file_accessor| + # Private headers will always end up in Pods/Headers/Private/PodA/*.h + # This will allow for `""` imports to work. 
+ [file_accessor, header_mappings(file_accessor, file_accessor.headers)] + end] + end + + # @return [Hash{FileAccessor => Hash}] Hash of file accessors by public header mappings. + # + def public_header_mappings_by_file_accessor + valid_accessors = file_accessors.reject { |fa| fa.spec.non_library_specification? } + Hash[valid_accessors.map do |file_accessor| + # Public headers on the other hand will be added in Pods/Headers/Public/PodA/PodA/*.h + # The extra folder is intentional in order for `<>` imports to work. + [file_accessor, header_mappings(file_accessor, file_accessor.public_headers)] + end] + end + + # @return [Array] the Swift versions supported. Might be empty if the author has not + # specified any versions, most likely due to legacy reasons. + # + def spec_swift_versions + root_spec.swift_versions + end + + # @return [Podfile] The podfile which declares the dependency. + # + def podfile + target_definitions.first.podfile + end + + # @return [String] the project name derived from the target definitions that integrate this pod. If none is + # specified then the name of the pod is used by default. + # + # @note The name is guaranteed to be the same across all target definitions and is validated by the target + # validator during installation. + # + def project_name + target_definitions.map { |td| td.project_name_for_pod(pod_name) }.compact.first || pod_name + end + + # @return [String] The name to use for the source code module constructed + # for this target, and which will be used to import the module in + # implementation source files. + # + def product_module_name + root_spec.module_name + end + + # @param [Specification] spec the specification + # + # @return [String] the product basename of the specification's target + def product_basename_for_spec(spec) + user_specified = build_settings_by_config_for_spec(spec). + each_value. + map { |settings| settings.merged_pod_target_xcconfigs['PRODUCT_NAME'] }. + compact. + uniq + + if user_specified.size == 1 + user_specified.first + else + spec_label(spec) + end + end + + # @return [Bool] Whether or not this target should be built. + # + # A target should not be built if it has no source files. + # + def should_build? + return @should_build if defined? @should_build + accessors = file_accessors.select { |fa| fa.spec.library_specification? } + source_files = accessors.flat_map(&:source_files) + source_files -= accessors.flat_map(&:headers) + @should_build = !source_files.empty? + end + + # @return [Array] the specification consumers for + # the target. + # + def spec_consumers + specs.map { |spec| spec.consumer(platform) } + end + + # @return [Array] the test specification consumers for + # the target. + # + def test_spec_consumers + test_specs.map { |test_spec| test_spec.consumer(platform) } + end + + # @return [Array] the app specification consumers for + # the target. + # + def app_spec_consumers + app_specs.map { |app_spec| app_spec.consumer(platform) } + end + + # Checks whether the target itself plus its specs uses Swift code. + # This check excludes source files from non library specs. + # Note that if a target does not need to be built (no source code), + # we fallback to check whether it indicates a swift version. + # + # @return [Boolean] Whether the target uses Swift code. + # + def uses_swift? + return @uses_swift if defined? @uses_swift + @uses_swift = (!should_build? && !spec_swift_versions.empty?) || + file_accessors.select { |a| a.spec.library_specification? }.any? 
do |file_accessor| + uses_swift_for_spec?(file_accessor.spec) + end + end + + # Checks whether a specification uses Swift or not. + # + # @param [Specification] spec + # The spec to query against. + # + # @return [Boolean] Whether the target uses Swift code within the requested non library spec. + # + def uses_swift_for_spec?(spec) + @uses_swift_for_spec_cache ||= {} + return @uses_swift_for_spec_cache[spec.name] if @uses_swift_for_spec_cache.key?(spec.name) + @uses_swift_for_spec_cache[spec.name] = begin + file_accessor = file_accessors.find { |fa| fa.spec == spec } + raise "[Bug] Unable to find file accessor for spec `#{spec.inspect}` in pod target `#{label}`" unless file_accessor + file_accessor.source_files.any? { |sf| sf.extname == '.swift' } + end + end + + # @return [Boolean] Whether the target defines a "module" + # (and thus will need a module map and umbrella header). + # + # @note Static library targets can temporarily opt in to this behavior by setting + # `DEFINES_MODULE = YES` in their specification's `pod_target_xcconfig`. + # + def defines_module? + return @defines_module if defined?(@defines_module) + return @defines_module = true if uses_swift? || build_as_framework? + + explicit_target_definitions = target_definitions.select { |td| td.dependencies.any? { |d| d.root_name == pod_name } } + tds_by_answer = explicit_target_definitions.group_by { |td| td.build_pod_as_module?(pod_name) } + + if tds_by_answer.size > 1 + UI.warn "Unable to determine whether to build `#{label}` as a module due to a conflict " \ + "between the following target definitions:\n\t- #{tds_by_answer.map do |a, td| + "`#{td.to_sentence}` #{a ? "requires `#{label}` as a module" : "does not require `#{label}` as a module"}" + end.join("\n\t- ")}\n\n" \ + "Defaulting to skip building `#{label}` as a module." + elsif tds_by_answer.keys.first == true || target_definitions.all? { |td| td.build_pod_as_module?(pod_name) } + return @defines_module = true + end + + @defines_module = library_specs.any? { |s| s.consumer(platform).pod_target_xcconfig['DEFINES_MODULE'] == 'YES' } + end + + # @return [ArrayString}>] An array of hashes where each hash represents a single script phase. + # + def script_phases + spec_consumers.flat_map(&:script_phases) + end + + # @return [Boolean] Whether the target contains any script phases. + # + def contains_script_phases? + !script_phases.empty? + end + + # @return [Boolean] Whether the target has any tests specifications. + # + def contains_test_specifications? + !test_specs.empty? + end + + # @return [Boolean] Whether the target has any app specifications. + # + def contains_app_specifications? + !app_specs.empty? + end + + # @return [Hash{String=>Array}] The vendored and non vendored framework paths this target + # depends upon keyed by spec name. For the root spec and subspecs the framework path of the target itself + # is included. + # + def framework_paths + @framework_paths ||= begin + file_accessors.each_with_object({}) do |file_accessor, hash| + frameworks = file_accessor.vendored_dynamic_artifacts.map do |framework_path| + relative_path_to_sandbox = framework_path.relative_path_from(sandbox.root) + framework_source = "${PODS_ROOT}/#{relative_path_to_sandbox}" + # Until this can be configured, assume the dSYM file uses the file name as the framework. + # See https://github.com/CocoaPods/CocoaPods/issues/1698 + dsym_name = "#{framework_path.basename}.dSYM" + dsym_path = Pathname.new("#{framework_path.dirname}/#{dsym_name}") + dsym_source = if dsym_path.exist? 
+ "${PODS_ROOT}/#{relative_path_to_sandbox}.dSYM" + end + dirname = framework_path.dirname + bcsymbolmap_paths = if dirname.exist? + Dir.chdir(dirname) do + Dir.glob('*.bcsymbolmap').map do |bcsymbolmap_file_name| + bcsymbolmap_path = dirname + bcsymbolmap_file_name + "${PODS_ROOT}/#{bcsymbolmap_path.relative_path_from(sandbox.root)}" + end + end + end + Xcode::FrameworkPaths.new(framework_source, dsym_source, bcsymbolmap_paths) + end + if file_accessor.spec.library_specification? && should_build? && build_as_dynamic_framework? + frameworks << Xcode::FrameworkPaths.new(build_product_path('${BUILT_PRODUCTS_DIR}')) + end + hash[file_accessor.spec.name] = frameworks + end + end + end + + # @return [Hash{String=>Array}] The vendored xcframeworks this target + # depends upon keyed by spec name. + # + def xcframeworks + @xcframeworks ||= begin + file_accessors.each_with_object({}) do |file_accessor, hash| + frameworks = file_accessor.vendored_xcframeworks.map do |framework_path| + Xcode::XCFramework.new(file_accessor.spec.name, framework_path) + end + hash[file_accessor.spec.name] = frameworks + end + end + end + + # @return [Hash{String=>Array}] The resource and resource bundle paths this target depends upon keyed by + # spec name. Resource (not resource bundles) paths can vary depending on the type of spec: + # - App specs _always_ get their resource paths added to "Copy Bundle Resources" phase from + # [PodTargetInstaller] therefore their resource paths are never included here. + # - Test specs may have their resource paths added to "Copy Bundle Resources" if the target itself is + # built as a framework, which is again checked and handled by PodTargetInstaller. If that is true then + # the resource paths are not included, otherwise they are included and handled via the CocoaPods copy + # resources script phase. + # - Library specs _do not_ have per-configuration CocoaPods copy resources script phase and their resource + # paths will be added to "Copy Bundle Resources" phase if the target is built as a framework because + # it supports it. We always include the resource paths for library specs because they are also + # integrated to the user target. + # + def resource_paths + @resource_paths ||= begin + file_accessors.each_with_object({}) do |file_accessor, hash| + resource_paths = if file_accessor.spec.app_specification? || (file_accessor.spec.test_specification? && build_as_framework?) + [] + else + file_accessor.resources.map do |res| + "${PODS_ROOT}/#{res.relative_path_from(sandbox.project_path.dirname)}" + end + end + prefix = Pod::Target::BuildSettings::CONFIGURATION_BUILD_DIR_VARIABLE + prefix = configuration_build_dir unless file_accessor.spec.test_specification? + resource_bundle_paths = file_accessor.resource_bundles.keys.map { |name| "#{prefix}/#{name.shellescape}.bundle" } + hash[file_accessor.spec.name] = (resource_paths + resource_bundle_paths).map(&:to_s) + end + end + end + + # @param [Specification] spec The non library spec to calculate the deployment target for. + # + # @return [String] The deployment target to use for the non library spec. If the non library spec explicitly + # specifies one then this is the one used otherwise the one that was determined by the analyzer is used. + # + def deployment_target_for_non_library_spec(spec) + raise ArgumentError, 'Must be a non library spec.' unless spec.non_library_specification? 
+ spec.deployment_target(platform.name.to_s) || platform.deployment_target.to_s + end + + # Returns the corresponding native product type to use given the test type. + # This is primarily used when creating the native targets in order to produce the correct test bundle target + # based on the type of tests included. + # + # @param [Symbol] test_type + # The test type to map to provided by the test specification DSL. + # + # @return [Symbol] The native product type to use. + # + def product_type_for_test_type(test_type) + case test_type + when :unit + :unit_test_bundle + when :ui + :ui_test_bundle + else + raise ArgumentError, "Unknown test type `#{test_type}`." + end + end + + # Returns the label to use for the given test type. + # This is used to generate native target names for test specs. + # + # @param [Symbol] test_type + # The test type to map to provided by the test specification DSL. + # + # @return [String] The native product type to use. + # + def label_for_test_type(test_type) + case test_type + when :unit + 'Unit' + when :ui + 'UI' + else + raise ArgumentError, "Unknown test type `#{test_type}`." + end + end + + # @return [Specification] The root specification for the target. + # + def root_spec + @root_spec ||= specs.first.root + end + + # @return [String] The name of the Pod that this target refers to. + # + def pod_name + root_spec.name + end + + # @return [Pathname] the absolute path of the LLVM module map file that + # defines the module structure for the compiler. + # + def module_map_path + basename = "#{label}.modulemap" + if build_as_framework? + super + elsif file_accessors.any?(&:module_map) + build_headers.root + product_module_name + basename + else + sandbox.public_headers.root + product_module_name + basename + end + end + + # @return [Pathname] the absolute path of the prefix header file. + # + def prefix_header_path + support_files_dir + "#{label}-prefix.pch" + end + + # @return [Hash] the additional entries to add to the generated Info.plist + # + def info_plist_entries + root_spec.info_plist + end + + # @param [String] bundle_name + # The name of the bundle product, which is given by the +spec+. + # + # @return [String] The derived name of the resource bundle target. + # + def resources_bundle_target_label(bundle_name) + "#{label}-#{bundle_name}" + end + + # @param [Specification] subspec + # The subspec to use for producing the label. + # + # @return [String] The derived name of the target. + # + def subspec_label(subspec) + raise ArgumentError, 'Must not be a root spec' if subspec.root? + subspec.name.split('/')[1..-1].join('-').to_s + end + + # @param [Specification] test_spec + # The test spec to use for producing the test label. + # + # @return [String] The derived name of the test target. + # + def test_target_label(test_spec) + "#{label}-#{label_for_test_type(test_spec.test_type)}-#{subspec_label(test_spec)}" + end + + # @param [Specification] app_spec + # The app spec to use for producing the app label. + # + # @return [String] The derived name of the app target. + # + def app_target_label(app_spec) + "#{label}-#{subspec_label(app_spec)}" + end + + # @param [Specification] test_spec + # the test spec to use for producing the app host target label. 
+ # + # @return [(String,String)] a tuple, where the first item is the PodTarget#label of the pod target that defines the + # app host, and the second item is the target name of the app host + # + def app_host_target_label(test_spec) + app_spec, app_target = test_app_hosts_by_spec[test_spec] + + if app_spec + [app_target.name, app_target.app_target_label(app_spec)] + elsif test_spec.consumer(platform).requires_app_host? + [name, "AppHost-#{label}-#{label_for_test_type(test_spec.test_type)}-Tests"] + end + end + + # @param [Specification] spec + # the spec to return app host dependencies for + # + # @param [String] configuration + # the configuration to retrieve the app host dependent targets for. + # + # @return [Array] the app host dependent targets for the given spec. + # + def app_host_dependent_targets_for_spec(spec, configuration: nil) + return [] unless spec.test_specification? && spec.consumer(platform).test_type == :unit + app_host_info = test_app_hosts_by_spec[spec] + if app_host_info.nil? + [] + else + app_spec, app_target = *app_host_info + app_target.dependent_targets_for_app_spec(app_spec, :configuration => configuration) + end + end + + def spec_label(spec) + case spec.spec_type + when :library then label + when :test then test_target_label(spec) + when :app then app_target_label(spec) + else raise ArgumentError, "Unhandled spec type #{spec.spec_type.inspect} for #{spec.inspect}" + end + end + # for backwards compatibility + alias non_library_spec_label spec_label + + # @param [Specification] spec + # The spec to return scheme configuration for. + # + # @return [Hash] The scheme configuration used or empty if none is specified. + # + def scheme_for_spec(spec) + return {} if (spec.library_specification? && !spec.root?) || spec.available_platforms.none? do |p| + p.name == platform.name + end + spec.consumer(platform).scheme + end + + # @param [Specification] spec + # The spec this copy resources script path is for. + # + # @return [Pathname] The absolute path of the copy resources script for the given spec. + # + def copy_resources_script_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-resources.sh" + end + + # @param [Specification] spec + # The spec this copy resources script path is for. + # + # @return [Pathname] The absolute path of the copy resources script input file list for the given spec. + # + def copy_resources_script_input_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-resources-input-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this copy resources script path is for. + # + # @return [Pathname] The absolute path of the copy resources script output file list for the given spec. + # + def copy_resources_script_output_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-resources-output-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this embed frameworks script path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script for the given spec. + # + def embed_frameworks_script_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-frameworks.sh" + end + + # @param [Specification] spec + # The spec this embed frameworks script path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script input file list for the given spec. 
+ # + def embed_frameworks_script_input_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-frameworks-input-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this embed frameworks script path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script output file list for the given spec. + # + def embed_frameworks_script_output_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-frameworks-output-files.xcfilelist" + end + + # @return [Pathname] The absolute path of the copy xcframeworks script. + # + def copy_xcframeworks_script_path + support_files_dir + "#{label}-xcframeworks.sh" + end + + # @return [String] The path of the copy xcframeworks input files file list + # + def copy_xcframeworks_script_input_files_path + support_files_dir + "#{label}-xcframeworks-input-files.xcfilelist" + end + + # @return [String] The path of the copy xcframeworks output files file list + # + def copy_xcframeworks_script_output_files_path + support_files_dir + "#{label}-xcframeworks-output-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this script path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script for the given spec. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-artifacts.sh" + end + + # @param [Specification] spec + # The spec this script path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script input file list for the given spec. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_input_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-artifacts-input-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this script path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script output file list for the given spec. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_output_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-artifacts-output-files.xcfilelist" + end + + # @return [Pathname] The absolute path of the copy dSYMs script. + # + def copy_dsyms_script_path + support_files_dir + "#{label}-copy-dsyms.sh" + end + + # @return [Pathname] The absolute path of the copy dSYM script phase input file list. + # + def copy_dsyms_script_input_files_path + support_files_dir + "#{label}-copy-dsyms-input-files.xcfilelist" + end + + # @return [Pathname] The absolute path of the copy dSYM script phase output file list. + # + def copy_dsyms_script_output_files_path + support_files_dir + "#{label}-copy-dsyms-output-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this Info.plist path is for. + # + # @return [Pathname] The absolute path of the Info.plist for the given spec. + # + def info_plist_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-Info.plist" + end + + # @param [Specification] spec + # The spec this prefix header path is for. + # + # @return [Pathname] the absolute path of the prefix header file for the given spec. + # + def prefix_header_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-prefix.pch" + end + + # @return [Array] The names of the Pods on which this target + # depends. 
+ # + def dependencies + spec_consumers.flat_map do |consumer| + consumer.dependencies.map { |dep| Specification.root_name(dep.name) } + end.uniq + end + + # Returns all dependent targets of this target. If a configuration is passed then the list can be scoped to a given + # configuration. + # + # @param [String] configuration + # The configuration to return the dependent targets for or `nil` if all configurations should be included. + # + # @return [Array] the recursive targets that this target has a dependency upon. + # + def recursive_dependent_targets(configuration: nil) + @recursive_dependent_targets ||= begin + hash = Hash[config_variants.map do |config| + [config, _add_recursive_dependent_targets(Set.new, :configuration => config).delete(self).to_a.freeze] + end] + hash[nil] = hash.each_value.reduce(Set.new, &:|).to_a + hash + end + @recursive_dependent_targets.fetch(configuration) { raise ArgumentError, "No configuration #{configuration} for #{self}, known configurations are #{@recursive_dependent_targets.keys}" } + end + + def _add_recursive_dependent_targets(set, configuration: nil) + if defined?(@recursive_dependent_targets) + return set.merge(@recursive_dependent_targets[configuration]) + end + dependent_targets = configuration ? dependent_targets_by_config[configuration] : self.dependent_targets + dependent_targets.each do |target| + target._add_recursive_dependent_targets(set, :configuration => configuration) if set.add?(target) + end + + set + end + protected :_add_recursive_dependent_targets + + # @param [Specification] test_spec + # the test spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the test dependent targets for. + # + # @return [Array] the recursive targets that this target has a + # test dependency upon. + # + def recursive_test_dependent_targets(test_spec, configuration: nil) + @recursive_test_dependent_targets ||= {} + @recursive_test_dependent_targets[test_spec] ||= begin + hash = Hash[config_variants.map do |config| + [config, _add_recursive_test_dependent_targets(test_spec, Set.new, :configuration => config).to_a.freeze] + end] + hash[nil] = hash.each_value.reduce(Set.new, &:|).to_a.freeze + hash + end + @recursive_test_dependent_targets[test_spec][configuration] + end + + def _add_recursive_test_dependent_targets(test_spec, set, configuration: nil) + raise ArgumentError, 'Must give a test spec' unless test_spec + dependent_targets = configuration ? test_dependent_targets_by_spec_name_by_config[test_spec.name][configuration] : test_dependent_targets_by_spec_name[test_spec.name] + raise ArgumentError, "Unable to find deps for #{test_spec} for config #{configuration.inspect} (out of #{test_dependent_targets_by_spec_name_by_config.inspect})" unless dependent_targets + + dependent_targets.each do |target| + target._add_recursive_dependent_targets(set, :configuration => configuration) if set.add?(target) + end + + set + end + private :_add_recursive_test_dependent_targets + + # @param [Specification] test_spec + # the test spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the test dependent targets for. + # + # @return [Array] the canonical list of dependent targets this target has a dependency upon. + # This list includes the target itself as well as its recursive dependent and test dependent targets. 
+ # + def dependent_targets_for_test_spec(test_spec, configuration: nil) + [self, *recursive_dependent_targets(:configuration => configuration), *recursive_test_dependent_targets(test_spec, :configuration => configuration)].uniq + end + + # @param [Specification] app_spec + # the app spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the app dependent targets for. + # + # @return [Array] the recursive targets that this target has a + # app dependency upon. + # + def recursive_app_dependent_targets(app_spec, configuration: nil) + @recursive_app_dependent_targets ||= {} + @recursive_app_dependent_targets[app_spec] ||= begin + hash = Hash[config_variants.map do |config| + [config, _add_recursive_app_dependent_targets(app_spec, Set.new, :configuration => config).to_a.freeze] + end] + hash[nil] = hash.each_value.reduce(Set.new, &:|).to_a.freeze + hash + end + @recursive_app_dependent_targets[app_spec][configuration] + end + + def _add_recursive_app_dependent_targets(app_spec, set, configuration: nil) + raise ArgumentError, 'Must give a app spec' unless app_spec + dependent_targets = configuration ? app_dependent_targets_by_spec_name_by_config[app_spec.name][configuration] : app_dependent_targets_by_spec_name[app_spec.name] + raise ArgumentError, "Unable to find deps for #{app_spec} for config #{configuration.inspect} #{app_dependent_targets_by_spec_name_by_config.inspect}" unless dependent_targets + + dependent_targets.each do |target| + target._add_recursive_dependent_targets(set, :configuration => configuration) if set.add?(target) + end + + set + end + private :_add_recursive_app_dependent_targets + + # @param [Specification] app_spec + # the app spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the app dependent targets for. + # + # @return [Array] the canonical list of dependent targets this target has a dependency upon. + # This list includes the target itself as well as its recursive dependent and app dependent targets. + # + def dependent_targets_for_app_spec(app_spec, configuration: nil) + [self, *recursive_dependent_targets(:configuration => configuration), *recursive_app_dependent_targets(app_spec, :configuration => configuration)].uniq + end + + # Checks if warnings should be inhibited for this pod. + # + # @return [Bool] + # + def inhibit_warnings? + return @inhibit_warnings if defined? @inhibit_warnings + whitelists = target_definitions.map do |target_definition| + target_definition.inhibits_warnings_for_pod?(root_spec.name) + end.uniq + + if whitelists.empty? + @inhibit_warnings = false + false + elsif whitelists.count == 1 + @inhibit_warnings = whitelists.first + whitelists.first + else + UI.warn "The pod `#{pod_name}` is linked to different targets " \ + "(#{target_definitions.map { |td| "`#{td.name}`" }.to_sentence}), which contain different " \ + 'settings to inhibit warnings. CocoaPods does not currently ' \ + 'support different settings and will fall back to your preference ' \ + 'set in the root target definition.' + @inhibit_warnings = podfile.root_target_definitions.first.inhibits_warnings_for_pod?(root_spec.name) + end + end + + # @param [String] dir + # The directory (which might be a variable) relative to which + # the returned path should be. This must be used if the + # $CONFIGURATION_BUILD_DIR is modified. 
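+ #
+ # For illustration (hypothetical label; assuming the default variable expands
+ # to `${PODS_CONFIGURATION_BUILD_DIR}`): a pod target labelled `AFNetworking`
+ # produces `${PODS_CONFIGURATION_BUILD_DIR}/AFNetworking`.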
+ # + # @return [String] The absolute path to the configuration build dir + # + def configuration_build_dir(dir = BuildSettings::CONFIGURATION_BUILD_DIR_VARIABLE) + "#{dir}/#{label}" + end + + # @param [String] dir + # @see #configuration_build_dir + # + # @return [String] The absolute path to the build product + # + def build_product_path(dir = BuildSettings::CONFIGURATION_BUILD_DIR_VARIABLE) + "#{configuration_build_dir(dir)}/#{product_name}" + end + + # @return [String] The source path of the root for this target relative to `$(PODS_ROOT)` + # + def pod_target_srcroot + "${PODS_ROOT}/#{sandbox.pod_dir(pod_name).relative_path_from(sandbox.root)}" + end + + # @return [String] The version associated with this target + # + def version + version = root_spec.version + [version.major, version.minor, version.patch].join('.') + end + + # @param [Boolean] include_dependent_targets_for_test_spec + # whether to include header search paths for test dependent targets + # + # @param [Boolean] include_dependent_targets_for_app_spec + # whether to include header search paths for app dependent targets + # + # @param [Boolean] include_private_headers + # whether to include header search paths for private headers of this + # target + # + # @param [String] configuration + # the configuration to return header search paths for or `nil` for all configurations. + # + # @return [Array] The set of header search paths this target uses. + # + def header_search_paths(include_dependent_targets_for_test_spec: nil, include_dependent_targets_for_app_spec: nil, + include_private_headers: true, configuration: nil) + header_search_paths = [] + header_search_paths.concat(build_headers.search_paths(platform, nil, false)) if include_private_headers + header_search_paths.concat(sandbox.public_headers.search_paths(platform, pod_name, uses_modular_headers?)) + dependent_targets = recursive_dependent_targets(:configuration => configuration) + if include_dependent_targets_for_test_spec + dependent_targets += recursive_test_dependent_targets(include_dependent_targets_for_test_spec, :configuration => configuration) + end + if include_dependent_targets_for_app_spec + dependent_targets += recursive_app_dependent_targets(include_dependent_targets_for_app_spec, :configuration => configuration) + end + dependent_targets.uniq.each do |dependent_target| + header_search_paths.concat(sandbox.public_headers.search_paths(platform, dependent_target.pod_name, defines_module? && dependent_target.uses_modular_headers?(false))) + end + header_search_paths.uniq + end + + # @param [Specification] spec the specification to return build settings for. + # + # @param [String] configuration the configuration to scope the build settings. 
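+ #
+ # For illustration (hypothetical configuration names): with
+ # `user_build_configurations` of `{ 'Debug' => :debug, 'Release' => :release }`,
+ # calling `build_settings_for_spec(spec, :configuration => 'Debug')` resolves
+ # to the settings stored under `:debug`.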
+ # + # @return [BuildSettings::PodTargetSettings] The build settings for the given spec + # + def build_settings_for_spec(spec, configuration: nil) + raise ArgumentError, 'Must give configuration' unless configuration + configuration = user_build_configurations[configuration] if user_build_configurations.key?(configuration) + build_settings_by_config_for_spec(spec)[configuration] || raise(ArgumentError, "No build settings for #{spec} (configuration #{configuration.inspect}) (known configurations #{config_variants})") + end + + def build_settings_by_config_for_spec(spec) + case spec.spec_type + when :test then test_spec_build_settings_by_config[spec.name] + when :app then app_spec_build_settings_by_config[spec.name] + else build_settings + end || raise(ArgumentError, "No build settings for #{spec}") + end + + def user_config_names_by_config_type + user_build_configurations.each_with_object({}) do |(user, type), hash| + hash[type] ||= [] + hash[type] << user + end.each_value(&:freeze).freeze + end + + protected + + # Returns whether the pod target should use modular headers. + # + # @param [Boolean] only_if_defines_modules + # whether the use of modular headers should require the target to define a module + # + # @note This must return false when a pod has a `header_mappings_dir` or `header_dir`, + # as that allows the spec to customize the header structure, and + # therefore it might not be expecting the module name to be prepended + # to imports at all. + # + def uses_modular_headers?(only_if_defines_modules = true) + return false if only_if_defines_modules && !defines_module? + return @uses_modular_headers if defined? @uses_modular_headers + @uses_modular_headers = spec_consumers.none?(&:header_mappings_dir) && spec_consumers.none?(&:header_dir) + end + + private + + def config_variants + if user_build_configurations.empty? + %i(debug release) + else + user_build_configurations.values.uniq + end + end + + def create_build_settings + Hash[config_variants.map do |config| + [config, BuildSettings::PodTargetSettings.new(self, nil, :configuration => config)] + end] + end + + def create_test_build_settings_by_config + Hash[test_specs.map do |test_spec| + [test_spec.name, Hash[config_variants.map do |config| + [config, BuildSettings::PodTargetSettings.new(self, test_spec, :configuration => config)] + end]] + end] + end + + def create_app_build_settings_by_config + Hash[app_specs.map do |app_spec| + [app_spec.name, Hash[config_variants.map do |config| + [config, BuildSettings::PodTargetSettings.new(self, app_spec, :configuration => config)] + end]] + end] + end + + # Computes the destination sub-directory in the sandbox + # + # @param [Sandbox::FileAccessor] file_accessor + # The consumer file accessor for which the headers need to be + # linked. + # + # @param [Array] headers + # The absolute paths of the headers which need to be mapped. + # + # @return [Hash{Pathname => Array}] A hash containing the + # headers folders as the keys and the absolute paths of the + # header files as the values. 
+ # + def header_mappings(file_accessor, headers) + consumer = file_accessor.spec_consumer + header_mappings_dir = consumer.header_mappings_dir + dir = headers_sandbox + dir += consumer.header_dir if consumer.header_dir + + mappings = {} + headers.each do |header| + next if header.to_s.include?('.framework/') + + sub_dir = dir + if header_mappings_dir + relative_path = header.relative_path_from(file_accessor.path_list.root + header_mappings_dir) + sub_dir += relative_path.dirname + end + mappings[sub_dir] ||= [] + mappings[sub_dir] << header + end + mappings + end + + # @!group Deprecated APIs + # ----------------------------------------------------------------------- # + + public + + # @deprecated Use `test_app_hosts_by_spec` instead. + # + # @todo Remove in 2.0 + # + # @return [Hash{String => (Specification,PodTarget)}] tuples of app specs and pod targets by test spec name. + # + def test_app_hosts_by_spec_name + Hash[test_app_hosts_by_spec.map do |spec, value| + [spec.name, value] + end] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface.rb new file mode 100644 index 0000000..0540afe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface.rb @@ -0,0 +1,463 @@ +require 'cocoapods/user_interface/error_report' +require 'cocoapods/user_interface/inspector_reporter' + +module Pod + # Provides support for UI output. It provides support for nested sections of + # information and for a verbose mode. + # + module UserInterface + require 'colored2' + + @title_colors = %w( yellow green ) + @title_level = 0 + @indentation_level = 2 + @treat_titles_as_messages = false + @warnings = [] + + class << self + include Config::Mixin + + attr_accessor :indentation_level + attr_accessor :title_level + attr_accessor :warnings + + # @return [IO] IO object to which UI output will be directed. + # + attr_accessor :output_io + + # @return [Bool] Whether the wrapping of the strings to the width of the + # terminal should be disabled. + # + attr_accessor :disable_wrap + alias_method :disable_wrap?, :disable_wrap + + # Prints a title taking an optional verbose prefix and + # a relative indentation valid for the UI action in the passed + # block. + # + # In verbose mode titles are printed with a color according + # to their level. In normal mode titles are printed only if + # they have nesting level smaller than 2. + # + # @todo Refactor to title (for always visible titles like search) + # and sections (titles that represent collapsible sections). + # + # @param [String] title + # The title to print + # + # @param [String] verbose_prefix + # See #message + # + # @param [FixNum] relative_indentation + # The indentation level relative to the current, + # when the message is printed. + # + def section(title, verbose_prefix = '', relative_indentation = 0) + if config.verbose? + title(title, verbose_prefix, relative_indentation) + elsif title_level < 1 + puts title + end + + self.indentation_level += relative_indentation + self.title_level += 1 + yield if block_given? + ensure + self.indentation_level -= relative_indentation + self.title_level -= 1 + end + + # In verbose mode it shows the sections and the contents. + # In normal mode it just prints the title. 
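+ #
+ # A minimal usage sketch (hypothetical title and nested message):
+ #
+ #   UI.titled_section('Analyzing dependencies') do
+ #     UI.message 'Inspecting Podfile'
+ #   end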
+ # + # @return [void] + # + def titled_section(title, options = {}) + relative_indentation = options[:relative_indentation] || 0 + verbose_prefix = options[:verbose_prefix] || '' + if config.verbose? + title(title, verbose_prefix, relative_indentation) + else + puts title + end + + self.indentation_level += relative_indentation + self.title_level += 1 + yield if block_given? + ensure + self.indentation_level -= relative_indentation + self.title_level -= 1 + end + + # A title opposed to a section is always visible + # + # @param [String] title + # The title to print + # + # @param [String] verbose_prefix + # See #message + # + # @param [FixNum] relative_indentation + # The indentation level relative to the current, + # when the message is printed. + # + def title(title, verbose_prefix = '', relative_indentation = 2) + if @treat_titles_as_messages + message(title, verbose_prefix) + else + title = verbose_prefix + title if config.verbose? + title = "\n#{title}" if @title_level < 2 + if (color = @title_colors[@title_level]) + title = title.send(color) + end + puts "#{title}" + end + + self.indentation_level += relative_indentation + self.title_level += 1 + yield if block_given? + ensure + self.indentation_level -= relative_indentation + self.title_level -= 1 + end + + # Prints a verbose message taking an optional verbose prefix and + # a relative indentation valid for the UI action in the passed + # block. + # + # @todo Clean interface. + # + # @param [String] message + # The message to print. + # + # @param [String] verbose_prefix + # See #message + # + # @param [FixNum] relative_indentation + # The indentation level relative to the current, + # when the message is printed. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def message(message, verbose_prefix = '', relative_indentation = 2) + message = verbose_prefix + message if config.verbose? + puts_indented message if config.verbose? + + self.indentation_level += relative_indentation + yield if block_given? + ensure + self.indentation_level -= relative_indentation + end + + # Prints an info to the user. The info is always displayed. + # It respects the current indentation level only in verbose + # mode. + # + # Any title printed in the optional block is treated as a message. + # + # @param [String] message + # The message to print. + # + def info(message) + indentation = config.verbose? ? self.indentation_level : 0 + indented = wrap_string(message, indentation) + puts(indented) + + self.indentation_level += 2 + @treat_titles_as_messages = true + yield if block_given? + ensure + @treat_titles_as_messages = false + self.indentation_level -= 2 + end + + # Prints an important message to the user. + # + # @param [String] message The message to print. + # + # return [void] + # + def notice(message) + puts("\n[!] #{message}".green) + end + + # Returns a string containing relative location of a path from the Podfile. + # The returned path is quoted. If the argument is nil it returns the + # empty string. + # + # @param [#to_str] pathname + # The path to print. + # + def path(pathname) + if pathname + from_path = config.podfile_path.dirname if config.podfile_path + from_path ||= Pathname.pwd + path = begin + Pathname(pathname).relative_path_from(from_path) + rescue + pathname + end + "`#{path}`" + else + '' + end + end + + # Prints the textual representation of a given set. + # + # @param [Set] set + # the set that should be presented. 
+ # + # @param [Symbol] mode + # the presentation mode, either `:normal` or `:name_and_version`. + # + def pod(set, mode = :normal) + if mode == :name_and_version + puts_indented "#{set.name} #{set.versions.first.version}" + else + pod = Specification::Set::Presenter.new(set) + title = "-> #{pod.name} (#{pod.version})" + if pod.spec.deprecated? + title += " #{pod.deprecation_description}" + colored_title = title.red + else + colored_title = title.green + end + + title(colored_title, '', 1) do + puts_indented pod.summary if pod.summary + puts_indented "pod '#{pod.name}', '~> #{pod.version}'" + labeled('Homepage', pod.homepage) + labeled('Source', pod.source_url) + labeled('Versions', pod.versions_by_source) + if mode == :stats + labeled('Authors', pod.authors) if pod.authors =~ /,/ + labeled('Author', pod.authors) if pod.authors !~ /,/ + labeled('License', pod.license) + labeled('Platform', pod.platform) + labeled('Stars', pod.github_stargazers) + labeled('Forks', pod.github_forks) + end + labeled('Subspecs', pod.subspecs) + end + end + end + + # Prints a message with a label. + # + # @param [String] label + # The label to print. + # + # @param [#to_s] value + # The value to print. + # + # @param [FixNum] justification + # The justification of the label. + # + def labeled(label, value, justification = 12) + if value + title = "- #{label}:" + if value.is_a?(Array) + lines = [wrap_string(title, self.indentation_level)] + value.each do |v| + lines << wrap_string("- #{v}", self.indentation_level + 2) + end + puts lines.join("\n") + else + puts wrap_string(title.ljust(justification) + "#{value}", self.indentation_level) + end + end + end + + # Prints a message respecting the current indentation level and + # wrapping it to the terminal width if necessary. + # + # @param [String] message + # The message to print. + # + def puts_indented(message = '') + indented = wrap_string(message, self.indentation_level) + puts(indented) + end + + # Prints the stored warnings. This method is intended to be called at the + # end of the execution of the binary. + # + # @return [void] + # + def print_warnings + STDOUT.flush + warnings.each do |warning| + next if warning[:verbose_only] && !config.verbose? + STDERR.puts("\n[!] #{warning[:message]}".yellow) + warning[:actions].each do |action| + string = "- #{action}" + string = wrap_string(string, 4) + puts(string) + end + end + end + + # Presents a choice among the elements of an array to the user. + # + # @param [Array<#to_s>] array + # The list of the elements among which the user should make his + # choice. + # + # @param [String] message + # The message to display to the user. + # + # @return [Fixnum] The index of the chosen array item. + # + def choose_from_array(array, message) + array.each_with_index do |item, index| + UI.puts "#{index + 1}: #{item}" + end + + UI.puts message + + index = UI.gets.chomp.to_i - 1 + if index < 0 || index > array.count - 1 + raise Informative, "#{index + 1} is invalid [1-#{array.count}]" + else + index + end + end + + public + + # @!group Basic methods + #-----------------------------------------------------------------------# + + # prints a message followed by a new line unless config is silent. + # + # @param [String] message + # The message to print. + # + def puts(message = '') + return if config.silent? + begin + (output_io || STDOUT).puts(message) + rescue Errno::EPIPE + exit 0 + end + end + + # prints a message followed by a new line unless config is silent. + # + # @param [String] message + # The message to print. 
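+ #
+ # Illustrative usage (hypothetical messages):
+ #
+ #   Pod::UI.print 'Downloading dependencies '
+ #   Pod::UI.puts 'done.'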
+ # + def print(message) + return if config.silent? + begin + (output_io || STDOUT).print(message) + rescue Errno::EPIPE + exit 0 + end + end + + # gets input from $stdin + # + def gets + $stdin.gets + end + + # Stores important warning to the user optionally followed by actions + # that the user should take. To print them use {#print_warnings}. + # + # @param [String] message The message to print. + # @param [Array] actions The actions that the user should take. + # @param [Bool] verbose_only + # Restrict the appearance of the warning to verbose mode only + # + # return [void] + # + def warn(message, actions = [], verbose_only = false) + warnings << { :message => message, :actions => actions, :verbose_only => verbose_only } + end + + # Pipes all output inside given block to a pager. + # + # @yield Code block in which inputs to {#puts} and {#print} methods will be printed to the piper. + # + def with_pager + prev_handler = Signal.trap('INT', 'IGNORE') + IO.popen((ENV['PAGER'] || 'less -R'), 'w') do |io| + UI.output_io = io + yield + end + ensure + Signal.trap('INT', prev_handler) + UI.output_io = nil + end + + private + + # @!group Helpers + #-----------------------------------------------------------------------# + + # @return [String] Wraps a string taking into account the width of the + # terminal and an option indent. Adapted from + # https://macromates.com/blog/2006/wrapping-text-with-regular-expressions/ + # + # @param [String] string The string to wrap + # + # @param [String] indent The string to use to indent the result. + # + # @return [String] The formatted string. + # + # @note If CocoaPods is not being run in a terminal or the width of the + # terminal is too small a width of 80 is assumed. + # + def wrap_string(string, indent = 0) + if disable_wrap + (' ' * indent) + string + else + first_space = ' ' * indent + indented = CLAide::Command::Banner::TextWrapper.wrap_with_indent(string, indent, 9999) + first_space + indented + end + end + end + end + UI = UserInterface + + #---------------------------------------------------------------------------# + + # Redirects cocoapods-core UI. + # + module CoreUI + class << self + def puts(message) + UI.puts message + end + + def print(message) + UI.print(message) + end + + def warn(message) + UI.warn message + end + end + end +end + +#---------------------------------------------------------------------------# + +module Xcodeproj + # Redirects xcodeproj UI. 
+ # + module UserInterface + def self.puts(message) + ::Pod::UI.puts message + end + + def self.warn(message) + ::Pod::UI.warn message + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface/error_report.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface/error_report.rb new file mode 100644 index 0000000..aa03e65 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface/error_report.rb @@ -0,0 +1,204 @@ +# encoding: UTF-8 + +require 'rbconfig' +require 'cgi' +require 'gh_inspector' + +module Pod + module UserInterface + module ErrorReport + class << self + def report(exception) + <<-EOS + +#{'――― MARKDOWN TEMPLATE ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +### Command + +``` +#{original_command} +``` + +#{report_instructions} + +#{stack} +### Plugins + +``` +#{plugins_string} +``` +#{markdown_podfile} +### Error + +``` +#{exception.class} - #{exception.message.force_encoding('UTF-8')} +#{exception.backtrace.join("\n") if exception.backtrace} +``` + +#{'――― TEMPLATE END ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +#{'[!] Oh no, an error occurred.'.red} +#{error_from_podfile(exception)} +#{'Search for existing GitHub issues similar to yours:'.yellow} +#{issues_url(exception)} + +#{'If none exists, create a ticket, with the template displayed above, on:'.yellow} +https://github.com/CocoaPods/CocoaPods/issues/new + +#{'Be sure to first read the contributing guide for details on how to properly submit a ticket:'.yellow} +https://github.com/CocoaPods/CocoaPods/blob/master/CONTRIBUTING.md + +Don't forget to anonymize any private data! + +EOS + end + + def report_instructions + <<-EOS +### Report + +* What did you do? + +* What did you expect to happen? + +* What happened instead? +EOS + end + + def stack + parts = { + 'CocoaPods' => Pod::VERSION, + 'Ruby' => RUBY_DESCRIPTION, + 'RubyGems' => Gem::VERSION, + 'Host' => host_information, + 'Xcode' => xcode_information, + 'Git' => git_information, + 'Ruby lib dir' => RbConfig::CONFIG['libdir'], + 'Repositories' => repo_information, + } + justification = parts.keys.map(&:size).max + + str = <<-EOS +### Stack + +``` +EOS + parts.each do |name, value| + str << name.rjust(justification) + str << ' : ' + str << Array(value).join("\n" << (' ' * (justification + 3))) + str << "\n" + end + + str << "```\n" + end + + def plugins_string + plugins = installed_plugins + max_name_length = plugins.keys.map(&:length).max + plugins.map do |name, version| + "#{name.ljust(max_name_length)} : #{version}" + end.sort.join("\n") + end + + def markdown_podfile + return '' unless Config.instance.podfile_path && Config.instance.podfile_path.exist? 
+ <<-EOS + +### Podfile + +```ruby +#{Config.instance.podfile_path.read.strip} +``` +EOS + end + + def search_for_exceptions(exception) + inspector = GhInspector::Inspector.new 'cocoapods', 'cocoapods' + message_delegate = UserInterface::InspectorReporter.new + inspector.search_exception exception, message_delegate + rescue => e + warn "Searching for inspections failed: #{e}" + nil + end + + private + + def `(other) + super + rescue Errno::ENOENT => e + "Unable to find an executable (#{e})" + end + + def pathless_exception_message(message) + message.gsub(/- \(.*\):/, '-') + end + + def error_from_podfile(error) + if error.message =~ /Podfile:(\d*)/ + "\nIt appears to have originated from your Podfile at line #{Regexp.last_match[1]}.\n" + end + end + + def remove_color(string) + string.gsub(/\e\[(\d+)m/, '') + end + + def issues_url(exception) + message = remove_color(pathless_exception_message(exception.message)) + 'https://github.com/CocoaPods/CocoaPods/search?q=' \ + "#{CGI.escape(message)}&type=Issues" + end + + def host_information + product, version, build = `sw_vers`.strip.split("\n").map { |line| line.split(':').last.strip } + "#{product} #{version} (#{build})" + end + + def xcode_information + version, build = `xcodebuild -version`.strip.split("\n").map { |line| line.split(' ').last } + "#{version} (#{build})" + end + + def git_information + `git --version`.strip.split("\n").first + end + + def installed_plugins + CLAide::Command::PluginManager.specifications. + reduce({}) { |hash, s| hash.tap { |h| h[s.name] = s.version.to_s } } + end + + def repo_information + Config.instance.sources_manager.all.map do |source| + repo = source.repo + if source.is_a?(Pod::CDNSource) + "#{repo.basename} - CDN - #{source.url}" + elsif source.git? + sha = git_hash(source) + "#{repo.basename} - git - #{source.url} @ #{sha}" + else + "#{repo.basename} - #{source.type}" + end + end + end + + def original_command + "#{$PROGRAM_NAME} #{ARGV.join(' ')}" + end + + private + + # @param [Source] source + # a git source + # + # @return [String] the current git SHA + def git_hash(source) + Dir.chdir(source.repo) do + `git rev-parse HEAD 2>&1` + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface/inspector_reporter.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface/inspector_reporter.rb new file mode 100644 index 0000000..76057f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/user_interface/inspector_reporter.rb @@ -0,0 +1,102 @@ +require 'addressable' +require 'uri' + +module Pod + module UserInterface + # Redirects GH-issues delegate callbacks to CocoaPods UI methods. + # + class InspectorReporter + # Called just as the investigation has begun. + # Lets the user know that it's looking for an issue. + # + # @param [GhInspector::Inspector] inspector + # The current inspector + # + # @return [void] + # + def inspector_started_query(_, inspector) + UI.puts "Looking for related issues on #{inspector.repo_owner}/#{inspector.repo_name}..." + end + + # Called once the inspector has received a report with more than one issue, + # showing the top 3 issues, and offering a link to see more. 
+ # + # @param [GhInspector::InspectionReport] report + # Report a list of the issues + # + # @return [void] + # + def inspector_successfully_received_report(report, _) + report.issues[0..2].each { |issue| print_issue_full(issue) } + + if report.issues.count > 3 + UI.puts "and #{report.total_results - 3} more at:" + UI.puts report.url + end + end + + # Called once the report has been received, but when there are no issues found. + # + # @param [GhInspector::Inspector] inspector + # The current inspector + # + # @return [void] + # + def inspector_received_empty_report(_, inspector) + UI.puts 'Found no similar issues. To create a new issue, please visit:' + UI.puts "https://github.com/#{inspector.repo_owner}/#{inspector.repo_name}/issues/new" + end + + # Called when there have been networking issues in creating the report. + # + # @param [Error] error + # The error returned during networking + # + # @param [String] query + # The original search query + # + # @param [GhInspector::Inspector] inspector + # The current inspector + # + # @return [void] + # + def inspector_could_not_create_report(error, query, inspector) + safe_query = Addressable::URI.escape query + UI.puts 'Could not access the GitHub API, you may have better luck via the website.' + UI.puts "https://github.com/#{inspector.repo_owner}/#{inspector.repo_name}/search?q=#{safe_query}&type=Issues&utf8=✓" + UI.puts "Error: #{error.name}" + end + + private + + def print_issue_full(issue) + safe_url = Addressable::URI.escape issue.html_url + UI.puts " - #{issue.title}" + UI.puts " #{safe_url} [#{issue.state}] [#{issue.comments} comment#{issue.comments == 1 ? '' : 's'}]" + UI.puts " #{pretty_date(issue.updated_at)}" + UI.puts '' + end + + # Taken from https://stackoverflow.com/questions/195740/how-do-you-do-relative-time-in-rails + def pretty_date(date_string) + date = Time.parse(date_string) + a = (Time.now - date).to_i + + case a + when 0 then 'just now' + when 1 then 'a second ago' + when 2..59 then a.to_s + ' seconds ago' + when 60..119 then 'a minute ago' # 120 = 2 minutes + when 120..3540 then (a / 60).to_i.to_s + ' minutes ago' + when 3541..7100 then 'an hour ago' # 3600 = 1 hour + when 7101..82_800 then ((a + 99) / 3600).to_i.to_s + ' hours ago' + when 82_801..172_000 then 'a day ago' # 86400 = 1 day + when 172_001..518_400 then ((a + 800) / (60 * 60 * 24)).to_i.to_s + ' days ago' + when 518_400..1_036_800 then 'a week ago' + when 1_036_801..4_147_204 then ((a + 180_000) / (60 * 60 * 24 * 7)).to_i.to_s + ' weeks ago' + else date.strftime('%d %b %Y') + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/validator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/validator.rb new file mode 100644 index 0000000..c69b931 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/validator.rb @@ -0,0 +1,1170 @@ +require 'active_support/core_ext/array' +require 'active_support/core_ext/string/inflections' + +module Pod + # Validates a Specification. + # + # Extends the Linter from the Core to add additional which require the + # LocalPod and the Installer. + # + # In detail it checks that the file patterns defined by the user match + # actually do match at least a file and that the Pod builds, by installing + # it without integration and building the project with xcodebuild. 
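+ #
+ # A minimal usage sketch (hypothetical podspec path and source URL):
+ #
+ #   validator = Pod::Validator.new('Foo.podspec', ['https://cdn.cocoapods.org/'])
+ #   validator.quick = false
+ #   validator.validate
+ #   validator.validated? #=> true when no errors (or only allowed warnings) remain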
+ # + class Validator + include Config::Mixin + + # The default version of Swift to use when linting pods + # + DEFAULT_SWIFT_VERSION = '4.0'.freeze + + # The valid platforms for linting + # + VALID_PLATFORMS = Platform.all.freeze + + # @return [Specification::Linter] the linter instance from CocoaPods + # Core. + # + attr_reader :linter + + # Initialize a new instance + # + # @param [Specification, Pathname, String] spec_or_path + # the Specification or the path of the `podspec` file to lint. + # + # @param [Array] source_urls + # the Source URLs to use in creating a {Podfile}. + # + # @param [Array] platforms + # the platforms to lint. + # + def initialize(spec_or_path, source_urls, platforms = []) + @use_frameworks = true + @linter = Specification::Linter.new(spec_or_path) + @source_urls = if @linter.spec && @linter.spec.dependencies.empty? && @linter.spec.recursive_subspecs.all? { |s| s.dependencies.empty? } + [] + else + source_urls.map { |url| config.sources_manager.source_with_name_or_url(url) }.map(&:url) + end + + @platforms = platforms.map do |platform| + result = case platform.to_s.downcase + # Platform doesn't recognize 'macos' as being the same as 'osx' when initializing + when 'macos' then Platform.macos + else Platform.new(platform, nil) + end + unless valid_platform?(result) + raise Informative, "Unrecognized platform `#{platform}`. Valid platforms: #{VALID_PLATFORMS.join(', ')}" + end + result + end + @use_frameworks = true + end + + #-------------------------------------------------------------------------# + + # @return [Specification] the specification to lint. + # + def spec + @linter.spec + end + + # @return [Pathname] the path of the `podspec` file where {#spec} is + # defined. + # + def file + @linter.file + end + + # Returns a list of platforms to lint for a given Specification + # + # @param [Specification] spec + # The specification to lint + # + # @return [Array] platforms to lint for the given specification + # + def platforms_to_lint(spec) + return spec.available_platforms if @platforms.empty? + + # Validate that the platforms specified are actually supported by the spec + results = @platforms.map do |platform| + matching_platform = spec.available_platforms.find { |p| p.name == platform.name } + unless matching_platform + raise Informative, "Platform `#{platform}` is not supported by specification `#{spec}`." + end + matching_platform + end.uniq + + results + end + + # @return [Sandbox::FileAccessor] the file accessor for the spec. + # + attr_accessor :file_accessor + + #-------------------------------------------------------------------------# + + # Lints the specification adding a {Result} for any + # failed check to the {#results} list. + # + # @note This method shows immediately which pod is being processed and + # overrides the printed line once the result is known. + # + # @return [Bool] whether the specification passed validation. + # + def validate + @results = [] + + # Replace default spec with a subspec if asked for + a_spec = spec + if spec && @only_subspec + subspec_name = @only_subspec.start_with?("#{spec.root.name}/") ? @only_subspec : "#{spec.root.name}/#{@only_subspec}" + a_spec = spec.subspec_by_name(subspec_name, true, true) + @subspec_name = a_spec.name + end + + UI.print " -> #{a_spec ? a_spec.name : file.basename}\r" unless config.silent? + $stdout.flush + + perform_linting + perform_extensive_analysis(a_spec) if a_spec && !quick + + UI.puts ' -> '.send(result_color) << (a_spec ? 
a_spec.to_s : file.basename.to_s) + print_results + validated? + end + + # Prints the result of the validation to the user. + # + # @return [void] + # + def print_results + UI.puts results_message + end + + def results_message + message = '' + results.each do |result| + if result.platforms == [:ios] + platform_message = '[iOS] ' + elsif result.platforms == [:osx] + platform_message = '[OSX] ' + elsif result.platforms == [:watchos] + platform_message = '[watchOS] ' + elsif result.platforms == [:tvos] + platform_message = '[tvOS] ' + end + + subspecs_message = '' + if result.is_a?(Result) + subspecs = result.subspecs.uniq + if subspecs.count > 2 + subspecs_message = '[' + subspecs[0..2].join(', ') + ', and more...] ' + elsif subspecs.count > 0 + subspecs_message = '[' + subspecs.join(',') + '] ' + end + end + + case result.type + when :error then type = 'ERROR' + when :warning then type = 'WARN' + when :note then type = 'NOTE' + else raise "#{result.type}" end + message << " - #{type.ljust(5)} | #{platform_message}#{subspecs_message}#{result.attribute_name}: #{result.message}\n" + end + message << "\n" + end + + def failure_reason + results_by_type = results.group_by(&:type) + results_by_type.default = [] + return nil if validated? + reasons = [] + if (size = results_by_type[:error].size) && size > 0 + reasons << "#{size} #{'error'.pluralize(size)}" + end + if !allow_warnings && (size = results_by_type[:warning].size) && size > 0 + reason = "#{size} #{'warning'.pluralize(size)}" + pronoun = size == 1 ? 'it' : 'them' + reason << " (but you can use `--allow-warnings` to ignore #{pronoun})" if reasons.empty? + reasons << reason + end + if results.all?(&:public_only) + reasons << 'all results apply only to public specs, but you can use ' \ + '`--private` to ignore them if linting the specification for a private pod' + end + + reasons.to_sentence + end + + #-------------------------------------------------------------------------# + + # @!group Configuration + + # @return [Bool] whether the validation should skip the checks that + # requires the download of the library. + # + attr_accessor :quick + + # @return [Bool] whether the linter should not clean up temporary files + # for inspection. + # + attr_accessor :no_clean + + # @return [Bool] whether the linter should fail as soon as the first build + # variant causes an error. Helpful for i.e. multi-platforms specs, + # specs with subspecs. + # + attr_accessor :fail_fast + + # @return [Bool] whether the validation should be performed against the root of + # the podspec instead to its original source. + # + # @note Uses the `:path` option of the Podfile. + # + attr_accessor :local + alias_method :local?, :local + + # @return [Bool] Whether the validator should fail on warnings, or only on errors. + # + attr_accessor :allow_warnings + + # @return [String] name of the subspec to check, if nil all subspecs are checked. + # + attr_accessor :only_subspec + + # @return [Bool] Whether the validator should validate all subspecs. + # + attr_accessor :no_subspecs + + # @return [Bool] Whether the validator should skip building and running tests. + # + attr_accessor :skip_tests + + # @return [Array] List of test_specs to run. If nil, all tests are run (unless skip_tests is specified). + # + attr_accessor :test_specs + + # @return [Bool] Whether the validator should run Xcode Static Analysis. + # + attr_accessor :analyze + + # @return [Bool] Whether frameworks should be used for the installation. 
+ # + attr_accessor :use_frameworks + + # @return [Boolean] Whether modular headers should be used for the installation. + # + attr_accessor :use_modular_headers + + # @return [Boolean] Whether static frameworks should be used for the installation. + # + attr_accessor :use_static_frameworks + + # @return [Boolean] Whether attributes that affect only public sources + # Bool be skipped. + # + attr_accessor :ignore_public_only_results + + # @return [String] A glob for podspecs to be used during building of + # the local Podfile via :path. + # + attr_accessor :include_podspecs + + # @return [String] A glob for podspecs to be used during building of + # the local Podfile via :podspec. + # + attr_accessor :external_podspecs + + attr_accessor :skip_import_validation + alias_method :skip_import_validation?, :skip_import_validation + + attr_accessor :configuration + + #-------------------------------------------------------------------------# + + # !@group Lint results + + # + # + attr_reader :results + + # @return [Boolean] + # + def validated? + result_type != :error && (result_type != :warning || allow_warnings) + end + + # @return [Symbol] The type, which should been used to display the result. + # One of: `:error`, `:warning`, `:note`. + # + def result_type + applicable_results = results + applicable_results = applicable_results.reject(&:public_only?) if ignore_public_only_results + types = applicable_results.map(&:type).uniq + if types.include?(:error) then :error + elsif types.include?(:warning) then :warning + else :note + end + end + + # @return [Symbol] The color, which should been used to display the result. + # One of: `:green`, `:yellow`, `:red`. + # + def result_color + case result_type + when :error then :red + when :warning then :yellow + else :green end + end + + # @return [Pathname] the temporary directory used by the linter. + # + def validation_dir + @validation_dir ||= Pathname(Dir.mktmpdir(['CocoaPods-Lint-', "-#{spec.name}"])) + end + + # @return [String] The SWIFT_VERSION that should be used to validate the pod. This is set by passing the + # `--swift-version` parameter during validation. + # + attr_accessor :swift_version + + # @return [String] the SWIFT_VERSION within the .swift-version file or nil. + # + def dot_swift_version + return unless file + swift_version_path = file.dirname + '.swift-version' + return unless swift_version_path.exist? + swift_version_path.read.strip + end + + # @return [String] The derived Swift version to use for validation. The order of precedence is as follows: + # - The `--swift-version` parameter is always checked first and honored if passed. + # - The `swift_versions` DSL attribute within the podspec, in which case the latest version is always chosen. + # - The Swift version within the `.swift-version` file if present. + # - If none of the above are set then the `#DEFAULT_SWIFT_VERSION` is used. + # + def derived_swift_version + @derived_swift_version ||= begin + if !swift_version.nil? + swift_version + elsif version = spec.swift_versions.max || dot_swift_version + version.to_s + else + DEFAULT_SWIFT_VERSION + end + end + end + + # @return [Boolean] Whether any of the pod targets part of this validator use Swift or not. + # + def uses_swift? + @installer.pod_targets.any?(&:uses_swift?) 
+ end + + #-------------------------------------------------------------------------# + + private + + # !@group Lint steps + + # + # + def perform_linting + linter.lint + @results.concat(linter.results.to_a) + end + + # Perform analysis for a given spec (or subspec) + # + def perform_extensive_analysis(spec) + if spec.non_library_specification? + error('spec', "Validating a non library spec (`#{spec.name}`) is not supported.") + return false + end + validate_homepage(spec) + validate_screenshots(spec) + validate_social_media_url(spec) + validate_documentation_url(spec) + validate_source_url(spec) + + platforms = platforms_to_lint(spec) + + valid = platforms.send(fail_fast ? :all? : :each) do |platform| + UI.message "\n\n#{spec} - Analyzing on #{platform} platform.".green.reversed + @consumer = spec.consumer(platform) + setup_validation_environment + begin + create_app_project + download_pod + check_file_patterns + install_pod + validate_swift_version + add_app_project_import + validate_vendored_dynamic_frameworks + build_pod + test_pod unless skip_tests + ensure + tear_down_validation_environment + end + validated? + end + return false if fail_fast && !valid + perform_extensive_subspec_analysis(spec) unless @no_subspecs + rescue => e + message = e.to_s + message << "\n" << e.backtrace.join("\n") << "\n" if config.verbose? + error('unknown', "Encountered an unknown error (#{message}) during validation.") + false + end + + # Recursively perform the extensive analysis on all subspecs + # + def perform_extensive_subspec_analysis(spec) + spec.subspecs.reject(&:non_library_specification?).send(fail_fast ? :all? : :each) do |subspec| + @subspec_name = subspec.name + perform_extensive_analysis(subspec) + end + end + + # @return [Consumer] the consumer for the current platform being validated + # + attr_accessor :consumer + + # @return [String, Nil] the name of the current subspec being validated, or nil if none + # + attr_accessor :subspec_name + + # Performs validation of a URL + # + def validate_url(url, user_agent = nil) + resp = Pod::HTTP.validate_url(url, user_agent) + + if !resp + warning('url', "There was a problem validating the URL #{url}.", true) + elsif !resp.success? + note('url', "The URL (#{url}) is not reachable.", true) + end + + resp + end + + # Performs validations related to the `homepage` attribute. + # + def validate_homepage(spec) + if spec.homepage + validate_url(spec.homepage) + end + end + + # Performs validation related to the `screenshots` attribute. + # + def validate_screenshots(spec) + spec.screenshots.compact.each do |screenshot| + response = validate_url(screenshot) + if response && !(response.headers['content-type'] && response.headers['content-type'].first =~ /image\/.*/i) + warning('screenshot', "The screenshot #{screenshot} is not a valid image.") + end + end + end + + # Performs validations related to the `social_media_url` attribute. + # + def validate_social_media_url(spec) + validate_url(spec.social_media_url, 'CocoaPods') if spec.social_media_url + end + + # Performs validations related to the `documentation_url` attribute. + # + def validate_documentation_url(spec) + validate_url(spec.documentation_url) if spec.documentation_url + end + + # Performs validations related to the `source` -> `http` attribute (if exists) + # + def validate_source_url(spec) + return if spec.source.nil? || spec.source[:http].nil? 
+ url = URI(spec.source[:http]) + return if url.scheme == 'https' || url.scheme == 'file' + warning('http', "The URL (`#{url}`) doesn't use the encrypted HTTPS protocol. " \ + 'It is crucial for Pods to be transferred over a secure protocol to protect your users from man-in-the-middle attacks. '\ + 'This will be an error in future releases. Please update the URL to use https.') + end + + # Performs validation for the version of Swift used during validation. + # + # An error will be displayed if the user has provided a `swift_versions` attribute within the podspec but is also + # using either `--swift-version` parameter or a `.swift-version` file with a Swift version that is not declared + # within the attribute. + # + # The user will be warned that the default version of Swift was used if the following things are true: + # - The project uses Swift at all + # - The user did not supply a Swift version via a parameter + # - There is no `swift_versions` attribute set within the specification + # - There is no `.swift-version` file present either. + # + def validate_swift_version + return unless uses_swift? + spec_swift_versions = spec.swift_versions.map(&:to_s) + + unless spec_swift_versions.empty? + message = nil + if !dot_swift_version.nil? && !spec_swift_versions.include?(dot_swift_version) + message = "Specification `#{spec.name}` specifies inconsistent `swift_versions` (#{spec_swift_versions.map { |s| "`#{s}`" }.to_sentence}) compared to the one present in your `.swift-version` file (`#{dot_swift_version}`). " \ + 'Please remove the `.swift-version` file which is now deprecated and only use the `swift_versions` attribute within your podspec.' + elsif !swift_version.nil? && !spec_swift_versions.include?(swift_version) + message = "Specification `#{spec.name}` specifies inconsistent `swift_versions` (#{spec_swift_versions.map { |s| "`#{s}`" }.to_sentence}) compared to the one passed during lint (`#{swift_version}`)." + end + unless message.nil? + error('swift', message) + return + end + end + + if swift_version.nil? && spec.swift_versions.empty? + if !dot_swift_version.nil? + # The user will be warned to delete the `.swift-version` file in favor of the `swift_versions` DSL attribute. + # This is intentionally not a lint warning since we do not want to break existing setups and instead just soft + # deprecate this slowly. + # + UI.warn 'Usage of the `.swift_version` file has been deprecated! Please delete the file and use the ' \ + "`swift_versions` attribute within your podspec instead.\n".yellow + else + warning('swift', + 'The validator used ' \ + "Swift `#{DEFAULT_SWIFT_VERSION}` by default because no Swift version was specified. " \ + 'To specify a Swift version during validation, add the `swift_versions` attribute in your podspec. ' \ + 'Note that usage of a `.swift-version` file is now deprecated.') + end + end + end + + def setup_validation_environment + validation_dir.rmtree if validation_dir.exist? + validation_dir.mkpath + @original_config = Config.instance.clone + config.installation_root = validation_dir + config.silent = !config.verbose + end + + def tear_down_validation_environment + clean! unless no_clean + Config.instance = @original_config + end + + def clean! + validation_dir.rmtree + end + + # @return [String] The deployment targret of the library spec. 
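+ #
+ # For illustration (hypothetical spec): an iOS subspec declaring a `6.0`
+ # deployment target is raised to `'8.0'` here when frameworks are used,
+ # since dynamic frameworks require iOS 8 or later.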
+ # + def deployment_target + deployment_target = spec.subspec_by_name(subspec_name).deployment_target(consumer.platform_name) + if consumer.platform_name == :ios && use_frameworks + minimum = Version.new('8.0') + deployment_target = [Version.new(deployment_target), minimum].max.to_s + end + deployment_target + end + + def download_pod + test_spec_names = consumer.spec.test_specs.select { |ts| ts.supported_on_platform?(consumer.platform_name) }.map(&:name) + podfile = podfile_from_spec(consumer.platform_name, deployment_target, use_frameworks, test_spec_names, use_modular_headers, use_static_frameworks) + sandbox = Sandbox.new(config.sandbox_root) + @installer = Installer.new(sandbox, podfile) + @installer.use_default_plugins = false + @installer.has_dependencies = !spec.dependencies.empty? + %i(prepare resolve_dependencies download_dependencies write_lockfiles).each { |m| @installer.send(m) } + @file_accessor = @installer.pod_targets.flat_map(&:file_accessors).find { |fa| fa.spec.name == consumer.spec.name } + end + + def create_app_project + app_project = Xcodeproj::Project.new(validation_dir + 'App.xcodeproj') + app_target = Pod::Generator::AppTargetHelper.add_app_target(app_project, consumer.platform_name, deployment_target) + sandbox = Sandbox.new(config.sandbox_root) + info_plist_path = app_project.path.dirname.+('App/App-Info.plist') + Pod::Installer::Xcode::PodsProjectGenerator::TargetInstallerHelper.create_info_plist_file_with_sandbox(sandbox, + info_plist_path, + app_target, + '1.0.0', + Platform.new(consumer.platform_name), + :appl, + :build_setting_value => '$(SRCROOT)/App/App-Info.plist') + Pod::Generator::AppTargetHelper.add_swift_version(app_target, derived_swift_version) + app_target.build_configurations.each do |config| + # Lint will fail if a AppIcon is set but no image is found with such name + # Happens only with Static Frameworks enabled but shouldn't be set anyway + config.build_settings.delete('ASSETCATALOG_COMPILER_APPICON_NAME') + # Ensure this is set generally but we have seen an issue with ODRs: + # see: https://github.com/CocoaPods/CocoaPods/issues/10933 + config.build_settings['PRODUCT_BUNDLE_IDENTIFIER'] = 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}' + end + app_project.save + app_project.recreate_user_schemes + end + + def add_app_project_import + app_project = Xcodeproj::Project.open(validation_dir + 'App.xcodeproj') + app_target = app_project.targets.first + pod_target = validation_pod_target + Pod::Generator::AppTargetHelper.add_app_project_import(app_project, app_target, pod_target, consumer.platform_name) + Pod::Generator::AppTargetHelper.add_xctest_search_paths(app_target) if @installer.pod_targets.any? { |pt| pt.spec_consumers.any? { |c| c.frameworks.include?('XCTest') || c.weak_frameworks.include?('XCTest') } } + Pod::Generator::AppTargetHelper.add_empty_swift_file(app_project, app_target) if @installer.pod_targets.any?(&:uses_swift?) + app_project.save + Xcodeproj::XCScheme.share_scheme(app_project.path, 'App') + # Share the pods xcscheme only if it exists. For pre-built vendored pods there is no xcscheme generated. + Xcodeproj::XCScheme.share_scheme(@installer.pods_project.path, pod_target.label) if shares_pod_target_xcscheme?(pod_target) + end + + # Returns the pod target for the pod being validated. Installation must have occurred before this can be invoked. 
+ # + def validation_pod_target + @installer.pod_targets.find { |pt| pt.pod_name == spec.root.name } + end + + # It creates a podfile in memory and builds a library containing the pod + # for all available platforms with xcodebuild. + # + def install_pod + %i(validate_targets generate_pods_project integrate_user_project + perform_post_install_actions).each { |m| @installer.send(m) } + + deployment_target = spec.subspec_by_name(subspec_name).deployment_target(consumer.platform_name) + configure_pod_targets(@installer.target_installation_results) + validate_dynamic_framework_support(@installer.aggregate_targets, deployment_target) + @installer.pods_project.save + end + + # @param [Array] target_installation_results + # The installation results to configure + # + def configure_pod_targets(target_installation_results) + target_installation_results.first.values.each do |pod_target_installation_result| + pod_target = pod_target_installation_result.target + native_target = pod_target_installation_result.native_target + native_target.build_configuration_list.build_configurations.each do |build_configuration| + (build_configuration.build_settings['OTHER_CFLAGS'] ||= '$(inherited)') << ' -Wincomplete-umbrella' + if pod_target.uses_swift? + # The Swift version for the target being validated can be overridden by `--swift-version` or the + # `.swift-version` file so we always use the derived Swift version. + # + # For dependencies, if the derived Swift version is supported then it is the one used. Otherwise, the Swift + # version for dependencies is inferred by the target that is integrating them. + swift_version = if pod_target == validation_pod_target + derived_swift_version + else + pod_target.spec_swift_versions.map(&:to_s).find do |v| + v == derived_swift_version + end || pod_target.swift_version + end + build_configuration.build_settings['SWIFT_VERSION'] = swift_version + end + end + pod_target_installation_result.test_specs_by_native_target.each do |test_native_target, test_spec| + if pod_target.uses_swift_for_spec?(test_spec) + test_native_target.build_configuration_list.build_configurations.each do |build_configuration| + swift_version = pod_target == validation_pod_target ? derived_swift_version : pod_target.swift_version + build_configuration.build_settings['SWIFT_VERSION'] = swift_version + end + end + end + end + end + + # Produces an error of dynamic frameworks were requested but are not supported by the deployment target + # + # @param [Array] aggregate_targets + # The aggregate targets installed by the installer + # + # @param [String,Version] deployment_target + # The deployment target of the installation + # + def validate_dynamic_framework_support(aggregate_targets, deployment_target) + return unless consumer.platform_name == :ios + return unless deployment_target.nil? || Version.new(deployment_target).major < 8 + aggregate_targets.each do |target| + if target.pod_targets.any?(&:uses_swift?) + uses_xctest = target.spec_consumers.any? { |c| (c.frameworks + c.weak_frameworks).include? 'XCTest' } + error('swift', 'Swift support uses dynamic frameworks and is therefore only supported on iOS > 8.') unless uses_xctest + end + end + end + + def validate_vendored_dynamic_frameworks + deployment_target = spec.subspec_by_name(subspec_name).deployment_target(consumer.platform_name) + + unless file_accessor.nil? 
+ dynamic_frameworks = file_accessor.vendored_dynamic_frameworks + dynamic_libraries = file_accessor.vendored_dynamic_libraries + if (dynamic_frameworks.count > 0 || dynamic_libraries.count > 0) && consumer.platform_name == :ios && + (deployment_target.nil? || Version.new(deployment_target).major < 8) + error('dynamic', 'Dynamic frameworks and libraries are only supported on iOS 8.0 and onwards.') + end + end + end + + # Performs platform specific analysis. It requires to download the source + # at each iteration + # + # @note Xcode warnings are treated as notes because the spec maintainer + # might not be the author of the library + # + # @return [void] + # + def build_pod + if !xcodebuild_available? + UI.warn "Skipping compilation with `xcodebuild` because it can't be found.\n".yellow + else + UI.message "\nBuilding with `xcodebuild`.\n".yellow do + scheme = if skip_import_validation? + validation_pod_target.label if validation_pod_target.should_build? + else + 'App' + end + if scheme.nil? + UI.warn "Skipping compilation with `xcodebuild` because target contains no sources.\n".yellow + else + requested_configuration = configuration ? configuration : 'Release' + if analyze + output = xcodebuild('analyze', scheme, requested_configuration, :deployment_target => deployment_target) + find_output = Executable.execute_command('find', [validation_dir, '-name', '*.html'], false) + if find_output != '' + message = 'Static Analysis failed.' + message += ' You can use `--verbose` for more information.' unless config.verbose? + message += ' You can use `--no-clean` to save a reproducible buid environment.' unless no_clean + error('build_pod', message) + end + else + output = xcodebuild('build', scheme, requested_configuration, :deployment_target => deployment_target) + end + parsed_output = parse_xcodebuild_output(output) + translate_output_to_linter_messages(parsed_output) + end + end + end + end + + # Builds and runs all test sources associated with the current specification being validated. + # + # @note Xcode warnings are treated as notes because the spec maintainer + # might not be the author of the library + # + # @return [void] + # + def test_pod + if !xcodebuild_available? + UI.warn "Skipping test validation with `xcodebuild` because it can't be found.\n".yellow + else + UI.message "\nTesting with `xcodebuild`.\n".yellow do + pod_target = validation_pod_target + all_test_specs = consumer.spec.test_specs + unless test_specs.nil? + test_spec_names = all_test_specs.map(&:base_name) + all_test_specs.select! { |test_spec| test_specs.include? test_spec.base_name } + test_specs.each do |test_spec| + unless test_spec_names.include? test_spec + UI.warn "Requested test spec `#{test_spec}` does not exist in the podspec. Existing test specs are `#{test_spec_names}`" + end + end + end + all_test_specs.each do |test_spec| + if !test_spec.supported_on_platform?(consumer.platform_name) + UI.warn "Skipping test spec `#{test_spec.name}` on platform `#{consumer.platform_name}` since it is not supported.\n".yellow + else + scheme = @installer.target_installation_results.first[pod_target.name].native_target_for_spec(test_spec) + output = xcodebuild('test', scheme, 'Debug', :deployment_target => test_spec.deployment_target(consumer.platform_name)) + parsed_output = parse_xcodebuild_output(output) + translate_output_to_linter_messages(parsed_output) + end + end + end + end + end + + def xcodebuild_available? + !Executable.which('xcodebuild').nil? && ENV['COCOAPODS_VALIDATOR_SKIP_XCODEBUILD'].nil? 
+ end + + FILE_PATTERNS = %i(source_files resources preserve_paths vendored_libraries + vendored_frameworks public_header_files preserve_paths + project_header_files private_header_files resource_bundles).freeze + + # It checks that every file pattern specified in a spec yields + # at least one file. It requires the pods to be already present + # in the current working directory under Pods/spec.name. + # + # @return [void] + # + def check_file_patterns + FILE_PATTERNS.each do |attr_name| + if respond_to?("_validate_#{attr_name}", true) + send("_validate_#{attr_name}") + else + validate_nonempty_patterns(attr_name, :error) + end + end + + _validate_header_mappings_dir + if consumer.spec.root? + _validate_license + _validate_module_map + end + end + + # Validates that the file patterns in `attr_name` match at least 1 file. + # + # @param [String,Symbol] attr_name the name of the attribute to check (ex. :public_header_files) + # + # @param [String,Symbol] message_type the type of message to send if the patterns are empty (ex. :error) + # + def validate_nonempty_patterns(attr_name, message_type) + return unless !file_accessor.spec_consumer.send(attr_name).empty? && file_accessor.send(attr_name).empty? + + add_result(message_type, 'file patterns', "The `#{attr_name}` pattern did not match any file.") + end + + def _validate_vendored_libraries + file_accessor.vendored_libraries.each do |lib| + basename = File.basename(lib) + lib_name = basename.downcase + unless lib_name.end_with?('.a') && lib_name.start_with?('lib') + warning('vendored_libraries', "`#{basename}` does not match the expected static library name format `lib[name].a`") + end + end + validate_nonempty_patterns(:vendored_libraries, :warning) + end + + def _validate_project_header_files + _validate_header_files(:project_header_files) + validate_nonempty_patterns(:project_header_files, :warning) + end + + def _validate_private_header_files + _validate_header_files(:private_header_files) + validate_nonempty_patterns(:private_header_files, :warning) + end + + def _validate_public_header_files + _validate_header_files(:public_header_files) + validate_nonempty_patterns(:public_header_files, :warning) + end + + def _validate_license + unless file_accessor.license || spec.license && (spec.license[:type] == 'Public Domain' || spec.license[:text]) + warning('license', 'Unable to find a license file') + end + end + + def _validate_module_map + if spec.module_map + unless file_accessor.module_map.exist? + error('module_map', 'Unable to find the specified module map file.') + end + unless file_accessor.module_map.extname == '.modulemap' + relative_path = file_accessor.module_map.relative_path_from file_accessor.root + error('module_map', "Unexpected file extension for modulemap file (#{relative_path}).") + end + end + end + + def _validate_resource_bundles + file_accessor.resource_bundles.each do |bundle, resource_paths| + next unless resource_paths.empty? + error('file patterns', "The `resource_bundles` pattern for `#{bundle}` did not match any file.") + end + end + + # Ensures that a list of header files only contains header files. + # + def _validate_header_files(attr_name) + header_files = file_accessor.send(attr_name) + non_header_files = header_files. + select { |f| !Sandbox::FileAccessor::HEADER_EXTENSIONS.include?(f.extname) }. + map { |f| f.relative_path_from(file_accessor.root) } + unless non_header_files.empty? 
+ error(attr_name, "The pattern matches non-header files (#{non_header_files.join(', ')}).") + end + non_source_files = header_files - file_accessor.source_files + unless non_source_files.empty? + error(attr_name, 'The pattern includes header files that are not listed ' \ + "in source_files (#{non_source_files.join(', ')}).") + end + end + + def _validate_header_mappings_dir + return unless header_mappings_dir = file_accessor.spec_consumer.header_mappings_dir + absolute_mappings_dir = file_accessor.root + header_mappings_dir + unless absolute_mappings_dir.directory? + error('header_mappings_dir', "The header_mappings_dir (`#{header_mappings_dir}`) is not a directory.") + end + non_mapped_headers = file_accessor.headers. + reject { |h| h.to_path.start_with?(absolute_mappings_dir.to_path) }. + map { |f| f.relative_path_from(file_accessor.root) } + unless non_mapped_headers.empty? + error('header_mappings_dir', "There are header files outside of the header_mappings_dir (#{non_mapped_headers.join(', ')}).") + end + end + + #-------------------------------------------------------------------------# + + private + + # !@group Result Helpers + + def error(*args) + add_result(:error, *args) + end + + def warning(*args) + add_result(:warning, *args) + end + + def note(*args) + add_result(:note, *args) + end + + def translate_output_to_linter_messages(parsed_output) + parsed_output.each do |message| + # Checking the error for `InputFile` is to work around an Xcode + # issue where linting would fail even though `xcodebuild` actually + # succeeds. Xcode.app also doesn't fail when this issue occurs, so + # it's safe for us to do the same. + # + # For more details see https://github.com/CocoaPods/CocoaPods/issues/2394#issuecomment-56658587 + # + if message.include?("'InputFile' should have") + next + end + + if message =~ /\S+:\d+:\d+: error:/ + error('xcodebuild', message) + elsif message =~ /\S+:\d+:\d+: warning:/ + warning('xcodebuild', message) + else + note('xcodebuild', message) + end + end + end + + def shares_pod_target_xcscheme?(pod_target) + Pathname.new(@installer.pods_project.path + pod_target.label).exist? + end + + def add_result(type, attribute_name, message, public_only = false) + result = results.find do |r| + r.type == type && r.attribute_name && r.message == message && r.public_only? == public_only + end + unless result + result = Result.new(type, attribute_name, message, public_only) + results << result + end + result.platforms << consumer.platform_name if consumer + result.subspecs << subspec_name if subspec_name && !result.subspecs.include?(subspec_name) + end + + # Specialized Result to support subspecs aggregation + # + class Result < Specification::Linter::Results::Result + def initialize(type, attribute_name, message, public_only = false) + super(type, attribute_name, message, public_only) + @subspecs = [] + end + + attr_reader :subspecs + end + + #-------------------------------------------------------------------------# + + private + + # !@group Helpers + + # @return [Array] an array of source URLs used to create the + # {Podfile} used in the linting process + # + attr_reader :source_urls + + # @param [String] platform_name + # the name of the platform, which should be declared + # in the Podfile. + # + # @param [String] deployment_target + # the deployment target, which should be declared in + # the Podfile. 
+ # + # @param [Bool] use_frameworks + # whether frameworks should be used for the installation + # + # @param [Array] test_spec_names + # the test spec names to include in the podfile. + # + # @return [Podfile] a podfile that requires the specification on the + # current platform. + # + # @note The generated podfile takes into account whether the linter is + # in local mode. + # + def podfile_from_spec(platform_name, deployment_target, use_frameworks = true, test_spec_names = [], use_modular_headers = false, use_static_frameworks = false) + name = subspec_name || spec.name + podspec = file.realpath + local = local? + urls = source_urls + + additional_podspec_pods = external_podspecs ? Dir.glob(external_podspecs) : [] + additional_path_pods = (include_podspecs ? Dir.glob(include_podspecs) : []) .select { |path| spec.name != Specification.from_file(path).name } - additional_podspec_pods + + Pod::Podfile.new do + install! 'cocoapods', :deterministic_uuids => false, :warn_for_unused_master_specs_repo => false + # By default inhibit warnings for all pods, except the one being validated. + inhibit_all_warnings! + urls.each { |u| source(u) } + target 'App' do + if use_static_frameworks + use_frameworks!(:linkage => :static) + else + use_frameworks!(use_frameworks) + end + use_modular_headers! if use_modular_headers + platform(platform_name, deployment_target) + if local + pod name, :path => podspec.dirname.to_s, :inhibit_warnings => false + else + pod name, :podspec => podspec.to_s, :inhibit_warnings => false + end + + additional_path_pods.each do |podspec_path| + podspec_name = File.basename(podspec_path, '.*') + pod podspec_name, :path => File.dirname(podspec_path) + end + + additional_podspec_pods.each do |podspec_path| + podspec_name = File.basename(podspec_path, '.*') + pod podspec_name, :podspec => podspec_path + end + + test_spec_names.each do |test_spec_name| + if local + pod test_spec_name, :path => podspec.dirname.to_s, :inhibit_warnings => false + else + pod test_spec_name, :podspec => podspec.to_s, :inhibit_warnings => false + end + end + end + end + end + + # Parse the xcode build output to identify the lines which are relevant + # to the linter. + # + # @param [String] output the output generated by the xcodebuild tool. + # + # @note The indentation and the temporary path is stripped form the + # lines. + # + # @return [Array] the lines that are relevant to the linter. + # + def parse_xcodebuild_output(output) + lines = output.split("\n") + selected_lines = lines.select do |l| + l.include?('error: ') && (l !~ /errors? generated\./) && (l !~ /error: \(null\)/) || + l.include?('warning: ') && (l !~ /warnings? generated\./) && (l !~ /frameworks only run on iOS 8/) || + l.include?('note: ') && (l !~ /expanded from macro/) + end + selected_lines.map do |l| + new = l.force_encoding('UTF-8').gsub(%r{#{validation_dir}/Pods/}, '') + new.gsub!(/^ */, ' ') + end + end + + # @return [String] Executes xcodebuild in the current working directory and + # returns its output (both STDOUT and STDERR). 
+ # + def xcodebuild(action, scheme, configuration, deployment_target:) + require 'fourflusher' + command = %W(clean #{action} -workspace #{File.join(validation_dir, 'App.xcworkspace')} -scheme #{scheme} -configuration #{configuration}) + case consumer.platform_name + when :osx, :macos + command += %w(CODE_SIGN_IDENTITY=) + when :ios + command += %w(CODE_SIGN_IDENTITY=- -sdk iphonesimulator) + command += Fourflusher::SimControl.new.destination(:oldest, 'iOS', deployment_target) + xcconfig = consumer.pod_target_xcconfig + if xcconfig + archs = xcconfig['VALID_ARCHS'] + if archs && (archs.include? 'armv7') && !(archs.include? 'i386') && (archs.include? 'x86_64') + # Prevent Xcodebuild from testing the non-existent i386 simulator if armv7 is specified without i386 + command += %w(ARCHS=x86_64) + end + end + when :watchos + command += %w(CODE_SIGN_IDENTITY=- -sdk watchsimulator) + command += Fourflusher::SimControl.new.destination(:oldest, 'watchOS', deployment_target) + when :tvos + command += %w(CODE_SIGN_IDENTITY=- -sdk appletvsimulator) + command += Fourflusher::SimControl.new.destination(:oldest, 'tvOS', deployment_target) + end + + if analyze + command += %w(CLANG_ANALYZER_OUTPUT=html CLANG_ANALYZER_OUTPUT_DIR=analyzer) + end + + begin + _xcodebuild(command, true) + rescue => e + message = 'Returned an unsuccessful exit code.' + message += ' You can use `--verbose` for more information.' unless config.verbose? + error('xcodebuild', message) + e.message + end + end + + # Executes the given command in the current working directory. + # + # @return [String] The output of the given command + # + def _xcodebuild(command, raise_on_failure = false) + Executable.execute_command('xcodebuild', command, raise_on_failure) + end + + # Whether the platform with the specified name is valid + # + # @param [Platform] platform + # The platform to check + # + # @return [Bool] True if the platform is valid + # + def valid_platform?(platform) + VALID_PLATFORMS.any? { |p| p.name == platform.name } + end + + # Whether the platform is supported by the specification + # + # @param [Platform] platform + # The platform to check + # + # @param [Specification] spec + # The specification which must support the provided platform + # + # @return [Bool] Whether the platform is supported by the specification + # + def supported_platform?(platform, spec) + available_platforms = spec.available_platforms + + available_platforms.any? { |p| p.name == platform.name } + end + + # Whether the provided name matches the platform + # + # @param [Platform] platform + # The platform + # + # @param [String] name + # The name to check against the provided platform + # + def platform_name_match?(platform, name) + [platform.name, platform.string_name].any? 
{ |n| n.casecmp(name) == 0 } + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/version_metadata.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/version_metadata.rb new file mode 100644 index 0000000..4e9b53c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/version_metadata.rb @@ -0,0 +1,26 @@ +module Pod + module VersionMetadata + CACHE_VERSION = '003'.freeze + + def self.gem_version + Pod::VERSION + end + + def self.project_cache_version + [ + gem_version, + cocoapods_sha, + 'project-cache', + CACHE_VERSION, + ].compact.join('.') + end + + def self.cocoapods_sha + return unless gemspec = Gem.loaded_specs['cocoapods'] + return unless source = gemspec.source + return unless source.respond_to?(:revision) + source.revision + end + private_class_method :cocoapods_sha + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode.rb new file mode 100644 index 0000000..847f5f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode.rb @@ -0,0 +1,7 @@ +module Pod + module Xcode + autoload :LinkageAnalyzer, 'cocoapods/xcode/linkage_analyzer' + autoload :XCFramework, 'cocoapods/xcode/xcframework' + autoload :FrameworkPaths, 'cocoapods/xcode/framework_paths' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/framework_paths.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/framework_paths.rb new file mode 100644 index 0000000..9c33462 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/framework_paths.rb @@ -0,0 +1,54 @@ +module Pod + module Xcode + class FrameworkPaths + # @return [String] the path to the .framework + # + attr_reader :source_path + + # @return [String, Nil] the dSYM path, if one exists + # + attr_reader :dsym_path + + # @return [Array, Nil] the bcsymbolmap files path array, if one exists + # + attr_reader :bcsymbolmap_paths + + def initialize(source_path, dsym_path = nil, bcsymbolmap_paths = nil) + @source_path = source_path + @dsym_path = dsym_path + @bcsymbolmap_paths = bcsymbolmap_paths + end + + def ==(other) + if other.class == self.class + other.source_path == @source_path && other.dsym_path == @dsym_path && other.bcsymbolmap_paths == @bcsymbolmap_paths + else + false + end + end + + alias eql? == + + def hash + [source_path, dsym_path, bcsymbolmap_paths].hash + end + + def all_paths + [source_path, dsym_path, bcsymbolmap_paths].flatten.compact + end + + # @param [Pathname] path the path to the `.framework` bundle + # + # @return [FrameworkPaths] the path of the framework with dsym & bcsymbolmap paths, if found + # + def self.from_path(path) + dsym_name = "#{path.basename}.dSYM" + dsym_path = Pathname.new("#{path.dirname}/#{dsym_name}") + dsym_path = nil unless dsym_path.exist? 
+ bcsymbolmap_paths = Pathname.glob(path.dirname, '*.bcsymbolmap') + + FrameworkPaths.new(path, dsym_path, bcsymbolmap_paths) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/linkage_analyzer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/linkage_analyzer.rb new file mode 100644 index 0000000..270623e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/linkage_analyzer.rb @@ -0,0 +1,22 @@ +require 'macho' + +module Pod + module Xcode + class LinkageAnalyzer + # @param [Pathname] binary + # The file to be checked for being a dynamic Mach-O binary. + # + # @return [Boolean] Whether `binary` can be dynamically linked. + # + def self.dynamic_binary?(binary) + @cached_dynamic_binary_results ||= {} + return @cached_dynamic_binary_results[binary] unless @cached_dynamic_binary_results[binary].nil? + return false unless binary.file? + + @cached_dynamic_binary_results[binary] = MachO.open(binary).dylib? + rescue MachO::MachOError + @cached_dynamic_binary_results[binary] = false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/xcframework.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/xcframework.rb new file mode 100644 index 0000000..ed894f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/xcframework.rb @@ -0,0 +1,99 @@ +# frozen_string_literal: true + +require 'cocoapods/xcode/xcframework/xcframework_slice' + +module Pod + module Xcode + class XCFramework + # @return [String] target_name the target name this XCFramework belongs to + # + attr_reader :target_name + + # @return [Pathname] path the path to the .xcframework on disk + # + attr_reader :path + + # @return [Pod::Version] the format version of the .xcframework + # + attr_reader :format_version + + # @return [Array] the slices contained inside this .xcframework + # + attr_reader :slices + + # @return [Hash] the contents of the parsed plist + # + attr_reader :plist + + # Initializes an XCFramework instance with a path on disk + # + # @param [String] target_name @see target_name + # @param [Pathname, String] path @see path + # + # @return [XCFramework] the xcframework at the given path + # + def initialize(target_name, path) + @target_name = target_name + @path = Pathname.new(path).tap do |p| + raise 'Absolute path is required' unless p.absolute? + end + + @plist = Xcodeproj::Plist.read_from_path(plist_path) + parse_plist_contents + end + + # @return [Pathname] the path to the Info.plist + # + def plist_path + path + 'Info.plist' + end + + # @return [String] the basename of the framework + # + def name + File.basename(path, '.xcframework') + end + + # @return [Boolean] true if any slices use dynamic linkage + # + def includes_dynamic_slices? + build_type.dynamic? + end + + # @return [Boolean] true if any slices use dynamic linkage + # + def includes_static_slices? + build_type.static? 
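As a usage illustration (not part of the vendored source), this is roughly how the `XCFramework` class above is consumed: point it at an `.xcframework` bundle on disk and query the metadata parsed from `Info.plist`. The bundle path is hypothetical, and the `require` is an assumption about how the constant becomes available; the method calls are the ones defined in this file.

```ruby
require 'cocoapods'  # assumption: loading the gem makes Pod::Xcode::XCFramework resolvable

# Hypothetical bundle, e.g. one produced by `xcodebuild -create-xcframework`.
path = Pathname.new('/tmp/MyLib.xcframework').expand_path  # initializer requires an absolute path

xcf = Pod::Xcode::XCFramework.new('MyLib', path)

xcf.format_version            # Pod::Version parsed from XCFrameworkFormatVersion
xcf.slices.map(&:identifier)  # e.g. ["ios-arm64", "ios-arm64_x86_64-simulator"]
xcf.includes_dynamic_slices?  # linkage of the first slice, per #build_type below
```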
+ end + + # @return [Pod::BuildType] the build type of the contained slices + # + # @note As CocoaPods does not support mixed packaging nor linkage for xcframework slices, + # we pick the first slice and assume all are the same + # + def build_type + @build_type ||= slices.first.build_type + end + + private + + def parse_plist_contents + @format_version = Pod::Version.new(plist['XCFrameworkFormatVersion']) + @slices = plist['AvailableLibraries'].map do |library| + identifier = library['LibraryIdentifier'] + relative_path = library['LibraryPath'] + archs = library['SupportedArchitectures'] + platform_name = library['SupportedPlatform'] + platform_variant = library['SupportedPlatformVariant'] + headers = library['HeadersPath'] + + slice_root = path.join(identifier) + slice_path = slice_root.join(relative_path) + headers = slice_root.join(headers) unless headers.nil? + XCFramework::Slice.new(slice_path, identifier, archs, platform_name, :platform_variant => platform_variant, :headers => headers) + end + raise Informative, "XCFramework at #{path} does not contain any frameworks or libraries." if slices.empty? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/xcframework/xcframework_slice.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/xcframework/xcframework_slice.rb new file mode 100644 index 0000000..4488bf2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-1.11.2/lib/cocoapods/xcode/xcframework/xcframework_slice.rb @@ -0,0 +1,138 @@ +require 'cocoapods/xcode/linkage_analyzer' + +module Pod + module Xcode + class XCFramework + class Slice + # @return [Pathname] the path to the .framework or .a of this slice + # + attr_reader :path + + # @return [Array] list of supported architectures + # + attr_reader :supported_archs + + # @return [String] the framework identifier + # + attr_reader :identifier + + # @return [Platform] the supported platform + # + attr_reader :platform + + # @return [Symbol] the platform variant. Either :simulator or nil + # + attr_reader :platform_variant + + # @return [Pathname] the path to the headers + # + attr_reader :headers + + def initialize(path, identifier, archs, platform, platform_variant: nil, headers: path.join('Headers')) + @path = path + @identifier = identifier + @supported_archs = archs + @platform = Pod::Platform.new(platform) + @platform_variant = platform_variant.to_sym unless platform_variant.nil? + @headers = headers + end + + # @return [String] the name of the framework + # + def name + @name ||= begin + case package_type + when :framework + File.basename(path, '.framework') + when :library + result = File.basename(path, '.a').gsub(/^lib/, '') + result[0] = result.downcase[0] + result + else + raise Informative, "Invalid package type `#{package_type}`" + end + end + end + + # @return [Boolean] true if this is a slice built for simulator + # + def simulator_variant? + @platform_variant == :simulator + end + + # @return [Symbol] the package type of the slice - either :framework or :library + # + def package_type + @package_type ||= begin + ext = File.extname(path) + case ext + when '.framework' + :framework + when '.a' + :library + else + raise Informative, "Invalid XCFramework slice type `#{ext}`" + end + end + end + + # @return [Boolean] true if this slice is a framework, not a library + # + def framework? + build_type.framework? + end + + # @return [Boolean] true if this slice is a library, not a framework + # + def library? + build_type.library? 
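The predicates above (and `static?`/`dynamic?` just below) all delegate to the slice's `build_type`, which is derived from the binary itself. The following is a hedged, standalone sketch of that derivation, mirroring `Xcode::LinkageAnalyzer.dynamic_binary?` and `Pod::BuildType` from the files in this diff; the binary path is hypothetical and the `require` lines are assumptions about how the constants are loaded.

```ruby
require 'macho'           # the same gem LinkageAnalyzer relies on
require 'cocoapods-core'  # assumption: provides Pod::BuildType via autoload

binary = '/tmp/MyLib.xcframework/ios-arm64/MyLib.framework/MyLib'  # hypothetical path

# A Mach-O dylib means dynamic linkage; anything else, or a Mach-O parse
# error, is treated as static -- essentially the decision LinkageAnalyzer makes.
linkage = begin
  MachO.open(binary).dylib? ? :dynamic : :static
rescue MachO::MachOError
  :static
end

build_type = Pod::BuildType.new(:linkage => linkage, :packaging => :framework)
build_type.to_s        # => "dynamic framework" or "static framework"
build_type.framework?  # => true
```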
+ end + + # @return [Boolean] true if this slice contains a statically-linked binary + # + def static? + build_type.static? + end + + # @return [Boolean] true if this slice contains a dynamically-linked binary + # + def dynamic? + build_type.dynamic? + end + + # @return [BuildType] the build type of the binary + # + def build_type + @build_type ||= begin + linkage = Xcode::LinkageAnalyzer.dynamic_binary?(binary_path) ? :dynamic : :static + ext = File.extname(path) + packaging = case ext + when '.framework' + :framework + when '.a' + :library + else + raise Informative, "Invalid XCFramework slice type `#{ext}`" + end + BuildType.new(:linkage => linkage, :packaging => packaging) + end + end + + # @return [Pathname] the path to the bundled binary + # + def binary_path + @binary_path ||= begin + case package_type + when :framework + path + name + when :library + path + else + raise Informative, "Invalid package type `#{package_type}`" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/LICENSE new file mode 100644 index 0000000..f25cc61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/README.md new file mode 100644 index 0000000..f9a7d47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/README.md @@ -0,0 +1,42 @@ +# CocoaPods Core + +[![Build Status](https://img.shields.io/travis/CocoaPods/Core/master.svg?style=flat)](https://travis-ci.org/CocoaPods/Core) +[![Test Coverage](https://api.codeclimate.com/v1/badges/91a2d70b9ed977815c66/test_coverage)](https://codeclimate.com/github/CocoaPods/Core/test_coverage) +[![Maintainability](https://api.codeclimate.com/v1/badges/91a2d70b9ed977815c66/maintainability)](https://codeclimate.com/github/CocoaPods/Core/maintainability) + +The CocoaPods-Core gem provides support to work with the models of CocoaPods. +It is intended to be used in place of the CocoaPods gem when the installation +of the dependencies is not needed. Therefore, it is suitable for web services. + +Provides support for working with the following models: + +- `Pod::Specification` - [Podspec Syntax Reference](https://guides.cocoapods.org/syntax/podspec.html). 
+- `Pod::Podfile` - [Podfile Syntax Reference](https://guides.cocoapods.org/syntax/podfile.html). +- `Pod::Source` - collections of podspec files like the [CocoaPods Spec repo](https://github.com/CocoaPods/Specs). + +The gem also provides support for ancillary features like +`Pod::Specification::Set::Presenter` suitable for presetting descriptions of +Pods and the `Specification::Linter`, which ensures the validity of podspec +files. + +## Installation + +``` +$ [sudo] gem install cocoapods-core +``` + +The `cocoapods-core` gem requires Ruby 2.0.0 or later. + +## Collaborate + +All CocoaPods development happens on GitHub, there is a repository for +[CocoaPods](https://github.com/CocoaPods/CocoaPods) and one for the [CocoaPods +specs](https://github.com/CocoaPods/Specs). Contributing patches or Pods is +really easy and gratifying. + +Follow [@CocoaPods](http://twitter.com/CocoaPods) to get up to date +information about what's going on in the CocoaPods world. + +## License + +This gem and CocoaPods are available under the MIT license. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core.rb new file mode 100644 index 0000000..c9045b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core.rb @@ -0,0 +1,40 @@ +# The Pod modules name-spaces all the classes of CocoaPods. +# +module Pod + require 'cocoapods-core/gem_version' + + # Indicates a runtime error **not** caused by a bug. + # + class PlainInformative < StandardError; end + + # Indicates a user error. + # + class Informative < PlainInformative; end + + require 'pathname' + require 'cocoapods-core/vendor' + + autoload :Version, 'cocoapods-core/version' + autoload :Requirement, 'cocoapods-core/requirement' + autoload :Dependency, 'cocoapods-core/dependency' + + autoload :CoreUI, 'cocoapods-core/core_ui' + autoload :DSLError, 'cocoapods-core/standard_error' + autoload :GitHub, 'cocoapods-core/github' + autoload :HTTP, 'cocoapods-core/http' + autoload :Lockfile, 'cocoapods-core/lockfile' + autoload :Metrics, 'cocoapods-core/metrics' + autoload :Platform, 'cocoapods-core/platform' + autoload :Podfile, 'cocoapods-core/podfile' + autoload :Source, 'cocoapods-core/source' + autoload :CDNSource, 'cocoapods-core/cdn_source' + autoload :TrunkSource, 'cocoapods-core/trunk_source' + autoload :Specification, 'cocoapods-core/specification' + autoload :StandardError, 'cocoapods-core/standard_error' + autoload :YAMLHelper, 'cocoapods-core/yaml_helper' + autoload :BuildType, 'cocoapods-core/build_type' + + # TODO: Fix + # + Spec = Specification +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/build_type.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/build_type.rb new file mode 100644 index 0000000..0c36d71 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/build_type.rb @@ -0,0 +1,121 @@ +module Pod + class BuildType + # @return [Array] known packaging options. + # + KNOWN_PACKAGING_OPTIONS = %i(library framework).freeze + + # @return [Array] known linking options. 
+ # + KNOWN_LINKAGE_OPTIONS = %i(static dynamic).freeze + + # @return [Symbol] the packaging for this build type, one of #KNOWN_PACKAGING_OPTIONS + # + attr_reader :packaging + + # @return [Symbol] the linkage for this build type, one of #KNOWN_LINKAGE_OPTIONS + # + attr_reader :linkage + + attr_reader :hash + + def initialize(linkage: :static, packaging: :library) + unless KNOWN_LINKAGE_OPTIONS.include?(linkage) + raise ArgumentError, "Invalid linkage option #{linkage.inspect}, valid options are #{KNOWN_LINKAGE_OPTIONS.inspect}" + end + unless KNOWN_PACKAGING_OPTIONS.include?(packaging) + raise ArgumentError, "Invalid packaging option #{packaging.inspect}, valid options are #{KNOWN_PACKAGING_OPTIONS.inspect}" + end + @packaging = packaging + @linkage = linkage + @hash = packaging.hash ^ linkage.hash + end + + # @return [BuildType] the build type for a dynamic library + def self.dynamic_library + new(:linkage => :dynamic, :packaging => :library) + end + + # @return [BuildType] the build type for a static library + # + def self.static_library + new(:linkage => :static, :packaging => :library) + end + + # @return [BuildType] the build type for a dynamic framework + # + def self.dynamic_framework + new(:linkage => :dynamic, :packaging => :framework) + end + + # @return [BuildType] the build type for a static framework + # + def self.static_framework + new(:linkage => :static, :packaging => :framework) + end + + # @return [Boolean] whether the target is built dynamically + # + def dynamic? + linkage == :dynamic + end + + # @return [Boolean] whether the target is built statically + # + def static? + linkage == :static + end + + # @return [Boolean] whether the target is built as a framework + # + def framework? + packaging == :framework + end + + # @return [Boolean] whether the target is built as a library + # + def library? + packaging == :library + end + + # @return [Boolean] whether the target is built as a dynamic framework + # + def dynamic_framework? + dynamic? && framework? + end + + # @return [Boolean] whether the target is built as a dynamic library + # + def dynamic_library? + dynamic? && library? + end + + # @return [Boolean] whether the target is built as a static framework + # + def static_framework? + static? && framework? + end + + # @return [Boolean] whether the target is built as a static library + # + def static_library? + static? && library? + end + + def to_s + "#{linkage} #{packaging}" + end + + def to_hash + { :linkage => linkage, :packaging => packaging } + end + + def inspect + "#<#{self.class} linkage=#{linkage} packaging=#{packaging}>" + end + + def ==(other) + linkage == other.linkage && + packaging == other.packaging + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/cdn_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/cdn_source.rb new file mode 100644 index 0000000..40b578e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/cdn_source.rb @@ -0,0 +1,501 @@ +require 'cocoapods-core/source' +require 'rest' +require 'concurrent' +require 'netrc' +require 'addressable' + +module Pod + # Subclass of Pod::Source to provide support for CDN-based Specs repositories + # + class CDNSource < Source + include Concurrent + + MAX_NUMBER_OF_RETRIES = (ENV['COCOAPODS_CDN_MAX_NUMBER_OF_RETRIES'] || 5).to_i + # Single thread executor for all network activity. 
+ HYDRA_EXECUTOR = Concurrent::SingleThreadExecutor.new + + private_constant :HYDRA_EXECUTOR + + # @param [String] repo The name of the repository + # + def initialize(repo) + @check_existing_files_for_update = false + # Optimization: we initialize startup_time when the source is first initialized + # and then test file modification dates against it. Any file that was touched + # after the source was initialized, is considered fresh enough. + @startup_time = Time.new + + @version_arrays_by_fragment_by_name = {} + + super(repo) + end + + # @return [String] The URL of the source. + # + def url + @url ||= File.read(repo.join('.url')).chomp.chomp('/') + '/' + end + + # @return [String] The type of the source. + # + def type + 'CDN' + end + + def refresh_metadata + if metadata.nil? + unless repo.exist? + debug "CDN: Repo #{name} does not exist!" + return + end + + specs_dir.mkpath + download_file('CocoaPods-version.yml') + end + + super + end + + def preheat_existing_files + files_to_update = files_definitely_to_update + deprecated_local_podspecs - ['deprecated_podspecs.txt'] + debug "CDN: #{name} Going to update #{files_to_update.count} files" + + concurrent_requests_catching_errors do + # Queue all tasks first + loaders = files_to_update.map do |file| + download_file_async(file) + end + # Block and wait for all to complete running on Hydra + Promises.zip_futures_on(HYDRA_EXECUTOR, *loaders).wait! + end + end + + def files_definitely_to_update + Pathname.glob(repo.join('**/*.{txt,yml}')).map { |f| f.relative_path_from(repo).to_s } + end + + def deprecated_local_podspecs + download_file('deprecated_podspecs.txt') + local_file('deprecated_podspecs.txt', &:to_a). + map { |f| Pathname.new(f.chomp) }. + select { |f| repo.join(f).exist? } + end + + # @return [Pathname] The directory where the specs are stored. + # + def specs_dir + @specs_dir ||= repo + 'Specs' + end + + # @!group Querying the source + #-------------------------------------------------------------------------# + + # @return [Array] the list of the name of all the Pods. + # + def pods + download_file('all_pods.txt') + local_file('all_pods.txt', &:to_a).map(&:chomp) + end + + # @return [Array] all the available versions for the Pod, sorted + # from highest to lowest. + # + # @param [String] name + # the name of the Pod. + # + def versions(name) + return nil unless specs_dir + raise ArgumentError, 'No name' unless name + + fragment = pod_shard_fragment(name) + + ensure_versions_file_loaded(fragment) + + return @versions_by_name[name] unless @versions_by_name[name].nil? + + pod_path_actual = pod_path(name) + pod_path_relative = relative_pod_path(name) + + return nil if @version_arrays_by_fragment_by_name[fragment][name].nil? + + concurrent_requests_catching_errors do + loaders = [] + + @versions_by_name[name] ||= @version_arrays_by_fragment_by_name[fragment][name].map do |version| + # Optimization: ensure all the podspec files at least exist. The correct one will get refreshed + # in #specification_path regardless. + podspec_version_path_relative = Pathname.new(version).join("#{name}.podspec.json") + + unless pod_path_actual.join(podspec_version_path_relative).exist? + # Queue all podspec download tasks first + loaders << download_file_async(pod_path_relative.join(podspec_version_path_relative).to_s) + end + + begin + Version.new(version) if version[0, 1] != '.' 
+ rescue ArgumentError + raise Informative, 'An unexpected version directory ' \ + "`#{version}` was encountered for the " \ + "`#{pod_path_actual}` Pod in the `#{name}` repository." + end + end.compact.sort.reverse + + # Block and wait for all to complete running on Hydra + Promises.zip_futures_on(HYDRA_EXECUTOR, *loaders).wait! + end + + @versions_by_name[name] + end + + # Returns the path of the specification with the given name and version. + # + # @param [String] name + # the name of the Pod. + # + # @param [Version,String] version + # the version for the specification. + # + # @return [Pathname] The path of the specification. + # + def specification_path(name, version) + raise ArgumentError, 'No name' unless name + raise ArgumentError, 'No version' unless version + unless versions(name).include?(Version.new(version)) + raise StandardError, "Unable to find the specification #{name} " \ + "(#{version}) in the #{self.name} source." + end + + podspec_version_path_relative = Pathname.new(version.to_s).join("#{name}.podspec.json") + relative_podspec = relative_pod_path(name).join(podspec_version_path_relative).to_s + download_file(relative_podspec) + pod_path(name).join(podspec_version_path_relative) + end + + # @return [Array] all the specifications contained by the + # source. + # + def all_specs + raise Informative, "Can't retrieve all the specs for a CDN-backed source, it will take forever" + end + + # @return [Array] the sets of all the Pods. + # + def pod_sets + raise Informative, "Can't retrieve all the pod sets for a CDN-backed source, it will take forever" + end + + # @!group Searching the source + #-------------------------------------------------------------------------# + + # @return [Set] a set for a given dependency. The set is identified by the + # name of the dependency and takes into account subspecs. + # + # @note This method is optimized for fast lookups by name, i.e. it does + # *not* require iterating through {#pod_sets} + # + # @todo Rename to #load_set + # + def search(query) + unless specs_dir + raise Informative, "Unable to find a source named: `#{name}`" + end + if query.is_a?(Dependency) + query = query.root_name + end + + fragment = pod_shard_fragment(query) + + ensure_versions_file_loaded(fragment) + + version_arrays_by_name = @version_arrays_by_fragment_by_name[fragment] || {} + + found = version_arrays_by_name[query].nil? ? nil : query + + if found + set = set(query) + set if set.specification_name == query + end + end + + # @return [Array] The list of the sets that contain the search term. + # + # @param [String] query + # the search term. Can be a regular expression. + # + # @param [Bool] full_text_search + # performed using Algolia + # + # @note full text search requires to load the specification for each pod, + # and therefore not supported. + # + def search_by_name(query, full_text_search = false) + if full_text_search + require 'algoliasearch' + begin + algolia_result = algolia_search_index.search(query, :attributesToRetrieve => 'name') + names = algolia_result['hits'].map { |r| r['name'] } + names.map { |n| set(n) }.reject { |s| s.versions.compact.empty? } + rescue Algolia::AlgoliaError => e + raise Informative, "CDN: #{name} - Cannot perform full-text search because Algolia returned an error: #{e}" + end + else + super(query) + end + end + + # Check update dates for all existing files. + # Does not download non-existing specs, since CDN-backed repo is updated live. 
+ # + # @param [Bool] show_output + # + # @return [Array] Always returns empty array, as it cannot know + # everything that actually changed. + # + def update(_show_output) + @check_existing_files_for_update = true + begin + preheat_existing_files + ensure + @check_existing_files_for_update = false + end + [] + end + + def updateable? + true + end + + def git? + false + end + + def indexable? + false + end + + private + + def ensure_versions_file_loaded(fragment) + return if !@version_arrays_by_fragment_by_name[fragment].nil? && !@check_existing_files_for_update + + # Index file that contains all the versions for all the pods in the shard. + # We use those because you can't get a directory listing from a CDN. + index_file_name = index_file_name_for_fragment(fragment) + download_file(index_file_name) + versions_raw = local_file(index_file_name, &:to_a).map(&:chomp) + @version_arrays_by_fragment_by_name[fragment] = versions_raw.reduce({}) do |hash, row| + row = row.split('/') + pod = row.shift + versions = row + + hash[pod] = versions + hash + end + end + + def algolia_search_index + @index ||= begin + require 'algoliasearch' + + raise Informative, "Cannot perform full-text search in repo #{name} because it's missing Algolia config" if download_file('AlgoliaSearch.yml').nil? + algolia_config = YAMLHelper.load_string(local_file('AlgoliaSearch.yml', &:read)) + + client = Algolia::Client.new(:application_id => algolia_config['application_id'], :api_key => algolia_config['api_key']) + Algolia::Index.new(algolia_config['index'], client) + end + end + + def index_file_name_for_fragment(fragment) + fragment_joined = fragment.join('_') + fragment_joined = '_' + fragment_joined unless fragment.empty? + "all_pods_versions#{fragment_joined}.txt" + end + + def pod_shard_fragment(pod_name) + metadata.path_fragment(pod_name)[0..-2] + end + + def local_file_okay?(partial_url) + file_path = repo.join(partial_url) + File.exist?(file_path) && File.size(file_path) > 0 + end + + def local_file(partial_url) + file_path = repo.join(partial_url) + File.open(file_path) do |file| + yield file if block_given? + end + end + + def relative_pod_path(pod_name) + pod_path(pod_name).relative_path_from(repo) + end + + def download_file(partial_url) + # Block the main thread waiting for Hydra to finish + # + # Used for single-file downloads + download_file_async(partial_url).wait! + end + + def download_file_async(partial_url) + file_remote_url = Addressable::URI.encode(url + partial_url.to_s) + path = repo + partial_url + + file_okay = local_file_okay?(partial_url) + if file_okay + if @startup_time < File.mtime(path) + debug "CDN: #{name} Relative path: #{partial_url} modified during this run! Returning local" + return Promises.fulfilled_future(partial_url, HYDRA_EXECUTOR) + end + + unless @check_existing_files_for_update + debug "CDN: #{name} Relative path: #{partial_url} exists! Returning local because checking is only performed in repo update" + return Promises.fulfilled_future(partial_url, HYDRA_EXECUTOR) + end + end + + path.dirname.mkpath + + etag_path = path.sub_ext(path.extname + '.etag') + + etag = File.read(etag_path) if file_okay && File.exist?(etag_path) + debug "CDN: #{name} Relative path: #{partial_url}, has ETag? #{etag}" unless etag.nil? 
+ + download_and_save_with_retries_async(partial_url, file_remote_url, etag) + end + + def download_and_save_with_retries_async(partial_url, file_remote_url, etag, retries = MAX_NUMBER_OF_RETRIES) + path = repo + partial_url + etag_path = path.sub_ext(path.extname + '.etag') + + download_task = download_typhoeus_impl_async(file_remote_url, etag).then do |response| + case response.response_code + when 301, 302 + redirect_location = response.headers['location'] + debug "CDN: #{name} Redirecting from #{file_remote_url} to #{redirect_location}" + download_and_save_with_retries_async(partial_url, redirect_location, etag) + when 304 + debug "CDN: #{name} Relative path not modified: #{partial_url}" + # We need to update the file modification date, as it is later used for freshness + # optimization. See #initialize for more information. + FileUtils.touch path + partial_url + when 200 + File.open(path, 'w') { |f| f.write(response.response_body.force_encoding('UTF-8')) } + + etag_new = response.headers['etag'] unless response.headers.nil? + debug "CDN: #{name} Relative path downloaded: #{partial_url}, save ETag: #{etag_new}" + File.open(etag_path, 'w') { |f| f.write(etag_new) } unless etag_new.nil? + partial_url + when 404 + debug "CDN: #{name} Relative path couldn't be downloaded: #{partial_url} Response: #{response.response_code}" + nil + when 502, 503, 504 + # Retryable HTTP errors, usually related to server overloading + if retries <= 1 + raise Informative, "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.response_code} #{response.response_body}" + else + debug "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.response_code} #{response.response_body}, retries: #{retries - 1}" + exponential_backoff_async(retries).then do + download_and_save_with_retries_async(partial_url, file_remote_url, etag, retries - 1) + end + end + when 0 + # Non-HTTP errors, usually network layer + if retries <= 1 + raise Informative, "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.return_message}" + else + debug "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.return_message}, retries: #{retries - 1}" + exponential_backoff_async(retries).then do + download_and_save_with_retries_async(partial_url, file_remote_url, etag, retries - 1) + end + end + else + raise Informative, "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.response_code} #{response.response_body}" + end + end + + # Calling `Future#run` flattens the chained futures created by retries or redirects + # + # Does not, in fact, run the task - that is already happening in Hydra at this point + download_task.run + end + + def exponential_backoff_async(retries) + sleep_async(backoff_time(retries)) + end + + def backoff_time(retries) + current_retry = MAX_NUMBER_OF_RETRIES - retries + 4 * 2**current_retry + end + + def sleep_async(seconds) + # Async sleep to avoid blocking either the main or the Hydra thread + Promises.schedule_on(HYDRA_EXECUTOR, seconds) + end + + def download_typhoeus_impl_async(file_remote_url, etag) + require 'typhoeus' + + # Create a prefereably HTTP/2 request - the protocol is ultimately responsible for picking + # the maximum supported protocol + # When debugging with proxy, use the following extra options: + # :proxy => 'http://localhost:8888', + # :ssl_verifypeer => false, + # :ssl_verifyhost => 0, + request = Typhoeus::Request.new( + file_remote_url, + :method => :get, + 
:http_version => :httpv2_0, + :timeout => 10, + :connecttimeout => 10, + :accept_encoding => 'gzip', + :netrc => :optional, + :netrc_file => Netrc.default_path, + :headers => etag.nil? ? {} : { 'If-None-Match' => etag }, + ) + + future = Promises.resolvable_future_on(HYDRA_EXECUTOR) + queue_request(request) + request.on_complete do |response| + future.fulfill(response) + end + + # This `Future` should never reject, network errors are exposed on `Typhoeus::Response` + future + end + + def debug(message) + if defined?(Pod::UI) + Pod::UI.message(message) + else + CoreUI.puts(message) + end + end + + def concurrent_requests_catching_errors + yield + rescue MultipleErrors => e + # aggregated error message from `Concurrent` + errors = e.errors + raise Informative, "CDN: #{name} Repo update failed - #{e.errors.size} error(s):\n#{errors.join("\n")}" + end + + def queue_request(request) + @hydra ||= Typhoeus::Hydra.new + + # Queue the request into the Hydra (libcurl reactor). + @hydra.queue(request) + + # Cycle the reactor on a separate thread + # + # The way it works is that if more requests are queued while Hydra is in the `#run` + # method, it will keep executing them + # + # The upcoming calls to `#run` will simply run empty. + HYDRA_EXECUTOR.post(@hydra, &:run) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/core_ui.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/core_ui.rb new file mode 100644 index 0000000..d6c9377 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/core_ui.rb @@ -0,0 +1,19 @@ +module Pod + # Manages the UI output so dependent gems can customize it. + # + module CoreUI + def self.puts(message) + STDOUT.puts message + end + + def self.print(message) + STDOUT.print(message) + end + + def self.warn(message) + STDERR.puts message + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/dependency.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/dependency.rb new file mode 100644 index 0000000..e7d5a7b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/dependency.rb @@ -0,0 +1,406 @@ +module Pod + # The Dependency allows to specify dependencies of a {Podfile} or a + # {Specification} on a Pod. It stores the name of the dependency, version + # requirements and external sources information. + # + # This class is based on the dependency class of RubyGems and mimics its + # implementation with adjustments specific to CocoaPods. RubyGems is + # available under the + # [MIT license](https://github.com/rubygems/rubygems/blob/master/MIT.txt). + # + class Dependency + # @return [String] The name of the Pod described by this dependency. + # + attr_accessor :name + + # @return [Hash{Symbol=>String}] a hash describing the external source + # where the pod should be fetched. The external source has to + # provide its own {Specification} file. + # + attr_accessor :external_source + + # @return [String] The source URL of the podspec repo to use to resolve + # this dependency. If not set then the standard source list + # should be used to resolve the dependency. + attr_accessor :podspec_repo + + # @overload initialize(name, requirements) + # + # @param [String] name + # the name of the Pod. 
+ # + # @param [Array, Version, String, Requirement] requirements + # an array specifying the version requirements of the + # dependency. + # + # @example Initialization with version requirements. + # + # Dependency.new('AFNetworking') + # Dependency.new('AFNetworking', '~> 1.0') + # Dependency.new('AFNetworking', '>= 0.5', '< 0.7') + # + # @overload initialize(name, external_source) + # + # @param [String] name + # the name of the Pod. + # + # @param [Hash] external_source + # a hash describing the external source. + # + # @example Initialization with an external source. + # + # Dependency.new('libPusher', {:git => 'example.com/repo.git'}) + # Dependency.new('libPusher', {:path => 'path/to/folder'}) + # Dependency.new('libPusher', {:podspec => 'example.com/libPusher.podspec'}) + # + # @overload initialize(name, requirements, podspec_repo) + # + # @param [String] name + # the name of the Pod. + # + # @param [Array, Version, String, Requirement] requirements + # an array specifying the version requirements of the + # dependency. + # + # @param [Hash] podspec_repo + # The URL of the specific podspec repo to resolve this dependency from. + # + # @example Initialization with a specific podspec repo + # + # Dependency.new('Artsy+UILabels', '~> 1.0', :source => 'https://github.com/Artsy/Specs.git') + # + def initialize(name = nil, *requirements) + if requirements.last.is_a?(Hash) + additional_params = requirements.pop.select { |_, v| !v.nil? } + additional_params = nil if additional_params.empty? + + if additional_params && @podspec_repo = additional_params[:source] + # This dependency specifies the exact source podspec repo to use. + additional_params.delete(:source) + unless additional_params.empty? + raise Informative, 'A dependency with a specified podspec repo may ' \ + "not include other source parameters (#{name})." + end + elsif @external_source = additional_params + unless requirements.empty? + raise Informative, 'A dependency with an external source may not ' \ + "specify version requirements (#{name})." + end + end + + elsif requirements.last == :head + raise Informative, '`:head` dependencies have been removed. Please use ' \ + "normal external source dependencies (`:git => 'GIT_REPO_URL'`) " \ + "instead of `:head` for `#{name}`." + end + + if requirements.length == 1 && requirements.first.is_a?(Requirement) + requirements = requirements.first + end + @name = name + @requirement = Requirement.create(requirements) + end + + # @return [Version] whether the dependency points to a specific version. + # + attr_accessor :specific_version + + # @return [Requirement] the requirement of this dependency (a set of + # one or more version restrictions). + # + def requirement + @specific_requirement || @requirement + end + + # @param [Version] version the specific version to point to + # + def specific_version=(version) + @specific_version = version + @specific_requirement = if version + Requirement.new(Version.new(version.version)) + end + end + + # @return [Bool] whether the dependency points to a subspec. + # + def subspec_dependency? + @name.include?('/') + end + + # @return [Bool] whether the dependency points to an external source. + # + def external? + !@external_source.nil? + end + + # @return [Bool] whether the dependency points to a local path. + # + def local? + if external_source + external_source[:path] + end + end + + # Creates a new dependency with the name of the top level spec and the same + # version requirements. 
+ # + # @note This is used by the {Specification::Set} class to merge + # dependencies and resolve the required version of a Pod regardless + # what particular specification (subspecs or top level) is + # required. + # + # @todo This should not use `dup`. The `name` property should be an + # attr_reader. + # + # @return [Dependency] a dependency with the same versions requirements + # that is guaranteed to point to a top level specification. + # + def to_root_dependency + dep = dup + dep.name = root_name + dep + end + + # Returns the name of the Pod that the dependency is pointing to. + # + # @note In case this is a dependency for a subspec, e.g. + # 'RestKit/Networking', this returns 'RestKit', which is what the + # Pod::Source needs to know to retrieve the correct {Specification} + # from disk. + # + # @return [String] the name of the Pod. + # + def root_name + subspec_dependency? ? @name.split('/').first : @name + end + + # Checks if a dependency would be satisfied by the requirements of another + # dependency. + # + # @param [Dependency] other + # the other dependency. + # + # @note This is used by the Lockfile to check if a stored dependency is + # still compatible with the Podfile. + # + # @return [Bool] whether the dependency is compatible with the given one. + # + def compatible?(other) + return false unless name == other.name + return false unless external_source == other.external_source + + other.requirement.requirements.all? do |_operator, version| + requirement.satisfied_by? Version.new(version) + end + end + + # @return [Bool] whether the dependency is equal to another taking into + # account the loaded specification, the head options and the + # external source. + # + def ==(other) + self.class == other.class && + name == other.name && + external_source == other.external_source && + podspec_repo == other.podspec_repo && + requirement == other.requirement + end + alias_method :eql?, :== + + # @return [Fixnum] The hash value based on the name and on the + # requirements. + # + def hash + name.hash ^ requirement.hash + end + + # @return [Fixnum] How the dependency should be sorted respect to another + # one according to its name. + # + def <=>(other) + name <=> other.name + end + + # Merges the version requirements of the dependency with another one. + # + # @param [Dependency] other + # the other dependency to merge with. + # + # @note If one of the dependencies specifies an external source or is head, + # the resulting dependency preserves this attributes. + # + # @return [Dependency] a dependency (not necessarily a new instance) that + # also includes the version requirements of the given one. + # + def merge(other) + unless name == other.name + raise ArgumentError, "#{self} and #{other} have different names" + end + + default = Requirement.default + self_req = requirement + other_req = other.requirement + + req = if other_req == default + self_req + elsif self_req == default + other_req + else + self_req.as_list.concat(other_req.as_list) + end + + opts = {} + + if external_source || other.external_source + opts. + merge!(external_source || {}). 
+ merge!(other.external_source || {}) + + req_to_set = req + req = [] + end + + if podspec_repo && other.podspec_repo && podspec_repo != other.podspec_repo + raise ArgumentError, "#{self} and #{other} have different podspec repos" + end + + if repo = podspec_repo || other.podspec_repo + opts[:source] = repo + end + + self.class.new(name, *req, opts).tap do |dep| + dep.instance_variable_set(:@requirement, Requirement.create(req_to_set)) if req_to_set + end + end + + # Whether the dependency has any pre-release requirements + # + # @return [Bool] Whether the dependency has any pre-release requirements + # + def prerelease? + return @prerelease if defined?(@prerelease) + @prerelease = requirement.requirements.any? { |_op, version| version.prerelease? } + end + + # Checks whether the dependency would be satisfied by the specification + # with the given name and version. + # + # @param [String] + # The proposed name. + # + # @param [String, Version] version + # The proposed version. + # + # @return [Bool] Whether the dependency is satisfied. + # + def match?(name, version) + return false unless self.name == name + return true if requirement.none? + requirement.satisfied_by?(Version.new(version)) + end + + #-------------------------------------------------------------------------# + + # !@group String representation + + # Creates a string representation of the dependency suitable for + # serialization and de-serialization without loss of information. The + # string is also suitable for UI. + # + # @note This representation is used by the {Lockfile}. + # + # @example Output examples + # + # "libPusher" + # "libPusher (= 1.0)" + # "libPusher (~> 1.0.1)" + # "libPusher (> 1.0, < 2.0)" + # "libPusher (from `www.example.com')" + # "libPusher (defined in Podfile)" + # "RestKit/JSON" + # + # @return [String] the representation of the dependency. + # + def to_s + version = '' + if external? + version << external_source_description(external_source) + elsif requirement != Requirement.default + version << requirement.to_s + end + result = @name.dup + result << " (#{version})" unless version.empty? + result + end + + # Generates a dependency from its string representation. + # + # @param [String] string + # The string that describes the dependency generated from + # {#to_s}. + # + # @note The information about external sources is not completely + # serialized in the string representation and should be stored a + # part by clients that need to create a dependency equal to the + # original one. + # + # @return [Dependency] the dependency described by the string. + # + def self.from_string(string) + match_data = string.match(/((?:\s?[^\s(])+)( (?:.*))?/) + name = match_data[1] + version = match_data[2] + version = version.gsub(/[()]/, '') if version + case version + when nil, /from `(.*)(`|')/ + Dependency.new(name) + else + version_requirements = version.split(',') if version + Dependency.new(name, version_requirements) + end + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "<#{self.class} name=#{name} requirements=#{requirement} " \ + "source=#{podspec_repo || 'nil'} external_source=#{external_source || 'nil'}>" + end + + #--------------------------------------# + + private + + # Creates a string representation of the external source suitable for UI. 
+ # + # @example Output examples + # + # "from `www.example.com/libPusher.git', tag `v0.0.1'" + # "from `www.example.com/libPusher.podspec'" + # "from `~/path/to/libPusher'" + # + # @todo Improve the description for Mercurial and Subversion. + # + # @return [String] the description of the external source. + # + def external_source_description(source) + if source.key?(:git) + desc = "`#{source[:git]}`" + desc << ", commit `#{source[:commit]}`" if source[:commit] + desc << ", branch `#{source[:branch]}`" if source[:branch] + desc << ", tag `#{source[:tag]}`" if source[:tag] + elsif source.key?(:hg) + desc = "`#{source[:hg]}`" + elsif source.key?(:svn) + desc = "`#{source[:svn]}`" + elsif source.key?(:podspec) + desc = "`#{source[:podspec]}`" + elsif source.key?(:path) + desc = "`#{source[:path]}`" + else + desc = "`#{source}`" + end + "from #{desc}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/gem_version.rb new file mode 100644 index 0000000..1568677 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/gem_version.rb @@ -0,0 +1,5 @@ +module Pod + # The version of the cocoapods-core. + # + CORE_VERSION = '1.11.2'.freeze unless defined? Pod::CORE_VERSION +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/github.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/github.rb new file mode 100644 index 0000000..f93a193 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/github.rb @@ -0,0 +1,161 @@ +module Pod + # Allows to access information about the GitHub repos. + # + # This class is stored in Core because it might be used by web services. + # + module GitHub + # Returns the information of a user. + # + # @param [String] login + # The name of the user. + # + # @return [Hash] The data of user. + # + def self.user(login) + peform_request("https://api.github.com/users/#{login}") + end + + # Returns the information of a repo. + # + # @param [String] url + # The URL of the repo. + # + # @return [Hash] The hash containing the data as reported by GitHub. + # + def self.repo(url) + if repo_id = normalized_repo_id(url) + peform_request("https://api.github.com/repos/#{repo_id}") + end + end + + # Returns the tags of a repo. + # + # @param [String] url @see #repo + # + # @return [Array] The list of the tags. + # + def self.tags(url) + if repo_id = normalized_repo_id(url) + peform_request("https://api.github.com/repos/#{repo_id}/tags") + end + end + + # Returns the branches of a repo. + # + # @param [String] url @see #repo + # + # @return [Array] The list of the branches. + # + def self.branches(url) + if repo_id = normalized_repo_id(url) + peform_request("https://api.github.com/repos/#{repo_id}/branches") + end + end + + # Returns the contents of a file or directory in a repository. + # + # @param [String] url @see #repo + # + # @param [#to_s] path + # The path for which the contents are needed. + # + # @param [String] branch + # The branch for which to fetch the contents of the path. + # + # @return [Array] The list of the files and of the directories if the given + # path is a directory. + # + # @return [Hash] The contents of the file (usually base64 encoded). 
+ # + def self.contents(url, path = nil, branch = nil) + if repo_id = normalized_repo_id(url) + request_url = "https://api.github.com/repos/#{repo_id}/contents" + request_url << "/#{path}" if path + request_url << "?ref=#{branch}" if branch + peform_request(request_url) + end + end + + # Returns whether the repository has been updated since a given commit. + # If the request fails, the response will be true as the API is still in + # beta and likely to change. + # + # @param [String] url @see #repo + # + # @param [String] commit + # The current HEAD commit. + # + # @return [Bool] Whether the repository has been updated since the commit. + # + def self.modified_since_commit(url, commit) + return true unless repo_id = normalized_repo_id(url) + require 'rest' + request_url = "https://api.github.com/repos/#{repo_id}/commits/master" + headers = { + 'User-Agent' => 'CocoaPods', + 'Accept' => 'application/vnd.github.v3.sha', + 'If-None-Match' => %("#{commit}"), + } + + begin + response = REST.get(request_url, headers) + code = response.status_code + code != 304 + rescue + raise Informative, "Failed to connect to GitHub to update the #{repo_id} specs repo - Please check if you are offline, or that GitHub is down" + end + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------------# + + # Returns the repo ID as it is or converting a GitHub URL. + # + # @param [String] url_or_id + # A repo ID or the URL of the repo. + # + # @return [String] the repo ID. + # + def self.normalized_repo_id(url_or_id) + repo_id_from_url(url_or_id) || url_or_id + end + + # Returns the repo ID given it's URL. + # + # @param [String] url + # The URL of the repo. + # + # @return [String] the repo ID. + # @return [Nil] if the given url is not a valid github repo url. + # + def self.repo_id_from_url(url) + url[%r{github.com[/:]([^/]*/(?:(?!\.git)[^/])*)\.*}, 1] + end + + # Performs a get request with the given URL. + # + # @param [String] url + # The URL of the resource. + # + # @return [Array, Hash] The information of the resource as Ruby objects. + # + def self.peform_request(url) + require 'rest' + require 'json' + headers = { 'User-Agent' => 'CocoaPods' } + response = REST.get(url, headers) + body = JSON.parse(response.body) + if response.ok? + body + else + CoreUI.warn "Request to #{url} failed - #{response.status_code}" + CoreUI.warn body['message'] + nil + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/http.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/http.rb new file mode 100644 index 0000000..bf5f0b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/http.rb @@ -0,0 +1,86 @@ +require 'uri' + +module Pod + # Handles HTTP requests + # + module HTTP + # Resolve potential redirects and return the final URL. + # + # @return [string] + # + def self.get_actual_url(url, user_agent = nil) + redirects = 0 + + loop do + response = perform_head_request(url, user_agent) + + if [301, 302, 303, 307, 308].include? 
response.status_code + location = response.headers['location'].first + + if location =~ %r{://} + url = location + else + url = URI.join(url, location).to_s + end + + redirects += 1 + else + break + end + + break unless redirects < MAX_HTTP_REDIRECTS + end + + url + end + + # Performs validation of a URL + # + # @return [REST::response] + # + def self.validate_url(url, user_agent = nil) + return nil unless url =~ /^#{URI.regexp}$/ + + begin + url = get_actual_url(url, user_agent) + resp = perform_head_request(url, user_agent) + rescue SocketError, URI::InvalidURIError, REST::Error, REST::Error::Connection + resp = nil + end + + resp + end + + #-------------------------------------------------------------------------# + + private + + # Does a HEAD request and in case of any errors a GET request + # + # @return [REST::response] + # + def self.perform_head_request(url, user_agent) + require 'rest' + + user_agent ||= USER_AGENT + + resp = ::REST.head(url, 'User-Agent' => user_agent) + + if resp.status_code >= 400 + resp = ::REST.get(url, 'User-Agent' => user_agent, + 'Range' => 'bytes=0-0') + + if resp.status_code >= 400 + resp = ::REST.get(url, 'User-Agent' => user_agent) + end + end + + resp + end + + MAX_HTTP_REDIRECTS = 3 + USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/538.43.40 (KHTML, like Gecko) Version/8.0 Safari/538.43.40' + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/lockfile.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/lockfile.rb new file mode 100644 index 0000000..345a7f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/lockfile.rb @@ -0,0 +1,539 @@ +module Pod + # The Lockfile stores information about the pods that were installed by + # CocoaPods. + # + # It is used in combination with the Podfile to resolve the exact version of + # the Pods that should be installed (i.e. to prevent `pod install` from + # upgrading dependencies). + # + # Moreover it is used as a manifest of an installation to detect which Pods + # need to be installed or removed. + # + class Lockfile + # @todo The symbols should be converted to a String and back to symbol + # when reading (EXTERNAL SOURCES Download options) + + # @return [String] the hash used to initialize the Lockfile. + # + attr_reader :internal_data + + # @param [Hash] hash + # a hash representation of the Lockfile. + # + def initialize(hash) + @internal_data = hash + end + + # Loads a lockfile form the given path. + # + # @note This method returns nil if the given path doesn't exists. + # + # @raise If there is a syntax error loading the YAML data. + # + # @param [Pathname] path + # the path where the lockfile is serialized. + # + # @return [Lockfile] a new lockfile. + # + def self.from_file(path) + return nil unless path.exist? + hash = YAMLHelper.load_file(path) + unless hash && hash.is_a?(Hash) + raise Informative, "Invalid Lockfile in `#{path}`" + end + lockfile = Lockfile.new(hash) + lockfile.defined_in_file = path + lockfile + end + + # @return [String] the file where the Lockfile is serialized. + # + attr_accessor :defined_in_file + + # @return [Bool] Whether the Podfiles are equal. + # + def ==(other) + other && to_hash == other.to_hash + end + + # @return [String] a string representation suitable for debugging. 
+ # + def inspect + "#<#{self.class}>" + end + + #-------------------------------------------------------------------------# + + # !@group Accessing the Data + + public + + # @return [Array] the names of the installed Pods. + # + def pod_names + generate_pod_names_and_versions unless @pod_names + @pod_names + end + + # Returns the version of the given Pod. + # + # @param [String] pod_name The name of the Pod (root name of the specification). + # + # @return [Version] The version of the pod. + # + # @return [Nil] If there is no version stored for the given name. + # + def version(pod_name) + version = pod_versions[pod_name] + return version if version + root_name = pod_versions.keys.find do |name| + Specification.root_name(name) == pod_name + end + pod_versions[root_name] + end + + # Returns the source of the given Pod. + # + # @param [String] pod_name The name of the Pod (root name of the specification). + # + # @return [String] The source of the pod. + # + # @return [Nil] If there is no source stored for the given name. + # + def spec_repo(pod_name) + spec_repos_by_pod[pod_name] + end + + # Returns the checksum for the given Pod. + # + # @param [String] name The name of the Pod (root name of the specification). + # + # @return [String] The checksum of the specification for the given Pod. + # + # @return [Nil] If there is no checksum stored for the given name. + # + def checksum(name) + checksum_data[name] + end + + # @return [Array] the dependencies of the Podfile used for the + # last installation. + # + # @note It includes only the dependencies explicitly required in the + # podfile and not those triggered by the Resolver. + def dependencies + unless @dependencies + data = internal_data['DEPENDENCIES'] || [] + @dependencies = data.map do |string| + dep = Dependency.from_string(string) + dep.external_source = external_sources_data[dep.root_name] + dep + end + end + @dependencies + end + + # Returns pod names grouped by the spec repo they were sourced from. + # + # @return [Hash>] A hash, where the keys are spec + # repo source URLs (or names), and the values are arrays of pod names. + # + # @note It does not include pods that come from "external sources". + # + def pods_by_spec_repo + @pods_by_spec_repo ||= internal_data['SPEC REPOS'] || {} + end + + # Generates a dependency that requires the exact version of the Pod with the + # given name. + # + # @param [String] name + # the name of the Pod + # + # @note The generated dependencies used are by the Resolver from + # upgrading a Pod during an installation. + # + # @raise If there is no version stored for the given name. + # + # @return [Array] the generated dependency. + # + def dependencies_to_lock_pod_named(name) + deps = dependencies.select { |d| d.root_name == name } + if deps.empty? + raise StandardError, "Attempt to lock the `#{name}` Pod without a " \ + 'known dependency.' + end + + deps.map do |dep| + version = version(dep.name) + locked_dependency = dep.dup + locked_dependency.specific_version = version + locked_dependency + end + end + + # Returns the specific checkout options for the external source of the pod + # with the given name. + # + # @example Output + # {:commit => "919903db28535c3f387c4bbaa6a3feae4428e993" + # :git => "https://github.com/luisdelarosa/AFRaptureXMLRequestOperation.git"} + # + # @return [Hash] a hash of the checkout options for the external source of + # the pod with the given name. + # + # @param [String] name + # the name of the Pod. 
+ # + def checkout_options_for_pod_named(name) + checkout_options_data[name] + end + + # @return [Version] The version of CocoaPods which generated this lockfile. + # + def cocoapods_version + Version.new(internal_data['COCOAPODS']) + end + + #--------------------------------------# + + # !@group Accessing the internal data. + + private + + # @return [Array Array[String]}>] the pods installed + # and their dependencies. + # + def generate_pod_names_and_versions + @pod_names = [] + @pod_versions = {} + + return unless pods = internal_data['PODS'] + pods.each do |pod| + pod = pod.keys.first unless pod.is_a?(String) + name, version = Spec.name_and_version_from_string(pod) + @pod_names << name + @pod_versions[name] = version + end + end + + # @return [Hash{String => Hash}] a hash where the name of the pods are the + # keys and the values are the external source hash the dependency + # that required the pod. + # + def external_sources_data + @external_sources_data ||= internal_data['EXTERNAL SOURCES'] || {} + end + + # @return [Hash{String => Hash}] a hash where the name of the pods are the + # keys and the values are a hash of specific checkout options. + # + def checkout_options_data + @checkout_options_data ||= internal_data['CHECKOUT OPTIONS'] || {} + end + + # @return [Hash{String => Version}] a Hash containing the name of the root + # specification of the installed Pods as the keys and their + # corresponding {Version} as the values. + # + def pod_versions + generate_pod_names_and_versions unless @pod_versions + @pod_versions + end + + # @return [Hash{String => Version}] A Hash containing the checksums of the + # specification by the name of their root. + # + def checksum_data + internal_data['SPEC CHECKSUMS'] || {} + end + + # @return [Hash{String => String}] A hash containing the spec repo used for the specification + # by the name of the root spec. + # + def spec_repos_by_pod + @spec_repos_by_pod ||= pods_by_spec_repo.each_with_object({}) do |(spec_repo, pods), spec_repos_by_pod| + pods.each do |pod| + spec_repos_by_pod[pod] = spec_repo + end + end + end + + #-------------------------------------------------------------------------# + + # !@group Comparison with a Podfile + + public + + # Analyzes the {Lockfile} and detects any changes applied to the {Podfile} + # since the last installation. + # + # For each Pod, it detects one state among the following: + # + # - added: Pods that weren't present in the Podfile. + # - changed: Pods that were present in the Podfile but changed: + # - Pods whose version is not compatible anymore with Podfile, + # - Pods that changed their external options. + # - removed: Pods that were removed form the Podfile. + # - unchanged: Pods that are still compatible with Podfile. + # + # @param [Podfile] podfile + # the podfile that should be analyzed. + # + # @return [Hash{Symbol=>Array[Strings]}] a hash where pods are grouped + # by the state in which they are. + # + # @todo Why do we look for compatibility instead of just comparing if the + # two dependencies are equal? 
+ # + def detect_changes_with_podfile(podfile) + result = {} + [:added, :changed, :removed, :unchanged].each { |k| result[k] = [] } + + installed_deps = {} + dependencies.each do |dep| + name = dep.root_name + installed_deps[name] ||= dependencies_to_lock_pod_named(name) + end + + installed_deps = installed_deps.values.flatten(1).group_by(&:name) + + podfile_dependencies = podfile.dependencies + podfile_dependencies_by_name = podfile_dependencies.group_by(&:name) + + all_dep_names = (dependencies + podfile_dependencies).map(&:name).uniq + all_dep_names.each do |name| + installed_dep = installed_deps[name] + installed_dep &&= installed_dep.first + podfile_dep = podfile_dependencies_by_name[name] + podfile_dep &&= podfile_dep.first + + if installed_dep.nil? then key = :added + elsif podfile_dep.nil? then key = :removed + elsif podfile_dep.compatible?(installed_dep) then key = :unchanged + else key = :changed + end + result[key] << name + end + result + end + + #-------------------------------------------------------------------------# + + # !@group Serialization + + public + + # Writes the Lockfile to the given path. + # + # @param [Pathname] path + # the path where the lockfile should be saved. + # + # @return [void] + # + def write_to_disk(path) + path.dirname.mkpath unless path.dirname.exist? + self.defined_in_file = path + # rubocop:disable Lint/RescueException + # rubocop:disable Lint/HandleExceptions + begin + existing = Lockfile.from_file(path) + return if existing == self + rescue Exception + end + path.open('w') { |f| f.write(to_yaml) } + # rubocop:enable Lint/HandleExceptions + # rubocop:enable Lint/RescueException + end + + # @return [Hash{String=>Array,Hash,String}] a hash representation of the + # Lockfile. + # + # @example Output + # + # { + # 'PODS' => [ { BananaLib (1.0) => [monkey (< 1.0.9, ~> 1.0.1)] }, + # "JSONKit (1.4)", + # "monkey (1.0.8)"] + # 'DEPENDENCIES' => [ "BananaLib (~> 1.0)", + # "JSONKit (from `path/JSONKit.podspec`)" ], + # 'EXTERNAL SOURCES' => { "JSONKit" => { :podspec => path/JSONKit.podspec } }, + # 'SPEC CHECKSUMS' => { "BananaLib" => "439d9f683377ecf4a27de43e8cf3bce6be4df97b", + # "JSONKit", "92ae5f71b77c8dec0cd8d0744adab79d38560949" }, + # 'PODFILE CHECKSUM' => "439d9f683377ecf4a27de43e8cf3bce6be4df97b", + # 'COCOAPODS' => "0.17.0" + # } + # + # + def to_hash + hash = {} + internal_data.each do |key, value| + hash[key] = value unless value.nil? || value.empty? + end + hash + end + + # @return [Array] The order in which the hash keys should appear in + # a serialized Lockfile. + # + HASH_KEY_ORDER = [ + 'PODS', + 'DEPENDENCIES', + 'SPEC REPOS', + 'EXTERNAL SOURCES', + 'CHECKOUT OPTIONS', + 'SPEC CHECKSUMS', + 'PODFILE CHECKSUM', + 'COCOAPODS', + ].map(&:freeze).freeze + + # @return [String] the YAML representation of the Lockfile, used for + # serialization. + # + # @note Empty root keys are discarded. + # + # @note The YAML string is prettified. + # + def to_yaml + YAMLHelper.convert_hash(to_hash, HASH_KEY_ORDER, "\n\n") + end + + #-------------------------------------------------------------------------# + + class << self + # !@group Generation + + public + + # Generates a hash representation of the Lockfile generated from a given + # Podfile and the list of resolved Specifications. This representation is + # suitable for serialization. + # + # @param [Podfile] podfile + # the podfile that should be used to generate the lockfile. + # + # @param [Array] specs + # an array containing the podspec that were generated by + # resolving the given podfile. 
+ # + # @return [Lockfile] a new lockfile. + # + def generate(podfile, specs, checkout_options, spec_repos = {}) + hash = { + 'PODS' => generate_pods_data(specs), + 'DEPENDENCIES' => generate_dependencies_data(podfile), + 'SPEC REPOS' => generate_spec_repos(spec_repos), + 'EXTERNAL SOURCES' => generate_external_sources_data(podfile), + 'CHECKOUT OPTIONS' => checkout_options, + 'SPEC CHECKSUMS' => generate_checksums(specs), + 'PODFILE CHECKSUM' => podfile.checksum, + 'COCOAPODS' => CORE_VERSION, + } + Lockfile.new(hash) + end + + #--------------------------------------# + + private + + # !@group Private helpers + + # Generates the list of the installed Pods and their dependencies. + # + # @note The dependencies of iOS and OS X version of the same pod are + # merged. + # + # @todo Specifications should be stored per platform, otherwise they + # list dependencies which actually might not be used. + # + # @return [Array] the generated data. + # + # @example Output + # [ {"BananaLib (1.0)"=>["monkey (< 1.0.9, ~> 1.0.1)"]}, + # "monkey (1.0.8)" ] + # + # + def generate_pods_data(specs) + pods_and_deps_merged = specs.reduce({}) do |result, spec| + name = spec.to_s + result[name] ||= [] + result[name].concat(spec.all_dependencies.map(&:to_s)) + result + end + + pod_and_deps = pods_and_deps_merged.map do |name, deps| + deps.empty? ? name : { name => YAMLHelper.sorted_array(deps.uniq) } + end + YAMLHelper.sorted_array(pod_and_deps) + end + + # Generates the list of the dependencies of the Podfile. + # + # @example Output + # [ "BananaLib (~> 1.0)", + # "JSONKit (from `path/JSONKit.podspec')" ] + # + # @return [Array] the generated data. + # + def generate_dependencies_data(podfile) + YAMLHelper.sorted_array(podfile.dependencies.map(&:to_s)) + end + + # Generates the hash of spec repo sources used in the Podfile. + # + # @example Output + # { "https://github.com/cocoapods/cocoapods.git" => ["Alamofire", "Moya"] } + # + def generate_spec_repos(spec_repos) + Hash[spec_repos.map do |source, specs| + next unless source + next if specs.empty? + key = source.url || source.name + + # save `trunk` as 'trunk' so that the URL itself can be changed without lockfile churn + key = Pod::TrunkSource::TRUNK_REPO_NAME if source.name == Pod::TrunkSource::TRUNK_REPO_NAME + + value = specs.map { |s| s.root.name }.uniq + [key, YAMLHelper.sorted_array(value)] + end.compact] + end + + # Generates the information of the external sources. + # + # @example Output + # { "JSONKit"=>{:podspec=>"path/JSONKit.podspec"} } + # + # @return [Hash] a hash where the keys are the names of the pods and + # the values store the external source hashes of each + # dependency. + # + def generate_external_sources_data(podfile) + deps = podfile.dependencies.select(&:external?) + deps = deps.sort { |d, other| d.name <=> other.name } + sources = {} + deps.each { |d| sources[d.root_name] = d.external_source } + sources + end + + # Generates the relative to the checksum of the specifications. + # + # @example Output + # { + # "BananaLib"=>"9906b267592664126923875ce2c8d03824372c79", + # "JSONKit"=>"92ae5f71b77c8dec0cd8d0744adab79d38560949" + # } + # + # @return [Hash] a hash where the keys are the names of the root + # specifications and the values are the SHA1 digest of the + # podspec file. 
+ # + def generate_checksums(specs) + checksums = {} + specs.select(&:defined_in_file).each do |spec| + checksums[spec.root.name] = spec.checksum + end + checksums + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/metrics.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/metrics.rb new file mode 100644 index 0000000..814008f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/metrics.rb @@ -0,0 +1,47 @@ +module Pod + # Allows to access metrics about pods. + # + # This class is stored in Core because it might be used by web services. + # + module Metrics + # Returns the metrics of a pod. + # + # @param [String] name + # The name of the pod. + # + # @return [Hash] The metrics for the pod. + # + def self.pod(name) + peform_request("http://metrics.cocoapods.org/api/v1/pods/#{name}") + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------------# + + # Performs a get request with the given URL. + # + # @param [String] url + # The URL of the resource. + # + # @return [Array, Hash] The information of the resource as Ruby objects. + # + def self.peform_request(url) + require 'rest' + require 'json' + headers = { 'User-Agent' => "CocoaPods #{Pod::CORE_VERSION}" } + response = REST.get(url, headers) + body = JSON.parse(response.body) + if response.ok? + body + else + CoreUI.warn "Request to #{url} failed - #{response.status_code}" + CoreUI.warn body['message'] + nil + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/platform.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/platform.rb new file mode 100644 index 0000000..abc81e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/platform.rb @@ -0,0 +1,246 @@ +module Pod + # A Platform describes an SDK name and deployment target. + # + class Platform + # @return [Symbol, String] the name of the SDK represented by the platform. + # + attr_reader :symbolic_name + alias_method :name, :symbolic_name + + # @return [Version] the deployment target of the platform. + # + attr_reader :deployment_target + + # Constructs a platform from either another platform or by + # specifying the symbolic name and optionally the deployment target. + # + # @overload initialize(name, deployment_target) + # + # @param [Symbol, String] name + # the name of platform. + # + # @param [String, Version] deployment_target + # the optional deployment. + # + # @note If the deployment target is not provided a default deployment + # target will not be assigned. + # + # @example Initialization with symbol + # + # Platform.new(:ios) + # Platform.new(:ios, '4.3') + # + # @overload initialize(platform) + # + # @param [Platform] platform + # Another {Platform}. + # + # @example Initialization with another platform + # + # platform = Platform.new(:ios) + # Platform.new(platform) + # + def initialize(input, target = nil) + if input.is_a? 
Platform + @symbolic_name = input.name + @deployment_target = input.deployment_target + else + # Allow `Platform.new('macos')` to be equivalent to `Platform.macos` + if input == 'macos' + input = 'osx' + end + @symbolic_name = input.to_sym + target = target[:deployment_target] if target.is_a?(Hash) + @deployment_target = Version.create(target) + end + end + + # Convenience method to initialize an iOS platform. + # + # @return [Platform] an iOS platform. + # + def self.ios + new :ios + end + + # Convenience method to initialize an OS X platform. + # + # @return [Platform] an OS X platform. + # + def self.osx + new :osx + end + + # Convenience method to initialize a macOS platform. + # + # @return [Platform] a macOS platform. + # + def self.macos + osx + end + + # Convenience method to initialize a tvOS platform. + # + # @return [Platform] a tvOS platform. + # + def self.tvos + new :tvos + end + + # Convenience method to initialize a watchOS platform. + # + # @return [Platform] a watchOS platform. + # + def self.watchos + new :watchos + end + + # Convenience method to get all available platforms. + # + # @return [Array] list of platforms. + # + def self.all + [ios, osx, watchos, tvos] + end + + # Checks if a platform is equivalent to another one or to a symbol + # representation. + # + # @param [Platform, Symbol] other + # the other platform to check. + # + # @note If a symbol is passed the comparison does not take into account + # the deployment target. + # + # @return [Boolean] whether two platforms are the equivalent. + # + def ==(other) + if other.is_a?(Symbol) + @symbolic_name == other + else + (name == other.name) && (deployment_target == other.deployment_target) + end + end + + # (see #==) + alias_method :eql?, :== + + # Hashes the instance by the platform name and deployment target. + # + # This adds support to make instances usable as Hash keys. + # + # @!visibility private + def hash + name.hash ^ deployment_target.hash + end + + # Checks whether a platform supports another one. + # + # In the context of operating system SDKs, a platform supports another + # one if they have the same name and the other platform has a minor or + # equal deployment target. + # + # @return [Bool] whether the platform supports another platform. + # + def supports?(other) + other = Platform.new(other) + if other.deployment_target && deployment_target + (other.name == name) && (other.deployment_target <= deployment_target) + else + other.name == name + end + end + + # @return [String] a string representation that includes the deployment + # target. + # + def to_s + s = self.class.string_name(@symbolic_name) + s << " #{deployment_target}" if deployment_target + s + end + + # @return [String] the debug representation. + # + def inspect + "#<#{self.class.name} name=#{name.inspect} " \ + "deployment_target=#{deployment_target.inspect}>" + end + + # @return [Symbol] a symbol representing the name of the platform. + # + def to_sym + name + end + + # Compares the platform first by name and the by deployment_target for + # sorting. + # + # @param [Platform] other + # The other platform to compare. + # + # @return [Fixnum] -1, 0, or +1 depending on whether the receiver is less + # than, equal to, or greater than other. + # + def <=>(other) + name_sort = name.to_s <=> other.name.to_s + if name_sort.zero? + deployment_target <=> other.deployment_target + else + name_sort + end + end + + # @return [Bool] whether the platform requires legacy architectures for + # iOS. + # + def requires_legacy_ios_archs? 
+ if name == :ios + deployment_target && (deployment_target < Version.new('4.3')) + else + false + end + end + + # @return [Bool] whether the platform supports dynamic frameworks. + # + def supports_dynamic_frameworks? + if name == :ios + deployment_target && (deployment_target >= Version.new(8.0)) + else + true + end + end + + # @return [String] The string that describes the #symbolic_name. + # + def string_name + self.class.string_name(symbolic_name) + end + + # @return [String] The string that describes the #symbolic_name, + # which doesn't contain spaces and is so safe to use in + # paths which might not be quoted or escaped consequently. + def safe_string_name + string_name.tr(' ', '') + end + + # Converts the symbolic name of a platform to a string name suitable to be + # presented to the user. + # + # @param [Symbol] symbolic_name + # the symbolic name of a platform. + # + # @return [String] The string that describes the name of the given symbol. + # + def self.string_name(symbolic_name) + case symbolic_name + when :ios then 'iOS' + when :osx then 'macOS' + when :watchos then 'watchOS' + when :tvos then 'tvOS' + else symbolic_name.to_s + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile.rb new file mode 100644 index 0000000..427d00e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile.rb @@ -0,0 +1,436 @@ +require 'cocoapods-core/podfile/dsl' +require 'cocoapods-core/podfile/target_definition' + +module Pod + # The Podfile is a specification that describes the dependencies of the + # targets of an Xcode project. + # + # It supports its own DSL and is stored in a file named `Podfile`. + # + # The Podfile creates a hierarchy of target definitions that store the + # information necessary to generate the CocoaPods libraries. + # + class Podfile + # @!group DSL support + + include Pod::Podfile::DSL + + #-------------------------------------------------------------------------# + + class StandardError < ::StandardError; end + + #-------------------------------------------------------------------------# + + # @return [Pathname] the path used to load the Podfile. It is nil + # if the Podfile was generated programmatically. + # + attr_accessor :defined_in_file + + # @param [Pathname] defined_in_file + # the path of the podfile. + # + # @param [Proc] block + # an optional block that configures the Podfile through the DSL. + # + # @example Creating a Podfile. + # + # platform :ios, "6.0" + # target :my_app do + # pod "AFNetworking", "~> 1.0" + # end + # + def initialize(defined_in_file = nil, internal_hash = {}, &block) + self.defined_in_file = defined_in_file + @internal_hash = internal_hash + if block + default_target_def = TargetDefinition.new('Pods', self) + default_target_def.abstract = true + @root_target_definitions = [default_target_def] + @current_target_definition = default_target_def + instance_eval(&block) + else + @root_target_definitions = [] + end + end + + # @return [String] a string useful to represent the Podfile in a message + # presented to the user. + # + def to_s + 'Podfile' + end + + #-------------------------------------------------------------------------# + + public + + # @!group Working with a Podfile + + # @return [Hash{Symbol,String => TargetDefinition}] the target definitions + # of the Podfile stored by their name. 
+ # + def target_definitions + Hash[target_definition_list.map { |td| [td.name, td] }] + end + + # @return [Array] all target definitions in the Podfile. + # + def target_definition_list + root_target_definitions.map { |td| [td, td.recursive_children] }.flatten + end + + # @return [Array] The root target definitions. + # + attr_accessor :root_target_definitions + + # @return [Array] the dependencies of all of the target + # definitions. + # + def dependencies + target_definition_list.map(&:dependencies).flatten.uniq + end + + #-------------------------------------------------------------------------# + + public + + # @!group Attributes + + # @return [Array] The names of the sources. + # + def sources + get_hash_value('sources') || [] + end + + # @return [Hash] The plugins, keyed by name. + # + def plugins + get_hash_value('plugins') || {} + end + + # @return [String] the path of the workspace if specified by the user. + # + def workspace_path + path = get_hash_value('workspace') + if path + if File.extname(path) == '.xcworkspace' + path + else + "#{path}.xcworkspace" + end + end + end + + # @return [Bool] whether the podfile should generate a BridgeSupport + # metadata document. + # + def generate_bridge_support? + get_hash_value('generate_bridge_support') + end + + # @return [Bool] whether the -fobjc-arc flag should be added to the + # OTHER_LD_FLAGS. + # + def set_arc_compatibility_flag? + get_hash_value('set_arc_compatibility_flag') + end + + # @return [(String,Hash)] the installation strategy and installation options + # to be used during installation. + # + def installation_method + get_hash_value('installation_method', 'name' => 'cocoapods', 'options' => {}). + values_at('name', 'options') + end + + #-------------------------------------------------------------------------# + + public + + # @!group Hooks + + # Calls the pre install callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Bool] whether a pre install callback was specified and it was + # called. + # + def pre_install!(installer) + if @pre_install_callback + @pre_install_callback.call(installer) + true + else + false + end + end + + # Calls the pre integrate callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Bool] whether a pre integrate callback was specified and it was + # called. + # + def pre_integrate!(installer) + if @pre_integrate_callback + @pre_integrate_callback.call(installer) + true + else + false + end + end + + # Calls the post install callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Bool] whether a post install callback was specified and it was + # called. + # + def post_install!(installer) + if @post_install_callback + @post_install_callback.call(installer) + true + else + false + end + end + + # Calls the post integrate callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Bool] whether a post install callback was specified and it was + # called. 
+ # + def post_integrate!(installer) + if @post_integrate_callback + @post_integrate_callback.call(installer) + true + else + false + end + end + + #-------------------------------------------------------------------------# + + public + + # @!group Representations + + # @return [Array] The keys used by the hash representation of the Podfile. + # + HASH_KEYS = %w( + installation_method + workspace + sources + plugins + set_arc_compatibility_flag + generate_bridge_support + target_definitions + ).freeze + + # @return [Hash] The hash representation of the Podfile. + # + def to_hash + hash = {} + hash['target_definitions'] = root_target_definitions.map(&:to_hash) + hash.merge!(internal_hash) + hash + end + + # @return [String] The YAML representation of the Podfile. + # + def to_yaml + require 'cocoapods-core/yaml_helper' + "---\n" << YAMLHelper.convert_hash(to_hash, HASH_KEYS) + end + + # @return [String] The SHA1 digest of the file in which the Podfile + # is defined. + # + # @return [Nil] If the podfile is not defined in a file. + # + def checksum + @checksum ||= begin + unless defined_in_file.nil? + require 'digest' + checksum = Digest::SHA1.hexdigest(File.read(defined_in_file)) + checksum = checksum.encode('UTF-8') if checksum.respond_to?(:encode) + checksum + end + end + end + + def ==(other) + self.class == other.class && + to_hash == other.to_hash + end + + # @!group Class methods + #-------------------------------------------------------------------------# + + # Initializes a Podfile from the file with the given path. + # + # @param [Pathname] path + # the path from where the Podfile should be loaded. + # + # @return [Podfile] the generated Podfile. + # + def self.from_file(path) + path = Pathname.new(path) + unless path.exist? + raise Informative, "No Podfile exists at path `#{path}`." + end + + case path.extname + when '', '.podfile', '.rb' + Podfile.from_ruby(path) + when '.yaml' + Podfile.from_yaml(path) + else + raise Informative, "Unsupported Podfile format `#{path}`." + end + end + + # Configures a new Podfile from the given ruby string. + # + # @param [Pathname] path + # The path from which the Podfile is loaded. + # + # @param [String] contents + # The ruby string which will configure the Podfile with the DSL. + # + # @return [Podfile] the new Podfile + # + def self.from_ruby(path, contents = nil) + contents ||= File.open(path, 'r:utf-8', &:read) + + # Work around for Rubinius incomplete encoding in 1.9 mode + if contents.respond_to?(:encoding) && contents.encoding.name != 'UTF-8' + contents.encode!('UTF-8') + end + + if contents.tr!('“”‘’‛', %(""''')) + # Changes have been made + CoreUI.warn "Smart quotes were detected and ignored in your #{path.basename}. " \ + 'To avoid issues in the future, you should not use ' \ + 'TextEdit for editing it. If you are not using TextEdit, ' \ + 'you should turn off smart quotes in your editor of choice.' + end + + podfile = Podfile.new(path) do + # rubocop:disable Lint/RescueException + begin + # rubocop:disable Security/Eval + eval(contents, nil, path.to_s) + # rubocop:enable Security/Eval + rescue Exception => e + message = "Invalid `#{path.basename}` file: #{e.message}" + raise DSLError.new(message, path, e, contents) + end + # rubocop:enable Lint/RescueException + end + podfile + end + + # Configures a new Podfile from the given YAML representation. + # + # @param [Pathname] path + # The path from which the Podfile is loaded. 
+ # + # @return [Podfile] the new Podfile + # + def self.from_yaml(path) + string = File.open(path, 'r:utf-8', &:read) + # Work around for Rubinius incomplete encoding in 1.9 mode + if string.respond_to?(:encoding) && string.encoding.name != 'UTF-8' + string.encode!('UTF-8') + end + hash = YAMLHelper.load_string(string) + from_hash(hash, path) + end + + # Configures a new Podfile from the given hash. + # + # @param [Hash] hash + # The hash which contains the information of the Podfile. + # + # @param [Pathname] path + # The path from which the Podfile is loaded. + # + # @return [Podfile] the new Podfile + # + def self.from_hash(hash, path = nil) + internal_hash = hash.dup + target_definitions = internal_hash.delete('target_definitions') || [] + podfile = Podfile.new(path, internal_hash) + target_definitions.each do |definition_hash| + definition = TargetDefinition.from_hash(definition_hash, podfile) + podfile.root_target_definitions << definition + end + podfile + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Hash] The hash which store the attributes of the Podfile. + # + attr_accessor :internal_hash + + # Set a value in the internal hash of the Podfile for the given key. + # + # @param [String] key + # The key for which to store the value. + # + # @param [Object] value + # The value to store. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [void] + # + def set_hash_value(key, value) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash[key] = value + end + + # Returns the value for the given key in the internal hash of the Podfile. + # + # @param [String] key + # The key for which the value is needed. + # + # @param default + # The default value to return if the internal hash has no entry for + # the given `key`. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [Object] The value for the key. + # + def get_hash_value(key, default = nil) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash.fetch(key, default) + end + + # @return [TargetDefinition] The current target definition to which the DSL + # commands apply. + # + attr_accessor :current_target_definition + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile/dsl.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile/dsl.rb new file mode 100644 index 0000000..5bdfe4a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile/dsl.rb @@ -0,0 +1,997 @@ +module Pod + class Podfile + # The methods defined in this file and the order of the methods is + # relevant for the documentation generated on + # https://github.com/CocoaPods/guides.cocoapods.org. + + # The Podfile is a specification that describes the dependencies of the + # targets of one or more Xcode projects. + # + # A Podfile can be very simple: + # + # target 'MyApp' + # pod 'AFNetworking', '~> 1.0' + # + # An example of a more complex Podfile can be: + # + # platform :ios, '9.0' + # inhibit_all_warnings! + # + # target 'MyApp' do + # pod 'ObjectiveSugar', '~> 0.5' + # + # target 'MyAppTests' do + # inherit! 
:search_paths + # pod 'OCMock', '~> 2.0.1' + # end + # end + # + # post_install do |installer| + # installer.pods_project.targets.each do |target| + # puts "#{target.name}" + # end + # end + # + module DSL + # @!group Root Options + # Configuration that applies to the Podfile as a whole. + # + # * `install!` declares the installation method and options to be used + # during installation. + + # Specifies the installation method and options to be used when + # CocoaPods installs this Podfile. + # + # The first parameter indicates the installation method to use; + # next parameters indicate installation options. + # + # For now the only accepted installation method is `'cocoapods'`, so + # you'll always use this value for the first parameter; but more + # installation methods might be available in future versions. + # + # @param [String] installation_method + # the name of the installation strategy. + # + # @param [Hash] options + # the installation options. + # + # @example Specifying custom CocoaPods installation options + # + # install! 'cocoapods', + # :deterministic_uuids => false, + # :integrate_targets => false + # + # @return [void] + # + def install!(installation_method, options = {}) + unless current_target_definition.root? + raise Informative, 'The installation method can only be set at the root level of the Podfile.' + end + + set_hash_value('installation_method', 'name' => installation_method, 'options' => options) + end + + # Raises a warning when CocoaPods is run using the Global Gemset. + # A Semantic version can be supplied to warn if the bundler version + # does not match the required version. + # + # @param [String] version + # The required bundler version, in semantic version format. + # + # @example + # + # ensure_bundler! + # + # @example + # + # ensure_bundler! '~> 2.0.0' + # + # @return [void] + # + def ensure_bundler!(version = nil) + unless current_target_definition.root? + raise Informative, 'The Ensure Bundler check can only be set at the root level of the Podfile.' + end + unless %w(BUNDLE_BIN_PATH BUNDLE_GEMFILE).all? { |key| ENV.key?(key) } + raise Informative, "CocoaPods was invoked from Global Gemset.\nPlease re-run using: `bundle exec pod #{ARGV.join(' ')}`" + end + unless ENV['BUNDLER_VERSION'].nil? || Requirement.create(version).satisfied_by?(Version.new(ENV['BUNDLER_VERSION'])) + raise Informative, "The installed Bundler version: #{ENV['BUNDLER_VERSION']} does not match the required version: #{version}" + end + end + + #-----------------------------------------------------------------------# + + # @!group Dependencies + # The Podfile specifies the dependencies of each user target. + # + # * `pod` is the way to declare a specific dependency. + # * `podspec` provides an easy API for the creation of podspecs. + # * `target` is how you scope your dependencies to specific + # targets in your Xcode projects. + + #-----------------------------------------------------------------------# + + # Specifies a dependency of the project. + # + # A dependency requirement is defined by the name of the Pod and + # optionally a list of version requirements. + # + # When starting out with a project it is likely that you will want to use + # the latest version of a Pod. If this is the case, simply omit the + # version requirements. + # + # pod 'SSZipArchive' + # + # Later on in the project you may want to freeze to a specific version of + # a Pod, in which case you can specify that version number. 
+ # + # pod 'Objection', '0.9' + # + # Besides no version, or a specific one, it is also possible to use + # operators: + # + # * `= 0.1` Version 0.1. + # * `> 0.1` Any version higher than 0.1. + # * `>= 0.1` Version 0.1 and any higher version. + # * `< 0.1` Any version lower than 0.1. + # * `<= 0.1` Version 0.1 and any lower version. + # * `~> 0.1.2` Version 0.1.2 and the versions up to 0.2, not including 0.2. + # This operator works based on _the last component_ that you + # specify in your version requirement. The example is equal to + # `>= 0.1.2` combined with `< 0.2.0` and will always match the + # latest known version matching your requirements. + # * `~> 0` Version 0 and the versions up to 1, not including 1. + # * `~> 0.1.3-beta.0` Beta and release versions for 0.1.3, release versions + # up to 0.2 excluding 0.2. Components separated by a dash (-) + # will not be considered for the version requirement. + # + # A list of version requirements can be specified for even more fine + # grained control. + # + # For more information, regarding versioning policy, see: + # + # * [Semantic Versioning](http://semver.org) + # * [RubyGems Versioning Policies](http://guides.rubygems.org/patterns/#semantic-versioning) + # + # ------ + # + # ### Build configurations + # + # By default dependencies are installed in all the build configurations + # of the target. For debug purposes or for other reasons, they can be + # only enabled on a list of build configurations. + # + # pod 'PonyDebugger', :configurations => ['Debug', 'Beta'] + # + # Alternatively, you can specify to have it included on a single build + # configuration. + # + # pod 'PonyDebugger', :configuration => 'Debug' + # + # Note that transitive dependencies are included in all configurations + # and you have to manually specify build configurations for them as well in + # case this is not desired. + # + # ------ + # + # ### Modular Headers + # + # If you would like to use modular headers per Pod you can use the + # following syntax: + # + # pod 'SSZipArchive', :modular_headers => true + # + # Additionally, when you use the `use_modular_headers!` attribute, + # you can exclude a particular Pod from modular headers using the following: + # + # pod 'SSZipArchive', :modular_headers => false + # + # ------ + # + # ### Source + # + # By default the sources specified at the global level are searched in the order + # they are specified for a dependency match. This behaviour can be altered + # for a specific dependency by specifying the source with the dependency: + # + # pod 'PonyDebugger', :source => 'https://github.com/CocoaPods/Specs.git' + # + # In this case only the specified source will be searched for the dependency + # and any global sources ignored. + # + # ------ + # + # ### Subspecs + # + # When installing a Pod via its name, it will install all of the + # default subspecs defined in the podspec. + # + # You may install a specific subspec using the following: + # + # pod 'QueryKit/Attribute' + # + # You may specify a collection of subspecs to be installed as follows: + # + # pod 'QueryKit', :subspecs => ['Attribute', 'QuerySet'] + # + # ### Test Specs + # + # Test specs can be optionally included via the `:testspecs` option. By default, + # none of a Pod's test specs are included. 
+ # + # You may specify a list of test spec names to install using the following: + # + # pod 'AFNetworking', :testspecs => ['UnitTests', 'SomeOtherTests'] + # + # The values provided to `:testspecs` correspond to the name provided to the + # `test_spec` DSL attribute in a Podspec. + # + # ------ + # + # Dependencies can be obtained also from external sources. + # + # + # ### Using the files from a local path. + # + # If you would like to use develop a Pod in tandem with its client + # project you can use the `path` option. + # + # pod 'AFNetworking', :path => '~/Documents/AFNetworking' + # + # Using this option CocoaPods will assume the given folder to be the + # root of the Pod and will link the files directly from there in the + # Pods project. This means that your edits will persist to CocoaPods + # installations. + # + # The referenced folder can be a checkout of your your favourite SCM or + # even a git submodule of the current repository. + # + # Note that the `podspec` of the Pod file is expected to be in the + # folder. + # + # + # ### From a podspec in the root of a library repository. + # + # Sometimes you may want to use the bleeding edge version of a Pod. Or a + # specific revision. If this is the case, you can specify that with your + # pod declaration. + # + # To use the `master` branch of the repository: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git' + # + # + # To use a different branch of the repository: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :branch => 'dev' + # + # + # To use a tag of the repository: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :tag => '0.7.0' + # + # + # Or specify a commit: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :commit => '082f8319af' + # + # It is important to note, though, that this means that the version will + # have to satisfy any other dependencies on the Pod by other Pods. + # + # The `podspec` file is expected to be in the root of the repository, + # if this library does not have a `podspec` file in its repository + # yet, you will have to use one of the approaches outlined in the + # sections below. + # + # + # ### From a podspec outside a spec repository, for a library without podspec. + # + # If a podspec is available from another source outside of the library’s + # repository. Consider, for instance, a podspec available via HTTP: + # + # pod 'JSONKit', :podspec => 'https://example.com/JSONKit.podspec' + # + # + # @note This method allow a nil name and the raises to be more + # informative. + # + # @return [void] + # + def pod(name = nil, *requirements) + unless name + raise StandardError, 'A dependency requires a name.' + end + + current_target_definition.store_pod(name, *requirements) + end + + # Use just the dependencies of a Pod defined in the given podspec file. + # If no arguments are passed the first podspec in the root of the Podfile + # is used. It is intended to be used by the project of a library. Note: + # this does not include the sources derived from the podspec just the + # CocoaPods infrastructure. + # + # @example + # podspec + # + # @example + # podspec :name => 'QuickDialog' + # + # @example + # podspec :path => '/Documents/PrettyKit/PrettyKit.podspec' + # + # @param [Hash {Symbol=>String}] options + # the path where to load the {Specification}. If not provided + # the first podspec in the directory of the Podfile is used. 
+ # + # @option options [String] :path + # the path of the podspec file + # + # @option options [String] :name + # the name of the podspec + # + # @note This method uses the dependencies declared for the + # platform of the target definition. + # + # + # @note This method requires that the Podfile has a non nil value for + # {#defined_in_file} unless the path option is used. + # + # @return [void] + # + def podspec(options = nil) + current_target_definition.store_podspec(options) + end + + # Defines a CocoaPods target and scopes dependencies defined + # within the given block. A target should correspond to an Xcode target. + # By default the target includes the dependencies defined outside of + # the block, unless instructed not to `inherit!` them. + # + # @param [Symbol, String] name + # the name of the target. + # + # @example Defining a target + # + # target 'ZipApp' do + # pod 'SSZipArchive' + # end + # + # @example Defining a test target accessing SSZipArchive pod from its parent + # + # target 'ZipApp' do + # pod 'SSZipArchive' + # + # target 'ZipAppTests' do + # inherit! :search_paths + # pod 'Nimble' + # end + # end + # + # @example Defining a target applies Pods to multiple targets via its parent target + # + # target 'ShowsApp' do + # pod 'ShowsKit' + # + # # Has its own copy of ShowsKit + ShowTVAuth + # target 'ShowsTV' do + # pod 'ShowTVAuth' + # end + # + # # Has its own copy of Specta + Expecta + # # and has access to ShowsKit via the app + # # that the test target is bundled into + # + # target 'ShowsTests' do + # inherit! :search_paths + # pod 'Specta' + # pod 'Expecta' + # end + # end + # + # @return [void] + # + def target(name, options = nil) + if options + raise Informative, "Unsupported options `#{options}` for " \ + "target `#{name}`." + end + + parent = current_target_definition + definition = TargetDefinition.new(name, parent) + self.current_target_definition = definition + yield if block_given? + ensure + self.current_target_definition = parent + end + + # Adds a script phase to be integrated with this target. A script phase can be used to execute an arbitrary + # script that can use all Xcode environment variables during execution. A target may include multiple script + # phases which they will be added in the order they were declared. Deleting a script phase will effectively remove + # it from the target if it has been added previously. + # + # @example + # script_phase :name => 'HelloWorldScript', :script => 'echo "Hello World"' + # + # @example + # script_phase :name => 'HelloWorldScript', :script => 'puts "Hello World"', :shell_path => '/usr/bin/ruby' + # + # @param [Hash] options + # the options for this script phase. + # + # @option options [String] :name + # the name of the script phase. This option is required. + # + # @option options [String] :script + # the body of the script to execute. This option is required. + # + # @option options [String] :shell_path + # the shell path to use for this script phase, for example `/usr/bin/ruby` to use Ruby for this phase. + # + # @option options [Array] :input_files + # the input paths to use for this script phase. This is used by Xcode to determine whether to re-execute + # this script phase if the input paths have changed or not. + # + # @option options [Array] :output_files + # the output paths to use for this script phase. This is used by Xcode to avoid re-executing this script + # phase if none of the output paths have changed. 
+ # + # @option options [Array] :input_file_lists + # the input file lists to use for this script phase. This is used by Xcode to determine whether to + # re-execute this script phase if the input paths have changed or not. + # + # @option options [Array] :output_file_lists + # the output file lists to use for this script phase. This is used by Xcode to avoid re-executing this + # script phase if none of the output paths have changed. + # + # @option options [Boolean] :show_env_vars_in_log + # whether this script phase should output the environment variables during execution. + # + # @option options [Symbol] :execution_position + # specifies the position of which this script phase should be executed. The currently supported values are: + # `:before_compile`, `:after_compile` and `:any` which is the default. + # + # @option options [String] :dependency_file + # specifies the dependency file to use for this script phase. + # + # @return [void] + # + def script_phase(options) + raise Informative, 'Script phases can only be added within target definitions.' if current_target_definition.root? + raise Informative, 'Script phases cannot be added to abstract targets.' if current_target_definition.abstract? + current_target_definition.store_script_phase(options) + end + + # Defines a new abstract target that can be used for convenient + # target dependency inheritance. + # + # @param [Symbol, String] name + # the name of the target. + # + # @example Defining an abstract target + # + # abstract_target 'Networking' do + # pod 'AlamoFire' + # + # target 'Networking App 1' + # target 'Networking App 2' + # end + # + # @example Defining an abstract_target wrapping Pods to multiple targets + # + # # Note: There are no targets called "Shows" in any of this workspace's Xcode projects + # abstract_target 'Shows' do + # pod 'ShowsKit' + # + # # The target ShowsiOS has its own copy of ShowsKit (inherited) + ShowWebAuth (added here) + # target 'ShowsiOS' do + # pod 'ShowWebAuth' + # end + # + # # The target ShowsTV has its own copy of ShowsKit (inherited) + ShowTVAuth (added here) + # target 'ShowsTV' do + # pod 'ShowTVAuth' + # end + # + # # Our tests target has its own copy of + # # our testing frameworks, and has access + # # to ShowsKit as well because it is + # # a child of the abstract target 'Shows' + # + # target 'ShowsTests' do + # inherit! :search_paths + # pod 'Specta' + # pod 'Expecta' + # end + # end + # + # @return [void] + # + def abstract_target(name) + target(name) do + abstract! + yield if block_given? + end + end + + # Denotes that the current target is abstract, and thus will not directly + # link against an Xcode target. + # + # @return [void] + # + def abstract!(abstract = true) + current_target_definition.abstract = abstract + end + + # Sets the inheritance mode for the current target. + # + # @param [Symbol] inheritance + # the inheritance mode to set. + # + # **Available Modes:** + # + `:complete` The target inherits all + # behaviour from the parent. + # + `:none` The target inherits none of + # the behaviour from the parent. + # + `:search_paths` The target inherits + # the search paths of the parent only. + # + # + # @example Inheriting only search paths + # + # target 'App' do + # target 'AppTests' do + # inherit! 
:search_paths + # end + # end + # + # @return [void] + # + def inherit!(inheritance) + current_target_definition.inheritance = inheritance + end + + #-----------------------------------------------------------------------# + + # @!group Target configuration + # These settings are used to control the CocoaPods generated project. + # + # This starts out simply with stating what `platform` you are working + # on. `xcodeproj` allows you to state specifically which project to + # link with. + + #-----------------------------------------------------------------------# + + # Specifies the platform for which a static library should be built. + # + # CocoaPods provides a default deployment target if one is not specified. + # The current default values are `4.3` for iOS, `10.6` for OS X, `9.0` for tvOS + # and `2.0` for watchOS. + # + # If the deployment target requires it (iOS < `4.3`), `armv6` + # architecture will be added to `ARCHS`. + # + # @param [Symbol] name + # the name of platform, can be either `:osx` for OS X, `:ios` + # for iOS, `:tvos` for tvOS, or `:watchos` for watchOS. + # + # @param [String, Version] target + # The optional deployment. If not provided a default value + # according to the platform name will be assigned. + # + # @example Specifying the platform + # + # platform :ios, '4.0' + # platform :ios + # + # @return [void] + # + def platform(name, target = nil) + # Support for deprecated options parameter + target = target[:deployment_target] if target.is_a?(Hash) + current_target_definition.set_platform!(name, target) + end + + # Specifies the Xcode project that contains the target that the Pods + # library should be linked with. + # + # ----- + # + # If none of the target definitions specify an explicit project + # and there is only **one** project in the same directory as the Podfile + # then that project will be used. + # + # It is possible also to specify whether the build settings of your + # custom build configurations should be modelled after the release or + # the debug presets. To do so you need to specify a hash where the name + # of each build configuration is associated to either `:release` or + # `:debug`. + # + # @param [String] path + # the path of the project to link with + # + # @param [Hash{String => symbol}] build_configurations + # a hash where the keys are the name of the build + # configurations in your Xcode project and the values are + # Symbols that specify if the configuration should be based on + # the `:debug` or `:release` configuration. If no explicit + # mapping is specified for a configuration in your project, it + # will default to `:release`. + # + # @example Specifying the user project + # + # # This Target can be found in a Xcode project called `FastGPS` + # target 'MyGPSApp' do + # project 'FastGPS' + # ... + # end + # + # # Same Podfile, multiple Xcodeprojects + # target 'MyNotesApp' do + # project 'FastNotes' + # ... + # end + # + # @example Using custom build configurations + # + # project 'TestProject', 'Mac App Store' => :release, 'Test' => :debug + # + # @return [void] + # + def project(path, build_configurations = {}) + current_target_definition.user_project_path = path + current_target_definition.build_configurations = build_configurations + end + + # @!visibility private + # + # @deprecated #{xcodeproj} was renamed to #{project}. + # + # `xcodeproj` is deprecated in [1.0](http://blog.cocoapods.org/CocoaPods-1.0/) and has been renamed to `project`. + # For pre-1.0 versions use `xcodeproj`. 
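+ # For example, a pre-1.0 line such as `xcodeproj 'MyProject'` should now be
+ # written as `project 'MyProject'` (the project name is illustrative).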
+ # + def xcodeproj(*args) + CoreUI.warn '`xcodeproj` was renamed to `project`. Please update your Podfile accordingly.' + project(*args) + end + + # @!visibility private + # + # @deprecated linking a single target with multiple Xcode targets is no + # longer supported. Use an {#abstract_target} and target + # inheritance instead. + # + # `link_with` is deprecated in [1.0](http://blog.cocoapods.org/CocoaPods-1.0/) in + # favour of `abstract_target` and target inheritance instead. + # + def link_with(*) + raise Informative, 'The specification of `link_with` in the Podfile ' \ + 'is now unsupported, please use target blocks instead.' + end + + # Inhibits **all** the warnings from the CocoaPods libraries. + # + # ------ + # + # This attribute is inherited by child target definitions. + # + # If you would like to inhibit warnings per Pod you can use the + # following syntax: + # + # pod 'SSZipArchive', :inhibit_warnings => true + # + # Additionally, when you use `inhibit_all_warnings!` attribute, + # you can exclude a particular Pod from being inhibited using the following: + # + # pod 'SSZipArchive', :inhibit_warnings => false + # + def inhibit_all_warnings! + current_target_definition.inhibit_all_warnings = true + end + + # Use modular headers for all CocoaPods static libraries. + # + # ------ + # + # This attribute is inherited by child target definitions. + # + # If you would like to use modular headers per Pod you can use the + # following syntax: + # + # pod 'SSZipArchive', :modular_headers => true + # + # Additionally, when you use the `use_modular_headers!` attribute, + # you can exclude a particular Pod from modular headers using the following: + # + # pod 'SSZipArchive', :modular_headers => false + # + def use_modular_headers! + current_target_definition.use_modular_headers_for_all_pods = true + end + + # Use frameworks instead of static libraries for Pods. When using frameworks, you may also specify the `:linkage` + # style to use, either `:static` or `:dynamic`. + # + # ------ + # + # This attribute is inherited by child target definitions. + # + # @param [Boolean, Hash] option + # The option to use for configuring packaging and linkage style. + # + # @example + # + # target 'MyApp' do + # use_frameworks! + # pod 'AFNetworking', '~> 1.0' + # end + # + # @example + # + # target 'MyApp' do + # use_frameworks! :linkage => :dynamic + # pod 'AFNetworking', '~> 1.0' + # end + # + # target 'ZipApp' do + # use_frameworks! :linkage => :static + # pod 'SSZipArchive' + # end + # + # @return [void] + # + def use_frameworks!(option = true) + current_target_definition.use_frameworks!(option) + end + + # Specifies the Swift version requirements this target definition supports. + # + # **Note** These requirements are inherited from the parent, if specified and if none + # are specified at the root level then all versions are considered to be supported. + # + # @param [String, Version, Array, Array] requirements + # The set of requirements this target supports. 
+ # + # @example + # + # target 'MyApp' do + # supports_swift_versions '>= 3.0', '< 4.0' + # pod 'AFNetworking', '~> 1.0' + # end + # + # @example + # + # supports_swift_versions '>= 3.0', '< 4.0' + # + # target 'MyApp' do + # pod 'AFNetworking', '~> 1.0' + # end + # + # target 'ZipApp' do + # pod 'SSZipArchive' + # end + # + # @return [void] + # + def supports_swift_versions(*requirements) + current_target_definition.store_swift_version_requirements(*requirements) + end + + #-----------------------------------------------------------------------# + + # @!group Workspace + # + # This group list the options to configure workspace and to set global + # settings. + + #-----------------------------------------------------------------------# + + # Specifies the Xcode workspace that should contain all the projects. + # + # ----- + # + # If no explicit Xcode workspace is specified and only **one** project + # exists in the same directory as the Podfile, then the name of that + # project is used as the workspace’s name. + # + # @param [String] path + # path of the workspace. + # + # @example Specifying a workspace + # + # workspace 'MyWorkspace' + # + # @return [void] + # + def workspace(path) + set_hash_value('workspace', path.to_s) + end + + # Specifies that a BridgeSupport metadata document should be generated + # from the headers of all installed Pods. + # + # ----- + # + # This is for scripting languages such as [MacRuby](http://macruby.org), + # [Nu](http://programming.nu/index), and + # [JSCocoa](http://inexdo.com/JSCocoa), which use it to bridge types, + # functions, etc. + # + # @return [void] + # + def generate_bridge_support! + set_hash_value('generate_bridge_support', true) + end + + # Specifies that the -fobjc-arc flag should be added to the + # `OTHER_LD_FLAGS`. + # + # ----- + # + # This is used as a workaround for a compiler bug with non-ARC projects + # (see #142). This was originally done automatically but libtool as of + # Xcode 4.3.2 no longer seems to support the `-fobjc-arc` flag. Therefore + # it now has to be enabled explicitly using this method. + # + # Support for this method might be dropped in CocoaPods `1.0`. + # + # @return [void] + # + def set_arc_compatibility_flag! + set_hash_value('set_arc_compatibility_flag', true) + end + + #-----------------------------------------------------------------------# + + # @!group Sources + # + # The Podfile retrieves specs from a given list of sources (repositories). + # + # Sources are __global__ and they are not stored per target definition. + + #-----------------------------------------------------------------------# + + # Specifies the location of specs + # + # ----- + # + # Use this method to specify sources. The order of the sources is + # relevant. CocoaPods will use the highest version of a Pod of the first + # source which includes the Pod (regardless whether other sources have a + # higher version). + # + # The official CocoaPods source is implicit. + # Once you specify another source, then it will need to be included. + # + # @param [String] source + # The URL of a specs repository. 
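+ # For example, a private specs repository can be listed ahead of the public
+ # CDN; the private URL below is illustrative:
+ #
+ # source 'https://github.com/acme/PrivateSpecs.git'
+ # source 'https://cdn.cocoapods.org/'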
+ # + # @example Specifying to first use the Artsy repository and then the CocoaPods Master Repository + # + # source 'https://github.com/artsy/Specs.git' + # source 'https://github.com/CocoaPods/Specs.git' + # + # @return [void] + # + def source(source) + hash_sources = get_hash_value('sources') || [] + hash_sources << source + set_hash_value('sources', hash_sources.uniq) + end + + #-----------------------------------------------------------------------# + + # @!group Hooks + # The Podfile provides hooks that will be called during the + # installation process. + # + # Hooks are __global__ and not stored per target definition. + + #-----------------------------------------------------------------------# + + # Specifies the plugins that should be used during installation. + # + # ----- + # + # Use this method to specify a plugin that should be used during + # installation, along with the options that should be passed to the plugin + # when it is invoked. + # + # @param [String] name + # The name of the plugin. + # + # @param [Hash] options + # The optional options that should be passed to the plugin when + # its hooks are invoked. + # + # @example Specifying to use the `slather` and `cocoapods-keys` plugins. + # + # plugin 'cocoapods-keys', :keyring => 'Eidolon' + # plugin 'slather' + # + # @return [void] + # + def plugin(name, options = {}) + hash_plugins = get_hash_value('plugins') || {} + (hash_plugins[name] ||= {}).merge!(options.deep_stringify_keys) + set_hash_value('plugins', hash_plugins) + end + + # This hook allows you to make any changes to the Pods after they have + # been downloaded but before they are installed. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Defining a pre-install hook in a Podfile. + # + # pre_install do |installer| + # # Do something fancy! + # end + # + # + def pre_install(&block) + @pre_install_callback = block + end + + # This hook allows you to make changes before the project is written + # to disk. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Customizing the dependencies before integration + # + # pre_integrate do |installer| + # # perform some changes on dependencies + # end + # + # @return [void] + # + def pre_integrate(&block) + raise Informative, 'Specifying multiple `pre_integrate` hooks is unsupported.' if @pre_integrate_callback + @pre_integrate_callback = block + end + + # This hook allows you to make any last changes to the generated Xcode + # project before it is written to disk, or any other tasks you might want + # to perform. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Customizing the build settings of all targets + # + # post_install do |installer| + # installer.pods_project.targets.each do |target| + # target.build_configurations.each do |config| + # config.build_settings['GCC_ENABLE_OBJC_GC'] = 'supported' + # end + # end + # end + # + # @return [void] + # + def post_install(&block) + raise Informative, 'Specifying multiple `post_install` hooks is unsupported.' if @post_install_callback + @post_install_callback = block + end + + # This hook allows you to make changes after the project is written + # to disk. + # + # It receives the [Pod::Installer] as its only argument. 
+ # + # @example Customizing the build settings of all targets + # + # post_integrate do |installer| + # # some change after project write to disk + # end + # + # @return [void] + # + def post_integrate(&block) + raise Informative, 'Specifying multiple `post_integrate` hooks is unsupported.' if @post_integrate_callback + @post_integrate_callback = block + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile/target_definition.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile/target_definition.rb new file mode 100644 index 0000000..9d03073 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/podfile/target_definition.rb @@ -0,0 +1,1181 @@ +module Pod + class Podfile + # The TargetDefinition stores the information of a CocoaPods static + # library. The target definition can be linked with one or more targets of + # the user project. + # + # Target definitions can be nested and by default inherit the dependencies + # of the parent. + # + class TargetDefinition + # @return [TargetDefinition, Podfile] the parent target definition or the + # Podfile if the receiver is root. + # + attr_reader :parent + + # @param [String, Symbol] + # name @see name + # + # @param [TargetDefinition] parent + # @see parent + # + def initialize(name, parent, internal_hash = nil) + @internal_hash = internal_hash || {} + @parent = parent + @children = [] + @label = nil + self.name ||= name + if parent.is_a?(TargetDefinition) + parent.children << self + end + end + + # @return [Array] the children target definitions. + # + attr_reader :children + + # @return [Array] the targets definition descending + # from this one. + # + def recursive_children + (children + children.map(&:recursive_children)).flatten + end + + # @return [Bool] Whether the target definition is root. + # + def root? + parent.is_a?(Podfile) || parent.nil? + end + + # @return [TargetDefinition] The root target definition. + # + def root + if root? + self + else + parent.root + end + end + + # @return [Podfile] The podfile that contains the specification for this + # target definition. + # + def podfile + root.parent + end + + # @return [Array] The list of the dependencies of the target + # definition including the inherited ones. + # + def dependencies + if exclusive? + non_inherited_dependencies + else + non_inherited_dependencies + parent.dependencies + end + end + + # @return [Array] the targets from which this target + # definition should inherit only search paths. + # + def targets_to_inherit_search_paths + can_inherit = !root? && matches_platform?(parent) + if inheritance == 'search_paths' # && can_inherit + parent.targets_to_inherit_search_paths << parent + elsif can_inherit + parent.targets_to_inherit_search_paths + else + [] + end + end + + # @return [Array] The list of the dependencies of the target definition, + # excluding inherited ones. + # + def non_inherited_dependencies + pod_dependencies.concat(podspec_dependencies) + end + + # @return [Bool] Whether the target definition has at least one + # dependency, excluding inherited ones. + # + def empty? + non_inherited_dependencies.empty? + end + + # @return [String] The label of the target definition according to its + # name. + # + def label + @label ||= if root? && name == 'Pods' + 'Pods' + elsif exclusive? || parent.nil? 
+ "Pods-#{name}" + else + "#{parent.label}-#{name}" + end + end + + alias_method :to_s, :label + + # @return [String] A string representation suitable for debug. + # + def inspect + "#<#{self.class} label=#{label}>" + end + + #-----------------------------------------------------------------------# + + public + + # @!group Attributes + + # @return [String] the path of the project this target definition should + # link with. + # + def name + get_hash_value('name') + end + + # Sets the path of the user project this target definition should link + # with. + # + # @param [String] name + # The path of the project. + # + # @return [void] + # + def name=(name) + @label = nil + set_hash_value('name', name) + end + + #--------------------------------------# + + # @return [Boolean] whether this target definition is abstract. + # + def abstract? + get_hash_value('abstract', root?) + end + + # Sets whether this target definition is abstract. + # + # @param [Boolean] abstract + # whether this target definition is abstract. + # + # @return [void] + # + def abstract=(abstract) + set_hash_value('abstract', abstract) + end + + #--------------------------------------# + + # @return [String] the inheritance mode for this target definition. + # + def inheritance + get_hash_value('inheritance', 'complete') + end + + # Sets the inheritance mode for this target definition. + # + # @param [#to_s] inheritance + # the inheritance mode for this target definition. + # + # @raise [Informative] if this target definition is a root target + # definition or if the `inheritance` value is unknown. + # + # @return [void] + # + def inheritance=(inheritance) + inheritance = inheritance.to_s + unless %w(none search_paths complete).include?(inheritance) + raise Informative, "Unrecognized inheritance option `#{inheritance}` specified for target `#{name}`." + end + if root? + raise Informative, 'Cannot set inheritance for the root target definition.' + end + if abstract? + raise Informative, 'Cannot set inheritance for abstract target definition.' + end + set_hash_value('inheritance', inheritance) + end + + #--------------------------------------# + + # Returns whether the target definition should inherit the dependencies + # of the parent. + # + # @note A target is always `exclusive` if it is root. + # + # @note A target is always `exclusive` if the `platform` does + # not match the parent's `platform`. + # + # @return [Bool] whether is exclusive. + # + def exclusive? + if root? + true + else + !matches_platform?(parent) || (inheritance != 'complete') + end + end + + # @param [TargetDefinition, Nil] target_definition + # the target definition to check for platform compatibility. + # + # @return [Boolean] + # whether this target definition matches the platform of + # `target_definition`. + # + def matches_platform?(target_definition) + return false unless target_definition + return true if target_definition.platform == platform + !target_definition.platform && target_definition.abstract? + end + + #--------------------------------------# + + # @return [String] the path of the project this target definition should + # link with. + # + def user_project_path + path = get_hash_value('user_project_path') + if path + Pathname(path).sub_ext('.xcodeproj').to_path + else + parent.user_project_path unless root? + end + end + + # Sets the path of the user project this target definition should link + # with. + # + # @param [String] path + # The path of the project. 
+ # + # @return [void] + # + def user_project_path=(path) + set_hash_value('user_project_path', path) + end + + #--------------------------------------# + + # @return [Hash{String => symbol}] A hash where the keys are the name of + # the build configurations and the values a symbol that + # represents their type (`:debug` or `:release`). + # + def build_configurations + if root? + get_hash_value('build_configurations') + else + get_hash_value('build_configurations') || parent.build_configurations + end + end + + # Sets the build configurations for this target. + # + # @return [Hash{String => Symbol}] hash + # A hash where the keys are the name of the build configurations + # and the values the type. + # + # @return [void] + # + def build_configurations=(hash) + set_hash_value('build_configurations', hash) unless hash.empty? + end + + #--------------------------------------# + + # @return [Array] The list of the script phases of the target definition. + # + def script_phases + get_hash_value('script_phases') || [] + end + + #--------------------------------------# + + # @return [String] The project name to use for the given pod name or `nil` if none specified. + # + # @note When querying for a subspec then use the root pod spec name instead as this is what's stored. + # + def project_name_for_pod(pod_name) + if root? + raw_project_names_hash[pod_name] + else + raw_project_names_hash[pod_name] || parent.project_name_for_pod(pod_name) + end + end + + #--------------------------------------# + # + # @return [Bool] whether the target definition should inhibit warnings + # for a single pod. If inhibit_all_warnings is true, it will + # return true for any asked pod. + # + def inhibits_warnings_for_pod?(pod_name) + if Array(inhibit_warnings_hash['not_for_pods']).include?(pod_name) + false + elsif inhibit_warnings_hash['all'] + true + elsif !root? && parent.inhibits_warnings_for_pod?(pod_name) + true + else + Array(inhibit_warnings_hash['for_pods']).include? pod_name + end + end + + # Sets whether the target definition should inhibit the warnings during + # compilation for all pods. + # + # @param [Bool] flag + # Whether the warnings should be suppressed. + # + # @return [void] + # + def inhibit_all_warnings=(flag) + raw_inhibit_warnings_hash['all'] = flag + end + + # Inhibits warnings for a specific pod during compilation. + # + # @param [String] pod_name + # Name of the pod for which the warnings will be inhibited or not. + # + # @param [Bool] should_inhibit + # Whether the warnings should be inhibited or not for given pod. + # + # @return [void] + # + def set_inhibit_warnings_for_pod(pod_name, should_inhibit) + hash_key = case should_inhibit + when true + 'for_pods' + when false + 'not_for_pods' + when nil + return + else + raise ArgumentError, "Got `#{should_inhibit.inspect}`, should be a boolean" + end + raw_inhibit_warnings_hash[hash_key] ||= [] + raw_inhibit_warnings_hash[hash_key] << pod_name + end + + #--------------------------------------# + + # The (desired) build type for the pods integrated in this target definition. Defaults to static libraries and can + # only be overridden through Pod::Podfile::DSL#use_frameworks!. + # + # @return [BuildType] + # + def build_type + value = get_hash_value('uses_frameworks', root? ? BuildType.static_library : parent.build_type) + case value + when true, false + value ? 
BuildType.dynamic_framework : BuildType.static_library + when Hash + BuildType.new(:linkage => value.fetch(:linkage), :packaging => value.fetch(:packaging)) + when BuildType + value + else + raise ArgumentError, "Got `#{value.inspect}`, should be a boolean, hash or BuildType." + end + end + + # Sets whether the target definition's pods should be built as frameworks. + # + # @param [Boolean, Hash] option + # Whether pods that are integrated in this target should be built as frameworks. If the option is a + # boolean then the value affects both packaging and linkage styles. If set to true, then dynamic frameworks + # are used and if it's set to false, then static libraries are used. If the option is a hash then + # `:framework` packaging is implied and the user configures the `:linkage` style to use. + # + # @return [void] + # + def use_frameworks!(option = true) + value = case option + when true, false + option ? BuildType.dynamic_framework : BuildType.static_library + when Hash + BuildType.new(:linkage => option.fetch(:linkage), :packaging => :framework) + else + raise ArgumentError, "Got `#{option.inspect}`, should be a boolean or hash." + end + set_hash_value('uses_frameworks', value.to_hash) + end + + # @return [Bool] whether the target definition pods should be built as frameworks. + # + def uses_frameworks? + if internal_hash['uses_frameworks'].nil? + root? ? false : parent.uses_frameworks? + else + build_type.framework? + end + end + + #--------------------------------------# + + # Sets the Swift version that the target definition should use. + # + # @param [String] version + # The Swift version that the target definition should use. + # + # @return [void] + # + def swift_version=(version) + set_hash_value('swift_version', version) + end + + # @return [String] the Swift version that the target definition should + # use. + # + def swift_version + get_hash_value('swift_version') + end + + # @return [Array] the Swift version requirements this target definition enforces. + # + def swift_version_requirements + get_hash_value('swift_version_requirements') + end + + # Queries the target if a version of Swift is supported or not. + # + # @param [Version] swift_version + # The Swift version to query against. + # + # @return [Boolean] Whether the target accepts the specified Swift version. + # + def supports_swift_version?(swift_version) + if swift_version_requirements.nil? + root? || parent.supports_swift_version?(swift_version) + else + Requirement.create(swift_version_requirements).satisfied_by?(swift_version) + end + end + + #--------------------------------------# + + # Whether a specific pod should be linked to the target when building for + # a specific configuration. If a pod has not been explicitly whitelisted + # for any configuration, it is implicitly whitelisted. + # + # @param [String] pod_name + # The pod that we're querying about inclusion for in the given + # configuration. + # + # @param [String] configuration_name + # The configuration that we're querying about inclusion of the + # pod in. + # + # @note Build configurations are case compared case-insensitively in + # CocoaPods. + # + # @return [Bool] flag + # Whether the pod should be linked with the target + # + def pod_whitelisted_for_configuration?(pod_name, configuration_name) + found = false + configuration_pod_whitelist.each do |configuration, pods| + if pods.include?(pod_name) + found = true + if configuration.downcase == configuration_name.to_s.downcase + return true + end + end + end + !found && (root? 
|| (inheritance != 'none' && parent.pod_whitelisted_for_configuration?(pod_name, configuration_name))) + end + + # Whitelists a pod for a specific configuration. If a pod is whitelisted + # for any configuration, it will only be linked with the target in the + # configuration(s) specified. If it is not whitelisted for any + # configuration, it is implicitly included in all configurations. + # + # @param [String] pod_name + # The pod that should be included in the given configuration. + # + # @param [String, Symbol] configuration_name + # The configuration that the pod should be included in + # + # @note Build configurations are stored as a String. + # + # @return [void] + # + def whitelist_pod_for_configuration(pod_name, configuration_name) + configuration_name = configuration_name.to_s + list = raw_configuration_pod_whitelist[configuration_name] ||= [] + list << pod_name + end + + # @return [Array] unique list of all configurations for which + # pods have been whitelisted. + # + def all_whitelisted_configurations + parent_configurations = (root? || inheritance == 'none') ? [] : parent.all_whitelisted_configurations + (configuration_pod_whitelist.keys + parent_configurations).uniq + end + + #--------------------------------------# + + def raw_use_modular_headers_hash + get_hash_value('use_modular_headers', {}) + end + private :raw_use_modular_headers_hash + + # Returns the use_modular_headers hash pre-populated with default values. + # + # @return [Hash] Hash with :all key for building all + # pods as modules, :for_pods key for building as module per Pod, + # and :not_for_pods key for not biulding as module per Pod. + # + def use_modular_headers_hash + raw_hash = raw_use_modular_headers_hash + if exclusive? + raw_hash + else + parent_hash = parent.send(:use_modular_headers_hash).dup + if parent_hash['not_for_pods'] + # Remove pods that are set to not use modular headers inside parent + # if they are set to use modular headers inside current target. + parent_hash['not_for_pods'] -= Array(raw_hash['for_pods']) + end + if parent_hash['for_pods'] + # Remove pods that are set to use modular headers inside parent if they are set to not use modular headers inside current target. + parent_hash['for_pods'] -= Array(raw_hash['for_pods']) + end + if raw_hash['all'] + # Clean pods that are set to not use modular headers inside parent if use_modular_headers! was set. + parent_hash['not_for_pods'] = nil + end + parent_hash.merge(raw_hash) do |_, l, r| + Array(l).concat(r).uniq + end + end + end + + # @return [Bool] whether the target definition should use modular headers + # for a single pod. If use_modular_headers! is true, it will + # return true for any asked pod. + # + def build_pod_as_module?(pod_name) + if Array(use_modular_headers_hash['not_for_pods']).include?(pod_name) + false + elsif use_modular_headers_hash['all'] + true + elsif !root? && parent.build_pod_as_module?(pod_name) + true + else + Array(use_modular_headers_hash['for_pods']).include? pod_name + end + end + + # Sets whether the target definition should use modular headers for all pods. + # + # @param [Bool] flag + # Whether the warnings should be suppressed. + # + # @return [void] + # + def use_modular_headers_for_all_pods=(flag) + raw_use_modular_headers_hash['all'] = flag + end + + # Use modular headers for a specific pod during compilation. + # + # @param [String] pod_name + # Name of the pod for which modular headers will be used. + # + # @param [Bool] flag + # Whether modular headers should be used. 
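+ # For illustration (the pod name is hypothetical): a Podfile line such as
+ # `pod 'SSZipArchive', :modular_headers => true` reaches this method as
+ # `set_use_modular_headers_for_pod('SSZipArchive', true)` via
+ # #parse_modular_headers.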
+ # + # @return [void] + # + def set_use_modular_headers_for_pod(pod_name, flag) + hash_key = case flag + when true + 'for_pods' + when false + 'not_for_pods' + when nil + return + else + raise ArgumentError, "Got `#{flag.inspect}`, should be a boolean" + end + raw_use_modular_headers_hash[hash_key] ||= [] + raw_use_modular_headers_hash[hash_key] << pod_name + end + + #--------------------------------------# + + PLATFORM_DEFAULTS = { :ios => '4.3', :osx => '10.6', :tvos => '9.0', :watchos => '2.0' }.freeze + + # @return [Platform] the platform of the target definition. + # + # @note If no deployment target has been specified a default value is + # provided. + # + def platform + name_or_hash = get_hash_value('platform') + if name_or_hash + if name_or_hash.is_a?(Hash) + name = name_or_hash.keys.first.to_sym + target = name_or_hash.values.first + else + name = name_or_hash.to_sym + end + target ||= PLATFORM_DEFAULTS[name] + Platform.new(name, target) + else + parent.platform unless root? + end + end + + # Sets the platform of the target definition. + # + # @param [Symbol] name + # The name of the platform. + # + # @param [String] target + # The deployment target of the platform. + # + # @raise When the name of the platform is unsupported. + # + # @return [void] + # + def set_platform(name, target = nil) + name = :osx if name == :macos + unless [:ios, :osx, :tvos, :watchos].include?(name) + raise StandardError, "Unsupported platform `#{name}`. Platform " \ + 'must be `:ios`, `:osx`, `:macos`, `:tvos`, or `:watchos`.' + end + + if target + value = { name.to_s => target } + else + value = name.to_s + end + set_hash_value('platform', value) + end + + # Sets the platform of the target definition. + # + # @see #set_platform + # + # @raise When the target definition already has a platform set. + # + # @return [void] + # + def set_platform!(name, target = nil) + raise StandardError, "The target `#{label}` already has a platform set." if get_hash_value('platform') + set_platform(name, target) + end + + # Stores the Swift version requirements to be used for this target. + # + # @param [String, Version, Array, Array] requirements + # The set of requirements this target supports. + # + # @return [void] + # + def store_swift_version_requirements(*requirements) + set_hash_value('swift_version_requirements', requirements.flatten.map(&:to_s)) + end + + #--------------------------------------# + + # Stores the dependency for a Pod with the given name. + # + # @param [String] name + # The name of the Pod + # + # @param [Array] requirements + # The requirements and the options of the dependency. + # + # @note The dependencies are stored as an array. To simplify the YAML + # representation if they have requirements they are represented + # as a Hash, otherwise only the String of the name is added to + # the array. + # + # @todo This needs urgently a rename. + # + # @return [void] + # + def store_pod(name, *requirements) + return if parse_subspecs(name, requirements) # This parse method must be called first + parse_inhibit_warnings(name, requirements) + parse_modular_headers(name, requirements) + parse_configuration_whitelist(name, requirements) + parse_project_name(name, requirements) + + if requirements && !requirements.empty? + pod = { name => requirements } + else + pod = name + end + + get_hash_value('dependencies', []) << pod + nil + end + + #--------------------------------------# + + # Stores the podspec whose dependencies should be included by the + # target. 
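+ # For example (names are illustrative), `podspec :name => 'QuickDialog',
+ # :subspecs => ['Core']` is stored as the options hash
+ # `{ :name => 'QuickDialog', :subspecs => ['Core'] }`.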
+ # + # @param [Hash] options + # The options used to find the podspec (either by name or by + # path). If nil the podspec is auto-detected (i.e. the first one + # in the folder of the Podfile) + # + # @note The storage of this information is optimized for YAML + # readability. + # + # @todo This urgently needs a rename. + # + # @return [void] + # + def store_podspec(options = nil) + options ||= {} + unless options.keys.all? { |key| [:name, :path, :subspecs, :subspec].include?(key) } + raise StandardError, 'Unrecognized options for the podspec ' \ + "method `#{options}`" + end + if subspec_name = options[:subspec] + unless subspec_name.is_a?(String) + raise StandardError, "Option `:subspec => #{subspec_name.inspect}` should be a String" + end + end + if subspec_names = options[:subspecs] + if !subspec_names.is_a?(Array) || !subspec_names.all? { |name| name.is_a? String } + raise StandardError, "Option `:subspecs => #{subspec_names.inspect}` " \ + 'should be an Array of Strings' + end + end + options[:autodetect] = true if !options.include?(:name) && !options.include?(:path) + get_hash_value('podspecs', []) << options + end + + #--------------------------------------# + + # Stores the script phase to add for this target definition. + # + # @param [Hash] options + # The options to use for this script phase. The required keys + # are: `:name`, `:script`, while the optional keys are: + # `:shell_path`, `:input_files`, `:output_files`, `:show_env_vars_in_log`, `:execution_position` and + # `:dependency_file`. + # + # @return [void] + # + def store_script_phase(options) + option_keys = options.keys + unrecognized_keys = option_keys - Specification::ALL_SCRIPT_PHASE_KEYS + unless unrecognized_keys.empty? + raise StandardError, "Unrecognized options `#{unrecognized_keys}` in shell script `#{options[:name]}` within `#{name}` target. " \ + "Available options are `#{Specification::ALL_SCRIPT_PHASE_KEYS}`." + end + missing_required_keys = Specification::SCRIPT_PHASE_REQUIRED_KEYS - option_keys + unless missing_required_keys.empty? + raise StandardError, "Missing required shell script phase options `#{missing_required_keys.join(', ')}`" + end + script_phases_hash = get_hash_value('script_phases', []) + if script_phases_hash.map { |script_phase_options| script_phase_options[:name] }.include?(options[:name]) + raise StandardError, "Script phase with name `#{options[:name]}` name already present for target `#{name}`." + end + options[:execution_position] = :any unless options.key?(:execution_position) + unless Specification::EXECUTION_POSITION_KEYS.include?(options[:execution_position]) + raise StandardError, "Invalid execution position value `#{options[:execution_position]}` in shell script `#{options[:name]}` within `#{name}` target. " \ + "Available options are `#{Specification::EXECUTION_POSITION_KEYS}`." + end + script_phases_hash << options + end + + #-----------------------------------------------------------------------# + + public + + # @!group Representations + + # @return [Array] The keys used by the hash representation of the + # target definition. + # + HASH_KEYS = %w( + name + platform + podspecs + exclusive + link_with + link_with_first_target + inhibit_warnings + use_modular_headers + user_project_path + build_configurations + project_names + dependencies + script_phases + children + configuration_pod_whitelist + uses_frameworks + swift_version_requirements + inheritance + abstract + swift_version + ).freeze + + # @return [Hash] The hash representation of the target definition. 
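+ # As a sketch of the shape (keys and values are illustrative), a root
+ # definition with a single child might serialize to:
+ #
+ # { 'name' => 'Pods', 'abstract' => true,
+ #   'children' => [{ 'name' => 'App', 'dependencies' => ['SSZipArchive'] }] }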
+ # + def to_hash + hash = internal_hash.dup + unless children.empty? + hash['children'] = children.map(&:to_hash) + end + hash + end + + # Configures a new target definition from the given hash. + # + # @param [Hash] the hash which contains the information of the + # Podfile. + # + # @return [TargetDefinition] the new target definition + # + def self.from_hash(hash, parent) + internal_hash = hash.dup + children_hashes = internal_hash.delete('children') || [] + definition = TargetDefinition.new(nil, parent, internal_hash) + children_hashes.each do |child_hash| + TargetDefinition.from_hash(child_hash, definition) + end + definition + end + + #-----------------------------------------------------------------------# + + protected + + # @!group Private helpers + + # @return [Array] + # + attr_writer :children + + # @return [Hash] The hash which store the attributes of the target + # definition. + # + attr_accessor :internal_hash + + private + + # Set a value in the internal hash of the target definition for the given + # key. + # + # @param [String] key + # The key for which to store the value. + # + # @param [Object] value + # The value to store. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [void] + # + def set_hash_value(key, value) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash[key] = value + end + + # Returns the value for the given key in the internal hash of the target + # definition. + # + # @param [String] key + # The key for which the value is needed. + # + # @param [Object] base_value + # The value to set if they key is nil. Useful for collections. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [Object] The value for the key. + # + def get_hash_value(key, base_value = nil) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash[key] = base_value if internal_hash[key].nil? + internal_hash[key] + end + + def raw_inhibit_warnings_hash + get_hash_value('inhibit_warnings', {}) + end + private :raw_inhibit_warnings_hash + + # Returns the inhibit_warnings hash pre-populated with default values. + # + # @return [Hash] Hash with :all key for inhibiting all + # warnings, :for_pods key for inhibiting warnings per Pod, + # and :not_for_pods key for not inhibiting warnings per Pod. + # + def inhibit_warnings_hash + inhibit_hash = raw_inhibit_warnings_hash + if exclusive? + inhibit_hash + else + parent_hash = parent.send(:inhibit_warnings_hash).dup + if parent_hash['not_for_pods'] + # Remove pods that are set to not inhibit inside parent if they are set to inhibit inside current target. + parent_hash['not_for_pods'] -= Array(inhibit_hash['for_pods']) + end + if parent_hash['for_pods'] + # Remove pods that are set to inhibit inside parent if they are set to not inhibit inside current target. + parent_hash['for_pods'] -= Array(inhibit_hash['for_pods']) + end + if inhibit_hash['all'] + # Clean pods that are set to not inhibit inside parent if inhibit_all_warnings! was set. 
+ parent_hash['not_for_pods'] = nil + inhibit_hash.delete('all') if parent_hash['all'] + end + parent_hash.merge(inhibit_hash) do |_, l, r| + Array(l).concat(r).uniq + end + end + end + + def raw_configuration_pod_whitelist + get_hash_value('configuration_pod_whitelist', {}) + end + private :raw_configuration_pod_whitelist + + # Returns the configuration_pod_whitelist hash + # + # @return [Hash] Hash with configuration name as key, + # array of pod names to be linked in builds with that configuration + # as value. + # + def configuration_pod_whitelist + whitelist_hash = raw_configuration_pod_whitelist + if exclusive? + whitelist_hash + else + parent.send(:configuration_pod_whitelist).merge(whitelist_hash) { |_, l, r| Array(l).concat(r).uniq } + end + end + + # @return [Array] The dependencies specified by the user for + # this target definition. + # + def pod_dependencies + pods = get_hash_value('dependencies') || [] + pods.map do |name_or_hash| + if name_or_hash.is_a?(Hash) + name = name_or_hash.keys.first + requirements = name_or_hash.values.first + Dependency.new(name, *requirements) + else + Dependency.new(name_or_hash) + end + end + end + + # @return [Array] The dependencies inherited by the podspecs. + # + # @note The podspec directive is intended to include the dependencies of + # a spec in the project where it is developed. For this reason the + # spec, or any of it subspecs, cannot be included in the + # dependencies. Otherwise it would generate a chicken-and-egg + # problem. + # + def podspec_dependencies + podspecs = get_hash_value('podspecs') || [] + podspecs.map do |options| + file = podspec_path_from_options(options) + spec = Specification.from_file(file) + subspec_names = options[:subspecs] || options[:subspec] + specs = if subspec_names.blank? + [spec] + else + subspec_names = [subspec_names] if subspec_names.is_a?(String) + subspec_names.map { |subspec_name| spec.subspec_by_name("#{spec.name}/#{subspec_name}") } + end + specs.map do |subspec| + all_specs = [subspec, *subspec.recursive_subspecs] + all_deps = all_specs.map { |s| s.dependencies(platform) }.flatten + all_deps.reject { |dep| dep.root_name == subspec.root.name } + end.flatten + end.flatten.uniq + end + + # The path of the podspec with the given options. + # + # @param [Hash] options + # The options to use for finding the podspec. The supported keys + # are: `:name`, `:path`, `:autodetect`. + # + # @return [Pathname] The path. + # + def podspec_path_from_options(options) + if path = options[:path] + if File.basename(path).include?('.podspec') + path_with_ext = path + else + path_with_ext = "#{path}.podspec" + end + path_without_tilde = path_with_ext.gsub('~', ENV['HOME']) + podfile.defined_in_file.dirname + path_without_tilde + elsif name = options[:name] + name = File.basename(name).include?('.podspec') ? name : "#{name}.podspec" + podfile.defined_in_file.dirname + name + elsif options[:autodetect] + glob_pattern = podfile.defined_in_file.dirname + '*.podspec{,.json}' + path = Pathname.glob(glob_pattern).first + unless path + raise Informative, 'Could not locate a podspec in the current directory. '\ + 'You can specify the path via the path option.' + end + + path + end + end + + # Removes :inhibit_warnings from the requirements list, and adds + # the pod's name into internal hash for disabling warnings. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :inhibit_warnings is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. 
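+ # For illustration (name and requirement are hypothetical):
+ # `pod 'SSZipArchive', '~> 2.0', :inhibit_warnings => true` arrives here as
+ # `['~> 2.0', { :inhibit_warnings => true }]` and leaves as `['~> 2.0']`,
+ # with the flag recorded via #set_inhibit_warnings_for_pod.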
+ # + # @return [void] + # + def parse_inhibit_warnings(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + should_inhibit = options.delete(:inhibit_warnings) + pod_name = Specification.root_name(name) + set_inhibit_warnings_for_pod(pod_name, should_inhibit) + + requirements.pop if options.empty? + end + + # Removes :modular_headers from the requirements list, and adds + # the pods name into internal hash for modular headers. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :modular_headers is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_modular_headers(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + defines_module = options.delete(:modular_headers) + pod_name = Specification.root_name(name) + set_use_modular_headers_for_pod(pod_name, defines_module) + + requirements.pop if options.empty? + end + + # Removes :project_name from the requirements list, and adds + # the pods name into internal hash. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :project_name is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_project_name(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + project_name = options.delete(:project_name) + pod_name = Specification.root_name(name) + raw_project_names_hash[pod_name] = project_name if project_name + + requirements.pop if options.empty? + end + + def raw_project_names_hash + get_hash_value('project_names', {}) + end + private :raw_project_names_hash + + # Removes :configurations or :configuration from the requirements list, + # and adds the pod's name into the internal hash for which pods should be + # linked in which configuration only. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :configurations is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_configuration_whitelist(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + configurations = options.delete(:configurations) + configurations ||= options.delete(:configuration) + Array(configurations).each do |configuration| + whitelist_pod_for_configuration(name, configuration) + end + requirements.pop if options.empty? + end + + # Removes :subspecs and :testspecs from the requirements list, and stores the pods + # with the given subspecs or test specs as dependencies. + # + # @param [String] name + # + # @param [Array] requirements + # If :subspecs is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. 
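+ # For illustration (names and requirement are hypothetical):
+ # `pod 'AFNetworking', '~> 2.0', :subspecs => ['Serialization', 'Security']`
+ # is expanded into the stored dependencies `AFNetworking/Serialization` and
+ # `AFNetworking/Security`, each keeping the `~> 2.0` requirement.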
+ # + # @return [Boolean] Whether new subspecs were added + # + def parse_subspecs(name, requirements) + options = requirements.last + return false unless options.is_a?(Hash) + + subspecs = options.delete(:subspecs) + test_specs = options.delete(:testspecs) + app_specs = options.delete(:appspecs) + + subspecs.each do |ss| + store_pod("#{name}/#{ss}", *requirements.dup) + end if subspecs + + test_specs.each do |ss| + requirements_copy = requirements.map(&:dup) + store_pod("#{name}/#{ss}", *requirements_copy) + end if test_specs + + app_specs.each do |as| + requirements_copy = requirements.map(&:dup) + store_pod("#{name}/#{as}", *requirements_copy) + end if app_specs + + requirements.pop if options.empty? + !subspecs.nil? + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/requirement.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/requirement.rb new file mode 100644 index 0000000..d159102 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/requirement.rb @@ -0,0 +1,104 @@ +module Pod + # A Requirement is a set of one or more version restrictions of a + # {Dependency}. + # + # It is based on the RubyGems class adapted to support CocoaPods specific + # information. + # + # @todo Move support about external sources and head information here from + # the Dependency class. + # + class Requirement < Pod::Vendor::Gem::Requirement + quoted_operators = OPS.keys.map { |k| Regexp.quote k }.join '|' + + # @return [Regexp] The regular expression used to validate input strings. + # + PATTERN = /\A\s*(#{quoted_operators})?\s*(#{Version::VERSION_PATTERN})\s*\z/ + + DefaultRequirement = ['>=', Version.new(0)] # rubocop:disable Naming/ConstantName + + #-------------------------------------------------------------------------# + + # Factory method to create a new requirement. + # + # @param [Requirement, Version, Array, String, Nil] input + # The input used to create the requirement. + # + # @return [Requirement] A new requirement. + # + def self.create(input) + case input + when Requirement + input + when Version, Array + new(input) + else + if input.respond_to? :to_str + new([input.to_str]) + else + default + end + end + end + + # @return [Requirement] The default requirement. + # + def self.default + new('>= 0') + end + + # Parses the given object returning a tuple where the first entry is an + # operator and the second a version. If not operator is provided it + # defaults to `=`. + # + # @param [String, Version] input + # The input passed to create the requirement. + # + # @return [Array] A tuple representing the requirement. + # + def self.parse(input) + return ['=', input] if input.is_a?(Version) + + unless PATTERN =~ input.to_s + raise ArgumentError, "Illformed requirement `#{input.inspect}`" + end + + operator = Regexp.last_match[1] || '=' + version = Version.new(Regexp.last_match[2]) + [operator, version] + end + + # Constructs a requirement from `requirements`. + # + # @param [String, Version, Array, Array] requirements + # The set of requirements + # + # @note Duplicate requirements are ignored. + # + # @note An empty set of `requirements` is the same as `">= 0"` + # + def initialize(*requirements) + requirements = requirements.flatten + requirements.compact! + requirements.uniq! + + @requirements = if requirements.empty? + [DefaultRequirement] + else + requirements.map! 
{ |r| self.class.parse r } + end + end + + # + # @return [Bool] true if this pod has no requirements. + # + def none? + if @requirements.size == 1 + @requirements[0] == DefaultRequirement + else + false + end + end + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source.rb new file mode 100644 index 0000000..d33087b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source.rb @@ -0,0 +1,476 @@ +require 'cocoapods-core/source/acceptor' +require 'cocoapods-core/source/aggregate' +require 'cocoapods-core/source/health_reporter' +require 'cocoapods-core/source/manager' +require 'cocoapods-core/source/metadata' + +module Pod + # The Source class is responsible to manage a collection of podspecs. + # + # The backing store of the podspecs collection is an implementation detail + # abstracted from the rest of CocoaPods. + # + # The default implementation uses a git repo as a backing store, where the + # podspecs are namespaced as: + # + # "#{SPEC_NAME}/#{VERSION}/#{SPEC_NAME}.podspec" + # + class Source + # The default branch in which the specs are stored + DEFAULT_SPECS_BRANCH = 'master'.freeze + + # @return [Pod::Source::Metadata] The metadata for this source. + # + attr_reader :metadata + + # @param [Pathname, String] repo @see #repo. + # + def initialize(repo) + @repo = Pathname(repo).expand_path + @versions_by_name = {} + refresh_metadata + end + + # @return [String] The name of the source. + # + def name + repo.basename.to_s + end + + # @return [String] The URL of the source. + # + # @note In the past we had used `git ls-remote --get-url`, but this could + # lead to an issue when finding a source based on its URL when `git` + # is configured to rewrite URLs with the `url..insteadOf` + # option. See https://github.com/CocoaPods/CocoaPods/issues/2724. + # + def url + @url ||= begin + remote = repo_git(%w(config --get remote.origin.url)) + if !remote.empty? + remote + elsif (repo + '.git').exist? + "file://#{repo}/.git" + end + end + end + + # @return [String] The type of the source. + # + def type + git? ? 'git' : 'file system' + end + + alias_method :to_s, :name + + # @return [Integer] compares a source with another one for sorting + # purposes. + # + # @note Source are compared by the alphabetical order of their name, and + # this convention should be used in any case where sources need to + # be disambiguated. + # + def <=>(other) + name <=> other.name + end + + # @return [String] A description suitable for debugging. + # + def inspect + "#<#{self.class} name:#{name} type:#{type}>" + end + + # @!group Paths + #-------------------------------------------------------------------------# + + # @return [Pathname] The path where the source is stored. + # + attr_reader :repo + + # @return [Pathname] The directory where the specs are stored. + # + # @note In previous versions of CocoaPods they used to be stored in + # the root of the repo. This lead to issues, especially with + # the GitHub interface and now they are stored in a dedicated + # folder. + # + def specs_dir + @specs_dir ||= begin + specs_sub_dir = repo + 'Specs' + if specs_sub_dir.exist? + specs_sub_dir + elsif repo.exist? + repo + end + end + end + + # @param [String] name The name of the pod. + # + # @return [Pathname] The path at which the specs for the given pod are + # stored. 
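+ # For a sharded repository this resolves to something like
+ # `Specs/a/f/2/AFNetworking`, while a flat repository yields
+ # `Specs/AFNetworking` (the prefix letters depend on the repo metadata and
+ # the pod name is illustrative).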
+ # + def pod_path(name) + specs_dir.join(*metadata.path_fragment(name)) + end + + # @return [Pathname] The path at which source metadata is stored. + # + def metadata_path + repo + 'CocoaPods-version.yml' + end + + public + + # @!group Querying the source + #-------------------------------------------------------------------------# + + # @return [Array] the list of the name of all the Pods. + # + # + def pods + unless specs_dir + raise Informative, "Unable to find a source named: `#{name}`" + end + glob = specs_dir.join('*/' * metadata.prefix_lengths.size, '*') + Pathname.glob(glob).reduce([]) do |pods, entry| + pods << entry.basename.to_s if entry.directory? + pods + end.sort + end + + # Returns pod names for given array of specification paths. + # + # @param [Array] spec_paths + # Array of file path names for specifications. Path strings should be relative to the source path. + # + # @return [Array] the list of the name of Pods corresponding to specification paths. + # + def pods_for_specification_paths(spec_paths) + spec_paths.map do |path| + absolute_path = repo + path + relative_path = absolute_path.relative_path_from(specs_dir) + # The first file name returned by 'each_filename' is the pod name + relative_path.each_filename.first + end + end + + # @return [Array] all the available versions for the Pod, sorted + # from highest to lowest. + # + # @param [String] name + # the name of the Pod. + # + def versions(name) + return nil unless specs_dir + raise ArgumentError, 'No name' unless name + pod_dir = pod_path(name) + return unless pod_dir.exist? + @versions_by_name[name] ||= pod_dir.children.map do |v| + next nil unless v.directory? + basename = v.basename.to_s + next unless basename[0, 1] != '.' + begin + Version.new(basename) + rescue ArgumentError + raise Informative, 'An unexpected version directory ' \ + "`#{basename}` was encountered for the " \ + "`#{pod_dir}` Pod in the `#{name}` repository." + end + end.compact.sort.reverse + end + + # @return [Specification] the specification for a given version of Pod. + # + # @param @see specification_path + # + def specification(name, version) + Specification.from_file(specification_path(name, version)) + end + + # Returns the path of the specification with the given name and version. + # + # @param [String] name + # the name of the Pod. + # + # @param [Version,String] version + # the version for the specification. + # + # @return [Pathname] The path of the specification. + # + def specification_path(name, version) + raise ArgumentError, 'No name' unless name + raise ArgumentError, 'No version' unless version + path = pod_path(name) + version.to_s + specification_path = path + "#{name}.podspec.json" + unless specification_path.exist? + specification_path = path + "#{name}.podspec" + end + unless specification_path.exist? + raise StandardError, "Unable to find the specification #{name} " \ + "(#{version}) in the #{self.name} source." + end + specification_path + end + + # @return [Array] all the specifications contained by the + # source. + # + def all_specs + glob = specs_dir.join('*/' * metadata.prefix_lengths.size, '*', '*', '*.podspec{.json,}') + specs = Pathname.glob(glob).map do |path| + begin + Specification.from_file(path) + rescue + CoreUI.warn "Skipping `#{path.relative_path_from(repo)}` because the " \ + 'podspec contains errors.' + next + end + end + specs.compact + end + + # Returns the set for the Pod with the given name. + # + # @param [String] pod_name + # The name of the Pod. + # + # @return [Sets] the set. 
+ # + def set(pod_name) + Specification::Set.new(pod_name, self) + end + + # @return [Array] the sets of all the Pods. + # + def pod_sets + pods.map { |pod_name| set(pod_name) } + end + + public + + # @!group Searching the source + #-------------------------------------------------------------------------# + + # @return [Set] a set for a given dependency. The set is identified by the + # name of the dependency and takes into account subspecs. + # + # @note This method is optimized for fast lookups by name, i.e. it does + # *not* require iterating through {#pod_sets} + # + # @todo Rename to #load_set + # + def search(query) + unless specs_dir + raise Informative, "Unable to find a source named: `#{name}`" + end + if query.is_a?(Dependency) + query = query.root_name + end + + if (versions = @versions_by_name[query]) && !versions.empty? + set = set(query) + return set if set.specification_name == query + end + + found = [] + Pathname.glob(pod_path(query)) do |path| + next unless Dir.foreach(path).any? { |child| child != '.' && child != '..' } + found << path.basename.to_s + end + + if [query] == found + set = set(query) + set if set.specification_name == query + end + end + + # @return [Array] The list of the sets that contain the search term. + # + # @param [String] query + # the search term. Can be a regular expression. + # + # @param [Bool] full_text_search + # whether the search should be limited to the name of the Pod or + # should include also the author, the summary, and the description. + # + # @note full text search requires to load the specification for each pod, + # hence is considerably slower. + # + # @todo Rename to #search + # + def search_by_name(query, full_text_search = false) + regexp_query = /#{query}/i + if full_text_search + pod_sets.reject do |set| + texts = [] + begin + s = set.specification + texts << s.name + texts += s.authors.keys + texts << s.summary + texts << s.description + rescue + CoreUI.warn "Skipping `#{set.name}` because the podspec " \ + 'contains errors.' + end + texts.grep(regexp_query).empty? + end + else + names = pods.grep(regexp_query) + names.map { |pod_name| set(pod_name) } + end + end + + # Returns the set of the Pod whose name fuzzily matches the given query. + # + # @param [String] query + # The query to search for. + # + # @return [Set] The name of the Pod. + # @return [Nil] If no Pod with a suitable name was found. + # + def fuzzy_search(query) + require 'fuzzy_match' + pod_name = FuzzyMatch.new(pods).find(query) + if pod_name + search(pod_name) + end + end + + # @!group Updating the source + #-------------------------------------------------------------------------# + + # Updates the local clone of the source repo. + # + # @param [Bool] show_output + # + # @return [Array] changed_spec_paths + # Returns the list of changed spec paths. + # + def update(show_output) + return [] if unchanged_github_repo? + prev_commit_hash = git_commit_hash + update_git_repo(show_output) + @versions_by_name.clear + refresh_metadata + if version = metadata.last_compatible_version(Version.new(CORE_VERSION)) + tag = "v#{version}" + CoreUI.warn "Using the `#{tag}` tag of the `#{name}` source because " \ + "it is the last version compatible with CocoaPods #{CORE_VERSION}." + repo_git(['checkout', tag]) + end + diff_until_commit_hash(prev_commit_hash) + end + + def updateable? + git? + end + + def git? + repo.join('.git').exist? && !repo_git(%w(rev-parse HEAD)).empty? + end + + def indexable? + true + end + + def verify_compatibility! 
+ return if metadata.compatible?(CORE_VERSION) + + version_msg = if metadata.minimum_cocoapods_version == metadata.maximum_cocoapods_version + metadata.minimum_cocoapods_version + else + "#{metadata.minimum_cocoapods_version} - #{metadata.maximum_cocoapods_version}" + end + raise Informative, "The `#{name}` repo requires " \ + "CocoaPods #{version_msg} (currently using #{CORE_VERSION})\n" \ + 'Update CocoaPods, or checkout the appropriate tag in the repo.' + end + + public + + # @!group Representations + #-------------------------------------------------------------------------# + + # @return [Hash{String=>{String=>Specification}}] the static representation + # of all the specifications grouped first by name and then by + # version. + # + def to_hash + hash = {} + all_specs.each do |spec| + hash[spec.name] ||= {} + hash[spec.name][spec.version.version] = spec.to_hash + end + hash + end + + # @return [String] the YAML encoded {to_hash} representation. + # + def to_yaml + require 'yaml' + to_hash.to_yaml + end + + private + + # @group Private Helpers + #-------------------------------------------------------------------------# + + # Loads the specification for the given Pod gracefully. + # + # @param [String] name + # the name of the Pod. + # + # @return [Specification] The specification for the last version of the + # Pod. + # @return [Nil] If the spec could not be loaded. + # + def load_spec_gracefully(name) + versions = versions(name) + version = versions.sort.last if versions + specification(name, version) if version + rescue Informative + Pod::CoreUI.warn "Skipping `#{name}` because the podspec " \ + 'contains errors.' + nil + end + + def refresh_metadata + @metadata = Metadata.from_file(metadata_path) + end + + def git_commit_hash + repo_git(%w(rev-parse HEAD)) + end + + def update_git_repo(show_output = false) + repo_git(['checkout', git_tracking_branch]) + output = repo_git(%w(pull --ff-only), :include_error => true) + CoreUI.puts output if show_output + end + + def git_tracking_branch + path = repo.join('.git', 'cocoapods_branch') + path.file? ? path.read.strip : DEFAULT_SPECS_BRANCH + end + + def diff_until_commit_hash(commit_hash) + repo_git(%W(diff --name-only #{commit_hash}..HEAD)).split("\n") + end + + def repo_git(args, include_error: false) + command = "env -u GIT_CONFIG git -C \"#{repo}\" " << args.join(' ') + command << ' 2>&1' if include_error + (`#{command}` || '').strip + end + + def unchanged_github_repo? + return unless url =~ /github.com/ + !GitHub.modified_since_commit(url, git_commit_hash) + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/acceptor.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/acceptor.rb new file mode 100644 index 0000000..fb05c9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/acceptor.rb @@ -0,0 +1,170 @@ +module Pod + class Source + # Checks whether a podspec can be accepted by a source. The check takes + # into account the introduction of 0.0.1 version if there are already + # tagged ones or whether there is change in the source. + # + class Acceptor + # @return [Source] the source where the podspec should be added. + # + attr_reader :source + + # @param [Pathname] repo @see Source#repo. 
+ # + def initialize(repo) + @source = Source.new(repo) + end + + public + + # @!group Actions + #-----------------------------------------------------------------------# + + # Checks whether the given specification can be accepted. + # + # @return [Array] A list of errors. If the list is empty the + # specification should be accepted. + # + def analyze(spec, previous_spec = nil) + errors = [] + check_spec_source_change(spec, errors) + check_if_untagged_version_is_acceptable(spec, previous_spec, errors) + check_commit_change_for_untagged_version(spec, previous_spec, errors) + check_dependencies(spec, errors) + errors + end + + # Checks whether the specification at the given path can be accepted. + # + # @return [Array] A list of errors. If the list is empty the + # specification should be accepted. + # + def analyze_path(spec_path) + spec = Specification.from_file(spec_path) + analyze(spec) + rescue + ['Unable to load the specification.'] + end + + private + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # Checks whether the source of the proposed specification is different + # from the one of the reference specification. + # + # @note HTTP Sources are ignored as they change per version. + # + # @return [void] + # + def check_spec_source_change(spec, errors) + require 'cocoapods-core/http' + + return unless spec + return if spec.source[:http] + return unless reference_spec(spec) + keys = Spec::DSL::SOURCE_KEYS.keys + source = spec.source.values_at(*keys).compact.first + old_source = reference_spec(spec).source.values_at(*keys).compact.first + unless source == old_source + source = HTTP.get_actual_url(source) + old_source = HTTP.get_actual_url(old_source) + unless source == old_source + message = "The source of the spec doesn't match with the recorded " + message << "ones. Source: `#{source}`. Previous: `#{old_source}`.\n " + message << 'Please contact the specs repo maintainers if the ' + message << 'library changed location.' + errors << message + end + end + end + + # Checks there are already tagged specifications if the specification has + # a git source and doesn't specify a tag (i.e. rejects 0.0.1 specs if + # they are not admissible anymore). + # + # @return [void] + # + def check_if_untagged_version_is_acceptable(spec, previous_spec, errors) + return if !spec.source[:git] || spec.source[:tag] + return unless related_specifications(spec) + return if previous_spec + has_tagged_spec = related_specifications(spec).any? do |s| + s.version != '0.0.1' + end + if has_tagged_spec + errors << 'There is already at least one versioned specification ' \ + 'so untagged versions cannot be accepted.' + end + end + + # If the previous specification for the given file is passed it is + # checked for any attempt to update the commit of a 0.0.1 version. + # + # @return [void] + # + def check_commit_change_for_untagged_version(spec, previous_spec, errors) + return unless previous_spec + return unless spec.version == Version.new('0.0.1') + unless spec.source[:commit] == previous_spec.source[:commit] + errors << 'Attempt to rewrite the commit of a 0.0.1 version.' + end + end + + # Checks that there is a specification available for each of the + # dependencies of the given specification. + # + # @return [void] + # + def check_dependencies(spec, errors) + spec.dependencies.each do |dep| + set = source.search(dep) + unless set && set.specification + errors << "Unable to find a specification for the `#{dep}` " \ + 'dependency.' 
+ end + end + end + + private + + # @!group Source helpers + #-----------------------------------------------------------------------# + + # Returns the specifications related to the given spec. + # + # @param [Specification] spec + # The specification for which the siblings specs are needed. + # + # @return [Array] The other specifications of the Pod. + # + # @return [Nil] If there are no other specifications stored. + # + def related_specifications(spec) + versions = source.versions(spec.name) + return unless versions + specs = versions.sort.map { |v| source.specification(spec.name, v) } + specs.delete(spec) + specs + end + + # Returns the most representative specification for the Pod of the given + # spec. + # + # @param [Specification] spec + # The specification for which the representative spec is needed. + # + # @return [Specification] The specification with the highest version. + # + # @return [Nil] If there are no other specifications stored. + # + def reference_spec(spec) + specs = related_specifications(spec) + specs.last if specs + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/aggregate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/aggregate.rb new file mode 100644 index 0000000..f78ae95 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/aggregate.rb @@ -0,0 +1,218 @@ +module Pod + class Source + # The Aggregate manages a directory of sources repositories. + # + class Aggregate + # @return [Array] The ordered list of sources. + # + attr_reader :sources + + # @param [Array] repos_dirs @see Sources + # + def initialize(sources) + raise "Cannot initialize an aggregate with a nil source: (#{sources})" if sources.include?(nil) + @sources = sources + end + + # @return [Array] the names of all the pods available. + # + def all_pods + sources.map(&:pods).flatten.uniq + end + + # @return [Array] The sets for all the pods available. + # + # @note Implementation detail: The sources don't cache their values + # because they might change in response to an update. Therefore + # this method to preserve performance caches the values before + # processing them. + # + def all_sets + pods_by_source = {} + sources.each do |source| + pods_by_source[source] = source.pods + end + pods = pods_by_source.values.flatten.uniq + + pods.map do |pod| + pod_sources = sources.select { |s| pods_by_source[s].include?(pod) } + pod_sources = pod_sources.compact + Specification::Set.new(pod, pod_sources) + end + end + + # Returns a set configured with the source which contains the highest + # version in the aggregate. + # + # @param [String] name + # The name of the Pod. + # + # @return [Set] The most representative set for the Pod with the given + # name. Returns nil if no representative source found containing a pod with given name. + # + def representative_set(name) + representative_source = nil + highest_version = nil + sources.each do |source| + source_versions = source.versions(name) + if source_versions + source_version = source_versions.first + if highest_version.nil? || (highest_version < source_version) + highest_version = source_version + representative_source = source + end + end + end + representative_source ? 
Specification::Set.new(name, representative_source) : nil + end + + public + + # @!group Search + #-----------------------------------------------------------------------# + + # @return [Set, nil] a set for a given dependency including all the + # {Source} that contain the Pod. If no sources containing the + # Pod where found it returns nil. + # + # @raise If no source including the set can be found. + # + # @see Source#search + # + def search(dependency) + found_sources = sources.select { |s| s.search(dependency) } + unless found_sources.empty? + Specification::Set.new(dependency.root_name, found_sources) + end + end + + # @return [Array] the sets that contain the search term. + # + # @raise If no source including the set can be found. + # + # @todo Clients should raise not this method. + # + # @see Source#search_by_name + # + def search_by_name(query, full_text_search = false) + pods_by_source = {} + result = [] + sources.each do |s| + source_pods = s.search_by_name(query, full_text_search) + pods_by_source[s] = source_pods.map(&:name) + end + + root_spec_names = pods_by_source.values.flatten.uniq + root_spec_names.each do |pod| + result_sources = sources.select do |source| + pods_by_source[source].include?(pod) + end + + result << Specification::Set.new(pod, result_sources) + end + + if result.empty? + extra = ', author, summary, or description' if full_text_search + raise Informative, 'Unable to find a pod with name' \ + "#{extra} matching `#{query}'" + end + result + end + + public + + # @!group Search Index + #-----------------------------------------------------------------------# + + # Generates from scratch the search data for given source. + # This operation can take a considerable amount of time + # (seconds) as it needs to evaluate the most representative podspec + # for each Pod. + # + # @param [Source] source + # The source from which a search index will be generated. + # + # @return [Hash{String=>Hash}] The search data for the source. + # + def generate_search_index_for_source(source) + generate_search_index_for_sets(source.pod_sets) + end + + # Generates from scratch the search data for changed specifications in given source. + # + # @param [Source] source + # The source from which a search index will be generated. + # @param [Array] spec_paths + # Array of file path names for corresponding changed specifications. + # + # @return [Hash{String=>Hash}] The search data for changed specifications. + # + def generate_search_index_for_changes_in_source(source, spec_paths) + pods = source.pods_for_specification_paths(spec_paths) + sets = pods.map do |pod| + Specification::Set.new(pod, source) + end + generate_search_index_for_sets(sets) + end + + private + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # Generates search data for given array of sets. + def generate_search_index_for_sets(sets) + result = {} + sets.each do |set| + word_list_from_set(set).each do |w| + (result[w] ||= []).push(set.name) + end + end + result + end + + # Returns the vocabulary extracted from the most representative + # specification of the set. Vocabulary contains words from following information: + # + # - version + # - summary + # - description + # - authors + # + # @param [Set] set + # The set for which the information is needed. + # + # @note If the specification can't load an empty array is returned and + # a warning is printed. + # + # @note For compatibility with non Ruby clients a strings are used + # instead of symbols for the keys. 
+ # + # @return [Array] An array of words contained by the set's search related information. + # + def word_list_from_set(set) + spec = set.specification + word_list = [set.name.dup] + if spec.summary + word_list += spec.summary.split + end + if spec.description + word_list += spec.description.split + end + if spec.authors + spec.authors.each_pair do |k, v| + word_list += k.split if k + word_list += v.split if v + end + end + word_list.uniq + rescue + CoreUI.warn "Skipping `#{set.name}` because the podspec contains " \ + 'errors.' + [] + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/health_reporter.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/health_reporter.rb new file mode 100644 index 0000000..9c834ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/health_reporter.rb @@ -0,0 +1,192 @@ +module Pod + class Source + # Checks a source for errors and warnings. + # + class HealthReporter + # @return [Source] the source to check. + # + attr_reader :source + + # @param [Pathname] repo @see Source#repo. + # + def initialize(repo) + @source = Source.new(repo) + @errors = {} + @linter_results = {} + end + + public + + # @!group Configuration + #-----------------------------------------------------------------------# + + # Allows to specify an optional callback which is called before + # analysing every spec. Suitable for UI. + # + # @param [Proc] A callback which is called before checking any + # specification. It receives the name and the version of the + # spec. + # + # @return [void] + # + def pre_check(&block) + @pre_check_callback = block + end + + public + + # @!group Actions + #-----------------------------------------------------------------------# + + # Analyzes all the specification files in the source. + # + # @return [HealthReport] A report which contains the information about the + # state of the source. + # + def analyze + @report = HealthReport.new(source) + + source.pods.each do |name| + source.versions(name).each do |version| + @pre_check_callback.call(name, version) if @pre_check_callback + spec_path = source.specification_path(name, version) + spec = lint_spec(name, version, spec_path) + check_spec_path(name, version, spec) if spec + report.analyzed_paths << spec_path + end + end + + check_stray_specs + report + end + + # @return [HealtReport] The report produced by the analysis. + # + attr_reader :report + + private + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # Checks the validity of the specification with the linter. + # + # @param [String] name + # The name of the Pod. + # + # @param [Version] version + # The version of the specification. + # + # @param [Pathname] spec_path + # The path of the specification to check. + # + # @return [Specification] The specification loaded by the linter. + # @return [Nil] If the specifications raised during evaluation. + # + def lint_spec(name, version, spec_path) + linter = Specification::Linter.new(spec_path) + linter.lint + linter.results.each do |result| + next if result.public_only? + report.add_message(result.type, result.message, name, version) + end + linter.spec + end + + # Ensures that the name and the version of the specification correspond + # to the ones expected by the repo given its path. 
+ # + # @param [String] name + # The name of the Pod. + # + # @param [Version] version + # The version of the specification. + # + # @param [Specification] spec + # The specification to check. + # + # @return [void] + # + def check_spec_path(name, version, spec) + unless spec.name == name && spec.version.to_s == version.to_s + message = "Incorrect path #{spec.defined_in_file}" + report.add_message(:error, message, name, spec.version) + end + end + + # Checks for any stray specification in the repo. + # + # @param [Array] analyzed_paths + # The specification to check. + # + # @return [void] + # + def check_stray_specs + all_paths = Pathname.glob(source.repo + '**/*.podspec{,.json}') + stray_specs = all_paths - report.analyzed_paths + stray_specs.each do |path| + report.add_message(:error, 'Stray spec', path) + end + end + + #-----------------------------------------------------------------------# + + # Encapsulates the information about the state of a repo. + # + class HealthReport + # @return [Source] the source analyzed. + # + attr_reader :source + + # @param [Source] @see source. + # + def initialize(source) + @source = source + @analyzed_paths = [] + @pods_by_error = {} + @pods_by_warning = {} + end + + # @return [Array] The list of the analyzed paths. + # + attr_accessor :analyzed_paths + + # @return [Hash{ String => Hash }] The pods (the version grouped by + # name) grouped by an error message. + # + attr_accessor :pods_by_error + + # @return [Hash{ String => Hash }] The pods (the version grouped by + # name) grouped by a warning message. + # + attr_accessor :pods_by_warning + + # Adds a message with the given type for the specification with the + # given name and version. + # + # @param [Symbol] type + # The type of message. Either `:error` or `:warning`. + # + # @param [String] message + # The contents of the message. + # + # @param [String] spec_name + # The name of the Pod. + # + # @param [String] spec_version + # The version of the specification. + # + # @return [void] + # + def add_message(type, message, spec_name, spec_version = nil) + pods = send(:"pods_by_#{type}") + pods[message] ||= {} + pods[message][spec_name] ||= [] + pods[message][spec_name] << spec_version + end + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/manager.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/manager.rb new file mode 100644 index 0000000..6fc2b06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/manager.rb @@ -0,0 +1,488 @@ +require 'public_suffix' + +module Pod + class Source + class Manager + # @return [Pathname] The directory that contains the source repo + # directories. + # + attr_reader :repos_dir + + def initialize(repos_dir) + @repos_dir = Pathname(repos_dir).expand_path + end + + # @return [Array] The source repo directories. + # + def source_repos + return [] unless repos_dir.exist? + repos_dir.children.select(&:directory?).sort_by { |d| d.basename.to_s.downcase } + end + + # @return [Source::Aggregate] The aggregate of all the sources with the + # known Pods. + # + def aggregate + aggregate_with_repos(source_repos) + end + + # @return [Source::Aggregate] The aggregate of the sources from repos. + # + # @param [Dependency] dependency + # The dependency for which to find or build the appropriate. + # aggregate. 
If the dependency specifies a source podspec repo + # then only that source will be used, otherwise all sources + # will be used. + # + def aggregate_for_dependency(dependency) + return aggregate if dependency.podspec_repo.nil? + + source = source_with_url(dependency.podspec_repo) || source_with_name(dependency.podspec_repo) + return aggregate if source.nil? + + aggregate_with_repos([source.repo]) + end + + # @return [Array] The list of the sources with the given names. + # + # @param [Array<#to_s>] names + # The names of the sources. + # + def sources(names) + dirs = names.map { |name| source_dir(name) } + dirs.map { |repo| source_from_path(repo) } + end + + # @return [Array] The list of all the sources known to this + # installation of CocoaPods. + # + def all + aggregate.sources + end + + # @return [Array] The list of all the non-indexable sources known to this + # installation of CocoaPods. + # + def all_non_indexable + aggregate.sources.reject(&:indexable?) + end + + # @return [Array] The CocoaPods Master Repo source. + # + def master + sources([Pod::TrunkSource::TRUNK_REPO_NAME]).select { |s| s.repo.directory? } + end + + # @!group Master repo + + # @return [Pathname] The path of the master repo. + # + def master_repo_dir + source_dir(Pod::TrunkSource::TRUNK_REPO_NAME) + end + + # @return [Bool] Checks if the master repo is usable. + # + # @note Note this is used to automatically setup the master repo if + # needed. + # + def master_repo_functional? + return false unless master_repo = master.first + master_repo.metadata.compatible?(CORE_VERSION) + end + + # Search the appropriate sources to match the set for the given dependency. + # + # @return [Set, nil] a set for a given dependency including all the + # {Source} that contain the Pod. If no sources containing the + # Pod where found it returns nil. + # + # @raise If no source can be found that includes the dependency. + # + def search(dependency) + aggregate_for_dependency(dependency).search(dependency) + end + + # Search all the sources with the given search term. + # + # @param [String] query + # The search term. + # + # @param [Bool] full_text_search + # Whether the search should be limited to the name of the Pod or + # should include also the author, the summary, and the + # description. + # + # @raise If no source including the set can be found. + # + # @return [Array] The sets that contain the search term. + # + def search_by_name(query, full_text_search = false) + query_word_regexps = query.split.map { |word| /#{word}/i } + if full_text_search + query_word_results_hash = {} + updated_search_index.each_value do |word_spec_hash| + word_spec_hash.each_pair do |word, spec_names| + query_word_regexps.each do |query_word_regexp| + set = (query_word_results_hash[query_word_regexp] ||= Set.new) + set.merge(spec_names) if word =~ query_word_regexp + end + end + end + found_set_names = query_word_results_hash.values.reduce(:&) + found_set_names ||= [] + + sets_from_non_indexable = all_non_indexable.map { |s| s.search_by_name(query, true) }.flatten + + found_set_names += sets_from_non_indexable.map(&:name).flatten.uniq + + sets = found_set_names.map do |name| + aggregate.representative_set(name) + end + + # Remove nil values because representative_set return nil if no pod is found in any of the sources. + sets.compact! + else + sets = aggregate.search_by_name(query, false) + end + if sets.empty? 
+ extra = ', author, summary, or description' if full_text_search + raise Informative, "Unable to find a pod with name#{extra} " \ + "matching `#{query}`" + end + sorted_sets(sets, query_word_regexps) + end + + # Returns given set array by sorting it in-place. + # + # @param [Array] sets + # Array of sets to be sorted. + # + # @param [Array] query_word_regexps + # Array of regexp objects for user query. + # + # @return [Array] Given sets parameter itself after sorting it in-place. + # + def sorted_sets(sets, query_word_regexps) + sets.sort_by! do |set| + pre_match_length = nil + found_query_index = nil + found_query_count = 0 + query_word_regexps.each_with_index do |q, idx| + if (m = set.name.match(/#{q}/i)) + pre_match_length ||= m.pre_match.length + found_query_index ||= idx + found_query_count += 1 + end + end + pre_match_length ||= 1000 + found_query_index ||= 1000 + [-found_query_count, pre_match_length, found_query_index, set.name.downcase] + end + sets + end + + # Returns the search data. If a saved search data exists, retrieves it from file and returns it. + # Else, creates the search data from scratch, saves it to file system, and returns it. + # Search data is grouped by source repos. For each source, it contains a hash where keys are words + # and values are the pod names containing corresponding word. + # + # For each source, list of unique words are generated from the following spec information. + # - version + # - summary + # - description + # - authors + # + # @return [Hash{String => Hash{String => Array}}] The up to date search data. + # + def updated_search_index + index = stored_search_index || {} + indexable_sources.each do |source| + source_name = source.name + unless index[source_name] + CoreUI.print "Creating search index for spec repo '#{source_name}'.." + index[source_name] = aggregate.generate_search_index_for_source(source) + CoreUI.puts ' Done!' + end + end + save_search_index(index) + index + end + + # Updates the stored search index if there are changes in spec repos while updating them. + # Update is performed incrementally. Only the changed pods' search data is re-generated and updated. + # @param [Hash{Source => Array}] changed_spec_paths + # A hash containing changed specification paths for each source. + # + def update_search_index_if_needed(changed_spec_paths) + search_index = stored_search_index + return unless search_index + changed_spec_paths.each_pair do |source, spec_paths| + next unless source.indexable? + index_for_source = search_index[source.name] + next unless index_for_source && !spec_paths.empty? + updated_pods = source.pods_for_specification_paths(spec_paths) + + new_index = aggregate.generate_search_index_for_changes_in_source(source, spec_paths) + # First traverse search_index and update existing words + # Remove traversed words from new_index after adding to search_index, + # so that only non existing words will remain in new_index after enumeration completes. + index_for_source.each_pair do |word, _| + if new_index[word] + index_for_source[word] |= new_index[word] + new_index.delete(word) + else + index_for_source[word] -= updated_pods + end + end + + # Now add non existing words remained in new_index to search_index + index_for_source.merge!(new_index) + end + save_search_index(search_index) + end + + # Updates search index for changed pods in background + # @param [Hash{Source => Array}] changed_spec_paths + # A hash containing changed specification paths for each source. 
+ # + def update_search_index_if_needed_in_background(changed_spec_paths) + if Gem.win_platform? + update_search_index_if_needed(changed_spec_paths) + return + end + Process.fork do + Process.daemon + update_search_index_if_needed(changed_spec_paths) + exit + end + end + + # Returns the search data stored in the file system. + # If existing data in the file system is not valid, returns nil. + # + def stored_search_index + @updated_search_index ||= begin + if search_index_path.exist? + require 'json' + begin + index = JSON.parse(search_index_path.read) + unless index # JSON.parse("null") => nil + search_index_path.delete + return nil + end + + index if index.is_a?(Hash) # TODO: should we also check if hash has correct hierarchy? + rescue JSON::ParserError + search_index_path.delete + nil + end + end + end + end + + # Stores given search data in the file system. + # @param [Hash] index + # Index to be saved in file system + # + def save_search_index(index) + require 'json' + @updated_search_index = index + search_index_path.open('w') do |io| + io.write(@updated_search_index.to_json) + end + end + + # Allows to clear the search index. + # + attr_writer :updated_search_index + + # @return [Pathname] The path where the search index should be stored. + # + attr_accessor :search_index_path + + private + + # @return [Source] The Source at a given path. + # + # @param [Pathname] path + # The local file path to one podspec repo. + # + def source_from_path(path) + @sources_by_path ||= Hash.new do |hash, key| + hash[key] = case + when key.basename.to_s == Pod::TrunkSource::TRUNK_REPO_NAME + TrunkSource.new(key) + when (key + '.url').exist? + CDNSource.new(key) + else + Source.new(key) + end + end + @sources_by_path[path] + end + + # @return [Source::Aggregate] The aggregate of the sources from repos. + # + # @param [Array] repos + # The local file paths to one or more podspec repo caches. + # + def aggregate_with_repos(repos) + sources = repos.map { |path| source_from_path(path) } + @aggregates_by_repos ||= {} + @aggregates_by_repos[repos] ||= Source::Aggregate.new(sources) + end + + # @return [Source] The source with the given name. + # + # @param [String] name + # The name of the source. + # + def source_with_name(name) + source = sources([name]).first + return nil unless source.repo.exist? + source + end + + # @return [Source] The updateable source with the given name. If no updateable source + # with given name is found it raises. + # + # @param [String] name + # The name of the source. + # + def updateable_source_named(name) + specified_source = source_with_name(name) + unless specified_source + raise Informative, "Unable to find the `#{name}` repo." + end + unless specified_source.updateable? + raise Informative, "The `#{name}` repo is not a updateable repo." + end + specified_source + end + + # @return [Source] The list of the updateable sources. + # + def updateable_sources + all.select(&:updateable?) + end + + # @return [Source] The list of the indexable sources. + # + def indexable_sources + all.select(&:indexable?) + end + + # @return [Pathname] The path of the source with the given name. + # + # @param [String] name + # The name of the source. + # + def source_dir(name) + repos_dir + name + end + + # @return [Source] The source whose {Source#url} is equal to `url`. + # + # @param [String] url + # The URL of the source. 
+ # + def source_with_url(url) + url = canonic_url(url) + url = 'https://github.com/cocoapods/specs' if url =~ %r{github.com[:/]+cocoapods/specs} + all.find do |source| + source.url && canonic_url(source.url) == url + end + end + + def canonic_url(url) + url.downcase.gsub(/\.git$/, '').gsub(%r{\/$}, '') + end + + # Returns a suitable repository name for `url`. + # + # @example A GitHub.com URL + # + # name_for_url('https://github.com/Artsy/Specs.git') + # # "artsy" + # name_for_url('https://github.com/Artsy/Specs.git') + # # "artsy-1" + # + # @example A non-Github.com URL + # + # name_for_url('https://sourceforge.org/Artsy/Specs.git') + # # sourceforge-artsy + # + # @example A file URL + # + # name_for_url('file:///Artsy/Specs.git') + # # artsy + # + # @param [#to_s] url + # The URL of the source. + # + # @return [String] A suitable repository name for `url`. + # + def name_for_url(url) + base_from_host_and_path = lambda do |host, path| + if host && !host.empty? + domain = PublicSuffix.parse(host) rescue nil + base = [domain&.sld || host] + base = [] if base == %w(github) + else + base = [] + end + + path = path.gsub(/.git$/, '').gsub(%r{^/}, '').split('/') + path.pop if path.last == 'specs' + + (base + path).join('-') + end + + valid_url = lambda do |url| + url =~ URI.regexp && (URI(url) rescue false) + end + + valid_scp_url = lambda do |url| + valid_url['scp://' + url] + end + + url = url.to_s.downcase + + case url + when %r{https://#{Regexp.quote(trunk_repo_hostname)}}i + # Main CDN repo + base = Pod::TrunkSource::TRUNK_REPO_NAME + when valid_url + # HTTPS URL or something similar + url = valid_url[url] + base = base_from_host_and_path[url.host, url.path] + when valid_scp_url + # SCP-style URLs for private git repos + url = valid_scp_url[url] + base = base_from_host_and_path[url.host, url.path] + when %r{(?:git|ssh|https?|[a-z0-9_-]+@([-\w.]+)):(\/\/)?(.*?)(\.git)?(\/?|\#[-\d\w._]+?)$}i + # Additional SCP-style URLs for private git repos + host, _, path = Regexp.last_match.captures + base = base_from_host_and_path[host, path] + else + # This is nearly impossible, with all the previous cases + raise Informative, "Couldn't determine repo name for URL: #{url}" + end + + name = base + (1..).each do |i| + break unless source_dir(name).exist? + name = "#{base}-#{i}" + end + name + end + + # Returns hostname for for `trunk` URL. 
+ # + def trunk_repo_hostname + URI.parse(TrunkSource::TRUNK_REPO_URL).host.downcase.freeze + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/metadata.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/metadata.rb new file mode 100644 index 0000000..3180e00 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/source/metadata.rb @@ -0,0 +1,79 @@ +autoload :Digest, 'digest/md5' +require 'active_support/hash_with_indifferent_access' +require 'active_support/core_ext/hash/indifferent_access' + +module Pod + class Source + class Metadata + attr_reader :minimum_cocoapods_version + attr_reader :maximum_cocoapods_version + attr_reader :latest_cocoapods_version + attr_reader :prefix_lengths + attr_reader :last_compatible_versions + + def initialize(hash = {}) + hash = hash.with_indifferent_access + @minimum_cocoapods_version = hash['min'] + @minimum_cocoapods_version &&= Pod::Version.new(@minimum_cocoapods_version) + @maximum_cocoapods_version = hash['max'] + @maximum_cocoapods_version &&= Pod::Version.new(@maximum_cocoapods_version) + @latest_cocoapods_version = hash['last'] + @latest_cocoapods_version &&= Pod::Version.new(@latest_cocoapods_version) + @prefix_lengths = Array(hash['prefix_lengths']).map!(&:to_i) + @last_compatible_versions = Array(hash['last_compatible_versions']).map(&Pod::Version.method(:new)).sort + end + + def self.from_file(file) + hash = file.file? ? YAMLHelper.load_file(file) : {} + new(hash) + end + + def to_hash + hash = ActiveSupport::HashWithIndifferentAccess.new + hash['min'] = @minimum_cocoapods_version.to_s if @minimum_cocoapods_version + hash['max'] = @maximum_cocoapods_version.to_s if @maximum_cocoapods_version + hash['last'] = @latest_cocoapods_version.to_s if @latest_cocoapods_version + hash['prefix_lengths'] = @prefix_lengths unless @prefix_lengths.empty? + hash['last_compatible_versions'] = @last_compatible_versions.map(&:to_s) unless @last_compatible_versions.empty? + hash + end + + def path_fragment(pod_name, version = nil) + prefixes = if prefix_lengths.empty? + [] + else + hashed = Digest::MD5.hexdigest(pod_name) + prefix_lengths.map do |length| + hashed.slice!(0, length) + end + end + prefixes.concat([pod_name, version]).compact + end + + def last_compatible_version(target_version) + return unless minimum_cocoapods_version + return if minimum_cocoapods_version <= target_version + @last_compatible_versions.reverse.bsearch { |v| v <= target_version }.tap do |version| + raise Informative, 'Unable to find compatible version' unless version + end + end + + # Returns whether a source is compatible with the current version of + # CocoaPods. + # + # @param [Pathname] dir + # The directory where the source is stored. + # + # @return [Bool] whether the source is compatible. 
+ # + def compatible?(version) + bin_version = Gem::Version.new(version) + supports_min = !minimum_cocoapods_version || + (bin_version >= Gem::Version.new(minimum_cocoapods_version)) + supports_max = !maximum_cocoapods_version || + (bin_version <= Gem::Version.new(maximum_cocoapods_version)) + supports_min && supports_max + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification.rb new file mode 100644 index 0000000..cca80bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification.rb @@ -0,0 +1,843 @@ +require 'active_support/core_ext/string/strip.rb' + +require 'cocoapods-core/specification/consumer' +require 'cocoapods-core/specification/dsl' +require 'cocoapods-core/specification/linter' +require 'cocoapods-core/specification/root_attribute_accessors' +require 'cocoapods-core/specification/set' +require 'cocoapods-core/specification/json' + +module Pod + # The Specification provides a DSL to describe a Pod. A pod is defined as a + # library originating from a source. A specification can support detailed + # attributes for modules of code through subspecs. + # + # Usually it is stored in files with `podspec` extension. + # + class Specification + include Pod::Specification::DSL + include Pod::Specification::DSL::Deprecations + include Pod::Specification::RootAttributesAccessors + include Pod::Specification::JSONSupport + + # @return [Specification] the parent of the specification unless the + # specification is a root. + # + attr_reader :parent + + # @return [Integer] the cached hash value for this spec. + # + attr_reader :hash_value + + # @param [Specification] parent @see parent + # + # @param [String] name + # the name of the specification. + # + # @param [Bool] test_specification + # Whether the specification is a test specification + # + # @param [Bool] app_specification + # Whether the specification is an app specification + # + def initialize(parent = nil, name = nil, test_specification = false, app_specification: false) + raise StandardError, "#{self} can not be both an app and test specification." if test_specification && app_specification + @attributes_hash = {} + @subspecs = [] + @consumers = {} + @parent = parent + @hash_value = nil + @test_specification = test_specification + @app_specification = app_specification + attributes_hash['name'] = name + attributes_hash['test_type'] = :unit if test_specification + + yield self if block_given? + end + + def initialize_copy(other) + super + + @subspecs = @subspecs.map do |subspec| + subspec = subspec.dup + subspec.instance_variable_set :@parent, self + subspec + end + end + + # @return [Hash] the hash that stores the information of the attributes of + # the specification. + # + attr_accessor :attributes_hash + + # @return [Array] The subspecs of the specification. + # + attr_accessor :subspecs + + # @return [Bool] If this specification is a test specification. + # + attr_accessor :test_specification + alias_method :test_specification?, :test_specification + + # @return [Bool] If this specification is an app specification. + # + attr_accessor :app_specification + alias_method :app_specification?, :app_specification + + # Checks if a specification is equal to the given one according its name + # and to its version. + # + # @param [Specification] other + # the specification to compare with. 
+ # + # @todo Not sure if comparing only the name and the version is the way to + # go. This is used by the installer to group specifications by root + # spec. + # + # @return [Bool] Whether the specifications are equal. + # + def ==(other) + other.is_a?(self.class) && + name == other.name && + version == other.version + end + + alias_method :eql?, :== + + # Return the hash value for this specification according to its attributes + # hash. + # + # @note This function must have the property that a.eql?(b) implies + # a.hash == b.hash. + # + # @note This method is used by the Hash class. + # + # @return [Fixnum] The hash value. + # + def hash + if @hash_value.nil? + @hash_value = (name.hash * 53) ^ version.hash + end + @hash_value + end + + # @return [String] A string suitable for representing the specification in + # clients. + # + def to_s + specified_version = raw_version || '' + if name && !specified_version.empty? + "#{name} (#{specified_version})" + elsif name + name + else + 'No-name' + end + end + + # @return [String] A string suitable for debugging. + # + def inspect + "#<#{self.class.name} name=#{name.inspect}>" + end + + # @param [String] string_representation + # the string that describes a {Specification} generated from + # {Specification#to_s}. + # + # @example Input examples + # + # "libPusher (1.0)" + # "RestKit/JSON (1.0)" + # + # @return [Array] the name and the version of a + # pod. + # + def self.name_and_version_from_string(string_representation) + match_data = string_representation.match(/\A((?:\s?[^\s(])+)(?: \((.+)\))?\Z/) + unless match_data + raise Informative, 'Invalid string representation for a ' \ + "specification: `#{string_representation}`. " \ + 'The string representation should include the name and ' \ + 'optionally the version of the Pod.' + end + name = match_data[1] + vers = Version.new(match_data[2]) + [name, vers] + end + + # Returns the root name of a specification. + # + # @param [String] the name of a specification or of a subspec. + # + # @return [String] the root name + # + def self.root_name(full_name) + full_name.split('/', 2).first + end + + # Returns the module name of a specification + # + # @return [String] the module name + # + def module_name + attributes_hash['module_name'] || + c99ext_identifier(attributes_hash['header_dir']) || + c99ext_identifier(attributes_hash['name']) + end + + private + + # Transforms the given string into a valid +identifier+ after C99ext + # standard, so that it can be used in source code where escaping of + # ambiguous characters is not applicable. + # + # @param [String] name + # any name, which may contain leading numbers, spaces or invalid + # characters. + # + # @return [String] + # + def c99ext_identifier(name) + return nil if name.nil? + I18n.transliterate(name).gsub(/^([0-9])/, '_\1'). + gsub(/[^a-zA-Z0-9_]/, '_').gsub(/_+/, '_') + end + + # @return [Object, Nil] + # the raw value specified for the version attribute, or nil + # + def raw_version + root.attributes_hash['version'] + end + + #-------------------------------------------------------------------------# + + public + + # @!group Hierarchy + + # @return [Specification] The root specification or itself if it is root. + # + def root + parent ? parent.root : self + end + + # @return [Bool] whether the specification is root. + # + def root? + parent.nil? + end + + # @return [Bool] whether the specification is a subspec. + # + def subspec? + !parent.nil? 
+ end + + #-------------------------------------------------------------------------# + + public + + # @return [Symbol] Spec type of the current spec. + # + # @note see Attribute#SUPPORTED_SPEC_TYPES for the list of available spec_types. + # + def spec_type + return :app if app_specification? + return :test if test_specification? + + :library + end + + # @!group Dependencies & Subspecs + + # @return [Bool] If this specification is a library specification. + # + # @note a library specification is a specification that is not of type app or test. + # + def library_specification? + !app_specification? && !test_specification? + end + + # @return [Bool] If this specification is not a library specification. + # + # @note see #library_specification? + # + def non_library_specification? + !library_specification? + end + + # @return [Symbol] the test type supported if this is a test specification. + # + def test_type + attributes_hash['test_type'].to_sym + end + + # @return [Array] the list of all the test subspecs of + # a specification. + # + def test_specs + subspecs.select(&:test_specification?) + end + + # @return [Array] the list of all the app subspecs of + # a specification. + # + def app_specs + subspecs.select(&:app_specification?) + end + + # @return [Array] the list of all the non libary (app or test) subspecs of + # a specification. + # + def non_library_specs + subspecs.select(&:non_library_specification?) + end + + # @return [Array] the recursive list of all the subspecs of + # a specification. + # + def recursive_subspecs + mapper = lambda do |spec| + spec.subspecs.map do |subspec| + [subspec, *mapper.call(subspec)] + end.flatten + end + mapper.call(self) + end + + # Returns the subspec with the given name or the receiver if the name is + # nil or equal to the name of the receiver. + # + # @param [String] relative_name + # the relative name of the subspecs starting from the receiver + # including the name of the receiver. + # + # @param [Boolean] raise_if_missing + # whether an exception should be raised if no specification named + # `relative_name` is found. + # + # @example Retrieving a subspec + # + # s.subspec_by_name('Pod/subspec').name #=> 'subspec' + # + # @return [Specification] the subspec with the given name or self. + # + def subspec_by_name(relative_name, raise_if_missing = true, include_non_library_specifications = false) + if relative_name.nil? || relative_name == base_name + self + elsif relative_name.downcase == base_name.downcase + raise Informative, "Trying to access a `#{relative_name}` " \ + "specification from `#{base_name}`, which has a different case." + else + remainder = relative_name[base_name.size + 1..-1] + subspec_name = remainder.split('/').shift + subspec = subspecs.find { |s| s.base_name == subspec_name && (include_non_library_specifications || !s.non_library_specification?) } + unless subspec + if raise_if_missing + raise Informative, 'Unable to find a specification named ' \ + "`#{relative_name}` in `#{name} (#{version})`." + else + return nil + end + end + subspec.subspec_by_name(remainder, raise_if_missing, include_non_library_specifications) + end + end + + # @return [Array, Symbol] the name(s) of the default subspecs if provided or :none for no default subspecs. + # + def default_subspecs + # TODO: remove singular form and update the JSON specs. 
+ value = Array(attributes_hash['default_subspecs'] || attributes_hash['default_subspec']) + first = value.first + if first == :none || first == 'none' + first.to_sym + else + value + end + end + + # Returns the dependencies on subspecs. + # + # @note A specification has a dependency on either the + # {#default_subspecs} or each of its children subspecs that are + # compatible with its platform. + # + # @param [Platform] platform + # return only dependencies supported on the given platform. + # + # @return [Array] the dependencies on subspecs. + # + def subspec_dependencies(platform = nil) + specs = if default_subspecs.empty? + subspecs.compact.reject(&:non_library_specification?) + elsif default_subspecs == :none + [] + else + default_subspecs.map do |subspec_name| + root.subspec_by_name("#{name}/#{subspec_name}") + end + end + if platform + specs = specs.select { |s| s.supported_on_platform?(platform) } + end + specs.map { |s| Dependency.new(s.name, version) } + end + + # Returns the dependencies on other Pods or subspecs of other Pods. + # + # @param [Platform] platform + # return only dependencies supported on the given platform. + # + # @note External dependencies are inherited by subspecs + # + # @return [Array] the dependencies on other Pods. + # + def dependencies(platform = nil) + if platform + consumer(platform).dependencies || [] + else + available_platforms.map do |spec_platform| + consumer(spec_platform).dependencies + end.flatten.uniq + end + end + + # @return [Array] all the dependencies of the specification. + # + def all_dependencies(platform = nil) + dependencies(platform) + subspec_dependencies(platform) + end + + # Returns whether a dependency is whitelisted for the given configuration. + # + # @param [Pod::Dependency] dependency + # the dependency verify. + # + # @param [Symbol, String] configuration + # the configuration to check against. + # + # @return [Bool] whether the dependency is whitelisted or not. + # + def dependency_whitelisted_for_configuration?(dependency, configuration) + inherited = -> { root? ? true : parent.dependency_whitelisted_for_configuration?(dependency, configuration) } + + return inherited[] unless configuration_whitelist = attributes_hash['configuration_pod_whitelist'] + return inherited[] unless whitelist_for_pod = configuration_whitelist[dependency.name] + + whitelist_for_pod.include?(configuration.to_s.downcase) + end + + # Returns a consumer to access the multi-platform attributes. + # + # @param [String, Symbol, Platform] platform + # the platform of the consumer + # + # @return [Specification::Consumer] the consumer for the given platform + # + def consumer(platform) + platform = platform.to_sym + @consumers[platform] ||= Consumer.new(self, platform) + end + + # @return [Bool, String] The prefix_header_file value. + # + def prefix_header_file + attributes_hash['prefix_header_file'] + end + + # @return [ArrayString}>] The script_phases value. + # + def script_phases + script_phases = attributes_hash['script_phases'] || [] + script_phases.map do |script_phase| + phase = Specification.convert_keys_to_symbol(script_phase) + phase[:execution_position] = if phase.key?(:execution_position) + phase[:execution_position].to_sym + else + :any + end + phase + end + end + + # @return [Hash] The on demand resources value. + # + def on_demand_resources + attributes_hash['on_demand_resources'] || {} + end + + # @return [Hash] The scheme value. 
+ # + def scheme + value = attributes_hash['scheme'] || {} + Specification.convert_keys_to_symbol(value, :recursive => false) + end + + # @return [Hash] The Info.plist value. + # + def info_plist + attributes_hash['info_plist'] || {} + end + + #-------------------------------------------------------------------------# + + public + + # @!group DSL helpers + + # @return [Bool] whether the specification should use a directory as its + # source. + # + def local? + return true if source[:path] + false + end + + # @return [Bool] whether the specification is supported in the given + # platform. + # + # @overload supported_on_platform?(platform) + # + # @param [Platform] platform + # the platform which is checked for support. + # + # @overload supported_on_platform?(symbolic_name, deployment_target) + # + # + def supported_on_platform?(*platform) + platform = Platform.new(*platform) + available_platforms.any? { |available| platform.supports?(available) } + end + + # @return [Array] The platforms that the Pod is supported on. + # + # @note If no platform is specified, this method returns all known + # platforms. + # + def available_platforms + names = supported_platform_names + names = PLATFORMS if names.empty? + names.map { |name| Platform.new(name, deployment_target(name)) } + end + + # Returns the deployment target for the specified platform. + # + # @param [String] platform_name + # the symbolic name of the platform. + # + # @return [String] the deployment target + # @return [Nil] if not deployment target was specified for the platform. + # + def deployment_target(platform_name) + result = platform_hash[platform_name.to_s] + result ||= parent.deployment_target(platform_name) if parent + result + end + + protected + + # @return [Array[Symbol]] the symbolic name of the platform in which the + # specification is supported. + # + # @return [Nil] if the specification is supported on all the known + # platforms. + # + def supported_platform_names + result = platform_hash.keys + if result.empty? && parent + result = parent.supported_platform_names + end + result + end + + # @return [Hash] the normalized hash which represents the platform + # information. + # + def platform_hash + case value = attributes_hash['platforms'] + when String + { value => nil } + when Array + result = {} + value.each do |a_value| + result[a_value] = nil + end + result + when Hash + value + else + {} + end + end + + public + + #-------------------------------------------------------------------------# + + # @!group DSL attribute writers + + # Sets the value for the attribute with the given name. + # + # @param [Symbol] name + # the name of the attribute. + # + # @param [Object] value + # the value to store. + # + # @param [Symbol] platform_name + # If provided the attribute is stored only for the given platform. + # + # @note If the provides value is Hash the keys are converted to a string. + # + # @return void + # + def store_attribute(name, value, platform_name = nil) + name = name.to_s + value = Specification.convert_keys_to_string(value) if value.is_a?(Hash) + value = value.strip_heredoc.strip if value.respond_to?(:strip_heredoc) + if platform_name + platform_name = platform_name.to_s + attributes_hash[platform_name] ||= {} + attributes_hash[platform_name][name] = value + else + attributes_hash[name] = value + end + end + + # Defines the setters methods for the attributes providing support for the + # Ruby DSL. 
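+ # For instance (editorial illustration, not part of the upstream gem
+ # sources), an attribute declared with `:singularize => true`, such as
+ # `frameworks`, is expected to end up with both spellings of the writer:
+ #
+ # spec.frameworks = 'QuartzCore', 'CoreData'
+ # spec.framework = 'SystemConfiguration' # singular alias defined below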
+ # + DSL.attributes.values.each do |a| + define_method(a.writer_name) do |value| + store_attribute(a.name, value) + end + + if a.writer_singular_form + alias_method(a.writer_singular_form, a.writer_name) + end + end + + # Converts the keys of the given hash to a string. + # + # @param [Object] value + # the value that needs to be stripped from the Symbols. + # + # @param [Boolean] recursive + # whether to convert keys of nested hashes. + # + # @return [Hash] the hash with the keys as strings instead of symbols. + # + def self.convert_keys_to_string(value, recursive: true) + return unless value + result = {} + value.each do |key, subvalue| + subvalue = Specification.convert_keys_to_string(subvalue) if recursive && subvalue.is_a?(Hash) + result[key.to_s] = subvalue + end + result + end + + # Converts the keys of the given hash to a symbol. + # + # @param [Object] value + # the value that needs to be stripped from the Strings. + # + # @param [Boolean] recursive + # whether to convert keys of nested hashes. + # + # @return [Hash] the hash with the keys as symbols instead of strings. + # + def self.convert_keys_to_symbol(value, recursive: true) + return unless value + result = {} + value.each do |key, subvalue| + subvalue = Specification.convert_keys_to_symbol(subvalue) if recursive && subvalue.is_a?(Hash) + result[key.to_sym] = subvalue + end + result + end + + #-------------------------------------------------------------------------# + + public + + # @!group File representation + + # @return [String] The SHA1 digest of the file in which the specification + # is defined. + # + # @return [Nil] If the specification is not defined in a file. + # + def checksum + @checksum ||= begin + if root? + unless defined_in_file.nil? + require 'digest' + checksum = Digest::SHA1.hexdigest(File.read(defined_in_file)) + checksum = checksum.encode('UTF-8') if checksum.respond_to?(:encode) + checksum + end + else + root.checksum + end + end + end + + # @return [String] the path where the specification is defined, if loaded + # from a file. + # + def defined_in_file + root? ? @defined_in_file : root.defined_in_file + end + + # Loads a specification form the given path. + # + # @param [Pathname, String] path + # the path of the `podspec` file. + # + # @param [String] subspec_name + # the name of the specification that should be returned. If it is + # nil returns the root specification. + # + # @raise If the file doesn't return a Pods::Specification after + # evaluation. + # + # @return [Specification] the specification + # + def self.from_file(path, subspec_name = nil) + path = Pathname.new(path) + unless path.exist? + raise Informative, "No podspec exists at path `#{path}`." + end + + string = File.open(path, 'r:utf-8', &:read) + # Work around for Rubinius incomplete encoding in 1.9 mode + if string.respond_to?(:encoding) && string.encoding.name != 'UTF-8' + string.encode!('UTF-8') + end + + from_string(string, path, subspec_name) + end + + # Loads a specification with the given string. + # The specification is evaluated in the context of `path`. + # + # @param [String] spec_contents + # A string describing a specification. + # + # @param [Pathname, String] path @see from_file + # @param [String] subspec_name @see from_file + # + # @return [Specification] the specification + # + def self.from_string(spec_contents, path, subspec_name = nil) + path = Pathname.new(path).expand_path + spec = nil + case path.extname + when '.podspec' + Dir.chdir(path.parent.directory? ? 
path.parent : Dir.pwd) do + spec = ::Pod._eval_podspec(spec_contents, path) + unless spec.is_a?(Specification) + raise Informative, "Invalid podspec file at path `#{path}`." + end + end + when '.json' + spec = Specification.from_json(spec_contents) + else + raise Informative, "Unsupported specification format `#{path.extname}` for spec at `#{path}`." + end + + spec.defined_in_file = path + spec.subspec_by_name(subspec_name, true) + end + + # Sets the path of the `podspec` file used to load the specification. + # + # @param [String] file + # the `podspec` file. + # + # @return [void] + # + # @visibility private + # + def defined_in_file=(file) + unless root? + raise StandardError, 'Defined in file can be set only for root specs.' + end + @defined_in_file = file + end + + # Sets the name of the `podspec`. + # + # @param [String] name + # the `podspec` name. + # + # @return [void] + # + # @visibility private + # + def name=(name) + @hash_value = nil + attributes_hash['name'] = name + end + + # Sets the version of the `podspec`. + # + # @param [String] version + # the `podspec` version. + # + # @return [void] + # + # @visibility private + # + def version=(version) + @hash_value = nil + store_attribute(:version, version) + @version = nil + end + + # @!group Validation + + # Validates the cocoapods_version in the specification against the current version of Core. + # It will raise an Informative error if the version is not satisfied. + # + def validate_cocoapods_version + unless cocoapods_version.satisfied_by?(Version.create(CORE_VERSION)) + raise Informative, "`#{name}` requires CocoaPods version `#{cocoapods_version}`, " \ + "which is not satisfied by your current version, `#{CORE_VERSION}`." + end + end + end + + #---------------------------------------------------------------------------# + + # @visibility private + # + # Evaluates the given string in the namespace of the Pod module. + # + # @param [String] string + # The string containing the Ruby description of the Object to + # evaluate. + # + # @param [Pathname] path + # The path where the object to evaluate is stored. + # + # @return [Object] it can return any object but, is expected to be called on + # `podspec` files that should return a #{Specification}. + # + # + def self._eval_podspec(string, path) + # rubocop:disable Security/Eval + eval(string, nil, path.to_s) + # rubocop:enable Security/Eval + + # rubocop:disable Lint/RescueException + rescue Exception => e + # rubocop:enable Lint/RescueException + message = "Invalid `#{path.basename}` file: #{e.message}" + raise DSLError.new(message, path, e, string) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/consumer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/consumer.rb new file mode 100644 index 0000000..5d82818 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/consumer.rb @@ -0,0 +1,515 @@ +require 'cocoapods-core/specification/root_attribute_accessors' + +module Pod + class Specification + # Allows to conveniently access a Specification programmatically. + # + # It takes care of: + # + # - standardizing the attributes + # - handling multi-platform values + # - handle default values + # - handle automatic container wrapping of values + # - handle inherited values + # + # This class allows to store the values of the attributes in the + # Specification as specified in the DSL. 
The benefits is reduced reliance + # on meta programming to access the attributes and the possibility of + # serializing a specification back exactly as defined in a file. + # + class Consumer + # @return [Specification] The specification to consume. + # + attr_reader :spec + + # @return [Symbol] The name of the platform for which the specification + # needs to be consumed. + # + attr_reader :platform_name + + # @param [Specification] spec @see spec + # @param [Symbol, Platform] platform + # The platform for which the specification needs to be consumed. + # + def initialize(spec, platform) + @spec = spec + @platform_name = platform.is_a?(Symbol) ? platform : platform.name + + unless spec.supported_on_platform?(platform) + raise StandardError, "#{self} is not compatible with #{platform}." + end + end + + # Creates a method to access the contents of the attribute. + # + # @param [Symbol] name + # the name of the attribute. + # + # @macro [attach] + # @!method $1 + # + def self.spec_attr_accessor(name) + define_method(name) do + value_for_attribute(name) + end + end + + DSL::RootAttributesAccessors.instance_methods.each do |root_accessor| + define_method(root_accessor) do + spec.root.send(root_accessor) + end + end + + #-----------------------------------------------------------------------# + + # @!group Regular attributes + + # @return [String] The name of the specification. + # + spec_attr_accessor :name + + # @return [Bool] Whether the source files of the specification require to + # be compiled with ARC. + # + spec_attr_accessor :requires_arc + alias_method :requires_arc?, :requires_arc + + # @return [Array] A list of frameworks that the user’s target + # needs to link against + # + spec_attr_accessor :frameworks + + # @return [Array] A list of frameworks that the user’s target + # needs to **weakly** link against + # + spec_attr_accessor :weak_frameworks + + # @return [Array] A list of libraries that the user’s target + # needs to link against + # + spec_attr_accessor :libraries + + # @return [Array] the list of compiler flags needed by the + # specification files. + # + spec_attr_accessor :compiler_flags + + # @return [Hash{String => String}] the xcconfig flags for the current + # specification for the pod target. + # + def pod_target_xcconfig + attr = Specification::DSL.attributes[:pod_target_xcconfig] + merge_values(attr, value_for_attribute(:xcconfig), value_for_attribute(:pod_target_xcconfig)) + end + + # @return [Hash{String => String}] the xcconfig flags for the current + # specification for the user target. + # + def user_target_xcconfig + attr = Specification::DSL.attributes[:user_target_xcconfig] + merge_values(attr, value_for_attribute(:xcconfig), value_for_attribute(:user_target_xcconfig)) + end + + # @return [Hash{String => String}] the Info.plist values for the current specification + # + spec_attr_accessor :info_plist + + # @return [String] The contents of the prefix header. + # + spec_attr_accessor :prefix_header_contents + + # @return [String] The path of the prefix header file. + # + spec_attr_accessor :prefix_header_file + + # @return [String] the module name. + # + spec_attr_accessor :module_name + + # @return [String] the path of the module map file. + # + spec_attr_accessor :module_map + + # @return [String] the headers directory. + # + spec_attr_accessor :header_dir + + # @return [String] the directory from where to preserve the headers + # namespacing. 
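+ #
+ # @example (editorial sketch, not part of the upstream gem sources) as
+ # with the other readers in this group, the value is reached through
+ # a platform-specific consumer:
+ #
+ # spec.consumer(:ios).header_mappings_dir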
+ # + spec_attr_accessor :header_mappings_dir + + #-----------------------------------------------------------------------# + + # @!group Test Support + + # @return [Bool] Whether this test specification requires an app host. + # + spec_attr_accessor :requires_app_host + alias_method :requires_app_host?, :requires_app_host + + # @return [String] Name of the app host this spec requires + # + spec_attr_accessor :app_host_name + + # @return [Symbol] the test type supported by this specification. + # + spec_attr_accessor :test_type + + #-----------------------------------------------------------------------# + + # @!group File patterns + + # @return [Array] the source files of the Pod. + # + spec_attr_accessor :source_files + + # @return [Array] the public headers of the Pod. + # + spec_attr_accessor :public_header_files + + # @return [Array] the project headers of the Pod. + # + spec_attr_accessor :project_header_files + + # @return [Array] the private headers of the Pod. + # + spec_attr_accessor :private_header_files + + # @return [Array] The paths of the framework bundles shipped with + # the Pod. + # + spec_attr_accessor :vendored_frameworks + + # @return [Array] The paths of the libraries shipped with the + # Pod. + # + spec_attr_accessor :vendored_libraries + + # @return [Hash{String => Array}] hash where the keys are the tags of + # the on demand resources and the values are their relative file + # patterns. + # + spec_attr_accessor :on_demand_resources + + # @return [Hash{String=>String}]] hash where the keys are the names of + # the resource bundles and the values are their relative file + # patterns. + # + spec_attr_accessor :resource_bundles + + # @return [ArrayString}>] An array of hashes where each hash + # represents a script phase. + # + spec_attr_accessor :script_phases + + # @return [Hash] A hash that contains the scheme configuration. + # + spec_attr_accessor :scheme + + # @return [Array] A hash where the key represents the + # paths of the resources to copy and the values the paths of + # the resources that should be copied. + # + spec_attr_accessor :resources + + # @return [Array] The file patterns that the + # Pod should ignore. + # + spec_attr_accessor :exclude_files + + # @return [Array] The paths that should be not + # cleaned. + # + spec_attr_accessor :preserve_paths + + #-----------------------------------------------------------------------# + + # @return [Array] the dependencies on other Pods. + # + def dependencies + value = value_for_attribute(:dependencies) + value.map do |name, requirements| + Dependency.new(name, requirements) + end + end + + # Raw values need to be prepared as soon as they are read so they can be + # safely merged to support multi platform attributes and inheritance + #-----------------------------------------------------------------------# + + # Returns the value for the attribute with the given name for the + # specification. It takes into account inheritance, multi-platform + # attributes and default values. + # + # @param [Symbol] attr_name + # The name of the attribute. + # + # @return [String, Array, Hash] the value for the attribute. + # + def value_for_attribute(attr_name) + attr = Specification::DSL.attributes[attr_name] + value = value_with_inheritance(spec, attr) + value = attr.default(platform_name) if value.nil? + value = attr.container.new if value.nil? && attr.container + value + end + + # Returns the value of a given attribute taking into account inheritance. 
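+ # (Editorial illustration, not part of the upstream gem sources.) For an
+ # inherited attribute with an Array container, such as `frameworks`, a
+ # parent value of `['Foundation']` and a subspec value of `['UIKit']` are
+ # expected to merge into `['Foundation', 'UIKit']` via #merge_values.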
+ # + # @param [Specification] the_spec + # the specification for which the value is needed. + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @return [String, Array, Hash] the value for the attribute. + # + def value_with_inheritance(the_spec, attr) + value = raw_value_for_attribute(the_spec, attr) + if the_spec.root? || !attr.inherited? + return value + end + + parent_value = value_with_inheritance(the_spec.parent, attr) + merge_values(attr, parent_value, value) + end + + # Returns the value of a given attribute taking into account multi + # platform values. + # + # @param [Specification] the_spec + # the specification for which the value is needed. + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @return [String, Array, Hash] The value for an attribute. + # + def raw_value_for_attribute(the_spec, attr) + value = the_spec.attributes_hash[attr.name.to_s] + value = prepare_value(attr, value) + + if attr.multi_platform? + if platform_hash = the_spec.attributes_hash[platform_name.to_s] + platform_value = platform_hash[attr.name.to_s] + platform_value = prepare_value(attr, platform_value) + value = merge_values(attr, value, platform_value) + end + end + value + end + + # Merges the values of an attribute, either because the attribute is + # multi platform or because it is inherited. + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @param [String, Array, Hash] existing_value + # the current value (the value of the parent or non-multiplatform + # value). + # + # @param [String, Array, Hash] new_value + # the value to append (the value of the spec or the + # multi-platform value). + # + # @return [String, Array, Hash] The merged value. + # + def merge_values(attr, existing_value, new_value) + return existing_value if new_value.nil? + return new_value if existing_value.nil? + + if attr.types.include?(TrueClass) + new_value.nil? ? existing_value : new_value + elsif attr.container == Array + r = [*existing_value] + [*new_value] + r.compact + elsif attr.container == Hash + existing_value.merge(new_value) do |_, old, new| + merge_hash_value(attr, old, new) + end + else + new_value + end + end + + # Wraps a value in an Array if needed and calls the prepare hook to + # allow further customization of a value before storing it in the + # instance variable. + # + # @note Only array containers are wrapped. To automatically wrap + # values for attributes with hash containers a prepare hook + # should be used. + # + # @return [Object] the customized value of the original one if no + # prepare hook was defined. + # + def prepare_value(attr, value) + if attr.container == Array + value = if value.is_a?(Hash) + [value] + else + [*value].compact + end + end + + hook_name = prepare_hook_name(attr) + if self.respond_to?(hook_name, true) + send(hook_name, value) + else + value + end + end + + private + + # Merges two values in a hash together based on the needs of the attribute + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @param [Object] old the value from the original hash + # + # @param [Object] new the value from the new hash + # + # @return [Object] the merged value + # + def merge_hash_value(attr, old, new) + case attr.name + when :info_plist + new + when ->(name) { spec.non_library_specification? 
&& [:pod_target_xcconfig, :user_target_xcconfig, :xcconfig].include?(name) } + new + else + if new.is_a?(Array) || old.is_a?(Array) + r = Array(old) + Array(new) + r.compact + else + old + ' ' + new + end + end + end + + # @!group Preparing Values + #-----------------------------------------------------------------------# + + # @return [String] the name of the prepare hook for this attribute. + # + # @note The hook is called after the value has been wrapped in an + # array (if needed according to the container) but before + # validation. + # + def prepare_hook_name(attr) + "_prepare_#{attr.name}" + end + + # Converts the prefix header to a string if specified as an array. + # + # @param [String, Array] value. + # The value of the attribute as specified by the user. + # + # @return [String] the prefix header. + # + def _prepare_prefix_header_contents(value) + if value + value = value.join("\n") if value.is_a?(Array) + value.strip_heredoc.chomp + end + end + + # Converts the test type value from a string to a symbol. + # + # @param [String, Symbol] value. + # The value of the test type attributed as specified by the user. + # + # @return [Symbol] the test type as a symbol. + # + def _prepare_test_type(value) + if value + value.to_sym + end + end + + # Converts the array of hashes (script phases) where keys are strings into symbols. + # + # @param [ArrayString}>] value. + # The value of the attribute as specified by the user. + # + # @return [ArrayString}>] the script phases array with symbols for each hash instead of strings. + # + def _prepare_script_phases(value) + if value + value.map do |script_phase| + if script_phase.is_a?(Hash) + phase = Specification.convert_keys_to_symbol(script_phase) + phase[:execution_position] = if phase.key?(:execution_position) + phase[:execution_position].to_sym + else + :any + end + phase + end + end.compact + end + end + + # Converts the a scheme where keys are strings into symbols. + # + # @param [Hash] value. + # The value of the attribute as specified by the user. + # + # @return [Hash] the scheme with symbols as keys instead of strings or `nil` if the value is not a hash. + # + def _prepare_scheme(value) + Specification.convert_keys_to_symbol(value, :recursive => false) if value && value.is_a?(Hash) + end + + # Ensures that the file patterns of the on demand resources are contained in + # an array. + # + # @param [String, Array, Hash] value. + # The value of the attribute as specified by the user. + # + # @return [Hash] the on demand resources. + # + def _prepare_on_demand_resources(value) + result = {} + if value + value.each do |key, patterns| + case patterns + when String, Array + result[key] = { :paths => [*patterns].compact, :category => :download_on_demand } + when Hash + patterns = Specification.convert_keys_to_symbol(patterns, :recursive => false) + result[key] = { :paths => [*patterns[:paths]].compact, :category => patterns.fetch(:category, :download_on_demand).to_sym } + else + raise StandardError, "Unknown on demand resource value type `#{patterns}`." + end + end + end + result + end + + # Ensures that the file patterns of the resource bundles are contained in + # an array. + # + # @param [String, Array, Hash] value. + # The value of the attribute as specified by the user. + # + # @return [Hash] the resources. 
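+ #
+ # @example (editorial sketch, not part of the upstream gem sources;
+ # the bundle name is hypothetical)
+ #
+ # _prepare_resource_bundles('MapAssets' => 'Resources/Map/**/*.png')
+ # #=> { 'MapAssets' => ['Resources/Map/**/*.png'] }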
+ # + def _prepare_resource_bundles(value) + result = {} + if value + value.each do |key, patterns| + result[key] = [*patterns].compact + end + end + result + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl.rb new file mode 100644 index 0000000..2e67f29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl.rb @@ -0,0 +1,1895 @@ +require 'cocoapods-core/specification/dsl/attribute_support' +require 'cocoapods-core/specification/dsl/attribute' +require 'cocoapods-core/specification/dsl/platform_proxy' + +module Pod + class Specification + #- NOTE ------------------------------------------------------------------# + # The order of the methods defined in this file and the order of the + # methods is relevant for the documentation generated on the + # CocoaPods/cocoapods.github.com repository. + #-------------------------------------------------------------------------# + + # A specification describes a version of Pod library. It includes details + # about where the source should be fetched from, what files to use, the + # build settings to apply, and other general metadata such as its name, + # version, and description. + # + # --- + # + # A stub specification file can be generated by the [pod spec + # create](http://guides.cocoapods.org/terminal/commands.html#pod_spec_create) command. + # + # --- + # + # The specification DSL provides great flexibility and dynamism. Moreover, + # the DSL adopts the + # [convention over configuration](http://en.wikipedia.org/wiki/Convention_over_configuration) + # and thus it can be very simple: + # + # Pod::Spec.new do |spec| + # spec.name = 'Reachability' + # spec.version = '3.1.0' + # spec.license = { :type => 'BSD' } + # spec.homepage = 'https://github.com/tonymillion/Reachability' + # spec.authors = { 'Tony Million' => 'tonymillion@gmail.com' } + # spec.summary = 'ARC and GCD Compatible Reachability Class for iOS and OS X.' + # spec.source = { :git => 'https://github.com/tonymillion/Reachability.git', :tag => 'v3.1.0' } + # spec.source_files = 'Reachability.{h,m}' + # spec.framework = 'SystemConfiguration' + # end + # + # Or it can be quite detailed: + # + # Pod::Spec.new do |spec| + # spec.name = 'Reachability' + # spec.version = '3.1.0' + # spec.license = { :type => 'BSD' } + # spec.homepage = 'https://github.com/tonymillion/Reachability' + # spec.authors = { 'Tony Million' => 'tonymillion@gmail.com' } + # spec.summary = 'ARC and GCD Compatible Reachability Class for iOS and OS X.' 
+ # spec.source = { :git => 'https://github.com/tonymillion/Reachability.git', :tag => 'v3.1.0' } + # spec.module_name = 'Rich' + # spec.swift_version = '4.0' + # + # spec.ios.deployment_target = '9.0' + # spec.osx.deployment_target = '10.10' + # + # spec.source_files = 'Reachability/common/*.swift' + # spec.ios.source_files = 'Reachability/ios/*.swift', 'Reachability/extensions/*.swift' + # spec.osx.source_files = 'Reachability/osx/*.swift' + # + # spec.framework = 'SystemConfiguration' + # spec.ios.framework = 'UIKit' + # spec.osx.framework = 'AppKit' + # + # spec.dependency 'SomeOtherPod' + # end + # + module DSL + extend Pod::Specification::DSL::AttributeSupport + + # Deprecations must be required after include AttributeSupport + require 'cocoapods-core/specification/dsl/deprecations' + + #-----------------------------------------------------------------------# + + # @!group Root specification + # + # A ‘root’ specification stores the information about the specific + # version of a library. + # + # The attributes in this group can only be written to on the ‘root’ + # specification, **not** on the ‘sub-specifications’. + # + # --- + # + # The attributes listed in this group are the only one which are + # required by a podspec. + # + # The attributes of the other groups are offered to refine the podspec + # and follow a convention over configuration approach. A root + # specification can describe these attributes either directly of + # through ‘[sub-specifications](#subspec)’. + + #-----------------------------------------------------------------------# + + # @!method name=(name) + # + # The name of the Pod. + # + # @example + # + # spec.name = 'AFNetworking' + # + # @param [String] name + # the name of the pod. + # + attribute :name, + :required => true, + :inherited => false, + :multi_platform => false + + #------------------# + + # @!method version=(version) + # + # The version of the Pod. CocoaPods follows + # [semantic versioning](http://semver.org). + # + # @example + # + # spec.version = '0.0.1' + # + # @param [String] version + # the version of the Pod. + # + root_attribute :version, + :required => true + + #------------------# + + # @!method swift_versions=(version) + # + # The versions of Swift that the specification supports. A version of '4' will be treated as + # '4.0' by CocoaPods and not '4.1' or '4.2'. + # + # **Note** The Swift compiler mostly accepts major versions and sometimes will honor minor versions. + # While CocoaPods allows specifying a minor or patch version it might not be honored fully by the Swift compiler. + # + # @example + # + # spec.swift_versions = ['3.0'] + # + # @example + # + # spec.swift_versions = ['3.0', '4.0', '4.2'] + # + # @example + # + # spec.swift_version = '3.0' + # + # @example + # + # spec.swift_version = '3.0', '4.0' + # + # @param [String, Array] swift_versions + # + root_attribute :swift_versions, + :container => Array, + :singularize => true + + #-----------------------------------------------------------------------# + + # @!method cocoapods_version=(cocoapods_version) + # + # The version of CocoaPods that the specification supports. + # + # @example + # + # spec.cocoapods_version = '>= 0.36' + # + # @param [String] cocoapods_version + # the CocoaPods version that the specification supports. + # CocoaPods follows [semantic versioning](http://semver.org). 
+ # + root_attribute :cocoapods_version + + #------------------# + + # @!method authors=(authors) + # + # The name and email addresses of the library maintainers, not the + # Podspec maintainer. + # + # @example + # + # spec.author = 'Darth Vader' + # + # @example + # + # spec.authors = 'Darth Vader', 'Wookiee' + # + # @example + # + # spec.authors = { 'Darth Vader' => 'darthvader@darkside.com', + # 'Wookiee' => 'wookiee@aggrrttaaggrrt.com' } + # + # @param [String, Hash{String=>String}] authors + # the list of the authors of the library and their emails. + # + root_attribute :authors, + :types => [String, Array, Hash], + :container => Hash, + :required => true, + :singularize => true + + #------------------# + + # @!method social_media_url=(social_media_url) + # + # The URL for the social media contact of the Pod, CocoaPods web + # services can use this. + # + # For example, the @CocoaPodsFeed notifications will include the + # Twitter handle (shortening the description) if the URL is relative to + # Twitter. This does **not** necessarily have to be a Twitter URL, but + # only those are included in the Twitter @CocoaPodsFeed notifications. + # + # @example + # + # spec.social_media_url = 'https://twitter.com/cocoapods' + # + # @example + # + # spec.social_media_url = 'https://groups.google.com/forum/#!forum/cocoapods' + # + # @param [String] social_media_url + # the social media URL. + # + root_attribute :social_media_url + + #------------------# + + # The keys accepted by the license attribute. + # + LICENSE_KEYS = [:type, :file, :text].freeze + + # @!method license=(license) + # + # The license of the Pod. + # + # --- + # + # Unless the source contains a file named `LICENSE.*` or `LICENCE.*`, + # the path of the license file **or** the integral text of the notice + # commonly used for the license type must be specified. + # If a license file is specified, it either must be without a file + # extensions or be one of `txt`, `md`, or `markdown`. + # + # This information is used by CocoaPods to generate acknowledgement + # files (markdown and plist) which can be used in the acknowledgements + # section of the final application. + # + # @example + # + # spec.license = 'MIT' + # + # @example + # + # spec.license = { :type => 'MIT', :file => 'MIT-LICENSE.txt' } + # + # @example + # + # spec.license = { :type => 'MIT', :text => <<-LICENSE + # Copyright 2012 + # Permission is granted to... + # LICENSE + # } + # + # @param [String] license + # The type of the license + # + # @overload license=(license) + # @param [String, Hash{Symbol=>String}] license + # @option license [String] :type license type + # @option license [String] :file file containing full license text. Supports txt, md, and markdown + # @option license [String] :text full license text + # + root_attribute :license, + :container => Hash, + :keys => LICENSE_KEYS, + :required => true + + #------------------# + + # @!method homepage=(homepage) + # + # The URL of the homepage of the Pod. + # + # @example + # + # spec.homepage = 'http://www.example.com' + # + # @param [String] homepage + # the URL of the homepage of the Pod. + # + root_attribute :homepage, + :required => true + + #------------------# + + # @!method readme=(readme) + # + # The URL for the README markdown file for this pod version. + # + # @example + # + # spec.readme = 'https://www.example.com/Pod-1.5-README.md' + # + # @param [String] readme + # the readme markdown URL. 
+ # + root_attribute :readme + + #------------------# + + # @!method changelog=(changelog) + # + # The URL for the CHANGELOG markdown file for this pod version. + # + # @example + # + # spec.changelog = 'https://www.example.com/Pod-1.5-CHANGELOG.md' + # + # @param [String] changelog + # the changelog markdown URL. + # + root_attribute :changelog + + #------------------# + + # The keys accepted by the hash of the source attribute. + # + SOURCE_KEYS = { + :git => [:tag, :branch, :commit, :submodules].freeze, + :svn => [:folder, :tag, :revision].freeze, + :hg => [:revision].freeze, + :http => [:flatten, :type, :sha256, :sha1, :headers].freeze, + }.freeze + + # @!method source=(source) + # + # The location from where the library should be retrieved. + # + # @example Specifying a Git source with a tag. This is how most OSS Podspecs work. + # + # spec.source = { :git => 'https://github.com/AFNetworking/AFNetworking.git', + # :tag => spec.version.to_s } + # + # @example Using a tag prefixed with 'v' and submodules. + # + # spec.source = { :git => 'https://github.com/typhoon-framework/Typhoon.git', + # :tag => "v#{spec.version}", :submodules => true } + # + # @example Using Subversion with a tag. + # + # spec.source = { :svn => 'http://svn.code.sf.net/p/polyclipping/code', :tag => '4.8.8' } + # + # @example Using Mercurial with the same revision as the spec's semantic version string. + # + # spec.source = { :hg => 'https://bitbucket.org/dcutting/hyperbek', :revision => "#{s.version}" } + # + # @example Using HTTP to download a compressed file of the code. It supports zip, tgz, bz2, txz and tar. + # + # spec.source = { :http => 'http://dev.wechatapp.com/download/sdk/WeChat_SDK_iOS_en.zip' } + # + # @example Using HTTP to download a file using a hash to verify the download. It supports sha1 and sha256. + # + # spec.source = { :http => 'http://dev.wechatapp.com/download/sdk/WeChat_SDK_iOS_en.zip', + # :sha1 => '7e21857fe11a511f472cfd7cfa2d979bd7ab7d96' } + # + # + # @overload source=(git) + # @param [Hash] git + # @option git [String] :git git source URI + # @option git [String] :tag version tag + # @option git [Bool] :submodules Whether to checkout submodules + # @option git [String] :branch branch name + # @option git [String] :commit commit hash + # + # @overload source=(svn) + # @param [Hash] svn + # @option svn [String] :svn svn source URI + # @option svn [String] :tag version tag + # @option svn [String] :folder folder + # @option svn [String] :revision revision + # + # @overload source=(hg) + # @param [Hash] hg + # @option hg [String] :hg mercurial source URI + # @option hg [String] :revision revision + # + # @overload source=(http) + # @param [Hash] http + # @option http [String] :http compressed source URL + # @option http [String] :type file type. Supports zip, tgz, bz2, txz and tar + # @option http [String] :sha1 SHA hash. Supports SHA1 and SHA256 + # + root_attribute :source, + :container => Hash, + :keys => SOURCE_KEYS, + :required => true + + #------------------# + + # @!method summary=(summary) + # + # A short (maximum 140 characters) description of the Pod. + # + # --- + # + # The description should be short, yet informative. It represents the + # tag line of the Pod and there is no need to specify that a Pod is a + # library (they always are). + # + # The summary is expected to be properly capitalised and containing the + # correct punctuation. + # + # @example + # + # spec.summary = 'Computes the meaning of life.' + # + # @param [String] summary + # A short description of the Pod. 
+ # + root_attribute :summary, + :required => true + + #------------------# + + # @!method description=(description) + # + # A description of the Pod more detailed than the summary. + # + # @example + # + # spec.description = <<-DESC + # Computes the meaning of life. + # Features: + # 1. Is self aware + # ... + # 42. Likes candies. + # DESC + # + # @param [String] description + # A longer description of the Pod. + # + root_attribute :description + + #------------------# + + # @!method screenshots=(screenshots) + # + # A list of URLs to images showcasing the Pod. Intended for UI oriented + # libraries. CocoaPods recommends the usage of the `gif` format. + # + # @example + # + # spec.screenshot = 'http://dl.dropbox.com/u/378729/MBProgressHUD/1.png' + # + # @example + # + # spec.screenshots = [ 'http://dl.dropbox.com/u/378729/MBProgressHUD/1.png', + # 'http://dl.dropbox.com/u/378729/MBProgressHUD/2.png' ] + # + # @param [String] screenshots + # An URL for the screenshot of the Pod. + # + root_attribute :screenshots, + :singularize => true, + :container => Array + + #------------------# + + # @!method documentation_url=(documentation_url) + # + # An optional URL for the documentation of the Pod which will be honoured by + # CocoaPods web properties. Leaving it blank will default to a CocoaDocs + # generated URL for your library. + # + # @example + # + # spec.documentation_url = 'http://www.example.com/docs.html' + # + # @param [String] documentation_url + # The link of the web documentation of the Pod. + # + root_attribute :documentation_url + + #------------------# + + # @!method prepare_command=(command) + # + # A bash script that will be executed after the Pod is downloaded. This + # command can be used to create, delete and modify any file downloaded + # and will be ran before any paths for other file attributes of the + # specification are collected. + # + # This command is executed before the Pod is cleaned and before the + # Pods project is created. The working directory is the root of the + # Pod. + # + # If the pod is installed with the `:path` option this command will not + # be executed. + # + # @example + # + # spec.prepare_command = 'ruby build_files.rb' + # + # @example + # + # spec.prepare_command = <<-CMD + # sed -i 's/MyNameSpacedHeader/Header/g' ./**/*.h + # sed -i 's/MyNameOtherSpacedHeader/OtherHeader/g' ./**/*.h + # CMD + # + # @param [String] command + # the prepare command of the pod. + # + root_attribute :prepare_command + + #------------------# + + # @!method static_framework=(flag) + # + # Indicates, that if use_frameworks! is specified, the + # pod should include a static library framework. + # + # @example + # + # spec.static_framework = true + # + # @param [Bool] flag + # Indicates, that if use_frameworks! is specified, the + # pod should include a static library framework. + # + root_attribute :static_framework, + :types => [TrueClass, FalseClass], + :default_value => false + + #------------------# + + # @!method deprecated=(flag) + # + # Whether the library has been deprecated. + # + # @example + # + # spec.deprecated = true + # + # @param [Bool] flag + # whether the library has been deprecated. + # + root_attribute :deprecated, + :types => [TrueClass, FalseClass], + :default_value => false + + # @!method deprecated_in_favor_of=(deprecated_in_favor_of) + # + # The name of the Pod that this one has been deprecated in favor of. 
+ # + # @example + # + # spec.deprecated_in_favor_of = 'NewMoreAwesomePod' + # + # @param [String] deprecated_in_favor_of + # the name of the Pod that this one has been deprecated in + # favor of. + # + root_attribute :deprecated_in_favor_of + + #-----------------------------------------------------------------------# + + # @!group Platform + # + # A specification should indicate the platform and the correspondent + # deployment targets on which the library is supported. + # + # If not defined in a subspec the attributes of this group inherit the + # value of the parent. + + #-----------------------------------------------------------------------# + + # The names of the platforms supported by the specification class. + # + PLATFORMS = [:osx, :ios, :tvos, :watchos].freeze + + # @todo This currently is not used in the Ruby DSL. + # + attribute :platforms, + :container => Hash, + :keys => PLATFORMS, + :multi_platform => false, + :inherited => true + + # The platform on which this Pod is supported. Leaving this blank + # means the Pod is supported on all platforms. When supporting multiple + # platforms you should use deployment_target below instead. + # + # @example + # + # spec.platform = :osx, '10.8' + # + # @example + # + # spec.platform = :ios + # + # @example + # + # spec.platform = :osx + # + # @param [Array] args + # A tuple where the first value is the name of the platform, + # (either `:ios` or `:osx`) and the second is the deployment + # target. + # + def platform=(args) + name, deployment_target = args + name = :osx if name.to_s == 'macos' + attributes_hash['platforms'] = if name + { name.to_s => deployment_target } + else + {} + end + end + + #------------------# + + # The minimum deployment targets of the supported platforms. + # + # As opposed to the `platform` attribute, the `deployment_target` + # attribute allows to specify multiple platforms on which this pod + # is supported — specifying a different deployment target for each. + # + # @example + # + # spec.ios.deployment_target = '6.0' + # + # @example + # + # spec.osx.deployment_target = '10.8' + # + # @param [String] _args + # The deployment target of the platform. + # + def deployment_target=(*_args) + raise Informative, 'The deployment target can be declared only per ' \ + 'platform.' + end + + #-----------------------------------------------------------------------# + + # @!group Build settings + # + # In this group are listed the attributes related to the configuration + # of the build environment that should be used to build the library. + # + # If not defined in a subspec the attributes of this group inherit the + # value of the parent. + + #-----------------------------------------------------------------------# + + # @todo This currently is not used in the Ruby DSL. + # + attribute :dependencies, + :container => Hash, + :inherited => true + + # Any dependency on other Pods or to a ‘sub-specification’. + # + # --- + # + # Dependencies can specify versions requirements. The use of the optimistic + # version indicator `~>` is recommended because it provides good + # control over the version without being too restrictive. For example, + # `~> 1.0.1` is equivalent to `>= 1.0.1` combined with `< 1.1`. Similarly, + # `~> 1.0` will match `1.0`, `1.0.1`, `1.1`, but will not upgrade to `2.0`. + # + # Pods with overly restrictive dependencies limit their compatibility with + # other Pods. 
+ # + # @example + # spec.dependency 'AFNetworking', '~> 1.0' + # + # @example + # spec.dependency 'AFNetworking', '~> 1.0', :configurations => ['Debug'] + # + # @example + # spec.dependency 'AFNetworking', '~> 1.0', :configurations => :debug + # + # @example + # spec.dependency 'RestKit/CoreData', '~> 0.20.0' + # + # @example + # spec.ios.dependency 'MBProgressHUD', '~> 0.5' + # + def dependency(*args) + name, *version_requirements = args + if name == self.name + raise Informative, "A specification can't require itself as a " \ + 'subspec' + end + if @parent + composed_name = '' + @parent.name.split('/').each do |component| + composed_name << component + if name == composed_name + raise Informative, "A subspec can't require one of its " \ + 'parents specifications' + else + composed_name << '/' + end + end + end + + configurations_option = version_requirements.find { |option| option.is_a?(Hash) && option.key?(:configurations) } + whitelisted_configurations = if configurations_option + version_requirements.delete(configurations_option) + Array(configurations_option.delete(:configurations)).map { |c| c.to_s.downcase } + end + + dependency_options = version_requirements.reject { |req| req.is_a?(String) } + dependency_options.each do |dependency_option| + if dependency_option.is_a?(Hash) + if !dependency_option[:path].nil? + raise Informative, 'Podspecs cannot specify the source of dependencies. The `:path` option is not supported.'\ + ' `:path` can be used in the Podfile instead to override global dependencies.' + elsif !dependency_option[:git].nil? + raise Informative, 'Podspecs cannot specify the source of dependencies. The `:git` option is not supported.'\ + ' `:git` can be used in the Podfile instead to override global dependencies.' + end + end + + raise Informative, "Unsupported version requirements. #{version_requirements.inspect} is not valid." + end + + attributes_hash['dependencies'] ||= {} + attributes_hash['dependencies'][name] = version_requirements + + unless whitelisted_configurations.nil? + if (extras = whitelisted_configurations - %w(debug release)) && !extras.empty? + raise Informative, "Only `Debug` & `Release` are allowed under configurations for dependency on `#{name}`. " \ + "Found #{extras.map { |configuration| "`#{configuration}`" }.to_sentence}." + end + attributes_hash['configuration_pod_whitelist'] ||= {} + attributes_hash['configuration_pod_whitelist'][name] = whitelisted_configurations + end + end + + def dependency=(args) + joined = args.join('\', \'') + arguments = "\'#{joined}\'" + raise Informative, "Cannot assign value to `dependency`. Did you mean: `dependency #{arguments}`?" + end + + #------------------# + + # @!method info_plist=(info_plist) + # + # Key-Value pairs to add to the generated `Info.plist`. + # + # The values will be merged with the default values that + # CocoaPods generates, overriding any duplicates. + # + # For library specs, the values will be merged into the generated Info.plist + # for libraries that are integrated using frameworks. It will have no effect + # for static libraries. + # + # Subspecs (other than app and test specs) are not supported. + # + # For app specs, the values will be merged into the application host's `Info.plist`. + # + # For test specs, the values will be merged into the test bundle's `Info.plist`. + # + # @example + # + # spec.info_plist = { + # 'CFBundleIdentifier' => 'com.myorg.MyLib', + # 'MY_VAR' => 'SOME_VALUE' + # } + # + # @param [Hash] info_plist + # The Info.plist values for the Pod. 
+ # + attribute :info_plist, + :container => Hash, + :inherited => false + + #------------------# + + # @!method requires_arc=(flag) + # + # `requires_arc` allows you to specify which source_files use ARC. + # This can either be the files which support ARC, or true to indicate + # all of the source_files use ARC. + # + # Files which do not use ARC will have the `-fno-objc-arc` compiler + # flag. + # + # The default value of this attribute is `true`. + # + # @example + # + # spec.requires_arc = false + # + # @example + # + # spec.requires_arc = 'Classes/Arc' + # + # @example + # + # spec.requires_arc = ['Classes/*ARC.m', 'Classes/ARC.mm'] + # + # @param [Bool, String, Array] flag + # whether the source files require ARC. + # + attribute :requires_arc, + :types => [TrueClass, FalseClass, String, Array], + :file_patterns => true, + :default_value => true, + :inherited => true + + #------------------# + + # @!method frameworks=(*frameworks) + # + # A list of system frameworks that the user’s target needs to link + # against. + # + # @example + # + # spec.ios.framework = 'CFNetwork' + # + # @example + # + # spec.frameworks = 'QuartzCore', 'CoreData' + # + # @param [String, Array] frameworks + # A list of framework names. + # + attribute :frameworks, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method weak_frameworks=(*frameworks) + # + # A list of frameworks that the user’s target needs to **weakly** link + # against. + # + # @example + # + # spec.weak_framework = 'Twitter' + # + # @example + # + # spec.weak_frameworks = 'Twitter', 'SafariServices' + # + # @param [String, Array] weak_frameworks + # A list of frameworks names. + # + attribute :weak_frameworks, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method libraries=(*libraries) + # + # A list of system libraries that the user’s target (application) needs to + # link against. + # + # @example + # + # spec.ios.library = 'xml2' + # + # @example + # + # spec.libraries = 'xml2', 'z' + # + # @param [String, Array] libraries + # A list of library names. + # + attribute :libraries, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method compiler_flags=(flags) + # + # A list of flags which should be passed to the compiler. + # + # @example + # + # spec.compiler_flags = '-DOS_OBJECT_USE_OBJC=0', '-Wno-format' + # + # @param [String, Array] flags + # A list of flags. + # + attribute :compiler_flags, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method pod_target_xcconfig=(value) + # + # Any flag to add to the final __private__ pod target xcconfig file. + # + # @example + # + # spec.pod_target_xcconfig = { 'OTHER_LDFLAGS' => '-lObjC' } + # + # @param [Hash{String => String}] value + # Key-value pairs representing build settings. + # + attribute :pod_target_xcconfig, + :container => Hash, + :inherited => true + + # @!method user_target_xcconfig=(value) + # + # Specifies flags to add to the final aggregate target xcconfig file, + # which propagates to non-overridden and inheriting build settings to + # the integrated user targets. + # + # --- + # + # This attribute is __not recommended__ as Pods should not pollute the + # build settings of the user project and this can cause conflicts. + # + # Multiple definitions for build settings that take multiple values + # will be merged. 
The user is warned on conflicting definitions for + # custom build settings and build settings that take only one value. + # + # Typically clang compiler flags or precompiler macro definitions go + # in here if they are required when importing the pod in the user + # target. Note that, this influences not only the compiler view of the + # public interface of your pod, but also all other integrated pods + # alongside to yours. You should always prefer [`pod_target_xcconfig`]( + # http://guides.cocoapods.org/syntax/podspec.html#pod_target_xcconfig), + # which can contain the same settings, but only influence the + # toolchain when compiling your pod target. + # + # @example + # + # spec.user_target_xcconfig = { 'MY_SUBSPEC' => 'YES' } + # + # @param [Hash{String => String}] value + # Key-value pairs representing build settings. + # + attribute :user_target_xcconfig, + :container => Hash, + :inherited => true + + #------------------# + + # @!method prefix_header_contents=(content) + # + # Any content to inject in the prefix header of the pod project. + # + # --- + # + # This attribute is __not recommended__ as Pods should not pollute the + # prefix header of other libraries or of the user project. + # + # @example + # + # spec.prefix_header_contents = '#import ' + # + # @example + # + # spec.prefix_header_contents = '#import ', '#import ' + # + # @param [String] content + # The contents of the prefix header. + # + attribute :prefix_header_contents, + :types => [Array, String], + :inherited => true + + #------------------# + + # @!method prefix_header_file=(path) + # + # A path to a prefix header file to inject in the prefix header of the + # pod project. + # `false` indicates that the default CocoaPods prefix header should not + # be generated. + # `true` is the default and indicates that the default CocoaPods prefix + # header should be generated. + # + # --- + # + # The file path options is __not recommended__ as Pods should not + # pollute the prefix header of other libraries or of the user project. + # + # + # @example + # + # spec.prefix_header_file = 'iphone/include/prefix.pch' + # + # @example + # + # spec.prefix_header_file = false + # + # @param [Bool, String] path + # The path to the prefix header file or whether to disable + # prefix_header generation. + # + attribute :prefix_header_file, + :types => [TrueClass, FalseClass, String], + :inherited => true + + #------------------# + + # @!method module_name=(name) + # + # The name to use for the framework / clang module which + # will be generated for this specification instead of the + # default (header_dir if set, otherwise the specification + # name). + # + # @example + # + # spec.module_name = 'Three20' + # + # @param [String] name + # the module name. + # + root_attribute :module_name + + #------------------# + + # @!method header_dir=(dir) + # + # The directory where to store the headers files so they don't break + # includes. + # + # @example + # + # spec.header_dir = 'Three20Core' + # + # @param [String] dir + # the headers directory. + # + attribute :header_dir, + :inherited => true + + #------------------# + + # @!method header_mappings_dir=(dir) + # + # A directory from where to preserve the folder structure for the + # headers files. If not provided the headers files are flattened. + # + # @example + # + # spec.header_mappings_dir = 'src/include' + # + # @param [String] dir + # the directory from where to preserve the headers namespacing. 
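+ # (Editorial illustration, not part of the upstream gem sources.) With
+ # `spec.header_mappings_dir = 'src/include'`, a header found at
+ # `src/include/Foo/Bar.h` is expected to keep its `Foo/Bar.h`
+ # namespacing rather than being flattened to `Bar.h`.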
+ # + attribute :header_mappings_dir, + :inherited => true + + #------------------# + + SCRIPT_PHASE_REQUIRED_KEYS = [:name, :script].freeze + + SCRIPT_PHASE_OPTIONAL_KEYS = [:shell_path, :input_files, :output_files, :input_file_lists, :output_file_lists, + :show_env_vars_in_log, :execution_position, :dependency_file].freeze + + EXECUTION_POSITION_KEYS = [:before_compile, :after_compile, :before_headers, :after_headers, :any].freeze + + ALL_SCRIPT_PHASE_KEYS = (SCRIPT_PHASE_REQUIRED_KEYS + SCRIPT_PHASE_OPTIONAL_KEYS).freeze + + # @!method script_phases=(*script_phases) + # + # This attribute allows to define a script phase to execute as part of compilation of the Pod. + # Unlike a prepare command, script phases execute as part of `xcodebuild` they can also utilize all environment + # variables that are set during compilation. + # + # A Pod can provide multiple script phases to execute and they will be added in the order they were + # declared and after taking into consideration their execution position setting. + # + # **Note** In order to provide visibility and awareness of the contents of all script phases, + # a warning will be presented to the user upon installing your pod if it includes any script phases. + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"' } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"', :execution_position => :before_compile } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'puts "Hello World"', :shell_path => '/usr/bin/ruby' } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"', + # :input_files => ['/path/to/input_file.txt'], :output_files => ['/path/to/output_file.txt'] + # } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"', + # :input_file_lists => ['/path/to/input_files.xcfilelist'], :output_file_lists => ['/path/to/output_files.xcfilelist'] + # } + # + # @example + # + # spec.script_phases = [ + # { :name => 'Hello World', :script => 'echo "Hello World"' }, + # { :name => 'Hello Ruby World', :script => 'puts "Hello World"', :shell_path => '/usr/bin/ruby' }, + # ] + # + # @param [ArrayString}>] script_phases + # An array of hashes where each hash represents a single script phase. + # + attribute :script_phases, + :types => [Hash], + :container => Array, + :singularize => true + + #-----------------------------------------------------------------------# + + # @!group File patterns + # + # Podspecs should be located at the **root** of the repository, and paths + # to files should be specified **relative** to the root of the repository + # as well. File patterns do not support traversing the parent directory ( `..` ). + # File patterns may contain the following wildcard patterns: + # + # --- + # + # ### Pattern: * + # + # Matches any file. Can be restricted by other values in the glob. + # + # * `*` will match all files + # * `c*` will match all files beginning with `c` + # * `*c` will match all files ending with `c` + # * `*c*` will match all files that have `c` in them (including at the + # beginning or end) + # + # Equivalent to `/.*/x` in regexp. + # + # **Note** this will not match Unix-like hidden files (dotfiles). In + # order to include those in the match results, you must use something + # like `{*,.*}`. + # + # --- + # + # ### Pattern: ** + # + # Matches directories recursively. + # + # --- + # + # ### Pattern: ? 
+ # + # Matches any one character. Equivalent to `/.{1}/` in regexp. + # + # --- + # + # ### Pattern: [set] + # + # Matches any one character in set. + # + # Behaves exactly like character sets in Regexp, including set negation + # (`[^a-z]`). + # + # --- + # + # ### Pattern: {p,q} + # + # Matches either literal `p` or literal `q`. + # + # Matching literals may be more than one character in length. More than + # two literals may be specified. + # + # Equivalent to pattern alternation in regexp. + # + # --- + # + # ### Pattern: \ + # + # Escapes the next meta-character. + # + # --- + # + # ### Examples + # + # Consider these to be evaluated in the source root of + # [JSONKit](https://github.com/johnezang/JSONKit). + # + # "JSONKit.?" #=> ["JSONKit.h", "JSONKit.m"] + # "*.[a-z][a-z]" #=> ["CHANGELOG.md", "README.md"] + # "*.[^m]*" #=> ["JSONKit.h"] + # "*.{h,m}" #=> ["JSONKit.h", "JSONKit.m"] + # "*" #=> ["CHANGELOG.md", "JSONKit.h", "JSONKit.m", "README.md"] + + #-----------------------------------------------------------------------# + + # @!method source_files=(source_files) + # + # The source files of the Pod. + # + # @example + # + # spec.source_files = 'Classes/**/*.{h,m}' + # + # @example + # + # spec.source_files = 'Classes/**/*.{h,m}', 'More_Classes/**/*.{h,m}' + # + # @param [String, Array] source_files + # the source files of the Pod. + # + attribute :source_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method public_header_files=(public_header_files) + # + # A list of file patterns that should be used as public headers. + # + # --- + # + # These patterns are matched against the source files to include headers + # that will be exposed to the user’s project and + # from which documentation will be generated. When the library is built, + # these headers will appear in the build directory. If no public headers + # are specified then **all** the headers in source_files are considered + # public. + # + # @example + # + # spec.public_header_files = 'Headers/Public/*.h' + # + # @param [String, Array] public_header_files + # the public headers of the Pod. + # + attribute :public_header_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method project_header_files=(project_header_files) + # + # A list of file patterns that should be used to mark project headers. + # + # --- + # + # These patterns are matched against the public headers (or all the + # headers if no public headers have been specified) to exclude those + # headers which should not be exposed to the user project and which + # should not be used to generate the documentation. When the library + # is built, these headers will _not_ appear in the build directory. + # + # + # @example + # + # spec.project_header_files = 'Headers/Project/*.h' + # + # @param [String, Array] project_header_files + # the project headers of the Pod. + # + attribute :project_header_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method private_header_files=(private_header_files) + # + # A list of file patterns that should be used to mark private headers. + # + # --- + # + # These patterns are matched against the public headers (or all the + # headers if no public headers have been specified) to exclude those + # headers which should not be exposed to the user project and which + # should not be used to generate the documentation. When the library + # is built, these headers will appear in the build directory. 
+ # + # Header files that are not listed as neither public nor project or private will + # be treated as private, but in addition will not appear in the build + # directory at all. + # + # + # @example + # + # spec.private_header_files = 'Headers/Private/*.h' + # + # @param [String, Array] private_header_files + # the private headers of the Pod. + # + attribute :private_header_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method vendored_frameworks=(*frameworks) + # + # The paths of the framework bundles that come shipped with the Pod. Supports both `.framework` and `.xcframework` bundles. + # The frameworks will be made available to the Pod and to the consumers of the pod. + # + # @example + # + # spec.ios.vendored_frameworks = 'Frameworks/MyFramework.framework' + # + # @example + # + # spec.vendored_frameworks = 'MyFramework.framework', 'TheirFramework.xcframework' + # + # @param [String, Array] vendored_frameworks + # A list of framework bundles paths. + # + attribute :vendored_frameworks, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method vendored_libraries=(*frameworks) + # + # The paths of the libraries that come shipped with the Pod. The libraries will be available to the Pod and the + # consumers of the Pod. + # + # @example + # + # spec.ios.vendored_library = 'Libraries/libProj4.a' + # + # @example + # + # spec.vendored_libraries = 'libProj4.a', 'libJavaScriptCore.a' + # + # @param [String, Array] vendored_libraries + # A list of library paths. + # + attribute :vendored_libraries, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # The keys accepted by the category attribute for each on demand resource entry. + # + ON_DEMAND_RESOURCES_CATEGORY_KEYS = [:download_on_demand, :prefetched, :initial_install].freeze + + # @!method on_demand_resources=(on_demand_resources) + # + # A hash of on demand resources that should be copied into the target bundle. Resources specified here + # will automatically become part of the resources build phase of the target this pod is integrated into. + # + # If no category is specified then `:download_on_demand` is used as the default. + # + # @note + # + # Tags specified by pods are _always_ managed by CocoaPods. If a tag is renamed, changed or deleted then + # CocoaPods will update the tag within the targets the pod was integrated into. It is highly recommended not to + # share the same tags for your project as the ones used by the pods your project consumes. + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => 'file1.png' + # } + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => ['file1.png', 'file2.png'] + # } + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => { :paths => ['file1.png', 'file2.png'], :category => :download_on_demand } + # } + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => { :paths => ['file1.png', 'file2.png'], :category => :initial_install } + # } + # + # @param [Hash{String=>String}, Hash{String=>Array}, Hash{String=>Hash}] on_demand_resources + # The on demand resources shipped with the Pod. 
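As an editorial aside on the header-visibility attributes documented earlier in this file, here is a minimal podspec sketch (paths and names are illustrative, not taken from this repository) showing how the three patterns combine:

    Pod::Spec.new do |spec|
      # ... name, version, source, etc. omitted ...
      spec.source_files         = 'Headers/**/*.h', 'Sources/**/*.m'
      spec.public_header_files  = 'Headers/Public/*.h'   # exposed to the client project and used for documentation
      spec.project_header_files = 'Headers/Project/*.h'  # compiled, but not exposed and not copied to the build directory
      spec.private_header_files = 'Headers/Private/*.h'  # not exposed or documented, yet still copied to the build directory
    end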
+ # + attribute :on_demand_resources, + :types => [String, Array, Hash], + :container => Hash, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method resource_bundles=(*resource_bundles) + # + # This attribute allows to define the name and the file of the resource + # bundles which should be built for the Pod. They are specified as a + # hash where the keys represent the name of the bundles and the values + # the file patterns that they should include. + # + # For building the Pod as a static library, we strongly **recommend** + # library developers to adopt resource bundles as there can be name + # collisions using the resources attribute. + # + # The names of the bundles should at least include the name of the Pod + # to minimise the chance of name collisions. + # + # To provide different resources per platform namespaced bundles *must* + # be used. + # + # @example + # + # spec.ios.resource_bundle = { 'MapBox' => 'MapView/Map/Resources/*.png' } + # + # @example + # + # spec.resource_bundles = { + # 'MapBox' => ['MapView/Map/Resources/*.png'], + # 'MapBoxOtherResources' => ['MapView/Map/OtherResources/*.png'] + # } + # + # @param [Hash{String=>String}, Hash{String=>Array}] resource_bundles + # A hash where the keys are the names of the resource bundles + # and the values are their relative file patterns. + # + attribute :resource_bundles, + :types => [String, Array], + :container => Hash, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method resources=(resources) + # + # A list of resources that should be copied into the target bundle. + # + # For building the Pod as a static library, we strongly **recommend** + # library developers to adopt [resource bundles](http://guides.cocoapods.org/syntax/podspec.html#resource_bundles) + # as there can be name collisions using the resources attribute. + # Moreover, resources specified with this attribute are copied + # directly to the client target and therefore they are not + # optimised by Xcode. + # + # @example + # + # spec.resource = 'Resources/HockeySDK.bundle' + # + # @example + # + # spec.resources = ['Images/*.png', 'Sounds/*'] + # + # @param [String, Array] resources + # The resources shipped with the Pod. + # + attribute :resources, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method exclude_files=(exclude_files) + # + # A list of file patterns that should be excluded from the other + # file patterns. + # + # @example + # + # spec.ios.exclude_files = 'Classes/osx' + # + # @example + # + # spec.exclude_files = 'Classes/**/unused.{h,m}' + # + # @param [String, Array] exclude_files + # the file patterns that the Pod should ignore. + # + attribute :exclude_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method preserve_paths=(preserve_paths) + # + # Any file that should **not** be removed after being downloaded. + # + # --- + # + # By default, CocoaPods removes all files that are not matched by any + # of the other file pattern. + # + # @example + # + # spec.preserve_path = 'IMPORTANT.txt' + # + # @example + # + # spec.preserve_paths = 'Frameworks/*.framework' + # + # @param [String, Array] preserve_paths + # the paths that should be not cleaned. 
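Illustrative aside (the file names are hypothetical): `exclude_files` and `preserve_paths`, documented just above, are typically combined with `source_files` along these lines:

    Pod::Spec.new do |spec|
      # ... metadata omitted ...
      spec.source_files   = 'Sources/**/*.{h,m}'
      spec.exclude_files  = 'Sources/**/*+Testing.{h,m}'        # drop test-only categories from the match
      spec.preserve_paths = 'scripts/*.sh', 'module.modulemap'  # keep files that no other pattern matches after install
    end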
+ # + attribute :preserve_paths, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method module_map=(module_map) + # + # The module map file that should be used when this pod is integrated as + # a framework. + # + # `false` indicates that the default CocoaPods `modulemap` file should not + # be generated. + # + # `true` is the default and indicates that the default CocoaPods + # `modulemap` file should be generated. + # + # --- + # + # By default, CocoaPods creates a module map file based upon the public + # headers in a specification. + # + # @example + # + # spec.module_map = 'source/module.modulemap' + # + # @example + # + # spec.module_map = false + # + # @param [String, Bool] module_map + # the path to the module map file that should be used + # or whether to disable module_map generation. + # + attribute :module_map, + :types => [TrueClass, FalseClass, String], + :root_only => true + + #-----------------------------------------------------------------------# + + # @!group Subspecs + # + # A library can specify a dependency on either another library, a + # subspec of another library, or a subspec of itself. + + #-----------------------------------------------------------------------# + + # Represents specification for a module of the library. + # + # --- + # + # Subspecs participate on a dual hierarchy. + # + # On one side, a specification automatically inherits as a dependency all + # it children ‘sub-specifications’ (unless a default subspec is + # specified). + # + # On the other side, a ‘sub-specification’ inherits the value of the + # attributes of the parents so common values for attributes can be + # specified in the ancestors. + # + # Although it sounds complicated in practice it means that subspecs in + # general do what you would expect: + # + # pod 'ShareKit', '2.0' + # + # Installs ShareKit with all the sharers like `ShareKit/Evernote`, + # `ShareKit/Facebook`, etc, as they are defined as subspecs. + # + # pod 'ShareKit/Twitter', '2.0' + # pod 'ShareKit/Pinboard', '2.0' + # + # Installs ShareKit with only the source files for `ShareKit/Twitter`, + # `ShareKit/Pinboard`. Note that, in this case, the ‘sub-specifications’ + # to compile need the source files, the dependencies, and the other + # attributes defined by the root specification. CocoaPods is smart enough + # to handle any issues arising from duplicate attributes. + # + # @example Subspecs with different source files. + # + # subspec 'Twitter' do |sp| + # sp.source_files = 'Classes/Twitter' + # end + # + # subspec 'Pinboard' do |sp| + # sp.source_files = 'Classes/Pinboard' + # end + # + # @example Subspecs referencing dependencies to other subspecs. + # + # Pod::Spec.new do |s| + # s.name = 'RestKit' + # + # s.subspec 'Core' do |cs| + # cs.dependency 'RestKit/ObjectMapping' + # cs.dependency 'RestKit/Network' + # cs.dependency 'RestKit/CoreData' + # end + # + # s.subspec 'ObjectMapping' do |os| + # end + # end + # + # @example Nested subspecs. + # + # Pod::Spec.new do |s| + # s.name = 'Root' + # + # s.subspec 'Level_1' do |sp| + # sp.subspec 'Level_2' do |ssp| + # end + # end + # end + # + def subspec(name, &block) + subspec = Specification.new(self, name, &block) + @subspecs << subspec + subspec + end + + # The list of the test types currently supported. + # + SUPPORTED_TEST_TYPES = [:unit, :ui].freeze + + # The test type this specification supports. This only applies to test specifications. 
+ # + # --- + # + # @example + # + # test_spec.test_type = :unit + # + # @example + # + # test_spec.test_type = 'unit' + # + # @param [Symbol, String] type + # The test type to use. + # + attribute :test_type, + :types => [Symbol, String], + :multi_platform => false, + :spec_types => [:test] + + # @!method requires_app_host=(flag) + # + # Whether a test specification requires an app host to run tests. This only applies to test specifications. + # + # @example + # + # test_spec.requires_app_host = true + # + # @param [Bool] flag + # whether a test specification requires an app host to run tests. + # + attribute :requires_app_host, + :types => [TrueClass, FalseClass], + :default_value => false, + :spec_types => [:test] + + # @!method app_host_name=(name) + # + # The app specification to use as an app host, if necessary. + # + # @note + # + # You must depend on that app spec using `test_spec.dependency 'PodName'`. + # + # @example + # + # Pod::Spec.new do |spec| + # spec.name = 'NSAttributedString+CCLFormat' + # + # spec.test_spec do |test_spec| + # test_spec.source_files = 'NSAttributedString+CCLFormatTests.m' + # test_spec.requires_app_host = true + # test_spec.app_host_name = 'NSAttributedString+CCLFormat/App' + # test_spec.dependency 'NSAttributedString+CCLFormat/App' + # end + # + # spec.app_spec 'App' do |app_spec| + # app_spec.source_files = 'NSAttributedString+CCLFormat.m' + # app_spec.dependency 'AFNetworking' + # end + # end + # + # @param [String] name + # The app specification to use as an app host, if necessary. + # + attribute :app_host_name, + :types => [String], + :spec_types => [:test] + + SCHEME_KEYS = [:launch_arguments, :environment_variables, :code_coverage].freeze + + # @!method scheme=(flag) + # + # Specifies the scheme configuration to be used for this specification. + # + # --- + # + # @example + # + # spec.scheme = { :launch_arguments => ['Arg1'] } + # + # @example + # + # spec.scheme = { :launch_arguments => ['Arg1', 'Arg2'], :environment_variables => { 'Key1' => 'Val1'} } + # + # @param [Hash] scheme + # the scheme configuration to be used for this specification. + # + attribute :scheme, + :types => [Hash], + :container => Hash, + :keys => SCHEME_KEYS + + # Represents a test specification for the library. Here you can place all + # your tests for your podspec along with the test dependencies. + # + # --- + # + # @example + # + # Pod::Spec.new do |spec| + # spec.name = 'NSAttributedString+CCLFormat' + # + # spec.test_spec do |test_spec| + # test_spec.source_files = 'NSAttributedString+CCLFormatTests.m' + # test_spec.dependency 'Expecta' + # end + # end + # + def test_spec(name = 'Tests', &block) + subspec = Specification.new(self, name, true, &block) + @subspecs << subspec + subspec + end + + # Represents an app specification for the library. Here you can place all + # your app source files for your podspec along with the app dependencies. + # + # --- + # + # @example + # + # Pod::Spec.new do |spec| + # spec.name = 'NSAttributedString+CCLFormat' + # + # spec.app_spec do |app_spec| + # app_spec.source_files = 'NSAttributedString+CCLFormat.m' + # app_spec.dependency 'AFNetworking' + # end + # end + # + def app_spec(name = 'App', &block) + appspec = Specification.new(self, name, :app_specification => true, &block) + @subspecs << appspec + appspec + end + + #------------------# + + # @!method default_subspecs=(subspec_array) + # + # An array of subspecs names that should be used as preferred dependency. 
+ # If not specified, a specification requires all of its subspecs as + # dependencies. + # + # You may use the value `:none` to specify that none of the subspecs are + # required to compile this pod and that all subspecs are optional. + # + # --- + # + # A Pod should make available the full library by default. Users can + # fine tune their dependencies, and exclude unneeded subspecs, once + # their requirements are known. Therefore, this attribute is rarely + # needed. It is intended to be used to select a default if there are + # ‘sub-specifications’ which provide alternative incompatible + # implementations, or to exclude modules rarely needed (especially if + # they trigger dependencies on other libraries). + # + # @example + # + # spec.default_subspec = 'Core' + # + # @example + # + # spec.default_subspecs = 'Core', 'UI' + # + # @example + # + # spec.default_subspecs = :none + # + # @param [Array, String, Symbol] subspec_names + # An array of subspec names that should be inherited as + # dependency. + # + root_attribute :default_subspecs, + :container => Array, + :types => [Array, String, Symbol], + :singularize => true + + #-----------------------------------------------------------------------# + + # @!group Multi-Platform support + # + # A specification can store values which are specific to only one + # platform. + # + # --- + # + # For example one might want to store resources which are specific to + # only iOS projects. + # + # spec.resources = 'Resources/**/*.png' + # spec.ios.resources = 'Resources_ios/**/*.png' + + #-----------------------------------------------------------------------# + + # Provides support for specifying iOS attributes. + # + # @example + # spec.ios.source_files = 'Classes/ios/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def ios + PlatformProxy.new(self, :ios) + end + + # Provides support for specifying OS X attributes. + # + # @example + # spec.osx.source_files = 'Classes/osx/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def osx + PlatformProxy.new(self, :osx) + end + + alias macos osx + + # Provides support for specifying tvOS attributes. + # + # @example + # spec.tvos.source_files = 'Classes/tvos/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def tvos + PlatformProxy.new(self, :tvos) + end + + # Provides support for specifying watchOS attributes. + # + # @example + # spec.watchos.source_files = 'Classes/watchos/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def watchos + PlatformProxy.new(self, :watchos) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/attribute.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/attribute.rb new file mode 100644 index 0000000..b663c6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/attribute.rb @@ -0,0 +1,206 @@ +module Pod + class Specification + module DSL + # A Specification attribute stores the information of an attribute. It + # also provides logic to implement any required logic. + # + class Attribute + require 'active_support/inflector/inflections' + + # Spec types currently supported. + # + SUPPORTED_SPEC_TYPES = [:library, :app, :test].freeze + + # @return [Symbol] the name of the attribute. 
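Stepping back to the DSL file above for a moment, a hedged sketch of the per-platform proxies (`ios`, `osx`/`macos`, `tvos`, `watchos`); the paths and the dependency name are made up:

    Pod::Spec.new do |spec|
      # ... metadata omitted ...
      spec.ios.deployment_target = '12.0'
      spec.osx.deployment_target = '10.13'
      spec.ios.source_files      = 'Sources/Shared/**/*.swift', 'Sources/iOS/**/*.swift'
      spec.osx.source_files      = 'Sources/Shared/**/*.swift', 'Sources/macOS/**/*.swift'
      spec.ios.dependency 'SomeUIKitHelper'  # hypothetical pod needed only on iOS
    end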
+ # + attr_reader :name + + # Returns a new attribute initialized with the given options. + # + # Attributes by default are: + # + # - inherited + # - multi-platform + # + # @param [Symbol] name @see name + # + # @param [Hash{Symbol=>Object}] options + # The options for configuring the attribute (see Options + # group). + # + # @raise If there are unrecognized options. + # + def initialize(name, options) + @name = name + + @multi_platform = options.delete(:multi_platform) { true } + @root_only = options.delete(:root_only) { false } + @spec_types = options.delete(:spec_types) { SUPPORTED_SPEC_TYPES } + @inherited = options.delete(:inherited) { @root_only } + @required = options.delete(:required) { false } + @singularize = options.delete(:singularize) { false } + @file_patterns = options.delete(:file_patterns) { false } + @container = options.delete(:container) { nil } + @keys = options.delete(:keys) { nil } + @default_value = options.delete(:default_value) { nil } + @ios_default = options.delete(:ios_default) { nil } + @osx_default = options.delete(:osx_default) { nil } + @types = options.delete(:types) { [String] } + + unless options.empty? + raise StandardError, "Unrecognized options: #{options} for #{self}" + end + unless (@spec_types - SUPPORTED_SPEC_TYPES).empty? + raise StandardError, "Unrecognized spec type option: #{@spec_types} for #{self}" + end + end + + # @return [String] A string representation suitable for UI. + # + def to_s + "Specification attribute `#{name}`" + end + + # @return [String] A string representation suitable for debugging. + # + def inspect + "<#{self.class} name=#{name} types=#{types} " \ + "multi_platform=#{multi_platform?}>" + end + + #---------------------------------------------------------------------# + + # @!group Options + + # @return [Array] the list of the classes of the values + # supported by the attribute writer. If not specified defaults + # to #{String}. + # + attr_reader :types + + # @return [Array] the list of the classes of the values + # supported by the attribute, including the container. + # + def supported_types + @supported_types ||= @types.dup.push(container).compact + end + + # @return [Class] if defined it can be #{Array} or #{Hash}. It is used + # as default initialization value and to automatically wrap + # other values to arrays. + # + attr_reader :container + + # @return [Array, Hash] the list of the accepted keys for an attribute + # wrapped by a Hash. + # + # @note A hash is accepted to group the keys associated only with + # certain keys (see the source attribute of a Spec). + # + attr_reader :keys + + # @return [Object] if the attribute follows configuration over + # convention it can specify a default value. + # + # @note The default value is not automatically wrapped and should be + # specified within the container if any. + # + attr_reader :default_value + + # @return [Object] similar to #{default_value} but for iOS. + # + attr_reader :ios_default + + # @return [Object] similar to #{default_value} but for OS X. + # + attr_reader :osx_default + + # @return [Bool] whether the specification should be considered invalid + # if a value for the attribute is not specified. + # + def required? + @required + end + + # @return [Bool] whether the attribute should be specified only on the + # root specification. + # + def root_only? + @root_only + end + + # @return [Bool] whether the attribute should be specified only on + # test specifications. + # + def test_only? 
+ @spec_types == [:test] + end + + # @return [Bool] whether the attribute is multi-platform and should + # work in conjunction with #{PlatformProxy}. + # + def multi_platform? + @multi_platform + end + + # @return [Bool] whether there should be a singular alias for the + # attribute writer. + # + def singularize? + @singularize + end + + # @return [Bool] whether the attribute describes file patterns. + # + # @note This is mostly used by the linter. + # + def file_patterns? + @file_patterns + end + + # @return [Bool] defines whether the attribute reader should join the + # values with the parent. + # + # @note Attributes stored in wrappers are always inherited. + # + def inherited? + @inherited + end + + #---------------------------------------------------------------------# + + # @!group Accessors support + + # Returns the default value for the attribute. + # + # @param [Symbol] platform + # the platform for which the default value is requested. + # + # @return [Object] The default value. + # + def default(platform = nil) + if platform && multi_platform? + platform_value = ios_default if platform == :ios + platform_value = osx_default if platform == :osx + platform_value || default_value + else + default_value + end + end + + # @return [String] the name of the setter method for the attribute. + # + def writer_name + "#{name}=" + end + + # @return [String] an aliased attribute writer offered for convenience + # on the DSL. + # + def writer_singular_form + "#{name.to_s.singularize}=" if singularize? + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/attribute_support.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/attribute_support.rb new file mode 100644 index 0000000..c8ebc13 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/attribute_support.rb @@ -0,0 +1,74 @@ +module Pod + class Specification + module DSL + # @return [Array] The attributes of the class. + # + class << self + attr_reader :attributes + end + + # This module provides support for storing the runtime information of the + # {Specification} DSL. + # + module AttributeSupport + # Defines a root attribute for the extended class. + # + # Root attributes make sense only in root specification, they never are + # multi-platform, they don't have inheritance, and they never have a + # default value. + # + # @param [Symbol, String] name + # The name of the attribute. + # + # @param [Hash] options + # The options used to initialize the attribute. + # + # @return [void] + # + def root_attribute(name, options = {}) + options[:root_only] = true + options[:multi_platform] = false + store_attribute(name, options) + end + + # Defines an attribute for the extended class. + # + # Regular attributes in general support inheritance and multi-platform + # values, so resolving them for a given specification is not trivial. + # + # @param [Symbol, String] name + # The name of the attribute. + # + # @param [Hash] options + # The options used to initialize the attribute. + # + # @return [void] + # + def attribute(name, options = {}) + store_attribute(name, options) + end + + #---------------------------------------------------------------------# + + # Creates an attribute with the given name and options and stores it in + # the {DSL.attributes} hash. + # + # @param [String] name + # The name of the attribute. 
+ # + # @param [Hash] options + # The options used to initialize the attribute. + # + # @return [void] + # + # @visibility private + # + def store_attribute(name, options) + attr = Attribute.new(name, options) + @attributes ||= {} + @attributes[name] = attr + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/deprecations.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/deprecations.rb new file mode 100644 index 0000000..3d44734 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/deprecations.rb @@ -0,0 +1,20 @@ +module Pod + class Specification + module DSL + # Provides warning and errors for the deprecated attributes of the DSL. + # + module Deprecations + DSL.attribute :xcconfig, + :container => Hash, + :inherited => true + + def xcconfig=(value) + self.pod_target_xcconfig = value + self.user_target_xcconfig = value + CoreUI.warn "[#{self}] `xcconfig` has been split into "\ + '`pod_target_xcconfig` and `user_target_xcconfig`.' + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/platform_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/platform_proxy.rb new file mode 100644 index 0000000..63db751 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/dsl/platform_proxy.rb @@ -0,0 +1,82 @@ +module Pod + class Specification + module DSL + # The PlatformProxy works in conjunction with Specification#_on_platform. + # It provides support for a syntax like `spec.ios.source_files = 'file'`. + # + class PlatformProxy + # @return [Specification] the specification for this platform proxy. + # + attr_accessor :spec + + # @return [Symbol] the platform described by this proxy. Can be either + # `:ios` or `:osx`. + # + attr_reader :platform + + # @param [Specification] spec @see spec + # @param [Symbol] platform @see platform + # + def initialize(spec, platform) + @spec = spec + @platform = platform + end + + # Defines a setter method for each attribute of the specification + # class, that forwards the message to the {#specification} using the + # {Specification#on_platform} method. + # + # @return [void] + # + def method_missing(meth, *args, &block) + return super unless attribute = attribute_for_method(meth) + raise NoMethodError, "#{attribute} cannot be set per-platform" unless attribute.multi_platform? + spec.store_attribute(attribute.name, args.first, platform) + end + + # @!visibility private + # + def respond_to_missing?(method, include_all) + attribute = attribute_for_method(method) + (attribute && attribute.multi_platform?) || super + end + + # Allows to add dependency for the platform. + # + # @return [void] + # + def dependency(*args) + name, *version_requirements = args + platform_name = platform.to_s + platform_hash = spec.attributes_hash[platform_name] || {} + platform_hash['dependencies'] ||= {} + platform_hash['dependencies'][name] = version_requirements + spec.attributes_hash[platform_name] = platform_hash + end + + # Allows to set the deployment target for the platform. 
+ # + # @return [void] + # + def deployment_target=(value) + platform_name = platform.to_s + spec.attributes_hash['platforms'] ||= {} + spec.attributes_hash['platforms'][platform_name] = value + end + + private + + def attribute_for_method(method) + method = method.to_sym + Specification::DSL.attributes.values.find do |attribute| + if attribute.writer_name.to_sym == method + true + elsif attribute.writer_singular_form + attribute.writer_singular_form.to_sym == method + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/json.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/json.rb new file mode 100644 index 0000000..3b19c83 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/json.rb @@ -0,0 +1,104 @@ +module Pod + class Specification + module JSONSupport + # @return [String] the json representation of the specification. + # + def to_json(*a) + require 'json' + to_hash.to_json(*a) << "\n" + end + + # @return [String] the pretty json representation of the specification. + # + def to_pretty_json(*a) + require 'json' + JSON.pretty_generate(to_hash, *a) << "\n" + end + + #-----------------------------------------------------------------------# + + # @return [Hash] the hash representation of the specification including + # subspecs. + # + def to_hash + hash = attributes_hash.dup + if root? || available_platforms != parent.available_platforms + platforms = Hash[available_platforms.map { |p| [p.name.to_s, p.deployment_target && p.deployment_target.to_s] }] + hash['platforms'] = platforms + end + specs_by_type = subspecs.group_by(&:spec_type) + all_appspecs = specs_by_type[:app] || [] + all_testspecs = specs_by_type[:test] || [] + all_subspecs = specs_by_type[:library] || [] + + hash.delete('testspecs') + hash['testspecs'] = all_testspecs.map(&:to_hash) unless all_testspecs.empty? + hash.delete('appspecs') + hash['appspecs'] = all_appspecs.map(&:to_hash) unless all_appspecs.empty? + hash.delete('subspecs') + hash['subspecs'] = all_subspecs.map(&:to_hash) unless all_subspecs.empty? + + # Since CocoaPods 1.7 version the DSL has changed to be pluralized. When we serialize a podspec to JSON with + # 1.7, ensure that we also include the singular version in the hash to maintain backwards compatibility with + # < 1.7 versions. We also delete this key and re-add it to ensure it gets added at the end. + hash.delete('swift_version') + hash['swift_version'] = swift_version.to_s unless swift_version.nil? + + hash + end + end + + # Configures a new specification from the given JSON representation. + # + # @param [String] the JSON encoded hash which contains the information of + # the specification. + # + # + # @return [Specification] the specification + # + def self.from_json(json) + require 'json' + hash = JSON.parse(json) + from_hash(hash) + end + + # Configures a new specification from the given hash. + # + # @param [Hash] hash the hash which contains the information of the + # specification. + # + # @param [Specification] parent the parent of the specification unless the + # specification is a root. 
+ # + # @return [Specification] the specification + # + def self.from_hash(hash, parent = nil, test_specification: false, app_specification: false) + attributes_hash = hash.dup + spec = Spec.new(parent, nil, test_specification, :app_specification => app_specification) + subspecs = attributes_hash.delete('subspecs') + testspecs = attributes_hash.delete('testspecs') + appspecs = attributes_hash.delete('appspecs') + + ## backwards compatibility with 1.3.0 + spec.test_specification = !attributes_hash['test_type'].nil? + + spec.attributes_hash = attributes_hash + spec.subspecs.concat(subspecs_from_hash(spec, subspecs, false, false)) + spec.subspecs.concat(subspecs_from_hash(spec, testspecs, true, false)) + spec.subspecs.concat(subspecs_from_hash(spec, appspecs, false, true)) + + spec + end + + def self.subspecs_from_hash(spec, subspecs, test_specification, app_specification) + return [] if subspecs.nil? + subspecs.map do |s_hash| + Specification.from_hash(s_hash, spec, + :test_specification => test_specification, + :app_specification => app_specification) + end + end + + #-----------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter.rb new file mode 100644 index 0000000..671fa66 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter.rb @@ -0,0 +1,576 @@ +require 'cocoapods-core/specification/linter/result' +require 'cocoapods-core/specification/linter/analyzer' + +module Pod + class Specification + # The Linter check specifications for errors and warnings. + # + # It is designed not only to guarantee the formal functionality of a + # specification, but also to support the maintenance of sources. + # + class Linter + # @return [Specification] the specification to lint. + # + attr_reader :spec + + # @return [Pathname] the path of the `podspec` file where {#spec} is + # defined. + # + attr_reader :file + + attr_reader :results + + # @param [Specification, Pathname, String] spec_or_path + # the Specification or the path of the `podspec` file to lint. + # + def initialize(spec_or_path) + if spec_or_path.is_a?(Specification) + @spec = spec_or_path + @file = @spec.defined_in_file + else + @file = Pathname.new(spec_or_path) + begin + @spec = Specification.from_file(@file) + rescue => e + @spec = nil + @raise_message = e.message + end + end + end + + # Lints the specification adding a {Result} for any failed check to the + # {#results} object. + # + # @return [Bool] whether the specification passed validation. + # + def lint + @results = Results.new + if spec + validate_root_name + check_required_attributes + check_requires_arc_attribute + run_root_validation_hooks + perform_all_specs_analysis + else + results.add_error('spec', "The specification defined in `#{file}` "\ + "could not be loaded.\n\n#{@raise_message}") + end + results.empty? + end + + #-----------------------------------------------------------------------# + + # !@group Lint results + + public + + # @return [Array] all the errors generated by the Linter. + # + def errors + @errors ||= results.select { |r| r.type == :error } + end + + # @return [Array] all the warnings generated by the Linter. 
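As an aside, a minimal sketch of driving this Linter programmatically (assuming `cocoapods-core` is available; the podspec path is a placeholder):

    require 'cocoapods-core'

    linter = Pod::Specification::Linter.new('MyLib.podspec')
    if linter.lint
      puts 'podspec passed linting'
    else
      # each Result renders roughly as "[ERROR] [attribute] message"
      linter.results.each { |result| puts result }
    end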
+ # + def warnings + @warnings ||= results.select { |r| r.type == :warning } + end + + #-----------------------------------------------------------------------# + + private + + # !@group Lint steps + + # Checks that the spec's root name matches the filename. + # + # @return [void] + # + def validate_root_name + if spec.root.name && file + acceptable_names = [ + spec.root.name + '.podspec', + spec.root.name + '.podspec.json', + ] + names_match = acceptable_names.include?(file.basename.to_s) + unless names_match + results.add_error('name', 'The name of the spec should match the ' \ + 'name of the file.') + end + end + end + + # Generates a warning if the requires_arc attribute has true or false string values. + # + # @return [void] + # + def check_requires_arc_attribute + attribute = DSL.attributes.values.find { |attr| attr.name == :requires_arc } + if attribute + value = spec.send(attribute.name) + if value == 'true' || value == 'false' + results.add_warning('requires_arc', value + ' is considered to be the name of a file.') + end + end + end + + # Checks that every required attribute has a value. + # + # @return [void] + # + def check_required_attributes + attributes = DSL.attributes.values.select(&:required?) + attributes.each do |attr| + begin + value = spec.send(attr.name) + unless value && (!value.respond_to?(:empty?) || !value.empty?) + if attr.name == :license + results.add_warning('attributes', 'Missing required attribute ' \ + "`#{attr.name}`.") + else + results.add_error('attributes', 'Missing required attribute ' \ + "`#{attr.name}`.") + end + end + rescue => exception + results.add_error('attributes', "Unable to parse attribute `#{attr.name}` due to error: #{exception}") + end + end + end + + # Runs the validation hook for root only attributes. + # + # @return [void] + # + def run_root_validation_hooks + attributes = DSL.attributes.values.select(&:root_only?) + run_validation_hooks(attributes, spec) + end + + # Run validations for multi-platform attributes activating. + # + # @return [void] + # + def perform_all_specs_analysis + all_specs = [spec, *spec.recursive_subspecs] + all_specs.each do |current_spec| + current_spec.available_platforms.each do |platform| + @consumer = Specification::Consumer.new(current_spec, platform) + results.consumer = @consumer + run_all_specs_validation_hooks + analyzer = Analyzer.new(@consumer, results) + results = analyzer.analyze + @consumer = nil + results.consumer = nil + end + end + end + + # @return [Specification::Consumer] the current consumer. + # + attr_accessor :consumer + + # Runs the validation hook for the attributes that are not root only. + # + # @return [void] + # + def run_all_specs_validation_hooks + attributes = DSL.attributes.values.reject(&:root_only?) + run_validation_hooks(attributes, consumer) + end + + # Runs the validation hook for each attribute. + # + # @note Hooks are called only if there is a value for the attribute as + # required attributes are already checked by the + # {#check_required_attributes} step. 
+ # + # @return [void] + # + def run_validation_hooks(attributes, target) + attributes.each do |attr| + validation_hook = "_validate_#{attr.name}" + next unless respond_to?(validation_hook, true) + begin + value = target.send(attr.name) + next unless value + send(validation_hook, value) + rescue => e + results.add_error(attr.name, "Unable to validate due to exception: #{e}") + end + end + end + + #-----------------------------------------------------------------------# + + private + + # Performs validations related to the `name` attribute. + # + def _validate_name(name) + if name =~ %r{/} + results.add_error('name', 'The name of a spec should not contain ' \ + 'a slash.') + end + + if name =~ /\s/ + results.add_error('name', 'The name of a spec should not contain ' \ + 'whitespace.') + end + + if name[0, 1] == '.' + results.add_error('name', 'The name of a spec should not begin' \ + ' with a period.') + end + end + + # @!group Root spec validation helpers + + # Performs validations related to the `authors` attribute. + # + def _validate_authors(a) + if a.is_a? Hash + if a == { 'YOUR NAME HERE' => 'YOUR EMAIL HERE' } + results.add_error('authors', 'The authors have not been updated ' \ + 'from default') + end + end + end + + # Performs validations related to the `version` attribute. + # + def _validate_version(v) + if v.to_s.empty? + results.add_error('version', 'A version is required.') + end + end + + # Performs validations related to the `module_name` attribute. + # + def _validate_module_name(m) + unless m.nil? || m =~ /^[a-z_][0-9a-z_]*$/i + results.add_error('module_name', 'The module name of a spec' \ + ' should be a valid C99 identifier.') + end + end + + # Performs validations related to the `summary` attribute. + # + def _validate_summary(s) + if s.length > 140 + results.add_warning('summary', 'The summary should be a short ' \ + 'version of `description` (max 140 characters).') + end + if s =~ /A short description of/ + results.add_warning('summary', 'The summary is not meaningful.') + end + end + + # Performs validations related to the `description` attribute. + # + def _validate_description(d) + if d == spec.summary + results.add_warning('description', 'The description is equal to' \ + ' the summary.') + end + + if d.strip.empty? + results.add_error('description', 'The description is empty.') + elsif spec.summary && d.length < spec.summary.length + results.add_warning('description', 'The description is shorter ' \ + 'than the summary.') + end + end + + # Performs validations related to the `homepage` attribute. + # + def _validate_homepage(h) + return unless h.is_a?(String) + if h =~ %r{http://EXAMPLE} + results.add_warning('homepage', 'The homepage has not been updated' \ + ' from default') + end + end + + # Performs validations related to the `frameworks` attribute. + # + def _validate_frameworks(frameworks) + if frameworks_invalid?(frameworks) + results.add_error('frameworks', 'A framework should only be' \ + ' specified by its name') + end + end + + # Performs validations related to the `weak frameworks` attribute. + # + def _validate_weak_frameworks(frameworks) + if frameworks_invalid?(frameworks) + results.add_error('weak_frameworks', 'A weak framework should only be' \ + ' specified by its name') + end + end + + # Performs validations related to the `libraries` attribute. 
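For reference, values that satisfy the framework and library checks implemented here look like the following (the names are examples only):

    spec.frameworks = 'CoreData', 'QuartzCore'  # names only; 'QuartzCore.framework' would be flagged
    spec.libraries  = 'z', 'xml2', 'sqlite3'    # no 'lib' prefix and no '.a'/'.dylib' extension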
+ # + def _validate_libraries(libs) + libs.each do |lib| + lib = lib.downcase + if lib.end_with?('.a') || lib.end_with?('.dylib') + results.add_error('libraries', 'Libraries should not include the' \ + ' extension ' \ + "(`#{lib}`)") + end + + if lib.start_with?('lib') + results.add_error('libraries', 'Libraries should omit the `lib`' \ + ' prefix ' \ + " (`#{lib}`)") + end + + if lib.include?(',') + results.add_error('libraries', 'Libraries should not include comas ' \ + "(`#{lib}`)") + end + end + end + + # Performs validations related to the `license` attribute. + # + def _validate_license(l) + type = l[:type] + file = l[:file] + if type.nil? + results.add_warning('license', 'Missing license type.') + end + if type && type.delete(' ').delete("\n").empty? + results.add_warning('license', 'Invalid license type.') + end + if type && type =~ /\(example\)/ + results.add_error('license', 'Sample license type.') + end + if file && Pathname.new(file).extname !~ /^(\.(txt|md|markdown|))?$/i + results.add_error('license', 'Invalid file type') + end + end + + # Performs validations related to the `source` attribute. + # + def _validate_source(s) + return unless s.is_a?(Hash) + if git = s[:git] + tag, commit = s.values_at(:tag, :commit) + version = spec.version.to_s + + if git =~ %r{http://EXAMPLE} + results.add_error('source', 'The Git source still contains the ' \ + 'example URL.') + end + if commit && commit.downcase =~ /head/ + results.add_error('source', 'The commit of a Git source cannot be' \ + ' `HEAD`.') + end + if tag && !tag.to_s.include?(version) + results.add_warning('source', 'The version should be included in' \ + ' the Git tag.') + end + if tag.nil? + results.add_warning('source', 'Git sources should specify a tag.', true) + end + end + + perform_github_source_checks(s) + check_git_ssh_source(s) + end + + # Performs validations related to the `deprecated_in_favor_of` attribute. + # + def _validate_deprecated_in_favor_of(d) + if spec.root.name == Specification.root_name(d) + results.add_error('deprecated_in_favor_of', 'a spec cannot be ' \ + 'deprecated in favor of itself') + end + end + + # Performs validations related to the `test_type` attribute. + # + def _validate_test_type(t) + supported_test_types = Specification::DSL::SUPPORTED_TEST_TYPES.map(&:to_s) + unless supported_test_types.include?(t.to_s) + results.add_error('test_type', "The test type `#{t}` is not supported. " \ + "Supported test type values are #{supported_test_types}.") + end + end + + def _validate_app_host_name(n) + unless consumer.requires_app_host? + results.add_error('app_host_name', '`requires_app_host` must be set to ' \ + '`true` when `app_host_name` is specified.') + end + + unless consumer.dependencies.map(&:name).include?(n) + results.add_error('app_host_name', "The app host name (#{n}) specified by `#{consumer.spec.name}` could " \ + 'not be found. You must explicitly declare a dependency on that app spec.') + end + end + + # Performs validations related to the `script_phases` attribute. + # + def _validate_script_phases(s) + s.each do |script_phase| + keys = script_phase.keys + unrecognized_keys = keys - Specification::ALL_SCRIPT_PHASE_KEYS + unless unrecognized_keys.empty? + results.add_error('script_phases', "Unrecognized option(s) `#{unrecognized_keys.join(', ')}` in script phase `#{script_phase[:name]}`. 
" \ + "Available options are `#{Specification::ALL_SCRIPT_PHASE_KEYS.join(', ')}`.") + end + missing_required_keys = Specification::SCRIPT_PHASE_REQUIRED_KEYS - keys + unless missing_required_keys.empty? + results.add_error('script_phases', "Missing required shell script phase options `#{missing_required_keys.join(', ')}` in script phase `#{script_phase[:name]}`.") + end + unless Specification::EXECUTION_POSITION_KEYS.include?(script_phase[:execution_position]) + results.add_error('script_phases', "Invalid execution position value `#{script_phase[:execution_position]}` in shell script `#{script_phase[:name]}`. " \ + "Available options are `#{Specification::EXECUTION_POSITION_KEYS.join(', ')}`.") + end + end + end + + # Performs validations related to the `on_demand_resources` attribute. + # + def _validate_on_demand_resources(h) + h.values.each do |value| + unless Specification::ON_DEMAND_RESOURCES_CATEGORY_KEYS.include?(value[:category]) + results.add_error('on_demand_resources', "Invalid on demand resources category value `#{value[:category]}`. " \ + "Available options are `#{Specification::ON_DEMAND_RESOURCES_CATEGORY_KEYS.join(', ')}`.") + end + end + end + + # Performs validation related to the `scheme` attribute. + # + def _validate_scheme(s) + unless s.empty? + if consumer.spec.subspec? && consumer.spec.library_specification? + results.add_error('scheme', 'Scheme configuration is not currently supported for subspecs.') + return + end + if s.key?(:launch_arguments) && !s[:launch_arguments].is_a?(Array) + results.add_error('scheme', 'Expected an array for key `launch_arguments`.') + end + if s.key?(:environment_variables) && !s[:environment_variables].is_a?(Hash) + results.add_error('scheme', 'Expected a hash for key `environment_variables`.') + end + if s.key?(:code_coverage) && ![true, false].include?(s[:code_coverage]) + results.add_error('scheme', 'Expected a boolean for key `code_coverage`.') + end + end + end + + # Performs validations related to github sources. + # + def perform_github_source_checks(s) + require 'uri' + + if git = s[:git] + return unless git =~ /^#{URI.regexp}$/ + git_uri = URI.parse(git) + if git_uri.host + perform_github_uri_checks(git, git_uri) if git_uri.host.end_with?('github.com') + end + end + end + + def perform_github_uri_checks(git, git_uri) + if git_uri.host.start_with?('www.') + results.add_warning('github_sources', 'Github repositories should ' \ + 'not use `www` in their URL.') + end + unless git.end_with?('.git') + results.add_warning('github_sources', 'Github repositories ' \ + 'should end in `.git`.') + end + unless git_uri.scheme == 'https' + results.add_warning('github_sources', 'Github repositories ' \ + 'should use an `https` link.', true) + end + end + + # Performs validations related to SSH sources + # + def check_git_ssh_source(s) + if git = s[:git] + if git =~ %r{\w+\@(\w|\.)+\:(/\w+)*} + results.add_warning('source', 'Git SSH URLs will NOT work for ' \ + 'people behind firewalls configured to only allow HTTP, ' \ + 'therefore HTTPS is preferred.', true) + end + end + end + + # Performs validations related to the `social_media_url` attribute. + # + def _validate_social_media_url(s) + if s =~ %r{https://twitter.com/EXAMPLE} + results.add_warning('social_media_url', 'The social media URL has ' \ + 'not been updated from the default.') + end + end + + # Performs validations related to the `readme` attribute. 
+ # + def _validate_readme(s) + if s =~ %r{https://www.example.com/README} + results.add_warning('readme', 'The readme has ' \ + 'not been updated from the default.') + end + end + + # Performs validations related to the `changelog` attribute. + # + def _validate_changelog(s) + if s =~ %r{https://www.example.com/CHANGELOG} + results.add_warning('changelog', 'The changelog has ' \ + 'not been updated from the default.') + end + end + + # @param [Hash,Object] value + # + def _validate_info_plist(value) + return if value.empty? + if consumer.spec.subspec? && consumer.spec.library_specification? + results.add_error('info_plist', 'Info.plist configuration is not currently supported for subspecs.') + end + end + + #-----------------------------------------------------------------------# + + # @!group All specs validation helpers + + private + + # Performs validations related to the `compiler_flags` attribute. + # + def _validate_compiler_flags(flags) + if flags.join(' ').split(' ').any? { |flag| flag.start_with?('-Wno') } + results.add_warning('compiler_flags', 'Warnings must not be disabled' \ + '(`-Wno compiler` flags).') + end + end + + # Returns whether the frameworks are valid + # + # @param frameworks [Array] + # The frameworks to be validated + # + # @return [Boolean] true if a framework contains any + # non-alphanumeric character or includes an extension. + # + def frameworks_invalid?(frameworks) + frameworks.any? do |framework| + framework_regex = /[^\w\-\+]/ + framework =~ framework_regex + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter/analyzer.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter/analyzer.rb new file mode 100644 index 0000000..a3478a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter/analyzer.rb @@ -0,0 +1,218 @@ +require 'cocoapods-core/specification/linter/result' + +module Pod + class Specification + class Linter + class Analyzer + def initialize(consumer, results) + @consumer = consumer + @results = results + @results.consumer = @consumer + end + + # Analyzes the consumer adding a {Result} for any failed check to + # the {#results} object. + # + # @return [Results] the results of the analysis. + # + def analyze + check_attributes + validate_file_patterns + check_if_spec_is_empty + results + end + + private + + attr_reader :consumer + + attr_reader :results + + # @return [Array] Keys that are valid but have been deprecated. + # + DEPRECATED_KEYS = ['swift_version'].freeze + + # @return [Array] Keys that are only used for internal purposes. + # + INTERNAL_KEYS = ['configuration_pod_whitelist'].freeze + + # Checks the attributes hash for any unknown key which might be the + # result of a misspelling in a JSON file. + # + # @note Sub-keys are not checked per-platform as + # there is no attribute supporting this combination. + # + # @note The keys of sub-keys are not checked as they are only used by + # the `source` attribute and they are subject + # to change according to the support in the + # `cocoapods-downloader` gem. 
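A sketch of how this check surfaces to users, assuming `cocoapods-core` is loaded and given a JSON podspec with a deliberately misspelled key (`source_filez`):

    json = '{ "name": "MyLib", "version": "1.0.0", "source_filez": "Sources/**/*.swift" }'
    spec = Pod::Specification.from_json(json)
    linter = Pod::Specification::Linter.new(spec)
    linter.lint
    # linter.results would now include a warning along the lines of:
    #   [WARNING] [attributes] Unrecognized `source_filez` key.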
+ # + def check_attributes + attributes_keys = Pod::Specification::DSL.attributes.keys.map(&:to_s) + platform_keys = Specification::DSL::PLATFORMS.map(&:to_s) + valid_keys = attributes_keys + platform_keys + DEPRECATED_KEYS + INTERNAL_KEYS + attributes_hash = consumer.spec.attributes_hash + keys = attributes_hash.keys + Specification::DSL::PLATFORMS.each do |platform| + if attributes_hash[platform.to_s] + keys += attributes_hash[platform.to_s].keys + end + end + unknown_keys = keys - valid_keys + + unknown_keys.each do |key| + results.add_warning('attributes', "Unrecognized `#{key}` key.") + end + + Pod::Specification::DSL.attributes.each do |_key, attribute| + declared_value = consumer.spec.attributes_hash[attribute.name.to_s] + validate_attribute_occurrence(attribute, declared_value) + validate_attribute_type(attribute, declared_value) + if attribute.name != :platforms + value = value_for_attribute(attribute) + validate_attribute_value(attribute, value) if value + end + end + end + + # Checks the attributes that represent file patterns. + # + # @todo Check the attributes hash directly. + # + def validate_file_patterns + attributes = DSL.attributes.values.select(&:file_patterns?) + attributes.each do |attrb| + patterns = consumer.send(attrb.name) + + if patterns.is_a?(Hash) + patterns = patterns.values.flatten(1) + end + + if patterns.respond_to?(:each) + patterns.each do |pattern| + pattern = pattern[:paths].join if attrb.name == :on_demand_resources + if pattern.respond_to?(:start_with?) && pattern.start_with?('/') + results.add_error('File Patterns', 'File patterns must be ' \ + "relative and cannot start with a slash (#{attrb.name}).") + end + end + end + end + end + + # Check empty subspec attributes + # + def check_if_spec_is_empty + methods = %w( source_files on_demand_resources resources resource_bundles preserve_paths + dependencies vendored_libraries vendored_frameworks ) + empty_patterns = methods.all? { |m| consumer.send(m).empty? } + empty = empty_patterns && consumer.spec.subspecs.empty? + if empty + results.add_error('File Patterns', "The #{consumer.spec} spec is " \ + 'empty (no source files, resources, resource_bundles, ' \ + 'preserve paths, vendored_libraries, vendored_frameworks, ' \ + 'dependencies, nor subspecs).') + end + end + + private + + # Returns the own or inherited (if applicable) value of the + # given attribute. + # + # @param [Spec::DSL::Attribute] attribute + # The attribute. + # + # @return [mixed] + # + def value_for_attribute(attribute) + if attribute.root_only? + consumer.spec.send(attribute.name) + else + consumer.send(attribute.name) if consumer.respond_to?(attribute.name) + end + rescue => e + results.add_error('attributes', "Unable to validate `#{attribute.name}` (#{e}).") + nil + end + + # Validates that root attributes don't occur in subspecs. + # + # @param [Spec::DSL::Attribute] attribute + # The attribute. + + # @param [Object] value + # The value of the attribute. + # + def validate_attribute_occurrence(attribute, value) + if attribute.root_only? && !value.nil? && !consumer.spec.root? + results.add_error('attributes', "Can't set `#{attribute.name}` attribute for " \ + "subspecs (in `#{consumer.spec.name}`).") + end + if attribute.test_only? && !value.nil? && !consumer.spec.test_specification? + results.add_error('attributes', "Attribute `#{attribute.name}` can only be set " \ + "within test specs (in `#{consumer.spec.name}`).") + end + end + + # Validates the given value for the given attribute. 
+ # + # @param [Spec::DSL::Attribute] attribute + # The attribute. + # + # @param [Object] value + # The value of the attribute. + # + def validate_attribute_value(attribute, value) + if attribute.keys.is_a?(Array) + validate_attribute_array_keys(attribute, value) + elsif attribute.keys.is_a?(Hash) + validate_attribute_hash_keys(attribute, value) + end + end + + def validate_attribute_type(attribute, value) + return unless value + types = attribute.supported_types + if types.none? { |klass| value.class == klass } + results.add_error('attributes', 'Unacceptable type ' \ + "`#{value.class}` for `#{attribute.name}`. Allowed values: `#{types.inspect}`.") + end + end + + def validate_attribute_array_keys(attribute, value) + unknown_keys = value.keys.map(&:to_s) - attribute.keys.map(&:to_s) + unknown_keys.each do |unknown_key| + results.add_warning('keys', "Unrecognized `#{unknown_key}` key for " \ + "`#{attribute.name}` attribute.") + end + end + + def validate_attribute_hash_keys(attribute, value) + unless value.is_a?(Hash) + results.add_error(attribute.name, "Unsupported type `#{value.class}`, expected `Hash`") + return + end + major_keys = value.keys & attribute.keys.keys + if major_keys.count.zero? + results.add_warning('keys', "Missing primary key for `#{attribute.name}` " \ + 'attribute. The acceptable ones are: ' \ + "`#{attribute.keys.keys.map(&:to_s).sort.join(', ')}`.") + elsif major_keys.count == 1 + acceptable = attribute.keys[major_keys.first] || [] + unknown = value.keys - major_keys - acceptable + unless unknown.empty? + results.add_warning('keys', "Incompatible `#{unknown.sort.join(', ')}` " \ + "key(s) with `#{major_keys.first}` primary key for " \ + "`#{attribute.name}` attribute.") + end + else + sorted_keys = major_keys.map(&:to_s).sort + results.add_warning('keys', "Incompatible `#{sorted_keys.join(', ')}` " \ + "keys for `#{attribute.name}` attribute.") + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter/result.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter/result.rb new file mode 100644 index 0000000..1843a97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/linter/result.rb @@ -0,0 +1,128 @@ +module Pod + class Specification + class Linter + class Results + public + + class Result + # @return [Symbol] the type of result. + # + attr_reader :type + + # @return[String] the name of the attribute associated with result. + # + attr_reader :attribute_name + + # @return [String] the message associated with result. + # + attr_reader :message + + # @return [Boolean] whether the result only applies to public specs. + # + attr_reader :public_only + alias_method :public_only?, :public_only + + # @param [Symbol] type @see type + # @param [String] message @see message + # + def initialize(type, attribute_name, message, public_only = false) + @type = type + @attribute_name = attribute_name + @message = message + @public_only = public_only + @platforms = [] + end + + # @return [Array] the platforms where this result was + # generated. + # + attr_reader :platforms + + # @return [String] a string representation suitable for UI output. + # + def to_s + r = "[#{type.to_s.upcase}] [#{attribute_name}] #{message}" + if platforms != Specification::PLATFORMS + platforms_names = platforms.uniq.map do |p| + Platform.string_name(p) + end + r << " [#{platforms_names * ' - '}]" unless platforms.empty? 
+ end + r + end + end + + def initialize + @results = [] + @consumer = nil + end + + include Enumerable + + def each + results.each { |r| yield r } + end + + def empty? + results.empty? + end + + # @return [Specification::Consumer] the current consumer. + # + attr_accessor :consumer + + # Adds an error result with the given message. + # + # @param [String] message + # The message of the result. + # + # @return [void] + # + def add_error(attribute_name, message, public_only = false) + add_result(:error, attribute_name, message, public_only) + end + + # Adds a warning result with the given message. + # + # @param [String] message + # The message of the result. + # + # @return [void] + # + def add_warning(attribute_name, message, public_only = false) + add_result(:warning, attribute_name, message, public_only) + end + + private + + # @return [Array] all of the generated results. + # + attr_reader :results + + # Adds a result of the given type with the given message. If there is a + # current platform it is added to the result. If a result with the same + # type and the same message is already available the current platform is + # added to the existing result. + # + # @param [Symbol] type + # The type of the result (`:error`, `:warning`). + # + # @param [String] message + # The message of the result. + # + # @return [void] + # + def add_result(type, attribute_name, message, public_only) + result = results.find do |r| + r.type == type && r.attribute_name == attribute_name && r.message == message && r.public_only? == public_only + end + unless result + result = Result.new(type, attribute_name, message, public_only) + results << result + end + result.platforms << @consumer.platform_name if @consumer + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/root_attribute_accessors.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/root_attribute_accessors.rb new file mode 100644 index 0000000..7cbbb54 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/root_attribute_accessors.rb @@ -0,0 +1,226 @@ +module Pod + class Specification + module DSL + # Provides the accessors methods for the root attributes. Root attributes + # do not support multi-platform values and inheritance. + # + module RootAttributesAccessors + # @return [String] The name of the specification *not* including the + # names of the parents, in case of ‘sub-specifications’. + # + def base_name + attributes_hash['name'] + end + + # @return [String] The name of the specification including the names of + # the parents, in case of ‘sub-specifications’. + # + def name + parent ? "#{parent.name}/#{base_name}" : base_name + end + + # @return [Bool, String, Array] The requires_arc value. + # + def requires_arc + attributes_hash['requires_arc'] + end + + # @return [Version] The version of the Pod. + # + def version + if root? + @version ||= Version.new(attributes_hash['version']) + else + @version ||= root.version + end + end + + # @deprecated in favor of #swift_versions + # + # @return [Version] The Swift version specified by the specification. + # + def swift_version + swift_versions.last + end + + # @return [Array] The Swift versions supported by the specification. + # + def swift_versions + @swift_versions ||= begin + swift_versions = Array(attributes_hash['swift_versions']).dup + # Pre 1.7.0, the DSL was singularized as it supported only a single version of Swift. 
In 1.7.0 the DSL + # is now pluralized always and a specification can support multiple versions of Swift. This ensures + # we parse the old JSON serialized format and include it as part of the Swift versions supported. + swift_versions << attributes_hash['swift_version'] unless attributes_hash['swift_version'].nil? + swift_versions.map { |swift_version| Version.new(swift_version) }.uniq.sort + end + end + + # @return [Requirement] The CocoaPods version required to use the specification. + # + def cocoapods_version + @cocoapods_version ||= Requirement.create(attributes_hash['cocoapods_version']) + end + + # @return [Hash] a hash containing the authors as the keys and their + # email address as the values. + # + # @note The value is coerced to a hash with a nil email if needed. + # + # @example Possible values + # + # { 'Author' => 'email@host.com' } + # [ 'Author', { 'Author_2' => 'email@host.com' } ] + # [ 'Author', 'Author_2' ] + # 'Author' + # + def authors + authors = attributes_hash['authors'] + if authors.is_a?(Hash) + authors + elsif authors.is_a?(Array) + result = {} + authors.each do |name_or_hash| + if name_or_hash.is_a?(String) + result[name_or_hash] = nil + else + result.merge!(name_or_hash) + end + end + result + elsif authors.is_a?(String) + { authors => nil } + end + end + + # @return [String] The social media URL. + # + def social_media_url + attributes_hash['social_media_url'] + end + + # @return [String] The readme. + # + def readme + attributes_hash['readme'] + end + + # @return [String] The changelog. + # + def changelog + attributes_hash['changelog'] + end + + # @return [Hash] A hash containing the license information of the Pod. + # + # @note The indentation is stripped from the license text. + # + def license + license = attributes_hash['license'] + if license.is_a?(String) + { :type => license } + elsif license.is_a?(Hash) + license = Specification.convert_keys_to_symbol(license) + license[:text] = license[:text].strip_heredoc if license[:text] + license + else + {} + end + end + + # @return [String] The URL of the homepage of the Pod. + # + def homepage + attributes_hash['homepage'] + end + + # @return [Hash{Symbol=>String}] The location from where the library + # should be retrieved. + # + def source + value = attributes_hash['source'] + if value && value.is_a?(Hash) + Specification.convert_keys_to_symbol(value) + else + value + end + end + + # @return [String] A short description of the Pod. + # + def summary + summary = attributes_hash['summary'] + summary.strip_heredoc.chomp if summary + end + + # @return [String] A longer description of the Pod. + # + # @note The indentation is stripped from the description. + # + def description + description = attributes_hash['description'] + description.strip_heredoc.chomp if description + end + + # @return [Array] The list of the URL for the screenshots of + # the Pod. + # + # @note The value is coerced to an array. + # + def screenshots + value = attributes_hash['screenshots'] + [*value] + end + + # @return [String, Nil] The documentation URL of the Pod if specified. + # + def documentation_url + attributes_hash['documentation_url'] + end + + # @return [String, Nil] The prepare command of the Pod if specified. + # + def prepare_command + command = attributes_hash['prepare_command'] + command.strip_heredoc.chomp if command + end + + # @return [Bool] Indicates, that if use_frameworks! is specified, the + # framework should include a static library. 
+ # + def static_framework + attributes_hash['static_framework'] + end + + # @return [Bool] Whether the Pod has been deprecated. + # + def deprecated + attributes_hash['deprecated'] + end + + # @return [String] The name of the Pod that this one has been + # deprecated in favor of. + # + def deprecated_in_favor_of + attributes_hash['deprecated_in_favor_of'] + end + + # @return [Bool] Wether the pod is deprecated either in favor of some other + # pod or simply deprecated. + # + def deprecated? + deprecated || !deprecated_in_favor_of.nil? + end + + # @return [String, Nil] The custom module map file of the Pod, + # if specified. + # + def module_map + attributes_hash['module_map'] + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/set.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/set.rb new file mode 100644 index 0000000..397e60e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/set.rb @@ -0,0 +1,177 @@ +require 'active_support/core_ext/array/conversions' +require 'cocoapods-core/specification/set/presenter' + +module Pod + class Specification + # A Specification::Set is responsible of handling all the specifications of + # a Pod. This class stores the information of the dependencies that required + # a Pod in the resolution process. + # + # @note The order in which the sets are provided is used to select a + # specification if multiple are available for a given version. + # + # @note The set class is not and should be not aware of the backing store + # of a Source. + # + class Set + # @return [String] the name of the Pod. + # + attr_reader :name + + # @return [Array] the sources that contain the specifications for + # the available versions of a Pod. + # + attr_reader :sources + + # @param [String] name + # the name of the Pod. + # + # @param [Array,Source] sources + # the sources that contain a Pod. + # + def initialize(name, sources = []) + @name = name + @sources = Array(sources) + end + + # @return [Specification] the top level specification of the Pod for the + # {#required_version}. + # + # @note If multiple sources have a specification for the + # {#required_version}, the order in which they are provided + # is used to disambiguate. + # + def specification + unless highest_version_spec_path + raise Informative, "Could not find the highest version for `#{name}`. "\ + "This could be due to an empty #{name} directory in a local repository." + end + + Specification.from_file(highest_version_spec_path) + end + + # @return [Specification] the top level specification for this set for any version. + # + def specification_name + versions_by_source.each do |source, versions| + next unless version = versions.first + return source.specification(name, version).name + end + nil + end + + # @return [Array] the paths to specifications for the given + # version + # + def specification_paths_for_version(version) + sources = @sources.select { |source| versions_by_source[source].include?(version) } + sources.map { |source| source.specification_path(name, version) } + end + + # @return [Array] all the available versions for the Pod, sorted + # from highest to lowest. + # + def versions + @versions ||= versions_by_source.values.flatten.uniq.sort.reverse + end + + # @return [Version] The highest version known of the specification. 
+ # + def highest_version + versions.first + end + + # @return [Pathname] The path of the highest version. + # + # @note If multiple sources have a specification for the + # {#required_version}, the order in which they are provided + # is used to disambiguate. + # + def highest_version_spec_path + @highest_version_spec_path ||= specification_paths_for_version(highest_version).first + end + + # @return [Hash{Source => Version}] all the available versions for the + # Pod grouped by source. + # + def versions_by_source + @versions_by_source ||= sources.each_with_object({}) do |source, result| + result[source] = source.versions(name) + end + end + + def ==(other) + self.class == other.class && + @name == other.name && + @sources.map(&:name) == other.sources.map(&:name) + end + + def to_s + "#<#{self.class.name} for `#{name}' available at `#{sources.map(&:name).join(', ')}'>" + end + alias_method :inspect, :to_s + + # Returns a hash representation of the set composed by dumb data types. + # + # @example + # + # "name" => "CocoaLumberjack", + # "versions" => { "master" => [ "1.6", "1.3.3"] }, + # "highest_version" => "1.6", + # "highest_version_spec" => 'REPO/CocoaLumberjack/1.6/CocoaLumberjack.podspec' + # + # @return [Hash] The hash representation. + # + def to_hash + versions = versions_by_source.reduce({}) do |memo, (source, version)| + memo[source.name] = version.map(&:to_s) + memo + end + { + 'name' => name, + 'versions' => versions, + 'highest_version' => highest_version.to_s, + 'highest_version_spec' => highest_version_spec_path.to_s, + } + end + + #-----------------------------------------------------------------------# + + # The Set::External class handles Pods from external sources. Pods from + # external sources don't use the {Source} and are initialized by a given + # specification. + # + # @note External sources *don't* support subspecs. + # + class External < Set + attr_reader :specification + + def initialize(spec) + @specification = spec.root + super(@specification.name) + end + + def ==(other) + self.class == other.class && specification == other.specification + end + + def versions + [specification.version] + end + end + + #-----------------------------------------------------------------------# + + # The Set::Head class handles Pods in head mode. Pods in head + # mode don't use the {Source} and are initialized by a given + # specification. + # + class Head < External + def initialize(spec) + super + specification.version.head = true + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/set/presenter.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/set/presenter.rb new file mode 100644 index 0000000..3f7dd58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/specification/set/presenter.rb @@ -0,0 +1,203 @@ +require 'active_support/core_ext/array/conversions' + +module Pod + class Specification + class Set + # Provides support for presenting a Pod described by a {Set} in a + # consistent way across clients of CocoaPods-Core. + # + class Presenter + # @return [Set] the set that should be presented. + # + attr_reader :set + + # @param [Set] set @see #set. + # + def initialize(set) + @set = set + end + + #---------------------------------------------------------------------# + + # @!group Set Information + + # @return [String] the name of the Pod. 
+ # + def name + @set.name + end + + # @return [Version] the highest version of available for the Pod. + # + def version + @set.versions.first + end + + # @return [Array] all the versions available ascending + # order. + # + def versions + @set.versions + end + + # @return [String] all the versions available sorted from the highest + # to the lowest. + # + # @example Return example + # + # "1.5pre, 1.4 [master repo] - 1.4 [test_repo repo]" + # + # @note This method orders the sources by name. + # + def versions_by_source + result = [] + versions_by_source = @set.versions_by_source + @set.sources.sort.each do |source| + versions = versions_by_source[source] + result << "#{versions.map(&:to_s) * ', '} [#{source.name} repo]" + end + result * ' - ' + end + + # @return [Array] The name of the sources that contain the Pod + # sorted alphabetically. + # + def sources + @set.sources.map(&:name).sort + end + + #---------------------------------------------------------------------# + + # @!group Specification Information + + # @return [Specification] the specification of the {Set}. If no + # versions requirements where passed to the set it returns the + # highest available version. + # + def spec + @spec ||= @set.specification + end + + # @return [String] the list of the authors of the Pod in sentence + # format. + # + # @example Output example + # + # "Author 1, Author 2 and Author 3" + # + def authors + return '' unless spec.authors + spec.authors.keys.to_sentence + end + + # @return [String] the homepage of the pod. + # + def homepage + spec.homepage + end + + # @return [String] a short description, expected to be 140 characters + # long of the Pod. + # + def summary + spec.summary + end + + # @return [String] the description of the Pod, if no description is + # available the summary is returned. + # + def description + spec.description || spec.summary + end + + # @return [String] A string that describes the deprecation of the pod. + # If the pod is deprecated in favor of another pod it will contain + # information about that. If the pod is not deprecated returns nil. + # + # @example Output example + # + # "[DEPRECATED]" + # "[DEPRECATED in favor of NewAwesomePod]" + # + def deprecation_description + if spec.deprecated? + description = '[DEPRECATED' + description += if spec.deprecated_in_favor_of.nil? + ']' + else + " in favor of #{spec.deprecated_in_favor_of}]" + end + + description + end + end + + # @return [String] the URL of the source of the Pod. + # + def source_url + url_keys = [:git, :svn, :http, :hg, :path] + key = spec.source.keys.find { |k| url_keys.include?(k) } + key ? spec.source[key] : 'No source url' + end + + # @return [String] the platforms supported by the Pod. + # + # @example + # + # "iOS" + # "iOS - OS X" + # + def platform + sorted_platforms = spec.available_platforms.sort do |a, b| + a.to_s.downcase <=> b.to_s.downcase + end + sorted_platforms.join(' - ') + end + + # @return [String] the type of the license of the Pod. + # + # @example + # + # "MIT" + # + def license + spec.license[:type] if spec.license + end + + # @return [Array] an array containing all the subspecs of the Pod. + # + def subspecs + (spec.recursive_subspecs.any? && spec.recursive_subspecs) || nil + end + + #---------------------------------------------------------------------# + + # @!group Statistics + + # @return [Integer] the GitHub likes of the repo of the Pod. + # + def github_stargazers + github_metrics['stargazers'] + end + + # @return [Integer] the GitHub forks of the repo of the Pod. 
+ # + def github_forks + github_metrics['forks'] + end + + #---------------------------------------------------------------------# + + # @!group Private Helpers + + def metrics + @metrics ||= Metrics.pod(name) || {} + end + + def github_metrics + metrics['github'] || {} + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/standard_error.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/standard_error.rb new file mode 100644 index 0000000..bdd9405 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/standard_error.rb @@ -0,0 +1,108 @@ +module Pod + # Namespaces all the errors raised by CocoaPods. + # + class StandardError < ::StandardError; end + + #-------------------------------------------------------------------------# + + # Wraps an exception raised by a DSL file in order to show to the user the + # contents of the line that raised the exception. + # + class DSLError < Informative + # @return [String] the description that should be presented to the user. + # + attr_reader :description + + # @return [String] the path of the dsl file that raised the exception. + # + attr_reader :dsl_path + + # @return [Exception] the exception raised by the + # evaluation of the dsl file. + # + attr_reader :underlying_exception + + # @param [Exception] underlying_exception @see underlying_exception + # @param [String] dsl_path @see dsl_path + # + def initialize(description, dsl_path, underlying_exception, contents = nil) + @description = description + @dsl_path = dsl_path + @underlying_exception = underlying_exception + @contents = contents + end + + # @return [String] the contents of the DSL that cause the exception to + # be raised. + # + def contents + @contents ||= begin + dsl_path && File.exist?(dsl_path) && File.read(dsl_path) + end + end + + # The message of the exception reports the content of podspec for the + # line that generated the original exception. + # + # @example Output + # + # Invalid podspec at `RestKit.podspec` - undefined method + # `exclude_header_search_paths=' for # + # + # from spec-repos/master/RestKit/0.9.3/RestKit.podspec:36 + # ------------------------------------------- + # # because it would break: #import + # > ns.exclude_header_search_paths = 'Code/RestKit.h' + # end + # ------------------------------------------- + # + # @return [String] the message of the exception. + # + def message + @message ||= begin + trace_line, description = parse_line_number_from_description + + m = "\n[!] #{description}.\n" + m = m.red if m.respond_to?(:red) + + backtrace = underlying_exception.backtrace + return m unless backtrace && dsl_path && contents + + trace_line = backtrace.find { |l| l.include?(dsl_path.to_s) } || trace_line + return m unless trace_line + line_numer = trace_line.split(':')[1].to_i - 1 + return m unless line_numer + + lines = contents.lines + indent = ' # ' + indicator = indent.tr('#', '>') + first_line = (line_numer.zero?) 
+ last_line = (line_numer == (lines.count - 1)) + + m << "\n" + m << "#{indent}from #{trace_line.gsub(/:in.*$/, '')}\n" + m << "#{indent}-------------------------------------------\n" + m << "#{indent}#{lines[line_numer - 1]}" unless first_line + m << "#{indicator}#{lines[line_numer]}" + m << "#{indent}#{lines[line_numer + 1]}" unless last_line + m << "\n" unless m.end_with?("\n") + m << "#{indent}-------------------------------------------\n" + end + end + + private + + def parse_line_number_from_description + description = self.description + if dsl_path && description =~ /((#{Regexp.quote File.expand_path(dsl_path)}|#{Regexp.quote dsl_path.to_s}):\d+)/ + trace_line = Regexp.last_match[1] + description = description.sub(/#{Regexp.quote trace_line}:\s*/, '') + if description =~ /^\s*\^\z/ + description = description.lines[0..-3].join.chomp + end + end + [trace_line, description] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/trunk_source.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/trunk_source.rb new file mode 100644 index 0000000..ce7a268 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/trunk_source.rb @@ -0,0 +1,14 @@ +module Pod + class TrunkSource < CDNSource + # On-disk master repo name + TRUNK_REPO_NAME = 'trunk'.freeze + + # Remote CDN repo URL + TRUNK_REPO_URL = 'https://cdn.cocoapods.org/'.freeze + + def url + @url ||= TRUNK_REPO_URL + super + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor.rb new file mode 100644 index 0000000..b1656b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor.rb @@ -0,0 +1,50 @@ +module Pod + # Namespaces the vendored modules. + # + module Vendor + # Namespaces the classes of RubyGems used by CocoaPods. + # + # CocoaPods needs to vendor RubyGems because OS X ships with `v1.3.6` which + # has a couple of bugs related to the comparison of pre-release versions. + # + # E.g. https://github.com/CocoaPods/CocoaPods/issues/398 + # + # The following classes are copied from RubyGems `v2.6.3`. The changes + # performed to the source files are the following: + # + # - Namespaced in `Pod::Vendor` + # - commented all the `require` calls + # - replaced `::Gem` with `Pod::Vendor::Gem` + # + module Gem + require 'cocoapods-core/vendor/version' + require 'cocoapods-core/vendor/requirement' + + #-----------------------------------------------------------------------# + # RubyGems License # + # https://github.com/rubygems/rubygems/blob/master/MIT.txt # + #-----------------------------------------------------------------------# + + # Copyright (c) Chad Fowler, Rich Kilmer, Jim Weirich and others. + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # 'Software'), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be + # included in all copies or substantial portions of the Software. 
+ # + # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, + # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor/requirement.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor/requirement.rb new file mode 100644 index 0000000..d9ee99f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor/requirement.rb @@ -0,0 +1,288 @@ +# frozen_string_literal: true +module Pod::Vendor + + # require "rubygems/version" + # require "rubygems/deprecate" + + # If we're being loaded after yaml was already required, then + # load our yaml + workarounds now. + # Gem.load_yaml if defined? ::YAML + + ## + # A Requirement is a set of one or more version restrictions. It supports a + # few (=, !=, >, <, >=, <=, ~>) different restriction operators. + # + # See Gem::Version for a description on how versions and requirements work + # together in RubyGems. + + class Gem::Requirement + OPS = { #:nodoc: + "=" => lambda { |v, r| v == r }, + "!=" => lambda { |v, r| v != r }, + ">" => lambda { |v, r| v > r }, + "<" => lambda { |v, r| v < r }, + ">=" => lambda { |v, r| v >= r }, + "<=" => lambda { |v, r| v <= r }, + "~>" => lambda { |v, r| v >= r && v.release < r.bump } + } + + SOURCE_SET_REQUIREMENT = Struct.new(:for_lockfile).new "!" # :nodoc: + + quoted = OPS.keys.map { |k| Regexp.quote k }.join "|" + PATTERN_RAW = "\\s*(#{quoted})?\\s*(#{Gem::Version::VERSION_PATTERN})\\s*" # :nodoc: + + ## + # A regular expression that matches a requirement + + PATTERN = /\A#{PATTERN_RAW}\z/ + + ## + # The default requirement matches any version + + DefaultRequirement = [">=", Gem::Version.new(0)] + + ## + # Raised when a bad requirement is encountered + + class BadRequirementError < ArgumentError; end + + ## + # Factory method to create a Gem::Requirement object. Input may be + # a Version, a String, or nil. Intended to simplify client code. + # + # If the input is "weird", the default version requirement is + # returned. + + def self.create input + case input + when Gem::Requirement then + input + when Gem::Version, Array then + new input + when '!' then + source_set + else + if input.respond_to? :to_str then + new [input.to_str] + else + default + end + end + end + + ## + # A default "version requirement" can surely _only_ be '>= 0'. + + def self.default + new '>= 0' + end + + ### + # A source set requirement, used for Gemfiles and lockfiles + + def self.source_set # :nodoc: + SOURCE_SET_REQUIREMENT + end + + ## + # Parse +obj+, returning an [op, version] pair. +obj+ can + # be a String or a Gem::Version. + # + # If +obj+ is a String, it can be either a full requirement + # specification, like ">= 1.2", or a simple version number, + # like "1.2". 
+ # + # parse("> 1.0") # => [">", Gem::Version.new("1.0")] + # parse("1.0") # => ["=", Gem::Version.new("1.0")] + # parse(Gem::Version.new("1.0")) # => ["=, Gem::Version.new("1.0")] + + def self.parse obj + return ["=", obj] if Gem::Version === obj + + unless PATTERN =~ obj.to_s + raise BadRequirementError, "Illformed requirement [#{obj.inspect}]" + end + + if $1 == ">=" && $2 == "0" + DefaultRequirement + else + [$1 || "=", Gem::Version.new($2)] + end + end + + ## + # An array of requirement pairs. The first element of the pair is + # the op, and the second is the Gem::Version. + + attr_reader :requirements #:nodoc: + + ## + # Constructs a requirement from +requirements+. Requirements can be + # Strings, Gem::Versions, or Arrays of those. +nil+ and duplicate + # requirements are ignored. An empty set of +requirements+ is the + # same as ">= 0". + + def initialize *requirements + requirements = requirements.flatten + requirements.compact! + requirements.uniq! + + if requirements.empty? + @requirements = [DefaultRequirement] + else + @requirements = requirements.map! { |r| self.class.parse r } + end + end + + ## + # Concatenates the +new+ requirements onto this requirement. + + def concat new + new = new.flatten + new.compact! + new.uniq! + new = new.map { |r| self.class.parse r } + + @requirements.concat new + end + + ## + # Formats this requirement for use in a Gem::RequestSet::Lockfile. + + def for_lockfile # :nodoc: + return if [DefaultRequirement] == @requirements + + list = requirements.sort_by { |_, version| + version + }.map { |op, version| + "#{op} #{version}" + }.uniq + + " (#{list.join ', '})" + end + + ## + # true if this gem has no requirements. + + def none? + if @requirements.size == 1 + @requirements[0] == DefaultRequirement + else + false + end + end + + ## + # true if the requirement is for only an exact version + + def exact? + return false unless @requirements.size == 1 + @requirements[0][0] == "=" + end + + def as_list # :nodoc: + requirements.map { |op, version| "#{op} #{version}" }.sort + end + + def hash # :nodoc: + requirements.sort.hash + end + + def marshal_dump # :nodoc: + fix_syck_default_key_in_requirements + + [@requirements] + end + + def marshal_load array # :nodoc: + @requirements = array[0] + + fix_syck_default_key_in_requirements + end + + def yaml_initialize(tag, vals) # :nodoc: + vals.each do |ivar, val| + instance_variable_set "@#{ivar}", val + end + + Gem.load_yaml + fix_syck_default_key_in_requirements + end + + def init_with coder # :nodoc: + yaml_initialize coder.tag, coder.map + end + + def to_yaml_properties # :nodoc: + ["@requirements"] + end + + def encode_with coder # :nodoc: + coder.add 'requirements', @requirements + end + + ## + # A requirement is a prerelease if any of the versions inside of it + # are prereleases + + def prerelease? + requirements.any? { |r| r.last.prerelease? } + end + + def pretty_print q # :nodoc: + q.group 1, 'Gem::Requirement.new(', ')' do + q.pp as_list + end + end + + ## + # True if +version+ satisfies this Requirement. + + def satisfied_by? version + raise ArgumentError, "Need a Gem::Version: #{version.inspect}" unless + Gem::Version === version + # #28965: syck has a bug with unquoted '=' YAML.loading as YAML::DefaultKey + requirements.all? { |op, rv| (OPS[op] || OPS["="]).call version, rv } + end + + alias :=== :satisfied_by? + alias :=~ :satisfied_by? + + ## + # True if the requirement will not always match the latest version. + + def specific? 
+ return true if @requirements.length > 1 # GIGO, > 1, > 2 is silly + + not %w[> >=].include? @requirements.first.first # grab the operator + end + + def to_s # :nodoc: + as_list.join ", " + end + + def == other # :nodoc: + Gem::Requirement === other and to_s == other.to_s + end + + private + + def fix_syck_default_key_in_requirements # :nodoc: + Gem.load_yaml + + # Fixup the Syck DefaultKey bug + @requirements.each do |r| + if r[0].kind_of? Gem::SyckDefaultKey + r[0] = "=" + end + end + end + end + + class Gem::Version + # This is needed for compatibility with older yaml + # gemspecs. + + Requirement = Gem::Requirement # :nodoc: + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor/version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor/version.rb new file mode 100644 index 0000000..3e454ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/vendor/version.rb @@ -0,0 +1,377 @@ +# frozen_string_literal: true +module Pod::Vendor + + ## + # The Version class processes string versions into comparable + # values. A version string should normally be a series of numbers + # separated by periods. Each part (digits separated by periods) is + # considered its own number, and these are used for sorting. So for + # instance, 3.10 sorts higher than 3.2 because ten is greater than + # two. + # + # If any part contains letters (currently only a-z are supported) then + # that version is considered prerelease. Versions with a prerelease + # part in the Nth part sort less than versions with N-1 + # parts. Prerelease parts are sorted alphabetically using the normal + # Ruby string sorting rules. If a prerelease part contains both + # letters and numbers, it will be broken into multiple parts to + # provide expected sort behavior (1.0.a10 becomes 1.0.a.10, and is + # greater than 1.0.a9). + # + # Prereleases sort between real releases (newest to oldest): + # + # 1. 1.0 + # 2. 1.0.b1 + # 3. 1.0.a.2 + # 4. 0.9 + # + # If you want to specify a version restriction that includes both prereleases + # and regular releases of the 1.x series this is the best way: + # + # s.add_dependency 'example', '>= 1.0.0.a', '< 2.0.0' + # + # == How Software Changes + # + # Users expect to be able to specify a version constraint that gives them + # some reasonable expectation that new versions of a library will work with + # their software if the version constraint is true, and not work with their + # software if the version constraint is false. In other words, the perfect + # system will accept all compatible versions of the library and reject all + # incompatible versions. + # + # Libraries change in 3 ways (well, more than 3, but stay focused here!). + # + # 1. The change may be an implementation detail only and have no effect on + # the client software. + # 2. The change may add new features, but do so in a way that client software + # written to an earlier version is still compatible. + # 3. The change may change the public interface of the library in such a way + # that old software is no longer compatible. + # + # Some examples are appropriate at this point. Suppose I have a Stack class + # that supports a push and a pop method. + # + # === Examples of Category 1 changes: + # + # * Switch from an array based implementation to a linked-list based + # implementation. + # * Provide an automatic (and transparent) backing store for large stacks. 
+ # + # === Examples of Category 2 changes might be: + # + # * Add a depth method to return the current depth of the stack. + # * Add a top method that returns the current top of stack (without + # changing the stack). + # * Change push so that it returns the item pushed (previously it + # had no usable return value). + # + # === Examples of Category 3 changes might be: + # + # * Changes pop so that it no longer returns a value (you must use + # top to get the top of the stack). + # * Rename the methods to push_item and pop_item. + # + # == RubyGems Rational Versioning + # + # * Versions shall be represented by three non-negative integers, separated + # by periods (e.g. 3.1.4). The first integers is the "major" version + # number, the second integer is the "minor" version number, and the third + # integer is the "build" number. + # + # * A category 1 change (implementation detail) will increment the build + # number. + # + # * A category 2 change (backwards compatible) will increment the minor + # version number and reset the build number. + # + # * A category 3 change (incompatible) will increment the major build number + # and reset the minor and build numbers. + # + # * Any "public" release of a gem should have a different version. Normally + # that means incrementing the build number. This means a developer can + # generate builds all day long, but as soon as they make a public release, + # the version must be updated. + # + # === Examples + # + # Let's work through a project lifecycle using our Stack example from above. + # + # Version 0.0.1:: The initial Stack class is release. + # Version 0.0.2:: Switched to a linked=list implementation because it is + # cooler. + # Version 0.1.0:: Added a depth method. + # Version 1.0.0:: Added top and made pop return nil + # (pop used to return the old top item). + # Version 1.1.0:: push now returns the value pushed (it used it + # return nil). + # Version 1.1.1:: Fixed a bug in the linked list implementation. + # Version 1.1.2:: Fixed a bug introduced in the last fix. + # + # Client A needs a stack with basic push/pop capability. They write to the + # original interface (no top), so their version constraint looks like: + # + # gem 'stack', '>= 0.0' + # + # Essentially, any version is OK with Client A. An incompatible change to + # the library will cause them grief, but they are willing to take the chance + # (we call Client A optimistic). + # + # Client B is just like Client A except for two things: (1) They use the + # depth method and (2) they are worried about future + # incompatibilities, so they write their version constraint like this: + # + # gem 'stack', '~> 0.1' + # + # The depth method was introduced in version 0.1.0, so that version + # or anything later is fine, as long as the version stays below version 1.0 + # where incompatibilities are introduced. We call Client B pessimistic + # because they are worried about incompatible future changes (it is OK to be + # pessimistic!). + # + # == Preventing Version Catastrophe: + # + # From: http://blog.zenspider.com/2008/10/rubygems-howto-preventing-cata.html + # + # Let's say you're depending on the fnord gem version 2.y.z. If you + # specify your dependency as ">= 2.0.0" then, you're good, right? What + # happens if fnord 3.0 comes out and it isn't backwards compatible + # with 2.y.z? Your stuff will break as a result of using ">=". The + # better route is to specify your dependency with an "approximate" version + # specifier ("~>"). 
They're a tad confusing, so here is how the dependency + # specifiers work: + # + # Specification From ... To (exclusive) + # ">= 3.0" 3.0 ... ∞ + # "~> 3.0" 3.0 ... 4.0 + # "~> 3.0.0" 3.0.0 ... 3.1 + # "~> 3.5" 3.5 ... 4.0 + # "~> 3.5.0" 3.5.0 ... 3.6 + # "~> 3" 3.0 ... 4.0 + # + # For the last example, single-digit versions are automatically extended with + # a zero to give a sensible result. + + class Gem::Version + autoload :Requirement, 'rubygems/requirement' + + include Comparable + + VERSION_PATTERN = '[0-9]+(?>\.[0-9a-zA-Z]+)*(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?' # :nodoc: + ANCHORED_VERSION_PATTERN = /\A\s*(#{VERSION_PATTERN})?\s*\z/ # :nodoc: + + ## + # A string representation of this Version. + + def version + @version.dup + end + + alias to_s version + + ## + # True if the +version+ string matches RubyGems' requirements. + + def self.correct? version + version.to_s =~ ANCHORED_VERSION_PATTERN + end + + ## + # Factory method to create a Version object. Input may be a Version + # or a String. Intended to simplify client code. + # + # ver1 = Version.create('1.3.17') # -> (Version object) + # ver2 = Version.create(ver1) # -> (ver1) + # ver3 = Version.create(nil) # -> nil + + def self.create input + if self === input then # check yourself before you wreck yourself + input + elsif input.nil? then + nil + else + new input + end + end + + @@all = {} + + def self.new version # :nodoc: + return super unless Gem::Version == self + + @@all[version] ||= super + end + + ## + # Constructs a Version from the +version+ string. A version string is a + # series of digits or ASCII letters separated by dots. + + def initialize version + raise ArgumentError, "Malformed version number string #{version}" unless + self.class.correct?(version) + + @version = version.to_s.strip.gsub("-",".pre.") + @segments = nil + end + + ## + # Return a new version object where the next to the last revision + # number is one greater (e.g., 5.3.1 => 5.4). + # + # Pre-release (alpha) parts, e.g, 5.3.1.b.2 => 5.4, are ignored. + + def bump + @bump ||= begin + segments = self.segments + segments.pop while segments.any? { |s| String === s } + segments.pop if segments.size > 1 + + segments[-1] = segments[-1].succ + self.class.new segments.join(".") + end + end + + ## + # A Version is only eql? to another version if it's specified to the + # same precision. Version "1.0" is not the same as version "1". + + def eql? other + self.class === other and @version == other._version + end + + def hash # :nodoc: + @version.hash + end + + def init_with coder # :nodoc: + yaml_initialize coder.tag, coder.map + end + + def inspect # :nodoc: + "#<#{self.class} #{version.inspect}>" + end + + ## + # Dump only the raw version string, not the complete object. It's a + # string for backwards (RubyGems 1.3.5 and earlier) compatibility. + + def marshal_dump + [version] + end + + ## + # Load custom marshal format. It's a string for backwards (RubyGems + # 1.3.5 and earlier) compatibility. + + def marshal_load array + initialize array[0] + end + + def yaml_initialize(tag, map) # :nodoc: + @version = map['version'] + @segments = nil + @hash = nil + end + + def to_yaml_properties # :nodoc: + ["@version"] + end + + def encode_with coder # :nodoc: + coder.add 'version', @version + end + + ## + # A version is considered a prerelease if it contains a letter. + + def prerelease? + unless instance_variable_defined? 
:@prerelease + @prerelease = !!(@version =~ /[a-zA-Z]/) + end + @prerelease + end + + def pretty_print q # :nodoc: + q.text "Gem::Version.new(#{version.inspect})" + end + + ## + # The release for this version (e.g. 1.2.0.a -> 1.2.0). + # Non-prerelease versions return themselves. + + def release + @release ||= if prerelease? + segments = self.segments + segments.pop while segments.any? { |s| String === s } + self.class.new segments.join('.') + else + self + end + end + + def segments # :nodoc: + _segments.dup + end + + ## + # A recommended version for use with a ~> Requirement. + + def approximate_recommendation + segments = self.segments + + segments.pop while segments.any? { |s| String === s } + segments.pop while segments.size > 2 + segments.push 0 while segments.size < 2 + + "~> #{segments.join(".")}" + end + + ## + # Compares this version with +other+ returning -1, 0, or 1 if the + # other version is larger, the same, or smaller than this + # one. Attempts to compare to something that's not a + # Gem::Version return +nil+. + + def <=> other + return unless Gem::Version === other + return 0 if @version == other._version + + lhsegments = _segments + rhsegments = other._segments + + lhsize = lhsegments.size + rhsize = rhsegments.size + limit = (lhsize > rhsize ? lhsize : rhsize) - 1 + + i = 0 + + while i <= limit + lhs, rhs = lhsegments[i] || 0, rhsegments[i] || 0 + i += 1 + + next if lhs == rhs + return -1 if String === lhs && Numeric === rhs + return 1 if Numeric === lhs && String === rhs + + return lhs <=> rhs + end + + return 0 + end + + protected + + def _version + @version + end + + def _segments + # segments is lazy so it can pick up version values that come from + # old marshaled versions, which don't go through marshal_load. + # since this version object is cached in @@all, its @segments should be frozen + + @segments ||= @version.scan(/[0-9]+|[a-z]+/i).map do |s| + /^\d+$/ =~ s ? s.to_i : s + end.freeze + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/version.rb new file mode 100644 index 0000000..84d7e61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/version.rb @@ -0,0 +1,239 @@ +module Pod + # The Version class stores information about the version of a + # {Specification}. + # + # It is based on the RubyGems class adapted to support head information. + # + # ### From RubyGems: + # + # The Version class processes string versions into comparable + # values. A version string should normally be a series of numbers + # separated by periods. Each part (digits separated by periods) is + # considered its own number, and these are used for sorting. So for + # instance, 3.10 sorts higher than 3.2 because ten is greater than + # two. + # + # If any part contains letters (currently only a-z are supported) then + # that version is considered prerelease. Versions with a prerelease + # part in the Nth part sort less than versions with N-1 + # parts. Prerelease parts are sorted alphabetically using the normal + # Ruby string sorting rules. If a prerelease part contains both + # letters and numbers, it will be broken into multiple parts to + # provide expected sort behavior (1.0.a10 becomes 1.0.a.10, and is + # greater than 1.0.a9). + # + # Prereleases sort between real releases (newest to oldest): + # + # 1. 1.0 + # 2. 1.0.b1 + # 3. 1.0.a.2 + # 4. 
0.9 + # + class Version < Pod::Vendor::Gem::Version + # Override the constants defined by the superclass to add: + # - Semantic Versioning prerelease support (with a dash). E.g.: 1.0.0-alpha1 + # - Semantic Versioning metadata support (with a +) E.g: 1.0.0+96ef7ed + # + # For more info, see: http://semver.org + # + METADATA_PATTERN = '(\+[0-9a-zA-Z\-\.]+)' + VERSION_PATTERN = "[0-9]+(\\.[0-9a-zA-Z\\-]+)*#{METADATA_PATTERN}?" + ANCHORED_VERSION_PATTERN = /\A\s*(#{VERSION_PATTERN})*\s*\z/ + + # @param [String,Version] version + # A string representing a version, or another version. + # + def initialize(version) + raise ArgumentError, "Malformed version number string #{version}" unless + self.class.correct?(version) + + @version = version.to_s.strip + end + + # An instance that represents version 0. + # + ZERO = new('0') + + # @return [String] a string representation suitable for debugging. + # + def inspect + "<#{self.class} version=#{version}>" + end + + # @return [Boolean] indicates whether or not the version is a prerelease. + # + # @note Prerelease Pods can contain a hyphen and/or a letter (conforms to + # Semantic Versioning instead of RubyGems). + # + # For more info, see: http://semver.org + # + def prerelease? + return @prerelease if defined?(@prerelease) + comparable_version = @version.sub(/#{METADATA_PATTERN}$/, '') + @prerelease = comparable_version =~ /[a-zA-Z\-]/ + end + + # @return [Bool] Whether a string representation is correct. + # + def self.correct?(version) + version.to_s =~ ANCHORED_VERSION_PATTERN + end + + #-------------------------------------------------------------------------# + + # @!group Semantic Versioning + + SEMVER_PATTERN = "[0-9]+(\\.[0-9]+(\\.[0-9]+(-[0-9A-Za-z\\-\\.]+)?#{METADATA_PATTERN}?)?)?" + ANCHORED_SEMANTIC_VERSION_PATTERN = /\A\s*(#{SEMVER_PATTERN})*\s*\z/ + + # @return [Bool] Whether the version conforms to the Semantic Versioning + # specification (2.0.0-rc.1). + # + # @note This comparison is lenient. + # + # @note It doesn't support build identifiers. + # + def semantic? + version.to_s =~ ANCHORED_SEMANTIC_VERSION_PATTERN + end + + # @return [Fixnum] The semver major identifier. + # + def major + numeric_segments[0].to_i + end + + # @return [Fixnum] The semver minor identifier. + # + def minor + numeric_segments[1].to_i + end + + # @return [Fixnum] The semver patch identifier. + # + def patch + numeric_segments[2].to_i + end + + # Compares the versions for sorting. + # + # @param [Version] other + # The other version to compare. + # + # @return [Fixnum] -1, 0, or +1 depending on whether the receiver is less + # than, equal to, or greater than other. + # + # @note Attempts to compare something that's not a {Version} return nil + # + def <=>(other) + comparison = compare_segments(other) + comparison == 0 ? version <=> other.version : comparison + end + + # @private + # + # Compares the versions for equality. + # + # @param [Version] other + # The other version to compare. + # + # @return [Boolean] whether the receiver is equal to other. + # + # @note Attempts to compare something that's not a {Version} return nil + # + def ==(other) + compare_segments(other) == 0 + end + + # @private + # + # Compares the versions for equality. + # + # @param [Version] other + # The other version to compare. + # + # @return [Boolean] whether the receiver is greater than or equal to other. 
+ # + # @note Attempts to compare something that's not a {Version} return nil + # + def >=(other) + comparison = compare_segments(other) + comparison >= 0 + end + + # @private + # + # Compares the versions for equality. + # + # @param [Version] other + # The other version to compare. + # + # @return [Boolean] whether the receiver is less than or equal to other. + # + # @note Attempts to compare something that's not a {Version} return nil + # + def <=(other) + comparison = compare_segments(other) + comparison <= 0 + end + + protected + + # This overrides the Gem::Version implementation of `_segments` to drop the + # metadata from comparisons as per http://semver.org/#spec-item-10 + # + def _segments + # segments is lazy so it can pick up version values that come from + # old marshaled versions, which don't go through marshal_load. + # since this version object is cached in @@all, its @segments should be frozen + + @segments ||= @version.sub(/#{METADATA_PATTERN}$/, '').scan(/[0-9]+|[a-z]+/i).map do |s| + /^\d+$/ =~ s ? s.to_i : s + end.freeze + end + + def numeric_segments + @numeric_segments ||= segments.take_while { |s| s.is_a?(Numeric) }.reverse_each.drop_while { |s| s == 0 }.reverse + end + + def prerelease_segments + @prerelease_segments ||= segments.drop_while { |s| s.is_a?(Numeric) } + end + + def compare_segments(other) + return unless other.is_a?(Pod::Version) + return 0 if @version == other.version + + compare = proc do |segments, other_segments, is_pre_release| + limit = [segments.size, other_segments.size].max + + 0.upto(limit) do |i| + lhs = segments[i] + rhs = other_segments[i] + + next if lhs == rhs + # If it's pre-release and the first segment, then + # this is a special case because a segment missing + # means that one is not a pre-release version + if is_pre_release && i == 0 + return 1 if lhs.nil? + return -1 if rhs.nil? + else + return -1 if lhs.nil? + return 1 if rhs.nil? + end + + if comparison = lhs <=> rhs + return comparison + end + end + end + + compare[numeric_segments, other.numeric_segments, false] + compare[prerelease_segments, other.prerelease_segments, true] + 0 + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/yaml_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/yaml_helper.rb new file mode 100644 index 0000000..287f05e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-core-1.11.2/lib/cocoapods-core/yaml_helper.rb @@ -0,0 +1,323 @@ +require 'yaml' + +module Pod + # Converts objects to their YAML representation. + # + # This class was created for the need having control on how the YAML is + # representation is generated. In details it provides: + # + # - sorting for hashes in ruby 1.8.x + # - ability to hint the sorting of the keys of a dictionary when converting + # it. In this case the keys are also separated by an additional new line + # feed for readability. + # + # @note This class misses important features necessary for a correct YAML + # serialization and thus it is safe to use only for the Lockfile. + # The missing features include: + # - Strings are never quoted even when ambiguous. + # + # @todo Remove any code required solely for Ruby 1.8.x. + # + class YAMLHelper + class << self + # Returns the YAML representation of the given object. If the given object + # is a Hash, it accepts an optional hint for sorting the keys. 
+ # + # @param [String, Symbol, Array, Hash] object + # the object to convert + # + # @param [Array] hash_keys_hint + # an array to use as a hint for sorting the keys of the object to + # convert if it is a hash. + # + # @return [String] the YAML representation of the given object. + # + def convert(value) + result = process_according_to_class(value) + result << "\n" + end + + def convert_hash(value, hash_keys_hint, line_separator = "\n") + result = process_hash(value, hash_keys_hint, line_separator) + result << "\n" + end + + # Loads a YAML string and provide more informative + # error messages in special cases like merge conflict. + # + # @param [String] yaml_string + # The YAML String to be loaded + # + # @param [Pathname] file_path + # The (optional) file path to be used for read for the YAML file + # + # @return [Hash, Array] the Ruby YAML representaton + # + def load_string(yaml_string, file_path = nil) + YAML.load(yaml_string) + rescue + if yaml_has_merge_error?(yaml_string) + raise Informative, yaml_merge_conflict_msg(yaml_string, file_path) + else + raise Informative, yaml_parsing_error_msg(yaml_string, file_path) + end + end + + # Loads a YAML file and leans on the #load_string imp + # to do error detection + # + # @param [Pathname] file_path + # The file path to be used for read for the YAML file + # + # @return [Hash, Array] the Ruby YAML representaton + # + def load_file(file_path) + load_string(File.read(file_path), file_path) + end + + #-----------------------------------------------------------------------# + + private + + # Implementation notes: + # + # - each of the methods returns a YAML partial without an ending new + # line. + # - if a partial needs to be indented is responsibility of the method + # using it. + # + # --- + + # @!group Private Helpers + + # @return [String] the YAML representation of the given object. + # + def process_according_to_class(value, hash_keys_hint = nil) + case value + when Array then process_array(value) + when Hash then process_hash(value, hash_keys_hint) + when String then process_string(value) + else YAML.dump(value, :line_width => 2**31 - 1).sub(/\A---/, '').sub(/[.]{3}\s*\Z/, '') + end.strip + end + + # Converts an array to YAML after sorting it. + # + # @param [Array] array + # the array to convert. + # + # @return [String] the YAML representation of the given object. + # + def process_array(array) + return '[]' if array.empty? + + result = sorted_array(array).map do |array_value| + processed = process_according_to_class(array_value) + case array_value + when Array, Hash + if array_value.size > 1 + processed = processed.gsub(/^.*/).to_a + head = processed.shift + processed.map { |s| " #{s}" }.prepend(head).join("\n") + else + processed + end + else + processed + end + end + "- #{result.join("\n- ").strip}" + end + + # Converts a hash to YAML after sorting its keys. Optionally accepts a + # hint for sorting the keys. + # + # @note If a hint for sorting the keys is provided the array is assumed + # to be the root object and the keys are separated by an + # additional new line feed for readability. + # + # @note If the value of a given key is a String it displayed inline, + # otherwise it is displayed below and indented. + # + # @param [Hash] hash + # the hash to convert. + # + # @return [String] the YAML representation of the given object. + # + def process_hash(hash, hash_keys_hint = nil, line_separator = "\n") + return '{}' if hash.empty? 
+ + keys = sorted_array_with_hint(hash.keys, hash_keys_hint) + key_lines = keys.map do |key| + key_value = hash[key] + processed = process_according_to_class(key_value) + processed_key = process_according_to_class(key) + case key_value + when Hash, Array + key_partial_yaml = processed.lines.map { |line| " #{line}" } * '' + "#{processed_key}:\n#{key_partial_yaml}" + else + "#{processed_key}: #{processed}" + end + end + key_lines * line_separator + end + + # Check for merge errors in a YAML string. + # + # @param [String] yaml_string + # A YAML string to evaluate + # + # @return If a merge error was detected or not. + # + def yaml_has_merge_error?(yaml_string) + yaml_string.include?('<<<<<<< HEAD') + end + + # Error message describing that a merge conflict was found + # while parsing the YAML. + # + # @param [String] yaml + # Offending YAML + # + # @param [Pathname] path + # The (optional) offending path + # + # @return [String] The Error Message + # + def yaml_merge_conflict_msg(yaml, path = nil) + err = 'ERROR: Parsing unable to continue due ' + err += "to merge conflicts present in:\n" + err += "the file located at #{path}\n" if path + err + "#{yaml}" + end + + # Error message describing a general error took happened + # while parsing the YAML. + # + # @param [String] yaml + # Offending YAML + # + # @param [Pathname] path + # The (optional) offending path + # + # @return [String] The Error Message + # + def yaml_parsing_error_msg(yaml, path = nil) + err = 'ERROR: Parsing unable to continue due ' + err += "to parsing error:\n" + err += "contained in the file located at #{path}\n" if path + err + "#{yaml}" + end + + #-----------------------------------------------------------------------# + + # @!group Array Sorting + + # Sorts an array using another one as a sort hint. All the values of the + # hint which appear in the array will be returned respecting the order in + # the hint. If any other key is present in the original array they are + # sorted using the {#sorted_array} method. + # + # @param [Array] array + # The array which needs to be sorted. + # + # @param [Array] sort_hint + # The array which should be used to sort the keys. + # + # @return [Array] The sorted Array. + # + def sorted_array_with_hint(array, sort_hint) + if sort_hint + hinted = sort_hint & array + remaining = array - sort_hint + hinted + sorted_array(remaining) + else + sorted_array(array) + end + end + + public + + # Sorts an array according to the string representation of it values. + # This method allows to sort arrays which contains strings or hashes. + # + # @note If the value contained in the array is another Array or a Hash + # the first value of the collection is used for sorting, as this + # method is more useful, for arrays which contains a collection + # composed by one object. + # + # @todo This stuff is here only because the Lockfile intermixes strings + # and hashes for the `PODS` key. The Lockfile should be more + # consistent. + # + # @return [Array] The sorted array. + # + def sorted_array(array) + array.each_with_index.sort_by do |element, index| + [sorting_string(element), index] + end.map(&:first) + end + + private + + # Returns the string representation of a value useful for sorting. + # + # @param [String, Symbol, Array, Hash] value + # The value which needs to be sorted + # + # @return [String] A string useful to compare the value with other ones. 
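# @example (illustrative, added in review)
#   sorting_string('Zephyr')      # => "zephyr"
#   sorting_string(:symbol)       # => "symbol"
#   sorting_string(%w(B a))       # => "b"   (first element is used)
#   sorting_string('Key' => 1)    # => "key" (first sorted key is used)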
+ # + def sorting_string(value) + return '' unless value + case value + when String then value.downcase + when Symbol then sorting_string(value.to_s) + when Array then sorting_string(value.first) + when Hash then value.keys.map { |key| key.to_s.downcase }.sort.first + else raise ArgumentError, "Cannot sort #{value.inspect}" + end + end + + RESOLVED_TAGS = Regexp.union( + 'null', 'Null', 'NULL', '~', '', # resolve to null + 'true', 'True', 'TRUE', 'false', 'False', 'FALSE', # bool + 'yes', 'Yes', 'YES', 'no', 'No', 'NO', # yes/no + 'on', 'On', 'ON', 'off', 'Off', 'OFF', # no/off + /[-+]?[0-9]+/, # base 10 int + /00[0-7]+/, # base 8 int + /0x[0-9a-fA-F]+/, # base 16 int + /[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?/, # float + /[-+]?\.(inf|Inf|INF)/, # infinity + /\.(nan|NaN|NAN)/ # NaN + ) + private_constant :RESOLVED_TAGS + + INDICATOR_START_CHARS = %w(- ? : , [ ] { } # & * ! | > ' " % @ `).freeze + INDICATOR_START = /\A#{Regexp.union(INDICATOR_START_CHARS)}/.freeze + private_constant :INDICATOR_START_CHARS, :INDICATOR_START + + RESOLVED_TAGS_PATTERN = /\A#{Regexp.union(RESOLVED_TAGS)}\z/.freeze + private_constant :RESOLVED_TAGS_PATTERN + + VALID_PLAIN_SCALAR_STRING = %r{\A + [\w&&[^#{INDICATOR_START_CHARS}]] # valid first character + [\w/\ \(\)~<>=\.:`,-]* # all characters allowed after the first one + \z}ox.freeze + private_constant :VALID_PLAIN_SCALAR_STRING + + def process_string(string) + case string + when RESOLVED_TAGS_PATTERN + "'#{string}'" + when /\A\s*\z/, INDICATOR_START, /:\z/ + string.inspect + when VALID_PLAIN_SCALAR_STRING + string + else + string.inspect + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/LICENSE new file mode 100644 index 0000000..5a7b88b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015 Kyle Fuller + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/README.md new file mode 100644 index 0000000..1fde3ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/README.md @@ -0,0 +1,56 @@ +# cocoapods-deintegrate + +A CocoaPods plugin to remove and deintegrate CocoaPods from your project. +Removing all traces of CocoaPods from an Xcode project. 
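For reviewers skimming the vendored source below: the plugin is normally driven by the `pod deintegrate` command, but the actual work is done by the `Pod::Deintegrator` class. A minimal sketch of the equivalent programmatic flow (illustrative only — the project name is hypothetical and this is not a supported public API):

```ruby
require 'cocoapods'               # brings in Xcodeproj, Pod::UI and the plugin infrastructure
require 'cocoapods_deintegrate'   # explicit require in case the plugin is not auto-loaded

project = Xcodeproj::Project.open('MyApp.xcodeproj')  # hypothetical project path
deintegrator = Pod::Deintegrator.new
deintegrator.deintegrate_project(project)  # strips CocoaPods build phases, file references and groups
project.save
```

This mirrors what the vendored `Pod::Command::Deintegrate#run` implementation further down in this diff does.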
+ +## Installation + +```bash +$ [sudo] gem install cocoapods-deintegrate +``` + +## Usage + +Running `pod deintegrate` will deintegrate your Xcode project from +CocoaPods. Before running you should ensure you have a backup of your project. + +```bash +$ pod deintegrate +Deintegrating Palaver.xcodeproj +Deintegrating target Palaver +Deleted 1 'Copy Pods Resources' build phases. +Deleted 1 'Check Pods Manifest.lock' build phases. +Removing Pod libraries from build phase: +- libPods-Palaver.a +Deleting Pod file references from project +- libPods-Palaver.a +- libPods-PalaverTests.a +- Pods-Palaver.debug.xcconfig +- Pods-Palaver.release.xcconfig +- Pods-Palaver.ad hoc.xcconfig +- Pods-PalaverTests.debug.xcconfig +- Pods-PalaverTests.release.xcconfig +- Pods-PalaverTests.ad hoc.xcconfig +Deleted 1 `Pod` groups from project. +Deintegrating target PalaverTests +Deleted 1 'Copy Pods Resources' build phases. +Deleted 1 'Check Pods Manifest.lock' build phases. + +Project has been deintegrated. No traces of CocoaPods left in project. +Note: The workspace referencing the Pods project still remains. +``` + +The only things that will remains are as follows: + +- Podfile, Podfile.lock +- Workspace + +### Credits + +This CocoaPods plugin was created by [Kyle Fuller](http://kylefuller.co.uk/) +([@kylefuller](https://twitter.com/kylefuller)). + +### License + +cocoapods-deintegrate is released under the MIT license. See [LICENSE](LICENSE). + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/command/deintegrate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/command/deintegrate.rb new file mode 100644 index 0000000..d792a13 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/command/deintegrate.rb @@ -0,0 +1,53 @@ +module Pod + class Command + # @CocoaPods 1.0.0.beta.1 + # + class Deintegrate < Command + include ProjectDirectory + + self.summary = 'Deintegrate CocoaPods from your project' + self.description = <<-DESC + Deintegrate your project from CocoaPods. Removing all traces + of CocoaPods from your Xcode project. + + If no xcodeproj is specified, then a search for an Xcode project + will be made in the current directory. + DESC + self.arguments = [ + CLAide::Argument.new('XCODE_PROJECT', false), + ] + + def initialize(argv) + path = argv.shift_argument + @project_path = Pathname.new(path) if path + super + end + + def validate! + super + + unless @project_path + xcodeprojs = Pathname.glob('*.xcodeproj') + @project_path = xcodeprojs.first if xcodeprojs.size == 1 + end + + help! 'A valid Xcode project file is required.' unless @project_path + help! "#{@project_path} does not exist." unless @project_path.exist? + unless @project_path.directory? && (@project_path + 'project.pbxproj').exist? + help! "#{@project_path} is not a valid Xcode project." + end + + @project = Xcodeproj::Project.open(@project_path) + end + + def run + # We don't traverse a Podfile and try to de-intergrate each target. + # Instead, we're just deintegrating anything CP could have done to a + # project. This is so that it will clean stale, and modified projects. 
+ deintegrator = Deintegrator.new + deintegrator.deintegrate_project(@project) + @project.save + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/deintegrate/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/deintegrate/gem_version.rb new file mode 100644 index 0000000..f73e774 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/deintegrate/gem_version.rb @@ -0,0 +1,3 @@ +module CocoapodsDeintegrate + VERSION = '1.0.4'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/deintegrator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/deintegrator.rb new file mode 100644 index 0000000..657db64 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods/deintegrator.rb @@ -0,0 +1,145 @@ +module Pod + class Deintegrator + include Config::Mixin + + FRAMEWORK_NAMES = /^(libPods.*\.a)|(Pods.*\.framework)$/i + XCCONFIG_NAMES = /^Pods.*\.xcconfig$/i + + def deintegrate_project(project) + UI.section("Deintegrating #{UI.path project.path}") do + project.native_targets.each do |target| + deintegrate_target(target) + end + end + + delete_pods_file_references(project) + remove_sandbox + + UI.puts + UI.puts('Project has been deintegrated. No traces of CocoaPods left in project.'.green) + UI.puts('Note: The workspace referencing the Pods project still remains.') + end + + def deintegrate_target(target) + UI.section("Deintegrating target `#{target.name}`") do + deintegrate_shell_script_phase(target, 'Copy Pods Resources') + deintegrate_shell_script_phase(target, 'Check Pods Manifest.lock') + deintegrate_shell_script_phase(target, 'Embed Pods Frameworks') + deintegrate_user_shell_script_phases(target) + deintegrate_pods_libraries(target) + deintegrate_configuration_file_references(target) + end + end + + def remove_sandbox + pods_directory = config.sandbox.root + if pods_directory.exist? + UI.puts("Removing #{UI.path pods_directory} directory.") + pods_directory.rmtree + end + end + + def deintegrate_pods_libraries(target) + frameworks_build_phase = target.frameworks_build_phase + + pods_build_files = frameworks_build_phase.files.select do |build_file| + build_file.display_name =~ FRAMEWORK_NAMES + end + + unless pods_build_files.empty? + UI.section('Removing Pod libraries from build phase:') do + pods_build_files.each do |build_file| + UI.puts("- #{build_file.display_name}") + if build_file.file_ref.build_files.count == 1 + build_file.file_ref.remove_from_project + end + frameworks_build_phase.remove_build_file(build_file) + end + end + end + end + + def deintegrate_user_shell_script_phases(target) + user_script_phases = target.shell_script_build_phases.select do |phase| + next unless phase.name + phase.name.start_with?('[CP-User] ') + end + + unless user_script_phases.empty? + user_script_phases.each do |phase| + target.build_phases.delete(phase) + end + + UI.puts("Deleted #{user_script_phases.count} user build phases.") + end + end + + def deintegrate_shell_script_phase(target, phase_name) + phases = target.shell_script_build_phases.select do |phase| + phase.name && phase.name =~ /#{Regexp.escape(phase_name)}\z$/ + end + + unless phases.empty? 
+ phases.each do |phase| + target.build_phases.delete(phase) + end + + UI.puts("Deleted #{phases.count} '#{phase_name}' build phases.") + end + end + + def delete_empty_group(project, group_name) + groups = project.main_group.recursive_children_groups.select do |group| + group.name == group_name && group.children.empty? + end + + unless groups.empty? + groups.each(&:remove_from_project) + UI.puts "Deleted #{groups.count} empty `#{group_name}` groups from project." + end + end + + def deintegrate_configuration_file_references(target) + config_files = target.build_configurations.map do |config| + config_file = config.base_configuration_reference + config_file if config_file && config_file.name =~ XCCONFIG_NAMES + end.compact + unless config_files.empty? + UI.section('Deleting configuration file references') do + config_files.each do |file_reference| + UI.puts("- #{file_reference.name}") + file_reference.remove_from_project + end + end + end + end + + def delete_pods_file_references(project) + # The following implementation goes for files and empty groups so it + # should catch cases where a user has changed the structure manually. + + groups = project.main_group.recursive_children_groups + groups << project.main_group + + pod_files = groups.flat_map do |group| + group.files.select do |obj| + obj.name =~ XCCONFIG_NAMES || + obj.path =~ /^(libPods.*\.a)|(Pods_.*\.framework)$/i + end + end + + unless pod_files.empty? + UI.section('Deleting Pod file references from project') do + pod_files.each do |file_reference| + UI.puts("- #{file_reference.name || file_reference.path}") + file_reference.remove_from_project + end + end + end + + # Delete empty `Pods` group if exists + delete_empty_group(project, 'Pods') + delete_empty_group(project, 'Frameworks') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods_deintegrate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods_deintegrate.rb new file mode 100644 index 0000000..f20a484 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods_deintegrate.rb @@ -0,0 +1,2 @@ +require 'cocoapods/deintegrate/gem_version' +require 'cocoapods/deintegrator' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..8049ee2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.4/lib/cocoapods_plugin.rb @@ -0,0 +1,4 @@ +module Pod + require 'cocoapods_deintegrate' + require 'cocoapods/command/deintegrate' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/LICENSE new file mode 100644 index 0000000..5a7b88b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015 Kyle Fuller + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/README.md new file mode 100644 index 0000000..1fde3ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/README.md @@ -0,0 +1,56 @@ +# cocoapods-deintegrate + +A CocoaPods plugin to remove and deintegrate CocoaPods from your project. +Removing all traces of CocoaPods from an Xcode project. + +## Installation + +```bash +$ [sudo] gem install cocoapods-deintegrate +``` + +## Usage + +Running `pod deintegrate` will deintegrate your Xcode project from +CocoaPods. Before running you should ensure you have a backup of your project. + +```bash +$ pod deintegrate +Deintegrating Palaver.xcodeproj +Deintegrating target Palaver +Deleted 1 'Copy Pods Resources' build phases. +Deleted 1 'Check Pods Manifest.lock' build phases. +Removing Pod libraries from build phase: +- libPods-Palaver.a +Deleting Pod file references from project +- libPods-Palaver.a +- libPods-PalaverTests.a +- Pods-Palaver.debug.xcconfig +- Pods-Palaver.release.xcconfig +- Pods-Palaver.ad hoc.xcconfig +- Pods-PalaverTests.debug.xcconfig +- Pods-PalaverTests.release.xcconfig +- Pods-PalaverTests.ad hoc.xcconfig +Deleted 1 `Pod` groups from project. +Deintegrating target PalaverTests +Deleted 1 'Copy Pods Resources' build phases. +Deleted 1 'Check Pods Manifest.lock' build phases. + +Project has been deintegrated. No traces of CocoaPods left in project. +Note: The workspace referencing the Pods project still remains. +``` + +The only things that will remains are as follows: + +- Podfile, Podfile.lock +- Workspace + +### Credits + +This CocoaPods plugin was created by [Kyle Fuller](http://kylefuller.co.uk/) +([@kylefuller](https://twitter.com/kylefuller)). + +### License + +cocoapods-deintegrate is released under the MIT license. See [LICENSE](LICENSE). + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/command/deintegrate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/command/deintegrate.rb new file mode 100644 index 0000000..d792a13 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/command/deintegrate.rb @@ -0,0 +1,53 @@ +module Pod + class Command + # @CocoaPods 1.0.0.beta.1 + # + class Deintegrate < Command + include ProjectDirectory + + self.summary = 'Deintegrate CocoaPods from your project' + self.description = <<-DESC + Deintegrate your project from CocoaPods. Removing all traces + of CocoaPods from your Xcode project. + + If no xcodeproj is specified, then a search for an Xcode project + will be made in the current directory. + DESC + self.arguments = [ + CLAide::Argument.new('XCODE_PROJECT', false), + ] + + def initialize(argv) + path = argv.shift_argument + @project_path = Pathname.new(path) if path + super + end + + def validate! 
+ super + + unless @project_path + xcodeprojs = Pathname.glob('*.xcodeproj') + @project_path = xcodeprojs.first if xcodeprojs.size == 1 + end + + help! 'A valid Xcode project file is required.' unless @project_path + help! "#{@project_path} does not exist." unless @project_path.exist? + unless @project_path.directory? && (@project_path + 'project.pbxproj').exist? + help! "#{@project_path} is not a valid Xcode project." + end + + @project = Xcodeproj::Project.open(@project_path) + end + + def run + # We don't traverse a Podfile and try to de-integrate each target. + # Instead, we're just deintegrating anything CP could have done to a + # project. This is so that it will clean stale and modified projects. + deintegrator = Deintegrator.new + deintegrator.deintegrate_project(@project) + @project.save + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrate/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrate/gem_version.rb new file mode 100644 index 0000000..e77e334 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrate/gem_version.rb @@ -0,0 +1,3 @@ +module CocoapodsDeintegrate + VERSION = '1.0.5'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrator.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrator.rb new file mode 100644 index 0000000..e3827dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrator.rb @@ -0,0 +1,149 @@ +module Pod + class Deintegrator + include Config::Mixin + + FRAMEWORK_NAMES = /^(libPods.*\.a)|(Pods.*\.framework)$/i + XCCONFIG_NAMES = /^Pods.*\.xcconfig$/i + + def deintegrate_project(project) + UI.section("Deintegrating #{UI.path project.path}") do + project.native_targets.each do |target| + deintegrate_target(target) + end + end + + delete_pods_file_references(project) + remove_sandbox + + UI.puts + UI.puts('Project has been deintegrated. No traces of CocoaPods left in project.'.green) + UI.puts('Note: The workspace referencing the Pods project still remains.') + end + + def deintegrate_target(target) + UI.section("Deintegrating target `#{target.name}`") do + deintegrate_shell_script_phase(target, 'Copy Pods Resources') + deintegrate_shell_script_phase(target, 'Check Pods Manifest.lock') + deintegrate_shell_script_phase(target, 'Embed Pods Frameworks') + deintegrate_user_shell_script_phases(target) + deintegrate_pods_libraries(target) + deintegrate_configuration_file_references(target) + end + end + + def remove_sandbox + pods_directory = config.sandbox.root + if pods_directory.exist? + UI.puts("Removing #{UI.path pods_directory} directory.") + pods_directory.rmtree + end + end + + def deintegrate_pods_libraries(target) + # `frameworks_build_phases` returns but does not automatically create this build phase + # when we are deintegrating. It's a bit of a weird API but that's what Xcodeproj gives + # us. + frameworks_build_phase = target.frameworks_build_phases + return if frameworks_build_phase.nil? + + pods_build_files = frameworks_build_phase.files.select do |build_file| + build_file.display_name =~ FRAMEWORK_NAMES + end + + unless pods_build_files.empty?
+ UI.section('Removing Pod libraries from build phase:') do + pods_build_files.each do |build_file| + UI.puts("- #{build_file.display_name}") + if build_file.file_ref.build_files.count == 1 + build_file.file_ref.remove_from_project + end + frameworks_build_phase.remove_build_file(build_file) + end + end + end + end + + def deintegrate_user_shell_script_phases(target) + user_script_phases = target.shell_script_build_phases.select do |phase| + next unless phase.name + phase.name.start_with?('[CP-User] ') + end + + unless user_script_phases.empty? + user_script_phases.each do |phase| + target.build_phases.delete(phase) + end + + UI.puts("Deleted #{user_script_phases.count} user build phases.") + end + end + + def deintegrate_shell_script_phase(target, phase_name) + phases = target.shell_script_build_phases.select do |phase| + phase.name && phase.name =~ /#{Regexp.escape(phase_name)}\z$/ + end + + unless phases.empty? + phases.each do |phase| + target.build_phases.delete(phase) + end + + UI.puts("Deleted #{phases.count} '#{phase_name}' build phases.") + end + end + + def delete_empty_group(project, group_name) + groups = project.main_group.recursive_children_groups.select do |group| + group.name == group_name && group.children.empty? + end + + unless groups.empty? + groups.each(&:remove_from_project) + UI.puts "Deleted #{groups.count} empty `#{group_name}` groups from project." + end + end + + def deintegrate_configuration_file_references(target) + config_files = target.build_configurations.map do |config| + config_file = config.base_configuration_reference + config_file if config_file && config_file.name =~ XCCONFIG_NAMES + end.compact + unless config_files.empty? + UI.section('Deleting configuration file references') do + config_files.each do |file_reference| + UI.puts("- #{file_reference.name}") + file_reference.remove_from_project + end + end + end + end + + def delete_pods_file_references(project) + # The following implementation goes for files and empty groups so it + # should catch cases where a user has changed the structure manually. + + groups = project.main_group.recursive_children_groups + groups << project.main_group + + pod_files = groups.flat_map do |group| + group.files.select do |obj| + obj.name =~ XCCONFIG_NAMES || + obj.path =~ /^(libPods.*\.a)|(Pods_.*\.framework)$/i + end + end + + unless pod_files.empty? 
+ UI.section('Deleting Pod file references from project') do + pod_files.each do |file_reference| + UI.puts("- #{file_reference.name || file_reference.path}") + file_reference.remove_from_project + end + end + end + + # Delete empty `Pods` group if exists + delete_empty_group(project, 'Pods') + delete_empty_group(project, 'Frameworks') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_deintegrate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_deintegrate.rb new file mode 100644 index 0000000..f20a484 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_deintegrate.rb @@ -0,0 +1,2 @@ +require 'cocoapods/deintegrate/gem_version' +require 'cocoapods/deintegrator' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..8049ee2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_plugin.rb @@ -0,0 +1,4 @@ +module Pod + require 'cocoapods_deintegrate' + require 'cocoapods/command/deintegrate' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/README.markdown b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/README.markdown new file mode 100644 index 0000000..dc964cf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/README.markdown @@ -0,0 +1,77 @@ +# Downloader + +A small library for downloading files from remotes in a folder. 
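A detail worth calling out while reviewing the vendored source: for git sources, `preprocess_options` resolves a `:branch` request to a pinned `:commit` (via `git ls-remote`) before any download happens, so later downloads are reproducible. A rough sketch, assuming a reachable remote (the URL is hypothetical):

```ruby
require 'cocoapods-downloader'

options = { :git => 'https://github.com/example/repo.git', :branch => 'main' }
options = Pod::Downloader.preprocess_options(options)
# => { :git => 'https://github.com/example/repo.git', :commit => '<sha reported by ls-remote>' }
```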
+ +[![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-downloader/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-downloader) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/cocoapods-downloader.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-downloader) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/cocoapods-downloader.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-downloader) + +## Install + +``` +$ [sudo] gem install cocoapods-downloader +``` + +## Usage + +```ruby +require 'cocoapods-downloader' + +target_path = './Downloads/MyDownload' +options = { :git => 'example.com' } +options = Pod::Downloader.preprocess_options(options) +downloader = Pod::Downloader.for_target(target_path, options) +downloader.cache_root = '~/Library/Caches/APPNAME' +downloader.max_cache_size = 500 +downloader.download +downloader.checkout_options #=> { :git => 'example.com', :commit => 'd7f410490dabf7a6bde665ba22da102c3acf1bd9' } +``` + +The downloader class supports the following option keys: + +- git: commit, tag, branch, submodules +- svn: revision, tag, folder, externals +- hg: revision, tag, branch +- http: type, flatten +- scp: type, flatten +- bzr: revision, tag + +The downloader also provides hooks which allow to customize its output or the way in which the commands are executed + +```ruby +require 'cocoapods-downloader' + +module Pod + module Downloader + class Base + + override_api do + def self.execute_command(executable, command, raise_on_failure = false) + puts "Will download" + super + end + + def self.ui_action(ui_message) + puts ui_message.green + yield + end + end + + end + end +end +``` + +## Extraction + +This gem was extracted from [CocoaPods](https://github.com/CocoaPods/CocoaPods). Refer to also that repository for the history and the contributors. + +## Collaborate + +All CocoaPods development happens on GitHub, there is a repository for [CocoaPods](https://github.com/CocoaPods/CocoaPods) and one for the [CocoaPods specs](https://github.com/CocoaPods/Specs). Contributing patches or Pods is really easy and gratifying and for a lot of people is their first time. + +Follow [@CocoaPods](http://twitter.com/CocoaPods) to get up to date information about what's going on in the CocoaPods world. + +## License + +This gem and CocoaPods are available under the MIT license. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader.rb new file mode 100644 index 0000000..0dd759a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader.rb @@ -0,0 +1,102 @@ +module Pod + module Downloader + require 'cocoapods-downloader/gem_version' + require 'cocoapods-downloader/api' + require 'cocoapods-downloader/api_exposable' + require 'cocoapods-downloader/base' + + autoload :Bazaar, 'cocoapods-downloader/bazaar' + autoload :Git, 'cocoapods-downloader/git' + autoload :Http, 'cocoapods-downloader/http' + autoload :Mercurial, 'cocoapods-downloader/mercurial' + autoload :Scp, 'cocoapods-downloader/scp' + autoload :Subversion, 'cocoapods-downloader/subversion' + + # Denotes the error generated by a Downloader + # + class DownloaderError < StandardError; end + + # @return [Hash{Symbol=>Class}] The concrete classes of the supported + # strategies by key. 
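# @example (illustrative, added in review) Picking a concrete downloader
#   Pod::Downloader.strategy_from_options(:git => 'https://example.com/repo.git', :tag => '1.0.0')
#   # => :git
#   Pod::Downloader.downloader_class_by_key[:git]
#   # => Pod::Downloader::Git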
+ # + def self.downloader_class_by_key + { + :bzr => Bazaar, + :git => Git, + :hg => Mercurial, + :http => Http, + :scp => Scp, + :svn => Subversion, + } + end + + # Identifies the concrete strategy for the given options. + # + # @param [Hash{Symbol}] options + # The options for which a strategy is needed. + # + # @return [Symbol] The symbol associated with a concrete strategy. + # @return [Nil] If no suitable concrete strategy could be selected. + # + def self.strategy_from_options(options) + common = downloader_class_by_key.keys & options.keys + if common.count == 1 + common.first + end + end + + # @return [Downloader::Base] A concrete downloader according to the + # options. + # + def self.for_target(target_path, options) + options = options_to_sym(options) + + if target_path.nil? + raise DownloaderError, 'No target path provided.' + end + + strategy, klass = class_for_options(options) + + url = options[strategy] + sub_options = options.dup + sub_options.delete(strategy) + + klass.new(target_path, url, sub_options) + end + + # Have the concrete strategy preprocess options + # + # @param [Hash] options + # The request options to preprocess + # + # @return [Hash] the new options + # + def self.preprocess_options(options) + options = options_to_sym(options) + + _, klass = class_for_options(options) + klass.preprocess_options(options) + end + + private_class_method + + def self.options_to_sym(options) + Hash[options.map { |k, v| [k.to_sym, v] }] + end + + def self.class_for_options(options) + if options.nil? || options.empty? + raise DownloaderError, 'No source URL provided.' + end + + strategy = strategy_from_options(options) + unless strategy + raise DownloaderError, 'Unsupported download strategy ' \ + "`#{options.inspect}`." + end + + # Explicit return for multiple params, rubocop thinks it's useless but it's not + return strategy, downloader_class_by_key[strategy] # rubocop:disable Style/RedundantReturn + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/api.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/api.rb new file mode 100644 index 0000000..9375f10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/api.rb @@ -0,0 +1,73 @@ +module Pod + module Downloader + # The Downloader::Hooks module allows to adapt the Downloader to + # the UI of other gems. + # + module API + # Executes + # @return [String] the output of the command. + # + def execute_command(executable, command, raise_on_failure = false) + require 'shellwords' + command = command.map(&:to_s).map(&:shellescape).join(' ') + output = `\n#{executable} #{command} 2>&1` + check_exit_code!(executable, command, output) if raise_on_failure + puts output + output + end + + # Checks if the just executed command completed successfully. + # + # @raise If the command failed. + # + # @return [void] + # + def check_exit_code!(executable, command, output) + if $?.exitstatus != 0 + raise DownloaderError, "Error on `#{executable} #{command}`.\n#{output}" + end + end + + # Indicates that an action will be performed. The action is passed as a + # block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_action(message) + puts message + yield + end + + # Indicates that a minor action will be performed. The action is passed as + # a block. 
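# @example (illustrative, added in review) How the git downloader uses this hook
#   ui_sub_action('Git download') { git! clone_arguments(force_head, shallow_clone) }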
+ # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_sub_action(message) + puts message + yield + end + + # Prints an UI message. + # + # @param [String] message + # The message associated with the action. + # + # @return [void] + # + def ui_message(message) + puts message + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/api_exposable.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/api_exposable.rb new file mode 100644 index 0000000..62066db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/api_exposable.rb @@ -0,0 +1,23 @@ +module Pod + module Downloader + module APIExposable + def expose_api(mod = nil, &block) + if mod.nil? + if block.nil? + raise "Either a module or a block that's used to create a module is required." + else + mod = Module.new(&block) + end + elsif mod && block + raise 'Only a module *or* is required, not both.' + end + include mod + # TODO: Try to find a nicer way to do this + # See https://github.com/CocoaPods/cocoapods-downloader/pull/57 + extend mod + end + + alias override_api expose_api + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/base.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/base.rb new file mode 100644 index 0000000..8e2d38a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/base.rb @@ -0,0 +1,185 @@ +require 'shellwords' + +class Pathname + # @return [String] a version of the path that is escaped to be safe to use in + # a shell. + def shellescape + to_s.shellescape + end +end + +module Pod + module Downloader + # The base class defines the common behaviour of the downloaders. + # + # @abstract Subclass and implement {#download}. + # + # @private + # + class Base + extend APIExposable + expose_api API + + # @abstract Override in subclasses. + # + # @return [Array] the options accepted by the concrete class. + # + def self.options + [] + end + + # @return [Pathname] the destination folder for the download. + # + attr_reader :target_path + + # @return [String] the url of the remote source. + # + attr_reader :url + + # @return [Hash={Symbol=>String}] options specific to each concrete + # downloader. + # + attr_reader :options + + # @param [String, Pathname] target_path @see target_path + # @param [String] url @see url + # @param [Hash={Symbol=>String}] options @see options + # + def initialize(target_path, url, options) + require 'pathname' + @target_path = Pathname.new(target_path) + @url = url + @options = options + + unrecognized_options = options.keys - self.class.options + unless unrecognized_options.empty? + raise DownloaderError, "Unrecognized options `#{unrecognized_options}`" + end + end + + # @return [String] the name of the downloader. + # + # @example Downloader::Mercurial name + # + # "Mercurial" + # + def name + self.class.name.split('::').last + end + + #-----------------------------------------------------------------------# + + # @!group Downloading + + # Downloads the revision specified in the option of a source. If no + # revision is specified it fall-back to {#download_head}. + # + # @return [void] + # + def download + ui_action("#{name} download") do + target_path.mkpath + download! 
+ end + end + + # Downloads the head revision of a source. + # + # @todo Spec for raise. + # + # @return [void] + # + def download_head + ui_action("#{name} HEAD download") do + if head_supported? + download_head! + else + raise DownloaderError, "The `#{name}` downloader does not support " \ + 'the HEAD option.' + end + end + end + + # @return [Bool] Whether the downloader supports the head download + # strategy. + # + def head_supported? + respond_to?(:download_head!, true) + end + + # @return [Bool] Whether the options provided completely identify a source + # or could lead to the download of different files in future. + # + def options_specific? + true + end + + # @return [Hash{Symbol=>String}] The options that would allow to + # re-download the exact files. + # + def checkout_options + raise 'Abstract method' + end + + # Returns a User-Agent string that itentifies http network requests as + # originating from CocoaPods. + # Contains version numbers from the CocoaPods Gem and the cocoapods-downloader Gem. + # + # @param [module] base_module The Base CocoaPods Module to retrieve the version number from. + # @return [String] the User-Agent string. + # + def self.user_agent_string(base_module = Pod) + pods_version = base_module.const_defined?('VERSION') ? "CocoaPods/#{base_module::VERSION} " : '' + "#{pods_version}cocoapods-downloader/#{Pod::Downloader::VERSION}" + end + + #-----------------------------------------------------------------------# + + # Defines two methods for an executable, based on its name. The bang + # version raises if the executable terminates with a non-zero exit code. + # + # For example + # + # executable :git + # + # generates + # + # def git(command) + # Hooks.execute_with_check("git", command, false) + # end + # + # def git!(command) + # Hooks.execute_with_check("git", command, true) + # end + # + # @param [Symbol] name + # the name of the executable. + # + # @return [void] + # + def self.executable(name) + define_method(name) do |*command| + execute_command(name.to_s, command.flatten, false) + end + + define_method(name.to_s + '!') do |*command| + execute_command(name.to_s, command.flatten, true) + end + end + + # preprocess download options + # + # Usage of this method is optional. concrete strategies should not + # assume options are preprocessed for correct execution. + # + # @param [Hash] options + # The request options to preprocess + # + # @return [Hash] the new options + # + def self.preprocess_options(options) + options + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/bazaar.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/bazaar.rb new file mode 100644 index 0000000..dee53db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/bazaar.rb @@ -0,0 +1,60 @@ +module Pod + module Downloader + class Bazaar < Base + def self.options + [:revision, :tag] + end + + def options_specific? + !options[:revision].nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:bzr] = url + options[:revision] = `bzr revno`.chomp + options + end + end + + private + + # @group Private Helpers + #-----------------------------------------------------------------------# + + executable :bzr + + def download! + if options[:tag] + download_revision!(options[:tag]) + elsif options[:revision] + download_revision!(options[:revision]) + else + download_head! + end + end + + def download_head! + bzr! 
'branch', url, *dir_opts, target_path + end + + def download_revision!(rev) + bzr! 'branch', url, *dir_opts, '-r', rev, @target_path + end + + # @return [String] The command line flags to use according to whether the + # target path exits. + # + def dir_opts + if @target_path.exist? + %w(--use-existing-dir) + else + [] + end + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/gem_version.rb new file mode 100644 index 0000000..aad37bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/gem_version.rb @@ -0,0 +1,8 @@ +module Pod + module Downloader + # @return [String] Downloader’s version, following + # [semver](http://semver.org). + # + VERSION = '1.4.0'.freeze + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/git.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/git.rb new file mode 100644 index 0000000..307e6d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/git.rb @@ -0,0 +1,157 @@ +module Pod + module Downloader + # Concreted Downloader class that provides support for specifications with + # git sources. + # + class Git < Base + def self.options + [:commit, :tag, :branch, :submodules] + end + + def options_specific? + !(options[:commit] || options[:tag]).nil? + end + + def checkout_options + options = {} + options[:git] = url + options[:commit] = target_git('rev-parse', 'HEAD').chomp + options[:submodules] = true if self.options[:submodules] + options + end + + def self.preprocess_options(options) + return options unless options[:branch] + + command = ['ls-remote', + options[:git], + options[:branch]] + output = Git.execute_command('git', command) + match = commit_from_ls_remote output, options[:branch] + + return options if match.nil? + + options[:commit] = match + options.delete(:branch) + + options + end + + # Matches a commit from the branches reported by git ls-remote. + # + # @note When there is a branch and tag with the same name, it will match + # the branch, since `refs/heads` is sorted before `refs/tags`. + # + # @param [String] output + # The output from git ls-remote. + # + # @param [String] branch_name + # The desired branch to match a commit to. + # + # @return [String] commit hash string, or nil if no match found + # + def self.commit_from_ls_remote(output, branch_name) + return nil if branch_name.nil? + match = %r{([a-z0-9]*)\trefs\/(heads|tags)\/#{Regexp.quote(branch_name)}}.match(output) + match[1] unless match.nil? + end + + private_class_method :commit_from_ls_remote + + private + + # @!group Base class hooks + + def download! + clone + checkout_commit if options[:commit] + end + + # @return [void] Checks out the HEAD of the git source in the destination + # path. + # + def download_head! + clone(true) + end + + # @!group Download implementations + + executable :git + + # Clones the repo. If possible the repo will be shallowly cloned. + # + # @note The `:commit` option requires a specific strategy as it is not + # possible to specify the commit to the `clone` command. + # + # @note `--branch` command line option can also take tags and detaches + # the HEAD. 
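# @note (Illustrative note added in review.) With `:tag => 'v1.0.0'` and shallow
#       cloning allowed, #clone_arguments below produces roughly:
#       `clone <url> <target_path> --template= --single-branch --depth 1 --branch v1.0.0`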
+ # + # @param [Bool] force_head + # If any specific option should be ignored and the HEAD of the + # repo should be cloned. + # + # @param [Bool] shallow_clone + # Whether a shallow clone of the repo should be attempted, if + # possible given the specified {#options}. + # + def clone(force_head = false, shallow_clone = true) + ui_sub_action('Git download') do + begin + git! clone_arguments(force_head, shallow_clone) + update_submodules + rescue DownloaderError => e + if e.message =~ /^fatal:.*does not support (--depth|shallow capabilities)$/im + clone(force_head, false) + else + raise + end + end + end + end + + def update_submodules + return unless options[:submodules] + target_git %w(submodule update --init --recursive) + end + + # The arguments to pass to `git` to clone the repo. + # + # @param [Bool] force_head + # If any specific option should be ignored and the HEAD of the + # repo should be cloned. + # + # @param [Bool] shallow_clone + # Whether a shallow clone of the repo should be attempted, if + # possible given the specified {#options}. + # + # @return [Array] arguments to pass to `git` to clone the repo. + # + def clone_arguments(force_head, shallow_clone) + command = ['clone', url, target_path, '--template='] + + if shallow_clone && !options[:commit] + command += %w(--single-branch --depth 1) + end + + unless force_head + if tag_or_branch = options[:tag] || options[:branch] + command += ['--branch', tag_or_branch] + end + end + + command + end + + # Checks out a specific commit of the cloned repo. + # + def checkout_commit + target_git 'checkout', '--quiet', options[:commit] + update_submodules + end + + def target_git(*args) + git!(['-C', target_path] + args) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/http.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/http.rb new file mode 100644 index 0000000..90e13dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/http.rb @@ -0,0 +1,34 @@ +require 'cocoapods-downloader/remote_file' + +module Pod + module Downloader + class Http < RemoteFile + USER_AGENT_HEADER = 'User-Agent'.freeze + + private + + executable :curl + + def download_file(full_filename) + parameters = ['-f', '-L', '-o', full_filename, url, '--create-dirs', '--netrc-optional', '--retry', '2'] + parameters << user_agent_argument if headers.nil? || + headers.none? { |header| header.casecmp(USER_AGENT_HEADER).zero? } + + headers.each do |h| + parameters << '-H' + parameters << h + end unless headers.nil? + + curl! parameters + end + + # Returns a cURL command flag to add the CocoaPods User-Agent. + # + # @return [String] cURL command -A flag and User-Agent. + # + def user_agent_argument + "-A '#{Http.user_agent_string}'" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/mercurial.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/mercurial.rb new file mode 100644 index 0000000..6dc6c49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/mercurial.rb @@ -0,0 +1,54 @@ +module Pod + module Downloader + class Mercurial < Base + def self.options + [:revision, :tag, :branch] + end + + def options_specific? + !(options[:revision] || options[:tag]).nil? 
+ end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:hg] = url + options[:revision] = `hg --debug id -i`.chomp + options + end + end + + private + + executable :hg + + def download! + if options[:revision] + download_revision! + elsif options[:tag] + download_tag! + elsif options[:branch] + download_branch! + else + download_head! + end + end + + def download_head! + hg! 'clone', url, @target_path + end + + def download_revision! + hg! 'clone', url, '--rev', options[:revision], @target_path + end + + def download_tag! + hg! 'clone', url, '--updaterev', options[:tag], @target_path + end + + def download_branch! + hg! 'clone', url, '--updaterev', options[:branch], @target_path + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/remote_file.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/remote_file.rb new file mode 100644 index 0000000..2721cc3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/remote_file.rb @@ -0,0 +1,182 @@ +require 'fileutils' +require 'uri' +require 'zlib' + +module Pod + module Downloader + class RemoteFile < Base + def self.options + [:type, :flatten, :sha1, :sha256, :headers] + end + + class UnsupportedFileTypeError < StandardError; end + + private + + executable :unzip + executable :tar + executable :hdiutil + + attr_accessor :filename, :download_path + + def download! + @filename = filename_with_type(type) + @download_path = target_path + @filename + download_file(@download_path) + verify_checksum(@download_path) + extract_with_type(@download_path, type) + end + + def type + if options[:type] + options[:type].to_sym + else + type_with_url(url) + end + end + + def headers + options[:headers] + end + + # @note The archive is flattened if it contains only one folder and its + # extension is either `tgz`, `tar`, `tbz` or the options specify + # it. + # + # @return [Bool] Whether the archive should be flattened if it contains + # only one folder. + # + def should_flatten? + if options.key?(:flatten) + options[:flatten] + elsif [:tgz, :tar, :tbz, :txz].include?(type) + true # those archives flatten by default + else + false # all others (actually only .zip) default not to flatten + end + end + + def type_with_url(url) + case URI.parse(url).path + when /\.zip$/ + :zip + when /\.(tgz|tar\.gz)$/ + :tgz + when /\.tar$/ + :tar + when /\.(tbz|tar\.bz2)$/ + :tbz + when /\.(txz|tar\.xz)$/ + :txz + when /\.dmg$/ + :dmg + end + end + + def filename_with_type(type = :zip) + case type + when :zip, :tgz, :tar, :tbz, :txz, :dmg + "file.#{type}" + else + raise UnsupportedFileTypeError, "Unsupported file type: #{type}" + end + end + + def download_file(_full_filename) + raise NotImplementedError + end + + def extract_with_type(full_filename, type = :zip) + unpack_from = full_filename + unpack_to = @target_path + + case type + when :zip + unzip! unpack_from, '-d', unpack_to + when :tgz + tar! 'xfz', unpack_from, '-C', unpack_to + when :tar + tar! 'xf', unpack_from, '-C', unpack_to + when :tbz + tar! 'xfj', unpack_from, '-C', unpack_to + when :txz + tar! 'xf', unpack_from, '-C', unpack_to + when :dmg + extract_dmg(unpack_from, unpack_to) + else + raise UnsupportedFileTypeError, "Unsupported file type: #{type}" + end + + # If the archive is a tarball and it only contained a folder, move its + # contents to the target (#727) + # + if should_flatten? 
+ contents = target_path.children + contents.delete(target_path + @filename) + entry = contents.first + if contents.count == 1 && entry.directory? + tmp_entry = entry.sub_ext("#{entry.extname}.tmp") + begin + FileUtils.move(entry, tmp_entry) + FileUtils.move(tmp_entry.children, target_path) + ensure + FileUtils.remove_entry(tmp_entry) + end + end + end + + FileUtils.rm(unpack_from) if File.exist?(unpack_from) + end + + def extract_dmg(unpack_from, unpack_to) + require 'rexml/document' + plist_s = hdiutil! 'attach', '-plist', '-nobrowse', unpack_from, '-mountrandom', unpack_to + plist = REXML::Document.new plist_s + xpath = '//key[.="mount-point"]/following-sibling::string' + mount_point = REXML::XPath.first(plist, xpath).text + FileUtils.cp_r(Dir.glob(mount_point + '/*'), unpack_to) + hdiutil! 'detach', mount_point + end + + def compare_hash(filename, hasher, hash) + incremental_hash = hasher.new + + File.open(filename, 'rb') do |file| + buf = '' + incremental_hash << buf while file.read(1024, buf) + end + + computed_hash = incremental_hash.hexdigest + + if computed_hash != hash + raise DownloaderError, 'Verification checksum was incorrect, ' \ + "expected #{hash}, got #{computed_hash}" + end + end + + # Verify that the downloaded file matches a sha1 hash + # + def verify_sha1_hash(filename, hash) + require 'digest/sha1' + compare_hash(filename, Digest::SHA1, hash) + end + + # Verify that the downloaded file matches a sha256 hash + # + def verify_sha256_hash(filename, hash) + require 'digest/sha2' + compare_hash(filename, Digest::SHA2, hash) + end + + # Verify that the downloaded file matches the hash if set + # + def verify_checksum(filename) + if options[:sha256] + verify_sha256_hash(filename, options[:sha256]) + elsif options[:sha1] + verify_sha1_hash(filename, options[:sha1]) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/scp.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/scp.rb new file mode 100644 index 0000000..c8a92f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/scp.rb @@ -0,0 +1,30 @@ +require 'uri' +require 'cocoapods-downloader/remote_file' + +module Pod + module Downloader + class Scp < RemoteFile + DEFAULT_PORT = 22 + + private + + executable :scp + + def download_file(full_filename) + scp! '-P', port, '-q', source, full_filename + end + + def source + "#{uri.user ? uri.user + '@' : ''}#{uri.host}:'#{uri.path}'" + end + + def port + uri.port || DEFAULT_PORT + end + + def uri + @uri ||= URI.parse(url) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/subversion.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/subversion.rb new file mode 100644 index 0000000..a7ff2ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.4.0/lib/cocoapods-downloader/subversion.rb @@ -0,0 +1,69 @@ +module Pod + module Downloader + class Subversion < Base + def self.options + [:revision, :tag, :folder, :externals, :checkout] + end + + def options_specific? + !(options[:revision] || options[:tag]).nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:svn] = url + options[:revision] = @exported_revision + options + end + end + + private + + executable :svn + + def download! 
+ output = svn!(*subcommand, *reference_url, @target_path) + store_exported_revision(output) + end + + def download_head! + output = svn!(*subcommand, *trunk_url, @target_path) + store_exported_revision(output) + end + + def store_exported_revision(output) + output =~ /Exported revision ([0-9]+)\./ + @exported_revision = Regexp.last_match[1] if Regexp.last_match + end + + def subcommand + result = if options[:checkout] + %w(checkout) + else + %w(export) + end + + result += %w(--non-interactive --trust-server-cert --force) + result << '--ignore-externals' if options[:externals] == false + result + end + + def reference_url + result = url.dup + result << '/' << options[:folder] if options[:folder] + result << '/tags/' << options[:tag] if options[:tag] + result = [result] + result << '-r' << options[:revision] if options[:revision] + result + end + + def trunk_url + result = url.dup + result << '/' << options[:folder] if options[:folder] + result << '/trunk' + [result] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/README.markdown b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/README.markdown new file mode 100644 index 0000000..974b335 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/README.markdown @@ -0,0 +1,78 @@ +# Downloader + +A small library for downloading files from remotes in a folder. 
+ +[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/CocoaPods-Downloader/Spec)](https://github.com/CocoaPods/cocoapods-downloader/actions) +[![Gem Version](https://img.shields.io/gem/v/cocoapods-downloader)](https://rubygems.org/gems/cocoapods-downloader) +[![Maintainability](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/CocoaPods/cocoapods-downloader/maintainability) +[![Test Coverage](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/test_coverage)](https://codeclimate.com/github/CocoaPods/cocoapods-downloader/test_coverage) + +## Install + +``` +$ [sudo] gem install cocoapods-downloader +``` + +## Usage + +```ruby +require 'cocoapods-downloader' + +target_path = './Downloads/MyDownload' +options = { :git => 'example.com' } +options = Pod::Downloader.preprocess_options(options) +downloader = Pod::Downloader.for_target(target_path, options) +downloader.cache_root = '~/Library/Caches/APPNAME' +downloader.max_cache_size = 500 +downloader.download +downloader.checkout_options #=> { :git => 'example.com', :commit => 'd7f410490dabf7a6bde665ba22da102c3acf1bd9' } +``` + +The downloader class supports the following option keys: + +- git: commit, tag, branch, submodules +- svn: revision, tag, folder, externals +- hg: revision, tag, branch +- http: type, flatten +- scp: type, flatten +- bzr: revision, tag + +The downloader also provides hooks which allow to customize its output or the way in which the commands are executed + +```ruby +require 'cocoapods-downloader' + +module Pod + module Downloader + class Base + + override_api do + def self.execute_command(executable, command, raise_on_failure = false) + puts "Will download" + super + end + + def self.ui_action(ui_message) + puts ui_message.green + yield + end + end + + end + end +end +``` + +## Extraction + +This gem was extracted from [CocoaPods](https://github.com/CocoaPods/CocoaPods). Refer to also that repository for the history and the contributors. + +## Collaborate + +All CocoaPods development happens on GitHub, there is a repository for [CocoaPods](https://github.com/CocoaPods/CocoaPods) and one for the [CocoaPods specs](https://github.com/CocoaPods/Specs). Contributing patches or Pods is really easy and gratifying and for a lot of people is their first time. + +Follow [@CocoaPods](http://twitter.com/CocoaPods) to get up to date information about what's going on in the CocoaPods world. + +## License + +This gem and CocoaPods are available under the MIT license. 
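+
+## Archive sources
+
+For archive sources the same API applies; the extra keys map onto the
+`RemoteFile` options (`:type`, `:flatten`, `:sha1`, `:sha256`, `:headers`).
+A minimal sketch for an `:http` source, assuming a hypothetical URL and
+checksum:
+
+```ruby
+require 'cocoapods-downloader'
+
+target_path = './Downloads/MyLibrary'
+options = {
+  :http   => 'https://example.com/library.zip', # hypothetical archive URL
+  :type   => :zip,                              # inferred from the URL when omitted
+  :sha256 => '<expected hex digest>',           # placeholder; verified after the download
+}
+options = Pod::Downloader.preprocess_options(options)
+downloader = Pod::Downloader.for_target(target_path, options)
+downloader.download # raises Pod::Downloader::DownloaderError on a checksum mismatch
+```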
diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader.rb new file mode 100644 index 0000000..0dd759a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader.rb @@ -0,0 +1,102 @@ +module Pod + module Downloader + require 'cocoapods-downloader/gem_version' + require 'cocoapods-downloader/api' + require 'cocoapods-downloader/api_exposable' + require 'cocoapods-downloader/base' + + autoload :Bazaar, 'cocoapods-downloader/bazaar' + autoload :Git, 'cocoapods-downloader/git' + autoload :Http, 'cocoapods-downloader/http' + autoload :Mercurial, 'cocoapods-downloader/mercurial' + autoload :Scp, 'cocoapods-downloader/scp' + autoload :Subversion, 'cocoapods-downloader/subversion' + + # Denotes the error generated by a Downloader + # + class DownloaderError < StandardError; end + + # @return [Hash{Symbol=>Class}] The concrete classes of the supported + # strategies by key. + # + def self.downloader_class_by_key + { + :bzr => Bazaar, + :git => Git, + :hg => Mercurial, + :http => Http, + :scp => Scp, + :svn => Subversion, + } + end + + # Identifies the concrete strategy for the given options. + # + # @param [Hash{Symbol}] options + # The options for which a strategy is needed. + # + # @return [Symbol] The symbol associated with a concrete strategy. + # @return [Nil] If no suitable concrete strategy could be selected. + # + def self.strategy_from_options(options) + common = downloader_class_by_key.keys & options.keys + if common.count == 1 + common.first + end + end + + # @return [Downloader::Base] A concrete downloader according to the + # options. + # + def self.for_target(target_path, options) + options = options_to_sym(options) + + if target_path.nil? + raise DownloaderError, 'No target path provided.' + end + + strategy, klass = class_for_options(options) + + url = options[strategy] + sub_options = options.dup + sub_options.delete(strategy) + + klass.new(target_path, url, sub_options) + end + + # Have the concrete strategy preprocess options + # + # @param [Hash] options + # The request options to preprocess + # + # @return [Hash] the new options + # + def self.preprocess_options(options) + options = options_to_sym(options) + + _, klass = class_for_options(options) + klass.preprocess_options(options) + end + + private_class_method + + def self.options_to_sym(options) + Hash[options.map { |k, v| [k.to_sym, v] }] + end + + def self.class_for_options(options) + if options.nil? || options.empty? + raise DownloaderError, 'No source URL provided.' + end + + strategy = strategy_from_options(options) + unless strategy + raise DownloaderError, 'Unsupported download strategy ' \ + "`#{options.inspect}`." + end + + # Explicit return for multiple params, rubocop thinks it's useless but it's not + return strategy, downloader_class_by_key[strategy] # rubocop:disable Style/RedundantReturn + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/api.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/api.rb new file mode 100644 index 0000000..9375f10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/api.rb @@ -0,0 +1,73 @@ +module Pod + module Downloader + # The Downloader::Hooks module allows to adapt the Downloader to + # the UI of other gems. 
+ # + module API + # Executes + # @return [String] the output of the command. + # + def execute_command(executable, command, raise_on_failure = false) + require 'shellwords' + command = command.map(&:to_s).map(&:shellescape).join(' ') + output = `\n#{executable} #{command} 2>&1` + check_exit_code!(executable, command, output) if raise_on_failure + puts output + output + end + + # Checks if the just executed command completed successfully. + # + # @raise If the command failed. + # + # @return [void] + # + def check_exit_code!(executable, command, output) + if $?.exitstatus != 0 + raise DownloaderError, "Error on `#{executable} #{command}`.\n#{output}" + end + end + + # Indicates that an action will be performed. The action is passed as a + # block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_action(message) + puts message + yield + end + + # Indicates that a minor action will be performed. The action is passed as + # a block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_sub_action(message) + puts message + yield + end + + # Prints an UI message. + # + # @param [String] message + # The message associated with the action. + # + # @return [void] + # + def ui_message(message) + puts message + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/api_exposable.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/api_exposable.rb new file mode 100644 index 0000000..62066db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/api_exposable.rb @@ -0,0 +1,23 @@ +module Pod + module Downloader + module APIExposable + def expose_api(mod = nil, &block) + if mod.nil? + if block.nil? + raise "Either a module or a block that's used to create a module is required." + else + mod = Module.new(&block) + end + elsif mod && block + raise 'Only a module *or* is required, not both.' + end + include mod + # TODO: Try to find a nicer way to do this + # See https://github.com/CocoaPods/cocoapods-downloader/pull/57 + extend mod + end + + alias override_api expose_api + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/base.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/base.rb new file mode 100644 index 0000000..8e2d38a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/base.rb @@ -0,0 +1,185 @@ +require 'shellwords' + +class Pathname + # @return [String] a version of the path that is escaped to be safe to use in + # a shell. + def shellescape + to_s.shellescape + end +end + +module Pod + module Downloader + # The base class defines the common behaviour of the downloaders. + # + # @abstract Subclass and implement {#download}. + # + # @private + # + class Base + extend APIExposable + expose_api API + + # @abstract Override in subclasses. + # + # @return [Array] the options accepted by the concrete class. + # + def self.options + [] + end + + # @return [Pathname] the destination folder for the download. + # + attr_reader :target_path + + # @return [String] the url of the remote source. 
+ # + attr_reader :url + + # @return [Hash={Symbol=>String}] options specific to each concrete + # downloader. + # + attr_reader :options + + # @param [String, Pathname] target_path @see target_path + # @param [String] url @see url + # @param [Hash={Symbol=>String}] options @see options + # + def initialize(target_path, url, options) + require 'pathname' + @target_path = Pathname.new(target_path) + @url = url + @options = options + + unrecognized_options = options.keys - self.class.options + unless unrecognized_options.empty? + raise DownloaderError, "Unrecognized options `#{unrecognized_options}`" + end + end + + # @return [String] the name of the downloader. + # + # @example Downloader::Mercurial name + # + # "Mercurial" + # + def name + self.class.name.split('::').last + end + + #-----------------------------------------------------------------------# + + # @!group Downloading + + # Downloads the revision specified in the option of a source. If no + # revision is specified it fall-back to {#download_head}. + # + # @return [void] + # + def download + ui_action("#{name} download") do + target_path.mkpath + download! + end + end + + # Downloads the head revision of a source. + # + # @todo Spec for raise. + # + # @return [void] + # + def download_head + ui_action("#{name} HEAD download") do + if head_supported? + download_head! + else + raise DownloaderError, "The `#{name}` downloader does not support " \ + 'the HEAD option.' + end + end + end + + # @return [Bool] Whether the downloader supports the head download + # strategy. + # + def head_supported? + respond_to?(:download_head!, true) + end + + # @return [Bool] Whether the options provided completely identify a source + # or could lead to the download of different files in future. + # + def options_specific? + true + end + + # @return [Hash{Symbol=>String}] The options that would allow to + # re-download the exact files. + # + def checkout_options + raise 'Abstract method' + end + + # Returns a User-Agent string that itentifies http network requests as + # originating from CocoaPods. + # Contains version numbers from the CocoaPods Gem and the cocoapods-downloader Gem. + # + # @param [module] base_module The Base CocoaPods Module to retrieve the version number from. + # @return [String] the User-Agent string. + # + def self.user_agent_string(base_module = Pod) + pods_version = base_module.const_defined?('VERSION') ? "CocoaPods/#{base_module::VERSION} " : '' + "#{pods_version}cocoapods-downloader/#{Pod::Downloader::VERSION}" + end + + #-----------------------------------------------------------------------# + + # Defines two methods for an executable, based on its name. The bang + # version raises if the executable terminates with a non-zero exit code. + # + # For example + # + # executable :git + # + # generates + # + # def git(command) + # Hooks.execute_with_check("git", command, false) + # end + # + # def git!(command) + # Hooks.execute_with_check("git", command, true) + # end + # + # @param [Symbol] name + # the name of the executable. + # + # @return [void] + # + def self.executable(name) + define_method(name) do |*command| + execute_command(name.to_s, command.flatten, false) + end + + define_method(name.to_s + '!') do |*command| + execute_command(name.to_s, command.flatten, true) + end + end + + # preprocess download options + # + # Usage of this method is optional. concrete strategies should not + # assume options are preprocessed for correct execution. 
+ # + # @param [Hash] options + # The request options to preprocess + # + # @return [Hash] the new options + # + def self.preprocess_options(options) + options + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/bazaar.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/bazaar.rb new file mode 100644 index 0000000..dee53db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/bazaar.rb @@ -0,0 +1,60 @@ +module Pod + module Downloader + class Bazaar < Base + def self.options + [:revision, :tag] + end + + def options_specific? + !options[:revision].nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:bzr] = url + options[:revision] = `bzr revno`.chomp + options + end + end + + private + + # @group Private Helpers + #-----------------------------------------------------------------------# + + executable :bzr + + def download! + if options[:tag] + download_revision!(options[:tag]) + elsif options[:revision] + download_revision!(options[:revision]) + else + download_head! + end + end + + def download_head! + bzr! 'branch', url, *dir_opts, target_path + end + + def download_revision!(rev) + bzr! 'branch', url, *dir_opts, '-r', rev, @target_path + end + + # @return [String] The command line flags to use according to whether the + # target path exits. + # + def dir_opts + if @target_path.exist? + %w(--use-existing-dir) + else + [] + end + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/gem_version.rb new file mode 100644 index 0000000..964c31b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/gem_version.rb @@ -0,0 +1,8 @@ +module Pod + module Downloader + # @return [String] Downloader’s version, following + # [semver](http://semver.org). + # + VERSION = '1.5.1'.freeze + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/git.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/git.rb new file mode 100644 index 0000000..58feb6a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/git.rb @@ -0,0 +1,158 @@ +module Pod + module Downloader + # Concreted Downloader class that provides support for specifications with + # git sources. + # + class Git < Base + def self.options + [:commit, :tag, :branch, :submodules] + end + + def options_specific? + !(options[:commit] || options[:tag]).nil? + end + + def checkout_options + options = {} + options[:git] = url + options[:commit] = target_git('rev-parse', 'HEAD').chomp + options[:submodules] = true if self.options[:submodules] + options + end + + def self.preprocess_options(options) + return options unless options[:branch] + + command = ['ls-remote', + options[:git], + options[:branch]] + output = Git.execute_command('git', command) + match = commit_from_ls_remote output, options[:branch] + + return options if match.nil? + + options[:commit] = match + options.delete(:branch) + + options + end + + # Matches a commit from the branches reported by git ls-remote. 
+ # + # @note When there is a branch and tag with the same name, it will match + # the branch, since `refs/heads` is sorted before `refs/tags`. + # + # @param [String] output + # The output from git ls-remote. + # + # @param [String] branch_name + # The desired branch to match a commit to. + # + # @return [String] commit hash string, or nil if no match found + # + def self.commit_from_ls_remote(output, branch_name) + return nil if branch_name.nil? + encoded_branch_name = branch_name.dup.force_encoding(Encoding::ASCII_8BIT) + match = %r{([a-z0-9]*)\trefs\/(heads|tags)\/#{Regexp.quote(encoded_branch_name)}}.match(output) + match[1] unless match.nil? + end + + private_class_method :commit_from_ls_remote + + private + + # @!group Base class hooks + + def download! + clone + checkout_commit if options[:commit] + end + + # @return [void] Checks out the HEAD of the git source in the destination + # path. + # + def download_head! + clone(true) + end + + # @!group Download implementations + + executable :git + + # Clones the repo. If possible the repo will be shallowly cloned. + # + # @note The `:commit` option requires a specific strategy as it is not + # possible to specify the commit to the `clone` command. + # + # @note `--branch` command line option can also take tags and detaches + # the HEAD. + # + # @param [Bool] force_head + # If any specific option should be ignored and the HEAD of the + # repo should be cloned. + # + # @param [Bool] shallow_clone + # Whether a shallow clone of the repo should be attempted, if + # possible given the specified {#options}. + # + def clone(force_head = false, shallow_clone = true) + ui_sub_action('Git download') do + begin + git! clone_arguments(force_head, shallow_clone) + update_submodules + rescue DownloaderError => e + if e.message =~ /^fatal:.*does not support (--depth|shallow capabilities)$/im + clone(force_head, false) + else + raise + end + end + end + end + + def update_submodules + return unless options[:submodules] + target_git %w(submodule update --init --recursive) + end + + # The arguments to pass to `git` to clone the repo. + # + # @param [Bool] force_head + # If any specific option should be ignored and the HEAD of the + # repo should be cloned. + # + # @param [Bool] shallow_clone + # Whether a shallow clone of the repo should be attempted, if + # possible given the specified {#options}. + # + # @return [Array] arguments to pass to `git` to clone the repo. + # + def clone_arguments(force_head, shallow_clone) + command = ['clone', url, target_path, '--template='] + + if shallow_clone && !options[:commit] + command += %w(--single-branch --depth 1) + end + + unless force_head + if tag_or_branch = options[:tag] || options[:branch] + command += ['--branch', tag_or_branch] + end + end + + command + end + + # Checks out a specific commit of the cloned repo. 
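+      # When the request has been run through `Downloader.preprocess_options`,
+      # a `:branch` option such as
+      #
+      #     { :git => 'https://example.com/repo.git', :branch => 'main' }
+      #
+      # has already been resolved to a concrete `:commit` via `git ls-remote`
+      # (assuming the remote is reachable), so this checkout is deterministic.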
+ # + def checkout_commit + target_git 'checkout', '--quiet', options[:commit] + update_submodules + end + + def target_git(*args) + git!(['-C', target_path] + args) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/http.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/http.rb new file mode 100644 index 0000000..90e13dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/http.rb @@ -0,0 +1,34 @@ +require 'cocoapods-downloader/remote_file' + +module Pod + module Downloader + class Http < RemoteFile + USER_AGENT_HEADER = 'User-Agent'.freeze + + private + + executable :curl + + def download_file(full_filename) + parameters = ['-f', '-L', '-o', full_filename, url, '--create-dirs', '--netrc-optional', '--retry', '2'] + parameters << user_agent_argument if headers.nil? || + headers.none? { |header| header.casecmp(USER_AGENT_HEADER).zero? } + + headers.each do |h| + parameters << '-H' + parameters << h + end unless headers.nil? + + curl! parameters + end + + # Returns a cURL command flag to add the CocoaPods User-Agent. + # + # @return [String] cURL command -A flag and User-Agent. + # + def user_agent_argument + "-A '#{Http.user_agent_string}'" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/mercurial.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/mercurial.rb new file mode 100644 index 0000000..6dc6c49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/mercurial.rb @@ -0,0 +1,54 @@ +module Pod + module Downloader + class Mercurial < Base + def self.options + [:revision, :tag, :branch] + end + + def options_specific? + !(options[:revision] || options[:tag]).nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:hg] = url + options[:revision] = `hg --debug id -i`.chomp + options + end + end + + private + + executable :hg + + def download! + if options[:revision] + download_revision! + elsif options[:tag] + download_tag! + elsif options[:branch] + download_branch! + else + download_head! + end + end + + def download_head! + hg! 'clone', url, @target_path + end + + def download_revision! + hg! 'clone', url, '--rev', options[:revision], @target_path + end + + def download_tag! + hg! 'clone', url, '--updaterev', options[:tag], @target_path + end + + def download_branch! + hg! 'clone', url, '--updaterev', options[:branch], @target_path + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/remote_file.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/remote_file.rb new file mode 100644 index 0000000..3388342 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/remote_file.rb @@ -0,0 +1,176 @@ +require 'fileutils' +require 'uri' +require 'zlib' + +module Pod + module Downloader + class RemoteFile < Base + def self.options + [:type, :flatten, :sha1, :sha256, :headers] + end + + class UnsupportedFileTypeError < StandardError; end + + private + + executable :unzip + executable :tar + executable :hdiutil + + attr_accessor :filename, :download_path + + def download! 
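+        # Derives a local filename from the archive type (e.g. `file.zip`),
+        # fetches it with the concrete strategy (`curl` for HTTP, `scp` for SCP),
+        # verifies the optional :sha1/:sha256 checksum, and finally unpacks the
+        # archive into the target folder.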
+ @filename = filename_with_type(type) + @download_path = target_path + @filename + download_file(@download_path) + verify_checksum(@download_path) + extract_with_type(@download_path, type) + end + + def type + if options[:type] + options[:type].to_sym + else + type_with_url(url) + end + end + + def headers + options[:headers] + end + + # @note The archive is flattened if it contains only one folder and its + # extension is either `tgz`, `tar`, `tbz` or the options specify + # it. + # + # @return [Bool] Whether the archive should be flattened if it contains + # only one folder. + # + def should_flatten? + if options.key?(:flatten) + options[:flatten] + elsif [:tgz, :tar, :tbz, :txz].include?(type) + true # those archives flatten by default + else + false # all others (actually only .zip) default not to flatten + end + end + + def type_with_url(url) + case URI.parse(url).path + when /\.zip$/ + :zip + when /\.(tgz|tar\.gz)$/ + :tgz + when /\.tar$/ + :tar + when /\.(tbz|tar\.bz2)$/ + :tbz + when /\.(txz|tar\.xz)$/ + :txz + when /\.dmg$/ + :dmg + end + end + + def filename_with_type(type = :zip) + case type + when :zip, :tgz, :tar, :tbz, :txz, :dmg + "file.#{type}" + else + raise UnsupportedFileTypeError, "Unsupported file type: #{type}" + end + end + + def download_file(_full_filename) + raise NotImplementedError + end + + def extract_with_type(full_filename, type = :zip) + unpack_from = full_filename + unpack_to = @target_path + + case type + when :zip + unzip! unpack_from, '-d', unpack_to + when :tar, :tgz, :tbz, :txz + tar! 'xf', unpack_from, '-C', unpack_to + when :dmg + extract_dmg(unpack_from, unpack_to) + else + raise UnsupportedFileTypeError, "Unsupported file type: #{type}" + end + + # If the archive is a tarball and it only contained a folder, move its + # contents to the target (#727) + # + if should_flatten? + contents = target_path.children + contents.delete(target_path + @filename) + entry = contents.first + if contents.count == 1 && entry.directory? + tmp_entry = entry.sub_ext("#{entry.extname}.tmp") + begin + FileUtils.move(entry, tmp_entry) + FileUtils.move(tmp_entry.children, target_path) + ensure + FileUtils.remove_entry(tmp_entry) + end + end + end + + FileUtils.rm(unpack_from) if File.exist?(unpack_from) + end + + def extract_dmg(unpack_from, unpack_to) + require 'rexml/document' + plist_s = hdiutil! 'attach', '-plist', '-nobrowse', unpack_from, '-mountrandom', unpack_to + plist = REXML::Document.new plist_s + xpath = '//key[.="mount-point"]/following-sibling::string' + mount_point = REXML::XPath.first(plist, xpath).text + FileUtils.cp_r(Dir.glob(mount_point + '/*'), unpack_to) + hdiutil! 
'detach', mount_point + end + + def compare_hash(filename, hasher, hash) + incremental_hash = hasher.new + + File.open(filename, 'rb') do |file| + buf = '' + incremental_hash << buf while file.read(1024, buf) + end + + computed_hash = incremental_hash.hexdigest + + if computed_hash != hash + raise DownloaderError, 'Verification checksum was incorrect, ' \ + "expected #{hash}, got #{computed_hash}" + end + end + + # Verify that the downloaded file matches a sha1 hash + # + def verify_sha1_hash(filename, hash) + require 'digest/sha1' + compare_hash(filename, Digest::SHA1, hash) + end + + # Verify that the downloaded file matches a sha256 hash + # + def verify_sha256_hash(filename, hash) + require 'digest/sha2' + compare_hash(filename, Digest::SHA2, hash) + end + + # Verify that the downloaded file matches the hash if set + # + def verify_checksum(filename) + if options[:sha256] + verify_sha256_hash(filename, options[:sha256]) + elsif options[:sha1] + verify_sha1_hash(filename, options[:sha1]) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/scp.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/scp.rb new file mode 100644 index 0000000..c8a92f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/scp.rb @@ -0,0 +1,30 @@ +require 'uri' +require 'cocoapods-downloader/remote_file' + +module Pod + module Downloader + class Scp < RemoteFile + DEFAULT_PORT = 22 + + private + + executable :scp + + def download_file(full_filename) + scp! '-P', port, '-q', source, full_filename + end + + def source + "#{uri.user ? uri.user + '@' : ''}#{uri.host}:'#{uri.path}'" + end + + def port + uri.port || DEFAULT_PORT + end + + def uri + @uri ||= URI.parse(url) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/subversion.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/subversion.rb new file mode 100644 index 0000000..a7ff2ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-downloader-1.5.1/lib/cocoapods-downloader/subversion.rb @@ -0,0 +1,69 @@ +module Pod + module Downloader + class Subversion < Base + def self.options + [:revision, :tag, :folder, :externals, :checkout] + end + + def options_specific? + !(options[:revision] || options[:tag]).nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:svn] = url + options[:revision] = @exported_revision + options + end + end + + private + + executable :svn + + def download! + output = svn!(*subcommand, *reference_url, @target_path) + store_exported_revision(output) + end + + def download_head! 
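+        # Exports (or checks out, with the :checkout option) the repository
+        # trunk and records the revision printed in the `svn` output, so that
+        # `checkout_options` can point back to the exact revision.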
+ output = svn!(*subcommand, *trunk_url, @target_path) + store_exported_revision(output) + end + + def store_exported_revision(output) + output =~ /Exported revision ([0-9]+)\./ + @exported_revision = Regexp.last_match[1] if Regexp.last_match + end + + def subcommand + result = if options[:checkout] + %w(checkout) + else + %w(export) + end + + result += %w(--non-interactive --trust-server-cert --force) + result << '--ignore-externals' if options[:externals] == false + result + end + + def reference_url + result = url.dup + result << '/' << options[:folder] if options[:folder] + result << '/tags/' << options[:tag] if options[:tag] + result = [result] + result << '-r' << options[:revision] if options[:revision] + result + end + + def trunk_url + result = url.dup + result << '/' << options[:folder] if options[:folder] + result << '/trunk' + [result] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.gitignore new file mode 100644 index 0000000..40855cf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.gitignore @@ -0,0 +1,40 @@ +*.gem +*.rbc +/.config +/coverage/ +/InstalledFiles +/pkg/ +/spec/reports/ +/test/tmp/ +/test/version_tmp/ +/tmp/ + +## Specific to RubyMotion: +.dat* +.repl_history +build/ + +## Documentation cache and generated files: +/.yardoc/ +/_yardoc/ +/doc/ +/rdoc/ + +## Environment normalisation: +/.bundle/ +/lib/bundler/man/ + +# for a library or gem, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# Gemfile.lock +# .ruby-version +# .ruby-gemset + +# unless supporting rvm < 1.11.0 or doing something fancy, ignore this: +.rvmrc + +/coverage/ + +# RubyMine Editor +.idea + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.rubocop.yml new file mode 100644 index 0000000..7583205 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.rubocop.yml @@ -0,0 +1,4 @@ +inherit_from: + - .rubocop_cocoapods.yml + + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..9104ebd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.rubocop_cocoapods.yml @@ -0,0 +1,116 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +TrailingComma: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! 
+GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.travis.yml new file mode 100644 index 0000000..282bfc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/.travis.yml @@ -0,0 +1,24 @@ +# Sets Travis to run the Ruby specs on OS X machines to be as close as possible +# to the user environment. +# +language: objective-c +addons: + code_climate: + repo_token: 2926ae7ea0b2a6ced8b0d67efa235769ab85de1d9c9f6702f40d80bacec3c9c4 + +env: + - RVM_RUBY_VERSION=system + # - RVM_RUBY_VERSION=1.8.7-p358 + +before_install: + - export LANG=en_US.UTF-8 + - curl http://curl.haxx.se/ca/cacert.pem -o /usr/local/share/cacert.pem + - source ~/.rvm/scripts/rvm + - if [[ $RVM_RUBY_VERSION != 'system' ]]; then rvm install $RVM_RUBY_VERSION; fi + - rvm use $RVM_RUBY_VERSION + - if [[ $RVM_RUBY_VERSION == 'system' ]]; then sudo gem install bundler --no-ri --no-rdoc; else gem install bundler --no-ri --no-rdoc; fi + +install: + - sudo bundle install --without=documentation + +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/CHANGELOG.md new file mode 100644 index 0000000..4d1a245 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/CHANGELOG.md @@ -0,0 +1,102 @@ +# Cocoapods::Plugins Changelog + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +This version contains no changes. + + +## 1.0.0.beta.1 (2015-12-30) + +This version contains no changes. + + +## 0.4.2 (2015-04-03) + + +## 0.4.1 (2015-02-25) + +* Added the `pod plugins installed` subcommand. + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.4.0 (2014-12-25) + +* Added the `pod plugins publish` subcommand. + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.3.2 (2014-11-02) + +* Switch to using cocoapods-plugins JSON file instead of from Cocoapods.org's repo. 
+ [542919](https://github.com/CocoaPods/cocoapods-plugins/commit/542919902e611c33bb0e02848037474529ddd0f9) + [Florian Hanke](https://github.com/floere) + + +## 0.3.1 (2014-09-12) + +* Restore compatibility with Ruby 1.8.7. + [#30](https://github.com/CocoaPods/cocoapods-plugins/issues/30) + [Fabio Pelosin](https://github.com/fabiopelosin) + +## 0.3.0 (2014-09-11) + +* Added a reminder to add plugin to `plugins.json` once released. + [#27](https://github.com/CocoaPods/cocoapods-plugins/issues/27) + [Olivier Halligon](https://github.com/AliSoftware) + +* Print out the version of plugins when invoked with `--verbose`. + [#16](https://github.com/CocoaPods/cocoapods-plugins/issues/16) + [David Grandinetti](https://github.com/dbgrandi) + +## 0.2.0 (2014-05-20) + +* Migrating to new syntax of CLAide::Command#arguments. + [#23](https://github.com/CocoaPods/cocoapods-plugins/issues/23) + [Olivier Halligon](https://github.com/AliSoftware) + +* Printing URL of template used. + [#21](https://github.com/CocoaPods/cocoapods-plugins/issues/21) + [Olivier Halligon](https://github.com/AliSoftware) + +* `create` subcommand now prefixes the given name if not already. + [#20](https://github.com/CocoaPods/cocoapods-plugins/issues/20) + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.1.1 (2014-05-02) + +* Making `pod plugins` an abstract command, with `list` the default subcommand. + [#11](https://github.com/CocoaPods/cocoapods-plugins/issues/11) + [#12](https://github.com/CocoaPods/cocoapods-plugins/issues/12) + [Olivier Halligon](https://github.com/AliSoftware) + +* Added `search` subcommand to search plugins by name, author and description. + [#6](https://github.com/CocoaPods/cocoapods-plugins/issues/6) + [Olivier Halligon](https://github.com/AliSoftware) + +* Refactoring and improved output formatting. + [#8](https://github.com/CocoaPods/cocoapods-plugins/issues/8) + [#10](https://github.com/CocoaPods/cocoapods-plugins/issues/10) + [#13](https://github.com/CocoaPods/cocoapods-plugins/issues/13) + [Olivier Halligon](https://github.com/AliSoftware) + +* Fixing coding conventions and RuboCop offenses. + [#17](https://github.com/CocoaPods/cocoapods-plugins/issues/17) + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.1.0 (2014-04-22) + +* Initial implementation. + [David Grandinetti](https://github.com/dbgrandi) + +* Added `create` subcommand to create an empty project for a new plugin. 
+ [#6](https://github.com/CocoaPods/cocoapods-plugins/issues/6) + [Boris Bügling](https://github.com/neonichu) diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Gemfile new file mode 100644 index 0000000..23f47e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Gemfile @@ -0,0 +1,18 @@ +source 'https://rubygems.org' + +gemspec + +group :development do + gem 'cocoapods', :git => 'https://github.com/CocoaPods/CocoaPods.git', :branch => 'master' + gem 'cocoapods-core', :git => 'https://github.com/CocoaPods/Core.git', :branch => 'master' + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + + gem 'bacon' + gem 'mocha-on-bacon' + gem 'prettybacon' + gem 'vcr' + gem 'webmock' + + gem 'codeclimate-test-reporter', :require => nil + gem 'rubocop' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Gemfile.lock new file mode 100644 index 0000000..de064d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Gemfile.lock @@ -0,0 +1,134 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: 00927807580554b7d3485d673c90386d3fd8fde0 + branch: master + specs: + claide (0.8.1) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 06f9a9870f1fc205f06c6d0193502fd7cf0241ef + branch: master + specs: + cocoapods (0.36.3) + activesupport (>= 3.2.15) + claide (~> 0.8.1) + cocoapods-core (= 0.36.3) + cocoapods-downloader (~> 0.9.0) + cocoapods-plugins (= 1.0.0) + cocoapods-trunk (~> 0.6.0) + cocoapods-try (~> 0.4.3) + colored (~> 1.2) + escape (~> 0.0.4) + molinillo (~> 0.2.1) + nap (~> 0.8) + open4 (~> 1.3) + xcodeproj (~> 0.23.1) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: 11f3eee2008e822e5af0b01866a5a0b376c930a7 + branch: master + specs: + cocoapods-core (0.36.3) + activesupport (>= 3.2.15) + fuzzy_match (~> 2.0.4) + nap (~> 0.8.0) + +PATH + remote: . 
+ specs: + cocoapods-plugins (1.0.0) + nap + +GEM + remote: https://rubygems.org/ + specs: + activesupport (4.2.1) + i18n (~> 0.7) + json (~> 1.7, >= 1.7.7) + minitest (~> 5.1) + thread_safe (~> 0.3, >= 0.3.4) + tzinfo (~> 1.1) + addressable (2.3.7) + ast (2.0.0) + astrolabe (1.3.0) + parser (>= 2.2.0.pre.3, < 3.0) + bacon (1.2.0) + cocoapods-downloader (0.9.0) + cocoapods-trunk (0.6.0) + nap (>= 0.8) + netrc (= 0.7.8) + cocoapods-try (0.4.3) + codeclimate-test-reporter (0.4.0) + simplecov (>= 0.7.1, < 1.0.0) + colored (1.2) + crack (0.4.2) + safe_yaml (~> 1.0.0) + docile (1.1.5) + escape (0.0.4) + fuzzy_match (2.0.4) + i18n (0.7.0) + json (1.8.2) + metaclass (0.0.4) + minitest (5.5.1) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + molinillo (0.2.3) + multi_json (1.10.1) + nap (0.8.0) + netrc (0.7.8) + open4 (1.3.4) + parser (2.2.0.3) + ast (>= 1.1, < 3.0) + powerpack (0.1.0) + prettybacon (0.0.2) + bacon (~> 1.2) + rainbow (2.0.0) + rake (10.3.2) + rubocop (0.29.1) + astrolabe (~> 1.3) + parser (>= 2.2.0.1, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.4) + ruby-progressbar (1.7.5) + safe_yaml (1.0.4) + simplecov (0.9.0) + docile (~> 1.1.0) + multi_json + simplecov-html (~> 0.8.0) + simplecov-html (0.8.0) + thread_safe (0.3.5) + tzinfo (1.2.2) + thread_safe (~> 0.1) + vcr (2.9.3) + webmock (1.20.4) + addressable (>= 2.3.6) + crack (>= 0.3.2) + xcodeproj (0.23.1) + activesupport (>= 3) + colored (~> 1.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-plugins! + codeclimate-test-reporter + mocha-on-bacon + prettybacon + rake + rubocop + vcr + webmock + +BUNDLED WITH + 1.11.2 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/LICENSE new file mode 100644 index 0000000..0d932da --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 David Grandinetti + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/README.md new file mode 100644 index 0000000..a38af09 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/README.md @@ -0,0 +1,44 @@ +# Cocoapods plugins + +[![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-plugins/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-plugins) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/cocoapods-plugins.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-plugins) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/cocoapods-plugins.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-plugins) + +CocoaPods plugin which shows info about available CocoaPods plugins or helps you get started developing a new plugin. Yeah, it's very meta. + +## Installation + + $ gem install cocoapods-plugins + +## Usage + +##### List installed plugins + + $ pod plugins installed + +List all installed CocoaPods plugins with their respective version (and pre_install/post_insall hooks if any) + +##### List known plugins + + $ pod plugins list + +List all known CocoaPods plugins (according to the list hosted on `http://github.com/CocoaPods/cocoapods-plugins`) + +##### Search plugins + + $ pod plugins search QUERY + +Search plugins whose name contains the given text (ignoring case). With --full, it searches by name but also by author and description. + +##### Create a new plugin + + $ pod plugins create NAME [TEMPLATE_URL] + +Create a scaffold for the development of a new plugin according to the CocoaPods best practices. +If a `TEMPLATE_URL`, pointing to a git repo containing a compatible template, is specified, it will be used in place of the default one. + +## Get your plugin listed + + $ pod plugins publish + +Create an issue in the `cocoapods-plugins` GitHub repository to ask for your plugin to be added to the official list (with the proper JSON fragment to be added to `plugins.json` so we just have to copy/paste it). diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Rakefile new file mode 100644 index 0000000..ce44a28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/Rakefile @@ -0,0 +1,88 @@ +# Bootstrap +#-----------------------------------------------------------------------------# + +task :bootstrap do + if system('which bundle') + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] 
Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + + require 'bundler/gem_tasks' + + task :default => 'spec' + + # Spec + #-----------------------------------------------------------------------------# + + desc 'Runs all the specs' + task :spec do + start_time = Time.now + sh "bundle exec bacon #{specs('**')}" + duration = Time.now - start_time + puts "Tests completed in #{duration}s" + Rake::Task['rubocop'].invoke + Rake::Task['validate_json'].invoke + end + + def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') + end + + # Rubocop + #-----------------------------------------------------------------------------# + + desc 'Checks code style' + task :rubocop do + require 'rubocop' + cli = RuboCop::CLI.new + result = cli.run(FileList['{spec,lib}/**/*.rb']) + abort('RuboCop failed!') unless result == 0 + end + + # plugins.json + #----------------------------------------------------------------------------# + + desc 'Validates plugins.json' + task :validate_json do + require 'json' + require 'pathname' + + puts 'Validating plugins.json' + + json_file = Pathname(__FILE__).parent + 'plugins.json' + json = json_file.read + plugins = JSON.load(json) + abort('Invalid JSON in plugins.json') unless plugins + keys = %w(gem name author social_media_url url description) + optional_keys = %w(social_media_url) + errors = plugins['plugins'].reduce([]) do |errors, plugin| + extra_keys = plugin.keys - keys + unless extra_keys.empty? + errors << "plugin `#{plugin['name']}` has extra keys #{extra_keys}" + end + (keys - optional_keys).each do |key| + unless plugin[key] + errors << "plugin `#{plugin['name']}` is missing key `#{key}`" + end + end + errors + end + unless errors.empty? + abort("Invalid plugins.json:\n\n#{errors.join("\n")}") + end + end + +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' \ + "\e[0m" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/cocoapods-plugins.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/cocoapods-plugins.gemspec new file mode 100644 index 0000000..555c86f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/cocoapods-plugins.gemspec @@ -0,0 +1,31 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_plugins.rb' + +Gem::Specification.new do |spec| + spec.name = 'cocoapods-plugins' + spec.version = CocoapodsPlugins::VERSION + spec.authors = ['David Grandinetti', 'Olivier Halligon'] + spec.summary = %q{CocoaPods plugin which shows info about available CocoaPods plugins.} + spec.description = <<-DESC + This CocoaPods plugin shows information about all available CocoaPods plugins + (yes, this is very meta!). 
+ This CP plugin adds the "pod plugins" command to CocoaPods so that you can list + all plugins (registered in the reference JSON hosted at CocoaPods/cocoapods-plugins) + DESC + spec.homepage = 'https://github.com/cocoapods/cocoapods-plugins' + spec.license = 'MIT' + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ['lib'] + + spec.add_runtime_dependency 'nap' + + spec.add_development_dependency 'bundler', '~> 1.3' + spec.add_development_dependency 'rake' + + spec.required_ruby_version = '>= 2.0.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..4ef5c44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/plugins' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugins.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugins.rb new file mode 100644 index 0000000..cb8a09e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugins.rb @@ -0,0 +1,5 @@ +# The namespace of the Cocoapods plugins plugin. +# +module CocoapodsPlugins + VERSION = '1.0.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_helper.rb new file mode 100644 index 0000000..e1a0715 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_helper.rb @@ -0,0 +1,120 @@ +require 'pod/command/gem_index_cache' + +module Pod + class Command + # This module is used by Command::PluginsHelper to download the Gem + # Specification data, check if a Gem is installed, and provide info + # on all versions of a Gem. + # + module GemHelper + # A GemIndexCache to manage downloading/caching the spec index. + # + @cache = nil + + # Getter for GemIndexCache + # + # @return [GemIndexCache] a new or memoized GemIndexCache + # + def self.cache + @cache ||= GemIndexCache.new + end + + # Instantiate a cache and download the spec index if it has + # not already been done. + # + def self.download_and_cache_specs + cache.download_and_cache_specs + end + + # Tells if a gem is installed + # + # @param [String] gem_name + # The name of the plugin gem to test + # + # @param [String] version_string + # An optional version string, used to check if a specific + # version of a gem is installed + # + # @return [Bool] true if the gem is installed, false otherwise. + # + def self.gem_installed?(gem_name, version_string = nil) + version = Gem::Version.new(version_string) if version_string + + if Gem::Specification.respond_to?(:find_all_by_name) + gems = Gem::Specification.find_all_by_name(gem_name) + return !gems.empty? unless version + gems.each { |gem| return true if gem.version == version } + false + else + dep = Gem::Dependency.new(gem_name, version_string) + !Gem.source_index.search(dep).empty? + end + end + + # Get the version of a gem that is installed locally. If more than + # one version is installed, this returns the first version found, + # which MAY not be the highest/newest version. + # + # @return [String] The version of the gem that is installed, + # or nil if it is not installed. 
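+      #
+      # @example
+      #   GemHelper.installed_version('cocoapods-try') # => e.g. "0.4.3", or nil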
+ # + def self.installed_version(gem_name) + if Gem::Specification.respond_to?(:find_all_by_name) + gem = Gem::Specification.find_all_by_name(gem_name).first + else + dep = Gem::Dependency.new(gem_name) + gem = Gem.source_index.search(dep).first + end + gem ? gem.version.to_s : nil + end + + # Create a string containing all versions of a plugin, + # colored to indicate if a specific version is installed + # locally. + # + # @param [String] plugin_name + # The name of the plugin gem + # + # @param [GemIndexCache] index_cache + # Optional index cache can be passed in, otherwise + # the module instance is used. + # + # @return [String] a string containing a comma separated + # concatenation of all versions of a plugin + # that were found on rubygems.org + # + def self.versions_string(plugin_name, index_cache = @cache) + name_tuples = index_cache.specs_with_name(plugin_name) + sorted_versions = name_tuples.sort_by(&:version) + version_strings = colorize_versions(sorted_versions) + version_strings.join ', ' + end + + #----------------# + + private + + # Colorize an Array of version strings so versions that are installed + # are green and uninstalled versions are yellow. + # + # @param [Array] versions + # sorted array of Gem::NameTuples representing all versions of + # a plugin gem. + # + # @return [Array] An array of strings, each one being the version + # string of the same plugin + # + def self.colorize_versions(versions) + colored_strings = [] + versions.reverse_each do |name_tuple| + if gem_installed?(name_tuple.name, name_tuple.version.to_s) + colored_strings << name_tuple.version.to_s.green + else + colored_strings << name_tuple.version.to_s.yellow + end + end + colored_strings + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_index_cache.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_index_cache.rb new file mode 100644 index 0000000..b9c667a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_index_cache.rb @@ -0,0 +1,87 @@ +require 'pod/command/gem_helper' + +module Pod + class Command + # This class is used by Command::GemsHelper to download the Gem + # Specification index from rubygems.org and provide info about + # the index. + # + class GemIndexCache + # A memoized hash of all the rubygem specs. If it is nil, the specs will + # be downloaded, which will take a few seconds to download. + # + # @return [Hash] The hash of all rubygems + # + def specs + @specs ||= download_specs + end + + # Alias to make the initial caching process more readable. + # + alias_method :download_and_cache_specs, :specs + + # Get an Array of Gem::NameTuple objects that match a given + # spec name. + # + # @param [String] name + # The name of the gem to match on (e.g. 'cocoapods-try') + # + # @return [Array] Array of Gem::NameTuple that match the name + # + def specs_with_name(name) + matching_specs = @specs.select do |spec| + spec[0].name == name + end + + name_tuples = [] + matching_specs.each do |(name_tuple, _)| + name_tuples << name_tuple + end + + name_tuples + end + + #----------------# + + private + + # Force the rubygem spec index file + # + # @return [Hash] The hash of all rubygems + # + def download_specs + UI.puts 'Downloading Rubygem specification index...' + fetcher = Gem::SpecFetcher.fetcher + results, errors = fetcher.available_specs(:released) + + unless errors.empty? 
+ UI.puts 'Error downloading Rubygem specification index: ' + + errors.first.error.to_s + return [] + end + + flatten_fetcher_results(results) + end + + # Flatten the dictionary returned from Gem::SpecFetcher + # to a simple array. + # + # @param [Hash] results + # the hash returned from the call to + # Gem::SpecFetcher.available_specs() + # + # @return [Array] Array of all spec results + # + def flatten_fetcher_results(results) + specs = [] + results.each do |source, source_specs| + source_specs.each do |tuple| + specs << [tuple, source] + end + end + + specs + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins.rb new file mode 100644 index 0000000..632b10e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins.rb @@ -0,0 +1,30 @@ +require 'rest' +require 'json' + +# The CocoaPods namespace +# +module Pod + class Command + # The pod plugins command. + # + class Plugins < Command + require 'pod/command/plugins/list' + require 'pod/command/plugins/search' + require 'pod/command/plugins/create' + require 'pod/command/plugins/publish' + require 'pod/command/plugins/installed' + + self.abstract_command = true + self.default_subcommand = 'list' + + self.summary = 'Show available CocoaPods plugins' + self.description = <<-DESC + Lists or searches the available CocoaPods plugins + and show if you have them installed or not. + + Also allows you to quickly create a new Cocoapods + plugin using a provided template. + DESC + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/create.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/create.rb new file mode 100644 index 0000000..17344e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/create.rb @@ -0,0 +1,120 @@ +require 'pod/command/plugins_helper' + +module Pod + class Command + class Plugins + # The create subcommand. Used to create a new plugin using either the + # default template (CocoaPods/cocoapods-plugin-template) or a custom + # template + # + class Create < Plugins + NAME_PREFIX = 'cocoapods-' + + self.summary = 'Creates a new plugin' + self.description = <<-DESC + Creates a scaffold for the development of a new plugin + named `NAME` according to the CocoaPods best practices. + + If a `TEMPLATE_URL`, pointing to a git repo containing a + compatible template, is specified, it will be used + in place of the default one. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('TEMPLATE_URL', false), + ] + + def initialize(argv) + @name = argv.shift_argument + unless @name.nil? || @name.empty? || @name.start_with?(NAME_PREFIX) + @name = NAME_PREFIX + @name.dup + end + @template_url = argv.shift_argument + super + end + + def validate! + super + if @name.nil? || @name.empty? + help! 'A name for the plugin is required.' + end + + help! 'The plugin name cannot contain spaces.' 
if @name.match(/\s/) + end + + def run + clone_template + configure_template + show_reminder + end + + #----------------------------------------# + + private + + # !@group Private helpers + + extend Executable + executable :git + + TEMPLATE_BASE_URL = 'https://github.com/CocoaPods/' + TEMPLATE_REPO = TEMPLATE_BASE_URL + 'cocoapods-plugin-template.git' + TEMPLATE_INFO_URL = TEMPLATE_BASE_URL + 'cocoapods-plugin-template' + + # Clones the template from the remote in the working directory using + # the name of the plugin. + # + # @return [void] + # + def clone_template + UI.section("-> Creating `#{@name}` plugin") do + UI.notice "using template '#{template_repo_url}'" + command = ['clone', template_repo_url, @name] + if method(:git!).arity == -1 + git! command + else + # TODO: delete this conditional and use the other branch when + # 0.5.0 is released + require 'shellwords' + git! command.map(&:to_s).map(&:shellescape).join(' ') + end + end + end + + # Runs the template configuration utilities. + # + # @return [void] + # + def configure_template + UI.section('-> Configuring template') do + Dir.chdir(@name) do + if File.file? 'configure' + system "./configure #{@name}" + else + UI.warn 'Template does not have a configure file.' + end + end + end + end + + # Checks if a template URL is given else returns the TEMPLATE_REPO URL + # + # @return String + # + def template_repo_url + @template_url || TEMPLATE_REPO + end + + # Shows a reminder to the plugin author to make a Pull Request + # in order to update plugins.json once the plugin is released + # + def show_reminder + repo = PluginsHelper::PLUGINS_JSON_REPO + UI.notice "Don't forget to create a Pull Request on #{repo}\n" \ + ' to add your plugin to the plugins.json file once it is released!' + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/installed.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/installed.rb new file mode 100644 index 0000000..e777b8f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/installed.rb @@ -0,0 +1,92 @@ + +module Pod + class Command + class Plugins + # The `installed` subcommand. + # Used to list all installed plugins. + # + class Installed < Plugins + self.summary = 'List plugins installed on your machine' + self.description = <<-DESC + List all installed plugins and their + respective version. + DESC + + def self.options + # Silent mode is meaningless for this command as + # the command only purpose is to print information + super.reject { |option, _| option == '--silent' } + end + + def run + plugins = CLAide::Command::PluginManager.specifications + + UI.title 'Installed CocoaPods Plugins:' do + if verbose? + print_verbose_list(plugins) + else + print_compact_list(plugins) + end + end + end + + private + + # Print the given plugins as a compact list, one line + # per plugin with only its name & version + # + # @param [Array] plugins + # The list of plugins to print + # + def print_compact_list(plugins) + max_length = plugins.map { |p| p.name.length }.max + plugins.each do |plugin| + name_just = plugin.name.ljust(max_length) + hooks = registered_hooks(plugin) + hooks_list = '' + unless hooks.empty? 
+ suffix = 'hook'.pluralize(hooks.count) + hooks_list = " (#{hooks.to_sentence} #{suffix})" + end + UI.puts_indented " - #{name_just} : #{plugin.version}#{hooks_list}" + end + end + + # Print the given plugins as a verbose list, with name, version, + # homepage and summary for each plugin. + # + # @param [Array] plugins + # The list of plugins to print + # + def print_verbose_list(plugins) + plugins.each do |plugin| + hooks = registered_hooks(plugin) + + UI.title(plugin.name) + UI.labeled('Version', plugin.version.to_s) + UI.labeled('Hooks', hooks) unless hooks.empty? + unless plugin.homepage.empty? + UI.labeled('Homepage', plugin.homepage) + end + UI.labeled('Summary', plugin.summary) + end + end + + # Names of the registered hook(s) (if any) for the given plugin + # + # @return [Array] + # Names of the hooks the given plugin did register for. + # + def registered_hooks(plugin) + registrations = Pod::HooksManager.registrations + return [] if registrations.nil? + + registrations.reduce([]) do |list, (name, hooks)| + list.push(name) if hooks.any? { |h| h.plugin_name == plugin.name } + list + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/list.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/list.rb new file mode 100644 index 0000000..e773165 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/list.rb @@ -0,0 +1,33 @@ +require 'pod/command/plugins_helper' +require 'pod/command/gem_helper' + +module Pod + class Command + class Plugins + # The list subcommand. Used to list all known plugins + # + class List < Plugins + self.summary = 'List all known plugins' + self.description = <<-DESC + List all known plugins (according to the list + hosted on github.com/CocoaPods/cocoapods-plugins) + DESC + + def self.options + super.reject { |option, _| option == '--silent' } + end + + def run + plugins = PluginsHelper.known_plugins + GemHelper.download_and_cache_specs if self.verbose? + + UI.title 'Available CocoaPods Plugins:' do + plugins.each do |plugin| + PluginsHelper.print_plugin plugin, self.verbose? + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/publish.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/publish.rb new file mode 100644 index 0000000..b10ac79 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/publish.rb @@ -0,0 +1,76 @@ +require 'json' +require 'cgi' + +module Pod + class Command + class Plugins + # The publish subcommand. Used to request to add a plugin + # to the official list of plugins + # + class Publish < Plugins + self.summary = 'Request to add the plugin to the official plugins list' + self.description = <<-DESC + This command is only useful for developers of CocoaPods plugins. + + It opens a new GitHub issue to request adding the plugin + currently being developped to the list of official plugins. + + The current directory is expected to have one (and only one) + `.gemspec` file describing the CocoaPods plugin gem. + DESC + + def initialize(argv) + @gemspec_files = Dir.glob('*.gemspec') + super + end + + def validate! + super + if @gemspec_files.count > 1 + help! 'There is more than one gemspec in the current directory' + elsif @gemspec_files.empty? + help! 'No `.gemspec` file found in the current directory.' 
+ end + end + + def run + gemspec = Gem::Specification.load(@gemspec_files.first) + unless gemspec.name.start_with?('cocoapods-') + UI.notice 'Your gem name should start with `cocoapods-` to be ' \ + 'loaded as a plugin by CocoaPods' + end + + json = json_from_gemspec(gemspec) + + title = "[plugins.json] Add #{gemspec.name}" + body = 'Please add the following entry to the `plugins.json` file:' \ + "\n\n```\n#{json}\n```" + open_new_issue_url(title, body) + end + + private + + def json_from_gemspec(gemspec) + JSON.pretty_generate( + :gem => gemspec.name, + :name => pretty_name_from_gemname(gemspec.name), + :author => gemspec.authors.join(', '), + :url => gemspec.homepage, + :description => gemspec.summary || gemspec.description, + ) + end + + def pretty_name_from_gemname(gemname) + gemname.split('-').map(&:capitalize).join(' '). + gsub(/cocoapods/i, 'CocoaPods') + end + + def open_new_issue_url(title, body) + url = 'https://github.com/CocoaPods/cocoapods-plugins/issues/new?' \ + "title=#{CGI.escape(title)}&body=#{CGI.escape(body)}" + `open "#{url}"` + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/search.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/search.rb new file mode 100644 index 0000000..bfc6718 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/search.rb @@ -0,0 +1,58 @@ +require 'pod/command/plugins_helper' +require 'pod/command/gem_helper' + +module Pod + class Command + class Plugins + # The search subcommand. + # Used to search a plugin in the list of known plugins, + # searching into the name, author description fields + # + class Search < Plugins + self.summary = 'Search for known plugins' + self.description = <<-DESC + Searches plugins whose 'name' contains the given `QUERY`. + `QUERY` is a regular expression, ignoring case. + + With `--full`, it also searches by 'author' and 'description'. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', true), + ] + + def self.options + [ + ['--full', 'Search by name, author, and description'], + ].concat(super.reject { |option, _| option == '--silent' }) + end + + def initialize(argv) + @full_text_search = argv.flag?('full') + @query = argv.shift_argument unless argv.arguments.empty? + super + end + + def validate! + super + help! 'A search query is required.' if @query.nil? || @query.empty? + begin + /#{@query}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + end + + def run + plugins = PluginsHelper.matching_plugins(@query, @full_text_search) + GemHelper.download_and_cache_specs if self.verbose? + + UI.title "Available CocoaPods Plugins matching '#{@query}':" + plugins.each do |plugin| + PluginsHelper.print_plugin plugin, self.verbose? 
+ end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins_helper.rb new file mode 100644 index 0000000..2a627ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins_helper.rb @@ -0,0 +1,137 @@ +require 'pod/command/gem_helper' + +module Pod + class Command + # This module is used by Command::Plugins::List + # and Command::Plugins::Search to download and parse + # the JSON describing the plugins list and manipulate it + # + module PluginsHelper + PLUGINS_JSON_REPO_NAME = 'CocoaPods/cocoapods-plugins' + PLUGINS_JSON_REPO = 'https://github.com/' + PLUGINS_JSON_REPO_NAME + PLUGINS_JSON_REL_URL = '/master/plugins.json' + + PLUGINS_RAW_URL = 'https://raw.githubusercontent.com/' \ + + PLUGINS_JSON_REPO_NAME + PLUGINS_JSON_REL_URL + + # Force-download the JSON + # + # @return [Hash] The hash representing the JSON with all known plugins + # + def self.download_json + UI.puts 'Downloading Plugins list...' + response = REST.get(PLUGINS_RAW_URL) + if response.ok? + parse_json(response.body) + else + raise Informative, 'Could not download plugins list ' \ + "from cocoapods-plugins: #{response.inspect}" + end + end + + # The list of all known plugins, according to + # the JSON hosted on github's cocoapods-plugins + # + # @return [Array] all known plugins, as listed in the downloaded JSON + # + def self.known_plugins + json = download_json + json['plugins'] + end + + # Filter plugins to return only matching ones + # + # @param [String] query + # A query string that corresponds to a valid RegExp pattern. + # + # @param [Bool] full_text_search + # false only searches in the plugin's name. + # true searches in the plugin's name, author and description. + # + # @return [Array] all plugins matching the query + # + def self.matching_plugins(query, full_text_search) + query_regexp = /#{query}/i + known_plugins.reject do |plugin| + texts = [plugin['name']] + if full_text_search + texts << plugin['author'] if plugin['author'] + texts << plugin['description'] if plugin['description'] + end + texts.grep(query_regexp).empty? + end + end + + # Display information about a plugin + # + # @param [Hash] plugin + # The hash describing the plugin + # + # @param [Bool] verbose + # If true, will also print the author of the plugins. + # Defaults to false. + # + def self.print_plugin(plugin, verbose = false) + plugin_colored_name = plugin_title(plugin) + + UI.title(plugin_colored_name, '', 1) do + UI.puts_indented plugin['description'] + ljust = verbose ? 16 : 11 + UI.labeled('Gem', plugin['gem'], ljust) + UI.labeled('URL', plugin['url'], ljust) + print_verbose_plugin(plugin, ljust) if verbose + end + end + + #----------------# + + private + + # Smaller helper to print out the verbose details + # for a plugin. + # + # @param [Hash] plugin + # The hash describing the plugin + # + # @param [Integer] ljust + # The left justification that is passed into UI.labeled + # + def self.print_verbose_plugin(plugin, ljust) + UI.labeled('Author', plugin['author'], ljust) + unless GemHelper.cache.specs.empty? 
+ versions = GemHelper.versions_string(plugin['gem']) + UI.labeled('Versions', versions, ljust) + end + end + + # Parse the given JSON data, handling parsing errors if any + # + # @param [String] json_str + # The string representation of the JSON to parse + # + def self.parse_json(json_str) + JSON.parse(json_str) + rescue JSON::ParserError => e + raise Informative, "Invalid plugins list from cocoapods-plugins: #{e}" + end + + # Format the title line to print the plugin info with print_plugin + # coloring it according to whether the plugin is installed or not + # + # @param [Hash] plugin + # The hash describing the plugin + # + # @return [String] The formatted and colored title + # + def self.plugin_title(plugin) + plugin_name = "-> #{plugin['name']}" + if GemHelper.gem_installed?(plugin['gem']) + plugin_name += " (#{GemHelper.installed_version(plugin['gem'])})" + plugin_name.green + else + plugin_name.yellow + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/plugins.json b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/plugins.json new file mode 100644 index 0000000..2f6f985 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/plugins.json @@ -0,0 +1,245 @@ +{ + "plugins": [ + { + "gem": "cocoapods-appledoc", + "name": "AppleDoc", + "author": "Kyle Fuller", + "social_media_url": "http://twitter.com/kylefuller", + "url": "https://github.com/CocoaPods/cocoapods-appledoc", + "description": "Generates docset and documentation for a pod." + }, + { + "gem": "cocoapods-deploy", + "name": "Deploy", + "author": "James Campbell", + "social_media_url": "https://twitter.com/jcampbell_05", + "url": "https://github.com/jcampbell05/cocoapods-deploy", + "description": "Deploys dependencies for a CocoaPods project without needing to clone the repo (Similar to Bundler's `--deployment`)." + }, + { + "gem": "cocoapods-rome", + "name": "Rome", + "author": "Boris Bügling", + "social_media_url": "https://twitter.com/neonichu", + "url": "https://github.com/neonichu/rome", + "description": "Rome makes it easy to build a list of frameworks for consumption outside of Xcode, e.g. for a Swift script." + }, + { + "gem": "cocoapods-deintegrate", + "name": "Deintegrate", + "author": "Kyle Fuller", + "social_media_url": "http://twitter.com/kylefuller", + "url": "https://github.com/kylef/cocoapods-deintegrate", + "description": "Deintegrates a project from CocoaPods." + }, + { + "gem": "cocoapods-dependencies", + "name": "Pod Dependencies", + "author": "Samuel E. Giddins", + "social_media_url": "http://twitter.com/segiddins", + "url": "https://github.com/segiddins/cocoapods-dependencies", + "description": "Shows a project's CocoaPod dependency graph." + }, + { + "gem": "cocoapods-browser", + "name": "Pod browser", + "author": "Toshihiro Morimoto", + "social_media_url": "http://twitter.com/dealforest", + "url": "https://github.com/dealforest/cocoapods-browser", + "description": "Open a pod's homepage in the browser." + }, + { + "gem": "cocoapods-check_latest", + "name": "Check Latest", + "author": "Yuji Nakayama", + "social_media_url": "http://twitter.com/nkym37", + "url": "https://github.com/yujinakayama/cocoapods-check_latest", + "description": "Checks if the latest version of a pod is up to date." 
+ }, + { + "gem": "cocoapods-docs", + "name": "Pod docs", + "author": "CocoaPods Dev Team", + "social_media_url": "http://twitter.com/CocoaPods", + "url": "https://github.com/CocoaPods/cocoapods-docs", + "description": "Convenient access to the documentation of a Pod via cocoadocs.org." + }, + { + "gem": "cocoapods-docstats", + "name": "docstats", + "author": "Boris Bügling", + "social_media_url": "http://twitter.com/NeoNacho", + "url": "https://github.com/neonichu/cocoapods-docstats", + "description": "Showing documentation metrics of Pods." + }, + { + "gem": "cocoapods-open", + "name": "open", + "author": "Les Hill", + "social_media_url": "http://twitter.com/leshill", + "url": "https://github.com/leshill/open_pod_bay", + "description": "Open a pod’s workspace." + }, + { + "gem": "cocoapods-podfile_info", + "name": "Pod info", + "author": "CocoaPods Dev Team", + "social_media_url": "http://twitter.com/CocoaPods", + "url": "https://github.com/cocoapods/cocoapods-podfile_info", + "description": "Shows information on installed Pods." + }, + { + "gem": "cocoapods-repo-svn", + "name": "repo-svn", + "author": "Dusty Clarkda", + "social_media_url": "http://twitter.com/_clarkda", + "url": "https://github.com/clarkda/cocoapods-repo-svn", + "description": "Adds subversion support to manage spec-repositories." + }, + { + "gem": "cocoapods-repo-hg", + "name": "repo-hg", + "author": "Dusty Clarkda", + "social_media_url": "http://twitter.com/_clarkda", + "url": "https://github.com/clarkda/cocoapods-repo-hg", + "description": "Adds mercurial support to manage spec-repositories." + }, + { + "gem": "cocoapods-try", + "name": "Pod try", + "author": "CocoaPods Dev Team", + "social_media_url": "http://twitter.com/CocoaPods", + "url": "https://github.com/CocoaPods/cocoapods-try", + "description": "Quickly try the demo project of a Pod." + }, + { + "gem": "cocoapods-watch", + "name": "Pod watch", + "author": "Marin Usalj", + "url": "https://github.com/supermarin/cocoapods-watch", + "description": "Watch for Podfile changes and run pod install." + }, + { + "gem": "cocoapods-roulette", + "name": "Pods Roulette", + "author": "Heiko Behrens, Marcel Jackwerth", + "url": "https://github.com/sirlantis/cocoapods-roulette", + "description": "Builds an empty project with three random pods." + }, + { + "gem": "cocoapods-sorted-search", + "name": "Sorted Search", + "author": "Denys Telezhkin", + "url": "https://github.com/DenHeadless/cocoapods-sorted-search", + "description": "Adds a sort subcommand for pod search to sort search results by amount of stars, forks, or github activity." + }, + { + "gem": "cocoapods-release", + "name": "Release", + "author": "Oliver Letterer", + "social_media_url": "https://twitter.com/oletterer", + "url": "https://github.com/Sparrow-Labs/cocoapods-release", + "description": "Tags and releases pods for you." + }, + { + "gem": "cocoapods-clean", + "name": "cocoapods clean", + "author": "Luca Querella", + "url": "https://github.com/BendingSpoons/cocoapods-clean", + "description": "Remove Podfile.lock, Pods/ and *.xcworkspace." + }, + { + "gem": "cocoapods-keys", + "name": "CocoaPods Keys", + "author": "Orta Therox, Samuel E. Giddins", + "url": "https://github.com/orta/cocoapods-keys", + "description": "Store sensitive data in your Mac's keychain, that will be installed into your app's source code via the Pods library." 
+ }, + { + "gem": "cocoapods-packager", + "name": "CocoaPods Packager", + "author": "Kyle Fuller, Boris Bügling", + "url": "https://github.com/CocoaPods/cocoapods-packager", + "description": "Generate a framework or static library from a podspec." + }, + { + "gem": "cocoapods-links", + "name": "CocoaPods Links", + "author": "Mike Owens", + "social_media_url": "https://twitter.com/mikejowens", + "url": "https://github.com/mowens/cocoapods-links", + "description": "A CocoaPods plugin to manage local development pods" + }, + { + "gem": "cocoapods-prune-localizations", + "name": "CocoaPods Prune Localizations", + "author": "Diego Torres", + "social_media_url": "https://twitter.com/dtorres", + "url": "https://github.com/dtorres/cocoapods-prune-localizations", + "description": "Upon running pod install, this plugin will remove unused localizations by your project" + }, + { + "gem": "cocoapods-readonly", + "name": "CocoaPods Readonly", + "author": "Mason Glidden", + "url": "https://github.com/Yelp/cocoapods-readonly", + "description": "Developers switching from submodules are used to modifying library source files from within Xcode. This locks those files as needed so Xcode warns you when attempting to edit them." + }, + { + "gem": "cocoapods-thumbs", + "name": "CocoaPods Thumbs", + "author": "Pablo Bendersky", + "url": "https://github.com/quadion/cocoapods-thumbs", + "description": "Use cocoapods-thumbs to check upvotes or downvotes of Podspecs from your peers based on past experiences." + }, + { + "gem": "cocoapods-blacklist", + "name": "CocoaPods Blacklist", + "author": "David Grandinetti", + "url": "https://github.com/yahoo/cocoapods-blacklist", + "description": "Check if a project is using a banned version of a pod. Handy for security audits." + }, + { + "gem": "cocoapods-superdeintegrate", + "name": "CocoaPods Superdeintegrate", + "author": "Ash Furrow", + "url": "https://github.com/ashfurrow/cocoapods-superdeintegrate", + "description": "Deletes the CocoaPods cache, your derived data folder, and makes sure that your Pods directory is gone." + }, + { + "gem": "cocoapods-archive", + "name": "CocoaPods Archive", + "author": "fjbelchi, alexito4", + "url": "https://github.com/fjbelchi/cocoapods-archive", + "description": "cocoapods-archive plugin that archive your project" + }, + { + "gem": "cocoapods-check", + "name": "CocoaPods Check", + "author": "Matt Di Iorio", + "url": "https://github.com/square/cocoapods-check", + "description": "Displays differences between locked and installed Pods" + }, + { + "gem": "cocoapods-acknowledgements", + "name": "CocoaPods Acknowledgements", + "author": "Fabio Pelosin, Orta Therox, Marcelo Fabri", + "url": "https://github.com/CocoaPods/cocoapods-acknowledgements", + "description": "CocoaPods plugin that generates an acknowledgements plist to make it easy to create tools to use in apps." + }, + { + "gem": "cocoapods-generator", + "name": "CocoaPods Generator", + "author": "从权", + "url": "https://github.com/zhzhy/cocoapods-generator", + "description": "Add files to empty target from *.podspec, such as souce files, libraries, frameworks, resources and so on." + }, + { + "gem": "cocoapods-debug", + "name": "CocoaPods Debug", + "author": "Samuel Giddins", + "url": "https://github.com/segiddins/cocoapods-debug", + "description": "A simple plugin to ease debugging CocoaPods." 
+ } + ] +} diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_helper_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_helper_spec.rb new file mode 100644 index 0000000..45c4d17 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_helper_spec.rb @@ -0,0 +1,40 @@ +require File.expand_path('../spec_helper', File.dirname(__FILE__)) + +# The CocoaPods namespace +# +module Pod + describe Command::GemHelper do + before do + UI.output = '' + end + + after do + mocha_teardown + end + + it 'detects if a gem is installed' do + Command::GemHelper.gem_installed?('bacon').should.be.true + Command::GemHelper.gem_installed?('fake-fake-fake-gem').should.be.false + end + + it 'detects if a specific version of a gem is installed' do + Command::GemHelper.gem_installed?('bacon', Bacon::VERSION).should.be.true + impossibacon = Gem::Version.new(Bacon::VERSION).bump + Command::GemHelper.gem_installed?('bacon', impossibacon).should.be.false + end + + it 'creates a version list that includes all versions of a single gem' do + spec2 = Gem::NameTuple.new('cocoapods-plugins', Gem::Version.new('0.2.0')) + spec1 = Gem::NameTuple.new('cocoapods-plugins', Gem::Version.new('0.1.0')) + response = [{ 1 => [spec2, spec1] }, []] + Gem::SpecFetcher.any_instance.stubs(:available_specs).returns(response) + + @cache = Command::GemIndexCache.new + @cache.download_and_cache_specs + versions_string = + Command::GemHelper.versions_string('cocoapods-plugins', @cache) + versions_string.should.include('0.2.0') + versions_string.should.include('0.1.0') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_index_cache_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_index_cache_spec.rb new file mode 100644 index 0000000..34f63ca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_index_cache_spec.rb @@ -0,0 +1,37 @@ +require File.expand_path('../spec_helper', File.dirname(__FILE__)) + +# The CocoaPods namespace +# +module Pod + describe Command::GemIndexCache do + before do + @cache = Command::GemIndexCache.new + UI.output = '' + end + + after do + mocha_teardown + end + + it 'notifies the user that it is downloading the spec index' do + response = [{}, []] + Gem::SpecFetcher.any_instance.stubs(:available_specs).returns(response) + + @cache.download_and_cache_specs + UI.output.should.include('Downloading Rubygem specification index...') + UI.output.should.not.include('Error downloading Rubygem specification') + end + + it 'notifies the user when getting the spec index fails' do + error = Gem::RemoteFetcher::FetchError.new('no host', 'bad url') + wrapper_error = stub(:error => error) + response = [[], [wrapper_error]] + Gem::SpecFetcher.any_instance.stubs(:available_specs).returns(response) + + @cache.download_and_cache_specs + @cache.specs.should.be.empty? 
+ UI.output.should.include('Downloading Rubygem specification index...') + UI.output.should.include('Error downloading Rubygem specification') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/create_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/create_spec.rb new file mode 100644 index 0000000..fa3e7cf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/create_spec.rb @@ -0,0 +1,89 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +# The CocoaPods namespace +# +module Pod + describe Command::Plugins::Create do + extend SpecHelper::PluginsCreateCommand + + before do + UI.output = '' + end + + it 'registers itself' do + Command.parse(%w(plugins create)). + should.be.instance_of Command::Plugins::Create + end + + #--- Validation + + it 'should require a name is passed in' do + @command = create_command + should.raise(CLAide::Help) do + @command.validate! + end.message.should.match(/A name for the plugin is required./) + end + + it 'should require a non-empty name is passed in' do + @command = create_command('') + should.raise(CLAide::Help) do + @command.validate! + end.message.should.match(/A name for the plugin is required./) + end + + it 'should require the name does not have spaces' do + @command = create_command('my gem') + should.raise(CLAide::Help) do + @command.validate! + end.message.should.match(/The plugin name cannot contain spaces./) + end + + #--- Naming + + it 'should prefix the given name if not already' do + @command = create_command('unprefixed') + Dir.mktmpdir do |tmpdir| + Dir.chdir(tmpdir) do + @command.run + end + end + UI.output.should.include('Creating `cocoapods-unprefixed` plugin') + end + + it 'should not prefix the name if already prefixed' do + @command = create_command('cocoapods-prefixed') + Dir.mktmpdir do |tmpdir| + Dir.chdir(tmpdir) do + @command.run + end + end + UI.output.should.include('Creating `cocoapods-prefixed` plugin') + end + + #--- Template download + + it 'should download the default template repository' do + @command = create_command('cocoapods-banana') + + template_repo = 'https://github.com/CocoaPods/' \ + 'cocoapods-plugin-template.git' + git_command = ['clone', template_repo, 'cocoapods-banana'] + @command.expects(:git!).with(git_command) + @command.expects(:configure_template) + @command.run + UI.output.should.include('Creating `cocoapods-banana` plugin') + end + + it 'should download the passed in template repository' do + alt_repo = 'https://github.com/CocoaPods/' \ + 'cocoapods-banana-plugin-template.git' + @command = create_command('cocoapods-banana', alt_repo) + + @command.expects(:git!).with(['clone', alt_repo, 'cocoapods-banana']) + @command.expects(:configure_template) + @command.run + UI.output.should.include('Creating `cocoapods-banana` plugin') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/installed_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/installed_spec.rb new file mode 100644 index 0000000..4f27be1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/installed_spec.rb @@ -0,0 +1,140 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +# The CocoaPods namespace +# +module Pod + describe Command::Plugins::Installed do + extend SpecHelper::PluginsStubs + + def stub_plugins(plugins_and_hooks) + specs = [] + 
registrations = {} + plugins_and_hooks.each do |(plugin_name, hooks)| + # Load Plugin GemSpec + fixture_path = fixture("#{plugin_name}.gemspec") + specs.push Gem::Specification.load(fixture_path.to_s) + # Fill hook registrations hash + Array(hooks).each do |hook_name| + registrations[hook_name] ||= [] + hook = Pod::HooksManager::Hook.new(hook_name, plugin_name, {}) + registrations[hook_name] << hook + end + end + + Pod::HooksManager.stubs(:registrations).returns(registrations) + CLAide::Command::PluginManager.stubs(:specifications).returns(specs) + end + + before do + UI.output = '' + end + + it 'registers itself' do + Command.parse(%w(plugins installed)). + should.be.instance_of Command::Plugins::Installed + end + + #--- Output printing + + describe 'Compact List' do + before do + @command = Pod::Command::Plugins::Installed.new CLAide::ARGV.new([]) + end + + it 'no hooks' do + stub_plugins('cocoapods-foo1' => nil, 'cocoapods-foo2' => nil) + + @command.run + UI.output.should.include(' - cocoapods-foo1 : 2.0.1') + UI.output.should.include(' - cocoapods-foo2 : 2.0.2') + UI.output.should.not.include('pre_install') + UI.output.should.not.include('post_install') + end + + it 'one hook' do + stub_plugins( + 'cocoapods-foo1' => :pre_install, + 'cocoapods-foo2' => :post_install, + ) + + @command.run + UI.output.should.include(' - cocoapods-foo1 : 2.0.1 ' \ + '(pre_install hook)') + UI.output.should.include(' - cocoapods-foo2 : 2.0.2 ' \ + '(post_install hook)') + end + + it 'two hooks' do + stub_plugins('cocoapods-foo1' => [:pre_install, :post_install]) + + @command.run + UI.output.should.include(' - cocoapods-foo1 : 2.0.1 ' \ + '(pre_install and post_install hooks)') + end + end + + describe 'Verbose List' do + before do + verbose_args = CLAide::ARGV.new(['--verbose']) + @command = Pod::Command::Plugins::Installed.new verbose_args + end + + it 'no hooks' do + stub_plugins('cocoapods-foo1' => nil, 'cocoapods-foo2' => nil) + + @command.run + + UI.output.should.include < :pre_install, + 'cocoapods-foo2' => :post_install, + ) + + @command.run + UI.output.should.include < [:pre_install, :post_install]) + + @command.run + UI.output.should.include < CocoaPods Fake Gem') + UI.output.should.include('-> CocoaPods Searchable Fake Gem') + UI.output.should.not.include('-> Bacon') + end + + it 'should filter plugins by name, author, description with full search' do + stub_plugins_json_request + @command = search_command('--full', 'search') + @command.run + UI.output.should.include('-> CocoaPods Fake Gem') + UI.output.should.include('-> CocoaPods Searchable Fake Gem') + UI.output.should.not.include('-> Bacon') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_helper_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_helper_spec.rb new file mode 100644 index 0000000..166fd60 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_helper_spec.rb @@ -0,0 +1,33 @@ +require File.expand_path('../spec_helper', File.dirname(__FILE__)) + +# The CocoaPods namespace +# +module Pod + describe Command::PluginsHelper do + extend SpecHelper::PluginsStubs + + it 'downloads the json file' do + stub_plugins_json_request + json = Command::PluginsHelper.download_json + json.should.not.be.nil? + json.should.be.is_a? Hash + json['plugins'].size.should.eql? 
3 + end + + it 'handles empty/bad JSON' do + stub_plugins_json_request 'This is not JSON' + expected_error = /Invalid plugins list from cocoapods-plugins/ + should.raise(Pod::Informative) do + Command::PluginsHelper.download_json + end.message.should.match(expected_error) + end + + it 'notifies the user if the download fails' do + stub_plugins_json_request '', [404, 'Not Found'] + expected_error = /Could not download plugins list from cocoapods-plugins/ + should.raise(Pod::Informative) do + Command::PluginsHelper.download_json + end.message.should.match(expected_error) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_spec.rb new file mode 100644 index 0000000..f15b1cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_spec.rb @@ -0,0 +1,20 @@ +require File.expand_path('../../spec_helper', __FILE__) + +# The CocoaPods namespace +# +module Pod + describe Command::Plugins do + before do + argv = CLAide::ARGV.new([]) + @command = Command::Plugins.new(argv) + end + + it 'registers itself and uses the default subcommand' do + Command.parse(%w(plugins)).should.be.instance_of Command::Plugins::List + end + + it 'exists' do + @command.should.not.be.nil? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo1.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo1.gemspec new file mode 100644 index 0000000..6d27dbc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo1.gemspec @@ -0,0 +1,10 @@ +# coding: utf-8 +Gem::Specification.new do |spec| + spec.name = 'cocoapods-foo1' + spec.version = '2.0.1' + spec.authors = ['Author 1'] + spec.summary = 'Gem Summary 1' + spec.description = 'Gem Description 1' + spec.homepage = 'https://github.com/proper-man/cocoapods-foo1' + spec.license = 'MIT' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo2.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo2.gemspec new file mode 100644 index 0000000..ed0428f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo2.gemspec @@ -0,0 +1,9 @@ +# coding: utf-8 +Gem::Specification.new do |spec| + spec.name = 'cocoapods-foo2' + spec.version = '2.0.2' + spec.authors = ['Author 1', 'Author 2'] + spec.description = 'Gem Description 2' + spec.homepage = 'https://github.com/proper-man/cocoapods-foo2' + spec.license = 'MIT' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/plugins.json b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/plugins.json new file mode 100644 index 0000000..ebe0d5c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/plugins.json @@ -0,0 +1,22 @@ +{ + "plugins":[ + { + "gem":"cocoapods-fake-fake-fake-1", + "name":"CocoaPods Fake Gem", + "url":"https://github.com/CocoaPods/cocoapods-fake-1", + "description":"A Pod that should not exist and should only be found by full search" + }, + { + "gem":"bacon", + "name":"Bacon", + "url":"https://github.com/chneukirchen/bacon", + "description":"A minimal RSpec clone." 
+ }, + { + "gem":"cocoapods-fake-fake-fake-2", + "name":"CocoaPods Searchable Fake Gem", + "url":"https://github.com/CocoaPods/cocoapods-fake-2", + "description":"A Pod that should not exist but should be found with search" + } + ] +} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/unprefixed.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/unprefixed.gemspec new file mode 100644 index 0000000..abd90e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/unprefixed.gemspec @@ -0,0 +1,10 @@ +# coding: utf-8 +Gem::Specification.new do |spec| + spec.name = 'unprefixed-plugin' + spec.version = '1.2.3' + spec.authors = ['Author 1', 'Author 2'] + spec.summary = 'Gem Summary' + spec.description = 'Gem Description' + spec.homepage = 'https://github.com/messy-man/unprefixed-plugins' + spec.license = 'MIT' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/spec_helper.rb new file mode 100644 index 0000000..8a3720f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-plugins-1.0.0/spec/spec_helper.rb @@ -0,0 +1,122 @@ +# Set up coverage analysis +#-----------------------------------------------------------------------------# + +require 'codeclimate-test-reporter' +CodeClimate::TestReporter.configure do |config| + config.logger.level = Logger::WARN +end +CodeClimate::TestReporter.start + +# Set up +#-----------------------------------------------------------------------------# + +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$LOAD_PATH.unshift((ROOT + 'lib').to_s) +$LOAD_PATH.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' + +require 'webmock' +include WebMock::API + +require 'cocoapods' +require 'cocoapods_plugin' + +# VCR +#--------------------------------------# + +require 'vcr' +VCR.configure do |c| + c.cassette_library_dir = ROOT + 'spec/fixtures/vcr_cassettes' + c.hook_into :webmock + c.ignore_hosts 'codeclimate.com' +end + +#-----------------------------------------------------------------------------# + +# The CocoaPods namespace +# +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end +end + +#-----------------------------------------------------------------------------# + +# Bacon namespace +# +module Bacon + # Add a fixture helper to the Bacon Context + class Context + ROOT = ::ROOT + 'spec/fixtures' + + def fixture(name) + ROOT + name + end + end +end + +#-----------------------------------------------------------------------------# + +# SpecHelper namespace +# +module SpecHelper + # Add this as an extension into the Search and List specs + # to help stub the plugins.json request + module PluginsStubs + def stub_plugins_json_request(json = nil, status = 200) + body = json || File.read(fixture('plugins.json')) + stub_request(:get, Pod::Command::PluginsHelper::PLUGINS_RAW_URL). 
+ to_return(:status => status, :body => body, :headers => {}) + end + end + + # Add this as an extension into the Create specs + module PluginsCreateCommand + def create_command(*args) + Pod::Command::Plugins::Create.new CLAide::ARGV.new(args) + end + end + + # Add this as an extension into the Search specs + module PluginsSearchCommand + def search_command(*args) + Pod::Command::Plugins::Search.new CLAide::ARGV.new(args) + end + end + + module PluginsPublishCommand + def publish_command + Pod::Command::Plugins::Publish.new CLAide::ARGV.new [] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.gitignore new file mode 100644 index 0000000..54a36d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.gitignore @@ -0,0 +1,39 @@ +*.gem +*.rbc +/.config +/coverage/ +/InstalledFiles +/pkg/ +/spec/reports/ +/test/tmp/ +/test/version_tmp/ +/tmp/ + +## Specific to RubyMotion: +.dat* +.repl_history +build/ + +## Documentation cache and generated files: +/.yardoc/ +/_yardoc/ +/doc/ +/rdoc/ + +## Environment normalisation: +/.bundle/ +/lib/bundler/man/ + +# for a library or gem, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# Gemfile.lock +# .ruby-version +# .ruby-gemset + +# unless supporting rvm < 1.11.0 or doing something fancy, ignore this: +.rvmrc + +/coverage/ + +# RubyMine Editor +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.gitmodules b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.gitmodules new file mode 100644 index 0000000..eac8b99 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.gitmodules @@ -0,0 +1,3 @@ +[submodule "spec/fixtures/spec-repos/master"] + path = spec/fixtures/spec-repos/master + url = https://github.com/CocoaPods/Specs.git diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.travis.yml new file mode 100644 index 0000000..c104be6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/.travis.yml @@ -0,0 +1,10 @@ +language: ruby +cache: bundler +rvm: + - 2.0.0-p451 + +branches: + only: + - master + - /.+-stable$/ + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/CHANGELOG.md new file mode 100644 index 0000000..8e81c08 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/CHANGELOG.md @@ -0,0 +1,70 @@ +# Cocoapods::Search Changelog + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-04-14) + +##### Bug Fixes + +* Compatibility with CocoaPods 1.0.0.beta.7. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Enhancements + +* Perform full search as default, add `--simple` option to search only by + name. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#13](https://github.com/CocoaPods/cocoapods-search/issues/13) + +* Add support for tvOS and any possible future platforms. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#11](https://github.com/CocoaPods/cocoapods-search/issues/11) + +##### Bug Fixes + +* Print output in reverse order. 
+ [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + +* Perform regexp escape on individual query words before joining them. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#8](https://github.com/CocoaPods/cocoapods-search/issues/8) + + +## 0.1.0 (2015-09-03) + +* Version number must not collide with old gem called cocoapods-search 0.0.7 + + +## 0.0.1 (2015-09-03) + +* Initial implementation. This version is an extraction from [CocoaPods](https://github.com/CocoaPods/CocoaPods). + +Original creators: +[Eloy Durán](https://github.com/alloy) +[Fabio Pelosin](https://github.com/fabiopelosin) + +Extractor: +[Emma Koszinowski](http://github.com/emkosz) diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Gemfile new file mode 100644 index 0000000..0f7f267 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Gemfile @@ -0,0 +1,13 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in cocoapods-search.gemspec +gemspec + +group :development do + gem 'cocoapods', :git => "https://github.com/CocoaPods/CocoaPods.git", :branch => 'master' + gem 'cocoapods-core', :git => "https://github.com/CocoaPods/Core.git", :branch => 'master' + gem 'bacon' + gem 'mocha-on-bacon' + gem 'prettybacon' +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Gemfile.lock new file mode 100644 index 0000000..4c7eb74 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Gemfile.lock @@ -0,0 +1,99 @@ +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 6d77bb4884bf7a5a24909a26f9434ae17dfa558a + branch: master + specs: + cocoapods (1.0.0.beta.6) + activesupport (>= 4.0.2) + claide (>= 1.0.0.beta.3, < 2.0) + cocoapods-core (= 1.0.0.beta.6) + cocoapods-deintegrate (>= 1.0.0.beta.1, < 2.0) + cocoapods-downloader (>= 1.0.0.beta.2, < 2.0) + cocoapods-plugins (>= 1.0.0.beta.1, < 2.0) + cocoapods-search (= 1.0.0) + cocoapods-stats (>= 1.0.0.beta.3, < 2.0) + cocoapods-trunk (>= 1.0.0.beta.2, < 2.0) + cocoapods-try (>= 1.0.0.beta.3, < 2.0) + colored (~> 1.2) + escape (~> 0.0.4) + fourflusher (~> 0.3.0) + molinillo (~> 0.4.4) + nap (~> 1.0) + xcodeproj (>= 1.0.0.beta.3, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: fff02c11422963e4bce9c2796c00bd9b471d79b7 + branch: master + specs: + cocoapods-core (1.0.0.beta.6) + activesupport (>= 4.0.2) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + +PATH + remote: . 
+ specs: + cocoapods-search (1.0.0) + +GEM + remote: https://rubygems.org/ + specs: + activesupport (4.2.6) + i18n (~> 0.7) + json (~> 1.7, >= 1.7.7) + minitest (~> 5.1) + thread_safe (~> 0.3, >= 0.3.4) + tzinfo (~> 1.1) + bacon (1.2.0) + claide (1.0.0.beta.3) + cocoapods-deintegrate (1.0.0.beta.1) + cocoapods-downloader (1.0.0.beta.2) + cocoapods-plugins (1.0.0.beta.1) + nap + cocoapods-stats (1.0.0.beta.3) + cocoapods-trunk (1.0.0.beta.2) + nap (>= 0.8, < 2.0) + netrc (= 0.7.8) + cocoapods-try (1.0.0.beta.3) + colored (1.2) + escape (0.0.4) + fourflusher (0.3.0) + fuzzy_match (2.0.4) + i18n (0.7.0) + json (1.8.3) + metaclass (0.0.4) + minitest (5.8.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + molinillo (0.4.4) + nap (1.1.0) + netrc (0.7.8) + prettybacon (0.0.2) + bacon (~> 1.2) + rake (10.4.2) + thread_safe (0.3.5) + tzinfo (1.2.2) + thread_safe (~> 0.1) + xcodeproj (1.0.0.beta.3) + activesupport (>= 3) + claide (>= 1.0.0.beta.1, < 2.0) + colored (~> 1.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + cocoapods! + cocoapods-core! + cocoapods-search! + mocha-on-bacon + prettybacon + rake + +BUNDLED WITH + 1.11.2 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/LICENSE.txt new file mode 100644 index 0000000..288b8fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2015 Eloy Durán , Fabio Pelosin +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/README.md new file mode 100644 index 0000000..1e8a609 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/README.md @@ -0,0 +1,106 @@ +# cocoapods-search + +[![Build Status](https://travis-ci.org/CocoaPods/cocoapods-search.svg)](https://travis-ci.org/CocoaPods/cocoapods-search) + +A CocoaPods plugin that allows you to search multiple pod spec repositories for specific pods matching a query. cocoapods-search is by default included in CocoaPods. + +## Installation + +If you have CocoaPods, you already have cocoapods-search installed by default. If not, you can also install it as a seperate gem as followed. + + $ gem install cocoapods-search + +## Usage + +Search for pods by using the pod search command as followed. + + $ pod search QUERY + +e.g. 
+ + $ pod search networkin + + -> ACSNetworking (0.0.1) + On the basis of AFNetworking encapsulation. + pod 'ACSNetworking', '~> 0.0.1' + - Homepage: https://github.com/Hyosung/ACSNetworking + - Source: https://github.com/Hyosung/ACSNetworking.git + - Versions: 0.0.1 [master repo] + + + -> AFNetworking (2.5.4) + A delightful iOS and OS X networking framework. + pod 'AFNetworking', '~> 2.5.4' + - Homepage: https://github.com/AFNetworking/AFNetworking + - Source: https://github.com/AFNetworking/AFNetworking.git + - Versions: 2.5.4, 2.5.3, 2.5.2, 2.5.1, 2.5.0, 2.4.1, 2.4.0, 2.3.1, 2.3.0, 2.2.4, 2.2.3, 2.2.2, 2.2.1, 2.2.0, 2.1.0, 2.0.3, 2.0.2, 2.0.1, 2.0.0, 2.0.0-RC3, + 2.0.0-RC2, 2.0.0-RC1, 1.3.4, 1.3.3, 1.3.2, 1.3.1, 1.3.0, 1.2.1, 1.2.0, 1.1.0, 1.0.1, 1.0, 1.0RC3, 1.0RC2, 1.0RC1, 0.10.1, 0.10.0, 0.9.2, 0.9.1, 0.9.0, 0.7.0, + 0.5.1 [master repo] + - Subspecs: + - AFNetworking/Serialization (2.5.4) + - AFNetworking/Security (2.5.4) + - AFNetworking/Reachability (2.5.4) + - AFNetworking/NSURLConnection (2.5.4) + - AFNetworking/NSURLSession (2.5.4) + - AFNetworking/UIKit (2.5.4) + + + -> AFNetworking+AutoRetry (0.0.5) + Auto Retries for AFNetworking requests + pod 'AFNetworking+AutoRetry', '~> 0.0.5' + - Homepage: https://github.com/shaioz/AFNetworking-AutoRetry + - Source: https://github.com/shaioz/AFNetworking-AutoRetry.git + - Versions: 0.0.5, 0.0.4, 0.0.3, 0.0.2, 0.0.1 [master repo] + + ... + + +### Options + +You can use the following options with the search command. + +| Flag | Description | +|----------- |-------------| +| `--regex` | Interpret the `QUERY` as a regular expression | +| `--full` | Search by name, summary, and description | +| `--stats` | Show additional stats (like GitHub watchers and forks) | +| `--ios` | Restricts the search to Pods supported on iOS | +| `--osx` | Restricts the search to Pods supported on OS X | +| `--watchos` | Restricts the seach to Pods supported on Watch OS | +| `--web` | Opens a new search on cocoapods.org | + + +e.g. + + $ pod search video --osx + + -> AMCoreAudio (2.0.7) + AMCoreAudio is a Swift wrapper for Apple's CoreAudio framework + pod 'AMCoreAudio', '~> 2.0.7' + - Homepage: https://github.com/rnine/AMCoreAudio + - Source: https://github.com/rnine/AMCoreAudio.git + - Versions: 2.0.7, 2.0.6, 2.0.5, 2.0.4, 2.0.3, 2.0.2, 2.0.1, 2.0, 1.5, 1.4.3, 1.4.2, 1.4.1, 1.4, 1.3.2, 1.3.1, 1.3, 1.2, 1.1, 1.0.1, 1.0 [master repo] + + + -> AppleCoreAudioUtilityClasses@thehtb (2013.09.17) + A git mirror of Apple's Core Audio Utility Classes for better versioning and with clang/llvm fixes. + pod 'AppleCoreAudioUtilityClasses@thehtb', '~> 2013.09.17' + - Homepage: https://github.com/thehtb/AppleCoreAudioUtilityClasses + - Source: https://github.com/thehtb/AppleCoreAudioUtilityClasses.git + - Versions: 2013.09.17, 2013.2.18, 2013.1.2 [master repo] + - Subspecs: + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CAProcess (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CAAutoDisposer (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CABitOperations (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CASpectralProcessor (2013.09.17) + + + -> AudioKit (2.1.1) + Open-source audio synthesis, processing, & analysis platform. + pod 'AudioKit', '~> 2.1.1' + - Homepage: http://audiokit.io/ + - Source: https://github.com/audiokit/AudioKit.git + - Versions: 2.1.1, 2.0.1, 2.0, 1.3, 1.2-01, 1.2 [master repo] + + ... 
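For reference, the `--web` option listed above does not consult the local spec repos at all; it simply assembles a cocoapods.org query URL from the platform filters and the query words (the code path is `web_search` in `lib/cocoapods-search/command/search.rb`, added later in this diff). Below is a minimal standalone Ruby sketch of that URL construction; `cocoapods_search_url` is a hypothetical helper name used here purely for illustration.

    require 'cgi'

    # Hypothetical helper, for illustration only (not part of cocoapods-search).
    # Mirrors how the `--web` flag combines platform filters and query words
    # into a cocoapods.org search URL.
    def cocoapods_search_url(query_words, platforms = [])
      terms = platforms.map { |p| "on:#{p}" } + query_words
      "https://cocoapods.org/?q=#{CGI.escape(terms.join(' ')).gsub('+', '%20')}"
    end

    puts cocoapods_search_url(%w(video), [:osx])
    # => https://cocoapods.org/?q=on%3Aosx%20video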
diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Rakefile new file mode 100644 index 0000000..c34b828 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/Rakefile @@ -0,0 +1,13 @@ +require 'bundler/gem_tasks' + +def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') +end + +desc 'Runs all the specs' +task :specs do + sh "bundle exec bacon #{specs('**')}" +end + +task :default => :specs + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/cocoapods-search.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/cocoapods-search.gemspec new file mode 100644 index 0000000..80d106f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/cocoapods-search.gemspec @@ -0,0 +1,25 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods-search/gem_version.rb' + +Gem::Specification.new do |spec| + spec.name = 'cocoapods-search' + spec.version = CocoapodsSearch::VERSION + spec.authors = ['Eloy Durán', 'Fabio Pelosin', 'Emma Koszinowski'] + spec.email = ['eloy.de.enige@gmail.com', 'fabiopelosin@gmail.com', 'emkosz@gmail.com'] + spec.description = %q{Search for pods.} + spec.summary = %q{Searches for pods, ignoring case, whose name matches `QUERY`. If the + `--full` option is specified, this will also search in the summary and + description of the pods.} + spec.homepage = 'https://github.com/CocoaPods/cocoapods-search' + spec.license = 'MIT' + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ['lib'] + + spec.add_development_dependency 'bundler', '~> 1.3' + spec.add_development_dependency 'rake' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search.rb new file mode 100644 index 0000000..b955220 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search.rb @@ -0,0 +1 @@ +require 'cocoapods-search/gem_version' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/command.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/command.rb new file mode 100644 index 0000000..61b9cb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/command.rb @@ -0,0 +1 @@ +require 'cocoapods-search/command/search' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/command/search.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/command/search.rb new file mode 100644 index 0000000..644561e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/command/search.rb @@ -0,0 +1,115 @@ +module Pod + class Command + # @CocoaPods 0.0.2 + # + class Search < Command + self.summary = 'Search for pods' + + self.description = <<-DESC + Searches for pods, ignoring case, whose name, summary, description, or authors match `QUERY`. If the + `--simple` option is specified, this will only search in the names of the pods. 
+ DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', true), + ] + + def self.options + options = [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--simple', 'Search only by name'], + ['--stats', 'Show additional stats (like GitHub watchers and forks)'], + ['--web', 'Searches on cocoapods.org'], + ] + options += Platform.all.map do |platform| + ["--#{platform.name.to_s}", "Restricts the search to Pods supported on #{Platform.string_name(platform.to_sym)}"] + end + options << ['--no-pager', 'Do not pipe search results into a pager'] + options.concat(super.reject { |option, _| option == '--silent' }) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @simple_search = argv.flag?('simple') + @stats = argv.flag?('stats') + @web = argv.flag?('web') + @platform_filters = Platform.all.map do |platform| + argv.flag?(platform.name.to_s) ? platform.to_sym : nil + end.compact + @query = argv.arguments! unless argv.arguments.empty? + config.silent = false + @use_pager = argv.flag?('pager', true) + super + end + + def validate! + super + help! 'A search query is required.' unless @query + + unless @web || !@use_regex + begin + /#{@query.join(' ').strip}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + end + end + + def run + ensure_master_spec_repo_exists! + if @web + web_search + else + local_search + end + end + + def sources_manager + defined?(Pod::SourcesManager) ? Pod::SourcesManager : config.sources_manager + end + + def web_search + queries = @platform_filters.map do |platform| + "on:#{platform}" + end + queries += @query + query_parameter = queries.compact.flatten.join(' ') + url = "https://cocoapods.org/?q=#{CGI.escape(query_parameter).gsub('+', '%20')}" + UI.puts("Opening #{url}") + open!(url) + end + + def local_search + query_regex = @query.reduce([]) { |result, q| + result << (@use_regex ? q : Regexp.escape(q)) + }.join(' ').strip + + sets = sources_manager.search_by_name(query_regex, !@simple_search) + + @platform_filters.each do |platform| + sets.reject! { |set| !set.specification.available_platforms.map(&:name).include?(platform) } + end + + if(@use_pager) + UI.with_pager { print_sets(sets) } + else + print_sets(sets) + end + end + + def print_sets(sets) + sets.each do |set| + begin + if @stats + UI.pod(set, :stats) + else + UI.pod(set, :normal) + end + rescue DSLError + UI.warn "Skipping `#{set.name}` because the podspec contains errors." 
+ end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/gem_version.rb new file mode 100644 index 0000000..5f93d57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods-search/gem_version.rb @@ -0,0 +1,3 @@ +module CocoapodsSearch + VERSION = '1.0.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..678d204 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'cocoapods-search/command' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/command/search_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/command/search_spec.rb new file mode 100644 index 0000000..142393a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/command/search_spec.rb @@ -0,0 +1,154 @@ +require File.expand_path('../../spec_helper', __FILE__) + +module Pod + describe Command::Search do + extend SpecHelper::TemporaryRepos + + before do + @test_source = Source.new(fixture('spec-repos/test_repo')) + Source::Aggregate.any_instance.stubs(:sources).returns([@test_source]) + Config.instance.sources_manager.updated_search_index = nil + end + + describe 'Search' do + it 'registers it self' do + Command.parse(%w{ search }).should.be.instance_of Command::Search + end + + it 'runs with correct parameters' do + lambda { run_command('search', 'JSON') }.should.not.raise + lambda { run_command('search', 'JSON', '--simple') }.should.not.raise + end + + it 'complains for wrong parameters' do + lambda { run_command('search') }.should.raise CLAide::Help + lambda { run_command('search', 'too', '--wrong') }.should.raise CLAide::Help + lambda { run_command('search', '--wrong') }.should.raise CLAide::Help + end + + it 'searches for a pod with name matching the given query ignoring case' do + output = run_command('search', 'json', '--simple') + output.should.include? 'JSONKit' + end + + it 'searches for a pod with name, summary, or description matching the given query ignoring case' do + output = run_command('search', 'engelhart') + output.should.include? 'JSONKit' + end + + it 'searches for a pod with name, summary, or description matching the given multi-word query ignoring case' do + output = run_command('search', 'very', 'high', 'performance') + output.should.include? 'JSONKit' + end + + it 'prints search results in order' do + output = run_command('search', 'lib') + output.should.match /BananaLib.*JSONKit/m + end + + it 'restricts the search to Pods supported on iOS' do + output = run_command('search', 'BananaLib', '--ios') + output.should.include? 'BananaLib' + Specification.any_instance.stubs(:available_platforms).returns([Platform.osx]) + output = run_command('search', 'BananaLib', '--ios') + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on OS X' do + output = run_command('search', 'BananaLib', '--osx') + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on Watch OS' do + output = run_command('search', 'a', '--watchos') + output.should.include? 'Realm' + output.should.not.include? 
'BananaLib' + end + + it 'restricts the search to Pods supported on tvOS' do + output = run_command('search', 'n', '--tvos') + output.should.include? 'monkey' + output.should.not.include? 'BananaLib' + end + + it 'outputs with the silent parameter' do + output = run_command('search', 'BananaLib', '--silent') + output.should.include? 'BananaLib' + end + + it 'shows a friendly message when locally searching with invalid regex' do + lambda { run_command('search', '--regex', '+') }.should.raise CLAide::Help + end + + it 'does not try to validate the query as a regex with plain-text search' do + lambda { run_command('search', '+') }.should.not.raise CLAide::Help + end + + it 'uses regex search when asked for regex mode' do + output = run_command('search', '--regex', 'Ba(na)+Lib') + output.should.include? 'BananaLib' + output.should.not.include? 'Pod+With+Plus+Signs' + output.should.not.include? 'JSONKit' + end + + it 'uses plain-text search when not asked for regex mode' do + output = run_command('search', 'Pod+With+Plus+Signs') + output.should.include? 'Pod+With+Plus+Signs' + output.should.not.include? 'BananaLib' + end + end + + describe 'option --web' do + extend SpecHelper::TemporaryRepos + + it 'searches with invalid regex' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=NSAttributedString%2BCCLFormat') + run_command('search', '--web', 'NSAttributedString+CCLFormat') + end + + it 'should url encode search queries' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=NSAttributedString%2BCCLFormat') + run_command('search', '--web', 'NSAttributedString+CCLFormat') + end + + it 'searches the web via the open! command' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=bananalib') + run_command('search', '--web', 'bananalib') + end + + it 'includes option --osx correctly' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Aosx%20bananalib') + run_command('search', '--web', '--osx', 'bananalib') + end + + it 'includes option --ios correctly' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Aios%20bananalib') + run_command('search', '--web', '--ios', 'bananalib') + end + + it 'includes option --watchos correctly' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Awatchos%20bananalib') + run_command('search', '--web', '--watchos', 'bananalib') + end + + it 'includes option --tvos correctly' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Atvos%20bananalib') + run_command('search', '--web', '--tvos', 'bananalib') + end + + it 'includes any new platform option correctly' do + Platform.stubs(:all).returns([Platform.ios, Platform.tvos, Platform.new('whateveros')]) + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Awhateveros%20bananalib') + run_command('search', '--web', '--whateveros', 'bananalib') + end + + it 'does not matter in which order the ios/osx options are set' do + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Aios%20on%3Aosx%20bananalib') + run_command('search', '--web', '--ios', '--osx', 'bananalib') + + Command::Search.any_instance.expects(:open!).with('https://cocoapods.org/?q=on%3Aios%20on%3Aosx%20bananalib') + run_command('search', '--web', '--osx', '--ios', 'bananalib') + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec new file mode 100644 index 0000000..881ed82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec @@ -0,0 +1,21 @@ +Pod::Spec.new do |s| + s.name = 'BananaLib' + s.version = '1.0' + s.authors = 'Banana Corp', { 'Monkey Boy' => 'monkey@banana-corp.local' } + s.homepage = 'http://banana-corp.local/banana-lib.html' + s.summary = 'Chunky bananas!' + s.description = 'Full of chunky bananas.' + s.platform = :ios + + s.source = { :git => 'http://banana-corp.local/banana-lib.git', :tag => 'v1.0' } + s.source_files = 'Classes/*.{h,m}', 'Vendor' + s.xcconfig = { 'OTHER_LDFLAGS' => '-framework SystemConfiguration' } + s.prefix_header_file = 'Classes/BananaLib.pch' + s.resources = "Resources/*.png" + s.dependency 'monkey', '~> 1.0.1', '< 1.0.9' + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' + } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec new file mode 100644 index 0000000..687fe5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec @@ -0,0 +1,11 @@ +Pod::Spec.new do |s| + s.name = 'JSONKit' + s.version = '1.4' + s.license = 'BSD / Apache License, Version 2.0' + s.summary = 'A Very High Performance Objective-C JSON Library.' + s.homepage = 'https://github.com/johnezang/JSONKit' + s.author = 'John Engelhart' + s.source = { :git => 'https://github.com/johnezang/JSONKit.git', :tag => 'v1.4' } + + s.source_files = 'JSONKit.*' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec new file mode 100644 index 0000000..d9f2c9d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec @@ -0,0 +1,12 @@ +Pod::Spec.new do |s| + s.name = 'JSONKit' + s.version = '999.999.999' + s.license = 'BSD / Apache License, Version 2.0' + s.summary = 'A Very High Performance Objective-C JSON Library.' 
+ s.homepage = 'https://github.com/johnezang/JSONKit' + s.author = 'John Engelhart' + s.source = { :git => 'https://github.com/johnezang/JSONKit.git', :commit => '0aff3deb5e1bb2bbc88a83fd71c8ad5550185cce' } + + s.source_files = 'JSONKit.*' + s.compiler_flags = '-Wno-deprecated-objc-isa-usage', '-Wno-format' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec new file mode 100644 index 0000000..ef91e61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec @@ -0,0 +1,16 @@ +Pod::Spec.new do |s| + s.name = "OrangeFramework" + s.version = "0.1.0" + s.author = { "Swiftest Orang-Utan" => "swiftest@orang.utan.local" } + s.summary = "Fresh juice!" + s.description = "Blends fresh orange juice." + s.homepage = "http://httpbin.org/html" + s.source = { :git => "http://utan.local/orange-framework.git", :tag => s.version.to_s } + s.license = 'MIT' + + s.platform = :ios, '8.0' + + s.source_files = 'Source/Juicer.swift' + + s.frameworks = 'UIKit' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec new file mode 100644 index 0000000..9aa9123 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec @@ -0,0 +1,17 @@ +Pod::Spec.new do |s| + s.name = 'Pod+With+Plus+Signs' + s.version = '1.0' + s.authors = 'Evil Corp' + s.homepage = 'http://evil-corp.local/pod_with_plus_signs.html' + s.summary = 'Messing with special chars' + s.description = 'I love messing up with special chars in my pod name! Mouahahahahaa (evil laugh)' + s.platform = :ios + + s.source = { :git => 'http://evil-corp.local/pod_with_plus_signs.git', :tag => '1.0' } + s.source_files = 'Classes/*.{h,m}' + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' + } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec new file mode 100644 index 0000000..14f9360 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec @@ -0,0 +1,18 @@ +Pod::Spec.new do |s| + s.name = 'Realm' + s.version = '0.94' + s.authors = 'Realm', { 'Realm' => 'help@realm.io' } + s.homepage = 'https://realm.io/' + s.summary = 'Realm is a modern data framework & database for iOS & OS X.' + s.description = 'The Realm database, for Objective-C. (If you want to use Realm from Swift, see the “RealmSwift” pod.)\n\nRealm is a mobile database: a replacement for Core Data & SQLite. You can use it on iOS & OS X. Realm is not an ORM on top SQLite: instead it uses its own persistence engine, built for simplicity (& speed). 
Learn more and get help at https://realm.io' + s.platform = :watchos + + s.source = { :git => 'https://github.com/realm/realm-cocoa.git', :tag => 'v0.94.0' } + s.source_files = 'Realm/*.{m,mm}', 'Realm/ObjectStore/*.cpp' + s.xcconfig = { 'CLANG_CXX_LANGUAGE_STANDARD": "compiler-default' => 'OTHER_CPLUSPLUSFLAGS": "-std=c++1y $(inherited)' } + s.prefix_header_file = 'Classes/Realm.pch' + s.license = { + :type => 'Apache 2.0', + :file => 'LICENSE' + } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec new file mode 100644 index 0000000..60d4234 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec @@ -0,0 +1,11 @@ +Pod::Spec.new do |s| + s.name = "monkey" + s.version = "1.0.2" + s.author = { "Funky Monkey" => "funky@monkey.local" } + s.summary = "🙈🙉🙊" + s.description = "See no evil! Hear no evil! Speak no evil!" + s.homepage = "http://httpbin.org/html" + s.source = { :git => "http://monkey.local/monkey.git", :tag => s.version.to_s } + s.license = 'MIT' + s.vendored_library = 'monkey.a' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper.rb new file mode 100644 index 0000000..d10a130 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper.rb @@ -0,0 +1,85 @@ +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$:.unshift((ROOT + 'lib').to_s) +$:.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'cocoapods' + +require 'cocoapods_plugin' + +require 'spec_helper/command' # Allows to run Pod commands and returns their output. +require 'spec_helper/fixture' # Provides access to the fixtures and unpacks them if needed. +require 'spec_helper/temporary_repos' # Allows to create and modify temporary spec repositories. +require 'spec_helper/user_interface' # Redirects UI to UI.output & UI.warnings. +require 'spec_helper/pre_flight' # Cleans the temporary directory, the config & the UI.output before every test. + +module Bacon + class Context + include Pod::Config::Mixin + include SpecHelper::Fixture + include SpecHelper::Command + + def skip_xcodebuild? + ENV['SKIP_XCODEBUILD'] + end + + def temporary_directory + SpecHelper.temporary_directory + end + end +end + +#Mocha::Configuration.prevent(:stubbing_non_existent_method) + +module SpecHelper + def self.temporary_directory + ROOT + 'tmp' + end +end + +def temporary_sandbox + Pod::Sandbox.new(temporary_directory + 'Pods') +end + +def fixture_spec(name) + file = SpecHelper::Fixture.fixture(name) + Pod::Specification.from_file(file) +end + +def fixture_file_accessor(spec_or_name, platform = Pod::Platform.ios) + spec = spec_or_name.is_a?(Pod::Specification) ? 
spec_or_name : fixture_spec(spec_or_name) + path_list = Pod::Sandbox::PathList.new(spec.defined_in_file.dirname) + Pod::Sandbox::FileAccessor.new(path_list, spec.consumer(platform)) +end + +def fixture_target_definition(name = 'Pods', platform = Pod::Platform.ios) + Pod::Podfile::TargetDefinition.new(name, Pod::Podfile.new, 'name' => name, 'platform' => platform) +end + +def fixture_pod_target(spec_or_name, target_definition = nil) + spec = spec_or_name.is_a?(Pod::Specification) ? spec_or_name : fixture_spec(spec_or_name) + target_definition ||= fixture_target_definition + target_definition.store_pod(spec.name) + Pod::PodTarget.new([spec], [target_definition], config.sandbox).tap do |pod_target| + pod_target.file_accessors << fixture_file_accessor(spec, pod_target.platform) + consumer = spec.consumer(pod_target.platform) + pod_target.spec_consumers << consumer + end +end + +def fixture_aggregate_target(pod_targets = [], target_definition = nil) + target_definition ||= pod_targets.flat_map(&:target_definitions).first || fixture_target_definition + target = Pod::AggregateTarget.new(target_definition, config.sandbox) + target.client_root = config.sandbox.root.dirname + target.pod_targets = pod_targets + target +end + +#-----------------------------------------------------------------------------# + +SpecHelper::Fixture.fixture('banana-lib') # ensure it exists +SpecHelper::Fixture.fixture('orange-framework') diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/command.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/command.rb new file mode 100644 index 0000000..6662114 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/command.rb @@ -0,0 +1,27 @@ +module SpecHelper + module Command + def argv(*argv) + CLAide::ARGV.new(argv) + end + + def command(*argv) + argv += ['--no-ansi', '--no-pager'] + Pod::Command.parse(argv) + end + + def run_command(*args) + Dir.chdir(SpecHelper.temporary_directory) do + Pod::UI.output = '' + # @todo Remove this once all cocoapods has + # been converted to use the UI.puts + config_silent = config.silent? + config.silent = false + cmd = command(*args) + cmd.validate! + cmd.run + config.silent = config_silent + Pod::UI.output + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/fixture.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/fixture.rb new file mode 100644 index 0000000..39704e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/fixture.rb @@ -0,0 +1,32 @@ +module SpecHelper + def self.fixture(name) + Fixture.fixture(name) + end + + def self.create_sample_app_copy_from_fixture(fixture_name) + fixture_copy_path = temporary_directory + fixture_name + FileUtils.cp_r(fixture(fixture_name), temporary_directory) + fixture_copy_path + "#{fixture_name}.xcodeproj" + end + + def self.test_repo_url + 'https://github.com/CocoaPods/test_repo.git' + + end + + module Fixture + ROOT = ::ROOT + 'spec/fixtures' + + def fixture(name) + file = ROOT + name + unless file.exist? + archive = Pathname.new(file.to_s + '.tar.gz') + if archive.exist? 
+ system "cd '#{archive.dirname}' && tar -zxvf '#{archive}' > /dev/null 2>&1" + end + end + file + end + module_function :fixture + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/pre_flight.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/pre_flight.rb new file mode 100644 index 0000000..ee52b35 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/pre_flight.rb @@ -0,0 +1,36 @@ +# Restores the config to the default state before each requirement + +module Bacon + class Context + old_run_requirement = instance_method(:run_requirement) + + + define_method(:run_requirement) do |description, spec| + ::Pod::Config.instance = nil + ::Pod::Config.instance.tap do |c| + c.verbose = false + c.silent = true + c.repos_dir = fixture('spec-repos') + c.installation_root = SpecHelper.temporary_directory + c.cache_root = SpecHelper.temporary_directory + 'Cache' + end + + ::Pod::UI.output = '' + ::Pod::UI.warnings = '' + ::Pod::UI.next_input = '' + # The following prevents a nasty behaviour where the increments are not + # balanced when testing informative which might lead to sections not + # being printed to the output as they are too nested. + ::Pod::UI.indentation_level = 0 + ::Pod::UI.title_level = 0 + + SpecHelper.temporary_directory.rmtree if SpecHelper.temporary_directory.exist? + SpecHelper.temporary_directory.mkpath + + # TODO + #::Pod::SourcesManager.stubs(:search_index_path).returns(temporary_directory + 'search_index.yaml') + + old_run_requirement.bind(self).call(description, spec) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/temporary_repos.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/temporary_repos.rb new file mode 100644 index 0000000..2c6f5da --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/temporary_repos.rb @@ -0,0 +1,90 @@ +module SpecHelper + def self.tmp_repos_path + TemporaryRepos.tmp_repos_path + end + + module TemporaryRepos + extend Pod::Executable + executable :git + + # @return [Pathname] The path for the repo with the given name. + # + def repo_path(name) + tmp_repos_path + name + end + + # Makes a repo with the given name. + # + def repo_make(name) + path = repo_path(name) + path.mkpath + Dir.chdir(path) do + `git init` + repo_make_readme_change(name, 'Added') + `git add .` + `git commit -m "Initialized."` + end + path + end + + # Clones a repo to the given name. + # + def repo_clone(from_name, to_name) + Dir.chdir(tmp_repos_path) { `git clone #{from_name} #{to_name} 2>&1 > /dev/null` } + repo_path(to_name) + end + + def repo_make_readme_change(name, string) + file = repo_path(name) + 'README' + file.open('w') { |f| f << "#{string}" } + end + + #--------------------------------------# + + def test_repo_path + repo_path('master') + end + + # Sets up a lighweight master repo in `tmp/cocoapods/repos/master` with the + # contents of `spec/fixtures/spec-repos/test_repo`. + # + def set_up_test_repo + require 'fileutils' + test_repo_path.mkpath + origin = ROOT + 'spec/fixtures/spec-repos/test_repo/.' + destination = tmp_repos_path + 'master' + FileUtils.cp_r(origin, destination) + repo_make('master') + end + + def test_old_repo_path + repo_path('../master') + end + + # Sets up a lighweight master repo in `tmp/cocoapods/master` with the + # contents of `spec/fixtures/spec-repos/test_repo`. 
+ # + def set_up_old_test_repo + require 'fileutils' + test_old_repo_path.mkpath + origin = ROOT + 'spec/fixtures/spec-repos/test_repo/.' + destination = tmp_repos_path + '../master' + FileUtils.cp_r(origin, destination) + repo_make('../master') + end + + #--------------------------------------# + + def tmp_repos_path + SpecHelper.temporary_directory + 'cocoapods/repos' + end + + module_function :tmp_repos_path + + def self.extended(base) + base.before do + TemporaryRepos.tmp_repos_path.mkpath + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/user_interface.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/user_interface.rb new file mode 100644 index 0000000..d603a45 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.0/spec/spec_helper/user_interface.rb @@ -0,0 +1,36 @@ +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + @next_input = '' + + class << self + attr_accessor :output + attr_accessor :warnings + attr_accessor :next_input + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + + alias_method :gets, :next_input + + def print_warnings + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/.github/workflows/Specs.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/.github/workflows/Specs.yml new file mode 100644 index 0000000..bd3bb06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/.github/workflows/Specs.yml @@ -0,0 +1,42 @@ +name: Specs + +jobs: + specs: + strategy: + matrix: + os: [ubuntu-16.04] + ruby: [2.6, 2.7, 3.0] + + name: ${{ matrix.os }} / Ruby ${{ matrix.ruby }} + runs-on: ${{ matrix.os }} + steps: + - name: Checkout git + uses: actions/checkout@v1 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + if: ${{ matrix.ruby != 'system' }} + with: + ruby-version: ${{ matrix.ruby }} + + - name: Update git submodules + run: git submodule update --init --recursive + + - name: Run bundle install + run: | + gem install bundler -v "~> 1.17" + bundle install --jobs 4 --retry 3 --without debugging documentation + + - name: Run Specs + run: bundle exec rake specs + +on: + push: + branches: + - "master" + - "*-stable" + pull_request: + branches: + - master + - "*-stable" + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/.gitignore b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/.gitignore new file mode 100644 index 0000000..54a36d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/.gitignore @@ -0,0 +1,39 @@ +*.gem +*.rbc +/.config +/coverage/ +/InstalledFiles +/pkg/ +/spec/reports/ +/test/tmp/ +/test/version_tmp/ +/tmp/ + +## Specific to RubyMotion: +.dat* +.repl_history +build/ + +## Documentation cache and generated files: +/.yardoc/ +/_yardoc/ +/doc/ +/rdoc/ + +## Environment normalisation: +/.bundle/ +/lib/bundler/man/ + +# for a library or gem, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# Gemfile.lock +# .ruby-version +# .ruby-gemset + +# unless supporting rvm < 1.11.0 or doing something fancy, ignore this: +.rvmrc + +/coverage/ + +# RubyMine Editor +.idea diff --git 
a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/CHANGELOG.md new file mode 100644 index 0000000..34b4660 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/CHANGELOG.md @@ -0,0 +1,83 @@ +# Cocoapods::Search Changelog + +## 1.0.1 (2021-08-13) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix running with `--web`. + [Samuel Giddins](https://github.com/segiddins) + [#25](https://github.com/CocoaPods/cocoapods-search/issues/25) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-04-14) + +##### Bug Fixes + +* Compatibility with CocoaPods 1.0.0.beta.7. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Enhancements + +* Perform full search as default, add `--simple` option to search only by + name. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#13](https://github.com/CocoaPods/cocoapods-search/issues/13) + +* Add support for tvOS and any possible future platforms. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#11](https://github.com/CocoaPods/cocoapods-search/issues/11) + +##### Bug Fixes + +* Print output in reverse order. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + +* Perform regexp escape on individual query words before joining them. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#8](https://github.com/CocoaPods/cocoapods-search/issues/8) + + +## 0.1.0 (2015-09-03) + +* Version number must not collide with old gem called cocoapods-search 0.0.7 + + +## 0.0.1 (2015-09-03) + +* Initial implementation. This version is an extraction from [CocoaPods](https://github.com/CocoaPods/CocoaPods). 
+ +Original creators: +[Eloy Durán](https://github.com/alloy) +[Fabio Pelosin](https://github.com/fabiopelosin) + +Extractor: +[Emma Koszinowski](http://github.com/emkosz) diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Gemfile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Gemfile new file mode 100644 index 0000000..0f7f267 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Gemfile @@ -0,0 +1,13 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in cocoapods-search.gemspec +gemspec + +group :development do + gem 'cocoapods', :git => "https://github.com/CocoaPods/CocoaPods.git", :branch => 'master' + gem 'cocoapods-core', :git => "https://github.com/CocoaPods/Core.git", :branch => 'master' + gem 'bacon' + gem 'mocha-on-bacon' + gem 'prettybacon' +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Gemfile.lock new file mode 100644 index 0000000..5db12c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Gemfile.lock @@ -0,0 +1,126 @@ +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 2e285ae6be8aadf0e6319a51626d5176c47e0ede + branch: master + specs: + cocoapods (1.11.0.beta.2) + addressable (~> 2.8) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.11.0.beta.2) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.4.0, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (= 1.0.1) + cocoapods-trunk (>= 1.4.0, < 2.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.8.0) + nap (~> 1.0) + ruby-macho (>= 1.0, < 3.0) + xcodeproj (>= 1.21.0, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: 0a0394afabd9c5f0838fc044e1c817024499dace + branch: master + specs: + cocoapods-core (1.11.0.beta.2) + activesupport (>= 5.0, < 7) + addressable (~> 2.8) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 4.0) + typhoeus (~> 1.0) + +PATH + remote: . 
+ specs: + cocoapods-search (1.0.1) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.3) + activesupport (6.1.4) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + zeitwerk (~> 2.3) + addressable (2.8.0) + public_suffix (>= 2.0.2, < 5.0) + algoliasearch (1.27.5) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + atomos (0.1.3) + bacon (1.2.0) + claide (1.0.3) + cocoapods-deintegrate (1.0.4) + cocoapods-downloader (1.4.0) + cocoapods-plugins (1.0.0) + nap + cocoapods-trunk (1.5.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + cocoapods-try (1.2.0) + colored2 (3.1.2) + concurrent-ruby (1.1.9) + escape (0.0.4) + ethon (0.14.0) + ffi (>= 1.15.0) + ffi (1.15.3) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + httpclient (2.8.3) + i18n (1.8.10) + concurrent-ruby (~> 1.0) + json (2.5.1) + minitest (5.14.4) + mocha (1.13.0) + mocha-on-bacon (0.2.3) + mocha (>= 0.13.0) + molinillo (0.8.0) + nanaimo (0.3.0) + nap (1.1.0) + netrc (0.11.0) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (4.0.6) + rake (13.0.6) + rexml (3.2.5) + ruby-macho (2.5.1) + typhoeus (1.4.0) + ethon (>= 0.9.0) + tzinfo (2.0.4) + concurrent-ruby (~> 1.0) + xcodeproj (1.21.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.3.0) + rexml (~> 3.2.4) + zeitwerk (2.4.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + cocoapods! + cocoapods-core! + cocoapods-search! + mocha-on-bacon + prettybacon + rake + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/LICENSE.txt new file mode 100644 index 0000000..288b8fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2015 Eloy Durán , Fabio Pelosin +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/README.md new file mode 100644 index 0000000..1e8a609 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/README.md @@ -0,0 +1,106 @@ +# cocoapods-search + +[![Build Status](https://travis-ci.org/CocoaPods/cocoapods-search.svg)](https://travis-ci.org/CocoaPods/cocoapods-search) + +A CocoaPods plugin that allows you to search multiple pod spec repositories for specific pods matching a query. 
cocoapods-search is by default included in CocoaPods. + +## Installation + +If you have CocoaPods, you already have cocoapods-search installed by default. If not, you can also install it as a separate gem as follows. + + $ gem install cocoapods-search + +## Usage + +Search for pods by using the pod search command as follows. + + $ pod search QUERY + +e.g. + + $ pod search networkin + + -> ACSNetworking (0.0.1) + On the basis of AFNetworking encapsulation. + pod 'ACSNetworking', '~> 0.0.1' + - Homepage: https://github.com/Hyosung/ACSNetworking + - Source: https://github.com/Hyosung/ACSNetworking.git + - Versions: 0.0.1 [master repo] + + + -> AFNetworking (2.5.4) + A delightful iOS and OS X networking framework. + pod 'AFNetworking', '~> 2.5.4' + - Homepage: https://github.com/AFNetworking/AFNetworking + - Source: https://github.com/AFNetworking/AFNetworking.git + - Versions: 2.5.4, 2.5.3, 2.5.2, 2.5.1, 2.5.0, 2.4.1, 2.4.0, 2.3.1, 2.3.0, 2.2.4, 2.2.3, 2.2.2, 2.2.1, 2.2.0, 2.1.0, 2.0.3, 2.0.2, 2.0.1, 2.0.0, 2.0.0-RC3, + 2.0.0-RC2, 2.0.0-RC1, 1.3.4, 1.3.3, 1.3.2, 1.3.1, 1.3.0, 1.2.1, 1.2.0, 1.1.0, 1.0.1, 1.0, 1.0RC3, 1.0RC2, 1.0RC1, 0.10.1, 0.10.0, 0.9.2, 0.9.1, 0.9.0, 0.7.0, + 0.5.1 [master repo] + - Subspecs: + - AFNetworking/Serialization (2.5.4) + - AFNetworking/Security (2.5.4) + - AFNetworking/Reachability (2.5.4) + - AFNetworking/NSURLConnection (2.5.4) + - AFNetworking/NSURLSession (2.5.4) + - AFNetworking/UIKit (2.5.4) + + + -> AFNetworking+AutoRetry (0.0.5) + Auto Retries for AFNetworking requests + pod 'AFNetworking+AutoRetry', '~> 0.0.5' + - Homepage: https://github.com/shaioz/AFNetworking-AutoRetry + - Source: https://github.com/shaioz/AFNetworking-AutoRetry.git + - Versions: 0.0.5, 0.0.4, 0.0.3, 0.0.2, 0.0.1 [master repo] + + ... + + +### Options + +You can use the following options with the search command. + +| Flag | Description | +|----------- |-------------| +| `--regex` | Interpret the `QUERY` as a regular expression | +| `--full` | Search by name, summary, and description | +| `--stats` | Show additional stats (like GitHub watchers and forks) | +| `--ios` | Restricts the search to Pods supported on iOS | +| `--osx` | Restricts the search to Pods supported on OS X | +| `--watchos` | Restricts the search to Pods supported on Watch OS | +| `--web` | Opens a new search on cocoapods.org | + + +e.g. + + $ pod search video --osx + + -> AMCoreAudio (2.0.7) + AMCoreAudio is a Swift wrapper for Apple's CoreAudio framework + pod 'AMCoreAudio', '~> 2.0.7' + - Homepage: https://github.com/rnine/AMCoreAudio + - Source: https://github.com/rnine/AMCoreAudio.git + - Versions: 2.0.7, 2.0.6, 2.0.5, 2.0.4, 2.0.3, 2.0.2, 2.0.1, 2.0, 1.5, 1.4.3, 1.4.2, 1.4.1, 1.4, 1.3.2, 1.3.1, 1.3, 1.2, 1.1, 1.0.1, 1.0 [master repo] + + + -> AppleCoreAudioUtilityClasses@thehtb (2013.09.17) + A git mirror of Apple's Core Audio Utility Classes for better versioning and with clang/llvm fixes.
+ pod 'AppleCoreAudioUtilityClasses@thehtb', '~> 2013.09.17' + - Homepage: https://github.com/thehtb/AppleCoreAudioUtilityClasses + - Source: https://github.com/thehtb/AppleCoreAudioUtilityClasses.git + - Versions: 2013.09.17, 2013.2.18, 2013.1.2 [master repo] + - Subspecs: + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CAProcess (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CAAutoDisposer (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CABitOperations (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CASpectralProcessor (2013.09.17) + + + -> AudioKit (2.1.1) + Open-source audio synthesis, processing, & analysis platform. + pod 'AudioKit', '~> 2.1.1' + - Homepage: http://audiokit.io/ + - Source: https://github.com/audiokit/AudioKit.git + - Versions: 2.1.1, 2.0.1, 2.0, 1.3, 1.2-01, 1.2 [master repo] + + ... diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Rakefile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Rakefile new file mode 100644 index 0000000..c34b828 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/Rakefile @@ -0,0 +1,13 @@ +require 'bundler/gem_tasks' + +def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') +end + +desc 'Runs all the specs' +task :specs do + sh "bundle exec bacon #{specs('**')}" +end + +task :default => :specs + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/cocoapods-search.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/cocoapods-search.gemspec new file mode 100644 index 0000000..80d106f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/cocoapods-search.gemspec @@ -0,0 +1,25 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods-search/gem_version.rb' + +Gem::Specification.new do |spec| + spec.name = 'cocoapods-search' + spec.version = CocoapodsSearch::VERSION + spec.authors = ['Eloy Durán', 'Fabio Pelosin', 'Emma Koszinowski'] + spec.email = ['eloy.de.enige@gmail.com', 'fabiopelosin@gmail.com', 'emkosz@gmail.com'] + spec.description = %q{Search for pods.} + spec.summary = %q{Searches for pods, ignoring case, whose name matches `QUERY`. 
If the + `--full` option is specified, this will also search in the summary and + description of the pods.} + spec.homepage = 'https://github.com/CocoaPods/cocoapods-search' + spec.license = 'MIT' + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ['lib'] + + spec.add_development_dependency 'bundler', '~> 1.3' + spec.add_development_dependency 'rake' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search.rb new file mode 100644 index 0000000..b955220 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search.rb @@ -0,0 +1 @@ +require 'cocoapods-search/gem_version' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command.rb new file mode 100644 index 0000000..61b9cb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command.rb @@ -0,0 +1 @@ +require 'cocoapods-search/command/search' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command/search.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command/search.rb new file mode 100644 index 0000000..4cc195d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command/search.rb @@ -0,0 +1,115 @@ +module Pod + class Command + # @CocoaPods 0.0.2 + # + class Search < Command + self.summary = 'Search for pods' + + self.description = <<-DESC + Searches for pods, ignoring case, whose name, summary, description, or authors match `QUERY`. If the + `--simple` option is specified, this will only search in the names of the pods. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', true), + ] + + def self.options + options = [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--simple', 'Search only by name'], + ['--stats', 'Show additional stats (like GitHub watchers and forks)'], + ['--web', 'Searches on cocoapods.org'], + ] + options += Platform.all.map do |platform| + ["--#{platform.name.to_s}", "Restricts the search to Pods supported on #{Platform.string_name(platform.to_sym)}"] + end + options << ['--no-pager', 'Do not pipe search results into a pager'] + options.concat(super.reject { |option, _| option == '--silent' }) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @simple_search = argv.flag?('simple') + @stats = argv.flag?('stats') + @web = argv.flag?('web') + @platform_filters = Platform.all.map do |platform| + argv.flag?(platform.name.to_s) ? platform.to_sym : nil + end.compact + @query = argv.arguments! unless argv.arguments.empty? + config.silent = false + @use_pager = argv.flag?('pager', true) + super + end + + def validate! + super + help! 'A search query is required.' unless @query + + unless @web || !@use_regex + begin + /#{@query.join(' ').strip}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + end + end + + def run + ensure_master_spec_repo_exists! + if @web + web_search + else + local_search + end + end + + def sources_manager + defined?(Pod::SourcesManager) ? 
Pod::SourcesManager : config.sources_manager + end + + def web_search + queries = @platform_filters.map do |platform| + "on:#{platform}" + end + queries += @query + query_parameter = queries.compact.flatten.join(' ') + url = "https://cocoapods.org/?q=#{CGI.escape(query_parameter).gsub('+', '%20')}" + UI.puts("Opening #{url}") + Executable.execute_command(:open, [url]) + end + + def local_search + query_regex = @query.reduce([]) { |result, q| + result << (@use_regex ? q : Regexp.escape(q)) + }.join(' ').strip + + sets = sources_manager.search_by_name(query_regex, !@simple_search) + + @platform_filters.each do |platform| + sets.reject! { |set| !set.specification.available_platforms.map(&:name).include?(platform) } + end + + if(@use_pager) + UI.with_pager { print_sets(sets) } + else + print_sets(sets) + end + end + + def print_sets(sets) + sets.each do |set| + begin + if @stats + UI.pod(set, :stats) + else + UI.pod(set, :normal) + end + rescue DSLError + UI.warn "Skipping `#{set.name}` because the podspec contains errors." + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/gem_version.rb new file mode 100644 index 0000000..6364df6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/gem_version.rb @@ -0,0 +1,3 @@ +module CocoapodsSearch + VERSION = '1.0.1'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..678d204 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'cocoapods-search/command' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/command/search_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/command/search_spec.rb new file mode 100644 index 0000000..f3d4334 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/command/search_spec.rb @@ -0,0 +1,148 @@ +require File.expand_path('../../spec_helper', __FILE__) + +module Pod + describe Command::Search do + extend SpecHelper::TemporaryRepos + + describe 'Search' do + it 'registers it self' do + Command.parse(%w{ search }).should.be.instance_of Command::Search + end + + it 'runs with correct parameters' do + lambda { run_command('search', 'JSON') }.should.not.raise + lambda { run_command('search', 'JSON', '--simple') }.should.not.raise + end + + it 'complains for wrong parameters' do + lambda { run_command('search') }.should.raise CLAide::Help + lambda { run_command('search', 'too', '--wrong') }.should.raise CLAide::Help + lambda { run_command('search', '--wrong') }.should.raise CLAide::Help + end + + it 'searches for a pod with name matching the given query ignoring case' do + output = run_command('search', 'json', '--simple') + output.should.include? 'JSONKit' + end + + it 'searches for a pod with name, summary, or description matching the given query ignoring case' do + output = run_command('search', 'engelhart') + output.should.include? 'JSONKit' + end + + it 'searches for a pod with name, summary, or description matching the given multi-word query ignoring case' do + output = run_command('search', 'very', 'high', 'performance') + output.should.include? 
'JSONKit' + end + + it 'prints search results in order' do + output = run_command('search', 'lib') + output.should.match /BananaLib.*JSONKit/m + end + + it 'restricts the search to Pods supported on iOS' do + output = run_command('search', 'BananaLib', '--ios') + output.should.include? 'BananaLib' + Specification.any_instance.stubs(:available_platforms).returns([Platform.osx]) + output = run_command('search', 'BananaLib', '--ios') + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on OS X' do + output = run_command('search', 'BananaLib', '--osx') + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on Watch OS' do + output = run_command('search', 'a', '--watchos') + output.should.include? 'Realm' + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on tvOS' do + output = run_command('search', 'n', '--tvos') + output.should.include? 'monkey' + output.should.not.include? 'BananaLib' + end + + it 'outputs with the silent parameter' do + output = run_command('search', 'BananaLib', '--silent') + output.should.include? 'BananaLib' + end + + it 'shows a friendly message when locally searching with invalid regex' do + lambda { run_command('search', '--regex', '+') }.should.raise CLAide::Help + end + + it 'does not try to validate the query as a regex with plain-text search' do + lambda { run_command('search', '+') }.should.not.raise CLAide::Help + end + + it 'uses regex search when asked for regex mode' do + output = run_command('search', '--regex', 'Ba(na)+Lib') + output.should.include? 'BananaLib' + output.should.not.include? 'Pod+With+Plus+Signs' + output.should.not.include? 'JSONKit' + end + + it 'uses plain-text search when not asked for regex mode' do + output = run_command('search', 'Pod+With+Plus+Signs') + output.should.include? 'Pod+With+Plus+Signs' + output.should.not.include? 'BananaLib' + end + end + + describe 'option --web' do + extend SpecHelper::TemporaryRepos + + it 'searches with invalid regex' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=NSAttributedString%2BCCLFormat']) + run_command('search', '--web', 'NSAttributedString+CCLFormat') + end + + it 'should url encode search queries' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=NSAttributedString%2BCCLFormat']) + run_command('search', '--web', 'NSAttributedString+CCLFormat') + end + + it 'searches the web via the open! 
command' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=bananalib']) + run_command('search', '--web', 'bananalib') + end + + it 'includes option --osx correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aosx%20bananalib']) + run_command('search', '--web', '--osx', 'bananalib') + end + + it 'includes option --ios correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aios%20bananalib']) + run_command('search', '--web', '--ios', 'bananalib') + end + + it 'includes option --watchos correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Awatchos%20bananalib']) + run_command('search', '--web', '--watchos', 'bananalib') + end + + it 'includes option --tvos correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Atvos%20bananalib']) + run_command('search', '--web', '--tvos', 'bananalib') + end + + it 'includes any new platform option correctly' do + Platform.stubs(:all).returns([Platform.ios, Platform.tvos, Platform.new('whateveros')]) + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Awhateveros%20bananalib']) + run_command('search', '--web', '--whateveros', 'bananalib') + end + + it 'does not matter in which order the ios/osx options are set' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aios%20on%3Aosx%20bananalib']) + run_command('search', '--web', '--ios', '--osx', 'bananalib') + + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aios%20on%3Aosx%20bananalib']) + run_command('search', '--web', '--osx', '--ios', 'bananalib') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec new file mode 100644 index 0000000..881ed82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec @@ -0,0 +1,21 @@ +Pod::Spec.new do |s| + s.name = 'BananaLib' + s.version = '1.0' + s.authors = 'Banana Corp', { 'Monkey Boy' => 'monkey@banana-corp.local' } + s.homepage = 'http://banana-corp.local/banana-lib.html' + s.summary = 'Chunky bananas!' + s.description = 'Full of chunky bananas.' + s.platform = :ios + + s.source = { :git => 'http://banana-corp.local/banana-lib.git', :tag => 'v1.0' } + s.source_files = 'Classes/*.{h,m}', 'Vendor' + s.xcconfig = { 'OTHER_LDFLAGS' => '-framework SystemConfiguration' } + s.prefix_header_file = 'Classes/BananaLib.pch' + s.resources = "Resources/*.png" + s.dependency 'monkey', '~> 1.0.1', '< 1.0.9' + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' 
+ } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec new file mode 100644 index 0000000..687fe5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec @@ -0,0 +1,11 @@ +Pod::Spec.new do |s| + s.name = 'JSONKit' + s.version = '1.4' + s.license = 'BSD / Apache License, Version 2.0' + s.summary = 'A Very High Performance Objective-C JSON Library.' + s.homepage = 'https://github.com/johnezang/JSONKit' + s.author = 'John Engelhart' + s.source = { :git => 'https://github.com/johnezang/JSONKit.git', :tag => 'v1.4' } + + s.source_files = 'JSONKit.*' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec new file mode 100644 index 0000000..d9f2c9d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec @@ -0,0 +1,12 @@ +Pod::Spec.new do |s| + s.name = 'JSONKit' + s.version = '999.999.999' + s.license = 'BSD / Apache License, Version 2.0' + s.summary = 'A Very High Performance Objective-C JSON Library.' + s.homepage = 'https://github.com/johnezang/JSONKit' + s.author = 'John Engelhart' + s.source = { :git => 'https://github.com/johnezang/JSONKit.git', :commit => '0aff3deb5e1bb2bbc88a83fd71c8ad5550185cce' } + + s.source_files = 'JSONKit.*' + s.compiler_flags = '-Wno-deprecated-objc-isa-usage', '-Wno-format' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec new file mode 100644 index 0000000..ef91e61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec @@ -0,0 +1,16 @@ +Pod::Spec.new do |s| + s.name = "OrangeFramework" + s.version = "0.1.0" + s.author = { "Swiftest Orang-Utan" => "swiftest@orang.utan.local" } + s.summary = "Fresh juice!" + s.description = "Blends fresh orange juice." 
+ s.homepage = "http://httpbin.org/html" + s.source = { :git => "http://utan.local/orange-framework.git", :tag => s.version.to_s } + s.license = 'MIT' + + s.platform = :ios, '8.0' + + s.source_files = 'Source/Juicer.swift' + + s.frameworks = 'UIKit' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec new file mode 100644 index 0000000..9aa9123 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec @@ -0,0 +1,17 @@ +Pod::Spec.new do |s| + s.name = 'Pod+With+Plus+Signs' + s.version = '1.0' + s.authors = 'Evil Corp' + s.homepage = 'http://evil-corp.local/pod_with_plus_signs.html' + s.summary = 'Messing with special chars' + s.description = 'I love messing up with special chars in my pod name! Mouahahahahaa (evil laugh)' + s.platform = :ios + + s.source = { :git => 'http://evil-corp.local/pod_with_plus_signs.git', :tag => '1.0' } + s.source_files = 'Classes/*.{h,m}' + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' + } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec new file mode 100644 index 0000000..14f9360 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec @@ -0,0 +1,18 @@ +Pod::Spec.new do |s| + s.name = 'Realm' + s.version = '0.94' + s.authors = 'Realm', { 'Realm' => 'help@realm.io' } + s.homepage = 'https://realm.io/' + s.summary = 'Realm is a modern data framework & database for iOS & OS X.' + s.description = 'The Realm database, for Objective-C. (If you want to use Realm from Swift, see the “RealmSwift” pod.)\n\nRealm is a mobile database: a replacement for Core Data & SQLite. You can use it on iOS & OS X. Realm is not an ORM on top SQLite: instead it uses its own persistence engine, built for simplicity (& speed). Learn more and get help at https://realm.io' + s.platform = :watchos + + s.source = { :git => 'https://github.com/realm/realm-cocoa.git', :tag => 'v0.94.0' } + s.source_files = 'Realm/*.{m,mm}', 'Realm/ObjectStore/*.cpp' + s.xcconfig = { 'CLANG_CXX_LANGUAGE_STANDARD": "compiler-default' => 'OTHER_CPLUSPLUSFLAGS": "-std=c++1y $(inherited)' } + s.prefix_header_file = 'Classes/Realm.pch' + s.license = { + :type => 'Apache 2.0', + :file => 'LICENSE' + } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec new file mode 100644 index 0000000..60d4234 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec @@ -0,0 +1,11 @@ +Pod::Spec.new do |s| + s.name = "monkey" + s.version = "1.0.2" + s.author = { "Funky Monkey" => "funky@monkey.local" } + s.summary = "🙈🙉🙊" + s.description = "See no evil! Hear no evil! Speak no evil!" 
+ s.homepage = "http://httpbin.org/html" + s.source = { :git => "http://monkey.local/monkey.git", :tag => s.version.to_s } + s.license = 'MIT' + s.vendored_library = 'monkey.a' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper.rb new file mode 100644 index 0000000..d10a130 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper.rb @@ -0,0 +1,85 @@ +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$:.unshift((ROOT + 'lib').to_s) +$:.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'cocoapods' + +require 'cocoapods_plugin' + +require 'spec_helper/command' # Allows to run Pod commands and returns their output. +require 'spec_helper/fixture' # Provides access to the fixtures and unpacks them if needed. +require 'spec_helper/temporary_repos' # Allows to create and modify temporary spec repositories. +require 'spec_helper/user_interface' # Redirects UI to UI.output & UI.warnings. +require 'spec_helper/pre_flight' # Cleans the temporary directory, the config & the UI.output before every test. + +module Bacon + class Context + include Pod::Config::Mixin + include SpecHelper::Fixture + include SpecHelper::Command + + def skip_xcodebuild? + ENV['SKIP_XCODEBUILD'] + end + + def temporary_directory + SpecHelper.temporary_directory + end + end +end + +#Mocha::Configuration.prevent(:stubbing_non_existent_method) + +module SpecHelper + def self.temporary_directory + ROOT + 'tmp' + end +end + +def temporary_sandbox + Pod::Sandbox.new(temporary_directory + 'Pods') +end + +def fixture_spec(name) + file = SpecHelper::Fixture.fixture(name) + Pod::Specification.from_file(file) +end + +def fixture_file_accessor(spec_or_name, platform = Pod::Platform.ios) + spec = spec_or_name.is_a?(Pod::Specification) ? spec_or_name : fixture_spec(spec_or_name) + path_list = Pod::Sandbox::PathList.new(spec.defined_in_file.dirname) + Pod::Sandbox::FileAccessor.new(path_list, spec.consumer(platform)) +end + +def fixture_target_definition(name = 'Pods', platform = Pod::Platform.ios) + Pod::Podfile::TargetDefinition.new(name, Pod::Podfile.new, 'name' => name, 'platform' => platform) +end + +def fixture_pod_target(spec_or_name, target_definition = nil) + spec = spec_or_name.is_a?(Pod::Specification) ? 
spec_or_name : fixture_spec(spec_or_name) + target_definition ||= fixture_target_definition + target_definition.store_pod(spec.name) + Pod::PodTarget.new([spec], [target_definition], config.sandbox).tap do |pod_target| + pod_target.file_accessors << fixture_file_accessor(spec, pod_target.platform) + consumer = spec.consumer(pod_target.platform) + pod_target.spec_consumers << consumer + end +end + +def fixture_aggregate_target(pod_targets = [], target_definition = nil) + target_definition ||= pod_targets.flat_map(&:target_definitions).first || fixture_target_definition + target = Pod::AggregateTarget.new(target_definition, config.sandbox) + target.client_root = config.sandbox.root.dirname + target.pod_targets = pod_targets + target +end + +#-----------------------------------------------------------------------------# + +SpecHelper::Fixture.fixture('banana-lib') # ensure it exists +SpecHelper::Fixture.fixture('orange-framework') diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/command.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/command.rb new file mode 100644 index 0000000..6662114 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/command.rb @@ -0,0 +1,27 @@ +module SpecHelper + module Command + def argv(*argv) + CLAide::ARGV.new(argv) + end + + def command(*argv) + argv += ['--no-ansi', '--no-pager'] + Pod::Command.parse(argv) + end + + def run_command(*args) + Dir.chdir(SpecHelper.temporary_directory) do + Pod::UI.output = '' + # @todo Remove this once all cocoapods has + # been converted to use the UI.puts + config_silent = config.silent? + config.silent = false + cmd = command(*args) + cmd.validate! + cmd.run + config.silent = config_silent + Pod::UI.output + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/fixture.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/fixture.rb new file mode 100644 index 0000000..39704e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/fixture.rb @@ -0,0 +1,32 @@ +module SpecHelper + def self.fixture(name) + Fixture.fixture(name) + end + + def self.create_sample_app_copy_from_fixture(fixture_name) + fixture_copy_path = temporary_directory + fixture_name + FileUtils.cp_r(fixture(fixture_name), temporary_directory) + fixture_copy_path + "#{fixture_name}.xcodeproj" + end + + def self.test_repo_url + 'https://github.com/CocoaPods/test_repo.git' + + end + + module Fixture + ROOT = ::ROOT + 'spec/fixtures' + + def fixture(name) + file = ROOT + name + unless file.exist? + archive = Pathname.new(file.to_s + '.tar.gz') + if archive.exist? 
+ system "cd '#{archive.dirname}' && tar -zxvf '#{archive}' > /dev/null 2>&1" + end + end + file + end + module_function :fixture + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/pre_flight.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/pre_flight.rb new file mode 100644 index 0000000..ee52b35 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/pre_flight.rb @@ -0,0 +1,36 @@ +# Restores the config to the default state before each requirement + +module Bacon + class Context + old_run_requirement = instance_method(:run_requirement) + + + define_method(:run_requirement) do |description, spec| + ::Pod::Config.instance = nil + ::Pod::Config.instance.tap do |c| + c.verbose = false + c.silent = true + c.repos_dir = fixture('spec-repos') + c.installation_root = SpecHelper.temporary_directory + c.cache_root = SpecHelper.temporary_directory + 'Cache' + end + + ::Pod::UI.output = '' + ::Pod::UI.warnings = '' + ::Pod::UI.next_input = '' + # The following prevents a nasty behaviour where the increments are not + # balanced when testing informative which might lead to sections not + # being printed to the output as they are too nested. + ::Pod::UI.indentation_level = 0 + ::Pod::UI.title_level = 0 + + SpecHelper.temporary_directory.rmtree if SpecHelper.temporary_directory.exist? + SpecHelper.temporary_directory.mkpath + + # TODO + #::Pod::SourcesManager.stubs(:search_index_path).returns(temporary_directory + 'search_index.yaml') + + old_run_requirement.bind(self).call(description, spec) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/temporary_repos.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/temporary_repos.rb new file mode 100644 index 0000000..2c6f5da --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/temporary_repos.rb @@ -0,0 +1,90 @@ +module SpecHelper + def self.tmp_repos_path + TemporaryRepos.tmp_repos_path + end + + module TemporaryRepos + extend Pod::Executable + executable :git + + # @return [Pathname] The path for the repo with the given name. + # + def repo_path(name) + tmp_repos_path + name + end + + # Makes a repo with the given name. + # + def repo_make(name) + path = repo_path(name) + path.mkpath + Dir.chdir(path) do + `git init` + repo_make_readme_change(name, 'Added') + `git add .` + `git commit -m "Initialized."` + end + path + end + + # Clones a repo to the given name. + # + def repo_clone(from_name, to_name) + Dir.chdir(tmp_repos_path) { `git clone #{from_name} #{to_name} 2>&1 > /dev/null` } + repo_path(to_name) + end + + def repo_make_readme_change(name, string) + file = repo_path(name) + 'README' + file.open('w') { |f| f << "#{string}" } + end + + #--------------------------------------# + + def test_repo_path + repo_path('master') + end + + # Sets up a lighweight master repo in `tmp/cocoapods/repos/master` with the + # contents of `spec/fixtures/spec-repos/test_repo`. + # + def set_up_test_repo + require 'fileutils' + test_repo_path.mkpath + origin = ROOT + 'spec/fixtures/spec-repos/test_repo/.' + destination = tmp_repos_path + 'master' + FileUtils.cp_r(origin, destination) + repo_make('master') + end + + def test_old_repo_path + repo_path('../master') + end + + # Sets up a lighweight master repo in `tmp/cocoapods/master` with the + # contents of `spec/fixtures/spec-repos/test_repo`. 
+ # + def set_up_old_test_repo + require 'fileutils' + test_old_repo_path.mkpath + origin = ROOT + 'spec/fixtures/spec-repos/test_repo/.' + destination = tmp_repos_path + '../master' + FileUtils.cp_r(origin, destination) + repo_make('../master') + end + + #--------------------------------------# + + def tmp_repos_path + SpecHelper.temporary_directory + 'cocoapods/repos' + end + + module_function :tmp_repos_path + + def self.extended(base) + base.before do + TemporaryRepos.tmp_repos_path.mkpath + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/user_interface.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/user_interface.rb new file mode 100644 index 0000000..d603a45 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-search-1.0.1/spec/spec_helper/user_interface.rb @@ -0,0 +1,36 @@ +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + @next_input = '' + + class << self + attr_accessor :output + attr_accessor :warnings + attr_accessor :next_input + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + + alias_method :gets, :next_input + + def print_warnings + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.kick b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.kick new file mode 100644 index 0000000..612ecc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.kick @@ -0,0 +1,29 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. 
+process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop.yml new file mode 100644 index 0000000..227d50d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..ebc2123 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop_cocoapods.yml @@ -0,0 +1,138 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Autocorrect makes this cop much more useful, taking away needless guessing +Lint/EndAlignment: + AutoCorrect: true + + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. 
+Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop_todo.yml new file mode 100644 index 0000000..e8af53d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.rubocop_todo.yml @@ -0,0 +1,33 @@ +# This configuration was generated by `rubocop --auto-gen-config` +# on 2014-08-20 17:00:42 +0200 using RuboCop version 0.25.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +Metrics/CyclomaticComplexity: + Max: 8 + +# Offense count: 9 +# Configuration parameters: AllowURI. +Metrics/LineLength: + Max: 105 + +# Offense count: 7 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 42 + +# Offense count: 1 +Metrics/PerceivedComplexity: + Max: 9 + +# Offense count: 1 +Style/ClassVars: + Enabled: false + +# Offense count: 1 +# Configuration parameters: Keywords. +Style/CommentAnnotation: + Enabled: false diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.travis.yml new file mode 100644 index 0000000..7f1c402 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/.travis.yml @@ -0,0 +1,25 @@ +dist: trusty +language: ruby +cache: bundler +rvm: + - 2.0.0-p647 + - 2.3.4 + - 2.4.1 + - 2.6.3 +addons: + code_climate: + repo_token: 937468c2cbb0d7c0546b62d0fcbcba8a2a8b82714a64a52ffd0b951e71df626d + +before_install: + - export GEM_HOME=$HOME/.gem + - export PATH=$GEM_HOME/bin:$PATH + # There is a bug in travis. When using system ruby, bundler is not + # installed and causes the default install action to fail. + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem install "bundler:~> 1.13"; else gem install "bundler:~> 1.13"; fi + # RubyGems 2.0.14 isn't a fun time on 2.0.0p451 + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem update --system; fi + +install: + - bundle install --without=documentation + +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/CHANGELOG.md new file mode 100644 index 0000000..c144828 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/CHANGELOG.md @@ -0,0 +1,367 @@ +## 1.5.0 (2020-05-01) + +##### Enhancements + +* Add --synchronous option to `pod trunk push`. 
+ [Paul Beusterien](https://github.com/paulb777) + [#147](https://github.com/CocoaPods/cocoapods-trunk/pull/147) + [CocoaPods#9497](https://github.com/CocoaPods/CocoaPods/issues/9497) + +##### Bug Fixes + +* None. + + +## 1.4.1 (2019-09-26) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Use a more robust `Trunk` init when pushing. + [Igor Makarov](https://github.com/igor-makarov) + [#135](https://github.com/CocoaPods/cocoapods-trunk/pull/135) + + +## 1.4.0 (2019-08-21) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Update to get the master spec repo from `Source::Manager` for validation - effectively + use the new CDN `TrunkSource` for podspec validation and not a hard-coded URL + [Igor Makarov](https://github.com/igor-makarov) + [#132](https://github.com/CocoaPods/cocoapods-trunk/pull/132) + [CocoaPods#9112](https://github.com/CocoaPods/CocoaPods/issues/9112) + +## 1.3.1 (2018-08-16) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.3.0 (2017-10-02) + +##### Enhancements + +* Add skip test option to trunk push + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/93) + +* Loosen netrc requirement + [jasl](https://github.com/jasl) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/95) + +* Update development dependencies to support MRI 2.3+ + [jasl](https://github.com/jasl) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/95) + +##### Bug Fixes + +* None. + + +## 1.2.0 (2017-04-11) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Properly display `pod trunk deprecate` command line options + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6486](https://github.com/CocoaPods/CocoaPods/issues/6486) + +* Add `--skip-import-validation` to skip linking a pod during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#86](https://github.com/CocoaPods/cocoapods-trunk/pull/86) + + +## 1.1.2 (2016-12-17) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Checks that `Pod::Validator` has `swift_version=` for CocoaPods <= 1.1.0 support. + [Danielle Tomlinson](https://github.com/dantoml) + [#6209](https://github.com/CocoaPods/CocoaPods/issues/6209) + + +## 1.1.1 (2016-10-20) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Support submitting from multiple versions of CocoaPods. + [Samuel Giddins](https://github.com/segiddins) + +## 1.1.0 (2016-10-19) + +##### Enhancements + +* Passes the pod's version of Swift used for deployment to the CocoaPods Specs repo + [Orta](https://github.com/orta) + [#92](https://github.com/CocoaPods/cocoapods-trunk/pull/72) + +* Prettier success message when successfully pushed a new version + [Marin](https://github.com/icanzilb) + [#76](https://github.com/CocoaPods/cocoapods-trunk/pull/76) + +##### Bug Fixes + +* None. + + +## 1.1.0.beta.1 (2016-10-10) + +##### Enhancements + +* Pass --swift-version to the Validator during `pod lib lint` + [Danielle Tomlinson](https://github.com/dantoml) + [#92](https://github.com/CocoaPods/cocoapods-trunk/pull/72) + +##### Bug Fixes + +* None. + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Don't print the invocation of `/bin/date`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* Make the error loading a specification during `pod trunk push` more + informative. 
+ [Samuel Giddins](https://github.com/segiddins) + [#63](https://github.com/CocoaPods/cocoapods-trunk/issues/63) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.4 (2016-04-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Compatibility With CocoaPods 1.0.0.beta.8. + [Samuel Giddins](https://github.com/segiddins) + [#61](https://github.com/CocoaPods/cocoapods-trunk/issues/61) + + +## 1.0.0.beta.3 (2016-04-14) + +##### Enhancements + +* The failure reason is printed when validation fails during `pod trunk push`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#5073](https://github.com/CocoaPods/CocoaPods/issues/5073) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-02-03) + +##### Bug Fixes + +* Send a body with the `PATCH` request to deprecate a pod. + [Samuel Giddins](https://github.com/segiddins) + [#52](https://github.com/CocoaPods/cocoapods-trunk/issues/52) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Enhancements + +* The `pod deprecate PODNAME` command has been added to deprecate all versions + of a pod. + [Samuel Giddins](https://github.com/segiddins) + [#31](https://github.com/CocoaPods/cocoapods-trunk/issues/31) + +* The `pod delete PODNAME VERSION` command has been added to delete a single + version of a pod. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* If the master repo has not been setup when pushing a spec, run `pod setup` + instead of failing. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/cocoapods-trunk/issues/48) + + +## 0.6.4 (2015-08-28) + +##### Bug Fixes + +* This release fixes installation compatibility issues when using the RubyGem + due to an incompatible dependency on `nap`. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.3 (2015-08-28) + +##### Bug Fixes + +* This release fixes a file permissions error when using the RubyGem. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.2 (2015-08-26) + +##### Enhancements + +* The `--allow-warnings` flag to `pod trunk push` is now propagated to the trunk + server. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#3855](https://github.com/CocoaPods/CocoaPods/issues/3855) + + +## 0.6.1 (2015-05-27) + +##### Enhancements + +* The `master` specs repo is updated before and after pushing a new spec to + trunk. + [Samuel Giddins](https://github.com/segiddins) + [#43](https://github.com/CocoaPods/cocoapods-trunk/issues/43) + + +## 0.6.0 (2015-03-11) + +##### Enhancements + +* Allow specifying a Trunk token via the `COCOAPODS_TRUNK_TOKEN` environment + variable. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#3224](https://github.com/CocoaPods/CocoaPods/issues/3224) + + +## 0.5.1 (2015-02-25) + +##### Enhancements + +* Lint as a framework automatically. If needed, the `--use-libraries` + option allows linting as a static library. + [Boris Bügling](https://github.com/neonichu) + [#2912](https://github.com/CocoaPods/CocoaPods/issues/2912) + +##### Bug Fixes + +* Fix the detection of spec validation errors, and present the proper error + (and messages) to the user. + [Orta Therox](https://github.com/orta) + [#39](https://github.com/CocoaPods/cocoapods-trunk/issues/39) + + +## 0.5.0 (2014-12-25) + +##### Enhancements + +* Added `pod trunk remove-owner` command to remove an owner from a pod. + [Samuel Giddins](https://github.com/segiddins) + [#35](https://github.com/CocoaPods/cocoapods-trunk/issues/35) + +* Added `pod trunk info` command to get information for a pod, including the + owners. 
+ [Kyle Fuller](https://github.com/kylef) + [#15](https://github.com/CocoaPods/cocoapods-trunk/issues/15) + + +## 0.4.1 (2014-11-19) + +##### Enhancements + +* Improved code readability and structure by splitting subcommands + into individual files. + [Olivier Halligon](https://github.com/alisoftware) + [#21](https://github.com/CocoaPods/CocoaPods/issues/21) + +##### Bug Fixes + +* Updates for changes in CocoaPods regarding `--allow-warnings`. + [Kyle Fuller](https://github.com/kylef) + [Cocoapods#2831](https://github.com/CocoaPods/CocoaPods/pull/2831) + + +## 0.4.0 (2014-11-06) + +##### Bug Fixes + +* Fixes installation issues with the JSON dependency. + [Eloy Durán](https://github.com/alloy) + [CocoaPods#2773](https://github.com/CocoaPods/CocoaPods/issues/2773) + +## 0.3.1 (2014-10-15) + +##### Bug Fixes + +* Fixes an issue introduced with the release of `netrc 0.7.8`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#2674](https://github.com/CocoaPods/CocoaPods/issues/2674) + + +## 0.3.0 (2014-10-07) + +##### Enhancements + +* When linting, only allow dependencies from the 'master' specs repository. + [Samuel Giddins](https://github.com/segiddins) + [#28](https://github.com/CocoaPods/cocoapods-trunk/issues/28) + +##### Bug Fixes + +* Fixes an issue where `pod trunk push` doesn't show which validation errors + and just stated it failed. + [Kyle Fuller](https://github.com/kylef) + [#26](https://github.com/CocoaPods/cocoapods-trunk/issues/26) + + +## 0.2.0 (2014-09-11) + +##### Enhancements + +* Network errors are now gracefully handled. + [Samuel E. Giddins](https://github.com/segiddins) + +* Adopted new argument format of CLAide. + [Olivier Halligon](https://github.com/AliSoftware) + + +## 0.1.0 (2014-05-19) + +* Initial release. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Gemfile new file mode 100644 index 0000000..0244104 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Gemfile @@ -0,0 +1,25 @@ +source 'https://rubygems.org' + +gemspec + +# This is the version that ships with OS X 10.10, so be sure we test against it. +# At the same time, the 1.7.7 version won't install cleanly on Ruby > 2.2, +# so we use a fork that makes a trivial change to a macro invocation. 
+gem 'json', :git => 'https://github.com/segiddins/json.git', :branch => 'seg-1.7.7-ruby-2.2' + +group :development do + gem 'cocoapods', :git => "https://github.com/CocoaPods/CocoaPods.git", :branch => 'master' + gem 'cocoapods-core', :git => "https://github.com/CocoaPods/Core.git", :branch => 'master' + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + + gem 'bacon' + gem 'kicker' + gem 'mocha' + gem 'mocha-on-bacon' + gem 'prettybacon' + gem 'webmock' + + gem 'codeclimate-test-reporter', :require => nil + gem 'rubocop' +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Gemfile.lock new file mode 100644 index 0000000..e556b39 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Gemfile.lock @@ -0,0 +1,181 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: b5ced9cc141df732e8027078543eb92fc6447567 + branch: master + specs: + claide (1.0.3) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: ae4d9804450c9f83a3fea29eadc3c6e2affe24c7 + branch: master + specs: + cocoapods (1.8.0) + activesupport (>= 4.0.2, < 5) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.8.0) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.2.2, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-stats (>= 1.0.0, < 2.0) + cocoapods-trunk (= 1.5.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.6.6) + nap (~> 1.0) + ruby-macho (~> 1.4) + xcodeproj (>= 1.11.1, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: dbc9b76820d546b4d655aa26141fad0412c3c849 + branch: master + specs: + cocoapods-core (1.8.0) + activesupport (>= 4.0.2, < 6) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.0) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + +GIT + remote: https://github.com/segiddins/json.git + revision: a9588bc4334c2f5bf985f255b61c05eafdcd8907 + branch: seg-1.7.7-ruby-2.2 + specs: + json (1.7.7) + +PATH + remote: . 
+ specs: + cocoapods-trunk (1.5.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.1) + activesupport (4.2.11.1) + i18n (~> 0.7) + minitest (~> 5.1) + thread_safe (~> 0.3, >= 0.3.4) + tzinfo (~> 1.1) + addressable (2.5.1) + public_suffix (~> 2.0, >= 2.0.2) + algoliasearch (1.27.0) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + ast (2.2.0) + atomos (0.1.3) + bacon (1.2.0) + cocoapods-deintegrate (1.0.4) + cocoapods-downloader (1.2.2) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.0) + cocoapods-stats (1.1.0) + cocoapods-try (1.1.0) + codeclimate-test-reporter (0.4.7) + simplecov (>= 0.7.1, < 1.0.0) + colored2 (3.1.2) + concurrent-ruby (1.1.5) + crack (0.4.3) + safe_yaml (~> 1.0.0) + docile (1.1.5) + escape (0.0.4) + ffi (1.9.10) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + hashdiff (0.3.4) + httpclient (2.8.3) + i18n (0.9.5) + concurrent-ruby (~> 1.0) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + minitest (5.12.0) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + molinillo (0.6.6) + multi_json (1.11.2) + nanaimo (0.2.6) + nap (1.1.0) + netrc (0.11.0) + notify (0.5.2) + parser (2.3.0.7) + ast (~> 2.2) + powerpack (0.1.1) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (2.0.5) + rainbow (2.1.0) + rake (10.4.2) + rb-fsevent (0.9.5) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.4) + ffi (>= 0.5.0) + rubocop (0.39.0) + parser (>= 2.3.0.7, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-macho (1.4.0) + ruby-progressbar (1.7.5) + safe_yaml (1.0.4) + simplecov (0.9.2) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.9.0) + simplecov-html (0.9.0) + thread_safe (0.3.6) + tzinfo (1.2.5) + thread_safe (~> 0.1) + unicode-display_width (1.0.3) + webmock (3.5.1) + addressable (>= 2.3.6) + crack (>= 0.3.2) + hashdiff + xcodeproj (1.12.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.2.6) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-trunk! + codeclimate-test-reporter + json! + kicker + mocha + mocha-on-bacon + prettybacon + rake (~> 10.0) + rubocop + webmock + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/LICENSE.txt new file mode 100644 index 0000000..d011c24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2013 Eloy Durán + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/README.md new file mode 100644 index 0000000..90552b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/README.md @@ -0,0 +1,36 @@ +# CocoaPods::Trunk + +[![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-trunk/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-trunk) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/cocoapods-trunk.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-trunk) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/cocoapods-trunk.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-trunk) + +CocoaPods plugin for trunk. + +## Installation + +Add this line to your application's Gemfile: + + gem 'cocoapods-trunk' + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install cocoapods-trunk + +## Usage + +With a local install of `trunk.cocoapods.org` up and running: + + $ env TRUNK_SCHEME_AND_HOST=http://localhost:4567 bundle exec pod trunk --help + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Rakefile new file mode 100644 index 0000000..2a8b679 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/Rakefile @@ -0,0 +1,68 @@ +# encoding: utf-8 + +#-- Bootstrap --------------------------------------------------------------# + +desc 'Initializes your working copy to run the specs' +task :bootstrap do + if system('which bundle') + title 'Installing gems' + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' + "\e[0m" + exit 1 + end +end + +begin + require "bundler/gem_tasks" + task :default => :spec + + #-- Specs ------------------------------------------------------------------# + + desc 'Runs all the specs' + task :spec do + title 'Running Unit Tests' + files = FileList['spec/**/*_spec.rb'].shuffle.join(' ') + sh "bundle exec bacon #{files}" + + title 'Checking code style...' + Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3' + end + + #-- Kick -------------------------------------------------------------------# + + desc 'Automatically run specs for updated files' + task :kick do + exec 'bundle exec kicker -c' + end + + #-- RuboCop ----------------------------------------------------------------# + + if RUBY_VERSION >= '1.9.3' + require 'rubocop/rake_task' + RuboCop::RakeTask.new + end + +rescue LoadError => e + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' 
\ + "\e[0m" + $stderr.puts e.message + $stderr.puts e.backtrace + $stderr.puts +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/cocoapods-trunk.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/cocoapods-trunk.gemspec new file mode 100644 index 0000000..e4f525f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/cocoapods-trunk.gemspec @@ -0,0 +1,26 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_trunk' + +Gem::Specification.new do |spec| + spec.name = "cocoapods-trunk" + spec.version = CocoaPodsTrunk::VERSION + spec.authors = ["Eloy Durán"] + spec.email = ["eloy.de.enige@gmail.com"] + spec.summary = "Interact with trunk.cocoapods.org" + spec.homepage = "https://github.com/CocoaPods/cocoapods-trunk" + spec.license = "MIT" + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ["lib"] + + spec.add_dependency 'nap', '>= 0.8', '< 2.0' + spec.add_dependency 'netrc', '~> 0.11' + spec.add_development_dependency "bundler", "~> 1.3" + spec.add_development_dependency "rake", '~> 10.0' + + spec.required_ruby_version = '>= 2.0.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..60a2dea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/trunk' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/cocoapods_trunk.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/cocoapods_trunk.rb new file mode 100644 index 0000000..3d5a0ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/cocoapods_trunk.rb @@ -0,0 +1,3 @@ +module CocoaPodsTrunk + VERSION = '1.5.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk.rb new file mode 100644 index 0000000..7101e5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk.rb @@ -0,0 +1,146 @@ +# encoding: UTF-8 + +require 'json' +require 'rest' +require 'netrc' + +module Pod + class Command + class Trunk < Command + self.abstract_command = true + self.summary = 'Interact with the CocoaPods API (e.g. 
publishing new specs)' + + SCHEME_AND_HOST = ENV['TRUNK_SCHEME_AND_HOST'] || 'https://trunk.cocoapods.org' + BASE_URL = "#{SCHEME_AND_HOST}/api/v1".freeze + + require 'pod/command/trunk/add_owner' + require 'pod/command/trunk/delete' + require 'pod/command/trunk/deprecate' + require 'pod/command/trunk/info' + require 'pod/command/trunk/me' + require 'pod/command/trunk/push' + require 'pod/command/trunk/register' + require 'pod/command/trunk/remove_owner' + + private + + def request_url(action, url, *args) + response = create_request(action, url, *args) + if (400...600).cover?(response.status_code) + print_error(response.body) + end + response + end + + def request_path(action, path, *args) + request_url(action, "#{BASE_URL}/#{path}", *args) + end + + def create_request(*args) + if verbose? + REST.send(*args) do |request| + request.set_debug_output($stdout) + end + else + REST.send(*args) + end + end + + def print_error(body) + begin + json = JSON.parse(body) + rescue JSON::ParserError + json = {} + end + error = json['error'] || "An unexpected error occurred: #{body}" + + case data = json['data'] + when Hash + lines = data.sort_by(&:first).map do |attr, messages| + attr = attr[0, 1].upcase << attr[1..-1] + messages.sort.map do |message| + "- #{attr}: #{message}" + end + end.flatten + count = lines.size + lines.unshift "The following #{'validation'.pluralize(count)} failed:" + error += "\n" << lines.join("\n") + end + + raise Informative, error + end + + def print_messages(data_url, messages, spec = nil, action = nil) + if verbose? || spec.nil? + # Using UI.labeled here is dangerous, as it wraps the URL and indents + # it, which breaks the URL when you try to copy-paste it. + UI.puts " - Data URL: #{data_url}" + + server_logs = messages.map do |entry| + at, message = entry.to_a.flatten + "#{formatted_time(at)}: #{message}" + end + UI.labeled 'Log messages', server_logs + else + separator = '-' * 80 + UI.puts + UI.puts separator + UI.puts " 🎉 Congrats" + UI.puts + UI.puts " 🚀 #{spec.name} (#{spec.version}) successfully #{action}" + unless messages.empty? + at = messages.first.to_a.flatten.first + UI.puts " 📅 #{formatted_time(at)}" + end + UI.puts " 🌎 https://cocoapods.org/pods/#{spec.name}" + UI.puts " 👍 Tell your friends!" + UI.puts separator + end + end + + def json(response) + JSON.parse(response.body) + end + + def netrc + @@netrc ||= Netrc.read + end + + def token + ENV['COCOAPODS_TRUNK_TOKEN'] || + (netrc['trunk.cocoapods.org'] && netrc['trunk.cocoapods.org'].password) + end + + def default_headers + { + 'Content-Type' => 'application/json; charset=utf-8', + 'Accept' => 'application/json; charset=utf-8', + 'User-Agent' => "CocoaPods/#{Pod::VERSION}", + } + end + + def auth_headers + default_headers.merge('Authorization' => "Token #{token}") + end + + def formatted_time(time_string) + require 'active_support/time' + @tz_offset ||= Time.zone_offset(time_zone) + @current_year ||= Date.today.year + + time = Time.parse(time_string) + @tz_offset + formatted = time.to_formatted_s(:long_ordinal) + # No need to show the current year, the user will probably know. 
+ if time.year == @current_year + formatted.sub!(" #{@current_year}", '') + end + formatted + end + + def time_zone + out, = Executable.capture_command('/bin/date', %w(+%Z), :capture => :out) + out.strip + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/add_owner.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/add_owner.rb new file mode 100644 index 0000000..529a6f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/add_owner.rb @@ -0,0 +1,47 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class AddOwner < Trunk + self.summary = 'Add an owner to a pod' + self.description = <<-DESC + Adds the registered user with specified `OWNER-EMAIL` as an owner + of the given `POD`. + An ‘owner’ is a registered user whom is allowed to make changes to a + pod, such as pushing new versions and adding and removing other ‘owners’. + DESC + + self.arguments = [ + CLAide::Argument.new('POD', true), + CLAide::Argument.new('OWNER-EMAIL', true), + ] + + def initialize(argv) + @pod = argv.shift_argument + @email = argv.shift_argument + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + unless @pod && @email + help! 'Specify the pod name and the new owner’s email address.' + end + end + + def run + body = { 'email' => @email }.to_json + json = json(request_path(:patch, "pods/#{@pod}/owners", body, auth_headers)) + UI.labeled 'Owners', json.map { |o| "#{o['name']} <#{o['email']}>" } + rescue REST::Error => e + raise Informative, "There was an error adding #{@email} to " \ + "#{@pod} on trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/delete.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/delete.rb new file mode 100644 index 0000000..e10203b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/delete.rb @@ -0,0 +1,70 @@ +module Pod + class Command + class Trunk + # @CocoaPods 1.0.0.beta.1 + # + class Delete < Trunk + self.summary = 'Deletes a version of a pod.' + self.description = <<-DESC + WARNING: It is generally considered bad behavior to remove + versions of a Pod that others are depending on! Please + consider using the deprecate command instead. + + Deletes the specified pod version from trunk and the master specs + repo. Once deleted, this version can never be pushed again. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('VERSION', true), + ] + + def initialize(argv) + @name = argv.shift_argument + @version = argv.shift_argument + super + end + + def validate! + super + help! 'Please specify a pod name.' unless @name + help! 'Please specify a version.' unless @version + end + + def run + return unless confirm_deletion? + json = delete + print_messages(json['data_url'], json['messages'], nil, nil) + end + + private + + WARNING_MESSAGE = 'WARNING: It is generally considered bad behavior ' \ + "to remove versions of a Pod that others are depending on!\n" \ + 'Please consider using the `deprecate` command instead.'.freeze + + def confirm_deletion? 
+ UI.puts(WARNING_MESSAGE.yellow) + loop do + UI.print("Are you sure you want to delete this Pod version?\n> ") + answer = UI.gets.strip.downcase + UI.puts # ensures a newline is printed after the user input + affirmatives = %w(y yes true 1) + negatives = %w(n no false 0) + return true if affirmatives.include?(answer) + return false if negatives.include?(answer) + end + end + + def delete + response = request_path(:delete, "pods/#{@name}/#{@version}", auth_headers) + url = response.headers['location'].first + json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error deleting the pod version ' \ + "from trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/deprecate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/deprecate.rb new file mode 100644 index 0000000..4e3b78d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/deprecate.rb @@ -0,0 +1,48 @@ +module Pod + class Command + class Trunk + # @CocoaPods 1.0.0.beta.1 + # + class Deprecate < Trunk + self.summary = 'Deprecates a pod.' + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def self.options + [ + ['--in-favor-of=OTHER_NAME', 'The pod to deprecate this pod in favor of.'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @in_favor_of = argv.option('in-favor-of') + super + end + + def validate! + super + help! 'Please specify a pod name.' unless @name + end + + def run + json = deprecate + print_messages(json['data_url'], json['messages'], nil, nil) + end + + def deprecate + body = { + :in_favor_of => @in_favor_of, + }.to_json + response = request_path(:patch, "pods/#{@name}/deprecated", body, auth_headers) + url = response.headers['location'].first + json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error deprecating the pod ' \ + "via trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/info.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/info.rb new file mode 100644 index 0000000..2941890 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/info.rb @@ -0,0 +1,35 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Info < Trunk + self.summary = 'Returns information about a Pod.' + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def validate! + super + help! 'Please specify a pod name.' 
unless @name + end + + def run + response = json(request_path(:get, "pods/#{@name}", auth_headers)) + versions = response['versions'] || [] + owners = response['owners'] || [] + + UI.title(@name) do + UI.labeled 'Versions', versions.map { |v| "#{v['name']} (#{v['created_at']})" } + UI.labeled 'Owners', owners.map { |o| "#{o['name']} <#{o['email']}>" } + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/me.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/me.rb new file mode 100644 index 0000000..b8aab38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/me.rb @@ -0,0 +1,119 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Me < Trunk + self.summary = 'Display information about your sessions' + self.description = <<-DESC + Includes information about your registration, followed by all your + sessions. + + These are your current session, other valid sessions, unverified + sessions, and expired sessions. + DESC + + def validate! + super + unless token + help! 'You need to register a session first.' + end + end + + def run + me = json(request_path(:get, 'sessions', auth_headers)) + owner = json(request_path(:get, "owners/#{me['email']}")) + UI.labeled 'Name', owner['name'] + UI.labeled 'Email', owner['email'] + UI.labeled 'Since', formatted_time(owner['created_at']) + + pods = owner['pods'] || [] + pods = pods.map { |pod| pod['name'] } + pods = 'None' unless pods.any? + UI.labeled 'Pods', pods + + sessions = me['sessions'].map do |session| + hash = { + :created_at => formatted_time(session['created_at']), + :valid_until => formatted_time(session['valid_until']), + :created_from_ip => session['created_from_ip'], + :description => session['description'], + } + if Time.parse(session['valid_until']) <= Time.now.utc + hash[:color] = :red + elsif session['verified'] + hash[:color] = session['current'] ? :cyan : :green + else + hash[:color] = :yellow + hash[:valid_until] = 'Unverified' + end + hash + end + + columns = [:created_at, :valid_until, :created_from_ip, :description].map do |key| + find_max_size(sessions, key) + end + + sessions = sessions.map do |session| + created_at = session[:created_at].ljust(columns[0]) + valid_until = session[:valid_until].rjust(columns[1]) + created_from_ip = session[:created_from_ip].ljust(columns[2]) + description = session[:description] + msg = "#{created_at} - #{valid_until}. IP: #{created_from_ip}" + msg << " Description: #{description}" if description + msg.send(session[:color]) + end + + UI.labeled 'Sessions', sessions + + rescue REST::Error => e + raise Informative, 'There was an error fetching your info ' \ + "from trunk: #{e.message}" + end + + private + + def find_max_size(sessions, key) + sessions.map { |s| (s[key] || '').size }.max + end + + class CleanSessions < Me + self.summary = 'Remove sessions' + self.description = <<-DESC + By default this will clean-up your sessions by removing expired and + unverified sessions. + + To remove all your sessions, except for the one you are currently + using, specify the `--all` flag. + DESC + + def self.options + [ + ['--all', 'Removes all your sessions, except for the current one'], + ].concat(super) + end + + def initialize(argv) + @remove_all = argv.flag?('all', false) + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + end + + def run + path = @remove_all ? 
'sessions/all' : 'sessions' + request_path(:delete, path, auth_headers) + rescue REST::Error => e + raise Informative, 'There was an error cleaning up your ' \ + "sessions from trunk: #{e.message}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/push.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/push.rb new file mode 100644 index 0000000..209018b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/push.rb @@ -0,0 +1,169 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Push < Trunk + self.summary = 'Publish a podspec' + self.description = <<-DESC + Publish the podspec at `PATH` to make it available to all users of + the ‘trunk’ spec-repo. If `PATH` is not provided, defaults to the + current directory. + + Before pushing the podspec to cocoapods.org, this will perform a local + lint of the podspec, including a build of the library. However, it + remains *your* responsibility to ensure that the published podspec + will actually work for your users. Thus it is recommended that you + *first* try to use the podspec to integrate the library into your demo + and/or real application. + + If this is the first time you publish a spec for this pod, you will + automatically be registered as the ‘owner’ of this pod. (Note that + ‘owner’ in this case implies a person that is allowed to publish new + versions and add other ‘owners’, not necessarily the library author.) + DESC + + self.arguments = [ + CLAide::Argument.new('PATH', false), + ] + + def self.options + [ + ['--allow-warnings', 'Allows push even if there are lint warnings'], + ['--use-libraries', 'Linter uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--swift-version=VERSION', 'The SWIFT_VERSION that should be used to lint the spec. ' \ + 'This takes precedence over a .swift-version file.'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--synchronous', 'If validation depends on other recently pushed pods, synchronize'], + ].concat(super) + end + + def initialize(argv) + @allow_warnings = argv.flag?('allow-warnings', false) + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @path = argv.shift_argument || '.' + @synchronous = argv.flag?('synchronous', false) + find_podspec_file if File.directory?(@path) + super + end + + def validate! + super + unless token + help! 'You need to run `pod trunk register` to register a session first.' + end + unless @path + help! 'Please specify the path to the podspec file.' + end + unless File.exist?(@path) && !File.directory?(@path) + help! "The specified path `#{@path}` does not point to " \ + 'an existing podspec file.' 
+ end + end + + def run + update_master_repo + validate_podspec + status, json = push_to_trunk + update_master_repo + + if (400...600).cover?(status) + print_messages(json['data_url'], json['messages'], nil) + else + print_messages(json['data_url'], json['messages'], spec, 'published') + end + end + + private + + MASTER_GIT_REPO_URL = 'https://github.com/CocoaPods/Specs.git'.freeze + + def push_to_trunk + spec.attributes_hash[:pushed_with_swift_version] = @swift_version if @swift_version + response = request_path(:post, "pods?allow_warnings=#{@allow_warnings}", + spec.to_json, auth_headers) + url = response.headers['location'].first + return response.status_code, json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error pushing a new version ' \ + "to trunk: #{e.message}" + end + + def find_podspec_file + podspecs = Dir[Pathname(@path) + '*.podspec{.json,}'] + case podspecs.count + when 0 + UI.notice "No podspec found in directory `#{@path}`" + when 1 + UI.notice "Found podspec `#{podspecs[0]}`" + else + UI.notice "Multiple podspec files in directory `#{@path}`. " \ + 'You need to explicitly specify which one to use.' + end + @path = (podspecs.count == 1) ? podspecs[0] : nil + end + + def spec + @spec ||= Pod::Specification.from_file(@path) + rescue Informative => e # TODO: this should be a more specific error + raise Informative, 'Unable to interpret the specified path ' \ + "#{UI.path(@path)} as a podspec (#{e})." + end + + # Performs a full lint against the podspecs. + # + # TODO: Currently copied verbatim from `pod push`. + def validate_podspec + UI.puts 'Validating podspec'.yellow + + validator = Validator.new(spec, [repo_url]) + validator.allow_warnings = @allow_warnings + validator.use_frameworks = @use_frameworks + if validator.respond_to?(:use_modular_headers=) + validator.use_modular_headers = @use_modular_headers + end + if validator.respond_to?(:swift_version=) + validator.swift_version = @swift_version + end + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.validate + unless validator.validated? + raise Informative, "The spec did not pass validation, due to #{validator.failure_reason}." + end + + # Let the validator's logic for the swift version + # set the value for the trunk JSON uploader + @swift_version = validator.respond_to?(:used_swift_version) && validator.used_swift_version + end + + def repo_url + @synchronous ? MASTER_GIT_REPO_URL : Pod::TrunkSource::TRUNK_REPO_URL + end + + def update_master_repo + # more robust Trunk setup logic: + # - if Trunk exists, updates it + # - if Trunk doesn't exist, add it and update it + # + repo = sources_manager.find_or_create_source_with_url(repo_url) + sources_manager.update(repo.name) + end + + def sources_manager + if defined?(Pod::SourcesManager) + Pod::SourcesManager + else + config.sources_manager + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/register.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/register.rb new file mode 100644 index 0000000..19f1a09 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/register.rb @@ -0,0 +1,78 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Register < Trunk + self.summary = 'Manage sessions' + self.description = <<-DESC + Register a new account, or create a new session. 
+ + If this is your first registration, both an `EMAIL` address and + `YOUR_NAME` are required. If you’ve already registered with trunk, you may + omit the `YOUR_NAME` (unless you would like to change it). + + It is recommended that you provide a description of the session, so + that it will be easier to identify later on. For instance, when you + would like to clean-up your sessions. A common example is to specify + the location where the machine, that you are using the session for, is + physically located. + + Examples: + + $ pod trunk register eloy@example.com 'Eloy Durán' --description='Personal Laptop' + $ pod trunk register eloy@example.com --description='Work Laptop' + $ pod trunk register eloy@example.com + DESC + + self.arguments = [ + CLAide::Argument.new('EMAIL', true), + CLAide::Argument.new('YOUR_NAME', false), + ] + + def self.options + [ + ['--description=DESCRIPTION', 'An arbitrary description to ' \ + 'easily identify your session ' \ + 'later on.'], + ].concat(super) + end + + def initialize(argv) + @session_description = argv.option('description') + @email = argv.shift_argument + @name = argv.shift_argument + super + end + + def validate! + super + unless @email + help! 'Specify at least your email address.' + end + end + + def run + body = { + 'email' => @email, + 'name' => @name, + 'description' => @session_description, + }.to_json + json = json(request_path(:post, 'sessions', body, default_headers)) + save_token(json['token']) + # TODO UI.notice inserts an empty line :/ + UI.puts '[!] Please verify the session by clicking the link in the ' \ + "verification email that has been sent to #{@email}".yellow + rescue REST::Error => e + raise Informative, 'There was an error registering with trunk: ' \ + "#{e.message}" + end + + def save_token(token) + netrc['trunk.cocoapods.org'] = @email, token + netrc.save + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/remove_owner.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/remove_owner.rb new file mode 100644 index 0000000..b0d170e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/lib/pod/command/trunk/remove_owner.rb @@ -0,0 +1,46 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class RemoveOwner < Trunk + self.summary = 'Remove an owner from a pod' + self.description = <<-DESC + Removes the user with specified `OWNER-EMAIL` from being an owner + of the given `POD`. + An ‘owner’ is a registered user whom is allowed to make changes to a + pod, such as pushing new versions and adding and removing other ‘owners’. + DESC + + self.arguments = [ + CLAide::Argument.new('POD', true), + CLAide::Argument.new('OWNER-EMAIL', true), + ] + + def initialize(argv) + @pod = argv.shift_argument + @email = argv.shift_argument + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + unless @pod && @email + help! 'Specify the pod name and the owner’s email address.' 
+ end + end + + def run + json = json(request_path(:delete, "pods/#{@pod}/owners/#{@email}", auth_headers)) + UI.labeled 'Owners', json.map { |o| "#{o['name']} <#{o['email']}>" } + rescue REST::Error => e + raise Informative, "There was an error removing #{@email} from " \ + "#{@pod} on trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/addowner_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/addowner_spec.rb new file mode 100644 index 0000000..2600ada --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/addowner_spec.rb @@ -0,0 +1,52 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::AddOwner do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk add-owner )).should.be.instance_of Command::Trunk::AddOwner + end + end + + describe 'validation' do + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk push )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error if pod name is not supplied' do + command = Command.parse(%w( trunk add-owner )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'pod name' + end + + it 'should error if new owners email is not supplied' do + command = Command.parse(%w( trunk add-owner QueryKit )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email' + end + + it 'should should validate with valid pod and email' do + command = Command.parse(%w( trunk add-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('token') + lambda { command.validate! }.should.not.raise CLAide::Help + end + end + + it 'should successfully add an owner' do + url = 'https://trunk.cocoapods.org/api/v1/pods/QueryKit/owners' + stub_request(:patch, url). + with(:body => '{"email":"kyle@cocoapods.org"}', + :headers => { 'Authorization' => 'Token 527d11fe429f3426cb8dbeba183a0d80' }). + to_return(:status => 200, :body => '[]', :headers => {}) + + command = Command.parse(%w( trunk add-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('527d11fe429f3426cb8dbeba183a0d80') + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/delete_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/delete_spec.rb new file mode 100644 index 0000000..dea1699 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/delete_spec.rb @@ -0,0 +1,68 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Delete do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk delete )).should.be.instance_of Command::Trunk::Delete + end + end + + it 'should error without a pod name' do + command = Command.parse(%w( trunk delete )) + lambda { command.validate! }.should.raise CLAide::Help + end + + it 'should error without a version' do + command = Command.parse(%w( trunk delete Stencil )) + lambda { command.validate! 
}.should.raise CLAide::Help + end + + it 'confirms deletion' do + UI.inputs += %w(garbage true false) + command = Command.parse(%w( trunk delete Stencil 1.0.0 )) + command.send(:confirm_deletion?).should.be.true + command.send(:confirm_deletion?).should.be.false + + UI.output.should == <<-OUTPUT.gsub(/^>$/, '> ') +\e[33mWARNING: It is generally considered bad behavior to remove versions of a Pod that others are depending on! +Please consider using the `deprecate` command instead.\e[0m +Are you sure you want to delete this Pod version? +> +Are you sure you want to delete this Pod version? +> +\e[33mWARNING: It is generally considered bad behavior to remove versions of a Pod that others are depending on! +Please consider using the `deprecate` command instead.\e[0m +Are you sure you want to delete this Pod version? +> + OUTPUT + end + + it 'does not delete if the user does not confirm' do + Command::Trunk::Delete.any_instance.expects(:confirm_deletion?).returns(false) + Command::Trunk::Delete.any_instance.expects(:delete).never + Command::Trunk::Delete.invoke(%w(Stencil 1.0.0)) + end + + it 'should show information for a pod' do + response = { + 'messages' => [ + { + '2015-12-05 02:00:25 UTC' => 'Push for `Stencil 1.0.0` initiated.', + }, + { + '2015-12-05 02:00:26 UTC' => 'Push for `Stencil 1.0.0` has been pushed (1.02409270 s).', + }, + ], + 'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/1.0.0/Stencil.podspec.json', + } + Command::Trunk::Delete.any_instance.expects(:delete).returns(response) + UI.inputs << 'TRUE ' + Command::Trunk::Delete.invoke(%w(Stencil 1.0.0)) + + UI.output.should.include 'Data URL: https://raw.githubusercontent' + UI.output.should.include 'Push for `Stencil 1.0.0` initiated' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/deprecate_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/deprecate_spec.rb new file mode 100644 index 0000000..7f43498 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/deprecate_spec.rb @@ -0,0 +1,58 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Deprecate do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk deprecate )).should.be.instance_of Command::Trunk::Deprecate + end + end + + it 'should error without a pod name' do + command = Command.parse(%w( trunk deprecate )) + lambda { command.validate! }.should.raise CLAide::Help + end + + before do + @push_response = { + 'messages' => [ + { + '2015-12-05 02:00:25 UTC' => 'Push for `Stencil 0.96.3` initiated.', + }, + { + '2015-12-05 02:00:26 UTC' => 'Push for `Stencil 0.96.3` has been pushed (1.02409270 s).', + }, + ], + 'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/0.96.3/Stencil.podspec.json', + } + end + + it 'should show information for a pod' do + Command::Trunk::Deprecate.any_instance.expects(:deprecate).returns(@push_response) + Command::Trunk::Deprecate.invoke(%w(Stencil)) + + UI.output.should.include 'Data URL: https://raw.githubusercontent' + UI.output.should.include 'Push for `Stencil 0.96.3` initiated' + end + + it 'should send the proper network request' do + redirect = 'http://redirected.com' + stub_request(:patch, 'https://trunk.cocoapods.org/api/v1/pods/Stencil/deprecated'). 
+ with(:body => hash_including('in_favor_of' => 'Stamp')). + to_return(:status => 201, :headers => { :location => redirect }) + + stub_request(:get, redirect). + to_return(:status => 200, :body => @push_response.to_json) + + Command::Trunk::Deprecate.invoke(%w(Stencil --in-favor-of=Stamp)) + + UI.output.should == <<-EOS + - Data URL: https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/0.96.3/Stencil.podspec.json +- Log messages: + - December 5th, 2015 02:00: Push for `Stencil 0.96.3` initiated. + - December 5th, 2015 02:00: Push for `Stencil 0.96.3` has been pushed (1.02409270 s). + EOS + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/info_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/info_spec.rb new file mode 100644 index 0000000..1ff1a38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/info_spec.rb @@ -0,0 +1,36 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Info do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk info )).should.be.instance_of Command::Trunk::Info + end + end + + it 'should error without a pod name' do + command = Command.parse(%w( trunk info )) + lambda { command.validate! }.should.raise CLAide::Help + end + + it 'should show information for a pod' do + url = 'https://trunk.cocoapods.org/api/v1/pods/Stencil' + stub_request(:get, url).to_return(:body => { + 'owners' => [ + { + 'name' => 'Kyle Fuller', + 'email' => 'kyle@example.com', + }, + ], + }.to_json) + + command = Command.parse(%w( trunk info Stencil )) + lambda { command.validate! }.should.not.raise CLAide::Help + command.run + + UI.output.should.include 'Owners' + UI.output.should.include 'Kyle Fuller ' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/me_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/me_spec.rb new file mode 100644 index 0000000..585bb38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/me_spec.rb @@ -0,0 +1,17 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::Me do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk me )).should.be.instance_of Command::Trunk::Me + end + end + + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk me )) + lambda { command.validate! 
}.should.raise CLAide::Help + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/push_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/push_spec.rb new file mode 100644 index 0000000..6dde8cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/push_spec.rb @@ -0,0 +1,332 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Push do + def success_json + { + 'messages' => [ + { + '2015-12-05 02:00:25 UTC' => "Push for `BananaLib 0.96.3' initiated.", + }, + { + '2015-12-05 02:00:26 UTC' => "Push for `BananaLib 0.96.3' has been pushed (1.02409270 s).", + }, + ], + 'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/BananaLib/0.96.3/BananaLib.podspec.json', + } + end + + before do + Command::Trunk::Push.any_instance.stubs(:update_master_repo) + end + + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk push )).should.be.instance_of Command::Trunk::Push + end + end + + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk push )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error when the trunk service returns an error' do + url = 'https://trunk.cocoapods.org/api/v1/pods?allow_warnings=false' + stub_request(:post, url).to_return(:status => 422, :body => { + 'error' => 'The Pod Specification did not pass validation.', + 'data' => { + 'warnings' => [ + 'A value for `requires_arc` should be specified until the migration to a `true` default.', + ], + }, + }.to_json) + command = Command.parse(%w(trunk push)) + command.stubs(:validate_podspec) + command.stubs(:spec).returns(Pod::Specification.new) + exception = lambda { command.run }.should.raise Informative + exception.message.should.include 'following validation failed' + exception.message.should.include 'should be specified' + exception.message.should.include 'The Pod Specification did not pass validation' + end + + describe 'PATH' do + before do + UI.output = '' + end + it 'defaults to the current directory' do + # Disable the podspec finding algorithm so we can check the raw path + Command::Trunk::Push.any_instance.stubs(:find_podspec_file) { |path| path } + command = Command.parse(%w( trunk push )) + command.instance_eval { @path }.should == '.' + end + + def found_podspec_among_files(files) + # Create a temp directory with the dummy `files` in it + Dir.mktmpdir do |dir| + files.each do |filename| + path = Pathname(dir) + filename + File.open(path, 'w') {} + end + # Execute `pod trunk push` with this dir as parameter + command = Command.parse(%w( trunk push ) + [dir]) + path = command.instance_eval { @path } + return File.basename(path) if path + end + end + + it 'should find the only JSON podspec in a given directory' do + files = %w(foo bar.podspec.json baz) + found_podspec_among_files(files).should == files[1] + end + + it 'should find the only Ruby podspec in a given directory' do + files = %w(foo bar.podspec baz) + found_podspec_among_files(files).should == files[1] + end + + it 'should warn when no podspec found in a given directory' do + files = %w(foo bar baz) + found_podspec_among_files(files).should.nil? 
+ UI.output.should.match /No podspec found in directory/ + end + + it 'should warn when multiple podspecs found in a given directory' do + files = %w(foo bar.podspec bar.podspec.json baz) + found_podspec_among_files(files).should.nil? + UI.output.should.match /Multiple podspec files in directory/ + end + end + + describe 'validation' do + before do + Installer.any_instance.stubs(:aggregate_targets).returns([]) + Installer.any_instance.stubs(:pod_targets).returns([]) + + Validator.any_instance.stubs(:check_file_patterns) + Validator.any_instance.stubs(:validate_url) + Validator.any_instance.stubs(:validate_screenshots) + Validator.any_instance.stubs(:xcodebuild).returns('') + Validator.any_instance.stubs(:install_pod) + Validator.any_instance.stubs(:build_pod) + Validator.any_instance.stubs(:add_app_project_import) + Validator.any_instance.stubs(:used_swift_version).returns(nil) + %i(prepare resolve_dependencies download_dependencies).each do |m| + Installer.any_instance.stubs(m) + end + Command::Trunk::Push.any_instance.stubs(:master_repo_url). + returns(Pod::TrunkSource::TRUNK_REPO_URL) + end + + it 'passes the SWIFT_VERSION to the Validator' do + Validator.any_instance.expects(:swift_version=).with('3.0') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --swift-version=3.0)) + cmd.send(:validate_podspec) + end + + it 'passes a swift version back to command, to handle .swift-version files' do + Validator.any_instance.stubs(:dot_swift_version).returns('1.2.3') + Validator.any_instance.stubs(:used_swift_version).returns('1.2.3') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --allow-warnings)) + cmd.send(:validate_podspec) + cmd.instance_variable_get(:@swift_version).should == '1.2.3' + end + + it 'validates specs as frameworks by default' do + Validator.any_instance.expects(:podfile_from_spec). + with(:ios, '8.0', true, [], nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:osx, nil, true, [], nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:tvos, nil, true, [], nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:watchos, nil, true, [], nil).once.returns(Podfile.new) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + cmd.send(:validate_podspec) + end + + it 'validates specs as libraries if requested' do + Validator.any_instance.expects(:podfile_from_spec). + with(:ios, nil, false, [], nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:osx, nil, false, [], nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:tvos, nil, false, [], nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:watchos, nil, false, [], nil).once.returns(Podfile.new) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-libraries)) + cmd.send(:validate_podspec) + end + + it 'prints the failure reason' do + Validator.any_instance.expects(:validated?).returns(false) + Validator.any_instance.expects(:validate) + Validator.any_instance.expects(:failure_reason).returns('failure_reason') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-libraries)) + e = should.raise(Informative) { cmd.send(:validate_podspec) } + e.message.should.include 'The spec did not pass validation, due to failure_reason.' 
+ end + + it 'passes skip import validation' do + Validator.any_instance.expects(:skip_import_validation=).with(true) + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --skip-import-validation)) + cmd.send(:validate_podspec) + end + + it 'passes skip test' do + Validator.any_instance.expects(:skip_tests=).with(true) + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --skip-tests)) + cmd.send(:validate_podspec) + end + + it 'passes use modular headers' do + Validator.any_instance.expects(:use_modular_headers=) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-modular-headers)) + cmd.send(:validate_podspec) + end + end + + describe 'sending the swift version up to trunk' do + before do + # This won't get called + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + # For faking the networking when sending + Pod::Command::Trunk.any_instance.expects(:json).returns({}) + Pod::Command::Trunk.any_instance.expects(:auth_headers).returns({}) + end + + it 'passes the value to trunk' do + # Fakes for the network response + response = mock + response.expects(:headers).returns('location' => ['http://theinternet.com']) + response.expects(:status_code).returns(200) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --swift-version=1.1.2)) + + # Using a blank podspec - JSON should include `"pushed_with_swift_version":"1.1.2"` + cmd.stubs(:spec).returns(Pod::Specification.new) + + json = <<-JSON +{"name":null,"pushed_with_swift_version":"1.1.2","platforms":{"osx":null,"ios":null,"tvos":null,"watchos":null}} + JSON + + cmd.stubs(:validate_podspec) + cmd.stubs(:request_url) + + api_route = 'pods?allow_warnings=false' + cmd.expects(:request_path).with(:post, api_route, json, {}).returns(response) + cmd.send(:push_to_trunk) + end + end + + describe 'updating the master repo' do + before do + @cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + @cmd.stubs(:validate_podspec) + @cmd.stubs(:push_to_trunk).returns([200, success_json]) + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + Command::Trunk::Push.any_instance.stubs(:master_repo_name). + returns(Pod::TrunkSource::TRUNK_REPO_NAME) + end + + it 'updates the master repo when it exists' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(2). + returns(Pod::TrunkSource.new(Pod::TrunkSource::TRUNK_REPO_NAME)) + + Config.instance.sources_manager.expects(:update).with(Pod::TrunkSource::TRUNK_REPO_NAME).twice + Command::Repo::AddCDN.any_instance.expects(:run).never + + @cmd.run + end + + it 'sets up the master repo when it does not exist' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(3). + returns(nil). + returns(Pod::TrunkSource.new(Pod::TrunkSource::TRUNK_REPO_NAME)) + Config.instance.sources_manager.expects(:update).with(Pod::TrunkSource::TRUNK_REPO_NAME).twice + Command::Repo::AddCDN.any_instance.expects(:run) + + @cmd.run + end + end + + describe 'synchronous updating the git repo' do + before do + @cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --synchronous)) + @cmd.stubs(:validate_podspec) + @cmd.stubs(:push_to_trunk).returns([200, success_json]) + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + Command::Trunk::Push.any_instance.stubs(:master_repo_name).returns('master') + end + + it 'updates the git repo when it exists' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(2). 
+ returns(Pod::TrunkSource.new('master')) + + Config.instance.sources_manager.expects(:update).with('master').twice + Command::Repo::AddCDN.any_instance.expects(:run).never + + @cmd.run + end + + it 'sets up the git repo when it does not exist' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(3). + returns(nil). + returns(Pod::TrunkSource.new('master')) + Config.instance.sources_manager.stubs(:cdn_url?).returns(false) + Config.instance.sources_manager.stubs(:create_source_with_url).once. + returns(Pod::TrunkSource.new('master')) + Config.instance.sources_manager.expects(:update).with('master').twice + + @cmd.run + end + end + + describe 'Presenting Responses to the user' do + before do + Command::Trunk::Push.any_instance.stubs(:update_master_repo) + Config.instance.sources_manager.stubs(:master_repo_functional?).returns(true) + end + + it 'shows full logs when verbose' do + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --verbose)) + cmd.stubs(:validate_podspec) + cmd.stubs(:push_to_trunk).returns([200, success_json]) + + cmd.run + UI.output.should.match %r{- Data URL: https://raw.githubusercontent.com/CocoaPods/Specs} + end + + it 'shows full logs when errored' do + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --verbose)) + cmd.stubs(:validate_podspec) + cmd.stubs(:push_to_trunk).returns([400, success_json]) + + cmd.run + UI.output.should.match %r{- Data URL: https://raw.githubusercontent.com/CocoaPods/Specs} + end + + it 'shows thanks emojis when success' do + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + cmd.stubs(:validate_podspec) + cmd.stubs(:push_to_trunk).returns([200, success_json]) + cmd.run + + UI.output.should.match %r{https://cocoapods.org/pods/BananaLib} + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/register_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/register_spec.rb new file mode 100644 index 0000000..7bc316a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/register_spec.rb @@ -0,0 +1,31 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::Register do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk register )).should.be.instance_of Command::Trunk::Register + end + end + + it 'should error if email is not supplied' do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk register )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email address' + end + + it 'should register user' do + url = 'https://trunk.cocoapods.org/api/v1/sessions' + stub_request(:post, url). + with(:body => hash_including('email' => 'kyle@cocoapods.org')). 
+ to_return(:status => 200, :body => '{"token": "acct"}') + Netrc.any_instance.stubs(:[]).returns(nil) + Netrc.any_instance.expects(:[]=).with('trunk.cocoapods.org', ['kyle@cocoapods.org', 'acct']) + Netrc.any_instance.expects(:save) + + command = Command.parse(%w( trunk register kyle@cocoapods.org )) + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/remove_owner_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/remove_owner_spec.rb new file mode 100644 index 0000000..bed408f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk/remove_owner_spec.rb @@ -0,0 +1,51 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::RemoveOwner do + describe 'CLAide' do + it 'registers itself' do + Command.parse(%w( trunk remove-owner )).should.be.instance_of Command::Trunk::RemoveOwner + end + end + + describe 'validation' do + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk remove-owner )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error if pod name is not supplied' do + command = Command.parse(%w( trunk remove-owner )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'pod name' + end + + it 'should error if new owners email is not supplied' do + command = Command.parse(%w( trunk remove-owner QueryKit )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email' + end + + it 'should should validate with valid pod and email' do + command = Command.parse(%w( trunk remove-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('token') + lambda { command.validate! }.should.not.raise CLAide::Help + end + end + + it 'should successfully remove an owner' do + url = 'https://trunk.cocoapods.org/api/v1/pods/QueryKit/owners/kyle@cocoapods.org' + stub_request(:delete, url). + with(:headers => { 'Authorization' => 'Token 527d11fe429f3426cb8dbeba183a0d80' }). 
+ to_return(:status => 200, :body => '[]', :headers => {}) + + command = Command.parse(%w( trunk remove-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('527d11fe429f3426cb8dbeba183a0d80') + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk_spec.rb new file mode 100644 index 0000000..e062461 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/command/trunk_spec.rb @@ -0,0 +1,23 @@ +require File.expand_path('../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk )).should.be.instance_of Command::Trunk + end + end + + before do + @command = Command.parse(%w(trunk)) + end + + describe 'authorization' do + it 'will use the trunk token from ENV if present' do + ENV.stubs(:[]).with('COCOAPODS_TRUNK_TOKEN').returns('token') + + @command.send(:token).should == 'token' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/fixtures/BananaLib.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/fixtures/BananaLib.podspec new file mode 100644 index 0000000..b9f98d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/fixtures/BananaLib.podspec @@ -0,0 +1,25 @@ +Pod::Spec.new do |s| + s.name = 'BananaLib' + s.version = '1.0' + s.authors = 'Banana Corp', { 'Monkey Boy' => 'monkey@banana-corp.local' } + s.homepage = 'http://banana-corp.local/banana-lib.html' + s.summary = 'Chunky bananas!' + s.description = 'Full of chunky bananas.' + s.source = { :git => 'http://banana-corp.local/banana-lib.git', :tag => 'v1.0' } + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' + } + s.source_files = 'Classes/*.{h,m,d}', 'Vendor' + s.resources = "Resources/*" + s.vendored_framework = 'Bananalib.framework' + s.vendored_library = 'libBananalib.a' + s.preserve_paths = 'preserve_me.txt' + s.public_header_files = 'Classes/Banana.h' + + s.prefix_header_file = 'Classes/BananaLib.pch' + s.xcconfig = { 'OTHER_LDFLAGS' => '-framework SystemConfiguration' } + s.dependency 'monkey', '~> 1.0.1', '< 1.0.9' + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/spec_helper.rb new file mode 100644 index 0000000..3ca19a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.5.0/spec/spec_helper.rb @@ -0,0 +1,97 @@ +# Set up coverage analysis +#-----------------------------------------------------------------------------# + +if RUBY_VERSION >= '1.9.3' + require 'codeclimate-test-reporter' + CodeClimate::TestReporter.configure do |config| + config.logger.level = Logger::WARN + end + CodeClimate::TestReporter.start +end + +# Set up +#-----------------------------------------------------------------------------# + +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$:.unshift((ROOT + 'lib').to_s) +$:.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'webmock' + +include WebMock::API +WebMock.enable! 
+WebMock.disable_net_connect!(:allow => ['codeclimate.com', 'cdn.cocoapods.org']) + +require 'cocoapods' + +require 'cocoapods_plugin' + +# Helpers +#-----------------------------------------------------------------------------# + +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + attr_accessor :inputs + + def gets + inputs.shift + end + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end + + class Command::Trunk + def time_zone + 'UTC' + end + end +end + +module Bacon + class Context + old_run_requirement = instance_method(:run_requirement) + define_method(:run_requirement) do |description, spec| + ::Pod::Config.instance = nil + ::Pod::UI.output = '' + ::Pod::UI.warnings = '' + ::Pod::UI.inputs = [] + # The following prevents a nasty behaviour where the increments are not + # balanced when testing informative which might lead to sections not + # being printed to the output as they are too nested. + ::Pod::UI.indentation_level = 0 + ::Pod::UI.title_level = 0 + + WebMock.reset! + + old_run_requirement.bind(self).call(description, spec) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.github/workflows/ci.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.github/workflows/ci.yml new file mode 100644 index 0000000..f73b8e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.github/workflows/ci.yml @@ -0,0 +1,64 @@ +name: Specs + +jobs: + specs: + strategy: + fail-fast: false + matrix: + task: [SPECS] + ruby: [2.6, 2.7] + os: [ubuntu-16.04] + include: + - task: SPECS + os: macos-10.15 + ruby: system + + name: ${{ matrix.task }} / ${{ matrix.os }} / Ruby ${{ matrix.ruby }} + runs-on: ${{ matrix.os }} + + steps: + - name: Set build image var + run: echo "ImageVersion=$ImageVersion" >> $GITHUB_ENV + + - name: Checkout git + uses: actions/checkout@v1 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + if: ${{ matrix.ruby != 'system' }} + with: + ruby-version: ${{ matrix.ruby }} + + - name: Update git submodules + run: git submodule update --init + + - uses: actions/cache@v2 + with: + path: vendor/bundle + key: gems@v1-${{ matrix.os }}-${{ env.ImageVersion }}-Ruby${{ matrix.ruby }}-${{ hashFiles('Gemfile.lock') }} + restore-keys: | + gems@v1-${{ matrix.os }}-${{ env.ImageVersion }}-Ruby${{ matrix.ruby }}- + - name: Run bundle install + run: | + gem install bundler -v "~> 1.17" + bundle config path vendor/bundle + bundle install --jobs 4 --retry 3 --without debugging documentation + - name: Set up git identity + run: | + git config --global user.email "tests@cocoapods.org" + git config --global user.name "CocoaPods Tests" + + - name: Run Tests + run: bundle exec rake spec + env: + COCOAPODS_CI_TASKS: ${{ matrix.task }} + +on: + push: + branches: + - "master" + - "*-stable" + pull_request: + branches: + - master + - "*-stable" diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles 
+_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.kick b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.kick new file mode 100644 index 0000000..612ecc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.kick @@ -0,0 +1,29 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. +process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop.yml new file mode 100644 index 0000000..227d50d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..ebc2123 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop_cocoapods.yml @@ -0,0 +1,138 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Autocorrect makes this cop much more useful, taking away needless guessing +Lint/EndAlignment: + AutoCorrect: true + + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. 
+Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop_todo.yml new file mode 100644 index 0000000..e8af53d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/.rubocop_todo.yml @@ -0,0 +1,33 @@ +# This configuration was generated by `rubocop --auto-gen-config` +# on 2014-08-20 17:00:42 +0200 using RuboCop version 0.25.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +Metrics/CyclomaticComplexity: + Max: 8 + +# Offense count: 9 +# Configuration parameters: AllowURI. +Metrics/LineLength: + Max: 105 + +# Offense count: 7 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 42 + +# Offense count: 1 +Metrics/PerceivedComplexity: + Max: 9 + +# Offense count: 1 +Style/ClassVars: + Enabled: false + +# Offense count: 1 +# Configuration parameters: Keywords. +Style/CommentAnnotation: + Enabled: false diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/CHANGELOG.md new file mode 100644 index 0000000..3a382bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/CHANGELOG.md @@ -0,0 +1,378 @@ +## 1.6.0 (2021-09-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.5.0 (2020-05-01) + +##### Enhancements + +* Add --synchronous option to `pod trunk push`. + [Paul Beusterien](https://github.com/paulb777) + [#147](https://github.com/CocoaPods/cocoapods-trunk/pull/147) + [CocoaPods#9497](https://github.com/CocoaPods/CocoaPods/issues/9497) + +##### Bug Fixes + +* None. + + +## 1.4.1 (2019-09-26) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Use a more robust `Trunk` init when pushing. + [Igor Makarov](https://github.com/igor-makarov) + [#135](https://github.com/CocoaPods/cocoapods-trunk/pull/135) + + +## 1.4.0 (2019-08-21) + +##### Enhancements + +* None. 
+ +##### Bug Fixes + +* Update to get the master spec repo from `Source::Manager` for validation - effectively + use the new CDN `TrunkSource` for podspec validation and not a hard-coded URL + [Igor Makarov](https://github.com/igor-makarov) + [#132](https://github.com/CocoaPods/cocoapods-trunk/pull/132) + [CocoaPods#9112](https://github.com/CocoaPods/CocoaPods/issues/9112) + +## 1.3.1 (2018-08-16) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.3.0 (2017-10-02) + +##### Enhancements + +* Add skip test option to trunk push + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/93) + +* Loosen netrc requirement + [jasl](https://github.com/jasl) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/95) + +* Update development dependencies to support MRI 2.3+ + [jasl](https://github.com/jasl) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/95) + +##### Bug Fixes + +* None. + + +## 1.2.0 (2017-04-11) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Properly display `pod trunk deprecate` command line options + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6486](https://github.com/CocoaPods/CocoaPods/issues/6486) + +* Add `--skip-import-validation` to skip linking a pod during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#86](https://github.com/CocoaPods/cocoapods-trunk/pull/86) + + +## 1.1.2 (2016-12-17) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Checks that `Pod::Validator` has `swift_version=` for CocoaPods <= 1.1.0 support. + [Danielle Tomlinson](https://github.com/dantoml) + [#6209](https://github.com/CocoaPods/CocoaPods/issues/6209) + + +## 1.1.1 (2016-10-20) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Support submitting from multiple versions of CocoaPods. + [Samuel Giddins](https://github.com/segiddins) + +## 1.1.0 (2016-10-19) + +##### Enhancements + +* Passes the pod's version of Swift used for deployment to the CocoaPods Specs repo + [Orta](https://github.com/orta) + [#92](https://github.com/CocoaPods/cocoapods-trunk/pull/72) + +* Prettier success message when successfully pushed a new version + [Marin](https://github.com/icanzilb) + [#76](https://github.com/CocoaPods/cocoapods-trunk/pull/76) + +##### Bug Fixes + +* None. + + +## 1.1.0.beta.1 (2016-10-10) + +##### Enhancements + +* Pass --swift-version to the Validator during `pod lib lint` + [Danielle Tomlinson](https://github.com/dantoml) + [#92](https://github.com/CocoaPods/cocoapods-trunk/pull/72) + +##### Bug Fixes + +* None. + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Don't print the invocation of `/bin/date`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* Make the error loading a specification during `pod trunk push` more + informative. + [Samuel Giddins](https://github.com/segiddins) + [#63](https://github.com/CocoaPods/cocoapods-trunk/issues/63) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.4 (2016-04-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Compatibility With CocoaPods 1.0.0.beta.8. + [Samuel Giddins](https://github.com/segiddins) + [#61](https://github.com/CocoaPods/cocoapods-trunk/issues/61) + + +## 1.0.0.beta.3 (2016-04-14) + +##### Enhancements + +* The failure reason is printed when validation fails during `pod trunk push`. 
+ [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#5073](https://github.com/CocoaPods/CocoaPods/issues/5073) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-02-03) + +##### Bug Fixes + +* Send a body with the `PATCH` request to deprecate a pod. + [Samuel Giddins](https://github.com/segiddins) + [#52](https://github.com/CocoaPods/cocoapods-trunk/issues/52) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Enhancements + +* The `pod deprecate PODNAME` command has been added to deprecate all versions + of a pod. + [Samuel Giddins](https://github.com/segiddins) + [#31](https://github.com/CocoaPods/cocoapods-trunk/issues/31) + +* The `pod delete PODNAME VERSION` command has been added to delete a single + version of a pod. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* If the master repo has not been setup when pushing a spec, run `pod setup` + instead of failing. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/cocoapods-trunk/issues/48) + + +## 0.6.4 (2015-08-28) + +##### Bug Fixes + +* This release fixes installation compatibility issues when using the RubyGem + due to an incompatible dependency on `nap`. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.3 (2015-08-28) + +##### Bug Fixes + +* This release fixes a file permissions error when using the RubyGem. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.2 (2015-08-26) + +##### Enhancements + +* The `--allow-warnings` flag to `pod trunk push` is now propagated to the trunk + server. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#3855](https://github.com/CocoaPods/CocoaPods/issues/3855) + + +## 0.6.1 (2015-05-27) + +##### Enhancements + +* The `master` specs repo is updated before and after pushing a new spec to + trunk. + [Samuel Giddins](https://github.com/segiddins) + [#43](https://github.com/CocoaPods/cocoapods-trunk/issues/43) + + +## 0.6.0 (2015-03-11) + +##### Enhancements + +* Allow specifying a Trunk token via the `COCOAPODS_TRUNK_TOKEN` environment + variable. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#3224](https://github.com/CocoaPods/CocoaPods/issues/3224) + + +## 0.5.1 (2015-02-25) + +##### Enhancements + +* Lint as a framework automatically. If needed, the `--use-libraries` + option allows linting as a static library. + [Boris Bügling](https://github.com/neonichu) + [#2912](https://github.com/CocoaPods/CocoaPods/issues/2912) + +##### Bug Fixes + +* Fix the detection of spec validation errors, and present the proper error + (and messages) to the user. + [Orta Therox](https://github.com/orta) + [#39](https://github.com/CocoaPods/cocoapods-trunk/issues/39) + + +## 0.5.0 (2014-12-25) + +##### Enhancements + +* Added `pod trunk remove-owner` command to remove an owner from a pod. + [Samuel Giddins](https://github.com/segiddins) + [#35](https://github.com/CocoaPods/cocoapods-trunk/issues/35) + +* Added `pod trunk info` command to get information for a pod, including the + owners. + [Kyle Fuller](https://github.com/kylef) + [#15](https://github.com/CocoaPods/cocoapods-trunk/issues/15) + + +## 0.4.1 (2014-11-19) + +##### Enhancements + +* Improved code readability and structure by splitting subcommands + into individual files. + [Olivier Halligon](https://github.com/alisoftware) + [#21](https://github.com/CocoaPods/CocoaPods/issues/21) + +##### Bug Fixes + +* Updates for changes in CocoaPods regarding `--allow-warnings`. 
+ [Kyle Fuller](https://github.com/kylef) + [Cocoapods#2831](https://github.com/CocoaPods/CocoaPods/pull/2831) + + +## 0.4.0 (2014-11-06) + +##### Bug Fixes + +* Fixes installation issues with the JSON dependency. + [Eloy Durán](https://github.com/alloy) + [CocoaPods#2773](https://github.com/CocoaPods/CocoaPods/issues/2773) + +## 0.3.1 (2014-10-15) + +##### Bug Fixes + +* Fixes an issue introduced with the release of `netrc 0.7.8`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#2674](https://github.com/CocoaPods/CocoaPods/issues/2674) + + +## 0.3.0 (2014-10-07) + +##### Enhancements + +* When linting, only allow dependencies from the 'master' specs repository. + [Samuel Giddins](https://github.com/segiddins) + [#28](https://github.com/CocoaPods/cocoapods-trunk/issues/28) + +##### Bug Fixes + +* Fixes an issue where `pod trunk push` doesn't show which validation errors + and just stated it failed. + [Kyle Fuller](https://github.com/kylef) + [#26](https://github.com/CocoaPods/cocoapods-trunk/issues/26) + + +## 0.2.0 (2014-09-11) + +##### Enhancements + +* Network errors are now gracefully handled. + [Samuel E. Giddins](https://github.com/segiddins) + +* Adopted new argument format of CLAide. + [Olivier Halligon](https://github.com/AliSoftware) + + +## 0.1.0 (2014-05-19) + +* Initial release. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Gemfile new file mode 100644 index 0000000..0244104 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Gemfile @@ -0,0 +1,25 @@ +source 'https://rubygems.org' + +gemspec + +# This is the version that ships with OS X 10.10, so be sure we test against it. +# At the same time, the 1.7.7 version won't install cleanly on Ruby > 2.2, +# so we use a fork that makes a trivial change to a macro invocation. 
+gem 'json', :git => 'https://github.com/segiddins/json.git', :branch => 'seg-1.7.7-ruby-2.2' + +group :development do + gem 'cocoapods', :git => "https://github.com/CocoaPods/CocoaPods.git", :branch => 'master' + gem 'cocoapods-core', :git => "https://github.com/CocoaPods/Core.git", :branch => 'master' + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + + gem 'bacon' + gem 'kicker' + gem 'mocha' + gem 'mocha-on-bacon' + gem 'prettybacon' + gem 'webmock' + + gem 'codeclimate-test-reporter', :require => nil + gem 'rubocop' +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Gemfile.lock new file mode 100644 index 0000000..45210fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Gemfile.lock @@ -0,0 +1,190 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: a5d1a29b08ca88f90f47104805bc4fad2efc93c9 + branch: master + specs: + claide (1.0.3) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 035518e56945778e9916d8118ea5e61ecb96beb0 + branch: master + specs: + cocoapods (1.11.0) + addressable (~> 2.8) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.11.0) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.4.0, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-trunk (= 1.6.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.8.0) + nap (~> 1.0) + ruby-macho (>= 1.0, < 3.0) + xcodeproj (>= 1.21.0, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: a8e38de9907968d6e627b1465f053c55fc778118 + branch: master + specs: + cocoapods-core (1.11.0) + activesupport (>= 5.0, < 7) + addressable (~> 2.8) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 4.0) + typhoeus (~> 1.0) + +GIT + remote: https://github.com/segiddins/json.git + revision: a9588bc4334c2f5bf985f255b61c05eafdcd8907 + branch: seg-1.7.7-ruby-2.2 + specs: + json (1.7.7) + +PATH + remote: . 
+ specs: + cocoapods-trunk (1.6.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.3) + activesupport (6.1.4.1) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + zeitwerk (~> 2.3) + addressable (2.8.0) + public_suffix (>= 2.0.2, < 5.0) + algoliasearch (1.27.5) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + ast (2.2.0) + atomos (0.1.3) + bacon (1.2.0) + cocoapods-deintegrate (1.0.5) + cocoapods-downloader (1.5.0) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.1) + cocoapods-try (1.2.0) + codeclimate-test-reporter (0.4.7) + simplecov (>= 0.7.1, < 1.0.0) + colored2 (3.1.2) + concurrent-ruby (1.1.9) + crack (0.4.3) + safe_yaml (~> 1.0.0) + docile (1.1.5) + escape (0.0.4) + ethon (0.14.0) + ffi (>= 1.15.0) + ffi (1.15.3) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + hashdiff (0.3.4) + httpclient (2.8.3) + i18n (1.8.10) + concurrent-ruby (~> 1.0) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + minitest (5.14.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + molinillo (0.8.0) + multi_json (1.11.2) + nanaimo (0.3.0) + nap (1.1.0) + netrc (0.11.0) + notify (0.5.2) + parser (2.3.0.7) + ast (~> 2.2) + powerpack (0.1.1) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (4.0.6) + rainbow (2.1.0) + rake (10.4.2) + rb-fsevent (0.9.5) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.4) + ffi (>= 0.5.0) + rexml (3.2.5) + rubocop (0.39.0) + parser (>= 2.3.0.7, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-macho (2.5.1) + ruby-progressbar (1.7.5) + safe_yaml (1.0.4) + simplecov (0.9.2) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.9.0) + simplecov-html (0.9.0) + typhoeus (1.4.0) + ethon (>= 0.9.0) + tzinfo (2.0.4) + concurrent-ruby (~> 1.0) + unicode-display_width (1.0.3) + webmock (3.5.1) + addressable (>= 2.3.6) + crack (>= 0.3.2) + hashdiff + xcodeproj (1.21.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.3.0) + rexml (~> 3.2.4) + zeitwerk (2.4.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-trunk! + codeclimate-test-reporter + json! + kicker + mocha + mocha-on-bacon + prettybacon + rake (~> 10.0) + rubocop + webmock + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/LICENSE.txt new file mode 100644 index 0000000..d011c24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2013 Eloy Durán + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/README.md new file mode 100644 index 0000000..a600242 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/README.md @@ -0,0 +1,35 @@ +# CocoaPods::Trunk + +[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/cocoapods-trunk/Specs)](https://github.com/CocoaPods/cocoapods-trunk/actions) +[![Maintainability](https://api.codeclimate.com/v1/badges/157b8b7f7b73976f3edf/maintainability)](https://codeclimate.com/github/CocoaPods/cocoapods-trunk/maintainability) + +CocoaPods plugin for trunk. + +## Installation + +Add this line to your application's Gemfile: + + gem 'cocoapods-trunk' + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install cocoapods-trunk + +## Usage + +With a local install of `trunk.cocoapods.org` up and running: + + $ env TRUNK_SCHEME_AND_HOST=http://localhost:4567 bundle exec pod trunk --help + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Rakefile new file mode 100644 index 0000000..2a8b679 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/Rakefile @@ -0,0 +1,68 @@ +# encoding: utf-8 + +#-- Bootstrap --------------------------------------------------------------# + +desc 'Initializes your working copy to run the specs' +task :bootstrap do + if system('which bundle') + title 'Installing gems' + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' + "\e[0m" + exit 1 + end +end + +begin + require "bundler/gem_tasks" + task :default => :spec + + #-- Specs ------------------------------------------------------------------# + + desc 'Runs all the specs' + task :spec do + title 'Running Unit Tests' + files = FileList['spec/**/*_spec.rb'].shuffle.join(' ') + sh "bundle exec bacon #{files}" + + title 'Checking code style...' + Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3' + end + + #-- Kick -------------------------------------------------------------------# + + desc 'Automatically run specs for updated files' + task :kick do + exec 'bundle exec kicker -c' + end + + #-- RuboCop ----------------------------------------------------------------# + + if RUBY_VERSION >= '1.9.3' + require 'rubocop/rake_task' + RuboCop::RakeTask.new + end + +rescue LoadError => e + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' 
\ + "\e[0m" + $stderr.puts e.message + $stderr.puts e.backtrace + $stderr.puts +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/cocoapods-trunk.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/cocoapods-trunk.gemspec new file mode 100644 index 0000000..e4f525f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/cocoapods-trunk.gemspec @@ -0,0 +1,26 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_trunk' + +Gem::Specification.new do |spec| + spec.name = "cocoapods-trunk" + spec.version = CocoaPodsTrunk::VERSION + spec.authors = ["Eloy Durán"] + spec.email = ["eloy.de.enige@gmail.com"] + spec.summary = "Interact with trunk.cocoapods.org" + spec.homepage = "https://github.com/CocoaPods/cocoapods-trunk" + spec.license = "MIT" + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ["lib"] + + spec.add_dependency 'nap', '>= 0.8', '< 2.0' + spec.add_dependency 'netrc', '~> 0.11' + spec.add_development_dependency "bundler", "~> 1.3" + spec.add_development_dependency "rake", '~> 10.0' + + spec.required_ruby_version = '>= 2.0.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..60a2dea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/trunk' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_trunk.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_trunk.rb new file mode 100644 index 0000000..1896acf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_trunk.rb @@ -0,0 +1,3 @@ +module CocoaPodsTrunk + VERSION = '1.6.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk.rb new file mode 100644 index 0000000..7101e5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk.rb @@ -0,0 +1,146 @@ +# encoding: UTF-8 + +require 'json' +require 'rest' +require 'netrc' + +module Pod + class Command + class Trunk < Command + self.abstract_command = true + self.summary = 'Interact with the CocoaPods API (e.g. 
publishing new specs)' + + SCHEME_AND_HOST = ENV['TRUNK_SCHEME_AND_HOST'] || 'https://trunk.cocoapods.org' + BASE_URL = "#{SCHEME_AND_HOST}/api/v1".freeze + + require 'pod/command/trunk/add_owner' + require 'pod/command/trunk/delete' + require 'pod/command/trunk/deprecate' + require 'pod/command/trunk/info' + require 'pod/command/trunk/me' + require 'pod/command/trunk/push' + require 'pod/command/trunk/register' + require 'pod/command/trunk/remove_owner' + + private + + def request_url(action, url, *args) + response = create_request(action, url, *args) + if (400...600).cover?(response.status_code) + print_error(response.body) + end + response + end + + def request_path(action, path, *args) + request_url(action, "#{BASE_URL}/#{path}", *args) + end + + def create_request(*args) + if verbose? + REST.send(*args) do |request| + request.set_debug_output($stdout) + end + else + REST.send(*args) + end + end + + def print_error(body) + begin + json = JSON.parse(body) + rescue JSON::ParserError + json = {} + end + error = json['error'] || "An unexpected error occurred: #{body}" + + case data = json['data'] + when Hash + lines = data.sort_by(&:first).map do |attr, messages| + attr = attr[0, 1].upcase << attr[1..-1] + messages.sort.map do |message| + "- #{attr}: #{message}" + end + end.flatten + count = lines.size + lines.unshift "The following #{'validation'.pluralize(count)} failed:" + error += "\n" << lines.join("\n") + end + + raise Informative, error + end + + def print_messages(data_url, messages, spec = nil, action = nil) + if verbose? || spec.nil? + # Using UI.labeled here is dangerous, as it wraps the URL and indents + # it, which breaks the URL when you try to copy-paste it. + UI.puts " - Data URL: #{data_url}" + + server_logs = messages.map do |entry| + at, message = entry.to_a.flatten + "#{formatted_time(at)}: #{message}" + end + UI.labeled 'Log messages', server_logs + else + separator = '-' * 80 + UI.puts + UI.puts separator + UI.puts " 🎉 Congrats" + UI.puts + UI.puts " 🚀 #{spec.name} (#{spec.version}) successfully #{action}" + unless messages.empty? + at = messages.first.to_a.flatten.first + UI.puts " 📅 #{formatted_time(at)}" + end + UI.puts " 🌎 https://cocoapods.org/pods/#{spec.name}" + UI.puts " 👍 Tell your friends!" + UI.puts separator + end + end + + def json(response) + JSON.parse(response.body) + end + + def netrc + @@netrc ||= Netrc.read + end + + def token + ENV['COCOAPODS_TRUNK_TOKEN'] || + (netrc['trunk.cocoapods.org'] && netrc['trunk.cocoapods.org'].password) + end + + def default_headers + { + 'Content-Type' => 'application/json; charset=utf-8', + 'Accept' => 'application/json; charset=utf-8', + 'User-Agent' => "CocoaPods/#{Pod::VERSION}", + } + end + + def auth_headers + default_headers.merge('Authorization' => "Token #{token}") + end + + def formatted_time(time_string) + require 'active_support/time' + @tz_offset ||= Time.zone_offset(time_zone) + @current_year ||= Date.today.year + + time = Time.parse(time_string) + @tz_offset + formatted = time.to_formatted_s(:long_ordinal) + # No need to show the current year, the user will probably know. 
+ if time.year == @current_year + formatted.sub!(" #{@current_year}", '') + end + formatted + end + + def time_zone + out, = Executable.capture_command('/bin/date', %w(+%Z), :capture => :out) + out.strip + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/add_owner.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/add_owner.rb new file mode 100644 index 0000000..529a6f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/add_owner.rb @@ -0,0 +1,47 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class AddOwner < Trunk + self.summary = 'Add an owner to a pod' + self.description = <<-DESC + Adds the registered user with specified `OWNER-EMAIL` as an owner + of the given `POD`. + An ‘owner’ is a registered user whom is allowed to make changes to a + pod, such as pushing new versions and adding and removing other ‘owners’. + DESC + + self.arguments = [ + CLAide::Argument.new('POD', true), + CLAide::Argument.new('OWNER-EMAIL', true), + ] + + def initialize(argv) + @pod = argv.shift_argument + @email = argv.shift_argument + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + unless @pod && @email + help! 'Specify the pod name and the new owner’s email address.' + end + end + + def run + body = { 'email' => @email }.to_json + json = json(request_path(:patch, "pods/#{@pod}/owners", body, auth_headers)) + UI.labeled 'Owners', json.map { |o| "#{o['name']} <#{o['email']}>" } + rescue REST::Error => e + raise Informative, "There was an error adding #{@email} to " \ + "#{@pod} on trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/delete.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/delete.rb new file mode 100644 index 0000000..e10203b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/delete.rb @@ -0,0 +1,70 @@ +module Pod + class Command + class Trunk + # @CocoaPods 1.0.0.beta.1 + # + class Delete < Trunk + self.summary = 'Deletes a version of a pod.' + self.description = <<-DESC + WARNING: It is generally considered bad behavior to remove + versions of a Pod that others are depending on! Please + consider using the deprecate command instead. + + Deletes the specified pod version from trunk and the master specs + repo. Once deleted, this version can never be pushed again. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('VERSION', true), + ] + + def initialize(argv) + @name = argv.shift_argument + @version = argv.shift_argument + super + end + + def validate! + super + help! 'Please specify a pod name.' unless @name + help! 'Please specify a version.' unless @version + end + + def run + return unless confirm_deletion? + json = delete + print_messages(json['data_url'], json['messages'], nil, nil) + end + + private + + WARNING_MESSAGE = 'WARNING: It is generally considered bad behavior ' \ + "to remove versions of a Pod that others are depending on!\n" \ + 'Please consider using the `deprecate` command instead.'.freeze + + def confirm_deletion? 
+ UI.puts(WARNING_MESSAGE.yellow) + loop do + UI.print("Are you sure you want to delete this Pod version?\n> ") + answer = UI.gets.strip.downcase + UI.puts # ensures a newline is printed after the user input + affirmatives = %w(y yes true 1) + negatives = %w(n no false 0) + return true if affirmatives.include?(answer) + return false if negatives.include?(answer) + end + end + + def delete + response = request_path(:delete, "pods/#{@name}/#{@version}", auth_headers) + url = response.headers['location'].first + json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error deleting the pod version ' \ + "from trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/deprecate.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/deprecate.rb new file mode 100644 index 0000000..4e3b78d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/deprecate.rb @@ -0,0 +1,48 @@ +module Pod + class Command + class Trunk + # @CocoaPods 1.0.0.beta.1 + # + class Deprecate < Trunk + self.summary = 'Deprecates a pod.' + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def self.options + [ + ['--in-favor-of=OTHER_NAME', 'The pod to deprecate this pod in favor of.'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @in_favor_of = argv.option('in-favor-of') + super + end + + def validate! + super + help! 'Please specify a pod name.' unless @name + end + + def run + json = deprecate + print_messages(json['data_url'], json['messages'], nil, nil) + end + + def deprecate + body = { + :in_favor_of => @in_favor_of, + }.to_json + response = request_path(:patch, "pods/#{@name}/deprecated", body, auth_headers) + url = response.headers['location'].first + json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error deprecating the pod ' \ + "via trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/info.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/info.rb new file mode 100644 index 0000000..2941890 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/info.rb @@ -0,0 +1,35 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Info < Trunk + self.summary = 'Returns information about a Pod.' + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def validate! + super + help! 'Please specify a pod name.' 
unless @name + end + + def run + response = json(request_path(:get, "pods/#{@name}", auth_headers)) + versions = response['versions'] || [] + owners = response['owners'] || [] + + UI.title(@name) do + UI.labeled 'Versions', versions.map { |v| "#{v['name']} (#{v['created_at']})" } + UI.labeled 'Owners', owners.map { |o| "#{o['name']} <#{o['email']}>" } + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/me.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/me.rb new file mode 100644 index 0000000..b8aab38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/me.rb @@ -0,0 +1,119 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Me < Trunk + self.summary = 'Display information about your sessions' + self.description = <<-DESC + Includes information about your registration, followed by all your + sessions. + + These are your current session, other valid sessions, unverified + sessions, and expired sessions. + DESC + + def validate! + super + unless token + help! 'You need to register a session first.' + end + end + + def run + me = json(request_path(:get, 'sessions', auth_headers)) + owner = json(request_path(:get, "owners/#{me['email']}")) + UI.labeled 'Name', owner['name'] + UI.labeled 'Email', owner['email'] + UI.labeled 'Since', formatted_time(owner['created_at']) + + pods = owner['pods'] || [] + pods = pods.map { |pod| pod['name'] } + pods = 'None' unless pods.any? + UI.labeled 'Pods', pods + + sessions = me['sessions'].map do |session| + hash = { + :created_at => formatted_time(session['created_at']), + :valid_until => formatted_time(session['valid_until']), + :created_from_ip => session['created_from_ip'], + :description => session['description'], + } + if Time.parse(session['valid_until']) <= Time.now.utc + hash[:color] = :red + elsif session['verified'] + hash[:color] = session['current'] ? :cyan : :green + else + hash[:color] = :yellow + hash[:valid_until] = 'Unverified' + end + hash + end + + columns = [:created_at, :valid_until, :created_from_ip, :description].map do |key| + find_max_size(sessions, key) + end + + sessions = sessions.map do |session| + created_at = session[:created_at].ljust(columns[0]) + valid_until = session[:valid_until].rjust(columns[1]) + created_from_ip = session[:created_from_ip].ljust(columns[2]) + description = session[:description] + msg = "#{created_at} - #{valid_until}. IP: #{created_from_ip}" + msg << " Description: #{description}" if description + msg.send(session[:color]) + end + + UI.labeled 'Sessions', sessions + + rescue REST::Error => e + raise Informative, 'There was an error fetching your info ' \ + "from trunk: #{e.message}" + end + + private + + def find_max_size(sessions, key) + sessions.map { |s| (s[key] || '').size }.max + end + + class CleanSessions < Me + self.summary = 'Remove sessions' + self.description = <<-DESC + By default this will clean-up your sessions by removing expired and + unverified sessions. + + To remove all your sessions, except for the one you are currently + using, specify the `--all` flag. + DESC + + def self.options + [ + ['--all', 'Removes all your sessions, except for the current one'], + ].concat(super) + end + + def initialize(argv) + @remove_all = argv.flag?('all', false) + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + end + + def run + path = @remove_all ? 
'sessions/all' : 'sessions' + request_path(:delete, path, auth_headers) + rescue REST::Error => e + raise Informative, 'There was an error cleaning up your ' \ + "sessions from trunk: #{e.message}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/push.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/push.rb new file mode 100644 index 0000000..209018b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/push.rb @@ -0,0 +1,169 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Push < Trunk + self.summary = 'Publish a podspec' + self.description = <<-DESC + Publish the podspec at `PATH` to make it available to all users of + the ‘trunk’ spec-repo. If `PATH` is not provided, defaults to the + current directory. + + Before pushing the podspec to cocoapods.org, this will perform a local + lint of the podspec, including a build of the library. However, it + remains *your* responsibility to ensure that the published podspec + will actually work for your users. Thus it is recommended that you + *first* try to use the podspec to integrate the library into your demo + and/or real application. + + If this is the first time you publish a spec for this pod, you will + automatically be registered as the ‘owner’ of this pod. (Note that + ‘owner’ in this case implies a person that is allowed to publish new + versions and add other ‘owners’, not necessarily the library author.) + DESC + + self.arguments = [ + CLAide::Argument.new('PATH', false), + ] + + def self.options + [ + ['--allow-warnings', 'Allows push even if there are lint warnings'], + ['--use-libraries', 'Linter uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--swift-version=VERSION', 'The SWIFT_VERSION that should be used to lint the spec. ' \ + 'This takes precedence over a .swift-version file.'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--synchronous', 'If validation depends on other recently pushed pods, synchronize'], + ].concat(super) + end + + def initialize(argv) + @allow_warnings = argv.flag?('allow-warnings', false) + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @path = argv.shift_argument || '.' + @synchronous = argv.flag?('synchronous', false) + find_podspec_file if File.directory?(@path) + super + end + + def validate! + super + unless token + help! 'You need to run `pod trunk register` to register a session first.' + end + unless @path + help! 'Please specify the path to the podspec file.' + end + unless File.exist?(@path) && !File.directory?(@path) + help! "The specified path `#{@path}` does not point to " \ + 'an existing podspec file.' 
+ end + end + + def run + update_master_repo + validate_podspec + status, json = push_to_trunk + update_master_repo + + if (400...600).cover?(status) + print_messages(json['data_url'], json['messages'], nil) + else + print_messages(json['data_url'], json['messages'], spec, 'published') + end + end + + private + + MASTER_GIT_REPO_URL = 'https://github.com/CocoaPods/Specs.git'.freeze + + def push_to_trunk + spec.attributes_hash[:pushed_with_swift_version] = @swift_version if @swift_version + response = request_path(:post, "pods?allow_warnings=#{@allow_warnings}", + spec.to_json, auth_headers) + url = response.headers['location'].first + return response.status_code, json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error pushing a new version ' \ + "to trunk: #{e.message}" + end + + def find_podspec_file + podspecs = Dir[Pathname(@path) + '*.podspec{.json,}'] + case podspecs.count + when 0 + UI.notice "No podspec found in directory `#{@path}`" + when 1 + UI.notice "Found podspec `#{podspecs[0]}`" + else + UI.notice "Multiple podspec files in directory `#{@path}`. " \ + 'You need to explicitly specify which one to use.' + end + @path = (podspecs.count == 1) ? podspecs[0] : nil + end + + def spec + @spec ||= Pod::Specification.from_file(@path) + rescue Informative => e # TODO: this should be a more specific error + raise Informative, 'Unable to interpret the specified path ' \ + "#{UI.path(@path)} as a podspec (#{e})." + end + + # Performs a full lint against the podspecs. + # + # TODO: Currently copied verbatim from `pod push`. + def validate_podspec + UI.puts 'Validating podspec'.yellow + + validator = Validator.new(spec, [repo_url]) + validator.allow_warnings = @allow_warnings + validator.use_frameworks = @use_frameworks + if validator.respond_to?(:use_modular_headers=) + validator.use_modular_headers = @use_modular_headers + end + if validator.respond_to?(:swift_version=) + validator.swift_version = @swift_version + end + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.validate + unless validator.validated? + raise Informative, "The spec did not pass validation, due to #{validator.failure_reason}." + end + + # Let the validator's logic for the swift version + # set the value for the trunk JSON uploader + @swift_version = validator.respond_to?(:used_swift_version) && validator.used_swift_version + end + + def repo_url + @synchronous ? MASTER_GIT_REPO_URL : Pod::TrunkSource::TRUNK_REPO_URL + end + + def update_master_repo + # more robust Trunk setup logic: + # - if Trunk exists, updates it + # - if Trunk doesn't exist, add it and update it + # + repo = sources_manager.find_or_create_source_with_url(repo_url) + sources_manager.update(repo.name) + end + + def sources_manager + if defined?(Pod::SourcesManager) + Pod::SourcesManager + else + config.sources_manager + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/register.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/register.rb new file mode 100644 index 0000000..19f1a09 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/register.rb @@ -0,0 +1,78 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Register < Trunk + self.summary = 'Manage sessions' + self.description = <<-DESC + Register a new account, or create a new session. 
+ + If this is your first registration, both an `EMAIL` address and + `YOUR_NAME` are required. If you’ve already registered with trunk, you may + omit the `YOUR_NAME` (unless you would like to change it). + + It is recommended that you provide a description of the session, so + that it will be easier to identify later on. For instance, when you + would like to clean-up your sessions. A common example is to specify + the location where the machine, that you are using the session for, is + physically located. + + Examples: + + $ pod trunk register eloy@example.com 'Eloy Durán' --description='Personal Laptop' + $ pod trunk register eloy@example.com --description='Work Laptop' + $ pod trunk register eloy@example.com + DESC + + self.arguments = [ + CLAide::Argument.new('EMAIL', true), + CLAide::Argument.new('YOUR_NAME', false), + ] + + def self.options + [ + ['--description=DESCRIPTION', 'An arbitrary description to ' \ + 'easily identify your session ' \ + 'later on.'], + ].concat(super) + end + + def initialize(argv) + @session_description = argv.option('description') + @email = argv.shift_argument + @name = argv.shift_argument + super + end + + def validate! + super + unless @email + help! 'Specify at least your email address.' + end + end + + def run + body = { + 'email' => @email, + 'name' => @name, + 'description' => @session_description, + }.to_json + json = json(request_path(:post, 'sessions', body, default_headers)) + save_token(json['token']) + # TODO UI.notice inserts an empty line :/ + UI.puts '[!] Please verify the session by clicking the link in the ' \ + "verification email that has been sent to #{@email}".yellow + rescue REST::Error => e + raise Informative, 'There was an error registering with trunk: ' \ + "#{e.message}" + end + + def save_token(token) + netrc['trunk.cocoapods.org'] = @email, token + netrc.save + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/remove_owner.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/remove_owner.rb new file mode 100644 index 0000000..b0d170e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/remove_owner.rb @@ -0,0 +1,46 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class RemoveOwner < Trunk + self.summary = 'Remove an owner from a pod' + self.description = <<-DESC + Removes the user with specified `OWNER-EMAIL` from being an owner + of the given `POD`. + An ‘owner’ is a registered user whom is allowed to make changes to a + pod, such as pushing new versions and adding and removing other ‘owners’. + DESC + + self.arguments = [ + CLAide::Argument.new('POD', true), + CLAide::Argument.new('OWNER-EMAIL', true), + ] + + def initialize(argv) + @pod = argv.shift_argument + @email = argv.shift_argument + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + unless @pod && @email + help! 'Specify the pod name and the owner’s email address.' 
+ end + end + + def run + json = json(request_path(:delete, "pods/#{@pod}/owners/#{@email}", auth_headers)) + UI.labeled 'Owners', json.map { |o| "#{o['name']} <#{o['email']}>" } + rescue REST::Error => e + raise Informative, "There was an error removing #{@email} from " \ + "#{@pod} on trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/addowner_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/addowner_spec.rb new file mode 100644 index 0000000..2600ada --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/addowner_spec.rb @@ -0,0 +1,52 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::AddOwner do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk add-owner )).should.be.instance_of Command::Trunk::AddOwner + end + end + + describe 'validation' do + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk push )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error if pod name is not supplied' do + command = Command.parse(%w( trunk add-owner )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'pod name' + end + + it 'should error if new owners email is not supplied' do + command = Command.parse(%w( trunk add-owner QueryKit )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email' + end + + it 'should should validate with valid pod and email' do + command = Command.parse(%w( trunk add-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('token') + lambda { command.validate! }.should.not.raise CLAide::Help + end + end + + it 'should successfully add an owner' do + url = 'https://trunk.cocoapods.org/api/v1/pods/QueryKit/owners' + stub_request(:patch, url). + with(:body => '{"email":"kyle@cocoapods.org"}', + :headers => { 'Authorization' => 'Token 527d11fe429f3426cb8dbeba183a0d80' }). + to_return(:status => 200, :body => '[]', :headers => {}) + + command = Command.parse(%w( trunk add-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('527d11fe429f3426cb8dbeba183a0d80') + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/delete_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/delete_spec.rb new file mode 100644 index 0000000..7538773 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/delete_spec.rb @@ -0,0 +1,69 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Delete do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk delete )).should.be.instance_of Command::Trunk::Delete + end + end + + it 'should error without a pod name' do + command = Command.parse(%w( trunk delete )) + lambda { command.validate! }.should.raise CLAide::Help + end + + it 'should error without a version' do + command = Command.parse(%w( trunk delete Stencil )) + lambda { command.validate! 
}.should.raise CLAide::Help + end + + it 'confirms deletion' do + Colored2.disable! + UI.inputs += %w(garbage true false) + command = Command.parse(%w( trunk delete Stencil 1.0.0 )) + command.send(:confirm_deletion?).should.be.true + command.send(:confirm_deletion?).should.be.false + + UI.output.should == <<-OUTPUT.gsub(/^>$/, '> ') +WARNING: It is generally considered bad behavior to remove versions of a Pod that others are depending on! +Please consider using the `deprecate` command instead. +Are you sure you want to delete this Pod version? +> +Are you sure you want to delete this Pod version? +> +WARNING: It is generally considered bad behavior to remove versions of a Pod that others are depending on! +Please consider using the `deprecate` command instead. +Are you sure you want to delete this Pod version? +> + OUTPUT + end + + it 'does not delete if the user does not confirm' do + Command::Trunk::Delete.any_instance.expects(:confirm_deletion?).returns(false) + Command::Trunk::Delete.any_instance.expects(:delete).never + Command::Trunk::Delete.invoke(%w(Stencil 1.0.0)) + end + + it 'should show information for a pod' do + response = { + 'messages' => [ + { + '2015-12-05 02:00:25 UTC' => 'Push for `Stencil 1.0.0` initiated.', + }, + { + '2015-12-05 02:00:26 UTC' => 'Push for `Stencil 1.0.0` has been pushed (1.02409270 s).', + }, + ], + 'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/1.0.0/Stencil.podspec.json', + } + Command::Trunk::Delete.any_instance.expects(:delete).returns(response) + UI.inputs << 'TRUE ' + Command::Trunk::Delete.invoke(%w(Stencil 1.0.0)) + + UI.output.should.include 'Data URL: https://raw.githubusercontent' + UI.output.should.include 'Push for `Stencil 1.0.0` initiated' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/deprecate_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/deprecate_spec.rb new file mode 100644 index 0000000..7f43498 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/deprecate_spec.rb @@ -0,0 +1,58 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Deprecate do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk deprecate )).should.be.instance_of Command::Trunk::Deprecate + end + end + + it 'should error without a pod name' do + command = Command.parse(%w( trunk deprecate )) + lambda { command.validate! }.should.raise CLAide::Help + end + + before do + @push_response = { + 'messages' => [ + { + '2015-12-05 02:00:25 UTC' => 'Push for `Stencil 0.96.3` initiated.', + }, + { + '2015-12-05 02:00:26 UTC' => 'Push for `Stencil 0.96.3` has been pushed (1.02409270 s).', + }, + ], + 'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/0.96.3/Stencil.podspec.json', + } + end + + it 'should show information for a pod' do + Command::Trunk::Deprecate.any_instance.expects(:deprecate).returns(@push_response) + Command::Trunk::Deprecate.invoke(%w(Stencil)) + + UI.output.should.include 'Data URL: https://raw.githubusercontent' + UI.output.should.include 'Push for `Stencil 0.96.3` initiated' + end + + it 'should send the proper network request' do + redirect = 'http://redirected.com' + stub_request(:patch, 'https://trunk.cocoapods.org/api/v1/pods/Stencil/deprecated'). 
+ with(:body => hash_including('in_favor_of' => 'Stamp')). + to_return(:status => 201, :headers => { :location => redirect }) + + stub_request(:get, redirect). + to_return(:status => 200, :body => @push_response.to_json) + + Command::Trunk::Deprecate.invoke(%w(Stencil --in-favor-of=Stamp)) + + UI.output.should == <<-EOS + - Data URL: https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/0.96.3/Stencil.podspec.json +- Log messages: + - December 5th, 2015 02:00: Push for `Stencil 0.96.3` initiated. + - December 5th, 2015 02:00: Push for `Stencil 0.96.3` has been pushed (1.02409270 s). + EOS + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/info_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/info_spec.rb new file mode 100644 index 0000000..1ff1a38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/info_spec.rb @@ -0,0 +1,36 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Info do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk info )).should.be.instance_of Command::Trunk::Info + end + end + + it 'should error without a pod name' do + command = Command.parse(%w( trunk info )) + lambda { command.validate! }.should.raise CLAide::Help + end + + it 'should show information for a pod' do + url = 'https://trunk.cocoapods.org/api/v1/pods/Stencil' + stub_request(:get, url).to_return(:body => { + 'owners' => [ + { + 'name' => 'Kyle Fuller', + 'email' => 'kyle@example.com', + }, + ], + }.to_json) + + command = Command.parse(%w( trunk info Stencil )) + lambda { command.validate! }.should.not.raise CLAide::Help + command.run + + UI.output.should.include 'Owners' + UI.output.should.include 'Kyle Fuller ' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/me_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/me_spec.rb new file mode 100644 index 0000000..585bb38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/me_spec.rb @@ -0,0 +1,17 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::Me do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk me )).should.be.instance_of Command::Trunk::Me + end + end + + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk me )) + lambda { command.validate! 
}.should.raise CLAide::Help + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/push_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/push_spec.rb new file mode 100644 index 0000000..c6c62bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/push_spec.rb @@ -0,0 +1,332 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +module Pod + describe Command::Trunk::Push do + def success_json + { + 'messages' => [ + { + '2015-12-05 02:00:25 UTC' => "Push for `BananaLib 0.96.3' initiated.", + }, + { + '2015-12-05 02:00:26 UTC' => "Push for `BananaLib 0.96.3' has been pushed (1.02409270 s).", + }, + ], + 'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/BananaLib/0.96.3/BananaLib.podspec.json', + } + end + + before do + Command::Trunk::Push.any_instance.stubs(:update_master_repo) + end + + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk push )).should.be.instance_of Command::Trunk::Push + end + end + + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk push )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error when the trunk service returns an error' do + url = 'https://trunk.cocoapods.org/api/v1/pods?allow_warnings=false' + stub_request(:post, url).to_return(:status => 422, :body => { + 'error' => 'The Pod Specification did not pass validation.', + 'data' => { + 'warnings' => [ + 'A value for `requires_arc` should be specified until the migration to a `true` default.', + ], + }, + }.to_json) + command = Command.parse(%w(trunk push)) + command.stubs(:validate_podspec) + command.stubs(:spec).returns(Pod::Specification.new) + exception = lambda { command.run }.should.raise Informative + exception.message.should.include 'following validation failed' + exception.message.should.include 'should be specified' + exception.message.should.include 'The Pod Specification did not pass validation' + end + + describe 'PATH' do + before do + UI.output = '' + end + it 'defaults to the current directory' do + # Disable the podspec finding algorithm so we can check the raw path + Command::Trunk::Push.any_instance.stubs(:find_podspec_file) { |path| path } + command = Command.parse(%w( trunk push )) + command.instance_eval { @path }.should == '.' + end + + def found_podspec_among_files(files) + # Create a temp directory with the dummy `files` in it + Dir.mktmpdir do |dir| + files.each do |filename| + path = Pathname(dir) + filename + File.open(path, 'w') {} + end + # Execute `pod trunk push` with this dir as parameter + command = Command.parse(%w( trunk push ) + [dir]) + path = command.instance_eval { @path } + return File.basename(path) if path + end + end + + it 'should find the only JSON podspec in a given directory' do + files = %w(foo bar.podspec.json baz) + found_podspec_among_files(files).should == files[1] + end + + it 'should find the only Ruby podspec in a given directory' do + files = %w(foo bar.podspec baz) + found_podspec_among_files(files).should == files[1] + end + + it 'should warn when no podspec found in a given directory' do + files = %w(foo bar baz) + found_podspec_among_files(files).should.nil? 
+ UI.output.should.match /No podspec found in directory/ + end + + it 'should warn when multiple podspecs found in a given directory' do + files = %w(foo bar.podspec bar.podspec.json baz) + found_podspec_among_files(files).should.nil? + UI.output.should.match /Multiple podspec files in directory/ + end + end + + describe 'validation' do + before do + Installer.any_instance.stubs(:aggregate_targets).returns([]) + Installer.any_instance.stubs(:pod_targets).returns([]) + + Validator.any_instance.stubs(:check_file_patterns) + Validator.any_instance.stubs(:validate_url) + Validator.any_instance.stubs(:validate_screenshots) + Validator.any_instance.stubs(:xcodebuild).returns('') + Validator.any_instance.stubs(:install_pod) + Validator.any_instance.stubs(:build_pod) + Validator.any_instance.stubs(:add_app_project_import) + Validator.any_instance.stubs(:used_swift_version).returns(nil) + %i(prepare resolve_dependencies download_dependencies write_lockfiles).each do |m| + Installer.any_instance.stubs(m) + end + Command::Trunk::Push.any_instance.stubs(:master_repo_url). + returns(Pod::TrunkSource::TRUNK_REPO_URL) + end + + it 'passes the SWIFT_VERSION to the Validator' do + Validator.any_instance.expects(:swift_version=).with('3.0') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --swift-version=3.0)) + cmd.send(:validate_podspec) + end + + it 'passes a swift version back to command, to handle .swift-version files' do + Validator.any_instance.stubs(:dot_swift_version).returns('1.2.3') + Validator.any_instance.stubs(:used_swift_version).returns('1.2.3') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --allow-warnings)) + cmd.send(:validate_podspec) + cmd.instance_variable_get(:@swift_version).should == '1.2.3' + end + + it 'validates specs as frameworks by default' do + Validator.any_instance.expects(:podfile_from_spec). + with(:ios, '8.0', true, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:osx, nil, true, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:tvos, nil, true, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:watchos, nil, true, [], nil, nil).once.returns(Podfile.new) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + cmd.send(:validate_podspec) + end + + it 'validates specs as libraries if requested' do + Validator.any_instance.expects(:podfile_from_spec). + with(:ios, nil, false, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:osx, nil, false, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:tvos, nil, false, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:watchos, nil, false, [], nil, nil).once.returns(Podfile.new) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-libraries)) + cmd.send(:validate_podspec) + end + + it 'prints the failure reason' do + Validator.any_instance.expects(:validated?).returns(false) + Validator.any_instance.expects(:validate) + Validator.any_instance.expects(:failure_reason).returns('failure_reason') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-libraries)) + e = should.raise(Informative) { cmd.send(:validate_podspec) } + e.message.should.include 'The spec did not pass validation, due to failure_reason.' 
+ end + + it 'passes skip import validation' do + Validator.any_instance.expects(:skip_import_validation=).with(true) + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --skip-import-validation)) + cmd.send(:validate_podspec) + end + + it 'passes skip test' do + Validator.any_instance.expects(:skip_tests=).with(true) + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --skip-tests)) + cmd.send(:validate_podspec) + end + + it 'passes use modular headers' do + Validator.any_instance.expects(:use_modular_headers=) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-modular-headers)) + cmd.send(:validate_podspec) + end + end + + describe 'sending the swift version up to trunk' do + before do + # This won't get called + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + # For faking the networking when sending + Pod::Command::Trunk.any_instance.expects(:json).returns({}) + Pod::Command::Trunk.any_instance.expects(:auth_headers).returns({}) + end + + it 'passes the value to trunk' do + # Fakes for the network response + response = mock + response.expects(:headers).returns('location' => ['http://theinternet.com']) + response.expects(:status_code).returns(200) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --swift-version=1.1.2)) + + # Using a blank podspec - JSON should include `"pushed_with_swift_version":"1.1.2"` + cmd.stubs(:spec).returns(Pod::Specification.new) + + json = <<-JSON +{"name":null,"pushed_with_swift_version":"1.1.2","platforms":{"osx":null,"ios":null,"tvos":null,"watchos":null}} + JSON + + cmd.stubs(:validate_podspec) + cmd.stubs(:request_url) + + api_route = 'pods?allow_warnings=false' + cmd.expects(:request_path).with(:post, api_route, json, {}).returns(response) + cmd.send(:push_to_trunk) + end + end + + describe 'updating the master repo' do + before do + @cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + @cmd.stubs(:validate_podspec) + @cmd.stubs(:push_to_trunk).returns([200, success_json]) + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + Command::Trunk::Push.any_instance.stubs(:master_repo_name). + returns(Pod::TrunkSource::TRUNK_REPO_NAME) + end + + it 'updates the master repo when it exists' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(2). + returns(Pod::TrunkSource.new(Pod::TrunkSource::TRUNK_REPO_NAME)) + + Config.instance.sources_manager.expects(:update).with(Pod::TrunkSource::TRUNK_REPO_NAME).twice + Command::Repo::AddCDN.any_instance.expects(:run).never + + @cmd.run + end + + it 'sets up the master repo when it does not exist' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(3). + returns(nil). + returns(Pod::TrunkSource.new(Pod::TrunkSource::TRUNK_REPO_NAME)) + Config.instance.sources_manager.expects(:update).with(Pod::TrunkSource::TRUNK_REPO_NAME).twice + Command::Repo::AddCDN.any_instance.expects(:run) + + @cmd.run + end + end + + describe 'synchronous updating the git repo' do + before do + @cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --synchronous)) + @cmd.stubs(:validate_podspec) + @cmd.stubs(:push_to_trunk).returns([200, success_json]) + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + Command::Trunk::Push.any_instance.stubs(:master_repo_name).returns('master') + end + + it 'updates the git repo when it exists' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(2). 
+ returns(Pod::TrunkSource.new('master')) + + Config.instance.sources_manager.expects(:update).with('master').twice + Command::Repo::AddCDN.any_instance.expects(:run).never + + @cmd.run + end + + it 'sets up the git repo when it does not exist' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(3). + returns(nil). + returns(Pod::TrunkSource.new('master')) + Config.instance.sources_manager.stubs(:cdn_url?).returns(false) + Config.instance.sources_manager.stubs(:create_source_with_url).once. + returns(Pod::TrunkSource.new('master')) + Config.instance.sources_manager.expects(:update).with('master').twice + + @cmd.run + end + end + + describe 'Presenting Responses to the user' do + before do + Command::Trunk::Push.any_instance.stubs(:update_master_repo) + Config.instance.sources_manager.stubs(:master_repo_functional?).returns(true) + end + + it 'shows full logs when verbose' do + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --verbose)) + cmd.stubs(:validate_podspec) + cmd.stubs(:push_to_trunk).returns([200, success_json]) + + cmd.run + UI.output.should.match %r{- Data URL: https://raw.githubusercontent.com/CocoaPods/Specs} + end + + it 'shows full logs when errored' do + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --verbose)) + cmd.stubs(:validate_podspec) + cmd.stubs(:push_to_trunk).returns([400, success_json]) + + cmd.run + UI.output.should.match %r{- Data URL: https://raw.githubusercontent.com/CocoaPods/Specs} + end + + it 'shows thanks emojis when success' do + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + cmd.stubs(:validate_podspec) + cmd.stubs(:push_to_trunk).returns([200, success_json]) + cmd.run + + UI.output.should.match %r{https://cocoapods.org/pods/BananaLib} + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/register_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/register_spec.rb new file mode 100644 index 0000000..7bc316a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/register_spec.rb @@ -0,0 +1,31 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::Register do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk register )).should.be.instance_of Command::Trunk::Register + end + end + + it 'should error if email is not supplied' do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk register )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email address' + end + + it 'should register user' do + url = 'https://trunk.cocoapods.org/api/v1/sessions' + stub_request(:post, url). + with(:body => hash_including('email' => 'kyle@cocoapods.org')). 
+ to_return(:status => 200, :body => '{"token": "acct"}') + Netrc.any_instance.stubs(:[]).returns(nil) + Netrc.any_instance.expects(:[]=).with('trunk.cocoapods.org', ['kyle@cocoapods.org', 'acct']) + Netrc.any_instance.expects(:save) + + command = Command.parse(%w( trunk register kyle@cocoapods.org )) + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/remove_owner_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/remove_owner_spec.rb new file mode 100644 index 0000000..bed408f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/remove_owner_spec.rb @@ -0,0 +1,51 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::RemoveOwner do + describe 'CLAide' do + it 'registers itself' do + Command.parse(%w( trunk remove-owner )).should.be.instance_of Command::Trunk::RemoveOwner + end + end + + describe 'validation' do + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk remove-owner )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error if pod name is not supplied' do + command = Command.parse(%w( trunk remove-owner )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'pod name' + end + + it 'should error if new owners email is not supplied' do + command = Command.parse(%w( trunk remove-owner QueryKit )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email' + end + + it 'should should validate with valid pod and email' do + command = Command.parse(%w( trunk remove-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('token') + lambda { command.validate! }.should.not.raise CLAide::Help + end + end + + it 'should successfully remove an owner' do + url = 'https://trunk.cocoapods.org/api/v1/pods/QueryKit/owners/kyle@cocoapods.org' + stub_request(:delete, url). + with(:headers => { 'Authorization' => 'Token 527d11fe429f3426cb8dbeba183a0d80' }). 
+ to_return(:status => 200, :body => '[]', :headers => {}) + + command = Command.parse(%w( trunk remove-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('527d11fe429f3426cb8dbeba183a0d80') + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk_spec.rb new file mode 100644 index 0000000..e062461 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk_spec.rb @@ -0,0 +1,23 @@ +require File.expand_path('../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk do + describe 'CLAide' do + it 'registers it self' do + Command.parse(%w( trunk )).should.be.instance_of Command::Trunk + end + end + + before do + @command = Command.parse(%w(trunk)) + end + + describe 'authorization' do + it 'will use the trunk token from ENV if present' do + ENV.stubs(:[]).with('COCOAPODS_TRUNK_TOKEN').returns('token') + + @command.send(:token).should == 'token' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/fixtures/BananaLib.podspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/fixtures/BananaLib.podspec new file mode 100644 index 0000000..b9f98d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/fixtures/BananaLib.podspec @@ -0,0 +1,25 @@ +Pod::Spec.new do |s| + s.name = 'BananaLib' + s.version = '1.0' + s.authors = 'Banana Corp', { 'Monkey Boy' => 'monkey@banana-corp.local' } + s.homepage = 'http://banana-corp.local/banana-lib.html' + s.summary = 'Chunky bananas!' + s.description = 'Full of chunky bananas.' + s.source = { :git => 'http://banana-corp.local/banana-lib.git', :tag => 'v1.0' } + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' + } + s.source_files = 'Classes/*.{h,m,d}', 'Vendor' + s.resources = "Resources/*" + s.vendored_framework = 'Bananalib.framework' + s.vendored_library = 'libBananalib.a' + s.preserve_paths = 'preserve_me.txt' + s.public_header_files = 'Classes/Banana.h' + + s.prefix_header_file = 'Classes/BananaLib.pch' + s.xcconfig = { 'OTHER_LDFLAGS' => '-framework SystemConfiguration' } + s.dependency 'monkey', '~> 1.0.1', '< 1.0.9' + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/spec_helper.rb new file mode 100644 index 0000000..3ca19a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-trunk-1.6.0/spec/spec_helper.rb @@ -0,0 +1,97 @@ +# Set up coverage analysis +#-----------------------------------------------------------------------------# + +if RUBY_VERSION >= '1.9.3' + require 'codeclimate-test-reporter' + CodeClimate::TestReporter.configure do |config| + config.logger.level = Logger::WARN + end + CodeClimate::TestReporter.start +end + +# Set up +#-----------------------------------------------------------------------------# + +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$:.unshift((ROOT + 'lib').to_s) +$:.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'webmock' + +include WebMock::API +WebMock.enable! 
+WebMock.disable_net_connect!(:allow => ['codeclimate.com', 'cdn.cocoapods.org']) + +require 'cocoapods' + +require 'cocoapods_plugin' + +# Helpers +#-----------------------------------------------------------------------------# + +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + attr_accessor :inputs + + def gets + inputs.shift + end + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end + + class Command::Trunk + def time_zone + 'UTC' + end + end +end + +module Bacon + class Context + old_run_requirement = instance_method(:run_requirement) + define_method(:run_requirement) do |description, spec| + ::Pod::Config.instance = nil + ::Pod::UI.output = '' + ::Pod::UI.warnings = '' + ::Pod::UI.inputs = [] + # The following prevents a nasty behaviour where the increments are not + # balanced when testing informative which might lead to sections not + # being printed to the output as they are too nested. + ::Pod::UI.indentation_level = 0 + ::Pod::UI.title_level = 0 + + WebMock.reset! + + old_run_requirement.bind(self).call(description, spec) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop.yml new file mode 100644 index 0000000..e3dc640 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..93f03f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop_cocoapods.yml @@ -0,0 +1,129 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Style/Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +Style/SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +Lint/AssignmentInCondition: + Enabled: false + +# Allow backticks +Style/AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +Style/IfUnlessModifier: + Enabled: false + +# No enforced convention here. +Style/SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Style/Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. 
+Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Layout/MultilineOperationIndentation: + EnforcedStyle: indented + +Style/PercentLiteralDelimiters: + PreferredDelimiters: + default: '()' + '%w': '()' + +# Clashes with CLAide Command#validate! +Style/GuardClause: + Enabled: false + +# Not always desirable +Style/Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +Style/HashSyntax: + EnforcedStyle: hash_rockets + +Style/Lambda: + Enabled: true + +Layout/DotPosition: + EnforcedStyle: trailing + +Style/EachWithObject: + Enabled: true + +Performance/HashEachMethods: + Enabled: true + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +Lint/AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Lint/Void: + Exclude: + - spec/**/* + +Style/ClassAndModuleChildren: + Exclude: + - spec/**/* + +Lint/UselessComparison: + Exclude: + - spec/**/* + +Metrics/BlockLength: + Exclude: + - spec/**/* + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop_todo.yml new file mode 100644 index 0000000..a5288bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.rubocop_todo.yml @@ -0,0 +1,72 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2020-04-16 22:06:47 -0700 using RuboCop version 0.50.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/EmptyLineAfterMagicComment: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 2 +# Configuration parameters: ContextCreatingMethods, MethodCreatingMethods. +Lint/UselessAccessModifier: + Exclude: + - 'lib/pod/command/try.rb' + +# Offense count: 37 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 108 + +# Offense count: 3 +# Configuration parameters: ExpectMatchingDefinition, Regex, IgnoreExecutableScripts, AllowedAcronyms. 
+# AllowedAcronyms: CLI, DSL, ACL, API, ASCII, CPU, CSS, DNS, EOF, GUID, HTML, HTTP, HTTPS, ID, IP, JSON, LHS, QPS, RAM, RHS, RPC, SLA, SMTP, SQL, SSH, TCP, TLS, TTL, UDP, UI, UID, UUID, URI, URL, UTF8, VM, XML, XMPP, XSRF, XSS +Naming/FileName: + Exclude: + - 'Gemfile' + - 'Rakefile' + - 'cocoapods-try.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: AutoCorrect, EnforcedStyle, SupportedStyles. +# SupportedStyles: predicate, comparison +Style/NumericPredicate: + Exclude: + - 'spec/**/*' + - 'lib/pod/command/try.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: PreferredDelimiters. +Style/PercentLiteralDelimiters: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: use_perl_names, use_english_names +Style/SpecialGlobalVars: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, ConsistentQuotesInMultiline. +# SupportedStyles: single_quotes, double_quotes +Style/StringLiterals: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/UnneededPercentQ: + Exclude: + - 'cocoapods-try.gemspec' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.travis.yml new file mode 100644 index 0000000..49cb2c4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/.travis.yml @@ -0,0 +1,34 @@ +# Sets Travis to run the Ruby specs on OS X machines to be as close as possible +# to the user environment. +# +language: ruby + +dist: trusty + +branches: + only: + - master + - /.+-stable$/ + +matrix: + include: + - rvm: 2.4.1 + +rvm: + - 2.0.0-p647 + - 2.3.4 + - 2.4.1 + - 2.6.2 + +before_install: + # There is a bug in travis. When using system ruby, bundler is not + # installed and causes the default install action to fail. + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem install "bundler:~> 1.17"; else gem install "bundler:~> 1.17"; fi + # RubyGems 2.0.14 isn't a fun time on 2.0.0p648 + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem update --system; fi + +install: + - bundle install --path .bundle + - bundle exec pod repo add-cdn trunk 'https://cdn.cocoapods.org' + +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/CHANGELOG.md new file mode 100644 index 0000000..7068a50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/CHANGELOG.md @@ -0,0 +1,204 @@ +# CocoaPods::Try CHANGELOG + +## 1.2.0 (2020-04-20) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix a crash when using `pod try` with CocoaPods 1.8.0 or higher. + [@arielpollack](https://github.com/arielpollack) + [#63](https://github.com/CocoaPods/cocoapods-try/issues/63) + [#65](https://github.com/CocoaPods/cocoapods-try/pull/65) + + +## 1.1.0 (2016-07-10) + +##### Enhancements + +* Added a command line option for specifying the podspec file from Git URL + [@rockwotj](https://github.com/rockwotj) + [59](https://github.com/CocoaPods/CocoaPods-try/issues/59) + +##### Bug Fixes + +* None. + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. 
+ + +## 1.0.0.beta.4 (2016-04-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Compatibility With CocoaPods 1.0.0.beta.8. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#5159](https://github.com/CocoaPods/CocoaPods/issues/5159) + + +## 1.0.0.beta.3 (2016-03-15) + +##### Bug Fixes + +* Compatibility with CocoaPods 1.0.0.beta.6. + [Marius Rackwitz](https://github.com/mrackwitz) + + +## 1.0.0.beta.2 (2016-01-05) + +##### Bug Fixes + +* Ensure that the pod's source is re-downloaded, instead of pulling from the + cache, which only holds cleaned sources. + [Samuel Giddins](https://github.com/segiddins) + [#43](https://github.com/CocoaPods/cocoapods-try/issues/43) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Bug Fixes + +* Ensure commands in the `.cocoapods` file are strings, and uses the pods folder when executing commands. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods-Try#40](https://github.com/CocoaPods/cocoapods-try/issues/40) + + +## 0.5.1 (2015-08-28) + +##### Bug Fixes + +* This release fixes a file permissions error when using the RubyGem. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.5.0 (2015-08-26) + +##### Enhancements + +* Any CocoaPod / GitHub repo can now declare their own pre-install commands, and prefer a + project. To use this, add a `.cocoapods.yml` file to the root of your repo. The yaml file + should have a structure like: + + ``` yaml + try: + install: + pre: + - pod install + - git submodule init + project: 'ORStackView.xcworkspace' + ``` + + [Orta Therox](https://github.com/orta) + [#33](https://github.com/CocoaPods/cocoapods-try/issues/33) + + +## 0.4.5 (2015-05-27) + +##### Bug Fixes + +* Use `Dir.tmpdir` instead of explicit `/tmp`. + [Boris Bügling](https://github.com/neonichu) + [#34](https://github.com/CocoaPods/cocoapods-try/pull/34) + +* Automatically detect JSON podspecs. + [Samuel Giddins](https://github.com/segiddins) + [#35](https://github.com/CocoaPods/cocoapods-try/issues/35) + + +## 0.4.4 (2015-05-06) + +##### Bug Fixes + +* Fix working with the CocoaPods download cache introduced in 0.37. + [Samuel Giddins](https://github.com/) + [#30](https://github.com/CocoaPods/cocoapods-try/issues/30) + + +## 0.4.3 (2014-12-25) + +##### Bug Fixes + +* Ensure that the master repo is setup on try. + [Daniel Tomlinson](https://github.com/DanielTomlinson) + [CocoaPods/CocoaPods#2563](https://github.com/CocoaPods/CocoaPods/pull/2563) + +## 0.4.2 (2014-10-29) + +* Prefer projects or workspaces with the name including Sample over others. + [Kyle Fuller](https://github.com/kylef) + +## 0.4.1 (2014-09-26) + +* Add `--no-repo-update` option. + [Eloy Durán](https://github.com/alloy) + +## 0.4.0 (2014-09-11) + +### Enhancements + +* Adopted new argument format of CLAide. + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.3.0 (2014-05-19) + +### Enhancements + +* Adopted new CLAide release. + [Fabio Pelosin](https://github.com/irrationalfab) + +## 0.2.0 (2014-03-28) + +### Enhancements + +* Added support for the specification of an URL instead of the name of a Pod. + [David Grandinetti](https://github.com/dbgrandi) + [Fabio Pelosin](https://github.com/irrationalfab) + +## 0.1.2 + +### Enhancements + +* Prefer workspaces over projects. + [Kyle Fuller](https://github.com/kylef) + +* Open workspaces if available. + [Kyle Fuller](https://github.com/kylef) + +### Fixes + +* Don't consider workspaces in bundles. + [Eloy Durán](https://github.com/alloy) + +* Typo fixes. 
+ [Mark Townsend](https://github.com/markltownsend) + +## 0.1.0 (2013-12-02) + +* Initial implementation. + [Fabio Pelosin](https://github.com/fabiopelosin) diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Gemfile new file mode 100644 index 0000000..8a73433 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Gemfile @@ -0,0 +1,16 @@ +source 'https://rubygems.org' + +gemspec + +group :development do + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + gem 'cocoapods', :git => 'https://github.com/CocoaPods/CocoaPods.git', :branch => 'master' + gem 'cocoapods-core', :git => 'https://github.com/CocoaPods/Core.git', :branch => 'master' + + gem 'bacon' + gem 'mocha' + gem 'mocha-on-bacon' + gem 'prettybacon' + + gem 'rubocop', '0.50.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Gemfile.lock new file mode 100644 index 0000000..0cc12aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Gemfile.lock @@ -0,0 +1,148 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: b5ced9cc141df732e8027078543eb92fc6447567 + branch: master + specs: + claide (1.0.3) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: c75c4a6dd226c45e0ad876caa926bff51a3f00d9 + branch: master + specs: + cocoapods (1.9.1) + activesupport (>= 4.0.2, < 5) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.9.1) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.2.2, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-trunk (>= 1.4.0, < 2.0) + cocoapods-try (= 1.2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.6.6) + nap (~> 1.0) + ruby-macho (~> 1.4) + xcodeproj (>= 1.14.0, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: 8923c0cdca68d4bc7126cd64106a5fc1e9217ced + branch: master + specs: + cocoapods-core (1.9.1) + activesupport (>= 4.0.2, < 6) + addressable (~> 2.6) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 2.0) + typhoeus (~> 1.0) + +PATH + remote: . 
+ specs: + cocoapods-try (1.2.0) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.2) + activesupport (4.2.11.1) + i18n (~> 0.7) + minitest (~> 5.1) + thread_safe (~> 0.3, >= 0.3.4) + tzinfo (~> 1.1) + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + algoliasearch (1.27.1) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + ast (2.4.0) + atomos (0.1.3) + bacon (1.2.0) + cocoapods-deintegrate (1.0.4) + cocoapods-downloader (1.3.0) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.0) + cocoapods-trunk (1.4.1) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + colored2 (3.1.2) + concurrent-ruby (1.1.6) + escape (0.0.4) + ethon (0.12.0) + ffi (>= 1.3.0) + ffi (1.12.2) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + httpclient (2.8.3) + i18n (0.9.5) + concurrent-ruby (~> 1.0) + json (2.3.0) + minitest (5.8.4) + mocha (1.11.2) + mocha-on-bacon (0.2.3) + mocha (>= 0.13.0) + molinillo (0.6.6) + nanaimo (0.2.6) + nap (1.1.0) + netrc (0.11.0) + parallel (1.10.0) + parser (2.7.1.1) + ast (~> 2.4.0) + powerpack (0.1.2) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (2.0.5) + rainbow (2.2.2) + rake + rake (10.5.0) + rubocop (0.50.0) + parallel (~> 1.10) + parser (>= 2.3.3.1, < 3.0) + powerpack (~> 0.1) + rainbow (>= 2.2.2, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-macho (1.4.0) + ruby-progressbar (1.10.1) + thread_safe (0.3.6) + typhoeus (1.3.1) + ethon (>= 0.9.0) + tzinfo (1.2.7) + thread_safe (~> 0.1) + unicode-display_width (1.7.0) + xcodeproj (1.16.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.2.6) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-try! + mocha + mocha-on-bacon + prettybacon + rake (~> 10.0) + rubocop (= 0.50.0) + +BUNDLED WITH + 1.17.2 diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/LICENSE new file mode 100644 index 0000000..c000bf0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Fabio Pelosin + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
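The cocoapods-try `0.5.0` changelog entry above describes the `.cocoapods.yml` `try:` section a repository can ship, and `lib/pod/try_settings.rb` later in this diff parses it (via `Pod::YAMLHelper`) into a `TrySettings` object. Below is a minimal, standalone sketch of that mapping only; the `read_try_settings` helper name is hypothetical and is not part of the gem's API:

```ruby
# Sketch only: the vendored implementation is lib/pod/try_settings.rb below,
# which uses Pod::YAMLHelper and returns a TrySettings object.
require 'yaml'
require 'pathname'

# Hypothetical helper: read the `try:` section of a repo's .cocoapods.yml.
def read_try_settings(folder)
  path = Pathname.new(folder) + '.cocoapods.yml'
  return {} unless path.exist?

  try = (YAML.safe_load(path.read) || {}).fetch('try', {})
  {
    # Commands to run before opening the demo project (e.g. `pod install`).
    :pre_install_commands => Array(try.dig('install', 'pre')),
    # Optional project/workspace to open instead of an auto-picked demo.
    :project_path => try['project'] && (Pathname.new(folder) + try['project']),
  }
end

# With the example .cocoapods.yml from the changelog entry above:
#   read_try_settings('/tmp/ORStackView')
#   # => { :pre_install_commands => ['pod install', 'git submodule init'],
#   #      :project_path => <folder>/ORStackView.xcworkspace }
```

The real `TrySettings#run_pre_install_commands` additionally prompts the user before executing those commands.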
diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/README.md b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/README.md new file mode 100644 index 0000000..b61acde --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/README.md @@ -0,0 +1,17 @@ +# Cocoapods try + +[![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-try/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-try) + +CocoaPods plugin which allows to quickly try the demo project of a Pod. + +![](http://i.imgur.com/xxWNUrg.gif) + +## Usage + + $ pod try POD_NAME + +## Installation + + $ gem install cocoapods-try + + diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Rakefile new file mode 100644 index 0000000..9f8fa94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/Rakefile @@ -0,0 +1,54 @@ +# Bootstrap +#-----------------------------------------------------------------------------# + +task :bootstrap, :use_bundle_dir? do |_t, args| + if system('which bundle') + if args[:use_bundle_dir?] + sh 'bundle install --path ./travis_bundle_dir' + else + sh 'bundle install' + end + else + $stderr.puts "\033[0;31m" \ + "[!] Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + require 'bundler/gem_tasks' + + task :default => 'spec' + + # Spec + #-----------------------------------------------------------------------------# + + desc 'Runs all the specs' + task :spec do + puts "\033[0;32mUsing #{`ruby --version`}\033[0m" + start_time = Time.now + sh "bundle exec bacon #{specs('**')}" + duration = Time.now - start_time + puts "Tests completed in #{duration}s" + + Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3' + end + + def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') + end + + #-- Rubocop ----------------------------------------------------------------# + + if RUBY_VERSION >= '1.9.3' + require 'rubocop/rake_task' + RuboCop::RakeTask.new + end +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks haven been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' 
\ + "\e[0m" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/cocoapods-try.gemspec b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/cocoapods-try.gemspec new file mode 100644 index 0000000..220b951 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/cocoapods-try.gemspec @@ -0,0 +1,21 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_try.rb' + +Gem::Specification.new do |spec| + spec.name = "cocoapods-try" + spec.version = CocoapodsTry::VERSION + spec.authors = ["Fabio Pelosin"] + spec.summary = %q{CocoaPods plugin which allows to quickly try the demo project of a Pod.} + spec.homepage = "https://github.com/cocoapods/cocoapods-try" + spec.license = "MIT" + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ["lib"] + + spec.add_development_dependency "bundler", "~> 1.3" + spec.add_development_dependency "rake", '~> 10.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..360daf4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/try' diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/cocoapods_try.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/cocoapods_try.rb new file mode 100644 index 0000000..44b3fa5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/cocoapods_try.rb @@ -0,0 +1,5 @@ +# The namespace of the Cocoapods try plugin. +# +module CocoapodsTry + VERSION = '1.2.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/pod/command/try.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/pod/command/try.rb new file mode 100644 index 0000000..445945a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/pod/command/try.rb @@ -0,0 +1,279 @@ +require 'pod/try_settings' + +# The CocoaPods namespace +# +module Pod + class Command + # The pod try command. + # @CocoaPods 0.29.0 + # + class Try < Command + include RepoUpdate + + self.summary = 'Try a Pod!' + + self.description = <<-DESC + Downloads the Pod with the given `NAME` (or Git `URL`), install its + dependencies if needed and opens its demo project. If a Git URL is + provided the head of the repo is used. + + If a Git URL is specified, then a --podspec_name can be provided, + if the podspec name is different than the git repo for some reason. + DESC + + self.arguments = [CLAide::Argument.new(%w(NAME URL), true)] + + def self.options + [ + ['--podspec_name=[name]', 'The name of the podspec file within the Git Repository'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @podspec_name = argv.option('podspec_name') + super + end + + def validate! + super + help! 'A Pod name or URL is required.' unless @name + help! 'Podspec name can only be used with a Git URL' if @podspec_name && !git_url?(@name) + end + + def run + ensure_master_spec_repo_exists! 
+ sandbox = Sandbox.new(TRY_TMP_DIR) + spec = setup_spec_in_sandbox(sandbox) + + UI.title "Trying #{spec.name}" do + pod_dir = install_pod(spec, sandbox) + settings = TrySettings.settings_from_folder(pod_dir) + Dir.chdir(pod_dir) { settings.run_pre_install_commands(true) } + proj = settings.project_path || pick_demo_project(pod_dir) + file = install_podfile(proj) + if file + open_project(file) + else + UI.puts "Unable to locate a project for #{spec.name}" + end + end + end + + public + + # Helpers + #-----------------------------------------------------------------------# + + # @return [Pathname] + # + TRY_TMP_DIR = Pathname.new(Dir.tmpdir) + 'CocoaPods/Try' + + # Puts the spec's data in the sandbox + # + def setup_spec_in_sandbox(sandbox) + if git_url?(@name) + spec = spec_with_url(@name, @podspec_name) + sandbox.store_pre_downloaded_pod(spec.name) + else + update_specs_repos + spec = spec_with_name(@name) + end + spec + end + + # Returns the specification of the last version of the Pod with the given + # name. + # + # @param [String] name + # The name of the pod. + # + # @return [Specification] The specification. + # + def spec_with_name(name) + set = config.sources_manager.search(Dependency.new(name)) + if set + set.specification.root + else + raise Informative, "Unable to find a specification for `#{name}`" + end + end + + # Returns the specification found in the given Git repository URL by + # downloading the repository. + # + # @param [String] url + # The URL for the pod Git repository. + # + # @param [String] spec_name + # The name of the podspec file within the Git repository. + # + # @return [Specification] The specification. + # + def spec_with_url(url, spec_name = nil) + name = url.split('/').last + name = name.chomp('.git') if name.end_with?('.git') + name = spec_name unless spec_name.nil? + + target_dir = TRY_TMP_DIR + name + target_dir.rmtree if target_dir.exist? + + downloader = Pod::Downloader.for_target(target_dir, :git => url) + downloader.download + + spec_file = Pathname.glob(target_dir + "#{name}.podspec{,.json}").first + Pod::Specification.from_file(spec_file) + end + + # Installs the specification in the given directory. + # + # @param [Specification] The specification of the Pod. + # @param [Pathname] The directory of the sandbox where to install the + # Pod. + # + # @return [Pathname] The path where the Pod was installed + # + def install_pod(spec, sandbox) + specs = { :ios => spec, :osx => spec } + if cocoapods_version >= Pod::Version.new('1.8.0') + dummy_podfile = Podfile.new + installer = Installer::PodSourceInstaller.new(sandbox, dummy_podfile, specs, :can_cache => false) + else + installer = Installer::PodSourceInstaller.new(sandbox, specs, :can_cache => false) + end + installer.install! + sandbox.root + spec.name + end + + # Picks a project or workspace suitable for the demo purposes in the + # given directory. + # + # To decide the project simple heuristics are used according to the name, + # if no project is found this method raises and `Informative` otherwise + # if more than one project is found the choice is presented to the user. + # + # @param [#to_s] dir + # The path where to look for projects. + # + # @return [String] The path of the project. 
+ # + def pick_demo_project(dir) + dir = Pathname.new(dir) + projs = projects_in_dir(dir) + if projs.count == 0 + raise Informative, 'Unable to find any project in the source files' \ + " of the Pod: `#{dir}`" + elsif projs.count == 1 + projs.first + elsif (workspaces = projs.grep(/(demo|example|sample).*\.xcworkspace$/i)).count == 1 + workspaces.first + elsif (projects = projs.grep(/demo|example|sample/i)).count == 1 + projects.first + else + message = 'Which project would you like to open' + selection_array = projs.map do |p| + Pathname.new(p).relative_path_from(dir).to_s + end + index = UI.choose_from_array(selection_array, message) + projs[index] + end + end + + # Performs a CocoaPods installation for the given project if Podfile is + # found. Shells out to avoid issues with the config of the process + # running the try command. + # + # @return [String] proj + # The path of the project. + # + # @return [String] The path of the file to open, in other words the + # workspace of the installation or the given project. + # + def install_podfile(proj) + return unless proj + dirname = Pathname.new(proj).dirname + podfile_path = dirname + 'Podfile' + if podfile_path.exist? + Dir.chdir(dirname) do + perform_cocoapods_installation + + podfile = Pod::Podfile.from_file(podfile_path) + + if podfile.workspace_path + File.expand_path(podfile.workspace_path) + else + proj.to_s.chomp(File.extname(proj.to_s)) + '.xcworkspace' + end + end + else + proj + end + end + + public + + # Private Helpers + #-----------------------------------------------------------------------# + + # @return [void] Updates the specs repo unless disabled by the config. + # + def update_specs_repos + return unless repo_update?(:default => true) + UI.section 'Updating spec repositories' do + config.sources_manager.update + end + end + + # Opens the project at the given path. + # + # @return [String] path + # The path of the project. + # + # @return [void] + # + def open_project(path) + UI.puts "Opening '#{path}'" + `open "#{path}"` + end + + # @return [void] Performs a CocoaPods installation in the working + # directory. + # + def perform_cocoapods_installation + UI.titled_section 'Performing CocoaPods Installation' do + Command::Install.invoke + end + end + + # @return [Bool] Wether the given string is the name of a Pod or an URL + # for a Git repo. + # + def git_url?(name) + prefixes = ['https://', 'http://'] + prefixes.any? { |prefix| name.start_with?(prefix) } + end + + # @return [Array] The list of the workspaces and projects in a + # given directory excluding The Pods project and the projects + # that have a sister workspace. 
+ # + def projects_in_dir(dir) + glob_match = Dir.glob("#{dir}/**/*.xc{odeproj,workspace}") + glob_match = glob_match.reject do |p| + next true if p.include?('Pods.xcodeproj') + next true if p.end_with?('.xcodeproj/project.xcworkspace') + sister_workspace = p.chomp(File.extname(p.to_s)) + '.xcworkspace' + p.end_with?('.xcodeproj') && glob_match.include?(sister_workspace) + end + end + + # @return [Pod::Version] the version of CocoaPods currently running + # + def cocoapods_version + Pod::Version.new(Pod::VERSION) + end + + #-------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/pod/try_settings.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/pod/try_settings.rb new file mode 100644 index 0000000..473c299 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/lib/pod/try_settings.rb @@ -0,0 +1,53 @@ +module Pod + class TrySettings + attr_accessor :pre_install_commands, :project_path + + # Creates a TrySettings instance based on a folder path + # + def self.settings_from_folder(path) + settings_path = Pathname.new(path) + '.cocoapods.yml' + return TrySettings.new unless File.exist? settings_path + + settings = YAMLHelper.load_file(settings_path) + try_settings = TrySettings.new + return try_settings unless settings['try'] + + if settings['try']['install'] + try_settings.pre_install_commands = Array(settings['try']['install']['pre']) + end + + if settings['try']['project'] + try_settings.project_path = Pathname.new(path) + settings['try']['project'] + end + + try_settings + end + + # If we need to run commands from pod-try we should let the users know + # what is going to be running on their device. + # + def prompt_for_permission + UI.titled_section 'Running Pre-Install Commands' do + commands = pre_install_commands.length > 1 ? 'commands' : 'command' + UI.puts "In order to try this pod, CocoaPods-Try needs to run the following #{commands}:" + pre_install_commands.each { |command| UI.puts " - #{command}" } + UI.puts "\nPress return to run these #{commands}, or press `ctrl + c` to stop trying this pod." + end + + # Give an elegant exit point. + UI.gets.chomp + end + + # Runs the pre_install_commands from + # + # @param [Bool] prompt + # Should CocoaPods-Try show a prompt with the commands to the user. + # + def run_pre_install_commands(prompt) + if pre_install_commands + prompt_for_permission if prompt + pre_install_commands.each { |command| Executable.execute_command('bash', ['-ec', command], true) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/spec/command/try_settings_spec.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/spec/command/try_settings_spec.rb new file mode 100644 index 0000000..d206186 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/spec/command/try_settings_spec.rb @@ -0,0 +1,105 @@ +require 'tmpdir' +require File.expand_path('../../spec_helper', __FILE__) + +# The CocoaPods namespace +# +module Pod + describe TrySettings do + it 'returns an instance with empty defaults when there are no yml settings files' do + Dir.mktmpdir do |dir| + settings = TrySettings.settings_from_folder dir + settings.should.be.instance_of TrySettings + settings.pre_install_commands.should.be.nil? + settings.project_path.should.be.nil? 
+ end + end + + it 'returns an instance with the right defaults when there are no yml settings files' do + Dir.mktmpdir do |dir| + yaml = < 'ARAnalytics') + Pod::Specification.stubs(:from_file).with(spec_file).returns(stub_spec) + + command = Pod::Command.parse(['try', 'https://github.com/orta/ARAnalytics.git']) + Installer::PodSourceInstaller.any_instance.expects(:install!) + command.expects(:update_specs_repos).never + command.expects(:pick_demo_project).returns(XCODE_PROJECT) + command.expects(:open_project).with(XCODE_PROJECT) + command.run + end + + describe 'updates of the spec repos' do + it 'updates the spec repos by default' do + command = Pod::Command.parse(%w(try ARAnalytics)) + Installer::PodSourceInstaller.any_instance.expects(:install!) + command.config.sources_manager.expects(:update) + command.expects(:pick_demo_project).returns(XCODE_PROJECT) + command.expects(:open_project).with(XCODE_PROJECT) + command.run + end + + it "doesn't update the spec repos if that option was given" do + command = Pod::Command.parse(%w(try ARAnalytics --no-repo-update)) + Installer::PodSourceInstaller.any_instance.expects(:install!) + command.config.sources_manager.expects(:update).never + command.expects(:pick_demo_project).returns(XCODE_PROJECT) + command.expects(:open_project).with(XCODE_PROJECT) + command.run + end + end + end + + describe 'Helpers' do + before do + @sut = Pod::Command.parse(['try']) + end + + it 'returns the spec with the given name' do + spec = @sut.spec_with_name('ARAnalytics') + spec.name.should == 'ARAnalytics' + end + + describe '#spec_at_url' do + it 'returns a spec for an https git repo' do + require 'cocoapods-downloader/git' + Pod::Downloader::Git.any_instance.expects(:download) + spec_file = Pod::Command::Try::TRY_TMP_DIR + 'ARAnalytics/ARAnalytics.podspec' + Pathname.stubs(:glob).once.returns([spec_file]) + stub_spec = stub + Pod::Specification.stubs(:from_file).with(spec_file).returns(stub_spec) + spec = @sut.spec_with_url('https://github.com/orta/ARAnalytics.git') + spec.should == stub_spec + end + + it 'returns a spec for an https git repo with podspec_name option' do + require 'cocoapods-downloader/git' + Pod::Downloader::Git.any_instance.expects(:download) + spec_file = Pod::Command::Try::TRY_TMP_DIR + 'ARAnalytics/Analytics.podspec' + Pathname.stubs(:glob).once.returns([spec_file]) + stub_spec = stub + Pod::Specification.stubs(:from_file).with(spec_file).returns(stub_spec) + spec = @sut.spec_with_url('https://github.com/orta/ARAnalytics.git', 'Analytics') + spec.should == stub_spec + end + end + + it 'installs the pod' do + Installer::PodSourceInstaller.any_instance.expects(:install!) + spec = stub(:name => 'ARAnalytics') + sandbox_root = Pathname.new(Pod::Command::Try::TRY_TMP_DIR) + sandbox = Sandbox.new(sandbox_root) + path = @sut.install_pod(spec, sandbox) + path.should == sandbox.root + 'ARAnalytics' + end + + it 'installs the pod on older versions of CocoaPods' do + @sut.stubs(:cocoapods_version).returns(Pod::Version.new('1.7.0')) + spec = stub(:name => 'ARAnalytics') + sandbox_root = Pathname.new(Pod::Command::Try::TRY_TMP_DIR) + sandbox = Sandbox.new(sandbox_root) + installer = stub('Installer') + installer.stubs(:install!) 
+ Pod::Installer::PodSourceInstaller.expects(:new).with(any_parameters) do |*args| + args.size == 3 + end.returns(installer).once + @sut.install_pod(spec, sandbox) + + @sut.stubs(:cocoapods_version).returns(Pod::Version.new('1.8.0')) + Pod::Installer::PodSourceInstaller.expects(:new).with(any_parameters) do |*args| + args.size == 4 + end.returns(installer) + @sut.install_pod(spec, sandbox) + end + + describe '#pick_demo_project' do + it 'raises if no demo project could be found' do + @sut.stubs(:projects_in_dir).returns([]) + should.raise Informative do + @sut.pick_demo_project('.') + end.message.should.match(/Unable to find any project/) + end + + it 'picks a demo project' do + projects = ['Demo.xcodeproj'] + Dir.stubs(:glob).returns(projects) + path = @sut.pick_demo_project('.') + path.should == 'Demo.xcodeproj' + end + + it 'is not case sensitive' do + projects = ['demo.xcodeproj'] + Dir.stubs(:glob).returns(projects) + path = @sut.pick_demo_project('.') + path.should == 'demo.xcodeproj' + end + + it 'considers also projects named example' do + projects = ['Example.xcodeproj'] + Dir.stubs(:glob).returns(projects) + path = @sut.pick_demo_project('.') + path.should == 'Example.xcodeproj' + end + + it 'returns the project if only one is found' do + projects = [Pathname.new('Lib.xcodeproj')] + @sut.stubs(:projects_in_dir).returns(projects) + path = @sut.pick_demo_project('.') + path.to_s.should == 'Lib.xcodeproj' + end + + it 'asks the user which project would like to open if not a single suitable one is found' do + projects = ['Lib_1.xcodeproj', 'Lib_2.xcodeproj'] + @sut.stubs(:projects_in_dir).returns(projects) + UI.stubs(:choose_from_array).returns(0) + path = @sut.pick_demo_project('.') + path.to_s.should == 'Lib_1.xcodeproj' + + UI.stubs(:choose_from_array).returns(1) + path = @sut.pick_demo_project('.') + path.to_s.should == 'Lib_2.xcodeproj' + end + + it 'should prefer demo or example workspaces' do + @sut.stubs(:projects_in_dir).returns(['Project Demo.xcodeproj', 'Project Demo.xcworkspace']) + path = @sut.pick_demo_project('.') + path.should == 'Project Demo.xcworkspace' + end + + it 'should not show workspaces inside a project' do + Dir.stubs(:glob).returns(['Project Demo.xcodeproj', 'Project Demo.xcodeproj/project.xcworkspace']) + path = @sut.pick_demo_project('.') + path.should == 'Project Demo.xcodeproj' + end + + it 'should prefer workspaces over projects with the same name' do + @sut.stubs(:projects_in_dir).returns(['Project Demo.xcodeproj', 'Project Demo.xcworkspace']) + path = @sut.pick_demo_project('.') + path.should == 'Project Demo.xcworkspace' + end + end + + describe '#install_podfile' do + it 'returns the original project if no Podfile could be found' do + Pathname.any_instance.stubs(:exist?).returns(false) + proj = XCODE_PROJECT + path = @sut.install_podfile(proj) + path.should == proj + end + + it 'performs an installation and returns the path of the workspace' do + Pathname.any_instance.stubs(:exist?).returns(true) + proj = XCODE_PROJECT + @sut.expects(:perform_cocoapods_installation) + Podfile.stubs(:from_file).returns(stub('Workspace', :workspace_path => XCODE_WORKSPACE)) + path = @sut.install_podfile(proj) + path.to_s.should == XCODE_WORKSPACE.to_s + end + + it 'returns the default workspace if one is not set' do + Pathname.any_instance.stubs(:exist?).returns(true) + proj = XCODE_PROJECT + Podfile.stubs(:from_file).returns(stub('Workspace', :workspace_path => nil)) + @sut.expects(:perform_cocoapods_installation).once + path = @sut.install_podfile(proj) + 
path.to_s.should == XCODE_WORKSPACE.to_s + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/spec/spec_helper.rb new file mode 100644 index 0000000..313584d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/cocoapods-try-1.2.0/spec/spec_helper.rb @@ -0,0 +1,49 @@ + +# Set up +#-----------------------------------------------------------------------------# + +require 'bundler/setup' +require 'pathname' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'cocoapods' + +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$LOAD_PATH.unshift((ROOT + 'lib').to_s) +$LOAD_PATH.unshift((ROOT + 'spec').to_s) +require 'cocoapods_plugin' + +#-----------------------------------------------------------------------------# + +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end +end + +#-----------------------------------------------------------------------------# diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/LICENSE b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/LICENSE new file mode 100644 index 0000000..e543acb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2010 Chris Wanstrath +Copyright (c) 2016 Konstantin Gredeskoul + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
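The `#pick_demo_project` heuristic in `lib/pod/command/try.rb` above, exercised by the `'#pick_demo_project'` specs, prefers a lone candidate, then a single demo/example/sample workspace, then a single demo/example/sample project, and otherwise asks the user to choose. Here is a minimal standalone sketch of that selection order (the `pick_demo` name is hypothetical; the real command also raises `Informative` when nothing is found and falls back to `UI.choose_from_array`):

```ruby
# Sketch of the selection order implemented by Command::Try#pick_demo_project.
def pick_demo(candidates)
  return candidates.first if candidates.count == 1

  # A single workspace named like a demo wins over everything else.
  demo_workspaces = candidates.grep(/(demo|example|sample).*\.xcworkspace$/i)
  return demo_workspaces.first if demo_workspaces.count == 1

  # Otherwise a single project or workspace named like a demo.
  demo_projects = candidates.grep(/demo|example|sample/i)
  return demo_projects.first if demo_projects.count == 1

  nil # the real command presents the remaining candidates to the user here
end

pick_demo(['Project Demo.xcodeproj', 'Project Demo.xcworkspace'])
# => "Project Demo.xcworkspace", matching the
#    'should prefer demo or example workspaces' spec above
```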
diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/README.md b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/README.md new file mode 100644 index 0000000..f2da87b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/README.md @@ -0,0 +1,93 @@ +[![Gem Version](https://badge.fury.io/rb/colored2.svg)](https://badge.fury.io/rb/colored2) +[![Downloads](http://ruby-gem-downloads-badge.herokuapp.com/colored2?type=total)](https://rubygems.org/gems/colored2) +[![Gitter](https://img.shields.io/gitter/room/gitterHQ/gitter.svg)](https://gitter.im/colored2) + +[![Build Status](https://travis-ci.org/kigster/colored2.svg?branch=master)](https://travis-ci.org/kigster/colored2) +[![Test Coverage](https://codeclimate.com/github/kigster/colored2/badges/coverage.svg)](https://codeclimate.com/github/kigster/colored2/coverage) +[![Code Climate](https://codeclimate.com/github/kigster/colored2/badges/gpa.svg)](https://codeclimate.com/github/kigster/colored2) +[![Issue Count](https://codeclimate.com/github/kigster/colored2/badges/issue_count.svg)](https://codeclimate.com/github/kigster/colored2) + +## Colored2 + +This is an actively maintained fork of Chris (defunkt) Wanstrath's gem [colored](https://github.com/defunkt/colored), which appears to be no longer supported. + +This fork comes with a slightly spruced up syntax, some additional features, and a test suite written in [RSpec](http://rspec.info/). + +## Usage + +In addition to the simple syntax of the original gem, which affected only the string to the left of the method call, the new "bang" syntax affects a string to the right. If the block or a method argument is provided, the contents is wrapped in the color, and the color is then reset back. + +If no block or argument is provided, the color is left open-ended, and must be explicitly reset – when using the 'bang' notation. + +![](doc/colored2-session1.png) + +### Complete set of colors: + + * black + * red + * green + * yellow + * blue + * magenta + * cyan + * white + +### Complete Set of Effects + +> Note: previous versions used method name `clear` instead of `no_color`, which clashed with many 3rd party frameworks that defined similarly named method in the global namespace. +> This highlights the dangers of introducing so many words into the `String` namespace. + + * no_color + * bold + * dark + * italic + * underlined + * reversed + * plain + * normal + +## Usage in Other Classes + +With this gem you can add color to not just strings, but to any other class. `String` class is automatically decorated as soon as `require 'colored2'` is parsed by the ruby interpreter. Note that future versions may refrain from auto-requiring `colored2/strings`, and rely on explicitly requiring components they need colorized, eg `require 'colored2/numbers'`. + +To color numbers, require the following file, which automatically decorates `Integer` and `Float`. You can also add color methods to the `Object`. Finally, you can add the methods to any custom class by including the `Colored2` Module. + +Below is an `IRB` — session that shows a slightly more advanced usage. + +![](doc/colored2-session2.png) + +## Additional Helpers + +There are several additional helpers tucked onto the `String` class. + + * `#to_bol` (to beginning of the line) will rewind the cursor back to the beginning of the current line. 
+ * `#to_eol` (to end of line) + +## Installation + +Add this line to your application's Gemfile: + + + gem 'colored2' + + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install colored2 + + +## Development + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at [https://github.com/kigster/colored2](https://github.com/kigster/colored2). + +## License + +The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT). diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/Rakefile b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/Rakefile new file mode 100644 index 0000000..47ca61b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/Rakefile @@ -0,0 +1,13 @@ +require 'bundler' +require 'bundler/gem_tasks' +require 'rake/clean' + +CLEAN.include %w(pkg coverage *.gem) + +begin + require 'rspec/core/rake_task' + RSpec::Core::RakeTask.new(:spec) +rescue LoadError +end + +task :default => [:spec] diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2.rb new file mode 100644 index 0000000..c4305dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2.rb @@ -0,0 +1,85 @@ +require 'colored2/codes' +require 'colored2/ascii_decorator' + +module Colored2 + def self.decorate(a_class) + a_class.send(:include, Colored2) + end + + def self.included(from_class) + from_class.class_eval do + + def surround_with_color(color: nil, effect: nil, color_self: nil, string: nil, &block) + color_type = if Colored2.background_next? && effect.nil? + Colored2.foreground_next! + :background + else + :foreground + end + + opts = {} + opts.merge!(color_type => color) if color + opts.merge!(effect: effect) if effect + + if color_self then + opts.merge!( beginning: :on, end: :off) + colored = Colored2::AsciiDecorator.new(self).decorate(opts) + if string || block + arg = "#{string}#{block.call if block}" + colored << Colored2::AsciiDecorator.new(arg).decorate(opts) if arg.length > 0 + end + else + opts.merge!( end: :on ) + colored = Colored2::AsciiDecorator.new(self).decorate(opts) + if string || block + arg = "#{string}#{block.call if block}" + colored << Colored2::AsciiDecorator.new(arg).decorate(opts.merge(end: :off)) if arg.length > 0 + end + end + colored + end + + def on + Colored2.background_next! 
+ self + end + end + + from_class.instance_eval do + COLORS.keys.each do |color| + define_method(color) do |string = nil, &block| + surround_with_color(color: color, color_self: true, string: string, &block) + end + + define_method("#{color}!".to_sym) do |string = nil, &block| + surround_with_color(color: color, color_self: false, string: string, &block) + end + end + + EFFECTS.keys.each do |effect| + next if effect == 'no_color' + define_method(effect) do |string = nil, &block| + surround_with_color(effect: effect, color_self: true, string: string, &block) + end + + define_method("#{effect}!".to_sym) do |string = nil, &block| + surround_with_color(effect: effect, color_self: false, string: string, &block) + end + end + + define_method(:to_eol) do + tmp = sub(/^(\e\[[\[\e0-9;m]+m)/, "\\1\e[2K") + if tmp == self + return "\e[2K" << self + end + tmp + end + + define_method(:to_bol) do + "#{self}\033[#{length}D\033[0D" + end + end + end +end + +require 'colored2/strings' diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/ascii_decorator.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/ascii_decorator.rb new file mode 100644 index 0000000..c491e61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/ascii_decorator.rb @@ -0,0 +1,86 @@ +require 'colored2/codes' +require 'forwardable' + +module Colored2 + def self.enable! + Colored2::AsciiDecorator.enable! + end + def self.disable! + Colored2::AsciiDecorator.disable! + end + def self.background_next! + Colored2::AsciiDecorator.background_next! + end + def self.foreground_next! + Colored2::AsciiDecorator.foreground_next! + end + def self.background_next? + Colored2::AsciiDecorator.background_next? + end + + class AsciiDecorator + @__background_next = false + @__colors_disabled = false + class << self + attr_accessor :__background_next, :__colors_disabled + def enable! + self.__colors_disabled = false + end + def enabled? + !self.__colors_disabled + end + def disable! + self.__colors_disabled = true + end + def background_next! + self.__background_next = true + end + def foreground_next! + self.__background_next = false + end + def background_next? + self.__background_next + end + end + + extend Forwardable + def_delegators :@my_class, :enable!, :disable! + + attr_accessor :string, :my_class + + def initialize(a_string) + self.string = a_string.instance_of?(Object) ? '' : a_string.to_s + self.my_class = self.class + end + + # options[:start] = :color + # options[:end] = :color | :no_color + def decorate(options = {}) + return string if !self.class.enabled? 
|| string.length == 0 + escape_sequence = [ + Colored2::TextColor.new(options[:foreground]), + Colored2::BackgroundColor.new(options[:background]), + Colored2::Effect.new(options[:effect]) + ].compact.join + + colored = '' + colored << escape_sequence if options[:beginning] == :on + colored << string + if options[:end] + colored << no_color if options[:end] == :off && !colored.end_with?(no_color) + colored << escape_sequence if options[:end] == :on + end + colored + end + + def un_decorate + string.gsub(%r{\e\[\d+(;\d+)*m}, '') + end + + private + + def no_color + @no_color ||= Colored2::Effect.new(:no_color).to_s + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/codes.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/codes.rb new file mode 100644 index 0000000..8f46f43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/codes.rb @@ -0,0 +1,63 @@ +module Colored2 + + COLORS = { + black: 30, + red: 31, + green: 32, + yellow: 33, + blue: 34, + magenta: 35, + cyan: 36, + white: 37 + } + + EFFECTS = { + no_color: 0, + bold: 1, + dark: 2, + italic: 3, + underlined: 4, + reversed: 7, + plain: 21, # non-bold + normal: 22 + } + + class Code + attr_accessor :name, :escape + def initialize(name) + @name = name + return if name.nil? + @escape = codes[name.to_sym] + raise ArgumentError.new("No color or effect named #{name} exists for #{self.class}.") if @escape.nil? + end + + def value(shift = nil) + escape_code = escape + escape_code += shift if shift && escape_code + name && escape ? "\e[#{escape_code}m" : '' + end + + def to_s + value + end + end + + class Effect < Code + def codes + EFFECTS + end + end + + class TextColor < Code + def codes + COLORS + end + end + + class BackgroundColor < TextColor + def value + super 10 + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/numbers.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/numbers.rb new file mode 100644 index 0000000..64161b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/numbers.rb @@ -0,0 +1,11 @@ +require 'colored2' unless defined?(Colored2) && Colored2.respond_to?(:decorate) + +module Colored2 + def self.integer_class + major, minor = RUBY_VERSION.split(/\./).map(&:to_i) + major >= 2 && minor >= 4 ? 
Integer : Kernel.const_get(:Fixnum) + end +end + +Colored2.decorate(Colored2.integer_class) +Colored2.decorate(Float) diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/object.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/object.rb new file mode 100644 index 0000000..439b0f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/object.rb @@ -0,0 +1,2 @@ +require 'colored2' unless defined?(Colored2) && Colored2.respond_to?(:decorate) +Colored2.decorate(Object) diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/strings.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/strings.rb new file mode 100644 index 0000000..8e099e8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/strings.rb @@ -0,0 +1,2 @@ +require 'colored2' unless defined?(Colored2) && Colored2.respond_to?(:decorate) +Colored2.decorate(String) diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/version.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/version.rb new file mode 100644 index 0000000..ab0f8a7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/lib/colored2/version.rb @@ -0,0 +1,3 @@ +module Colored2 + VERSION = '3.1.2' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/numbers_spec.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/numbers_spec.rb new file mode 100644 index 0000000..f686bfa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/numbers_spec.rb @@ -0,0 +1,26 @@ +require File.expand_path('spec/spec_helper') +require 'colored2/numbers' +require 'colored2/strings' + +RSpec.describe Colored2.integer_class do + describe 'with foreground and background colors' do + it 'should work with one color' do + expect(32.red).to eql('32'.red) + end + it 'should insert escape sequences' do + expect(32.red).to eql("\e[31m32\e[0m") + end + end +end + +RSpec.describe Float do + describe 'with foreground and background colors' do + it 'should add two colors chained' do + expect((32.5).blue.on.red).to eql('32.5'.blue.on.red) + end + + it 'should insert escape sequences' do + expect((32.5).blue.on.red).to eql("\e[41m\e[34m32.5\e[0m") + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/object_spec.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/object_spec.rb new file mode 100644 index 0000000..6256254 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/object_spec.rb @@ -0,0 +1,24 @@ +require File.expand_path('spec/spec_helper') +require 'colored2/object' + +subject1 = red('hello') +subject2 = red('blue').on.blue +subject3 = on.yellow('on yellow') + +RSpec.describe Object do + + describe 'with foreground and background colors' do + it 'should work with one color' do + expect(subject1).to eql('hello'.red) + end + + it 'should work with color on color' do + expect(subject2).to eql('blue'.red.on.blue) + end + + it 'should add background color using on_' do + expect(subject3).to eql('on yellow'.on.yellow) + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/strings_spec.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/strings_spec.rb new file mode 100644 index 0000000..10da2a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2/strings_spec.rb @@ -0,0 +1,77 @@ +require File.expand_path('spec/spec_helper') +require 'colored2/strings' + 
+RSpec.describe String do + before do + Colored2.decorate(String) + end + + describe 'with foreground and background colors' do + it 'should work with one color' do + expect('red'.red).to eql("\e[31mred\e[0m") + end + + it 'should add two colors chained' do + expect('blue'.red.blue).to eql("\e[34m\e[31mblue\e[0m") + end + + it 'should add background color using on_' do + expect('on yellow'.on.yellow).to eql("\e[43mon yellow\e[0m") + end + + it 'should work with _on_ syntax' do + expect('red on blue'.red.on.blue).to eql("\e[44m\e[31mred on blue\e[0m") + end + end + + describe 'with effects' do + it 'should add a bold modifier' do + expect('way bold'.bold).to eql("\e[1mway bold\e[0m") + end + + it 'should let modifiers stack' do + expect('underlinedd bold'.bold.underlined).to eql("\e[4m\e[1munderlinedd bold\e[0m") + end + + it 'should let modifiers stack with colors' do + expect('cyan underlinedd bold'.bold.underlined.cyan).to eql("\e[36m\e[4m\e[1mcyan underlinedd bold\e[0m") + end + end + + describe 'new block syntax' do + it 'should defined block syntax nested colors' do + expect('No Color, then'.blue!('blue inside')).to eql('No Color, then' + 'blue inside'.blue) + end + + it 'should defined block syntax nested colors two levels deep' do + expect('regular here'.blue! + 'blue here'.no_color!).to eql('regular here' << 'blue here'.blue) + end + + it 'should defined block syntax nested colors two levels deep' do + expect('regular here'.blue! { 'something else'.red!('red riding hood') }).to eql('regular here'.blue! << 'something else'.red! << 'red riding hood'.no_color!) + end + + it 'should defined block syntax nested colors two levels deep' do + expectation = 'this is regular, but '.red! do + 'this is red '.yellow! do + ' and yellow'.no_color! + end + end + expect(expectation).to eql('this is regular, but '.red! << 'this is red '.yellow! << ' and yellow'.no_color!) + end + end + + describe 'end of line' do + it 'should work with eol' do + expect('nothing to see here really.'.to_eol).to eql("\e[2Knothing to see here really.") + end + + it 'should work with eol_with_with_two_colors' do + expect('blue'.red.blue.to_eol).to eql("\e[34m\e[31m\e[2Kblue\e[0m") + end + + it 'should work with eol_with_modifiers_stack_with_colors' do + expect('cyan underlinedd bold'.bold.underlined.cyan.to_eol).to eql("\e[36m\e[4m\e[1m\e[2Kcyan underlinedd bold\e[0m") + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2_spec.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2_spec.rb new file mode 100644 index 0000000..3b55f07 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/colored2_spec.rb @@ -0,0 +1,23 @@ +require 'spec_helper' +require 'colored2/strings' + +RSpec.describe Colored2 do + describe 'global enable and disable' do + before do + Colored2.disable! + end + after do + Colored2.enable! 
+ end + let(:sample) { 'sample string' } + + describe 'colors' do + subject { sample.red.on.blue } + it { should eql(sample) } + end + describe 'effects' do + subject { sample.bold.on.red } + it { should eql(sample) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/spec_helper.rb new file mode 100644 index 0000000..3f2faa0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/colored2-3.1.2/spec/spec_helper.rb @@ -0,0 +1,5 @@ +require 'simplecov' +SimpleCov.start + +require 'rspec/core' + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/CHANGELOG.md new file mode 100644 index 0000000..f7d4921 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/CHANGELOG.md @@ -0,0 +1,511 @@ +## Current + +## Release v1.1.7 (6 August 2020) + +concurrent-ruby: + +* (#879) Consider falsy value on `Concurrent::Map#compute_if_absent` for fast non-blocking path +* (#876) Reset Async queue on forking, makes Async fork-safe +* (#856) Avoid running problematic code in RubyThreadLocalVar on MRI that occasionally results in segfault +* (#853) Introduce ThreadPoolExecutor without a Queue + +## Release v1.1.6, edge v0.6.0 (10 Feb 2020) + +concurrent-ruby: + +* (#841) Concurrent.disable_at_exit_handlers! is no longer needed and was deprecated. +* (#841) AbstractExecutorService#auto_terminate= was deprecated and has no effect. + Set :auto_terminate option instead when executor is initialized. + +## Release v1.1.6.pre1, edge v0.6.0.pre1 (26 Jan 2020) + +concurrent-ruby: + +* (#828) Allow to name executors, the name is also used to name their threads +* (#838) Implement #dup and #clone for structs +* (#821) Safer finalizers for thread local variables +* Documentation fixes +* (#814) Use Ruby's Etc.nprocessors if available +* (#812) Fix directory structure not to mess with packaging tools +* (#840) Fix termination of pools on JRuby + +concurrent-ruby-edge: + +* Add WrappingExecutor (#830) + +## Release v1.1.5, edge v0.5.0 (10 Mar 2019) + +concurrent-ruby: + +* fix potential leak of context on JRuby and Java 7 + +concurrent-ruby-edge: + +* Add finalized Concurrent::Cancellation +* Add finalized Concurrent::Throttle +* Add finalized Concurrent::Promises::Channel +* Add new Concurrent::ErlangActor + +## Release v1.1.4 (14 Dec 2018) + +* (#780) Remove java_alias of 'submit' method of Runnable to let executor service work on java 11 +* (#776) Fix NameError on defining a struct with a name which is already taken in an ancestor + +## Release v1.1.3 (7 Nov 2018) + +* (#775) fix partial require of the gem (although not officially supported) + +## Release v1.1.2 (6 Nov 2018) + +* (#773) more defensive 1.9.3 support + +## Release v1.1.1, edge v0.4.1 (1 Nov 2018) + +* (#768) add support for 1.9.3 back + +## Release v1.1.0, edge v0.4.0 (31 OCt 2018) (yanked) + +* (#768) yanked because of issues with removed 1.9.3 support + +## Release v1.1.0.pre2, edge v0.4.0.pre2 (18 Sep 2018) + +concurrent-ruby: + +* fixed documentation and README links +* fix Set for TruffleRuby and Rubinius +* use properly supported TruffleRuby APIs + +concurrent-ruby-edge: + +* add Promises.zip_futures_over_on + +## Release v1.1.0.pre1, edge v0.4.0.pre1 (15 Aug 2018) + +concurrent-ruby: + +* requires at least Ruby 2.0 +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/1.1.0/Concurrent/Promises.html) + are moved from `concurrent-ruby-edge` to 
`concurrent-ruby` +* Add support for TruffleRuby + * (#734) Fix Array/Hash/Set construction broken on TruffleRuby + * AtomicReference fixed +* CI stabilization +* remove sharp dependency edge -> core +* remove warnings +* documentation updates +* Exchanger is no longer documented as edge since it was already available in + `concurrent-ruby` +* (#644) Fix Map#each and #each_pair not returning enumerator outside of MRI +* (#659) Edge promises fail during error handling +* (#741) Raise on recursive Delay#value call +* (#727) #717 fix global IO executor on JRuby +* (#740) Drop support for CRuby 1.9, JRuby 1.7, Rubinius. +* (#737) Move AtomicMarkableReference out of Edge +* (#708) Prefer platform specific memory barriers +* (#735) Fix wrong expected exception in channel spec assertion +* (#729) Allow executor option in `Promise#then` +* (#725) fix timeout check to use timeout_interval +* (#719) update engine detection +* (#660) Add specs for Promise#zip/Promise.zip ordering +* (#654) Promise.zip execution changes +* (#666) Add thread safe set implementation +* (#651) #699 #to_s, #inspect should not output negative object IDs. +* (#685) Avoid RSpec warnings about raise_error +* (#680) Avoid RSpec monkey patching, persist spec results locally, use RSpec + v3.7.0 +* (#665) Initialize the monitor for new subarrays on Rubinius +* (#661) Fix error handling in edge promises + +concurrent-ruby-edge: + +* (#659) Edge promises fail during error handling +* Edge files clearly separated in `lib-edge` +* added ReInclude + +## Release v1.0.5, edge v0.3.1 (26 Feb 2017) + +concurrent-ruby: + +* Documentation for Event and Semaphore +* Use Unsafe#fullFence and #loadFence directly since the shortcuts were removed in JRuby +* Do not depend on org.jruby.util.unsafe.UnsafeHolder + +concurrent-ruby-edge: + +* (#620) Actors on Pool raise an error +* (#624) Delayed promises did not interact correctly with flatting + * Fix arguments yielded by callback methods +* Overridable default executor in promises factory methods +* Asking actor to terminate will always resolve to `true` + +## Release v1.0.4, edge v0.3.0 (27 Dec 2016) + +concurrent-ruby: + +* Nothing + +concurrent-ruby-edge: + +* New promises' API renamed, lots of improvements, edge bumped to 0.3.0 + * **Incompatible** with previous 0.2.3 version + * see https://github.com/ruby-concurrency/concurrent-ruby/pull/522 + +## Release v1.0.3 (17 Dec 2016) + +* Trigger execution of flattened delayed futures +* Avoid forking for processor_count if possible +* Semaphore Mutex and JRuby parity +* Adds Map#each as alias to Map#each_pair +* Fix uninitialized instance variables +* Make Fixnum, Bignum merger ready +* Allows Promise#then to receive an executor +* TimerSet now survives a fork +* Reject promise on any exception +* Allow ThreadLocalVar to be initialized with a block +* Support Alpha with `Concurrent::processor_count` +* Fixes format-security error when compiling ruby_193_compatible.h +* Concurrent::Atom#swap fixed: reraise the exceptions from block + +## Release v1.0.2 (2 May 2016) + +* Fix bug with `Concurrent::Map` MRI backend `#inspect` method +* Fix bug with `Concurrent::Map` MRI backend using `Hash#value?` +* Improved documentation and examples +* Minor updates to Edge + +## Release v1.0.1 (27 February 2016) + +* Fix "uninitialized constant Concurrent::ReentrantReadWriteLock" error. +* Better handling of `autoload` vs. `require`. +* Improved API for Edge `Future` zipping. +* Fix reference leak in Edge `Future` constructor . 
+* Fix bug which prevented thread pools from surviving a `fork`. +* Fix bug in which `TimerTask` did not correctly specify all its dependencies. +* Improved support for JRuby+Truffle +* Improved error messages. +* Improved documentation. +* Updated README and CONTRIBUTING. + +## Release v1.0.0 (13 November 2015) + +* Rename `attr_volatile_with_cas` to `attr_atomic` +* Add `clear_each` to `LockFreeStack` +* Update `AtomicReference` documentation +* Further updates and improvements to the synchronization layer. +* Performance and memory usage performance with `Actor` logging. +* Fixed `ThreadPoolExecutor` task count methods. +* Improved `Async` performance for both short and long-lived objects. +* Fixed bug in `LockFreeLinkedSet`. +* Fixed bug in which `Agent#await` triggered a validation failure. +* Further `Channel` updates. +* Adopted a project Code of Conduct +* Cleared interpreter warnings +* Fixed bug in `ThreadPoolExecutor` task count methods +* Fixed bug in 'LockFreeLinkedSet' +* Improved Java extension loading +* Handle Exception children in Edge::Future +* Continued improvements to channel +* Removed interpreter warnings. +* Shared constants now in `lib/concurrent/constants.rb` +* Refactored many tests. +* Improved synchronization layer/memory model documentation. +* Bug fix in Edge `Future#flat` +* Brand new `Channel` implementation in Edge gem. +* Simplification of `RubySingleThreadExecutor` +* `Async` improvements + - Each object uses its own `SingleThreadExecutor` instead of the global thread pool. + - No longers supports executor injection + - Much better documentation +* `Atom` updates + - No longer `Dereferenceable` + - Now `Observable` + - Added a `#reset` method +* Brand new `Agent` API and implementation. Now functionally equivalent to Clojure. +* Continued improvements to the synchronization layer +* Merged in the `thread_safe` gem + - `Concurrent::Array` + - `Concurrent::Hash` + - `Concurrent::Map` (formerly ThreadSafe::Cache) + - `Concurrent::Tuple` +* Minor improvements to Concurrent::Map +* Complete rewrite of `Exchanger` +* Removed all deprecated code (classes, methods, constants, etc.) +* Updated Agent, MutexAtomic, and BufferedChannel to inherit from Synchronization::Object. +* Many improved tests +* Some internal reorganization + +## Release v0.9.1 (09 August 2015) + +* Fixed a Rubiniux bug in synchronization object +* Fixed all interpreter warnings (except circular references) +* Fixed require statements when requiring `Atom` alone +* Significantly improved `ThreadLocalVar` on non-JRuby platforms +* Fixed error handling in Edge `Concurrent.zip` +* `AtomicFixnum` methods `#increment` and `#decrement` now support optional delta +* New `AtomicFixnum#update` method +* Minor optimizations in `ReadWriteLock` +* New `ReentrantReadWriteLock` class +* `ThreadLocalVar#bind` method is now public +* Refactored many tests + +## Release v0.9.0 (10 July 2015) + +* Updated `AtomicReference` + - `AtomicReference#try_update` now simply returns instead of raising exception + - `AtomicReference#try_update!` was added to raise exceptions if an update + fails. 
Note: this is the same behavior as the old `try_update` +* Pure Java implementations of + - `AtomicBoolean` + - `AtomicFixnum` + - `Semaphore` +* Fixed bug when pruning Ruby thread pools +* Fixed bug in time calculations within `ScheduledTask` +* Default `count` in `CountDownLatch` to 1 +* Use monotonic clock for all timers via `Concurrent.monotonic_time` + - Use `Process.clock_gettime(Process::CLOCK_MONOTONIC)` when available + - Fallback to `java.lang.System.nanoTime()` on unsupported JRuby versions + - Pure Ruby implementation for everything else + - Effects `Concurrent.timer`, `Concurrent.timeout`, `TimerSet`, `TimerTask`, and `ScheduledTask` +* Deprecated all clock-time based timer scheduling + - Only support scheduling by delay + - Effects `Concurrent.timer`, `TimerSet`, and `ScheduledTask` +* Added new `ReadWriteLock` class +* Consistent `at_exit` behavior for Java and Ruby thread pools. +* Added `at_exit` handler to Ruby thread pools (already in Java thread pools) + - Ruby handler stores the object id and retrieves from `ObjectSpace` + - JRuby disables `ObjectSpace` by default so that handler stores the object reference +* Added a `:stop_on_exit` option to thread pools to enable/disable `at_exit` handler +* Updated thread pool docs to better explain shutting down thread pools +* Simpler `:executor` option syntax for all abstractions which support this option +* Added `Executor#auto_terminate?` predicate method (for thread pools) +* Added `at_exit` handler to `TimerSet` +* Simplified auto-termination of the global executors + - Can now disable auto-termination of global executors + - Added shutdown/kill/wait_for_termination variants for global executors +* Can now disable auto-termination for *all* executors (the nuclear option) +* Simplified auto-termination of the global executors +* Deprecated terms "task pool" and "operation pool" + - New terms are "io executor" and "fast executor" + - New functions added with new names + - Deprecation warnings added to functions referencing old names +* Moved all thread pool related functions from `Concurrent::Configuration` to `Concurrent` + - Old functions still exist with deprecation warnings + - New functions have updated names as appropriate +* All high-level abstractions default to the "io executor" +* Fixed bug in `Actor` causing it to prematurely warm global thread pools on gem load + - This also fixed a `RejectedExecutionError` bug when running with minitest/autorun via JRuby +* Moved global logger up to the `Concurrent` namespace and refactored the code +* Optimized the performance of `Delay` + - Fixed a bug in which no executor option on construction caused block execution on a global thread pool +* Numerous improvements and bug fixes to `TimerSet` +* Fixed deadlock of `Future` when the handler raises Exception +* Added shared specs for more classes +* New concurrency abstractions including: + - `Atom` + - `Maybe` + - `ImmutableStruct` + - `MutableStruct` + - `SettableStruct` +* Created an Edge gem for unstable abstractions including + - `Actor` + - `Agent` + - `Channel` + - `Exchanger` + - `LazyRegister` + - **new Future Framework** - unified + implementation of Futures and Promises which combines Features of previous `Future`, + `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into single framework. It uses extensively + new synchronization layer to make all the paths **lock-free** with exception of blocking threads on `#wait`. + It offers better performance and does not block threads when not required. 
+* Actor framework changes: + - fixed reset loop in Pool + - Pool can use any actor as a worker, abstract worker class is no longer needed. + - Actor events not have format `[:event_name, *payload]` instead of just the Symbol. + - Actor now uses new Future/Promise Framework instead of `IVar` for better interoperability + - Behaviour definition array was simplified to `[BehaviourClass1, [BehaviourClass2, *initialization_args]]` + - Linking behavior responds to :linked message by returning array of linked actors + - Supervised behavior is removed in favour of just Linking + - RestartingContext is supervised by default now, `supervise: true` is not required any more + - Events can be private and public, so far only difference is that Linking will + pass to linked actors only public messages. Adding private :restarting and + :resetting events which are send before the actor restarts or resets allowing + to add callbacks to cleanup current child actors. + - Print also object_id in Reference to_s + - Add AbstractContext#default_executor to be able to override executor class wide + - Add basic IO example + - Documentation somewhat improved + - All messages should have same priority. It's now possible to send `actor << job1 << job2 << :terminate!` and + be sure that both jobs are processed first. +* Refactored `Channel` to use newer synchronization objects +* Added `#reset` and `#cancel` methods to `TimerSet` +* Added `#cancel` method to `Future` and `ScheduledTask` +* Refactored `TimerSet` to use `ScheduledTask` +* Updated `Async` with a factory that initializes the object +* Deprecated `Concurrent.timer` and `Concurrent.timeout` +* Reduced max threads on pure-Ruby thread pools (abends around 14751 threads) +* Moved many private/internal classes/modules into "namespace" modules +* Removed brute-force killing of threads in tests +* Fixed a thread pool bug when the operating system cannot allocate more threads + +## Release v0.8.0 (25 January 2015) + +* C extension for MRI have been extracted into the `concurrent-ruby-ext` companion gem. + Please see the README for more detail. +* Better variable isolation in `Promise` and `Future` via an `:args` option +* Continued to update intermittently failing tests + +## Release v0.7.2 (24 January 2015) + +* New `Semaphore` class based on [java.util.concurrent.Semaphore](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Semaphore.html) +* New `Promise.all?` and `Promise.any?` class methods +* Renamed `:overflow_policy` on thread pools to `:fallback_policy` +* Thread pools still accept the `:overflow_policy` option but display a warning +* Thread pools now implement `fallback_policy` behavior when not running (rather than universally rejecting tasks) +* Fixed minor `set_deref_options` constructor bug in `Promise` class +* Fixed minor `require` bug in `ThreadLocalVar` class +* Fixed race condition bug in `TimerSet` class +* Fixed race condition bug in `TimerSet` class +* Fixed signal bug in `TimerSet#post` method +* Numerous non-functional updates to clear warning when running in debug mode +* Fixed more intermittently failing tests +* Tests now run on new Travis build environment +* Multiple documentation updates + +## Release v0.7.1 (4 December 2014) + +Please see the [roadmap](https://github.com/ruby-concurrency/concurrent-ruby/issues/142) for more information on the next planned release. 
+ +* Added `flat_map` method to `Promise` +* Added `zip` method to `Promise` +* Fixed bug with logging in `Actor` +* Improvements to `Promise` tests +* Removed actor-experimental warning +* Added an `IndirectImmediateExecutor` class +* Allow disabling auto termination of global executors +* Fix thread leaking in `ThreadLocalVar` (uses `Ref` gem on non-JRuby systems) +* Fix thread leaking when pruning pure-Ruby thread pools +* Prevent `Actor` from using an `ImmediateExecutor` (causes deadlock) +* Added missing synchronizations to `TimerSet` +* Fixed bug with return value of `Concurrent::Actor::Utils::Pool#ask` +* Fixed timing bug in `TimerTask` +* Fixed bug when creating a `JavaThreadPoolExecutor` with minimum pool size of zero +* Removed confusing warning when not using native extenstions +* Improved documentation + +## Release v0.7.0 (13 August 2014) + +* Merge the [atomic](https://github.com/ruby-concurrency/atomic) gem + - Pure Ruby `MutexAtomic` atomic reference class + - Platform native atomic reference classes `CAtomic`, `JavaAtomic`, and `RbxAtomic` + - Automated [build process](https://github.com/ruby-concurrency/rake-compiler-dev-box) + - Fat binary releases for [multiple platforms](https://rubygems.org/gems/concurrent-ruby/versions) including Windows (32/64), Linux (32/64), OS X (64-bit), Solaris (64-bit), and JRuby +* C native `CAtomicBoolean` +* C native `CAtomicFixnum` +* Refactored intermittently failing tests +* Added `dataflow!` and `dataflow_with!` methods to match `Future#value!` method +* Better handling of timeout in `Agent` +* Actor Improvements + - Fine-grained implementation using chain of behaviors. Each behavior is responsible for single aspect like: `Termination`, `Pausing`, `Linking`, `Supervising`, etc. Users can create custom Actors easily based on their needs. + - Supervision was added. `RestartingContext` will pause on error waiting on its supervisor to decide what to do next ( options are `:terminate!`, `:resume!`, `:reset!`, `:restart!`). Supervising behavior also supports strategies `:one_for_one` and `:one_for_all`. + - Linking was added to be able to monitor actor's events like: `:terminated`, `:paused`, `:restarted`, etc. + - Dead letter routing added. Rejected envelopes are collected in a configurable actor (default: `Concurrent::Actor.root.ask!(:dead_letter_routing)`) + - Old `Actor` class removed and replaced by new implementation previously called `Actress`. `Actress` was kept as an alias for `Actor` to keep compatibility. + - `Utils::Broadcast` actor which allows Publish–subscribe pattern. 
+* More executors for managing serialized operations + - `SerializedExecution` mixin module + - `SerializedExecutionDelegator` for serializing *any* executor +* Updated `Async` with serialized execution +* Updated `ImmediateExecutor` and `PerThreadExecutor` with full executor service lifecycle +* Added a `Delay` to root `Actress` initialization +* Minor bug fixes to thread pools +* Refactored many intermittently failing specs +* Removed Java interop warning `executor.rb:148 warning: ambiguous Java methods found, using submit(java.lang.Runnable)` +* Fixed minor bug in `RubyCachedThreadPool` overflow policy +* Updated tests to use [RSpec 3.0](http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3) +* Removed deprecated `Actor` class +* Better support for Rubinius + +## Release v0.6.1 (14 June 2014) + +* Many improvements to `Concurrent::Actress` +* Bug fixes to `Concurrent::RubyThreadPoolExecutor` +* Fixed several brittle tests +* Moved documentation to http://ruby-concurrency.github.io/concurrent-ruby/frames.html + +## Release v0.6.0 (25 May 2014) + +* Added `Concurrent::Observable` to encapsulate our thread safe observer sets +* Improvements to new `Channel` +* Major improvements to `CachedThreadPool` and `FixedThreadPool` +* Added `SingleThreadExecutor` +* Added `Current::timer` function +* Added `TimerSet` executor +* Added `AtomicBoolean` +* `ScheduledTask` refactoring +* Pure Ruby and JRuby-optimized `PriorityQueue` classes +* Updated `Agent` behavior to more closely match Clojure +* Observer sets support block callbacks to the `add_observer` method +* New algorithm for thread creation in `RubyThreadPoolExecutor` +* Minor API updates to `Event` +* Rewritten `TimerTask` now an `Executor` instead of a `Runnable` +* Fixed many brittle specs +* Renamed `FixedThreadPool` and `CachedThreadPool` to `RubyFixedThreadPool` and `RubyCachedThreadPool` +* Created JRuby optimized `JavaFixedThreadPool` and `JavaCachedThreadPool` +* Consolidated fixed thread pool tests into `spec/concurrent/fixed_thread_pool_shared.rb` and `spec/concurrent/cached_thread_pool_shared.rb` +* `FixedThreadPool` now subclasses `RubyFixedThreadPool` or `JavaFixedThreadPool` as appropriate +* `CachedThreadPool` now subclasses `RubyCachedThreadPool` or `JavaCachedThreadPool` as appropriate +* New `Delay` class +* `Concurrent::processor_count` helper function +* New `Async` module +* Renamed `NullThreadPool` to `PerThreadExecutor` +* Deprecated `Channel` (we are planning a new implementation based on [Go](http://golangtutorials.blogspot.com/2011/06/channels-in-go.html)) +* Added gem-level [configuration](http://robots.thoughtbot.com/mygem-configure-block) +* Deprecated `$GLOBAL_THREAD_POOL` in lieu of gem-level configuration +* Removed support for Ruby [1.9.2](https://www.ruby-lang.org/en/news/2013/12/17/maintenance-of-1-8-7-and-1-9-2/) +* New `RubyThreadPoolExecutor` and `JavaThreadPoolExecutor` classes +* All thread pools now extend the appropriate thread pool executor classes +* All thread pools now support `:overflow_policy` (based on Java's [reject policies](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html)) +* Deprecated `UsesGlobalThreadPool` in lieu of explicit `:executor` option (dependency injection) on `Future`, `Promise`, and `Agent` +* Added `Concurrent::dataflow_with(executor, *inputs)` method to support executor dependency injection for dataflow +* Software transactional memory with `TVar` and `Concurrent::atomically` +* First implementation of [new, 
high-performance](https://github.com/ruby-concurrency/concurrent-ruby/pull/49) `Channel` +* `Actor` is deprecated in favor of new experimental actor implementation [#73](https://github.com/ruby-concurrency/concurrent-ruby/pull/73). To avoid namespace collision it is living in `Actress` namespace until `Actor` is removed in next release. + +## Release v0.5.0 + +This is the most significant release of this gem since its inception. This release includes many improvements and optimizations. It also includes several bug fixes. The major areas of focus for this release were: + +* Stability improvements on Ruby versions with thread-level parallelism ([JRuby](http://jruby.org/) and [Rubinius](http://rubini.us/)) +* Creation of new low-level concurrency abstractions +* Internal refactoring to use the new low-level abstractions + +Most of these updates had no effect on the gem API. There are a few notable exceptions which were unavoidable. Please read the [release notes](API-Updates-in-v0.5.0) for more information. + +Specific changes include: + +* New class `IVar` +* New class `MVar` +* New class `ThreadLocalVar` +* New class `AtomicFixnum` +* New class method `dataflow` +* New class `Condition` +* New class `CountDownLatch` +* New class `DependencyCounter` +* New class `SafeTaskExecutor` +* New class `CopyOnNotifyObserverSet` +* New class `CopyOnWriteObserverSet` +* `Future` updated with `execute` API +* `ScheduledTask` updated with `execute` API +* New `Promise` API +* `Future` now extends `IVar` +* `Postable#post?` now returns an `IVar` +* Thread safety fixes to `Dereferenceable` +* Thread safety fixes to `Obligation` +* Thread safety fixes to `Supervisor` +* Thread safety fixes to `Event` +* Various other thread safety (race condition) fixes +* Refactored brittle tests +* Implemented pending tests +* Added JRuby and Rubinius as Travis CI build targets +* Added [CodeClimate](https://codeclimate.com/) code review +* Improved YARD documentation diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/Gemfile b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/Gemfile new file mode 100644 index 0000000..1b8d9fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/Gemfile @@ -0,0 +1,42 @@ +source 'https://rubygems.org' + +require File.join(File.dirname(__FILE__), 'lib/concurrent-ruby/concurrent/version') +require File.join(File.dirname(__FILE__ ), 'lib/concurrent-ruby-edge/concurrent/edge/version') +require File.join(File.dirname(__FILE__ ), 'lib/concurrent-ruby/concurrent/utility/engine') + +no_path = ENV['NO_PATH'] +options = no_path ? {} : { path: '.' } + +gem 'concurrent-ruby', Concurrent::VERSION, options +gem 'concurrent-ruby-edge', Concurrent::EDGE_VERSION, options +gem 'concurrent-ruby-ext', Concurrent::VERSION, options.merge(platform: :mri) + +group :development do + gem 'rake', (Concurrent.ruby_version :<, 2, 2, 0) ? 
'~> 12.0' : '~> 13.0' + gem 'rake-compiler', '~> 1.0', '>= 1.0.7' + gem 'rake-compiler-dock', '~> 1.0' + gem 'pry', '~> 0.11', platforms: :mri +end + +group :documentation, optional: true do + gem 'yard', '~> 0.9.0', require: false + gem 'redcarpet', '~> 3.0', platforms: :mri # understands github markdown + gem 'md-ruby-eval', '~> 0.6' +end + +group :testing do + gem 'rspec', '~> 3.7' + gem 'timecop', '~> 0.7.4' + gem 'sigdump', require: false +end + +# made opt-in since it will not install on jruby 1.7 +group :coverage, optional: !ENV['COVERAGE'] do + gem 'simplecov', '~> 0.16.0', require: false + gem 'coveralls', '~> 0.8.2', require: false +end + +group :benchmarks, optional: true do + gem 'benchmark-ips', '~> 2.7' + gem 'bench9000' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/LICENSE.txt new file mode 100644 index 0000000..1026f28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) Jerry D'Antonio -- released under the MIT license. + +http://www.opensource.org/licenses/mit-license.php + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/README.md b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/README.md new file mode 100644 index 0000000..2e89c2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/README.md @@ -0,0 +1,384 @@ +# Concurrent Ruby + +[![Gem Version](https://badge.fury.io/rb/concurrent-ruby.svg)](http://badge.fury.io/rb/concurrent-ruby) +[![Build Status](https://travis-ci.org/ruby-concurrency/concurrent-ruby.svg?branch=master)](https://travis-ci.org/ruby-concurrency/concurrent-ruby) +[![Build status](https://ci.appveyor.com/api/projects/status/iq8aboyuu3etad4w?svg=true)](https://ci.appveyor.com/project/rubyconcurrency/concurrent-ruby) +[![License](https://img.shields.io/badge/license-MIT-green.svg)](http://opensource.org/licenses/MIT) +[![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-devs%20%26%20users-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) + +Modern concurrency tools for Ruby. 
Inspired by +[Erlang](http://www.erlang.org/doc/reference_manual/processes.html), +[Clojure](http://clojure.org/concurrent_programming), +[Scala](http://akka.io/), +[Haskell](http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell), +[F#](http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx), +[C#](http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx), +[Java](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html), +and classic concurrency patterns. + + + +The design goals of this gem are: + +* Be an 'unopinionated' toolbox that provides useful utilities without debating which is better + or why +* Remain free of external gem dependencies +* Stay true to the spirit of the languages providing inspiration +* But implement in a way that makes sense for Ruby +* Keep the semantics as idiomatic Ruby as possible +* Support features that make sense in Ruby +* Exclude features that don't make sense in Ruby +* Be small, lean, and loosely coupled +* Thread-safety +* Backward compatibility + +## Contributing + +**This gem depends on +[contributions](https://github.com/ruby-concurrency/concurrent-ruby/graphs/contributors) and we +appreciate your help. Would you like to contribute? Great! Have a look at +[issues with `looking-for-contributor` label](https://github.com/ruby-concurrency/concurrent-ruby/issues?q=is%3Aissue+is%3Aopen+label%3Alooking-for-contributor).** And if you pick something up let us know on the issue. + +## Thread Safety + +*Concurrent Ruby makes one of the strongest thread safety guarantees of any Ruby concurrency +library, providing consistent behavior and guarantees on all four of the main Ruby interpreters +(MRI/CRuby, JRuby, Rubinius, TruffleRuby).* + +Every abstraction in this library is thread safe. Specific thread safety guarantees are documented +with each abstraction. + +It is critical to remember, however, that Ruby is a language of mutable references. *No* +concurrency library for Ruby can ever prevent the user from making thread safety mistakes (such as +sharing a mutable object between threads and modifying it on both threads) or from creating +deadlocks through incorrect use of locks. All the library can do is provide safe abstractions which +encourage safe practices. Concurrent Ruby provides more safe concurrency abstractions than any +other Ruby library, many of which support the mantra of +["Do not communicate by sharing memory; instead, share memory by communicating"](https://blog.golang.org/share-memory-by-communicating). +Concurrent Ruby is also the only Ruby library which provides a full suite of thread safe and +immutable variable types and data structures. + +We've also initiated discussion to document [memory model](docs-source/synchronization.md) of Ruby which +would provide consistent behaviour and guarantees on all four of the main Ruby interpreters +(MRI/CRuby, JRuby, Rubinius, TruffleRuby). + +## Features & Documentation + +**The primary site for documentation is the automatically generated +[API documentation](http://ruby-concurrency.github.io/concurrent-ruby/index.html) which is up to +date with latest release.** This readme matches the master so may contain new stuff not yet +released. + +We also have a [IRC (gitter)](https://gitter.im/ruby-concurrency/concurrent-ruby). 
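+
+To make the thread-safety point above concrete, a minimal sketch using `Concurrent::AtomicFixnum`
+(one of the thread-safe variable types listed below) in place of an unsynchronized counter:
+
+```ruby
+require 'concurrent'
+
+# Incrementing a shared plain Integer from several threads is not guaranteed
+# to be atomic; AtomicFixnum makes each read-modify-write atomic on all
+# supported interpreters.
+counter = Concurrent::AtomicFixnum.new(0)
+
+threads = 10.times.map do
+  Thread.new { 1_000.times { counter.increment } }
+end
+threads.each(&:join)
+
+puts counter.value # => 10000
+```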
+ +### Versioning + +* `concurrent-ruby` uses [Semantic Versioning](http://semver.org/) +* `concurrent-ruby-ext` has always same version as `concurrent-ruby` +* `concurrent-ruby-edge` will always be 0.y.z therefore following + [point 4](http://semver.org/#spec-item-4) applies *"Major version zero + (0.y.z) is for initial development. Anything may change at any time. The + public API should not be considered stable."* However we additionally use + following rules: + * Minor version increment means incompatible changes were made + * Patch version increment means only compatible changes were made + + +#### General-purpose Concurrency Abstractions + +* [Async](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Async.html): + A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's + [gen_server](http://www.erlang.org/doc/man/gen_server.html). +* [ScheduledTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ScheduledTask.html): + Like a Future scheduled for a specific future time. +* [TimerTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TimerTask.html): + A Thread that periodically wakes up to perform work at regular intervals. +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html): + Unified implementation of futures and promises which combines features of previous `Future`, + `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and (partially) `TimerTask` into a single + framework. It extensively uses the new synchronization layer to make all the features + **non-blocking** and **lock-free**, with the exception of obviously blocking operations like + `#wait`, `#value`. It also offers better performance. + +#### Thread-safe Value Objects, Structures, and Collections + +Collection classes that were originally part of the (deprecated) `thread_safe` gem: + +* [Array](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Array.html) A thread-safe + subclass of Ruby's standard [Array](http://ruby-doc.org/core/Array.html). +* [Hash](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Hash.html) A thread-safe + subclass of Ruby's standard [Hash](http://ruby-doc.org/core/Hash.html). +* [Set](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Set.html) A thread-safe + subclass of Ruby's standard [Set](http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html). +* [Map](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Map.html) A hash-like object + that should have much better performance characteristics, especially under high concurrency, + than `Concurrent::Hash`. +* [Tuple](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Tuple.html) A fixed size + array with volatile (synchronized, thread safe) getters/setters. + +Value objects inspired by other languages: + +* [Maybe](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Maybe.html) A thread-safe, + immutable object representing an optional value, based on + [Haskell Data.Maybe](https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html). + +Structure classes derived from Ruby's [Struct](http://ruby-doc.org/core/Struct.html): + +* [ImmutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ImmutableStruct.html) + Immutable struct where values are set at construction and cannot be changed later. 
+* [MutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MutableStruct.html) + Synchronized, mutable struct where values can be safely changed at any time. +* [SettableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/SettableStruct.html) + Synchronized, write-once struct where values can be set at most once, either at construction + or any time thereafter. + +Thread-safe variables: + +* [Agent](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Agent.html): A way to + manage shared, mutable, *asynchronous*, independent state. Based on Clojure's + [Agent](http://clojure.org/agents). +* [Atom](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Atom.html): A way to manage + shared, mutable, *synchronous*, independent state. Based on Clojure's + [Atom](http://clojure.org/atoms). +* [AtomicBoolean](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicBoolean.html) + A boolean value that can be updated atomically. +* [AtomicFixnum](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicFixnum.html) + A numeric value that can be updated atomically. +* [AtomicReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicReference.html) + An object reference that may be updated atomically. +* [Exchanger](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Exchanger.html) + A synchronization point at which threads can pair and swap elements within pairs. Based on + Java's [Exchanger](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html). +* [MVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MVar.html) A synchronized + single element container. Based on Haskell's + [MVar](https://hackage.haskell.org/package/base-4.8.1.0/docs/Control-Concurrent-MVar.html) and + Scala's [MVar](http://docs.typelevel.org/api/scalaz/nightly/index.html#scalaz.concurrent.MVar$). +* [ThreadLocalVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ThreadLocalVar.html) + A variable where the value is different for each thread. +* [TVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TVar.html) A transactional + variable implementing software transactional memory (STM). Based on Clojure's + [Ref](http://clojure.org/refs). + +#### Java-inspired ThreadPools and Other Executors + +* See the [thread pool](http://ruby-concurrency.github.io/concurrent-ruby/master/file.thread_pools.html) + overview, which also contains a list of other Executors available. + +#### Thread Synchronization Classes and Algorithms + +* [CountDownLatch](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CountDownLatch.html) + A synchronization object that allows one thread to wait on multiple other threads. +* [CyclicBarrier](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CyclicBarrier.html) + A synchronization aid that allows a set of threads to all wait for each other to reach a common barrier point. +* [Event](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Event.html) Old school + kernel-style event. +* [ReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReadWriteLock.html) + A lock that supports multiple readers but only one writer. +* [ReentrantReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReentrantReadWriteLock.html) + A read/write lock with reentrant and upgrade features. 
+* [Semaphore](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Semaphore.html) + A counting-based locking mechanism that uses permits. +* [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicMarkableReference.html) + +#### Deprecated + +Deprecated features are still available and bugs are being fixed, but new features will not be added. + +* ~~[Future](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Future.html): + An asynchronous operation that produces a value.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). + * ~~[.dataflow](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent.html#dataflow-class_method): + Built on Futures, Dataflow allows you to create a task that will be scheduled when all of + its data dependencies are available.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[Promise](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promise.html): Similar + to Futures, with more features.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[Delay](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Delay.html) Lazy evaluation + of a block yielding an immutable result. Based on Clojure's + [delay](https://clojuredocs.org/clojure.core/delay).~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[IVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/IVar.html) Similar to a + "future" but can be manually assigned once, after which it becomes immutable.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). + +### Edge Features + +These are available in the `concurrent-ruby-edge` companion gem. + +These features are under active development and may change frequently. They are expected not to +keep backward compatibility (there may also lack tests and documentation). Semantic versions will +be obeyed though. Features developed in `concurrent-ruby-edge` are expected to move to +`concurrent-ruby` when final. + +* [Actor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Actor.html): Implements + the Actor Model, where concurrent actors exchange messages. + *Status: Partial documentation and tests; depends on new future/promise framework; stability is good.* +* [Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Channel.html): + Communicating Sequential Processes ([CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes)). + Functionally equivalent to Go [channels](https://tour.golang.org/concurrency/2) with additional + inspiration from Clojure [core.async](https://clojure.github.io/core.async/). 
+  *Status: Partial documentation and tests.*
+* [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LazyRegister.html)
+* [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Edge/LockFreeLinkedSet.html)
+  *Status: will be moved to core soon.*
+* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LockFreeStack.html)
+  *Status: missing documentation and tests.*
+* [Promises::Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises/Channel.html)
+  A first-in, first-out channel that accepts messages with the push family of methods and returns
+  messages with the pop family of methods.
+  Pop and push operations can be represented as futures, see `#pop_op` and `#push_op`.
+  The capacity of the channel can be limited to support back pressure, use the capacity option in `#initialize`.
+  The `#pop` method blocks and `#pop_op` returns a pending future if there is no message in the channel.
+  If the capacity is limited, the `#push` method blocks and `#push_op` returns a pending future.
+* [Cancellation](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Cancellation.html)
+  The Cancellation abstraction provides cooperative cancellation.
+
+  The standard methods `Thread#raise` or `Thread#kill` available in Ruby
+  are very dangerous (see the blog posts linked below).
+  Therefore concurrent-ruby provides an alternative.
+
+  *
+  *
+  *
+
+  It provides an object which represents a task which can be executed,
+  the task has to get the reference to the object and periodically cooperatively check that it is not cancelled.
+  Good practices to make tasks cancellable:
+  * check cancellation every cycle of a loop which does significant work,
+  * do all blocking actions in a loop with a timeout, then on timeout check cancellation
+    and, if not cancelled, block again with the timeout
+* [Throttle](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Throttle.html)
+  A tool for managing the concurrency level of tasks.
+* [ErlangActor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ErlangActor.html)
+  An actor implementation which precisely matches Erlang actor behaviour.
+  Requires at least Ruby 2.1, otherwise it is not loaded.
+* [WrappingExecutor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/WrappingExecutor.html)
+  A delegating executor which modifies each task before the task is given to
+  the target executor it delegates to.
+
+## Supported Ruby versions
+
+* MRI 2.0 and above
+* JRuby 9000
+* TruffleRuby
+* Any Ruby interpreter that is compliant with Ruby 2.0 or newer.
+
+We still support MRI 1.9.3 and JRuby 1.7.27, but we are looking at ways to drop that support.
+Java 8 is preferred for JRuby, but every Java version on which JRuby 9000 runs is supported.
+
+The legacy support for Rubinius is kept but no longer maintained; if you would like to help,
+please respond to [#739](https://github.com/ruby-concurrency/concurrent-ruby/issues/739).
+
+## Usage
+
+Everything within this gem can be loaded simply by requiring it:
+
+```ruby
+require 'concurrent'
+```
+
+*Requiring only specific abstractions from Concurrent Ruby is not yet supported.*
+
+To use the tools in the Edge gem it must be required separately:
+
+```ruby
+require 'concurrent-edge'
+```
+
+If the library does not behave as expected, `Concurrent.use_stdlib_logger(Logger::DEBUG)` could
+help to reveal the problem.
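+
+For illustration, a minimal sketch of what becomes available after `require 'concurrent'`, using
+the `Concurrent::Promises` API described above (the arguments and block here are arbitrary examples):
+
+```ruby
+require 'concurrent'
+
+# Schedule work on a global executor; the future resolves asynchronously.
+future = Concurrent::Promises.future(2, 3) { |a, b| a + b }
+
+# value! blocks only when the result is actually needed and re-raises any
+# error raised inside the block.
+puts future.value! # => 5
+```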
+ +## Installation + +```shell +gem install concurrent-ruby +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby', require: 'concurrent' +``` + +and run `bundle install` from your shell. + +### Edge Gem Installation + +The Edge gem must be installed separately from the core gem: + +```shell +gem install concurrent-ruby-edge +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby-edge', require: 'concurrent-edge' +``` + +and run `bundle install` from your shell. + + +### C Extensions for MRI + +Potential performance improvements may be achieved under MRI by installing optional C extensions. +To minimise installation errors the C extensions are available in the `concurrent-ruby-ext` +extension gem. `concurrent-ruby` and `concurrent-ruby-ext` are always released together with same +version. Simply install the extension gem too: + +```ruby +gem install concurrent-ruby-ext +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby-ext' +``` + +and run `bundle install` from your shell. + +In code it is only necessary to + +```ruby +require 'concurrent' +``` + +The `concurrent-ruby` gem will automatically detect the presence of the `concurrent-ruby-ext` gem +and load the appropriate C extensions. + +#### Note For gem developers + +No gems should depend on `concurrent-ruby-ext`. Doing so will force C extensions on your users. The +best practice is to depend on `concurrent-ruby` and let users to decide if they want C extensions. + +## Maintainers + +* [Petr Chalupa](https://github.com/pitr-ch) (lead maintainer, point-of-contact) +* [Jerry D'Antonio](https://github.com/jdantonio) (creator) +* [Chris Seaton](https://github.com/chrisseaton) + +### Special Thanks to + +* [Brian Durand](https://github.com/bdurand) for the `ref` gem +* [Charles Oliver Nutter](https://github.com/headius) for the `atomic` and `thread_safe` gems +* [thedarkone](https://github.com/thedarkone) for the `thread_safe` gem + +and to the past maintainers + +* [Michele Della Torre](https://github.com/mighe) +* [Paweł Obrok](https://github.com/obrok) +* [Lucas Allan](https://github.com/lucasallan) + +and to [Ruby Association](https://www.ruby.or.jp/en/) for sponsoring a project +["Enhancing Ruby’s concurrency tooling"](https://www.ruby.or.jp/en/news/20181106) in 2018. + +## License and Copyright + +*Concurrent Ruby* is free software released under the +[MIT License](http://www.opensource.org/licenses/MIT). + +The *Concurrent Ruby* [logo](https://raw.githubusercontent.com/ruby-concurrency/concurrent-ruby/master/docs-source/logo/concurrent-ruby-logo-300x300.png) was +designed by [David Jones](https://twitter.com/zombyboy). It is Copyright © 2014 +[Jerry D'Antonio](https://twitter.com/jerrydantonio). All Rights Reserved. 
diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/Rakefile b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/Rakefile new file mode 100644 index 0000000..2ce9ab6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/Rakefile @@ -0,0 +1,332 @@ +require_relative 'lib/concurrent-ruby/concurrent/version' +require_relative 'lib/concurrent-ruby-edge/concurrent/edge/version' +require_relative 'lib/concurrent-ruby/concurrent/utility/engine' + +if Concurrent.ruby_version :<, 2, 0, 0 + # @!visibility private + module Kernel + def __dir__ + File.dirname __FILE__ + end + end +end + +core_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby.gemspec') +ext_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-ext.gemspec') +edge_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-edge.gemspec') + +require 'rake/javaextensiontask' + +ENV['JRUBY_HOME'] = ENV['CONCURRENT_JRUBY_HOME'] if ENV['CONCURRENT_JRUBY_HOME'] && !Concurrent.on_jruby? + +Rake::JavaExtensionTask.new('concurrent_ruby', core_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' +end + +unless Concurrent.on_jruby? + require 'rake/extensiontask' + + Rake::ExtensionTask.new('concurrent_ruby_ext', ext_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby-ext' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' + ext.source_pattern = '*.{c,h}' + + ext.cross_compile = true + ext.cross_platform = ['x86-mingw32', 'x64-mingw32'] + end +end + +require 'rake_compiler_dock' +namespace :repackage do + desc '* with Windows fat distributions' + task :all do + Dir.chdir(__dir__) do + # store gems in vendor cache for docker + sh 'bundle package' + + # build only the jar file not the whole gem for java platform, the jar is part the concurrent-ruby-x.y.z.gem + Rake::Task['lib/concurrent-ruby/concurrent/concurrent_ruby.jar'].invoke + + # build all gem files + RakeCompilerDock.sh 'bundle install --local && bundle exec rake cross native package --trace' + end + end +end + +require 'rubygems' +require 'rubygems/package_task' + +Gem::PackageTask.new(core_gemspec) {} if core_gemspec +Gem::PackageTask.new(ext_gemspec) {} if ext_gemspec && !Concurrent.on_jruby? +Gem::PackageTask.new(edge_gemspec) {} if edge_gemspec + +CLEAN.include('lib/concurrent-ruby/concurrent/2.*', 'lib/concurrent-ruby/concurrent/*.jar') + +begin + require 'rspec' + require 'rspec/core/rake_task' + + RSpec::Core::RakeTask.new(:spec) + + namespace :spec do + desc '* Configured for ci' + RSpec::Core::RakeTask.new(:ci) do |t| + options = %w[ --color + --backtrace + --order defined + --format documentation + --tag ~notravis ] + t.rspec_opts = [*options].join(' ') + end + + desc '* test packaged and installed gems instead of local files' + task :installed do + Dir.chdir(__dir__) do + sh "gem install pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem install pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" if Concurrent.on_cruby? 
+ sh "gem install pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" + ENV['NO_PATH'] = 'true' + sh 'bundle update' + sh 'bundle exec rake spec:ci' + end + end + end + + desc 'executed in CI' + task :ci => [:compile, 'spec:ci'] + + task :default => [:clobber, :compile, :spec] +rescue LoadError => e + puts 'RSpec is not installed, skipping test task definitions: ' + e.message +end + +current_yard_version_name = Concurrent::VERSION + +begin + require 'yard' + require 'md_ruby_eval' + require_relative 'support/yard_full_types' + + common_yard_options = ['--no-yardopts', + '--no-document', + '--no-private', + '--embed-mixins', + '--markup', 'markdown', + '--title', 'Concurrent Ruby', + '--template', 'default', + '--template-path', 'yard-template', + '--default-return', 'undocumented'] + + desc 'Generate YARD Documentation (signpost, master)' + task :yard => ['yard:signpost', 'yard:master'] + + namespace :yard do + + desc '* eval markdown files' + task :eval_md do + Dir.chdir File.join(__dir__, 'docs-source') do + sh 'bundle exec md-ruby-eval --auto' + end + end + + task :update_readme do + Dir.chdir __dir__ do + content = File.read(File.join('README.md')). + gsub(/\[([\w ]+)\]\(http:\/\/ruby-concurrency\.github\.io\/concurrent-ruby\/master\/.*\)/) do |_| + case $1 + when 'LockFreeLinkedSet' + "{Concurrent::Edge::#{$1} #{$1}}" + when '.dataflow' + '{Concurrent.dataflow Concurrent.dataflow}' + when 'thread pool' + '{file:thread_pools.md thread pool}' + else + "{Concurrent::#{$1} #{$1}}" + end + end + FileUtils.mkpath 'tmp' + File.write 'tmp/README.md', content + end + end + + define_yard_task = -> name do + output_dir = "docs/#{name}" + + removal_name = "remove.#{name}" + task removal_name do + Dir.chdir __dir__ do + FileUtils.rm_rf output_dir + end + end + + desc "* of #{name} into subdir #{name}" + YARD::Rake::YardocTask.new(name) do |yard| + yard.options.push( + '--output-dir', output_dir, + '--main', 'tmp/README.md', + *common_yard_options) + yard.files = ['./lib/concurrent-ruby/**/*.rb', + './lib/concurrent-ruby-edge/**/*.rb', + './ext/concurrent_ruby_ext/**/*.c', + '-', + 'docs-source/thread_pools.md', + 'docs-source/promises.out.md', + 'docs-source/medium-example.out.rb', + 'LICENSE.txt', + 'CHANGELOG.md'] + end + Rake::Task[name].prerequisites.push removal_name, + # 'yard:eval_md', + 'yard:update_readme' + end + + define_yard_task.call current_yard_version_name + define_yard_task.call 'master' + + desc "* signpost for versions" + YARD::Rake::YardocTask.new(:signpost) do |yard| + yard.options.push( + '--output-dir', 'docs', + '--main', 'docs-source/signpost.md', + *common_yard_options) + yard.files = ['no-lib'] + end + + define_uptodate_task = -> name do + namespace name do + desc "** ensure that #{name} generated documentation is matching the source code" + task :uptodate do + Dir.chdir(__dir__) do + begin + FileUtils.cp_r 'docs', 'docs-copy', verbose: true + Rake::Task["yard:#{name}"].invoke + sh 'diff -r docs/ docs-copy/' do |ok, res| + unless ok + begin + STDOUT.puts 'Command failed. Continue? 
(y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + ensure + FileUtils.rm_rf 'docs-copy', verbose: true + end + end + end + end + end + + define_uptodate_task.call current_yard_version_name + define_uptodate_task.call 'master' + end + +rescue LoadError => e + puts 'YARD is not installed, skipping documentation task definitions: ' + e.message +end + +desc 'build, test, and publish the gem' +task :release => ['release:checks', 'release:build', 'release:test', 'release:publish'] + +namespace :release do + # Depends on environment of @pitr-ch + + mri_version = '2.6.5' + jruby_version = 'jruby-9.2.9.0' + + task :checks => "yard:#{current_yard_version_name}:uptodate" do + Dir.chdir(__dir__) do + sh 'test -z "$(git status --porcelain)"' do |ok, res| + unless ok + begin + STDOUT.puts 'Command failed. Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + sh 'git fetch' + sh 'test $(git show-ref --verify --hash refs/heads/master) = ' + + '$(git show-ref --verify --hash refs/remotes/origin/master)' do |ok, res| + unless ok + begin + STDOUT.puts 'Command failed. Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + end + end + + desc '* build all *.gem files necessary for release' + task :build => [:clobber, 'repackage:all'] + + desc '* test actual installed gems instead of cloned repository on MRI and JRuby' + task :test do + Dir.chdir(__dir__) do + old = ENV['RBENV_VERSION'] + + ENV['RBENV_VERSION'] = mri_version + sh 'rbenv version' + sh 'bundle exec rake spec:installed' + + ENV['RBENV_VERSION'] = jruby_version + sh 'rbenv version' + sh 'bundle exec rake spec:installed' + + puts 'Windows build is untested' + + ENV['RBENV_VERSION'] = old + end + end + + desc '* do all nested steps' + task :publish => ['publish:ask', 'publish:tag', 'publish:rubygems', 'publish:post_steps'] + + namespace :publish do + publish_edge = false + + task :ask do + begin + STDOUT.puts 'Do you want to publish anything? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + begin + STDOUT.puts 'Do you want to publish edge? 
(y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + publish_edge = input == 'y' + end + + desc '** tag HEAD with current version and push to github' + task :tag do + Dir.chdir(__dir__) do + sh "git tag v#{Concurrent::VERSION}" + sh "git push origin v#{Concurrent::VERSION}" + sh "git tag edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + sh "git push origin edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + end + end + + desc '** push all *.gem files to rubygems' + task :rubygems do + Dir.chdir(__dir__) do + sh "gem push pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem push pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" if publish_edge + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x64-mingw32.gem" + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x86-mingw32.gem" + end + end + + desc '** print post release steps' + task :post_steps do + puts 'Manually: create a release on GitHub with relevant changelog part' + puts 'Manually: send email same as release with relevant changelog part' + puts 'Manually: tweet' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/ConcurrentRubyService.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/ConcurrentRubyService.java new file mode 100644 index 0000000..fb6be96 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/ConcurrentRubyService.java @@ -0,0 +1,17 @@ +import org.jruby.Ruby; +import org.jruby.runtime.load.BasicLibraryService; + +import java.io.IOException; + +public class ConcurrentRubyService implements BasicLibraryService { + + public boolean basicLoad(final Ruby runtime) throws IOException { + new com.concurrent_ruby.ext.AtomicReferenceLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicBooleanLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicFixnumLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaSemaphoreLibrary().load(runtime, false); + new com.concurrent_ruby.ext.SynchronizationLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JRubyMapBackendLibrary().load(runtime, false); + return true; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java new file mode 100644 index 0000000..dfa9e77 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java @@ -0,0 +1,175 @@ +package com.concurrent_ruby.ext; + +import java.lang.reflect.Field; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +/** + * This library adds an atomic reference type to JRuby for use in the atomic + * library. We do a native version to avoid the implicit value coercion that + * normally happens through JI. 
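The JavaAtomicReference class defined in this hunk exposes get/value, set/value=, compare_and_set/compare_and_swap and get_and_set/swap to Ruby. Assuming, as is generally the case in concurrent-ruby (not stated in this file), that the public Concurrent::AtomicReference delegates to this backend on JRuby, the usual way to consume it is a compare-and-set retry loop. A minimal sketch of that pattern:

    require 'concurrent'

    # Sketch under the assumption that Concurrent::AtomicReference is the
    # Ruby-facing wrapper of JavaAtomicReference; the loop mirrors the
    # compare_and_set method defined in the Java code below.
    ref = Concurrent::AtomicReference.new([])

    def push_event(ref, event)
      loop do
        current = ref.value                  # read the current snapshot
        updated = current + [event]          # build a replacement, never mutate in place
        break if ref.compare_and_set(current, updated)
        # another thread won the race; reload and retry
      end
    end

    push_event(ref, :started)

Building a fresh array instead of mutating the stored one is what makes the compare_and_set check meaningful: a concurrent writer is detected because the reference itself changed.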
+ * + * @author headius + */ +public class AtomicReferenceLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicReference", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + try { + sun.misc.Unsafe.class.getMethod("getAndSetObject", Object.class); + atomicCls.setAllocator(JRUBYREFERENCE8_ALLOCATOR); + } catch (Exception e) { + // leave it as Java 6/7 version + } + atomicCls.defineAnnotatedMethods(JRubyReference.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBYREFERENCE8_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference8(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyReference", parent="Object") + public static class JRubyReference extends RubyObject { + volatile IRubyObject reference; + + static final sun.misc.Unsafe UNSAFE; + static final long referenceOffset; + + static { + try { + UNSAFE = UnsafeHolder.U; + Class k = JRubyReference.class; + referenceOffset = UNSAFE.objectFieldOffset(k.getDeclaredField("reference")); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public JRubyReference(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + UNSAFE.putObject(this, referenceOffset, context.nil); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + UNSAFE.putObject(this, referenceOffset, value); + return context.nil; + } + + @JRubyMethod(name = {"get", "value"}) + public IRubyObject get() { + return reference; + } + + @JRubyMethod(name = {"set", "value="}) + public IRubyObject set(IRubyObject newValue) { + UNSAFE.putObjectVolatile(this, referenceOffset, newValue); + return newValue; + } + + @JRubyMethod(name = {"compare_and_set", "compare_and_swap"}) + public IRubyObject compare_and_set(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + if (expectedValue instanceof RubyNumeric) { + // numerics are not always idempotent in Ruby, so we need to do slower logic + return compareAndSetNumeric(context, expectedValue, newValue); + } + + return runtime.newBoolean(UNSAFE.compareAndSwapObject(this, referenceOffset, expectedValue, newValue)); + } + + @JRubyMethod(name = {"get_and_set", "swap"}) + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // less-efficient version for Java 6 and 7 + while (true) { + IRubyObject oldValue = get(); + if (UNSAFE.compareAndSwapObject(this, referenceOffset, oldValue, newValue)) { + return oldValue; + } + } + } + + private IRubyObject compareAndSetNumeric(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + // loop until: + // * reference CAS would succeed for same-valued objects + // * current and expected have different values as determined by #equals + while (true) { + IRubyObject current = reference; + + if (!(current instanceof RubyNumeric)) { + // old value is not numeric, CAS fails + return runtime.getFalse(); + } + + RubyNumeric currentNumber = (RubyNumeric)current; + if 
(!currentNumber.equals(expectedValue)) { + // current number does not equal expected, fail CAS + return runtime.getFalse(); + } + + // check that current has not changed, or else allow loop to repeat + boolean success = UNSAFE.compareAndSwapObject(this, referenceOffset, current, newValue); + if (success) { + // value is same and did not change in interim...success + return runtime.getTrue(); + } + } + } + } + + private static final class UnsafeHolder { + private UnsafeHolder(){} + + public static final sun.misc.Unsafe U = loadUnsafe(); + + private static sun.misc.Unsafe loadUnsafe() { + try { + Class unsafeClass = Class.forName("sun.misc.Unsafe"); + Field f = unsafeClass.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + } catch (Exception e) { + return null; + } + } + } + + public static class JRubyReference8 extends JRubyReference { + public JRubyReference8(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @Override + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // efficient version for Java 8 + return (IRubyObject)UNSAFE.getAndSetObject(this, referenceOffset, newValue); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java new file mode 100644 index 0000000..a09f916 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java @@ -0,0 +1,248 @@ +package com.concurrent_ruby.ext; + +import org.jruby.*; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8; +import com.concurrent_ruby.ext.jsr166e.nounsafe.*; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.Map; + +import static org.jruby.runtime.Visibility.PRIVATE; + +/** + * Native Java implementation to avoid the JI overhead. 
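JRubyMapBackend below implements [], []=, put_if_absent, compute_if_absent, compute_if_present, compute, merge_pair and friends on top of the bundled jsr166e ConcurrentHashMapV8. Assuming, as in concurrent-ruby generally (not stated in this hunk), that Concurrent::Map selects this class as its backend on JRuby, a minimal usage sketch of those operations looks like:

    require 'concurrent'

    # Assumption: Concurrent::Map delegates to JRubyMapBackend on JRuby.
    cache = Concurrent::Map.new

    cache[:answer] = 42
    cache.put_if_absent(:answer, 0)                   # no-op, key already present

    # compute_if_absent runs the block at most once per missing key,
    # mapping onto computeIfAbsent/Fun in the Java code below
    greeting = cache.compute_if_absent(:greeting) { 'hello' }

    puts cache[:answer]   # => 42
    puts greeting         # => "hello"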
+ * + * @author thedarkone + */ +public class JRubyMapBackendLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyModule thread_safeMod = concurrentMod.defineModuleUnder("Collection"); + RubyClass jrubyRefClass = thread_safeMod.defineClassUnder("JRubyMapBackend", runtime.getObject(), BACKEND_ALLOCATOR); + jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); + jrubyRefClass.defineAnnotatedMethods(JRubyMapBackend.class); + } + + private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyMapBackend(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyMapBackend", parent="Object") + public static class JRubyMapBackend extends RubyObject { + // Defaults used by the CHM + static final int DEFAULT_INITIAL_CAPACITY = 16; + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); + + private ConcurrentHashMap map; + + private static ConcurrentHashMap newCHM(int initialCapacity, float loadFactor) { + if (CAN_USE_UNSAFE_CHM) { + return new ConcurrentHashMapV8(initialCapacity, loadFactor); + } else { + return new com.concurrent_ruby.ext.jsr166e.nounsafe.ConcurrentHashMapV8(initialCapacity, loadFactor); + } + } + + private static ConcurrentHashMap newCHM() { + return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + private static boolean canUseUnsafeCHM() { + try { + new com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8(); // force class load and initialization + return true; + } catch (Throwable t) { // ensuring we really do catch everything + // Doug's Unsafe setup errors always have this "Could not ini.." message + if (isCausedBySecurityException(t)) { + return false; + } + throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t)); + } + } + + private static boolean isCausedBySecurityException(Throwable t) { + while (t != null) { + if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { + return true; + } + t = t.getCause(); + } + return false; + } + + public JRubyMapBackend(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + map = newCHM(); + return context.getRuntime().getNil(); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject options) { + map = toCHM(context, options); + return context.getRuntime().getNil(); + } + + private ConcurrentHashMap toCHM(ThreadContext context, IRubyObject options) { + Ruby runtime = context.getRuntime(); + if (!options.isNil() && options.respondsTo("[]")) { + IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); + IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); + int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; + float loadFactor = !rLoadFactor.isNil() ? 
(float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; + return newCHM(initialCapacity, loadFactor); + } else { + return newCHM(); + } + } + + @JRubyMethod(name = "[]", required = 1) + public IRubyObject op_aref(ThreadContext context, IRubyObject key) { + IRubyObject value; + return ((value = map.get(key)) == null) ? context.getRuntime().getNil() : value; + } + + @JRubyMethod(name = {"[]="}, required = 2) + public IRubyObject op_aset(IRubyObject key, IRubyObject value) { + map.put(key, value); + return value; + } + + @JRubyMethod + public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { + IRubyObject result = map.putIfAbsent(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { + return map.computeIfAbsent(key, new ConcurrentHashMap.Fun() { + @Override + public IRubyObject apply(IRubyObject key) { + return block.yieldSpecific(context); + } + }); + } + + @JRubyMethod + public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { + IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { + return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); + } + + @JRubyMethod(name = "key?", required = 1) + public RubyBoolean has_key_p(IRubyObject key) { + return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); + } + + @JRubyMethod + public IRubyObject key(IRubyObject value) { + final IRubyObject key = map.findKey(value); + return key == null ? getRuntime().getNil() : key; + } + + @JRubyMethod + public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { + IRubyObject result = map.replace(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { + IRubyObject result = map.put(key, value); + return result == null ? 
getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject delete(IRubyObject key) { + IRubyObject result = map.remove(key); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { + return getRuntime().newBoolean(map.remove(key, value)); + } + + @JRubyMethod + public IRubyObject clear() { + map.clear(); + return this; + } + + @JRubyMethod + public IRubyObject each_pair(ThreadContext context, Block block) { + for (Map.Entry entry : map.entrySet()) { + block.yieldSpecific(context, entry.getKey(), entry.getValue()); + } + return this; + } + + @JRubyMethod + public RubyFixnum size(ThreadContext context) { + return context.getRuntime().newFixnum(map.size()); + } + + @JRubyMethod + public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { + return map.getValueOrDefault(key, defaultValue); + } + + @JRubyMethod(visibility = PRIVATE) + public JRubyMapBackend initialize_copy(ThreadContext context, IRubyObject other) { + map = newCHM(); + return this; + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java new file mode 100644 index 0000000..b566076 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java @@ -0,0 +1,93 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNil; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class JavaAtomicBooleanLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicBoolean", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + atomicCls.defineAnnotatedMethods(JavaAtomicBoolean.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicBoolean(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicBoolean", parent = "Object") + public static class JavaAtomicBoolean extends RubyObject { + + private AtomicBoolean atomicBoolean; + + public JavaAtomicBoolean(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + atomicBoolean = new AtomicBoolean(convertRubyBooleanToJavaBoolean(value)); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + atomicBoolean = new AtomicBoolean(); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject value() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + + @JRubyMethod(name = "true?") + public IRubyObject isAtomicTrue() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + 
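The make_true method further down in this class wraps AtomicBoolean#compareAndSet(false, true), so it returns true for exactly one caller. Assuming the public Concurrent::AtomicBoolean rides on this backend under JRuby (an assumption, not stated here), that makes a compact one-shot initialization guard:

    require 'concurrent'

    # Assumption: Concurrent::AtomicBoolean wraps JavaAtomicBoolean on JRuby.
    started = Concurrent::AtomicBoolean.new(false)

    def start_once(flag)
      # make_true is a compare-and-set from false to true, so only the
      # first caller to flip the flag gets :started back
      return :already_started unless flag.make_true
      :started
    end

    puts start_once(started)  # => :started
    puts start_once(started)  # => :already_started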
+ @JRubyMethod(name = "false?") + public IRubyObject isAtomicFalse() { + return getRuntime().newBoolean((atomicBoolean.get() == false)); + } + + @JRubyMethod(name = "value=") + public IRubyObject setAtomic(ThreadContext context, IRubyObject newValue) { + atomicBoolean.set(convertRubyBooleanToJavaBoolean(newValue)); + return context.nil; + } + + @JRubyMethod(name = "make_true") + public IRubyObject makeTrue() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(false, true)); + } + + @JRubyMethod(name = "make_false") + public IRubyObject makeFalse() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(true, false)); + } + + private boolean convertRubyBooleanToJavaBoolean(IRubyObject newValue) { + if (newValue instanceof RubyBoolean.False || newValue instanceof RubyNil) { + return false; + } else { + return true; + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java new file mode 100755 index 0000000..672bfc0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java @@ -0,0 +1,113 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import org.jruby.runtime.Block; + +public class JavaAtomicFixnumLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicFixnum", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaAtomicFixnum.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicFixnum(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicFixnum", parent = "Object") + public static class JavaAtomicFixnum extends RubyObject { + + private AtomicLong atomicLong; + + public JavaAtomicFixnum(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + this.atomicLong = new AtomicLong(0); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.atomicLong = new AtomicLong(rubyFixnumToLong(value)); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject getValue() { + return getRuntime().newFixnum(atomicLong.get()); + } + + @JRubyMethod(name = "value=") + public IRubyObject setValue(ThreadContext context, IRubyObject newValue) { + atomicLong.set(rubyFixnumToLong(newValue)); + return context.nil; + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject increment() { + return getRuntime().newFixnum(atomicLong.incrementAndGet()); + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject 
increment(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(delta)); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement() { + return getRuntime().newFixnum(atomicLong.decrementAndGet()); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(-delta)); + } + + @JRubyMethod(name = "compare_and_set") + public IRubyObject compareAndSet(ThreadContext context, IRubyObject expect, IRubyObject update) { + return getRuntime().newBoolean(atomicLong.compareAndSet(rubyFixnumToLong(expect), rubyFixnumToLong(update))); + } + + @JRubyMethod + public IRubyObject update(ThreadContext context, Block block) { + for (;;) { + long _oldValue = atomicLong.get(); + IRubyObject oldValue = getRuntime().newFixnum(_oldValue); + IRubyObject newValue = block.yield(context, oldValue); + if (atomicLong.compareAndSet(_oldValue, rubyFixnumToLong(newValue))) { + return newValue; + } + } + } + + private long rubyFixnumToLong(IRubyObject value) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError("value must be a Fixnum"); + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java new file mode 100755 index 0000000..a3e847d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java @@ -0,0 +1,159 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.Semaphore; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +public class JavaSemaphoreLibrary { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaSemaphore", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaSemaphore.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaSemaphore(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaSemaphore", parent = "Object") + public static class JavaSemaphore extends RubyObject { + + private JRubySemaphore semaphore; + + public JavaSemaphore(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.semaphore = new JRubySemaphore(rubyFixnumInt(value, "count")); + return context.nil; + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context, IRubyObject value) throws InterruptedException { + this.semaphore.acquire(rubyFixnumToPositiveInt(value, "permits")); + return context.nil; + } + + @JRubyMethod(name = 
"available_permits") + public IRubyObject availablePermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.availablePermits()); + } + + @JRubyMethod(name = "drain_permits") + public IRubyObject drainPermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.drainPermits()); + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context) throws InterruptedException { + this.semaphore.acquire(1); + return context.nil; + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context) throws InterruptedException { + return getRuntime().newBoolean(semaphore.tryAcquire(1)); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits) throws InterruptedException { + return getRuntime().newBoolean(semaphore.tryAcquire(rubyFixnumToPositiveInt(permits, "permits"))); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits, IRubyObject timeout) throws InterruptedException { + return getRuntime().newBoolean( + semaphore.tryAcquire( + rubyFixnumToPositiveInt(permits, "permits"), + rubyNumericToLong(timeout, "timeout"), + java.util.concurrent.TimeUnit.SECONDS) + ); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context) { + this.semaphore.release(1); + return getRuntime().newBoolean(true); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context, IRubyObject value) { + this.semaphore.release(rubyFixnumToPositiveInt(value, "permits")); + return getRuntime().newBoolean(true); + } + + @JRubyMethod(name = "reduce_permits") + public IRubyObject reducePermits(ThreadContext context, IRubyObject reduction) throws InterruptedException { + this.semaphore.publicReducePermits(rubyFixnumToNonNegativeInt(reduction, "reduction")); + return context.nil; + } + + private int rubyFixnumInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be integer"); + } + } + + private int rubyFixnumToNonNegativeInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() >= 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a non-negative integer"); + } + } + + private int rubyFixnumToPositiveInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() > 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be an integer greater than zero"); + } + } + + private long rubyNumericToLong(IRubyObject value, String paramName) { + if (value instanceof RubyNumeric && ((RubyNumeric) value).getDoubleValue() > 0) { + RubyNumeric fixNum = (RubyNumeric) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a float greater than zero"); + } + } + + class JRubySemaphore extends Semaphore { + + public JRubySemaphore(int permits) { + super(permits); + } + + public JRubySemaphore(int permits, boolean value) { + super(permits, value); + } + + public void publicReducePermits(int i) { + reducePermits(i); + } + + } + } +} diff --git 
a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java new file mode 100644 index 0000000..bfcc0d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java @@ -0,0 +1,307 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBasicObject; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.RubyThread; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import sun.misc.Unsafe; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +public class SynchronizationLibrary implements Library { + + private static final Unsafe UNSAFE = loadUnsafe(); + private static final boolean FULL_FENCE = supportsFences(); + + private static Unsafe loadUnsafe() { + try { + Class ncdfe = Class.forName("sun.misc.Unsafe"); + Field f = ncdfe.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (Unsafe) f.get((java.lang.Object) null); + } catch (Exception var2) { + return null; + } catch (NoClassDefFoundError var3) { + return null; + } + } + + private static boolean supportsFences() { + if (UNSAFE == null) { + return false; + } else { + try { + Method m = UNSAFE.getClass().getDeclaredMethod("fullFence", new Class[0]); + if (m != null) { + return true; + } + } catch (Exception var1) { + // nothing + } + + return false; + } + } + + private static final ObjectAllocator JRUBY_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyObject(runtime, klazz); + } + }; + + private static final ObjectAllocator OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Object(runtime, klazz); + } + }; + + private static final ObjectAllocator ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new AbstractLockableObject(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBY_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyLockableObject(runtime, klazz); + } + }; + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule synchronizationModule = runtime. + defineModule("Concurrent"). 
+ defineModuleUnder("Synchronization"); + + RubyModule jrubyAttrVolatileModule = synchronizationModule.defineModuleUnder("JRubyAttrVolatile"); + jrubyAttrVolatileModule.defineAnnotatedMethods(JRubyAttrVolatile.class); + + defineClass(runtime, synchronizationModule, "AbstractObject", "JRubyObject", + JRubyObject.class, JRUBY_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "JRubyObject", "Object", + Object.class, OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "AbstractLockableObject", + AbstractLockableObject.class, ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "AbstractLockableObject", "JRubyLockableObject", + JRubyLockableObject.class, JRUBY_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "JRuby", + JRuby.class, new ObjectAllocator() { + @Override + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRuby(runtime, klazz); + } + }); + } + + private RubyClass defineClass( + Ruby runtime, + RubyModule namespace, + String parentName, + String name, + Class javaImplementation, + ObjectAllocator allocator) { + final RubyClass parentClass = namespace.getClass(parentName); + + if (parentClass == null) { + System.out.println("not found " + parentName); + throw runtime.newRuntimeError(namespace.toString() + "::" + parentName + " is missing"); + } + + final RubyClass newClass = namespace.defineClassUnder(name, parentClass, allocator); + newClass.defineAnnotatedMethods(javaImplementation); + return newClass; + } + + // Facts: + // - all ivar reads are without any synchronisation of fences see + // https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/VariableAccessor.java#L110-110 + // - writes depend on UnsafeHolder.U, null -> SynchronizedVariableAccessor, !null -> StampedVariableAccessor + // SynchronizedVariableAccessor wraps with synchronized block, StampedVariableAccessor uses fullFence or + // volatilePut + // TODO (pitr 16-Sep-2015): what do we do in Java 9 ? + + // module JRubyAttrVolatile + public static class JRubyAttrVolatile { + + // volatile threadContext is used as a memory barrier per the JVM memory model happens-before semantic + // on volatile fields. any volatile field could have been used but using the thread context is an + // attempt to avoid code elimination. + private static volatile int volatileField; + + @JRubyMethod(name = "full_memory_barrier", visibility = Visibility.PUBLIC) + public static IRubyObject fullMemoryBarrier(ThreadContext context, IRubyObject self) { + // Prevent reordering of ivar writes with publication of this instance + if (!FULL_FENCE) { + // Assuming that following volatile read and write is not eliminated it simulates fullFence. + // If it's eliminated it'll cause problems only on non-x86 platforms. 
+ // http://shipilev.net/blog/2014/jmm-pragmatics/#_happens_before_test_your_understanding + final int volatileRead = volatileField; + volatileField = context.getLine(); + } else { + UNSAFE.fullFence(); + } + return context.nil; + } + + @JRubyMethod(name = "instance_variable_get_volatile", visibility = Visibility.PUBLIC) + public static IRubyObject instanceVariableGetVolatile( + ThreadContext context, + IRubyObject self, + IRubyObject name) { + // Ensure we ses latest value with loadFence + if (!FULL_FENCE) { + // piggybacking on volatile read, simulating loadFence + final int volatileRead = volatileField; + return ((RubyBasicObject) self).instance_variable_get(context, name); + } else { + UNSAFE.loadFence(); + return ((RubyBasicObject) self).instance_variable_get(context, name); + } + } + + @JRubyMethod(name = "instance_variable_set_volatile", visibility = Visibility.PUBLIC) + public static IRubyObject InstanceVariableSetVolatile( + ThreadContext context, + IRubyObject self, + IRubyObject name, + IRubyObject value) { + // Ensure we make last update visible + if (!FULL_FENCE) { + // piggybacking on volatile write, simulating storeFence + final IRubyObject result = ((RubyBasicObject) self).instance_variable_set(name, value); + volatileField = context.getLine(); + return result; + } else { + // JRuby uses StampedVariableAccessor which calls fullFence + // so no additional steps needed. + // See https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/StampedVariableAccessor.java#L151-L159 + return ((RubyBasicObject) self).instance_variable_set(name, value); + } + } + } + + @JRubyClass(name = "JRubyObject", parent = "AbstractObject") + public static class JRubyObject extends RubyObject { + + public JRubyObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "Object", parent = "JRubyObject") + public static class Object extends JRubyObject { + + public Object(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "AbstractLockableObject", parent = "Object") + public static class AbstractLockableObject extends Object { + + public AbstractLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "JRubyLockableObject", parent = "AbstractLockableObject") + public static class JRubyLockableObject extends JRubyObject { + + public JRubyLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "synchronize", visibility = Visibility.PROTECTED) + public IRubyObject rubySynchronize(ThreadContext context, Block block) { + synchronized (this) { + return block.yield(context, null); + } + } + + @JRubyMethod(name = "ns_wait", optional = 1, visibility = Visibility.PROTECTED) + public IRubyObject nsWait(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.runtime; + if (args.length > 1) { + throw runtime.newArgumentError(args.length, 1); + } + Double timeout = null; + if (args.length > 0 && !args[0].isNil()) { + timeout = args[0].convertToFloat().getDoubleValue(); + if (timeout < 0) { + throw runtime.newArgumentError("time interval must be positive"); + } + } + if (Thread.interrupted()) { + throw runtime.newConcurrencyError("thread interrupted"); + } + boolean success = false; + try { + success = context.getThread().wait_timeout(this, timeout); + } catch (InterruptedException ie) { + throw runtime.newConcurrencyError(ie.getLocalizedMessage()); + } finally { + // An 
interrupt or timeout may have caused us to miss + // a notify that we consumed, so do another notify in + // case someone else is available to pick it up. + if (!success) { + this.notify(); + } + } + return this; + } + + @JRubyMethod(name = "ns_signal", visibility = Visibility.PROTECTED) + public IRubyObject nsSignal(ThreadContext context) { + notify(); + return this; + } + + @JRubyMethod(name = "ns_broadcast", visibility = Visibility.PROTECTED) + public IRubyObject nsBroadcast(ThreadContext context) { + notifyAll(); + return this; + } + } + + @JRubyClass(name = "JRuby") + public static class JRuby extends RubyObject { + public JRuby(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "sleep_interruptibly", visibility = Visibility.PUBLIC, module = true) + public static IRubyObject sleepInterruptibly(final ThreadContext context, IRubyObject receiver, final Block block) { + try { + context.getThread().executeBlockingTask(new RubyThread.BlockingTask() { + @Override + public void run() throws InterruptedException { + block.call(context); + } + + @Override + public void wakeup() { + context.getThread().getNativeThread().interrupt(); + } + }); + } catch (InterruptedException e) { + throw context.runtime.newThreadError("interrupted in Concurrent::Synchronization::JRuby.sleep_interruptibly"); + } + return context.nil; + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java new file mode 100644 index 0000000..e11e15a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java @@ -0,0 +1,31 @@ +package com.concurrent_ruby.ext.jsr166e; + +import java.util.Map; +import java.util.Set; + +public interface ConcurrentHashMap { + /** Interface describing a function of one argument */ + public interface Fun { T apply(A a); } + /** Interface describing a function of two arguments */ + public interface BiFun { T apply(A a, B b); } + + public V get(K key); + public V put(K key, V value); + public V putIfAbsent(K key, V value); + public V computeIfAbsent(K key, Fun mf); + public V computeIfPresent(K key, BiFun mf); + public V compute(K key, BiFun mf); + public V merge(K key, V value, BiFun mf); + public boolean replace(K key, V oldVal, V newVal); + public V replace(K key, V value); + public boolean containsKey(K key); + public boolean remove(Object key, Object value); + public V remove(K key); + public void clear(); + public Set> entrySet(); + public int size(); + public V getValueOrDefault(Object key, V defaultValue); + + public boolean containsValue(V value); + public K findKey(V value); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java new file mode 100644 index 0000000..86aa4eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java @@ -0,0 +1,3863 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + 
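The JavaSemaphoreLibrary hunk above exposes acquire, try_acquire (with an optional timeout in seconds) and release, all validated against positive permit counts. Assuming the public Concurrent::Semaphore sits on that backend under JRuby (an assumption, not stated in the hunk), a small concurrency-limiting sketch:

    require 'concurrent'

    # Assumption: Concurrent::Semaphore is backed by JavaSemaphore on JRuby.
    semaphore = Concurrent::Semaphore.new(2)      # at most two workers at once

    workers = Array.new(4) do |i|
      Thread.new do
        semaphore.acquire
        begin
          sleep 0.1                               # stand-in for real work
          puts "worker #{i} done"
        ensure
          semaphore.release
        end
      end
    end

    workers.each(&:join)
    puts semaphore.available_permits              # => 2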
+// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *

A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

    + *
+ *   • forEach: Perform a given action on each element.
+ *     A variant form applies a given transformation on each element
+ *     before performing the action.
+ *
+ *   • search: Return the first available non-null result of
+ *     applying a given function on each element; skipping further
+ *     search when a result is found.
+ *
+ *   • reduce: Accumulate each element. The supplied reduction
+ *     function cannot rely on ordering (more formally, it should be
+ *     both associative and commutative). There are five variants:
+ *
+ *       • Plain reductions. (There is not a form of this method for
+ *         (key, value) function arguments since there is no corresponding
+ *         return type.)
+ *
+ *       • Mapped reductions that accumulate the results of a given
+ *         function applied to each element.
+ *
+ *       • Reductions to scalar doubles, longs, and ints, using a
+ *         given basis value.

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
+     *     this.s = s; this.depth = depth; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int s = split >>> 1; s > 0; s >>>= 1)
+     *       (subtasks = new SumValues(s.split(), s, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
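// Illustrative sketch, not part of the patched file: reproduces the bin-size frequency
// table quoted above, i.e. a Poisson distribution with parameter 0.5,
// P(k) = exp(-0.5) * 0.5^k / k!. The class name is hypothetical.
class PoissonBinSizes {
    public static void main(String[] args) {
        double p = Math.exp(-0.5);            // k = 0 term: 0.60653066
        for (int k = 0; k <= 8; k++) {
            System.out.printf("%d: %.8f%n", k, p);
            p *= 0.5 / (k + 1);               // advance to the next term of the series
        }
    }
}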
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
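// Illustrative sketch, not part of the patched file: the resizing property noted above --
// with power-of-two expansion, an entry either keeps its index or moves by exactly the
// old table size, depending on a single hash bit. The class name is hypothetical.
class ResizeIndexSketch {
    public static void main(String[] args) {
        int n = 16;                                   // old table size (a power of two)
        for (int h : new int[] {7, 23, 1000, 0x3fffffff}) {
            int oldIndex = h & (n - 1);               // bin in the old table
            int newIndex = h & (2 * n - 1);           // bin after doubling
            // the entry stays put when (h & n) == 0, otherwise it moves by exactly n
            System.out.println(h + ": " + oldIndex + " -> " + newIndex
                               + " (moved by " + (newIndex - oldIndex) + ")");
        }
    }
}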
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
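// Illustrative sketch, not part of the patched file: shows that the integer expression
// n - (n >>> 2), mentioned above for the resizing threshold, equals 0.75 * n for the
// power-of-two capacities this table actually uses. The class name is hypothetical.
class ThresholdSketch {
    public static void main(String[] args) {
        for (int n = 16; n <= 1 << 30 && n > 0; n <<= 1) {
            int threshold = n - (n >>> 2);            // capacity minus a quarter
            System.out.println(n + " -> " + threshold + " (0.75*n = " + (long) (0.75 * n) + ")");
        }
    }
}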
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node[] table; + + /** + * The counter maintaining number of elements. + */ + private transient final LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(Node[] tab, int i) { // used by Iter + return (Node)UNSAFE.getObjectVolatile(tab, ((long)i< 1 ? 64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. 
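// Illustrative sketch, not part of the patched file: checks the relationships among the
// special hash-field encodings defined above -- the top two bits carry control state
// (forwarding / locked / locked-with-waiter) and HASH_BITS masks them off. The class
// name is hypothetical; the constant values are copied from the code above.
class HashBitsSketch {
    static final int MOVED     = 0x80000000;   // top bits 10: forwarding node
    static final int LOCKED    = 0x40000000;   // top bits 01: locked
    static final int WAITING   = 0xc0000000;   // top bits 11: locked, waiter present
    static final int HASH_BITS = 0x3fffffff;   // lower 30 bits: the usable hash

    public static void main(String[] args) {
        System.out.println(WAITING == (MOVED | LOCKED));        // true: both control bits set
        System.out.println((MOVED & HASH_BITS) == 0);           // true: control bits only
        int h = "some key".hashCode() & HASH_BITS;              // a "normal" node hash
        System.out.println((h & (MOVED | LOCKED)) == 0);        // true: top two bits clear
        System.out.println(Integer.toBinaryString(h | LOCKED)); // same hash with LOCKED set
    }
}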
+ */ + final void tryAwaitLock(Node[] tab, int i) { + if (tab != null && i >= 0 && i < tab.length) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + + // Unsafe mechanics for casHash + private static final sun.misc.Unsafe UNSAFE; + private static final long hashOffset; + + static { + try { + UNSAFE = getUnsafe(); + Class k = Node.class; + hashOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("hash")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) 
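// Illustrative sketch, not part of the patched file: a plain-Java comparison method
// illustrating the TreeBin ordering rule described above -- primarily by hash, then by
// class name, then by compareTo() among elements of the same class. (The vendored code
// applies the same idea to RubyObject keys via "<=>".) Names are hypothetical.
class TreeBinOrderSketch {
    @SuppressWarnings({"unchecked", "rawtypes"})
    static int treeOrder(int hashA, Object a, int hashB, Object b) {
        if (hashA != hashB)
            return hashA < hashB ? -1 : 1;                            // 1. hash value
        int byClass = a.getClass().getName().compareTo(b.getClass().getName());
        if (byClass != 0)
            return byClass;                                           // 2. class name
        if (a instanceof Comparable && a.getClass() == b.getClass())
            return ((Comparable) a).compareTo(b);                     // 3. natural order
        return 0;                                                     // tied: search both subtrees
    }

    public static void main(String[] args) {
        System.out.println(treeOrder(5, "apple", 5, "pear"));         // same hash: compareTo decides
        System.out.println(treeOrder(9, "apple", 5, "pear"));         // different hash wins
    }
}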
The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (Node[] tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
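// Illustrative sketch, not part of the patched file: what spread() above does -- it folds
// higher-order bits downward and clears the top two (control) bits, so keys whose hash
// codes differ only in high bits are more likely to land in different bins of a small
// table. The class name is hypothetical; the transform is copied from the code above.
class SpreadSketch {
    static final int HASH_BITS = 0x3fffffff;

    static int spread(int h) {
        h ^= (h >>> 18) ^ (h >>> 12);
        return (h ^ (h >>> 10)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int mask = 16 - 1;                          // a small power-of-two table
        int a = 0x10000000, b = 0x20000000;         // differ only in high bits
        System.out.println((a & mask) + " vs " + (b & mask));                 // collide: 0 vs 0
        System.out.println((spread(a) & mask) + " vs " + (spread(b) & mask)); // now distinct
    }
}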
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + 
replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } 
finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for 
the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + Node[] tab; int n, sc; + while ((tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
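// Illustrative sketch, not part of the patched file: the bit-smearing trick used by
// tableSizeFor() above -- OR-ing in shifted copies fills every bit below the highest set
// bit, so n + 1 is the next power of two at or above the requested capacity. The class
// name is hypothetical; the logic mirrors the code above.
class TableSizeForSketch {
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= (1 << 30)) ? (1 << 30) : n + 1;
    }

    public static void main(String[] args) {
        for (int c : new int[] {1, 2, 15, 16, 17, 1000})
            System.out.println(c + " -> " + tableSizeFor(c));   // 1, 2, 16, 16, 32, 1024
    }
}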
+ * + * @return the new table + */ + private static final Node[] rebuild(Node[] tab) { + int n = tab.length; + Node[] nextTab = new Node[n << 1]; + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. + */ + private static void splitBin(Node[] nextTab, int i, Node e) { + int bit = nextTab.length >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. 
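// Illustrative sketch, not part of the patched file: the node-reuse idea behind splitBin()
// above -- the trailing run of nodes whose split bit does not change can be kept as-is in
// the new table, so only the nodes before that run are cloned. Names and sample hashes
// are hypothetical.
class LastRunSketch {
    public static void main(String[] args) {
        int bit = 16;                                    // the new bit being split on
        int[] hashes = {3, 19, 7, 23, 55, 51};           // the last three all have (h & 16) set
        int runBit = hashes[0] & bit, lastRun = 0;
        for (int i = 1; i < hashes.length; i++) {
            int b = hashes[i] & bit;
            if (b != runBit) { runBit = b; lastRun = i; }
        }
        // Nodes from index lastRun onward all go to the same half and can be reused;
        // only hashes[0 .. lastRun-1] need to be copied into fresh nodes.
        System.out.println("clone " + lastRun + " nodes, reuse " + (hashes.length - lastRun));
    }
}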
+ */ + private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { + int bit = nextTab.length >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. 
Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + Node[] tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; Node[] t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? 
i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. 
+ * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
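+     * A minimal usage sketch (the map, key, and values below are illustrative
+     * placeholders, not part of this class):
+     *
+     * <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> m = new ConcurrentHashMapV8<String, Integer>();
+     * m.put("answer", Integer.valueOf(42));                 // returns null (no previous mapping)
+     * Integer prev = m.put("answer", Integer.valueOf(43));  // returns 42, the replaced value
+     * }</pre>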

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

 {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
 {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}
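+     *
+     * A more concrete sketch, using illustrative types (a map from
+     * {@code String} to {@code Integer}; the key is a placeholder):
+     *
+     * <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> cache = new ConcurrentHashMapV8<String, Integer>();
+     * Integer len = cache.computeIfAbsent("hello", new Fun<String, Integer>() {
+     *   // computed only if the key is absent; a non-null result is cached in the map
+     *   public Integer apply(String k) { return Integer.valueOf(k.length()); }});
+     * }</pre>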
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
 {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
 {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
 {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *     return (v == null) ? msg : v + msg; }});}
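+     *
+     * or, as another illustrative sketch, to maintain a per-key counter in a
+     * map from {@code Key} to {@code Integer} (the {@code hits} map and
+     * {@code key} are placeholders):
+     *
+     * <pre> {@code
+     * hits.compute(key, new BiFun<Key, Integer, Integer>() {
+     *   public Integer apply(Key k, Integer v) {
+     *     // v is null when the key is absent; returning null here would remove the mapping
+     *     return (v == null) ? Integer.valueOf(1) : Integer.valueOf(v.intValue() + 1);
+     *   }});}</pre>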
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
 {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == null) { + init = true; + Node[] tab = new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
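+         *
+         * A typical removal-while-iterating sketch (the {@code map} here is an
+         * illustrative {@code ConcurrentHashMapV8<String, Integer>}):
+         *
+         * <pre> {@code
+         * for (Iterator<Map.Entry<String, Integer>> it = map.entrySet().iterator();
+         *      it.hasNext();) {
+         *   Map.Entry<String, Integer> e = it.next();
+         *   if (e.getValue().intValue() == 0)
+         *     it.remove();   // removes the corresponding mapping from the map
+         * }
+         * }</pre>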
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long counterOffset; + private static final long sizeCtlOffset; + private static final long ABASE; + private static final int ASHIFT; + + static { + int ss; + try { + UNSAFE = getUnsafe(); + Class k = ConcurrentHashMapV8.class; + counterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("counter")); + sizeCtlOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + Class sc = Node[].class; + ABASE = UNSAFE.arrayBaseOffset(sc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java new file mode 100644 index 0000000..47a923c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java @@ -0,0 +1,203 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *

This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
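+ * For example, a shared hit counter might be kept as in the following
+ * sketch (the field and method names are illustrative):
+ *
+ * <pre> {@code
+ * static final LongAdder hits = new LongAdder();
+ *
+ * void onHit()     { hits.increment(); }  // contended updates stripe across internal cells
+ * long hitsSoFar() { return hits.sum(); } // not an atomic snapshot under concurrent adds
+ * }</pre>
+ *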

This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *

jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java new file mode 100644 index 0000000..93a277f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java @@ -0,0 +1,342 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. 
Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. 
+ */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java new file mode 100644 index 0000000..b7fc5a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java @@ -0,0 +1,3800 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. 
However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *
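+ * As a rough illustration of these sizing hints (assuming the three-argument
+ * constructor form described above; the types and numbers are arbitrary):
+ * {@code
+ *   // expecting roughly 10000 mappings; a size estimate avoids several
+ *   // early resizes
+ *   ConcurrentHashMapV8<String, Long> m =
+ *       new ConcurrentHashMapV8<String, Long>(10000, 0.75f, 16);
+ * }
+ *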

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *
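+ * A small sketch of the set-projection idea (the element type and values are
+ * arbitrary; assumes {@code newKeySet} behaves as linked above): {@code
+ *   Set<String> seen = ConcurrentHashMapV8.newKeySet();
+ *   boolean added = seen.add("id-42");   // false if already present
+ * }
+ *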

A ConcurrentHashMapV8 can be used as a scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *
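+ * A slightly fuller sketch of that idiom (the word source {@code words} is
+ * assumed): {@code
+ *   ConcurrentHashMapV8<String, LongAdder> freqs =
+ *       new ConcurrentHashMapV8<String, LongAdder>();
+ *   for (String word : words)
+ *     freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
+ *   LongAdder n = freqs.get("the");      // null if never counted
+ * }
+ *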

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *
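+ * For example, for any map {@code m} of this class (illustrative only):
+ * {@code
+ *   m.put("k", null);   // throws NullPointerException
+ *   m.get(null);        // throws NullPointerException
+ * }
+ *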

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

+ * + *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *
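+ * For example, where JDK8 code would pass a lambda, callers here can use the
+ * nested {@code Fun} shape (the map, key and loader below are illustrative
+ * assumptions): {@code
+ *   cache.computeIfAbsent(key, new Fun<String, Value>() {
+ *     public Value apply(String k) { return loadValue(k); }
+ *   });
+ * }
+ *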

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int half = split >>> 1; half > 0; half >>>= 1)
+     *       (subtasks = new SumValues(s.split(), half, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
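+ *
+ * For reference, these values line up with the bit encoding described in the
+ * Overview: MOVED has top bits 10 (forwarding node), LOCKED has 01, WAITING
+ * has 11 (locked with a possible waiter), and HASH_BITS masks the remaining
+ * 30 bits of a normal hash.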
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile AtomicReferenceArray table; + + /** + * The counter maintaining number of elements. + */ + private transient LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(AtomicReferenceArray tab, int i) { // used by Iter + return tab.get(i); + } + + private static final boolean casTabAt(AtomicReferenceArray tab, int i, Node c, Node v) { + return tab.compareAndSet(i, c, v); + } + + private static final void setTabAt(AtomicReferenceArray tab, int i, Node v) { + tab.set(i, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. + */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return HASH_UPDATER.compareAndSet(this, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 
64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. + */ + final void tryAwaitLock(AtomicReferenceArray tab, int i) { + if (tab != null && i >= 0 && i < tab.length()) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. 
For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 
0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? 
null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(AtomicReferenceArray tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (AtomicReferenceArray tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (AtomicReferenceArray)ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
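+ * Roughly how the four public wrappers are expected to map onto this method
+ * (orientation only):
+ *   remove(k)              -> internalReplace(k, null, null)
+ *   remove(k, v)           -> internalReplace(k, null, v)
+ *   replace(k, v)          -> internalReplace(k, v, null)
+ *   replace(k, oldV, newV) -> internalReplace(k, newV, oldV)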
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
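+ *
+ * Public-to-internal mapping, for orientation:
+ *   put             -> internalPut
+ *   putIfAbsent     -> internalPutIfAbsent
+ *   computeIfAbsent -> internalComputeIfAbsent
+ *   compute         -> internalCompute
+ *   merge           -> internalMerge
+ *   putAll          -> internalPutAll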
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new 
Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + 
try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + 
/** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final AtomicReferenceArray initTable() { + AtomicReferenceArray tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + AtomicReferenceArray tab; int n, sc; + while ((tab = table) != null && + (n = tab.length()) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + AtomicReferenceArray tab = table; int n; + if (tab == null || (n = tab.length()) == 0) { + n = (sc > c) ? sc : c; + if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
+ * + * @return the new table + */ + private static final AtomicReferenceArray rebuild(AtomicReferenceArray tab) { + int n = tab.length(); + AtomicReferenceArray nextTab = new AtomicReferenceArray(n << 1); + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. 
+ */ + private static void splitBin(AtomicReferenceArray nextTab, int i, Node e) { + int bit = nextTab.length() >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. + */ + private static void splitTreeBin(AtomicReferenceArray nextTab, int i, TreeBin t) { + int bit = nextTab.length() >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + AtomicReferenceArray tab = table; + while (tab != null && i < tab.length()) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). 
Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + AtomicReferenceArray tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; AtomicReferenceArray t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length(); + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. 
+ */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + AtomicReferenceArray t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length(); + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length(); + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (AtomicReferenceArray)ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. 
+ * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

<pre> {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
<pre> {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}</pre>
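+     *
+     * As a further illustrative sketch only (the generic parameters of
+     * {@code Fun} and of the map are assumed here, since they do not
+     * appear elsewhere in this listing), the same idiom can back a small
+     * memoizing cache of string lengths:
+     *
+     * <pre> {@code
+     * final ConcurrentHashMapV8<String, Integer> cache =
+     *     new ConcurrentHashMapV8<String, Integer>();
+     * Integer n = cache.computeIfAbsent("some key", new Fun<String, Integer>() {
+     *   public Integer apply(String k) { return Integer.valueOf(k.length()); }});}</pre>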
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
<pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
<pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
<pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *    return (v == null) ? msg : v + msg;}});}</pre>
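+     *
+     * Another sketch (again with assumed generic parameters and a
+     * hypothetical {@code ConcurrentHashMapV8<Key, Integer> counts}) uses
+     * {@code compute} as an atomic per-key counter:
+     *
+     * <pre> {@code
+     * counts.compute(key, new BiFun<Key, Integer, Integer>() {
+     *   public Integer apply(Key k, Integer v) {
+     *     return Integer.valueOf(v == null ? 1 : v.intValue() + 1); }});}</pre>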
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
<pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + this.counter = new LongAdder(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == null) { + init = true; + AtomicReferenceArray tab = new AtomicReferenceArray(n); + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + AtomicReferenceArray tab = table; + for (int i = 0; i < tab.length(); ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *
<p>
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java new file mode 100644 index 0000000..ecf552a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java @@ -0,0 +1,204 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
<p>
This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
<p>
This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *
<p>
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java new file mode 100644 index 0000000..f521642 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java @@ -0,0 +1,291 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. 
If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + + static AtomicLongFieldUpdater VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); + + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return VALUE_UPDATER.compareAndSet(this, cmp, val); + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. 
+ */ + transient volatile int busy; + + AtomicLongFieldUpdater BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); + AtomicIntegerFieldUpdater BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return BASE_UPDATER.compareAndSet(this, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return BUSY_UPDATER.compareAndSet(this, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java new file mode 100644 index 0000000..3ea409f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java @@ -0,0 +1,199 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.16 version + +package com.concurrent_ruby.ext.jsr166y; + +import java.util.Random; + +/** + * A random number generator isolated to the current thread. Like the + * global {@link java.util.Random} generator used by the {@link + * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized + * with an internally generated seed that may not otherwise be + * modified. When applicable, use of {@code ThreadLocalRandom} rather + * than shared {@code Random} objects in concurrent programs will + * typically encounter much less overhead and contention. Use of + * {@code ThreadLocalRandom} is particularly appropriate when multiple + * tasks (for example, each a {@link ForkJoinTask}) use random numbers + * in parallel in thread pools. + * + *
<p>
Usages of this class should typically be of the form: + * {@code ThreadLocalRandom.current().nextX(...)} (where + * {@code X} is {@code Int}, {@code Long}, etc). + * When all usages are of this form, it is never possible to + * accidentally share a {@code ThreadLocalRandom} across multiple threads. + * + *
<p>
This class also provides additional commonly used bounded random + * generation methods. + * + * @since 1.7 + * @author Doug Lea + */ +public class ThreadLocalRandom extends Random { + // same constants as Random, but must be redeclared because private + private static final long multiplier = 0x5DEECE66DL; + private static final long addend = 0xBL; + private static final long mask = (1L << 48) - 1; + + /** + * The random seed. We can't use super.seed. + */ + private long rnd; + + /** + * Initialization flag to permit calls to setSeed to succeed only + * while executing the Random constructor. We can't allow others + * since it would cause setting seed in one part of a program to + * unintentionally impact other usages by the thread. + */ + boolean initialized; + + // Padding to help avoid memory contention among seed updates in + // different TLRs in the common case that they are located near + // each other. + private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; + + /** + * The actual ThreadLocal + */ + private static final ThreadLocal localRandom = + new ThreadLocal() { + protected ThreadLocalRandom initialValue() { + return new ThreadLocalRandom(); + } + }; + + + /** + * Constructor called only by localRandom.initialValue. + */ + ThreadLocalRandom() { + super(); + initialized = true; + } + + /** + * Returns the current thread's {@code ThreadLocalRandom}. + * + * @return the current thread's {@code ThreadLocalRandom} + */ + public static ThreadLocalRandom current() { + return localRandom.get(); + } + + /** + * Throws {@code UnsupportedOperationException}. Setting seeds in + * this generator is not supported. + * + * @throws UnsupportedOperationException always + */ + public void setSeed(long seed) { + if (initialized) + throw new UnsupportedOperationException(); + rnd = (seed ^ multiplier) & mask; + } + + protected int next(int bits) { + rnd = (rnd * multiplier + addend) & mask; + return (int) (rnd >>> (48-bits)); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @throws IllegalArgumentException if least greater than or equal + * to bound + * @return the next value + */ + public int nextInt(int least, int bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextInt(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public long nextLong(long n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + // Divide n by two until small enough for nextInt. On each + // iteration (at most 31 of them but usually much less), + // randomly choose both whether to include high bit in result + // (offset) and whether to continue with the lower vs upper + // half (which makes a difference only if odd). + long offset = 0; + while (n >= Integer.MAX_VALUE) { + int bits = next(2); + long half = n >>> 1; + long nextn = ((bits & 2) == 0) ? half : n - half; + if ((bits & 1) == 0) + offset += n - nextn; + n = nextn; + } + return offset + nextInt((int) n); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). 
+ * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public long nextLong(long least, long bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextLong(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed {@code double} value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public double nextDouble(double n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + return nextDouble() * n; + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public double nextDouble(double least, double bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextDouble() * (bound - least) + least; + } + + private static final long serialVersionUID = -5851777807851030925L; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent-ruby.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent-ruby.rb new file mode 100644 index 0000000..08917e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent-ruby.rb @@ -0,0 +1 @@ +require_relative "./concurrent" diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent.rb new file mode 100644 index 0000000..87de46f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent.rb @@ -0,0 +1,134 @@ +require 'concurrent/version' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' + +require 'concurrent/atomics' +require 'concurrent/executors' +require 'concurrent/synchronization' + +require 'concurrent/atomic/atomic_markable_reference' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/agent' +require 'concurrent/atom' +require 'concurrent/array' +require 'concurrent/hash' +require 'concurrent/set' +require 'concurrent/map' +require 'concurrent/tuple' +require 'concurrent/async' +require 'concurrent/dataflow' +require 'concurrent/delay' +require 'concurrent/exchanger' +require 'concurrent/future' +require 'concurrent/immutable_struct' +require 'concurrent/ivar' +require 'concurrent/maybe' +require 'concurrent/mutable_struct' +require 'concurrent/mvar' +require 'concurrent/promise' +require 'concurrent/scheduled_task' +require 'concurrent/settable_struct' +require 'concurrent/timer_task' +require 'concurrent/tvar' +require 'concurrent/promises' + +require 'concurrent/thread_safe/synchronized_delegator' +require 'concurrent/thread_safe/util' + +require 'concurrent/options' + +# @!macro internal_implementation_note +# +# @note **Private Implementation:** This abstraction is a private, internal +# implementation detail. It should never be used directly. 
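+
+# A small, illustrative sanity check of the umbrella `require` list above
+# (not part of the library; the classes come from the files required above):
+#
+#   require 'concurrent'
+#
+#   ivar = Concurrent::IVar.new   # from 'concurrent/ivar'
+#   ivar.set(42)
+#   ivar.value                    #=> 42
+#
+#   map = Concurrent::Map.new     # from 'concurrent/map'
+#   map[:answer] = 42
+#   map[:answer]                  #=> 42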
+ +# @!macro monotonic_clock_warning +# +# @note Time calculations on all platforms and languages are sensitive to +# changes to the system clock. To alleviate the potential problems +# associated with changing the system clock while an application is running, +# most modern operating systems provide a monotonic clock that operates +# independently of the system clock. A monotonic clock cannot be used to +# determine human-friendly clock times. A monotonic clock is used exclusively +# for calculating time intervals. Not all Ruby platforms provide access to an +# operating system monotonic clock. On these platforms a pure-Ruby monotonic +# clock will be used as a fallback. An operating system monotonic clock is both +# faster and more reliable than the pure-Ruby implementation. The pure-Ruby +# implementation should be fast and reliable enough for most non-realtime +# operations. At this time the common Ruby platforms that provide access to an +# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions). +# +# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3) + +# @!macro copy_options +# +# ## Copy Options +# +# Object references in Ruby are mutable. This can lead to serious +# problems when the {#value} of an object is a mutable reference. Which +# is always the case unless the value is a `Fixnum`, `Symbol`, or similar +# "primitive" data type. Each instance can be configured with a few +# options that can help protect the program from potentially dangerous +# operations. Each of these options can be optionally set when the object +# instance is created: +# +# * `:dup_on_deref` When true the object will call the `#dup` method on +# the `value` object every time the `#value` method is called +# (default: false) +# * `:freeze_on_deref` When true the object will call the `#freeze` +# method on the `value` object every time the `#value` method is called +# (default: false) +# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run +# every time the `#value` method is called. The `Proc` will be given +# the current `value` as its only argument and the result returned by +# the block will be the return value of the `#value` call. When `nil` +# this option will be ignored (default: nil) +# +# When multiple deref options are set the order of operations is strictly defined. +# The order of deref operations is: +# * `:copy_on_deref` +# * `:dup_on_deref` +# * `:freeze_on_deref` +# +# Because of this ordering there is no need to `#freeze` an object created by a +# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`. +# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is +# as close to the behavior of a "pure" functional language (like Erlang, Clojure, +# or Haskell) as we are likely to get in Ruby. + +# @!macro deref_options +# +# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before +# returning the data from {#value} +# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before +# returning the data from {#value} +# @option opts [Proc] :copy_on_deref (nil) When calling the {#value} +# method, call the given proc passing the internal value as the sole +# argument then return the new value returned from the proc. + +# @!macro executor_and_deref_options +# +# @param [Hash] opts the options used to define the behavior at update and deref +# and to specify the executor on which to perform actions +# @option opts [Executor] :executor when set use the given `Executor` instance. 
+# Three special values are also supported: `:io` returns the global pool for +# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast +# operations, and `:immediate` returns the global `ImmediateExecutor` object. +# @!macro deref_options + +# @!macro warn.edge +# @api Edge +# @note **Edge Features** are under active development and may change frequently. +# +# - Deprecations are not added before incompatible changes. +# - Edge version: _major_ is always 0, _minor_ bump means incompatible change, +# _patch_ bump means compatible change. +# - Edge features may also lack tests and documentation. +# - Features developed in `concurrent-ruby-edge` are expected to move +# to `concurrent-ruby` when finalised. + + +# {include:file:README.md} +module Concurrent +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/agent.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/agent.rb new file mode 100644 index 0000000..815dca0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/agent.rb @@ -0,0 +1,587 @@ +require 'concurrent/configuration' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/thread_local_var' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents) + # function. An agent is a shared, mutable variable providing independent, + # uncoordinated, *asynchronous* change of individual values. Best used when + # the value will undergo frequent, complex updates. Suitable when the result + # of an update does not need to be known immediately. `Agent` is (mostly) + # functionally equivalent to Clojure's agent, except where the runtime + # prevents parity. + # + # Agents are reactive, not autonomous - there is no imperative message loop + # and no blocking receive. The state of an Agent should be itself immutable + # and the `#value` of an Agent is always immediately available for reading by + # any thread without any messages, i.e. observation does not require + # cooperation or coordination. + # + # Agent action dispatches are made using the various `#send` methods. These + # methods always return immediately. At some point later, in another thread, + # the following will happen: + # + # 1. The given `action` will be applied to the state of the Agent and the + # `args`, if any were supplied. + # 2. The return value of `action` will be passed to the validator lambda, + # if one has been set on the Agent. + # 3. If the validator succeeds or if no validator was given, the return value + # of the given `action` will become the new `#value` of the Agent. See + # `#initialize` for details. + # 4. If any observers were added to the Agent, they will be notified. See + # `#add_observer` for details. + # 5. If during the `action` execution any other dispatches are made (directly + # or indirectly), they will be held until after the `#value` of the Agent + # has been changed. + # + # If any exceptions are thrown by an action function, no nested dispatches + # will occur, and the exception will be cached in the Agent itself. When an + # Agent has errors cached, any subsequent interactions will immediately throw + # an exception, until the agent's errors are cleared. Agent errors can be + # examined with `#error` and the agent restarted with `#restart`. 
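+ #
+ # A minimal sketch of that failure/restart cycle (illustrative only; it uses
+ # only methods documented on this class):
+ #
+ # ```
+ # agent = Concurrent::Agent.new(0, error_mode: :fail)
+ #
+ # agent.send { |_value| raise 'boom' } # the action raises
+ # agent.await_for(1)                   # give the action time to run
+ #
+ # agent.failed?            #=> true
+ # agent.error              #=> #<RuntimeError: boom>
+ # agent.send { |v| v + 1 } #=> false, new dispatches are rejected
+ #
+ # agent.restart(0)         # un-fail the agent
+ # agent.send { |v| v + 1 } #=> true, dispatches are accepted again
+ # ```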
+ # + # The actions of all Agents get interleaved amongst threads in a thread pool. + # At any point in time, at most one action for each Agent is being executed. + # Actions dispatched to an agent from another single agent or thread will + # occur in the order they were sent, potentially interleaved with actions + # dispatched to the same agent from other sources. The `#send` method should + # be used for actions that are CPU limited, while the `#send_off` method is + # appropriate for actions that may block on IO. + # + # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an agent with an initial value + # agent = Concurrent::Agent.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # agent.send{|set| next_fibonacci(set) } + # end + # + # # wait for them to complete + # agent.await + # + # # get the current value + # agent.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Agents support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time an action dispatch returns and + # the new value is successfully validated. Observation will *not* occur if the + # action raises an exception, if validation fails, or when a {#restart} occurs. + # + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Agent when the action began + # processing. The `new_value` is the value to which the Agent was set when the + # action completed. Note that `old_value` and `new_value` may be the same. + # This is not an error. It simply means that the action returned the same + # value. + # + # ## Nested Actions + # + # It is possible for an Agent action to post further actions back to itself. + # The nested actions will be enqueued normally then processed *after* the + # outer action completes, in the order they were sent, possibly interleaved + # with action dispatches from other threads. Nested actions never deadlock + # with one another and a failure in a nested action will never affect the + # outer action. + # + # Nested actions can be called using the Agent reference from the enclosing + # scope or by passing the reference in as a "send" argument. Nested actions + # cannot be post using `self` from within the action block/proc/lambda; `self` + # in this context will not reference the Agent. The preferred method for + # dispatching nested actions is to pass the Agent as an argument. This allows + # Ruby to more effectively manage the closing scope. + # + # Prefer this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send(agent) do |value, this| + # this.send {|v| v + 42 } + # 3.14 + # end + # agent.value #=> 45.14 + # ``` + # + # Over this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send do |value| + # agent.send {|v| v + 42 } + # 3.14 + # end + # ``` + # + # @!macro agent_await_warning + # + # **NOTE** Never, *under any circumstances*, call any of the "await" methods + # ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action + # block/proc/lambda. The call will block the Agent and will always fail. 
+ # Calling either {#await} or {#wait} (with a timeout of `nil`) will + # hopelessly deadlock the Agent with no possibility of recovery. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/Agents Clojure Agents + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Agent < Synchronization::LockableObject + include Concern::Observable + + ERROR_MODES = [:continue, :fail].freeze + private_constant :ERROR_MODES + + AWAIT_FLAG = ::Object.new + private_constant :AWAIT_FLAG + + AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG } + private_constant :AWAIT_ACTION + + DEFAULT_ERROR_HANDLER = ->(agent, error) { nil } + private_constant :DEFAULT_ERROR_HANDLER + + DEFAULT_VALIDATOR = ->(value) { true } + private_constant :DEFAULT_VALIDATOR + + Job = Struct.new(:action, :args, :executor, :caller) + private_constant :Job + + # Raised during action processing or any other time in an Agent's lifecycle. + class Error < StandardError + def initialize(message = nil) + message ||= 'agent must be restarted before jobs can post' + super(message) + end + end + + # Raised when a new value obtained during action processing or at `#restart` + # fails validation. + class ValidationError < Error + def initialize(message = nil) + message ||= 'invalid value' + super(message) + end + end + + # The error mode this Agent is operating in. See {#initialize} for details. + attr_reader :error_mode + + # Create a new `Agent` with the given initial value and options. + # + # The `:validator` option must be `nil` or a side-effect free proc/lambda + # which takes one argument. On any intended value change the validator, if + # provided, will be called. If the new value is invalid the validator should + # return `false` or raise an error. + # + # The `:error_handler` option must be `nil` or a proc/lambda which takes two + # arguments. When an action raises an error or validation fails, either by + # returning false or raising an error, the error handler will be called. The + # arguments to the error handler will be a reference to the agent itself and + # the error object which was raised. + # + # The `:error_mode` may be either `:continue` (the default if an error + # handler is given) or `:fail` (the default if error handler nil or not + # given). + # + # If an action being run by the agent throws an error or doesn't pass + # validation the error handler, if present, will be called. After the + # handler executes if the error mode is `:continue` the Agent will continue + # as if neither the action that caused the error nor the error itself ever + # happened. + # + # If the mode is `:fail` the Agent will become {#failed?} and will stop + # accepting new action dispatches. Any previously queued actions will be + # held until {#restart} is called. The {#value} method will still work, + # returning the value of the Agent before the error. + # + # @param [Object] initial the initial value + # @param [Hash] opts the configuration options + # + # @option opts [Symbol] :error_mode either `:continue` or `:fail` + # @option opts [nil, Proc] :error_handler the (optional) error handler + # @option opts [nil, Proc] :validator the (optional) validation procedure + def initialize(initial, opts = {}) + super() + synchronize { ns_initialize(initial, opts) } + end + + # The current value (state) of the Agent, irrespective of any pending or + # in-progress actions. The value is always available and is non-blocking. 
+ # + # @return [Object] the current value + def value + @current.value # TODO (pitr 12-Sep-2015): broken unsafe read? + end + + alias_method :deref, :value + + # When {#failed?} and {#error_mode} is `:fail`, returns the error object + # which caused the failure, else `nil`. When {#error_mode} is `:continue` + # will *always* return `nil`. + # + # @return [nil, Error] the error which caused the failure when {#failed?} + def error + @error.value + end + + alias_method :reason, :error + + # @!macro agent_send + # + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Action dispatches are only allowed when the Agent + # is not {#failed?}. + # + # The action must be a block/proc/lambda which takes 1 or more arguments. + # The first argument is the current {#value} of the Agent. Any arguments + # passed to the send method via the `args` parameter will be passed to the + # action as the remaining arguments. The action must return the new value + # of the Agent. + # + # * {#send} and {#send!} should be used for actions that are CPU limited + # * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that + # may block on IO + # * {#send_via} and {#send_via!} are used when a specific executor is to + # be used for the action + # + # @param [Array] args zero or more arguments to be passed to + # the action + # @param [Proc] action the action dispatch to be enqueued + # + # @yield [agent, value, *args] process the old value and return the new + # @yieldparam [Object] value the current {#value} of the Agent + # @yieldparam [Array] args zero or more arguments to pass to the + # action + # @yieldreturn [Object] the new value of the Agent + # + # @!macro send_return + # @return [Boolean] true if the action is successfully enqueued, false if + # the Agent is {#failed?} + def send(*args, &action) + enqueue_action_job(action, args, Concurrent.global_fast_executor) + end + + # @!macro agent_send + # + # @!macro send_bang_return_and_raise + # @return [Boolean] true if the action is successfully enqueued + # @raise [Concurrent::Agent::Error] if the Agent is {#failed?} + def send!(*args, &action) + raise Error.new unless send(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + def send_off(*args, &action) + enqueue_action_job(action, args, Concurrent.global_io_executor) + end + + alias_method :post, :send_off + + # @!macro agent_send + # @!macro send_bang_return_and_raise + def send_off!(*args, &action) + raise Error.new unless send_off(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via(executor, *args, &action) + enqueue_action_job(action, args, executor) + end + + # @!macro agent_send + # @!macro send_bang_return_and_raise + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via!(executor, *args, &action) + raise Error.new unless send_via(executor, *args, &action) + true + end + + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Appropriate for actions that may block on IO. 
+ # + # @param [Proc] action the action dispatch to be enqueued + # @return [Concurrent::Agent] self + # @see #send_off + def <<(action) + send_off(&action) + self + end + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far, from this thread or nested by the Agent, have occurred. Will + # block when {#failed?}. Will never return if a failed Agent is {#restart} + # with `:clear_actions` true. + # + # Returns a reference to `self` to support method chaining: + # + # ``` + # current_value = agent.await.value + # ``` + # + # @return [Boolean] self + # + # @!macro agent_await_warning + def await + wait(nil) + self + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout) + wait(timeout.to_f) + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # + # @!macro agent_await_warning + def await_for!(timeout) + raise Concurrent::TimeoutError unless wait(timeout.to_f) + true + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. Will block indefinitely when timeout is nil or not given. + # + # Provided mainly for consistency with other classes in this library. Prefer + # the various `await` methods instead. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def wait(timeout = nil) + latch = Concurrent::CountDownLatch.new(1) + enqueue_await_job(latch) + latch.wait(timeout) + end + + # Is the Agent in a failed state? + # + # @see #restart + def failed? + !@error.value.nil? + end + + alias_method :stopped?, :failed? + + # When an Agent is {#failed?}, changes the Agent {#value} to `new_value` + # then un-fails the Agent so that action dispatches are allowed again. If + # the `:clear_actions` option is give and true, any actions queued on the + # Agent that were being held while it was failed will be discarded, + # otherwise those held actions will proceed. The `new_value` must pass the + # validator if any, or `restart` will raise an exception and the Agent will + # remain failed with its old {#value} and {#error}. Observers, if any, will + # not be notified of the new state. + # + # @param [Object] new_value the new value for the Agent once restarted + # @param [Hash] opts the configuration options + # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed + # actions should be discarded on restart, else false (default: false) + # @return [Boolean] true + # + # @raise [Concurrent:AgentError] when not failed + def restart(new_value, opts = {}) + clear_actions = opts.fetch(:clear_actions, false) + synchronize do + raise Error.new('agent is not failed') unless failed? 
+ raise ValidationError unless ns_validate(new_value) + @current.value = new_value + @error.value = nil + @queue.clear if clear_actions + ns_post_next_job unless @queue.empty? + end + true + end + + class << self + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far to all the given Agents, from this thread or nested by the + # given Agents, have occurred. Will block when any of the agents are + # failed. Will never return if a failed Agent is restart with + # `:clear_actions` true. + # + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true + # + # @!macro agent_await_warning + def await(*agents) + agents.each { |agent| agent.await } + true + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout, *agents) + end_at = Concurrent.monotonic_time + timeout.to_f + ok = agents.length.times do |i| + break false if (delay = end_at - Concurrent.monotonic_time) < 0 + break false unless agents[i].await_for(delay) + end + !!ok + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # @!macro agent_await_warning + def await_for!(timeout, *agents) + raise Concurrent::TimeoutError unless await_for(timeout, *agents) + true + end + end + + private + + def ns_initialize(initial, opts) + @error_mode = opts[:error_mode] + @error_handler = opts[:error_handler] + + if @error_mode && !ERROR_MODES.include?(@error_mode) + raise ArgumentError.new('unrecognized error mode') + elsif @error_mode.nil? + @error_mode = @error_handler ? :continue : :fail + end + + @error_handler ||= DEFAULT_ERROR_HANDLER + @validator = opts.fetch(:validator, DEFAULT_VALIDATOR) + @current = Concurrent::AtomicReference.new(initial) + @error = Concurrent::AtomicReference.new(nil) + @caller = Concurrent::ThreadLocalVar.new(nil) + @queue = [] + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + def enqueue_action_job(action, args, executor) + raise ArgumentError.new('no action given') unless action + job = Job.new(action, args, executor, @caller.value || Thread.current.object_id) + synchronize { ns_enqueue_job(job) } + end + + def enqueue_await_job(latch) + synchronize do + if (index = ns_find_last_job_for_thread) + job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor, + Thread.current.object_id) + ns_enqueue_job(job, index+1) + else + latch.count_down + true + end + end + end + + def ns_enqueue_job(job, index = nil) + # a non-nil index means this is an await job + return false if index.nil? && failed? 
+ index ||= @queue.length + @queue.insert(index, job) + # if this is the only job, post to executor + ns_post_next_job if @queue.length == 1 + true + end + + def ns_post_next_job + @queue.first.executor.post { execute_next_job } + end + + def execute_next_job + job = synchronize { @queue.first } + old_value = @current.value + + @caller.value = job.caller # for nested actions + new_value = job.action.call(old_value, *job.args) + @caller.value = nil + + return if new_value == AWAIT_FLAG + + if ns_validate(new_value) + @current.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + else + handle_error(ValidationError.new) + end + rescue => error + handle_error(error) + ensure + synchronize do + @queue.shift + unless failed? || @queue.empty? + ns_post_next_job + end + end + end + + def ns_validate(value) + @validator.call(value) + rescue + false + end + + def handle_error(error) + # stop new jobs from posting + @error.value = error if @error_mode == :fail + @error_handler.call(self, error) + rescue + # do nothing + end + + def ns_find_last_job_for_thread + @queue.rindex { |job| job.caller == Thread.current.object_id } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/array.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/array.rb new file mode 100644 index 0000000..60e5b56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/array.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_array + # + # A thread-safe subclass of Array. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array` + # which is concatenation of `a` and `b`, then it writes the concatenation to `a`. + # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#concat` instead. + # + # @see http://ruby-doc.org/core/Array.html Ruby standard library `Array` + + # @!macro internal_implementation_note + ArrayImplementation = case + when Concurrent.on_cruby? + # Array is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Array + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyArray < ::Array + include JRuby::Synchronized + end + JRubyArray + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_rbx RbxArray + RbxArray + + when Concurrent.on_truffleruby? 
+ require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray + TruffleRubyArray + + else + warn 'Possibly unsupported Ruby implementation' + ::Array + end + private_constant :ArrayImplementation + + # @!macro concurrent_array + class Array < ArrayImplementation + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/async.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/async.rb new file mode 100644 index 0000000..5e125e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/async.rb @@ -0,0 +1,448 @@ +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # A mixin module that provides simple asynchronous behavior to a class, + # turning it into a simple actor. Loosely based on Erlang's + # [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without + # supervision or linking. + # + # A more feature-rich {Concurrent::Actor} is also available when the + # capabilities of `Async` are too limited. + # + # ```cucumber + # Feature: + # As a stateful, plain old Ruby class + # I want safe, asynchronous behavior + # So my long-running methods don't block the main thread + # ``` + # + # The `Async` module is a way to mix simple yet powerful asynchronous + # capabilities into any plain old Ruby object or class, turning each object + # into a simple Actor. Method calls are processed on a background thread. The + # caller is free to perform other actions while processing occurs in the + # background. + # + # Method calls to the asynchronous object are made via two proxy methods: + # `async` (alias `cast`) and `await` (alias `call`). These proxy methods post + # the method call to the object's background thread and return a "future" + # which will eventually contain the result of the method call. + # + # This behavior is loosely patterned after Erlang's `gen_server` behavior. + # When an Erlang module implements the `gen_server` behavior it becomes + # inherently asynchronous. The `start` or `start_link` function spawns a + # process (similar to a thread but much more lightweight and efficient) and + # returns the ID of the process. Using the process ID, other processes can + # send messages to the `gen_server` via the `cast` and `call` methods. Unlike + # Erlang's `gen_server`, however, `Async` classes do not support linking or + # supervision trees. + # + # ## Basic Usage + # + # When this module is mixed into a class, objects of the class become inherently + # asynchronous. Each object gets its own background thread on which to post + # asynchronous method calls. Asynchronous method calls are executed in the + # background one at a time in the order they are received. + # + # To create an asynchronous class, simply mix in the `Concurrent::Async` module: + # + # ``` + # class Hello + # include Concurrent::Async + # + # def hello(name) + # "Hello, #{name}!" + # end + # end + # ``` + # + # Mixing this module into a class provides each object two proxy methods: + # `async` and `await`. These methods are thread safe with respect to the + # enclosing object. The former proxy allows methods to be called + # asynchronously by posting to the object's internal thread. 
The latter proxy + # allows a method to be called synchronously but does so safely with respect + # to any pending asynchronous method calls and ensures proper ordering. Both + # methods return a {Concurrent::IVar} which can be inspected for the result + # of the proxied method call. Calling a method with `async` will return a + # `:pending` `IVar` whereas `await` will return a `:complete` `IVar`. + # + # ``` + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # ``` + # + # ## Let It Fail + # + # The `async` and `await` proxy methods have built-in error protection based + # on Erlang's famous "let it fail" philosophy. Instance methods should not be + # programmed defensively. When an exception is raised by a delegated method + # the proxy will rescue the exception, expose it to the caller as the `reason` + # attribute of the returned future, then process the next method call. + # + # ## Calling Methods Internally + # + # External method calls should *always* use the `async` and `await` proxy + # methods. When one method calls another method, the `async` proxy should + # rarely be used and the `await` proxy should *never* be used. + # + # When an object calls one of its own methods using the `await` proxy the + # second call will be enqueued *behind* the currently running method call. + # Any attempt to wait on the result will fail as the second call will never + # run until after the current call completes. + # + # Calling a method using the `await` proxy from within a method that was + # itself called using `async` or `await` will irreversibly deadlock the + # object. Do *not* do this, ever. + # + # ## Instance Variables and Attribute Accessors + # + # Instance variables do not need to be thread-safe so long as they are private. + # Asynchronous method calls are processed in the order they are received and + # are processed one at a time. Therefore private instance variables can only + # be accessed by one thread at a time. This is inherently thread-safe. + # + # When using private instance variables within asynchronous methods, the best + # practice is to read the instance variable into a local variable at the start + # of the method then update the instance variable at the *end* of the method. + # This way, should an exception be raised during method execution the internal + # state of the object will not have been changed. + # + # ### Reader Attributes + # + # The use of `attr_reader` is discouraged. Internal state exposed externally, + # when necessary, should be done through accessor methods. The instance + # variables exposed by these methods *must* be thread-safe, or they must be + # called using the `async` and `await` proxy methods. These two approaches are + # subtly different. + # + # When internal state is accessed via the `async` and `await` proxy methods, + # the returned value represents the object's state *at the time the call is + # processed*, which may *not* be the state of the object at the time the call + # is made. + # + # To get the state *at the current* time, irrespective of an enqueued method + # calls, a reader method must be called directly. 
This is inherently unsafe + # unless the instance variable is itself thread-safe, preferably using one + # of the thread-safe classes within this library. Because the thread-safe + # classes within this library are internally-locking or non-locking, they can + # be safely used from within asynchronous methods without causing deadlocks. + # + # Generally speaking, the best practice is to *not* expose internal state via + # reader methods. The best practice is to simply use the method's return value. + # + # ### Writer Attributes + # + # Writer attributes should never be used with asynchronous classes. Changing + # the state externally, even when done in the thread-safe way, is not logically + # consistent. Changes to state need to be timed with respect to all asynchronous + # method calls which my be in-process or enqueued. The only safe practice is to + # pass all necessary data to each method as arguments and let the method update + # the internal state as necessary. + # + # ## Class Constants, Variables, and Methods + # + # ### Class Constants + # + # Class constants do not need to be thread-safe. Since they are read-only and + # immutable they may be safely read both externally and from within + # asynchronous methods. + # + # ### Class Variables + # + # Class variables should be avoided. Class variables represent shared state. + # Shared state is anathema to concurrency. Should there be a need to share + # state using class variables they *must* be thread-safe, preferably + # using the thread-safe classes within this library. When updating class + # variables, never assign a new value/object to the variable itself. Assignment + # is not thread-safe in Ruby. Instead, use the thread-safe update functions + # of the variable itself to change the value. + # + # The best practice is to *never* use class variables with `Async` classes. + # + # ### Class Methods + # + # Class methods which are pure functions are safe. Class methods which modify + # class variables should be avoided, for all the reasons listed above. + # + # ## An Important Note About Thread Safe Guarantees + # + # > Thread safe guarantees can only be made when asynchronous method calls + # > are not mixed with direct method calls. Use only direct method calls + # > when the object is used exclusively on a single thread. Use only + # > `async` and `await` when the object is shared between threads. Once you + # > call a method using `async` or `await`, you should no longer call methods + # > directly on the object. Use `async` and `await` exclusively from then on. + # + # @example + # + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # + # @see Concurrent::Actor + # @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia + # @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server + # @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/ + module Async + + # @!method self.new(*args, &block) + # + # Instanciate a new object and ensure proper initialization of the + # synchronization mechanisms. 
+ # + # @param [Array] args Zero or more arguments to be passed to the + # object's initializer. + # @param [Proc] block Optional block to pass to the object's initializer. + # @return [Object] A properly initialized object of the asynchronous class. + + # Check for the presence of a method on an object and determine if a given + # set of arguments matches the required arity. + # + # @param [Object] obj the object to check against + # @param [Symbol] method the method to check the object for + # @param [Array] args zero or more arguments for the arity check + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + # + # @note This check is imperfect because of the way Ruby reports the arity of + # methods with a variable number of arguments. It is possible to determine + # if too few arguments are given but impossible to determine if too many + # arguments are given. This check may also fail to recognize dynamic behavior + # of the object, such as methods simulated with `method_missing`. + # + # @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity + # @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to? + # @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing + # + # @!visibility private + def self.validate_argc(obj, method, *args) + argc = args.length + arity = obj.method(method).arity + + if arity >= 0 && argc != arity + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})") + elsif arity < 0 && (arity = (arity + 1).abs) > argc + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)") + end + end + + # @!visibility private + def self.included(base) + base.singleton_class.send(:alias_method, :original_new, :new) + base.extend(ClassMethods) + super(base) + end + + # @!visibility private + module ClassMethods + def new(*args, &block) + obj = original_new(*args, &block) + obj.send(:init_synchronization) + obj + end + end + private_constant :ClassMethods + + # Delegates asynchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AsyncDelegator < Synchronization::LockableObject + safe_initialization! + + # Create a new delegator object wrapping the given delegate. + # + # @param [Object] delegate the object to wrap and delegate method calls to + def initialize(delegate) + super() + @delegate = delegate + @queue = [] + @executor = Concurrent.global_io_executor + @ruby_pid = $$ + end + + # Delegates method calls to the wrapped object. + # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + super unless @delegate.respond_to?(method) + Async::validate_argc(@delegate, method, *args) + + ivar = Concurrent::IVar.new + synchronize do + reset_if_forked + @queue.push [ivar, method, args, block] + @executor.post { perform } if @queue.length == 1 + end + + ivar + end + + # Check whether the method is responsive + # + # @param [Symbol] method the method being called + def respond_to_missing?(method, include_private = false) + @delegate.respond_to?(method) || super + end + + # Perform all enqueued tasks. 
+ # + # This method must be called from within the executor. It must not be + # called while already running. It will loop until the queue is empty. + def perform + loop do + ivar, method, args, block = synchronize { @queue.first } + break unless ivar # queue is empty + + begin + ivar.set(@delegate.send(method, *args, &block)) + rescue => error + ivar.fail(error) + end + + synchronize do + @queue.shift + return if @queue.empty? + end + end + end + + def reset_if_forked + if $$ != @ruby_pid + @queue.clear + @ruby_pid = $$ + end + end + end + private_constant :AsyncDelegator + + # Delegates synchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AwaitDelegator + + # Create a new delegator object wrapping the given delegate. + # + # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to + def initialize(delegate) + @delegate = delegate + end + + # Delegates method calls to the wrapped object. + # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + ivar = @delegate.send(method, *args, &block) + ivar.wait + ivar + end + + # Check whether the method is responsive + # + # @param [Symbol] method the method being called + def respond_to_missing?(method, include_private = false) + @delegate.respond_to?(method) || super + end + end + private_constant :AwaitDelegator + + # Causes the chained method call to be performed asynchronously on the + # object's thread. The delegated method will return a future in the + # `:pending` state and the method call will have been scheduled on the + # object's thread. The final disposition of the method call can be obtained + # by inspecting the returned future. + # + # @!macro async_thread_safety_warning + # @note The method call is guaranteed to be thread safe with respect to + # all other method calls against the same object that are called with + # either `async` or `await`. The mutable nature of Ruby references + # (and object orientation in general) prevent any other thread safety + # guarantees. Do NOT mix direct method calls with delegated method calls. + # Use *only* delegated method calls when sharing the object between threads. + # + # @return [Concurrent::IVar] the pending result of the asynchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of + # the requested method + def async + @__async_delegator__ + end + alias_method :cast, :async + + # Causes the chained method call to be performed synchronously on the + # current thread. The delegated will return a future in either the + # `:fulfilled` or `:rejected` state and the delegated method will have + # completed. The final disposition of the delegated method can be obtained + # by inspecting the returned future. 
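# [Editorial usage sketch -- not part of the vendored gem source or of this
# diff.] A minimal illustration of inspecting the IVars returned by the
# `async` and `await` proxies documented above. The Calculator class and its
# method names are invented for the example.
require 'concurrent'

class Calculator
  include Concurrent::Async

  def square(n)
    n * n
  end

  def explode
    raise ArgumentError, 'boom'
  end
end

calc = Calculator.new

pending = calc.async.square(4) # returns an IVar in the :pending state
pending.value                  # blocks until the call is processed => 16

done = calc.await.square(5)    # returns an IVar that is already :complete
done.value                     # => 25

failed = calc.await.explode    # "let it fail": the exception is captured
failed.rejected?               # => true
failed.reason                  # => #<ArgumentError: boom>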
+ # + # @!macro async_thread_safety_warning + # + # @return [Concurrent::IVar] the completed result of the synchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of the + # requested method + def await + @__await_delegator__ + end + alias_method :call, :await + + # Initialize the internal serializer and other stnchronization mechanisms. + # + # @note This method *must* be called immediately upon object construction. + # This is the only way thread-safe initialization can be guaranteed. + # + # @!visibility private + def init_synchronization + return self if defined?(@__async_initialized__) && @__async_initialized__ + @__async_initialized__ = true + @__async_delegator__ = AsyncDelegator.new(self) + @__await_delegator__ = AwaitDelegator.new(@__async_delegator__) + self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atom.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atom.rb new file mode 100644 index 0000000..8a45730 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atom.rb @@ -0,0 +1,222 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +# @!macro thread_safe_variable_comparison +# +# ## Thread-safe Variable Classes +# +# Each of the thread-safe variable classes is designed to solve a different +# problem. In general: +# +# * *{Concurrent::Agent}:* Shared, mutable variable providing independent, +# uncoordinated, *asynchronous* change of individual values. Best used when +# the value will undergo frequent, complex updates. Suitable when the result +# of an update does not need to be known immediately. +# * *{Concurrent::Atom}:* Shared, mutable variable providing independent, +# uncoordinated, *synchronous* change of individual values. Best used when +# the value will undergo frequent reads but only occasional, though complex, +# updates. Suitable when the result of an update must be known immediately. +# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated +# atomically. Updates are synchronous but fast. Best used when updates a +# simple set operations. Not suitable when updates are complex. +# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar +# but optimized for the given data type. +# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used +# when two or more threads need to exchange data. The threads will pair then +# block on each other until the exchange is complete. +# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread +# must give a value to another, which must take the value. The threads will +# block on each other until the exchange is complete. +# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which +# holds a different value for each thread which has access. Often used as +# an instance variable in objects which must maintain different state +# for different threads. +# * *{Concurrent::TVar}:* Shared, mutable variables which provide +# *coordinated*, *synchronous*, change of *many* stated. Used when multiple +# value must change together, in an all-or-nothing transaction. 
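# [Editorial usage sketch -- not part of the vendored gem source or of this
# diff.] A brief illustration of two entries from the comparison above:
# Atom for validated, observable, synchronous updates of a single value, and
# AtomicReference for plain atomic read/write/CAS. Variable names are
# invented for the example.
require 'concurrent'

counter = Concurrent::Atom.new(0, validator: ->(v) { v.is_a?(Integer) && v >= 0 })
counter.swap { |n| n + 1 }    # => 1
counter.compare_and_set(1, 5) # => true
counter.compare_and_set(1, 9) # => false -- current value is 5, not 1
counter.value                 # => 5

ref = Concurrent::AtomicReference.new([])
ref.update { |list| list + [:job] } # retried internally until the CAS succeeds
ref.get                             # => [:job]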
+ + +module Concurrent + + # Atoms provide a way to manage shared, synchronous, independent state. + # + # An atom is initialized with an initial value and an optional validation + # proc. At any time the value of the atom can be synchronously and safely + # changed. If a validator is given at construction then any new value + # will be checked against the validator and will be rejected if the + # validator returns false or raises an exception. + # + # There are two ways to change the value of an atom: {#compare_and_set} and + # {#swap}. The former will set the new value if and only if it validates and + # the current value matches the new value. The latter will atomically set the + # new value to the result of running the given block if and only if that + # value validates. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an atom with an initial value + # atom = Concurrent::Atom.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # atom.swap{|set| next_fibonacci(set) } + # end + # + # # get the current value + # atom.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Atoms support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time the value of the Atom changes. + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Atom when the change began + # The `new_value` is the value to which the Atom was set when the change + # completed. Note that `old_value` and `new_value` may be the same. This is + # not an error. It simply means that the change operation returned the same + # value. + # + # Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/atoms Clojure Atoms + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Atom < Synchronization::Object + include Concern::Observable + + safe_initialization! + attr_atomic(:value) + private :value=, :swap_value, :compare_and_set_value, :update_value + public :value + alias_method :deref, :value + + # @!method value + # The current value of the atom. + # + # @return [Object] The current value. + + # Create a new atom with the given initial value. + # + # @param [Object] value The initial value + # @param [Hash] opts The options used to configure the atom + # @option opts [Proc] :validator (nil) Optional proc used to validate new + # values. It must accept one and only one argument which will be the + # intended new value. The validator will return true if the new value + # is acceptable else return false (preferrably) or raise an exception. + # + # @!macro deref_options + # + # @raise [ArgumentError] if the validator is not a `Proc` (when given) + def initialize(value, opts = {}) + super() + @Validator = opts.fetch(:validator, -> v { true }) + self.observers = Collection::CopyOnNotifyObserverSet.new + self.value = value + end + + # Atomically swaps the value of atom using the given block. The current + # value will be passed to the block, as will any arguments passed as + # arguments to the function. The new value will be validated against the + # (optional) validator proc given at construction. 
If validation fails the + # value will not be changed. + # + # Internally, {#swap} reads the current value, applies the block to it, and + # attempts to compare-and-set it in. Since another thread may have changed + # the value in the intervening time, it may have to retry, and does so in a + # spin loop. The net effect is that the value will always be the result of + # the application of the supplied block to a current value, atomically. + # However, because the block might be called multiple times, it must be free + # of side effects. + # + # @note The given block may be called multiple times, and thus should be free + # of side effects. + # + # @param [Object] args Zero or more arguments passed to the block. + # + # @yield [value, args] Calculates a new value for the atom based on the + # current value and any supplied arguments. + # @yieldparam value [Object] The current value of the atom. + # @yieldparam args [Object] All arguments passed to the function, in order. + # @yieldreturn [Object] The intended new value of the atom. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + # + # @raise [ArgumentError] When no block is given. + def swap(*args) + raise ArgumentError.new('no block given') unless block_given? + + loop do + old_value = value + new_value = yield(old_value, *args) + begin + break old_value unless valid?(new_value) + break new_value if compare_and_set(old_value, new_value) + rescue + break old_value + end + end + end + + # Atomically sets the value of atom to the new value if and only if the + # current value of the atom is identical to the old value and the new + # value successfully validates against the (optional) validator given + # at construction. + # + # @param [Object] old_value The expected current value. + # @param [Object] new_value The intended new value. + # + # @return [Boolean] True if the value is changed else false. + def compare_and_set(old_value, new_value) + if valid?(new_value) && compare_and_set_value(old_value, new_value) + observers.notify_observers(Time.now, old_value, new_value) + true + else + false + end + end + + # Atomically sets the value of atom to the new value without regard for the + # current value so long as the new value successfully validates against the + # (optional) validator given at construction. + # + # @param [Object] new_value The intended new value. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + def reset(new_value) + old_value = value + if valid?(new_value) + self.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + new_value + else + old_value + end + end + + private + + # Is the new value valid? + # + # @param [Object] new_value The intended new value. 
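# [Editorial usage sketch -- not part of the vendored gem source or of this
# diff.] It illustrates the reset/compare_and_set behaviour implemented just
# above, plus observer notification; the block form of add_observer comes
# from the gem's Observable mixin. Names are invented for the example.
require 'concurrent'

atom = Concurrent::Atom.new(10, validator: ->(v) { v.is_a?(Integer) })

atom.add_observer do |time, old_value, new_value|
  puts "#{time}: #{old_value} -> #{new_value}" # fired on every successful change
end

atom.reset(42)                  # => 42, observers are notified
atom.compare_and_set(42, :nope) # => false, the validator rejects a non-Integer
atom.value                      # => 42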
+ # @return [Boolean] false if the validator function returns false or raises + # an exception else true + def valid?(new_value) + @Validator.call(new_value) + rescue + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb new file mode 100644 index 0000000..fcdeed7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb @@ -0,0 +1,66 @@ +require 'concurrent/constants' + +module Concurrent + + # @!macro thread_local_var + # @!macro internal_implementation_note + # @!visibility private + class AbstractThreadLocalVar + + # @!macro thread_local_var_method_initialize + def initialize(default = nil, &default_block) + if default && block_given? + raise ArgumentError, "Cannot use both value and block as default value" + end + + if block_given? + @default_block = default_block + @default = nil + else + @default_block = nil + @default = default + end + + allocate_storage + end + + # @!macro thread_local_var_method_get + def value + raise NotImplementedError + end + + # @!macro thread_local_var_method_set + def value=(value) + raise NotImplementedError + end + + # @!macro thread_local_var_method_bind + def bind(value, &block) + if block_given? + old_value = self.value + begin + self.value = value + yield + ensure + self.value = old_value + end + end + end + + protected + + # @!visibility private + def allocate_storage + raise NotImplementedError + end + + # @!visibility private + def default + if @default_block + self.value = @default_block.call + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb new file mode 100644 index 0000000..0b0373d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb @@ -0,0 +1,126 @@ +require 'concurrent/atomic/mutex_atomic_boolean' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_boolean_method_initialize + # + # Creates a new `AtomicBoolean` with the given initial value. + # + # @param [Boolean] initial the initial value + + # @!macro atomic_boolean_method_value_get + # + # Retrieves the current `Boolean` value. + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_value_set + # + # Explicitly sets the value. + # + # @param [Boolean] value the new value to be set + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_true_question + # + # Is the current value `true` + # + # @return [Boolean] true if the current value is `true`, else false + + # @!macro atomic_boolean_method_false_question + # + # Is the current value `false` + # + # @return [Boolean] true if the current value is `false`, else false + + # @!macro atomic_boolean_method_make_true + # + # Explicitly sets the value to true. + # + # @return [Boolean] true if value has changed, otherwise false + + # @!macro atomic_boolean_method_make_false + # + # Explicitly sets the value to false. 
+ # + # @return [Boolean] true if value has changed, otherwise false + + ################################################################### + + # @!macro atomic_boolean_public_api + # + # @!method initialize(initial = false) + # @!macro atomic_boolean_method_initialize + # + # @!method value + # @!macro atomic_boolean_method_value_get + # + # @!method value=(value) + # @!macro atomic_boolean_method_value_set + # + # @!method true? + # @!macro atomic_boolean_method_true_question + # + # @!method false? + # @!macro atomic_boolean_method_false_question + # + # @!method make_true + # @!macro atomic_boolean_method_make_true + # + # @!method make_false + # @!macro atomic_boolean_method_make_false + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicBooleanImplementation = case + when defined?(JavaAtomicBoolean) + JavaAtomicBoolean + when defined?(CAtomicBoolean) + CAtomicBoolean + else + MutexAtomicBoolean + end + private_constant :AtomicBooleanImplementation + + # @!macro atomic_boolean + # + # A boolean value that can be updated atomically. Reads and writes to an atomic + # boolean and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicBoolean... + # 2.790000 0.000000 2.790000 ( 2.791454) + # Testing with Concurrent::CAtomicBoolean... + # 0.740000 0.000000 0.740000 ( 0.740206) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicBoolean... + # 5.240000 2.520000 7.760000 ( 3.683000) + # Testing with Concurrent::JavaAtomicBoolean... + # 3.340000 0.010000 3.350000 ( 0.855000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean + # + # @!macro atomic_boolean_public_api + class AtomicBoolean < AtomicBooleanImplementation + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb new file mode 100644 index 0000000..c67166d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb @@ -0,0 +1,143 @@ +require 'concurrent/atomic/mutex_atomic_fixnum' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_fixnum_method_initialize + # + # Creates a new `AtomicFixnum` with the given initial value. + # + # @param [Fixnum] initial the initial value + # @raise [ArgumentError] if the initial value is not a `Fixnum` + + # @!macro atomic_fixnum_method_value_get + # + # Retrieves the current `Fixnum` value. + # + # @return [Fixnum] the current value + + # @!macro atomic_fixnum_method_value_set + # + # Explicitly sets the value. + # + # @param [Fixnum] value the new value to be set + # + # @return [Fixnum] the current value + # + # @raise [ArgumentError] if the new value is not a `Fixnum` + + # @!macro atomic_fixnum_method_increment + # + # Increases the current value by the given amount (defaults to 1). 
+ # + # @param [Fixnum] delta the amount by which to increase the current value + # + # @return [Fixnum] the current value after incrementation + + # @!macro atomic_fixnum_method_decrement + # + # Decreases the current value by the given amount (defaults to 1). + # + # @param [Fixnum] delta the amount by which to decrease the current value + # + # @return [Fixnum] the current value after decrementation + + # @!macro atomic_fixnum_method_compare_and_set + # + # Atomically sets the value to the given updated value if the current + # value == the expected value. + # + # @param [Fixnum] expect the expected value + # @param [Fixnum] update the new value + # + # @return [Boolean] true if the value was updated else false + + # @!macro atomic_fixnum_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # + # @return [Object] the new value + + ################################################################### + + # @!macro atomic_fixnum_public_api + # + # @!method initialize(initial = 0) + # @!macro atomic_fixnum_method_initialize + # + # @!method value + # @!macro atomic_fixnum_method_value_get + # + # @!method value=(value) + # @!macro atomic_fixnum_method_value_set + # + # @!method increment(delta = 1) + # @!macro atomic_fixnum_method_increment + # + # @!method decrement(delta = 1) + # @!macro atomic_fixnum_method_decrement + # + # @!method compare_and_set(expect, update) + # @!macro atomic_fixnum_method_compare_and_set + # + # @!method update + # @!macro atomic_fixnum_method_update + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicFixnumImplementation = case + when defined?(JavaAtomicFixnum) + JavaAtomicFixnum + when defined?(CAtomicFixnum) + CAtomicFixnum + else + MutexAtomicFixnum + end + private_constant :AtomicFixnumImplementation + + # @!macro atomic_fixnum + # + # A numeric value that can be updated atomically. Reads and writes to an atomic + # fixnum and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicFixnum... + # 3.130000 0.000000 3.130000 ( 3.136505) + # Testing with Concurrent::CAtomicFixnum... + # 0.790000 0.000000 0.790000 ( 0.785550) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicFixnum... + # 5.460000 2.460000 7.920000 ( 3.715000) + # Testing with Concurrent::JavaAtomicFixnum... + # 4.520000 0.030000 4.550000 ( 1.187000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong + # + # @!macro atomic_fixnum_public_api + class AtomicFixnum < AtomicFixnumImplementation + # @return [String] Short string representation. 
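# [Editorial usage sketch -- not part of the vendored gem source or of this
# diff.] A short illustration of the AtomicFixnum operations documented
# above; the variable name is invented for the example.
require 'concurrent'

hits = Concurrent::AtomicFixnum.new(0)
hits.increment              # => 1
hits.increment(4)           # => 5
hits.decrement              # => 4
hits.compare_and_set(4, 10) # => true
hits.update { |n| n * 2 }   # => 20
hits.value                  # => 20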
+ def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb new file mode 100644 index 0000000..f20cd46 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb @@ -0,0 +1,164 @@ +module Concurrent + # An atomic reference which maintains an object reference along with a mark bit + # that can be updated atomically. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html + # java.util.concurrent.atomic.AtomicMarkableReference + class AtomicMarkableReference < ::Concurrent::Synchronization::Object + + attr_atomic(:reference) + private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference + + def initialize(value = nil, mark = false) + super() + self.reference = immutable_array(value, mark) + end + + # Atomically sets the value and mark to the given updated value and + # mark given both: + # - the current value == the expected value && + # - the current mark == the expected mark + # + # @param [Object] expected_val the expected value + # @param [Object] new_val the new value + # @param [Boolean] expected_mark the expected mark + # @param [Boolean] new_mark the new mark + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value or the + # actual mark was not equal to the expected mark + def compare_and_set(expected_val, new_val, expected_mark, new_mark) + # Memoize a valid reference to the current AtomicReference for + # later comparison. + current = reference + curr_val, curr_mark = current + + # Ensure that that the expected marks match. + return false unless expected_mark == curr_mark + + if expected_val.is_a? Numeric + # If the object is a numeric, we need to ensure we are comparing + # the numerical values + return false unless expected_val == curr_val + else + # Otherwise, we need to ensure we are comparing the object identity. + # Theoretically, this could be incorrect if a user monkey-patched + # `Object#equal?`, but they should know that they are playing with + # fire at that point. + return false unless expected_val.equal? curr_val + end + + prospect = immutable_array(new_val, new_mark) + + compare_and_set_reference current, prospect + end + + alias_method :compare_and_swap, :compare_and_set + + # Gets the current reference and marked values. + # + # @return [Array] the current reference and marked values + def get + reference + end + + # Gets the current value of the reference + # + # @return [Object] the current value of the reference + def value + reference[0] + end + + # Gets the current marked value + # + # @return [Boolean] the current marked value + def mark + reference[1] + end + + alias_method :marked?, :mark + + # _Unconditionally_ sets to the given value of both the reference and + # the mark. + # + # @param [Object] new_val the new value + # @param [Boolean] new_mark the new mark + # + # @return [Array] both the new value and the new mark + def set(new_val, new_mark) + self.reference = immutable_array(new_val, new_mark) + end + + # Pass the current value and marked state to the given block, replacing it + # with the block's results. 
May retry if the value changes during the + # block's execution. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and new mark + def update + loop do + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + if compare_and_set old_val, new_val, old_mark, new_mark + return immutable_array(new_val, new_mark) + end + end + end + + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state + # + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + unless compare_and_set old_val, new_val, old_mark, new_mark + fail ::Concurrent::ConcurrentUpdateError, + 'AtomicMarkableReference: Update failed due to race condition.', + 'Note: If you would like to guarantee an update, please use ' + + 'the `AtomicMarkableReference#update` method.' + end + + immutable_array(new_val, new_mark) + end + + # Pass the current value to the given block, replacing it with the + # block's result. Simply return nil if update fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state, or nil if + # the update failed + def try_update + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + return unless compare_and_set old_val, new_val, old_mark, new_mark + + immutable_array(new_val, new_mark) + end + + private + + def immutable_array(*args) + args.freeze + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb new file mode 100644 index 0000000..620c069 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb @@ -0,0 +1,204 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/engine' +require 'concurrent/atomic_reference/numeric_cas_wrapper' + +# Shim for TruffleRuby::AtomicReference +if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference) + # @!visibility private + module TruffleRuby + AtomicReference = Truffle::AtomicReference + end +end + +module Concurrent + + # Define update methods that use direct paths + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicDirectUpdate + + # @!macro atomic_reference_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. 
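# [Editorial usage sketch -- not part of the vendored gem source or of this
# diff.] The AtomicMarkableReference class defined a little earlier carries
# no usage example of its own, so a minimal one follows; names are invented.
require 'concurrent'

node = Concurrent::AtomicMarkableReference.new(:head, false)
node.value                                       # => :head
node.marked?                                     # => false
node.compare_and_set(:head, :next, false, true)  # => true
node.get                                         # => [:next, true] (frozen)
node.update { |val, _mark| [val, false] }        # => [:next, false]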
+ # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @return [Object] the new value + def update + true until compare_and_set(old_value = get, new_value = yield(old_value)) + new_value + end + + # @!macro atomic_reference_method_try_update + # + # Pass the current value to the given block, replacing it + # with the block's result. Return nil if the update fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This method was altered to avoid raising an exception by default. + # Instead, this method now returns `nil` in case of failure. For more info, + # please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value, or nil if update failed + def try_update + old_value = get + new_value = yield old_value + + return unless compare_and_set old_value, new_value + + new_value + end + + # @!macro atomic_reference_method_try_update! + # + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This behavior mimics the behavior of the original + # `AtomicReference#try_update` API. The reason this was changed was to + # avoid raising exceptions (which are inherently slow) by default. For more + # info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_value = get + new_value = yield old_value + unless compare_and_set(old_value, new_value) + if $VERBOSE + raise ConcurrentUpdateError, "Update failed" + else + raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE + end + end + new_value + end + end + + require 'concurrent/atomic_reference/mutex_atomic' + + # @!macro atomic_reference + # + # An object reference that may be updated atomically. All read and write + # operations have java volatile semantic. + # + # @!macro thread_safe_variable_comparison + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html + # + # @!method initialize(value = nil) + # @!macro atomic_reference_method_initialize + # @param [Object] value The initial value. + # + # @!method get + # @!macro atomic_reference_method_get + # Gets the current value. + # @return [Object] the current value + # + # @!method set(new_value) + # @!macro atomic_reference_method_set + # Sets to the given value. + # @param [Object] new_value the new value + # @return [Object] the new value + # + # @!method get_and_set(new_value) + # @!macro atomic_reference_method_get_and_set + # Atomically sets to the given value and returns the old value. + # @param [Object] new_value the new value + # @return [Object] the old value + # + # @!method compare_and_set(old_value, new_value) + # @!macro atomic_reference_method_compare_and_set + # + # Atomically sets the value to the given updated value if + # the current value == the expected value. 
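# [Editorial usage sketch -- not part of the vendored gem source or of this
# diff.] It shows the update/try_update helpers implemented just above; the
# variable name is invented for the example.
require 'concurrent'

ref = Concurrent::AtomicReference.new(0)
ref.update { |n| n + 1 }     # retries on contention => 1
ref.try_update { |n| n + 1 } # single attempt => 2, or nil if it lost a race
ref.compare_and_set(2, 10)   # => true (assuming the try_update succeeded)
ref.get                      # => 10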
+ # + # @param [Object] old_value the expected value + # @param [Object] new_value the new value + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value. + # + # @!method update + # @!macro atomic_reference_method_update + # + # @!method try_update + # @!macro atomic_reference_method_try_update + # + # @!method try_update! + # @!macro atomic_reference_method_try_update! + + + # @!macro internal_implementation_note + class ConcurrentUpdateError < ThreadError + # frozen pre-allocated backtrace to speed ConcurrentUpdateError + CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze + end + + # @!macro internal_implementation_note + AtomicReferenceImplementation = case + when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? + # @!visibility private + # @!macro internal_implementation_note + class CAtomicReference + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + end + CAtomicReference + when Concurrent.on_jruby? + # @!visibility private + # @!macro internal_implementation_note + class JavaAtomicReference + include AtomicDirectUpdate + end + JavaAtomicReference + when Concurrent.on_truffleruby? + class TruffleRubyAtomicReference < TruffleRuby::AtomicReference + include AtomicDirectUpdate + alias_method :value, :get + alias_method :value=, :set + alias_method :compare_and_swap, :compare_and_set + alias_method :swap, :get_and_set + end + when Concurrent.on_rbx? + # @note Extends `Rubinius::AtomicReference` version adding aliases + # and numeric logic. + # + # @!visibility private + # @!macro internal_implementation_note + class RbxAtomicReference < Rubinius::AtomicReference + alias_method :_compare_and_set, :compare_and_set + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :value, :get + alias_method :value=, :set + alias_method :swap, :get_and_set + alias_method :compare_and_swap, :compare_and_set + end + RbxAtomicReference + else + MutexAtomicReference + end + private_constant :AtomicReferenceImplementation + + # @!macro atomic_reference + class AtomicReference < AtomicReferenceImplementation + + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], get + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb new file mode 100644 index 0000000..d883aed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb @@ -0,0 +1,100 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/mutex_count_down_latch' +require 'concurrent/atomic/java_count_down_latch' + +module Concurrent + + ################################################################### + + # @!macro count_down_latch_method_initialize + # + # Create a new `CountDownLatch` with the initial `count`. + # + # @param [new] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro count_down_latch_method_wait + # + # Block on the latch until the counter reaches zero or until `timeout` is reached. 
+ # + # @param [Fixnum] timeout the number of seconds to wait for the counter or `nil` + # to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on `timeout` + + # @!macro count_down_latch_method_count_down + # + # Signal the latch to decrement the counter. Will signal all blocked threads when + # the `count` reaches zero. + + # @!macro count_down_latch_method_count + # + # The current value of the counter. + # + # @return [Fixnum] the current value of the counter + + ################################################################### + + # @!macro count_down_latch_public_api + # + # @!method initialize(count = 1) + # @!macro count_down_latch_method_initialize + # + # @!method wait(timeout = nil) + # @!macro count_down_latch_method_wait + # + # @!method count_down + # @!macro count_down_latch_method_count_down + # + # @!method count + # @!macro count_down_latch_method_count + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + CountDownLatchImplementation = case + when Concurrent.on_jruby? + JavaCountDownLatch + else + MutexCountDownLatch + end + private_constant :CountDownLatchImplementation + + # @!macro count_down_latch + # + # A synchronization object that allows one thread to wait on multiple other threads. + # The thread that will wait creates a `CountDownLatch` and sets the initial value + # (normally equal to the number of other threads). The initiating thread passes the + # latch to the other threads then waits for the other threads by calling the `#wait` + # method. Each of the other threads calls `#count_down` when done with its work. + # When the latch counter reaches zero the waiting thread is unblocked and continues + # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset. + # + # @!macro count_down_latch_public_api + # @example Waiter and Decrementer + # latch = Concurrent::CountDownLatch.new(3) + # + # waiter = Thread.new do + # latch.wait() + # puts ("Waiter released") + # end + # + # decrementer = Thread.new do + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # end + # + # [waiter, decrementer].each(&:join) + class CountDownLatch < CountDownLatchImplementation + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb new file mode 100644 index 0000000..42f5a94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb @@ -0,0 +1,128 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # A synchronization aid that allows a set of threads to all wait for each + # other to reach a common barrier point. 
+ # @example + # barrier = Concurrent::CyclicBarrier.new(3) + # jobs = Array.new(3) { |i| -> { sleep i; p done: i } } + # process = -> (i) do + # # waiting to start at the same time + # barrier.wait + # # execute job + # jobs[i].call + # # wait for others to finish + # barrier.wait + # end + # threads = 2.times.map do |i| + # Thread.new(i, &process) + # end + # + # # use main as well + # process.call 2 + # + # # here we can be sure that all jobs are processed + class CyclicBarrier < Synchronization::LockableObject + + # @!visibility private + Generation = Struct.new(:status) + private_constant :Generation + + # Create a new `CyclicBarrier` that waits for `parties` threads + # + # @param [Fixnum] parties the number of parties + # @yield an optional block that will be executed that will be executed after + # the last thread arrives and before the others are released + # + # @raise [ArgumentError] if `parties` is not an integer or is less than zero + def initialize(parties, &block) + Utility::NativeInteger.ensure_integer_and_bounds parties + Utility::NativeInteger.ensure_positive_and_no_zero parties + + super(&nil) + synchronize { ns_initialize parties, &block } + end + + # @return [Fixnum] the number of threads needed to pass the barrier + def parties + synchronize { @parties } + end + + # @return [Fixnum] the number of threads currently waiting on the barrier + def number_waiting + synchronize { @number_waiting } + end + + # Blocks on the barrier until the number of waiting threads is equal to + # `parties` or until `timeout` is reached or `reset` is called + # If a block has been passed to the constructor, it will be executed once by + # the last arrived thread before releasing the others + # @param [Fixnum] timeout the number of seconds to wait for the counter or + # `nil` to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on + # `timeout` or on `reset` or if the barrier is broken + def wait(timeout = nil) + synchronize do + + return false unless @generation.status == :waiting + + @number_waiting += 1 + + if @number_waiting == @parties + @action.call if @action + ns_generation_done @generation, :fulfilled + true + else + generation = @generation + if ns_wait_until(timeout) { generation.status != :waiting } + generation.status == :fulfilled + else + ns_generation_done generation, :broken, false + false + end + end + end + end + + # resets the barrier to its initial state + # If there is at least one waiting thread, it will be woken up, the `wait` + # method will return false and the barrier will be broken + # If the barrier is broken, this method restores it to the original state + # + # @return [nil] + def reset + synchronize { ns_generation_done @generation, :reset } + end + + # A barrier can be broken when: + # - a thread called the `reset` method while at least one other thread was waiting + # - at least one thread timed out on `wait` method + # + # A broken barrier can be restored using `reset` it's safer to create a new one + # @return [Boolean] true if the barrier is broken otherwise false + def broken? 
+ synchronize { @generation.status != :waiting } + end + + protected + + def ns_generation_done(generation, status, continue = true) + generation.status = status + ns_next_generation if continue + ns_broadcast + end + + def ns_next_generation + @generation = Generation.new(:waiting) + @number_waiting = 0 + end + + def ns_initialize(parties, &block) + @parties = parties + @action = block + ns_next_generation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/event.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/event.rb new file mode 100644 index 0000000..825f38a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/event.rb @@ -0,0 +1,109 @@ +require 'thread' +require 'concurrent/synchronization' + +module Concurrent + + # Old school kernel-style event reminiscent of Win32 programming in C++. + # + # When an `Event` is created it is in the `unset` state. Threads can choose to + # `#wait` on the event, blocking until released by another thread. When one + # thread wants to alert all blocking threads it calls the `#set` method which + # will then wake up all listeners. Once an `Event` has been set it remains set. + # New threads calling `#wait` will return immediately. An `Event` may be + # `#reset` at any time once it has been set. + # + # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx + # @example + # event = Concurrent::Event.new + # + # t1 = Thread.new do + # puts "t1 is waiting" + # event.wait(1) + # puts "event ocurred" + # end + # + # t2 = Thread.new do + # puts "t2 calling set" + # event.set + # end + # + # [t1, t2].each(&:join) + # + # # prints: + # # t2 calling set + # # t1 is waiting + # # event occurred + class Event < Synchronization::LockableObject + + # Creates a new `Event` in the unset state. Threads calling `#wait` on the + # `Event` will block. + def initialize + super + synchronize { ns_initialize } + end + + # Is the object in the set state? + # + # @return [Boolean] indicating whether or not the `Event` has been set + def set? + synchronize { @set } + end + + # Trigger the event, setting the state to `set` and releasing all threads + # waiting on the event. Has no effect if the `Event` has already been set. + # + # @return [Boolean] should always return `true` + def set + synchronize { ns_set } + end + + def try? + synchronize { @set ? false : ns_set } + end + + # Reset a previously set event back to the `unset` state. + # Has no effect if the `Event` has not yet been set. + # + # @return [Boolean] should always return `true` + def reset + synchronize do + if @set + @set = false + @iteration +=1 + end + true + end + end + + # Wait a given number of seconds for the `Event` to be set by another + # thread. Will wait forever when no `timeout` value is given. Returns + # immediately if the `Event` has already been set. 
+ # + # @return [Boolean] true if the `Event` was set before timeout else false + def wait(timeout = nil) + synchronize do + unless @set + iteration = @iteration + ns_wait_until(timeout) { iteration < @iteration || @set } + else + true + end + end + end + + protected + + def ns_set + unless @set + @set = true + ns_broadcast + end + true + end + + def ns_initialize + @set = false + @iteration = 0 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb new file mode 100644 index 0000000..cb5b35a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb @@ -0,0 +1,42 @@ +if Concurrent.on_jruby? + + module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class JavaCountDownLatch + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds(count) + Utility::NativeInteger.ensure_positive(count) + @latch = java.util.concurrent.CountDownLatch.new(count) + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly { @latch.await } + result = true + else + Synchronization::JRuby.sleep_interruptibly do + result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + end + + # @!macro count_down_latch_method_count_down + def count_down + @latch.countDown + end + + # @!macro count_down_latch_method_count + def count + @latch.getCount + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb new file mode 100644 index 0000000..b41018f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb @@ -0,0 +1,37 @@ +require 'concurrent/atomic/abstract_thread_local_var' + +if Concurrent.on_jruby? + + module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class JavaThreadLocalVar < AbstractThreadLocalVar + + # @!macro thread_local_var_method_get + def value + value = @var.get + + if value.nil? 
+ default + elsif value == NULL + nil + else + value + end + end + + # @!macro thread_local_var_method_set + def value=(value) + @var.set(value) + end + + protected + + # @!visibility private + def allocate_storage + @var = java.lang.ThreadLocal.new + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb new file mode 100644 index 0000000..a033de4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb @@ -0,0 +1,62 @@ +require 'concurrent/synchronization' + +module Concurrent + + # @!macro atomic_boolean + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicBoolean < Synchronization::LockableObject + + # @!macro atomic_boolean_method_initialize + def initialize(initial = false) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_boolean_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_boolean_method_value_set + def value=(value) + synchronize { @value = !!value } + end + + # @!macro atomic_boolean_method_true_question + def true? + synchronize { @value } + end + + # @!macro atomic_boolean_method_false_question + def false? + synchronize { !@value } + end + + # @!macro atomic_boolean_method_make_true + def make_true + synchronize { ns_make_value(true) } + end + + # @!macro atomic_boolean_method_make_false + def make_false + synchronize { ns_make_value(false) } + end + + protected + + # @!visibility private + def ns_initialize(initial) + @value = !!initial + end + + private + + # @!visibility private + def ns_make_value(value) + old = @value + @value = value + old != @value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb new file mode 100644 index 0000000..77b91d2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb @@ -0,0 +1,75 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro atomic_fixnum + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicFixnum < Synchronization::LockableObject + + # @!macro atomic_fixnum_method_initialize + def initialize(initial = 0) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_fixnum_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_fixnum_method_value_set + def value=(value) + synchronize { ns_set(value) } + end + + # @!macro atomic_fixnum_method_increment + def increment(delta = 1) + synchronize { ns_set(@value + delta.to_i) } + end + + alias_method :up, :increment + + # @!macro atomic_fixnum_method_decrement + def decrement(delta = 1) + synchronize { ns_set(@value - delta.to_i) } + end + + alias_method :down, :decrement + + # @!macro atomic_fixnum_method_compare_and_set + def compare_and_set(expect, update) + synchronize do + if @value == expect.to_i + @value = update.to_i + true + else + false + end + end + end + + # @!macro atomic_fixnum_method_update + def update + synchronize do + @value = yield @value + end + end + + protected + + # 
@!visibility private + def ns_initialize(initial) + ns_set(initial) + end + + private + + # @!visibility private + def ns_set(value) + Utility::NativeInteger.ensure_integer_and_bounds value + @value = value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb new file mode 100644 index 0000000..e99744c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb @@ -0,0 +1,44 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class MutexCountDownLatch < Synchronization::LockableObject + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds count + Utility::NativeInteger.ensure_positive count + + super() + synchronize { ns_initialize count } + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + synchronize { ns_wait_until(timeout) { @count == 0 } } + end + + # @!macro count_down_latch_method_count_down + def count_down + synchronize do + @count -= 1 if @count > 0 + ns_broadcast if @count == 0 + end + end + + # @!macro count_down_latch_method_count + def count + synchronize { @count } + end + + protected + + def ns_initialize(count) + @count = count + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb new file mode 100644 index 0000000..2042f73 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb @@ -0,0 +1,115 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro semaphore + # @!visibility private + # @!macro internal_implementation_note + class MutexSemaphore < Synchronization::LockableObject + + # @!macro semaphore_method_initialize + def initialize(count) + Utility::NativeInteger.ensure_integer_and_bounds count + + super() + synchronize { ns_initialize count } + end + + # @!macro semaphore_method_acquire + def acquire(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + try_acquire_timed(permits, nil) + nil + end + end + + # @!macro semaphore_method_available_permits + def available_permits + synchronize { @free } + end + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + def drain_permits + synchronize do + @free.tap { |_| @free = 0 } + end + end + + # @!macro semaphore_method_try_acquire + def try_acquire(permits = 1, timeout = nil) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + if timeout.nil? 
+ try_acquire_now(permits) + else + try_acquire_timed(permits, timeout) + end + end + end + + # @!macro semaphore_method_release + def release(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + @free += permits + permits.times { ns_signal } + end + nil + end + + # Shrinks the number of available permits by the indicated reduction. + # + # @param [Fixnum] reduction Number of permits to remove. + # + # @raise [ArgumentError] if `reduction` is not an integer or is negative + # + # @raise [ArgumentError] if `@free` - `@reduction` is less than zero + # + # @return [nil] + # + # @!visibility private + def reduce_permits(reduction) + Utility::NativeInteger.ensure_integer_and_bounds reduction + Utility::NativeInteger.ensure_positive reduction + + synchronize { @free -= reduction } + nil + end + + protected + + # @!visibility private + def ns_initialize(count) + @free = count + end + + private + + # @!visibility private + def try_acquire_now(permits) + if @free >= permits + @free -= permits + true + else + false + end + end + + # @!visibility private + def try_acquire_timed(permits, timeout) + ns_wait_until(timeout) { try_acquire_now(permits) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb new file mode 100644 index 0000000..246f21a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb @@ -0,0 +1,254 @@ +require 'thread' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # Ruby read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And if the "write" lock is taken, any readers who come along will have to wait) + # + # If readers are already active when a writer comes along, the writer will wait for + # all the readers to finish before going ahead. + # Any additional readers that come when the writer is already waiting, will also + # wait (so writers are not starved). + # + # This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @note Do **not** try to acquire the write lock while already holding a read lock + # **or** try to acquire the write lock while you already have it. + # This will lead to deadlock + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReadWriteLock < Synchronization::Object + + # @!visibility private + WAITING_WRITER = 1 << 15 + + # @!visibility private + RUNNING_WRITER = 1 << 29 + + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + safe_initialization! 
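    # Editor's note — a worked decoding sketch of the packed @Counter value
    # (illustrative only, derived from the constants defined above; not part of
    # the upstream gem source): the low 15 bits count running readers, the next
    # bits count waiting writers, and any value >= RUNNING_WRITER means a writer
    # currently holds the lock.
    #
    #   c = 3 + 2 * WAITING_WRITER            # 3 running readers, 2 waiting writers
    #   c & MAX_READERS                       #=> 3      (running readers)
    #   (c & MAX_WRITERS) / WAITING_WRITER    #=> 2      (waiting writers)
    #   c >= RUNNING_WRITER                   #=> false  (no writer is running)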
+ + # Implementation notes: + # A goal is to make the uncontended path for both readers/writers lock-free + # Only if there is reader-writer or writer-writer contention, should locks be used + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each reader increments the counter by 1 when acquiring a read lock + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + + # Create a new `ReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadLock = Synchronization::Lock.new + @WriteLock = Synchronization::Lock.new + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock has been acquired will block until + # it is released. Will not block if other read locks have been acquired. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def acquire_read_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting when we first queue up, we need to wait + if waiting_writer?(c) + @ReadLock.wait_until { !waiting_writer? } + + # after a reader has waited once, they are allowed to "barge" ahead of waiting writers + # but if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadLock.wait_until { !running_writer? } + else + return if @Counter.compare_and_set(c, c+1) + end + end + else + break if @Counter.compare_and_set(c, c+1) + end + end + true + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + while true + c = @Counter.value + if @Counter.compare_and_set(c, c-1) + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_writer?(c) && running_readers(c) == 1 + @WriteLock.signal + end + break + end + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. 
+ def acquire_write_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + if c == 0 # no readers OR writers running + # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead + break if @Counter.compare_and_set(0, RUNNING_WRITER) + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here, OR another writer could increment + @WriteLock.wait_until do + # So we have to do another check inside the synchronized section + # If a writer OR reader is running, then go to sleep + c = @Counter.value + !running_writer?(c) && !running_readers?(c) + end + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # Then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + end + break + end + end + true + end + + # Release a previously acquired write lock. + # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + return true unless running_writer? + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadLock.broadcast + @WriteLock.signal if waiting_writers(c) > 0 + true + end + + # Queries if the write lock is held by any thread. + # + # @return [Boolean] true if the write lock is held else false` + def write_locked? + @Counter.value >= RUNNING_WRITER + end + + # Queries whether any threads are waiting to acquire the read or write lock. + # + # @return [Boolean] true if any threads are waiting for a lock else false + def has_waiters? + waiting_writer?(@Counter.value) + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) / WAITING_WRITER + end + + # @!visibility private + def waiting_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb new file mode 100644 index 0000000..42d7f3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb @@ -0,0 +1,379 @@ +require 'thread' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/errors' +require 'concurrent/synchronization' +require 'concurrent/atomic/thread_local_var' + +module Concurrent + + # Re-entrant read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And while the "write" lock is taken, no read locks can be obtained either. 
+ # Hence, the write lock can also be called an "exclusive" lock.) + # + # If another thread has taken a read lock, any thread which wants a write lock + # will block until all the readers release their locks. However, once a thread + # starts waiting to obtain a write lock, any additional readers that come along + # will also wait (so writers are not starved). + # + # A thread can acquire both a read and write lock at the same time. A thread can + # also acquire a read lock OR a write lock more than once. Only when the read (or + # write) lock is released as many times as it was acquired, will the thread + # actually let it go, allowing other threads which might have been waiting + # to proceed. Therefore the lock can be upgraded by first acquiring + # read lock and then write lock and that the lock can be downgraded by first + # having both read and write lock a releasing just the write lock. + # + # If both read and write locks are acquired by the same thread, it is not strictly + # necessary to release them in the same order they were acquired. In other words, + # the following code is legal: + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.acquire_write_lock + # lock.acquire_read_lock + # lock.release_write_lock + # # At this point, the current thread is holding only a read lock, not a write + # # lock. So other threads can take read locks, but not a write lock. + # lock.release_read_lock + # # Now the current thread is not holding either a read or write lock, so + # # another thread could potentially acquire a write lock. + # + # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReentrantReadWriteLock < Synchronization::Object + + # Implementation notes: + # + # A goal is to make the uncontended path for both readers/writers mutex-free + # Only if there is reader-writer or writer-writer contention, should mutexes be used + # Otherwise, a single CAS operation is all we need to acquire/release a lock + # + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each thread which has one OR MORE read locks increments the counter by 1 + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + # + # Additionally, each thread uses a thread-local variable to count how many times + # it has acquired a read lock, AND how many times it has acquired a write lock. 
+ # It uses a similar trick; an increment of 1 means a read lock was taken, and + # an increment of (1 << 15) means a write lock was taken + # This is what makes re-entrancy possible + # + # 2 rules are followed to ensure good liveness properties: + # 1) Once a writer has queued up and is waiting for a write lock, no other thread + # can take a lock without waiting + # 2) When a write lock is released, readers are given the "first chance" to wake + # up and acquire a read lock + # Following these rules means readers and writers tend to "take turns", so neither + # can starve the other, even under heavy contention + + # @!visibility private + READER_BITS = 15 + # @!visibility private + WRITER_BITS = 14 + + # Used with @Counter: + # @!visibility private + WAITING_WRITER = 1 << READER_BITS + # @!visibility private + RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS) + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + # Used with @HeldCount: + # @!visibility private + WRITE_LOCK_HELD = 1 << READER_BITS + # @!visibility private + READ_LOCK_MASK = WRITE_LOCK_HELD - 1 + # @!visibility private + WRITE_LOCK_MASK = MAX_WRITERS + + safe_initialization! + + # Create a new `ReentrantReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadQueue = Synchronization::Lock.new # used to queue waiting readers + @WriteQueue = Synchronization::Lock.new # used to queue waiting writers + @HeldCount = ThreadLocalVar.new(0) # indicates # of R & W locks held by this thread + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock is held by another thread, will block + # until it is released. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. 
+ def acquire_read_lock + if (held = @HeldCount.value) > 0 + # If we already have a lock, there's no need to wait + if held & READ_LOCK_MASK == 0 + # But we do need to update the counter, if we were holding a write + # lock but not a read lock + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting OR running when we first queue up, we need to wait + if waiting_or_running_writer?(c) + # Before going to sleep, check again with the ReadQueue mutex held + @ReadQueue.synchronize do + @ReadQueue.ns_wait if waiting_or_running_writer? + end + # Note: the above 'synchronize' block could have used #wait_until, + # but that waits repeatedly in a loop, checking the wait condition + # each time it wakes up (to protect against spurious wakeups) + # But we are already in a loop, which is only broken when we successfully + # acquire the lock! So we don't care about spurious wakeups, and would + # rather not pay the extra overhead of using #wait_until + + # After a reader has waited once, they are allowed to "barge" ahead of waiting writers + # But if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadQueue.synchronize do + @ReadQueue.ns_wait if running_writer? + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + end + + # Try to acquire a read lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_read_lock + if (held = @HeldCount.value) > 0 + if held & READ_LOCK_MASK == 0 + # If we hold a write lock, but not a read lock... + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + false + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + held = @HeldCount.value = @HeldCount.value - 1 + rlocks_held = held & READ_LOCK_MASK + if rlocks_held == 0 + c = @Counter.update { |counter| counter - 1 } + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_or_running_writer?(c) && running_readers(c) == 0 + @WriteQueue.signal + end + elsif rlocks_held == READ_LOCK_MASK + raise IllegalOperationError, "Cannot release a read lock which is not held" + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. 
+ def acquire_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + # if we already have a write (exclusive) lock, there's no need to wait + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + # To go ahead and take the lock without waiting, there must be no writer + # running right now, AND no writers who came before us still waiting to + # acquire the lock + # Additionally, if any read locks have been taken, we must hold all of them + if c == held + # If we successfully swap the RUNNING_WRITER bit on, then we can go ahead + if @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here + @WriteQueue.synchronize do + # So we have to do another check inside the synchronized section + # If a writer OR another reader is running, then go to sleep + c = @Counter.value + @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held + end + # Note: if you are thinking of replacing the above 'synchronize' block + # with #wait_until, read the comment in #acquire_read_lock first! + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + if !running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + end + end + end + + # Try to acquire a write lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + @HeldCount.value = held + WRITE_LOCK_HELD + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + false + end + + # Release a previously acquired write lock. 
+ # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD + wlocks_held = held & WRITE_LOCK_MASK + if wlocks_held == 0 + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadQueue.broadcast + @WriteQueue.signal if waiting_writers(c) > 0 + elsif wlocks_held == WRITE_LOCK_MASK + raise IllegalOperationError, "Cannot release a write lock which is not held" + end + true + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) >> READER_BITS + end + + # @!visibility private + def waiting_or_running_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb new file mode 100644 index 0000000..6d7601a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb @@ -0,0 +1,178 @@ +require 'thread' +require 'concurrent/atomic/abstract_thread_local_var' + +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class RubyThreadLocalVar < AbstractThreadLocalVar + + # Each thread has a (lazily initialized) array of thread-local variable values + # Each time a new thread-local var is created, we allocate an "index" for it + # For example, if the allocated index is 1, that means slot #1 in EVERY + # thread's thread-local array will be used for the value of that TLV + # + # The good thing about using a per-THREAD structure to hold values, rather + # than a per-TLV structure, is that no synchronization is needed when + # reading and writing those values (since the structure is only ever + # accessed by a single thread) + # + # Of course, when a TLV is GC'd, 1) we need to recover its index for use + # by other new TLVs (otherwise the thread-local arrays could get bigger + # and bigger with time), and 2) we need to null out all the references + # held in the now-unused slots (both to avoid blocking GC of those objects, + # and also to prevent "stale" values from being passed on to a new TLV + # when the index is reused) + # Because we need to null out freed slots, we need to keep references to + # ALL the thread-local arrays -- ARRAYS is for that + # But when a Thread is GC'd, we need to drop the reference to its thread-local + # array, so we don't leak memory + + FREE = [] + LOCK = Mutex.new + THREAD_LOCAL_ARRAYS = {} # used as a hash set + + # synchronize when not on MRI + # on MRI using lock in finalizer leads to "can't be called from trap context" error + # so the code is carefully written to be tread-safe on MRI relying on GIL + + if Concurrent.on_cruby? 
+ # @!visibility private + def self.semi_sync(&block) + block.call + end + else + # @!visibility private + def self.semi_sync(&block) + LOCK.synchronize(&block) + end + end + + private_constant :FREE, :LOCK, :THREAD_LOCAL_ARRAYS + + # @!macro thread_local_var_method_get + def value + if (array = get_threadlocal_array) + value = array[@index] + if value.nil? + default + elsif value.equal?(NULL) + nil + else + value + end + else + default + end + end + + # @!macro thread_local_var_method_set + def value=(value) + me = Thread.current + # We could keep the thread-local arrays in a hash, keyed by Thread + # But why? That would require locking + # Using Ruby's built-in thread-local storage is faster + unless (array = get_threadlocal_array(me)) + array = set_threadlocal_array([], me) + self.class.semi_sync { THREAD_LOCAL_ARRAYS[array.object_id] = array } + ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array.object_id)) + end + array[@index] = (value.nil? ? NULL : value) + value + end + + protected + + # @!visibility private + def allocate_storage + @index = FREE.pop || next_index + + ObjectSpace.define_finalizer(self, self.class.thread_local_finalizer(@index)) + end + + # @!visibility private + def self.thread_local_finalizer(index) + proc do + semi_sync do + # The cost of GC'ing a TLV is linear in the number of threads using TLVs + # But that is natural! More threads means more storage is used per TLV + # So naturally more CPU time is required to free more storage + THREAD_LOCAL_ARRAYS.each_value { |array| array[index] = nil } + # free index has to be published after the arrays are cleared + FREE.push(index) + end + end + end + + # @!visibility private + def self.thread_finalizer(id) + proc do + semi_sync do + # The thread which used this thread-local array is now gone + # So don't hold onto a reference to the array (thus blocking GC) + THREAD_LOCAL_ARRAYS.delete(id) + end + end + end + + private + + # noinspection RubyClassVariableUsageInspection + @@next = 0 + # noinspection RubyClassVariableUsageInspection + def next_index + LOCK.synchronize do + result = @@next + @@next += 1 + result + end + end + + if Thread.instance_methods.include?(:thread_variable_get) + + def get_threadlocal_array(thread = Thread.current) + thread.thread_variable_get(:__threadlocal_array__) + end + + def set_threadlocal_array(array, thread = Thread.current) + thread.thread_variable_set(:__threadlocal_array__, array) + end + + else + + def get_threadlocal_array(thread = Thread.current) + thread[:__threadlocal_array__] + end + + def set_threadlocal_array(array, thread = Thread.current) + thread[:__threadlocal_array__] = array + end + end + + # This exists only for use in testing + # @!visibility private + def value_for(thread) + if (array = get_threadlocal_array(thread)) + value = array[@index] + if value.nil? 
+ get_default + elsif value.equal?(NULL) + nil + else + value + end + else + get_default + end + end + + # @!visibility private + def get_default + if @default_block + raise "Cannot use default_for with default block" + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/semaphore.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/semaphore.rb new file mode 100644 index 0000000..1b2bd8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/semaphore.rb @@ -0,0 +1,145 @@ +require 'concurrent/atomic/mutex_semaphore' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro semaphore_method_initialize + # + # Create a new `Semaphore` with the initial `count`. + # + # @param [Fixnum] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro semaphore_method_acquire + # + # Acquires the given number of permits from this semaphore, + # blocking until all are available. + # + # @param [Fixnum] permits Number of permits to acquire + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [nil] + + # @!macro semaphore_method_available_permits + # + # Returns the current number of permits available in this semaphore. + # + # @return [Integer] + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + + # @!macro semaphore_method_try_acquire + # + # Acquires the given number of permits from this semaphore, + # only if all are available at the time of invocation or within + # `timeout` interval + # + # @param [Fixnum] permits the number of permits to acquire + # + # @param [Fixnum] timeout the number of seconds to wait for the counter + # or `nil` to return immediately + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [Boolean] `false` if no permits are available, `true` when + # acquired a permit + + # @!macro semaphore_method_release + # + # Releases the given number of permits, returning them to the semaphore. + # + # @param [Fixnum] permits Number of permits to return to the semaphore. + # + # @raise [ArgumentError] if `permits` is not a number or is less than one + # + # @return [nil] + + ################################################################### + + # @!macro semaphore_public_api + # + # @!method initialize(count) + # @!macro semaphore_method_initialize + # + # @!method acquire(permits = 1) + # @!macro semaphore_method_acquire + # + # @!method available_permits + # @!macro semaphore_method_available_permits + # + # @!method drain_permits + # @!macro semaphore_method_drain_permits + # + # @!method try_acquire(permits = 1, timeout = nil) + # @!macro semaphore_method_try_acquire + # + # @!method release(permits = 1) + # @!macro semaphore_method_release + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + SemaphoreImplementation = case + when defined?(JavaSemaphore) + JavaSemaphore + else + MutexSemaphore + end + private_constant :SemaphoreImplementation + + # @!macro semaphore + # + # A counting semaphore. Conceptually, a semaphore maintains a set of + # permits. 
Each {#acquire} blocks if necessary until a permit is + # available, and then takes it. Each {#release} adds a permit, potentially + # releasing a blocking acquirer. + # However, no actual permit objects are used; the Semaphore just keeps a + # count of the number available and acts accordingly. + # + # @!macro semaphore_public_api + # @example + # semaphore = Concurrent::Semaphore.new(2) + # + # t1 = Thread.new do + # semaphore.acquire + # puts "Thread 1 acquired semaphore" + # end + # + # t2 = Thread.new do + # semaphore.acquire + # puts "Thread 2 acquired semaphore" + # end + # + # t3 = Thread.new do + # semaphore.acquire + # puts "Thread 3 acquired semaphore" + # end + # + # t4 = Thread.new do + # sleep(2) + # puts "Thread 4 releasing semaphore" + # semaphore.release + # end + # + # [t1, t2, t3, t4].each(&:join) + # + # # prints: + # # Thread 3 acquired semaphore + # # Thread 2 acquired semaphore + # # Thread 4 releasing semaphore + # # Thread 1 acquired semaphore + # + class Semaphore < SemaphoreImplementation + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb new file mode 100644 index 0000000..100cc8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb @@ -0,0 +1,104 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/ruby_thread_local_var' +require 'concurrent/atomic/java_thread_local_var' + +module Concurrent + + ################################################################### + + # @!macro thread_local_var_method_initialize + # + # Creates a thread local variable. + # + # @param [Object] default the default value when otherwise unset + # @param [Proc] default_block Optional block that gets called to obtain the + # default value for each thread + + # @!macro thread_local_var_method_get + # + # Returns the value in the current thread's copy of this thread-local variable. + # + # @return [Object] the current value + + # @!macro thread_local_var_method_set + # + # Sets the current thread's copy of this thread-local variable to the specified value. + # + # @param [Object] value the value to set + # @return [Object] the new value + + # @!macro thread_local_var_method_bind + # + # Bind the given value to thread local storage during + # execution of the given block. + # + # @param [Object] value the value to bind + # @yield the operation to be performed with the bound variable + # @return [Object] the value + + + ################################################################### + + # @!macro thread_local_var_public_api + # + # @!method initialize(default = nil, &default_block) + # @!macro thread_local_var_method_initialize + # + # @!method value + # @!macro thread_local_var_method_get + # + # @!method value=(value) + # @!macro thread_local_var_method_set + # + # @!method bind(value, &block) + # @!macro thread_local_var_method_bind + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + ThreadLocalVarImplementation = case + when Concurrent.on_jruby? + JavaThreadLocalVar + else + RubyThreadLocalVar + end + private_constant :ThreadLocalVarImplementation + + # @!macro thread_local_var + # + # A `ThreadLocalVar` is a variable where the value is different for each thread. 
+ # Each variable may have a default value, but when you modify the variable only + # the current thread will ever see that change. + # + # @!macro thread_safe_variable_comparison + # + # @example + # v = ThreadLocalVar.new(14) + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # + # @example + # v = ThreadLocalVar.new(14) + # + # t1 = Thread.new do + # v.value #=> 14 + # v.value = 1 + # v.value #=> 1 + # end + # + # t2 = Thread.new do + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # end + # + # v.value #=> 14 + # + # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal + # + # @!macro thread_local_var_public_api + class ThreadLocalVar < ThreadLocalVarImplementation + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb new file mode 100644 index 0000000..d092aed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb @@ -0,0 +1,56 @@ +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicReference < Synchronization::LockableObject + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + + # @!macro atomic_reference_method_initialize + def initialize(value = nil) + super() + synchronize { ns_initialize(value) } + end + + # @!macro atomic_reference_method_get + def get + synchronize { @value } + end + alias_method :value, :get + + # @!macro atomic_reference_method_set + def set(new_value) + synchronize { @value = new_value } + end + alias_method :value=, :set + + # @!macro atomic_reference_method_get_and_set + def get_and_set(new_value) + synchronize do + old_value = @value + @value = new_value + old_value + end + end + alias_method :swap, :get_and_set + + # @!macro atomic_reference_method_compare_and_set + def _compare_and_set(old_value, new_value) + synchronize do + if @value.equal? old_value + @value = new_value + true + else + false + end + end + end + + protected + + def ns_initialize(value) + @value = value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb new file mode 100644 index 0000000..709a382 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb @@ -0,0 +1,28 @@ +module Concurrent + + # Special "compare and set" handling of numeric values. + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicNumericCompareAndSetWrapper + + # @!macro atomic_reference_method_compare_and_set + def compare_and_set(old_value, new_value) + if old_value.kind_of? Numeric + while true + old = get + + return false unless old.kind_of? 
Numeric + + return false unless old == old_value + + result = _compare_and_set(old, new_value) + return result if result + end + else + _compare_and_set(old_value, new_value) + end + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomics.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomics.rb new file mode 100644 index 0000000..16cbe66 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/atomics.rb @@ -0,0 +1,10 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/atomic/cyclic_barrier' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/atomic/event' +require 'concurrent/atomic/read_write_lock' +require 'concurrent/atomic/reentrant_read_write_lock' +require 'concurrent/atomic/semaphore' +require 'concurrent/atomic/thread_local_var' diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb new file mode 100644 index 0000000..50d52a6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb @@ -0,0 +1,107 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-read approach: + # observers are added and removed from a thread safe collection; every time + # a notification is required the internal data structure is copied to + # prevent concurrency issues + # + # @api private + class CopyOnNotifyObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + @observers[observer] = func + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + @observers.delete(observer) + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + synchronize do + @observers.clear + self + end + end + + # @!macro observable_count_observers + def count_observers + synchronize { @observers.count } + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + observers = duplicate_observers + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. 
+ # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + observers = duplicate_and_clear_observers + notify_to(observers, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def duplicate_and_clear_observers + synchronize do + observers = @observers.dup + @observers.clear + observers + end + end + + def duplicate_observers + synchronize { @observers.dup } + end + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? + observer.send(function, *args) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb new file mode 100644 index 0000000..3f3f7cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb @@ -0,0 +1,111 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-write approach: + # every time an observer is added or removed the whole internal data structure is + # duplicated and replaced with a new one. + # + # @api private + class CopyOnWriteObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + new_observers = @observers.dup + new_observers[observer] = func + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + new_observers = @observers.dup + new_observers.delete(observer) + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + self.observers = {} + self + end + + # @!macro observable_count_observers + def count_observers + observers.count + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. + # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + old = clear_observers_and_return_old + notify_to(old, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? 
+ observer.send(function, *args) + end + end + + def observers + synchronize { @observers } + end + + def observers=(new_set) + synchronize { @observers = new_set } + end + + def clear_observers_and_return_old + synchronize do + old_observers = @observers + @observers = {} + old_observers + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..2be9e43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb @@ -0,0 +1,84 @@ +if Concurrent.on_jruby? + + module Concurrent + module Collection + + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class JavaNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + if [:min, :low].include?(order) + @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity + else + @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder()) + end + end + + # @!macro priority_queue_method_clear + def clear + @queue.clear + true + end + + # @!macro priority_queue_method_delete + def delete(item) + found = false + while @queue.remove(item) do + found = true + end + found + end + + # @!macro priority_queue_method_empty + def empty? + @queue.size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.contains(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @queue.size + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + @queue.peek + end + + # @!macro priority_queue_method_pop + def pop + @queue.poll + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @queue.add(item) + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb new file mode 100644 index 0000000..d003d3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb @@ -0,0 +1,158 @@ +module Concurrent + + # @!macro warn.edge + class LockFreeStack < Synchronization::Object + + safe_initialization! + + class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? 
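      # Editor's note — a minimal usage sketch for the enclosing LockFreeStack
      # (illustrative only, not part of the upstream gem source; this is an edge
      # API, so the exact surface may change):
      #
      #   stack = Concurrent::LockFreeStack.new
      #   stack.push(1).push(2)   # push returns self, so calls can be chained
      #   stack.pop               #=> 2 (LIFO order)
      #   stack.peek.value        #=> 1 (peek returns the head Node, not a value)
      #   stack.to_a              #=> [1] (Enumerable over the remaining values)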
+ + # @return [Node] + attr_reader :next_node + + # @return [Object] + attr_reader :value + + # @!visibility private + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value + + def initialize(value, next_node) + @value = value + @next_node = next_node + end + + singleton_class.send :alias_method, :[], :new + end + + # The singleton for empty node + EMPTY = Node[nil, nil] + def EMPTY.next_node + self + end + + attr_atomic(:head) + private :head, :head=, :swap_head, :compare_and_set_head, :update_head + + # @!visibility private + def self.of1(value) + new Node[value, EMPTY] + end + + # @!visibility private + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end + + # @param [Node] head + def initialize(head = EMPTY) + super() + self.head = head + end + + # @param [Node] head + # @return [true, false] + def empty?(head = head()) + head.equal? EMPTY + end + + # @param [Node] head + # @param [Object] value + # @return [true, false] + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end + + # @param [Object] value + # @return [self] + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] + end + end + + # @return [Node] + def peek + head + end + + # @param [Node] head + # @return [true, false] + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end + + # @return [Object] + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node + end + end + + # @param [Node] head + # @return [true, false] + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end + + include Enumerable + + # @param [Node] head + # @return [self] + def each(head = nil) + return to_enum(:each, head) unless block_given? + it = head || peek + until it.equal?(EMPTY) + yield it.value + it = it.next_node + end + self + end + + # @return [true, false] + def clear + while true + current_head = head + return false if current_head == EMPTY + return true if compare_and_set_head current_head, EMPTY + end + end + + # @param [Node] head + # @return [true, false] + def clear_if(head) + compare_and_set_head head, EMPTY + end + + # @param [Node] head + # @param [Node] new_head + # @return [true, false] + def replace_if(head, new_head) + compare_and_set_head head, new_head + end + + # @return [self] + # @yield over the cleared stack + # @yieldparam [Object] value + def clear_each(&block) + while true + current_head = head + return self if current_head == EMPTY + if compare_and_set_head current_head, EMPTY + each current_head, &block + return self + end + end + end + + # @return [String] Short string representation. 
+ def to_s + format '%s %s>', super[0..-2], to_a.to_s + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb new file mode 100644 index 0000000..dc51893 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb @@ -0,0 +1,927 @@ +require 'concurrent/constants' +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/adder' +require 'concurrent/thread_safe/util/cheap_lockable' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module Collection + + # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 + # + # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose + # size exceeds a threshold). + # + # A hash table supporting full concurrency of retrievals and high expected + # concurrency for updates. However, even though all operations are + # thread-safe, retrieval operations do _not_ entail locking, and there is + # _not_ any support for locking the entire table in a way that prevents all + # access. + # + # Retrieval operations generally do not block, so may overlap with update + # operations. Retrievals reflect the results of the most recently _completed_ + # update operations holding upon their onset. (More formally, an update + # operation for a given key bears a _happens-before_ relation with any (non + # +nil+) retrieval for that key reporting the updated value.) For aggregate + # operations such as +clear()+, concurrent retrievals may reflect insertion or + # removal of only some entries. Similarly, the +each_pair+ iterator yields + # elements reflecting the state of the hash table at some point at or since + # the start of the +each_pair+. Bear in mind that the results of aggregate + # status methods including +size()+ and +empty?+} are typically useful only + # when a map is not undergoing concurrent updates in other threads. Otherwise + # the results of these methods reflect transient states that may be adequate + # for monitoring or estimation purposes, but not for program control. + # + # The table is dynamically expanded when there are too many collisions (i.e., + # keys that have distinct hash codes but fall into the same slot modulo the + # table size), with the expected average effect of maintaining roughly two + # bins per mapping (corresponding to a 0.75 load factor threshold for + # resizing). There may be much variance around this average as mappings are + # added and removed, but overall, this maintains a commonly accepted + # time/space tradeoff for hash tables. However, resizing this or any other + # kind of hash table may be a relatively slow operation. When possible, it is + # a good idea to provide a size estimate as an optional :initial_capacity + # initializer argument. 
An additional optional :load_factor constructor + # argument provides a further means of customizing initial table capacity by + # specifying the table density to be used in calculating the amount of space + # to allocate for the given number of elements. Note that using many keys with + # exactly the same +hash+ is a sure way to slow down performance of any hash + # table. + # + # ## Design overview + # + # The primary design goal of this hash table is to maintain concurrent + # readability (typically method +[]+, but also iteration and related methods) + # while minimizing update contention. Secondary goals are to keep space + # consumption about the same or better than plain +Hash+, and to support high + # initial insertion rates on an empty table by many threads. + # + # Each key-value mapping is held in a +Node+. The validation-based approach + # explained below leads to a lot of code sprawl because retry-control + # precludes factoring into smaller methods. + # + # The table is lazily initialized to a power-of-two size upon the first + # insertion. Each bin in the table normally contains a list of +Node+s (most + # often, the list has only zero or one +Node+). Table accesses require + # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are + # always accurately traversable under volatile reads, so long as lookups check + # hash code and non-nullness of value before checking key equality. + # + # We use the top two bits of +Node+ hash fields for control purposes -- they + # are available anyway because of addressing constraints. As explained further + # below, these top bits are used as follows: + # + # - 00 - Normal + # - 01 - Locked + # - 11 - Locked and may have a thread waiting for lock + # - 10 - +Node+ is a forwarding node + # + # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, + # except for forwarding nodes, for which the lower bits are zero (and so + # always have hash field == +MOVED+). + # + # Insertion (via +[]=+ or its variants) of the first node in an empty bin is + # performed by just CASing it to the bin. This is by far the most common case + # for put operations under most key/hash distributions. Other update + # operations (insert, delete, and replace) require locks. We do not want to + # waste the space required to associate a distinct lock object with each bin, + # so instead use the first node of a bin list itself as a lock. Blocking + # support for these locks relies +Concurrent::ThreadSafe::Util::CheapLockable. However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. + # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. 
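+    # In outline, a locked update therefore looks like this (illustrative
+    # pseudo-Ruby only, condensed from the helpers defined further below):
+    #
+    #   node = table.volatile_get(i)              # first node of bin i
+    #   node.try_lock_via_hash(node.hash) do
+    #     if table.volatile_get(i) == node        # still the head? otherwise the caller retries
+    #       # scan the list and insert/replace/delete under the bin lock
+    #     end
+    #   end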
+ # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. 
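+    # For a reader, hitting a forwarding node is cheap: the forwarder's key
+    # field holds the next table, so the lookup simply continues there
+    # (illustrative pseudo-Ruby, condensed from +get_or_default+ below):
+    #
+    #   while node
+    #     if node.hash == MOVED                    # forwarding node
+    #       table = node.key                       # key holds the next table
+    #       node  = table.volatile_get_by_hash(hash)
+    #     elsif node.matches?(key, hash) && NULL != (value = node.value)
+    #       return value
+    #     else
+    #       node = node.next
+    #     end
+    #   end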
+ # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. + # + # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + # + # @!visibility private + class AtomicReferenceMapBackend + + # @!visibility private + class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. + # + # @!visibility private + class Node + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :hash, :value, :next + + include Concurrent::ThreadSafe::Util::CheapLockable + + bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? 
Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? + spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_acquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_acquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. + TRANSFER_BUFFER_SIZE = 32 + + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. + + # Table initialization and resizing control. When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Concurrent::ThreadSafe::Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? 
MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + + def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? 
+ value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
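+      # Condensed, the shared shape of these methods is (illustrative only;
+      # the real implementations below add counting and resize checks):
+      #
+      #   current_table = table || initialize_table                         # 1. create table if needed
+      #   while true
+      #     if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
+      #       break if current_table.cas_new_node(i, hash, key, value)      # 2. empty bin: CAS in a new node
+      #     elsif node.hash == MOVED
+      #       current_table = node.key                                      # 3. stale bin: follow the forwarder
+      #     else
+      #       # 4. lock the first node, re-validate, then scan and add or update
+      #     end
+      #   end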
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + 
check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Concurrent::ThreadSafe::Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. + def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= ::Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. + def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb new file mode 100644 index 0000000..903c1f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb @@ -0,0 +1,66 @@ +require 'thread' +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class 
MriMapBackend < NonConcurrentMapBackend + + def initialize(options = nil) + super(options) + @write_lock = Mutex.new + end + + def []=(key, value) + @write_lock.synchronize { super } + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) # fast non-blocking path for the most likely case + stored_value + else + @write_lock.synchronize { super } + end + end + + def compute_if_present(key) + @write_lock.synchronize { super } + end + + def compute(key) + @write_lock.synchronize { super } + end + + def merge_pair(key, value) + @write_lock.synchronize { super } + end + + def replace_pair(key, old_value, new_value) + @write_lock.synchronize { super } + end + + def replace_if_exists(key, new_value) + @write_lock.synchronize { super } + end + + def get_and_set(key, value) + @write_lock.synchronize { super } + end + + def delete(key) + @write_lock.synchronize { super } + end + + def delete_pair(key, value) + @write_lock.synchronize { super } + end + + def clear + @write_lock.synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb new file mode 100644 index 0000000..e7c62e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb @@ -0,0 +1,140 @@ +require 'concurrent/constants' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class NonConcurrentMapBackend + + # WARNING: all public methods of the class must operate on the @backend + # directly without calling each other. This is important because of the + # SynchronizedMapBackend which uses a non-reentrant mutex for performance + # reasons. 
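+      # For example, #replace_pair below checks the stored value through the
+      # private #pair? helper (a direct @backend.fetch) instead of calling the
+      # public #[]; that way SynchronizedMapBackend can wrap each public method
+      # in a single synchronize block without re-entering its non-reentrant lock.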
+ def initialize(options = nil) + @backend = {} + end + + def [](key) + @backend[key] + end + + def []=(key, value) + @backend[key] = value + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + stored_value + else + @backend[key] = yield + end + end + + def replace_pair(key, old_value, new_value) + if pair?(key, old_value) + @backend[key] = new_value + true + else + false + end + end + + def replace_if_exists(key, new_value) + if NULL != (stored_value = @backend.fetch(key, NULL)) + @backend[key] = new_value + stored_value + end + end + + def compute_if_present(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + store_computed_value(key, yield(stored_value)) + end + end + + def compute(key) + store_computed_value(key, yield(@backend[key])) + end + + def merge_pair(key, value) + if NULL == (stored_value = @backend.fetch(key, NULL)) + @backend[key] = value + else + store_computed_value(key, yield(stored_value)) + end + end + + def get_and_set(key, value) + stored_value = @backend[key] + @backend[key] = value + stored_value + end + + def key?(key) + @backend.key?(key) + end + + def delete(key) + @backend.delete(key) + end + + def delete_pair(key, value) + if pair?(key, value) + @backend.delete(key) + true + else + false + end + end + + def clear + @backend.clear + self + end + + def each_pair + dupped_backend.each_pair do |k, v| + yield k, v + end + self + end + + def size + @backend.size + end + + def get_or_default(key, default_value) + @backend.fetch(key, default_value) + end + + alias_method :_get, :[] + alias_method :_set, :[]= + private :_get, :_set + private + def initialize_copy(other) + super + @backend = {} + self + end + + def dupped_backend + @backend.dup + end + + def pair?(key, expected_value) + NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) + end + + def store_computed_value(key, new_value) + if new_value.nil? + @backend.delete(key) + nil + else + @backend[key] = new_value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb new file mode 100644 index 0000000..190c8d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb @@ -0,0 +1,82 @@ +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class SynchronizedMapBackend < NonConcurrentMapBackend + + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. 
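+      # Each public method below therefore acquires the lock exactly once and
+      # delegates to the non-concurrent superclass implementation via +super+.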
+ + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb new file mode 100644 index 0000000..694cd7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb @@ -0,0 +1,143 @@ +require 'concurrent/utility/engine' +require 'concurrent/collection/java_non_concurrent_priority_queue' +require 'concurrent/collection/ruby_non_concurrent_priority_queue' + +module Concurrent + module Collection + + # @!visibility private + # @!macro internal_implementation_note + NonConcurrentPriorityQueueImplementation = case + when Concurrent.on_jruby? + JavaNonConcurrentPriorityQueue + else + RubyNonConcurrentPriorityQueue + end + private_constant :NonConcurrentPriorityQueueImplementation + + # @!macro priority_queue + # + # A queue collection in which the elements are sorted based on their + # comparison (spaceship) operator `<=>`. Items are added to the queue + # at a position relative to their priority. On removal the element + # with the "highest" priority is removed. By default the sort order is + # from highest to lowest, but a lowest-to-highest sort order can be + # set on construction. + # + # The API is based on the `Queue` class from the Ruby standard library. + # + # The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm + # stored in an array. The algorithm is based on the work of Robert Sedgewick + # and Kevin Wayne. + # + # The JRuby native implementation is a thin wrapper around the standard + # library `java.util.NonConcurrentPriorityQueue`. + # + # When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`. + # When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`. + # + # @note This implementation is *not* thread safe. + # + # @see http://en.wikipedia.org/wiki/Priority_queue + # @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html + # + # @see http://algs4.cs.princeton.edu/24pq/index.php#2.6 + # @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html + # + # @!visibility private + class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation + + alias_method :has_priority?, :include? 
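+      # Illustrative usage (ordering follows each item's <=> comparison;
+      # :max order is the default, :min reverses it):
+      #
+      #   q = Concurrent::Collection::NonConcurrentPriorityQueue.new(order: :min)
+      #   [3, 1, 2].each { |n| q << n }
+      #   q.pop    # => 1
+      #   q.peek   # => 2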
+ + alias_method :size, :length + + alias_method :deq, :pop + alias_method :shift, :pop + + alias_method :<<, :push + alias_method :enq, :push + + # @!method initialize(opts = {}) + # @!macro priority_queue_method_initialize + # + # Create a new priority queue with no items. + # + # @param [Hash] opts the options for creating the queue + # @option opts [Symbol] :order (:max) dictates the order in which items are + # stored: from highest to lowest when `:max` or `:high`; from lowest to + # highest when `:min` or `:low` + + # @!method clear + # @!macro priority_queue_method_clear + # + # Removes all of the elements from this priority queue. + + # @!method delete(item) + # @!macro priority_queue_method_delete + # + # Deletes all items from `self` that are equal to `item`. + # + # @param [Object] item the item to be removed from the queue + # @return [Object] true if the item is found else false + + # @!method empty? + # @!macro priority_queue_method_empty + # + # Returns `true` if `self` contains no elements. + # + # @return [Boolean] true if there are no items in the queue else false + + # @!method include?(item) + # @!macro priority_queue_method_include + # + # Returns `true` if the given item is present in `self` (that is, if any + # element == `item`), otherwise returns false. + # + # @param [Object] item the item to search for + # + # @return [Boolean] true if the item is found else false + + # @!method length + # @!macro priority_queue_method_length + # + # The current length of the queue. + # + # @return [Fixnum] the number of items in the queue + + # @!method peek + # @!macro priority_queue_method_peek + # + # Retrieves, but does not remove, the head of this queue, or returns `nil` + # if this queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method pop + # @!macro priority_queue_method_pop + # + # Retrieves and removes the head of this queue, or returns `nil` if this + # queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method push(item) + # @!macro priority_queue_method_push + # + # Inserts the specified element into this priority queue. + # + # @param [Object] item the item to insert onto the queue + + # @!method self.from_list(list, opts = {}) + # @!macro priority_queue_method_from_list + # + # Create a new priority queue from the given list. + # + # @param [Enumerable] list the list to build the queue from + # @param [Hash] opts the options for creating the queue + # + # @return [NonConcurrentPriorityQueue] the newly created and populated queue + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..bdf3cba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb @@ -0,0 +1,150 @@ +module Concurrent + module Collection + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class RubyNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + @comparator = [:min, :low].include?(order) ? 
-1 : 1 + clear + end + + # @!macro priority_queue_method_clear + def clear + @queue = [nil] + @length = 0 + true + end + + # @!macro priority_queue_method_delete + def delete(item) + return false if empty? + original_length = @length + k = 1 + while k <= @length + if @queue[k] == item + swap(k, @length) + @length -= 1 + sink(k) + @queue.pop + else + k += 1 + end + end + @length != original_length + end + + # @!macro priority_queue_method_empty + def empty? + size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.include?(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @length + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + empty? ? nil : @queue[1] + end + + # @!macro priority_queue_method_pop + def pop + return nil if empty? + max = @queue[1] + swap(1, @length) + @length -= 1 + sink(1) + @queue.pop + max + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @length += 1 + @queue << item + swim(@length) + true + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + + private + + # Exchange the values at the given indexes within the internal array. + # + # @param [Integer] x the first index to swap + # @param [Integer] y the second index to swap + # + # @!visibility private + def swap(x, y) + temp = @queue[x] + @queue[x] = @queue[y] + @queue[y] = temp + end + + # Are the items at the given indexes ordered based on the priority + # order specified at construction? + # + # @param [Integer] x the first index from which to retrieve a comparable value + # @param [Integer] y the second index from which to retrieve a comparable value + # + # @return [Boolean] true if the two elements are in the correct priority order + # else false + # + # @!visibility private + def ordered?(x, y) + (@queue[x] <=> @queue[y]) == @comparator + end + + # Percolate down to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def sink(k) + while (j = (2 * k)) <= @length do + j += 1 if j < @length && ! ordered?(j, j+1) + break if ordered?(k, j) + swap(k, j) + k = j + end + end + + # Percolate up to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def swim(k) + while k > 1 && ! ordered?(k/2, k) do + swap(k, k/2) + k = k/2 + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/deprecation.rb new file mode 100644 index 0000000..35ae4b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/deprecation.rb @@ -0,0 +1,34 @@ +require 'concurrent/concern/logging' + +module Concurrent + module Concern + + # @!visibility private + # @!macro internal_implementation_note + module Deprecation + # TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed. 
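+      # Illustrative use from an including class or module:
+      #
+      #   deprecated_method :old_api, :new_api   # logs a [DEPRECATED] warning pointing callers at :new_api
+      #   deprecated 'free-form warning text'    # logs the message plus the calling line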
+ include Concern::Logging + + def deprecated(message, strip = 2) + caller_line = caller(strip).first if strip > 0 + klass = if Module === self + self + else + self.class + end + message = if strip > 0 + format("[DEPRECATED] %s\ncalled on: %s", message, caller_line) + else + format('[DEPRECATED] %s', message) + end + log WARN, klass.to_s, message + end + + def deprecated_method(old_name, new_name) + deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3 + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb new file mode 100644 index 0000000..dc172ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb @@ -0,0 +1,73 @@ +module Concurrent + module Concern + + # Object references in Ruby are mutable. This can lead to serious problems when + # the `#value` of a concurrent object is a mutable reference. Which is always the + # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type. + # Most classes in this library that expose a `#value` getter method do so using the + # `Dereferenceable` mixin module. + # + # @!macro copy_options + module Dereferenceable + # NOTE: This module is going away in 2.0. In the mean time we need it to + # play nicely with the synchronization layer. This means that the + # including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Return the value this object represents after applying the options specified + # by the `#set_deref_options` method. + # + # @return [Object] the current value of the object + def value + synchronize { apply_deref_options(@value) } + end + alias_method :deref, :value + + protected + + # Set the internal value of this object + # + # @param [Object] value the new value + def value=(value) + synchronize{ @value = value } + end + + # @!macro dereferenceable_set_deref_options + # Set the options which define the operations #value performs before + # returning data to the caller (dereferencing). + # + # @note Most classes that include this module will call `#set_deref_options` + # from within the constructor, thus allowing these options to be set at + # object creation. + # + # @param [Hash] opts the options defining dereference behavior. + # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def set_deref_options(opts = {}) + synchronize{ ns_set_deref_options(opts) } + end + + # @!macro dereferenceable_set_deref_options + # @!visibility private + def ns_set_deref_options(opts) + @dup_on_deref = opts[:dup_on_deref] || opts[:dup] + @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze] + @copy_on_deref = opts[:copy_on_deref] || opts[:copy] + @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref) + nil + end + + # @!visibility private + def apply_deref_options(value) + return nil if value.nil? 
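+        # fast path when no deref options were set; otherwise the value is
+        # passed through the copy proc, then #dup, then #freeze, in that order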
+ return value if @do_nothing_on_deref + value = @copy_on_deref.call(value) if @copy_on_deref + value = value.dup if @dup_on_deref + value = value.freeze if @freeze_on_deref + value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/logging.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/logging.rb new file mode 100644 index 0000000..2c74999 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/logging.rb @@ -0,0 +1,32 @@ +require 'logger' + +module Concurrent + module Concern + + # Include where logging is needed + # + # @!visibility private + module Logging + include Logger::Severity + + # Logs through {Concurrent.global_logger}, it can be overridden by setting @logger + # @param [Integer] level one of Logger::Severity constants + # @param [String] progname e.g. a path of an Actor + # @param [String, nil] message when nil block is used to generate the message + # @yieldreturn [String] a message + def log(level, progname, message = nil, &block) + #NOTE: Cannot require 'concurrent/configuration' above due to circular references. + # Assume that the gem has been initialized if we've gotten this far. + logger = if defined?(@logger) && @logger + @logger + else + Concurrent.global_logger + end + logger.call level, progname, message, &block + rescue => error + $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" + + "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/obligation.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/obligation.rb new file mode 100644 index 0000000..2c9ac12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/obligation.rb @@ -0,0 +1,220 @@ +require 'thread' +require 'timeout' + +require 'concurrent/atomic/event' +require 'concurrent/concern/dereferenceable' + +module Concurrent + module Concern + + module Obligation + include Concern::Dereferenceable + # NOTE: The Dereferenceable module is going away in 2.0. In the mean time + # we need it to place nicely with the synchronization layer. This means + # that the including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Has the obligation been fulfilled? + # + # @return [Boolean] + def fulfilled? + state == :fulfilled + end + alias_method :realized?, :fulfilled? + + # Has the obligation been rejected? + # + # @return [Boolean] + def rejected? + state == :rejected + end + + # Is obligation completion still pending? + # + # @return [Boolean] + def pending? + state == :pending + end + + # Is the obligation still unscheduled? + # + # @return [Boolean] + def unscheduled? + state == :unscheduled + end + + # Has the obligation completed processing? + # + # @return [Boolean] + def complete? + [:fulfilled, :rejected].include? state + end + + # Is the obligation still awaiting completion of processing? + # + # @return [Boolean] + def incomplete? + ! complete? + end + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. + # + # @param [Numeric] timeout the maximum time in seconds to wait. 
+ # @return [Object] see Dereferenceable#deref + def value(timeout = nil) + wait timeout + deref + end + + # Wait until obligation is complete or the timeout has been reached. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + def wait(timeout = nil) + event.wait(timeout) if timeout != 0 && incomplete? + self + end + + # Wait until obligation is complete or the timeout is reached. Will re-raise + # any exceptions raised during processing (but will not raise an exception + # on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + # @raise [Exception] raises the reason when rejected + def wait!(timeout = nil) + wait(timeout).tap { raise self if rejected? } + end + alias_method :no_error!, :wait! + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. Will re-raise any exceptions + # raised during processing (but will not raise an exception on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + # @raise [Exception] raises the reason when rejected + def value!(timeout = nil) + wait(timeout) + if rejected? + raise self + else + deref + end + end + + # The current state of the obligation. + # + # @return [Symbol] the current state + def state + synchronize { @state } + end + + # If an exception was raised during processing this will return the + # exception object. Will return `nil` when the state is pending or if + # the obligation has been successfully fulfilled. + # + # @return [Exception] the exception raised during processing or `nil` + def reason + synchronize { @reason } + end + + # @example allows Obligation to be risen + # rejected_ivar = Ivar.new.fail + # raise rejected_ivar + def exception(*args) + raise 'obligation is not rejected' unless rejected? + reason.exception(*args) + end + + protected + + # @!visibility private + def get_arguments_from(opts = {}) + [*opts.fetch(:args, [])] + end + + # @!visibility private + def init_obligation + @event = Event.new + @value = @reason = nil + end + + # @!visibility private + def event + @event + end + + # @!visibility private + def set_state(success, value, reason) + if success + @value = value + @state = :fulfilled + else + @reason = reason + @state = :rejected + end + end + + # @!visibility private + def state=(value) + synchronize { ns_set_state(value) } + end + + # Atomic compare and set operation + # State is set to `next_state` only if `current state == expected_current`. + # + # @param [Symbol] next_state + # @param [Symbol] expected_current + # + # @return [Boolean] true is state is changed, false otherwise + # + # @!visibility private + def compare_and_set_state(next_state, *expected_current) + synchronize do + if expected_current.include? @state + @state = next_state + true + else + false + end + end + end + + # Executes the block within mutex if current state is included in expected_states + # + # @return block value if executed, false otherwise + # + # @!visibility private + def if_state(*expected_states) + synchronize do + raise ArgumentError.new('no block given') unless block_given? + + if expected_states.include? @state + yield + else + false + end + end + end + + protected + + # Am I in the current state? 
+ # + # @param [Symbol] expected The state to check against + # @return [Boolean] true if in the expected state else false + # + # @!visibility private + def ns_check_state?(expected) + @state == expected + end + + # @!visibility private + def ns_set_state(value) + @state = value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/observable.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/observable.rb new file mode 100644 index 0000000..b513271 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concern/observable.rb @@ -0,0 +1,110 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/collection/copy_on_write_observer_set' + +module Concurrent + module Concern + + # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one + # of the most useful design patterns. + # + # The workflow is very simple: + # - an `observer` can register itself to a `subject` via a callback + # - many `observers` can be registered to the same `subject` + # - the `subject` notifies all registered observers when its status changes + # - an `observer` can deregister itself when is no more interested to receive + # event notifications + # + # In a single threaded environment the whole pattern is very easy: the + # `subject` can use a simple data structure to manage all its subscribed + # `observer`s and every `observer` can react directly to every event without + # caring about synchronization. + # + # In a multi threaded environment things are more complex. The `subject` must + # synchronize the access to its data structure and to do so currently we're + # using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet} + # and {Concurrent::Concern::CopyOnNotifyObserverSet}. + # + # When implementing and `observer` there's a very important rule to remember: + # **there are no guarantees about the thread that will execute the callback** + # + # Let's take this example + # ``` + # class Observer + # def initialize + # @count = 0 + # end + # + # def update + # @count += 1 + # end + # end + # + # obs = Observer.new + # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) } + # # execute [obj1, obj2, obj3, obj4] + # ``` + # + # `obs` is wrong because the variable `@count` can be accessed by different + # threads at the same time, so it should be synchronized (using either a Mutex + # or an AtomicFixum) + module Observable + + # @!macro observable_add_observer + # + # Adds an observer to this set. If a block is passed, the observer will be + # created by this method and no other params should be passed. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # Default is :update + # @return [Object] the added observer + def add_observer(observer = nil, func = :update, &block) + observers.add_observer(observer, func, &block) + end + + # As `#add_observer` but can be used for chaining. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. 
+ # @return [Observable] self + def with_observer(observer = nil, func = :update, &block) + add_observer(observer, func, &block) + self + end + + # @!macro observable_delete_observer + # + # Remove `observer` as an observer on this object so that it will no + # longer receive notifications. + # + # @param [Object] observer the observer to remove + # @return [Object] the deleted observer + def delete_observer(observer) + observers.delete_observer(observer) + end + + # @!macro observable_delete_observers + # + # Remove all observers associated with this object. + # + # @return [Observable] self + def delete_observers + observers.delete_observers + self + end + + # @!macro observable_count_observers + # + # Return the number of observers associated with this object. + # + # @return [Integer] the observers count + def count_observers + observers.count_observers + end + + protected + + attr_accessor :observers + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concurrent_ruby.jar b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concurrent_ruby.jar new file mode 100644 index 0000000..bd2b279 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/concurrent_ruby.jar differ diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/configuration.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/configuration.rb new file mode 100644 index 0000000..a00dc84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/configuration.rb @@ -0,0 +1,188 @@ +require 'thread' +require 'concurrent/delay' +require 'concurrent/errors' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/concern/logging' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/utility/processor_counter' + +module Concurrent + extend Concern::Logging + extend Concern::Deprecation + + autoload :Options, 'concurrent/options' + autoload :TimerSet, 'concurrent/executor/timer_set' + autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' + + # @return [Logger] Logger with provided level and output. + def self.create_simple_logger(level = Logger::FATAL, output = $stderr) + # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking + lambda do |severity, progname, message = nil, &block| + return false if severity < level + + message = block ? block.call : message + formatted_message = case message + when String + message + when Exception + format "%s (%s)\n%s", + message.message, message.class, (message.backtrace || []).join("\n") + else + message.inspect + end + + output.print format "[%s] %5s -- %s: %s\n", + Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), + Logger::SEV_LABEL[severity], + progname, + formatted_message + true + end + end + + # Use logger created by #create_simple_logger to log concurrent-ruby messages. + def self.use_simple_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_simple_logger level, output + end + + # @return [Logger] Logger with provided level and output. 
+ # @deprecated + def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) + logger = Logger.new(output) + logger.level = level + logger.formatter = lambda do |severity, datetime, progname, msg| + formatted_message = case msg + when String + msg + when Exception + format "%s (%s)\n%s", + msg.message, msg.class, (msg.backtrace || []).join("\n") + else + msg.inspect + end + format "[%s] %5s -- %s: %s\n", + datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), + severity, + progname, + formatted_message + end + + lambda do |loglevel, progname, message = nil, &block| + logger.add loglevel, message, progname, &block + end + end + + # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. + # @deprecated + def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_stdlib_logger level, output + end + + # TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods + + # Suppresses all output when used for logging. + NULL_LOGGER = lambda { |level, progname, message = nil, &block| } + + # @!visibility private + GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN)) + private_constant :GLOBAL_LOGGER + + def self.global_logger + GLOBAL_LOGGER.value + end + + def self.global_logger=(value) + GLOBAL_LOGGER.value = value + end + + # @!visibility private + GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor } + private_constant :GLOBAL_FAST_EXECUTOR + + # @!visibility private + GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor } + private_constant :GLOBAL_IO_EXECUTOR + + # @!visibility private + GLOBAL_TIMER_SET = Delay.new { TimerSet.new } + private_constant :GLOBAL_TIMER_SET + + # @!visibility private + GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new + private_constant :GLOBAL_IMMEDIATE_EXECUTOR + + # Disables AtExit handlers including pool auto-termination handlers. + # When disabled it will be the application programmer's responsibility + # to ensure that the handlers are shutdown properly prior to application + # exit by calling `AtExit.run` method. + # + # @note this option should be needed only because of `at_exit` ordering + # issues which may arise when running some of the testing frameworks. + # E.g. Minitest's test-suite runs itself in `at_exit` callback which + # executes after the pools are already terminated. Then auto termination + # needs to be disabled and called manually after test-suite ends. + # @note This method should *never* be called + # from within a gem. It should *only* be used from within the main + # application and even then it should be used only when necessary. + # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841. + # + def self.disable_at_exit_handlers! + deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841." + end + + # Global thread pool optimized for short, fast *operations*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_fast_executor + GLOBAL_FAST_EXECUTOR.value + end + + # Global thread pool optimized for long, blocking (IO) *tasks*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_io_executor + GLOBAL_IO_EXECUTOR.value + end + + def self.global_immediate_executor + GLOBAL_IMMEDIATE_EXECUTOR + end + + # Global thread pool user for global *timers*. 
+ # + # @return [Concurrent::TimerSet] the thread pool + def self.global_timer_set + GLOBAL_TIMER_SET.value + end + + # General access point to global executors. + # @param [Symbol, Executor] executor_identifier symbols: + # - :fast - {Concurrent.global_fast_executor} + # - :io - {Concurrent.global_io_executor} + # - :immediate - {Concurrent.global_immediate_executor} + # @return [Executor] + def self.executor(executor_identifier) + Options.executor(executor_identifier) + end + + def self.new_fast_executor(opts = {}) + FixedThreadPool.new( + [2, Concurrent.processor_count].max, + auto_terminate: opts.fetch(:auto_terminate, true), + idletime: 60, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "fast" + ) + end + + def self.new_io_executor(opts = {}) + CachedThreadPool.new( + auto_terminate: opts.fetch(:auto_terminate, true), + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "io" + ) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/constants.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/constants.rb new file mode 100644 index 0000000..676c2af --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/constants.rb @@ -0,0 +1,8 @@ +module Concurrent + + # Various classes within allows for +nil+ values to be stored, + # so a special +NULL+ token is required to indicate the "nil-ness". + # @!visibility private + NULL = ::Object.new + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/dataflow.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/dataflow.rb new file mode 100644 index 0000000..d55f19d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/dataflow.rb @@ -0,0 +1,81 @@ +require 'concurrent/future' +require 'concurrent/atomic/atomic_fixnum' + +module Concurrent + + # @!visibility private + class DependencyCounter # :nodoc: + + def initialize(count, &block) + @counter = AtomicFixnum.new(count) + @block = block + end + + def update(time, value, reason) + if @counter.decrement == 0 + @block.call + end + end + end + + # Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available. + # {include:file:docs-source/dataflow.md} + # + # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon + # + # @yield The operation to perform once all the dependencies are met + # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow + # @yieldreturn [Object] the result of the block operation + # + # @return [Object] the result of all the operations + # + # @raise [ArgumentError] if no block is given + # @raise [ArgumentError] if any of the inputs are not `IVar`s + def dataflow(*inputs, &block) + dataflow_with(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow + + def dataflow_with(executor, *inputs, &block) + call_dataflow(:value, executor, *inputs, &block) + end + module_function :dataflow_with + + def dataflow!(*inputs, &block) + dataflow_with!(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow! + + def dataflow_with!(executor, *inputs, &block) + call_dataflow(:value!, executor, *inputs, &block) + end + module_function :dataflow_with! 
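  # A minimal usage sketch of the dataflow API defined above, assuming only
  # the documented behaviour of Concurrent::Future and Concurrent.dataflow:
  #
  #   require 'concurrent'
  #
  #   a = Concurrent::Future.execute { 1 + 1 }
  #   b = Concurrent::Future.execute { 2 + 2 }
  #
  #   # The block runs only after both inputs are fulfilled; the result is
  #   # itself a Future, so dependencies can be composed further.
  #   sum = Concurrent.dataflow(a, b) { |x, y| x + y }
  #   sum.value #=> 6
  #
  # Concurrent.dataflow! composes the same way but reads its inputs with
  # #value!, so a failure in any input is re-raised instead of yielding nil.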
+ + private + + def call_dataflow(method, executor, *inputs, &block) + raise ArgumentError.new('an executor must be provided') if executor.nil? + raise ArgumentError.new('no block given') unless block_given? + unless inputs.all? { |input| input.is_a? IVar } + raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }") + end + + result = Future.new(executor: executor) do + values = inputs.map { |input| input.send(method) } + block.call(*values) + end + + if inputs.empty? + result.execute + else + counter = DependencyCounter.new(inputs.size) { result.execute } + + inputs.each do |input| + input.add_observer counter + end + end + + result + end + module_function :call_dataflow +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/delay.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/delay.rb new file mode 100644 index 0000000..83799d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/delay.rb @@ -0,0 +1,199 @@ +require 'thread' +require 'concurrent/concern/obligation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/synchronization' + +module Concurrent + + # This file has circular require issues. It must be autoloaded here. + autoload :Options, 'concurrent/options' + + # Lazy evaluation of a block yielding an immutable result. Useful for + # expensive operations that may never be needed. It may be non-blocking, + # supports the `Concern::Obligation` interface, and accepts the injection of + # custom executor upon which to execute the block. Processing of + # block will be deferred until the first time `#value` is called. + # At that time the caller can choose to return immediately and let + # the block execute asynchronously, block indefinitely, or block + # with a timeout. + # + # When a `Delay` is created its state is set to `pending`. The value and + # reason are both `nil`. The first time the `#value` method is called the + # enclosed opration will be run and the calling thread will block. Other + # threads attempting to call `#value` will block as well. Once the operation + # is complete the *value* will be set to the result of the operation or the + # *reason* will be set to the raised exception, as appropriate. All threads + # blocked on `#value` will return. Subsequent calls to `#value` will immediately + # return the cached value. The operation will only be run once. This means that + # any side effects created by the operation will only happen once as well. + # + # `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread + # safety of the reference returned by `#value`. + # + # @!macro copy_options + # + # @!macro delay_note_regarding_blocking + # @note The default behavior of `Delay` is to block indefinitely when + # calling either `value` or `wait`, executing the delayed operation on + # the current thread. This makes the `timeout` value completely + # irrelevant. To enable non-blocking behavior, use the `executor` + # constructor option. This will cause the delayed operation to be + # execute on the given executor, allowing the call to timeout. + # + # @see Concurrent::Concern::Dereferenceable + class Delay < Synchronization::LockableObject + include Concern::Obligation + + # NOTE: Because the global thread pools are lazy-loaded with these objects + # there is a performance hit every time we post a new task to one of these + # thread pools. 
Subsequently it is critical that `Delay` perform as fast + # as possible post-completion. This class has been highly optimized using + # the benchmark script `examples/lazy_and_delay.rb`. Do NOT attempt to + # DRY-up this class or perform other refactoring with running the + # benchmarks and ensuring that performance is not negatively impacted. + + # Create a new `Delay` in the `:pending` state. + # + # @!macro executor_and_deref_options + # + # @yield the delayed operation to perform + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(&nil) + synchronize { ns_initialize(opts, &block) } + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception this method will return nil. The execption object + # can be accessed via the `#reason` method. + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # + # @!macro delay_note_regarding_blocking + def value(timeout = nil) + if @executor # TODO (pitr 12-Sep-2015): broken unsafe read? + super + else + # this function has been optimized for performance and + # should not be modified without running new benchmarks + synchronize do + execute = @evaluation_started = true unless @evaluation_started + if execute + begin + set_state(true, @task.call, nil) + rescue => ex + set_state(false, nil, ex) + end + elsif incomplete? + raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay' + end + end + if @do_nothing_on_deref + @value + else + apply_deref_options(@value) + end + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception, this method will raise that exception (even when) + # the operation has already been executed). + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # @raise [Exception] when `#rejected?` raises `#reason` + # + # @!macro delay_note_regarding_blocking + def value!(timeout = nil) + if @executor + super + else + result = value + raise @reason if @reason + result + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. + # + # @param [Integer] timeout (nil) the maximum number of seconds to wait for + # the value to be computed. When `nil` the caller will block indefinitely. + # + # @return [Object] self + # + # @!macro delay_note_regarding_blocking + def wait(timeout = nil) + if @executor + execute_task_once + super(timeout) + else + value + end + self + end + + # Reconfigures the block returning the value if still `#incomplete?` + # + # @yield the delayed operation to perform + # @return [true, false] if success + def reconfigure(&block) + synchronize do + raise ArgumentError.new('no block given') unless block_given? 
+ unless @evaluation_started + @task = block + true + else + false + end + end + end + + protected + + def ns_initialize(opts, &block) + init_obligation + set_deref_options(opts) + @executor = opts[:executor] + + @task = block + @state = :pending + @evaluation_started = false + end + + private + + # @!visibility private + def execute_task_once # :nodoc: + # this function has been optimized for performance and + # should not be modified without running new benchmarks + execute = task = nil + synchronize do + execute = @evaluation_started = true unless @evaluation_started + task = @task + end + + if execute + executor = Options.executor_from_options(executor: @executor) + executor.post do + begin + result = task.call + success = true + rescue => ex + reason = ex + end + synchronize do + set_state(success, result, reason) + event.set + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/errors.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/errors.rb new file mode 100644 index 0000000..b69fec0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/errors.rb @@ -0,0 +1,69 @@ +module Concurrent + + Error = Class.new(StandardError) + + # Raised when errors occur during configuration. + ConfigurationError = Class.new(Error) + + # Raised when an asynchronous operation is cancelled before execution. + CancelledOperationError = Class.new(Error) + + # Raised when a lifecycle method (such as `stop`) is called in an improper + # sequence or when the object is in an inappropriate state. + LifecycleError = Class.new(Error) + + # Raised when an attempt is made to violate an immutability guarantee. + ImmutabilityError = Class.new(Error) + + # Raised when an operation is attempted which is not legal given the + # receiver's current state + IllegalOperationError = Class.new(Error) + + # Raised when an object's methods are called when it has not been + # properly initialized. + InitializationError = Class.new(Error) + + # Raised when an object with a start/stop lifecycle has been started an + # excessive number of times. Often used in conjunction with a restart + # policy or strategy. + MaxRestartFrequencyError = Class.new(Error) + + # Raised when an attempt is made to modify an immutable object + # (such as an `IVar`) after its final state has been set. + class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end + + # Raised by an `Executor` when it is unable to process a given task, + # possibly because of a reject policy or other internal error. + RejectedExecutionError = Class.new(Error) + + # Raised when any finite resource, such as a lock counter, exceeds its + # maximum limit/threshold. + ResourceLimitError = Class.new(Error) + + # Raised when an operation times out. + TimeoutError = Class.new(Error) + + # Aggregates multiple exceptions. 
+ class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/exchanger.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/exchanger.rb new file mode 100644 index 0000000..5a99550 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/exchanger.rb @@ -0,0 +1,352 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/maybe' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/utility/engine' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro exchanger + # + # A synchronization point at which threads can pair and swap elements within + # pairs. Each thread presents some object on entry to the exchange method, + # matches with a partner thread, and receives its partner's object on return. + # + # @!macro thread_safe_variable_comparison + # + # This implementation is very simple, using only a single slot for each + # exchanger (unlike more advanced implementations which use an "arena"). + # This approach will work perfectly fine when there are only a few threads + # accessing a single `Exchanger`. Beyond a handful of threads the performance + # will degrade rapidly due to contention on the single slot, but the algorithm + # will remain correct. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # threads = [ + # Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar" + # Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo" + # ] + # threads.each {|t| t.join(2) } + + # @!visibility private + class AbstractExchanger < Synchronization::Object + + # @!visibility private + CANCEL = ::Object.new + private_constant :CANCEL + + def initialize + super + end + + # @!macro exchanger_method_do_exchange + # + # Waits for another thread to arrive at this exchange point (unless the + # current thread is interrupted), and then transfers the given object to + # it, receiving its object in return. The timeout value indicates the + # approximate number of seconds the method should block while waiting + # for the exchange. When the timeout value is `nil` the method will + # block indefinitely. + # + # @param [Object] value the value to exchange with another thread + # @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely + # + # @!macro exchanger_method_exchange + # + # In some edge cases when a `timeout` is given a return value of `nil` may be + # ambiguous. Specifically, if `nil` is a valid value in the exchange it will + # be impossible to tell whether `nil` is the actual return value or if it + # signifies timeout. When `nil` is a valid value in the exchange consider + # using {#exchange!} or {#try_exchange} instead. + # + # @return [Object] the value exchanged by the other thread or `nil` on timeout + def exchange(value, timeout = nil) + (value = do_exchange(value, timeout)) == CANCEL ? 
nil : value + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + # + # On timeout a {Concurrent::TimeoutError} exception will be raised. + # + # @return [Object] the value exchanged by the other thread + # @raise [Concurrent::TimeoutError] on timeout + def exchange!(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + raise Concurrent::TimeoutError + else + value + end + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + # + # The return value will be a {Concurrent::Maybe} set to `Just` on success or + # `Nothing` on timeout. + # + # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with + # the item exchanged by the other thread as `#value`; on timeout a + # `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason` + # + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # result = exchanger.exchange(:foo, 0.5) + # + # if result.just? + # puts result.value #=> :bar + # else + # puts 'timeout' + # end + def try_exchange(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + Concurrent::Maybe.nothing(Concurrent::TimeoutError) + else + Concurrent::Maybe.just(value) + end + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + raise NotImplementedError + end + end + + # @!macro internal_implementation_note + # @!visibility private + class RubyExchanger < AbstractExchanger + # A simplified version of java.util.concurrent.Exchanger written by + # Doug Lea, Bill Scherer, and Michael Scott with assistance from members + # of JCP JSR-166 Expert Group and released to the public domain. It does + # not include the arena or the multi-processor spin loops. + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java + + safe_initialization! + + class Node < Concurrent::Synchronization::Object + attr_atomic :value + safe_initialization! + + def initialize(item) + super() + @Item = item + @Latch = Concurrent::CountDownLatch.new + self.value = nil + end + + def latch + @Latch + end + + def item + @Item + end + end + private_constant :Node + + def initialize + super + end + + private + + attr_atomic(:slot) + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + + # ALGORITHM + # + # From the original Java version: + # + # > The basic idea is to maintain a "slot", which is a reference to + # > a Node containing both an Item to offer and a "hole" waiting to + # > get filled in. If an incoming "occupying" thread sees that the + # > slot is null, it CAS'es (compareAndSets) a Node there and waits + # > for another to invoke exchange. That second "fulfilling" thread + # > sees that the slot is non-null, and so CASes it back to null, + # > also exchanging items by CASing the hole, plus waking up the + # > occupying thread if it is blocked. In each case CAS'es may + # > fail because a slot at first appears non-null but is null upon + # > CAS, or vice-versa. So threads may need to retry these + # > actions. + # + # This version: + # + # An exchange occurs between an "occupier" thread and a "fulfiller" thread. + # The "slot" is used to setup this interaction. 
The first thread in the + # exchange puts itself into the slot (occupies) and waits for a fulfiller. + # The second thread removes the occupier from the slot and attempts to + # perform the exchange. Removing the occupier also frees the slot for + # another occupier/fulfiller pair. + # + # Because the occupier and the fulfiller are operating independently and + # because there may be contention with other threads, any failed operation + # indicates contention. Both the occupier and the fulfiller operate within + # spin loops. Any failed actions along the happy path will cause the thread + # to repeat the loop and try again. + # + # When a timeout value is given the thread must be cognizant of time spent + # in the spin loop. The remaining time is checked every loop. When the time + # runs out the thread will exit. + # + # A "node" is the data structure used to perform the exchange. Only the + # occupier's node is necessary. It's the node used for the exchange. + # Each node has an "item," a "hole" (self), and a "latch." The item is the + # node's initial value. It never changes. It's what the fulfiller returns on + # success. The occupier's hole is where the fulfiller put its item. It's the + # item that the occupier returns on success. The latch is used for synchronization. + # Because a thread may act as either an occupier or fulfiller (or possibly + # both in periods of high contention) every thread creates a node when + # the exchange method is first called. + # + # The following steps occur within the spin loop. If any actions fail + # the thread will loop and try again, so long as there is time remaining. + # If time runs out the thread will return CANCEL. + # + # Check the slot for an occupier: + # + # * If the slot is empty try to occupy + # * If the slot is full try to fulfill + # + # Attempt to occupy: + # + # * Attempt to CAS myself into the slot + # * Go to sleep and wait to be woken by a fulfiller + # * If the sleep is successful then the fulfiller completed its happy path + # - Return the value from my hole (the value given by the fulfiller) + # * When the sleep fails (time ran out) attempt to cancel the operation + # - Attempt to CAS myself out of the hole + # - If successful there is no contention + # - Return CANCEL + # - On failure, I am competing with a fulfiller + # - Attempt to CAS my hole to CANCEL + # - On success + # - Let the fulfiller deal with my cancel + # - Return CANCEL + # - On failure the fulfiller has completed its happy path + # - Return th value from my hole (the fulfiller's value) + # + # Attempt to fulfill: + # + # * Attempt to CAS the occupier out of the slot + # - On failure loop again + # * Attempt to CAS my item into the occupier's hole + # - On failure the occupier is trying to cancel + # - Loop again + # - On success we are on the happy path + # - Wake the sleeping occupier + # - Return the occupier's item + + value = NULL if value.nil? # The sentinel allows nil to be a valid value + me = Node.new(value) # create my node in case I need to occupy + end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up + + result = loop do + other = slot + if other && compare_and_set_slot(other, nil) + # try to fulfill + if other.compare_and_set_value(nil, value) + # happy path + other.latch.count_down + break other.item + end + elsif other.nil? 
&& compare_and_set_slot(nil, me) + # try to occupy + timeout = end_at - Concurrent.monotonic_time if timeout + if me.latch.wait(timeout) + # happy path + break me.value + else + # attempt to remove myself from the slot + if compare_and_set_slot(me, nil) + break CANCEL + elsif !me.compare_and_set_value(nil, CANCEL) + # I've failed to block the fulfiller + break me.value + end + end + end + break CANCEL if timeout && Concurrent.monotonic_time >= end_at + end + + result == NULL ? nil : result + end + end + + if Concurrent.on_jruby? + + # @!macro internal_implementation_note + # @!visibility private + class JavaExchanger < AbstractExchanger + + def initialize + @exchanger = java.util.concurrent.Exchanger.new + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value) + end + else + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + rescue java.util.concurrent.TimeoutException + CANCEL + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + ExchangerImplementation = case + when Concurrent.on_jruby? + JavaExchanger + else + RubyExchanger + end + private_constant :ExchangerImplementation + + # @!macro exchanger + class Exchanger < ExchangerImplementation + + # @!method initialize + # Creates exchanger instance + + # @!method exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange + + # @!method exchange!(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + + # @!method try_exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb new file mode 100644 index 0000000..6d0b047 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb @@ -0,0 +1,128 @@ +require 'concurrent/errors' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/executor_service' +require 'concurrent/synchronization' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class AbstractExecutorService < Synchronization::LockableObject + include ExecutorService + include Concern::Deprecation + + # The set of possible fallback policies that may be set at thread pool creation. + FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze + + # @!macro executor_service_attr_reader_fallback_policy + attr_reader :fallback_policy + + attr_reader :name + + # Create a new thread pool. + def initialize(opts = {}, &block) + super(&nil) + synchronize do + @auto_terminate = opts.fetch(:auto_terminate, true) + @name = opts.fetch(:name) if opts.key?(:name) + ns_initialize(opts, &block) + end + end + + def to_s + name ? 
"#{super[0..-2]} name: #{name}>" : super + end + + # @!macro executor_service_method_shutdown + def shutdown + raise NotImplementedError + end + + # @!macro executor_service_method_kill + def kill + raise NotImplementedError + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + raise NotImplementedError + end + + # @!macro executor_service_method_running_question + def running? + synchronize { ns_running? } + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + synchronize { ns_shuttingdown? } + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + synchronize { ns_shutdown? } + end + + # @!macro executor_service_method_auto_terminate_question + def auto_terminate? + synchronize { @auto_terminate } + end + + # @!macro executor_service_method_auto_terminate_setter + def auto_terminate=(value) + deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized." + end + + private + + # Handler which executes the `fallback_policy` once the queue size + # reaches `max_queue`. + # + # @param [Array] args the arguments to the task which is being handled. + # + # @!visibility private + def handle_fallback(*args) + case fallback_policy + when :abort + raise RejectedExecutionError + when :discard + false + when :caller_runs + begin + yield(*args) + rescue => ex + # let it fail + log DEBUG, ex + end + true + else + fail "Unknown fallback policy #{fallback_policy}" + end + end + + def ns_execute(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_ns_shutdown_execution + # + # Callback method called when an orderly shutdown has completed. + # The default behavior is to signal all waiting threads. + def ns_shutdown_execution + # do nothing + end + + # @!macro executor_service_method_ns_kill_execution + # + # Callback method called when the executor has been killed. + # The default behavior is to do nothing. + def ns_kill_execution + # do nothing + end + + def ns_auto_terminate? + @auto_terminate + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb new file mode 100644 index 0000000..de50ed1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb @@ -0,0 +1,62 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # A thread pool that dynamically grows and shrinks to fit the current workload. + # New threads are created as needed, existing threads are reused, and threads + # that remain idle for too long are killed and removed from the pool. These + # pools are particularly suited to applications that perform a high volume of + # short-lived tasks. + # + # On creation a `CachedThreadPool` has zero running threads. New threads are + # created on the pool as new operations are `#post`. The size of the pool + # will grow until `#max_length` threads are in the pool or until the number + # of threads exceeds the number of running and pending operations. When a new + # operation is post to the pool the first available idle thread will be tasked + # with the new operation. + # + # Should a thread crash for any reason the thread will immediately be removed + # from the pool. 
Similarly, threads which remain idle for an extended period + # of time will be killed and reclaimed. Thus these thread pools are very + # efficient at reclaiming unused resources. + # + # The API and behavior of this class are based on Java's `CachedThreadPool` + # + # @!macro thread_pool_options + class CachedThreadPool < ThreadPoolExecutor + + # @!macro cached_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options defining pool behavior. + # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool-- + def initialize(opts = {}) + defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: 0, + max_threads: DEFAULT_MAX_POOL_SIZE, + max_queue: DEFAULT_MAX_QUEUE_SIZE } + super(defaults.merge(opts).merge(overrides)) + end + + private + + # @!macro cached_thread_pool_method_initialize + # @!visibility private + def ns_initialize(opts) + super(opts) + if Concurrent.on_jruby? + @max_queue = 0 + @executor = java.util.concurrent.Executors.newCachedThreadPool( + DaemonThreadFactory.new(ns_auto_terminate?)) + @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new) + @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/executor_service.rb new file mode 100644 index 0000000..7e34491 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/executor_service.rb @@ -0,0 +1,185 @@ +require 'concurrent/concern/logging' + +module Concurrent + + ################################################################### + + # @!macro executor_service_method_post + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + + # @!macro executor_service_method_left_shift + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Proc] task the asynchronous task to perform + # + # @return [self] returns itself + + # @!macro executor_service_method_can_overflow_question + # + # Does the task queue have a maximum size? + # + # @return [Boolean] True if the task queue has a maximum size else false. + + # @!macro executor_service_method_serialized_question + # + # Does this executor guarantee serialization of its operations? + # + # @return [Boolean] True if the executor guarantees that all operations + # will be post in the order they are received and no two operations may + # occur simultaneously. Else false. + + ################################################################### + + # @!macro executor_service_public_api + # + # @!method post(*args, &task) + # @!macro executor_service_method_post + # + # @!method <<(task) + # @!macro executor_service_method_left_shift + # + # @!method can_overflow? 
+ # @!macro executor_service_method_can_overflow_question + # + # @!method serialized? + # @!macro executor_service_method_serialized_question + + ################################################################### + + # @!macro executor_service_attr_reader_fallback_policy + # @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`. + + # @!macro executor_service_method_shutdown + # + # Begin an orderly shutdown. Tasks already in the queue will be executed, + # but no new tasks will be accepted. Has no additional effect if the + # thread pool is not running. + + # @!macro executor_service_method_kill + # + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + + # @!macro executor_service_method_wait_for_termination + # + # Block until executor shutdown is complete or until `timeout` seconds have + # passed. + # + # @note Does not initiate shutdown or termination. Either `shutdown` or `kill` + # must be called before this method (or on another thread). + # + # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete + # + # @return [Boolean] `true` if shutdown complete or false on `timeout` + + # @!macro executor_service_method_running_question + # + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + + # @!macro executor_service_method_shuttingdown_question + # + # Is the executor shuttingdown? + # + # @return [Boolean] `true` when not running and not shutdown, else `false` + + # @!macro executor_service_method_shutdown_question + # + # Is the executor shutdown? + # + # @return [Boolean] `true` when shutdown, `false` when shutting down or running + + # @!macro executor_service_method_auto_terminate_question + # + # Is the executor auto-terminate when the application exits? + # + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + # @!macro executor_service_method_auto_terminate_setter + # + # + # Set the auto-terminate behavior for this executor. + # @deprecated Has no effect + # @param [Boolean] value The new auto-terminate value to set for this executor. + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + ################################################################### + + # @!macro abstract_executor_service_public_api + # + # @!macro executor_service_public_api + # + # @!attribute [r] fallback_policy + # @!macro executor_service_attr_reader_fallback_policy + # + # @!method shutdown + # @!macro executor_service_method_shutdown + # + # @!method kill + # @!macro executor_service_method_kill + # + # @!method wait_for_termination(timeout = nil) + # @!macro executor_service_method_wait_for_termination + # + # @!method running? + # @!macro executor_service_method_running_question + # + # @!method shuttingdown? + # @!macro executor_service_method_shuttingdown_question + # + # @!method shutdown? + # @!macro executor_service_method_shutdown_question + # + # @!method auto_terminate? 
+ # @!macro executor_service_method_auto_terminate_question + # + # @!method auto_terminate=(value) + # @!macro executor_service_method_auto_terminate_setter + + ################################################################### + + # @!macro executor_service_public_api + # @!visibility private + module ExecutorService + include Concern::Logging + + # @!macro executor_service_method_post + def post(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_can_overflow_question + # + # @note Always returns `false` + def can_overflow? + false + end + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `false` + def serialized? + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb new file mode 100644 index 0000000..b665f6c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb @@ -0,0 +1,210 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # @!macro thread_pool_executor_constant_default_max_pool_size + # Default maximum number of threads that will be created in the pool. + + # @!macro thread_pool_executor_constant_default_min_pool_size + # Default minimum number of threads that will be retained in the pool. + + # @!macro thread_pool_executor_constant_default_max_queue_size + # Default maximum number of tasks that may be added to the task queue. + + # @!macro thread_pool_executor_constant_default_thread_timeout + # Default maximum number of seconds a thread in the pool may remain idle + # before being reclaimed. + + # @!macro thread_pool_executor_constant_default_synchronous + # Default value of the :synchronous option. + + # @!macro thread_pool_executor_attr_reader_max_length + # The maximum number of threads that may be created in the pool. + # @return [Integer] The maximum number of threads that may be created in the pool. + + # @!macro thread_pool_executor_attr_reader_min_length + # The minimum number of threads that may be retained in the pool. + # @return [Integer] The minimum number of threads that may be retained in the pool. + + # @!macro thread_pool_executor_attr_reader_largest_length + # The largest number of threads that have been created in the pool since construction. + # @return [Integer] The largest number of threads that have been created in the pool since construction. + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # The number of tasks that have been scheduled for execution on the pool since construction. + # @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction. + + # @!macro thread_pool_executor_attr_reader_completed_task_count + # The number of tasks that have been completed by the pool since construction. + # @return [Integer] The number of tasks that have been completed by the pool since construction. + + # @!macro thread_pool_executor_attr_reader_idletime + # The number of seconds that a thread may be idle before being reclaimed. + # @return [Integer] The number of seconds that a thread may be idle before being reclaimed. 
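  # A minimal lifecycle sketch for the pool options and shutdown methods
  # documented in this file, assuming the public thread pool API described
  # here (`expensive_work` is a hypothetical placeholder task):
  #
  #   pool = Concurrent::FixedThreadPool.new(5, fallback_policy: :caller_runs)
  #
  #   20.times { |i| pool.post { expensive_work(i) } }
  #
  #   pool.shutdown                     # stop accepting new work
  #   pool.wait_for_termination(10)     # let queued tasks finish, up to 10s
  #   pool.kill unless pool.shutdown?   # only as a last resort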
+ + # @!macro thread_pool_executor_attr_reader_synchronous + # Whether or not a value of 0 for :max_queue option means the queue must perform direct hand-off or rather unbounded queue. + # @return [true, false] + + # @!macro thread_pool_executor_attr_reader_max_queue + # The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + # + # @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + + # @!macro thread_pool_executor_attr_reader_length + # The number of threads currently in the pool. + # @return [Integer] The number of threads currently in the pool. + + # @!macro thread_pool_executor_attr_reader_queue_length + # The number of tasks in the queue awaiting execution. + # @return [Integer] The number of tasks in the queue awaiting execution. + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + # + # @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + + + + + + # @!macro thread_pool_executor_public_api + # + # @!macro abstract_executor_service_public_api + # + # @!attribute [r] max_length + # @!macro thread_pool_executor_attr_reader_max_length + # + # @!attribute [r] min_length + # @!macro thread_pool_executor_attr_reader_min_length + # + # @!attribute [r] largest_length + # @!macro thread_pool_executor_attr_reader_largest_length + # + # @!attribute [r] scheduled_task_count + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # + # @!attribute [r] completed_task_count + # @!macro thread_pool_executor_attr_reader_completed_task_count + # + # @!attribute [r] idletime + # @!macro thread_pool_executor_attr_reader_idletime + # + # @!attribute [r] max_queue + # @!macro thread_pool_executor_attr_reader_max_queue + # + # @!attribute [r] length + # @!macro thread_pool_executor_attr_reader_length + # + # @!attribute [r] queue_length + # @!macro thread_pool_executor_attr_reader_queue_length + # + # @!attribute [r] remaining_capacity + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # + # @!method can_overflow? + # @!macro executor_service_method_can_overflow_question + + + + + # @!macro thread_pool_options + # + # **Thread Pool Options** + # + # Thread pools support several configuration options: + # + # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. + # * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and + # a `-worker-` name is given to its threads if supported by used Ruby + # implementation. `` is uniq for each thread. + # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at + # any one time. When the queue size reaches `max_queue` and no new threads can be created, + # subsequent tasks will be rejected in accordance with the configured `fallback_policy`. + # * `auto_terminate`: When true (default), the threads started will be marked as daemon. + # * `fallback_policy`: The policy defining how rejected tasks are handled. 
+ # + # Three fallback policies are supported: + # + # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. + # * `:discard`: Discard the task and return false. + # * `:caller_runs`: Execute the task on the calling thread. + # + # **Shutting Down Thread Pools** + # + # Killing a thread pool while tasks are still being processed, either by calling + # the `#kill` method or at application exit, will have unpredictable results. There + # is no way for the thread pool to know what resources are being used by the + # in-progress tasks. When those tasks are killed the impact on those resources + # cannot be predicted. The *best* practice is to explicitly shutdown all thread + # pools using the provided methods: + # + # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks + # * Call `#wait_for_termination` with an appropriate timeout interval an allow + # the orderly shutdown to complete + # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time + # + # On some runtime platforms (most notably the JVM) the application will not + # exit until all thread pools have been shutdown. To prevent applications from + # "hanging" on exit, all threads can be marked as daemon according to the + # `:auto_terminate` option. + # + # ```ruby + # pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon + # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon + # ``` + # + # @note Failure to properly shutdown a thread pool can lead to unpredictable results. + # Please read *Shutting Down Thread Pools* for more information. + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface + # @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean- + + + + + + # @!macro fixed_thread_pool + # + # A thread pool that reuses a fixed number of threads operating off an unbounded queue. + # At any point, at most `num_threads` will be active processing tasks. When all threads are busy new + # tasks `#post` to the thread pool are enqueued until a thread becomes available. + # Should a thread crash for any reason the thread will immediately be removed + # from the pool and replaced. + # + # The API and behavior of this class are based on Java's `FixedThreadPool` + # + # @!macro thread_pool_options + class FixedThreadPool < ThreadPoolExecutor + + # @!macro fixed_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Integer] num_threads the number of threads to allocate + # @param [Hash] opts the options defining pool behavior. 
+ # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `num_threads` is less than or equal to zero + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int- + def initialize(num_threads, opts = {}) + raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1 + defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE, + idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: num_threads, + max_threads: num_threads } + super(defaults.merge(opts).merge(overrides)) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb new file mode 100644 index 0000000..282df7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb @@ -0,0 +1,66 @@ +require 'concurrent/atomic/event' +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/serial_executor_service' + +module Concurrent + + # An executor service which runs all operations on the current thread, + # blocking as necessary. Operations are performed in the order they are + # received and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used + # it immediately runs every `#post` operation on the current thread, blocking + # that thread until the operation is complete. This can be very beneficial + # during testing because it makes all operations deterministic. + # + # @note Intended for use primarily in testing and debugging. + class ImmediateExecutor < AbstractExecutorService + include SerialExecutorService + + # Creates a new executor + def initialize + @stopped = Concurrent::Event.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + task.call(*args) + true + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + ! shutdown? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + false + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? 
+ end + + # @!macro executor_service_method_shutdown + def shutdown + @stopped.set + true + end + alias_method :kill, :shutdown + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb new file mode 100644 index 0000000..4f9769f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb @@ -0,0 +1,44 @@ +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/simple_executor_service' + +module Concurrent + # An executor service which runs all operations on a new thread, blocking + # until it completes. Operations are performed in the order they are received + # and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used it + # immediately runs every `#post` operation on a new thread, blocking the + # current thread until the operation is complete. This is similar to how the + # ImmediateExecutor works, but the operation has the full stack of the new + # thread at its disposal. This can be helpful when the operations will spawn + # more operations on the same executor and so on - such a situation might + # overflow the single stack in case of an ImmediateExecutor, which is + # inconsistent with how it would behave for a threaded executor. + # + # @note Intended for use primarily in testing and debugging. + class IndirectImmediateExecutor < ImmediateExecutor + # Creates a new executor + def initialize + super + @internal_executor = SimpleExecutorService.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new("no block given") unless block_given? + return false unless running? + + event = Concurrent::Event.new + @internal_executor.post do + begin + task.call(*args) + ensure + event.set + end + end + event.wait + + true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb new file mode 100644 index 0000000..9c0f310 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb @@ -0,0 +1,103 @@ +if Concurrent.on_jruby? + + require 'concurrent/errors' + require 'concurrent/utility/engine' + require 'concurrent/executor/abstract_executor_service' + + module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaExecutorService < AbstractExecutorService + java_import 'java.lang.Runnable' + + FALLBACK_POLICY_CLASSES = { + abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy, + discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy, + caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy + }.freeze + private_constant :FALLBACK_POLICY_CLASSES + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return handle_fallback(*args, &task) unless running? 
+ @executor.submit Job.new(args, task) + true + rescue Java::JavaUtilConcurrent::RejectedExecutionException + raise RejectedExecutionError + end + + def wait_for_termination(timeout = nil) + if timeout.nil? + ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok + true + else + @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + + def shutdown + synchronize do + @executor.shutdown + nil + end + end + + def kill + synchronize do + @executor.shutdownNow + nil + end + end + + private + + def ns_running? + !(ns_shuttingdown? || ns_shutdown?) + end + + def ns_shuttingdown? + if @executor.respond_to? :isTerminating + @executor.isTerminating + else + false + end + end + + def ns_shutdown? + @executor.isShutdown || @executor.isTerminated + end + + class Job + include Runnable + def initialize(args, block) + @args = args + @block = block + end + + def run + @block.call(*@args) + end + end + private_constant :Job + end + + class DaemonThreadFactory + # hide include from YARD + send :include, java.util.concurrent.ThreadFactory + + def initialize(daemonize = true) + @daemonize = daemonize + end + + def newThread(runnable) + thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable) + thread.setDaemon(@daemonize) + return thread + end + end + + private_constant :DaemonThreadFactory + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb new file mode 100644 index 0000000..7aa24f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb @@ -0,0 +1,30 @@ +if Concurrent.on_jruby? + + require 'concurrent/executor/java_executor_service' + require 'concurrent/executor/serial_executor_service' + + module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaSingleThreadExecutor < JavaExecutorService + include SerialExecutorService + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + private + + def ns_initialize(opts) + @executor = java.util.concurrent.Executors.newSingleThreadExecutor( + DaemonThreadFactory.new(ns_auto_terminate?) + ) + @fallback_policy = opts.fetch(:fallback_policy, :discard) + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb new file mode 100644 index 0000000..e670663 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb @@ -0,0 +1,136 @@ +if Concurrent.on_jruby? 
+ + require 'concurrent/executor/java_executor_service' + + module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class JavaThreadPoolExecutor < JavaExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647 + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + @max_queue != 0 + end + + # @!macro thread_pool_executor_attr_reader_min_length + def min_length + @executor.getCorePoolSize + end + + # @!macro thread_pool_executor_attr_reader_max_length + def max_length + @executor.getMaximumPoolSize + end + + # @!macro thread_pool_executor_attr_reader_length + def length + @executor.getPoolSize + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + @executor.getLargestPoolSize + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + @executor.getTaskCount + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + @executor.getCompletedTaskCount + end + + # @!macro thread_pool_executor_attr_reader_idletime + def idletime + @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS) + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + @executor.getQueue.size + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + @max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity + end + + # @!macro executor_service_method_running_question + def running? 
+ super && !@executor.isTerminating + end + + private + + def ns_initialize(opts) + min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy) + + if @max_queue == 0 + if @synchronous + queue = java.util.concurrent.SynchronousQueue.new + else + queue = java.util.concurrent.LinkedBlockingQueue.new + end + else + queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue) + end + + @executor = java.util.concurrent.ThreadPoolExecutor.new( + min_length, + max_length, + idletime, + java.util.concurrent.TimeUnit::SECONDS, + queue, + DaemonThreadFactory.new(ns_auto_terminate?), + FALLBACK_POLICY_CLASSES[@fallback_policy].new) + + end + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb new file mode 100644 index 0000000..06658d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb @@ -0,0 +1,76 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/atomic/event' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubyExecutorService < AbstractExecutorService + safe_initialization! + + def initialize(*args, &block) + super + @StopEvent = Event.new + @StoppedEvent = Event.new + end + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + synchronize do + # If the executor is shut down, reject this task + return handle_fallback(*args, &task) unless running? + ns_execute(*args, &task) + true + end + end + + def shutdown + synchronize do + break unless running? + stop_event.set + ns_shutdown_execution + end + true + end + + def kill + synchronize do + break if shutdown? + stop_event.set + ns_kill_execution + stopped_event.set + end + true + end + + def wait_for_termination(timeout = nil) + stopped_event.wait(timeout) + end + + private + + def stop_event + @StopEvent + end + + def stopped_event + @StoppedEvent + end + + def ns_shutdown_execution + stopped_event.set + end + + def ns_running? + !stop_event.set? + end + + def ns_shuttingdown? + !(ns_running? || ns_shutdown?) + end + + def ns_shutdown? + stopped_event.set? 
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb new file mode 100644 index 0000000..916337d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb @@ -0,0 +1,21 @@ +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubySingleThreadExecutor < RubyThreadPoolExecutor + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super( + min_threads: 1, + max_threads: 1, + max_queue: 0, + idletime: DEFAULT_THREAD_IDLETIMEOUT, + fallback_policy: opts.fetch(:fallback_policy, :discard), + ) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb new file mode 100644 index 0000000..dc20d76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb @@ -0,0 +1,377 @@ +require 'thread' +require 'concurrent/atomic/event' +require 'concurrent/concern/logging' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class RubyThreadPoolExecutor < RubyExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_min_length + attr_reader :min_length + + # @!macro thread_pool_executor_attr_reader_idletime + attr_reader :idletime + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + synchronize { @largest_length } + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + synchronize { @scheduled_task_count } + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + synchronize { @completed_task_count } + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + synchronize { ns_limited_queue? 
} + end + + # @!macro thread_pool_executor_attr_reader_length + def length + synchronize { @pool.length } + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + synchronize { @queue.length } + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + synchronize do + if ns_limited_queue? + @max_queue - @queue.length + else + -1 + end + end + end + + # @!visibility private + def remove_busy_worker(worker) + synchronize { ns_remove_busy_worker worker } + end + + # @!visibility private + def ready_worker(worker) + synchronize { ns_ready_worker worker } + end + + # @!visibility private + def worker_not_old_enough(worker) + synchronize { ns_worker_not_old_enough worker } + end + + # @!visibility private + def worker_died(worker) + synchronize { ns_worker_died worker } + end + + # @!visibility private + def worker_task_completed + synchronize { @completed_task_count += 1 } + end + + private + + # @!visibility private + def ns_initialize(opts) + @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy) + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + + @pool = [] # all workers + @ready = [] # used as a stash (most idle worker is at the start) + @queue = [] # used as queue + # @ready or @queue is empty at all times + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ # detects if Ruby has forked + + @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + # @!visibility private + def ns_limited_queue? + @max_queue != 0 + end + + # @!visibility private + def ns_execute(*args, &task) + ns_reset_if_forked + + if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task) + @scheduled_task_count += 1 + else + handle_fallback(*args, &task) + end + + ns_prune_pool if @next_gc_time < Concurrent.monotonic_time + end + + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + + if @pool.empty? + # nothing to do + stopped_event.set + end + + if @queue.empty? + # no more tasks will be accepted, just stop all workers + @pool.each(&:stop) + end + end + + # @!visibility private + def ns_kill_execution + # TODO log out unprocessed tasks in queue + # TODO try to shutdown first? 
+ @pool.each(&:kill) + @pool.clear + @ready.clear + end + + # tries to assign task to a worker, tries to get one from @ready or to create new one + # @return [true, false] if task is assigned to a worker + # + # @!visibility private + def ns_assign_worker(*args, &task) + # keep growing if the pool is not at the minimum yet + worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker + if worker + worker << [task, args] + true + else + false + end + rescue ThreadError + # Raised when the operating system refuses to create the new thread + return false + end + + # tries to enqueue task + # @return [true, false] if enqueued + # + # @!visibility private + def ns_enqueue(*args, &task) + return false if @synchronous + + if !ns_limited_queue? || @queue.size < @max_queue + @queue << [task, args] + true + else + false + end + end + + # @!visibility private + def ns_worker_died(worker) + ns_remove_busy_worker worker + replacement_worker = ns_add_busy_worker + ns_ready_worker replacement_worker, false if replacement_worker + end + + # creates new worker which has to receive work to do after it's added + # @return [nil, Worker] nil of max capacity is reached + # + # @!visibility private + def ns_add_busy_worker + return if @pool.size >= @max_length + + @workers_counter += 1 + @pool << (worker = Worker.new(self, @workers_counter)) + @largest_length = @pool.length if @pool.length > @largest_length + worker + end + + # handle ready worker, giving it new job or assigning back to @ready + # + # @!visibility private + def ns_ready_worker(worker, success = true) + task_and_args = @queue.shift + if task_and_args + worker << task_and_args + else + # stop workers when !running?, do not return them to @ready + if running? + @ready.push(worker) + else + worker.stop + end + end + end + + # returns back worker to @ready which was not idle for enough time + # + # @!visibility private + def ns_worker_not_old_enough(worker) + # let's put workers coming from idle_test back to the start (as the oldest worker) + @ready.unshift(worker) + true + end + + # removes a worker which is not in not tracked in @ready + # + # @!visibility private + def ns_remove_busy_worker(worker) + @pool.delete(worker) + stopped_event.set if @pool.empty? && !running? 
+ true + end + + # try oldest worker if it is idle for enough time, it's returned back at the start + # + # @!visibility private + def ns_prune_pool + return if @pool.size <= @min_length + + last_used = @ready.shift + last_used << :idle_test if last_used + + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + def ns_reset_if_forked + if $$ != @ruby_pid + @queue.clear + @ready.clear + @pool.clear + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ + end + end + + # @!visibility private + class Worker + include Concern::Logging + + def initialize(pool, id) + # instance variables accessed only under pool's lock so no need to sync here again + @queue = Queue.new + @pool = pool + @thread = create_worker @queue, pool, pool.idletime + + if @thread.respond_to?(:name=) + @thread.name = [pool.name, 'worker', id].compact.join('-') + end + end + + def <<(message) + @queue << message + end + + def stop + @queue << :stop + end + + def kill + @thread.kill + end + + private + + def create_worker(queue, pool, idletime) + Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime| + last_message = Concurrent.monotonic_time + catch(:stop) do + loop do + + case message = my_queue.pop + when :idle_test + if (Concurrent.monotonic_time - last_message) > my_idletime + my_pool.remove_busy_worker(self) + throw :stop + else + my_pool.worker_not_old_enough(self) + end + + when :stop + my_pool.remove_busy_worker(self) + throw :stop + + else + task, args = message + run_task my_pool, task, args + last_message = Concurrent.monotonic_time + + my_pool.ready_worker(self) + end + end + end + end + end + + def run_task(pool, task, args) + task.call(*args) + pool.worker_task_completed + rescue => ex + # let it fail + log DEBUG, ex + rescue Exception => ex + log ERROR, ex + pool.worker_died(self) + throw :stop + end + end + + private_constant :Worker + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb new file mode 100644 index 0000000..414aa64 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb @@ -0,0 +1,35 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A simple utility class that executes a callable and returns and array of three elements: + # success - indicating if the callable has been executed without errors + # value - filled by the callable result if it has been executed without errors, nil otherwise + # reason - the error risen by the callable if it has been executed with errors, nil otherwise + class SafeTaskExecutor < Synchronization::LockableObject + + def initialize(task, opts = {}) + @task = task + @exception_class = opts.fetch(:rescue_exception, false) ? 
Exception : StandardError + super() # ensures visibility + end + + # @return [Array] + def execute(*args) + synchronize do + success = false + value = reason = nil + + begin + value = @task.call(*args) + success = true + rescue @exception_class => ex + reason = ex + success = false + end + + [success, value, reason] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb new file mode 100644 index 0000000..f1c38ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb @@ -0,0 +1,34 @@ +require 'concurrent/executor/executor_service' + +module Concurrent + + # Indicates that the including `ExecutorService` guarantees + # that all operations will occur in the order they are post and that no + # two operations may occur simultaneously. This module provides no + # functionality and provides no guarantees. That is the responsibility + # of the including class. This module exists solely to allow the including + # object to be interrogated for its serialization status. + # + # @example + # class Foo + # include Concurrent::SerialExecutor + # end + # + # foo = Foo.new + # + # foo.is_a? Concurrent::ExecutorService #=> true + # foo.is_a? Concurrent::SerialExecutor #=> true + # foo.serialized? #=> true + # + # @!visibility private + module SerialExecutorService + include ExecutorService + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `true` + def serialized? + true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb new file mode 100644 index 0000000..d314e90 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb @@ -0,0 +1,107 @@ +require 'concurrent/errors' +require 'concurrent/concern/logging' +require 'concurrent/synchronization' + +module Concurrent + + # Ensures passed jobs in a serialized order never running at the same time. + class SerializedExecution < Synchronization::LockableObject + include Concern::Logging + + def initialize() + super() + synchronize { ns_initialize } + end + + Job = Struct.new(:executor, :args, :block) do + def call + block.call(*args) + end + end + + # Submit a task to the executor for asynchronous processing. + # + # @param [Executor] executor to be used for this job + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + def post(executor, *args, &task) + posts [[executor, args, task]] + true + end + + # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not + # be interleaved by other tasks. + # + # @param [Array, Proc)>] posts array of triplets where + # first is a {ExecutorService}, second is array of args for task, third is a task (Proc) + def posts(posts) + # if can_overflow? 
+ # raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow' + # end + + return nil if posts.empty? + + jobs = posts.map { |executor, args, task| Job.new executor, args, task } + + job_to_post = synchronize do + if @being_executed + @stash.push(*jobs) + nil + else + @being_executed = true + @stash.push(*jobs[1..-1]) + jobs.first + end + end + + call_job job_to_post if job_to_post + true + end + + private + + def ns_initialize + @being_executed = false + @stash = [] + end + + def call_job(job) + did_it_run = begin + job.executor.post { work(job) } + true + rescue RejectedExecutionError => ex + false + end + + # TODO not the best idea to run it myself + unless did_it_run + begin + work job + rescue => ex + # let it fail + log DEBUG, ex + end + end + end + + # ensures next job is executed if any is stashed + def work(job) + job.call + ensure + synchronize do + job = @stash.shift || (@being_executed = false) + end + + # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end + # of this block + call_job job if job + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb new file mode 100644 index 0000000..8197781 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb @@ -0,0 +1,28 @@ +require 'delegate' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' + +module Concurrent + + # A wrapper/delegator for any `ExecutorService` that + # guarantees serialized execution of tasks. + # + # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html) + # @see Concurrent::SerializedExecution + class SerializedExecutionDelegator < SimpleDelegator + include SerialExecutorService + + def initialize(executor) + @executor = executor + @serializer = SerializedExecution.new + super(executor) + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @serializer.post(@executor, *args, &task) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb new file mode 100644 index 0000000..b87fed5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb @@ -0,0 +1,100 @@ +require 'concurrent/atomics' +require 'concurrent/executor/executor_service' + +module Concurrent + + # An executor service in which every operation spawns a new, + # independently operating thread. + # + # This is perhaps the most inefficient executor service in this + # library. It exists mainly for testing an debugging. Thread creation + # and management is expensive in Ruby and this executor performs no + # resource pooling. This can be very beneficial during testing and + # debugging because it decouples the using code from the underlying + # executor implementation. In production this executor will likely + # lead to suboptimal performance. 
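For reference, a minimal usage sketch of `SimpleExecutorService` as characterized above: every posted task gets its own thread, which is convenient in tests but expensive under real load. The message and thread-id printing are illustrative only.

```ruby
require 'concurrent'

executor = Concurrent::SimpleExecutorService.new

# Each #post spawns an independent thread.
executor.post(:hello) { |msg| puts "#{msg} from thread #{Thread.current.object_id}" }
executor << proc { puts 'also runs on a fresh thread' }

executor.shutdown
executor.wait_for_termination(5)
```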
+ # + # @note Intended for use primarily in testing and debugging. + class SimpleExecutorService < RubyExecutorService + + # @!macro executor_service_method_post + def self.post(*args) + raise ArgumentError.new('no block given') unless block_given? + Thread.new(*args) do + Thread.current.abort_on_exception = false + yield(*args) + end + true + end + + # @!macro executor_service_method_left_shift + def self.<<(task) + post(&task) + self + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @count.increment + Thread.new(*args) do + Thread.current.abort_on_exception = false + begin + yield(*args) + ensure + @count.decrement + @stopped.set if @running.false? && @count.value == 0 + end + end + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + @running.true? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + @running.false? && ! @stopped.set? + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? + end + + # @!macro executor_service_method_shutdown + def shutdown + @running.make_false + @stopped.set if @count.value == 0 + true + end + + # @!macro executor_service_method_kill + def kill + @running.make_false + @stopped.set + true + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + + private + + def ns_initialize(*args) + @running = Concurrent::AtomicBoolean.new(true) + @stopped = Concurrent::Event.new + @count = Concurrent::AtomicFixnum.new(0) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb new file mode 100644 index 0000000..f1474ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb @@ -0,0 +1,57 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_single_thread_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_single_thread_executor' + end + + SingleThreadExecutorImplementation = case + when Concurrent.on_jruby? + JavaSingleThreadExecutor + else + RubySingleThreadExecutor + end + private_constant :SingleThreadExecutorImplementation + + # @!macro single_thread_executor + # + # A thread pool with a single thread an unlimited queue. Should the thread + # die for any reason it will be removed and replaced, thus ensuring that + # the executor will always remain viable and available to process jobs. + # + # A common pattern for background processing is to create a single thread + # on which an infinite loop is run. The thread's loop blocks on an input + # source (perhaps blocking I/O or a queue) and processes each input as it + # is received. This pattern has several issues. The thread itself is highly + # susceptible to errors during processing. Also, the thread itself must be + # constantly monitored and restarted should it die. `SingleThreadExecutor` + # encapsulates all these bahaviors. The task processor is highly resilient + # to errors from within tasks. Also, should the thread die it will + # automatically be restarted. 
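A small sketch of the serialized, resilient behavior described above, assuming only the public executor API; `Concurrent::Array` is used here purely as a convenient thread-safe collector for the example.

```ruby
require 'concurrent'

# One worker thread processes posted jobs strictly in order.
executor = Concurrent::SingleThreadExecutor.new(fallback_policy: :abort)

results = Concurrent::Array.new
5.times { |i| executor.post { results << i * i } }

executor.shutdown
executor.wait_for_termination(5)
results #=> [0, 1, 4, 9, 16] (order preserved because execution is serialized)
```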
+ # + # The API and behavior of this class are based on Java's `SingleThreadExecutor`. + # + # @!macro abstract_executor_service_public_api + class SingleThreadExecutor < SingleThreadExecutorImplementation + + # @!macro single_thread_executor_method_initialize + # + # Create a new thread pool. + # + # @option opts [Symbol] :fallback_policy (:discard) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html + + # @!method initialize(opts = {}) + # @!macro single_thread_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb new file mode 100644 index 0000000..253d46a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb @@ -0,0 +1,88 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_thread_pool_executor' + end + + ThreadPoolExecutorImplementation = case + when Concurrent.on_jruby? + JavaThreadPoolExecutor + else + RubyThreadPoolExecutor + end + private_constant :ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor + # + # An abstraction composed of one or more threads and a task queue. Tasks + # (blocks or `proc` objects) are submitted to the pool and added to the queue. + # The threads in the pool remove the tasks and execute them in the order + # they were received. + # + # A `ThreadPoolExecutor` will automatically adjust the pool size according + # to the bounds set by `min-threads` and `max-threads`. When a new task is + # submitted and fewer than `min-threads` threads are running, a new thread + # is created to handle the request, even if other worker threads are idle. + # If there are more than `min-threads` but less than `max-threads` threads + # running, a new thread will be created only if the queue is full. + # + # Threads that are idle for too long will be garbage collected, down to the + # configured minimum options. Should a thread crash it, too, will be garbage collected. + # + # `ThreadPoolExecutor` is based on the Java class of the same name. From + # the official Java documentation; + # + # > Thread pools address two different problems: they usually provide + # > improved performance when executing large numbers of asynchronous tasks, + # > due to reduced per-task invocation overhead, and they provide a means + # > of bounding and managing the resources, including threads, consumed + # > when executing a collection of tasks. Each ThreadPoolExecutor also + # > maintains some basic statistics, such as the number of completed tasks. + # > + # > To be useful across a wide range of contexts, this class provides many + # > adjustable parameters and extensibility hooks. 
However, programmers are + # > urged to use the more convenient Executors factory methods + # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), + # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single + # > background thread), that preconfigure settings for the most common usage + # > scenarios. + # + # @!macro thread_pool_options + # + # @!macro thread_pool_executor_public_api + class ThreadPoolExecutor < ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options which configure the thread pool. + # + # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum + # number of threads to be created + # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted + # and fewer than `min_threads` are running, a new thread is created + # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum + # number of seconds a thread may be idle before being reclaimed + # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum + # number of tasks allowed in the work queue at any one time; a value of + # zero means the queue may grow without bound + # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0 + # for :max_queue means the queue must perform direct hand-off rather than unbounded. + # @raise [ArgumentError] if `:max_threads` is less than one + # @raise [ArgumentError] if `:min_threads` is less than zero + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html + + # @!method initialize(opts = {}) + # @!macro thread_pool_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/timer_set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/timer_set.rb new file mode 100644 index 0000000..0dfaf12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executor/timer_set.rb @@ -0,0 +1,172 @@ +require 'concurrent/scheduled_task' +require 'concurrent/atomic/event' +require 'concurrent/collection/non_concurrent_priority_queue' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/single_thread_executor' + +require 'concurrent/options' + +module Concurrent + + # Executes a collection of tasks, each after a given delay. A master task + # monitors the set and schedules each task for execution at the appropriate + # time. Tasks are run on the global thread pool or on the supplied executor. + # Each task is represented as a `ScheduledTask`. + # + # @see Concurrent::ScheduledTask + # + # @!macro monotonic_clock_warning + class TimerSet < RubyExecutorService + + # Create a new set of timed tasks. + # + # @!macro executor_options + # + # @param [Hash] opts the options used to specify the executor on which to perform actions + # @option opts [Executor] :executor when set use the given `Executor` instance. 
+ # Three special values are also supported: `:task` returns the global task pool, + # `:operation` returns the global operation pool, and `:immediate` returns a new + # `ImmediateExecutor` object. + def initialize(opts = {}) + super(opts) + end + + # Post a task to be execute run after a given delay (in seconds). If the + # delay is less than 1/100th of a second the task will be immediately post + # to the executor. + # + # @param [Float] delay the number of seconds to wait for before executing the task. + # @param [Array] args the arguments passed to the task on execution. + # + # @yield the task to be performed. + # + # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post + # is successful; false after shutdown. + # + # @raise [ArgumentError] if the intended execution time is not in the future. + # @raise [ArgumentError] if no block is given. + def post(delay, *args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + opts = { executor: @task_executor, + args: args, + timer_set: self } + task = ScheduledTask.execute(delay, opts, &task) # may raise exception + task.unscheduled? ? false : task + end + + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + def kill + shutdown + end + + private :<< + + private + + # Initialize the object. + # + # @param [Hash] opts the options to create the object with. + # @!visibility private + def ns_initialize(opts) + @queue = Collection::NonConcurrentPriorityQueue.new(order: :min) + @task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @timer_executor = SingleThreadExecutor.new + @condition = Event.new + @ruby_pid = $$ # detects if Ruby has forked + end + + # Post the task to the internal queue. + # + # @note This is intended as a callback method from ScheduledTask + # only. It is not intended to be used directly. Post a task + # by using the `SchedulesTask#execute` method. + # + # @!visibility private + def post_task(task) + synchronize { ns_post_task(task) } + end + + # @!visibility private + def ns_post_task(task) + return false unless ns_running? + ns_reset_if_forked + if (task.initial_delay) <= 0.01 + task.executor.post { task.process_task } + else + @queue.push(task) + # only post the process method when the queue is empty + @timer_executor.post(&method(:process_tasks)) if @queue.length == 1 + @condition.set + end + true + end + + # Remove the given task from the queue. + # + # @note This is intended as a callback method from `ScheduledTask` + # only. It is not intended to be used directly. Cancel a task + # by using the `ScheduledTask#cancel` method. + # + # @!visibility private + def remove_task(task) + synchronize { @queue.delete(task) } + end + + # `ExecutorService` callback called during shutdown. + # + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + @queue.clear + @timer_executor.kill + stopped_event.set + end + + def ns_reset_if_forked + if $$ != @ruby_pid + @queue.clear + @condition.reset + @ruby_pid = $$ + end + end + + # Run a loop and execute tasks in the scheduled order and at the approximate + # scheduled time. If no tasks remain the thread will exit gracefully so that + # garbage collection can occur. If there are no ready tasks it will sleep + # for up to 60 seconds waiting for the next scheduled task. 
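To make the scheduling flow above concrete, a toy sketch of posting a delayed task to a `TimerSet`; the 2-second delay, the `'payload'` argument, and the trailing `sleep` exist only so the example observably fires.

```ruby
require 'concurrent'

timers = Concurrent::TimerSet.new

task = timers.post(2, 'payload') { |arg| puts "fired with #{arg} after ~2 seconds" }
task.pending?   #=> true until the delay elapses (a Concurrent::ScheduledTask)

sleep 3         # give the timer a chance to fire in this toy script
timers.shutdown
```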
+ # + # @!visibility private + def process_tasks + loop do + task = synchronize { @condition.reset; @queue.peek } + break unless task + + now = Concurrent.monotonic_time + diff = task.schedule_time - now + + if diff <= 0 + # We need to remove the task from the queue before passing + # it to the executor, to avoid race conditions where we pass + # the peek'ed task to the executor and then pop a different + # one that's been added in the meantime. + # + # Note that there's no race condition between the peek and + # this pop - this pop could retrieve a different task from + # the peek, but that task would be due to fire now anyway + # (because @queue is a priority queue, and this thread is + # the only reader, so whatever timer is at the head of the + # queue now must have the same pop time, or a closer one, as + # when we peeked). + task = synchronize { @queue.pop } + task.executor.post { task.process_task } + else + @condition.wait([diff, 60].min) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executors.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executors.rb new file mode 100644 index 0000000..eb1972c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/executors.rb @@ -0,0 +1,20 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/fixed_thread_pool' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/indirect_immediate_executor' +require 'concurrent/executor/java_executor_service' +require 'concurrent/executor/java_single_thread_executor' +require 'concurrent/executor/java_thread_pool_executor' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/ruby_single_thread_executor' +require 'concurrent/executor/ruby_thread_pool_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' +require 'concurrent/executor/serialized_execution_delegator' +require 'concurrent/executor/single_thread_executor' +require 'concurrent/executor/thread_pool_executor' +require 'concurrent/executor/timer_set' diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/future.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/future.rb new file mode 100644 index 0000000..1af182e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/future.rb @@ -0,0 +1,141 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc. + + +module Concurrent + + # {include:file:docs-source/future.md} + # + # @!macro copy_options + # + # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module + # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future + class Future < IVar + + # Create a new `Future` in the `:unscheduled` state. 
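Beyond the state examples shown further down, a brief sketch of reading a `Future`'s result and failure information; the raised `'boom'` error is purely illustrative.

```ruby
require 'concurrent'

future = Concurrent::Future.execute { 21 * 2 }
future.value(1)     #=> 42 (blocks up to 1 second for completion)
future.fulfilled?   #=> true

failed = Concurrent::Future.execute { raise 'boom' }
failed.value        #=> nil
failed.rejected?    #=> true
failed.reason       #=> #<RuntimeError: boom>
```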
+ # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(NULL, opts.merge(__task_from_block__: block), &nil) + end + + # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Future` is in any state other than `:unscheduled`. + # + # @return [Future] a reference to `self` + # + # @example Instance and execute in separate steps + # future = Concurrent::Future.new{ sleep(1); 42 } + # future.state #=> :unscheduled + # future.execute + # future.state #=> :pending + # + # @example Instance and execute in one line + # future = Concurrent::Future.new{ sleep(1); 42 }.execute + # future.state #=> :pending + def execute + if compare_and_set_state(:pending, :unscheduled) + @executor.post{ safe_execute(@task, @args) } + self + end + end + + # Create a new `Future` object with the given block, execute it, and return the + # `:pending` object. + # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + # + # @return [Future] the newly created `Future` in the `:pending` state + # + # @example + # future = Concurrent::Future.execute{ sleep(1); 42 } + # future.state #=> :pending + def self.execute(opts = {}, &block) + Future.new(opts, &block).execute + end + + # @!macro ivar_set_method + def set(value = NULL, &block) + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @task = block || Proc.new { value } + end + end + execute + end + + # Attempt to cancel the operation if it has not already processed. + # The operation can only be cancelled while still `pending`. It cannot + # be cancelled once it has begun processing or has completed. + # + # @return [Boolean] was the operation successfully cancelled. + def cancel + if compare_and_set_state(:cancelled, :pending) + complete(false, nil, CancelledOperationError.new) + true + else + false + end + end + + # Has the operation been successfully cancelled? + # + # @return [Boolean] + def cancelled? + state == :cancelled + end + + # Wait the given number of seconds for the operation to complete. + # On timeout attempt to cancel the operation. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Boolean] true if the operation completed before the timeout + # else false + def wait_or_cancel(timeout) + wait(timeout) + if complete? 
+ true + else + cancel + false + end + end + + protected + + def ns_initialize(value, opts) + super + @state = :unscheduled + @task = opts[:__task_from_block__] + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/hash.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/hash.rb new file mode 100644 index 0000000..92df66b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/hash.rb @@ -0,0 +1,59 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_hash + # + # A thread-safe subclass of Hash. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`, + # which takes the lock repeatedly when reading an item. + # + # @see http://ruby-doc.org/core/Hash.html Ruby standard library `Hash` + + # @!macro internal_implementation_note + HashImplementation = case + when Concurrent.on_cruby? + # Hash is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Hash + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyHash < ::Hash + include JRuby::Synchronized + end + JRubyHash + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxHash < ::Hash + end + ThreadSafe::Util.make_synchronized_on_rbx RbxHash + RbxHash + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyHash < ::Hash + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash + TruffleRubyHash + + else + warn 'Possibly unsupported Ruby implementation' + ::Hash + end + private_constant :HashImplementation + + # @!macro concurrent_hash + class Hash < HashImplementation + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/immutable_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/immutable_struct.rb new file mode 100644 index 0000000..d275595 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/immutable_struct.rb @@ -0,0 +1,101 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # A thread-safe, immutable variation of Ruby's standard `Struct`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module ImmutableStruct + include Synchronization::AbstractStruct + + def self.included(base) + base.safe_initialization! 
+ end + + # @!macro struct_values + def values + ns_values + end + + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + ns_values_at(indexes) + end + + # @!macro struct_inspect + def inspect + ns_inspect + end + + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + ns_merge(other, &block) + end + + # @!macro struct_to_h + def to_h + ns_to_h + end + + # @!macro struct_get + def [](member) + ns_get(member) + end + + # @!macro struct_equality + def ==(other) + ns_equality(other) + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + ns_each(&block) + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + ns_each_pair(&block) + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + ns_select(&block) + end + + private + + # @!visibility private + def initialize_copy(original) + super(original) + ns_initialize_copy + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block) + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/ivar.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/ivar.rb new file mode 100644 index 0000000..2a724db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/ivar.rb @@ -0,0 +1,207 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/obligation' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # An `IVar` is like a future that you can assign. As a future is a value that + # is being computed that you can wait on, an `IVar` is a value that is waiting + # to be assigned, that you can wait on. `IVars` are single assignment and + # deterministic. + # + # Then, express futures as an asynchronous computation that assigns an `IVar`. + # The `IVar` becomes the primitive on which [futures](Future) and + # [dataflow](Dataflow) are built. + # + # An `IVar` is a single-element container that is normally created empty, and + # can only be set once. The I in `IVar` stands for immutable. Reading an + # `IVar` normally blocks until it is set. It is safe to set and read an `IVar` + # from different threads. + # + # If you want to have some parallel task set the value in an `IVar`, you want + # a `Future`. If you want to create a graph of parallel tasks all executed + # when the values they depend on are ready you want `dataflow`. `IVar` is + # generally a low-level primitive. + # + # ## Examples + # + # Create, set and get an `IVar` + # + # ```ruby + # ivar = Concurrent::IVar.new + # ivar.set 14 + # ivar.value #=> 14 + # ivar.set 2 # would now be an error + # ``` + # + # ## See Also + # + # 1. For the theory: Arvind, R. Nikhil, and K. Pingali. 
+ # [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). + # In Proceedings of Workshop on Graph Reduction, 1986. + # 2. For recent application: + # [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html). + class IVar < Synchronization::LockableObject + include Concern::Obligation + include Concern::Observable + + # Create a new `IVar` in the `:pending` state with the (optional) initial value. + # + # @param [Object] value the initial value + # @param [Hash] opts the options to create a message with + # @option opts [String] :dup_on_deref (false) call `#dup` before returning + # the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before + # returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def initialize(value = NULL, opts = {}, &block) + if value != NULL && block_given? + raise ArgumentError.new('provide only a value or a block') + end + super(&nil) + synchronize { ns_initialize(value, opts, &block) } + end + + # Add an observer on this object that will receive notification on update. + # + # Upon completion the `IVar` will notify all observers in a thread-safe way. + # The `func` method of the observer will be called with three arguments: the + # `Time` at which the `Future` completed the asynchronous operation, the + # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on + # fulfillment). + # + # @param [Object] observer the object that will be notified of changes + # @param [Symbol] func symbol naming the method to call when this + # `Observable` has changes` + def add_observer(observer = nil, func = :update, &block) + raise ArgumentError.new('cannot provide both an observer and a block') if observer && block + direct_notification = false + + if block + observer = block + func = :call + end + + synchronize do + if event.set? + direct_notification = true + else + observers.add_observer(observer, func) + end + end + + observer.send(func, Time.now, self.value, reason) if direct_notification + observer + end + + # @!macro ivar_set_method + # Set the `IVar` to a value and wake or notify all threads waiting on it. + # + # @!macro ivar_set_parameters_and_exceptions + # @param [Object] value the value to store in the `IVar` + # @yield A block operation to use for setting the value + # @raise [ArgumentError] if both a value and a block are given + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # + # @return [IVar] self + def set(value = NULL) + check_for_block_or_value!(block_given?, value) + raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending) + + begin + value = yield if block_given? + complete_without_notification(true, value, nil) + rescue => ex + complete_without_notification(false, nil, ex) + end + + notify_observers(self.value, reason) + self + end + + # @!macro ivar_fail_method + # Set the `IVar` to failed due to some error and wake or notify all threads waiting on it. + # + # @param [Object] reason for the failure + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # @return [IVar] self + def fail(reason = StandardError.new) + complete(false, nil, reason) + end + + # Attempt to set the `IVar` with the given value or block. 
Return a + # boolean indicating the success or failure of the set operation. + # + # @!macro ivar_set_parameters_and_exceptions + # + # @return [Boolean] true if the value was set else false + def try_set(value = NULL, &block) + set(value, &block) + true + rescue MultipleAssignmentError + false + end + + protected + + # @!visibility private + def ns_initialize(value, opts) + value = yield if block_given? + init_obligation + self.observers = Collection::CopyOnWriteObserverSet.new + set_deref_options(opts) + + @state = :pending + if value != NULL + ns_complete_without_notification(true, value, nil) + end + end + + # @!visibility private + def safe_execute(task, args = []) + if compare_and_set_state(:processing, :pending) + success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, val, reason) + yield(success, val, reason) if block_given? + end + end + + # @!visibility private + def complete(success, value, reason) + complete_without_notification(success, value, reason) + notify_observers(self.value, reason) + self + end + + # @!visibility private + def complete_without_notification(success, value, reason) + synchronize { ns_complete_without_notification(success, value, reason) } + self + end + + # @!visibility private + def notify_observers(value, reason) + observers.notify_and_delete_observers{ [Time.now, value, reason] } + end + + # @!visibility private + def ns_complete_without_notification(success, value, reason) + raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state + set_state(success, value, reason) + event.set + end + + # @!visibility private + def check_for_block_or_value!(block_given, value) # :nodoc: + if (block_given && value != NULL) || (! block_given && value == NULL) + raise ArgumentError.new('must set with either a value or a block') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/map.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/map.rb new file mode 100644 index 0000000..3967cbf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/map.rb @@ -0,0 +1,337 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/synchronization' +require 'concurrent/utility/engine' + +module Concurrent + # @!visibility private + module Collection + + # @!visibility private + MapImplementation = case + when Concurrent.on_jruby? + # noinspection RubyResolve + JRubyMapBackend + when Concurrent.on_cruby? + require 'concurrent/collection/map/mri_map_backend' + MriMapBackend + when Concurrent.on_rbx? || Concurrent.on_truffleruby? + require 'concurrent/collection/map/atomic_reference_map_backend' + AtomicReferenceMapBackend + else + warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' + require 'concurrent/collection/map/synchronized_map_backend' + SynchronizedMapBackend + end + end + + # `Concurrent::Map` is a hash-like object and should have much better performance + # characteristics, especially under high concurrency, than `Concurrent::Hash`. + # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` + # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` + # does. For most uses it should do fine though, and we recommend you consider + # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. 
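+ #
+ # A brief illustrative sketch (keys and values are arbitrary); note that,
+ # as described above, iteration order may differ from insertion order:
+ #
+ #   map = Concurrent::Map.new
+ #   map[:a] = 1
+ #   map.put_if_absent(:a, 2)   #=> 1 (existing value is returned, not replaced)
+ #   map[:a]                    #=> 1
+ #   map[:missing]              #=> nil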
+ class Map < Collection::MapImplementation + + # @!macro map.atomic_method + # This method is atomic. + + # @!macro map.atomic_method_with_block + # This method is atomic. + # @note Atomic methods taking a block do not allow the `self` instance + # to be used within the block. Doing so will cause a deadlock. + + # @!method compute_if_absent(key) + # Compute and store new value for key if the key is absent. + # @param [Object] key + # @yield new value + # @yieldreturn [Object] new value + # @return [Object] new value or current value + # @!macro map.atomic_method_with_block + + # @!method compute_if_present(key) + # Compute and store new value for key if the key is present. + # @param [Object] key + # @yield new value + # @yieldparam old_value [Object] + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method compute(key) + # Compute and store new value for key. + # @param [Object] key + # @yield compute new value from old one + # @yieldparam old_value [Object, nil] old_value, or nil when key is absent + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method merge_pair(key, value) + # If the key is absent, the value is stored, otherwise new value is + # computed with a block. + # @param [Object] key + # @param [Object] value + # @yield compute new value from old one + # @yieldparam old_value [Object] old value + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method replace_pair(key, old_value, new_value) + # Replaces old_value with new_value if key exists and current value + # matches old_value + # @param [Object] key + # @param [Object] old_value + # @param [Object] new_value + # @return [true, false] true if replaced + # @!macro map.atomic_method + + # @!method replace_if_exists(key, new_value) + # Replaces current value with new_value if key exists + # @param [Object] key + # @param [Object] new_value + # @return [Object, nil] old value or nil + # @!macro map.atomic_method + + # @!method get_and_set(key, value) + # Get the current value under key and set new value. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete(key) + # Delete key and its value. + # @param [Object] key + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete_pair(key, value) + # Delete pair and its value if current value equals the provided value. 
+ # @param [Object] key + # @param [Object] value + # @return [true, false] true if deleted + # @!macro map.atomic_method + + + def initialize(options = nil, &block) + if options.kind_of?(::Hash) + validate_options_hash!(options) + else + options = nil + end + + super(options) + @default_proc = block + end + + # Get a value with key + # @param [Object] key + # @return [Object] the value + def [](key) + if value = super # non-falsy value is an existing mapping, return it right away + value + # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call + # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value + # would be returned) + # note: nil == value check is not technically necessary + elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) + @default_proc.call(self, key) + else + value + end + end + + alias_method :get, :[] + # TODO (pitr-ch 30-Oct-2018): doc + alias_method :put, :[]= + + # Get a value with key, or default_value when key is absent, + # or fail when no default_value is given. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @raise [KeyError] when key is missing and no default_value is provided + # @!macro map_method_not_atomic + # @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended + # to be use as a concurrency primitive with strong happens-before + # guarantees. It is not intended to be used as a high-level abstraction + # supporting complex operations. All read and write operations are + # thread safe, but no guarantees are made regarding race conditions + # between the fetch operation and yielding to the block. Additionally, + # this method does not support recursion. This is due to internal + # constraints that are very unlikely to change in the near future. + def fetch(key, default_value = NULL) + if NULL != (value = get_or_default(key, NULL)) + value + elsif block_given? + yield key + elsif NULL != default_value + default_value + else + raise_fetch_no_key + end + end + + # Fetch value with key, or store default value when key is absent, + # or fail when no default_value is given. This is a two step operation, + # therefore not atomic. The store can overwrite other concurrently + # stored value. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @!macro map.atomic_method_with_block + def fetch_or_store(key, default_value = NULL) + fetch(key) do + put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) + end + end + + # Insert value into map with key if key is absent in one atomic step. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] the previous value when key was present or nil when there was no key + def put_if_absent(key, value) + computed = false + result = compute_if_absent(key) do + computed = true + value + end + computed ? nil : result + end unless method_defined?(:put_if_absent) + + # Is the value stored in the map. Iterates over all values. 
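+ # As a short sketch of the fetch family documented above (keys and values
+ # are placeholders); note that #fetch_or_store is two separate steps, so a
+ # concurrent writer may still win the race:
+ #
+ #   config = Concurrent::Map.new
+ #   config.fetch(:retries, 3)              #=> 3 (key absent, default returned)
+ #   config.fetch_or_store(:retries) { 5 }  #=> 5 (block result stored)
+ #   config.fetch(:retries, 3)              #=> 5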
+ # @param [Object] value + # @return [true, false] + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end + + # All keys + # @return [::Array] keys + def keys + arr = [] + each_pair { |k, v| arr << k } + arr + end unless method_defined?(:keys) + + # All values + # @return [::Array] values + def values + arr = [] + each_pair { |k, v| arr << v } + arr + end unless method_defined?(:values) + + # Iterates over each key. + # @yield for each key in the map + # @yieldparam key [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_key + each_pair { |k, v| yield k } + end unless method_defined?(:each_key) + + # Iterates over each value. + # @yield for each value in the map + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_value + each_pair { |k, v| yield v } + end unless method_defined?(:each_value) + + # Iterates over each key value pair. + # @yield for each key value pair in the map + # @yieldparam key [Object] + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_pair + return enum_for :each_pair unless block_given? + super + end + + alias_method :each, :each_pair unless method_defined?(:each) + + # Find key of a value. + # @param [Object] value + # @return [Object, nil] key or nil when not found + def key(value) + each_pair { |k, v| return k if v == value } + nil + end unless method_defined?(:key) + alias_method :index, :key if RUBY_VERSION < '1.9' + + # Is map empty? + # @return [true, false] + def empty? + each_pair { |k, v| return false } + true + end unless method_defined?(:empty?) + + # The size of map. + # @return [Integer] size + def size + count = 0 + each_pair { |k, v| count += 1 } + count + end unless method_defined?(:size) + + # @!visibility private + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair { |k, v| h[k] = v } + h + end + + # @!visibility private + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + # @!visibility private + def inspect + format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect + end + + private + + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair { |k, v| self[k] = v } + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive Integer" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/maybe.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/maybe.rb new file mode 100644 index 0000000..7ba3d3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/maybe.rb @@ -0,0 +1,229 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A `Maybe` encapsulates an optional value. A `Maybe` either contains a value + # of (represented as `Just`), or it is empty (represented as `Nothing`). 
Using + # `Maybe` is a good way to deal with errors or exceptional cases without + # resorting to drastic measures such as exceptions. + # + # `Maybe` is a replacement for the use of `nil` with better type checking. + # + # For compatibility with {Concurrent::Concern::Obligation} the predicate and + # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and + # `reason`. + # + # ## Motivation + # + # A common pattern in languages with pattern matching, such as Erlang and + # Haskell, is to return *either* a value *or* an error from a function + # Consider this Erlang code: + # + # ```erlang + # case file:consult("data.dat") of + # {ok, Terms} -> do_something_useful(Terms); + # {error, Reason} -> lager:error(Reason) + # end. + # ``` + # + # In this example the standard library function `file:consult` returns a + # [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044) + # with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134) + # (similar to a ruby symbol) and a variable containing ancillary data. On + # success it returns the atom `ok` and the data from the file. On failure it + # returns `error` and a string with an explanation of the problem. With this + # pattern there is no ambiguity regarding success or failure. If the file is + # empty the return value cannot be misinterpreted as an error. And when an + # error occurs the return value provides useful information. + # + # In Ruby we tend to return `nil` when an error occurs or else we raise an + # exception. Both of these idioms are problematic. Returning `nil` is + # ambiguous because `nil` may also be a valid value. It also lacks + # information pertaining to the nature of the error. Raising an exception + # is both expensive and usurps the normal flow of control. All of these + # problems can be solved with the use of a `Maybe`. + # + # A `Maybe` is unambiguous with regard to whether or not it contains a value. + # When `Just` it contains a value, when `Nothing` it does not. When `Just` + # the value it contains may be `nil`, which is perfectly valid. When + # `Nothing` the reason for the lack of a value is contained as well. The + # previous Erlang example can be duplicated in Ruby in a principled way by + # having functions return `Maybe` objects: + # + # ```ruby + # result = MyFileUtils.consult("data.dat") # returns a Maybe + # if result.just? + # do_something_useful(result.value) # or result.just + # else + # logger.error(result.reason) # or result.nothing + # end + # ``` + # + # @example Returning a Maybe from a Function + # module MyFileUtils + # def self.consult(path) + # file = File.open(path, 'r') + # Concurrent::Maybe.just(file.read) + # rescue => ex + # return Concurrent::Maybe.nothing(ex) + # ensure + # file.close if file + # end + # end + # + # maybe = MyFileUtils.consult('bogus.file') + # maybe.just? #=> false + # maybe.nothing? #=> true + # maybe.reason #=> # + # + # maybe = MyFileUtils.consult('README.md') + # maybe.just? #=> true + # maybe.nothing? #=> false + # maybe.value #=> "# Concurrent Ruby\n[![Gem Version..." + # + # @example Using Maybe with a Block + # result = Concurrent::Maybe.from do + # Client.find(10) # Client is an ActiveRecord model + # end + # + # # -- if the record was found + # result.just? #=> true + # result.value #=> # + # + # # -- if the record was not found + # result.just? #=> false + # result.reason #=> ActiveRecord::RecordNotFound + # + # @example Using Maybe with the Null Object Pattern + # # In a Rails controller... 
+ # result = ClientService.new(10).find # returns a Maybe + # render json: result.or(NullClient.new) + # + # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe + # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe + class Maybe < Synchronization::Object + include Comparable + safe_initialization! + + # Indicates that the given attribute has not been set. + # When `Just` the {#nothing} getter will return `NONE`. + # When `Nothing` the {#just} getter will return `NONE`. + NONE = ::Object.new.freeze + + # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`. + attr_reader :just + + # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`. + attr_reader :nothing + + private_class_method :new + + # Create a new `Maybe` using the given block. + # + # Runs the given block passing all function arguments to the block as block + # arguments. If the block runs to completion without raising an exception + # a new `Just` is created with the value set to the return value of the + # block. If the block raises an exception a new `Nothing` is created with + # the reason being set to the raised exception. + # + # @param [Array] args Zero or more arguments to pass to the block. + # @yield The block from which to create a new `Maybe`. + # @yieldparam [Array] args Zero or more block arguments passed as + # arguments to the function. + # + # @return [Maybe] The newly created object. + # + # @raise [ArgumentError] when no block given. + def self.from(*args) + raise ArgumentError.new('no block given') unless block_given? + begin + value = yield(*args) + return new(value, NONE) + rescue => ex + return new(NONE, ex) + end + end + + # Create a new `Just` with the given value. + # + # @param [Object] value The value to set for the new `Maybe` object. + # + # @return [Maybe] The newly created object. + def self.just(value) + return new(value, NONE) + end + + # Create a new `Nothing` with the given (optional) reason. + # + # @param [Exception] error The reason to set for the new `Maybe` object. + # When given a string a new `StandardError` will be created with the + # argument as the message. When no argument is given a new + # `StandardError` with an empty message will be created. + # + # @return [Maybe] The newly created object. + def self.nothing(error = '') + if error.is_a?(Exception) + nothing = error + else + nothing = StandardError.new(error.to_s) + end + return new(NONE, nothing) + end + + # Is this `Maybe` a `Just` (successfully fulfilled with a value)? + # + # @return [Boolean] True if `Just` or false if `Nothing`. + def just? + ! nothing? + end + alias :fulfilled? :just? + + # Is this `Maybe` a `nothing` (rejected with an exception upon fulfillment)? + # + # @return [Boolean] True if `Nothing` or false if `Just`. + def nothing? + @nothing != NONE + end + alias :rejected? :nothing? + + alias :value :just + + alias :reason :nothing + + # Comparison operator. + # + # @return [Integer] 0 if self and other are both `Nothing`; + # -1 if self is `Nothing` and other is `Just`; + # 1 if self is `Just` and other is nothing; + # `self.just <=> other.just` if both self and other are `Just`. + def <=>(other) + if nothing? + other.nothing? ? 0 : -1 + else + other.nothing? ? 1 : just <=> other.just + end + end + + # Return either the value of self or the given default value. + # + # @return [Object] The value of self when `Just`; else the given default. + def or(other) + just? ? 
just : other + end + + private + + # Create a new `Maybe` with the given attributes. + # + # @param [Object] just The value when `Just` else `NONE`. + # @param [Exception, Object] nothing The exception when `Nothing` else `NONE`. + # + # @return [Maybe] The new `Maybe`. + # + # @!visibility private + def initialize(just, nothing) + @just = just + @nothing = nothing + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mutable_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mutable_struct.rb new file mode 100644 index 0000000..55361e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mutable_struct.rb @@ -0,0 +1,239 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe variation of Ruby's standard `Struct`. Values can be set at + # construction or safely changed at any time during the object's lifecycle. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module MutableStruct + include Synchronization::AbstractStruct + + # @!macro struct_new + # + # Factory for creating new struct classes. + # + # ``` + # new([class_name] [, member_name]+>) -> StructClass click to toggle source + # new([class_name] [, member_name]+>) {|StructClass| block } -> StructClass + # new(value, ...) -> obj + # StructClass[value, ...] -> obj + # ``` + # + # The first two forms are used to create a new struct subclass `class_name` + # that can contain a value for each member_name . This subclass can be + # used to create instances of the structure like any other Class . + # + # If the `class_name` is omitted an anonymous struct class will be created. + # Otherwise, the name of this struct will appear as a constant in the struct class, + # so it must be unique for all structs under this base class and must start with a + # capital letter. Assigning a struct class to a constant also gives the class + # the name of the constant. + # + # If a block is given it will be evaluated in the context of `StructClass`, passing + # the created class as a parameter. This is the recommended way to customize a struct. + # Subclassing an anonymous struct creates an extra anonymous class that will never be used. + # + # The last two forms create a new instance of a struct subclass. The number of value + # parameters must be less than or equal to the number of attributes defined for the + # struct. Unset parameters default to nil. Passing more parameters than number of attributes + # will raise an `ArgumentError`. + # + # @see http://ruby-doc.org/core/Struct.html#method-c-new Ruby standard library `Struct#new` + + # @!macro struct_values + # + # Returns the values for this struct as an Array. + # + # @return [Array] the values for this struct + # + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + # + # Returns the struct member values for each selector as an Array. + # + # A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`). + # + # @param [Fixnum, Range] indexes the index(es) from which to obatin the values (in order) + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + # + # Describe the contents of this struct in a string. 
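+ # A hedged sketch of this struct type as a whole (names are arbitrary):
+ # the readers and writers generated by the factory are synchronized, so
+ # instances can be shared across threads.
+ #
+ #   Point = Concurrent::MutableStruct.new(:x, :y)
+ #   point = Point.new(1, 2)
+ #   point.x       #=> 1
+ #   point.x = 10  # synchronized assignment
+ #   point.to_h    #=> {:x=>10, :y=>2}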
+ # + # @return [String] the contents of this struct in a string + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + # + # Returns a new struct containing the contents of `other` and the contents + # of `self`. If no block is specified, the value for entries with duplicate + # keys will be that of `other`. Otherwise the value for each duplicate key + # is determined by calling the block with the key, its value in `self` and + # its value in `other`. + # + # @param [Hash] other the hash from which to set the new values + # @yield an options block for resolving duplicate keys + # @yieldparam [String, Symbol] member the name of the member which is duplicated + # @yieldparam [Object] selfvalue the value of the member in `self` + # @yieldparam [Object] othervalue the value of the member in `other` + # + # @return [Synchronization::AbstractStruct] a new struct with the new values + # + # @raise [ArgumentError] of given a member that is not defined in the struct + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + # + # Returns a hash containing the names and values for the struct’s members. + # + # @return [Hash] the names and values for the struct’s members + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + # + # Attribute Reference + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the member does not exist + # @raise [IndexError] if the index is out of range. + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + # + # Equality + # + # @return [Boolean] true if other has the same struct subclass and has + # equal member values (according to `Object#==`) + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + # + # Yields the value of each struct member in order. If no block is given + # an enumerator is returned. + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + # + # Yields the name and value of each struct member in order. If no block is + # given an enumerator is returned. + # + # @yield the operation to be performed on each struct member/value pair + # @yieldparam [Object] member each struct member (in order) + # @yieldparam [Object] value each struct value (in order) + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + # + # Yields each member value from the struct to the block and returns an Array + # containing the member values from the struct for which the given block + # returns a true value (equivalent to `Enumerable#select`). + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + # + # @return [Array] an array containing each value for which the block returns true + def select(&block) + return enum_for(:select) unless block_given? 
+ synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # Attribute Assignment + # + # Sets the value of the given struct member or the member at the given index. + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the name does not exist + # @raise [IndexError] if the index is out of range. + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize { @values[member] = value } + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize { @values[index] = value } + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mvar.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mvar.rb new file mode 100644 index 0000000..9034711 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/mvar.rb @@ -0,0 +1,242 @@ +require 'concurrent/concern/dereferenceable' +require 'concurrent/synchronization' + +module Concurrent + + # An `MVar` is a synchronized single element container. They are empty or + # contain one item. Taking a value from an empty `MVar` blocks, as does + # putting a value into a full one. You can either think of them as blocking + # queue of length one, or a special kind of mutable variable. + # + # On top of the fundamental `#put` and `#take` operations, we also provide a + # `#mutate` that is atomic with respect to operations on the same instance. + # These operations all support timeouts. + # + # We also support non-blocking operations `#try_put!` and `#try_take!`, a + # `#set!` that ignores existing values, a `#value` that returns the value + # without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields + # `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`. + # You shouldn't use these operations in the first instance. + # + # `MVar` is a [Dereferenceable](Dereferenceable). + # + # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala. + # + # Note that unlike the original Haskell paper, our `#take` is blocking. This is how + # Haskell and Scala do it today. 
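+ #
+ # A small illustrative sketch (thread layout and values are arbitrary):
+ #
+ #     mvar = Concurrent::MVar.new
+ #     producer = Thread.new { mvar.put(42) }
+ #     mvar.take        #=> 42 (blocks until the producer's put)
+ #     producer.join
+ #     mvar.take(0.1)   #=> Concurrent::MVar::TIMEOUT (still empty)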
+ # + # @!macro copy_options + # + # ## See Also + # + # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non- strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th + # ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991. + # + # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). + # In Proceedings of the 23rd Symposium on Principles of Programming Languages + # (PoPL), 1996. + class MVar < Synchronization::Object + include Concern::Dereferenceable + safe_initialization! + + # Unique value that represents that an `MVar` was empty + EMPTY = ::Object.new + + # Unique value that represents that an `MVar` timed out before it was able + # to produce a value. + TIMEOUT = ::Object.new + + # Create a new `MVar`, either empty or with an initial value. + # + # @param [Hash] opts the options controlling how the future will be processed + # + # @!macro deref_options + def initialize(value = EMPTY, opts = {}) + @value = value + @mutex = Mutex.new + @empty_condition = ConditionVariable.new + @full_condition = ConditionVariable.new + set_deref_options(opts) + end + + # Remove the value from an `MVar`, leaving it empty, and blocking if there + # isn't a value. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was taken, or `TIMEOUT` + def take(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # acquires lock on the from an `MVAR`, yields the value to provided block, + # and release lock. A timeout can be set to limit the time spent blocked, + # in which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value returned by the block, or `TIMEOUT` + def borrow(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # if we timeoud out we'll still be empty + if unlocked_full? + yield @value + else + TIMEOUT + end + end + end + + # Put a value into an `MVar`, blocking if there is already a value until + # it is empty. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was put, or `TIMEOUT` + def put(value, timeout = nil) + @mutex.synchronize do + wait_for_empty(timeout) + + # If we timed out we won't be empty + if unlocked_empty? + @value = value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Atomically `take`, yield the value to a block for transformation, and then + # `put` the transformed value. Returns the transformed value. A timeout can + # be set to limit the time spent blocked, in which case it returns `TIMEOUT` + # if the time is exceeded. + # @return [Object] the transformed value, or `TIMEOUT` + def modify(timeout = nil) + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = yield value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Non-blocking version of `take`, that returns `EMPTY` instead of blocking. + def try_take! 
+ @mutex.synchronize do + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + EMPTY + end + end + end + + # Non-blocking version of `put`, that returns whether or not it was successful. + def try_put!(value) + @mutex.synchronize do + if unlocked_empty? + @value = value + @full_condition.signal + true + else + false + end + end + end + + # Non-blocking version of `put` that will overwrite an existing value. + def set!(value) + @mutex.synchronize do + old_value = @value + @value = value + @full_condition.signal + apply_deref_options(old_value) + end + end + + # Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet. + def modify! + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + value = @value + @value = yield value + if unlocked_empty? + @empty_condition.signal + else + @full_condition.signal + end + apply_deref_options(value) + end + end + + # Returns if the `MVar` is currently empty. + def empty? + @mutex.synchronize { @value == EMPTY } + end + + # Returns if the `MVar` currently contains a value. + def full? + !empty? + end + + protected + + def synchronize(&block) + @mutex.synchronize(&block) + end + + private + + def unlocked_empty? + @value == EMPTY + end + + def unlocked_full? + ! unlocked_empty? + end + + def wait_for_full(timeout) + wait_while(@full_condition, timeout) { unlocked_empty? } + end + + def wait_for_empty(timeout) + wait_while(@empty_condition, timeout) { unlocked_full? } + end + + def wait_while(condition, timeout) + if timeout.nil? + while yield + condition.wait(@mutex) + end + else + stop = Concurrent.monotonic_time + timeout + while yield && timeout > 0.0 + condition.wait(@mutex, timeout) + timeout = stop - Concurrent.monotonic_time + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/options.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/options.rb new file mode 100644 index 0000000..bdd22a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/options.rb @@ -0,0 +1,42 @@ +require 'concurrent/configuration' + +module Concurrent + + # @!visibility private + module Options + + # Get the requested `Executor` based on the values set in the options hash. + # + # @param [Hash] opts the options defining the requested executor + # @option opts [Executor] :executor when set use the given `Executor` instance. + # Three special values are also supported: `:fast` returns the global fast executor, + # `:io` returns the global io executor, and `:immediate` returns a new + # `ImmediateExecutor` object. 
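+ #
+ # For illustration (the option hashes here are arbitrary):
+ #
+ #   Options.executor_from_options(executor: :io)  #=> Concurrent.global_io_executor
+ #   Options.executor_from_options({})             #=> nil, caller picks its own default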
+ # + # @return [Executor, nil] the requested thread pool, or nil when no option specified + # + # @!visibility private + def self.executor_from_options(opts = {}) # :nodoc: + if identifier = opts.fetch(:executor, nil) + executor(identifier) + else + nil + end + end + + def self.executor(executor_identifier) + case executor_identifier + when :fast + Concurrent.global_fast_executor + when :io + Concurrent.global_io_executor + when :immediate + Concurrent.global_immediate_executor + when Concurrent::ExecutorService + executor_identifier + else + raise ArgumentError, "executor not recognized by '#{executor_identifier}'" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promise.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promise.rb new file mode 100644 index 0000000..f5f31eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promise.rb @@ -0,0 +1,579 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +module Concurrent + + PromiseExecutionError = Class.new(StandardError) + + # Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A) + # and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications. + # + # > A promise represents the eventual value returned from the single + # > completion of an operation. + # + # Promises are similar to futures and share many of the same behaviours. + # Promises are far more robust, however. Promises can be chained in a tree + # structure where each promise may have zero or more children. Promises are + # chained using the `then` method. The result of a call to `then` is always + # another promise. Promises are resolved asynchronously (with respect to the + # main thread) but in a strict order: parents are guaranteed to be resolved + # before their children, children before their younger siblings. The `then` + # method takes two parameters: an optional block to be executed upon parent + # resolution and an optional callable to be executed upon parent failure. The + # result of each promise is passed to each of its children upon resolution. + # When a promise is rejected all its children will be summarily rejected and + # will receive the reason. + # + # Promises have several possible states: *:unscheduled*, *:pending*, + # *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as + # `#incomplete?` and `#complete?`. When a Promise is created it is set to + # *:unscheduled*. Once the `#execute` method is called the state becomes + # *:pending*. Once a job is pulled from the thread pool's queue and is given + # to a thread for processing (often immediately upon `#post`) the state + # becomes *:processing*. The future will remain in this state until processing + # is complete. A future that is in the *:unscheduled*, *:pending*, or + # *:processing* is considered `#incomplete?`. A `#complete?` Promise is either + # *:rejected*, indicating that an exception was thrown during processing, or + # *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value` + # will be updated to reflect the result of the operation. If *:rejected* the + # `reason` will be updated with a reference to the thrown exception. 
The + # predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and + # `#fulfilled?` can be called at any time to obtain the state of the Promise, + # as can the `#state` method, which returns a symbol. + # + # Retrieving the value of a promise is done through the `value` (alias: + # `deref`) method. Obtaining the value of a promise is a potentially blocking + # operation. When a promise is *rejected* a call to `value` will return `nil` + # immediately. When a promise is *fulfilled* a call to `value` will + # immediately return the current value. When a promise is *pending* a call to + # `value` will block until the promise is either *rejected* or *fulfilled*. A + # *timeout* value can be passed to `value` to limit how long the call will + # block. If `nil` the call will block indefinitely. If `0` the call will not + # block. Any other integer or float value will indicate the maximum number of + # seconds to block. + # + # Promises run on the global thread pool. + # + # @!macro copy_options + # + # ### Examples + # + # Start by requiring promises + # + # ```ruby + # require 'concurrent' + # ``` + # + # Then create one + # + # ```ruby + # p = Concurrent::Promise.execute do + # # do something + # 42 + # end + # ``` + # + # Promises can be chained using the `then` method. The `then` method accepts a + # block and an executor, to be executed on fulfillment, and a callable argument to be executed + # on rejection. The result of the each promise is passed as the block argument + # to chained promises. + # + # ```ruby + # p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute + # ``` + # + # And so on, and so on, and so on... + # + # ```ruby + # p = Concurrent::Promise.fulfill(20). + # then{|result| result - 10 }. + # then{|result| result * 3 }. + # then(executor: different_executor){|result| result % 5 }.execute + # ``` + # + # The initial state of a newly created Promise depends on the state of its parent: + # - if parent is *unscheduled* the child will be *unscheduled* + # - if parent is *pending* the child will be *pending* + # - if parent is *fulfilled* the child will be *pending* + # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*) + # + # Promises are executed asynchronously from the main thread. By the time a + # child Promise finishes intialization it may be in a different state than its + # parent (by the time a child is created its parent may have completed + # execution and changed state). Despite being asynchronous, however, the order + # of execution of Promise objects in a chain (or tree) is strictly defined. + # + # There are multiple ways to create and execute a new `Promise`. Both ways + # provide identical behavior: + # + # ```ruby + # # create, operate, then execute + # p1 = Concurrent::Promise.new{ "Hello World!" } + # p1.state #=> :unscheduled + # p1.execute + # + # # create and immediately execute + # p2 = Concurrent::Promise.new{ "Hello World!" }.execute + # + # # execute during creation + # p3 = Concurrent::Promise.execute{ "Hello World!" } + # ``` + # + # Once the `execute` method is called a `Promise` becomes `pending`: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # p.state #=> :pending + # p.pending? #=> true + # ``` + # + # Wait a little bit, and the promise will resolve and provide a value: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # sleep(0.1) + # + # p.state #=> :fulfilled + # p.fulfilled? #=> true + # p.value #=> "Hello, world!" 
+ # ``` + # + # If an exception occurs, the promise will be rejected and will provide + # a reason for the rejection: + # + # ```ruby + # p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") } + # sleep(0.1) + # + # p.state #=> :rejected + # p.rejected? #=> true + # p.reason #=> "#" + # ``` + # + # #### Rejection + # + # When a promise is rejected all its children will be rejected and will + # receive the rejection `reason` as the rejection callable parameter: + # + # ```ruby + # p = Concurrent::Promise.execute { Thread.pass; raise StandardError } + # + # c1 = p.then(-> reason { 42 }) + # c2 = p.then(-> reason { raise 'Boom!' }) + # + # c1.wait.state #=> :fulfilled + # c1.value #=> 45 + # c2.wait.state #=> :rejected + # c2.reason #=> # + # ``` + # + # Once a promise is rejected it will continue to accept children that will + # receive immediately rejection (they will be executed asynchronously). + # + # #### Aliases + # + # The `then` method is the most generic alias: it accepts a block to be + # executed upon parent fulfillment and a callable to be executed upon parent + # rejection. At least one of them should be passed. The default block is `{ + # |result| result }` that fulfills the child with the parent value. The + # default callable is `{ |reason| raise reason }` that rejects the child with + # the parent reason. + # + # - `on_success { |result| ... }` is the same as `then {|result| ... }` + # - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )` + # - `rescue` is aliased by `catch` and `on_error` + class Promise < IVar + + # Initialize a new Promise with the provided options. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @option opts [Promise] :parent the parent `Promise` when building a chain/tree + # @option opts [Proc] :on_fulfill fulfillment handler + # @option opts [Proc] :on_reject rejection handler + # @option opts [object, Array] :args zero or more arguments to be passed + # the task block on execution + # + # @yield The block operation to be performed asynchronously. + # + # @raise [ArgumentError] if no block is given + # + # @see http://wiki.commonjs.org/wiki/Promises/A + # @see http://promises-aplus.github.io/promises-spec/ + def initialize(opts = {}, &block) + opts.delete_if { |k, v| v.nil? } + super(NULL, opts.merge(__promise_body_from_block__: block), &nil) + end + + # Create a new `Promise` and fulfill it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.fulfill(value, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) } + end + + # Create a new `Promise` and reject it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.reject(reason, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) } + end + + # Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Promise` is in any state other than `:unscheduled`. + # + # @return [Promise] a reference to `self` + def execute + if root? 
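+ # Only the root promise schedules work; children are realized as their
+ # parents resolve. A hedged sketch of the chaining described above
+ # (risky_call stands in for a real task):
+ #
+ #   p = Concurrent::Promise.execute { risky_call }
+ #   p = p.then { |value| value * 2 }          # runs on fulfillment
+ #   p = p.rescue { |reason| warn(reason) }    # runs if anything above rejected
+ #   p.wait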
+ if compare_and_set_state(:pending, :unscheduled) + set_pending + realize(@promise_body) + end + else + @parent.execute + end + self + end + + # @!macro ivar_set_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def set(value = NULL, &block) + raise PromiseExecutionError.new('supported only on root promise') unless root? + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @promise_body = block || Proc.new { |result| value } + end + end + execute + end + + # @!macro ivar_fail_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def fail(reason = StandardError.new) + set { raise reason } + end + + # Create a new `Promise` object with the given block, execute it, and return the + # `:pending` object. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @return [Promise] the newly created `Promise` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + # + # @example + # promise = Concurrent::Promise.execute{ sleep(1); 42 } + # promise.state #=> :pending + def self.execute(opts = {}, &block) + new(opts, &block).execute + end + + # Chain a new promise off the current promise. + # + # @return [Promise] the new promise + # @yield The block operation to be performed asynchronously. + # @overload then(rescuer, executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + # @overload then(rescuer, executor: executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + def then(*args, &block) + if args.last.is_a?(::Hash) + executor = args.pop[:executor] + rescuer = args.first + else + rescuer, executor = args + end + + executor ||= @executor + + raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given? + block = Proc.new { |result| result } unless block_given? + child = Promise.new( + parent: self, + executor: executor, + on_fulfill: block, + on_reject: rescuer + ) + + synchronize do + child.state = :pending if @state == :pending + child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled + child.on_reject(@reason) if @state == :rejected + @children << child + end + + child + end + + # Chain onto this promise an action to be undertaken on success + # (fulfillment). + # + # @yield The block to execute + # + # @return [Promise] self + def on_success(&block) + raise ArgumentError.new('no block given') unless block_given? + self.then(&block) + end + + # Chain onto this promise an action to be undertaken on failure + # (rejection). + # + # @yield The block to execute + # + # @return [Promise] self + def rescue(&block) + self.then(block) + end + + alias_method :catch, :rescue + alias_method :on_error, :rescue + + # Yield the successful result to the block that returns a promise. If that + # promise is also successful the result is the result of the yielded promise. + # If either part fails the whole also fails. + # + # @example + # Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! 
#=> 3 + # + # @return [Promise] + def flat_map(&block) + child = Promise.new( + parent: self, + executor: ImmediateExecutor.new, + ) + + on_error { |e| child.on_reject(e) } + on_success do |result1| + begin + inner = block.call(result1) + inner.execute + inner.on_success { |result2| child.on_fulfill(result2) } + inner.on_error { |e| child.on_reject(e) } + rescue => e + child.on_reject(e) + end + end + + child + end + + # Builds a promise that produces the result of promises in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] promises + # + # @overload zip(*promises, opts) + # @param [Array] promises + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def self.zip(*promises) + opts = promises.last.is_a?(::Hash) ? promises.pop.dup : {} + opts[:executor] ||= ImmediateExecutor.new + zero = if !opts.key?(:execute) || opts.delete(:execute) + fulfill([], opts) + else + Promise.new(opts) { [] } + end + + promises.reduce(zero) do |p1, p2| + p1.flat_map do |results| + p2.then do |next_result| + results << next_result + end + end + end + end + + # Builds a promise that produces the result of self and others in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] others + # + # @overload zip(*promises, opts) + # @param [Array] others + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def zip(*others) + self.class.zip(self, *others) + end + + # Aggregates a collection of promises and executes the `then` condition + # if all aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. + # + # @!macro promise_self_aggregate + # + # The returned promise will not yet have been executed. Additional `#then` + # and `#rescue` handlers may still be provided. Once the returned promise + # is execute the aggregate promises will be also be executed (if they have + # not been executed already). The results of the aggregate promises will + # be checked upon completion. The necessary `#then` and `#rescue` blocks + # on the aggregating promise will then be executed as appropriate. If the + # `#rescue` handlers are executed the raises exception will be + # `Concurrent::PromiseExecutionError`. + # + # @param [Array] promises Zero or more promises to aggregate + # @return [Promise] an unscheduled (not executed) promise that aggregates + # the promises given as arguments + def self.all?(*promises) + aggregate(:all?, *promises) + end + + # Aggregates a collection of promises and executes the `then` condition + # if any aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. 
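+ # Editor's note: a minimal, illustrative sketch of the `all?`/`any?` aggregation
+ # helpers described above; it is not part of the upstream gem, and the values
+ # shown are indicative only (assumes `require 'concurrent'`).
+ #
+ # ```ruby
+ # require 'concurrent'
+ #
+ # p1 = Concurrent::Promise.execute { 1 }
+ # p2 = Concurrent::Promise.execute { 2 }
+ #
+ # # The composite is returned unscheduled; chaining handlers and then calling
+ # # #execute triggers the aggregated promises and the appropriate handler.
+ # composite = Concurrent::Promise.all?(p1, p2).
+ #               then { 'both fulfilled' }.
+ #               rescue { |reason| "failed: #{reason.class}" }
+ # composite.execute.wait
+ # composite.value #=> "both fulfilled"
+ # ```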
+ # + # @!macro promise_self_aggregate + def self.any?(*promises) + aggregate(:any?, *promises) + end + + protected + + def ns_initialize(value, opts) + super + + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + + @parent = opts.fetch(:parent) { nil } + @on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } } + @on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } } + + @promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result } + @state = :unscheduled + @children = [] + end + + # Aggregate a collection of zero or more promises under a composite promise, + # execute the aggregated promises and collect them into a standard Ruby array, + # call the given Ruby `Ennnumerable` predicate (such as `any?`, `all?`, `none?`, + # or `one?`) on the collection checking for the success or failure of each, + # then executing the composite's `#then` handlers if the predicate returns + # `true` or executing the composite's `#rescue` handlers if the predicate + # returns false. + # + # @!macro promise_self_aggregate + def self.aggregate(method, *promises) + composite = Promise.new do + completed = promises.collect do |promise| + promise.execute if promise.unscheduled? + promise.wait + promise + end + unless completed.empty? || completed.send(method){|promise| promise.fulfilled? } + raise PromiseExecutionError + end + end + composite + end + + # @!visibility private + def set_pending + synchronize do + @state = :pending + @children.each { |c| c.set_pending } + end + end + + # @!visibility private + def root? # :nodoc: + @parent.nil? + end + + # @!visibility private + def on_fulfill(result) + realize Proc.new { @on_fulfill.call(result) } + nil + end + + # @!visibility private + def on_reject(reason) + realize Proc.new { @on_reject.call(reason) } + nil + end + + # @!visibility private + def notify_child(child) + if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) } + if_state(:rejected) { child.on_reject(@reason) } + end + + # @!visibility private + def complete(success, value, reason) + children_to_notify = synchronize do + set_state!(success, value, reason) + @children.dup + end + + children_to_notify.each { |child| notify_child(child) } + observers.notify_and_delete_observers{ [Time.now, self.value, reason] } + end + + # @!visibility private + def realize(task) + @executor.post do + success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, value, reason) + end + end + + # @!visibility private + def set_state!(success, value, reason) + set_state(success, value, reason) + event.set + end + + # @!visibility private + def synchronized_set_state!(success, value, reason) + synchronize { set_state!(success, value, reason) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promises.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promises.rb new file mode 100644 index 0000000..76af4d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/promises.rb @@ -0,0 +1,2167 @@ +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/collection/lock_free_stack' +require 'concurrent/errors' +require 'concurrent/re_include' + +module Concurrent + + # {include:file:docs-source/promises-main.md} + 
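+ # Editor's note: an illustrative sketch of the factory-method API defined
+ # below, added for readability; it is not part of the upstream file and
+ # assumes the gem is loaded via `require 'concurrent'`.
+ #
+ # ```ruby
+ # require 'concurrent'
+ #
+ # # Arguments passed to .future are forwarded to the task block.
+ # Concurrent::Promises.
+ #   future(2) { |v| v * 3 }.  # runs on the :io executor by default
+ #   then { |v| v + 1 }.
+ #   value!                    #=> 7
+ # ```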
module Promises + + # @!macro promises.param.default_executor + # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the + # global executor. Default executor propagates to chained futures unless overridden with + # executor parameter or changed with {AbstractEventFuture#with_default_executor}. + # + # @!macro promises.param.executor + # @param [Executor, :io, :fast] executor Instance of an executor or a name of the + # global executor. The task is executed on it, default executor remains unchanged. + # + # @!macro promises.param.args + # @param [Object] args arguments which are passed to the task when it's executed. + # (It might be prepended with other arguments, see the @yeild section). + # + # @!macro promises.shortcut.on + # Shortcut of {#$0_on} with default `:io` executor supplied. + # @see #$0_on + # + # @!macro promises.shortcut.using + # Shortcut of {#$0_using} with default `:io` executor supplied. + # @see #$0_using + # + # @!macro promise.param.task-future + # @yieldreturn will become result of the returned Future. + # Its returned value becomes {Future#value} fulfilling it, + # raised exception becomes {Future#reason} rejecting it. + # + # @!macro promise.param.callback + # @yieldreturn is forgotten. + + # Container of all {Future}, {Event} factory methods. They are never constructed directly with + # new. + module FactoryMethods + extend ReInclude + extend self + + module Configuration + # @return [Executor, :io, :fast] the executor which is used when none is supplied + # to a factory method. The method can be overridden in the receivers of + # `include FactoryMethod` + def default_executor + :io + end + end + + include Configuration + + # @!macro promises.shortcut.on + # @return [ResolvableEvent] + def resolvable_event + resolvable_event_on default_executor + end + + # Created resolvable event, user is responsible for resolving the event once by + # {Promises::ResolvableEvent#resolve}. + # + # @!macro promises.param.default_executor + # @return [ResolvableEvent] + def resolvable_event_on(default_executor = self.default_executor) + ResolvableEventPromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [ResolvableFuture] + def resolvable_future + resolvable_future_on default_executor + end + + # Creates resolvable future, user is responsible for resolving the future once by + # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill}, + # or {Promises::ResolvableFuture#reject} + # + # @!macro promises.param.default_executor + # @return [ResolvableFuture] + def resolvable_future_on(default_executor = self.default_executor) + ResolvableFuturePromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def future(*args, &task) + future_on(default_executor, *args, &task) + end + + # Constructs new Future which will be resolved after block is evaluated on default executor. + # Evaluation begins immediately. + # + # @!macro promises.param.default_executor + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + def future_on(default_executor, *args, &task) + ImmediateEventPromise.new(default_executor).future.then(*args, &task) + end + + # Creates resolved future with will be either fulfilled with the given value or rejection with + # the given reason. 
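+ # Editor's note: an illustrative sketch (not from the upstream gem) of the
+ # already-resolved factories — `resolved_future` above and the
+ # `fulfilled_future`/`rejected_future` shortcuts defined just below.
+ #
+ # ```ruby
+ # require 'concurrent'
+ #
+ # ok  = Concurrent::Promises.fulfilled_future(42)
+ # bad = Concurrent::Promises.rejected_future(StandardError.new('nope'))
+ #
+ # ok.fulfilled?  #=> true
+ # ok.value       #=> 42
+ # bad.rejected?  #=> true
+ # bad.value      #=> nil (no value on rejection)
+ # bad.reason     #=> #<StandardError: nope>
+ # ```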
+ # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promises.param.default_executor + # @return [Future] + def resolved_future(fulfilled, value, reason, default_executor = self.default_executor) + ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future + end + + # Creates resolved future with will be fulfilled with the given value. + # + # @!macro promises.param.default_executor + # @param [Object] value + # @return [Future] + def fulfilled_future(value, default_executor = self.default_executor) + resolved_future true, value, nil, default_executor + end + + # Creates resolved future with will be rejected with the given reason. + # + # @!macro promises.param.default_executor + # @param [Object] reason + # @return [Future] + def rejected_future(reason, default_executor = self.default_executor) + resolved_future false, nil, reason, default_executor + end + + # Creates resolved event. + # + # @!macro promises.param.default_executor + # @return [Event] + def resolved_event(default_executor = self.default_executor) + ImmediateEventPromise.new(default_executor).event + end + + # General constructor. Behaves differently based on the argument's type. It's provided for convenience + # but it's better to be explicit. + # + # @see rejected_future, resolved_event, fulfilled_future + # @!macro promises.param.default_executor + # @return [Event, Future] + # + # @overload make_future(nil, default_executor = self.default_executor) + # @param [nil] nil + # @return [Event] resolved event. + # + # @overload make_future(a_future, default_executor = self.default_executor) + # @param [Future] a_future + # @return [Future] a future which will be resolved when a_future is. + # + # @overload make_future(an_event, default_executor = self.default_executor) + # @param [Event] an_event + # @return [Event] an event which will be resolved when an_event is. + # + # @overload make_future(exception, default_executor = self.default_executor) + # @param [Exception] exception + # @return [Future] a rejected future with the exception as its reason. + # + # @overload make_future(value, default_executor = self.default_executor) + # @param [Object] value when none of the above overloads fits + # @return [Future] a fulfilled future with the value. + def make_future(argument = nil, default_executor = self.default_executor) + case argument + when AbstractEventFuture + # returning wrapper would change nothing + argument + when Exception + rejected_future argument, default_executor + when nil + resolved_event default_executor + else + fulfilled_future argument, default_executor + end + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def delay(*args, &task) + delay_on default_executor, *args, &task + end + + # Creates new event or future which is resolved only after it is touched, + # see {Concurrent::AbstractEventFuture#touch}. + # + # @!macro promises.param.default_executor + # @overload delay_on(default_executor, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload delay_on(default_executor) + # If no task is provided, it returns an {Event} + # @return [Event] + def delay_on(default_executor, *args, &task) + event = DelayPromise.new(default_executor).event + task ? 
event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def schedule(intended_time, *args, &task) + schedule_on default_executor, intended_time, *args, &task + end + + # Creates new event or future which is resolved in intended_time. + # + # @!macro promises.param.default_executor + # @!macro promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. + # @overload schedule_on(default_executor, intended_time, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload schedule_on(default_executor, intended_time) + # If no task is provided, it returns an {Event} + # @return [Event] + def schedule_on(default_executor, intended_time, *args, &task) + event = ScheduledPromise.new(default_executor, intended_time).event + task ? event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. + # @!macro promises.event-conversion + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # @!macro promises.shortcut.on + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on default_executor, *futures_and_or_events + end + + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on default_executor, *futures_and_or_events + end + + alias_method :any, :any_resolved_future + + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. + # @!macro promises.any-touch + # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. 
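+ # Editor's note: an illustrative sketch (not part of the upstream gem) of the
+ # zip/any combinators documented here; timing-dependent results are indicative.
+ #
+ # ```ruby
+ # require 'concurrent'
+ #
+ # a = Concurrent::Promises.future { 1 }
+ # b = Concurrent::Promises.future { sleep 0.05; 2 }
+ #
+ # # zip waits for all blockers and collects their values.
+ # Concurrent::Promises.zip(a, b).value!                  #=> [1, 2]
+ #
+ # # any_resolved_future takes the result of whichever resolves first,
+ # # so here it will normally be the faster future's value.
+ # Concurrent::Promises.any_resolved_future(a, b).value!  #=> usually 1
+ # ```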
+ # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_event(*futures_and_or_events) + any_event_on default_executor, *futures_and_or_events + end + + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + # TODO or rather a generic aggregator taking a function + end + + module InternalStates + # @!visibility private + class State + def resolved? + raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + # @!visibility private + class Pending < State + def resolved? + false + end + + def to_sym + :pending + end + end + + # @!visibility private + class Reserved < Pending + end + + # @!visibility private + class ResolvedWithResult < State + def resolved? + true + end + + def to_sym + :resolved + end + + def result + [fulfilled?, value, reason] + end + + def fulfilled? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + # @!visibility private + class Fulfilled < ResolvedWithResult + + def initialize(value) + @Value = value + end + + def fulfilled? + true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :fulfilled + end + end + + # @!visibility private + class FulfilledArray < Fulfilled + def apply(args, block) + block.call(*value, *args) + end + end + + # @!visibility private + class Rejected < ResolvedWithResult + def initialize(reason) + @Reason = reason + end + + def fulfilled? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :rejected + end + + def apply(args, block) + block.call reason, *args + end + end + + # @!visibility private + class PartiallyRejected < ResolvedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def fulfilled? 
+ false + end + + def to_sym + :rejected + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + # @!visibility private + PENDING = Pending.new + # @!visibility private + RESERVED = Reserved.new + # @!visibility private + RESOLVED = Fulfilled.new(nil) + + def RESOLVED.to_sym + :resolved + end + end + + private_constant :InternalStates + + # @!macro promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro promises.warn.blocks + # @note This function potentially blocks current thread until the Future is resolved. + # Be careful it can deadlock. Try to chain instead. + + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. + class AbstractEventFuture < Synchronization::Object + safe_initialization! + attr_atomic(:internal_state) + private :internal_state=, :swap_internal_state, :compare_and_set_internal_state, :update_internal_state + # @!method internal_state + # @!visibility private + + include InternalStates + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + @Callbacks = LockFreeStack.new + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + private :initialize + + # Returns its state. + # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :resolved] + # @overload a_future.state + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] + def state + internal_state.to_sym + end + + # Is it in pending state? + # @return [Boolean] + def pending? + !internal_state.resolved? + end + + # Is it in resolved state? + # @return [Boolean] + def resolved? + internal_state.resolved? + end + + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring resolved state, like {#wait}. + # @return [self] + def touch + @Promise.touch + self + end + + # @!macro promises.touches + # Calls {Concurrent::AbstractEventFuture#touch}. + + # @!macro promises.method.wait + # Wait (block the Thread) until receiver is {#resolved?}. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [self, true, false] self implies timeout was not used, true implies timeout was used + # and it was resolved, false implies it was not resolved within timeout. + def wait(timeout = nil) + result = wait_until_resolved(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor + # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on + # @see similar + def default_executor + @DefaultExecutor + end + + # @!macro promises.shortcut.on + # @return [Future] + def chain(*args, &task) + chain_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_on(executor, *args, &task) + # @yield [*args] to the task. 
+ # @overload a_future.chain_on(executor, *args, &task) + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def chain_on(executor, *args, &task) + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @return [String] Short string representation. + def to_s + format '%s %s>', super[0..-2], state + end + + alias_method :inspect, :to_s + + # Resolves the resolvable when receiver is resolved. + # + # @param [Resolvable] resolvable + # @return [self] + def chain_resolvable(resolvable) + on_resolution! { resolvable.resolve_with internal_state } + end + + alias_method :tangle, :chain_resolvable + + # @!macro promises.shortcut.using + # @return [self] + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution_using(executor, *args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution_using(executor, *args, &callback) + add_callback :async_callback_on_resolution, executor, args, callback + end + + # @!macro promises.method.with_default_executor + # Crates new object with same class with the executor set as its new default executor. + # Any futures depending on it will use the new default executor. + # @!macro promises.shortcut.event-future + # @abstract + # @return [AbstractEventFuture] + def with_default_executor(executor) + raise NotImplementedError + end + + # @!visibility private + def resolve_with(state, raise_on_reassign = true, reserved = false) + if compare_and_set_internal_state(reserved ? RESERVED : PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks state + else + return rejected_resolution(raise_on_reassign, state) + end + self + end + + # For inspection. + # @!visibility private + # @return [Array] + def blocks + @Callbacks.each_with_object([]) do |(method, args), promises| + promises.push(args[0]) if method == :callback_notify_blocked + end + end + + # For inspection. + # @!visibility private + def callbacks + @Callbacks.each.to_a + end + + # For inspection. + # @!visibility private + def promise + @Promise + end + + # For inspection. + # @!visibility private + def touched? + promise.touched? + end + + # For inspection. 
+ # @!visibility private + def waiting_threads + @Waiters.each.to_a + end + + # @!visibility private + def add_callback_notify_blocked(promise, index) + add_callback :callback_notify_blocked, promise, index + end + + # @!visibility private + def add_callback_clear_delayed_node(node) + add_callback(:callback_clear_delayed_node, node) + end + + # @!visibility private + def with_hidden_resolvable + # TODO (pitr-ch 10-Dec-2018): documentation, better name if in edge + self + end + + private + + def add_callback(method, *args) + state = internal_state + if state.resolved? + call_callback method, state, args + else + @Callbacks.push [method, args] + state = internal_state + # take back if it was resolved in the meanwhile + call_callbacks state if state.resolved? + end + self + end + + def callback_clear_delayed_node(state, node) + node.value = nil + end + + # @return [Boolean] + def wait_until_resolved(timeout) + return true if resolved? + + touch + + @Lock.synchronize do + @Waiters.increment + begin + unless resolved? + @Condition.wait @Lock, timeout + end + ensure + # JRuby may raise ConcurrencyError + @Waiters.decrement + end + end + resolved? + end + + def call_callback(method, state, args) + self.send method, state, *args + end + + def call_callbacks(state) + method, args = @Callbacks.pop + while method + call_callback method, state, args + method, args = @Callbacks.pop + end + end + + def with_async(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) + end + + def async_callback_on_resolution(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_resolution st, ar, cb + end + end + + def callback_notify_blocked(state, promise, index) + promise.on_blocker_resolution self, index + end + end + + # Represents an event which will happen in future (will be resolved). The event is either + # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and + # cancellation. + class Event < AbstractEventFuture + + alias_method :then, :chain + + + # @!macro promises.method.zip + # Creates a new event or a future which will be resolved when receiver and other are. + # Returns an event if receiver and other are events, otherwise returns a future. + # If just one of the parties is Future then the result + # of the returned future is equal to the result of the supplied future. If both are futures + # then the result is as described in {FactoryMethods#zip_futures_on}. + # + # @return [Future, Event] + def zip(other) + if other.is_a?(Future) + ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. + # + # @return [Event] + def any(event_or_future) + AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event + end + + alias_method :|, :any + + # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Event] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end + + # @!macro promise.method.schedule + # Creates new event dependent on receiver scheduled to execute on/in intended_time. 
+ # In time is interpreted from the moment the receiver is resolved, therefore it inserts + # delay into the chain. + # + # @!macro promises.param.intended_time + # @return [Event] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end.flat_event + end + + # Converts event to a future. The future is fulfilled when the event is resolved, the future may never fail. + # + # @return [Future] + def to_future + future = Promises.resolvable_future + ensure + chain_resolvable(future) + end + + # Returns self, since this is event + # @return [Event] + def to_event + self + end + + # @!macro promises.method.with_default_executor + # @return [Event] + def with_default_executor(executor) + EventWrapperPromise.new_blocked_by1(self, executor).event + end + + private + + def rejected_resolution(raise_on_reassign, state) + Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign + return false + end + + def callback_on_resolution(state, args, callback) + callback.call(*args) + end + end + + # Represents a value which will become available in future. May reject with a reason instead, + # e.g. when the tasks raises an exception. + class Future < AbstractEventFuture + + # Is it in fulfilled state? + # @return [Boolean] + def fulfilled? + state = internal_state + state.resolved? && state.fulfilled? + end + + # Is it in rejected state? + # @return [Boolean] + def rejected? + state = internal_state + state.resolved? && !state.fulfilled? + end + + # @!macro promises.warn.nil + # @note Make sure returned `nil` is not confused with timeout, no value when rejected, + # no reason when fulfilled, etc. + # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. + + # @!macro promises.method.value + # Return value of the future. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @!macro promises.param.timeout_value + # @param [Object] timeout_value a value returned by the method when it times out + # @return [Object, nil, timeout_value] the value of the Future when fulfilled, + # timeout_value on timeout, + # nil on rejection. + def value(timeout = nil, timeout_value = nil) + if wait_until_resolved timeout + internal_state.value + else + timeout_value + end + end + + # Returns reason of future's rejection. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @!macro promises.param.timeout_value + # @return [Object, timeout_value] the reason, or timeout_value on timeout, or nil on fulfillment. + def reason(timeout = nil, timeout_value = nil) + if wait_until_resolved timeout + internal_state.reason + else + timeout_value + end + end + + # Returns triplet fulfilled?, value, reason. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Array(Boolean, Object, Object), nil] triplet of fulfilled?, value, reason, or nil + # on timeout. + def result(timeout = nil) + internal_state.result if wait_until_resolved timeout + end + + # @!macro promises.method.wait + # @raise [Exception] {#reason} on rejection + def wait!(timeout = nil) + result = wait_until_resolved!(timeout) + timeout ? 
result : self + end + + # @!macro promises.method.value + # @return [Object, nil, timeout_value] the value of the Future when fulfilled, + # or nil on rejection, + # or timeout_value on timeout. + # @raise [Exception] {#reason} on rejection + def value!(timeout = nil, timeout_value = nil) + if wait_until_resolved! timeout + internal_state.value + else + timeout_value + end + end + + # Allows rejected Future to be risen with `raise` method. + # If the reason is not an exception `Runtime.new(reason)` is returned. + # + # @example + # raise Promises.rejected_future(StandardError.new("boom")) + # raise Promises.rejected_future("or just boom") + # @raise [Concurrent::Error] when raising not rejected future + # @return [Exception] + def exception(*args) + raise Concurrent::Error, 'it is not rejected' unless rejected? + raise ArgumentError unless args.size <= 1 + reason = Array(internal_state.reason).flatten.compact + if reason.size > 1 + ex = Concurrent::MultipleErrors.new reason + ex.set_backtrace(caller) + ex + else + ex = if reason[0].respond_to? :exception + reason[0].exception(*args) + else + RuntimeError.new(reason[0]).exception(*args) + end + ex.set_backtrace Array(ex.backtrace) + caller + ex + end + end + + # @!macro promises.shortcut.on + # @return [Future] + def then(*args, &task) + then_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it fulfills. Does not run + # the task if it rejects. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [value, *args] to the task. + def then_on(executor, *args, &task) + ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def rescue(*args, &task) + rescue_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it rejects. Does not run + # the task if it fulfills. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [reason, *args] to the task. + def rescue_on(executor, *args, &task) + RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.method.zip + # @return [Future] + def zip(other) + if other.is_a?(Future) + ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future + else + ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. Returning future will have value nil if event_or_future is event and resolves + # first. + # + # @return [Future] + def any(event_or_future) + AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future + end + + alias_method :|, :any + + # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. 
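+ # Editor's note: an illustrative sketch (not from the upstream gem) of the lazy
+ # evaluation described above; waiting on the chain touches it and triggers the
+ # delayed part.
+ #
+ # ```ruby
+ # require 'concurrent'
+ #
+ # lazy = Concurrent::Promises.
+ #          fulfilled_future(1).
+ #          delay.                # nothing past this point runs yet
+ #          then { |v| v + 1 }
+ #
+ # lazy.resolved?  #=> false
+ # lazy.value      #=> 2 (waiting touches the chain and evaluates it)
+ # ```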
+ # + # @return [Future] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end + + # @!macro promise.method.schedule + # @return [Future] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end.flat + end + + # @!macro promises.method.with_default_executor + # @return [Future] + def with_default_executor(executor) + FutureWrapperPromise.new_blocked_by1(self, executor).future + end + + # Creates new future which will have result of the future returned by receiver. If receiver + # rejects it will have its rejection. + # + # @param [Integer] level how many levels of futures should flatten + # @return [Future] + def flat_future(level = 1) + FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future + end + + alias_method :flat, :flat_future + + # Creates new event which will be resolved when the returned event by receiver is. + # Be careful if the receiver rejects it will just resolve since Event does not hold reason. + # + # @return [Event] + def flat_event + FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # @!macro promises.shortcut.using + # @return [self] + def on_fulfillment(*args, &callback) + on_fulfillment_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment!(*args, &callback) + add_callback :callback_on_fulfillment, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment_using(executor, *args, &callback) + add_callback :async_callback_on_fulfillment, executor, args, callback + end + + # @!macro promises.shortcut.using + # @return [self] + def on_rejection(*args, &callback) + on_rejection_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection!(*args, &callback) + add_callback :callback_on_rejection, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection_using(executor, *args, &callback) + add_callback :async_callback_on_rejection, executor, args, callback + end + + # Allows to use futures as green threads. The receiver has to evaluate to a future which + # represents what should be done next. It basically flattens indefinitely until non Future + # values is returned which becomes result of the returned future. Any encountered exception + # will become reason of the returned future. 
+ # + # @return [Future] + # @param [#call(value)] run_test + # an object which when called returns either Future to keep running with + # or nil, then the run completes with the value. + # The run_test can be used to extract the Future from deeper structure, + # or to distinguish Future which is a resulting value from a future + # which is suppose to continue running. + # @example + # body = lambda do |v| + # v += 1 + # v < 5 ? Promises.future(v, &body) : v + # end + # Promises.future(0, &body).run.value! # => 5 + def run(run_test = method(:run_test)) + RunFuturePromise.new_blocked_by1(self, @DefaultExecutor, run_test).future + end + + # @!visibility private + def apply(args, block) + internal_state.apply args, block + end + + # Converts future to event which is resolved when future is resolved by fulfillment or rejection. + # + # @return [Event] + def to_event + event = Promises.resolvable_event + ensure + chain_resolvable(event) + end + + # Returns self, since this is a future + # @return [Future] + def to_future + self + end + + # @return [String] Short string representation. + def to_s + if resolved? + format '%s with %s>', super[0..-2], (fulfilled? ? value : reason).inspect + else + super + end + end + + alias_method :inspect, :to_s + + private + + def run_test(v) + v if v.is_a?(Future) + end + + def rejected_resolution(raise_on_reassign, state) + if raise_on_reassign + if internal_state == RESERVED + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It is already reserved.") + else + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", + current_result: result, + new_result: state.result) + end + end + return false + end + + def wait_until_resolved!(timeout = nil) + result = wait_until_resolved(timeout) + raise self if rejected? + result + end + + def async_callback_on_fulfillment(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_fulfillment st, ar, cb + end + end + + def async_callback_on_rejection(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_rejection st, ar, cb + end + end + + def callback_on_fulfillment(state, args, callback) + state.apply args, callback if state.fulfilled? + end + + def callback_on_rejection(state, args, callback) + state.apply args, callback unless state.fulfilled? + end + + def callback_on_resolution(state, args, callback) + callback.call(*state.result, *args) + end + + end + + # Marker module of Future, Event resolved manually. + module Resolvable + include InternalStates + end + + # A Event which can be resolved by user. + class ResolvableEvent < Event + include Resolvable + + # @!macro raise_on_reassign + # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. + + # @!macro promise.param.raise_on_reassign + # @param [Boolean] raise_on_reassign should method raise exception if already resolved + # @return [self, false] false is returner when raise_on_reassign is false and the receiver + # is already resolved. + # + + # Makes the event resolved, which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + # @param [true, false] reserved + # Set to true if the resolvable is {#reserve}d by you, + # marks resolution of reserved resolvable events and futures explicitly. 
+ # Advanced feature, ignore unless you use {Resolvable#reserve} from edge. + def resolve(raise_on_reassign = true, reserved = false) + resolve_with RESOLVED, raise_on_reassign, reserved + end + + # Creates new event wrapping receiver, effectively hiding the resolve method. + # + # @return [Event] + def with_hidden_resolvable + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. + # + # @param [true, false] resolve_on_timeout + # If it times out and the argument is true it will also resolve the event. + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = false) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(false) + else + false + end + end + end + + # A Future which can be resolved by user. + class ResolvableFuture < Future + include Resolvable + + # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, + # which triggers all dependent futures. + # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true, reserved = false) + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign, reserved) + end + + # Makes the future fulfilled with `value`, + # which triggers all dependent futures. + # + # @param [Object] value + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def fulfill(value, raise_on_reassign = true, reserved = false) + resolve_with Fulfilled.new(value), raise_on_reassign, reserved + end + + # Makes the future rejected with `reason`, + # which triggers all dependent futures. + # + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def reject(reason, raise_on_reassign = true, reserved = false) + resolve_with Rejected.new(reason), raise_on_reassign, reserved + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + def evaluate_to(*args, &block) + promise.evaluate_to(*args, block) + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + # @raise [Exception] also raise reason on rejection. + def evaluate_to!(*args, &block) + promise.evaluate_to(*args, block).wait! + end + + # @!macro promises.resolvable.resolve_on_timeout + # @param [::Array(true, Object, nil), ::Array(false, nil, Exception), nil] resolve_on_timeout + # If it times out and the argument is not nil it will also resolve the future + # to the provided resolution. + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. 
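+ # Editor's note: an illustrative sketch (not part of the upstream gem) of
+ # resolving a ResolvableFuture manually and of the resolve_on_timeout
+ # argument described here; the triplet format is [fulfilled, value, reason].
+ #
+ # ```ruby
+ # require 'concurrent'
+ #
+ # f        = Concurrent::Promises.resolvable_future
+ # consumer = f.then { |v| v * 2 }
+ # f.fulfill 21
+ # consumer.value!  #=> 42
+ #
+ # # If nobody resolves it within the timeout, reject it ourselves.
+ # g = Concurrent::Promises.resolvable_future
+ # g.wait(0.01, [false, nil, StandardError.new('timed out')])
+ # g.rejected?      #=> true
+ # ```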
+ # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(*resolve_on_timeout, false) + else + false + end + end + + # Behaves as {Future#wait!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @raise [Exception] {#reason} on rejection + # @see Future#wait! + def wait!(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + if resolve(*resolve_on_timeout, false) + false + else + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + raise self if rejected? + true + end + else + false + end + end + + # Behaves as {Future#value} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @see Future#value + def value(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#value!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @raise [Exception] {#reason} on rejection + # @see Future#value! + def value!(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved! timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + raise self if rejected? + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#reason} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Exception, timeout_value, nil] + # @see Future#reason + def reason(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.reason + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.reason + end + end + timeout_value + end + end + + # Behaves as {Future#result} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [::Array(Boolean, Object, Exception), nil] + # @see Future#result + def result(timeout = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.result + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + internal_state.result + end + end + # otherwise returns nil + end + end + + # Creates new future wrapping receiver, effectively hiding the resolve method and similar. 
+ # + # @return [Future] + def with_hidden_resolvable + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future + end + end + + # @abstract + # @private + class AbstractPromise < Synchronization::Object + safe_initialization! + include InternalStates + + def initialize(future) + super() + @Future = future + end + + def future + @Future + end + + alias_method :event, :future + + def default_executor + future.default_executor + end + + def state + future.state + end + + def touch + end + + def to_s + format '%s %s>', super[0..-2], @Future + end + + alias_method :inspect, :to_s + + def delayed_because + nil + end + + private + + def resolve_with(new_state, raise_on_reassign = true) + @Future.resolve_with(new_state, raise_on_reassign) + end + + # @return [Future] + def evaluate_to(*args, block) + resolve_with Fulfilled.new(block.call(*args)) + rescue Exception => error + resolve_with Rejected.new(error) + raise error unless error.is_a?(StandardError) + end + end + + class ResolvableEventPromise < AbstractPromise + def initialize(default_executor) + super ResolvableEvent.new(self, default_executor) + end + end + + class ResolvableFuturePromise < AbstractPromise + def initialize(default_executor) + super ResolvableFuture.new(self, default_executor) + end + + public :evaluate_to + end + + # @abstract + class InnerPromise < AbstractPromise + end + + # @abstract + class BlockedPromise < InnerPromise + + private_class_method :new + + def self.new_blocked_by1(blocker, *args, &block) + blocker_delayed = blocker.promise.delayed_because + promise = new(blocker_delayed, 1, *args, &block) + blocker.add_callback_notify_blocked promise, 0 + promise + end + + def self.new_blocked_by2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed_because + blocker_delayed2 = blocker2.promise.delayed_because + delayed = if blocker_delayed1 && blocker_delayed2 + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) + LockFreeStack.of2(blocker_delayed1, blocker_delayed2) + else + blocker_delayed1 || blocker_delayed2 + end + promise = new(delayed, 2, *args, &block) + blocker1.add_callback_notify_blocked promise, 0 + blocker2.add_callback_notify_blocked promise, 1 + promise + end + + def self.new_blocked_by(blockers, *args, &block) + delayed = blockers.reduce(nil) { |d, f| add_delayed d, f.promise.delayed_because } + promise = new(delayed, blockers.size, *args, &block) + blockers.each_with_index { |f, i| f.add_callback_notify_blocked promise, i } + promise + end + + def self.add_delayed(delayed1, delayed2) + if delayed1 && delayed2 + delayed1.push delayed2 + delayed1 + else + delayed1 || delayed2 + end + end + + def initialize(delayed, blockers_count, future) + super(future) + @Delayed = delayed + @Countdown = AtomicFixnum.new blockers_count + end + + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) + resolvable = resolvable?(countdown, future, index) + + on_resolvable(future, index) if resolvable + end + + def delayed_because + @Delayed + end + + def touch + clear_and_propagate_touch + end + + # for inspection only + def blocked_by + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by + end + + private + + def clear_and_propagate_touch(stack_or_element = @Delayed) + return if stack_or_element.nil? + + if stack_or_element.is_a? 
LockFreeStack + stack_or_element.clear_each { |element| clear_and_propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end + end + + # @return [true,false] if resolvable + def resolvable?(countdown, future, index) + countdown.zero? + end + + def process_on_blocker_resolution(future, index) + @Countdown.decrement + end + + def on_resolvable(resolved_future, index) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super delayed, 1, Future.new(self, default_executor) + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.fulfilled? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.rejected? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_resolvable(resolved_future, index) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to(*args, task) + end + end + end + end + + # will be immediately resolved + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).resolve_with(RESOLVED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, fulfilled, value, reason) + super Future.new(self, default_executor). + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) + end + end + + class AbstractFlatPromise < BlockedPromise + + def initialize(delayed_because, blockers_count, event_or_future) + delayed = LockFreeStack.of1(self) + super(delayed, blockers_count, event_or_future) + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @DelayedBecause = delayed_because || LockFreeStack.new + + event_or_future.add_callback_clear_delayed_node delayed.peek + end + + def touch + if @Touched.make_true + clear_and_propagate_touch @DelayedBecause + end + end + + private + + def touched? + @Touched.value + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) + end + + def add_delayed_of(future) + delayed = future.promise.delayed_because + if touched? 
+ clear_and_propagate_touch delayed + else + BlockedPromise.add_delayed @DelayedBecause, delayed + clear_and_propagate_touch @DelayedBecause if touched? + end + end + + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with RESOLVED + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + resolve_with RESOLVED + end + end + countdown + end + + end + + class FlatFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with internal_state + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor, run_test) + super delayed, 1, Future.new(self, default_executor) + @RunTest = run_test + end + + def process_on_blocker_resolution(future, index) + internal_state = future.internal_state + + unless internal_state.fulfilled? 
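+ # A blocker that did not fulfil terminates the run: this future adopts the
+ # blocker's state directly instead of asking @RunTest for a continuation.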
+ resolve_with internal_state + return 0 + end + + value = internal_state.value + continuation_future = @RunTest.call value + + if continuation_future + add_delayed_of continuation_future + continuation_future.add_callback_notify_blocked self, nil + else + resolve_with internal_state + end + + 1 + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) + @result = nil + end + + private + + def process_on_blocker_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index + end + + def on_resolvable(resolved_future, index) + resolve_with @result + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count, nil) + + on_resolvable nil, nil if blockers_count == 0 + end + + def process_on_blocker_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? 
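+ # Each blocker writes only its own pre-sized slot, and the volatile
+ # @Countdown decrement in `super` publishes that write before
+ # #on_resolvable reads the collected states (the same piggybacking trick
+ # as ZipFutureEventPromise above).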
+ @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index + end + + def on_resolvable(resolved_future, index) + all_fulfilled = true + values = ::Array.new(@Resolutions.size) + reasons = ::Array.new(@Resolutions.size) + + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled + end + + if all_fulfilled + resolve_with FulfilledArray.new(values) + else + resolve_with PartiallyRejected.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + + on_resolvable nil, nil if blockers_count == 0 + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + # @abstract + class AbstractAnyPromise < BlockedPromise + end + + class AnyResolvedEventPromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED, false + end + end + + class AnyResolvedFuturePromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state, false + end + end + + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise + + private + + def resolvable?(countdown, future, index) + future.fulfilled? || + # inlined super from BlockedPromise + countdown.zero? + end + end + + class DelayPromise < InnerPromise + + def initialize(default_executor) + event = Event.new(self, default_executor) + @Delayed = LockFreeStack.of1(self) + super event + event.add_callback_clear_delayed_node @Delayed.peek + end + + def touch + @Future.resolve_with RESOLVED + end + + def delayed_because + @Delayed + end + + end + + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time + @IntendedTime + else + now + @IntendedTime + end + [0, schedule_time.to_f - now.to_f].max + end + + Concurrent.global_timer_set.post(in_seconds) do + @Future.resolve_with RESOLVED + end + end + end + + extend FactoryMethods + + private_constant :AbstractPromise, + :ResolvableEventPromise, + :ResolvableFuturePromise, + :InnerPromise, + :BlockedPromise, + :BlockedTaskPromise, + :ThenPromise, + :RescuePromise, + :ChainPromise, + :ImmediateEventPromise, + :ImmediateFuturePromise, + :AbstractFlatPromise, + :FlatFuturePromise, + :FlatEventPromise, + :RunFuturePromise, + :ZipEventEventPromise, + :ZipFutureEventPromise, + :EventWrapperPromise, + :FutureWrapperPromise, + :ZipFuturesPromise, + :ZipEventsPromise, + :AbstractAnyPromise, + :AnyResolvedFuturePromise, + :AnyFulfilledFuturePromise, + :AnyResolvedEventPromise, + :DelayPromise, + :ScheduledPromise + + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/re_include.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/re_include.rb new file mode 100644 index 0000000..516d58c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/re_include.rb @@ -0,0 +1,58 @@ +module Concurrent + + # Methods form module A included to a module B, which is already included into class C, + # will not be visible in the C class. If this module is extended to B then A's methods + # are correctly made visible to C. + # + # @example + # module A + # def a + # :a + # end + # end + # + # module B1 + # end + # + # class C1 + # include B1 + # end + # + # module B2 + # extend Concurrent::ReInclude + # end + # + # class C2 + # include B2 + # end + # + # B1.send :include, A + # B2.send :include, A + # + # C1.new.respond_to? :a # => false + # C2.new.respond_to? :a # => true + module ReInclude + # @!visibility private + def included(base) + (@re_include_to_bases ||= []) << [:include, base] + super(base) + end + + # @!visibility private + def extended(base) + (@re_include_to_bases ||= []) << [:extend, base] + super(base) + end + + # @!visibility private + def include(*modules) + result = super(*modules) + modules.reverse.each do |module_being_included| + (@re_include_to_bases ||= []).each do |method, mod| + mod.send method, module_being_included + end + end + result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/scheduled_task.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/scheduled_task.rb new file mode 100644 index 0000000..90f78b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/scheduled_task.rb @@ -0,0 +1,318 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/utility/monotonic_time' + +require 'concurrent/options' + +module Concurrent + + # `ScheduledTask` is a close relative of `Concurrent::Future` but with one + # important difference: A `Future` is set to execute as soon as possible + # whereas a `ScheduledTask` is set to execute after a specified delay. This + # implementation is loosely based on Java's + # [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html). + # It is a more feature-rich variant of {Concurrent.timer}. 
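+ #
+ # A rough comparison (illustrative sketch; `do_work` is a placeholder method):
+ #
+ #   Concurrent.timer(2) { do_work }                          # fire-and-forget
+ #   task = Concurrent::ScheduledTask.execute(2) { do_work }  # inspectable and cancellable
+ #   task.state    #=> :pending
+ #   task.cancel   #=> true (provided it has not started yet)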
+ # + # The *intended* schedule time of task execution is set on object construction + # with the `delay` argument. The delay is a numeric (floating point or integer) + # representing a number of seconds in the future. Any other value or a numeric + # equal to or less than zero will result in an exception. The *actual* schedule + # time of task execution is set when the `execute` method is called. + # + # The constructor can also be given zero or more processing options. Currently + # the only supported options are those recognized by the + # [Dereferenceable](Dereferenceable) module. + # + # The final constructor argument is a block representing the task to be performed. + # If no block is given an `ArgumentError` will be raised. + # + # **States** + # + # `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it + # "future" behavior. This includes the expected lifecycle states. `ScheduledTask` + # has one additional state, however. While the task (block) is being executed the + # state of the object will be `:processing`. This additional state is necessary + # because it has implications for task cancellation. + # + # **Cancellation** + # + # A `:pending` task can be cancelled using the `#cancel` method. A task in any + # other state, including `:processing`, cannot be cancelled. The `#cancel` + # method returns a boolean indicating the success of the cancellation attempt. + # A cancelled `ScheduledTask` cannot be restarted. It is immutable. + # + # **Obligation and Observation** + # + # The result of a `ScheduledTask` can be obtained either synchronously or + # asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation) + # module and the + # [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html) + # module from the Ruby standard library. With one exception `ScheduledTask` + # behaves identically to [Future](Observable) with regard to these modules. + # + # @!macro copy_options + # + # @example Basic usage + # + # require 'concurrent' + # require 'thread' # for Queue + # require 'open-uri' # for open(uri) + # + # class Ticker + # def get_year_end_closing(symbol, year) + # uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m" + # data = open(uri) {|f| f.collect{|line| line.strip } } + # data[1].split(',')[4].to_f + # end + # end + # + # # Future + # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) } + # price.state #=> :pending + # sleep(1) # do other stuff + # price.value #=> 63.65 + # price.state #=> :fulfilled + # + # # ScheduledTask + # task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) } + # task.state #=> :pending + # sleep(3) # do other stuff + # task.value #=> 25.96 + # + # @example Successful task execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.state #=> :unscheduled + # task.execute + # task.state #=> pending + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> true + # task.rejected? #=> false + # task.value #=> 'What does the fox say?' + # + # @example One line creation and execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute + # task.state #=> pending + # + # task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' 
} + # task.state #=> pending + # + # @example Failed task execution + # + # task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') } + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> false + # task.rejected? #=> true + # task.value #=> nil + # task.reason #=> # + # + # @example Task execution with observation + # + # observer = Class.new{ + # def update(time, value, reason) + # puts "The task completed at #{time} with value '#{value}'" + # end + # }.new + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.add_observer(observer) + # task.execute + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?' + # + # @!macro monotonic_clock_warning + # + # @see Concurrent.timer + class ScheduledTask < IVar + include Comparable + + # The executor on which to execute the task. + # @!visibility private + attr_reader :executor + + # Schedule a task for execution at a specified future time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @yield the task to be performed + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] When no block is given + # @raise [ArgumentError] When given a time that is in the past + def initialize(delay, opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + raise ArgumentError.new('seconds must be greater than zero') if delay.to_f < 0.0 + + super(NULL, opts, &nil) + + synchronize do + ns_set_state(:unscheduled) + @parent = opts.fetch(:timer_set, Concurrent.global_timer_set) + @args = get_arguments_from(opts) + @delay = delay.to_f + @task = task + @time = nil + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + self.observers = Collection::CopyOnNotifyObserverSet.new + end + end + + # The `delay` value given at instanciation. + # + # @return [Float] the initial delay. + def initial_delay + synchronize { @delay } + end + + # The monotonic time at which the the task is scheduled to be executed. + # + # @return [Float] the schedule time or nil if `unscheduled` + def schedule_time + synchronize { @time } + end + + # Comparator which orders by schedule time. + # + # @!visibility private + def <=>(other) + schedule_time <=> other.schedule_time + end + + # Has the task been cancelled? + # + # @return [Boolean] true if the task is in the given state else false + def cancelled? + synchronize { ns_check_state?(:cancelled) } + end + + # In the task execution in progress? + # + # @return [Boolean] true if the task is in the given state else false + def processing? + synchronize { ns_check_state?(:processing) } + end + + # Cancel this task and prevent it from executing. A task can only be + # cancelled if it is pending or unscheduled. + # + # @return [Boolean] true if successfully cancelled else false + def cancel + if compare_and_set_state(:cancelled, :pending, :unscheduled) + complete(false, nil, CancelledOperationError.new) + # To avoid deadlocks this call must occur outside of #synchronize + # Changing the state above should prevent redundant calls + @parent.send(:remove_task, self) + else + false + end + end + + # Reschedule the task using the original delay and the current time. 
+ # A task can only be reset while it is `:pending`. + # + # @return [Boolean] true if successfully rescheduled else false + def reset + synchronize{ ns_reschedule(@delay) } + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @raise [ArgumentError] When given a time that is in the past + def reschedule(delay) + delay = delay.to_f + raise ArgumentError.new('seconds must be greater than zero') if delay < 0.0 + synchronize{ ns_reschedule(delay) } + end + + # Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending` + # and starts counting down toward execution. Does nothing if the `ScheduledTask` is + # in any state other than `:unscheduled`. + # + # @return [ScheduledTask] a reference to `self` + def execute + if compare_and_set_state(:pending, :unscheduled) + synchronize{ ns_schedule(@delay) } + end + self + end + + # Create a new `ScheduledTask` object with the given block, execute it, and return the + # `:pending` object. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @!macro executor_and_deref_options + # + # @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + def self.execute(delay, opts = {}, &task) + new(delay, opts, &task).execute + end + + # Execute the task. + # + # @!visibility private + def process_task + safe_execute(@task, @args) + end + + protected :set, :try_set, :fail, :complete + + protected + + # Schedule the task using the given delay and the current time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_schedule(delay) + @delay = delay + @time = Concurrent.monotonic_time + @delay + @parent.send(:post_task, self) + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_reschedule(delay) + return false unless ns_check_state?(:pending) + @parent.send(:remove_task, self) && ns_schedule(delay) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/set.rb new file mode 100644 index 0000000..602d494 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/set.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' +require 'set' + +module Concurrent + + # @!macro concurrent_set + # + # A thread-safe subclass of Set. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set` + # which is union of `a` and `b`, then it writes the union to `a`. 
+ # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#merge` instead. + # + # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set` + + + # @!macro internal_implementation_note + SetImplementation = case + when Concurrent.on_cruby? + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + ::Set + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubySet < ::Set + include JRuby::Synchronized + end + JRubySet + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxSet < ::Set + end + ThreadSafe::Util.make_synchronized_on_rbx Concurrent::RbxSet + RbxSet + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_truffleruby Concurrent::TruffleRubySet + TruffleRubySet + + else + warn 'Possibly unsupported Ruby implementation' + ::Set + end + private_constant :SetImplementation + + # @!macro concurrent_set + class Set < SetImplementation + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/settable_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/settable_struct.rb new file mode 100644 index 0000000..0012352 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/settable_struct.rb @@ -0,0 +1,139 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe, write-once variation of Ruby's standard `Struct`. + # Each member can have its value set at most once, either at construction + # or any time thereafter. Attempting to assign a value to a member + # that has already been set will result in a `Concurrent::ImmutabilityError`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + # @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword + module SettableStruct + include Synchronization::AbstractStruct + + # @!macro struct_values + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? 
+ synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # @raise [Concurrent::ImmutabilityError] if the given member has already been set + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize do + unless @values[member].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[member] = value + end + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize do + unless @values[index].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[index] = value + end + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization.rb new file mode 100644 index 0000000..49c68eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization.rb @@ -0,0 +1,30 @@ +require 'concurrent/utility/engine' + +require 'concurrent/synchronization/abstract_object' +require 'concurrent/utility/native_extension_loader' # load native parts first +Concurrent.load_native_extensions + +require 'concurrent/synchronization/mri_object' +require 'concurrent/synchronization/jruby_object' +require 'concurrent/synchronization/rbx_object' +require 'concurrent/synchronization/truffleruby_object' +require 'concurrent/synchronization/object' +require 'concurrent/synchronization/volatile' + +require 'concurrent/synchronization/abstract_lockable_object' +require 'concurrent/synchronization/mutex_lockable_object' +require 'concurrent/synchronization/jruby_lockable_object' +require 'concurrent/synchronization/rbx_lockable_object' + +require 'concurrent/synchronization/lockable_object' + +require 'concurrent/synchronization/condition' +require 'concurrent/synchronization/lock' + +module Concurrent + # {include:file:docs-source/synchronization.md} + # {include:file:docs-source/synchronization-notes.md} + module Synchronization + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb 
b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb new file mode 100644 index 0000000..bc12603 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb @@ -0,0 +1,98 @@ +module Concurrent + module Synchronization + + # @!visibility private + class AbstractLockableObject < Synchronization::Object + + protected + + # @!macro synchronization_object_method_synchronize + # + # @yield runs the block synchronized against this object, + # equivalent of java's `synchronize(this) {}` + # @note can by made public in descendants if required by `public :synchronize` + def synchronize + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_wait_until + # + # Wait until condition is met or timeout passes, + # protects against spurious wake-ups. + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @yield condition to be met + # @yieldreturn [true, false] + # @return [true, false] if condition met + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait_until(timeout = nil, &condition) + # synchronize { ns_wait_until(timeout, &condition) } + # end + # ``` + def ns_wait_until(timeout = nil, &condition) + if timeout + wait_until = Concurrent.monotonic_time + timeout + loop do + now = Concurrent.monotonic_time + condition_result = condition.call + return condition_result if now >= wait_until || condition_result + ns_wait wait_until - now + end + else + ns_wait timeout until condition.call + true + end + end + + # @!macro synchronization_object_method_ns_wait + # + # Wait until another thread calls #signal or #broadcast, + # spurious wake-ups can happen. + # + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait(timeout = nil) + # synchronize { ns_wait(timeout) } + # end + # ``` + def ns_wait(timeout = nil) + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_signal + # + # Signal one waiting thread. + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def signal + # synchronize { ns_signal } + # end + # ``` + def ns_signal + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_broadcast + # + # Broadcast to all waiting threads. 
+ # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def broadcast + # synchronize { ns_broadcast } + # end + # ``` + def ns_broadcast + raise NotImplementedError + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb new file mode 100644 index 0000000..532388b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb @@ -0,0 +1,24 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class AbstractObject + + # @abstract has to be implemented based on Ruby runtime + def initialize + raise NotImplementedError + end + + # @!visibility private + # @abstract + def full_memory_barrier + raise NotImplementedError + end + + def self.attr_volatile(*names) + raise NotImplementedError + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb new file mode 100644 index 0000000..1fe90c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb @@ -0,0 +1,171 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module AbstractStruct + + # @!visibility private + def initialize(*values) + super() + ns_initialize(*values) + end + + # @!macro struct_length + # + # Returns the number of struct members. + # + # @return [Fixnum] the number of struct members + def length + self.class::MEMBERS.length + end + alias_method :size, :length + + # @!macro struct_members + # + # Returns the struct members as an array of symbols. + # + # @return [Array] the struct members as an array of symbols + def members + self.class::MEMBERS.dup + end + + protected + + # @!macro struct_values + # + # @!visibility private + def ns_values + @values.dup + end + + # @!macro struct_values_at + # + # @!visibility private + def ns_values_at(indexes) + @values.values_at(*indexes) + end + + # @!macro struct_to_h + # + # @!visibility private + def ns_to_h + length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo} + end + + # @!macro struct_get + # + # @!visibility private + def ns_get(member) + if member.is_a? 
Integer + if member >= @values.length + raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})") + end + @values[member] + else + send(member) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + # @!macro struct_equality + # + # @!visibility private + def ns_equality(other) + self.class == other.class && self.values == other.values + end + + # @!macro struct_each + # + # @!visibility private + def ns_each + values.each{|value| yield value } + end + + # @!macro struct_each_pair + # + # @!visibility private + def ns_each_pair + @values.length.times do |index| + yield self.class::MEMBERS[index], @values[index] + end + end + + # @!macro struct_select + # + # @!visibility private + def ns_select + values.select{|value| yield value } + end + + # @!macro struct_inspect + # + # @!visibility private + def ns_inspect + struct = pr_underscore(self.class.ancestors[1]) + clazz = ((self.class.to_s =~ /^#" + end + + # @!macro struct_merge + # + # @!visibility private + def ns_merge(other, &block) + self.class.new(*self.to_h.merge(other, &block).values) + end + + # @!visibility private + def ns_initialize_copy + @values = @values.map do |val| + begin + val.clone + rescue TypeError + val + end + end + end + + # @!visibility private + def pr_underscore(clazz) + word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229 + word.gsub!(/::/, '/') + word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') + word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') + word.tr!("-", "_") + word.downcase! + word + end + + # @!visibility private + def self.define_struct_class(parent, base, name, members, &block) + clazz = Class.new(base || Object) do + include parent + self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze) + def ns_initialize(*values) + raise ArgumentError.new('struct size differs') if values.length > length + @values = values.fill(nil, values.length..length-1) + end + end + unless name.nil? + begin + parent.send :remove_const, name if parent.const_defined?(name, false) + parent.const_set(name, clazz) + clazz + rescue NameError + raise NameError.new("identifier #{name} needs to be constant") + end + end + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + @values[index] + end + end + clazz.class_exec(&block) unless block.nil? + clazz.singleton_class.send :alias_method, :[], :new + clazz + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/condition.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/condition.rb new file mode 100644 index 0000000..f704b81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/condition.rb @@ -0,0 +1,60 @@ +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Condition < LockableObject + safe_initialization! 
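+
+ # Binds a condition to an external lock: each public method acquires @Lock
+ # first and then delegates to the matching ns_* primitive, which in turn
+ # synchronizes on this Condition instance itself (hence the
+ # "locks two objects" TODO below).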
+ + # TODO (pitr 12-Sep-2015): locks two objects, improve + # TODO (pitr 26-Sep-2015): study + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8-b132/java/util/concurrent/locks/AbstractQueuedSynchronizer.java#AbstractQueuedSynchronizer.Node + + singleton_class.send :alias_method, :private_new, :new + private_class_method :new + + def initialize(lock) + super() + @Lock = lock + end + + def wait(timeout = nil) + @Lock.synchronize { ns_wait(timeout) } + end + + def ns_wait(timeout = nil) + synchronize { super(timeout) } + end + + def wait_until(timeout = nil, &condition) + @Lock.synchronize { ns_wait_until(timeout, &condition) } + end + + def ns_wait_until(timeout = nil, &condition) + synchronize { super(timeout, &condition) } + end + + def signal + @Lock.synchronize { ns_signal } + end + + def ns_signal + synchronize { super } + end + + def broadcast + @Lock.synchronize { ns_broadcast } + end + + def ns_broadcast + synchronize { super } + end + end + + class LockableObject < LockableObjectImplementation + def new_condition + Condition.private_new(self) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb new file mode 100644 index 0000000..359a032 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb @@ -0,0 +1,13 @@ +module Concurrent + module Synchronization + + if Concurrent.on_jruby? && Concurrent.java_extensions_loaded? + + # @!visibility private + # @!macro internal_implementation_note + class JRubyLockableObject < AbstractLockableObject + + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb new file mode 100644 index 0000000..da91ac5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb @@ -0,0 +1,45 @@ +module Concurrent + module Synchronization + + if Concurrent.on_jruby? && Concurrent.java_extensions_loaded? 
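+ # Defined only when the JRuby native extension actually loaded, because the
+ # accessors generated below rely on its instance_variable_get_volatile /
+ # instance_variable_set_volatile primitives.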
+ + # @!visibility private + module JRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + instance_variable_get_volatile(:#{ivar}) + end + + def #{name}=(value) + instance_variable_set_volatile(:#{ivar}, value) + end + RUBY + + end + names.map { |n| [n, :"#{n}="] }.flatten + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + class JRubyObject < AbstractObject + include JRubyAttrVolatile + + def initialize + # nothing to do + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lock.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lock.rb new file mode 100644 index 0000000..0dbad2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lock.rb @@ -0,0 +1,36 @@ +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Lock < LockableObject + # TODO use JavaReentrantLock on JRuby + + public :synchronize + + def wait(timeout = nil) + synchronize { ns_wait(timeout) } + end + + public :ns_wait + + def wait_until(timeout = nil, &condition) + synchronize { ns_wait_until(timeout, &condition) } + end + + public :ns_wait_until + + def signal + synchronize { ns_signal } + end + + public :ns_signal + + def broadcast + synchronize { ns_broadcast } + end + + public :ns_broadcast + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb new file mode 100644 index 0000000..cdbe4d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb @@ -0,0 +1,74 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + LockableObjectImplementation = case + when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3) + MonitorLockableObject + when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3) + MutexLockableObject + when Concurrent.on_jruby? + JRubyLockableObject + when Concurrent.on_rbx? + RbxLockableObject + when Concurrent.on_truffleruby? + MutexLockableObject + else + warn 'Possibly unsupported Ruby implementation' + MonitorLockableObject + end + private_constant :LockableObjectImplementation + + # Safe synchronization under any Ruby implementation. + # It provides methods like {#synchronize}, {#wait}, {#signal} and {#broadcast}. + # Provides a single layer which can improve its implementation over time without changes needed to + # the classes using it. Use {Synchronization::Object} not this abstract class. + # + # @note this object does not support usage together with + # [`Thread#wakeup`](http://ruby-doc.org/core/Thread.html#method-i-wakeup) + # and [`Thread#raise`](http://ruby-doc.org/core/Thread.html#method-i-raise). + # `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and + # `Thread#wakeup` will not work on all platforms. 
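+ #
+ # @example waiting for a flag (illustrative sketch; `Latch` is not part of the library, it mirrors the Event implementation referenced below)
+ #   class Latch < Concurrent::Synchronization::LockableObject
+ #     def initialize
+ #       super
+ #       synchronize { @released = false }
+ #     end
+ #
+ #     def release
+ #       synchronize { @released = true; ns_broadcast }
+ #     end
+ #
+ #     def wait_released(timeout = nil)
+ #       synchronize { ns_wait_until(timeout) { @released } }
+ #     end
+ #   end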
+ # + # @see Event implementation as an example of this class use + # + # @example simple + # class AnClass < Synchronization::Object + # def initialize + # super + # synchronize { @value = 'asd' } + # end + # + # def value + # synchronize { @value } + # end + # end + # + # @!visibility private + class LockableObject < LockableObjectImplementation + + # TODO (pitr 12-Sep-2015): make private for c-r, prohibit subclassing + # TODO (pitr 12-Sep-2015): we inherit too much ourselves :/ + + # @!method initialize(*args, &block) + # @!macro synchronization_object_method_initialize + + # @!method synchronize + # @!macro synchronization_object_method_synchronize + + # @!method wait_until(timeout = nil, &condition) + # @!macro synchronization_object_method_ns_wait_until + + # @!method wait(timeout = nil) + # @!macro synchronization_object_method_ns_wait + + # @!method signal + # @!macro synchronization_object_method_ns_signal + + # @!method broadcast + # @!macro synchronization_object_method_ns_broadcast + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb new file mode 100644 index 0000000..4b1d6c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb @@ -0,0 +1,44 @@ +module Concurrent + module Synchronization + + # @!visibility private + module MriAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + # relying on undocumented behavior of CRuby, GVL acquire has lock which ensures visibility of ivars + # https://github.com/ruby/ruby/blob/ruby_2_2/thread_pthread.c#L204-L211 + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MriObject < AbstractObject + include MriAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb new file mode 100644 index 0000000..f288c51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb @@ -0,0 +1,76 @@ +module Concurrent + # noinspection RubyInstanceVariableNamingConvention + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module ConditionSignalling + protected + + def ns_signal + @__Condition__.signal + self + end + + def ns_broadcast + @__Condition__.broadcast + self + end + end + + + # @!visibility private + # @!macro internal_implementation_note + class MutexLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new + end + + protected + + def synchronize + if @__Lock__.owned? 
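+ # ::Mutex is not reentrant; when the owning thread calls synchronize again
+ # (nested), re-acquiring would raise ThreadError, so just run the block.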
+ yield + else + @__Lock__.synchronize { yield } + end + end + + def ns_wait(timeout = nil) + @__Condition__.wait @__Lock__, timeout + self + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MonitorLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond + end + + protected + + def synchronize # TODO may be a problem with lock.synchronize { lock.wait } + @__Lock__.synchronize { yield } + end + + def ns_wait(timeout = nil) + @__Condition__.wait timeout + self + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/object.rb new file mode 100644 index 0000000..0e62112 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/object.rb @@ -0,0 +1,183 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + ObjectImplementation = case + when Concurrent.on_cruby? + MriObject + when Concurrent.on_jruby? + JRubyObject + when Concurrent.on_rbx? + RbxObject + when Concurrent.on_truffleruby? + TruffleRubyObject + else + warn 'Possibly unsupported Ruby implementation' + MriObject + end + private_constant :ObjectImplementation + + # Abstract object providing final, volatile, ans CAS extensions to build other concurrent abstractions. + # - final instance variables see {Object.safe_initialization!} + # - volatile instance variables see {Object.attr_volatile} + # - volatile instance variables see {Object.attr_atomic} + class Object < ObjectImplementation + # TODO make it a module if possible + + # @!method self.attr_volatile(*names) + # Creates methods for reading and writing (as `attr_accessor` does) to a instance variable with + # volatile (Java) semantic. The instance variable should be accessed only through generated methods. + # + # @param [::Array] names of the instance variables to be volatile + # @return [::Array] names of defined method names + + # Has to be called by children. + def initialize + super + __initialize_atomic_fields__ + end + + # By calling this method on a class, it and all its children are marked to be constructed safely. Meaning that + # all writes (ivar initializations) are made visible to all readers of newly constructed object. It ensures + # same behaviour as Java's final fields. + # @example + # class AClass < Concurrent::Synchronization::Object + # safe_initialization! + # + # def initialize + # @AFinalValue = 'value' # published safely, does not have to be synchronized + # end + # end + # @return [true] + def self.safe_initialization! + # define only once, and not again in children + return if safe_initialization? + + # @!visibility private + def self.new(*args, &block) + object = super(*args, &block) + ensure + object.full_memory_barrier if object + end + + @safe_initialization = true + end + + # @return [true, false] if this class is safely initialized. + def self.safe_initialization? + @safe_initialization = false unless defined? @safe_initialization + @safe_initialization || (superclass.respond_to?(:safe_initialization?) && superclass.safe_initialization?) + end + + # For testing purposes, quite slow. 
Injects assert code to new method which will raise if class instance contains + # any instance variables with CamelCase names and isn't {.safe_initialization?}. + # @raise when offend found + # @return [true] + def self.ensure_safe_initialization_when_final_fields_are_present + Object.class_eval do + def self.new(*args, &block) + object = super(*args, &block) + ensure + has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } + if has_final_field && !safe_initialization? + raise "there was an instance of #{object.class} with final field but not marked with safe_initialization!" + end + end + end + true + end + + # Creates methods for reading and writing to a instance variable with + # volatile (Java) semantic as {.attr_volatile} does. + # The instance variable should be accessed oly through generated methods. + # This method generates following methods: `value`, `value=(new_value) #=> new_value`, + # `swap_value(new_value) #=> old_value`, + # `compare_and_set_value(expected, value) #=> true || false`, `update_value(&block)`. + # @param [::Array] names of the instance variables to be volatile with CAS. + # @return [::Array] names of defined method names. + # @!macro attr_atomic + # @!method $1 + # @return [Object] The $1. + # @!method $1=(new_$1) + # Set the $1. + # @return [Object] new_$1. + # @!method swap_$1(new_$1) + # Set the $1 to new_$1 and return the old $1. + # @return [Object] old $1 + # @!method compare_and_set_$1(expected_$1, new_$1) + # Sets the $1 to new_$1 if the current $1 is expected_$1 + # @return [true, false] + # @!method update_$1(&block) + # Updates the $1 using the block. + # @yield [Object] Calculate a new $1 using given (old) $1 + # @yieldparam [Object] old $1 + # @return [Object] new $1 + def self.attr_atomic(*names) + @__atomic_fields__ ||= [] + @__atomic_fields__ += names + safe_initialization! + define_initialize_atomic_fields + + names.each do |name| + ivar = :"@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar}.get + end + + def #{name}=(value) + #{ivar}.set value + end + + def swap_#{name}(value) + #{ivar}.swap value + end + + def compare_and_set_#{name}(expected, value) + #{ivar}.compare_and_set expected, value + end + + def update_#{name}(&block) + #{ivar}.update(&block) + end + RUBY + end + names.flat_map { |n| [n, :"#{n}=", :"swap_#{n}", :"compare_and_set_#{n}", :"update_#{n}"] } + end + + # @param [true, false] inherited should inherited volatile with CAS fields be returned? + # @return [::Array] Returns defined volatile with CAS fields on this class. + def self.atomic_attributes(inherited = true) + @__atomic_fields__ ||= [] + ((superclass.atomic_attributes if superclass.respond_to?(:atomic_attributes) && inherited) || []) + @__atomic_fields__ + end + + # @return [true, false] is the attribute with name atomic? + def self.atomic_attribute?(name) + atomic_attributes.include? 
name + end + + private + + def self.define_initialize_atomic_fields + assignments = @__atomic_fields__.map do |name| + "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = Concurrent::AtomicReference.new(nil)" + end.join("\n") + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def __initialize_atomic_fields__ + super + #{assignments} + end + RUBY + end + + private_class_method :define_initialize_atomic_fields + + def __initialize_atomic_fields__ + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb new file mode 100644 index 0000000..8dbd3c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb @@ -0,0 +1,65 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class RbxLockableObject < AbstractLockableObject + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Waiters__ = [] + @__owner__ = nil + end + + protected + + def synchronize(&block) + if @__owner__ == Thread.current + yield + else + result = nil + Rubinius.synchronize(self) do + begin + @__owner__ = Thread.current + result = yield + ensure + @__owner__ = nil + end + end + result + end + end + + def ns_wait(timeout = nil) + wchan = Rubinius::Channel.new + + begin + @__Waiters__.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout timeout + ensure + Rubinius.lock(self) + + if !signaled && !@__Waiters__.delete(wchan) + # we timed out, but got signaled afterwards, + # so pass that signal on to the next waiter + @__Waiters__.shift << true unless @__Waiters__.empty? + end + end + + self + end + + def ns_signal + @__Waiters__.shift << true unless @__Waiters__.empty? + self + end + + def ns_broadcast + @__Waiters__.shift << true until @__Waiters__.empty? 
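+ # drains the waiter queue in FIFO order; each channel wakes one blocked ns_wait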
+ self + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb new file mode 100644 index 0000000..4b23f2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb @@ -0,0 +1,49 @@ +module Concurrent + module Synchronization + + # @!visibility private + module RbxAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + Rubinius.memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + Rubinius.memory_barrier + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + + end + + def full_memory_barrier + # Rubinius instance variables are not volatile so we need to insert barrier + # TODO (pitr 26-Nov-2015): check comments like ^ + Rubinius.memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class RbxObject < AbstractObject + include RbxAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb new file mode 100644 index 0000000..3919c76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb @@ -0,0 +1,47 @@ +module Concurrent + module Synchronization + + # @!visibility private + module TruffleRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + full_memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + full_memory_barrier + end + RUBY + end + + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + TruffleRuby.full_memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class TruffleRubyObject < AbstractObject + include TruffleRubyAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/volatile.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/volatile.rb new file mode 100644 index 0000000..9dffa91 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/synchronization/volatile.rb @@ -0,0 +1,36 @@ +module Concurrent + module Synchronization + + # Volatile adds the attr_volatile class method when included. + # + # @example + # class Foo + # include Concurrent::Synchronization::Volatile + # + # attr_volatile :bar + # + # def initialize + # self.bar = 1 + # end + # end + # + # foo = Foo.new + # foo.bar + # => 1 + # foo.bar = 2 + # => 2 + + Volatile = case + when Concurrent.on_cruby? + MriAttrVolatile + when Concurrent.on_jruby? + JRubyAttrVolatile + when Concurrent.on_rbx? 
+ RbxAttrVolatile + when Concurrent.on_truffleruby? + TruffleRubyAttrVolatile + else + MriAttrVolatile + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000..92e7c45 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb @@ -0,0 +1,50 @@ +require 'delegate' +require 'monitor' + +module Concurrent + unless defined?(SynchronizedDelegator) + + # This class provides a trivial way to synchronize all calls to a given object + # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls + # around the delegated `#send`. Example: + # + # array = [] # not thread-safe on many impls + # array = SynchronizedDelegator.new([]) # thread-safe + # + # A simple `Monitor` provides a very coarse-grained way to synchronize a given + # object, in that it will cause synchronization for methods that have no need + # for it, but this is a trivial way to get thread-safety where none may exist + # currently on some implementations. + # + # This class is currently being considered for inclusion into stdlib, via + # https://bugs.ruby-lang.org/issues/8556 + # + # @!visibility private + class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util.rb new file mode 100644 index 0000000..c67084a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util.rb @@ -0,0 +1,16 @@ +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter + CPU_COUNT = 16 # is there a way to determine this? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb new file mode 100644 index 0000000..7a6e8d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb @@ -0,0 +1,74 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/striped64' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 + # available in public domain. 
+ # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + # + # @!visibility private + class Adder < Striped64 + # Adds the given value. + def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000..d9b4c58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,118 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/volatile' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in. + # + # Usage: + # class A + # include CheapLockable + # + # def do_exlusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? + # do_something + # cheap_broadcast # wake up others + # end + # end + # end + # + # @!visibility private + module CheapLockable + private + engine = defined?(RUBY_ENGINE) && RUBY_ENGINE + if engine == 'rbx' + # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. 
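        # An illustrative, minimal sketch of the Adder documented in
        # thread_safe/util/adder.rb above. Adder is marked @!visibility private,
        # so treat this as an internal example only; the require path is the one
        # shown in this vendored tree, not a supported public entry point.
        #
        #   require 'concurrent'
        #   require 'concurrent/thread_safe/util/adder'
        #
        #   adder = Concurrent::ThreadSafe::Util::Adder.new
        #   8.times.map { Thread.new { 1_000.times { adder.increment } } }.each(&:join)
        #   adder.sum    #=> 8000, accurate here because all writer threads have joined
        #   adder.reset  # sets the base and every cell back to zero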
+ def cheap_synchronize + Rubinius.lock(self) + begin + yield + ensure + Rubinius.unlock(self) + end + end + + def cheap_wait + wchan = Rubinius::Channel.new + + begin + waiters = @waiters ||= [] + waiters.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout nil + ensure + Rubinius.lock(self) + + unless signaled or waiters.delete(wchan) + # we timed out, but got signaled afterwards (e.g. while waiting to + # acquire @lock), so pass that signal on to the next waiter + waiters.shift << true unless waiters.empty? + end + end + + self + end + + def cheap_broadcast + waiters = @waiters ||= [] + waiters.shift << true until waiters.empty? + self + end + elsif engine == 'jruby' + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#syncrhonize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_broadcast+'s block. + def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_broadcast+'s block. + def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb new file mode 100644 index 0000000..ff1e8ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb @@ -0,0 +1,63 @@ +require 'concurrent/thread_safe/util' + +# Shim for TruffleRuby.synchronized +if Concurrent.on_truffleruby? && !TruffleRuby.respond_to?(:synchronized) + module TruffleRuby + def self.synchronized(object, &block) + Truffle::System.synchronized(object, &block) + end + end +end + +module Concurrent + module ThreadSafe + module Util + def self.make_synchronized_on_rbx(klass) + klass.class_eval do + private + + def _mon_initialize + @_monitor = Monitor.new unless @_monitor # avoid double initialisation + end + + def self.new(*args) + obj = super(*args) + obj.send(:_mon_initialize) + obj + end + end + + klass.superclass.instance_methods(false).each do |method| + case method + when :new_range, :new_reserved + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + obj = super + obj.send(:_mon_initialize) + obj + end + RUBY + else + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + monitor = @_monitor + monitor or raise("BUG: Internal monitor was not properly initialized. 
Please report this to the concurrent-ruby developers.") + monitor.synchronize { super } + end + RUBY + end + end + end + + def self.make_synchronized_on_truffleruby(klass) + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args, &block) + TruffleRuby.synchronized(self) { super(*args, &block) } + end + RUBY + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000..b54be39 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,38 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/tuple' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + class PowerOfTwoTuple < Concurrent::Tuple + + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb new file mode 100644 index 0000000..4169c3d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb @@ -0,0 +1,246 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. + # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. 
+ # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUS. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUS forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + # + # @!visibility private + class Striped64 + + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. + # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + # + # @!visibility private + class Cell < Concurrent::AtomicReference + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + + # @!visibility private + def self.padding + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created + # hide from yardoc in a method + attr_reader :padding_0, :padding_1, :padding_2, :padding_3, :padding_4, :padding_5, :padding_6, :padding_7, :padding_8, :padding_9, :padding_10, :padding_11 + end + padding + end + + extend Volatile + attr_volatile :cells, # Table of cells. 
When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. + # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+x+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? + collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. + def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? + end + + def try_initialize_cells(x, hash) + if free? 
&& !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb new file mode 100644 index 0000000..cdac2a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb @@ -0,0 +1,75 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + module Volatile + + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of +Concurrent::AtomicReference+. + # + # Usage: + # class Foo + # extend Concurrent::ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? 
+ include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000..bdde2dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,50 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A xorshift random number (positive +Fixnum+s) generator, provides + # reasonably cheap way to generate thread local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. 
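      # A hypothetical, minimal sketch of the attr_volatile helper defined in
      # thread_safe/util/volatile.rb above (Counter and :count are illustrative
      # names, not part of the library). The cas_ alias delegates to
      # AtomicReference#compare_and_set, which compares by identity; that is
      # sufficient for the small Integers used here.
      #
      #   require 'concurrent'
      #   require 'concurrent/thread_safe/util/volatile'
      #
      #   class Counter
      #     extend Concurrent::ThreadSafe::Util::Volatile
      #     attr_volatile :count
      #
      #     def initialize
      #       super()        # wires up the AtomicReference-backed ivars
      #       self.count = 0 # volatile write
      #     end
      #   end
      #
      #   c = Counter.new
      #   c.count            #=> 0     volatile read
      #   c.cas_count(0, 1)  #=> true  compare-and-set alias
      #   c.count            #=> 1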
+ def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/timer_task.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/timer_task.rb new file mode 100644 index 0000000..a0b7233 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/timer_task.rb @@ -0,0 +1,333 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/dereferenceable' +require 'concurrent/concern/observable' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/scheduled_task' + +module Concurrent + + # A very common concurrency pattern is to run a thread that performs a task at + # regular intervals. The thread that performs the task sleeps for the given + # interval then wakes up and performs the task. Lather, rinse, repeat... This + # pattern causes two problems. First, it is difficult to test the business + # logic of the task because the task itself is tightly coupled with the + # concurrency logic. Second, an exception raised while performing the task can + # cause the entire thread to abend. In a long-running application where the + # task thread is intended to run for days/weeks/years a crashed task thread + # can pose a significant problem. `TimerTask` alleviates both problems. + # + # When a `TimerTask` is launched it starts a thread for monitoring the + # execution interval. The `TimerTask` thread does not perform the task, + # however. Instead, the TimerTask launches the task on a separate thread. + # Should the task experience an unrecoverable crash only the task thread will + # crash. This makes the `TimerTask` very fault tolerant. Additionally, the + # `TimerTask` thread can respond to the success or failure of the task, + # performing logging or ancillary operations. `TimerTask` can also be + # configured with a timeout value allowing it to kill a task that runs too + # long. + # + # One other advantage of `TimerTask` is that it forces the business logic to + # be completely decoupled from the concurrency logic. The business logic can + # be tested separately then passed to the `TimerTask` for scheduling and + # running. + # + # In some cases it may be necessary for a `TimerTask` to affect its own + # execution cycle. To facilitate this, a reference to the TimerTask instance + # is passed as an argument to the provided block every time the task is + # executed. + # + # The `TimerTask` class includes the `Dereferenceable` mixin module so the + # result of the last execution is always available via the `#value` method. + # Dereferencing options can be passed to the `TimerTask` during construction or + # at any later time using the `#set_deref_options` method. 
+ # + # `TimerTask` supports notification through the Ruby standard library + # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # Observable} module. On execution the `TimerTask` will notify the observers + # with three arguments: time of execution, the result of the block (or nil on + # failure), and any raised exceptions (or nil on success). If the timeout + # interval is exceeded the observer will receive a `Concurrent::TimeoutError` + # object as the third argument. + # + # @!macro copy_options + # + # @example Basic usage + # task = Concurrent::TimerTask.new{ puts 'Boom!' } + # task.execute + # + # task.execution_interval #=> 60 (default) + # task.timeout_interval #=> 30 (default) + # + # # wait 60 seconds... + # #=> 'Boom!' + # + # task.shutdown #=> true + # + # @example Configuring `:execution_interval` and `:timeout_interval` + # task = Concurrent::TimerTask.new(execution_interval: 5, timeout_interval: 5) do + # puts 'Boom!' + # end + # + # task.execution_interval #=> 5 + # task.timeout_interval #=> 5 + # + # @example Immediate execution with `:run_now` + # task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' } + # task.execute + # + # #=> 'Boom!' + # + # @example Last `#value` and `Dereferenceable` mixin + # task = Concurrent::TimerTask.new( + # dup_on_deref: true, + # execution_interval: 5 + # ){ Time.now } + # + # task.execute + # Time.now #=> 2013-11-07 18:06:50 -0500 + # sleep(10) + # task.value #=> 2013-11-07 18:06:55 -0500 + # + # @example Controlling execution from within the block + # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task| + # task.execution_interval.times{ print 'Boom! ' } + # print "\n" + # task.execution_interval += 1 + # if task.execution_interval > 5 + # puts 'Stopping...' + # task.shutdown + # end + # end + # + # timer_task.execute # blocking call - this task will stop itself + # #=> Boom! + # #=> Boom! Boom! + # #=> Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! Boom! + # #=> Stopping... 
+ # + # @example Observation + # class TaskObserver + # def update(time, result, ex) + # if result + # print "(#{time}) Execution successfully returned #{result}\n" + # elsif ex.is_a?(Concurrent::TimeoutError) + # print "(#{time}) Execution timed out\n" + # else + # print "(#{time}) Execution failed with error #{ex}\n" + # end + # end + # end + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ 42 } + # task.add_observer(TaskObserver.new) + # task.execute + # sleep 4 + # + # #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42 + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ sleep } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:07:25 -0400) Execution timed out + # #=> (2013-10-13 19:07:27 -0400) Execution timed out + # #=> (2013-10-13 19:07:29 -0400) Execution timed out + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError + # task.shutdown + # + # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html + class TimerTask < RubyExecutorService + include Concern::Dereferenceable + include Concern::Observable + + # Default `:execution_interval` in seconds. + EXECUTION_INTERVAL = 60 + + # Default `:timeout_interval` in seconds. + TIMEOUT_INTERVAL = 30 + + # Create a new TimerTask with the given task and configuration. + # + # @!macro timer_task_initialize + # @param [Hash] opts the options defining task execution. + # @option opts [Integer] :execution_interval number of seconds between + # task executions (default: EXECUTION_INTERVAL) + # @option opts [Integer] :timeout_interval number of seconds a task can + # run before it is considered to have failed (default: TIMEOUT_INTERVAL) + # @option opts [Boolean] :run_now Whether to run the task immediately + # upon instantiation or to wait until the first # execution_interval + # has passed (default: false) + # + # @!macro deref_options + # + # @raise ArgumentError when no block is given. + # + # @yield to the block after :execution_interval seconds have passed since + # the last yield + # @yieldparam task a reference to the `TimerTask` instance so that the + # block can control its own lifecycle. Necessary since `self` will + # refer to the execution context of the block rather than the running + # `TimerTask`. + # + # @return [TimerTask] the new `TimerTask` + def initialize(opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + super + set_deref_options opts + end + + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + def running? + @running.true? + end + + # Execute a previously created `TimerTask`. + # + # @return [TimerTask] a reference to `self` + # + # @example Instance and execute in separate steps + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" } + # task.running? 
#=> false + # task.execute + # task.running? #=> true + # + # @example Instance and execute in one line + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute + # task.running? #=> true + def execute + synchronize do + if @running.false? + @running.make_true + schedule_next_task(@run_now ? 0 : @execution_interval) + end + end + self + end + + # Create and execute a new `TimerTask`. + # + # @!macro timer_task_initialize + # + # @example + # task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> true + def self.execute(opts = {}, &task) + TimerTask.new(opts, &task).execute + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval + synchronize { @execution_interval } + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @execution_interval = value } + end + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval + synchronize { @timeout_interval } + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @timeout_interval = value } + end + end + + private :post, :<< + + private + + def ns_initialize(opts, &task) + set_deref_options(opts) + + self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL + self.timeout_interval = opts[:timeout] || opts[:timeout_interval] || TIMEOUT_INTERVAL + @run_now = opts[:now] || opts[:run_now] + @executor = Concurrent::SafeTaskExecutor.new(task) + @running = Concurrent::AtomicBoolean.new(false) + @value = nil + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + # @!visibility private + def ns_shutdown_execution + @running.make_false + super + end + + # @!visibility private + def ns_kill_execution + @running.make_false + super + end + + # @!visibility private + def schedule_next_task(interval = execution_interval) + ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task)) + nil + end + + # @!visibility private + def execute_task(completion) + return nil unless @running.true? + ScheduledTask.execute(timeout_interval, args: [completion], &method(:timeout_task)) + _success, value, reason = @executor.execute(self) + if completion.try? + self.value = value + schedule_next_task + time = Time.now + observers.notify_observers do + [time, self.value, reason] + end + end + nil + end + + # @!visibility private + def timeout_task(completion) + return unless @running.true? + if completion.try? 
+ schedule_next_task + observers.notify_observers(Time.now, nil, Concurrent::TimeoutError.new) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tuple.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tuple.rb new file mode 100644 index 0000000..f8c4c25 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tuple.rb @@ -0,0 +1,86 @@ +require 'concurrent/atomic/atomic_reference' + +module Concurrent + + # A fixed size array with volatile (synchronized, thread safe) getters/setters. + # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal. + # + # @example + # tuple = Concurrent::Tuple.new(16) + # + # tuple.set(0, :foo) #=> :foo | volatile write + # tuple.get(0) #=> :foo | volatile read + # tuple.compare_and_set(0, :foo, :bar) #=> true | strong CAS + # tuple.cas(0, :foo, :baz) #=> false | strong CAS + # tuple.get(0) #=> :bar | volatile read + # + # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia + # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple + # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable + class Tuple + include Enumerable + + # The (fixed) size of the tuple. + attr_reader :size + + # @!visibility private + Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : ::Array + private_constant :Tuple + + # Create a new tuple of the given size. + # + # @param [Integer] size the number of elements in the tuple + def initialize(size) + @size = size + @tuple = tuple = Tuple.new(size) + i = 0 + while i < size + tuple[i] = Concurrent::AtomicReference.new + i += 1 + end + end + + # Get the value of the element at the given index. + # + # @param [Integer] i the index from which to retrieve the value + # @return [Object] the value at the given index or nil if the index is out of bounds + def get(i) + return nil if i >= @size || i < 0 + @tuple[i].get + end + alias_method :volatile_get, :get + + # Set the element at the given index to the given value + # + # @param [Integer] i the index for the element to set + # @param [Object] value the value to set at the given index + # + # @return [Object] the new value of the element at the given index or nil if the index is out of bounds + def set(i, value) + return nil if i >= @size || i < 0 + @tuple[i].set(value) + end + alias_method :volatile_set, :set + + # Set the value at the given index to the new value if and only if the current + # value matches the given old value. + # + # @param [Integer] i the index for the element to set + # @param [Object] old_value the value to compare against the current value + # @param [Object] new_value the value to set at the given index + # + # @return [Boolean] true if the value at the given element was set else false + def compare_and_set(i, old_value, new_value) + return false if i >= @size || i < 0 + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + # Calls the given block once for each element in self, passing that element as a parameter. 
+ # + # @yieldparam [Object] ref the `Concurrent::AtomicReference` object at the current index + def each + @tuple.each {|ref| yield ref.get} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tvar.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tvar.rb new file mode 100644 index 0000000..09138c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/tvar.rb @@ -0,0 +1,258 @@ +require 'set' +require 'concurrent/synchronization' + +module Concurrent + + # A `TVar` is a transactional variable - a single-element container that + # is used as part of a transaction - see `Concurrent::atomically`. + # + # @!macro thread_safe_variable_comparison + # + # {include:file:docs-source/tvar.md} + class TVar < Synchronization::Object + safe_initialization! + + # Create a new `TVar` with an initial value. + def initialize(value) + @value = value + @version = 0 + @lock = Mutex.new + end + + # Get the value of a `TVar`. + def value + Concurrent::atomically do + Transaction::current.read(self) + end + end + + # Set the value of a `TVar`. + def value=(value) + Concurrent::atomically do + Transaction::current.write(self, value) + end + end + + # @!visibility private + def unsafe_value # :nodoc: + @value + end + + # @!visibility private + def unsafe_value=(value) # :nodoc: + @value = value + end + + # @!visibility private + def unsafe_version # :nodoc: + @version + end + + # @!visibility private + def unsafe_increment_version # :nodoc: + @version += 1 + end + + # @!visibility private + def unsafe_lock # :nodoc: + @lock + end + + end + + # Run a block that reads and writes `TVar`s as a single atomic transaction. + # With respect to the value of `TVar` objects, the transaction is atomic, in + # that it either happens or it does not, consistent, in that the `TVar` + # objects involved will never enter an illegal state, and isolated, in that + # transactions never interfere with each other. You may recognise these + # properties from database transactions. + # + # There are some very important and unusual semantics that you must be aware of: + # + # * Most importantly, the block that you pass to atomically may be executed + # more than once. In most cases your code should be free of + # side-effects, except for via TVar. + # + # * If an exception escapes an atomically block it will abort the transaction. + # + # * It is undefined behaviour to use callcc or Fiber with atomically. + # + # * If you create a new thread within an atomically, it will not be part of + # the transaction. Creating a thread counts as a side-effect. + # + # Transactions within transactions are flattened to a single transaction. + # + # @example + # a = new TVar(100_000) + # b = new TVar(100) + # + # Concurrent::atomically do + # a.value -= 10 + # b.value += 10 + # end + def atomically + raise ArgumentError.new('no block given') unless block_given? + + # Get the current transaction + + transaction = Transaction::current + + # Are we not already in a transaction (not nested)? + + if transaction.nil? 
+ # New transaction + + begin + # Retry loop + + loop do + + # Create a new transaction + + transaction = Transaction.new + Transaction::current = transaction + + # Run the block, aborting on exceptions + + begin + result = yield + rescue Transaction::AbortError => e + transaction.abort + result = Transaction::ABORTED + rescue Transaction::LeaveError => e + transaction.abort + break result + rescue => e + transaction.abort + raise e + end + # If we can commit, break out of the loop + + if result != Transaction::ABORTED + if transaction.commit + break result + end + end + end + ensure + # Clear the current transaction + + Transaction::current = nil + end + else + # Nested transaction - flatten it and just run the block + + yield + end + end + + # Abort a currently running transaction - see `Concurrent::atomically`. + def abort_transaction + raise Transaction::AbortError.new + end + + # Leave a transaction without committing or aborting - see `Concurrent::atomically`. + def leave_transaction + raise Transaction::LeaveError.new + end + + module_function :atomically, :abort_transaction, :leave_transaction + + private + + class Transaction + + ABORTED = ::Object.new + + ReadLogEntry = Struct.new(:tvar, :version) + + AbortError = Class.new(StandardError) + LeaveError = Class.new(StandardError) + + def initialize + @read_log = [] + @write_log = {} + end + + def read(tvar) + Concurrent::abort_transaction unless valid? + + if @write_log.has_key? tvar + @write_log[tvar] + else + @read_log.push(ReadLogEntry.new(tvar, tvar.unsafe_version)) + tvar.unsafe_value + end + end + + def write(tvar, value) + # Have we already written to this TVar? + + unless @write_log.has_key? tvar + # Try to lock the TVar + + unless tvar.unsafe_lock.try_lock + # Someone else is writing to this TVar - abort + Concurrent::abort_transaction + end + + # If we previously wrote to it, check the version hasn't changed + + @read_log.each do |log_entry| + if log_entry.tvar == tvar and tvar.unsafe_version > log_entry.version + Concurrent::abort_transaction + end + end + end + + # Record the value written + + @write_log[tvar] = value + end + + def abort + unlock + end + + def commit + return false unless valid? + + @write_log.each_pair do |tvar, value| + tvar.unsafe_value = value + tvar.unsafe_increment_version + end + + unlock + + true + end + + def valid? + @read_log.each do |log_entry| + unless @write_log.has_key? log_entry.tvar + if log_entry.tvar.unsafe_version > log_entry.version + return false + end + end + end + + true + end + + def unlock + @write_log.each_key do |tvar| + tvar.unsafe_lock.unlock + end + end + + def self.current + Thread.current[:current_tvar_transaction] + end + + def self.current=(transaction) + Thread.current[:current_tvar_transaction] = transaction + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/engine.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/engine.rb new file mode 100644 index 0000000..bc4173e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/engine.rb @@ -0,0 +1,56 @@ +module Concurrent + module Utility + + # @!visibility private + module EngineDetector + def on_jruby? + ruby_engine == 'jruby' + end + + def on_jruby_9000? + on_jruby? && ruby_version(JRUBY_VERSION, :>=, 9, 0, 0) + end + + def on_cruby? + ruby_engine == 'ruby' + end + + def on_rbx? + ruby_engine == 'rbx' + end + + def on_truffleruby? 
+ ruby_engine == 'truffleruby' + end + + def on_windows? + !(RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/).nil? + end + + def on_osx? + !(RbConfig::CONFIG['host_os'] =~ /darwin|mac os/).nil? + end + + def on_linux? + !(RbConfig::CONFIG['host_os'] =~ /linux/).nil? + end + + def ruby_engine + defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby' + end + + def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch) + result = (version.split('.').map(&:to_i) <=> [major, minor, patch]) + comparisons = { :== => [0], + :>= => [1, 0], + :<= => [-1, 0], + :> => [1], + :< => [-1] } + comparisons.fetch(comparison).include? result + end + end + end + + # @!visibility private + extend Utility::EngineDetector +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb new file mode 100644 index 0000000..c9f4b36 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb @@ -0,0 +1,58 @@ +require 'concurrent/synchronization' + +module Concurrent + + class_definition = Class.new(Synchronization::LockableObject) do + def initialize + @last_time = Time.now.to_f + super() + end + + if defined?(Process::CLOCK_MONOTONIC) + # @!visibility private + def get_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + elsif Concurrent.on_jruby? + # @!visibility private + def get_time + java.lang.System.nanoTime() / 1_000_000_000.0 + end + else + + # @!visibility private + def get_time + synchronize do + now = Time.now.to_f + if @last_time < now + @last_time = now + else # clock has moved back in time + @last_time += 0.000_001 + end + end + end + + end + end + + # Clock that cannot be set and represents monotonic time since + # some unspecified starting point. + # + # @!visibility private + GLOBAL_MONOTONIC_CLOCK = class_definition.new + private_constant :GLOBAL_MONOTONIC_CLOCK + + # @!macro monotonic_get_time + # + # Returns the current time a tracked by the application monotonic clock. + # + # @return [Float] The current monotonic time since some unspecified + # starting point + # + # @!macro monotonic_clock_warning + def monotonic_time + GLOBAL_MONOTONIC_CLOCK.get_time + end + + module_function :monotonic_time +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb new file mode 100644 index 0000000..a944bd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb @@ -0,0 +1,79 @@ +require 'concurrent/utility/engine' + +module Concurrent + + module Utility + + # @!visibility private + module NativeExtensionLoader + + def allow_c_extensions? + Concurrent.on_cruby? + end + + def c_extensions_loaded? + defined?(@c_extensions_loaded) && @c_extensions_loaded + end + + def java_extensions_loaded? + defined?(@java_extensions_loaded) && @java_extensions_loaded + end + + def load_native_extensions + unless defined? Synchronization::AbstractObject + raise 'native_extension_loader loaded before Synchronization::AbstractObject' + end + + if Concurrent.on_cruby? && !c_extensions_loaded? 
+ ['concurrent/concurrent_ruby_ext', + "concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext" + ].each { |p| try_load_c_extension p } + end + + if Concurrent.on_jruby? && !java_extensions_loaded? + begin + require 'concurrent/concurrent_ruby.jar' + set_java_extensions_loaded + rescue LoadError => e + raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace + end + end + end + + private + + def load_error_path(error) + if error.respond_to? :path + error.path + else + error.message.split(' -- ').last + end + end + + def set_c_extensions_loaded + @c_extensions_loaded = true + end + + def set_java_extensions_loaded + @java_extensions_loaded = true + end + + def try_load_c_extension(path) + require path + set_c_extensions_loaded + rescue LoadError => e + if load_error_path(e) == path + # move on with pure-Ruby implementations + # TODO (pitr-ch 12-Jul-2018): warning on verbose? + else + raise e + end + end + + end + end + + # @!visibility private + extend Utility::NativeExtensionLoader +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_integer.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_integer.rb new file mode 100644 index 0000000..10719e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/native_integer.rb @@ -0,0 +1,53 @@ +module Concurrent + module Utility + # @private + module NativeInteger + # http://stackoverflow.com/questions/535721/ruby-max-integer + MIN_VALUE = -(2**(0.size * 8 - 2)) + MAX_VALUE = (2**(0.size * 8 - 2) - 1) + + def ensure_upper_bound(value) + if value > MAX_VALUE + raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}") + end + value + end + + def ensure_lower_bound(value) + if value < MIN_VALUE + raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}") + end + value + end + + def ensure_integer(value) + unless value.is_a?(Integer) + raise ArgumentError.new("#{value} is not an Integer") + end + value + end + + def ensure_integer_and_bounds(value) + ensure_integer value + ensure_upper_bound value + ensure_lower_bound value + end + + def ensure_positive(value) + if value < 0 + raise ArgumentError.new("#{value} cannot be negative") + end + value + end + + def ensure_positive_and_no_zero(value) + if value < 1 + raise ArgumentError.new("#{value} cannot be negative or zero") + end + value + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/processor_counter.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/processor_counter.rb new file mode 100644 index 0000000..531ca0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/utility/processor_counter.rb @@ -0,0 +1,163 @@ +require 'etc' +require 'rbconfig' +require 'concurrent/delay' + +module Concurrent + module Utility + + # @!visibility private + class ProcessorCounter + def initialize + @processor_count = Delay.new { compute_processor_count } + @physical_processor_count = Delay.new { compute_physical_processor_count } + end + + # Number of processors seen by the OS and used for process scheduling. For + # performance reasons the calculated value will be memoized on the first + # call. 
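      # A brief, hypothetical sketch of the NativeInteger range helpers defined
      # in utility/native_integer.rb above (the values shown are illustrative;
      # the module is private API and extends itself, so its helpers are called
      # at module level).
      #
      #   require 'concurrent/utility/native_integer'
      #
      #   Concurrent::Utility::NativeInteger.ensure_integer_and_bounds(42) #=> 42
      #   Concurrent::Utility::NativeInteger.ensure_positive(-1)           # raises ArgumentError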
+ # + # When running under JRuby the Java runtime call + # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According + # to the Java documentation this "value may change during a particular + # invocation of the virtual machine... [applications] should therefore + # occasionally poll this property." Subsequently the result will NOT be + # memoized under JRuby. + # + # Ruby's Etc.nprocessors will be used if available (MRI 2.2+). + # + # On Windows the Win32 API will be queried for the + # `NumberOfLogicalProcessors from Win32_Processor`. This will return the + # total number "logical processors for the current instance of the + # processor", which taked into account hyperthreading. + # + # * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev + # * Alpha: /usr/bin/nproc (/proc/cpuinfo exists but cannot be used) + # * BSD: /sbin/sysctl + # * Cygwin: /proc/cpuinfo + # * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl + # * HP-UX: /usr/sbin/ioscan + # * IRIX: /usr/sbin/sysconf + # * Linux: /proc/cpuinfo + # * Minix 3+: /proc/cpuinfo + # * Solaris: /usr/sbin/psrinfo + # * Tru64 UNIX: /usr/sbin/psrinfo + # * UnixWare: /usr/sbin/psrinfo + # + # @return [Integer] number of processors seen by the OS or Java runtime + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors() + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + def processor_count + @processor_count.value + end + + # Number of physical processor cores on the current system. For performance + # reasons the calculated value will be memoized on the first call. + # + # On Windows the Win32 API will be queried for the `NumberOfCores from + # Win32_Processor`. This will return the total number "of cores for the + # current instance of the processor." On Unix-like operating systems either + # the `hwprefs` or `sysctl` utility will be called in a subshell and the + # returned value will be used. In the rare case where none of these methods + # work or an exception is raised the function will simply return 1. + # + # @return [Integer] number physical processor cores on the current system + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + # @see http://www.unix.com/man-page/osx/1/HWPREFS/ + # @see http://linux.die.net/man/8/sysctl + def physical_processor_count + @physical_processor_count.value + end + + private + + def compute_processor_count + if Concurrent.on_jruby? 
+ java.lang.Runtime.getRuntime.availableProcessors + elsif Etc.respond_to?(:nprocessors) && (nprocessor = Etc.nprocessors rescue nil) + nprocessor + else + os_name = RbConfig::CONFIG["target_os"] + if os_name =~ /mingw|mswin/ + require 'win32ole' + result = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfLogicalProcessors from Win32_Processor") + result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+) + elsif File.readable?("/proc/cpuinfo") && (cpuinfo_count = IO.read("/proc/cpuinfo").scan(/^processor/).size) > 0 + cpuinfo_count + elsif File.executable?("/usr/bin/nproc") + IO.popen("/usr/bin/nproc --all", &:read).to_i + elsif File.executable?("/usr/bin/hwprefs") + IO.popen("/usr/bin/hwprefs thread_count", &:read).to_i + elsif File.executable?("/usr/sbin/psrinfo") + IO.popen("/usr/sbin/psrinfo", &:read).scan(/^.*on-*line/).size + elsif File.executable?("/usr/sbin/ioscan") + IO.popen("/usr/sbin/ioscan -kC processor", &:read).scan(/^.*processor/).size + elsif File.executable?("/usr/sbin/pmcycles") + IO.popen("/usr/sbin/pmcycles -m", &:read).count("\n") + elsif File.executable?("/usr/sbin/lsdev") + IO.popen("/usr/sbin/lsdev -Cc processor -S 1", &:read).count("\n") + elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i + IO.popen("/usr/sbin/sysconf NPROC_ONLN", &:read).to_i + elsif File.executable?("/usr/sbin/sysctl") + IO.popen("/usr/sbin/sysctl -n hw.ncpu", &:read).to_i + elsif File.executable?("/sbin/sysctl") + IO.popen("/sbin/sysctl -n hw.ncpu", &:read).to_i + else + # TODO (pitr-ch 05-Nov-2016): warn about failures + 1 + end + end + rescue + return 1 + end + + def compute_physical_processor_count + ppc = case RbConfig::CONFIG["target_os"] + when /darwin1/ + IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i + when /linux/ + cores = {} # unique physical ID / core ID combinations + phy = 0 + IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln| + if ln.start_with?("physical") + phy = ln[/\d+/] + elsif ln.start_with?("core") + cid = phy + ":" + ln[/\d+/] + cores[cid] = true if not cores[cid] + end + end + cores.count + when /mswin|mingw/ + require 'win32ole' + result_set = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfCores from Win32_Processor") + result_set.to_enum.collect(&:NumberOfCores).reduce(:+) + else + processor_count + end + # fall back to logical count if physical info is invalid + ppc > 0 ? 
ppc : processor_count + rescue + return 1 + end + end + end + + # create the default ProcessorCounter on load + @processor_counter = Utility::ProcessorCounter.new + singleton_class.send :attr_reader, :processor_counter + + def self.processor_count + processor_counter.processor_count + end + + def self.physical_processor_count + processor_counter.physical_processor_count + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/version.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/version.rb new file mode 100644 index 0000000..d18c038 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.7/lib/concurrent-ruby/concurrent/version.rb @@ -0,0 +1,3 @@ +module Concurrent + VERSION = '1.1.7' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/CHANGELOG.md new file mode 100644 index 0000000..da1e00d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/CHANGELOG.md @@ -0,0 +1,528 @@ +## Current + +## Release v1.1.9 (5 Jun 2021) + +concurrent-ruby: + +* (#866) Child promise state not set to :pending immediately after #execute when parent has completed +* (#905, #872) Fix RubyNonConcurrentPriorityQueue#delete method +* (2df0337d) Make sure locks are not shared when objects are dup/cloned +* (#900, #906, #796, #847, #911) Fix Concurrent::Set thread-safety issues on CRuby +* (#907) Add new ConcurrentMap backend for TruffleRuby + +## Release v1.1.8 (20 January 2021) + +concurrent-ruby: + +* (#885) Fix race condition in TVar for stale reads +* (#884) RubyThreadLocalVar: Do not iterate over hash which might conflict with new pair addition + +## Release v1.1.7 (6 August 2020) + +concurrent-ruby: + +* (#879) Consider falsy value on `Concurrent::Map#compute_if_absent` for fast non-blocking path +* (#876) Reset Async queue on forking, makes Async fork-safe +* (#856) Avoid running problematic code in RubyThreadLocalVar on MRI that occasionally results in segfault +* (#853) Introduce ThreadPoolExecutor without a Queue +
+## Release v1.1.6, edge v0.6.0 (10 Feb 2020) + +concurrent-ruby: + +* (#841) Concurrent.disable_at_exit_handlers! is no longer needed and was deprecated. +* (#841) AbstractExecutorService#auto_terminate= was deprecated and has no effect. Set :auto_terminate option instead when executor is initialized.
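+
+  _Editorial illustration of the deprecation above (not part of the upstream changelog); it assumes
+  the standard `FixedThreadPool` constructor:_
+
+  ```ruby
+  require 'concurrent'
+
+  # Instead of `pool.auto_terminate = true` (deprecated, now a no-op),
+  # request the behaviour when the executor is created.
+  pool = Concurrent::FixedThreadPool.new(4, auto_terminate: true)
+  ```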
+ +## Release v1.1.6.pre1, edge v0.6.0.pre1 (26 Jan 2020) + +concurrent-ruby: + +* (#828) Allow to name executors, the name is also used to name their threads +* (#838) Implement #dup and #clone for structs +* (#821) Safer finalizers for thread local variables +* Documentation fixes +* (#814) Use Ruby's Etc.nprocessors if available +* (#812) Fix directory structure not to mess with packaging tools +* (#840) Fix termination of pools on JRuby + +concurrent-ruby-edge: + +* Add WrappingExecutor (#830) + +## Release v1.1.5, edge v0.5.0 (10 Mar 2019) + +concurrent-ruby: + +* fix potential leak of context on JRuby and Java 7 + +concurrent-ruby-edge: + +* Add finalized Concurrent::Cancellation +* Add finalized Concurrent::Throttle +* Add finalized Concurrent::Promises::Channel +* Add new Concurrent::ErlangActor + +## Release v1.1.4 (14 Dec 2018) + +* (#780) Remove java_alias of 'submit' method of Runnable to let executor service work on java 11 +* (#776) Fix NameError on defining a struct with a name which is already taken in an ancestor + +## Release v1.1.3 (7 Nov 2018) + +* (#775) fix partial require of the gem (although not officially supported) + +## Release v1.1.2 (6 Nov 2018) + +* (#773) more defensive 1.9.3 support + +## Release v1.1.1, edge v0.4.1 (1 Nov 2018) + +* (#768) add support for 1.9.3 back + +## Release v1.1.0, edge v0.4.0 (31 OCt 2018) (yanked) + +* (#768) yanked because of issues with removed 1.9.3 support + +## Release v1.1.0.pre2, edge v0.4.0.pre2 (18 Sep 2018) + +concurrent-ruby: + +* fixed documentation and README links +* fix Set for TruffleRuby and Rubinius +* use properly supported TruffleRuby APIs + +concurrent-ruby-edge: + +* add Promises.zip_futures_over_on + +## Release v1.1.0.pre1, edge v0.4.0.pre1 (15 Aug 2018) + +concurrent-ruby: + +* requires at least Ruby 2.0 +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/1.1.0/Concurrent/Promises.html) + are moved from `concurrent-ruby-edge` to `concurrent-ruby` +* Add support for TruffleRuby + * (#734) Fix Array/Hash/Set construction broken on TruffleRuby + * AtomicReference fixed +* CI stabilization +* remove sharp dependency edge -> core +* remove warnings +* documentation updates +* Exchanger is no longer documented as edge since it was already available in + `concurrent-ruby` +* (#644) Fix Map#each and #each_pair not returning enumerator outside of MRI +* (#659) Edge promises fail during error handling +* (#741) Raise on recursive Delay#value call +* (#727) #717 fix global IO executor on JRuby +* (#740) Drop support for CRuby 1.9, JRuby 1.7, Rubinius. +* (#737) Move AtomicMarkableReference out of Edge +* (#708) Prefer platform specific memory barriers +* (#735) Fix wrong expected exception in channel spec assertion +* (#729) Allow executor option in `Promise#then` +* (#725) fix timeout check to use timeout_interval +* (#719) update engine detection +* (#660) Add specs for Promise#zip/Promise.zip ordering +* (#654) Promise.zip execution changes +* (#666) Add thread safe set implementation +* (#651) #699 #to_s, #inspect should not output negative object IDs. 
+* (#685) Avoid RSpec warnings about raise_error +* (#680) Avoid RSpec monkey patching, persist spec results locally, use RSpec + v3.7.0 +* (#665) Initialize the monitor for new subarrays on Rubinius +* (#661) Fix error handling in edge promises + +concurrent-ruby-edge: + +* (#659) Edge promises fail during error handling +* Edge files clearly separated in `lib-edge` +* added ReInclude + +## Release v1.0.5, edge v0.3.1 (26 Feb 2017) + +concurrent-ruby: + +* Documentation for Event and Semaphore +* Use Unsafe#fullFence and #loadFence directly since the shortcuts were removed in JRuby +* Do not depend on org.jruby.util.unsafe.UnsafeHolder + +concurrent-ruby-edge: + +* (#620) Actors on Pool raise an error +* (#624) Delayed promises did not interact correctly with flatting + * Fix arguments yielded by callback methods +* Overridable default executor in promises factory methods +* Asking actor to terminate will always resolve to `true` + +## Release v1.0.4, edge v0.3.0 (27 Dec 2016) + +concurrent-ruby: + +* Nothing + +concurrent-ruby-edge: + +* New promises' API renamed, lots of improvements, edge bumped to 0.3.0 + * **Incompatible** with previous 0.2.3 version + * see https://github.com/ruby-concurrency/concurrent-ruby/pull/522 + +## Release v1.0.3 (17 Dec 2016) + +* Trigger execution of flattened delayed futures +* Avoid forking for processor_count if possible +* Semaphore Mutex and JRuby parity +* Adds Map#each as alias to Map#each_pair +* Fix uninitialized instance variables +* Make Fixnum, Bignum merger ready +* Allows Promise#then to receive an executor +* TimerSet now survives a fork +* Reject promise on any exception +* Allow ThreadLocalVar to be initialized with a block +* Support Alpha with `Concurrent::processor_count` +* Fixes format-security error when compiling ruby_193_compatible.h +* Concurrent::Atom#swap fixed: reraise the exceptions from block + +## Release v1.0.2 (2 May 2016) + +* Fix bug with `Concurrent::Map` MRI backend `#inspect` method +* Fix bug with `Concurrent::Map` MRI backend using `Hash#value?` +* Improved documentation and examples +* Minor updates to Edge + +## Release v1.0.1 (27 February 2016) + +* Fix "uninitialized constant Concurrent::ReentrantReadWriteLock" error. +* Better handling of `autoload` vs. `require`. +* Improved API for Edge `Future` zipping. +* Fix reference leak in Edge `Future` constructor . +* Fix bug which prevented thread pools from surviving a `fork`. +* Fix bug in which `TimerTask` did not correctly specify all its dependencies. +* Improved support for JRuby+Truffle +* Improved error messages. +* Improved documentation. +* Updated README and CONTRIBUTING. + +## Release v1.0.0 (13 November 2015) + +* Rename `attr_volatile_with_cas` to `attr_atomic` +* Add `clear_each` to `LockFreeStack` +* Update `AtomicReference` documentation +* Further updates and improvements to the synchronization layer. +* Performance and memory usage performance with `Actor` logging. +* Fixed `ThreadPoolExecutor` task count methods. +* Improved `Async` performance for both short and long-lived objects. +* Fixed bug in `LockFreeLinkedSet`. +* Fixed bug in which `Agent#await` triggered a validation failure. +* Further `Channel` updates. +* Adopted a project Code of Conduct +* Cleared interpreter warnings +* Fixed bug in `ThreadPoolExecutor` task count methods +* Fixed bug in 'LockFreeLinkedSet' +* Improved Java extension loading +* Handle Exception children in Edge::Future +* Continued improvements to channel +* Removed interpreter warnings. 
+* Shared constants now in `lib/concurrent/constants.rb` +* Refactored many tests. +* Improved synchronization layer/memory model documentation. +* Bug fix in Edge `Future#flat` +* Brand new `Channel` implementation in Edge gem. +* Simplification of `RubySingleThreadExecutor` +* `Async` improvements + - Each object uses its own `SingleThreadExecutor` instead of the global thread pool. + - No longers supports executor injection + - Much better documentation +* `Atom` updates + - No longer `Dereferenceable` + - Now `Observable` + - Added a `#reset` method +* Brand new `Agent` API and implementation. Now functionally equivalent to Clojure. +* Continued improvements to the synchronization layer +* Merged in the `thread_safe` gem + - `Concurrent::Array` + - `Concurrent::Hash` + - `Concurrent::Map` (formerly ThreadSafe::Cache) + - `Concurrent::Tuple` +* Minor improvements to Concurrent::Map +* Complete rewrite of `Exchanger` +* Removed all deprecated code (classes, methods, constants, etc.) +* Updated Agent, MutexAtomic, and BufferedChannel to inherit from Synchronization::Object. +* Many improved tests +* Some internal reorganization + +## Release v0.9.1 (09 August 2015) + +* Fixed a Rubiniux bug in synchronization object +* Fixed all interpreter warnings (except circular references) +* Fixed require statements when requiring `Atom` alone +* Significantly improved `ThreadLocalVar` on non-JRuby platforms +* Fixed error handling in Edge `Concurrent.zip` +* `AtomicFixnum` methods `#increment` and `#decrement` now support optional delta +* New `AtomicFixnum#update` method +* Minor optimizations in `ReadWriteLock` +* New `ReentrantReadWriteLock` class +* `ThreadLocalVar#bind` method is now public +* Refactored many tests + +## Release v0.9.0 (10 July 2015) + +* Updated `AtomicReference` + - `AtomicReference#try_update` now simply returns instead of raising exception + - `AtomicReference#try_update!` was added to raise exceptions if an update + fails. Note: this is the same behavior as the old `try_update` +* Pure Java implementations of + - `AtomicBoolean` + - `AtomicFixnum` + - `Semaphore` +* Fixed bug when pruning Ruby thread pools +* Fixed bug in time calculations within `ScheduledTask` +* Default `count` in `CountDownLatch` to 1 +* Use monotonic clock for all timers via `Concurrent.monotonic_time` + - Use `Process.clock_gettime(Process::CLOCK_MONOTONIC)` when available + - Fallback to `java.lang.System.nanoTime()` on unsupported JRuby versions + - Pure Ruby implementation for everything else + - Effects `Concurrent.timer`, `Concurrent.timeout`, `TimerSet`, `TimerTask`, and `ScheduledTask` +* Deprecated all clock-time based timer scheduling + - Only support scheduling by delay + - Effects `Concurrent.timer`, `TimerSet`, and `ScheduledTask` +* Added new `ReadWriteLock` class +* Consistent `at_exit` behavior for Java and Ruby thread pools. 
+* Added `at_exit` handler to Ruby thread pools (already in Java thread pools) + - Ruby handler stores the object id and retrieves from `ObjectSpace` + - JRuby disables `ObjectSpace` by default so that handler stores the object reference +* Added a `:stop_on_exit` option to thread pools to enable/disable `at_exit` handler +* Updated thread pool docs to better explain shutting down thread pools +* Simpler `:executor` option syntax for all abstractions which support this option +* Added `Executor#auto_terminate?` predicate method (for thread pools) +* Added `at_exit` handler to `TimerSet` +* Simplified auto-termination of the global executors + - Can now disable auto-termination of global executors + - Added shutdown/kill/wait_for_termination variants for global executors +* Can now disable auto-termination for *all* executors (the nuclear option) +* Simplified auto-termination of the global executors +* Deprecated terms "task pool" and "operation pool" + - New terms are "io executor" and "fast executor" + - New functions added with new names + - Deprecation warnings added to functions referencing old names +* Moved all thread pool related functions from `Concurrent::Configuration` to `Concurrent` + - Old functions still exist with deprecation warnings + - New functions have updated names as appropriate +* All high-level abstractions default to the "io executor" +* Fixed bug in `Actor` causing it to prematurely warm global thread pools on gem load + - This also fixed a `RejectedExecutionError` bug when running with minitest/autorun via JRuby +* Moved global logger up to the `Concurrent` namespace and refactored the code +* Optimized the performance of `Delay` + - Fixed a bug in which no executor option on construction caused block execution on a global thread pool +* Numerous improvements and bug fixes to `TimerSet` +* Fixed deadlock of `Future` when the handler raises Exception +* Added shared specs for more classes +* New concurrency abstractions including: + - `Atom` + - `Maybe` + - `ImmutableStruct` + - `MutableStruct` + - `SettableStruct` +* Created an Edge gem for unstable abstractions including + - `Actor` + - `Agent` + - `Channel` + - `Exchanger` + - `LazyRegister` + - **new Future Framework** - unified + implementation of Futures and Promises which combines Features of previous `Future`, + `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into single framework. It uses extensively + new synchronization layer to make all the paths **lock-free** with exception of blocking threads on `#wait`. + It offers better performance and does not block threads when not required. +* Actor framework changes: + - fixed reset loop in Pool + - Pool can use any actor as a worker, abstract worker class is no longer needed. + - Actor events not have format `[:event_name, *payload]` instead of just the Symbol. + - Actor now uses new Future/Promise Framework instead of `IVar` for better interoperability + - Behaviour definition array was simplified to `[BehaviourClass1, [BehaviourClass2, *initialization_args]]` + - Linking behavior responds to :linked message by returning array of linked actors + - Supervised behavior is removed in favour of just Linking + - RestartingContext is supervised by default now, `supervise: true` is not required any more + - Events can be private and public, so far only difference is that Linking will + pass to linked actors only public messages. 
Adding private :restarting and + :resetting events which are send before the actor restarts or resets allowing + to add callbacks to cleanup current child actors. + - Print also object_id in Reference to_s + - Add AbstractContext#default_executor to be able to override executor class wide + - Add basic IO example + - Documentation somewhat improved + - All messages should have same priority. It's now possible to send `actor << job1 << job2 << :terminate!` and + be sure that both jobs are processed first. +* Refactored `Channel` to use newer synchronization objects +* Added `#reset` and `#cancel` methods to `TimerSet` +* Added `#cancel` method to `Future` and `ScheduledTask` +* Refactored `TimerSet` to use `ScheduledTask` +* Updated `Async` with a factory that initializes the object +* Deprecated `Concurrent.timer` and `Concurrent.timeout` +* Reduced max threads on pure-Ruby thread pools (abends around 14751 threads) +* Moved many private/internal classes/modules into "namespace" modules +* Removed brute-force killing of threads in tests +* Fixed a thread pool bug when the operating system cannot allocate more threads + +## Release v0.8.0 (25 January 2015) + +* C extension for MRI have been extracted into the `concurrent-ruby-ext` companion gem. + Please see the README for more detail. +* Better variable isolation in `Promise` and `Future` via an `:args` option +* Continued to update intermittently failing tests + +## Release v0.7.2 (24 January 2015) + +* New `Semaphore` class based on [java.util.concurrent.Semaphore](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Semaphore.html) +* New `Promise.all?` and `Promise.any?` class methods +* Renamed `:overflow_policy` on thread pools to `:fallback_policy` +* Thread pools still accept the `:overflow_policy` option but display a warning +* Thread pools now implement `fallback_policy` behavior when not running (rather than universally rejecting tasks) +* Fixed minor `set_deref_options` constructor bug in `Promise` class +* Fixed minor `require` bug in `ThreadLocalVar` class +* Fixed race condition bug in `TimerSet` class +* Fixed race condition bug in `TimerSet` class +* Fixed signal bug in `TimerSet#post` method +* Numerous non-functional updates to clear warning when running in debug mode +* Fixed more intermittently failing tests +* Tests now run on new Travis build environment +* Multiple documentation updates + +## Release v0.7.1 (4 December 2014) + +Please see the [roadmap](https://github.com/ruby-concurrency/concurrent-ruby/issues/142) for more information on the next planned release. 
+ +* Added `flat_map` method to `Promise` +* Added `zip` method to `Promise` +* Fixed bug with logging in `Actor` +* Improvements to `Promise` tests +* Removed actor-experimental warning +* Added an `IndirectImmediateExecutor` class +* Allow disabling auto termination of global executors +* Fix thread leaking in `ThreadLocalVar` (uses `Ref` gem on non-JRuby systems) +* Fix thread leaking when pruning pure-Ruby thread pools +* Prevent `Actor` from using an `ImmediateExecutor` (causes deadlock) +* Added missing synchronizations to `TimerSet` +* Fixed bug with return value of `Concurrent::Actor::Utils::Pool#ask` +* Fixed timing bug in `TimerTask` +* Fixed bug when creating a `JavaThreadPoolExecutor` with minimum pool size of zero +* Removed confusing warning when not using native extenstions +* Improved documentation + +## Release v0.7.0 (13 August 2014) + +* Merge the [atomic](https://github.com/ruby-concurrency/atomic) gem + - Pure Ruby `MutexAtomic` atomic reference class + - Platform native atomic reference classes `CAtomic`, `JavaAtomic`, and `RbxAtomic` + - Automated [build process](https://github.com/ruby-concurrency/rake-compiler-dev-box) + - Fat binary releases for [multiple platforms](https://rubygems.org/gems/concurrent-ruby/versions) including Windows (32/64), Linux (32/64), OS X (64-bit), Solaris (64-bit), and JRuby +* C native `CAtomicBoolean` +* C native `CAtomicFixnum` +* Refactored intermittently failing tests +* Added `dataflow!` and `dataflow_with!` methods to match `Future#value!` method +* Better handling of timeout in `Agent` +* Actor Improvements + - Fine-grained implementation using chain of behaviors. Each behavior is responsible for single aspect like: `Termination`, `Pausing`, `Linking`, `Supervising`, etc. Users can create custom Actors easily based on their needs. + - Supervision was added. `RestartingContext` will pause on error waiting on its supervisor to decide what to do next ( options are `:terminate!`, `:resume!`, `:reset!`, `:restart!`). Supervising behavior also supports strategies `:one_for_one` and `:one_for_all`. + - Linking was added to be able to monitor actor's events like: `:terminated`, `:paused`, `:restarted`, etc. + - Dead letter routing added. Rejected envelopes are collected in a configurable actor (default: `Concurrent::Actor.root.ask!(:dead_letter_routing)`) + - Old `Actor` class removed and replaced by new implementation previously called `Actress`. `Actress` was kept as an alias for `Actor` to keep compatibility. + - `Utils::Broadcast` actor which allows Publish–subscribe pattern. 
+* More executors for managing serialized operations + - `SerializedExecution` mixin module + - `SerializedExecutionDelegator` for serializing *any* executor +* Updated `Async` with serialized execution +* Updated `ImmediateExecutor` and `PerThreadExecutor` with full executor service lifecycle +* Added a `Delay` to root `Actress` initialization +* Minor bug fixes to thread pools +* Refactored many intermittently failing specs +* Removed Java interop warning `executor.rb:148 warning: ambiguous Java methods found, using submit(java.lang.Runnable)` +* Fixed minor bug in `RubyCachedThreadPool` overflow policy +* Updated tests to use [RSpec 3.0](http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3) +* Removed deprecated `Actor` class +* Better support for Rubinius + +## Release v0.6.1 (14 June 2014) + +* Many improvements to `Concurrent::Actress` +* Bug fixes to `Concurrent::RubyThreadPoolExecutor` +* Fixed several brittle tests +* Moved documentation to http://ruby-concurrency.github.io/concurrent-ruby/frames.html + +## Release v0.6.0 (25 May 2014) + +* Added `Concurrent::Observable` to encapsulate our thread safe observer sets +* Improvements to new `Channel` +* Major improvements to `CachedThreadPool` and `FixedThreadPool` +* Added `SingleThreadExecutor` +* Added `Current::timer` function +* Added `TimerSet` executor +* Added `AtomicBoolean` +* `ScheduledTask` refactoring +* Pure Ruby and JRuby-optimized `PriorityQueue` classes +* Updated `Agent` behavior to more closely match Clojure +* Observer sets support block callbacks to the `add_observer` method +* New algorithm for thread creation in `RubyThreadPoolExecutor` +* Minor API updates to `Event` +* Rewritten `TimerTask` now an `Executor` instead of a `Runnable` +* Fixed many brittle specs +* Renamed `FixedThreadPool` and `CachedThreadPool` to `RubyFixedThreadPool` and `RubyCachedThreadPool` +* Created JRuby optimized `JavaFixedThreadPool` and `JavaCachedThreadPool` +* Consolidated fixed thread pool tests into `spec/concurrent/fixed_thread_pool_shared.rb` and `spec/concurrent/cached_thread_pool_shared.rb` +* `FixedThreadPool` now subclasses `RubyFixedThreadPool` or `JavaFixedThreadPool` as appropriate +* `CachedThreadPool` now subclasses `RubyCachedThreadPool` or `JavaCachedThreadPool` as appropriate +* New `Delay` class +* `Concurrent::processor_count` helper function +* New `Async` module +* Renamed `NullThreadPool` to `PerThreadExecutor` +* Deprecated `Channel` (we are planning a new implementation based on [Go](http://golangtutorials.blogspot.com/2011/06/channels-in-go.html)) +* Added gem-level [configuration](http://robots.thoughtbot.com/mygem-configure-block) +* Deprecated `$GLOBAL_THREAD_POOL` in lieu of gem-level configuration +* Removed support for Ruby [1.9.2](https://www.ruby-lang.org/en/news/2013/12/17/maintenance-of-1-8-7-and-1-9-2/) +* New `RubyThreadPoolExecutor` and `JavaThreadPoolExecutor` classes +* All thread pools now extend the appropriate thread pool executor classes +* All thread pools now support `:overflow_policy` (based on Java's [reject policies](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html)) +* Deprecated `UsesGlobalThreadPool` in lieu of explicit `:executor` option (dependency injection) on `Future`, `Promise`, and `Agent` +* Added `Concurrent::dataflow_with(executor, *inputs)` method to support executor dependency injection for dataflow +* Software transactional memory with `TVar` and `Concurrent::atomically` +* First implementation of [new, 
high-performance](https://github.com/ruby-concurrency/concurrent-ruby/pull/49) `Channel` +* `Actor` is deprecated in favor of new experimental actor implementation [#73](https://github.com/ruby-concurrency/concurrent-ruby/pull/73). To avoid namespace collision it is living in `Actress` namespace until `Actor` is removed in next release. + +## Release v0.5.0 + +This is the most significant release of this gem since its inception. This release includes many improvements and optimizations. It also includes several bug fixes. The major areas of focus for this release were: + +* Stability improvements on Ruby versions with thread-level parallelism ([JRuby](http://jruby.org/) and [Rubinius](http://rubini.us/)) +* Creation of new low-level concurrency abstractions +* Internal refactoring to use the new low-level abstractions + +Most of these updates had no effect on the gem API. There are a few notable exceptions which were unavoidable. Please read the [release notes](API-Updates-in-v0.5.0) for more information. + +Specific changes include: + +* New class `IVar` +* New class `MVar` +* New class `ThreadLocalVar` +* New class `AtomicFixnum` +* New class method `dataflow` +* New class `Condition` +* New class `CountDownLatch` +* New class `DependencyCounter` +* New class `SafeTaskExecutor` +* New class `CopyOnNotifyObserverSet` +* New class `CopyOnWriteObserverSet` +* `Future` updated with `execute` API +* `ScheduledTask` updated with `execute` API +* New `Promise` API +* `Future` now extends `IVar` +* `Postable#post?` now returns an `IVar` +* Thread safety fixes to `Dereferenceable` +* Thread safety fixes to `Obligation` +* Thread safety fixes to `Supervisor` +* Thread safety fixes to `Event` +* Various other thread safety (race condition) fixes +* Refactored brittle tests +* Implemented pending tests +* Added JRuby and Rubinius as Travis CI build targets +* Added [CodeClimate](https://codeclimate.com/) code review +* Improved YARD documentation diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/Gemfile b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/Gemfile new file mode 100644 index 0000000..1b8d9fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/Gemfile @@ -0,0 +1,42 @@ +source 'https://rubygems.org' + +require File.join(File.dirname(__FILE__), 'lib/concurrent-ruby/concurrent/version') +require File.join(File.dirname(__FILE__ ), 'lib/concurrent-ruby-edge/concurrent/edge/version') +require File.join(File.dirname(__FILE__ ), 'lib/concurrent-ruby/concurrent/utility/engine') + +no_path = ENV['NO_PATH'] +options = no_path ? {} : { path: '.' } + +gem 'concurrent-ruby', Concurrent::VERSION, options +gem 'concurrent-ruby-edge', Concurrent::EDGE_VERSION, options +gem 'concurrent-ruby-ext', Concurrent::VERSION, options.merge(platform: :mri) + +group :development do + gem 'rake', (Concurrent.ruby_version :<, 2, 2, 0) ? 
'~> 12.0' : '~> 13.0' + gem 'rake-compiler', '~> 1.0', '>= 1.0.7' + gem 'rake-compiler-dock', '~> 1.0' + gem 'pry', '~> 0.11', platforms: :mri +end + +group :documentation, optional: true do + gem 'yard', '~> 0.9.0', require: false + gem 'redcarpet', '~> 3.0', platforms: :mri # understands github markdown + gem 'md-ruby-eval', '~> 0.6' +end + +group :testing do + gem 'rspec', '~> 3.7' + gem 'timecop', '~> 0.7.4' + gem 'sigdump', require: false +end + +# made opt-in since it will not install on jruby 1.7 +group :coverage, optional: !ENV['COVERAGE'] do + gem 'simplecov', '~> 0.16.0', require: false + gem 'coveralls', '~> 0.8.2', require: false +end + +group :benchmarks, optional: true do + gem 'benchmark-ips', '~> 2.7' + gem 'bench9000' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/LICENSE.txt new file mode 100644 index 0000000..1026f28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) Jerry D'Antonio -- released under the MIT license. + +http://www.opensource.org/licenses/mit-license.php + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/README.md b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/README.md new file mode 100644 index 0000000..cdf3480 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/README.md @@ -0,0 +1,408 @@ +# Concurrent Ruby + +[![Gem Version](https://badge.fury.io/rb/concurrent-ruby.svg)](http://badge.fury.io/rb/concurrent-ruby) +[![Build Status](https://travis-ci.org/ruby-concurrency/concurrent-ruby.svg?branch=master)](https://travis-ci.org/ruby-concurrency/concurrent-ruby) +[![Build status](https://ci.appveyor.com/api/projects/status/iq8aboyuu3etad4w?svg=true)](https://ci.appveyor.com/project/rubyconcurrency/concurrent-ruby) +[![License](https://img.shields.io/badge/license-MIT-green.svg)](http://opensource.org/licenses/MIT) +[![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-devs%20%26%20users-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) + +Modern concurrency tools for Ruby. 
Inspired by +[Erlang](http://www.erlang.org/doc/reference_manual/processes.html), +[Clojure](http://clojure.org/concurrent_programming), +[Scala](http://akka.io/), +[Haskell](http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell), +[F#](http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx), +[C#](http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx), +[Java](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html), +and classic concurrency patterns. + + + +The design goals of this gem are: + +* Be an 'unopinionated' toolbox that provides useful utilities without debating which is better + or why +* Remain free of external gem dependencies +* Stay true to the spirit of the languages providing inspiration +* But implement in a way that makes sense for Ruby +* Keep the semantics as idiomatic Ruby as possible +* Support features that make sense in Ruby +* Exclude features that don't make sense in Ruby +* Be small, lean, and loosely coupled +* Thread-safety +* Backward compatibility + +## Contributing + +**This gem depends on +[contributions](https://github.com/ruby-concurrency/concurrent-ruby/graphs/contributors) and we +appreciate your help. Would you like to contribute? Great! Have a look at +[issues with `looking-for-contributor` label](https://github.com/ruby-concurrency/concurrent-ruby/issues?q=is%3Aissue+is%3Aopen+label%3Alooking-for-contributor).** And if you pick something up let us know on the issue. + +## Thread Safety + +*Concurrent Ruby makes one of the strongest thread safety guarantees of any Ruby concurrency +library, providing consistent behavior and guarantees on all four of the main Ruby interpreters +(MRI/CRuby, JRuby, Rubinius, TruffleRuby).* + +Every abstraction in this library is thread safe. Specific thread safety guarantees are documented +with each abstraction. + +It is critical to remember, however, that Ruby is a language of mutable references. *No* +concurrency library for Ruby can ever prevent the user from making thread safety mistakes (such as +sharing a mutable object between threads and modifying it on both threads) or from creating +deadlocks through incorrect use of locks. All the library can do is provide safe abstractions which +encourage safe practices. Concurrent Ruby provides more safe concurrency abstractions than any +other Ruby library, many of which support the mantra of +["Do not communicate by sharing memory; instead, share memory by communicating"](https://blog.golang.org/share-memory-by-communicating). +Concurrent Ruby is also the only Ruby library which provides a full suite of thread safe and +immutable variable types and data structures. + +We've also initiated discussion to document [memory model](docs-source/synchronization.md) of Ruby which +would provide consistent behaviour and guarantees on all four of the main Ruby interpreters +(MRI/CRuby, JRuby, Rubinius, TruffleRuby). + +## Features & Documentation + +**The primary site for documentation is the automatically generated +[API documentation](http://ruby-concurrency.github.io/concurrent-ruby/index.html) which is up to +date with latest release.** This readme matches the master so may contain new stuff not yet +released. + +We also have a [IRC (gitter)](https://gitter.im/ruby-concurrency/concurrent-ruby). 
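+
+As a quick illustration of the thread-safety guarantees described above (an editorial sketch, not
+part of the upstream README), a `Concurrent::AtomicFixnum` can be updated from many threads without
+any explicit locking:
+
+```ruby
+require 'concurrent'
+
+counter = Concurrent::AtomicFixnum.new(0)
+
+# Ten threads each increment the counter 1,000 times.
+threads = 10.times.map do
+  Thread.new { 1_000.times { counter.increment } }
+end
+threads.each(&:join)
+
+counter.value #=> 10000, every time, with no Mutex needed
+```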
+ +### Versioning + +* `concurrent-ruby` uses [Semantic Versioning](http://semver.org/) +* `concurrent-ruby-ext` has always same version as `concurrent-ruby` +* `concurrent-ruby-edge` will always be 0.y.z therefore following + [point 4](http://semver.org/#spec-item-4) applies *"Major version zero + (0.y.z) is for initial development. Anything may change at any time. The + public API should not be considered stable."* However we additionally use + following rules: + * Minor version increment means incompatible changes were made + * Patch version increment means only compatible changes were made + + +#### General-purpose Concurrency Abstractions + +* [Async](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Async.html): + A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's + [gen_server](http://www.erlang.org/doc/man/gen_server.html). +* [ScheduledTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ScheduledTask.html): + Like a Future scheduled for a specific future time. +* [TimerTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TimerTask.html): + A Thread that periodically wakes up to perform work at regular intervals. +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html): + Unified implementation of futures and promises which combines features of previous `Future`, + `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and (partially) `TimerTask` into a single + framework. It extensively uses the new synchronization layer to make all the features + **non-blocking** and **lock-free**, with the exception of obviously blocking operations like + `#wait`, `#value`. It also offers better performance. + +#### Thread-safe Value Objects, Structures, and Collections + +Collection classes that were originally part of the (deprecated) `thread_safe` gem: + +* [Array](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Array.html) A thread-safe + subclass of Ruby's standard [Array](http://ruby-doc.org/core/Array.html). +* [Hash](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Hash.html) A thread-safe + subclass of Ruby's standard [Hash](http://ruby-doc.org/core/Hash.html). +* [Set](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Set.html) A thread-safe + subclass of Ruby's standard [Set](http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html). +* [Map](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Map.html) A hash-like object + that should have much better performance characteristics, especially under high concurrency, + than `Concurrent::Hash`. +* [Tuple](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Tuple.html) A fixed size + array with volatile (synchronized, thread safe) getters/setters. + +Value objects inspired by other languages: + +* [Maybe](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Maybe.html) A thread-safe, + immutable object representing an optional value, based on + [Haskell Data.Maybe](https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html). + +Structure classes derived from Ruby's [Struct](http://ruby-doc.org/core/Struct.html): + +* [ImmutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ImmutableStruct.html) + Immutable struct where values are set at construction and cannot be changed later. 
+* [MutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MutableStruct.html) + Synchronized, mutable struct where values can be safely changed at any time. +* [SettableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/SettableStruct.html) + Synchronized, write-once struct where values can be set at most once, either at construction + or any time thereafter. + +Thread-safe variables: + +* [Agent](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Agent.html): A way to + manage shared, mutable, *asynchronous*, independent state. Based on Clojure's + [Agent](http://clojure.org/agents). +* [Atom](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Atom.html): A way to manage + shared, mutable, *synchronous*, independent state. Based on Clojure's + [Atom](http://clojure.org/atoms). +* [AtomicBoolean](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicBoolean.html) + A boolean value that can be updated atomically. +* [AtomicFixnum](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicFixnum.html) + A numeric value that can be updated atomically. +* [AtomicReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicReference.html) + An object reference that may be updated atomically. +* [Exchanger](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Exchanger.html) + A synchronization point at which threads can pair and swap elements within pairs. Based on + Java's [Exchanger](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html). +* [MVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MVar.html) A synchronized + single element container. Based on Haskell's + [MVar](https://hackage.haskell.org/package/base-4.8.1.0/docs/Control-Concurrent-MVar.html) and + Scala's [MVar](http://docs.typelevel.org/api/scalaz/nightly/index.html#scalaz.concurrent.MVar$). +* [ThreadLocalVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ThreadLocalVar.html) + A variable where the value is different for each thread. +* [TVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TVar.html) A transactional + variable implementing software transactional memory (STM). Based on Clojure's + [Ref](http://clojure.org/refs). + +#### Java-inspired ThreadPools and Other Executors + +* See the [thread pool](http://ruby-concurrency.github.io/concurrent-ruby/master/file.thread_pools.html) + overview, which also contains a list of other Executors available. + +#### Thread Synchronization Classes and Algorithms + +* [CountDownLatch](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CountDownLatch.html) + A synchronization object that allows one thread to wait on multiple other threads. +* [CyclicBarrier](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CyclicBarrier.html) + A synchronization aid that allows a set of threads to all wait for each other to reach a common barrier point. +* [Event](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Event.html) Old school + kernel-style event. +* [ReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReadWriteLock.html) + A lock that supports multiple readers but only one writer. +* [ReentrantReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReentrantReadWriteLock.html) + A read/write lock with reentrant and upgrade features. 
+* [Semaphore](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Semaphore.html) + A counting-based locking mechanism that uses permits. +* [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicMarkableReference.html) + +#### Deprecated + +Deprecated features are still available and bugs are being fixed, but new features will not be added. + +* ~~[Future](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Future.html): + An asynchronous operation that produces a value.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). + * ~~[.dataflow](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent.html#dataflow-class_method): + Built on Futures, Dataflow allows you to create a task that will be scheduled when all of + its data dependencies are available.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[Promise](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promise.html): Similar + to Futures, with more features.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[Delay](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Delay.html) Lazy evaluation + of a block yielding an immutable result. Based on Clojure's + [delay](https://clojuredocs.org/clojure.core/delay).~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). +* ~~[IVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/IVar.html) Similar to a + "future" but can be manually assigned once, after which it becomes immutable.~~ Replaced by + [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html). + +### Edge Features + +These are available in the `concurrent-ruby-edge` companion gem. + +These features are under active development and may change frequently. They are expected not to +keep backward compatibility (there may also lack tests and documentation). Semantic versions will +be obeyed though. Features developed in `concurrent-ruby-edge` are expected to move to +`concurrent-ruby` when final. + +* [Actor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Actor.html): Implements + the Actor Model, where concurrent actors exchange messages. + *Status: Partial documentation and tests; depends on new future/promise framework; stability is good.* +* [Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Channel.html): + Communicating Sequential Processes ([CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes)). + Functionally equivalent to Go [channels](https://tour.golang.org/concurrency/2) with additional + inspiration from Clojure [core.async](https://clojure.github.io/core.async/). 
+ *Status: Partial documentation and tests.* +* [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LazyRegister.html) +* [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Edge/LockFreeLinkedSet.html) + *Status: will be moved to core soon.* +* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LockFreeStack.html) + *Status: missing documentation and tests.* +* [Promises::Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises/Channel.html) + A first-in-first-out channel that accepts messages with push family of methods and returns + messages with pop family of methods. + Pop and push operations can be represented as futures, see `#pop_op` and `#push_op`. + The capacity of the channel can be limited to support back pressure; use the capacity option in `#initialize`. + The `#pop` method blocks and `#pop_op` returns a pending future if there is no message in the channel. + If the capacity is limited, the `#push` method blocks and `#push_op` returns a pending future. +* [Cancellation](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Cancellation.html) + The Cancellation abstraction provides cooperative cancellation. + + The standard methods `Thread#raise` and `Thread#kill` available in Ruby + are very dangerous (see the blog posts linked below). + Therefore concurrent-ruby provides an alternative. + + * + * + * + + It provides an object which represents a task that can be executed; + the task has to get a reference to the object and periodically, cooperatively, check that it has not been cancelled. + Good practices to make tasks cancellable: + * check cancellation every cycle of a loop which does significant work, + * do all blocking actions in a loop with a timeout, then on timeout check cancellation + and, if not cancelled, block again with the timeout +* [Throttle](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Throttle.html) + A tool for managing the concurrency level of tasks. +* [ErlangActor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ErlangActor.html) + Actor implementation which precisely matches Erlang actor behaviour. + Requires at least Ruby 2.1, otherwise it's not loaded. +* [WrappingExecutor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/WrappingExecutor.html) + A delegating executor which modifies each task before the task is given to + the target executor it delegates to. + +## Supported Ruby versions + +* MRI 2.0 and above +* JRuby 9000 +* TruffleRuby +* Any Ruby interpreter that is compliant with Ruby 2.0 or newer. + +We actually still support MRI 1.9.3 and JRuby 1.7.27, but we are looking for ways to drop that support. +Java 8 is preferred for JRuby but every Java version on which JRuby 9000 runs is supported. + +Legacy support for Rubinius is kept but no longer maintained; if you would like to help, +please respond to [#739](https://github.com/ruby-concurrency/concurrent-ruby/issues/739). + +## Usage + +Everything within this gem can be loaded simply by requiring it: + +```ruby +require 'concurrent' +``` + +*Requiring only specific abstractions from Concurrent Ruby is not yet supported.* + +To use the tools in the Edge gem it must be required separately: + +```ruby +require 'concurrent-edge' +``` + +If the library does not behave as expected, `Concurrent.use_stdlib_logger(Logger::DEBUG)` could
+ +## Installation + +```shell +gem install concurrent-ruby +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby', require: 'concurrent' +``` + +and run `bundle install` from your shell. + +### Edge Gem Installation + +The Edge gem must be installed separately from the core gem: + +```shell +gem install concurrent-ruby-edge +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby-edge', require: 'concurrent-edge' +``` + +and run `bundle install` from your shell. + + +### C Extensions for MRI + +Potential performance improvements may be achieved under MRI by installing optional C extensions. +To minimise installation errors the C extensions are available in the `concurrent-ruby-ext` +extension gem. `concurrent-ruby` and `concurrent-ruby-ext` are always released together with same +version. Simply install the extension gem too: + +```ruby +gem install concurrent-ruby-ext +``` + +or add the following line to Gemfile: + +```ruby +gem 'concurrent-ruby-ext' +``` + +and run `bundle install` from your shell. + +In code it is only necessary to + +```ruby +require 'concurrent' +``` + +The `concurrent-ruby` gem will automatically detect the presence of the `concurrent-ruby-ext` gem +and load the appropriate C extensions. + +#### Note For gem developers + +No gems should depend on `concurrent-ruby-ext`. Doing so will force C extensions on your users. The +best practice is to depend on `concurrent-ruby` and let users to decide if they want C extensions. + +## Building the gem + +### Requirements + +* Recent CRuby +* JRuby, `rbenv install jruby-9.2.17.0` +* Set env variable `CONCURRENT_JRUBY_HOME` to point to it, e.g. `/usr/local/opt/rbenv/versions/jruby-9.2.17.0` +* Install Docker, required for Windows builds + +### Publishing the Gem + +* Update`version.rb` +* Update the CHANGELOG +* Update the Yard documentation + - Add the new version to `docs-source/signpost.md`. Needs to be done only if there are visible changes in the + documentation. + - Run `bundle exec rake yard` to update the master documentation and signpost. + - Run `bundle exec rake yard:` to add or update the documentation of the new version. +* Commit (and push) the changes. +* Use `be rake release` to release the gem. It consists + of `['release:checks', 'release:build', 'release:test', 'release:publish']` steps. It will ask at the end before + publishing anything. Steps can also be executed individually. + +## Maintainers + +* [Petr Chalupa](https://github.com/pitr-ch) — Lead maintainer, point-of-contact. +* [Chris Seaton](https://github.com/chrisseaton) — + If Petr is not available Chris can help or poke Petr to pay attention where it is needed. + +### Special Thanks to + +* [Jerry D'Antonio](https://github.com/jdantonio) for creating the gem +* [Brian Durand](https://github.com/bdurand) for the `ref` gem +* [Charles Oliver Nutter](https://github.com/headius) for the `atomic` and `thread_safe` gems +* [thedarkone](https://github.com/thedarkone) for the `thread_safe` gem + +to the past maintainers + +* [Michele Della Torre](https://github.com/mighe) +* [Paweł Obrok](https://github.com/obrok) +* [Lucas Allan](https://github.com/lucasallan) + +and to [Ruby Association](https://www.ruby.or.jp/en/) for sponsoring a project +["Enhancing Ruby’s concurrency tooling"](https://www.ruby.or.jp/en/news/20181106) in 2018. + +## License and Copyright + +*Concurrent Ruby* is free software released under the +[MIT License](http://www.opensource.org/licenses/MIT). 
+ +The *Concurrent Ruby* [logo](https://raw.githubusercontent.com/ruby-concurrency/concurrent-ruby/master/docs-source/logo/concurrent-ruby-logo-300x300.png) was +designed by [David Jones](https://twitter.com/zombyboy). It is Copyright © 2014 +[Jerry D'Antonio](https://twitter.com/jerrydantonio). All Rights Reserved. diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/Rakefile b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/Rakefile new file mode 100644 index 0000000..4fd002b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/Rakefile @@ -0,0 +1,339 @@ +require_relative 'lib/concurrent-ruby/concurrent/version' +require_relative 'lib/concurrent-ruby-edge/concurrent/edge/version' +require_relative 'lib/concurrent-ruby/concurrent/utility/engine' + +if Concurrent.ruby_version :<, 2, 0, 0 + # @!visibility private + module Kernel + def __dir__ + File.dirname __FILE__ + end + end +end + +core_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby.gemspec') +ext_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-ext.gemspec') +edge_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-edge.gemspec') + +require 'rake/javaextensiontask' + +ENV['JRUBY_HOME'] = ENV['CONCURRENT_JRUBY_HOME'] if ENV['CONCURRENT_JRUBY_HOME'] && !Concurrent.on_jruby? + +Rake::JavaExtensionTask.new('concurrent_ruby', core_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' +end + +unless Concurrent.on_jruby? + require 'rake/extensiontask' + + Rake::ExtensionTask.new('concurrent_ruby_ext', ext_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby-ext' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' + ext.source_pattern = '*.{c,h}' + + ext.cross_compile = true + ext.cross_platform = ['x86-mingw32', 'x64-mingw32'] + end +end + +require 'rake_compiler_dock' +namespace :repackage do + desc '* with Windows fat distributions' + task :all do + Dir.chdir(__dir__) do + # store gems in vendor cache for docker + sh 'bundle package' + + # build only the jar file not the whole gem for java platform, the jar is part the concurrent-ruby-x.y.z.gem + Rake::Task['lib/concurrent-ruby/concurrent/concurrent_ruby.jar'].invoke + + # build all gem files + %w[x86-mingw32 x64-mingw32].each do |plat| + RakeCompilerDock.sh "bundle install --local && bundle exec rake native:#{plat} gem --trace", platform: plat + end + end + end +end + +require 'rubygems' +require 'rubygems/package_task' + +Gem::PackageTask.new(core_gemspec) {} if core_gemspec +Gem::PackageTask.new(ext_gemspec) {} if ext_gemspec && !Concurrent.on_jruby? +Gem::PackageTask.new(edge_gemspec) {} if edge_gemspec + +CLEAN.include('lib/concurrent-ruby/concurrent/2.*', 'lib/concurrent-ruby/concurrent/*.jar') + +begin + require 'rspec' + require 'rspec/core/rake_task' + + RSpec::Core::RakeTask.new(:spec) + + namespace :spec do + desc '* Configured for ci' + RSpec::Core::RakeTask.new(:ci) do |t| + options = %w[ --color + --backtrace + --order defined + --format documentation + --tag ~notravis ] + t.rspec_opts = [*options].join(' ') + end + + desc '* test packaged and installed gems instead of local files' + task :installed do + Dir.chdir(__dir__) do + sh "gem install pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem install pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" if Concurrent.on_cruby? 
+ sh "gem install pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" + ENV['NO_PATH'] = 'true' + sh 'bundle update' + sh 'bundle exec rake spec:ci' + end + end + end + + desc 'executed in CI' + task :ci => [:compile, 'spec:ci'] + + task :default => [:clobber, :compile, :spec] +rescue LoadError => e + puts 'RSpec is not installed, skipping test task definitions: ' + e.message +end + +current_yard_version_name = Concurrent::VERSION + +begin + require 'yard' + require 'md_ruby_eval' + require_relative 'support/yard_full_types' + + common_yard_options = ['--no-yardopts', + '--no-document', + '--no-private', + '--embed-mixins', + '--markup', 'markdown', + '--title', 'Concurrent Ruby', + '--template', 'default', + '--template-path', 'yard-template', + '--default-return', 'undocumented'] + + desc 'Generate YARD Documentation (signpost, master)' + task :yard => ['yard:signpost', 'yard:master'] + + namespace :yard do + + desc '* eval markdown files' + task :eval_md do + Dir.chdir File.join(__dir__, 'docs-source') do + sh 'bundle exec md-ruby-eval --auto' + end + end + + task :update_readme do + Dir.chdir __dir__ do + content = File.read(File.join('README.md')). + gsub(/\[([\w ]+)\]\(http:\/\/ruby-concurrency\.github\.io\/concurrent-ruby\/master\/.*\)/) do |_| + case $1 + when 'LockFreeLinkedSet' + "{Concurrent::Edge::#{$1} #{$1}}" + when '.dataflow' + '{Concurrent.dataflow Concurrent.dataflow}' + when 'thread pool' + '{file:thread_pools.md thread pool}' + else + "{Concurrent::#{$1} #{$1}}" + end + end + FileUtils.mkpath 'tmp' + File.write 'tmp/README.md', content + end + end + + define_yard_task = -> name do + output_dir = "docs/#{name}" + + removal_name = "remove.#{name}" + task removal_name do + Dir.chdir __dir__ do + FileUtils.rm_rf output_dir + end + end + + desc "* of #{name} into subdir #{name}" + YARD::Rake::YardocTask.new(name) do |yard| + yard.options.push( + '--output-dir', output_dir, + '--main', 'tmp/README.md', + *common_yard_options) + yard.files = ['./lib/concurrent-ruby/**/*.rb', + './lib/concurrent-ruby-edge/**/*.rb', + './ext/concurrent_ruby_ext/**/*.c', + '-', + 'docs-source/thread_pools.md', + 'docs-source/promises.out.md', + 'docs-source/medium-example.out.rb', + 'LICENSE.txt', + 'CHANGELOG.md'] + end + Rake::Task[name].prerequisites.push removal_name, + # 'yard:eval_md', + 'yard:update_readme' + end + + define_yard_task.call current_yard_version_name + define_yard_task.call 'master' + + desc "* signpost for versions" + YARD::Rake::YardocTask.new(:signpost) do |yard| + yard.options.push( + '--output-dir', 'docs', + '--main', 'docs-source/signpost.md', + *common_yard_options) + yard.files = ['no-lib'] + end + + define_uptodate_task = -> name do + namespace name do + desc "** ensure that #{name} generated documentation is matching the source code" + task :uptodate do + Dir.chdir(__dir__) do + begin + FileUtils.cp_r 'docs', 'docs-copy', verbose: true + Rake::Task["yard:#{name}"].invoke + sh 'diff -r docs/ docs-copy/' do |ok, res| + unless ok + begin + STDOUT.puts "yard:#{name} is not properly generated and committed.", "Continue? 
(y/n)" + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + ensure + FileUtils.rm_rf 'docs-copy', verbose: true + end + end + end + end + end + + define_uptodate_task.call current_yard_version_name + define_uptodate_task.call 'master' + end + +rescue LoadError => e + puts 'YARD is not installed, skipping documentation task definitions: ' + e.message +end + +desc 'build, test, and publish the gem' +task :release => ['release:checks', 'release:build', 'release:test', 'release:publish'] + +namespace :release do + # Depends on environment of @pitr-ch + + task :checks => "yard:#{current_yard_version_name}:uptodate" do + Dir.chdir(__dir__) do + sh 'test -z "$(git status --porcelain)"' do |ok, res| + unless ok + begin + status = `git status --porcelain` + STDOUT.puts 'There are local changes that you might want to commit.', status, 'Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + sh 'git fetch' + sh 'test $(git show-ref --verify --hash refs/heads/master) = ' + + '$(git show-ref --verify --hash refs/remotes/origin/master)' do |ok, res| + unless ok + begin + STDOUT.puts 'Local master branch is not pushed to origin.', 'Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + end + end + + desc '* build all *.gem files necessary for release' + task :build => [:clobber, 'repackage:all'] + + desc '* test actual installed gems instead of cloned repository on MRI and JRuby' + task :test do + Dir.chdir(__dir__) do + old = ENV['RBENV_VERSION'] + + mri_version = `ruby -e 'puts RUBY_VERSION'`.chomp + jruby_version = File.basename(ENV['CONCURRENT_JRUBY_HOME']) + + puts "Using following version:" + pp mri_version: mri_version, jruby_version: jruby_version + + ENV['RBENV_VERSION'] = mri_version + sh 'rbenv version' + sh 'bundle exec rake spec:installed' + + ENV['RBENV_VERSION'] = jruby_version + sh 'rbenv version' + sh 'bundle exec rake spec:installed' + + puts 'Windows build is untested' + + ENV['RBENV_VERSION'] = old + end + end + + desc '* do all nested steps' + task :publish => ['publish:ask', 'publish:tag', 'publish:rubygems', 'publish:post_steps'] + + namespace :publish do + publish_edge = false + + task :ask do + begin + STDOUT.puts 'Do you want to publish anything now? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + begin + STDOUT.puts 'It will publish `concurrent-ruby`. Do you want to publish `concurrent-ruby-edge`? 
(y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + publish_edge = input == 'y' + end + + desc '** tag HEAD with current version and push to github' + task :tag => :ask do + Dir.chdir(__dir__) do + sh "git tag v#{Concurrent::VERSION}" + sh "git push origin v#{Concurrent::VERSION}" + sh "git tag edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + sh "git push origin edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + end + end + + desc '** push all *.gem files to rubygems' + task :rubygems => :ask do + Dir.chdir(__dir__) do + sh "gem push pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem push pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" if publish_edge + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x64-mingw32.gem" + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x86-mingw32.gem" + end + end + + desc '** print post release steps' + task :post_steps do + # TODO: (petr 05-Jun-2021) automate and renew the process + # puts 'Manually: create a release on GitHub with relevant changelog part' + # puts 'Manually: send email same as release with relevant changelog part' + # puts 'Manually: tweet' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/ConcurrentRubyService.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/ConcurrentRubyService.java new file mode 100644 index 0000000..fb6be96 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/ConcurrentRubyService.java @@ -0,0 +1,17 @@ +import org.jruby.Ruby; +import org.jruby.runtime.load.BasicLibraryService; + +import java.io.IOException; + +public class ConcurrentRubyService implements BasicLibraryService { + + public boolean basicLoad(final Ruby runtime) throws IOException { + new com.concurrent_ruby.ext.AtomicReferenceLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicBooleanLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicFixnumLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaSemaphoreLibrary().load(runtime, false); + new com.concurrent_ruby.ext.SynchronizationLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JRubyMapBackendLibrary().load(runtime, false); + return true; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java new file mode 100644 index 0000000..dfa9e77 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java @@ -0,0 +1,175 @@ +package com.concurrent_ruby.ext; + +import java.lang.reflect.Field; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +/** + * This library adds an atomic reference type to JRuby for use in the atomic + * library. 
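For orientation, the Java 6/7 fallback that this class provides further down for get_and_set is the classic compare-and-swap retry loop. A minimal sketch of the same idea against the standard java.util.concurrent.atomic.AtomicReference (the class and variable names here are illustrative, not part of the gem):

    import java.util.concurrent.atomic.AtomicReference;

    public class CasRetryDemo {
        // Swap in a new value and return the previous one using only CAS,
        // retrying whenever another thread raced in between the read and the swap.
        static <T> T getAndSet(AtomicReference<T> ref, T newValue) {
            while (true) {
                T oldValue = ref.get();
                if (ref.compareAndSet(oldValue, newValue)) {
                    return oldValue;
                }
            }
        }

        public static void main(String[] args) {
            AtomicReference<String> ref = new AtomicReference<>("a");
            System.out.println(getAndSet(ref, "b")); // prints "a"
            System.out.println(ref.get());           // prints "b"
        }
    }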
We do a native version to avoid the implicit value coercion that + * normally happens through JI. + * + * @author headius + */ +public class AtomicReferenceLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicReference", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + try { + sun.misc.Unsafe.class.getMethod("getAndSetObject", Object.class); + atomicCls.setAllocator(JRUBYREFERENCE8_ALLOCATOR); + } catch (Exception e) { + // leave it as Java 6/7 version + } + atomicCls.defineAnnotatedMethods(JRubyReference.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBYREFERENCE8_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference8(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyReference", parent="Object") + public static class JRubyReference extends RubyObject { + volatile IRubyObject reference; + + static final sun.misc.Unsafe UNSAFE; + static final long referenceOffset; + + static { + try { + UNSAFE = UnsafeHolder.U; + Class k = JRubyReference.class; + referenceOffset = UNSAFE.objectFieldOffset(k.getDeclaredField("reference")); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public JRubyReference(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + UNSAFE.putObject(this, referenceOffset, context.nil); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + UNSAFE.putObject(this, referenceOffset, value); + return context.nil; + } + + @JRubyMethod(name = {"get", "value"}) + public IRubyObject get() { + return reference; + } + + @JRubyMethod(name = {"set", "value="}) + public IRubyObject set(IRubyObject newValue) { + UNSAFE.putObjectVolatile(this, referenceOffset, newValue); + return newValue; + } + + @JRubyMethod(name = {"compare_and_set", "compare_and_swap"}) + public IRubyObject compare_and_set(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + if (expectedValue instanceof RubyNumeric) { + // numerics are not always idempotent in Ruby, so we need to do slower logic + return compareAndSetNumeric(context, expectedValue, newValue); + } + + return runtime.newBoolean(UNSAFE.compareAndSwapObject(this, referenceOffset, expectedValue, newValue)); + } + + @JRubyMethod(name = {"get_and_set", "swap"}) + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // less-efficient version for Java 6 and 7 + while (true) { + IRubyObject oldValue = get(); + if (UNSAFE.compareAndSwapObject(this, referenceOffset, oldValue, newValue)) { + return oldValue; + } + } + } + + private IRubyObject compareAndSetNumeric(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + // loop until: + // * reference CAS would succeed for same-valued objects + // * current and expected have different values as determined by #equals + while (true) { + IRubyObject current = reference; + + if (!(current instanceof RubyNumeric)) { + // old value is not numeric, CAS fails + 
return runtime.getFalse(); + } + + RubyNumeric currentNumber = (RubyNumeric)current; + if (!currentNumber.equals(expectedValue)) { + // current number does not equal expected, fail CAS + return runtime.getFalse(); + } + + // check that current has not changed, or else allow loop to repeat + boolean success = UNSAFE.compareAndSwapObject(this, referenceOffset, current, newValue); + if (success) { + // value is same and did not change in interim...success + return runtime.getTrue(); + } + } + } + } + + private static final class UnsafeHolder { + private UnsafeHolder(){} + + public static final sun.misc.Unsafe U = loadUnsafe(); + + private static sun.misc.Unsafe loadUnsafe() { + try { + Class unsafeClass = Class.forName("sun.misc.Unsafe"); + Field f = unsafeClass.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + } catch (Exception e) { + return null; + } + } + } + + public static class JRubyReference8 extends JRubyReference { + public JRubyReference8(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @Override + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // efficient version for Java 8 + return (IRubyObject)UNSAFE.getAndSetObject(this, referenceOffset, newValue); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java new file mode 100644 index 0000000..a09f916 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java @@ -0,0 +1,248 @@ +package com.concurrent_ruby.ext; + +import org.jruby.*; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8; +import com.concurrent_ruby.ext.jsr166e.nounsafe.*; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.Map; + +import static org.jruby.runtime.Visibility.PRIVATE; + +/** + * Native Java implementation to avoid the JI overhead. 
+ * + * @author thedarkone + */ +public class JRubyMapBackendLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyModule thread_safeMod = concurrentMod.defineModuleUnder("Collection"); + RubyClass jrubyRefClass = thread_safeMod.defineClassUnder("JRubyMapBackend", runtime.getObject(), BACKEND_ALLOCATOR); + jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); + jrubyRefClass.defineAnnotatedMethods(JRubyMapBackend.class); + } + + private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyMapBackend(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyMapBackend", parent="Object") + public static class JRubyMapBackend extends RubyObject { + // Defaults used by the CHM + static final int DEFAULT_INITIAL_CAPACITY = 16; + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); + + private ConcurrentHashMap map; + + private static ConcurrentHashMap newCHM(int initialCapacity, float loadFactor) { + if (CAN_USE_UNSAFE_CHM) { + return new ConcurrentHashMapV8(initialCapacity, loadFactor); + } else { + return new com.concurrent_ruby.ext.jsr166e.nounsafe.ConcurrentHashMapV8(initialCapacity, loadFactor); + } + } + + private static ConcurrentHashMap newCHM() { + return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + private static boolean canUseUnsafeCHM() { + try { + new com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8(); // force class load and initialization + return true; + } catch (Throwable t) { // ensuring we really do catch everything + // Doug's Unsafe setup errors always have this "Could not ini.." message + if (isCausedBySecurityException(t)) { + return false; + } + throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t)); + } + } + + private static boolean isCausedBySecurityException(Throwable t) { + while (t != null) { + if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { + return true; + } + t = t.getCause(); + } + return false; + } + + public JRubyMapBackend(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + map = newCHM(); + return context.getRuntime().getNil(); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject options) { + map = toCHM(context, options); + return context.getRuntime().getNil(); + } + + private ConcurrentHashMap toCHM(ThreadContext context, IRubyObject options) { + Ruby runtime = context.getRuntime(); + if (!options.isNil() && options.respondsTo("[]")) { + IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); + IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); + int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; + float loadFactor = !rLoadFactor.isNil() ? 
(float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; + return newCHM(initialCapacity, loadFactor); + } else { + return newCHM(); + } + } + + @JRubyMethod(name = "[]", required = 1) + public IRubyObject op_aref(ThreadContext context, IRubyObject key) { + IRubyObject value; + return ((value = map.get(key)) == null) ? context.getRuntime().getNil() : value; + } + + @JRubyMethod(name = {"[]="}, required = 2) + public IRubyObject op_aset(IRubyObject key, IRubyObject value) { + map.put(key, value); + return value; + } + + @JRubyMethod + public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { + IRubyObject result = map.putIfAbsent(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { + return map.computeIfAbsent(key, new ConcurrentHashMap.Fun() { + @Override + public IRubyObject apply(IRubyObject key) { + return block.yieldSpecific(context); + } + }); + } + + @JRubyMethod + public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { + IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { + return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); + } + + @JRubyMethod(name = "key?", required = 1) + public RubyBoolean has_key_p(IRubyObject key) { + return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); + } + + @JRubyMethod + public IRubyObject key(IRubyObject value) { + final IRubyObject key = map.findKey(value); + return key == null ? getRuntime().getNil() : key; + } + + @JRubyMethod + public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { + IRubyObject result = map.replace(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { + IRubyObject result = map.put(key, value); + return result == null ? 
getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject delete(IRubyObject key) { + IRubyObject result = map.remove(key); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { + return getRuntime().newBoolean(map.remove(key, value)); + } + + @JRubyMethod + public IRubyObject clear() { + map.clear(); + return this; + } + + @JRubyMethod + public IRubyObject each_pair(ThreadContext context, Block block) { + for (Map.Entry entry : map.entrySet()) { + block.yieldSpecific(context, entry.getKey(), entry.getValue()); + } + return this; + } + + @JRubyMethod + public RubyFixnum size(ThreadContext context) { + return context.getRuntime().newFixnum(map.size()); + } + + @JRubyMethod + public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { + return map.getValueOrDefault(key, defaultValue); + } + + @JRubyMethod(visibility = PRIVATE) + public JRubyMapBackend initialize_copy(ThreadContext context, IRubyObject other) { + map = newCHM(); + return this; + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java new file mode 100644 index 0000000..b566076 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java @@ -0,0 +1,93 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNil; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class JavaAtomicBooleanLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicBoolean", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + atomicCls.defineAnnotatedMethods(JavaAtomicBoolean.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicBoolean(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicBoolean", parent = "Object") + public static class JavaAtomicBoolean extends RubyObject { + + private AtomicBoolean atomicBoolean; + + public JavaAtomicBoolean(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + atomicBoolean = new AtomicBoolean(convertRubyBooleanToJavaBoolean(value)); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + atomicBoolean = new AtomicBoolean(); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject value() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + + @JRubyMethod(name = "true?") + public IRubyObject isAtomicTrue() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + 
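The make_true and make_false methods defined just below map onto AtomicBoolean.compareAndSet, so their return value reports whether the call actually flipped the flag. A small sketch of that semantic with the plain java.util.concurrent.atomic.AtomicBoolean (illustrative only, not the gem's code):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class MakeTrueDemo {
        public static void main(String[] args) {
            AtomicBoolean flag = new AtomicBoolean(false);
            // First caller wins: the CAS from false to true succeeds exactly once.
            System.out.println(flag.compareAndSet(false, true)); // true  (we flipped it)
            System.out.println(flag.compareAndSet(false, true)); // false (already true)
            System.out.println(flag.get());                      // true
        }
    }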
+ @JRubyMethod(name = "false?") + public IRubyObject isAtomicFalse() { + return getRuntime().newBoolean((atomicBoolean.get() == false)); + } + + @JRubyMethod(name = "value=") + public IRubyObject setAtomic(ThreadContext context, IRubyObject newValue) { + atomicBoolean.set(convertRubyBooleanToJavaBoolean(newValue)); + return context.nil; + } + + @JRubyMethod(name = "make_true") + public IRubyObject makeTrue() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(false, true)); + } + + @JRubyMethod(name = "make_false") + public IRubyObject makeFalse() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(true, false)); + } + + private boolean convertRubyBooleanToJavaBoolean(IRubyObject newValue) { + if (newValue instanceof RubyBoolean.False || newValue instanceof RubyNil) { + return false; + } else { + return true; + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java new file mode 100755 index 0000000..672bfc0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java @@ -0,0 +1,113 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import org.jruby.runtime.Block; + +public class JavaAtomicFixnumLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicFixnum", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaAtomicFixnum.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicFixnum(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicFixnum", parent = "Object") + public static class JavaAtomicFixnum extends RubyObject { + + private AtomicLong atomicLong; + + public JavaAtomicFixnum(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + this.atomicLong = new AtomicLong(0); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.atomicLong = new AtomicLong(rubyFixnumToLong(value)); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject getValue() { + return getRuntime().newFixnum(atomicLong.get()); + } + + @JRubyMethod(name = "value=") + public IRubyObject setValue(ThreadContext context, IRubyObject newValue) { + atomicLong.set(rubyFixnumToLong(newValue)); + return context.nil; + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject increment() { + return getRuntime().newFixnum(atomicLong.incrementAndGet()); + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject 
increment(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(delta)); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement() { + return getRuntime().newFixnum(atomicLong.decrementAndGet()); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(-delta)); + } + + @JRubyMethod(name = "compare_and_set") + public IRubyObject compareAndSet(ThreadContext context, IRubyObject expect, IRubyObject update) { + return getRuntime().newBoolean(atomicLong.compareAndSet(rubyFixnumToLong(expect), rubyFixnumToLong(update))); + } + + @JRubyMethod + public IRubyObject update(ThreadContext context, Block block) { + for (;;) { + long _oldValue = atomicLong.get(); + IRubyObject oldValue = getRuntime().newFixnum(_oldValue); + IRubyObject newValue = block.yield(context, oldValue); + if (atomicLong.compareAndSet(_oldValue, rubyFixnumToLong(newValue))) { + return newValue; + } + } + } + + private long rubyFixnumToLong(IRubyObject value) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError("value must be a Fixnum"); + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java new file mode 100755 index 0000000..a3e847d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java @@ -0,0 +1,159 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.Semaphore; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +public class JavaSemaphoreLibrary { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaSemaphore", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaSemaphore.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaSemaphore(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaSemaphore", parent = "Object") + public static class JavaSemaphore extends RubyObject { + + private JRubySemaphore semaphore; + + public JavaSemaphore(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.semaphore = new JRubySemaphore(rubyFixnumInt(value, "count")); + return context.nil; + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context, IRubyObject value) throws InterruptedException { + this.semaphore.acquire(rubyFixnumToPositiveInt(value, "permits")); + return context.nil; + } + + @JRubyMethod(name = 
"available_permits") + public IRubyObject availablePermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.availablePermits()); + } + + @JRubyMethod(name = "drain_permits") + public IRubyObject drainPermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.drainPermits()); + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context) throws InterruptedException { + this.semaphore.acquire(1); + return context.nil; + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context) throws InterruptedException { + return getRuntime().newBoolean(semaphore.tryAcquire(1)); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits) throws InterruptedException { + return getRuntime().newBoolean(semaphore.tryAcquire(rubyFixnumToPositiveInt(permits, "permits"))); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits, IRubyObject timeout) throws InterruptedException { + return getRuntime().newBoolean( + semaphore.tryAcquire( + rubyFixnumToPositiveInt(permits, "permits"), + rubyNumericToLong(timeout, "timeout"), + java.util.concurrent.TimeUnit.SECONDS) + ); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context) { + this.semaphore.release(1); + return getRuntime().newBoolean(true); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context, IRubyObject value) { + this.semaphore.release(rubyFixnumToPositiveInt(value, "permits")); + return getRuntime().newBoolean(true); + } + + @JRubyMethod(name = "reduce_permits") + public IRubyObject reducePermits(ThreadContext context, IRubyObject reduction) throws InterruptedException { + this.semaphore.publicReducePermits(rubyFixnumToNonNegativeInt(reduction, "reduction")); + return context.nil; + } + + private int rubyFixnumInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be integer"); + } + } + + private int rubyFixnumToNonNegativeInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() >= 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a non-negative integer"); + } + } + + private int rubyFixnumToPositiveInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() > 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be an integer greater than zero"); + } + } + + private long rubyNumericToLong(IRubyObject value, String paramName) { + if (value instanceof RubyNumeric && ((RubyNumeric) value).getDoubleValue() > 0) { + RubyNumeric fixNum = (RubyNumeric) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a float greater than zero"); + } + } + + class JRubySemaphore extends Semaphore { + + public JRubySemaphore(int permits) { + super(permits); + } + + public JRubySemaphore(int permits, boolean value) { + super(permits, value); + } + + public void publicReducePermits(int i) { + reducePermits(i); + } + + } + } +} diff --git 
a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java new file mode 100644 index 0000000..bfcc0d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java @@ -0,0 +1,307 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBasicObject; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.RubyThread; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import sun.misc.Unsafe; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +public class SynchronizationLibrary implements Library { + + private static final Unsafe UNSAFE = loadUnsafe(); + private static final boolean FULL_FENCE = supportsFences(); + + private static Unsafe loadUnsafe() { + try { + Class ncdfe = Class.forName("sun.misc.Unsafe"); + Field f = ncdfe.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (Unsafe) f.get((java.lang.Object) null); + } catch (Exception var2) { + return null; + } catch (NoClassDefFoundError var3) { + return null; + } + } + + private static boolean supportsFences() { + if (UNSAFE == null) { + return false; + } else { + try { + Method m = UNSAFE.getClass().getDeclaredMethod("fullFence", new Class[0]); + if (m != null) { + return true; + } + } catch (Exception var1) { + // nothing + } + + return false; + } + } + + private static final ObjectAllocator JRUBY_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyObject(runtime, klazz); + } + }; + + private static final ObjectAllocator OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Object(runtime, klazz); + } + }; + + private static final ObjectAllocator ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new AbstractLockableObject(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBY_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyLockableObject(runtime, klazz); + } + }; + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule synchronizationModule = runtime. + defineModule("Concurrent"). 
+ defineModuleUnder("Synchronization"); + + RubyModule jrubyAttrVolatileModule = synchronizationModule.defineModuleUnder("JRubyAttrVolatile"); + jrubyAttrVolatileModule.defineAnnotatedMethods(JRubyAttrVolatile.class); + + defineClass(runtime, synchronizationModule, "AbstractObject", "JRubyObject", + JRubyObject.class, JRUBY_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "JRubyObject", "Object", + Object.class, OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "AbstractLockableObject", + AbstractLockableObject.class, ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "AbstractLockableObject", "JRubyLockableObject", + JRubyLockableObject.class, JRUBY_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "JRuby", + JRuby.class, new ObjectAllocator() { + @Override + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRuby(runtime, klazz); + } + }); + } + + private RubyClass defineClass( + Ruby runtime, + RubyModule namespace, + String parentName, + String name, + Class javaImplementation, + ObjectAllocator allocator) { + final RubyClass parentClass = namespace.getClass(parentName); + + if (parentClass == null) { + System.out.println("not found " + parentName); + throw runtime.newRuntimeError(namespace.toString() + "::" + parentName + " is missing"); + } + + final RubyClass newClass = namespace.defineClassUnder(name, parentClass, allocator); + newClass.defineAnnotatedMethods(javaImplementation); + return newClass; + } + + // Facts: + // - all ivar reads are without any synchronisation of fences see + // https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/VariableAccessor.java#L110-110 + // - writes depend on UnsafeHolder.U, null -> SynchronizedVariableAccessor, !null -> StampedVariableAccessor + // SynchronizedVariableAccessor wraps with synchronized block, StampedVariableAccessor uses fullFence or + // volatilePut + // TODO (pitr 16-Sep-2015): what do we do in Java 9 ? + + // module JRubyAttrVolatile + public static class JRubyAttrVolatile { + + // volatile threadContext is used as a memory barrier per the JVM memory model happens-before semantic + // on volatile fields. any volatile field could have been used but using the thread context is an + // attempt to avoid code elimination. + private static volatile int volatileField; + + @JRubyMethod(name = "full_memory_barrier", visibility = Visibility.PUBLIC) + public static IRubyObject fullMemoryBarrier(ThreadContext context, IRubyObject self) { + // Prevent reordering of ivar writes with publication of this instance + if (!FULL_FENCE) { + // Assuming that following volatile read and write is not eliminated it simulates fullFence. + // If it's eliminated it'll cause problems only on non-x86 platforms. 
+ // http://shipilev.net/blog/2014/jmm-pragmatics/#_happens_before_test_your_understanding + final int volatileRead = volatileField; + volatileField = context.getLine(); + } else { + UNSAFE.fullFence(); + } + return context.nil; + } + + @JRubyMethod(name = "instance_variable_get_volatile", visibility = Visibility.PUBLIC) + public static IRubyObject instanceVariableGetVolatile( + ThreadContext context, + IRubyObject self, + IRubyObject name) { + // Ensure we ses latest value with loadFence + if (!FULL_FENCE) { + // piggybacking on volatile read, simulating loadFence + final int volatileRead = volatileField; + return ((RubyBasicObject) self).instance_variable_get(context, name); + } else { + UNSAFE.loadFence(); + return ((RubyBasicObject) self).instance_variable_get(context, name); + } + } + + @JRubyMethod(name = "instance_variable_set_volatile", visibility = Visibility.PUBLIC) + public static IRubyObject InstanceVariableSetVolatile( + ThreadContext context, + IRubyObject self, + IRubyObject name, + IRubyObject value) { + // Ensure we make last update visible + if (!FULL_FENCE) { + // piggybacking on volatile write, simulating storeFence + final IRubyObject result = ((RubyBasicObject) self).instance_variable_set(name, value); + volatileField = context.getLine(); + return result; + } else { + // JRuby uses StampedVariableAccessor which calls fullFence + // so no additional steps needed. + // See https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/StampedVariableAccessor.java#L151-L159 + return ((RubyBasicObject) self).instance_variable_set(name, value); + } + } + } + + @JRubyClass(name = "JRubyObject", parent = "AbstractObject") + public static class JRubyObject extends RubyObject { + + public JRubyObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "Object", parent = "JRubyObject") + public static class Object extends JRubyObject { + + public Object(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "AbstractLockableObject", parent = "Object") + public static class AbstractLockableObject extends Object { + + public AbstractLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "JRubyLockableObject", parent = "AbstractLockableObject") + public static class JRubyLockableObject extends JRubyObject { + + public JRubyLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "synchronize", visibility = Visibility.PROTECTED) + public IRubyObject rubySynchronize(ThreadContext context, Block block) { + synchronized (this) { + return block.yield(context, null); + } + } + + @JRubyMethod(name = "ns_wait", optional = 1, visibility = Visibility.PROTECTED) + public IRubyObject nsWait(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.runtime; + if (args.length > 1) { + throw runtime.newArgumentError(args.length, 1); + } + Double timeout = null; + if (args.length > 0 && !args[0].isNil()) { + timeout = args[0].convertToFloat().getDoubleValue(); + if (timeout < 0) { + throw runtime.newArgumentError("time interval must be positive"); + } + } + if (Thread.interrupted()) { + throw runtime.newConcurrencyError("thread interrupted"); + } + boolean success = false; + try { + success = context.getThread().wait_timeout(this, timeout); + } catch (InterruptedException ie) { + throw runtime.newConcurrencyError(ie.getLocalizedMessage()); + } finally { + // An 
interrupt or timeout may have caused us to miss + // a notify that we consumed, so do another notify in + // case someone else is available to pick it up. + if (!success) { + this.notify(); + } + } + return this; + } + + @JRubyMethod(name = "ns_signal", visibility = Visibility.PROTECTED) + public IRubyObject nsSignal(ThreadContext context) { + notify(); + return this; + } + + @JRubyMethod(name = "ns_broadcast", visibility = Visibility.PROTECTED) + public IRubyObject nsBroadcast(ThreadContext context) { + notifyAll(); + return this; + } + } + + @JRubyClass(name = "JRuby") + public static class JRuby extends RubyObject { + public JRuby(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "sleep_interruptibly", visibility = Visibility.PUBLIC, module = true) + public static IRubyObject sleepInterruptibly(final ThreadContext context, IRubyObject receiver, final Block block) { + try { + context.getThread().executeBlockingTask(new RubyThread.BlockingTask() { + @Override + public void run() throws InterruptedException { + block.call(context); + } + + @Override + public void wakeup() { + context.getThread().getNativeThread().interrupt(); + } + }); + } catch (InterruptedException e) { + throw context.runtime.newThreadError("interrupted in Concurrent::Synchronization::JRuby.sleep_interruptibly"); + } + return context.nil; + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java new file mode 100644 index 0000000..e11e15a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java @@ -0,0 +1,31 @@ +package com.concurrent_ruby.ext.jsr166e; + +import java.util.Map; +import java.util.Set; + +public interface ConcurrentHashMap { + /** Interface describing a function of one argument */ + public interface Fun { T apply(A a); } + /** Interface describing a function of two arguments */ + public interface BiFun { T apply(A a, B b); } + + public V get(K key); + public V put(K key, V value); + public V putIfAbsent(K key, V value); + public V computeIfAbsent(K key, Fun mf); + public V computeIfPresent(K key, BiFun mf); + public V compute(K key, BiFun mf); + public V merge(K key, V value, BiFun mf); + public boolean replace(K key, V oldVal, V newVal); + public V replace(K key, V value); + public boolean containsKey(K key); + public boolean remove(Object key, Object value); + public V remove(K key); + public void clear(); + public Set> entrySet(); + public int size(); + public V getValueOrDefault(Object key, V defaultValue); + + public boolean containsValue(V value); + public K findKey(V value); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java new file mode 100644 index 0000000..86aa4eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java @@ -0,0 +1,3863 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + 
+// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *
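A small illustration of that guarantee, using the JDK's java.util.concurrent.ConcurrentHashMap as a stand-in for the vendored jsr166e copy (class and key names are illustrative): once a put has completed, a reader that sees the key also sees the fully constructed value, with no locking on the reader side.

    import java.util.concurrent.ConcurrentHashMap;

    public class VisibilityDemo {
        public static void main(String[] args) throws InterruptedException {
            ConcurrentHashMap<String, int[]> map = new ConcurrentHashMap<>();

            Thread writer = new Thread(() -> map.put("data", new int[]{1, 2, 3}));
            writer.start();

            // No locks here: as soon as get() returns non-null, the completed put
            // happens-before this read, so the array contents are visible too.
            int[] data;
            while ((data = map.get("data")) == null) {
                Thread.yield();
            }
            System.out.println(data[0] + data[1] + data[2]); // 6
            writer.join();
        }
    }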

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *
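In practice that sizing advice looks like the following with the JDK ConcurrentHashMap constructor taking an initial capacity, load factor, and concurrency-level hint; the numbers are only illustrative:

    import java.util.concurrent.ConcurrentHashMap;

    public class SizedMapDemo {
        public static void main(String[] args) {
            // Expecting roughly 10_000 entries: give a size estimate up front so the
            // table does not have to resize repeatedly while it is being populated.
            ConcurrentHashMap<String, Long> stats =
                    new ConcurrentHashMap<>(10_000, 0.75f, 16);
            stats.put("requests", 1L);
            System.out.println(stats);
        }
    }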

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *
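The key-set projection described above also exists on the JDK ConcurrentHashMap; a quick sketch of using it as a concurrent Set (names illustrative):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class KeySetDemo {
        public static void main(String[] args) {
            // A concurrent Set view backed by a ConcurrentHashMap whose values are unused.
            Set<String> seen = ConcurrentHashMap.newKeySet();
            System.out.println(seen.add("job-1")); // true, first time
            System.out.println(seen.add("job-1")); // false, already present
        }
    }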

A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *
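That frequency-map pattern translates directly to the JDK classes; a minimal sketch with java.util.concurrent.ConcurrentHashMap and LongAdder (String keys are assumed here, the javadoc leaves the key type open):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    public class FreqMapDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();
            for (String word : new String[]{"a", "b", "a"}) {
                // Creates the counter on first sight of the key, then increments it;
                // both steps are safe under concurrent use.
                freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
            }
            System.out.println(freqs.get("a").sum()); // 2
        }
    }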

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

+ *   • forEach: Perform a given action on each element.
+ *     A variant form applies a given transformation on each element
+ *     before performing the action.
+ *
+ *   • search: Return the first available non-null result of
+ *     applying a given function on each element; skipping further
+ *     search when a result is found.
+ *
+ *   • reduce: Accumulate each element. The supplied reduction
+ *     function cannot rely on ordering (more formally, it should be
+ *     both associative and commutative). There are five variants:
+ *
+ *       • Plain reductions. (There is not a form of this method for
+ *         (key, value) function arguments since there is no corresponding
+ *         return type.)
+ *
+ *       • Mapped reductions that accumulate the results of a given
+ *         function applied to each element.
+ *
+ *       • Reductions to scalar doubles, longs, and ints, using a
+ *         given basis value.
+ *
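On the JDK 8 ConcurrentHashMap these bulk operations take a parallelismThreshold argument; a short sketch of the forEach and reduce forms (the data and the threshold of 1 are chosen only for illustration):

    import java.util.concurrent.ConcurrentHashMap;

    public class BulkOpsDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> sizes = new ConcurrentHashMap<>();
            sizes.put("a.txt", 10L);
            sizes.put("b.txt", 32L);

            // forEach: perform an action on every (key, value) pair.
            sizes.forEach(1, (name, bytes) -> System.out.println(name + " -> " + bytes));

            // reduce: sum all values; 0 is the basis (identity) for addition.
            long total = sizes.reduceValuesToLong(1, Long::longValue, 0L, Long::sum);
            System.out.println(total); // 42
        }
    }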

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
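For instance, when reducing to a scalar minimum the basis must be the identity for min, i.e. a value that any real input "wins" against; a hedged sketch against the JDK 8 ConcurrentHashMap (data illustrative):

    import java.util.concurrent.ConcurrentHashMap;

    public class BasisDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> latencies = new ConcurrentHashMap<>();
            latencies.put("eu", 120L);
            latencies.put("us", 80L);

            // Long.MAX_VALUE is the identity for min: combined with any value it yields
            // that other value, so an empty map simply reports the basis itself.
            long fastest = latencies.reduceValuesToLong(1, Long::longValue, Long.MAX_VALUE, Math::min);
            System.out.println(fastest); // 80
        }
    }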

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *
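The "null means no result" convention is what lets a search function double as a filter; a short sketch using the JDK 8 ConcurrentHashMap.search (threshold and data illustrative):

    import java.util.concurrent.ConcurrentHashMap;

    public class SearchDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> sizes = new ConcurrentHashMap<>();
            sizes.put("a.txt", 10L);
            sizes.put("big.bin", 5_000L);

            // Return a non-null result to stop the search, or null to keep looking.
            String firstLarge = sizes.search(1, (name, bytes) -> bytes > 1_000 ? name : null);
            System.out.println(firstLarge); // big.bin (or null if nothing matches)
        }
    }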

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *
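Finding the key for the greatest value, as mentioned here, can be written with the entry-based reduction of the JDK 8 ConcurrentHashMap; a hedged sketch with illustrative names:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class MaxEntryDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> scores = new ConcurrentHashMap<>();
            scores.put("alice", 7L);
            scores.put("bob", 11L);

            // Reduce entries pairwise, keeping the one with the larger value.
            Map.Entry<String, Long> best =
                    scores.reduceEntries(1, (a, b) -> a.getValue() >= b.getValue() ? a : b);
            System.out.println(best.getKey()); // bob
        }
    }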

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * <pre> {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int half = split >>> 1; half > 0; half >>>= 1)
+     *       (subtasks = new SumValues(s.split(), half, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }</pre>
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node[] table; + + /** + * The counter maintaining number of elements. + */ + private transient final LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(Node[] tab, int i) { // used by Iter + return (Node)UNSAFE.getObjectVolatile(tab, ((long)i< 1 ? 64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. 
+ */ + final void tryAwaitLock(Node[] tab, int i) { + if (tab != null && i >= 0 && i < tab.length) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + + // Unsafe mechanics for casHash + private static final sun.misc.Unsafe UNSAFE; + private static final long hashOffset; + + static { + try { + UNSAFE = getUnsafe(); + Class k = Node.class; + hashOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("hash")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) 
The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (Node[] tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + 
replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } 
finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for 
the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + Node[] tab; int n, sc; + while ((tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
+ * + * @return the new table + */ + private static final Node[] rebuild(Node[] tab) { + int n = tab.length; + Node[] nextTab = new Node[n << 1]; + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. + */ + private static void splitBin(Node[] nextTab, int i, Node e) { + int bit = nextTab.length >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. 
+ */ + private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { + int bit = nextTab.length >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. 
Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + Node[] tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; Node[] t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? 
i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. 
+ * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

+     * <pre> {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     *   return value;}</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
+     * <pre> {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}</pre>
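+     *
+     * <p>A further minimal sketch of the same idiom, here as a
+     * memoizing cache; {@code Result}, {@code name} and
+     * {@code loadExpensively} are hypothetical stand-ins:
+     *
+     * <pre> {@code
+     * final ConcurrentHashMapV8<String, Result> cache =
+     *   new ConcurrentHashMapV8<String, Result>();
+     * Result r = cache.computeIfAbsent(name, new Fun<String, Result>() {
+     *   public Result apply(String k) {
+     *     return loadExpensively(k);   // hypothetical expensive computation
+     *   }});}</pre>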
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
+     * <pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
+     * <pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
+     * <pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *    return (v == null) ? msg : v + msg; }});}</pre>
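+     *
+     * <p>A further minimal sketch: maintaining a per-key count and
+     * removing the entry when it drops to zero (returning {@code null}
+     * removes the mapping); {@code counts} and {@code delta} are
+     * hypothetical:
+     *
+     * <pre> {@code
+     * counts.compute(key, new BiFun<Key, Integer, Integer>() {
+     *   public Integer apply(Key k, Integer v) {
+     *     int next = (v == null ? 0 : v.intValue()) + delta; // delta is hypothetical
+     *     return (next == 0) ? null : Integer.valueOf(next);
+     *   }});}</pre>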
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
+     * <pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == null) { + init = true; + Node[] tab = new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
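+     *
+     * <p>A minimal usage sketch ({@code handle} is a hypothetical
+     * callback): the loop keeps running while other threads update
+     * the map, and never throws {@link ConcurrentModificationException}:
+     *
+     * <pre> {@code
+     * for (Map.Entry<K, V> e : map.entrySet()) {
+     *   handle(e.getKey(), e.getValue()); // hypothetical per-entry callback
+     * }}</pre>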
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long counterOffset; + private static final long sizeCtlOffset; + private static final long ABASE; + private static final int ASHIFT; + + static { + int ss; + try { + UNSAFE = getUnsafe(); + Class k = ConcurrentHashMapV8.class; + counterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("counter")); + sizeCtlOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + Class sc = Node[].class; + ABASE = UNSAFE.arrayBaseOffset(sc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java new file mode 100644 index 0000000..47a923c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java @@ -0,0 +1,203 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *

This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
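+ *
+ * <p>A minimal usage sketch (the {@code requests} counter is a
+ * hypothetical shared field): many threads call {@code increment},
+ * and a reporting thread reads {@code sum}:
+ *
+ * <pre> {@code
+ * final LongAdder requests = new LongAdder(); // hypothetical shared counter
+ * // in each worker thread:
+ * requests.increment();
+ * // in a reporting thread:
+ * long handled = requests.sum();}</pre>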

This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *

jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java new file mode 100644 index 0000000..93a277f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java @@ -0,0 +1,342 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. 
Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. 
+ */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java new file mode 100644 index 0000000..b7fc5a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java @@ -0,0 +1,3800 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. 
However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *

A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *
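// --- Editor's illustrative sketch (not part of the vendored jsr166e source) ---
// The paragraph above suggests using the map as a scalable frequency counter by pairing
// computeIfAbsent with LongAdder. The sketch shows that pattern with the JDK's
// java.util.concurrent classes (which adopted these jsr166e designs); names such as
// FrequencyMapSketch are hypothetical.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

class FrequencyMapSketch {
    static final ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();

    static void record(String word) {
        // Atomically get-or-create the counter, then do a contention-friendly increment.
        freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
    }

    static long count(String word) {
        LongAdder a = freqs.get(word);
        return a == null ? 0L : a.sum();
    }
}
// --- end sketch ---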

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *
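// --- Editor's illustrative sketch (not part of the vendored jsr166e source) ---
// A small check of the null-rejection contract stated above, shown with the JDK
// ConcurrentHashMap, which follows the same rule; NullKeysSketch is a hypothetical name.
import java.util.concurrent.ConcurrentHashMap;

class NullKeysSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> m = new ConcurrentHashMap<>();
        try {
            m.put(null, "x");                    // keys must be non-null
        } catch (NullPointerException expected) {
            System.out.println("null key rejected");
        }
        try {
            m.put("x", null);                    // values must be non-null
        } catch (NullPointerException expected) {
            System.out.println("null value rejected");
        }
    }
}
// --- end sketch ---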

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *

+ * + *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
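// --- Editor's illustrative sketch (not part of the vendored jsr166e source) ---
// The paragraph above says the "basis" of a primitive reduction should be the identity
// element of the reducer. The sketch uses the JDK ConcurrentHashMap bulk methods that
// descend from this design; ReductionBasisSketch is a hypothetical name.
import java.util.concurrent.ConcurrentHashMap;

class ReductionBasisSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Long> m = new ConcurrentHashMap<>();
        m.put("a", 3L);
        m.put("b", 7L);
        // Sum: basis 0 is the identity for +.
        long sum = m.reduceValuesToLong(1L, Long::longValue, 0L, Long::sum);
        // Minimum: basis Long.MAX_VALUE is the identity for min.
        long min = m.reduceValuesToLong(1L, Long::longValue, Long.MAX_VALUE, Math::min);
        System.out.println("sum=" + sum + " min=" + min);
    }
}
// --- end sketch ---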

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int sp = split >>> 1; sp > 0; sp >>>= 1)
+     *       (subtasks = new SumValues(s.split(), sp, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
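// --- Editor's illustrative sketch (not part of the vendored jsr166e source) ---
// The overview above reserves the top two bits of a node's hash for control state
// (00 normal, 01 locked, 11 locked with waiter, 10 forwarding). The constants below
// mirror MOVED/LOCKED/WAITING/HASH_BITS declared later in this file; the describe()
// helper is a hypothetical illustration of how those bits are tested.
class HashEncodingSketch {
    static final int MOVED     = 0x80000000; // 10 - forwarding node (low bits zero)
    static final int LOCKED    = 0x40000000; // 01 - bin head locked
    static final int WAITING   = 0xc0000000; // 11 - locked, a thread is waiting
    static final int HASH_BITS = 0x3fffffff; // 00 prefix - usable hash bits

    static String describe(int hash) {
        if (hash == MOVED)               return "forwarding node";
        if ((hash & WAITING) == WAITING) return "locked, waiter present";
        if ((hash & LOCKED) != 0)        return "locked";
        return "normal, masked hash = 0x" + Integer.toHexString(hash & HASH_BITS);
    }
}
// --- end sketch ---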
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
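// --- Editor's illustrative sketch (not part of the vendored jsr166e source) ---
// The bin-size probabilities quoted above are the Poisson(0.5) terms
// exp(-0.5) * 0.5^k / k!. This throwaway main() reproduces that table.
class PoissonBinSizesSketch {
    public static void main(String[] args) {
        double p = Math.exp(-0.5);          // k = 0 term: 0.60653066
        for (int k = 0; k <= 8; k++) {
            System.out.printf("%d: %.8f%n", k, p);
            p *= 0.5 / (k + 1);             // next term of the series
        }
    }
}
// --- end sketch ---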
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
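// --- Editor's illustrative sketch (not part of the vendored jsr166e source) ---
// The resizing discussion above relies on a property of power-of-two tables: when the
// capacity doubles from n to 2n, a key's bin index either stays the same or moves by
// exactly n. This hypothetical check demonstrates that invariant.
class PowerOfTwoResizeSketch {
    public static void main(String[] args) {
        int n = 16;                                   // assumed old capacity (power of two)
        java.util.Random r = new java.util.Random(42);
        for (int i = 0; i < 1000; i++) {
            int h = r.nextInt() & 0x3fffffff;         // usable hash bits, as with HASH_BITS
            int oldIndex = h & (n - 1);
            int newIndex = h & (2 * n - 1);
            if (newIndex != oldIndex && newIndex != oldIndex + n)
                throw new AssertionError("index moved by something other than n");
        }
        System.out.println("bins stay put or shift by the old capacity only");
    }
}
// --- end sketch ---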
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile AtomicReferenceArray table; + + /** + * The counter maintaining number of elements. + */ + private transient LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(AtomicReferenceArray tab, int i) { // used by Iter + return tab.get(i); + } + + private static final boolean casTabAt(AtomicReferenceArray tab, int i, Node c, Node v) { + return tab.compareAndSet(i, c, v); + } + + private static final void setTabAt(AtomicReferenceArray tab, int i, Node v) { + tab.set(i, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. + */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return HASH_UPDATER.compareAndSet(this, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 
64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. + */ + final void tryAwaitLock(AtomicReferenceArray tab, int i) { + if (tab != null && i >= 0 && i < tab.length()) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. 
For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 
0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? 
null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(AtomicReferenceArray tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (AtomicReferenceArray tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (AtomicReferenceArray)ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new 
Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + 
try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + 
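The comment block that introduces these methods describes a shared skeleton: initialize the table on demand, CAS a new node into an empty bin with no locking, follow a forwarding node if the bin has moved to a new table, relay to a TreeBin where a bin has been treeified, and otherwise lock the bin head, re-validate it, and scan the chain. The stand-alone sketch below (a hypothetical BinnedPutSketch class, not part of this file) illustrates only the two core steps, CAS-into-empty-bin and lock-validate-scan, using a plain synchronized block in place of the hash-bit spin lock used here and omitting resizing, tree bins, and the element counter entirely.

import java.util.concurrent.atomic.AtomicReferenceArray;

final class BinnedPutSketch<K, V> {
    private static final class Node<K, V> {
        final K key;
        volatile V val;
        volatile Node<K, V> next;
        Node(K key, V val, Node<K, V> next) { this.key = key; this.val = val; this.next = next; }
    }

    private final AtomicReferenceArray<Node<K, V>> table;

    BinnedPutSketch(int capacity) {        // capacity is assumed to be a power of two
        table = new AtomicReferenceArray<>(capacity);
    }

    V put(K k, V v) {
        int i = (table.length() - 1) & spread(k.hashCode());
        for (;;) {
            Node<K, V> f = table.get(i);
            if (f == null) {               // empty bin: try to CAS in a new node, no lock
                if (table.compareAndSet(i, null, new Node<>(k, v, null)))
                    return null;
            } else {
                synchronized (f) {         // non-empty bin: lock the head, then re-validate
                    if (table.get(i) != f)
                        continue;          // head changed concurrently; retry the outer loop
                    for (Node<K, V> e = f;; e = e.next) {
                        if (e.key.equals(k)) {        // key present: overwrite and return old value
                            V old = e.val;
                            e.val = v;
                            return old;
                        }
                        if (e.next == null) {         // reached the tail: append a new node
                            e.next = new Node<>(k, v, null);
                            return null;
                        }
                    }
                }
            }
        }
    }

    private static int spread(int h) {     // crude stand-in for the map's hash spreader
        return (h ^ (h >>> 16)) & 0x7fffffff;
    }
}

The point of the CAS fast path is the one the surrounding comments call out: the most frequent case, inserting into an empty bin, completes without taking any lock.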
/** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final AtomicReferenceArray initTable() { + AtomicReferenceArray tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + AtomicReferenceArray tab; int n, sc; + while ((tab = table) != null && + (n = tab.length()) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + AtomicReferenceArray tab = table; int n; + if (tab == null || (n = tab.length()) == 0) { + n = (sc > c) ? sc : c; + if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
+ * + * @return the new table + */ + private static final AtomicReferenceArray rebuild(AtomicReferenceArray tab) { + int n = tab.length(); + AtomicReferenceArray nextTab = new AtomicReferenceArray(n << 1); + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. 
+ */ + private static void splitBin(AtomicReferenceArray nextTab, int i, Node e) { + int bit = nextTab.length() >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. + */ + private static void splitTreeBin(AtomicReferenceArray nextTab, int i, TreeBin t) { + int bit = nextTab.length() >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + AtomicReferenceArray tab = table; + while (tab != null && i < tab.length()) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). 
Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + AtomicReferenceArray tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; AtomicReferenceArray t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length(); + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. 
+ */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + AtomicReferenceArray t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length(); + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length(); + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (AtomicReferenceArray)ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. 
+ * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

 {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
 {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
 {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
 {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
 {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *    return (v == null) ? msg : v + msg; }});}
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
 {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + this.counter = new LongAdder(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == null) { + init = true; + AtomicReferenceArray tab = new AtomicReferenceArray(n); + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + AtomicReferenceArray tab = table; + for (int i = 0; i < tab.length(); ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *
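+     * <p>Illustrative behavior for some backing map {@code map} and value
+     * {@code v} (editor's sketch; not part of the original Javadoc):
+     * <pre> {@code
+     * Collection<V> vals = map.values();
+     * vals.contains(v);   // backed by map.containsValue(v)
+     * vals.add(v);        // throws UnsupportedOperationException
+     * }</pre>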
<p>
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java new file mode 100644 index 0000000..ecf552a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java @@ -0,0 +1,204 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
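+ * <p>A minimal usage sketch (editor's illustration; the variable names are
+ * placeholders and are not part of the original Javadoc):
+ * <pre> {@code
+ * LongAdder hits = new LongAdder();
+ * hits.increment();        // contended updates are striped across cells
+ * hits.add(5L);
+ * long total = hits.sum(); // non-atomic snapshot of the combined total
+ * }</pre>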
<p>
This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
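+ * <p>(Editor's note) The single-variable alternative would be
+ * {@code AtomicLong total = new AtomicLong(); total.addAndGet(x);}, where
+ * every update contends on the same variable rather than on a striped set
+ * of cells.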
<p>
This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *
<p>
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java new file mode 100644 index 0000000..f521642 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java @@ -0,0 +1,291 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. 
If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + + static AtomicLongFieldUpdater VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); + + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return VALUE_UPDATER.compareAndSet(this, cmp, val); + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. 
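+     * (Editor's note) The lock is acquired with {@code casBusy()} (CAS 0 -> 1)
+     * and released by a plain write of {@code busy = 0} in a {@code finally}
+     * block; see {@code retryUpdate} and the table-initialization path there.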
+ */ + transient volatile int busy; + + AtomicLongFieldUpdater BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); + AtomicIntegerFieldUpdater BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return BASE_UPDATER.compareAndSet(this, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return BUSY_UPDATER.compareAndSet(this, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
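+     * (Editor's note) As with {@code reset()} in subclasses, the result is
+     * only reliable when no other thread is concurrently updating.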
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java new file mode 100644 index 0000000..3ea409f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java @@ -0,0 +1,199 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.16 version + +package com.concurrent_ruby.ext.jsr166y; + +import java.util.Random; + +/** + * A random number generator isolated to the current thread. Like the + * global {@link java.util.Random} generator used by the {@link + * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized + * with an internally generated seed that may not otherwise be + * modified. When applicable, use of {@code ThreadLocalRandom} rather + * than shared {@code Random} objects in concurrent programs will + * typically encounter much less overhead and contention. Use of + * {@code ThreadLocalRandom} is particularly appropriate when multiple + * tasks (for example, each a {@link ForkJoinTask}) use random numbers + * in parallel in thread pools. + * + *
<p>
Usages of this class should typically be of the form: + * {@code ThreadLocalRandom.current().nextX(...)} (where + * {@code X} is {@code Int}, {@code Long}, etc). + * When all usages are of this form, it is never possible to + * accidentally share a {@code ThreadLocalRandom} across multiple threads. + * + *
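+ * <p>For example (editor's illustration only):
+ * <pre> {@code
+ * int roll = ThreadLocalRandom.current().nextInt(1, 7);        // uniform in 1..6
+ * double jitter = ThreadLocalRandom.current().nextDouble(0.5); // in [0, 0.5)
+ * }</pre>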
<p>
This class also provides additional commonly used bounded random + * generation methods. + * + * @since 1.7 + * @author Doug Lea + */ +public class ThreadLocalRandom extends Random { + // same constants as Random, but must be redeclared because private + private static final long multiplier = 0x5DEECE66DL; + private static final long addend = 0xBL; + private static final long mask = (1L << 48) - 1; + + /** + * The random seed. We can't use super.seed. + */ + private long rnd; + + /** + * Initialization flag to permit calls to setSeed to succeed only + * while executing the Random constructor. We can't allow others + * since it would cause setting seed in one part of a program to + * unintentionally impact other usages by the thread. + */ + boolean initialized; + + // Padding to help avoid memory contention among seed updates in + // different TLRs in the common case that they are located near + // each other. + private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; + + /** + * The actual ThreadLocal + */ + private static final ThreadLocal localRandom = + new ThreadLocal() { + protected ThreadLocalRandom initialValue() { + return new ThreadLocalRandom(); + } + }; + + + /** + * Constructor called only by localRandom.initialValue. + */ + ThreadLocalRandom() { + super(); + initialized = true; + } + + /** + * Returns the current thread's {@code ThreadLocalRandom}. + * + * @return the current thread's {@code ThreadLocalRandom} + */ + public static ThreadLocalRandom current() { + return localRandom.get(); + } + + /** + * Throws {@code UnsupportedOperationException}. Setting seeds in + * this generator is not supported. + * + * @throws UnsupportedOperationException always + */ + public void setSeed(long seed) { + if (initialized) + throw new UnsupportedOperationException(); + rnd = (seed ^ multiplier) & mask; + } + + protected int next(int bits) { + rnd = (rnd * multiplier + addend) & mask; + return (int) (rnd >>> (48-bits)); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @throws IllegalArgumentException if least greater than or equal + * to bound + * @return the next value + */ + public int nextInt(int least, int bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextInt(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public long nextLong(long n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + // Divide n by two until small enough for nextInt. On each + // iteration (at most 31 of them but usually much less), + // randomly choose both whether to include high bit in result + // (offset) and whether to continue with the lower vs upper + // half (which makes a difference only if odd). + long offset = 0; + while (n >= Integer.MAX_VALUE) { + int bits = next(2); + long half = n >>> 1; + long nextn = ((bits & 2) == 0) ? half : n - half; + if ((bits & 1) == 0) + offset += n - nextn; + n = nextn; + } + return offset + nextInt((int) n); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). 
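+     * For example (editor's note), {@code nextLong(10L, 20L)} returns a
+     * value in the range {@code 10..19}.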
+ * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public long nextLong(long least, long bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextLong(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed {@code double} value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public double nextDouble(double n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + return nextDouble() * n; + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public double nextDouble(double least, double bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextDouble() * (bound - least) + least; + } + + private static final long serialVersionUID = -5851777807851030925L; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent-ruby.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent-ruby.rb new file mode 100644 index 0000000..08917e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent-ruby.rb @@ -0,0 +1 @@ +require_relative "./concurrent" diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent.rb new file mode 100644 index 0000000..87de46f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent.rb @@ -0,0 +1,134 @@ +require 'concurrent/version' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' + +require 'concurrent/atomics' +require 'concurrent/executors' +require 'concurrent/synchronization' + +require 'concurrent/atomic/atomic_markable_reference' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/agent' +require 'concurrent/atom' +require 'concurrent/array' +require 'concurrent/hash' +require 'concurrent/set' +require 'concurrent/map' +require 'concurrent/tuple' +require 'concurrent/async' +require 'concurrent/dataflow' +require 'concurrent/delay' +require 'concurrent/exchanger' +require 'concurrent/future' +require 'concurrent/immutable_struct' +require 'concurrent/ivar' +require 'concurrent/maybe' +require 'concurrent/mutable_struct' +require 'concurrent/mvar' +require 'concurrent/promise' +require 'concurrent/scheduled_task' +require 'concurrent/settable_struct' +require 'concurrent/timer_task' +require 'concurrent/tvar' +require 'concurrent/promises' + +require 'concurrent/thread_safe/synchronized_delegator' +require 'concurrent/thread_safe/util' + +require 'concurrent/options' + +# @!macro internal_implementation_note +# +# @note **Private Implementation:** This abstraction is a private, internal +# implementation detail. It should never be used directly. 
+ +# @!macro monotonic_clock_warning +# +# @note Time calculations on all platforms and languages are sensitive to +# changes to the system clock. To alleviate the potential problems +# associated with changing the system clock while an application is running, +# most modern operating systems provide a monotonic clock that operates +# independently of the system clock. A monotonic clock cannot be used to +# determine human-friendly clock times. A monotonic clock is used exclusively +# for calculating time intervals. Not all Ruby platforms provide access to an +# operating system monotonic clock. On these platforms a pure-Ruby monotonic +# clock will be used as a fallback. An operating system monotonic clock is both +# faster and more reliable than the pure-Ruby implementation. The pure-Ruby +# implementation should be fast and reliable enough for most non-realtime +# operations. At this time the common Ruby platforms that provide access to an +# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions). +# +# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3) + +# @!macro copy_options +# +# ## Copy Options +# +# Object references in Ruby are mutable. This can lead to serious +# problems when the {#value} of an object is a mutable reference. Which +# is always the case unless the value is a `Fixnum`, `Symbol`, or similar +# "primitive" data type. Each instance can be configured with a few +# options that can help protect the program from potentially dangerous +# operations. Each of these options can be optionally set when the object +# instance is created: +# +# * `:dup_on_deref` When true the object will call the `#dup` method on +# the `value` object every time the `#value` method is called +# (default: false) +# * `:freeze_on_deref` When true the object will call the `#freeze` +# method on the `value` object every time the `#value` method is called +# (default: false) +# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run +# every time the `#value` method is called. The `Proc` will be given +# the current `value` as its only argument and the result returned by +# the block will be the return value of the `#value` call. When `nil` +# this option will be ignored (default: nil) +# +# When multiple deref options are set the order of operations is strictly defined. +# The order of deref operations is: +# * `:copy_on_deref` +# * `:dup_on_deref` +# * `:freeze_on_deref` +# +# Because of this ordering there is no need to `#freeze` an object created by a +# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`. +# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is +# as close to the behavior of a "pure" functional language (like Erlang, Clojure, +# or Haskell) as we are likely to get in Ruby. + +# @!macro deref_options +# +# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before +# returning the data from {#value} +# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before +# returning the data from {#value} +# @option opts [Proc] :copy_on_deref (nil) When calling the {#value} +# method, call the given proc passing the internal value as the sole +# argument then return the new value returned from the proc. + +# @!macro executor_and_deref_options +# +# @param [Hash] opts the options used to define the behavior at update and deref +# and to specify the executor on which to perform actions +# @option opts [Executor] :executor when set use the given `Executor` instance. 
+# Three special values are also supported: `:io` returns the global pool for +# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast +# operations, and `:immediate` returns the global `ImmediateExecutor` object. +# @!macro deref_options + +# @!macro warn.edge +# @api Edge +# @note **Edge Features** are under active development and may change frequently. +# +# - Deprecations are not added before incompatible changes. +# - Edge version: _major_ is always 0, _minor_ bump means incompatible change, +# _patch_ bump means compatible change. +# - Edge features may also lack tests and documentation. +# - Features developed in `concurrent-ruby-edge` are expected to move +# to `concurrent-ruby` when finalised. + + +# {include:file:README.md} +module Concurrent +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/agent.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/agent.rb new file mode 100644 index 0000000..815dca0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/agent.rb @@ -0,0 +1,587 @@ +require 'concurrent/configuration' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/thread_local_var' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents) + # function. An agent is a shared, mutable variable providing independent, + # uncoordinated, *asynchronous* change of individual values. Best used when + # the value will undergo frequent, complex updates. Suitable when the result + # of an update does not need to be known immediately. `Agent` is (mostly) + # functionally equivalent to Clojure's agent, except where the runtime + # prevents parity. + # + # Agents are reactive, not autonomous - there is no imperative message loop + # and no blocking receive. The state of an Agent should be itself immutable + # and the `#value` of an Agent is always immediately available for reading by + # any thread without any messages, i.e. observation does not require + # cooperation or coordination. + # + # Agent action dispatches are made using the various `#send` methods. These + # methods always return immediately. At some point later, in another thread, + # the following will happen: + # + # 1. The given `action` will be applied to the state of the Agent and the + # `args`, if any were supplied. + # 2. The return value of `action` will be passed to the validator lambda, + # if one has been set on the Agent. + # 3. If the validator succeeds or if no validator was given, the return value + # of the given `action` will become the new `#value` of the Agent. See + # `#initialize` for details. + # 4. If any observers were added to the Agent, they will be notified. See + # `#add_observer` for details. + # 5. If during the `action` execution any other dispatches are made (directly + # or indirectly), they will be held until after the `#value` of the Agent + # has been changed. + # + # If any exceptions are thrown by an action function, no nested dispatches + # will occur, and the exception will be cached in the Agent itself. When an + # Agent has errors cached, any subsequent interactions will immediately throw + # an exception, until the agent's errors are cleared. Agent errors can be + # examined with `#error` and the agent restarted with `#restart`. 
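+  # For example (editor's sketch, not part of the original documentation; the
+  # short sleep just gives the background action time to run):
+  #
+  # ```
+  # agent = Concurrent::Agent.new(0) # no error handler, so the error mode is :fail
+  # agent.send { |value| raise 'boom' }
+  # sleep(0.1)
+  # agent.failed? #=> true
+  # agent.error   #=> #<RuntimeError: boom>
+  # agent.restart(0) # clears the error so new dispatches are accepted again
+  # ```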
+ # + # The actions of all Agents get interleaved amongst threads in a thread pool. + # At any point in time, at most one action for each Agent is being executed. + # Actions dispatched to an agent from another single agent or thread will + # occur in the order they were sent, potentially interleaved with actions + # dispatched to the same agent from other sources. The `#send` method should + # be used for actions that are CPU limited, while the `#send_off` method is + # appropriate for actions that may block on IO. + # + # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an agent with an initial value + # agent = Concurrent::Agent.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # agent.send{|set| next_fibonacci(set) } + # end + # + # # wait for them to complete + # agent.await + # + # # get the current value + # agent.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Agents support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time an action dispatch returns and + # the new value is successfully validated. Observation will *not* occur if the + # action raises an exception, if validation fails, or when a {#restart} occurs. + # + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Agent when the action began + # processing. The `new_value` is the value to which the Agent was set when the + # action completed. Note that `old_value` and `new_value` may be the same. + # This is not an error. It simply means that the action returned the same + # value. + # + # ## Nested Actions + # + # It is possible for an Agent action to post further actions back to itself. + # The nested actions will be enqueued normally then processed *after* the + # outer action completes, in the order they were sent, possibly interleaved + # with action dispatches from other threads. Nested actions never deadlock + # with one another and a failure in a nested action will never affect the + # outer action. + # + # Nested actions can be called using the Agent reference from the enclosing + # scope or by passing the reference in as a "send" argument. Nested actions + # cannot be post using `self` from within the action block/proc/lambda; `self` + # in this context will not reference the Agent. The preferred method for + # dispatching nested actions is to pass the Agent as an argument. This allows + # Ruby to more effectively manage the closing scope. + # + # Prefer this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send(agent) do |value, this| + # this.send {|v| v + 42 } + # 3.14 + # end + # agent.value #=> 45.14 + # ``` + # + # Over this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send do |value| + # agent.send {|v| v + 42 } + # 3.14 + # end + # ``` + # + # @!macro agent_await_warning + # + # **NOTE** Never, *under any circumstances*, call any of the "await" methods + # ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action + # block/proc/lambda. The call will block the Agent and will always fail. 
+ # Calling either {#await} or {#wait} (with a timeout of `nil`) will + # hopelessly deadlock the Agent with no possibility of recovery. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/Agents Clojure Agents + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Agent < Synchronization::LockableObject + include Concern::Observable + + ERROR_MODES = [:continue, :fail].freeze + private_constant :ERROR_MODES + + AWAIT_FLAG = ::Object.new + private_constant :AWAIT_FLAG + + AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG } + private_constant :AWAIT_ACTION + + DEFAULT_ERROR_HANDLER = ->(agent, error) { nil } + private_constant :DEFAULT_ERROR_HANDLER + + DEFAULT_VALIDATOR = ->(value) { true } + private_constant :DEFAULT_VALIDATOR + + Job = Struct.new(:action, :args, :executor, :caller) + private_constant :Job + + # Raised during action processing or any other time in an Agent's lifecycle. + class Error < StandardError + def initialize(message = nil) + message ||= 'agent must be restarted before jobs can post' + super(message) + end + end + + # Raised when a new value obtained during action processing or at `#restart` + # fails validation. + class ValidationError < Error + def initialize(message = nil) + message ||= 'invalid value' + super(message) + end + end + + # The error mode this Agent is operating in. See {#initialize} for details. + attr_reader :error_mode + + # Create a new `Agent` with the given initial value and options. + # + # The `:validator` option must be `nil` or a side-effect free proc/lambda + # which takes one argument. On any intended value change the validator, if + # provided, will be called. If the new value is invalid the validator should + # return `false` or raise an error. + # + # The `:error_handler` option must be `nil` or a proc/lambda which takes two + # arguments. When an action raises an error or validation fails, either by + # returning false or raising an error, the error handler will be called. The + # arguments to the error handler will be a reference to the agent itself and + # the error object which was raised. + # + # The `:error_mode` may be either `:continue` (the default if an error + # handler is given) or `:fail` (the default if error handler nil or not + # given). + # + # If an action being run by the agent throws an error or doesn't pass + # validation the error handler, if present, will be called. After the + # handler executes if the error mode is `:continue` the Agent will continue + # as if neither the action that caused the error nor the error itself ever + # happened. + # + # If the mode is `:fail` the Agent will become {#failed?} and will stop + # accepting new action dispatches. Any previously queued actions will be + # held until {#restart} is called. The {#value} method will still work, + # returning the value of the Agent before the error. + # + # @param [Object] initial the initial value + # @param [Hash] opts the configuration options + # + # @option opts [Symbol] :error_mode either `:continue` or `:fail` + # @option opts [nil, Proc] :error_handler the (optional) error handler + # @option opts [nil, Proc] :validator the (optional) validation procedure + def initialize(initial, opts = {}) + super() + synchronize { ns_initialize(initial, opts) } + end + + # The current value (state) of the Agent, irrespective of any pending or + # in-progress actions. The value is always available and is non-blocking. 
+ # + # @return [Object] the current value + def value + @current.value # TODO (pitr 12-Sep-2015): broken unsafe read? + end + + alias_method :deref, :value + + # When {#failed?} and {#error_mode} is `:fail`, returns the error object + # which caused the failure, else `nil`. When {#error_mode} is `:continue` + # will *always* return `nil`. + # + # @return [nil, Error] the error which caused the failure when {#failed?} + def error + @error.value + end + + alias_method :reason, :error + + # @!macro agent_send + # + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Action dispatches are only allowed when the Agent + # is not {#failed?}. + # + # The action must be a block/proc/lambda which takes 1 or more arguments. + # The first argument is the current {#value} of the Agent. Any arguments + # passed to the send method via the `args` parameter will be passed to the + # action as the remaining arguments. The action must return the new value + # of the Agent. + # + # * {#send} and {#send!} should be used for actions that are CPU limited + # * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that + # may block on IO + # * {#send_via} and {#send_via!} are used when a specific executor is to + # be used for the action + # + # @param [Array] args zero or more arguments to be passed to + # the action + # @param [Proc] action the action dispatch to be enqueued + # + # @yield [agent, value, *args] process the old value and return the new + # @yieldparam [Object] value the current {#value} of the Agent + # @yieldparam [Array] args zero or more arguments to pass to the + # action + # @yieldreturn [Object] the new value of the Agent + # + # @!macro send_return + # @return [Boolean] true if the action is successfully enqueued, false if + # the Agent is {#failed?} + def send(*args, &action) + enqueue_action_job(action, args, Concurrent.global_fast_executor) + end + + # @!macro agent_send + # + # @!macro send_bang_return_and_raise + # @return [Boolean] true if the action is successfully enqueued + # @raise [Concurrent::Agent::Error] if the Agent is {#failed?} + def send!(*args, &action) + raise Error.new unless send(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + def send_off(*args, &action) + enqueue_action_job(action, args, Concurrent.global_io_executor) + end + + alias_method :post, :send_off + + # @!macro agent_send + # @!macro send_bang_return_and_raise + def send_off!(*args, &action) + raise Error.new unless send_off(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via(executor, *args, &action) + enqueue_action_job(action, args, executor) + end + + # @!macro agent_send + # @!macro send_bang_return_and_raise + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via!(executor, *args, &action) + raise Error.new unless send_via(executor, *args, &action) + true + end + + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Appropriate for actions that may block on IO. 
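+    # For example (editor's sketch):
+    #
+    # ```
+    # journal = Concurrent::Agent.new([])
+    # journal << ->(entries) { entries + ['started'] }
+    # journal.await.value #=> ['started']
+    # ```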
+ # + # @param [Proc] action the action dispatch to be enqueued + # @return [Concurrent::Agent] self + # @see #send_off + def <<(action) + send_off(&action) + self + end + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far, from this thread or nested by the Agent, have occurred. Will + # block when {#failed?}. Will never return if a failed Agent is {#restart} + # with `:clear_actions` true. + # + # Returns a reference to `self` to support method chaining: + # + # ``` + # current_value = agent.await.value + # ``` + # + # @return [Boolean] self + # + # @!macro agent_await_warning + def await + wait(nil) + self + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout) + wait(timeout.to_f) + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # + # @!macro agent_await_warning + def await_for!(timeout) + raise Concurrent::TimeoutError unless wait(timeout.to_f) + true + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. Will block indefinitely when timeout is nil or not given. + # + # Provided mainly for consistency with other classes in this library. Prefer + # the various `await` methods instead. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def wait(timeout = nil) + latch = Concurrent::CountDownLatch.new(1) + enqueue_await_job(latch) + latch.wait(timeout) + end + + # Is the Agent in a failed state? + # + # @see #restart + def failed? + !@error.value.nil? + end + + alias_method :stopped?, :failed? + + # When an Agent is {#failed?}, changes the Agent {#value} to `new_value` + # then un-fails the Agent so that action dispatches are allowed again. If + # the `:clear_actions` option is give and true, any actions queued on the + # Agent that were being held while it was failed will be discarded, + # otherwise those held actions will proceed. The `new_value` must pass the + # validator if any, or `restart` will raise an exception and the Agent will + # remain failed with its old {#value} and {#error}. Observers, if any, will + # not be notified of the new state. + # + # @param [Object] new_value the new value for the Agent once restarted + # @param [Hash] opts the configuration options + # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed + # actions should be discarded on restart, else false (default: false) + # @return [Boolean] true + # + # @raise [Concurrent:AgentError] when not failed + def restart(new_value, opts = {}) + clear_actions = opts.fetch(:clear_actions, false) + synchronize do + raise Error.new('agent is not failed') unless failed? 
+ raise ValidationError unless ns_validate(new_value) + @current.value = new_value + @error.value = nil + @queue.clear if clear_actions + ns_post_next_job unless @queue.empty? + end + true + end + + class << self + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far to all the given Agents, from this thread or nested by the + # given Agents, have occurred. Will block when any of the agents are + # failed. Will never return if a failed Agent is restart with + # `:clear_actions` true. + # + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true + # + # @!macro agent_await_warning + def await(*agents) + agents.each { |agent| agent.await } + true + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout, *agents) + end_at = Concurrent.monotonic_time + timeout.to_f + ok = agents.length.times do |i| + break false if (delay = end_at - Concurrent.monotonic_time) < 0 + break false unless agents[i].await_for(delay) + end + !!ok + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # @!macro agent_await_warning + def await_for!(timeout, *agents) + raise Concurrent::TimeoutError unless await_for(timeout, *agents) + true + end + end + + private + + def ns_initialize(initial, opts) + @error_mode = opts[:error_mode] + @error_handler = opts[:error_handler] + + if @error_mode && !ERROR_MODES.include?(@error_mode) + raise ArgumentError.new('unrecognized error mode') + elsif @error_mode.nil? + @error_mode = @error_handler ? :continue : :fail + end + + @error_handler ||= DEFAULT_ERROR_HANDLER + @validator = opts.fetch(:validator, DEFAULT_VALIDATOR) + @current = Concurrent::AtomicReference.new(initial) + @error = Concurrent::AtomicReference.new(nil) + @caller = Concurrent::ThreadLocalVar.new(nil) + @queue = [] + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + def enqueue_action_job(action, args, executor) + raise ArgumentError.new('no action given') unless action + job = Job.new(action, args, executor, @caller.value || Thread.current.object_id) + synchronize { ns_enqueue_job(job) } + end + + def enqueue_await_job(latch) + synchronize do + if (index = ns_find_last_job_for_thread) + job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor, + Thread.current.object_id) + ns_enqueue_job(job, index+1) + else + latch.count_down + true + end + end + end + + def ns_enqueue_job(job, index = nil) + # a non-nil index means this is an await job + return false if index.nil? && failed? 
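+      # (editor's note) action jobs are appended at the tail, while await jobs
+      # carry an explicit index so they land immediately after the caller's
+      # last pending job (see #enqueue_await_job)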
+ index ||= @queue.length + @queue.insert(index, job) + # if this is the only job, post to executor + ns_post_next_job if @queue.length == 1 + true + end + + def ns_post_next_job + @queue.first.executor.post { execute_next_job } + end + + def execute_next_job + job = synchronize { @queue.first } + old_value = @current.value + + @caller.value = job.caller # for nested actions + new_value = job.action.call(old_value, *job.args) + @caller.value = nil + + return if new_value == AWAIT_FLAG + + if ns_validate(new_value) + @current.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + else + handle_error(ValidationError.new) + end + rescue => error + handle_error(error) + ensure + synchronize do + @queue.shift + unless failed? || @queue.empty? + ns_post_next_job + end + end + end + + def ns_validate(value) + @validator.call(value) + rescue + false + end + + def handle_error(error) + # stop new jobs from posting + @error.value = error if @error_mode == :fail + @error_handler.call(self, error) + rescue + # do nothing + end + + def ns_find_last_job_for_thread + @queue.rindex { |job| job.caller == Thread.current.object_id } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/array.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/array.rb new file mode 100644 index 0000000..60e5b56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/array.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_array + # + # A thread-safe subclass of Array. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array` + # which is concatenation of `a` and `b`, then it writes the concatenation to `a`. + # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#concat` instead. + # + # @see http://ruby-doc.org/core/Array.html Ruby standard library `Array` + + # @!macro internal_implementation_note + ArrayImplementation = case + when Concurrent.on_cruby? + # Array is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Array + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyArray < ::Array + include JRuby::Synchronized + end + JRubyArray + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_rbx RbxArray + RbxArray + + when Concurrent.on_truffleruby? 
+ require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray + TruffleRubyArray + + else + warn 'Possibly unsupported Ruby implementation' + ::Array + end + private_constant :ArrayImplementation + + # @!macro concurrent_array + class Array < ArrayImplementation + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/async.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/async.rb new file mode 100644 index 0000000..5e125e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/async.rb @@ -0,0 +1,448 @@ +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # A mixin module that provides simple asynchronous behavior to a class, + # turning it into a simple actor. Loosely based on Erlang's + # [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without + # supervision or linking. + # + # A more feature-rich {Concurrent::Actor} is also available when the + # capabilities of `Async` are too limited. + # + # ```cucumber + # Feature: + # As a stateful, plain old Ruby class + # I want safe, asynchronous behavior + # So my long-running methods don't block the main thread + # ``` + # + # The `Async` module is a way to mix simple yet powerful asynchronous + # capabilities into any plain old Ruby object or class, turning each object + # into a simple Actor. Method calls are processed on a background thread. The + # caller is free to perform other actions while processing occurs in the + # background. + # + # Method calls to the asynchronous object are made via two proxy methods: + # `async` (alias `cast`) and `await` (alias `call`). These proxy methods post + # the method call to the object's background thread and return a "future" + # which will eventually contain the result of the method call. + # + # This behavior is loosely patterned after Erlang's `gen_server` behavior. + # When an Erlang module implements the `gen_server` behavior it becomes + # inherently asynchronous. The `start` or `start_link` function spawns a + # process (similar to a thread but much more lightweight and efficient) and + # returns the ID of the process. Using the process ID, other processes can + # send messages to the `gen_server` via the `cast` and `call` methods. Unlike + # Erlang's `gen_server`, however, `Async` classes do not support linking or + # supervision trees. + # + # ## Basic Usage + # + # When this module is mixed into a class, objects of the class become inherently + # asynchronous. Each object gets its own background thread on which to post + # asynchronous method calls. Asynchronous method calls are executed in the + # background one at a time in the order they are received. + # + # To create an asynchronous class, simply mix in the `Concurrent::Async` module: + # + # ``` + # class Hello + # include Concurrent::Async + # + # def hello(name) + # "Hello, #{name}!" + # end + # end + # ``` + # + # Mixing this module into a class provides each object two proxy methods: + # `async` and `await`. These methods are thread safe with respect to the + # enclosing object. The former proxy allows methods to be called + # asynchronously by posting to the object's internal thread. 
The latter proxy + # allows a method to be called synchronously but does so safely with respect + # to any pending asynchronous method calls and ensures proper ordering. Both + # methods return a {Concurrent::IVar} which can be inspected for the result + # of the proxied method call. Calling a method with `async` will return a + # `:pending` `IVar` whereas `await` will return a `:complete` `IVar`. + # + # ``` + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # ``` + # + # ## Let It Fail + # + # The `async` and `await` proxy methods have built-in error protection based + # on Erlang's famous "let it fail" philosophy. Instance methods should not be + # programmed defensively. When an exception is raised by a delegated method + # the proxy will rescue the exception, expose it to the caller as the `reason` + # attribute of the returned future, then process the next method call. + # + # ## Calling Methods Internally + # + # External method calls should *always* use the `async` and `await` proxy + # methods. When one method calls another method, the `async` proxy should + # rarely be used and the `await` proxy should *never* be used. + # + # When an object calls one of its own methods using the `await` proxy the + # second call will be enqueued *behind* the currently running method call. + # Any attempt to wait on the result will fail as the second call will never + # run until after the current call completes. + # + # Calling a method using the `await` proxy from within a method that was + # itself called using `async` or `await` will irreversibly deadlock the + # object. Do *not* do this, ever. + # + # ## Instance Variables and Attribute Accessors + # + # Instance variables do not need to be thread-safe so long as they are private. + # Asynchronous method calls are processed in the order they are received and + # are processed one at a time. Therefore private instance variables can only + # be accessed by one thread at a time. This is inherently thread-safe. + # + # When using private instance variables within asynchronous methods, the best + # practice is to read the instance variable into a local variable at the start + # of the method then update the instance variable at the *end* of the method. + # This way, should an exception be raised during method execution the internal + # state of the object will not have been changed. + # + # ### Reader Attributes + # + # The use of `attr_reader` is discouraged. Internal state exposed externally, + # when necessary, should be done through accessor methods. The instance + # variables exposed by these methods *must* be thread-safe, or they must be + # called using the `async` and `await` proxy methods. These two approaches are + # subtly different. + # + # When internal state is accessed via the `async` and `await` proxy methods, + # the returned value represents the object's state *at the time the call is + # processed*, which may *not* be the state of the object at the time the call + # is made. + # + # To get the state *at the current* time, irrespective of an enqueued method + # calls, a reader method must be called directly. 
This is inherently unsafe + # unless the instance variable is itself thread-safe, preferably using one + # of the thread-safe classes within this library. Because the thread-safe + # classes within this library are internally-locking or non-locking, they can + # be safely used from within asynchronous methods without causing deadlocks. + # + # Generally speaking, the best practice is to *not* expose internal state via + # reader methods. The best practice is to simply use the method's return value. + # + # ### Writer Attributes + # + # Writer attributes should never be used with asynchronous classes. Changing + # the state externally, even when done in the thread-safe way, is not logically + # consistent. Changes to state need to be timed with respect to all asynchronous + # method calls which my be in-process or enqueued. The only safe practice is to + # pass all necessary data to each method as arguments and let the method update + # the internal state as necessary. + # + # ## Class Constants, Variables, and Methods + # + # ### Class Constants + # + # Class constants do not need to be thread-safe. Since they are read-only and + # immutable they may be safely read both externally and from within + # asynchronous methods. + # + # ### Class Variables + # + # Class variables should be avoided. Class variables represent shared state. + # Shared state is anathema to concurrency. Should there be a need to share + # state using class variables they *must* be thread-safe, preferably + # using the thread-safe classes within this library. When updating class + # variables, never assign a new value/object to the variable itself. Assignment + # is not thread-safe in Ruby. Instead, use the thread-safe update functions + # of the variable itself to change the value. + # + # The best practice is to *never* use class variables with `Async` classes. + # + # ### Class Methods + # + # Class methods which are pure functions are safe. Class methods which modify + # class variables should be avoided, for all the reasons listed above. + # + # ## An Important Note About Thread Safe Guarantees + # + # > Thread safe guarantees can only be made when asynchronous method calls + # > are not mixed with direct method calls. Use only direct method calls + # > when the object is used exclusively on a single thread. Use only + # > `async` and `await` when the object is shared between threads. Once you + # > call a method using `async` or `await`, you should no longer call methods + # > directly on the object. Use `async` and `await` exclusively from then on. + # + # @example + # + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # + # @see Concurrent::Actor + # @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia + # @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server + # @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/ + module Async + + # @!method self.new(*args, &block) + # + # Instanciate a new object and ensure proper initialization of the + # synchronization mechanisms. 
+ # + # @param [Array] args Zero or more arguments to be passed to the + # object's initializer. + # @param [Proc] block Optional block to pass to the object's initializer. + # @return [Object] A properly initialized object of the asynchronous class. + + # Check for the presence of a method on an object and determine if a given + # set of arguments matches the required arity. + # + # @param [Object] obj the object to check against + # @param [Symbol] method the method to check the object for + # @param [Array] args zero or more arguments for the arity check + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + # + # @note This check is imperfect because of the way Ruby reports the arity of + # methods with a variable number of arguments. It is possible to determine + # if too few arguments are given but impossible to determine if too many + # arguments are given. This check may also fail to recognize dynamic behavior + # of the object, such as methods simulated with `method_missing`. + # + # @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity + # @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to? + # @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing + # + # @!visibility private + def self.validate_argc(obj, method, *args) + argc = args.length + arity = obj.method(method).arity + + if arity >= 0 && argc != arity + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})") + elsif arity < 0 && (arity = (arity + 1).abs) > argc + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)") + end + end + + # @!visibility private + def self.included(base) + base.singleton_class.send(:alias_method, :original_new, :new) + base.extend(ClassMethods) + super(base) + end + + # @!visibility private + module ClassMethods + def new(*args, &block) + obj = original_new(*args, &block) + obj.send(:init_synchronization) + obj + end + end + private_constant :ClassMethods + + # Delegates asynchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AsyncDelegator < Synchronization::LockableObject + safe_initialization! + + # Create a new delegator object wrapping the given delegate. + # + # @param [Object] delegate the object to wrap and delegate method calls to + def initialize(delegate) + super() + @delegate = delegate + @queue = [] + @executor = Concurrent.global_io_executor + @ruby_pid = $$ + end + + # Delegates method calls to the wrapped object. + # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + super unless @delegate.respond_to?(method) + Async::validate_argc(@delegate, method, *args) + + ivar = Concurrent::IVar.new + synchronize do + reset_if_forked + @queue.push [ivar, method, args, block] + @executor.post { perform } if @queue.length == 1 + end + + ivar + end + + # Check whether the method is responsive + # + # @param [Symbol] method the method being called + def respond_to_missing?(method, include_private = false) + @delegate.respond_to?(method) || super + end + + # Perform all enqueued tasks. 
+ # + # This method must be called from within the executor. It must not be + # called while already running. It will loop until the queue is empty. + def perform + loop do + ivar, method, args, block = synchronize { @queue.first } + break unless ivar # queue is empty + + begin + ivar.set(@delegate.send(method, *args, &block)) + rescue => error + ivar.fail(error) + end + + synchronize do + @queue.shift + return if @queue.empty? + end + end + end + + def reset_if_forked + if $$ != @ruby_pid + @queue.clear + @ruby_pid = $$ + end + end + end + private_constant :AsyncDelegator + + # Delegates synchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AwaitDelegator + + # Create a new delegator object wrapping the given delegate. + # + # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to + def initialize(delegate) + @delegate = delegate + end + + # Delegates method calls to the wrapped object. + # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + ivar = @delegate.send(method, *args, &block) + ivar.wait + ivar + end + + # Check whether the method is responsive + # + # @param [Symbol] method the method being called + def respond_to_missing?(method, include_private = false) + @delegate.respond_to?(method) || super + end + end + private_constant :AwaitDelegator + + # Causes the chained method call to be performed asynchronously on the + # object's thread. The delegated method will return a future in the + # `:pending` state and the method call will have been scheduled on the + # object's thread. The final disposition of the method call can be obtained + # by inspecting the returned future. + # + # @!macro async_thread_safety_warning + # @note The method call is guaranteed to be thread safe with respect to + # all other method calls against the same object that are called with + # either `async` or `await`. The mutable nature of Ruby references + # (and object orientation in general) prevent any other thread safety + # guarantees. Do NOT mix direct method calls with delegated method calls. + # Use *only* delegated method calls when sharing the object between threads. + # + # @return [Concurrent::IVar] the pending result of the asynchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of + # the requested method + def async + @__async_delegator__ + end + alias_method :cast, :async + + # Causes the chained method call to be performed synchronously on the + # current thread. The delegated will return a future in either the + # `:fulfilled` or `:rejected` state and the delegated method will have + # completed. The final disposition of the delegated method can be obtained + # by inspecting the returned future. 
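To make the two proxies concrete, here is a brief illustrative sketch (not part of the vendored source); it assumes only the `IVar` accessors (`value`, `rejected?`, `reason`, `wait`) that the module documentation above refers to:

```
require 'concurrent'

class Worker
  include Concurrent::Async

  def divide(a, b)
    a / b
  end
end

worker = Worker.new

ok = worker.await.divide(10, 2)  # blocks until the delegated call has completed
ok.value                         #=> 5

# "Let it fail": the exception is captured on the returned IVar, not raised.
bad = worker.async.divide(1, 0)
bad.wait                         # wait for the background call to finish
bad.rejected?                    #=> true
bad.reason                       #=> #<ZeroDivisionError: divided by 0>
```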
+ # + # @!macro async_thread_safety_warning + # + # @return [Concurrent::IVar] the completed result of the synchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of the + # requested method + def await + @__await_delegator__ + end + alias_method :call, :await + + # Initialize the internal serializer and other stnchronization mechanisms. + # + # @note This method *must* be called immediately upon object construction. + # This is the only way thread-safe initialization can be guaranteed. + # + # @!visibility private + def init_synchronization + return self if defined?(@__async_initialized__) && @__async_initialized__ + @__async_initialized__ = true + @__async_delegator__ = AsyncDelegator.new(self) + @__await_delegator__ = AwaitDelegator.new(@__async_delegator__) + self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atom.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atom.rb new file mode 100644 index 0000000..8a45730 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atom.rb @@ -0,0 +1,222 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +# @!macro thread_safe_variable_comparison +# +# ## Thread-safe Variable Classes +# +# Each of the thread-safe variable classes is designed to solve a different +# problem. In general: +# +# * *{Concurrent::Agent}:* Shared, mutable variable providing independent, +# uncoordinated, *asynchronous* change of individual values. Best used when +# the value will undergo frequent, complex updates. Suitable when the result +# of an update does not need to be known immediately. +# * *{Concurrent::Atom}:* Shared, mutable variable providing independent, +# uncoordinated, *synchronous* change of individual values. Best used when +# the value will undergo frequent reads but only occasional, though complex, +# updates. Suitable when the result of an update must be known immediately. +# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated +# atomically. Updates are synchronous but fast. Best used when updates a +# simple set operations. Not suitable when updates are complex. +# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar +# but optimized for the given data type. +# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used +# when two or more threads need to exchange data. The threads will pair then +# block on each other until the exchange is complete. +# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread +# must give a value to another, which must take the value. The threads will +# block on each other until the exchange is complete. +# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which +# holds a different value for each thread which has access. Often used as +# an instance variable in objects which must maintain different state +# for different threads. +# * *{Concurrent::TVar}:* Shared, mutable variables which provide +# *coordinated*, *synchronous*, change of *many* stated. Used when multiple +# value must change together, in an all-or-nothing transaction. 
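To make the comparison above concrete, here is a minimal illustrative sketch (not part of the vendored file) contrasting a bare `AtomicReference` with an `Atom`; it uses only methods documented elsewhere in this diff:

```
require 'concurrent'

# AtomicReference: a bare atomic cell -- fast, but no validation or observers.
ref = Concurrent::AtomicReference.new(0)
ref.update { |v| v + 1 }   # may retry internally if another thread races the update
ref.get                    #=> 1

# Atom: synchronous, validated updates with observer notification.
atom = Concurrent::Atom.new(0, validator: ->(v) { v >= 0 })
atom.swap { |v| v + 1 }    #=> 1
atom.swap { |v| v - 10 }   # validator rejects the result, so the value is unchanged
atom.value                 #=> 1
```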
+ + +module Concurrent + + # Atoms provide a way to manage shared, synchronous, independent state. + # + # An atom is initialized with an initial value and an optional validation + # proc. At any time the value of the atom can be synchronously and safely + # changed. If a validator is given at construction then any new value + # will be checked against the validator and will be rejected if the + # validator returns false or raises an exception. + # + # There are two ways to change the value of an atom: {#compare_and_set} and + # {#swap}. The former will set the new value if and only if it validates and + # the current value matches the new value. The latter will atomically set the + # new value to the result of running the given block if and only if that + # value validates. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an atom with an initial value + # atom = Concurrent::Atom.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # atom.swap{|set| next_fibonacci(set) } + # end + # + # # get the current value + # atom.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Atoms support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time the value of the Atom changes. + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Atom when the change began + # The `new_value` is the value to which the Atom was set when the change + # completed. Note that `old_value` and `new_value` may be the same. This is + # not an error. It simply means that the change operation returned the same + # value. + # + # Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/atoms Clojure Atoms + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Atom < Synchronization::Object + include Concern::Observable + + safe_initialization! + attr_atomic(:value) + private :value=, :swap_value, :compare_and_set_value, :update_value + public :value + alias_method :deref, :value + + # @!method value + # The current value of the atom. + # + # @return [Object] The current value. + + # Create a new atom with the given initial value. + # + # @param [Object] value The initial value + # @param [Hash] opts The options used to configure the atom + # @option opts [Proc] :validator (nil) Optional proc used to validate new + # values. It must accept one and only one argument which will be the + # intended new value. The validator will return true if the new value + # is acceptable else return false (preferrably) or raise an exception. + # + # @!macro deref_options + # + # @raise [ArgumentError] if the validator is not a `Proc` (when given) + def initialize(value, opts = {}) + super() + @Validator = opts.fetch(:validator, -> v { true }) + self.observers = Collection::CopyOnNotifyObserverSet.new + self.value = value + end + + # Atomically swaps the value of atom using the given block. The current + # value will be passed to the block, as will any arguments passed as + # arguments to the function. The new value will be validated against the + # (optional) validator proc given at construction. 
If validation fails the + # value will not be changed. + # + # Internally, {#swap} reads the current value, applies the block to it, and + # attempts to compare-and-set it in. Since another thread may have changed + # the value in the intervening time, it may have to retry, and does so in a + # spin loop. The net effect is that the value will always be the result of + # the application of the supplied block to a current value, atomically. + # However, because the block might be called multiple times, it must be free + # of side effects. + # + # @note The given block may be called multiple times, and thus should be free + # of side effects. + # + # @param [Object] args Zero or more arguments passed to the block. + # + # @yield [value, args] Calculates a new value for the atom based on the + # current value and any supplied arguments. + # @yieldparam value [Object] The current value of the atom. + # @yieldparam args [Object] All arguments passed to the function, in order. + # @yieldreturn [Object] The intended new value of the atom. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + # + # @raise [ArgumentError] When no block is given. + def swap(*args) + raise ArgumentError.new('no block given') unless block_given? + + loop do + old_value = value + new_value = yield(old_value, *args) + begin + break old_value unless valid?(new_value) + break new_value if compare_and_set(old_value, new_value) + rescue + break old_value + end + end + end + + # Atomically sets the value of atom to the new value if and only if the + # current value of the atom is identical to the old value and the new + # value successfully validates against the (optional) validator given + # at construction. + # + # @param [Object] old_value The expected current value. + # @param [Object] new_value The intended new value. + # + # @return [Boolean] True if the value is changed else false. + def compare_and_set(old_value, new_value) + if valid?(new_value) && compare_and_set_value(old_value, new_value) + observers.notify_observers(Time.now, old_value, new_value) + true + else + false + end + end + + # Atomically sets the value of atom to the new value without regard for the + # current value so long as the new value successfully validates against the + # (optional) validator given at construction. + # + # @param [Object] new_value The intended new value. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + def reset(new_value) + old_value = value + if valid?(new_value) + self.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + new_value + else + old_value + end + end + + private + + # Is the new value valid? + # + # @param [Object] new_value The intended new value. 
+ # @return [Boolean] false if the validator function returns false or raises + # an exception else true + def valid?(new_value) + @Validator.call(new_value) + rescue + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb new file mode 100644 index 0000000..fcdeed7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb @@ -0,0 +1,66 @@ +require 'concurrent/constants' + +module Concurrent + + # @!macro thread_local_var + # @!macro internal_implementation_note + # @!visibility private + class AbstractThreadLocalVar + + # @!macro thread_local_var_method_initialize + def initialize(default = nil, &default_block) + if default && block_given? + raise ArgumentError, "Cannot use both value and block as default value" + end + + if block_given? + @default_block = default_block + @default = nil + else + @default_block = nil + @default = default + end + + allocate_storage + end + + # @!macro thread_local_var_method_get + def value + raise NotImplementedError + end + + # @!macro thread_local_var_method_set + def value=(value) + raise NotImplementedError + end + + # @!macro thread_local_var_method_bind + def bind(value, &block) + if block_given? + old_value = self.value + begin + self.value = value + yield + ensure + self.value = old_value + end + end + end + + protected + + # @!visibility private + def allocate_storage + raise NotImplementedError + end + + # @!visibility private + def default + if @default_block + self.value = @default_block.call + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb new file mode 100644 index 0000000..0b0373d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb @@ -0,0 +1,126 @@ +require 'concurrent/atomic/mutex_atomic_boolean' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_boolean_method_initialize + # + # Creates a new `AtomicBoolean` with the given initial value. + # + # @param [Boolean] initial the initial value + + # @!macro atomic_boolean_method_value_get + # + # Retrieves the current `Boolean` value. + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_value_set + # + # Explicitly sets the value. + # + # @param [Boolean] value the new value to be set + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_true_question + # + # Is the current value `true` + # + # @return [Boolean] true if the current value is `true`, else false + + # @!macro atomic_boolean_method_false_question + # + # Is the current value `false` + # + # @return [Boolean] true if the current value is `false`, else false + + # @!macro atomic_boolean_method_make_true + # + # Explicitly sets the value to true. + # + # @return [Boolean] true if value has changed, otherwise false + + # @!macro atomic_boolean_method_make_false + # + # Explicitly sets the value to false. 
+ # + # @return [Boolean] true if value has changed, otherwise false + + ################################################################### + + # @!macro atomic_boolean_public_api + # + # @!method initialize(initial = false) + # @!macro atomic_boolean_method_initialize + # + # @!method value + # @!macro atomic_boolean_method_value_get + # + # @!method value=(value) + # @!macro atomic_boolean_method_value_set + # + # @!method true? + # @!macro atomic_boolean_method_true_question + # + # @!method false? + # @!macro atomic_boolean_method_false_question + # + # @!method make_true + # @!macro atomic_boolean_method_make_true + # + # @!method make_false + # @!macro atomic_boolean_method_make_false + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicBooleanImplementation = case + when defined?(JavaAtomicBoolean) + JavaAtomicBoolean + when defined?(CAtomicBoolean) + CAtomicBoolean + else + MutexAtomicBoolean + end + private_constant :AtomicBooleanImplementation + + # @!macro atomic_boolean + # + # A boolean value that can be updated atomically. Reads and writes to an atomic + # boolean and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicBoolean... + # 2.790000 0.000000 2.790000 ( 2.791454) + # Testing with Concurrent::CAtomicBoolean... + # 0.740000 0.000000 0.740000 ( 0.740206) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicBoolean... + # 5.240000 2.520000 7.760000 ( 3.683000) + # Testing with Concurrent::JavaAtomicBoolean... + # 3.340000 0.010000 3.350000 ( 0.855000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean + # + # @!macro atomic_boolean_public_api + class AtomicBoolean < AtomicBooleanImplementation + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb new file mode 100644 index 0000000..c67166d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb @@ -0,0 +1,143 @@ +require 'concurrent/atomic/mutex_atomic_fixnum' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_fixnum_method_initialize + # + # Creates a new `AtomicFixnum` with the given initial value. + # + # @param [Fixnum] initial the initial value + # @raise [ArgumentError] if the initial value is not a `Fixnum` + + # @!macro atomic_fixnum_method_value_get + # + # Retrieves the current `Fixnum` value. + # + # @return [Fixnum] the current value + + # @!macro atomic_fixnum_method_value_set + # + # Explicitly sets the value. + # + # @param [Fixnum] value the new value to be set + # + # @return [Fixnum] the current value + # + # @raise [ArgumentError] if the new value is not a `Fixnum` + + # @!macro atomic_fixnum_method_increment + # + # Increases the current value by the given amount (defaults to 1). 
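A short illustrative sketch (not part of the vendored file) of the `AtomicBoolean` API above and the `AtomicFixnum` counter documented in this file:

```
require 'concurrent'

flag = Concurrent::AtomicBoolean.new(false)
flag.make_true                 #=> true, the value changed
flag.make_true                 #=> false, it was already true
flag.true?                     #=> true

counter = Concurrent::AtomicFixnum.new(0)
counter.increment              #=> 1
counter.increment(5)           #=> 6
counter.compare_and_set(6, 10) #=> true
counter.value                  #=> 10
```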
+ # + # @param [Fixnum] delta the amount by which to increase the current value + # + # @return [Fixnum] the current value after incrementation + + # @!macro atomic_fixnum_method_decrement + # + # Decreases the current value by the given amount (defaults to 1). + # + # @param [Fixnum] delta the amount by which to decrease the current value + # + # @return [Fixnum] the current value after decrementation + + # @!macro atomic_fixnum_method_compare_and_set + # + # Atomically sets the value to the given updated value if the current + # value == the expected value. + # + # @param [Fixnum] expect the expected value + # @param [Fixnum] update the new value + # + # @return [Boolean] true if the value was updated else false + + # @!macro atomic_fixnum_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # + # @return [Object] the new value + + ################################################################### + + # @!macro atomic_fixnum_public_api + # + # @!method initialize(initial = 0) + # @!macro atomic_fixnum_method_initialize + # + # @!method value + # @!macro atomic_fixnum_method_value_get + # + # @!method value=(value) + # @!macro atomic_fixnum_method_value_set + # + # @!method increment(delta = 1) + # @!macro atomic_fixnum_method_increment + # + # @!method decrement(delta = 1) + # @!macro atomic_fixnum_method_decrement + # + # @!method compare_and_set(expect, update) + # @!macro atomic_fixnum_method_compare_and_set + # + # @!method update + # @!macro atomic_fixnum_method_update + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicFixnumImplementation = case + when defined?(JavaAtomicFixnum) + JavaAtomicFixnum + when defined?(CAtomicFixnum) + CAtomicFixnum + else + MutexAtomicFixnum + end + private_constant :AtomicFixnumImplementation + + # @!macro atomic_fixnum + # + # A numeric value that can be updated atomically. Reads and writes to an atomic + # fixnum and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicFixnum... + # 3.130000 0.000000 3.130000 ( 3.136505) + # Testing with Concurrent::CAtomicFixnum... + # 0.790000 0.000000 0.790000 ( 0.785550) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicFixnum... + # 5.460000 2.460000 7.920000 ( 3.715000) + # Testing with Concurrent::JavaAtomicFixnum... + # 4.520000 0.030000 4.550000 ( 1.187000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong + # + # @!macro atomic_fixnum_public_api + class AtomicFixnum < AtomicFixnumImplementation + # @return [String] Short string representation. 
+ def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb new file mode 100644 index 0000000..f20cd46 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb @@ -0,0 +1,164 @@ +module Concurrent + # An atomic reference which maintains an object reference along with a mark bit + # that can be updated atomically. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html + # java.util.concurrent.atomic.AtomicMarkableReference + class AtomicMarkableReference < ::Concurrent::Synchronization::Object + + attr_atomic(:reference) + private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference + + def initialize(value = nil, mark = false) + super() + self.reference = immutable_array(value, mark) + end + + # Atomically sets the value and mark to the given updated value and + # mark given both: + # - the current value == the expected value && + # - the current mark == the expected mark + # + # @param [Object] expected_val the expected value + # @param [Object] new_val the new value + # @param [Boolean] expected_mark the expected mark + # @param [Boolean] new_mark the new mark + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value or the + # actual mark was not equal to the expected mark + def compare_and_set(expected_val, new_val, expected_mark, new_mark) + # Memoize a valid reference to the current AtomicReference for + # later comparison. + current = reference + curr_val, curr_mark = current + + # Ensure that that the expected marks match. + return false unless expected_mark == curr_mark + + if expected_val.is_a? Numeric + # If the object is a numeric, we need to ensure we are comparing + # the numerical values + return false unless expected_val == curr_val + else + # Otherwise, we need to ensure we are comparing the object identity. + # Theoretically, this could be incorrect if a user monkey-patched + # `Object#equal?`, but they should know that they are playing with + # fire at that point. + return false unless expected_val.equal? curr_val + end + + prospect = immutable_array(new_val, new_mark) + + compare_and_set_reference current, prospect + end + + alias_method :compare_and_swap, :compare_and_set + + # Gets the current reference and marked values. + # + # @return [Array] the current reference and marked values + def get + reference + end + + # Gets the current value of the reference + # + # @return [Object] the current value of the reference + def value + reference[0] + end + + # Gets the current marked value + # + # @return [Boolean] the current marked value + def mark + reference[1] + end + + alias_method :marked?, :mark + + # _Unconditionally_ sets to the given value of both the reference and + # the mark. + # + # @param [Object] new_val the new value + # @param [Boolean] new_mark the new mark + # + # @return [Array] both the new value and the new mark + def set(new_val, new_mark) + self.reference = immutable_array(new_val, new_mark) + end + + # Pass the current value and marked state to the given block, replacing it + # with the block's results. 
May retry if the value changes during the + # block's execution. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and new mark + def update + loop do + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + if compare_and_set old_val, new_val, old_mark, new_mark + return immutable_array(new_val, new_mark) + end + end + end + + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state + # + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + unless compare_and_set old_val, new_val, old_mark, new_mark + fail ::Concurrent::ConcurrentUpdateError, + 'AtomicMarkableReference: Update failed due to race condition.', + 'Note: If you would like to guarantee an update, please use ' + + 'the `AtomicMarkableReference#update` method.' + end + + immutable_array(new_val, new_mark) + end + + # Pass the current value to the given block, replacing it with the + # block's result. Simply return nil if update fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state, or nil if + # the update failed + def try_update + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + return unless compare_and_set old_val, new_val, old_mark, new_mark + + immutable_array(new_val, new_mark) + end + + private + + def immutable_array(*args) + args.freeze + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb new file mode 100644 index 0000000..620c069 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb @@ -0,0 +1,204 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/engine' +require 'concurrent/atomic_reference/numeric_cas_wrapper' + +# Shim for TruffleRuby::AtomicReference +if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference) + # @!visibility private + module TruffleRuby + AtomicReference = Truffle::AtomicReference + end +end + +module Concurrent + + # Define update methods that use direct paths + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicDirectUpdate + + # @!macro atomic_reference_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. 
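A brief usage sketch (illustrative only, not part of the vendored file) of the `AtomicMarkableReference` class defined above:

```
require 'concurrent'

ref = Concurrent::AtomicMarkableReference.new(:pending, false)

# Both the value and the mark must match for the swap to succeed.
ref.compare_and_set(:pending, :done, false, true)  #=> true
ref.value                                          #=> :done
ref.marked?                                        #=> true

# update retries the compare-and-set until it wins; the block returns [value, mark].
ref.update { |val, mark| [val, !mark] }            #=> [:done, false]
ref.mark                                           #=> false
```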
+ # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @return [Object] the new value + def update + true until compare_and_set(old_value = get, new_value = yield(old_value)) + new_value + end + + # @!macro atomic_reference_method_try_update + # + # Pass the current value to the given block, replacing it + # with the block's result. Return nil if the update fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This method was altered to avoid raising an exception by default. + # Instead, this method now returns `nil` in case of failure. For more info, + # please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value, or nil if update failed + def try_update + old_value = get + new_value = yield old_value + + return unless compare_and_set old_value, new_value + + new_value + end + + # @!macro atomic_reference_method_try_update! + # + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This behavior mimics the behavior of the original + # `AtomicReference#try_update` API. The reason this was changed was to + # avoid raising exceptions (which are inherently slow) by default. For more + # info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_value = get + new_value = yield old_value + unless compare_and_set(old_value, new_value) + if $VERBOSE + raise ConcurrentUpdateError, "Update failed" + else + raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE + end + end + new_value + end + end + + require 'concurrent/atomic_reference/mutex_atomic' + + # @!macro atomic_reference + # + # An object reference that may be updated atomically. All read and write + # operations have java volatile semantic. + # + # @!macro thread_safe_variable_comparison + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html + # + # @!method initialize(value = nil) + # @!macro atomic_reference_method_initialize + # @param [Object] value The initial value. + # + # @!method get + # @!macro atomic_reference_method_get + # Gets the current value. + # @return [Object] the current value + # + # @!method set(new_value) + # @!macro atomic_reference_method_set + # Sets to the given value. + # @param [Object] new_value the new value + # @return [Object] the new value + # + # @!method get_and_set(new_value) + # @!macro atomic_reference_method_get_and_set + # Atomically sets to the given value and returns the old value. + # @param [Object] new_value the new value + # @return [Object] the old value + # + # @!method compare_and_set(old_value, new_value) + # @!macro atomic_reference_method_compare_and_set + # + # Atomically sets the value to the given updated value if + # the current value == the expected value. 
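The three update helpers above differ mainly in how a lost race is reported; an illustrative sketch (uncontended here, so every call succeeds):

```
require 'concurrent'

ref = Concurrent::AtomicReference.new([])

ref.update { |list| list + [:a] }       # retries until its compare-and-set wins
ref.try_update { |list| list + [:b] }   # returns nil instead of retrying on a lost race
begin
  ref.try_update! { |list| list + [:c] }  # raises Concurrent::ConcurrentUpdateError on a lost race
rescue Concurrent::ConcurrentUpdateError
  # handle the conflict, e.g. fall back to #update
end

ref.get  #=> [:a, :b, :c]
```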
+ # + # @param [Object] old_value the expected value + # @param [Object] new_value the new value + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value. + # + # @!method update + # @!macro atomic_reference_method_update + # + # @!method try_update + # @!macro atomic_reference_method_try_update + # + # @!method try_update! + # @!macro atomic_reference_method_try_update! + + + # @!macro internal_implementation_note + class ConcurrentUpdateError < ThreadError + # frozen pre-allocated backtrace to speed ConcurrentUpdateError + CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze + end + + # @!macro internal_implementation_note + AtomicReferenceImplementation = case + when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? + # @!visibility private + # @!macro internal_implementation_note + class CAtomicReference + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + end + CAtomicReference + when Concurrent.on_jruby? + # @!visibility private + # @!macro internal_implementation_note + class JavaAtomicReference + include AtomicDirectUpdate + end + JavaAtomicReference + when Concurrent.on_truffleruby? + class TruffleRubyAtomicReference < TruffleRuby::AtomicReference + include AtomicDirectUpdate + alias_method :value, :get + alias_method :value=, :set + alias_method :compare_and_swap, :compare_and_set + alias_method :swap, :get_and_set + end + when Concurrent.on_rbx? + # @note Extends `Rubinius::AtomicReference` version adding aliases + # and numeric logic. + # + # @!visibility private + # @!macro internal_implementation_note + class RbxAtomicReference < Rubinius::AtomicReference + alias_method :_compare_and_set, :compare_and_set + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :value, :get + alias_method :value=, :set + alias_method :swap, :get_and_set + alias_method :compare_and_swap, :compare_and_set + end + RbxAtomicReference + else + MutexAtomicReference + end + private_constant :AtomicReferenceImplementation + + # @!macro atomic_reference + class AtomicReference < AtomicReferenceImplementation + + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], get + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb new file mode 100644 index 0000000..d883aed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb @@ -0,0 +1,100 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/mutex_count_down_latch' +require 'concurrent/atomic/java_count_down_latch' + +module Concurrent + + ################################################################### + + # @!macro count_down_latch_method_initialize + # + # Create a new `CountDownLatch` with the initial `count`. + # + # @param [new] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro count_down_latch_method_wait + # + # Block on the latch until the counter reaches zero or until `timeout` is reached. 
+ # + # @param [Fixnum] timeout the number of seconds to wait for the counter or `nil` + # to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on `timeout` + + # @!macro count_down_latch_method_count_down + # + # Signal the latch to decrement the counter. Will signal all blocked threads when + # the `count` reaches zero. + + # @!macro count_down_latch_method_count + # + # The current value of the counter. + # + # @return [Fixnum] the current value of the counter + + ################################################################### + + # @!macro count_down_latch_public_api + # + # @!method initialize(count = 1) + # @!macro count_down_latch_method_initialize + # + # @!method wait(timeout = nil) + # @!macro count_down_latch_method_wait + # + # @!method count_down + # @!macro count_down_latch_method_count_down + # + # @!method count + # @!macro count_down_latch_method_count + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + CountDownLatchImplementation = case + when Concurrent.on_jruby? + JavaCountDownLatch + else + MutexCountDownLatch + end + private_constant :CountDownLatchImplementation + + # @!macro count_down_latch + # + # A synchronization object that allows one thread to wait on multiple other threads. + # The thread that will wait creates a `CountDownLatch` and sets the initial value + # (normally equal to the number of other threads). The initiating thread passes the + # latch to the other threads then waits for the other threads by calling the `#wait` + # method. Each of the other threads calls `#count_down` when done with its work. + # When the latch counter reaches zero the waiting thread is unblocked and continues + # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset. + # + # @!macro count_down_latch_public_api + # @example Waiter and Decrementer + # latch = Concurrent::CountDownLatch.new(3) + # + # waiter = Thread.new do + # latch.wait() + # puts ("Waiter released") + # end + # + # decrementer = Thread.new do + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # end + # + # [waiter, decrementer].each(&:join) + class CountDownLatch < CountDownLatchImplementation + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb new file mode 100644 index 0000000..42f5a94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb @@ -0,0 +1,128 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # A synchronization aid that allows a set of threads to all wait for each + # other to reach a common barrier point. 
+ # @example + # barrier = Concurrent::CyclicBarrier.new(3) + # jobs = Array.new(3) { |i| -> { sleep i; p done: i } } + # process = -> (i) do + # # waiting to start at the same time + # barrier.wait + # # execute job + # jobs[i].call + # # wait for others to finish + # barrier.wait + # end + # threads = 2.times.map do |i| + # Thread.new(i, &process) + # end + # + # # use main as well + # process.call 2 + # + # # here we can be sure that all jobs are processed + class CyclicBarrier < Synchronization::LockableObject + + # @!visibility private + Generation = Struct.new(:status) + private_constant :Generation + + # Create a new `CyclicBarrier` that waits for `parties` threads + # + # @param [Fixnum] parties the number of parties + # @yield an optional block that will be executed that will be executed after + # the last thread arrives and before the others are released + # + # @raise [ArgumentError] if `parties` is not an integer or is less than zero + def initialize(parties, &block) + Utility::NativeInteger.ensure_integer_and_bounds parties + Utility::NativeInteger.ensure_positive_and_no_zero parties + + super(&nil) + synchronize { ns_initialize parties, &block } + end + + # @return [Fixnum] the number of threads needed to pass the barrier + def parties + synchronize { @parties } + end + + # @return [Fixnum] the number of threads currently waiting on the barrier + def number_waiting + synchronize { @number_waiting } + end + + # Blocks on the barrier until the number of waiting threads is equal to + # `parties` or until `timeout` is reached or `reset` is called + # If a block has been passed to the constructor, it will be executed once by + # the last arrived thread before releasing the others + # @param [Fixnum] timeout the number of seconds to wait for the counter or + # `nil` to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on + # `timeout` or on `reset` or if the barrier is broken + def wait(timeout = nil) + synchronize do + + return false unless @generation.status == :waiting + + @number_waiting += 1 + + if @number_waiting == @parties + @action.call if @action + ns_generation_done @generation, :fulfilled + true + else + generation = @generation + if ns_wait_until(timeout) { generation.status != :waiting } + generation.status == :fulfilled + else + ns_generation_done generation, :broken, false + false + end + end + end + end + + # resets the barrier to its initial state + # If there is at least one waiting thread, it will be woken up, the `wait` + # method will return false and the barrier will be broken + # If the barrier is broken, this method restores it to the original state + # + # @return [nil] + def reset + synchronize { ns_generation_done @generation, :reset } + end + + # A barrier can be broken when: + # - a thread called the `reset` method while at least one other thread was waiting + # - at least one thread timed out on `wait` method + # + # A broken barrier can be restored using `reset` it's safer to create a new one + # @return [Boolean] true if the barrier is broken otherwise false + def broken? 
+ synchronize { @generation.status != :waiting } + end + + protected + + def ns_generation_done(generation, status, continue = true) + generation.status = status + ns_next_generation if continue + ns_broadcast + end + + def ns_next_generation + @generation = Generation.new(:waiting) + @number_waiting = 0 + end + + def ns_initialize(parties, &block) + @parties = parties + @action = block + ns_next_generation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/event.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/event.rb new file mode 100644 index 0000000..825f38a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/event.rb @@ -0,0 +1,109 @@ +require 'thread' +require 'concurrent/synchronization' + +module Concurrent + + # Old school kernel-style event reminiscent of Win32 programming in C++. + # + # When an `Event` is created it is in the `unset` state. Threads can choose to + # `#wait` on the event, blocking until released by another thread. When one + # thread wants to alert all blocking threads it calls the `#set` method which + # will then wake up all listeners. Once an `Event` has been set it remains set. + # New threads calling `#wait` will return immediately. An `Event` may be + # `#reset` at any time once it has been set. + # + # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx + # @example + # event = Concurrent::Event.new + # + # t1 = Thread.new do + # puts "t1 is waiting" + # event.wait(1) + # puts "event ocurred" + # end + # + # t2 = Thread.new do + # puts "t2 calling set" + # event.set + # end + # + # [t1, t2].each(&:join) + # + # # prints: + # # t2 calling set + # # t1 is waiting + # # event occurred + class Event < Synchronization::LockableObject + + # Creates a new `Event` in the unset state. Threads calling `#wait` on the + # `Event` will block. + def initialize + super + synchronize { ns_initialize } + end + + # Is the object in the set state? + # + # @return [Boolean] indicating whether or not the `Event` has been set + def set? + synchronize { @set } + end + + # Trigger the event, setting the state to `set` and releasing all threads + # waiting on the event. Has no effect if the `Event` has already been set. + # + # @return [Boolean] should always return `true` + def set + synchronize { ns_set } + end + + def try? + synchronize { @set ? false : ns_set } + end + + # Reset a previously set event back to the `unset` state. + # Has no effect if the `Event` has not yet been set. + # + # @return [Boolean] should always return `true` + def reset + synchronize do + if @set + @set = false + @iteration +=1 + end + true + end + end + + # Wait a given number of seconds for the `Event` to be set by another + # thread. Will wait forever when no `timeout` value is given. Returns + # immediately if the `Event` has already been set. 
+ # + # @return [Boolean] true if the `Event` was set before timeout else false + def wait(timeout = nil) + synchronize do + unless @set + iteration = @iteration + ns_wait_until(timeout) { iteration < @iteration || @set } + else + true + end + end + end + + protected + + def ns_set + unless @set + @set = true + ns_broadcast + end + true + end + + def ns_initialize + @set = false + @iteration = 0 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb new file mode 100644 index 0000000..cb5b35a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb @@ -0,0 +1,42 @@ +if Concurrent.on_jruby? + + module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class JavaCountDownLatch + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds(count) + Utility::NativeInteger.ensure_positive(count) + @latch = java.util.concurrent.CountDownLatch.new(count) + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly { @latch.await } + result = true + else + Synchronization::JRuby.sleep_interruptibly do + result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + end + + # @!macro count_down_latch_method_count_down + def count_down + @latch.countDown + end + + # @!macro count_down_latch_method_count + def count + @latch.getCount + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb new file mode 100644 index 0000000..b41018f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb @@ -0,0 +1,37 @@ +require 'concurrent/atomic/abstract_thread_local_var' + +if Concurrent.on_jruby? + + module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class JavaThreadLocalVar < AbstractThreadLocalVar + + # @!macro thread_local_var_method_get + def value + value = @var.get + + if value.nil? 
+ default + elsif value == NULL + nil + else + value + end + end + + # @!macro thread_local_var_method_set + def value=(value) + @var.set(value) + end + + protected + + # @!visibility private + def allocate_storage + @var = java.lang.ThreadLocal.new + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb new file mode 100644 index 0000000..a033de4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb @@ -0,0 +1,62 @@ +require 'concurrent/synchronization' + +module Concurrent + + # @!macro atomic_boolean + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicBoolean < Synchronization::LockableObject + + # @!macro atomic_boolean_method_initialize + def initialize(initial = false) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_boolean_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_boolean_method_value_set + def value=(value) + synchronize { @value = !!value } + end + + # @!macro atomic_boolean_method_true_question + def true? + synchronize { @value } + end + + # @!macro atomic_boolean_method_false_question + def false? + synchronize { !@value } + end + + # @!macro atomic_boolean_method_make_true + def make_true + synchronize { ns_make_value(true) } + end + + # @!macro atomic_boolean_method_make_false + def make_false + synchronize { ns_make_value(false) } + end + + protected + + # @!visibility private + def ns_initialize(initial) + @value = !!initial + end + + private + + # @!visibility private + def ns_make_value(value) + old = @value + @value = value + old != @value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb new file mode 100644 index 0000000..77b91d2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb @@ -0,0 +1,75 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro atomic_fixnum + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicFixnum < Synchronization::LockableObject + + # @!macro atomic_fixnum_method_initialize + def initialize(initial = 0) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_fixnum_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_fixnum_method_value_set + def value=(value) + synchronize { ns_set(value) } + end + + # @!macro atomic_fixnum_method_increment + def increment(delta = 1) + synchronize { ns_set(@value + delta.to_i) } + end + + alias_method :up, :increment + + # @!macro atomic_fixnum_method_decrement + def decrement(delta = 1) + synchronize { ns_set(@value - delta.to_i) } + end + + alias_method :down, :decrement + + # @!macro atomic_fixnum_method_compare_and_set + def compare_and_set(expect, update) + synchronize do + if @value == expect.to_i + @value = update.to_i + true + else + false + end + end + end + + # @!macro atomic_fixnum_method_update + def update + synchronize do + @value = yield @value + end + end + + protected + + # 
@!visibility private + def ns_initialize(initial) + ns_set(initial) + end + + private + + # @!visibility private + def ns_set(value) + Utility::NativeInteger.ensure_integer_and_bounds value + @value = value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb new file mode 100644 index 0000000..e99744c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb @@ -0,0 +1,44 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class MutexCountDownLatch < Synchronization::LockableObject + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds count + Utility::NativeInteger.ensure_positive count + + super() + synchronize { ns_initialize count } + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + synchronize { ns_wait_until(timeout) { @count == 0 } } + end + + # @!macro count_down_latch_method_count_down + def count_down + synchronize do + @count -= 1 if @count > 0 + ns_broadcast if @count == 0 + end + end + + # @!macro count_down_latch_method_count + def count + synchronize { @count } + end + + protected + + def ns_initialize(count) + @count = count + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb new file mode 100644 index 0000000..2042f73 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb @@ -0,0 +1,115 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro semaphore + # @!visibility private + # @!macro internal_implementation_note + class MutexSemaphore < Synchronization::LockableObject + + # @!macro semaphore_method_initialize + def initialize(count) + Utility::NativeInteger.ensure_integer_and_bounds count + + super() + synchronize { ns_initialize count } + end + + # @!macro semaphore_method_acquire + def acquire(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + try_acquire_timed(permits, nil) + nil + end + end + + # @!macro semaphore_method_available_permits + def available_permits + synchronize { @free } + end + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + def drain_permits + synchronize do + @free.tap { |_| @free = 0 } + end + end + + # @!macro semaphore_method_try_acquire + def try_acquire(permits = 1, timeout = nil) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + if timeout.nil? 
+ try_acquire_now(permits) + else + try_acquire_timed(permits, timeout) + end + end + end + + # @!macro semaphore_method_release + def release(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + @free += permits + permits.times { ns_signal } + end + nil + end + + # Shrinks the number of available permits by the indicated reduction. + # + # @param [Fixnum] reduction Number of permits to remove. + # + # @raise [ArgumentError] if `reduction` is not an integer or is negative + # + # @raise [ArgumentError] if `@free` - `@reduction` is less than zero + # + # @return [nil] + # + # @!visibility private + def reduce_permits(reduction) + Utility::NativeInteger.ensure_integer_and_bounds reduction + Utility::NativeInteger.ensure_positive reduction + + synchronize { @free -= reduction } + nil + end + + protected + + # @!visibility private + def ns_initialize(count) + @free = count + end + + private + + # @!visibility private + def try_acquire_now(permits) + if @free >= permits + @free -= permits + true + else + false + end + end + + # @!visibility private + def try_acquire_timed(permits, timeout) + ns_wait_until(timeout) { try_acquire_now(permits) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb new file mode 100644 index 0000000..246f21a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb @@ -0,0 +1,254 @@ +require 'thread' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # Ruby read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And if the "write" lock is taken, any readers who come along will have to wait) + # + # If readers are already active when a writer comes along, the writer will wait for + # all the readers to finish before going ahead. + # Any additional readers that come when the writer is already waiting, will also + # wait (so writers are not starved). + # + # This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @note Do **not** try to acquire the write lock while already holding a read lock + # **or** try to acquire the write lock while you already have it. + # This will lead to deadlock + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReadWriteLock < Synchronization::Object + + # @!visibility private + WAITING_WRITER = 1 << 15 + + # @!visibility private + RUNNING_WRITER = 1 << 29 + + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + safe_initialization! 
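+    # Editorial aside: a small worked example of the counter encoding described in the
+    # implementation notes below, using locals that mirror the constants defined above:
+    #
+    # @example Decoding a counter value
+    #   waiting_writer = 1 << 15              # 32768, same as WAITING_WRITER
+    #   running_writer = 1 << 29              # same as RUNNING_WRITER
+    #   max_readers    = waiting_writer - 1   # same as MAX_READERS
+    #
+    #   c = 3 + 2 * waiting_writer            # 3 running readers, 2 waiting writers
+    #   c & max_readers                       #=> 3     (running readers)
+    #   (c & (running_writer - 1)) >> 15      #=> 2     (waiting writers)
+    #   c >= running_writer                   #=> false (no writer holds the lock)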
+ + # Implementation notes: + # A goal is to make the uncontended path for both readers/writers lock-free + # Only if there is reader-writer or writer-writer contention, should locks be used + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each reader increments the counter by 1 when acquiring a read lock + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + + # Create a new `ReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadLock = Synchronization::Lock.new + @WriteLock = Synchronization::Lock.new + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock has been acquired will block until + # it is released. Will not block if other read locks have been acquired. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def acquire_read_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting when we first queue up, we need to wait + if waiting_writer?(c) + @ReadLock.wait_until { !waiting_writer? } + + # after a reader has waited once, they are allowed to "barge" ahead of waiting writers + # but if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadLock.wait_until { !running_writer? } + else + return if @Counter.compare_and_set(c, c+1) + end + end + else + break if @Counter.compare_and_set(c, c+1) + end + end + true + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + while true + c = @Counter.value + if @Counter.compare_and_set(c, c-1) + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_writer?(c) && running_readers(c) == 1 + @WriteLock.signal + end + break + end + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. 
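+    # A usage sketch of the manual (non-block) form (editorial addition; prefer
+    # #with_write_lock where a block fits):
+    #
+    # @example Pairing acquire with release
+    #   lock = Concurrent::ReadWriteLock.new
+    #   lock.acquire_write_lock
+    #   begin
+    #     lock.write_locked?    #=> true
+    #     # ... exclusive work ...
+    #   ensure
+    #     lock.release_write_lock
+    #   end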
+ def acquire_write_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + if c == 0 # no readers OR writers running + # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead + break if @Counter.compare_and_set(0, RUNNING_WRITER) + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here, OR another writer could increment + @WriteLock.wait_until do + # So we have to do another check inside the synchronized section + # If a writer OR reader is running, then go to sleep + c = @Counter.value + !running_writer?(c) && !running_readers?(c) + end + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # Then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + end + break + end + end + true + end + + # Release a previously acquired write lock. + # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + return true unless running_writer? + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadLock.broadcast + @WriteLock.signal if waiting_writers(c) > 0 + true + end + + # Queries if the write lock is held by any thread. + # + # @return [Boolean] true if the write lock is held else false` + def write_locked? + @Counter.value >= RUNNING_WRITER + end + + # Queries whether any threads are waiting to acquire the read or write lock. + # + # @return [Boolean] true if any threads are waiting for a lock else false + def has_waiters? + waiting_writer?(@Counter.value) + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) / WAITING_WRITER + end + + # @!visibility private + def waiting_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb new file mode 100644 index 0000000..42d7f3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb @@ -0,0 +1,379 @@ +require 'thread' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/errors' +require 'concurrent/synchronization' +require 'concurrent/atomic/thread_local_var' + +module Concurrent + + # Re-entrant read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And while the "write" lock is taken, no read locks can be obtained either. 
+ # Hence, the write lock can also be called an "exclusive" lock.) + # + # If another thread has taken a read lock, any thread which wants a write lock + # will block until all the readers release their locks. However, once a thread + # starts waiting to obtain a write lock, any additional readers that come along + # will also wait (so writers are not starved). + # + # A thread can acquire both a read and write lock at the same time. A thread can + # also acquire a read lock OR a write lock more than once. Only when the read (or + # write) lock is released as many times as it was acquired, will the thread + # actually let it go, allowing other threads which might have been waiting + # to proceed. Therefore the lock can be upgraded by first acquiring + # read lock and then write lock and that the lock can be downgraded by first + # having both read and write lock a releasing just the write lock. + # + # If both read and write locks are acquired by the same thread, it is not strictly + # necessary to release them in the same order they were acquired. In other words, + # the following code is legal: + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.acquire_write_lock + # lock.acquire_read_lock + # lock.release_write_lock + # # At this point, the current thread is holding only a read lock, not a write + # # lock. So other threads can take read locks, but not a write lock. + # lock.release_read_lock + # # Now the current thread is not holding either a read or write lock, so + # # another thread could potentially acquire a write lock. + # + # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReentrantReadWriteLock < Synchronization::Object + + # Implementation notes: + # + # A goal is to make the uncontended path for both readers/writers mutex-free + # Only if there is reader-writer or writer-writer contention, should mutexes be used + # Otherwise, a single CAS operation is all we need to acquire/release a lock + # + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each thread which has one OR MORE read locks increments the counter by 1 + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + # + # Additionally, each thread uses a thread-local variable to count how many times + # it has acquired a read lock, AND how many times it has acquired a write lock. 
+ # It uses a similar trick; an increment of 1 means a read lock was taken, and + # an increment of (1 << 15) means a write lock was taken + # This is what makes re-entrancy possible + # + # 2 rules are followed to ensure good liveness properties: + # 1) Once a writer has queued up and is waiting for a write lock, no other thread + # can take a lock without waiting + # 2) When a write lock is released, readers are given the "first chance" to wake + # up and acquire a read lock + # Following these rules means readers and writers tend to "take turns", so neither + # can starve the other, even under heavy contention + + # @!visibility private + READER_BITS = 15 + # @!visibility private + WRITER_BITS = 14 + + # Used with @Counter: + # @!visibility private + WAITING_WRITER = 1 << READER_BITS + # @!visibility private + RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS) + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + # Used with @HeldCount: + # @!visibility private + WRITE_LOCK_HELD = 1 << READER_BITS + # @!visibility private + READ_LOCK_MASK = WRITE_LOCK_HELD - 1 + # @!visibility private + WRITE_LOCK_MASK = MAX_WRITERS + + safe_initialization! + + # Create a new `ReentrantReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadQueue = Synchronization::Lock.new # used to queue waiting readers + @WriteQueue = Synchronization::Lock.new # used to queue waiting writers + @HeldCount = ThreadLocalVar.new(0) # indicates # of R & W locks held by this thread + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock is held by another thread, will block + # until it is released. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. 
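+    # A sketch of re-entrant acquisition, following the behaviour described in the
+    # class documentation above (editorial addition):
+    #
+    # @example Re-entrant read locking
+    #   lock = Concurrent::ReentrantReadWriteLock.new
+    #   lock.acquire_read_lock
+    #   lock.acquire_read_lock    # the same thread may take the lock again
+    #   lock.release_read_lock    # still held once by this thread
+    #   lock.release_read_lock    # now fully released; a waiting writer may proceed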
+ def acquire_read_lock + if (held = @HeldCount.value) > 0 + # If we already have a lock, there's no need to wait + if held & READ_LOCK_MASK == 0 + # But we do need to update the counter, if we were holding a write + # lock but not a read lock + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting OR running when we first queue up, we need to wait + if waiting_or_running_writer?(c) + # Before going to sleep, check again with the ReadQueue mutex held + @ReadQueue.synchronize do + @ReadQueue.ns_wait if waiting_or_running_writer? + end + # Note: the above 'synchronize' block could have used #wait_until, + # but that waits repeatedly in a loop, checking the wait condition + # each time it wakes up (to protect against spurious wakeups) + # But we are already in a loop, which is only broken when we successfully + # acquire the lock! So we don't care about spurious wakeups, and would + # rather not pay the extra overhead of using #wait_until + + # After a reader has waited once, they are allowed to "barge" ahead of waiting writers + # But if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadQueue.synchronize do + @ReadQueue.ns_wait if running_writer? + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + end + + # Try to acquire a read lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_read_lock + if (held = @HeldCount.value) > 0 + if held & READ_LOCK_MASK == 0 + # If we hold a write lock, but not a read lock... + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + false + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + held = @HeldCount.value = @HeldCount.value - 1 + rlocks_held = held & READ_LOCK_MASK + if rlocks_held == 0 + c = @Counter.update { |counter| counter - 1 } + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_or_running_writer?(c) && running_readers(c) == 0 + @WriteQueue.signal + end + elsif rlocks_held == READ_LOCK_MASK + raise IllegalOperationError, "Cannot release a read lock which is not held" + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. 
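+    # A sketch of the non-blocking alternative (editorial addition; #try_write_lock is
+    # defined later in this file):
+    #
+    # @example Falling back instead of blocking
+    #   lock = Concurrent::ReentrantReadWriteLock.new
+    #   if lock.try_write_lock
+    #     begin
+    #       # ... exclusive work ...
+    #     ensure
+    #       lock.release_write_lock
+    #     end
+    #   else
+    #     # the lock was busy; skip the work or retry later
+    #   end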
+ def acquire_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + # if we already have a write (exclusive) lock, there's no need to wait + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + # To go ahead and take the lock without waiting, there must be no writer + # running right now, AND no writers who came before us still waiting to + # acquire the lock + # Additionally, if any read locks have been taken, we must hold all of them + if c == held + # If we successfully swap the RUNNING_WRITER bit on, then we can go ahead + if @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here + @WriteQueue.synchronize do + # So we have to do another check inside the synchronized section + # If a writer OR another reader is running, then go to sleep + c = @Counter.value + @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held + end + # Note: if you are thinking of replacing the above 'synchronize' block + # with #wait_until, read the comment in #acquire_read_lock first! + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + if !running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + end + end + end + + # Try to acquire a write lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + @HeldCount.value = held + WRITE_LOCK_HELD + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + false + end + + # Release a previously acquired write lock. 
+ # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD + wlocks_held = held & WRITE_LOCK_MASK + if wlocks_held == 0 + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadQueue.broadcast + @WriteQueue.signal if waiting_writers(c) > 0 + elsif wlocks_held == WRITE_LOCK_MASK + raise IllegalOperationError, "Cannot release a write lock which is not held" + end + true + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) >> READER_BITS + end + + # @!visibility private + def waiting_or_running_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb new file mode 100644 index 0000000..9a51eb2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb @@ -0,0 +1,181 @@ +require 'thread' +require 'concurrent/atomic/abstract_thread_local_var' + +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class RubyThreadLocalVar < AbstractThreadLocalVar + + # Each thread has a (lazily initialized) array of thread-local variable values + # Each time a new thread-local var is created, we allocate an "index" for it + # For example, if the allocated index is 1, that means slot #1 in EVERY + # thread's thread-local array will be used for the value of that TLV + # + # The good thing about using a per-THREAD structure to hold values, rather + # than a per-TLV structure, is that no synchronization is needed when + # reading and writing those values (since the structure is only ever + # accessed by a single thread) + # + # Of course, when a TLV is GC'd, 1) we need to recover its index for use + # by other new TLVs (otherwise the thread-local arrays could get bigger + # and bigger with time), and 2) we need to null out all the references + # held in the now-unused slots (both to avoid blocking GC of those objects, + # and also to prevent "stale" values from being passed on to a new TLV + # when the index is reused) + # Because we need to null out freed slots, we need to keep references to + # ALL the thread-local arrays -- ARRAYS is for that + # But when a Thread is GC'd, we need to drop the reference to its thread-local + # array, so we don't leak memory + + FREE = [] + LOCK = Mutex.new + THREAD_LOCAL_ARRAYS = {} # used as a hash set + + # synchronize when not on MRI + # on MRI using lock in finalizer leads to "can't be called from trap context" error + # so the code is carefully written to be tread-safe on MRI relying on GIL + + if Concurrent.on_cruby? 
+ # @!visibility private + def self.semi_sync(&block) + block.call + end + else + # @!visibility private + def self.semi_sync(&block) + LOCK.synchronize(&block) + end + end + + private_constant :FREE, :LOCK, :THREAD_LOCAL_ARRAYS + + # @!macro thread_local_var_method_get + def value + if (array = get_threadlocal_array) + value = array[@index] + if value.nil? + default + elsif value.equal?(NULL) + nil + else + value + end + else + default + end + end + + # @!macro thread_local_var_method_set + def value=(value) + me = Thread.current + # We could keep the thread-local arrays in a hash, keyed by Thread + # But why? That would require locking + # Using Ruby's built-in thread-local storage is faster + unless (array = get_threadlocal_array(me)) + array = set_threadlocal_array([], me) + self.class.semi_sync { THREAD_LOCAL_ARRAYS[array.object_id] = array } + ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array.object_id)) + end + array[@index] = (value.nil? ? NULL : value) + value + end + + protected + + # @!visibility private + def allocate_storage + @index = FREE.pop || next_index + + ObjectSpace.define_finalizer(self, self.class.thread_local_finalizer(@index)) + end + + # @!visibility private + def self.thread_local_finalizer(index) + proc do + semi_sync do + # The cost of GC'ing a TLV is linear in the number of threads using TLVs + # But that is natural! More threads means more storage is used per TLV + # So naturally more CPU time is required to free more storage + # + # DO NOT use each_value which might conflict with new pair assignment + # into the hash in #value= method + THREAD_LOCAL_ARRAYS.values.each { |array| array[index] = nil } + # free index has to be published after the arrays are cleared + FREE.push(index) + end + end + end + + # @!visibility private + def self.thread_finalizer(id) + proc do + semi_sync do + # The thread which used this thread-local array is now gone + # So don't hold onto a reference to the array (thus blocking GC) + THREAD_LOCAL_ARRAYS.delete(id) + end + end + end + + private + + # noinspection RubyClassVariableUsageInspection + @@next = 0 + # noinspection RubyClassVariableUsageInspection + def next_index + LOCK.synchronize do + result = @@next + @@next += 1 + result + end + end + + if Thread.instance_methods.include?(:thread_variable_get) + + def get_threadlocal_array(thread = Thread.current) + thread.thread_variable_get(:__threadlocal_array__) + end + + def set_threadlocal_array(array, thread = Thread.current) + thread.thread_variable_set(:__threadlocal_array__, array) + end + + else + + def get_threadlocal_array(thread = Thread.current) + thread[:__threadlocal_array__] + end + + def set_threadlocal_array(array, thread = Thread.current) + thread[:__threadlocal_array__] = array + end + end + + # This exists only for use in testing + # @!visibility private + def value_for(thread) + if (array = get_threadlocal_array(thread)) + value = array[@index] + if value.nil? 
+ get_default + elsif value.equal?(NULL) + nil + else + value + end + else + get_default + end + end + + # @!visibility private + def get_default + if @default_block + raise "Cannot use default_for with default block" + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/semaphore.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/semaphore.rb new file mode 100644 index 0000000..1b2bd8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/semaphore.rb @@ -0,0 +1,145 @@ +require 'concurrent/atomic/mutex_semaphore' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro semaphore_method_initialize + # + # Create a new `Semaphore` with the initial `count`. + # + # @param [Fixnum] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro semaphore_method_acquire + # + # Acquires the given number of permits from this semaphore, + # blocking until all are available. + # + # @param [Fixnum] permits Number of permits to acquire + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [nil] + + # @!macro semaphore_method_available_permits + # + # Returns the current number of permits available in this semaphore. + # + # @return [Integer] + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + + # @!macro semaphore_method_try_acquire + # + # Acquires the given number of permits from this semaphore, + # only if all are available at the time of invocation or within + # `timeout` interval + # + # @param [Fixnum] permits the number of permits to acquire + # + # @param [Fixnum] timeout the number of seconds to wait for the counter + # or `nil` to return immediately + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [Boolean] `false` if no permits are available, `true` when + # acquired a permit + + # @!macro semaphore_method_release + # + # Releases the given number of permits, returning them to the semaphore. + # + # @param [Fixnum] permits Number of permits to return to the semaphore. + # + # @raise [ArgumentError] if `permits` is not a number or is less than one + # + # @return [nil] + + ################################################################### + + # @!macro semaphore_public_api + # + # @!method initialize(count) + # @!macro semaphore_method_initialize + # + # @!method acquire(permits = 1) + # @!macro semaphore_method_acquire + # + # @!method available_permits + # @!macro semaphore_method_available_permits + # + # @!method drain_permits + # @!macro semaphore_method_drain_permits + # + # @!method try_acquire(permits = 1, timeout = nil) + # @!macro semaphore_method_try_acquire + # + # @!method release(permits = 1) + # @!macro semaphore_method_release + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + SemaphoreImplementation = case + when defined?(JavaSemaphore) + JavaSemaphore + else + MutexSemaphore + end + private_constant :SemaphoreImplementation + + # @!macro semaphore + # + # A counting semaphore. Conceptually, a semaphore maintains a set of + # permits. 
Each {#acquire} blocks if necessary until a permit is + # available, and then takes it. Each {#release} adds a permit, potentially + # releasing a blocking acquirer. + # However, no actual permit objects are used; the Semaphore just keeps a + # count of the number available and acts accordingly. + # + # @!macro semaphore_public_api + # @example + # semaphore = Concurrent::Semaphore.new(2) + # + # t1 = Thread.new do + # semaphore.acquire + # puts "Thread 1 acquired semaphore" + # end + # + # t2 = Thread.new do + # semaphore.acquire + # puts "Thread 2 acquired semaphore" + # end + # + # t3 = Thread.new do + # semaphore.acquire + # puts "Thread 3 acquired semaphore" + # end + # + # t4 = Thread.new do + # sleep(2) + # puts "Thread 4 releasing semaphore" + # semaphore.release + # end + # + # [t1, t2, t3, t4].each(&:join) + # + # # prints: + # # Thread 3 acquired semaphore + # # Thread 2 acquired semaphore + # # Thread 4 releasing semaphore + # # Thread 1 acquired semaphore + # + class Semaphore < SemaphoreImplementation + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb new file mode 100644 index 0000000..100cc8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb @@ -0,0 +1,104 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/ruby_thread_local_var' +require 'concurrent/atomic/java_thread_local_var' + +module Concurrent + + ################################################################### + + # @!macro thread_local_var_method_initialize + # + # Creates a thread local variable. + # + # @param [Object] default the default value when otherwise unset + # @param [Proc] default_block Optional block that gets called to obtain the + # default value for each thread + + # @!macro thread_local_var_method_get + # + # Returns the value in the current thread's copy of this thread-local variable. + # + # @return [Object] the current value + + # @!macro thread_local_var_method_set + # + # Sets the current thread's copy of this thread-local variable to the specified value. + # + # @param [Object] value the value to set + # @return [Object] the new value + + # @!macro thread_local_var_method_bind + # + # Bind the given value to thread local storage during + # execution of the given block. + # + # @param [Object] value the value to bind + # @yield the operation to be performed with the bound variable + # @return [Object] the value + + + ################################################################### + + # @!macro thread_local_var_public_api + # + # @!method initialize(default = nil, &default_block) + # @!macro thread_local_var_method_initialize + # + # @!method value + # @!macro thread_local_var_method_get + # + # @!method value=(value) + # @!macro thread_local_var_method_set + # + # @!method bind(value, &block) + # @!macro thread_local_var_method_bind + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + ThreadLocalVarImplementation = case + when Concurrent.on_jruby? + JavaThreadLocalVar + else + RubyThreadLocalVar + end + private_constant :ThreadLocalVarImplementation + + # @!macro thread_local_var + # + # A `ThreadLocalVar` is a variable where the value is different for each thread. 
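+  # Editorial aside: besides #value / #value=, the #bind form documented above scopes
+  # an override to a block; a minimal sketch using only the public API:
+  #
+  # @example Binding a value for the duration of a block
+  #   v = Concurrent::ThreadLocalVar.new(14)
+  #   v.bind(2) do
+  #     v.value #=> 2, visible only inside this block and only in this thread
+  #   end
+  #   v.value   #=> 14
+  #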
+ # Each variable may have a default value, but when you modify the variable only + # the current thread will ever see that change. + # + # @!macro thread_safe_variable_comparison + # + # @example + # v = ThreadLocalVar.new(14) + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # + # @example + # v = ThreadLocalVar.new(14) + # + # t1 = Thread.new do + # v.value #=> 14 + # v.value = 1 + # v.value #=> 1 + # end + # + # t2 = Thread.new do + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # end + # + # v.value #=> 14 + # + # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal + # + # @!macro thread_local_var_public_api + class ThreadLocalVar < ThreadLocalVarImplementation + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb new file mode 100644 index 0000000..d092aed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb @@ -0,0 +1,56 @@ +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicReference < Synchronization::LockableObject + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + + # @!macro atomic_reference_method_initialize + def initialize(value = nil) + super() + synchronize { ns_initialize(value) } + end + + # @!macro atomic_reference_method_get + def get + synchronize { @value } + end + alias_method :value, :get + + # @!macro atomic_reference_method_set + def set(new_value) + synchronize { @value = new_value } + end + alias_method :value=, :set + + # @!macro atomic_reference_method_get_and_set + def get_and_set(new_value) + synchronize do + old_value = @value + @value = new_value + old_value + end + end + alias_method :swap, :get_and_set + + # @!macro atomic_reference_method_compare_and_set + def _compare_and_set(old_value, new_value) + synchronize do + if @value.equal? old_value + @value = new_value + true + else + false + end + end + end + + protected + + def ns_initialize(value) + @value = value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb new file mode 100644 index 0000000..709a382 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb @@ -0,0 +1,28 @@ +module Concurrent + + # Special "compare and set" handling of numeric values. + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicNumericCompareAndSetWrapper + + # @!macro atomic_reference_method_compare_and_set + def compare_and_set(old_value, new_value) + if old_value.kind_of? Numeric + while true + old = get + + return false unless old.kind_of? 
Numeric + + return false unless old == old_value + + result = _compare_and_set(old, new_value) + return result if result + end + else + _compare_and_set(old_value, new_value) + end + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomics.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomics.rb new file mode 100644 index 0000000..16cbe66 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/atomics.rb @@ -0,0 +1,10 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/atomic/cyclic_barrier' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/atomic/event' +require 'concurrent/atomic/read_write_lock' +require 'concurrent/atomic/reentrant_read_write_lock' +require 'concurrent/atomic/semaphore' +require 'concurrent/atomic/thread_local_var' diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb new file mode 100644 index 0000000..50d52a6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb @@ -0,0 +1,107 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-read approach: + # observers are added and removed from a thread safe collection; every time + # a notification is required the internal data structure is copied to + # prevent concurrency issues + # + # @api private + class CopyOnNotifyObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + @observers[observer] = func + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + @observers.delete(observer) + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + synchronize do + @observers.clear + self + end + end + + # @!macro observable_count_observers + def count_observers + synchronize { @observers.count } + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + observers = duplicate_observers + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. 
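+      # Editorial sketch (note the collection is marked `@api private` above; applications
+      # normally reach it indirectly through observable classes):
+      #
+      # @example
+      #   set = Concurrent::Collection::CopyOnNotifyObserverSet.new
+      #   set.add_observer { |value| puts "observed #{value}" }
+      #   set.notify_and_delete_observers(42)  # each observer is called once, then removed
+      #   set.count_observers                  #=> 0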
+ # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + observers = duplicate_and_clear_observers + notify_to(observers, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def duplicate_and_clear_observers + synchronize do + observers = @observers.dup + @observers.clear + observers + end + end + + def duplicate_observers + synchronize { @observers.dup } + end + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? + observer.send(function, *args) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb new file mode 100644 index 0000000..3f3f7cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb @@ -0,0 +1,111 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-write approach: + # every time an observer is added or removed the whole internal data structure is + # duplicated and replaced with a new one. + # + # @api private + class CopyOnWriteObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + new_observers = @observers.dup + new_observers[observer] = func + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + new_observers = @observers.dup + new_observers.delete(observer) + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + self.observers = {} + self + end + + # @!macro observable_count_observers + def count_observers + observers.count + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. + # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + old = clear_observers_and_return_old + notify_to(old, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? 
+ observer.send(function, *args) + end + end + + def observers + synchronize { @observers } + end + + def observers=(new_set) + synchronize { @observers = new_set } + end + + def clear_observers_and_return_old + synchronize do + old_observers = @observers + @observers = {} + old_observers + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..2be9e43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb @@ -0,0 +1,84 @@ +if Concurrent.on_jruby? + + module Concurrent + module Collection + + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class JavaNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + if [:min, :low].include?(order) + @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity + else + @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder()) + end + end + + # @!macro priority_queue_method_clear + def clear + @queue.clear + true + end + + # @!macro priority_queue_method_delete + def delete(item) + found = false + while @queue.remove(item) do + found = true + end + found + end + + # @!macro priority_queue_method_empty + def empty? + @queue.size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.contains(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @queue.size + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + @queue.peek + end + + # @!macro priority_queue_method_pop + def pop + @queue.poll + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @queue.add(item) + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb new file mode 100644 index 0000000..d003d3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb @@ -0,0 +1,158 @@ +module Concurrent + + # @!macro warn.edge + class LockFreeStack < Synchronization::Object + + safe_initialization! + + class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? 
+ + # @return [Node] + attr_reader :next_node + + # @return [Object] + attr_reader :value + + # @!visibility private + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value + + def initialize(value, next_node) + @value = value + @next_node = next_node + end + + singleton_class.send :alias_method, :[], :new + end + + # The singleton for empty node + EMPTY = Node[nil, nil] + def EMPTY.next_node + self + end + + attr_atomic(:head) + private :head, :head=, :swap_head, :compare_and_set_head, :update_head + + # @!visibility private + def self.of1(value) + new Node[value, EMPTY] + end + + # @!visibility private + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end + + # @param [Node] head + def initialize(head = EMPTY) + super() + self.head = head + end + + # @param [Node] head + # @return [true, false] + def empty?(head = head()) + head.equal? EMPTY + end + + # @param [Node] head + # @param [Object] value + # @return [true, false] + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end + + # @param [Object] value + # @return [self] + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] + end + end + + # @return [Node] + def peek + head + end + + # @param [Node] head + # @return [true, false] + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end + + # @return [Object] + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node + end + end + + # @param [Node] head + # @return [true, false] + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end + + include Enumerable + + # @param [Node] head + # @return [self] + def each(head = nil) + return to_enum(:each, head) unless block_given? + it = head || peek + until it.equal?(EMPTY) + yield it.value + it = it.next_node + end + self + end + + # @return [true, false] + def clear + while true + current_head = head + return false if current_head == EMPTY + return true if compare_and_set_head current_head, EMPTY + end + end + + # @param [Node] head + # @return [true, false] + def clear_if(head) + compare_and_set_head head, EMPTY + end + + # @param [Node] head + # @param [Node] new_head + # @return [true, false] + def replace_if(head, new_head) + compare_and_set_head head, new_head + end + + # @return [self] + # @yield over the cleared stack + # @yieldparam [Object] value + def clear_each(&block) + while true + current_head = head + return self if current_head == EMPTY + if compare_and_set_head current_head, EMPTY + each current_head, &block + return self + end + end + end + + # @return [String] Short string representation. 
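+    # A short usage sketch for the stack as a whole (editorial addition; note the edge
+    # warning macro on the class above):
+    #
+    # @example
+    #   stack = Concurrent::LockFreeStack.new
+    #   stack.push(1).push(2).push(3)   # #push returns self, so calls chain
+    #   stack.pop                       #=> 3
+    #   stack.to_a                      #=> [2, 1]  (LIFO order, via Enumerable)
+    #   stack.empty?                    #=> false
+    #   stack.clear                     #=> true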
+ def to_s + format '%s %s>', super[0..-2], to_a.to_s + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb new file mode 100644 index 0000000..dc51893 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb @@ -0,0 +1,927 @@ +require 'concurrent/constants' +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/adder' +require 'concurrent/thread_safe/util/cheap_lockable' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module Collection + + # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 + # + # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose + # size exceeds a threshold). + # + # A hash table supporting full concurrency of retrievals and high expected + # concurrency for updates. However, even though all operations are + # thread-safe, retrieval operations do _not_ entail locking, and there is + # _not_ any support for locking the entire table in a way that prevents all + # access. + # + # Retrieval operations generally do not block, so may overlap with update + # operations. Retrievals reflect the results of the most recently _completed_ + # update operations holding upon their onset. (More formally, an update + # operation for a given key bears a _happens-before_ relation with any (non + # +nil+) retrieval for that key reporting the updated value.) For aggregate + # operations such as +clear()+, concurrent retrievals may reflect insertion or + # removal of only some entries. Similarly, the +each_pair+ iterator yields + # elements reflecting the state of the hash table at some point at or since + # the start of the +each_pair+. Bear in mind that the results of aggregate + # status methods including +size()+ and +empty?+} are typically useful only + # when a map is not undergoing concurrent updates in other threads. Otherwise + # the results of these methods reflect transient states that may be adequate + # for monitoring or estimation purposes, but not for program control. + # + # The table is dynamically expanded when there are too many collisions (i.e., + # keys that have distinct hash codes but fall into the same slot modulo the + # table size), with the expected average effect of maintaining roughly two + # bins per mapping (corresponding to a 0.75 load factor threshold for + # resizing). There may be much variance around this average as mappings are + # added and removed, but overall, this maintains a commonly accepted + # time/space tradeoff for hash tables. However, resizing this or any other + # kind of hash table may be a relatively slow operation. When possible, it is + # a good idea to provide a size estimate as an optional :initial_capacity + # initializer argument. 
An additional optional :load_factor constructor + # argument provides a further means of customizing initial table capacity by + # specifying the table density to be used in calculating the amount of space + # to allocate for the given number of elements. Note that using many keys with + # exactly the same +hash+ is a sure way to slow down performance of any hash + # table. + # + # ## Design overview + # + # The primary design goal of this hash table is to maintain concurrent + # readability (typically method +[]+, but also iteration and related methods) + # while minimizing update contention. Secondary goals are to keep space + # consumption about the same or better than plain +Hash+, and to support high + # initial insertion rates on an empty table by many threads. + # + # Each key-value mapping is held in a +Node+. The validation-based approach + # explained below leads to a lot of code sprawl because retry-control + # precludes factoring into smaller methods. + # + # The table is lazily initialized to a power-of-two size upon the first + # insertion. Each bin in the table normally contains a list of +Node+s (most + # often, the list has only zero or one +Node+). Table accesses require + # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are + # always accurately traversable under volatile reads, so long as lookups check + # hash code and non-nullness of value before checking key equality. + # + # We use the top two bits of +Node+ hash fields for control purposes -- they + # are available anyway because of addressing constraints. As explained further + # below, these top bits are used as follows: + # + # - 00 - Normal + # - 01 - Locked + # - 11 - Locked and may have a thread waiting for lock + # - 10 - +Node+ is a forwarding node + # + # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, + # except for forwarding nodes, for which the lower bits are zero (and so + # always have hash field == +MOVED+). + # + # Insertion (via +[]=+ or its variants) of the first node in an empty bin is + # performed by just CASing it to the bin. This is by far the most common case + # for put operations under most key/hash distributions. Other update + # operations (insert, delete, and replace) require locks. We do not want to + # waste the space required to associate a distinct lock object with each bin, + # so instead use the first node of a bin list itself as a lock. Blocking + # support for these locks relies +Concurrent::ThreadSafe::Util::CheapLockable. However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. + # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. 
+ # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. 
+ # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. + # + # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + # + # @!visibility private + class AtomicReferenceMapBackend + + # @!visibility private + class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. + # + # @!visibility private + class Node + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :hash, :value, :next + + include Concurrent::ThreadSafe::Util::CheapLockable + + bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? 
Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? + spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_acquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_acquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. + TRANSFER_BUFFER_SIZE = 32 + + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. + + # Table initialization and resizing control. When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Concurrent::ThreadSafe::Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? 
MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + + def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? 
+ value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
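      # --- Illustrative sketch; not part of the vendored concurrent-ruby source. ---
      # The comment above describes the shape of the insertion methods: CAS a new
      # node into an empty bin, otherwise lock the bin's head node, validate, and
      # retry. The same CAS-retry idiom can be seen with the gem's public
      # Concurrent::AtomicReference API (variable names here are examples only):
      require 'concurrent'

      bin = Concurrent::AtomicReference.new(nil)           # an initially empty "bin"
      loop do
        current = bin.get
        break if bin.compare_and_set(current, :new_entry)  # retry if another thread raced us
      end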
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + 
check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Concurrent::ThreadSafe::Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. + def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= ::Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. + def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb new file mode 100644 index 0000000..903c1f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb @@ -0,0 +1,66 @@ +require 'thread' +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class 
MriMapBackend < NonConcurrentMapBackend + + def initialize(options = nil) + super(options) + @write_lock = Mutex.new + end + + def []=(key, value) + @write_lock.synchronize { super } + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) # fast non-blocking path for the most likely case + stored_value + else + @write_lock.synchronize { super } + end + end + + def compute_if_present(key) + @write_lock.synchronize { super } + end + + def compute(key) + @write_lock.synchronize { super } + end + + def merge_pair(key, value) + @write_lock.synchronize { super } + end + + def replace_pair(key, old_value, new_value) + @write_lock.synchronize { super } + end + + def replace_if_exists(key, new_value) + @write_lock.synchronize { super } + end + + def get_and_set(key, value) + @write_lock.synchronize { super } + end + + def delete(key) + @write_lock.synchronize { super } + end + + def delete_pair(key, value) + @write_lock.synchronize { super } + end + + def clear + @write_lock.synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb new file mode 100644 index 0000000..e7c62e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb @@ -0,0 +1,140 @@ +require 'concurrent/constants' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class NonConcurrentMapBackend + + # WARNING: all public methods of the class must operate on the @backend + # directly without calling each other. This is important because of the + # SynchronizedMapBackend which uses a non-reentrant mutex for performance + # reasons. 
+ def initialize(options = nil) + @backend = {} + end + + def [](key) + @backend[key] + end + + def []=(key, value) + @backend[key] = value + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + stored_value + else + @backend[key] = yield + end + end + + def replace_pair(key, old_value, new_value) + if pair?(key, old_value) + @backend[key] = new_value + true + else + false + end + end + + def replace_if_exists(key, new_value) + if NULL != (stored_value = @backend.fetch(key, NULL)) + @backend[key] = new_value + stored_value + end + end + + def compute_if_present(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + store_computed_value(key, yield(stored_value)) + end + end + + def compute(key) + store_computed_value(key, yield(@backend[key])) + end + + def merge_pair(key, value) + if NULL == (stored_value = @backend.fetch(key, NULL)) + @backend[key] = value + else + store_computed_value(key, yield(stored_value)) + end + end + + def get_and_set(key, value) + stored_value = @backend[key] + @backend[key] = value + stored_value + end + + def key?(key) + @backend.key?(key) + end + + def delete(key) + @backend.delete(key) + end + + def delete_pair(key, value) + if pair?(key, value) + @backend.delete(key) + true + else + false + end + end + + def clear + @backend.clear + self + end + + def each_pair + dupped_backend.each_pair do |k, v| + yield k, v + end + self + end + + def size + @backend.size + end + + def get_or_default(key, default_value) + @backend.fetch(key, default_value) + end + + alias_method :_get, :[] + alias_method :_set, :[]= + private :_get, :_set + private + def initialize_copy(other) + super + @backend = {} + self + end + + def dupped_backend + @backend.dup + end + + def pair?(key, expected_value) + NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) + end + + def store_computed_value(key, new_value) + if new_value.nil? + @backend.delete(key) + nil + else + @backend[key] = new_value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb new file mode 100644 index 0000000..190c8d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb @@ -0,0 +1,82 @@ +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class SynchronizedMapBackend < NonConcurrentMapBackend + + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. 
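      # --- Illustrative sketch; not part of the vendored concurrent-ruby source. ---
      # The warning above exists because Mutex_m wraps a plain, non-reentrant Mutex:
      # a synchronized method that calls another synchronized method on the same
      # object tries to re-acquire the lock it already holds and raises ThreadError.
      require 'mutex_m'

      obj = Object.new.extend(Mutex_m)          # gives obj its own mutex and #synchronize
      # obj.synchronize { obj.synchronize { } } # => ThreadError: deadlock; recursive locking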
+ + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb new file mode 100644 index 0000000..68a1b38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb @@ -0,0 +1,14 @@ +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class TruffleRubyMapBackend < TruffleRuby::ConcurrentMap + def initialize(options = nil) + options ||= {} + super(initial_capacity: options[:initial_capacity], load_factor: options[:load_factor]) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb new file mode 100644 index 0000000..694cd7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb @@ -0,0 +1,143 @@ +require 'concurrent/utility/engine' +require 'concurrent/collection/java_non_concurrent_priority_queue' +require 'concurrent/collection/ruby_non_concurrent_priority_queue' + +module Concurrent + module Collection + + # @!visibility private + # @!macro internal_implementation_note + NonConcurrentPriorityQueueImplementation = case + when Concurrent.on_jruby? + JavaNonConcurrentPriorityQueue + else + RubyNonConcurrentPriorityQueue + end + private_constant :NonConcurrentPriorityQueueImplementation + + # @!macro priority_queue + # + # A queue collection in which the elements are sorted based on their + # comparison (spaceship) operator `<=>`. Items are added to the queue + # at a position relative to their priority. On removal the element + # with the "highest" priority is removed. By default the sort order is + # from highest to lowest, but a lowest-to-highest sort order can be + # set on construction. + # + # The API is based on the `Queue` class from the Ruby standard library. + # + # The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm + # stored in an array. The algorithm is based on the work of Robert Sedgewick + # and Kevin Wayne. + # + # The JRuby native implementation is a thin wrapper around the standard + # library `java.util.NonConcurrentPriorityQueue`. 
+ # + # When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`. + # When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`. + # + # @note This implementation is *not* thread safe. + # + # @see http://en.wikipedia.org/wiki/Priority_queue + # @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html + # + # @see http://algs4.cs.princeton.edu/24pq/index.php#2.6 + # @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html + # + # @!visibility private + class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation + + alias_method :has_priority?, :include? + + alias_method :size, :length + + alias_method :deq, :pop + alias_method :shift, :pop + + alias_method :<<, :push + alias_method :enq, :push + + # @!method initialize(opts = {}) + # @!macro priority_queue_method_initialize + # + # Create a new priority queue with no items. + # + # @param [Hash] opts the options for creating the queue + # @option opts [Symbol] :order (:max) dictates the order in which items are + # stored: from highest to lowest when `:max` or `:high`; from lowest to + # highest when `:min` or `:low` + + # @!method clear + # @!macro priority_queue_method_clear + # + # Removes all of the elements from this priority queue. + + # @!method delete(item) + # @!macro priority_queue_method_delete + # + # Deletes all items from `self` that are equal to `item`. + # + # @param [Object] item the item to be removed from the queue + # @return [Object] true if the item is found else false + + # @!method empty? + # @!macro priority_queue_method_empty + # + # Returns `true` if `self` contains no elements. + # + # @return [Boolean] true if there are no items in the queue else false + + # @!method include?(item) + # @!macro priority_queue_method_include + # + # Returns `true` if the given item is present in `self` (that is, if any + # element == `item`), otherwise returns false. + # + # @param [Object] item the item to search for + # + # @return [Boolean] true if the item is found else false + + # @!method length + # @!macro priority_queue_method_length + # + # The current length of the queue. + # + # @return [Fixnum] the number of items in the queue + + # @!method peek + # @!macro priority_queue_method_peek + # + # Retrieves, but does not remove, the head of this queue, or returns `nil` + # if this queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method pop + # @!macro priority_queue_method_pop + # + # Retrieves and removes the head of this queue, or returns `nil` if this + # queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method push(item) + # @!macro priority_queue_method_push + # + # Inserts the specified element into this priority queue. + # + # @param [Object] item the item to insert onto the queue + + # @!method self.from_list(list, opts = {}) + # @!macro priority_queue_method_from_list + # + # Create a new priority queue from the given list. 
+ # + # @param [Enumerable] list the list to build the queue from + # @param [Hash] opts the options for creating the queue + # + # @return [NonConcurrentPriorityQueue] the newly created and populated queue + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..322b4ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb @@ -0,0 +1,160 @@ +module Concurrent + module Collection + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class RubyNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + @comparator = [:min, :low].include?(order) ? -1 : 1 + clear + end + + # @!macro priority_queue_method_clear + def clear + @queue = [nil] + @length = 0 + true + end + + # @!macro priority_queue_method_delete + def delete(item) + return false if empty? + original_length = @length + k = 1 + while k <= @length + if @queue[k] == item + swap(k, @length) + @length -= 1 + sink(k) || swim(k) + @queue.pop + else + k += 1 + end + end + @length != original_length + end + + # @!macro priority_queue_method_empty + def empty? + size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.include?(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @length + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + empty? ? nil : @queue[1] + end + + # @!macro priority_queue_method_pop + def pop + return nil if empty? + max = @queue[1] + swap(1, @length) + @length -= 1 + sink(1) + @queue.pop + max + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @length += 1 + @queue << item + swim(@length) + true + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + + private + + # Exchange the values at the given indexes within the internal array. + # + # @param [Integer] x the first index to swap + # @param [Integer] y the second index to swap + # + # @!visibility private + def swap(x, y) + temp = @queue[x] + @queue[x] = @queue[y] + @queue[y] = temp + end + + # Are the items at the given indexes ordered based on the priority + # order specified at construction? + # + # @param [Integer] x the first index from which to retrieve a comparable value + # @param [Integer] y the second index from which to retrieve a comparable value + # + # @return [Boolean] true if the two elements are in the correct priority order + # else false + # + # @!visibility private + def ordered?(x, y) + (@queue[x] <=> @queue[y]) == @comparator + end + + # Percolate down to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def sink(k) + success = false + + while (j = (2 * k)) <= @length do + j += 1 if j < @length && ! 
ordered?(j, j+1) + break if ordered?(k, j) + swap(k, j) + success = true + k = j + end + + success + end + + # Percolate up to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def swim(k) + success = false + + while k > 1 && ! ordered?(k/2, k) do + swap(k, k/2) + k = k/2 + success = true + end + + success + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/deprecation.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/deprecation.rb new file mode 100644 index 0000000..35ae4b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/deprecation.rb @@ -0,0 +1,34 @@ +require 'concurrent/concern/logging' + +module Concurrent + module Concern + + # @!visibility private + # @!macro internal_implementation_note + module Deprecation + # TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed. + include Concern::Logging + + def deprecated(message, strip = 2) + caller_line = caller(strip).first if strip > 0 + klass = if Module === self + self + else + self.class + end + message = if strip > 0 + format("[DEPRECATED] %s\ncalled on: %s", message, caller_line) + else + format('[DEPRECATED] %s', message) + end + log WARN, klass.to_s, message + end + + def deprecated_method(old_name, new_name) + deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3 + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb new file mode 100644 index 0000000..dc172ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb @@ -0,0 +1,73 @@ +module Concurrent + module Concern + + # Object references in Ruby are mutable. This can lead to serious problems when + # the `#value` of a concurrent object is a mutable reference. Which is always the + # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type. + # Most classes in this library that expose a `#value` getter method do so using the + # `Dereferenceable` mixin module. + # + # @!macro copy_options + module Dereferenceable + # NOTE: This module is going away in 2.0. In the mean time we need it to + # play nicely with the synchronization layer. This means that the + # including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Return the value this object represents after applying the options specified + # by the `#set_deref_options` method. + # + # @return [Object] the current value of the object + def value + synchronize { apply_deref_options(@value) } + end + alias_method :deref, :value + + protected + + # Set the internal value of this object + # + # @param [Object] value the new value + def value=(value) + synchronize{ @value = value } + end + + # @!macro dereferenceable_set_deref_options + # Set the options which define the operations #value performs before + # returning data to the caller (dereferencing). 
+ # + # @note Most classes that include this module will call `#set_deref_options` + # from within the constructor, thus allowing these options to be set at + # object creation. + # + # @param [Hash] opts the options defining dereference behavior. + # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def set_deref_options(opts = {}) + synchronize{ ns_set_deref_options(opts) } + end + + # @!macro dereferenceable_set_deref_options + # @!visibility private + def ns_set_deref_options(opts) + @dup_on_deref = opts[:dup_on_deref] || opts[:dup] + @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze] + @copy_on_deref = opts[:copy_on_deref] || opts[:copy] + @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref) + nil + end + + # @!visibility private + def apply_deref_options(value) + return nil if value.nil? + return value if @do_nothing_on_deref + value = @copy_on_deref.call(value) if @copy_on_deref + value = value.dup if @dup_on_deref + value = value.freeze if @freeze_on_deref + value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/logging.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/logging.rb new file mode 100644 index 0000000..2c74999 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/logging.rb @@ -0,0 +1,32 @@ +require 'logger' + +module Concurrent + module Concern + + # Include where logging is needed + # + # @!visibility private + module Logging + include Logger::Severity + + # Logs through {Concurrent.global_logger}, it can be overridden by setting @logger + # @param [Integer] level one of Logger::Severity constants + # @param [String] progname e.g. a path of an Actor + # @param [String, nil] message when nil block is used to generate the message + # @yieldreturn [String] a message + def log(level, progname, message = nil, &block) + #NOTE: Cannot require 'concurrent/configuration' above due to circular references. + # Assume that the gem has been initialized if we've gotten this far. + logger = if defined?(@logger) && @logger + @logger + else + Concurrent.global_logger + end + logger.call level, progname, message, &block + rescue => error + $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" + + "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/obligation.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/obligation.rb new file mode 100644 index 0000000..2c9ac12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/obligation.rb @@ -0,0 +1,220 @@ +require 'thread' +require 'timeout' + +require 'concurrent/atomic/event' +require 'concurrent/concern/dereferenceable' + +module Concurrent + module Concern + + module Obligation + include Concern::Dereferenceable + # NOTE: The Dereferenceable module is going away in 2.0. In the mean time + # we need it to place nicely with the synchronization layer. 
This means + # that the including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Has the obligation been fulfilled? + # + # @return [Boolean] + def fulfilled? + state == :fulfilled + end + alias_method :realized?, :fulfilled? + + # Has the obligation been rejected? + # + # @return [Boolean] + def rejected? + state == :rejected + end + + # Is obligation completion still pending? + # + # @return [Boolean] + def pending? + state == :pending + end + + # Is the obligation still unscheduled? + # + # @return [Boolean] + def unscheduled? + state == :unscheduled + end + + # Has the obligation completed processing? + # + # @return [Boolean] + def complete? + [:fulfilled, :rejected].include? state + end + + # Is the obligation still awaiting completion of processing? + # + # @return [Boolean] + def incomplete? + ! complete? + end + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + def value(timeout = nil) + wait timeout + deref + end + + # Wait until obligation is complete or the timeout has been reached. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + def wait(timeout = nil) + event.wait(timeout) if timeout != 0 && incomplete? + self + end + + # Wait until obligation is complete or the timeout is reached. Will re-raise + # any exceptions raised during processing (but will not raise an exception + # on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + # @raise [Exception] raises the reason when rejected + def wait!(timeout = nil) + wait(timeout).tap { raise self if rejected? } + end + alias_method :no_error!, :wait! + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. Will re-raise any exceptions + # raised during processing (but will not raise an exception on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + # @raise [Exception] raises the reason when rejected + def value!(timeout = nil) + wait(timeout) + if rejected? + raise self + else + deref + end + end + + # The current state of the obligation. + # + # @return [Symbol] the current state + def state + synchronize { @state } + end + + # If an exception was raised during processing this will return the + # exception object. Will return `nil` when the state is pending or if + # the obligation has been successfully fulfilled. + # + # @return [Exception] the exception raised during processing or `nil` + def reason + synchronize { @reason } + end + + # @example allows Obligation to be risen + # rejected_ivar = Ivar.new.fail + # raise rejected_ivar + def exception(*args) + raise 'obligation is not rejected' unless rejected? 
+ reason.exception(*args) + end + + protected + + # @!visibility private + def get_arguments_from(opts = {}) + [*opts.fetch(:args, [])] + end + + # @!visibility private + def init_obligation + @event = Event.new + @value = @reason = nil + end + + # @!visibility private + def event + @event + end + + # @!visibility private + def set_state(success, value, reason) + if success + @value = value + @state = :fulfilled + else + @reason = reason + @state = :rejected + end + end + + # @!visibility private + def state=(value) + synchronize { ns_set_state(value) } + end + + # Atomic compare and set operation + # State is set to `next_state` only if `current state == expected_current`. + # + # @param [Symbol] next_state + # @param [Symbol] expected_current + # + # @return [Boolean] true is state is changed, false otherwise + # + # @!visibility private + def compare_and_set_state(next_state, *expected_current) + synchronize do + if expected_current.include? @state + @state = next_state + true + else + false + end + end + end + + # Executes the block within mutex if current state is included in expected_states + # + # @return block value if executed, false otherwise + # + # @!visibility private + def if_state(*expected_states) + synchronize do + raise ArgumentError.new('no block given') unless block_given? + + if expected_states.include? @state + yield + else + false + end + end + end + + protected + + # Am I in the current state? + # + # @param [Symbol] expected The state to check against + # @return [Boolean] true if in the expected state else false + # + # @!visibility private + def ns_check_state?(expected) + @state == expected + end + + # @!visibility private + def ns_set_state(value) + @state = value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/observable.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/observable.rb new file mode 100644 index 0000000..b513271 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concern/observable.rb @@ -0,0 +1,110 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/collection/copy_on_write_observer_set' + +module Concurrent + module Concern + + # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one + # of the most useful design patterns. + # + # The workflow is very simple: + # - an `observer` can register itself to a `subject` via a callback + # - many `observers` can be registered to the same `subject` + # - the `subject` notifies all registered observers when its status changes + # - an `observer` can deregister itself when is no more interested to receive + # event notifications + # + # In a single threaded environment the whole pattern is very easy: the + # `subject` can use a simple data structure to manage all its subscribed + # `observer`s and every `observer` can react directly to every event without + # caring about synchronization. + # + # In a multi threaded environment things are more complex. The `subject` must + # synchronize the access to its data structure and to do so currently we're + # using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet} + # and {Concurrent::Concern::CopyOnNotifyObserverSet}. 
+ # + # When implementing and `observer` there's a very important rule to remember: + # **there are no guarantees about the thread that will execute the callback** + # + # Let's take this example + # ``` + # class Observer + # def initialize + # @count = 0 + # end + # + # def update + # @count += 1 + # end + # end + # + # obs = Observer.new + # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) } + # # execute [obj1, obj2, obj3, obj4] + # ``` + # + # `obs` is wrong because the variable `@count` can be accessed by different + # threads at the same time, so it should be synchronized (using either a Mutex + # or an AtomicFixum) + module Observable + + # @!macro observable_add_observer + # + # Adds an observer to this set. If a block is passed, the observer will be + # created by this method and no other params should be passed. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # Default is :update + # @return [Object] the added observer + def add_observer(observer = nil, func = :update, &block) + observers.add_observer(observer, func, &block) + end + + # As `#add_observer` but can be used for chaining. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # @return [Observable] self + def with_observer(observer = nil, func = :update, &block) + add_observer(observer, func, &block) + self + end + + # @!macro observable_delete_observer + # + # Remove `observer` as an observer on this object so that it will no + # longer receive notifications. + # + # @param [Object] observer the observer to remove + # @return [Object] the deleted observer + def delete_observer(observer) + observers.delete_observer(observer) + end + + # @!macro observable_delete_observers + # + # Remove all observers associated with this object. + # + # @return [Observable] self + def delete_observers + observers.delete_observers + self + end + + # @!macro observable_count_observers + # + # Return the number of observers associated with this object. 
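Picking up the `@count` example from the module documentation above, a safe observer can be sketched with an atomic counter. This assumes Concurrent::TimerTask (which mixes in Concern::Observable) and Concurrent::AtomicFixnum, neither of which appears in this excerpt; the interval and names are illustrative:

require 'concurrent'

class TickCounter
  def initialize
    @count = Concurrent::AtomicFixnum.new(0)   # thread-safe, per the note above
  end

  # TimerTask notifies observers with (time, result, exception)
  def update(_time, _result, _exception)
    @count.increment
  end

  def count
    @count.value
  end
end

observer = TickCounter.new
task = Concurrent::TimerTask.new(execution_interval: 0.5) { :tick }
task.add_observer(observer)   # Concern::Observable#add_observer
task.execute

sleep 2
task.shutdown
puts observer.count           # number of notifications received so far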
+ # + # @return [Integer] the observers count + def count_observers + observers.count_observers + end + + protected + + attr_accessor :observers + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concurrent_ruby.jar b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concurrent_ruby.jar new file mode 100644 index 0000000..887aa16 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/concurrent_ruby.jar differ diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/configuration.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/configuration.rb new file mode 100644 index 0000000..a00dc84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/configuration.rb @@ -0,0 +1,188 @@ +require 'thread' +require 'concurrent/delay' +require 'concurrent/errors' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/concern/logging' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/utility/processor_counter' + +module Concurrent + extend Concern::Logging + extend Concern::Deprecation + + autoload :Options, 'concurrent/options' + autoload :TimerSet, 'concurrent/executor/timer_set' + autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' + + # @return [Logger] Logger with provided level and output. + def self.create_simple_logger(level = Logger::FATAL, output = $stderr) + # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking + lambda do |severity, progname, message = nil, &block| + return false if severity < level + + message = block ? block.call : message + formatted_message = case message + when String + message + when Exception + format "%s (%s)\n%s", + message.message, message.class, (message.backtrace || []).join("\n") + else + message.inspect + end + + output.print format "[%s] %5s -- %s: %s\n", + Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), + Logger::SEV_LABEL[severity], + progname, + formatted_message + true + end + end + + # Use logger created by #create_simple_logger to log concurrent-ruby messages. + def self.use_simple_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_simple_logger level, output + end + + # @return [Logger] Logger with provided level and output. + # @deprecated + def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) + logger = Logger.new(output) + logger.level = level + logger.formatter = lambda do |severity, datetime, progname, msg| + formatted_message = case msg + when String + msg + when Exception + format "%s (%s)\n%s", + msg.message, msg.class, (msg.backtrace || []).join("\n") + else + msg.inspect + end + format "[%s] %5s -- %s: %s\n", + datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), + severity, + progname, + formatted_message + end + + lambda do |loglevel, progname, message = nil, &block| + logger.add loglevel, message, progname, &block + end + end + + # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. 
+ # @deprecated + def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_stdlib_logger level, output + end + + # TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods + + # Suppresses all output when used for logging. + NULL_LOGGER = lambda { |level, progname, message = nil, &block| } + + # @!visibility private + GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN)) + private_constant :GLOBAL_LOGGER + + def self.global_logger + GLOBAL_LOGGER.value + end + + def self.global_logger=(value) + GLOBAL_LOGGER.value = value + end + + # @!visibility private + GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor } + private_constant :GLOBAL_FAST_EXECUTOR + + # @!visibility private + GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor } + private_constant :GLOBAL_IO_EXECUTOR + + # @!visibility private + GLOBAL_TIMER_SET = Delay.new { TimerSet.new } + private_constant :GLOBAL_TIMER_SET + + # @!visibility private + GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new + private_constant :GLOBAL_IMMEDIATE_EXECUTOR + + # Disables AtExit handlers including pool auto-termination handlers. + # When disabled it will be the application programmer's responsibility + # to ensure that the handlers are shutdown properly prior to application + # exit by calling `AtExit.run` method. + # + # @note this option should be needed only because of `at_exit` ordering + # issues which may arise when running some of the testing frameworks. + # E.g. Minitest's test-suite runs itself in `at_exit` callback which + # executes after the pools are already terminated. Then auto termination + # needs to be disabled and called manually after test-suite ends. + # @note This method should *never* be called + # from within a gem. It should *only* be used from within the main + # application and even then it should be used only when necessary. + # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841. + # + def self.disable_at_exit_handlers! + deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841." + end + + # Global thread pool optimized for short, fast *operations*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_fast_executor + GLOBAL_FAST_EXECUTOR.value + end + + # Global thread pool optimized for long, blocking (IO) *tasks*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_io_executor + GLOBAL_IO_EXECUTOR.value + end + + def self.global_immediate_executor + GLOBAL_IMMEDIATE_EXECUTOR + end + + # Global thread pool user for global *timers*. + # + # @return [Concurrent::TimerSet] the thread pool + def self.global_timer_set + GLOBAL_TIMER_SET.value + end + + # General access point to global executors. 
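A short sketch of how the global logger and executors defined here are typically used from application code; the timeout value is illustrative, and Concurrent::Event is assumed from the same gem:

require 'concurrent'

# Route concurrent-ruby's internal log output to $stderr at WARN and above.
Concurrent.use_simple_logger(Logger::WARN)

# Post one task to the global IO pool and wait briefly for it to finish.
done = Concurrent::Event.new
Concurrent.global_io_executor.post do
  puts "running on #{Thread.current}"
  done.set
end
done.wait(1)   #=> true once the task has signalled the event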
+ # @param [Symbol, Executor] executor_identifier symbols: + # - :fast - {Concurrent.global_fast_executor} + # - :io - {Concurrent.global_io_executor} + # - :immediate - {Concurrent.global_immediate_executor} + # @return [Executor] + def self.executor(executor_identifier) + Options.executor(executor_identifier) + end + + def self.new_fast_executor(opts = {}) + FixedThreadPool.new( + [2, Concurrent.processor_count].max, + auto_terminate: opts.fetch(:auto_terminate, true), + idletime: 60, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "fast" + ) + end + + def self.new_io_executor(opts = {}) + CachedThreadPool.new( + auto_terminate: opts.fetch(:auto_terminate, true), + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "io" + ) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/constants.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/constants.rb new file mode 100644 index 0000000..676c2af --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/constants.rb @@ -0,0 +1,8 @@ +module Concurrent + + # Various classes within allows for +nil+ values to be stored, + # so a special +NULL+ token is required to indicate the "nil-ness". + # @!visibility private + NULL = ::Object.new + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/dataflow.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/dataflow.rb new file mode 100644 index 0000000..d55f19d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/dataflow.rb @@ -0,0 +1,81 @@ +require 'concurrent/future' +require 'concurrent/atomic/atomic_fixnum' + +module Concurrent + + # @!visibility private + class DependencyCounter # :nodoc: + + def initialize(count, &block) + @counter = AtomicFixnum.new(count) + @block = block + end + + def update(time, value, reason) + if @counter.decrement == 0 + @block.call + end + end + end + + # Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available. + # {include:file:docs-source/dataflow.md} + # + # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon + # + # @yield The operation to perform once all the dependencies are met + # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow + # @yieldreturn [Object] the result of the block operation + # + # @return [Object] the result of all the operations + # + # @raise [ArgumentError] if no block is given + # @raise [ArgumentError] if any of the inputs are not `IVar`s + def dataflow(*inputs, &block) + dataflow_with(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow + + def dataflow_with(executor, *inputs, &block) + call_dataflow(:value, executor, *inputs, &block) + end + module_function :dataflow_with + + def dataflow!(*inputs, &block) + dataflow_with!(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow! + + def dataflow_with!(executor, *inputs, &block) + call_dataflow(:value!, executor, *inputs, &block) + end + module_function :dataflow_with! + + private + + def call_dataflow(method, executor, *inputs, &block) + raise ArgumentError.new('an executor must be provided') if executor.nil? + raise ArgumentError.new('no block given') unless block_given? 
+ unless inputs.all? { |input| input.is_a? IVar } + raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }") + end + + result = Future.new(executor: executor) do + values = inputs.map { |input| input.send(method) } + block.call(*values) + end + + if inputs.empty? + result.execute + else + counter = DependencyCounter.new(inputs.size) { result.execute } + + inputs.each do |input| + input.add_observer counter + end + end + + result + end + module_function :call_dataflow +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/delay.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/delay.rb new file mode 100644 index 0000000..83799d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/delay.rb @@ -0,0 +1,199 @@ +require 'thread' +require 'concurrent/concern/obligation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/synchronization' + +module Concurrent + + # This file has circular require issues. It must be autoloaded here. + autoload :Options, 'concurrent/options' + + # Lazy evaluation of a block yielding an immutable result. Useful for + # expensive operations that may never be needed. It may be non-blocking, + # supports the `Concern::Obligation` interface, and accepts the injection of + # custom executor upon which to execute the block. Processing of + # block will be deferred until the first time `#value` is called. + # At that time the caller can choose to return immediately and let + # the block execute asynchronously, block indefinitely, or block + # with a timeout. + # + # When a `Delay` is created its state is set to `pending`. The value and + # reason are both `nil`. The first time the `#value` method is called the + # enclosed opration will be run and the calling thread will block. Other + # threads attempting to call `#value` will block as well. Once the operation + # is complete the *value* will be set to the result of the operation or the + # *reason* will be set to the raised exception, as appropriate. All threads + # blocked on `#value` will return. Subsequent calls to `#value` will immediately + # return the cached value. The operation will only be run once. This means that + # any side effects created by the operation will only happen once as well. + # + # `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread + # safety of the reference returned by `#value`. + # + # @!macro copy_options + # + # @!macro delay_note_regarding_blocking + # @note The default behavior of `Delay` is to block indefinitely when + # calling either `value` or `wait`, executing the delayed operation on + # the current thread. This makes the `timeout` value completely + # irrelevant. To enable non-blocking behavior, use the `executor` + # constructor option. This will cause the delayed operation to be + # execute on the given executor, allowing the call to timeout. + # + # @see Concurrent::Concern::Dereferenceable + class Delay < Synchronization::LockableObject + include Concern::Obligation + + # NOTE: Because the global thread pools are lazy-loaded with these objects + # there is a performance hit every time we post a new task to one of these + # thread pools. Subsequently it is critical that `Delay` perform as fast + # as possible post-completion. This class has been highly optimized using + # the benchmark script `examples/lazy_and_delay.rb`. 
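A minimal sketch of the Delay behaviour described above (the block is evaluated once, on the first `#value` call, on the calling thread unless an `:executor` option is given); the hash returned here is illustrative:

require 'concurrent'

settings = Concurrent::Delay.new do
  puts 'computing...'          # runs at most once, on the first #value call
  { retries: 3, timeout: 5 }
end

settings.pending?    #=> true -- nothing has been evaluated yet
settings.value       # prints "computing..." and returns the hash
settings.value       # returns the cached hash; the block is not re-run
settings.fulfilled?  #=> true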
Do NOT attempt to + # DRY-up this class or perform other refactoring with running the + # benchmarks and ensuring that performance is not negatively impacted. + + # Create a new `Delay` in the `:pending` state. + # + # @!macro executor_and_deref_options + # + # @yield the delayed operation to perform + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(&nil) + synchronize { ns_initialize(opts, &block) } + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception this method will return nil. The execption object + # can be accessed via the `#reason` method. + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # + # @!macro delay_note_regarding_blocking + def value(timeout = nil) + if @executor # TODO (pitr 12-Sep-2015): broken unsafe read? + super + else + # this function has been optimized for performance and + # should not be modified without running new benchmarks + synchronize do + execute = @evaluation_started = true unless @evaluation_started + if execute + begin + set_state(true, @task.call, nil) + rescue => ex + set_state(false, nil, ex) + end + elsif incomplete? + raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay' + end + end + if @do_nothing_on_deref + @value + else + apply_deref_options(@value) + end + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception, this method will raise that exception (even when) + # the operation has already been executed). + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # @raise [Exception] when `#rejected?` raises `#reason` + # + # @!macro delay_note_regarding_blocking + def value!(timeout = nil) + if @executor + super + else + result = value + raise @reason if @reason + result + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. + # + # @param [Integer] timeout (nil) the maximum number of seconds to wait for + # the value to be computed. When `nil` the caller will block indefinitely. + # + # @return [Object] self + # + # @!macro delay_note_regarding_blocking + def wait(timeout = nil) + if @executor + execute_task_once + super(timeout) + else + value + end + self + end + + # Reconfigures the block returning the value if still `#incomplete?` + # + # @yield the delayed operation to perform + # @return [true, false] if success + def reconfigure(&block) + synchronize do + raise ArgumentError.new('no block given') unless block_given? 
+ unless @evaluation_started + @task = block + true + else + false + end + end + end + + protected + + def ns_initialize(opts, &block) + init_obligation + set_deref_options(opts) + @executor = opts[:executor] + + @task = block + @state = :pending + @evaluation_started = false + end + + private + + # @!visibility private + def execute_task_once # :nodoc: + # this function has been optimized for performance and + # should not be modified without running new benchmarks + execute = task = nil + synchronize do + execute = @evaluation_started = true unless @evaluation_started + task = @task + end + + if execute + executor = Options.executor_from_options(executor: @executor) + executor.post do + begin + result = task.call + success = true + rescue => ex + reason = ex + end + synchronize do + set_state(success, result, reason) + event.set + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/errors.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/errors.rb new file mode 100644 index 0000000..b69fec0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/errors.rb @@ -0,0 +1,69 @@ +module Concurrent + + Error = Class.new(StandardError) + + # Raised when errors occur during configuration. + ConfigurationError = Class.new(Error) + + # Raised when an asynchronous operation is cancelled before execution. + CancelledOperationError = Class.new(Error) + + # Raised when a lifecycle method (such as `stop`) is called in an improper + # sequence or when the object is in an inappropriate state. + LifecycleError = Class.new(Error) + + # Raised when an attempt is made to violate an immutability guarantee. + ImmutabilityError = Class.new(Error) + + # Raised when an operation is attempted which is not legal given the + # receiver's current state + IllegalOperationError = Class.new(Error) + + # Raised when an object's methods are called when it has not been + # properly initialized. + InitializationError = Class.new(Error) + + # Raised when an object with a start/stop lifecycle has been started an + # excessive number of times. Often used in conjunction with a restart + # policy or strategy. + MaxRestartFrequencyError = Class.new(Error) + + # Raised when an attempt is made to modify an immutable object + # (such as an `IVar`) after its final state has been set. + class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end + + # Raised by an `Executor` when it is unable to process a given task, + # possibly because of a reject policy or other internal error. + RejectedExecutionError = Class.new(Error) + + # Raised when any finite resource, such as a lock counter, exceeds its + # maximum limit/threshold. + ResourceLimitError = Class.new(Error) + + # Raised when an operation times out. + TimeoutError = Class.new(Error) + + # Aggregates multiple exceptions. 
+ class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/exchanger.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/exchanger.rb new file mode 100644 index 0000000..5a99550 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/exchanger.rb @@ -0,0 +1,352 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/maybe' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/utility/engine' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro exchanger + # + # A synchronization point at which threads can pair and swap elements within + # pairs. Each thread presents some object on entry to the exchange method, + # matches with a partner thread, and receives its partner's object on return. + # + # @!macro thread_safe_variable_comparison + # + # This implementation is very simple, using only a single slot for each + # exchanger (unlike more advanced implementations which use an "arena"). + # This approach will work perfectly fine when there are only a few threads + # accessing a single `Exchanger`. Beyond a handful of threads the performance + # will degrade rapidly due to contention on the single slot, but the algorithm + # will remain correct. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # threads = [ + # Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar" + # Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo" + # ] + # threads.each {|t| t.join(2) } + + # @!visibility private + class AbstractExchanger < Synchronization::Object + + # @!visibility private + CANCEL = ::Object.new + private_constant :CANCEL + + def initialize + super + end + + # @!macro exchanger_method_do_exchange + # + # Waits for another thread to arrive at this exchange point (unless the + # current thread is interrupted), and then transfers the given object to + # it, receiving its object in return. The timeout value indicates the + # approximate number of seconds the method should block while waiting + # for the exchange. When the timeout value is `nil` the method will + # block indefinitely. + # + # @param [Object] value the value to exchange with another thread + # @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely + # + # @!macro exchanger_method_exchange + # + # In some edge cases when a `timeout` is given a return value of `nil` may be + # ambiguous. Specifically, if `nil` is a valid value in the exchange it will + # be impossible to tell whether `nil` is the actual return value or if it + # signifies timeout. When `nil` is a valid value in the exchange consider + # using {#exchange!} or {#try_exchange} instead. + # + # @return [Object] the value exchanged by the other thread or `nil` on timeout + def exchange(value, timeout = nil) + (value = do_exchange(value, timeout)) == CANCEL ? 
nil : value + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + # + # On timeout a {Concurrent::TimeoutError} exception will be raised. + # + # @return [Object] the value exchanged by the other thread + # @raise [Concurrent::TimeoutError] on timeout + def exchange!(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + raise Concurrent::TimeoutError + else + value + end + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + # + # The return value will be a {Concurrent::Maybe} set to `Just` on success or + # `Nothing` on timeout. + # + # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with + # the item exchanged by the other thread as `#value`; on timeout a + # `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason` + # + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # result = exchanger.exchange(:foo, 0.5) + # + # if result.just? + # puts result.value #=> :bar + # else + # puts 'timeout' + # end + def try_exchange(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + Concurrent::Maybe.nothing(Concurrent::TimeoutError) + else + Concurrent::Maybe.just(value) + end + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + raise NotImplementedError + end + end + + # @!macro internal_implementation_note + # @!visibility private + class RubyExchanger < AbstractExchanger + # A simplified version of java.util.concurrent.Exchanger written by + # Doug Lea, Bill Scherer, and Michael Scott with assistance from members + # of JCP JSR-166 Expert Group and released to the public domain. It does + # not include the arena or the multi-processor spin loops. + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java + + safe_initialization! + + class Node < Concurrent::Synchronization::Object + attr_atomic :value + safe_initialization! + + def initialize(item) + super() + @Item = item + @Latch = Concurrent::CountDownLatch.new + self.value = nil + end + + def latch + @Latch + end + + def item + @Item + end + end + private_constant :Node + + def initialize + super + end + + private + + attr_atomic(:slot) + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + + # ALGORITHM + # + # From the original Java version: + # + # > The basic idea is to maintain a "slot", which is a reference to + # > a Node containing both an Item to offer and a "hole" waiting to + # > get filled in. If an incoming "occupying" thread sees that the + # > slot is null, it CAS'es (compareAndSets) a Node there and waits + # > for another to invoke exchange. That second "fulfilling" thread + # > sees that the slot is non-null, and so CASes it back to null, + # > also exchanging items by CASing the hole, plus waking up the + # > occupying thread if it is blocked. In each case CAS'es may + # > fail because a slot at first appears non-null but is null upon + # > CAS, or vice-versa. So threads may need to retry these + # > actions. + # + # This version: + # + # An exchange occurs between an "occupier" thread and a "fulfiller" thread. + # The "slot" is used to setup this interaction. 
The first thread in the + # exchange puts itself into the slot (occupies) and waits for a fulfiller. + # The second thread removes the occupier from the slot and attempts to + # perform the exchange. Removing the occupier also frees the slot for + # another occupier/fulfiller pair. + # + # Because the occupier and the fulfiller are operating independently and + # because there may be contention with other threads, any failed operation + # indicates contention. Both the occupier and the fulfiller operate within + # spin loops. Any failed actions along the happy path will cause the thread + # to repeat the loop and try again. + # + # When a timeout value is given the thread must be cognizant of time spent + # in the spin loop. The remaining time is checked every loop. When the time + # runs out the thread will exit. + # + # A "node" is the data structure used to perform the exchange. Only the + # occupier's node is necessary. It's the node used for the exchange. + # Each node has an "item," a "hole" (self), and a "latch." The item is the + # node's initial value. It never changes. It's what the fulfiller returns on + # success. The occupier's hole is where the fulfiller put its item. It's the + # item that the occupier returns on success. The latch is used for synchronization. + # Because a thread may act as either an occupier or fulfiller (or possibly + # both in periods of high contention) every thread creates a node when + # the exchange method is first called. + # + # The following steps occur within the spin loop. If any actions fail + # the thread will loop and try again, so long as there is time remaining. + # If time runs out the thread will return CANCEL. + # + # Check the slot for an occupier: + # + # * If the slot is empty try to occupy + # * If the slot is full try to fulfill + # + # Attempt to occupy: + # + # * Attempt to CAS myself into the slot + # * Go to sleep and wait to be woken by a fulfiller + # * If the sleep is successful then the fulfiller completed its happy path + # - Return the value from my hole (the value given by the fulfiller) + # * When the sleep fails (time ran out) attempt to cancel the operation + # - Attempt to CAS myself out of the hole + # - If successful there is no contention + # - Return CANCEL + # - On failure, I am competing with a fulfiller + # - Attempt to CAS my hole to CANCEL + # - On success + # - Let the fulfiller deal with my cancel + # - Return CANCEL + # - On failure the fulfiller has completed its happy path + # - Return th value from my hole (the fulfiller's value) + # + # Attempt to fulfill: + # + # * Attempt to CAS the occupier out of the slot + # - On failure loop again + # * Attempt to CAS my item into the occupier's hole + # - On failure the occupier is trying to cancel + # - Loop again + # - On success we are on the happy path + # - Wake the sleeping occupier + # - Return the occupier's item + + value = NULL if value.nil? # The sentinel allows nil to be a valid value + me = Node.new(value) # create my node in case I need to occupy + end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up + + result = loop do + other = slot + if other && compare_and_set_slot(other, nil) + # try to fulfill + if other.compare_and_set_value(nil, value) + # happy path + other.latch.count_down + break other.item + end + elsif other.nil? 
&& compare_and_set_slot(nil, me) + # try to occupy + timeout = end_at - Concurrent.monotonic_time if timeout + if me.latch.wait(timeout) + # happy path + break me.value + else + # attempt to remove myself from the slot + if compare_and_set_slot(me, nil) + break CANCEL + elsif !me.compare_and_set_value(nil, CANCEL) + # I've failed to block the fulfiller + break me.value + end + end + end + break CANCEL if timeout && Concurrent.monotonic_time >= end_at + end + + result == NULL ? nil : result + end + end + + if Concurrent.on_jruby? + + # @!macro internal_implementation_note + # @!visibility private + class JavaExchanger < AbstractExchanger + + def initialize + @exchanger = java.util.concurrent.Exchanger.new + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value) + end + else + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + rescue java.util.concurrent.TimeoutException + CANCEL + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + ExchangerImplementation = case + when Concurrent.on_jruby? + JavaExchanger + else + RubyExchanger + end + private_constant :ExchangerImplementation + + # @!macro exchanger + class Exchanger < ExchangerImplementation + + # @!method initialize + # Creates exchanger instance + + # @!method exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange + + # @!method exchange!(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + + # @!method try_exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb new file mode 100644 index 0000000..6d0b047 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb @@ -0,0 +1,128 @@ +require 'concurrent/errors' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/executor_service' +require 'concurrent/synchronization' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class AbstractExecutorService < Synchronization::LockableObject + include ExecutorService + include Concern::Deprecation + + # The set of possible fallback policies that may be set at thread pool creation. + FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze + + # @!macro executor_service_attr_reader_fallback_policy + attr_reader :fallback_policy + + attr_reader :name + + # Create a new thread pool. + def initialize(opts = {}, &block) + super(&nil) + synchronize do + @auto_terminate = opts.fetch(:auto_terminate, true) + @name = opts.fetch(:name) if opts.key?(:name) + ns_initialize(opts, &block) + end + end + + def to_s + name ? 
"#{super[0..-2]} name: #{name}>" : super + end + + # @!macro executor_service_method_shutdown + def shutdown + raise NotImplementedError + end + + # @!macro executor_service_method_kill + def kill + raise NotImplementedError + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + raise NotImplementedError + end + + # @!macro executor_service_method_running_question + def running? + synchronize { ns_running? } + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + synchronize { ns_shuttingdown? } + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + synchronize { ns_shutdown? } + end + + # @!macro executor_service_method_auto_terminate_question + def auto_terminate? + synchronize { @auto_terminate } + end + + # @!macro executor_service_method_auto_terminate_setter + def auto_terminate=(value) + deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized." + end + + private + + # Handler which executes the `fallback_policy` once the queue size + # reaches `max_queue`. + # + # @param [Array] args the arguments to the task which is being handled. + # + # @!visibility private + def handle_fallback(*args) + case fallback_policy + when :abort + raise RejectedExecutionError + when :discard + false + when :caller_runs + begin + yield(*args) + rescue => ex + # let it fail + log DEBUG, ex + end + true + else + fail "Unknown fallback policy #{fallback_policy}" + end + end + + def ns_execute(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_ns_shutdown_execution + # + # Callback method called when an orderly shutdown has completed. + # The default behavior is to signal all waiting threads. + def ns_shutdown_execution + # do nothing + end + + # @!macro executor_service_method_ns_kill_execution + # + # Callback method called when the executor has been killed. + # The default behavior is to do nothing. + def ns_kill_execution + # do nothing + end + + def ns_auto_terminate? + @auto_terminate + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb new file mode 100644 index 0000000..de50ed1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb @@ -0,0 +1,62 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # A thread pool that dynamically grows and shrinks to fit the current workload. + # New threads are created as needed, existing threads are reused, and threads + # that remain idle for too long are killed and removed from the pool. These + # pools are particularly suited to applications that perform a high volume of + # short-lived tasks. + # + # On creation a `CachedThreadPool` has zero running threads. New threads are + # created on the pool as new operations are `#post`. The size of the pool + # will grow until `#max_length` threads are in the pool or until the number + # of threads exceeds the number of running and pending operations. When a new + # operation is post to the pool the first available idle thread will be tasked + # with the new operation. + # + # Should a thread crash for any reason the thread will immediately be removed + # from the pool. 
Similarly, threads which remain idle for an extended period + # of time will be killed and reclaimed. Thus these thread pools are very + # efficient at reclaiming unused resources. + # + # The API and behavior of this class are based on Java's `CachedThreadPool` + # + # @!macro thread_pool_options + class CachedThreadPool < ThreadPoolExecutor + + # @!macro cached_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options defining pool behavior. + # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool-- + def initialize(opts = {}) + defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: 0, + max_threads: DEFAULT_MAX_POOL_SIZE, + max_queue: DEFAULT_MAX_QUEUE_SIZE } + super(defaults.merge(opts).merge(overrides)) + end + + private + + # @!macro cached_thread_pool_method_initialize + # @!visibility private + def ns_initialize(opts) + super(opts) + if Concurrent.on_jruby? + @max_queue = 0 + @executor = java.util.concurrent.Executors.newCachedThreadPool( + DaemonThreadFactory.new(ns_auto_terminate?)) + @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new) + @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/executor_service.rb new file mode 100644 index 0000000..7e34491 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/executor_service.rb @@ -0,0 +1,185 @@ +require 'concurrent/concern/logging' + +module Concurrent + + ################################################################### + + # @!macro executor_service_method_post + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + + # @!macro executor_service_method_left_shift + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Proc] task the asynchronous task to perform + # + # @return [self] returns itself + + # @!macro executor_service_method_can_overflow_question + # + # Does the task queue have a maximum size? + # + # @return [Boolean] True if the task queue has a maximum size else false. + + # @!macro executor_service_method_serialized_question + # + # Does this executor guarantee serialization of its operations? + # + # @return [Boolean] True if the executor guarantees that all operations + # will be post in the order they are received and no two operations may + # occur simultaneously. Else false. + + ################################################################### + + # @!macro executor_service_public_api + # + # @!method post(*args, &task) + # @!macro executor_service_method_post + # + # @!method <<(task) + # @!macro executor_service_method_left_shift + # + # @!method can_overflow? 
+ # @!macro executor_service_method_can_overflow_question + # + # @!method serialized? + # @!macro executor_service_method_serialized_question + + ################################################################### + + # @!macro executor_service_attr_reader_fallback_policy + # @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`. + + # @!macro executor_service_method_shutdown + # + # Begin an orderly shutdown. Tasks already in the queue will be executed, + # but no new tasks will be accepted. Has no additional effect if the + # thread pool is not running. + + # @!macro executor_service_method_kill + # + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + + # @!macro executor_service_method_wait_for_termination + # + # Block until executor shutdown is complete or until `timeout` seconds have + # passed. + # + # @note Does not initiate shutdown or termination. Either `shutdown` or `kill` + # must be called before this method (or on another thread). + # + # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete + # + # @return [Boolean] `true` if shutdown complete or false on `timeout` + + # @!macro executor_service_method_running_question + # + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + + # @!macro executor_service_method_shuttingdown_question + # + # Is the executor shuttingdown? + # + # @return [Boolean] `true` when not running and not shutdown, else `false` + + # @!macro executor_service_method_shutdown_question + # + # Is the executor shutdown? + # + # @return [Boolean] `true` when shutdown, `false` when shutting down or running + + # @!macro executor_service_method_auto_terminate_question + # + # Is the executor auto-terminate when the application exits? + # + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + # @!macro executor_service_method_auto_terminate_setter + # + # + # Set the auto-terminate behavior for this executor. + # @deprecated Has no effect + # @param [Boolean] value The new auto-terminate value to set for this executor. + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + ################################################################### + + # @!macro abstract_executor_service_public_api + # + # @!macro executor_service_public_api + # + # @!attribute [r] fallback_policy + # @!macro executor_service_attr_reader_fallback_policy + # + # @!method shutdown + # @!macro executor_service_method_shutdown + # + # @!method kill + # @!macro executor_service_method_kill + # + # @!method wait_for_termination(timeout = nil) + # @!macro executor_service_method_wait_for_termination + # + # @!method running? + # @!macro executor_service_method_running_question + # + # @!method shuttingdown? + # @!macro executor_service_method_shuttingdown_question + # + # @!method shutdown? + # @!macro executor_service_method_shutdown_question + # + # @!method auto_terminate? 
+ # @!macro executor_service_method_auto_terminate_question + # + # @!method auto_terminate=(value) + # @!macro executor_service_method_auto_terminate_setter + + ################################################################### + + # @!macro executor_service_public_api + # @!visibility private + module ExecutorService + include Concern::Logging + + # @!macro executor_service_method_post + def post(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_can_overflow_question + # + # @note Always returns `false` + def can_overflow? + false + end + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `false` + def serialized? + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb new file mode 100644 index 0000000..b665f6c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb @@ -0,0 +1,210 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # @!macro thread_pool_executor_constant_default_max_pool_size + # Default maximum number of threads that will be created in the pool. + + # @!macro thread_pool_executor_constant_default_min_pool_size + # Default minimum number of threads that will be retained in the pool. + + # @!macro thread_pool_executor_constant_default_max_queue_size + # Default maximum number of tasks that may be added to the task queue. + + # @!macro thread_pool_executor_constant_default_thread_timeout + # Default maximum number of seconds a thread in the pool may remain idle + # before being reclaimed. + + # @!macro thread_pool_executor_constant_default_synchronous + # Default value of the :synchronous option. + + # @!macro thread_pool_executor_attr_reader_max_length + # The maximum number of threads that may be created in the pool. + # @return [Integer] The maximum number of threads that may be created in the pool. + + # @!macro thread_pool_executor_attr_reader_min_length + # The minimum number of threads that may be retained in the pool. + # @return [Integer] The minimum number of threads that may be retained in the pool. + + # @!macro thread_pool_executor_attr_reader_largest_length + # The largest number of threads that have been created in the pool since construction. + # @return [Integer] The largest number of threads that have been created in the pool since construction. + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # The number of tasks that have been scheduled for execution on the pool since construction. + # @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction. + + # @!macro thread_pool_executor_attr_reader_completed_task_count + # The number of tasks that have been completed by the pool since construction. + # @return [Integer] The number of tasks that have been completed by the pool since construction. + + # @!macro thread_pool_executor_attr_reader_idletime + # The number of seconds that a thread may be idle before being reclaimed. + # @return [Integer] The number of seconds that a thread may be idle before being reclaimed. 
+ + # @!macro thread_pool_executor_attr_reader_synchronous + # Whether or not a value of 0 for :max_queue option means the queue must perform direct hand-off or rather unbounded queue. + # @return [true, false] + + # @!macro thread_pool_executor_attr_reader_max_queue + # The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + # + # @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + + # @!macro thread_pool_executor_attr_reader_length + # The number of threads currently in the pool. + # @return [Integer] The number of threads currently in the pool. + + # @!macro thread_pool_executor_attr_reader_queue_length + # The number of tasks in the queue awaiting execution. + # @return [Integer] The number of tasks in the queue awaiting execution. + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + # + # @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + + + + + + # @!macro thread_pool_executor_public_api + # + # @!macro abstract_executor_service_public_api + # + # @!attribute [r] max_length + # @!macro thread_pool_executor_attr_reader_max_length + # + # @!attribute [r] min_length + # @!macro thread_pool_executor_attr_reader_min_length + # + # @!attribute [r] largest_length + # @!macro thread_pool_executor_attr_reader_largest_length + # + # @!attribute [r] scheduled_task_count + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # + # @!attribute [r] completed_task_count + # @!macro thread_pool_executor_attr_reader_completed_task_count + # + # @!attribute [r] idletime + # @!macro thread_pool_executor_attr_reader_idletime + # + # @!attribute [r] max_queue + # @!macro thread_pool_executor_attr_reader_max_queue + # + # @!attribute [r] length + # @!macro thread_pool_executor_attr_reader_length + # + # @!attribute [r] queue_length + # @!macro thread_pool_executor_attr_reader_queue_length + # + # @!attribute [r] remaining_capacity + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # + # @!method can_overflow? + # @!macro executor_service_method_can_overflow_question + + + + + # @!macro thread_pool_options + # + # **Thread Pool Options** + # + # Thread pools support several configuration options: + # + # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. + # * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and + # a `-worker-` name is given to its threads if supported by used Ruby + # implementation. `` is uniq for each thread. + # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at + # any one time. When the queue size reaches `max_queue` and no new threads can be created, + # subsequent tasks will be rejected in accordance with the configured `fallback_policy`. + # * `auto_terminate`: When true (default), the threads started will be marked as daemon. + # * `fallback_policy`: The policy defining how rejected tasks are handled. 
+ # + # Three fallback policies are supported: + # + # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. + # * `:discard`: Discard the task and return false. + # * `:caller_runs`: Execute the task on the calling thread. + # + # **Shutting Down Thread Pools** + # + # Killing a thread pool while tasks are still being processed, either by calling + # the `#kill` method or at application exit, will have unpredictable results. There + # is no way for the thread pool to know what resources are being used by the + # in-progress tasks. When those tasks are killed the impact on those resources + # cannot be predicted. The *best* practice is to explicitly shutdown all thread + # pools using the provided methods: + # + # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks + # * Call `#wait_for_termination` with an appropriate timeout interval an allow + # the orderly shutdown to complete + # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time + # + # On some runtime platforms (most notably the JVM) the application will not + # exit until all thread pools have been shutdown. To prevent applications from + # "hanging" on exit, all threads can be marked as daemon according to the + # `:auto_terminate` option. + # + # ```ruby + # pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon + # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon + # ``` + # + # @note Failure to properly shutdown a thread pool can lead to unpredictable results. + # Please read *Shutting Down Thread Pools* for more information. + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface + # @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean- + + + + + + # @!macro fixed_thread_pool + # + # A thread pool that reuses a fixed number of threads operating off an unbounded queue. + # At any point, at most `num_threads` will be active processing tasks. When all threads are busy new + # tasks `#post` to the thread pool are enqueued until a thread becomes available. + # Should a thread crash for any reason the thread will immediately be removed + # from the pool and replaced. + # + # The API and behavior of this class are based on Java's `FixedThreadPool` + # + # @!macro thread_pool_options + class FixedThreadPool < ThreadPoolExecutor + + # @!macro fixed_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Integer] num_threads the number of threads to allocate + # @param [Hash] opts the options defining pool behavior. 
+ # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `num_threads` is less than or equal to zero + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int- + def initialize(num_threads, opts = {}) + raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1 + defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE, + idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: num_threads, + max_threads: num_threads } + super(defaults.merge(opts).merge(overrides)) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb new file mode 100644 index 0000000..282df7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb @@ -0,0 +1,66 @@ +require 'concurrent/atomic/event' +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/serial_executor_service' + +module Concurrent + + # An executor service which runs all operations on the current thread, + # blocking as necessary. Operations are performed in the order they are + # received and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used + # it immediately runs every `#post` operation on the current thread, blocking + # that thread until the operation is complete. This can be very beneficial + # during testing because it makes all operations deterministic. + # + # @note Intended for use primarily in testing and debugging. + class ImmediateExecutor < AbstractExecutorService + include SerialExecutorService + + # Creates a new executor + def initialize + @stopped = Concurrent::Event.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + task.call(*args) + true + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + ! shutdown? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + false + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? 
+ end + + # @!macro executor_service_method_shutdown + def shutdown + @stopped.set + true + end + alias_method :kill, :shutdown + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb new file mode 100644 index 0000000..4f9769f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb @@ -0,0 +1,44 @@ +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/simple_executor_service' + +module Concurrent + # An executor service which runs all operations on a new thread, blocking + # until it completes. Operations are performed in the order they are received + # and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used it + # immediately runs every `#post` operation on a new thread, blocking the + # current thread until the operation is complete. This is similar to how the + # ImmediateExecutor works, but the operation has the full stack of the new + # thread at its disposal. This can be helpful when the operations will spawn + # more operations on the same executor and so on - such a situation might + # overflow the single stack in case of an ImmediateExecutor, which is + # inconsistent with how it would behave for a threaded executor. + # + # @note Intended for use primarily in testing and debugging. + class IndirectImmediateExecutor < ImmediateExecutor + # Creates a new executor + def initialize + super + @internal_executor = SimpleExecutorService.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new("no block given") unless block_given? + return false unless running? + + event = Concurrent::Event.new + @internal_executor.post do + begin + task.call(*args) + ensure + event.set + end + end + event.wait + + true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb new file mode 100644 index 0000000..9c0f310 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb @@ -0,0 +1,103 @@ +if Concurrent.on_jruby? + + require 'concurrent/errors' + require 'concurrent/utility/engine' + require 'concurrent/executor/abstract_executor_service' + + module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaExecutorService < AbstractExecutorService + java_import 'java.lang.Runnable' + + FALLBACK_POLICY_CLASSES = { + abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy, + discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy, + caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy + }.freeze + private_constant :FALLBACK_POLICY_CLASSES + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return handle_fallback(*args, &task) unless running? 
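+      # Editor's note (not part of the upstream source): the task and its arguments
+      # are wrapped in a Job -- a java.lang.Runnable -- and handed to the underlying
+      # Java executor; a Java RejectedExecutionException is translated into
+      # Concurrent::RejectedExecutionError by the rescue clause below.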
+ @executor.submit Job.new(args, task) + true + rescue Java::JavaUtilConcurrent::RejectedExecutionException + raise RejectedExecutionError + end + + def wait_for_termination(timeout = nil) + if timeout.nil? + ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok + true + else + @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + + def shutdown + synchronize do + @executor.shutdown + nil + end + end + + def kill + synchronize do + @executor.shutdownNow + nil + end + end + + private + + def ns_running? + !(ns_shuttingdown? || ns_shutdown?) + end + + def ns_shuttingdown? + if @executor.respond_to? :isTerminating + @executor.isTerminating + else + false + end + end + + def ns_shutdown? + @executor.isShutdown || @executor.isTerminated + end + + class Job + include Runnable + def initialize(args, block) + @args = args + @block = block + end + + def run + @block.call(*@args) + end + end + private_constant :Job + end + + class DaemonThreadFactory + # hide include from YARD + send :include, java.util.concurrent.ThreadFactory + + def initialize(daemonize = true) + @daemonize = daemonize + end + + def newThread(runnable) + thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable) + thread.setDaemon(@daemonize) + return thread + end + end + + private_constant :DaemonThreadFactory + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb new file mode 100644 index 0000000..7aa24f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb @@ -0,0 +1,30 @@ +if Concurrent.on_jruby? + + require 'concurrent/executor/java_executor_service' + require 'concurrent/executor/serial_executor_service' + + module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaSingleThreadExecutor < JavaExecutorService + include SerialExecutorService + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + private + + def ns_initialize(opts) + @executor = java.util.concurrent.Executors.newSingleThreadExecutor( + DaemonThreadFactory.new(ns_auto_terminate?) + ) + @fallback_policy = opts.fetch(:fallback_policy, :discard) + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb new file mode 100644 index 0000000..e670663 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb @@ -0,0 +1,136 @@ +if Concurrent.on_jruby? 
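+  # Editor's note (not part of the upstream source): this Java-backed pool is only
+  # defined when running on JRuby; on other Ruby implementations
+  # Concurrent::ThreadPoolExecutor resolves to the pure-Ruby RubyThreadPoolExecutor
+  # instead (see thread_pool_executor.rb later in this diff).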
+ + require 'concurrent/executor/java_executor_service' + + module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class JavaThreadPoolExecutor < JavaExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647 + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + @max_queue != 0 + end + + # @!macro thread_pool_executor_attr_reader_min_length + def min_length + @executor.getCorePoolSize + end + + # @!macro thread_pool_executor_attr_reader_max_length + def max_length + @executor.getMaximumPoolSize + end + + # @!macro thread_pool_executor_attr_reader_length + def length + @executor.getPoolSize + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + @executor.getLargestPoolSize + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + @executor.getTaskCount + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + @executor.getCompletedTaskCount + end + + # @!macro thread_pool_executor_attr_reader_idletime + def idletime + @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS) + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + @executor.getQueue.size + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + @max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity + end + + # @!macro executor_service_method_running_question + def running? 
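+        # Editor's note (not part of the upstream source): in addition to the checks
+        # in JavaExecutorService, the pool stops reporting itself as running as soon
+        # as the underlying Java executor has started terminating.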
+ super && !@executor.isTerminating + end + + private + + def ns_initialize(opts) + min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy) + + if @max_queue == 0 + if @synchronous + queue = java.util.concurrent.SynchronousQueue.new + else + queue = java.util.concurrent.LinkedBlockingQueue.new + end + else + queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue) + end + + @executor = java.util.concurrent.ThreadPoolExecutor.new( + min_length, + max_length, + idletime, + java.util.concurrent.TimeUnit::SECONDS, + queue, + DaemonThreadFactory.new(ns_auto_terminate?), + FALLBACK_POLICY_CLASSES[@fallback_policy].new) + + end + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb new file mode 100644 index 0000000..06658d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb @@ -0,0 +1,76 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/atomic/event' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubyExecutorService < AbstractExecutorService + safe_initialization! + + def initialize(*args, &block) + super + @StopEvent = Event.new + @StoppedEvent = Event.new + end + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + synchronize do + # If the executor is shut down, reject this task + return handle_fallback(*args, &task) unless running? + ns_execute(*args, &task) + true + end + end + + def shutdown + synchronize do + break unless running? + stop_event.set + ns_shutdown_execution + end + true + end + + def kill + synchronize do + break if shutdown? + stop_event.set + ns_kill_execution + stopped_event.set + end + true + end + + def wait_for_termination(timeout = nil) + stopped_event.wait(timeout) + end + + private + + def stop_event + @StopEvent + end + + def stopped_event + @StoppedEvent + end + + def ns_shutdown_execution + stopped_event.set + end + + def ns_running? + !stop_event.set? + end + + def ns_shuttingdown? + !(ns_running? || ns_shutdown?) + end + + def ns_shutdown? + stopped_event.set? 
+ end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb new file mode 100644 index 0000000..916337d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb @@ -0,0 +1,21 @@ +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubySingleThreadExecutor < RubyThreadPoolExecutor + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super( + min_threads: 1, + max_threads: 1, + max_queue: 0, + idletime: DEFAULT_THREAD_IDLETIMEOUT, + fallback_policy: opts.fetch(:fallback_policy, :discard), + ) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb new file mode 100644 index 0000000..dc20d76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb @@ -0,0 +1,377 @@ +require 'thread' +require 'concurrent/atomic/event' +require 'concurrent/concern/logging' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class RubyThreadPoolExecutor < RubyExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_min_length + attr_reader :min_length + + # @!macro thread_pool_executor_attr_reader_idletime + attr_reader :idletime + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + synchronize { @largest_length } + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + synchronize { @scheduled_task_count } + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + synchronize { @completed_task_count } + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + synchronize { ns_limited_queue? 
} + end + + # @!macro thread_pool_executor_attr_reader_length + def length + synchronize { @pool.length } + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + synchronize { @queue.length } + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + synchronize do + if ns_limited_queue? + @max_queue - @queue.length + else + -1 + end + end + end + + # @!visibility private + def remove_busy_worker(worker) + synchronize { ns_remove_busy_worker worker } + end + + # @!visibility private + def ready_worker(worker) + synchronize { ns_ready_worker worker } + end + + # @!visibility private + def worker_not_old_enough(worker) + synchronize { ns_worker_not_old_enough worker } + end + + # @!visibility private + def worker_died(worker) + synchronize { ns_worker_died worker } + end + + # @!visibility private + def worker_task_completed + synchronize { @completed_task_count += 1 } + end + + private + + # @!visibility private + def ns_initialize(opts) + @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy) + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + + @pool = [] # all workers + @ready = [] # used as a stash (most idle worker is at the start) + @queue = [] # used as queue + # @ready or @queue is empty at all times + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ # detects if Ruby has forked + + @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + # @!visibility private + def ns_limited_queue? + @max_queue != 0 + end + + # @!visibility private + def ns_execute(*args, &task) + ns_reset_if_forked + + if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task) + @scheduled_task_count += 1 + else + handle_fallback(*args, &task) + end + + ns_prune_pool if @next_gc_time < Concurrent.monotonic_time + end + + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + + if @pool.empty? + # nothing to do + stopped_event.set + end + + if @queue.empty? + # no more tasks will be accepted, just stop all workers + @pool.each(&:stop) + end + end + + # @!visibility private + def ns_kill_execution + # TODO log out unprocessed tasks in queue + # TODO try to shutdown first? 
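+      # Editor's note (not part of the upstream source): killing the pool forcibly
+      # terminates every worker thread and clears the worker bookkeeping; tasks still
+      # sitting in @queue are never executed.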
+ @pool.each(&:kill) + @pool.clear + @ready.clear + end + + # tries to assign task to a worker, tries to get one from @ready or to create new one + # @return [true, false] if task is assigned to a worker + # + # @!visibility private + def ns_assign_worker(*args, &task) + # keep growing if the pool is not at the minimum yet + worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker + if worker + worker << [task, args] + true + else + false + end + rescue ThreadError + # Raised when the operating system refuses to create the new thread + return false + end + + # tries to enqueue task + # @return [true, false] if enqueued + # + # @!visibility private + def ns_enqueue(*args, &task) + return false if @synchronous + + if !ns_limited_queue? || @queue.size < @max_queue + @queue << [task, args] + true + else + false + end + end + + # @!visibility private + def ns_worker_died(worker) + ns_remove_busy_worker worker + replacement_worker = ns_add_busy_worker + ns_ready_worker replacement_worker, false if replacement_worker + end + + # creates new worker which has to receive work to do after it's added + # @return [nil, Worker] nil of max capacity is reached + # + # @!visibility private + def ns_add_busy_worker + return if @pool.size >= @max_length + + @workers_counter += 1 + @pool << (worker = Worker.new(self, @workers_counter)) + @largest_length = @pool.length if @pool.length > @largest_length + worker + end + + # handle ready worker, giving it new job or assigning back to @ready + # + # @!visibility private + def ns_ready_worker(worker, success = true) + task_and_args = @queue.shift + if task_and_args + worker << task_and_args + else + # stop workers when !running?, do not return them to @ready + if running? + @ready.push(worker) + else + worker.stop + end + end + end + + # returns back worker to @ready which was not idle for enough time + # + # @!visibility private + def ns_worker_not_old_enough(worker) + # let's put workers coming from idle_test back to the start (as the oldest worker) + @ready.unshift(worker) + true + end + + # removes a worker which is not in not tracked in @ready + # + # @!visibility private + def ns_remove_busy_worker(worker) + @pool.delete(worker) + stopped_event.set if @pool.empty? && !running? 
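+      # Editor's note (not part of the upstream source): when the last worker is
+      # removed after shutdown has been requested, the stopped event is set above so
+      # that wait_for_termination can return.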
+ true + end + + # try oldest worker if it is idle for enough time, it's returned back at the start + # + # @!visibility private + def ns_prune_pool + return if @pool.size <= @min_length + + last_used = @ready.shift + last_used << :idle_test if last_used + + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + def ns_reset_if_forked + if $$ != @ruby_pid + @queue.clear + @ready.clear + @pool.clear + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ + end + end + + # @!visibility private + class Worker + include Concern::Logging + + def initialize(pool, id) + # instance variables accessed only under pool's lock so no need to sync here again + @queue = Queue.new + @pool = pool + @thread = create_worker @queue, pool, pool.idletime + + if @thread.respond_to?(:name=) + @thread.name = [pool.name, 'worker', id].compact.join('-') + end + end + + def <<(message) + @queue << message + end + + def stop + @queue << :stop + end + + def kill + @thread.kill + end + + private + + def create_worker(queue, pool, idletime) + Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime| + last_message = Concurrent.monotonic_time + catch(:stop) do + loop do + + case message = my_queue.pop + when :idle_test + if (Concurrent.monotonic_time - last_message) > my_idletime + my_pool.remove_busy_worker(self) + throw :stop + else + my_pool.worker_not_old_enough(self) + end + + when :stop + my_pool.remove_busy_worker(self) + throw :stop + + else + task, args = message + run_task my_pool, task, args + last_message = Concurrent.monotonic_time + + my_pool.ready_worker(self) + end + end + end + end + end + + def run_task(pool, task, args) + task.call(*args) + pool.worker_task_completed + rescue => ex + # let it fail + log DEBUG, ex + rescue Exception => ex + log ERROR, ex + pool.worker_died(self) + throw :stop + end + end + + private_constant :Worker + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb new file mode 100644 index 0000000..414aa64 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb @@ -0,0 +1,35 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A simple utility class that executes a callable and returns and array of three elements: + # success - indicating if the callable has been executed without errors + # value - filled by the callable result if it has been executed without errors, nil otherwise + # reason - the error risen by the callable if it has been executed with errors, nil otherwise + class SafeTaskExecutor < Synchronization::LockableObject + + def initialize(task, opts = {}) + @task = task + @exception_class = opts.fetch(:rescue_exception, false) ? 
Exception : StandardError + super() # ensures visibility + end + + # @return [Array] + def execute(*args) + synchronize do + success = false + value = reason = nil + + begin + value = @task.call(*args) + success = true + rescue @exception_class => ex + reason = ex + success = false + end + + [success, value, reason] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb new file mode 100644 index 0000000..f1c38ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb @@ -0,0 +1,34 @@ +require 'concurrent/executor/executor_service' + +module Concurrent + + # Indicates that the including `ExecutorService` guarantees + # that all operations will occur in the order they are post and that no + # two operations may occur simultaneously. This module provides no + # functionality and provides no guarantees. That is the responsibility + # of the including class. This module exists solely to allow the including + # object to be interrogated for its serialization status. + # + # @example + # class Foo + # include Concurrent::SerialExecutor + # end + # + # foo = Foo.new + # + # foo.is_a? Concurrent::ExecutorService #=> true + # foo.is_a? Concurrent::SerialExecutor #=> true + # foo.serialized? #=> true + # + # @!visibility private + module SerialExecutorService + include ExecutorService + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `true` + def serialized? + true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb new file mode 100644 index 0000000..d314e90 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb @@ -0,0 +1,107 @@ +require 'concurrent/errors' +require 'concurrent/concern/logging' +require 'concurrent/synchronization' + +module Concurrent + + # Ensures passed jobs in a serialized order never running at the same time. + class SerializedExecution < Synchronization::LockableObject + include Concern::Logging + + def initialize() + super() + synchronize { ns_initialize } + end + + Job = Struct.new(:executor, :args, :block) do + def call + block.call(*args) + end + end + + # Submit a task to the executor for asynchronous processing. + # + # @param [Executor] executor to be used for this job + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + def post(executor, *args, &task) + posts [[executor, args, task]] + true + end + + # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not + # be interleaved by other tasks. + # + # @param [Array, Proc)>] posts array of triplets where + # first is a {ExecutorService}, second is array of args for task, third is a task (Proc) + def posts(posts) + # if can_overflow? 
+ # raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow' + # end + + return nil if posts.empty? + + jobs = posts.map { |executor, args, task| Job.new executor, args, task } + + job_to_post = synchronize do + if @being_executed + @stash.push(*jobs) + nil + else + @being_executed = true + @stash.push(*jobs[1..-1]) + jobs.first + end + end + + call_job job_to_post if job_to_post + true + end + + private + + def ns_initialize + @being_executed = false + @stash = [] + end + + def call_job(job) + did_it_run = begin + job.executor.post { work(job) } + true + rescue RejectedExecutionError => ex + false + end + + # TODO not the best idea to run it myself + unless did_it_run + begin + work job + rescue => ex + # let it fail + log DEBUG, ex + end + end + end + + # ensures next job is executed if any is stashed + def work(job) + job.call + ensure + synchronize do + job = @stash.shift || (@being_executed = false) + end + + # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end + # of this block + call_job job if job + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb new file mode 100644 index 0000000..8197781 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb @@ -0,0 +1,28 @@ +require 'delegate' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' + +module Concurrent + + # A wrapper/delegator for any `ExecutorService` that + # guarantees serialized execution of tasks. + # + # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html) + # @see Concurrent::SerializedExecution + class SerializedExecutionDelegator < SimpleDelegator + include SerialExecutorService + + def initialize(executor) + @executor = executor + @serializer = SerializedExecution.new + super(executor) + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @serializer.post(@executor, *args, &task) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb new file mode 100644 index 0000000..b87fed5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb @@ -0,0 +1,100 @@ +require 'concurrent/atomics' +require 'concurrent/executor/executor_service' + +module Concurrent + + # An executor service in which every operation spawns a new, + # independently operating thread. + # + # This is perhaps the most inefficient executor service in this + # library. It exists mainly for testing an debugging. Thread creation + # and management is expensive in Ruby and this executor performs no + # resource pooling. This can be very beneficial during testing and + # debugging because it decouples the using code from the underlying + # executor implementation. In production this executor will likely + # lead to suboptimal performance. 
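+  # A minimal usage sketch (editor's addition, not part of the upstream docs;
+  # assumes the gem has been loaded with `require 'concurrent'`):
+  #
+  #   executor = Concurrent::SimpleExecutorService.new
+  #   executor.post(:hello) { |msg| puts msg }  # each task gets its own new thread
+  #   executor.shutdown
+  #   executor.wait_for_termination(1)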
+ # + # @note Intended for use primarily in testing and debugging. + class SimpleExecutorService < RubyExecutorService + + # @!macro executor_service_method_post + def self.post(*args) + raise ArgumentError.new('no block given') unless block_given? + Thread.new(*args) do + Thread.current.abort_on_exception = false + yield(*args) + end + true + end + + # @!macro executor_service_method_left_shift + def self.<<(task) + post(&task) + self + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @count.increment + Thread.new(*args) do + Thread.current.abort_on_exception = false + begin + yield(*args) + ensure + @count.decrement + @stopped.set if @running.false? && @count.value == 0 + end + end + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + @running.true? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + @running.false? && ! @stopped.set? + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? + end + + # @!macro executor_service_method_shutdown + def shutdown + @running.make_false + @stopped.set if @count.value == 0 + true + end + + # @!macro executor_service_method_kill + def kill + @running.make_false + @stopped.set + true + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + + private + + def ns_initialize(*args) + @running = Concurrent::AtomicBoolean.new(true) + @stopped = Concurrent::Event.new + @count = Concurrent::AtomicFixnum.new(0) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb new file mode 100644 index 0000000..f1474ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb @@ -0,0 +1,57 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_single_thread_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_single_thread_executor' + end + + SingleThreadExecutorImplementation = case + when Concurrent.on_jruby? + JavaSingleThreadExecutor + else + RubySingleThreadExecutor + end + private_constant :SingleThreadExecutorImplementation + + # @!macro single_thread_executor + # + # A thread pool with a single thread an unlimited queue. Should the thread + # die for any reason it will be removed and replaced, thus ensuring that + # the executor will always remain viable and available to process jobs. + # + # A common pattern for background processing is to create a single thread + # on which an infinite loop is run. The thread's loop blocks on an input + # source (perhaps blocking I/O or a queue) and processes each input as it + # is received. This pattern has several issues. The thread itself is highly + # susceptible to errors during processing. Also, the thread itself must be + # constantly monitored and restarted should it die. `SingleThreadExecutor` + # encapsulates all these bahaviors. The task processor is highly resilient + # to errors from within tasks. Also, should the thread die it will + # automatically be restarted. 
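+  # A minimal usage sketch (editor's addition, not part of the upstream docs;
+  # `do_background_work` is a placeholder for application code):
+  #
+  #   executor = Concurrent::SingleThreadExecutor.new
+  #   executor.post { do_background_work }  # tasks run one at a time, in order
+  #   executor.shutdown
+  #   executor.wait_for_termination(5)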
+ # + # The API and behavior of this class are based on Java's `SingleThreadExecutor`. + # + # @!macro abstract_executor_service_public_api + class SingleThreadExecutor < SingleThreadExecutorImplementation + + # @!macro single_thread_executor_method_initialize + # + # Create a new thread pool. + # + # @option opts [Symbol] :fallback_policy (:discard) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html + + # @!method initialize(opts = {}) + # @!macro single_thread_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb new file mode 100644 index 0000000..253d46a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb @@ -0,0 +1,88 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_thread_pool_executor' + end + + ThreadPoolExecutorImplementation = case + when Concurrent.on_jruby? + JavaThreadPoolExecutor + else + RubyThreadPoolExecutor + end + private_constant :ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor + # + # An abstraction composed of one or more threads and a task queue. Tasks + # (blocks or `proc` objects) are submitted to the pool and added to the queue. + # The threads in the pool remove the tasks and execute them in the order + # they were received. + # + # A `ThreadPoolExecutor` will automatically adjust the pool size according + # to the bounds set by `min-threads` and `max-threads`. When a new task is + # submitted and fewer than `min-threads` threads are running, a new thread + # is created to handle the request, even if other worker threads are idle. + # If there are more than `min-threads` but less than `max-threads` threads + # running, a new thread will be created only if the queue is full. + # + # Threads that are idle for too long will be garbage collected, down to the + # configured minimum options. Should a thread crash it, too, will be garbage collected. + # + # `ThreadPoolExecutor` is based on the Java class of the same name. From + # the official Java documentation; + # + # > Thread pools address two different problems: they usually provide + # > improved performance when executing large numbers of asynchronous tasks, + # > due to reduced per-task invocation overhead, and they provide a means + # > of bounding and managing the resources, including threads, consumed + # > when executing a collection of tasks. Each ThreadPoolExecutor also + # > maintains some basic statistics, such as the number of completed tasks. + # > + # > To be useful across a wide range of contexts, this class provides many + # > adjustable parameters and extensibility hooks. 
However, programmers are + # > urged to use the more convenient Executors factory methods + # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), + # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single + # > background thread), that preconfigure settings for the most common usage + # > scenarios. + # + # @!macro thread_pool_options + # + # @!macro thread_pool_executor_public_api + class ThreadPoolExecutor < ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options which configure the thread pool. + # + # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum + # number of threads to be created + # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted + # and fewer than `min_threads` are running, a new thread is created + # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum + # number of seconds a thread may be idle before being reclaimed + # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum + # number of tasks allowed in the work queue at any one time; a value of + # zero means the queue may grow without bound + # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0 + # for :max_queue means the queue must perform direct hand-off rather than unbounded. + # @raise [ArgumentError] if `:max_threads` is less than one + # @raise [ArgumentError] if `:min_threads` is less than zero + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html + + # @!method initialize(opts = {}) + # @!macro thread_pool_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/timer_set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/timer_set.rb new file mode 100644 index 0000000..0dfaf12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executor/timer_set.rb @@ -0,0 +1,172 @@ +require 'concurrent/scheduled_task' +require 'concurrent/atomic/event' +require 'concurrent/collection/non_concurrent_priority_queue' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/single_thread_executor' + +require 'concurrent/options' + +module Concurrent + + # Executes a collection of tasks, each after a given delay. A master task + # monitors the set and schedules each task for execution at the appropriate + # time. Tasks are run on the global thread pool or on the supplied executor. + # Each task is represented as a `ScheduledTask`. + # + # @see Concurrent::ScheduledTask + # + # @!macro monotonic_clock_warning + class TimerSet < RubyExecutorService + + # Create a new set of timed tasks. + # + # @!macro executor_options + # + # @param [Hash] opts the options used to specify the executor on which to perform actions + # @option opts [Executor] :executor when set use the given `Executor` instance. 
+ # Three special values are also supported: `:task` returns the global task pool, + # `:operation` returns the global operation pool, and `:immediate` returns a new + # `ImmediateExecutor` object. + def initialize(opts = {}) + super(opts) + end + + # Post a task to be execute run after a given delay (in seconds). If the + # delay is less than 1/100th of a second the task will be immediately post + # to the executor. + # + # @param [Float] delay the number of seconds to wait for before executing the task. + # @param [Array] args the arguments passed to the task on execution. + # + # @yield the task to be performed. + # + # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post + # is successful; false after shutdown. + # + # @raise [ArgumentError] if the intended execution time is not in the future. + # @raise [ArgumentError] if no block is given. + def post(delay, *args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + opts = { executor: @task_executor, + args: args, + timer_set: self } + task = ScheduledTask.execute(delay, opts, &task) # may raise exception + task.unscheduled? ? false : task + end + + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + def kill + shutdown + end + + private :<< + + private + + # Initialize the object. + # + # @param [Hash] opts the options to create the object with. + # @!visibility private + def ns_initialize(opts) + @queue = Collection::NonConcurrentPriorityQueue.new(order: :min) + @task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @timer_executor = SingleThreadExecutor.new + @condition = Event.new + @ruby_pid = $$ # detects if Ruby has forked + end + + # Post the task to the internal queue. + # + # @note This is intended as a callback method from ScheduledTask + # only. It is not intended to be used directly. Post a task + # by using the `SchedulesTask#execute` method. + # + # @!visibility private + def post_task(task) + synchronize { ns_post_task(task) } + end + + # @!visibility private + def ns_post_task(task) + return false unless ns_running? + ns_reset_if_forked + if (task.initial_delay) <= 0.01 + task.executor.post { task.process_task } + else + @queue.push(task) + # only post the process method when the queue is empty + @timer_executor.post(&method(:process_tasks)) if @queue.length == 1 + @condition.set + end + true + end + + # Remove the given task from the queue. + # + # @note This is intended as a callback method from `ScheduledTask` + # only. It is not intended to be used directly. Cancel a task + # by using the `ScheduledTask#cancel` method. + # + # @!visibility private + def remove_task(task) + synchronize { @queue.delete(task) } + end + + # `ExecutorService` callback called during shutdown. + # + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + @queue.clear + @timer_executor.kill + stopped_event.set + end + + def ns_reset_if_forked + if $$ != @ruby_pid + @queue.clear + @condition.reset + @ruby_pid = $$ + end + end + + # Run a loop and execute tasks in the scheduled order and at the approximate + # scheduled time. If no tasks remain the thread will exit gracefully so that + # garbage collection can occur. If there are no ready tasks it will sleep + # for up to 60 seconds waiting for the next scheduled task. 
+ # + # @!visibility private + def process_tasks + loop do + task = synchronize { @condition.reset; @queue.peek } + break unless task + + now = Concurrent.monotonic_time + diff = task.schedule_time - now + + if diff <= 0 + # We need to remove the task from the queue before passing + # it to the executor, to avoid race conditions where we pass + # the peek'ed task to the executor and then pop a different + # one that's been added in the meantime. + # + # Note that there's no race condition between the peek and + # this pop - this pop could retrieve a different task from + # the peek, but that task would be due to fire now anyway + # (because @queue is a priority queue, and this thread is + # the only reader, so whatever timer is at the head of the + # queue now must have the same pop time, or a closer one, as + # when we peeked). + task = synchronize { @queue.pop } + task.executor.post { task.process_task } + else + @condition.wait([diff, 60].min) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executors.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executors.rb new file mode 100644 index 0000000..eb1972c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/executors.rb @@ -0,0 +1,20 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/fixed_thread_pool' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/indirect_immediate_executor' +require 'concurrent/executor/java_executor_service' +require 'concurrent/executor/java_single_thread_executor' +require 'concurrent/executor/java_thread_pool_executor' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/ruby_single_thread_executor' +require 'concurrent/executor/ruby_thread_pool_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' +require 'concurrent/executor/serialized_execution_delegator' +require 'concurrent/executor/single_thread_executor' +require 'concurrent/executor/thread_pool_executor' +require 'concurrent/executor/timer_set' diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/future.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/future.rb new file mode 100644 index 0000000..1af182e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/future.rb @@ -0,0 +1,141 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc. + + +module Concurrent + + # {include:file:docs-source/future.md} + # + # @!macro copy_options + # + # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module + # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future + class Future < IVar + + # Create a new `Future` in the `:unscheduled` state. 
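+    # Illustrative sketch (editor's addition, not part of the upstream docs):
+    # construct an unscheduled future and start it explicitly with #execute.
+    #
+    #   future = Concurrent::Future.new(args: [6, 7]) { |a, b| a * b }
+    #   future.execute
+    #   future.value  #=> 42 (blocks until the task has run)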
+ # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(NULL, opts.merge(__task_from_block__: block), &nil) + end + + # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Future` is in any state other than `:unscheduled`. + # + # @return [Future] a reference to `self` + # + # @example Instance and execute in separate steps + # future = Concurrent::Future.new{ sleep(1); 42 } + # future.state #=> :unscheduled + # future.execute + # future.state #=> :pending + # + # @example Instance and execute in one line + # future = Concurrent::Future.new{ sleep(1); 42 }.execute + # future.state #=> :pending + def execute + if compare_and_set_state(:pending, :unscheduled) + @executor.post{ safe_execute(@task, @args) } + self + end + end + + # Create a new `Future` object with the given block, execute it, and return the + # `:pending` object. + # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + # + # @return [Future] the newly created `Future` in the `:pending` state + # + # @example + # future = Concurrent::Future.execute{ sleep(1); 42 } + # future.state #=> :pending + def self.execute(opts = {}, &block) + Future.new(opts, &block).execute + end + + # @!macro ivar_set_method + def set(value = NULL, &block) + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @task = block || Proc.new { value } + end + end + execute + end + + # Attempt to cancel the operation if it has not already processed. + # The operation can only be cancelled while still `pending`. It cannot + # be cancelled once it has begun processing or has completed. + # + # @return [Boolean] was the operation successfully cancelled. + def cancel + if compare_and_set_state(:cancelled, :pending) + complete(false, nil, CancelledOperationError.new) + true + else + false + end + end + + # Has the operation been successfully cancelled? + # + # @return [Boolean] + def cancelled? + state == :cancelled + end + + # Wait the given number of seconds for the operation to complete. + # On timeout attempt to cancel the operation. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Boolean] true if the operation completed before the timeout + # else false + def wait_or_cancel(timeout) + wait(timeout) + if complete? 
+ true + else + cancel + false + end + end + + protected + + def ns_initialize(value, opts) + super + @state = :unscheduled + @task = opts[:__task_from_block__] + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/hash.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/hash.rb new file mode 100644 index 0000000..92df66b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/hash.rb @@ -0,0 +1,59 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_hash + # + # A thread-safe subclass of Hash. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`, + # which takes the lock repeatedly when reading an item. + # + # @see http://ruby-doc.org/core/Hash.html Ruby standard library `Hash` + + # @!macro internal_implementation_note + HashImplementation = case + when Concurrent.on_cruby? + # Hash is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Hash + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyHash < ::Hash + include JRuby::Synchronized + end + JRubyHash + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxHash < ::Hash + end + ThreadSafe::Util.make_synchronized_on_rbx RbxHash + RbxHash + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyHash < ::Hash + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash + TruffleRubyHash + + else + warn 'Possibly unsupported Ruby implementation' + ::Hash + end + private_constant :HashImplementation + + # @!macro concurrent_hash + class Hash < HashImplementation + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/immutable_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/immutable_struct.rb new file mode 100644 index 0000000..d275595 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/immutable_struct.rb @@ -0,0 +1,101 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # A thread-safe, immutable variation of Ruby's standard `Struct`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module ImmutableStruct + include Synchronization::AbstractStruct + + def self.included(base) + base.safe_initialization! 
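+      # Editor's note (a hedged reading, not from the upstream docs):
+      # safe_initialization! is understood to make construction of the including
+      # struct class publication-safe, so an immutable instance can be shared
+      # between threads immediately after #new returns.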
+ end + + # @!macro struct_values + def values + ns_values + end + + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + ns_values_at(indexes) + end + + # @!macro struct_inspect + def inspect + ns_inspect + end + + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + ns_merge(other, &block) + end + + # @!macro struct_to_h + def to_h + ns_to_h + end + + # @!macro struct_get + def [](member) + ns_get(member) + end + + # @!macro struct_equality + def ==(other) + ns_equality(other) + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + ns_each(&block) + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + ns_each_pair(&block) + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + ns_select(&block) + end + + private + + # @!visibility private + def initialize_copy(original) + super(original) + ns_initialize_copy + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block) + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/ivar.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/ivar.rb new file mode 100644 index 0000000..2a724db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/ivar.rb @@ -0,0 +1,207 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/obligation' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # An `IVar` is like a future that you can assign. As a future is a value that + # is being computed that you can wait on, an `IVar` is a value that is waiting + # to be assigned, that you can wait on. `IVars` are single assignment and + # deterministic. + # + # Then, express futures as an asynchronous computation that assigns an `IVar`. + # The `IVar` becomes the primitive on which [futures](Future) and + # [dataflow](Dataflow) are built. + # + # An `IVar` is a single-element container that is normally created empty, and + # can only be set once. The I in `IVar` stands for immutable. Reading an + # `IVar` normally blocks until it is set. It is safe to set and read an `IVar` + # from different threads. + # + # If you want to have some parallel task set the value in an `IVar`, you want + # a `Future`. If you want to create a graph of parallel tasks all executed + # when the values they depend on are ready you want `dataflow`. `IVar` is + # generally a low-level primitive. + # + # ## Examples + # + # Create, set and get an `IVar` + # + # ```ruby + # ivar = Concurrent::IVar.new + # ivar.set 14 + # ivar.value #=> 14 + # ivar.set 2 # would now be an error + # ``` + # + # ## See Also + # + # 1. For the theory: Arvind, R. Nikhil, and K. Pingali. 
+ # [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). + # In Proceedings of Workshop on Graph Reduction, 1986. + # 2. For recent application: + # [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html). + class IVar < Synchronization::LockableObject + include Concern::Obligation + include Concern::Observable + + # Create a new `IVar` in the `:pending` state with the (optional) initial value. + # + # @param [Object] value the initial value + # @param [Hash] opts the options to create a message with + # @option opts [String] :dup_on_deref (false) call `#dup` before returning + # the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before + # returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def initialize(value = NULL, opts = {}, &block) + if value != NULL && block_given? + raise ArgumentError.new('provide only a value or a block') + end + super(&nil) + synchronize { ns_initialize(value, opts, &block) } + end + + # Add an observer on this object that will receive notification on update. + # + # Upon completion the `IVar` will notify all observers in a thread-safe way. + # The `func` method of the observer will be called with three arguments: the + # `Time` at which the `Future` completed the asynchronous operation, the + # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on + # fulfillment). + # + # @param [Object] observer the object that will be notified of changes + # @param [Symbol] func symbol naming the method to call when this + # `Observable` has changes` + def add_observer(observer = nil, func = :update, &block) + raise ArgumentError.new('cannot provide both an observer and a block') if observer && block + direct_notification = false + + if block + observer = block + func = :call + end + + synchronize do + if event.set? + direct_notification = true + else + observers.add_observer(observer, func) + end + end + + observer.send(func, Time.now, self.value, reason) if direct_notification + observer + end + + # @!macro ivar_set_method + # Set the `IVar` to a value and wake or notify all threads waiting on it. + # + # @!macro ivar_set_parameters_and_exceptions + # @param [Object] value the value to store in the `IVar` + # @yield A block operation to use for setting the value + # @raise [ArgumentError] if both a value and a block are given + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # + # @return [IVar] self + def set(value = NULL) + check_for_block_or_value!(block_given?, value) + raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending) + + begin + value = yield if block_given? + complete_without_notification(true, value, nil) + rescue => ex + complete_without_notification(false, nil, ex) + end + + notify_observers(self.value, reason) + self + end + + # @!macro ivar_fail_method + # Set the `IVar` to failed due to some error and wake or notify all threads waiting on it. + # + # @param [Object] reason for the failure + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # @return [IVar] self + def fail(reason = StandardError.new) + complete(false, nil, reason) + end + + # Attempt to set the `IVar` with the given value or block. 
Return a + # boolean indicating the success or failure of the set operation. + # + # @!macro ivar_set_parameters_and_exceptions + # + # @return [Boolean] true if the value was set else false + def try_set(value = NULL, &block) + set(value, &block) + true + rescue MultipleAssignmentError + false + end + + protected + + # @!visibility private + def ns_initialize(value, opts) + value = yield if block_given? + init_obligation + self.observers = Collection::CopyOnWriteObserverSet.new + set_deref_options(opts) + + @state = :pending + if value != NULL + ns_complete_without_notification(true, value, nil) + end + end + + # @!visibility private + def safe_execute(task, args = []) + if compare_and_set_state(:processing, :pending) + success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, val, reason) + yield(success, val, reason) if block_given? + end + end + + # @!visibility private + def complete(success, value, reason) + complete_without_notification(success, value, reason) + notify_observers(self.value, reason) + self + end + + # @!visibility private + def complete_without_notification(success, value, reason) + synchronize { ns_complete_without_notification(success, value, reason) } + self + end + + # @!visibility private + def notify_observers(value, reason) + observers.notify_and_delete_observers{ [Time.now, value, reason] } + end + + # @!visibility private + def ns_complete_without_notification(success, value, reason) + raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state + set_state(success, value, reason) + event.set + end + + # @!visibility private + def check_for_block_or_value!(block_given, value) # :nodoc: + if (block_given && value != NULL) || (! block_given && value == NULL) + raise ArgumentError.new('must set with either a value or a block') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/map.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/map.rb new file mode 100644 index 0000000..00731a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/map.rb @@ -0,0 +1,347 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/synchronization' +require 'concurrent/utility/engine' + +module Concurrent + # @!visibility private + module Collection + + # @!visibility private + MapImplementation = case + when Concurrent.on_jruby? + # noinspection RubyResolve + JRubyMapBackend + when Concurrent.on_cruby? + require 'concurrent/collection/map/mri_map_backend' + MriMapBackend + when Concurrent.on_truffleruby? && defined?(::TruffleRuby::ConcurrentMap) + require 'concurrent/collection/map/truffleruby_map_backend' + TruffleRubyMapBackend + when Concurrent.on_truffleruby? || Concurrent.on_rbx? + require 'concurrent/collection/map/atomic_reference_map_backend' + AtomicReferenceMapBackend + else + warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' + require 'concurrent/collection/map/synchronized_map_backend' + SynchronizedMapBackend + end + end + + # `Concurrent::Map` is a hash-like object and should have much better performance + # characteristics, especially under high concurrency, than `Concurrent::Hash`. + # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` + # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` + # does. 
For most uses it should do fine though, and we recommend you consider + # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. + class Map < Collection::MapImplementation + + # @!macro map.atomic_method + # This method is atomic. + + # @!macro map.atomic_method_with_block + # This method is atomic. + # @note Atomic methods taking a block do not allow the `self` instance + # to be used within the block. Doing so will cause a deadlock. + + # @!method compute_if_absent(key) + # Compute and store new value for key if the key is absent. + # @param [Object] key + # @yield new value + # @yieldreturn [Object] new value + # @return [Object] new value or current value + # @!macro map.atomic_method_with_block + + # @!method compute_if_present(key) + # Compute and store new value for key if the key is present. + # @param [Object] key + # @yield new value + # @yieldparam old_value [Object] + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method compute(key) + # Compute and store new value for key. + # @param [Object] key + # @yield compute new value from old one + # @yieldparam old_value [Object, nil] old_value, or nil when key is absent + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method merge_pair(key, value) + # If the key is absent, the value is stored, otherwise new value is + # computed with a block. + # @param [Object] key + # @param [Object] value + # @yield compute new value from old one + # @yieldparam old_value [Object] old value + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method replace_pair(key, old_value, new_value) + # Replaces old_value with new_value if key exists and current value + # matches old_value + # @param [Object] key + # @param [Object] old_value + # @param [Object] new_value + # @return [true, false] true if replaced + # @!macro map.atomic_method + + # @!method replace_if_exists(key, new_value) + # Replaces current value with new_value if key exists + # @param [Object] key + # @param [Object] new_value + # @return [Object, nil] old value or nil + # @!macro map.atomic_method + + # @!method get_and_set(key, value) + # Get the current value under key and set new value. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete(key) + # Delete key and its value. + # @param [Object] key + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete_pair(key, value) + # Delete pair and its value if current value equals the provided value. 
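+ #   The sketch below is an editorial illustration of the documented behaviour above;
+ #   it is not part of the upstream concurrent-ruby comments.
+ #   @example
+ #     map = Concurrent::Map.new
+ #     map[:a] = 1
+ #     map.delete_pair(:a, 2) #=> false  (current value does not equal 2, nothing deleted)
+ #     map.delete_pair(:a, 1) #=> true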
+ # @param [Object] key + # @param [Object] value + # @return [true, false] true if deleted + # @!macro map.atomic_method + + # + def initialize(options = nil, &block) + if options.kind_of?(::Hash) + validate_options_hash!(options) + else + options = nil + end + + super(options) + @default_proc = block + end + + # Get a value with key + # @param [Object] key + # @return [Object] the value + def [](key) + if value = super # non-falsy value is an existing mapping, return it right away + value + # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call + # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value + # would be returned) + # note: nil == value check is not technically necessary + elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) + @default_proc.call(self, key) + else + value + end + end + + # Set a value with key + # @param [Object] key + # @param [Object] value + # @return [Object] the new value + def []=(key, value) + super + end + + alias_method :get, :[] + alias_method :put, :[]= + + # Get a value with key, or default_value when key is absent, + # or fail when no default_value is given. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @raise [KeyError] when key is missing and no default_value is provided + # @!macro map_method_not_atomic + # @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended + # to be use as a concurrency primitive with strong happens-before + # guarantees. It is not intended to be used as a high-level abstraction + # supporting complex operations. All read and write operations are + # thread safe, but no guarantees are made regarding race conditions + # between the fetch operation and yielding to the block. Additionally, + # this method does not support recursion. This is due to internal + # constraints that are very unlikely to change in the near future. + def fetch(key, default_value = NULL) + if NULL != (value = get_or_default(key, NULL)) + value + elsif block_given? + yield key + elsif NULL != default_value + default_value + else + raise_fetch_no_key + end + end + + # Fetch value with key, or store default value when key is absent, + # or fail when no default_value is given. This is a two step operation, + # therefore not atomic. The store can overwrite other concurrently + # stored value. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @!macro map.atomic_method_with_block + def fetch_or_store(key, default_value = NULL) + fetch(key) do + put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) + end + end + + # Insert value into map with key if key is absent in one atomic step. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] the previous value when key was present or nil when there was no key + def put_if_absent(key, value) + computed = false + result = compute_if_absent(key) do + computed = true + value + end + computed ? 
nil : result + end unless method_defined?(:put_if_absent) + + # Is the value stored in the map. Iterates over all values. + # @param [Object] value + # @return [true, false] + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end + + # All keys + # @return [::Array] keys + def keys + arr = [] + each_pair { |k, v| arr << k } + arr + end unless method_defined?(:keys) + + # All values + # @return [::Array] values + def values + arr = [] + each_pair { |k, v| arr << v } + arr + end unless method_defined?(:values) + + # Iterates over each key. + # @yield for each key in the map + # @yieldparam key [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_key + each_pair { |k, v| yield k } + end unless method_defined?(:each_key) + + # Iterates over each value. + # @yield for each value in the map + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_value + each_pair { |k, v| yield v } + end unless method_defined?(:each_value) + + # Iterates over each key value pair. + # @yield for each key value pair in the map + # @yieldparam key [Object] + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_pair + return enum_for :each_pair unless block_given? + super + end + + alias_method :each, :each_pair unless method_defined?(:each) + + # Find key of a value. + # @param [Object] value + # @return [Object, nil] key or nil when not found + def key(value) + each_pair { |k, v| return k if v == value } + nil + end unless method_defined?(:key) + alias_method :index, :key if RUBY_VERSION < '1.9' + + # Is map empty? + # @return [true, false] + def empty? + each_pair { |k, v| return false } + true + end unless method_defined?(:empty?) + + # The size of map. + # @return [Integer] size + def size + count = 0 + each_pair { |k, v| count += 1 } + count + end unless method_defined?(:size) + + # @!visibility private + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair { |k, v| h[k] = v } + h + end + + # @!visibility private + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + # @!visibility private + def inspect + format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect + end + + private + + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair { |k, v| self[k] = v } + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive Integer" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/maybe.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/maybe.rb new file mode 100644 index 0000000..7ba3d3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/maybe.rb @@ -0,0 +1,229 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A `Maybe` encapsulates an optional value. 
A `Maybe` either contains a value + # of (represented as `Just`), or it is empty (represented as `Nothing`). Using + # `Maybe` is a good way to deal with errors or exceptional cases without + # resorting to drastic measures such as exceptions. + # + # `Maybe` is a replacement for the use of `nil` with better type checking. + # + # For compatibility with {Concurrent::Concern::Obligation} the predicate and + # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and + # `reason`. + # + # ## Motivation + # + # A common pattern in languages with pattern matching, such as Erlang and + # Haskell, is to return *either* a value *or* an error from a function + # Consider this Erlang code: + # + # ```erlang + # case file:consult("data.dat") of + # {ok, Terms} -> do_something_useful(Terms); + # {error, Reason} -> lager:error(Reason) + # end. + # ``` + # + # In this example the standard library function `file:consult` returns a + # [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044) + # with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134) + # (similar to a ruby symbol) and a variable containing ancillary data. On + # success it returns the atom `ok` and the data from the file. On failure it + # returns `error` and a string with an explanation of the problem. With this + # pattern there is no ambiguity regarding success or failure. If the file is + # empty the return value cannot be misinterpreted as an error. And when an + # error occurs the return value provides useful information. + # + # In Ruby we tend to return `nil` when an error occurs or else we raise an + # exception. Both of these idioms are problematic. Returning `nil` is + # ambiguous because `nil` may also be a valid value. It also lacks + # information pertaining to the nature of the error. Raising an exception + # is both expensive and usurps the normal flow of control. All of these + # problems can be solved with the use of a `Maybe`. + # + # A `Maybe` is unambiguous with regard to whether or not it contains a value. + # When `Just` it contains a value, when `Nothing` it does not. When `Just` + # the value it contains may be `nil`, which is perfectly valid. When + # `Nothing` the reason for the lack of a value is contained as well. The + # previous Erlang example can be duplicated in Ruby in a principled way by + # having functions return `Maybe` objects: + # + # ```ruby + # result = MyFileUtils.consult("data.dat") # returns a Maybe + # if result.just? + # do_something_useful(result.value) # or result.just + # else + # logger.error(result.reason) # or result.nothing + # end + # ``` + # + # @example Returning a Maybe from a Function + # module MyFileUtils + # def self.consult(path) + # file = File.open(path, 'r') + # Concurrent::Maybe.just(file.read) + # rescue => ex + # return Concurrent::Maybe.nothing(ex) + # ensure + # file.close if file + # end + # end + # + # maybe = MyFileUtils.consult('bogus.file') + # maybe.just? #=> false + # maybe.nothing? #=> true + # maybe.reason #=> # + # + # maybe = MyFileUtils.consult('README.md') + # maybe.just? #=> true + # maybe.nothing? #=> false + # maybe.value #=> "# Concurrent Ruby\n[![Gem Version..." + # + # @example Using Maybe with a Block + # result = Concurrent::Maybe.from do + # Client.find(10) # Client is an ActiveRecord model + # end + # + # # -- if the record was found + # result.just? #=> true + # result.value #=> # + # + # # -- if the record was not found + # result.just? 
#=> false + # result.reason #=> ActiveRecord::RecordNotFound + # + # @example Using Maybe with the Null Object Pattern + # # In a Rails controller... + # result = ClientService.new(10).find # returns a Maybe + # render json: result.or(NullClient.new) + # + # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe + # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe + class Maybe < Synchronization::Object + include Comparable + safe_initialization! + + # Indicates that the given attribute has not been set. + # When `Just` the {#nothing} getter will return `NONE`. + # When `Nothing` the {#just} getter will return `NONE`. + NONE = ::Object.new.freeze + + # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`. + attr_reader :just + + # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`. + attr_reader :nothing + + private_class_method :new + + # Create a new `Maybe` using the given block. + # + # Runs the given block passing all function arguments to the block as block + # arguments. If the block runs to completion without raising an exception + # a new `Just` is created with the value set to the return value of the + # block. If the block raises an exception a new `Nothing` is created with + # the reason being set to the raised exception. + # + # @param [Array] args Zero or more arguments to pass to the block. + # @yield The block from which to create a new `Maybe`. + # @yieldparam [Array] args Zero or more block arguments passed as + # arguments to the function. + # + # @return [Maybe] The newly created object. + # + # @raise [ArgumentError] when no block given. + def self.from(*args) + raise ArgumentError.new('no block given') unless block_given? + begin + value = yield(*args) + return new(value, NONE) + rescue => ex + return new(NONE, ex) + end + end + + # Create a new `Just` with the given value. + # + # @param [Object] value The value to set for the new `Maybe` object. + # + # @return [Maybe] The newly created object. + def self.just(value) + return new(value, NONE) + end + + # Create a new `Nothing` with the given (optional) reason. + # + # @param [Exception] error The reason to set for the new `Maybe` object. + # When given a string a new `StandardError` will be created with the + # argument as the message. When no argument is given a new + # `StandardError` with an empty message will be created. + # + # @return [Maybe] The newly created object. + def self.nothing(error = '') + if error.is_a?(Exception) + nothing = error + else + nothing = StandardError.new(error.to_s) + end + return new(NONE, nothing) + end + + # Is this `Maybe` a `Just` (successfully fulfilled with a value)? + # + # @return [Boolean] True if `Just` or false if `Nothing`. + def just? + ! nothing? + end + alias :fulfilled? :just? + + # Is this `Maybe` a `nothing` (rejected with an exception upon fulfillment)? + # + # @return [Boolean] True if `Nothing` or false if `Just`. + def nothing? + @nothing != NONE + end + alias :rejected? :nothing? + + alias :value :just + + alias :reason :nothing + + # Comparison operator. + # + # @return [Integer] 0 if self and other are both `Nothing`; + # -1 if self is `Nothing` and other is `Just`; + # 1 if self is `Just` and other is nothing; + # `self.just <=> other.just` if both self and other are `Just`. + def <=>(other) + if nothing? + other.nothing? ? 0 : -1 + else + other.nothing? ? 
1 : just <=> other.just + end + end + + # Return either the value of self or the given default value. + # + # @return [Object] The value of self when `Just`; else the given default. + def or(other) + just? ? just : other + end + + private + + # Create a new `Maybe` with the given attributes. + # + # @param [Object] just The value when `Just` else `NONE`. + # @param [Exception, Object] nothing The exception when `Nothing` else `NONE`. + # + # @return [Maybe] The new `Maybe`. + # + # @!visibility private + def initialize(just, nothing) + @just = just + @nothing = nothing + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/mutable_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/mutable_struct.rb new file mode 100644 index 0000000..55361e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/mutable_struct.rb @@ -0,0 +1,239 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe variation of Ruby's standard `Struct`. Values can be set at + # construction or safely changed at any time during the object's lifecycle. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module MutableStruct + include Synchronization::AbstractStruct + + # @!macro struct_new + # + # Factory for creating new struct classes. + # + # ``` + # new([class_name] [, member_name]+>) -> StructClass click to toggle source + # new([class_name] [, member_name]+>) {|StructClass| block } -> StructClass + # new(value, ...) -> obj + # StructClass[value, ...] -> obj + # ``` + # + # The first two forms are used to create a new struct subclass `class_name` + # that can contain a value for each member_name . This subclass can be + # used to create instances of the structure like any other Class . + # + # If the `class_name` is omitted an anonymous struct class will be created. + # Otherwise, the name of this struct will appear as a constant in the struct class, + # so it must be unique for all structs under this base class and must start with a + # capital letter. Assigning a struct class to a constant also gives the class + # the name of the constant. + # + # If a block is given it will be evaluated in the context of `StructClass`, passing + # the created class as a parameter. This is the recommended way to customize a struct. + # Subclassing an anonymous struct creates an extra anonymous class that will never be used. + # + # The last two forms create a new instance of a struct subclass. The number of value + # parameters must be less than or equal to the number of attributes defined for the + # struct. Unset parameters default to nil. Passing more parameters than number of attributes + # will raise an `ArgumentError`. + # + # @see http://ruby-doc.org/core/Struct.html#method-c-new Ruby standard library `Struct#new` + + # @!macro struct_values + # + # Returns the values for this struct as an Array. + # + # @return [Array] the values for this struct + # + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + # + # Returns the struct member values for each selector as an Array. + # + # A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`). 
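+ #
+ # The sketch below is an editorial illustration of the behaviour described above and is
+ # not part of the upstream comments; `Point` is a hypothetical struct class.
+ #
+ # @example
+ #   Point = Concurrent::MutableStruct.new(:x, :y, :z)
+ #   point = Point.new(1, 2, 3)
+ #   point.values_at(0, 2) #=> [1, 3]
+ #   point.values_at(0..1) #=> [1, 2]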
+ # + # @param [Fixnum, Range] indexes the index(es) from which to obatin the values (in order) + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + # + # Describe the contents of this struct in a string. + # + # @return [String] the contents of this struct in a string + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + # + # Returns a new struct containing the contents of `other` and the contents + # of `self`. If no block is specified, the value for entries with duplicate + # keys will be that of `other`. Otherwise the value for each duplicate key + # is determined by calling the block with the key, its value in `self` and + # its value in `other`. + # + # @param [Hash] other the hash from which to set the new values + # @yield an options block for resolving duplicate keys + # @yieldparam [String, Symbol] member the name of the member which is duplicated + # @yieldparam [Object] selfvalue the value of the member in `self` + # @yieldparam [Object] othervalue the value of the member in `other` + # + # @return [Synchronization::AbstractStruct] a new struct with the new values + # + # @raise [ArgumentError] of given a member that is not defined in the struct + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + # + # Returns a hash containing the names and values for the struct’s members. + # + # @return [Hash] the names and values for the struct’s members + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + # + # Attribute Reference + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the member does not exist + # @raise [IndexError] if the index is out of range. + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + # + # Equality + # + # @return [Boolean] true if other has the same struct subclass and has + # equal member values (according to `Object#==`) + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + # + # Yields the value of each struct member in order. If no block is given + # an enumerator is returned. + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + # + # Yields the name and value of each struct member in order. If no block is + # given an enumerator is returned. + # + # @yield the operation to be performed on each struct member/value pair + # @yieldparam [Object] member each struct member (in order) + # @yieldparam [Object] value each struct value (in order) + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + # + # Yields each member value from the struct to the block and returns an Array + # containing the member values from the struct for which the given block + # returns a true value (equivalent to `Enumerable#select`). 
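+ #
+ # An editorial usage sketch (not part of the upstream comments); `Point` is a
+ # hypothetical struct class.
+ #
+ # @example
+ #   Point = Concurrent::MutableStruct.new(:x, :y, :z)
+ #   Point.new(1, 2, 3).select { |v| v.odd? } #=> [1, 3]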
+ # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + # + # @return [Array] an array containing each value for which the block returns true + def select(&block) + return enum_for(:select) unless block_given? + synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # Attribute Assignment + # + # Sets the value of the given struct member or the member at the given index. + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the name does not exist + # @raise [IndexError] if the index is out of range. + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize { @values[member] = value } + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize { @values[index] = value } + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/mvar.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/mvar.rb new file mode 100644 index 0000000..9034711 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/mvar.rb @@ -0,0 +1,242 @@ +require 'concurrent/concern/dereferenceable' +require 'concurrent/synchronization' + +module Concurrent + + # An `MVar` is a synchronized single element container. They are empty or + # contain one item. Taking a value from an empty `MVar` blocks, as does + # putting a value into a full one. You can either think of them as blocking + # queue of length one, or a special kind of mutable variable. + # + # On top of the fundamental `#put` and `#take` operations, we also provide a + # `#mutate` that is atomic with respect to operations on the same instance. + # These operations all support timeouts. + # + # We also support non-blocking operations `#try_put!` and `#try_take!`, a + # `#set!` that ignores existing values, a `#value` that returns the value + # without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields + # `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`. 
+ # You shouldn't use these operations in the first instance. + # + # `MVar` is a [Dereferenceable](Dereferenceable). + # + # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala. + # + # Note that unlike the original Haskell paper, our `#take` is blocking. This is how + # Haskell and Scala do it today. + # + # @!macro copy_options + # + # ## See Also + # + # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non- strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th + # ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991. + # + # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). + # In Proceedings of the 23rd Symposium on Principles of Programming Languages + # (PoPL), 1996. + class MVar < Synchronization::Object + include Concern::Dereferenceable + safe_initialization! + + # Unique value that represents that an `MVar` was empty + EMPTY = ::Object.new + + # Unique value that represents that an `MVar` timed out before it was able + # to produce a value. + TIMEOUT = ::Object.new + + # Create a new `MVar`, either empty or with an initial value. + # + # @param [Hash] opts the options controlling how the future will be processed + # + # @!macro deref_options + def initialize(value = EMPTY, opts = {}) + @value = value + @mutex = Mutex.new + @empty_condition = ConditionVariable.new + @full_condition = ConditionVariable.new + set_deref_options(opts) + end + + # Remove the value from an `MVar`, leaving it empty, and blocking if there + # isn't a value. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was taken, or `TIMEOUT` + def take(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # acquires lock on the from an `MVAR`, yields the value to provided block, + # and release lock. A timeout can be set to limit the time spent blocked, + # in which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value returned by the block, or `TIMEOUT` + def borrow(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # if we timeoud out we'll still be empty + if unlocked_full? + yield @value + else + TIMEOUT + end + end + end + + # Put a value into an `MVar`, blocking if there is already a value until + # it is empty. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was put, or `TIMEOUT` + def put(value, timeout = nil) + @mutex.synchronize do + wait_for_empty(timeout) + + # If we timed out we won't be empty + if unlocked_empty? + @value = value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Atomically `take`, yield the value to a block for transformation, and then + # `put` the transformed value. Returns the transformed value. A timeout can + # be set to limit the time spent blocked, in which case it returns `TIMEOUT` + # if the time is exceeded. + # @return [Object] the transformed value, or `TIMEOUT` + def modify(timeout = nil) + raise ArgumentError.new('no block given') unless block_given? 
+ + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = yield value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Non-blocking version of `take`, that returns `EMPTY` instead of blocking. + def try_take! + @mutex.synchronize do + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + EMPTY + end + end + end + + # Non-blocking version of `put`, that returns whether or not it was successful. + def try_put!(value) + @mutex.synchronize do + if unlocked_empty? + @value = value + @full_condition.signal + true + else + false + end + end + end + + # Non-blocking version of `put` that will overwrite an existing value. + def set!(value) + @mutex.synchronize do + old_value = @value + @value = value + @full_condition.signal + apply_deref_options(old_value) + end + end + + # Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet. + def modify! + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + value = @value + @value = yield value + if unlocked_empty? + @empty_condition.signal + else + @full_condition.signal + end + apply_deref_options(value) + end + end + + # Returns if the `MVar` is currently empty. + def empty? + @mutex.synchronize { @value == EMPTY } + end + + # Returns if the `MVar` currently contains a value. + def full? + !empty? + end + + protected + + def synchronize(&block) + @mutex.synchronize(&block) + end + + private + + def unlocked_empty? + @value == EMPTY + end + + def unlocked_full? + ! unlocked_empty? + end + + def wait_for_full(timeout) + wait_while(@full_condition, timeout) { unlocked_empty? } + end + + def wait_for_empty(timeout) + wait_while(@empty_condition, timeout) { unlocked_full? } + end + + def wait_while(condition, timeout) + if timeout.nil? + while yield + condition.wait(@mutex) + end + else + stop = Concurrent.monotonic_time + timeout + while yield && timeout > 0.0 + condition.wait(@mutex, timeout) + timeout = stop - Concurrent.monotonic_time + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/options.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/options.rb new file mode 100644 index 0000000..bdd22a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/options.rb @@ -0,0 +1,42 @@ +require 'concurrent/configuration' + +module Concurrent + + # @!visibility private + module Options + + # Get the requested `Executor` based on the values set in the options hash. + # + # @param [Hash] opts the options defining the requested executor + # @option opts [Executor] :executor when set use the given `Executor` instance. + # Three special values are also supported: `:fast` returns the global fast executor, + # `:io` returns the global io executor, and `:immediate` returns a new + # `ImmediateExecutor` object. 
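+ #
+ # The example below is an editorial sketch of the mapping described above; it is
+ # not part of the upstream comments.
+ #
+ # @example
+ #   Concurrent::Options.executor_from_options(executor: :io)  # returns Concurrent.global_io_executor
+ #   Concurrent::Options.executor_from_options({})             #=> nil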
+ # + # @return [Executor, nil] the requested thread pool, or nil when no option specified + # + # @!visibility private + def self.executor_from_options(opts = {}) # :nodoc: + if identifier = opts.fetch(:executor, nil) + executor(identifier) + else + nil + end + end + + def self.executor(executor_identifier) + case executor_identifier + when :fast + Concurrent.global_fast_executor + when :io + Concurrent.global_io_executor + when :immediate + Concurrent.global_immediate_executor + when Concurrent::ExecutorService + executor_identifier + else + raise ArgumentError, "executor not recognized by '#{executor_identifier}'" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/promise.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/promise.rb new file mode 100644 index 0000000..e917791 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/promise.rb @@ -0,0 +1,580 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +module Concurrent + + PromiseExecutionError = Class.new(StandardError) + + # Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A) + # and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications. + # + # > A promise represents the eventual value returned from the single + # > completion of an operation. + # + # Promises are similar to futures and share many of the same behaviours. + # Promises are far more robust, however. Promises can be chained in a tree + # structure where each promise may have zero or more children. Promises are + # chained using the `then` method. The result of a call to `then` is always + # another promise. Promises are resolved asynchronously (with respect to the + # main thread) but in a strict order: parents are guaranteed to be resolved + # before their children, children before their younger siblings. The `then` + # method takes two parameters: an optional block to be executed upon parent + # resolution and an optional callable to be executed upon parent failure. The + # result of each promise is passed to each of its children upon resolution. + # When a promise is rejected all its children will be summarily rejected and + # will receive the reason. + # + # Promises have several possible states: *:unscheduled*, *:pending*, + # *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as + # `#incomplete?` and `#complete?`. When a Promise is created it is set to + # *:unscheduled*. Once the `#execute` method is called the state becomes + # *:pending*. Once a job is pulled from the thread pool's queue and is given + # to a thread for processing (often immediately upon `#post`) the state + # becomes *:processing*. The future will remain in this state until processing + # is complete. A future that is in the *:unscheduled*, *:pending*, or + # *:processing* is considered `#incomplete?`. A `#complete?` Promise is either + # *:rejected*, indicating that an exception was thrown during processing, or + # *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value` + # will be updated to reflect the result of the operation. If *:rejected* the + # `reason` will be updated with a reference to the thrown exception. 
The + # predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and + # `#fulfilled?` can be called at any time to obtain the state of the Promise, + # as can the `#state` method, which returns a symbol. + # + # Retrieving the value of a promise is done through the `value` (alias: + # `deref`) method. Obtaining the value of a promise is a potentially blocking + # operation. When a promise is *rejected* a call to `value` will return `nil` + # immediately. When a promise is *fulfilled* a call to `value` will + # immediately return the current value. When a promise is *pending* a call to + # `value` will block until the promise is either *rejected* or *fulfilled*. A + # *timeout* value can be passed to `value` to limit how long the call will + # block. If `nil` the call will block indefinitely. If `0` the call will not + # block. Any other integer or float value will indicate the maximum number of + # seconds to block. + # + # Promises run on the global thread pool. + # + # @!macro copy_options + # + # ### Examples + # + # Start by requiring promises + # + # ```ruby + # require 'concurrent' + # ``` + # + # Then create one + # + # ```ruby + # p = Concurrent::Promise.execute do + # # do something + # 42 + # end + # ``` + # + # Promises can be chained using the `then` method. The `then` method accepts a + # block and an executor, to be executed on fulfillment, and a callable argument to be executed + # on rejection. The result of the each promise is passed as the block argument + # to chained promises. + # + # ```ruby + # p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute + # ``` + # + # And so on, and so on, and so on... + # + # ```ruby + # p = Concurrent::Promise.fulfill(20). + # then{|result| result - 10 }. + # then{|result| result * 3 }. + # then(executor: different_executor){|result| result % 5 }.execute + # ``` + # + # The initial state of a newly created Promise depends on the state of its parent: + # - if parent is *unscheduled* the child will be *unscheduled* + # - if parent is *pending* the child will be *pending* + # - if parent is *fulfilled* the child will be *pending* + # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*) + # + # Promises are executed asynchronously from the main thread. By the time a + # child Promise finishes intialization it may be in a different state than its + # parent (by the time a child is created its parent may have completed + # execution and changed state). Despite being asynchronous, however, the order + # of execution of Promise objects in a chain (or tree) is strictly defined. + # + # There are multiple ways to create and execute a new `Promise`. Both ways + # provide identical behavior: + # + # ```ruby + # # create, operate, then execute + # p1 = Concurrent::Promise.new{ "Hello World!" } + # p1.state #=> :unscheduled + # p1.execute + # + # # create and immediately execute + # p2 = Concurrent::Promise.new{ "Hello World!" }.execute + # + # # execute during creation + # p3 = Concurrent::Promise.execute{ "Hello World!" } + # ``` + # + # Once the `execute` method is called a `Promise` becomes `pending`: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # p.state #=> :pending + # p.pending? #=> true + # ``` + # + # Wait a little bit, and the promise will resolve and provide a value: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # sleep(0.1) + # + # p.state #=> :fulfilled + # p.fulfilled? #=> true + # p.value #=> "Hello, world!" 
+ # ``` + # + # If an exception occurs, the promise will be rejected and will provide + # a reason for the rejection: + # + # ```ruby + # p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") } + # sleep(0.1) + # + # p.state #=> :rejected + # p.rejected? #=> true + # p.reason #=> "#" + # ``` + # + # #### Rejection + # + # When a promise is rejected all its children will be rejected and will + # receive the rejection `reason` as the rejection callable parameter: + # + # ```ruby + # p = Concurrent::Promise.execute { Thread.pass; raise StandardError } + # + # c1 = p.then(-> reason { 42 }) + # c2 = p.then(-> reason { raise 'Boom!' }) + # + # c1.wait.state #=> :fulfilled + # c1.value #=> 45 + # c2.wait.state #=> :rejected + # c2.reason #=> # + # ``` + # + # Once a promise is rejected it will continue to accept children that will + # receive immediately rejection (they will be executed asynchronously). + # + # #### Aliases + # + # The `then` method is the most generic alias: it accepts a block to be + # executed upon parent fulfillment and a callable to be executed upon parent + # rejection. At least one of them should be passed. The default block is `{ + # |result| result }` that fulfills the child with the parent value. The + # default callable is `{ |reason| raise reason }` that rejects the child with + # the parent reason. + # + # - `on_success { |result| ... }` is the same as `then {|result| ... }` + # - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )` + # - `rescue` is aliased by `catch` and `on_error` + class Promise < IVar + + # Initialize a new Promise with the provided options. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @option opts [Promise] :parent the parent `Promise` when building a chain/tree + # @option opts [Proc] :on_fulfill fulfillment handler + # @option opts [Proc] :on_reject rejection handler + # @option opts [object, Array] :args zero or more arguments to be passed + # the task block on execution + # + # @yield The block operation to be performed asynchronously. + # + # @raise [ArgumentError] if no block is given + # + # @see http://wiki.commonjs.org/wiki/Promises/A + # @see http://promises-aplus.github.io/promises-spec/ + def initialize(opts = {}, &block) + opts.delete_if { |k, v| v.nil? } + super(NULL, opts.merge(__promise_body_from_block__: block), &nil) + end + + # Create a new `Promise` and fulfill it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.fulfill(value, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) } + end + + # Create a new `Promise` and reject it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.reject(reason, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) } + end + + # Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Promise` is in any state other than `:unscheduled`. + # + # @return [Promise] a reference to `self` + def execute + if root? 
+ if compare_and_set_state(:pending, :unscheduled) + set_pending + realize(@promise_body) + end + else + compare_and_set_state(:pending, :unscheduled) + @parent.execute + end + self + end + + # @!macro ivar_set_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def set(value = NULL, &block) + raise PromiseExecutionError.new('supported only on root promise') unless root? + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @promise_body = block || Proc.new { |result| value } + end + end + execute + end + + # @!macro ivar_fail_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def fail(reason = StandardError.new) + set { raise reason } + end + + # Create a new `Promise` object with the given block, execute it, and return the + # `:pending` object. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @return [Promise] the newly created `Promise` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + # + # @example + # promise = Concurrent::Promise.execute{ sleep(1); 42 } + # promise.state #=> :pending + def self.execute(opts = {}, &block) + new(opts, &block).execute + end + + # Chain a new promise off the current promise. + # + # @return [Promise] the new promise + # @yield The block operation to be performed asynchronously. + # @overload then(rescuer, executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + # @overload then(rescuer, executor: executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + def then(*args, &block) + if args.last.is_a?(::Hash) + executor = args.pop[:executor] + rescuer = args.first + else + rescuer, executor = args + end + + executor ||= @executor + + raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given? + block = Proc.new { |result| result } unless block_given? + child = Promise.new( + parent: self, + executor: executor, + on_fulfill: block, + on_reject: rescuer + ) + + synchronize do + child.state = :pending if @state == :pending + child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled + child.on_reject(@reason) if @state == :rejected + @children << child + end + + child + end + + # Chain onto this promise an action to be undertaken on success + # (fulfillment). + # + # @yield The block to execute + # + # @return [Promise] self + def on_success(&block) + raise ArgumentError.new('no block given') unless block_given? + self.then(&block) + end + + # Chain onto this promise an action to be undertaken on failure + # (rejection). + # + # @yield The block to execute + # + # @return [Promise] self + def rescue(&block) + self.then(block) + end + + alias_method :catch, :rescue + alias_method :on_error, :rescue + + # Yield the successful result to the block that returns a promise. If that + # promise is also successful the result is the result of the yielded promise. + # If either part fails the whole also fails. + # + # @example + # Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! 
#=> 3 + # + # @return [Promise] + def flat_map(&block) + child = Promise.new( + parent: self, + executor: ImmediateExecutor.new, + ) + + on_error { |e| child.on_reject(e) } + on_success do |result1| + begin + inner = block.call(result1) + inner.execute + inner.on_success { |result2| child.on_fulfill(result2) } + inner.on_error { |e| child.on_reject(e) } + rescue => e + child.on_reject(e) + end + end + + child + end + + # Builds a promise that produces the result of promises in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] promises + # + # @overload zip(*promises, opts) + # @param [Array] promises + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def self.zip(*promises) + opts = promises.last.is_a?(::Hash) ? promises.pop.dup : {} + opts[:executor] ||= ImmediateExecutor.new + zero = if !opts.key?(:execute) || opts.delete(:execute) + fulfill([], opts) + else + Promise.new(opts) { [] } + end + + promises.reduce(zero) do |p1, p2| + p1.flat_map do |results| + p2.then do |next_result| + results << next_result + end + end + end + end + + # Builds a promise that produces the result of self and others in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] others + # + # @overload zip(*promises, opts) + # @param [Array] others + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def zip(*others) + self.class.zip(self, *others) + end + + # Aggregates a collection of promises and executes the `then` condition + # if all aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. + # + # @!macro promise_self_aggregate + # + # The returned promise will not yet have been executed. Additional `#then` + # and `#rescue` handlers may still be provided. Once the returned promise + # is execute the aggregate promises will be also be executed (if they have + # not been executed already). The results of the aggregate promises will + # be checked upon completion. The necessary `#then` and `#rescue` blocks + # on the aggregating promise will then be executed as appropriate. If the + # `#rescue` handlers are executed the raises exception will be + # `Concurrent::PromiseExecutionError`. + # + # @param [Array] promises Zero or more promises to aggregate + # @return [Promise] an unscheduled (not executed) promise that aggregates + # the promises given as arguments + def self.all?(*promises) + aggregate(:all?, *promises) + end + + # Aggregates a collection of promises and executes the `then` condition + # if any aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. 
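+ #
+ # A minimal usage sketch; the promise names are illustrative, and the
+ # rejection handler would receive a `Concurrent::PromiseExecutionError`
+ # if the predicate failed.
+ #
+ # @example
+ #   first  = Concurrent::Promise.new { 1 }
+ #   second = Concurrent::Promise.new { raise 'nope' }
+ #
+ #   Concurrent::Promise.any?(first, second).
+ #     then { :at_least_one_fulfilled }.
+ #     rescue { |reason| :none_fulfilled }.
+ #     execute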
+ # + # @!macro promise_self_aggregate + def self.any?(*promises) + aggregate(:any?, *promises) + end + + protected + + def ns_initialize(value, opts) + super + + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + + @parent = opts.fetch(:parent) { nil } + @on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } } + @on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } } + + @promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result } + @state = :unscheduled + @children = [] + end + + # Aggregate a collection of zero or more promises under a composite promise, + # execute the aggregated promises and collect them into a standard Ruby array, + # call the given Ruby `Ennnumerable` predicate (such as `any?`, `all?`, `none?`, + # or `one?`) on the collection checking for the success or failure of each, + # then executing the composite's `#then` handlers if the predicate returns + # `true` or executing the composite's `#rescue` handlers if the predicate + # returns false. + # + # @!macro promise_self_aggregate + def self.aggregate(method, *promises) + composite = Promise.new do + completed = promises.collect do |promise| + promise.execute if promise.unscheduled? + promise.wait + promise + end + unless completed.empty? || completed.send(method){|promise| promise.fulfilled? } + raise PromiseExecutionError + end + end + composite + end + + # @!visibility private + def set_pending + synchronize do + @state = :pending + @children.each { |c| c.set_pending } + end + end + + # @!visibility private + def root? # :nodoc: + @parent.nil? + end + + # @!visibility private + def on_fulfill(result) + realize Proc.new { @on_fulfill.call(result) } + nil + end + + # @!visibility private + def on_reject(reason) + realize Proc.new { @on_reject.call(reason) } + nil + end + + # @!visibility private + def notify_child(child) + if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) } + if_state(:rejected) { child.on_reject(@reason) } + end + + # @!visibility private + def complete(success, value, reason) + children_to_notify = synchronize do + set_state!(success, value, reason) + @children.dup + end + + children_to_notify.each { |child| notify_child(child) } + observers.notify_and_delete_observers{ [Time.now, self.value, reason] } + end + + # @!visibility private + def realize(task) + @executor.post do + success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, value, reason) + end + end + + # @!visibility private + def set_state!(success, value, reason) + set_state(success, value, reason) + event.set + end + + # @!visibility private + def synchronized_set_state!(success, value, reason) + synchronize { set_state!(success, value, reason) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/promises.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/promises.rb new file mode 100644 index 0000000..76af4d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/promises.rb @@ -0,0 +1,2167 @@ +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/collection/lock_free_stack' +require 'concurrent/errors' +require 'concurrent/re_include' + +module Concurrent + + # {include:file:docs-source/promises-main.md} + 
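#
+ # A quick orientation sketch for the factory methods defined below
+ # (names and values are illustrative only):
+ #
+ #     Concurrent::Promises.future { 1 }.then { |v| v + 1 }.value! #=> 2
+ #
+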
module Promises + + # @!macro promises.param.default_executor + # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the + # global executor. Default executor propagates to chained futures unless overridden with + # executor parameter or changed with {AbstractEventFuture#with_default_executor}. + # + # @!macro promises.param.executor + # @param [Executor, :io, :fast] executor Instance of an executor or a name of the + # global executor. The task is executed on it, default executor remains unchanged. + # + # @!macro promises.param.args + # @param [Object] args arguments which are passed to the task when it's executed. + # (It might be prepended with other arguments, see the @yeild section). + # + # @!macro promises.shortcut.on + # Shortcut of {#$0_on} with default `:io` executor supplied. + # @see #$0_on + # + # @!macro promises.shortcut.using + # Shortcut of {#$0_using} with default `:io` executor supplied. + # @see #$0_using + # + # @!macro promise.param.task-future + # @yieldreturn will become result of the returned Future. + # Its returned value becomes {Future#value} fulfilling it, + # raised exception becomes {Future#reason} rejecting it. + # + # @!macro promise.param.callback + # @yieldreturn is forgotten. + + # Container of all {Future}, {Event} factory methods. They are never constructed directly with + # new. + module FactoryMethods + extend ReInclude + extend self + + module Configuration + # @return [Executor, :io, :fast] the executor which is used when none is supplied + # to a factory method. The method can be overridden in the receivers of + # `include FactoryMethod` + def default_executor + :io + end + end + + include Configuration + + # @!macro promises.shortcut.on + # @return [ResolvableEvent] + def resolvable_event + resolvable_event_on default_executor + end + + # Created resolvable event, user is responsible for resolving the event once by + # {Promises::ResolvableEvent#resolve}. + # + # @!macro promises.param.default_executor + # @return [ResolvableEvent] + def resolvable_event_on(default_executor = self.default_executor) + ResolvableEventPromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [ResolvableFuture] + def resolvable_future + resolvable_future_on default_executor + end + + # Creates resolvable future, user is responsible for resolving the future once by + # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill}, + # or {Promises::ResolvableFuture#reject} + # + # @!macro promises.param.default_executor + # @return [ResolvableFuture] + def resolvable_future_on(default_executor = self.default_executor) + ResolvableFuturePromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def future(*args, &task) + future_on(default_executor, *args, &task) + end + + # Constructs new Future which will be resolved after block is evaluated on default executor. + # Evaluation begins immediately. + # + # @!macro promises.param.default_executor + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + def future_on(default_executor, *args, &task) + ImmediateEventPromise.new(default_executor).future.then(*args, &task) + end + + # Creates resolved future with will be either fulfilled with the given value or rejection with + # the given reason. 
+ # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promises.param.default_executor + # @return [Future] + def resolved_future(fulfilled, value, reason, default_executor = self.default_executor) + ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future + end + + # Creates resolved future with will be fulfilled with the given value. + # + # @!macro promises.param.default_executor + # @param [Object] value + # @return [Future] + def fulfilled_future(value, default_executor = self.default_executor) + resolved_future true, value, nil, default_executor + end + + # Creates resolved future with will be rejected with the given reason. + # + # @!macro promises.param.default_executor + # @param [Object] reason + # @return [Future] + def rejected_future(reason, default_executor = self.default_executor) + resolved_future false, nil, reason, default_executor + end + + # Creates resolved event. + # + # @!macro promises.param.default_executor + # @return [Event] + def resolved_event(default_executor = self.default_executor) + ImmediateEventPromise.new(default_executor).event + end + + # General constructor. Behaves differently based on the argument's type. It's provided for convenience + # but it's better to be explicit. + # + # @see rejected_future, resolved_event, fulfilled_future + # @!macro promises.param.default_executor + # @return [Event, Future] + # + # @overload make_future(nil, default_executor = self.default_executor) + # @param [nil] nil + # @return [Event] resolved event. + # + # @overload make_future(a_future, default_executor = self.default_executor) + # @param [Future] a_future + # @return [Future] a future which will be resolved when a_future is. + # + # @overload make_future(an_event, default_executor = self.default_executor) + # @param [Event] an_event + # @return [Event] an event which will be resolved when an_event is. + # + # @overload make_future(exception, default_executor = self.default_executor) + # @param [Exception] exception + # @return [Future] a rejected future with the exception as its reason. + # + # @overload make_future(value, default_executor = self.default_executor) + # @param [Object] value when none of the above overloads fits + # @return [Future] a fulfilled future with the value. + def make_future(argument = nil, default_executor = self.default_executor) + case argument + when AbstractEventFuture + # returning wrapper would change nothing + argument + when Exception + rejected_future argument, default_executor + when nil + resolved_event default_executor + else + fulfilled_future argument, default_executor + end + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def delay(*args, &task) + delay_on default_executor, *args, &task + end + + # Creates new event or future which is resolved only after it is touched, + # see {Concurrent::AbstractEventFuture#touch}. + # + # @!macro promises.param.default_executor + # @overload delay_on(default_executor, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload delay_on(default_executor) + # If no task is provided, it returns an {Event} + # @return [Event] + def delay_on(default_executor, *args, &task) + event = DelayPromise.new(default_executor).event + task ? 
event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def schedule(intended_time, *args, &task) + schedule_on default_executor, intended_time, *args, &task + end + + # Creates new event or future which is resolved in intended_time. + # + # @!macro promises.param.default_executor + # @!macro promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. + # @overload schedule_on(default_executor, intended_time, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload schedule_on(default_executor, intended_time) + # If no task is provided, it returns an {Event} + # @return [Event] + def schedule_on(default_executor, intended_time, *args, &task) + event = ScheduledPromise.new(default_executor, intended_time).event + task ? event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. + # @!macro promises.event-conversion + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # @!macro promises.shortcut.on + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on default_executor, *futures_and_or_events + end + + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on default_executor, *futures_and_or_events + end + + alias_method :any, :any_resolved_future + + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. + # @!macro promises.any-touch + # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. 
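+ #
+ # A minimal sketch (illustrative; which future wins depends on scheduling):
+ #
+ # @example
+ #   slow = Concurrent::Promises.future { sleep 0.2; :slow }
+ #   fast = Concurrent::Promises.future { :fast }
+ #   Concurrent::Promises.any(slow, fast).value! #=> :fast (typically)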
+ # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_event(*futures_and_or_events) + any_event_on default_executor, *futures_and_or_events + end + + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + # TODO or rather a generic aggregator taking a function + end + + module InternalStates + # @!visibility private + class State + def resolved? + raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + # @!visibility private + class Pending < State + def resolved? + false + end + + def to_sym + :pending + end + end + + # @!visibility private + class Reserved < Pending + end + + # @!visibility private + class ResolvedWithResult < State + def resolved? + true + end + + def to_sym + :resolved + end + + def result + [fulfilled?, value, reason] + end + + def fulfilled? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + # @!visibility private + class Fulfilled < ResolvedWithResult + + def initialize(value) + @Value = value + end + + def fulfilled? + true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :fulfilled + end + end + + # @!visibility private + class FulfilledArray < Fulfilled + def apply(args, block) + block.call(*value, *args) + end + end + + # @!visibility private + class Rejected < ResolvedWithResult + def initialize(reason) + @Reason = reason + end + + def fulfilled? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :rejected + end + + def apply(args, block) + block.call reason, *args + end + end + + # @!visibility private + class PartiallyRejected < ResolvedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def fulfilled? 
+ false + end + + def to_sym + :rejected + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + # @!visibility private + PENDING = Pending.new + # @!visibility private + RESERVED = Reserved.new + # @!visibility private + RESOLVED = Fulfilled.new(nil) + + def RESOLVED.to_sym + :resolved + end + end + + private_constant :InternalStates + + # @!macro promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro promises.warn.blocks + # @note This function potentially blocks current thread until the Future is resolved. + # Be careful it can deadlock. Try to chain instead. + + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. + class AbstractEventFuture < Synchronization::Object + safe_initialization! + attr_atomic(:internal_state) + private :internal_state=, :swap_internal_state, :compare_and_set_internal_state, :update_internal_state + # @!method internal_state + # @!visibility private + + include InternalStates + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + @Callbacks = LockFreeStack.new + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + private :initialize + + # Returns its state. + # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :resolved] + # @overload a_future.state + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] + def state + internal_state.to_sym + end + + # Is it in pending state? + # @return [Boolean] + def pending? + !internal_state.resolved? + end + + # Is it in resolved state? + # @return [Boolean] + def resolved? + internal_state.resolved? + end + + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring resolved state, like {#wait}. + # @return [self] + def touch + @Promise.touch + self + end + + # @!macro promises.touches + # Calls {Concurrent::AbstractEventFuture#touch}. + + # @!macro promises.method.wait + # Wait (block the Thread) until receiver is {#resolved?}. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [self, true, false] self implies timeout was not used, true implies timeout was used + # and it was resolved, false implies it was not resolved within timeout. + def wait(timeout = nil) + result = wait_until_resolved(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor + # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on + # @see similar + def default_executor + @DefaultExecutor + end + + # @!macro promises.shortcut.on + # @return [Future] + def chain(*args, &task) + chain_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_on(executor, *args, &task) + # @yield [*args] to the task. 
+ # @overload a_future.chain_on(executor, *args, &task) + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def chain_on(executor, *args, &task) + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @return [String] Short string representation. + def to_s + format '%s %s>', super[0..-2], state + end + + alias_method :inspect, :to_s + + # Resolves the resolvable when receiver is resolved. + # + # @param [Resolvable] resolvable + # @return [self] + def chain_resolvable(resolvable) + on_resolution! { resolvable.resolve_with internal_state } + end + + alias_method :tangle, :chain_resolvable + + # @!macro promises.shortcut.using + # @return [self] + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution_using(executor, *args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution_using(executor, *args, &callback) + add_callback :async_callback_on_resolution, executor, args, callback + end + + # @!macro promises.method.with_default_executor + # Crates new object with same class with the executor set as its new default executor. + # Any futures depending on it will use the new default executor. + # @!macro promises.shortcut.event-future + # @abstract + # @return [AbstractEventFuture] + def with_default_executor(executor) + raise NotImplementedError + end + + # @!visibility private + def resolve_with(state, raise_on_reassign = true, reserved = false) + if compare_and_set_internal_state(reserved ? RESERVED : PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks state + else + return rejected_resolution(raise_on_reassign, state) + end + self + end + + # For inspection. + # @!visibility private + # @return [Array] + def blocks + @Callbacks.each_with_object([]) do |(method, args), promises| + promises.push(args[0]) if method == :callback_notify_blocked + end + end + + # For inspection. + # @!visibility private + def callbacks + @Callbacks.each.to_a + end + + # For inspection. + # @!visibility private + def promise + @Promise + end + + # For inspection. + # @!visibility private + def touched? + promise.touched? + end + + # For inspection. 
+ # @!visibility private + def waiting_threads + @Waiters.each.to_a + end + + # @!visibility private + def add_callback_notify_blocked(promise, index) + add_callback :callback_notify_blocked, promise, index + end + + # @!visibility private + def add_callback_clear_delayed_node(node) + add_callback(:callback_clear_delayed_node, node) + end + + # @!visibility private + def with_hidden_resolvable + # TODO (pitr-ch 10-Dec-2018): documentation, better name if in edge + self + end + + private + + def add_callback(method, *args) + state = internal_state + if state.resolved? + call_callback method, state, args + else + @Callbacks.push [method, args] + state = internal_state + # take back if it was resolved in the meanwhile + call_callbacks state if state.resolved? + end + self + end + + def callback_clear_delayed_node(state, node) + node.value = nil + end + + # @return [Boolean] + def wait_until_resolved(timeout) + return true if resolved? + + touch + + @Lock.synchronize do + @Waiters.increment + begin + unless resolved? + @Condition.wait @Lock, timeout + end + ensure + # JRuby may raise ConcurrencyError + @Waiters.decrement + end + end + resolved? + end + + def call_callback(method, state, args) + self.send method, state, *args + end + + def call_callbacks(state) + method, args = @Callbacks.pop + while method + call_callback method, state, args + method, args = @Callbacks.pop + end + end + + def with_async(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) + end + + def async_callback_on_resolution(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_resolution st, ar, cb + end + end + + def callback_notify_blocked(state, promise, index) + promise.on_blocker_resolution self, index + end + end + + # Represents an event which will happen in future (will be resolved). The event is either + # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and + # cancellation. + class Event < AbstractEventFuture + + alias_method :then, :chain + + + # @!macro promises.method.zip + # Creates a new event or a future which will be resolved when receiver and other are. + # Returns an event if receiver and other are events, otherwise returns a future. + # If just one of the parties is Future then the result + # of the returned future is equal to the result of the supplied future. If both are futures + # then the result is as described in {FactoryMethods#zip_futures_on}. + # + # @return [Future, Event] + def zip(other) + if other.is_a?(Future) + ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. + # + # @return [Event] + def any(event_or_future) + AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event + end + + alias_method :|, :any + + # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Event] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end + + # @!macro promise.method.schedule + # Creates new event dependent on receiver scheduled to execute on/in intended_time. 
+ # In time is interpreted from the moment the receiver is resolved, therefore it inserts + # delay into the chain. + # + # @!macro promises.param.intended_time + # @return [Event] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end.flat_event + end + + # Converts event to a future. The future is fulfilled when the event is resolved, the future may never fail. + # + # @return [Future] + def to_future + future = Promises.resolvable_future + ensure + chain_resolvable(future) + end + + # Returns self, since this is event + # @return [Event] + def to_event + self + end + + # @!macro promises.method.with_default_executor + # @return [Event] + def with_default_executor(executor) + EventWrapperPromise.new_blocked_by1(self, executor).event + end + + private + + def rejected_resolution(raise_on_reassign, state) + Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign + return false + end + + def callback_on_resolution(state, args, callback) + callback.call(*args) + end + end + + # Represents a value which will become available in future. May reject with a reason instead, + # e.g. when the tasks raises an exception. + class Future < AbstractEventFuture + + # Is it in fulfilled state? + # @return [Boolean] + def fulfilled? + state = internal_state + state.resolved? && state.fulfilled? + end + + # Is it in rejected state? + # @return [Boolean] + def rejected? + state = internal_state + state.resolved? && !state.fulfilled? + end + + # @!macro promises.warn.nil + # @note Make sure returned `nil` is not confused with timeout, no value when rejected, + # no reason when fulfilled, etc. + # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. + + # @!macro promises.method.value + # Return value of the future. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @!macro promises.param.timeout_value + # @param [Object] timeout_value a value returned by the method when it times out + # @return [Object, nil, timeout_value] the value of the Future when fulfilled, + # timeout_value on timeout, + # nil on rejection. + def value(timeout = nil, timeout_value = nil) + if wait_until_resolved timeout + internal_state.value + else + timeout_value + end + end + + # Returns reason of future's rejection. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @!macro promises.param.timeout_value + # @return [Object, timeout_value] the reason, or timeout_value on timeout, or nil on fulfillment. + def reason(timeout = nil, timeout_value = nil) + if wait_until_resolved timeout + internal_state.reason + else + timeout_value + end + end + + # Returns triplet fulfilled?, value, reason. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Array(Boolean, Object, Object), nil] triplet of fulfilled?, value, reason, or nil + # on timeout. + def result(timeout = nil) + internal_state.result if wait_until_resolved timeout + end + + # @!macro promises.method.wait + # @raise [Exception] {#reason} on rejection + def wait!(timeout = nil) + result = wait_until_resolved!(timeout) + timeout ? 
result : self + end + + # @!macro promises.method.value + # @return [Object, nil, timeout_value] the value of the Future when fulfilled, + # or nil on rejection, + # or timeout_value on timeout. + # @raise [Exception] {#reason} on rejection + def value!(timeout = nil, timeout_value = nil) + if wait_until_resolved! timeout + internal_state.value + else + timeout_value + end + end + + # Allows rejected Future to be risen with `raise` method. + # If the reason is not an exception `Runtime.new(reason)` is returned. + # + # @example + # raise Promises.rejected_future(StandardError.new("boom")) + # raise Promises.rejected_future("or just boom") + # @raise [Concurrent::Error] when raising not rejected future + # @return [Exception] + def exception(*args) + raise Concurrent::Error, 'it is not rejected' unless rejected? + raise ArgumentError unless args.size <= 1 + reason = Array(internal_state.reason).flatten.compact + if reason.size > 1 + ex = Concurrent::MultipleErrors.new reason + ex.set_backtrace(caller) + ex + else + ex = if reason[0].respond_to? :exception + reason[0].exception(*args) + else + RuntimeError.new(reason[0]).exception(*args) + end + ex.set_backtrace Array(ex.backtrace) + caller + ex + end + end + + # @!macro promises.shortcut.on + # @return [Future] + def then(*args, &task) + then_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it fulfills. Does not run + # the task if it rejects. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [value, *args] to the task. + def then_on(executor, *args, &task) + ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def rescue(*args, &task) + rescue_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it rejects. Does not run + # the task if it fulfills. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [reason, *args] to the task. + def rescue_on(executor, *args, &task) + RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.method.zip + # @return [Future] + def zip(other) + if other.is_a?(Future) + ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future + else + ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. Returning future will have value nil if event_or_future is event and resolves + # first. + # + # @return [Future] + def any(event_or_future) + AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future + end + + alias_method :|, :any + + # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. 
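+ #
+ # A minimal sketch (illustrative): everything chained after `delay` stays
+ # un-evaluated until the chain is touched, e.g. by `#value!`.
+ #
+ # @example
+ #   head = Concurrent::Promises.fulfilled_future(1)
+ #   lazy = head.delay.then { |v| v + 1 } # the then-block has not run yet
+ #   lazy.value!                          # touching forces evaluation, #=> 2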
+ # + # @return [Future] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end + + # @!macro promise.method.schedule + # @return [Future] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end.flat + end + + # @!macro promises.method.with_default_executor + # @return [Future] + def with_default_executor(executor) + FutureWrapperPromise.new_blocked_by1(self, executor).future + end + + # Creates new future which will have result of the future returned by receiver. If receiver + # rejects it will have its rejection. + # + # @param [Integer] level how many levels of futures should flatten + # @return [Future] + def flat_future(level = 1) + FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future + end + + alias_method :flat, :flat_future + + # Creates new event which will be resolved when the returned event by receiver is. + # Be careful if the receiver rejects it will just resolve since Event does not hold reason. + # + # @return [Event] + def flat_event + FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # @!macro promises.shortcut.using + # @return [self] + def on_fulfillment(*args, &callback) + on_fulfillment_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment!(*args, &callback) + add_callback :callback_on_fulfillment, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment_using(executor, *args, &callback) + add_callback :async_callback_on_fulfillment, executor, args, callback + end + + # @!macro promises.shortcut.using + # @return [self] + def on_rejection(*args, &callback) + on_rejection_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection!(*args, &callback) + add_callback :callback_on_rejection, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection_using(executor, *args, &callback) + add_callback :async_callback_on_rejection, executor, args, callback + end + + # Allows to use futures as green threads. The receiver has to evaluate to a future which + # represents what should be done next. It basically flattens indefinitely until non Future + # values is returned which becomes result of the returned future. Any encountered exception + # will become reason of the returned future. 
+ # + # @return [Future] + # @param [#call(value)] run_test + # an object which when called returns either Future to keep running with + # or nil, then the run completes with the value. + # The run_test can be used to extract the Future from deeper structure, + # or to distinguish Future which is a resulting value from a future + # which is suppose to continue running. + # @example + # body = lambda do |v| + # v += 1 + # v < 5 ? Promises.future(v, &body) : v + # end + # Promises.future(0, &body).run.value! # => 5 + def run(run_test = method(:run_test)) + RunFuturePromise.new_blocked_by1(self, @DefaultExecutor, run_test).future + end + + # @!visibility private + def apply(args, block) + internal_state.apply args, block + end + + # Converts future to event which is resolved when future is resolved by fulfillment or rejection. + # + # @return [Event] + def to_event + event = Promises.resolvable_event + ensure + chain_resolvable(event) + end + + # Returns self, since this is a future + # @return [Future] + def to_future + self + end + + # @return [String] Short string representation. + def to_s + if resolved? + format '%s with %s>', super[0..-2], (fulfilled? ? value : reason).inspect + else + super + end + end + + alias_method :inspect, :to_s + + private + + def run_test(v) + v if v.is_a?(Future) + end + + def rejected_resolution(raise_on_reassign, state) + if raise_on_reassign + if internal_state == RESERVED + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It is already reserved.") + else + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", + current_result: result, + new_result: state.result) + end + end + return false + end + + def wait_until_resolved!(timeout = nil) + result = wait_until_resolved(timeout) + raise self if rejected? + result + end + + def async_callback_on_fulfillment(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_fulfillment st, ar, cb + end + end + + def async_callback_on_rejection(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_rejection st, ar, cb + end + end + + def callback_on_fulfillment(state, args, callback) + state.apply args, callback if state.fulfilled? + end + + def callback_on_rejection(state, args, callback) + state.apply args, callback unless state.fulfilled? + end + + def callback_on_resolution(state, args, callback) + callback.call(*state.result, *args) + end + + end + + # Marker module of Future, Event resolved manually. + module Resolvable + include InternalStates + end + + # A Event which can be resolved by user. + class ResolvableEvent < Event + include Resolvable + + # @!macro raise_on_reassign + # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. + + # @!macro promise.param.raise_on_reassign + # @param [Boolean] raise_on_reassign should method raise exception if already resolved + # @return [self, false] false is returner when raise_on_reassign is false and the receiver + # is already resolved. + # + + # Makes the event resolved, which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + # @param [true, false] reserved + # Set to true if the resolvable is {#reserve}d by you, + # marks resolution of reserved resolvable events and futures explicitly. 
+ # Advanced feature, ignore unless you use {Resolvable#reserve} from edge. + def resolve(raise_on_reassign = true, reserved = false) + resolve_with RESOLVED, raise_on_reassign, reserved + end + + # Creates new event wrapping receiver, effectively hiding the resolve method. + # + # @return [Event] + def with_hidden_resolvable + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. + # + # @param [true, false] resolve_on_timeout + # If it times out and the argument is true it will also resolve the event. + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = false) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(false) + else + false + end + end + end + + # A Future which can be resolved by user. + class ResolvableFuture < Future + include Resolvable + + # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, + # which triggers all dependent futures. + # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true, reserved = false) + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign, reserved) + end + + # Makes the future fulfilled with `value`, + # which triggers all dependent futures. + # + # @param [Object] value + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def fulfill(value, raise_on_reassign = true, reserved = false) + resolve_with Fulfilled.new(value), raise_on_reassign, reserved + end + + # Makes the future rejected with `reason`, + # which triggers all dependent futures. + # + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def reject(reason, raise_on_reassign = true, reserved = false) + resolve_with Rejected.new(reason), raise_on_reassign, reserved + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + def evaluate_to(*args, &block) + promise.evaluate_to(*args, block) + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + # @raise [Exception] also raise reason on rejection. + def evaluate_to!(*args, &block) + promise.evaluate_to(*args, block).wait! + end + + # @!macro promises.resolvable.resolve_on_timeout + # @param [::Array(true, Object, nil), ::Array(false, nil, Exception), nil] resolve_on_timeout + # If it times out and the argument is not nil it will also resolve the future + # to the provided resolution. + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. 
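+ #
+ # A minimal sketch (illustrative) of the resolution triplet, following the
+ # `(fulfilled?, value, reason)` convention of #resolve:
+ #
+ # @example
+ #   f = Concurrent::Promises.resolvable_future
+ #   # reject it ourselves if nobody resolves it within 0.1 seconds
+ #   f.wait(0.1, [false, nil, StandardError.new('timed out')])
+ #   f.rejected? #=> true, unless another thread resolved it first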
+ # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(*resolve_on_timeout, false) + else + false + end + end + + # Behaves as {Future#wait!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @raise [Exception] {#reason} on rejection + # @see Future#wait! + def wait!(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + if resolve(*resolve_on_timeout, false) + false + else + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + raise self if rejected? + true + end + else + false + end + end + + # Behaves as {Future#value} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @see Future#value + def value(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#value!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @raise [Exception] {#reason} on rejection + # @see Future#value! + def value!(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved! timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + raise self if rejected? + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#reason} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Exception, timeout_value, nil] + # @see Future#reason + def reason(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.reason + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.reason + end + end + timeout_value + end + end + + # Behaves as {Future#result} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [::Array(Boolean, Object, Exception), nil] + # @see Future#result + def result(timeout = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.result + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + internal_state.result + end + end + # otherwise returns nil + end + end + + # Creates new future wrapping receiver, effectively hiding the resolve method and similar. 
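+ #
+ # A minimal sketch (illustrative): hand out the wrapper while keeping the
+ # ability to resolve for yourself.
+ #
+ # @example
+ #   resolvable = Concurrent::Promises.resolvable_future
+ #   read_only  = resolvable.with_hidden_resolvable # no #fulfill / #reject here
+ #   Thread.new { resolvable.fulfill :done }
+ #   read_only.value! #=> :done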
+ # + # @return [Future] + def with_hidden_resolvable + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future + end + end + + # @abstract + # @private + class AbstractPromise < Synchronization::Object + safe_initialization! + include InternalStates + + def initialize(future) + super() + @Future = future + end + + def future + @Future + end + + alias_method :event, :future + + def default_executor + future.default_executor + end + + def state + future.state + end + + def touch + end + + def to_s + format '%s %s>', super[0..-2], @Future + end + + alias_method :inspect, :to_s + + def delayed_because + nil + end + + private + + def resolve_with(new_state, raise_on_reassign = true) + @Future.resolve_with(new_state, raise_on_reassign) + end + + # @return [Future] + def evaluate_to(*args, block) + resolve_with Fulfilled.new(block.call(*args)) + rescue Exception => error + resolve_with Rejected.new(error) + raise error unless error.is_a?(StandardError) + end + end + + class ResolvableEventPromise < AbstractPromise + def initialize(default_executor) + super ResolvableEvent.new(self, default_executor) + end + end + + class ResolvableFuturePromise < AbstractPromise + def initialize(default_executor) + super ResolvableFuture.new(self, default_executor) + end + + public :evaluate_to + end + + # @abstract + class InnerPromise < AbstractPromise + end + + # @abstract + class BlockedPromise < InnerPromise + + private_class_method :new + + def self.new_blocked_by1(blocker, *args, &block) + blocker_delayed = blocker.promise.delayed_because + promise = new(blocker_delayed, 1, *args, &block) + blocker.add_callback_notify_blocked promise, 0 + promise + end + + def self.new_blocked_by2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed_because + blocker_delayed2 = blocker2.promise.delayed_because + delayed = if blocker_delayed1 && blocker_delayed2 + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) + LockFreeStack.of2(blocker_delayed1, blocker_delayed2) + else + blocker_delayed1 || blocker_delayed2 + end + promise = new(delayed, 2, *args, &block) + blocker1.add_callback_notify_blocked promise, 0 + blocker2.add_callback_notify_blocked promise, 1 + promise + end + + def self.new_blocked_by(blockers, *args, &block) + delayed = blockers.reduce(nil) { |d, f| add_delayed d, f.promise.delayed_because } + promise = new(delayed, blockers.size, *args, &block) + blockers.each_with_index { |f, i| f.add_callback_notify_blocked promise, i } + promise + end + + def self.add_delayed(delayed1, delayed2) + if delayed1 && delayed2 + delayed1.push delayed2 + delayed1 + else + delayed1 || delayed2 + end + end + + def initialize(delayed, blockers_count, future) + super(future) + @Delayed = delayed + @Countdown = AtomicFixnum.new blockers_count + end + + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) + resolvable = resolvable?(countdown, future, index) + + on_resolvable(future, index) if resolvable + end + + def delayed_because + @Delayed + end + + def touch + clear_and_propagate_touch + end + + # for inspection only + def blocked_by + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by + end + + private + + def clear_and_propagate_touch(stack_or_element = @Delayed) + return if stack_or_element.nil? + + if stack_or_element.is_a? 
LockFreeStack + stack_or_element.clear_each { |element| clear_and_propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end + end + + # @return [true,false] if resolvable + def resolvable?(countdown, future, index) + countdown.zero? + end + + def process_on_blocker_resolution(future, index) + @Countdown.decrement + end + + def on_resolvable(resolved_future, index) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super delayed, 1, Future.new(self, default_executor) + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.fulfilled? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.rejected? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_resolvable(resolved_future, index) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to(*args, task) + end + end + end + end + + # will be immediately resolved + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).resolve_with(RESOLVED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, fulfilled, value, reason) + super Future.new(self, default_executor). + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) + end + end + + class AbstractFlatPromise < BlockedPromise + + def initialize(delayed_because, blockers_count, event_or_future) + delayed = LockFreeStack.of1(self) + super(delayed, blockers_count, event_or_future) + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @DelayedBecause = delayed_because || LockFreeStack.new + + event_or_future.add_callback_clear_delayed_node delayed.peek + end + + def touch + if @Touched.make_true + clear_and_propagate_touch @DelayedBecause + end + end + + private + + def touched? + @Touched.value + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) + end + + def add_delayed_of(future) + delayed = future.promise.delayed_because + if touched? 
+ clear_and_propagate_touch delayed + else + BlockedPromise.add_delayed @DelayedBecause, delayed + clear_and_propagate_touch @DelayedBecause if touched? + end + end + + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with RESOLVED + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + resolve_with RESOLVED + end + end + countdown + end + + end + + class FlatFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with internal_state + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor, run_test) + super delayed, 1, Future.new(self, default_executor) + @RunTest = run_test + end + + def process_on_blocker_resolution(future, index) + internal_state = future.internal_state + + unless internal_state.fulfilled? 
+ resolve_with internal_state + return 0 + end + + value = internal_state.value + continuation_future = @RunTest.call value + + if continuation_future + add_delayed_of continuation_future + continuation_future.add_callback_notify_blocked self, nil + else + resolve_with internal_state + end + + 1 + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) + @result = nil + end + + private + + def process_on_blocker_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index + end + + def on_resolvable(resolved_future, index) + resolve_with @result + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count, nil) + + on_resolvable nil, nil if blockers_count == 0 + end + + def process_on_blocker_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? 
+ @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index + end + + def on_resolvable(resolved_future, index) + all_fulfilled = true + values = ::Array.new(@Resolutions.size) + reasons = ::Array.new(@Resolutions.size) + + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled + end + + if all_fulfilled + resolve_with FulfilledArray.new(values) + else + resolve_with PartiallyRejected.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + + on_resolvable nil, nil if blockers_count == 0 + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + # @abstract + class AbstractAnyPromise < BlockedPromise + end + + class AnyResolvedEventPromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED, false + end + end + + class AnyResolvedFuturePromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state, false + end + end + + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise + + private + + def resolvable?(countdown, future, index) + future.fulfilled? || + # inlined super from BlockedPromise + countdown.zero? + end + end + + class DelayPromise < InnerPromise + + def initialize(default_executor) + event = Event.new(self, default_executor) + @Delayed = LockFreeStack.of1(self) + super event + event.add_callback_clear_delayed_node @Delayed.peek + end + + def touch + @Future.resolve_with RESOLVED + end + + def delayed_because + @Delayed + end + + end + + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time + @IntendedTime + else + now + @IntendedTime + end + [0, schedule_time.to_f - now.to_f].max + end + + Concurrent.global_timer_set.post(in_seconds) do + @Future.resolve_with RESOLVED + end + end + end + + extend FactoryMethods + + private_constant :AbstractPromise, + :ResolvableEventPromise, + :ResolvableFuturePromise, + :InnerPromise, + :BlockedPromise, + :BlockedTaskPromise, + :ThenPromise, + :RescuePromise, + :ChainPromise, + :ImmediateEventPromise, + :ImmediateFuturePromise, + :AbstractFlatPromise, + :FlatFuturePromise, + :FlatEventPromise, + :RunFuturePromise, + :ZipEventEventPromise, + :ZipFutureEventPromise, + :EventWrapperPromise, + :FutureWrapperPromise, + :ZipFuturesPromise, + :ZipEventsPromise, + :AbstractAnyPromise, + :AnyResolvedFuturePromise, + :AnyFulfilledFuturePromise, + :AnyResolvedEventPromise, + :DelayPromise, + :ScheduledPromise + + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/re_include.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/re_include.rb new file mode 100644 index 0000000..516d58c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/re_include.rb @@ -0,0 +1,58 @@ +module Concurrent + + # Methods form module A included to a module B, which is already included into class C, + # will not be visible in the C class. If this module is extended to B then A's methods + # are correctly made visible to C. + # + # @example + # module A + # def a + # :a + # end + # end + # + # module B1 + # end + # + # class C1 + # include B1 + # end + # + # module B2 + # extend Concurrent::ReInclude + # end + # + # class C2 + # include B2 + # end + # + # B1.send :include, A + # B2.send :include, A + # + # C1.new.respond_to? :a # => false + # C2.new.respond_to? :a # => true + module ReInclude + # @!visibility private + def included(base) + (@re_include_to_bases ||= []) << [:include, base] + super(base) + end + + # @!visibility private + def extended(base) + (@re_include_to_bases ||= []) << [:extend, base] + super(base) + end + + # @!visibility private + def include(*modules) + result = super(*modules) + modules.reverse.each do |module_being_included| + (@re_include_to_bases ||= []).each do |method, mod| + mod.send method, module_being_included + end + end + result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/scheduled_task.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/scheduled_task.rb new file mode 100644 index 0000000..90f78b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/scheduled_task.rb @@ -0,0 +1,318 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/utility/monotonic_time' + +require 'concurrent/options' + +module Concurrent + + # `ScheduledTask` is a close relative of `Concurrent::Future` but with one + # important difference: A `Future` is set to execute as soon as possible + # whereas a `ScheduledTask` is set to execute after a specified delay. This + # implementation is loosely based on Java's + # [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html). + # It is a more feature-rich variant of {Concurrent.timer}. 
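+  # As a rough sketch (`do_work` here stands in for any task block):
+  #
+  #   Concurrent.timer(2) { do_work }                    # fire-and-forget
+  #   Concurrent::ScheduledTask.execute(2) { do_work }   # adds states, cancellation and observers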
+ # + # The *intended* schedule time of task execution is set on object construction + # with the `delay` argument. The delay is a numeric (floating point or integer) + # representing a number of seconds in the future. Any other value or a numeric + # equal to or less than zero will result in an exception. The *actual* schedule + # time of task execution is set when the `execute` method is called. + # + # The constructor can also be given zero or more processing options. Currently + # the only supported options are those recognized by the + # [Dereferenceable](Dereferenceable) module. + # + # The final constructor argument is a block representing the task to be performed. + # If no block is given an `ArgumentError` will be raised. + # + # **States** + # + # `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it + # "future" behavior. This includes the expected lifecycle states. `ScheduledTask` + # has one additional state, however. While the task (block) is being executed the + # state of the object will be `:processing`. This additional state is necessary + # because it has implications for task cancellation. + # + # **Cancellation** + # + # A `:pending` task can be cancelled using the `#cancel` method. A task in any + # other state, including `:processing`, cannot be cancelled. The `#cancel` + # method returns a boolean indicating the success of the cancellation attempt. + # A cancelled `ScheduledTask` cannot be restarted. It is immutable. + # + # **Obligation and Observation** + # + # The result of a `ScheduledTask` can be obtained either synchronously or + # asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation) + # module and the + # [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html) + # module from the Ruby standard library. With one exception `ScheduledTask` + # behaves identically to [Future](Observable) with regard to these modules. + # + # @!macro copy_options + # + # @example Basic usage + # + # require 'concurrent' + # require 'thread' # for Queue + # require 'open-uri' # for open(uri) + # + # class Ticker + # def get_year_end_closing(symbol, year) + # uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m" + # data = open(uri) {|f| f.collect{|line| line.strip } } + # data[1].split(',')[4].to_f + # end + # end + # + # # Future + # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) } + # price.state #=> :pending + # sleep(1) # do other stuff + # price.value #=> 63.65 + # price.state #=> :fulfilled + # + # # ScheduledTask + # task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) } + # task.state #=> :pending + # sleep(3) # do other stuff + # task.value #=> 25.96 + # + # @example Successful task execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.state #=> :unscheduled + # task.execute + # task.state #=> pending + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> true + # task.rejected? #=> false + # task.value #=> 'What does the fox say?' + # + # @example One line creation and execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute + # task.state #=> pending + # + # task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' 
} + # task.state #=> pending + # + # @example Failed task execution + # + # task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') } + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> false + # task.rejected? #=> true + # task.value #=> nil + # task.reason #=> # + # + # @example Task execution with observation + # + # observer = Class.new{ + # def update(time, value, reason) + # puts "The task completed at #{time} with value '#{value}'" + # end + # }.new + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.add_observer(observer) + # task.execute + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?' + # + # @!macro monotonic_clock_warning + # + # @see Concurrent.timer + class ScheduledTask < IVar + include Comparable + + # The executor on which to execute the task. + # @!visibility private + attr_reader :executor + + # Schedule a task for execution at a specified future time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @yield the task to be performed + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] When no block is given + # @raise [ArgumentError] When given a time that is in the past + def initialize(delay, opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + raise ArgumentError.new('seconds must be greater than zero') if delay.to_f < 0.0 + + super(NULL, opts, &nil) + + synchronize do + ns_set_state(:unscheduled) + @parent = opts.fetch(:timer_set, Concurrent.global_timer_set) + @args = get_arguments_from(opts) + @delay = delay.to_f + @task = task + @time = nil + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + self.observers = Collection::CopyOnNotifyObserverSet.new + end + end + + # The `delay` value given at instanciation. + # + # @return [Float] the initial delay. + def initial_delay + synchronize { @delay } + end + + # The monotonic time at which the the task is scheduled to be executed. + # + # @return [Float] the schedule time or nil if `unscheduled` + def schedule_time + synchronize { @time } + end + + # Comparator which orders by schedule time. + # + # @!visibility private + def <=>(other) + schedule_time <=> other.schedule_time + end + + # Has the task been cancelled? + # + # @return [Boolean] true if the task is in the given state else false + def cancelled? + synchronize { ns_check_state?(:cancelled) } + end + + # In the task execution in progress? + # + # @return [Boolean] true if the task is in the given state else false + def processing? + synchronize { ns_check_state?(:processing) } + end + + # Cancel this task and prevent it from executing. A task can only be + # cancelled if it is pending or unscheduled. + # + # @return [Boolean] true if successfully cancelled else false + def cancel + if compare_and_set_state(:cancelled, :pending, :unscheduled) + complete(false, nil, CancelledOperationError.new) + # To avoid deadlocks this call must occur outside of #synchronize + # Changing the state above should prevent redundant calls + @parent.send(:remove_task, self) + else + false + end + end + + # Reschedule the task using the original delay and the current time. 
+ # A task can only be reset while it is `:pending`. + # + # @return [Boolean] true if successfully rescheduled else false + def reset + synchronize{ ns_reschedule(@delay) } + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @raise [ArgumentError] When given a time that is in the past + def reschedule(delay) + delay = delay.to_f + raise ArgumentError.new('seconds must be greater than zero') if delay < 0.0 + synchronize{ ns_reschedule(delay) } + end + + # Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending` + # and starts counting down toward execution. Does nothing if the `ScheduledTask` is + # in any state other than `:unscheduled`. + # + # @return [ScheduledTask] a reference to `self` + def execute + if compare_and_set_state(:pending, :unscheduled) + synchronize{ ns_schedule(@delay) } + end + self + end + + # Create a new `ScheduledTask` object with the given block, execute it, and return the + # `:pending` object. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @!macro executor_and_deref_options + # + # @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + def self.execute(delay, opts = {}, &task) + new(delay, opts, &task).execute + end + + # Execute the task. + # + # @!visibility private + def process_task + safe_execute(@task, @args) + end + + protected :set, :try_set, :fail, :complete + + protected + + # Schedule the task using the given delay and the current time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_schedule(delay) + @delay = delay + @time = Concurrent.monotonic_time + @delay + @parent.send(:post_task, self) + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_reschedule(delay) + return false unless ns_check_state?(:pending) + @parent.send(:remove_task, self) && ns_schedule(delay) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/set.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/set.rb new file mode 100644 index 0000000..3bf0c89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/set.rb @@ -0,0 +1,74 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' +require 'set' + +module Concurrent + + # @!macro concurrent_set + # + # A thread-safe subclass of Set. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set` + # which is union of `a` and `b`, then it writes the union to `a`. 
+ # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#merge` instead. + # + # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set` + + # @!macro internal_implementation_note + SetImplementation = case + when Concurrent.on_cruby? + # The CRuby implementation of Set is written in Ruby itself and is + # not thread safe for certain methods. + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class CRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_cruby CRubySet + CRubySet + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubySet < ::Set + include JRuby::Synchronized + end + + JRubySet + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxSet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_rbx RbxSet + RbxSet + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubySet + TruffleRubySet + + else + warn 'Possibly unsupported Ruby implementation' + ::Set + end + private_constant :SetImplementation + + # @!macro concurrent_set + class Set < SetImplementation + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/settable_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/settable_struct.rb new file mode 100644 index 0000000..0012352 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/settable_struct.rb @@ -0,0 +1,139 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe, write-once variation of Ruby's standard `Struct`. + # Each member can have its value set at most once, either at construction + # or any time thereafter. Attempting to assign a value to a member + # that has already been set will result in a `Concurrent::ImmutabilityError`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + # @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword + module SettableStruct + include Synchronization::AbstractStruct + + # @!macro struct_values + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? 
+ synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # @raise [Concurrent::ImmutabilityError] if the given member has already been set + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize do + unless @values[member].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[member] = value + end + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize do + unless @values[index].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[index] = value + end + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization.rb new file mode 100644 index 0000000..49c68eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization.rb @@ -0,0 +1,30 @@ +require 'concurrent/utility/engine' + +require 'concurrent/synchronization/abstract_object' +require 'concurrent/utility/native_extension_loader' # load native parts first +Concurrent.load_native_extensions + +require 'concurrent/synchronization/mri_object' +require 'concurrent/synchronization/jruby_object' +require 'concurrent/synchronization/rbx_object' +require 'concurrent/synchronization/truffleruby_object' +require 'concurrent/synchronization/object' +require 'concurrent/synchronization/volatile' + +require 'concurrent/synchronization/abstract_lockable_object' +require 'concurrent/synchronization/mutex_lockable_object' +require 'concurrent/synchronization/jruby_lockable_object' +require 'concurrent/synchronization/rbx_lockable_object' + +require 'concurrent/synchronization/lockable_object' + +require 'concurrent/synchronization/condition' +require 'concurrent/synchronization/lock' + +module Concurrent + # {include:file:docs-source/synchronization.md} + # {include:file:docs-source/synchronization-notes.md} + module Synchronization + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb 
b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb new file mode 100644 index 0000000..bc12603 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb @@ -0,0 +1,98 @@ +module Concurrent + module Synchronization + + # @!visibility private + class AbstractLockableObject < Synchronization::Object + + protected + + # @!macro synchronization_object_method_synchronize + # + # @yield runs the block synchronized against this object, + # equivalent of java's `synchronize(this) {}` + # @note can by made public in descendants if required by `public :synchronize` + def synchronize + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_wait_until + # + # Wait until condition is met or timeout passes, + # protects against spurious wake-ups. + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @yield condition to be met + # @yieldreturn [true, false] + # @return [true, false] if condition met + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait_until(timeout = nil, &condition) + # synchronize { ns_wait_until(timeout, &condition) } + # end + # ``` + def ns_wait_until(timeout = nil, &condition) + if timeout + wait_until = Concurrent.monotonic_time + timeout + loop do + now = Concurrent.monotonic_time + condition_result = condition.call + return condition_result if now >= wait_until || condition_result + ns_wait wait_until - now + end + else + ns_wait timeout until condition.call + true + end + end + + # @!macro synchronization_object_method_ns_wait + # + # Wait until another thread calls #signal or #broadcast, + # spurious wake-ups can happen. + # + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait(timeout = nil) + # synchronize { ns_wait(timeout) } + # end + # ``` + def ns_wait(timeout = nil) + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_signal + # + # Signal one waiting thread. + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def signal + # synchronize { ns_signal } + # end + # ``` + def ns_signal + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_broadcast + # + # Broadcast to all waiting threads. 
+ # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def broadcast + # synchronize { ns_broadcast } + # end + # ``` + def ns_broadcast + raise NotImplementedError + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb new file mode 100644 index 0000000..532388b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb @@ -0,0 +1,24 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class AbstractObject + + # @abstract has to be implemented based on Ruby runtime + def initialize + raise NotImplementedError + end + + # @!visibility private + # @abstract + def full_memory_barrier + raise NotImplementedError + end + + def self.attr_volatile(*names) + raise NotImplementedError + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb new file mode 100644 index 0000000..1fe90c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb @@ -0,0 +1,171 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module AbstractStruct + + # @!visibility private + def initialize(*values) + super() + ns_initialize(*values) + end + + # @!macro struct_length + # + # Returns the number of struct members. + # + # @return [Fixnum] the number of struct members + def length + self.class::MEMBERS.length + end + alias_method :size, :length + + # @!macro struct_members + # + # Returns the struct members as an array of symbols. + # + # @return [Array] the struct members as an array of symbols + def members + self.class::MEMBERS.dup + end + + protected + + # @!macro struct_values + # + # @!visibility private + def ns_values + @values.dup + end + + # @!macro struct_values_at + # + # @!visibility private + def ns_values_at(indexes) + @values.values_at(*indexes) + end + + # @!macro struct_to_h + # + # @!visibility private + def ns_to_h + length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo} + end + + # @!macro struct_get + # + # @!visibility private + def ns_get(member) + if member.is_a? 
Integer + if member >= @values.length + raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})") + end + @values[member] + else + send(member) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + # @!macro struct_equality + # + # @!visibility private + def ns_equality(other) + self.class == other.class && self.values == other.values + end + + # @!macro struct_each + # + # @!visibility private + def ns_each + values.each{|value| yield value } + end + + # @!macro struct_each_pair + # + # @!visibility private + def ns_each_pair + @values.length.times do |index| + yield self.class::MEMBERS[index], @values[index] + end + end + + # @!macro struct_select + # + # @!visibility private + def ns_select + values.select{|value| yield value } + end + + # @!macro struct_inspect + # + # @!visibility private + def ns_inspect + struct = pr_underscore(self.class.ancestors[1]) + clazz = ((self.class.to_s =~ /^#" + end + + # @!macro struct_merge + # + # @!visibility private + def ns_merge(other, &block) + self.class.new(*self.to_h.merge(other, &block).values) + end + + # @!visibility private + def ns_initialize_copy + @values = @values.map do |val| + begin + val.clone + rescue TypeError + val + end + end + end + + # @!visibility private + def pr_underscore(clazz) + word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229 + word.gsub!(/::/, '/') + word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') + word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') + word.tr!("-", "_") + word.downcase! + word + end + + # @!visibility private + def self.define_struct_class(parent, base, name, members, &block) + clazz = Class.new(base || Object) do + include parent + self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze) + def ns_initialize(*values) + raise ArgumentError.new('struct size differs') if values.length > length + @values = values.fill(nil, values.length..length-1) + end + end + unless name.nil? + begin + parent.send :remove_const, name if parent.const_defined?(name, false) + parent.const_set(name, clazz) + clazz + rescue NameError + raise NameError.new("identifier #{name} needs to be constant") + end + end + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + @values[index] + end + end + clazz.class_exec(&block) unless block.nil? + clazz.singleton_class.send :alias_method, :[], :new + clazz + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/condition.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/condition.rb new file mode 100644 index 0000000..f704b81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/condition.rb @@ -0,0 +1,60 @@ +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Condition < LockableObject + safe_initialization! 
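+      # Instances are obtained through LockableObject#new_condition (defined at the
+      # bottom of this file); it uses the private_new alias set up below, so every
+      # Condition is bound to the lock that owns it.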
+ + # TODO (pitr 12-Sep-2015): locks two objects, improve + # TODO (pitr 26-Sep-2015): study + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8-b132/java/util/concurrent/locks/AbstractQueuedSynchronizer.java#AbstractQueuedSynchronizer.Node + + singleton_class.send :alias_method, :private_new, :new + private_class_method :new + + def initialize(lock) + super() + @Lock = lock + end + + def wait(timeout = nil) + @Lock.synchronize { ns_wait(timeout) } + end + + def ns_wait(timeout = nil) + synchronize { super(timeout) } + end + + def wait_until(timeout = nil, &condition) + @Lock.synchronize { ns_wait_until(timeout, &condition) } + end + + def ns_wait_until(timeout = nil, &condition) + synchronize { super(timeout, &condition) } + end + + def signal + @Lock.synchronize { ns_signal } + end + + def ns_signal + synchronize { super } + end + + def broadcast + @Lock.synchronize { ns_broadcast } + end + + def ns_broadcast + synchronize { super } + end + end + + class LockableObject < LockableObjectImplementation + def new_condition + Condition.private_new(self) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb new file mode 100644 index 0000000..359a032 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb @@ -0,0 +1,13 @@ +module Concurrent + module Synchronization + + if Concurrent.on_jruby? && Concurrent.java_extensions_loaded? + + # @!visibility private + # @!macro internal_implementation_note + class JRubyLockableObject < AbstractLockableObject + + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb new file mode 100644 index 0000000..da91ac5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb @@ -0,0 +1,45 @@ +module Concurrent + module Synchronization + + if Concurrent.on_jruby? && Concurrent.java_extensions_loaded? 
+ + # @!visibility private + module JRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + instance_variable_get_volatile(:#{ivar}) + end + + def #{name}=(value) + instance_variable_set_volatile(:#{ivar}, value) + end + RUBY + + end + names.map { |n| [n, :"#{n}="] }.flatten + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + class JRubyObject < AbstractObject + include JRubyAttrVolatile + + def initialize + # nothing to do + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/lock.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/lock.rb new file mode 100644 index 0000000..0dbad2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/lock.rb @@ -0,0 +1,36 @@ +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Lock < LockableObject + # TODO use JavaReentrantLock on JRuby + + public :synchronize + + def wait(timeout = nil) + synchronize { ns_wait(timeout) } + end + + public :ns_wait + + def wait_until(timeout = nil, &condition) + synchronize { ns_wait_until(timeout, &condition) } + end + + public :ns_wait_until + + def signal + synchronize { ns_signal } + end + + public :ns_signal + + def broadcast + synchronize { ns_broadcast } + end + + public :ns_broadcast + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb new file mode 100644 index 0000000..cdbe4d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb @@ -0,0 +1,74 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + LockableObjectImplementation = case + when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3) + MonitorLockableObject + when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3) + MutexLockableObject + when Concurrent.on_jruby? + JRubyLockableObject + when Concurrent.on_rbx? + RbxLockableObject + when Concurrent.on_truffleruby? + MutexLockableObject + else + warn 'Possibly unsupported Ruby implementation' + MonitorLockableObject + end + private_constant :LockableObjectImplementation + + # Safe synchronization under any Ruby implementation. + # It provides methods like {#synchronize}, {#wait}, {#signal} and {#broadcast}. + # Provides a single layer which can improve its implementation over time without changes needed to + # the classes using it. Use {Synchronization::Object} not this abstract class. + # + # @note this object does not support usage together with + # [`Thread#wakeup`](http://ruby-doc.org/core/Thread.html#method-i-wakeup) + # and [`Thread#raise`](http://ruby-doc.org/core/Thread.html#method-i-raise). + # `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and + # `Thread#wakeup` will not work on all platforms. 
+ # + # @see Event implementation as an example of this class use + # + # @example simple + # class AnClass < Synchronization::Object + # def initialize + # super + # synchronize { @value = 'asd' } + # end + # + # def value + # synchronize { @value } + # end + # end + # + # @!visibility private + class LockableObject < LockableObjectImplementation + + # TODO (pitr 12-Sep-2015): make private for c-r, prohibit subclassing + # TODO (pitr 12-Sep-2015): we inherit too much ourselves :/ + + # @!method initialize(*args, &block) + # @!macro synchronization_object_method_initialize + + # @!method synchronize + # @!macro synchronization_object_method_synchronize + + # @!method wait_until(timeout = nil, &condition) + # @!macro synchronization_object_method_ns_wait_until + + # @!method wait(timeout = nil) + # @!macro synchronization_object_method_ns_wait + + # @!method signal + # @!macro synchronization_object_method_ns_signal + + # @!method broadcast + # @!macro synchronization_object_method_ns_broadcast + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb new file mode 100644 index 0000000..4b1d6c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb @@ -0,0 +1,44 @@ +module Concurrent + module Synchronization + + # @!visibility private + module MriAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + # relying on undocumented behavior of CRuby, GVL acquire has lock which ensures visibility of ivars + # https://github.com/ruby/ruby/blob/ruby_2_2/thread_pthread.c#L204-L211 + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MriObject < AbstractObject + include MriAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb new file mode 100644 index 0000000..f17ea50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb @@ -0,0 +1,88 @@ +module Concurrent + # noinspection RubyInstanceVariableNamingConvention + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module ConditionSignalling + protected + + def ns_signal + @__Condition__.signal + self + end + + def ns_broadcast + @__Condition__.broadcast + self + end + end + + + # @!visibility private + # @!macro internal_implementation_note + class MutexLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! 
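+      # Built on a plain ::Mutex and ::ConditionVariable; #synchronize below is
+      # re-entrant because it yields directly when the calling thread already owns
+      # the lock (Mutex#owned?).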
+ + def initialize(*defaults) + super(*defaults) + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new + end + + def initialize_copy(other) + super + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new + end + + protected + + def synchronize + if @__Lock__.owned? + yield + else + @__Lock__.synchronize { yield } + end + end + + def ns_wait(timeout = nil) + @__Condition__.wait @__Lock__, timeout + self + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MonitorLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond + end + + def initialize_copy(other) + super + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond + end + + protected + + def synchronize # TODO may be a problem with lock.synchronize { lock.wait } + @__Lock__.synchronize { yield } + end + + def ns_wait(timeout = nil) + @__Condition__.wait timeout + self + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/object.rb new file mode 100644 index 0000000..0e62112 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/object.rb @@ -0,0 +1,183 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + ObjectImplementation = case + when Concurrent.on_cruby? + MriObject + when Concurrent.on_jruby? + JRubyObject + when Concurrent.on_rbx? + RbxObject + when Concurrent.on_truffleruby? + TruffleRubyObject + else + warn 'Possibly unsupported Ruby implementation' + MriObject + end + private_constant :ObjectImplementation + + # Abstract object providing final, volatile, ans CAS extensions to build other concurrent abstractions. + # - final instance variables see {Object.safe_initialization!} + # - volatile instance variables see {Object.attr_volatile} + # - volatile instance variables see {Object.attr_atomic} + class Object < ObjectImplementation + # TODO make it a module if possible + + # @!method self.attr_volatile(*names) + # Creates methods for reading and writing (as `attr_accessor` does) to a instance variable with + # volatile (Java) semantic. The instance variable should be accessed only through generated methods. + # + # @param [::Array] names of the instance variables to be volatile + # @return [::Array] names of defined method names + + # Has to be called by children. + def initialize + super + __initialize_atomic_fields__ + end + + # By calling this method on a class, it and all its children are marked to be constructed safely. Meaning that + # all writes (ivar initializations) are made visible to all readers of newly constructed object. It ensures + # same behaviour as Java's final fields. + # @example + # class AClass < Concurrent::Synchronization::Object + # safe_initialization! + # + # def initialize + # @AFinalValue = 'value' # published safely, does not have to be synchronized + # end + # end + # @return [true] + def self.safe_initialization! + # define only once, and not again in children + return if safe_initialization? 
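+        # The .new override below runs a full memory barrier once construction
+        # finishes, publishing every ivar written in #initialize (Java-final-like
+        # semantics).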
+ + # @!visibility private + def self.new(*args, &block) + object = super(*args, &block) + ensure + object.full_memory_barrier if object + end + + @safe_initialization = true + end + + # @return [true, false] if this class is safely initialized. + def self.safe_initialization? + @safe_initialization = false unless defined? @safe_initialization + @safe_initialization || (superclass.respond_to?(:safe_initialization?) && superclass.safe_initialization?) + end + + # For testing purposes, quite slow. Injects assert code to new method which will raise if class instance contains + # any instance variables with CamelCase names and isn't {.safe_initialization?}. + # @raise when offend found + # @return [true] + def self.ensure_safe_initialization_when_final_fields_are_present + Object.class_eval do + def self.new(*args, &block) + object = super(*args, &block) + ensure + has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } + if has_final_field && !safe_initialization? + raise "there was an instance of #{object.class} with final field but not marked with safe_initialization!" + end + end + end + true + end + + # Creates methods for reading and writing to a instance variable with + # volatile (Java) semantic as {.attr_volatile} does. + # The instance variable should be accessed oly through generated methods. + # This method generates following methods: `value`, `value=(new_value) #=> new_value`, + # `swap_value(new_value) #=> old_value`, + # `compare_and_set_value(expected, value) #=> true || false`, `update_value(&block)`. + # @param [::Array] names of the instance variables to be volatile with CAS. + # @return [::Array] names of defined method names. + # @!macro attr_atomic + # @!method $1 + # @return [Object] The $1. + # @!method $1=(new_$1) + # Set the $1. + # @return [Object] new_$1. + # @!method swap_$1(new_$1) + # Set the $1 to new_$1 and return the old $1. + # @return [Object] old $1 + # @!method compare_and_set_$1(expected_$1, new_$1) + # Sets the $1 to new_$1 if the current $1 is expected_$1 + # @return [true, false] + # @!method update_$1(&block) + # Updates the $1 using the block. + # @yield [Object] Calculate a new $1 using given (old) $1 + # @yieldparam [Object] old $1 + # @return [Object] new $1 + def self.attr_atomic(*names) + @__atomic_fields__ ||= [] + @__atomic_fields__ += names + safe_initialization! + define_initialize_atomic_fields + + names.each do |name| + ivar = :"@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar}.get + end + + def #{name}=(value) + #{ivar}.set value + end + + def swap_#{name}(value) + #{ivar}.swap value + end + + def compare_and_set_#{name}(expected, value) + #{ivar}.compare_and_set expected, value + end + + def update_#{name}(&block) + #{ivar}.update(&block) + end + RUBY + end + names.flat_map { |n| [n, :"#{n}=", :"swap_#{n}", :"compare_and_set_#{n}", :"update_#{n}"] } + end + + # @param [true, false] inherited should inherited volatile with CAS fields be returned? + # @return [::Array] Returns defined volatile with CAS fields on this class. + def self.atomic_attributes(inherited = true) + @__atomic_fields__ ||= [] + ((superclass.atomic_attributes if superclass.respond_to?(:atomic_attributes) && inherited) || []) + @__atomic_fields__ + end + + # @return [true, false] is the attribute with name atomic? + def self.atomic_attribute?(name) + atomic_attributes.include? 
name + end + + private + + def self.define_initialize_atomic_fields + assignments = @__atomic_fields__.map do |name| + "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = Concurrent::AtomicReference.new(nil)" + end.join("\n") + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def __initialize_atomic_fields__ + super + #{assignments} + end + RUBY + end + + private_class_method :define_initialize_atomic_fields + + def __initialize_atomic_fields__ + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb new file mode 100644 index 0000000..1c4697c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb @@ -0,0 +1,71 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class RbxLockableObject < AbstractLockableObject + safe_initialization! + + def initialize(*defaults) + super(*defaults) + @__Waiters__ = [] + @__owner__ = nil + end + + def initialize_copy(other) + super + @__Waiters__ = [] + @__owner__ = nil + end + + protected + + def synchronize(&block) + if @__owner__ == Thread.current + yield + else + result = nil + Rubinius.synchronize(self) do + begin + @__owner__ = Thread.current + result = yield + ensure + @__owner__ = nil + end + end + result + end + end + + def ns_wait(timeout = nil) + wchan = Rubinius::Channel.new + + begin + @__Waiters__.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout timeout + ensure + Rubinius.lock(self) + + if !signaled && !@__Waiters__.delete(wchan) + # we timed out, but got signaled afterwards, + # so pass that signal on to the next waiter + @__Waiters__.shift << true unless @__Waiters__.empty? + end + end + + self + end + + def ns_signal + @__Waiters__.shift << true unless @__Waiters__.empty? + self + end + + def ns_broadcast + @__Waiters__.shift << true until @__Waiters__.empty? 
+ self + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb new file mode 100644 index 0000000..4b23f2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb @@ -0,0 +1,49 @@ +module Concurrent + module Synchronization + + # @!visibility private + module RbxAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + Rubinius.memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + Rubinius.memory_barrier + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + + end + + def full_memory_barrier + # Rubinius instance variables are not volatile so we need to insert barrier + # TODO (pitr 26-Nov-2015): check comments like ^ + Rubinius.memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class RbxObject < AbstractObject + include RbxAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb new file mode 100644 index 0000000..3919c76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb @@ -0,0 +1,47 @@ +module Concurrent + module Synchronization + + # @!visibility private + module TruffleRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + full_memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + full_memory_barrier + end + RUBY + end + + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + TruffleRuby.full_memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class TruffleRubyObject < AbstractObject + include TruffleRubyAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/volatile.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/volatile.rb new file mode 100644 index 0000000..9dffa91 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/synchronization/volatile.rb @@ -0,0 +1,36 @@ +module Concurrent + module Synchronization + + # Volatile adds the attr_volatile class method when included. + # + # @example + # class Foo + # include Concurrent::Synchronization::Volatile + # + # attr_volatile :bar + # + # def initialize + # self.bar = 1 + # end + # end + # + # foo = Foo.new + # foo.bar + # => 1 + # foo.bar = 2 + # => 2 + + Volatile = case + when Concurrent.on_cruby? + MriAttrVolatile + when Concurrent.on_jruby? + JRubyAttrVolatile + when Concurrent.on_rbx? 
+ RbxAttrVolatile + when Concurrent.on_truffleruby? + TruffleRubyAttrVolatile + else + MriAttrVolatile + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000..92e7c45 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb @@ -0,0 +1,50 @@ +require 'delegate' +require 'monitor' + +module Concurrent + unless defined?(SynchronizedDelegator) + + # This class provides a trivial way to synchronize all calls to a given object + # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls + # around the delegated `#send`. Example: + # + # array = [] # not thread-safe on many impls + # array = SynchronizedDelegator.new([]) # thread-safe + # + # A simple `Monitor` provides a very coarse-grained way to synchronize a given + # object, in that it will cause synchronization for methods that have no need + # for it, but this is a trivial way to get thread-safety where none may exist + # currently on some implementations. + # + # This class is currently being considered for inclusion into stdlib, via + # https://bugs.ruby-lang.org/issues/8556 + # + # @!visibility private + class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util.rb new file mode 100644 index 0000000..c67084a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util.rb @@ -0,0 +1,16 @@ +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter + CPU_COUNT = 16 # is there a way to determine this? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb new file mode 100644 index 0000000..7a6e8d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb @@ -0,0 +1,74 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/striped64' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 + # available in public domain. 
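Editorial aside, not part of the vendored diff: the header comment that begins above introduces concurrent-ruby's port of Doug Lea's jsr166e LongAdder (the "LondAdder" spelling in the upstream comment is a typo). A minimal, hypothetical sketch of how this private Adder API behaves, assuming the gem's internal requires resolve as in the vendored tree shown here:

require 'concurrent/thread_safe/util/adder'

adder = Concurrent::ThreadSafe::Util::Adder.new
adder.cells    # => nil (no contention yet; updates CAS straight onto +base+)

threads = 8.times.map { Thread.new { 10_000.times { adder.increment } } }
threads.each(&:join)

adder.sum      # => 80_000 (not an atomic snapshot while writers are still running)
adder.reset    # sets base and any allocated cells back to 0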
+ # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + # + # @!visibility private + class Adder < Striped64 + # Adds the given value. + def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000..d9b4c58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,118 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/volatile' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in. + # + # Usage: + # class A + # include CheapLockable + # + # def do_exlusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? + # do_something + # cheap_broadcast # wake up others + # end + # end + # end + # + # @!visibility private + module CheapLockable + private + engine = defined?(RUBY_ENGINE) && RUBY_ENGINE + if engine == 'rbx' + # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. 
+ def cheap_synchronize + Rubinius.lock(self) + begin + yield + ensure + Rubinius.unlock(self) + end + end + + def cheap_wait + wchan = Rubinius::Channel.new + + begin + waiters = @waiters ||= [] + waiters.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout nil + ensure + Rubinius.lock(self) + + unless signaled or waiters.delete(wchan) + # we timed out, but got signaled afterwards (e.g. while waiting to + # acquire @lock), so pass that signal on to the next waiter + waiters.shift << true unless waiters.empty? + end + end + + self + end + + def cheap_broadcast + waiters = @waiters ||= [] + waiters.shift << true until waiters.empty? + self + end + elsif engine == 'jruby' + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#syncrhonize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_broadcast+'s block. + def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_broadcast+'s block. + def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb new file mode 100644 index 0000000..24d039b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb @@ -0,0 +1,88 @@ +require 'concurrent/thread_safe/util' + +# Shim for TruffleRuby.synchronized +if Concurrent.on_truffleruby? && !TruffleRuby.respond_to?(:synchronized) + module TruffleRuby + def self.synchronized(object, &block) + Truffle::System.synchronized(object, &block) + end + end +end + +module Concurrent + module ThreadSafe + module Util + def self.make_synchronized_on_cruby(klass) + klass.class_eval do + def initialize(*args, &block) + @_monitor = Monitor.new + super + end + + def initialize_copy(other) + # make sure a copy is not sharing a monitor with the original object! + @_monitor = Monitor.new + super + end + end + + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + monitor = @_monitor + monitor or raise("BUG: Internal monitor was not properly initialized. 
Please report this to the concurrent-ruby developers.") + monitor.synchronize { super } + end + RUBY + end + end + + def self.make_synchronized_on_rbx(klass) + klass.class_eval do + private + + def _mon_initialize + @_monitor ||= Monitor.new # avoid double initialisation + end + + def self.new(*args) + obj = super(*args) + obj.send(:_mon_initialize) + obj + end + end + + klass.superclass.instance_methods(false).each do |method| + case method + when :new_range, :new_reserved + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + obj = super + obj.send(:_mon_initialize) + obj + end + RUBY + else + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + monitor = @_monitor + monitor or raise("BUG: Internal monitor was not properly initialized. Please report this to the concurrent-ruby developers.") + monitor.synchronize { super } + end + RUBY + end + end + end + + def self.make_synchronized_on_truffleruby(klass) + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args, &block) + TruffleRuby.synchronized(self) { super(*args, &block) } + end + RUBY + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000..b54be39 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,38 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/tuple' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + class PowerOfTwoTuple < Concurrent::Tuple + + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb new file mode 100644 index 0000000..4169c3d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb @@ -0,0 +1,246 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. 
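Editorial aside: the data_structures.rb helpers shown just above wrap every method a collection inherits from its superclass in a per-instance Monitor; this is how concurrent-ruby builds its synchronized collections on CRuby. A hedged sketch follows; the SynchronizedArray class is hypothetical and exists only to illustrate the helper.

require 'monitor'
require 'concurrent'   # loads Concurrent::ThreadSafe::Util on CRuby

class SynchronizedArray < ::Array; end
Concurrent::ThreadSafe::Util.make_synchronized_on_cruby(SynchronizedArray)

arr = SynchronizedArray.new   # initialize now sets up @_monitor before anything else
arr.push(1)                   # every method inherited from ::Array runs inside that Monitor
arr.dup.push(2)               # the copy gets its own Monitor via initialize_copy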
+ # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. + # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUS. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUS forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + # + # @!visibility private + class Striped64 + + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. 
+ # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + # + # @!visibility private + class Cell < Concurrent::AtomicReference + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + + # @!visibility private + def self.padding + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created + # hide from yardoc in a method + attr_reader :padding_0, :padding_1, :padding_2, :padding_3, :padding_4, :padding_5, :padding_6, :padding_7, :padding_8, :padding_9, :padding_10, :padding_11 + end + padding + end + + extend Volatile + attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. + # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+x+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? + collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. + def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? 
+ end + + def try_initialize_cells(x, hash) + if free? && !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb new file mode 100644 index 0000000..cdac2a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb @@ -0,0 +1,75 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + module Volatile + + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of +Concurrent::AtomicReference+. + # + # Usage: + # class Foo + # extend Concurrent::ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? 
+ include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000..bdde2dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,50 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A xorshift random number (positive +Fixnum+s) generator, provides + # reasonably cheap way to generate thread local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. 
+ def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/timer_task.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/timer_task.rb new file mode 100644 index 0000000..a0b7233 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/timer_task.rb @@ -0,0 +1,333 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/dereferenceable' +require 'concurrent/concern/observable' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/scheduled_task' + +module Concurrent + + # A very common concurrency pattern is to run a thread that performs a task at + # regular intervals. The thread that performs the task sleeps for the given + # interval then wakes up and performs the task. Lather, rinse, repeat... This + # pattern causes two problems. First, it is difficult to test the business + # logic of the task because the task itself is tightly coupled with the + # concurrency logic. Second, an exception raised while performing the task can + # cause the entire thread to abend. In a long-running application where the + # task thread is intended to run for days/weeks/years a crashed task thread + # can pose a significant problem. `TimerTask` alleviates both problems. + # + # When a `TimerTask` is launched it starts a thread for monitoring the + # execution interval. The `TimerTask` thread does not perform the task, + # however. Instead, the TimerTask launches the task on a separate thread. + # Should the task experience an unrecoverable crash only the task thread will + # crash. This makes the `TimerTask` very fault tolerant. Additionally, the + # `TimerTask` thread can respond to the success or failure of the task, + # performing logging or ancillary operations. `TimerTask` can also be + # configured with a timeout value allowing it to kill a task that runs too + # long. + # + # One other advantage of `TimerTask` is that it forces the business logic to + # be completely decoupled from the concurrency logic. The business logic can + # be tested separately then passed to the `TimerTask` for scheduling and + # running. + # + # In some cases it may be necessary for a `TimerTask` to affect its own + # execution cycle. To facilitate this, a reference to the TimerTask instance + # is passed as an argument to the provided block every time the task is + # executed. + # + # The `TimerTask` class includes the `Dereferenceable` mixin module so the + # result of the last execution is always available via the `#value` method. + # Dereferencing options can be passed to the `TimerTask` during construction or + # at any later time using the `#set_deref_options` method. 
+ # + # `TimerTask` supports notification through the Ruby standard library + # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # Observable} module. On execution the `TimerTask` will notify the observers + # with three arguments: time of execution, the result of the block (or nil on + # failure), and any raised exceptions (or nil on success). If the timeout + # interval is exceeded the observer will receive a `Concurrent::TimeoutError` + # object as the third argument. + # + # @!macro copy_options + # + # @example Basic usage + # task = Concurrent::TimerTask.new{ puts 'Boom!' } + # task.execute + # + # task.execution_interval #=> 60 (default) + # task.timeout_interval #=> 30 (default) + # + # # wait 60 seconds... + # #=> 'Boom!' + # + # task.shutdown #=> true + # + # @example Configuring `:execution_interval` and `:timeout_interval` + # task = Concurrent::TimerTask.new(execution_interval: 5, timeout_interval: 5) do + # puts 'Boom!' + # end + # + # task.execution_interval #=> 5 + # task.timeout_interval #=> 5 + # + # @example Immediate execution with `:run_now` + # task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' } + # task.execute + # + # #=> 'Boom!' + # + # @example Last `#value` and `Dereferenceable` mixin + # task = Concurrent::TimerTask.new( + # dup_on_deref: true, + # execution_interval: 5 + # ){ Time.now } + # + # task.execute + # Time.now #=> 2013-11-07 18:06:50 -0500 + # sleep(10) + # task.value #=> 2013-11-07 18:06:55 -0500 + # + # @example Controlling execution from within the block + # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task| + # task.execution_interval.times{ print 'Boom! ' } + # print "\n" + # task.execution_interval += 1 + # if task.execution_interval > 5 + # puts 'Stopping...' + # task.shutdown + # end + # end + # + # timer_task.execute # blocking call - this task will stop itself + # #=> Boom! + # #=> Boom! Boom! + # #=> Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! Boom! + # #=> Stopping... 
+ # + # @example Observation + # class TaskObserver + # def update(time, result, ex) + # if result + # print "(#{time}) Execution successfully returned #{result}\n" + # elsif ex.is_a?(Concurrent::TimeoutError) + # print "(#{time}) Execution timed out\n" + # else + # print "(#{time}) Execution failed with error #{ex}\n" + # end + # end + # end + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ 42 } + # task.add_observer(TaskObserver.new) + # task.execute + # sleep 4 + # + # #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42 + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ sleep } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:07:25 -0400) Execution timed out + # #=> (2013-10-13 19:07:27 -0400) Execution timed out + # #=> (2013-10-13 19:07:29 -0400) Execution timed out + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError + # task.shutdown + # + # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html + class TimerTask < RubyExecutorService + include Concern::Dereferenceable + include Concern::Observable + + # Default `:execution_interval` in seconds. + EXECUTION_INTERVAL = 60 + + # Default `:timeout_interval` in seconds. + TIMEOUT_INTERVAL = 30 + + # Create a new TimerTask with the given task and configuration. + # + # @!macro timer_task_initialize + # @param [Hash] opts the options defining task execution. + # @option opts [Integer] :execution_interval number of seconds between + # task executions (default: EXECUTION_INTERVAL) + # @option opts [Integer] :timeout_interval number of seconds a task can + # run before it is considered to have failed (default: TIMEOUT_INTERVAL) + # @option opts [Boolean] :run_now Whether to run the task immediately + # upon instantiation or to wait until the first # execution_interval + # has passed (default: false) + # + # @!macro deref_options + # + # @raise ArgumentError when no block is given. + # + # @yield to the block after :execution_interval seconds have passed since + # the last yield + # @yieldparam task a reference to the `TimerTask` instance so that the + # block can control its own lifecycle. Necessary since `self` will + # refer to the execution context of the block rather than the running + # `TimerTask`. + # + # @return [TimerTask] the new `TimerTask` + def initialize(opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + super + set_deref_options opts + end + + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + def running? + @running.true? + end + + # Execute a previously created `TimerTask`. + # + # @return [TimerTask] a reference to `self` + # + # @example Instance and execute in separate steps + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" } + # task.running? 
#=> false + # task.execute + # task.running? #=> true + # + # @example Instance and execute in one line + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute + # task.running? #=> true + def execute + synchronize do + if @running.false? + @running.make_true + schedule_next_task(@run_now ? 0 : @execution_interval) + end + end + self + end + + # Create and execute a new `TimerTask`. + # + # @!macro timer_task_initialize + # + # @example + # task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> true + def self.execute(opts = {}, &task) + TimerTask.new(opts, &task).execute + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval + synchronize { @execution_interval } + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @execution_interval = value } + end + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval + synchronize { @timeout_interval } + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @timeout_interval = value } + end + end + + private :post, :<< + + private + + def ns_initialize(opts, &task) + set_deref_options(opts) + + self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL + self.timeout_interval = opts[:timeout] || opts[:timeout_interval] || TIMEOUT_INTERVAL + @run_now = opts[:now] || opts[:run_now] + @executor = Concurrent::SafeTaskExecutor.new(task) + @running = Concurrent::AtomicBoolean.new(false) + @value = nil + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + # @!visibility private + def ns_shutdown_execution + @running.make_false + super + end + + # @!visibility private + def ns_kill_execution + @running.make_false + super + end + + # @!visibility private + def schedule_next_task(interval = execution_interval) + ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task)) + nil + end + + # @!visibility private + def execute_task(completion) + return nil unless @running.true? + ScheduledTask.execute(timeout_interval, args: [completion], &method(:timeout_task)) + _success, value, reason = @executor.execute(self) + if completion.try? + self.value = value + schedule_next_task + time = Time.now + observers.notify_observers do + [time, self.value, reason] + end + end + nil + end + + # @!visibility private + def timeout_task(completion) + return unless @running.true? + if completion.try? 
+ schedule_next_task + observers.notify_observers(Time.now, nil, Concurrent::TimeoutError.new) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/tuple.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/tuple.rb new file mode 100644 index 0000000..f8c4c25 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/tuple.rb @@ -0,0 +1,86 @@ +require 'concurrent/atomic/atomic_reference' + +module Concurrent + + # A fixed size array with volatile (synchronized, thread safe) getters/setters. + # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal. + # + # @example + # tuple = Concurrent::Tuple.new(16) + # + # tuple.set(0, :foo) #=> :foo | volatile write + # tuple.get(0) #=> :foo | volatile read + # tuple.compare_and_set(0, :foo, :bar) #=> true | strong CAS + # tuple.cas(0, :foo, :baz) #=> false | strong CAS + # tuple.get(0) #=> :bar | volatile read + # + # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia + # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple + # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable + class Tuple + include Enumerable + + # The (fixed) size of the tuple. + attr_reader :size + + # @!visibility private + Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : ::Array + private_constant :Tuple + + # Create a new tuple of the given size. + # + # @param [Integer] size the number of elements in the tuple + def initialize(size) + @size = size + @tuple = tuple = Tuple.new(size) + i = 0 + while i < size + tuple[i] = Concurrent::AtomicReference.new + i += 1 + end + end + + # Get the value of the element at the given index. + # + # @param [Integer] i the index from which to retrieve the value + # @return [Object] the value at the given index or nil if the index is out of bounds + def get(i) + return nil if i >= @size || i < 0 + @tuple[i].get + end + alias_method :volatile_get, :get + + # Set the element at the given index to the given value + # + # @param [Integer] i the index for the element to set + # @param [Object] value the value to set at the given index + # + # @return [Object] the new value of the element at the given index or nil if the index is out of bounds + def set(i, value) + return nil if i >= @size || i < 0 + @tuple[i].set(value) + end + alias_method :volatile_set, :set + + # Set the value at the given index to the new value if and only if the current + # value matches the given old value. + # + # @param [Integer] i the index for the element to set + # @param [Object] old_value the value to compare against the current value + # @param [Object] new_value the value to set at the given index + # + # @return [Boolean] true if the value at the given element was set else false + def compare_and_set(i, old_value, new_value) + return false if i >= @size || i < 0 + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + # Calls the given block once for each element in self, passing that element as a parameter. 
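Editorial aside: the Tuple#each method documented in the lines that follow yields the stored values (it calls ref.get), even though its @yieldparam tag speaks of the AtomicReference wrapper. A brief hypothetical illustration:

require 'concurrent'   # provides Concurrent::Tuple

tuple = Concurrent::Tuple.new(3)
tuple.set(0, :a)
tuple.set(1, :b)
tuple.each { |value| p value }   # prints :a, :b, nil -- values, not AtomicReference objects
tuple.to_a                       # => [:a, :b, nil] via Enumerable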
+ # + # @yieldparam [Object] ref the `Concurrent::AtomicReference` object at the current index + def each + @tuple.each {|ref| yield ref.get} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/tvar.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/tvar.rb new file mode 100644 index 0000000..d6fe3b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/tvar.rb @@ -0,0 +1,261 @@ +require 'set' +require 'concurrent/synchronization' + +module Concurrent + + # A `TVar` is a transactional variable - a single-element container that + # is used as part of a transaction - see `Concurrent::atomically`. + # + # @!macro thread_safe_variable_comparison + # + # {include:file:docs-source/tvar.md} + class TVar < Synchronization::Object + safe_initialization! + + # Create a new `TVar` with an initial value. + def initialize(value) + @value = value + @version = 0 + @lock = Mutex.new + end + + # Get the value of a `TVar`. + def value + Concurrent::atomically do + Transaction::current.read(self) + end + end + + # Set the value of a `TVar`. + def value=(value) + Concurrent::atomically do + Transaction::current.write(self, value) + end + end + + # @!visibility private + def unsafe_value # :nodoc: + @value + end + + # @!visibility private + def unsafe_value=(value) # :nodoc: + @value = value + end + + # @!visibility private + def unsafe_version # :nodoc: + @version + end + + # @!visibility private + def unsafe_increment_version # :nodoc: + @version += 1 + end + + # @!visibility private + def unsafe_lock # :nodoc: + @lock + end + + end + + # Run a block that reads and writes `TVar`s as a single atomic transaction. + # With respect to the value of `TVar` objects, the transaction is atomic, in + # that it either happens or it does not, consistent, in that the `TVar` + # objects involved will never enter an illegal state, and isolated, in that + # transactions never interfere with each other. You may recognise these + # properties from database transactions. + # + # There are some very important and unusual semantics that you must be aware of: + # + # * Most importantly, the block that you pass to atomically may be executed + # more than once. In most cases your code should be free of + # side-effects, except for via TVar. + # + # * If an exception escapes an atomically block it will abort the transaction. + # + # * It is undefined behaviour to use callcc or Fiber with atomically. + # + # * If you create a new thread within an atomically, it will not be part of + # the transaction. Creating a thread counts as a side-effect. + # + # Transactions within transactions are flattened to a single transaction. + # + # @example + # a = new TVar(100_000) + # b = new TVar(100) + # + # Concurrent::atomically do + # a.value -= 10 + # b.value += 10 + # end + def atomically + raise ArgumentError.new('no block given') unless block_given? + + # Get the current transaction + + transaction = Transaction::current + + # Are we not already in a transaction (not nested)? + + if transaction.nil? 
+ # New transaction + + begin + # Retry loop + + loop do + + # Create a new transaction + + transaction = Transaction.new + Transaction::current = transaction + + # Run the block, aborting on exceptions + + begin + result = yield + rescue Transaction::AbortError => e + transaction.abort + result = Transaction::ABORTED + rescue Transaction::LeaveError => e + transaction.abort + break result + rescue => e + transaction.abort + raise e + end + # If we can commit, break out of the loop + + if result != Transaction::ABORTED + if transaction.commit + break result + end + end + end + ensure + # Clear the current transaction + + Transaction::current = nil + end + else + # Nested transaction - flatten it and just run the block + + yield + end + end + + # Abort a currently running transaction - see `Concurrent::atomically`. + def abort_transaction + raise Transaction::AbortError.new + end + + # Leave a transaction without committing or aborting - see `Concurrent::atomically`. + def leave_transaction + raise Transaction::LeaveError.new + end + + module_function :atomically, :abort_transaction, :leave_transaction + + private + + class Transaction + + ABORTED = ::Object.new + + ReadLogEntry = Struct.new(:tvar, :version) + + AbortError = Class.new(StandardError) + LeaveError = Class.new(StandardError) + + def initialize + @read_log = [] + @write_log = {} + end + + def read(tvar) + Concurrent::abort_transaction unless valid? + + if @write_log.has_key? tvar + @write_log[tvar] + else + @read_log.push(ReadLogEntry.new(tvar, tvar.unsafe_version)) + tvar.unsafe_value + end + end + + def write(tvar, value) + # Have we already written to this TVar? + + if @write_log.has_key? tvar + # Record the value written + @write_log[tvar] = value + else + # Try to lock the TVar + + unless tvar.unsafe_lock.try_lock + # Someone else is writing to this TVar - abort + Concurrent::abort_transaction + end + + # Record the value written + + @write_log[tvar] = value + + # If we previously read from it, check the version hasn't changed + + @read_log.each do |log_entry| + if log_entry.tvar == tvar and tvar.unsafe_version > log_entry.version + Concurrent::abort_transaction + end + end + end + end + + def abort + unlock + end + + def commit + return false unless valid? + + @write_log.each_pair do |tvar, value| + tvar.unsafe_value = value + tvar.unsafe_increment_version + end + + unlock + + true + end + + def valid? + @read_log.each do |log_entry| + unless @write_log.has_key? log_entry.tvar + if log_entry.tvar.unsafe_version > log_entry.version + return false + end + end + end + + true + end + + def unlock + @write_log.each_key do |tvar| + tvar.unsafe_lock.unlock + end + end + + def self.current + Thread.current[:current_tvar_transaction] + end + + def self.current=(transaction) + Thread.current[:current_tvar_transaction] = transaction + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/engine.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/engine.rb new file mode 100644 index 0000000..bc4173e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/engine.rb @@ -0,0 +1,56 @@ +module Concurrent + module Utility + + # @!visibility private + module EngineDetector + def on_jruby? + ruby_engine == 'jruby' + end + + def on_jruby_9000? + on_jruby? && ruby_version(JRUBY_VERSION, :>=, 9, 0, 0) + end + + def on_cruby? + ruby_engine == 'ruby' + end + + def on_rbx? 
+ ruby_engine == 'rbx' + end + + def on_truffleruby? + ruby_engine == 'truffleruby' + end + + def on_windows? + !(RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/).nil? + end + + def on_osx? + !(RbConfig::CONFIG['host_os'] =~ /darwin|mac os/).nil? + end + + def on_linux? + !(RbConfig::CONFIG['host_os'] =~ /linux/).nil? + end + + def ruby_engine + defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby' + end + + def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch) + result = (version.split('.').map(&:to_i) <=> [major, minor, patch]) + comparisons = { :== => [0], + :>= => [1, 0], + :<= => [-1, 0], + :> => [1], + :< => [-1] } + comparisons.fetch(comparison).include? result + end + end + end + + # @!visibility private + extend Utility::EngineDetector +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb new file mode 100644 index 0000000..c9f4b36 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb @@ -0,0 +1,58 @@ +require 'concurrent/synchronization' + +module Concurrent + + class_definition = Class.new(Synchronization::LockableObject) do + def initialize + @last_time = Time.now.to_f + super() + end + + if defined?(Process::CLOCK_MONOTONIC) + # @!visibility private + def get_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + elsif Concurrent.on_jruby? + # @!visibility private + def get_time + java.lang.System.nanoTime() / 1_000_000_000.0 + end + else + + # @!visibility private + def get_time + synchronize do + now = Time.now.to_f + if @last_time < now + @last_time = now + else # clock has moved back in time + @last_time += 0.000_001 + end + end + end + + end + end + + # Clock that cannot be set and represents monotonic time since + # some unspecified starting point. + # + # @!visibility private + GLOBAL_MONOTONIC_CLOCK = class_definition.new + private_constant :GLOBAL_MONOTONIC_CLOCK + + # @!macro monotonic_get_time + # + # Returns the current time a tracked by the application monotonic clock. + # + # @return [Float] The current monotonic time since some unspecified + # starting point + # + # @!macro monotonic_clock_warning + def monotonic_time + GLOBAL_MONOTONIC_CLOCK.get_time + end + + module_function :monotonic_time +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb new file mode 100644 index 0000000..a944bd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb @@ -0,0 +1,79 @@ +require 'concurrent/utility/engine' + +module Concurrent + + module Utility + + # @!visibility private + module NativeExtensionLoader + + def allow_c_extensions? + Concurrent.on_cruby? + end + + def c_extensions_loaded? + defined?(@c_extensions_loaded) && @c_extensions_loaded + end + + def java_extensions_loaded? + defined?(@java_extensions_loaded) && @java_extensions_loaded + end + + def load_native_extensions + unless defined? Synchronization::AbstractObject + raise 'native_extension_loader loaded before Synchronization::AbstractObject' + end + + if Concurrent.on_cruby? && !c_extensions_loaded? 
+ ['concurrent/concurrent_ruby_ext', + "concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext" + ].each { |p| try_load_c_extension p } + end + + if Concurrent.on_jruby? && !java_extensions_loaded? + begin + require 'concurrent/concurrent_ruby.jar' + set_java_extensions_loaded + rescue LoadError => e + raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace + end + end + end + + private + + def load_error_path(error) + if error.respond_to? :path + error.path + else + error.message.split(' -- ').last + end + end + + def set_c_extensions_loaded + @c_extensions_loaded = true + end + + def set_java_extensions_loaded + @java_extensions_loaded = true + end + + def try_load_c_extension(path) + require path + set_c_extensions_loaded + rescue LoadError => e + if load_error_path(e) == path + # move on with pure-Ruby implementations + # TODO (pitr-ch 12-Jul-2018): warning on verbose? + else + raise e + end + end + + end + end + + # @!visibility private + extend Utility::NativeExtensionLoader +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/native_integer.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/native_integer.rb new file mode 100644 index 0000000..10719e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/native_integer.rb @@ -0,0 +1,53 @@ +module Concurrent + module Utility + # @private + module NativeInteger + # http://stackoverflow.com/questions/535721/ruby-max-integer + MIN_VALUE = -(2**(0.size * 8 - 2)) + MAX_VALUE = (2**(0.size * 8 - 2) - 1) + + def ensure_upper_bound(value) + if value > MAX_VALUE + raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}") + end + value + end + + def ensure_lower_bound(value) + if value < MIN_VALUE + raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}") + end + value + end + + def ensure_integer(value) + unless value.is_a?(Integer) + raise ArgumentError.new("#{value} is not an Integer") + end + value + end + + def ensure_integer_and_bounds(value) + ensure_integer value + ensure_upper_bound value + ensure_lower_bound value + end + + def ensure_positive(value) + if value < 0 + raise ArgumentError.new("#{value} cannot be negative") + end + value + end + + def ensure_positive_and_no_zero(value) + if value < 1 + raise ArgumentError.new("#{value} cannot be negative or zero") + end + value + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/processor_counter.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/processor_counter.rb new file mode 100644 index 0000000..531ca0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/utility/processor_counter.rb @@ -0,0 +1,163 @@ +require 'etc' +require 'rbconfig' +require 'concurrent/delay' + +module Concurrent + module Utility + + # @!visibility private + class ProcessorCounter + def initialize + @processor_count = Delay.new { compute_processor_count } + @physical_processor_count = Delay.new { compute_physical_processor_count } + end + + # Number of processors seen by the OS and used for process scheduling. For + # performance reasons the calculated value will be memoized on the first + # call. 
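Editorial aside: before the platform-specific details of this counter continue below, the public entry points it backs (defined at the bottom of processor_counter.rb) can be exercised directly. A hedged two-line sketch:

require 'concurrent'

Concurrent.processor_count            # logical processors; computed lazily (Delay) on first use
Concurrent.physical_processor_count   # physical cores; falls back to 1 if detection fails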
+ # + # When running under JRuby the Java runtime call + # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According + # to the Java documentation this "value may change during a particular + # invocation of the virtual machine... [applications] should therefore + # occasionally poll this property." Subsequently the result will NOT be + # memoized under JRuby. + # + # Ruby's Etc.nprocessors will be used if available (MRI 2.2+). + # + # On Windows the Win32 API will be queried for the + # `NumberOfLogicalProcessors from Win32_Processor`. This will return the + # total number "logical processors for the current instance of the + # processor", which taked into account hyperthreading. + # + # * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev + # * Alpha: /usr/bin/nproc (/proc/cpuinfo exists but cannot be used) + # * BSD: /sbin/sysctl + # * Cygwin: /proc/cpuinfo + # * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl + # * HP-UX: /usr/sbin/ioscan + # * IRIX: /usr/sbin/sysconf + # * Linux: /proc/cpuinfo + # * Minix 3+: /proc/cpuinfo + # * Solaris: /usr/sbin/psrinfo + # * Tru64 UNIX: /usr/sbin/psrinfo + # * UnixWare: /usr/sbin/psrinfo + # + # @return [Integer] number of processors seen by the OS or Java runtime + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors() + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + def processor_count + @processor_count.value + end + + # Number of physical processor cores on the current system. For performance + # reasons the calculated value will be memoized on the first call. + # + # On Windows the Win32 API will be queried for the `NumberOfCores from + # Win32_Processor`. This will return the total number "of cores for the + # current instance of the processor." On Unix-like operating systems either + # the `hwprefs` or `sysctl` utility will be called in a subshell and the + # returned value will be used. In the rare case where none of these methods + # work or an exception is raised the function will simply return 1. + # + # @return [Integer] number physical processor cores on the current system + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + # @see http://www.unix.com/man-page/osx/1/HWPREFS/ + # @see http://linux.die.net/man/8/sysctl + def physical_processor_count + @physical_processor_count.value + end + + private + + def compute_processor_count + if Concurrent.on_jruby? 
+ java.lang.Runtime.getRuntime.availableProcessors + elsif Etc.respond_to?(:nprocessors) && (nprocessor = Etc.nprocessors rescue nil) + nprocessor + else + os_name = RbConfig::CONFIG["target_os"] + if os_name =~ /mingw|mswin/ + require 'win32ole' + result = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfLogicalProcessors from Win32_Processor") + result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+) + elsif File.readable?("/proc/cpuinfo") && (cpuinfo_count = IO.read("/proc/cpuinfo").scan(/^processor/).size) > 0 + cpuinfo_count + elsif File.executable?("/usr/bin/nproc") + IO.popen("/usr/bin/nproc --all", &:read).to_i + elsif File.executable?("/usr/bin/hwprefs") + IO.popen("/usr/bin/hwprefs thread_count", &:read).to_i + elsif File.executable?("/usr/sbin/psrinfo") + IO.popen("/usr/sbin/psrinfo", &:read).scan(/^.*on-*line/).size + elsif File.executable?("/usr/sbin/ioscan") + IO.popen("/usr/sbin/ioscan -kC processor", &:read).scan(/^.*processor/).size + elsif File.executable?("/usr/sbin/pmcycles") + IO.popen("/usr/sbin/pmcycles -m", &:read).count("\n") + elsif File.executable?("/usr/sbin/lsdev") + IO.popen("/usr/sbin/lsdev -Cc processor -S 1", &:read).count("\n") + elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i + IO.popen("/usr/sbin/sysconf NPROC_ONLN", &:read).to_i + elsif File.executable?("/usr/sbin/sysctl") + IO.popen("/usr/sbin/sysctl -n hw.ncpu", &:read).to_i + elsif File.executable?("/sbin/sysctl") + IO.popen("/sbin/sysctl -n hw.ncpu", &:read).to_i + else + # TODO (pitr-ch 05-Nov-2016): warn about failures + 1 + end + end + rescue + return 1 + end + + def compute_physical_processor_count + ppc = case RbConfig::CONFIG["target_os"] + when /darwin1/ + IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i + when /linux/ + cores = {} # unique physical ID / core ID combinations + phy = 0 + IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln| + if ln.start_with?("physical") + phy = ln[/\d+/] + elsif ln.start_with?("core") + cid = phy + ":" + ln[/\d+/] + cores[cid] = true if not cores[cid] + end + end + cores.count + when /mswin|mingw/ + require 'win32ole' + result_set = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfCores from Win32_Processor") + result_set.to_enum.collect(&:NumberOfCores).reduce(:+) + else + processor_count + end + # fall back to logical count if physical info is invalid + ppc > 0 ? 
ppc : processor_count + rescue + return 1 + end + end + end + + # create the default ProcessorCounter on load + @processor_counter = Utility::ProcessorCounter.new + singleton_class.send :attr_reader, :processor_counter + + def self.processor_count + processor_counter.processor_count + end + + def self.physical_processor_count + processor_counter.physical_processor_count + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/version.rb b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/version.rb new file mode 100644 index 0000000..7bc7970 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby/concurrent/version.rb @@ -0,0 +1,3 @@ +module Concurrent + VERSION = '1.1.9' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/Readme b/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/Readme new file mode 100644 index 0000000..124e79b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/Readme @@ -0,0 +1,21 @@ += Escape + +[Home page:] http://__.rubyforge.org/ +[Project site:] http://rubyforge.org/projects/__ +[Gem install:] gem install escape +[Wiki:] http://wiki.qualitysmith.com/__ +[Author:] Your name +[Copyright:] 2007 QualitySmith, Inc. +[License:] {GNU General Public License}[http://www.gnu.org/copyleft/gpl.html] + +== Introduction + +... + +== Installation + +... + +== Usage + +... diff --git a/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/doc_include/template/qualitysmith.rb b/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/doc_include/template/qualitysmith.rb new file mode 100644 index 0000000..0f53b9b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/doc_include/template/qualitysmith.rb @@ -0,0 +1,631 @@ +module RDoc +module Page + +FONTS = "\"Bitstream Vera Sans\", Verdana, Arial, Helvetica, sans-serif" + +STYLE = < pre { + padding: 0.5em; + border: 1px dotted black; + background: #FFE; +} + +CSS + +XHTML_PREAMBLE = %{ + +} + +HEADER = XHTML_PREAMBLE + < + + %title% + + + + + + + +ENDHEADER + +FILE_PAGE = < + + + + +
[qualitysmith.rb RDoc page templates (CLASS_PAGE, METHOD_LIST, FOOTER, BODY, SRC_PAGE, FR_INDEX_BODY) omitted: only stripped HTML fragments and IF:/START: template directives survived extraction, so the template bodies are not reproduced here.]
+ +HTML + +CLASS_INDEX = FILE_INDEX +METHOD_INDEX = FILE_INDEX + +INDEX = XHTML_PREAMBLE + < + + %title% + + + + + + + + + + +IF:inline_source + +ENDIF:inline_source +IFNOT:inline_source + + + + +ENDIF:inline_source + + <body bgcolor="white"> + Click <a href="html/index.html">here</a> for a non-frames + version of this page. + </body> + + + + +HTML + +end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/lib/escape.rb b/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/lib/escape.rb new file mode 100644 index 0000000..8999669 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/escape-0.0.4/lib/escape.rb @@ -0,0 +1,247 @@ +# escape.rb - escape/unescape library for several formats +# +# Copyright (C) 2006,2007 Tanaka Akira +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# 3. The name of the author may not be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY +# OF SUCH DAMAGE. + +# Escape module provides several escape functions. +# * URI +# * HTML +# * shell command +module Escape + module_function + + # Escape.shell_command composes + # a sequence of words to + # a single shell command line. + # All shell meta characters are quoted and + # the words are concatenated with interleaving space. + # + # Escape.shell_command(["ls", "/"]) #=> "ls /" + # Escape.shell_command(["echo", "*"]) #=> "echo '*'" + # + # Note that system(*command) and + # system(Escape.shell_command(command)) is roughly same. + # There are two exception as follows. + # * The first is that the later may invokes /bin/sh. + # * The second is an interpretation of an array with only one element: + # the element is parsed by the shell with the former but + # it is recognized as single word with the later. + # For example, system(*["echo foo"]) invokes echo command with an argument "foo". + # But system(Escape.shell_command(["echo foo"])) invokes "echo foo" command without arguments (and it probably fails). + def shell_command(command) + command.map {|word| shell_single_word(word) }.join(' ') + end + + # Escape.shell_single_word quotes shell meta characters. + # + # The result string is always single shell word, even if + # the argument is "". + # Escape.shell_single_word("") returns "''". 
+ # + # Escape.shell_single_word("") #=> "''" + # Escape.shell_single_word("foo") #=> "foo" + # Escape.shell_single_word("*") #=> "'*'" + def shell_single_word(str) + if str.empty? + "''" + elsif %r{\A[0-9A-Za-z+,./:=@_-]+\z} =~ str + str + else + result = '' + str.scan(/('+)|[^']+/) { + if $1 + result << %q{\'} * $1.length + else + result << "'#{$&}'" + end + } + result + end + end + + # Escape.uri_segment escapes URI segment using percent-encoding. + # + # Escape.uri_segment("a/b") #=> "a%2Fb" + # + # The segment is "/"-splitted element after authority before query in URI, as follows. + # + # scheme://authority/segment1/segment2/.../segmentN?query#fragment + # + # See RFC 3986 for details of URI. + def uri_segment(str) + # pchar - pct-encoded = unreserved / sub-delims / ":" / "@" + # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" + str.gsub(%r{[^A-Za-z0-9\-._~!$&'()*+,;=:@]}n) { + '%' + $&.unpack("H2")[0].upcase + } + end + + # Escape.uri_path escapes URI path using percent-encoding. + # The given path should be a sequence of (non-escaped) segments separated by "/". + # The segments cannot contains "/". + # + # Escape.uri_path("a/b/c") #=> "a/b/c" + # Escape.uri_path("a?b/c?d/e?f") #=> "a%3Fb/c%3Fd/e%3Ff" + # + # The path is the part after authority before query in URI, as follows. + # + # scheme://authority/path#fragment + # + # See RFC 3986 for details of URI. + # + # Note that this function is not appropriate to convert OS path to URI. + def uri_path(str) + str.gsub(%r{[^/]+}n) { uri_segment($&) } + end + + # :stopdoc: + def html_form_fast(pairs, sep=';') + pairs.map {|k, v| + # query-chars - pct-encoded - x-www-form-urlencoded-delimiters = + # unreserved / "!" / "$" / "'" / "(" / ")" / "*" / "," / ":" / "@" / "/" / "?" + # query-char - pct-encoded = unreserved / sub-delims / ":" / "@" / "/" / "?" + # query-char = pchar / "/" / "?" = unreserved / pct-encoded / sub-delims / ":" / "@" / "/" / "?" + # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" + # x-www-form-urlencoded-delimiters = "&" / "+" / ";" / "=" + k = k.gsub(%r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n) { + '%' + $&.unpack("H2")[0].upcase + } + v = v.gsub(%r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n) { + '%' + $&.unpack("H2")[0].upcase + } + "#{k}=#{v}" + }.join(sep) + end + # :startdoc: + + # Escape.html_form composes HTML form key-value pairs as a x-www-form-urlencoded encoded string. + # + # Escape.html_form takes an array of pair of strings or + # an hash from string to string. + # + # Escape.html_form([["a","b"], ["c","d"]]) #=> "a=b&c=d" + # Escape.html_form({"a"=>"b", "c"=>"d"}) #=> "a=b&c=d" + # + # In the array form, it is possible to use same key more than once. + # (It is required for a HTML form which contains + # checkboxes and select element with multiple attribute.) + # + # Escape.html_form([["k","1"], ["k","2"]]) #=> "k=1&k=2" + # + # If the strings contains characters which must be escaped in x-www-form-urlencoded, + # they are escaped using %-encoding. + # + # Escape.html_form([["k=","&;="]]) #=> "k%3D=%26%3B%3D" + # + # The separator can be specified by the optional second argument. + # + # Escape.html_form([["a","b"], ["c","d"]], ";") #=> "a=b;c=d" + # + # See HTML 4.01 for details. + def html_form(pairs, sep='&') + r = '' + first = true + pairs.each {|k, v| + # query-chars - pct-encoded - x-www-form-urlencoded-delimiters = + # unreserved / "!" 
/ "$" / "'" / "(" / ")" / "*" / "," / ":" / "@" / "/" / "?"
+      # query-char - pct-encoded = unreserved / sub-delims / ":" / "@" / "/" / "?"
+      # query-char = pchar / "/" / "?" = unreserved / pct-encoded / sub-delims / ":" / "@" / "/" / "?"
+      # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+      # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
+      # x-www-form-urlencoded-delimiters = "&" / "+" / ";" / "="
+      r << sep if !first
+      first = false
+      k.each_byte {|byte|
+        ch = byte.chr
+        if %r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n =~ ch
+          r << "%" << ch.unpack("H2")[0].upcase
+        else
+          r << ch
+        end
+      }
+      r << '='
+      v.each_byte {|byte|
+        ch = byte.chr
+        if %r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n =~ ch
+          r << "%" << ch.unpack("H2")[0].upcase
+        else
+          r << ch
+        end
+      }
+    }
+    r
+  end
+
+  # :stopdoc:
+  HTML_TEXT_ESCAPE_HASH = {
+    '&' => '&amp;',
+    '<' => '&lt;',
+    '>' => '&gt;',
+  }
+  # :startdoc:
+
+  # Escape.html_text escapes a string appropriate for HTML text using character references.
+  #
+  # It escapes 3 characters:
+  # * '&' to '&amp;'
+  # * '<' to '&lt;'
+  # * '>' to '&gt;'
+  #
+  #  Escape.html_text("abc") #=> "abc"
+  #  Escape.html_text("a & b < c > d") #=> "a &amp; b &lt; c &gt; d"
+  #
+  # This function is not appropriate for escaping HTML element attribute
+  # because quotes are not escaped.
+  def html_text(str)
+    str.gsub(/[&<>]/) {|ch| HTML_TEXT_ESCAPE_HASH[ch] }
+  end
+
+  # :stopdoc:
+  HTML_ATTR_ESCAPE_HASH = {
+    '&' => '&amp;',
+    '<' => '&lt;',
+    '>' => '&gt;',
+    '"' => '&quot;',
+  }
+  # :startdoc:
+
+  # Escape.html_attr encodes a string as a double-quoted HTML attribute using character references.
+  #
+  #  Escape.html_attr("abc") #=> "\"abc\""
+  #  Escape.html_attr("a&b") #=> "\"a&amp;b\""
+  #  Escape.html_attr("ab&<>\"c") #=> "\"ab&amp;&lt;&gt;&quot;c\""
+  #  Escape.html_attr("a'c") #=> "\"a'c\""
+  #
+  # It escapes 4 characters:
+  # * '&' to '&amp;'
+  # * '<' to '&lt;'
+  # * '>' to '&gt;'
+  # * '"' to '&quot;'
+  #
+  def html_attr(str)
+    '"' + str.gsub(/[&<>"]/) {|ch| HTML_ATTR_ESCAPE_HASH[ch] } + '"'
+  end
+end
diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.github/workflows/ruby.yml b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.github/workflows/ruby.yml
new file mode 100644
index 0000000..1bc6ba0
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.github/workflows/ruby.yml
@@ -0,0 +1,41 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake +# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby + +name: Ruby + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + test: + runs-on: ${{ matrix.os }}-latest + strategy: + fail-fast: false + matrix: + os: [ubuntu, macos] + ruby-version: [2.5, 2.6, 2.7, 3.0, head, debug, truffleruby, truffleruby-head] + continue-on-error: ${{ endsWith(matrix.ruby, 'head') || matrix.ruby == 'debug' }} + steps: + - uses: actions/checkout@v2 + - name: Install libcurl header + run: | + if ${{ matrix.os == 'macos' }} + then + brew install curl + else + sudo apt install -y libcurl4-openssl-dev + fi + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby-version }} + bundler-cache: true # runs 'bundle install' and caches installed gems automatically + - name: Run tests + run: bundle exec rake diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.gitignore new file mode 100644 index 0000000..75a08dc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.gitignore @@ -0,0 +1,8 @@ +*.gem +.bundle +Gemfile.lock +.DS_Store +.yardoc +doc +coverage +.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.rspec b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.rspec new file mode 100644 index 0000000..7d8b8b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/.rspec @@ -0,0 +1,3 @@ +--tty +--color +--format documentation diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/CHANGELOG.md new file mode 100644 index 0000000..e78579e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/CHANGELOG.md @@ -0,0 +1,372 @@ +# Changelog + +## Master + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.15.0...master) + +## 0.15.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.14.0...v0.15.0) + +## 0.12.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.11.0...v0.12.0) + +- Removed deprecated `CURLE_SSL_CACERT` pinned in curl v7.62.0 ([@arku](https://github.com/arku) in [#158](https://github.com/typhoeus/ethon/pull/158)) + + +## 0.11.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.10.1...v0.11.0) + +## 0.10.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.10.0...v0.10.1) + +## 0.10.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.9.1...v0.10.0) + +## 0.9.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.9.0...v0.9.1) + +## 0.9.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.8.1...v0.9.0) + +## 0.8.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.8.0...v0.8.1) + +* Support optional escaping of params. + ([Tasos Laskos](https://github.com/zapotek) +* `Easy::Mirror`: Reduced object allocations and method calls during info handling. + ([Tasos Laskos](https://github.com/zapotek) + +## 0.8.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.3...v0.7.4) + +* `Easy::Mirror`: Reduced object allocations and method calls during info handling. + ([Tasos Laskos](https://github.com/zapotek) + +## 0.7.4 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.3...v0.7.4) + +* Support different array encodings for params. 
+ ([Marcello Barnaba](https://github.com/ifad), [\#104](https://github.com/typhoeus/ethon/pull/104)) +* Programtic access to version infos. + ([Jonas Wagner](https://github.com/jwagner), [\#90](https://github.com/typhoeus/ethon/pull/90)) + + +## 0.7.3 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.2...v0.7.3) + +* `Ethon::Curl::FDSet` + * Set `:fd_array` size to the current MS Windows `FD_SETSIZE` (2048). + ([Tasos Laskos](https://github.com/zapotek) + +* Added `redirect_time` value to available informations and `Easy::Mirror`. + ([Adrien Jarthon](https://github.com/jarthod) + +## 0.7.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.1...v0.7.2) + +* FFI data-types updated to be more correct. + +## 0.7.1 + +* MS Windows determination delegated to `Gem.windows?` for better accuracy. +* FFI data-types updated to work on MS Windows. + +## 0.7.0 + +Not backwards compatible changes: + +* `mime-types` are no longer a dependency. The gem will be still used if available to determine the mime type of a file which is uploaded. That means you have to have take care of the gem installation yourself. + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.3...v0.7.0) + +## 0.6.3 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.2...v0.6.3) + +## 0.6.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.1...v0.6.2) + +## 0.6.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.0...v0.6.1) + +The changelog entries are coming soon! + +## 0.6.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.12...v0.6.0) + +The changelog entries are coming soon! + +Bugfixes: + + * URL-encode nullbytes in parameters instead of escaping them to `\\0`. + ([Tasos Laskos](https://github.com/zapotek) + +## 0.5.12 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.11...v0.5.12) + +Enhancements: + +* Performance optimizations. + ([Kyle Oppenheim](https://github.com/koppenheim) and [Richie Vos](https://github.com/richievos), [\#48](https://github.com/typhoeus/ethon/pull/48)) +* Reuse memory pointer. + ([Richie Vos](https://github.com/richievos), [\#49](https://github.com/typhoeus/ethon/pull/49)) + +Bugfixes: + +* Fix windows install. + ([Derik Olsson](https://github.com/derikolsson), [\#47](https://github.com/typhoeus/ethon/pull/47)) +* Handle urls that already contain query params. + ([Turner King](https://github.com/turnerking ), [\#45](https://github.com/typhoeus/ethon/pull/45)) + +## 0.5.11 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.10...v0.5.11) + +Enhancements: + +* Add support for postredirs, unrestricted_auth. +* Add support for cookie, cookiejar, cookiefile. + ([erwanlr](https://github.com/erwanlr), [\#46](https://github.com/typhoeus/ethon/pull/46)) +* Relax ffi requirements. + ([voxik](https://github.com/voxik), [\#40](https://github.com/typhoeus/ethon/pull/40)) +* Various documentation improvements. + ([Craig Little](https://github.com/craiglittle)) + +Bugfixes: + +* Fix the memory leaks. + ([Richie Vos](https://github.com/richievos), [\#45](https://github.com/typhoeus/ethon/pull/45)) + +## 0.5.10 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.9...v0.5.10) + +Enhancements: + +* Allow custom requests. + ([Nathan Sutton](https://github.com/nate), [\#36](https://github.com/typhoeus/ethon/pull/36)) +* Use updated version of FFI. + +Bugfixes: + +* Fix windows install issue. 
+ ([brainsucker](https://github.com/brainsucker), [\#38](https://github.com/typhoeus/ethon/pull/38)) + +## 0.5.9 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.8...v0.5.9) + +Enhancements: + +* Allow to set multiple protocols. + +## 0.5.8 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.7...v0.5.8) + +Enhancements: + +* Add support for protocols and redir_protocols( + [libcurl SASL buffer overflow vulnerability](http://curl.haxx.se/docs/adv_20130206.html)). +* Add max_send_speed_large and max_recv_speed_large([Paul Schuegraf](https://github.com/pschuegr), [\#33](https://github.com/typhoeus/ethon/pull/33)) + +## 0.5.7 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.6...v0.5.7) + +Enhancements: + +* Use new version of ffi. + +## 0.5.6 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.4...v0.5.6) + +Bugfixes: + +* Easy#reset resets on_complete callbacks. + +## 0.5.4 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.3...v0.5.4) + +Enhancements: + +* Use Libc#getdtablesize to get the FDSet size. +* New libcurl option accept_encoding. +* Documentation updates. + +## 0.5.3 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.2...v0.5.3) + +Enhancements: + +* Deprecate Easy#prepare. It is no longer necessary. +* Unroll metaprogramming for easy and multi options. +* More specs. + +Bugfixes: + +* Correct size for FDSets +* Add proxytypes to enums. + +## 0.5.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.1...v0.5.2) + +Enhancements: + +* New libcurl option keypasswd. + +Bugfixes: + +* Correct request logging when using multi interface. +* Remove invalid libcurl option sslcertpasswd. + +## 0.5.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.0...v0.5.1) + +Bugfixes: + +* Mark Curl.select and Curl.easy_perform as blocking so that the GIL is + released by ffi. + +## 0.5.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.4...v0.5.0) + +Enhancements: + +* New libcurl option proxyuserpwd +* Rename response_header to response_headers + +Bugfixes: + +* Mark Curl.select and Curl.easy_perform as blocking so that the GIL is + released by ffi. 
+ +## 0.4.4 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.3...v0.4.4) + +Enhancements: + +* Prepare multi explicit like easy + +## 0.4.3 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.2...v0.4.3) + +Enhancements: + +* Remove deprecated libcurl option put +* More documentation +* New libcurl option connecttimeout_ms and timeout_ms +* Support multi options + +Bugfixes: + +* Handle nil values in query params + +## 0.4.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.1...v0.4.2) + +Enhancements: + +* New libcurl option forbid_reuse +* Use libcurls escape instead of CGI::escape + +## 0.4.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.0...v0.4.1) + +Bugfixes: + +* Handle nested hash in an array in params correct + ( [\#201](https://github.com/typhoeus/typhoeus/issues/201) ) + +## 0.4.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.3.0...v0.4.0) + +Enhancements: + +* ruby 1.8.7 compatible +* Ethon.logger +* Deal with string param/body +* More documentation + +Bugfixes: + +* Add multi_cleanup to curl + +## 0.3.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.2.0...v0.3.0) + +Enhancements: + +* New libcurl option proxyport +* Raise invalid value error when providing a wrong key for sslversion or httpauth + +Bugfixes: + +* Libcurl option sslversion is handled correct + +## 0.2.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.1.0...v0.2.0) + +Enhancements: + +* GET requests are using custom requests only when there is a request body +* Easy#on_complete takes multiple callbacks +* raise Errors::GlobalInit when libcurls global_init failed instead of + runtime error +* raise Errors::InvalidOption if option is invalid + +## 0.1.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.0.2...v0.1.0) + +Enhancements: + +* Documentation + ( [Alex P](https://github.com/ifesdjeen), [\#13](https://github.com/typhoeus/ethon/issues/13) ) +* New libcurl option dns_cache_timeout + ( [Chris Heald](https://github.com/cheald), [\#192](https://github.com/typhoeus/typhoeus/pull/192) ) + +Bugfixes: + +* Libcurl option ssl_verifyhost takes an integer. +* Add space between header key and value. + +## 0.0.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.0.1...v0.0.2) + +Bugfixes: + +* Add libcurl.so.4 to ffi_lib in order to load correct lib on Debian. +* Escape zero bytes. 
+ +## 0.0.1 Initial version diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Gemfile new file mode 100644 index 0000000..a2f41e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Gemfile @@ -0,0 +1,43 @@ +# frozen_string_literal: true +source "https://rubygems.org" +gemspec + +if Gem.ruby_version < Gem::Version.new("1.9.3") + gem "rake", "< 11" +else + gem "rake" +end + +group :development, :test do + gem "rspec", "~> 3.4" + + gem "sinatra" + + if Gem.ruby_version < Gem::Version.new("2.0.0") + gem "json", "< 2" + else + gem "json" + end + + if Gem.ruby_version >= Gem::Version.new("2.0.0") + gem "mime-types", "~> 1.18" + end + + if Gem.ruby_version >= Gem::Version.new("2.2.0") + gem "mustermann" + elsif Gem.ruby_version >= Gem::Version.new("2.1.0") + gem "mustermann", "0.4.0" + elsif Gem.ruby_version >= Gem::Version.new("2.0.0") + gem "mustermann", "0.3.1" + end + + if Gem.ruby_version >= Gem::Version.new("3.0.0") + gem "webrick" + end +end + +group :perf do + gem "benchmark-ips" + gem "patron" + gem "curb" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Guardfile b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Guardfile new file mode 100644 index 0000000..37d79f0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Guardfile @@ -0,0 +1,10 @@ +# frozen_string_literal: true +# vim:set filetype=ruby: +guard( + "rspec", + :all_after_pass => false, + :cli => "--fail-fast --tty --format documentation --colour") do + + watch(%r{^spec/.+_spec\.rb$}) + watch(%r{^lib/(.+)\.rb$}) { |match| "spec/#{match[1]}_spec.rb" } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/LICENSE new file mode 100644 index 0000000..f064c17 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 Hans Hasselberg + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/README.md b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/README.md new file mode 100644 index 0000000..a86d1b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/README.md @@ -0,0 +1,95 @@ +[![Gem Version](https://badge.fury.io/rb/ethon.svg)](https://badge.fury.io/rb/ethon) +[![Build Status](https://github.com/typhoeus/ethon/workflows/Ruby/badge.svg)](https://github.com/typhoeus/ethon/actions/workflows/ruby.yml) + +# Ethon + +In Greek mythology, Ethon, the son of Typhoeus and Echidna, is a gigantic eagle. 
So much for the history.
+In the modern world, Ethon is a very basic libcurl wrapper using ffi.
+
+* [Documentation](http://rubydoc.info/github/typhoeus/ethon/frames/Ethon)
+* [Website](http://typhoeus.github.com/)
+* [Mailing list](http://groups.google.com/group/typhoeus)
+
+## Installation
+
+With bundler:
+
+    gem "ethon"
+
+With rubygems:
+
+    gem install ethon
+
+## Usage
+
+Making the first request is simple:
+
+```ruby
+easy = Ethon::Easy.new(url: "www.example.com")
+easy.perform
+#=> :ok
+```
+
+You have access to various options, such as following redirects:
+
+```ruby
+easy = Ethon::Easy.new(url: "www.example.com", followlocation: true)
+easy.perform
+#=> :ok
+```
+
+Once you're done you can inspect the response code and body:
+
+```ruby
+easy = Ethon::Easy.new(url: "www.example.com", followlocation: true)
+easy.perform
+easy.response_code
+#=> 200
+easy.response_body
+#=> "<!doctype html>\n<html>..."
+```
+
+```ruby
+easy = Ethon::Easy.new
+easy.http_request("www.example.com", :post, { params: { a: 1 }, body: { b: 2 } })
+easy.perform
+#=> :ok
+```
+
+This is really handy when making requests since you don't have to care about setting
+everything up correctly.
+
+## LICENSE
+
+(The MIT License)
+
+Copyright © 2012-2016 [Hans Hasselberg](http://www.hans.io)
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without
+limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons
+to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
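The vendored README above only shows one request at a time. As a minimal sketch of how several of these Easy handles can be driven concurrently, assuming the `Ethon::Multi#add`/`Ethon::Multi#perform` API and the `Easy#on_complete` callback shipped with ethon 0.15.0 (this example is not part of the vendored files):

```ruby
require "ethon"

# Build a few Easy handles, configured exactly as in the README examples.
easies = %w[www.example.com www.example.org].map do |url|
  easy = Ethon::Easy.new(url: url, followlocation: true)
  # on_complete yields the finished easy once its transfer is done.
  easy.on_complete { |e| puts "#{e.url} -> #{e.response_code}" }
  easy
end

# Ethon::Multi drives all added handles through libcurl's multi interface,
# so both transfers run concurrently inside a single #perform call.
multi = Ethon::Multi.new
easies.each { |easy| multi.add(easy) }
multi.perform
# prints one "url -> code" line per request once all transfers finish
```

This is essentially the pattern Typhoeus builds on top of Ethon: one Multi instance feeding many Easy handles, with per-handle callbacks collecting the results.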
diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Rakefile new file mode 100644 index 0000000..a5bc61b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/Rakefile @@ -0,0 +1,40 @@ +# frozen_string_literal: true +require "bundler" +Bundler.setup + +require "rake" +require "rspec/core/rake_task" +$LOAD_PATH.unshift File.expand_path("../lib", __FILE__) +require "ethon/version" + +task :gem => :build +task :build do + system "gem build ethon.gemspec" +end + +task :install => :build do + system "gem install ethon-#{Ethon::VERSION}.gem" +end + +task :release => :build do + system "git tag -a v#{Ethon::VERSION} -m 'Tagging #{Ethon::VERSION}'" + system "git push --tags" + system "gem push ethon-#{Ethon::VERSION}.gem" +end + +RSpec::Core::RakeTask.new(:spec) do |t| + t.verbose = false + t.ruby_opts = "-W -I./spec -rspec_helper" +end + +desc "Start up the test servers" +task :start do + require_relative 'spec/support/boot' + begin + Boot.start_servers(:rake) + rescue Exception + end +end + +task :default => :spec + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/ethon.gemspec b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/ethon.gemspec new file mode 100644 index 0000000..561330b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/ethon.gemspec @@ -0,0 +1,26 @@ +# encoding: utf-8 +# frozen_string_literal: true +lib = File.expand_path('../lib/', __FILE__) +$:.unshift lib unless $:.include?(lib) + +require 'ethon/version' + +Gem::Specification.new do |s| + s.name = "ethon" + s.version = Ethon::VERSION + s.platform = Gem::Platform::RUBY + s.authors = ["Hans Hasselberg"] + s.email = ["me@hans.io"] + s.homepage = "https://github.com/typhoeus/ethon" + s.summary = "Libcurl wrapper." + s.description = "Very lightweight libcurl wrapper." + + s.required_rubygems_version = ">= 1.3.6" + s.license = 'MIT' + + s.add_dependency('ffi', ['>= 1.15.0']) + + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- spec/*`.split("\n") + s.require_path = 'lib' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon.rb new file mode 100644 index 0000000..461b4c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true +require 'logger' +require 'ffi' +require 'thread' +begin + require 'mime/types/columnar' +rescue LoadError + begin + require 'mime/types' + rescue LoadError + end +end +require 'tempfile' + +require 'ethon/libc' +require 'ethon/curl' +require 'ethon/easy' +require 'ethon/errors' +require 'ethon/loggable' +require 'ethon/multi' +require 'ethon/version' + +# Ethon is a very simple libcurl. +# It provides direct access to libcurl functionality +# as well as some helpers for doing http requests. +# +# Ethon was extracted from Typhoeus. If you want to +# see how others use Ethon look at the Typhoeus code. +# +# @see https://www.github.com/typhoeus/typhoeus Typhoeus +# +# @note Please update to the latest libcurl version in order +# to benefit from all features and bugfixes. 
+# http://curl.haxx.se/download.html +module Ethon +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curl.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curl.rb new file mode 100644 index 0000000..f9194c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curl.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true +require 'ethon/curls/codes' +require 'ethon/curls/options' +require 'ethon/curls/infos' +require 'ethon/curls/form_options' +require 'ethon/curls/messages' +require 'ethon/curls/functions' + +module Ethon + + # FFI Wrapper module for Curl. Holds constants and required initializers. + # + # @api private + module Curl + extend ::FFI::Library + extend Ethon::Curls::Codes + extend Ethon::Curls::Options + extend Ethon::Curls::Infos + extend Ethon::Curls::FormOptions + extend Ethon::Curls::Messages + + # :nodoc: + def self.windows? + Libc.windows? + end + + require 'ethon/curls/constants' + require 'ethon/curls/settings' + require 'ethon/curls/classes' + extend Ethon::Curls::Functions + + @blocking = true + + @@initialized = false + @@curl_mutex = Mutex.new + + class << self + # This function sets up the program environment that libcurl needs. + # Think of it as an extension of the library loader. + # + # This function must be called at least once within a program (a program is all the + # code that shares a memory space) before the program calls any other function in libcurl. + # The environment it sets up is constant for the life of the program and is the same for + # every program, so multiple calls have the same effect as one call. + # + # The flags option is a bit pattern that tells libcurl exactly what features to init, + # as described below. Set the desired bits by ORing the values together. In normal + # operation, you must specify CURL_GLOBAL_ALL. Don't use any other value unless + # you are familiar with it and mean to control internal operations of libcurl. + # + # This function is not thread safe. You must not call it when any other thread in + # the program (i.e. a thread sharing the same memory) is running. This doesn't just + # mean no other thread that is using libcurl. Because curl_global_init() calls + # functions of other libraries that are similarly thread unsafe, it could conflict with + # any other thread that uses these other libraries. + # + # @raise [ Ethon::Errors::GlobalInit ] If Curl.global_init fails. + def init + @@curl_mutex.synchronize { + if not @@initialized + raise Errors::GlobalInit.new if Curl.global_init(GLOBAL_ALL) != 0 + @@initialized = true + Ethon.logger.debug("ETHON: Libcurl initialized") if Ethon.logger + end + } + end + + # This function releases resources acquired by curl_global_init. + # You should call curl_global_cleanup once for each call you make to + # curl_global_init, after you are done using libcurl. + # This function is not thread safe. You must not call it when any other thread in the + # program (i.e. a thread sharing the same memory) is running. This doesn't just + # mean no other thread that is using libcurl. Because curl_global_cleanup calls functions of other + # libraries that are similarly thread unsafe, it could conflict with + # any other thread that uses these other libraries. + # See the description in libcurl of global environment requirements + # for details of how to use this function. 
+ def cleanup + @@curl_mutex.synchronize { + if @@initialized + Curl.global_cleanup() + @@initialized = false + Ethon.logger.debug("ETHON: Libcurl cleanup") if Ethon.logger + end + } + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/classes.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/classes.rb new file mode 100644 index 0000000..d1702e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/classes.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true +module Ethon + module Curl + # :nodoc: + class MsgData < ::FFI::Union + layout :whatever, :pointer, :code, :easy_code + end + + # :nodoc: + class Msg < ::FFI::Struct + layout :code, :msg_code, :easy_handle, :pointer, :data, MsgData + end + + class VersionInfoData < ::FFI::Struct + layout :curl_version, :uint8, + :version, :string, + :version_num, :int, + :host, :string, + :features, :int, + :ssl_version, :string, + :ssl_version_num, :long, + :libz_version, :string, + :protocols, :pointer + end + + # :nodoc: + class FDSet < ::FFI::Struct + if Curl.windows? + layout :fd_count, :uint, + # TODO: Make it future proof by dynamically grabbing FD_SETSIZE. + :fd_array, [:uint, 2048] + + def clear; self[:fd_count] = 0; end + else + # FD Set size. + FD_SETSIZE = ::Ethon::Libc.getdtablesize + layout :fds_bits, [:long, FD_SETSIZE / ::FFI::Type::LONG.size] + + # :nodoc: + def clear; super; end + end + end + + # :nodoc: + class Timeval < ::FFI::Struct + if Curl.windows? + layout :sec, :long, + :usec, :long + else + layout :sec, :time_t, + :usec, :suseconds_t + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/codes.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/codes.rb new file mode 100644 index 0000000..4b53096 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/codes.rb @@ -0,0 +1,122 @@ +# frozen_string_literal: true +module Ethon + module Curls # :nodoc: + + # This module contains all easy and + # multi return codes. 
+ module Codes + + # Libcurl error codes, refer + # https://github.com/bagder/curl/blob/master/include/curl/curl.h for details + def easy_codes + [ + :ok, + :unsupported_protocol, + :failed_init, + :url_malformat, + :not_built_in, + :couldnt_resolve_proxy, + :couldnt_resolve_host, + :couldnt_connect, + :ftp_weird_server_reply, + :remote_access_denied, + :ftp_accept_failed, + :ftp_weird_pass_reply, + :ftp_accept_timeout, + :ftp_weird_pasv_reply, + :ftp_weird_227_format, + :ftp_cant_get_host, + :obsolete16, + :ftp_couldnt_set_type, + :partial_file, + :ftp_couldnt_retr_file, + :obsolete20, + :quote_error, + :http_returned_error, + :write_error, + :obsolete24, + :upload_failed, + :read_error, + :out_of_memory, + :operation_timedout, + :obsolete29, + :ftp_port_failed, + :ftp_couldnt_use_rest, + :obsolete32, + :range_error, + :http_post_error, + :ssl_connect_error, + :bad_download_resume, + :file_couldnt_read_file, + :ldap_cannot_bind, + :ldap_search_failed, + :obsolete40, + :function_not_found, + :aborted_by_callback, + :bad_function_argument, + :obsolete44, + :interface_failed, + :obsolete46, + :too_many_redirects , + :unknown_option, + :telnet_option_syntax , + :obsolete50, + :peer_failed_verification, + :got_nothing, + :ssl_engine_notfound, + :ssl_engine_setfailed, + :send_error, + :recv_error, + :obsolete57, + :ssl_certproblem, + :ssl_cipher, + :bad_content_encoding, + :ldap_invalid_url, + :filesize_exceeded, + :use_ssl_failed, + :send_fail_rewind, + :ssl_engine_initfailed, + :login_denied, + :tftp_notfound, + :tftp_perm, + :remote_disk_full, + :tftp_illegal, + :tftp_unknownid, + :remote_file_exists, + :tftp_nosuchuser, + :conv_failed, + :conv_reqd, + :ssl_cacert_badfile, + :remote_file_not_found, + :ssh, + :ssl_shutdown_failed, + :again, + :ssl_crl_badfile, + :ssl_issuer_error, + :ftp_pret_failed, + :rtsp_cseq_error, + :rtsp_session_error, + :ftp_bad_file_list, + :chunk_failed, + :last + ] + end + + # Curl-Multi socket error codes, refer + # https://github.com/bagder/curl/blob/master/include/curl/multi.h for details + def multi_codes + [ + :call_multi_perform, -1, + :ok, + :bad_handle, + :bad_easy_handle, + :out_of_memory, + :internal_error, + :bad_socket, + :unknown_option, + :last + ] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/constants.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/constants.rb new file mode 100644 index 0000000..3c7c32d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/constants.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true +module Ethon + module Curl + # :nodoc: + VERSION_NOW = 3 + + # Flag. Initialize SSL. + GLOBAL_SSL = 0x01 + # Flag. Initialize win32 socket libraries. + GLOBAL_WIN32 = 0x02 + # Flag. Initialize everything possible. + GLOBAL_ALL = (GLOBAL_SSL | GLOBAL_WIN32) + # Flag. Initialize everything by default. 
+ GLOBAL_DEFAULT = GLOBAL_ALL + + # :nodoc: + EasyCode = enum(:easy_code, easy_codes) + # :nodoc: + MultiCode = enum(:multi_code, multi_codes) + + # :nodoc: + EasyOption = enum(:easy_option, easy_options(:enum).to_a.flatten) + # :nodoc: + MultiOption = enum(:multi_option, multi_options(:enum).to_a.flatten) + + # Used by curl_debug_callback when setting CURLOPT_DEBUGFUNCTION + # https://github.com/bagder/curl/blob/master/include/curl/curl.h#L378 for details + DebugInfoType = enum(:debug_info_type, debug_info_types) + + # :nodoc: + InfoType = enum(info_types.to_a.flatten) + + # Info details, refer + # https://github.com/bagder/curl/blob/master/src/tool_writeout.c#L66 for details + Info = enum(:info, infos.to_a.flatten) + + # Form options, used by FormAdd for temporary storage, refer + # https://github.com/bagder/curl/blob/master/lib/formdata.h#L51 for details + FormOption = enum(:form_option, form_options) + + # :nodoc: + MsgCode = enum(:msg_code, msg_codes) + + VERSION_IPV6 = (1<<0) # IPv6-enabled + VERSION_KERBEROS4 = (1<<1) # kerberos auth is supported + VERSION_SSL = (1<<2) # SSL options are present + VERSION_LIBZ = (1<<3) # libz features are present + VERSION_NTLM = (1<<4) # NTLM auth is supported + VERSION_GSSNEGOTIATE = (1<<5) # Negotiate auth supp + VERSION_DEBUG = (1<<6) # built with debug capabilities + VERSION_ASYNCHDNS = (1<<7) # asynchronous dns resolves + VERSION_SPNEGO = (1<<8) # SPNEGO auth is supported + VERSION_LARGEFILE = (1<<9) # supports files bigger than 2GB + VERSION_IDN = (1<<10) # International Domain Names support + VERSION_SSPI = (1<<11) # SSPI is supported + VERSION_CONV = (1<<12) # character conversions supported + VERSION_CURLDEBUG = (1<<13) # debug memory tracking supported + VERSION_TLSAUTH_SRP = (1<<14) # TLS-SRP auth is supported + VERSION_NTLM_WB = (1<<15) # NTLM delegating to winbind helper + VERSION_HTTP2 = (1<<16) # HTTP2 support built + VERSION_GSSAPI = (1<<17) # GSS-API is supported + + SOCKET_BAD = -1 + SOCKET_TIMEOUT = SOCKET_BAD + + PollAction = enum(:poll_action, [ + :none, + :in, + :out, + :inout, + :remove + ]) + + SocketReadiness = bitmask(:socket_readiness, [ + :in, # CURL_CSELECT_IN - 0x01 (bit 0) + :out, # CURL_CSELECT_OUT - 0x02 (bit 1) + :err, # CURL_CSELECT_ERR - 0x04 (bit 2) + ]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/form_options.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/form_options.rb new file mode 100644 index 0000000..ce5f079 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/form_options.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains the available options for forms. 
+ module FormOptions + + # Form options, used by FormAdd for temporary storage, refer + # https://github.com/bagder/curl/blob/master/lib/formdata.h#L51 for details + def form_options + [ + :none, + :copyname, + :ptrname, + :namelength, + :copycontents, + :ptrcontents, + :contentslength, + :filecontent, + :array, + :obsolete, + :file, + :buffer, + :bufferptr, + :bufferlength, + :contenttype, + :contentheader, + :filename, + :end, + :obsolete2, + :stream, + :last + ] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/functions.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/functions.rb new file mode 100644 index 0000000..2c01bea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/functions.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains the functions to be attached in order to work with + # libcurl. + module Functions + + # :nodoc: + def self.extended(base) + base.attach_function :global_init, :curl_global_init, [:long], :int + base.attach_function :global_cleanup, :curl_global_cleanup, [], :void + base.attach_function :free, :curl_free, [:pointer], :void + + base.attach_function :easy_init, :curl_easy_init, [], :pointer + base.attach_function :easy_cleanup, :curl_easy_cleanup, [:pointer], :void + base.attach_function :easy_getinfo, :curl_easy_getinfo, [:pointer, :info, :varargs], :easy_code + base.attach_function :easy_setopt, :curl_easy_setopt, [:pointer, :easy_option, :varargs], :easy_code + base.instance_variable_set(:@blocking, true) + base.attach_function :easy_perform, :curl_easy_perform, [:pointer], :easy_code + base.attach_function :easy_strerror, :curl_easy_strerror, [:easy_code], :string + base.attach_function :easy_escape, :curl_easy_escape, [:pointer, :pointer, :int], :pointer + base.attach_function :easy_reset, :curl_easy_reset, [:pointer], :void + base.attach_function :easy_duphandle, :curl_easy_duphandle, [:pointer], :pointer + + base.attach_function :formadd, :curl_formadd, [:pointer, :pointer, :varargs], :int + base.attach_function :formfree, :curl_formfree, [:pointer], :void + + base.attach_function :multi_init, :curl_multi_init, [], :pointer + base.attach_function :multi_cleanup, :curl_multi_cleanup, [:pointer], :void + base.attach_function :multi_add_handle, :curl_multi_add_handle, [:pointer, :pointer], :multi_code + base.attach_function :multi_remove_handle, :curl_multi_remove_handle, [:pointer, :pointer], :multi_code + base.attach_function :multi_info_read, :curl_multi_info_read, [:pointer, :pointer], Curl::Msg.ptr + base.attach_function :multi_perform, :curl_multi_perform, [:pointer, :pointer], :multi_code + base.attach_function :multi_timeout, :curl_multi_timeout, [:pointer, :pointer], :multi_code + base.attach_function :multi_fdset, :curl_multi_fdset, [:pointer, Curl::FDSet.ptr, Curl::FDSet.ptr, Curl::FDSet.ptr, :pointer], :multi_code + base.attach_function :multi_strerror, :curl_multi_strerror, [:int], :string + base.attach_function :multi_setopt, :curl_multi_setopt, [:pointer, :multi_option, :varargs], :multi_code + base.attach_function :multi_socket_action, :curl_multi_socket_action, [:pointer, :int, :socket_readiness, :pointer], :multi_code + + base.attach_function :version, :curl_version, [], :string + base.attach_function :version_info, :curl_version_info, [], Curl::VersionInfoData.ptr + + base.attach_function :slist_append, :curl_slist_append, [:pointer, :string], :pointer + base.attach_function :slist_free_all, 
:curl_slist_free_all, [:pointer], :void + base.instance_variable_set(:@blocking, true) + + if Curl.windows? + base.ffi_lib 'ws2_32' + else + base.ffi_lib ::FFI::Library::LIBC + end + + base.attach_function :select, [:int, Curl::FDSet.ptr, Curl::FDSet.ptr, Curl::FDSet.ptr, Curl::Timeval.ptr], :int + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/infos.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/infos.rb new file mode 100644 index 0000000..d285f5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/infos.rb @@ -0,0 +1,151 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains logic for the available informations + # on an easy, eg.: connect_time. + module Infos + + # Return info types. + # + # @example Return info types. + # Ethon::Curl.info_types + # + # @return [ Hash ] The info types. + def info_types + { + :string =>0x100000, + :long => 0x200000, + :double =>0x300000, + :slist => 0x400000 + } + end + + # http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTDEBUGFUNCTION + # https://github.com/bagder/curl/blob/master/include/curl/curl.h#L378 + # + # @example Return debug info types. + # Ethon::Curl.debug_info_types + # + # @return [ Hash ] The info types available to curl_debug_callback. + def debug_info_types + [ + :text, 0, + :header_in, + :header_out, + :data_in, + :data_out, + :ssl_data_in, + :ssl_data_out + ] + end + + # Return Info details, refer + # https://github.com/bagder/curl/blob/master/src/tool_writeout.c#L66 for details + # + # @example Return infos. + # Ethon::Curl.infos + # + # @return [ Hash ] The infos. + def infos + { + :effective_url => info_types[:string] + 1, + :response_code => info_types[:long] + 2, + :total_time => info_types[:double] + 3, + :namelookup_time => info_types[:double] + 4, + :connect_time => info_types[:double] + 5, + :pretransfer_time => info_types[:double] + 6, + :size_upload => info_types[:double] + 7, + :size_download => info_types[:double] + 8, + :speed_download => info_types[:double] + 9, + :speed_upload => info_types[:double] + 10, + :header_size => info_types[:long] + 11, + :request_size => info_types[:long] + 12, + :ssl_verifyresult => info_types[:long] + 13, + :filetime => info_types[:long] + 14, + :content_length_download =>info_types[:double] + 15, + :content_length_upload => info_types[:double] + 16, + :starttransfer_time => info_types[:double] + 17, + :content_type => info_types[:string] + 18, + :redirect_time => info_types[:double] + 19, + :redirect_count => info_types[:long] + 20, + :private => info_types[:string] + 21, + :http_connectcode => info_types[:long] + 22, + :httpauth_avail => info_types[:long] + 23, + :proxyauth_avail => info_types[:long] + 24, + :os_errno => info_types[:long] + 25, + :num_connects => info_types[:long] + 26, + :ssl_engines => info_types[:slist] + 27, + :cookielist => info_types[:slist] + 28, + :lastsocket => info_types[:long] + 29, + :ftp_entry_path => info_types[:string] + 30, + :redirect_url => info_types[:string] + 31, + :primary_ip => info_types[:string] + 32, + :appconnect_time => info_types[:double] + 33, + :certinfo => info_types[:slist] + 34, + :condition_unmet => info_types[:long] + 35, + :rtsp_session_id => info_types[:string] + 36, + :rtsp_client_cseq => info_types[:long] + 37, + :rtsp_server_cseq => info_types[:long] + 38, + :rtsp_cseq_recv => info_types[:long] + 39, + :primary_port => info_types[:long] + 40, + :local_ip => info_types[:string] + 41, + :local_port 
=> info_types[:long] + 42, + :last =>42 + } + end + + # Return info as string. + # + # @example Return info. + # Curl.get_info_string(:primary_ip, easy) + # + # @param [ Symbol ] option The option name. + # @param [ ::FFI::Pointer ] handle The easy handle. + # + # @return [ String ] The info. + def get_info_string(option, handle) + string_ptr = ::FFI::MemoryPointer.new(:pointer) + + if easy_getinfo(handle, option, :pointer, string_ptr) == :ok + ptr=string_ptr.read_pointer + ptr.null? ? nil : ptr.read_string + end + end + + # Return info as integer. + # + # @example Return info. + # Curl.get_info_long(:response_code, easy) + # + # @param [ Symbol ] option The option name. + # @param [ ::FFI::Pointer ] handle The easy handle. + # + # @return [ Integer ] The info. + def get_info_long(option, handle) + long_ptr = ::FFI::MemoryPointer.new(:long) + + if easy_getinfo(handle, option, :pointer, long_ptr) == :ok + long_ptr.read_long + end + end + + # Return info as float + # + # @example Return info. + # Curl.get_info_double(:response_code, easy) + # + # @param [ Symbol ] option The option name. + # @param [ ::FFI::Pointer ] handle The easy handle. + # + # @return [ Float ] The info. + def get_info_double(option, handle) + double_ptr = ::FFI::MemoryPointer.new(:double) + + if easy_getinfo(handle, option, :pointer, double_ptr) == :ok + double_ptr.read_double + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/messages.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/messages.rb new file mode 100644 index 0000000..72bc54d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/messages.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains available message codes. + module Messages + + # Return message codes. + # + # @example Return message codes. + # Ethon::Curl.msg_codes + # + # @return [ Array ] The messages codes. + def msg_codes + [:none, :done, :last] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/options.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/options.rb new file mode 100644 index 0000000..5ab99a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/options.rb @@ -0,0 +1,502 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains logic for setting options on + # easy or multi interface. + module Options + + OPTION_STRINGS = { :easy => 'easy_options', :multi => 'multi_options' }.freeze + FOPTION_STRINGS = { :easy => 'EASY_OPTIONS', :multi => 'MULTI_OPTIONS' }.freeze + FUNCS = { :easy => 'easy_setopt', :multi => 'multi_setopt' }.freeze + # Sets appropriate option for easy, depending on value type. + def set_option(option, value, handle, type = :easy) + type = type.to_sym unless type.is_a?(Symbol) + raise NameError, "Ethon::Curls::Options unknown type #{type}." unless respond_to?(OPTION_STRINGS[type]) + opthash=send(OPTION_STRINGS[type], nil) + raise Errors::InvalidOption.new(option) unless opthash.include?(option) + + case opthash[option][:type] + when :none + return if value.nil? + value=1 + va_type=:long + when :int + return if value.nil? + va_type=:long + value=value.to_i + when :bool + return if value.nil? + va_type=:long + value=(value&&value!=0) ? 1 : 0 + when :time + return if value.nil? + va_type=:long + value=value.to_i + when :enum + return if value.nil? 
+ va_type=:long + value = case value + when Symbol + opthash[option][:opts][value] + when String + opthash[option][:opts][value.to_sym] + else + value + end.to_i + when :bitmask + return if value.nil? + va_type=:long + value = case value + when Symbol + opthash[option][:opts][value] + when Array + value.inject(0) { |res,v| res|opthash[option][:opts][v] } + else + value + end.to_i + when :string + va_type=:string + value=value.to_s unless value.nil? + when :string_as_pointer + va_type = :pointer + s = '' + s = value.to_s unless value.nil? + value = FFI::MemoryPointer.new(:char, s.bytesize) + value.put_bytes(0, s) + when :string_escape_null + va_type=:string + value=Util.escape_zero_byte(value) unless value.nil? + when :ffipointer + va_type=:pointer + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? FFI::Pointer + when :curl_slist + va_type=:pointer + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? FFI::Pointer + when :buffer + raise NotImplementedError, "Ethon::Curls::Options option #{option} buffer type not implemented." + when :dontuse_object + raise NotImplementedError, "Ethon::Curls::Options option #{option} type not implemented." + when :cbdata + raise NotImplementedError, "Ethon::Curls::Options option #{option} callback data type not implemented. Use Ruby closures." + when :callback + va_type=:callback + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc + when :socket_callback + va_type=:socket_callback + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc + when :timer_callback + va_type=:timer_callback + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc + when :debug_callback + va_type=:debug_callback + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc + when :progress_callback + va_type=:progress_callback + raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc + when :off_t + return if value.nil? + va_type=:int64 + value=value.to_i + end + + if va_type==:long or va_type==:int64 then + bits=FFI.type_size(va_type)*8 + tv=((value<0) ? value.abs-1 : value) + raise Errors::InvalidValue.new(option,value) unless tv<(1< 0, + :objectpoint => 10000, + :functionpoint => 20000, + :off_t => 30000 + } + OPTION_TYPE_MAP = { + :none => :long, + :int => :long, + :bool => :long, + :time => :long, + :enum => :long, # Two ways to specify values (as opts parameter): + # * Array of symbols, these will number sequentially + # starting at 0. Skip elements with nil. (see :netrc) + # * Hash of :symbol => enum_value (See :proxytype) + :bitmask => :long, # Three ways to specify values (as opts parameter): + # * Hash of :symbol => bitmask_value or Array. + # An Array can be an array of already defined + # Symbols, which represents a bitwise or of those + # symbols. (See :httpauth) + # * Array of symbols, these will number the bits + # sequentially (i.e. 0, 1, 2, 4, etc.). Skip + # elements with nil. The last element can be a + # Hash, which will be interpreted as above. + # (See :protocols) + # :all defaults to all bits set + :string => :objectpoint, + :string_escape_null => :objectpoint, + :string_as_pointer => :objectpoint, + :ffipointer => :objectpoint, # FFI::Pointer + :curl_slist => :objectpoint, + :buffer => :objectpoint, # A memory buffer of size defined in the options + :dontuse_object => :objectpoint, # An object we don't support (e.g. 
FILE*) + :cbdata => :objectpoint, + :callback => :functionpoint, + :socket_callback => :functionpoint, + :timer_callback => :functionpoint, + :debug_callback => :functionpoint, + :progress_callback => :functionpoint, + :off_t => :off_t, + } + + def self.option(ftype,name,type,num,opts=nil) + case type + when :enum + if opts.is_a? Array then + opts=Hash[opts.each_with_index.to_a] + elsif not opts.is_a? Hash then + raise TypeError, "Ethon::Curls::Options #{ftype} #{name} Expected opts to be an Array or a Hash." + end + + when :bitmask + if opts.is_a? Array then + if opts.last.is_a? Hash then + hopts=opts.pop + else + hopts={} + end + opts.each_with_index do |v,i| + next if v.nil? + if i==0 then + hopts[v]=0 + else + hopts[v]=1<<(i-1) + end + end + opts=hopts + elsif not opts.is_a? Hash then + raise TypeError, "Ethon::Curls::Options #{ftype} #{name} Expected opts to be an Array or a Hash." + end + opts[:all]=-1 unless opts.include? :all + opts.each do |k,v| + if v.is_a? Array then + opts[k]=v.map { |b| opts[b] }.inject :| + end + end + + when :buffer + raise TypeError, "Ethon::Curls::Options #{ftype} #{name} Expected opts to be an Array or a Hash." unless opts.is_a? Integer + + else + raise ArgumentError, "Ethon::Curls::Options #{ftype} #{name} Expected no opts." unless opts.nil? + end + opthash=const_get(FOPTION_STRINGS[ftype]) + opthash[name] = { :type => type, + :opt => OPTION_TYPE_BASE[OPTION_TYPE_MAP[type]] + num, + :opts => opts } + end + + def self.option_alias(ftype,name,*aliases) + opthash=const_get(FOPTION_STRINGS[ftype]) + aliases.each { |a| opthash[a]=opthash[name] } + end + + def self.option_type(type) + cname = FOPTION_STRINGS[type] + const_set(cname, {}) + define_method(OPTION_STRINGS[type]) do |rt| + return Ethon::Curls::Options.const_get(cname).map { |k, v| [k, v[:opt]] } if rt == :enum + Ethon::Curls::Options.const_get(cname) + end + end + + # Curl multi options, refer + # Defined @ https://github.com/bagder/curl/blob/master/include/curl/multi.h + # Documentation @ http://curl.haxx.se/libcurl/c/curl_multi_setopt.html + option_type :multi + + option :multi, :socketfunction, :socket_callback, 1 + option :multi, :socketdata, :cbdata, 2 + option :multi, :pipelining, :int, 3 + option :multi, :timerfunction, :timer_callback, 4 + option :multi, :timerdata, :cbdata, 5 + option :multi, :maxconnects, :int, 6 + option :multi, :max_host_connections, :int, 7 + option :multi, :max_pipeline_length, :int, 8 + option :multi, :content_length_penalty_size, :off_t, 9 + option :multi, :chunk_length_penalty_size, :off_t, 10 + option :multi, :pipelining_site_bl, :dontuse_object, 11 + option :multi, :pipelining_server_bl, :dontuse_object, 12 + option :multi, :max_total_connections, :int, 3 + + # Curl easy options + # Defined @ https://github.com/bagder/curl/blob/master/include/curl/curl.h + # Documentation @ http://curl.haxx.se/libcurl/c/curl_easy_setopt.html + ## BEHAVIOR OPTIONS + option_type :easy + + option :easy, :verbose, :bool, 41 + option :easy, :header, :bool, 42 + option :easy, :noprogress, :bool, 43 + option :easy, :nosignal, :bool, 99 + option :easy, :wildcardmatch, :bool, 197 + ## CALLBACK OPTIONS + option :easy, :writefunction, :callback, 11 + option :easy, :file, :cbdata, 1 + option_alias :easy, :file, :writedata + option :easy, :readfunction, :callback, 12 + option :easy, :infile, :cbdata, 9 + option_alias :easy, :infile, :readdata + option :easy, :ioctlfunction, :callback, 130 + option :easy, :ioctldata, :cbdata, 131 + option :easy, :seekfunction, :callback, 167 + option :easy, 
:seekdata, :cbdata, 168 + option :easy, :sockoptfunction, :callback, 148 + option :easy, :sockoptdata, :cbdata, 149 + option :easy, :opensocketfunction, :callback, 163 + option :easy, :opensocketdata, :cbdata, 164 + option :easy, :closesocketfunction, :callback, 208 + option :easy, :closesocketdata, :cbdata, 209 + option :easy, :path_as_is, :bool, 234 + option :easy, :progressfunction, :progress_callback, 56 + option :easy, :progressdata, :cbdata, 57 + option :easy, :headerfunction, :callback, 79 + option :easy, :writeheader, :cbdata, 29 + option_alias :easy, :writeheader, :headerdata + option :easy, :debugfunction, :debug_callback, 94 + option :easy, :debugdata, :cbdata, 95 + option :easy, :ssl_ctx_function, :callback, 108 + option :easy, :ssl_ctx_data, :cbdata, 109 + option :easy, :conv_to_network_function, :callback, 143 + option :easy, :conv_from_network_function, :callback, 142 + option :easy, :conv_from_utf8_function, :callback, 144 + option :easy, :interleavefunction, :callback, 196 + option :easy, :interleavedata, :cbdata, 195 + option :easy, :chunk_bgn_function, :callback, 198 + option :easy, :chunk_end_function, :callback, 199 + option :easy, :chunk_data, :cbdata, 201 + option :easy, :fnmatch_function, :callback, 200 + option :easy, :fnmatch_data, :cbdata, 202 + option :easy, :xferinfofunction, :progress_callback, 219 + option :easy, :xferinfodata, :cbdata, 57 + ## ERROR OPTIONS + option :easy, :errorbuffer, :buffer, 10, 256 + option :easy, :stderr, :dontuse_object, 37 + option :easy, :failonerror, :bool, 45 + ## NETWORK OPTIONS + option :easy, :url, :string, 2 + option :easy, :protocols, :bitmask, 181, [nil, :http, :https, :ftp, :ftps, :scp, :sftp, :telnet, :ldap, :ldaps, :dict, :file, :tftp, :imap, :imaps, :pop3, :pop3s, :smtp, :smtps, :rtsp, :rtmp, :rtmpt, :rtmpe, :rtmpte, :rtmps, :rtmpts, :gopher] + option :easy, :redir_protocols, :bitmask, 182, [nil, :http, :https, :ftp, :ftps, :scp, :sftp, :telnet, :ldap, :ldaps, :dict, :file, :tftp, :imap, :imaps, :pop3, :pop3s, :smtp, :smtps, :rtsp, :rtmp, :rtmpt, :rtmpe, :rtmpte, :rtmps, :rtmpts, :gopher] + option :easy, :proxy, :string, 4 + option :easy, :proxyport, :int, 59 + option :easy, :proxytype, :enum, 101, [:http, :http_1_0, :https, nil, :socks4, :socks5, :socks4a, :socks5_hostname] + option :easy, :noproxy, :string, 177 + option :easy, :httpproxytunnel, :bool, 61 + option :easy, :socks5_gssapi_service, :string, 179 + option :easy, :socks5_gssapi_nec, :bool, 180 + option :easy, :interface, :string, 62 + option :easy, :localport, :int, 139 + option :easy, :localportrange, :int, 140 + option :easy, :dns_cache_timeout, :int, 92 + option :easy, :dns_use_global_cache, :bool, 91 # Obsolete + option :easy, :dns_interface, :string, 221 + option :easy, :dns_local_ip4, :string, 222 + option :easy, :dns_shuffle_addresses, :bool, 275 + option :easy, :buffersize, :int, 98 + option :easy, :port, :int, 3 + option :easy, :tcp_nodelay, :bool, 121 + option :easy, :address_scope, :int, 171 + option :easy, :tcp_keepalive, :bool, 213 + option :easy, :tcp_keepidle, :int, 214 + option :easy, :tcp_keepintvl, :int, 215 + ## NAMES and PASSWORDS OPTIONS (Authentication) + option :easy, :netrc, :enum, 51, [:ignored, :optional, :required] + option :easy, :netrc_file, :string, 118 + option :easy, :userpwd, :string, 5 + option :easy, :proxyuserpwd, :string, 6 + option :easy, :username, :string, 173 + option :easy, :password, :string, 174 + option :easy, :proxyusername, :string, 175 + option :easy, :proxypassword, :string, 176 + option :easy, :httpauth, 
:bitmask, 107, [:none, :basic, :digest, :gssnegotiate, :ntlm, :digest_ie, :ntlm_wb, {:only => 1<<31, :any => ~0x10, :anysafe => ~0x11, :auto => 0x1f}] + option :easy, :tlsauth_type, :enum, 206, [:none, :srp] + option :easy, :tlsauth_username, :string, 204 + option :easy, :tlsauth_password, :string, 205 + option :easy, :proxyauth, :bitmask, 111, [:none, :basic, :digest, :gssnegotiate, :ntlm, :digest_ie, :ntlm_wb, {:only => 1<<31, :any => ~0x10, :anysafe => ~0x11, :auto => 0x1f}] + option :easy, :sasl_ir, :bool, 218 + ## HTTP OPTIONS + option :easy, :autoreferer, :bool, 58 + option :easy, :accept_encoding, :string, 102 + option_alias :easy, :accept_encoding, :encoding + option :easy, :transfer_encoding, :bool, 207 + option :easy, :followlocation, :bool, 52 + option :easy, :unrestricted_auth, :bool, 105 + option :easy, :maxredirs, :int, 68 + option :easy, :postredir, :bitmask, 161, [:get_all, :post_301, :post_302, :post_303, {:post_all => [:post_301, :post_302, :post_303]}] + option_alias :easy, :postredir, :post301 + option :easy, :put, :bool, 54 + option :easy, :post, :bool, 47 + option :easy, :postfields, :string, 15 + option :easy, :postfieldsize, :int, 60 + option :easy, :postfieldsize_large, :off_t, 120 + option :easy, :copypostfields, :string_as_pointer, 165 + option :easy, :httppost, :ffipointer, 24 + option :easy, :referer, :string, 16 + option :easy, :useragent, :string, 18 + option :easy, :httpheader, :curl_slist, 23 + option :easy, :http200aliases, :curl_slist, 104 + option :easy, :cookie, :string, 22 + option :easy, :cookiefile, :string, 31 + option :easy, :cookiejar, :string, 82 + option :easy, :cookiesession, :bool, 96 + option :easy, :cookielist, :string, 135 + option :easy, :httpget, :bool, 80 + option :easy, :http_version, :enum, 84, [:none, :httpv1_0, :httpv1_1, :httpv2_0] + option :easy, :ignore_content_length, :bool, 136 + option :easy, :http_content_decoding, :bool, 158 + option :easy, :http_transfer_decoding, :bool, 157 + ## SMTP OPTIONS + option :easy, :mail_from, :string, 186 + option :easy, :mail_rcpt, :curl_slist, 187 + option :easy, :mail_auth, :string, 217 + ## TFTP OPTIONS + option :easy, :tftp_blksize, :int, 178 + ## FTP OPTIONS + option :easy, :ftpport, :string, 17 + option :easy, :quote, :curl_slist, 28 + option :easy, :postquote, :curl_slist, 39 + option :easy, :prequote, :curl_slist, 93 + option :easy, :dirlistonly, :bool, 48 + option_alias :easy, :dirlistonly, :ftplistonly + option :easy, :append, :bool, 50 + option_alias :easy, :append, :ftpappend + option :easy, :ftp_use_eprt, :bool, 106 + option :easy, :ftp_use_epsv, :bool, 85 + option :easy, :ftp_use_pret, :bool, 188 + option :easy, :ftp_create_missing_dirs, :bool, 110 + option :easy, :ftp_response_timeout, :int, 112 + option_alias :easy, :ftp_response_timeout, :server_response_timeout + option :easy, :ftp_alternative_to_user, :string, 147 + option :easy, :ftp_skip_pasv_ip, :bool, 137 + option :easy, :ftpsslauth, :enum, 129, [:default, :ssl, :tls] + option :easy, :ftp_ssl_ccc, :enum, 154, [:none, :passive, :active] + option :easy, :ftp_account, :string, 134 + option :easy, :ftp_filemethod, :enum, 138, [:default, :multicwd, :nocwd, :singlecwd] + ## RTSP OPTIONS + option :easy, :rtsp_request, :enum, 189, [:none, :options, :describe, :announce, :setup, :play, :pause, :teardown, :get_parameter, :set_parameter, :record, :receive] + option :easy, :rtsp_session_id, :string, 190 + option :easy, :rtsp_stream_uri, :string, 191 + option :easy, :rtsp_transport, :string, 192 + option_alias :easy, :httpheader, 
:rtspheader + option :easy, :rtsp_client_cseq, :int, 193 + option :easy, :rtsp_server_cseq, :int, 194 + ## PROTOCOL OPTIONS + option :easy, :transfertext, :bool, 53 + option :easy, :proxy_transfer_mode, :bool, 166 + option :easy, :crlf, :bool, 27 + option :easy, :range, :string, 7 + option :easy, :resume_from, :int, 21 + option :easy, :resume_from_large, :off_t, 116 + option :easy, :customrequest, :string, 36 + option :easy, :filetime, :bool, 69 + option :easy, :nobody, :bool, 44 + option :easy, :infilesize, :int, 14 + option :easy, :infilesize_large, :off_t, 115 + option :easy, :upload, :bool, 46 + option :easy, :maxfilesize, :int, 114 + option :easy, :maxfilesize_large, :off_t, 117 + option :easy, :timecondition, :enum, 33, [:none, :ifmodsince, :ifunmodsince, :lastmod] + option :easy, :timevalue, :time, 34 + ## CONNECTION OPTIONS + option :easy, :timeout, :int, 13 + option :easy, :timeout_ms, :int, 155 + option :easy, :low_speed_limit, :int, 19 + option :easy, :low_speed_time, :int, 20 + option :easy, :max_send_speed_large, :off_t, 145 + option :easy, :max_recv_speed_large, :off_t, 146 + option :easy, :maxconnects, :int, 71 + option :easy, :fresh_connect, :bool, 74 + option :easy, :forbid_reuse, :bool, 75 + option :easy, :connecttimeout, :int, 78 + option :easy, :connecttimeout_ms, :int, 156 + option :easy, :ipresolve, :enum, 113, [:whatever, :v4, :v6] + option :easy, :connect_only, :bool, 141 + option :easy, :use_ssl, :enum, 119, [:none, :try, :control, :all] + option_alias :easy, :use_ssl, :ftp_ssl + option :easy, :resolve, :curl_slist, 203 + option :easy, :dns_servers, :string, 211 + option :easy, :accepttimeout_ms, :int, 212 + option :easy, :unix_socket_path, :string, 231 + option :easy, :pipewait, :bool, 237 + option_alias :easy, :unix_socket_path, :unix_socket + ## SSL and SECURITY OPTIONS + option :easy, :sslcert, :string, 25 + option :easy, :sslcerttype, :string, 86 + option :easy, :sslkey, :string, 87 + option :easy, :sslkeytype, :string, 88 + option :easy, :keypasswd, :string, 26 + option_alias :easy, :keypasswd, :sslcertpasswd + option_alias :easy, :keypasswd, :sslkeypasswd + option :easy, :sslengine, :string, 89 + option :easy, :sslengine_default, :none, 90 + option :easy, :sslversion, :enum, 32, [:default, :tlsv1, :sslv2, :sslv3, :tlsv1_0, :tlsv1_1, :tlsv1_2] + option :easy, :ssl_verifypeer, :bool, 64 + option :easy, :cainfo, :string, 65 + option :easy, :issuercert, :string, 170 + option :easy, :capath, :string, 97 + option :easy, :crlfile, :string, 169 + option :easy, :ssl_verifyhost, :int, 81 + option :easy, :certinfo, :bool, 172 + option :easy, :random_file, :string, 76 + option :easy, :egdsocket, :string, 77 + option :easy, :ssl_cipher_list, :string, 83 + option :easy, :ssl_sessionid_cache, :bool, 150 + option :easy, :ssl_options, :bitmask, 216, [nil, :allow_beast] + option :easy, :krblevel, :string, 63 + option_alias :easy, :krblevel, :krb4level + option :easy, :gssapi_delegation, :bitmask, 210, [:none, :policy_flag, :flag] + option :easy, :pinnedpublickey, :string, 230 + option_alias :easy, :pinnedpublickey, :pinned_public_key + ## PROXY SSL OPTIONS + option :easy, :proxy_cainfo, :string, 246 + option :easy, :proxy_capath, :string, 247 + option :easy, :proxy_ssl_verifypeer, :bool, 248 + option :easy, :proxy_ssl_verifyhost, :int, 249 + option :easy, :proxy_sslversion, :enum, 250, [:default, :tlsv1, :sslv2, :sslv3, :tlsv1_0, :tlsv1_1, :tlsv1_2] + option :easy, :proxy_tlsauth_username, :string, 251 + option :easy, :proxy_tlsauth_password, :string, 252 + option :easy, 
:proxy_tlsauth_type, :enum, 253, [:none, :srp] + option :easy, :proxy_sslcert, :string, 254 + option :easy, :proxy_sslcerttype, :string, 255 + option :easy, :proxy_sslkey, :string, 256 + option :easy, :proxy_sslkeytype, :string, 257 + option :easy, :proxy_keypasswd, :string, 258 + option_alias :easy, :proxy_keypasswd, :proxy_sslcertpasswd + option_alias :easy, :proxy_keypasswd, :proxy_sslkeypasswd + option :easy, :proxy_ssl_cipher_list, :string, 259 + option :easy, :proxy_crlfile, :string, 260 + option :easy, :proxy_ssl_options, :bitmask, 261, [nil, :allow_beast] + option :easy, :pre_proxy, :string, 262 + option :easy, :proxy_pinnedpublickey, :string, 263 + option_alias :easy, :proxy_pinnedpublickey, :proxy_pinned_public_key + option :easy, :proxy_issuercert, :string, 296 + ## SSH OPTIONS + option :easy, :ssh_auth_types, :bitmask, 151, [:none, :publickey, :password, :host, :keyboard, :agent, {:any => [:all], :default => [:any]}] + option :easy, :ssh_host_public_key_md5, :string, 162 + option :easy, :ssh_public_keyfile, :string, 152 + option :easy, :ssh_private_keyfile, :string, 153 + option :easy, :ssh_knownhosts, :string, 183 + option :easy, :ssh_keyfunction, :callback, 184 + option :easy, :khstat, :enum, -1, [:fine_add_to_file, :fine, :reject, :defer] # Kludge to make this enum available... Access via CurL::EASY_OPTIONS[:khstat][:opt] + option :easy, :ssh_keydata, :cbdata, 185 + ## OTHER OPTIONS + option :easy, :private, :cbdata, 103 + option :easy, :share, :dontuse_object, 100 + option :easy, :new_file_perms, :int, 159 + option :easy, :new_directory_perms, :int, 160 + ## TELNET OPTIONS + option :easy, :telnetoptions, :curl_slist, 70 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/settings.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/settings.rb new file mode 100644 index 0000000..8c0161b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/curls/settings.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true +module Ethon + module Curl + callback :callback, [:pointer, :size_t, :size_t, :pointer], :size_t + callback :socket_callback, [:pointer, :int, :poll_action, :pointer, :pointer], :multi_code + callback :timer_callback, [:pointer, :long, :pointer], :multi_code + callback :debug_callback, [:pointer, :debug_info_type, :pointer, :size_t, :pointer], :int + callback :progress_callback, [:pointer, :long_long, :long_long, :long_long, :long_long], :int + ffi_lib_flags :now, :global + ffi_lib ['libcurl', 'libcurl.so.4'] + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy.rb new file mode 100644 index 0000000..355a519 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy.rb @@ -0,0 +1,315 @@ +# frozen_string_literal: true +require 'ethon/easy/informations' +require 'ethon/easy/features' +require 'ethon/easy/callbacks' +require 'ethon/easy/options' +require 'ethon/easy/header' +require 'ethon/easy/util' +require 'ethon/easy/params' +require 'ethon/easy/form' +require 'ethon/easy/http' +require 'ethon/easy/operations' +require 'ethon/easy/response_callbacks' +require 'ethon/easy/debug_info' +require 'ethon/easy/mirror' + +module Ethon + + # This is the class representing the libcurl easy interface + # See http://curl.haxx.se/libcurl/c/libcurl-easy.html for more informations. + # + # @example You can access the libcurl easy interface through this class, every request is based on it. 
The simplest setup looks like that: + # + # e = Ethon::Easy.new(url: "www.example.com") + # e.perform + # #=> :ok + # + # @example You can the reuse this Easy for the next request: + # + # e.reset # reset easy handle + # e.url = "www.google.com" + # e.followlocation = true + # e.perform + # #=> :ok + # + # @see initialize + class Easy + include Ethon::Easy::Informations + include Ethon::Easy::Callbacks + include Ethon::Easy::Options + include Ethon::Easy::Header + include Ethon::Easy::Http + include Ethon::Easy::Operations + include Ethon::Easy::ResponseCallbacks + extend Ethon::Easy::Features + + # Returns the curl return code. + # + # @return [ Symbol ] The return code. + # * :ok: All fine. Proceed as usual. + # * :unsupported_protocol: The URL you passed to libcurl used a + # protocol that this libcurl does not support. The support + # might be a compile-time option that you didn't use, it can + # be a misspelled protocol string or just a protocol + # libcurl has no code for. + # * :failed_init: Very early initialization code failed. This + # is likely to be an internal error or problem, or a + # resource problem where something fundamental couldn't + # get done at init time. + # * :url_malformat: The URL was not properly formatted. + # * :not_built_in: A requested feature, protocol or option + # was not found built-in in this libcurl due to a build-time + # decision. This means that a feature or option was not enabled + # or explicitly disabled when libcurl was built and in + # order to get it to function you have to get a rebuilt libcurl. + # * :couldnt_resolve_proxy: Couldn't resolve proxy. The given + # proxy host could not be resolved. + # * :couldnt_resolve_host: Couldn't resolve host. The given remote + # host was not resolved. + # * :couldnt_connect: Failed to connect() to host or proxy. + # * :ftp_weird_server_reply: After connecting to a FTP server, + # libcurl expects to get a certain reply back. This error + # code implies that it got a strange or bad reply. The given + # remote server is probably not an OK FTP server. + # * :remote_access_denied: We were denied access to the resource + # given in the URL. For FTP, this occurs while trying to + # change to the remote directory. + # * :ftp_accept_failed: While waiting for the server to connect + # back when an active FTP session is used, an error code was + # sent over the control connection or similar. + # * :ftp_weird_pass_reply: After having sent the FTP password to + # the server, libcurl expects a proper reply. This error code + # indicates that an unexpected code was returned. + # * :ftp_accept_timeout: During an active FTP session while + # waiting for the server to connect, the CURLOPT_ACCEPTTIMOUT_MS + # (or the internal default) timeout expired. + # * :ftp_weird_pasv_reply: libcurl failed to get a sensible result + # back from the server as a response to either a PASV or a + # EPSV command. The server is flawed. + # * :ftp_weird_227_format: FTP servers return a 227-line as a response + # to a PASV command. If libcurl fails to parse that line, + # this return code is passed back. + # * :ftp_cant_get_host: An internal failure to lookup the host used + # for the new connection. + # * :ftp_couldnt_set_type: Received an error when trying to set + # the transfer mode to binary or ASCII. + # * :partial_file: A file transfer was shorter or larger than + # expected. This happens when the server first reports an expected + # transfer size, and then delivers data that doesn't match the + # previously given size. 
+ # * :ftp_couldnt_retr_file: This was either a weird reply to a + # 'RETR' command or a zero byte transfer complete. + # * :quote_error: When sending custom "QUOTE" commands to the + # remote server, one of the commands returned an error code that + # was 400 or higher (for FTP) or otherwise indicated unsuccessful + # completion of the command. + # * :http_returned_error: This is returned if CURLOPT_FAILONERROR is + # set TRUE and the HTTP server returns an error code that is >= 400. + # * :write_error: An error occurred when writing received data to a + # local file, or an error was returned to libcurl from a write callback. + # * :upload_failed: Failed starting the upload. For FTP, the server + # typically denied the STOR command. The error buffer usually + # contains the server's explanation for this. + # * :read_error: There was a problem reading a local file or an error + # returned by the read callback. + # * :out_of_memory: A memory allocation request failed. This is serious + # badness and things are severely screwed up if this ever occurs. + # * :operation_timedout: Operation timeout. The specified time-out + # period was reached according to the conditions. + # * :ftp_port_failed: The FTP PORT command returned error. This mostly + # happens when you haven't specified a good enough address for + # libcurl to use. See CURLOPT_FTPPORT. + # * :ftp_couldnt_use_rest: The FTP REST command returned error. This + # should never happen if the server is sane. + # * :range_error: The server does not support or accept range requests. + # * :http_post_error: This is an odd error that mainly occurs due to + # internal confusion. + # * :ssl_connect_error: A problem occurred somewhere in the SSL/TLS + # handshake. You really want the error buffer and read the message + # there as it pinpoints the problem slightly more. Could be + # certificates (file formats, paths, permissions), passwords, and others. + # * :bad_download_resume: The download could not be resumed because + # the specified offset was out of the file boundary. + # * :file_couldnt_read_file: A file given with FILE:// couldn't be + # opened. Most likely because the file path doesn't identify an + # existing file. Did you check file permissions? + # * :ldap_cannot_bind: LDAP cannot bind. LDAP bind operation failed. + # * :ldap_search_failed: LDAP search failed. + # * :function_not_found: Function not found. A required zlib function was not found. + # * :aborted_by_callback: Aborted by callback. A callback returned + # "abort" to libcurl. + # * :bad_function_argument: Internal error. A function was called with + # a bad parameter. + # * :interface_failed: Interface error. A specified outgoing interface + # could not be used. Set which interface to use for outgoing + # connections' source IP address with CURLOPT_INTERFACE. + # * :too_many_redirects: Too many redirects. When following redirects, + # libcurl hit the maximum amount. Set your limit with CURLOPT_MAXREDIRS. + # * :unknown_option: An option passed to libcurl is not recognized/known. + # Refer to the appropriate documentation. This is most likely a + # problem in the program that uses libcurl. The error buffer might + # contain more specific information about which exact option it concerns. + # * :telnet_option_syntax: A telnet option string was Illegally formatted. + # * :peer_failed_verification: The remote server's SSL certificate or + # SSH md5 fingerprint was deemed not OK. 
+ # * :got_nothing: Nothing was returned from the server, and under the + # circumstances, getting nothing is considered an error. + # * :ssl_engine_notfound: The specified crypto engine wasn't found. + # * :ssl_engine_setfailed: Failed setting the selected SSL crypto engine as default! + # * :send_error: Failed sending network data. + # * :recv_error: Failure with receiving network data. + # * :ssl_certproblem: problem with the local client certificate. + # * :ssl_cipher: Couldn't use specified cipher. + # * :bad_content_encoding: Unrecognized transfer encoding. + # * :ldap_invalid_url: Invalid LDAP URL. + # * :filesize_exceeded: Maximum file size exceeded. + # * :use_ssl_failed: Requested FTP SSL level failed. + # * :send_fail_rewind: When doing a send operation curl had to rewind the data to + # retransmit, but the rewinding operation failed. + # * :ssl_engine_initfailed: Initiating the SSL Engine failed. + # * :login_denied: The remote server denied curl to login + # * :tftp_notfound: File not found on TFTP server. + # * :tftp_perm: Permission problem on TFTP server. + # * :remote_disk_full: Out of disk space on the server. + # * :tftp_illegal: Illegal TFTP operation. + # * :tftp_unknownid: Unknown TFTP transfer ID. + # * :remote_file_exists: File already exists and will not be overwritten. + # * :tftp_nosuchuser: This error should never be returned by a properly + # functioning TFTP server. + # * :conv_failed: Character conversion failed. + # * :conv_reqd: Caller must register conversion callbacks. + # * :ssl_cacert_badfile: Problem with reading the SSL CA cert (path? access rights?): + # * :remote_file_not_found: The resource referenced in the URL does not exist. + # * :ssh: An unspecified error occurred during the SSH session. + # * :ssl_shutdown_failed: Failed to shut down the SSL connection. + # * :again: Socket is not ready for send/recv wait till it's ready and try again. + # This return code is only returned from curl_easy_recv(3) and curl_easy_send(3) + # * :ssl_crl_badfile: Failed to load CRL file + # * :ssl_issuer_error: Issuer check failed + # * :ftp_pret_failed: The FTP server does not understand the PRET command at + # all or does not support the given argument. Be careful when + # using CURLOPT_CUSTOMREQUEST, a custom LIST command will be sent with PRET CMD + # before PASV as well. + # * :rtsp_cseq_error: Mismatch of RTSP CSeq numbers. + # * :rtsp_session_error: Mismatch of RTSP Session Identifiers. + # * :ftp_bad_file_list: Unable to parse FTP file list (during FTP wildcard downloading). + # * :chunk_failed: Chunk callback reported error. + # * :obsolete: These error codes will never be returned. They were used in an old + # libcurl version and are currently unused. + # + # @see http://curl.haxx.se/libcurl/c/libcurl-errors.html + attr_accessor :return_code + + # Initialize a new Easy. + # It initializes curl, if not already done and applies the provided options. + # Look into {Ethon::Easy::Options Options} to see what you can provide in the + # options hash. + # + # @example Create a new Easy. + # Easy.new(url: "www.google.de") + # + # @param [ Hash ] options The options to set. + # @option options :headers [ Hash ] Request headers. + # + # @return [ Easy ] A new Easy. + # + # @see Ethon::Easy::Options + # @see http://curl.haxx.se/libcurl/c/curl_easy_setopt.html + def initialize(options = {}) + Curl.init + set_attributes(options) + set_callbacks + end + + # Set given options. + # + # @example Set options. 
+ # easy.set_attributes(options) + # + # @param [ Hash ] options The options. + # + # @raise InvalidOption + # + # @see initialize + def set_attributes(options) + options.each_pair do |key, value| + method = "#{key}=" + unless respond_to?(method) + raise Errors::InvalidOption.new(key) + end + send(method, value) + end + end + + # Reset easy. This means resetting all options and instance variables. + # Also the easy handle is resetted. + # + # @example Reset. + # easy.reset + def reset + @url = nil + @escape = nil + @hash = nil + @on_complete = nil + @on_headers = nil + @on_body = nil + @on_progress = nil + @procs = nil + @mirror = nil + Curl.easy_reset(handle) + set_callbacks + end + + # Clones libcurl session handle. This means that all options that is set in + # the current handle will be set on duplicated handle. + def dup + e = super + e.handle = Curl.easy_duphandle(handle) + e.instance_variable_set(:@body_write_callback, nil) + e.instance_variable_set(:@header_write_callback, nil) + e.instance_variable_set(:@debug_callback, nil) + e.instance_variable_set(:@progress_callback, nil) + e.set_callbacks + e + end + # Url escapes the value. + # + # @example Url escape. + # easy.escape(value) + # + # @param [ String ] value The value to escape. + # + # @return [ String ] The escaped value. + # + # @api private + def escape(value) + string_pointer = Curl.easy_escape(handle, value, value.bytesize) + returned_string = string_pointer.read_string + Curl.free(string_pointer) + returned_string + end + + # Returns the informations available through libcurl as + # a hash. + # + # @return [ Hash ] The informations hash. + def to_hash + Kernel.warn("Ethon: Easy#to_hash is deprecated and will be removed, please use #mirror.") + mirror.to_hash + end + + def mirror + @mirror ||= Mirror.from_easy(self) + end + + # Return pretty log out. + # + # @example Return log out. + # easy.log_inspect + # + # @return [ String ] The log out. + def log_inspect + "EASY #{mirror.log_informations.map{|k, v| "#{k}=#{v}"}.flatten.join(' ')}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/callbacks.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/callbacks.rb new file mode 100644 index 0000000..d0425bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/callbacks.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains all the logic around the callbacks, + # which are needed to interact with libcurl. + # + # @api private + module Callbacks + + # :nodoc: + def self.included(base) + base.send(:attr_accessor, *[:response_body, :response_headers, :debug_info]) + end + + # Set writefunction and headerfunction callback. + # They are called by libcurl in order to provide the header and + # the body from the request. + # + # @example Set callbacks. + # easy.set_callbacks + def set_callbacks + Curl.set_option(:writefunction, body_write_callback, handle) + Curl.set_option(:headerfunction, header_write_callback, handle) + Curl.set_option(:debugfunction, debug_callback, handle) + @response_body = String.new + @response_headers = String.new + @headers_called = false + @debug_info = Ethon::Easy::DebugInfo.new + end + + # Returns the body write callback. + # + # @example Return the callback. + # easy.body_write_callback + # + # @return [ Proc ] The callback. 
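# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the vendored gem):
# how the Easy lifecycle documented above is usually driven -- options via
# set_attributes, a perform, the response accessors filled by the write
# callbacks below, then reset for reuse. The URL is an assumed placeholder.
#
#   require 'ethon'
#
#   easy = Ethon::Easy.new(url: "http://example.com", followlocation: true)
#   easy.perform                #=> :ok on success (see the return-code list)
#   easy.response_headers       # filled by header_write_callback
#   easy.response_body          # filled by body_write_callback
#
#   easy.reset                  # clears options and callbacks, keeps the handle
#   easy.url = "http://example.com/other"
#   easy.perform
# ----------------------------------------------------------------------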
+ def body_write_callback + @body_write_callback ||= proc do |stream, size, num, object| + headers + result = body(chunk = stream.read_string(size * num)) + @response_body << chunk if result == :unyielded + result != :abort ? size * num : -1 + end + end + + # Returns the header write callback. + # + # @example Return the callback. + # easy.header_write_callback + # + # @return [ Proc ] The callback. + def header_write_callback + @header_write_callback ||= proc {|stream, size, num, object| + @response_headers << stream.read_string(size * num) + size * num + } + end + + # Returns the debug callback. This callback is currently used + # write the raw http request headers. + # + # @example Return the callback. + # easy.debug_callback + # + # @return [ Proc ] The callback. + def debug_callback + @debug_callback ||= proc {|handle, type, data, size, udata| + message = data.read_string(size) + @debug_info.add type, message + print message unless [:data_in, :data_out].include?(type) + 0 + } + end + + def set_progress_callback + if Curl.version_info[:version] >= "7.32.0" + Curl.set_option(:xferinfofunction, progress_callback, handle) + else + Curl.set_option(:progressfunction, progress_callback, handle) + end + end + + # Returns the progress callback. + # + # @example Return the callback. + # easy.progress_callback + # + # @return [ Proc ] The callback. + def progress_callback + @progress_callback ||= proc { |_, dltotal, dlnow, ultotal, ulnow| + progress(dltotal, dlnow, ultotal, ulnow) + 0 + } + end + + # Set the read callback. This callback is used by libcurl to + # read data when performing a PUT request. + # + # @example Set the callback. + # easy.set_read_callback("a=1") + # + # @param [ String ] body The body. + def set_read_callback(body) + @request_body_read = 0 + readfunction do |stream, size, num, object| + size = size * num + body_size = if body.respond_to?(:bytesize) + body.bytesize + elsif body.respond_to?(:size) + body.size + elsif body.is_a?(File) + File.size(body.path) + end + + left = body_size - @request_body_read + size = left if size > left + + if size > 0 + chunk = if body.respond_to?(:byteslice) + body.byteslice(@request_body_read, size) + elsif body.respond_to?(:read) + body.read(size) + else + body[@request_body_read, size] + end + + stream.write_string( + chunk, size + ) + @request_body_read += size + end + size + end + end + + # Returns the body read callback. + # + # @example Return the callback. + # easy.read_callback + # + # @return [ Proc ] The callback. + def read_callback + @read_callback + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/debug_info.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/debug_info.rb new file mode 100644 index 0000000..d8f15ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/debug_info.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This class is used to store and retreive debug information, + # which is only saved when verbose is set to true. 
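# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the vendored gem):
# the debug callback above records messages only while verbose is enabled;
# they can then be read back through DebugInfo. Placeholder URL assumed.
#
#   easy = Ethon::Easy.new(url: "http://example.com", verbose: true)
#   easy.perform
#   easy.debug_info.to_a        # every recorded debug message, in order
#   easy.debug_info.to_h        # messages grouped by libcurl debug type
# ----------------------------------------------------------------------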
+ # + # @api private + class DebugInfo + + MESSAGE_TYPES = Ethon::Curl::DebugInfoType.to_h.keys + + class Message + attr_reader :type, :message + + def initialize(type, message) + @type = type + @message = message + end + end + + def initialize + @messages = [] + end + + def add(type, message) + @messages << Message.new(type, message) + end + + def messages_for(type) + @messages.select {|m| m.type == type }.map(&:message) + end + + MESSAGE_TYPES.each do |type| + eval %Q|def #{type}; messages_for(:#{type}); end| + end + + def to_a + @messages.map(&:message) + end + + def to_h + Hash[MESSAGE_TYPES.map {|k| [k, send(k)] }] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/features.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/features.rb new file mode 100644 index 0000000..c4a8962 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/features.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains class methods for feature checks + module Features + # Returns true if this curl version supports zlib. + # + # @example Return wether zlib is supported. + # Ethon::Easy.supports_zlib? + # + # @return [ Boolean ] True if supported, else false. + def supports_zlib? + !!(Curl.version_info[:features] & Curl::VERSION_LIBZ) + end + + # Returns true if this curl version supports AsynchDNS. + # + # @example + # Ethon::Easy.supports_asynch_dns? + # + # @return [ Boolean ] True if supported, else false. + def supports_asynch_dns? + !!(Curl.version_info[:features] & Curl::VERSION_ASYNCHDNS) + end + + alias :supports_timeout_ms? :supports_asynch_dns? + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/form.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/form.rb new file mode 100644 index 0000000..681d0f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/form.rb @@ -0,0 +1,107 @@ +# frozen_string_literal: true +require 'ethon/easy/util' +require 'ethon/easy/queryable' + +module Ethon + class Easy + + # This class represents a form and is used to send a payload in the + # request body via POST/PUT. + # It handles multipart forms, too. + # + # @api private + class Form + include Ethon::Easy::Util + include Ethon::Easy::Queryable + + # Return a new Form. + # + # @example Return a new Form. + # Form.new({}) + # + # @param [ Hash ] params The parameter with which to initialize the form. + # + # @return [ Form ] A new Form. + def initialize(easy, params, multipart = nil) + @easy = easy + @params = params || {} + @multipart = multipart + end + + # Return a pointer to the first form element in libcurl. + # + # @example Return the first form element. + # form.first + # + # @return [ FFI::Pointer ] The first element. + def first + @first ||= FFI::MemoryPointer.new(:pointer) + end + + # Return a pointer to the last form element in libcurl. + # + # @example Return the last form element. + # form.last + # + # @return [ FFI::Pointer ] The last element. + def last + @last ||= FFI::MemoryPointer.new(:pointer) + end + + # Return if form is multipart. The form is multipart + # when it contains a file or multipart option is set on the form during creation. + # + # @example Return if form is multipart. + # form.multipart? + # + # @return [ Boolean ] True if form is multipart, else false. + def multipart? 
+ return true if @multipart + query_pairs.any?{|pair| pair.respond_to?(:last) && pair.last.is_a?(Array)} + end + + # Add form elements to libcurl. + # + # @example Add form to libcurl. + # form.materialize + def materialize + query_pairs.each { |pair| form_add(pair.first.to_s, pair.last) } + end + + private + + def form_add(name, content) + case content + when Array + Curl.formadd(first, last, + :form_option, :copyname, :pointer, name, + :form_option, :namelength, :long, name.bytesize, + :form_option, :file, :string, content[2], + :form_option, :filename, :string, content[0], + :form_option, :contenttype, :string, content[1], + :form_option, :end + ) + else + Curl.formadd(first, last, + :form_option, :copyname, :pointer, name, + :form_option, :namelength, :long, name.bytesize, + :form_option, :copycontents, :pointer, content.to_s, + :form_option, :contentslength, :long, content ? content.to_s.bytesize : 0, + :form_option, :end + ) + end + + setup_garbage_collection + end + + def setup_garbage_collection + # first is a pointer to a pointer. Since it's a MemoryPointer it will + # auto clean itself up, but we need to clean up the object it points + # to. So this results in (pseudo-c): + # form_data_cleanup_handler = *first + # curl_form_free(form_data_cleanup_handler) + @form_data_cleanup_handler ||= FFI::AutoPointer.new(@first.get_pointer(0), Curl.method(:formfree)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/header.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/header.rb new file mode 100644 index 0000000..8984a24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/header.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true +module Ethon + class Easy + # This module contains the logic around adding headers to libcurl. + # + # @api private + module Header + # Return headers, return empty hash if none. + # + # @example Return the headers. + # easy.headers + # + # @return [ Hash ] The headers. + def headers + @headers ||= {} + end + + # Set the headers. + # + # @example Set the headers. + # easy.headers = {'User-Agent' => 'ethon'} + # + # @param [ Hash ] headers The headers. + def headers=(headers) + headers ||= {} + header_list = nil + headers.each do |k, v| + header_list = Curl.slist_append(header_list, compose_header(k,v)) + end + Curl.set_option(:httpheader, header_list, handle) + + @header_list = header_list && FFI::AutoPointer.new(header_list, Curl.method(:slist_free_all)) + end + + # Return header_list. + # + # @example Return header_list. + # easy.header_list + # + # @return [ FFI::Pointer ] The header list. + def header_list + @header_list + end + + # Compose libcurl header string from key and value. + # Also replaces null bytes, because libcurl will complain + # otherwise. + # + # @example Compose header. + # easy.compose_header('User-Agent', 'Ethon') + # + # @param [ String ] key The header name. + # @param [ String ] value The header value. + # + # @return [ String ] The composed header. 
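# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the vendored gem):
# request headers are passed as a plain Hash; #headers= above appends each
# composed "Key: value" pair to a curl_slist and hands it to libcurl.
# Header values are assumed placeholders.
#
#   easy = Ethon::Easy.new(url: "http://example.com")
#   easy.headers = { 'User-Agent' => 'ethon', 'Accept' => 'application/json' }
#   easy.perform
# ----------------------------------------------------------------------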
+ def compose_header(key, value) + Util.escape_zero_byte("#{key}: #{value}") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http.rb new file mode 100644 index 0000000..4359830 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true +require 'ethon/easy/http/actionable' +require 'ethon/easy/http/post' +require 'ethon/easy/http/get' +require 'ethon/easy/http/head' +require 'ethon/easy/http/put' +require 'ethon/easy/http/delete' +require 'ethon/easy/http/patch' +require 'ethon/easy/http/options' +require 'ethon/easy/http/custom' + +module Ethon + class Easy + + # This module contains logic about making valid HTTP requests. + module Http + + # Set specified options in order to make a HTTP request. + # Look at {Ethon::Easy::Options Options} to see what you can + # provide in the options hash. + # + # @example Set options for HTTP request. + # easy.http_request("www.google.com", :get, {}) + # + # @param [ String ] url The url. + # @param [ String ] action_name The HTTP action name. + # @param [ Hash ] options The options hash. + # + # @option options :params [ Hash ] Params hash which + # is attached to the url. + # @option options :body [ Hash ] Body hash which + # becomes the request body. It is a PUT body for + # PUT requests and a POST for everything else. + # @option options :headers [ Hash ] Request headers. + # + # @return [ void ] + # + # @see Ethon::Easy::Options + def http_request(url, action_name, options = {}) + fabricate(url, action_name, options).setup(self) + end + + private + + # Return the corresponding action class. + # + # @example Return the action. + # Action.fabricate(:get) + # Action.fabricate(:smash) + # + # @param [ String ] url The url. + # @param [ String ] action_name The HTTP action name. + # @param [ Hash ] options The option hash. + # + # @return [ Easy::Ethon::Actionable ] The request instance. + def fabricate(url, action_name, options) + constant_name = action_name.to_s.capitalize + + if Ethon::Easy::Http.const_defined?(constant_name) + Ethon::Easy::Http.const_get(constant_name).new(url, options) + else + Ethon::Easy::Http::Custom.new(constant_name.upcase, url, options) + end + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/actionable.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/actionable.rb new file mode 100644 index 0000000..374a21c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/actionable.rb @@ -0,0 +1,157 @@ +# frozen_string_literal: true +require 'ethon/easy/http/putable' +require 'ethon/easy/http/postable' + +module Ethon + class Easy + module Http + # This module represents a Http Action and is a factory + # for more real actions like GET, HEAD, POST and PUT. + module Actionable + + QUERY_OPTIONS = [ :params, :body, :params_encoding ] + + # Create a new action. + # + # @example Create a new action. + # Action.new("www.example.com", {}) + # + # @param [ String ] url The url. + # @param [ Hash ] options The options. + # + # @return [ Action ] A new action. + def initialize(url, options) + @url = url + @options, @query_options = parse_options(options) + end + + # Return the url. + # + # @example Return url. + # action.url + # + # @return [ String ] The url. + def url + @url + end + + # Return the options hash. + # + # @example Return options. 
+ # action.options + # + # @return [ Hash ] The options. + def options + @options + end + + # Returns the query options hash. + # + # @example Return query options. + # action.query_options + # + # @return [ Hash ] The query options. + def query_options + @query_options + end + + # Return the params. + # + # @example Return params. + # action.params + # + # @return [ Params ] The params. + def params + @params ||= Params.new(@easy, query_options.fetch(:params, nil)) + end + + # Return the form. + # + # @example Return form. + # action.form + # + # @return [ Form ] The form. + def form + @form ||= Form.new(@easy, query_options.fetch(:body, nil), options.fetch(:multipart, nil)) + end + + # Get the requested array encoding. By default it's + # :typhoeus, but it can also be set to :rack. + # + # @example Get encoding from options + # action.params_encoding + # + def params_encoding + @params_encoding ||= query_options.fetch(:params_encoding, :typhoeus) + end + + # Setup everything necessary for a proper request. + # + # @example setup. + # action.setup(easy) + # + # @param [ easy ] easy the easy to setup. + def setup(easy) + @easy = easy + + # Order is important, @easy will be used to provide access to options + # relevant to the following operations (like whether or not to escape + # values). + easy.set_attributes(options) + + set_form(easy) unless form.empty? + + if params.empty? + easy.url = url + else + set_params(easy) + end + end + + # Setup request with params. + # + # @example Setup nothing. + # action.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_params(easy) + params.escape = easy.escape? + params.params_encoding = params_encoding + + base_url, base_params = url.split('?') + base_url << '?' + base_url << base_params.to_s + base_url << '&' if base_params + base_url << params.to_s + + easy.url = base_url + end + + # Setup request with form. + # + # @example Setup nothing. + # action.set_form(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_form(easy) + end + + private + + def parse_options(options) + query_options = {} + options = options.dup + + QUERY_OPTIONS.each do |query_option| + if options.key?(query_option) + query_options[query_option] = options.delete(query_option) + end + end + + return options, query_options + end + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/custom.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/custom.rb new file mode 100644 index 0000000..a00f3e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/custom.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making requests for custom HTTP verbs. + class Custom + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + def initialize(verb, url, options) + @verb = verb + super(url, options) + end + + # Setup easy to make a request. + # + # @example Setup. + # custom.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. 
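# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the vendored gem):
# Actionable splits the option hash into query options (:params, :body)
# and plain easy options; set_params then appends the encoded params to
# the URL, while verbs without a dedicated class fall through to Custom
# via Http#fabricate. The URL and the :purge verb are assumed examples.
#
#   easy = Ethon::Easy.new
#   easy.http_request("http://example.com", :get, params: { page: 2 })
#   easy.perform            # requests http://example.com?page=2
#
#   easy.reset
#   easy.http_request("http://example.com", :purge, {})  # custom verb -> CUSTOMREQUEST
#   easy.perform
# ----------------------------------------------------------------------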
+ def setup(easy) + super + easy.customrequest = @verb + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/delete.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/delete.rb new file mode 100644 index 0000000..9cc2226 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/delete.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making DELETE requests. + class Delete + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a DELETE request. + # + # @example Setup customrequest. + # delete.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.customrequest = "DELETE" + end + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/get.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/get.rb new file mode 100644 index 0000000..6175e42 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/get.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making GET requests. + class Get + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a GET request. + # + # @example Setup. + # get.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.customrequest = "GET" unless form.empty? + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/head.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/head.rb new file mode 100644 index 0000000..d9bdbd2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/head.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making HEAD requests. + class Head + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a HEAD request. + # + # @example Setup. + # get.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.nobody = true + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/options.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/options.rb new file mode 100644 index 0000000..6e12c29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/options.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making OPTIONS requests. + class Options + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a OPTIONS request. + # + # @example Setup. + # options.setup(easy) + # + # @param [ Easy ] easy The easy to setup. 
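# ----------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the vendored gem):
# each verb class only tweaks the handle -- Head, for instance, sets the
# :nobody option so no response body is transferred. Placeholder URL assumed.
#
#   easy = Ethon::Easy.new
#   easy.http_request("http://example.com", :head, {})
#   easy.perform
#   easy.response_code      # status is available, response_body stays empty
# ----------------------------------------------------------------------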
+ def setup(easy) + super + easy.customrequest = "OPTIONS" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/patch.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/patch.rb new file mode 100644 index 0000000..e616383 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/patch.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making PATCH requests. + class Patch + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a PATCH request. + # + # @example Setup. + # patch.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.customrequest = "PATCH" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/post.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/post.rb new file mode 100644 index 0000000..f773f79 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/post.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + # This class knows everything about making POST requests. + class Post + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a POST request. + # + # @example Setup. + # post.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + if form.empty? + easy.postfieldsize = 0 + easy.copypostfields = "" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/postable.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/postable.rb new file mode 100644 index 0000000..bd42f63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/postable.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This module contains logic for setting up a [multipart] POST body. + module Postable + + # Set things up when form is provided. + # Deals with multipart forms. + # + # @example Setup. + # post.set_form(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_form(easy) + easy.url ||= url + form.params_encoding = params_encoding + if form.multipart? + form.escape = false + form.materialize + easy.httppost = form.first.read_pointer + else + form.escape = easy.escape? + easy.postfieldsize = form.to_s.bytesize + easy.copypostfields = form.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/put.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/put.rb new file mode 100644 index 0000000..202cbee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/put.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making PUT requests. + class Put + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Putable + + # Setup easy to make a PUT request. + # + # @example Setup. + # put.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + if form.empty? 
+ easy.upload = true + easy.infilesize = 0 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/putable.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/putable.rb new file mode 100644 index 0000000..a639f06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/http/putable.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This module contains logic about setting up a PUT body. + module Putable + # Set things up when form is provided. + # Deals with multipart forms. + # + # @example Setup. + # put.set_form(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_form(easy) + easy.upload = true + form.escape = true + form.params_encoding = params_encoding + easy.infilesize = form.to_s.bytesize + easy.set_read_callback(form.to_s) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/informations.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/informations.rb new file mode 100644 index 0000000..6eff633 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/informations.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains the methods to return informations + # from the easy handle. See http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html + # for more information. + module Informations + + # Holds available informations and their type, which is needed to + # request the informations from libcurl. + AVAILABLE_INFORMATIONS = { + # Return the available HTTP auth methods. + :httpauth_avail => :long, + + # Return the total time in seconds for the previous + # transfer, including name resolution, TCP connection, etc. + :total_time => :double, + + # Return the time, in seconds, it took from the start + # until the first byte was received by libcurl. This + # includes pre-transfer time and also the time the + # server needs to calculate the result. + :starttransfer_time => :double, + + # Return the time, in seconds, it took from the start + # until the SSL/SSH connect/handshake to the remote + # host was completed. This time is most often very near + # to the pre-transfer time, except for cases such as HTTP + # pipelining where the pre-transfer time can be delayed + # due to waits in line for the pipeline and more. + :appconnect_time => :double, + + # Return the time, in seconds, it took from the start + # until the file transfer was just about to begin. This + # includes all pre-transfer commands and negotiations + # that are specific to the particular protocol(s) involved. + # It does not involve the sending of the protocol- + # specific request that triggers a transfer. + :pretransfer_time => :double, + + # Return the time, in seconds, it took from the start + # until the connect to the remote host (or proxy) was completed. + :connect_time => :double, + + # Return the time, in seconds, it took from the + # start until the name resolution was completed. + :namelookup_time => :double, + + # Return the time, in seconds, it took for all redirection steps + # include name lookup, connect, pretransfer and transfer before the + # final transaction was started. time_redirect shows the complete + # execution time for multiple redirections. (Added in 7.12.3) + :redirect_time => :double, + + # Return the last used effective url. 
+ :effective_url => :string, + + # Return the string holding the IP address of the most recent + # connection done with this curl handle. This string + # may be IPv6 if that's enabled. + :primary_ip => :string, + + # Return the last received HTTP, FTP or SMTP response code. + # The value will be zero if no server response code has + # been received. Note that a proxy's CONNECT response should + # be read with http_connect_code and not this. + :response_code => :long, + + :request_size => :long, + + # Return the total number of redirections that were + # actually followed. + :redirect_count => :long, + + # Return the bytes, the total amount of bytes that were uploaded + :size_upload => :double, + + # Return the bytes, the total amount of bytes that were downloaded. + # The amount is only for the latest transfer and will be reset again + # for each new transfer. This counts actual payload data, what's + # also commonly called body. All meta and header data are excluded + # and will not be counted in this number. + :size_download => :double, + + # Return the bytes/second, the average upload speed that curl + # measured for the complete upload + :speed_upload => :double, + + # Return the bytes/second, the average download speed that curl + # measured for the complete download + :speed_download => :double + } + + AVAILABLE_INFORMATIONS.each do |name, type| + eval %Q|def #{name}; Curl.send(:get_info_#{type}, :#{name}, handle); end| + end + + # Returns true if this curl version supports zlib. + # + # @example Return wether zlib is supported. + # easy.supports_zlib? + # + # @return [ Boolean ] True if supported, else false. + # @deprecated Please use the static version instead + def supports_zlib? + Kernel.warn("Ethon: Easy#supports_zlib? is deprecated and will be removed, please use Easy#.") + Easy.supports_zlib? + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/mirror.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/mirror.rb new file mode 100644 index 0000000..f485c9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/mirror.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true +module Ethon + class Easy + class Mirror + attr_reader :options + alias_method :to_hash, :options + + INFORMATIONS_TO_MIRROR = Informations::AVAILABLE_INFORMATIONS.keys + + [:return_code, :response_headers, :response_body, :debug_info] + + INFORMATIONS_TO_LOG = [:effective_url, :response_code, :return_code, :total_time] + + def self.from_easy(easy) + options = {} + INFORMATIONS_TO_MIRROR.each do |info| + options[info] = easy.send(info) + end + new(options) + end + + def initialize(options = {}) + @options = options + end + + def log_informations + Hash[*INFORMATIONS_TO_LOG.map do |info| + [info, options[info]] + end.flatten] + end + + INFORMATIONS_TO_MIRROR.each do |info| + eval %Q|def #{info}; options[#{info}]; end| + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/operations.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/operations.rb new file mode 100644 index 0000000..ad5397d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/operations.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true +module Ethon + class Easy + # This module contains the logic to prepare and perform + # an easy. + module Operations + # Returns a pointer to the curl easy handle. + # + # @example Return the handle. 
+ # easy.handle + # + # @return [ FFI::Pointer ] A pointer to the curl easy handle. + def handle + @handle ||= FFI::AutoPointer.new(Curl.easy_init, Curl.method(:easy_cleanup)) + end + + # Sets a pointer to the curl easy handle. + # @param [ ::FFI::Pointer ] Easy handle that will be assigned. + def handle=(h) + @handle = h + end + + # Perform the easy request. + # + # @example Perform the request. + # easy.perform + # + # @return [ Integer ] The return code. + def perform + @return_code = Curl.easy_perform(handle) + if Ethon.logger.debug? + Ethon.logger.debug { "ETHON: performed #{log_inspect}" } + end + complete + @return_code + end + + # Clean up the easy. + # + # @example Perform clean up. + # easy.cleanup + # + # @return the result of the free which is nil + def cleanup + handle.free + end + + # Prepare the easy. Options, headers and callbacks + # were set. + # + # @example Prepare easy. + # easy.prepare + # + # @deprecated It is no longer necessary to call prepare. + def prepare + Ethon.logger.warn( + "ETHON: It is no longer necessary to call "+ + "Easy#prepare. It's going to be removed "+ + "in future versions." + ) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/options.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/options.rb new file mode 100644 index 0000000..30c4df6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/options.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains the logic and knowledge about the + # available options on easy. + module Options + attr_reader :url + + def url=(value) + @url = value + Curl.set_option(:url, value, handle) + end + + def escape=( b ) + @escape = b + end + + def escape? + return true if !defined?(@escape) || @escape.nil? + @escape + end + + def multipart=(b) + @multipart = b + end + + def multipart? + !!@multipart + end + + Curl.easy_options(nil).each do |opt, props| + method_name = "#{opt}=".freeze + unless method_defined? method_name + define_method(method_name) do |value| + Curl.set_option(opt, value, handle) + value + end + end + next if props[:type] != :callback || method_defined?(opt) + define_method(opt) do |&block| + @procs ||= {} + @procs[opt.to_sym] = block + Curl.set_option(opt, block, handle) + nil + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/params.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/params.rb new file mode 100644 index 0000000..41dad8b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/params.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true +require 'ethon/easy/util' +require 'ethon/easy/queryable' + +module Ethon + class Easy + + # This class represents HTTP request parameters. + # + # @api private + class Params + include Ethon::Easy::Util + include Ethon::Easy::Queryable + + # Create a new Params. + # + # @example Create a new Params. + # Params.new({}) + # + # @param [ Hash ] params The params to use. + # + # @return [ Params ] A new Params. 
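+      # @example An illustrative sketch of building a query string by hand.
+      #   Params is normally constructed internally when an HTTP action is
+      #   set up; the nil easy handle here is an assumption made only for
+      #   this illustration and skips libcurl-based escaping:
+      #   Ethon::Easy::Params.new(nil, :a => "1").to_s
+      #   #=> "a=1"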
+ def initialize(easy, params) + @easy = easy + @params = params || {} + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/queryable.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/queryable.rb new file mode 100644 index 0000000..47f7901 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/queryable.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains logic about building + # query parameters for url or form. + module Queryable + + # :nodoc: + def self.included(base) + base.send(:attr_accessor, :escape) + base.send(:attr_accessor, :params_encoding) + end + + # Return wether there are elements in params or not. + # + # @example Return if params is empty. + # form.empty? + # + # @return [ Boolean ] True if params is empty, else false. + def empty? + @params.empty? + end + + # Return the string representation of params. + # + # @example Return string representation. + # params.to_s + # + # @return [ String ] The string representation. + def to_s + @to_s ||= query_pairs.map{ |pair| + return pair if pair.is_a?(String) + + if escape && @easy + pair.map{ |e| @easy.escape(e.to_s) }.join("=") + else + pair.join("=") + end + }.join('&') + end + + # Return the query pairs. + # + # @example Return the query pairs. + # params.query_pairs + # + # @return [ Array ] The query pairs. + def query_pairs + @query_pairs ||= build_query_pairs(@params) + end + + # Return query pairs build from a hash. + # + # @example Build query pairs. + # action.build_query_pairs({a: 1, b: 2}) + # #=> [[:a, 1], [:b, 2]] + # + # @param [ Hash ] hash The hash to go through. + # + # @return [ Array ] The array of query pairs. + def build_query_pairs(hash) + return [hash] if hash.is_a?(String) + + pairs = [] + recursively_generate_pairs(hash, nil, pairs) + pairs + end + + # Return file info for a file. + # + # @example Return file info. + # action.file_info(File.open('fubar', 'r')) + # + # @param [ File ] file The file to handle. + # + # @return [ Array ] Array of informations. + def file_info(file) + filename = File.basename(file.path) + [ + filename, + mime_type(filename), + File.expand_path(file.path) + ] + end + + private + + def mime_type(filename) + if defined?(MIME) && t = MIME::Types.type_for(filename).first + t.to_s + else + 'application/octet-stream' + end + end + + def recursively_generate_pairs(h, prefix, pairs) + case h + when Hash + encode_hash_pairs(h, prefix, pairs) + when Array + if params_encoding == :rack + encode_rack_array_pairs(h, prefix, pairs) + elsif params_encoding == :multi + encode_multi_array_pairs(h, prefix, pairs) + elsif params_encoding == :none + pairs << [prefix, h] + else + encode_indexed_array_pairs(h, prefix, pairs) + end + end + end + + def encode_hash_pairs(h, prefix, pairs) + h.each_pair do |k,v| + key = prefix.nil? ? 
k : "#{prefix}[#{k}]" + pairs_for(v, key, pairs) + end + end + + def encode_indexed_array_pairs(h, prefix, pairs) + h.each_with_index do |v, i| + key = "#{prefix}[#{i}]" + pairs_for(v, key, pairs) + end + end + + def encode_rack_array_pairs(h, prefix, pairs) + h.each do |v| + key = "#{prefix}[]" + pairs_for(v, key, pairs) + end + end + + def encode_multi_array_pairs(h, prefix, pairs) + h.each_with_index do |v, i| + key = prefix + pairs_for(v, key, pairs) + end + end + + def pairs_for(v, key, pairs) + case v + when Hash, Array + recursively_generate_pairs(v, key, pairs) + when File, Tempfile + pairs << [key, file_info(v)] + else + pairs << [key, v] + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/response_callbacks.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/response_callbacks.rb new file mode 100644 index 0000000..23369f0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/response_callbacks.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains the logic for the response callbacks. + # The on_complete callback is the only one at the moment. + # + # You can set multiple callbacks, which are then executed + # in the same order. + # + # easy.on_complete { p 1 } + # easy.on_complete { p 2 } + # easy.complete + # #=> 1 + # #=> 2 + # + # You can clear the callbacks: + # + # easy.on_complete { p 1 } + # easy.on_complete { p 2 } + # easy.on_complete.clear + # easy.on_complete + # #=> [] + module ResponseCallbacks + + # Set on_headers callback. + # + # @example Set on_headers. + # request.on_headers { p "yay" } + # + # @param [ Block ] block The block to execute. + def on_headers(&block) + @on_headers ||= [] + @on_headers << block if block_given? + @on_headers + end + + # Execute on_headers callbacks. + # + # @example Execute on_headers. + # request.headers + def headers + return if @headers_called + @headers_called = true + if defined?(@on_headers) and not @on_headers.nil? + @on_headers.each{ |callback| callback.call(self) } + end + end + + # Set on_complete callback. + # + # @example Set on_complete. + # request.on_complete { p "yay" } + # + # @param [ Block ] block The block to execute. + def on_complete(&block) + @on_complete ||= [] + @on_complete << block if block_given? + @on_complete + end + + # Execute on_complete callbacks. + # + # @example Execute on_completes. + # request.complete + def complete + headers unless @response_headers.empty? + if defined?(@on_complete) and not @on_complete.nil? + @on_complete.each{ |callback| callback.call(self) } + end + end + + # Set on_progress callback. + # + # @example Set on_progress. + # request.on_progress {|dltotal, dlnow, ultotal, ulnow| p "#{dltotal} #{dlnow} #{ultotal} #{ulnow}" } + # + # @param [ Block ] block The block to execute. + def on_progress(&block) + @on_progress ||= [] + if block_given? + @on_progress << block + set_progress_callback + self.noprogress = 0 + end + @on_progress + end + + # Execute on_progress callbacks. + # + # @example Execute on_progress. + # request.body(1, 1, 1, 1) + def progress(dltotal, dlnow, ultotal, ulnow) + if defined?(@on_progress) and not @on_progress.nil? + @on_progress.each{ |callback| callback.call(dltotal, dlnow, ultotal, ulnow) } + end + end + + # Set on_body callback. + # + # @example Set on_body. + # request.on_body { |chunk| p "yay" } + # + # @param [ Block ] block The block to execute. 
+ def on_body(&block) + @on_body ||= [] + @on_body << block if block_given? + @on_body + end + + # Execute on_body callbacks. + # + # @example Execute on_body. + # request.body("This data came from HTTP.") + # + # @return [ Object ] If there are no on_body callbacks, returns the symbol :unyielded. + def body(chunk) + if defined?(@on_body) and not @on_body.nil? + result = nil + @on_body.each do |callback| + result = callback.call(chunk, self) + break if result == :abort + end + result + else + :unyielded + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/util.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/util.rb new file mode 100644 index 0000000..7649782 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/easy/util.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true +module Ethon + class Easy # :nodoc: + + # This module contains small helpers. + # + # @api private + module Util + + # Escapes zero bytes in strings. + # + # @example Escape zero bytes. + # Util.escape_zero_byte("1\0") + # #=> "1\\0" + # + # @param [ Object ] value The value to escape. + # + # @return [ String, Object ] Escaped String if + # zero byte found, original object if not. + def escape_zero_byte(value) + return value unless value.to_s.include?(0.chr) + value.to_s.gsub(0.chr, '\\\0') + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors.rb new file mode 100644 index 0000000..17eecad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true +require 'ethon/errors/ethon_error' +require 'ethon/errors/global_init' +require 'ethon/errors/multi_timeout' +require 'ethon/errors/multi_fdset' +require 'ethon/errors/multi_add' +require 'ethon/errors/multi_remove' +require 'ethon/errors/select' +require 'ethon/errors/invalid_option' +require 'ethon/errors/invalid_value' + +module Ethon + + # This namespace contains all errors raised by ethon. + module Errors + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/ethon_error.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/ethon_error.rb new file mode 100644 index 0000000..84dbf6c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/ethon_error.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Default Ethon error class for all custom errors. + class EthonError < StandardError + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/global_init.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/global_init.rb new file mode 100644 index 0000000..816b774 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/global_init.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when global_init failed. 
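+    # @example This error surfaces from Ethon::Curl.init when
+    #   curl_global_init reports failure; a sketch of rescuing it (rarely
+    #   needed in practice) could look like:
+    #   begin
+    #     Ethon::Curl.init
+    #   rescue Ethon::Errors::GlobalInit => e
+    #     warn e.message
+    #   end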
+ class GlobalInit < EthonError + def initialize + super("An error occured initializing curl.") + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/invalid_option.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/invalid_option.rb new file mode 100644 index 0000000..4e558f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/invalid_option.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when option is invalid. + class InvalidOption < EthonError + def initialize(option) + super("The option: #{option} is invalid.") + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/invalid_value.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/invalid_value.rb new file mode 100644 index 0000000..aedb109 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/invalid_value.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when option is invalid. + class InvalidValue < EthonError + def initialize(option, value) + super("The value: #{value} is invalid for option: #{option}.") + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_add.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_add.rb new file mode 100644 index 0000000..a569a31 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_add.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when multi_add_handle failed. + class MultiAdd < EthonError + def initialize(code, easy) + super("An error occured adding the easy handle: #{easy} to the multi: #{code}") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_fdset.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_fdset.rb new file mode 100644 index 0000000..e84be72 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_fdset.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when multi_fdset failed. + class MultiFdset < EthonError + def initialize(code) + super("An error occured getting the fdset: #{code}") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_remove.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_remove.rb new file mode 100644 index 0000000..36074b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_remove.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when multi_remove_handle failed. + class MultiRemove < EthonError + def initialize(code, easy) + super("An error occured removing the easy handle: #{easy} from the multi: #{code}") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_timeout.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_timeout.rb new file mode 100644 index 0000000..79be3a7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/multi_timeout.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raised when multi_timeout failed. 
+ class MultiTimeout < EthonError + def initialize(code) + super("An error occured getting the timeout: #{code}") + # "An error occured getting the timeout: #{code}: #{Curl.multi_strerror(code)}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/select.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/select.rb new file mode 100644 index 0000000..464a43b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/errors/select.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raised when select failed. + class Select < EthonError + def initialize(errno) + super("An error occured on select: #{errno}") + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/libc.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/libc.rb new file mode 100644 index 0000000..1fddb8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/libc.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true +module Ethon + + # FFI Wrapper module for Libc. + # + # @api private + module Libc + extend FFI::Library + ffi_lib 'c' + + # :nodoc: + def self.windows? + Gem.win_platform? + end + + unless windows? + attach_function :getdtablesize, [], :int + attach_function :free, [:pointer], :void + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/loggable.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/loggable.rb new file mode 100644 index 0000000..02b7478 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/loggable.rb @@ -0,0 +1,59 @@ +# encoding: utf-8 +# frozen_string_literal: true +module Ethon + + # Contains logging behaviour. + module Loggable + + # Get the logger. + # + # @note Will try to grab Rails' logger first before creating a new logger + # with stdout. + # + # @example Get the logger. + # Loggable.logger + # + # @return [ Logger ] The logger. + def logger + return @logger if defined?(@logger) + @logger = rails_logger || default_logger + end + + # Set the logger. + # + # @example Set the logger. + # Loggable.logger = Logger.new($stdout) + # + # @param [ Logger ] logger The logger to set. + # + # @return [ Logger ] The new logger. + def logger=(logger) + @logger = logger + end + + private + + # Gets the default Ethon logger - stdout. + # + # @example Get the default logger. + # Loggable.default_logger + # + # @return [ Logger ] The default logger. + def default_logger + logger = Logger.new($stdout) + logger.level = Logger::INFO + logger + end + + # Get the Rails logger if it's defined. + # + # @example Get Rails' logger. + # Loggable.rails_logger + # + # @return [ Logger ] The Rails logger. + def rails_logger + defined?(::Rails) && ::Rails.respond_to?(:logger) && ::Rails.logger + end + end + extend Loggable +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi.rb new file mode 100644 index 0000000..0919583 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi.rb @@ -0,0 +1,126 @@ +# frozen_string_literal: true +require 'ethon/easy/util' +require 'ethon/multi/stack' +require 'ethon/multi/operations' +require 'ethon/multi/options' + +module Ethon + + # This class represents libcurl multi. + class Multi + include Ethon::Multi::Stack + include Ethon::Multi::Operations + include Ethon::Multi::Options + + # Create a new multi. Initialize curl in case + # it didn't happen before. 
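+    # @example An illustrative sketch of running several transfers in
+    #   parallel; the URLs are placeholders, not part of Ethon:
+    #   multi = Ethon::Multi.new
+    #   %w(http://example.com/a http://example.com/b).each do |url|
+    #     multi.add(Ethon::Easy.new(url: url))
+    #   end
+    #   multi.perform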
+ # + # @example Create a new Multi. + # Multi.new + # + # @param [ Hash ] options The options. + # + # @option options :socketdata [String] + # Pass a pointer to whatever you want passed to the + # curl_socket_callback's forth argument, the userp pointer. This is not + # used by libcurl but only passed-thru as-is. Set the callback pointer + # with CURLMOPT_SOCKETFUNCTION. + # @option options :pipelining [Boolean] + # Pass a long set to 1 to enable or 0 to disable. Enabling pipelining + # on a multi handle will make it attempt to perform HTTP Pipelining as + # far as possible for transfers using this handle. This means that if + # you add a second request that can use an already existing connection, + # the second request will be "piped" on the same connection rather than + # being executed in parallel. (Added in 7.16.0) + # @option options :timerfunction [Proc] + # Pass a pointer to a function matching the curl_multi_timer_callback + # prototype. This function will then be called when the timeout value + # changes. The timeout value is at what latest time the application + # should call one of the "performing" functions of the multi interface + # (curl_multi_socket_action(3) and curl_multi_perform(3)) - to allow + # libcurl to keep timeouts and retries etc to work. A timeout value of + # -1 means that there is no timeout at all, and 0 means that the + # timeout is already reached. Libcurl attempts to limit calling this + # only when the fixed future timeout time actually changes. See also + # CURLMOPT_TIMERDATA. This callback can be used instead of, or in + # addition to, curl_multi_timeout(3). (Added in 7.16.0) + # @option options :timerdata [String] + # Pass a pointer to whatever you want passed to the + # curl_multi_timer_callback's third argument, the userp pointer. This + # is not used by libcurl but only passed-thru as-is. Set the callback + # pointer with CURLMOPT_TIMERFUNCTION. (Added in 7.16.0) + # @option options :maxconnects [Integer] + # Pass a long. The set number will be used as the maximum amount of + # simultaneously open connections that libcurl may cache. Default is + # 10, and libcurl will enlarge the size for each added easy handle to + # make it fit 4 times the number of added easy handles. + # By setting this option, you can prevent the cache size from growing + # beyond the limit set by you. + # When the cache is full, curl closes the oldest one in the cache to + # prevent the number of open connections from increasing. + # This option is for the multi handle's use only, when using the easy + # interface you should instead use the CURLOPT_MAXCONNECTS option. + # (Added in 7.16.3) + # @option options :max_total_connections [Integer] + # Pass a long. The set number will be used as the maximum amount of + # simultaneously open connections in total. For each new session, + # libcurl will open a new connection up to the limit set by + # CURLMOPT_MAX_TOTAL_CONNECTIONS. When the limit is reached, the + # sessions will be pending until there are available connections. + # If CURLMOPT_PIPELINING is 1, libcurl will try to pipeline if the host + # is capable of it. + # The default value is 0, which means that there is no limit. However, + # for backwards compatibility, setting it to 0 when CURLMOPT_PIPELINING + # is 1 will not be treated as unlimited. Instead it will open only 1 + # connection and try to pipeline on it. 
+ # (Added in 7.30.0) + # @option options :execution_mode [Boolean] + # Either :perform (default) or :socket_action, specifies the usage + # method that will be used on this multi object. The default :perform + # mode provides a #perform function that uses curl_multi_perform + # behind the scenes to automatically continue execution until all + # requests have completed. The :socket_action mode provides an API + # that allows the {Multi} object to be integrated into an external + # IO loop, by calling #socket_action and responding to the + # socketfunction and timerfunction callbacks, using the underlying + # curl_multi_socket_action semantics. + # + # @return [ Multi ] The new multi. + def initialize(options = {}) + Curl.init + @execution_mode = options.delete(:execution_mode) || :perform + set_attributes(options) + init_vars + end + + # Set given options. + # + # @example Set options. + # multi.set_attributes(options) + # + # @raise InvalidOption + # + # @see initialize + # + # @api private + def set_attributes(options) + options.each_pair do |key, value| + unless respond_to?("#{key}=") + raise Errors::InvalidOption.new(key) + end + method("#{key}=").call(value) + end + end + + private + + # Internal function to gate functions to a specific execution mode + # + # @raise ArgumentError + # + # @api private + def ensure_execution_mode(expected_mode) + raise ArgumentError, "Expected the Multi to be in #{expected_mode} but it was in #{@execution_mode}" if expected_mode != @execution_mode + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/operations.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/operations.rb new file mode 100644 index 0000000..b5c2796 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/operations.rb @@ -0,0 +1,228 @@ +# frozen_string_literal: true +module Ethon + class Multi # :nodoc + # This module contains logic to run a multi. + module Operations + STARTED_MULTI = "ETHON: started MULTI" + PERFORMED_MULTI = "ETHON: performed MULTI" + + # Return the multi handle. Inititialize multi handle, + # in case it didn't happened already. + # + # @example Return multi handle. + # multi.handle + # + # @return [ FFI::Pointer ] The multi handle. + def handle + @handle ||= FFI::AutoPointer.new(Curl.multi_init, Curl.method(:multi_cleanup)) + end + + # Initialize variables. + # + # @example Initialize variables. + # multi.init_vars + # + # @return [ void ] + def init_vars + if @execution_mode == :perform + @timeout = ::FFI::MemoryPointer.new(:long) + @timeval = Curl::Timeval.new + @fd_read = Curl::FDSet.new + @fd_write = Curl::FDSet.new + @fd_excep = Curl::FDSet.new + @max_fd = ::FFI::MemoryPointer.new(:int) + elsif @execution_mode == :socket_action + @running_count_pointer = FFI::MemoryPointer.new(:int) + end + end + + # Perform multi. + # + # @return [ nil ] + # + # @example Perform multi. + # multi.perform + def perform + ensure_execution_mode(:perform) + + Ethon.logger.debug(STARTED_MULTI) + while ongoing? + run + timeout = get_timeout + next if timeout == 0 + reset_fds + set_fds(timeout) + end + Ethon.logger.debug(PERFORMED_MULTI) + nil + end + + # Prepare multi. + # + # @return [ nil ] + # + # @example Prepare multi. + # multi.prepare + # + # @deprecated It is no longer necessary to call prepare. + def prepare + Ethon.logger.warn( + "ETHON: It is no longer necessay to call "+ + "Multi#prepare. Its going to be removed "+ + "in future versions." + ) + end + + # Continue execution with an external IO loop. 
+ # + # @example When no sockets are ready yet, or to begin. + # multi.socket_action + # + # @example When a socket is readable + # multi.socket_action(io_object, [:in]) + # + # @example When a socket is readable and writable + # multi.socket_action(io_object, [:in, :out]) + # + # @return [ Symbol ] The Curl.multi_socket_action return code. + def socket_action(io = nil, readiness = 0) + ensure_execution_mode(:socket_action) + + fd = if io.nil? + ::Ethon::Curl::SOCKET_TIMEOUT + elsif io.is_a?(Integer) + io + else + io.fileno + end + + code = Curl.multi_socket_action(handle, fd, readiness, @running_count_pointer) + @running_count = @running_count_pointer.read_int + + check + + code + end + + # Return whether the multi still contains requests or not. + # + # @example Return if ongoing. + # multi.ongoing? + # + # @return [ Boolean ] True if ongoing, else false. + def ongoing? + easy_handles.size > 0 || (!defined?(@running_count) || running_count > 0) + end + + private + + # Get timeout. + # + # @example Get timeout. + # multi.get_timeout + # + # @return [ Integer ] The timeout. + # + # @raise [ Ethon::Errors::MultiTimeout ] If getting the timeout fails. + def get_timeout + code = Curl.multi_timeout(handle, @timeout) + raise Errors::MultiTimeout.new(code) unless code == :ok + timeout = @timeout.read_long + timeout = 1 if timeout < 0 + timeout + end + + # Reset file describtors. + # + # @example Reset fds. + # multi.reset_fds + # + # @return [ void ] + def reset_fds + @fd_read.clear + @fd_write.clear + @fd_excep.clear + end + + # Set fds. + # + # @example Set fds. + # multi.set_fds + # + # @return [ void ] + # + # @raise [ Ethon::Errors::MultiFdset ] If setting the file descriptors fails. + # @raise [ Ethon::Errors::Select ] If select fails. + def set_fds(timeout) + code = Curl.multi_fdset(handle, @fd_read, @fd_write, @fd_excep, @max_fd) + raise Errors::MultiFdset.new(code) unless code == :ok + max_fd = @max_fd.read_int + if max_fd == -1 + sleep(0.001) + else + @timeval[:sec] = timeout / 1000 + @timeval[:usec] = (timeout * 1000) % 1000000 + loop do + code = Curl.select(max_fd + 1, @fd_read, @fd_write, @fd_excep, @timeval) + break unless code < 0 && ::FFI.errno == Errno::EINTR::Errno + end + raise Errors::Select.new(::FFI.errno) if code < 0 + end + end + + # Check. + # + # @example Check. + # multi.check + # + # @return [ void ] + def check + msgs_left = ::FFI::MemoryPointer.new(:int) + while true + msg = Curl.multi_info_read(handle, msgs_left) + break if msg.null? + next if msg[:code] != :done + easy = easy_handles.find{ |e| e.handle == msg[:easy_handle] } + easy.return_code = msg[:data][:code] + Ethon.logger.debug { "ETHON: performed #{easy.log_inspect}" } + delete(easy) + easy.complete + end + end + + # Run. + # + # @example Run + # multi.run + # + # @return [ void ] + def run + running_count_pointer = FFI::MemoryPointer.new(:int) + begin code = trigger(running_count_pointer) end while code == :call_multi_perform + check + end + + # Trigger. + # + # @example Trigger. + # multi.trigger + # + # @return [ Symbol ] The Curl.multi_perform return code. + def trigger(running_count_pointer) + code = Curl.multi_perform(handle, running_count_pointer) + @running_count = running_count_pointer.read_int + code + end + + # Return number of running requests. + # + # @example Return count. + # multi.running_count + # + # @return [ Integer ] Number running requests. 
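+      # @note Consulted by #ongoing?, which keeps #perform looping while the
+      #   count stays above zero.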
+ def running_count + @running_count ||= nil + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/options.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/options.rb new file mode 100644 index 0000000..08a3c03 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/options.rb @@ -0,0 +1,117 @@ +# frozen_string_literal: true +module Ethon + class Multi + + # This module contains the logic and knowledge about the + # available options on multi. + module Options + + # Sets max_total_connections option. + # + # @example Set max_total_connections option. + # multi.max_total_conections = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def max_total_connections=(value) + Curl.set_option(:max_total_connections, value_for(value, :int), handle, :multi) + end + + # Sets maxconnects option. + # + # @example Set maxconnects option. + # multi.maxconnects = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def maxconnects=(value) + Curl.set_option(:maxconnects, value_for(value, :int), handle, :multi) + end + + # Sets pipelining option. + # + # @example Set pipelining option. + # multi.pipelining = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def pipelining=(value) + Curl.set_option(:pipelining, value_for(value, :int), handle, :multi) + end + + # Sets socketdata option. + # + # @example Set socketdata option. + # multi.socketdata = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def socketdata=(value) + Curl.set_option(:socketdata, value_for(value, :string), handle, :multi) + end + + # Sets socketfunction option. + # + # @example Set socketfunction option. + # multi.socketfunction = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def socketfunction=(value) + Curl.set_option(:socketfunction, value_for(value, :string), handle, :multi) + end + + # Sets timerdata option. + # + # @example Set timerdata option. + # multi.timerdata = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def timerdata=(value) + Curl.set_option(:timerdata, value_for(value, :string), handle, :multi) + end + + # Sets timerfunction option. + # + # @example Set timerfunction option. + # multi.timerfunction = $value + # + # @param [ String ] value The value to set. + # + # @return [ void ] + def timerfunction=(value) + Curl.set_option(:timerfunction, value_for(value, :string), handle, :multi) + end + + private + + # Return the value to set to multi handle. It is converted with the help + # of bool_options, enum_options and int_options. + # + # @example Return casted the value. + # multi.value_for(:verbose) + # + # @return [ Object ] The casted value. + def value_for(value, type, option = nil) + return nil if value.nil? + + if type == :bool + value ? 1 : 0 + elsif type == :int + value.to_i + elsif value.is_a?(String) + Ethon::Easy::Util.escape_zero_byte(value) + else + value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/stack.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/stack.rb new file mode 100644 index 0000000..4c6381c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/multi/stack.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true +module Ethon + class Multi + + # This module provides the multi stack behaviour. + module Stack + + # Return easy handles. 
+ # + # @example Return easy handles. + # multi.easy_handles + # + # @return [ Array ] The easy handles. + def easy_handles + @easy_handles ||= [] + end + + # Add an easy to the stack. + # + # @example Add easy. + # multi.add(easy) + # + # @param [ Easy ] easy The easy to add. + # + # @raise [ Ethon::Errors::MultiAdd ] If adding an easy failed. + def add(easy) + return nil if easy_handles.include?(easy) + + code = Curl.multi_add_handle(handle, easy.handle) + raise Errors::MultiAdd.new(code, easy) unless code == :ok + easy_handles << easy + end + + # Delete an easy from stack. + # + # @example Delete easy from stack. + # + # @param [ Easy ] easy The easy to delete. + # + # @raise [ Ethon::Errors::MultiRemove ] If removing an easy failed. + def delete(easy) + if easy_handles.delete(easy) + code = Curl.multi_remove_handle(handle, easy.handle) + raise Errors::MultiRemove.new(code, handle) unless code == :ok + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/version.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/version.rb new file mode 100644 index 0000000..2e4ca37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/lib/ethon/version.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true +module Ethon + + # Ethon version. + VERSION = '0.15.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/benchmarks.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/benchmarks.rb new file mode 100644 index 0000000..6cd68ca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/benchmarks.rb @@ -0,0 +1,104 @@ +# encoding: utf-8 +# frozen_string_literal: true +require 'ethon' +require 'open-uri' +require 'patron' +require 'curb' +require 'net/http' +require 'cgi' +require 'benchmark/ips' + +require_relative '../spec/support/server' +require_relative '../spec/support/localhost_server' + +LocalhostServer.new(TESTSERVER.new, 3000) +LocalhostServer.new(TESTSERVER.new, 3001) +LocalhostServer.new(TESTSERVER.new, 3002) + +url = 'http://localhost:3000/'.freeze +uri = URI.parse('http://localhost:3000/').freeze +ethon = Ethon::Easy.new(url: url) +patron = Patron::Session.new +patron_url = Patron::Session.new(base_url: url) +curb = Curl::Easy.new(url) + +puts '[Creation]' +Benchmark.ips do |x| + x.report('String.new') { '' } + x.report('Easy.new') { Ethon::Easy.new } +end + +puts '[Escape]' +Benchmark.ips do |x| + x.report('CGI.escape') { CGI.escape("まつもと") } + x.report('Easy.escape') { ethon.escape("まつもと") } +end + +puts '[Requests]' +Benchmark.ips do |x| + x.report('net/http') { Net::HTTP.get_response(uri) } + x.report('open-uri') { open url } + + x.report('patron') do + patron.base_url = url + patron.get('/') + end + + x.report('patron reuse') { patron_url.get('/') } + + x.report('curb') do + curb.url = url + curb.perform + end + + x.report('curb reuse') { curb.perform } + + x.report('Easy.perform') do + ethon.url = url + ethon.perform + end + + x.report('Easy.perform reuse') { ethon.perform } +end + +puts "[ 4 delayed Requests ]" +Benchmark.ips do |x| + x.report('net/http') do + 3.times do |i| + uri = URI.parse("http://localhost:300#{i}/?delay=1") + Net::HTTP.get_response(uri) + end + end + + x.report("open-uri") do + 3.times do |i| + open("http://localhost:300#{i}/?delay=1") + end + end + + x.report("patron") do + sess = Patron::Session.new + 3.times do |i| + sess.base_url = "http://localhost:300#{i}/?delay=1" + sess.get("/") + end + end + + x.report("Easy.perform") do + easy = Ethon::Easy.new + 3.times do |i| + 
easy.url = "http://localhost:300#{i}/?delay=1" + easy.perform + end + end + + x.report("Multi.perform") do + multi = Ethon::Multi.new + 3.times do |i| + easy = Ethon::Easy.new + easy.url = "http://localhost:300#{i}/?delay=1" + multi.add(easy) + end + multi.perform + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/memory_leaks.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/memory_leaks.rb new file mode 100644 index 0000000..32000b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/memory_leaks.rb @@ -0,0 +1,114 @@ +# frozen_string_literal: true +require 'ethon' +require 'ethon/easy' + +require_relative 'perf_spec_helper' +require 'rspec/autorun' + +describe "low-level interactions with libcurl" do + describe Ethon::Multi do + memory_leak_test("init") do + Ethon::Multi.new + end + + memory_leak_test("handle") do + Ethon::Multi.new.handle + end + end + + describe Ethon::Easy do + memory_leak_test("init") do + Ethon::Easy.new + end + + memory_leak_test("handle") do + Ethon::Easy.new.handle + end + + memory_leak_test("headers") do + Ethon::Easy.new.headers = { "a" => 1, "b" => 2, "c" => 3, "d" => 4} + end + + memory_leak_test("escape") do + Ethon::Easy.new.escape("the_sky&is_blue") + end + end + + + describe Ethon::Easy::Form do + memory_leak_test("init") do + Ethon::Easy::Form.new(nil, {}) + end + + memory_leak_test("first") do + Ethon::Easy::Form.new(nil, {}).first + end + + memory_leak_test("last") do + Ethon::Easy::Form.new(nil, {}).last + end + + memory_leak_test("materialized with some params") do + form = Ethon::Easy::Form.new(nil, { "a" => "1" }) + form.materialize + end + + memory_leak_test("materialized with a file") do + File.open(__FILE__, "r") do |file| + form = Ethon::Easy::Form.new(nil, { "a" => file }) + form.materialize + end + end + end +end + +describe "higher level operations" do + memory_leak_test("a simple request") do + Ethon::Easy.new(:url => "http://localhost:3001/", + :forbid_reuse => true).perform + end + + memory_leak_test("a request with headers") do + Ethon::Easy.new(:url => "http://localhost:3001/", + :headers => { "Content-Type" => "application/json", + "Something" => "1", + "Else" => "qwerty", + "Long-String" => "aassddffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz"}, + :forbid_reuse => true).perform + end + + memory_leak_test("a request with headers and params") do + easy = Ethon::Easy.new(:url => "http://localhost:3001/", + :headers => { "Content-Type" => "application/json", + "Something" => "1", + "Else" => "qwerty", + "Long-String" => "aassddffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz"}, + :forbid_reuse => true) + easy.http_request("http://localhost:3001/", + :get, + :params => { "param1" => "value1", + "param2" => "value2", + "param3" => "value3", + "param4" => "value4"}) + end + + memory_leak_test("a request with headers, params, and body") do + easy = Ethon::Easy.new(:url => "http://localhost:3001/", + :headers => { "Content-Type" => "application/json", + "Something" => "1", + "Else" => "qwerty", + "Long-String" => "aassddffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz"}, + :forbid_reuse => true) + easy.http_request("http://localhost:3001/", + :get, + :params => { "param1" => "value1", + "param2" => "value2", + "param3" => "value3", + "param4" => "value4"}, + :body => { + "body1" => "value1", + "body2" => "value2", + "body3" => "value3" + }) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/perf_spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/perf_spec_helper.rb new 
file mode 100644 index 0000000..35fd623 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/perf_spec_helper.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true +#### SETUP +require 'bundler' +Bundler.setup +require 'rspec' + +require 'support/localhost_server' +require 'support/server' +require_relative 'support/memory_test_helpers' + +require 'logger' + +if ENV['VERBOSE'] + Ethon.logger = Logger.new($stdout) + Ethon.logger.level = Logger::DEBUG +end + +RSpec.configure do |config| + config.before(:suite) do + LocalhostServer.new(TESTSERVER.new, 3001) + end + config.include(MemoryTestHelpers) + config.extend(MemoryTestHelpers::TestMethods) +end + +MemoryTestHelpers.setup +MemoryTestHelpers.logger = Logger.new($stdout) +MemoryTestHelpers.logger.level = Logger::INFO +MemoryTestHelpers.logger.formatter = proc do |severity, datetime, progname, msg| + "\t\t#{msg}\n" +end + +if ENV['VERBOSE'] + MemoryTestHelpers.logger.level = Logger::DEBUG +end + +MemoryTestHelpers.iterations = ENV.fetch("ITERATIONS", 10_000).to_i diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/memory_test_helpers.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/memory_test_helpers.rb new file mode 100644 index 0000000..bfb6ff4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/memory_test_helpers.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true +require_relative 'ruby_object_leak_tracker' +require_relative 'os_memory_leak_tracker' + +module MemoryTestHelpers + class << self + attr_accessor :gc_proc, :iterations, :logger + + def setup + if RUBY_PLATFORM == "java" + # for leak detection + JRuby.objectspace = true if defined?(JRuby) + # for gc + require 'java' + java_import 'java.lang.System' + self.gc_proc = proc { System.gc } + else + self.gc_proc = proc { GC.start } + end + end + end + + module TestMethods + def memory_leak_test(description, &block) + context(description) do + it "doesn't leak ruby objects" do + object_leak_tracker = RubyObjectLeakTracker.new + track_memory_usage(object_leak_tracker, &block) + object_leak_tracker.total_difference_between_runs.should be <= 10 + end + + it "doesn't leak OS memory (C interop check)" do + os_memory_leak_tracker = OSMemoryLeakTracker.new + track_memory_usage(os_memory_leak_tracker, &block) + os_memory_leak_tracker.total_difference_between_runs.should be <= 10 + end + end + end + end + + def track_memory_usage(tracker) + # Intentionally do all this setup before we do any testing + logger = MemoryTestHelpers.logger + iterations = MemoryTestHelpers.iterations + + checkpoint_frequency = (iterations / 10.0).to_i + gc_frequency = 20 + + warmup_iterations = [(iterations / 3.0).to_i, 500].min + logger.info "Performing #{warmup_iterations} warmup iterations" + warmup_iterations.times do + yield + MemoryTestHelpers.gc_proc.call + end + tracker.capture_initial_memory_usage + + logger.info "Performing #{iterations} iterations (checkpoint every #{checkpoint_frequency})" + + iterations.times do |i| + yield + + last_iteration = (i == iterations - 1) + checkpoint = last_iteration || (i % checkpoint_frequency == 0) + + if checkpoint || (i % gc_frequency == 0) + MemoryTestHelpers.gc_proc.call + end + + if checkpoint + logger.info "Iteration #{i} checkpoint" + tracker.capture_memory_usage + tracker.dump_status(logger) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/os_memory_leak_tracker.rb 
b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/os_memory_leak_tracker.rb new file mode 100644 index 0000000..d9e62d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/os_memory_leak_tracker.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true +class OSMemoryLeakTracker + attr_reader :current_run + + def initialize + @previous_run = @current_run = 0 + end + + def difference_between_runs(basis=@previous_run) + @current_run - basis + end + + def total_difference_between_runs + difference_between_runs(@initial_count_run) + end + + def capture_initial_memory_usage + capture_memory_usage + @initial_count_run = @current_run + end + + def capture_memory_usage + @previous_run = @current_run + @current_run = rss_bytes + end + + def dump_status(logger) + delta = difference_between_runs + logger.add(log_level(delta), sprintf("\tTotal memory usage (kb): %d (%+d)", current_run, delta)) + end + + private + # amount of memory the current process "is using", in RAM + # (doesn't include any swap memory that it may be using, just that in actual RAM) + # Code loosely based on https://github.com/rdp/os/blob/master/lib/os.rb + # returns 0 on windows + def rss_bytes + if ENV['OS'] == 'Windows_NT' + 0 + else + `ps -o rss= -p #{Process.pid}`.to_i # in kilobytes + end + end + + def log_level(delta) + delta > 0 ? Logger::WARN : Logger::DEBUG + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/ruby_object_leak_tracker.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/ruby_object_leak_tracker.rb new file mode 100644 index 0000000..afcce5a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/profile/support/ruby_object_leak_tracker.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true +class RubyObjectLeakTracker + attr_reader :previous_count_hash, :current_count_hash + + def initialize + @previous_count_hash = @current_count_hash = {} + end + + def difference_between_runs(basis=@previous_count_hash) + @difference_between_runs ||= Hash[@current_count_hash.map do |object_class, count| + [object_class, count - (basis[object_class] || 0)] + end] + end + + def total_difference_between_runs + difference_between_runs(@initial_count_hash).values.inject(0) { |sum, count| sum + count } + end + + def capture_initial_memory_usage + capture_memory_usage + @initial_count_hash = @current_count_hash + end + + def capture_memory_usage + @difference_between_runs = nil + @previous_count_hash = @current_count_hash + + class_to_count = Hash.new { |hash, key| hash[key] = 0 } + ObjectSpace.each_object { |obj| class_to_count[obj.class] += 1 } + + sorted_class_to_count = class_to_count.sort_by { |k, v| -v } + @current_count_hash = Hash[sorted_class_to_count] + end + + def dump_status(logger) + diff = difference_between_runs + most_used_objects = current_count_hash.to_a.sort_by(&:last).reverse[0, 20] + + most_used_objects.each do |object_class, count| + delta = diff[object_class] + logger.add(log_level(delta), sprintf("\t%s: %d (%+d)", object_class, count, delta)) + end + end + + private + def log_level(delta) + delta > 0 ? 
Logger::WARN : Logger::DEBUG + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/curl_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/curl_spec.rb new file mode 100644 index 0000000..24d3726 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/curl_spec.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Curl do + describe ".init" do + before { Ethon::Curl.send(:class_variable_set, :@@initialized, false) } + + context "when global_init fails" do + it "raises global init error" do + expect(Ethon::Curl).to receive(:global_init).and_return(1) + expect{ Ethon::Curl.init }.to raise_error(Ethon::Errors::GlobalInit) + end + end + + context "when global_init works" do + before { expect(Ethon::Curl).to receive(:global_init).and_return(0) } + + it "doesn't raises global init error" do + expect{ Ethon::Curl.init }.to_not raise_error + end + + it "logs" do + expect(Ethon.logger).to receive(:debug) + Ethon::Curl.init + end + end + + context "when global_cleanup is called" do + before { expect(Ethon::Curl).to receive(:global_cleanup) } + + it "logs" do + expect(Ethon.logger).to receive(:debug).twice + Ethon::Curl.init + Ethon::Curl.cleanup + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/callbacks_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/callbacks_spec.rb new file mode 100644 index 0000000..4c15295 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/callbacks_spec.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Callbacks do + let!(:easy) { Ethon::Easy.new } + + describe "#set_callbacks" do + before do + expect(Ethon::Curl).to receive(:set_option).exactly(3).times + end + + it "sets write-, debug-, and headerfunction" do + easy.set_callbacks + end + + it "resets @response_body" do + easy.set_callbacks + expect(easy.instance_variable_get(:@response_body)).to eq("") + end + + it "resets @response_headers" do + easy.set_callbacks + expect(easy.instance_variable_get(:@response_headers)).to eq("") + end + + it "resets @debug_info" do + easy.set_callbacks + expect(easy.instance_variable_get(:@debug_info).to_a).to eq([]) + end + end + + describe "#progress_callback" do + it "returns 0" do + expect(easy.progress_callback.call(0,1,1,1,1)).to be(0) + end + end + + describe "#body_write_callback" do + let(:body_write_callback) { easy.instance_variable_get(:@body_write_callback) } + let(:stream) { double(:read_string => "") } + context "when body returns not :abort" do + it "returns number bigger than 0" do + expect(body_write_callback.call(stream, 1, 1, nil) > 0).to be(true) + end + end + + context "when body returns :abort" do + before do + easy.on_body.clear + easy.on_body { :abort } + end + let(:body_write_callback) { easy.instance_variable_get(:@body_write_callback) } + + it "returns -1 to indicate abort to libcurl" do + expect(body_write_callback.call(stream, 1, 1, nil)).to eq(-1) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/debug_info_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/debug_info_spec.rb new file mode 100644 index 0000000..8dccc38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/debug_info_spec.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::DebugInfo do + let(:easy) { Ethon::Easy.new } + + 
before do + easy.url = "http://localhost:3001/" + easy.perform + end + + describe "#debug_info" do + context "when verbose is not set to true" do + it "does not save any debug info after a request" do + expect(easy.debug_info.to_a.length).to eq(0) + expect(easy.debug_info.to_h.values.flatten.length).to eq(0) + end + end + + context "when verbose is set to true" do + before do + easy.verbose = true + easy.perform + end + + after do + easy.verbose = false + easy.reset + end + + it "saves debug info after a request" do + expect(easy.debug_info.to_a.length).to be > 0 + end + + it "saves request headers" do + expect(easy.debug_info.header_out.join).to include('GET / HTTP/1.1') + end + + it "saves response headers" do + expect(easy.debug_info.header_in.length).to be > 0 + expect(easy.response_headers).to include(easy.debug_info.header_in.join) + end + + it "saves incoming data" do + expect(easy.debug_info.data_in.length).to be > 0 + expect(easy.response_body).to include(easy.debug_info.data_in.join) + end + + it "saves debug text" do + expect(easy.debug_info.text.length).to be > 0 + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/features_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/features_spec.rb new file mode 100644 index 0000000..b174948 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/features_spec.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Informations do + + describe "#supports_asynch_dns?" do + it "returns boolean" do + expect([true, false].include? Ethon::Easy.supports_asynch_dns?).to be_truthy + end + end + + describe "#supports_zlib?" do + it "returns boolean" do + expect([true, false].include? Ethon::Easy.supports_zlib?).to be_truthy + end + end + + describe "#supports_timeout_ms?" do + it "returns boolean" do + expect([true, false].include? Ethon::Easy.supports_timeout_ms?).to be_truthy + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/form_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/form_spec.rb new file mode 100644 index 0000000..4cd34d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/form_spec.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Form do + let(:hash) { {} } + let!(:easy) { Ethon::Easy.new } + let(:form) { Ethon::Easy::Form.new(easy, hash) } + + describe ".new" do + it "assigns attribute to @params" do + expect(form.instance_variable_get(:@params)).to eq(hash) + end + end + + describe "#first" do + it "returns a pointer" do + expect(form.first).to be_a(FFI::Pointer) + end + end + + describe "#last" do + it "returns a pointer" do + expect(form.first).to be_a(FFI::Pointer) + end + end + + describe "#multipart?" 
do + before { form.instance_variable_set(:@query_pairs, pairs) } + + context "when query_pairs contains string values" do + let(:pairs) { [['a', '1'], ['b', '2']] } + + it "returns false" do + expect(form.multipart?).to be_falsey + end + end + + context "when query_pairs contains file" do + let(:pairs) { [['a', '1'], ['b', ['path', 'encoding', 'abs_path']]] } + + it "returns true" do + expect(form.multipart?).to be_truthy + end + end + + context "when options contains multipart=true" do + before { form.instance_variable_set(:@multipart, true) } + let(:pairs) { [['a', '1'], ['b', '2']] } + + it "returns true" do + expect(form.multipart?).to be_truthy + end + end + end + + describe "#materialize" do + before { form.instance_variable_set(:@query_pairs, pairs) } + + context "when query_pairs contains string values" do + let(:pairs) { [['a', '1']] } + + it "adds params to form" do + expect(Ethon::Curl).to receive(:formadd) + form.materialize + end + end + + context "when query_pairs contains nil" do + let(:pairs) { [['a', nil]] } + + it "adds params to form" do + expect(Ethon::Curl).to receive(:formadd) + form.materialize + end + end + + context "when query_pairs contains file" do + let(:pairs) { [['a', ["file", "type", "path/file"]]] } + + it "adds file to form" do + expect(Ethon::Curl).to receive(:formadd) + form.materialize + end + end + + context "when query_pairs contains file and string values" do + let(:pairs) { [['a', ["file", "type", "path/file"]], ['b', '1']] } + + it "adds file to form" do + expect(Ethon::Curl).to receive(:formadd).twice + form.materialize + end + end + + context "when query_pairs contains file, string and int values" do + let(:pairs) { [['a', ["file", "type", "path/file"]], ['b', '1'], ['c', 1]] } + + it "adds file to form" do + expect(Ethon::Curl).to receive(:formadd).exactly(3).times + form.materialize + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/header_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/header_spec.rb new file mode 100644 index 0000000..4931fc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/header_spec.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Header do + let(:easy) { Ethon::Easy.new } + + describe "#headers=" do + let(:headers) { { 'User-Agent' => 'Ethon' } } + + it "sets header" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Curl).to receive(:set_option) + easy.headers = headers + end + + context "when requesting" do + before do + easy.headers = headers + easy.url = "http://localhost:3001" + easy.perform + end + + it "sends" do + expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"') + end + + context "when header value contains null byte" do + let(:headers) { { 'User-Agent' => "Ethon\0" } } + + it "escapes" do + expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon\\\\0"') + end + end + + context "when header value has leading whitespace" do + let(:headers) { { 'User-Agent' => " Ethon" } } + + it "removes" do + expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"') + end + end + + context "when header value has trailing whitespace" do + let(:headers) { { 'User-Agent' => "Ethon " } } + + it "removes" do + expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"') + end + end + end + end + + describe "#compose_header" do + it "has space in between" do + expect(easy.compose_header('a', 'b')).to eq('a: b') + end + 
context "when value is a symbol" do + it "works" do + expect{ easy.compose_header('a', :b) }.to_not raise_error + end + end + end + + describe "#header_list" do + context "when no set_headers" do + it "returns nil" do + expect(easy.header_list).to eq(nil) + end + end + + context "when set_headers" do + it "returns pointer to header list" do + easy.headers = {'User-Agent' => 'Custom'} + expect(easy.header_list).to be_a(FFI::Pointer) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/custom_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/custom_spec.rb new file mode 100644 index 0000000..ba0978b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/custom_spec.rb @@ -0,0 +1,177 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Custom do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:custom) { described_class.new("PURGE", url, {:params => params, :body => form}) } + + describe "#setup" do + context "when nothing" do + it "sets url" do + custom.setup(easy) + expect(easy.url).to eq(url) + end + + it "makes a custom request" do + custom.setup(easy) + easy.perform + expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"') + end + end + + context "when params" do + let(:params) { {:a => "1&"} } + + it "attaches escaped to url" do + custom.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + custom.setup(easy) + easy.perform + end + + it "is a custom verb" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"') + end + + it "does not use application/x-www-form-urlencoded content type" do + expect(easy.response_body).to_not include('"CONTENT_TYPE":"application/x-www-form-urlencoded"') + end + + it "requests parameterized url" do + expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?a=1%26"') + end + end + end + + context "when body" do + context "when multipart" do + let(:form) { {:a => File.open(__FILE__, 'r')} } + + it "sets httppost" do + expect(easy).to receive(:httppost=) + custom.setup(easy) + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + custom.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a custom verb" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"') + end + + it "uses multipart/form-data content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"multipart/form-data') + end + + it "submits a body" do + expect(easy.response_body).to match('"body":".+"') + end + + it "submits the data" do + expect(easy.response_body).to include('"filename":"custom_spec.rb"') + end + end + end + + context "when not multipart" do + let(:form) { {:a => "1&b=2"} } + let(:encoded) { "a=1%26b%3D2" } + + it "sets escaped copypostfields" do + expect(easy).to receive(:copypostfields=).with(encoded) + custom.setup(easy) + end + + it "sets postfieldsize" do + expect(easy).to receive(:postfieldsize=).with(encoded.bytesize) + custom.setup(easy) + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + custom.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a custom verb" do + expect(easy.response_body).to 
include('"REQUEST_METHOD":"PURGE"') + end + + it "uses multipart/form-data content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"application/x-www-form-urlencoded') + end + + it "submits a body" do + expect(easy.response_body).to match('"body":"a=1%26b%3D2"') + end + + it "submits the data" do + expect(easy.response_body).to include('"rack.request.form_hash":{"a":"1&b=2"}') + end + end + end + + context "when string" do + let(:form) { "{a: 1}" } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + custom.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sends string" do + expect(easy.response_body).to include('"body":"{a: 1}"') + end + end + end + end + + context "when params and body" do + let(:form) { {:a => "1"} } + let(:params) { {:b => "2"} } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + custom.setup(easy) + easy.perform + end + + it "url contains params" do + expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?b=2"') + end + + it "body contains form" do + expect(easy.response_body).to include('"body":"a=1"') + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/delete_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/delete_spec.rb new file mode 100644 index 0000000..fe7e55e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/delete_spec.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Delete do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:delete) { described_class.new(url, {:params => params, :body => form}) } + + context "when requesting" do + before do + delete.setup(easy) + easy.perform + end + + it "makes a delete request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"DELETE"') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/get_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/get_spec.rb new file mode 100644 index 0000000..e3a7e4f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/get_spec.rb @@ -0,0 +1,126 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Get do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:options) { {} } + let(:get) { described_class.new(url, {:params => params, :body => form}.merge(options)) } + + describe "#setup" do + it "sets url" do + get.setup(easy) + expect(easy.url).to eq(url) + end + + context "when body" do + let(:form) { { :a => 1 } } + + it "sets customrequest" do + expect(easy).to receive(:customrequest=).with("GET") + get.setup(easy) + end + end + + context "when no body" do + it "doesn't set customrequest" do + expect(easy).to receive(:customrequest=).never + get.setup(easy) + end + end + + context "when requesting" do + before do + get.setup(easy) + easy.perform + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + let(:params) { {:a => "1&b=2"} } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a get request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + + it "requests parameterized url" 
do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + + context "when params and no body" do + let(:params) { {:a => "1&b=2"} } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a get request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + end + + context "when params and body" do + let(:params) { {:a => "1&b=2"} } + let(:form) { {:b => "2"} } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a get request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + end + + context "with :escape" do + let(:params) { {:a => "1&b=2"} } + + context 'missing' do + it "escapes values" do + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + end + + context 'nil' do + let(:options) { {:escape => nil} } + + it "escapes values" do + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + end + + context 'true' do + let(:options) { {:escape => true} } + + it "escapes values" do + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + end + + context 'false' do + let(:options) { {:escape => false} } + + it "sends raw values" do + expect(easy.url).to eq("#{url}?a=1&b=2") + end + end + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/head_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/head_spec.rb new file mode 100644 index 0000000..5a04f06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/head_spec.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Head do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:head) { described_class.new(url, {:params => params, :body => form}) } + + describe "#setup" do + context "when nothing" do + it "sets nobody" do + expect(easy).to receive(:nobody=).with(true) + head.setup(easy) + end + + it "sets url" do + head.setup(easy) + expect(easy.url).to eq(url) + end + end + + context "when params" do + let(:params) { {:a => "1&b=2"} } + + it "sets nobody" do + expect(easy).to receive(:nobody=).with(true) + head.setup(easy) + end + + it "attaches escaped to url" do + head.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + + context "when requesting" do + before do + head.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "has no body" do + expect(easy.response_body).to be_empty + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + end + end + + context "when body" do + let(:form) { {:a => 1} } + + context "when requesting" do + before do + head.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/options_spec.rb 
b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/options_spec.rb new file mode 100644 index 0000000..99ca96f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/options_spec.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Options do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:options) { described_class.new(url, {:params => params, :body => form}) } + + describe "#setup" do + it "sets customrequest" do + expect(easy).to receive(:customrequest=).with("OPTIONS") + options.setup(easy) + end + + it "sets url" do + options.setup(easy) + expect(easy.url).to eq(url) + end + + context "when requesting" do + let(:params) { {:a => "1&b=2"} } + + before do + options.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a options request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"OPTIONS"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/patch_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/patch_spec.rb new file mode 100644 index 0000000..a74cec7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/patch_spec.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Patch do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:patch) { described_class.new(url, {:params => params, :body => form}) } + + describe "#setup" do + it "sets customrequest" do + expect(easy).to receive(:customrequest=).with("PATCH") + patch.setup(easy) + end + + it "sets url" do + patch.setup(easy) + expect(easy.url).to eq(url) + end + + context "when requesting" do + let(:params) { {:a => "1&b=2"} } + + before do + patch.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a patch request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PATCH"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/post_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/post_spec.rb new file mode 100644 index 0000000..a65ceff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/post_spec.rb @@ -0,0 +1,317 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Post do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:options) { Hash.new } + let(:post) { described_class.new(url, 
options.merge({:params => params, :body => form})) } + + describe "#setup" do + context "when nothing" do + it "sets url" do + post.setup(easy) + expect(easy.url).to eq(url) + end + + it "sets postfield_size" do + expect(easy).to receive(:postfieldsize=).with(0) + post.setup(easy) + end + + it "sets copy_postfields" do + expect(easy).to receive(:copypostfields=).with("") + post.setup(easy) + end + + it "makes a post request" do + post.setup(easy) + easy.perform + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + end + + context "when params" do + let(:params) { {:a => "1&"} } + + it "attaches escaped to url" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + + context "with arrays" do + let(:params) { {:a => %w( foo bar )} } + + context "by default" do + it "encodes them with indexes" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a%5B0%5D=foo&a%5B1%5D=bar") + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + it "encodes them without indexes" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a%5B%5D=foo&a%5B%5D=bar") + end + end + end + + context "with :escape" do + context 'missing' do + it "escapes values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + end + + context 'nil' do + let(:options) { {:escape => nil} } + + it "escapes values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + end + + context 'true' do + let(:options) { {:escape => true} } + + it "escapes values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + end + + context 'false' do + let(:options) { {:escape => false} } + + it "sends raw values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1&") + end + end + end + + it "sets postfieldsize" do + expect(easy).to receive(:postfieldsize=).with(0) + post.setup(easy) + end + + it "sets copypostfields" do + expect(easy).to receive(:copypostfields=).with("") + post.setup(easy) + end + + context "when requesting" do + let(:postredir) { nil } + + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.postredir = postredir + easy.followlocation = true + easy.perform + end + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + + it "uses application/x-www-form-urlencoded content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"application/x-www-form-urlencoded"') + end + + it "requests parameterized url" do + expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?a=1%26"') + end + + context "when redirection" do + let(:url) { "localhost:3001/redirect" } + + context "when no postredirs" do + it "is a get" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + end + + unless ENV['TRAVIS'] + context "when postredirs" do + let(:postredir) { :post_all } + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + end + end + end + end + end + + context "when body" do + context "when multipart" do + let(:form) { {:a => File.open(__FILE__, 'r')} } + + it "sets httppost" do + expect(easy).to receive(:httppost=) + post.setup(easy) + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + + it "uses multipart/form-data 
content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"multipart/form-data') + end + + it "submits a body" do + expect(easy.response_body).to match('"body":".+"') + end + + it "submits the data" do + expect(easy.response_body).to include('"filename":"post_spec.rb"') + end + end + end + + context "when not multipart" do + let(:form) { {:a => "1&b=2"} } + let(:encoded) { "a=1%26b%3D2" } + + it "sets escaped copypostfields" do + expect(easy).to receive(:copypostfields=).with(encoded) + post.setup(easy) + end + + it "sets postfieldsize" do + expect(easy).to receive(:postfieldsize=).with(encoded.bytesize) + post.setup(easy) + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + + it "uses multipart/form-data content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"application/x-www-form-urlencoded') + end + + it "submits a body" do + expect(easy.response_body).to match('"body":"a=1%26b%3D2"') + end + + it "submits the data" do + expect(easy.response_body).to include('"rack.request.form_hash":{"a":"1&b=2"}') + end + end + end + + context "when string" do + let(:form) { "{a: 1}" } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sends string" do + expect(easy.response_body).to include('"body":"{a: 1}"') + end + end + end + + context "when binary with null bytes" do + let(:form) { [1, 0, 1].pack('c*') } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sends binary data" do + expect(easy.response_body).to include('"body":"\\u0001\\u0000\\u0001"') + end + end + end + + context "when arrays" do + let(:form) { {:a => %w( foo bar )} } + + context "by default" do + it "sets copypostfields with indexed, escaped representation" do + expect(easy).to receive(:copypostfields=).with('a%5B0%5D=foo&a%5B1%5D=bar') + post.setup(easy) + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + + it "sets copypostfields with non-indexed, escaped representation" do + expect(easy).to receive(:copypostfields=).with('a%5B%5D=foo&a%5B%5D=bar') + post.setup(easy) + end + end + end + end + + context "when params and body" do + let(:form) { {:a => "1"} } + let(:params) { {:b => "2"} } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "url contains params" do + expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?b=2"') + end + + it "body contains form" do + expect(easy.response_body).to include('"body":"a=1"') + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/put_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/put_spec.rb new file mode 100644 index 0000000..f294c37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http/put_spec.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Put do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } 
+ let(:params) { nil } + let(:form) { nil } + let(:options) { Hash.new } + let(:put) { described_class.new(url, options.merge({:params => params, :body => form})) } + + describe "#setup" do + context "when nothing" do + it "sets url" do + put.setup(easy) + expect(easy.url).to eq(url) + end + + it "sets upload" do + expect(easy).to receive(:upload=).with(true) + put.setup(easy) + end + + it "sets infilesize" do + expect(easy).to receive(:infilesize=).with(0) + put.setup(easy) + end + + context "when requesting" do + it "makes a put request" do + put.setup(easy) + easy.perform + expect(easy.response_body).to include('"REQUEST_METHOD":"PUT"') + end + end + end + + context "when params" do + let(:params) { {:a => "1&"} } + + it "attaches escaped to url" do + put.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + + context "with arrays" do + let(:params) { {:a => %w( foo bar )} } + + context "by default" do + it "encodes them with indexes" do + put.setup(easy) + expect(easy.url).to eq("#{url}?a%5B0%5D=foo&a%5B1%5D=bar") + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + it "encodes them without indexes" do + put.setup(easy) + expect(easy.url).to eq("#{url}?a%5B%5D=foo&a%5B%5D=bar") + end + end + end + + it "sets upload" do + expect(easy).to receive(:upload=).with(true) + put.setup(easy) + end + + it "sets infilesize" do + expect(easy).to receive(:infilesize=).with(0) + put.setup(easy) + end + + context "when requesting" do + before do + put.setup(easy) + easy.perform + end + + it "makes a put request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PUT"') + end + end + end + + context "when body" do + let(:form) { {:a => "1&b=2"} } + + it "sets infilesize" do + expect(easy).to receive(:infilesize=).with(11) + put.setup(easy) + end + + it "sets readfunction" do + expect(easy).to receive(:readfunction) + put.setup(easy) + end + + it "sets upload" do + expect(easy).to receive(:upload=).with(true) + put.setup(easy) + end + + context "when requesting" do + context "sending string body" do + before do + easy.headers = { 'Expect' => '' } + put.setup(easy) + easy.perform + end + + it "makes a put request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PUT"') + end + + it "submits a body" do + expect(easy.response_body).to include('"body":"a=1%26b%3D2"') + end + end + + context "when injecting a file as body" do + let(:file) { File.open(__FILE__) } + let(:easy) do + e = Ethon::Easy.new(:url => url, :upload => true) + e.set_read_callback(file) + e.infilesize = File.size(file.path) + e + end + + before do + easy.headers = { 'Expect' => '' } + easy.perform + end + + it "submits file" do + expect(easy.response_body).to include("injecting") + end + end + end + + context "when arrays" do + let(:form) { {:a => %w( foo bar )} } + + before do + put.setup(easy) + easy.perform + end + + context "by default" do + it "submits an indexed, escaped representation" do + expect(easy.response_body).to include('"body":"a%5B0%5D=foo&a%5B1%5D=bar"') + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + + it "submits an non-indexed, escaped representation" do + expect(easy.response_body).to include('"body":"a%5B%5D=foo&a%5B%5D=bar"') + end + end + end + end + + context "when params and body" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http_spec.rb new file mode 
100644 index 0000000..b74a2df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/http_spec.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http do + let(:easy) { Ethon::Easy.new } + + describe "#http_request" do + let(:url) { "http://localhost:3001/" } + let(:action_name) { :get } + let(:options) { {} } + + let(:get) { double(:setup) } + let(:get_class) { Ethon::Easy::Http::Get } + + it "instantiates action" do + expect(get).to receive(:setup) + expect(get_class).to receive(:new).and_return(get) + easy.http_request(url, action_name, options) + end + + context "when requesting" do + [ :get, :post, :put, :delete, :head, :patch, :options ].map do |action| + it "returns ok" do + easy.http_request(url, action, options) + easy.perform + expect(easy.return_code).to be(:ok) + end + + unless action == :head + it "makes a #{action.to_s.upcase} request" do + easy.http_request(url, action, options) + easy.perform + expect(easy.response_body).to include("\"REQUEST_METHOD\":\"#{action.to_s.upcase}\"") + end + + it "streams the response body from the #{action.to_s.upcase} request" do + bytes_read = 0 + easy.on_body { |chunk, response| bytes_read += chunk.bytesize } + easy.http_request(url, action, options) + easy.perform + content_length = ((easy.response_headers =~ /Content-Length: (\d+)/) && $1.to_i) + expect(bytes_read).to eq(content_length) + expect(easy.response_body).to eq("") + end + + it "notifies when headers are ready" do + headers = [] + easy.on_headers { |r| headers << r.response_headers } + easy.http_request(url, action, options) + easy.perform + expect(headers).to eq([easy.response_headers]) + expect(headers.first).to match(/Content-Length: (\d+)/) + end + end + end + + it "makes requests with custom HTTP verbs" do + easy.http_request(url, :purge, options) + easy.perform + expect(easy.response_body).to include(%{"REQUEST_METHOD":"PURGE"}) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/informations_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/informations_spec.rb new file mode 100644 index 0000000..c86c9a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/informations_spec.rb @@ -0,0 +1,120 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Informations do + let(:easy) { Ethon::Easy.new } + + before do + easy.url = "http://localhost:3001" + easy.perform + end + + describe "#httpauth_avail" do + it "returns" do + expect(easy.httpauth_avail).to be + end + end + + describe "#total_time" do + it "returns float" do + expect(easy.total_time).to be_a(Float) + end + end + + describe "#starttransfer_time" do + it "returns float" do + expect(easy.starttransfer_time).to be_a(Float) + end + end + + describe "#appconnect_time" do + it "returns float" do + expect(easy.appconnect_time).to be_a(Float) + end + end + + describe "#pretransfer_time" do + it "returns float" do + expect(easy.pretransfer_time).to be_a(Float) + end + end + + describe "#connect_time" do + it "returns float" do + expect(easy.connect_time).to be_a(Float) + end + end + + describe "#namelookup_time" do + it "returns float" do + expect(easy.namelookup_time).to be_a(Float) + end + end + + describe "#redirect_time" do + it "returns float" do + expect(easy.redirect_time).to be_a(Float) + end + end + + describe "#effective_url" do + it "returns url" do + expect(easy.effective_url).to match(/^http:\/\/localhost:3001\/?/) + end + end + 
+ describe "#primary_ip" do + it "returns localhost" do + expect(easy.primary_ip).to match(/::1|127\.0\.0\.1/) + end + end + + describe "#response_code" do + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + + describe "#redirect_count" do + it "returns 0" do + expect(easy.redirect_count).to eq(0) + end + end + + describe "#request_size" do + it "returns 53" do + expect(easy.request_size).to eq(53) + end + end + + describe "#supports_zlib?" do + it "returns true" do + expect(Kernel).to receive(:warn) + expect(easy.supports_zlib?).to be_truthy + end + end + + describe "#size_upload" do + it "returns float" do + expect(easy.size_upload).to be_a(Float) + end + end + + describe "#size_download" do + it "returns float" do + expect(easy.size_download).to be_a(Float) + end + end + + describe "#speed_upload" do + it "returns float" do + expect(easy.speed_upload).to be_a(Float) + end + end + + describe "#speed_download" do + it "returns float" do + expect(easy.speed_download).to be_a(Float) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/mirror_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/mirror_spec.rb new file mode 100644 index 0000000..a44dd84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/mirror_spec.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Mirror do + let(:options) { nil } + let(:mirror) { described_class.new(options) } + + describe "::INFORMATIONS_TO_LOG" do + [ + :return_code, :response_code, :response_body, :response_headers, + :total_time, :starttransfer_time, :appconnect_time, + :pretransfer_time, :connect_time, :namelookup_time, :redirect_time, + :size_upload, :size_download, :speed_upload, :speed_upload, + :effective_url, :primary_ip, :redirect_count, :debug_info + ].each do |name| + it "contains #{name}" do + expect(described_class::INFORMATIONS_TO_MIRROR).to include(name) + end + end + end + + describe "#to_hash" do + let(:options) { {:return_code => 1} } + + it "returns mirror as hash" do + expect(mirror.to_hash).to eq(options) + end + end + + describe "#log_informations" do + let(:options) { {:return_code => 1} } + + it "returns hash" do + expect(mirror.log_informations).to be_a(Hash) + end + + it "only calls methods that exist" do + described_class::INFORMATIONS_TO_LOG.each do |method_name| + expect(mirror.respond_to? 
method_name).to eql(true) + end + end + + it "includes return code" do + expect(mirror.log_informations).to include(options) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/operations_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/operations_spec.rb new file mode 100644 index 0000000..b216f91 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/operations_spec.rb @@ -0,0 +1,268 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Operations do + let(:easy) { Ethon::Easy.new } + + describe "#handle" do + it "returns a pointer" do + expect(easy.handle).to be_a(FFI::Pointer) + end + end + + + describe "#perform" do + let(:url) { nil } + let(:timeout) { nil } + let(:connect_timeout) { nil } + let(:follow_location) { nil } + let(:max_redirs) { nil } + let(:user_pwd) { nil } + let(:http_auth) { nil } + let(:headers) { nil } + let(:protocols) { nil } + let(:redir_protocols) { nil } + let(:username) { nil } + let(:password) { nil } + + before do + Ethon.logger.level = Logger::DEBUG + easy.url = url + easy.timeout = timeout + easy.connecttimeout = connect_timeout + easy.followlocation = follow_location + easy.maxredirs = max_redirs + easy.httpauth = http_auth + easy.headers = headers + easy.protocols = protocols + easy.redir_protocols = redir_protocols + + if user_pwd + easy.userpwd = user_pwd + else + easy.username = username + easy.password = password + end + + easy.perform + end + + it "calls Curl.easy_perform" do + expect(Ethon::Curl).to receive(:easy_perform) + easy.perform + end + + it "calls Curl.easy_cleanup" do + expect_any_instance_of(FFI::AutoPointer).to receive(:free) + easy.cleanup + end + + it "logs" do + expect(Ethon.logger).to receive(:debug) + easy.perform + end + + it "doesn't log after completing because completing could reset" do + easy.on_complete{ expect(Ethon.logger).to receive(:debug).never } + easy.perform + end + + context "when url" do + let(:url) { "http://localhost:3001/" } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sets response body" do + expect(easy.response_body).to be + end + + it "sets response headers" do + expect(easy.response_headers).to be + end + + context "when request timed out" do + let(:url) { "http://localhost:3001/?delay=1" } + let(:timeout) { 1 } + + it "returns operation_timedout" do + expect(easy.return_code).to eq(:operation_timedout) + end + end + + context "when connection timed out" do + let(:url) { "http://localhost:3009" } + let(:connect_timeout) { 1 } + + it "returns couldnt_connect" do + expect(easy.return_code).to eq(:couldnt_connect) + end + end + + context "when no follow location" do + let(:url) { "http://localhost:3001/redirect" } + let(:follow_location) { false } + + it "doesn't follow" do + expect(easy.response_code).to eq(302) + end + end + + context "when follow location" do + let(:url) { "http://localhost:3001/redirect" } + let(:follow_location) { true } + + it "follows" do + expect(easy.response_code).to eq(200) + end + + context "when infinite redirect loop" do + let(:url) { "http://localhost:3001/bad_redirect" } + let(:max_redirs) { 5 } + + context "when max redirect set" do + it "follows only x times" do + expect(easy.response_code).to eq(302) + end + end + end + end + + context "when user agent" do + let(:headers) { { 'User-Agent' => 'Ethon' } } + + it "sets" do + expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"') + end + end + end + + context "when auth url" do + 
before { easy.url = url } + + context "when basic auth" do + let(:url) { "http://localhost:3001/auth_basic/username/password" } + + context "when no user_pwd" do + it "returns 401" do + expect(easy.response_code).to eq(401) + end + end + + context "when invalid user_pwd" do + let(:user_pwd) { "invalid:invalid" } + + it "returns 401" do + expect(easy.response_code).to eq(401) + end + end + + context "when valid user_pwd" do + let(:user_pwd) { "username:password" } + + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + + context "when user and password" do + let(:username) { "username" } + let(:password) { "password" } + + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + end + + context "when ntlm" do + let(:url) { "http://localhost:3001/auth_ntlm" } + let(:http_auth) { :ntlm } + + context "when no user_pwd" do + it "returns 401" do + expect(easy.response_code).to eq(401) + end + end + + context "when user_pwd" do + let(:user_pwd) { "username:password" } + + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + end + end + + context "when protocols" do + context "when asking for a allowed url" do + let(:url) { "http://localhost:3001" } + let(:protocols) { :http } + + it "returns ok" do + expect(easy.return_code).to be(:ok) + end + end + + context "when asking for a not allowed url" do + let(:url) { "http://localhost:3001" } + let(:protocols) { :https } + + it "returns unsupported_protocol" do + expect(easy.return_code).to be(:unsupported_protocol) + end + end + end + + context "when multiple protocols" do + context "when asking for a allowed url" do + let(:protocols) { [:http, :https] } + + context "when http" do + let(:url) { "http://localhost:3001" } + + it "returns ok for http" do + expect(easy.return_code).to be(:ok) + end + end + + context "when https" do + let(:url) { "https://localhost:3001" } + + it "returns ssl_connect_error for https" do + expect(easy.return_code).to be(:ssl_connect_error) + end + end + end + + context "when asking for a not allowed url" do + let(:url) { "ssh://localhost" } + let(:protocols) { [:https, :http] } + + it "returns unsupported_protocol" do + expect(easy.return_code).to be(:unsupported_protocol) + end + end + end + + context "when redir_protocols" do + context "when redirecting to a not allowed url" do + let(:url) { "http://localhost:3001/redirect" } + let(:follow_location) { true } + let(:redir_protocols) { :https } + + it "returns unsupported_protocol" do + expect(easy.return_code).to be(:unsupported_protocol) + end + end + end + + context "when no url" do + it "returns url_malformat" do + expect(easy.perform).to eq(:url_malformat) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/options_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/options_spec.rb new file mode 100644 index 0000000..e135bd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/options_spec.rb @@ -0,0 +1,193 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Options do + let(:easy) { Ethon::Easy.new } + + [ + :accept_encoding, :cainfo, :capath, :connecttimeout, :connecttimeout_ms, :cookie, + :cookiejar, :cookiefile, :copypostfields, :customrequest, :dns_cache_timeout, + :followlocation, :forbid_reuse, :http_version, :httpauth, :httpget, :httppost, + :infilesize, :interface, :keypasswd, :maxredirs, :nobody, :nosignal, + :postfieldsize, :postredir, :protocols, :proxy, :proxyauth, :proxyport, 
:proxytype, + :proxyuserpwd, :readdata, :readfunction, :redir_protocols, :ssl_verifyhost, + :ssl_verifypeer, :sslcert, :sslcerttype, :sslkey, :sslkeytype, :sslversion, + :timeout, :timeout_ms, :unrestricted_auth, :upload, :url, :useragent, + :userpwd, :verbose, :pipewait, :dns_shuffle_addresses, :path_as_is + ].each do |name| + describe "#{name}=" do + it "responds_to" do + expect(easy).to respond_to("#{name}=") + end + + it "sets option" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Curl).to receive(:set_option).with(name, anything, anything) + value = case name + when :http_version + :httpv1_0 + when :httpauth + :basic + when :protocols, :redir_protocols + :http + when :postredir + :post_301 + when :proxytype + :http + when :sslversion + :default + when :httppost + FFI::Pointer::NULL + else + 1 + end + easy.method("#{name}=").call(value) + end + end + end + + describe '#escape?' do + context 'by default' do + it 'returns true' do + expect(easy.escape?).to be_truthy + end + end + + context 'when #escape=nil' do + it 'returns true' do + easy.escape = nil + expect(easy.escape?).to be_truthy + end + end + + context 'when #escape=true' do + it 'returns true' do + easy.escape = true + expect(easy.escape?).to be_truthy + end + end + + context 'when #escape=false' do + it 'returns true' do + easy.escape = false + expect(easy.escape?).to be_falsey + end + end + end + + describe '#multipart?' do + context 'by default' do + it 'returns false' do + expect(easy.multipart?).to be_falsey + end + end + + context 'when #multipart=nil' do + it 'returns false' do + easy.multipart = nil + expect(easy.multipart?).to be_falsey + end + end + + context 'when #multipart=true' do + it 'returns true' do + easy.multipart = true + expect(easy.multipart?).to be_truthy + end + end + + context 'when #multipart=false' do + it 'returns false' do + easy.multipart = false + expect(easy.multipart?).to be_falsey + end + end + end + + describe "#httppost=" do + it "raises unless given a FFI::Pointer" do + expect{ easy.httppost = 1 }.to raise_error(Ethon::Errors::InvalidValue) + end + end + + context "when requesting" do + let(:url) { "localhost:3001" } + let(:timeout) { nil } + let(:timeout_ms) { nil } + let(:connecttimeout) { nil } + let(:connecttimeout_ms) { nil } + let(:userpwd) { nil } + + before do + easy.url = url + easy.timeout = timeout + easy.timeout_ms = timeout_ms + easy.connecttimeout = connecttimeout + easy.connecttimeout_ms = connecttimeout_ms + easy.userpwd = userpwd + easy.perform + end + + context "when userpwd" do + context "when contains /" do + let(:url) { "localhost:3001/auth_basic/test/te%2Fst" } + let(:userpwd) { "test:te/st" } + + it "works" do + expect(easy.response_code).to eq(200) + end + end + end + + context "when timeout" do + let(:timeout) { 1 } + + context "when request takes longer" do + let(:url) { "localhost:3001?delay=2" } + + it "times out" do + expect(easy.return_code).to eq(:operation_timedout) + end + end + end + + context "when connecttimeout" do + let(:connecttimeout) { 1 } + + context "when cannot connect" do + let(:url) { "localhost:3002" } + + it "times out" do + expect(easy.return_code).to eq(:couldnt_connect) + end + end + end + + if Ethon::Easy.supports_timeout_ms? 
+ context "when timeout_ms" do + let(:timeout_ms) { 100 } + + context "when request takes longer" do + let(:url) { "localhost:3001?delay=1" } + + it "times out" do + expect(easy.return_code).to eq(:operation_timedout) + end + end + end + + context "when connecttimeout_ms" do + let(:connecttimeout_ms) { 100 } + + context "when cannot connect" do + let(:url) { "localhost:3002" } + + it "times out" do + # this can either lead to a timeout or couldnt connect depending on which happens first + expect([:couldnt_connect, :operation_timedout]).to include(easy.return_code) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/queryable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/queryable_spec.rb new file mode 100644 index 0000000..89c4d11 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/queryable_spec.rb @@ -0,0 +1,235 @@ +# encoding: utf-8 +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Queryable do + let(:hash) { {} } + let!(:easy) { Ethon::Easy.new } + let(:params) { Ethon::Easy::Params.new(easy, hash) } + + describe "#to_s" do + context "when query_pairs empty" do + before { params.instance_variable_set(:@query_pairs, []) } + + it "returns empty string" do + expect(params.to_s).to eq("") + end + end + + context "when query_pairs not empty" do + context "when escape" do + before do + params.escape = true + end + + { + '!' => '%21', '*' => '%2A', "'" => '%27', '(' => '%28', + ')' => '%29', ';' => '%3B', ':' => '%3A', '@' => '%40', + '&' => '%26', '=' => '%3D', '+' => '%2B', '$' => '%24', + ',' => '%2C', '/' => '%2F', '?' => '%3F', '#' => '%23', + '[' => '%5B', ']' => '%5D', + + '<' => '%3C', '>' => '%3E', '"' => '%22', '{' => '%7B', + '}' => '%7D', '|' => '%7C', '\\' => '%5C', '`' => '%60', + '^' => '%5E', '%' => '%25', ' ' => '%20', "\0" => '%00', + + 'まつもと' => '%E3%81%BE%E3%81%A4%E3%82%82%E3%81%A8', + }.each do |value, percent| + it "turns #{value.inspect} into #{percent}" do + params.instance_variable_set(:@query_pairs, [[:a, value]]) + expect(params.to_s).to eq("a=#{percent}") + end + end + + { + '.' 
=> '%2E', '-' => '%2D', '_' => '%5F', '~' => '%7E', + }.each do |value, percent| + it "leaves #{value.inspect} instead of turning into #{percent}" do + params.instance_variable_set(:@query_pairs, [[:a, value]]) + expect(params.to_s).to eq("a=#{value}") + end + end + end + + context "when no escape" do + before { params.instance_variable_set(:@query_pairs, [[:a, 1], [:b, 2]]) } + + it "returns concatenated query string" do + expect(params.to_s).to eq("a=1&b=2") + end + end + end + + context "when query_pairs contains a string" do + before { params.instance_variable_set(:@query_pairs, ["{a: 1}"]) } + + it "returns correct string" do + expect(params.to_s).to eq("{a: 1}") + end + end + end + + describe "#build_query_pairs" do + let(:pairs) { params.method(:build_query_pairs).call(hash) } + + context "when params is empty" do + it "returns empty array" do + expect(pairs).to eq([]) + end + end + + context "when params is string" do + let(:hash) { "{a: 1}" } + + it "wraps it in an array" do + expect(pairs).to eq([hash]) + end + end + + context "when params is simple hash" do + let(:hash) { {:a => 1, :b => 2} } + + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include([:b, 2]) + end + end + + context "when params is a nested hash" do + let(:hash) { {:a => 1, :b => {:c => 2}} } + + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include(["b[c]", 2]) + end + end + + context "when params contains an array" do + let(:hash) { {:a => 1, :b => [2, 3]} } + + context "by default" do + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include(["b[0]", 2]) + expect(pairs).to include(["b[1]", 3]) + end + end + + context "when params_encoding is :rack" do + before { params.params_encoding = :rack } + it "transforms without indexes" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include(["b[]", 2]) + expect(pairs).to include(["b[]", 3]) + end + end + + context "when params_encoding is :none" do + before { params.params_encoding = :none } + it "does no transformation" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include([:b, [2, 3]]) + end + end + end + + context "when params contains something nested in an array" do + context "when string" do + let(:hash) { {:a => {:b => ["hello", "world"]}} } + + it "transforms" do + expect(pairs).to eq([["a[b][0]", "hello"], ["a[b][1]", "world"]]) + end + end + + context "when hash" do + let(:hash) { {:a => {:b => [{:c =>1}, {:d => 2}]}} } + + it "transforms" do + expect(pairs).to eq([["a[b][0][c]", 1], ["a[b][1][d]", 2]]) + end + end + + context "when file" do + let(:file) { File.open("spec/spec_helper.rb") } + let(:file_info) { params.method(:file_info).call(file) } + let(:hash) { {:a => {:b => [file]}} } + let(:mime_type) { file_info[1] } + + it "transforms" do + expect(pairs).to eq([["a[b][0]", file_info]]) + end + + context "when MIME" do + if defined?(MIME) + context "when mime type" do + it "sets mime type to text" do + expect(mime_type).to eq("application/x-ruby") + end + end + end + + context "when no mime type" do + let(:file) { Tempfile.new("fubar") } + + it "sets mime type to default application/octet-stream" do + expect(mime_type).to eq("application/octet-stream") + end + end + end + + context "when no MIME" do + before { hide_const("MIME") } + + it "sets mime type to default application/octet-stream" do + expect(mime_type).to eq("application/octet-stream") + end + end + end + end + + + context "when params contains file" do + let(:file) { Tempfile.new("fubar") } 
+ let(:file_info) { params.method(:file_info).call(file) } + let(:hash) { {:a => 1, :b => file} } + + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include([:b, file_info]) + end + end + + context "when params key contains a null byte" do + let(:hash) { {:a => "1\0" } } + + it "preserves" do + expect(pairs).to eq([[:a, "1\0"]]) + end + end + + context "when params value contains a null byte" do + let(:hash) { {"a\0" => 1 } } + + it "preserves" do + expect(pairs).to eq([["a\0", 1]]) + end + end + end + + describe "#empty?" do + context "when params empty" do + it "returns true" do + expect(params.empty?).to be_truthy + end + end + + context "when params not empty" do + let(:hash) { {:a => 1} } + + it "returns false" do + expect(params.empty?).to be_falsey + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/response_callbacks_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/response_callbacks_spec.rb new file mode 100644 index 0000000..f142fad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/response_callbacks_spec.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::ResponseCallbacks do + let(:easy) { Ethon::Easy.new } + + [:on_complete, :on_headers, :on_body, :on_progress].each do |callback_type| + describe "##{callback_type}" do + it "responds" do + expect(easy).to respond_to("#{callback_type}") + end + + context "when no block given" do + it "returns @#{callback_type}" do + expect(easy.send("#{callback_type}")).to eq([]) + end + end + + context "when block given" do + it "stores" do + easy.send(callback_type) { p 1 } + expect(easy.instance_variable_get("@#{callback_type}").size).to eq(1) + end + end + + context "when multiple blocks given" do + it "stores" do + easy.send(callback_type) { p 1 } + easy.send(callback_type) { p 2 } + expect(easy.instance_variable_get("@#{callback_type}").size).to eq(2) + end + end + end + end + + describe "#complete" do + before do + easy.on_complete {|r| String.new(r.url) } + end + + it "executes blocks and passes self" do + expect(String).to receive(:new).with(easy.url) + easy.complete + end + + context "when @on_complete nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_complete, nil) + expect{ easy.complete }.to_not raise_error + end + end + end + + describe "#headers" do + before do + easy.on_headers {|r| String.new(r.url) } + end + + it "executes blocks and passes self" do + expect(String).to receive(:new).with(easy.url) + easy.headers + end + + context "when @on_headers nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_headers, nil) + expect{ easy.headers }.to_not raise_error + end + end + end + + describe "#progress" do + context "when requesting for realz" do + it "executes callback" do + post = Ethon::Easy::Http::Post.new("http://localhost:3001", {:body => "bar=fu"}) + post.setup(easy) + @called = false + @has_dltotal = false + @has_ultotal = false + easy.on_progress { @called = true } + easy.on_progress { |dltotal, _, _, _| @has_dltotal ||= true } + easy.on_progress { |_, _, ultotal, _| @has_ultotal ||= true } + easy.perform + expect(@called).to be true + expect(@has_dltotal).to be true + expect(@has_ultotal).to be true + end + end + + context "when pretending" do + before do + @dltotal = nil + @dlnow = nil + @ultotal = nil + @ulnow = nil + easy.on_progress { |dltotal, dlnow, ultotal, ulnow| @dltotal = dltotal ; @dlnow = dlnow; @ultotal = ultotal; 
@ulnow = ulnow } + end + + it "executes blocks and passes dltotal" do + easy.progress(1, 2, 3, 4) + expect(@dltotal).to eq(1) + end + + it "executes blocks and passes dlnow" do + easy.progress(1, 2, 3, 4) + expect(@dlnow).to eq(2) + end + + it "executes blocks and passes ultotal" do + easy.progress(1, 2, 3, 4) + expect(@ultotal).to eq(3) + end + + it "executes blocks and passes ulnow" do + easy.progress(1, 2, 3, 4) + expect(@ulnow).to eq(4) + end + + context "when @on_progress nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_progress, nil) + expect{ easy.progress(1, 2, 3, 4) }.to_not raise_error + end + end + end + end + + describe "#body" do + before do + @chunk = nil + @r = nil + easy.on_body { |chunk, r| @chunk = chunk ; @r = r } + end + + it "executes blocks and passes self" do + easy.body("the chunk") + expect(@r).to be(easy) + end + + it "executes blocks and passes chunk" do + easy.body("the chunk") + expect(@chunk).to eq("the chunk") + end + + context "when @on_body nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_body, nil) + expect{ easy.body("the chunk") }.to_not raise_error + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/util_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/util_spec.rb new file mode 100644 index 0000000..048c5bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy/util_spec.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Util do + class Dummy + include Ethon::Easy::Util + end + + let(:klass) { Dummy.new } + + describe "escape_zero_byte" do + context "when value has no zero byte" do + let(:value) { "hello world" } + + it "returns same value" do + expect(klass.escape_zero_byte(value)).to be(value) + end + end + + context "when value has zero byte" do + let(:value) { "hello \0world" } + + it "returns escaped" do + expect(klass.escape_zero_byte(value)).to eq("hello \\0world") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy_spec.rb new file mode 100644 index 0000000..c0e9565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/easy_spec.rb @@ -0,0 +1,203 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy do + let(:easy) { Ethon::Easy.new } + + describe ".new" do + it "inits curl" do + expect(Ethon::Curl).to receive(:init) + easy + end + + context "when options are empty" do + it "sets only callbacks" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Easy).to receive(:set_option).never + easy + end + end + + context "when options not empty" do + context "when followlocation is set" do + let(:options) { { :followlocation => true } } + let(:easy) { Ethon::Easy.new(options) } + + it "sets followlocation" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Curl).to receive(:set_option).with(:followlocation, true, anything) + easy + end + end + end + end + + describe "#set_attributes" do + context "when options are empty" do + it "sets only callbacks" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Easy).to receive(:set_option).never + easy + end + end + + context "when options aren't empty" do + context "when valid key" do + it "sets" do + expect(easy).to receive(:verbose=).with(true) + easy.set_attributes({:verbose => true}) + end 
+ end + + context "when invalid key" do + it "raises invalid option error" do + expect{ easy.set_attributes({:fubar => 1}) }.to raise_error(Ethon::Errors::InvalidOption) + end + end + end + end + + describe "#reset" do + before { easy.url = "www.example.com" } + + it "resets url" do + easy.reset + expect(easy.url).to be_nil + end + + it "resets escape?" do + easy.escape = false + easy.reset + expect(easy.escape?).to be_truthy + end + + it "resets hash" do + easy.reset + expect(easy.instance_variable_get(:@hash)).to be_nil + end + + it "resets easy handle" do + expect(Ethon::Curl).to receive(:easy_reset) + easy.reset + end + + it "resets on_complete" do + easy.on_complete { p 1 } + easy.reset + expect(easy.on_complete).to be_empty + end + + it "resets on_headers" do + easy.on_headers { p 1 } + easy.reset + expect(easy.on_headers).to be_empty + end + + it "resets on_body" do + easy.on_body { p 1 } + easy.reset + expect(easy.on_body).to be_empty + end + end + + describe "#dup" do + let!(:easy) do + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/" + easy.on_complete { 'on_complete' } + easy.on_headers { 'on_headers' } + easy.on_progress { 'on_progress' } + easy.response_body = String.new('test_body') + easy.response_headers = String.new('test_headers') + easy + end + let!(:e) { easy.dup } + + it "sets a new handle" do + expect(e.handle).not_to eq(easy.handle) + end + + it "preserves url" do + expect(e.url).to eq(easy.url) + end + + it "preserves on_complete callback" do + expect(e.on_complete).to be(easy.on_complete) + end + + it "preserves on_headers callback" do + expect(e.on_headers).to be(easy.on_headers) + end + + it 'preserves body_write_callback of original handle' do + expect { easy.perform }.to change { easy.response_body } + expect { easy.perform }.not_to change { e.response_body } + end + + it "preserves on_progress callback" do + expect(e.on_progress).to be(easy.on_progress) + end + + it 'sets new body_write_callback of duplicated handle' do + expect { e.perform }.to change { e.response_body } + expect { e.perform }.not_to change { easy.response_body } + end + + it 'preserves headers_write_callback of original handle' do + expect { easy.perform }.to change { easy.response_headers } + expect { easy.perform }.not_to change { e.response_headers } + end + + it 'sets new headers_write_callback of duplicated handle' do + expect { e.perform }.to change { e.response_headers } + expect { e.perform }.not_to change { easy.response_headers } + end + + it "resets response_body" do + expect(e.response_body).to be_empty + end + + it "resets response_headers" do + expect(e.response_headers).to be_empty + end + + it "sets response_body for duplicated Easy" do + e.perform + expect(e.response_body).not_to be_empty + end + + it "sets response_headers for duplicated Easy" do + e.perform + expect(e.response_headers).not_to be_empty + end + + it "preserves response_body for original Easy" do + e.perform + expect(easy.response_body).to eq('test_body') + end + + it "preserves response_headers for original Easy" do + e.perform + expect(easy.response_headers).to eq('test_headers') + end + end + + describe "#mirror" do + it "returns a Mirror" do + expect(easy.mirror).to be_a(Ethon::Easy::Mirror) + end + + it "builds from easy" do + expect(Ethon::Easy::Mirror).to receive(:from_easy).with(easy) + easy.mirror + end + end + + describe "#log_inspect" do + [ :url, :response_code, :return_code, :total_time ].each do |name| + it "contains #{name}" do + expect(easy.log_inspect).to match name.to_s + end + 
end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/libc_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/libc_spec.rb new file mode 100644 index 0000000..a323151 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/libc_spec.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Libc do + describe "#getdtablesize", :if => !Ethon::Curl.windows? do + it "returns an integer" do + expect(Ethon::Libc.getdtablesize).to be_a(Integer) + end + + it "returns bigger zero", :if => !Ethon::Curl.windows? do + expect(Ethon::Libc.getdtablesize).to_not be_zero + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/loggable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/loggable_spec.rb new file mode 100644 index 0000000..117cba3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/loggable_spec.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true +require "spec_helper" + +describe Ethon::Loggable do + + describe "#logger=" do + + let(:logger) do + Logger.new($stdout).tap do |log| + log.level = Logger::INFO + end + end + + before do + Ethon.logger = logger + end + + it "sets the logger" do + expect(Ethon.logger).to eq(logger) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/operations_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/operations_spec.rb new file mode 100644 index 0000000..781b62a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/operations_spec.rb @@ -0,0 +1,298 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi::Operations do + let(:multi) { Ethon::Multi.new } + let(:easy) { Ethon::Easy.new } + let(:pointer) { FFI::MemoryPointer.new(:int) } + + describe "#handle" do + it "returns a pointer" do + expect(multi.handle).to be_a(FFI::Pointer) + end + end + + describe "#running_count" do + context "when hydra has no easy" do + it "returns nil" do + expect(multi.send(:running_count)).to be_nil + end + end + + context "when hydra has easy" do + before do + easy.url = "http://localhost:3001/" + multi.add(easy) + multi.send(:trigger, pointer) + end + + it "returns 1" do + expect(multi.send(:running_count)).to eq(1) + end + end + + context "when hydra has more easys" do + let(:another_easy) { Ethon::Easy.new } + + before do + easy.url = "http://localhost:3001/" + another_easy.url = "http://localhost:3001/" + multi.add(easy) + multi.add(another_easy) + multi.send(:trigger, pointer) + end + + it "returns 2" do + expect(multi.send(:running_count)).to eq(2) + end + end + end + + describe "#get_timeout" do + context "when code ok" do + let(:timeout) { 1 } + + before do + expect(Ethon::Curl).to receive(:multi_timeout).and_return(:ok) + multi.instance_variable_set(:@timeout, double(:read_long => timeout)) + end + + it "doesn't raise" do + expect{ multi.send(:get_timeout) }.to_not raise_error + end + + context "when timeout smaller zero" do + let(:timeout) { -1 } + + it "returns 1" do + expect(multi.send(:get_timeout)).to eq(1) + end + end + + context "when timeout bigger or equal zero" do + let(:timeout) { 2 } + + it "returns timeout" do + expect(multi.send(:get_timeout)).to eq(timeout) + end + end + end + + context "when code not ok" do + before { expect(Ethon::Curl).to receive(:multi_timeout).and_return(:not_ok) } + + it "raises MultiTimeout error" do + expect{ multi.send(:get_timeout) }.to 
raise_error(Ethon::Errors::MultiTimeout) + end + end + end + + describe "#set_fds" do + let(:timeout) { 1 } + let(:max_fd) { 1 } + + context "when code ok" do + before { expect(Ethon::Curl).to receive(:multi_fdset).and_return(:ok) } + + it "doesn't raise" do + expect{ multi.method(:set_fds).call(timeout) }.to_not raise_error + end + + context "when max_fd -1" do + let(:max_fd) { -1 } + + before do + multi.instance_variable_set(:@max_fd, double(:read_int => max_fd)) + expect(multi).to receive(:sleep).with(0.001) + end + + it "waits 100ms" do + multi.method(:set_fds).call(timeout) + end + end + + context "when max_fd not -1" do + context "when code smaller zero" do + before { expect(Ethon::Curl).to receive(:select).and_return(-1) } + + it "raises Select error" do + expect{ multi.method(:set_fds).call(timeout) }.to raise_error(Ethon::Errors::Select) + end + end + + context "when code bigger or equal zero" do + before { expect(Ethon::Curl).to receive(:select).and_return(0) } + + it "doesn't raise" do + expect{ multi.method(:set_fds).call(timeout) }.to_not raise_error + end + end + end + end + + context "when code not ok" do + before { expect(Ethon::Curl).to receive(:multi_fdset).and_return(:not_ok) } + + it "raises MultiFdset error" do + expect{ multi.method(:set_fds).call(timeout) }.to raise_error(Ethon::Errors::MultiFdset) + end + end + end + + describe "#perform" do + context "when no easy handles" do + it "returns nil" do + expect(multi.perform).to be_nil + end + + it "logs" do + expect(Ethon.logger).to receive(:debug).twice + multi.perform + end + end + + context "when easy handle" do + before do + easy.url = "http://localhost:3001/" + multi.add(easy) + end + + it "requests" do + multi.perform + end + + it "sets easy" do + multi.perform + expect(easy.response_code).to eq(200) + end + end + + context "when four easy handles" do + let(:easies) do + ary = [] + 4.times do + ary << another_easy = Ethon::Easy.new + another_easy.url = "http://localhost:3001/" + end + ary + end + + before do + easies.each { |e| multi.add(e) } + multi.perform + end + + it "sets response codes" do + expect(easies.all?{ |e| e.response_code == 200 }).to be_truthy + end + end + end + + describe "#ongoing?" 
do + context "when easy_handles" do + before { multi.easy_handles << 1 } + + context "when running_count not greater 0" do + before { multi.instance_variable_set(:@running_count, 0) } + + it "returns true" do + expect(multi.method(:ongoing?).call).to be_truthy + end + end + + context "when running_count greater 0" do + before { multi.instance_variable_set(:@running_count, 1) } + + it "returns true" do + expect(multi.method(:ongoing?).call).to be_truthy + end + end + end + + context "when no easy_handles" do + context "when running_count not greater 0" do + before { multi.instance_variable_set(:@running_count, 0) } + + it "returns false" do + expect(multi.method(:ongoing?).call).to be_falsey + end + end + + context "when running_count greater 0" do + before { multi.instance_variable_set(:@running_count, 1) } + + it "returns true" do + expect(multi.method(:ongoing?).call).to be_truthy + end + end + end + end + + describe "#init_vars" do + it "sets @timeout" do + expect(multi.instance_variable_get(:@timeout)).to be_a(FFI::MemoryPointer) + end + + it "sets @timeval" do + expect(multi.instance_variable_get(:@timeval)).to be_a(Ethon::Curl::Timeval) + end + + it "sets @fd_read" do + expect(multi.instance_variable_get(:@fd_read)).to be_a(Ethon::Curl::FDSet) + end + + it "sets @fd_write" do + expect(multi.instance_variable_get(:@fd_write)).to be_a(Ethon::Curl::FDSet) + end + + it "sets @fd_excep" do + expect(multi.instance_variable_get(:@fd_excep)).to be_a(Ethon::Curl::FDSet) + end + + it "sets @max_fd" do + expect(multi.instance_variable_get(:@max_fd)).to be_a(FFI::MemoryPointer) + end + end + + describe "#reset_fds" do + after { multi.method(:reset_fds).call } + + it "resets @fd_read" do + expect(multi.instance_variable_get(:@fd_read)).to receive(:clear) + end + + it "resets @fd_write" do + expect(multi.instance_variable_get(:@fd_write)).to receive(:clear) + end + + it "resets @fd_excep" do + expect(multi.instance_variable_get(:@fd_excep)).to receive(:clear) + end + end + + describe "#check" do + it { skip("untested") } + end + + describe "#run" do + it { skip("untested") } + end + + describe "#trigger" do + it "calls multi perform" do + expect(Ethon::Curl).to receive(:multi_perform) + multi.send(:trigger, pointer) + end + + it "sets running count" do + multi.instance_variable_set(:@running_count, nil) + multi.send(:trigger, pointer) + expect(multi.instance_variable_get(:@running_count)).to_not be_nil + end + + it "returns multi perform code" do + expect(Ethon::Curl).to receive(:multi_perform).and_return(:ok) + expect(multi.send(:trigger, pointer)).to eq(:ok) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/options_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/options_spec.rb new file mode 100644 index 0000000..b4832c6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/options_spec.rb @@ -0,0 +1,182 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi::Options do + let(:multi) { Ethon::Multi.new } + + [ + :maxconnects, :pipelining, :socketdata, :socketfunction, + :timerdata, :timerfunction, :max_total_connections + ].each do |name| + describe "#{name}=" do + it "responds_to" do + expect(multi).to respond_to("#{name}=") + end + + it "sets option" do + expect(Ethon::Curl).to receive(:set_option).with(name, anything, anything, anything) + multi.method("#{name}=").call(1) + end + end + end + + context "socket_action mode" do + let(:multi) { Ethon::Multi.new(execution_mode: 
:socket_action) } + + describe "#socketfunction callbacks" do + it "allows multi_code return values" do + calls = [] + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + calls << what + :ok + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + expect(calls).to eq([]) + 5.times do + multi.socket_action + break unless calls.empty? + sleep 0.1 + end + expect(calls.last).to eq(:in).or(eq(:out)) + multi.delete(easy) + expect(calls.last).to eq(:remove) + end + + it "allows integer return values (compatibility)" do + called = false + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + called = true + 0 + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + 5.times do + multi.socket_action + break if called + sleep 0.1 + end + multi.delete(easy) + + expect(called).to be_truthy + end + + it "errors on invalid return codes" do + called = false + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + called = true + "hi" + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + expect { + 5.times do + multi.socket_action + break if called + sleep 0.1 + end + }.to raise_error(ArgumentError) + expect { multi.delete(easy) }.to raise_error(ArgumentError) + end + end + + describe "#timerfunction callbacks" do + it "allows multi_code return values" do + calls = [] + multi.timerfunction = proc do |handle, timeout_ms, userp| + calls << timeout_ms + :ok + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + expect(calls.last).to be >= 0 # adds an immediate timeout + + multi.delete(easy) + expect(calls.last).to eq(-1) # cancels the timer + end + + it "allows integer return values (compatibility)" do + called = false + multi.timerfunction = proc do |handle, timeout_ms, userp| + called = true + 0 + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + multi.socket_action + multi.delete(easy) + + expect(called).to be_truthy + end + + it "errors on invalid return codes" do + called = false + multi.timerfunction = proc do |handle, timeout_ms, userp| + called = true + "hi" + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + expect { multi.add(easy) }.to raise_error(ArgumentError) + end + end + end + + describe "#value_for" do + context "when option in bool" do + context "when value true" do + let(:value) { true } + + it "returns 1" do + expect(multi.method(:value_for).call(value, :bool)).to eq(1) + end + end + + context "when value false" do + let(:value) { false } + + it "returns 0" do + expect(multi.method(:value_for).call(value, :bool)).to eq(0) + end + end + end + + + context "when value in int" do + let(:value) { "2" } + + it "returns value casted to int" do + expect(multi.method(:value_for).call(value, :int)).to eq(2) + end + end + + context "when value in unspecific_options" do + context "when value a string" do + let(:value) { "www.example.\0com" } + + it "returns zero byte escaped string" do + expect(multi.method(:value_for).call(value, nil)).to eq("www.example.\\0com") + end + end + + context "when value not a string" do + let(:value) { 1 } + + it "returns value" do + expect(multi.method(:value_for).call(value, nil)).to eq(1) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/stack_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/stack_spec.rb 
new file mode 100644 index 0000000..5eb5900 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi/stack_spec.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi::Stack do + let(:multi) { Ethon::Multi.new } + let(:easy) { Ethon::Easy.new } + + describe "#add" do + context "when easy already added" do + before { multi.add(easy) } + + it "returns nil" do + expect(multi.add(easy)).to be_nil + end + end + + context "when easy new" do + it "adds easy to multi" do + expect(Ethon::Curl).to receive(:multi_add_handle).and_return(:ok) + multi.add(easy) + end + + it "adds easy to easy_handles" do + multi.add(easy) + expect(multi.easy_handles).to include(easy) + end + end + + context "when multi_add_handle fails" do + it "raises multi add error" do + expect(Ethon::Curl).to receive(:multi_add_handle).and_return(:bad_easy_handle) + expect{ multi.add(easy) }.to raise_error(Ethon::Errors::MultiAdd) + end + end + + context "when multi cleaned up before" do + it "raises multi add error" do + Ethon::Curl.multi_cleanup(multi.handle) + expect{ multi.add(easy) }.to raise_error(Ethon::Errors::MultiAdd) + end + end + end + + describe "#delete" do + context "when easy in easy_handles" do + before { multi.add(easy) } + + it "deletes easy from multi" do + expect(Ethon::Curl).to receive(:multi_remove_handle).and_return(:ok) + multi.delete(easy) + end + + it "deletes easy from easy_handles" do + multi.delete(easy) + expect(multi.easy_handles).to_not include(easy) + end + end + + context "when easy is not in easy_handles" do + it "does nothing" do + expect(Ethon::Curl).to receive(:multi_add_handle).and_return(:ok) + multi.add(easy) + end + + it "adds easy to easy_handles" do + multi.add(easy) + expect(multi.easy_handles).to include(easy) + end + end + + context "when multi_remove_handle fails" do + before { multi.add(easy) } + + it "raises multi remove error" do + expect(Ethon::Curl).to receive(:multi_remove_handle).and_return(:bad_easy_handle) + expect{ multi.delete(easy) }.to raise_error(Ethon::Errors::MultiRemove) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi_spec.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi_spec.rb new file mode 100644 index 0000000..a483357 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/ethon/multi_spec.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi do + describe ".new" do + it "inits curl" do + expect(Ethon::Curl).to receive(:init) + Ethon::Multi.new + end + + context "with default options" do + it "allows running #perform with the default execution_mode" do + Ethon::Multi.new.perform + end + + it "refuses to run #socket_action" do + expect { Ethon::Multi.new.socket_action }.to raise_error(ArgumentError) + end + end + + context "when options not empty" do + context "when pipelining is set" do + let(:options) { { :pipelining => true } } + + it "sets pipelining" do + expect_any_instance_of(Ethon::Multi).to receive(:pipelining=).with(true) + Ethon::Multi.new(options) + end + end + + context "when execution_mode option is :socket_action" do + let(:options) { { :execution_mode => :socket_action } } + let(:multi) { Ethon::Multi.new(options) } + + it "refuses to run #perform" do + expect { multi.perform }.to raise_error(ArgumentError) + end + + it "allows running #socket_action" do + multi.socket_action + end + end + end + end + + describe "#socket_action" do + let(:options) { { 
:execution_mode => :socket_action } } + let(:select_state) { { :readers => [], :writers => [], :timeout => 0 } } + let(:multi) { + multi = Ethon::Multi.new(options) + multi.timerfunction = proc do |handle, timeout_ms, userp| + timeout_ms = nil if timeout_ms == -1 + select_state[:timeout] = timeout_ms + :ok + end + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + case what + when :remove + select_state[:readers].delete(sock) + select_state[:writers].delete(sock) + when :in + select_state[:readers].push(sock) unless select_state[:readers].include? sock + select_state[:writers].delete(sock) + when :out + select_state[:readers].delete(sock) + select_state[:writers].push(sock) unless select_state[:writers].include? sock + when :inout + select_state[:readers].push(sock) unless select_state[:readers].include? sock + select_state[:writers].push(sock) unless select_state[:writers].include? sock + else + raise ArgumentError, "invalid value for 'what' in socketfunction callback" + end + :ok + end + multi + } + + def fds_to_ios(fds) + fds.map do |fd| + IO.for_fd(fd).tap { |io| io.autoclose = false } + end + end + + def perform_socket_action_until_complete + multi.socket_action # start things off + + while multi.ongoing? + readers, writers, _ = IO.select( + fds_to_ios(select_state[:readers]), + fds_to_ios(select_state[:writers]), + [], + select_state[:timeout] + ) + + to_notify = Hash.new { |hash, key| hash[key] = [] } + unless readers.nil? + readers.each do |reader| + to_notify[reader] << :in + end + end + unless writers.nil? + writers.each do |writer| + to_notify[writer] << :out + end + end + + to_notify.each do |io, readiness| + multi.socket_action(io, readiness) + end + + # if we didn't have anything to notify, then we timed out + multi.socket_action if to_notify.empty? + end + ensure + multi.easy_handles.dup.each do |h| + multi.delete(h) + end + end + + it "supports an end-to-end request" do + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/" + multi.add(easy) + + perform_socket_action_until_complete + + expect(multi.ongoing?).to eq(false) + end + + it "supports multiple concurrent requests" do + handles = [] + 10.times do + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + handles << easy + end + + start = Time.now + perform_socket_action_until_complete + duration = Time.now - start + + # these should have happened concurrently + expect(duration).to be < 2 + expect(multi.ongoing?).to eq(false) + + handles.each do |handle| + expect(handle.response_code).to eq(200) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/spec_helper.rb new file mode 100644 index 0000000..55bc9ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/spec_helper.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), "..", "lib")) + +require 'bundler' +Bundler.setup +require "ethon" +require 'rspec' + +if defined? 
require_relative + require_relative 'support/localhost_server' + require_relative 'support/server' +else + require 'support/localhost_server' + require 'support/server' +end + +# Ethon.logger = Logger.new($stdout).tap do |log| +# log.level = Logger::DEBUG +# end + +RSpec.configure do |config| + # config.order = :rand + + config.before(:suite) do + LocalhostServer.new(TESTSERVER.new, 3001) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/support/localhost_server.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/support/localhost_server.rb new file mode 100644 index 0000000..a7b119a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/support/localhost_server.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true +require 'rack' +require 'rack/handler/webrick' +require 'net/http' + +# The code for this is inspired by Capybara's server: +# http://github.com/jnicklas/capybara/blob/0.3.9/lib/capybara/server.rb +class LocalhostServer + READY_MESSAGE = "Server ready" + + class Identify + def initialize(app) + @app = app + end + + def call(env) + if env["PATH_INFO"] == "/__identify__" + [200, {}, [LocalhostServer::READY_MESSAGE]] + else + @app.call(env) + end + end + end + + attr_reader :port + + def initialize(rack_app, port = nil) + @port = port || find_available_port + @rack_app = rack_app + concurrently { boot } + wait_until(10, "Boot failed.") { booted? } + end + + private + + def find_available_port + server = TCPServer.new('127.0.0.1', 0) + server.addr[1] + ensure + server.close if server + end + + def boot + # Use WEBrick since it's part of the ruby standard library and is available on all ruby interpreters. + options = { :Port => port } + options.merge!(:AccessLog => [], :Logger => WEBrick::BasicLog.new(StringIO.new)) unless ENV['VERBOSE_SERVER'] + Rack::Handler::WEBrick.run(Identify.new(@rack_app), **options) + end + + def booted? + res = ::Net::HTTP.get_response("localhost", '/__identify__', port) + if res.is_a?(::Net::HTTPSuccess) or res.is_a?(::Net::HTTPRedirection) + return res.body == READY_MESSAGE + end + rescue Errno::ECONNREFUSED, Errno::EBADF + return false + end + + def concurrently + if should_use_subprocess? + pid = Process.fork do + trap(:INT) { ::Rack::Handler::WEBrick.shutdown } + yield + exit # manually exit; otherwise this sub-process will re-run the specs that haven't run yet. + end + + at_exit do + Process.kill('INT', pid) + begin + Process.wait(pid) + rescue Errno::ECHILD + # ignore this error...I think it means the child process has already exited. + end + end + else + Thread.new { yield } + end + end + + def should_use_subprocess? 
+ # !ENV['THREADED'] + false + end + + def wait_until(timeout, error_message, &block) + start_time = Time.now + + while true + return if yield + raise TimeoutError.new(error_message) if (Time.now - start_time) > timeout + sleep(0.05) + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/support/server.rb b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/support/server.rb new file mode 100644 index 0000000..ab8ffd3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ethon-0.15.0/spec/support/server.rb @@ -0,0 +1,115 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true +require 'json' +require 'zlib' +require 'sinatra/base' + +TESTSERVER = Sinatra.new do + set :logging, nil + + fail_count = 0 + + post '/file' do + { + 'content-type' => params[:file][:type], + 'filename' => params[:file][:filename], + 'content' => params[:file][:tempfile].read, + 'request-content-type' => request.env['CONTENT_TYPE'] + }.to_json + end + + get '/multiple-headers' do + [200, { 'Set-Cookie' => %w[ foo bar ], 'Content-Type' => 'text/plain' }, ['']] + end + + get '/fail/:number' do + if fail_count >= params[:number].to_i + "ok" + else + fail_count += 1 + error 500, "oh noes!" + end + end + + get '/fail_forever' do + error 500, "oh noes!" + end + + get '/redirect' do + redirect '/' + end + + post '/redirect' do + redirect '/' + end + + get '/bad_redirect' do + redirect '/bad_redirect' + end + + get '/auth_basic/:username/:password' do + @auth ||= Rack::Auth::Basic::Request.new(request.env) + # Check that we've got a basic auth, and that it's credentials match the ones + # provided in the request + if @auth.provided? && @auth.basic? && @auth.credentials == [ params[:username], params[:password] ] + # auth is valid - confirm it + true + else + # invalid auth - request the authentication + response['WWW-Authenticate'] = %(Basic realm="Testing HTTP Auth") + throw(:halt, [401, "Not authorized\n"]) + end + end + + get '/auth_ntlm' do + # we're just checking for the existence if NTLM auth header here. 
It's validation + # is too troublesome and really doesn't bother is much, it's up to libcurl to make + # it valid + response['WWW-Authenticate'] = 'NTLM' + is_ntlm_auth = /^NTLM/ =~ request.env['HTTP_AUTHORIZATION'] + true if is_ntlm_auth + throw(:halt, [401, "Not authorized\n"]) if !is_ntlm_auth + end + + get '/gzipped' do + req_env = request.env.to_json + z = Zlib::Deflate.new + gzipped_env = z.deflate(req_env, Zlib::FINISH) + z.close + response['Content-Encoding'] = 'gzip' + gzipped_env + end + + get '/**' do + sleep params["delay"].to_i if params.has_key?("delay") + request.env.merge!(:body => request.body.read).to_json + end + + head '/**' do + sleep params["delay"].to_i if params.has_key?("delay") + end + + put '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + post '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + delete '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + patch '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + options '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + route 'PURGE', '/**' do + request.env.merge!(:body => request.body.read).to_json + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.appveyor.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.appveyor.yml new file mode 100644 index 0000000..4e61390 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.appveyor.yml @@ -0,0 +1,30 @@ +install: + - SET PATH=C:\Ruby%RUBYVER%\bin;%PATH% + - SET RAKEOPT=-rdevkit + - ps: | + if ($env:RUBYVER -like "*head*") { + $(new-object net.webclient).DownloadFile("https://github.com/oneclick/rubyinstaller2/releases/download/rubyinstaller-head/rubyinstaller-$env:RUBYVER.exe", "$pwd/ruby-setup.exe") + cmd /c ruby-setup.exe /verysilent /dir=C:/Ruby$env:RUBYVER + } + - ridk version + - gem --version + - gem install bundler --quiet --no-document + - bundle install + # Update to libffi-3.3 since Appveyor version fails on LongDouble specs + - ridk exec pacman --sync --refresh --needed --noconfirm mingw-w64-x86_64-libffi mingw-w64-i686-libffi +build: off +build_script: + - bundle exec rake libffi compile -- %EXTCONFOPTS% || bundle exec rake compile -- %EXTCONFOPTS% +test_script: + - bundle exec rake test + - bundle exec rake types_conf && git --no-pager diff +environment: + matrix: + - RUBYVER: "head-x64" + EXTCONFOPTS: "--disable-system-libffi" + - RUBYVER: 24 + EXTCONFOPTS: "--disable-system-libffi" + - RUBYVER: 25-x64 + EXTCONFOPTS: "--enable-system-libffi" + - RUBYVER: 26 + EXTCONFOPTS: "--enable-system-libffi" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.github/workflows/ci.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.github/workflows/ci.yml new file mode 100644 index 0000000..ee7d068 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.github/workflows/ci.yml @@ -0,0 +1,64 @@ +name: CI +on: [push, pull_request] +jobs: + system-libffi: + # Run on latest MRI with explicit selection of system or builtin libffi + strategy: + fail-fast: false + matrix: + os: [ ubuntu, macos, windows ] + extconfopts: [ --disable-system-libffi, --enable-system-libffi ] + runs-on: ${{ matrix.os }}-latest + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: 2.7 + + - run: brew install automake libffi pkg-config + if: matrix.os == 'macos' + - run: ridk exec pacman --sync --refresh --needed --noconfirm mingw-w64-x86_64-libffi + if: matrix.os == 'windows' && matrix.extconfopts == 
'--enable-system-libffi' + + - run: bundle install + - run: bundle exec rake libffi + - run: bundle exec rake compile -- ${{ matrix.extconfopts }} + env: + # work around misconfiguration of libffi on MacOS with homebrew + PKG_CONFIG_PATH: ${{ env.PKG_CONFIG_PATH }}:/usr/local/opt/libffi/lib/pkgconfig + - run: bundle exec rake test + - run: bundle exec rake types_conf && git --no-pager diff + + specs: + # Run all specs on all ruby implementations + # Use automatic libffi selection on MRI + strategy: + fail-fast: false + matrix: + os: [ ubuntu, macos, windows ] + ruby: [ 2.3, 2.4, 2.5, 2.6, 2.7, ruby-head, truffleruby-head ] + exclude: + - os: windows + ruby: truffleruby-head + - os: windows + ruby: 2.3 # compilation fails + runs-on: ${{ matrix.os }}-latest + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + + - run: brew install automake + if: matrix.os == 'macos' + + - run: bundle install + - run: bundle exec rake libffi + - run: bundle exec rake compile + + - run: bundle exec rake test + + - run: bundle exec rake bench:all + if: matrix.ruby != 'truffleruby-head' + env: + ITER: 10 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.gitignore b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.gitignore new file mode 100644 index 0000000..1d1f393 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.gitignore @@ -0,0 +1,25 @@ +doc/ +bin/ +.yardoc +*.orig +nbproject/private +pkg +*.orig +*.rej +*.patch +*.diff +build +*.so +*.[oa] +core +lib/ffi/types.conf +lib/ffi_c.bundle +lib/ffi_c.so +vendor +.bundle +Gemfile.lock +types_log +*.gem +embed-test.log +spec/ffi/embed-test/ext/Makefile +spec/ffi/embed-test/ext/embed_test.bundle \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.gitmodules b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.gitmodules new file mode 100644 index 0000000..1d6cbc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.gitmodules @@ -0,0 +1,4 @@ +[submodule "ext/ffi_c/libffi"] + path = ext/ffi_c/libffi + url = https://github.com/libffi/libffi.git + branch = master diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.travis.yml new file mode 100644 index 0000000..52f8913 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.travis.yml @@ -0,0 +1,58 @@ +dist: trusty +group: beta +language: ruby +git: + submodules: false + +script: + - bundle exec rake libffi + - bundle exec rake compile + - bundle exec rake test + - | + if [[ $(ruby -v) != *truffleruby* ]]; then + ITER=10 bundle exec rake bench:all + fi + - bundle exec rake types_conf && git --no-pager diff +os: + - linux + - osx +rvm: + - 2.3.8 + - 2.4.6 + - 2.5.5 + - 2.6.5 + - 2.7.0 + - ruby-head + - truffleruby-head + +env: + - CC=gcc + - CC=clang +matrix: + allow_failures: + - os: osx + rvm: ruby-head + - os: osx + rvm: 2.3.8 + - os: linux + rvm: ruby-head + include: + - name: powerpc + language: generic + before_install: | + docker run --rm --privileged multiarch/qemu-user-static:register --reset && + docker build --rm -t ffi-powerpc -f spec/env/Dockerfile.powerpc . + script: | + docker run --rm -t -v `pwd`:/ffi ffi-powerpc + - name: armhf + language: generic + before_install: | + docker run --rm --privileged multiarch/qemu-user-static:register --reset && + docker build --rm -t ffi-armhf -f spec/env/Dockerfile.armhf . 
+ script: | + docker run --rm -t -v `pwd`:/ffi ffi-armhf + exclude: + - os: osx + rvm: truffleruby-head +after_failure: + - "find build -name mkmf.log | xargs cat" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.yardopts b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.yardopts new file mode 100644 index 0000000..8ef32b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/.yardopts @@ -0,0 +1,5 @@ +--title "Ruby FFI" +--charset UTF-8 +--private +lib/**/*.rb +ext/**/*.c \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/CHANGELOG.md new file mode 100644 index 0000000..f78b870 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/CHANGELOG.md @@ -0,0 +1,222 @@ +1.13.1 / 2020-06-09 +------------------- + +Changed: +* Revert use of `ucrtbase.dll` as default C library on Windows-MINGW. + `ucrtbase.dll` is still used on MSWIN target. #790 +* Test for `ffi_prep_closure_loc()` to make sure we can use this function. + This fixes incorrect use of system libffi on MacOS Mojave (10.14). #787 +* Update types.conf on x86_64-dragonflybsd + + +1.13.0 / 2020-06-01 +------------------- + +Added: +* Add TruffleRuby support. Almost all specs are running on TruffleRuby and succeed. #768 +* Add ruby source files to the java gem. This allows to ship the Ruby library code per platform java gem and add it as a default gem to JRuby. #763 +* Add FFI::Platform::LONG_DOUBLE_SIZE +* Add bounds checks for writing to an inline char[] . #756 +* Add long double as callback return value. #771 +* Update type definitions and add types from stdint.h and stddef.h on i386-windows, x86_64-windows, x86_64-darwin, x86_64-linux, arm-linux, powerpc-linux. #749 +* Add new type definitions for powerpc-openbsd and sparcv9-openbsd. #775, #778 + +Changed: +* Raise required ruby version to >= 2.3. +* Lots of cleanups and improvements in library, specs and benchmarks. +* Fix a lot of compiler warnings at the C-extension +* Fix several install issues on MacOS: + * Look for libffi in SDK paths, since recent versions of macOS removed it from `/usr/include` . #757 + * Fix error `ld: library not found for -lgcc_s.10.4` + * Don't built for i386 architecture as it is deprecated +* Several fixes for MSVC build on Windows. #779 +* Use `ucrtbase.dll` as default C library on Windows instead of old `msvcrt.dll`. #779 +* Update builtin libffi to fix a Powerpc issue with parameters of type long +* Allow unmodified sourcing of (the ruby code of) this gem in JRuby and TruffleRuby as a default gem. #747 +* Improve check to detect if a module has a #find_type method suitable for FFI. This fixes compatibility with stdlib `mkmf` . #776 + +Removed: +* Reject callback with `:string` return type at definition, because it didn't work so far and is not save to use. #751, #782 + + +1.12.2 / 2020-02-01 +------------------- + +* Fix possible segfault at FFI::Struct#[] and []= after GC.compact . #742 + + +1.12.1 / 2020-01-14 +------------------- + +Added: +* Add binary gem support for ruby-2.7 on Windows + + +1.12.0 / 2020-01-14 +------------------- + +Added: +* FFI::VERSION is defined as part of `require 'ffi'` now. + It is no longer necessary to `require 'ffi/version'` . + +Changed: +* Update libffi to latest master. + +Deprecated: +* Overwriting struct layouts is now warned and will be disallowed in ffi-2.0. #734, #735 + + +1.11.3 / 2019-11-25 +------------------- + +Removed: +* Remove support for tainted objects which cause deprecation warnings in ruby-2.7. 
#730 + + +1.11.2 / 2019-11-11 +------------------- + +Added: +* Add DragonFlyBSD as a platform. #724 + +Changed: +* Sort all types.conf files, so that files and changes are easier to compare. +* Regenerated type conf for freebsd12 and x86_64-linux targets. #722 +* Remove MACOSX_DEPLOYMENT_TARGET that was targeting very old version 10.4. #647 +* Fix library name mangling for non glibc Linux/UNIX. #727 +* Fix compiler warnings raised by ruby-2.7 +* Update libffi to latest master. + + +1.11.1 / 2019-05-20 +------------------- + +Changed: +* Raise required ruby version to >=2.0. #699, #700 +* Fix a possible linker error on ruby < 2.3 on Linux. + + +1.11.0 / 2019-05-17 +------------------- +This version was yanked on 2019-05-20 to fix an install issue on ruby-1.9.3. #700 + +Added: +* Add ability to disable or force use of system libffi. #669 + Use like `gem inst ffi -- --enable-system-libffi` . +* Add ability to call FFI callbacks from outside of FFI call frame. #584 +* Add proper documentation to FFI::Generator and ::Task +* Add gemspec metadata. #696, #698 + +Changed: +* Fix stdcall on Win32. #649, #669 +* Fix load paths for FFI::Generator::Task +* Fix FFI::Pointer#read_string(0) to return a binary String. #692 +* Fix benchmark suite so that it runs on ruby-2.x +* Move FFI::Platform::CPU from C to Ruby. #663 +* Move FFI::StructByReference to Ruby. #681 +* Move FFI::DataConverter to Ruby (#661) +* Various cleanups and improvements of specs and benchmarks + +Removed: +* Remove ruby-1.8 and 1.9 compatibility code. #683 +* Remove unused spec files. #684 + + +1.10.0 / 2019-01-06 +------------------- + +Added: +* Add /opt/local/lib/ to ffi's fallback library search path. #638 +* Add binary gem support for ruby-2.6 on Windows +* Add FreeBSD on AArch64 and ARM support. #644 +* Add FFI::LastError.winapi_error on Windows native or Cygwin. #633 + +Changed: +* Update to rake-compiler-dock-0.7.0 +* Use 64-bit inodes on FreeBSD >= 12. #644 +* Switch time_t and suseconds_t types to long on FreeBSD. #627 +* Make register_t long_long on 64-bit FreeBSD. #644 +* Fix Pointer#write_array_of_type #637 + +Removed: +* Drop binary gem support for ruby-2.0 and 2.1 on Windows + + +1.9.25 / 2018-06-03 +------------------- + +Changed: +* Revert closures via libffi. + This re-adds ClosurePool and fixes compat with SELinux enabled systems. #621 + + +1.9.24 / 2018-06-02 +------------------- + +Security Note: + +This update addresses vulnerability CVE-2018-1000201: DLL loading issue which can be hijacked on Windows OS, when a Symbol is used as DLL name instead of a String. Found by Matthew Bush. + +Added: +* Added a CHANGELOG file +* Add mips64(eb) support, and mips r6 support. (#601) + +Changed: +* Update libffi to latest changes on master. +* Don't search in hardcoded /usr paths on Windows. +* Don't treat Symbol args different to Strings in ffi_lib. +* Make sure size_t is defined in Thread.c. Fixes #609 + + +1.9.23 / 2018-02-25 +------------------- + +Changed: +* Fix unnecessary rebuild of configure in darwin multi arch. Fixes #605 + + +1.9.22 / 2018-02-22 +------------------- + +Changed: +* Update libffi to latest changes on master. +* Update detection of system libffi to match new requirements. Fixes #617 +* Prefer bundled libffi over system libffi on Mac OS. +* Do closures via libffi. This removes ClosurePool and fixes compat with PaX. #540 +* Use a more deterministic gem packaging. +* Fix unnecessary update of autoconf files at gem install. 
+ + +1.9.21 / 2018-02-06 +------------------- + +Added: +* Ruby-2.5 support by Windows binary gems. Fixes #598 +* Add missing win64 types. +* Added support for Bitmask. (#573) +* Add support for MSYS2 (#572) and Sparc64 Linux. (#574) + +Changed: +* Fix read_string to not throw an error on length 0. +* Don't use absolute paths for sh and env. Fixes usage on Adroid #528 +* Use Ruby implementation for `which` for better compat with Windows. Fixes #315 +* Fix compatibility with PPC64LE platform. (#577) +* Normalize sparc64 to sparcv9. (#575) + +Removed: +* Drop Ruby 1.8.7 support (#480) + + +1.9.18 / 2017-03-03 +------------------- + +Added: +* Add compatibility with Ruby-2.4. + +Changed: +* Add missing shlwapi.h include to fix Windows build. +* Avoid undefined behaviour of LoadLibrary() on Windows. #553 + + +1.9.17 / 2017-01-13 +------------------- diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/COPYING b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/COPYING new file mode 100644 index 0000000..7622318 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/COPYING @@ -0,0 +1,49 @@ +Copyright (c) 2008-2013, Ruby FFI project contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Ruby FFI project nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +libffi, used by this project, is licensed under the MIT license: + +libffi - Copyright (c) 1996-2011 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/Gemfile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/Gemfile new file mode 100644 index 0000000..ec88154 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/Gemfile @@ -0,0 +1,17 @@ +source 'https://rubygems.org' + +group :development do + gem 'rake', '~> 13.0' + gem 'rake-compiler', '~> 1.0.3' + gem 'rake-compiler-dock', '~> 1.0' + gem 'rspec', '~> 3.0' + # irb is a dependency of rubygems-tasks 0.2.5. + # irb versions > 1.1.1 depend on reline, + # which sometimes causes 'bundle install' to fail on Ruby <= 2.4: https://github.com/rubygems/rubygems/issues/3463 + gem 'rubygems-tasks', '>= 0.2', '< 0.2.5', :require => 'rubygems/tasks' +end + +group :doc do + gem 'kramdown' + gem 'yard', '~> 0.9' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/LICENSE new file mode 100644 index 0000000..20185fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2008-2016, Ruby FFI project contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Ruby FFI project nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/LICENSE.SPECS b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/LICENSE.SPECS new file mode 100644 index 0000000..5c9ffce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/LICENSE.SPECS @@ -0,0 +1,22 @@ +Copyright (c) 2008-2012 Ruby-FFI contributors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/README.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/README.md new file mode 100644 index 0000000..326ed47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/README.md @@ -0,0 +1,124 @@ +# Ruby-FFI https://github.com/ffi/ffi/wiki [![Build Status](https://travis-ci.org/ffi/ffi.svg?branch=master)](https://travis-ci.org/ffi/ffi) [![Build status Windows](https://ci.appveyor.com/api/projects/status/r8wxn1sd4s794gg1/branch/master?svg=true)](https://ci.appveyor.com/project/larskanis/ffi-aofqa/branch/master) + +## Description + +Ruby-FFI is a gem for programmatically loading dynamically-linked native +libraries, binding functions within them, and calling those functions +from Ruby code. Moreover, a Ruby-FFI extension works without changes +on CRuby (MRI), JRuby, Rubinius and TruffleRuby. [Discover why you should write your next extension +using Ruby-FFI](https://github.com/ffi/ffi/wiki/why-use-ffi). + +## Features + +* Intuitive DSL +* Supports all C native types +* C structs (also nested), enums and global variables +* Callbacks from C to Ruby +* Automatic garbage collection of native memory + +## Synopsis + +```ruby +require 'ffi' + +module MyLib + extend FFI::Library + ffi_lib 'c' + attach_function :puts, [ :string ], :int +end + +MyLib.puts 'Hello, World using libc!' +``` + +For less minimalistic and more examples you may look at: + +* the `samples/` folder +* the examples on the [wiki](https://github.com/ffi/ffi/wiki) +* the projects using FFI listed on the wiki: https://github.com/ffi/ffi/wiki/projects-using-ffi + +## Requirements + +When installing the gem on CRuby (MRI), you will need: +* A C compiler (e.g., Xcode on macOS, `gcc` or `clang` on everything else) +Optionally (speeds up installation): +* The `libffi` library and development headers - this is commonly in the `libffi-dev` or `libffi-devel` packages + +The ffi gem comes with a builtin libffi version, which is used, when the system libffi library is not available or too old. 
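The synopsis and requirements sections of the vendored README above cover the core of the FFI DSL. As a complementary illustration (this sketch is not part of the vendored README or of this diff), here is a minimal example binding one more libc function; the choice of `getpid` and the `:int` return type are assumptions about a typical Unix-like host:

```ruby
require 'ffi'

# Minimal sketch, assuming a Unix-like system whose libc exports getpid().
module SysLib
  extend FFI::Library
  ffi_lib 'c'                          # same String-based library lookup as in the README synopsis
  attach_function :getpid, [], :int    # pid_t is narrowed to :int purely for illustration
end

puts "current pid: #{SysLib.getpid}"
```

Once attached, `SysLib.getpid` behaves like any other Ruby method; whether the gem was compiled against the builtin or the system libffi (see the flags below) makes no difference to this calling code.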
+Use of the system libffi can be enforced by: +``` +gem install ffi -- --enable-system-libffi # to install the gem manually +bundle config build.ffi --enable-system-libffi # for bundle install +``` +or prevented by `--disable-system-libffi`. + +On Linux systems running with [PaX](https://en.wikipedia.org/wiki/PaX) (Gentoo, Alpine, etc.), FFI may trigger `mprotect` errors. You may need to disable [mprotect](https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options#Restrict_mprotect.28.29) for ruby (`paxctl -m [/path/to/ruby]`) for the time being until a solution is found. + +On FreeBSD systems pkgconf must be installed for the gem to be able to compile using clang. Install either via packages `pkg install pkgconf` or from ports via `devel/pkgconf`. + +On JRuby and TruffleRuby, there are no requirements to install the FFI gem, and `require 'ffi'` works even without installing the gem (i.e., the gem is preinstalled on these implementations). + +## Installation + +From rubygems: + + [sudo] gem install ffi + +or from the git repository on github: + + git clone git://github.com/ffi/ffi.git + git submodule update --init --recursive + cd ffi + rake install + +## License + +The ffi library is covered by the BSD license, also see the LICENSE file. +The specs are covered by the same license as [ruby/spec](https://github.com/ruby/spec), the MIT license. + +## Credits + +The following people have submitted code, bug reports, or otherwise contributed to the success of this project: + +* Alban Peignier +* Aman Gupta +* Andrea Fazzi +* Andreas Niederl +* Andrew Cholakian +* Antonio Terceiro +* Benoit Daloze +* Brian Candler +* Brian D. Burns +* Bryan Kearney +* Charlie Savage +* Chikanaga Tomoyuki +* Hongli Lai +* Ian MacLeod +* Jake Douglas +* Jean-Dominique Morani +* Jeremy Hinegardner +* Jesús García Sáez +* Joe Khoobyar +* Jurij Smakov +* KISHIMOTO, Makoto +* Kim Burgestrand +* Lars Kanis +* Luc Heinrich +* Luis Lavena +* Matijs van Zuijlen +* Matthew King +* Mike Dalessio +* NARUSE, Yui +* Park Heesob +* Shin Yee +* Stephen Bannasch +* Suraj N. 
Kurapati +* Sylvain Daubert +* Victor Costan +* beoran@gmail.com +* ctide +* emboss +* hobophobe +* meh +* postmodern +* wycats@gmail.com +* Wayne Meissner diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/Rakefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/Rakefile new file mode 100644 index 0000000..ce27f29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/Rakefile @@ -0,0 +1,177 @@ +require 'rubygems/tasks' +require 'rbconfig' +require 'rake/clean' +require_relative "lib/ffi/version" + +require 'date' +require 'fileutils' +require 'rbconfig' +require 'rspec/core/rake_task' +require 'rubygems/package_task' + +BUILD_DIR = "build" +BUILD_EXT_DIR = File.join(BUILD_DIR, "#{RbConfig::CONFIG['arch']}", 'ffi_c', RUBY_VERSION) + +def gem_spec + @gem_spec ||= Gem::Specification.load('ffi.gemspec') +end + +RSpec::Core::RakeTask.new(:spec => :compile) do |config| + config.rspec_opts = YAML.load_file 'spec/spec.opts' +end + +desc "Build all packages" +task :package => %w[ gem:java gem:windows ] + +CLOBBER.include 'lib/ffi/types.conf' +CLOBBER.include 'pkg' +CLOBBER.include 'log' + +CLEAN.include 'build' +CLEAN.include 'conftest.dSYM' +CLEAN.include 'spec/ffi/fixtures/libtest.{dylib,so,dll}' +CLEAN.include 'spec/ffi/fixtures/*.o' +CLEAN.include 'spec/ffi/embed-test/ext/*.{o,def}' +CLEAN.include 'spec/ffi/embed-test/ext/Makefile' +CLEAN.include "pkg/ffi-*-{mingw32,java}" +CLEAN.include 'lib/1.*' +CLEAN.include 'lib/2.*' + +task :distclean => :clobber + +desc "Test the extension" +task :test => [ :spec ] + + +namespace :bench do + ITER = ENV['ITER'] ? ENV['ITER'].to_i : 100000 + bench_files = Dir["bench/bench_*.rb"].sort.reject { |f| f == "bench/bench_helper.rb" } + bench_files.each do |bench| + task File.basename(bench, ".rb")[6..-1] => :compile do + sh %{#{Gem.ruby} #{bench} #{ITER}} + end + end + task :all => :compile do + bench_files.each do |bench| + sh %{#{Gem.ruby} #{bench}} + end + end +end + +task 'spec:run' => :compile +task 'spec:specdoc' => :compile + +task :default => :spec + +namespace 'java' do + + java_gem_spec = gem_spec.dup.tap do |s| + s.files.reject! { |f| File.fnmatch?("ext/*", f) } + s.extensions = [] + s.platform = 'java' + end + + Gem::PackageTask.new(java_gem_spec) do |pkg| + pkg.need_zip = true + pkg.need_tar = true + pkg.package_dir = 'pkg' + end +end + +task 'gem:java' => 'java:gem' + +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + require 'rake/extensiontask' + Rake::ExtensionTask.new('ffi_c', gem_spec) do |ext| + ext.name = 'ffi_c' # indicate the name of the extension. + # ext.lib_dir = BUILD_DIR # put binaries into this folder. + ext.tmp_dir = BUILD_DIR # temporary folder used during compilation. + ext.cross_compile = true # enable cross compilation (requires cross compile toolchain) + ext.cross_platform = %w[i386-mingw32 x64-mingw32] # forces the Windows platform instead of the default one + ext.cross_compiling do |spec| + spec.files.reject! 
{ |path| File.fnmatch?('ext/*', path) } + end + end + + # To reduce the gem file size strip mingw32 dlls before packaging + ENV['RUBY_CC_VERSION'].to_s.split(':').each do |ruby_version| + task "build/i386-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" do |t| + sh "i686-w64-mingw32-strip -S build/i386-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" + end + + task "build/x64-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" do |t| + sh "x86_64-w64-mingw32-strip -S build/x64-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/ffi_c.so" + end + end +else + task :compile do + STDERR.puts "Nothing to compile on #{RUBY_ENGINE}" + end +end + +desc "build a windows gem without all the ceremony" +task "gem:windows" do + require "rake_compiler_dock" + sh "bundle package" + RakeCompilerDock.sh "sudo apt-get update && sudo apt-get install -y libltdl-dev && bundle --local && rake cross native gem MAKE='nice make -j`nproc`' RUBY_CC_VERSION=${RUBY_CC_VERSION/:2.2.2/}" +end + +directory "ext/ffi_c/libffi" +file "ext/ffi_c/libffi/autogen.sh" => "ext/ffi_c/libffi" do + warn "Downloading libffi ..." + sh "git submodule update --init --recursive" +end +task :libffi => "ext/ffi_c/libffi/autogen.sh" + +LIBFFI_GIT_FILES = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + +# Generate files in gemspec but not in libffi's git repo by running autogen.sh +gem_spec.files.select do |f| + f =~ /ext\/ffi_c\/libffi\/(.*)/ && !LIBFFI_GIT_FILES.include?($1) +end.each do |f| + file f => "ext/ffi_c/libffi/autogen.sh" do + chdir "ext/ffi_c/libffi" do + sh "sh ./autogen.sh" + end + touch f + if gem_spec.files != Gem::Specification.load('./ffi.gemspec').files + warn "gemspec files have changed -> Please restart rake!" + exit 1 + end + end +end + +require_relative "lib/ffi/platform" +types_conf = File.expand_path(File.join(FFI::Platform::CONF_DIR, 'types.conf')) +logfile = File.join(File.dirname(__FILE__), 'types_log') + +task types_conf do |task| + require 'fileutils' + require_relative "lib/ffi/tools/types_generator" + options = {} + FileUtils.mkdir_p(File.dirname(task.name), mode: 0755 ) + File.open(task.name, File::CREAT|File::TRUNC|File::RDWR, 0644) do |f| + f.puts FFI::TypesGenerator.generate(options) + end + File.open(logfile, 'w') do |log| + log.puts(types_conf) + end +end + +desc "Create or update type information for platform #{FFI::Platform::NAME}" +task :types_conf => types_conf + +Gem::Tasks.new do |t| + t.scm.tag.format = '%s' +end + +begin + require 'yard' + + namespace :doc do + YARD::Rake::YardocTask.new do |yard| + end + end +rescue LoadError + warn "[warn] YARD unavailable" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.c new file mode 100644 index 0000000..f25c709 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.c @@ -0,0 +1,1105 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Jake Douglas + * Copyright (C) 2008 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif + +#include +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Function.h" +#include "LongDouble.h" + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +static inline char* memory_address(VALUE self); +VALUE rbffi_AbstractMemoryClass = Qnil; +static VALUE NullPointerErrorClass = Qnil; +static ID id_to_ptr = 0, id_plus = 0, id_call = 0; + +static VALUE +memory_allocate(VALUE klass) +{ + AbstractMemory* memory; + VALUE obj; + obj = Data_Make_Struct(klass, AbstractMemory, NULL, -1, memory); + memory->flags = MEM_RD | MEM_WR; + + return obj; +} +#define VAL(x, swap) (unlikely(((memory->flags & MEM_SWAP) != 0)) ? 
swap((x)) : (x)) + +#define NUM_OP(name, type, toNative, fromNative, swap) \ +static void memory_op_put_##name(AbstractMemory* memory, long off, VALUE value); \ +static void \ +memory_op_put_##name(AbstractMemory* memory, long off, VALUE value) \ +{ \ + type tmp = (type) VAL(toNative(value), swap); \ + checkWrite(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(memory->address + off, &tmp, sizeof(tmp)); \ +} \ +static VALUE memory_put_##name(VALUE self, VALUE offset, VALUE value); \ +static VALUE \ +memory_put_##name(VALUE self, VALUE offset, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, NUM2LONG(offset), value); \ + return self; \ +} \ +static VALUE memory_write_##name(VALUE self, VALUE value); \ +static VALUE \ +memory_write_##name(VALUE self, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, 0, value); \ + return self; \ +} \ +static VALUE memory_op_get_##name(AbstractMemory* memory, long off); \ +static VALUE \ +memory_op_get_##name(AbstractMemory* memory, long off) \ +{ \ + type tmp; \ + checkRead(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(&tmp, memory->address + off, sizeof(tmp)); \ + return fromNative(VAL(tmp, swap)); \ +} \ +static VALUE memory_get_##name(VALUE self, VALUE offset); \ +static VALUE \ +memory_get_##name(VALUE self, VALUE offset) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, NUM2LONG(offset)); \ +} \ +static VALUE memory_read_##name(VALUE self); \ +static VALUE \ +memory_read_##name(VALUE self) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, 0); \ +} \ +static MemoryOp memory_op_##name = { memory_op_get_##name, memory_op_put_##name }; \ +\ +static VALUE memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary); \ +static VALUE \ +memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary) \ +{ \ + long count = RARRAY_LEN(ary); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + long i; \ + checkWrite(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; i++) { \ + type tmp = (type) VAL(toNative(RARRAY_PTR(ary)[i]), swap); \ + memcpy(memory->address + off + (i * sizeof(type)), &tmp, sizeof(tmp)); \ + } \ + return self; \ +} \ +static VALUE memory_write_array_of_##name(VALUE self, VALUE ary); \ +static VALUE \ +memory_write_array_of_##name(VALUE self, VALUE ary) \ +{ \ + return memory_put_array_of_##name(self, INT2FIX(0), ary); \ +} \ +static VALUE memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length); \ +static VALUE \ +memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length) \ +{ \ + long count = NUM2LONG(length); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + VALUE retVal = rb_ary_new2(count); \ + long i; \ + checkRead(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; ++i) { \ + type tmp; \ + memcpy(&tmp, memory->address + off + (i * sizeof(type)), sizeof(tmp)); \ + rb_ary_push(retVal, fromNative(VAL(tmp, swap))); \ + } \ + return retVal; \ +} \ +static VALUE memory_read_array_of_##name(VALUE self, VALUE length); \ +static VALUE \ +memory_read_array_of_##name(VALUE self, VALUE length) \ +{ \ + return memory_get_array_of_##name(self, INT2FIX(0), length); \ 
+} + +#define NOSWAP(x) (x) +#define bswap16(x) (((x) >> 8) & 0xff) | (((x) << 8) & 0xff00); +static inline int16_t +SWAPS16(int16_t x) +{ + return bswap16(x); +} + +static inline uint16_t +SWAPU16(uint16_t x) +{ + return bswap16(x); +} + +#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#define bswap32(x) \ + (((x << 24) & 0xff000000) | \ + ((x << 8) & 0x00ff0000) | \ + ((x >> 8) & 0x0000ff00) | \ + ((x >> 24) & 0x000000ff)) + +#define bswap64(x) \ + (((x << 56) & 0xff00000000000000ULL) | \ + ((x << 40) & 0x00ff000000000000ULL) | \ + ((x << 24) & 0x0000ff0000000000ULL) | \ + ((x << 8) & 0x000000ff00000000ULL) | \ + ((x >> 8) & 0x00000000ff000000ULL) | \ + ((x >> 24) & 0x0000000000ff0000ULL) | \ + ((x >> 40) & 0x000000000000ff00ULL) | \ + ((x >> 56) & 0x00000000000000ffULL)) + +static inline int32_t +SWAPS32(int32_t x) +{ + return bswap32(x); +} + +static inline uint32_t +SWAPU32(uint32_t x) +{ + return bswap32(x); +} + +static inline int64_t +SWAPS64(int64_t x) +{ + return bswap64(x); +} + +static inline uint64_t +SWAPU64(uint64_t x) +{ + return bswap64(x); +} + +#else +# define SWAPS32(x) ((int32_t) __builtin_bswap32(x)) +# define SWAPU32(x) ((uint32_t) __builtin_bswap32(x)) +# define SWAPS64(x) ((int64_t) __builtin_bswap64(x)) +# define SWAPU64(x) ((uint64_t) __builtin_bswap64(x)) +#endif + +#if LONG_MAX > INT_MAX +# define SWAPSLONG SWAPS64 +# define SWAPULONG SWAPU64 +#else +# define SWAPSLONG SWAPS32 +# define SWAPULONG SWAPU32 +#endif + +NUM_OP(int8, int8_t, NUM2INT, INT2NUM, NOSWAP); +NUM_OP(uint8, uint8_t, NUM2UINT, UINT2NUM, NOSWAP); +NUM_OP(int16, int16_t, NUM2INT, INT2NUM, SWAPS16); +NUM_OP(uint16, uint16_t, NUM2UINT, UINT2NUM, SWAPU16); +NUM_OP(int32, int32_t, NUM2INT, INT2NUM, SWAPS32); +NUM_OP(uint32, uint32_t, NUM2UINT, UINT2NUM, SWAPU32); +NUM_OP(int64, int64_t, NUM2LL, LL2NUM, SWAPS64); +NUM_OP(uint64, uint64_t, NUM2ULL, ULL2NUM, SWAPU64); +NUM_OP(long, long, NUM2LONG, LONG2NUM, SWAPSLONG); +NUM_OP(ulong, unsigned long, NUM2ULONG, ULONG2NUM, SWAPULONG); +NUM_OP(float32, float, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(float64, double, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(longdouble, long double, rbffi_num2longdouble, rbffi_longdouble_new, NOSWAP); + +static inline void* +get_pointer_value(VALUE value) +{ + const int type = TYPE(value); + if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_PointerClass)) { + return memory_address(value); + } else if (type == T_NIL) { + return NULL; + } else if (type == T_FIXNUM) { + return (void *) (uintptr_t) FIX2ULONG(value); + } else if (type == T_BIGNUM) { + return (void *) (uintptr_t) NUM2ULL(value); + } else if (rb_respond_to(value, id_to_ptr)) { + return MEMORY_PTR(rb_funcall2(value, id_to_ptr, 0, NULL)); + } else { + rb_raise(rb_eArgError, "value is not a pointer"); + return NULL; + } +} + +NUM_OP(pointer, void *, get_pointer_value, rbffi_Pointer_NewInstance, NOSWAP); + +static inline uint8_t +rbffi_bool_value(VALUE value) +{ + return RTEST(value); +} + +static inline VALUE +rbffi_bool_new(uint8_t value) +{ + return (value & 1) != 0 ? Qtrue : Qfalse; +} + +NUM_OP(bool, unsigned char, rbffi_bool_value, rbffi_bool_new, NOSWAP); + + +/* + * call-seq: memory.clear + * Set the memory to all-zero. 
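The NUM_OP macro family above is what generates every put_*/get_*, read_*/write_*, and *_array_of_* accessor the Ruby API exposes, and memory_clear (documented just above) backs AbstractMemory#clear. A minimal Ruby sketch of the generated accessors, using FFI::MemoryPointer as a convenient concrete AbstractMemory subclass (it is defined elsewhere in the gem, not in this file):

require 'ffi'

mem = FFI::MemoryPointer.new(:uint8, 8)              # 8 bytes, zero-filled
mem.write_array_of_uint8([1, 2, 3, 4, 5, 6, 7, 8])   # backed by NUM_OP(uint8, ...)
mem.get_uint8(3)                                     # => 4
mem.clear                                            # memset the whole region to zero
mem.read_array_of_uint8(8)                           # => [0, 0, 0, 0, 0, 0, 0, 0]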
+ * @return [self] + */ +static VALUE +memory_clear(VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + memset(ptr->address, 0, ptr->size); + return self; +} + +/* + * call-seq: memory.size + * Return memory size in bytes (alias: #total) + * @return [Numeric] + */ +static VALUE +memory_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return LONG2NUM(ptr->size); +} + +/* + * call-seq: memory.get(type, offset) + * Return data of given type contained in memory. + * @param [Symbol, Type] type_name type of data to get + * @param [Numeric] offset point in buffer to start from + * @return [Object] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_get(VALUE self, VALUE type_name, VALUE offset) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + return op->get(ptr, NUM2LONG(offset)); + +undefined_type: { + VALUE msg = rb_sprintf("undefined type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.put(type, offset, value) + * @param [Symbol, Type] type_name type of data to put + * @param [Numeric] offset point in buffer to start from + * @return [nil] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_put(VALUE self, VALUE type_name, VALUE offset, VALUE value) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + op->put(ptr, NUM2LONG(offset), value); + return Qnil; + +undefined_type: { + VALUE msg = rb_sprintf("unsupported type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.get_string(offset, length=nil) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_string(int argc, VALUE* argv, VALUE self) +{ + VALUE length = Qnil, offset = Qnil; + AbstractMemory* ptr = MEMORY(self); + long off, len; + char* end; + int nargs = rb_scan_args(argc, argv, "11", &offset, &length); + + off = NUM2LONG(offset); + len = nargs > 1 && length != Qnil ? NUM2LONG(length) : (ptr->size - off); + checkRead(ptr); + checkBounds(ptr, off, len); + + end = memchr(ptr->address + off, 0, len); + return rb_str_new((char *) ptr->address + off, + (end != NULL ? end - ptr->address - off : len)); +} + +/* + * call-seq: memory.get_array_of_string(offset, count=nil) + * Return an array of strings contained in memory. + * @param [Numeric] offset point in memory to start from + * @param [Numeric] count number of strings to get. 
If nil, return all strings + * @return [Array] + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE offset = Qnil, countnum = Qnil, retVal = Qnil; + AbstractMemory* ptr; + long off; + int count; + + rb_scan_args(argc, argv, "11", &offset, &countnum); + off = NUM2LONG(offset); + count = (countnum == Qnil ? 0 : NUM2INT(countnum)); + retVal = rb_ary_new2(count); + + Data_Get_Struct(self, AbstractMemory, ptr); + checkRead(ptr); + + if (countnum != Qnil) { + int i; + + checkBounds(ptr, off, count * sizeof (char*)); + + for (i = 0; i < count; ++i) { + const char* strptr = *((const char**) (ptr->address + off) + i); + rb_ary_push(retVal, (strptr == NULL ? Qnil : rb_str_new2(strptr))); + } + + } else { + checkBounds(ptr, off, sizeof (char*)); + for ( ; off < ptr->size - (long) sizeof (void *); off += (long) sizeof (void *)) { + const char* strptr = *(const char**) (ptr->address + off); + if (strptr == NULL) { + break; + } + rb_ary_push(retVal, rb_str_new2(strptr)); + } + } + + return retVal; +} + +/* + * call-seq: memory.read_array_of_string(count=nil) + * Return an array of strings contained in memory. Same as: + * memory.get_array_of_string(0, count) + * @param [Numeric] count number of strings to get. If nil, return all strings + * @return [Array] + */ +static VALUE +memory_read_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE* rargv = ALLOCA_N(VALUE, argc + 1); + int i; + + rargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + rargv[i + 1] = argv[i]; + } + + return memory_get_array_of_string(argc + 1, rargv, self); +} + + +/* + * call-seq: memory.put_string(offset, str) + * @param [Numeric] offset + * @param [String] str + * @return [self] + * @raise {SecurityError} when writing unsafe string to memory + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + * Put a string in memory. + */ +static VALUE +memory_put_string(VALUE self, VALUE offset, VALUE str) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + Check_Type(str, T_STRING); + off = NUM2LONG(offset); + len = RSTRING_LEN(str); + + checkWrite(ptr); + checkBounds(ptr, off, len + 1); + + memcpy(ptr->address + off, RSTRING_PTR(str), len); + *((char *) ptr->address + off + len) = '\0'; + + return self; +} + +/* + * call-seq: memory.get_bytes(offset, length) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. + * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_bytes(VALUE self, VALUE offset, VALUE length) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + off = NUM2LONG(offset); + len = NUM2LONG(length); + + checkRead(ptr); + checkBounds(ptr, off, len); + + return rb_str_new((char *) ptr->address + off, len); +} + +/* + * call-seq: memory.put_bytes(offset, str, index=0, length=nil) + * Put a string in memory. + * @param [Numeric] offset point in buffer to start from + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). 
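A short Ruby sketch of the string and byte accessors implemented in this block (get_string/put_string and get_bytes/put_bytes), again assuming FFI::MemoryPointer as the concrete subclass:

require 'ffi'

mem = FFI::MemoryPointer.new(:char, 16)
mem.put_string(0, "hello")    # copies the bytes and appends a terminating NUL
mem.get_string(0)             # => "hello"  (stops at the first NUL byte)
mem.put_bytes(6, "world")     # raw bytes, no NUL terminator added
mem.get_bytes(6, 5)           # => "world"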
+ * @return [self] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + * @raise {RangeError} if +index+ is negative, or if index+length is greater than size of string + * @raise {SecurityError} when writing unsafe string to memory + */ +static VALUE +memory_put_bytes(int argc, VALUE* argv, VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + VALUE offset = Qnil, str = Qnil, rbIndex = Qnil, rbLength = Qnil; + long off, len, idx; + int nargs = rb_scan_args(argc, argv, "22", &offset, &str, &rbIndex, &rbLength); + + Check_Type(str, T_STRING); + + off = NUM2LONG(offset); + idx = nargs > 2 ? NUM2LONG(rbIndex) : 0; + if (idx < 0) { + rb_raise(rb_eRangeError, "index cannot be less than zero"); + return Qnil; + } + len = nargs > 3 ? NUM2LONG(rbLength) : (RSTRING_LEN(str) - idx); + if ((idx + len) > RSTRING_LEN(str)) { + rb_raise(rb_eRangeError, "index+length is greater than size of string"); + return Qnil; + } + + checkWrite(ptr); + checkBounds(ptr, off, len); + + memcpy(ptr->address + off, RSTRING_PTR(str) + idx, len); + + return self; +} + +/* + * call-seq: memory.read_bytes(length) + * @param [Numeric] length of string to return + * @return [String] + * equivalent to : + * memory.get_bytes(0, length) + */ +static VALUE +memory_read_bytes(VALUE self, VALUE length) +{ + return memory_get_bytes(self, INT2FIX(0), length); +} + +/* + * call-seq: memory.write_bytes(str, index=0, length=nil) + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [self] + * equivalent to : + * memory.put_bytes(0, str, index, length) + */ +static VALUE +memory_write_bytes(int argc, VALUE* argv, VALUE self) +{ + VALUE* wargv = ALLOCA_N(VALUE, argc + 1); + int i; + + wargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + wargv[i + 1] = argv[i]; + } + + return memory_put_bytes(argc + 1, wargv, self); +} + +/* + * call-seq: memory.type_size + * @return [Numeric] type size in bytes + * Get the memory's type size. + */ +static VALUE +memory_type_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return INT2NUM(ptr->typeSize); +} + +/* + * Document-method: [] + * call-seq: memory[idx] + * @param [Numeric] idx index to access in memory + * @return + * Memory read accessor. + */ +static VALUE +memory_aref(VALUE self, VALUE idx) +{ + AbstractMemory* ptr; + VALUE rbOffset = Qnil; + + Data_Get_Struct(self, AbstractMemory, ptr); + + rbOffset = ULONG2NUM(NUM2ULONG(idx) * ptr->typeSize); + + return rb_funcall2(self, id_plus, 1, &rbOffset); +} + +static inline char* +memory_address(VALUE obj) +{ + return ((AbstractMemory *) DATA_PTR(obj))->address; +} + +static VALUE +memory_copy_from(VALUE self, VALUE rbsrc, VALUE rblen) +{ + AbstractMemory* dst; + + Data_Get_Struct(self, AbstractMemory, dst); + + memcpy(dst->address, rbffi_AbstractMemory_Cast(rbsrc, rbffi_AbstractMemoryClass)->address, NUM2INT(rblen)); + + return self; +} + +AbstractMemory* +rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass) +{ + if (rb_obj_is_kind_of(obj, klass)) { + AbstractMemory* memory; + Data_Get_Struct(obj, AbstractMemory, memory); + return memory; + } + + rb_raise(rb_eArgError, "Invalid Memory object"); + return NULL; +} + +void +rbffi_AbstractMemory_Error(AbstractMemory *mem, int op) +{ + VALUE rbErrorClass = mem->address == NULL ? 
NullPointerErrorClass : rb_eRuntimeError; + if (op == MEM_RD) { + rb_raise(rbErrorClass, "invalid memory read at address=%p", mem->address); + } else if (op == MEM_WR) { + rb_raise(rbErrorClass, "invalid memory write at address=%p", mem->address); + } else { + rb_raise(rbErrorClass, "invalid memory access at address=%p", mem->address); + } +} + +static VALUE +memory_op_get_strptr(AbstractMemory* ptr, long offset) +{ + void* tmp = NULL; + + if (ptr != NULL && ptr->address != NULL) { + checkRead(ptr); + checkBounds(ptr, offset, sizeof(tmp)); + memcpy(&tmp, ptr->address + offset, sizeof(tmp)); + } + + return tmp != NULL ? rb_str_new2(tmp) : Qnil; +} + +static void +memory_op_put_strptr(AbstractMemory* ptr, long offset, VALUE value) +{ + rb_raise(rb_eArgError, "Cannot set :string fields"); +} + +static MemoryOp memory_op_strptr = { memory_op_get_strptr, memory_op_put_strptr }; + + +MemoryOps rbffi_AbstractMemoryOps = { + &memory_op_int8, /*.int8 */ + &memory_op_uint8, /* .uint8 */ + &memory_op_int16, /* .int16 */ + &memory_op_uint16, /* .uint16 */ + &memory_op_int32, /* .int32 */ + &memory_op_uint32, /* .uint32 */ + &memory_op_int64, /* .int64 */ + &memory_op_uint64, /* .uint64 */ + &memory_op_long, /* .slong */ + &memory_op_ulong, /* .uslong */ + &memory_op_float32, /* .float32 */ + &memory_op_float64, /* .float64 */ + &memory_op_longdouble, /* .longdouble */ + &memory_op_pointer, /* .pointer */ + &memory_op_strptr, /* .strptr */ + &memory_op_bool /* .boolOp */ +}; + +void +rbffi_AbstractMemory_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::AbstractMemory + * + * {AbstractMemory} is the base class for many memory management classes such as {Buffer}. + * + * This class has a lot of methods to work with integers : + * * put_intsize(offset, value) + * * get_intsize(offset) + * * put_uintsize(offset, value) + * * get_uintsize(offset) + * * writeuintsize(value) + * * read_intsize + * * write_uintsize(value) + * * read_uintsize + * * put_array_of_intsize(offset, ary) + * * get_array_of_intsize(offset, length) + * * put_array_of_uintsize(offset, ary) + * * get_array_of_uintsize(offset, length) + * * write_array_of_intsize(ary) + * * read_array_of_intsize(length) + * * write_array_of_uintsize(ary) + * * read_array_of_uintsize(length) + * where _size_ is 8, 16, 32 or 64. Same methods exist for long type. + * + * Aliases exist : _char_ for _int8_, _short_ for _int16_, _int_ for _int32_ and long_long for _int64_. + * + * Others methods are listed below. 
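The generic #get and #put entry points defined earlier dispatch on a type name through rbffi_Type_Lookup and get_memory_op, so the same cell can be read either generically or through the sized accessors and aliases registered below. A hedged Ruby sketch (MemoryPointer assumed):

require 'ffi'

mem = FFI::MemoryPointer.new(:int32, 2)
mem.put(:int32, 0, 42)     # generic put, dispatched through the MemoryOps table
mem.get(:int32, 0)         # => 42
mem.put_int(4, -7)         # :int is an alias of :int32 (see the ALIAS block below)
mem.get_int32(4)           # => -7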
+ */ + VALUE classMemory = rb_define_class_under(moduleFFI, "AbstractMemory", rb_cObject); + rbffi_AbstractMemoryClass = classMemory; + /* + * Document-variable: FFI::AbstractMemory + */ + rb_global_variable(&rbffi_AbstractMemoryClass); + rb_define_alloc_func(classMemory, memory_allocate); + + NullPointerErrorClass = rb_define_class_under(moduleFFI, "NullPointerError", rb_eRuntimeError); + /* Document-variable: NullPointerError */ + rb_global_variable(&NullPointerErrorClass); + + +#undef INT +#define INT(type) \ + rb_define_method(classMemory, "put_" #type, memory_put_##type, 2); \ + rb_define_method(classMemory, "get_" #type, memory_get_##type, 1); \ + rb_define_method(classMemory, "put_u" #type, memory_put_u##type, 2); \ + rb_define_method(classMemory, "get_u" #type, memory_get_u##type, 1); \ + rb_define_method(classMemory, "write_" #type, memory_write_##type, 1); \ + rb_define_method(classMemory, "read_" #type, memory_read_##type, 0); \ + rb_define_method(classMemory, "write_u" #type, memory_write_u##type, 1); \ + rb_define_method(classMemory, "read_u" #type, memory_read_u##type, 0); \ + rb_define_method(classMemory, "put_array_of_" #type, memory_put_array_of_##type, 2); \ + rb_define_method(classMemory, "get_array_of_" #type, memory_get_array_of_##type, 2); \ + rb_define_method(classMemory, "put_array_of_u" #type, memory_put_array_of_u##type, 2); \ + rb_define_method(classMemory, "get_array_of_u" #type, memory_get_array_of_u##type, 2); \ + rb_define_method(classMemory, "write_array_of_" #type, memory_write_array_of_##type, 1); \ + rb_define_method(classMemory, "read_array_of_" #type, memory_read_array_of_##type, 1); \ + rb_define_method(classMemory, "write_array_of_u" #type, memory_write_array_of_u##type, 1); \ + rb_define_method(classMemory, "read_array_of_u" #type, memory_read_array_of_u##type, 1); + + INT(int8); + INT(int16); + INT(int32); + INT(int64); + INT(long); + +#define ALIAS(name, old) \ + rb_define_alias(classMemory, "put_" #name, "put_" #old); \ + rb_define_alias(classMemory, "get_" #name, "get_" #old); \ + rb_define_alias(classMemory, "put_u" #name, "put_u" #old); \ + rb_define_alias(classMemory, "get_u" #name, "get_u" #old); \ + rb_define_alias(classMemory, "write_" #name, "write_" #old); \ + rb_define_alias(classMemory, "read_" #name, "read_" #old); \ + rb_define_alias(classMemory, "write_u" #name, "write_u" #old); \ + rb_define_alias(classMemory, "read_u" #name, "read_u" #old); \ + rb_define_alias(classMemory, "put_array_of_" #name, "put_array_of_" #old); \ + rb_define_alias(classMemory, "get_array_of_" #name, "get_array_of_" #old); \ + rb_define_alias(classMemory, "put_array_of_u" #name, "put_array_of_u" #old); \ + rb_define_alias(classMemory, "get_array_of_u" #name, "get_array_of_u" #old); \ + rb_define_alias(classMemory, "write_array_of_" #name, "write_array_of_" #old); \ + rb_define_alias(classMemory, "read_array_of_" #name, "read_array_of_" #old); \ + rb_define_alias(classMemory, "write_array_of_u" #name, "write_array_of_u" #old); \ + rb_define_alias(classMemory, "read_array_of_u" #name, "read_array_of_u" #old); + + ALIAS(char, int8); + ALIAS(short, int16); + ALIAS(int, int32); + ALIAS(long_long, int64); + + /* + * Document-method: put_float32 + * call-seq: memory.put_float32offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 32-bit float in memory at offset +offset+ (alias: #put_float). 
+ */ + rb_define_method(classMemory, "put_float32", memory_put_float32, 2); + /* + * Document-method: get_float32 + * call-seq: memory.get_float32(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 32-bit float from memory at offset +offset+ (alias: #get_float). + */ + rb_define_method(classMemory, "get_float32", memory_get_float32, 1); + rb_define_alias(classMemory, "put_float", "put_float32"); + rb_define_alias(classMemory, "get_float", "get_float32"); + /* + * Document-method: write_float + * call-seq: memory.write_float(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 32-bit float in memory. + * + * Same as: + * memory.put_float(0, value) + */ + rb_define_method(classMemory, "write_float", memory_write_float32, 1); + /* + * Document-method: read_float + * call-seq: memory.read_float + * @return [Float] + * Read a 32-bit float from memory. + * + * Same as: + * memory.get_float(0) + */ + rb_define_method(classMemory, "read_float", memory_read_float32, 0); + /* + * Document-method: put_array_of_float32 + * call-seq: memory.put_array_of_float32(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 32-bit floats in memory from offset +offset+ (alias: #put_array_of_float). + */ + rb_define_method(classMemory, "put_array_of_float32", memory_put_array_of_float32, 2); + /* + * Document-method: get_array_of_float32 + * call-seq: memory.get_array_of_float32(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 32-bit floats in memory from offset +offset+ (alias: #get_array_of_float). + */ + rb_define_method(classMemory, "get_array_of_float32", memory_get_array_of_float32, 2); + /* + * Document-method: write_array_of_float + * call-seq: memory.write_array_of_float(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 32-bit floats in memory. + * + * Same as: + * memory.put_array_of_float(0, ary) + */ + rb_define_method(classMemory, "write_array_of_float", memory_write_array_of_float32, 1); + /* + * Document-method: read_array_of_float + * call-seq: memory.read_array_of_float(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 32-bit floats from memory. + * + * Same as: + * memory.get_array_of_float(0, ary) + */ + rb_define_method(classMemory, "read_array_of_float", memory_read_array_of_float32, 1); + rb_define_alias(classMemory, "put_array_of_float", "put_array_of_float32"); + rb_define_alias(classMemory, "get_array_of_float", "get_array_of_float32"); + /* + * Document-method: put_float64 + * call-seq: memory.put_float64(offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 64-bit float (double) in memory at offset +offset+ (alias: #put_double). + */ + rb_define_method(classMemory, "put_float64", memory_put_float64, 2); + /* + * Document-method: get_float64 + * call-seq: memory.get_float64(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 64-bit float (double) from memory at offset +offset+ (alias: #get_double). + */ + rb_define_method(classMemory, "get_float64", memory_get_float64, 1); + rb_define_alias(classMemory, "put_double", "put_float64"); + rb_define_alias(classMemory, "get_double", "get_float64"); + /* + * Document-method: write_double + * call-seq: memory.write_double(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 64-bit float (double) in memory. 
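The 32- and 64-bit float accessors registered in this block behave like the integer ones; a short Ruby sketch (MemoryPointer assumed):

require 'ffi'

mem = FFI::MemoryPointer.new(:double, 2)
mem.put_float64(0, 3.14)   # alias: put_double
mem.get_double(0)          # => 3.14
mem.write_double(2.5)      # same as put_double(0, 2.5)
mem.read_double            # => 2.5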
+ * + * Same as: + * memory.put_double(0, value) + */ + rb_define_method(classMemory, "write_double", memory_write_float64, 1); + /* + * Document-method: read_double + * call-seq: memory.read_double + * @return [Float] + * Read a 64-bit float (double) from memory. + * + * Same as: + * memory.get_double(0) + */ + rb_define_method(classMemory, "read_double", memory_read_float64, 0); + /* + * Document-method: put_array_of_float64 + * call-seq: memory.put_array_of_float64(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 64-bit floats (doubles) in memory from offset +offset+ (alias: #put_array_of_double). + */ + rb_define_method(classMemory, "put_array_of_float64", memory_put_array_of_float64, 2); + /* + * Document-method: get_array_of_float64 + * call-seq: memory.get_array_of_float64(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 64-bit floats (doubles) in memory from offset +offset+ (alias: #get_array_of_double). + */ + rb_define_method(classMemory, "get_array_of_float64", memory_get_array_of_float64, 2); + /* + * Document-method: write_array_of_double + * call-seq: memory.write_array_of_double(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 64-bit floats (doubles) in memory. + * + * Same as: + * memory.put_array_of_double(0, ary) + */ + rb_define_method(classMemory, "write_array_of_double", memory_write_array_of_float64, 1); + /* + * Document-method: read_array_of_double + * call-seq: memory.read_array_of_double(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 64-bit floats (doubles) from memory. + * + * Same as: + * memory.get_array_of_double(0, ary) + */ + rb_define_method(classMemory, "read_array_of_double", memory_read_array_of_float64, 1); + rb_define_alias(classMemory, "put_array_of_double", "put_array_of_float64"); + rb_define_alias(classMemory, "get_array_of_double", "get_array_of_float64"); + /* + * Document-method: put_pointer + * call-seq: memory.put_pointer(offset, value) + * @param [Numeric] offset + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Put +value+ in memory from +offset+.. + */ + rb_define_method(classMemory, "put_pointer", memory_put_pointer, 2); + /* + * Document-method: get_pointer + * call-seq: memory.get_pointer(offset) + * @param [Numeric] offset + * @return [Pointer] + * Get a {Pointer} to the memory from +offset+. + */ + rb_define_method(classMemory, "get_pointer", memory_get_pointer, 1); + /* + * Document-method: write_pointer + * call-seq: memory.write_pointer(value) + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Write +value+ in memory. + * + * Equivalent to: + * memory.put_pointer(0, value) + */ + rb_define_method(classMemory, "write_pointer", memory_write_pointer, 1); + /* + * Document-method: read_pointer + * call-seq: memory.read_pointer + * @return [Pointer] + * Get a {Pointer} to the memory from base address. + * + * Equivalent to: + * memory.get_pointer(0) + */ + rb_define_method(classMemory, "read_pointer", memory_read_pointer, 0); + /* + * Document-method: put_array_of_pointer + * call-seq: memory.put_array_of_pointer(offset, ary) + * @param [Numeric] offset + * @param [Array<#to_ptr>] ary + * @return [self] + * Put an array of {Pointer} into memory from +offset+. 
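A brief Ruby sketch of the pointer accessors registered below (put_array_of_pointer, get_array_of_pointer, read_pointer); note that get_pointer_value above also accepts nil, Integers, and anything responding to #to_ptr:

require 'ffi'

a = FFI::MemoryPointer.new(:int32, 1)
b = FFI::MemoryPointer.new(:int32, 1)
table = FFI::MemoryPointer.new(:pointer, 2)
table.put_array_of_pointer(0, [a, b])
table.get_array_of_pointer(0, 2)          # => two FFI::Pointer objects
table.read_pointer.address == a.address   # => true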
+ */ + rb_define_method(classMemory, "put_array_of_pointer", memory_put_array_of_pointer, 2); + /* + * Document-method: get_array_of_pointer + * call-seq: memory.get_array_of_pointer(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Array] + * Get an array of {Pointer} of length +length+ from +offset+. + */ + rb_define_method(classMemory, "get_array_of_pointer", memory_get_array_of_pointer, 2); + /* + * Document-method: write_array_of_pointer + * call-seq: memory.write_array_of_pointer(ary) + * @param [Array<#to_ptr>] ary + * @return [self] + * Write an array of {Pointer} into memory from +offset+. + * + * Same as : + * memory.put_array_of_pointer(0, ary) + */ + rb_define_method(classMemory, "write_array_of_pointer", memory_write_array_of_pointer, 1); + /* + * Document-method: read_array_of_pointer + * call-seq: memory.read_array_of_pointer(length) + * @param [Numeric] length + * @return [Array] + * Read an array of {Pointer} of length +length+. + * + * Same as: + * memory.get_array_of_pointer(0, length) + */ + rb_define_method(classMemory, "read_array_of_pointer", memory_read_array_of_pointer, 1); + + rb_define_method(classMemory, "get_string", memory_get_string, -1); + rb_define_method(classMemory, "put_string", memory_put_string, 2); + rb_define_method(classMemory, "get_bytes", memory_get_bytes, 2); + rb_define_method(classMemory, "put_bytes", memory_put_bytes, -1); + rb_define_method(classMemory, "read_bytes", memory_read_bytes, 1); + rb_define_method(classMemory, "write_bytes", memory_write_bytes, -1); + rb_define_method(classMemory, "get_array_of_string", memory_get_array_of_string, -1); + + rb_define_method(classMemory, "get", memory_get, 2); + rb_define_method(classMemory, "put", memory_put, 3); + + rb_define_method(classMemory, "clear", memory_clear, 0); + rb_define_method(classMemory, "total", memory_size, 0); + rb_define_alias(classMemory, "size", "total"); + rb_define_method(classMemory, "type_size", memory_type_size, 0); + rb_define_method(classMemory, "[]", memory_aref, 1); + rb_define_method(classMemory, "__copy_from__", memory_copy_from, 2); + + id_to_ptr = rb_intern("to_ptr"); + id_call = rb_intern("call"); + id_plus = rb_intern("+"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.h new file mode 100644 index 0000000..1119288 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/AbstractMemory.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_ABSTRACTMEMORY_H +#define RBFFI_ABSTRACTMEMORY_H + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _MSC_VER +#include +#endif + +#include "compat.h" +#include "Types.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#define MEM_RD 0x01 +#define MEM_WR 0x02 +#define MEM_CODE 0x04 +#define MEM_SWAP 0x08 +#define MEM_EMBED 0x10 + +typedef struct AbstractMemory_ AbstractMemory; + +typedef struct { + VALUE (*get)(AbstractMemory* ptr, long offset); + void (*put)(AbstractMemory* ptr, long offset, VALUE value); +} MemoryOp; + +typedef struct { + MemoryOp* int8; + MemoryOp* uint8; + MemoryOp* int16; + MemoryOp* uint16; + MemoryOp* int32; + MemoryOp* uint32; + MemoryOp* int64; + MemoryOp* uint64; + MemoryOp* slong; + MemoryOp* uslong; + MemoryOp* float32; + MemoryOp* float64; + MemoryOp* longdouble; + MemoryOp* pointer; + MemoryOp* strptr; + MemoryOp* boolOp; +} MemoryOps; + +struct AbstractMemory_ { + char* address; /* Use char* instead of void* to ensure adding to it works correctly */ + long size; + int flags; + int typeSize; +}; + + +extern VALUE rbffi_AbstractMemoryClass; +extern MemoryOps rbffi_AbstractMemoryOps; + +extern void rbffi_AbstractMemory_Init(VALUE ffiModule); + +extern AbstractMemory* rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass); + +extern void rbffi_AbstractMemory_Error(AbstractMemory *, int op); + +static inline void +checkBounds(AbstractMemory* mem, long off, long len) +{ + if (unlikely((off | len | (off + len) | (mem->size - (off + len))) < 0)) { + rb_raise(rb_eIndexError, "Memory access offset=%ld size=%ld is out of bounds", + off, len); + } +} + +static inline void +checkRead(AbstractMemory* mem) +{ + if (unlikely((mem->flags & MEM_RD) == 0)) { + rbffi_AbstractMemory_Error(mem, MEM_RD); + } +} + +static inline void +checkWrite(AbstractMemory* mem) +{ + if (unlikely((mem->flags & MEM_WR) == 0)) { + rbffi_AbstractMemory_Error(mem, MEM_WR); + } +} + +static inline MemoryOp* +get_memory_op(Type* type) +{ + switch (type->nativeType) { + case NATIVE_INT8: + return rbffi_AbstractMemoryOps.int8; + case NATIVE_UINT8: + return rbffi_AbstractMemoryOps.uint8; + case NATIVE_INT16: + return rbffi_AbstractMemoryOps.int16; + case NATIVE_UINT16: + return rbffi_AbstractMemoryOps.uint16; + case NATIVE_INT32: + return rbffi_AbstractMemoryOps.int32; + case NATIVE_UINT32: + return rbffi_AbstractMemoryOps.uint32; + case NATIVE_INT64: + return rbffi_AbstractMemoryOps.int64; + case NATIVE_UINT64: + return rbffi_AbstractMemoryOps.uint64; + case NATIVE_LONG: + return rbffi_AbstractMemoryOps.slong; + case NATIVE_ULONG: + return rbffi_AbstractMemoryOps.uslong; + case NATIVE_FLOAT32: + return rbffi_AbstractMemoryOps.float32; + case NATIVE_FLOAT64: + return rbffi_AbstractMemoryOps.float64; + case NATIVE_LONGDOUBLE: + 
return rbffi_AbstractMemoryOps.longdouble; + case NATIVE_POINTER: + return rbffi_AbstractMemoryOps.pointer; + case NATIVE_STRING: + return rbffi_AbstractMemoryOps.strptr; + case NATIVE_BOOL: + return rbffi_AbstractMemoryOps.boolOp; + default: + return NULL; + } +} + +#define MEMORY(obj) rbffi_AbstractMemory_Cast((obj), rbffi_AbstractMemoryClass) +#define MEMORY_PTR(obj) MEMORY((obj))->address +#define MEMORY_LEN(obj) MEMORY((obj))->size + + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_ABSTRACTMEMORY_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.c new file mode 100644 index 0000000..bfd666a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include "ArrayType.h" + +static VALUE array_type_s_allocate(VALUE klass); +static VALUE array_type_initialize(VALUE self, VALUE rbComponentType, VALUE rbLength); +static void array_type_mark(ArrayType *); +static void array_type_free(ArrayType *); + +VALUE rbffi_ArrayTypeClass = Qnil; + +static VALUE +array_type_s_allocate(VALUE klass) +{ + ArrayType* array; + VALUE obj; + + obj = Data_Make_Struct(klass, ArrayType, array_type_mark, array_type_free, array); + + array->base.nativeType = NATIVE_ARRAY; + array->base.ffiType = xcalloc(1, sizeof(*array->base.ffiType)); + array->base.ffiType->type = FFI_TYPE_STRUCT; + array->base.ffiType->size = 0; + array->base.ffiType->alignment = 0; + array->rbComponentType = Qnil; + + return obj; +} + +static void +array_type_mark(ArrayType *array) +{ + rb_gc_mark(array->rbComponentType); +} + +static void +array_type_free(ArrayType *array) +{ + xfree(array->base.ffiType); + xfree(array->ffiTypes); + xfree(array); +} + + +/* + * call-seq: initialize(component_type, length) + * @param [Type] component_type + * @param [Numeric] length + * @return [self] + * A new instance of ArrayType. + */ +static VALUE +array_type_initialize(VALUE self, VALUE rbComponentType, VALUE rbLength) +{ + ArrayType* array; + int i; + + Data_Get_Struct(self, ArrayType, array); + + array->length = NUM2UINT(rbLength); + array->rbComponentType = rbComponentType; + Data_Get_Struct(rbComponentType, Type, array->componentType); + + array->ffiTypes = xcalloc(array->length + 1, sizeof(*array->ffiTypes)); + array->base.ffiType->elements = array->ffiTypes; + array->base.ffiType->size = array->componentType->ffiType->size * array->length; + array->base.ffiType->alignment = array->componentType->ffiType->alignment; + + for (i = 0; i < array->length; ++i) { + array->ffiTypes[i] = array->componentType->ffiType; + } + + return self; +} + +/* + * call-seq: length + * @return [Numeric] + * Get array's length + */ +static VALUE +array_type_length(VALUE self) +{ + ArrayType* array; + + Data_Get_Struct(self, ArrayType, array); + + return UINT2NUM(array->length); +} + +/* + * call-seq: element_type + * @return [Type] + * Get element type. + */ +static VALUE +array_type_element_type(VALUE self) +{ + ArrayType* array; + + Data_Get_Struct(self, ArrayType, array); + + return array->rbComponentType; +} + +void +rbffi_ArrayType_Init(VALUE moduleFFI) +{ + VALUE ffi_Type; + + ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::ArrayType < FFI::Type + * + * This is a typed array. The type is a {NativeType native type}. 
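A brief Ruby sketch of the ArrayType class documented here; the reader methods correspond to the rb_define_method calls that follow, and the struct-layout form is where array types normally appear in practice:

require 'ffi'

t = FFI::ArrayType.new(FFI::Type::INT32, 4)   # also reachable as FFI::Type::Array
t.length      # => 4
t.elem_type   # => FFI::Type::INT32

class Vec4 < FFI::Struct
  layout :values, [:int32, 4]   # inline array member built on ArrayType
end
Vec4.size     # => 16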
+ */ + rbffi_ArrayTypeClass = rb_define_class_under(moduleFFI, "ArrayType", ffi_Type); + /* + * Document-variable: FFI::ArrayType + */ + rb_global_variable(&rbffi_ArrayTypeClass); + /* + * Document-constant: FFI::Type::Array + */ + rb_define_const(ffi_Type, "Array", rbffi_ArrayTypeClass); + + rb_define_alloc_func(rbffi_ArrayTypeClass, array_type_s_allocate); + rb_define_method(rbffi_ArrayTypeClass, "initialize", array_type_initialize, 2); + rb_define_method(rbffi_ArrayTypeClass, "length", array_type_length, 0); + rb_define_method(rbffi_ArrayTypeClass, "elem_type", array_type_element_type, 0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.h new file mode 100644 index 0000000..356ffb1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ArrayType.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_ARRAYTYPE_H +#define RBFFI_ARRAYTYPE_H + +#include +#include +#include "Type.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct ArrayType_ { + Type base; + int length; + ffi_type** ffiTypes; + Type* componentType; + VALUE rbComponentType; +} ArrayType; + +extern void rbffi_ArrayType_Init(VALUE moduleFFI); +extern VALUE rbffi_ArrayTypeClass; + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_ARRAYTYPE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Buffer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Buffer.c new file mode 100644 index 0000000..6863dda --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Buffer.c @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (C) 2009 Aman Gupta + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include +#include "rbffi.h" +#include "rbffi_endian.h" +#include "AbstractMemory.h" + +#define BUFFER_EMBED_MAXLEN (8) +typedef struct Buffer { + AbstractMemory memory; + + union { + VALUE rbParent; /* link to parent buffer */ + char* storage; /* start of malloc area */ + long embed[BUFFER_EMBED_MAXLEN / sizeof(long)]; /* storage for tiny allocations */ + } data; +} Buffer; + +static VALUE buffer_allocate(VALUE klass); +static VALUE buffer_initialize(int argc, VALUE* argv, VALUE self); +static void buffer_release(Buffer* ptr); +static void buffer_mark(Buffer* ptr); +static VALUE buffer_free(VALUE self); + +static VALUE BufferClass = Qnil; + +static VALUE +buffer_allocate(VALUE klass) +{ + Buffer* buffer; + VALUE obj; + + obj = Data_Make_Struct(klass, Buffer, NULL, buffer_release, buffer); + buffer->data.rbParent = Qnil; + buffer->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +static void +buffer_release(Buffer* ptr) +{ + if ((ptr->memory.flags & MEM_EMBED) == 0 && ptr->data.storage != NULL) { + xfree(ptr->data.storage); + ptr->data.storage = NULL; + } + + xfree(ptr); +} + +/* + * call-seq: initialize(size, count=1, clear=false) + * @param [Integer, Symbol, #size] Type or size in bytes of a buffer cell + * @param [Fixnum] count number of cell in the Buffer + * @param [Boolean] clear if true, set the buffer to all-zero + * @return [self] + * @raise {NoMemoryError} if failed to allocate memory for Buffer + * A new instance of Buffer. + */ +static VALUE +buffer_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE rbSize = Qnil, rbCount = Qnil, rbClear = Qnil; + Buffer* p; + int nargs; + + Data_Get_Struct(self, Buffer, p); + + nargs = rb_scan_args(argc, argv, "12", &rbSize, &rbCount, &rbClear); + p->memory.typeSize = rbffi_type_size(rbSize); + p->memory.size = p->memory.typeSize * (nargs > 1 ? 
NUM2LONG(rbCount) : 1); + + if (p->memory.size > BUFFER_EMBED_MAXLEN) { + p->data.storage = xmalloc(p->memory.size + 7); + if (p->data.storage == NULL) { + rb_raise(rb_eNoMemError, "Failed to allocate memory size=%lu bytes", p->memory.size); + return Qnil; + } + + /* ensure the memory is aligned on at least a 8 byte boundary */ + p->memory.address = (void *) (((uintptr_t) p->data.storage + 0x7) & (uintptr_t) ~0x7ULL); + + if (p->memory.size > 0 && (nargs < 3 || RTEST(rbClear))) { + memset(p->memory.address, 0, p->memory.size); + } + + } else { + p->memory.flags |= MEM_EMBED; + p->memory.address = (void *) &p->data.embed[0]; + } + + if (rb_block_given_p()) { + return rb_ensure(rb_yield, self, buffer_free, self); + } + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [self] + * DO NOT CALL THIS METHOD. + */ +static VALUE +buffer_initialize_copy(VALUE self, VALUE other) +{ + AbstractMemory* src; + Buffer* dst; + + Data_Get_Struct(self, Buffer, dst); + src = rbffi_AbstractMemory_Cast(other, BufferClass); + if ((dst->memory.flags & MEM_EMBED) == 0 && dst->data.storage != NULL) { + xfree(dst->data.storage); + } + dst->data.storage = xmalloc(src->size + 7); + if (dst->data.storage == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate memory size=%lu bytes", src->size); + return Qnil; + } + + dst->memory.address = (void *) (((uintptr_t) dst->data.storage + 0x7) & (uintptr_t) ~0x7ULL); + dst->memory.size = src->size; + dst->memory.typeSize = src->typeSize; + + /* finally, copy the actual buffer contents */ + memcpy(dst->memory.address, src->address, src->size); + + return self; +} + +static VALUE +buffer_alloc_inout(int argc, VALUE* argv, VALUE klass) +{ + return buffer_initialize(argc, argv, buffer_allocate(klass)); +} + +static VALUE +slice(VALUE self, long offset, long len) +{ + Buffer* ptr; + Buffer* result; + VALUE obj = Qnil; + + Data_Get_Struct(self, Buffer, ptr); + checkBounds(&ptr->memory, offset, len); + + obj = Data_Make_Struct(BufferClass, Buffer, buffer_mark, -1, result); + result->memory.address = ptr->memory.address + offset; + result->memory.size = len; + result->memory.flags = ptr->memory.flags; + result->memory.typeSize = ptr->memory.typeSize; + result->data.rbParent = self; + + return obj; +} + +/* + * call-seq: + offset + * @param [Numeric] offset + * @return [Buffer] a new instance of Buffer pointing from offset until end of previous buffer. + * Add a Buffer with an offset + */ +static VALUE +buffer_plus(VALUE self, VALUE rbOffset) +{ + Buffer* ptr; + long offset = NUM2LONG(rbOffset); + + Data_Get_Struct(self, Buffer, ptr); + + return slice(self, offset, ptr->memory.size - offset); +} + +/* + * call-seq: slice(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Buffer] a new instance of Buffer + * Slice an existing Buffer. + */ +static VALUE +buffer_slice(VALUE self, VALUE rbOffset, VALUE rbLength) +{ + return slice(self, NUM2LONG(rbOffset), NUM2LONG(rbLength)); +} + +/* + * call-seq: inspect + * @return [String] + * Inspect a Buffer. + */ +static VALUE +buffer_inspect(VALUE self) +{ + char tmp[100]; + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + + snprintf(tmp, sizeof(tmp), "#", ptr, ptr->memory.address, ptr->memory.size); + + return rb_str_new2(tmp); +} + + +#if BYTE_ORDER == LITTLE_ENDIAN +# define SWAPPED_ORDER BIG_ENDIAN +#else +# define SWAPPED_ORDER LITTLE_ENDIAN +#endif + +/* + * Set or get endianness of Buffer. + * @overload order + * @return [:big, :little] + * Get endianness of Buffer. 
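The order accessor documented here either reports the buffer's byte order or, when the requested order differs from the host's, returns a byte-swapped view over the same storage (the MEM_SWAP flag). A hedged Ruby sketch, assuming a little-endian host:

require 'ffi'

buf = FFI::Buffer.new(4)         # 4-byte buffer
buf.order                        # => :little on a little-endian host
be = buf.order(:big)             # swapped view of the same bytes (self on a big-endian host)
be.put_uint32(0, 0x11223344)
buf.get_uint32(0)                # => 0x44332211 when read back through the native-order view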
+ * @overload order(order) + * @param [:big, :little, :network] order + * @return [self] + * Set endianness of Buffer (+:network+ is an alias for +:big+). + */ +static VALUE +buffer_order(int argc, VALUE* argv, VALUE self) +{ + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + if (argc == 0) { + int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER; + return order == BIG_ENDIAN ? ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little")); + } else { + VALUE rbOrder = Qnil; + int order = BYTE_ORDER; + + if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) { + rb_raise(rb_eArgError, "need byte order"); + } + if (SYMBOL_P(rbOrder)) { + ID id = SYM2ID(rbOrder); + if (id == rb_intern("little")) { + order = LITTLE_ENDIAN; + + } else if (id == rb_intern("big") || id == rb_intern("network")) { + order = BIG_ENDIAN; + } + } + if (order != BYTE_ORDER) { + Buffer* p2; + VALUE retval = slice(self, 0, ptr->memory.size); + + Data_Get_Struct(retval, Buffer, p2); + p2->memory.flags |= MEM_SWAP; + return retval; + } + + return self; + } +} + +/* Only used to free the buffer if the yield in the initializer throws an exception */ +static VALUE +buffer_free(VALUE self) +{ + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + if ((ptr->memory.flags & MEM_EMBED) == 0 && ptr->data.storage != NULL) { + xfree(ptr->data.storage); + ptr->data.storage = NULL; + } + + return self; +} + +static void +buffer_mark(Buffer* ptr) +{ + rb_gc_mark(ptr->data.rbParent); +} + +void +rbffi_Buffer_Init(VALUE moduleFFI) +{ + VALUE ffi_AbstractMemory = rbffi_AbstractMemoryClass; + + /* + * Document-class: FFI::Buffer < FFI::AbstractMemory + * + * A Buffer is a function argument type. It should be use with functions playing with C arrays. + */ + BufferClass = rb_define_class_under(moduleFFI, "Buffer", ffi_AbstractMemory); + + /* + * Document-variable: FFI::Buffer + */ + rb_global_variable(&BufferClass); + rb_define_alloc_func(BufferClass, buffer_allocate); + + /* + * Document-method: alloc_inout + * call-seq: alloc_inout(*args) + * Create a new Buffer for in and out arguments (alias : new_inout). + */ + rb_define_singleton_method(BufferClass, "alloc_inout", buffer_alloc_inout, -1); + /* + * Document-method: alloc_out + * call-seq: alloc_out(*args) + * Create a new Buffer for out arguments (alias : new_out). + */ + rb_define_singleton_method(BufferClass, "alloc_out", buffer_alloc_inout, -1); + /* + * Document-method: alloc_in + * call-seq: alloc_in(*args) + * Create a new Buffer for in arguments (alias : new_in). 
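The alloc_in/alloc_out/alloc_inout constructors registered below all funnel into buffer_alloc_inout and differ only in declared intent; a short Ruby sketch:

require 'ffi'

FFI::Buffer.alloc_in(:int32, 4)               # same as FFI::Buffer.new_in(:int32, 4)
FFI::Buffer.alloc_out(64)                     # 64 bytes for data the callee writes back
buf = FFI::Buffer.alloc_inout(:char, 8, true) # zero-filled, readable and writable
buf.length                                    # => 8 (alias of #total / #size)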
+ */ + rb_define_singleton_method(BufferClass, "alloc_in", buffer_alloc_inout, -1); + rb_define_alias(rb_singleton_class(BufferClass), "new_in", "alloc_in"); + rb_define_alias(rb_singleton_class(BufferClass), "new_out", "alloc_out"); + rb_define_alias(rb_singleton_class(BufferClass), "new_inout", "alloc_inout"); + + rb_define_method(BufferClass, "initialize", buffer_initialize, -1); + rb_define_method(BufferClass, "initialize_copy", buffer_initialize_copy, 1); + rb_define_method(BufferClass, "order", buffer_order, -1); + rb_define_method(BufferClass, "inspect", buffer_inspect, 0); + rb_define_alias(BufferClass, "length", "total"); + rb_define_method(BufferClass, "+", buffer_plus, 1); + rb_define_method(BufferClass, "slice", buffer_slice, 2); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Call.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Call.c new file mode 100644 index 0000000..8db60f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Call.c @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Mike Dalessio + * Copyright (c) 2009, Aman Gupta. + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) +# include +# include +#endif +#include +#include "extconf.h" +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Struct.h" +#include "Function.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "MappedType.h" +#include "Thread.h" +#include "LongDouble.h" + +#ifdef USE_RAW +# ifndef __i386__ +# error "RAW argument packing only supported on i386" +# endif + +#define INT8_ADJ (4) +#define INT16_ADJ (4) +#define INT32_ADJ (4) +#define INT64_ADJ (8) +#define LONG_ADJ (sizeof(long)) +#define FLOAT32_ADJ (4) +#define FLOAT64_ADJ (8) +#define ADDRESS_ADJ (sizeof(void *)) +#define LONGDOUBLE_ADJ (ffi_type_longdouble.alignment) + +#endif /* USE_RAW */ + +#ifdef USE_RAW +# define ADJ(p, a) ((p) = (FFIStorage*) (((char *) p) + a##_ADJ)) +#else +# define ADJ(p, a) (++(p)) +#endif + +static void* callback_param(VALUE proc, VALUE cbinfo); +static inline void* getPointer(VALUE value, int type); + +static ID id_to_ptr, id_map_symbol, id_to_native; + +void +rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes, + FFIStorage* paramStorage, void** ffiValues, + VALUE* callbackParameters, int callbackCount, VALUE enums) +{ + VALUE callbackProc = Qnil; + FFIStorage* param = ¶mStorage[0]; + int i, argidx, cbidx, argCount; + + if (unlikely(paramCount != -1 && paramCount != argc)) { + if (argc == (paramCount - 1) && callbackCount == 1 && rb_block_given_p()) { + callbackProc = rb_block_proc(); + } else { + rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)", argc, paramCount); + } + } + + argCount = paramCount != -1 ? paramCount : argc; + + for (i = 0, argidx = 0, cbidx = 0; i < argCount; ++i) { + Type* paramType = paramTypes[i]; + int type; + + + if (unlikely(paramType->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { argv[argidx], Qnil }; + argv[argidx] = rb_funcall2(((MappedType *) paramType)->rbConverter, id_to_native, 2, values); + paramType = ((MappedType *) paramType)->type; + } + + type = argidx < argc ? 
TYPE(argv[argidx]) : T_NONE; + ffiValues[i] = param; + + switch (paramType->nativeType) { + + case NATIVE_INT8: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s8 = NUM2INT(value); + } else { + param->s8 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT8); + break; + + case NATIVE_INT16: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s16 = NUM2INT(value); + + } else { + param->s16 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT16); + break; + + case NATIVE_INT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s32 = NUM2INT(value); + + } else { + param->s32 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT32); + break; + + case NATIVE_BOOL: + if (type != T_TRUE && type != T_FALSE) { + rb_raise(rb_eTypeError, "wrong argument type (expected a boolean parameter)"); + } + param->s8 = argv[argidx++] == Qtrue; + ADJ(param, INT8); + break; + + case NATIVE_UINT8: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u8 = NUM2UINT(value); + } else { + param->u8 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT8); + ++argidx; + break; + + case NATIVE_UINT16: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u16 = NUM2UINT(value); + } else { + param->u16 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT16); + ++argidx; + break; + + case NATIVE_UINT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u32 = NUM2UINT(value); + } else { + param->u32 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT32); + ++argidx; + break; + + case NATIVE_INT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->i64 = NUM2LL(value); + } else { + param->i64 = NUM2LL(argv[argidx]); + } + + ADJ(param, INT64); + ++argidx; + break; + + case NATIVE_UINT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u64 = NUM2ULL(value); + } else { + param->u64 = NUM2ULL(argv[argidx]); + } + + ADJ(param, INT64); + ++argidx; + break; + + case NATIVE_LONG: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + *(ffi_sarg *) param = NUM2LONG(value); + } else { + *(ffi_sarg *) param = NUM2LONG(argv[argidx]); + } + + ADJ(param, LONG); + ++argidx; + break; + + case NATIVE_ULONG: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + *(ffi_arg *) param = NUM2ULONG(value); + } else { + *(ffi_arg *) param = NUM2ULONG(argv[argidx]); + } + + ADJ(param, LONG); + ++argidx; + break; + + case NATIVE_FLOAT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->f32 = (float) NUM2DBL(value); + } else { + param->f32 = (float) NUM2DBL(argv[argidx]); + } + + ADJ(param, FLOAT32); + ++argidx; + break; + + case NATIVE_FLOAT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->f64 = NUM2DBL(value); + } else { + param->f64 = 
NUM2DBL(argv[argidx]); + } + + ADJ(param, FLOAT64); + ++argidx; + break; + + case NATIVE_LONGDOUBLE: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->ld = rbffi_num2longdouble(value); + } else { + param->ld = rbffi_num2longdouble(argv[argidx]); + } + + ADJ(param, LONGDOUBLE); + ++argidx; + break; + + + case NATIVE_STRING: + if (type == T_NIL) { + param->ptr = NULL; + + } else { + param->ptr = StringValueCStr(argv[argidx]); + } + + ADJ(param, ADDRESS); + ++argidx; + break; + + case NATIVE_POINTER: + case NATIVE_BUFFER_IN: + case NATIVE_BUFFER_OUT: + case NATIVE_BUFFER_INOUT: + param->ptr = getPointer(argv[argidx++], type); + ADJ(param, ADDRESS); + break; + + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + if (callbackProc != Qnil) { + param->ptr = callback_param(callbackProc, callbackParameters[cbidx++]); + } else { + param->ptr = callback_param(argv[argidx], callbackParameters[cbidx++]); + ++argidx; + } + ADJ(param, ADDRESS); + break; + + case NATIVE_STRUCT: + ffiValues[i] = getPointer(argv[argidx++], type); + break; + + default: + rb_raise(rb_eArgError, "Invalid parameter type: %d", paramType->nativeType); + } + } +} + +static void * +call_blocking_function(void* data) +{ + rbffi_blocking_call_t* b = (rbffi_blocking_call_t *) data; + ffi_call(&b->cif, FFI_FN(b->function), b->retval, b->ffiValues); + + return NULL; +} + +VALUE +rbffi_do_blocking_call(VALUE data) +{ + rb_thread_call_without_gvl(call_blocking_function, (void*)data, (rb_unblock_function_t *) -1, NULL); + + return Qnil; +} + +VALUE +rbffi_save_frame_exception(VALUE data, VALUE exc) +{ + rbffi_frame_t* frame = (rbffi_frame_t *) data; + frame->exc = exc; + return Qnil; +} + +VALUE +rbffi_CallFunction(int argc, VALUE* argv, void* function, FunctionType* fnInfo) +{ + void* retval; + void** ffiValues; + FFIStorage* params; + VALUE rbReturnValue; + rbffi_frame_t frame = { 0 }; + + retval = alloca(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG)); + + if (unlikely(fnInfo->blocking)) { + rbffi_blocking_call_t* bc; + + /* allocate information passed to the blocking function on the stack */ + ffiValues = ALLOCA_N(void *, fnInfo->parameterCount); + params = ALLOCA_N(FFIStorage, fnInfo->parameterCount); + bc = ALLOCA_N(rbffi_blocking_call_t, 1); + bc->retval = retval; + bc->cif = fnInfo->ffi_cif; + bc->function = function; + bc->ffiValues = ffiValues; + bc->params = params; + bc->frame = &frame; + + rbffi_SetupCallParams(argc, argv, + fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues, + fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums); + + rbffi_frame_push(&frame); + rb_rescue2(rbffi_do_blocking_call, (VALUE) bc, rbffi_save_frame_exception, (VALUE) &frame, rb_eException, (VALUE) 0); + rbffi_frame_pop(&frame); + + } else { + + ffiValues = ALLOCA_N(void *, fnInfo->parameterCount); + params = ALLOCA_N(FFIStorage, fnInfo->parameterCount); + + rbffi_SetupCallParams(argc, argv, + fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues, + fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums); + + rbffi_frame_push(&frame); + ffi_call(&fnInfo->ffi_cif, FFI_FN(function), retval, ffiValues); + rbffi_frame_pop(&frame); + } + + if (unlikely(!fnInfo->ignoreErrno)) { + rbffi_save_errno(); + } + + if (RTEST(frame.exc) && frame.exc != Qnil) { + rb_exc_raise(frame.exc); + } + + RB_GC_GUARD(rbReturnValue) = rbffi_NativeValue_ToRuby(fnInfo->returnType, fnInfo->rbReturnType, retval); + RB_GC_GUARD(fnInfo->rbReturnType); 
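+    /* The RB_GC_GUARD calls above keep the converted return value and the Ruby
+     * return-type object visible to the conservative GC while the native return
+     * value is boxed into a Ruby object. */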
+ + return rbReturnValue; +} + +static inline void* +getPointer(VALUE value, int type) +{ + if (likely(type == T_DATA && rb_obj_is_kind_of(value, rbffi_AbstractMemoryClass))) { + + return ((AbstractMemory *) DATA_PTR(value))->address; + + } else if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_StructClass)) { + + AbstractMemory* memory = ((Struct *) DATA_PTR(value))->pointer; + return memory != NULL ? memory->address : NULL; + + } else if (type == T_STRING) { + + return StringValuePtr(value); + + } else if (type == T_NIL) { + + return NULL; + + } else if (rb_respond_to(value, id_to_ptr)) { + + VALUE ptr = rb_funcall2(value, id_to_ptr, 0, NULL); + if (rb_obj_is_kind_of(ptr, rbffi_AbstractMemoryClass) && TYPE(ptr) == T_DATA) { + return ((AbstractMemory *) DATA_PTR(ptr))->address; + } + rb_raise(rb_eArgError, "to_ptr returned an invalid pointer"); + } + + rb_raise(rb_eArgError, ":pointer argument is not a valid pointer"); + return NULL; +} + +Invoker +rbffi_GetInvoker(FunctionType *fnInfo) +{ + return rbffi_CallFunction; +} + + +static void* +callback_param(VALUE proc, VALUE cbInfo) +{ + VALUE callback ; + if (unlikely(proc == Qnil)) { + return NULL ; + } + + /* Handle Function pointers here */ + if (rb_obj_is_kind_of(proc, rbffi_FunctionClass)) { + AbstractMemory* ptr; + Data_Get_Struct(proc, AbstractMemory, ptr); + return ptr->address; + } + + callback = rbffi_Function_ForProc(cbInfo, proc); + RB_GC_GUARD(callback); + + return ((AbstractMemory *) DATA_PTR(callback))->address; +} + + +void +rbffi_Call_Init(VALUE moduleFFI) +{ + id_to_ptr = rb_intern("to_ptr"); + id_to_native = rb_intern("to_native"); + id_map_symbol = rb_intern("__map_symbol"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Call.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Call.h new file mode 100644 index 0000000..b892d85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Call.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Mike Dalessio + * Copyright (c) 2009, Aman Gupta. + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_CALL_H +#define RBFFI_CALL_H + +#include "Thread.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__i386__) && \ + (defined(HAVE_RAW_API) || defined(USE_INTERNAL_LIBFFI)) && \ + !defined(_WIN32) && !defined(__WIN32__) +# define USE_RAW +#endif + +#if (defined(__i386__) || defined(__x86_64__)) && !(defined(_WIN32) || defined(__WIN32__)) +# define BYPASS_FFI 1 +#endif + +typedef union { +#ifdef USE_RAW + signed int s8, s16, s32; + unsigned int u8, u16, u32; +#else + signed char s8; + unsigned char u8; + signed short s16; + unsigned short u16; + signed int s32; + unsigned int u32; +#endif + signed long long i64; + unsigned long long u64; + signed long sl; + unsigned long ul; + void* ptr; + float f32; + double f64; + long double ld; +} FFIStorage; + +extern void rbffi_Call_Init(VALUE moduleFFI); + +extern void rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes, + FFIStorage* paramStorage, void** ffiValues, + VALUE* callbackParameters, int callbackCount, VALUE enums); + +struct FunctionType_; +extern VALUE rbffi_CallFunction(int argc, VALUE* argv, void* function, struct FunctionType_* fnInfo); + +typedef VALUE (*Invoker)(int argc, VALUE* argv, void* function, struct FunctionType_* fnInfo); + +Invoker rbffi_GetInvoker(struct FunctionType_* fnInfo); + +extern VALUE rbffi_GetEnumValue(VALUE enums, VALUE value); +extern int rbffi_GetSignedIntValue(VALUE value, int type, int minValue, int maxValue, const char* typeName, VALUE enums); + +typedef struct rbffi_blocking_call { + rbffi_frame_t* frame; + void* function; + ffi_cif cif; + void **ffiValues; + void* retval; + void* params; +} rbffi_blocking_call_t; + +VALUE rbffi_do_blocking_call(VALUE data); +VALUE rbffi_save_frame_exception(VALUE data, VALUE exc); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_CALL_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.c new file mode 100644 index 0000000..45411b4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +#endif +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +#else +# include +# define _WINSOCKAPI_ +# include +#endif +#include +#include + +#if defined(_MSC_VER) && !defined(INT8_MIN) +# include "win32/stdint.h" +#endif +#include +#include "rbffi.h" +#include "compat.h" + +#include "Function.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" + +#include "ClosurePool.h" + + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +typedef struct Memory { + void* code; + void* data; + struct Memory* next; +} Memory; + +struct ClosurePool_ { + void* ctx; + int closureSize; + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize); + struct Memory* blocks; /* Keeps track of all the allocated memory for this pool */ + Closure* list; + long refcnt; +}; + +static long pageSize; + +static void* allocatePage(void); +static bool freePage(void *); +static bool protectPage(void *); + +ClosurePool* +rbffi_ClosurePool_New(int closureSize, + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize), + void* ctx) +{ + ClosurePool* pool; + + pool = xcalloc(1, sizeof(*pool)); + pool->closureSize = closureSize; + pool->ctx = ctx; + pool->prep = prep; + pool->refcnt = 1; + + return pool; +} + +void +cleanup_closure_pool(ClosurePool* pool) +{ + Memory* memory; + + for (memory = pool->blocks; memory != NULL; ) { + Memory* next = memory->next; + freePage(memory->code); + free(memory->data); + free(memory); + memory = next; + } + xfree(pool); +} + +void +rbffi_ClosurePool_Free(ClosurePool* pool) +{ + if (pool != NULL) { + long refcnt = --(pool->refcnt); + if (refcnt == 0) { + cleanup_closure_pool(pool); + } + } +} + +Closure* +rbffi_Closure_Alloc(ClosurePool* pool) +{ + Closure *list = NULL; + Memory* block = NULL; + void *code = NULL; + char errmsg[256]; + int nclosures; + long trampolineSize; + int i; + + if (pool->list != NULL) { + Closure* closure = pool->list; + pool->list = pool->list->next; + pool->refcnt++; + + return closure; + } + + trampolineSize = roundup(pool->closureSize, 8); + nclosures = (int) (pageSize / trampolineSize); + block = calloc(1, sizeof(*block)); + list = calloc(nclosures, sizeof(*list)); + code = allocatePage(); + + if (block == NULL || list == NULL || code == NULL) { + snprintf(errmsg, sizeof(errmsg), "failed to allocate a page. 
errno=%d (%s)", errno, strerror(errno)); + goto error; + } + + for (i = 0; i < nclosures; ++i) { + Closure* closure = &list[i]; + closure->next = &list[i + 1]; + closure->pool = pool; + closure->code = ((char *)code + (i * trampolineSize)); + + if (!(*pool->prep)(pool->ctx, closure->code, closure, errmsg, sizeof(errmsg))) { + goto error; + } + } + + if (!protectPage(code)) { + goto error; + } + + /* Track the allocated page + Closure memory area */ + block->data = list; + block->code = code; + block->next = pool->blocks; + pool->blocks = block; + + /* Thread the new block onto the free list, apart from the first one. */ + list[nclosures - 1].next = pool->list; + pool->list = list->next; + pool->refcnt++; + + /* Use the first one as the new handle */ + return list; + +error: + free(block); + free(list); + if (code != NULL) { + freePage(code); + } + + + rb_raise(rb_eRuntimeError, "%s", errmsg); + return NULL; +} + +void +rbffi_Closure_Free(Closure* closure) +{ + if (closure != NULL) { + ClosurePool* pool = closure->pool; + long refcnt; + /* Just push it on the front of the free list */ + closure->next = pool->list; + pool->list = closure; + refcnt = --(pool->refcnt); + if (refcnt == 0) { + cleanup_closure_pool(pool); + } + } +} + +void* +rbffi_Closure_CodeAddress(Closure* handle) +{ + return handle->code; +} + + +static long +getPageSize() +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + return sysconf(_SC_PAGESIZE); +#endif +} + +static void* +allocatePage(void) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + return VirtualAlloc(NULL, pageSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else + void *page = mmap(NULL, pageSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + return (page != (void *) -1) ? page : NULL; +#endif +} + +static bool +freePage(void *addr) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + return VirtualFree(addr, 0, MEM_RELEASE); +#else + return munmap(addr, pageSize) == 0; +#endif +} + +static bool +protectPage(void* page) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + DWORD oldProtect; + return VirtualProtect(page, pageSize, PAGE_EXECUTE_READ, &oldProtect); +#else + return mprotect(page, pageSize, PROT_READ | PROT_EXEC) == 0; +#endif +} + +void +rbffi_ClosurePool_Init(VALUE module) +{ + pageSize = getPageSize(); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.h new file mode 100644 index 0000000..b842375 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ClosurePool.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RUBYFFI_CLOSUREPOOL_H +#define RUBYFFI_CLOSUREPOOL_H + +typedef struct ClosurePool_ ClosurePool; +typedef struct Closure_ Closure; + +struct Closure_ { + void* info; /* opaque handle for storing closure-instance specific data */ + void* function; /* closure-instance specific function, called by custom trampoline */ + void* code; /* The native trampoline code location */ + struct ClosurePool_* pool; + Closure* next; +}; + +void rbffi_ClosurePool_Init(VALUE module); + +ClosurePool* rbffi_ClosurePool_New(int closureSize, + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize), + void* ctx); + +void rbffi_ClosurePool_Free(ClosurePool *); + +Closure* rbffi_Closure_Alloc(ClosurePool *); +void rbffi_Closure_Free(Closure *); + +void* rbffi_Closure_GetCodeAddress(Closure *); + +#endif /* RUBYFFI_CLOSUREPOOL_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.c new file mode 100644 index 0000000..c717aec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.c @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#ifndef _MSC_VER +# include +#endif +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +# include +# define _WINSOCKAPI_ +# include +# include +#else +# include +#endif +#include +#if defined(_MSC_VER) && !defined(INT8_MIN) +# include "win32/stdint.h" +#endif + +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "DynamicLibrary.h" + +typedef struct LibrarySymbol_ { + Pointer base; + VALUE library; + VALUE name; +} LibrarySymbol; + +static VALUE library_initialize(VALUE self, VALUE libname, VALUE libflags); +static void library_free(Library* lib); + + +static VALUE symbol_allocate(VALUE klass); +static VALUE symbol_new(VALUE library, void* address, VALUE name); +static void symbol_mark(LibrarySymbol* sym); + +static VALUE LibraryClass = Qnil, SymbolClass = Qnil; + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +static void* dl_open(const char* name, int flags); +static void dl_error(char* buf, int size); +#define dl_sym(handle, name) GetProcAddress(handle, name) +#define dl_close(handle) FreeLibrary(handle) +#else +# define dl_open(name, flags) dlopen(name, flags != 0 ? flags : RTLD_LAZY) +# define dl_error(buf, size) do { snprintf(buf, size, "%s", dlerror()); } while(0) +# define dl_sym(handle, name) dlsym(handle, name) +# define dl_close(handle) dlclose(handle) +#endif + +static VALUE +library_allocate(VALUE klass) +{ + Library* library; + return Data_Make_Struct(klass, Library, NULL, library_free, library); +} + +/* + * call-seq: DynamicLibrary.open(libname, libflags) + * @param libname (see #initialize) + * @param libflags (see #initialize) + * @return [FFI::DynamicLibrary] + * @raise {LoadError} if +libname+ cannot be opened + * Open a library. + */ +static VALUE +library_open(VALUE klass, VALUE libname, VALUE libflags) +{ + return library_initialize(library_allocate(klass), libname, libflags); +} + +/* + * call-seq: initialize(libname, libflags) + * @param [String] libname name of library to open + * @param [Fixnum] libflags flags for library to open + * @return [FFI::DynamicLibrary] + * @raise {LoadError} if +libname+ cannot be opened + * A new DynamicLibrary instance. + */ +static VALUE +library_initialize(VALUE self, VALUE libname, VALUE libflags) +{ + Library* library; + int flags; + + Check_Type(libflags, T_FIXNUM); + + Data_Get_Struct(self, Library, library); + flags = libflags != Qnil ? NUM2UINT(libflags) : 0; + + library->handle = dl_open(libname != Qnil ? StringValueCStr(libname) : NULL, flags); + if (library->handle == NULL) { + char errmsg[1024]; + dl_error(errmsg, sizeof(errmsg)); + rb_raise(rb_eLoadError, "Could not open library '%s': %s", + libname != Qnil ? StringValueCStr(libname) : "[current process]", + errmsg); + } +#ifdef __CYGWIN__ + // On Cygwin 1.7.17 "dlsym(dlopen(0,0), 'getpid')" fails. (dlerror: "No such process") + // As a workaround we can use "dlsym(RTLD_DEFAULT, 'getpid')" instead. 
+ // Since 0 == RTLD_DEFAULT we won't call dl_close later. + if (libname == Qnil) { + dl_close(library->handle); + library->handle = RTLD_DEFAULT; + } +#endif + rb_iv_set(self, "@name", libname != Qnil ? libname : rb_str_new2("[current process]")); + return self; +} + +static VALUE +library_dlsym(VALUE self, VALUE name) +{ + Library* library; + void* address = NULL; + Check_Type(name, T_STRING); + + Data_Get_Struct(self, Library, library); + address = dl_sym(library->handle, StringValueCStr(name)); + + return address != NULL ? symbol_new(self, address, name) : Qnil; +} + +/* + * call-seq: last_error + * @return [String] library's last error string + */ +static VALUE +library_dlerror(VALUE self) +{ + char errmsg[1024]; + dl_error(errmsg, sizeof(errmsg)); + return rb_str_new2(errmsg); +} + +static void +library_free(Library* library) +{ + /* dlclose() on MacOS tends to segfault - avoid it */ +#ifndef __APPLE__ + if (library->handle != NULL) { + dl_close(library->handle); + } +#endif + xfree(library); +} + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +static void* +dl_open(const char* name, int flags) +{ + if (name == NULL) { + return GetModuleHandle(NULL); + } else { + DWORD dwFlags = PathIsRelativeA(name) ? 0 : LOAD_WITH_ALTERED_SEARCH_PATH; + return LoadLibraryExA(name, NULL, dwFlags); + } +} + +static void +dl_error(char* buf, int size) +{ + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), + 0, buf, size, NULL); +} +#endif + +static VALUE +symbol_allocate(VALUE klass) +{ + LibrarySymbol* sym; + VALUE obj = Data_Make_Struct(klass, LibrarySymbol, NULL, -1, sym); + sym->name = Qnil; + sym->library = Qnil; + sym->base.rbParent = Qnil; + + return obj; +} + + +/* + * call-seq: initialize_copy(other) + * @param [Object] other + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +symbol_initialize_copy(VALUE self, VALUE other) +{ + rb_raise(rb_eRuntimeError, "cannot duplicate symbol"); + return Qnil; +} + +static VALUE +symbol_new(VALUE library, void* address, VALUE name) +{ + LibrarySymbol* sym; + VALUE obj = Data_Make_Struct(SymbolClass, LibrarySymbol, symbol_mark, -1, sym); + + sym->base.memory.address = address; + sym->base.memory.size = LONG_MAX; + sym->base.memory.typeSize = 1; + sym->base.memory.flags = MEM_RD | MEM_WR; + sym->library = library; + sym->name = name; + + return obj; +} + +static void +symbol_mark(LibrarySymbol* sym) +{ + rb_gc_mark(sym->library); + rb_gc_mark(sym->name); +} + +/* + * call-seq: inspect + * @return [String] + * Inspect. + */ +static VALUE +symbol_inspect(VALUE self) +{ + LibrarySymbol* sym; + char buf[256]; + + Data_Get_Struct(self, LibrarySymbol, sym); + snprintf(buf, sizeof(buf), "#", + StringValueCStr(sym->name), sym->base.memory.address); + return rb_str_new2(buf); +} + +void +rbffi_DynamicLibrary_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::DynamicLibrary + */ + LibraryClass = rb_define_class_under(moduleFFI, "DynamicLibrary", rb_cObject); + rb_global_variable(&LibraryClass); + /* + * Document-class: FFI::DynamicLibrary::Symbol < FFI::Pointer + * + * An instance of this class represents a library symbol. It may be a {Pointer pointer} to + * a function or to a variable. 
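+ * @example
+ *   # Illustrative lookup against the current process; "printf" is assumed to
+ *   # be exported by the C runtime already loaded into the process.
+ *   lib = FFI::DynamicLibrary.open(nil, FFI::DynamicLibrary::RTLD_LAZY)
+ *   sym = lib.find_function("printf")   # => FFI::DynamicLibrary::Symbol, or nil if not found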
+ */ + SymbolClass = rb_define_class_under(LibraryClass, "Symbol", rbffi_PointerClass); + rb_global_variable(&SymbolClass); + + /* + * Document-const: FFI::NativeLibrary + * Backward compatibility for FFI::DynamicLibrary + */ + rb_define_const(moduleFFI, "NativeLibrary", LibraryClass); /* backwards compat library */ + rb_define_alloc_func(LibraryClass, library_allocate); + rb_define_singleton_method(LibraryClass, "open", library_open, 2); + rb_define_singleton_method(LibraryClass, "last_error", library_dlerror, 0); + rb_define_method(LibraryClass, "initialize", library_initialize, 2); + /* + * Document-method: find_symbol + * call-seq: find_symbol(name) + * @param [String] name library symbol's name + * @return [FFI::DynamicLibrary::Symbol] library symbol + */ + rb_define_method(LibraryClass, "find_symbol", library_dlsym, 1); + /* + * Document-method: find_function + * call-seq: find_function(name) + * @param [String] name library function's name + * @return [FFI::DynamicLibrary::Symbol] library function symbol + */ + rb_define_method(LibraryClass, "find_function", library_dlsym, 1); + /* + * Document-method: find_variable + * call-seq: find_variable(name) + * @param [String] name library variable's name + * @return [FFI::DynamicLibrary::Symbol] library variable symbol + */ + rb_define_method(LibraryClass, "find_variable", library_dlsym, 1); + rb_define_method(LibraryClass, "last_error", library_dlerror, 0); + rb_define_attr(LibraryClass, "name", 1, 0); + + rb_define_alloc_func(SymbolClass, symbol_allocate); + rb_undef_method(SymbolClass, "new"); + rb_define_method(SymbolClass, "inspect", symbol_inspect, 0); + rb_define_method(SymbolClass, "initialize_copy", symbol_initialize_copy, 1); + +#define DEF(x) rb_define_const(LibraryClass, "RTLD_" #x, UINT2NUM(RTLD_##x)) + DEF(LAZY); + DEF(NOW); + DEF(GLOBAL); + DEF(LOCAL); + DEF(NOLOAD); + DEF(NODELETE); + DEF(FIRST); + DEF(DEEPBIND); + DEF(MEMBER); + DEF(BINDING_MASK); + DEF(LOCATION_MASK); + DEF(ALL_MASK); +#undef DEF + +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.h new file mode 100644 index 0000000..97bf7bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/DynamicLibrary.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LIBRARY_H +#define _LIBRARY_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* if these aren't defined (eg. windows), we need sensible defaults */ +#ifndef RTLD_LAZY +#define RTLD_LAZY 1 +#endif + +#ifndef RTLD_NOW +#define RTLD_NOW 2 +#endif + +#ifndef RTLD_LOCAL +#define RTLD_LOCAL 4 +#endif + +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 8 +#endif + +/* If these aren't defined, they're not supported so define as 0 */ +#ifndef RTLD_NOLOAD +#define RTLD_NOLOAD 0 +#endif + +#ifndef RTLD_NODELETE +#define RTLD_NODELETE 0 +#endif + +#ifndef RTLD_FIRST +#define RTLD_FIRST 0 +#endif + +#ifndef RTLD_DEEPBIND +#define RTLD_DEEPBIND 0 +#endif + +#ifndef RTLD_MEMBER +#define RTLD_MEMBER 0 +#endif + +/* convenience */ +#ifndef RTLD_BINDING_MASK +#define RTLD_BINDING_MASK (RTLD_LAZY | RTLD_NOW) +#endif + +#ifndef RTLD_LOCATION_MASK +#define RTLD_LOCATION_MASK (RTLD_LOCAL | RTLD_GLOBAL) +#endif + +#ifndef RTLD_ALL_MASK +#define RTLD_ALL_MASK (RTLD_BINDING_MASK | RTLD_LOCATION_MASK | RTLD_NOLOAD | RTLD_NODELETE | RTLD_FIRST | RTLD_DEEPBIND | RTLD_MEMBER) +#endif + +typedef struct Library { + void* handle; +} Library; + +extern void rbffi_DynamicLibrary_Init(VALUE ffiModule); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBRARY_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Function.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Function.c new file mode 100644 index 0000000..506d79b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Function.c @@ -0,0 +1,902 @@ +/* + * Copyright (c) 2009-2011 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _WIN32 +# include +# include +#endif + +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# if !defined(INT8_MIN) +# include "win32/stdint.h" +# endif +#endif +#include +#include + +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) +#include +#endif +#include + +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Struct.h" +#include "Platform.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "ClosurePool.h" +#include "MappedType.h" +#include "Thread.h" +#include "LongDouble.h" +#include "MethodHandle.h" +#include "Function.h" + +typedef struct Function_ { + Pointer base; + FunctionType* info; + MethodHandle* methodHandle; + bool autorelease; + Closure* closure; + VALUE rbProc; + VALUE rbFunctionInfo; +} Function; + +static void function_mark(Function *); +static void function_free(Function *); +static VALUE function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc); +static void callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data); +static bool callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize); +static void* callback_with_gvl(void* data); +static VALUE invoke_callback(VALUE data); +static VALUE save_callback_exception(VALUE data, VALUE exc); + +#define DEFER_ASYNC_CALLBACK 1 + + +#if defined(DEFER_ASYNC_CALLBACK) +static VALUE async_cb_event(void *); +static VALUE async_cb_call(void *); +#endif + +extern int ruby_thread_has_gvl_p(void); +extern int ruby_native_thread_p(void); + +VALUE rbffi_FunctionClass = Qnil; + +#if defined(DEFER_ASYNC_CALLBACK) +static VALUE async_cb_thread = Qnil; +#endif + +static ID id_call = 0, id_to_native = 0, id_from_native = 0, id_cbtable = 0, id_cb_ref = 0; + +struct gvl_callback { + Closure* closure; + void* retval; + void** parameters; + bool done; + rbffi_frame_t *frame; +#if defined(DEFER_ASYNC_CALLBACK) + struct gvl_callback* next; +# ifndef _WIN32 + pthread_cond_t async_cond; + pthread_mutex_t async_mutex; +# else + HANDLE async_event; +# endif +#endif +}; + + +#if defined(DEFER_ASYNC_CALLBACK) +static struct gvl_callback* async_cb_list = NULL; +# ifndef _WIN32 + static pthread_mutex_t async_cb_mutex = PTHREAD_MUTEX_INITIALIZER; + static pthread_cond_t async_cb_cond = PTHREAD_COND_INITIALIZER; +# else + static HANDLE async_cb_cond; + static CRITICAL_SECTION async_cb_lock; +# endif +#endif + + +static VALUE +function_allocate(VALUE klass) +{ + Function *fn; + VALUE obj; + + obj = Data_Make_Struct(klass, Function, function_mark, function_free, fn); + + fn->base.memory.flags = MEM_RD; + fn->base.rbParent = Qnil; + fn->rbProc = Qnil; + fn->rbFunctionInfo = Qnil; + fn->autorelease = true; + + return obj; +} + +static void +function_mark(Function *fn) +{ + rb_gc_mark(fn->base.rbParent); + rb_gc_mark(fn->rbProc); + rb_gc_mark(fn->rbFunctionInfo); +} + +static void +function_free(Function *fn) +{ + if (fn->methodHandle 
!= NULL) { + rbffi_MethodHandle_Free(fn->methodHandle); + } + + if (fn->closure != NULL && fn->autorelease) { + rbffi_Closure_Free(fn->closure); + } + + xfree(fn); +} + +/* + * @param [Type, Symbol] return_type return type for the function + * @param [Array] param_types array of parameters types + * @param [Hash] options see {FFI::FunctionType} for available options + * @return [self] + * A new Function instance. + * + * Define a function from a Proc or a block. + * + * @overload initialize(return_type, param_types, options = {}) { |i| ... } + * @yieldparam i parameters for the function + * @overload initialize(return_type, param_types, proc, options = {}) + * @param [Proc] proc + */ +static VALUE +function_initialize(int argc, VALUE* argv, VALUE self) +{ + + VALUE rbReturnType = Qnil, rbParamTypes = Qnil, rbProc = Qnil, rbOptions = Qnil; + VALUE rbFunctionInfo = Qnil; + VALUE infoArgv[3]; + int nargs; + + nargs = rb_scan_args(argc, argv, "22", &rbReturnType, &rbParamTypes, &rbProc, &rbOptions); + + /* + * Callback with block, + * e.g. Function.new(:int, [ :int ]) { |i| blah } + * or Function.new(:int, [ :int ], { :convention => :stdcall }) { |i| blah } + */ + if (rb_block_given_p()) { + if (nargs > 3) { + rb_raise(rb_eArgError, "cannot create function with both proc/address and block"); + } + rbOptions = rbProc; + rbProc = rb_block_proc(); + } else { + /* Callback with proc, or Function with address + * e.g. Function.new(:int, [ :int ], Proc.new { |i| }) + * Function.new(:int, [ :int ], Proc.new { |i| }, { :convention => :stdcall }) + * Function.new(:int, [ :int ], addr) + * Function.new(:int, [ :int ], addr, { :convention => :stdcall }) + */ + } + + infoArgv[0] = rbReturnType; + infoArgv[1] = rbParamTypes; + infoArgv[2] = rbOptions; + rbFunctionInfo = rb_class_new_instance(rbOptions != Qnil ? 3 : 2, infoArgv, rbffi_FunctionTypeClass); + + function_init(self, rbFunctionInfo, rbProc); + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +function_initialize_copy(VALUE self, VALUE other) +{ + rb_raise(rb_eRuntimeError, "cannot duplicate function instances"); + return Qnil; +} + +VALUE +rbffi_Function_NewInstance(VALUE rbFunctionInfo, VALUE rbProc) +{ + return function_init(function_allocate(rbffi_FunctionClass), rbFunctionInfo, rbProc); +} + +VALUE +rbffi_Function_ForProc(VALUE rbFunctionInfo, VALUE proc) +{ + VALUE callback, cbref, cbTable; + Function* fp; + + cbref = RTEST(rb_ivar_defined(proc, id_cb_ref)) ? rb_ivar_get(proc, id_cb_ref) : Qnil; + /* If the first callback reference has the same function function signature, use it */ + if (cbref != Qnil && CLASS_OF(cbref) == rbffi_FunctionClass) { + Data_Get_Struct(cbref, Function, fp); + if (fp->rbFunctionInfo == rbFunctionInfo) { + return cbref; + } + } + + cbTable = RTEST(rb_ivar_defined(proc, id_cbtable)) ? 
rb_ivar_get(proc, id_cbtable) : Qnil; + if (cbTable != Qnil && (callback = rb_hash_aref(cbTable, rbFunctionInfo)) != Qnil) { + return callback; + } + + /* No existing function for the proc with that signature, create a new one and cache it */ + callback = rbffi_Function_NewInstance(rbFunctionInfo, proc); + if (cbref == Qnil) { + /* If there is no other cb already cached for this proc, we can use the ivar slot */ + rb_ivar_set(proc, id_cb_ref, callback); + } else { + /* The proc instance has been used as more than one type of callback, store extras in a hash */ + cbTable = rb_hash_new(); + rb_ivar_set(proc, id_cbtable, cbTable); + rb_hash_aset(cbTable, rbFunctionInfo, callback); + } + + return callback; +} + +static VALUE +function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc) +{ + Function* fn = NULL; + + Data_Get_Struct(self, Function, fn); + + fn->rbFunctionInfo = rbFunctionInfo; + + Data_Get_Struct(fn->rbFunctionInfo, FunctionType, fn->info); + + if (rb_obj_is_kind_of(rbProc, rbffi_PointerClass)) { + Pointer* orig; + Data_Get_Struct(rbProc, Pointer, orig); + fn->base.memory = orig->memory; + fn->base.rbParent = rbProc; + + } else if (rb_obj_is_kind_of(rbProc, rb_cProc) || rb_respond_to(rbProc, id_call)) { + if (fn->info->closurePool == NULL) { + fn->info->closurePool = rbffi_ClosurePool_New(sizeof(ffi_closure), callback_prep, fn->info); + if (fn->info->closurePool == NULL) { + rb_raise(rb_eNoMemError, "failed to create closure pool"); + } + } + +#if defined(DEFER_ASYNC_CALLBACK) + if (async_cb_thread == Qnil) { + async_cb_thread = rb_thread_create(async_cb_event, NULL); + } +#endif + + fn->closure = rbffi_Closure_Alloc(fn->info->closurePool); + fn->closure->info = fn; + fn->base.memory.address = fn->closure->code; + fn->base.memory.size = sizeof(*fn->closure); + fn->autorelease = true; + + } else { + rb_raise(rb_eTypeError, "wrong argument type %s, expected pointer or proc", + rb_obj_classname(rbProc)); + } + + fn->rbProc = rbProc; + + return self; +} + +/* + * call-seq: call(*args) + * @param [Array] args function arguments + * @return [FFI::Type] + * Call the function + */ +static VALUE +function_call(int argc, VALUE* argv, VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + return (*fn->info->invoke)(argc, argv, fn->base.memory.address, fn->info); +} + +/* + * call-seq: attach(m, name) + * @param [Module] m + * @param [String] name + * @return [self] + * Attach a Function to the Module +m+ as +name+. 
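+ * @example
+ *   # Illustrative only: MyLib and the native address are assumptions made for
+ *   # this example, not names used elsewhere in this library.
+ *   fn = FFI::Function.new(:int, [:int], address_of_native_increment)
+ *   fn.attach(MyLib, "increment")
+ *   MyLib.increment(41)   # calls the attached native function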
+ */ +static VALUE +function_attach(VALUE self, VALUE module, VALUE name) +{ + Function* fn; + char var[1024]; + + Data_Get_Struct(self, Function, fn); + + if (fn->info->parameterCount == -1) { + rb_raise(rb_eRuntimeError, "cannot attach variadic functions"); + return Qnil; + } + + if (!rb_obj_is_kind_of(module, rb_cModule)) { + rb_raise(rb_eRuntimeError, "trying to attach function to non-module"); + return Qnil; + } + + if (fn->methodHandle == NULL) { + fn->methodHandle = rbffi_MethodHandle_Alloc(fn->info, fn->base.memory.address); + } + + /* + * Stash the Function in a module variable so it does not get garbage collected + */ + snprintf(var, sizeof(var), "@@%s", StringValueCStr(name)); + rb_cv_set(module, var, self); + + rb_define_singleton_method(module, StringValueCStr(name), + rbffi_MethodHandle_CodeAddress(fn->methodHandle), -1); + + + rb_define_method(module, StringValueCStr(name), + rbffi_MethodHandle_CodeAddress(fn->methodHandle), -1); + + return self; +} + +/* + * call-seq: autorelease = autorelease + * @param [Boolean] autorelease + * @return [self] + * Set +autorelease+ attribute (See {Pointer}). + */ +static VALUE +function_set_autorelease(VALUE self, VALUE autorelease) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + fn->autorelease = RTEST(autorelease); + + return self; +} + +static VALUE +function_autorelease_p(VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + return fn->autorelease ? Qtrue : Qfalse; +} + +/* + * call-seq: free + * @return [self] + * Free memory allocated by Function. + */ +static VALUE +function_release(VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + if (fn->closure == NULL) { + rb_raise(rb_eRuntimeError, "cannot free function which was not allocated"); + } + + rbffi_Closure_Free(fn->closure); + fn->closure = NULL; + + return self; +} + +static void +callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data) +{ + struct gvl_callback cb = { 0 }; + + cb.closure = (Closure *) user_data; + cb.retval = retval; + cb.parameters = parameters; + cb.done = false; + cb.frame = rbffi_frame_current(); + + if (cb.frame != NULL) cb.frame->exc = Qnil; + + if (ruby_native_thread_p()) { + if(ruby_thread_has_gvl_p()) { + callback_with_gvl(&cb); + } else { + rb_thread_call_with_gvl(callback_with_gvl, &cb); + } +#if defined(DEFER_ASYNC_CALLBACK) && !defined(_WIN32) + } else { + bool empty = false; + + pthread_mutex_init(&cb.async_mutex, NULL); + pthread_cond_init(&cb.async_cond, NULL); + + /* Now signal the async callback thread */ + pthread_mutex_lock(&async_cb_mutex); + empty = async_cb_list == NULL; + cb.next = async_cb_list; + async_cb_list = &cb; + + pthread_cond_signal(&async_cb_cond); + pthread_mutex_unlock(&async_cb_mutex); + + /* Wait for the thread executing the ruby callback to signal it is done */ + pthread_mutex_lock(&cb.async_mutex); + while (!cb.done) { + pthread_cond_wait(&cb.async_cond, &cb.async_mutex); + } + pthread_mutex_unlock(&cb.async_mutex); + pthread_cond_destroy(&cb.async_cond); + pthread_mutex_destroy(&cb.async_mutex); + +#elif defined(DEFER_ASYNC_CALLBACK) && defined(_WIN32) + } else { + bool empty = false; + + cb.async_event = CreateEvent(NULL, FALSE, FALSE, NULL); + + /* Now signal the async callback thread */ + EnterCriticalSection(&async_cb_lock); + empty = async_cb_list == NULL; + cb.next = async_cb_list; + async_cb_list = &cb; + LeaveCriticalSection(&async_cb_lock); + + SetEvent(async_cb_cond); + + /* Wait for the thread executing the ruby callback to 
signal it is done */ + WaitForSingleObject(cb.async_event, INFINITE); + CloseHandle(cb.async_event); +#endif + } +} + +#if defined(DEFER_ASYNC_CALLBACK) +struct async_wait { + void* cb; + bool stop; +}; + +static void * async_cb_wait(void *); +static void async_cb_stop(void *); + +static VALUE +async_cb_event(void* unused) +{ + struct async_wait w = { 0 }; + + w.stop = false; + while (!w.stop) { + rb_thread_call_without_gvl(async_cb_wait, &w, async_cb_stop, &w); + if (w.cb != NULL) { + /* Start up a new ruby thread to run the ruby callback */ + rb_thread_create(async_cb_call, w.cb); + } + } + + return Qnil; +} + +#ifdef _WIN32 +static void * +async_cb_wait(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + w->cb = NULL; + + EnterCriticalSection(&async_cb_lock); + + while (!w->stop && async_cb_list == NULL) { + LeaveCriticalSection(&async_cb_lock); + WaitForSingleObject(async_cb_cond, INFINITE); + EnterCriticalSection(&async_cb_lock); + } + + if (async_cb_list != NULL) { + w->cb = async_cb_list; + async_cb_list = async_cb_list->next; + } + + LeaveCriticalSection(&async_cb_lock); + + return NULL; +} + +static void +async_cb_stop(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + EnterCriticalSection(&async_cb_lock); + w->stop = true; + LeaveCriticalSection(&async_cb_lock); + SetEvent(async_cb_cond); +} + +#else +static void * +async_cb_wait(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + w->cb = NULL; + + pthread_mutex_lock(&async_cb_mutex); + + while (!w->stop && async_cb_list == NULL) { + pthread_cond_wait(&async_cb_cond, &async_cb_mutex); + } + + if (async_cb_list != NULL) { + w->cb = async_cb_list; + async_cb_list = async_cb_list->next; + } + + pthread_mutex_unlock(&async_cb_mutex); + + return NULL; +} + +static void +async_cb_stop(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + pthread_mutex_lock(&async_cb_mutex); + w->stop = true; + pthread_cond_signal(&async_cb_cond); + pthread_mutex_unlock(&async_cb_mutex); +} +#endif + +static VALUE +async_cb_call(void *data) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + callback_with_gvl(data); + + /* Signal the original native thread that the ruby code has completed */ +#ifdef _WIN32 + SetEvent(cb->async_event); +#else + pthread_mutex_lock(&cb->async_mutex); + cb->done = true; + pthread_cond_signal(&cb->async_cond); + pthread_mutex_unlock(&cb->async_mutex); +#endif + + return Qnil; +} + +#endif + +static void * +callback_with_gvl(void* data) +{ + rb_rescue2(invoke_callback, (VALUE) data, save_callback_exception, (VALUE) data, rb_eException, (VALUE) 0); + return NULL; +} + +static VALUE +invoke_callback(VALUE data) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + Function* fn = (Function *) cb->closure->info; + FunctionType *cbInfo = fn->info; + Type* returnType = cbInfo->returnType; + void* retval = cb->retval; + void** parameters = cb->parameters; + VALUE* rbParams; + VALUE rbReturnType = cbInfo->rbReturnType; + VALUE rbReturnValue; + int i; + + rbParams = ALLOCA_N(VALUE, cbInfo->parameterCount); + for (i = 0; i < cbInfo->parameterCount; ++i) { + VALUE param; + Type* paramType = cbInfo->parameterTypes[i]; + VALUE rbParamType = rb_ary_entry(cbInfo->rbParameterTypes, i); + + if (unlikely(paramType->nativeType == NATIVE_MAPPED)) { + rbParamType = ((MappedType *) paramType)->rbType; + paramType = ((MappedType *) paramType)->type; + } + + switch (paramType->nativeType) { + case NATIVE_INT8: + param = INT2NUM(*(int8_t *) 
parameters[i]); + break; + case NATIVE_UINT8: + param = UINT2NUM(*(uint8_t *) parameters[i]); + break; + case NATIVE_INT16: + param = INT2NUM(*(int16_t *) parameters[i]); + break; + case NATIVE_UINT16: + param = UINT2NUM(*(uint16_t *) parameters[i]); + break; + case NATIVE_INT32: + param = INT2NUM(*(int32_t *) parameters[i]); + break; + case NATIVE_UINT32: + param = UINT2NUM(*(uint32_t *) parameters[i]); + break; + case NATIVE_INT64: + param = LL2NUM(*(int64_t *) parameters[i]); + break; + case NATIVE_UINT64: + param = ULL2NUM(*(uint64_t *) parameters[i]); + break; + case NATIVE_LONG: + param = LONG2NUM(*(long *) parameters[i]); + break; + case NATIVE_ULONG: + param = ULONG2NUM(*(unsigned long *) parameters[i]); + break; + case NATIVE_FLOAT32: + param = rb_float_new(*(float *) parameters[i]); + break; + case NATIVE_FLOAT64: + param = rb_float_new(*(double *) parameters[i]); + break; + case NATIVE_LONGDOUBLE: + param = rbffi_longdouble_new(*(long double *) parameters[i]); + break; + case NATIVE_STRING: + param = (*(void **) parameters[i] != NULL) ? rb_str_new2(*(char **) parameters[i]) : Qnil; + break; + case NATIVE_POINTER: + param = rbffi_Pointer_NewInstance(*(void **) parameters[i]); + break; + case NATIVE_BOOL: + param = (*(uint8_t *) parameters[i]) ? Qtrue : Qfalse; + break; + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + case NATIVE_STRUCT: + param = rbffi_NativeValue_ToRuby(paramType, rbParamType, parameters[i]); + break; + + default: + param = Qnil; + break; + } + + /* Convert the native value into a custom ruby value */ + if (unlikely(cbInfo->parameterTypes[i]->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { param, Qnil }; + param = rb_funcall2(((MappedType *) cbInfo->parameterTypes[i])->rbConverter, id_from_native, 2, values); + } + + rbParams[i] = param; + } + + rbReturnValue = rb_funcall2(fn->rbProc, id_call, cbInfo->parameterCount, rbParams); + + if (unlikely(returnType->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { rbReturnValue, Qnil }; + rbReturnValue = rb_funcall2(((MappedType *) returnType)->rbConverter, id_to_native, 2, values); + rbReturnType = ((MappedType *) returnType)->rbType; + returnType = ((MappedType* ) returnType)->type; + } + + if (rbReturnValue == Qnil || TYPE(rbReturnValue) == T_NIL) { + memset(retval, 0, returnType->ffiType->size); + } else switch (returnType->nativeType) { + case NATIVE_INT8: + case NATIVE_INT16: + case NATIVE_INT32: + *((ffi_sarg *) retval) = NUM2INT(rbReturnValue); + break; + case NATIVE_UINT8: + case NATIVE_UINT16: + case NATIVE_UINT32: + *((ffi_arg *) retval) = NUM2UINT(rbReturnValue); + break; + case NATIVE_INT64: + *((int64_t *) retval) = NUM2LL(rbReturnValue); + break; + case NATIVE_UINT64: + *((uint64_t *) retval) = NUM2ULL(rbReturnValue); + break; + case NATIVE_LONG: + *((ffi_sarg *) retval) = NUM2LONG(rbReturnValue); + break; + case NATIVE_ULONG: + *((ffi_arg *) retval) = NUM2ULONG(rbReturnValue); + break; + case NATIVE_FLOAT32: + *((float *) retval) = (float) NUM2DBL(rbReturnValue); + break; + case NATIVE_FLOAT64: + *((double *) retval) = NUM2DBL(rbReturnValue); + break; + case NATIVE_LONGDOUBLE: + *((long double *) retval) = rbffi_num2longdouble(rbReturnValue); + break; + case NATIVE_POINTER: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) { + *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address; + } else { + /* Default to returning NULL if not a value pointer object. 
handles nil case as well */ + *((void **) retval) = NULL; + } + break; + + case NATIVE_BOOL: + *((ffi_arg *) retval) = rbReturnValue == Qtrue; + break; + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) { + + *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address; + + } else if (rb_obj_is_kind_of(rbReturnValue, rb_cProc) || rb_respond_to(rbReturnValue, id_call)) { + VALUE function; + + function = rbffi_Function_ForProc(rbReturnType, rbReturnValue); + + *((void **) retval) = ((AbstractMemory *) DATA_PTR(function))->address; + } else { + *((void **) retval) = NULL; + } + break; + + case NATIVE_STRUCT: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_StructClass)) { + AbstractMemory* memory = ((Struct *) DATA_PTR(rbReturnValue))->pointer; + + if (memory->address != NULL) { + memcpy(retval, memory->address, returnType->ffiType->size); + + } else { + memset(retval, 0, returnType->ffiType->size); + } + + } else { + memset(retval, 0, returnType->ffiType->size); + } + break; + + default: + *((ffi_arg *) retval) = 0; + break; + } + + return Qnil; +} + +static VALUE +save_callback_exception(VALUE data, VALUE exc) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + memset(cb->retval, 0, ((Function *) cb->closure->info)->info->returnType->ffiType->size); + if (cb->frame != NULL) cb->frame->exc = exc; + + return Qnil; +} + +static bool +callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + FunctionType* fnInfo = (FunctionType *) ctx; + ffi_status ffiStatus; + + ffiStatus = ffi_prep_closure_loc(code, &fnInfo->ffi_cif, callback_invoke, closure, code); + if (ffiStatus != FFI_OK) { + snprintf(errmsg, errmsgsize, "ffi_prep_closure_loc failed. status=%#x", ffiStatus); + return false; + } + + return true; +} + +void +rbffi_Function_Init(VALUE moduleFFI) +{ + rbffi_FunctionInfo_Init(moduleFFI); + /* + * Document-class: FFI::Function < FFI::Pointer + */ + rbffi_FunctionClass = rb_define_class_under(moduleFFI, "Function", rbffi_PointerClass); + + rb_global_variable(&rbffi_FunctionClass); + rb_define_alloc_func(rbffi_FunctionClass, function_allocate); + + rb_define_method(rbffi_FunctionClass, "initialize", function_initialize, -1); + rb_define_method(rbffi_FunctionClass, "initialize_copy", function_initialize_copy, 1); + rb_define_method(rbffi_FunctionClass, "call", function_call, -1); + rb_define_method(rbffi_FunctionClass, "attach", function_attach, 2); + rb_define_method(rbffi_FunctionClass, "free", function_release, 0); + rb_define_method(rbffi_FunctionClass, "autorelease=", function_set_autorelease, 1); + /* + * call-seq: autorelease + * @return [Boolean] + * Get +autorelease+ attribute. + * Synonymous for {#autorelease?}. + */ + rb_define_method(rbffi_FunctionClass, "autorelease", function_autorelease_p, 0); + /* + * call-seq: autorelease? + * @return [Boolean] +autorelease+ attribute + * Get +autorelease+ attribute. 
+ */ + rb_define_method(rbffi_FunctionClass, "autorelease?", function_autorelease_p, 0); + + id_call = rb_intern("call"); + id_cbtable = rb_intern("@__ffi_callback_table__"); + id_cb_ref = rb_intern("@__ffi_callback__"); + id_to_native = rb_intern("to_native"); + id_from_native = rb_intern("from_native"); +#if defined(_WIN32) + InitializeCriticalSection(&async_cb_lock); + async_cb_cond = CreateEvent(NULL, FALSE, FALSE, NULL); +#endif +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Function.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Function.h new file mode 100644 index 0000000..052aaf6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Function.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
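These registrations are what back FFI::Function objects on the Ruby side. A hedged sketch of the call/attach/autorelease methods defined just above (MathHelpers is a hypothetical module name):

    add = FFI::Function.new(:int, [:int, :int]) { |a, b| a + b }
    add.call(1, 2)       # => 3, dispatched through function_call
    add.autorelease?     # => true by default; the closure is freed with the object

    module MathHelpers; end
    add.attach(MathHelpers, "add")   # defines MathHelpers.add via function_attach
    MathHelpers.add(20, 22)          # => 42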
+ */ + +#ifndef RBFFI_FUNCTION_H +#define RBFFI_FUNCTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +#endif + +#include + +typedef struct FunctionType_ FunctionType; + +#include "Type.h" +#include "Call.h" +#include "ClosurePool.h" + +struct FunctionType_ { + Type type; /* The native type of a FunctionInfo object */ + VALUE rbReturnType; + VALUE rbParameterTypes; + + Type* returnType; + Type** parameterTypes; + NativeType* nativeParameterTypes; + ffi_type* ffiReturnType; + ffi_type** ffiParameterTypes; + ffi_cif ffi_cif; + Invoker invoke; + ClosurePool* closurePool; + int parameterCount; + int flags; + ffi_abi abi; + int callbackCount; + VALUE* callbackParameters; + VALUE rbEnums; + bool ignoreErrno; + bool blocking; + bool hasStruct; +}; + +extern VALUE rbffi_FunctionTypeClass, rbffi_FunctionClass; + +void rbffi_Function_Init(VALUE moduleFFI); +VALUE rbffi_Function_NewInstance(VALUE functionInfo, VALUE proc); +VALUE rbffi_Function_ForProc(VALUE cbInfo, VALUE proc); +void rbffi_FunctionInfo_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_FUNCTION_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/FunctionInfo.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/FunctionInfo.c new file mode 100644 index 0000000..b7b23a7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/FunctionInfo.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (C) 2009 Andrea Fazzi + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +# include +#endif +#include +#include + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +#endif + +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Types.h" +#include "Type.h" +#include "StructByValue.h" +#include "Function.h" + +static VALUE fntype_allocate(VALUE klass); +static VALUE fntype_initialize(int argc, VALUE* argv, VALUE self); +static void fntype_mark(FunctionType*); +static void fntype_free(FunctionType *); + +VALUE rbffi_FunctionTypeClass = Qnil; + +static VALUE +fntype_allocate(VALUE klass) +{ + FunctionType* fnInfo; + VALUE obj = Data_Make_Struct(klass, FunctionType, fntype_mark, fntype_free, fnInfo); + + fnInfo->type.ffiType = &ffi_type_pointer; + fnInfo->type.nativeType = NATIVE_FUNCTION; + fnInfo->rbReturnType = Qnil; + fnInfo->rbParameterTypes = Qnil; + fnInfo->rbEnums = Qnil; + fnInfo->invoke = rbffi_CallFunction; + fnInfo->closurePool = NULL; + + return obj; +} + +static void +fntype_mark(FunctionType* fnInfo) +{ + rb_gc_mark(fnInfo->rbReturnType); + rb_gc_mark(fnInfo->rbParameterTypes); + rb_gc_mark(fnInfo->rbEnums); + if (fnInfo->callbackCount > 0 && fnInfo->callbackParameters != NULL) { + rb_gc_mark_locations(&fnInfo->callbackParameters[0], &fnInfo->callbackParameters[fnInfo->callbackCount]); + } +} + +static void +fntype_free(FunctionType* fnInfo) +{ + xfree(fnInfo->parameterTypes); + xfree(fnInfo->ffiParameterTypes); + xfree(fnInfo->nativeParameterTypes); + xfree(fnInfo->callbackParameters); + if (fnInfo->closurePool != NULL) { + rbffi_ClosurePool_Free(fnInfo->closurePool); + } + xfree(fnInfo); +} + +/* + * call-seq: initialize(return_type, param_types, options={}) + * @param [Type, Symbol] return_type return type for the function + * @param [Array] param_types array of parameters types + * @param [Hash] options + * @option options [Boolean] :blocking set to true if the C function is a blocking call + * @option options [Symbol] :convention calling convention see {FFI::Library#calling_convention} + * @option options [FFI::Enums] :enums + * @return [self] + * A new FunctionType instance. 
+ */ +static VALUE +fntype_initialize(int argc, VALUE* argv, VALUE self) +{ + FunctionType *fnInfo; + ffi_status status; + VALUE rbReturnType = Qnil, rbParamTypes = Qnil, rbOptions = Qnil; + VALUE rbEnums = Qnil, rbConvention = Qnil, rbBlocking = Qnil; +#if defined(X86_WIN32) + VALUE rbConventionStr; +#endif + int i, nargs; + + nargs = rb_scan_args(argc, argv, "21", &rbReturnType, &rbParamTypes, &rbOptions); + if (nargs >= 3 && rbOptions != Qnil) { + rbConvention = rb_hash_aref(rbOptions, ID2SYM(rb_intern("convention"))); + rbEnums = rb_hash_aref(rbOptions, ID2SYM(rb_intern("enums"))); + rbBlocking = rb_hash_aref(rbOptions, ID2SYM(rb_intern("blocking"))); + } + + Check_Type(rbParamTypes, T_ARRAY); + + Data_Get_Struct(self, FunctionType, fnInfo); + fnInfo->parameterCount = (int) RARRAY_LEN(rbParamTypes); + fnInfo->parameterTypes = xcalloc(fnInfo->parameterCount, sizeof(*fnInfo->parameterTypes)); + fnInfo->ffiParameterTypes = xcalloc(fnInfo->parameterCount, sizeof(ffi_type *)); + fnInfo->nativeParameterTypes = xcalloc(fnInfo->parameterCount, sizeof(*fnInfo->nativeParameterTypes)); + fnInfo->rbParameterTypes = rb_ary_new2(fnInfo->parameterCount); + fnInfo->rbEnums = rbEnums; + fnInfo->blocking = RTEST(rbBlocking); + fnInfo->hasStruct = false; + + for (i = 0; i < fnInfo->parameterCount; ++i) { + VALUE entry = rb_ary_entry(rbParamTypes, i); + VALUE type = rbffi_Type_Lookup(entry); + + if (!RTEST(type)) { + VALUE typeName = rb_funcall2(entry, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid parameter type (%s)", RSTRING_PTR(typeName)); + } + + if (rb_obj_is_kind_of(type, rbffi_FunctionTypeClass)) { + REALLOC_N(fnInfo->callbackParameters, VALUE, fnInfo->callbackCount + 1); + fnInfo->callbackParameters[fnInfo->callbackCount++] = type; + } + + if (rb_obj_is_kind_of(type, rbffi_StructByValueClass)) { + fnInfo->hasStruct = true; + } + + rb_ary_push(fnInfo->rbParameterTypes, type); + Data_Get_Struct(type, Type, fnInfo->parameterTypes[i]); + fnInfo->ffiParameterTypes[i] = fnInfo->parameterTypes[i]->ffiType; + fnInfo->nativeParameterTypes[i] = fnInfo->parameterTypes[i]->nativeType; + } + + fnInfo->rbReturnType = rbffi_Type_Lookup(rbReturnType); + if (!RTEST(fnInfo->rbReturnType)) { + VALUE typeName = rb_funcall2(rbReturnType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid return type (%s)", RSTRING_PTR(typeName)); + } + + if (rb_obj_is_kind_of(fnInfo->rbReturnType, rbffi_StructByValueClass)) { + fnInfo->hasStruct = true; + } + + Data_Get_Struct(fnInfo->rbReturnType, Type, fnInfo->returnType); + fnInfo->ffiReturnType = fnInfo->returnType->ffiType; + +#if defined(X86_WIN32) + rbConventionStr = (rbConvention != Qnil) ? rb_funcall2(rbConvention, rb_intern("to_s"), 0, NULL) : Qnil; + fnInfo->abi = (rbConventionStr != Qnil && strcmp(StringValueCStr(rbConventionStr), "stdcall") == 0) + ? 
FFI_STDCALL : FFI_DEFAULT_ABI; +#else + fnInfo->abi = FFI_DEFAULT_ABI; +#endif + + status = ffi_prep_cif(&fnInfo->ffi_cif, fnInfo->abi, fnInfo->parameterCount, + fnInfo->ffiReturnType, fnInfo->ffiParameterTypes); + switch (status) { + case FFI_BAD_ABI: + rb_raise(rb_eArgError, "Invalid ABI specified"); + case FFI_BAD_TYPEDEF: + rb_raise(rb_eArgError, "Invalid argument type specified"); + case FFI_OK: + break; + default: + rb_raise(rb_eArgError, "Unknown FFI error"); + } + + fnInfo->invoke = rbffi_GetInvoker(fnInfo); + + return self; +} + +/* + * call-seq: result_type + * @return [Type] + * Get the return type of the function type + */ +static VALUE +fntype_result_type(VALUE self) +{ + FunctionType* ft; + + Data_Get_Struct(self, FunctionType, ft); + + return ft->rbReturnType; +} + +/* + * call-seq: param_types + * @return [Array] + * Get parameters types. + */ +static VALUE +fntype_param_types(VALUE self) +{ + FunctionType* ft; + + Data_Get_Struct(self, FunctionType, ft); + + return rb_ary_dup(ft->rbParameterTypes); +} + +void +rbffi_FunctionInfo_Init(VALUE moduleFFI) +{ + VALUE ffi_Type; + + ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::FunctionType < FFI::Type + */ + rbffi_FunctionTypeClass = rb_define_class_under(moduleFFI, "FunctionType",ffi_Type); + rb_global_variable(&rbffi_FunctionTypeClass); + /* + * Document-const: FFI::CallbackInfo = FFI::FunctionType + */ + rb_define_const(moduleFFI, "CallbackInfo", rbffi_FunctionTypeClass); + /* + * Document-const: FFI::FunctionInfo = FFI::FunctionType + */ + rb_define_const(moduleFFI, "FunctionInfo", rbffi_FunctionTypeClass); + /* + * Document-const: FFI::Type::Function = FFI::FunctionType + */ + rb_define_const(ffi_Type, "Function", rbffi_FunctionTypeClass); + + rb_define_alloc_func(rbffi_FunctionTypeClass, fntype_allocate); + rb_define_method(rbffi_FunctionTypeClass, "initialize", fntype_initialize, -1); + rb_define_method(rbffi_FunctionTypeClass, "result_type", fntype_result_type, 0); + rb_define_method(rbffi_FunctionTypeClass, "param_types", fntype_param_types, 0); + +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LastError.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LastError.c new file mode 100644 index 0000000..8a460f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LastError.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Aman Gupta + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
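FFI::FunctionType (aliased just above as FFI::CallbackInfo and FFI::FunctionInfo) can also be built directly. A small sketch using the result_type/param_types readers defined above; FFI::Type constants are used so the arguments are already values rbffi_Type_Lookup accepts:

    fn_type = FFI::FunctionType.new(FFI::Type::INT32,
                                    [FFI::Type::INT32, FFI::Type::INT32])
    fn_type.result_type   # => FFI::Type::INT32
    fn_type.param_types   # => [FFI::Type::INT32, FFI::Type::INT32]
    FFI::CallbackInfo.equal?(FFI::FunctionType)   # => true, same class object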
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +#endif +#include +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +#endif +#include +#include + +#include "LastError.h" + +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__) +# include +# define USE_PTHREAD_LOCAL +#endif + +#if defined(__CYGWIN__) +typedef uint32_t DWORD; +DWORD __stdcall GetLastError(void); +void __stdcall SetLastError(DWORD); +#endif + +typedef struct ThreadData { + int td_errno; +#if defined(_WIN32) || defined(__CYGWIN__) + DWORD td_winapi_errno; +#endif +} ThreadData; + +#if defined(USE_PTHREAD_LOCAL) +static pthread_key_t threadDataKey; +#endif + +static inline ThreadData* thread_data_get(void); + +#if defined(USE_PTHREAD_LOCAL) + +static ThreadData* +thread_data_init(void) +{ + ThreadData* td = xcalloc(1, sizeof(ThreadData)); + + pthread_setspecific(threadDataKey, td); + + return td; +} + + +static inline ThreadData* +thread_data_get(void) +{ + ThreadData* td = pthread_getspecific(threadDataKey); + return td != NULL ? td : thread_data_init(); +} + +static void +thread_data_free(void *ptr) +{ + xfree(ptr); +} + +#else +static ID id_thread_data; + +static ThreadData* +thread_data_init(void) +{ + ThreadData* td; + VALUE obj; + + obj = Data_Make_Struct(rb_cObject, ThreadData, NULL, -1, td); + rb_thread_local_aset(rb_thread_current(), id_thread_data, obj); + + return td; +} + +static inline ThreadData* +thread_data_get() +{ + VALUE obj = rb_thread_local_aref(rb_thread_current(), id_thread_data); + + if (obj != Qnil && TYPE(obj) == T_DATA) { + return (ThreadData *) DATA_PTR(obj); + } + + return thread_data_init(); +} + +#endif + + +/* + * call-seq: error + * @return [Numeric] + * Get +errno+ value. + */ +static VALUE +get_last_error(VALUE self) +{ + return INT2NUM(thread_data_get()->td_errno); +} + +#if defined(_WIN32) || defined(__CYGWIN__) +/* + * call-seq: winapi_error + * @return [Numeric] + * Get +GetLastError()+ value. Only Windows or Cygwin. + */ +static VALUE +get_last_winapi_error(VALUE self) +{ + return INT2NUM(thread_data_get()->td_winapi_errno); +} +#endif + + +/* + * call-seq: error(error) + * @param [Numeric] error + * @return [nil] + * Set +errno+ value. + */ +static VALUE +set_last_error(VALUE self, VALUE error) +{ + +#ifdef _WIN32 + SetLastError(NUM2INT(error)); +#else + errno = NUM2INT(error); +#endif + + return Qnil; +} + +#if defined(_WIN32) || defined(__CYGWIN__) +/* + * call-seq: error(error) + * @param [Numeric] error + * @return [nil] + * Set +GetLastError()+ value. Only on Windows and Cygwin. 
+ */ +static VALUE +set_last_winapi_error(VALUE self, VALUE error) +{ + SetLastError(NUM2INT(error)); + return Qnil; +} +#endif + + +void +rbffi_save_errno(void) +{ + int error = 0; +#ifdef _WIN32 + error = GetLastError(); +#else + error = errno; +#endif + +#if defined(_WIN32) || defined(__CYGWIN__) + DWORD winapi_error = GetLastError(); + thread_data_get()->td_winapi_errno = winapi_error; +#endif + + thread_data_get()->td_errno = error; +} + +void +rbffi_LastError_Init(VALUE moduleFFI) +{ + /* + * Document-module: FFI::LastError + * This module defines a couple of method to set and get +errno+ + * for current thread. + */ + VALUE moduleError = rb_define_module_under(moduleFFI, "LastError"); + + rb_define_module_function(moduleError, "error", get_last_error, 0); + rb_define_module_function(moduleError, "error=", set_last_error, 1); + +#if defined(_WIN32) || defined(__CYGWIN__) + rb_define_module_function(moduleError, "winapi_error", get_last_winapi_error, 0); + rb_define_module_function(moduleError, "winapi_error=", set_last_winapi_error, 1); +#endif + +#if defined(USE_PTHREAD_LOCAL) + pthread_key_create(&threadDataKey, thread_data_free); +#else + id_thread_data = rb_intern("ffi_thread_local_data"); +#endif /* USE_PTHREAD_LOCAL */ +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LastError.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LastError.h new file mode 100644 index 0000000..ee1dfbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LastError.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
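On the Ruby side these module functions appear as FFI::LastError (and the FFI.errno convenience wrapper). A hedged sketch; strtol is used here only because it sets errno on overflow, and the exact errno value is platform-dependent:

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :strtol, [:string, :pointer, :int], :long
    end

    FFI::LastError.error = 0
    LibC.strtol("9" * 64, nil, 10)   # overflows; libc sets errno to ERANGE
    FFI::LastError.error             # => ERANGE, saved per thread by rbffi_save_errno
    FFI.errno                        # same value via the lib/ffi wrapper
    # FFI::LastError.winapi_error is additionally defined on Windows/Cygwin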
+ */ + +#ifndef RBFFI_LASTERROR_H +#define RBFFI_LASTERROR_H + +#ifdef __cplusplus +extern "C" { +#endif + + +void rbffi_LastError_Init(VALUE moduleFFI); + +void rbffi_save_errno(void); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_LASTERROR_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.c new file mode 100644 index 0000000..c95f2fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.c @@ -0,0 +1,65 @@ +#include "LongDouble.h" +#include +#include +#include + +#if defined (__CYGWIN__) || defined(__INTERIX) || defined(_MSC_VER) +# define strtold(str, endptr) ((long double) strtod((str), (endptr))) +#endif /* defined (__CYGWIN__) */ + +static VALUE rb_cBigDecimal = Qnil; +static VALUE bigdecimal_load(VALUE unused); +static VALUE bigdecimal_failed(VALUE value, VALUE exc); + +VALUE +rbffi_longdouble_new(long double ld) +{ + if (!RTEST(rb_cBigDecimal)) { + /* allow fallback if the bigdecimal library is unavailable in future ruby versions */ + rb_cBigDecimal = rb_rescue(bigdecimal_load, Qnil, bigdecimal_failed, rb_cObject); + } + + if (RTEST(rb_cBigDecimal) && rb_cBigDecimal != rb_cObject) { + char buf[128]; + return rb_funcall(rb_mKernel, rb_intern("BigDecimal"), 1, rb_str_new(buf, sprintf(buf, "%.35Le", ld))); + } + + /* Fall through to handling as a float */ + return rb_float_new(ld); +} + +long double +rbffi_num2longdouble(VALUE value) +{ + if (TYPE(value) == T_FLOAT) { + return rb_num2dbl(value); + } + + if (!RTEST(rb_cBigDecimal) && rb_const_defined(rb_cObject, rb_intern("BigDecimal"))) { + rb_cBigDecimal = rb_const_get(rb_cObject, rb_intern("BigDecimal")); + } + + if (RTEST(rb_cBigDecimal) && rb_cBigDecimal != rb_cObject && RTEST(rb_obj_is_kind_of(value, rb_cBigDecimal))) { + VALUE s = rb_funcall(value, rb_intern("to_s"), 1, rb_str_new2("E")); + long double ret = strtold(RSTRING_PTR(s), NULL); + RB_GC_GUARD(s); + return ret; + } + + /* Fall through to handling as a float */ + return rb_num2dbl(value); +} + + +static VALUE +bigdecimal_load(VALUE unused) +{ + rb_require("bigdecimal"); + return rb_const_get(rb_cObject, rb_intern("BigDecimal")); +} + +static VALUE +bigdecimal_failed(VALUE value, VALUE exc) +{ + return value; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.h new file mode 100644 index 0000000..079e890 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/LongDouble.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2012, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
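rbffi_longdouble_new and rbffi_num2longdouble above are what let :long_double values round-trip through BigDecimal. A hedged usage sketch (strtold is a standard libc function; the module name is illustrative):

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :strtold, [:string, :pointer], :long_double
    end

    x = LibC.strtold("3.14159", nil)
    x.class   # => BigDecimal, built by rbffi_longdouble_new
    # Passing a BigDecimal (or a plain Float) to a :long_double parameter
    # goes back through rbffi_num2longdouble.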
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_LONGDOUBLE_H +#define RBFFI_LONGDOUBLE_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern VALUE rbffi_longdouble_new(long double ld); +extern long double rbffi_num2longdouble(VALUE value); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_LONGDOUBLE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Makefile new file mode 100644 index 0000000..cfe4522 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Makefile @@ -0,0 +1,269 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 +hdrdir = $(topdir) +arch_hdrdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/Users/arthur/.rvm/rubies/ruby-3.0.0 +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20220301-11532-i1dnqb +sitelibdir = $(DESTDIR)./.gem.20220301-11532-i1dnqb +sitedir = $(rubylibprefix)/site_ruby +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(datarootdir)/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(datarootdir)/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(SDKROOT)/usr/include +includedir = $(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(prefix)/etc +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = gcc -fdeclspec +CXX = g++ -fdeclspec +LIBRUBY 
= $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = extconf.h +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = +optflags = -O3 +debugflags = -ggdb3 +warnflags = +cppflags = +CCDLFLAGS = -fno-common +CFLAGS = $(CCDLFLAGS) -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi +DEFS = +CPPFLAGS = -DRUBY_EXTCONF_H=\"$(RUBY_EXTCONF_H)\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG) +ldflags = -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib +dldflags = -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -Wl,-undefined,dynamic_lookup -Wl,-multiply_defined,suppress +ARCH_FLAG = -m64 +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = ar +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.3.0 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = arm64-darwin21 +sitearch = $(arch) +ruby_version = 3.0.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h $(RUBY_EXTCONF_H) + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = /opt/homebrew/opt/coreutils/bin/gmkdir -p +INSTALL = /opt/homebrew/opt/coreutils/bin/ginstall -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. 
-L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) -lffi +ORIG_SRCS = AbstractMemory.c ArrayType.c Buffer.c Call.c ClosurePool.c DynamicLibrary.c Function.c FunctionInfo.c LastError.c LongDouble.c MappedType.c MemoryPointer.c MethodHandle.c Platform.c Pointer.c Struct.c StructByValue.c StructLayout.c Thread.c Type.c Types.c Variadic.c ffi.c +SRCS = $(ORIG_SRCS) +OBJS = AbstractMemory.o ArrayType.o Buffer.o Call.o ClosurePool.o DynamicLibrary.o Function.o FunctionInfo.o LastError.o LongDouble.o MappedType.o MemoryPointer.o MethodHandle.o Platform.o Pointer.o Struct.o StructByValue.o StructLayout.o Thread.o Type.o Types.o Variadic.o ffi.o +HDRS = $(srcdir)/AbstractMemory.h $(srcdir)/ArrayType.h $(srcdir)/Call.h $(srcdir)/ClosurePool.h $(srcdir)/DynamicLibrary.h $(srcdir)/Function.h $(srcdir)/LastError.h $(srcdir)/LongDouble.h $(srcdir)/MappedType.h $(srcdir)/MemoryPointer.h $(srcdir)/MethodHandle.h $(srcdir)/Platform.h $(srcdir)/Pointer.h $(srcdir)/Struct.h $(srcdir)/StructByValue.h $(srcdir)/Thread.h $(srcdir)/Type.h $(srcdir)/Types.h $(srcdir)/compat.h $(srcdir)/extconf.h $(srcdir)/rbffi.h $(srcdir)/rbffi_endian.h +LOCAL_HDRS = +TARGET = ffi_c +TARGET_NAME = ffi_c +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . +BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) 
$(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object $(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +$(OBJS): $(HDRS) $(ruby_headers) +LIBFFI_HOST=--host= +include ${srcdir}/libffi.darwin.mk diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.c new file mode 100644 index 0000000..d1a4189 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2010, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include +#include "rbffi.h" + +#include "Type.h" +#include "MappedType.h" + + +static VALUE mapped_allocate(VALUE); +static VALUE mapped_initialize(VALUE, VALUE); +static void mapped_mark(MappedType *); +static ID id_native_type, id_to_native, id_from_native; + +VALUE rbffi_MappedTypeClass = Qnil; + +static VALUE +mapped_allocate(VALUE klass) +{ + MappedType* m; + + VALUE obj = Data_Make_Struct(klass, MappedType, mapped_mark, -1, m); + + m->rbConverter = Qnil; + m->rbType = Qnil; + m->type = NULL; + m->base.nativeType = NATIVE_MAPPED; + m->base.ffiType = &ffi_type_void; + + return obj; +} + +/* + * call-seq: initialize(converter) + * @param [#native_type, #to_native, #from_native] converter +converter+ must respond to + * all these methods + * @return [self] + */ +static VALUE +mapped_initialize(VALUE self, VALUE rbConverter) +{ + MappedType* m = NULL; + + if (!rb_respond_to(rbConverter, id_native_type)) { + rb_raise(rb_eNoMethodError, "native_type method not implemented"); + } + + if (!rb_respond_to(rbConverter, id_to_native)) { + rb_raise(rb_eNoMethodError, "to_native method not implemented"); + } + + if (!rb_respond_to(rbConverter, id_from_native)) { + rb_raise(rb_eNoMethodError, "from_native method not implemented"); + } + + Data_Get_Struct(self, MappedType, m); + m->rbType = rb_funcall2(rbConverter, id_native_type, 0, NULL); + if (!(rb_obj_is_kind_of(m->rbType, rbffi_TypeClass))) { + rb_raise(rb_eTypeError, "native_type did not return instance of FFI::Type"); + } + + m->rbConverter = rbConverter; + Data_Get_Struct(m->rbType, Type, m->type); + m->base.ffiType = m->type->ffiType; + + return self; +} + +static void +mapped_mark(MappedType* m) +{ + rb_gc_mark(m->rbType); + rb_gc_mark(m->rbConverter); +} + +/* + * call-seq: mapped_type.native_type + * @return [Type] + * Get native type of mapped type. 
+ */ +static VALUE +mapped_native_type(VALUE self) +{ + MappedType*m = NULL; + Data_Get_Struct(self, MappedType, m); + + return m->rbType; +} + +/* + * call-seq: mapped_type.to_native(*args) + * @param args depends on {FFI::DataConverter} used to initialize +self+ + */ +static VALUE +mapped_to_native(int argc, VALUE* argv, VALUE self) +{ + MappedType*m = NULL; + + Data_Get_Struct(self, MappedType, m); + + return rb_funcall2(m->rbConverter, id_to_native, argc, argv); +} + +/* + * call-seq: mapped_type.from_native(*args) + * @param args depends on {FFI::DataConverter} used to initialize +self+ + */ +static VALUE +mapped_from_native(int argc, VALUE* argv, VALUE self) +{ + MappedType*m = NULL; + + Data_Get_Struct(self, MappedType, m); + + return rb_funcall2(m->rbConverter, id_from_native, argc, argv); +} + +void +rbffi_MappedType_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::Type::Mapped < FFI::Type + */ + rbffi_MappedTypeClass = rb_define_class_under(rbffi_TypeClass, "Mapped", rbffi_TypeClass); + + rb_global_variable(&rbffi_MappedTypeClass); + + id_native_type = rb_intern("native_type"); + id_to_native = rb_intern("to_native"); + id_from_native = rb_intern("from_native"); + + rb_define_alloc_func(rbffi_MappedTypeClass, mapped_allocate); + rb_define_method(rbffi_MappedTypeClass, "initialize", mapped_initialize, 1); + rb_define_method(rbffi_MappedTypeClass, "type", mapped_native_type, 0); + rb_define_method(rbffi_MappedTypeClass, "native_type", mapped_native_type, 0); + rb_define_method(rbffi_MappedTypeClass, "to_native", mapped_to_native, -1); + rb_define_method(rbffi_MappedTypeClass, "from_native", mapped_from_native, -1); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.h new file mode 100644 index 0000000..4b26cc1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MappedType.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2010, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
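FFI::Type::Mapped is usually reached through an FFI::DataConverter rather than instantiated directly. A sketch of a converter that satisfies the native_type/to_native/from_native checks enforced in mapped_initialize (SymbolConverter is hypothetical):

    # Expose Ruby symbols to C as strings
    module SymbolConverter
      extend FFI::DataConverter
      native_type FFI::Type::STRING

      def self.to_native(value, ctx)
        value.to_s
      end

      def self.from_native(value, ctx)
        value && value.to_sym
      end
    end

    mapped = FFI::Type::Mapped.new(SymbolConverter)
    mapped.native_type              # => FFI::Type::STRING
    mapped.to_native(:hello, nil)   # => "hello"
    mapped.from_native("hi", nil)   # => :hi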
+ */ + +#ifndef RBFFI_MAPPEDTYPE_H +#define RBFFI_MAPPEDTYPE_H + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct MappedType_ { + Type base; + Type* type; + VALUE rbConverter; + VALUE rbType; + +} MappedType; + +void rbffi_MappedType_Init(VALUE moduleFFI); + +extern VALUE rbffi_MappedTypeClass; + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_MAPPEDTYPE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.c new file mode 100644 index 0000000..9dc59bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include +#include "rbffi.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" + + +static VALUE memptr_allocate(VALUE klass); +static void memptr_release(Pointer* ptr); +static VALUE memptr_malloc(VALUE self, long size, long count, bool clear); +static VALUE memptr_free(VALUE self); + +VALUE rbffi_MemoryPointerClass; + +#define MEMPTR(obj) ((MemoryPointer *) rbffi_AbstractMemory_Cast(obj, rbffi_MemoryPointerClass)) + +VALUE +rbffi_MemoryPointer_NewInstance(long size, long count, bool clear) +{ + return memptr_malloc(memptr_allocate(rbffi_MemoryPointerClass), size, count, clear); +} + +static VALUE +memptr_allocate(VALUE klass) +{ + Pointer* p; + VALUE obj = Data_Make_Struct(klass, Pointer, NULL, memptr_release, p); + p->rbParent = Qnil; + p->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +/* + * call-seq: initialize(size, count=1, clear=true) + * @param [Fixnum, Bignum, Symbol, FFI::Type] size size of a memory cell (in bytes, or type whom size will be used) + * @param [Numeric] count number of cells in memory + * @param [Boolean] clear set memory to all-zero if +true+ + * @return [self] + * A new instance of FFI::MemoryPointer. + */ +static VALUE +memptr_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE size = Qnil, count = Qnil, clear = Qnil; + int nargs = rb_scan_args(argc, argv, "12", &size, &count, &clear); + + memptr_malloc(self, rbffi_type_size(size), nargs > 1 ? NUM2LONG(count) : 1, + RTEST(clear) || clear == Qnil); + + if (rb_block_given_p()) { + return rb_ensure(rb_yield, self, memptr_free, self); + } + + return self; +} + +static VALUE +memptr_malloc(VALUE self, long size, long count, bool clear) +{ + Pointer* p; + unsigned long msize; + + Data_Get_Struct(self, Pointer, p); + + msize = size * count; + + p->storage = xmalloc(msize + 7); + if (p->storage == NULL) { + rb_raise(rb_eNoMemError, "Failed to allocate memory size=%ld bytes", msize); + return Qnil; + } + p->autorelease = true; + p->memory.typeSize = (int) size; + p->memory.size = msize; + /* ensure the memory is aligned on at least a 8 byte boundary */ + p->memory.address = (char *) (((uintptr_t) p->storage + 0x7) & (uintptr_t) ~0x7ULL); + p->allocated = true; + + if (clear && p->memory.size > 0) { + memset(p->memory.address, 0, p->memory.size); + } + + return self; +} + +static VALUE +memptr_free(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->allocated) { + if (ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + ptr->allocated = false; + } + + return self; +} + +static void +memptr_release(Pointer* ptr) +{ + if (ptr->autorelease && ptr->allocated && ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + xfree(ptr); +} + +/* + * call-seq: from_string(s) + * @param [String] s string + * @return [MemoryPointer] + * Create a {MemoryPointer} with +s+ inside. + */ +static VALUE +memptr_s_from_string(VALUE klass, VALUE to_str) +{ + VALUE s = StringValue(to_str); + VALUE args[] = { INT2FIX(1), LONG2NUM(RSTRING_LEN(s) + 1), Qfalse }; + VALUE obj = rb_class_new_instance(3, args, klass); + rb_funcall(obj, rb_intern("put_string"), 2, INT2FIX(0), s); + + return obj; +} + +void +rbffi_MemoryPointer_Init(VALUE moduleFFI) +{ + VALUE ffi_Pointer; + + ffi_Pointer = rbffi_PointerClass; + + /* + * Document-class: FFI::MemoryPointer < FFI::Pointer + * A MemoryPointer is a specific {Pointer}. 
It points to a memory composed of cells. All cells have the + * same size. + * + * @example Create a new MemoryPointer + * mp = FFI::MemoryPointer.new(:long, 16) # Create a pointer on a memory of 16 long ints. + * @example Create a new MemoryPointer from a String + * mp1 = FFI::MemoryPointer.from_string("this is a string") + * # same as: + * mp2 = FFI::MemoryPointer.new(:char,16) + * mp2.put_string("this is a string") + */ + rbffi_MemoryPointerClass = rb_define_class_under(moduleFFI, "MemoryPointer", ffi_Pointer); + rb_global_variable(&rbffi_MemoryPointerClass); + + rb_define_alloc_func(rbffi_MemoryPointerClass, memptr_allocate); + rb_define_method(rbffi_MemoryPointerClass, "initialize", memptr_initialize, -1); + rb_define_singleton_method(rbffi_MemoryPointerClass, "from_string", memptr_s_from_string, 1); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.h new file mode 100644 index 0000000..1257683 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MemoryPointer.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2008, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
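Two details of memptr_initialize above are worth showing next to the class doc examples: the block form releases the allocation eagerly, and from_string allocates length + 1 bytes for the terminating NUL. A short sketch:

    FFI::MemoryPointer.new(:int, 4) do |ptr|
      ptr.write_array_of_int32([1, 2, 3, 4])
      ptr.read_array_of_int32(4)   # => [1, 2, 3, 4]
    end                            # backing memory freed here by memptr_free

    s = FFI::MemoryPointer.from_string("hello")
    s.size          # => 6 (five characters plus the NUL written by put_string)
    s.read_string   # => "hello"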
+ */ + +#ifndef RBFFI_MEMORYPOINTER_H +#define RBFFI_MEMORYPOINTER_H + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +#endif +#include + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_MemoryPointer_Init(VALUE moduleFFI); + extern VALUE rbffi_MemoryPointerClass; + extern VALUE rbffi_MemoryPointer_NewInstance(long size, long count, bool clear); +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_MEMORYPOINTER_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.c new file mode 100644 index 0000000..4dc85fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _WIN32 +# include +#endif +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdint.h" +# include "win32/stdbool.h" +#endif +#ifndef _WIN32 +# include +#endif +#include +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__) +# include +#endif + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Function.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "ClosurePool.h" +#include "MethodHandle.h" + + +#define MAX_METHOD_FIXED_ARITY (6) + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +#ifdef USE_RAW +# define METHOD_CLOSURE ffi_raw_closure +# define METHOD_PARAMS ffi_raw* +#else +# define METHOD_CLOSURE ffi_closure +# define METHOD_PARAMS void** +#endif + + + +static bool prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize); +static long trampoline_size(void); + +#if defined(__x86_64__) && (defined(__linux__) || defined(__APPLE__)) +# define CUSTOM_TRAMPOLINE 1 +#endif + + +struct MethodHandle { + Closure* closure; +}; + +static ClosurePool* defaultClosurePool; + + +MethodHandle* +rbffi_MethodHandle_Alloc(FunctionType* fnInfo, void* function) +{ + MethodHandle* handle; + Closure* closure = rbffi_Closure_Alloc(defaultClosurePool); + if (closure == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate closure from pool"); + return NULL; + } + + handle = xcalloc(1, sizeof(*handle)); + handle->closure = closure; + closure->info = fnInfo; + closure->function = function; + + return handle; +} + +void +rbffi_MethodHandle_Free(MethodHandle* handle) +{ + if (handle != NULL) { + rbffi_Closure_Free(handle->closure); + } +} + +rbffi_function_anyargs rbffi_MethodHandle_CodeAddress(MethodHandle* handle) +{ + return (rbffi_function_anyargs) handle->closure->code; +} + +#ifndef CUSTOM_TRAMPOLINE +static void attached_method_invoke(ffi_cif* cif, void* retval, METHOD_PARAMS parameters, void* user_data); + +static ffi_type* methodHandleParamTypes[3]; + +static ffi_cif mh_cif; + +static bool +prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + ffi_status ffiStatus; + +#if defined(USE_RAW) + ffiStatus = ffi_prep_raw_closure(code, &mh_cif, attached_method_invoke, closure); +#else + ffiStatus = ffi_prep_closure_loc(code, &mh_cif, attached_method_invoke, closure, code); +#endif + if (ffiStatus != FFI_OK) { + snprintf(errmsg, errmsgsize, "ffi_prep_closure_loc failed. 
status=%#x", ffiStatus); + return false; + } + + return true; +} + + +static long +trampoline_size(void) +{ + return sizeof(METHOD_CLOSURE); +} + +/* + * attached_method_invoke is used functions with more than 6 parameters, or + * with struct param or return values + */ +static void +attached_method_invoke(ffi_cif* cif, void* mretval, METHOD_PARAMS parameters, void* user_data) +{ + Closure* handle = (Closure *) user_data; + FunctionType* fnInfo = (FunctionType *) handle->info; + +#ifdef USE_RAW + int argc = parameters[0].sint; + VALUE* argv = *(VALUE **) ¶meters[1]; +#else + int argc = *(int *) parameters[0]; + VALUE* argv = *(VALUE **) parameters[1]; +#endif + + *(VALUE *) mretval = (*fnInfo->invoke)(argc, argv, handle->function, fnInfo); +} + +#endif + + + +#if defined(CUSTOM_TRAMPOLINE) +#if defined(__x86_64__) + +static VALUE custom_trampoline(int argc, VALUE* argv, VALUE self, Closure*); + +#define TRAMPOLINE_CTX_MAGIC (0xfee1deadcafebabe) +#define TRAMPOLINE_FUN_MAGIC (0xfeedfacebeeff00d) + +/* + * This is a hand-coded trampoline to speedup entry from ruby to the FFI translation + * layer for x86_64 arches. + * + * Since a ruby function has exactly 3 arguments, and the first 6 arguments are + * passed in registers for x86_64, we can tack on a context pointer by simply + * putting a value in %rcx, then jumping to the C trampoline code. + * + * This results in approx a 30% speedup for x86_64 FFI dispatch + */ +__asm__( + ".text\n\t" + ".globl ffi_trampoline\n\t" + ".globl _ffi_trampoline\n\t" + "ffi_trampoline:\n\t" + "_ffi_trampoline:\n\t" + "movabsq $0xfee1deadcafebabe, %rcx\n\t" + "movabsq $0xfeedfacebeeff00d, %r11\n\t" + "jmpq *%r11\n\t" + ".globl ffi_trampoline_end\n\t" + "ffi_trampoline_end:\n\t" + ".globl _ffi_trampoline_end\n\t" + "_ffi_trampoline_end:\n\t" +); + +static VALUE +custom_trampoline(int argc, VALUE* argv, VALUE self, Closure* handle) +{ + FunctionType* fnInfo = (FunctionType *) handle->info; + VALUE rbReturnValue; + + RB_GC_GUARD(rbReturnValue) = (*fnInfo->invoke)(argc, argv, handle->function, fnInfo); + RB_GC_GUARD(self); + + return rbReturnValue; +} + +#elif defined(__i386__) && 0 + +static VALUE custom_trampoline(void *args, Closure*); +#define TRAMPOLINE_CTX_MAGIC (0xfee1dead) +#define TRAMPOLINE_FUN_MAGIC (0xbeefcafe) + +/* + * This is a hand-coded trampoline to speed-up entry from ruby to the FFI translation + * layer for i386 arches. + * + * This does not make a discernible difference vs a raw closure, so for now, + * it is not enabled. 
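The custom x86_64 path above embeds two 64-bit magic constants (TRAMPOLINE_CTX_MAGIC and TRAMPOLINE_FUN_MAGIC) in a hand-written stub; per-closure setup then copies the stub and overwrites those placeholders with the real context and dispatch addresses, as the prep_trampoline/trampoline_offset code further down shows. The following is a minimal, stand-alone sketch of that copy-and-patch pattern, assuming an ordinary byte buffer rather than executable memory and using illustrative names only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTX_MAGIC 0xfee1deadcafebabeULL

/* A fake "template" stub containing a placeholder immediate to be patched. */
static const unsigned char stub_template[16] = {
    0x48, 0xb9,                                     /* movabsq $imm64, %rcx    */
    0xbe, 0xba, 0xfe, 0xca, 0xad, 0xde, 0xe1, 0xfe, /* little-endian CTX_MAGIC */
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90              /* padding (nop)           */
};

/* Find the byte offset of a 64-bit magic value inside a stub. */
static long find_magic(const unsigned char *code, size_t len, uint64_t magic)
{
    for (size_t i = 0; i + sizeof(magic) <= len; i++) {
        uint64_t v;
        memcpy(&v, code + i, sizeof(v));
        if (v == magic) return (long) i;
    }
    return -1;
}

int main(void)
{
    unsigned char code[sizeof(stub_template)];
    int dummy_ctx = 0;
    void *closure_ctx = &dummy_ctx;            /* stand-in for a per-closure context */

    memcpy(code, stub_template, sizeof(code)); /* 1. copy the template stub          */

    long off = find_magic(code, sizeof(code), CTX_MAGIC);
    if (off < 0) return 1;

    uint64_t addr = (uint64_t)(uintptr_t) closure_ctx;
    memcpy(code + off, &addr, sizeof(addr));   /* 2. patch in the real pointer value */

    printf("patched context pointer at offset %ld\n", off);
    return 0;
}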
+ */ +__asm__( + ".text\n\t" + ".globl ffi_trampoline\n\t" + ".globl _ffi_trampoline\n\t" + "ffi_trampoline:\n\t" + "_ffi_trampoline:\n\t" + "subl $12, %esp\n\t" + "leal 16(%esp), %eax\n\t" + "movl %eax, (%esp)\n\t" + "movl $0xfee1dead, 4(%esp)\n\t" + "movl $0xbeefcafe, %eax\n\t" + "call *%eax\n\t" + "addl $12, %esp\n\t" + "ret\n\t" + ".globl ffi_trampoline_end\n\t" + "ffi_trampoline_end:\n\t" + ".globl _ffi_trampoline_end\n\t" + "_ffi_trampoline_end:\n\t" +); + +static VALUE +custom_trampoline(void *args, Closure* handle) +{ + FunctionType* fnInfo = (FunctionType *) handle->info; + return (*fnInfo->invoke)(*(int *) args, *(VALUE **) (args + 4), handle->function, fnInfo); +} + +#endif /* __x86_64__ else __i386__ */ + +extern void ffi_trampoline(int argc, VALUE* argv, VALUE self); +extern void ffi_trampoline_end(void); +static int trampoline_offsets(long *, long *); + +static long trampoline_ctx_offset, trampoline_func_offset; + +static long +trampoline_offset(int off, const long value) +{ + char *ptr; + for (ptr = (char *) &ffi_trampoline + off; ptr < (char *) &ffi_trampoline_end; ++ptr) { + if (*(long *) ptr == value) { + return ptr - (char *) &ffi_trampoline; + } + } + + return -1; +} + +static int +trampoline_offsets(long* ctxOffset, long* fnOffset) +{ + *ctxOffset = trampoline_offset(0, TRAMPOLINE_CTX_MAGIC); + if (*ctxOffset == -1) { + return -1; + } + + *fnOffset = trampoline_offset(0, TRAMPOLINE_FUN_MAGIC); + if (*fnOffset == -1) { + return -1; + } + + return 0; +} + +static bool +prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + memcpy(code, (void*) &ffi_trampoline, trampoline_size()); + /* Patch the context and function addresses into the stub code */ + *(intptr_t *)((char*)code + trampoline_ctx_offset) = (intptr_t) closure; + *(intptr_t *)((char*)code + trampoline_func_offset) = (intptr_t) custom_trampoline; + + return true; +} + +static long +trampoline_size(void) +{ + return (char *) &ffi_trampoline_end - (char *) &ffi_trampoline; +} + +#endif /* CUSTOM_TRAMPOLINE */ + + +void +rbffi_MethodHandle_Init(VALUE module) +{ +#ifndef CUSTOM_TRAMPOLINE + ffi_status ffiStatus; +#endif + + defaultClosurePool = rbffi_ClosurePool_New((int) trampoline_size(), prep_trampoline, NULL); + +#if defined(CUSTOM_TRAMPOLINE) + if (trampoline_offsets(&trampoline_ctx_offset, &trampoline_func_offset) != 0) { + rb_raise(rb_eFatal, "Could not locate offsets in trampoline code"); + } +#else + methodHandleParamTypes[0] = &ffi_type_sint; + methodHandleParamTypes[1] = &ffi_type_pointer; + methodHandleParamTypes[2] = &ffi_type_ulong; + + ffiStatus = ffi_prep_cif(&mh_cif, FFI_DEFAULT_ABI, 3, &ffi_type_ulong, + methodHandleParamTypes); + if (ffiStatus != FFI_OK) { + rb_raise(rb_eFatal, "ffi_prep_cif failed. status=%#x", ffiStatus); + } + +#endif +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.h new file mode 100644 index 0000000..0dcc058 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/MethodHandle.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_METHODHANDLE_H +#define RBFFI_METHODHANDLE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "Function.h" + + +typedef struct MethodHandlePool MethodHandlePool; +typedef struct MethodHandle MethodHandle; +typedef VALUE (*rbffi_function_anyargs)(int argc, VALUE* argv, VALUE self); + + +MethodHandle* rbffi_MethodHandle_Alloc(FunctionType* fnInfo, void* function); +void rbffi_MethodHandle_Free(MethodHandle* handle); +rbffi_function_anyargs rbffi_MethodHandle_CodeAddress(MethodHandle* handle); +void rbffi_MethodHandle_Init(VALUE module); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_METHODHANDLE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Platform.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Platform.c new file mode 100644 index 0000000..d873026 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Platform.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +#endif +# include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdint.h" +# include "win32/stdbool.h" +#endif +#include +#include +#include "rbffi_endian.h" +#include "Platform.h" + +#if defined(__GNU__) || defined(__GLIBC__) +# include +#endif + +static VALUE PlatformModule = Qnil; + +static void +export_primitive_types(VALUE module) +{ +#define S(name, T) do { \ + typedef struct { char c; T v; } s; \ + rb_define_const(module, #name "_ALIGN", INT2NUM((sizeof(s) - sizeof(T)) * 8)); \ + rb_define_const(module, #name "_SIZE", INT2NUM(sizeof(T)* 8)); \ +} while(0) + S(INT8, char); + S(INT16, short); + S(INT32, int); + S(INT64, long long); + S(LONG, long); + S(FLOAT, float); + S(DOUBLE, double); + S(LONG_DOUBLE, long double); + S(ADDRESS, void*); +#undef S +} + +void +rbffi_Platform_Init(VALUE moduleFFI) +{ + PlatformModule = rb_define_module_under(moduleFFI, "Platform"); + rb_define_const(PlatformModule, "BYTE_ORDER", INT2FIX(BYTE_ORDER)); + rb_define_const(PlatformModule, "LITTLE_ENDIAN", INT2FIX(LITTLE_ENDIAN)); + rb_define_const(PlatformModule, "BIG_ENDIAN", INT2FIX(BIG_ENDIAN)); +#if defined(__GNU__) || defined(__GLIBC__) + rb_define_const(PlatformModule, "GNU_LIBC", rb_str_new2(LIBC_SO)); +#endif + export_primitive_types(PlatformModule); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Platform.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Platform.h new file mode 100644 index 0000000..5575e34 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Platform.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
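Platform.c's S(name, T) macro above measures each primitive type's alignment with the classic struct { char c; T v; } trick: the padding the compiler inserts after the leading char equals T's alignment, so sizeof(s) - sizeof(T), reported in bits, recovers it without relying on C11 _Alignof. A small stand-alone illustration of the same idea (the REPORT name and the chosen types are just for this sketch):

#include <stdio.h>
#include <stddef.h>

/* Report size and alignment of T in bits, the same way the macro does.
 * For these layouts sizeof(s) - sizeof(T) equals offsetof(s, v), which is
 * printed as a cross-check. */
#define REPORT(T) do {                                                        \
    typedef struct { char c; T v; } s_##T;                                    \
    printf("%-8s size=%3zu bits  align=%3zu bits  (offsetof check: %zu)\n",   \
           #T,                                                                \
           sizeof(T) * 8,                                                     \
           (sizeof(s_##T) - sizeof(T)) * 8,                                   \
           offsetof(s_##T, v) * 8);                                           \
} while (0)

int main(void)
{
    REPORT(char);
    REPORT(short);
    REPORT(int);
    REPORT(long);
    REPORT(double);
    return 0;
}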
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_PLATFORM_H +#define RBFFI_PLATFORM_H + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_Platform_Init(VALUE moduleFFI); + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_PLATFORM_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.c new file mode 100644 index 0000000..e5f24a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.c @@ -0,0 +1,508 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdint.h" +# include "win32/stdbool.h" +#endif +#include +#include +#include "rbffi.h" +#include "rbffi_endian.h" +#include "AbstractMemory.h" +#include "Pointer.h" + +#define POINTER(obj) rbffi_AbstractMemory_Cast((obj), rbffi_PointerClass) + +VALUE rbffi_PointerClass = Qnil; +VALUE rbffi_NullPointerSingleton = Qnil; + +static void ptr_release(Pointer* ptr); +static void ptr_mark(Pointer* ptr); + +VALUE +rbffi_Pointer_NewInstance(void* addr) +{ + Pointer* p; + VALUE obj; + + if (addr == NULL) { + return rbffi_NullPointerSingleton; + } + + obj = Data_Make_Struct(rbffi_PointerClass, Pointer, NULL, -1, p); + p->memory.address = addr; + p->memory.size = LONG_MAX; + p->memory.flags = (addr == NULL) ? 
0 : (MEM_RD | MEM_WR); + p->memory.typeSize = 1; + p->rbParent = Qnil; + + return obj; +} + +static VALUE +ptr_allocate(VALUE klass) +{ + Pointer* p; + VALUE obj; + + obj = Data_Make_Struct(klass, Pointer, ptr_mark, ptr_release, p); + p->rbParent = Qnil; + p->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +/* + * @overload initialize(pointer) + * @param [Pointer] pointer another pointer to initialize from + * Create a new pointer from another {Pointer}. + * @overload initialize(type, address) + * @param [Type] type type for pointer + * @param [Integer] address base address for pointer + * Create a new pointer from a {Type} and a base address + * @return [self] + * A new instance of Pointer. + */ +static VALUE +ptr_initialize(int argc, VALUE* argv, VALUE self) +{ + Pointer* p; + VALUE rbType = Qnil, rbAddress = Qnil; + int typeSize = 1; + + Data_Get_Struct(self, Pointer, p); + + switch (rb_scan_args(argc, argv, "11", &rbType, &rbAddress)) { + case 1: + rbAddress = rbType; + typeSize = 1; + break; + case 2: + typeSize = rbffi_type_size(rbType); + break; + default: + rb_raise(rb_eArgError, "Invalid arguments"); + } + + switch (TYPE(rbAddress)) { + case T_FIXNUM: + case T_BIGNUM: + p->memory.address = (void*) (uintptr_t) NUM2LL(rbAddress); + p->memory.size = LONG_MAX; + if (p->memory.address == NULL) { + p->memory.flags = 0; + } + break; + + default: + if (rb_obj_is_kind_of(rbAddress, rbffi_PointerClass)) { + Pointer* orig; + + p->rbParent = rbAddress; + Data_Get_Struct(rbAddress, Pointer, orig); + p->memory = orig->memory; + } else { + rb_raise(rb_eTypeError, "wrong argument type, expected Integer or FFI::Pointer"); + } + break; + } + + p->memory.typeSize = typeSize; + + return self; +} + +/* + * call-seq: ptr.initialize_copy(other) + * @param [Pointer] other source for cloning or dupping + * @return [self] + * @raise {RuntimeError} if +other+ is an unbounded memory area, or is unreadable/unwritable + * @raise {NoMemError} if failed to allocate memory for new object + * DO NOT CALL THIS METHOD. + * + * This method is internally used by #dup and #clone. Memory content is copied from +other+. + */ +static VALUE +ptr_initialize_copy(VALUE self, VALUE other) +{ + AbstractMemory* src; + Pointer* dst; + + Data_Get_Struct(self, Pointer, dst); + src = POINTER(other); + if (src->size == LONG_MAX) { + rb_raise(rb_eRuntimeError, "cannot duplicate unbounded memory area"); + return Qnil; + } + + if ((dst->memory.flags & (MEM_RD | MEM_WR)) != (MEM_RD | MEM_WR)) { + rb_raise(rb_eRuntimeError, "cannot duplicate unreadable/unwritable memory area"); + return Qnil; + } + + if (dst->storage != NULL) { + xfree(dst->storage); + dst->storage = NULL; + } + + dst->storage = xmalloc(src->size + 7); + if (dst->storage == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate memory size=%lu bytes", src->size); + return Qnil; + } + + dst->allocated = true; + dst->autorelease = true; + dst->memory.address = (void *) (((uintptr_t) dst->storage + 0x7) & (uintptr_t) ~0x7ULL); + dst->memory.size = src->size; + dst->memory.typeSize = src->typeSize; + + /* finally, copy the actual memory contents */ + memcpy(dst->memory.address, src->address, src->size); + + return self; +} + +static VALUE +slice(VALUE self, long offset, long size) +{ + AbstractMemory* ptr; + Pointer* p; + VALUE retval; + + Data_Get_Struct(self, AbstractMemory, ptr); + checkBounds(ptr, offset, size == LONG_MAX ? 
1 : size); + + retval = Data_Make_Struct(rbffi_PointerClass, Pointer, ptr_mark, -1, p); + + p->memory.address = ptr->address + offset; + p->memory.size = size; + p->memory.flags = ptr->flags; + p->memory.typeSize = ptr->typeSize; + p->rbParent = self; + + return retval; +} + +/* + * Document-method: + + * call-seq: ptr + offset + * @param [Numeric] offset + * @return [Pointer] + * Return a new {Pointer} from an existing pointer and an +offset+. + */ +static VALUE +ptr_plus(VALUE self, VALUE offset) +{ + AbstractMemory* ptr; + long off = NUM2LONG(offset); + + Data_Get_Struct(self, AbstractMemory, ptr); + + return slice(self, off, ptr->size == LONG_MAX ? LONG_MAX : ptr->size - off); +} + +/* + * call-seq: ptr.slice(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Pointer] + * Return a new {Pointer} from an existing one. This pointer points on same contents + * from +offset+ for a length +length+. + */ +static VALUE +ptr_slice(VALUE self, VALUE rbOffset, VALUE rbLength) +{ + return slice(self, NUM2LONG(rbOffset), NUM2LONG(rbLength)); +} + +/* + * call-seq: ptr.inspect + * @return [String] + * Inspect pointer object. + */ +static VALUE +ptr_inspect(VALUE self) +{ + char buf[100]; + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->memory.size != LONG_MAX) { + snprintf(buf, sizeof(buf), "#<%s address=%p size=%lu>", + rb_obj_classname(self), ptr->memory.address, ptr->memory.size); + } else { + snprintf(buf, sizeof(buf), "#<%s address=%p>", rb_obj_classname(self), ptr->memory.address); + } + + return rb_str_new2(buf); +} + +/* + * Document-method: null? + * call-seq: ptr.null? + * @return [Boolean] + * Return +true+ if +self+ is a {NULL} pointer. + */ +static VALUE +ptr_null_p(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ptr->memory.address == NULL ? Qtrue : Qfalse; +} + +/* + * Document-method: == + * call-seq: ptr == other + * @param [Pointer] other + * Check equality between +self+ and +other+. Equality is tested on {#address}. + */ +static VALUE +ptr_equals(VALUE self, VALUE other) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (NIL_P(other)) { + return ptr->memory.address == NULL ? Qtrue : Qfalse; + } + + return ptr->memory.address == POINTER(other)->address ? Qtrue : Qfalse; +} + +/* + * call-seq: ptr.address + * @return [Numeric] pointer's base address + * Return +self+'s base address (alias: #to_i). + */ +static VALUE +ptr_address(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ULL2NUM((uintptr_t) ptr->memory.address); +} + +#if BYTE_ORDER == LITTLE_ENDIAN +# define SWAPPED_ORDER BIG_ENDIAN +#else +# define SWAPPED_ORDER LITTLE_ENDIAN +#endif + +/* + * Get or set +self+'s endianness + * @overload order + * @return [:big, :little] endianness of +self+ + * @overload order(order) + * @param [Symbol] order endianness to set (+:little+, +:big+ or +:network+). +:big+ and +:network+ + * are synonymous. + * @return [self] + */ +static VALUE +ptr_order(int argc, VALUE* argv, VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + if (argc == 0) { + int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER; + return order == BIG_ENDIAN ? 
ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little")); + } else { + VALUE rbOrder = Qnil; + int order = BYTE_ORDER; + + if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) { + rb_raise(rb_eArgError, "need byte order"); + } + if (SYMBOL_P(rbOrder)) { + ID id = SYM2ID(rbOrder); + if (id == rb_intern("little")) { + order = LITTLE_ENDIAN; + + } else if (id == rb_intern("big") || id == rb_intern("network")) { + order = BIG_ENDIAN; + } + } + if (order != BYTE_ORDER) { + Pointer* p2; + VALUE retval = slice(self, 0, ptr->memory.size); + + Data_Get_Struct(retval, Pointer, p2); + p2->memory.flags |= MEM_SWAP; + return retval; + } + + return self; + } +} + + +/* + * call-seq: ptr.free + * @return [self] + * Free memory pointed by +self+. + */ +static VALUE +ptr_free(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->allocated) { + if (ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + ptr->allocated = false; + + } else { + VALUE caller = rb_funcall(rb_funcall(Qnil, rb_intern("caller"), 0), rb_intern("first"), 0); + + rb_warn("calling free on non allocated pointer %s from %s", RSTRING_PTR(ptr_inspect(self)), RSTRING_PTR(rb_str_to_str(caller))); + } + + return self; +} + +static VALUE +ptr_type_size(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return INT2NUM(ptr->memory.typeSize); +} + +/* + * call-seq: ptr.autorelease = autorelease + * @param [Boolean] autorelease + * @return [Boolean] +autorelease+ + * Set +autorelease+ attribute. See also Autorelease section. + */ +static VALUE +ptr_autorelease(VALUE self, VALUE autorelease) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + ptr->autorelease = autorelease == Qtrue; + + return autorelease; +} + +/* + * call-seq: ptr.autorelease? + * @return [Boolean] + * Get +autorelease+ attribute. See also Autorelease section. + */ +static VALUE +ptr_autorelease_p(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ptr->autorelease ? Qtrue : Qfalse; +} + + +static void +ptr_release(Pointer* ptr) +{ + if (ptr->autorelease && ptr->allocated && ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + xfree(ptr); +} + +static void +ptr_mark(Pointer* ptr) +{ + rb_gc_mark(ptr->rbParent); +} + +void +rbffi_Pointer_Init(VALUE moduleFFI) +{ + VALUE rbNullAddress = ULL2NUM(0); + VALUE ffi_AbstractMemory = rbffi_AbstractMemoryClass; + + /* + * Document-class: FFI::Pointer < FFI::AbstractMemory + * Pointer class is used to manage C pointers with ease. A {Pointer} object is defined by his + * {#address} (as a C pointer). It permits additions with an integer for pointer arithmetic. + * + * ==Autorelease + * A pointer object may autorelease his contents when freed (by default). This behaviour may be + * changed with {#autorelease=} method. 
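ptr_order above never swaps bytes in place: when a different endianness is requested, it slices the same memory and sets MEM_SWAP on the new view, so the original pointer keeps its byte order and the swap happens on each access through the returned object. A rough stand-alone sketch of that shared-buffer-plus-flag design (the view_t type and helpers are illustrative, not the gem's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A "view" shares the underlying bytes; only its flag differs. */
typedef struct {
    unsigned char *base;
    int swap;              /* mirrors the MEM_SWAP idea: swap on access */
} view_t;

static uint32_t view_get_u32(const view_t *v, size_t off)
{
    uint32_t x;
    memcpy(&x, v->base + off, sizeof(x));
    if (v->swap) {
        x = ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
            ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }
    return x;
}

int main(void)
{
    unsigned char buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    view_t native  = { buf, 0 };   /* the original pointer's view                  */
    view_t swapped = { buf, 1 };   /* a swapped view over the very same bytes,     */
                                   /* as ptr_order returns when the order differs  */

    printf("native : 0x%08x\n", view_get_u32(&native, 0));
    printf("swapped: 0x%08x\n", view_get_u32(&swapped, 0));
    return 0;
}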
+ */ + rbffi_PointerClass = rb_define_class_under(moduleFFI, "Pointer", ffi_AbstractMemory); + /* + * Document-variable: Pointer + */ + rb_global_variable(&rbffi_PointerClass); + + rb_define_alloc_func(rbffi_PointerClass, ptr_allocate); + rb_define_method(rbffi_PointerClass, "initialize", ptr_initialize, -1); + rb_define_method(rbffi_PointerClass, "initialize_copy", ptr_initialize_copy, 1); + rb_define_method(rbffi_PointerClass, "inspect", ptr_inspect, 0); + rb_define_method(rbffi_PointerClass, "to_s", ptr_inspect, 0); + rb_define_method(rbffi_PointerClass, "+", ptr_plus, 1); + rb_define_method(rbffi_PointerClass, "slice", ptr_slice, 2); + rb_define_method(rbffi_PointerClass, "null?", ptr_null_p, 0); + rb_define_method(rbffi_PointerClass, "address", ptr_address, 0); + rb_define_alias(rbffi_PointerClass, "to_i", "address"); + rb_define_method(rbffi_PointerClass, "==", ptr_equals, 1); + rb_define_method(rbffi_PointerClass, "order", ptr_order, -1); + rb_define_method(rbffi_PointerClass, "autorelease=", ptr_autorelease, 1); + rb_define_method(rbffi_PointerClass, "autorelease?", ptr_autorelease_p, 0); + rb_define_method(rbffi_PointerClass, "free", ptr_free, 0); + rb_define_method(rbffi_PointerClass, "type_size", ptr_type_size, 0); + + rbffi_NullPointerSingleton = rb_class_new_instance(1, &rbNullAddress, rbffi_PointerClass); + /* + * NULL pointer + */ + rb_define_const(rbffi_PointerClass, "NULL", rbffi_NullPointerSingleton); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.h new file mode 100644 index 0000000..2d86851 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Pointer.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
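The init code above also exposes Pointer::NULL as a single shared instance, and rbffi_Pointer_NewInstance (earlier in this file) hands that singleton back whenever it is asked to wrap a NULL address, so no allocation happens and null wrappers compare by identity. A toy sketch of the same pattern outside Ruby, with made-up types:

#include <stdio.h>
#include <stdlib.h>

typedef struct { void *address; } ptr_obj;

/* One shared object stands in for every NULL address. */
static ptr_obj null_singleton = { NULL };

static ptr_obj *wrap_address(void *addr)
{
    if (addr == NULL)
        return &null_singleton;        /* no allocation, always the same object */

    ptr_obj *p = malloc(sizeof(*p));
    if (p) p->address = addr;
    return p;
}

int main(void)
{
    int x = 42;
    ptr_obj *a = wrap_address(NULL);
    ptr_obj *b = wrap_address(NULL);
    ptr_obj *c = wrap_address(&x);

    printf("NULL wrappers identical:   %s\n", a == b ? "yes" : "no");
    printf("non-NULL wrapper distinct: %s\n", c != a ? "yes" : "no");
    free(c);
    return 0;
}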
+ */ + +#ifndef RBFFI_POINTER_H +#define RBFFI_POINTER_H + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "AbstractMemory.h" + +extern void rbffi_Pointer_Init(VALUE moduleFFI); +extern VALUE rbffi_Pointer_NewInstance(void* addr); +extern VALUE rbffi_PointerClass; +extern VALUE rbffi_NullPointerSingleton; + +typedef struct Pointer { + AbstractMemory memory; + VALUE rbParent; + char* storage; /* start of malloc area */ + bool autorelease; + bool allocated; +} Pointer; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_POINTER_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Struct.c new file mode 100644 index 0000000..c6ccd14 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Struct.c @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Function.h" +#include "Types.h" +#include "Function.h" +#include "StructByValue.h" +#include "ArrayType.h" +#include "MappedType.h" +#include "Struct.h" + +typedef struct InlineArray_ { + VALUE rbMemory; + VALUE rbField; + + AbstractMemory* memory; + StructField* field; + MemoryOp *op; + Type* componentType; + ArrayType* arrayType; + int length; +} InlineArray; + + +static void struct_mark(Struct *); +static void struct_free(Struct *); +static VALUE struct_class_layout(VALUE klass); +static void struct_malloc(Struct* s); +static void inline_array_mark(InlineArray *); +static void store_reference_value(StructField* f, Struct* s, VALUE value); + +VALUE rbffi_StructClass = Qnil; + +VALUE rbffi_StructInlineArrayClass = Qnil; +VALUE rbffi_StructLayoutCharArrayClass = Qnil; + +static ID id_pointer_ivar = 0, id_layout_ivar = 0; +static ID id_get = 0, id_put = 0, id_to_ptr = 0, id_to_s = 0, id_layout = 0; + +static inline char* +memory_address(VALUE self) +{ + return ((AbstractMemory *)DATA_PTR((self)))->address; +} + +static VALUE +struct_allocate(VALUE klass) +{ + Struct* s; + VALUE obj = Data_Make_Struct(klass, Struct, struct_mark, struct_free, s); + + s->rbPointer = Qnil; + s->rbLayout = Qnil; + + return obj; +} + +/* + * call-seq: initialize + * @overload initialize(pointer, *args) + * @param [AbstractMemory] pointer + * @param [Array] args + * @return [self] + */ +static VALUE +struct_initialize(int argc, VALUE* argv, VALUE self) +{ + Struct* s; + VALUE rbPointer = Qnil, rest = Qnil, klass = CLASS_OF(self); + int nargs; + + Data_Get_Struct(self, Struct, s); + + nargs = rb_scan_args(argc, argv, "01*", &rbPointer, &rest); + + /* Call up into ruby code to adjust the layout */ + if (nargs > 1) { + s->rbLayout = rb_funcall2(CLASS_OF(self), id_layout, (int) RARRAY_LEN(rest), RARRAY_PTR(rest)); + } else { + s->rbLayout = struct_class_layout(klass); + } + + if (!rb_obj_is_kind_of(s->rbLayout, rbffi_StructLayoutClass)) { + rb_raise(rb_eRuntimeError, "Invalid Struct layout"); + } + + Data_Get_Struct(s->rbLayout, StructLayout, s->layout); + + if (rbPointer != Qnil) { + s->pointer = MEMORY(rbPointer); + s->rbPointer = rbPointer; + } else { + struct_malloc(s); + } + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +struct_initialize_copy(VALUE self, VALUE other) +{ + Struct* src; + Struct* dst; + + Data_Get_Struct(self, Struct, dst); + Data_Get_Struct(other, Struct, src); + if (dst == src) { + return self; + } + + dst->rbLayout = src->rbLayout; + dst->layout = src->layout; + + /* + * A new MemoryPointer instance is allocated here instead of just calling + * #dup on rbPointer, since the Pointer may not know its length, or may + * be longer than just this struct. 
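The copy path described above deliberately gives the duplicate its own backing storage rather than sharing the source pointer. The pointer-level dup shown earlier in this diff over-allocates by 7 bytes and rounds the usable address up to an 8-byte boundary before copying the contents. A compact sketch of that allocate-align-copy step in plain C, with no Ruby objects and an illustrative helper name:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate `size` bytes into freshly owned storage, returning an address
 * rounded up to an 8-byte boundary (the same +7 / &~7 rounding used by
 * ptr_initialize_copy earlier in this diff). The raw allocation is handed
 * back through storage_out so it can later be freed. */
static void *dup_aligned(void **storage_out, const void *src, size_t size)
{
    unsigned char *storage = malloc(size + 7);
    if (storage == NULL) return NULL;

    void *aligned = (void *)(((uintptr_t) storage + 7) & ~(uintptr_t) 7);
    memcpy(aligned, src, size);

    *storage_out = storage;
    return aligned;
}

int main(void)
{
    const char payload[12] = "struct bits";
    void *storage = NULL;
    void *copy = dup_aligned(&storage, payload, sizeof(payload));

    if (copy != NULL) {
        printf("aligned copy at %p: %s\n", copy, (char *) copy);
        free(storage);
    }
    return 0;
}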
+ */ + if (src->pointer->address != NULL) { + dst->rbPointer = rbffi_MemoryPointer_NewInstance(1, src->layout->size, false); + dst->pointer = MEMORY(dst->rbPointer); + memcpy(dst->pointer->address, src->pointer->address, src->layout->size); + } else { + dst->rbPointer = src->rbPointer; + dst->pointer = src->pointer; + } + + if (src->layout->referenceFieldCount > 0) { + dst->rbReferences = ALLOC_N(VALUE, dst->layout->referenceFieldCount); + memcpy(dst->rbReferences, src->rbReferences, dst->layout->referenceFieldCount * sizeof(VALUE)); + } + + return self; +} + +static VALUE +struct_class_layout(VALUE klass) +{ + VALUE layout; + if (!rb_ivar_defined(klass, id_layout_ivar)) { + rb_raise(rb_eRuntimeError, "no Struct layout configured for %s", rb_class2name(klass)); + } + + layout = rb_ivar_get(klass, id_layout_ivar); + if (!rb_obj_is_kind_of(layout, rbffi_StructLayoutClass)) { + rb_raise(rb_eRuntimeError, "invalid Struct layout for %s", rb_class2name(klass)); + } + + return layout; +} + +static StructLayout* +struct_layout(VALUE self) +{ + Struct* s = (Struct *) DATA_PTR(self); + if (s->layout != NULL) { + return s->layout; + } + + if (s->layout == NULL) { + s->rbLayout = struct_class_layout(CLASS_OF(self)); + Data_Get_Struct(s->rbLayout, StructLayout, s->layout); + } + + return s->layout; +} + +static Struct* +struct_validate(VALUE self) +{ + Struct* s; + Data_Get_Struct(self, Struct, s); + + if (struct_layout(self) == NULL) { + rb_raise(rb_eRuntimeError, "struct layout == null"); + } + + if (s->pointer == NULL) { + struct_malloc(s); + } + + return s; +} + +static void +struct_malloc(Struct* s) +{ + if (s->rbPointer == Qnil) { + s->rbPointer = rbffi_MemoryPointer_NewInstance(s->layout->size, 1, true); + + } else if (!rb_obj_is_kind_of(s->rbPointer, rbffi_AbstractMemoryClass)) { + rb_raise(rb_eRuntimeError, "invalid pointer in struct"); + } + + s->pointer = (AbstractMemory *) DATA_PTR(s->rbPointer); +} + +static void +struct_mark(Struct *s) +{ + rb_gc_mark(s->rbPointer); + rb_gc_mark(s->rbLayout); + if (s->rbReferences != NULL) { + rb_gc_mark_locations(&s->rbReferences[0], &s->rbReferences[s->layout->referenceFieldCount]); + } +} + +static void +struct_free(Struct* s) +{ + xfree(s->rbReferences); + xfree(s); +} + + +static void +store_reference_value(StructField* f, Struct* s, VALUE value) +{ + if (unlikely(f->referenceIndex == -1)) { + rb_raise(rb_eRuntimeError, "put_reference_value called for non-reference type"); + return; + } + if (s->rbReferences == NULL) { + int i; + s->rbReferences = ALLOC_N(VALUE, s->layout->referenceFieldCount); + for (i = 0; i < s->layout->referenceFieldCount; ++i) { + s->rbReferences[i] = Qnil; + } + } + + s->rbReferences[f->referenceIndex] = value; +} + + +static StructField * +struct_field(Struct* s, VALUE fieldName) +{ + StructLayout* layout = s->layout; + struct field_cache_entry *p_ce = FIELD_CACHE_LOOKUP(layout, fieldName); + + /* Do a hash lookup only if cache entry is empty or fieldName is unexpected? 
*/ + if (unlikely(!SYMBOL_P(fieldName) || !p_ce->fieldName || p_ce->fieldName != fieldName)) { + VALUE rbField = rb_hash_aref(layout->rbFieldMap, fieldName); + if (unlikely(NIL_P(rbField))) { + VALUE str = rb_funcall2(fieldName, id_to_s, 0, NULL); + rb_raise(rb_eArgError, "No such field '%s'", StringValuePtr(str)); + } + /* Write the retrieved coder to the cache */ + p_ce->fieldName = fieldName; + p_ce->field = (StructField *) DATA_PTR(rbField); + } + + return p_ce->field; +} + +/* + * call-seq: struct[field_name] + * @param field_name field to access + * Acces to a Struct field. + */ +static VALUE +struct_aref(VALUE self, VALUE fieldName) +{ + Struct* s; + StructField* f; + + s = struct_validate(self); + + f = struct_field(s, fieldName); + if (f->get != NULL) { + return (*f->get)(f, s); + + } else if (f->memoryOp != NULL) { + return (*f->memoryOp->get)(s->pointer, f->offset); + + } else { + VALUE rbField = rb_hash_aref(s->layout->rbFieldMap, fieldName); + /* call up to the ruby code to fetch the value */ + return rb_funcall2(rbField, id_get, 1, &s->rbPointer); + } +} + +/* + * call-seq: []=(field_name, value) + * @param field_name field to access + * @param value value to set to +field_name+ + * @return [value] + * Set a field in Struct. + */ +static VALUE +struct_aset(VALUE self, VALUE fieldName, VALUE value) +{ + Struct* s; + StructField* f; + + s = struct_validate(self); + + f = struct_field(s, fieldName); + if (f->put != NULL) { + (*f->put)(f, s, value); + + } else if (f->memoryOp != NULL) { + + (*f->memoryOp->put)(s->pointer, f->offset, value); + + } else { + VALUE rbField = rb_hash_aref(s->layout->rbFieldMap, fieldName); + /* call up to the ruby code to set the value */ + VALUE argv[2]; + argv[0] = s->rbPointer; + argv[1] = value; + rb_funcall2(rbField, id_put, 2, argv); + } + + if (f->referenceRequired) { + store_reference_value(f, s, value); + } + + return value; +} + +/* + * call-seq: pointer= pointer + * @param [AbstractMemory] pointer + * @return [self] + * Make Struct point to +pointer+. + */ +static VALUE +struct_set_pointer(VALUE self, VALUE pointer) +{ + Struct* s; + StructLayout* layout; + AbstractMemory* memory; + + if (!rb_obj_is_kind_of(pointer, rbffi_AbstractMemoryClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected Pointer or Buffer)", + rb_obj_classname(pointer)); + return Qnil; + } + + + Data_Get_Struct(self, Struct, s); + Data_Get_Struct(pointer, AbstractMemory, memory); + layout = struct_layout(self); + + if ((int) layout->base.ffiType->size > memory->size) { + rb_raise(rb_eArgError, "memory of %ld bytes too small for struct %s (expected at least %ld)", + memory->size, rb_obj_classname(self), (long) layout->base.ffiType->size); + } + + s->pointer = MEMORY(pointer); + s->rbPointer = pointer; + rb_ivar_set(self, id_pointer_ivar, pointer); + + return self; +} + +/* + * call-seq: pointer + * @return [AbstractMemory] + * Get pointer to Struct contents. + */ +static VALUE +struct_get_pointer(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->rbPointer; +} + +/* + * call-seq: layout= layout + * @param [StructLayout] layout + * @return [self] + * Set the Struct's layout. 
+ */ +static VALUE +struct_set_layout(VALUE self, VALUE layout) +{ + Struct* s; + Data_Get_Struct(self, Struct, s); + + if (!rb_obj_is_kind_of(layout, rbffi_StructLayoutClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected %s)", + rb_obj_classname(layout), rb_class2name(rbffi_StructLayoutClass)); + return Qnil; + } + + Data_Get_Struct(layout, StructLayout, s->layout); + rb_ivar_set(self, id_layout_ivar, layout); + + return self; +} + +/* + * call-seq: layout + * @return [StructLayout] + * Get the Struct's layout. + */ +static VALUE +struct_get_layout(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->rbLayout; +} + +/* + * call-seq: null? + * @return [Boolean] + * Test if Struct's pointer is NULL + */ +static VALUE +struct_null_p(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->pointer->address == NULL ? Qtrue : Qfalse; +} + +/* + * (see Pointer#order) + */ +static VALUE +struct_order(int argc, VALUE* argv, VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + if (argc == 0) { + return rb_funcall(s->rbPointer, rb_intern("order"), 0); + + } else { + VALUE retval = rb_obj_dup(self); + VALUE rbPointer = rb_funcall2(s->rbPointer, rb_intern("order"), argc, argv); + struct_set_pointer(retval, rbPointer); + + return retval; + } +} + +static VALUE +inline_array_allocate(VALUE klass) +{ + InlineArray* array; + VALUE obj; + + obj = Data_Make_Struct(klass, InlineArray, inline_array_mark, -1, array); + array->rbField = Qnil; + array->rbMemory = Qnil; + + return obj; +} + +static void +inline_array_mark(InlineArray* array) +{ + rb_gc_mark(array->rbField); + rb_gc_mark(array->rbMemory); +} + +/* + * Document-method: FFI::Struct::InlineArray#initialize + * call-seq: initialize(memory, field) + * @param [AbstractMemory] memory + * @param [StructField] field + * @return [self] + */ +static VALUE +inline_array_initialize(VALUE self, VALUE rbMemory, VALUE rbField) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + array->rbMemory = rbMemory; + array->rbField = rbField; + + Data_Get_Struct(rbMemory, AbstractMemory, array->memory); + Data_Get_Struct(rbField, StructField, array->field); + Data_Get_Struct(array->field->rbType, ArrayType, array->arrayType); + Data_Get_Struct(array->arrayType->rbComponentType, Type, array->componentType); + + array->op = get_memory_op(array->componentType); + if (array->op == NULL && array->componentType->nativeType == NATIVE_MAPPED) { + array->op = get_memory_op(((MappedType *) array->componentType)->type); + } + + array->length = array->arrayType->length; + + return self; +} + +/* + * call-seq: size + * @return [Numeric] + * Get size + */ +static VALUE +inline_array_size(VALUE self) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + return UINT2NUM(((ArrayType *) array->field->type)->length); +} + +static int +inline_array_offset(InlineArray* array, int index) +{ + if (index < 0 || (index >= array->length && array->length > 0)) { + rb_raise(rb_eIndexError, "index %d out of bounds", index); + } + + return (int) array->field->offset + (index * (int) array->componentType->ffiType->size); +} + +/* + * call-seq: [](index) + * @param [Numeric] index + * @return [Type, Struct] + */ +static VALUE +inline_array_aref(VALUE self, VALUE rbIndex) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + if (array->op != NULL) { + VALUE rbNativeValue = array->op->get(array->memory, + inline_array_offset(array, NUM2INT(rbIndex))); + if 
(unlikely(array->componentType->nativeType == NATIVE_MAPPED)) { + return rb_funcall(((MappedType *) array->componentType)->rbConverter, + rb_intern("from_native"), 2, rbNativeValue, Qnil); + } else { + return rbNativeValue; + } + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + VALUE rbOffset = INT2NUM(inline_array_offset(array, NUM2INT(rbIndex))); + VALUE rbLength = INT2NUM(array->componentType->ffiType->size); + VALUE rbPointer = rb_funcall(array->rbMemory, rb_intern("slice"), 2, rbOffset, rbLength); + + return rb_class_new_instance(1, &rbPointer, ((StructByValue *) array->componentType)->rbStructClass); + } else { + + rb_raise(rb_eArgError, "get not supported for %s", rb_obj_classname(array->arrayType->rbComponentType)); + return Qnil; + } +} + +/* + * call-seq: []=(index, value) + * @param [Numeric] index + * @param [Type, Struct] + * @return [value] + */ +static VALUE +inline_array_aset(VALUE self, VALUE rbIndex, VALUE rbValue) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + if (array->op != NULL) { + if (unlikely(array->componentType->nativeType == NATIVE_MAPPED)) { + rbValue = rb_funcall(((MappedType *) array->componentType)->rbConverter, + rb_intern("to_native"), 2, rbValue, Qnil); + } + array->op->put(array->memory, inline_array_offset(array, NUM2INT(rbIndex)), + rbValue); + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + int offset = inline_array_offset(array, NUM2INT(rbIndex)); + Struct* s; + + if (!rb_obj_is_kind_of(rbValue, rbffi_StructClass)) { + rb_raise(rb_eTypeError, "argument not an instance of struct"); + return Qnil; + } + + checkWrite(array->memory); + checkBounds(array->memory, offset, array->componentType->ffiType->size); + + Data_Get_Struct(rbValue, Struct, s); + checkRead(s->pointer); + checkBounds(s->pointer, 0, array->componentType->ffiType->size); + + memcpy(array->memory->address + offset, s->pointer->address, array->componentType->ffiType->size); + + } else { + ArrayType* arrayType; + Data_Get_Struct(array->field->rbType, ArrayType, arrayType); + + rb_raise(rb_eArgError, "set not supported for %s", rb_obj_classname(arrayType->rbComponentType)); + return Qnil; + } + + return rbValue; +} + +/* + * call-seq: each + * Yield block for each element of +self+. + */ +static VALUE +inline_array_each(VALUE self) +{ + InlineArray* array; + + int i; + + Data_Get_Struct(self, InlineArray, array); + + for (i = 0; i < array->length; ++i) { + rb_yield(inline_array_aref(self, INT2FIX(i))); + } + + return self; +} + +/* + * call-seq: to_a + * @return [Array] + * Convert +self+ to an array. + */ +static VALUE +inline_array_to_a(VALUE self) +{ + InlineArray* array; + VALUE obj; + int i; + + Data_Get_Struct(self, InlineArray, array); + obj = rb_ary_new2(array->length); + + + for (i = 0; i < array->length; ++i) { + rb_ary_push(obj, inline_array_aref(self, INT2FIX(i))); + } + + return obj; +} + +/* + * Document-method: FFI::StructLayout::CharArray#to_s + * call-seq: to_s + * @return [String] + * Convert +self+ to a string. 
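Element access in InlineArray (including the CharArray string view documented above) reduces to one offset computation: the field's offset inside the struct plus the index times the component type's size, guarded by a bounds check whenever the array length is known. A small self-contained sketch of that arithmetic, using made-up field values and names:

#include <stdio.h>

/* Minimal stand-ins for the relevant StructField/ArrayType bookkeeping. */
typedef struct {
    int field_offset;     /* where the inline array starts inside the struct */
    int element_size;     /* size of one component element                   */
    int length;           /* number of elements (0 means unbounded)          */
} inline_array_info;

/* Mirrors inline_array_offset: bounds-check, then offset + index * size. */
static int element_offset(const inline_array_info *a, int index)
{
    if (index < 0 || (a->length > 0 && index >= a->length)) {
        fprintf(stderr, "index %d out of bounds\n", index);
        return -1;
    }
    return a->field_offset + index * a->element_size;
}

int main(void)
{
    /* A 4-element, one-byte-per-element array field 8 bytes into the struct. */
    inline_array_info chars = { 8, 1, 4 };

    for (int i = 0; i < 5; i++) {
        int off = element_offset(&chars, i);
        if (off >= 0)
            printf("element %d lives at byte offset %d\n", i, off);
    }
    return 0;
}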
+ */ +static VALUE +inline_array_to_s(VALUE self) +{ + InlineArray* array; + VALUE argv[2]; + + Data_Get_Struct(self, InlineArray, array); + + if (array->componentType->nativeType != NATIVE_INT8 && array->componentType->nativeType != NATIVE_UINT8) { + VALUE dummy = Qnil; + return rb_call_super(0, &dummy); + } + + argv[0] = UINT2NUM(array->field->offset); + argv[1] = UINT2NUM(array->length); + + return rb_funcall2(array->rbMemory, rb_intern("get_string"), 2, argv); +} + +/* + * call-seq: to_ptr + * @return [AbstractMemory] + * Get pointer to +self+ content. + */ +static VALUE +inline_array_to_ptr(VALUE self) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + return rb_funcall(array->rbMemory, rb_intern("slice"), 2, + UINT2NUM(array->field->offset), UINT2NUM(array->arrayType->base.ffiType->size)); +} + + +void +rbffi_Struct_Init(VALUE moduleFFI) +{ + VALUE StructClass; + + rbffi_StructLayout_Init(moduleFFI); + + /* + * Document-class: FFI::Struct + * + * A FFI::Struct means to mirror a C struct. + * + * A Struct is defined as: + * class MyStruct < FFI::Struct + * layout :value1, :int, + * :value2, :double + * end + * and is used as: + * my_struct = MyStruct.new + * my_struct[:value1] = 12 + * + * For more information, see http://github.com/ffi/ffi/wiki/Structs + */ + rbffi_StructClass = rb_define_class_under(moduleFFI, "Struct", rb_cObject); + StructClass = rbffi_StructClass; // put on a line alone to help RDoc + rb_global_variable(&rbffi_StructClass); + + /* + * Document-class: FFI::Struct::InlineArray + */ + rbffi_StructInlineArrayClass = rb_define_class_under(rbffi_StructClass, "InlineArray", rb_cObject); + rb_global_variable(&rbffi_StructInlineArrayClass); + + /* + * Document-class: FFI::StructLayout::CharArray < FFI::Struct::InlineArray + */ + rbffi_StructLayoutCharArrayClass = rb_define_class_under(rbffi_StructLayoutClass, "CharArray", + rbffi_StructInlineArrayClass); + rb_global_variable(&rbffi_StructLayoutCharArrayClass); + + + rb_define_alloc_func(StructClass, struct_allocate); + rb_define_method(StructClass, "initialize", struct_initialize, -1); + rb_define_method(StructClass, "initialize_copy", struct_initialize_copy, 1); + rb_define_method(StructClass, "order", struct_order, -1); + + rb_define_alias(rb_singleton_class(StructClass), "alloc_in", "new"); + rb_define_alias(rb_singleton_class(StructClass), "alloc_out", "new"); + rb_define_alias(rb_singleton_class(StructClass), "alloc_inout", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_in", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_out", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_inout", "new"); + + rb_define_method(StructClass, "pointer", struct_get_pointer, 0); + rb_define_private_method(StructClass, "pointer=", struct_set_pointer, 1); + + rb_define_method(StructClass, "layout", struct_get_layout, 0); + rb_define_private_method(StructClass, "layout=", struct_set_layout, 1); + + rb_define_method(StructClass, "[]", struct_aref, 1); + rb_define_method(StructClass, "[]=", struct_aset, 2); + rb_define_method(StructClass, "null?", struct_null_p, 0); + + rb_include_module(rbffi_StructInlineArrayClass, rb_mEnumerable); + rb_define_alloc_func(rbffi_StructInlineArrayClass, inline_array_allocate); + rb_define_method(rbffi_StructInlineArrayClass, "initialize", inline_array_initialize, 2); + rb_define_method(rbffi_StructInlineArrayClass, "[]", inline_array_aref, 1); + rb_define_method(rbffi_StructInlineArrayClass, "[]=", inline_array_aset, 2); + 
rb_define_method(rbffi_StructInlineArrayClass, "each", inline_array_each, 0); + rb_define_method(rbffi_StructInlineArrayClass, "size", inline_array_size, 0); + rb_define_method(rbffi_StructInlineArrayClass, "to_a", inline_array_to_a, 0); + rb_define_method(rbffi_StructInlineArrayClass, "to_ptr", inline_array_to_ptr, 0); + + rb_define_method(rbffi_StructLayoutCharArrayClass, "to_s", inline_array_to_s, 0); + rb_define_alias(rbffi_StructLayoutCharArrayClass, "to_str", "to_s"); + + id_pointer_ivar = rb_intern("@pointer"); + id_layout_ivar = rb_intern("@layout"); + id_layout = rb_intern("layout"); + id_get = rb_intern("get"); + id_put = rb_intern("put"); + id_to_ptr = rb_intern("to_ptr"); + id_to_s = rb_intern("to_s"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Struct.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Struct.h new file mode 100644 index 0000000..eb6edf2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Struct.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_STRUCT_H +#define RBFFI_STRUCT_H + +#include "extconf.h" +#include "AbstractMemory.h" +#include "Type.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_Struct_Init(VALUE ffiModule); + extern void rbffi_StructLayout_Init(VALUE ffiModule); + typedef struct StructField_ StructField; + typedef struct StructLayout_ StructLayout; + typedef struct Struct_ Struct; + + struct StructField_ { + Type* type; + unsigned int offset; + + int referenceIndex; + + bool referenceRequired; + VALUE rbType; + VALUE rbName; + + VALUE (*get)(StructField* field, Struct* s); + void (*put)(StructField* field, Struct* s, VALUE value); + + MemoryOp* memoryOp; + }; + + struct StructLayout_ { + Type base; + StructField** fields; + int fieldCount; + int size; + int align; + ffi_type** ffiTypes; + + /* + * We use the fieldName's minor 8 Bits as index to a 256 entry cache. 
+ * This avoids full ruby hash lookups for repeated lookups. + */ + #define FIELD_CACHE_LOOKUP(this, sym) ( &(this)->cache_row[((sym) >> 8) & 0xff] ) + + struct field_cache_entry { + VALUE fieldName; + StructField *field; + } cache_row[0x100]; + + /** The number of reference tracking fields in this struct */ + int referenceFieldCount; + + VALUE rbFieldNames; + VALUE rbFieldMap; + VALUE rbFields; + }; + + struct Struct_ { + StructLayout* layout; + AbstractMemory* pointer; + VALUE* rbReferences; + + VALUE rbLayout; + VALUE rbPointer; + }; + + extern VALUE rbffi_StructClass, rbffi_StructLayoutClass; + extern VALUE rbffi_StructLayoutFieldClass, rbffi_StructLayoutFunctionFieldClass; + extern VALUE rbffi_StructLayoutArrayFieldClass; + extern VALUE rbffi_StructInlineArrayClass; + extern VALUE rbffi_StructLayoutCharArrayClass; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_STRUCT_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.c new file mode 100644 index 0000000..0d9fb9c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.c @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
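FIELD_CACHE_LOOKUP above is a tiny direct-mapped cache: bits 8..15 of the field symbol select one of 256 slots, and struct_field (in Struct.c earlier in this diff) only falls back to the rbFieldMap hash when the slot holds a different symbol, refilling the slot afterwards. A stand-alone sketch of that direct-mapped pattern, with a toy linear scan standing in for the Ruby hash:

#include <stdio.h>
#include <stdint.h>

/* A direct-mapped, 256-entry cache in the spirit of FIELD_CACHE_LOOKUP. */
typedef struct { uintptr_t key; int value; } cache_entry;
static cache_entry cache[0x100];

/* Toy "slow path" table; in the gem this role is played by a Ruby hash. */
static const struct { uintptr_t key; int value; } table[] = {
    { 0x1234, 10 }, { 0x5678, 20 }, { 0x9abc, 30 },
};

static int slow_lookup(uintptr_t key)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].key == key) return table[i].value;
    return -1;
}

static int cached_lookup(uintptr_t key)
{
    cache_entry *e = &cache[(key >> 8) & 0xff];   /* same slot-selection idea */
    if (e->key != key) {                          /* miss: consult slow path  */
        int v = slow_lookup(key);
        if (v < 0) return -1;
        e->key = key;
        e->value = v;
    }
    return e->value;                              /* hit, or freshly filled   */
}

int main(void)
{
    printf("%d\n", cached_lookup(0x5678));   /* miss, then cached */
    printf("%d\n", cached_lookup(0x5678));   /* hit               */
    return 0;
}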
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Type.h" +#include "StructByValue.h" +#include "Struct.h" + +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) + +static VALUE sbv_allocate(VALUE); +static VALUE sbv_initialize(VALUE, VALUE); +static void sbv_mark(StructByValue *); +static void sbv_free(StructByValue *); + +VALUE rbffi_StructByValueClass = Qnil; + +static VALUE +sbv_allocate(VALUE klass) +{ + StructByValue* sbv; + + VALUE obj = Data_Make_Struct(klass, StructByValue, sbv_mark, sbv_free, sbv); + + sbv->rbStructClass = Qnil; + sbv->rbStructLayout = Qnil; + sbv->base.nativeType = NATIVE_STRUCT; + + sbv->base.ffiType = xcalloc(1, sizeof(*sbv->base.ffiType)); + sbv->base.ffiType->size = 0; + sbv->base.ffiType->alignment = 1; + sbv->base.ffiType->type = FFI_TYPE_STRUCT; + + return obj; +} + +static VALUE +sbv_initialize(VALUE self, VALUE rbStructClass) +{ + StructByValue* sbv = NULL; + StructLayout* layout = NULL; + VALUE rbLayout = Qnil; + + rbLayout = rb_ivar_get(rbStructClass, rb_intern("@layout")); + if (!rb_obj_is_instance_of(rbLayout, rbffi_StructLayoutClass)) { + rb_raise(rb_eTypeError, "wrong type in @layout ivar (expected FFI::StructLayout)"); + } + + Data_Get_Struct(rbLayout, StructLayout, layout); + Data_Get_Struct(self, StructByValue, sbv); + sbv->rbStructClass = rbStructClass; + sbv->rbStructLayout = rbLayout; + + /* We can just use everything from the ffi_type directly */ + *sbv->base.ffiType = *layout->base.ffiType; + + return self; +} + +static void +sbv_mark(StructByValue *sbv) +{ + rb_gc_mark(sbv->rbStructClass); + rb_gc_mark(sbv->rbStructLayout); +} + +static void +sbv_free(StructByValue *sbv) +{ + xfree(sbv->base.ffiType); + xfree(sbv); +} + + +static VALUE +sbv_layout(VALUE self) +{ + StructByValue* sbv; + + Data_Get_Struct(self, StructByValue, sbv); + return sbv->rbStructLayout; +} + +static VALUE +sbv_struct_class(VALUE self) +{ + StructByValue* sbv; + + Data_Get_Struct(self, StructByValue, sbv); + + return sbv->rbStructClass; +} + +void +rbffi_StructByValue_Init(VALUE moduleFFI) +{ + rbffi_StructByValueClass = rb_define_class_under(moduleFFI, "StructByValue", rbffi_TypeClass); + rb_global_variable(&rbffi_StructByValueClass); + rb_define_const(rbffi_TypeClass, "Struct", rbffi_StructByValueClass); + + rb_define_alloc_func(rbffi_StructByValueClass, sbv_allocate); + rb_define_method(rbffi_StructByValueClass, "initialize", sbv_initialize, 1); + rb_define_method(rbffi_StructByValueClass, "layout", sbv_layout, 0); + rb_define_method(rbffi_StructByValueClass, "struct_class", sbv_struct_class, 0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.h new file mode 100644 index 0000000..07b2763 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructByValue.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
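Editor's note: StructByValue.c above registers FFI::Type::Struct so a struct can be passed or returned by value instead of by pointer. A hedged sketch of how that surfaces in Ruby; the 'example' library and its functions are made-up names used only for illustration:

    require 'ffi'

    class Point < FFI::Struct
      layout :x, :int32, :y, :int32
    end

    module Example
      extend FFI::Library
      ffi_lib 'example'   # hypothetical native library

      # Point.by_value wraps the struct class in the StructByValue type defined above.
      attach_function :sum_point,  [Point.by_value], :int32
      attach_function :make_point, [:int32, :int32], Point.by_value
    end
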
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_STRUCTBYVALUE_H +#define RBFFI_STRUCTBYVALUE_H + +#include +#include "Type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct StructByValue_ { + Type base; + VALUE rbStructClass; + VALUE rbStructLayout; +} StructByValue; + +void rbffi_StructByValue_Init(VALUE moduleFFI); + +extern VALUE rbffi_StructByValueClass; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_STRUCTBYVALUE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructLayout.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructLayout.c new file mode 100644 index 0000000..9b748bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/StructLayout.c @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#ifndef _MSC_VER +# include +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Function.h" +#include "Types.h" +#include "StructByValue.h" +#include "ArrayType.h" +#include "Function.h" +#include "MappedType.h" +#include "Struct.h" + +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) + +static void struct_layout_mark(StructLayout *); +static void struct_layout_free(StructLayout *); +static void struct_field_mark(StructField* ); + +VALUE rbffi_StructLayoutFieldClass = Qnil; +VALUE rbffi_StructLayoutNumberFieldClass = Qnil, rbffi_StructLayoutPointerFieldClass = Qnil; +VALUE rbffi_StructLayoutStringFieldClass = Qnil; +VALUE rbffi_StructLayoutFunctionFieldClass = Qnil, rbffi_StructLayoutArrayFieldClass = Qnil; + +VALUE rbffi_StructLayoutClass = Qnil; + + +static VALUE +struct_field_allocate(VALUE klass) +{ + StructField* field; + VALUE obj; + + obj = Data_Make_Struct(klass, StructField, struct_field_mark, -1, field); + field->rbType = Qnil; + field->rbName = Qnil; + + return obj; +} + +static void +struct_field_mark(StructField* f) +{ + rb_gc_mark(f->rbType); + rb_gc_mark(f->rbName); +} + +/* + * call-seq: initialize(name, offset, type) + * @param [String,Symbol] name + * @param [Fixnum] offset + * @param [FFI::Type] type + * @return [self] + * A new FFI::StructLayout::Field instance. + */ +static VALUE +struct_field_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE rbOffset = Qnil, rbName = Qnil, rbType = Qnil; + StructField* field; + int nargs; + + Data_Get_Struct(self, StructField, field); + + nargs = rb_scan_args(argc, argv, "3", &rbName, &rbOffset, &rbType); + + if (TYPE(rbName) != T_SYMBOL && TYPE(rbName) != T_STRING) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected Symbol/String)", + rb_obj_classname(rbName)); + } + + Check_Type(rbOffset, T_FIXNUM); + + if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected FFI::Type)", + rb_obj_classname(rbType)); + } + + field->offset = NUM2UINT(rbOffset); + field->rbName = (TYPE(rbName) == T_SYMBOL) ? rbName : rb_str_intern(rbName); + field->rbType = rbType; + Data_Get_Struct(field->rbType, Type, field->type); + field->memoryOp = get_memory_op(field->type); + field->referenceIndex = -1; + + switch (field->type->nativeType == NATIVE_MAPPED ? ((MappedType *) field->type)->type->nativeType : field->type->nativeType) { + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: + case NATIVE_POINTER: + field->referenceRequired = true; + break; + + default: + field->referenceRequired = (rb_respond_to(self, rb_intern("reference_required?")) + && RTEST(rb_funcall2(self, rb_intern("reference_required?"), 0, NULL))) + || (rb_respond_to(rbType, rb_intern("reference_required?")) + && RTEST(rb_funcall2(rbType, rb_intern("reference_required?"), 0, NULL))); + break; + } + + return self; +} + +/* + * call-seq: offset + * @return [Numeric] + * Get the field offset. + */ +static VALUE +struct_field_offset(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->offset); +} + +/* + * call-seq: size + * @return [Numeric] + * Get the field size. 
+ */ +static VALUE +struct_field_size(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->type->ffiType->size); +} + +/* + * call-seq: alignment + * @return [Numeric] + * Get the field alignment. + */ +static VALUE +struct_field_alignment(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->type->ffiType->alignment); +} + +/* + * call-seq: type + * @return [Type] + * Get the field type. + */ +static VALUE +struct_field_type(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + + return field->rbType; +} + +/* + * call-seq: name + * @return [Symbol] + * Get the field name. + */ +static VALUE +struct_field_name(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return field->rbName; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [Object] + * Get an object of type {#type} from memory pointed by +pointer+. + */ +static VALUE +struct_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + if (f->memoryOp == NULL) { + rb_raise(rb_eArgError, "get not supported for %s", rb_obj_classname(f->rbType)); + return Qnil; + } + + return (*f->memoryOp->get)(MEMORY(pointer), f->offset); +} + +/* + * call-seq: put(pointer, value) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @param [Object] value this object must be a kind of {#type} + * @return [self] + * Put an object to memory pointed by +pointer+. + */ +static VALUE +struct_field_put(VALUE self, VALUE pointer, VALUE value) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + if (f->memoryOp == NULL) { + rb_raise(rb_eArgError, "put not supported for %s", rb_obj_classname(f->rbType)); + return self; + } + + (*f->memoryOp->put)(MEMORY(pointer), f->offset, value); + + return self; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [Function] + * Get a {Function} from memory pointed by +pointer+. + */ +static VALUE +function_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + + return rbffi_Function_NewInstance(f->rbType, (*rbffi_AbstractMemoryOps.pointer->get)(MEMORY(pointer), f->offset)); +} + +/* + * call-seq: put(pointer, proc) + * @param [AbstractMemory] pointer pointer to a {Struct} + * @param [Function, Proc] proc + * @return [Function] + * Set a {Function} to memory pointed by +pointer+ as a function. + * + * If a Proc is submitted as +proc+, it is automatically transformed to a {Function}. 
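Editor's note: the offset/size/alignment/name/type accessors and the generic get/put defined in this file are what FFI::Struct's class-level introspection helpers ultimately read. A small sketch; Packet is an illustrative class and the numeric results assume a typical 64-bit ABI:

    class Packet < FFI::Struct
      layout :kind,  :uint8,
             :value, :double
    end

    Packet.members            # => [:kind, :value]
    Packet.offset_of(:value)  # => 8 on most ABIs (double aligned to 8 bytes)
    Packet.size               # => 16
    Packet.alignment          # => 8
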
+ */ +static VALUE +function_field_put(VALUE self, VALUE pointer, VALUE proc) +{ + StructField* f; + VALUE value = Qnil; + + Data_Get_Struct(self, StructField, f); + + if (NIL_P(proc) || rb_obj_is_kind_of(proc, rbffi_FunctionClass)) { + value = proc; + } else if (rb_obj_is_kind_of(proc, rb_cProc) || rb_respond_to(proc, rb_intern("call"))) { + value = rbffi_Function_ForProc(f->rbType, proc); + } else { + rb_raise(rb_eTypeError, "wrong type (expected Proc or Function)"); + } + + (*rbffi_AbstractMemoryOps.pointer->put)(MEMORY(pointer), f->offset, value); + + return self; +} + +static inline bool +isCharArray(ArrayType* arrayType) +{ + return arrayType->componentType->nativeType == NATIVE_INT8 + || arrayType->componentType->nativeType == NATIVE_UINT8; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [FFI::StructLayout::CharArray, FFI::Struct::InlineArray] + * Get an array from a {Struct}. + */ +static VALUE +array_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + ArrayType* array; + VALUE argv[2]; + + Data_Get_Struct(self, StructField, f); + Data_Get_Struct(f->rbType, ArrayType, array); + + argv[0] = pointer; + argv[1] = self; + + return rb_class_new_instance(2, argv, isCharArray(array) + ? rbffi_StructLayoutCharArrayClass : rbffi_StructInlineArrayClass); +} + +/* + * call-seq: put(pointer, value) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @param [String, Array] value +value+ may be a String only if array's type is a kind of +int8+ + * @return [value] + * Set an array in a {Struct}. + */ +static VALUE +array_field_put(VALUE self, VALUE pointer, VALUE value) +{ + StructField* f; + ArrayType* array; + + + Data_Get_Struct(self, StructField, f); + Data_Get_Struct(f->rbType, ArrayType, array); + + if (isCharArray(array) && rb_obj_is_instance_of(value, rb_cString)) { + VALUE argv[2]; + + argv[0] = INT2FIX(f->offset); + argv[1] = value; + + if (RSTRING_LEN(value) < array->length) { + rb_funcall2(pointer, rb_intern("put_string"), 2, argv); + } else if (RSTRING_LEN(value) == array->length) { + rb_funcall2(pointer, rb_intern("put_bytes"), 2, argv); + } else { + rb_raise(rb_eIndexError, "String is longer (%ld bytes) than the char array (%d bytes)", RSTRING_LEN(value), array->length); + } + } else { +#ifdef notyet + MemoryOp* op; + int count = RARRAY_LEN(value); + int i; + AbstractMemory* memory = MEMORY(pointer); + + if (count > array->length) { + rb_raise(rb_eIndexError, "array too large"); + } + + /* clear the contents in case of a short write */ + checkWrite(memory); + checkBounds(memory, f->offset, f->type->ffiType->size); + if (count < array->length) { + memset(memory->address + f->offset + (count * array->componentType->ffiType->size), + 0, (array->length - count) * array->componentType->ffiType->size); + } + + /* now copy each element in */ + if ((op = get_memory_op(array->componentType)) != NULL) { + + for (i = 0; i < count; ++i) { + (*op->put)(memory, f->offset + (i * array->componentType->ffiType->size), rb_ary_entry(value, i)); + } + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + + for (i = 0; i < count; ++i) { + VALUE entry = rb_ary_entry(value, i); + Struct* s; + + if (!rb_obj_is_kind_of(entry, rbffi_StructClass)) { + rb_raise(rb_eTypeError, "array element not an instance of FFI::Struct"); + break; + } + + Data_Get_Struct(entry, Struct, s); + checkRead(s->pointer); + checkBounds(s->pointer, 0, array->componentType->ffiType->size); + + memcpy(memory->address + f->offset + (i * 
array->componentType->ffiType->size), + s->pointer->address, array->componentType->ffiType->size); + } + + } else { + rb_raise(rb_eNotImpError, "put not supported for arrays of type %s", rb_obj_classname(array->rbComponentType)); + } +#else + rb_raise(rb_eNotImpError, "cannot set array field"); +#endif + } + + return value; +} + + +static VALUE +struct_layout_allocate(VALUE klass) +{ + StructLayout* layout; + VALUE obj; + + obj = Data_Make_Struct(klass, StructLayout, struct_layout_mark, struct_layout_free, layout); + layout->rbFieldMap = Qnil; + layout->rbFieldNames = Qnil; + layout->rbFields = Qnil; + layout->base.ffiType = xcalloc(1, sizeof(*layout->base.ffiType)); + layout->base.ffiType->size = 0; + layout->base.ffiType->alignment = 0; + layout->base.ffiType->type = FFI_TYPE_STRUCT; + + return obj; +} + +/* + * call-seq: initialize(fields, size, align) + * @param [Array] fields + * @param [Numeric] size + * @param [Numeric] align + * @return [self] + * A new StructLayout instance. + */ +static VALUE +struct_layout_initialize(VALUE self, VALUE fields, VALUE size, VALUE align) +{ + StructLayout* layout; + ffi_type* ltype; + int i; + + Data_Get_Struct(self, StructLayout, layout); + layout->fieldCount = (int) RARRAY_LEN(fields); + layout->rbFieldMap = rb_hash_new(); + layout->rbFieldNames = rb_ary_new2(layout->fieldCount); + layout->size = (int) FFI_ALIGN(NUM2INT(size), NUM2INT(align)); + layout->align = NUM2INT(align); + layout->fields = xcalloc(layout->fieldCount, sizeof(StructField *)); + layout->ffiTypes = xcalloc(layout->fieldCount + 1, sizeof(ffi_type *)); + layout->rbFields = rb_ary_new2(layout->fieldCount); + layout->referenceFieldCount = 0; + layout->base.ffiType->elements = layout->ffiTypes; + layout->base.ffiType->size = layout->size; + layout->base.ffiType->alignment = layout->align; + + ltype = layout->base.ffiType; + for (i = 0; i < (int) layout->fieldCount; ++i) { + VALUE rbField = rb_ary_entry(fields, i); + VALUE rbName; + StructField* field; + ffi_type* ftype; + + + if (!rb_obj_is_kind_of(rbField, rbffi_StructLayoutFieldClass)) { + rb_raise(rb_eTypeError, "wrong type for field %d.", i); + } + rbName = rb_funcall2(rbField, rb_intern("name"), 0, NULL); + + Data_Get_Struct(rbField, StructField, field); + layout->fields[i] = field; + + if (field->type == NULL || field->type->ffiType == NULL) { + rb_raise(rb_eRuntimeError, "type of field %d not supported", i); + } + + ftype = field->type->ffiType; + if (ftype->size == 0 && i < ((int) layout->fieldCount - 1)) { + rb_raise(rb_eTypeError, "type of field %d has zero size", i); + } + + if (field->referenceRequired) { + field->referenceIndex = layout->referenceFieldCount++; + } + + + layout->ffiTypes[i] = ftype->size > 0 ? ftype : NULL; + rb_hash_aset(layout->rbFieldMap, rbName, rbField); + rb_ary_push(layout->rbFields, rbField); + rb_ary_push(layout->rbFieldNames, rbName); + } + + if (ltype->size == 0) { + rb_raise(rb_eRuntimeError, "Struct size is zero"); + } + + return self; +} + +/* + * call-seq: [](field) + * @param [Symbol] field + * @return [StructLayout::Field] + * Get a field from the layout. 
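Editor's note: array_field_put above special-cases char arrays: a Ruby String shorter than the array is written with put_string (NUL-terminated), an exact-length String with put_bytes, and anything longer raises IndexError. A sketch with an illustrative Header struct:

    class Header < FFI::Struct
      layout :magic, [:char, 8]
    end

    h = Header.new
    h[:magic] = "ZEPH"         # 4 < 8  -> put_string, NUL terminated
    h[:magic].to_s             # => "ZEPH" (StructLayout::CharArray#to_s)
    h[:magic] = "ABCDEFGH"     # exactly 8 -> put_bytes, no trailing NUL
    # h[:magic] = "far too long"  # would raise IndexError
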
+ */ +static VALUE +struct_layout_union_bang(VALUE self) +{ + const ffi_type *alignment_types[] = { &ffi_type_sint8, &ffi_type_sint16, &ffi_type_sint32, &ffi_type_sint64, + &ffi_type_float, &ffi_type_double, &ffi_type_longdouble, NULL }; + StructLayout* layout; + ffi_type *t = NULL; + int count, i; + + Data_Get_Struct(self, StructLayout, layout); + + for (i = 0; alignment_types[i] != NULL; ++i) { + if (alignment_types[i]->alignment == layout->align) { + t = (ffi_type *) alignment_types[i]; + break; + } + } + if (t == NULL) { + rb_raise(rb_eRuntimeError, "cannot create libffi union representation for alignment %d", layout->align); + return Qnil; + } + + count = (int) layout->size / (int) t->size; + xfree(layout->ffiTypes); + layout->ffiTypes = xcalloc(count + 1, sizeof(ffi_type *)); + layout->base.ffiType->elements = layout->ffiTypes; + + for (i = 0; i < count; ++i) { + layout->ffiTypes[i] = t; + } + + return self; +} + +static VALUE +struct_layout_aref(VALUE self, VALUE field) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_hash_aref(layout->rbFieldMap, field); +} + +/* + * call-seq: fields + * @return [Array] + * Get fields list. + */ +static VALUE +struct_layout_fields(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFields); +} + +/* + * call-seq: members + * @return [Array] + * Get list of field names. + */ +static VALUE +struct_layout_members(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFieldNames); +} + +/* + * call-seq: to_a + * @return [Array] + * Get an array of fields. + */ +static VALUE +struct_layout_to_a(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFields); +} + +static void +struct_layout_mark(StructLayout *layout) +{ + rb_gc_mark(layout->rbFieldMap); + rb_gc_mark(layout->rbFieldNames); + rb_gc_mark(layout->rbFields); + /* Clear the cache, to be safe from changes of fieldName VALUE by GC.compact. + * TODO: Move cache clearing to compactation callback provided by Ruby-2.7+. + */ + memset(&layout->cache_row, 0, sizeof(layout->cache_row)); +} + +static void +struct_layout_free(StructLayout *layout) +{ + xfree(layout->ffiTypes); + xfree(layout->base.ffiType); + xfree(layout->fields); + xfree(layout); +} + + +void +rbffi_StructLayout_Init(VALUE moduleFFI) +{ + VALUE ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::StructLayout < FFI::Type + * + * This class aims at defining a struct layout. + */ + rbffi_StructLayoutClass = rb_define_class_under(moduleFFI, "StructLayout", ffi_Type); + rb_global_variable(&rbffi_StructLayoutClass); + + /* + * Document-class: FFI::StructLayout::Field + * A field in a {StructLayout}. + */ + rbffi_StructLayoutFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Field", rb_cObject); + rb_global_variable(&rbffi_StructLayoutFieldClass); + + /* + * Document-class: FFI::StructLayout::Number + * A numeric {Field} in a {StructLayout}. + */ + rbffi_StructLayoutNumberFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Number", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutNumberFieldClass); + + /* + * Document-class: FFI::StructLayout::String + * A string {Field} in a {StructLayout}. 
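Editor's note: __union! above rebuilds the libffi element list as a repeated alignment-matched type, which is how FFI::Union layouts are represented. The Ruby-visible behaviour, roughly (Scalar is an illustrative name):

    require 'ffi'

    class Scalar < FFI::Union
      layout :i, :uint32,
             :f, :float
    end

    s = Scalar.new
    s[:i] = 0x3f800000   # both members share the same four bytes
    s[:f]                # => 1.0 (the bytes reinterpreted as an IEEE-754 float)
    Scalar.size          # => 4
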
+ */ + rbffi_StructLayoutStringFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "String", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutStringFieldClass); + + /* + * Document-class: FFI::StructLayout::Pointer + * A pointer {Field} in a {StructLayout}. + */ + rbffi_StructLayoutPointerFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Pointer", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutPointerFieldClass); + + /* + * Document-class: FFI::StructLayout::Function + * A function pointer {Field} in a {StructLayout}. + */ + rbffi_StructLayoutFunctionFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Function", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutFunctionFieldClass); + + /* + * Document-class: FFI::StructLayout::Array + * An array {Field} in a {StructLayout}. + */ + rbffi_StructLayoutArrayFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Array", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutArrayFieldClass); + + rb_define_alloc_func(rbffi_StructLayoutFieldClass, struct_field_allocate); + rb_define_method(rbffi_StructLayoutFieldClass, "initialize", struct_field_initialize, -1); + rb_define_method(rbffi_StructLayoutFieldClass, "offset", struct_field_offset, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "size", struct_field_size, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "alignment", struct_field_alignment, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "name", struct_field_name, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "type", struct_field_type, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "put", struct_field_put, 2); + rb_define_method(rbffi_StructLayoutFieldClass, "get", struct_field_get, 1); + + rb_define_method(rbffi_StructLayoutFunctionFieldClass, "put", function_field_put, 2); + rb_define_method(rbffi_StructLayoutFunctionFieldClass, "get", function_field_get, 1); + + rb_define_method(rbffi_StructLayoutArrayFieldClass, "get", array_field_get, 1); + rb_define_method(rbffi_StructLayoutArrayFieldClass, "put", array_field_put, 2); + + rb_define_alloc_func(rbffi_StructLayoutClass, struct_layout_allocate); + rb_define_method(rbffi_StructLayoutClass, "initialize", struct_layout_initialize, 3); + rb_define_method(rbffi_StructLayoutClass, "[]", struct_layout_aref, 1); + rb_define_method(rbffi_StructLayoutClass, "fields", struct_layout_fields, 0); + rb_define_method(rbffi_StructLayoutClass, "members", struct_layout_members, 0); + rb_define_method(rbffi_StructLayoutClass, "to_a", struct_layout_to_a, 0); + rb_define_method(rbffi_StructLayoutClass, "__union!", struct_layout_union_bang, 0); + +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Thread.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Thread.c new file mode 100644 index 0000000..aab3344 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Thread.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifndef _MSC_VER +#include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif + +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +# include +# include +# include +#else +# include +# define _WINSOCKAPI_ +# include +#endif +#include +#include "Thread.h" + +#ifdef _WIN32 +static volatile DWORD frame_thread_key = TLS_OUT_OF_INDEXES; +#else +static pthread_key_t thread_data_key; +struct thread_data { + rbffi_frame_t* frame; +}; +static inline struct thread_data* thread_data_get(void); + +#endif + +rbffi_frame_t* +rbffi_frame_current(void) +{ +#ifdef _WIN32 + return (rbffi_frame_t *) TlsGetValue(frame_thread_key); +#else + struct thread_data* td = (struct thread_data *) pthread_getspecific(thread_data_key); + return td != NULL ? td->frame : NULL; +#endif +} + +void +rbffi_frame_push(rbffi_frame_t* frame) +{ + memset(frame, 0, sizeof(*frame)); + frame->exc = Qnil; + +#ifdef _WIN32 + frame->prev = TlsGetValue(frame_thread_key); + TlsSetValue(frame_thread_key, frame); +#else + frame->td = thread_data_get(); + frame->prev = frame->td->frame; + frame->td->frame = frame; +#endif +} + +void +rbffi_frame_pop(rbffi_frame_t* frame) +{ +#ifdef _WIN32 + TlsSetValue(frame_thread_key, frame->prev); +#else + frame->td->frame = frame->prev; +#endif +} + +#ifndef _WIN32 +static struct thread_data* thread_data_init(void); + +static inline struct thread_data* +thread_data_get(void) +{ + struct thread_data* td = (struct thread_data *) pthread_getspecific(thread_data_key); + return td != NULL ? 
td : thread_data_init(); +} + +static struct thread_data* +thread_data_init(void) +{ + struct thread_data* td = calloc(1, sizeof(struct thread_data)); + + pthread_setspecific(thread_data_key, td); + + return td; +} + +static void +thread_data_free(void *ptr) +{ + free(ptr); +} +#endif + +void +rbffi_Thread_Init(VALUE moduleFFI) +{ +#ifdef _WIN32 + frame_thread_key = TlsAlloc(); +#else + pthread_key_create(&thread_data_key, thread_data_free); +#endif +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Thread.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Thread.h new file mode 100644 index 0000000..46a65c6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Thread.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_THREAD_H +#define RBFFI_THREAD_H + +#ifndef _MSC_VER +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include +#include "extconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifdef _WIN32 +# include +#else +# include +#endif + +typedef struct { +#ifdef _WIN32 + DWORD id; +#else + pthread_t id; +#endif + bool valid; + bool has_gvl; + VALUE exc; +} rbffi_thread_t; + +typedef struct rbffi_frame { +#ifndef _WIN32 + struct thread_data* td; +#endif + struct rbffi_frame* prev; + VALUE exc; +} rbffi_frame_t; + +rbffi_frame_t* rbffi_frame_current(void); +void rbffi_frame_push(rbffi_frame_t* frame); +void rbffi_frame_pop(rbffi_frame_t* frame); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_THREAD_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Type.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Type.c new file mode 100644 index 0000000..bc82857 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Type.c @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
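Editor's note: Thread.c and Thread.h above maintain a per-thread stack of call frames (pthread keys on POSIX, TlsAlloc on Windows) so an exception recorded while native code is running can be re-raised after ffi_call returns (see the frame.exc checks in Variadic.c further down). Purely as an illustration of the pattern, not gem API, the idea in Ruby terms is roughly:

    # Illustrative only -- not part of the ffi gem's public API.
    def with_frame
      stack = (Thread.current[:ffi_frames] ||= [])
      stack.push(exc: nil)               # rbffi_frame_push
      yield stack.last
    ensure
      frame = stack.pop                  # rbffi_frame_pop
      raise frame[:exc] if frame[:exc]   # cf. rb_exc_raise(frame.exc) in Variadic.c
    end
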
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif + +#include +#include +#include +#include "rbffi.h" +#include "compat.h" +#include "Types.h" +#include "Type.h" + + +typedef struct BuiltinType_ { + Type type; + char* name; +} BuiltinType; + +static void builtin_type_free(BuiltinType *); + +VALUE rbffi_TypeClass = Qnil; + +static VALUE classBuiltinType = Qnil; +static VALUE moduleNativeType = Qnil; +static VALUE typeMap = Qnil, sizeMap = Qnil; +static ID id_find_type = 0, id_type_size = 0, id_size = 0; + +static VALUE +type_allocate(VALUE klass) +{ + Type* type; + VALUE obj = Data_Make_Struct(klass, Type, NULL, -1, type); + + type->nativeType = -1; + type->ffiType = &ffi_type_void; + + return obj; +} + +/* + * Document-method: initialize + * call-seq: initialize(value) + * @param [Fixnum,Type] value + * @return [self] + */ +static VALUE +type_initialize(VALUE self, VALUE value) +{ + Type* type; + Type* other; + + Data_Get_Struct(self, Type, type); + + if (FIXNUM_P(value)) { + type->nativeType = FIX2INT(value); + } else if (rb_obj_is_kind_of(value, rbffi_TypeClass)) { + Data_Get_Struct(value, Type, other); + type->nativeType = other->nativeType; + type->ffiType = other->ffiType; + } else { + rb_raise(rb_eArgError, "wrong type"); + } + + return self; +} + +/* + * call-seq: type.size + * @return [Fixnum] + * Return type's size, in bytes. + */ +static VALUE +type_size(VALUE self) +{ + Type *type; + + Data_Get_Struct(self, Type, type); + + return INT2FIX(type->ffiType->size); +} + +/* + * call-seq: type.alignment + * @return [Fixnum] + * Get Type alignment. + */ +static VALUE +type_alignment(VALUE self) +{ + Type *type; + + Data_Get_Struct(self, Type, type); + + return INT2FIX(type->ffiType->alignment); +} + +/* + * call-seq: type.inspect + * @return [String] + * Inspect {Type} object. 
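Editor's note: type_size, type_alignment and type_inspect in this file are exposed as FFI::Type#size, #alignment and #inspect. For example (sizes shown for a typical 64-bit platform):

    require 'ffi'

    FFI::Type::INT32.size        # => 4
    FFI::Type::DOUBLE.alignment  # => 8
    FFI::Type::POINTER.size      # => 8 on a 64-bit platform
    FFI::Type::INT32.class       # => FFI::Type::Builtin
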
+ */ +static VALUE +type_inspect(VALUE self) +{ + char buf[100]; + Type *type; + + Data_Get_Struct(self, Type, type); + + snprintf(buf, sizeof(buf), "#<%s:%p size=%d alignment=%d>", + rb_obj_classname(self), type, (int) type->ffiType->size, (int) type->ffiType->alignment); + + return rb_str_new2(buf); +} + +static VALUE +builtin_type_new(VALUE klass, int nativeType, ffi_type* ffiType, const char* name) +{ + BuiltinType* type; + VALUE obj = Qnil; + + obj = Data_Make_Struct(klass, BuiltinType, NULL, builtin_type_free, type); + + type->name = strdup(name); + type->type.nativeType = nativeType; + type->type.ffiType = ffiType; + + return obj; +} + +static void +builtin_type_free(BuiltinType *type) +{ + free(type->name); + xfree(type); +} + +/* + * call-seq: type.inspect + * @return [String] + * Inspect {Type::Builtin} object. + */ +static VALUE +builtin_type_inspect(VALUE self) +{ + char buf[100]; + BuiltinType *type; + + Data_Get_Struct(self, BuiltinType, type); + snprintf(buf, sizeof(buf), "#<%s:%s size=%d alignment=%d>", + rb_obj_classname(self), type->name, (int) type->type.ffiType->size, type->type.ffiType->alignment); + + return rb_str_new2(buf); +} + +int +rbffi_type_size(VALUE type) +{ + int t = TYPE(type); + + if (t == T_FIXNUM || t == T_BIGNUM) { + return NUM2INT(type); + + } else if (t == T_SYMBOL) { + /* + * Try looking up directly in the type and size maps + */ + VALUE nType; + if ((nType = rb_hash_lookup(typeMap, type)) != Qnil) { + if (rb_obj_is_kind_of(nType, rbffi_TypeClass)) { + Type* type; + Data_Get_Struct(nType, Type, type); + return (int) type->ffiType->size; + + } else if (rb_respond_to(nType, id_size)) { + return NUM2INT(rb_funcall2(nType, id_size, 0, NULL)); + } + } + + /* Not found - call up to the ruby version to resolve */ + return NUM2INT(rb_funcall2(rbffi_FFIModule, id_type_size, 1, &type)); + + } else { + return NUM2INT(rb_funcall2(type, id_size, 0, NULL)); + } +} + +VALUE +rbffi_Type_Lookup(VALUE name) +{ + int t = TYPE(name); + if (t == T_SYMBOL || t == T_STRING) { + /* + * Try looking up directly in the type Map + */ + VALUE nType; + if ((nType = rb_hash_lookup(typeMap, name)) != Qnil && rb_obj_is_kind_of(nType, rbffi_TypeClass)) { + return nType; + } + } else if (rb_obj_is_kind_of(name, rbffi_TypeClass)) { + + return name; + } + + /* Nothing found - let caller handle raising exceptions */ + return Qnil; +} + +void +rbffi_Type_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::Type + * This class manages C types. + * + * It embbed {FFI::Type::Builtin} objects as constants (for names, + * see {FFI::NativeType}). + */ + rbffi_TypeClass = rb_define_class_under(moduleFFI, "Type", rb_cObject); + + /* + * Document-constant: FFI::TypeDefs + */ + rb_define_const(moduleFFI, "TypeDefs", typeMap = rb_hash_new()); + rb_define_const(moduleFFI, "SizeTypes", sizeMap = rb_hash_new()); + rb_global_variable(&typeMap); + rb_global_variable(&sizeMap); + id_find_type = rb_intern("find_type"); + id_type_size = rb_intern("type_size"); + id_size = rb_intern("size"); + + /* + * Document-class: FFI::Type::Builtin + * Class for Built-in types. + */ + classBuiltinType = rb_define_class_under(rbffi_TypeClass, "Builtin", rbffi_TypeClass); + /* + * Document-module: FFI::NativeType + * This module defines constants for native (C) types. 
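Editor's note: rbffi_type_size and rbffi_Type_Lookup below resolve Ruby symbols against the FFI::TypeDefs hash created here, falling back to the Ruby-level find_type/type_size helpers. From Ruby that resolution looks roughly like this (TypeDefs entries such as :uint32 are registered by the gem's Ruby layer):

    FFI.find_type(:uint32)                    # => FFI::Type::UINT32
    FFI.type_size(:double)                    # => 8
    FFI::TypeDefs.key?(:uint32)               # => true, symbols map to Type objects
    FFI::Type::CHAR.equal?(FFI::Type::INT8)   # => true, CHAR is an alias (see the A() macro below)
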
+ * + * ==Native type constants + * Native types are defined by constants : + * * INT8, SCHAR, CHAR + * * UINT8, UCHAR + * * INT16, SHORT, SSHORT + * * UINT16, USHORT + * * INT32,, INT, SINT + * * UINT32, UINT + * * INT64, LONG_LONG, SLONG_LONG + * * UINT64, ULONG_LONG + * * LONG, SLONG + * * ULONG + * * FLOAT32, FLOAT + * * FLOAT64, DOUBLE + * * POINTER + * * CALLBACK + * * FUNCTION + * * CHAR_ARRAY + * * BOOL + * * STRING (immutable string, nul terminated) + * * STRUCT (struct-b-value param or result) + * * ARRAY (array type definition) + * * MAPPED (custom native type) + * For function return type only : + * * VOID + * For function argument type only : + * * BUFFER_IN + * * BUFFER_OUT + * * VARARGS (function takes a variable number of arguments) + * + * All these constants are exported to {FFI} module prefixed with "TYPE_". + * They are objets from {FFI::Type::Builtin} class. + */ + moduleNativeType = rb_define_module_under(moduleFFI, "NativeType"); + + /* + * Document-global: FFI::Type + */ + rb_global_variable(&rbffi_TypeClass); + rb_global_variable(&classBuiltinType); + rb_global_variable(&moduleNativeType); + + rb_define_alloc_func(rbffi_TypeClass, type_allocate); + rb_define_method(rbffi_TypeClass, "initialize", type_initialize, 1); + rb_define_method(rbffi_TypeClass, "size", type_size, 0); + rb_define_method(rbffi_TypeClass, "alignment", type_alignment, 0); + rb_define_method(rbffi_TypeClass, "inspect", type_inspect, 0); + + /* Make Type::Builtin non-allocatable */ + rb_undef_method(CLASS_OF(classBuiltinType), "new"); + rb_define_method(classBuiltinType, "inspect", builtin_type_inspect, 0); + + rb_global_variable(&rbffi_TypeClass); + rb_global_variable(&classBuiltinType); + + /* Define all the builtin types */ + #define T(x, ffiType) do { \ + VALUE t = Qnil; \ + rb_define_const(rbffi_TypeClass, #x, t = builtin_type_new(classBuiltinType, NATIVE_##x, ffiType, #x)); \ + rb_define_const(moduleNativeType, #x, t); \ + rb_define_const(moduleFFI, "TYPE_" #x, t); \ + } while(0) + + #define A(old_type, new_type) do { \ + VALUE t = rb_const_get(rbffi_TypeClass, rb_intern(#old_type)); \ + rb_const_set(rbffi_TypeClass, rb_intern(#new_type), t); \ + } while(0) + + /* + * Document-constant: FFI::Type::Builtin::VOID + */ + T(VOID, &ffi_type_void); + T(INT8, &ffi_type_sint8); + A(INT8, SCHAR); + A(INT8, CHAR); + T(UINT8, &ffi_type_uint8); + A(UINT8, UCHAR); + + T(INT16, &ffi_type_sint16); + A(INT16, SHORT); + A(INT16, SSHORT); + T(UINT16, &ffi_type_uint16); + A(UINT16, USHORT); + T(INT32, &ffi_type_sint32); + A(INT32, INT); + A(INT32, SINT); + T(UINT32, &ffi_type_uint32); + A(UINT32, UINT); + T(INT64, &ffi_type_sint64); + A(INT64, LONG_LONG); + A(INT64, SLONG_LONG); + T(UINT64, &ffi_type_uint64); + A(UINT64, ULONG_LONG); + T(LONG, &ffi_type_slong); + A(LONG, SLONG); + T(ULONG, &ffi_type_ulong); + T(FLOAT32, &ffi_type_float); + A(FLOAT32, FLOAT); + T(FLOAT64, &ffi_type_double); + A(FLOAT64, DOUBLE); + T(LONGDOUBLE, &ffi_type_longdouble); + T(POINTER, &ffi_type_pointer); + T(STRING, &ffi_type_pointer); + T(BUFFER_IN, &ffi_type_pointer); + T(BUFFER_OUT, &ffi_type_pointer); + T(BUFFER_INOUT, &ffi_type_pointer); + T(BOOL, &ffi_type_uchar); + T(VARARGS, &ffi_type_void); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Type.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Type.h new file mode 100644 index 0000000..b81995a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Type.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (C) 
2009 Luc Heinrich + * + * This file is part of ruby-ffi. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the Evan Phoenix nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef RBFFI_TYPE_H +#define RBFFI_TYPE_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct Type_ Type; + +#include "Types.h" + +struct Type_ { + NativeType nativeType; + ffi_type* ffiType; +}; + +extern VALUE rbffi_TypeClass; +extern VALUE rbffi_Type_Lookup(VALUE type); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_TYPE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Types.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Types.c new file mode 100644 index 0000000..bb757c9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Types.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Aman Gupta. + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include "Pointer.h" +#include "rbffi.h" +#include "Function.h" +#include "StructByValue.h" +#include "Types.h" +#include "Struct.h" +#include "MappedType.h" +#include "MemoryPointer.h" +#include "LongDouble.h" + +static ID id_from_native = 0; + + +VALUE +rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr) +{ + switch (type->nativeType) { + case NATIVE_VOID: + return Qnil; + case NATIVE_INT8: + return INT2NUM((signed char) *(ffi_sarg *) ptr); + case NATIVE_INT16: + return INT2NUM((signed short) *(ffi_sarg *) ptr); + case NATIVE_INT32: + return INT2NUM((signed int) *(ffi_sarg *) ptr); + case NATIVE_LONG: + return LONG2NUM((signed long) *(ffi_sarg *) ptr); + case NATIVE_INT64: + return LL2NUM(*(signed long long *) ptr); + + case NATIVE_UINT8: + return UINT2NUM((unsigned char) *(ffi_arg *) ptr); + case NATIVE_UINT16: + return UINT2NUM((unsigned short) *(ffi_arg *) ptr); + case NATIVE_UINT32: + return UINT2NUM((unsigned int) *(ffi_arg *) ptr); + case NATIVE_ULONG: + return ULONG2NUM((unsigned long) *(ffi_arg *) ptr); + case NATIVE_UINT64: + return ULL2NUM(*(unsigned long long *) ptr); + + case NATIVE_FLOAT32: + return rb_float_new(*(float *) ptr); + case NATIVE_FLOAT64: + return rb_float_new(*(double *) ptr); + + case NATIVE_LONGDOUBLE: + return rbffi_longdouble_new(*(long double *) ptr); + + case NATIVE_STRING: + return (*(void **) ptr != NULL) ? rb_str_new2(*(char **) ptr) : Qnil; + case NATIVE_POINTER: + return rbffi_Pointer_NewInstance(*(void **) ptr); + case NATIVE_BOOL: + return ((unsigned char) *(ffi_arg *) ptr) ? Qtrue : Qfalse; + + case NATIVE_FUNCTION: + case NATIVE_CALLBACK: { + return *(void **) ptr != NULL + ? 
rbffi_Function_NewInstance(rbType, rbffi_Pointer_NewInstance(*(void **) ptr)) + : Qnil; + } + + case NATIVE_STRUCT: { + StructByValue* sbv = (StructByValue *)type; + AbstractMemory* mem; + VALUE rbMemory = rbffi_MemoryPointer_NewInstance(1, sbv->base.ffiType->size, false); + + Data_Get_Struct(rbMemory, AbstractMemory, mem); + memcpy(mem->address, ptr, sbv->base.ffiType->size); + RB_GC_GUARD(rbMemory); + RB_GC_GUARD(rbType); + + return rb_class_new_instance(1, &rbMemory, sbv->rbStructClass); + } + + case NATIVE_MAPPED: { + /* + * For mapped types, first convert to the real native type, then upcall to + * ruby to convert to the expected return type + */ + MappedType* m = (MappedType *) type; + VALUE values[2], rbReturnValue; + + values[0] = rbffi_NativeValue_ToRuby(m->type, m->rbType, ptr); + values[1] = Qnil; + + + rbReturnValue = rb_funcall2(m->rbConverter, id_from_native, 2, values); + RB_GC_GUARD(values[0]); + RB_GC_GUARD(rbType); + + return rbReturnValue; + } + + default: + rb_raise(rb_eRuntimeError, "Unknown type: %d", type->nativeType); + return Qnil; + } +} + +void +rbffi_Types_Init(VALUE moduleFFI) +{ + id_from_native = rb_intern("from_native"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Types.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Types.h new file mode 100644 index 0000000..0d4806f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Types.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
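Editor's note: the NATIVE_MAPPED branch above converts the raw native value first and then calls from_native on the registered converter, which is the hook behind FFI::DataConverter. A hedged sketch; UnixTime and the :unix_time typedef are illustrative names, not gem code:

    require 'ffi'

    # Maps a C time_t-style int64 to Ruby Time and back (illustrative).
    module UnixTime
      extend FFI::DataConverter
      native_type FFI::Type::INT64

      def self.from_native(value, ctx)   # invoked via the NATIVE_MAPPED branch above
        Time.at(value)
      end

      def self.to_native(value, ctx)
        value.to_i
      end
    end

    FFI.typedef(UnixTime, :unix_time)    # usable afterwards as a return/parameter type
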
+ */ + +#ifndef RBFFI_TYPES_H +#define RBFFI_TYPES_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + NATIVE_VOID, + NATIVE_INT8, + NATIVE_UINT8, + NATIVE_INT16, + NATIVE_UINT16, + NATIVE_INT32, + NATIVE_UINT32, + NATIVE_INT64, + NATIVE_UINT64, + NATIVE_LONG, + NATIVE_ULONG, + NATIVE_FLOAT32, + NATIVE_FLOAT64, + NATIVE_LONGDOUBLE, + NATIVE_POINTER, + NATIVE_CALLBACK, + NATIVE_FUNCTION, + NATIVE_BUFFER_IN, + NATIVE_BUFFER_OUT, + NATIVE_BUFFER_INOUT, + NATIVE_CHAR_ARRAY, + NATIVE_BOOL, + + /** An immutable string. Nul terminated, but only copies in to the native function */ + NATIVE_STRING, + + /** The function takes a variable number of arguments */ + NATIVE_VARARGS, + + /** Struct-by-value param or result */ + NATIVE_STRUCT, + + /** An array type definition */ + NATIVE_ARRAY, + + /** Custom native type */ + NATIVE_MAPPED, +} NativeType; + +#include +#include "Type.h" + +VALUE rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr); +void rbffi_Types_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_TYPES_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Variadic.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Variadic.c new file mode 100644 index 0000000..aafe072 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/Variadic.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (C) 2009 Andrea Fazzi + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include + +#include +#ifndef _MSC_VER +# include +# include +#else +# include "win32/stdbool.h" +# include "win32/stdint.h" +#endif +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "MethodHandle.h" +#include "Call.h" +#include "Thread.h" + +typedef struct VariadicInvoker_ { + VALUE rbAddress; + VALUE rbReturnType; + VALUE rbEnums; + + Type* returnType; + ffi_abi abi; + void* function; + int paramCount; + bool blocking; +} VariadicInvoker; + + +static VALUE variadic_allocate(VALUE klass); +static VALUE variadic_initialize(VALUE self, VALUE rbFunction, VALUE rbParameterTypes, + VALUE rbReturnType, VALUE options); +static void variadic_mark(VariadicInvoker *); + +static VALUE classVariadicInvoker = Qnil; + + +static VALUE +variadic_allocate(VALUE klass) +{ + VariadicInvoker *invoker; + VALUE obj = Data_Make_Struct(klass, VariadicInvoker, variadic_mark, -1, invoker); + + invoker->rbAddress = Qnil; + invoker->rbEnums = Qnil; + invoker->rbReturnType = Qnil; + invoker->blocking = false; + + return obj; +} + +static void +variadic_mark(VariadicInvoker *invoker) +{ + rb_gc_mark(invoker->rbEnums); + rb_gc_mark(invoker->rbAddress); + rb_gc_mark(invoker->rbReturnType); +} + +static VALUE +variadic_initialize(VALUE self, VALUE rbFunction, VALUE rbParameterTypes, VALUE rbReturnType, VALUE options) +{ + VariadicInvoker* invoker = NULL; + VALUE retval = Qnil; + VALUE convention = Qnil; + VALUE fixed = Qnil; +#if defined(X86_WIN32) + VALUE rbConventionStr; +#endif + int i; + + Check_Type(options, T_HASH); + convention = rb_hash_aref(options, ID2SYM(rb_intern("convention"))); + + Data_Get_Struct(self, VariadicInvoker, invoker); + invoker->rbEnums = rb_hash_aref(options, ID2SYM(rb_intern("enums"))); + invoker->rbAddress = rbFunction; + invoker->function = rbffi_AbstractMemory_Cast(rbFunction, rbffi_PointerClass)->address; + invoker->blocking = RTEST(rb_hash_aref(options, ID2SYM(rb_intern("blocking")))); + +#if defined(X86_WIN32) + rbConventionStr = rb_funcall2(convention, rb_intern("to_s"), 0, NULL); + invoker->abi = (RTEST(convention) && strcmp(StringValueCStr(rbConventionStr), "stdcall") == 0) + ? 
FFI_STDCALL : FFI_DEFAULT_ABI; +#else + invoker->abi = FFI_DEFAULT_ABI; +#endif + + invoker->rbReturnType = rbffi_Type_Lookup(rbReturnType); + if (!RTEST(invoker->rbReturnType)) { + VALUE typeName = rb_funcall2(rbReturnType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid return type (%s)", RSTRING_PTR(typeName)); + } + + Data_Get_Struct(rbReturnType, Type, invoker->returnType); + + invoker->paramCount = -1; + + fixed = rb_ary_new2(RARRAY_LEN(rbParameterTypes) - 1); + for (i = 0; i < RARRAY_LEN(rbParameterTypes); ++i) { + VALUE entry = rb_ary_entry(rbParameterTypes, i); + VALUE rbType = rbffi_Type_Lookup(entry); + Type* type; + + if (!RTEST(rbType)) { + VALUE typeName = rb_funcall2(entry, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid parameter type (%s)", RSTRING_PTR(typeName)); + } + Data_Get_Struct(rbType, Type, type); + if (type->nativeType != NATIVE_VARARGS) { + rb_ary_push(fixed, entry); + } + } + /* + * @fixed and @type_map are used by the parameter mangling ruby code + */ + rb_iv_set(self, "@fixed", fixed); + rb_iv_set(self, "@type_map", rb_hash_aref(options, ID2SYM(rb_intern("type_map")))); + + return retval; +} + +static VALUE +variadic_invoke(VALUE self, VALUE parameterTypes, VALUE parameterValues) +{ + VariadicInvoker* invoker; + FFIStorage* params; + void* retval; + ffi_cif cif; + void** ffiValues; + ffi_type** ffiParamTypes; + ffi_type* ffiReturnType; + Type** paramTypes; + VALUE* argv; + int paramCount = 0, fixedCount = 0, i; + ffi_status ffiStatus; + rbffi_frame_t frame = { 0 }; + + Check_Type(parameterTypes, T_ARRAY); + Check_Type(parameterValues, T_ARRAY); + + Data_Get_Struct(self, VariadicInvoker, invoker); + paramCount = (int) RARRAY_LEN(parameterTypes); + paramTypes = ALLOCA_N(Type *, paramCount); + ffiParamTypes = ALLOCA_N(ffi_type *, paramCount); + params = ALLOCA_N(FFIStorage, paramCount); + ffiValues = ALLOCA_N(void*, paramCount); + argv = ALLOCA_N(VALUE, paramCount); + retval = alloca(MAX(invoker->returnType->ffiType->size, FFI_SIZEOF_ARG)); + + for (i = 0; i < paramCount; ++i) { + VALUE rbType = rb_ary_entry(parameterTypes, i); + + if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) { + rb_raise(rb_eTypeError, "wrong type. 
Expected (FFI::Type)"); + } + Data_Get_Struct(rbType, Type, paramTypes[i]); + + switch (paramTypes[i]->nativeType) { + case NATIVE_INT8: + case NATIVE_INT16: + case NATIVE_INT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("INT32")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + case NATIVE_UINT8: + case NATIVE_UINT16: + case NATIVE_UINT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("UINT32")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + + case NATIVE_FLOAT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("DOUBLE")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + + default: + break; + } + + + ffiParamTypes[i] = paramTypes[i]->ffiType; + if (ffiParamTypes[i] == NULL) { + rb_raise(rb_eArgError, "Invalid parameter type #%x", paramTypes[i]->nativeType); + } + argv[i] = rb_ary_entry(parameterValues, i); + } + + ffiReturnType = invoker->returnType->ffiType; + if (ffiReturnType == NULL) { + rb_raise(rb_eArgError, "Invalid return type"); + } + + /*Get the number of fixed args from @fixed array*/ + fixedCount = RARRAY_LEN(rb_iv_get(self, "@fixed")); + +#ifdef HAVE_FFI_PREP_CIF_VAR + ffiStatus = ffi_prep_cif_var(&cif, invoker->abi, fixedCount, paramCount, ffiReturnType, ffiParamTypes); +#else + ffiStatus = ffi_prep_cif(&cif, invoker->abi, paramCount, ffiReturnType, ffiParamTypes); +#endif + switch (ffiStatus) { + case FFI_BAD_ABI: + rb_raise(rb_eArgError, "Invalid ABI specified"); + case FFI_BAD_TYPEDEF: + rb_raise(rb_eArgError, "Invalid argument type specified"); + case FFI_OK: + break; + default: + rb_raise(rb_eArgError, "Unknown FFI error"); + } + + rbffi_SetupCallParams(paramCount, argv, -1, paramTypes, params, + ffiValues, NULL, 0, invoker->rbEnums); + + rbffi_frame_push(&frame); + + if(unlikely(invoker->blocking)) { + rbffi_blocking_call_t* bc; + bc = ALLOCA_N(rbffi_blocking_call_t, 1); + bc->retval = retval; + bc->function = invoker->function; + bc->ffiValues = ffiValues; + bc->params = params; + bc->frame = &frame; + bc->cif = cif; + + rb_rescue2(rbffi_do_blocking_call, (VALUE) bc, rbffi_save_frame_exception, (VALUE) &frame, rb_eException, (VALUE) 0); + } else { + ffi_call(&cif, FFI_FN(invoker->function), retval, ffiValues); + } + + rbffi_frame_pop(&frame); + + rbffi_save_errno(); + + if (RTEST(frame.exc) && frame.exc != Qnil) { + rb_exc_raise(frame.exc); + } + + return rbffi_NativeValue_ToRuby(invoker->returnType, invoker->rbReturnType, retval); +} + + +void +rbffi_Variadic_Init(VALUE moduleFFI) +{ + classVariadicInvoker = rb_define_class_under(moduleFFI, "VariadicInvoker", rb_cObject); + rb_global_variable(&classVariadicInvoker); + + rb_define_alloc_func(classVariadicInvoker, variadic_allocate); + + rb_define_method(classVariadicInvoker, "initialize", variadic_initialize, 4); + rb_define_method(classVariadicInvoker, "invoke", variadic_invoke, 2); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/compat.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/compat.h new file mode 100644 index 0000000..3f7bbae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/compat.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_COMPAT_H +#define RBFFI_COMPAT_H + +#include + +#ifndef RARRAY_LEN +# define RARRAY_LEN(ary) RARRAY(ary)->len +#endif + +#ifndef RARRAY_PTR +# define RARRAY_PTR(ary) RARRAY(ary)->ptr +#endif + +#ifndef RSTRING_LEN +# define RSTRING_LEN(s) RSTRING(s)->len +#endif + +#ifndef RSTRING_PTR +# define RSTRING_PTR(s) RSTRING(s)->ptr +#endif + +#ifndef NUM2ULL +# define NUM2ULL(x) rb_num2ull((VALUE)x) +#endif + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +#ifdef __GNUC__ +# define likely(x) __builtin_expect((x), 1) +# define unlikely(x) __builtin_expect((x), 0) +#else +# define likely(x) (x) +# define unlikely(x) (x) +#endif + +#ifdef _MSC_VER +#define ffi_type_longdouble ffi_type_double +#endif + +#ifndef MAX +# define MAX(a, b) ((a) < (b) ? (b) : (a)) +#endif +#ifndef MIN +# define MIN(a, b) ((a) < (b) ? (a) : (b)) +#endif + +#ifndef RB_GC_GUARD +# define RB_GC_GUARD(x) (x) +#endif + +#endif /* RBFFI_COMPAT_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/extconf.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/extconf.h new file mode 100644 index 0000000..6f70c69 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/extconf.h @@ -0,0 +1,5 @@ +#ifndef EXTCONF_H +#define EXTCONF_H +#define HAVE_FFI_PREP_CIF_VAR 1 +#define USE_INTERNAL_LIBFFI 1 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/extconf.rb new file mode 100644 index 0000000..6dd28aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/extconf.rb @@ -0,0 +1,82 @@ +#!/usr/bin/env ruby + +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + require 'mkmf' + require 'rbconfig' + + def system_libffi_usable? 
+ # We need pkg_config or ffi.h + libffi_ok = pkg_config("libffi") || + have_header("ffi.h") || + find_header("ffi.h", "/usr/local/include", "/usr/include/ffi", + "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/ffi", + "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/ffi") || + (find_header("ffi.h", `xcrun --sdk macosx --show-sdk-path`.strip + "/usr/include/ffi") rescue false) + + # Ensure we can link to ffi_prep_closure_loc + libffi_ok &&= have_library("ffi", "ffi_prep_closure_loc", [ "ffi.h" ]) || + have_library("libffi", "ffi_prep_closure_loc", [ "ffi.h" ]) || + have_library("libffi-8", "ffi_prep_closure_loc", [ "ffi.h" ]) + + if RbConfig::CONFIG['host_os'] =~ /mswin/ + have_library('libffi_convenience') + have_library('shlwapi') + end + + libffi_ok + end + + dir_config("ffi_c") + + # recent versions of ruby add restrictive ansi and warning flags on a whim - kill them all + $warnflags = '' + $CFLAGS.gsub!(/[\s+]-ansi/, '') + $CFLAGS.gsub!(/[\s+]-std=[^\s]+/, '') + # solaris 10 needs -c99 for + $CFLAGS << " -std=c99" if RbConfig::CONFIG['host_os'] =~ /solaris(!?2\.11)/ + + # Check whether we use system libffi + system_libffi = enable_config('system-libffi', :try) + + if system_libffi == :try + system_libffi = ENV['RUBY_CC_VERSION'].nil? && system_libffi_usable? + elsif system_libffi + abort "system libffi is not usable" unless system_libffi_usable? + end + + if system_libffi + have_func('ffi_prep_cif_var') + $defs << "-DHAVE_RAW_API" if have_func("ffi_raw_call") && have_func("ffi_prep_raw_closure") + else + $defs << "-DHAVE_FFI_PREP_CIF_VAR" + $defs << "-DUSE_INTERNAL_LIBFFI" + end + + $defs << "-DHAVE_EXTCONF_H" if $defs.empty? # needed so create_header works + + create_header + create_makefile("ffi_c") + + unless system_libffi + File.open("Makefile", "a") do |mf| + mf.puts "LIBFFI_HOST=--host=#{RbConfig::CONFIG['host_alias']}" if RbConfig::CONFIG.has_key?("host_alias") + if RbConfig::CONFIG['host_os'].downcase =~ /darwin/ + mf.puts "include ${srcdir}/libffi.darwin.mk" + elsif RbConfig::CONFIG['host_os'].downcase =~ /bsd/ + mf.puts '.include "${srcdir}/libffi.bsd.mk"' + elsif RbConfig::CONFIG['host_os'].downcase =~ /mswin64/ + mf.puts '!include $(srcdir)/libffi.vc64.mk' + elsif RbConfig::CONFIG['host_os'].downcase =~ /mswin32/ + mf.puts '!include $(srcdir)/libffi.vc.mk' + else + mf.puts "include ${srcdir}/libffi.mk" + end + end + end + +else + File.open("Makefile", "w") do |mf| + mf.puts "# Dummy makefile for non-mri rubies" + mf.puts "all install::\n" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ffi.c new file mode 100644 index 0000000..22ea3bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/ffi.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include "rbffi.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Struct.h" +#include "StructByValue.h" +#include "DynamicLibrary.h" +#include "Platform.h" +#include "Types.h" +#include "LastError.h" +#include "Function.h" +#include "ClosurePool.h" +#include "MethodHandle.h" +#include "Call.h" +#include "ArrayType.h" +#include "MappedType.h" + +void Init_ffi_c(void); + +VALUE rbffi_FFIModule = Qnil; + +static VALUE moduleFFI = Qnil; + +void +Init_ffi_c(void) +{ + /* + * Document-module: FFI + * + * This module embbed type constants from {FFI::NativeType}. + */ + rbffi_FFIModule = moduleFFI = rb_define_module("FFI"); + rb_global_variable(&rbffi_FFIModule); + + rbffi_Thread_Init(rbffi_FFIModule); + + /* FFI::Type needs to be initialized before most other classes */ + rbffi_Type_Init(moduleFFI); + + rbffi_ArrayType_Init(moduleFFI); + rbffi_LastError_Init(moduleFFI); + rbffi_Call_Init(moduleFFI); + rbffi_ClosurePool_Init(moduleFFI); + rbffi_MethodHandle_Init(moduleFFI); + rbffi_Platform_Init(moduleFFI); + rbffi_AbstractMemory_Init(moduleFFI); + rbffi_Pointer_Init(moduleFFI); + rbffi_Function_Init(moduleFFI); + rbffi_MemoryPointer_Init(moduleFFI); + rbffi_Buffer_Init(moduleFFI); + rbffi_StructByValue_Init(moduleFFI); + rbffi_Struct_Init(moduleFFI); + rbffi_DynamicLibrary_Init(moduleFFI); + rbffi_Variadic_Init(moduleFFI); + rbffi_Types_Init(moduleFFI); + rbffi_MappedType_Init(moduleFFI); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.la b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.la new file mode 120000 index 0000000..0727452 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.la @@ -0,0 +1 @@ +../libffi_convenience.la \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/Makefile new file mode 100644 index 0000000..bf061c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/Makefile @@ -0,0 +1,1796 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. 
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + + + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.3.0 +host_triplet = arm-apple-darwin21.3.0 +target_triplet = arm-apple-darwin21.3.0 +#am__append_1 = doc +#am__append_2 = src/debug.c +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +#am__append_3 = -DFFI_DEBUG +subdir = . 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(noinst_HEADERS) $(am__DIST_COMMON) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = fficonfig.h +CONFIG_CLEAN_FILES = libffi.pc +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \ + "$(DESTDIR)$(pkgconfigdir)" +LTLIBRARIES = $(noinst_LTLIBRARIES) $(toolexeclib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am__libffi_la_SOURCES_DIST = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c src/debug.c +am__dirstamp = $(am__leading_dot)dirstamp +#am__objects_1 = src/debug.lo +am_libffi_la_OBJECTS = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) +AM_V_lt = $(am__v_lt_$(V)) +am__v_lt_ = $(am__v_lt_$(AM_DEFAULT_VERBOSITY)) +am__v_lt_0 = --silent +am__v_lt_1 = +libffi_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(libffi_la_LDFLAGS) $(LDFLAGS) -o $@ +am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) +am__libffi_convenience_la_SOURCES_DIST = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c src/debug.c +am__objects_2 = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +am_libffi_convenience_la_OBJECTS = $(am__objects_2) +nodist_libffi_convenience_la_OBJECTS = +libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \ + $(nodist_libffi_convenience_la_OBJECTS) +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I. 
-I$(srcdir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) +LTCPPASCOMPILE = $(LIBTOOL) $(AM_V_lt) $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CCASFLAGS) $(CCASFLAGS) +AM_V_CPPAS = $(am__v_CPPAS_$(V)) +am__v_CPPAS_ = $(am__v_CPPAS_$(AM_DEFAULT_VERBOSITY)) +am__v_CPPAS_0 = @echo " CPPAS " $@; +am__v_CPPAS_1 = +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_$(V)) +am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY)) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_$(V)) +am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY)) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libffi_la_SOURCES) $(EXTRA_libffi_la_SOURCES) \ + $(libffi_convenience_la_SOURCES) \ + $(EXTRA_libffi_convenience_la_SOURCES) \ + $(nodist_libffi_convenience_la_SOURCES) +DIST_SOURCES = $(am__libffi_la_SOURCES_DIST) \ + $(EXTRA_libffi_la_SOURCES) \ + $(am__libffi_convenience_la_SOURCES_DIST) \ + $(EXTRA_libffi_convenience_la_SOURCES) +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(pkgconfig_DATA) +HEADERS = $(noinst_HEADERS) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + cscope distdir dist dist-all distcheck +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ + $(LISP)fficonfig.h.in +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +CSCOPE = cscope +DIST_SUBDIRS = include testsuite man doc +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/fficonfig.h.in \ + $(srcdir)/libffi.pc.in compile config.guess config.sub depcomp \ + install-sh ltmain.sh missing +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . 
-type f -print +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21 +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = gcc -fdeclspec +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.3.0 +build_alias = +build_cpu = arm +build_os = darwin21.3.0 +build_vendor = apple +builddir = . 
+datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.3.0 +host_alias = +host_cpu = arm +host_os = darwin21.3.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.3.0 +target_alias = +target_cpu = arm +target_os = darwin21.3.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = +top_builddir = . +top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign subdir-objects +ACLOCAL_AMFLAGS = -I m4 +SUBDIRS = include testsuite man $(am__append_1) +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) +MAKEOVERRIDES = +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la +libffi_la_SOURCES = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c $(am__append_2) +noinst_HEADERS = \ + src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h \ + src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h \ + src/bfin/ffitarget.h \ + src/cris/ffitarget.h \ + src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h \ + src/m68k/ffitarget.h \ + src/m88k/ffitarget.h \ + src/metag/ffitarget.h \ + src/microblaze/ffitarget.h \ + src/mips/ffitarget.h \ + src/moxie/ffitarget.h \ + src/nios2/ffitarget.h \ + src/or1k/ffitarget.h \ + src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h src/powerpc/ffi_powerpc.h \ + src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h \ + src/sh/ffitarget.h \ + src/sh64/ffitarget.h \ + src/sparc/ffitarget.h src/sparc/internal.h \ + src/tile/ffitarget.h \ + src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h src/x86/asmnames.h \ + src/xtensa/ffitarget.h \ + src/dlmalloc.c + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S 
src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c \ + src/frv/eabi.S src/ia64/ffi.c src/ia64/unix.S src/m32r/ffi.c \ + src/m32r/sysv.S src/m68k/ffi.c src/m68k/sysv.S src/m88k/ffi.c \ + src/m88k/obsd.S src/metag/ffi.c src/metag/sysv.S \ + src/microblaze/ffi.c src/microblaze/sysv.S src/mips/ffi.c \ + src/mips/o32.S src/mips/n32.S src/moxie/ffi.c \ + src/moxie/eabi.S src/nios2/ffi.c src/nios2/sysv.S \ + src/or1k/ffi.c src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S \ + src/pa/hpux32.S src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S + +libffi_la_LIBADD = $(TARGET_OBJ) +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) +AM_CFLAGS = $(am__append_3) +libffi_version_script = +##libffi_version_script = -Wl,--version-script,libffi.map +##libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = +##libffi_version_dep = libffi.map +##libffi_version_dep = libffi.map-sun +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) +all: fficonfig.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + +.SUFFIXES: +.SUFFIXES: .S .c .lo .o .obj +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: # $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +fficonfig.h: stamp-h1 + @test -f $@ || rm -f stamp-h1 + @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 + +stamp-h1: $(srcdir)/fficonfig.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status fficonfig.h +$(srcdir)/fficonfig.h.in: # $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f fficonfig.h stamp-h1 +libffi.pc: $(top_builddir)/config.status $(srcdir)/libffi.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(toolexeclibdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \ + } + +uninstall-toolexeclibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \ + done + +clean-toolexeclibLTLIBRARIES: + -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES) + @list='$(toolexeclib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } +src/$(am__dirstamp): + @$(MKDIR_P) src + @: > src/$(am__dirstamp) +src/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/$(DEPDIR) + @: > src/$(DEPDIR)/$(am__dirstamp) +src/prep_cif.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/types.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/java_raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/closures.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/debug.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/aarch64/$(am__dirstamp): + @$(MKDIR_P) src/aarch64 
+ @: > src/aarch64/$(am__dirstamp) +src/aarch64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/aarch64/$(DEPDIR) + @: > src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/win64_armasm.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/alpha/$(am__dirstamp): + @$(MKDIR_P) src/alpha + @: > src/alpha/$(am__dirstamp) +src/alpha/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/alpha/$(DEPDIR) + @: > src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/ffi.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/osf.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/arc/$(am__dirstamp): + @$(MKDIR_P) src/arc + @: > src/arc/$(am__dirstamp) +src/arc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arc/$(DEPDIR) + @: > src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/ffi.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/arcompact.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arm/$(am__dirstamp): + @$(MKDIR_P) src/arm + @: > src/arm/$(am__dirstamp) +src/arm/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arm/$(DEPDIR) + @: > src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/ffi.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv_msvc_arm32.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/avr32/$(am__dirstamp): + @$(MKDIR_P) src/avr32 + @: > src/avr32/$(am__dirstamp) +src/avr32/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/avr32/$(DEPDIR) + @: > src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/ffi.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/sysv.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/bfin/$(am__dirstamp): + @$(MKDIR_P) src/bfin + @: > src/bfin/$(am__dirstamp) +src/bfin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/bfin/$(DEPDIR) + @: > src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/ffi.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/sysv.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/cris/$(am__dirstamp): + @$(MKDIR_P) src/cris + @: > src/cris/$(am__dirstamp) +src/cris/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/cris/$(DEPDIR) + @: > src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/ffi.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/sysv.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/frv/$(am__dirstamp): + @$(MKDIR_P) src/frv + @: > src/frv/$(am__dirstamp) +src/frv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/frv/$(DEPDIR) + @: > src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/ffi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/eabi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/ia64/$(am__dirstamp): + @$(MKDIR_P) src/ia64 + @: > src/ia64/$(am__dirstamp) +src/ia64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/ia64/$(DEPDIR) + @: > src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/ffi.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/unix.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/m32r/$(am__dirstamp): + @$(MKDIR_P) src/m32r + @: > src/m32r/$(am__dirstamp) +src/m32r/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m32r/$(DEPDIR) + @: > 
src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/ffi.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/sysv.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m68k/$(am__dirstamp): + @$(MKDIR_P) src/m68k + @: > src/m68k/$(am__dirstamp) +src/m68k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m68k/$(DEPDIR) + @: > src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/ffi.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/sysv.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m88k/$(am__dirstamp): + @$(MKDIR_P) src/m88k + @: > src/m88k/$(am__dirstamp) +src/m88k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m88k/$(DEPDIR) + @: > src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/ffi.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/obsd.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/metag/$(am__dirstamp): + @$(MKDIR_P) src/metag + @: > src/metag/$(am__dirstamp) +src/metag/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/metag/$(DEPDIR) + @: > src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/ffi.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/sysv.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/microblaze/$(am__dirstamp): + @$(MKDIR_P) src/microblaze + @: > src/microblaze/$(am__dirstamp) +src/microblaze/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/microblaze/$(DEPDIR) + @: > src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/ffi.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/sysv.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/mips/$(am__dirstamp): + @$(MKDIR_P) src/mips + @: > src/mips/$(am__dirstamp) +src/mips/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/mips/$(DEPDIR) + @: > src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/ffi.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/o32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/n32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/moxie/$(am__dirstamp): + @$(MKDIR_P) src/moxie + @: > src/moxie/$(am__dirstamp) +src/moxie/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/moxie/$(DEPDIR) + @: > src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/ffi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/eabi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/nios2/$(am__dirstamp): + @$(MKDIR_P) src/nios2 + @: > src/nios2/$(am__dirstamp) +src/nios2/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/nios2/$(DEPDIR) + @: > src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/ffi.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/sysv.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/or1k/$(am__dirstamp): + @$(MKDIR_P) src/or1k + @: > src/or1k/$(am__dirstamp) +src/or1k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/or1k/$(DEPDIR) + @: > src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/ffi.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/sysv.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/pa/$(am__dirstamp): + @$(MKDIR_P) src/pa + @: > src/pa/$(am__dirstamp) +src/pa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/pa/$(DEPDIR) + @: > src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/ffi.lo: src/pa/$(am__dirstamp) src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/linux.lo: 
src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/hpux32.lo: src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/powerpc/$(am__dirstamp): + @$(MKDIR_P) src/powerpc + @: > src/powerpc/$(am__dirstamp) +src/powerpc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/powerpc/$(DEPDIR) + @: > src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ppc_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/riscv/$(am__dirstamp): + @$(MKDIR_P) src/riscv + @: > src/riscv/$(am__dirstamp) +src/riscv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/riscv/$(DEPDIR) + @: > src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/ffi.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/sysv.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/s390/$(am__dirstamp): + @$(MKDIR_P) src/s390 + @: > src/s390/$(am__dirstamp) +src/s390/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/s390/$(DEPDIR) + @: > src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/ffi.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/sysv.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/sh/$(am__dirstamp): + @$(MKDIR_P) src/sh + @: > src/sh/$(am__dirstamp) +src/sh/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh/$(DEPDIR) + @: > src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/ffi.lo: src/sh/$(am__dirstamp) src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/sysv.lo: src/sh/$(am__dirstamp) \ + src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh64/$(am__dirstamp): + @$(MKDIR_P) src/sh64 + @: > src/sh64/$(am__dirstamp) +src/sh64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh64/$(DEPDIR) + @: > src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/ffi.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/sysv.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sparc/$(am__dirstamp): + @$(MKDIR_P) src/sparc + @: > src/sparc/$(am__dirstamp) +src/sparc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sparc/$(DEPDIR) + @: > src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi64.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v8.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v9.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/tile/$(am__dirstamp): + @$(MKDIR_P) src/tile + @: > src/tile/$(am__dirstamp) 
+src/tile/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/tile/$(DEPDIR) + @: > src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/ffi.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/tile.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/vax/$(am__dirstamp): + @$(MKDIR_P) src/vax + @: > src/vax/$(am__dirstamp) +src/vax/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/vax/$(DEPDIR) + @: > src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/ffi.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/elfbsd.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/x86/$(am__dirstamp): + @$(MKDIR_P) src/x86 + @: > src/x86/$(am__dirstamp) +src/x86/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/x86/$(DEPDIR) + @: > src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffiw64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/unix64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/xtensa/$(am__dirstamp): + @$(MKDIR_P) src/xtensa + @: > src/xtensa/$(am__dirstamp) +src/xtensa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/xtensa/$(DEPDIR) + @: > src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/ffi.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/sysv.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) + +libffi.la: $(libffi_la_OBJECTS) $(libffi_la_DEPENDENCIES) $(EXTRA_libffi_la_DEPENDENCIES) + $(AM_V_CCLD)$(libffi_la_LINK) -rpath $(toolexeclibdir) $(libffi_la_OBJECTS) $(libffi_la_LIBADD) $(LIBS) + +libffi_convenience.la: $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_DEPENDENCIES) $(EXTRA_libffi_convenience_la_DEPENDENCIES) + $(AM_V_CCLD)$(LINK) $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f src/*.$(OBJEXT) + -rm -f src/*.lo + -rm -f src/aarch64/*.$(OBJEXT) + -rm -f src/aarch64/*.lo + -rm -f src/alpha/*.$(OBJEXT) + -rm -f src/alpha/*.lo + -rm -f src/arc/*.$(OBJEXT) + -rm -f src/arc/*.lo + -rm -f src/arm/*.$(OBJEXT) + -rm -f src/arm/*.lo + -rm -f src/avr32/*.$(OBJEXT) + -rm -f src/avr32/*.lo + -rm -f src/bfin/*.$(OBJEXT) + -rm -f src/bfin/*.lo + -rm -f src/cris/*.$(OBJEXT) + -rm -f src/cris/*.lo + -rm -f src/frv/*.$(OBJEXT) + -rm -f src/frv/*.lo + -rm -f src/ia64/*.$(OBJEXT) + -rm -f src/ia64/*.lo + -rm -f src/m32r/*.$(OBJEXT) + -rm -f src/m32r/*.lo + -rm -f src/m68k/*.$(OBJEXT) + -rm -f src/m68k/*.lo + -rm -f src/m88k/*.$(OBJEXT) + -rm -f src/m88k/*.lo + -rm -f src/metag/*.$(OBJEXT) + -rm -f src/metag/*.lo + -rm -f src/microblaze/*.$(OBJEXT) + -rm -f src/microblaze/*.lo + -rm -f src/mips/*.$(OBJEXT) + -rm -f src/mips/*.lo + -rm -f src/moxie/*.$(OBJEXT) + -rm -f src/moxie/*.lo + -rm -f src/nios2/*.$(OBJEXT) + -rm -f src/nios2/*.lo + -rm -f src/or1k/*.$(OBJEXT) + -rm -f src/or1k/*.lo + -rm -f src/pa/*.$(OBJEXT) + -rm -f src/pa/*.lo + -rm -f src/powerpc/*.$(OBJEXT) + -rm -f src/powerpc/*.lo + -rm -f src/riscv/*.$(OBJEXT) + -rm -f src/riscv/*.lo + -rm -f src/s390/*.$(OBJEXT) + -rm 
-f src/s390/*.lo + -rm -f src/sh/*.$(OBJEXT) + -rm -f src/sh/*.lo + -rm -f src/sh64/*.$(OBJEXT) + -rm -f src/sh64/*.lo + -rm -f src/sparc/*.$(OBJEXT) + -rm -f src/sparc/*.lo + -rm -f src/tile/*.$(OBJEXT) + -rm -f src/tile/*.lo + -rm -f src/vax/*.$(OBJEXT) + -rm -f src/vax/*.lo + -rm -f src/x86/*.$(OBJEXT) + -rm -f src/x86/*.lo + -rm -f src/xtensa/*.$(OBJEXT) + -rm -f src/xtensa/*.lo + +distclean-compile: + -rm -f *.tab.c + +#include src/$(DEPDIR)/closures.Plo +#include src/$(DEPDIR)/debug.Plo +#include src/$(DEPDIR)/java_raw_api.Plo +#include src/$(DEPDIR)/prep_cif.Plo +#include src/$(DEPDIR)/raw_api.Plo +#include src/$(DEPDIR)/types.Plo +#include src/aarch64/$(DEPDIR)/ffi.Plo +#include src/aarch64/$(DEPDIR)/sysv.Plo +#include src/aarch64/$(DEPDIR)/win64_armasm.Plo +#include src/alpha/$(DEPDIR)/ffi.Plo +#include src/alpha/$(DEPDIR)/osf.Plo +#include src/arc/$(DEPDIR)/arcompact.Plo +#include src/arc/$(DEPDIR)/ffi.Plo +#include src/arm/$(DEPDIR)/ffi.Plo +#include src/arm/$(DEPDIR)/sysv.Plo +#include src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo +#include src/avr32/$(DEPDIR)/ffi.Plo +#include src/avr32/$(DEPDIR)/sysv.Plo +#include src/bfin/$(DEPDIR)/ffi.Plo +#include src/bfin/$(DEPDIR)/sysv.Plo +#include src/cris/$(DEPDIR)/ffi.Plo +#include src/cris/$(DEPDIR)/sysv.Plo +#include src/frv/$(DEPDIR)/eabi.Plo +#include src/frv/$(DEPDIR)/ffi.Plo +#include src/ia64/$(DEPDIR)/ffi.Plo +#include src/ia64/$(DEPDIR)/unix.Plo +#include src/m32r/$(DEPDIR)/ffi.Plo +#include src/m32r/$(DEPDIR)/sysv.Plo +#include src/m68k/$(DEPDIR)/ffi.Plo +#include src/m68k/$(DEPDIR)/sysv.Plo +#include src/m88k/$(DEPDIR)/ffi.Plo +#include src/m88k/$(DEPDIR)/obsd.Plo +#include src/metag/$(DEPDIR)/ffi.Plo +#include src/metag/$(DEPDIR)/sysv.Plo +#include src/microblaze/$(DEPDIR)/ffi.Plo +#include src/microblaze/$(DEPDIR)/sysv.Plo +#include src/mips/$(DEPDIR)/ffi.Plo +#include src/mips/$(DEPDIR)/n32.Plo +#include src/mips/$(DEPDIR)/o32.Plo +#include src/moxie/$(DEPDIR)/eabi.Plo +#include src/moxie/$(DEPDIR)/ffi.Plo +#include src/nios2/$(DEPDIR)/ffi.Plo +#include src/nios2/$(DEPDIR)/sysv.Plo +#include src/or1k/$(DEPDIR)/ffi.Plo +#include src/or1k/$(DEPDIR)/sysv.Plo +#include src/pa/$(DEPDIR)/ffi.Plo +#include src/pa/$(DEPDIR)/hpux32.Plo +#include src/pa/$(DEPDIR)/linux.Plo +#include src/powerpc/$(DEPDIR)/aix.Plo +#include src/powerpc/$(DEPDIR)/aix_closure.Plo +#include src/powerpc/$(DEPDIR)/darwin.Plo +#include src/powerpc/$(DEPDIR)/darwin_closure.Plo +#include src/powerpc/$(DEPDIR)/ffi.Plo +#include src/powerpc/$(DEPDIR)/ffi_darwin.Plo +#include src/powerpc/$(DEPDIR)/ffi_linux64.Plo +#include src/powerpc/$(DEPDIR)/ffi_sysv.Plo +#include src/powerpc/$(DEPDIR)/linux64.Plo +#include src/powerpc/$(DEPDIR)/linux64_closure.Plo +#include src/powerpc/$(DEPDIR)/ppc_closure.Plo +#include src/powerpc/$(DEPDIR)/sysv.Plo +#include src/riscv/$(DEPDIR)/ffi.Plo +#include src/riscv/$(DEPDIR)/sysv.Plo +#include src/s390/$(DEPDIR)/ffi.Plo +#include src/s390/$(DEPDIR)/sysv.Plo +#include src/sh/$(DEPDIR)/ffi.Plo +#include src/sh/$(DEPDIR)/sysv.Plo +#include src/sh64/$(DEPDIR)/ffi.Plo +#include src/sh64/$(DEPDIR)/sysv.Plo +#include src/sparc/$(DEPDIR)/ffi.Plo +#include src/sparc/$(DEPDIR)/ffi64.Plo +#include src/sparc/$(DEPDIR)/v8.Plo +#include src/sparc/$(DEPDIR)/v9.Plo +#include src/tile/$(DEPDIR)/ffi.Plo +#include src/tile/$(DEPDIR)/tile.Plo +#include src/vax/$(DEPDIR)/elfbsd.Plo +#include src/vax/$(DEPDIR)/ffi.Plo +#include src/x86/$(DEPDIR)/ffi.Plo +#include src/x86/$(DEPDIR)/ffi64.Plo +#include src/x86/$(DEPDIR)/ffiw64.Plo +#include 
src/x86/$(DEPDIR)/sysv.Plo +#include src/x86/$(DEPDIR)/sysv_intel.Plo +#include src/x86/$(DEPDIR)/unix64.Plo +#include src/x86/$(DEPDIR)/win64.Plo +#include src/x86/$(DEPDIR)/win64_intel.Plo +#include src/xtensa/$(DEPDIR)/ffi.Plo +#include src/xtensa/$(DEPDIR)/sysv.Plo + +.S.o: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +# $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CPPAS)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(CPPASCOMPILE) -c -o $@ $< + +.S.obj: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +# $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CPPAS)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.S.lo: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +# $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Plo +# $(AM_V_CPPAS)source='$<' object='$@' libtool=yes +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(LTCPPASCOMPILE) -c -o $@ $< + +.c.o: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +# $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CC)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(COMPILE) -c -o $@ $< + +.c.obj: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +# $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CC)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.c.lo: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +# $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Plo +# $(AM_V_CC)source='$<' object='$@' libtool=yes +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + -rm -rf src/.libs src/_libs + -rm -rf src/aarch64/.libs src/aarch64/_libs + -rm -rf src/alpha/.libs src/alpha/_libs + -rm -rf src/arc/.libs src/arc/_libs + -rm -rf src/arm/.libs src/arm/_libs + -rm -rf src/avr32/.libs src/avr32/_libs + -rm -rf src/bfin/.libs src/bfin/_libs + -rm -rf src/cris/.libs src/cris/_libs + -rm -rf src/frv/.libs src/frv/_libs + -rm -rf src/ia64/.libs src/ia64/_libs + -rm -rf src/m32r/.libs src/m32r/_libs + -rm -rf src/m68k/.libs src/m68k/_libs + -rm -rf src/m88k/.libs src/m88k/_libs + -rm -rf src/metag/.libs src/metag/_libs + -rm -rf src/microblaze/.libs src/microblaze/_libs + -rm -rf src/mips/.libs src/mips/_libs + -rm -rf src/moxie/.libs src/moxie/_libs + -rm -rf src/nios2/.libs src/nios2/_libs + -rm -rf src/or1k/.libs src/or1k/_libs + -rm -rf src/pa/.libs src/pa/_libs + -rm -rf src/powerpc/.libs src/powerpc/_libs + -rm -rf src/riscv/.libs src/riscv/_libs + -rm -rf src/s390/.libs src/s390/_libs + -rm -rf src/sh/.libs src/sh/_libs + -rm -rf src/sh64/.libs src/sh64/_libs + -rm -rf src/sparc/.libs src/sparc/_libs + -rm -rf src/tile/.libs src/tile/_libs + -rm -rf src/vax/.libs src/vax/_libs + -rm -rf src/x86/.libs src/x86/_libs + 
-rm -rf src/xtensa/.libs src/xtensa/_libs + +distclean-libtool: + -rm -f libtool config.lt +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! 
-s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-tarZ: distdir + @echo WARNING: "Support for distribution archives compressed with" \ + "legacy program 'compress' is deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + @echo WARNING: "Support for shar distribution archives is" \ + "deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. +distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build/sub \ + && ../../configure \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + --srcdir=../.. --prefix="$$dc_install_base" \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . ; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) fficonfig.h +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(pkgconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f src/$(DEPDIR)/$(am__dirstamp) + -rm -f src/$(am__dirstamp) + -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/aarch64/$(am__dirstamp) + -rm -f src/alpha/$(DEPDIR)/$(am__dirstamp) + -rm -f src/alpha/$(am__dirstamp) + -rm -f src/arc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arc/$(am__dirstamp) + -rm -f src/arm/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arm/$(am__dirstamp) + -rm -f src/avr32/$(DEPDIR)/$(am__dirstamp) + -rm -f src/avr32/$(am__dirstamp) + -rm -f src/bfin/$(DEPDIR)/$(am__dirstamp) + -rm -f src/bfin/$(am__dirstamp) + -rm -f src/cris/$(DEPDIR)/$(am__dirstamp) + -rm -f src/cris/$(am__dirstamp) + -rm -f src/frv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/frv/$(am__dirstamp) + -rm -f src/ia64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/ia64/$(am__dirstamp) + -rm -f src/m32r/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m32r/$(am__dirstamp) + -rm -f src/m68k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m68k/$(am__dirstamp) + -rm -f src/m88k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m88k/$(am__dirstamp) + -rm -f src/metag/$(DEPDIR)/$(am__dirstamp) + -rm -f src/metag/$(am__dirstamp) + -rm -f src/microblaze/$(DEPDIR)/$(am__dirstamp) + -rm -f src/microblaze/$(am__dirstamp) + -rm -f src/mips/$(DEPDIR)/$(am__dirstamp) + -rm -f src/mips/$(am__dirstamp) + -rm -f src/moxie/$(DEPDIR)/$(am__dirstamp) + -rm -f src/moxie/$(am__dirstamp) + -rm -f src/nios2/$(DEPDIR)/$(am__dirstamp) + -rm -f src/nios2/$(am__dirstamp) + -rm -f src/or1k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/or1k/$(am__dirstamp) + -rm -f src/pa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/pa/$(am__dirstamp) + -rm -f src/powerpc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/powerpc/$(am__dirstamp) + -rm -f src/riscv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/riscv/$(am__dirstamp) + -rm -f src/s390/$(DEPDIR)/$(am__dirstamp) + -rm -f src/s390/$(am__dirstamp) + -rm -f src/sh/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh/$(am__dirstamp) + -rm -f src/sh64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh64/$(am__dirstamp) + -rm -f src/sparc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sparc/$(am__dirstamp) + -rm -f src/tile/$(DEPDIR)/$(am__dirstamp) + -rm -f src/tile/$(am__dirstamp) + -rm -f src/vax/$(DEPDIR)/$(am__dirstamp) + -rm -f src/vax/$(am__dirstamp) + -rm -f src/x86/$(DEPDIR)/$(am__dirstamp) + -rm -f src/x86/$(am__dirstamp) + -rm -f src/xtensa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/xtensa/$(am__dirstamp) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-recursive + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-pkgconfigDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: install-toolexeclibLTLIBRARIES + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.MAKE: $(am__recursive_targets) all install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--refresh check check-am clean clean-cscope clean-generic \ + clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES cscope cscopelist-am ctags \ + ctags-am dist dist-all dist-bzip2 dist-gzip dist-hook \ + dist-lzip dist-shar dist-tarZ dist-xz dist-zip distcheck \ + distclean distclean-compile distclean-generic distclean-hdr \ + distclean-libtool distclean-tags distcleancheck distdir \ + distuninstallcheck dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkgconfigDATA install-ps \ + install-ps-am install-strip install-toolexeclibLTLIBRARIES \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile 
mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ + uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.PRECIOUS: Makefile + +##libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ +## $(libffi_la_OBJECTS) $(libffi_la_LIBADD) +## perl $(top_srcdir)/make_sunver.pl libffi.map \ +## `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ +## sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ +## > $@ || (rm -f $@ ; exit 1) + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $< + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' ; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/config.log b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/config.log new file mode 100644 index 0000000..188d290 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/config.log @@ -0,0 +1,1860 @@ +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libffi configure 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure --disable-static --with-pic=yes --disable-dependency-tracking --disable-docs --host= + +## --------- ## +## Platform. ## +## --------- ## + +hostname = Arthurs-MacBook-Air.local +uname -m = arm64 +uname -r = 21.3.0 +uname -s = Darwin +uname -v = Darwin Kernel Version 21.3.0: Wed Jan 5 21:37:58 PST 2022; root:xnu-8019.80.24~20/RELEASE_ARM64_T8101 + +/usr/bin/uname -p = arm +/bin/uname -X = unknown + +/bin/arch = unknown +/usr/bin/arch -k = unknown +/usr/convex/getsysinfo = unknown +/usr/bin/hostinfo = Mach kernel version: + Darwin Kernel Version 21.3.0: Wed Jan 5 21:37:58 PST 2022; root:xnu-8019.80.24~20/RELEASE_ARM64_T8101 +Kernel configured for up to 8 processors. +8 processors are physically available. +8 processors are logically available. +Processor type: arm64e (ARM64E) +Processors active: 0 1 2 3 4 5 6 7 +Primary memory available: 16.00 gigabytes +Default processor set: 526 tasks, 2421 threads, 8 processors +Load average: 23.23, Mach factor: 0.44 +/bin/machine = unknown +/usr/bin/oslevel = unknown +/bin/universe = unknown + +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0/bin +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0@global/bin +PATH: /Users/arthur/.rvm/rubies/ruby-3.0.0/bin +PATH: /opt/homebrew/bin +PATH: /usr/local/bin +PATH: /usr/bin +PATH: /bin +PATH: /usr/sbin +PATH: /sbin +PATH: /usr/local/go/bin +PATH: /Library/Apple/usr/bin +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0/bin +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0@global/bin +PATH: /Users/arthur/.rvm/rubies/ruby-3.0.0/bin +PATH: /Users/arthur/go/bin +PATH: /Users/arthur/.rvm/bin +PATH: /Users/arthur/.rvm/bin + + +## ----------- ## +## Core tests. 
## +## ----------- ## + +configure:2704: checking build system type +configure:2718: result: arm-apple-darwin21.3.0 +configure:2738: checking host system type +configure:2751: result: arm-apple-darwin21.3.0 +configure:2771: checking target system type +configure:2784: result: arm-apple-darwin21.3.0 +configure:2881: checking for gsed +configure:2912: result: sed +configure:2940: checking for a BSD-compatible install +configure:3008: result: /opt/homebrew/bin/ginstall -c +configure:3019: checking whether build environment is sane +configure:3074: result: yes +configure:3222: checking for a thread-safe mkdir -p +configure:3261: result: /opt/homebrew/bin/gmkdir -p +configure:3268: checking for gawk +configure:3298: result: no +configure:3268: checking for mawk +configure:3298: result: no +configure:3268: checking for nawk +configure:3298: result: no +configure:3268: checking for awk +configure:3284: found /usr/bin/awk +configure:3295: result: awk +configure:3306: checking whether make sets $(MAKE) +configure:3328: result: yes +configure:3357: checking whether make supports nested variables +configure:3374: result: yes +configure:3559: checking for gcc +configure:3586: result: gcc -fdeclspec +configure:3815: checking for C compiler version +configure:3824: gcc -fdeclspec --version >&5 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:3835: $? = 0 +configure:3824: gcc -fdeclspec -v >&5 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +clang: warning: argument unused during compilation: '-fdeclspec' [-Wunused-command-line-argument] +configure:3835: $? = 0 +configure:3824: gcc -fdeclspec -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:3835: $? = 1 +configure:3824: gcc -fdeclspec -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:3835: $? = 1 +configure:3855: checking whether the C compiler works +configure:3877: gcc -fdeclspec -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:3881: $? = 0 +configure:3929: result: yes +configure:3932: checking for C compiler default output file name +configure:3934: result: a.out +configure:3940: checking for suffix of executables +configure:3947: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:3951: $? 
= 0 +configure:3973: result: +configure:3995: checking whether we are cross compiling +configure:4003: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:4007: $? = 0 +configure:4014: ./conftest +configure:4018: $? = 0 +configure:4033: result: no +configure:4038: checking for suffix of object files +configure:4060: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4064: $? = 0 +configure:4085: result: o +configure:4089: checking whether we are using the GNU C compiler +configure:4108: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4108: $? = 0 +configure:4117: result: yes +configure:4126: checking whether gcc -fdeclspec accepts -g +configure:4146: gcc -fdeclspec -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4146: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4161: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4161: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4177: gcc -fdeclspec -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4177: $? = 0 +configure:4187: result: yes +configure:4204: checking for gcc -fdeclspec option to accept ISO C89 +configure:4267: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4267: $? = 0 +configure:4280: result: none needed +configure:4305: checking whether gcc -fdeclspec understands -c and -o together +configure:4327: gcc -fdeclspec -c conftest.c -o conftest2.o +configure:4330: $? = 0 +configure:4327: gcc -fdeclspec -c conftest.c -o conftest2.o +configure:4330: $? = 0 +configure:4342: result: yes +configure:4370: checking for style of include used by make +configure:4398: result: GNU +configure:4424: checking dependency style of gcc -fdeclspec +configure:4535: result: none +configure:4608: checking for g++ +configure:4624: found /usr/bin/g++ +configure:4635: result: g++ +configure:4662: checking for C++ compiler version +configure:4671: g++ --version >&5 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/c++/4.2.1 +configure:4682: $? = 0 +configure:4671: g++ -v >&5 +Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/c++/4.2.1 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:4682: $? = 0 +configure:4671: g++ -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:4682: $? = 1 +configure:4671: g++ -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:4682: $? = 1 +configure:4686: checking whether we are using the GNU C++ compiler +configure:4705: g++ -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4705: $? 
= 0 +configure:4714: result: yes +configure:4723: checking whether g++ accepts -g +configure:4743: g++ -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4743: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4758: g++ -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4758: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4774: g++ -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4774: $? = 0 +configure:4784: result: yes +configure:4809: checking dependency style of g++ +configure:4920: result: none +configure:4950: checking dependency style of gcc -fdeclspec +configure:5059: result: none +configure:5121: checking how to print strings +configure:5148: result: printf +configure:5169: checking for a sed that does not truncate output +configure:5233: result: /usr/bin/sed +configure:5251: checking for grep that handles long lines and -e +configure:5309: result: /usr/bin/grep +configure:5314: checking for egrep +configure:5376: result: /usr/bin/grep -E +configure:5381: checking for fgrep +configure:5443: result: /usr/bin/grep -F +configure:5478: checking for ld used by gcc -fdeclspec +configure:5545: result: ld +configure:5552: checking if the linker (ld) is GNU ld +configure:5567: result: no +configure:5579: checking for BSD- or MS-compatible name lister (nm) +configure:5633: result: /usr/bin/nm -B +configure:5763: checking the name lister (/usr/bin/nm -B) interface +configure:5770: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. 
+configure:5773: /usr/bin/nm -B "conftest.o" +configure:5776: output +0000000000000000 S _some_variable +0000000000000000 t ltmp0 +0000000000000000 s ltmp1 +configure:5783: result: BSD nm +configure:5786: checking whether ln -s works +configure:5790: result: yes +configure:5798: checking the maximum length of command line arguments +configure:5929: result: 786432 +configure:5977: checking how to convert arm-apple-darwin21.3.0 file names to arm-apple-darwin21.3.0 format +configure:6017: result: func_convert_file_noop +configure:6024: checking how to convert arm-apple-darwin21.3.0 file names to toolchain format +configure:6044: result: func_convert_file_noop +configure:6051: checking for ld option to reload object files +configure:6058: result: -r +configure:6132: checking for objdump +configure:6148: found /usr/bin/objdump +configure:6159: result: objdump +configure:6191: checking how to recognize dependent libraries +configure:6391: result: pass_all +configure:6476: checking for dlltool +configure:6506: result: no +configure:6536: checking how to associate runtime and link libraries +configure:6563: result: printf %s\n +configure:6624: checking for ar +configure:6640: found /usr/bin/ar +configure:6651: result: ar +configure:6688: checking for archiver @FILE support +configure:6705: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:6705: $? = 0 +configure:6708: ar cru libconftest.a @conftest.lst >&5 +ar: @conftest.lst: No such file or directory +configure:6711: $? = 1 +configure:6731: result: no +configure:6789: checking for strip +configure:6805: found /usr/bin/strip +configure:6816: result: strip +configure:6888: checking for ranlib +configure:6904: found /usr/bin/ranlib +configure:6915: result: ranlib +configure:7017: checking command to parse /usr/bin/nm -B output from gcc -fdeclspec object +configure:7170: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:7173: $? = 0 +configure:7177: /usr/bin/nm -B conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:7180: $? = 0 +cannot find nm_test_var in conftest.nm +configure:7170: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:7173: $? = 0 +configure:7177: /usr/bin/nm -B conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:7180: $? 
= 0 +configure:7246: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c conftstm.o >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:7249: $? = 0 +configure:7287: result: ok +configure:7334: checking for sysroot +configure:7364: result: no +configure:7371: checking for a working dd +configure:7409: result: /bin/dd +configure:7413: checking how to truncate binary pipes +configure:7428: result: /bin/dd bs=4096 count=1 +configure:7757: checking for mt +configure:7787: result: no +configure:7807: checking if : is a manifest tool +configure:7813: : '-?' +configure:7821: result: no +configure:7877: checking for dsymutil +configure:7893: found /usr/bin/dsymutil +configure:7904: result: dsymutil +configure:7969: checking for nmedit +configure:7985: found /usr/bin/nmedit +configure:7996: result: nmedit +configure:8061: checking for lipo +configure:8077: found /usr/bin/lipo +configure:8088: result: lipo +configure:8153: checking for otool +configure:8169: found /usr/bin/otool +configure:8180: result: otool +configure:8245: checking for otool64 +configure:8275: result: no +configure:8320: checking for -single_module linker flag +gcc -fdeclspec -L/usr/local/opt/libffi/lib -o libconftest.dylib -dynamiclib -Wl,-single_module conftest.c +configure:8353: result: yes +configure:8356: checking for -exported_symbols_list linker flag +configure:8376: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib -Wl,-exported_symbols_list,conftest.sym conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:8376: $? = 0 +configure:8386: result: yes +configure:8389: checking for -force_load linker flag +gcc -fdeclspec -c -o conftest.o conftest.c +ar cru libconftest.a conftest.o +ranlib libconftest.a +gcc -fdeclspec -L/usr/local/opt/libffi/lib -o conftest conftest.c -Wl,-force_load,./libconftest.a +configure:8421: result: yes +configure:8498: checking how to run the C preprocessor +configure:8529: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8529: $? = 0 +configure:8543: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:11:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:8543: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| #include +configure:8568: result: gcc -fdeclspec -E +configure:8588: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8588: $? = 0 +configure:8602: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:11:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:8602: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| #include +configure:8631: checking for ANSI C header files +configure:8651: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8651: $? = 0 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8724: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:8724: $? = 0 +configure:8724: ./conftest +configure:8724: $? = 0 +configure:8735: result: yes +configure:8748: checking for sys/types.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? 
= 0 +configure:8748: result: yes +configure:8748: checking for sys/stat.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for stdlib.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for string.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for memory.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for strings.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for inttypes.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for stdint.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for unistd.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. 
+configure:8748: $? = 0 +configure:8748: result: yes +configure:8762: checking for dlfcn.h +configure:8762: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8762: $? = 0 +configure:8762: result: yes +configure:9029: checking for objdir +configure:9044: result: .libs +configure:9308: checking if gcc -fdeclspec supports -fno-rtti -fno-exceptions +configure:9326: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-rtti -fno-exceptions conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9330: $? = 0 +configure:9343: result: yes +configure:9701: checking for gcc -fdeclspec option to produce PIC +configure:9708: result: -fno-common -DPIC +configure:9716: checking if gcc -fdeclspec PIC flag -fno-common -DPIC works +configure:9734: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-common -DPIC -DPIC conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9738: $? = 0 +configure:9751: result: yes +configure:9780: checking if gcc -fdeclspec static flag -static works +configure:9808: result: no +configure:9823: checking if gcc -fdeclspec supports -c -o file.o +configure:9844: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -o out/conftest2.o conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9848: $? = 0 +configure:9870: result: yes +configure:9878: checking if gcc -fdeclspec supports -c -o file.o +configure:9925: result: yes +configure:9958: checking whether the gcc -fdeclspec linker (ld) supports shared libraries +configure:11221: result: yes +configure:11461: checking dynamic linker characteristics +configure:12291: result: darwin21.3.0 dyld +configure:12413: checking how to hardcode library paths into programs +configure:12438: result: immediate +configure:12986: checking whether stripping libraries is possible +configure:13000: result: yes +configure:13026: checking if libtool supports shared libraries +configure:13028: result: yes +configure:13031: checking whether to build shared libraries +configure:13056: result: yes +configure:13059: checking whether to build static libraries +configure:13063: result: no +configure:13086: checking how to run the C++ preprocessor +configure:13113: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:13113: $? 
= 0 +configure:13127: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.cpp:23:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:13127: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| /* end confdefs.h. */ +| #include +configure:13152: result: g++ -E +configure:13172: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:13172: $? = 0 +configure:13186: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.cpp:23:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:13186: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| /* end confdefs.h. */ +| #include +configure:13348: checking for ld used by g++ +configure:13415: result: ld +configure:13422: checking if the linker (ld) is GNU ld +configure:13437: result: no +configure:13492: checking whether the g++ linker (ld) supports shared libraries +configure:14565: result: yes +configure:14601: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:14604: $? 
= 0 +configure:15085: checking for g++ option to produce PIC +configure:15092: result: -fno-common -DPIC +configure:15100: checking if g++ PIC flag -fno-common -DPIC works +configure:15118: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-common -DPIC -DPIC conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:15122: $? = 0 +configure:15135: result: yes +configure:15158: checking if g++ static flag -static works +configure:15186: result: no +configure:15198: checking if g++ supports -c -o file.o +configure:15219: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -o out/conftest2.o conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:15223: $? = 0 +configure:15245: result: yes +configure:15250: checking if g++ supports -c -o file.o +configure:15297: result: yes +configure:15327: checking whether the g++ linker (ld) supports shared libraries +configure:15370: result: yes +configure:15511: checking dynamic linker characteristics +configure:16268: result: darwin21.3.0 dyld +configure:16333: checking how to hardcode library paths into programs +configure:16358: result: immediate +configure:16426: checking size of size_t +configure:16431: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:16431: $? = 0 +configure:16431: ./conftest +configure:16431: $? = 0 +configure:16445: result: 8 +configure:16456: checking for C compiler vendor +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| #if !(defined(__ICC) || defined(__ECC) || defined(__INTEL_COMPILER)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__xlc__) || defined(__xlC__) || defined(__IBMC__) || defined(__IBMCPP__)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__PATHCC__) || defined(__PATHSCALE__)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:16504: $? 
= 0 +configure:16512: result: clang +configure:17357: checking CFLAGS for maximum warnings +configure:17377: gcc -fdeclspec -c -warn all -warn all -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +clang: error: unknown argument: '-warn' +clang: error: unknown argument: '-warn' +clang: error: no such file or directory: 'all' +clang: error: no such file or directory: 'all' +configure:17377: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:17377: gcc -fdeclspec -c -pedantic -Wall -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17377: $? = 0 +configure:17385: result: -Wall +configure:17405: : CFLAGS="$CFLAGS" +configure:17408: $? = 0 +configure:17445: checking whether to enable maintainer-specific portions of Makefiles +configure:17454: result: no +configure:17470: checking sys/mman.h usability +configure:17470: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17470: $? = 0 +configure:17470: result: yes +configure:17470: checking sys/mman.h presence +configure:17470: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17470: $? = 0 +configure:17470: result: yes +configure:17470: checking for sys/mman.h +configure:17470: result: yes +configure:17483: checking for mmap +configure:17483: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17483: $? 
= 0 +configure:17483: result: yes +configure:17483: checking for mkostemp +configure:17483: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17483: $? = 0 +configure:17483: result: yes +configure:17493: checking for sys/mman.h +configure:17493: result: yes +configure:17501: checking for mmap +configure:17501: result: yes +configure:17514: checking whether read-only mmap of a plain file works +configure:17531: result: yes +configure:17533: checking whether mmap from /dev/zero works +configure:17555: result: no +configure:17559: checking for MAP_ANON(YMOUS) +configure:17582: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:38:5: warning: unused variable 'n' [-Wunused-variable] +int n = MAP_ANONYMOUS; + ^ +2 warnings generated. +configure:17582: $? = 0 +configure:17589: result: yes +configure:17595: checking whether mmap with MAP_ANON(YMOUS) works +configure:17612: result: yes +configure:17655: checking for ANSI C header files +configure:17759: result: yes +configure:17769: checking for memcpy +configure:17769: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:53:6: warning: incompatible redeclaration of library function 'memcpy' [-Wincompatible-library-redeclaration] +char memcpy (); + ^ +conftest.c:53:6: note: 'memcpy' is a builtin with type 'void *(void *, const void *, unsigned long)' +2 warnings generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17769: $? = 0 +configure:17769: result: yes +configure:17778: checking for size_t +configure:17778: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17778: $? = 0 +configure:17778: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:67:21: error: expected expression +if (sizeof ((size_t))) + ^ +1 warning and 1 error generated. +configure:17778: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| /* end confdefs.h. */ +| #include +| #ifdef HAVE_SYS_TYPES_H +| # include +| #endif +| #ifdef HAVE_SYS_STAT_H +| # include +| #endif +| #ifdef STDC_HEADERS +| # include +| # include +| #else +| # ifdef HAVE_STDLIB_H +| # include +| # endif +| #endif +| #ifdef HAVE_STRING_H +| # if !defined STDC_HEADERS && defined HAVE_MEMORY_H +| # include +| # endif +| # include +| #endif +| #ifdef HAVE_STRINGS_H +| # include +| #endif +| #ifdef HAVE_INTTYPES_H +| # include +| #endif +| #ifdef HAVE_STDINT_H +| # include +| #endif +| #ifdef HAVE_UNISTD_H +| # include +| #endif +| int +| main () +| { +| if (sizeof ((size_t))) +| return 0; +| ; +| return 0; +| } +configure:17778: result: yes +configure:17791: checking for working alloca.h +configure:17808: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17808: $? = 0 +configure:17816: result: yes +configure:17824: checking for alloca +configure:17861: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17861: $? = 0 +configure:17869: result: yes +configure:17980: checking size of double +configure:17985: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17985: $? = 0 +configure:17985: ./conftest +configure:17985: $? 
= 0 +configure:17999: result: 8 +configure:18013: checking size of long double +configure:18018: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:18018: $? = 0 +configure:18018: ./conftest +configure:18018: $? = 0 +configure:18032: result: 8 +configure:18065: checking whether byte ordering is bigendian +configure:18080: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18080: $? = 0 +configure:18125: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18125: $? = 0 +configure:18143: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:42:4: error: use of undeclared identifier 'not' + not big endian + ^ +1 warning and 1 error generated. +configure:18143: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. 
*/ +| #include +| #include +| +| int +| main () +| { +| #if BYTE_ORDER != BIG_ENDIAN +| not big endian +| #endif +| +| ; +| return 0; +| } +configure:18271: result: no +configure:18290: checking assembler .cfi pseudo-op support +configure:18308: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +:1:14: error: Expected an identifier +.cfi_sections + ^ +1 warning and 1 error generated. +configure:18308: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. */ +| asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc"); +| int +| main () +| { +| +| ; +| return 0; +| } +configure:18316: result: no +configure:18451: checking whether compiler supports pointer authentication +configure:18479: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:47:3: error: Pointer authentication not supported +# error Pointer authentication not supported + ^ +1 warning and 1 error generated. +configure:18479: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| #ifdef __clang__ +| # if __has_feature(ptrauth_calls) +| # define HAVE_PTRAUTH 1 +| # endif +| #endif +| +| #ifndef HAVE_PTRAUTH +| # error Pointer authentication not supported +| #endif +| +| ; +| return 0; +| } +configure:18487: result: no +configure:18506: checking for _ prefix in compiled symbols +configure:18516: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:2:12: warning: expression result unused [-Wunused-value] +int main(){nm_test_func;return 0;} + ^~~~~~~~~~~~ +2 warnings generated. +configure:18519: $? = 0 +configure:18523: /usr/bin/nm -B conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:18526: $? = 0 +configure:18548: result: yes +configure:18623: checking whether .eh_frame section should be read-only +configure:18639: result: yes +configure:18654: checking for __attribute__((visibility("hidden"))) +configure:18663: gcc -fdeclspec -Werror -S conftest.c -o conftest.s 1>&5 +configure:18666: $? = 0 +configure:18675: result: yes +configure:18814: checking for ld used by gcc -fdeclspec +configure:18881: result: ld +configure:18888: checking if the linker (ld) is GNU ld +configure:18903: result: no +configure:19216: versioning on shared library symbols is no +configure:19337: checking that generated files are newer than configure +configure:19343: result: done +configure:19411: creating ./config.status + +## ---------------------- ## +## Running config.status. ## +## ---------------------- ## + +This file was extended by libffi config.status 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = + CONFIG_HEADERS = + CONFIG_LINKS = + CONFIG_COMMANDS = + $ ./config.status + +on Arthurs-MacBook-Air.local + +config.status:1221: creating include/Makefile +config.status:1221: creating include/ffi.h +config.status:1221: creating Makefile +config.status:1221: creating testsuite/Makefile +config.status:1221: creating man/Makefile +config.status:1221: creating doc/Makefile +config.status:1221: creating libffi.pc +config.status:1221: creating fficonfig.h +config.status:1435: executing buildir commands +config.status:1448: skipping top_srcdir/Makefile - not created +config.status:1435: executing depfiles commands +config.status:1435: executing libtool commands +config.status:1435: executing include commands +config.status:1435: executing src commands + +## ---------------- ## +## Cache variables. 
## +## ---------------- ## + +ac_cv_build=arm-apple-darwin21.3.0 +ac_cv_c_bigendian=no +ac_cv_c_compiler_gnu=yes +ac_cv_cflags_warn_all=-Wall +ac_cv_cxx_compiler_gnu=yes +ac_cv_decl_map_anon=yes +ac_cv_env_CCASFLAGS_set= +ac_cv_env_CCASFLAGS_value= +ac_cv_env_CCAS_set= +ac_cv_env_CCAS_value= +ac_cv_env_CPPFLAGS_set=set +ac_cv_env_CPPFLAGS_value='-DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' +ac_cv_env_CPP_set= +ac_cv_env_CPP_value= +ac_cv_env_CXXCPP_set= +ac_cv_env_CXXCPP_value= +ac_cv_env_LT_SYS_LIBRARY_PATH_set= +ac_cv_env_LT_SYS_LIBRARY_PATH_value= +ac_cv_env_build_alias_set= +ac_cv_env_build_alias_value= +ac_cv_env_host_alias_set=set +ac_cv_env_host_alias_value= +ac_cv_env_target_alias_set= +ac_cv_env_target_alias_value= +ac_cv_func_alloca_works=yes +ac_cv_func_memcpy=yes +ac_cv_func_mkostemp=yes +ac_cv_func_mmap=yes +ac_cv_func_mmap_anon=yes +ac_cv_func_mmap_dev_zero=no +ac_cv_func_mmap_file=yes +ac_cv_header_dlfcn_h=yes +ac_cv_header_inttypes_h=yes +ac_cv_header_memory_h=yes +ac_cv_header_stdc=yes +ac_cv_header_stdint_h=yes +ac_cv_header_stdlib_h=yes +ac_cv_header_string_h=yes +ac_cv_header_strings_h=yes +ac_cv_header_sys_mman_h=yes +ac_cv_header_sys_stat_h=yes +ac_cv_header_sys_types_h=yes +ac_cv_header_unistd_h=yes +ac_cv_host=arm-apple-darwin21.3.0 +ac_cv_objext=o +ac_cv_path_EGREP='/usr/bin/grep -E' +ac_cv_path_FGREP='/usr/bin/grep -F' +ac_cv_path_GREP=/usr/bin/grep +ac_cv_path_SED=/usr/bin/sed +ac_cv_path_ax_enable_builddir_sed=sed +ac_cv_path_install='/opt/homebrew/bin/ginstall -c' +ac_cv_path_lt_DD=/bin/dd +ac_cv_path_mkdir=/opt/homebrew/bin/gmkdir +ac_cv_prog_AWK=awk +ac_cv_prog_CPP='gcc -fdeclspec -E' +ac_cv_prog_CXXCPP='g++ -E' +ac_cv_prog_ac_ct_AR=ar +ac_cv_prog_ac_ct_CC='gcc -fdeclspec' +ac_cv_prog_ac_ct_CXX=g++ +ac_cv_prog_ac_ct_DSYMUTIL=dsymutil +ac_cv_prog_ac_ct_LIPO=lipo +ac_cv_prog_ac_ct_NMEDIT=nmedit +ac_cv_prog_ac_ct_OBJDUMP=objdump +ac_cv_prog_ac_ct_OTOOL=otool +ac_cv_prog_ac_ct_RANLIB=ranlib +ac_cv_prog_ac_ct_STRIP=strip +ac_cv_prog_cc_c89= +ac_cv_prog_cc_g=yes +ac_cv_prog_cxx_g=yes +ac_cv_prog_make_make_set=yes +ac_cv_sizeof_double=8 +ac_cv_sizeof_long_double=8 +ac_cv_sizeof_size_t=8 +ac_cv_target=arm-apple-darwin21.3.0 +ac_cv_type_size_t=yes +ac_cv_working_alloca_h=yes +am_cv_CCAS_dependencies_compiler_type=none +am_cv_CC_dependencies_compiler_type=none +am_cv_CXX_dependencies_compiler_type=none +am_cv_make_support_nested_variables=yes +am_cv_prog_cc_c_o=yes +ax_cv_c_compiler_vendor=clang +gcc_cv_as_cfi_pseudo_op=no +libffi_cv_as_ptrauth=no +libffi_cv_hidden_visibility_attribute=yes +libffi_cv_ro_eh_frame=yes +lt_cv_apple_cc_single_mod=yes +lt_cv_ar_at_file=no +lt_cv_deplibs_check_method=pass_all +lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_ld_exported_symbols_list=yes +lt_cv_ld_force_load=yes +lt_cv_ld_reload_flag=-r +lt_cv_nm_interface='BSD nm' +lt_cv_objdir=.libs +lt_cv_path_LD=ld +lt_cv_path_LDCXX=ld +lt_cv_path_NM='/usr/bin/nm -B' +lt_cv_path_mainfest_tool=no +lt_cv_prog_compiler_c_o=yes +lt_cv_prog_compiler_c_o_CXX=yes +lt_cv_prog_compiler_pic='-fno-common -DPIC' +lt_cv_prog_compiler_pic_CXX='-fno-common -DPIC' +lt_cv_prog_compiler_pic_works=yes +lt_cv_prog_compiler_pic_works_CXX=yes +lt_cv_prog_compiler_rtti_exceptions=yes +lt_cv_prog_compiler_static_works=no +lt_cv_prog_compiler_static_works_CXX=no +lt_cv_prog_gnu_ld=no +lt_cv_prog_gnu_ldcxx=no +lt_cv_sharedlib_from_linklib_cmd='printf %s\n' +lt_cv_sys_global_symbol_pipe='sed -n -e 
'\''s/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_c_name_address='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_cdecl='sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import= +lt_cv_sys_max_cmd_len=786432 +lt_cv_sys_symbol_underscore=yes +lt_cv_to_host_file_cmd=func_convert_file_noop +lt_cv_to_tool_file_cmd=func_convert_file_noop +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' + +## ----------------- ## +## Output variables. ## +## ----------------- ## + +ACLOCAL='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15' +ALLOCA='' +AMDEPBACKSLASH='' +AMDEP_FALSE='' +AMDEP_TRUE='#' +AMTAR='$${TAR-tar}' +AM_BACKSLASH='\' +AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +AM_DEFAULT_VERBOSITY='1' +AM_LTLDFLAGS='' +AM_RUNTESTFLAGS='' +AM_V='$(V)' +AR='ar' +AUTOCONF='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf' +AUTOHEADER='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader' +AUTOMAKE='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15' +AWK='awk' +BUILD_DOCS_FALSE='' +BUILD_DOCS_TRUE='#' +CC='gcc -fdeclspec' +CCAS='gcc -fdeclspec' +CCASDEPMODE='depmode=none' +CCASFLAGS='' +CCDEPMODE='depmode=none' +CFLAGS=' -Wall -fexceptions' +CPP='gcc -fdeclspec -E' +CPPFLAGS='-DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' +CXX='g++' +CXXCPP='g++ -E' +CXXDEPMODE='depmode=none' +CXXFLAGS='-g -O2' +CYGPATH_W='echo' +DEFS='-DHAVE_CONFIG_H' +DEPDIR='.deps' +DLLTOOL='false' +DSYMUTIL='dsymutil' +DUMPBIN='' +ECHO_C='\c' +ECHO_N='' +ECHO_T='' +EGREP='/usr/bin/grep -E' +EXEEXT='' +FFI_DEBUG_FALSE='' +FFI_DEBUG_TRUE='#' +FFI_EXEC_TRAMPOLINE_TABLE='1' +FFI_EXEC_TRAMPOLINE_TABLE_FALSE='#' +FFI_EXEC_TRAMPOLINE_TABLE_TRUE='' +FGREP='/usr/bin/grep -F' +GREP='/usr/bin/grep' +HAVE_LONG_DOUBLE='0' +HAVE_LONG_DOUBLE_VARIANT='0' +INSTALL_DATA='${INSTALL} -m 644' +INSTALL_PROGRAM='${INSTALL}' +INSTALL_SCRIPT='${INSTALL}' +INSTALL_STRIP_PROGRAM='$(install_sh) -c -s' +LD='ld' +LDFLAGS='-L/usr/local/opt/libffi/lib' +LIBFFI_BUILD_VERSIONED_SHLIB_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE='#' +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE='#' +LIBFFI_BUILD_VERSIONED_SHLIB_TRUE='#' +LIBOBJS='' +LIBS='' +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +LIPO='lipo' +LN_S='ln -s' +LTLIBOBJS='' +LT_SYS_LIBRARY_PATH='' +MAINT='#' +MAINTAINER_MODE_FALSE='' +MAINTAINER_MODE_TRUE='#' +MAKEINFO='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo' +MANIFEST_TOOL=':' +MKDIR_P='/opt/homebrew/bin/gmkdir -p' +NM='/usr/bin/nm -B' +NMEDIT='nmedit' +OBJDUMP='objdump' +OBJEXT='o' +OPT_LDFLAGS='' 
+OTOOL64=':' +OTOOL='otool' +PACKAGE='libffi' +PACKAGE_BUGREPORT='http://github.com/libffi/libffi/issues' +PACKAGE_NAME='libffi' +PACKAGE_STRING='libffi 3.3' +PACKAGE_TARNAME='libffi' +PACKAGE_URL='' +PACKAGE_VERSION='3.3' +PATH_SEPARATOR=':' +PRTDIAG='' +RANLIB='ranlib' +SECTION_LDFLAGS='' +SED='/usr/bin/sed' +SET_MAKE='' +SHELL='/bin/sh' +STRIP='strip' +TARGET='ARM' +TARGETDIR='arm' +TARGET_OBJ=' src/arm/ffi.lo src/arm/sysv.lo' +TESTSUBDIR_FALSE='#' +TESTSUBDIR_TRUE='' +VERSION='3.3' +ac_ct_AR='ar' +ac_ct_CC='gcc -fdeclspec' +ac_ct_CXX='g++' +ac_ct_DUMPBIN='' +am__EXEEXT_FALSE='' +am__EXEEXT_TRUE='#' +am__fastdepCCAS_FALSE='' +am__fastdepCCAS_TRUE='#' +am__fastdepCC_FALSE='' +am__fastdepCC_TRUE='#' +am__fastdepCXX_FALSE='' +am__fastdepCXX_TRUE='#' +am__include='include' +am__isrc=' -I$(srcdir)' +am__leading_dot='.' +am__nodep='' +am__quote='' +am__tar='$${TAR-tar} chof - "$$tardir"' +am__untar='$${TAR-tar} xf -' +ax_enable_builddir_sed='sed' +bindir='${exec_prefix}/bin' +build='arm-apple-darwin21.3.0' +build_alias='' +build_cpu='arm' +build_os='darwin21.3.0' +build_vendor='apple' +datadir='${datarootdir}' +datarootdir='${prefix}/share' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +dvidir='${docdir}' +exec_prefix='${prefix}' +host='arm-apple-darwin21.3.0' +host_alias='' +host_cpu='arm' +host_os='darwin21.3.0' +host_vendor='apple' +htmldir='${docdir}' +includedir='${prefix}/include' +infodir='${datarootdir}/info' +install_sh='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh' +libdir='${exec_prefix}/lib' +libexecdir='${exec_prefix}/libexec' +localedir='${datarootdir}/locale' +localstatedir='${prefix}/var' +mandir='${datarootdir}/man' +mkdir_p='$(MKDIR_P)' +oldincludedir='/usr/include' +pdfdir='${docdir}' +prefix='/usr/local' +program_transform_name='s,x,x,' +psdir='${docdir}' +runstatedir='${localstatedir}/run' +sbindir='${exec_prefix}/sbin' +sharedstatedir='${prefix}/com' +sys_symbol_underscore='yes' +sysconfdir='${prefix}/etc' +target='arm-apple-darwin21.3.0' +target_alias='' +target_cpu='arm' +target_os='darwin21.3.0' +target_vendor='apple' +toolexecdir='${libdir}/gcc-lib/$(target_alias)' +toolexeclibdir='${libdir}' + +## ----------- ## +## confdefs.h. 
## +## ----------- ## + +/* confdefs.h */ +#define PACKAGE_NAME "libffi" +#define PACKAGE_TARNAME "libffi" +#define PACKAGE_VERSION "3.3" +#define PACKAGE_STRING "libffi 3.3" +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +#define PACKAGE_URL "" +#define PACKAGE "libffi" +#define VERSION "3.3" +#define STDC_HEADERS 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_STRINGS_H 1 +#define HAVE_INTTYPES_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_DLFCN_H 1 +#define LT_OBJDIR ".libs/" +#define SIZEOF_SIZE_T 8 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_MMAP 1 +#define HAVE_MKOSTEMP 1 +#define HAVE_MMAP_FILE 1 +#define HAVE_MMAP_ANON 1 +#define STDC_HEADERS 1 +#define HAVE_MEMCPY 1 +#define HAVE_ALLOCA_H 1 +#define HAVE_ALLOCA 1 +#define SIZEOF_DOUBLE 8 +#define SIZEOF_LONG_DOUBLE 8 +#define SYMBOL_UNDERSCORE 1 +#define FFI_EXEC_TRAMPOLINE_TABLE 1 +#define HAVE_RO_EH_FRAME 1 +#define EH_FRAME_FLAGS "a" +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +configure: exit 0 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/config.status b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/config.status new file mode 100755 index 0000000..7cb2615 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/config.status @@ -0,0 +1,2398 @@ +#! /bin/sh +# Generated by configure. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
+as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libffi $as_me 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +# Files that config.status was made for. +config_files=" include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc" +config_headers=" fficonfig.h" +config_commands=" buildir depfiles libtool include src" + +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." 
+ +ac_cs_config="'--disable-static' '--with-pic=yes' '--disable-dependency-tracking' '--disable-docs' '--host=' 'host_alias=' 'CPPFLAGS=-DRUBY_EXTCONF_H=\\\"extconf.h\\\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT '" +ac_cs_version="\ +libffi config.status 3.3 +configured by /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure, generated by GNU Autoconf 2.69, + with options \"$ac_cs_config\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21' +srcdir='/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi' +INSTALL='/opt/homebrew/bin/ginstall -c' +MKDIR_P='/opt/homebrew/bin/gmkdir -p' +AWK='awk' +test -n "$AWK" || AWK=awk +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +if $ac_cs_recheck; then + set X /bin/sh '/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure' '--disable-static' '--with-pic=yes' '--disable-dependency-tracking' '--disable-docs' '--host=' 'host_alias=' 'CPPFLAGS=-DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' $ac_configure_extra_args --no-create --no-recursion + shift + $as_echo "running CONFIG_SHELL=/bin/sh $*" >&6 + CONFIG_SHELL='/bin/sh' + export CONFIG_SHELL + exec "$@" +fi + +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +# +# INIT-COMMANDS +# +ax_enable_builddir_srcdir="/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi" # /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ax_enable_builddir_host="" # / arm-apple-darwin21.3.0 +ax_enable_builddir_version="3.3" # 3.3 +ax_enable_builddir_package="libffi" # libffi +ax_enable_builddir_auxdir="/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi" # +ax_enable_builddir_sed="sed" # /usr/bin/sed +ax_enable_builddir="." # + +AMDEP_TRUE="#" ac_aux_dir="/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' +double_quote_subst='s/\(["`\\]\)/\\\1/g' +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' +macro_version='2.4.6' +macro_revision='2.4.6' +enable_shared='yes' +enable_static='no' +pic_mode='yes' +enable_fast_install='needless' +shared_archive_member_spec='' +SHELL='/bin/sh' +ECHO='printf %s\n' +PATH_SEPARATOR=':' +host_alias='' +host='arm-apple-darwin21.3.0' +host_os='darwin21.3.0' +build_alias='' +build='arm-apple-darwin21.3.0' +build_os='darwin21.3.0' +SED='/usr/bin/sed' +Xsed='/usr/bin/sed -e 1s/^X//' +GREP='/usr/bin/grep' +EGREP='/usr/bin/grep -E' +FGREP='/usr/bin/grep -F' +LD='ld' +NM='/usr/bin/nm -B' +LN_S='ln -s' +max_cmd_len='786432' +ac_objext='o' +exeext='' +lt_unset='unset' +lt_SP2NL='tr \040 \012' +lt_NL2SP='tr \015\012 \040\040' +lt_cv_to_host_file_cmd='func_convert_file_noop' +lt_cv_to_tool_file_cmd='func_convert_file_noop' +reload_flag=' -r' +reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +OBJDUMP='objdump' +deplibs_check_method='pass_all' +file_magic_cmd='$MAGIC_CMD' +file_magic_glob='' +want_nocaseglob='no' +DLLTOOL='false' +sharedlib_from_linklib_cmd='printf %s\n' +AR='ar' +AR_FLAGS='cru' +archiver_list_spec='' +STRIP='strip' +RANLIB='ranlib' +old_postinstall_cmds='chmod 644 $oldlib~$RANLIB $tool_oldlib' +old_postuninstall_cmds='' +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +lock_old_archive_extraction='yes' +CC='gcc -fdeclspec' +CFLAGS=' -Wall -fexceptions' +compiler='g++' +GCC='yes' +lt_cv_sys_global_symbol_pipe='sed -n -e '\''s/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_cdecl='sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' 
-e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import='' +lt_cv_sys_global_symbol_to_c_name_address='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_nm_interface='BSD nm' +nm_file_list_spec='@' +lt_sysroot='' +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' +objdir='.libs' +MAGIC_CMD='file' +lt_prog_compiler_no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions' +lt_prog_compiler_pic=' -fno-common -DPIC' +lt_prog_compiler_wl='-Wl,' +lt_prog_compiler_static='' +lt_cv_prog_compiler_c_o='yes' +need_locks='no' +MANIFEST_TOOL=':' +DSYMUTIL='dsymutil' +NMEDIT='nmedit' +LIPO='lipo' +OTOOL='otool' +OTOOL64=':' +libext='a' +shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +extract_expsyms_cmds='' +archive_cmds_need_lc='no' +enable_shared_with_static_runtimes='no' +export_dynamic_flag_spec='' +whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object='no' +old_archive_from_new_cmds='' +old_archive_from_expsyms_cmds='' +archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds='sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds='sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld='no' +allow_undefined_flag='$wl-flat_namespace $wl-undefined ${wl}suppress' +no_undefined_flag='' +hardcode_libdir_flag_spec='' +hardcode_libdir_separator='' +hardcode_direct='no' +hardcode_direct_absolute='no' +hardcode_minus_L='no' +hardcode_shlibpath_var='unsupported' +hardcode_automatic='yes' +inherit_rpath='no' +link_all_deplibs='yes' +always_export_symbols='no' +export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms='' +prelink_cmds='' +postlink_cmds='' +file_list_spec='' +variables_saved_for_relink='PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH' +need_lib_prefix='no' +need_version='no' +version_type='darwin' +runpath_var='' +shlibpath_var='DYLD_LIBRARY_PATH' +shlibpath_overrides_runpath='yes' +libname_spec='lib$name' +library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +soname_spec='$libname$release$major$shared_ext' +install_override_mode='' +postinstall_cmds='' +postuninstall_cmds='' +finish_cmds='' +finish_eval='' +hardcode_into_libs='no' 
+sys_lib_search_path_spec='/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/13.0.0 /usr/local/lib' +configure_time_dlsearch_path='/usr/local/lib /lib /usr/lib' +configure_time_lt_sys_library_path='' +hardcode_action='immediate' +enable_dlopen='unknown' +enable_dlopen_self='unknown' +enable_dlopen_self_static='unknown' +old_striplib='strip -S' +striplib='strip -x' +compiler_lib_search_dirs='' +predep_objects='' +postdep_objects='' +predeps='' +postdeps='' +compiler_lib_search_path='' +LD_CXX='ld' +reload_flag_CXX=' -r' +reload_cmds_CXX='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +old_archive_cmds_CXX='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +compiler_CXX='g++' +GCC_CXX='yes' +lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' +lt_prog_compiler_pic_CXX=' -fno-common -DPIC' +lt_prog_compiler_wl_CXX='-Wl,' +lt_prog_compiler_static_CXX='' +lt_cv_prog_compiler_c_o_CXX='yes' +archive_cmds_need_lc_CXX='no' +enable_shared_with_static_runtimes_CXX='no' +export_dynamic_flag_spec_CXX='' +whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object_CXX='no' +old_archive_from_new_cmds_CXX='' +old_archive_from_expsyms_cmds_CXX='' +archive_cmds_CXX='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds_CXX='sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds_CXX='sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld_CXX='no' +allow_undefined_flag_CXX='$wl-flat_namespace $wl-undefined ${wl}suppress' +no_undefined_flag_CXX='' +hardcode_libdir_flag_spec_CXX='' +hardcode_libdir_separator_CXX='' +hardcode_direct_CXX='no' +hardcode_direct_absolute_CXX='no' +hardcode_minus_L_CXX='no' +hardcode_shlibpath_var_CXX='unsupported' +hardcode_automatic_CXX='yes' +inherit_rpath_CXX='no' +link_all_deplibs_CXX='yes' +always_export_symbols_CXX='no' +export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms_CXX='' +prelink_cmds_CXX='' +postlink_cmds_CXX='' +file_list_spec_CXX='' +hardcode_action_CXX='immediate' +compiler_lib_search_dirs_CXX='' +predep_objects_CXX='' +postdep_objects_CXX='' +predeps_CXX='' +postdeps_CXX='' +compiler_lib_search_path_CXX='' + +LTCC='gcc -fdeclspec' +LTCFLAGS='' +compiler='gcc -fdeclspec' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# Quote evaled strings. 
+for var in SHELL ECHO PATH_SEPARATOR SED GREP EGREP FGREP LD NM LN_S lt_SP2NL lt_NL2SP reload_flag OBJDUMP deplibs_check_method file_magic_cmd file_magic_glob want_nocaseglob DLLTOOL sharedlib_from_linklib_cmd AR AR_FLAGS archiver_list_spec STRIP RANLIB CC CFLAGS compiler lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl lt_cv_sys_global_symbol_to_import lt_cv_sys_global_symbol_to_c_name_address lt_cv_sys_global_symbol_to_c_name_address_lib_prefix lt_cv_nm_interface nm_file_list_spec lt_cv_truncate_bin lt_prog_compiler_no_builtin_flag lt_prog_compiler_pic lt_prog_compiler_wl lt_prog_compiler_static lt_cv_prog_compiler_c_o need_locks MANIFEST_TOOL DSYMUTIL NMEDIT LIPO OTOOL OTOOL64 shrext_cmds export_dynamic_flag_spec whole_archive_flag_spec compiler_needs_object with_gnu_ld allow_undefined_flag no_undefined_flag hardcode_libdir_flag_spec hardcode_libdir_separator exclude_expsyms include_expsyms file_list_spec variables_saved_for_relink libname_spec library_names_spec soname_spec install_override_mode finish_eval old_striplib striplib compiler_lib_search_dirs predep_objects postdep_objects predeps postdeps compiler_lib_search_path LD_CXX reload_flag_CXX compiler_CXX lt_prog_compiler_no_builtin_flag_CXX lt_prog_compiler_pic_CXX lt_prog_compiler_wl_CXX lt_prog_compiler_static_CXX lt_cv_prog_compiler_c_o_CXX export_dynamic_flag_spec_CXX whole_archive_flag_spec_CXX compiler_needs_object_CXX with_gnu_ld_CXX allow_undefined_flag_CXX no_undefined_flag_CXX hardcode_libdir_flag_spec_CXX hardcode_libdir_separator_CXX exclude_expsyms_CXX include_expsyms_CXX file_list_spec_CXX compiler_lib_search_dirs_CXX predep_objects_CXX postdep_objects_CXX predeps_CXX postdeps_CXX compiler_lib_search_path_CXX; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED \"\$sed_quote_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds old_postinstall_cmds old_postuninstall_cmds old_archive_cmds extract_expsyms_cmds old_archive_from_new_cmds old_archive_from_expsyms_cmds archive_cmds archive_expsym_cmds module_cmds module_expsym_cmds export_symbols_cmds prelink_cmds postlink_cmds postinstall_cmds postuninstall_cmds finish_cmds sys_lib_search_path_spec configure_time_dlsearch_path configure_time_lt_sys_library_path reload_cmds_CXX old_archive_cmds_CXX old_archive_from_new_cmds_CXX old_archive_from_expsyms_cmds_CXX archive_cmds_CXX archive_expsym_cmds_CXX module_cmds_CXX module_expsym_cmds_CXX export_symbols_cmds_CXX prelink_cmds_CXX postlink_cmds_CXX; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +ac_aux_dir='/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='libffi' + VERSION='3.3' + RM='rm -f' + ofile='libtool' + + + + + +TARGETDIR="arm" + + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "fficonfig.h") CONFIG_HEADERS="$CONFIG_HEADERS fficonfig.h" ;; + "buildir") CONFIG_COMMANDS="$CONFIG_COMMANDS buildir" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "include") CONFIG_COMMANDS="$CONFIG_COMMANDS include" ;; + "src") CONFIG_COMMANDS="$CONFIG_COMMANDS src" ;; + "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; + "include/ffi.h") CONFIG_FILES="$CONFIG_FILES include/ffi.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "testsuite/Makefile") CONFIG_FILES="$CONFIG_FILES testsuite/Makefile" ;; + "man/Makefile") CONFIG_FILES="$CONFIG_FILES man/Makefile" ;; + "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; + "libffi.pc") CONFIG_FILES="$CONFIG_FILES libffi.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. 
+if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +cat >>"$ac_tmp/subs1.awk" <<\_ACAWK && +S["am__EXEEXT_FALSE"]="" +S["am__EXEEXT_TRUE"]="#" +S["LTLIBOBJS"]="" +S["LIBOBJS"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE"]="#" +S["LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE"]="#" +S["LIBFFI_BUILD_VERSIONED_SHLIB_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_TRUE"]="#" +S["OPT_LDFLAGS"]="" +S["SECTION_LDFLAGS"]="" +S["toolexeclibdir"]="${libdir}" +S["toolexecdir"]="${libdir}/gcc-lib/$(target_alias)" +S["FFI_DEBUG_FALSE"]="" +S["FFI_DEBUG_TRUE"]="#" +S["TARGET_OBJ"]=" src/arm/ffi.lo src/arm/sysv.lo" +S["TARGETDIR"]="arm" +S["TARGET"]="ARM" +S["BUILD_DOCS_FALSE"]="" +S["BUILD_DOCS_TRUE"]="#" +S["FFI_EXEC_TRAMPOLINE_TABLE"]="1" +S["FFI_EXEC_TRAMPOLINE_TABLE_FALSE"]="#" +S["FFI_EXEC_TRAMPOLINE_TABLE_TRUE"]="" +S["sys_symbol_underscore"]="yes" +S["HAVE_LONG_DOUBLE_VARIANT"]="0" +S["HAVE_LONG_DOUBLE"]="0" +S["ALLOCA"]="" +S["AM_LTLDFLAGS"]="" +S["AM_RUNTESTFLAGS"]="" +S["TESTSUBDIR_FALSE"]="#" +S["TESTSUBDIR_TRUE"]="" +S["MAINT"]="#" +S["MAINTAINER_MODE_FALSE"]="" +S["MAINTAINER_MODE_TRUE"]="#" +S["PRTDIAG"]="" +S["CXXCPP"]="g++ -E" +S["CPP"]="gcc -fdeclspec -E" +S["LT_SYS_LIBRARY_PATH"]="" +S["OTOOL64"]=":" +S["OTOOL"]="otool" +S["LIPO"]="lipo" +S["NMEDIT"]="nmedit" +S["DSYMUTIL"]="dsymutil" +S["MANIFEST_TOOL"]=":" +S["RANLIB"]="ranlib" +S["ac_ct_AR"]="ar" +S["AR"]="ar" +S["DLLTOOL"]="false" +S["OBJDUMP"]="objdump" +S["LN_S"]="ln -s" +S["NM"]="/usr/bin/nm -B" +S["ac_ct_DUMPBIN"]="" +S["DUMPBIN"]="" +S["LD"]="ld" +S["FGREP"]="/usr/bin/grep -F" +S["EGREP"]="/usr/bin/grep -E" +S["GREP"]="/usr/bin/grep" +S["SED"]="/usr/bin/sed" +S["LIBTOOL"]="$(SHELL) $(top_builddir)/libtool" +S["am__fastdepCCAS_FALSE"]="" +S["am__fastdepCCAS_TRUE"]="#" +S["CCASDEPMODE"]="depmode=none" +S["CCASFLAGS"]="" +S["CCAS"]="gcc -fdeclspec" +S["am__fastdepCXX_FALSE"]="" +S["am__fastdepCXX_TRUE"]="#" +S["CXXDEPMODE"]="depmode=none" +S["ac_ct_CXX"]="g++" +S["CXXFLAGS"]="-g -O2" +S["CXX"]="g++" +S["am__fastdepCC_FALSE"]="" +S["am__fastdepCC_TRUE"]="#" +S["CCDEPMODE"]="depmode=none" +S["am__nodep"]="" +S["AMDEPBACKSLASH"]="" +S["AMDEP_FALSE"]="" +S["AMDEP_TRUE"]="#" +S["am__quote"]="" +S["am__include"]="include" +S["DEPDIR"]=".deps" +S["OBJEXT"]="o" +S["EXEEXT"]="" +S["ac_ct_CC"]="gcc -fdeclspec" +S["CPPFLAGS"]="-DRUBY_EXTCONF_H=\\\"extconf.h\\\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT " +S["LDFLAGS"]="-L/usr/local/opt/libffi/lib" +S["CFLAGS"]=" -Wall -fexceptions" +S["CC"]="gcc -fdeclspec" +S["AM_BACKSLASH"]="\\" +S["AM_DEFAULT_VERBOSITY"]="1" +S["AM_DEFAULT_V"]="$(AM_DEFAULT_VERBOSITY)" +S["AM_V"]="$(V)" +S["am__untar"]="$${TAR-tar} xf -" +S["am__tar"]="$${TAR-tar} chof - \"$$tardir\"" +S["AMTAR"]="$${TAR-tar}" +S["am__leading_dot"]="." 
+S["SET_MAKE"]="" +S["AWK"]="awk" +S["mkdir_p"]="$(MKDIR_P)" +S["MKDIR_P"]="/opt/homebrew/bin/gmkdir -p" +S["INSTALL_STRIP_PROGRAM"]="$(install_sh) -c -s" +S["STRIP"]="strip" +S["install_sh"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh" +S["MAKEINFO"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo" +S["AUTOHEADER"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader" +S["AUTOMAKE"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15" +S["AUTOCONF"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf" +S["ACLOCAL"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15" +S["VERSION"]="3.3" +S["PACKAGE"]="libffi" +S["CYGPATH_W"]="echo" +S["am__isrc"]=" -I$(srcdir)" +S["INSTALL_DATA"]="${INSTALL} -m 644" +S["INSTALL_SCRIPT"]="${INSTALL}" +S["INSTALL_PROGRAM"]="${INSTALL}" +S["ax_enable_builddir_sed"]="sed" +S["target_os"]="darwin21.3.0" +S["target_vendor"]="apple" +S["target_cpu"]="arm" +S["target"]="arm-apple-darwin21.3.0" +S["host_os"]="darwin21.3.0" +S["host_vendor"]="apple" +S["host_cpu"]="arm" +S["host"]="arm-apple-darwin21.3.0" +S["build_os"]="darwin21.3.0" +S["build_vendor"]="apple" +S["build_cpu"]="arm" +S["build"]="arm-apple-darwin21.3.0" +S["target_alias"]="" +S["host_alias"]="" +S["build_alias"]="" +S["LIBS"]="" +S["ECHO_T"]="" +S["ECHO_N"]="" +S["ECHO_C"]="\\c" +S["DEFS"]="-DHAVE_CONFIG_H" +S["mandir"]="${datarootdir}/man" +S["localedir"]="${datarootdir}/locale" +S["libdir"]="${exec_prefix}/lib" +S["psdir"]="${docdir}" +S["pdfdir"]="${docdir}" +S["dvidir"]="${docdir}" +S["htmldir"]="${docdir}" +S["infodir"]="${datarootdir}/info" +S["docdir"]="${datarootdir}/doc/${PACKAGE_TARNAME}" +S["oldincludedir"]="/usr/include" +S["includedir"]="${prefix}/include" +S["runstatedir"]="${localstatedir}/run" +S["localstatedir"]="${prefix}/var" +S["sharedstatedir"]="${prefix}/com" +S["sysconfdir"]="${prefix}/etc" +S["datadir"]="${datarootdir}" +S["datarootdir"]="${prefix}/share" +S["libexecdir"]="${exec_prefix}/libexec" +S["sbindir"]="${exec_prefix}/sbin" +S["bindir"]="${exec_prefix}/bin" +S["program_transform_name"]="s,x,x," +S["prefix"]="/usr/local" +S["exec_prefix"]="${prefix}" +S["PACKAGE_URL"]="" +S["PACKAGE_BUGREPORT"]="http://github.com/libffi/libffi/issues" +S["PACKAGE_STRING"]="libffi 3.3" +S["PACKAGE_VERSION"]="3.3" +S["PACKAGE_TARNAME"]="libffi" +S["PACKAGE_NAME"]="libffi" +S["PATH_SEPARATOR"]=":" +S["SHELL"]="/bin/sh" +_ACAWK +cat >>"$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +D["PACKAGE_NAME"]=" \"libffi\"" +D["PACKAGE_TARNAME"]=" \"libffi\"" +D["PACKAGE_VERSION"]=" \"3.3\"" +D["PACKAGE_STRING"]=" \"libffi 3.3\"" +D["PACKAGE_BUGREPORT"]=" \"http://github.com/libffi/libffi/issues\"" +D["PACKAGE_URL"]=" \"\"" +D["PACKAGE"]=" \"libffi\"" +D["VERSION"]=" \"3.3\"" +D["STDC_HEADERS"]=" 1" +D["HAVE_SYS_TYPES_H"]=" 1" +D["HAVE_SYS_STAT_H"]=" 1" +D["HAVE_STDLIB_H"]=" 1" +D["HAVE_STRING_H"]=" 1" +D["HAVE_MEMORY_H"]=" 1" +D["HAVE_STRINGS_H"]=" 1" +D["HAVE_INTTYPES_H"]=" 1" +D["HAVE_STDINT_H"]=" 1" +D["HAVE_UNISTD_H"]=" 1" +D["HAVE_DLFCN_H"]=" 1" +D["LT_OBJDIR"]=" \".libs/\"" +D["SIZEOF_SIZE_T"]=" 8" +D["HAVE_SYS_MMAN_H"]=" 1" +D["HAVE_MMAP"]=" 1" +D["HAVE_MKOSTEMP"]=" 1" +D["HAVE_MMAP_FILE"]=" 1" +D["HAVE_MMAP_ANON"]=" 1" +D["STDC_HEADERS"]=" 1" +D["HAVE_MEMCPY"]=" 1" +D["HAVE_ALLOCA_H"]=" 1" +D["HAVE_ALLOCA"]=" 1" +D["SIZEOF_DOUBLE"]=" 8" +D["SIZEOF_LONG_DOUBLE"]=" 8" +D["SYMBOL_UNDERSCORE"]=" 1" +D["FFI_EXEC_TRAMPOLINE_TABLE"]=" 1" +D["HAVE_RO_EH_FRAME"]=" 1" +D["EH_FRAME_FLAGS"]=" \"a\"" +D["HAVE_HIDDEN_VISIBILITY_ATTRIBUTE"]=" 1" + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*([\t (]|$)/ { + line = $ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. 
+ test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} + ac_datarootdir_hack=' + s&@datadir@&${datarootdir}&g + s&@docdir@&${datarootdir}/doc/${PACKAGE_TARNAME}&g + s&@infodir@&${datarootdir}/info&g + s&@localedir@&${datarootdir}/locale&g + s&@mandir@&${datarootdir}/man&g + s&\${datarootdir}&${prefix}/share&g' ;; +esac +ac_sed_extra=" + +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. +_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "buildir":C) ac_top_srcdir="$ax_enable_builddir_srcdir" + if test ".$ax_enable_builddir" = ".." ; then + if test -f "$top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - left untouched" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - left untouched" >&6;} + else + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - not created" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - not created" >&6;} + fi + else + if test -f "$ac_top_srcdir/Makefile" ; then + a=`grep "^VERSION " "$ac_top_srcdir/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$ac_top_srcdir/Makefile" + fi + if test -f "$ac_top_srcdir/Makefile" ; then + echo "$ac_top_srcdir/Makefile : $ac_top_srcdir/Makefile.in" > $tmp/conftemp.mk + echo " @ echo 'REMOVED,,,' >\$@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$ac_top_srcdir/Makefile" >/dev/null + then rm $ac_top_srcdir/Makefile ; fi + cp $tmp/conftemp.mk $ac_top_srcdir/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$ac_top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: create top_srcdir/Makefile guessed from local Makefile" >&5 +$as_echo "$as_me: create top_srcdir/Makefile guessed from local Makefile" >&6;} + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[ ]*[\\#]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[:=]/!d +/^\\./d +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/ \\1 \\1-all\\2/g +s/^\\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/\\1 \\1-all\\2/ +s/ / /g +/^all all-all[ :]/i\\ +all-configured : all-all +s/ [a-zA-Z0-9-]*-all [a-zA-Z0-9-]*-all-all//g +/-all-all/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +/dist-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/dist-[a-zA-Z0-9]*-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/distclean-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefile.sed~" ## DEBUGGING + $ax_enable_builddir_sed -f $tmp/conftemp.sed Makefile >$ac_top_srcdir/Makefile + if test -f "$ac_top_srcdir/Makefile.mk" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&5 +$as_echo "$as_me: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&6;} + cat $ac_top_srcdir/Makefile.mk >>$ac_top_srcdir/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$ac_top_srcdir/Makefile + # sanity check + if grep '^; echo "MAKE ' $ac_top_srcdir/Makefile >/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: buggy sed found - it deletes tab in \"a\" text parts" >&5 +$as_echo "$as_me: buggy sed found - it deletes tab in \"a\" text parts" >&6;} + $ax_enable_builddir_sed -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $ac_top_srcdir/Makefile \ + >$ac_top_srcdir/Makefile~ + (test -s $ac_top_srcdir/Makefile~ && mv $ac_top_srcdir/Makefile~ $ac_top_srcdir/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [^|]* | *$ax_enable_builddir *\$!$xxxx ...... $ax_enable_builddir!" 
>$tmp/conftemp.sed + $ax_enable_builddir_sed -f "$tmp/conftemp.sed" "$ac_top_srcdir/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$ac_top_srcdir/makefiles.out~" ## DEBUGGING + if cmp -s "$ac_top_srcdir/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: keeping top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: keeping top_srcdir/Makefile from earlier configure" >&6;} + rm "$tmp/mkfile.tmp" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: reusing top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: reusing top_srcdir/Makefile from earlier configure" >&6;} + mv "$tmp/mkfile.tmp" "$ac_top_srcdir/Makefile" + fi + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&5 +$as_echo "$as_me: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&6;} + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$ax_enable_builddir" >>$ac_top_srcdir/Makefile + fi + ;; + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named 'Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running 'make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "$am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec=$shared_archive_member_spec + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. 
+NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. 
+OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? 
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. 
+postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. 
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. 
+always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + "include":C) test -d include || mkdir include ;; + "src":C) +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/fficonfig.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/fficonfig.h new file mode 100644 index 0000000..3589fe5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/fficonfig.h @@ -0,0 +1,218 @@ +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "a" + +/* Define this if you want extra debugging. */ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +#define FFI_EXEC_TRAMPOLINE_TABLE 1 + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +/* #undef FFI_MMAP_EXEC_WRIT */ + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. */ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +/* #undef HAVE_AS_CFI_PSEUDO_OP */ + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. 
*/ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +/* #undef HAVE_AS_X86_PCREL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +/* #undef HAVE_LONG_DOUBLE */ + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +#define HAVE_MKOSTEMP 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if your compiler supports pointer authentication. */ +/* #undef HAVE_PTRAUTH */ + +/* Define if .eh_frame sections should be read-only. */ +#define HAVE_RO_EH_FRAME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 8 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. 
+ STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/Makefile new file mode 100644 index 0000000..4940e85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/Makefile @@ -0,0 +1,605 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# include/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.3.0 +host_triplet = arm-apple-darwin21.3.0 +target_triplet = arm-apple-darwin21.3.0 +subdir = include +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = ffi.h +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed 
"s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(includedir)" +HEADERS = $(nodist_include_HEADERS) $(noinst_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/ffi.h.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = 
$(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = gcc -fdeclspec +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.3.0 +build_alias = +build_cpu = arm +build_os = darwin21.3.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.3.0 +host_alias = +host_cpu = arm +host_os = darwin21.3.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.3.0 +target_alias = +target_cpu = arm +target_os = darwin21.3.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. 
+top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +DISTCLEANFILES = ffitarget.h +noinst_HEADERS = ffi_common.h ffi_cfi.h +EXTRA_DIST = ffi.h.in +nodist_include_HEADERS = ffi.h ffitarget.h +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign include/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +ffi.h: $(top_builddir)/config.status $(srcdir)/ffi.h.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-nodist_includeHEADERS: $(nodist_include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \ + done + +uninstall-nodist_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + 
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
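+
+# Illustrative aside, not generated by automake: the nodist header rules
+# above honour DESTDIR and includedir, so the generated ffi.h/ffitarget.h
+# headers can be staged straight from this subdirectory Makefile. The
+# staging path below is hypothetical.
+#
+#   make -C include install-nodist_includeHEADERS DESTDIR=/tmp/libffi-stage
+#   ls /tmp/libffi-stage/usr/local/include   # expect ffi.h and ffitarget.h
+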
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-nodist_includeHEADERS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-nodist_includeHEADERS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ + clean-libtool cscopelist-am ctags ctags-am distclean \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man \ + install-nodist_includeHEADERS install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ + uninstall-am uninstall-nodist_includeHEADERS + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/ffi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/ffi.h new file mode 100644 index 0000000..b876025 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/ffi.h @@ -0,0 +1,523 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3 - Copyright (c) 2011, 2014, 2019 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef ARM +#define ARM +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 0 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 0 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +#if !FFI_NATIVE_RAW_API +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue) __attribute__((deprecated)); +#endif + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) __attribute__((deprecated)); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +#if defined(PA_LINUX) || defined(PA_HPUX) +#define FFI_CLOSURE_PTR(X) ((void *)((unsigned int)(X) | 2)) +#define FFI_RESTORE_PTR(X) ((void *)((unsigned int)(X) & ~3)) +#else +#define FFI_CLOSURE_PTR(X) (X) +#define FFI_RESTORE_PTR(X) (X) +#endif + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +#if !FFI_NATIVE_RAW_API +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) __attribute__((deprecated)); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) __attribute__((deprecated)); +#endif + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 0 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/ffitarget.h new file mode 100644 index 0000000..cb57b84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/include/ffitarget.h @@ -0,0 +1,89 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#if defined(__ARM_PCS_VFP) || defined(_M_ARM) + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#ifndef _M_ARM +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#ifdef _MSC_VER +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_FUNCTION 12 +#else +#define FFI_TRAMPOLINE_SIZE 12 +#endif +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libffi.pc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libffi.pc new file mode 100644 index 0000000..1f9d5ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libffi.pc @@ -0,0 +1,11 @@ +prefix=/usr/local +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +toolexeclibdir=${libdir} +includedir=${prefix}/include + +Name: libffi +Description: Library supporting Foreign Function Interfaces +Version: 3.3 +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libffi_convenience.la b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libffi_convenience.la new file mode 100644 index 0000000..9055742 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libffi_convenience.la @@ -0,0 +1,41 @@ +# libffi_convenience.la - a libtool library file +# Generated by libtool (GNU libtool) 
2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='' + +# Names of this library. +library_names='' + +# The name of the static archive. +old_library='libffi_convenience.a' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags=' ' + +# Libraries that this one depends upon. +dependency_libs=' -L/usr/local/opt/libffi/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libffi_convenience. +current= +age= +revision= + +# Is this an already installed library? +installed=no + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='' diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libtool b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libtool new file mode 100755 index 0000000..54dcebf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/libtool @@ -0,0 +1,11813 @@ +#! /bin/sh +# Generated automatically by config.status (libffi) 3.3 +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: ${LT_SYS_LIBRARY_PATH=""} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=2.4.6 +macro_revision=2.4.6 + +# Whether or not to build shared libraries. +build_libtool_libs=yes + +# Whether or not to build static libraries. +build_old_libs=no + +# What type of objects to build. +pic_mode=yes + +# Whether or not to optimize for fast installation. +fast_install=needless + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec= + +# Shell to use when invoking shell scripts. +SHELL="/bin/sh" + +# An echo program that protects backslashes. +ECHO="printf %s\\n" + +# The PATH separator for the build system. +PATH_SEPARATOR=":" + +# The host system. +host_alias= +host=arm-apple-darwin21.3.0 +host_os=darwin21.3.0 + +# The build system. 
+build_alias= +build=arm-apple-darwin21.3.0 +build_os=darwin21.3.0 + +# A sed program that does not truncate output. +SED="/usr/bin/sed" + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP="/usr/bin/grep" + +# An ERE matcher. +EGREP="/usr/bin/grep -E" + +# A literal string matcher. +FGREP="/usr/bin/grep -F" + +# A BSD- or MS-compatible name lister. +NM="/usr/bin/nm -B" + +# Whether we need soft or hard links. +LN_S="ln -s" + +# What is the maximum length of a command? +max_cmd_len=786432 + +# Object file suffix (normally "o"). +objext=o + +# Executable file suffix (normally ""). +exeext= + +# whether the shell understands "unset". +lt_unset=unset + +# turn spaces into newlines. +SP2NL="tr \\040 \\012" + +# turn newlines into spaces. +NL2SP="tr \\015\\012 \\040\\040" + +# convert $build file names to $host format. +to_host_file_cmd=func_convert_file_noop + +# convert $build files to toolchain format. +to_tool_file_cmd=func_convert_file_noop + +# An object symbol dumper. +OBJDUMP="objdump" + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method="pass_all" + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd="\$MAGIC_CMD" + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob="" + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob="no" + +# DLL creation program. +DLLTOOL="false" + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd="printf %s\\n" + +# The archiver. +AR="ar" + +# Flags to create an archive. +AR_FLAGS="cru" + +# How to feed a file listing to the archiver. +archiver_list_spec="" + +# A symbol stripping program. +STRIP="strip" + +# Commands used to install an old-style archive. +RANLIB="ranlib" +old_postinstall_cmds="chmod 644 \$oldlib~\$RANLIB \$tool_oldlib" +old_postuninstall_cmds="" + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=yes + +# A C compiler. +LTCC="gcc -fdeclspec" + +# LTCC compiler flags. +LTCFLAGS=" -Wall -fexceptions" + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe="sed -n -e 's/^.*[ ]\\([BCDEGRST][BCDEGRST]*\\)[ ][ ]*_\\([_A-Za-z][_A-Za-z0-9]*\\)\$/\\1 _\\2 \\2/p' | sed '/ __gnu_lto/d'" + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl="sed -n -e 's/^T .* \\(.*\\)\$/extern int \\1();/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/extern char \\1;/p'" + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import="" + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address="sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p'" + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(lib.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/ {\"lib\\1\", (void *) \\&\\1},/p'" + +# The name lister interface. +nm_interface="BSD nm" + +# Specify filename containing input files for $NM. +nm_file_list_spec="@" + +# The root where to search for dependent libraries,and where our libraries should be installed. 
+lt_sysroot= + +# Command to truncate a binary pipe. +lt_truncate_bin="/bin/dd bs=4096 count=1" + +# The name of the directory that contains temporary libtool files. +objdir=.libs + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=file + +# Must we lock files when doing compilation? +need_locks="no" + +# Manifest tool. +MANIFEST_TOOL=":" + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL="dsymutil" + +# Tool to change global to local symbols on Mac OS X. +NMEDIT="nmedit" + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO="lipo" + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL="otool" + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=":" + +# Old archive suffix (normally "a"). +libext=a + +# Shared library suffix (normally ".so"). +shrext_cmds="\`test .\$module = .yes && echo .so || echo .dylib\`" + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds="" + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink="PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + +# Do we need the "lib" prefix for modules? +need_lib_prefix=no + +# Do we need a version for libraries? +need_version=no + +# Library versioning type. +version_type=darwin + +# Shared library runtime path variable. +runpath_var= + +# Shared library path variable. +shlibpath_var=DYLD_LIBRARY_PATH + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=yes + +# Format of library name prefix. +libname_spec="lib\$name" + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec="\$libname\$release\$major\$shared_ext \$libname\$shared_ext" + +# The coded name of the library, if different from the real name. +soname_spec="\$libname\$release\$major\$shared_ext" + +# Permission mode override for installation of shared libraries. +install_override_mode="" + +# Command to use after installation of a shared archive. +postinstall_cmds="" + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds="" + +# Commands used to finish a libtool library installation in a directory. +finish_cmds="" + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval="" + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=no + +# Compile-time system search path for libraries. +sys_lib_search_path_spec="/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/13.0.0 /usr/local/lib" + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec="/usr/local/lib /lib /usr/lib" + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path="" + +# Whether dlopen is supported. +dlopen_support=unknown + +# Whether dlopen of programs is supported. +dlopen_self=unknown + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=unknown + +# Commands to strip libraries. +old_striplib="strip -S" +striplib="strip -x" + + +# The linker used to build libraries. +LD="ld" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. 
+old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="gcc -fdeclspec" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin -fno-rtti -fno-exceptions" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. +archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-flat_namespace \$wl-undefined \${wl}suppress" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. 
+hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs="" + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects="" +postdep_objects="" +predeps="" +postdeps="" + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path="" + +# ### END LIBTOOL CONFIG + + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +#! /bin/sh +## DO NOT EDIT - This file generated from ./build-aux/ltmain.in +## by inline-source v2014-01-03.01 + +# libtool (GNU libtool) 2.4.6 +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. 
There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.6 Debian-2.4.6-2" +package_revision=2.4.6 + + +## ------ ## +## Usage. ## +## ------ ## + +# Run './libtool --help' for help with using this script from the +# command line. + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# After configure completes, it has a better idea of some of the +# shell tools we need than the defaults used by the functions shared +# with bootstrap, so set those here where they can still be over- +# ridden by the user, but otherwise take precedence. + +: ${AUTOCONF="autoconf"} +: ${AUTOMAKE="automake"} + + +## -------------------------- ## +## Source external libraries. ## +## -------------------------- ## + +# Much of our low-level functionality needs to be sourced from external +# libraries, which are installed to $pkgauxdir. + +# Set a version string for this script. +scriptversion=2015-01-20.17; # UTC + +# General shell script boiler plate, and helper functions. +# Written by Gary V. Vaughan, 2004 + +# Copyright (C) 2004-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. + +# As a special exception to the GNU General Public License, if you distribute +# this file as part of a program or library that is built using GNU Libtool, +# you may include this file under the same distribution terms that you use +# for the rest of that program. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# Evaluate this file near the top of your script to gain access to +# the functions and variables defined here: +# +# . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh +# +# If you need to override any of the default environment variable +# settings, do that before evaluating this file. 
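As a concrete illustration of the sourcing pattern described above, a client script can pin any of the overridable tools before pulling in funclib.sh; the wrapper name and relative path here are hypothetical, not taken from this repository:

    #!/bin/sh
    # my-tool: hypothetical client of funclib.sh
    SED=/usr/bin/sed    # optional override; must be set before funclib.sh is sourced
    . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh
    func_echo "using SED=$SED"    # helpers such as func_echo are now available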
+ + +## -------------------- ## +## Shell normalisation. ## +## -------------------- ## + +# Some shells need a little help to be as Bourne compatible as possible. +# Before doing anything else, make sure all that help has been provided! + +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac +fi + +# NLS nuisances: We save the old values in case they are required later. +_G_user_locale= +_G_safe_locale= +for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test set = \"\${$_G_var+set}\"; then + save_$_G_var=\$$_G_var + $_G_var=C + export $_G_var + _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" + _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" + fi" +done + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Make sure IFS has a sensible default +sp=' ' +nl=' +' +IFS="$sp $nl" + +# There are apparently some retarded systems that use ';' as a PATH separator! +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + + +## ------------------------- ## +## Locate command utilities. ## +## ------------------------- ## + + +# func_executable_p FILE +# ---------------------- +# Check that FILE is an executable regular file. +func_executable_p () +{ + test -f "$1" && test -x "$1" +} + + +# func_path_progs PROGS_LIST CHECK_FUNC [PATH] +# -------------------------------------------- +# Search for either a program that responds to --version with output +# containing "GNU", or else returned by CHECK_FUNC otherwise, by +# trying all the directories in PATH with each of the elements of +# PROGS_LIST. +# +# CHECK_FUNC should accept the path to a candidate program, and +# set $func_check_prog_result if it truncates its output less than +# $_G_path_prog_max characters. +func_path_progs () +{ + _G_progs_list=$1 + _G_check_func=$2 + _G_PATH=${3-"$PATH"} + + _G_path_prog_max=0 + _G_path_prog_found=false + _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} + for _G_dir in $_G_PATH; do + IFS=$_G_save_IFS + test -z "$_G_dir" && _G_dir=. + for _G_prog_name in $_G_progs_list; do + for _exeext in '' .EXE; do + _G_path_prog=$_G_dir/$_G_prog_name$_exeext + func_executable_p "$_G_path_prog" || continue + case `"$_G_path_prog" --version 2>&1` in + *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; + *) $_G_check_func $_G_path_prog + func_path_progs_result=$func_check_prog_result + ;; + esac + $_G_path_prog_found && break 3 + done + done + done + IFS=$_G_save_IFS + test -z "$func_path_progs_result" && { + echo "no acceptable sed could be found in \$PATH" >&2 + exit 1 + } +} + + +# We want to be able to use the functions in this file before configure +# has figured out where the best binaries are kept, which means we have +# to search for them ourselves - except when the results are already set +# where we skip the searches. + +# Unless the user overrides by setting SED, search the path for either GNU +# sed, or the sed that truncates its output the least. 
+test -z "$SED" && { + _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for _G_i in 1 2 3 4 5 6 7; do + _G_sed_script=$_G_sed_script$nl$_G_sed_script + done + echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed + _G_sed_script= + + func_check_prog_sed () + { + _G_path_prog=$1 + + _G_count=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo '' >> conftest.nl + "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin + rm -f conftest.sed + SED=$func_path_progs_result +} + + +# Unless the user overrides by setting GREP, search the path for either GNU +# grep, or the grep that truncates its output the least. +test -z "$GREP" && { + func_check_prog_grep () + { + _G_path_prog=$1 + + _G_count=0 + _G_path_prog_max=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo 'GREP' >> conftest.nl + "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin + GREP=$func_path_progs_result +} + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# All uppercase variable names are used for environment variables. These +# variables can be overridden by the user before calling a script that +# uses them if a suitable command of that name is not already available +# in the command search PATH. + +: ${CP="cp -f"} +: ${ECHO="printf %s\n"} +: ${EGREP="$GREP -E"} +: ${FGREP="$GREP -F"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} + + +## -------------------- ## +## Useful sed snippets. ## +## -------------------- ## + +sed_dirname='s|/[^/]*$||' +sed_basename='s|^.*/||' + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s|\([`"$\\]\)|\\\1|g' + +# Same as above, but do not quote variable references. +sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' + +# Sed substitution that converts a w32 file name or path +# that contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. 
+sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-'\' parameter expansions in output of sed_double_quote_subst that +# were '\'-ed in input to the same. If an odd number of '\' preceded a +# '$' in input to sed_double_quote_subst, that '$' was protected from +# expansion. Since each input '\' is now two '\'s, look for any number +# of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. +_G_bs='\\' +_G_bs2='\\\\' +_G_bs4='\\\\\\\\' +_G_dollar='\$' +sed_double_backslash="\ + s/$_G_bs4/&\\ +/g + s/^$_G_bs2$_G_dollar/$_G_bs&/ + s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g + s/\n//g" + + +## ----------------- ## +## Global variables. ## +## ----------------- ## + +# Except for the global variables explicitly listed below, the following +# functions in the '^func_' namespace, and the '^require_' namespace +# variables initialised in the 'Resource management' section, sourcing +# this file will not pollute your global namespace with anything +# else. There's no portable way to scope variables in Bourne shell +# though, so actually running these functions will sometimes place +# results into a variable named after the function, and often use +# temporary variables in the '^_G_' namespace. If you are careful to +# avoid using those namespaces casually in your sourcing script, things +# should continue to work as you expect. And, of course, you can freely +# overwrite any of the functions or variables defined here before +# calling anything to customize them. + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +# Allow overriding, eg assuming that you follow the convention of +# putting '$debug_cmd' at the start of all your functions, you can get +# bash to show function call trace with: +# +# debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +debug_cmd=${debug_cmd-":"} +exit_cmd=: + +# By convention, finish your script with: +# +# exit $exit_status +# +# so that you can set exit_status to non-zero if you want to indicate +# something went wrong during execution without actually bailing out at +# the point of failure. +exit_status=$EXIT_SUCCESS + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath=$0 + +# The name of this program. +progname=`$ECHO "$progpath" |$SED "$sed_basename"` + +# Make sure we have an absolute progpath for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` + progdir=`cd "$progdir" && pwd` + progpath=$progdir/$progname + ;; + *) + _G_IFS=$IFS + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS=$_G_IFS + test -x "$progdir/$progname" && break + done + IFS=$_G_IFS + test -n "$progdir" || progdir=`pwd` + progpath=$progdir/$progname + ;; +esac + + +## ----------------- ## +## Standard options. ## +## ----------------- ## + +# The following options affect the operation of the functions defined +# below, and should be set appropriately depending on run-time para- +# meters passed on the command line. + +opt_dry_run=false +opt_quiet=false +opt_verbose=false + +# Categories 'all' and 'none' are always available. 
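As the next comment notes, client scripts append their own categories; a minimal hedged sketch of that flow, assuming funclib.sh has already been sourced (the 'portability' category and the message are invented, and note that the libtool wrapper further below replaces func_warning with an uncategorised version):

    func_append warning_categories " portability"
    opt_warning_types=$warning_categories    # show every category for this sketch
    func_warning portability "ksh88 may mishandle this construct"
    # prints "<progname>: warning: ksh88 may mishandle this construct" on stderr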
Append any others +# you will pass as the first argument to func_warning from your own +# code. +warning_categories= + +# By default, display warnings according to 'opt_warning_types'. Set +# 'warning_func' to ':' to elide all warnings, or func_fatal_error to +# treat the next displayed warning as a fatal error. +warning_func=func_warn_and_continue + +# Set to 'all' to display all warnings, 'none' to suppress all +# warnings, or a space delimited list of some subset of +# 'warning_categories' to display only the listed warnings. +opt_warning_types=all + + +## -------------------- ## +## Resource management. ## +## -------------------- ## + +# This section contains definitions for functions that each ensure a +# particular resource (a file, or a non-empty configuration variable for +# example) is available, and if appropriate to extract default values +# from pertinent package files. Call them using their associated +# 'require_*' variable to ensure that they are executed, at most, once. +# +# It's entirely deliberate that calling these functions can set +# variables that don't obey the namespace limitations obeyed by the rest +# of this file, in order that that they be as useful as possible to +# callers. + + +# require_term_colors +# ------------------- +# Allow display of bold text on terminals that support it. +require_term_colors=func_require_term_colors +func_require_term_colors () +{ + $debug_cmd + + test -t 1 && { + # COLORTERM and USE_ANSI_COLORS environment variables take + # precedence, because most terminfo databases neglect to describe + # whether color sequences are supported. + test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} + + if test 1 = "$USE_ANSI_COLORS"; then + # Standard ANSI escape sequences + tc_reset='' + tc_bold=''; tc_standout='' + tc_red=''; tc_green='' + tc_blue=''; tc_cyan='' + else + # Otherwise trust the terminfo database after all. + test -n "`tput sgr0 2>/dev/null`" && { + tc_reset=`tput sgr0` + test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` + tc_standout=$tc_bold + test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` + test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` + test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` + test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` + test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` + } + fi + } + + require_term_colors=: +} + + +## ----------------- ## +## Function library. ## +## ----------------- ## + +# This section contains a variety of useful functions to call in your +# scripts. Take note of the portable wrappers for features provided by +# some modern shells, which will fall back to slower equivalents on +# less featureful shells. + + +# func_append VAR VALUE +# --------------------- +# Append VALUE onto the existing contents of VAR. + + # We should try to minimise forks, especially on Windows where they are + # unreasonably slow, so skip the feature probes when bash or zsh are + # being used: + if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then + : ${_G_HAVE_ARITH_OP="yes"} + : ${_G_HAVE_XSI_OPS="yes"} + # The += operator was introduced in bash 3.1 + case $BASH_VERSION in + [12].* | 3.0 | 3.0*) ;; + *) + : ${_G_HAVE_PLUSEQ_OP="yes"} + ;; + esac + fi + + # _G_HAVE_PLUSEQ_OP + # Can be empty, in which case the shell is probed, "yes" if += is + # useable or anything else if it does not work. 
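# Whichever branch the probe just below selects, func_append behaves the
# same; an illustrative use (variable names invented, not in the original):
#
#    flags=-g
#    func_append flags " -O2"
#    # $flags is now '-g -O2' under both the '+=' and the fallback version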
+ test -z "$_G_HAVE_PLUSEQ_OP" \ + && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ + && _G_HAVE_PLUSEQ_OP=yes + +if test yes = "$_G_HAVE_PLUSEQ_OP" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_append () + { + $debug_cmd + + eval "$1+=\$2" + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_append () + { + $debug_cmd + + eval "$1=\$$1\$2" + } +fi + + +# func_append_quoted VAR VALUE +# ---------------------------- +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +if test yes = "$_G_HAVE_PLUSEQ_OP"; then + eval 'func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1+=\\ \$func_quote_for_eval_result" + }' +else + func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1=\$$1\\ \$func_quote_for_eval_result" + } +fi + + +# func_append_uniq VAR VALUE +# -------------------------- +# Append unique VALUE onto the existing contents of VAR, assuming +# entries are delimited by the first character of VALUE. For example: +# +# func_append_uniq options " --another-option option-argument" +# +# will only append to $options if " --another-option option-argument " +# is not already present somewhere in $options already (note spaces at +# each end implied by leading space in second argument). +func_append_uniq () +{ + $debug_cmd + + eval _G_current_value='`$ECHO $'$1'`' + _G_delim=`expr "$2" : '\(.\)'` + + case $_G_delim$_G_current_value$_G_delim in + *"$2$_G_delim"*) ;; + *) func_append "$@" ;; + esac +} + + +# func_arith TERM... +# ------------------ +# Set func_arith_result to the result of evaluating TERMs. + test -z "$_G_HAVE_ARITH_OP" \ + && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ + && _G_HAVE_ARITH_OP=yes + +if test yes = "$_G_HAVE_ARITH_OP"; then + eval 'func_arith () + { + $debug_cmd + + func_arith_result=$(( $* )) + }' +else + func_arith () + { + $debug_cmd + + func_arith_result=`expr "$@"` + } +fi + + +# func_basename FILE +# ------------------ +# Set func_basename_result to FILE with everything up to and including +# the last / stripped. +if test yes = "$_G_HAVE_XSI_OPS"; then + # If this shell supports suffix pattern removal, then use it to avoid + # forking. Hide the definitions single quotes in case the shell chokes + # on unsupported syntax... + _b='func_basename_result=${1##*/}' + _d='case $1 in + */*) func_dirname_result=${1%/*}$2 ;; + * ) func_dirname_result=$3 ;; + esac' + +else + # ...otherwise fall back to using sed. + _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' + _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` + if test "X$func_dirname_result" = "X$1"; then + func_dirname_result=$3 + else + func_append func_dirname_result "$2" + fi' +fi + +eval 'func_basename () +{ + $debug_cmd + + '"$_b"' +}' + + +# func_dirname FILE APPEND NONDIR_REPLACEMENT +# ------------------------------------------- +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +eval 'func_dirname () +{ + $debug_cmd + + '"$_d"' +}' + + +# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT +# -------------------------------------------------------- +# Perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. 
+# value retuned in "$func_basename_result" +# For efficiency, we do not delegate to the functions above but instead +# duplicate the functionality here. +eval 'func_dirname_and_basename () +{ + $debug_cmd + + '"$_b"' + '"$_d"' +}' + + +# func_echo ARG... +# ---------------- +# Echo program name prefixed message. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_echo_all ARG... +# -------------------- +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + + +# func_echo_infix_1 INFIX ARG... +# ------------------------------ +# Echo program name, followed by INFIX on the first line, with any +# additional lines not showing INFIX. +func_echo_infix_1 () +{ + $debug_cmd + + $require_term_colors + + _G_infix=$1; shift + _G_indent=$_G_infix + _G_prefix="$progname: $_G_infix: " + _G_message=$* + + # Strip color escape sequences before counting printable length + for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" + do + test -n "$_G_tc" && { + _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` + _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` + } + done + _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes + + func_echo_infix_1_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_infix_1_IFS + $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 + _G_prefix=$_G_indent + done + IFS=$func_echo_infix_1_IFS +} + + +# func_error ARG... +# ----------------- +# Echo program name prefixed message to standard error. +func_error () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 +} + + +# func_fatal_error ARG... +# ----------------------- +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + $debug_cmd + + func_error "$*" + exit $EXIT_FAILURE +} + + +# func_grep EXPRESSION FILENAME +# ----------------------------- +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $debug_cmd + + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_len STRING +# --------------- +# Set func_len_result to the length of STRING. STRING may not +# start with a hyphen. + test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_len () + { + $debug_cmd + + func_len_result=${#1} + }' +else + func_len () + { + $debug_cmd + + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` + } +fi + + +# func_mkdir_p DIRECTORY-PATH +# --------------------------- +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + $debug_cmd + + _G_directory_path=$1 + _G_dir_list= + + if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then + + # Protect directory names starting with '-' + case $_G_directory_path in + -*) _G_directory_path=./$_G_directory_path ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$_G_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. 
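      # (Illustration, not in the original: for '/a/b/c' when none of those
      # directories exist yet, the loop below accumulates roughly
      # '/a:/a/b:/a/b/c', and the second loop then creates each entry in turn.)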
+ _G_dir_list=$_G_directory_path:$_G_dir_list + + # If the last portion added has no slash in it, the list is done + case $_G_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` + done + _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` + + func_mkdir_p_IFS=$IFS; IFS=: + for _G_dir in $_G_dir_list; do + IFS=$func_mkdir_p_IFS + # mkdir can fail with a 'File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$_G_dir" 2>/dev/null || : + done + IFS=$func_mkdir_p_IFS + + # Bail out if we (or some other process) failed to create a directory. + test -d "$_G_directory_path" || \ + func_fatal_error "Failed to create '$1'" + fi +} + + +# func_mktempdir [BASENAME] +# ------------------------- +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, BASENAME is the basename for that directory. +func_mktempdir () +{ + $debug_cmd + + _G_template=${TMPDIR-/tmp}/${1-$progname} + + if test : = "$opt_dry_run"; then + # Return a directory name, but don't create it in dry-run mode + _G_tmpdir=$_G_template-$$ + else + + # If mktemp works, use that first and foremost + _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` + + if test ! -d "$_G_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + _G_tmpdir=$_G_template-${RANDOM-0}$$ + + func_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$_G_tmpdir" + umask $func_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$_G_tmpdir" || \ + func_fatal_error "cannot create temporary directory '$_G_tmpdir'" + fi + + $ECHO "$_G_tmpdir" +} + + +# func_normal_abspath PATH +# ------------------------ +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +func_normal_abspath () +{ + $debug_cmd + + # These SED scripts presuppose an absolute path with a trailing slash. + _G_pathcar='s|^/\([^/]*\).*$|\1|' + _G_pathcdr='s|^/[^/]*||' + _G_removedotparts=':dotsl + s|/\./|/|g + t dotsl + s|/\.$|/|' + _G_collapseslashes='s|/\{1,\}|/|g' + _G_finalslash='s|/*$|/|' + + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. 
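  # (Illustration, not in the original: '/usr//lib/./libfoo/..' is reduced by
  # the SED passes below to '/usr/lib/libfoo/../', and the loop that follows
  # cancels 'libfoo/..', leaving func_normal_abspath_result='/usr/lib'.)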
+ func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` + while :; do + # Processed it all yet? + if test / = "$func_normal_abspath_tpath"; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result"; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + + +# func_notquiet ARG... +# -------------------- +# Echo program name prefixed message only when not in quiet mode. +func_notquiet () +{ + $debug_cmd + + $opt_quiet || func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + + +# func_relative_path SRCDIR DSTDIR +# -------------------------------- +# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. +func_relative_path () +{ + $debug_cmd + + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=$func_dirname_result + if test -z "$func_relative_path_tlibdir"; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test -n "$func_stripname_result"; then + func_append func_relative_path_result "/$func_stripname_result" + fi + + # Normalisation. If bindir is libdir, return '.' else relative path. + if test -n "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + fi + + test -n "$func_relative_path_result" || func_relative_path_result=. + + : +} + + +# func_quote_for_eval ARG... 
+# -------------------------- +# Aesthetically quote ARGs to be evaled later. +# This function returns two values: +# i) func_quote_for_eval_result +# double-quoted, suitable for a subsequent eval +# ii) func_quote_for_eval_unquoted_result +# has all characters that are still active within double +# quotes backslashified. +func_quote_for_eval () +{ + $debug_cmd + + func_quote_for_eval_unquoted_result= + func_quote_for_eval_result= + while test 0 -lt $#; do + case $1 in + *[\\\`\"\$]*) + _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; + *) + _G_unquoted_arg=$1 ;; + esac + if test -n "$func_quote_for_eval_unquoted_result"; then + func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" + else + func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" + fi + + case $_G_unquoted_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and variable expansion + # for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_quoted_arg=\"$_G_unquoted_arg\" + ;; + *) + _G_quoted_arg=$_G_unquoted_arg + ;; + esac + + if test -n "$func_quote_for_eval_result"; then + func_append func_quote_for_eval_result " $_G_quoted_arg" + else + func_append func_quote_for_eval_result "$_G_quoted_arg" + fi + shift + done +} + + +# func_quote_for_expand ARG +# ------------------------- +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + $debug_cmd + + case $1 in + *[\\\`\"]*) + _G_arg=`$ECHO "$1" | $SED \ + -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; + *) + _G_arg=$1 ;; + esac + + case $_G_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_arg=\"$_G_arg\" + ;; + esac + + func_quote_for_expand_result=$_G_arg +} + + +# func_stripname PREFIX SUFFIX NAME +# --------------------------------- +# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_stripname () + { + $debug_cmd + + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary variable first. + func_stripname_result=$3 + func_stripname_result=${func_stripname_result#"$1"} + func_stripname_result=${func_stripname_result%"$2"} + }' +else + func_stripname () + { + $debug_cmd + + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; + esac + } +fi + + +# func_show_eval CMD [FAIL_EXP] +# ----------------------------- +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. 
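# An illustrative call, not taken from the original script:
#
#    func_show_eval 'rm -f a.out' 'exit 1'
#
# prints the rm command unless --quiet is in force, runs it unless --dry-run
# is in force, and evaluates the second argument only if rm fails.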
+func_show_eval () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + func_quote_for_expand "$_G_cmd" + eval "func_notquiet $func_quote_for_expand_result" + + $opt_dry_run || { + eval "$_G_cmd" + _G_status=$? + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_show_eval_locale CMD [FAIL_EXP] +# ------------------------------------ +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + $opt_quiet || { + func_quote_for_expand "$_G_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + $opt_dry_run || { + eval "$_G_user_locale + $_G_cmd" + _G_status=$? + eval "$_G_safe_locale" + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_tr_sh +# ---------- +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + $debug_cmd + + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_verbose ARG... +# ------------------- +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $debug_cmd + + $opt_verbose && func_echo "$*" + + : +} + + +# func_warn_and_continue ARG... +# ----------------------------- +# Echo program name prefixed warning message to standard error. +func_warn_and_continue () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 +} + + +# func_warning CATEGORY ARG... +# ---------------------------- +# Echo program name prefixed warning message to standard error. Warning +# messages can be filtered according to CATEGORY, where this function +# elides messages where CATEGORY is not listed in the global variable +# 'opt_warning_types'. +func_warning () +{ + $debug_cmd + + # CATEGORY must be in the warning_categories list! + case " $warning_categories " in + *" $1 "*) ;; + *) func_internal_error "invalid warning category '$1'" ;; + esac + + _G_category=$1 + shift + + case " $opt_warning_types " in + *" $_G_category "*) $warning_func ${1+"$@"} ;; + esac +} + + +# func_sort_ver VER1 VER2 +# ----------------------- +# 'sort -V' is not generally available. +# Note this deviates from the version comparison in automake +# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a +# but this should suffice as we won't be specifying old +# version formats or redundant trailing .0 in bootstrap.conf. +# If we did want full compatibility then we should probably +# use m4_version_compare from autoconf. +func_sort_ver () +{ + $debug_cmd + + printf '%s\n%s\n' "$1" "$2" \ + | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n +} + +# func_lt_ver PREV CURR +# --------------------- +# Return true if PREV and CURR are in the correct order according to +# func_sort_ver, otherwise false. Use it like this: +# +# func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." 
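# Illustrative results, not part of the original script:
#
#    func_lt_ver 2.4.2 2.4.6    # true:  2.4.2 sorts first
#    func_lt_ver 1.5 1.5.0      # true:  the 1.5 < 1.5.0 caveat noted above
#    func_lt_ver 2.4.6 2.4.2    # false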
+func_lt_ver () +{ + $debug_cmd + + test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: +#! /bin/sh + +# Set a version string for this script. +scriptversion=2014-01-07.03; # UTC + +# A portable, pluggable option parser for Bourne shell. +# Written by Gary V. Vaughan, 2010 + +# Copyright (C) 2010-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# This file is a library for parsing options in your shell scripts along +# with assorted other useful supporting features that you can make use +# of too. +# +# For the simplest scripts you might need only: +# +# #!/bin/sh +# . relative/path/to/funclib.sh +# . relative/path/to/options-parser +# scriptversion=1.0 +# func_options ${1+"$@"} +# eval set dummy "$func_options_result"; shift +# ...rest of your script... +# +# In order for the '--version' option to work, you will need to have a +# suitably formatted comment like the one at the top of this file +# starting with '# Written by ' and ending with '# warranty; '. +# +# For '-h' and '--help' to work, you will also need a one line +# description of your script's purpose in a comment directly above the +# '# Written by ' line, like the one at the top of this file. +# +# The default options also support '--debug', which will turn on shell +# execution tracing (see the comment above debug_cmd below for another +# use), and '--verbose' and the func_verbose function to allow your script +# to display verbose messages only when your user has specified +# '--verbose'. +# +# After sourcing this file, you can plug processing for additional +# options by amending the variables from the 'Configuration' section +# below, and following the instructions in the 'Option parsing' +# section further down. + +## -------------- ## +## Configuration. ## +## -------------- ## + +# You should override these variables in your script after sourcing this +# file so that they reflect the customisations you have added to the +# option parser. + +# The usage line for option parsing errors and the start of '-h' and +# '--help' output messages. You can embed shell variables for delayed +# expansion at the time the message is displayed, but you will need to +# quote other shell meta-characters carefully to prevent them being +# expanded when the contents are evaled. +usage='$progpath [OPTION]...' + +# Short help message in response to '-h' and '--help'. 
Add to this or +# override it after sourcing this library to reflect the full set of +# options your script accepts. +usage_message="\ + --debug enable verbose shell tracing + -W, --warnings=CATEGORY + report the warnings falling in CATEGORY [all] + -v, --verbose verbosely report processing + --version print version information and exit + -h, --help print short or long help message and exit +" + +# Additional text appended to 'usage_message' in response to '--help'. +long_help_message=" +Warning categories include: + 'all' show all warnings + 'none' turn off all the warnings + 'error' warnings are treated as fatal errors" + +# Help message printed before fatal option parsing errors. +fatal_help="Try '\$progname --help' for more information." + + + +## ------------------------- ## +## Hook function management. ## +## ------------------------- ## + +# This section contains functions for adding, removing, and running hooks +# to the main code. A hook is just a named list of of function, that can +# be run in order later on. + +# func_hookable FUNC_NAME +# ----------------------- +# Declare that FUNC_NAME will run hooks added with +# 'func_add_hook FUNC_NAME ...'. +func_hookable () +{ + $debug_cmd + + func_append hookable_fns " $1" +} + + +# func_add_hook FUNC_NAME HOOK_FUNC +# --------------------------------- +# Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must +# first have been declared "hookable" by a call to 'func_hookable'. +func_add_hook () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not accept hook functions." ;; + esac + + eval func_append ${1}_hooks '" $2"' +} + + +# func_remove_hook FUNC_NAME HOOK_FUNC +# ------------------------------------ +# Remove HOOK_FUNC from the list of functions called by FUNC_NAME. +func_remove_hook () +{ + $debug_cmd + + eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' +} + + +# func_run_hooks FUNC_NAME [ARG]... +# --------------------------------- +# Run all hook functions registered to FUNC_NAME. +# It is assumed that the list of hook functions contains nothing more +# than a whitespace-delimited list of legal shell function names, and +# no effort is wasted trying to catch shell meta-characters or preserve +# whitespace. +func_run_hooks () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not support hook funcions.n" ;; + esac + + eval _G_hook_fns=\$$1_hooks; shift + + for _G_hook in $_G_hook_fns; do + eval $_G_hook '"$@"' + + # store returned options list back into positional + # parameters for next 'cmd' execution. + eval _G_hook_result=\$${_G_hook}_result + eval set dummy "$_G_hook_result"; shift + done + + func_quote_for_eval ${1+"$@"} + func_run_hooks_result=$func_quote_for_eval_result +} + + + +## --------------- ## +## Option parsing. ## +## --------------- ## + +# In order to add your own option parsing hooks, you must accept the +# full positional parameter list in your hook function, remove any +# options that you action, and then pass back the remaining unprocessed +# options in '_result', escaped suitably for +# 'eval'. Like this: +# +# my_options_prep () +# { +# $debug_cmd +# +# # Extend the existing usage message. 
+# usage_message=$usage_message' +# -s, --silent don'\''t print informational messages +# ' +# +# func_quote_for_eval ${1+"$@"} +# my_options_prep_result=$func_quote_for_eval_result +# } +# func_add_hook func_options_prep my_options_prep +# +# +# my_silent_option () +# { +# $debug_cmd +# +# # Note that for efficiency, we parse as many options as we can +# # recognise in a loop before passing the remainder back to the +# # caller on the first unrecognised argument we encounter. +# while test $# -gt 0; do +# opt=$1; shift +# case $opt in +# --silent|-s) opt_silent=: ;; +# # Separate non-argument short options: +# -s*) func_split_short_opt "$_G_opt" +# set dummy "$func_split_short_opt_name" \ +# "-$func_split_short_opt_arg" ${1+"$@"} +# shift +# ;; +# *) set dummy "$_G_opt" "$*"; shift; break ;; +# esac +# done +# +# func_quote_for_eval ${1+"$@"} +# my_silent_option_result=$func_quote_for_eval_result +# } +# func_add_hook func_parse_options my_silent_option +# +# +# my_option_validation () +# { +# $debug_cmd +# +# $opt_silent && $opt_verbose && func_fatal_help "\ +# '--silent' and '--verbose' options are mutually exclusive." +# +# func_quote_for_eval ${1+"$@"} +# my_option_validation_result=$func_quote_for_eval_result +# } +# func_add_hook func_validate_options my_option_validation +# +# You'll alse need to manually amend $usage_message to reflect the extra +# options you parse. It's preferable to append if you can, so that +# multiple option parsing hooks can be added safely. + + +# func_options [ARG]... +# --------------------- +# All the functions called inside func_options are hookable. See the +# individual implementations for details. +func_hookable func_options +func_options () +{ + $debug_cmd + + func_options_prep ${1+"$@"} + eval func_parse_options \ + ${func_options_prep_result+"$func_options_prep_result"} + eval func_validate_options \ + ${func_parse_options_result+"$func_parse_options_result"} + + eval func_run_hooks func_options \ + ${func_validate_options_result+"$func_validate_options_result"} + + # save modified positional parameters for caller + func_options_result=$func_run_hooks_result +} + + +# func_options_prep [ARG]... +# -------------------------- +# All initialisations required before starting the option parse loop. +# Note that when calling hook functions, we pass through the list of +# positional parameters. If a hook function modifies that list, and +# needs to propogate that back to rest of this script, then the complete +# modified list must be put in 'func_run_hooks_result' before +# returning. +func_hookable func_options_prep +func_options_prep () +{ + $debug_cmd + + # Option defaults: + opt_verbose=false + opt_warning_types= + + func_run_hooks func_options_prep ${1+"$@"} + + # save modified positional parameters for caller + func_options_prep_result=$func_run_hooks_result +} + + +# func_parse_options [ARG]... +# --------------------------- +# The main option parsing loop. +func_hookable func_parse_options +func_parse_options () +{ + $debug_cmd + + func_parse_options_result= + + # this just eases exit handling + while test $# -gt 0; do + # Defer to hook functions for initial option parsing, so they + # get priority in the event of reusing an option name. + func_run_hooks func_parse_options ${1+"$@"} + + # Adjust func_parse_options positional parameters to match + eval set dummy "$func_run_hooks_result"; shift + + # Break out of the loop if we already parsed every option. 
+ test $# -gt 0 || break + + _G_opt=$1 + shift + case $_G_opt in + --debug|-x) debug_cmd='set -x' + func_echo "enabling shell trace mode" + $debug_cmd + ;; + + --no-warnings|--no-warning|--no-warn) + set dummy --warnings none ${1+"$@"} + shift + ;; + + --warnings|--warning|-W) + test $# = 0 && func_missing_arg $_G_opt && break + case " $warning_categories $1" in + *" $1 "*) + # trailing space prevents matching last $1 above + func_append_uniq opt_warning_types " $1" + ;; + *all) + opt_warning_types=$warning_categories + ;; + *none) + opt_warning_types=none + warning_func=: + ;; + *error) + opt_warning_types=$warning_categories + warning_func=func_fatal_error + ;; + *) + func_fatal_error \ + "unsupported warning category: '$1'" + ;; + esac + shift + ;; + + --verbose|-v) opt_verbose=: ;; + --version) func_version ;; + -\?|-h) func_usage ;; + --help) func_help ;; + + # Separate optargs to long options (plugins may need this): + --*=*) func_split_equals "$_G_opt" + set dummy "$func_split_equals_lhs" \ + "$func_split_equals_rhs" ${1+"$@"} + shift + ;; + + # Separate optargs to short options: + -W*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-v*|-x*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + esac + done + + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + func_parse_options_result=$func_quote_for_eval_result +} + + +# func_validate_options [ARG]... +# ------------------------------ +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +func_hookable func_validate_options +func_validate_options () +{ + $debug_cmd + + # Display all warnings if -W was not given. + test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" + + func_run_hooks func_validate_options ${1+"$@"} + + # Bail if the options were screwed! + $exit_cmd $EXIT_FAILURE + + # save modified positional parameters for caller + func_validate_options_result=$func_run_hooks_result +} + + + +## ----------------- ## +## Helper functions. ## +## ----------------- ## + +# This section contains the helper functions used by the rest of the +# hookable option parser framework in ascii-betical order. + + +# func_fatal_help ARG... +# ---------------------- +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + eval \$ECHO \""$fatal_help"\" + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + + +# func_help +# --------- +# Echo long help message to standard output and exit. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message" + exit 0 +} + + +# func_missing_arg ARGNAME +# ------------------------ +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $debug_cmd + + func_error "Missing argument for '$1'." + exit_cmd=exit +} + + +# func_split_equals STRING +# ------------------------ +# Set func_split_equals_lhs and func_split_equals_rhs shell variables after +# splitting STRING at the '=' sign. 
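# For example (illustrative, not from the original):
#
#    func_split_equals --mode=link
#
# leaves func_split_equals_lhs='--mode' and func_split_equals_rhs='link';
# when STRING contains no '=', func_split_equals_rhs is set to empty.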
+test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=${1%%=*} + func_split_equals_rhs=${1#*=} + test "x$func_split_equals_lhs" = "x$1" \ + && func_split_equals_rhs= + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` + func_split_equals_rhs= + test "x$func_split_equals_lhs" = "x$1" \ + || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` + } +fi #func_split_equals + + +# func_split_short_opt SHORTOPT +# ----------------------------- +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"} + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` + func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` + } +fi #func_split_short_opt + + +# func_usage +# ---------- +# Echo short help message to standard output and exit. +func_usage () +{ + $debug_cmd + + func_usage_message + $ECHO "Run '$progname --help |${PAGER-more}' for full usage" + exit 0 +} + + +# func_usage_message +# ------------------ +# Echo short help message to standard output. +func_usage_message () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + echo + $SED -n 's|^# || + /^Written by/{ + x;p;x + } + h + /^Written by/q' < "$progpath" + echo + eval \$ECHO \""$usage_message"\" +} + + +# func_version +# ------------ +# Echo version message to standard output and exit. +func_version () +{ + $debug_cmd + + printf '%s\n' "$progname $scriptversion" + $SED -n ' + /(C)/!b go + :more + /\./!{ + N + s|\n# | | + b more + } + :go + /^# Written by /,/# warranty; / { + s|^# || + s|^# *$|| + s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| + p + } + /^# Written by / { + s|^# || + p + } + /^warranty; /q' < "$progpath" + + exit $? +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: + +# Set a version string. +scriptversion='(GNU libtool) 2.4.6' + + +# func_echo ARG... +# ---------------- +# Libtool also displays the current mode in messages, so override +# funclib.sh func_echo with this custom definition. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_warning ARG... +# ------------------- +# Libtool warnings are not categorized, so override funclib.sh +# func_warning with this simpler definition. +func_warning () +{ + $debug_cmd + + $warning_func ${1+"$@"} +} + + +## ---------------- ## +## Options parsing. ## +## ---------------- ## + +# Hook in the functions to make sure our own options are parsed during +# the option parsing loop. 
+ +usage='$progpath [OPTION]... [MODE-ARG]...' + +# Short help message in response to '-h'. +usage_message="Options: + --config show all configuration variables + --debug enable verbose shell tracing + -n, --dry-run display commands without modifying any files + --features display basic configuration information and exit + --mode=MODE use operation mode MODE + --no-warnings equivalent to '-Wnone' + --preserve-dup-deps don't remove duplicate dependency libraries + --quiet, --silent don't print informational messages + --tag=TAG use configuration variables from tag TAG + -v, --verbose print more informational messages than default + --version print version information + -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] + -h, --help, --help-all print short, long, or detailed help message +" + +# Additional text appended to 'usage_message' in response to '--help'. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message + +MODE must be one of the following: + + clean remove files from the build directory + compile compile a source file into a libtool object + execute automatically set library path, then run a program + finish complete the installation of libtool libraries + install install libraries or executables + link create a library or an executable + uninstall remove libraries from an installed directory + +MODE-ARGS vary depending on the MODE. When passed as first option, +'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that. +Try '$progname --help --mode=MODE' for a more detailed description of MODE. + +When reporting a bug, please describe a test case to reproduce it and +include the following information: + + host-triplet: $host + shell: $SHELL + compiler: $LTCC + compiler flags: $LTCFLAGS + linker: $LD (gnu? $with_gnu_ld) + version: $progname $scriptversion Debian-2.4.6-2 + automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q` + autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q` + +Report bugs to . +GNU libtool home page: . +General help using GNU software: ." + exit 0 +} + + +# func_lo2o OBJECT-NAME +# --------------------- +# Transform OBJECT-NAME from a '.lo' suffix to the platform specific +# object suffix. + +lo2o=s/\\.lo\$/.$objext/ +o2lo=s/\\.$objext\$/.lo/ + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_lo2o () + { + case $1 in + *.lo) func_lo2o_result=${1%.lo}.$objext ;; + * ) func_lo2o_result=$1 ;; + esac + }' + + # func_xform LIBOBJ-OR-SOURCE + # --------------------------- + # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise) + # suffix to a '.lo' libtool-object suffix. + eval 'func_xform () + { + func_xform_result=${1%.*}.lo + }' +else + # ...otherwise fall back to using sed. + func_lo2o () + { + func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"` + } + + func_xform () + { + func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'` + } +fi + + +# func_fatal_configuration ARG... +# ------------------------------- +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func__fatal_error ${1+"$@"} \ + "See the $PACKAGE documentation for more information." \ + "Fatal configuration error." +} + + +# func_config +# ----------- +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. 
+ $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + + +# func_features +# ------------- +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test yes = "$build_libtool_libs"; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test yes = "$build_old_libs"; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + + +# func_enable_tag TAGNAME +# ----------------------- +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname=$1 + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf=/$re_begincf/,/$re_endcf/p + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + + +# func_check_version_match +# ------------------------ +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# libtool_options_prep [ARG]... +# ----------------------------- +# Preparation for options parsed by libtool. 
+libtool_options_prep () +{ + $debug_mode + + # Option defaults: + opt_config=false + opt_dlopen= + opt_dry_run=false + opt_help=false + opt_mode= + opt_preserve_dup_deps=false + opt_quiet=false + + nonopt= + preserve_args= + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + esac + + # Pass back the list of options. + func_quote_for_eval ${1+"$@"} + libtool_options_prep_result=$func_quote_for_eval_result +} +func_add_hook func_options_prep libtool_options_prep + + +# libtool_parse_options [ARG]... +# --------------------------------- +# Provide handling for libtool specific options. +libtool_parse_options () +{ + $debug_cmd + + # Perform our own loop to consume as many options as possible in + # each iteration. + while test $# -gt 0; do + _G_opt=$1 + shift + case $_G_opt in + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + + --config) func_config ;; + + --dlopen|-dlopen) + opt_dlopen="${opt_dlopen+$opt_dlopen +}$1" + shift + ;; + + --preserve-dup-deps) + opt_preserve_dup_deps=: ;; + + --features) func_features ;; + + --finish) set dummy --mode finish ${1+"$@"}; shift ;; + + --help) opt_help=: ;; + + --help-all) opt_help=': help-all' ;; + + --mode) test $# = 0 && func_missing_arg $_G_opt && break + opt_mode=$1 + case $1 in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $_G_opt" + exit_cmd=exit + break + ;; + esac + shift + ;; + + --no-silent|--no-quiet) + opt_quiet=false + func_append preserve_args " $_G_opt" + ;; + + --no-warnings|--no-warning|--no-warn) + opt_warning=false + func_append preserve_args " $_G_opt" + ;; + + --no-verbose) + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --silent|--quiet) + opt_quiet=: + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --tag) test $# = 0 && func_missing_arg $_G_opt && break + opt_tag=$1 + func_append preserve_args " $_G_opt $1" + func_enable_tag "$1" + shift + ;; + + --verbose|-v) opt_quiet=false + opt_verbose=: + func_append preserve_args " $_G_opt" + ;; + + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + esac + done + + + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + libtool_parse_options_result=$func_quote_for_eval_result +} +func_add_hook func_parse_options libtool_parse_options + + + +# libtool_validate_options [ARG]... +# --------------------------------- +# Perform any sanity checks on option settings and/or unconsumed +# arguments. 
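+# Illustrative command line (an example, not taken from this script)
+# exercising the options consumed above and checked just below:
+#   ./libtool -n --tag=CC --mode=compile gcc -c foo.c -o foo.lo
+# '-n' (--dry-run) makes libtool print the commands it would run without
+# creating or modifying any files.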
+libtool_validate_options () +{ + # save first non-option argument + if test 0 -lt $#; then + nonopt=$1 + shift + fi + + # preserve --debug + test : = "$debug_cmd" || func_append preserve_args " --debug" + + case $host in + # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 + # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + test yes != "$build_libtool_libs" \ + && test yes != "$build_old_libs" \ + && func_fatal_configuration "not configured to build any kind of library" + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test execute != "$opt_mode"; then + func_error "unrecognized option '-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help=$help + help="Try '$progname --help --mode=$opt_mode' for more information." + } + + # Pass back the unparsed argument list + func_quote_for_eval ${1+"$@"} + libtool_validate_options_result=$func_quote_for_eval_result +} +func_add_hook func_validate_options libtool_validate_options + + +# Process options as early as possible so that --help and --version +# can return quickly. +func_options ${1+"$@"} +eval set dummy "$func_options_result"; shift + + + +## ----------- ## +## Main. ## +## ----------- ## + +magic='%%%MAGIC variable%%%' +magic_exe='%%%MAGIC EXE variable%%%' + +# Global variables. +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# func_generated_by_libtool +# True iff stdin has been generated by Libtool. This function is only +# a basic sanity check; it will hardly flush out determined imposters. +func_generated_by_libtool_p () +{ + $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if 'file' does not exist. 
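+# For reference (illustrative; names and versions are examples only), the
+# signature these checks look for is the comment header libtool writes into
+# every generated '.la' and '.lo' file, e.g.:
+#   # libfoo.la - a libtool library file
+#   # Generated by libtool (GNU libtool) 2.4.6
+# Only the first few lines are inspected, which keeps the check cheap.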
+func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case $lalib_p_line in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test yes = "$lalib_p" +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + test -f "$1" && + $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $debug_cmd + + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# 'FILE.' does not work on cygwin managed mounts. +func_source () +{ + $debug_cmd + + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case $lt_sysroot:$1 in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result='='$func_stripname_result + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' 
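+# Illustrative example of tag inference (the command lines are assumptions,
+# not output of this script): with a C-configured libtool that also carries a
+# CXX tag, either of these compiles C++ correctly --
+#   ./libtool --mode=compile g++ -c foo.cpp -o foo.lo            # tag inferred
+#   ./libtool --tag=CXX --mode=compile g++ -c foo.cpp -o foo.lo  # tag forced
+# If no tagged configuration matches the compiler, the function below aborts
+# and asks for an explicit '--tag'.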
+func_infer_tag () +{ + $debug_cmd + + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with '--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=$1 + if test yes = "$build_libtool_libs"; then + write_lobj=\'$2\' + else + write_lobj=none + fi + + if test yes = "$build_old_libs"; then + write_oldobj=\'$3\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. 
+func_convert_core_path_wine_to_w32 () +{ + $debug_cmd + + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result= + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result"; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $debug_cmd + + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" -ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $debug_cmd + + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $debug_cmd + + if test -z "$2" && test -n "$1"; then + func_error "Could not determine host file name corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result=$1 + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $debug_cmd + + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " '$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. 
This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result=$3 + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $debug_cmd + + case $4 in + $1 ) func_to_host_path_result=$3$func_to_host_path_result + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via '$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $debug_cmd + + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. +func_to_tool_file () +{ + $debug_cmd + + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result=$1 +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result=$func_convert_core_msys_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. 
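+# Illustrative behaviour of the external helpers involved (results depend on
+# the local wine/Cygwin installation; the paths shown are examples only):
+#   winepath -w /home/user/foo.c   ->  Z:\home\user\foo.c
+#   cygpath  -m /usr/local/lib     ->  C:/cygwin64/usr/local/lib
+#   cygpath  -u 'C:\Temp'          ->  /cygdrive/c/Temp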
+func_convert_file_nix_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result=$func_convert_core_file_wine_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via '$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. +# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $debug_cmd + + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd=func_convert_path_$func_stripname_result + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $debug_cmd + + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result=$1 +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. 
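+# Note on the naming convention relied upon above (illustrative values): given
+#   to_host_file_cmd=func_convert_file_cygwin_to_w32
+# func_init_to_host_path_cmd strips the 'func_convert_file_' prefix and
+# re-prefixes it, yielding
+#   to_host_path_cmd=func_convert_path_cygwin_to_w32
+# which is why every file/path conversion pair must share its 'X_to_Y' suffix.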
+func_convert_path_msys_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_msys_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. +func_convert_path_nix_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_path_wine_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_path_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. 
+ func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_dll_def_p FILE +# True iff FILE is a Windows DLL '.def' file. +# Keep in sync with _LT_DLL_DEF_P in libtool.m4 +func_dll_def_p () +{ + $debug_cmd + + func_dll_def_p_tmp=`$SED -n \ + -e 's/^[ ]*//' \ + -e '/^\(;.*\)*$/d' \ + -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ + -e q \ + "$1"` + test DEF = "$func_dll_def_p_tmp" +} + + +# func_mode_compile arg... +func_mode_compile () +{ + $debug_cmd + + # Get the compilation command and the source file. + base_compile= + srcfile=$nonopt # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg=$arg + arg_mode=normal + ;; + + target ) + libobj=$arg + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify '-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs=$IFS; IFS=, + for arg in $args; do + IFS=$save_ifs + func_append_quoted lastarg "$arg" + done + IFS=$save_ifs + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg=$srcfile + srcfile=$arg + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with '-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj=$func_basename_result + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? 
| *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from '$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test yes = "$build_libtool_libs" \ + || func_fatal_configuration "cannot build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name '$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname=$func_basename_result + xdir=$func_dirname_result + lobj=$xdir$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. + if test yes = "$build_old_libs"; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test no = "$compiler_c_o"; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext + lockfile=$output_obj.lock + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test yes = "$need_locks"; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test warn = "$need_locks"; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test yes = "$build_libtool_libs"; then + # Without this assignment, base_compile gets emptied. 
+ fbsd_hideous_sh_bug=$base_compile + + if test no != "$pic_mode"; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. + if test yes = "$suppress_opt"; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test yes = "$build_old_libs"; then + if test yes != "$pic_mode"; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test yes = "$compiler_c_o"; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test no != "$need_locks"; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test compile = "$opt_mode" && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. 
+ +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a '.o' file suitable for static linking + -static only build a '.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a 'standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix '.c' with the +library object suffix, '.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to '-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the '--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the 'install' or 'cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. 
+ +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE use a list of object files found in FILE to specify objects + -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with '-') are ignored. + +Every other argument is treated as a filename. Files ending in '.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in '.la', then a libtool library is created, +only library objects ('.lo' files) may be specified, and '-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created +using 'ar' and 'ranlib', or on Windows using 'lib'. + +If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode '$opt_mode'" + ;; + esac + + echo + $ECHO "Try '$progname --help' for more information about other modes." 
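+  # Illustrative link-mode invocation matching the options documented above
+  # (file names and version numbers are examples only):
+  #   ./libtool --mode=link gcc -o libfoo.la foo.lo bar.lo \
+  #       -rpath /usr/local/lib -version-info 1:0:0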
+} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test : = "$opt_help"; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | $SED -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + $SED '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $debug_cmd + + # The first argument is the command name. + cmd=$nonopt + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "'$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "'$file' was not linked with '-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir=$func_dirname_result + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." + dir=$func_dirname_result + ;; + + *) + func_warning "'-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir=$absdir + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic=$magic + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file=$progdir/$program + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file=$progdir/$program + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if $opt_dry_run; then + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + else + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. 
+ eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd=\$cmd$args + fi +} + +test execute = "$opt_mode" && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $debug_cmd + + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "'$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument '$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and '=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. + $opt_quiet && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the '-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the '$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the '$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the '$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." 
+ ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test finish = "$opt_mode" && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $debug_cmd + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac + then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=false + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=: ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test X-m = "X$prev" && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. + func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the '$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=: + if $isdir; then + destdir=$dest + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir=$func_dirname_result + destname=$func_basename_result + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "'$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "'$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. 
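+    # The case statement that follows dispatches on the kind of FILE being
+    # installed: static archives (*.$libext) are deferred until the end,
+    # libtool archives (*.la) may be relinked against their final prefix and
+    # are installed together with their symlinks, libtool objects (*.lo) are
+    # copied along with their non-PIC counterparts, and anything else is
+    # treated as a program (possibly a libtool wrapper that needs relinking).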
+ case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir=$func_dirname_result + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking '$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname=$1 + shift + + srcname=$realname + test -n "$relink_command" && srcname=${realname}T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme=$stripme + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme= + ;; + esac + ;; + os2*) + case $realname in + *_dll.a) + tstripme= + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try 'ln -sf' first, because the 'ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib=$destdir/$realname + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name=$func_basename_result + instname=$dir/${name}i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. 
+ test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest=$destfile + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to '$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test yes = "$build_old_libs"; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext= + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=.exe + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script '$wrapper'" + + finalize=: + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "'$lib' has not been installed in '$libdir'" + finalize=false + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test no = "$fast_install" && test -n "$relink_command"; then + $opt_dry_run || { + if $finalize; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file=$func_basename_result + outputname=$tmpdir/$file + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_quiet || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink '$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file=$outputname + else + func_warning "cannot relink '$file'" + fi + } + else + # Install the binary that we compiled earlier. 
+ file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name=$func_basename_result + + # Set up the ranlib parameters. + oldlib=$destdir/$name + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run '$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test install = "$opt_mode" && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $debug_cmd + + my_outputname=$1 + my_originator=$2 + my_pic_p=${3-false} + my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms=${my_outputname}S.c + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist=$output_objdir/$my_outputname.nm + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. 
*/\ +" + + if test yes = "$dlself"; then + func_verbose "generating symbol list for '$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. + progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from '$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols=$output_objdir/$outputname.exp + $opt_dry_run || { + $RM $export_symbols + eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from '$dlprefile'" + func_basename "$dlprefile" + name=$func_basename_result + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename= + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname"; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename=$func_basename_result + else + # no lafile. user explicitly requested -dlpreopen . 
+ $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename"; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + func_show_eval '$RM "${nlist}I"' + if test -n "$global_symbol_to_import"; then + eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I' + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[];\ +" + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ +static void lt_syminit(void) +{ + LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols; + for (; symbol->name; ++symbol) + {" + $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms" + echo >> "$output_objdir/$my_dlsyms" "\ + } +}" + fi + echo >> "$output_objdir/$my_dlsyms" "\ +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{ {\"$my_originator\", (void *) 0}," + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ + {\"@INIT@\", (void *) <_syminit}," + fi + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. 
But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + $my_pic_p && pic_flag_for_symtable=" $pic_flag" + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' + + # Transform the symbol file into the correct name. + symfileobj=$output_objdir/${my_outputname}S.$objext + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for '$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. 
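+# For example, a GNU-style import library typically yields "x86 archive import",
+# a plain static archive yields "x86 archive static", and a PE image yields
+# "x86 DLL" (see the case statement below).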
+func_win32_libid () +{ + $debug_cmd + + win32_libid_type=unknown + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + case $nm_interface in + "MS dumpbin") + if func_cygming_ms_implib_p "$1" || + func_cygming_gnu_implib_p "$1" + then + win32_nmres=import + else + win32_nmres= + fi + ;; + *) + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s|.*|import| + p + q + } + }'` + ;; + esac + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $debug_cmd + + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $debug_cmd + + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive that possess that section. Heuristic: eliminate + # all those that have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' 
or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $debug_cmd + + if func_cygming_gnu_implib_p "$1"; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1"; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result= + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $debug_cmd + + f_ex_an_ar_dir=$1; shift + f_ex_an_ar_oldlib=$1 + if test yes = "$lock_old_archive_extraction"; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test yes = "$lock_old_archive_extraction"; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $debug_cmd + + my_gentop=$1; shift + my_oldlibs=${1+"$@"} + my_oldobjs= + my_xlib= + my_xabs= + my_xdir= + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib=$func_basename_result + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir=$my_gentop/$my_xlib_u + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + func_basename "$darwin_archive" + darwin_base_archive=$func_basename_result + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches; do + func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" + $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" + cd "unfat-$$/$darwin_base_archive-$darwin_arch" + func_extract_an_archive "`pwd`" "$darwin_base_archive" + cd "$darwin_curdir" + $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result=$my_oldobjs +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory where it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ that is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options that match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test yes = "$fast_install"; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + \$ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. 
+ if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* declarations of non-ANSI functions */ +#if defined __MINGW32__ +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined __CYGWIN__ +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined other_platform || defined ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined _MSC_VER +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +#elif defined __MINGW32__ +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined __CYGWIN__ +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined other platforms ... 
*/ +#endif + +#if defined PATH_MAX +# define LT_PATHMAX PATH_MAX +#elif defined MAXPATHLEN +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ + defined __OS2__ +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free (stale); stale = 0; } \ +} while (0) + +#if defined LT_DEBUGWRAPPER +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + size_t tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined HAVE_DOS_BASED_FILE_SYSTEM + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined HAVE_DOS_BASED_FILE_SYSTEM + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = (size_t) (q - p); + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (STREQ (str, pat)) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + size_t len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + size_t orig_value_len = strlen (orig_value); + size_t add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + size_t len = strlen (new_value); + while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[--len] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $debug_cmd + + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_suncc_cstd_abi +# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! +# Several compiler flags select an ABI that is incompatible with the +# Cstd library. Avoid specifying it if any are in CXXFLAGS. 
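+# For example, '-std=c++11' or '-library=stlport4' on the compile command line
+# selects a non-Cstd ABI, so suncc_use_cstd_abi is set to 'no' below.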
+func_suncc_cstd_abi () +{ + $debug_cmd + + case " $compile_command " in + *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) + suncc_use_cstd_abi=no + ;; + *) + suncc_use_cstd_abi=yes + ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $debug_cmd + + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # what system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll that has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + os2dllname= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=false + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module=$wl-single_module + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test yes != "$build_libtool_libs" \ + && func_fatal_configuration "cannot build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg=$1 + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. 
+ if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir=$arg + prev= + continue + ;; + dlfiles|dlprefiles) + $preload || { + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=: + } + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test no = "$dlself"; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test dlprefiles = "$prev"; then + dlself=yes + elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test dlfiles = "$prev"; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols=$arg + test -f "$arg" \ + || func_fatal_error "symbol file '$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex=$arg + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir=$arg + prev= + continue + ;; + mllvm) + # Clang does not use LLVM to link, so we can simply discard any + # '-mllvm $arg' options when doing the link step. + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + if test none != "$pic_object"; then + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + fi + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file '$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + os2dllname) + os2dllname=$arg + prev= + continue + ;; + precious_regex) + precious_files_regex=$arg + prev= + continue + ;; + release) + release=-$arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test rpath = "$prev"; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds=$arg + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg=$arg + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "'-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test X-export-symbols = "X$arg"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between '-L' and '$1'" + else + func_fatal_error "need path for '-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of '$dir'" + dir=$absdir + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test X-lc = "X$arg" || test X-lm = "X$arg"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test X-lc = "X$arg" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc due to us having libc/libc_r. + test X-lc = "X$arg" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test X-lc = "X$arg" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test X-lc = "X$arg" && continue + ;; + esac + elif test X-lc_r = "X$arg"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -mllvm) + prev=mllvm + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module=$wl-multi_module + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "'-no-install' is ignored for $host" + func_warning "assuming '-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -os2dllname) + prev=os2dllname + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # -fstack-protector* stack protector flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + # -specs=* GCC specs files + # -stdlib=* select c++ std lib with clang + # -fsanitize=* Clang/GCC memory and address 
sanitizer + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ + -specs=*|-fsanitize=*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + -Z*) + if test os2 = "`expr $host : '.*\(os2\)'`"; then + # OS/2 uses -Zxxx to specify OS/2-specific options + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case $arg in + -Zlinker | -Zstack) + prev=xcompiler + ;; + esac + continue + else + # Otherwise treat like 'Some other compiler flag' below + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + fi + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + test none = "$pic_object" || { + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + } + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test dlfiles = "$prev"; then + # This library was specified with -dlopen. 
+ func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test dlprefiles = "$prev"; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the '$prevarg' option requires an argument" + + if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname=$func_basename_result + libobjs_save=$libobjs + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + # Definition is injected by LT_CONFIG during libtool generation. + func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" + + func_dirname "$output" "/" "" + output_objdir=$func_dirname_result$objdir + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test lib = "$linkmode"; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
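+  # Illustrative sketch (editorial addition, not part of upstream libtool):
+  # the duplicate checks above and below rely on the space-padded
+  # 'case " $list " in *" $item "*)' idiom, so '-la' can never match inside
+  # '-lalpha'.  The hypothetical helper below is never invoked; it replays
+  # the '-la -lb -la' example from the comment above.
+  func_doc_example_specialdeplibs ()
+  {
+    ex_libs= ex_specialdeplibs=
+    for ex_deplib in -la -lb -la; do
+      case " $ex_libs " in
+      *" $ex_deplib "*) ex_specialdeplibs="$ex_specialdeplibs $ex_deplib" ;;
+      esac
+      ex_libs="$ex_libs $ex_deplib"
+    done
+    # ex_specialdeplibs is now ' -la': it was listed twice, so the later
+    # duplicate removal must keep every occurrence of it.
+  }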
+ pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=false + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... + if test lib,link = "$linkmode,$pass"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs=$tmp_deplibs + fi + + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass"; then + libs=$deplibs + deplibs= + fi + if test prog = "$linkmode"; then + case $pass in + dlopen) libs=$dlfiles ;; + dlpreopen) libs=$dlprefiles ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test lib,dlpreopen = "$linkmode,$pass"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs=$dlprefiles + fi + if test dlopen = "$pass"; then + # Collect dlpreopened libraries + save_deplibs=$deplibs + deplibs= + fi + + for deplib in $libs; do + lib= + found=false + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test lib != "$linkmode" && test prog != "$linkmode"; then + func_warning "'-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test lib = "$linkmode"; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext 
.so .a; do + # Search the libtool library + lib=$searchdir/lib$name$search_ext + if test -f "$lib"; then + if test .la = "$search_ext"; then + found=: + else + found=false + fi + break 2 + fi + done + done + if $found; then + # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll=$l + done + if test "X$ll" = "X$old_library"; then # only static version available + found=false + func_dirname "$lib" "" "." + ladir=$func_dirname_result + lib=$ladir/$old_library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + else + # deplib doesn't seem to be a libtool library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + ;; # -l + *.ltframework) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test conv = "$pass" && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + if test scan = "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "'-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test link = "$pass"; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. 
+ case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=false + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=: + fi + ;; + pass_all) + valid_a_lib=: + ;; + esac + if $valid_a_lib; then + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + else + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + fi + ;; + esac + continue + ;; + prog) + if test link != "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + elif test prog = "$linkmode"; then + if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=: + continue + ;; + esac # case $deplib + + $found || test -f "$lib" \ + || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "'$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir=$func_dirname_result + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass" || + { test prog != "$linkmode" && test lib != "$linkmode"; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test conv = "$pass"; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test prog != "$linkmode" && test lib != "$linkmode"; then + func_fatal_error "'$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test yes = "$prefer_static_libs" || + test built,no = "$prefer_static_libs,$installed"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib=$l + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + + # This library was specified with -dlopen. + if test dlopen = "$pass"; then + test -z "$libdir" \ + && func_fatal_error "cannot -dlopen a convenience library: '$lib'" + if test -z "$dlname" || + test yes != "$dlopen_support" || + test no = "$build_libtool_libs" + then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of '$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir=$ladir + fi + ;; + esac + func_basename "$lib" + laname=$func_basename_result + + # Find the relevant object directory and library name. + if test yes = "$installed"; then + if test ! 
-f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library '$lib' was moved." + dir=$ladir + absdir=$abs_ladir + libdir=$abs_ladir + else + dir=$lt_sysroot$libdir + absdir=$lt_sysroot$libdir + fi + test yes = "$hardcode_automatic" && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir=$ladir + absdir=$abs_ladir + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir=$ladir/$objdir + absdir=$abs_ladir/$objdir + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test dlpreopen = "$pass"; then + if test -z "$libdir" && test prog = "$linkmode"; then + func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" + fi + case $host in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test lib = "$linkmode"; then + deplibs="$dir/$old_library $deplibs" + elif test prog,link = "$linkmode,$pass"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test prog = "$linkmode" && test link != "$pass"; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=false + if test no != "$link_all_deplibs" || test -z "$library_names" || + test no = "$build_libtool_libs"; then + linkalldeplibs=: + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if $linkalldeplibs; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test prog,link = "$linkmode,$pass"; then + if test -n "$library_names" && + { { test no = "$prefer_static_libs" || + test built,yes = "$prefer_static_libs,$installed"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then + # Make sure the rpath contains only unique directories. + case $temp_rpath: in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if $alldeplibs && + { test pass_all = "$deplibs_check_method" || + { test yes = "$build_libtool_libs" && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test built = "$use_static_libs" && test yes = "$installed"; then + use_static_libs=no + fi + if test -n "$library_names" && + { test no = "$use_static_libs" || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc* | *os2*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test no = "$installed"; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule= + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule=$dlpremoduletest + break + fi + done + if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then + echo + if test prog = "$linkmode"; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test lib = "$linkmode" && + test yes = "$hardcode_into_libs"; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname=$1 + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname=$dlname + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc* | *os2*) + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + esac + eval soname=\"$soname_spec\" + else + soname=$realname + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot=$soname + func_basename "$soroot" + soname=$func_basename_result + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from '$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for '$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test prog = "$linkmode" || test relink != "$opt_mode"; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test no = "$hardcode_direct"; then + add=$dir/$linklib + case $host in + *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; + *-*-sysv4*uw2*) add_dir=-L$dir ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir=-L$dir ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we cannot + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library"; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add=$dir/$old_library + fi + elif test -n "$old_library"; then + add=$dir/$old_library + fi + fi + esac + elif test no = "$hardcode_minus_L"; then + case $host in + *-*-sunos*) add_shlibpath=$dir ;; + esac + add_dir=-L$dir + add=-l$name + elif test no = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + relink) + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$dir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$absdir + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test yes != "$lib_linked"; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test prog = "$linkmode"; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test yes != "$hardcode_direct" && + test yes != "$hardcode_minus_L" && + test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test prog = "$linkmode" || test relink = "$opt_mode"; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$libdir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$libdir + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add=-l$name + elif test yes = "$hardcode_automatic"; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib"; then + add=$inst_prefix_dir$libdir/$linklib + else + add=$libdir/$linklib + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir=-L$libdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + fi + + if test prog = "$linkmode"; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test prog = "$linkmode"; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test unsupported != "$hardcode_direct"; then + test -n "$old_library" && linklib=$old_library + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test yes = "$build_libtool_libs"; then + # Not a shared library + if test pass_all != "$deplibs_check_method"; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system cannot link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. 
But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test yes = "$module"; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test lib = "$linkmode"; then + if test -n "$dependency_libs" && + { test yes != "$hardcode_into_libs" || + test yes = "$build_old_libs" || + test yes = "$link_static"; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs=$temp_deplibs + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test no != "$link_all_deplibs"; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path=$deplib ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of '$dir'" + absdir=$dir + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names"; then + for tmp in $deplibrary_names; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl"; then + depdepl=$absdir/$objdir/$depdepl + darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" + func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" + path= + fi + fi + ;; + *) + path=-L$absdir/$objdir + ;; + esac + else + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "'$deplib' seems to be moved" + + path=-L$absdir + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test link = "$pass"; then + if test prog = "$linkmode"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs=$newdependency_libs + if test dlpreopen = "$pass"; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test dlopen != "$pass"; then + test conv = "$pass" || { + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + } + + if test prog,link = "$linkmode,$pass"; then + vars="compile_deplibs finalize_deplibs" + else + vars=deplibs + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + + # Add Sun CC postdeps if required: + test CXX = "$tagname" && { + case $host_os in + linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C++ 5.9 + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + + solaris*) + func_cc_basename "$CC" + case $func_cc_basename_result in + CC* | sunCC*) + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + esac + } + + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i= + ;; + esac + if test -n "$i"; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test prog = "$linkmode"; then + dlfiles=$newdlfiles + fi + if test prog = "$linkmode" || test lib = "$linkmode"; then + dlprefiles=$newdlprefiles + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "'-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "'-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs=$output + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form 'libNAME.la'. 
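+ # Worked example (editorial addition, not part of upstream libtool), for
+ # the typical case below: with outputname=libzephyr.la, func_stripname
+ # 'lib' '.la' yields name=zephyr; libname_spec is usually 'lib$name', so
+ # libname becomes libzephyr, and shrext_cmds commonly expands to '.so'
+ # (ELF hosts) or '.dylib' (darwin).  'libzephyr' here is only an
+ # illustrative name.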
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test no = "$module" \ + && func_fatal_help "libtool library '$output' must begin with 'lib'" + + if test no != "$need_lib_prefix"; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test pass_all != "$deplibs_check_method"; then + func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test no = "$dlself" \ + || func_warning "'-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test 1 -lt "$#" \ + && func_warning "ignoring multiple '-rpath's for a libtool library" + + install_libdir=$1 + + oldlibs= + if test -z "$rpath"; then + if test yes = "$build_libtool_libs"; then + # Building a libtool convenience library. + # Some compilers have problems with a '.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "'-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs=$IFS; IFS=: + set dummy $vinfo 0 0 0 + shift + IFS=$save_ifs + + test -n "$7" && \ + func_fatal_help "too many parameters to '-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major=$1 + number_minor=$2 + number_revision=$3 + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # that has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|freebsd-elf|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_revision + ;; + freebsd-aout|qnx|sunos) + current=$number_major + revision=$number_minor + age=0 + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_minor + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type '$version_type'" + ;; + esac + ;; + no) + current=$1 + revision=$2 + age=$3 + ;; + esac + + # Check that each of the things are valid numbers. 
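+ # Worked example (editorial addition, not part of upstream libtool),
+ # assuming the common gnu/linux case handled above and below:
+ #   -version-info 5:1:2    ->  current=5  revision=1  age=2
+ #   -version-number 3:2:1  ->  current=3+2=5, age=2, revision=1 (equivalent)
+ # The linux branch further down then computes major=.(current-age)=.3 and
+ # versuffix=.3.2.1, i.e. an illustrative libfoo.la typically installs
+ # libfoo.so.3.2.1 with soname libfoo.so.3.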
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT '$current' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION '$revision' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE '$age' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE '$age' is greater than the current interface number '$current'" + func_fatal_error "'$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + # On Darwin other compilers + case $CC in + nagfor*) + verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + ;; + *) + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + esac + ;; + + freebsd-aout) + major=.$current + versuffix=.$current.$revision + ;; + + freebsd-elf) + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + irix | nonstopux) + if test no = "$lt_irix_increment"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring=$verstring_prefix$major.$revision + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test 0 -ne "$loop"; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring_prefix$major.$iface:$verstring + done + + # Before this point, $major must not contain '.'. + major=.$major + versuffix=$major.$revision + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=.$current.$age.$revision + verstring=$current.$age.$revision + + # Add in all the interfaces that we are compatible with. + loop=$age + while test 0 -ne "$loop"; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring:$iface.0 + done + + # Make executables depend on our current version. 
+ func_append verstring ":$current.0" + ;; + + qnx) + major=.$current + versuffix=.$current + ;; + + sco) + major=.$current + versuffix=.$current + ;; + + sunos) + major=.$current + versuffix=.$current.$revision + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 file systems. + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + + *) + func_fatal_configuration "unknown library version type '$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring=0.0 + ;; + esac + if test no = "$need_version"; then + versuffix= + else + versuffix=.0.0 + fi + fi + + # Remove version info from name if versioning should be avoided + if test yes,no = "$avoid_version,$need_version"; then + major= + versuffix= + verstring= + fi + + # Check to see if the archive will have undefined symbols. + if test yes = "$allow_undefined"; then + if test unsupported = "$allow_undefined_flag"; then + if test yes = "$build_old_libs"; then + func_warning "undefined symbols not allowed in $host shared libraries; building static only" + build_libtool_libs=no + else + func_fatal_error "can't build $host shared library unless -no-undefined is specified" + fi + fi + else + # Don't allow undefined symbols. + allow_undefined_flag=$no_undefined_flag + fi + + fi + + func_generate_dlsyms "$libname" "$libname" : + func_append libobjs " $symfileobj" + test " " = "$libobjs" && libobjs= + + if test relink != "$opt_mode"; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) + if test -n "$precious_files_regex"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles=$dlfiles + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles=$dlprefiles + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test yes = "$build_libtool_libs"; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test yes = "$build_libtool_need_lc"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release= + versuffix= + major= + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
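+ # For illustration (hypothetical names): if libfoo.so -> libfoo.so.1 -> libfoo.so.1.2.3,
+ # the loop below resolves the chain to libfoo.so.1.2.3 before the file-magic test runs.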
+ potlib=$potent_lib + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | $SED 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; + *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib= + ;; + esac + fi + if test -n "$a_deplib"; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib=$potent_lib # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs= + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + for i in $predeps $postdeps; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test none = "$deplibs_check_method"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test yes = "$droppeddeps"; then + if test yes = "$module"; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test no = "$allow_undefined"; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
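+ # To recap: with deplibs_check_method=pass_all every deplib was copied into newdeplibs
+ # unchanged, the other check methods kept only the dependencies that passed their test
+ # (recording droppeddeps=yes for the rest), and the none/unknown case dropped declared
+ # inter-library dependencies entirely.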
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs=$new_libs + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test yes = "$build_libtool_libs"; then + # Remove $wl instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test yes = "$hardcode_into_libs"; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath=$finalize_rpath + test relink = "$opt_mode" || rpath=$compile_rpath$rpath + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath=$finalize_shlibpath + test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
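+ # Hypothetical example: on a GNU/Linux host library_names_spec typically expands to
+ # something like 'libfoo.so.1.2.3 libfoo.so.1 libfoo.so'; the first entry becomes
+ # $realname below and the remaining names are created later as symlinks to it.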
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname=$1 + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname=$realname + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib=$output_objdir/$realname + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols=$output_objdir/$libname.uexp + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + func_dll_def_p "$export_symbols" || { + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols=$export_symbols + export_symbols= + always_export_symbols=yes + } + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs=$IFS; IFS='~' + for cmd1 in $cmds; do + IFS=$save_ifs + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test yes = "$try_normal_branch" \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=$output_objdir/$output_la.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS=$save_ifs + if test -n "$export_symbols_regex" && test : != "$skipped_export"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test : != "$skipped_export" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs=$tmp_deplibs + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test yes = "$compiler_needs_object" && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test : != "$skipped_export" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
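+ # For illustration, the GNU ld script generated below is simply
+ #   INPUT (
+ #   first.o
+ #   second.o
+ #   )
+ # and is handed to the linker in place of the overlong object list.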
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then + output=$output_objdir/$output_la.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then + output=$output_objdir/$output_la.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test yes = "$compiler_needs_object"; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-$k.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test -z "$objlist" || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test 1 -eq "$k"; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-$k.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-$k.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
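+ # For illustration: if the objects split into three batches, this pass emits
+ # $output_la-1.o, $output_la-2.o and $output_la-3.o, each reload step linking in the
+ # previously created partial object via $last_robj.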
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + ${skipped_export-false} && { + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + } + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs=$IFS; IFS='~' + for cmd in $concat_cmds; do + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + ${skipped_export-false} && { + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + } + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. 
+ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs=$IFS; IFS='~' + for cmd in $cmds; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test yes = "$module" || test yes = "$export_dynamic"; then + # On all known operating systems, these are identical. + dlname=$soname + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "'-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object '$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj=$output + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # if reload_cmds runs $LD directly, get rid of -Wl from + # whole_archive_flag_spec and hope we can get by with turning comma + # into space. 
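+ # For illustration: clearing $wl below and turning commas into spaces converts a
+ # compiler-driver style flag such as '-Wl,--whole-archive' into the bare
+ # '--whole-archive' form that $LD itself understands.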
+ case $reload_cmds in
+ *\$LD[\ \$]*) wl= ;;
+ esac
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
+ test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'`
+ reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags
+ else
+ gentop=$output_objdir/${obj}x
+ func_append generated " $gentop"
+
+ func_extract_archives $gentop $convenience
+ reload_conv_objs="$reload_objs $func_extract_archives_result"
+ fi
+ fi
+
+ # If we're not building shared, we need to use non_pic_objs
+ test yes = "$build_libtool_libs" || libobjs=$non_pic_objects
+
+ # Create the old-style object.
+ reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs
+
+ output=$obj
+ func_execute_cmds "$reload_cmds" 'exit $?'
+
+ # Exit if we aren't doing a library object file.
+ if test -z "$libobj"; then
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ exit $EXIT_SUCCESS
+ fi
+
+ test yes = "$build_libtool_libs" || {
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we don't
+ # accidentally link it into a program.
+ # $show "echo timestamp > $libobj"
+ # $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
+ exit $EXIT_SUCCESS
+ }
+
+ if test -n "$pic_flag" || test default != "$pic_mode"; then
+ # Only do commands if we really have different PIC objects.
+ reload_objs="$libobjs $reload_conv_objs"
+ output=$libobj
+ func_execute_cmds "$reload_cmds" 'exit $?'
+ fi
+
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ exit $EXIT_SUCCESS
+ ;;
+
+ prog)
+ case $host in
+ *cygwin*) func_stripname '' '.exe' "$output"
+ output=$func_stripname_result.exe;;
+ esac
+ test -n "$vinfo" && \
+ func_warning "'-version-info' is ignored for programs"
+
+ test -n "$release" && \
+ func_warning "'-release' is ignored for programs"
+
+ $preload \
+ && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \
+ && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support."
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody replace the C library with the System framework
+ compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'`
+ finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'`
+ ;;
+ esac
+
+ case $host in
+ *-*-darwin*)
+ # Don't allow lazy linking, it breaks C++ global constructors
+ # But is supposedly fixed on 10.4 or later (yay!).
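+ # The workaround below adds "$wl-bind_at_load" to the compile and finalize commands
+ # when the CXX tag is in use and MACOSX_DEPLOYMENT_TARGET is 10.0 through 10.3.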
+ if test CXX = "$tagname"; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " $wl-bind_at_load" + func_append finalize_command " $wl-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs=$new_libs + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath=$rpath + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath=$rpath + + if test -n "$libobjs" && test yes = "$build_old_libs"; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" false + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=: + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=false + ;; + *cygwin* | *mingw* ) + test yes = "$build_libtool_libs" || wrappers_required=false + ;; + *) + if test no = "$need_relink" || test yes != "$build_libtool_libs"; then + wrappers_required=false + fi + ;; + esac + $wrappers_required || { + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command=$compile_command$compile_rpath + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.$objext"; then + func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' + fi + + exit $exit_status + } + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test yes = "$no_install"; then + # We don't need to create a wrapper script. + link_command=$compile_var$compile_command$compile_rpath + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + case $hardcode_action,$fast_install in + relink,*) + # Fast installation is not supported + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "'$output' will be relinked during installation" + ;; + *,yes) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + ;; + *,no) + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + ;; + *,needless) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command= + ;; + esac + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource=$output_path/$objdir/lt-$output_name.c + cwrapper=$output_path/$output_name.exe + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. 
If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host"; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + case $build_libtool_libs in + convenience) + oldobjs="$libobjs_save $symfileobj" + addlibs=$convenience + build_libtool_libs=no + ;; + module) + oldobjs=$libobjs_save + addlibs=$old_convenience + build_libtool_libs=no + ;; + *) + oldobjs="$old_deplibs $non_pic_objects" + $preload && test -f "$symfileobj" \ + && func_append oldobjs " $symfileobj" + addlibs=$old_convenience + ;; + esac + + if test -n "$addlibs"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase=$func_basename_result + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! 
-f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj"; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test -z "$oldobjs"; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test yes = "$build_old_libs" && old_library=$libname.$libext + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test yes = "$hardcode_automatic"; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test yes = "$installed"; then + if test -z "$install_libdir"; then + break + fi + output=$output_objdir/${outputname}i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name=$func_basename_result + func_resolve_sysroot "$deplib" + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs=$newdependency_libs + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles=$newdlprefiles + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles=$newdlprefiles + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test -n "$bindir"; then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result/$dlname + else + # Otherwise fall back on heuristic. 
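+ # That is, assume the DLL will live in ../bin relative to $install_libdir, the
+ # conventional layout on PE hosts when no -bindir argument was given.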
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test no,yes = "$installed,$need_relink"; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +if test link = "$opt_mode" || test relink = "$opt_mode"; then + func_mode_link ${1+"$@"} +fi + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $debug_cmd + + RM=$nonopt + files= + rmforce=false + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=: ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir=$func_dirname_result + if test . = "$dir"; then + odir=$objdir + else + odir=$dir/$objdir + fi + func_basename "$file" + name=$func_basename_result + test uninstall = "$opt_mode" && odir=$dir + + # Remember odir for removal later, being careful to avoid duplicates + if test clean = "$opt_mode"; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif $rmforce; then + continue + fi + + rmfiles=$file + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case $opt_mode in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && test none != "$pic_object"; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && test none != "$non_pic_object"; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test clean = "$opt_mode"; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.$objext" + if test yes = "$fast_install" && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name"; then + func_append rmfiles " $odir/lt-$noexename.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the $objdir's in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then + func_mode_uninstall ${1+"$@"} +fi + +test -z "$opt_mode" && { + help=$generic_help + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode '$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# where we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD="ld" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="g++" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. +archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-flat_namespace \$wl-undefined \${wl}suppress" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. 
+hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs="" + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects="" +postdep_objects="" +predeps="" +postdeps="" + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path="" + +# ### END LIBTOOL TAG CONFIG: CXX diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/local.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/local.exp new file mode 100644 index 0000000..2af97f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/local.exp @@ -0,0 +1,2 @@ +set CC_FOR_TARGET "gcc -fdeclspec" +set CXX_FOR_TARGET "g++" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/man/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/man/Makefile new file mode 100644 index 0000000..ceb4002 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/man/Makefile @@ -0,0 +1,559 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# man/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.3.0 +host_triplet = arm-apple-darwin21.3.0 +target_triplet = arm-apple-darwin21.3.0 +subdir = man +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) 
+am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +man3dir = $(mandir)/man3 +am__installdirs = "$(DESTDIR)$(man3dir)" +NROFF = nroff +MANS = $(man_MANS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = 
+LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/man +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = gcc -fdeclspec +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.3.0 +build_alias = +build_cpu = arm +build_os = darwin21.3.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.3.0 +host_alias = +host_cpu = arm +host_os = darwin21.3.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.3.0 +target_alias = +target_cpu = arm +target_os = darwin21.3.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. 
+top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign man/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign man/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-man3: $(man_MANS) + @$(NORMAL_INSTALL) + @list1=''; \ + list2='$(man_MANS)'; \ + test -n "$(man3dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.3[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \ + done; } + +uninstall-man3: + @$(NORMAL_UNINSTALL) + @list=''; test -n "$(man3dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ + sed -n '/\.3[a-z]*$$/p'; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in 
\ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(MANS) +installdirs: + for dir in "$(DESTDIR)$(man3dir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-man + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: install-man3 + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-man + +uninstall-man: uninstall-man3 + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + cscopelist-am ctags-am distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-man3 install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags-am uninstall uninstall-am uninstall-man \ + uninstall-man3 + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/.deps/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/.deps/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/.deps/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/.deps/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/ffi.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/ffi.lo new file mode 100644 index 0000000..45384d2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/ffi.lo @@ -0,0 +1,12 @@ +# src/arm/ffi.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. 
+pic_object='.libs/ffi.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/sysv.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/sysv.lo new file mode 100644 index 0000000..74823df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/arm/sysv.lo @@ -0,0 +1,12 @@ +# src/arm/sysv.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/sysv.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/closures.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/closures.lo new file mode 100644 index 0000000..613cbdf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/closures.lo @@ -0,0 +1,12 @@ +# src/closures.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/closures.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/java_raw_api.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/java_raw_api.lo new file mode 100644 index 0000000..0b99622 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/java_raw_api.lo @@ -0,0 +1,12 @@ +# src/java_raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/java_raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/prep_cif.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/prep_cif.lo new file mode 100644 index 0000000..1ab7932 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/prep_cif.lo @@ -0,0 +1,12 @@ +# src/prep_cif.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/prep_cif.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/raw_api.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/raw_api.lo new file mode 100644 index 0000000..e7e6231 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/raw_api.lo @@ -0,0 +1,12 @@ +# src/raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. 
+pic_object='.libs/raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/types.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/types.lo new file mode 100644 index 0000000..40b7aec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/src/types.lo @@ -0,0 +1,12 @@ +# src/types.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/types.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/stamp-h1 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/stamp-h1 new file mode 100644 index 0000000..e43c201 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/stamp-h1 @@ -0,0 +1 @@ +timestamp for fficonfig.h diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/testsuite/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/testsuite/Makefile new file mode 100644 index 0000000..fbf70a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/testsuite/Makefile @@ -0,0 +1,645 @@ +# Makefile.in generated by automake 1.15.1 from Makefile.am. +# testsuite/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2017 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = arm-apple-darwin21.3.0 +host_triplet = arm-apple-darwin21.3.0 +target_triplet = arm-apple-darwin21.3.0 +subdir = testsuite +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DEJATOOL = $(PACKAGE) +RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir +EXPECT = expect +RUNTEST = runtest 
+am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing aclocal-1.15 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing automake-1.15 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = /usr/bin/nm -B +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = ARM +TARGETDIR = arm +TARGET_OBJ = src/arm/ffi.lo src/arm/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21/testsuite +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = gcc -fdeclspec +ac_ct_CXX = g++ +ac_ct_DUMPBIN = +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = arm-apple-darwin21.3.0 +build_alias = +build_cpu = arm +build_os = darwin21.3.0 +build_vendor = apple +builddir = . 
+datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = arm-apple-darwin21.3.0 +host_alias = +host_cpu = arm +host_os = darwin21.3.0 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = arm-apple-darwin21.3.0 +target_alias = +target_cpu = arm +target_os = darwin21.3.0 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign dejagnu +EXTRA_DEJAGNU_SITE_CONFIG = ../local.exp +CLEANFILES = *.exe core* *.log *.sum +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ 
+libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ 
+libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign testsuite/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign testsuite/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +check-DEJAGNU: site.exp + srcdir='$(srcdir)'; export srcdir; \ + EXPECT=$(EXPECT); export EXPECT; \ + if $(SHELL) -c "$(RUNTEST) --version" > /dev/null 2>&1; then \ + exit_status=0; l='$(DEJATOOL)'; for tool in $$l; do \ + if $(RUNTEST) $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \ + then :; else exit_status=1; fi; \ + done; \ + else echo "WARNING: could not find '$(RUNTEST)'" 1>&2; :;\ + fi; \ + exit $$exit_status +site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG) + @echo 'Making a new site.exp file ...' + @echo '## these variables are automatically generated by make ##' >site.tmp + @echo '# Do not edit here. 
If you wish to override these values' >>site.tmp + @echo '# edit the last section' >>site.tmp + @echo 'set srcdir "$(srcdir)"' >>site.tmp + @echo "set objdir `pwd`" >>site.tmp + @echo 'set build_alias "$(build_alias)"' >>site.tmp + @echo 'set build_triplet $(build_triplet)' >>site.tmp + @echo 'set host_alias "$(host_alias)"' >>site.tmp + @echo 'set host_triplet $(host_triplet)' >>site.tmp + @echo 'set target_alias "$(target_alias)"' >>site.tmp + @echo 'set target_triplet $(target_triplet)' >>site.tmp + @list='$(EXTRA_DEJAGNU_SITE_CONFIG)'; for f in $$list; do \ + echo "## Begin content included from file $$f. Do not modify. ##" \ + && cat `test -f "$$f" || echo '$(srcdir)/'`$$f \ + && echo "## End content included from file $$f. ##" \ + || exit 1; \ + done >> site.tmp + @echo "## End of auto-generated content; you can edit from here. ##" >> site.tmp + @if test -f site.exp; then \ + sed -e '1,/^## End of auto-generated content.*##/d' site.exp >> site.tmp; \ + fi + @-rm -f site.bak + @test ! -f site.exp || mv site.exp site.bak + @mv site.tmp site.exp + +distclean-DEJAGNU: + -rm -f site.exp site.bak + -l='$(DEJATOOL)'; for tool in $$l; do \ + rm -f $$tool.sum $$tool.log; \ + done + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-DEJAGNU +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-DEJAGNU distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: all all-am check check-DEJAGNU check-am clean clean-generic \ + clean-libtool cscopelist-am ctags-am distclean \ + distclean-DEJAGNU distclean-generic distclean-libtool distdir \ + dvi dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd.mk new file mode 100644 index 0000000..ab66256 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.bsd.mk @@ -0,0 +1,40 @@ +# -*- makefile -*- +# +# Makefile for BSD systems +# + +LOCAL_LIBS += ${LIBFFI} -lpthread + +LIBFFI_CFLAGS = ${FFI_MMAP_EXEC} -pthread +LIBFFI_BUILD_DIR = ${.CURDIR}/libffi-${arch} +INCFLAGS := -I${LIBFFI_BUILD_DIR}/include -I${INCFLAGS} + +.if ${srcdir} == "." + LIBFFI_SRC_DIR := ${.CURDIR}/libffi +.else + LIBFFI_SRC_DIR := ${srcdir}/libffi +.endif + + +LIBFFI = ${LIBFFI_BUILD_DIR}/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = ${LIBFFI_SRC_DIR}/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): ${LIBFFI} + +$(LIBFFI): + @mkdir -p ${LIBFFI_BUILD_DIR} + @if [ ! -f $(LIBFFI_SRC_DIR)/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! 
-f ${LIBFFI_BUILD_DIR}/Makefile ]; then \ + echo "Configuring libffi"; \ + cd ${LIBFFI_BUILD_DIR} && \ + /usr/bin/env CC="${CC}" LD="${LD}" CFLAGS="${LIBFFI_CFLAGS}" GREP_OPTIONS="" \ + /bin/sh ${LIBFFI_CONFIGURE} ${LIBFFI_HOST} > /dev/null; \ + fi + @cd ${LIBFFI_BUILD_DIR} && ${MAKE} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin.mk new file mode 100644 index 0000000..893a8e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.darwin.mk @@ -0,0 +1,105 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +CCACHE := $(shell type -p ccache) +BUILD_DIR := $(shell pwd) + +INCFLAGS += -I"$(BUILD_DIR)" + +# Work out which arches we need to compile the lib for +ARCHES := +ARCHFLAGS ?= $(filter -arch %, $(CFLAGS)) + +ifneq ($(findstring -arch ppc,$(ARCHFLAGS)),) + ARCHES += ppc +endif + +ifneq ($(findstring -arch i386,$(ARCHFLAGS)),) + ARCHES += i386 +endif + +ifneq ($(findstring -arch x86_64,$(ARCHFLAGS)),) + ARCHES += x86_64 +endif + +ifeq ($(strip $(ARCHES)),) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +# Just build the one (default) architecture +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$(@D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! -f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + /usr/bin/env CC="$(CC)" LD="$(LD)" CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + /bin/sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + cd "$(LIBFFI_BUILD_DIR)" && $(MAKE) + +else +LIBTARGETS = $(foreach arch,$(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + +# Build a fat binary and assemble +build_ffi = \ + mkdir -p "$(BUILD_DIR)"/libffi-$(1); \ + (if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi); \ + (if [ ! 
-f "$(BUILD_DIR)"/libffi-$(1)/Makefile ]; then \ + echo "Configuring libffi for $(1)"; \ + cd "$(BUILD_DIR)"/libffi-$(1) && \ + env CC="$(CCACHE) $(CC)" CFLAGS="-arch $(1) $(LIBFFI_CFLAGS)" LDFLAGS="-arch $(1)" \ + $(LIBFFI_CONFIGURE) --host=$(1)-apple-darwin > /dev/null; \ + fi); \ + $(MAKE) -C "$(BUILD_DIR)"/libffi-$(1) + +target_ffi = "$(BUILD_DIR)"/libffi-$(1)/.libs/libffi_convenience.a:; $(call build_ffi,$(1)) + +# Work out which arches we need to compile the lib for +ifneq ($(findstring ppc,$(ARCHES)),) + $(call target_ffi,ppc) +endif + +ifneq ($(findstring i386,$(ARCHES)),) + $(call target_ffi,i386) +endif + +ifneq ($(findstring x86_64,$(ARCHES)),) + $(call target_ffi,x86_64) +endif + + +$(LIBFFI): $(LIBTARGETS) + # Assemble into a FAT (x86_64, i386, ppc) library + @mkdir -p "$(@D)" + /usr/bin/libtool -static -o $@ \ + $(foreach arch, $(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + @mkdir -p "$(LIBFFI_BUILD_DIR)"/include + $(RM) "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffi.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffi.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffi.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffitarget.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffitarget.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffitarget.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffitarget.h + +endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu.mk new file mode 100644 index 0000000..473b8fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.gnu.mk @@ -0,0 +1,32 @@ +# -*- makefile -*- +# +# Common definitions for all systems that use GNU make +# + + +# Tack the extra deps onto the autogenerated variables +LOCAL_LIBS += $(LIBFFI) +BUILD_DIR = $(shell pwd) +LIBFFI_CFLAGS = $(FFI_MMAP_EXEC) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +INCFLAGS := -I"$(LIBFFI_BUILD_DIR)"/include $(INCFLAGS) + +ifeq ($(srcdir),.) + LIBFFI_SRC_DIR := $(shell pwd)/libffi +else ifeq ($(srcdir),..) + LIBFFI_SRC_DIR := $(shell pwd)/../libffi +else + LIBFFI_SRC_DIR := $(realpath $(srcdir)/libffi) +endif + +LIBFFI = "$(LIBFFI_BUILD_DIR)"/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = "$(LIBFFI_SRC_DIR)"/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): $(LIBFFI) + +# +# libffi.mk or libffi.darwin.mk contains rules for building the actual library +# + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.mk new file mode 100644 index 0000000..3b58227 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.mk @@ -0,0 +1,18 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$@(D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! 
-f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + env CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + $(MAKE) -C "$(LIBFFI_BUILD_DIR)" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc.mk new file mode 100644 index 0000000..8cd4603 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64.mk new file mode 100644 index 0000000..6f3dbbc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi.vc64.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc64 + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor.yml new file mode 100644 index 0000000..ece8a94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.appveyor.yml @@ -0,0 +1,66 @@ +shallow_clone: true + +# We're currently only testing libffi built with Microsoft's +# tools. +# This matrix should be expanded to include at least: +# 32- and 64-bit gcc/cygwin +# 32- and 64-bit gcc/mingw +# 32- and 64-bit clang/mingw +# and perhaps more. 
+ +image: Visual Studio 2017 +platform: + - x64 + - x86 + - arm + - arm64 + +environment: + global: + CYG_ROOT: C:/cygwin + CYG_CACHE: C:/cygwin/var/cache/setup + CYG_MIRROR: http://mirrors.kernel.org/sourceware/cygwin/ + matrix: + - VSVER: 15 + +install: + - ps: >- + If ($env:Platform -Match "x86") { + $env:VCVARS_PLATFORM="x86" + $env:BUILD="i686-pc-cygwin" + $env:HOST="i686-pc-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh" + $env:SRC_ARCHITECTURE="x86" + } ElseIf ($env:Platform -Match "arm64") { + $env:VCVARS_PLATFORM="x86_arm64" + $env:BUILD="i686-pc-cygwin" + $env:HOST="aarch64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm64" + $env:SRC_ARCHITECTURE="aarch64" + } ElseIf ($env:Platform -Match "arm") { + $env:VCVARS_PLATFORM="x86_arm" + $env:BUILD="i686-pc-cygwin" + $env:HOST="arm-w32-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm" + $env:SRC_ARCHITECTURE="arm" + } Else { + $env:VCVARS_PLATFORM="amd64" + $env:BUILD="x86_64-w64-cygwin" + $env:HOST="x86_64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -m64" + $env:SRC_ARCHITECTURE="x86" + } + - 'appveyor DownloadFile https://cygwin.com/setup-x86.exe -FileName setup.exe' + - 'setup.exe -qnNdO -R "%CYG_ROOT%" -s "%CYG_MIRROR%" -l "%CYG_CACHE%" -P dejagnu >NUL' + - '%CYG_ROOT%/bin/bash -lc "cygcheck -dc cygwin"' + - echo call VsDevCmd to set VS150COMNTOOLS + - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat" + - ps: $env:VSCOMNTOOLS=(Get-Content ("env:VS" + "$env:VSVER" + "0COMNTOOLS")) + - echo "Using Visual Studio %VSVER%.0 at %VSCOMNTOOLS%" + - call "%VSCOMNTOOLS%..\..\vc\Auxiliary\Build\vcvarsall.bat" %VCVARS_PLATFORM% + +build_script: + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./autogen.sh;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./configure CC='%MSVCC%' CXX='%MSVCC%' LD='link' CPP='cl -nologo -EP' CXXCPP='cl -nologo -EP' CPPFLAGS='-DFFI_BUILDING_DLL' AR='/cygdrive/c/projects/libffi/.travis/ar-lib lib' NM='dumpbin -symbols' STRIP=':' --build=$BUILD --host=$HOST;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp src/%SRC_ARCHITECTURE%/ffitarget.h include; make; find .;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp `find . 
-name 'libffi-?.dll'` $HOST/testsuite/; make check; cat `find ./ -name libffi.log`)" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes new file mode 100644 index 0000000..f7d3833 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +*.sln text eol=crlf +*.vcxproj* text eol=crlf diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template.md new file mode 100644 index 0000000..e197e2c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.github/issue_template.md @@ -0,0 +1,10 @@ +## System Details + + + + +## Problems Description + + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore new file mode 100644 index 0000000..5d39689 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.gitignore @@ -0,0 +1,38 @@ +.libs +.deps +*.o +*.lo +.dirstamp +*.la +Makefile +!testsuite/libffi.bhaible/Makefile +Makefile.in +aclocal.m4 +compile +!.travis/compile +configure +depcomp +doc/libffi.info +*~ +fficonfig.h.in +fficonfig.h +include/ffi.h +include/ffitarget.h +install-sh +libffi.pc +libtool +libtool-ldflags +ltmain.sh +m4/libtool.m4 +m4/lt*.m4 +mdate-sh +missing +stamp-h1 +libffi*gz +autom4te.cache +libffi.xcodeproj/xcuserdata +libffi.xcodeproj/project.xcworkspace +build_*/ +darwin_*/ +src/arm/trampoline.S +**/texinfo.tex diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis.yml new file mode 100644 index 0000000..8db2ddf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis.yml @@ -0,0 +1,83 @@ +--- +sudo: required + +language: cpp + +# For qemu-powered targets, get the list of supported processors from +# travis by setting QEMU_CPU=help, then set -mcpu= for the compilers +# accordingly. + +matrix: + include: + - os: linux + env: HOST=powerpc-eabisim RUNTESTFLAGS="--target_board powerpc-eabisim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=or1k-elf RUNTESTFLAGS="--target_board or1k-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=m32r-elf RUNTESTFLAGS="--target_board m32r-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=bfin-elf RUNTESTFLAGS="--target_board bfin-sim" DEJAGNU="/opt/.travis/site.exp" +# This configuration is still using the native x86 toolchain? 
+# - os: osx +# env: HOST=aarch64-apple-darwin13 + - os: osx + env: HOST=x86_64-apple-darwin10 + - os: linux + env: HOST=x86_64-w64-mingw32 MEVAL='export CC="x86_64-w64-mingw32-gcc" && CXX="x86_64-w64-mingw32-g++" RUNTESTFLAGS="--target_board wine-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" CONFIGURE_OPTIONS=--disable-shared LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=sh4-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/sh4-linux-gnu + - os: linux + env: HOST=alpha-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/alpha-linux-gnu + - os: linux + env: HOST=m68k-linux-gnu MEVAL='export CC="m68k-linux-gnu-gcc-8 -mcpu=547x" && CXX="m68k-linux-gnu-g++-8 -mcpu=547x"' CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/m68k-linux-gnu QEMU_CPU=cfv4e + - os: linux + arch: s390x + env: HOST=s390x-linux-gnu + - os: linux + arch: ppc64le + env: HOST=ppc64le-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + compiler: clang + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O0" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2 -fomit-frame-pointer" +# The sparc64 linux system in the GCC compile farm is non-responsive. +# - os: linux +# env: HOST=sparc64-linux-gnu +# The mips64 linux system in the GCC compile farm is not allowing logins +# - os: linux +# env: HOST=mips64el-linux-gnu + - os: linux + compiler: gcc + env: HOST=i386-pc-linux-gnu MEVAL='export CC="$CC -m32" && CXX="$CXX -m32"' + - os: linux + compiler: gcc + - os: linux + compiler: gcc + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + compiler: clang + - os: linux + compiler: clang + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + env: HOST=moxie-elf MEVAL='export PATH=/opt/moxielogic/bin:$PATH && CC=moxie-elf-gcc && CXX=moxie-elf-g++' LDFLAGS=-Tsim.ld RUNTESTFLAGS="--target_board moxie-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" + +before_install: + - if test x"$MEVAL" != x; then eval ${MEVAL}; fi + +install: + - travis_wait 30 ./.travis/install.sh + +script: + - if ! test x"$MEVAL" = x; then eval ${MEVAL}; fi + - travis_wait 115 sleep infinity & + - ./.travis/build.sh diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/ar-lib b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/ar-lib new file mode 100755 index 0000000..0baa4f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/ar-lib @@ -0,0 +1,270 @@ +#! /bin/sh +# Wrapper for Microsoft lib.exe + +me=ar-lib +scriptversion=2012-03-01.08; # UTC + +# Copyright (C) 2010-2018 Free Software Foundation, Inc. +# Written by Peter Rosin . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + + +# func_error message +func_error () +{ + echo "$me: $1" 1>&2 + exit 1 +} + +file_conv= + +# func_file_conv build_file +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv in + mingw) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_at_file at_file operation archive +# Iterate over all members in AT_FILE performing OPERATION on ARCHIVE +# for each of them. +# When interpreting the content of the @FILE, do NOT use func_file_conv, +# since the user would need to supply preconverted file names to +# binutils ar, at least for MinGW. +func_at_file () +{ + operation=$2 + archive=$3 + at_file_contents=`cat "$1"` + eval set x "$at_file_contents" + shift + + for member + do + $AR -NOLOGO $operation:"$member" "$archive" || exit $? + done +} + +case $1 in + '') + func_error "no command. Try '$0 --help' for more information." + ;; + -h | --h*) + cat < libffi.log + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git libffi.log + exit $? +} + +function build_linux() +{ + ./autogen.sh + ./configure ${HOST+--host=$HOST} ${CONFIGURE_OPTIONS} || cat */config.log + make + make dist + make check RUNTESTFLAGS="-a $RUNTESTFLAGS" + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_foreign_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" $2 bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_cross_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e HOST="${HOST}" -e CC="${HOST}-gcc-8 ${GCC_OPTIONS}" -e CXX="${HOST}-g++-8 ${GCC_OPTIONS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" moxielogic/cross-ci-build-container:latest bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? 
+} + +function build_cross() +{ + ${DOCKER} pull quay.io/moxielogic/libffi-ci-${HOST} + ${DOCKER} run --rm -t -i -v $(pwd):/opt -e HOST="${HOST}" -e CC="${HOST}-gcc ${GCC_OPTIONS}" -e CXX="${HOST}-g++ ${GCC_OPTIONS}" -e TRAVIS_BUILD_DIR=/opt -e DEJAGNU="${DEJAGNU}" -e RUNTESTFLAGS="${RUNTESTFLAGS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" quay.io/moxielogic/libffi-ci-${HOST} bash -c /opt/.travis/build-cross-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_ios() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-ios + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-iOS" -configuration Release -sdk iphoneos11.4 + exit $? +} + +function build_macosx() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-osx + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-Mac" -configuration Release -sdk macosx10.13 + echo "Finished build" + exit $? +} + +case "$HOST" in + arm-apple-darwin*) + ./autogen.sh + build_ios + ;; + x86_64-apple-darwin*) + ./autogen.sh + build_macosx + ;; + arm32v7-linux-gnu) + ./autogen.sh + build_foreign_linux arm moxielogic/arm32v7-ci-build-container:latest + ;; + mips64el-linux-gnu | sparc64-linux-gnu) + build_cfarm + ;; + bfin-elf ) + ./autogen.sh + GCC_OPTIONS=-msim build_cross + ;; + m32r-elf ) + ./autogen.sh + build_cross + ;; + or1k-elf ) + ./autogen.sh + build_cross + ;; + powerpc-eabisim ) + ./autogen.sh + build_cross + ;; + m68k-linux-gnu ) + ./autogen.sh + GCC_OPTIONS=-mcpu=547x build_cross_linux + ;; + alpha-linux-gnu | sh4-linux-gnu ) + ./autogen.sh + build_cross_linux + ;; + *) + ./autogen.sh + build_linux + ;; +esac diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile new file mode 100755 index 0000000..655932a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/compile @@ -0,0 +1,351 @@ +#! /bin/sh +# Wrapper for compilers which do not understand '-c -o'. + +scriptversion=2018-03-27.18; # UTC + +# Copyright (C) 1999-2018 Free Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. 
+IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -warn) + eat=1 + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. 
+ +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? 
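# (Illustrative annotation, not part of the upstream Automake `compile`
#  wrapper: the point of the steps above is to let a compiler that cannot
#  handle `-c -o` be driven by ordinary configure-generated rules. The
#  compiler name and paths in this example are invented.)
#
#    ./compile old-cc -I. -g -c conftest.c -o sub/conftest.o
#
#  Given that command, the wrapper drops `-o sub/conftest.o` (remembered in
#  $ofile), runs `old-cc -I. -g -c conftest.c` serialized by the lock
#  directory, and the code below then moves the conftest.o it produced to
#  sub/conftest.o. For cl/icl, func_cl_wrapper instead rewrites options in
#  place, e.g. `-o foo.obj` becomes `-Fofoo.obj` and `-lfoo` is resolved
#  against the library search path (falling back to foo.lib).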
+ +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install.sh b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install.sh new file mode 100755 index 0000000..2420245 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/install.sh @@ -0,0 +1,71 @@ +#!/bin/bash +set -x + +if [[ $TRAVIS_OS_NAME != 'linux' ]]; then + brew update > brew-update.log 2>&1 + # fix an issue with libtool on travis by reinstalling it + brew uninstall libtool; + brew install libtool dejagnu; + + # Download and extract the rlgl client + wget -qO - https://rl.gl/cli/rlgl-darwin-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + +else + # Download and extract the rlgl client + case $HOST in + aarch64-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-arm.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + ppc64le-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-ppc64le.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + s390x-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-s390x.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + *) + wget -qO - https://rl.gl/cli/rlgl-linux-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + esac + + sudo apt-get clean # clear the cache + sudo apt-get update + case $HOST in + mips64el-linux-gnu | sparc64-linux-gnu) + ;; + alpha-linux-gnu | arm32v7-linux-gnu | m68k-linux-gnu | sh4-linux-gnu) + sudo apt-get install qemu-user-static + ;; + hppa-linux-gnu ) + sudo apt-get install -y qemu-user-static g++-5-hppa-linux-gnu + ;; + i386-pc-linux-gnu) + sudo apt-get install gcc-multilib g++-multilib; + ;; + moxie-elf) + echo 'deb https://repos.moxielogic.org:7114/MoxieLogic moxiedev main' | sudo tee -a /etc/apt/sources.list + sudo apt-get clean # clear the cache + sudo apt-get update ## -qq + sudo apt-get update + sudo apt-get install -y --allow-unauthenticated moxielogic-moxie-elf-gcc moxielogic-moxie-elf-gcc-c++ moxielogic-moxie-elf-gcc-libstdc++ moxielogic-moxie-elf-gdb-sim + ;; + x86_64-w64-mingw32) + sudo apt-get install gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 wine; + ;; + i686-w32-mingw32) + sudo apt-get install gcc-mingw-w64-i686 g++-mingw-w64-i686 wine; + ;; + esac + case $HOST in + arm32v7-linux-gnu) + # don't install host tools + ;; + *) + sudo apt-get install dejagnu texinfo sharutils + ;; + esac +fi diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim.exp new file mode 100644 index 0000000..b3341f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/m32r-sim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {m32r-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "m32r" is the name of the sim subdir in devo/sim. +setup_sim m32r + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim.exp new file mode 100644 index 0000000..3a6042e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/moxie-sim.exp @@ -0,0 +1,60 @@ +# Copyright (C) 2010 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {moxie-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "moxie" is the name of the sim subdir in devo/sim. +setup_sim moxie + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. 
We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" +# No linker script needed. +set_board_info ldscript "-Tsim.ld" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim.exp new file mode 100644 index 0000000..64e1444 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/or1k-sim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {or1k-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "or1k" is the name of the sim subdir in devo/sim. +setup_sim or1k + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. 
+set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp new file mode 100644 index 0000000..bdc4389 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {powerpc-eabisim} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "powerpc" is the name of the sim subdir in devo/sim. +setup_sim powerpc + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site.exp new file mode 100644 index 0000000..644ec63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/site.exp @@ -0,0 +1,27 @@ +# Copyright (C) 2008, 2010, 2018, 2019 Anthony Green + +# Make sure we look in the right place for the board description files. 
+if ![info exists boards_dir] { + set boards_dir {} +} + +lappend boards_dir $::env(TRAVIS_BUILD_DIR)/.travis + +verbose "Global Config File: target_triplet is $target_triplet" 2 +global target_list + +case "$target_triplet" in { + { "bfin-elf" } { + set target_list "bfin-sim" + } + { "m32r-elf" } { + set target_list "m32r-sim" + } + { "moxie-elf" } { + set target_list "moxie-sim" + } + { "or1k-elf" } { + set target_list "or1k-sim" + } +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim.exp new file mode 100644 index 0000000..8bdab67 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/.travis/wine-sim.exp @@ -0,0 +1,55 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {i686-w64-mingw32} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +set_board_info sim "wineconsole --backend=curses" +set_board_info is_simulator 1 + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog.old b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog.old new file mode 100644 index 0000000..8de1ca7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/ChangeLog.old @@ -0,0 +1,7407 @@ +Libffi change logs used to be maintained in separate ChangeLog files. +These days we generate them directly from the git commit messages. +The old ChangeLog files are saved here in order to maintain the historical +record. + +============================================================================= +From the old ChangeLog.libffi-3.1 file... 
+ +2014-03-16 Josh Triplett + + * ChangeLog: Archive to ChangeLog.libffi-3.1 and delete. Future + changelogs will come from git, with autogenerated snapshots shipped in + distributed tarballs. + +2014-03-16 Josh Triplett + + Add support for stdcall, thiscall, and fastcall on non-Windows + x86-32. + + Linux supports the stdcall calling convention, either via + functions explicitly declared with the stdcall attribute, or via + code compiled with -mrtd which effectively makes stdcall the + default. + + This introduces FFI_STDCALL, FFI_THISCALL, and FFI_FASTCALL on + non-Windows x86-32 platforms, as non-default calling conventions. + + * Makefile.am: Compile in src/x86/win32.S on non-Windows x86-32. + * src/x86/ffitarget.h: Add FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32. Increase trampoline size to + accomodate these calling conventions, and unify some ifdeffery. + * src/x86/ffi.c: Add support for FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32 platforms; update ifdeffery. + * src/x86/win32.S: Support compiling on non-Windows x86-32 + platforms. On those platforms, avoid redefining the SYSV symbols + already provided by src/x86/sysv.S. + * testsuite/libffi.call/closure_stdcall.c: Run on non-Windows. + #define __stdcall if needed. + * testsuite/libffi.call/closure_thiscall.c: Run on non-Windows. + #define __fastcall if needed. + * testsuite/libffi.call/fastthis1_win32.c: Run on non-Windows. + * testsuite/libffi.call/fastthis2_win32.c: Ditto. + * testsuite/libffi.call/fastthis3_win32.c: Ditto. + * testsuite/libffi.call/many2_win32.c: Ditto. + * testsuite/libffi.call/many_win32.c: Ditto. + * testsuite/libffi.call/strlen2_win32.c: Ditto. + * testsuite/libffi.call/strlen_win32.c: Ditto. + * testsuite/libffi.call/struct1_win32.c: Ditto. + * testsuite/libffi.call/struct2_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * prep_cif.c: Remove unnecessary ifdef for X86_WIN32. + ffi_prep_cif_core had a special case for X86_WIN32, checking for + FFI_THISCALL in addition to the FFI_FIRST_ABI-to-FFI_LAST_ABI + range before returning FFI_BAD_ABI. However, on X86_WIN32, + FFI_THISCALL already falls in that range, making the special case + unnecessary. Remove it. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/closure_thiscall.c: Remove fragile stack + pointer checks. These files included inline assembly to save the + stack pointer before and after the call, and compare the values. + However, compilers can and do leave the stack in different states + for these two pieces of inline assembly, such as by saving a + temporary value on the stack across the call; observed with gcc + -Os, and verified as spurious through careful inspection of + disassembly. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/many.c: Avoid spurious failure due to + excess floating-point precision. + * testsuite/libffi.call/many_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * libtool-ldflags: Re-add. + +2014-03-16 Josh Triplett + + * Makefile.in, aclocal.m4, compile, config.guess, config.sub, + configure, depcomp, include/Makefile.in, install-sh, + libtool-ldflags, ltmain.sh, m4/libtool.m4, m4/ltoptions.m4, + m4/ltsugar.m4, m4/ltversion.m4, m4/lt~obsolete.m4, + man/Makefile.in, mdate-sh, missing, testsuite/Makefile.in: Delete + autogenerated files from version control. + * .gitignore: Add autogenerated files. + * autogen.sh: New script to generate the autogenerated files. 
+ * README: Document requirement to run autogen.sh when building + directly from version control. + * .travis.yml: Run autogen.sh + +2014-03-14 Anthony Green + + * configure, Makefile.in: Rebuilt. + +2014-03-10 Mike Hommey + + * configure.ac: Allow building for mipsel with Android NDK r8. + * Makefile.am (AM_MAKEFLAGS): Replace double quotes with single + quotes. + +2014-03-10 Landry Breuil + + * configure.ac: Ensure the linker supports @unwind sections in libffi. + +2014-03-01 Anthony Green + + * Makefile.am (EXTRA_DIST): Replace old scripts with + generate-darwin-source-and-headers.py. + * Makefile.in: Rebuilt. + +2014-02-28 Anthony Green + + * Makefile.am (AM_CFLAGS): Reintroduce missing -DFFI_DEBUG for + --enable-debug builds. + * Makefile.in: Rebuilt. + +2014-02-28 Makoto Kato + + * src/closures.c: Fix build failure when using clang for Android. + +2014-02-28 Marcin Wojdyr + + * libffi.pc.in (toolexeclibdir): use -L${toolexeclibdir} instead + of -L${libdir}. + +2014-02-28 Paulo Pizarro + + * src/bfin/sysv.S: Calling functions in shared libraries requires + considering the GOT. + +2014-02-28 Josh Triplett + + * src/x86/ffi64.c (classify_argument): Handle case where + FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE. + +2014-02-28 Anthony Green + + * ltmain.sh: Generate with libtool-2.4.2.418. + * m4/libtool.m4, m4/ltoptions.m4, m4/ltversion.m4: Ditto. + * configure: Rebuilt. + +2014-02-28 Dominik Vogt + + * configure.ac (AC_ARG_ENABLE struct): Fix typo in help + message. + (AC_ARG_ENABLE raw_api): Ditto. + * configure, fficonfig.h.in: Rebuilt. + +2014-02-28 Will Newton + + * src/arm/sysv.S: Initialize IP register with FP. + +2014-02-28 Yufeng Zhang + + * src/aarch64/sysv.S (ffi_closure_SYSV): Use x29 as the + main CFA reg; update cfi_rel_offset. + +2014-02-15 Marcus Comstedt + + * src/powerpc/ffi_linux64.c, src/powerpc/linux64_closure.S: Remove + assumption on contents of r11 in closure. + +2014-02-09 Heiher + + * src/mips/n32.S: Fix call floating point va function. + +2014-01-21 Zachary Waldowski + + * src/aarch64/ffi.c: Fix missing semicolons on assertions under + debug mode. + +2013-12-30 Zachary Waldowski + + * .gitignore: Exclude darwin_* generated source and build_* trees. + * src/aarch64/ffi.c, src/arm/ffi.c, src/x86/ffi.c: Inhibit Clang + previous prototype warnings. + * src/arm/ffi.c: Prevent NULL dereference, fix short type warning + * src/dlmalloc.c: Fix warnings from set_segment_flags return type, + and the native use of size_t for malloc on platforms + * src/arm/sysv.S: Use unified syntax. Clang clean-ups for + ARM_FUNC_START. + * generate-osx-source-and-headers.py: Remove. + * build-ios.sh: Remove. + * libffi.xcodeproj/project.pbxproj: Rebuild targets. Include + x86_64+aarch64 pieces in library. Export headers properly. + * src/x86/ffi64.c: More Clang warning clean-ups. + * src/closures.c (open_temp_exec_file_dir): Use size_t. + * src/prep_cif.c (ffi_prep_cif_core): Cast ALIGN result. + * src/aarch64/sysv.S: Use CNAME for global symbols. Only use + .size for ELF targets. + * src/aarch64/ffi.c: Clean up for double == long double. Clean up + from Clang warnings. Use Clang cache invalidation builtin. Use + size_t in place of unsigned in many places. Accommodate for + differences in Apple AArch64 ABI. + +2013-12-02 Daniel Rodríguez Troitiño + + * generate-darwin-source-and-headers.py: Clean up, modernize, + merged version of previous scripts. 
+ +2013-11-21 Anthony Green + + * configure, Makefile.in, include/Makefile.in, include/ffi.h.in, + man/Makefile.in, testsuite/Makefile.in, fficonfig.h.in: Rebuilt. + +2013-11-21 Alan Modra + + * Makefile.am (EXTRA_DIST): Add new src/powerpc files. + (nodist_libffi_la_SOURCES ): Likewise. + * configure.ac (HAVE_LONG_DOUBLE_VARIANT): Define for powerpc. + * include/ffi.h.in (ffi_prep_types): Declare. + * src/prep_cif.c (ffi_prep_cif_core): Call ffi_prep_types. + * src/types.c (FFI_NONCONST_TYPEDEF): Define and use for + HAVE_LONG_DOUBLE_VARIANT. + * src/powerpc/ffi_powerpc.h: New file. + * src/powerpc/ffi.c: Split into.. + * src/powerpc/ffi_sysv.c: ..new file, and.. + * src/powerpc/ffi_linux64.c: ..new file, rewriting parts. + * src/powerpc/ffitarget.h (enum ffi_abi): Rewrite powerpc ABI + selection as bits controlling features. + * src/powerpc/linux64.S: For consistency, use POWERPC64 rather + than __powerpc64__. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. Move .note.FNU-stack + inside guard. + * src/powerpc/sysv.S: Likewise. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + +2013-11-20 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use + NUM_FPR_ARG_REGISTERS64 and NUM_GPR_ARG_REGISTERS64 not their + 32-bit versions for 64-bit code. + * src/powerpc/linux64_closure.S: Don't use the return value area + as a parameter save area on ELFv2. + +2013-11-18 Iain Sandoe + + * src/powerpc/darwin.S (EH): Correct use of pcrel FDE encoding. + * src/powerpc/darwin_closure.S (EH): Likewise. Modernise picbase + labels. + +2013-11-18 Anthony Green + + * src/arm/ffi.c (ffi_call): Hoist declaration of temp to top of + function. + * src/arm/ffi.c (ffi_closure_inner): Moderize function declaration + to appease compiler. + Thanks for Gregory P. Smith . + +2013-11-18 Anthony Green + + * README (tested): Mention PowerPC ELFv2. + +2013-11-16 Alan Modra + + * src/powerpc/ppc_closure.S: Move errant #endif to where it belongs. + Don't bl .Luint128. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use #if _CALL_ELF + test to select parameter save sizing for ELFv2 vs. ELFv1. + * src/powerpc/ffitarget.h (FFI_V2_TYPE_FLOAT_HOMOG, + FFI_V2_TYPE_DOUBLE_HOMOG, FFI_V2_TYPE_SMALL_STRUCT): Define. + (FFI_TRAMPOLINE_SIZE): Define variant for ELFv2. + * src/powerpc/ffi.c (FLAG_ARG_NEEDS_PSAVE): Define. + (discover_homogeneous_aggregate): New function. + (ffi_prep_args64): Adjust start of param save area for ELFv2. + Handle homogenous floating point struct parms. + (ffi_prep_cif_machdep_core): Adjust space calculation for ELFv2. + Handle ELFv2 return values. Set FLAG_ARG_NEEDS_PSAVE. Handle + homogenous floating point structs. + (ffi_call): Increase size of smst_buffer for ELFv2. Handle ELFv2. + (flush_icache): Compile for ELFv2. + (ffi_prep_closure_loc): Set up ELFv2 trampoline. + (ffi_closure_helper_LINUX64): Don't return all structs directly + to caller. Handle homogenous floating point structs. Handle + ELFv2 struct return values. + * src/powerpc/linux64.S (ffi_call_LINUX64): Set up r2 for + ELFv2. Adjust toc save location. Call function pointer using + r12. Handle FLAG_RETURNS_SMST. Don't predict branches. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Set up r2 + for ELFv2. Define ELFv2 versions of STACKFRAME, PARMSAVE, and + RETVAL. Handle possibly missing parameter save area. Handle + ELFv2 return values. + (.note.GNU-stack): Move inside outer #ifdef. 
+ +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Revert 2013-02-08 + change. Do not consume an int arg when returning a small struct + for FFI_SYSV ABI. + (ffi_call): Only use bounce buffer when FLAG_RETURNS_SMST. + Properly copy bounce buffer to destination. + * src/powerpc/sysv.S: Revert 2013-02-08 change. + * src/powerpc/ppc_closure.S: Remove stray '+'. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Align struct parameters + according to __STRUCT_PARM_ALIGN__. + (ffi_prep_cif_machdep_core): Likewise. + (ffi_closure_helper_LINUX64): Likewise. + +2013-11-16 Alan Modra + + * src/powerpc/linux64.S (ffi_call_LINUX64): Tweak restore of r28. + (.note.GNU-stack): Move inside outer #ifdef. + * src/powerpc/linux64_closure.S (STACKFRAME, PARMSAVE, + RETVAL): Define and use throughout. + (ffi_closure_LINUX64): Save fprs before buying stack. + (.note.GNU-stack): Move inside outer #ifdef. + +2013-11-16 Alan Modra + + * src/powerpc/ffitarget.h (FFI_TARGET_SPECIFIC_VARIADIC): Define. + (FFI_EXTRA_CIF_FIELDS): Define. + * src/powerpc/ffi.c (ffi_prep_args64): Save fprs as per the + ABI, not to both fpr and param save area. + (ffi_prep_cif_machdep_core): Renamed from ffi_prep_cif_machdep. + Keep initial flags. Formatting. Remove dead FFI_LINUX_SOFT_FLOAT + code. + (ffi_prep_cif_machdep, ffi_prep_cif_machdep_var): New functions. + (ffi_closure_helper_LINUX64): Pass floating point as per ABI, + not to both fpr and parameter save areas. + + * libffi/testsuite/libffi.call/cls_double_va.c (main): Correct + function cast and don't call ffi_prep_cif. + * libffi/testsuite/libffi.call/cls_longdouble_va.c (main): Likewise. + +2013-11-15 Andrew Haley + + * doc/libffi.texi (Closure Example): Fix the sample code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-15 Andrew Haley + + * testsuite/libffi.call/va_struct1.c (main): Fix broken test. + * testsuite/libffi.call/cls_uint_va.c (cls_ret_T_fn): Likewise + * testsuite/libffi.call/cls_struct_va1.c (test_fn): Likewise. + * testsuite/libffi.call/va_1.c (main): Likewise. + +2013-11-14 David Schneider + + * src/arm/ffi.c: Fix register allocation for mixed float and + doubles. + * testsuite/libffi.call/cls_many_mixed_float_double.c: Testcase + for many mixed float and double arguments. + +2013-11-13 Alan Modra + + * doc/libffi.texi (Simple Example): Correct example code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-13 Anthony Green + + * include/ffi_common.h: Respect HAVE_ALLOCA_H for GNU compiler + based build. (Thanks to tmr111116 on github) + +2013-11-09 Anthony Green + + * m4/libtool.m4: Refresh. + * configure, Makefile.in: Rebuilt. + * README: Add more notes about next release. + +2013-11-09 Shigeharu TAKENO + + * m4/ax_gcc_archflag.m4 (ax_gcc_arch): Don't recognize + UltraSPARC-IIi as ultrasparc3. + +2013-11-06 Mark Kettenis + + * src/x86/freebsd.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2013-11-06 Konstantin Belousov + + * src/x86/freebsd.S (ffi_closure_raw_SYSV): Mark the assembler + source as not requiring executable stack. + +2013-11-02 Anthony Green + + * doc/libffi.texi (The Basics): Clarify return value buffer size + requirements. Also, NULL result buffer pointers are no longer + supported. + * doc/libffi.info: Rebuilt. + +2013-11-02 Mischa Jonker + + * Makefile.am (nodist_libffi_la_SOURCES): Fix build error. + * Makefile.in: Rebuilt. 
+ +2013-11-02 David Schneider + + * src/arm/ffi.c: more robust argument handling for closures on arm hardfloat + * testsuite/libffi.call/many_mixed.c: New file. + * testsuite/libffi.call/cls_many_mixed_args.c: More tests. + +2013-11-02 Vitaly Budovski + + * src/x86/ffi.c (ffi_prep_cif_machdep): Don't align stack for win32. + +2013-10-23 Mark H Weaver + + * src/mips/ffi.c: Fix handling of uint32_t arguments on the + MIPS N32 ABI. + +2013-10-13 Sandra Loosemore + + * README: Add Nios II to table of supported platforms. + * Makefile.am (EXTRA_DIST): Add nios2 files. + (nodist_libffi_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + * configure.ac (nios2*-linux*): New host. + (NIOS2): Add AM_CONDITIONAL. + * configure: Regenerated. + * src/nios2/ffi.c: New. + * src/nios2/ffitarget.h: New. + * src/nios2/sysv.S: New. + * src/prep_cif.c (initialize_aggregate): Handle extra structure + alignment via FFI_AGGREGATE_ALIGNMENT. + (ffi_prep_cif_core): Conditionalize structure return for NIOS2. + +2013-10-10 Sandra Loosemore + + * testsuite/libffi.call/cls_many_mixed_args.c (cls_ret_double_fn): + Fix uninitialized variable. + +2013-10-11 Marcus Shawcroft + + * testsuite/libffi.call/many.c (many): Replace * with +. + +2013-10-08 Ondřej Bílka + + * src/aarch64/ffi.c, src/aarch64/sysv.S, src/arm/ffi.c, + src/arm/gentramp.sh, src/bfin/sysv.S, src/closures.c, + src/dlmalloc.c, src/ia64/ffi.c, src/microblaze/ffi.c, + src/microblaze/sysv.S, src/powerpc/darwin_closure.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/sh/ffi.c, + src/tile/tile.S, testsuite/libffi.call/nested_struct11.c: Fix + spelling errors. + +2013-10-08 Anthony Green + + * aclocal.m4, compile, config.guess, config.sub, depcomp, + install-sh, mdate-sh, missing, texinfo.tex: Update from upstream. + * configure.ac: Update version to 3.0.14-rc0. + * Makefile.in, configure, Makefile.in, include/Makefile.in, + man/Makefile.in, testsuite/Makefile.in: Rebuilt. + * README: Mention M88K and VAX. + +2013-07-15 Miod Vallat + + * Makefile.am, + configure.ac, + src/m88k/ffi.c, + src/m88k/ffitarget.h, + src/m88k/obsd.S, + src/vax/elfbsd.S, + src/vax/ffi.c, + src/vax/ffitarget.h: Add m88k and vax support. + +2013-06-24 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Move var declaration + before statements. + (ffi_prep_args64): Support little-endian. + (ffi_closure_helper_SYSV, ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Likewise. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Likewise. + +2013-06-12 Mischa Jonker + + * configure.ac: Add support for ARC. + * Makefile.am: Likewise. + * README: Add ARC details. + * src/arc/arcompact.S: New. + * src/arc/ffi.c: Likewise. + * src/arc/ffitarget.h: Likewise. + +2013-03-28 David Schneider + + * src/arm/ffi.c: Fix support for ARM hard-float calling convention. + * src/arm/sysv.S: call different methods for SYSV and VFP ABIs. + * testsuite/libffi.call/cls_many_mixed_args.c: testcase for a closure with + mixed arguments, many doubles. + * testsuite/libffi.call/many_double.c: testcase for calling a function using + more than 8 doubles. + * testcase/libffi.call/many.c: use absolute value to check result against an + epsilon + +2013-03-17 Anthony Green + + * README: Update for 3.0.13. + * configure.ac: Ditto. + * configure: Rebuilt. + * doc/*: Update version. + +2013-03-17 Dave Korn + + * src/closures.c (is_emutramp_enabled + [!FFI_MMAP_EXEC_EMUTRAMP_PAX]): Move default definition outside + enclosing #if scope. 
+ +2013-03-17 Anthony Green + + * configure.ac: Only modify toolexecdir in certain cases. + * configure: Rebuilt. + +2013-03-16 Gilles Talis + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Don't use + fparg_count,etc on __NO_FPRS__ targets. + +2013-03-16 Alan Hourihane + + * src/m68k/sysv.S (epilogue): Don't use extb instruction on + m680000 machines. + +2013-03-16 Alex Gaynor + + * src/x86/ffi.c (ffi_prep_cif_machdep): Always align stack. + +2013-03-13 Markos Chandras + + * configure.ac: Add support for Imagination Technologies Meta. + * Makefile.am: Likewise. + * README: Add Imagination Technologies Meta details. + * src/metag/ffi.c: New. + * src/metag/ffitarget.h: Likewise. + * src/metag/sysv.S: Likewise. + +2013-02-24 Andreas Schwab + + * doc/libffi.texi (Structures): Fix missing category argument of + @deftp. + +2013-02-11 Anthony Green + + * configure.ac: Update release number to 3.0.12. + * configure: Rebuilt. + * README: Update release info. + +2013-02-10 Anthony Green + + * README: Add Moxie. + * src/moxie/ffi.c: Created. + * src/moxie/eabi.S: Created. + * src/moxie/ffitarget.h: Created. + * Makefile.am (nodist_libffi_la_SOURCES): Add Moxie. + * Makefile.in: Rebuilt. + * configure.ac: Add Moxie. + * configure: Rebuilt. + * testsuite/libffi.call/huge_struct.c: Disable format string + warnings for moxie*-*-elf tests. + +2013-02-10 Anthony Green + + * Makefile.am (LTLDFLAGS): Fix reference. + * Makefile.in: Rebuilt. + +2013-02-10 Anthony Green + + * README: Update supported platforms. Update test results link. + +2013-02-09 Anthony Green + + * testsuite/libffi.call/negint.c: Remove forced -O2. + * testsuite/libffi.call/many2.c (foo): Remove GCCism. + * testsuite/libffi.call/ffitest.h: Add default PRIuPTR definition. + + * src/sparc/v8.S (ffi_closure_v8): Import ancient ulonglong + closure return type fix developed by Martin v. Löwis for cpython + fork. + +2013-02-08 Andreas Tobler + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix small struct + support. + * src/powerpc/sysv.S: Ditto. + +2013-02-08 Anthony Green + + * testsuite/libffi.call/cls_longdouble.c: Remove xfail for + arm*-*-*. + +2013-02-08 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Fix cache flushing for GCC. + +2013-02-08 Matthias Klose + + * man/ffi_prep_cif.3: Clean up for debian linter. + +2013-02-08 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Account for FP args pushed + on the stack. + +2013-02-08 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am (EXTRA_DIST): Ditto. + * Makefile.in: Rebuilt. + +2013-02-08 Anthony Green + + * configure.ac: Move sparc asm config checks to within functions + for compatibility with sun tools. + * configure: Rebuilt. + * src/sparc/ffi.c (ffi_prep_closure_loc): Flush cache on v9 + systems. + * src/sparc/v8.S (ffi_flush_icache): Implement a sparc v9 cache + flusher. + +2013-02-08 Nathan Rossi + + * src/microblaze/ffi.c (ffi_closure_call_SYSV): Fix handling of + small big-endian structures. + (ffi_prep_args): Ditto. + +2013-02-07 Anthony Green + + * src/sparc/v8.S (ffi_call_v8): Fix typo from last patch + (effectively hiding ffi_call_v8). + +2013-02-07 Anthony Green + + * configure.ac: Update bug reporting address. + * configure.in: Rebuild. + + * src/sparc/v8.S (ffi_flush_icache): Out-of-line cache flusher for + Sun compiler. + * src/sparc/ffi.c (ffi_call): Remove warning. + Call ffi_flush_icache for non-GCC builds. + (ffi_prep_closure_loc): Use ffi_flush_icache. + + * Makefile.am (EXTRA_DIST): Add libtool-ldflags. 
+ * Makefile.in: Rebuilt. + * libtool-ldflags: New file. + +2013-02-07 Daniel Schepler + + * configure.ac: Correctly identify x32 systems as 64-bit. + * m4/libtool.m4: Remove libtool expr error. + * aclocal.m4, configure: Rebuilt. + +2013-02-07 Anthony Green + + * configure.ac: Fix GCC usage test. + * configure: Rebuilt. + * README: Mention LLVM/GCC x86_64 issue. + * testsuite/Makefile.in: Rebuilt. + +2013-02-07 Anthony Green + + * testsuite/libffi.call/cls_double_va.c (main): Replace // style + comments with /* */ for xlc compiler. + * testsuite/libffi.call/stret_large.c (main): Ditto. + * testsuite/libffi.call/stret_large2.c (main): Ditto. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/huge_struct.c (main): Ditto. + * testsuite/libffi.call/float_va.c (main): Ditto. + * testsuite/libffi.call/cls_struct_va1.c (main): Ditto. + * testsuite/libffi.call/cls_pointer_stack.c (main): Ditto. + * testsuite/libffi.call/cls_pointer.c (main): Ditto. + * testsuite/libffi.call/cls_longdouble_va.c (main): Ditto. + +2013-02-06 Anthony Green + + * man/ffi_prep_cif.3: Clean up for debian lintian checker. + +2013-02-06 Anthony Green + + * Makefile.am (pkgconfigdir): Add missing pkgconfig install bits. + * Makefile.in: Rebuild. + +2013-02-02 Mark H Weaver + + * src/x86/ffi64.c (ffi_call): Sign-extend integer arguments passed + via general purpose registers. + +2013-01-21 Nathan Rossi + + * README: Add MicroBlaze details. + * Makefile.am: Add MicroBlaze support. + * configure.ac: Likewise. + * src/microblaze/ffi.c: New. + * src/microblaze/ffitarget.h: Likewise. + * src/microblaze/sysv.S: Likewise. + +2013-01-21 Nathan Rossi + * testsuite/libffi.call/return_uc.c: Fixed issue. + +2013-01-21 Chris Zankel + + * README: Add Xtensa support. + * Makefile.am: Likewise. + * configure.ac: Likewise. + * Makefile.in Regenerate. + * configure: Likewise. + * src/prep_cif.c: Handle Xtensa. + * src/xtensa: New directory. + * src/xtensa/ffi.c: New file. + * src/xtensa/ffitarget.h: Ditto. + * src/xtensa/sysv.S: Ditto. + +2013-01-11 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Replace // style + comments with /* */ for xlc compiler. + * src/powerpc/aix.S (ffi_call_AIX): Ditto. + * testsuite/libffi.call/ffitest.h (allocate_mmap): Delete + deprecated inline function. + * testsuite/libffi.special/ffitestcxx.h: Ditto. + * README: Add update for AIX support. + +2013-01-11 Anthony Green + + * configure.ac: Robustify pc relative reloc check. + * m4/ax_cc_maxopt.m4: Don't -malign-double. This is an ABI + changing option for 32-bit x86. + * aclocal.m4, configure: Rebuilt. + * README: Update supported target list. + +2013-01-10 Anthony Green + + * README (tested): Add Compiler column to table. + +2013-01-10 Anthony Green + + * src/x86/ffi64.c (struct register_args): Make sse array and array + of unions for sunpro compiler compatibility. + +2013-01-10 Anthony Green + + * configure.ac: Test target platform size_t size. Handle both 32 + and 64-bit builds for x86_64-* and i?86-* targets (allowing for + CFLAG option to change default settings). + * configure, aclocal.m4: Rebuilt. + +2013-01-10 Anthony Green + + * testsuite/libffi.special/special.exp: Only run exception + handling tests when using GNU compiler. + + * m4/ax_compiler_vendor.m4: New file. + * configure.ac: Test for compiler vendor and don't use + AX_CFLAGS_WARN_ALL with the sun compiler. + * aclocal.m4, configure: Rebuilt. 
+ +2013-01-10 Anthony Green + + * include/ffi_common.h: Don't use GCCisms to define types when + building with the SUNPRO compiler. + +2013-01-10 Anthony Green + + * configure.ac: Put local.exp in the right place. + * configure: Rebuilt. + + * src/x86/ffi.c: Update comment about regparm function attributes. + * src/x86/sysv.S (ffi_closure_SYSV): The SUNPRO compiler requires + that all function arguments be passed on the stack (no regparm + support). + +2013-01-08 Anthony Green + + * configure.ac: Generate local.exp. This sets CC_FOR_TARGET + when we are using the vendor compiler. + * testsuite/Makefile.am (EXTRA_DEJAGNU_SITE_CONFIG): Point to + ../local.exp. + * configure, testsuite/Makefile.in: Rebuilt. + + * testsuite/libffi.call/call.exp: Run tests with different + options, depending on whether or not we are using gcc or the + vendor compiler. + * testsuite/lib/libffi.exp (libffi-init): Set using_gcc based on + whether or not we are building/testing with gcc. + +2013-01-08 Anthony Green + + * configure.ac: Switch x86 solaris target to X86 by default. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * configure.ac: Fix test for read-only eh_frame. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * src/x86/sysv.S, src/x86/unix64.S: Only emit DWARF unwind info + when building with the GNU toolchain. + * testsuite/libffi.call/ffitest.h (CHECK): Fix for Solaris vendor + compiler. + +2013-01-07 Thorsten Glaser + + * testsuite/libffi.call/cls_uchar_va.c, + testsuite/libffi.call/cls_ushort_va.c, + testsuite/libffi.call/va_1.c: Testsuite fixes. + +2013-01-07 Thorsten Glaser + + * src/m68k/ffi.c (CIF_FLAGS_SINT8, CIF_FLAGS_SINT16): Define. + (ffi_prep_cif_machdep): Fix 8-bit and 16-bit signed calls. + * src/m68k/sysv.S (ffi_call_SYSV, ffi_closure_SYSV): Ditto. + +2013-01-04 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't automatically add -fexceptions + and -Wall. This is set in the configure script after testing for + GCC. + * Makefile.in: Rebuilt. + +2013-01-02 rofl0r + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix build error on ppc + when long double == double. + +2013-01-02 Reini Urban + + * Makefile.am (libffi_la_LDFLAGS): Add -no-undefined to LDFLAGS + (required for shared libs on cygwin/mingw). + * Makefile.in: Rebuilt. + +2012-10-31 Alan Modra + + * src/powerpc/linux64_closure.S: Add new ABI support. + * src/powerpc/linux64.S: Likewise. + +2012-10-30 Magnus Granberg + Pavel Labushev + + * configure.ac: New options pax_emutramp + * configure, fficonfig.h.in: Regenerated + * src/closures.c: New function emutramp_enabled_check() and + checks. + +2012-10-30 Frederick Cheung + + * configure.ac: Enable FFI_MAP_EXEC_WRIT for Darwin 12 (mountain + lion) and future version. + * configure: Rebuild. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * README: Add details of aarch64 port. + * src/aarch64/ffi.c: New. + * src/aarch64/ffitarget.h: Likewise. + * src/aarch64/sysv.S: Likewise. + * Makefile.am: Support aarch64. + * configure.ac: Support aarch64. + * Makefile.in, configure: Rebuilt. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * testsuite/lib/libffi.exp: Add support for aarch64. + * testsuite/libffi.call/cls_struct_va1.c: New. + * testsuite/libffi.call/cls_uchar_va.c: Likewise. + * testsuite/libffi.call/cls_uint_va.c: Likewise. + * testsuite/libffi.call/cls_ulong_va.c: Likewise. + * testsuite/libffi.call/cls_ushort_va.c: Likewise. + * testsuite/libffi.call/nested_struct11.c: Likewise. + * testsuite/libffi.call/uninitialized.c: Likewise. 
+ * testsuite/libffi.call/va_1.c: Likewise. + * testsuite/libffi.call/va_struct1.c: Likewise. + * testsuite/libffi.call/va_struct2.c: Likewise. + * testsuite/libffi.call/va_struct3.c: Likewise. + +2012-10-12 Walter Lee + + * Makefile.am: Add TILE-Gx/TILEPro support. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + * src/prep_cif.c (ffi_prep_cif_core): Handle TILE-Gx/TILEPro. + * src/tile: New directory. + * src/tile/ffi.c: New file. + * src/tile/ffitarget.h: Ditto. + * src/tile/tile.S: Ditto. + +2012-10-12 Matthias Klose + + * generate-osx-source-and-headers.py: Normalize whitespace. + +2012-09-14 David Edelsohn + + * configure: Regenerated. + +2012-08-26 Andrew Pinski + + PR libffi/53014 + * src/mips/ffi.c (ffi_prep_closure_loc): Allow n32 with soft-float and n64 with + soft-float. + +2012-08-08 Uros Bizjak + + * src/s390/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-07-18 H.J. Lu + + PR libffi/53982 + PR libffi/53973 + * src/x86/ffitarget.h: Check __ILP32__ instead of __LP64__ for x32. + (FFI_SIZEOF_JAVA_RAW): Defined to 4 for x32. + +2012-05-16 H.J. Lu + + * configure: Regenerated. + +2012-05-05 Nicolas Lelong + + * libffi.xcodeproj/project.pbxproj: Fixes. + * README: Update for iOS builds. + +2012-04-23 Alexandre Keunecke I. de Mendonca + + * configure.ac: Add Blackfin/sysv support + * Makefile.am: Add Blackfin/sysv support + * src/bfin/ffi.c: Add Blackfin/sysv support + * src/bfin/ffitarget.h: Add Blackfin/sysv support + +2012-04-11 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new script. + * Makefile.in: Rebuilt. + +2012-04-11 Zachary Waldowski + + * generate-ios-source-and-headers.py, + libffi.xcodeproj/project.pbxproj: Support a Mac static library via + Xcode. Set iOS compatibility to 4.0. Move iOS trampoline + generation into an Xcode "run script" phase. Include both as + Xcode build scripts. Don't always regenerate config files. + +2012-04-10 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Add missing semicolon. + +2012-04-06 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new iOS/xcode files. + * Makefile.in: Rebuilt. + +2012-04-06 Mike Lewis + + * generate-ios-source-and-headers.py: New file. + * libffi.xcodeproj/project.pbxproj: New file. + * README: Update instructions on building iOS binary. + * build-ios.sh: Delete. + +2012-04-06 Anthony Green + + * src/x86/ffi64.c (UINT128): Define differently for Intel and GNU + compilers, then use it. + +2012-04-06 H.J. Lu + + * m4/libtool.m4 (_LT_ENABLE_LOCK): Support x32. + +2012-04-06 Anthony Green + + * testsuite/Makefile.am (EXTRA_DIST): Add missing test cases. + * testsuite/Makefile.in: Rebuilt. + +2012-04-05 Zachary Waldowski + + * include/ffi.h.in: Add missing trampoline table fields. + * src/arm/sysv.S: Fix ENTRY definition, and wrap symbol references + in CNAME. + * src/x86/ffi.c: Wrap Windows specific code in ifdefs. + +2012-04-02 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Declare double_tmp. + Silence casting pointer to integer of different size warning. + Delete goto to previously deleted label. + (ffi_call): Silence possibly undefined warning. + (ffi_closure_helper_SYSV): Declare variable type. + +2012-04-02 Peter Rosin + + * src/x86/win32.S (ffi_call_win32): Sign/zero extend the return + value in the Intel version as is already done for the AT&T version. + (ffi_closure_SYSV): Likewise. + (ffi_closure_raw_SYSV): Likewise. + (ffi_closure_STDCALL): Likewise. 
+ +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_THISCALL): Unify the frame + generation, fix the ENDP label and remove the surplus third arg + from the 'lea' insn. + +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_SYSV): Make the 'stubraw' label + visible outside the PROC, so that ffi_closure_raw_THISCALL can see + it. Also instruct the assembler to add a frame to the function. + +2012-03-23 Peter Rosin + + * Makefile.am (AM_CPPFLAGS): Add -DFFI_BUILDING. + * Makefile.in: Rebuilt. + * include/ffi.h.in [MSVC]: Add __declspec(dllimport) decorations + to all data exports, when compiling libffi clients using MSVC. + +2012-03-29 Peter Rosin + + * src/x86/ffitarget.h (ffi_abi): Add new ABI FFI_MS_CDECL and + make it the default for MSVC. + (FFI_TYPE_MS_STRUCT): New structure return convention. + * src/x86/ffi.c (ffi_prep_cif_machdep): Tweak the structure + return convention for FFI_MS_CDECL to be FFI_TYPE_MS_STRUCT + instead of an ordinary FFI_TYPE_STRUCT. + (ffi_prep_args): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_call): Likewise. + (ffi_prep_incoming_args_SYSV): Likewise. + (ffi_raw_call): Likewise. + (ffi_prep_closure_loc): Treat FFI_MS_CDECL as FFI_SYSV. + * src/x86/win32.S (ffi_closure_SYSV): For FFI_TYPE_MS_STRUCT, + return a pointer to the result structure in eax and don't pop + that pointer from the stack, the caller takes care of it. + (ffi_call_win32): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_closure_raw_SYSV): Likewise. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/closure_stdcall.c [MSVC]: Add inline + assembly version with Intel syntax. + * testsuite/libffi.call/closure_thiscall.c [MSVC]: Likewise. + +2012-03-23 Peter Rosin + + * testsuite/libffi.call/ffitest.h: Provide abstration of + __attribute__((fastcall)) in the form of a __FASTCALL__ + define. Define it to __fastcall for MSVC. + * testsuite/libffi.call/fastthis1_win32.c: Use the above. + * testsuite/libffi.call/fastthis2_win32.c: Likewise. + * testsuite/libffi.call/fastthis3_win32.c: Likewise. + * testsuite/libffi.call/strlen2_win32.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + +2012-03-22 Peter Rosin + + * src/x86/win32.S [MSVC] (ffi_closure_THISCALL): Remove the manual + frame on function entry, MASM adds one automatically. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/ffitest.h [MSVC]: Add kludge for missing + bits in the MSVC headers. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/cls_12byte.c: Adjust to the C89 style + with no declarations after statements. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5_1_byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. 
+ * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_dbls_struct.c: Likewise. + * testsuite/libffi.call/cls_pointer_stack.c: Likewise. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct10.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.call/testclosure.c: Likewise. + +2012-03-21 Peter Rosin + + * testsuite/libffi.call/float_va.c (float_va_fn): Use %f when + printing doubles (%lf is for long doubles). + (main): Likewise. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. + * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. 
+ * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-19 Alan Hourihane + + * src/m68k/ffi.c: Add MINT support. + * src/m68k/sysv.S: Ditto. + +2012-03-06 Chung-Lin Tang + + * src/arm/ffi.c (ffi_call): Add __ARM_EABI__ guard around call to + ffi_call_VFP(). + (ffi_prep_closure_loc): Add __ARM_EABI__ guard around use of + ffi_closure_VFP. + * src/arm/sysv.S: Add __ARM_EABI__ guard around VFP code. + +2012-03-19 chennam + + * src/powerpc/ffi_darwin.c (ffi_prep_closure_loc): Fix AIX closure + support. + +2012-03-13 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/sh64/ffi.c (ffi_prep_closure_loc): Ditto. + +2012-03-09 David Edelsohn + + * src/powerpc/aix_closure.S (ffi_closure_ASM): Adjust for Darwin64 + change to return value of ffi_closure_helper_DARWIN and load type + from return type. + +2012-03-03 H.J. Lu + + * src/x86/ffi64.c (ffi_call): Cast the return value to unsigned + long. + (ffi_prep_closure_loc): Cast to 64bit address in trampoline. + (ffi_closure_unix64_inner): Cast return pointer to unsigned long + first. + + * src/x86/ffitarget.h (FFI_SIZEOF_ARG): Defined to 8 for x32. + (ffi_arg): Set to unsigned long long for x32. + (ffi_sarg): Set to long long for x32. + +2012-03-03 H.J. Lu + + * src/prep_cif.c (ffi_prep_cif_core): Properly check bad ABI. + +2012-03-03 Andoni Morales Alastruey + + * configure.ac: Add -no-undefined for both 32- and 64-bit x86 + windows-like hosts. + * configure: Rebuilt. + +2012-02-27 Mikael Pettersson + + PR libffi/52223 + * Makefile.am (FLAGS_TO_PASS): Define. + * Makefile.in: Regenerate. + +2012-02-23 Anthony Green + + * src/*/ffitarget.h: Ensure that users never include ffitarget.h + directly. + +2012-02-23 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_closure_raw_THISCALL): New + prototype. + (ffi_prep_raw_closure_loc): Use ffi_closure_raw_THISCALL for + thiscall-convention. + (ffi_raw_call): Use ffi_prep_args_raw. + * src/x86/win32.S (ffi_closure_raw_THISCALL): Add + implementation for stub. + +2012-02-10 Kai Tietz + + * configure.ac (AM_LTLDFLAGS): Add -no-undefine for x64 + windows target. + * configure: Regenerated. + +2012-02-08 Kai Tietz + + * src/prep_cif.c (ffi_prep_cif): Allow for X86_WIN32 + also FFI_THISCALL. + * src/x86/ffi.c (ffi_closure_THISCALL): Add prototype. + (FFI_INIT_TRAMPOLINE_THISCALL): New trampoline code. + (ffi_prep_closure_loc): Add FFI_THISCALL support. + * src/x86/ffitarget.h (FFI_TRAMPOLINE_SIZE): Adjust size. + * src/x86/win32.S (ffi_closure_THISCALL): New closure code + for thiscall-calling convention. + * testsuite/libffi.call/closure_thiscall.c: New test. + +2012-01-28 Kai Tietz + + * src/libffi/src/x86/ffi.c (ffi_call_win32): Add new + argument to prototype for specify calling-convention. + (ffi_call): Add support for stdcall/thiscall convention. + (ffi_prep_args): Likewise. + (ffi_raw_call): Likewise. + * src/x86/ffitarget.h (ffi_abi): Add FFI_THISCALL and + FFI_FASTCALL. + * src/x86/win32.S (_ffi_call_win32): Add support for + fastcall/thiscall calling-convention calls. + * testsuite/libffi.call/fastthis1_win32.c: New test. + * testsuite/libffi.call/fastthis2_win32.c: New test. + * testsuite/libffi.call/fastthis3_win32.c: New test. + * testsuite/libffi.call/strlen2_win32.c: New test. + * testsuite/libffi.call/many2_win32.c: New test. + * testsuite/libffi.call/struct1_win32.c: New test. 
+ * testsuite/libffi.call/struct2_win32.c: New test. + +2012-01-23 Uros Bizjak + + * src/alpha/ffi.c (ffi_prep_closure_loc): Check for bad ABI. + +2012-01-23 Anthony Green + Chris Young + + * configure.ac: Add Amiga support. + * configure: Rebuilt. + +2012-01-23 Dmitry Nadezhin + + * include/ffi_common.h (LIKELY, UNLIKELY): Fix definitions. + +2012-01-23 Andreas Schwab + + * src/m68k/sysv.S (ffi_call_SYSV): Properly test for plain + mc68000. Test for __HAVE_68881__ in addition to __MC68881__. + +2012-01-19 Jakub Jelinek + + PR rtl-optimization/48496 + * src/ia64/ffi.c (ffi_call): Fix up aliasing violations. + +2012-01-09 Rainer Orth + + * configure.ac (i?86-*-*): Set TARGET to X86_64. + * configure: Regenerate. + +2011-12-07 Andrew Pinski + + PR libffi/50051 + * src/mips/n32.S: Add ".set mips4". + +2011-11-21 Andreas Tobler + + * configure: Regenerate. + +2011-11-12 David Gilbert + + * doc/libffi.texi, include/ffi.h.in, include/ffi_common.h, + man/Makefile.am, man/ffi.3, man/ffi_prep_cif.3, + man/ffi_prep_cif_var.3, src/arm/ffi.c, src/arm/ffitarget.h, + src/cris/ffi.c, src/prep_cif.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/float_va.c: Many changes to support variadic + function calls. + +2011-11-12 Kyle Moffett + + * src/powerpc/ffi.c, src/powerpc/ffitarget.h, + src/powerpc/ppc_closure.S, src/powerpc/sysv.S: Many changes for + softfloat powerpc variants. + +2011-11-12 Petr Salinger + + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Fix kfreebsd support. + * configure: Rebuilt. + +2011-11-12 Timothy Wall + + * src/arm/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): Max + alignment of 4 for wince on ARM. + +2011-11-12 Kyle Moffett + Anthony Green + + * src/ppc/sysv.S, src/ppc/ffi.c: Remove use of ppc string + instructions (not available on some cores, like the PPC440). + +2011-11-12 Kimura Wataru + + * m4/ax_enable_builddir: Change from string comparison to numeric + comparison for wc output. + * configure.ac: Enable FFI_MMAP_EXEC_WRIT for darwin11 aka Mac OS + X 10.7. + * configure: Rebuilt. + +2011-11-12 Anthony Green + + * Makefile.am (AM_CCASFLAGS): Add -g option to build assembly + files with debug info. + * Makefile.in: Rebuilt. + +2011-11-12 Jasper Lievisse Adriaanse + + * README: Update list of supported OpenBSD systems. + +2011-11-12 Anthony Green + + * libtool-version: Update. + * Makefile.am (nodist_libffi_la_SOURCES): Add src/debug.c if + FFI_DEBUG. + (libffi_la_SOURCES): Remove src/debug.c + (EXTRA_DIST): Add src/debug.c + * Makefile.in: Rebuilt. + * README: Update for 3.0.11. + +2011-11-10 Richard Henderson + + * configure.ac (GCC_AS_CFI_PSEUDO_OP): Use it instead of inline check. + * configure, aclocal.m4: Rebuild. + +2011-09-04 Iain Sandoe + + PR libffi/49594 + * src/powerpc/darwin_closure.S (stubs): Make the stub binding + helper reference track the architecture pointer size. + +2011-08-25 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Remove hard-coded assembly + instructions. + * src/arm/sysv.S (ffi_arm_trampoline): Put them here instead. + +2011-07-11 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Clear icache. + +2011-06-29 Rainer Orth + + * testsuite/libffi.call/cls_double_va.c: Move PR number to comment. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. + +2011-06-29 Rainer Orth + + PR libffi/46660 + * testsuite/libffi.call/cls_double_va.c: xfail dg-output on + mips-sgi-irix6*. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. 
+ +2011-06-14 Rainer Orth + + * testsuite/libffi.call/huge_struct.c (test_large_fn): Use PRIu8, + PRId8 instead of %hhu, %hhd. + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRId8, + PRIu8): Define. + [__sgi__] (PRId8, PRIu8): Define. + +2011-04-29 Rainer Orth + + * src/alpha/osf.S (UA_SI, FDE_ENCODING, FDE_ENCODE, FDE_ARANGE): + Define. + Use them to handle ELF vs. ECOFF differences. + [__osf__] (_GLOBAL__F_ffi_call_osf): Define. + +2011-03-30 Timothy Wall + + * src/powerpc/darwin.S: Fix unknown FDE encoding. + * src/powerpc/darwin_closure.S: ditto. + +2011-02-25 Anthony Green + + * src/powerpc/ffi.c (ffi_prep_closure_loc): Allow for more + 32-bit ABIs. + +2011-02-15 Anthony Green + + * m4/ax_cc_maxopt.m4: Don't -malign-double or use -ffast-math. + * configure: Rebuilt. + +2011-02-13 Ralf Wildenhues + + * configure: Regenerate. + +2011-02-13 Anthony Green + + * include/ffi_common.h (UNLIKELY, LIKELY): Define. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Remove definition. + * src/prep_cif.c (UNLIKELY, LIKELY): Remove definition. + + * src/prep_cif.c (initialize_aggregate): Convert assertion into + FFI_BAD_TYPEDEF return. Initialize arg size and alignment to 0. + + * src/pa/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/arm/ffi.c (ffi_prep_closure_loc): Ditto. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Ditto. + * src/mips/ffi.c (ffi_prep_closure_loc): Ditto. + * src/ia64/ffi.c (ffi_prep_closure_loc): Ditto. + * src/avr32/ffi.c (ffi_prep_closure_loc): Ditto. + +2011-02-11 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-02-11 Eric Botcazou + + * src/sparc/v9.S (STACKFRAME): Bump to 176. + +2011-02-09 Stuart Shelton + + http://bugs.gentoo.org/show_bug.cgi?id=286911 + * src/mips/ffitarget.h: Clean up error messages. + * src/java_raw_api.c (ffi_java_translate_args): Cast raw arg to + ffi_raw*. + * include/ffi.h.in: Add pragma for SGI compiler. + +2011-02-09 Anthony Green + + * configure.ac: Add powerpc64-*-darwin* support. + +2011-02-09 Anthony Green + + * README: Mention Interix. + +2011-02-09 Jonathan Callen + + * configure.ac: Add Interix to win32/cygwin/mingw case. + * configure: Ditto. + * src/closures.c: Treat Interix like Cygwin, instead of as a + generic win32. + +2011-02-09 Anthony Green + + * testsuite/libffi.call/err_bad_typedef.c: Remove xfail. + * testsuite/libffi.call/err_bad_abi.c: Remove xfail. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Define. + (ffi_prep_closure_loc): Check for bad ABI. + * src/prep_cif.c (UNLIKELY, LIKELY): Define. + (initialize_aggregate): Check for bad types. + +2011-02-09 Landon Fuller + + * Makefile.am (EXTRA_DIST): Add build-ios.sh, src/arm/gentramp.sh, + src/arm/trampoline.S. + (nodist_libffi_la_SOURCES): Add src/arc/trampoline.S. + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Define. + * src/arm/ffi.c (ffi_trampoline_table) + (ffi_closure_trampoline_table_page, ffi_trampoline_table_entry) + (FFI_TRAMPOLINE_CODELOC_CONFIG, FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET) + (FFI_TRAMPOLINE_COUNT, ffi_trampoline_lock, ffi_trampoline_tables) + (ffi_trampoline_table_alloc, ffi_closure_alloc, ffi_closure_free): + Define for FFI_EXEC_TRAMPOLINE_TABLE case (iOS). + (ffi_prep_closure_loc): Handl FFI_EXEC_TRAMPOLINE_TABLE case + separately. + * src/arm/sysv.S: Handle Apple iOS host. + * src/closures.c: Handle FFI_EXEC_TRAMPOLINE_TABLE case. + * build-ios.sh: New file. 
+ * fficonfig.h.in, configure, Makefile.in: Rebuilt. + * README: Mention ARM iOS. + +2011-02-08 Oren Held + + * src/dlmalloc.c (_STRUCT_MALLINFO): Define in order to avoid + redefinition of mallinfo on HP-UX. + +2011-02-08 Ginn Chen + + * src/sparc/ffi.c (ffi_call): Make compatible with Solaris Studio + aggregate return ABI. Flush cache. + (ffi_prep_closure_loc): Flush cache. + +2011-02-11 Anthony Green + + From Tom Honermann : + * src/powerpc/aix.S (ffi_call_AIX): Support for xlc toolchain on + AIX. Declare .ffi_prep_args. Insert nops after branch + instructions so that the AIX linker can insert TOC reload + instructions. + * src/powerpc/aix_closure.S: Declare .ffi_closure_helper_DARWIN. + +2011-02-08 Ed + + * src/powerpc/asm.h: Fix grammar nit in comment. + +2011-02-08 Uli Link + + * include/ffi.h.in (FFI_64_BIT_MAX): Define and use. + +2011-02-09 Rainer Orth + + PR libffi/46661 + * testsuite/libffi.call/cls_pointer.c (main): Cast void * to + uintptr_t first. + * testsuite/libffi.call/cls_pointer_stack.c (main): Likewise. + +2011-02-08 Rafael Avila de Espindola + + * configure.ac: Fix x86 test for pc related relocs. + * configure: Rebuilt. + +2011-02-07 Joel Sherrill + + * libffi/src/m68k/ffi.c: Add RTEMS support for cache flushing. + Handle case when CPU variant does not have long double support. + * libffi/src/m68k/sysv.S: Add support for mc68000, Coldfire, + and cores with soft floating point. + +2011-02-07 Joel Sherrill + + * configure.ac: Add mips*-*-rtems* support. + * configure: Regenerate. + * src/mips/ffitarget.h: Ensure needed constants are available + for targets which do not have sgidefs.h. + +2011-01-26 Dave Korn + + PR target/40125 + * configure.ac (AM_LTLDFLAGS): Add -bindir option for windows DLLs. + * configure: Regenerate. + +2010-12-18 Iain Sandoe + + PR libffi/29152 + PR libffi/42378 + * src/powerpc/darwin_closure.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffitarget.h (POWERPC_DARWIN64): New, + (FFI_TRAMPOLINE_SIZE): Update for Darwin64. + * src/powerpc/darwin.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffi_darwin.c: Likewise. + +2010-12-06 Rainer Orth + + * configure.ac (libffi_cv_as_ascii_pseudo_op): Use double + backslashes. + (libffi_cv_as_string_pseudo_op): Likewise. + * configure: Regenerate. + +2010-12-03 Chung-Lin Tang + + * src/arm/sysv.S (ffi_closure_SYSV): Add UNWIND to .pad directive. + (ffi_closure_VFP): Same. + (ffi_call_VFP): Move down to before ffi_closure_VFP. Add '.fpu vfp' + directive. + +2010-12-01 Rainer Orth + + * testsuite/libffi.call/ffitest.h [__sgi] (PRId64, PRIu64): Define. + (PRIuPTR): Define. + +2010-11-29 Richard Henderson + Rainer Orth + + * src/x86/sysv.S (FDE_ENCODING, FDE_ENCODE): Define. + (.eh_frame): Use FDE_ENCODING. + (.LASFDE1, .LASFDE2, LASFDE3): Simplify with FDE_ENCODE. + +2010-11-22 Jacek Caban + + * configure.ac: Check for symbol underscores on mingw-w64. + * configure: Rebuilt. + * src/x86/win64.S: Correctly access extern symbols in respect to + underscores. + +2010-11-15 Rainer Orth + + * testsuite/lib/libffi-dg.exp: Rename ... + * testsuite/lib/libffi.exp: ... to this. + * libffi/testsuite/libffi.call/call.exp: Don't load libffi-dg.exp. + * libffi/testsuite/libffi.special/special.exp: Likewise. + +2010-10-28 Chung-Lin Tang + + * src/arm/ffi.c (ffi_prep_args): Add VFP register argument handling + code, new parameter, and return value. Update comments. + (ffi_prep_cif_machdep): Add case for VFP struct return values. Add + call to layout_vfp_args(). 
+ (ffi_call_SYSV): Update declaration. + (ffi_call_VFP): New declaration. + (ffi_call): Add VFP struct return conditions. Call ffi_call_VFP() + when ABI is FFI_VFP. + (ffi_closure_VFP): New declaration. + (ffi_closure_SYSV_inner): Add new vfp_args parameter, update call to + ffi_prep_incoming_args_SYSV(). + (ffi_prep_incoming_args_SYSV): Update parameters. Add VFP argument + case handling. + (ffi_prep_closure_loc): Pass ffi_closure_VFP to trampoline + construction under VFP hard-float. + (rec_vfp_type_p): New function. + (vfp_type_p): Same. + (place_vfp_arg): Same. + (layout_vfp_args): Same. + * src/arm/ffitarget.h (ffi_abi): Add FFI_VFP. Define FFI_DEFAULT_ABI + based on __ARM_PCS_VFP. + (FFI_EXTRA_CIF_FIELDS): Define for adding VFP hard-float specific + fields. + (FFI_TYPE_STRUCT_VFP_FLOAT): Define internally used type code. + (FFI_TYPE_STRUCT_VFP_DOUBLE): Same. + * src/arm/sysv.S (ffi_call_SYSV): Change call of ffi_prep_args() to + direct call. Move function pointer load upwards. + (ffi_call_VFP): New function. + (ffi_closure_VFP): Same. + + * testsuite/lib/libffi-dg.exp (check-flags): New function. + (dg-skip-if): New function. + * testsuite/libffi.call/cls_double_va.c: Skip if target is arm*-*-* + and compiler options include -mfloat-abi=hard. + * testsuite/libffi.call/cls_longdouble_va.c: Same. + +2010-10-01 Jakub Jelinek + + PR libffi/45677 + * src/x86/ffi64.c (ffi_prep_cif_machdep): Ensure cif->bytes is + a multiple of 8. + * testsuite/libffi.call/many2.c: New test. + +2010-08-20 Mark Wielaard + + * src/closures.c (open_temp_exec_file_mnt): Check if getmntent_r + returns NULL. + +2010-08-09 Andreas Tobler + + * configure.ac: Add target powerpc64-*-freebsd*. + * configure: Regenerate. + * testsuite/libffi.call/cls_align_longdouble_split.c: Pass + -mlong-double-128 only to linux targets. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_longdouble.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + +2010-08-05 Dan Witte + + * Makefile.am: Pass FFI_DEBUG define to msvcc.sh for linking to the + debug CRT when --enable-debug is given. + * configure.ac: Define it. + * msvcc.sh: Translate -g and -DFFI_DEBUG appropriately. + +2010-08-04 Dan Witte + + * src/x86/ffitarget.h: Add X86_ANY define for all x86/x86_64 + platforms. + * src/x86/ffi.c: Remove redundant ifdef checks. + * src/prep_cif.c: Push stack space computation into src/x86/ffi.c + for X86_ANY so return value space doesn't get added twice. + +2010-08-03 Neil Rashbrooke + + * msvcc.sh: Don't pass -safeseh to ml64 because behavior is buggy. + +2010-07-22 Dan Witte + + * src/*/ffitarget.h: Make FFI_LAST_ABI one past the last valid ABI. + * src/prep_cif.c: Fix ABI assertion. + * src/cris/ffi.c: Ditto. + +2010-07-10 Evan Phoenix + + * src/closures.c (selinux_enabled_check): Fix strncmp usage bug. + +2010-07-07 Dan Horák + + * include/ffi.h.in: Protect #define with #ifndef. + * src/powerpc/ffitarget.h: Ditto. + * src/s390/ffitarget.h: Ditto. + * src/sparc/ffitarget.h: Ditto. + +2010-07-07 Neil Roberts + + * src/x86/sysv.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2010-07-02 Jakub Jelinek + + * Makefile.am (AM_MAKEFLAGS): Pass also mandir to submakes. + * Makefile.in: Regenerated. + +2010-05-19 Rainer Orth + + * configure.ac (libffi_cv_as_x86_pcrel): Check for illegal in as + output, too. + (libffi_cv_as_ascii_pseudo_op): Check for .ascii. + (libffi_cv_as_string_pseudo_op): Check for .string. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. 
+ * src/x86/sysv.S (.eh_frame): Use .ascii, .string or error. + +2010-05-11 Dan Witte + + * doc/libffi.tex: Document previous change. + +2010-05-11 Makoto Kato + + * src/x86/ffi.c (ffi_call): Don't copy structs passed by value. + +2010-05-05 Michael Kohler + + * src/dlmalloc.c (dlfree): Fix spelling. + * src/ia64/ffi.c (ffi_prep_cif_machdep): Ditto. + * configure.ac: Ditto. + * configure: Rebuilt. + +2010-04-13 Dan Witte + + * msvcc.sh: Build with -W3 instead of -Wall. + * src/powerpc/ffi_darwin.c: Remove build warnings. + * src/x86/ffi.c: Ditto. + * src/x86/ffitarget.h: Ditto. + +2010-04-12 Dan Witte + Walter Meinl + + * configure.ac: Add OS/2 support. + * configure: Rebuilt. + * src/closures.c: Ditto. + * src/dlmalloc.c: Ditto. + * src/x86/win32.S: Ditto. + +2010-04-07 Jakub Jelinek + + * testsuite/libffi.call/err_bad_abi.c: Remove unused args variable. + +2010-04-02 Ralf Wildenhues + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2010-03-30 Dan Witte + + * msvcc.sh: Disable build warnings. + * README (tested): Clarify windows build procedure. + +2010-03-15 Rainer Orth + + * configure.ac (libffi_cv_as_x86_64_unwind_section_type): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * libffi/src/x86/unix64.S (.eh_frame) + [HAVE_AS_X86_64_UNWIND_SECTION_TYPE]: Use @unwind section type. + +2010-03-14 Matthias Klose + + * src/x86/ffi64.c: Fix typo in comment. + * src/x86/ffi.c: Use /* ... */ comment style. + +2010-02-24 Rainer Orth + + * doc/libffi.texi (The Closure API): Fix typo. + * doc/libffi.info: Remove. + +2010-02-15 Matthias Klose + + * src/arm/sysv.S (__ARM_ARCH__): Define for processor + __ARM_ARCH_7EM__. + +2010-01-15 Anthony Green + + * README: Add notes on building with Microsoft Visual C++. + +2010-01-15 Daniel Witte + + * msvcc.sh: New file. + + * src/x86/win32.S: Port assembly routines to MSVC and #ifdef. + * src/x86/ffi.c: Tweak function declaration and remove excess + parens. + * include/ffi.h.in: Add __declspec(align(8)) to typedef struct + ffi_closure. + + * src/x86/ffi.c: Merge ffi_call_SYSV and ffi_call_STDCALL into new + function ffi_call_win32 on X86_WIN32. + * src/x86/win32.S (ffi_call_SYSV): Rename to ffi_call_win32. + (ffi_call_STDCALL): Remove. + + * src/prep_cif.c (ffi_prep_cif): Move stack space allocation code + to ffi_prep_cif_machdep for x86. + * src/x86/ffi.c (ffi_prep_cif_machdep): To here. + +2010-01-15 Oliver Kiddle + + * src/x86/ffitarget.h (ffi_abi): Check for __i386 and __amd64 for + Sun Studio compiler compatibility. + +2010-01-12 Conrad Irwin + + * doc/libffi.texi: Add closure example. + +2010-01-07 Rainer Orth + + PR libffi/40701 + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRIdLL, + PRIuLL, PRId64, PRIu64, PRIuPTR): Define. + * testsuite/libffi.call/cls_align_sint64.c: Add -Wno-format on + alpha*-dec-osf*. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/return_ll1.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.special/ffitestcxx.h (allocate_mmap): Cast + MAP_FAILED to char *. + +2010-01-06 Rainer Orth + + * src/mips/n32.S: Use .abicalls and .eh_frame with __GNUC__. + +2009-12-31 Anthony Green + + * README: Update for libffi 3.0.9. + +2009-12-27 Matthias Klose + + * configure.ac (HAVE_LONG_DOUBLE): Define for mips when + appropriate. + * configure: Rebuilt. 
+ +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_longdouble_va.c: Mark as xfail for + avr32*-*-*. + * testsuite/libffi.call/cls_double_va.c: Ditto. + +2009-12-26 Andreas Tobler + + * testsuite/libffi.call/ffitest.h: Conditionally include stdint.h + and inttypes.h. + * testsuite/libffi.special/unwindtest.cc: Ditto. + +2009-12-26 Andreas Tobler + + * configure.ac: Add amd64-*-openbsd*. + * configure: Rebuilt. + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Link + openbsd programs with -lpthread. + +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: Remove xfail for + mips*-*-* and arm*-*-*. + * testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c: Remove xfail for arm*-*-*. + +2009-12-31 Kay Tietz + + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRuLL): Fix + definitions. + +2009-12-31 Carlo Bramini + + * configure.ac (AM_LTLDFLAGS): Define for windows hosts. + * Makefile.am (libffi_la_LDFLAGS): Add AM_LTLDFLAGS. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + +2009-12-31 Anthony Green + Blake Chaffin. + + * testsuite/libffi.call/huge_struct.c: New test case from Blake + Chaffin @ Apple. + +2009-12-28 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Copy abi and nargs to + local variables. + (aix_adjust_aggregate_sizes): New function. + (ffi_prep_cif_machdep): Call it. + +2009-12-26 Andreas Tobler + + * configure.ac: Define FFI_MMAP_EXEC_WRIT for the given targets. + * configure: Regenerate. + * fficonfig.h.in: Likewise. + * src/closures.c: Remove the FFI_MMAP_EXEC_WRIT definition for + Solaris/x86. + +2009-12-26 Andreas Schwab + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Advance intarg_count + when a float arguments is passed in memory. + (ffi_closure_helper_SYSV): Mark general registers as used up when + a 64bit or soft-float long double argument is passed in memory. + +2009-12-25 Matthias Klose + + * man/ffi_call.3: Fix #include in examples. + * doc/libffi.texi: Add dircategory. + +2009-12-25 Frank Everdij + + * include/ffi.h.in: Placed '__GNUC__' ifdef around + '__attribute__((aligned(8)))' in ffi_closure, fixes compile for + IRIX MIPSPro c99. + * include/ffi_common.h: Added '__sgi' define to non + '__attribute__((__mode__()))' integer typedefs. + * src/mips/ffi.c (ffi_call, ffi_closure_mips_inner_O32, + ffi_closure_mips_inner_N32): Added 'defined(_MIPSEB)' to BE check. + (ffi_closure_mips_inner_O32, ffi_closure_mips_inner_N32): Added + FFI_LONGDOUBLE support and alignment(N32 only). + * src/mips/ffitarget.h: Corrected '#include ' for IRIX and + fixed non '__attribute__((__mode__()))' integer typedefs. + * src/mips/n32.S: Put '#ifdef linux' around '.abicalls' and '.eh_frame' + since they are Linux/GNU Assembler specific. + +2009-12-25 Bradley Smith + + * configure.ac, Makefile.am, src/avr32/ffi.c, + src/avr32/ffitarget.h, + src/avr32/sysv.S: Add AVR32 port. + * configure, Makefile.in: Rebuilt. + +2009-12-21 Andreas Tobler + + * configure.ac: Make i?86 build on FreeBSD and OpenBSD. + * configure: Regenerate. + +2009-12-15 John David Anglin + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on PA HP-UX. 
+ +2009-12-13 John David Anglin + + * src/pa/ffi.c (ffi_closure_inner_pa32): Handle FFI_TYPE_LONGDOUBLE + type on HP-UX. + +2012-02-13 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_prep_raw_closure_loc): Add thiscall + support for X86_WIN32. + (FFI_INIT_TRAMPOLINE_THISCALL): Fix displacement. + +2009-12-11 Eric Botcazou + + * src/sparc/ffi.c (ffi_closure_sparc_inner_v9): Properly align 'long + double' arguments. + +2009-12-11 Eric Botcazou + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on Solaris < 10. + +2009-12-10 Rainer Orth + + PR libffi/40700 + * src/closures.c [X86_64 && __sun__ && __svr4__] + (FFI_MMAP_EXEC_WRIT): Define. + +2009-12-08 David Daney + + * testsuite/libffi.call/stret_medium.c: Remove xfail for mips*-*-* + * testsuite/libffi.call/cls_align_longdouble_split2.c: Same. + * testsuite/libffi.call/stret_large.c: Same. + * testsuite/libffi.call/cls_align_longdouble_split.c: Same. + * testsuite/libffi.call/stret_large2.c: Same. + * testsuite/libffi.call/stret_medium2.c: Same. + +2009-12-07 David Edelsohn + + * src/powerpc/aix_closure.S (libffi_closure_ASM): Fix tablejump + typo. + +2009-12-05 David Edelsohn + + * src/powerpc/aix.S: Update AIX32 code to be consistent with AIX64 + code. + * src/powerpc/aix_closure.S: Same. + +2009-12-05 Ralf Wildenhues + + * Makefile.in: Regenerate. + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2009-12-04 David Edelsohn + + * src/powerpc/aix_closure.S: Reorganize 64-bit code to match + linux64_closure.S. + +2009-12-04 Uros Bizjak + + PR libffi/41908 + * src/x86/ffi64.c (classify_argument): Update from + gcc/config/i386/i386.c. + (ffi_closure_unix64_inner): Do not use the address of two consecutive + SSE registers directly. + * testsuite/libffi.call/cls_dbls_struct.c (main): Remove xfail + for x86_64 linux targets. + +2009-12-04 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_closure_helper_DARWIN): Increment + pfr for long double split between fpr13 and stack. + +2009-12-03 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Increment next_arg and + fparg_count twice for long double. + +2009-12-03 David Edelsohn + + PR libffi/42243 + * src/powerpc/ffi_darwin.c (ffi_prep_args): Remove extra parentheses. + +2009-12-03 Uros Bizjak + + * testsuite/libffi.call/cls_longdouble_va.c (main): Fix format string. + Remove xfails for x86 linux targets. + +2009-12-02 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Fix typo in INT64 + case. + +2009-12-01 David Edelsohn + + * src/powerpc/aix.S (ffi_call_AIX): Convert to more standard + register usage. Call ffi_prep_args directly. Add long double + return value support. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Double arg increment + applies to FFI_TYPE_DOUBLE. Correct fpr_base increment typo. + Separate FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases. + (ffi_prep_cif_machdep): Only 16 byte stack alignment in 64 bit + mode. + (ffi_closure_helper_DARWIN): Remove nf and ng counters. Move temp + into case. + * src/powerpc/aix_closure.S: Maintain 16 byte stack alignment. + Allocate result area between params and FPRs. + +2009-11-30 David Edelsohn + + PR target/35484 + * src/powerpc/ffitarget.h (POWERPC64): Define for PPC64 Linux and + AIX64. + * src/powerpc/aix.S: Implement AIX64 version. + * src/powerpc/aix_closure.S: Implement AIX64 version. + (ffi_closure_ASM): Use extsb, lha and displament addresses. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Implement AIX64 + support. 
+ (ffi_prep_cif_machdep): Same. + (ffi_call): Same. + (ffi_closure_helper_DARWIN): Same. + +2009-11-02 Andreas Tobler + + PR libffi/41908 + * testsuite/libffi.call/testclosure.c: New test. + +2009-09-28 Kai Tietz + + * src/x86/win64.S (_ffi_call_win64 stack): Remove for gnu + assembly version use of ___chkstk. + +2009-09-23 Matthias Klose + + PR libffi/40242, PR libffi/41443 + * src/arm/sysv.S (__ARM_ARCH__): Define for processors + __ARM_ARCH_6T2__, __ARM_ARCH_6M__, __ARM_ARCH_7__, + __ARM_ARCH_7A__, __ARM_ARCH_7R__, __ARM_ARCH_7M__. + Change the conditionals to __SOFTFP__ || __ARM_EABI__ + for -mfloat-abi=softfp to work. + +2009-09-17 Loren J. Rittle + + PR testsuite/32843 (strikes again) + * src/x86/ffi.c (ffi_prep_cif_machdep): Add X86_FREEBSD to + enable proper extension on char and short. + +2009-09-15 David Daney + + * src/java_raw_api.c (ffi_java_raw_to_rvalue): Remove special + handling for FFI_TYPE_POINTER. + * src/mips/ffitarget.h (FFI_TYPE_STRUCT_D_SOFT, + FFI_TYPE_STRUCT_F_SOFT, FFI_TYPE_STRUCT_DD_SOFT, + FFI_TYPE_STRUCT_FF_SOFT, FFI_TYPE_STRUCT_FD_SOFT, + FFI_TYPE_STRUCT_DF_SOFT, FFI_TYPE_STRUCT_SOFT): New defines. + (FFI_N32_SOFT_FLOAT, FFI_N64_SOFT_FLOAT): New ffi_abi enumerations. + (enum ffi_abi): Set FFI_DEFAULT_ABI for soft-float. + * src/mips/n32.S (ffi_call_N32): Add handling for soft-float + structure and pointer returns. + (ffi_closure_N32): Add handling for pointer returns. + * src/mips/ffi.c (ffi_prep_args, calc_n32_struct_flags, + calc_n32_return_struct_flags): Handle soft-float. + (ffi_prep_cif_machdep): Handle soft-float, fix pointer handling. + (ffi_call_N32): Declare proper argument types. + (ffi_call, copy_struct_N32, ffi_closure_mips_inner_N32): Handle + soft-float. + +2009-08-24 Ralf Wildenhues + + * configure.ac (AC_PREREQ): Bump to 2.64. + +2009-08-22 Ralf Wildenhues + + * Makefile.am (install-html, install-pdf): Remove. + * Makefile.in: Regenerate. + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2011-08-22 Jasper Lievisse Adriaanse + + * configure.ac: Add OpenBSD/hppa and OpenBSD/powerpc support. + * configure: Rebuilt. + +2009-07-30 Ralf Wildenhues + + * configure.ac (_AC_ARG_VAR_PRECIOUS): Use m4_rename_force. + +2009-07-24 Dave Korn + + PR libffi/40807 + * src/x86/ffi.c (ffi_prep_cif_machdep): Also use sign/zero-extending + return types for X86_WIN32. + * src/x86/win32.S (_ffi_call_SYSV): Handle omitted return types. + (_ffi_call_STDCALL, _ffi_closure_SYSV, _ffi_closure_raw_SYSV, + _ffi_closure_STDCALL): Likewise. + + * src/closures.c (is_selinux_enabled): Define to const 0 for Cygwin. + (dlmmap, dlmunmap): Also use these functions on Cygwin. + +2009-07-11 Richard Sandiford + + PR testsuite/40699 + PR testsuite/40707 + PR testsuite/40709 + * testsuite/lib/libffi-dg.exp: Revert 2009-07-02, 2009-07-01 and + 2009-06-30 commits. + +2009-07-01 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Set ld_library_path + to "" before adding paths. (This reinstates an assignment that + was removed by my 2009-06-30 commit, but changes the initial + value from "." to "".) + +2009-07-01 H.J. Lu + + PR testsuite/40601 + * testsuite/lib/libffi-dg.exp (libffi-init): Properly set + gccdir. Adjust ld_library_path for gcc only if gccdir isn't + empty. + +2009-06-30 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Don't add "." + to ld_library_path. 
Use add_path. Add just find_libgcc_s + to ld_library_path, not every libgcc multilib directory. + +2009-06-16 Wim Lewis + + * src/powerpc/ffi.c: Avoid clobbering cr3 and cr4, which are + supposed to be callee-saved. + * src/powerpc/sysv.S (small_struct_return_value): Fix overrun of + return buffer for odd-size structs. + +2009-06-16 Andreas Tobler + + PR libffi/40444 + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Add + allow_stack_execute for Darwin. + +2009-06-16 Andrew Haley + + * configure.ac (TARGETDIR): Add missing blank lines. + * configure: Regenerate. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-15 Andrew Haley + + * testsuite/libffi.call/err_bad_typedef.c: xfail everywhere. + * testsuite/libffi.call/err_bad_abi.c: Likewise. + +2009-06-12 Andrew Haley + + * Makefile.am: Remove info_TEXINFOS. + +2009-06-12 Andrew Haley + + * ChangeLog.libffi: testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. 
+ + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + 
testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-11 Kaz Kojima + + * testsuite/libffi.call/cls_longdouble_va.c: Add xfail sh*-*-linux-*. + * testsuite/libffi.call/err_bad_abi.c: Add xfail sh*-*-*. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + +2009-06-09 Andrew Haley + + * src/x86/freebsd.S: Add missing file. + +2009-06-08 Andrew Haley + + Import from libffi 3.0.8: + + * doc/libffi.texi: New file. + * doc/libffi.info: Likewise. + * doc/stamp-vti: Likewise. + * man/Makefile.am: New file. + * man/ffi_call.3: New file. + + * Makefile.am (EXTRA_DIST): Add src/x86/darwin64.S, + src/dlmalloc.c. + (nodist_libffi_la_SOURCES): Add X86_FREEBSD. + + * configure.ac: Bump version to 3.0.8. + parisc*-*-linux*: Add. + i386-*-freebsd* | i386-*-openbsd*: Add. + powerpc-*-beos*: Add. + AM_CONDITIONAL X86_FREEBSD: Add. + AC_CONFIG_FILES: Add man/Makefile. + + * include/ffi.h.in (FFI_FN): Change void (*)() to void (*)(void). + +2009-06-08 Andrew Haley + + * README: Import from libffi 3.0.8. + +2009-06-08 Andrew Haley + + * testsuite/libffi.call/err_bad_abi.c: Add xfails. + * testsuite/libffi.call/cls_longdouble_va.c: Add xfails. + * testsuite/libffi.call/cls_dbls_struct.c: Add xfail x86_64-*-linux-*. + * testsuite/libffi.call/err_bad_typedef.c: Add xfails. + + * testsuite/libffi.call/stret_medium2.c: Add __UNUSED__ to args. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. 
+ +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2009-06-05 Andrew Haley + + * src/x86/ffitarget.h, src/x86/ffi.c: Merge stdcall changes from + libffi. + +2009-06-04 Andrew Haley + + * src/x86/ffitarget.h, src/x86/win32.S, src/x86/ffi.c: Back out + stdcall changes. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2009-06-04 Andrew Haley + + * include/ffi.h.in: Change void (*)() to void (*)(void). + * src/x86/ffi.c: Likewise. + +2009-06-04 Andrew Haley + + * src/powerpc/ppc_closure.S: Insert licence header. + * src/powerpc/linux64_closure.S: Likewise. + * src/m68k/sysv.S: Likewise. + + * src/sh64/ffi.c: Change void (*)() to void (*)(void). + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/m32r/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/alpha/ffi.c: Likewise. + * src/alpha/osf.S: Likewise. + * src/frv/ffi.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/pa/hpux32.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/ia64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2009-06-04 Andrew Haley + + include/ffi.h.in, + src/arm/ffitarget.h, + src/arm/ffi.c, + src/arm/sysv.S, + src/powerpc/ffitarget.h, + src/closures.c, + src/sh64/ffitarget.h, + src/sh64/ffi.c, + src/sh64/sysv.S, + src/types.c, + src/x86/ffi64.c, + src/x86/ffitarget.h, + src/x86/win32.S, + src/x86/darwin.S, + src/x86/ffi.c, + src/x86/sysv.S, + src/x86/unix64.S, + src/alpha/ffitarget.h, + src/alpha/ffi.c, + src/alpha/osf.S, + src/m68k/ffitarget.h, + src/frv/ffitarget.h, + src/frv/ffi.c, + src/s390/ffitarget.h, + src/s390/sysv.S, + src/cris/ffitarget.h, + src/pa/linux.S, + src/pa/ffitarget.h, + src/pa/ffi.c, + src/raw_api.c, + src/ia64/ffitarget.h, + src/ia64/unix.S, + src/ia64/ffi.c, + src/ia64/ia64_flags.h, + src/java_raw_api.c, + src/debug.c, + src/sparc/v9.S, + src/sparc/ffitarget.h, + src/sparc/ffi.c, + src/sparc/v8.S, + src/mips/ffitarget.h, + src/mips/n32.S, + src/mips/o32.S, + src/mips/ffi.c, + src/prep_cif.c, + src/sh/ffitarget.h, + src/sh/ffi.c, + src/sh/sysv.S: Update license text. 
+ +2009-05-22 Dave Korn + + * src/x86/win32.S (_ffi_closure_STDCALL): New function. + (.eh_frame): Add FDE for it. + +2009-05-22 Dave Korn + + * configure.ac: Also check if assembler supports pc-relative + relocs on X86_WIN32 targets. + * configure: Regenerate. + * src/x86/win32.S (ffi_prep_args): Declare extern, not global. + (_ffi_call_SYSV): Add missing function type symbol .def and + add EH markup labels. + (_ffi_call_STDCALL): Likewise. + (_ffi_closure_SYSV): Likewise. + (_ffi_closure_raw_SYSV): Likewise. + (.eh_frame): Add hand-crafted EH data. + +2009-04-09 Jakub Jelinek + + * testsuite/lib/libffi-dg.exp: Change copyright header to refer to + version 3 of the GNU General Public License and to point readers + at the COPYING3 file and the FSF's license web page. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.special/special.exp: Likewise. + +2009-03-01 Ralf Wildenhues + + * configure: Regenerate. + +2008-12-18 Rainer Orth + + PR libffi/26048 + * configure.ac (HAVE_AS_X86_PCREL): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/x86/sysv.S [!FFI_NO_RAW_API]: Precalculate + RAW_CLOSURE_CIF_OFFSET, RAW_CLOSURE_FUN_OFFSET, + RAW_CLOSURE_USER_DATA_OFFSET for the Solaris 10/x86 assembler. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + * src/x86/unix64.S (.Lstore_table): Move to .text section. + (.Lload_table): Likewise. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + +2008-12-18 Ralf Wildenhues + + * configure: Regenerate. + +2008-11-21 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_cif_machdep): Add support for + signed/unsigned int8/16 return values. + * src/sparc/v8.S (ffi_call_v8): Likewise. + (ffi_closure_v8): Likewise. + +2008-09-26 Peter O'Gorman + Steve Ellcey + + * configure: Regenerate for new libtool. + * Makefile.in: Ditto. + * include/Makefile.in: Ditto. + * aclocal.m4: Ditto. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. + (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-06-17 Ralf Wildenhues + + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2008-06-07 Joseph Myers + + * configure.ac (parisc*-*-linux*, powerpc-*-sysv*, + powerpc-*-beos*): Remove. + * configure: Regenerate. + +2008-05-09 Julian Brown + + * Makefile.am (LTLDFLAGS): New. + (libffi_la_LDFLAGS): Use above. + * Makefile.in: Regenerate. + +2008-04-18 Paolo Bonzini + + PR bootstrap/35457 + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2008-03-26 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-03-26 Daniel Jacobowitz + + * src/arm/sysv.S: Fix ARM comment marker. + +2008-03-26 Jakub Jelinek + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. 
+ * src/m68k/sysv.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-03-16 Ralf Wildenhues + + * aclocal.m4: Regenerate. + * configure: Likewise. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2008-02-12 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-24 David Edelsohn + + * configure: Regenerate. + +2008-01-06 Andreas Tobler + + * src/x86/ffi.c (ffi_prep_cif_machdep): Fix thinko. + +2008-01-05 Andreas Tobler + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): Add code for + signed/unsigned int8/16 for X86_DARWIN. + Updated copyright info. + Handle one and two byte structs with special cif->flags. + * src/x86/ffitarget.h: Add special types for one and two byte structs. + Updated copyright info. + * src/x86/darwin.S (ffi_call_SYSV): Rewrite to use a jump table like + sysv.S + Remove code to pop args from the stack after call. + Special-case signed/unsigned for int8/16, one and two byte structs. + (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + Updated copyright info. + +2007-12-08 David Daney + + * src/mips/n32.S (ffi_call_N32): Replace dadd with ADDU, dsub with + SUBU, add with ADDU and use smaller code sequences. + +2007-12-07 David Daney + + * src/mips/ffi.c (ffi_prep_cif_machdep): Handle long double return + type. + +2007-12-06 David Daney + + * include/ffi.h.in (FFI_SIZEOF_JAVA_RAW): Define if not already + defined. + (ffi_java_raw): New typedef. + (ffi_java_raw_call, ffi_java_ptrarray_to_raw, + ffi_java_raw_to_ptrarray): Change parameter types from ffi_raw to + ffi_java_raw. + (ffi_java_raw_closure) : Same. + (ffi_prep_java_raw_closure, ffi_prep_java_raw_closure_loc): Change + parameter types. + * src/java_raw_api.c (ffi_java_raw_size): Replace FFI_SIZEOF_ARG with + FFI_SIZEOF_JAVA_RAW. + (ffi_java_raw_to_ptrarray): Change type of raw to ffi_java_raw. + Replace FFI_SIZEOF_ARG with FFI_SIZEOF_JAVA_RAW. Use + sizeof(ffi_java_raw) for alignment calculations. + (ffi_java_ptrarray_to_raw): Same. + (ffi_java_rvalue_to_raw): Add special handling for FFI_TYPE_POINTER + if FFI_SIZEOF_JAVA_RAW == 4. + (ffi_java_raw_to_rvalue): Same. + (ffi_java_raw_call): Change type of raw to ffi_java_raw. + (ffi_java_translate_args): Same. + (ffi_prep_java_raw_closure_loc, ffi_prep_java_raw_closure): Change + parameter types. + * src/mips/ffitarget.h (FFI_SIZEOF_JAVA_RAW): Define for N32 ABI. + +2007-12-06 David Daney + + * src/mips/n32.S (ffi_closure_N32): Use 64-bit add instruction on + pointer values. + +2007-12-01 Andreas Tobler + + PR libffi/31937 + * src/powerpc/ffitarget.h: Introduce new ABI FFI_LINUX_SOFT_FLOAT. + Add local FFI_TYPE_UINT128 to handle soft-float long-double-128. + * src/powerpc/ffi.c: Distinguish between __NO_FPRS__ and not and + set the NUM_FPR_ARG_REGISTERS according to. + Add support for potential soft-float support under hard-float + architecture. + (ffi_prep_args_SYSV): Set NUM_FPR_ARG_REGISTERS to 0 in case of + FFI_LINUX_SOFT_FLOAT, handle float, doubles and long-doubles according + to the FFI_LINUX_SOFT_FLOAT ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_SYSV): Likewise. 
+ * src/powerpc/ppc_closure.S: Make sure not to store float/double
+ on archs where __NO_FPRS__ is true.
+ Add FFI_TYPE_UINT128 support.
+ * src/powerpc/sysv.S: Add support for soft-float long-double-128.
+ Adjust copyright notice.
+
+2007-11-25 Andreas Tobler
+
+ * src/closures.c: Move definition of MAYBE_UNUSED from here to ...
+ * include/ffi_common.h: ... here.
+ Update copyright.
+
+2007-11-17 Andreas Tobler
+
+ * src/powerpc/sysv.S: Load correct cr to compare if we have long double.
+ * src/powerpc/linux64.S: Likewise.
+ * src/powerpc/ffi.c: Add a comment to show which part goes into cr6.
+ * testsuite/libffi.call/return_ldl.c: New test.
+
+2007-09-04
+
+ * src/arm/sysv.S (UNWIND): New.
+ (Whole file): Conditionally compile unwinder directives.
+ * src/arm/sysv.S: Add unwinder directives.
+
+ * src/arm/ffi.c (ffi_prep_args): Align structs by at least 4 bytes.
+ Only treat r0 as a struct address if we're actually returning a
+ struct by address.
+ Only copy the bytes that are actually within a struct.
+ (ffi_prep_cif_machdep): A Composite Type not larger than 4 bytes
+ is returned in r0, not passed by address.
+ (ffi_call): Allocate a word-sized temporary for the case where
+ a composite is returned in r0.
+ (ffi_prep_incoming_args_SYSV): Align as necessary.
+
+2007-08-05 Steven Newbury
+
+ * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Use __clear_cache instead of
+ directly using the sys_cacheflush syscall.
+
+2007-07-27 Andrew Haley
+
+ * src/arm/sysv.S (ffi_closure_SYSV): Add soft-float.
+
+2007-09-03 Maciej W. Rozycki
+
+ * Makefile.am: Unify MIPS_IRIX and MIPS_LINUX into MIPS.
+ * configure.ac: Likewise.
+ * Makefile.in: Regenerate.
+ * include/Makefile.in: Likewise.
+ * testsuite/Makefile.in: Likewise.
+ * configure: Likewise.
+
+2007-08-24 David Daney
+
+ * testsuite/libffi.call/return_sl.c: New test.
+ +2007-08-10 David Daney + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Remove xfail for mips64*-*-*. + +2007-08-10 David Daney + + PR libffi/28313 + * configure.ac: Don't treat mips64 as a special case. + * Makefile.am (nodist_libffi_la_SOURCES): Add n32.S. + * configure: Regenerate + * Makefile.in: Ditto. + * fficonfig.h.in: Ditto. + * src/mips/ffitarget.h (REG_L, REG_S, SUBU, ADDU, SRL, LI): Indent. + (LA, EH_FRAME_ALIGN, FDE_ADDR_BYTES): New preprocessor macros. + (FFI_DEFAULT_ABI): Set for n64 case. + (FFI_CLOSURES, FFI_TRAMPOLINE_SIZE): Define for n32 and n64 cases. + * src/mips/n32.S (ffi_call_N32): Add debug macros and labels for FDE. + (ffi_closure_N32): New function. + (.eh_frame): New section + * src/mips/o32.S: Clean up comments. 
+ (ffi_closure_O32): Pass ffi_closure parameter in $12. + * src/mips/ffi.c: Use FFI_MIPS_N32 instead of + _MIPS_SIM == _ABIN32 throughout. + (FFI_MIPS_STOP_HERE): New, use in place of + ffi_stop_here. + (ffi_prep_args): Use unsigned long to hold pointer values. Rewrite + to support n32/n64 ABIs. + (calc_n32_struct_flags): Rewrite. + (calc_n32_return_struct_flags): Remove unused variable. Reverse + position of flag bits. + (ffi_prep_cif_machdep): Rewrite n32 portion. + (ffi_call): Enable for n64. Add special handling for small structure + return values. + (ffi_prep_closure_loc): Add n32 and n64 support. + (ffi_closure_mips_inner_O32): Add cast to silence warning. + (copy_struct_N32, ffi_closure_mips_inner_N32): New functions. + +2007-08-08 David Daney + + * testsuite/libffi.call/ffitest.h (ffi_type_mylong): Remove definition. + * testsuite/libffi.call/cls_align_uint16.c (main): Use correct type + specifiers. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/cls_sint.c (main): Ditto. + * testsuite/libffi.call/nested_struct9.c (main): Ditto. + * testsuite/libffi.call/cls_20byte1.c (main): Ditto. + * testsuite/libffi.call/cls_9byte1.c (main): Ditto. + * testsuite/libffi.call/closure_fn1.c (main): Ditto. + * testsuite/libffi.call/closure_fn3.c (main): Ditto. + * testsuite/libffi.call/return_dbl2.c (main): Ditto. + * testsuite/libffi.call/cls_sshort.c (main): Ditto. + * testsuite/libffi.call/return_fl3.c (main): Ditto. + * testsuite/libffi.call/closure_fn5.c (main): Ditto. + * testsuite/libffi.call/nested_struct.c (main): Ditto. + * testsuite/libffi.call/nested_struct10.c (main): Ditto. + * testsuite/libffi.call/return_ll1.c (main): Ditto. + * testsuite/libffi.call/cls_8byte.c (main): Ditto. + * testsuite/libffi.call/cls_align_uint32.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint16.c (main): Ditto. + * testsuite/libffi.call/cls_20byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct2.c (main): Ditto. + * testsuite/libffi.call/cls_24byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct6.c (main): Ditto. + * testsuite/libffi.call/cls_uint.c (main): Ditto. + * testsuite/libffi.call/cls_12byte.c (main): Ditto. + * testsuite/libffi.call/cls_16byte.c (main): Ditto. + * testsuite/libffi.call/closure_fn0.c (main): Ditto. + * testsuite/libffi.call/cls_9byte2.c (main): Ditto. + * testsuite/libffi.call/closure_fn2.c (main): Ditto. + * testsuite/libffi.call/return_dbl1.c (main): Ditto. + * testsuite/libffi.call/closure_fn4.c (main): Ditto. + * testsuite/libffi.call/closure_fn6.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint32.c (main): Ditto. + +2007-08-07 Andrew Haley + + * src/x86/sysv.S (ffi_closure_raw_SYSV): Fix typo in previous + checkin. + +2007-08-06 Andrew Haley + + PR testsuite/32843 + * src/x86/sysv.S (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + +2007-08-02 David Daney + + * testsuite/libffi.call/return_ul.c (main): Define return type as + ffi_arg. Use proper printf conversion specifier. + +2007-07-30 Andrew Haley + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): in x86 case, add code for + signed/unsigned int8/16. + * src/x86/sysv.S (ffi_call_SYSV): Rewrite to: + Use a jump table. + Remove code to pop args from the stack after call. + Special-case signed/unsigned int8/16. + * testsuite/libffi.call/return_sc.c (main): Revert. 
+
+2007-07-26 Richard Guenther
+
+ PR testsuite/32843
+ * testsuite/libffi.call/return_sc.c (main): Verify call
+ result as signed char, not ffi_arg.
+
+2007-07-16 Rainer Orth
+
+ * configure.ac (i?86-*-solaris2.1[0-9]): Set TARGET to X86_64.
+ * configure: Regenerate.
+
+2007-07-11 David Daney
+
+ * src/mips/ffi.c: Don't include sys/cachectl.h.
+ (ffi_prep_closure_loc): Use __builtin___clear_cache() instead of
+ cacheflush().
+
+2007-05-18 Aurelien Jarno
+
+ * src/arm/ffi.c (ffi_prep_closure_loc): Renamed and adjusted
+ from (ffi_prep_closure): ... this.
+ (FFI_INIT_TRAMPOLINE): Adjust.
+
+2005-12-31 Phil Blundell
+
+ * src/arm/ffi.c (ffi_prep_incoming_args_SYSV,
+ ffi_closure_SYSV_inner, ffi_prep_closure): New, add closure support.
+ * src/arm/sysv.S(ffi_closure_SYSV): Likewise.
+ * src/arm/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise.
+ (FFI_CLOSURES): Enable closure support.
+
+2007-07-03 Andrew Haley
+
+ * testsuite/libffi.call/cls_multi_ushort.c,
+ testsuite/libffi.call/cls_align_uint16.c,
+ testsuite/libffi.call/nested_struct1.c,
+ testsuite/libffi.call/nested_struct3.c,
+ testsuite/libffi.call/cls_7_1_byte.c,
+ testsuite/libffi.call/cls_double.c,
+ testsuite/libffi.call/nested_struct5.c,
+ testsuite/libffi.call/nested_struct7.c,
+ testsuite/libffi.call/cls_sint.c,
+ testsuite/libffi.call/nested_struct9.c,
+ testsuite/libffi.call/cls_20byte1.c,
+ testsuite/libffi.call/cls_multi_sshortchar.c,
+ testsuite/libffi.call/cls_align_sint64.c,
+ testsuite/libffi.call/cls_3byte2.c,
+ testsuite/libffi.call/cls_multi_schar.c,
+ testsuite/libffi.call/cls_multi_uchar.c,
+ testsuite/libffi.call/cls_19byte.c,
+ testsuite/libffi.call/cls_9byte1.c,
+ testsuite/libffi.call/cls_align_float.c,
+ testsuite/libffi.call/closure_fn1.c,
+ testsuite/libffi.call/problem1.c,
+ testsuite/libffi.call/closure_fn3.c,
+ testsuite/libffi.call/cls_sshort.c,
+ testsuite/libffi.call/closure_fn5.c,
+ testsuite/libffi.call/cls_align_double.c,
+ testsuite/libffi.call/cls_2byte.c,
+ testsuite/libffi.call/nested_struct.c,
+ testsuite/libffi.call/nested_struct10.c,
+ testsuite/libffi.call/cls_4byte.c,
+ testsuite/libffi.call/cls_6byte.c,
+ testsuite/libffi.call/cls_8byte.c,
+ testsuite/libffi.call/cls_multi_sshort.c,
+ testsuite/libffi.call/cls_align_uint32.c,
+ testsuite/libffi.call/cls_align_sint16.c,
+ testsuite/libffi.call/cls_float.c,
+ testsuite/libffi.call/cls_20byte.c,
+ testsuite/libffi.call/cls_5_1_byte.c,
+ testsuite/libffi.call/nested_struct2.c,
+ testsuite/libffi.call/cls_24byte.c,
+ testsuite/libffi.call/nested_struct4.c,
+ testsuite/libffi.call/nested_struct6.c,
+ testsuite/libffi.call/cls_64byte.c,
+ testsuite/libffi.call/nested_struct8.c,
+ testsuite/libffi.call/cls_uint.c,
+ testsuite/libffi.call/cls_multi_ushortchar.c,
+ testsuite/libffi.call/cls_schar.c,
+ testsuite/libffi.call/cls_uchar.c,
+ testsuite/libffi.call/cls_align_uint64.c,
+ testsuite/libffi.call/cls_ulonglong.c,
+ testsuite/libffi.call/cls_align_longdouble.c,
+ testsuite/libffi.call/cls_1_1byte.c,
+ testsuite/libffi.call/cls_12byte.c,
+ testsuite/libffi.call/cls_3_1byte.c,
+ testsuite/libffi.call/cls_3byte1.c,
+ testsuite/libffi.call/cls_4_1byte.c,
+ testsuite/libffi.call/cls_6_1_byte.c,
+ testsuite/libffi.call/cls_16byte.c,
+ testsuite/libffi.call/cls_18byte.c,
+ testsuite/libffi.call/closure_fn0.c,
+ testsuite/libffi.call/cls_9byte2.c,
+ testsuite/libffi.call/closure_fn2.c,
+ testsuite/libffi.call/closure_fn4.c,
+ testsuite/libffi.call/cls_ushort.c,
+ testsuite/libffi.call/closure_fn6.c,
+ testsuite/libffi.call/cls_5byte.c,
+ 
testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Enable for ARM. + +2007-07-05 H.J. Lu + + * aclocal.m4: Regenerated. + +2007-06-02 Paolo Bonzini + + * configure: Regenerate. + +2007-05-23 Steve Ellcey + + * Makefile.in: Regenerate. + * configure: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner,ffi_prep_closure): New, add closure support. + * src/m68k/sysv.S(ffi_closure_SYSV,ffi_closure_struct_SYSV): Likewise. + * src/m68k/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-05-10 Roman Zippel + + * configure.ac (HAVE_AS_CFI_PSEUDO_OP): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/m68k/sysv.S (CFI_STARTPROC,CFI_ENDPROC, + CFI_OFFSET,CFI_DEF_CFA): New macros. + (ffi_call_SYSV): Add callframe annotation. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_args,ffi_prep_cif_machdep): Fix + numerous test suite failures. + * src/m68k/sysv.S (ffi_call_SYSV): Likewise. + +2007-04-11 Paolo Bonzini + + * Makefile.am (EXTRA_DIST): Bring up to date. + * Makefile.in: Regenerate. + * src/frv/eabi.S: Remove RCS keyword. + +2007-04-06 Richard Henderson + + * configure.ac: Tidy target case. + (HAVE_LONG_DOUBLE): Allow the target to override. + * configure: Regenerate. + * include/ffi.h.in: Don't define ffi_type_foo if + LIBFFI_HIDE_BASIC_TYPES is defined. + (ffi_type_longdouble): If not HAVE_LONG_DOUBLE, define + to ffi_type_double. + * types.c (LIBFFI_HIDE_BASIC_TYPES): Define. + (FFI_TYPEDEF, ffi_type_void): Mark the data const. + (ffi_type_longdouble): Special case for Alpha. Don't define + if long double == double. + + * src/alpha/ffi.c (FFI_TYPE_LONGDOUBLE): Assert unique value. + (ffi_prep_cif_machdep): Handle it as the 128-bit type. + (ffi_call, ffi_closure_osf_inner): Likewise. + (ffi_closure_osf_inner): Likewise. Mark hidden. + (ffi_call_osf, ffi_closure_osf): Mark hidden. + * src/alpha/ffitarget.h (FFI_LAST_ABI): Tidy definition. + * src/alpha/osf.S (ffi_call_osf, ffi_closure_osf): Mark hidden. + (load_table): Handle 128-bit long double. + + * testsuite/libffi.call/float4.c: Add -mieee for alpha. + +2007-04-06 Tom Tromey + + PR libffi/31491: + * README: Fixed bug in example. + +2007-04-03 Jakub Jelinek + + * src/closures.c: Include sys/statfs.h. + (_GNU_SOURCE): Define on Linux. + (FFI_MMAP_EXEC_SELINUX): Define. + (selinux_enabled): New variable. + (selinux_enabled_check): New function. + (is_selinux_enabled): Define. + (dlmmap): Use it. + +2007-03-24 Uros Bizjak + + * testsuite/libffi.call/return_fl2.c (return_fl): Mark as static. + Use 'volatile float sum' to create sum of floats to avoid false + negative due to excess precision on ix86 targets. + (main): Ditto. + +2007-03-08 Alexandre Oliva + + * src/powerpc/ffi.c (flush_icache): Fix left-over from previous + patch. + (ffi_prep_closure_loc): Remove unneeded casts. Add needed ones. + +2007-03-07 Alexandre Oliva + + * include/ffi.h.in (ffi_closure_alloc, ffi_closure_free): New. + (ffi_prep_closure_loc): New. + (ffi_prep_raw_closure_loc): New. + (ffi_prep_java_raw_closure_loc): New. + * src/closures.c: New file. + * src/dlmalloc.c [FFI_MMAP_EXEC_WRIT] (struct malloc_segment): + Replace sflags with exec_offset. 
+ [FFI_MMAP_EXEC_WRIT] (mmap_exec_offset, add_segment_exec_offset, + sub_segment_exec_offset): New macros. + (get_segment_flags, set_segment_flags, check_segment_merge): New + macros. + (is_mmapped_segment, is_extern_segment): Use get_segment_flags. + (add_segment, sys_alloc, create_mspace, create_mspace_with_base, + destroy_mspace): Use new macros. + (sys_alloc): Silence warning. + * Makefile.am (libffi_la_SOURCES): Add src/closures.c. + * Makefile.in: Rebuilt. + * src/prep_cif [FFI_CLOSURES] (ffi_prep_closure): Implement in + terms of ffi_prep_closure_loc. + * src/raw_api.c (ffi_prep_raw_closure_loc): Renamed and adjusted + from... + (ffi_prep_raw_closure): ... this. Re-implement in terms of the + renamed version. + * src/java_raw_api (ffi_prep_java_raw_closure_loc): Renamed and + adjusted from... + (ffi_prep_java_raw_closure): ... this. Re-implement in terms of + the renamed version. + * src/alpha/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + * src/pa/ffi.c: Likewise. + * src/cris/ffi.c: Likewise. Adjust. + * src/frv/ffi.c: Likewise. + * src/ia64/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/x86/ffi.c: Likewise. + (FFI_INIT_TRAMPOLINE): Adjust. + (ffi_prep_raw_closure_loc): Renamed and adjusted from... + (ffi_prep_raw_closure): ... this. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + (flush_icache): Adjust. + +2007-03-07 Alexandre Oliva + + * src/dlmalloc.c: New file, imported version 2.8.3 of Doug + Lea's malloc. + +2007-03-01 Brooks Moses + + * Makefile.am: Add dummy install-pdf target. + * Makefile.in: Regenerate + +2007-02-13 Andreas Krebbel + + * src/s390/ffi.c (ffi_prep_args, ffi_prep_cif_machdep, + ffi_closure_helper_SYSV): Add long double handling. + +2007-02-02 Jakub Jelinek + + * src/powerpc/linux64.S (ffi_call_LINUX64): Move restore of r2 + immediately after bctrl instruction. + +2007-01-18 Alexandre Oliva + + * Makefile.am (all-recursive, install-recursive, + mostlyclean-recursive, clean-recursive, distclean-recursive, + maintainer-clean-recursive): Add missing targets. + * Makefile.in: Rebuilt. + +2006-12-14 Andreas Tobler + + * configure.ac: Add TARGET for x86_64-*-darwin*. + * Makefile.am (nodist_libffi_la_SOURCES): Add rules for 64-bit sources + for X86_DARWIN. + * src/x86/ffitarget.h: Set trampoline size for x86_64-*-darwin*. + * src/x86/darwin64.S: New file for x86_64-*-darwin* support. + * configure: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + * testsuite/libffi.special/unwindtest_ffi_call.cc: New test case for + ffi_call only. + +2006-12-13 Andreas Tobler + + * aclocal.m4: Regenerate with aclocal -I .. as written in the + Makefile.am. + +2006-10-31 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (darwin_adjust_aggregate_sizes): New. + (ffi_prep_cif_machdep): Call darwin_adjust_aggregate_sizes for + Darwin. + * testsuite/libffi.call/nested_struct4.c: Remove Darwin XFAIL. + * testsuite/libffi.call/nested_struct6.c: Remove Darwin XFAIL. + +2006-10-10 Paolo Bonzini + Sandro Tolaini + + * configure.ac [i*86-*-darwin*]: Set X86_DARWIN symbol and + conditional. + * configure: Regenerated. + * Makefile.am (nodist_libffi_la_SOURCES) [X86_DARWIN]: New case. + (EXTRA_DIST): Add src/x86/darwin.S. + * Makefile.in: Regenerated. 
+ * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + + * src/x86/ffi.c (ffi_prep_cif_machdep) [X86_DARWIN]: Treat like + X86_WIN32, and additionally align stack to 16 bytes. + * src/x86/darwin.S: New, based on sysv.S. + * src/prep_cif.c (ffi_prep_cif) [X86_DARWIN]: Align > 8-byte structs. + +2006-09-12 David Daney + + PR libffi/23935 + * include/Makefile.am: Install both ffi.h and ffitarget.h in + $(libdir)/gcc/$(target_alias)/$(gcc_version)/include. + * aclocal.m4: Regenerated for automake 1.9.6. + * Makefile.in: Regenerated. + * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + +2006-08-17 Andreas Tobler + + * include/ffi_common.h (struct): Revert accidental commit. + +2006-08-15 Andreas Tobler + + * include/ffi_common.h: Remove lint directives. + * include/ffi.h.in: Likewise. + +2006-07-25 Torsten Schoenfeld + + * include/ffi.h.in (ffi_type_ulong, ffi_type_slong): Define correctly + for 32-bit architectures. + * testsuite/libffi.call/return_ul.c: New test case. + +2006-07-19 David Daney + + * testsuite/libffi.call/closure_fn6.c: Remove xfail for mips, + xfail remains for mips64. + +2006-05-23 Carlos O'Donell + + * Makefile.am: Add install-html target. Add install-html to .PHONY + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2006-05-18 John David Anglin + + * pa/ffi.c (ffi_prep_args_pa32): Load floating point arguments from + stack slot. + +2006-04-22 Andreas Tobler + + * README: Remove notice about 'Crazy Comments'. + * src/debug.c: Remove lint directives. Cleanup white spaces. + * src/java_raw_api.c: Likewise. + * src/prep_cif.c: Likewise. + * src/raw_api.c: Likewise. + * src/ffitest.c: Delete. No longer needed, all test cases migrated + to the testsuite. + * src/arm/ffi.c: Remove lint directives. + * src/m32r/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + +2006-04-13 Andreas Tobler + + * src/pa/hpux32.S: Correct unwind offset calculation for + ffi_closure_pa32. + * src/pa/linux.S: Likewise. + +2006-04-12 James E Wilson + + PR libgcj/26483 + * src/ia64/ffi.c (stf_spill, ldf_fill): Rewrite as macros. + (hfa_type_load): Call stf_spill. + (hfa_type_store): Call ldf_fill. + (ffi_call): Adjust calls to above routines. Add local temps for + macro result. + +2006-04-10 Matthias Klose + + * testsuite/lib/libffi-dg.exp (libffi-init): Recognize multilib + directory names containing underscores. + +2006-04-07 James E Wilson + + * testsuite/libffi.call/float4.c: New testcase. + +2006-04-05 John David Anglin + Andreas Tobler + + * Makefile.am: Add PA_HPUX port. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add PA_HPUX rules. + * configure: Regenerate. + * src/pa/ffitarget.h: Rename linux target to PA_LINUX. + Add PA_HPUX and PA64_HPUX. + Rename FFI_LINUX ABI to FFI_PA32 ABI. + (FFI_TRAMPOLINE_SIZE): Define for 32-bit HP-UX targets. + (FFI_TYPE_SMALL_STRUCT2): Define. + (FFI_TYPE_SMALL_STRUCT4): Likewise. + (FFI_TYPE_SMALL_STRUCT8): Likewise. + (FFI_TYPE_SMALL_STRUCT3): Redefine. + (FFI_TYPE_SMALL_STRUCT5): Likewise. + (FFI_TYPE_SMALL_STRUCT6): Likewise. + (FFI_TYPE_SMALL_STRUCT7): Likewise. 
+ * src/pa/ffi.c (ROUND_DOWN): Delete. + (fldw, fstw, fldd, fstd): Use '__asm__'. + (ffi_struct_type): Add support for FFI_TYPE_SMALL_STRUCT2, + FFI_TYPE_SMALL_STRUCT4 and FFI_TYPE_SMALL_STRUCT8. + (ffi_prep_args_LINUX): Rename to ffi_prep_args_pa32. Update comment. + Simplify incrementing of stack slot variable. Change type of local + 'n' to unsigned int. + (ffi_size_stack_LINUX): Rename to ffi_size_stack_pa32. Handle long + double on PA_HPUX. + (ffi_prep_cif_machdep): Likewise. + (ffi_call): Likewise. + (ffi_closure_inner_LINUX): Rename to ffi_closure_inner_pa32. Change + return type to ffi_status. Simplify incrementing of stack slot + variable. Only copy floating point argument registers when PA_LINUX + is true. Reformat debug statement. + Add support for FFI_TYPE_SMALL_STRUCT2, FFI_TYPE_SMALL_STRUCT4 and + FFI_TYPE_SMALL_STRUCT8. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Add 'extern' to + declaration. + (ffi_prep_closure): Make linux trampoline conditional on PA_LINUX. + Add nops to cache flush. Add trampoline for PA_HPUX. + * src/pa/hpux32.S: New file. + * src/pa/linux.S (ffi_call_LINUX): Rename to ffi_call_pa32. Rename + ffi_prep_args_LINUX to ffi_prep_args_pa32. + Localize labels. Add support for 2, 4 and 8-byte small structs. Handle + unaligned destinations in 3, 5, 6 and 7-byte small structs. Order + argument type checks so that common argument types appear first. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Rename + ffi_closure_inner_LINUX to ffi_closure_inner_pa32. + +2006-03-24 Alan Modra + + * src/powerpc/ffitarget.h (enum ffi_abi): Add FFI_LINUX. Default + for 32-bit using IBM extended double format. Fix FFI_LAST_ABI. + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Handle linux variant of + FFI_TYPE_LONGDOUBLE. + (ffi_prep_args64): Assert using IBM extended double. + (ffi_prep_cif_machdep): Don't munge FFI_TYPE_LONGDOUBLE type. + Handle FFI_LINUX FFI_TYPE_LONGDOUBLE return and args. + (ffi_call): Handle FFI_LINUX. + (ffi_closure_helper_SYSV): Non FFI_LINUX long double return needs + gpr3 return pointer as for struct return. Handle FFI_LINUX + FFI_TYPE_LONGDOUBLE return and args. Don't increment "nf" + unnecessarily. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Load both f1 and f2 + for FFI_TYPE_LONGDOUBLE. Move epilogue insns into case table. + Don't use r6 as pointer to results, instead use sp offset. Don't + make a special call to load lr with case table address, instead + use offset from previous call. + * src/powerpc/sysv.S (ffi_call_SYSV): Save long double return. + * src/powerpc/linux64.S (ffi_call_LINUX64): Simplify long double + return. + +2006-03-15 Kaz Kojima + + * src/sh64/ffi.c (ffi_prep_cif_machdep): Handle float arguments + passed with FP registers correctly. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S: Likewise. + +2006-03-01 Andreas Tobler + + * testsuite/libffi.special/unwindtest.cc (closure_test_fn): Mark cif, + args and userdata unused. + (closure_test_fn1): Mark cif and userdata unused. + (main): Remove unused res. + +2006-02-28 Andreas Tobler + + * testsuite/libffi.call/call.exp: Adjust FSF address. Add test runs for + -O2, -O3, -Os and the warning flags -W -Wall. + * testsuite/libffi.special/special.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Add an __UNUSED__ macro to mark + unused parameter unused for gcc or else do nothing. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.call/cls_12byte.c (cls_struct_12byte_gn): Mark cif + and userdata unused. 
+ * testsuite/libffi.call/cls_16byte.c (cls_struct_16byte_gn): Likewise. + * testsuite/libffi.call/cls_18byte.c (cls_struct_18byte_gn): Likewise. + * testsuite/libffi.call/cls_19byte.c (cls_struct_19byte_gn): Likewise. + * testsuite/libffi.call/cls_1_1byte.c (cls_struct_1_1byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte1.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_24byte.c (cls_struct_24byte_gn): Likewise. + * testsuite/libffi.call/cls_2byte.c (cls_struct_2byte_gn): Likewise. + * testsuite/libffi.call/cls_3_1byte.c (cls_struct_3_1byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte1.c (cls_struct_3byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte2.c (cls_struct_3byte_gn1): Likewise. + * testsuite/libffi.call/cls_4_1byte.c (cls_struct_4_1byte_gn): Likewise. + * testsuite/libffi.call/cls_4byte.c (cls_struct_4byte_gn): Likewise. + * testsuite/libffi.call/cls_5_1_byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_5byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_64byte.c (cls_struct_64byte_gn): Likewise. + * testsuite/libffi.call/cls_6_1_byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_6byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_7_1_byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_7byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_8byte.c (cls_struct_8byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte1.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte2.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_align_double.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_float.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_longdouble.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_pointer.c (cls_struct_align_fn): Cast + void* to avoid compiler warning. + (main): Likewise. + (cls_struct_align_gn): Mark cif and userdata unused. + * testsuite/libffi.call/cls_align_sint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint64.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_double.c (cls_ret_double_fn): Likewise. + * testsuite/libffi.call/cls_float.c (cls_ret_float_fn): Likewise. + * testsuite/libffi.call/cls_multi_schar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_uchar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_schar.c (cls_ret_schar_fn): Mark cif and + userdata unused. 
+ (cls_ret_schar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sint.c (cls_ret_sint_fn): Mark cif and + userdata unused. + (cls_ret_sint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sshort.c (cls_ret_sshort_fn): Mark cif and + userdata unused. + (cls_ret_sshort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uchar.c (cls_ret_uchar_fn): Mark cif and + userdata unused. + (cls_ret_uchar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Mark cif and + userdata unused. + (cls_ret_uint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_ulonglong.c (cls_ret_ulonglong_fn): Mark cif + and userdata unused. + * testsuite/libffi.call/cls_ushort.c (cls_ret_ushort_fn): Mark cif and + userdata unused. + (cls_ret_ushort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/float.c (floating): Remove unused parameter e. + * testsuite/libffi.call/float1.c (main): Remove unused variable i. + Cleanup white spaces. + * testsuite/libffi.call/negint.c (checking): Remove unused variable i. + * testsuite/libffi.call/nested_struct.c (cls_struct_combined_gn): Mark + cif and userdata unused. + * testsuite/libffi.call/nested_struct1.c (cls_struct_combined_gn): + Likewise. + * testsuite/libffi.call/nested_struct10.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct2.c (B_fn): Adjust printf + formatters to silence gcc. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct3.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct4.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct5.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct6.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct7.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct8.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct9.c (B_gn): Likewise. + * testsuite/libffi.call/problem1.c (stub): Likewise. + * testsuite/libffi.call/pyobjc-tc.c (main): Cast the result to silence + gcc. + * testsuite/libffi.call/return_fl2.c (return_fl): Add the note mentioned + in the last commit for this test case in the test case itself. + * testsuite/libffi.call/closure_fn0.c (closure_test_fn0): Mark cif as + unused. + * testsuite/libffi.call/closure_fn1.c (closure_test_fn1): Likewise. + * testsuite/libffi.call/closure_fn2.c (closure_test_fn2): Likewise. + * testsuite/libffi.call/closure_fn3.c (closure_test_fn3): Likewise. + * testsuite/libffi.call/closure_fn4.c (closure_test_fn0): Likewise. + * testsuite/libffi.call/closure_fn5.c (closure_test_fn5): Likewise. + * testsuite/libffi.call/closure_fn6.c (closure_test_fn0): Likewise. + +2006-02-22 Kaz Kojima + + * src/sh/sysv.S: Fix register numbers in the FDE for + ffi_closure_SYSV. + +2006-02-20 Andreas Tobler + + * testsuite/libffi.call/return_fl2.c (return_fl): Remove static + declaration to avoid a false negative on ix86. See PR323. + +2006-02-18 Kaz Kojima + + * src/sh/ffi.c (ffi_closure_helper_SYSV): Remove unused variable + and cast integer to void * if needed. Update the pointer to + the FP register saved area correctly. + +2006-02-17 Andreas Tobler + + * testsuite/libffi.call/nested_struct6.c: XFAIL this test until PR25630 + is fixed. + * testsuite/libffi.call/nested_struct4.c: Likewise. 
+ +2006-02-16 Andreas Tobler + + * testsuite/libffi.call/return_dbl.c: New test case. + * testsuite/libffi.call/return_dbl1.c: Likewise. + * testsuite/libffi.call/return_dbl2.c: Likewise. + * testsuite/libffi.call/return_fl.c: Likewise. + * testsuite/libffi.call/return_fl1.c: Likewise. + * testsuite/libffi.call/return_fl2.c: Likewise. + * testsuite/libffi.call/return_fl3.c: Likewise. + * testsuite/libffi.call/closure_fn6.c: Likewise. + + * testsuite/libffi.call/nested_struct2.c: Remove ffi_type_mylong + definition. + * testsuite/libffi.call/ffitest.h: Add ffi_type_mylong definition + here to be used by other test cases too. + + * testsuite/libffi.call/nested_struct10.c: New test case. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + +2006-01-21 Andreas Tobler + + * configure.ac: Enable libffi for sparc64-*-freebsd*. + * configure: Rebuilt. + +2006-01-18 Jakub Jelinek + + * src/powerpc/sysv.S (smst_two_register): Don't call __ashldi3, + instead do the shifting inline. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't compute %r5 + shift count unconditionally. Simplify load sequences for 1, 2, 3, 4 + and 8 byte structs, for the remaining struct sizes don't call + __lshrdi3, instead do the shifting inline. + +2005-12-07 Thiemo Seufer + + * src/mips/ffitarget.h: Remove obsolete sgidefs.h include. Add + missing parentheses. + * src/mips/o32.S (ffi_call_O32): Code formatting. Define + and use A3_OFF, FP_OFF, RA_OFF. Micro-optimizations. + (ffi_closure_O32): Likewise, but with newly defined A3_OFF2, + A2_OFF2, A1_OFF2, A0_OFF2, RA_OFF2, FP_OFF2, S0_OFF2, GP_OFF2, + V1_OFF2, V0_OFF2, FA_1_1_OFF2, FA_1_0_OFF2, FA_0_1_OFF2, + FA_0_0_OFF2. + * src/mips/ffi.c (ffi_prep_args): Code formatting. Fix + endianness bugs. + (ffi_prep_closure): Improve trampoline instruction scheduling. + (ffi_closure_mips_inner_O32): Fix endianness bugs. + +2005-12-03 Alan Modra + + * src/powerpc/ffi.c: Formatting. + (ffi_prep_args_SYSV): Avoid possible aliasing problems by using unions. + (ffi_prep_args64): Likewise. + +2005-09-30 Geoffrey Keating + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): For + darwin, use -shared-libgcc not -lgcc_s, and explain why. + +2005-09-26 Tom Tromey + + * testsuite/libffi.call/float1.c (value_type): New typedef. + (CANARY): New define. + (main): Check for result buffer overflow. + * src/powerpc/linux64.S: Handle linux64 long double returns. + * src/powerpc/ffi.c (FLAG_RETURNS_128BITS): New constant. + (ffi_prep_cif_machdep): Handle linux64 long double returns. + +2005-08-25 Alan Modra + + PR target/23404 + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Correct placement of stack + homed fp args. + (ffi_status ffi_prep_cif_machdep): Correct stack sizing for same. + +2005-08-11 Jakub Jelinek + + * configure.ac (HAVE_HIDDEN_VISIBILITY_ATTRIBUTE): New test. + (AH_BOTTOM): Add FFI_HIDDEN definition. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * src/powerpc/ffi.c (hidden): Remove. + (ffi_closure_LINUX64, ffi_prep_args64, ffi_call_LINUX64, + ffi_closure_helper_LINUX64): Use FFI_HIDDEN instead of hidden. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64, + .ffi_closure_LINUX64): Use FFI_HIDDEN instead of .hidden. 
+ * src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV): Remove, + add FFI_HIDDEN to its prototype. + (ffi_closure_SYSV_inner): New. + * src/x86/sysv.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + * src/x86/win32.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + +2005-08-10 Alfred M. Szmidt + + PR libffi/21819: + * configure: Rebuilt. + * configure.ac: Handle i*86-*-gnu*. + +2005-08-09 Jakub Jelinek + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Use + DW_CFA_offset_extended_sf rather than + DW_CFA_GNU_negative_offset_extended. + * src/powerpc/sysv.S (ffi_call_SYSV): Likewise. + +2005-07-22 SUGIOKA Toshinobu + + * src/sh/sysv.S (ffi_call_SYSV): Stop argument popping correctly + on sh3. + (ffi_closure_SYSV): Change the stack layout for sh3 struct argument. + * src/sh/ffi.c (ffi_prep_args): Fix sh3 argument copy, when it is + partially on register. + (ffi_closure_helper_SYSV): Likewise. + (ffi_prep_cif_machdep): Don't set too many cif->flags. + +2005-07-20 Kaz Kojima + + * src/sh/ffi.c (ffi_call): Handle small structures correctly. + Remove empty line. + * src/sh64/ffi.c (simple_type): Remove. + (return_type): Handle small structures correctly. + (ffi_prep_args): Likewise. + (ffi_call): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S (ffi_call_SYSV): Handle 1, 2 and 4-byte return. + Emit position independent code if PIC and remove wrong datalabel + prefixes from EH data. + +2005-07-19 Andreas Tobler + + * Makefile.am (nodist_libffi_la_SOURCES): Add POWERPC_FREEBSD. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add POWERPC_FREEBSD rules. + * configure: Regenerate. + * src/powerpc/ffitarget.h: Add POWERPC_FREEBSD rules. + (FFI_SYSV_TYPE_SMALL_STRUCT): Define. + * src/powerpc/ffi.c: Add flags to handle small structure returns + in ffi_call_SYSV. + (ffi_prep_cif_machdep): Handle small structures for SYSV 4 ABI. + Aka FFI_SYSV. + (ffi_closure_helper_SYSV): Likewise. + * src/powerpc/ppc_closure.S: Add return types for small structures. + * src/powerpc/sysv.S: Add bits to handle small structures for + final SYSV 4 ABI. + +2005-07-10 Andreas Tobler + + * testsuite/libffi.call/cls_5_1_byte.c: New test file. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + +2005-07-05 Randolph Chung + + * src/pa/ffi.c (ffi_struct_type): Rename FFI_TYPE_SMALL_STRUCT1 + as FFI_TYPE_SMALL_STRUCT3. Break out handling for 5-7 byte + structures. Kill compilation warnings. + (ffi_closure_inner_LINUX): Print return values as hex in debug + message. Rename FFI_TYPE_SMALL_STRUCT1 as FFI_TYPE_SMALL_STRUCT3. + Properly handle 5-7 byte structure returns. + * src/pa/ffitarget.h (FFI_TYPE_SMALL_STRUCT1) + (FFI_TYPE_SMALL_STRUCT2): Remove. + (FFI_TYPE_SMALL_STRUCT3, FFI_TYPE_SMALL_STRUCT5) + (FFI_TYPE_SMALL_STRUCT6, FFI_TYPE_SMALL_STRUCT7): Define. + * src/pa/linux.S: Mark source file as using PA1.1 assembly. + (checksmst1, checksmst2): Remove. + (checksmst3): Optimize handling of 3-byte struct returns. + (checksmst567): Properly handle 5-7 byte struct returns. + +2005-06-15 Rainer Orth + + PR libgcj/21943 + * src/mips/n32.S: Enforce PIC code. + * src/mips/o32.S: Likewise. + +2005-06-15 Rainer Orth + + * configure.ac: Treat i*86-*-solaris2.10 and up as X86_64. + * configure: Regenerate. + +2005-06-01 Alan Modra + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't use JUMPTARGET + to call ffi_closure_helper_SYSV. Append @local instead. 
+ * src/powerpc/sysv.S (ffi_call_SYSV): Likewise for ffi_prep_args_SYSV. + +2005-05-17 Kelley Cook + + * configure.ac: Use AC_C_BIGENDIAN instead of AC_C_BIGENDIAN_CROSS. + Use AC_CHECK_SIZEOF instead of AC_COMPILE_CHECK_SIZEOF. + * Makefile.am (ACLOCAL_AMFLAGS): Remove -I ../config. + * aclocal.m4, configure, fficonfig.h.in, Makefile.in, + include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2005-05-09 Mike Stump + + * configure: Regenerate. + +2005-05-08 Richard Henderson + + PR libffi/21285 + * src/alpha/osf.S: Update unwind into to match code. + +2005-05-04 Andreas Degert + Richard Henderson + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Save sse-used flag in + bit 11 of flags. + (ffi_call): Mask return type field. Pass ssecount to ffi_call_unix64. + (ffi_prep_closure): Set carry bit if sse-used flag set. + * src/x86/unix64.S (ffi_call_unix64): Add ssecount argument. + Only load sse registers if ssecount non-zero. + (ffi_closure_unix64): Only save sse registers if carry set on entry. + +2005-04-29 Ralf Corsepius + + * configure.ac: Add i*86-*-rtems*, sparc*-*-rtems*, + powerpc-*rtems*, arm*-*-rtems*, sh-*-rtems*. + * configure: Regenerate. + +2005-04-20 Hans-Peter Nilsson + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): In regsub use, + have Tcl8.3-compatible intermediate variable. + +2005-04-18 Simon Posnjak + Hans-Peter Nilsson + + * Makefile.am: Add CRIS support. + * configure.ac: Likewise. + * Makefile.in, configure, testsuite/Makefile.in, + include/Makefile.in: Regenerate. + * src/cris: New directory. + * src/cris/ffi.c, src/cris/sysv.S, src/cris/ffitarget.h: New files. + * src/prep_cif.c (ffi_prep_cif): Wrap in #ifndef __CRIS__. + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): Replace \n with + \r?\n in output tests. + +2005-04-12 Mike Stump + + * configure: Regenerate. + +2005-03-30 Hans Boehm + + * src/ia64/ffitarget.h (ffi_arg): Use long long instead of DI. + +2005-03-30 Steve Ellcey + + * src/ia64/ffitarget.h (ffi_arg) ADD DI attribute. + (ffi_sarg) Ditto. + * src/ia64/unix.S (ffi_closure_unix): Extend gp + to 64 bits in ILP32 mode. + Load 64 bits even for short data. + +2005-03-23 Mike Stump + + * src/powerpc/darwin.S: Update for -m64 multilib. + * src/powerpc/darwin_closure.S: Likewise. + +2005-03-21 Zack Weinberg + + * configure.ac: Do not invoke TL_AC_GCC_VERSION. + Do not set tool_include_dir. + * aclocal.m4, configure, Makefile.in, testsuite/Makefile.in: + Regenerate. + * include/Makefile.am: Set gcc_version and toollibffidir. + * include/Makefile.in: Regenerate. + +2005-02-22 Andrew Haley + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Bump alignment to + odd-numbered register pairs for 64-bit integer types. + +2005-02-23 Andreas Tobler + + PR libffi/20104 + * testsuite/libffi.call/return_ll1.c: New test case. + +2005-02-11 Janis Johnson + + * testsuite/libffi.call/cls_align_longdouble.c: Remove dg-options. + * testsuite/libffi.call/float.c: Ditto. + * testsuite/libffi.call/float2.c: Ditto. + * testsuite/libffi.call/float3.c: Ditto. + +2005-02-08 Andreas Tobler + + * src/frv/ffitarget.h: Remove PPC stuff which does not belong to frv. + +2005-01-12 Eric Botcazou + + * testsuite/libffi.special/special.exp (cxx_options): Add + -shared-libgcc. + +2004-12-31 Richard Henderson + + * src/types.c (FFI_AGGREGATE_TYPEDEF): Remove. + (FFI_TYPEDEF): Rename from FFI_INTEGRAL_TYPEDEF. Replace size and + offset parameters with a type parameter; deduce size and structure + alignment. Update all users. 
+ +2004-12-31 Richard Henderson + + * src/types.c (FFI_TYPE_POINTER): Define with sizeof. + (FFI_TYPE_LONGDOUBLE): Fix for ia64. + * src/ia64/ffitarget.h (struct ffi_ia64_trampoline_struct): Move + into ffi_prep_closure. + * src/ia64/ia64_flags.h, src/ia64/ffi.c, src/ia64/unix.S: Rewrite + from scratch. + +2004-12-27 Richard Henderson + + * src/x86/unix64.S: Fix typo in unwind info. + +2004-12-25 Richard Henderson + + * src/x86/ffi64.c (struct register_args): Rename from stackLayout. + (enum x86_64_reg_class): Add X86_64_COMPLEX_X87_CLASS. + (merge_classes): Check for it. + (SSE_CLASS_P): New. + (classify_argument): Pass byte_offset by value; perform all updates + inside struct case. + (examine_argument): Add classes argument; handle + X86_64_COMPLEX_X87_CLASS. + (ffi_prep_args): Merge into ... + (ffi_call): ... here. Share stack frame with ffi_call_unix64. + (ffi_prep_cif_machdep): Setup cif->flags for proper structure return. + (ffi_fill_return_value): Remove. + (ffi_prep_closure): Remove dead assert. + (ffi_closure_unix64_inner): Rename from ffi_closure_UNIX64_inner. + Rewrite to use struct register_args instead of va_list. Create + flags for handling structure returns. + * src/x86/unix64.S: Remove dead strings. + (ffi_call_unix64): Rename from ffi_call_UNIX64. Rewrite to share + stack frame with ffi_call. Handle structure returns properly. + (float2sse, floatfloat2sse, double2sse): Remove. + (sse2float, sse2double, sse2floatfloat): Remove. + (ffi_closure_unix64): Rename from ffi_closure_UNIX64. Rewrite + to handle structure returns properly. + +2004-12-08 David Edelsohn + + * Makefile.am (AM_MAKEFLAGS): Remove duplicate LIBCFLAGS and + PICFLAG. + * Makefile.in: Regenerated. + +2004-12-02 Richard Sandiford + + * configure.ac: Use TL_AC_GCC_VERSION to set gcc_version. + * configure, aclocal.m4, Makefile.in: Regenerate. + * include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2004-11-29 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-25 Kelley Cook + + * configure: Regenerate for libtool reversion. + +2004-11-24 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-23 John David Anglin + + * testsuite/lib/libffi-dg.exp: Use new procs in target-libpath.exp. + +2004-11-23 Richard Sandiford + + * src/mips/o32.S (ffi_call_O32, ffi_closure_O32): Use jalr instead + of jal. Use an absolute encoding for the frame information. + +2004-11-23 Kelley Cook + + * Makefile.am: Remove no-dependencies. Add ACLOCAL_AMFLAGS. + * acinclude.m4: Delete logic for sincludes. + * aclocal.m4, Makefile.in, configure: Regenerate. + * include/Makefile: Likewise. + * testsuite/Makefile: Likewise. + +2004-11-22 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_closure): Align doubles and 64-bit integers + on a 8-byte boundary. + * src/sparc/v8.S (ffi_closure_v8): Reserve frame space for arguments. + +2004-10-27 Richard Earnshaw + + * src/arm/ffi.c (ffi_prep_cif_machdep): Handle functions that return + long long values. Round stack allocation to a multiple of 8 bytes + for ATPCS compatibility. + * src/arm/sysv.S (ffi_call_SYSV): Rework to avoid use of APCS register + names. Handle returning long long types. Add Thumb and interworking + support. Improve soft-float code. + +2004-10-27 Richard Earnshaw + + * testsuite/lib/libffi-db.exp (load_gcc_lib): New function. + (libffi_exit): New function. + (libffi_init): Build the testglue wrapper if needed. + +2004-10-25 Eric Botcazou + + PR other/18138 + * testsuite/lib/libffi-dg.exp: Accept more than one multilib libgcc. 
+ +2004-10-25 Kazuhiro Inaoka + + * src/m32r/libffitarget.h (FFI_CLOSURES): Set to 0. + +2004-10-20 Kaz Kojima + + * src/sh/sysv.S (ffi_call_SYSV): Don't align for double data. + * testsuite/libffi.call/float3.c: New test case. + +2004-10-18 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure): Set T bit in trampoline for + the function returning a structure pointed with R2. + * src/sh/sysv.S (ffi_closure_SYSV): Use R2 as the pointer to + the structure return value if T bit set. Emit position + independent code and EH data if PIC. + +2004-10-13 Kazuhiro Inaoka + + * Makefile.am: Add m32r support. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * configure: Regenerate. + * src/types.c: Add m32r port to FFI_INTERNAL_TYPEDEF + (uint64, sint64, double, longdouble) + * src/m32r: New directory. + * src/m32r/ffi.c: New file. + * src/m32r/sysv.S: Likewise. + * src/m32r/ffitarget.h: Likewise. + +2004-10-02 Kaz Kojima + + * testsuite/libffi.call/negint.c: New test case. + +2004-09-14 H.J. Lu + + PR libgcj/17465 + * testsuite/lib/libffi-dg.exp: Don't use global ld_library_path. + Set up LD_LIBRARY_PATH, SHLIB_PATH, LD_LIBRARYN32_PATH, + LD_LIBRARY64_PATH, LD_LIBRARY_PATH_32, LD_LIBRARY_PATH_64 and + DYLD_LIBRARY_PATH. + +2004-09-05 Andreas Tobler + + * testsuite/libffi.call/many_win32.c: Remove whitespaces. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/return_ll.c: Remove unused var. Cleanup + whitespaces. + * testsuite/libffi.call/return_sc.c: Likewise. + * testsuite/libffi.call/return_uc.c: Likewise. + +2004-09-05 Andreas Tobler + + * src/powerpc/darwin.S: Fix comments and indentation. + * src/powerpc/darwin_closure.S: Likewise. + +2004-09-02 Andreas Tobler + + * src/powerpc/ffi_darwin.c: Add flag for longdouble return values. + (ffi_prep_args): Handle longdouble arguments. + (ffi_prep_cif_machdep): Set flags for longdouble. Calculate space for + longdouble. + (ffi_closure_helper_DARWIN): Add closure handling for longdouble. + * src/powerpc/darwin.S (_ffi_call_DARWIN): Add handling of longdouble + values. + * src/powerpc/darwin_closure.S (_ffi_closure_ASM): Likewise. + * src/types.c: Defined longdouble size and alignment for darwin. + +2004-09-02 Andreas Tobler + + * src/powerpc/aix.S: Remove whitespaces. + * src/powerpc/aix_closure.S: Likewise. + * src/powerpc/asm.h: Likewise. + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffitarget.h: Likewise. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + +2004-08-30 Anthony Green + + * Makefile.am: Add frv support. + * Makefile.in, testsuite/Makefile.in: Rebuilt. + * configure.ac: Read configure.host. + * configure.in: Read configure.host. + * configure.host: New file. frv-elf needs libgloss. + * include/ffi.h.in: Force ffi_closure to have a nice big (8) + alignment. This is needed for frv and shouldn't harm the others. + * include/ffi_common.h (ALIGN_DOWN): New macro. + * src/frv/ffi.c, src/frv/ffitarget.h, src/frv/eabi.S: New files. + +2004-08-24 David Daney + + * testsuite/libffi.call/closure_fn0.c: Xfail mips64* instead of mips*. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise.
+ * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_sshort.c: Likewise. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise and set return value + to zero. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + +2004-08-23 David Daney + + PR libgcj/13141 + * src/mips/ffitarget.h (FFI_O32_SOFT_FLOAT): New ABI. + * src/mips/ffi.c (ffi_prep_args): Fix alignment calculation. + (ffi_prep_cif_machdep): Handle FFI_O32_SOFT_FLOAT floating point + parameters and return types. + (ffi_call): Handle FFI_O32_SOFT_FLOAT ABI. + (ffi_prep_closure): Ditto. + (ffi_closure_mips_inner_O32): Handle FFI_O32_SOFT_FLOAT ABI, fix + alignment calculations. + * src/mips/o32.S (ffi_closure_O32): Don't use floating point + instructions if FFI_O32_SOFT_FLOAT, make stack frame ABI compliant. + +2004-08-14 Casey Marshall + + * src/mips/ffi.c (ffi_pref_cif_machdep): set `cif->flags' to + contain `FFI_TYPE_UINT64' as return type for any 64-bit + integer (O32 ABI only). + (ffi_prep_closure): new function. + (ffi_closure_mips_inner_O32): new function. 
+ * src/mips/ffitarget.h: Define `FFI_CLOSURES' and + `FFI_TRAMPOLINE_SIZE' appropriately if the ABI is o32. + * src/mips/o32.S (ffi_call_O32): add labels for .eh_frame. Return + 64 bit integers correctly. + (ffi_closure_O32): new function. + Added DWARF-2 unwind info for both functions. + +2004-08-10 Andrew Haley + + * src/x86/ffi64.c (ffi_prep_args ): 8-align all stack arguments. + +2004-08-01 Robert Millan + + * configure.ac: Detect knetbsd-gnu and kfreebsd-gnu. + * configure: Regenerate. + +2004-07-30 Maciej W. Rozycki + + * acinclude.m4 (AC_FUNC_MMAP_BLACKLIST): Check for + and mmap() explicitly instead of relying on preset autoconf cache + variables. + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2004-07-11 Ulrich Weigand + + * src/s390/ffi.c (ffi_prep_args): Fix C aliasing violation. + (ffi_check_float_struct): Remove unused prototype. + +2004-06-30 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (flush_icache): ';' is a comment + character on Darwin, use '\n\t' instead. + +2004-06-26 Matthias Klose + + * libtool-version: Fix typo in revision/age. + +2004-06-17 Matthias Klose + + * libtool-version: New. + * Makefile.am (libffi_la_LDFLAGS): Use -version-info for soname. + * Makefile.in: Regenerate. + +2004-06-15 Paolo Bonzini + + * Makefile.am: Remove useless multilib rules. + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate with automake 1.8.5. + * configure.ac: Remove useless multilib configury. + * configure: Regenerate. + +2004-06-15 Paolo Bonzini + + * .cvsignore: New file. + +2004-06-10 Jakub Jelinek + + * src/ia64/unix.S (ffi_call_unix): Insert group barrier break + fp_done. + (ffi_closure_UNIX): Fix f14/f15 adjustment if FLOAT_SZ is ever + changed from 8. + +2004-06-06 Sean McNeil + + * configure.ac: Add x86_64-*-freebsd* support. + * configure: Regenerate. + +2004-04-26 Joe Buck + + Bug 15093 + * configure.ac: Test for existence of mmap and sys/mman.h before + checking blacklist. Fix suggested by Jim Wilson. + * configure: Regenerate. + +2004-04-26 Matt Austern + + * src/powerpc/darwin.S: Go through a non-lazy pointer for initial + FDE location. + * src/powerpc/darwin_closure.S: Likewise. + +2004-04-24 Andreas Tobler + + * testsuite/libffi.call/cls_multi_schar.c (main): Fix initialization + error. Reported by Thomas Heller . + * testsuite/libffi.call/cls_multi_sshort.c (main): Likewise. + * testsuite/libffi.call/cls_multi_ushort.c (main): Likewise. + +2004-03-20 Matthias Klose + + * src/pa/linux.S: Fix typo. + +2004-03-19 Matthias Klose + + * Makefile.am: Update. + * Makefile.in: Regenerate. + * src/pa/ffi.h.in: Remove. + * src/pa/ffitarget.h: New file. + +2004-02-10 Randolph Chung + + * Makefile.am: Add PA support. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * configure.ac: Add PA target. + * configure: Regenerate. + * src/pa/ffi.c: New file. + * src/pa/ffi.h.in: Add PA support. + * src/pa/linux.S: New file. + * prep_cif.c: Add PA support. + +2004-03-16 Hosaka Yuji + + * src/types.c: Fix alignment size of X86_WIN32 case int64 and + double. + * src/x86/ffi.c (ffi_prep_args): Replace ecif->cif->rtype->type + with ecif->cif->flags. + (ffi_call, ffi_prep_incoming_args_SYSV): Replace cif->rtype->type + with cif->flags. + (ffi_prep_cif_machdep): Add X86_WIN32 struct case. + (ffi_closure_SYSV): Add 1 or 2-bytes struct case for X86_WIN32. + * src/x86/win32.S (retstruct1b, retstruct2b, sc_retstruct1b, + sc_retstruct2b): Add for 1 or 2-bytes struct case. + +2004-03-15 Kelley Cook + + * configure.in: Rename file to ... + * configure.ac: ... 
this. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2004-03-12 Matt Austern + + * src/powerpc/darwin.S: Fix EH information so it corresponds to + changes in EH format resulting from addition of linkonce support. + * src/powerpc/darwin_closure.S: Likewise. + +2004-03-11 Andreas Tobler + Paolo Bonzini + + * Makefile.am (AUTOMAKE_OPTIONS): Set them. + Remove VPATH. Remove rules for object files. Remove multilib support. + (AM_CCASFLAGS): Add. + * configure.in (AC_CONFIG_HEADERS): Relace AM_CONFIG_HEADER. + (AC_PREREQ): Bump version to 2.59. + (AC_INIT): Fill with version info and bug address. + (ORIGINAL_LD_FOR_MULTILIBS): Remove. + (AM_ENABLE_MULTILIB): Use this instead of AC_ARG_ENABLE. + De-precious CC so that the right flags are passed down to multilibs. + (AC_MSG_ERROR): Replace obsolete macro AC_ERROR. + (AC_CONFIG_FILES): Replace obsolete macro AC_LINK_FILES. + (AC_OUTPUT): Reorganize the output with AC_CONFIG_COMMANDS. + * configure: Rebuilt. + * aclocal.m4: Likewise. + * Makefile.in, include/Makefile.in, testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2004-03-11 Andreas Schwab + + * src/ia64/ffi.c (ffi_prep_incoming_args_UNIX): Get floating point + arguments from fp registers only for the first 8 parameter slots. + Don't convert a float parameter when passed in memory. + +2004-03-09 Hans-Peter Nilsson + + * configure: Regenerate for config/accross.m4 correction. + +2004-02-25 Matt Kraai + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Change + ecif->cif->bytes to bytes. + (ffi_prep_cif_machdep): Add braces around nested if statement. + +2004-02-09 Alan Modra + + * src/types.c (pointer): POWERPC64 has 8 byte pointers. + + * src/powerpc/ffi.c (ffi_prep_args64): Correct long double handling. + (ffi_closure_helper_LINUX64): Fix typo. + * testsuite/libffi.call/cls_align_longdouble.c: Pass -mlong-double-128 + for powerpc64-*-*. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + +2004-02-08 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep ): Correct + long double function return and long double arg handling. + (ffi_closure_helper_LINUX64): Formatting. Delete unused "ng" var. + Use "end_pfr" instead of "nf". Correct long double handling. + Localise "temp". + * src/powerpc/linux64.S (ffi_call_LINUX64): Save f2 long double + return value. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Allocate + space for long double return value. Adjust stack frame and offsets. + Load f2 long double return. + +2004-02-07 Alan Modra + + * src/types.c: Use 16 byte long double for POWERPC64. + +2004-01-25 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_args_v9): Shift the parameter array + when the structure return address is passed in %o0. + (ffi_V9_return_struct): Rename into ffi_v9_layout_struct. + (ffi_v9_layout_struct): Align the field following a nested structure + on a word boundary. Use memmove instead of memcpy. + (ffi_call): Update call to ffi_V9_return_struct. + (ffi_prep_closure): Define 'ctx' only for V8. + (ffi_closure_sparc_inner): Clone into ffi_closure_sparc_inner_v8 + and ffi_closure_sparc_inner_v9. + (ffi_closure_sparc_inner_v8): Return long doubles by reference. + Always skip the structure return address. For structures and long + doubles, copy the argument directly. + (ffi_closure_sparc_inner_v9): Skip the structure return address only + if required. Shift the maximum floating-point slot accordingly. 
For + big structures, copy the argument directly; otherwise, left-justify the + argument and call ffi_v9_layout_struct to lay out the structure on + the stack. + * src/sparc/v8.S: Undef STACKFRAME before defining it. + (ffi_closure_v8): Pass the structure return address. Update call to + ffi_closure_sparc_inner_v8. Short-circuit FFI_TYPE_INT handling. + Skip the 'unimp' insn when returning long doubles and structures. + * src/sparc/v9.S: Undef STACKFRAME before defining it. + (ffi_closure_v9): Increase the frame size by 2 words. Short-circuit + FFI_TYPE_INT handling. Load structures both in integers and + floating-point registers on return. + * README: Update status of the SPARC port. + +2004-01-24 Andreas Tobler + + * testsuite/libffi.call/pyobjc-tc.c (main): Treat result value + as of type ffi_arg. + * testsuite/libffi.call/struct3.c (main): Fix CHECK. + +2004-01-22 Ulrich Weigand + + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Treat result + value as of type ffi_arg, not unsigned int. + +2004-01-21 Michael Ritzert + + * ffi64.c (ffi_prep_args): Cast the RHS of an assignment instead + of the LHS. + +2004-01-12 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_32 for + Solaris. + +2004-01-08 Rainer Orth + + * testsuite/libffi.call/ffitest.h (allocate_mmap): Cast MAP_FAILED + to void *. + +2003-12-10 Richard Henderson + + * testsuite/libffi.call/cls_align_pointer.c: Cast pointers to + size_t instead of int. + +2003-12-04 Hosaka Yuji + + * testsuite/libffi.call/many_win32.c: Include . + * testsuite/libffi.call/many_win32.c (main): Replace variable + int i with unsigned long ul. + + * testsuite/libffi.call/cls_align_uint64.c: New test case. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + +2003-12-02 Hosaka Yuji + + PR other/13221 + * src/x86/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): + Align arguments to 32 bits. + +2003-12-01 Andreas Tobler + + PR other/13221 + * testsuite/libffi.call/cls_multi_sshort.c: New test case. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Cosmetics. + +2003-11-26 Kaveh R. Ghazi + + * testsuite/libffi.call/ffitest.h: Include . + * testsuite/libffi.special/ffitestcxx.h: Likewise. + +2003-11-22 Andreas Tobler + + * Makefile.in: Rebuilt. + * configure: Likewise. + * testsuite/libffi.special/unwindtest.cc: Convert the mmap to + the right type. + +2003-11-21 Andreas Jaeger + Andreas Tobler + + * acinclude.m4: Add AC_FUNC_MMAP_BLACKLIST. + * configure.in: Call AC_FUNC_MMAP_BLACKLIST. + * Makefile.in: Rebuilt. + * aclocal.m4: Likewise. + * configure: Likewise. + * fficonfig.h.in: Likewise. + * testsuite/lib/libffi-dg.exp: Add include dir. + * testsuite/libffi.call/ffitest.h: Add MMAP definitions. + * testsuite/libffi.special/ffitestcxx.h: Likewise. 
+ * testsuite/libffi.call/closure_fn0.c: Use MMAP functionality + for ffi_closure if available. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + +2003-11-20 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Make the -lgcc_s conditional. + +2003-11-19 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Add DYLD_LIBRARY_PATH for darwin. + Add -lgcc_s to additional flags. + +2003-11-12 Andreas Tobler + + * configure.in, include/Makefile.am: PR libgcj/11147, install + the ffitarget.h header file in a gcc versioned and target + dependent place. + * configure: Regenerated. + * Makefile.in, include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2003-11-09 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Print result and check + with dg-output to make debugging easier. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. 
+ * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Make ffi_closure + static. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_9byte2.c: New test case. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_double.c: Do a check on the result. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/return_sc.c: Cleanup whitespaces. + +2003-11-06 Andreas Tobler + + * src/prep_cif.c (ffi_prep_cif): Move the validity check after + the initialization. + +2003-10-23 Andreas Tobler + + * src/java_raw_api.c (ffi_java_ptrarray_to_raw): Replace + FFI_ASSERT(FALSE) with FFI_ASSERT(0). + +2003-10-22 David Daney + + * src/mips/ffitarget.h: Replace undefined UINT32 and friends with + __attribute__((__mode__(__SI__))) and friends. + +2003-10-22 Andreas Schwab + + * src/ia64/ffi.c: Replace FALSE/TRUE with false/true. + +2003-10-21 Andreas Tobler + + * configure.in: AC_LINK_FILES(ffitarget.h). + * configure: Regenerate. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2003-10-21 Paolo Bonzini + Richard Henderson + + Avoid that ffi.h includes fficonfig.h. + + * Makefile.am (EXTRA_DIST): Include ffitarget.h files + (TARGET_SRC_MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (TARGET_SRC_MIPS_SGI): Removed. + (MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (MIPS_SGI): Removed. + (CLEANFILES): Removed. + (mostlyclean-am, clean-am, mostlyclean-sub, clean-sub): New + targets. + * acconfig.h: Removed. + * configure.in: Compute sizeofs only for double and long double. + Use them to define and subst HAVE_LONG_DOUBLE. Include comments + into AC_DEFINE instead of using acconfig.h. 
Create + include/ffitarget.h instead of include/fficonfig.h. Rename + MIPS_GCC to MIPS_IRIX, drop MIPS_SGI since we are in gcc's tree. + AC_DEFINE EH_FRAME_FLAGS. + * include/Makefile.am (DISTCLEANFILES): New automake macro. + (hack_DATA): Add ffitarget.h. + * include/ffi.h.in: Remove all system specific definitions. + Declare raw API even if it is not installed, why bother? + Use limits.h instead of SIZEOF_* to define ffi_type_*. Do + not define EH_FRAME_FLAGS, it is in fficonfig.h now. Include + ffitarget.h instead of fficonfig.h. Remove ALIGN macro. + (UINT_ARG, INT_ARG): Removed, use ffi_arg and ffi_sarg instead. + * include/ffi_common.h (bool): Do not define. + (ffi_assert): Accept failed assertion. + (ffi_type_test): Return void and accept file/line. + (FFI_ASSERT): Pass stringized failed assertion. + (FFI_ASSERT_AT): New macro. + (FFI_ASSERT_VALID_TYPE): New macro. + (UINT8, SINT8, UINT16, SINT16, UINT32, SINT32, + UINT64, SINT64): Define here with gcc's __attribute__ macro + instead of in ffi.h + (FLOAT32, ALIGN): Define here instead of in ffi.h + * include/ffi-mips.h: Removed. Its content moved to + src/mips/ffitarget.h after separating assembly and C sections. + * src/alpha/ffi.c, src/alpha/ffi.c, src/java_raw_api.c + src/prep_cif.c, src/raw_api.c, src/ia64/ffi.c, + src/mips/ffi.c, src/mips/n32.S, src/mips/o32.S, + src/mips/ffitarget.h, src/sparc/ffi.c, src/x86/ffi64.c: + SIZEOF_ARG -> FFI_SIZEOF_ARG. + * src/ia64/ffi.c: Include stdbool.h (provided by GCC 2.95+). + * src/debug.c (ffi_assert): Accept stringized failed assertion. + (ffi_type_test): Rewritten. + * src/prep-cif.c (initialize_aggregate, ffi_prep_cif): Call + FFI_ASSERT_VALID_TYPE. + * src/alpha/ffitarget.h, src/arm/ffitarget.h, + src/ia64/ffitarget.h, src/m68k/ffitarget.h, + src/mips/ffitarget.h, src/powerpc/ffitarget.h, + src/s390/ffitarget.h, src/sh/ffitarget.h, + src/sh64/ffitarget.h, src/sparc/ffitarget.h, + src/x86/ffitarget.h: New files. + * src/alpha/osf.S, src/arm/sysv.S, src/ia64/unix.S, + src/m68k/sysv.S, src/mips/n32.S, src/mips/o32.S, + src/powerpc/aix.S, src/powerpc/darwin.S, + src/powerpc/ffi_darwin.c, src/powerpc/linux64.S, + src/powerpc/linux64_closure.S, src/powerpc/ppc_closure.S, + src/powerpc/sysv.S, src/s390/sysv.S, src/sh/sysv.S, + src/sh64/sysv.S, src/sparc/v8.S, src/sparc/v9.S, + src/x86/sysv.S, src/x86/unix64.S, src/x86/win32.S: + include fficonfig.h + +2003-10-20 Rainer Orth + + * src/mips/ffi.c: Use _ABIN32, _ABIO32 instead of external + _MIPS_SIM_NABI32, _MIPS_SIM_ABI32. + +2003-10-19 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Declare bytes again. + Used when FFI_DEBUG = 1. + +2003-10-14 Alan Modra + + * src/types.c (double, longdouble): Default POWERPC64 to 8 byte size + and align. + +2003-10-06 Rainer Orth + + * include/ffi_mips.h: Define FFI_MIPS_N32 for N32/N64 ABIs, + FFI_MIPS_O32 for O32 ABI. + +2003-10-01 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_64 for + SPARC64. Cleanup whitespaces. + +2003-09-19 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Xfail mips, arm, + strongarm, xscale. Cleanup whitespaces. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. 
+ * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Cleanup whitespaces. + +2003-09-18 David Edelsohn + + * src/powerpc/aix.S: Cleanup whitespaces. + * src/powerpc/aix_closure.S: Likewise. + +2003-09-18 Andreas Tobler + + * src/powerpc/darwin.S: Cleanup whitespaces, comment formatting. + * src/powerpc/darwin_closure.S: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + +2003-09-18 Andreas Tobler + David Edelsohn + + * src/types.c (double): Add AIX and Darwin to the right TYPEDEF. + * src/powerpc/aix_closure.S: Remove the pointer to the outgoing + parameter stack. + * src/powerpc/darwin_closure.S: Likewise. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Handle structures + according to the Darwin/AIX ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_DARWIN): Likewise. + Remove the outgoing parameter stack logic. Simplify the evaluation + of the different CASE types. + (ffi_prep_closure): Avoid the casts on lvalues. Change the branch + statement in the trampoline code. + +2003-09-18 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_args): Take into account the alignment + for the register size. + (ffi_closure_helper_SYSV): Handle the structure return value + address correctly. + (ffi_closure_helper_SYSV): Return the appropriate type when + the registers are used for the structure return value. + * src/sh/sysv.S (ffi_closure_SYSV): Fix the stack layout for + the 64-bit return value. Update copyright years. + +2003-09-17 Rainer Orth + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Search in + srcdir for ffi_mips.h. + +2003-09-12 Alan Modra + + * src/prep_cif.c (initialize_aggregate): Include tail padding in + structure size. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Correct + placement of float result. + * testsuite/libffi.special/unwindtest.cc (closure_test_fn1): Correct + cast of "resp" for big-endian 64 bit machines. + +2003-09-11 Alan Modra + + * src/types.c (double, longdouble): Merge identical SH and ARM + typedefs, and add POWERPC64. + * src/powerpc/ffi.c (ffi_prep_args64): Correct next_arg calc for + struct split over gpr and rest. + (ffi_prep_cif_machdep): Correct intarg_count for structures. + * src/powerpc/linux64.S (ffi_call_LINUX64): Fix gpr offsets. + +2003-09-09 Andreas Tobler + + * src/powerpc/ffi.c (ffi_closure_helper_SYSV): Handle struct + passing correctly. + +2003-09-09 Alan Modra + + * configure: Regenerate. + +2003-09-04 Andreas Tobler + + * Makefile.am: Remove build rules for ffitest. + * Makefile.in: Rebuilt.
+ +2003-09-04 Andreas Tobler + + * src/java_raw_api.c: Include to fix compiler warning + about implicit declaration of abort(). + +2003-09-04 Andreas Tobler + + * Makefile.am: Add dejagnu test framework. Fixes PR other/11411. + * Makefile.in: Rebuilt. + * configure.in: Add dejagnu test framework. + * configure: Rebuilt. + + * testsuite/Makefile.am: New file. + * testsuite/Makefile.in: Built + * testsuite/lib/libffi-dg.exp: New file. + * testsuite/config/default.exp: Likewise. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Likewise. + * testsuite/libffi.call/closure_fn0.c: Likewise. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float1.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/many.c: Likewise. + * testsuite/libffi.call/many_win32.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/return_ll.c: Likewise. + * testsuite/libffi.call/return_sc.c: Likewise. + * testsuite/libffi.call/return_uc.c: Likewise. + * testsuite/libffi.call/strlen.c: Likewise. + * testsuite/libffi.call/strlen_win32.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.special/special.exp: New file. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + + +2003-08-13 Kaz Kojima + + * src/sh/ffi.c (OFS_INT16): Set 0 for little endian case. Update + copyright years. + +2003-08-02 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Modify for changed gcc + structure passing. + (ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64.S: Remove code writing to parm save area. 
+ * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Use return + address in lr from ffi_closure_helper_LINUX64 call to calculate + table address. Optimize function tail. + +2003-07-28 Andreas Tobler + + * src/sparc/ffi.c: Handle all floating point registers. + * src/sparc/v9.S: Likewise. Fixes second part of PR target/11410. + +2003-07-11 Gerald Pfeifer + + * README: Note that libffi is not part of GCC. Update the project + URL and status. + +2003-06-19 Franz Sirl + + * src/powerpc/ppc_closure.S: Include ffi.h. + +2003-06-13 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .uleb128/.sleb128 directives. + Use C style comments. + +2003-06-13 Kaz Kojima + + * Makefile.am: Add SHmedia support. Fix a typo of SH support. + * Makefile.in: Regenerate. + * configure.in (sh64-*-linux*, sh5*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SHmedia support. + * src/sh64/ffi.c: New file. + * src/sh64/sysv.S: New file. + +2003-05-16 Jakub Jelinek + + * configure.in (HAVE_RO_EH_FRAME): Check whether .eh_frame section + should be read-only. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * include/ffi.h.in (EH_FRAME_FLAGS): Define. + * src/alpha/osf.S: Use EH_FRAME_FLAGS. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. Include ffi.h. + * src/powerpc/sysv.S: Use EH_FRAME_FLAGS. Use pcrel encoding + if -fpic/-fPIC/-mrelocatable. + * src/powerpc/powerpc_closure.S: Likewise. + * src/sparc/v8.S: If HAVE_RO_EH_FRAME is defined, don't include + #write in .eh_frame flags. + * src/sparc/v9.S: Likewise. + * src/x86/unix64.S: Use EH_FRAME_FLAGS. + * src/x86/sysv.S: Likewise. Use pcrel encoding if -fpic/-fPIC. + * src/s390/sysv.S: Use EH_FRAME_FLAGS. Include ffi.h. + +2003-05-07 Jeff Sturm + + Fixes PR bootstrap/10656 + * configure.in (HAVE_AS_REGISTER_PSEUDO_OP): Test assembler + support for .register pseudo-op. + * src/sparc/v8.S: Use it. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2003-04-18 Jakub Jelinek + + * include/ffi.h.in (POWERPC64): Define if 64-bit. + (enum ffi_abi): Add FFI_LINUX64 on POWERPC. + Make it the default on POWERPC64. + (FFI_TRAMPOLINE_SIZE): Define to 24 on POWERPC64. + * configure.in: Change powerpc-*-linux* into powerpc*-*-linux*. + * configure: Rebuilt. + * src/powerpc/ffi.c (hidden): Define. + (ffi_prep_args_SYSV): Renamed from + ffi_prep_args. Cast pointers to unsigned long to shut up warnings. + (NUM_GPR_ARG_REGISTERS64, NUM_FPR_ARG_REGISTERS64, + ASM_NEEDS_REGISTERS64): New. + (ffi_prep_args64): New function. + (ffi_prep_cif_machdep): Handle FFI_LINUX64 ABI. + (ffi_call): Likewise. + (ffi_prep_closure): Likewise. + (flush_icache): Surround by #ifndef POWERPC64. + (ffi_dblfl): New union type. + (ffi_closure_helper_SYSV): Use it to avoid aliasing problems. + (ffi_closure_helper_LINUX64): New function. + * src/powerpc/ppc_closure.S: Surround whole file by #ifndef + __powerpc64__. + * src/powerpc/sysv.S: Likewise. + (ffi_call_SYSV): Rename ffi_prep_args to ffi_prep_args_SYSV. + * src/powerpc/linux64.S: New file. + * src/powerpc/linux64_closure.S: New file. + * Makefile.am (EXTRA_DIST): Add src/powerpc/linux64.S and + src/powerpc/linux64_closure.S. + (TARGET_SRC_POWERPC): Likewise. + + * src/ffitest.c (closure_test_fn, closure_test_fn1, closure_test_fn2, + closure_test_fn3): Fix result printing on big-endian 64-bit + machines. + (main): Print tst2_arg instead of uninitialized tst2_result. + + * src/ffitest.c (main): Hide what closure pointer really points to + from the compiler. 
+ +2003-04-16 Richard Earnshaw + + * configure.in (arm-*-netbsdelf*): Add configuration. + (configure): Regenerated. + +2003-04-04 Loren J. Rittle + + * include/Makefile.in: Regenerate. + +2003-03-21 Zdenek Dvorak + + * libffi/include/ffi.h.in: Define X86 instead of X86_64 in 32 + bit mode. + * libffi/src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV): + Receive closure pointer through parameter, read args using + __builtin_dwarf_cfa. + (FFI_INIT_TRAMPOLINE): Send closure reference through eax. + +2003-03-12 Andreas Schwab + + * configure.in: Avoid trailing /. in toolexeclibdir. + * configure: Rebuilt. + +2003-03-03 Andreas Tobler + + * src/powerpc/darwin_closure.S: Recode to fit dynamic libraries. + +2003-02-06 Andreas Tobler + + * libffi/src/powerpc/darwin_closure.S: + Fix alignment bug, allocate 8 bytes for the result. + * libffi/src/powerpc/aix_closure.S: + Likewise. + * libffi/src/powerpc/ffi_darwin.c: + Update stackframe description for aix/darwin_closure.S. + +2003-02-06 Jakub Jelinek + + * src/s390/ffi.c (ffi_closure_helper_SYSV): Add hidden visibility + attribute. + +2003-01-31 Christian Cornelssen , + Andreas Schwab + + * configure.in: Adjust command to source config-ml.in to account + for changes to the libffi_basedir definition. + (libffi_basedir): Remove ${srcdir} from value and include trailing + slash if nonempty. + + * configure: Regenerate. + +2003-01-29 Franz Sirl + + * src/powerpc/ppc_closure.S: Recode to fit shared libs. + +2003-01-28 Andrew Haley + + * include/ffi.h.in: Enable FFI_CLOSURES for x86_64. + * src/x86/ffi64.c (ffi_prep_closure): New. + (ffi_closure_UNIX64_inner): New. + * src/x86/unix64.S (ffi_closure_UNIX64): New. + +2003-01-27 Alexandre Oliva + + * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST. + Remove USE_LIBDIR conditional. + * Makefile.am (toolexecdir, toolexeclibdir): Don't override. + * Makefile.in, configure: Rebuilt. + +2003-01-27 David Edelsohn + + * Makefile.am (TARGET_SRC_POWERPC_AIX): Fix typo. + * Makefile.in: Regenerate. + +2003-01-22 Andrew Haley + + * src/powerpc/darwin.S (_ffi_call_AIX): Add Augmentation size to + unwind info. + +2003-01-21 Andreas Tobler + + * src/powerpc/darwin.S: Add unwind info. + * src/powerpc/darwin_closure.S: Likewise. + +2003-01-14 Andrew Haley + + * src/x86/ffi64.c (ffi_prep_args): Check for void retval. + (ffi_prep_cif_machdep): Likewise. + * src/x86/unix64.S: Add unwind info. + +2003-01-14 Andreas Jaeger + + * src/ffitest.c (main): Only use ffi_closures if those are + supported. + +2003-01-13 Andreas Tobler + + * libffi/src/ffitest.c: + Add closure testcases. + +2003-01-13 Kevin B. Hendricks + + * libffi/src/powerpc/ffi.c: + Fix alignment bug for float (4 byte aligned instead of 8 byte). + +2003-01-09 Geoffrey Keating + + * src/powerpc/ffi_darwin.c: Remove RCS version string. + * src/powerpc/darwin.S: Remove RCS version string. + +2003-01-03 Jeff Sturm + + * include/ffi.h.in: Add closure defines for SPARC, SPARC64. + * src/ffitest.c (main): Use static storage for closure. + * src/sparc/ffi.c (ffi_prep_closure, ffi_closure_sparc_inner): New. + * src/sparc/v8.S (ffi_closure_v8): New. + * src/sparc/v9.S (ffi_closure_v9): New. + +2002-11-10 Ranjit Mathew + + * include/ffi.h.in: Added FFI_STDCALL ffi_type + enumeration for X86_WIN32. + * src/x86/win32.S: Added ffi_call_STDCALL function + definition. + * src/x86/ffi.c (ffi_call/ffi_raw_call): Added + switch cases for recognising FFI_STDCALL and + calling ffi_call_STDCALL if target is X86_WIN32.
+ * src/ffitest.c (my_stdcall_strlen/stdcall_many): + stdcall versions of the "my_strlen" and "many" + test functions (for X86_WIN32). + Added test cases to test stdcall invocation using + these functions. + +2002-12-02 Kaz Kojima + + * src/sh/sysv.S: Add DWARF2 unwind info. + +2002-11-27 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Make section read-only. + +2002-11-26 Jim Wilson + + * src/types.c (FFI_TYPE_POINTER): Has size 8 on IA64. + +2002-11-23 H.J. Lu + + * acinclude.m4: Add dummy AM_PROG_LIBTOOL. + Include ../config/accross.m4. + * aclocal.m4; Rebuild. + * configure: Likewise. + +2002-11-15 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Adapt to pcrel FDE encoding. + +2002-11-11 DJ Delorie + + * configure.in: Look for common files in the right place. + +2002-10-08 Ulrich Weigand + + * src/java_raw_api.c (ffi_java_raw_to_ptrarray): Interpret + raw data as _Jv_word values, not ffi_raw. + (ffi_java_ptrarray_to_raw): Likewise. + (ffi_java_rvalue_to_raw): New function. + (ffi_java_raw_call): Call it. + (ffi_java_raw_to_rvalue): New function. + (ffi_java_translate_args): Call it. + * src/ffitest.c (closure_test_fn): Interpret return value + as ffi_arg, not int. + * src/s390/ffi.c (ffi_prep_cif_machdep): Add missing + FFI_TYPE_POINTER case. + (ffi_closure_helper_SYSV): Likewise. Also, assume return + values extended to word size. + +2002-10-02 Andreas Jaeger + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Remove debug output. + +2002-10-01 Bo Thorsen + + * include/ffi.h.in: Fix i386 win32 compilation. + +2002-09-30 Ulrich Weigand + + * configure.in: Add s390x-*-linux-* target. + * configure: Regenerate. + * include/ffi.h.in: Define S390X for s390x targets. + (FFI_CLOSURES): Define for s390/s390x. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * src/prep_cif.c (ffi_prep_cif): Do not compute stack space for s390. + * src/types.c (FFI_TYPE_POINTER): Use 8-byte pointers on s390x. + * src/s390/ffi.c: Major rework of existing code. Add support for + s390x targets. Add closure support. + * src/s390/sysv.S: Likewise. + +2002-09-29 Richard Earnshaw + + * src/arm/sysv.S: Fix typo. + +2002-09-28 Richard Earnshaw + + * src/arm/sysv.S: If we don't have machine/asm.h and the pre-processor + has defined __USER_LABEL_PREFIX__, then use it in CNAME. + (ffi_call_SYSV): Handle soft-float. + +2002-09-27 Bo Thorsen + + * include/ffi.h.in: Fix multilib x86-64 support. + +2002-09-22 Kaveh R. Ghazi + + * Makefile.am (all-multi): Fix multilib parallel build. + +2002-07-19 Kaz Kojima + + * configure.in (sh[34]*-*-linux*): Add brackets. + * configure: Regenerate. + +2002-07-18 Kaz Kojima + + * Makefile.am: Add SH support. + * Makefile.in: Regenerate. + * configure.in (sh-*-linux*, sh[34]*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SH support. + * src/sh/ffi.c: New file. + * src/sh/sysv.S: New file. + * src/types.c: Add SH support. + +2002-07-16 Bo Thorsen + + * src/x86/ffi64.c: New file that adds x86-64 support. + * src/x86/unix64.S: New file that handles argument setup for + x86-64. + * src/x86/sysv.S: Don't use this on x86-64. + * src/x86/ffi.c: Don't use this on x86-64. + Remove unused vars. + * src/prep_cif.c (ffi_prep_cif): Don't do stack size calculation + for x86-64. + * src/ffitest.c (struct6): New test that tests a special case in + the x86-64 ABI. + (struct7): Likewise. + (struct8): Likewise. + (struct9): Likewise. + (closure_test_fn): Silence warning about this when it's not used. + (main): Add the new tests. 
+ (main): Fix a couple of wrong casts and silence some compiler warnings. + * include/ffi.h.in: Add x86-64 ABI definition. + * fficonfig.h.in: Regenerate. + * Makefile.am: Add x86-64 support. + * configure.in: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + +2002-06-24 Bo Thorsen + + * src/types.c: Merge settings for similar architectures. + Add x86-64 sizes and alignments. + +2002-06-23 Bo Thorsen + + * src/arm/ffi.c (ffi_prep_args): Remove unused vars. + * src/sparc/ffi.c (ffi_prep_args_v8): Likewise. + * src/mips/ffi.c (ffi_prep_args): Likewise. + * src/m68k/ffi.c (ffi_prep_args): Likewise. + +2002-07-18 H.J. Lu (hjl@gnu.org) + + * Makefile.am (TARGET_SRC_MIPS_LINUX): New. + (libffi_la_SOURCES): Support MIPS_LINUX. + (libffi_convenience_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + + * configure.in (mips64*-*): Skip. + (mips*-*-linux*): New. + * configure: Regenerated. + + * src/mips/ffi.c: Include . + +2002-06-06 Ulrich Weigand + + * src/s390/sysv.S: Save/restore %r6. Add DWARF-2 unwind info. + +2002-05-27 Roger Sayle + + * src/x86/ffi.c (ffi_prep_args): Remove reference to avn. + +2002-05-27 Bo Thorsen + + * src/x86/ffi.c (ffi_prep_args): Remove unused variable and + fix formatting. + +2002-05-13 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_closure): Declare fd at + beginning of function (for older apple cc). + +2002-05-08 Alexandre Oliva + + * configure.in (ORIGINAL_LD_FOR_MULTILIBS): Preserve LD at + script entry, and set LD to it when configuring multilibs. + * configure: Rebuilt. + +2002-05-05 Jason Thorpe + + * configure.in (sparc64-*-netbsd*): Add target. + (sparc-*-netbsdelf*): Likewise. + * configure: Regenerate. + +2002-04-28 David S. Miller + + * configure.in, configure: Fix SPARC test in previous change. + +2002-04-29 Gerhard Tonn + + * Makefile.am: Add Linux for S/390 support. + * Makefile.in: Regenerate. + * configure.in: Add Linux for S/390 support. + * configure: Regenerate. + * include/ffi.h.in: Add Linux for S/390 support. + * src/s390/ffi.c: New file from libffi CVS tree. + * src/s390/sysv.S: New file from libffi CVS tree. + +2002-04-28 Jakub Jelinek + + * configure.in (HAVE_AS_SPARC_UA_PCREL): Check for working + %r_disp32(). + * src/sparc/v8.S: Use it. + * src/sparc/v9.S: Likewise. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2002-04-08 Hans Boehm + + * src/java_raw_api.c (ffi_java_raw_size): Handle FFI_TYPE_DOUBLE + correctly. + * src/ia64/unix.S: Add unwind information. Fix comments. + Save sp in a way that's compatible with unwind info. + (ffi_call_unix): Correctly restore sp in all cases. + * src/ia64/ffi.c: Add, fix comments. + +2002-04-08 Jakub Jelinek + + * src/sparc/v8.S: Make .eh_frame dependent on target word size. + +2002-04-06 Jason Thorpe + + * configure.in (alpha*-*-netbsd*): Add target. + * configure: Regenerate. + +2002-04-04 Jeff Sturm + + * src/sparc/v8.S: Add unwind info. + * src/sparc/v9.S: Likewise. + +2002-03-30 Krister Walfridsson + + * configure.in: Enable i*86-*-netbsdelf*. + * configure: Rebuilt. + +2002-03-29 David Billinghurst + + PR other/2620 + * src/mips/n32.s: Delete + * src/mips/o32.s: Delete + +2002-03-21 Loren J. Rittle + + * configure.in: Enable alpha*-*-freebsd*. + * configure: Rebuilt. + +2002-03-17 Bryce McKinlay + + * Makefile.am: libfficonvenience -> libffi_convenience. + * Makefile.in: Rebuilt. + + * Makefile.am: Define ffitest_OBJECTS. + * Makefile.in: Rebuilt. + +2002-03-07 Andreas Tobler + David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX closure files. 
+ (TARGET_SRC_POWERPC_AIX): Add aix_closure.S. + (TARGET_SRC_POWERPC_DARWIN): Add darwin_closure.S. + * Makefile.in: Regenerate. + * include/ffi.h.in: Add AIX and Darwin closure definitions. + * src/powerpc/ffi_darwin.c (ffi_prep_closure): New function. + (flush_icache, flush_range): New functions. + (ffi_closure_helper_DARWIN): New function. + * src/powerpc/aix_closure.S: New file. + * src/powerpc/darwin_closure.S: New file. + +2002-02-24 Jeff Sturm + + * include/ffi.h.in: Add typedef for ffi_arg. + * src/ffitest.c (main): Declare rint with ffi_arg. + +2002-02-21 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Skip appropriate + number of GPRs for floating-point arguments. + +2002-01-31 Anthony Green + + * configure: Rebuilt. + * configure.in: Replace CHECK_SIZEOF and endian tests with + cross-compiler friendly macros. + * aclocal.m4 (AC_COMPILE_CHECK_SIZEOF, AC_C_BIGENDIAN_CROSS): New + macros. + +2002-01-18 David Edelsohn + + * src/powerpc/darwin.S (_ffi_call_AIX): New. + * src/powerpc/aix.S (ffi_call_DARWIN): New. + +2002-01-17 David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX files. + (TARGET_SRC_POWERPC_AIX): New. + (POWERPC_AIX): New stanza. + * Makefile.in: Regenerate. + * configure.in: Add AIX case. + * configure: Regenerate. + * include/ffi.h.in (ffi_abi): Add FFI_AIX. + * src/powerpc/ffi_darwin.c (ffi_status): Use "long" to scale frame + size. Fix "long double" support. + (ffi_call): Add FFI_AIX case. + * src/powerpc/aix.S: New. + +2001-10-09 John Hornkvist + + Implement Darwin PowerPC ABI. + * configure.in: Handle powerpc-*-darwin*. + * Makefile.am: Set source files for POWERPC_DARWIN. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + * include/ffi.h.in: Define FFI_DARWIN and FFI_DEFAULT_ABI for + POWERPC_DARWIN. + * src/powerpc/darwin.S: New file. + * src/powerpc/ffi_darwin.c: New file. + +2001-10-07 Joseph S. Myers + + * src/x86/ffi.c: Fix spelling error of "separate" as "seperate". + +2001-07-16 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .balign directive. + Use C style comments. + +2001-07-16 Rainer Orth + + * src/alpha/ffi.c (ffi_prep_closure): Avoid gas-only mnemonic. + Fixes PR bootstrap/3563. + +2001-06-26 Rainer Orth + + * src/alpha/osf.S (ffi_closure_osf): Use .rdata for ECOFF. + +2001-06-25 Rainer Orth + + * configure.in: Recognize sparc*-sun-* host. + * configure: Regenerate. + +2001-06-06 Andrew Haley + + * src/alpha/osf.S (__FRAME_BEGIN__): Conditionalize for ELF. + +2001-06-03 Andrew Haley + + * src/alpha/osf.S: Add unwind info. + * src/powerpc/sysv.S: Add unwind info. + * src/powerpc/ppc_closure.S: Likewise. + +2000-05-31 Jeff Sturm + + * configure.in: Fix AC_ARG_ENABLE usage. + * configure: Rebuilt. + +2001-05-06 Bryce McKinlay + + * configure.in: Remove warning about beta code. + * configure: Rebuilt. + +2001-04-25 Hans Boehm + + * src/ia64/unix.S: Restore stack pointer when returning from + ffi_closure_UNIX. + * src/ia64/ffi.c: Fix typo in comment. + +2001-04-18 Jim Wilson + + * src/ia64/unix.S: Delete unnecessary increment and decrement of loc2 + to eliminate RAW DV. + +2001-04-12 Bryce McKinlay + + * Makefile.am: Make a libtool convenience library. + * Makefile.in: Rebuilt. + +2001-03-29 Bryce McKinlay + + * configure.in: Use different syntax for subdirectory creation. + * configure: Rebuilt. + +2001-03-27 Jon Beniston + + * configure.in: Added X86_WIN32 target (Win32, CygWin, MingW). + * configure: Rebuilt. + * Makefile.am: Added X86_WIN32 target support. + * Makefile.in: Rebuilt. 
+ + * include/ffi.h.in: Added X86_WIN32 target support. + + * src/ffitest.c: Doesn't run structure tests for X86_WIN32 targets. + * src/types.c: Added X86_WIN32 target support. + + * src/x86/win32.S: New file. Based on sysv.S, but with EH + stuff removed and made to work with CygWin's gas. + +2001-03-26 Bryce McKinlay + + * configure.in: Make target subdirectory in build dir. + * Makefile.am: Override suffix based rules to specify correct output + subdirectory. + * Makefile.in: Rebuilt. + * configure: Rebuilt. + +2001-03-23 Kevin B Hendricks + + * src/powerpc/ppc_closure.S: New file. + * src/powerpc/ffi.c (ffi_prep_args): Fixed ABI compatibility bug + involving long long and register pairs. + (ffi_prep_closure): New function. + (flush_icache): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * include/ffi.h.in (FFI_CLOSURES): Define on PPC. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Added src/powerpc/ppc_closure.S. + (TARGET_SRC_POWERPC): Likewise. + +2001-03-19 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (ffitest_LDFLAGS): New macro. + +2001-03-02 Nick Clifton + + * include/ffi.h.in: Remove RCS ident string. + * include/ffi_mips.h: Remove RCS ident string. + * src/debug.c: Remove RCS ident string. + * src/ffitest.c: Remove RCS ident string. + * src/prep_cif.c: Remove RCS ident string. + * src/types.c: Remove RCS ident string. + * src/alpha/ffi.c: Remove RCS ident string. + * src/alpha/osf.S: Remove RCS ident string. + * src/arm/ffi.c: Remove RCS ident string. + * src/arm/sysv.S: Remove RCS ident string. + * src/mips/ffi.c: Remove RCS ident string. + * src/mips/n32.S: Remove RCS ident string. + * src/mips/o32.S: Remove RCS ident string. + * src/sparc/ffi.c: Remove RCS ident string. + * src/sparc/v8.S: Remove RCS ident string. + * src/sparc/v9.S: Remove RCS ident string. + * src/x86/ffi.c: Remove RCS ident string. + * src/x86/sysv.S: Remove RCS ident string. + +2001-02-08 Joseph S. Myers + + * include/ffi.h.in: Change sourceware.cygnus.com references to + gcc.gnu.org. + +2000-12-09 Richard Henderson + + * src/alpha/ffi.c (ffi_call): Simplify struct return test. + (ffi_closure_osf_inner): Index rather than increment avalue + and arg_types. Give ffi_closure_osf the raw return value type. + * src/alpha/osf.S (ffi_closure_osf): Handle return value type + promotion. + +2000-12-07 Richard Henderson + + * src/raw_api.c (ffi_translate_args): Fix typo. + (ffi_prep_closure): Likewise. + + * include/ffi.h.in [ALPHA]: Define FFI_CLOSURES and + FFI_TRAMPOLINE_SIZE. + * src/alpha/ffi.c (ffi_prep_cif_machdep): Adjust minimal + cif->bytes for new ffi_call_osf implementation. + (ffi_prep_args): Absorb into ... + (ffi_call): ... here. Do all stack allocation here and + avoid a callback function. + (ffi_prep_closure, ffi_closure_osf_inner): New. + * src/alpha/osf.S (ffi_call_osf): Reimplement with no callback. + (ffi_closure_osf): New. + +2000-09-10 Alexandre Oliva + + * config.guess, config.sub, install-sh: Removed. + * ltconfig, ltmain.sh, missing, mkinstalldirs: Likewise. + * Makefile.in: Rebuilt. + + * acinclude.m4: Include libtool macros from the top level. + * aclocal.m4, configure: Rebuilt. + +2000-08-22 Alexandre Oliva + + * configure.in [i*86-*-freebsd*] (TARGET, TARGETDIR): Set. + * configure: Rebuilt. + +2000-05-11 Scott Bambrough + + * libffi/src/arm/sysv.S (ffi_call_SYSV): Doubles are not saved to + memory correctly. Use conditional instructions, not branches where + possible. 
+ +2000-05-04 Tom Tromey + + * configure: Rebuilt. + * configure.in: Match `arm*-*-linux-*'. + From Chris Dornan . + +2000-04-28 Jakub Jelinek + + * Makefile.am (SUBDIRS): Define. + (AM_MAKEFLAGS): Likewise. + (Multilib support.): Add section. + * Makefile.in: Rebuilt. + * ltconfig (extra_compiler_flags, extra_compiler_flags_value): + New variables. Set for gcc using -print-multi-lib. Export them + to libtool. + (sparc64-*-linux-gnu*): Use libsuff 64 for search paths. + * ltmain.sh (B|b|V): Don't throw away gcc's -B, -b and -V options + for -shared links. + (extra_compiler_flags_value, extra_compiler_flags): Check these + for extra compiler options which need to be passed down in + compiler_flags. + +2000-04-16 Anthony Green + + * configure: Rebuilt. + * configure.in: Change i*86-pc-linux* to i*86-*-linux*. + +2000-04-14 Jakub Jelinek + + * include/ffi.h.in (SPARC64): Define for 64bit SPARC builds. + Set SPARC FFI_DEFAULT_ABI based on SPARC64 define. + * src/sparc/ffi.c (ffi_prep_args_v8): Renamed from ffi_prep_args. + Replace all void * sizeofs with sizeof(int). + Only compare type with FFI_TYPE_LONGDOUBLE if LONGDOUBLE is + different than DOUBLE. + Remove FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases (handled elsewhere). + (ffi_prep_args_v9): New function. + (ffi_prep_cif_machdep): Handle V9 ABI and long long on V8. + (ffi_V9_return_struct): New function. + (ffi_call): Handle FFI_V9 ABI from 64bit code and FFI_V8 ABI from + 32bit code (not yet cross-arch calls). + * src/sparc/v8.S: Add struct return delay nop. + Handle long long. + * src/sparc/v9.S: New file. + * src/prep_cif.c (ffi_prep_cif): Return structure pointer + is used on sparc64 only for structures larger than 32 bytes. + Pass by reference for structures is done for structure arguments + larger than 16 bytes. + * src/ffitest.c (main): Use 64bit rint on sparc64. + Run long long tests on sparc. + * src/types.c (FFI_TYPE_POINTER): Pointer is 64bit on alpha and + sparc64. + (FFI_TYPE_LONGDOUBLE): long double is 128 bit aligned to 128 bits + on sparc64. + * configure.in (sparc-*-linux*): New supported target. + (sparc64-*-linux*): Likewise. + * configure: Rebuilt. + * Makefile.am: Add v9.S to SPARC files. + * Makefile.in: Likewise. + (LINK): Surround $(CCLD) into double quotes, so that multilib + compiles work correctly. + +2000-04-04 Alexandre Petit-Bianco + + * configure: Rebuilt. + * configure.in: (i*86-*-solaris*): New libffi target. Patch + proposed by Bryce McKinlay. + +2000-03-20 Tom Tromey + + * Makefile.in: Hand edit for java_raw_api.lo. + +2000-03-08 Bryce McKinlay + + * config.guess, config.sub: Update from the gcc tree. + Fix for PR libgcj/168. + +2000-03-03 Tom Tromey + + * Makefile.in: Fixed ia64 by hand. + + * configure: Rebuilt. + * configure.in (--enable-multilib): New option. + (libffi_basedir): New subst. + (AC_OUTPUT): Added multilib code. + +2000-03-02 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (TARGET_SRC_IA64): Use `ia64', not `alpha', as + directory name. + +2000-02-25 Hans Boehm + + * src/ia64/ffi.c, src/ia64/ia64_flags.h, src/ia64/unix.S: New + files. + * src/raw_api.c (ffi_translate_args): Fixed typo in argument + list. + (ffi_prep_raw_closure): Use ffi_translate_args, not + ffi_closure_translate. + * src/java_raw_api.c: New file. + * src/ffitest.c (closure_test_fn): New function. + (main): Define `rint' as long long on IA64. Added new test when + FFI_CLOSURES is defined. + * include/ffi.h.in (ALIGN): Use size_t, not unsigned. + (ffi_abi): Recognize IA64. + (ffi_raw): Added `flt' field. 
+ Added "Java raw API" code. + * configure.in: Recognize ia64. + * Makefile.am (TARGET_SRC_IA64): New macro. + (libffi_la_common_SOURCES): Added java_raw_api.c. + (libffi_la_SOURCES): Define in IA64 case. + +2000-01-04 Tom Tromey + + * Makefile.in: Rebuilt with newer automake. + +1999-12-31 Tom Tromey + + * Makefile.am (INCLUDES): Added -I$(top_srcdir)/src. + +1999-09-01 Tom Tromey + + * include/ffi.h.in: Removed PACKAGE and VERSION defines and + undefs. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + * configure.in: Pass 3rd argument to AM_INIT_AUTOMAKE. + Use AM_PROG_LIBTOOL (automake 1.4 compatibility). + * acconfig.h: Don't #undef PACKAGE or VERSION. + +1999-08-09 Anthony Green + + * include/ffi.h.in: Try to work around messy header problem + with PACKAGE and VERSION. + + * configure: Rebuilt. + * configure.in: Change version to 2.00-beta. + + * fficonfig.h.in: Rebuilt. + * acconfig.h (FFI_NO_STRUCTS, FFI_NO_RAW_API): Define. + + * src/x86/ffi.c (ffi_raw_call): Rename. + +1999-08-02 Kresten Krab Thorup + + * src/x86/ffi.c (ffi_closure_SYSV): New function. + (ffi_prep_incoming_args_SYSV): Ditto. + (ffi_prep_closure): Ditto. + (ffi_closure_raw_SYSV): Ditto. + (ffi_prep_raw_closure): More ditto. + (ffi_call_raw): Final ditto. + + * include/ffi.h.in: Add definitions for closure and raw API. + + * src/x86/ffi.c (ffi_prep_cif_machdep): Added case for + FFI_TYPE_UINT64. + + * Makefile.am (libffi_la_common_SOURCES): Added raw_api.c + + * src/raw_api.c: New file. + + * include/ffi.h.in (ffi_raw): New type. + (UINT_ARG, SINT_ARG): New defines. + (ffi_closure, ffi_raw_closure): New types. + (ffi_prep_closure, ffi_prep_raw_closure): New declarations. + + * configure.in: Add check for endianness and sizeof void*. + + * src/x86/sysv.S (ffi_call_SYSV): Call fixup routine via argument, + instead of directly. + + * configure: Rebuilt. + +Thu Jul 8 14:28:42 1999 Anthony Green + + * configure.in: Add x86 and powerpc BeOS configurations. + From Makoto Kato . + +1999-05-09 Anthony Green + + * configure.in: Add warning about this being beta code. + Remove src/Makefile.am from the picture. + * configure: Rebuilt. + + * Makefile.am: Move logic from src/Makefile.am. Add changes + to support libffi as a target library. + * Makefile.in: Rebuilt. + + * aclocal.m4, config.guess, config.sub, ltconfig, ltmain.sh: + Upgraded to new autoconf, automake, libtool. + + * README: Tweaks. + + * LICENSE: Update copyright date. + + * src/Makefile.am, src/Makefile.in: Removed. + +1998-11-29 Anthony Green + + * include/ChangeLog: Removed. + * src/ChangeLog: Removed. + * src/mips/ChangeLog: Removed. + * src/sparc/ChangeLog: Removed. + * src/x86/ChangeLog: Removed. + + * ChangeLog.v1: Created. + +============================================================================= +From the old ChangeLog.libffi file.... + +2011-02-08 Andreas Tobler + + * testsuite/lib/libffi.exp: Tweak for stand-alone mode. + +2009-12-25 Samuli Suominen + + * configure.ac: Undefine _AC_ARG_VAR_PRECIOUS for autoconf 2.64. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/huge_struct.c: Add x86 XFAILs. + * testsuite/libffi.call/float2.c: Fix dg-excess-errors.
+ * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-12 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + 
testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-04 Andrew Haley + + * src/powerpc/ffitarget.h: Fix misapplied merge from gcc. + +2009-06-04 Andrew Haley + + * src/mips/o32.S, + src/mips/n32.S: Fix licence formatting. + +2009-06-04 Andrew Haley + + * src/x86/darwin.S: Fix licence formatting. + src/x86/win32.S: Likewise. + src/sh64/sysv.S: Likewise. + src/sh/sysv.S: Likewise. + +2009-06-04 Andrew Haley + + * src/sh64/ffi.c: Remove lint directives. Was missing from merge + of Andreas Tobler's patch from 2006-04-22. + +2009-06-04 Andrew Haley + + * src/sh/ffi.c: Apply missing hunk from Alexandre Oliva's patch of + 2007-03-07. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-12-19 Anthony Green + + * configure.ac: Bump version to 3.0.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-11-11 Anthony Green + + * configure.ac: Bump version to 3.0.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. 
+ (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. + +2008-07-17 Anthony Green + + * configure.ac: Bump version to 3.0.6. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. Add documentation. + * README: Update for new release. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-07-16 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-04-03 Anthony Green + + * libffi.pc.in (Libs): Add -L${libdir}. + * configure.ac: Bump version to 3.0.5. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-04-03 Anthony Green + Xerces Ranby + + * include/ffi.h.in: Wrap definition of target architecture to + protect from double definitions. + +2008-03-22 Moriyoshi Koizumi + + * src/x86/ffi.c (ffi_prep_closure_loc): Fix for bug revealed in + closure_loc_fn0.c. + * testsuite/libffi.call/closure_loc_fn0.c (closure_loc_test_fn0): + New test. + +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/huge_struct.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2008-02-26 Jakub Jelinek + Anthony Green + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. + * src/m68k/sysv.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-26 Anthony Green + Thomas Heller + + * include/ffi.h.in: Change void (*)() to void (*)(void). + +2008-02-26 Anthony Green + Thomas Heller + + * src/alpha/ffi.c: Change void (*)() to void (*)(void). + src/alpha/osf.S, src/arm/ffi.c, src/frv/ffi.c, src/ia64/ffi.c, + src/ia64/unix.S, src/java_raw_api.c, src/m32r/ffi.c, + src/mips/ffi.c, src/pa/ffi.c, src/pa/hpux32.S, src/pa/linux.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/raw_api.c, + src/s390/ffi.c, src/sh/ffi.c, src/sh64/ffi.c, src/sparc/ffi.c, + src/x86/ffi.c, src/x86/unix64.S, src/x86/darwin64.S, + src/x86/ffi64.c: Ditto. + +2008-02-24 Anthony Green + + * configure.ac: Accept openbsd*, not just openbsd. 
+ Bump version to 3.0.4. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-22 Anthony Green + + * README: Clean up list of tested platforms. + +2008-02-22 Anthony Green + + * configure.ac: Bump version to 3.0.3. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. Clean up test docs. + +2008-02-22 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-02-22 Thomas Heller + + * configure.ac: Add x86 OpenBSD support. + * configure: Rebuilt. + +2008-02-21 Thomas Heller + + * README: Change "make test" to "make check". + +2008-02-21 Anthony Green + + * configure.ac: Bump version to 3.0.2. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-21 Björn König + + * src/x86/freebsd.S: New file. + * configure.ac: Add x86 FreeBSD support. + * Makefile.am: Ditto. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.1. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-15 David Daney + + * src/mips/ffi.c: Remove extra '>' from include directive. + (ffi_prep_closure_loc): Use clear_location instead of tramp. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.0. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2008-02-15 Anthony Green + + * man/ffi_call.3, man/ffi_prep_cif.3, man/ffi.3: + Update dates and remove all references to ffi_prep_closure. + * configure.ac: Bump version to 2.99.9. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 Anthony Green + + * man/ffi_prep_closure.3: Delete. + * man/Makefile.am (EXTRA_DIST): Remove ffi_prep_closure.3. + (man_MANS): Ditto. + * man/Makefile.in: Rebuilt. + * configure.ac: Bump version to 2.99.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * include/ffi.h.in LICENSE src/debug.c src/closures.c + src/ffitest.c src/s390/sysv.S src/s390/ffitarget.h + src/types.c src/m68k/ffitarget.h src/raw_api.c src/frv/ffi.c + src/frv/ffitarget.h src/sh/ffi.c src/sh/sysv.S + src/sh/ffitarget.h src/powerpc/ffitarget.h src/pa/ffi.c + src/pa/ffitarget.h src/pa/linux.S src/java_raw_api.c + src/cris/ffitarget.h src/x86/ffi.c src/x86/sysv.S + src/x86/unix64.S src/x86/win32.S src/x86/ffitarget.h + src/x86/ffi64.c src/x86/darwin.S src/ia64/ffi.c + src/ia64/ffitarget.h src/ia64/ia64_flags.h src/ia64/unix.S + src/sparc/ffi.c src/sparc/v9.S src/sparc/ffitarget.h + src/sparc/v8.S src/alpha/ffi.c src/alpha/ffitarget.h + src/alpha/osf.S src/sh64/ffi.c src/sh64/sysv.S + src/sh64/ffitarget.h src/mips/ffi.c src/mips/ffitarget.h + src/mips/n32.S src/mips/o32.S src/arm/ffi.c src/arm/sysv.S + src/arm/ffitarget.h src/prep_cif.c: Update license text. + +2008-02-14 Anthony Green + + * README: Update tested platforms. + * configure.ac: Bump version to 2.99.6. + * configure: Rebuilt. 
+ +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.5. + * configure: Rebuilt. + * Makefile.am (EXTRA_DIST): Add darwin64.S + * Makefile.in: Rebuilt. + * testsuite/lib/libffi-dg.exp: Remove libstdc++ bits from GCC tree. + * LICENSE: Update WARRANTY. + +2008-02-14 Anthony Green + + * libffi.pc.in (libdir): Fix libdir definition. + * configure.ac: Bump version to 2.99.4. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * README: Update. + * libffi.info: New file. + * doc/stamp-vti: New file. + * configure.ac: Bump version to 2.99.3. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * Makefile.am (SUBDIRS): Add man dir. + * Makefile.in: Rebuilt. + * configure.ac: Create Makefile. + * configure: Rebuilt. + * man/ffi_call.3 man/ffi_prep_cif.3 man/ffi_prep_closure.3 + man/Makefile.am man/Makefile.in: New files. + +2008-02-14 Tom Tromey + + * aclocal.m4, Makefile.in, configure, fficonfig.h.in: Rebuilt. + * mdate-sh, texinfo.tex: New files. + * Makefile.am (info_TEXINFOS): New variable. + * doc/libffi.texi: New file. + * doc/version.texi: Likewise. + +2008-02-14 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't compile with -D$(TARGET). + (lib_LTLIBRARIES): Define. + (toolexeclib_LIBRARIES): Undefine. + * Makefile.in: Rebuilt. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + +2008-02-14 Anthony Green + + * libffi.pc.in: Use @PACKAGE_NAME@ and @PACKAGE_VERSION@. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Add ChangeLog.libffi. + * Makefile.in: Rebuilt. + * LICENSE: Update copyright notice. + +2008-02-14 Anthony Green + + * include/Makefile.am (nodist_includes_HEADERS): Define. Don't + distribute ffitarget.h or ffi.h from the build include dir. + * Makefile.in: Rebuilt. + +2008-02-14 Anthony Green + + * include/Makefile.am (includesdir): Install headers under libdir. + (pkgconfigdir): Define. Install libffi.pc. + * include/Makefile.in: Rebuilt. + * libffi.pc.in: Create. + * libtool-version: Increment CURRENT + * configure.ac: Add libffi.pc.in + * configure: Rebuilt. + +2008-02-03 Anthony Green + + * include/Makefile.am (includesdir): Fix header install with + DESTDIR. + * include/Makefile.in: Rebuilt. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-02-01 Anthony Green + + * include/Makefile.am: Fix header installs. + * Makefile.am: Ditto. + * include/Makefile.in: Rebuilt. + * Makefile.in: Ditto. + +2008-02-01 Anthony Green + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL, + FFI_INIT_TRAMPOLINE): Revert my broken changes to twall's last + patch. + +2008-01-31 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am: Ditto. + * Makefile.in, testsuite/Makefile.in: Rebuilt. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-30 Anthony Green + + * Makefile.am, include/Makefile.am: Move headers to + libffi_la_SOURCES for new automake. + * Makefile.in, include/Makefile.in: Rebuilt. 
+ + * testsuite/lib/wrapper.exp: Copied from gcc tree to allow for + execution outside of gcc tree. + * testsuite/lib/target-libpath.exp: Ditto. + + * testsuite/lib/libffi-dg.exp: Many changes to allow for execution + outside of gcc tree. + + +============================================================================= +From the old ChangeLog.libgcj file.... + +2004-01-14 Kelley Cook + + * configure.in: Add in AC_PREREQ(2.13) + +2003-02-20 Alexandre Oliva + + * configure.in: Propagate ORIGINAL_LD_FOR_MULTILIBS to + config.status. + * configure: Rebuilt. + +2002-01-27 Alexandre Oliva + + * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST. + Remove USE_LIBDIR conditional. + * Makefile.am (toolexecdir, toolexeclibdir): Don't override. + * Makefile.in, configure: Rebuilt. + +Mon Aug 9 18:33:38 1999 Rainer Orth + + * include/Makefile.in: Rebuilt. + * Makefile.in: Rebuilt + * Makefile.am (toolexeclibdir): Add $(MULTISUBDIR) even for native + builds. + Use USE_LIBDIR. + + * configure: Rebuilt. + * configure.in (USE_LIBDIR): Define for native builds. + Use lowercase in configure --help explanations. + +1999-08-08 Anthony Green + + * include/ffi.h.in (FFI_FN): Remove `...'. + +1999-08-08 Anthony Green + + * Makefile.in: Rebuilt. + * Makefile.am (AM_CFLAGS): Compile with -fexceptions. + + * src/x86/sysv.S: Add exception handling metadata. + + +============================================================================= + +The libffi version 1 ChangeLog archive. + +Version 1 of libffi had per-directory ChangeLogs. Current and future +versions have a single ChangeLog file in the root directory. The +version 1 ChangeLogs have all been concatenated into this file for +future reference only. + +--- libffi ---------------------------------------------------------------- + +Mon Oct 5 02:17:50 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +Mon Oct 5 01:03:03 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +1998-07-25 Andreas Schwab + + * m68k/ffi.c (ffi_prep_cif_machdep): Use bitmask for cif->flags. + Correctly handle small structures. + (ffi_prep_args): Also handle small structures. + (ffi_call): Pass size of return type to ffi_call_SYSV. + * m68k/sysv.S: Adjust for above changes. Correctly align small + structures in the return value. + + * types.c (uint64, sint64) [M68K]: Change alignment to 4. + +Fri Apr 17 17:26:58 1998 Anthony Green + + * configure.in: Boosted rev. + * configure,Makefile.in,aclocal.m4: Rebuilt. + * README: Boosted rev and added release notes. + +Sun Feb 22 00:50:41 1998 Geoff Keating + + * configure.in: Add PowerPC config bits. + +1998-02-14 Andreas Schwab + + * configure.in: Add m68k config bits. Change AC_CANONICAL_SYSTEM + to AC_CANONICAL_HOST, this is not a compiler. Use $host instead + of $target. Remove AC_CHECK_SIZEOF(char), we already know the + result. Fix argument of AC_ARG_ENABLE. + * configure, fficonfig.h.in: Rebuilt. + +Tue Feb 10 20:53:40 1998 Richard Henderson + + * configure.in: Add Alpha config bits. + +Tue May 13 13:39:20 1997 Anthony Green + + * README: Updated dates and reworded Irix comments. + + * configure.in: Removed AC_PROG_RANLIB. + + * Makefile.in, aclocal.m4, config.guess, config.sub, configure, + ltmain.sh, */Makefile.in: libtoolized again and rebuilt with + automake and autoconf. 
+ +Sat May 10 18:44:50 1997 Tom Tromey + + * configure, aclocal.m4: Rebuilt. + * configure.in: Don't compute EXTRADIST; now handled in + src/Makefile.in. Removed macros implied by AM_INIT_AUTOMAKE. + Don't run AM_MAINTAINER_MODE. + +Thu May 8 14:34:05 1997 Anthony Green + + * missing, ltmain.sh, ltconfig.sh: Created. These are new files + required by automake and libtool. + + * README: Boosted rev to 1.14. Added notes. + + * acconfig.h: Moved PACKAGE and VERSION for new automake. + + * configure.in: Changes for libtool. + + * Makefile.am (check): make test now make check. Uses libtool now. + + * Makefile.in, configure.in, aclocal.h, fficonfig.h.in: Rebuilt. + +Thu May 1 16:27:07 1997 Anthony Green + + * missing: Added file required by new automake. + +Tue Nov 26 14:10:42 1996 Anthony Green + + * acconfig.h: Added USING_PURIFY flag. This is defined when + --enable-purify-safety was used at configure time. + + * configure.in (allsources): Added --enable-purify-safety switch. + (VERSION): Boosted rev to 1.13. + * configure: Rebuilt. + +Fri Nov 22 06:46:12 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.12. + Removed special CFLAGS hack for gcc. + * configure: Rebuilt. + + * README: Boosted rev to 1.12. Added notes. + + * Many files: Cygnus Support changed to Cygnus Solutions. + +Wed Oct 30 11:15:25 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.11. + * configure: Rebuilt. + + * README: Boosted rev to 1.11. Added notes about GNU make. + +Tue Oct 29 12:25:12 1996 Anthony Green + + * configure.in: Fixed -Wall trick. + (VERSION): Boosted rev. + * configure: Rebuilt + + * acconfig.h: Needed for --enable-debug configure switch. + + * README: Boosted rev to 1.09. Added more notes on building + libffi, and LCLint. + + * configure.in: Added --enable-debug switch. Boosted rev to + 1.09. + * configure: Rebuilt + +Tue Oct 15 13:11:28 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.08 + * configure: Rebuilt. + + * README: Added n32 bug fix notes. + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + +Mon Oct 14 10:54:46 1996 Anthony Green + + * README: Added web page reference. + + * configure.in, README: Boosted rev to 1.05 + * configure: Rebuilt. + + * README: Fixed n32 sample code. + +Fri Oct 11 17:09:28 1996 Anthony Green + + * README: Added sparc notes. + + * configure.in, README: Boosted rev to 1.04. + * configure: Rebuilt. + +Thu Oct 10 10:31:03 1996 Anthony Green + + * configure.in, README: Boosted rev to 1.03. + * configure: Rebuilt. + + * README: Added struct notes. + + * Makefile.am (EXTRA_DIST): Added LICENSE to distribution. + * Makefile.in: Rebuilt. + + * README: Removed Linux section. No special notes now + because aggregates arg/return types work. + +Wed Oct 9 16:16:42 1996 Anthony Green + + * README, configure.in (VERSION): Boosted rev to 1.02 + * configure: Rebuilt. + +Tue Oct 8 11:56:33 1996 Anthony Green + + * README (NOTE): Added n32 notes. + + * Makefile.am: Added test production. + * Makefile: Rebuilt + + * README: spell checked! + + * configure.in (VERSION): Boosted rev to 1.01 + * configure: Rebuilt. + +Mon Oct 7 15:50:22 1996 Anthony Green + + * configure.in: Added nasty bit to support SGI tools. + * configure: Rebuilt. + + * README: Added SGI notes. Added note about automake bug. + +Mon Oct 7 11:00:28 1996 Anthony Green + + * README: Rewrote intro, and fixed examples. + +Fri Oct 4 10:19:55 1996 Anthony Green + + * configure.in: -D$TARGET is no longer used as a compiler switch. 
+ It is now inserted into ffi.h at configure time. + * configure: Rebuilt. + + * FFI_ABI and FFI_STATUS are now ffi_abi and ffi_status. + +Thu Oct 3 13:47:34 1996 Anthony Green + + * README, LICENSE: Created. Wrote some docs. + + * configure.in: Don't barf on i586-unknown-linuxaout. + Added EXTRADIST code for "make dist". + * configure: Rebuilt. + + * */Makefile.in: Rebuilt with patched automake. + +Tue Oct 1 17:12:25 1996 Anthony Green + + * Makefile.am, aclocal.m4, config.guess, config.sub, + configure.in, fficonfig.h.in, install-sh, mkinstalldirs, + stamp-h.in: Created + * Makefile.in, configure: Generated + +--- libffi/include -------------------------------------------------------- + +Tue Feb 24 13:09:36 1998 Anthony Green + + * ffi_mips.h: Updated FFI_TYPE_STRUCT_* values based on + ffi.h.in changes. This is a work-around for SGI's "simple" + assembler. + +Sun Feb 22 00:51:55 1998 Geoff Keating + + * ffi.h.in: PowerPC support. + +1998-02-14 Andreas Schwab + + * ffi.h.in: Add m68k support. + (FFI_TYPE_LONGDOUBLE): Make it a separate value. + +Tue Feb 10 20:55:16 1998 Richard Henderson + + * ffi.h.in (SIZEOF_ARG): Use a pointer type by default. + + * ffi.h.in: Alpha support. + +Fri Nov 22 06:48:45 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Cygnus Support -> Cygnus Solutions. + +Wed Nov 20 22:31:01 1996 Anthony Green + + * ffi.h.in: Added ffi_type_void definition. + +Tue Oct 29 12:22:40 1996 Anthony Green + + * Makefile.am (hack_DATA): Always install ffi_mips.h. + + * ffi.h.in: Removed FFI_DEBUG. It's now in the correct + place (acconfig.h). + Added #include for size_t definition. + +Tue Oct 15 17:23:35 1996 Anthony Green + + * ffi.h.in, ffi_common.h, ffi_mips.h: More clean up. + Commented out #define of FFI_DEBUG. + +Tue Oct 15 13:01:06 1996 Anthony Green + + * ffi_common.h: Added bool definition. + + * ffi.h.in, ffi_common.h: Clean up based on LCLint output. + Added funny /*@...@*/ comments to annotate source. + +Mon Oct 14 12:29:23 1996 Anthony Green + + * ffi.h.in: Interface changes based on feedback from Jim + Blandy. + +Fri Oct 11 16:49:35 1996 Anthony Green + + * ffi.h.in: Small change for sparc support. + +Thu Oct 10 14:53:37 1996 Anthony Green + + * ffi_mips.h: Added FFI_TYPE_STRUCT_* definitions for + special structure return types. + +Wed Oct 9 13:55:57 1996 Anthony Green + + * ffi.h.in: Added SIZEOF_ARG definition for X86 + +Tue Oct 8 11:40:36 1996 Anthony Green + + * ffi.h.in (FFI_FN): Added macro for eliminating compiler warnings. + Use it to cast your function pointers to the proper type. + + * ffi_mips.h (SIZEOF_ARG): Added magic to fix type promotion bug. + + * Makefile.am (EXTRA_DIST): Added ffi_mips.h to EXTRA_DIST. + * Makefile: Rebuilt. + + * ffi_mips.h: Created. Moved all common mips definitions here. + +Mon Oct 7 10:58:12 1996 Anthony Green + + * ffi.h.in: The SGI assembler is very picky about parens. Redefined + some macros to avoid problems. + + * ffi.h.in: Added FFI_DEFAULT_ABI definitions. Also added + externs for pointer, and 64bit integral ffi_types. + +Fri Oct 4 09:51:37 1996 Anthony Green + + * ffi.h.in: Added FFI_ABI member to ffi_cif and changed + function prototypes accordingly. + Added #define @TARGET@. Now programs including ffi.h don't + have to specify this themselves. + +Thu Oct 3 15:36:44 1996 Anthony Green + + * ffi.h.in: Changed ffi_prep_cif's values from void* to void** + + * Makefile.am (EXTRA_DIST): Added EXTRA_DIST for "make dist" + to work. + * Makefile.in: Regenerated.
+Wed Oct 2 10:16:59 1996 Anthony Green + + * Makefile.am: Created + * Makefile.in: Generated + + * ffi_common.h: Added rcsid comment + +Tue Oct 1 17:13:51 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Created + +--- libffi/src ------------------------------------------------------------ + +Mon Oct 5 02:17:50 1998 Anthony Green + + * arm/ffi.c, arm/sysv.S: Created. + + * Makefile.am: Added arm files. + * Makefile.in: Rebuilt. + +Mon Oct 5 01:41:38 1998 Anthony Green + + * Makefile.am (libffi_la_LDFLAGS): Incremented revision. + +Sun Oct 4 16:27:17 1998 Anthony Green + + * alpha/osf.S (ffi_call_osf): Patch for DU assembler. + + * ffitest.c (main): long long and long double return values work + for x86. + +Fri Apr 17 11:50:58 1998 Anthony Green + + * Makefile.in: Rebuilt. + + * ffitest.c (main): Floating point tests not executed for systems + with broken long double (SunOS 4 w/ GCC). + + * types.c: Fixed x86 alignment info for long long types. + +Thu Apr 16 07:15:28 1998 Anthony Green + + * ffitest.c: Added more notes about GCC bugs under Irix 6. + +Wed Apr 15 08:42:22 1998 Anthony Green + + * ffitest.c (struct5): New test function. + (main): New test with struct5. + +Thu Mar 5 10:48:11 1998 Anthony Green + + * prep_cif.c (initialize_aggregate): Fix assertion for + nested structures. + +Tue Feb 24 16:33:41 1998 Anthony Green + + * prep_cif.c (ffi_prep_cif): Added long double support for sparc. + +Sun Feb 22 00:52:18 1998 Geoff Keating + + * powerpc/asm.h: New file. + * powerpc/ffi.c: New file. + * powerpc/sysv.S: New file. + * Makefile.am: PowerPC port. + * ffitest.c (main): Allow all tests to run even in presence of gcc + bug on PowerPC. + +1998-02-17 Anthony Green + + * mips/ffi.c: Fixed comment typo. + + * x86/ffi.c (ffi_prep_cif_machdep), x86/sysv.S (retfloat): + Fixed x86 long double return handling. + + * types.c: Fixed x86 long double alignment info. + +1998-02-14 Andreas Schwab + + * types.c: Add m68k support. + + * ffitest.c (floating): Add long double parameter. + (return_ll, ldblit): New functions to test long long and long + double return value. + (main): Fix type error in assignment of ts[1-4]_type.elements. + Add tests for long long and long double arguments and return + values. + + * prep_cif.c (ffi_prep_cif) [M68K]: Don't allocate argument for + struct value pointer. + + * m68k/ffi.c, m68k/sysv.S: New files. + * Makefile.am: Add bits for m68k port. Add kludge to work around + automake deficiency. + (test): Don't require "." in $PATH. + * Makefile.in: Rebuilt. + +Wed Feb 11 07:36:50 1998 Anthony Green + + * Makefile.in: Rebuilt. + +Tue Feb 10 20:56:00 1998 Richard Henderson + + * alpha/ffi.c, alpha/osf.S: New files. + * Makefile.am: Alpha port. + +Tue Nov 18 14:12:07 1997 Anthony Green + + * mips/ffi.c (ffi_prep_cif_machdep): Initialize rstruct_flag + for n32. + +Tue Jun 3 17:18:20 1997 Anthony Green + + * ffitest.c (main): Added hack to get structure tests working + correctly. + +Sat May 10 19:06:42 1997 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Explicitly list all distributable + files in subdirs. + (VERSION, CC): Removed. + +Thu May 8 17:19:01 1997 Anthony Green + + * Makefile.am: Many changes for new automake and libtool. + * Makefile.in: Rebuilt. + +Fri Nov 22 06:57:56 1996 Anthony Green + + * ffitest.c (main): Fixed test case for non mips machines. + +Wed Nov 20 22:31:59 1996 Anthony Green + + * types.c: Added ffi_type_void declaration. + +Tue Oct 29 13:07:19 1996 Anthony Green + + * ffitest.c (main): Fixed character constants.
+ (main): Emit warning for structure test 3 failure on Sun. + + * Makefile.am (VPATH): Fixed VPATH def'n so automake won't + strip it out. + Moved distdir hack from libffi to automake. + (ffitest): Added missing -c for $(COMPILE) (change in automake). + * Makefile.in: Rebuilt. + +Tue Oct 15 13:08:20 1996 Anthony Green + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + + * prep_cif.c (STACK_ARG_SIZE): Improved STACK_ARG_SIZE macro. + Clean up based on LCLint output. Added funny /*@...@*/ comments to + annotate source. + + * ffitest.c, debug.c: Cleaned up code. + +Mon Oct 14 12:26:56 1996 Anthony Green + + * ffitest.c: Changes based on interface changes. + + * prep_cif.c (ffi_prep_cif): Cleaned up interface based on + feedback from Jim Blandy. + +Fri Oct 11 15:53:18 1996 Anthony Green + + * ffitest.c: Reordered tests while porting to sparc. + Made changes to handle lame structure passing for sparc. + Removed calls to fflush(). + + * prep_cif.c (ffi_prep_cif): Added special case for sparc + aggregate type arguments. + +Thu Oct 10 09:56:51 1996 Anthony Green + + * ffitest.c (main): Added structure passing/returning tests. + + * prep_cif.c (ffi_prep_cif): Perform proper initialization + of structure return types if needed. + (initialize_aggregate): Bug fix + +Wed Oct 9 16:04:20 1996 Anthony Green + + * types.c: Added special definitions for x86 (double doesn't + need double word alignment). + + * ffitest.c: Added many tests + +Tue Oct 8 09:19:22 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Fixed assertion. + + * debug.c (ffi_assert): Must return a non void now. + + * Makefile.am: Added test production. + * Makefile: Rebuilt. + + * ffitest.c (main): Created. + + * types.c: Created. Stripped common code out of */ffi.c. + + * prep_cif.c: Added missing stdlib.h include. + + * debug.c (ffi_type_test): Used "a" to eliminate compiler + warnings in non-debug builds. Included ffi_common.h. + +Mon Oct 7 15:36:42 1996 Anthony Green + + * Makefile.am: Added a rule for .s -> .o + This is required by the SGI compiler. + * Makefile: Rebuilt. + +Fri Oct 4 09:51:08 1996 Anthony Green + + * prep_cif.c (initialize_aggregate): Moved abi specification + to ffi_prep_cif(). + +Thu Oct 3 15:37:37 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Changed values from void* to void**. + (initialize_aggregate): Fixed aggregate type initialization. + + * Makefile.am (EXTRA_DIST): Added support code for "make dist". + * Makefile.in: Regenerated. + +Wed Oct 2 11:41:57 1996 Anthony Green + + * debug.c, prep_cif: Created. + + * Makefile.am: Added debug.o and prep_cif.o to OBJ. + * Makefile.in: Regenerated. + + * Makefile.am (INCLUDES): Added missing -I../include + * Makefile.in: Regenerated. + +Tue Oct 1 17:11:51 1996 Anthony Green + + * error.c, Makefile.am: Created. + * Makefile.in: Generated. + +--- libffi/src/x86 -------------------------------------------------------- + +Sun Oct 4 16:27:17 1998 Anthony Green + + * sysv.S (retlongdouble): Fixed long long return value support. + * ffi.c (ffi_prep_cif_machdep): Ditto. + +Wed May 13 04:30:33 1998 Anthony Green + + * ffi.c (ffi_prep_cif_machdep): Fixed long double return value + support. + +Wed Apr 15 08:43:20 1998 Anthony Green + + * ffi.c (ffi_prep_args): small struct support was missing. + +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Mon Dec 2 15:12:58 1996 Tom Tromey + + * sysv.S: Use .balign, for a.out Linux boxes. + +Tue Oct 15 13:06:50 1996 Anthony Green + + * ffi.c: Clean up based on LCLint output. 
+ Added funny /*@...@*/ comments to annotate source. + +Fri Oct 11 16:43:38 1996 Anthony Green + + * ffi.c (ffi_call): Added assertion for bad ABIs. + +Wed Oct 9 13:57:27 1996 Anthony Green + + * sysv.S (retdouble): Fixed double return problems. + + * ffi.c (ffi_call): Corrected fn arg definition. + (ffi_prep_cif_machdep): Fixed double return problems + +Tue Oct 8 12:12:49 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + +Mon Oct 7 15:53:06 1996 Anthony Green + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:54:53 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 10:07:05 1996 Anthony Green + + * ffi.c, sysv.S, objects.mak: Created. + (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep(). + +--- libffi/src/mips ------------------------------------------------------- + +Tue Feb 17 17:18:07 1998 Anthony Green + + * o32.S: Fixed typo in comment. + + * ffi.c (ffi_prep_cif_machdep): Fixed argument processing. + +Thu May 8 16:53:58 1997 Anthony Green + + * o32.s, n32.s: Wrappers for SGI tool support. + + * objects.mak: Removed. + +Tue Oct 29 14:37:45 1996 Anthony Green + + * ffi.c (ffi_prep_args): Changed int z to size_t z. + +Tue Oct 15 13:17:25 1996 Anthony Green + + * n32.S: Fixed bad stack munging. + + * ffi.c: Moved prototypes for ffi_call_?32() to here from + ffi_mips.h because extended_cif is not defined in ffi_mips.h. + +Mon Oct 14 12:42:02 1996 Anthony Green + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 11:22:16 1996 Anthony Green + + * n32.S, ffi.c: Lots of changes to support passing and + returning structures with the n32 calling convention. + + * n32.S: Fixed fn pointer bug. + + * ffi.c (ffi_prep_cif_machdep): Fix for o32 structure + return values. + (ffi_prep_args): Fixed n32 structure passing when structures + partially fit in registers. + +Wed Oct 9 13:49:25 1996 Anthony Green + + * objects.mak: Added n32.o. + + * n32.S: Created. + + * ffi.c (ffi_prep_args): Added magic to support proper + n32 processing. + +Tue Oct 8 10:37:35 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + + * o32.S: This code is only built for o32 compiles. + A lot of the #define cruft has moved to ffi_mips.h. + + * ffi.c (ffi_prep_cif_machdep): Fixed arg flags. Second arg + is only processed if the first is either a float or double. + +Mon Oct 7 15:33:59 1996 Anthony Green + + * o32.S: Modified to compile under each of o32, n32 and n64. + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:53:25 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 17:41:22 1996 Anthony Green + + * o32.S: Removed crufty definitions. + +Wed Oct 2 12:53:42 1996 Anthony Green + + * ffi.c (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved all machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep. Return types + of FFI_TYPE_STRUCT are no different than FFI_TYPE_INT. + +Tue Oct 1 17:11:02 1996 Anthony Green + + * ffi.c, o32.S, object.mak: Created + +--- libffi/src/sparc ------------------------------------------------------ + +Tue Feb 24 16:33:18 1998 Anthony Green + + * ffi.c (ffi_prep_args): Added long double support. 
+Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Thu May 1 16:07:56 1997 Anthony Green + + * v8.S: Fixed minor portability problem reported by + Russ McManus . + +Tue Nov 26 14:12:43 1996 Anthony Green + + * v8.S: Used STACKFRAME define elsewhere. + + * ffi.c (ffi_prep_args): Zero out space when USING_PURIFY + is set. + (ffi_prep_cif_machdep): Allocate the correct stack frame + space for functions with < 6 args. + +Tue Oct 29 15:08:55 1996 Anthony Green + + * ffi.c (ffi_prep_args): int z is now size_t z. + +Mon Oct 14 13:31:24 1996 Anthony Green + + * v8.S (ffi_call_V8): Gordon rewrites this again. It looks + great now. + + * ffi.c (ffi_call): The comment about hijacked registers + is no longer valid after gordoni hacked v8.S. + + * v8.S (ffi_call_V8): Rewrote with gordoni. Much simpler. + + * v8.S, ffi.c: ffi_call() had changed to accept more than + two args, so v8.S had to change (because it hijacks incoming + arg registers). + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 17:48:16 1996 Anthony Green + + * ffi.c, v8.S, objects.mak: Created. + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE new file mode 100644 index 0000000..4f0b762 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE @@ -0,0 +1,21 @@ +libffi - Copyright (c) 1996-2020 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS new file mode 100644 index 0000000..d1d626e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/LICENSE-BUILDTOOLS @@ -0,0 +1,353 @@ +The libffi source distribution contains certain code that is not part +of libffi, and is only used as tooling to assist with the building and +testing of libffi. This includes the msvcc.sh script used to wrap the +Microsoft compiler with GNU compatible command-line options, +make_sunver.pl, and the libffi test code distributed in the +testsuite/libffi.bhaible directory. This code is distributed with +libffi for the purpose of convenience only, and libffi is in no way +derived from this code. + +msvcc.sh and testsuite/libffi.bhaible are both distributed under the +terms of the GNU GPL version 2, as below.
+ + + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) 
Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile.am new file mode 100644 index 0000000..563e9f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/Makefile.am @@ -0,0 +1,160 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS = foreign subdir-objects + +ACLOCAL_AMFLAGS = -I m4 + +SUBDIRS = include testsuite man +if BUILD_DOCS +## This hack is needed because it doesn't seem possible to make a +## conditional info_TEXINFOS in Automake. At least Automake 1.14 +## either gives errors -- if this attempted in the most +## straightforward way -- or simply unconditionally tries to build the +## info file. +SUBDIRS += doc +endif + +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) + +MAKEOVERRIDES= + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc + +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la + +libffi_la_SOURCES = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c + +if FFI_DEBUG +libffi_la_SOURCES += src/debug.c +endif + +noinst_HEADERS = \ + src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h \ + src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h \ + src/bfin/ffitarget.h \ + src/cris/ffitarget.h \ + src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h \ + src/m68k/ffitarget.h \ + src/m88k/ffitarget.h \ + src/metag/ffitarget.h \ + src/microblaze/ffitarget.h \ + src/mips/ffitarget.h \ + src/moxie/ffitarget.h \ + src/nios2/ffitarget.h \ + src/or1k/ffitarget.h \ + src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h src/powerpc/ffi_powerpc.h \ + src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h \ + src/sh/ffitarget.h \ + src/sh64/ffitarget.h \ + src/sparc/ffitarget.h src/sparc/internal.h \ + src/tile/ffitarget.h \ + src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h src/x86/asmnames.h \ + src/xtensa/ffitarget.h \ + src/dlmalloc.c + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c 
\ + src/frv/eabi.S src/ia64/ffi.c src/ia64/unix.S src/m32r/ffi.c \ + src/m32r/sysv.S src/m68k/ffi.c src/m68k/sysv.S src/m88k/ffi.c \ + src/m88k/obsd.S src/metag/ffi.c src/metag/sysv.S \ + src/microblaze/ffi.c src/microblaze/sysv.S src/mips/ffi.c \ + src/mips/o32.S src/mips/n32.S src/moxie/ffi.c \ + src/moxie/eabi.S src/nios2/ffi.c src/nios2/sysv.S \ + src/or1k/ffi.c src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S \ + src/pa/hpux32.S src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S + +TARGET_OBJ = @TARGET_OBJ@ +libffi_la_LIBADD = $(TARGET_OBJ) + +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) + +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) + +AM_CFLAGS = +if FFI_DEBUG +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +AM_CFLAGS += -DFFI_DEBUG +endif + +if LIBFFI_BUILD_VERSIONED_SHLIB +if LIBFFI_BUILD_VERSIONED_SHLIB_GNU +libffi_version_script = -Wl,--version-script,libffi.map +libffi_version_dep = libffi.map +endif +if LIBFFI_BUILD_VERSIONED_SHLIB_SUN +libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = libffi.map-sun +libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ + $(libffi_la_OBJECTS) $(libffi_la_LIBADD) + perl $(top_srcdir)/make_sunver.pl libffi.map \ + `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ + sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ + > $@ || (rm -f $@ ; exit 1) +endif +else +libffi_version_script = +libffi_version_dep = +endif +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $< + +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) + +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' 
; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README.md new file mode 100644 index 0000000..66c9186 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/README.md @@ -0,0 +1,482 @@ +Status +====== + +[![Build Status](https://travis-ci.org/libffi/libffi.svg?branch=master)](https://travis-ci.org/libffi/libffi) +[![Build status](https://ci.appveyor.com/api/projects/status/8lko9vagbx4w2kxq?svg=true)](https://ci.appveyor.com/project/atgreen/libffi) + +libffi-3.4 was released on TBD. Check the libffi web +page for updates: . + + +What is libffi? +=============== + +Compilers for high level languages generate code that follow certain +conventions. These conventions are necessary, in part, for separate +compilation to work. One such convention is the "calling +convention". The "calling convention" is essentially a set of +assumptions made by the compiler about where function arguments will +be found on entry to a function. A "calling convention" also specifies +where the return value for a function is found. + +Some programs may not know at the time of compilation what arguments +are to be passed to a function. For instance, an interpreter may be +told at run-time about the number and types of arguments used to call +a given function. Libffi can be used in such programs to provide a +bridge from the interpreter program to compiled code. + +The libffi library provides a portable, high level programming +interface to various calling conventions. This allows a programmer to +call any function specified by a call interface description at run +time. + +FFI stands for Foreign Function Interface. A foreign function +interface is the popular name for the interface that allows code +written in one language to call code written in another language. The +libffi library really only provides the lowest, machine dependent +layer of a fully featured foreign function interface. A layer must +exist above libffi that handles type conversions for values passed +between the two languages. + + +Supported Platforms +=================== + +Libffi has been ported to many different platforms. 
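As a concrete sketch of the call-interface idea the README describes above — building a call description (ffi_cif) for a signature at run time and then invoking through it — a minimal C program might look like the following. This is illustrative only and not part of the vendored libffi README; the build command and pkg-config name are assumptions about a typical installation.

    /* Minimal libffi sketch: call puts() through a run-time call description.
       Possible build command (assumed): cc demo.c $(pkg-config --cflags --libs libffi) */
    #include <stdio.h>
    #include <ffi.h>

    int main(void)
    {
        ffi_cif cif;                     /* the call interface description */
        ffi_type *arg_types[1];
        void *arg_values[1];
        ffi_arg result;                  /* integral return value lands here */
        char *text = "Hello from libffi";

        arg_types[0]  = &ffi_type_pointer;   /* puts takes one pointer argument */
        arg_values[0] = &text;

        /* Describe "int (*)(const char *)" with the default ABI, then call puts. */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK)
            ffi_call(&cif, FFI_FN(puts), &result, arg_values);

        return 0;
    }

A language binding (such as the Ruby ffi gem being vendored here) layers its own type conversions on top of exactly this low-level interface.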
+ +At the time of release, the following basic configurations have been +tested: + +| Architecture | Operating System | Compiler | +| --------------- | ---------------- | ----------------------- | +| AArch64 (ARM64) | iOS | Clang | +| AArch64 | Linux | GCC | +| AArch64 | Windows | MSVC | +| Alpha | Linux | GCC | +| Alpha | Tru64 | GCC | +| ARC | Linux | GCC | +| ARM | Linux | GCC | +| ARM | iOS | GCC | +| ARM | Windows | MSVC | +| AVR32 | Linux | GCC | +| Blackfin | uClinux | GCC | +| HPPA | HPUX | GCC | +| IA-64 | Linux | GCC | +| M68K | FreeMiNT | GCC | +| M68K | Linux | GCC | +| M68K | RTEMS | GCC | +| M88K | OpenBSD/mvme88k | GCC | +| Meta | Linux | GCC | +| MicroBlaze | Linux | GCC | +| MIPS | IRIX | GCC | +| MIPS | Linux | GCC | +| MIPS | RTEMS | GCC | +| MIPS64 | Linux | GCC | +| Moxie | Bare metal | GCC | +| Nios II | Linux | GCC | +| OpenRISC | Linux | GCC | +| PowerPC 32-bit | AIX | IBM XL C | +| PowerPC 64-bit | AIX | IBM XL C | +| PowerPC | AMIGA | GCC | +| PowerPC | Linux | GCC | +| PowerPC | Mac OSX | GCC | +| PowerPC | FreeBSD | GCC | +| PowerPC 64-bit | FreeBSD | GCC | +| PowerPC 64-bit | Linux ELFv1 | GCC | +| PowerPC 64-bit | Linux ELFv2 | GCC | +| RISC-V 32-bit | Linux | GCC | +| RISC-V 64-bit | Linux | GCC | +| S390 | Linux | GCC | +| S390X | Linux | GCC | +| SPARC | Linux | GCC | +| SPARC | Solaris | GCC | +| SPARC | Solaris | Oracle Solaris Studio C | +| SPARC64 | Linux | GCC | +| SPARC64 | FreeBSD | GCC | +| SPARC64 | Solaris | Oracle Solaris Studio C | +| TILE-Gx/TILEPro | Linux | GCC | +| VAX | OpenBSD/vax | GCC | +| X86 | FreeBSD | GCC | +| X86 | GNU HURD | GCC | +| X86 | Interix | GCC | +| X86 | kFreeBSD | GCC | +| X86 | Linux | GCC | +| X86 | OpenBSD | GCC | +| X86 | OS/2 | GCC | +| X86 | Solaris | GCC | +| X86 | Solaris | Oracle Solaris Studio C | +| X86 | Windows/Cygwin | GCC | +| X86 | Windows/MingW | GCC | +| X86-64 | FreeBSD | GCC | +| X86-64 | Linux | GCC | +| X86-64 | Linux/x32 | GCC | +| X86-64 | OpenBSD | GCC | +| X86-64 | Solaris | Oracle Solaris Studio C | +| X86-64 | Windows/Cygwin | GCC | +| X86-64 | Windows/MingW | GCC | +| X86-64 | Mac OSX | GCC | +| Xtensa | Linux | GCC | + +Please send additional platform test results to +libffi-discuss@sourceware.org. + +Installing libffi +================= + +First you must configure the distribution for your particular +system. Go to the directory you wish to build libffi in and run the +"configure" program found in the root directory of the libffi source +distribution. Note that building libffi requires a C99 compatible +compiler. + +If you're building libffi directly from git hosted sources, configure +won't exist yet; run ./autogen.sh first. This will require that you +install autoconf, automake and libtool. + +You may want to tell configure where to install the libffi library and +header files. To do that, use the ``--prefix`` configure switch. Libffi +will install under /usr/local by default. + +If you want to enable extra run-time debugging checks use the the +``--enable-debug`` configure switch. This is useful when your program dies +mysteriously while using libffi. + +Another useful configure switch is ``--enable-purify-safety``. Using this +will add some extra code which will suppress certain warnings when you +are using Purify with libffi. Only use this switch when using +Purify, as it will slow down the library. + +If you don't want to build documentation, use the ``--disable-docs`` +configure switch. + +It's also possible to build libffi on Windows platforms with +Microsoft's Visual C++ compiler. 
In this case, use the msvcc.sh +wrapper script during configuration like so: + + path/to/configure CC=path/to/msvcc.sh CXX=path/to/msvcc.sh LD=link CPP="cl -nologo -EP" CPPFLAGS="-DFFI_BUILDING_DLL" + +For 64-bit Windows builds, use ``CC="path/to/msvcc.sh -m64"`` and +``CXX="path/to/msvcc.sh -m64"``. You may also need to specify +``--build`` appropriately. + +It is also possible to build libffi on Windows platforms with the LLVM +project's clang-cl compiler, like below: + + path/to/configure CC="path/to/msvcc.sh -clang-cl" CXX="path/to/msvcc.sh -clang-cl" LD=link CPP="clang-cl -EP" + +When building with MSVC under a MingW environment, you may need to +remove the line in configure that sets 'fix_srcfile_path' to a 'cygpath' +command. ('cygpath' is not present in MingW, and is not required when +using MingW-style paths.) + +To build static library for ARM64 with MSVC using visual studio solution, msvc_build folder have + aarch64/Ffi_staticLib.sln + required header files in aarch64/aarch64_include/ + + +SPARC Solaris builds require the use of the GNU assembler and linker. +Point ``AS`` and ``LD`` environment variables at those tool prior to +configuration. + +For iOS builds, the ``libffi.xcodeproj`` Xcode project is available. + +Configure has many other options. Use ``configure --help`` to see them all. + +Once configure has finished, type "make". Note that you must be using +GNU make. You can ftp GNU make from ftp.gnu.org:/pub/gnu/make . + +To ensure that libffi is working as advertised, type "make check". +This will require that you have DejaGNU installed. + +To install the library and header files, type ``make install``. + + +History +======= + +See the git log for details at http://github.com/libffi/libffi. + + 3.4 TBD + Add support for Intel Control-flow Enforcement Technology (CET). + Add support for ARM Pointer Authentication (PA). + Fix 32-bit PPC regression. + Fix MIPS soft-float problem. + + 3.3 Nov-23-19 + Add RISC-V support. + New API in support of GO closures. + Add IEEE754 binary128 long double support for 64-bit Power + Default to Microsoft's 64 bit long double ABI with Visual C++. + GNU compiler uses 80 bits (128 in memory) FFI_GNUW64 ABI. + Add Windows on ARM64 (WOA) support. + Add Windows 32-bit ARM support. + Raw java (gcj) API deprecated. + Add pre-built PDF documentation to source distribution. + Many new test cases and bug fixes. + + 3.2.1 Nov-12-14 + Build fix for non-iOS AArch64 targets. + + 3.2 Nov-11-14 + Add C99 Complex Type support (currently only supported on + s390). + Add support for PASCAL and REGISTER calling conventions on x86 + Windows/Linux. + Add OpenRISC and Cygwin-64 support. + Bug fixes. + + 3.1 May-19-14 + Add AArch64 (ARM64) iOS support. + Add Nios II support. + Add m88k and DEC VAX support. + Add support for stdcall, thiscall, and fastcall on non-Windows + 32-bit x86 targets such as Linux. + Various Android, MIPS N32, x86, FreeBSD and UltraSPARC IIi + fixes. + Make the testsuite more robust: eliminate several spurious + failures, and respect the $CC and $CXX environment variables. + Archive off the manually maintained ChangeLog in favor of git + log. + + 3.0.13 Mar-17-13 + Add Meta support. + Add missing Moxie bits. + Fix stack alignment bug on 32-bit x86. + Build fix for m68000 targets. + Build fix for soft-float Power targets. + Fix the install dir location for some platforms when building + with GCC (OS X, Solaris). + Fix Cygwin regression. + + 3.0.12 Feb-11-13 + Add Moxie support. + Add AArch64 support. + Add Blackfin support. 
+ Add TILE-Gx/TILEPro support. + Add MicroBlaze support. + Add Xtensa support. + Add support for PaX enabled kernels with MPROTECT. + Add support for native vendor compilers on + Solaris and AIX. + Work around LLVM/GCC interoperability issue on x86_64. + + 3.0.11 Apr-11-12 + Lots of build fixes. + Add support for variadic functions (ffi_prep_cif_var). + Add Linux/x32 support. + Add thiscall, fastcall and MSVC cdecl support on Windows. + Add Amiga and newer MacOS support. + Add m68k FreeMiNT support. + Integration with iOS' xcode build tools. + Fix Octeon and MC68881 support. + Fix code pessimizations. + + 3.0.10 Aug-23-11 + Add support for Apple's iOS. + Add support for ARM VFP ABI. + Add RTEMS support for MIPS and M68K. + Fix instruction cache clearing problems on + ARM and SPARC. + Fix the N64 build on mips-sgi-irix6.5. + Enable builds with Microsoft's compiler. + Enable x86 builds with Oracle's Solaris compiler. + Fix support for calling code compiled with Oracle's Sparc + Solaris compiler. + Testsuite fixes for Tru64 Unix. + Additional platform support. + + 3.0.9 Dec-31-09 + Add AVR32 and win64 ports. Add ARM softfp support. + Many fixes for AIX, Solaris, HP-UX, *BSD. + Several PowerPC and x86-64 bug fixes. + Build DLL for windows. + + 3.0.8 Dec-19-08 + Add *BSD, BeOS, and PA-Linux support. + + 3.0.7 Nov-11-08 + Fix for ppc FreeBSD. + (thanks to Andreas Tobler) + + 3.0.6 Jul-17-08 + Fix for closures on sh. + Mark the sh/sh64 stack as non-executable. + (both thanks to Kaz Kojima) + + 3.0.5 Apr-3-08 + Fix libffi.pc file. + Fix #define ARM for IcedTea users. + Fix x86 closure bug. + + 3.0.4 Feb-24-08 + Fix x86 OpenBSD configury. + + 3.0.3 Feb-22-08 + Enable x86 OpenBSD thanks to Thomas Heller, and + x86-64 FreeBSD thanks to Björn König and Andreas Tobler. + Clean up test instruction in README. + + 3.0.2 Feb-21-08 + Improved x86 FreeBSD support. + Thanks to Björn König. + + 3.0.1 Feb-15-08 + Fix instruction cache flushing bug on MIPS. + Thanks to David Daney. + + 3.0.0 Feb-15-08 + Many changes, mostly thanks to the GCC project. + Cygnus Solutions is now Red Hat. + + [10 years go by...] + + 1.20 Oct-5-98 + Raffaele Sena produces ARM port. + + 1.19 Oct-5-98 + Fixed x86 long double and long long return support. + m68k bug fixes from Andreas Schwab. + Patch for DU assembler compatibility for the Alpha from Richard + Henderson. + + 1.18 Apr-17-98 + Bug fixes and MIPS configuration changes. + + 1.17 Feb-24-98 + Bug fixes and m68k port from Andreas Schwab. PowerPC port from + Geoffrey Keating. Various bug x86, Sparc and MIPS bug fixes. + + 1.16 Feb-11-98 + Richard Henderson produces Alpha port. + + 1.15 Dec-4-97 + Fixed an n32 ABI bug. New libtool, auto* support. + + 1.14 May-13-97 + libtool is now used to generate shared and static libraries. + Fixed a minor portability problem reported by Russ McManus + . + + 1.13 Dec-2-96 + Added --enable-purify-safety to keep Purify from complaining + about certain low level code. + Sparc fix for calling functions with < 6 args. + Linux x86 a.out fix. + + 1.12 Nov-22-96 + Added missing ffi_type_void, needed for supporting void return + types. Fixed test case for non MIPS machines. Cygnus Support + is now Cygnus Solutions. + + 1.11 Oct-30-96 + Added notes about GNU make. + + 1.10 Oct-29-96 + Added configuration fix for non GNU compilers. + + 1.09 Oct-29-96 + Added --enable-debug configure switch. Clean-ups based on LCLint + feedback. ffi_mips.h is always installed. Many configuration + fixes. Fixed ffitest.c for sparc builds. 
+ + 1.08 Oct-15-96 + Fixed n32 problem. Many clean-ups. + + 1.07 Oct-14-96 + Gordon Irlam rewrites v8.S again. Bug fixes. + + 1.06 Oct-14-96 + Gordon Irlam improved the sparc port. + + 1.05 Oct-14-96 + Interface changes based on feedback. + + 1.04 Oct-11-96 + Sparc port complete (modulo struct passing bug). + + 1.03 Oct-10-96 + Passing struct args, and returning struct values works for + all architectures/calling conventions. Expanded tests. + + 1.02 Oct-9-96 + Added SGI n32 support. Fixed bugs in both o32 and Linux support. + Added "make test". + + 1.01 Oct-8-96 + Fixed float passing bug in mips version. Restructured some + of the code. Builds cleanly with SGI tools. + + 1.00 Oct-7-96 + First release. No public announcement. + +Authors & Credits +================= + +libffi was originally written by Anthony Green . + +The developers of the GNU Compiler Collection project have made +innumerable valuable contributions. See the ChangeLog file for +details. + +Some of the ideas behind libffi were inspired by Gianni Mariani's free +gencall library for Silicon Graphics machines. + +The closure mechanism was designed and implemented by Kresten Krab +Thorup. + +Major processor architecture ports were contributed by the following +developers: + + aarch64 Marcus Shawcroft, James Greenhalgh + alpha Richard Henderson + arc Hackers at Synopsis + arm Raffaele Sena + avr32 Bradley Smith + blackfin Alexandre Keunecke I. de Mendonca + cris Simon Posnjak, Hans-Peter Nilsson + frv Anthony Green + ia64 Hans Boehm + m32r Kazuhiro Inaoka + m68k Andreas Schwab + m88k Miod Vallat + metag Hackers at Imagination Technologies + microblaze Nathan Rossi + mips Anthony Green, Casey Marshall + mips64 David Daney + moxie Anthony Green + nios ii Sandra Loosemore + openrisc Sebastian Macke + pa Randolph Chung, Dave Anglin, Andreas Tobler + powerpc Geoffrey Keating, Andreas Tobler, + David Edelsohn, John Hornkvist + powerpc64 Jakub Jelinek + riscv Michael Knyszek, Andrew Waterman, Stef O'Rear + s390 Gerhard Tonn, Ulrich Weigand + sh Kaz Kojima + sh64 Kaz Kojima + sparc Anthony Green, Gordon Irlam + tile-gx/tilepro Walter Lee + vax Miod Vallat + x86 Anthony Green, Jon Beniston + x86-64 Bo Thorsen + xtensa Chris Zankel + +Jesper Skov and Andrew Haley both did more than their fair share of +stepping through the code and tracking down bugs. + +Thanks also to Tom Tromey for bug fixes, documentation and +configuration help. + +Thanks to Jim Blandy, who provided some useful feedback on the libffi +interface. + +Andreas Tobler has done a tremendous amount of work on the testsuite. + +Alex Oliva solved the executable page problem for SElinux. + +The list above is almost certainly incomplete and inaccurate. I'm +happy to make corrections or additions upon request. + +If you have a problem, or have found a bug, please send a note to the +author at green@moxielogic.com, or the project mailing list at +libffi-discuss@sourceware.org. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude.m4 new file mode 100644 index 0000000..1a70efb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/acinclude.m4 @@ -0,0 +1,479 @@ +# mmap(2) blacklisting. Some platforms provide the mmap library routine +# but don't support all of the features we need from it. 
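The blacklist comment above concerns platforms whose mmap lacks properties the library relies on. Purely as an illustration of the MAP_ANON(YMOUS) property the macro below cares about — an anonymous private mapping must succeed and hand back zeroed pages — a small C program could exercise it like this; the macro itself decides via a compile-time check plus a host-OS blacklist rather than by running such code, and the header set here is an assumption.

    /* Illustrative only: exercise the anonymous-mmap property described by
       AC_FUNC_MMAP_BLACKLIST.  The real configure probe is a compile check
       plus an OS blacklist; this just spells the property out. */
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_ANONYMOUS
    #define MAP_ANONYMOUS MAP_ANON      /* same fallback the macro's probe uses */
    #endif

    int main(void)
    {
        size_t len = 4096;
        unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        int zeroed = (p[0] == 0 && p[len - 1] == 0);  /* pages must arrive zeroed */
        munmap(p, len);
        printf("anonymous mmap %s\n", zeroed ? "works" : "is not usable");
        return zeroed ? 0 : 1;
    }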
+AC_DEFUN([AC_FUNC_MMAP_BLACKLIST], +[ +AC_CHECK_HEADER([sys/mman.h], + [libffi_header_sys_mman_h=yes], [libffi_header_sys_mman_h=no]) +AC_CHECK_FUNC([mmap], [libffi_func_mmap=yes], [libffi_func_mmap=no]) +if test "$libffi_header_sys_mman_h" != yes \ + || test "$libffi_func_mmap" != yes; then + ac_cv_func_mmap_file=no + ac_cv_func_mmap_dev_zero=no + ac_cv_func_mmap_anon=no +else + AC_CACHE_CHECK([whether read-only mmap of a plain file works], + ac_cv_func_mmap_file, + [# Add a system to this blacklist if + # mmap(0, stat_size, PROT_READ, MAP_PRIVATE, fd, 0) doesn't return a + # memory area containing the same data that you'd get if you applied + # read() to the same fd. The only system known to have a problem here + # is VMS, where text files have record structure. + case "$host_os" in + vms* | ultrix*) + ac_cv_func_mmap_file=no ;; + *) + ac_cv_func_mmap_file=yes;; + esac]) + AC_CACHE_CHECK([whether mmap from /dev/zero works], + ac_cv_func_mmap_dev_zero, + [# Add a system to this blacklist if it has mmap() but /dev/zero + # does not exist, or if mmapping /dev/zero does not give anonymous + # zeroed pages with both the following properties: + # 1. If you map N consecutive pages in with one call, and then + # unmap any subset of those pages, the pages that were not + # explicitly unmapped remain accessible. + # 2. If you map two adjacent blocks of memory and then unmap them + # both at once, they must both go away. + # Systems known to be in this category are Windows (all variants), + # VMS, and Darwin. + case "$host_os" in + vms* | cygwin* | pe | mingw* | darwin* | ultrix* | hpux10* | hpux11.00) + ac_cv_func_mmap_dev_zero=no ;; + *) + ac_cv_func_mmap_dev_zero=yes;; + esac]) + + # Unlike /dev/zero, the MAP_ANON(YMOUS) defines can be probed for. + AC_CACHE_CHECK([for MAP_ANON(YMOUS)], ac_cv_decl_map_anon, + [AC_TRY_COMPILE( +[#include +#include +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +], +[int n = MAP_ANONYMOUS;], + ac_cv_decl_map_anon=yes, + ac_cv_decl_map_anon=no)]) + + if test $ac_cv_decl_map_anon = no; then + ac_cv_func_mmap_anon=no + else + AC_CACHE_CHECK([whether mmap with MAP_ANON(YMOUS) works], + ac_cv_func_mmap_anon, + [# Add a system to this blacklist if it has mmap() and MAP_ANON or + # MAP_ANONYMOUS, but using mmap(..., MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) + # doesn't give anonymous zeroed pages with the same properties listed + # above for use of /dev/zero. + # Systems known to be in this category are Windows, VMS, and SCO Unix. + case "$host_os" in + vms* | cygwin* | pe | mingw* | sco* | udk* ) + ac_cv_func_mmap_anon=no ;; + *) + ac_cv_func_mmap_anon=yes;; + esac]) + fi +fi + +if test $ac_cv_func_mmap_file = yes; then + AC_DEFINE(HAVE_MMAP_FILE, 1, + [Define if read-only mmap of a plain file works.]) +fi +if test $ac_cv_func_mmap_dev_zero = yes; then + AC_DEFINE(HAVE_MMAP_DEV_ZERO, 1, + [Define if mmap of /dev/zero works.]) +fi +if test $ac_cv_func_mmap_anon = yes; then + AC_DEFINE(HAVE_MMAP_ANON, 1, + [Define if mmap with MAP_ANON(YMOUS) works.]) +fi +]) + +dnl ---------------------------------------------------------------------- +dnl This whole bit snagged from libstdc++-v3, via libatomic. + +dnl +dnl LIBFFI_ENABLE +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, permit a|b|c) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, SHELL-CODE-HANDLER) +dnl +dnl See docs/html/17_intro/configury.html#enable for documentation. 
+dnl +m4_define([LIBFFI_ENABLE],[dnl +m4_define([_g_switch],[--enable-$1])dnl +m4_define([_g_help],[AC_HELP_STRING(_g_switch$3,[$4 @<:@default=$2@:>@])])dnl + AC_ARG_ENABLE($1,_g_help, + m4_bmatch([$5], + [^permit ], + [[ + case "$enableval" in + m4_bpatsubst([$5],[permit ])) ;; + *) AC_MSG_ERROR(Unknown argument to enable/disable $1) ;; + dnl Idea for future: generate a URL pointing to + dnl "onlinedocs/configopts.html#whatever" + esac + ]], + [^$], + [[ + case "$enableval" in + yes|no) ;; + *) AC_MSG_ERROR(Argument to enable/disable $1 must be yes or no) ;; + esac + ]], + [[$5]]), + [enable_]m4_bpatsubst([$1],-,_)[=][$2]) +m4_undefine([_g_switch])dnl +m4_undefine([_g_help])dnl +]) + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. + # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. 
In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + libat_ld_is_lld=no + if $LD --version 2>/dev/null | grep 'LLD '> /dev/null 2>&1; then + libat_ld_is_lld=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. 
+ # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl Add version tags to symbols in shared library (or not), additionally +dnl marking other symbols as private/local (or not). +dnl +dnl --enable-symvers=style adds a version script to the linker call when +dnl creating the shared library. The choice of version script is +dnl controlled by 'style'. +dnl --disable-symvers does not. +dnl + Usage: LIBFFI_ENABLE_SYMVERS[(DEFAULT)] +dnl Where DEFAULT is either 'yes' or 'no'. Passing `yes' tries to +dnl choose a default style based on linker characteristics. Passing +dnl 'no' disables versioning. +dnl +AC_DEFUN([LIBFFI_ENABLE_SYMVERS], [ + +LIBFFI_ENABLE(symvers,yes,[=STYLE], + [enables symbol versioning of the shared library], + [permit yes|no|gnu*|sun]) + +# If we never went through the LIBFFI_CHECK_LINKER_FEATURES macro, then we +# don't know enough about $LD to do tricks... +AC_REQUIRE([LIBFFI_CHECK_LINKER_FEATURES]) + +# Turn a 'yes' into a suitable default. +if test x$enable_symvers = xyes ; then + # FIXME The following test is too strict, in theory. + if test $enable_shared = no || test "x$LD" = x; then + enable_symvers=no + else + if test $with_gnu_ld = yes ; then + enable_symvers=gnu + else + case ${target_os} in + # Sun symbol versioning exists since Solaris 2.5. + solaris2.[[5-9]]* | solaris2.1[[0-9]]*) + enable_symvers=sun ;; + *) + enable_symvers=no ;; + esac + fi + fi +fi + +# Check if 'sun' was requested on non-Solaris 2 platforms. +if test x$enable_symvers = xsun ; then + case ${target_os} in + solaris2*) + # All fine. + ;; + *) + # Unlikely to work. + AC_MSG_WARN([=== You have requested Sun symbol versioning, but]) + AC_MSG_WARN([=== you are not targetting Solaris 2.]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + ;; + esac +fi + +# Check to see if libgcc_s exists, indicating that shared libgcc is possible. 
+if test $enable_symvers != no; then + AC_MSG_CHECKING([for shared libgcc]) + ac_save_CFLAGS="$CFLAGS" + CFLAGS=' -lgcc_s' + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes, libat_shared_libgcc=no) + CFLAGS="$ac_save_CFLAGS" + if test $libat_shared_libgcc = no; then + cat > conftest.c <&1 >/dev/null \ + | sed -n 's/^.* -lgcc_s\([^ ]*\) .*$/\1/p'` +changequote([,])dnl + rm -f conftest.c conftest.so + if test x${libat_libgcc_s_suffix+set} = xset; then + CFLAGS=" -lgcc_s$libat_libgcc_s_suffix" + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes) + CFLAGS="$ac_save_CFLAGS" + fi + fi + AC_MSG_RESULT($libat_shared_libgcc) +fi + +# For GNU ld, we need at least this version. The format is described in +# LIBFFI_CHECK_LINKER_FEATURES above. +libat_min_gnu_ld_version=21400 +# XXXXXXXXXXX libat_gnu_ld_version=21390 + +# Check to see if unspecified "yes" value can win, given results above. +# Change "yes" into either "no" or a style name. +if test $enable_symvers != no && test $libat_shared_libgcc = yes; then + if test $with_gnu_ld = yes; then + if test $libat_gnu_ld_version -ge $libat_min_gnu_ld_version ; then + enable_symvers=gnu + elif test $libat_ld_is_gold = yes ; then + enable_symvers=gnu + elif test $libat_ld_is_lld = yes ; then + enable_symvers=gnu + else + # The right tools, the right setup, but too old. Fallbacks? + AC_MSG_WARN(=== Linker version $libat_gnu_ld_version is too old for) + AC_MSG_WARN(=== full symbol versioning support in this release of GCC.) + AC_MSG_WARN(=== You would need to upgrade your binutils to version) + AC_MSG_WARN(=== $libat_min_gnu_ld_version or later and rebuild GCC.) + if test $libat_gnu_ld_version -ge 21200 ; then + # Globbing fix is present, proper block support is not. + dnl AC_MSG_WARN([=== Dude, you are soooo close. Maybe we can fake it.]) + dnl enable_symvers=??? + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + else + # 2.11 or older. + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi + fi + elif test $enable_symvers = sun; then + : All interesting versions of Sun ld support sun style symbol versioning. 
+ else + # just fail for now + AC_MSG_WARN([=== You have requested some kind of symbol versioning, but]) + AC_MSG_WARN([=== either you are not using a supported linker, or you are]) + AC_MSG_WARN([=== not building a shared libgcc_s (which is required).]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi +fi +if test $enable_symvers = gnu; then + AC_DEFINE(LIBFFI_GNU_SYMBOL_VERSIONING, 1, + [Define to 1 if GNU symbol versioning is used for libatomic.]) +fi + +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB, test $enable_symvers != no) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_GNU, test $enable_symvers = gnu) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_SUN, test $enable_symvers = sun) +AC_MSG_NOTICE(versioning on shared library symbols is $enable_symvers) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen.sh b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen.sh new file mode 100755 index 0000000..fb014a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/autogen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec autoreconf -v -i diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.guess b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.guess new file mode 100644 index 0000000..faa63aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.guess @@ -0,0 +1,1466 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2017 Free Software Foundation, Inc. + +timestamp='2017-05-11' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2017 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." 
+ +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. 
We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 
's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = hppa2.0w ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo 
${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + 
xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` + echo ${UNAME_MACHINE}-pc-isc$UNAME_REL + elif /bin/uname -X 2>/dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +cat >&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.sub b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.sub new file mode 100644 index 0000000..40ea5df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/config.sub @@ -0,0 +1,1836 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2017 Free Software Foundation, Inc. + +timestamp='2017-04-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2017 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia16 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | wasm32 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | wasm32-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + 
;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + 
os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + nsx-tandem) + basic_machine=nsx-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + 
basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + wasm32) + basic_machine=wasm32-unknown + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + pru-*) + os=-elf + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.ac b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.ac new file mode 100644 index 0000000..3ca1f4f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.ac @@ -0,0 +1,410 @@ +dnl Process this with autoconf to create configure + +AC_PREREQ(2.68) + +AC_INIT([libffi], [3.3], [http://github.com/libffi/libffi/issues]) +AC_CONFIG_HEADERS([fficonfig.h]) + +AC_CANONICAL_SYSTEM +target_alias=${target_alias-$host_alias} + +case "${host}" in + frv*-elf) + LDFLAGS=`echo $LDFLAGS | sed "s/\-B[^ ]*libgloss\/frv\///"`\ -B`pwd`/../libgloss/frv/ + ;; +esac + +AX_ENABLE_BUILDDIR + +AM_INIT_AUTOMAKE + +# The same as in boehm-gc and libstdc++. Have to borrow it from there. +# We must force CC to /not/ be precious variables; otherwise +# the wrong, non-multilib-adjusted value will be used in multilibs. +# As a side effect, we have to subst CFLAGS ourselves. +# Also save and restore CFLAGS, since AC_PROG_CC will come up with +# defaults of its own if none are provided. + +m4_rename([_AC_ARG_VAR_PRECIOUS],[real_PRECIOUS]) +m4_define([_AC_ARG_VAR_PRECIOUS],[]) +save_CFLAGS=$CFLAGS +AC_PROG_CC +AC_PROG_CXX +CFLAGS=$save_CFLAGS +m4_undefine([_AC_ARG_VAR_PRECIOUS]) +m4_rename_force([real_PRECIOUS],[_AC_ARG_VAR_PRECIOUS]) + +AC_SUBST(CFLAGS) + +AM_PROG_AS +AM_PROG_CC_C_O +AC_PROG_LIBTOOL +AC_CONFIG_MACRO_DIR([m4]) + +# Test for 64-bit build. +AC_CHECK_SIZEOF([size_t]) + +AX_COMPILER_VENDOR +AX_CC_MAXOPT +# The AX_CFLAGS_WARN_ALL macro doesn't currently work for sunpro +# compiler. 
+if test "$ax_cv_c_compiler_vendor" != "sun"; then + AX_CFLAGS_WARN_ALL +fi + +if test "x$GCC" = "xyes"; then + CFLAGS="$CFLAGS -fexceptions" +fi + +cat > local.exp < conftest.s + if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then + libffi_cv_as_x86_pcrel=yes + fi + ]) + if test "x$libffi_cv_as_x86_pcrel" = xyes; then + AC_DEFINE(HAVE_AS_X86_PCREL, 1, + [Define if your assembler supports PC relative relocs.]) + fi + ;; + + S390) + AC_CACHE_CHECK([compiler uses zarch features], + libffi_cv_as_s390_zarch, [ + libffi_cv_as_s390_zarch=no + echo 'void foo(void) { bar(); bar(); }' > conftest.c + if $CC $CFLAGS -S conftest.c > /dev/null 2>&1; then + if grep -q brasl conftest.s; then + libffi_cv_as_s390_zarch=yes + fi + fi + ]) + if test "x$libffi_cv_as_s390_zarch" = xyes; then + AC_DEFINE(HAVE_AS_S390_ZARCH, 1, + [Define if the compiler uses zarch features.]) + fi + ;; +esac + +AC_CACHE_CHECK([whether compiler supports pointer authentication], + libffi_cv_as_ptrauth, [ + libffi_cv_as_ptrauth=unknown + AC_TRY_COMPILE(,[ +#ifdef __clang__ +# if __has_feature(ptrauth_calls) +# define HAVE_PTRAUTH 1 +# endif +#endif + +#ifndef HAVE_PTRAUTH +# error Pointer authentication not supported +#endif + ], + [libffi_cv_as_ptrauth=yes], + [libffi_cv_as_ptrauth=no]) +]) +if test "x$libffi_cv_as_ptrauth" = xyes; then + AC_DEFINE(HAVE_PTRAUTH, 1, + [Define if your compiler supports pointer authentication.]) +fi + +# On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. +AC_ARG_ENABLE(pax_emutramp, + [ --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC], + if test "$enable_pax_emutramp" = "yes"; then + AC_DEFINE(FFI_MMAP_EXEC_EMUTRAMP_PAX, 1, + [Define this if you want to enable pax emulated trampolines]) + fi) + +LT_SYS_SYMBOL_USCORE +if test "x$sys_symbol_underscore" = xyes; then + AC_DEFINE(SYMBOL_UNDERSCORE, 1, [Define if symbols are underscored.]) +fi + +FFI_EXEC_TRAMPOLINE_TABLE=0 +case "$target" in + *arm*-apple-* | aarch64-apple-*) + FFI_EXEC_TRAMPOLINE_TABLE=1 + AC_DEFINE(FFI_EXEC_TRAMPOLINE_TABLE, 1, + [Cannot use PROT_EXEC on this target, so, we revert to + alternative means]) + ;; + *-apple-* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris* | *-linux-android*) + AC_DEFINE(FFI_MMAP_EXEC_WRIT, 1, + [Cannot use malloc on this target, so, we revert to + alternative means]) + ;; +esac +AM_CONDITIONAL(FFI_EXEC_TRAMPOLINE_TABLE, test x$FFI_EXEC_TRAMPOLINE_TABLE = x1) +AC_SUBST(FFI_EXEC_TRAMPOLINE_TABLE) + +if test x$TARGET = xX86_64; then + AC_CACHE_CHECK([toolchain supports unwind section type], + libffi_cv_as_x86_64_unwind_section_type, [ + cat > conftest1.s << EOF +.text +.globl foo +foo: +jmp bar +.section .eh_frame,"a",@unwind +bar: +EOF + + cat > conftest2.c << EOF +extern void foo(); +int main(){foo();} +EOF + + libffi_cv_as_x86_64_unwind_section_type=no + # we ensure that we can compile _and_ link an assembly file containing an @unwind section + # since the compiler can support it and not the linker (ie old binutils) + if $CC -Wa,--fatal-warnings $CFLAGS -c conftest1.s > /dev/null 2>&1 && \ + $CC conftest2.c conftest1.o > /dev/null 2>&1 ; then + libffi_cv_as_x86_64_unwind_section_type=yes + fi + ]) + if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then + AC_DEFINE(HAVE_AS_X86_64_UNWIND_SECTION_TYPE, 1, + [Define if your assembler supports unwind section type.]) + fi +fi + +if test "x$GCC" = "xyes"; then + AC_CACHE_CHECK([whether .eh_frame section should be read-only], + libffi_cv_ro_eh_frame, [ + libffi_cv_ro_eh_frame=yes + 
echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c + if $CC $CFLAGS -c -fpic -fexceptions -o conftest.o conftest.c > /dev/null 2>&1; then + if readelf -WS conftest.o | grep -q -n 'eh_frame .* WA'; then + libffi_cv_ro_eh_frame=no + fi + fi + rm -f conftest.* + ]) + if test "x$libffi_cv_ro_eh_frame" = xyes; then + AC_DEFINE(HAVE_RO_EH_FRAME, 1, + [Define if .eh_frame sections should be read-only.]) + AC_DEFINE(EH_FRAME_FLAGS, "a", + [Define to the flags needed for the .section .eh_frame directive. ]) + else + AC_DEFINE(EH_FRAME_FLAGS, "aw", + [Define to the flags needed for the .section .eh_frame directive. ]) + fi + + AC_CACHE_CHECK([for __attribute__((visibility("hidden")))], + libffi_cv_hidden_visibility_attribute, [ + echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c + libffi_cv_hidden_visibility_attribute=no + if AC_TRY_COMMAND(${CC-cc} -Werror -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then + if egrep '(\.hidden|\.private_extern).*foo' conftest.s >/dev/null; then + libffi_cv_hidden_visibility_attribute=yes + fi + fi + rm -f conftest.* + ]) + if test $libffi_cv_hidden_visibility_attribute = yes; then + AC_DEFINE(HAVE_HIDDEN_VISIBILITY_ATTRIBUTE, 1, + [Define if __attribute__((visibility("hidden"))) is supported.]) + fi +fi + +AC_ARG_ENABLE(docs, + AC_HELP_STRING([--disable-docs], + [Disable building of docs (default: no)]), + [enable_docs=no], + [enable_docs=yes]) +AM_CONDITIONAL(BUILD_DOCS, [test x$enable_docs = xyes]) + +AH_BOTTOM([ +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif +]) + +AC_SUBST(TARGET) +AC_SUBST(TARGETDIR) + +changequote(<,>) +TARGET_OBJ= +for i in $SOURCES; do + TARGET_OBJ="${TARGET_OBJ} src/${TARGETDIR}/"`echo $i | sed 's/[cS]$/lo/'` +done +changequote([,]) +AC_SUBST(TARGET_OBJ) + +AC_SUBST(SHELL) + +AC_ARG_ENABLE(debug, +[ --enable-debug debugging mode], + if test "$enable_debug" = "yes"; then + AC_DEFINE(FFI_DEBUG, 1, [Define this if you want extra debugging.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(structs, +[ --disable-structs omit code for struct support], + if test "$enable_structs" = "no"; then + AC_DEFINE(FFI_NO_STRUCTS, 1, [Define this if you do not want support for aggregate types.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(raw-api, +[ --disable-raw-api make the raw api unavailable], + if test "$enable_raw_api" = "no"; then + AC_DEFINE(FFI_NO_RAW_API, 1, [Define this if you do not want support for the raw API.]) + fi) + +AC_ARG_ENABLE(purify-safety, +[ --enable-purify-safety purify-safe mode], + if test "$enable_purify_safety" = "yes"; then + AC_DEFINE(USING_PURIFY, 1, [Define this if you are using Purify and want to suppress spurious messages.]) + fi) + +AC_ARG_ENABLE(multi-os-directory, +[ --disable-multi-os-directory + disable use of gcc --print-multi-os-directory to change the library installation directory]) + +# These variables are only ever used when we cross-build to X86_WIN32. +# And we only support this with GCC, so... 
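Several of the configure.ac checks added above probe the toolchain by test-compiling a tiny C file and defining a macro only when the compile succeeds. As a rough standalone equivalent of the pointer-authentication probe quoted earlier in this file, the snippet below keeps the preprocessor logic from the AC_TRY_COMPILE body as-is; the main() wrapper is added only so it forms a complete translation unit. It compiles only with a Clang that reports the ptrauth_calls feature, which is the condition under which configure defines HAVE_PTRAUTH.

/* Sketch of the libffi_cv_as_ptrauth probe above: building this file
   succeeds only when the compiler supports pointer authentication
   (e.g. an arm64e-targeting Clang). */
#ifdef __clang__
# if __has_feature(ptrauth_calls)
#  define HAVE_PTRAUTH 1
# endif
#endif

#ifndef HAVE_PTRAUTH
# error Pointer authentication not supported
#endif

int main(void) { return 0; }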
+if test "x$GCC" = "xyes"; then + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + toolexecdir='${exec_prefix}'/'$(target_alias)' + toolexeclibdir='${toolexecdir}'/lib + else + toolexecdir='${libdir}'/gcc-lib/'$(target_alias)' + toolexeclibdir='${libdir}' + fi + if test x"$enable_multi_os_directory" != x"no"; then + multi_os_directory=`$CC $CFLAGS -print-multi-os-directory` + case $multi_os_directory in + .) ;; # Avoid trailing /. + ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;; + esac + fi + AC_SUBST(toolexecdir) +else + toolexeclibdir='${libdir}' +fi +AC_SUBST(toolexeclibdir) + +# Check linker support. +LIBFFI_ENABLE_SYMVERS + +AC_CONFIG_COMMANDS(include, [test -d include || mkdir include]) +AC_CONFIG_COMMANDS(src, [ +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR +], [TARGETDIR="$TARGETDIR"]) + +AC_CONFIG_FILES(include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc) + +AC_OUTPUT + +# Copy this file instead of using AC_CONFIG_LINK in order to support +# compiling with MSVC, which won't understand cygwin style symlinks. +cp ${srcdir}/src/$TARGETDIR/ffitarget.h include/ffitarget.h diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.host b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.host new file mode 100644 index 0000000..055e955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/configure.host @@ -0,0 +1,306 @@ +# configure.host +# +# This shell script handles all host based configuration for libffi. +# + +# THIS TABLE IS SORTED. KEEP IT THAT WAY. +# Most of the time we can define all the variables all at once... +case "${host}" in + aarch64*-*-cygwin* | aarch64*-*-mingw* | aarch64*-*-win* ) + TARGET=ARM_WIN64; TARGETDIR=aarch64 + MSVC=1 + ;; + + aarch64*-*-*) + TARGET=AARCH64; TARGETDIR=aarch64 + SOURCES="ffi.c sysv.S" + ;; + + alpha*-*-*) + TARGET=ALPHA; TARGETDIR=alpha; + # Support 128-bit long double, changeable via command-line switch. + HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)' + SOURCES="ffi.c osf.S" + ;; + + arc*-*-*) + TARGET=ARC; TARGETDIR=arc + SOURCES="ffi.c arcompact.S" + ;; + + arm*-*-cygwin* | arm*-*-mingw* | arm*-*-win* ) + TARGET=ARM_WIN32; TARGETDIR=arm + MSVC=1 + ;; + + arm*-*-*) + TARGET=ARM; TARGETDIR=arm + SOURCES="ffi.c sysv.S" + ;; + + avr32*-*-*) + TARGET=AVR32; TARGETDIR=avr32 + SOURCES="ffi.c sysv.S" + ;; + + bfin*) + TARGET=BFIN; TARGETDIR=bfin + SOURCES="ffi.c sysv.S" + ;; + + cris-*-*) + TARGET=LIBFFI_CRIS; TARGETDIR=cris + SOURCES="ffi.c sysv.S" + ;; + + frv-*-*) + TARGET=FRV; TARGETDIR=frv + SOURCES="ffi.c eabi.S" + ;; + + hppa*-*-linux* | parisc*-*-linux* | hppa*-*-openbsd*) + TARGET=PA_LINUX; TARGETDIR=pa + SOURCES="ffi.c linux.S" + ;; + hppa*64-*-hpux*) + TARGET=PA64_HPUX; TARGETDIR=pa + ;; + hppa*-*-hpux*) + TARGET=PA_HPUX; TARGETDIR=pa + SOURCES="ffi.c hpux32.S" + ;; + + i?86-*-freebsd* | i?86-*-openbsd*) + TARGET=X86_FREEBSD; TARGETDIR=x86 + ;; + + i?86-*-cygwin* | i?86-*-mingw* | i?86-*-win* | i?86-*-os2* | i?86-*-interix* \ + | x86_64-*-cygwin* | x86_64-*-mingw* | x86_64-*-win* ) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_WIN32 + else + TARGET=X86_WIN64 + fi + if test "${ax_cv_c_compiler_vendor}" = "microsoft"; then + MSVC=1 + fi + # All mingw/cygwin/win32 builds require -no-undefined for sharedlib. + # We must also check with_cross_host to decide if this is a native + # or cross-build and select where to install dlls appropriately. 
+ if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"'; + else + AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"'; + fi + ;; + + i?86-*-darwin* | x86_64-*-darwin* | i?86-*-ios | x86_64-*-ios) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_DARWIN + else + TARGET=X86_64 + fi + ;; + + i?86-*-* | x86_64-*-* | amd64-*) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + case "$host" in + x86_64-*x32|x86_64-x32-*) + TARGET_X32=yes + TARGET=X86_64 + ;; + *) + echo 'int foo (void) { return __x86_64__; }' > conftest.c + if $CC $CFLAGS -Werror -S conftest.c -o conftest.s > /dev/null 2>&1; then + TARGET_X32=yes + TARGET=X86_64 + else + TARGET=X86; + fi + rm -f conftest.* + ;; + esac + else + TARGET=X86_64; + fi + ;; + + ia64*-*-*) + TARGET=IA64; TARGETDIR=ia64 + SOURCES="ffi.c unix.S" + ;; + + m32r*-*-*) + TARGET=M32R; TARGETDIR=m32r + SOURCES="ffi.c sysv.S" + ;; + + m68k-*-*) + TARGET=M68K; TARGETDIR=m68k + SOURCES="ffi.c sysv.S" + ;; + + m88k-*-*) + TARGET=M88K; TARGETDIR=m88k + SOURCES="ffi.c obsd.S" + ;; + + microblaze*-*-*) + TARGET=MICROBLAZE; TARGETDIR=microblaze + SOURCES="ffi.c sysv.S" + ;; + + moxie-*-*) + TARGET=MOXIE; TARGETDIR=moxie + SOURCES="ffi.c eabi.S" + ;; + + metag-*-*) + TARGET=METAG; TARGETDIR=metag + SOURCES="ffi.c sysv.S" + ;; + + mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*) + TARGET=MIPS; TARGETDIR=mips + ;; + mips*-*linux* | mips*-*-openbsd*) + # Support 128-bit long double for NewABI. + HAVE_LONG_DOUBLE='defined(__mips64)' + TARGET=MIPS; TARGETDIR=mips + ;; + + nios2*-linux*) + TARGET=NIOS2; TARGETDIR=nios2 + SOURCES="ffi.c sysv.S" + ;; + + or1k*-*-*) + TARGET=OR1K; TARGETDIR=or1k + SOURCES="ffi.c sysv.S" + ;; + + powerpc*-*-linux* | powerpc-*-sysv*) + TARGET=POWERPC; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc-*-amigaos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-eabi*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-beos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-darwin* | powerpc64-*-darwin*) + TARGET=POWERPC_DARWIN; TARGETDIR=powerpc + ;; + powerpc-*-aix* | rs6000-*-aix*) + TARGET=POWERPC_AIX; TARGETDIR=powerpc + ;; + powerpc-*-freebsd* | powerpc-*-openbsd* | powerpc-*-netbsd*) + TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc64-*-freebsd*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc*-*-rtems*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + + riscv*-*) + TARGET=RISCV; TARGETDIR=riscv + SOURCES="ffi.c sysv.S" + ;; + + s390-*-* | s390x-*-*) + TARGET=S390; TARGETDIR=s390 + SOURCES="ffi.c sysv.S" + ;; + + sh-*-* | sh[34]*-*-*) + TARGET=SH; TARGETDIR=sh + SOURCES="ffi.c sysv.S" + ;; + sh64-*-* | sh5*-*-*) + TARGET=SH64; TARGETDIR=sh64 + SOURCES="ffi.c sysv.S" + ;; + + sparc*-*-*) + TARGET=SPARC; TARGETDIR=sparc + SOURCES="ffi.c ffi64.c v8.S v9.S" + ;; + + tile*-*) + TARGET=TILE; TARGETDIR=tile + SOURCES="ffi.c tile.S" + ;; + + vax-*-*) + TARGET=VAX; TARGETDIR=vax + SOURCES="ffi.c elfbsd.S" + ;; + + xtensa*-*) + TARGET=XTENSA; TARGETDIR=xtensa + SOURCES="ffi.c sysv.S" + ;; +esac + +# ... but some of the cases above share configury. 
+case "${TARGET}" in + ARM_WIN32) + SOURCES="ffi.c sysv_msvc_arm32.S" + ;; + ARM_WIN64) + SOURCES="ffi.c win64_armasm.S" + ;; + MIPS) + SOURCES="ffi.c o32.S n32.S" + ;; + POWERPC) + SOURCES="ffi.c ffi_sysv.c ffi_linux64.c sysv.S ppc_closure.S" + SOURCES="${SOURCES} linux64.S linux64_closure.S" + ;; + POWERPC_AIX) + SOURCES="ffi_darwin.c aix.S aix_closure.S" + ;; + POWERPC_DARWIN) + SOURCES="ffi_darwin.c darwin.S darwin_closure.S" + ;; + POWERPC_FREEBSD) + SOURCES="ffi.c ffi_sysv.c sysv.S ppc_closure.S" + ;; + X86 | X86_DARWIN | X86_FREEBSD | X86_WIN32) + if test "$MSVC" = 1; then + SOURCES="ffi.c sysv_intel.S" + else + SOURCES="ffi.c sysv.S" + fi + ;; + X86_64) + if test x"$TARGET_X32" = xyes; then + SOURCES="ffi64.c unix64.S" + else + SOURCES="ffi64.c unix64.S ffiw64.c win64.S" + fi + ;; + X86_WIN64) + if test "$MSVC" = 1; then + SOURCES="ffiw64.c win64_intel.S" + else + SOURCES="ffiw64.c win64.S" + fi + ;; +esac + +# If we failed to configure SOURCES, we can't do anything. +if test -z "${SOURCES}"; then + UNSUPPORTED=1 +fi diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers.py b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers.py new file mode 100755 index 0000000..b8c28e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/generate-darwin-source-and-headers.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +import subprocess +import os +import errno +import collections +import glob +import argparse + +class Platform(object): + pass + +class simulator_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'i386' + triple = 'i386-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class simulator64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'x86_64' + triple = 'x86_64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +class device_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'armv7' + triple = 'arm-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm__\n\n" + suffix = "\n\n#endif" + src_dir = 'arm' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class device64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'arm64' + triple = 'aarch64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm64__\n\n" + suffix = "\n\n#endif" + src_dir = 'aarch64' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class desktop32_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'i386' + triple = 'i386-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + + +class desktop64_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'x86_64' + triple = 'x86_64-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +def mkdir_p(path): + try: 
+ os.makedirs(path) + except OSError as exc: # Python >2.5 + if exc.errno != errno.EEXIST: + raise + + +def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''): + mkdir_p(dst_dir) + out_filename = filename + + if file_suffix: + if filename in ['internal64.h', 'asmnames.h', 'internal.h']: + out_filename = filename + else: + split_name = os.path.splitext(filename) + out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1]) + + with open(os.path.join(src_dir, filename)) as in_file: + with open(os.path.join(dst_dir, out_filename), 'w') as out_file: + if prefix: + out_file.write(prefix) + + out_file.write(in_file.read()) + + if suffix: + out_file.write(suffix) + + +def list_files(src_dir, pattern=None, filelist=None): + if pattern: filelist = glob.iglob(os.path.join(src_dir, pattern)) + for file in filelist: + yield os.path.basename(file) + + +def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None): + for filename in list_files(src_dir, pattern=pattern, filelist=filelist): + move_file(src_dir, dst_dir, filename, file_suffix=file_suffix, prefix=prefix, suffix=suffix) + + +def copy_src_platform_files(platform): + src_dir = os.path.join('src', platform.src_dir) + dst_dir = os.path.join(platform.directory, 'src', platform.src_dir) + copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix) + + +def build_target(platform, platform_headers): + def xcrun_cmd(cmd): + return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch) + + tag='%s-%s' % (platform.sdk, platform.arch) + build_dir = 'build_%s' % tag + mkdir_p(build_dir) + env = dict(CC=xcrun_cmd('clang'), + LD=xcrun_cmd('ld'), + CFLAGS='%s -fembed-bitcode' % (platform.version_min)) + working_dir = os.getcwd() + try: + os.chdir(build_dir) + subprocess.check_call(['../configure', '-host', platform.triple], env=env) + finally: + os.chdir(working_dir) + + for src_dir in [build_dir, os.path.join(build_dir, 'include')]: + copy_files(src_dir, + os.path.join(platform.directory, 'include'), + pattern='*.h', + file_suffix=platform.arch, + prefix=platform.prefix, + suffix=platform.suffix) + + for filename in list_files(src_dir, pattern='*.h'): + platform_headers[filename].add((platform.prefix, platform.arch, platform.suffix)) + + +def generate_source_and_headers(generate_osx=True, generate_ios=True): + copy_files('src', 'darwin_common/src', pattern='*.c') + copy_files('include', 'darwin_common/include', pattern='*.h') + + if generate_ios: + copy_src_platform_files(simulator_platform) + copy_src_platform_files(simulator64_platform) + copy_src_platform_files(device_platform) + copy_src_platform_files(device64_platform) + if generate_osx: + copy_src_platform_files(desktop64_platform) + + platform_headers = collections.defaultdict(set) + + if generate_ios: + build_target(simulator_platform, platform_headers) + build_target(simulator64_platform, platform_headers) + build_target(device_platform, platform_headers) + build_target(device64_platform, platform_headers) + if generate_osx: + build_target(desktop64_platform, platform_headers) + + mkdir_p('darwin_common/include') + for header_name, tag_tuples in platform_headers.iteritems(): + basename, suffix = os.path.splitext(header_name) + with open(os.path.join('darwin_common/include', header_name), 'w') as header: + for tag_tuple in tag_tuples: + header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, 
tag_tuple[2])) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--only-ios', action='store_true', default=False) + parser.add_argument('--only-osx', action='store_true', default=False) + args = parser.parse_args() + + generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile.am new file mode 100644 index 0000000..c59df9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/Makefile.am @@ -0,0 +1,9 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +DISTCLEANFILES=ffitarget.h +noinst_HEADERS=ffi_common.h ffi_cfi.h +EXTRA_DIST=ffi.h.in + +nodist_include_HEADERS = ffi.h ffitarget.h diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi.h.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi.h.in new file mode 100644 index 0000000..38885b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi.h.in @@ -0,0 +1,523 @@ +/* -----------------------------------------------------------------*-C-*- + libffi @VERSION@ - Copyright (c) 2011, 2014, 2019 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. 
*/ +#ifndef @TARGET@ +#define @TARGET@ +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if @HAVE_LONG_DOUBLE@ +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if @HAVE_LONG_DOUBLE@ +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +#if !FFI_NATIVE_RAW_API +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue) __attribute__((deprecated)); +#endif + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) __attribute__((deprecated)); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +#if defined(PA_LINUX) || defined(PA_HPUX) +#define FFI_CLOSURE_PTR(X) ((void *)((unsigned int)(X) | 2)) +#define FFI_RESTORE_PTR(X) ((void *)((unsigned int)(X) & ~3)) +#else +#define FFI_CLOSURE_PTR(X) (X) +#define FFI_RESTORE_PTR(X) (X) +#endif + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +#if !FFI_NATIVE_RAW_API +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) __attribute__((deprecated)); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) __attribute__((deprecated)); +#endif + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if @HAVE_LONG_DOUBLE@ +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi.h new file mode 100644 index 0000000..244ce57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_cfi.h @@ -0,0 +1,55 @@ +/* ----------------------------------------------------------------------- + ffi_cfi.h - Copyright (c) 2014 Red Hat, Inc. + + Conditionally assemble cfi directives. Only necessary for building libffi. 
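The ffi.h.in template above declares the public entry points that client code such as the ffi gem ultimately drives: ffi_prep_cif, ffi_call, the ffi_type_* descriptors, and the FFI_FN helper. The following is a minimal sketch of a caller, not something taken from this diff; it assumes an installed <ffi.h> plus the FFI_DEFAULT_ABI and ffi_arg definitions that come from the per-target ffitarget.h (generated elsewhere), and the add() function is purely illustrative.

/* Minimal libffi call sketch: describe int add(int, int) with a cif,
   then invoke it through ffi_call. */
#include <ffi.h>
#include <stdio.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };

    /* Describe the call: default ABI, two int arguments, int return. */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
                     &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

    int a = 2, b = 40;
    void *arg_values[2] = { &a, &b };

    /* Integral return values narrower than a word are received in an
       ffi_arg, per the libffi documentation. */
    ffi_arg result;
    ffi_call(&cif, FFI_FN(add), &result, arg_values);

    printf("add(2, 40) = %d\n", (int)result);
    return 0;
}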
+ ----------------------------------------------------------------------- */ + +#ifndef FFI_CFI_H +#define FFI_CFI_H + +#ifdef HAVE_AS_CFI_PSEUDO_OP + +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp +# define cfi_escape(...) .cfi_escape __VA_ARGS__ + +#else + +# define cfi_startproc +# define cfi_endproc +# define cfi_def_cfa(reg, off) +# define cfi_def_cfa_register(reg) +# define cfi_def_cfa_offset(off) +# define cfi_adjust_cfa_offset(off) +# define cfi_offset(reg, off) +# define cfi_rel_offset(reg, off) +# define cfi_register(r1, r2) +# define cfi_return_column(reg) +# define cfi_restore(reg) +# define cfi_same_value(reg) +# define cfi_undefined(reg) +# define cfi_remember_state +# define cfi_restore_state +# define cfi_window_save +# define cfi_personality(enc, exp) +# define cfi_lsda(enc, exp) +# define cfi_escape(...) + +#endif /* HAVE_AS_CFI_PSEUDO_OP */ +#endif /* FFI_CFI_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common.h new file mode 100644 index 0000000..76b9dd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/include/ffi_common.h @@ -0,0 +1,153 @@ +/* ----------------------------------------------------------------------- + ffi_common.h - Copyright (C) 2011, 2012, 2013 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc + Copyright (c) 1996 Red Hat, Inc. + + Common internal definitions and macros. Only necessary for building + libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_COMMON_H +#define FFI_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Do not move this. Some versions of AIX are very picky about where + this is positioned. */ +#ifdef __GNUC__ +# if HAVE_ALLOCA_H +# include +# else + /* mingw64 defines this already in malloc.h. */ +# ifndef alloca +# define alloca __builtin_alloca +# endif +# endif +# define MAYBE_UNUSED __attribute__((__unused__)) +#else +# define MAYBE_UNUSED +# if HAVE_ALLOCA_H +# include +# else +# ifdef _AIX +# pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +# ifdef _MSC_VER +# define alloca _alloca +# else +char *alloca (); +# endif +# endif +# endif +# endif +#endif + +/* Check for the existence of memcpy. 
*/ +#if STDC_HEADERS +# include +#else +# ifndef HAVE_MEMCPY +# define memcpy(d, s, n) bcopy ((s), (d), (n)) +# endif +#endif + +#if defined(FFI_DEBUG) +#include +#endif + +#ifdef FFI_DEBUG +void ffi_assert(char *expr, char *file, int line); +void ffi_stop_here(void); +void ffi_type_test(ffi_type *a, char *file, int line); + +#define FFI_ASSERT(x) ((x) ? (void)0 : ffi_assert(#x, __FILE__,__LINE__)) +#define FFI_ASSERT_AT(x, f, l) ((x) ? 0 : ffi_assert(#x, (f), (l))) +#define FFI_ASSERT_VALID_TYPE(x) ffi_type_test (x, __FILE__, __LINE__) +#else +#define FFI_ASSERT(x) +#define FFI_ASSERT_AT(x, f, l) +#define FFI_ASSERT_VALID_TYPE(x) +#endif + +/* v cast to size_t and aligned up to a multiple of a */ +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) +/* v cast to size_t and aligned down to a multiple of a */ +#define FFI_ALIGN_DOWN(v, a) (((size_t) (v)) & -a) + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif); +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs); + + +#if HAVE_LONG_DOUBLE_VARIANT +/* Used to adjust size/alignment of ffi types. */ +void ffi_prep_types (ffi_abi abi); +#endif + +/* Used internally, but overridden by some architectures */ +ffi_status ffi_prep_cif_core(ffi_cif *cif, + ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +/* Translate a data pointer to a code pointer. Needed for closures on + some targets. */ +void *ffi_data_to_code_pointer (void *data) FFI_HIDDEN; + +/* Extended cif, used in callback from assembly routine */ +typedef struct +{ + ffi_cif *cif; + void *rvalue; + void **avalue; +} extended_cif; + +/* Terse sized type definitions. */ +#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C) +typedef unsigned char UINT8; +typedef signed char SINT8; +typedef unsigned short UINT16; +typedef signed short SINT16; +typedef unsigned int UINT32; +typedef signed int SINT32; +# ifdef _MSC_VER +typedef unsigned __int64 UINT64; +typedef signed __int64 SINT64; +# else +# include +typedef uint64_t UINT64; +typedef int64_t SINT64; +# endif +#else +typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); +typedef signed int SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); +typedef signed int SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); +typedef signed int SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); +typedef signed int SINT64 __attribute__((__mode__(__DI__))); +#endif + +typedef float FLOAT32; + +#ifndef __GNUC__ +#define __builtin_expect(x, expected_value) (x) +#endif +#define LIKELY(x) __builtin_expect(!!(x),1) +#define UNLIKELY(x) __builtin_expect((x)!=0,0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map.in new file mode 100644 index 0000000..de8778a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.map.in @@ -0,0 +1,76 @@ +#define LIBFFI_ASM +#define LIBFFI_H +#include +#include + +/* These version numbers correspond to the libtool-version abi numbers, + not to the libffi release numbers. */ + +LIBFFI_BASE_8.0 { + global: + /* Exported data variables. 
*/ + ffi_type_void; + ffi_type_uint8; + ffi_type_sint8; + ffi_type_uint16; + ffi_type_sint16; + ffi_type_uint32; + ffi_type_sint32; + ffi_type_uint64; + ffi_type_sint64; + ffi_type_float; + ffi_type_double; + ffi_type_longdouble; + ffi_type_pointer; + + /* Exported functions. */ + ffi_call; + ffi_prep_cif; + ffi_prep_cif_var; + + ffi_raw_call; + ffi_ptrarray_to_raw; + ffi_raw_to_ptrarray; + ffi_raw_size; + + ffi_java_raw_call; + ffi_java_ptrarray_to_raw; + ffi_java_raw_to_ptrarray; + ffi_java_raw_size; + + ffi_get_struct_offsets; + local: + *; +}; + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +LIBFFI_COMPLEX_8.0 { + global: + /* Exported data variables. */ + ffi_type_complex_float; + ffi_type_complex_double; + ffi_type_complex_longdouble; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_CLOSURES +LIBFFI_CLOSURE_8.0 { + global: + ffi_closure_alloc; + ffi_closure_free; + ffi_prep_closure; + ffi_prep_closure_loc; + ffi_prep_raw_closure; + ffi_prep_raw_closure_loc; + ffi_prep_java_raw_closure; + ffi_prep_java_raw_closure_loc; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_GO_CLOSURES +LIBFFI_GO_CLOSURE_8.0 { + global: + ffi_call_go; + ffi_prep_go_closure; +} LIBFFI_CLOSURE_8.0; +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc.in new file mode 100644 index 0000000..6fad83b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +toolexeclibdir=@toolexeclibdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Description: Library supporting Foreign Function Interfaces +Version: @PACKAGE_VERSION@ +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj new file mode 100644 index 0000000..480c4a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj @@ -0,0 +1,997 @@ +// !$*UTF8*$! 
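
As a quick orientation for the exported surface listed in the version script above, here is a minimal, hedged C sketch (not part of the vendored diff) that touches only symbols from LIBFFI_BASE_8.0 -- ffi_prep_cif, ffi_call and the ffi_type_* data objects. The file name and build line are illustrative; on an installed copy the flags would come from the libffi.pc generated from the libffi.pc.in template above.

    /* ffi_demo.c -- illustrative sketch only; not part of the vendored sources.
       Exercises symbols exported by LIBFFI_BASE_8.0: ffi_prep_cif, ffi_call,
       and the ffi_type_* data objects.  A plausible build line, assuming an
       installed libffi.pc:  cc ffi_demo.c $(pkg-config --cflags --libs libffi) */
    #include <ffi.h>
    #include <stdio.h>

    static int add(int a, int b) { return a + b; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[2] = { &ffi_type_sint32, &ffi_type_sint32 };
        int a = 2, b = 40;
        void *arg_values[2] = { &a, &b };
        ffi_arg result;                 /* integral returns go through ffi_arg */

        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint32, arg_types) != FFI_OK)
            return 1;
        ffi_call(&cif, FFI_FN(add), &result, arg_values);
        printf("%d\n", (int)result);    /* prints 42 */
        return 0;
    }
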
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + DBFA714A187F1D8600A76262 /* ffi.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713F187F1D8600A76262 /* ffi_common.h */; }; + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7140187F1D8600A76262 /* fficonfig.h */; }; + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + DBFA714E187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA714F187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA715A187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA715B187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */; }; + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + 
FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715F187F1D9B00A76262 /* ffi_armv7.h */; }; + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */; }; + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = 
PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F4F1F5D846400EF414E /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDDB2F501F5D846400EF414E /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDDB2F511F5D846400EF414E /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; +/* End PBXBuildFile section */ + +/* Begin PBXCopyFilesBuildPhase section */ + DB13B1641849DF1E0010F42D /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */, + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */, + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */, + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FC11F6144FA00AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */, + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FE11F6156E000AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = 
"include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */, + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + 43E9A5DA1D35373600926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DB1D35374400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DC1D35375400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DD1D35375400926A8F /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + DB13B1661849DF1E0010F42D /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + DB13B1911849DF510010F42D /* ffi.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = ffi.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + DBFA713E187F1D8600A76262 /* ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi.h; sourceTree = ""; }; + DBFA713F187F1D8600A76262 /* ffi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_common.h; sourceTree = ""; }; + DBFA7140187F1D8600A76262 /* fficonfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig.h; sourceTree = ""; }; + DBFA7141187F1D8600A76262 /* ffitarget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget.h; sourceTree = ""; }; + DBFA7143187F1D8600A76262 /* closures.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = closures.c; sourceTree = ""; }; + DBFA7145187F1D8600A76262 /* dlmalloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dlmalloc.c; sourceTree = ""; }; + DBFA7147187F1D8600A76262 /* prep_cif.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = prep_cif.c; sourceTree = ""; }; + DBFA7148187F1D8600A76262 /* raw_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = raw_api.c; sourceTree = ""; }; + DBFA7149187F1D8600A76262 /* types.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = types.c; sourceTree = ""; }; + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = ffi_arm64.h; sourceTree = ""; }; + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_armv7.h; sourceTree = ""; }; + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_arm64.h; sourceTree = ""; }; + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_armv7.h; sourceTree = ""; }; + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_arm64.h; sourceTree = ""; }; + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_armv7.h; sourceTree = ""; }; + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA716C187F1D9B00A76262 /* ffi_arm64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_arm64.c; sourceTree = ""; }; + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_arm64.S; sourceTree = ""; }; + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_armv7.c; sourceTree = ""; }; + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_armv7.S; sourceTree = ""; }; + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + FDB52FC51F6144FA00AA92E6 /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asmnames.h; sourceTree 
= ""; }; + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + FDDB2F421F5D68C900EF414E /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + FDDB2F431F5D68C900EF414E /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + FDDB2F621F5D846400EF414E /* libffi.a */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXGroup section */ + DB13B15B1849DEB70010F42D = { + isa = PBXGroup; + children = ( + DBFA713C187F1D8600A76262 /* darwin_common */, + DBFA715C187F1D9B00A76262 /* darwin_ios */, + DBFA7180187F1DA100A76262 /* darwin_osx */, + DB13B1671849DF1E0010F42D /* Products */, + ); + sourceTree = ""; + }; + DB13B1671849DF1E0010F42D /* Products */ = { + isa = PBXGroup; + children = ( + DB13B1661849DF1E0010F42D /* libffi.a */, + DB13B1911849DF510010F42D /* ffi.dylib */, + FDDB2F621F5D846400EF414E /* libffi.a */, + FDB52FC51F6144FA00AA92E6 /* libffi.a */, + ); + name = Products; + sourceTree = ""; + }; + DBFA713C187F1D8600A76262 /* darwin_common */ = { + isa = PBXGroup; + children = ( + DBFA713D187F1D8600A76262 /* include */, + DBFA7142187F1D8600A76262 /* src */, + ); + path = darwin_common; + sourceTree = ""; + }; + DBFA713D187F1D8600A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA713E187F1D8600A76262 /* ffi.h */, + DBFA713F187F1D8600A76262 /* ffi_common.h */, + DBFA7140187F1D8600A76262 /* fficonfig.h */, + DBFA7141187F1D8600A76262 /* ffitarget.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7142187F1D8600A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7143187F1D8600A76262 /* closures.c */, + DBFA7145187F1D8600A76262 /* dlmalloc.c */, + DBFA7147187F1D8600A76262 /* prep_cif.c */, + DBFA7148187F1D8600A76262 /* raw_api.c */, + DBFA7149187F1D8600A76262 /* types.c */, + ); + path = src; + sourceTree = ""; + }; + DBFA715C187F1D9B00A76262 /* darwin_ios */ = { + isa = PBXGroup; + children = ( + DBFA715D187F1D9B00A76262 /* include */, + DBFA716A187F1D9B00A76262 /* src */, + ); + path = darwin_ios; + sourceTree = ""; + }; + DBFA715D187F1D9B00A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */, + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */, + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */, + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */, + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */, + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */, + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */, + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */, + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA716A187F1D9B00A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA716B187F1D9B00A76262 /* aarch64 */, + DBFA716E187F1D9B00A76262 /* arm */, + DBFA7172187F1D9B00A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA716B187F1D9B00A76262 /* aarch64 */ = { + isa = PBXGroup; + children = ( + 43E9A5DA1D35373600926A8F /* internal.h */, + DBFA716C187F1D9B00A76262 /* 
ffi_arm64.c */, + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */, + ); + path = aarch64; + sourceTree = ""; + }; + DBFA716E187F1D9B00A76262 /* arm */ = { + isa = PBXGroup; + children = ( + 43E9A5DB1D35374400926A8F /* internal.h */, + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */, + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */, + ); + path = arm; + sourceTree = ""; + }; + DBFA7172187F1D9B00A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + 43E9A5DC1D35375400926A8F /* internal.h */, + 43E9A5DD1D35375400926A8F /* internal64.h */, + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */, + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */, + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */, + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; + DBFA7180187F1DA100A76262 /* darwin_osx */ = { + isa = PBXGroup; + children = ( + DBFA7181187F1DA100A76262 /* include */, + DBFA7188187F1DA100A76262 /* src */, + ); + path = darwin_osx; + sourceTree = ""; + }; + DBFA7181187F1DA100A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */, + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */, + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7188187F1DA100A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7189187F1DA100A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA7189187F1DA100A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + FDDB2F431F5D68C900EF414E /* internal.h */, + FDDB2F421F5D68C900EF414E /* internal64.h */, + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */, + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */, + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */, + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */, + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + DB13B18F1849DF510010F42D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */, + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */, + DBFA714A187F1D8600A76262 /* ffi.h in Headers */, + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */, + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */, + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */, + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + DB13B1651849DF1E0010F42D /* libffi-iOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */; + buildPhases = ( + 43B5D3FB1D35480D00D1E1FD /* Run Script */, + DB13B1621849DF1E0010F42D /* Sources */, + DB13B1641849DF1E0010F42D /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-iOS"; + productName = ffi; + productReference = DB13B1661849DF1E0010F42D /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + DB13B1901849DF510010F42D /* libffi-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */; + buildPhases = ( + DB13B3061849E0490010F42D /* ShellScript */, + DB13B18D1849DF510010F42D /* Sources */, + DB13B18F1849DF510010F42D /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = 
"libffi-Mac"; + productName = ffi; + productReference = DB13B1911849DF510010F42D /* ffi.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */; + buildPhases = ( + FDB52FB11F6144FA00AA92E6 /* Run Script */, + FDB52FB21F6144FA00AA92E6 /* Sources */, + FDB52FC11F6144FA00AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-tvOS"; + productName = ffi; + productReference = FDB52FC51F6144FA00AA92E6 /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + FDDB2F471F5D846400EF414E /* libffi-static-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */; + buildPhases = ( + FDDB2F481F5D846400EF414E /* ShellScript */, + FDDB2F491F5D846400EF414E /* Sources */, + FDB52FE11F6156E000AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-static-Mac"; + productName = ffi; + productReference = FDDB2F621F5D846400EF414E /* libffi.a */; + productType = "com.apple.product-type.library.dynamic"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + DB13B15C1849DEB70010F42D /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0830; + }; + buildConfigurationList = DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = DB13B15B1849DEB70010F42D; + productRefGroup = DB13B1671849DF1E0010F42D /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + DB13B1651849DF1E0010F42D /* libffi-iOS */, + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */, + DB13B1901849DF510010F42D /* libffi-Mac */, + FDDB2F471F5D846400EF414E /* libffi-static-Mac */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 43B5D3FB1D35480D00D1E1FD /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + DB13B3061849E0490010F42D /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; + FDB52FB11F6144FA00AA92E6 /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! 
-f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + FDDB2F481F5D846400EF414E /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + DB13B1621849DF1E0010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */, + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */, + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */, + DBFA714E187F1D8600A76262 /* closures.c in Sources */, + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */, + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */, + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */, + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */, + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */, + DBFA715A187F1D8600A76262 /* types.c in Sources */, + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */, + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DB13B18D1849DF510010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */, + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */, + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */, + DBFA715B187F1D8600A76262 /* types.c in Sources */, + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */, + DBFA714F187F1D8600A76262 /* closures.c in Sources */, + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */, + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FB21F6144FA00AA92E6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */, + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */, + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */, + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */, + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */, + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */, + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */, + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */, + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */, + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */, + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */, + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDDB2F491F5D846400EF414E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */, + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */, + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */, + FDDB2F4F1F5D846400EF414E /* types.c in Sources */, + 
FDDB2F501F5D846400EF414E /* raw_api.c in Sources */, + FDDB2F511F5D846400EF414E /* closures.c in Sources */, + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */, + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + DB13B1601849DEB70010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + ONLY_ACTIVE_ARCH = YES; + }; + name = Debug; + }; + DB13B1611849DEB70010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + }; + name = Release; + }; + DB13B1871849DF1E0010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DSTROOT = /tmp/ffi.dst; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Debug; + }; + DB13B1881849DF1E0010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + 
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + DSTROOT = /tmp/ffi.dst; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALIDATE_PRODUCT = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Release; + }; + DB13B1B11849DF520010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + DB13B1B21849DF520010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; + FDB52FC31F6144FA00AA92E6 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + 
CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + }; + name = Debug; + }; + FDB52FC41F6144FA00AA92E6 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + FDDB2F601F5D846400EF414E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + FDDB2F611F5D846400EF414E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = 
YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1601849DEB70010F42D /* Debug */, + DB13B1611849DEB70010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1871849DF1E0010F42D /* Debug */, + DB13B1881849DF1E0010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1B11849DF520010F42D /* Debug */, + DB13B1B21849DF520010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDB52FC31F6144FA00AA92E6 /* Debug */, + FDB52FC41F6144FA00AA92E6 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDDB2F601F5D846400EF414E /* Debug */, + FDDB2F611F5D846400EF414E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = DB13B15C1849DEB70010F42D /* Project object */; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version new file mode 100644 index 0000000..607fee5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/libtool-version @@ -0,0 +1,29 @@ +# This file is used to maintain libtool version info for libffi. See +# the libtool manual to understand the meaning of the fields. This is +# a separate file so that version updates don't involve re-running +# automake. +# +# Here are a set of rules to help you update your library version +# information: +# +# 1. Start with version information of `0:0:0' for each libtool library. +# +# 2. Update the version information only immediately before a public +# release of your software. More frequent updates are unnecessary, +# and only guarantee that the current interface number gets larger +# faster. +# +# 3. 
If the library source code has changed at all since the last +# update, then increment revision (`c:r:a' becomes `c:r+1:a'). +# +# 4. If any interfaces have been added, removed, or changed since the +# last update, increment current, and set revision to 0. +# +# 5. If any interfaces have been added since the last public release, +# then increment age. +# +# 6. If any interfaces have been removed since the last public +# release, then set age to 0. +# +# CURRENT:REVISION:AGE +9:0:1 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi.m4 new file mode 100644 index 0000000..3e28602 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/asmcfi.m4 @@ -0,0 +1,13 @@ +AC_DEFUN([GCC_AS_CFI_PSEUDO_OP], +[AC_CACHE_CHECK([assembler .cfi pseudo-op support], + gcc_cv_as_cfi_pseudo_op, [ + gcc_cv_as_cfi_pseudo_op=unknown + AC_TRY_COMPILE([asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc");],, + [gcc_cv_as_cfi_pseudo_op=yes], + [gcc_cv_as_cfi_pseudo_op=no]) + ]) + if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then + AC_DEFINE(HAVE_AS_CFI_PSEUDO_OP, 1, + [Define if your assembler supports .cfi_* directives.]) + fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag.m4 new file mode 100644 index 0000000..dd6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_append_flag.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
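
The GCC_AS_CFI_PSEUDO_OP macro above is what decides whether HAVE_AS_CFI_PSEUDO_OP gets defined, which in turn makes the cfi_* wrappers in ffi_cfi.h (near the top of this patch) expand to real .cfi directives rather than to nothing. Roughly, the configure probe compiles a translation unit like the following hedged sketch; the file name is illustrative and not part of the diff.

    /* cfi_probe.c -- hedged reconstruction of what the configure check compiles;
       not a file from this diff.  If "cc -c cfi_probe.c" succeeds, configure
       defines HAVE_AS_CFI_PSEUDO_OP and the cfi_startproc/cfi_endproc macros in
       ffi_cfi.h emit real unwind info; otherwise they expand to nothing. */
    asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc");

    int main(void)
    {
        return 0;
    }
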
+ +#serial 8 + +AC_DEFUN([AX_APPEND_FLAG], +[dnl +AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_SET_IF +AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])]) +AS_VAR_SET_IF(FLAGS,[ + AS_CASE([" AS_VAR_GET(FLAGS) "], + [*" $1 "*], [AC_RUN_LOG([: FLAGS already contains $1])], + [ + AS_VAR_APPEND(FLAGS,[" $1"]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) + ], + [ + AS_VAR_SET(FLAGS,[$1]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) +AS_VAR_POPDEF([FLAGS])dnl +])dnl AX_APPEND_FLAG diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 new file mode 100644 index 0000000..9e7f1ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 @@ -0,0 +1,194 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cc_maxopt.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CC_MAXOPT +# +# DESCRIPTION +# +# Try to turn on "good" C optimization flags for various compilers and +# architectures, for some definition of "good". (In our case, good for +# FFTW and hopefully for other scientific codes. Modify as needed.) +# +# The user can override the flags by setting the CFLAGS environment +# variable. The user can also specify --enable-portable-binary in order to +# disable any optimization flags that might result in a binary that only +# runs on the host architecture. +# +# Note also that the flags assume that ANSI C aliasing rules are followed +# by the code (e.g. for gcc's -fstrict-aliasing), and that floating-point +# computations can be re-ordered as needed. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_COMPILER_VENDOR, +# AX_GCC_ARCHFLAG, AX_GCC_X86_CPUID. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
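
AX_CC_MAXOPT's description notes that the flags it picks assume the code follows ANSI C aliasing rules (gcc's -fstrict-aliasing is added below when the compiler accepts it). A hedged illustration of what that assumption means in practice, with invented function names:

    /* Illustrative only -- not part of libffi.  Under -fstrict-aliasing the
       optimizer may assume a uint32_t* never aliases a float, so the first
       version is undefined behaviour; the memcpy form is the aliasing-safe
       way to inspect the bytes. */
    #include <stdint.h>
    #include <string.h>

    uint32_t float_bits_unsafe(float f)
    {
        return *(uint32_t *)&f;        /* violates the aliasing rules */
    }

    uint32_t float_bits_safe(float f)
    {
        uint32_t u;
        memcpy(&u, &f, sizeof u);      /* well-defined */
        return u;
    }
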
+ +#serial 17 + +AC_DEFUN([AX_CC_MAXOPT], +[ +AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AX_COMPILER_VENDOR]) +AC_REQUIRE([AC_CANONICAL_HOST]) + +AC_ARG_ENABLE(portable-binary, [AS_HELP_STRING([--enable-portable-binary], [disable compiler optimizations that would produce unportable binaries])], + acx_maxopt_portable=$enableval, acx_maxopt_portable=no) + +# Try to determine "good" native compiler flags if none specified via CFLAGS +if test "$ac_test_CFLAGS" != "set"; then + CFLAGS="" + case $ax_cv_c_compiler_vendor in + dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host" + if test "x$acx_maxopt_portable" = xno; then + CFLAGS="$CFLAGS -arch host" + fi;; + + sun) CFLAGS="-native -fast -xO5 -dalign" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS -xarch=generic" + fi;; + + hp) CFLAGS="+Oall +Optrs_ansi +DSnative" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS +DAportable" + fi;; + + ibm) if test "x$acx_maxopt_portable" = xno; then + xlc_opt="-qarch=auto -qtune=auto" + else + xlc_opt="-qtune=auto" + fi + AX_CHECK_COMPILE_FLAG($xlc_opt, + CFLAGS="-O3 -qansialias -w $xlc_opt", + [CFLAGS="-O3 -qansialias -w" + echo "******************************************************" + echo "* You seem to have the IBM C compiler. It is *" + echo "* recommended for best performance that you use: *" + echo "* *" + echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *" + echo "* ^^^ ^^^ *" + echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *" + echo "* CPU you have. (Set the CFLAGS environment var. *" + echo "* and re-run configure.) For more info, man cc. *" + echo "******************************************************"]) + ;; + + intel) CFLAGS="-O3 -ansi_alias" + if test "x$acx_maxopt_portable" = xno; then + icc_archflag=unknown + icc_flags="" + case $host_cpu in + i686*|x86_64*) + # icc accepts gcc assembly syntax, so these should work: + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) icc_flags="-xK" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) icc_flags="-xSSE2 -xB -xK" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) icc_flags="-xSSE3 -xP -xO -xB -xK" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) icc_flags="-xSSSE3 -xT -xB -xK" ;; + *1?6[[7d]]?:*:*:*) icc_flags="-xSSE4.1 -xS -xT -xB -xK" ;; + *1?6[[aef]]?:*:*:*|*2?6[[5cef]]?:*:*:*) icc_flags="-xSSE4.2 -xS -xT -xB -xK" ;; + *2?6[[ad]]?:*:*:*) icc_flags="-xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[ae]]?:*:*:*) icc_flags="-xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) icc_flags="-xCORE-AVX2 -xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) icc_flags="-xSSE3 -xP -xO -xN -xW -xK" ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) icc_flags="-xSSE2 -xN -xW -xK" ;; + esac ;; + esac ;; + esac + if test "x$icc_flags" != x; then + for flag in $icc_flags; do + AX_CHECK_COMPILE_FLAG($flag, [icc_archflag=$flag; break]) + done + fi + AC_MSG_CHECKING([for icc architecture flag]) + AC_MSG_RESULT($icc_archflag) + if test "x$icc_archflag" != xunknown; then + CFLAGS="$CFLAGS $icc_archflag" + fi + fi + ;; + + gnu) + # default optimization flags for gcc on all systems + CFLAGS="-O3 -fomit-frame-pointer" + + # -malign-double for x86 systems + # libffi local change -- don't align double, as it changes the ABI + # 
AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double") + + # -fstrict-aliasing for gcc-2.95+ + AX_CHECK_COMPILE_FLAG(-fstrict-aliasing, + CFLAGS="$CFLAGS -fstrict-aliasing") + + # note that we enable "unsafe" fp optimization with other compilers, too + AX_CHECK_COMPILE_FLAG(-ffast-math, CFLAGS="$CFLAGS -ffast-math") + + AX_GCC_ARCHFLAG($acx_maxopt_portable) + ;; + + microsoft) + # default optimization flags for MSVC opt builds + CFLAGS="-O2" + ;; + esac + + if test -z "$CFLAGS"; then + echo "" + echo "********************************************************" + echo "* WARNING: Don't know the best CFLAGS for this system *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "* (otherwise, a default of CFLAGS=-O3 will be used) *" + echo "********************************************************" + echo "" + CFLAGS="-O3" + fi + + AX_CHECK_COMPILE_FLAG($CFLAGS, [], [ + echo "" + echo "********************************************************" + echo "* WARNING: The guessed CFLAGS don't seem to work with *" + echo "* your compiler. *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "********************************************************" + echo "" + CFLAGS="" + ]) + +fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 new file mode 100644 index 0000000..094577e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 @@ -0,0 +1,122 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cflags_warn_all.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_CXXFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_FCFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# +# DESCRIPTION +# +# Try to find a compiler option that enables most reasonable warnings. +# +# For the GNU compiler it will be -Wall (and -ansi -pedantic) The result +# is added to the shellvar being CFLAGS, CXXFLAGS, or FCFLAGS by default. +# +# Currently this macro knows about the GCC, Solaris, Digital Unix, AIX, +# HP-UX, IRIX, NEC SX-5 (Super-UX 10), Cray J90 (Unicos 10.0.0.8), and +# Intel compilers. For a given compiler, the Fortran flags are much more +# experimental than their C equivalents. +# +# - $1 shell-variable-to-add-to : CFLAGS, CXXFLAGS, or FCFLAGS +# - $2 add-value-if-not-found : nothing +# - $3 action-if-found : add value to shellvariable +# - $4 action-if-not-found : nothing +# +# NOTE: These macros depend on AX_APPEND_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2010 Rhys Ulerich +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 16 + +AC_DEFUN([AX_FLAGS_WARN_ALL],[dnl +AS_VAR_PUSHDEF([FLAGS],[_AC_LANG_PREFIX[]FLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_[]_AC_LANG_ABBREV[]flags_warn_all])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum warnings], +VAR,[VAR="no, unknown" +ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-warn all % -warn all" dnl Intel + "-pedantic % -Wall" dnl GCC + "-xstrconst % -v" dnl Solaris C + "-std1 % -verbose -w0 -warnprotos" dnl Digital Unix + "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX + "-ansi -ansiE % -fullwarn" dnl IRIX + "+ESlit % +w1" dnl HP-UX C + "-Xc % -pvctl[,]fullmsg" dnl NEC SX-5 (Super-UX 10) + "-h conform % -h msglevel 2" dnl Cray C (Unicos) + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done +FLAGS="$ac_save_[]FLAGS" +]) +AS_VAR_POPDEF([FLAGS])dnl +AX_REQUIRE_DEFINED([AX_APPEND_FLAG]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_default($4,[m4_ifval($2,[AX_APPEND_FLAG([$2], [$1])])]) ;; + *) m4_default($3,[AX_APPEND_FLAG([$VAR], [$1])]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +])dnl AX_FLAGS_WARN_ALL +dnl implementation tactics: +dnl the for-argument contains a list of options. The first part of +dnl these does only exist to detect the compiler - usually it is +dnl a global option to enable -ansi or -extrawarnings. All other +dnl compilers will fail about it. That was needed since a lot of +dnl compilers will give false positives for some option-syntax +dnl like -Woption or -Xoption as they think of it is a pass-through +dnl to later compile stages or something. The "%" is used as a +dnl delimiter. A non-option comment can be given after "%%" marks +dnl which will be shown but not added to the respective C/CXXFLAGS. 
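A hedged usage sketch for the warn-all helpers whose per-language wrappers are defined just below; it is not part of the vendored file. With no argument the detected option lands in CFLAGS; the second call collects it into WARN_CFLAGS, a placeholder variable name chosen here for illustration.

    dnl append the compiler's "most warnings" option directly to CFLAGS
    AX_CFLAGS_WARN_ALL
    dnl or gather it into a separate variable and substitute it for Makefiles
    AX_CFLAGS_WARN_ALL([WARN_CFLAGS])
    AC_SUBST([WARN_CFLAGS])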
+ +AC_DEFUN([AX_CFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C]) +]) + +AC_DEFUN([AX_CXXFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C++]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C++]) +]) + +AC_DEFUN([AX_FCFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([Fortran]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([Fortran]) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 new file mode 100644 index 0000000..bd753b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
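A short sketch of the calling convention described above (not taken from the vendored sources): probe one flag, append it on success, and use the EXTRA-FLAGS slot to turn unknown-flag warnings into hard errors. -fvisibility=hidden and -Werror are only example flags.

    AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],
      [CFLAGS="$CFLAGS -fvisibility=hidden"],
      [AC_MSG_NOTICE([compiler does not accept -fvisibility=hidden])],
      [-Werror])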
+ +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 new file mode 100644 index 0000000..73efdb0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 @@ -0,0 +1,88 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_COMPILER_VENDOR +# +# DESCRIPTION +# +# Determine the vendor of the C/C++ compiler, e.g., gnu, intel, ibm, sun, +# hp, borland, comeau, dec, cray, kai, lcc, metrowerks, sgi, microsoft, +# watcom, etc. The vendor is returned in the cache variable +# $ax_cv_c_compiler_vendor for C and $ax_cv_cxx_compiler_vendor for C++. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
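An illustrative snippet (not from the vendored sources) that branches on the cache variable documented above; the -pipe flag is merely an example of a vendor-specific choice.

    AC_PROG_CC
    AX_COMPILER_VENDOR
    AS_CASE([$ax_cv_c_compiler_vendor],
      [gnu|clang], [CFLAGS="$CFLAGS -pipe"],
      [AC_MSG_NOTICE([compiler vendor detected as $ax_cv_c_compiler_vendor])])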
+ +#serial 17 + +AC_DEFUN([AX_COMPILER_VENDOR], +[AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, + dnl Please add if possible support to ax_compiler_version.m4 + [# note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + cray: _CRAYC + fujitsu: __FUJITSU + sdcc: SDCC, __SDCC + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__CODEGEARC__,__TURBOC__ + comeau: __COMO__ + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + tcc: __TINYC__ + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ + #if !($vencpp) + thisisanerror; + #endif + ])], [break]) + done + ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1` + ]) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args.m4 new file mode 100644 index 0000000..9237efe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_configure_args.m4 @@ -0,0 +1,49 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_configure_args.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CONFIGURE_ARGS +# +# DESCRIPTION +# +# Helper macro for AX_ENABLE_BUILDDIR. +# +# The traditional way of starting a subdir-configure is running the script +# with ${1+"$@"} but since autoconf 2.60 this is broken. Instead we have +# to rely on eval'ing $ac_configure_args however some old autoconf +# versions do not provide that. To ensure maximum portability of autoconf +# extension macros this helper can be AC_REQUIRE'd so that +# $ac_configure_args will always be present. +# +# Sadly, the traditional "exec $SHELL" of the enable_builddir macros is +# spoiled now and must be replaced by "eval + exit $?". +# +# Example: +# +# AC_DEFUN([AX_ENABLE_SUBDIR],[dnl +# AC_REQUIRE([AX_CONFIGURE_ARGS])dnl +# eval $SHELL $ac_configure_args || exit $? +# ...]) +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 14 + +AC_DEFUN([AX_CONFIGURE_ARGS],[ + # [$]@ is unusable in 2.60+ but earlier autoconf had no ac_configure_args + if test "${ac_configure_args+set}" != "set" ; then + ac_configure_args= + for ac_arg in ${1+"[$]@"}; do + ac_configure_args="$ac_configure_args '$ac_arg'" + done + fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 new file mode 100644 index 0000000..710384d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 @@ -0,0 +1,302 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_enable_builddir.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_ENABLE_BUILDDIR [(dirstring-or-command [,Makefile.mk [,-all]])] +# +# DESCRIPTION +# +# If the current configure was run within the srcdir then we move all +# configure-files into a subdir and let the configure steps continue +# there. We provide an option --disable-builddir to suppress the move into +# a separate builddir. +# +# Defaults: +# +# $1 = $host (overridden with $HOST) +# $2 = Makefile.mk +# $3 = -all +# +# This macro must be called before AM_INIT_AUTOMAKE. It creates a default +# toplevel srcdir Makefile from the information found in the created +# toplevel builddir Makefile. It just copies the variables and +# rule-targets, each extended with a default rule-execution that recurses +# into the build directory of the current "HOST". You can override the +# auto-detection through `config.guess` and build-time of course, as in +# +# make HOST=i386-mingw-cross +# +# which can of course set at configure time as well using +# +# configure --host=i386-mingw-cross +# +# After the default has been created, additional rules can be appended +# that will not just recurse into the subdirectories and only ever exist +# in the srcdir toplevel makefile - these parts are read from the $2 = +# Makefile.mk file +# +# The automatic rules are usually scanning the toplevel Makefile for lines +# like '#### $host |$builddir' to recognize the place where to recurse +# into. Usually, the last one is the only one used. However, almost all +# targets have an additional "*-all" rule which makes the script to +# recurse into _all_ variants of the current HOST (!!) setting. The "-all" +# suffix can be overridden for the macro as well. +# +# a special rule is only given for things like "dist" that will copy the +# tarball from the builddir to the sourcedir (or $(PUB)) for reason of +# convenience. +# +# LICENSE +# +# Copyright (c) 2009 Guido U. Draheim +# Copyright (c) 2009 Alan Jenkins +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
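A sketch of the ordering constraint described above: AX_ENABLE_BUILDDIR must appear before AM_INIT_AUTOMAKE. The fragment is illustrative only (project name and automake options are placeholders) and is not part of the vendored macro file.

    AC_INIT([demo], [1.0])
    AC_CANONICAL_HOST
    AX_ENABLE_BUILDDIR          dnl must run before AM_INIT_AUTOMAKE
    AM_INIT_AUTOMAKE([foreign])
    AC_PROG_CC
    AC_CONFIG_FILES([Makefile])
    AC_OUTPUT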
+# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 30 + +AC_DEFUN([AX_ENABLE_BUILDDIR],[ +AC_REQUIRE([AC_CANONICAL_HOST])[]dnl +AC_REQUIRE([AC_CANONICAL_TARGET])[]dnl +AC_REQUIRE([AX_CONFIGURE_ARGS])[]dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])[]dnl +AC_BEFORE([$0],[AM_INIT_AUTOMAKE])dnl +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +SUB="." +AC_ARG_ENABLE([builddir], AS_HELP_STRING( + [--disable-builddir],[disable automatic build in subdir of sources]) + ,[SUB="$enableval"], [SUB="auto"]) +if test ".$ac_srcdir_defaulted" != ".no" ; then +if test ".$srcdir" = ".." ; then + if test -f config.status ; then + AC_MSG_NOTICE(toplevel srcdir already configured... skipping subdir build) + else + test ".$SUB" = "." && SUB="." + test ".$SUB" = ".no" && SUB="." + test ".$TARGET" = "." && TARGET="$target" + test ".$SUB" = ".auto" && SUB="m4_ifval([$1], [$1],[$TARGET])" + if test ".$SUB" != ".." ; then # we know where to go and + AS_MKDIR_P([$SUB]) + echo __.$SUB.__ > $SUB/conftest.tmp + cd $SUB + if grep __.$SUB.__ conftest.tmp >/dev/null 2>/dev/null ; then + rm conftest.tmp + AC_MSG_RESULT([continue configure in default builddir "./$SUB"]) + else + AC_MSG_ERROR([could not change to default builddir "./$SUB"]) + fi + srcdir=`echo "$SUB" | + sed -e 's,^\./,,;s,[[^/]]$,&/,;s,[[^/]]*/,../,g;s,[[/]]$,,;'` + # going to restart from subdirectory location + test -f $srcdir/config.log && mv $srcdir/config.log . + test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h . + test -f $srcdir/conftest.log && mv $srcdir/conftest.log . + test -f $srcdir/$cache_file && mv $srcdir/$cache_file . + AC_MSG_RESULT(....exec $SHELL $srcdir/[$]0 "--srcdir=$srcdir" "--enable-builddir=$SUB" ${1+"[$]@"}) + case "[$]0" in # restart + [[\\/]]* | ?:[[\\/]]*) # Absolute name + eval $SHELL "'[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + *) eval $SHELL "'$srcdir/[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + esac ; exit $? + fi + fi +fi fi +test ".$SUB" = ".auto" && SUB="." +dnl ac_path_prog uses "set dummy" to override $@ which would defeat the "exec" +AC_PATH_PROG(SED,gsed sed, sed) +AUX="$am_aux_dir" +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SUB])dnl +AC_CONFIG_COMMANDS([buildir],[dnl .............. config.status .............. +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([TOP],[top_srcdir])dnl +AS_VAR_PUSHDEF([SRC],[ac_top_srcdir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +pushdef([END],[Makefile.mk])dnl +pushdef([_ALL],[ifelse([$3],,[-all],[$3])])dnl + SRC="$ax_enable_builddir_srcdir" + if test ".$SUB" = ".." 
; then + if test -f "$TOP/Makefile" ; then + AC_MSG_NOTICE([skipping TOP/Makefile - left untouched]) + else + AC_MSG_NOTICE([skipping TOP/Makefile - not created]) + fi + else + if test -f "$SRC/Makefile" ; then + a=`grep "^VERSION " "$SRC/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$SRC/Makefile" + fi + if test -f "$SRC/Makefile" ; then + echo "$SRC/Makefile : $SRC/Makefile.in" > $tmp/conftemp.mk + echo " []@ echo 'REMOVED,,,' >\$[]@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$SRC/Makefile" >/dev/null + then rm $SRC/Makefile ; fi + cp $tmp/conftemp.mk $SRC/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$SRC/Makefile" ; then + AC_MSG_NOTICE([create TOP/Makefile guessed from local Makefile]) + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[[ ]]*[[\\#]]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[[:=]]/!d +/^\\./d +dnl Now handle rules (i.e. lines containing ":" but not " = "). +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/ \\1 \\1[]_ALL\\2/g +s/^\\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/\\1 \\1[]_ALL\\2/ +s/ / /g +/^all all[]_ALL[[ :]]/i\\ +all-configured : all[]_ALL +dnl dist-all exists... and would make for dist-all-all +s/ [[a-zA-Z0-9-]]*[]_ALL [[a-zA-Z0-9-]]*[]_ALL[]_ALL//g +/[]_ALL[]_ALL/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +dnl special rule add-on: "dist" copies the tarball to $(PUB). (source tree) +/dist[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "dist-foo" copies all the archives to $(PUB). (source tree) +/dist-[[a-zA-Z0-9]]*[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "distclean" removes all local builddirs completely +/distclean[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$SRC/makefile.sed~" ## DEBUGGING + $SED -f $tmp/conftemp.sed Makefile >$SRC/Makefile + if test -f "$SRC/m4_ifval([$2],[$2],[END])" ; then + AC_MSG_NOTICE([extend TOP/Makefile with TOP/m4_ifval([$2],[$2],[END])]) + cat $SRC/END >>$SRC/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$SRC/Makefile + # sanity check + if grep '^; echo "MAKE ' $SRC/Makefile >/dev/null ; then + AC_MSG_NOTICE([buggy sed found - it deletes tab in "a" text parts]) + $SED -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $SRC/Makefile \ + >$SRC/Makefile~ + (test -s $SRC/Makefile~ && mv $SRC/Makefile~ $SRC/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [[^|]]* | *$SUB *\$!$xxxx ...... $SUB!" 
>$tmp/conftemp.sed + $SED -f "$tmp/conftemp.sed" "$SRC/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$SRC/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$SRC/makefiles.out~" ## DEBUGGING + if cmp -s "$SRC/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + AC_MSG_NOTICE([keeping TOP/Makefile from earlier configure]) + rm "$tmp/mkfile.tmp" + else + AC_MSG_NOTICE([reusing TOP/Makefile from earlier configure]) + mv "$tmp/mkfile.tmp" "$SRC/Makefile" + fi + fi + AC_MSG_NOTICE([build in $SUB (HOST=$ax_enable_builddir_host)]) + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$SUB" >>$SRC/Makefile + fi +popdef([END])dnl +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SRC])dnl +AS_VAR_POPDEF([TOP])dnl +AS_VAR_POPDEF([SUB])dnl +],[dnl +ax_enable_builddir_srcdir="$srcdir" # $srcdir +ax_enable_builddir_host="$HOST" # $HOST / $host +ax_enable_builddir_version="$VERSION" # $VERSION +ax_enable_builddir_package="$PACKAGE" # $PACKAGE +ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX +ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED +ax_enable_builddir="$ax_enable_builddir" # $SUB +])dnl +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 new file mode 100644 index 0000000..c52b9b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 @@ -0,0 +1,267 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_archflag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_ARCHFLAG([PORTABLE?], [ACTION-SUCCESS], [ACTION-FAILURE]) +# +# DESCRIPTION +# +# This macro tries to guess the "native" arch corresponding to the target +# architecture for use with gcc's -march=arch or -mtune=arch flags. If +# found, the cache variable $ax_cv_gcc_archflag is set to this flag and +# ACTION-SUCCESS is executed; otherwise $ax_cv_gcc_archflag is set to +# "unknown" and ACTION-FAILURE is executed. The default ACTION-SUCCESS is +# to add $ax_cv_gcc_archflag to the end of $CFLAGS. +# +# PORTABLE? should be either [yes] (default) or [no]. In the former case, +# the flag is set to -mtune (or equivalent) so that the architecture is +# only used for tuning, but the instruction set used is still portable. In +# the latter case, the flag is set to -march (or equivalent) so that +# architecture-specific instructions are enabled. +# +# The user can specify --with-gcc-arch= in order to override the +# macro's choice of architecture, or --without-gcc-arch to disable this. +# +# When cross-compiling, or if $CC is not gcc, then ACTION-FAILURE is +# called unless the user specified --with-gcc-arch manually. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID +# +# (The main emphasis here is on recent CPUs, on the principle that doing +# high-performance computing on old hardware is uncommon.) +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2014 Tsukasa Oi +# Copyright (c) 2017-2018 Alexey Kopytov +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. 
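An illustrative call (not part of the vendored file) matching the interface documented above: PORTABLE?=no asks for -march-style, architecture-specific flags, the success action appends the detected flag, and the failure action merely warns.

    AX_GCC_ARCHFLAG([no],
      [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"],
      [AC_MSG_WARN([could not guess an architecture flag; leaving CFLAGS unchanged])])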
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 22 + +AC_DEFUN([AX_GCC_ARCHFLAG], +[AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_SED]) +AC_REQUIRE([AX_COMPILER_VENDOR]) + +AC_ARG_WITH(gcc-arch, [AS_HELP_STRING([--with-gcc-arch=], [use architecture for gcc -march/-mtune, instead of guessing])], + ax_gcc_arch=$withval, ax_gcc_arch=yes) + +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT([]) +AC_CACHE_VAL(ax_cv_gcc_archflag, +[ +ax_cv_gcc_archflag="unknown" + +if test "$GCC" = yes; then + +if test "x$ax_gcc_arch" = xyes; then +ax_gcc_arch="" +if test "$cross_compiling" = no; then +case $host_cpu in + i[[3456]]86*|x86_64*|amd64*) # use cpuid codes + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *5[[4578]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;; + *5[[123]]?:*:*:*) ax_gcc_arch=pentium ;; + *0?61?:*:*:*|?61?:*:*:*|61?:*:*:*) ax_gcc_arch=pentiumpro ;; + *0?6[[356]]?:*:*:*|?6[[356]]?:*:*:*|6[[356]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) ax_gcc_arch="yonah pentium-m pentium3 pentiumpro" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[7d]]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[aef]]?:*:*:*|*2?6e?:*:*:*) ax_gcc_arch="nehalem corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[5cf]]?:*:*:*) ax_gcc_arch="westmere corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[ad]]?:*:*:*) ax_gcc_arch="sandybridge corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[ae]]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6d?:*:*:*|*4?6[[7f]]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *1?6c?:*:*:*|*2?6[[67]]?:*:*:*|*3?6[[56]]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;; + 
*3?67?:*:*:*|*[[45]]?6[[ad]]?:*:*:*) ax_gcc_arch="silvermont atom core2 pentium-m pentium3 pentiumpro" ;; + *000?f[[012]]?:*:*:*|?f[[012]]?:*:*:*|f[[012]]?:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) ax_gcc_arch="nocona prescott pentium4 pentiumpro" ;; + # fallback + *5??:*:*:*) ax_gcc_arch=pentium ;; + *??6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;; + *6??:*:*:*) ax_gcc_arch=pentiumpro ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + esac ;; + *:68747541:444d4163:69746e65) # AMD + case $ax_cv_gcc_x86_cpuid_1 in + *5[[67]]?:*:*:*) ax_gcc_arch=k6 ;; + *5[[8]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;; + *5[[9d]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;; + *6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;; + *6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;; + *6[[678a]]?:*:*:*) ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;; + *000?f[[4578bcef]]?:*:*:*|?f[[4578bcef]]?:*:*:*|f[[4578bcef]]?:*:*:*|*001?f[[4578bcf]]?:*:*:*|1?f[[4578bcf]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;; + *002?f[[13457bcf]]?:*:*:*|2?f[[13457bcf]]?:*:*:*|*004?f[[138bcf]]?:*:*:*|4?f[[138bcf]]?:*:*:*|*005?f[[df]]?:*:*:*|5?f[[df]]?:*:*:*|*006?f[[8bcf]]?:*:*:*|6?f[[8bcf]]?:*:*:*|*007?f[[cf]]?:*:*:*|7?f[[cf]]?:*:*:*|*00c?f1?:*:*:*|c?f1?:*:*:*|*020?f3?:*:*:*|20?f3?:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *010?f[[245689a]]?:*:*:*|10?f[[245689a]]?:*:*:*|*030?f1?:*:*:*|30?f1?:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *050?f[[12]]?:*:*:*|50?f[[12]]?:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f1?:*:*:*|60?f1?:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *060?f2?:*:*:*|60?f2?:*:*:*|*061?f[[03]]?:*:*:*|61?f[[03]]?:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *063?f0?:*:*:*|63?f0?:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *07[[03]]?f0?:*:*:*|7[[03]]?f0?:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + # fallback + *0[[13]]??f??:*:*:*|[[13]]??f??:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *020?f??:*:*:*|20?f??:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *05??f??:*:*:*|5??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f??:*:*:*|60?f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *061?f??:*:*:*|61?f??:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *06??f??:*:*:*|6??f??:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *070?f??:*:*:*|70?f??:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + *???f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;; + esac ;; + *:746e6543:736c7561:48727561) # IDT / VIA (Centaur) + case $ax_cv_gcc_x86_cpuid_1 in + *54?:*:*:*) ax_gcc_arch=winchip-c6 ;; + *5[[89]]?:*:*:*) ax_gcc_arch=winchip2 ;; + *66?:*:*:*) ax_gcc_arch=winchip2 ;; + *6[[78]]?:*:*:*) ax_gcc_arch=c3 ;; + *6[[9adf]]?:*:*:*) ax_gcc_arch="c3-2 c3" ;; + esac ;; + esac + if test x"$ax_gcc_arch" = x; then # fallback + case $host_cpu in + i586*) ax_gcc_arch=pentium ;; + i686*) ax_gcc_arch=pentiumpro ;; + esac + fi + ;; + + sparc*) + AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/]) + cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null` + cputype=`echo "$cputype" | tr -d ' -' | $SED 's/SPARCIIi/SPARCII/' |tr $as_cr_LETTERS $as_cr_letters` + case $cputype in + *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;; + *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;; + *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;; + 
*supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;; + *hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;; + *cypress*) ax_gcc_arch=cypress ;; + esac ;; + + alphaev5) ax_gcc_arch=ev5 ;; + alphaev56) ax_gcc_arch=ev56 ;; + alphapca56) ax_gcc_arch="pca56 ev56" ;; + alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;; + alphaev6) ax_gcc_arch=ev6 ;; + alphaev67) ax_gcc_arch=ev67 ;; + alphaev68) ax_gcc_arch="ev68 ev67" ;; + alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;; + alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;; + alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;; + + powerpc*) + cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | $SED 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null` + cputype=`echo $cputype | $SED -e 's/ppc//g;s/ *//g'` + case $cputype in + *750*) ax_gcc_arch="750 G3" ;; + *740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;; + *74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;; + *74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;; + *970*) ax_gcc_arch="970 G5 power4";; + *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";; + *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";; + 603ev|8240) ax_gcc_arch="$cputype 603e 603";; + *POWER7*) ax_gcc_arch="power7";; + *POWER8*) ax_gcc_arch="power8";; + *POWER9*) ax_gcc_arch="power9";; + *POWER10*) ax_gcc_arch="power10";; + *) ax_gcc_arch=$cputype ;; + esac + ax_gcc_arch="$ax_gcc_arch powerpc" + ;; + aarch64) + cpuimpl=`grep 'CPU implementer' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuarch=`grep 'CPU architecture' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuvar=`grep 'CPU variant' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + case $cpuimpl in + 0x42) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + 0x43) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx armv8-a native" ;; + 0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + esac + ;; +esac +fi # not cross-compiling +fi # guess arch + +if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then +if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code + flag_prefixes="-mtune=" + if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then flag_prefixes="-march="; fi + # -mcpu=$arch and m$arch generate nonportable code on every arch except + # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr. 
+ case $host_cpu in i*86|x86_64*|amd64*) flag_prefixes="$flag_prefixes -mcpu= -m";; esac +else + flag_prefixes="-march= -mcpu= -m" +fi +for flag_prefix in $flag_prefixes; do + for arch in $ax_gcc_arch; do + flag="$flag_prefix$arch" + AX_CHECK_COMPILE_FLAG($flag, [if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then + if test "x[]m4_default([$1],yes)" = xyes; then + if test "x$flag" = "x-march=$arch"; then flag=-mtune=$arch; fi + fi + fi; ax_cv_gcc_archflag=$flag; break]) + done + test "x$ax_cv_gcc_archflag" = xunknown || break +done +fi + +fi # $GCC=yes +]) +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT($ax_cv_gcc_archflag) +if test "x$ax_cv_gcc_archflag" = xunknown; then + m4_default([$3],:) +else + m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"]) +fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 new file mode 100644 index 0000000..df95465 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 @@ -0,0 +1,89 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_x86_cpuid.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_X86_CPUID(OP) +# AX_GCC_X86_CPUID_COUNT(OP, COUNT) +# +# DESCRIPTION +# +# On Pentium and later x86 processors, with gcc or a compiler that has a +# compatible syntax for inline assembly instructions, run a small program +# that executes the cpuid instruction with input OP. This can be used to +# detect the CPU type. AX_GCC_X86_CPUID_COUNT takes an additional COUNT +# parameter that gets passed into register ECX before calling cpuid. +# +# On output, the values of the eax, ebx, ecx, and edx registers are stored +# as hexadecimal strings as "eax:ebx:ecx:edx" in the cache variable +# ax_cv_gcc_x86_cpuid_OP. +# +# If the cpuid instruction fails (because you are running a +# cross-compiler, or because you are not using gcc, or because you are on +# a processor that doesn't have this instruction), ax_cv_gcc_x86_cpuid_OP +# is set to the string "unknown". +# +# This macro mainly exists to be used in AX_GCC_ARCHFLAG. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2015 Michael Petch +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. 
The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 10 + +AC_DEFUN([AX_GCC_X86_CPUID], +[AX_GCC_X86_CPUID_COUNT($1, 0) +]) + +AC_DEFUN([AX_GCC_X86_CPUID_COUNT], +[AC_REQUIRE([AC_PROG_CC]) +AC_LANG_PUSH([C]) +AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1, + [AC_RUN_IFELSE([AC_LANG_PROGRAM([#include ], [ + int op = $1, level = $2, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; +])], + [ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown])]) +AC_LANG_POP([C]) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined.m4 new file mode 100644 index 0000000..17c3eab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/m4/ax_require_defined.m4 @@ -0,0 +1,37 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_REQUIRE_DEFINED], [dnl + m4_ifndef([$1], [m4_fatal([macro ]$1[ is not defined; is a m4 file missing?])]) +])dnl AX_REQUIRE_DEFINED diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver.pl b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver.pl new file mode 100644 index 0000000..8a90b1f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/make_sunver.pl @@ -0,0 +1,333 @@ +#!/usr/bin/perl -w + +# make_sunver.pl +# +# This script takes at least two arguments, a GNU style version script and +# a list of object and archive files, and generates a corresponding Sun +# style version script as follows: +# +# Each glob pattern, C++ mangled pattern or literal in the input script is +# matched against all global symbols in the input objects, emitting those +# that matched (or nothing if no match was found). 
+# A comment with the original pattern and its type is left in the output +# file to make it easy to understand the matches. +# +# It uses elfdump when present (native), GNU readelf otherwise. +# It depends on the GNU version of c++filt, since it must understand the +# GNU mangling style. + +use FileHandle; +use IPC::Open2; + +# Enforce C locale. +$ENV{'LC_ALL'} = "C"; +$ENV{'LANG'} = "C"; + +# Input version script, GNU style. +my $symvers = shift; + +########## +# Get all the symbols from the library, match them, and add them to a hash. + +my %sym_hash = (); + +# List of objects and archives to process. +my @OBJECTS = (); + +# List of shared objects to omit from processing. +my @SHAREDOBJS = (); + +# Filter out those input archives that have corresponding shared objects to +# avoid adding all symbols matched in the archive to the output map. +foreach $file (@ARGV) { + if (($so = $file) =~ s/\.a$/.so/ && -e $so) { + printf STDERR "omitted $file -> $so\n"; + push (@SHAREDOBJS, $so); + } else { + push (@OBJECTS, $file); + } +} + +# We need to detect and ignore hidden symbols. Solaris nm can only detect +# this in the harder to parse default output format, and GNU nm not at all, +# so use elfdump -s in the native case and GNU readelf -s otherwise. +# GNU objdump -t cannot be used since it produces a variable number of +# columns. + +# The path to elfdump. +my $elfdump = "/usr/ccs/bin/elfdump"; + +if (-f $elfdump) { + open ELFDUMP,$elfdump.' -s '.(join ' ',@OBJECTS).'|' or die $!; + my $skip_arsym = 0; + + while () { + chomp; + + # Ignore empty lines. + if (/^$/) { + # End of archive symbol table, stop skipping. + $skip_arsym = 0 if $skip_arsym; + next; + } + + # Keep skipping until end of archive symbol table. + next if ($skip_arsym); + + # Ignore object name header for individual objects and archives. + next if (/:$/); + + # Ignore table header lines. + next if (/^Symbol Table Section:/); + next if (/index.*value.*size/); + + # Start of archive symbol table: start skipping. + if (/^Symbol Table: \(archive/) { + $skip_arsym = 1; + next; + } + + # Split table. + (undef, undef, undef, undef, $bind, $oth, undef, $shndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCL"); + # Ignore hidden symbols. + next if ($oth eq "H"); + # Ignore undefined symbols. + next if ($shndx eq "UNDEF"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOB|WEAK)/ or $oth ne "D") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. + $sym_hash{$name}++; + } + close ELFDUMP or die "$elfdump error"; +} else { + open READELF, 'readelf -s -W '.(join ' ',@OBJECTS).'|' or die $!; + # Process each symbol. + while () { + chomp; + + # Ignore empty lines. + next if (/^$/); + + # Ignore object name header. + next if (/^File: .*$/); + + # Ignore table header lines. + next if (/^Symbol table.*contains.*:/); + next if (/Num:.*Value.*Size/); + + # Split table. + (undef, undef, undef, undef, $bind, $vis, $ndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCAL"); + # Ignore hidden symbols. + next if ($vis eq "HIDDEN"); + # Ignore undefined symbols. + next if ($ndx eq "UND"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOBAL|WEAK)/ or $vis ne "DEFAULT") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. 
+ $sym_hash{$name}++; + } + close READELF or die "readelf error"; +} + +########## +# The various types of glob patterns. +# +# A glob pattern that is to be applied to the demangled name: 'cxx'. +# A glob patterns that applies directly to the name in the .o files: 'glob'. +# This pattern is ignored; used for local variables (usually just '*'): 'ign'. + +# The type of the current pattern. +my $glob = 'glob'; + +# We're currently inside `extern "C++"', which Sun ld doesn't understand. +my $in_extern = 0; + +# The c++filt command to use. This *must* be GNU c++filt; the Sun Studio +# c++filt doesn't handle the GNU mangling style. +my $cxxfilt = $ENV{'CXXFILT'} || "c++filt"; + +# The current version name. +my $current_version = ""; + +# Was there any attempt to match a symbol to this version? +my $matches_attempted; + +# The number of versions which matched this symbol. +my $matched_symbols; + +open F,$symvers or die $!; + +# Print information about generating this file +print "# This file was generated by make_sunver.pl. DO NOT EDIT!\n"; +print "# It was generated by:\n"; +printf "# %s %s %s\n", $0, $symvers, (join ' ',@ARGV); +printf "# Omitted archives with corresponding shared libraries: %s\n", + (join ' ', @SHAREDOBJS) if $#SHAREDOBJS >= 0; +print "#\n\n"; + +while () { + # Lines of the form '};' + if (/^([ \t]*)(\}[ \t]*;[ \t]*)$/) { + $glob = 'glob'; + if ($in_extern) { + $in_extern--; + print "$1##$2\n"; + } else { + print; + } + next; + } + + # Lines of the form '} SOME_VERSION_NAME_1.0;' + if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) { + $glob = 'glob'; + # We tried to match symbols agains this version, but none matched. + # Emit dummy hidden symbol to avoid marking this version WEAK. + if ($matches_attempted && $matched_symbols == 0) { + print " hidden:\n"; + print " .force_WEAK_off_$current_version = DATA S0x0 V0x0;\n"; + } + print; next; + } + + # Comment and blank lines + if (/^[ \t]*\#/) { print; next; } + if (/^[ \t]*$/) { print; next; } + + # Lines of the form '{' + if (/^([ \t]*){$/) { + if ($in_extern) { + print "$1##{\n"; + } else { + print; + } + next; + } + + # Lines of the form 'SOME_VERSION_NAME_1.1 {' + if (/^([A-Z0-9_.]+)[ \t]+{$/) { + # Record version name. + $current_version = $1; + # Reset match attempts, #matched symbols for this version. + $matches_attempted = 0; + $matched_symbols = 0; + print; + next; + } + + # Ignore 'global:' + if (/^[ \t]*global:$/) { print; next; } + + # After 'local:', globs should be ignored, they won't be exported. + if (/^[ \t]*local:$/) { + $glob = 'ign'; + print; + next; + } + + # After 'extern "C++"', globs are C++ patterns + if (/^([ \t]*)(extern \"C\+\+\"[ \t]*)$/) { + $in_extern++; + $glob = 'cxx'; + # Need to comment, Sun ld cannot handle this. + print "$1##$2\n"; next; + } + + # Chomp newline now we're done with passing through the input file. + chomp; + + # Catch globs. Note that '{}' is not allowed in globs by this script, + # so only '*' and '[]' are available. + if (/^([ \t]*)([^ \t;{}#]+);?[ \t]*$/) { + my $ws = $1; + my $ptn = $2; + # Turn the glob into a regex by replacing '*' with '.*', '?' with '.'. + # Keep $ptn so we can still print the original form. + ($pattern = $ptn) =~ s/\*/\.\*/g; + $pattern =~ s/\?/\./g; + + if ($glob eq 'ign') { + # We're in a local: * section; just continue. + print "$_\n"; + next; + } + + # Print the glob commented for human readers. + print "$ws##$ptn ($glob)\n"; + # We tried to match a symbol to this version. 
+ $matches_attempted++; + + if ($glob eq 'glob') { + my %ptn_syms = (); + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # Maybe it matches one of the patterns based on the symbol in + # the .o file. + $ptn_syms{$sym}++ if ($sym =~ /^$pattern$/); + } + + foreach my $sym (sort keys(%ptn_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } elsif ($glob eq 'cxx') { + my %dem_syms = (); + + # Verify that we're actually using GNU c++filt. Other versions + # most likely cannot handle GNU style symbol mangling. + my $cxxout = `$cxxfilt --version 2>&1`; + $cxxout =~ m/GNU/ or die "$0 requires GNU c++filt to function"; + + # Talk to c++filt through a pair of file descriptors. + # Need to start a fresh instance per pattern, otherwise the + # process grows to 500+ MB. + my $pid = open2(*FILTIN, *FILTOUT, $cxxfilt) or die $!; + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # No? Well, maybe its demangled form matches one of those + # patterns. + printf FILTOUT "%s\n",$sym; + my $dem = ; + chomp $dem; + $dem_syms{$sym}++ if ($dem =~ /^$pattern$/); + } + + close FILTOUT or die "c++filt error"; + close FILTIN or die "c++filt error"; + # Need to wait for the c++filt process to avoid lots of zombies. + waitpid $pid, 0; + + foreach my $sym (sort keys(%dem_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } else { + # No? Well, then ignore it. + } + next; + } + # Important sanity check. This script can't handle lots of formats + # that GNU ld can, so be sure to error out if one is seen! + die "strange line `$_'"; +} +close F; diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile.am new file mode 100644 index 0000000..afcbfb6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/Makefile.am @@ -0,0 +1,8 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 new file mode 100644 index 0000000..1f1d303 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi.3 @@ -0,0 +1,41 @@ +.Dd February 15, 2008 +.Dt FFI 3 +.Sh NAME +.Nm FFI +.Nd Foreign Function Interface +.Sh LIBRARY +libffi, -lffi +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The foreign function interface provides a mechanism by which a function can +generate a call to another function at runtime without requiring knowledge of +the called function's interface at compile time. 
+.Sh SEE ALSO +.Xr ffi_prep_cif 3 , +.Xr ffi_prep_cif_var 3 , +.Xr ffi_call 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 new file mode 100644 index 0000000..5351513 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_call.3 @@ -0,0 +1,103 @@ +.Dd February 15, 2008 +.Dt ffi_call 3 +.Sh NAME +.Nm ffi_call +.Nd Invoke a foreign function. +.Sh SYNOPSIS +.In ffi.h +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_call +function provides a simple mechanism for invoking a function without +requiring knowledge of the function's interface at compile time. +.Fa fn +is called with the values retrieved from the pointers in the +.Fa avalue +array. The return value from +.Fa fn +is placed in storage pointed to by +.Fa rvalue . +.Fa cif +contains information describing the data types, sizes and alignments of the +arguments to and return value from +.Fa fn , +and must be initialized with +.Nm ffi_prep_cif +before it is used with +.Nm ffi_call . +.Pp +.Fa rvalue +must point to storage that is sizeof(ffi_arg) or larger for non-floating point +types. For smaller-sized return value types, the +.Nm ffi_arg +or +.Nm ffi_sarg +integral type must be used to hold +the return value. +.Sh EXAMPLES +.Bd -literal +#include +#include + +unsigned char +foo(unsigned int, float); + +int +main(int argc, const char **argv) +{ + ffi_cif cif; + ffi_type *arg_types[2]; + void *arg_values[2]; + ffi_status status; + + // Because the return value from foo() is smaller than sizeof(long), it + // must be passed as ffi_arg or ffi_sarg. + ffi_arg result; + + // Specify the data type of each argument. Available types are defined + // in . + arg_types[0] = &ffi_type_uint; + arg_types[1] = &ffi_type_float; + + // Prepare the ffi_cif structure. + if ((status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 2, &ffi_type_uint8, arg_types)) != FFI_OK) + { + // Handle the ffi_status error. + } + + // Specify the values of each argument. + unsigned int arg1 = 42; + float arg2 = 5.1; + + arg_values[0] = &arg1; + arg_values[1] = &arg2; + + // Invoke the function. + ffi_call(&cif, FFI_FN(foo), &result, arg_values); + + // The ffi_arg 'result' now contains the unsigned char returned from foo(), + // which can be accessed by a typecast. + printf("result is %hhu", (unsigned char)result); + + return 0; +} + +// The target function. +unsigned char +foo(unsigned int x, float y) +{ + unsigned char result = x - y; + return result; +} +.Ed +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 new file mode 100644 index 0000000..ab2be8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif.3 @@ -0,0 +1,68 @@ +.Dd February 15, 2008 +.Dt ffi_prep_cif 3 +.Sh NAME +.Nm ffi_prep_cif +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Fa abi +specifies a set of calling conventions to use. 
+.Fa atypes +is an array of +.Fa nargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. Note that to call a variadic function +.Nm ffi_prep_cif_var +must be used instead. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm . +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif_var 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 new file mode 100644 index 0000000..7e19d0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 @@ -0,0 +1,73 @@ +.Dd January 25, 2011 +.Dt ffi_prep_cif_var 3 +.Sh NAME +.Nm ffi_prep_cif_var +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif_var +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Fa abi +specifies a set of calling conventions to use. +.Fa atypes +is an array of +.Fa ntotalargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. +.Fa nfixedargs +must contain the number of fixed (non-variadic) arguments. +Note that to call a non-variadic function +.Nm ffi_prep_cif +must be used. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif_var +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm +. 
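As a hedged complement to the description above (not part of the vendored sources), the sketch below shows the nfixedargs/ntotalargs split in practice by calling the variadic printf(): one fixed argument, the format string, plus two variadic ones, so nfixedargs is 1 and ntotalargs is 3. The variable names are invented for the example.

/* Variadic call through ffi_prep_cif_var; variadic arguments follow the
   default promotions, so int and pointers can be passed as-is. */
#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[3] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_sint };
    const char *fmt  = "%s scored %d\n";
    const char *name = "libffi";
    int score = 42;
    void *arg_values[3] = { &fmt, &name, &score };
    ffi_arg rc;

    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3,
                         &ffi_type_sint, arg_types) != FFI_OK)
        return 1;   /* FFI_BAD_TYPEDEF or FFI_BAD_ABI, as documented above */

    ffi_call(&cif, FFI_FN(printf), &rc, arg_values);
    return (int)rc < 0;
}

On targets such as the Apple arm64 ABI handled later in this diff, fixed and variadic arguments are laid out differently, which is exactly why ffi_prep_cif_var must be used here rather than ffi_prep_cif.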
+.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln new file mode 100644 index 0000000..d9119df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln @@ -0,0 +1,33 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.28302.56 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Ffi_staticLib_arm64", "Ffi_staticLib.vcxproj", "{115502C0-BE05-4767-BF19-5C87D805FAD6}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|ARM64 = Debug|ARM64 + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|ARM64 = Release|ARM64 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.Build.0 = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x86.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.Build.0 = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x86.ActiveCfg = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {241C54C7-20DD-4897-9376-E6B6D1B43BD5} + EndGlobalSection +EndGlobal diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj new file mode 100644 index 0000000..3187699 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj @@ -0,0 +1,130 @@ + + + + + Debug + ARM64 + + + Release + ARM64 + + + + 15.0 + {115502C0-BE05-4767-BF19-5C87D805FAD6} + Win32Proj + FfistaticLib + 10.0.17763.0 + Ffi_staticLib_arm64 + + + + StaticLibrary + true + v141 + Unicode + + + StaticLibrary + false + v141 + true + Unicode + + + + + + + + + + + + + + + true + + + false + + + + NotUsing + Level3 + Disabled + true + FFI_BUILDING_DLL;_DEBUG;_LIB;USE_DL_PREFIX;ARM64;_M_ARM64;NDEBUG;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + false + true + + + false + + + Windows + true + + + + + NotUsing + Level3 + MaxSpeed + true + true + true + FFI_BUILDING_DLL;USE_DL_PREFIX;ARM64;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + true + Speed + true + ..\..\src;..\..\src\aarch64;%(AdditionalUsingDirectories) + + + Windows + true + true + true + + + true + + + + + + + + + + + + + + + + + + + + + + + cl /FA /EP /nologo /I"..\..\include" /I".\aarch64_include" /I"..\..\src\aarch64" "%(FullPath)" > $(IntDir)win64_armasm.i + armasm64 $(IntDir)win64_armasm.i /I"src\" /I"..\..\include" /I"..\..\src\aarch64" -o 
"$(IntDir)win64_armasm.obj" + + win64_armasm.obj;%(Outputs) + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters new file mode 100644 index 0000000..1f8c6e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters @@ -0,0 +1,57 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user new file mode 100644 index 0000000..be25078 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h new file mode 100644 index 0000000..02f26a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h @@ -0,0 +1,511 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. 
Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef AARCH64 +#define AARCH64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#ifndef _M_ARM64 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#ifndef _M_ARM64 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc.sh b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc.sh new file mode 100755 index 0000000..97facd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/msvcc.sh @@ -0,0 +1,353 @@ +#!/bin/sh + +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the MSVC wrappificator. +# +# The Initial Developer of the Original Code is +# Timothy Wall . +# Portions created by the Initial Developer are Copyright (C) 2009 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Daniel Witte +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. +# +# ***** END LICENSE BLOCK ***** + +# +# GCC-compatible wrapper for cl.exe and ml.exe. Arguments are given in GCC +# format and translated into something sensible for cl or ml. +# + +args_orig=$@ +args="-nologo -W3" +linkargs= +static_crt= +debug_crt= +cl="cl" +ml="ml" +safeseh="-safeseh" +output= +libpaths= +libversion=7 +verbose= + +while [ $# -gt 0 ] +do + case $1 + in + --verbose) + verbose=1 + shift 1 + ;; + --version) + args="-help" + shift 1 + ;; + -fexceptions) + # Don't enable exceptions for now. + #args="$args -EHac" + shift 1 + ;; + -m32) + shift 1 + ;; + -m64) + ml="ml64" # "$MSVC/x86_amd64/ml64" + safeseh= + shift 1 + ;; + -marm) + ml='armasm' + safeseh= + shift 1 + ;; + -marm64) + ml='armasm64' + safeseh= + shift 1 + ;; + -clang-cl) + cl="clang-cl" + shift 1 + ;; + -O0) + args="$args -Od" + shift 1 + ;; + -O*) + # Runtime error checks (enabled by setting -RTC1 in the -DFFI_DEBUG + # case below) are not compatible with optimization flags and will + # cause the build to fail. Therefore, drop the optimization flag if + # -DFFI_DEBUG is also set. + case $args_orig in + *-DFFI_DEBUG*) + args="$args" + ;; + *) + # The ax_cc_maxopt.m4 macro from the upstream autoconf-archive + # project doesn't support MSVC and therefore ends up trying to + # use -O3. Use the equivalent "max optimization" flag for MSVC + # instead of erroring out. + case $1 in + -O3) + args="$args -O2" + ;; + *) + args="$args $1" + ;; + esac + opt="true" + ;; + esac + shift 1 + ;; + -g) + # Enable debug symbol generation. + args="$args -Zi" + shift 1 + ;; + -DFFI_DEBUG) + # Enable runtime error checks. + args="$args -RTC1" + defines="$defines $1" + shift 1 + ;; + -DUSE_STATIC_RTL) + # Link against static CRT. + static_crt=1 + shift 1 + ;; + -DUSE_DEBUG_RTL) + # Link against debug CRT. 
+ debug_crt=1 + shift 1 + ;; + -c) + args="$args -c" + args="$(echo $args | sed 's%/Fe%/Fo%g')" + single="-c" + shift 1 + ;; + -D*=*) + name="$(echo $1|sed 's/-D\([^=][^=]*\)=.*/\1/g')" + value="$(echo $1|sed 's/-D[^=][^=]*=//g')" + args="$args -D${name}='$value'" + defines="$defines -D${name}='$value'" + shift 1 + ;; + -D*) + args="$args $1" + defines="$defines $1" + shift 1 + ;; + -I) + p=$(cygpath -m $2) + args="$args -I$p" + includes="$includes -I$p" + shift 2 + ;; + -I*) + p=$(cygpath -m ${1#-I}) + args="$args -I$p" + includes="$includes -I$p" + shift 1 + ;; + -L) + p=$(cygpath -m $2) + linkargs="$linkargs -LIBPATH:$p" + shift 2 + ;; + -L*) + p=$(cygpath -m ${1#-L}) + linkargs="$linkargs -LIBPATH:$p" + shift 1 + ;; + -link) + # add next argument verbatim to linker args + linkargs="$linkargs $2" + shift 2 + ;; + -l*) + case $1 + in + -lffi) + linkargs="$linkargs lib${1#-l}-${libversion}.lib" + ;; + *) + # ignore other libraries like -lm, hope they are + # covered by MSVCRT + # linkargs="$linkargs ${1#-l}.lib" + ;; + esac + shift 1 + ;; + -W|-Wextra) + # TODO map extra warnings + shift 1 + ;; + -Wall) + # -Wall on MSVC is overzealous, and we already build with -W3. Nothing + # to do here. + shift 1 + ;; + -pedantic) + # libffi tests -pedantic with -Wall, so drop it also. + shift 1 + ;; + -warn) + # ignore -warn all from libtool as well. + if test "$2" = "all"; then + shift 2 + else + args="$args -warn" + shift 1 + fi + ;; + -Werror) + args="$args -WX" + shift 1 + ;; + -W*) + # TODO map specific warnings + shift 1 + ;; + -S) + args="$args -FAs" + shift 1 + ;; + -o) + outdir="$(dirname $2)" + base="$(basename $2|sed 's/\.[^.]*//g')" + if [ -n "$single" ]; then + output="-Fo$2" + else + output="-Fe$2" + fi + armasm_output="-o $2" + if [ -n "$assembly" ]; then + args="$args $output" + else + args="$args $output -Fd$outdir/$base -Fp$outdir/$base -Fa$outdir/$base" + fi + shift 2 + ;; + *.S) + src=$1 + assembly="true" + shift 1 + ;; + *.c) + args="$args $1" + shift 1 + ;; + *) + # Assume it's an MSVC argument, and pass it through. + args="$args $1" + shift 1 + ;; + esac +done + +if [ -n "$linkargs" ]; then + + # If -Zi is specified, certain optimizations are implicitly disabled + # by MSVC. Add back those optimizations if this is an optimized build. + # NOTE: These arguments must come after all others. + if [ -n "$opt" ]; then + linkargs="$linkargs -OPT:REF -OPT:ICF -INCREMENTAL:NO" + fi + + args="$args -link $linkargs" +fi + +if [ -n "$static_crt" ]; then + md=-MT +else + md=-MD +fi + +if [ -n "$debug_crt" ]; then + md="${md}d" +fi + +if [ -n "$assembly" ]; then + if [ -z "$outdir" ]; then + outdir="." + fi + ppsrc="$outdir/$(basename $src|sed 's/.S$/.asm/g')" + + if [ $ml = "armasm" ]; then + defines="$defines -D_M_ARM" + fi + + if [ $ml = "armasm64" ]; then + defines="$defines -D_M_ARM64" + fi + + if test -n "$verbose"; then + echo "$cl -nologo -EP $includes $defines $src > $ppsrc" + fi + + "$cl" -nologo -EP $includes $defines $src > $ppsrc || exit $? + output="$(echo $output | sed 's%/F[dpa][^ ]*%%g')" + if [ $ml = "armasm" ]; then + args="-nologo -g -oldit $armasm_output $ppsrc -errorReport:prompt" + elif [ $ml = "armasm64" ]; then + args="-nologo -g $armasm_output $ppsrc -errorReport:prompt" + else + args="-nologo $safeseh $single $output $ppsrc" + fi + + if test -n "$verbose"; then + echo "$ml $args" + fi + + eval "\"$ml\" $args" + result=$? + + # required to fix ml64 broken output? 
+ #mv *.obj $outdir +else + args="$md $args" + + if test -n "$verbose"; then + echo "$cl $args" + fi + # Return an error code of 1 if an invalid command line parameter is passed + # instead of just ignoring it. Any output that is not a warning or an + # error is filtered so this command behaves more like gcc. cl.exe prints + # the name of the compiled file otherwise, which breaks the dejagnu checks + # for excess warnings and errors. + eval "(\"$cl\" $args 2>&1 1>&3 | \ + awk '{print \$0} /D9002/ {error=1} END{exit error}' >&2) 3>&1 | \ + awk '/warning|error/'" + result=$? +fi + +exit $result + +# vim: noai:ts=4:sw=4 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi.c new file mode 100644 index 0000000..4cc5925 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffi.c @@ -0,0 +1,1015 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__)|| defined (_M_ARM64) +#include +#include +#include +#include +#include +#include +#include "internal.h" +#ifdef _M_ARM64 +#include /* FlushInstructionCache */ +#endif + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +union _d +{ + UINT64 d; + UINT32 s[2]; +}; + +struct _v +{ + union _d d[2] __attribute__((aligned(16))); +}; + +struct call_context +{ + struct _v v[N_V_ARG_REG]; + UINT64 x[N_X_ARG_REG]; +}; + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#endif + +#else + +#if defined (__clang__) && defined (__APPLE__) +extern void sys_icache_invalidate (void *start, size_t len); +#endif + +static inline void +ffi_clear_cache (void *start, void *end) +{ +#if defined (__clang__) && defined (__APPLE__) + sys_icache_invalidate (start, (char *)end - (char *)start); +#elif defined (__GNUC__) + __builtin___clear_cache (start, end); +#elif defined (_M_ARM64) + FlushInstructionCache(GetCurrentProcess(), start, (char*)end - (char*)start); +#else +#error "Missing builtin to flush instruction cache" +#endif +} + +#endif + +/* A subroutine of is_vfp_type. 
Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of is_vfp_type. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY may be allocated to the FP registers. This is both an + fp scalar type as well as an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is an fp scalar. + + Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_* + constant for the type. */ + +static int +is_vfp_type (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (candidate) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + switch (candidate) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 2; + goto done; + } + return 0; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. */ + size = ty->size; + if (size < 4 || size > 64) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + case FFI_TYPE_LONGDOUBLE: + ele_count = size / sizeof(long double); + if (size != ele_count * sizeof(long double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return candidate * 4 + (4 - (int)ele_count); +} + +/* Representation of the procedure call argument marshalling + state. 
+ + The terse state variable names match the names used in the AARCH64 + PCS. */ + +struct arg_state +{ + unsigned ngrn; /* Next general-purpose register number. */ + unsigned nsrn; /* Next vector register number. */ + size_t nsaa; /* Next stack offset. */ + +#if defined (__APPLE__) + unsigned allocating_variadic; +#endif +}; + +/* Initialize a procedure call argument marshalling state. */ +static void +arg_init (struct arg_state *state) +{ + state->ngrn = 0; + state->nsrn = 0; + state->nsaa = 0; +#if defined (__APPLE__) + state->allocating_variadic = 0; +#endif +} + +/* Allocate an aligned slot on the stack and return a pointer to it. */ +static void * +allocate_to_stack (struct arg_state *state, void *stack, + size_t alignment, size_t size) +{ + size_t nsaa = state->nsaa; + + /* Round up the NSAA to the larger of 8 or the natural + alignment of the argument's type. */ +#if defined (__APPLE__) + if (state->allocating_variadic && alignment < 8) + alignment = 8; +#else + if (alignment < 8) + alignment = 8; +#endif + + nsaa = FFI_ALIGN (nsaa, alignment); + state->nsaa = nsaa + size; + + return (char *)stack + nsaa; +} + +static ffi_arg +extend_integer_type (void *source, int type) +{ + switch (type) + { + case FFI_TYPE_UINT8: + return *(UINT8 *) source; + case FFI_TYPE_SINT8: + return *(SINT8 *) source; + case FFI_TYPE_UINT16: + return *(UINT16 *) source; + case FFI_TYPE_SINT16: + return *(SINT16 *) source; + case FFI_TYPE_UINT32: + return *(UINT32 *) source; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + return *(SINT32 *) source; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + return *(UINT64 *) source; + break; + case FFI_TYPE_POINTER: + return *(uintptr_t *) source; + default: + abort(); + } +} + +#if defined(_MSC_VER) +void extend_hfa_type (void *dest, void *src, int h); +#else +static void +extend_hfa_type (void *dest, void *src, int h) +{ + ssize_t f = h - AARCH64_RET_S4; + void *x0; + + asm volatile ( + "adr %0, 0f\n" +" add %0, %0, %1\n" +" br %0\n" +"0: ldp s16, s17, [%3]\n" /* S4 */ +" ldp s18, s19, [%3, #8]\n" +" b 4f\n" +" ldp s16, s17, [%3]\n" /* S3 */ +" ldr s18, [%3, #8]\n" +" b 3f\n" +" ldp s16, s17, [%3]\n" /* S2 */ +" b 2f\n" +" nop\n" +" ldr s16, [%3]\n" /* S1 */ +" b 1f\n" +" nop\n" +" ldp d16, d17, [%3]\n" /* D4 */ +" ldp d18, d19, [%3, #16]\n" +" b 4f\n" +" ldp d16, d17, [%3]\n" /* D3 */ +" ldr d18, [%3, #16]\n" +" b 3f\n" +" ldp d16, d17, [%3]\n" /* D2 */ +" b 2f\n" +" nop\n" +" ldr d16, [%3]\n" /* D1 */ +" b 1f\n" +" nop\n" +" ldp q16, q17, [%3]\n" /* Q4 */ +" ldp q18, q19, [%3, #32]\n" +" b 4f\n" +" ldp q16, q17, [%3]\n" /* Q3 */ +" ldr q18, [%3, #32]\n" +" b 3f\n" +" ldp q16, q17, [%3]\n" /* Q2 */ +" b 2f\n" +" nop\n" +" ldr q16, [%3]\n" /* Q1 */ +" b 1f\n" +"4: str q19, [%2, #48]\n" +"3: str q18, [%2, #32]\n" +"2: str q17, [%2, #16]\n" +"1: str q16, [%2]" + : "=&r"(x0) + : "r"(f * 12), "r"(dest), "r"(src) + : "memory", "v16", "v17", "v18", "v19"); +} +#endif + +#if defined(_MSC_VER) +void* compress_hfa_type (void *dest, void *src, int h); +#else +static void * +compress_hfa_type (void *dest, void *reg, int h) +{ + switch (h) + { + case AARCH64_RET_S1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 12; +#endif + } + else + *(float *)dest = *(float *)reg; + break; + case AARCH64_RET_S2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.s, v17.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_S3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.s, v17.s, v18.s }[0], [%0]" + : : "r"(dest), 
"r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_S4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + case AARCH64_RET_D1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 8; +#endif + } + else + *(double *)dest = *(double *)reg; + break; + case AARCH64_RET_D2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.d, v17.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_D3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.d, v17.d, v18.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_D4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.d, v17.d, v18.d, v19.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + default: + if (dest != reg) + return memcpy (dest, reg, 16 * (4 - (h & 3))); + break; + } + return dest; +} +#endif + +/* Either allocate an appropriate register for the argument type, or if + none are available, allocate a stack slot and return a pointer + to the allocated space. */ + +static void * +allocate_int_to_reg_or_stack (struct call_context *context, + struct arg_state *state, + void *stack, size_t size) +{ + if (state->ngrn < N_X_ARG_REG) + return &context->x[state->ngrn++]; + + state->ngrn = N_X_ARG_REG; + return allocate_to_stack (state, stack, size, size); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + size_t bytes = cif->bytes; + int flags, i, n; + + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = AARCH64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = AARCH64_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = AARCH64_RET_UINT16; + break; + case FFI_TYPE_UINT32: + flags = AARCH64_RET_UINT32; + break; + case FFI_TYPE_SINT8: + flags = AARCH64_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = AARCH64_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = AARCH64_RET_SINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = AARCH64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + flags = is_vfp_type (rtype); + if (flags == 0) + { + size_t s = rtype->size; + if (s > 16) + { + flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM; + bytes += 8; + } + else if (s == 16) + flags = AARCH64_RET_INT128; + else if (s == 8) + flags = AARCH64_RET_INT64; + else + flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY; + } + break; + + default: + abort(); + } + + for (i = 0, n = cif->nargs; i < n; i++) + if (is_vfp_type (cif->arg_types[i])) + { + flags |= AARCH64_FLAG_ARG_V; + break; + } + + /* Round the stack up to a multiple of the stack alignment requirement. 
*/ + cif->bytes = (unsigned) FFI_ALIGN(bytes, 16); + cif->flags = flags; +#if defined (__APPLE__) + cif->aarch64_nfixedargs = 0; +#endif + + return FFI_OK; +} + +#if defined (__APPLE__) +/* Perform Apple-specific cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, + unsigned int ntotalargs) +{ + ffi_status status = ffi_prep_cif_machdep (cif); + cif->aarch64_nfixedargs = nfixedargs; + return status; +} +#endif /* __APPLE__ */ + +extern void ffi_call_SYSV (struct call_context *context, void *frame, + void (*fn)(void), void *rvalue, int flags, + void *closure) FFI_HIDDEN; + +/* Call a function with the provided arguments and capture the return + value. */ +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue, + void **avalue, void *closure) +{ + struct call_context *context; + void *stack, *frame, *rvalue; + struct arg_state state; + size_t stack_bytes, rtype_size, rsize; + int i, nargs, flags; + ffi_type *rtype; + + flags = cif->flags; + rtype = cif->rtype; + rtype_size = rtype->size; + stack_bytes = cif->bytes; + + /* If the target function returns a structure via hidden pointer, + then we cannot allow a null rvalue. Otherwise, mash a null + rvalue to void return type. */ + rsize = 0; + if (flags & AARCH64_RET_IN_MEM) + { + if (orig_rvalue == NULL) + rsize = rtype_size; + } + else if (orig_rvalue == NULL) + flags &= AARCH64_FLAG_ARG_V; + else if (flags & AARCH64_RET_NEED_COPY) + rsize = 16; + + /* Allocate consectutive stack for everything we'll need. */ + context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize); + stack = context + 1; + frame = (void*)((uintptr_t)stack + (uintptr_t)stack_bytes); + rvalue = (rsize ? (void*)((uintptr_t)frame + 32) : orig_rvalue); + + arg_init (&state); + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + size_t s = ty->size; + void *a = avalue[i]; + int h, t; + + t = ty->type; + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + /* If the argument is a basic type the argument is allocated to an + appropriate register, or if none are available, to the stack. */ + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_pointer: + { + ffi_arg ext = extend_integer_type (a, t); + if (state.ngrn < N_X_ARG_REG) + context->x[state.ngrn++] = ext; + else + { + void *d = allocate_to_stack (&state, stack, ty->alignment, s); + state.ngrn = N_X_ARG_REG; + /* Note that the default abi extends each argument + to a full 64-bit slot, while the iOS abi allocates + only enough space. 
*/ +#ifdef __APPLE__ + memcpy(d, a, s); +#else + *(ffi_arg *)d = ext; +#endif + } + } + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + { + void *dest; + + h = is_vfp_type (ty); + if (h) + { + int elems = 4 - (h & 3); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + elems <= N_X_ARG_REG) + { + dest = &context->x[state.ngrn]; + state.ngrn += elems; + extend_hfa_type(dest, a, h); + break; + } + state.nsrn = N_X_ARG_REG; + dest = allocate_to_stack(&state, stack, ty->alignment, s); + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + elems <= N_V_ARG_REG) + { + dest = &context->v[state.nsrn]; + state.nsrn += elems; + extend_hfa_type (dest, a, h); + break; + } + state.nsrn = N_V_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* If the argument is a composite type that is larger than 16 + bytes, then the argument has been copied to memory, and + the argument is replaced by a pointer to the copy. */ + a = &avalue[i]; + t = FFI_TYPE_POINTER; + s = sizeof (void *); + goto do_pointer; + } + else + { + size_t n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + /* If the argument is a composite type and the size in + double-words is not more than the number of available + X registers, then the argument is copied into + consecutive X registers. */ + dest = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + /* Otherwise, there are insufficient X registers. Further + X register allocations are prevented, the NSAA is + adjusted and the argument is copied to memory at the + adjusted NSAA. */ + state.ngrn = N_X_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + } + memcpy (dest, a, s); + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + ffi_call_SYSV (context, frame, fn, rvalue, flags, closure); + + if (flags & AARCH64_RET_NEED_COPY) + memcpy (orig_rvalue, rvalue, rtype_size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif /* FFI_GO_CLOSURES */ + +/* Build a trampoline. 
*/ + +extern void ffi_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + void (*start)(void); + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_closure_SYSV_V; + else + start = ffi_closure_SYSV; + +#if FFI_EXEC_TRAMPOLINE_TABLE +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH + codeloc = ptrauth_strip (codeloc, ptrauth_key_asia); +#endif + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = start; +#endif +#else + static const unsigned char trampoline[16] = { + 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */ + 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */ + 0x00, 0x02, 0x1f, 0xd6 /* br x16 */ + }; + char *tramp = closure->tramp; + + memcpy (tramp, trampoline, sizeof(trampoline)); + + *(UINT64 *)(tramp + 16) = (uintptr_t)start; + + ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE); + + /* Also flush the cache for code mapping. */ +#ifdef _M_ARM64 + // Not using dlmalloc.c for Windows ARM64 builds + // so calling ffi_data_to_code_pointer() isn't necessary + unsigned char *tramp_code = tramp; + #else + unsigned char *tramp_code = ffi_data_to_code_pointer (tramp); + #endif + ffi_clear_cache (tramp_code, tramp_code + FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +extern void ffi_go_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*start)(void); + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_go_closure_SYSV_V; + else + start = ffi_go_closure_SYSV; + + closure->tramp = start; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif /* FFI_GO_CLOSURES */ + +/* Primary handler to setup and invoke a function within a closure. + + A closure when invoked enters via the assembler wrapper + ffi_closure_SYSV(). The wrapper allocates a call context on the + stack, saves the interesting registers (from the perspective of + the calling convention) into the context then passes control to + ffi_closure_SYSV_inner() passing the saved context and a pointer to + the stack at the point ffi_closure_SYSV() was invoked. + + On the return path the assembler wrapper will reload call context + registers. + + ffi_closure_SYSV_inner() marshalls the call context into ffi value + descriptors, invokes the wrapped function, then marshalls the return + value back into the call context. 
*/ + +int FFI_HIDDEN +ffi_closure_SYSV_inner (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + struct call_context *context, + void *stack, void *rvalue, void *struct_rvalue) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + int i, h, nargs, flags; + struct arg_state state; + + arg_init (&state); + + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + int t = ty->type; + size_t n, s = ty->size; + + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + h = is_vfp_type (ty); + if (h) + { + n = 4 - (h & 3); +#ifdef _M_ARM64 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + n <= N_X_ARG_REG) + { + void *reg = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + + /* Eeek! We need a pointer to the structure, however the + homogeneous float elements are being passed in individual + registers, therefore for float and double the structure + is not represented as a contiguous sequence of bytes in + our saved register context. We don't need the original + contents of the register storage, so we reformat the + structure into the same memory. */ + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + n <= N_V_ARG_REG) + { + void *reg = &context->v[state.nsrn]; + state.nsrn += (unsigned int)n; + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } +#ifdef _M_ARM64 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* Replace Composite type of size greater than 16 with a + pointer. 
*/ + avalue[i] = *(void **) + allocate_int_to_reg_or_stack (context, &state, stack, + sizeof (void *)); + } + else + { + n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + avalue[i] = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + state.ngrn = N_X_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + flags = cif->flags; + if (flags & AARCH64_RET_IN_MEM) + rvalue = struct_rvalue; + + fun (cif, rvalue, avalue, user_data); + + return flags; +} + +#endif /* (__aarch64__) || defined(__arm64__)|| defined (_M_ARM64)*/ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget.h new file mode 100644 index 0000000..ecb6d2d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/ffitarget.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +#ifdef __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#elif defined(_M_ARM64) +#define FFI_SIZEOF_ARG 8 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#ifdef _M_ARM64 +#define FFI_EXTRA_CIF_FIELDS unsigned is_variadic +#endif + +/* ---- Internal ---- */ + +#if defined (__APPLE__) +#define FFI_TARGET_SPECIFIC_VARIADIC +#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs +#elif !defined(_M_ARM64) +/* iOS and Windows reserve x18 for the system. Disable Go closures until + a new static chain is chosen. */ +#define FFI_GO_CLOSURES 1 +#endif + +#ifndef _M_ARM64 +/* No complex type on Windows */ +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal.h new file mode 100644 index 0000000..9c3e077 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/internal.h @@ -0,0 +1,67 @@ +/* +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define AARCH64_RET_VOID 0 +#define AARCH64_RET_INT64 1 +#define AARCH64_RET_INT128 2 + +#define AARCH64_RET_UNUSED3 3 +#define AARCH64_RET_UNUSED4 4 +#define AARCH64_RET_UNUSED5 5 +#define AARCH64_RET_UNUSED6 6 +#define AARCH64_RET_UNUSED7 7 + +/* Note that FFI_TYPE_FLOAT == 2, _DOUBLE == 3, _LONGDOUBLE == 4, + so _S4 through _Q1 are layed out as (TYPE * 4) + (4 - COUNT). 
*/ +#define AARCH64_RET_S4 8 +#define AARCH64_RET_S3 9 +#define AARCH64_RET_S2 10 +#define AARCH64_RET_S1 11 + +#define AARCH64_RET_D4 12 +#define AARCH64_RET_D3 13 +#define AARCH64_RET_D2 14 +#define AARCH64_RET_D1 15 + +#define AARCH64_RET_Q4 16 +#define AARCH64_RET_Q3 17 +#define AARCH64_RET_Q2 18 +#define AARCH64_RET_Q1 19 + +/* Note that each of the sub-64-bit integers gets two entries. */ +#define AARCH64_RET_UINT8 20 +#define AARCH64_RET_UINT16 22 +#define AARCH64_RET_UINT32 24 + +#define AARCH64_RET_SINT8 26 +#define AARCH64_RET_SINT16 28 +#define AARCH64_RET_SINT32 30 + +#define AARCH64_RET_MASK 31 + +#define AARCH64_RET_IN_MEM (1 << 5) +#define AARCH64_RET_NEED_COPY (1 << 6) + +#define AARCH64_FLAG_ARG_V_BIT 7 +#define AARCH64_FLAG_ARG_V (1 << AARCH64_FLAG_ARG_V_BIT) + +#define N_X_ARG_REG 8 +#define N_V_ARG_REG 8 +#define CALL_CONTEXT_SIZE (N_V_ARG_REG * 16 + N_X_ARG_REG * 8) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv.S new file mode 100644 index 0000000..a345439 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/sysv.S @@ -0,0 +1,451 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__) +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#endif + +#ifdef __AARCH64EB__ +# define BE(X) X +#else +# define BE(X) 0 +#endif + +#ifdef __ILP32__ +#define PTR_REG(n) w##n +#else +#define PTR_REG(n) x##n +#endif + +#ifdef __ILP32__ +#define PTR_SIZE 4 +#else +#define PTR_SIZE 8 +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) +# define BR(r) braaz r +# define BLR(r) blraaz r +#else +# define BR(r) br r +# define BLR(r) blr r +#endif + + .text + .align 4 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + + Therefore on entry we have: + + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + cfi_startproc +CNAME(ffi_call_SYSV): + /* Use a stack frame allocated by our caller. 
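The S/D/Q return codes above follow the arithmetic spelled out in the comment, (element FFI_TYPE * 4) + (4 - element count); a quick self-check against three values from the table, assuming the FFI_TYPE_FLOAT/DOUBLE/LONGDOUBLE values of 2, 3 and 4 quoted in that comment:

    #include <assert.h>

    /* Return-code arithmetic for homogeneous FP aggregates. */
    static int hfa_ret_code(int elt_ffi_type, int count)
    {
      return elt_ffi_type * 4 + (4 - count);
    }

    int main(void)
    {
      assert(hfa_ret_code(2, 2) == 10);  /* AARCH64_RET_S2 */
      assert(hfa_ret_code(3, 3) == 13);  /* AARCH64_RET_D3 */
      assert(hfa_ret_code(4, 1) == 19);  /* AARCH64_RET_Q1 */
      return 0;
    }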
*/ + cfi_def_cfa(x1, 32); + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + cfi_def_cfa_register(x29) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + mov x18, x5 /* install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] +1: + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + BLR(x9) /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + cfi_def_cfa_register (sp) + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, 0f + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. */ + .align 4 +0: ret /* VOID */ + nop +1: str x0, [x3] /* INT64 */ + ret +2: stp x0, x1, [x3] /* INT128 */ + ret +3: brk #1000 /* UNUSED */ + ret +4: brk #1000 /* UNUSED */ + ret +5: brk #1000 /* UNUSED */ + ret +6: brk #1000 /* UNUSED */ + ret +7: brk #1000 /* UNUSED */ + ret +8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret +9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret +10: stp s0, s1, [x3] /* S2 */ + ret +11: str s0, [x3] /* S1 */ + ret +12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret +13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret +14: stp d0, d1, [x3] /* D2 */ + ret +15: str d0, [x3] /* D1 */ + ret +16: str q3, [x3, #48] /* Q4 */ + nop +17: str q2, [x3, #32] /* Q3 */ + nop +18: stp q0, q1, [x3] /* Q2 */ + ret +19: str q0, [x3] /* Q1 */ + ret +20: uxtb w0, w0 /* UINT8 */ + str x0, [x3] +21: ret /* reserved */ + nop +22: uxth w0, w0 /* UINT16 */ + str x0, [x3] +23: ret /* reserved */ + nop +24: mov w0, w0 /* UINT32 */ + str x0, [x3] +25: ret /* reserved */ + nop +26: sxtb x0, w0 /* SINT8 */ + str x0, [x3] +27: ret /* reserved */ + nop +28: sxth x0, w0 /* SINT16 */ + str x0, [x3] +29: ret /* reserved */ + nop +30: sxtw x0, w0 /* SINT32 */ + str x0, [x3] +31: ret /* reserved */ + nop + + cfi_endproc + + .globl CNAME(ffi_call_SYSV) + FFI_HIDDEN(CNAME(ffi_call_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_call_SYSV), #function + .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV) +#endif + +/* ffi_closure_SYSV + + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. 
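For context, callers never reach ffi_call_SYSV above directly; they go through the public ffi_call entry point, which builds the argument context that the stub then loads into x0-x7 and q0-q7. A minimal sketch with the documented API (puts and the message are illustrative):

    #include <ffi.h>
    #include <stdio.h>

    int main(void)
    {
      ffi_cif cif;
      ffi_type *args[1] = { &ffi_type_pointer };
      char *s = "Hello from ffi_call";
      void *values[1] = { &s };
      ffi_arg rc;  /* int return, widened to ffi_arg as in the table above */

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK)
        ffi_call(&cif, FFI_FN(puts), &rc, values);
      return 0;
    }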
+*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + .align 4 +CNAME(ffi_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV_V), #function + .size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ +.Ldo_closure: + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + bl CNAME(ffi_closure_SYSV_inner) + + /* Load the return value as directed. */ +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) + autiza x1 +#endif + adr x1, 0f + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. */ + .align 4 +0: b 99f /* VOID */ + nop +1: ldr x0, [x3] /* INT64 */ + b 99f +2: ldp x0, x1, [x3] /* INT128 */ + b 99f +3: brk #1000 /* UNUSED */ + nop +4: brk #1000 /* UNUSED */ + nop +5: brk #1000 /* UNUSED */ + nop +6: brk #1000 /* UNUSED */ + nop +7: brk #1000 /* UNUSED */ + nop +8: ldr s3, [x3, #12] /* S4 */ + nop +9: ldr s2, [x3, #8] /* S3 */ + nop +10: ldp s0, s1, [x3] /* S2 */ + b 99f +11: ldr s0, [x3] /* S1 */ + b 99f +12: ldr d3, [x3, #24] /* D4 */ + nop +13: ldr d2, [x3, #16] /* D3 */ + nop +14: ldp d0, d1, [x3] /* D2 */ + b 99f +15: ldr d0, [x3] /* D1 */ + b 99f +16: ldr q3, [x3, #48] /* Q4 */ + nop +17: ldr q2, [x3, #32] /* Q3 */ + nop +18: ldp q0, q1, [x3] /* Q2 */ + b 99f +19: ldr q0, [x3] /* Q1 */ + b 99f +20: ldrb w0, [x3, #BE(7)] /* UINT8 */ + b 99f +21: brk #1000 /* reserved */ + nop +22: ldrh w0, [x3, #BE(6)] /* UINT16 */ + b 99f +23: brk #1000 /* reserved */ + nop +24: ldr w0, [x3, #BE(4)] /* UINT32 */ + b 99f +25: brk #1000 /* reserved */ + nop +26: ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b 99f +27: brk #1000 /* reserved */ + nop +28: ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b 99f +29: brk #1000 /* reserved */ + nop +30: ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop +31: /* reserved */ +99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS + cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS) + cfi_restore (x29) + cfi_restore (x30) + ret + cfi_endproc + + .globl CNAME(ffi_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV), #function + .size CNAME(ffi_closure_SYSV), . 
- CNAME(ffi_closure_SYSV) +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + .align PAGE_MAX_SHIFT +CNAME(ffi_closure_trampoline_table_page): + .rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr x16, -PAGE_MAX_SIZE + ldp x17, x16, [x16] + BR(x16) + nop /* each entry in the trampoline config page is 2*sizeof(void*) so the trampoline itself cannot be smaller that 16 bytes */ + .endr + + .globl CNAME(ffi_closure_trampoline_table_page) + FFI_HIDDEN(CNAME(ffi_closure_trampoline_table_page)) + #ifdef __ELF__ + .type CNAME(ffi_closure_trampoline_table_page), #function + .size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page) + #endif +#endif + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ + +#ifdef FFI_GO_CLOSURES + .align 4 +CNAME(ffi_go_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV_V), #function + .size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_go_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b .Ldo_closure + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV), #function + .size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV) +#endif +#endif /* FFI_GO_CLOSURES */ +#endif /* __arm64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm.S new file mode 100644 index 0000000..a79f8a8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/aarch64/win64_armasm.S @@ -0,0 +1,506 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
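The closure stubs and the trampoline table page above are only reached through a trampoline installed by the public closure API; the handler they eventually invoke has the (cif, ret, args, user_data) shape seen in the final call inside ffi_closure_SYSV_inner. A minimal user-side sketch using the documented calls (the handler name and message are illustrative):

    #include <ffi.h>
    #include <stdio.h>

    /* Invoked by the closure machinery with pointers into the saved
       register/stack area. */
    static void puts_binding(ffi_cif *cif, void *ret, void **args, void *stream)
    {
      *(ffi_arg *)ret = fputs(*(char **)args[0], (FILE *)stream);
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *args[1] = { &ffi_type_pointer };
      void *code;
      ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

      if (closure)
        {
          if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK
              && ffi_prep_closure_loc(closure, &cif, puts_binding, stdout, code) == FFI_OK)
            ((int (*)(char *))code)("Hello from a libffi closure");
          ffi_closure_free(closure);
        }
      return 0;
    }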
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + OPT 2 /*disable listing */ +/* For some macros to add unwind information */ +#include "ksarm64.h" + OPT 1 /*re-enable listing */ + +#define BE(X) 0 +#define PTR_REG(n) x##n +#define PTR_SIZE 8 + + IMPORT ffi_closure_SYSV_inner + EXPORT ffi_call_SYSV + EXPORT ffi_closure_SYSV_V + EXPORT ffi_closure_SYSV + EXPORT extend_hfa_type + EXPORT compress_hfa_type +#ifdef FFI_GO_CLOSURES + EXPORT ffi_go_closure_SYSV_V + EXPORT ffi_go_closure_SYSV +#endif + + TEXTAREA, ALLIGN=8 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + Therefore on entry we have: + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + NESTED_ENTRY ffi_call_SYSV_fake + + /* For unwind information, Windows has to store fp and lr */ + PROLOG_SAVE_REG_PAIR x29, x30, #-32! + + ALTERNATE_ENTRY ffi_call_SYSV + /* Use a stack frame allocated by our caller. */ + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + /*mov x18, x5 install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz x4, #AARCH64_FLAG_ARG_V_BIT, ffi_call_SYSV_L1 + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] + +ffi_call_SYSV_L1 + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + blr x9 /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, ffi_call_SYSV_return + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. 
*/ + ALIGN 4 +ffi_call_SYSV_return + ret /* VOID */ + nop + str x0, [x3] /* INT64 */ + ret + stp x0, x1, [x3] /* INT128 */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret + st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret + stp s0, s1, [x3] /* S2 */ + ret + str s0, [x3] /* S1 */ + ret + st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret + st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret + stp d0, d1, [x3] /* D2 */ + ret + str d0, [x3] /* D1 */ + ret + str q3, [x3, #48] /* Q4 */ + nop + str q2, [x3, #32] /* Q3 */ + nop + stp q0, q1, [x3] /* Q2 */ + ret + str q0, [x3] /* Q1 */ + ret + uxtb w0, w0 /* UINT8 */ + str x0, [x3] + ret /* reserved */ + nop + uxth w0, w0 /* UINT16 */ + str x0, [x3] + ret /* reserved */ + nop + mov w0, w0 /* UINT32 */ + str x0, [x3] + ret /* reserved */ + nop + sxtb x0, w0 /* SINT8 */ + str x0, [x3] + ret /* reserved */ + nop + sxth x0, w0 /* SINT16 */ + str x0, [x3] + ret /* reserved */ + nop + sxtw x0, w0 /* SINT32 */ + str x0, [x3] + ret /* reserved */ + nop + + + NESTED_END ffi_call_SYSV_fake + + +/* ffi_closure_SYSV + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + NESTED_ENTRY ffi_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + + b ffi_closure_SYSV_save_argument + NESTED_END ffi_closure_SYSV_V + + NESTED_ENTRY ffi_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ + +do_closure + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + + bl ffi_closure_SYSV_inner + + /* Load the return value as directed. */ + adr x1, ffi_closure_SYSV_return_base + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. 
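Both this ARMASM port and the ELF sysv.S version above share the same frame sizing for the closure stubs; spelled out with the aarch64/internal.h values (local copies of the constants, for illustration only):

    #include <assert.h>

    enum {
      N_X_ARG_REG_LOCAL = 8,                               /* x0-x7 */
      N_V_ARG_REG_LOCAL = 8,                               /* q0-q7 */
      CALL_CONTEXT_SIZE_LOCAL =
          N_V_ARG_REG_LOCAL * 16 + N_X_ARG_REG_LOCAL * 8,  /* 192   */
      CLOSURE_FRAME_LOCAL = 8 * 2 + CALL_CONTEXT_SIZE_LOCAL + 64
    };

    /* fp/lr pair + saved argument registers + a 64-byte return-value area,
       the largest direct return being a 4-element q-register HFA. */
    static_assert(CALL_CONTEXT_SIZE_LOCAL == 192, "argument save area");
    static_assert(CLOSURE_FRAME_LOCAL == 272, "ffi_closure_SYSV_FS");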
*/ + ALIGN 8 +ffi_closure_SYSV_return_base + b ffi_closure_SYSV_epilog /* VOID */ + nop + ldr x0, [x3] /* INT64 */ + b ffi_closure_SYSV_epilog + ldp x0, x1, [x3] /* INT128 */ + b ffi_closure_SYSV_epilog + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + ldr s3, [x3, #12] /* S4 */ + nop + ldr s2, [x3, #8] /* S3 */ + nop + ldp s0, s1, [x3] /* S2 */ + b ffi_closure_SYSV_epilog + ldr s0, [x3] /* S1 */ + b ffi_closure_SYSV_epilog + ldr d3, [x3, #24] /* D4 */ + nop + ldr d2, [x3, #16] /* D3 */ + nop + ldp d0, d1, [x3] /* D2 */ + b ffi_closure_SYSV_epilog + ldr d0, [x3] /* D1 */ + b ffi_closure_SYSV_epilog + ldr q3, [x3, #48] /* Q4 */ + nop + ldr q2, [x3, #32] /* Q3 */ + nop + ldp q0, q1, [x3] /* Q2 */ + b ffi_closure_SYSV_epilog + ldr q0, [x3] /* Q1 */ + b ffi_closure_SYSV_epilog + ldrb w0, [x3, #BE(7)] /* UINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrh w0, [x3, #BE(6)] /* UINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldr w0, [x3, #BE(4)] /* UINT32 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop + /* reserved */ + +ffi_closure_SYSV_epilog + EPILOG_RESTORE_REG_PAIR x29, x30, #ffi_closure_SYSV_FS! + EPILOG_RETURN + NESTED_END ffi_closure_SYSV + + +#ifdef FFI_GO_CLOSURES + NESTED_ENTRY ffi_go_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b ffi_go_closure_SYSV_save_argument + NESTED_END ffi_go_closure_SYSV_V + + NESTED_ENTRY ffi_go_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_go_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. 
*/ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b do_closure + NESTED_END ffi_go_closure_SYSV + +#endif /* FFI_GO_CLOSURES */ + + +/* void extend_hfa_type (void *dest, void *src, int h) */ + + LEAF_ENTRY extend_hfa_type + + adr x3, extend_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +extend_hfa_type_jump_base + ldp s16, s17, [x1] /* S4 */ + ldp s18, s19, [x1, #8] + b extend_hfa_type_store_4 + nop + + ldp s16, s17, [x1] /* S3 */ + ldr s18, [x1, #8] + b extend_hfa_type_store_3 + nop + + ldp s16, s17, [x1] /* S2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr s16, [x1] /* S1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp d16, d17, [x1] /* D4 */ + ldp d18, d19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp d16, d17, [x1] /* D3 */ + ldr d18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp d16, d17, [x1] /* D2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr d16, [x1] /* D1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp q16, q17, [x1] /* Q2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr q16, [x1] /* Q1 */ + b extend_hfa_type_store_1 + +extend_hfa_type_store_4 + str q19, [x0, #48] +extend_hfa_type_store_3 + str q18, [x0, #32] +extend_hfa_type_store_2 + str q17, [x0, #16] +extend_hfa_type_store_1 + str q16, [x0] + ret + + LEAF_END extend_hfa_type + + +/* void compress_hfa_type (void *dest, void *reg, int h) */ + + LEAF_ENTRY compress_hfa_type + + adr x3, compress_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +compress_hfa_type_jump_base + ldp q16, q17, [x1] /* S4 */ + ldp q18, q19, [x1, #32] + st4 { v16.s, v17.s, v18.s, v19.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S3 */ + ldr q18, [x1, #32] + st3 { v16.s, v17.s, v18.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S2 */ + st2 { v16.s, v17.s }[0], [x0] + ret + nop + + ldr q16, [x1] /* S1 */ + st1 { v16.s }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* D4 */ + ldp q18, q19, [x1, #32] + st4 { v16.d, v17.d, v18.d, v19.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D3 */ + ldr q18, [x1, #32] + st3 { v16.d, v17.d, v18.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D2 */ + st2 { v16.d, v17.d }[0], [x0] + ret + nop + + ldr q16, [x1] /* D1 */ + st1 { v16.d }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #32] + b compress_hfa_type_store_q4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #32] + b compress_hfa_type_store_q3 + nop + + ldp q16, q17, [x1] /* Q2 */ + stp q16, q17, [x0] + ret + nop + + ldr q16, [x1] /* Q1 */ + str q16, [x0] + ret + +compress_hfa_type_store_q4 + str q19, [x0, #48] +compress_hfa_type_store_q3 + str q18, [x0, #32] + stp q16, q17, [x0] + ret + + LEAF_END compress_hfa_type + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi.c new file mode 100644 index 0000000..7a95e97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffi.c @@ -0,0 +1,521 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Anthony Green + Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc. 
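extend_hfa_type and compress_hfa_type above move homogeneous FP aggregates between their packed in-memory form and a register image with one element per 16-byte q-register slot. A C sketch of that data movement for the double case only (the float and long double cases differ just in element size):

    #include <string.h>

    /* Packed doubles (8-byte stride) -> one per 16-byte register slot. */
    static void extend_hfa_doubles(void *reg_image, const void *packed, int count)
    {
      for (int i = 0; i < count; i++)
        memcpy((char *)reg_image + 16 * i, (const char *)packed + 8 * i, 8);
    }

    /* Register image (16-byte stride) -> packed doubles. */
    static void compress_hfa_doubles(void *packed, const void *reg_image, int count)
    {
      for (int i = 0; i < count; i++)
        memcpy((char *)packed + 8 * i, (const char *)reg_image + 16 * i, 8);
    }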
+ + Alpha Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if defined(__LONG_DOUBLE_128__) +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +extern void ffi_call_osf(void *stack, void *frame, unsigned flags, + void *raddr, void (*fn)(void), void *closure) + FFI_HIDDEN; +extern void ffi_closure_osf(void) FFI_HIDDEN; +extern void ffi_go_closure_osf(void) FFI_HIDDEN; + +/* Promote a float value to its in-register double representation. + Unlike actually casting to double, this does not trap on NaN. */ +static inline UINT64 lds(void *ptr) +{ + UINT64 ret; + asm("lds %0,%1" : "=f"(ret) : "m"(*(UINT32 *)ptr)); + return ret; +} + +/* And the reverse. */ +static inline void sts(void *ptr, UINT64 val) +{ + asm("sts %1,%0" : "=m"(*(UINT32 *)ptr) : "f"(val)); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int flags, i, avn; + ffi_type *rtype, *itype; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + /* Compute the size of the argument area. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + itype = cif->arg_types[i]; + switch (itype->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + /* All take one 8 byte slot. */ + bytes += 8; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + /* Passed by value in N slots. */ + bytes += FFI_ALIGN(itype->size, FFI_SIZEOF_ARG); + break; + + case FFI_TYPE_COMPLEX: + /* _Complex long double passed by reference; others in 2 slots. 
*/ + if (itype->elements[0]->type == FFI_TYPE_LONGDOUBLE) + bytes += 8; + else + bytes += 16; + break; + + default: + abort(); + } + } + + /* Set the return type flag */ + rtype = cif->rtype; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = ALPHA_FLAGS(ALPHA_ST_VOID, ALPHA_LD_VOID); + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT32); + break; + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_FLOAT, ALPHA_LD_FLOAT); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_DOUBLE, ALPHA_LD_DOUBLE); + break; + case FFI_TYPE_UINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT8); + break; + case FFI_TYPE_SINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT8); + break; + case FFI_TYPE_UINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT16); + break; + case FFI_TYPE_SINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT16); + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + /* Passed in memory, with a hidden pointer. */ + flags = ALPHA_RET_IN_MEM; + break; + case FFI_TYPE_COMPLEX: + itype = rtype->elements[0]; + switch (itype->type) + { + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXF, ALPHA_LD_CPLXF); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXD, ALPHA_LD_CPLXD); + break; + default: + if (rtype->size <= 8) + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + else + flags = ALPHA_RET_IN_MEM; + break; + } + break; + default: + abort(); + } + cif->flags = flags; + + /* Include the hidden structure pointer in args requirement. */ + if (flags == ALPHA_RET_IN_MEM) + bytes += 8; + /* Minimum size is 6 slots, so that ffi_call_osf can pop them. */ + if (bytes < 6*8) + bytes = 6*8; + cif->bytes = bytes; + + return FFI_OK; +} + +static unsigned long +extend_basic_type(void *valp, int type, int argn) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)valp; + case FFI_TYPE_UINT8: + return *(UINT8 *)valp; + case FFI_TYPE_SINT16: + return *(SINT16 *)valp; + case FFI_TYPE_UINT16: + return *(UINT16 *)valp; + + case FFI_TYPE_FLOAT: + if (argn < 6) + return lds(valp); + /* FALLTHRU */ + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Note that unsigned 32-bit quantities are sign extended. */ + return *(SINT32 *)valp; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + return *(UINT64 *)valp; + + default: + abort(); + } +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + unsigned long *argp; + long i, avn, argn, flags = cif->flags; + ffi_type **arg_types; + void *frame; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + if (rvalue == NULL && flags == ALPHA_RET_IN_MEM) + rvalue = alloca(cif->rtype->size); + + /* Allocate the space for the arguments, plus 4 words of temp + space for ffi_call_osf. 
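On Alpha, cif->flags packs two one-byte jump-table indices: the store code consumed after ffi_call_osf returns and the load (promotion) code returned from ffi_closure_osf_inner. The constants and the ALPHA_FLAGS macro live in alpha/internal.h further down in this diff; a small model of the packing, using local copies of those macros:

    #include <assert.h>

    #define ALPHA_ST_SHIFT_LOCAL 0
    #define ALPHA_LD_SHIFT_LOCAL 8
    #define ALPHA_FLAGS_LOCAL(S, L) (((L) << ALPHA_LD_SHIFT_LOCAL) | (S))

    int main(void)
    {
      /* float return: ALPHA_ST_FLOAT == 2, ALPHA_LD_FLOAT == 7 */
      unsigned flags = ALPHA_FLAGS_LOCAL(2, 7);
      assert(((flags >> ALPHA_ST_SHIFT_LOCAL) & 0xff) == 2);  /* used by ffi_call_int   */
      assert(((flags >> ALPHA_LD_SHIFT_LOCAL) & 0xff) == 7);  /* returned by the closure */
      return 0;
    }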
*/ + argp = frame = alloca(cif->bytes + 4*FFI_SIZEOF_ARG); + frame += cif->bytes; + + argn = 0; + if (flags == ALPHA_RET_IN_MEM) + argp[argn++] = (unsigned long)rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + int type = ty->type; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + argp[argn] = extend_basic_type(valp, type, argn); + argn++; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Note that 128-bit long double is passed by reference. */ + argp[argn++] = (unsigned long)valp; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + memcpy(argp + argn, valp, size); + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + if (type == FFI_TYPE_LONGDOUBLE) + goto by_reference; + + /* Most complex types passed as two separate arguments. */ + size = ty->elements[0]->size; + argp[argn] = extend_basic_type(valp, type, argn); + argp[argn + 1] = extend_basic_type(valp + size, type, argn + 1); + argn += 2; + break; + + default: + abort(); + } + } + + flags = (flags >> ALPHA_ST_SHIFT) & 0xff; + ffi_call_osf(argp, frame, flags, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x47fb0401; /* mov $27,$1 */ + tramp[1] = 0xa77b0010; /* ldq $27,16($27) */ + tramp[2] = 0x6bfb0000; /* jmp $31,($27),0 */ + tramp[3] = 0x47ff041f; /* nop */ + *(void **) &tramp[4] = ffi_closure_osf; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the Icache. + + Tru64 UNIX as doesn't understand the imb mnemonic, so use call_pal + instead, since both Compaq as and gas can handle it. + + 0x86 is PAL_imb in Tru64 UNIX . */ + asm volatile ("call_pal 0x86" : : : "memory"); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + closure->tramp = (void *)ffi_go_closure_osf; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +long FFI_HIDDEN +ffi_closure_osf_inner (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, unsigned long *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn, argn, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + flags = cif->flags; + argn = 0; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. 
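The trampoline that ffi_prep_closure_loc writes above is four 4-byte Alpha instructions followed by the 8-byte address of ffi_closure_osf, which is the 24 bytes declared as FFI_TRAMPOLINE_SIZE in alpha/ffitarget.h below. A sketch of that layout, assuming the usual LP64 padding rules:

    #include <assert.h>
    #include <stdint.h>

    struct alpha_closure_tramp {
      uint32_t insn[4];      /* mov $27,$1; ldq $27,16($27); jmp $31,($27),0; nop */
      void    *closure_osf;  /* target loaded by the ldq above */
    };
    static_assert(sizeof(struct alpha_closure_tramp) == 24,
                  "matches FFI_TRAMPOLINE_SIZE");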
*/ + if (flags == ALPHA_RET_IN_MEM) + { + rvalue = (void *) argp[0]; + argn = 1; + } + + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + int type = ty->type; + void *valp = &argp[argn]; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + argn += 1; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_FLOAT: + /* Floats coming from registers need conversion from double + back to float format. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + argn += 1; + break; + + case FFI_TYPE_DOUBLE: + if (argn < 6) + valp = &argp[argn - 6]; + argn += 1; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* 128-bit long double is passed by reference. */ + valp = (void *)argp[argn]; + argn += 1; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + switch (type) + { + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passed as separate arguments, but they wind up sequential. */ + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Passed as separate arguments. Disjoint, but there's room + enough in one slot to hold the pair. */ + size = ty->elements[0]->size; + memcpy(valp + size, valp + 8, size); + break; + + case FFI_TYPE_FLOAT: + /* Passed as separate arguments. Disjoint, and each piece + may need conversion back to float. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + if (argn + 1 < 6) + sts(valp + 4, argp[argn + 1 - 6]); + else + *(UINT32 *)(valp + 4) = argp[argn + 1]; + break; + + case FFI_TYPE_DOUBLE: + /* Passed as separate arguments. Only disjoint if one part + is in fp regs and the other is on the stack. */ + if (argn < 5) + valp = &argp[argn - 6]; + else if (argn == 5) + { + valp = alloca(16); + ((UINT64 *)valp)[0] = argp[5 - 6]; + ((UINT64 *)valp)[1] = argp[6]; + } + break; + + case FFI_TYPE_LONGDOUBLE: + goto by_reference; + + default: + abort(); + } + argn += 2; + break; + + default: + abort (); + } + + avalue[i] = valp; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_osf how to perform return type promotions. */ + return (flags >> ALPHA_LD_SHIFT) & 0xff; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget.h new file mode 100644 index 0000000..a02dbd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/ffitarget.h @@ -0,0 +1,57 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Alpha. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OSF, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_OSF +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal.h new file mode 100644 index 0000000..44da192 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/internal.h @@ -0,0 +1,23 @@ +#define ALPHA_ST_VOID 0 +#define ALPHA_ST_INT 1 +#define ALPHA_ST_FLOAT 2 +#define ALPHA_ST_DOUBLE 3 +#define ALPHA_ST_CPLXF 4 +#define ALPHA_ST_CPLXD 5 + +#define ALPHA_LD_VOID 0 +#define ALPHA_LD_INT64 1 +#define ALPHA_LD_INT32 2 +#define ALPHA_LD_UINT16 3 +#define ALPHA_LD_SINT16 4 +#define ALPHA_LD_UINT8 5 +#define ALPHA_LD_SINT8 6 +#define ALPHA_LD_FLOAT 7 +#define ALPHA_LD_DOUBLE 8 +#define ALPHA_LD_CPLXF 9 +#define ALPHA_LD_CPLXD 10 + +#define ALPHA_ST_SHIFT 0 +#define ALPHA_LD_SHIFT 8 +#define ALPHA_RET_IN_MEM 0x10000 +#define ALPHA_FLAGS(S, L) (((L) << ALPHA_LD_SHIFT) | (S)) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf.S new file mode 100644 index 0000000..b031828 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/alpha/osf.S @@ -0,0 +1,282 @@ +/* ----------------------------------------------------------------------- + osf.S - Copyright (c) 1998, 2001, 2007, 2008, 2011, 2014 Red Hat + + Alpha/OSF Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, 
and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + .arch ev6 + .text + +/* Aid in building a direct addressed jump table, 4 insns per entry. */ +.macro E index + .align 4 + .org 99b + \index * 16 +.endm + +/* ffi_call_osf (void *stack, void *frame, unsigned flags, + void *raddr, void (*fnaddr)(void), void *closure) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 4 + .globl ffi_call_osf + .ent ffi_call_osf + FFI_HIDDEN(ffi_call_osf) + +ffi_call_osf: + cfi_startproc + cfi_def_cfa($17, 32) + mov $16, $30 + stq $26, 0($17) + stq $15, 8($17) + mov $17, $15 + .prologue 0 + cfi_def_cfa_register($15) + cfi_rel_offset($26, 0) + cfi_rel_offset($15, 8) + + stq $18, 16($17) # save flags into frame + stq $19, 24($17) # save rvalue into frame + mov $20, $27 # fn into place for call + mov $21, $1 # closure into static chain + + # Load up all of the (potential) argument registers. + ldq $16, 0($30) + ldt $f16, 0($30) + ldt $f17, 8($30) + ldq $17, 8($30) + ldt $f18, 16($30) + ldq $18, 16($30) + ldt $f19, 24($30) + ldq $19, 24($30) + ldt $f20, 32($30) + ldq $20, 32($30) + ldt $f21, 40($30) + ldq $21, 40($30) + + # Deallocate the register argument area. + lda $30, 48($30) + + jsr $26, ($27), 0 +0: + ldah $29, 0($26) !gpdisp!1 + ldq $2, 24($15) # reload rvalue + lda $29, 0($29) !gpdisp!1 + ldq $3, 16($15) # reload flags + lda $1, 99f-0b($26) + ldq $26, 0($15) + ldq $15, 8($15) + cfi_restore($26) + cfi_restore($15) + cfi_def_cfa($sp, 0) + cmoveq $2, ALPHA_ST_VOID, $3 # mash null rvalue to void + addq $3, $3, $3 + s8addq $3, $1, $1 # 99f + stcode * 16 + jmp $31, ($1), $st_int + + .align 4 +99: +E ALPHA_ST_VOID + ret +E ALPHA_ST_INT +$st_int: + stq $0, 0($2) + ret +E ALPHA_ST_FLOAT + sts $f0, 0($2) + ret +E ALPHA_ST_DOUBLE + stt $f0, 0($2) + ret +E ALPHA_ST_CPLXF + sts $f0, 0($2) + sts $f1, 4($2) + ret +E ALPHA_ST_CPLXD + stt $f0, 0($2) + stt $f1, 8($2) + ret + + cfi_endproc + .end ffi_call_osf + +/* ffi_closure_osf(...) + + Receives the closure argument in $1. 
*/ + +#define CLOSURE_FS (16*8) + + .align 4 + .globl ffi_go_closure_osf + .ent ffi_go_closure_osf + FFI_HIDDEN(ffi_go_closure_osf) + +ffi_go_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 8($1) # load cif + ldq $17, 16($1) # load fun + mov $1, $18 # closure is user_data + br $do_closure + + cfi_endproc + .end ffi_go_closure_osf + + .align 4 + .globl ffi_closure_osf + .ent ffi_closure_osf + FFI_HIDDEN(ffi_closure_osf) + +ffi_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + # Store all of the potential argument registers in va_list format. + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 24($1) # load cif + ldq $17, 32($1) # load fun + ldq $18, 40($1) # load user_data + +$do_closure: + stq $19, 13*8($30) + stq $20, 14*8($30) + stq $21, 15*8($30) + stt $f16, 4*8($30) + stt $f17, 5*8($30) + stt $f18, 6*8($30) + stt $f19, 7*8($30) + stt $f20, 8*8($30) + stt $f21, 9*8($30) + + # Call ffi_closure_osf_inner to do the bulk of the work. + lda $19, 2*8($30) + lda $20, 10*8($30) + jsr $26, ffi_closure_osf_inner +0: + ldah $29, 0($26) !gpdisp!2 + lda $2, 99f-0b($26) + s4addq $0, 0, $1 # ldcode * 4 + ldq $0, 16($30) # preload return value + s4addq $1, $2, $1 # 99f + ldcode * 16 + lda $29, 0($29) !gpdisp!2 + ldq $26, 0($30) + cfi_restore($26) + jmp $31, ($1), $load_32 + +.macro epilogue + addq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(-CLOSURE_FS) + ret + .align 4 + cfi_adjust_cfa_offset(CLOSURE_FS) +.endm + + .align 4 +99: +E ALPHA_LD_VOID + epilogue + +E ALPHA_LD_INT64 + epilogue + +E ALPHA_LD_INT32 +$load_32: + sextl $0, $0 + epilogue + +E ALPHA_LD_UINT16 + zapnot $0, 3, $0 + epilogue + +E ALPHA_LD_SINT16 +#ifdef __alpha_bwx__ + sextw $0, $0 +#else + sll $0, 48, $0 + sra $0, 48, $0 +#endif + epilogue + +E ALPHA_LD_UINT8 + and $0, 0xff, $0 + epilogue + +E ALPHA_LD_SINT8 +#ifdef __alpha_bwx__ + sextb $0, $0 +#else + sll $0, 56, $0 + sra $0, 56, $0 +#endif + epilogue + +E ALPHA_LD_FLOAT + lds $f0, 16($sp) + epilogue + +E ALPHA_LD_DOUBLE + ldt $f0, 16($sp) + epilogue + +E ALPHA_LD_CPLXF + lds $f0, 16($sp) + lds $f1, 20($sp) + epilogue + +E ALPHA_LD_CPLXD + ldt $f0, 16($sp) + ldt $f1, 24($sp) + epilogue + + cfi_endproc + .end ffi_closure_osf + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact.S new file mode 100644 index 0000000..03715fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/arcompact.S @@ -0,0 +1,135 @@ +/* ----------------------------------------------------------------------- + arcompact.S - Copyright (c) 2013 Synposys, Inc. 
(www.synopsys.com) + + ARCompact Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)` .type CNAME(x),%function` CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* R4: ecif.rvalue */ + /* R5: fn */ +ENTRY(ffi_call_ARCompact) + /* Save registers. */ + st.a fp, [sp, -4] /* fp + 20, fp */ + push_s blink /* fp + 16, blink */ + st.a r4, [sp, -4] /* fp + 12, ecif.rvalue */ + push_s r3 /* fp + 8, fig->flags */ + st.a r5, [sp, -4] /* fp + 4, fn */ + push_s r2 /* fp + 0, cif->bytes */ + mov fp, sp + + /* Make room for all of the new args. */ + sub sp, sp, r2 + + /* Place all of the ffi_prep_args in position. */ + /* ffi_prep_args(char *stack, extended_cif *ecif) */ + /* R1 already set. */ + + /* And call. */ + jl_s.d [r0] + mov_s r0, sp + + ld.ab r12, [fp, 4] /* cif->bytes */ + ld.ab r11, [fp, 4] /* fn */ + + /* Move first 8 parameters in registers... */ + ld_s r0, [sp] + ld_s r1, [sp, 4] + ld_s r2, [sp, 8] + ld_s r3, [sp, 12] + ld r4, [sp, 16] + ld r5, [sp, 20] + ld r6, [sp, 24] + ld r7, [sp, 28] + + /* ...and adjust the stack. */ + min r12, r12, 32 + + /* Call the function. */ + jl.d [r11] + add sp, sp, r12 + + mov sp, fp + pop_s r3 /* fig->flags, return type */ + pop_s r2 /* ecif.rvalue, pointer for return value */ + + /* If the return value pointer is NULL, assume no return value. */ + breq.d r2, 0, epilogue + pop_s blink + + /* Return INT. 
*/ + brne r3, FFI_TYPE_INT, return_double + b.d epilogue + st_s r0, [r2] + +return_double: + brne r3, FFI_TYPE_DOUBLE, epilogue + st_s r0, [r2] + st_s r1, [r2,4] + +epilogue: + j_s.d [blink] + ld.ab fp, [sp, 4] + +ENTRY(ffi_closure_ARCompact) + st.a r0, [sp, -32] + st_s r1, [sp, 4] + st_s r2, [sp, 8] + st_s r3, [sp, 12] + st r4, [sp, 16] + st r5, [sp, 20] + st r6, [sp, 24] + st r7, [sp, 28] + + /* pointer to arguments */ + mov_s r2, sp + + /* return value goes here */ + sub sp, sp, 8 + mov_s r1, sp + + push_s blink + + bl.d ffi_closure_inner_ARCompact + mov_s r0, r8 /* codeloc, set by trampoline */ + + pop_s blink + + /* set return value to r1:r0 */ + pop_s r0 + pop_s r1 + j_s.d [blink] + add_s sp, sp, 32 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi.c new file mode 100644 index 0000000..4d10b21 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffi.c @@ -0,0 +1,266 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + + ARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#include + +/* for little endian ARC, the code is in fact stored as mixed endian for + performance reasons */ +#if __BIG_ENDIAN__ +#define CODE_ENDIAN(x) (x) +#else +#define CODE_ENDIAN(x) ( (((uint32_t) (x)) << 16) | (((uint32_t) (x)) >> 16)) +#endif + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. */ + +void +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_arg)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) (*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, (*p_arg)->size); + break; + + default: + FFI_ASSERT (0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + memcpy (argp, *p_argv, z); + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_ARCompact (void (*)(char *, extended_cif *), + extended_cif *, unsigned, unsigned, + unsigned *, void (*fn) (void)); + +void +ffi_call (ffi_cif * cif, void (*fn) (void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ARCOMPACT: + ffi_call_ARCompact (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +int +ffi_closure_inner_ARCompact (ffi_closure * closure, void *rvalue, + ffi_arg * args) +{ + void **arg_area, **p_argv; + ffi_cif *cif = closure->cif; + char *argp = (char *) args; + ffi_type **p_argt; + int i; + + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + /* handle hidden argument */ + if (cif->flags == FFI_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + + p_argv = arg_area; + + for (i = 0, p_argt = cif->arg_types; i < cif->nargs; + i++, p_argt++, p_argv++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_argt)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
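Both ffi_prep_args and ffi_closure_inner_ARCompact above round each argument's alignment up to at least 4 with the ((alignment - 1) | 3) + 1 expression; a quick check of what that rounding does:

    #include <assert.h>

    /* Round an alignment below 4 up to 4; leave larger power-of-two
       alignments unchanged. */
    static int align_at_least_4(int alignment)
    {
      return ((alignment - 1) | 3) + 1;
    }

    int main(void)
    {
      assert(align_at_least_4(1) == 4);
      assert(align_at_least_4(2) == 4);
      assert(align_at_least_4(4) == 4);
      assert(align_at_least_4(8) == 8);
      return 0;
    }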
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_argt)->size; + *p_argv = (void *) argp; + argp += z; + } + + (closure->fun) (cif, rvalue, arg_area, closure->user_data); + + return cif->flags; +} + +extern void ffi_closure_ARCompact (void); + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) & (closure->tramp[0]); + + switch (cif->abi) + { + case FFI_ARCOMPACT: + FFI_ASSERT (tramp == codeloc); + tramp[0] = CODE_ENDIAN (0x200a1fc0); /* mov r8, pcl */ + tramp[1] = CODE_ENDIAN (0x20200f80); /* j [long imm] */ + tramp[2] = CODE_ENDIAN (ffi_closure_ARCompact); + break; + + default: + return FFI_BAD_ABI; + } + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + cacheflush (codeloc, FFI_TRAMPOLINE_SIZE, BCACHE); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget.h new file mode 100644 index 0000000..bf8311b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arc/ffitarget.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + Target configuration macros for ARC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi +{ + FFI_FIRST_ABI = 0, + FFI_ARCOMPACT, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_ARCOMPACT +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c new file mode 100644 index 0000000..4e27071 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffi.c @@ -0,0 +1,854 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Timothy Wall + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2011 Anthony Green + Copyright (c) 2011 Free Software Foundation + Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__arm__) || defined(_M_ARM) +#include +#include +#include +#include +#include +#include "internal.h" + +#if defined(_MSC_VER) && defined(_M_ARM) +#define WIN32_LEAN_AND_MEAN +#include +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include +#endif + +#else +#ifndef _M_ARM +extern unsigned int ffi_arm_trampoline[2] FFI_HIDDEN; +#else +extern unsigned int ffi_arm_trampoline[3] FFI_HIDDEN; +#endif +#endif + +/* Forward declares. 
*/ +static int vfp_type_p (const ffi_type *); +static void layout_vfp_args (ffi_cif *); + +static void * +ffi_align (ffi_type *ty, void *p) +{ + /* Align if necessary */ + size_t alignment; +#ifdef _WIN32_WCE + alignment = 4; +#else + alignment = ty->alignment; + if (alignment < 4) + alignment = 4; +#endif + return (void *) FFI_ALIGN (p, alignment); +} + +static size_t +ffi_put_arg (ffi_type *ty, void *src, void *dst) +{ + size_t z = ty->size; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *(UINT32 *)dst = *(SINT8 *)src; + break; + case FFI_TYPE_UINT8: + *(UINT32 *)dst = *(UINT8 *)src; + break; + case FFI_TYPE_SINT16: + *(UINT32 *)dst = *(SINT16 *)src; + break; + case FFI_TYPE_UINT16: + *(UINT32 *)dst = *(UINT16 *)src; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: +#ifndef _MSC_VER + case FFI_TYPE_FLOAT: +#endif + *(UINT32 *)dst = *(UINT32 *)src; + break; + +#ifdef _MSC_VER + // casting a float* to a UINT32* doesn't work on Windows + case FFI_TYPE_FLOAT: + *(uintptr_t *)dst = 0; + *(float *)dst = *(float *)src; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + *(UINT64 *)dst = *(UINT64 *)src; + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + memcpy (dst, src, z); + break; + + default: + abort(); + } + + return FFI_ALIGN (z, 4); +} + +/* ffi_prep_args is called once stack space has been allocated + for the function's arguments. + + The vfp_space parameter is the load area for VFP regs, the return + value is cif->vfp_used (word bitset of VFP regs used for passing + arguments). These are only used for the VFP hard-float ABI. +*/ +static void +ffi_prep_args_SYSV (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *argp) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (flags == ARM_TYPE_STRUCT) + { + *(void **) argp = rvalue; + argp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, avalue[i], argp); + } +} + +static void +ffi_prep_args_VFP (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *stack, char *vfp_space) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char stack_used = 0; + char done_with_regs = 0; + + /* The first 4 words on the stack are used for values + passed in core registers. */ + regp = stack; + eo_regp = argp = regp + 16; + + /* If the function returns an FFI_TYPE_STRUCT in memory, + that address is passed in r0 to the function. */ + if (flags == ARM_TYPE_STRUCT) + { + *(void **) regp = rvalue; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *a = avalue[i]; + int is_vfp_type = vfp_type_p (ty); + + /* Allocated in VFP registers. */ + if (vi < cif->vfp_nargs && is_vfp_type) + { + char *vfp_slot = vfp_space + cif->vfp_args[vi++] * 4; + ffi_put_arg (ty, a, vfp_slot); + continue; + } + /* Try allocating in core registers. */ + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + size_t size = ty->size; + size = (size < 4) ? 4 : size; // pad + /* Check if there is space left in the aligned register + area to place the argument. 
*/ + if (tregp + size <= eo_regp) + { + regp = tregp + ffi_put_arg (ty, a, tregp); + done_with_regs = (regp == argp); + // ensure we did not write into the stack area + FFI_ASSERT (regp <= argp); + continue; + } + /* In case there are no arguments in the stack area yet, + the argument is passed in the remaining core registers + and on the stack. */ + else if (!stack_used) + { + stack_used = 1; + done_with_regs = 1; + argp = tregp + ffi_put_arg (ty, a, tregp); + FFI_ASSERT (eo_regp < argp); + continue; + } + } + /* Base case, arguments are passed on the stack */ + stack_used = 1; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, a, argp); + } +} + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int flags = 0, cabi = cif->abi; + size_t bytes = cif->bytes; + + /* Map out the register placements of VFP register args. The VFP + hard-float calling conventions are slightly more sophisticated + than the base calling conventions, so we do it here instead of + in ffi_prep_args(). */ + if (cabi == FFI_VFP) + layout_vfp_args (cif); + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = ARM_TYPE_VOID; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + flags = ARM_TYPE_INT; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = ARM_TYPE_INT64; + break; + + case FFI_TYPE_FLOAT: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_S : ARM_TYPE_INT); + break; + case FFI_TYPE_DOUBLE: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_D : ARM_TYPE_INT64); + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + if (cabi == FFI_VFP) + { + int h = vfp_type_p (cif->rtype); + + flags = ARM_TYPE_VFP_N; + if (h == 0x100 + FFI_TYPE_FLOAT) + flags = ARM_TYPE_VFP_S; + if (h == 0x100 + FFI_TYPE_DOUBLE) + flags = ARM_TYPE_VFP_D; + if (h != 0) + break; + } + + /* A Composite Type not larger than 4 bytes is returned in r0. + A Composite Type larger than 4 bytes, or whose size cannot + be determined statically ... is stored in memory at an + address passed [in r0]. */ + if (cif->rtype->size <= 4) + flags = ARM_TYPE_INT; + else + { + flags = ARM_TYPE_STRUCT; + bytes += 4; + } + break; + + default: + abort(); + } + + /* Round the stack up to a multiple of 8 bytes. This isn't needed + everywhere, but it is on some platforms, and it doesn't harm anything + when it isn't needed. */ + bytes = FFI_ALIGN (bytes, 8); + + /* Minimum stack space is the 4 register arguments that we pop. */ + if (bytes < 4*4) + bytes = 4*4; + + cif->bytes = bytes; + cif->flags = flags; + + return FFI_OK; +} + +/* Perform machine dependent cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif * cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + /* VFP variadic calls actually use the SYSV ABI */ + if (cif->abi == FFI_VFP) + cif->abi = FFI_SYSV; + + return ffi_prep_cif_machdep (cif); +} + +/* Prototypes for assembly functions, in sysv.S. 
*/ + +struct call_frame +{ + void *fp; + void *lr; + void *rvalue; + int flags; + void *closure; +}; + +extern void ffi_call_SYSV (void *stack, struct call_frame *, + void (*fn) (void)) FFI_HIDDEN; +extern void ffi_call_VFP (void *vfp_space, struct call_frame *, + void (*fn) (void), unsigned vfp_used) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif * cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + int flags = cif->flags; + ffi_type *rtype = cif->rtype; + size_t bytes, rsize, vfp_size; + char *stack, *vfp_space, *new_rvalue; + struct call_frame *frame; + + rsize = 0; + if (rvalue == NULL) + { + /* If the return value is a struct and we don't have a return + value address then we need to make one. Otherwise the return + value is in registers and we can ignore them. */ + if (flags == ARM_TYPE_STRUCT) + rsize = rtype->size; + else + flags = ARM_TYPE_VOID; + } + else if (flags == ARM_TYPE_VFP_N) + { + /* Largest case is double x 4. */ + rsize = 32; + } + else if (flags == ARM_TYPE_INT && rtype->type == FFI_TYPE_STRUCT) + rsize = 4; + + /* Largest case. */ + vfp_size = (cif->abi == FFI_VFP && cif->vfp_used ? 8*8: 0); + + bytes = cif->bytes; + stack = alloca (vfp_size + bytes + sizeof(struct call_frame) + rsize); + + vfp_space = NULL; + if (vfp_size) + { + vfp_space = stack; + stack += vfp_size; + } + + frame = (struct call_frame *)(stack + bytes); + + new_rvalue = rvalue; + if (rsize) + new_rvalue = (void *)(frame + 1); + + frame->rvalue = new_rvalue; + frame->flags = flags; + frame->closure = closure; + + if (vfp_space) + { + ffi_prep_args_VFP (cif, flags, new_rvalue, avalue, stack, vfp_space); + ffi_call_VFP (vfp_space, frame, fn, cif->vfp_used); + } + else + { + ffi_prep_args_SYSV (cif, flags, new_rvalue, avalue, stack); + ffi_call_SYSV (stack, frame, fn); + } + + if (rvalue && rvalue != new_rvalue) + memcpy (rvalue, new_rvalue, rtype->size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +static void * +ffi_prep_incoming_args_SYSV (ffi_cif *cif, void *rvalue, + char *argp, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + else + { + if (cif->rtype->size && cif->rtype->size < 4) + *(uint32_t *) rvalue = 0; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +static void * +ffi_prep_incoming_args_VFP (ffi_cif *cif, void *rvalue, char *stack, + char *vfp_space, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char done_with_regs = 0; + char stack_used = 0; + + regp = stack; + eo_regp = argp = regp + 16; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) regp; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + int is_vfp_type = vfp_type_p (ty); + size_t z = ty->size; + + if (vi < cif->vfp_nargs && is_vfp_type) + { + avalue[i] = vfp_space + cif->vfp_args[vi++] * 4; + continue; + } + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + + z = (z < 4) ? 
4 : z; // pad + + /* If the arguments either fits into the registers or uses registers + and stack, while we haven't read other things from the stack */ + if (tregp + z <= eo_regp || !stack_used) + { + /* Because we're little endian, this is what it turns into. */ + avalue[i] = (void *) tregp; + regp = tregp + z; + + /* If we read past the last core register, make sure we + have not read from the stack before and continue + reading after regp. */ + if (regp > eo_regp) + { + FFI_ASSERT (!stack_used); + argp = regp; + } + if (regp >= eo_regp) + { + done_with_regs = 1; + stack_used = 1; + } + continue; + } + } + + stack_used = 1; + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +struct closure_frame +{ + char vfp_space[8*8] __attribute__((aligned(8))); + char result[8*4]; + char argp[]; +}; + +int FFI_HIDDEN +ffi_closure_inner_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_SYSV (cif, frame->result, + frame->argp, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +int FFI_HIDDEN +ffi_closure_inner_VFP (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_VFP (cif, frame->result, frame->argp, + frame->vfp_space, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +void ffi_closure_SYSV (void) FFI_HIDDEN; +void ffi_closure_VFP (void) FFI_HIDDEN; +void ffi_go_closure_SYSV (void) FFI_HIDDEN; +void ffi_go_closure_VFP (void) FFI_HIDDEN; + +/* the cif must already be prep'ed */ + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, + ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + void (*closure_func) (void) = ffi_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. 
*/ + if (cif->vfp_used) + closure_func = ffi_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + +#if FFI_EXEC_TRAMPOLINE_TABLE + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = closure_func; +#else + +#ifndef _M_ARM + memcpy(closure->tramp, ffi_arm_trampoline, 8); +#else + // cast away function type so MSVC doesn't set the lower bit of the function pointer + memcpy(closure->tramp, (void*)((uintptr_t)ffi_arm_trampoline & 0xFFFFFFFE), FFI_TRAMPOLINE_CLOSURE_OFFSET); +#endif + +#if defined (__QNX__) + msync(closure->tramp, 8, 0x1000000); /* clear data map */ + msync(codeloc, 8, 0x1000000); /* clear insn map */ +#elif defined(_MSC_VER) + FlushInstructionCache(GetCurrentProcess(), closure->tramp, FFI_TRAMPOLINE_SIZE); +#else + __clear_cache(closure->tramp, closure->tramp + 8); /* clear data map */ + __clear_cache(codeloc, codeloc + 8); /* clear insn map */ +#endif +#ifdef _M_ARM + *(void(**)(void))(closure->tramp + FFI_TRAMPOLINE_CLOSURE_FUNCTION) = closure_func; +#else + *(void (**)(void))(closure->tramp + 8) = closure_func; +#endif +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + void (*closure_func) (void) = ffi_go_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_go_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = closure_func; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Below are routines for VFP hard-float support. */ + +/* A subroutine of vfp_type_p. Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of vfp_type_p. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY is an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is a floating point scalar. + + Returns non-zero iff TY is an HFA. The result is an encoded value where + bits 0-7 contain the type code, and bits 8-10 contain the element count. */ + +static int +vfp_type_p (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. 
*/ + candidate = ty->type; + switch (ty->type) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + if (candidate != FFI_TYPE_FLOAT && candidate != FFI_TYPE_DOUBLE) + return 0; + ele_count = 2; + goto done; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 32 bytes. */ + size = ty->size; + if (size < 4 || size > 32) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return (ele_count << 8) | candidate; +} + +static int +place_vfp_arg (ffi_cif *cif, int h) +{ + unsigned short reg = cif->vfp_reg_free; + int align = 1, nregs = h >> 8; + + if ((h & 0xff) == FFI_TYPE_DOUBLE) + align = 2, nregs *= 2; + + /* Align register number. */ + if ((reg & 1) && align == 2) + reg++; + + while (reg + nregs <= 16) + { + int s, new_used = 0; + for (s = reg; s < reg + nregs; s++) + { + new_used |= (1 << s); + if (cif->vfp_used & (1 << s)) + { + reg += align; + goto next_reg; + } + } + /* Found regs to allocate. */ + cif->vfp_used |= new_used; + cif->vfp_args[cif->vfp_nargs++] = (signed char)reg; + + /* Update vfp_reg_free. */ + if (cif->vfp_used & (1 << cif->vfp_reg_free)) + { + reg += nregs; + while (cif->vfp_used & (1 << reg)) + reg += 1; + cif->vfp_reg_free = reg; + } + return 0; + next_reg:; + } + // done, mark all regs as used + cif->vfp_reg_free = 16; + cif->vfp_used = 0xFFFF; + return 1; +} + +static void +layout_vfp_args (ffi_cif * cif) +{ + unsigned int i; + /* Init VFP fields */ + cif->vfp_used = 0; + cif->vfp_nargs = 0; + cif->vfp_reg_free = 0; + memset (cif->vfp_args, -1, 16); /* Init to -1. */ + + for (i = 0; i < cif->nargs; i++) + { + int h = vfp_type_p (cif->arg_types[i]); + if (h && place_vfp_arg (cif, h) == 1) + break; + } +} + +#endif /* __arm__ or _M_ARM */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget.h new file mode 100644 index 0000000..cb57b84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/ffitarget.h @@ -0,0 +1,89 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#if defined(__ARM_PCS_VFP) || defined(_M_ARM) + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#ifndef _M_ARM +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#ifdef _MSC_VER +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_FUNCTION 12 +#else +#define FFI_TRAMPOLINE_SIZE 12 +#endif +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal.h new file mode 100644 index 0000000..6cf0b2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/internal.h @@ -0,0 +1,7 @@ +#define ARM_TYPE_VFP_S 0 +#define ARM_TYPE_VFP_D 1 +#define ARM_TYPE_VFP_N 2 +#define ARM_TYPE_INT64 3 +#define ARM_TYPE_INT 4 +#define ARM_TYPE_VOID 5 +#define ARM_TYPE_STRUCT 6 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S new file mode 100644 index 0000000..63180a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv.S @@ -0,0 +1,385 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. 
+ + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __arm__ +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. */ +#ifndef __ARM_ARCH +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ + || defined(__ARM_ARCH_6M__) +# define __ARM_ARCH 6 +# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \ + || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# else +# define __ARM_ARCH 4 +# endif +#endif + +/* Conditionally compile unwinder directives. */ +#ifdef __ARM_EABI__ +# define UNWIND(...) __VA_ARGS__ +#else +# define UNWIND(...) +#endif + +#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__) + .cfi_sections .debug_frame +#endif + +#define CONCAT(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +#ifdef __USER_LABEL_PREFIX__ +# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X) +#else +# define CNAME(X) X +#endif +#ifdef __ELF__ +# define SIZE(X) .size CNAME(X), . - CNAME(X) +# define TYPE(X, Y) .type CNAME(X), Y +#else +# define SIZE(X) +# define TYPE(X, Y) +#endif + +#define ARM_FUNC_START_LOCAL(name) \ + .align 3; \ + TYPE(CNAME(name), %function); \ + CNAME(name): + +#define ARM_FUNC_START(name) \ + .globl CNAME(name); \ + FFI_HIDDEN(CNAME(name)); \ + ARM_FUNC_START_LOCAL(name) + +#define ARM_FUNC_END(name) \ + SIZE(name) + +/* Aid in defining a jump table with 8 bytes between entries. */ +/* ??? The clang assembler doesn't handle .if with symbolic expressions. */ +#ifdef __clang__ +# define E(index) +#else +# define E(index) \ + .if . - 0b - 8*index; \ + .error "type table out of sync"; \ + .endif +#endif + + .text + .syntax unified + .arm + +#ifndef __clang__ + /* We require interworking on LDM, which implies ARMv5T, + which implies the existance of BLX. */ + .arch armv5t +#endif + + /* Note that we use STC and LDC to encode VFP instructions, + so that we do not need ".fpu vfp", nor get that added to + the object file attributes. These will not be executed + unless the FFI_VFP abi is used. 
*/ + + @ r0: stack + @ r1: frame + @ r2: fn + @ r3: vfp_used + +ARM_FUNC_START(ffi_call_VFP) + UNWIND(.fnstart) + cfi_startproc + + cmp r3, #3 @ load only d0 if possible +#ifdef __clang__ + vldrle d0, [sp] + vldmgt sp, {d0-d7} +#else + ldcle p11, cr0, [r0] @ vldrle d0, [sp] + ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7} +#endif + add r0, r0, #64 @ discard the vfp register args + /* FALLTHRU */ +ARM_FUNC_END(ffi_call_VFP) + +ARM_FUNC_START(ffi_call_SYSV) + stm r1, {fp, lr} + mov fp, r1 + + @ This is a bit of a lie wrt the origin of the unwind info, but + @ now we've got the usual frame pointer and two saved registers. + UNWIND(.save {fp,lr}) + UNWIND(.setfp fp, sp) + cfi_def_cfa(fp, 8) + cfi_rel_offset(fp, 0) + cfi_rel_offset(lr, 4) + + mov sp, r0 @ install the stack pointer + mov lr, r2 @ move the fn pointer out of the way + ldr ip, [fp, #16] @ install the static chain + ldmia sp!, {r0-r3} @ move first 4 parameters in registers. + blx lr @ call fn + + @ Load r2 with the pointer to storage for the return value + @ Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + @ Deallocate the stack with the arguments. + mov sp, fp + cfi_def_cfa_register(sp) + + @ Store values stored in registers. + .align 3 + add pc, pc, r3, lsl #3 + nop +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vstr s0, [r2] +#else + stc p10, cr0, [r2] @ vstr s0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vstr d0, [r2] +#else + stc p11, cr0, [r2] @ vstr d0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vstm r2, {d0-d3} +#else + stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3} +#endif + pop {fp,pc} +E(ARM_TYPE_INT64) + str r1, [r2, #4] + nop +E(ARM_TYPE_INT) + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID) + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT) + pop {fp,pc} + + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_call_SYSV) + + +/* + int ffi_closure_inner_* (cif, fun, user_data, frame) +*/ + +ARM_FUNC_START(ffi_go_closure_SYSV) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_SYSV) + +ARM_FUNC_START(ffi_closure_SYSV) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 @ compute entry sp + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) + stmdb sp!, {ip,lr} + + /* Remember that EABI unwind info only applies at call sites. + We need do nothing except note the save of the stack pointer + and the link registers. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_SYSV) + + @ Load values returned in registers. 
+ add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_SYSV) + +ARM_FUNC_START(ffi_go_closure_VFP) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_VFP) + +ARM_FUNC_START(ffi_closure_VFP) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) +#ifdef __clang__ + vstm sp, {d0-d7} +#else + stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7} +#endif + stmdb sp!, {ip,lr} + + /* See above. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_VFP) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_VFP) + +/* Load values returned in registers for both closure entry points. + Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. */ + +ARM_FUNC_START_LOCAL(ffi_closure_ret) + cfi_startproc + cfi_rel_offset(sp, 0) + cfi_rel_offset(lr, 4) +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vldr s0, [r2] +#else + ldc p10, cr0, [r2] @ vldr s0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vldr d0, [r2] +#else + ldc p11, cr0, [r2] @ vldr d0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vldm r2, {d0-d3} +#else + ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3} +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_INT64) + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT) + ldr r0, [r2] + ldm sp, {sp,pc} +E(ARM_TYPE_VOID) + ldm sp, {sp,pc} + nop +E(ARM_TYPE_STRUCT) + ldm sp, {sp,pc} + cfi_endproc +ARM_FUNC_END(ffi_closure_ret) + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + +.align PAGE_MAX_SHIFT +ARM_FUNC_START(ffi_closure_trampoline_table_page) +.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr ip, #-PAGE_MAX_SIZE @ the config page is PAGE_MAX_SIZE behind the trampoline page + sub ip, #8 @ account for pc bias + ldr pc, [ip, #4] @ jump to ffi_closure_SYSV or ffi_closure_VFP +.endr +ARM_FUNC_END(ffi_closure_trampoline_table_page) +#endif + +#else + +ARM_FUNC_START(ffi_arm_trampoline) +0: adr ip, 0b + ldr pc, 1f +1: .long 0 +ARM_FUNC_END(ffi_arm_trampoline) + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ +#endif /* __arm__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S new file mode 100644 index 0000000..5c99d02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. 
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2019 Microsoft Corporation. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" +#include "ksarm.h" + + + ; 8 byte aligned AREA to support 8 byte aligned jump tables + MACRO + NESTED_ENTRY_FFI $FuncName, $AreaName, $ExceptHandler + + ; compute the function's labels + __DeriveFunctionLabels $FuncName + + ; determine the area we will put the function into +__FuncArea SETS "|.text|" + IF "$AreaName" != "" +__FuncArea SETS "$AreaName" + ENDIF + + ; set up the exception handler itself +__FuncExceptionHandler SETS "" + IF "$ExceptHandler" != "" +__FuncExceptionHandler SETS "|$ExceptHandler|" + ENDIF + + ; switch to the specified area, jump tables require 8 byte alignment + AREA $__FuncArea,CODE,CODEALIGN,ALIGN=3,READONLY + + ; export the function name + __ExportProc $FuncName + + ; flush any pending literal pool stuff + ROUT + + ; reset the state of the unwind code tracking + __ResetUnwindState + + MEND + +; MACRO +; TABLE_ENTRY $Type, $Table +;$Type_$Table +; MEND + +#define E(index,table) return_##index##_##table + + ; r0: stack + ; r1: frame + ; r2: fn + ; r3: vfp_used + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_VFP + cmp r3, #3 ; load only d0 if possible + vldrle d0, [r0] + vldmgt r0, {d0-d7} + add r0, r0, #64 ; discard the vfp register args + b ffi_call_SYSV + NESTED_END ffi_call_VFP_fake + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_SYSV + stm r1, {fp, lr} + mov fp, r1 + + mov sp, r0 ; install the stack pointer + mov lr, r2 ; move the fn pointer out of the way + ldr ip, [fp, #16] ; install the static chain + ldmia sp!, {r0-r3} ; move first 4 parameters in registers. + blx lr ; call fn + + ; Load r2 with the pointer to storage for the return value + ; Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + ; Deallocate the stack with the arguments. + mov sp, fp + + ; Store values stored in registers. 
+ ALIGN 8 + lsl r3, #3 + add r3, r3, pc + add r3, #8 + mov pc, r3 + + +E(ARM_TYPE_VFP_S, ffi_call) + ALIGN 8 + vstr s0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_D, ffi_call) + ALIGN 8 + vstr d0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_N, ffi_call) + ALIGN 8 + vstm r2, {d0-d3} + pop {fp,pc} +E(ARM_TYPE_INT64, ffi_call) + ALIGN 8 + str r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_call) + ALIGN 8 + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID, ffi_call) + ALIGN 8 + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT, ffi_call) + ALIGN 8 + cmp r3, #ARM_TYPE_STRUCT + pop {fp,pc} + NESTED_END ffi_call_SYSV_fake + + IMPORT |ffi_closure_inner_SYSV| + /* + int ffi_closure_inner_SYSV + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_SYSV + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_SYSV_0 + NESTED_END ffi_go_closure_SYSV + + ; r3: ffi_closure + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_closure_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + ALTERNATE_ENTRY ffi_closure_SYSV + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; ffi_closure->cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; ffi_closure->fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; ffi_closure->user_data + + ALTERNATE_ENTRY ffi_go_closure_SYSV_0 + add ip, sp, #16 ; compute entry sp + + sub sp, sp, #64+32 ; allocate frame parameter (sizeof(vfp_space) = 64, sizeof(result) = 32) + mov r3, sp ; set frame parameter + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_SYSV ; call the Python closure + + ; Load values returned in registers. + add r2, sp, #64+8 ; address of closure_frame->result + bl ffi_closure_ret ; move result to correct register or memory for type + + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_SYSV_fake + + IMPORT |ffi_closure_inner_VFP| + /* + int ffi_closure_inner_VFP + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_VFP + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_VFP_0 + NESTED_END ffi_go_closure_VFP + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + ; r3: closure + NESTED_ENTRY_FFI ffi_closure_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_closure_VFP + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; load user_data + + ALTERNATE_ENTRY ffi_go_closure_VFP_0 + add ip, sp, #16 ; compute entry sp + sub sp, sp, #32 ; save space for closure_frame->result + vstmdb sp!, {d0-d7} ; push closure_frame->vfp_space + + mov r3, sp ; save closure_frame + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_VFP + + ; Load values returned in registers. + add r2, sp, #64+8 ; load result + bl ffi_closure_ret + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_VFP_fake + +/* Load values returned in registers for both closure entry points. 
+ Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. */ + + NESTED_ENTRY_FFI ffi_closure_ret + stmdb sp!, {fp,lr} + + ALIGN 8 + lsl r0, #3 + add r0, r0, pc + add r0, #8 + mov pc, r0 + +E(ARM_TYPE_VFP_S, ffi_closure) + ALIGN 8 + vldr s0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_D, ffi_closure) + ALIGN 8 + vldr d0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_N, ffi_closure) + ALIGN 8 + vldm r2, {d0-d3} + b call_epilogue +E(ARM_TYPE_INT64, ffi_closure) + ALIGN 8 + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_closure) + ALIGN 8 + ldr r0, [r2] + b call_epilogue +E(ARM_TYPE_VOID, ffi_closure) + ALIGN 8 + b call_epilogue + nop +E(ARM_TYPE_STRUCT, ffi_closure) + ALIGN 8 + b call_epilogue +call_epilogue + ldmfd sp!, {fp,pc} + NESTED_END ffi_closure_ret + + AREA |.trampoline|, DATA, THUMB, READONLY + EXPORT |ffi_arm_trampoline| +|ffi_arm_trampoline| DATA +thisproc adr ip, thisproc + stmdb sp!, {ip, r0} + ldr pc, [pc, #0] + DCD 0 + ;ENDP + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi.c new file mode 100644 index 0000000..3d43397 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffi.c @@ -0,0 +1,423 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include + +/* #define DEBUG */ + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned int, unsigned int, unsigned int*, unsigned int, + void (*fn)(void)); +extern void ffi_closure_SYSV (ffi_closure *); + +unsigned int pass_struct_on_stack(ffi_type *type) +{ + if(type->type != FFI_TYPE_STRUCT) + return 0; + + if(type->alignment < type->size && + !(type->size == 4 || type->size == 8) && + !(type->size == 8 && type->alignment >= 4)) + return 1; + + if(type->size == 3 || type->size == 5 || type->size == 6 || + type->size == 7) + return 1; + + return 0; +} + +/* ffi_prep_args is called by the assembly routine once stack space + * has been allocated for the function's arguments + * + * This is annoyingly complex since we need to keep track of used + * registers. 
+ */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + char *reg_base = stack; + char *stack_base = stack + 20; + unsigned int stack_offset = 0; + unsigned int reg_mask = 0; + + p_argv = ecif->avalue; + + /* If cif->flags is struct then we know it's not passed in registers */ + if(ecif->cif->flags == FFI_TYPE_STRUCT) + { + *(void**)reg_base = ecif->rvalue; + reg_mask |= 1; + } + + for(i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + int type = (*p_arg)->type; + char *addr = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + else if(z == sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + addr = reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + addr = reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + addr = reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!addr) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + + if(type == FFI_TYPE_STRUCT && (*p_arg)->elements[1] == NULL) + type = (*p_arg)->elements[0]->type; + + switch(type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8 *)(*p_argv); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8 *)(*p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16 *)(*p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16 *)(*p_argv); + break; + default: + memcpy(addr, *p_argv, z); + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < 5; i++) + { + if((reg_mask & (1 << i)) == 0) + printf("r%d: (unused)\n", 12 - i); + else + printf("r%d: 0x%08x\n", 12 - i, ((unsigned int*)reg_base)[i]); + } + + for(i = 0; i < stack_offset / 4; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack_base)[i]); + } +#endif +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Round the stack up to a multiple of 8 bytes. This isn't needed + * everywhere, but it is on some platforms, and it doesn't harm + * anything when it isn't needed. 
*/ + cif->bytes = (cif->bytes + 7) & ~7; + + /* Flag to indicate that he return value is in fact a struct */ + cif->rstruct_flag = 0; + + /* Set the return type flag */ + switch(cif->rtype->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + cif->flags = (unsigned)FFI_TYPE_UINT8; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = (unsigned)FFI_TYPE_UINT16; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + cif->flags = (unsigned)FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned)FFI_TYPE_UINT64; + break; + case FFI_TYPE_STRUCT: + cif->rstruct_flag = 1; + if(!pass_struct_on_stack(cif->rtype)) + { + if(cif->rtype->size <= 1) + cif->flags = (unsigned)FFI_TYPE_UINT8; + else if(cif->rtype->size <= 2) + cif->flags = (unsigned)FFI_TYPE_UINT16; + else if(cif->rtype->size <= 4) + cif->flags = (unsigned)FFI_TYPE_UINT32; + else if(cif->rtype->size <= 8) + cif->flags = (unsigned)FFI_TYPE_UINT64; + else + cif->flags = (unsigned)cif->rtype->type; + } + else + cif->flags = (unsigned)cif->rtype->type; + break; + default: + cif->flags = (unsigned)cif->rtype->type; + break; + } + + return FFI_OK; +} + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + unsigned int size = 0, i = 0; + ffi_type **p_arg; + + ecif.cif = cif; + ecif.avalue = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + /* If the return value is a struct and we don't have a return value + * address then we need to make one */ + + /* If cif->flags is struct then it's not suitable for registers */ + if((rvalue == NULL) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch(cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, size, cif->flags, + ecif.rvalue, cif->rstruct_flag, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif) +{ + register unsigned int i, reg_mask = 0; + register void **p_argv; + register ffi_type **p_arg; + register char *reg_base = stack; + register char *stack_base = stack + 20; + register unsigned int stack_offset = 0; + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs + 7; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack)[i]); + } +#endif + + /* If cif->flags is struct then we know it's not passed in registers */ + if(cif->flags == FFI_TYPE_STRUCT) + { + *rvalue = *(void **)reg_base; + reg_mask |= 1; + } + + p_argv = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + + *p_argv = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + *p_argv = (void*)stack_base + stack_offset; + stack_offset += z; + } + else if(z <= sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + *p_argv = (void*)reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + *p_argv = (void*)reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + *p_argv = (void*)reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!*p_argv) + { + *p_argv = (void*)stack_base + stack_offset; + 
stack_offset += z; + } + + if((*p_arg)->type != FFI_TYPE_STRUCT || + (*p_arg)->elements[1] == NULL) + { + if(alignment == 1) + **(unsigned int**)p_argv <<= 24; + else if(alignment == 2) + **(unsigned int**)p_argv <<= 16; + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs; i++) + { + printf("sp+%d: 0x%08x\n", i*4, *(((unsigned int**)avalue)[i])); + } +#endif +} + +/* This function is jumped to by the trampoline */ + +unsigned int ffi_closure_SYSV_inner(ffi_closure *closure, void **respp, + void *args) +{ + ffi_cif *cif; + void **arg_area; + unsigned int i, size = 0; + ffi_type **p_arg; + + cif = closure->cif; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + arg_area = (void **)alloca(size); + + /* this call will initialize ARG_AREA, such that each element in that + * array points to the corresponding value on the stack; and if the + * function returns a structure, it will re-set RESP to point to the + * structure return address. */ + + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif); + + (closure->fun)(cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status ffi_prep_closure_loc(ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + unsigned char *__tramp = (unsigned char*)(&closure->tramp[0]); + unsigned int __fun = (unsigned int)(&ffi_closure_SYSV); + unsigned int __ctx = (unsigned int)(codeloc); + unsigned int __rstruct_flag = (unsigned int)(cif->rstruct_flag); + unsigned int __inner = (unsigned int)(&ffi_closure_SYSV_inner); + *(unsigned int*) &__tramp[0] = 0xebcd1f00; /* pushm r8-r12 */ + *(unsigned int*) &__tramp[4] = 0xfefc0010; /* ld.w r12, pc[16] */ + *(unsigned int*) &__tramp[8] = 0xfefb0010; /* ld.w r11, pc[16] */ + *(unsigned int*) &__tramp[12] = 0xfefa0010; /* ld.w r10, pc[16] */ + *(unsigned int*) &__tramp[16] = 0xfeff0010; /* ld.w pc, pc[16] */ + *(unsigned int*) &__tramp[20] = __ctx; + *(unsigned int*) &__tramp[24] = __rstruct_flag; + *(unsigned int*) &__tramp[28] = __inner; + *(unsigned int*) &__tramp[32] = __fun; + syscall(__NR_cacheflush, 0, (&__tramp[0]), 36); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget.h new file mode 100644 index 0000000..d0c7586 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/ffitarget.h @@ -0,0 +1,55 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2009 Bradley Smith + Target configuration macros for AVR32. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS unsigned int rstruct_flag + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 36 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv.S new file mode 100644 index 0000000..a984b3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/avr32/sysv.S @@ -0,0 +1,208 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + --------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* r12: ffi_prep_args + * r11: &ecif + * r10: size + * r9: cif->flags + * r8: ecif.rvalue + * sp+0: cif->rstruct_flag + * sp+4: fn */ + + .text + .align 1 + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + stm --sp, r0,r1,lr + stm --sp, r8-r12 + mov r0, sp + + /* Make room for all of the new args. */ + sub sp, r10 + /* Pad to make way for potential skipped registers */ + sub sp, 20 + + /* Call ffi_prep_args(stack, &ecif). */ + /* r11 already set */ + mov r1, r12 + mov r12, sp + icall r1 + + /* Save new argument size */ + mov r1, r12 + + /* Move first 5 parameters in registers. */ + ldm sp++, r8-r12 + + /* call (fn) (...). */ + ld.w r1, r0[36] + icall r1 + + /* Remove the space we pushed for the args. 
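The register contract spelled out at the top of ffi_call_SYSV (r12 = ffi_prep_args, r11 = &ecif, r10 = argument size, r9 = cif->flags, r8 = ecif.rvalue, with rstruct_flag and fn on the stack) is exactly what the C-level ffi_call earlier in ffi.c passes down. Applications never see any of that; they go through ffi_prep_cif and ffi_call, as in this small sketch (the add() target is invented, the ffi_* calls are real API):

#include <ffi.h>
#include <stdio.h>

static int add(int a, int b) { return a + b; }   /* invented call target */

int main(void)
{
  ffi_cif cif;
  ffi_type *argt[2] = { &ffi_type_sint, &ffi_type_sint };
  int a = 19, b = 23;
  void *argv[2] = { &a, &b };
  ffi_arg rc;                      /* integral results are widened to ffi_arg */

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argt) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(add), &rc, argv);
      printf("%d\n", (int)rc);     /* prints 42 */
    }
  return 0;
}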
*/ + mov sp, r0 + + /* Load r1 with the rstruct flag. */ + ld.w r1, sp[32] + + /* Load r9 with the return type code. */ + ld.w r9, sp[12] + + /* Load r8 with the return value pointer. */ + ld.w r8, sp[16] + + /* If the return value pointer is NULL, assume no return value. */ + cp.w r8, 0 + breq .Lend + + /* Check if return type is actually a struct */ + cp.w r1, 0 + breq 1f + + /* Return 8bit */ + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore8 + + /* Return 16bit */ + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore16 + +1: + /* Return 32bit */ + cp.w r9, FFI_TYPE_UINT32 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore32 + + /* Return 64bit */ + cp.w r9, FFI_TYPE_UINT64 + breq .Lstore64 + + /* Didn't match anything */ + bral .Lend + +.Lstore64: + st.w r8[0], r11 + st.w r8[4], r10 + bral .Lend + +.Lstore32: + st.w r8[0], r12 + bral .Lend + +.Lstore16: + st.h r8[0], r12 + bral .Lend + +.Lstore8: + st.b r8[0], r12 + bral .Lend + +.Lend: + sub sp, -20 + ldm sp++, r0,r1,pc + + .size ffi_call_SYSV, . - ffi_call_SYSV + + + /* r12: __ctx + * r11: __rstruct_flag + * r10: __inner */ + + .align 1 + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + stm --sp, r0,lr + mov r0, r11 + mov r8, r10 + sub r10, sp, -8 + sub sp, 12 + st.w sp[8], sp + sub r11, sp, -8 + icall r8 + + /* Check if return type is actually a struct */ + cp.w r0, 0 + breq 1f + + /* Return 8bit */ + cp.w r12, FFI_TYPE_UINT8 + breq .Lget8 + + /* Return 16bit */ + cp.w r12, FFI_TYPE_UINT16 + breq .Lget16 + +1: + /* Return 32bit */ + cp.w r12, FFI_TYPE_UINT32 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT16 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT8 + breq .Lget32 + + /* Return 64bit */ + cp.w r12, FFI_TYPE_UINT64 + breq .Lget64 + + /* Didn't match anything */ + bral .Lclend + +.Lget64: + ld.w r11, sp[0] + ld.w r10, sp[4] + bral .Lclend + +.Lget32: + ld.w r12, sp[0] + bral .Lclend + +.Lget16: + ld.uh r12, sp[0] + bral .Lclend + +.Lget8: + ld.ub r12, sp[0] + bral .Lclend + +.Lclend: + sub sp, -12 + ldm sp++, r0,lr + sub sp, -20 + mov pc, lr + + .size ffi_closure_SYSV, . - ffi_closure_SYSV + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi.c new file mode 100644 index 0000000..22a2acd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffi.c @@ -0,0 +1,196 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Alexandre K. I. de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
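The .Lstore8/.Lstore16/.Lstore32/.Lstore64 labels above pick the store width from the flag computed in ffi_prep_cif_machdep: 32-bit results arrive in r12, 64-bit results in r11:r10, and small aggregates that fit in registers were reclassified by size to one of those integer widths, while anything larger keeps FFI_TYPE_STRUCT and goes through the hidden return pointer. A plain-C restatement of that size-to-width rule (classify_return() is an invented helper, not a libffi symbol):

#include <ffi.h>
#include <stddef.h>
#include <stdio.h>

/* classify_return() only illustrates the rule used by the AVR32 port. */
static unsigned classify_return(size_t size, int fits_in_registers)
{
  if (!fits_in_registers || size > 8)
    return FFI_TYPE_STRUCT;        /* returned through the hidden pointer */
  if (size <= 1) return FFI_TYPE_UINT8;
  if (size <= 2) return FFI_TYPE_UINT16;
  if (size <= 4) return FFI_TYPE_UINT32;
  return FFI_TYPE_UINT64;          /* 5..8 bytes use the register pair */
}

int main(void)
{
  printf("3-byte struct  -> flag %u\n", classify_return(3, 1));
  printf("12-byte struct -> flag %u\n", classify_return(12, 0));
  return 0;
}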
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +#include +#include + +#include +#include + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 3 + +/* + * Return types + */ +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 + +/*====================================================================*/ +/* PROTOTYPE * + /*====================================================================*/ +void ffi_prep_args(unsigned char *, extended_cif *); + +/*====================================================================*/ +/* Externals */ +/* (Assembly) */ +/*====================================================================*/ + +extern void ffi_call_SYSV(unsigned, extended_cif *, void(*)(unsigned char *, extended_cif *), unsigned, void *, void(*fn)(void)); + +/*====================================================================*/ +/* Implementation */ +/* */ +/*====================================================================*/ + + +/* + * This function calculates the return type (size) based on type. + */ + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* --------------------------------------* + * Return handling * + * --------------------------------------*/ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + cif->flags = FFIBFIN_RET_VOID; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + cif->flags = FFIBFIN_RET_HALFWORD; + break; + case FFI_TYPE_UINT8: + cif->flags = FFIBFIN_RET_BYTE; + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_SINT8: + cif->flags = FFIBFIN_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFIBFIN_RET_INT64; + break; + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4){ + cif->flags = FFIBFIN_RET_INT32; + }else if (cif->rtype->size == 8){ + cif->flags = FFIBFIN_RET_INT64; + }else{ + //it will return via a hidden pointer in P0 + cif->flags = FFIBFIN_RET_VOID; + } + break; + default: + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* + * This will prepare the arguments and will call the assembly routine + * cif = the call interface + * fn = the function to be called + * rvalue = the return value + * avalue = the arguments + */ +void ffi_call(ffi_cif *cif, void(*fn)(void), void *rvalue, void **avalue) +{ + int ret_type = cif->flags; + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(cif->bytes, &ecif, ffi_prep_args, ret_type, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +/* +* This function prepares the parameters (copies them from the ecif to the stack) +* to call the function (ffi_prep_args is called by the assembly routine in file +* sysv.S, which also calls the actual function) +*/ +void ffi_prep_args(unsigned char *stack, extended_cif *ecif) +{ + register unsigned int i = 0; + void **p_argv; + unsigned char *argp; + ffi_type **p_arg; + argp = stack; + p_argv = ecif->avalue; + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, 
p_arg++) { + size_t z; + z = (*p_arg)->size; + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) { + case FFI_TYPE_SINT8: { + signed char v = *(SINT8 *)(* p_argv); + signed int t = v; + *(signed int *) argp = t; + } + break; + case FFI_TYPE_UINT8: { + unsigned char v = *(UINT8 *)(* p_argv); + unsigned int t = v; + *(unsigned int *) argp = t; + } + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) * (SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) * (UINT16 *)(* p_argv); + break; + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + break; + } + } else if (z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int) * (UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + } +} + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget.h new file mode 100644 index 0000000..2175c01 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/ffitarget.h @@ -0,0 +1,43 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Alexandre K. I. de Mendonca + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv.S new file mode 100644 index 0000000..f4278be --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/bfin/sysv.S @@ -0,0 +1,179 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012 Alexandre K. I. 
de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text +.align 4 + + /* + There is a "feature" in the bfin toolchain that it puts a _ before function names + that's why the function here it's called _ffi_call_SYSV and not ffi_call_SYSV + */ + .global _ffi_call_SYSV; + .type _ffi_call_SYSV, STT_FUNC; + .func ffi_call_SYSV + + /* + cif->bytes = R0 (fp+8) + &ecif = R1 (fp+12) + ffi_prep_args = R2 (fp+16) + ret_type = stack (fp+20) + ecif.rvalue = stack (fp+24) + fn = stack (fp+28) + got (fp+32) + + There is room for improvement here (we can use temporary registers + instead of saving the values in the memory) + REGS: + P5 => Stack pointer (function arguments) + R5 => cif->bytes + R4 => ret->type + + FP-20 = P3 + FP-16 = SP (parameters area) + FP-12 = SP (temp) + FP-08 = function return part 1 [R0] + FP-04 = function return part 2 [R1] + */ + +_ffi_call_SYSV: +.prologue: + LINK 20; + [FP-20] = P3; + [FP+8] = R0; + [FP+12] = R1; + [FP+16] = R2; + +.allocate_stack: + //alocate cif->bytes into the stack + R1 = [FP+8]; + R0 = SP; + R0 = R0 - R1; + R1 = 4; + R0 = R0 - R1; + [FP-12] = SP; + SP = R0; + [FP-16] = SP; + +.call_prep_args: + //get the addr of prep_args + P0 = [P3 + _ffi_prep_args@FUNCDESC_GOT17M4]; + P1 = [P0]; + P3 = [P0+4]; + R0 = [FP-16];//SP (parameter area) + R1 = [FP+12];//ecif + call (P1); + +.call_user_function: + //ajust SP so as to allow the user function access the parameters on the stack + SP = [FP-16]; //point to function parameters + R0 = [SP]; + R1 = [SP+4]; + R2 = [SP+8]; + //load user function address + P0 = FP; + P0 +=28; + P1 = [P0]; + P1 = [P1]; + P3 = [P0+4]; + /* + For functions returning aggregate values (struct) occupying more than 8 bytes, + the caller allocates the return value object on the stack and the address + of this object is passed to the callee as a hidden argument in register P0. 
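As the comment above says, a Blackfin callee returning an aggregate larger than 8 bytes expects the caller to put the address of the result object in P0 as a hidden argument; ffi_call simply forwards ecif.rvalue, so from the API side the caller only has to supply a buffer big enough for the aggregate. A hedged sketch of such a call (struct big, make_big() and the element table are illustrative, the ffi_* calls are real API):

#include <ffi.h>
#include <stdio.h>

struct big { int v[4]; };                  /* 16 bytes: returned via memory */

static struct big make_big(int seed)       /* invented call target */
{
  struct big b;
  for (int i = 0; i < 4; i++)
    b.v[i] = seed + i;
  return b;
}

int main(void)
{
  ffi_type *elems[5] = { &ffi_type_sint, &ffi_type_sint,
                         &ffi_type_sint, &ffi_type_sint, NULL };
  ffi_type big_type = { 0, 0, FFI_TYPE_STRUCT, elems }; /* size filled by libffi */
  ffi_type *argt[1] = { &ffi_type_sint };
  ffi_cif cif;
  int seed = 7;
  void *argv[1] = { &seed };
  struct big out;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &big_type, argt) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(make_big), &out, argv); /* &out is the hidden pointer */
      printf("%d\n", out.v[3]);                     /* prints 10 */
    }
  return 0;
}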
+ */ + P0 = [FP+24]; + + call (P1); + SP = [FP-12]; +.compute_return: + P2 = [FP-20]; + [FP-8] = R0; + [FP-4] = R1; + + R0 = [FP+20]; + R1 = R0 << 2; + + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R1 = [P2]; + + P2 = [FP+-20]; + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R0 = [FP-8]; + R1 = [FP-4]; + jump (P2); + +/* +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 +*/ +.align 4 +.align 4 +.rettable: + .dd .epilogue - .rettable + .dd .rbyte - .rettable; + .dd .rhalfword - .rettable; + .dd .rint64 - .rettable; + .dd .rint32 - .rettable; + +.rbyte: + P0 = [FP+24]; + R0 = R0.B (Z); + [P0] = R0; + JUMP .epilogue +.rhalfword: + P0 = [FP+24]; + R0 = R0.L; + [P0] = R0; + JUMP .epilogue +.rint64: + P0 = [FP+24];// &rvalue + [P0] = R0; + [P0+4] = R1; + JUMP .epilogue +.rint32: + P0 = [FP+24]; + [P0] = R0; +.epilogue: + R0 = [FP+8]; + R1 = [FP+12]; + R2 = [FP+16]; + P3 = [FP-20]; + UNLINK; + RTS; + +.size _ffi_call_SYSV,.-_ffi_call_SYSV; +.endfunc diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c new file mode 100644 index 0000000..b5eb2bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/closures.c @@ -0,0 +1,1004 @@ +/* ----------------------------------------------------------------------- + closures.c - Copyright (c) 2019 Anthony Green + Copyright (c) 2007, 2009, 2010 Red Hat, Inc. + Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + Code to allocate and deallocate memory for closures. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined __linux__ && !defined _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif + +#include +#include +#include + +#ifdef __NetBSD__ +#include +#endif + +#if __NetBSD_Version__ - 0 >= 799007200 +/* NetBSD with PROT_MPROTECT */ +#include + +#include +#include + +static const size_t overhead = + (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ? 
+ sizeof(max_align_t) + : sizeof(void *) + sizeof(size_t); + +#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d))) + +void * +ffi_closure_alloc (size_t size, void **code) +{ + static size_t page_size; + size_t rounded_size; + void *codeseg, *dataseg; + int prot; + + /* Expect that PAX mprotect is active and a separate code mapping is necessary. */ + if (!code) + return NULL; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */ + rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1); + + /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */ + prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC); + dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0); + if (dataseg == MAP_FAILED) + return NULL; + + /* Create secondary mapping and switch it to RX. */ + codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP); + if (codeseg == MAP_FAILED) { + munmap(dataseg, rounded_size); + return NULL; + } + if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) { + munmap(codeseg, rounded_size); + munmap(dataseg, rounded_size); + return NULL; + } + + /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */ + memcpy(dataseg, &rounded_size, sizeof(rounded_size)); + memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *)); + *code = ADD_TO_POINTER(codeseg, overhead); + return ADD_TO_POINTER(dataseg, overhead); +} + +void +ffi_closure_free (void *ptr) +{ + void *codeseg, *dataseg; + size_t rounded_size; + + dataseg = ADD_TO_POINTER(ptr, -overhead); + memcpy(&rounded_size, dataseg, sizeof(rounded_size)); + memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *)); + munmap(dataseg, rounded_size); + munmap(codeseg, rounded_size); +} +#else /* !NetBSD with PROT_MPROTECT */ + +#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE +# if __linux__ && !defined(__ANDROID__) +/* This macro indicates it may be forbidden to map anonymous memory + with both write and execute permission. Code compiled when this + option is defined will attempt to map such pages once, but if it + fails, it falls back to creating a temporary file in a writable and + executable filesystem and mapping pages from it into separate + locations in the virtual memory space, one location writable and + another executable. */ +# define FFI_MMAP_EXEC_WRIT 1 +# define HAVE_MNTENT 1 +# endif +# if defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__) +/* Windows systems may have Data Execution Protection (DEP) enabled, + which requires the use of VirtualMalloc/VirtualFree to alloc/free + executable memory. */ +# define FFI_MMAP_EXEC_WRIT 1 +# endif +#endif + +#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX +# if defined(__linux__) && !defined(__ANDROID__) +/* When defined to 1 check for SELinux and if SELinux is active, + don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that + might cause audit messages. 
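The NetBSD allocator above never holds a writable and executable view at the same time: the data mapping stays RW (with PROT_MPROTECT(PROT_EXEC) reserved), a duplicate created with mremap/MAP_REMAPDUP is flipped to RX, and the rounded size plus the address of the executable alias are stored in a small header in front of the pointer handed back to the caller so ffi_closure_free can undo both mappings. The header bookkeeping itself is ordinary C; a stripped-down, malloc-based illustration (all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Size word plus companion pointer, mirroring the "overhead" header above. */
#define HEADER (sizeof(size_t) + sizeof(void *))

static void *alloc_with_header(size_t size, void *companion)
{
  unsigned char *base = malloc(size + HEADER);
  if (base == NULL)
    return NULL;
  memcpy(base, &size, sizeof size);                            /* remember size */
  memcpy(base + sizeof(size_t), &companion, sizeof companion); /* and the alias */
  return base + HEADER;                                        /* caller sees this */
}

static void free_with_header(void *ptr)
{
  unsigned char *base = (unsigned char *)ptr - HEADER;
  size_t size;
  void *companion;
  memcpy(&size, base, sizeof size);
  memcpy(&companion, base + sizeof(size_t), sizeof companion);
  printf("releasing %zu user bytes (companion %p)\n", size, companion);
  free(base);
}

int main(void)
{
  static char code_stub;                 /* stands in for the RX alias */
  void *p = alloc_with_header(64, &code_stub);
  if (p != NULL)
    free_with_header(p);
  return 0;
}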
*/ +# define FFI_MMAP_EXEC_SELINUX 1 +# endif +#endif + +#if FFI_CLOSURES + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ + +#include +#include +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#include + +extern void *ffi_closure_trampoline_table_page; + +typedef struct ffi_trampoline_table ffi_trampoline_table; +typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry; + +struct ffi_trampoline_table +{ + /* contiguous writable and executable pages */ + vm_address_t config_page; + vm_address_t trampoline_page; + + /* free list tracking */ + uint16_t free_count; + ffi_trampoline_table_entry *free_list; + ffi_trampoline_table_entry *free_list_pool; + + ffi_trampoline_table *prev; + ffi_trampoline_table *next; +}; + +struct ffi_trampoline_table_entry +{ + void *(*trampoline) (void); + ffi_trampoline_table_entry *next; +}; + +/* Total number of trampolines that fit in one trampoline table */ +#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE) + +static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER; +static ffi_trampoline_table *ffi_trampoline_tables = NULL; + +static ffi_trampoline_table * +ffi_trampoline_table_alloc (void) +{ + ffi_trampoline_table *table; + vm_address_t config_page; + vm_address_t trampoline_page; + vm_address_t trampoline_page_template; + vm_prot_t cur_prot; + vm_prot_t max_prot; + kern_return_t kt; + uint16_t i; + + /* Allocate two pages -- a config page and a placeholder page */ + config_page = 0x0; + kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2, + VM_FLAGS_ANYWHERE); + if (kt != KERN_SUCCESS) + return NULL; + + /* Remap the trampoline table on top of the placeholder page */ + trampoline_page = config_page + PAGE_MAX_SIZE; + trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page; +#ifdef __arm__ + /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */ + trampoline_page_template &= ~1UL; +#endif + kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, + VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template, + FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE); + if (kt != KERN_SUCCESS) + { + vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2); + return NULL; + } + + /* We have valid trampoline and config pages */ + table = calloc (1, sizeof (ffi_trampoline_table)); + table->free_count = FFI_TRAMPOLINE_COUNT; + table->config_page = config_page; + table->trampoline_page = trampoline_page; + + /* Create and initialize the free list */ + table->free_list_pool = + calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry)); + + for (i = 0; i < table->free_count; i++) + { + ffi_trampoline_table_entry *entry = &table->free_list_pool[i]; + entry->trampoline = + (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE)); + + if (i < table->free_count - 1) + entry->next = &table->free_list_pool[i + 1]; + } + + table->free_list = table->free_list_pool; + + return table; +} + +static void +ffi_trampoline_table_free (ffi_trampoline_table *table) +{ + /* Remove from the list */ + if (table->prev != NULL) + table->prev->next = table->next; + + if (table->next != NULL) + table->next->prev = table->prev; + + /* Deallocate pages */ + vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2); + + /* Deallocate free list */ + free (table->free_list_pool); + free (table); +} + +void * +ffi_closure_alloc (size_t size, void **code) +{ + /* Create the closure */ + ffi_closure *closure = malloc (size); + if (closure == 
NULL) + return NULL; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Check for an active trampoline table with available entries. */ + ffi_trampoline_table *table = ffi_trampoline_tables; + if (table == NULL || table->free_list == NULL) + { + table = ffi_trampoline_table_alloc (); + if (table == NULL) + { + pthread_mutex_unlock (&ffi_trampoline_lock); + free (closure); + return NULL; + } + + /* Insert the new table at the top of the list */ + table->next = ffi_trampoline_tables; + if (table->next != NULL) + table->next->prev = table; + + ffi_trampoline_tables = table; + } + + /* Claim the free entry */ + ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list; + ffi_trampoline_tables->free_list = entry->next; + ffi_trampoline_tables->free_count--; + entry->next = NULL; + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Initialize the return values */ + *code = entry->trampoline; +#ifdef HAVE_PTRAUTH + *code = ptrauth_sign_unauthenticated (*code, ptrauth_key_asia, 0); +#endif + closure->trampoline_table = table; + closure->trampoline_table_entry = entry; + + return closure; +} + +void +ffi_closure_free (void *ptr) +{ + ffi_closure *closure = ptr; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Fetch the table and entry references */ + ffi_trampoline_table *table = closure->trampoline_table; + ffi_trampoline_table_entry *entry = closure->trampoline_table_entry; + + /* Return the entry to the free list */ + entry->next = table->free_list; + table->free_list = entry; + table->free_count++; + + /* If all trampolines within this table are free, and at least one other table exists, deallocate + * the table */ + if (table->free_count == FFI_TRAMPOLINE_COUNT + && ffi_trampoline_tables != table) + { + ffi_trampoline_table_free (table); + } + else if (ffi_trampoline_tables != table) + { + /* Otherwise, bump this table to the top of the list */ + table->prev = NULL; + table->next = ffi_trampoline_tables; + if (ffi_trampoline_tables != NULL) + ffi_trampoline_tables->prev = table; + + ffi_trampoline_tables = table; + } + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Free the closure */ + free (closure); +} + +#endif + +// Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations. + +#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */ + +#define USE_LOCKS 1 +#define USE_DL_PREFIX 1 +#ifdef __GNUC__ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 1 +#endif +#endif + +/* We need to use mmap, not sbrk. */ +#define HAVE_MORECORE 0 + +/* We could, in theory, support mremap, but it wouldn't buy us anything. */ +#define HAVE_MREMAP 0 + +/* We have no use for this, so save some code and data. */ +#define NO_MALLINFO 1 + +/* We need all allocations to be in regular segments, otherwise we + lose track of the corresponding code address. */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + +/* Don't allocate more than a page unless needed. */ +#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize) + +#include +#include +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +#if !defined(X86_WIN32) && !defined(X86_WIN64) && !defined(_M_ARM64) +#ifdef HAVE_MNTENT +#include +#endif /* HAVE_MNTENT */ +#include +#include + +/* We don't want sys/mman.h to be included after we redefine mmap and + dlmunmap. 
*/ +#include +#define LACKS_SYS_MMAN_H 1 + +#if FFI_MMAP_EXEC_SELINUX +#include +#include + +static int selinux_enabled = -1; + +static int +selinux_enabled_check (void) +{ + struct statfs sfs; + FILE *f; + char *buf = NULL; + size_t len = 0; + + if (statfs ("/selinux", &sfs) >= 0 + && (unsigned int) sfs.f_type == 0xf97cff8cU) + return 1; + f = fopen ("/proc/mounts", "r"); + if (f == NULL) + return 0; + while (getline (&buf, &len, f) >= 0) + { + char *p = strchr (buf, ' '); + if (p == NULL) + break; + p = strchr (p + 1, ' '); + if (p == NULL) + break; + if (strncmp (p + 1, "selinuxfs ", 10) == 0) + { + free (buf); + fclose (f); + return 1; + } + } + free (buf); + fclose (f); + return 0; +} + +#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \ + : (selinux_enabled = selinux_enabled_check ())) + +#else + +#define is_selinux_enabled() 0 + +#endif /* !FFI_MMAP_EXEC_SELINUX */ + +/* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */ +#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX +#include + +static int emutramp_enabled = -1; + +static int +emutramp_enabled_check (void) +{ + char *buf = NULL; + size_t len = 0; + FILE *f; + int ret; + f = fopen ("/proc/self/status", "r"); + if (f == NULL) + return 0; + ret = 0; + + while (getline (&buf, &len, f) != -1) + if (!strncmp (buf, "PaX:", 4)) + { + char emutramp; + if (sscanf (buf, "%*s %*c%c", &emutramp) == 1) + ret = (emutramp == 'E'); + break; + } + free (buf); + fclose (f); + return ret; +} + +#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \ + : (emutramp_enabled = emutramp_enabled_check ())) +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +#elif defined (__CYGWIN__) || defined(__INTERIX) + +#include + +/* Cygwin is Linux-like, but not quite that Linux-like. */ +#define is_selinux_enabled() 0 + +#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */ + +#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX +#define is_emutramp_enabled() 0 +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Declare all functions defined in dlmalloc.c as static. */ +static void *dlmalloc(size_t); +static void dlfree(void*); +static void *dlcalloc(size_t, size_t) MAYBE_UNUSED; +static void *dlrealloc(void *, size_t) MAYBE_UNUSED; +static void *dlmemalign(size_t, size_t) MAYBE_UNUSED; +static void *dlvalloc(size_t) MAYBE_UNUSED; +static int dlmallopt(int, int) MAYBE_UNUSED; +static size_t dlmalloc_footprint(void) MAYBE_UNUSED; +static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED; +static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED; +static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED; +static void *dlpvalloc(size_t) MAYBE_UNUSED; +static int dlmalloc_trim(size_t) MAYBE_UNUSED; +static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED; +static void dlmalloc_stats(void) MAYBE_UNUSED; + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) +/* Use these for mmap and munmap within dlmalloc.c. 
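Both detectors above exist to answer one question before any allocation is attempted: is it worth asking the kernel for a PROT_WRITE|PROT_EXEC anonymous mapping at all? dlmmap, further down, skips that attempt when SELinux is detected and drops PROT_EXEC entirely under PaX emutramp. A self-contained probe in the same spirit, assuming POSIX mmap (wx_mappings_allowed() is an invented name, not libffi API):

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

/* Probe: can this process get an anonymous mapping that is both writable
 * and executable?  SELinux/PaX-style policies typically refuse it. */
static int wx_mappings_allowed(void)
{
  size_t len = (size_t)sysconf(_SC_PAGESIZE);
  void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 0;
  munmap(p, len);
  return 1;
}

int main(void)
{
  printf("W+X anonymous mappings %s\n",
         wx_mappings_allowed() ? "allowed" : "refused");
  return 0;
}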
*/ +static void *dlmmap(void *, size_t, int, int, int, off_t); +static int dlmunmap(void *, size_t); +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +#define mmap dlmmap +#define munmap dlmunmap + +#include "dlmalloc.c" + +#undef mmap +#undef munmap + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) + +/* A mutex used to synchronize access to *exec* variables in this file. */ +static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* A file descriptor of a temporary file from which we'll map + executable pages. */ +static int execfd = -1; + +/* The amount of space already allocated from the temporary file. */ +static size_t execsize = 0; + +/* Open a temporary file name, and immediately unlink it. */ +static int +open_temp_exec_file_name (char *name, int flags) +{ + int fd; + +#ifdef HAVE_MKOSTEMP + fd = mkostemp (name, flags); +#else + fd = mkstemp (name); +#endif + + if (fd != -1) + unlink (name); + + return fd; +} + +/* Open a temporary file in the named directory. */ +static int +open_temp_exec_file_dir (const char *dir) +{ + static const char suffix[] = "/ffiXXXXXX"; + int lendir, flags; + char *tempname; +#ifdef O_TMPFILE + int fd; +#endif + +#ifdef O_CLOEXEC + flags = O_CLOEXEC; +#else + flags = 0; +#endif + +#ifdef O_TMPFILE + fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700); + /* If the running system does not support the O_TMPFILE flag then retry without it. */ + if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) { + return fd; + } else { + errno = 0; + } +#endif + + lendir = (int) strlen (dir); + tempname = __builtin_alloca (lendir + sizeof (suffix)); + + if (!tempname) + return -1; + + memcpy (tempname, dir, lendir); + memcpy (tempname + lendir, suffix, sizeof (suffix)); + + return open_temp_exec_file_name (tempname, flags); +} + +/* Open a temporary file in the directory in the named environment + variable. */ +static int +open_temp_exec_file_env (const char *envvar) +{ + const char *value = getenv (envvar); + + if (!value) + return -1; + + return open_temp_exec_file_dir (value); +} + +#ifdef HAVE_MNTENT +/* Open a temporary file in an executable and writable mount point + listed in the mounts file. Subsequent calls with the same mounts + keep searching for mount points in the same file. Providing NULL + as the mounts file closes the file. */ +static int +open_temp_exec_file_mnt (const char *mounts) +{ + static const char *last_mounts; + static FILE *last_mntent; + + if (mounts != last_mounts) + { + if (last_mntent) + endmntent (last_mntent); + + last_mounts = mounts; + + if (mounts) + last_mntent = setmntent (mounts, "r"); + else + last_mntent = NULL; + } + + if (!last_mntent) + return -1; + + for (;;) + { + int fd; + struct mntent mnt; + char buf[MAXPATHLEN * 3]; + + if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL) + return -1; + + if (hasmntopt (&mnt, "ro") + || hasmntopt (&mnt, "noexec") + || access (mnt.mnt_dir, W_OK)) + continue; + + fd = open_temp_exec_file_dir (mnt.mnt_dir); + + if (fd != -1) + return fd; + } +} +#endif /* HAVE_MNTENT */ + +/* Instructions to look for a location to hold a temporary file that + can be mapped in for execution. 
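open_temp_exec_file_name above relies on a small but important idiom: create the temporary file, then unlink it immediately, so the name disappears at once and the backing storage lives only as long as the descriptor; the O_TMPFILE branch in open_temp_exec_file_dir achieves the same effect without ever creating a visible name. A standalone version of the idiom, assuming POSIX (anonymous_tmpfile() is an invented helper, not part of libffi):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int anonymous_tmpfile(const char *dir)
{
  char path[4096];
  int fd;

  snprintf(path, sizeof path, "%s/ffiXXXXXX", dir);
  fd = mkstemp(path);            /* creates the file and opens it O_RDWR */
  if (fd != -1)
    unlink(path);                /* name gone; data lives until close(fd) */
  return fd;
}

int main(void)
{
  int fd = anonymous_tmpfile("/tmp");
  if (fd == -1)
    return 1;
  if (write(fd, "hello", 5) != 5)   /* usable even though it has no name */
    perror("write");
  close(fd);
  return 0;
}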
*/ +static struct +{ + int (*func)(const char *); + const char *arg; + int repeat; +} open_temp_exec_file_opts[] = { + { open_temp_exec_file_env, "TMPDIR", 0 }, + { open_temp_exec_file_dir, "/tmp", 0 }, + { open_temp_exec_file_dir, "/var/tmp", 0 }, + { open_temp_exec_file_dir, "/dev/shm", 0 }, + { open_temp_exec_file_env, "HOME", 0 }, +#ifdef HAVE_MNTENT + { open_temp_exec_file_mnt, "/etc/mtab", 1 }, + { open_temp_exec_file_mnt, "/proc/mounts", 1 }, +#endif /* HAVE_MNTENT */ +}; + +/* Current index into open_temp_exec_file_opts. */ +static int open_temp_exec_file_opts_idx = 0; + +/* Reset a current multi-call func, then advances to the next entry. + If we're at the last, go back to the first and return nonzero, + otherwise return zero. */ +static int +open_temp_exec_file_opts_next (void) +{ + if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL); + + open_temp_exec_file_opts_idx++; + if (open_temp_exec_file_opts_idx + == (sizeof (open_temp_exec_file_opts) + / sizeof (*open_temp_exec_file_opts))) + { + open_temp_exec_file_opts_idx = 0; + return 1; + } + + return 0; +} + +/* Return a file descriptor of a temporary zero-sized file in a + writable and executable filesystem. */ +static int +open_temp_exec_file (void) +{ + int fd; + + do + { + fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func + (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg); + + if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat + || fd == -1) + { + if (open_temp_exec_file_opts_next ()) + break; + } + } + while (fd == -1); + + return fd; +} + +/* We need to allocate space in a file that will be backing a writable + mapping. Several problems exist with the usual approaches: + - fallocate() is Linux-only + - posix_fallocate() is not available on all platforms + - ftruncate() does not allocate space on filesystems with sparse files + Failure to allocate the space will cause SIGBUS to be thrown when + the mapping is subsequently written to. */ +static int +allocate_space (int fd, off_t offset, off_t len) +{ + static size_t page_size; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + unsigned char buf[page_size]; + memset (buf, 0, page_size); + + while (len > 0) + { + off_t to_write = (len < page_size) ? len : page_size; + if (write (fd, buf, to_write) < to_write) + return -1; + len -= to_write; + } + + return 0; +} + +/* Map in a chunk of memory from the temporary exec file into separate + locations in the virtual memory address space, one writable and one + executable. Returns the address of the writable portion, after + storing an offset to the corresponding executable portion at the + last word of the requested chunk. */ +static void * +dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset) +{ + void *ptr; + + if (execfd == -1) + { + open_temp_exec_file_opts_idx = 0; + retry_open: + execfd = open_temp_exec_file (); + if (execfd == -1) + return MFAIL; + } + + offset = execsize; + + if (allocate_space (execfd, offset, length)) + return MFAIL; + + flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS); + flags |= MAP_SHARED; + + ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC, + flags, execfd, offset); + if (ptr == MFAIL) + { + if (!offset) + { + close (execfd); + goto retry_open; + } + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. 
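dlmmap_locked, begun above, is the core of the writable/executable split: the same region of the temporary file is mapped twice, once PROT_READ|PROT_EXEC and once with the protections dlmalloc asked for, and the distance between the two views is recorded so a code address can later be derived from a data address. The double mapping itself is plain POSIX; a compact sketch (map_rw_rx() is an invented name; the real code fills the file with zero writes via allocate_space instead of relying on ftruncate):

#include <sys/mman.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Map the same file region twice: one RW view, one RX view. */
static int map_rw_rx(int fd, size_t len, void **rw, void **rx)
{
  if (ftruncate(fd, (off_t)len) != 0)   /* sketch only; see allocate_space */
    return -1;
  *rx = mmap(NULL, len, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
  if (*rx == MAP_FAILED)
    return -1;
  *rw = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (*rw == MAP_FAILED)
    {
      munmap(*rx, len);
      return -1;
    }
  return 0;
}

int main(void)
{
  char tmpl[] = "/tmp/ffiXXXXXX";
  int fd = mkstemp(tmpl);
  size_t len = (size_t)sysconf(_SC_PAGESIZE);
  void *rw, *rx;

  if (fd == -1)
    return 1;
  unlink(tmpl);
  if (map_rw_rx(fd, len, &rw, &rx) == 0)
    {
      memset(rw, 0x90, 16);                    /* write through the RW view */
      printf("exec view offset: %td\n",
             (char *)rx - (char *)rw);         /* same bytes, different prot */
      munmap(rw, len);
      munmap(rx, len);
    }
  close(fd);
  return 0;
}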
Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + + return MFAIL; + } + else if (!offset + && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts_next (); + + start = mmap (start, length, prot, flags, execfd, offset); + + if (start == MFAIL) + { + munmap (ptr, length); + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + return start; + } + + mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start; + + execsize += length; + + return start; +} + +/* Map in a writable and executable chunk of memory if possible. + Failing that, fall back to dlmmap_locked. */ +static void * +dlmmap (void *start, size_t length, int prot, + int flags, int fd, off_t offset) +{ + void *ptr; + + assert (start == NULL && length % malloc_getpagesize == 0 + && prot == (PROT_READ | PROT_WRITE) + && flags == (MAP_PRIVATE | MAP_ANONYMOUS) + && fd == -1 && offset == 0); + + if (execfd == -1 && is_emutramp_enabled ()) + { + ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset); + return ptr; + } + + if (execfd == -1 && !is_selinux_enabled ()) + { + ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset); + + if (ptr != MFAIL || (errno != EPERM && errno != EACCES)) + /* Cool, no need to mess with separate segments. */ + return ptr; + + /* If MREMAP_DUP is ever introduced and implemented, try mmap + with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with + MREMAP_DUP and prot at this point. */ + } + + if (execsize == 0 || execfd == -1) + { + pthread_mutex_lock (&open_temp_exec_file_mutex); + ptr = dlmmap_locked (start, length, prot, flags, offset); + pthread_mutex_unlock (&open_temp_exec_file_mutex); + + return ptr; + } + + return dlmmap_locked (start, length, prot, flags, offset); +} + +/* Release memory at the given address, as well as the corresponding + executable page if it's separate. */ +static int +dlmunmap (void *start, size_t length) +{ + /* We don't bother decreasing execsize or truncating the file, since + we can't quite tell whether we're unmapping the end of the file. + We don't expect frequent deallocation anyway. If we did, we + could locate pages in the file by writing to the pages being + deallocated and checking that the file contents change. + Yuck. */ + msegmentptr seg = segment_holding (gm, start); + void *code; + + if (seg && (code = add_segment_exec_offset (start, seg)) != start) + { + int ret = munmap (code, length); + if (ret) + return ret; + } + + return munmap (start, length); +} + +#if FFI_CLOSURE_FREE_CODE +/* Return segment holding given code address. */ +static msegmentptr +segment_holding_code (mstate m, char* addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= add_segment_exec_offset (sp->base, sp) + && addr < add_segment_exec_offset (sp->base, sp) + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} +#endif + +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(_M_ARM64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +/* Allocate a chunk of memory with the given size. Returns a pointer + to the writable address, and sets *CODE to the executable + corresponding virtual address. 
*/ +void * +ffi_closure_alloc (size_t size, void **code) +{ + void *ptr; + + if (!code) + return NULL; + + ptr = FFI_CLOSURE_PTR (dlmalloc (size)); + + if (ptr) + { + msegmentptr seg = segment_holding (gm, ptr); + + *code = add_segment_exec_offset (ptr, seg); + } + + return ptr; +} + +void * +ffi_data_to_code_pointer (void *data) +{ + msegmentptr seg = segment_holding (gm, data); + /* We expect closures to be allocated with ffi_closure_alloc(), in + which case seg will be non-NULL. However, some users take on the + burden of managing this memory themselves, in which case this + we'll just return data. */ + if (seg) + return add_segment_exec_offset (data, seg); + else + return data; +} + +/* Release a chunk of memory allocated with ffi_closure_alloc. If + FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the + writable or the executable address given. Otherwise, only the + writable address can be provided here. */ +void +ffi_closure_free (void *ptr) +{ +#if FFI_CLOSURE_FREE_CODE + msegmentptr seg = segment_holding_code (gm, ptr); + + if (seg) + ptr = sub_segment_exec_offset (ptr, seg); +#endif + + dlfree (FFI_RESTORE_PTR (ptr)); +} + +# else /* ! FFI_MMAP_EXEC_WRIT */ + +/* On many systems, memory returned by malloc is writable and + executable, so just use it. */ + +#include + +void * +ffi_closure_alloc (size_t size, void **code) +{ + if (!code) + return NULL; + + return *code = FFI_CLOSURE_PTR (malloc (size)); +} + +void +ffi_closure_free (void *ptr) +{ + free (FFI_RESTORE_PTR (ptr)); +} + +void * +ffi_data_to_code_pointer (void *data) +{ + return data; +} + +# endif /* ! FFI_MMAP_EXEC_WRIT */ +#endif /* FFI_CLOSURES */ + +#endif /* NetBSD with PROT_MPROTECT */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi.c new file mode 100644 index 0000000..9011fde --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffi.c @@ -0,0 +1,386 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998 Cygnus Solutions + Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + Copyright (C) 2007 Free Software Foundation, Inc. + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +static ffi_status +initialize_aggregate_packed_struct (ffi_type * arg) +{ + ffi_type **ptr; + + FFI_ASSERT (arg != NULL); + + FFI_ASSERT (arg->elements != NULL); + FFI_ASSERT (arg->size == 0); + FFI_ASSERT (arg->alignment == 0); + + ptr = &(arg->elements[0]); + + while ((*ptr) != NULL) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT (ffi_type_test ((*ptr))); + + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +int +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + unsigned int struct_count = 0; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + + switch ((*p_arg)->type) + { + case FFI_TYPE_STRUCT: + { + z = (*p_arg)->size; + if (z <= 4) + { + memcpy (argp, *p_argv, z); + z = 4; + } + else if (z <= 8) + { + memcpy (argp, *p_argv, z); + z = 8; + } + else + { + unsigned int uiLocOnStack; + z = sizeof (void *); + uiLocOnStack = 4 * ecif->cif->nargs + struct_count; + struct_count = struct_count + (*p_arg)->size; + *(unsigned int *) argp = + (unsigned int) (UINT32 *) (stack + uiLocOnStack); + memcpy ((stack + uiLocOnStack), *p_argv, (*p_arg)->size); + } + break; + } + default: + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = + (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = + (unsigned int) *(UINT16 *) (*p_argv); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else if (z == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + else + memcpy (argp, *p_argv, z); + break; + } + p_argv++; + argp += z; + } + + return (struct_count); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_core (ffi_cif * cif, + ffi_abi abi, unsigned int isvariadic, + unsigned int nfixedargs, unsigned int ntotalargs, + ffi_type * rtype, ffi_type ** atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT (cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + FFI_ASSERT (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI); + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; + + if ((cif->rtype->size == 0) + && (initialize_aggregate_packed_struct (cif->rtype) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (cif->rtype); + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (*ptr); + + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN (bytes, (*ptr)->alignment); + if ((*ptr)->type == FFI_TYPE_STRUCT) + { + if ((*ptr)->size > 8) + { + bytes += (*ptr)->size; + bytes += sizeof (void *); + } 
+ else + { + if ((*ptr)->size > 4) + bytes += 8; + else + bytes += 4; + } + } + else + bytes += STACK_ARG_SIZE ((*ptr)->size); + } + + cif->bytes = bytes; + + return ffi_prep_cif_machdep (cif); +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV (int (*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, unsigned *, void (*fn) ()) + __attribute__ ((__visibility__ ("hidden"))); + +void +ffi_call (ffi_cif * cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT (0); + break; + } +} + +/* Because the following variables are not exported outside libffi, we + mark them hidden. */ + +/* Assembly code for the jump stub. */ +extern const char ffi_cris_trampoline_template[] + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + ffi_prep_closure_inner function. */ +extern const int ffi_cris_trampoline_fn_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + closure data. */ +extern const int ffi_cris_trampoline_closure_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* This function is sibling-called (jumped to) by the closure + trampoline. We get R10..R13 at PARAMS[0..3] and a copy of [SP] at + PARAMS[4] to simplify handling of a straddling parameter. A copy + of R9 is at PARAMS[5] and SP at PARAMS[6]. These parameters are + put at the appropriate place in CLOSURE which is then executed and + the return value is passed back to the caller. */ + +static unsigned long long +ffi_prep_closure_inner (void **params, ffi_closure* closure) +{ + char *register_args = (char *) params; + void *struct_ret = params[5]; + char *stack_args = params[6]; + char *ptr = register_args; + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + + /* Max room needed is number of arguments as 64-bit values. */ + void **avalue = alloca (closure->cif->nargs * sizeof(void *)); + int i; + int doing_regs; + long long llret = 0; + + /* Find the address of each argument. */ + for (i = 0, doing_regs = 1; i < cif->nargs; i++) + { + /* Types up to and including 8 bytes go by-value. */ + if (arg_types[i]->size <= 4) + { + avalue[i] = ptr; + ptr += 4; + } + else if (arg_types[i]->size <= 8) + { + avalue[i] = ptr; + ptr += 8; + } + else + { + FFI_ASSERT (arg_types[i]->type == FFI_TYPE_STRUCT); + + /* Passed by-reference, so copy the pointer. */ + avalue[i] = *(void **) ptr; + ptr += 4; + } + + /* If we've handled more arguments than fit in registers, start + looking at the those passed on the stack. Step over the + first one if we had a straddling parameter. */ + if (doing_regs && ptr >= register_args + 4*4) + { + ptr = stack_args + ((ptr > register_args + 4*4) ? 4 : 0); + doing_regs = 0; + } + } + + /* Invoke the closure. 
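The loop at the end of ffi_prep_cif_core above sizes the argument area by walking the argument types: whenever the running byte count is misaligned for the next argument it is rounded up to that argument's alignment, and then the argument's stack-rounded size (STACK_ARG_SIZE) is added; the CRIS struct cases add their own rounding or an extra pointer. A small worked sketch of the scalar path, assuming 4-byte stack words as on CRIS (ALIGN_UP mirrors FFI_ALIGN for power-of-two alignments; layout_args() is invented):

#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(v, a)  (((v) + (a) - 1) & ~((size_t)(a) - 1))
#define WORD 4                          /* FFI_SIZEOF_ARG on CRIS */

struct argdesc { size_t size, alignment; };

/* layout_args() restates the scalar sizing loop above, nothing more. */
static size_t layout_args(const struct argdesc *a, size_t n)
{
  size_t bytes = 0;
  for (size_t i = 0; i < n; i++)
    {
      if (bytes & (a[i].alignment - 1))        /* misaligned for this arg */
        bytes = ALIGN_UP(bytes, a[i].alignment);
      bytes += ALIGN_UP(a[i].size, WORD);      /* STACK_ARG_SIZE */
    }
  return bytes;
}

int main(void)
{
  /* char (1,1), double (8,4), short (2,2): 4 + 8 + 4 = 16 bytes. */
  struct argdesc args[] = { { 1, 1 }, { 8, 4 }, { 2, 2 } };
  printf("%zu\n", layout_args(args, 3));       /* prints 16 */
  return 0;
}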
*/ + (closure->fun) (cif, + + cif->rtype->type == FFI_TYPE_STRUCT + /* The caller allocated space for the return + structure, and passed a pointer to this space in + R9. */ + ? struct_ret + + /* We take advantage of being able to ignore that + the high part isn't set if the return value is + not in R10:R11, but in R10 only. */ + : (void *) &llret, + + avalue, closure->user_data); + + return llret; +} + +/* API function: Prepare the trampoline. */ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif *, void *, void **, void*), + void *user_data, + void *codeloc) +{ + void *innerfn = ffi_prep_closure_inner; + FFI_ASSERT (cif->abi == FFI_SYSV); + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + memcpy (closure->tramp, ffi_cris_trampoline_template, + FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE); + memcpy (closure->tramp + ffi_cris_trampoline_fn_offset, + &innerfn, sizeof (void *)); + memcpy (closure->tramp + ffi_cris_trampoline_closure_offset, + &codeloc, sizeof (void *)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget.h new file mode 100644 index 0000000..b837e97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/ffitarget.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for CRIS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
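The CRIS ffi_prep_closure_loc above builds a closure by copying a fixed block of machine code, ffi_cris_trampoline_template, into closure->tramp and then memcpy-ing the inner-function pointer and the codeloc pointer into it at offsets exported by the assembly file. The copy-and-patch pattern is easy to show on plain data; a hedged sketch (the template, the offsets and build_stub() are all invented):

#include <stdio.h>
#include <string.h>

#define FN_OFFSET    4
#define DATA_OFFSET  (4 + sizeof(void *))

static const unsigned char TEMPLATE[4 + 2 * sizeof(void *)] =
  { 0xDE, 0xAD, 0xBE, 0xEF };            /* stand-in for real machine code */

static void build_stub(unsigned char *stub, void *fn, void *data)
{
  memcpy(stub, TEMPLATE, sizeof TEMPLATE);           /* fixed code part */
  memcpy(stub + FN_OFFSET, &fn, sizeof fn);          /* patch slot #1   */
  memcpy(stub + DATA_OFFSET, &data, sizeof data);    /* patch slot #2   */
}

int main(void)
{
  unsigned char stub[sizeof TEMPLATE];
  static int target;                     /* pretend inner function/closure */
  int payload = 42;

  build_stub(stub, &target, &payload);
  printf("stub of %zu bytes built\n", sizeof stub);
  return 0;
}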
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE 36 +#define FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE (7*4) +#define FFI_TRAMPOLINE_SIZE \ + (FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE + FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE) +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv.S new file mode 100644 index 0000000..79abaee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/cris/sysv.S @@ -0,0 +1,215 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#define CONCAT(x,y) x ## y +#define XCONCAT(x,y) CONCAT (x, y) +#define L(x) XCONCAT (__USER_LABEL_PREFIX__, x) + + .text + + ;; OK, when we get called we should have this (according to + ;; AXIS ETRAX 100LX Programmer's Manual chapter 6.3). + ;; + ;; R10: ffi_prep_args (func. pointer) + ;; R11: &ecif + ;; R12: cif->bytes + ;; R13: fig->flags + ;; sp+0: ecif.rvalue + ;; sp+4: fn (function pointer to the function that we need to call) + + .globl L(ffi_call_SYSV) + .type L(ffi_call_SYSV),@function + .hidden L(ffi_call_SYSV) + +L(ffi_call_SYSV): + ;; Save the regs to the stack. + push $srp + ;; Used for stack pointer saving. + push $r6 + ;; Used for function address pointer. + push $r7 + ;; Used for stack pointer saving. + push $r8 + ;; We save fig->flags to stack we will need them after we + ;; call The Function. + push $r13 + + ;; Saving current stack pointer. + move.d $sp,$r8 + move.d $sp,$r6 + + ;; Move address of ffi_prep_args to r13. + move.d $r10,$r13 + + ;; Make room on the stack for the args of fn. + sub.d $r12,$sp + + ;; Function void ffi_prep_args(char *stack, extended_cif *ecif) parameters are: + ;; r10 <-- stack pointer + ;; r11 <-- &ecif (already there) + move.d $sp,$r10 + + ;; Call the function. + jsr $r13 + + ;; Save the size of the structures which are passed on stack. 
+ move.d $r10,$r7 + + ;; Move first four args in to r10..r13. + move.d [$sp+0],$r10 + move.d [$sp+4],$r11 + move.d [$sp+8],$r12 + move.d [$sp+12],$r13 + + ;; Adjust the stack and check if any parameters are given on stack. + addq 16,$sp + sub.d $r7,$r6 + cmp.d $sp,$r6 + + bpl go_on + nop + +go_on_no_params_on_stack: + move.d $r6,$sp + +go_on: + ;; Discover if we need to put rval address in to r9. + move.d [$r8+0],$r7 + cmpq FFI_TYPE_STRUCT,$r7 + bne call_now + nop + + ;; Move rval address to $r9. + move.d [$r8+20],$r9 + +call_now: + ;; Move address of The Function in to r7. + move.d [$r8+24],$r7 + + ;; Call The Function. + jsr $r7 + + ;; Reset stack. + move.d $r8,$sp + + ;; Load rval type (fig->flags) in to r13. + pop $r13 + + ;; Detect rval type. + cmpq FFI_TYPE_VOID,$r13 + beq epilogue + + cmpq FFI_TYPE_STRUCT,$r13 + beq epilogue + + cmpq FFI_TYPE_DOUBLE,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_UINT64,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_SINT64,$r13 + beq return_double_or_longlong + nop + + ;; Just return the 32 bit value. + ba return + nop + +return_double_or_longlong: + ;; Load half of the rval to r10 and the other half to r11. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + addq 4,$r13 + move.d $r11,[$r13] + ba epilogue + nop + +return: + ;; Load the rval to r10. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + +epilogue: + pop $r8 + pop $r7 + pop $r6 + Jump [$sp+] + + .size ffi_call_SYSV,.-ffi_call_SYSV + +/* Save R10..R13 into an array, somewhat like varargs. Copy the next + argument too, to simplify handling of any straddling parameter. + Save R9 and SP after those. Jump to function handling the rest. + Since this is a template, copied and the main function filled in by + the user. */ + + .globl L(ffi_cris_trampoline_template) + .type L(ffi_cris_trampoline_template),@function + .hidden L(ffi_cris_trampoline_template) + +L(ffi_cris_trampoline_template): +0: + /* The value we get for "PC" is right after the prefix instruction, + two bytes from the beginning, i.e. 0b+2. */ + move.d $r10,[$pc+2f-(0b+2)] + move.d $pc,$r10 +1: + addq 2f-1b+4,$r10 + move.d $r11,[$r10+] + move.d $r12,[$r10+] + move.d $r13,[$r10+] + move.d [$sp],$r11 + move.d $r11,[$r10+] + move.d $r9,[$r10+] + move.d $sp,[$r10+] + subq FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE,$r10 + move.d 0,$r11 +3: + jump 0 +2: + .size ffi_cris_trampoline_template,.-0b + +/* This macro create a constant usable as "extern const int \name" in + C from within libffi, when \name has no prefix decoration. */ + + .macro const name,value + .globl \name + .type \name,@object + .hidden \name +\name: + .dword \value + .size \name,4 + .endm + +/* Constants for offsets within the trampoline. We could do this with + just symbols, avoiding memory contents and memory accesses, but the + C usage code would look a bit stranger. */ + + const L(ffi_cris_trampoline_fn_offset),2b-4-0b + const L(ffi_cris_trampoline_closure_offset),3b-4-0b diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug.c new file mode 100644 index 0000000..f3172b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/debug.c @@ -0,0 +1,64 @@ +/* ----------------------------------------------------------------------- + debug.c - Copyright (c) 1996 Red Hat, Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include + +/* General debugging routines */ + +void ffi_stop_here(void) +{ + /* This function is only useful for debugging purposes. + Place a breakpoint on ffi_stop_here to be notified of + significant events. */ +} + +/* This function should only be called via the FFI_ASSERT() macro */ + +void ffi_assert(char *expr, char *file, int line) +{ + fprintf(stderr, "ASSERTION FAILURE: %s at %s:%d\n", expr, file, line); + ffi_stop_here(); + abort(); +} + +/* Perform a sanity check on an ffi_type structure */ + +void ffi_type_test(ffi_type *a, char *file, int line) +{ + FFI_ASSERT_AT(a != NULL, file, line); + + FFI_ASSERT_AT(a->type <= FFI_TYPE_LAST, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->size > 0, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->alignment > 0, file, line); + FFI_ASSERT_AT((a->type != FFI_TYPE_STRUCT && a->type != FFI_TYPE_COMPLEX) + || a->elements != NULL, file, line); + FFI_ASSERT_AT(a->type != FFI_TYPE_COMPLEX + || (a->elements != NULL + && a->elements[0] != NULL && a->elements[1] == NULL), + file, line); + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc.c new file mode 100644 index 0000000..d63dd36 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/dlmalloc.c @@ -0,0 +1,5166 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. 
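  For example (an illustrative sketch only; with the USE_DL_PREFIX option
  described under the compile-time options below, the public routines are
  exported with a "dl" prefix so the system malloc stays available), a tiny
  consumer of a build compiled with -O3 -DUSE_DL_PREFIX could look like:

    #include <stddef.h>

    extern void* dlmalloc(size_t);
    extern void  dlfree(void*);

    int main(void) {
      void* p = dlmalloc(100);   // served by this file, not the C library
      if (p != 0)
        dlfree(p);
      return p != 0 ? 0 : 1;
    }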
+ + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. 
(Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with either a pthread mutex or a win32 + spinlock (depending on WIN32). This is not especially fast, and + can be a major bottleneck. It is designed only to provide + minimal protection in concurrent environments, and to provide a + basis for extensions. If you are using malloc in a concurrent + program, consider instead using ptmalloc, which is derived from + a version of this malloc. (See http://www.malloc.de). + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) 
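  The 256Kb cutoff mentioned above is only a default: it can be changed at
  run time through the mallopt interface declared later in this file (see
  the mallopt tuning options and DEFAULT_MMAP_THRESHOLD below).  A sketch,
  with an arbitrary example value and the dl prefix of USE_DL_PREFIX assumed:

    extern int dlmallopt(int, int);    // the prefixed mallopt

    void tune_allocator(void) {
      // M_MMAP_THRESHOLD is defined in this file; mallopt returns 1 on success.
      dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);   // mmap only requests >= 1Mb
    }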
+ + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. + + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. + +MALLOC_ALIGNMENT default: (size_t)8 + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. 
(If set true, this can be + overridden on a per-mspace basis for mspace versions.) + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. + +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. + However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. + +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). 
It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. See + near the end of this file for guidelines for creating a custom + version of MORECORE. + +MORECORE_CONTIGUOUS default: 1 (true) + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. + +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 on unix + True if mmap clears memory so calloc doesn't need to. This is true + for standard unix mmap using /dev/zero. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. (On most x86s, the asm version is only + slightly faster than the C version.) + +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). 
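  Because these switches are ordinary preprocessor definitions read through
  #ifndef guards, one convenient way to pin a configuration down (a
  hypothetical wrapper; the option values below are arbitrary examples) is a
  small translation unit that sets them and then includes this source file:

    // my_malloc.c, a hypothetical wrapper around this file
    #define USE_DL_PREFIX                        // keep the system malloc too
    #define USE_LOCKS 1                          // mutex/spinlock around calls
    #define FOOTERS 1                            // per-chunk integrity checks
    #define DEFAULT_MMAP_THRESHOLD (1024U*1024U)
    #define REALLOC_ZERO_BYTES_FREES
    #include "dlmalloc.c"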
+ +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. 
Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +*/ + +#if defined __linux__ && !defined _GNU_SOURCE +/* mremap() on Linux requires this via sys/mman.h */ +#define _GNU_SOURCE 1 +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define MALLOC_FAILURE_ACTION +#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ +#endif /* WIN32 */ + +#ifdef __OS2__ +#define INCL_DOS +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_SYS_MMAN_H +#endif /* __OS2__ */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)8U) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#ifndef MORECORE +#define MORECORE sbrk +#endif /* MORECORE */ +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if MORECORE_CONTIGUOUS +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define 
DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. +*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. 
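  As an illustration (a sketch; report_usage is a hypothetical helper, and
  the dl prefix assumes USE_DL_PREFIX, otherwise the plain mallinfo name is
  used), the struct can be consumed like this:

    #include <stdio.h>

    void report_usage(void) {
      struct mallinfo mi = dlmallinfo();
      printf("from system (arena):   %lu\n", (unsigned long) mi.arena);
      printf("allocated (uordblks):  %lu\n", (unsigned long) mi.uordblks);
      printf("free (fordblks):       %lu\n", (unsigned long) mi.fordblks);
      printf("trimmable (keepcost):  %lu\n", (unsigned long) mi.keepcost);
    }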
+*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ + +/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO + +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; + +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. 
realloc with a size + argument of zero (re)allocates a minimum-sized chunk. + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ + +void* dlrealloc(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +void* dlmemalign(size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. 
("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use regular calloc and assign pointers into this + space to represent elements. (In this case though, you cannot + independently free elements.) + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. 
If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use a single regular malloc, and assign pointers at + particular offsets in the aggregate space. (In this case though, you + cannot independently free elements.) + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. 
malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. 
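  Putting these routines together, a self-contained space might be used like
  this (an illustrative sketch with minimal error handling):

    mspace ms = create_mspace(0, 0);        // default capacity, not locked
    if (ms != 0) {
      void* p = mspace_malloc(ms, 128);
      if (p != 0) {
        void* q = mspace_realloc(ms, p, 256);
        mspace_free(ms, q != 0 ? q : p);    // free whichever pointer is live
      }
      destroy_mspace(ms);                   // returns the space's memory
    }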
+*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. 
+ ======================================================================== +*/ + +/* #include "malloc.h" */ + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef _MSC_VER +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ + +#include /* for printing in malloc_stats */ + +#ifndef LACKS_ERRNO_H +#include /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ +#if FOOTERS +#include /* for magic initialization */ +#endif /* FOOTERS */ +#ifndef LACKS_STDLIB_H +#include /* for abort() */ +#endif /* LACKS_STDLIB_H */ +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#define assert(x) +#endif /* DEBUG */ +#ifndef LACKS_STRING_H +#include /* for memset etc */ +#endif /* LACKS_STRING_H */ +#if USE_BUILTIN_FFS +#ifndef LACKS_STRINGS_H +#include /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ +#if HAVE_MMAP +#ifndef LACKS_SYS_MMAN_H +#include /* for mmap */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ +#if HAVE_MORECORE +#ifndef LACKS_UNISTD_H +#include /* for sbrk */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ +#endif /* HAVE_MMAP */ + +#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 
0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if !HAVE_MMAP +#define IS_MMAPPED_BIT (SIZE_T_ZERO) +#define USE_MMAP_BIT (SIZE_T_ZERO) +#define CALL_MMAP(s) MFAIL +#define CALL_MUNMAP(a, s) (-1) +#define DIRECT_MMAP(s) MFAIL + +#else /* HAVE_MMAP */ +#define IS_MMAPPED_BIT (SIZE_T_ONE) +#define USE_MMAP_BIT (SIZE_T_ONE) + +#if !defined(WIN32) && !defined (__OS2__) +#define CALL_MUNMAP(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP(s) CALL_MMAP(s) + +#elif defined(__OS2__) + +/* OS/2 MMAP via DosAllocMem */ +static void* os2mmap(size_t size) { + void* ptr; + if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) && + DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE)) + return MFAIL; + return ptr; +} + +#define os2direct_mmap(n) os2mmap(n) + +/* This function supports releasing coalesed segments */ +static int os2munmap(void* ptr, size_t size) { + while (size) { + ULONG ulSize = size; + ULONG ulFlags = 0; + if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0) + return -1; + if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 || + ulSize > size) + return -1; + if (DosFreeMem(ptr) != 0) + return -1; + ptr = ( void * ) ( ( char * ) ptr + ulSize ); + size -= ulSize; + } + return 0; +} + +#define CALL_MMAP(s) os2mmap(s) +#define CALL_MUNMAP(a, s) os2munmap((a), (s)) +#define DIRECT_MMAP(s) os2direct_mmap(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_EXECUTE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL;
+}
+
+/* This function supports releasing coalesed segments */
+static int win32munmap(void* ptr, size_t size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  char* cptr = ptr;
+  while (size) {
+    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+      return -1;
+    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+      return -1;
+    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+      return -1;
+    cptr += minfo.RegionSize;
+    size -= minfo.RegionSize;
+  }
+  return 0;
+}
+
+#define CALL_MMAP(s) win32mmap(s)
+#define CALL_MUNMAP(a, s) win32munmap((a), (s))
+#define DIRECT_MMAP(s) win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MMAP && HAVE_MREMAP
+#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#else /* HAVE_MMAP && HAVE_MREMAP */
+#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+#if HAVE_MORECORE
+#define CALL_MORECORE(S) MORECORE(S)
+#else /* HAVE_MORECORE */
+#define CALL_MORECORE(S) MFAIL
+#endif /* HAVE_MORECORE */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+#if USE_LOCKS
+
+/*
+  When locks are defined, there are up to two global locks:
+
+  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
+    MORECORE. In many cases sys_alloc requires two calls, that should
+    not be interleaved with calls by other threads. This does not
+    protect against direct calls to MORECORE by other threads not
+    using this lock, so there is still code to cope the best we can on
+    interference.
+
+  * magic_init_mutex ensures that mparams.magic and other
+    unique mparams values are initialized only once.
+*/
+
+#if !defined(WIN32) && !defined(__OS2__)
+/* By default use posix locks */
+#include <pthread.h>
+#define MLOCK_T pthread_mutex_t
+#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
+#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+#define RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif /* HAVE_MORECORE */
+
+static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#elif defined(__OS2__)
+#define MLOCK_T HMTX
+#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE)
+#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
+#define RELEASE_LOCK(l) DosReleaseMutexSem(*l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+
+#else /* WIN32 */
+/*
+   Because lock-protected regions have bounded times, and there
+   are no recursive lock calls, we can use simple spinlocks.
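+   The code below implements exactly that: win32_acquire_lock() spins,
+   trying to flip the lock word from 0 to 1 with
+   InterlockedCompareExchange and yielding the rest of its time slice
+   with Sleep(0) after each failed attempt, while win32_release_lock()
+   simply stores 0 back with InterlockedExchange.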
+*/ + +#define MLOCK_T long +static int win32_acquire_lock (MLOCK_T *sl) { + for (;;) { +#ifdef InterlockedCompareExchangePointer + if (!InterlockedCompareExchange(sl, 1, 0)) + return 0; +#else /* Use older void* version */ + if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0)) + return 0; +#endif /* InterlockedCompareExchangePointer */ + Sleep (0); + } +} + +static void win32_release_lock (MLOCK_T *sl) { + InterlockedExchange (sl, 0); +} + +#define INITIAL_LOCK(l) *(l)=0 +#define ACQUIRE_LOCK(l) win32_acquire_lock(l) +#define RELEASE_LOCK(l) win32_release_lock(l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; +#endif /* WIN32 */ + +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS && HAVE_MORECORE +#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex); +#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex); +#else /* USE_LOCKS && HAVE_MORECORE */ +#define ACQUIRE_MORECORE_LOCK() +#define RELEASE_MORECORE_LOCK() +#endif /* USE_LOCKS && HAVE_MORECORE */ + +#if USE_LOCKS +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MAGIC_INIT_LOCK() +#define RELEASE_MAGIC_INIT_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse. This redundancy enables usage checks within free and realloc, + and reduces indirection when freeing and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. 
The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, which have the lowest-order bit + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set + PINUSE_BIT in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* MMapped chunks need a second word of overhead ... */ +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use. If the chunk was obtained with mmap, the prev_foot field has + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the + mmapped region to the base of the chunk. 
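+
+  As a minimal illustrative sketch (not part of the allocator), these
+  two bits plus the stored size are enough to walk forward from a chunk
+  and count the free chunks ahead of it in its segment, mirroring the
+  loops in traverse_and_check() and internal_mallinfo() later in this
+  file; it assumes hypothetical variables p (a valid chunk), s (the
+  segment holding it) and m (its mstate), and uses only the types and
+  macros defined in this file:
+
+    size_t nfree = 0;
+    mchunkptr q = p;
+    while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) {
+      if (!cinuse(q))        // C bit clear: this chunk is free
+        ++nfree;
+      q = next_chunk(q);     // advance by chunksize(q) == head & ~INUSE_BITS
+    }
+
+  Note that prev_chunk(q) may only be used when pinuse(q) is clear,
+  since only then does q->prev_foot hold the previous chunk's size.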
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
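+
+  As a rough sketch of that walk (illustrative only; corruption checks
+  and unlinking omitted), assuming hypothetical variables m (an mstate)
+  and i (a treebin index), and using the tree type and helpers defined
+  below:
+
+    tchunkptr t = *treebin_at(m, i);   // root of the treebin, if any
+    tchunkptr least = t;
+    while (t != 0) {
+      if (chunksize(t) < chunksize(least))
+        least = t;
+      t = leftmost_child(t);           // left child if present, else right
+    }
+    // least now points at the smallest chunk held under this root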
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
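+
+  A minimal sketch of such a linear scan (illustrative only), assuming
+  a hypothetical mstate m and using the segment type and flag accessors
+  defined just below, e.g. to total up mmapped versus other space:
+
+    size_t mapped = 0, total = 0;
+    msegmentptr s = &m->seg;           // embedded top-most segment record
+    while (s != 0) {
+      total += s->size;
+      if (is_mmapped_segment(s))
+        mapped += s->size;
+      s = s->next;
+    }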
+*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ +#if FFI_MMAP_EXEC_WRIT + /* The mmap magic is supposed to store the address of the executable + segment at the very end of the requested block. */ + +# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t))) + + /* We can only merge segments if their corresponding executable + segments are at identical offsets. */ +# define check_segment_merge(S,b,s) \ + (mmap_exec_offset((b),(s)) == (S)->exec_offset) + +# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset) +# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset) + + /* The removal of sflags only works with HAVE_MORECORE == 0. */ + +# define get_segment_flags(S) (IS_MMAPPED_BIT) +# define set_segment_flags(S,v) \ + (((v) != IS_MMAPPED_BIT) ? (ABORT, (v)) : \ + (((S)->exec_offset = \ + mmap_exec_offset((S)->base, (S)->size)), \ + (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \ + (S)->exec_offset) ? (ABORT, (v)) : \ + (mmap_exec_offset((S)->base, (S)->size) = 0), (v))) + + /* We use an offset here, instead of a pointer, because then, when + base changes, we don't have to modify this. On architectures + with segmented addresses, this might not work. */ + ptrdiff_t exec_offset; +#else + +# define get_segment_flags(S) ((S)->sflags) +# define set_segment_flags(S,v) ((S)->sflags = (v)) +# define check_segment_merge(S,b,s) (1) + + flag_t sflags; /* mmap and extern flag */ +#endif +}; + +#define is_mmapped_segment(S) (get_segment_flags(S) & IS_MMAPPED_BIT) +#define is_extern_segment(S) (get_segment_flags(S) & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). 
Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. 
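+
+  For example, a call along the lines of mallopt(M_GRANULARITY, 65536)
+  ends up in change_mparam() further below, which accepts a new
+  granularity only if it is a power of two no smaller than the current
+  page size, while M_TRIM_THRESHOLD and M_MMAP_THRESHOLD are stored as
+  given.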
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +/* Ensure locks are initialized */ +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) + +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. 
+ USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. 
See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#if defined(__GNUC__) && defined(__i386__) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* index corresponding to given bit */ + +#if defined(__GNUC__) && defined(__i386__) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ + I = (bindex_t)J;\ +} + +#else /* GNUC */ +#if USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = ffs(X)-1 + +#else /* USE_BUILTIN_FFS */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* USE_BUILTIN_FFS */ +#endif /* GNUC */ + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). 
In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probablistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. +*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has its cinuse bit on */ +#define ok_cinuse(p) cinuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_cinuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = 
(s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +/* Initialize mparams */ +static int init_mparams(void) { + if (mparams.page_size == 0) { + size_t s; + + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + s = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ + s = (size_t)(time(0) ^ (size_t)0x55555555U); + + s |= (size_t)8U; /* ensure nonzero */ + s &= ~(size_t)7U; /* improve chances of fault for bad values */ + + } +#else /* (FOOTERS && !INSECURE) */ + s = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + ACQUIRE_MAGIC_INIT_LOCK(); + if (mparams.magic == 0) { + mparams.magic = s; + /* Set up lock for main malloc area */ + INITIAL_LOCK(&gm->mutex); + gm->mflags = mparams.default_mflags; + } + RELEASE_MAGIC_INIT_LOCK(); + +#if !defined(WIN32) && !defined(__OS2__) + mparams.page_size = malloc_getpagesize; + mparams.granularity = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : mparams.page_size); +#elif defined (__OS2__) + /* if low-memory is used, os2munmap() would break + if it were anything other than 64k */ + mparams.page_size = 4096u; + mparams.granularity = 65536u; +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + mparams.page_size = system_info.dwPageSize; + mparams.granularity = system_info.dwAllocationGranularity; + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. 
+ */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) + ABORT; + } + return 0; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (size_t)value; + init_mparams(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = chunksize(p); + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!next_pinuse(p)); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, 
p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. */ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + assert(m->topsize == chunksize(m->top)); + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while 
(s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. 
If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendants + correspond properly to bit masks. We use the rightmost descendant + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + (m == gm)? dlmalloc(b) : mspace_malloc(m, b) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. 
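+ + A rough worked illustration (assuming a 64-bit build with SIZE_T_SIZE == 8, + the default MALLOC_ALIGNMENT of 16, and a 64 KB granularity; exact numbers + vary by platform): a request of nb == 300000 bytes is padded and rounded by + mmap_alloc below to mmsize = granularity_align(300000 + 48 + 15) = 327680 + (five 64 KB units), the usable chunk size becomes psize = mmsize - offset - + MMAP_FOOT_PAD, and the alignment offset is recovered at free time from + prev_foot & ~IS_MMAPPED_BIT.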
+*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. 
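+ Called from sys_alloc when a newly obtained region ends exactly where an + existing segment begins: nb bytes are carved from the front of the new + space, and the gap left before that segment's old first chunk becomes top, + dv, or a binned free chunk (consolidating with the old first chunk when it + is itself free).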
*/ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmapped); + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + init_mparams(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. 
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + size_t asize = 0; + ACQUIRE_MORECORE_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + asize += (page_align((size_t)base) - (size_t)base); + /* Can't call MORECORE if size is negative when treated as signed */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == base) { + tbase = base; + tsize = asize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Use mem here only if it did continuously extend old space */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { + tbase = br; + tsize = asize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (asize < HALF_MAX_SIZE_T && + asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { + size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + asize += esize; + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-asize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = asize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MORECORE_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (rsize > nb) { /* Fail if wraps around zero */ + char* mp = (char*)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + mmap_flag = IS_MMAPPED_BIT; + } + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MORECORE_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MORECORE_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + m->seg.base = m->least_addr = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmap_flag); + m->magic = mparams.magic; 
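+ /* segment and flags recorded above; now set up the empty bins and carve the initial top chunk */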
+ init_bins(m); + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if 
(HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MORECORE_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MORECORE_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if 
(rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. 
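+ + A small hypothetical example (64-bit build, where chunk2mem() adds 16 and + MIN_CHUNK_SIZE is 32): with alignment == 64 and malloc returning mem at + 0x1030 (so chunk p sits at 0x1020), the first aligned spot is 0x1040, whose + chunk address br = 0x1030 would leave only a 16-byte leader; the code below + therefore steps to br + alignment = 0x1070, leaving an 80-byte leader that + can be given back as a free chunk, and the returned memory at 0x1080 is + 64-byte aligned.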
+ */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? + br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. + + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
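+ (The pieces handed back to the caller are produced by splitting this one + chunk in place -- including, when the caller does not supply one, the + trailing pointer array -- which is only possible for an ordinary, + non-mmapped chunk.)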
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
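+ + In outline: a chunk that was directly mmapped is handed straight back to the + system with munmap; otherwise it is first merged backward into a free + predecessor (or into dv), then forward into top, dv, or a following free + chunk, and whatever results is placed in a smallbin or treebin unless it + ended up as top or the designated victim.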
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->mflags = mparams.default_mflags; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + set_segment_flags(&m->seg, IS_MMAPPED_BIT); + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + set_segment_flags(&m->seg, EXTERN_BIT); + set_lock(m, locked); + } + return (mspace)m; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = get_segment_flags(sp); + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != 
elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. 
+ * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. + + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. + * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. + + V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) + * Fix improper #endif nesting in C++ + * Add explicit casts needed for C++ + + V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) + * Use trees for large bins + * Support mspaces + * Use segments to unify sbrk-based and mmap-based system allocation, + removing need for emulation on most platforms without sbrk. + * Default safety checks + * Optional footer checks. Thanks to William Robertson for the idea. + * Internal code refactoring + * Incorporate suggestions and platform-specific changes. + Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, + Aaron Bachmann, Emery Berger, and others. + * Speed up non-fastbin processing enough to remove fastbins. + * Remove useless cfree() to avoid conflicts with other apps. + * Remove internal memcpy, memset. 
Compilers handle builtins better. + * Remove some options that no one ever used and rename others. + + V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + * Fix malloc_state bitmap array misdeclaration + + V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) + * Allow tuning of FIRST_SORTED_BIN_SIZE + * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. + * Better detection and support for non-contiguousness of MORECORE. + Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger + * Bypass most of malloc if no frees. Thanks To Emery Berger. + * Fix freeing of old top non-contiguous chunk im sysmalloc. + * Raised default trim and map thresholds to 256K. + * Fix mmap-related #defines. Thanks to Lubos Lunak. + * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. + * Branch-free bin calculation + * Default trim and mmap thresholds now 256K. + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. 
+ * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occurring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) 
+ +*/ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi.S new file mode 100644 index 0000000..379ea4b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/eabi.S @@ -0,0 +1,128 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2004 Anthony Green + + FR-V Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # gr8 : ffi_prep_args + # gr9 : &ecif + # gr10: cif->bytes + # gr11: fig->flags + # gr12: ecif.rvalue + # gr13: fn + +ffi_call_EABI: + addi sp, #-80, sp + sti fp, @(sp, #24) + addi sp, #24, fp + movsg lr, gr5 + + /* Make room for the new arguments. */ + /* subi sp, fp, gr10 */ + + /* Store return address and incoming args on stack. */ + sti gr5, @(fp, #8) + sti gr8, @(fp, #-4) + sti gr9, @(fp, #-8) + sti gr10, @(fp, #-12) + sti gr11, @(fp, #-16) + sti gr12, @(fp, #-20) + sti gr13, @(fp, #-24) + + sub sp, gr10, sp + + /* Call ffi_prep_args. */ + ldi @(fp, #-4), gr4 + addi sp, #0, gr8 + ldi @(fp, #-8), gr9 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* ffi_prep_args returns the new stack pointer. */ + mov gr8, gr4 + + ldi @(sp, #0), gr8 + ldi @(sp, #4), gr9 + ldi @(sp, #8), gr10 + ldi @(sp, #12), gr11 + ldi @(sp, #16), gr12 + ldi @(sp, #20), gr13 + + /* Always copy the return value pointer into the hidden + parameter register. This is only strictly necessary + when we're returning an aggregate type, but it doesn't + hurt to do this all the time, and it saves a branch. */ + ldi @(fp, #-20), gr3 + + /* Use the ffi_prep_args return value for the new sp. */ + mov gr4, sp + + /* Call the target function. */ + ldi @(fp, -24), gr4 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* Store the result. */ + ldi @(fp, #-16), gr10 /* fig->flags */ + ldi @(fp, #-20), gr4 /* ecif.rvalue */ + + /* Is the return value stored in two registers? */ + cmpi gr10, #8, icc0 + bne icc0, 0, .L2 + /* Yes, save them. */ + sti gr8, @(gr4, #0) + sti gr9, @(gr4, #4) + bra .L3 +.L2: + /* Is the return value a structure? */ + cmpi gr10, #-1, icc0 + beq icc0, 0, .L3 + /* No, save a 4 byte return value. 
*/ + sti gr8, @(gr4, #0) +.L3: + + /* Restore the stack, and return. */ + ldi @(fp, 8), gr5 + ld @(fp, gr0), fp + addi sp,#80,sp + jmpl @(gr5,gr0) + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi.c new file mode 100644 index 0000000..ed1c65a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffi.c @@ -0,0 +1,292 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2004 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2008 Red Hat, Inc. + + FR-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + /* if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (count > 24) + { + // This is going on the stack. Turn it into a double. + *(double *) argp = (double) *(float*)(* p_argv); + z = sizeof(double); + } + else + *(void **) argp = *(void **)(* p_argv); + } */ + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 
24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in gr7. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("gr7"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("fp"); + char *stack_args = frame_pointer + 16; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + avalue[i] = ptr; + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == ((char *)register_args + (6*4))) + ptr = stack_args; + } + + /* Invoke the closure. */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + /* The caller allocates space for the return structure, and + passes a pointer to this space in gr3. Use this value directly + as the return value. */ + register void *return_struct_ptr __asm__("gr3"); + (closure->fun) (cif, return_struct_ptr, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + + /* Functions return 4-byte or smaller results in gr8. 8-byte + values also use gr9. We fill the both, even for small return + values, just to avoid a branch. 
*/ + asm ("ldi @(%0, #0), gr8" : : "r" (&rvalue)); + asm ("ldi @(%0, #0), gr9" : : "r" (&((int *) &rvalue)[1])); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; +#ifdef __FRV_FDPIC__ + register void *got __asm__("gr15"); +#endif + int i; + + fn = (unsigned long) ffi_closure_eabi; + +#ifdef __FRV_FDPIC__ + tramp[0] = &((unsigned int *)codeloc)[2]; + tramp[1] = got; + tramp[2] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[3] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[4] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[5] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[6] = 0x9cc86000; /* ldi @(gr6, #0), gr14 */ + tramp[7] = 0x8030e000; /* jmpl @(gr14, gr0) */ +#else + tramp[0] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[1] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[2] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[3] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[4] = 0x80300006; /* jmpl @(gr0, gr6) */ +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Cache flushing. */ + for (i = 0; i < FFI_TRAMPOLINE_SIZE; i++) + __asm__ volatile ("dcf @(%0,%1)\n\tici @(%2,%1)" :: "r" (tramp), "r" (i), + "r" (codeloc)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget.h new file mode 100644 index 0000000..d42540e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/frv/ffitarget.h @@ -0,0 +1,62 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2004 Red Hat, Inc. + Target configuration macros for FR-V + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_EABI +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef __FRV_FDPIC__ +/* Trampolines are 8 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (8*4) +#else +/* Trampolines are 5 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (5*4) +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi.c new file mode 100644 index 0000000..b1d04c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffi.c @@ -0,0 +1,604 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + Copyright (c) 2011 Anthony Green + + IA64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include + +#include "ia64_flags.h" + +/* A 64-bit pointer value. In LP64 mode, this is effectively a plain + pointer. In ILP32 mode, it's a pointer that's been extended to + 64 bits by "addp4". */ +typedef void *PTR64 __attribute__((mode(DI))); + +/* Memory image of fp register contents. This is the implementation + specific format used by ldf.fill/stf.spill. All we care about is + that it wants a 16 byte aligned slot. */ +typedef struct +{ + UINT64 x[2] __attribute__((aligned(16))); +} fpreg; + + +/* The stack layout given to ffi_call_unix and ffi_closure_unix_inner. */ + +struct ia64_args +{ + fpreg fp_regs[8]; /* Contents of 8 fp arg registers. */ + UINT64 gp_regs[8]; /* Contents of 8 gp arg registers. */ + UINT64 other_args[]; /* Arguments passed on stack, variable size. */ +}; + + +/* Adjust ADDR, a pointer to an 8 byte slot, to point to the low LEN bytes. */ + +static inline void * +endian_adjust (void *addr, size_t len) +{ +#ifdef __BIG_ENDIAN__ + return addr + (8 - len); +#else + return addr; +#endif +} + +/* Store VALUE to ADDR in the current cpu implementation's fp spill format. 
+ This is a macro instead of a function, so that it works for all 3 floating + point types without type conversions. Type conversion to long double breaks + the denorm support. */ + +#define stf_spill(addr, value) \ + asm ("stf.spill %0 = %1%P0" : "=m" (*addr) : "f"(value)); + +/* Load a value from ADDR, which is in the current cpu implementation's + fp spill format. As above, this must also be a macro. */ + +#define ldf_fill(result, addr) \ + asm ("ldf.fill %0 = %1%P1" : "=f"(result) : "m"(*addr)); + +/* Return the size of the C type associated with with TYPE. Which will + be one of the FFI_IA64_TYPE_HFA_* values. */ + +static size_t +hfa_type_size (int type) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + return sizeof(float); + case FFI_IA64_TYPE_HFA_DOUBLE: + return sizeof(double); + case FFI_IA64_TYPE_HFA_LDOUBLE: + return sizeof(__float80); + default: + abort (); + } +} + +/* Load from ADDR a value indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. */ + +static void +hfa_type_load (fpreg *fpaddr, int type, void *addr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + stf_spill (fpaddr, *(float *) addr); + return; + case FFI_IA64_TYPE_HFA_DOUBLE: + stf_spill (fpaddr, *(double *) addr); + return; + case FFI_IA64_TYPE_HFA_LDOUBLE: + stf_spill (fpaddr, *(__float80 *) addr); + return; + default: + abort (); + } +} + +/* Load VALUE into ADDR as indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. */ + +static void +hfa_type_store (int type, void *addr, fpreg *fpaddr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + { + float result; + ldf_fill (result, fpaddr); + *(float *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_DOUBLE: + { + double result; + ldf_fill (result, fpaddr); + *(double *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_LDOUBLE: + { + __float80 result; + ldf_fill (result, fpaddr); + *(__float80 *) addr = result; + break; + } + default: + abort (); + } +} + +/* Is TYPE a struct containing floats, doubles, or extended doubles, + all of the same fp type? If so, return the element type. Return + FFI_TYPE_VOID if not. */ + +static int +hfa_element_type (ffi_type *type, int nested) +{ + int element = FFI_TYPE_VOID; + + switch (type->type) + { + case FFI_TYPE_FLOAT: + /* We want to return VOID for raw floating-point types, but the + synthetic HFA type if we're nested within an aggregate. */ + if (nested) + element = FFI_IA64_TYPE_HFA_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + /* Similarly. */ + if (nested) + element = FFI_IA64_TYPE_HFA_DOUBLE; + break; + + case FFI_TYPE_LONGDOUBLE: + /* Similarly, except that that HFA is true for double extended, + but not quad precision. Both have sizeof == 16, so tell the + difference based on the precision. */ + if (LDBL_MANT_DIG == 64 && nested) + element = FFI_IA64_TYPE_HFA_LDOUBLE; + break; + + case FFI_TYPE_STRUCT: + { + ffi_type **ptr = &type->elements[0]; + + for (ptr = &type->elements[0]; *ptr ; ptr++) + { + int sub_element = hfa_element_type (*ptr, 1); + if (sub_element == FFI_TYPE_VOID) + return FFI_TYPE_VOID; + + if (element == FFI_TYPE_VOID) + element = sub_element; + else if (element != sub_element) + return FFI_TYPE_VOID; + } + } + break; + + default: + return FFI_TYPE_VOID; + } + + return element; +} + + +/* Perform machine dependent cif processing. */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + int flags; + + /* Adjust cif->bytes to include space for the bits of the ia64_args frame + that precedes the integer register portion. 
The estimate that the + generic bits did for the argument space required is good enough for the + integer component. */ + cif->bytes += offsetof(struct ia64_args, gp_regs[0]); + if (cif->bytes < sizeof(struct ia64_args)) + cif->bytes = sizeof(struct ia64_args); + + /* Set the return type flag. */ + flags = cif->rtype->type; + switch (cif->rtype->type) + { + case FFI_TYPE_LONGDOUBLE: + /* Leave FFI_TYPE_LONGDOUBLE as meaning double extended precision, + and encode quad precision as a two-word integer structure. */ + if (LDBL_MANT_DIG != 64) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (16 << 8); + break; + + case FFI_TYPE_STRUCT: + { + size_t size = cif->rtype->size; + int hfa_type = hfa_element_type (cif->rtype, 0); + + if (hfa_type != FFI_TYPE_VOID) + { + size_t nelts = size / hfa_type_size (hfa_type); + if (nelts <= 8) + flags = hfa_type | (size << 8); + } + else + { + if (size <= 32) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (size << 8); + } + } + break; + + default: + break; + } + cif->flags = flags; + + return FFI_OK; +} + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status +ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern int ffi_call_unix (struct ia64_args *, PTR64, void (*)(void), UINT64); + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + struct ia64_args *stack; + long i, avn, gpcount, fpcount; + ffi_type **p_arg; + + FFI_ASSERT (cif->abi == FFI_UNIX); + + /* If we have no spot for a return value, make one. */ + if (rvalue == NULL && cif->rtype->type != FFI_TYPE_VOID) + rvalue = alloca (cif->rtype->size); + + /* Allocate the stack frame. 
*/ + stack = alloca (cif->bytes); + + gpcount = fpcount = 0; + avn = cif->nargs; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + stack->gp_regs[gpcount++] = *(SINT8 *)avalue[i]; + break; + case FFI_TYPE_UINT8: + stack->gp_regs[gpcount++] = *(UINT8 *)avalue[i]; + break; + case FFI_TYPE_SINT16: + stack->gp_regs[gpcount++] = *(SINT16 *)avalue[i]; + break; + case FFI_TYPE_UINT16: + stack->gp_regs[gpcount++] = *(UINT16 *)avalue[i]; + break; + case FFI_TYPE_SINT32: + stack->gp_regs[gpcount++] = *(SINT32 *)avalue[i]; + break; + case FFI_TYPE_UINT32: + stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i]; + break; + + case FFI_TYPE_POINTER: + stack->gp_regs[gpcount++] = (UINT64)(PTR64) *(void **)avalue[i]; + break; + + case FFI_TYPE_FLOAT: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]); + { + UINT32 tmp; + memcpy (&tmp, avalue[i], sizeof (UINT32)); + stack->gp_regs[gpcount++] = tmp; + } + break; + + case FFI_TYPE_DOUBLE: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]); + memcpy (&stack->gp_regs[gpcount++], avalue[i], sizeof (UINT64)); + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(__float80 *)avalue[i]); + memcpy (&stack->gp_regs[gpcount], avalue[i], 16); + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_load (&stack->fp_regs[fpcount], hfa_type, + avalue[i] + offset); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + } + + memcpy (&stack->gp_regs[gpcount], avalue[i], size); + gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + ffi_call_unix (stack, rvalue, fn, cif->flags); +} + +/* Closures represent a pair consisting of a function pointer, and + some user data. A closure is invoked by reinterpreting the closure + as a function pointer, and branching to it. Thus we can make an + interpreted function callable as a C function: We turn the + interpreter itself, together with a pointer specifying the + interpreted procedure, into a closure. + + For IA64, function pointer are already pairs consisting of a code + pointer, and a gp pointer. The latter is needed to access global + variables. Here we set up such a pair as the first two words of + the closure (in the "trampoline" area), but we replace the gp + pointer with a pointer to the closure itself. We also add the real + gp pointer to the closure. This allows the function entry code to + both retrieve the user data, and to restore the correct gp pointer. */ + +extern void ffi_closure_unix (); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + /* The layout of a function descriptor. A C function pointer really + points to one of these. 
*/ + struct ia64_fd + { + UINT64 code_pointer; + UINT64 gp; + }; + + struct ffi_ia64_trampoline_struct + { + UINT64 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT64 fake_gp; /* Pointer to closure, installed as gp. */ + UINT64 real_gp; /* Real gp value. */ + }; + + struct ffi_ia64_trampoline_struct *tramp; + struct ia64_fd *fd; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp; + fd = (struct ia64_fd *)(void *)ffi_closure_unix; + + tramp->code_pointer = fd->code_pointer; + tramp->real_gp = fd->gp; + tramp->fake_gp = (UINT64)(PTR64)codeloc; + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +UINT64 +ffi_closure_unix_inner (ffi_closure *closure, struct ia64_args *stack, + void *rvalue, void *r8) +{ + ffi_cif *cif; + void **avalue; + ffi_type **p_arg; + long i, avn, gpcount, fpcount, nfixedargs; + + cif = closure->cif; + avn = cif->nargs; + nfixedargs = cif->nfixedargs; + avalue = alloca (avn * sizeof (void *)); + + /* If the structure return value is passed in memory get that location + from r8 so as to pass the value directly back to the caller. */ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = r8; + + gpcount = fpcount = 0; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + int named = i < nfixedargs; + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 1); + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 2); + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 4); + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + avalue[i] = &stack->gp_regs[gpcount++]; + break; + case FFI_TYPE_POINTER: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], sizeof(void*)); + break; + + case FFI_TYPE_FLOAT: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + float result; + avalue[i] = addr; + ldf_fill (result, addr); + *(float *)addr = result; + } + else + avalue[i] = endian_adjust(&stack->gp_regs[gpcount], 4); + gpcount++; + break; + + case FFI_TYPE_DOUBLE: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + double result; + avalue[i] = addr; + ldf_fill (result, addr); + *(double *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount++; + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + __float80 result; + avalue[i] = addr; + ldf_fill (result, addr); + *(__float80 *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + void *addr = alloca (size); + + avalue[i] = addr; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_store (hfa_type, addr + offset, + &stack->fp_regs[fpcount]); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + + if (offset < size) + memcpy (addr + offset, 
(char *)stack->gp_regs + gp_offset, + size - offset); + } + else + avalue[i] = &stack->gp_regs[gpcount]; + + gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + closure->fun (cif, rvalue, avalue, closure->user_data); + + return cif->flags; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget.h new file mode 100644 index 0000000..fd5b9a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ffitarget.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for IA-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
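Most of the ia64 port above is aggregate classification: hfa_element_type decides whether a struct is a homogeneous floating-point aggregate, and ffi_prep_cif_machdep folds that result and the size into cif->flags so the assembly can pick a return path. None of this is visible to the caller, who only describes the struct with an ffi_type whose size and alignment are left at 0 for ffi_prep_cif to compute. The sketch below is illustrative only: make_point is a made-up target function, and ffi_prep_cif, FFI_FN and the ffi_type_* descriptors are assumed from the public ffi.h; ffi_call itself is the routine defined above.

#include <stdio.h>
#include <ffi.h>

struct point { double x, y; };   /* per hfa_element_type, an HFA of doubles */

static struct point make_point(double x, double y)   /* hypothetical target */
{
  struct point p = { x, y };
  return p;
}

int main(void)
{
  /* Describe struct point: size and alignment start at 0 and are
     computed by ffi_prep_cif from the NULL-terminated element list. */
  ffi_type *point_elements[] = { &ffi_type_double, &ffi_type_double, NULL };
  ffi_type point_type = { 0, 0, FFI_TYPE_STRUCT, point_elements };

  ffi_cif cif;
  ffi_type *arg_types[] = { &ffi_type_double, &ffi_type_double };
  double x = 1.5, y = 2.5;
  void *arg_values[] = { &x, &y };
  struct point result;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &point_type, arg_types) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(make_point), &result, arg_values);
      printf("(%g, %g)\n", result.x, result.y);
    }
  return 0;
}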
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, /* Linux and all Unix variants use the same conventions */ + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 /* Really the following struct, which */ + /* can be interpreted as a C function */ + /* descriptor: */ +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags.h new file mode 100644 index 0000000..9d652ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/ia64_flags.h @@ -0,0 +1,40 @@ +/* ----------------------------------------------------------------------- + ia64_flags.h - Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Original author: Hans Boehm, HP Labs + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* "Type" codes used between assembly and C. When used as a part of + a cfi->flags value, the low byte will be these extra type codes, + and bits 8-31 will be the actual size of the type. */ + +/* Small structures containing N words in integer registers. */ +#define FFI_IA64_TYPE_SMALL_STRUCT (FFI_TYPE_LAST + 1) + +/* Homogeneous Floating Point Aggregates (HFAs) which are returned + in FP registers. */ +#define FFI_IA64_TYPE_HFA_FLOAT (FFI_TYPE_LAST + 2) +#define FFI_IA64_TYPE_HFA_DOUBLE (FFI_TYPE_LAST + 3) +#define FFI_IA64_TYPE_HFA_LDOUBLE (FFI_TYPE_LAST + 4) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix.S new file mode 100644 index 0000000..e2547e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/ia64/unix.S @@ -0,0 +1,567 @@ +/* ----------------------------------------------------------------------- + unix.S - Copyright (c) 1998, 2008 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Primary author: Hans Boehm, HP Labs + + Loosely modeled on Cygnus code for other platforms. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "ia64_flags.h" + + .pred.safe_across_calls p1-p5,p16-p63 +.text + +/* int ffi_call_unix (struct ia64_args *stack, PTR64 rvalue, + void (*fn)(void), int flags); + */ + + .align 16 + .global ffi_call_unix + .proc ffi_call_unix +ffi_call_unix: + .prologue + /* Bit o trickiness. We actually share a stack frame with ffi_call. + Rely on the fact that ffi_call uses a vframe and don't bother + tracking one here at all. */ + .fframe 0 + .save ar.pfs, r36 // loc0 + alloc loc0 = ar.pfs, 4, 3, 8, 0 + .save rp, loc1 + mov loc1 = b0 + .body + add r16 = 16, in0 + mov loc2 = gp + mov r8 = in1 + ;; + + /* Load up all of the argument registers. */ + ldf.fill f8 = [in0], 32 + ldf.fill f9 = [r16], 32 + ;; + ldf.fill f10 = [in0], 32 + ldf.fill f11 = [r16], 32 + ;; + ldf.fill f12 = [in0], 32 + ldf.fill f13 = [r16], 32 + ;; + ldf.fill f14 = [in0], 32 + ldf.fill f15 = [r16], 24 + ;; + ld8 out0 = [in0], 16 + ld8 out1 = [r16], 16 + ;; + ld8 out2 = [in0], 16 + ld8 out3 = [r16], 16 + ;; + ld8 out4 = [in0], 16 + ld8 out5 = [r16], 16 + ;; + ld8 out6 = [in0] + ld8 out7 = [r16] + ;; + + /* Deallocate the register save area from the stack frame. */ + mov sp = in0 + + /* Call the target function. */ + ld8 r16 = [in2], 8 + ;; + ld8 gp = [in2] + mov b6 = r16 + br.call.sptk.many b0 = b6 + ;; + + /* Dispatch to handle return value. 
*/ + mov gp = loc2 + zxt1 r16 = in3 + ;; + mov ar.pfs = loc0 + addl r18 = @ltoffx(.Lst_table), gp + ;; + ld8.mov r18 = [r18], .Lst_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + ;; + ld8 r17 = [r18] + shr in3 = in3, 8 + ;; + add r17 = r17, r18 + ;; + mov b6 = r17 + br b6 + ;; + +.Lst_void: + br.ret.sptk.many b0 + ;; +.Lst_uint8: + zxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint8: + sxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint16: + zxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint16: + sxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint32: + zxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint32: + sxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_int64: + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_float: + stfs [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_double: + stfd [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_ldouble: + stfe [in1] = f8 + br.ret.sptk.many b0 + ;; + +.Lst_small_struct: + cmp.lt p6, p0 = 8, in3 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; + add r16 = 8, sp + add r17 = 16, sp + add r18 = 24, sp + ;; + st8 [sp] = r8 +(p6) st8 [r16] = r9 + mov out0 = in1 +(p7) st8 [r17] = r10 +(p8) st8 [r18] = r11 + mov out1 = sp + mov out2 = in3 + ;; + // ia64 software calling convention requires + // top 16 bytes of stack to be scratch space + // PLT resolver uses that scratch space at + // 'memcpy' symbol reolution time + add sp = -16, sp + br.call.sptk.many b0 = memcpy# + ;; + mov ar.pfs = loc0 + mov b0 = loc1 + mov gp = loc2 + br.ret.sptk.many b0 + +.Lst_hfa_float: + add r16 = 4, in1 + cmp.lt p6, p0 = 4, in3 + ;; + stfs [in1] = f8, 8 +(p6) stfs [r16] = f9, 8 + cmp.lt p7, p0 = 8, in3 + cmp.lt p8, p0 = 12, in3 + ;; +(p7) stfs [in1] = f10, 8 +(p8) stfs [r16] = f11, 8 + cmp.lt p9, p0 = 16, in3 + cmp.lt p10, p0 = 20, in3 + ;; +(p9) stfs [in1] = f12, 8 +(p10) stfs [r16] = f13, 8 + cmp.lt p6, p0 = 24, in3 + cmp.lt p7, p0 = 28, in3 + ;; +(p6) stfs [in1] = f14 +(p7) stfs [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_double: + add r16 = 8, in1 + cmp.lt p6, p0 = 8, in3 + ;; + stfd [in1] = f8, 16 +(p6) stfd [r16] = f9, 16 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; +(p7) stfd [in1] = f10, 16 +(p8) stfd [r16] = f11, 16 + cmp.lt p9, p0 = 32, in3 + cmp.lt p10, p0 = 40, in3 + ;; +(p9) stfd [in1] = f12, 16 +(p10) stfd [r16] = f13, 16 + cmp.lt p6, p0 = 48, in3 + cmp.lt p7, p0 = 56, in3 + ;; +(p6) stfd [in1] = f14 +(p7) stfd [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_ldouble: + add r16 = 16, in1 + cmp.lt p6, p0 = 16, in3 + ;; + stfe [in1] = f8, 32 +(p6) stfe [r16] = f9, 32 + cmp.lt p7, p0 = 32, in3 + cmp.lt p8, p0 = 48, in3 + ;; +(p7) stfe [in1] = f10, 32 +(p8) stfe [r16] = f11, 32 + cmp.lt p9, p0 = 64, in3 + cmp.lt p10, p0 = 80, in3 + ;; +(p9) stfe [in1] = f12, 32 +(p10) stfe [r16] = f13, 32 + cmp.lt p6, p0 = 96, in3 + cmp.lt p7, p0 = 112, in3 + ;; +(p6) stfe [in1] = f14 +(p7) stfe [r16] = f15 + br.ret.sptk.many b0 + ;; + + .endp ffi_call_unix + + .align 16 + .global ffi_closure_unix + .proc ffi_closure_unix + +#define FRAME_SIZE (8*16 + 8*8 + 8*16) + +ffi_closure_unix: + .prologue + .save ar.pfs, r40 // loc0 + alloc loc0 = ar.pfs, 8, 4, 4, 0 + .fframe FRAME_SIZE + add r12 = -FRAME_SIZE, r12 + .save rp, loc1 + mov loc1 = b0 + .save ar.unat, loc2 + mov loc2 = ar.unat + .body + + /* Retrieve closure pointer and real gp. 
*/ +#ifdef _ILP32 + addp4 out0 = 0, gp + addp4 gp = 16, gp +#else + mov out0 = gp + add gp = 16, gp +#endif + ;; + ld8 gp = [gp] + + /* Spill all of the possible argument registers. */ + add r16 = 16 + 8*16, sp + add r17 = 16 + 8*16 + 16, sp + ;; + stf.spill [r16] = f8, 32 + stf.spill [r17] = f9, 32 + mov loc3 = gp + ;; + stf.spill [r16] = f10, 32 + stf.spill [r17] = f11, 32 + ;; + stf.spill [r16] = f12, 32 + stf.spill [r17] = f13, 32 + ;; + stf.spill [r16] = f14, 32 + stf.spill [r17] = f15, 24 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in0, 16 + .mem.offset 8, 0 + st8.spill [r17] = in1, 16 + add out1 = 16 + 8*16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in2, 16 + .mem.offset 8, 0 + st8.spill [r17] = in3, 16 + add out2 = 16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in4, 16 + .mem.offset 8, 0 + st8.spill [r17] = in5, 16 + mov out3 = r8 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in6 + .mem.offset 8, 0 + st8.spill [r17] = in7 + + /* Invoke ffi_closure_unix_inner for the hard work. */ + br.call.sptk.many b0 = ffi_closure_unix_inner + ;; + + /* Dispatch to handle return value. */ + mov gp = loc3 + zxt1 r16 = r8 + ;; + addl r18 = @ltoffx(.Lld_table), gp + mov ar.pfs = loc0 + ;; + ld8.mov r18 = [r18], .Lld_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + mov ar.unat = loc2 + ;; + ld8 r17 = [r18] + shr r8 = r8, 8 + ;; + add r17 = r17, r18 + add r16 = 16, sp + ;; + mov b6 = r17 + br b6 + ;; + .label_state 1 + +.Lld_void: + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_int: + .body + .copy_state 1 + ld8 r8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_float: + .body + .copy_state 1 + ldfs f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_double: + .body + .copy_state 1 + ldfd f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_ldouble: + .body + .copy_state 1 + ldfe f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_small_struct: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; + ld8 r8 = [r16], 16 +(p6) ld8 r9 = [r17], 16 + ;; +(p7) ld8 r10 = [r16] +(p8) ld8 r11 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_float: + .body + .copy_state 1 + add r17 = 4, r16 + cmp.lt p6, p0 = 4, r8 + ;; + ldfs f8 = [r16], 8 +(p6) ldfs f9 = [r17], 8 + cmp.lt p7, p0 = 8, r8 + cmp.lt p8, p0 = 12, r8 + ;; +(p7) ldfs f10 = [r16], 8 +(p8) ldfs f11 = [r17], 8 + cmp.lt p9, p0 = 16, r8 + cmp.lt p10, p0 = 20, r8 + ;; +(p9) ldfs f12 = [r16], 8 +(p10) ldfs f13 = [r17], 8 + cmp.lt p6, p0 = 24, r8 + cmp.lt p7, p0 = 28, r8 + ;; +(p6) ldfs f14 = [r16] +(p7) ldfs f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_double: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + ;; + ldfd f8 = [r16], 16 +(p6) ldfd f9 = [r17], 16 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; +(p7) ldfd f10 = [r16], 16 +(p8) ldfd f11 = [r17], 16 + cmp.lt p9, p0 = 32, r8 + cmp.lt p10, p0 = 40, r8 + ;; +(p9) ldfd f12 = [r16], 16 +(p10) ldfd f13 = [r17], 16 + cmp.lt p6, p0 = 48, r8 + cmp.lt p7, p0 = 56, r8 + ;; +(p6) ldfd f14 = [r16] +(p7) ldfd f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_ldouble: + .body + .copy_state 1 + add r17 = 16, r16 + cmp.lt p6, p0 = 16, r8 + ;; + ldfe f8 = [r16], 32 +(p6) ldfe f9 = [r17], 32 + cmp.lt p7, p0 = 32, r8 + cmp.lt p8, p0 = 
48, r8 + ;; +(p7) ldfe f10 = [r16], 32 +(p8) ldfe f11 = [r17], 32 + cmp.lt p9, p0 = 64, r8 + cmp.lt p10, p0 = 80, r8 + ;; +(p9) ldfe f12 = [r16], 32 +(p10) ldfe f13 = [r17], 32 + cmp.lt p6, p0 = 96, r8 + cmp.lt p7, p0 = 112, r8 + ;; +(p6) ldfe f14 = [r16] +(p7) ldfe f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + + .endp ffi_closure_unix + + .section .rodata + .align 8 +.Lst_table: + data8 @pcrel(.Lst_void) // FFI_TYPE_VOID + data8 @pcrel(.Lst_sint32) // FFI_TYPE_INT + data8 @pcrel(.Lst_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lst_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lst_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lst_uint8) // FFI_TYPE_UINT8 + data8 @pcrel(.Lst_sint8) // FFI_TYPE_SINT8 + data8 @pcrel(.Lst_uint16) // FFI_TYPE_UINT16 + data8 @pcrel(.Lst_sint16) // FFI_TYPE_SINT16 + data8 @pcrel(.Lst_uint32) // FFI_TYPE_UINT32 + data8 @pcrel(.Lst_sint32) // FFI_TYPE_SINT32 + data8 @pcrel(.Lst_int64) // FFI_TYPE_UINT64 + data8 @pcrel(.Lst_int64) // FFI_TYPE_SINT64 + data8 @pcrel(.Lst_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lst_int64) // FFI_TYPE_POINTER + data8 @pcrel(.Lst_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lst_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lst_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lst_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lst_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +.Lld_table: + data8 @pcrel(.Lld_void) // FFI_TYPE_VOID + data8 @pcrel(.Lld_int) // FFI_TYPE_INT + data8 @pcrel(.Lld_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lld_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lld_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT64 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT64 + data8 @pcrel(.Lld_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lld_int) // FFI_TYPE_POINTER + data8 @pcrel(.Lld_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lld_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lld_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lld_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lld_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c new file mode 100644 index 0000000..114d3e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/java_raw_api.c @@ -0,0 +1,374 @@ +/* ----------------------------------------------------------------------- + java_raw_api.c - Copyright (c) 1999, 2007, 2008 Red Hat, Inc. + + Cloned from raw_api.c + + Raw_api.c author: Kresten Krab Thorup + Java_raw_api.c author: Hans-J. 
Boehm + + $Id $ + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This defines a Java- and 64-bit specific variant of the raw API. */ +/* It assumes that "raw" argument blocks look like Java stacks on a */ +/* 64-bit machine. Arguments that can be stored in a single stack */ +/* stack slots (longs, doubles) occupy 128 bits, but only the first */ +/* 64 bits are actually used. */ + +#include +#include +#include + +#if !defined(NO_JAVA_RAW_API) + +size_t +ffi_java_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { + switch((*at) -> type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + result += 2 * FFI_SIZEOF_JAVA_RAW; + break; + case FFI_TYPE_STRUCT: + /* No structure parameters in Java. */ + abort(); + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + result += FFI_SIZEOF_JAVA_RAW; + } + } + + return result; +} + + +void +ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + 3); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + 2); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void *)raw; + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + *args = raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if FFI_SIZEOF_JAVA_RAW == 8 + switch((*tp)->type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void*) raw; + raw += 2; + break; + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + *args = (void*) raw++; + } +#else /* FFI_SIZEOF_JAVA_RAW != 8 */ + *args = (void*) raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif /* FFI_SIZEOF_JAVA_RAW == 8 */ + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT8*) (*args); +#else + (raw++)->uint = *(UINT8*) (*args); +#endif + break; + + case FFI_TYPE_SINT8: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT8*) (*args); +#else + (raw++)->sint = *(SINT8*) (*args); +#endif + break; + + case FFI_TYPE_UINT16: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT16*) (*args); +#else + (raw++)->uint = *(UINT16*) (*args); +#endif + break; + + case FFI_TYPE_SINT16: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT16*) (*args); +#else + (raw++)->sint = *(SINT16*) (*args); +#endif + break; + + case FFI_TYPE_UINT32: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT32*) (*args); +#else + (raw++)->uint = *(UINT32*) (*args); +#endif + break; + + case FFI_TYPE_SINT32: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT32*) (*args); +#else + (raw++)->sint = *(SINT32*) (*args); +#endif + break; + + case FFI_TYPE_FLOAT: + (raw++)->flt = *(FLOAT32*) (*args); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + raw->uint = *(UINT64*) (*args); + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: +#if FFI_SIZEOF_JAVA_RAW == 8 + FFI_ASSERT(0); /* Should have covered all cases */ +#else + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif + } + } +} + +#if !FFI_NATIVE_RAW_API + +static void +ffi_java_rvalue_to_raw (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: +#if FFI_SIZEOF_JAVA_RAW == 4 + case FFI_TYPE_POINTER: +#endif + *(SINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +static void +ffi_java_raw_to_rvalue (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: + *(SINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_java_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, + ffi_java_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_java_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); + ffi_java_rvalue_to_raw (cif, rvalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_java_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_java_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data); + ffi_java_raw_to_rvalue (cif, rvalue); +} + +ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_java_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) +{ + return ffi_prep_java_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ +#endif /* !NO_JAVA_RAW_API */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi.c new file mode 100644 index 0000000..ab8fc4e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffi.c @@ -0,0 +1,232 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2004 Renesas Technology + Copyright (c) 2008 Red Hat, Inc. + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. 
*/ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + int tmp; + unsigned int avn; + void **p_argv; + char *argp; + ffi_type **p_arg; + + tmp = 0; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 8) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0) && (avn != 0); + i--, p_arg++) + { + size_t z; + + /* Align if necessary. */ + if (((*p_arg)->alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, (*p_arg)->alignment); + + if (avn != 0) + { + avn--; + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + z = (*p_arg)->size; + if ((*p_arg)->alignment != 1) + memcpy (argp, *p_argv, z); + else + memcpy (argp + 4 - z, *p_argv, z); + z = sizeof (int); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + if (z > 8) + { + *(unsigned int *) argp = (unsigned int)(void *)(* p_argv); + z = sizeof(void *); + } + else + { + memcpy(argp, *p_argv, z); + z = 8; + } + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_DOUBLE; + + else + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. 
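For reference, these per-port ffi_prep_cif_machdep/ffi_call routines are only ever reached through the portable libffi front end; a minimal sketch of that usage follows (puts is just an example target, everything else is the standard public API):

#include <stdio.h>
#include <ffi.h>

int main (void)
{
  ffi_cif cif;
  ffi_type *args[1] = { &ffi_type_pointer };
  char *s = "Hello, libffi";
  void *values[1] = { &s };
  ffi_arg rc;

  /* ffi_prep_cif() sets up the cif and invokes the machine-dependent
     ffi_prep_cif_machdep() defined in this file to set cif->flags. */
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK)
    ffi_call (&cif, FFI_FN (puts), &rc, values);

  return 0;
}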
*/ + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + int size = cif->rtype->size; + int align = cif->rtype->alignment; + + if (size < 4) + { + if (align == 1) + *(unsigned long *)(ecif.rvalue) <<= (4 - size) * 8; + } + else if (4 < size && size < 8) + { + if (align == 1) + { + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + else if (align == 2) + { + if (size & 1) + size += 1; + + if (size != 8) + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + } + } + break; + + default: + FFI_ASSERT(0); + break; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget.h new file mode 100644 index 0000000..6c34801 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2004 Renesas Technology. + Target configuration macros for M32R. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +#define FFI_CLOSURES 0 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv.S new file mode 100644 index 0000000..06b75c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m32r/sysv.S @@ -0,0 +1,121 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Renesas Technology + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)! .type CNAME(x),%function! CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* sp+0: ecif.rvalue */ + /* sp+4: fn */ + + /* This assumes we are using gas. */ +ENTRY(ffi_call_SYSV) + /* Save registers. */ + push fp + push lr + push r3 + push r2 + push r1 + push r0 + mv fp, sp + + /* Make room for all of the new args. */ + sub sp, r2 + + /* Place all of the ffi_prep_args in position. */ + mv lr, r0 + mv r0, sp + /* R1 already set. */ + + /* And call. */ + jl lr + + /* Move first 4 parameters in registers... */ + ld r0, @(0,sp) + ld r1, @(4,sp) + ld r2, @(8,sp) + ld r3, @(12,sp) + + /* ...and adjust the stack. */ + ld lr, @(8,fp) + cmpi lr, #16 + bc adjust_stack + ldi lr, #16 +adjust_stack: + add sp, lr + + /* Call the function. */ + ld lr, @(28,fp) + jl lr + + /* Remove the space we pushed for the args. */ + mv sp, fp + + /* Load R2 with the pointer to storage for the return value. */ + ld r2, @(24,sp) + + /* Load R3 with the return type code. */ + ld r3, @(12,sp) + + /* If the return value pointer is NULL, assume no return value. */ + beqz r2, epilogue + + /* Return INT. */ + ldi r4, #FFI_TYPE_INT + bne r3, r4, return_double + st r0, @r2 + bra epilogue + +return_double: + /* Return DOUBLE or LONGDOUBLE. 
*/ + ldi r4, #FFI_TYPE_DOUBLE + bne r3, r4, epilogue + st r0, @r2 + st r1, @(4,r2) + +epilogue: + pop r0 + pop r1 + pop r2 + pop r3 + pop lr + pop fp + jmp lr + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi.c new file mode 100644 index 0000000..0330184 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffi.c @@ -0,0 +1,362 @@ +/* ----------------------------------------------------------------------- + ffi.c + + m68k Foreign Function Interface + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#ifdef __rtems__ +void rtems_cache_flush_multiple_data_lines( const void *, size_t ); +#else +#include +#ifdef __MINT__ +#include +#include +#else +#include +#endif +#endif + +void ffi_call_SYSV (extended_cif *, + unsigned, unsigned, + void *, void (*fn) ()); +void *ffi_prep_args (void *stack, extended_cif *ecif); +void ffi_closure_SYSV (ffi_closure *); +void ffi_closure_struct_SYSV (ffi_closure *); +unsigned int ffi_closure_SYSV_inner (ffi_closure *closure, + void *resp, void *args); + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if ( +#ifdef __MINT__ + (ecif->cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((ecif->cif->rtype->type == FFI_TYPE_STRUCT) + && !ecif->cif->flags))) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z = (*p_arg)->size; + int type = (*p_arg)->type; + + if (z < sizeof (int)) + { + switch (type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: +#ifdef __MINT__ + if (z == 1 || z == 2) + memcpy (argp + 2, *p_argv, z); + else + memcpy (argp, *p_argv, z); +#else + memcpy (argp + sizeof (int) - z, *p_argv, z); +#endif + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. 
*/ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +#define CIF_FLAGS_INT 1 +#define CIF_FLAGS_DINT 2 +#define CIF_FLAGS_FLOAT 4 +#define CIF_FLAGS_DOUBLE 8 +#define CIF_FLAGS_LDOUBLE 16 +#define CIF_FLAGS_POINTER 32 +#define CIF_FLAGS_STRUCT1 64 +#define CIF_FLAGS_STRUCT2 128 +#define CIF_FLAGS_SINT8 256 +#define CIF_FLAGS_SINT16 512 + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + switch (cif->rtype->size) + { + case 1: +#ifdef __MINT__ + cif->flags = CIF_FLAGS_STRUCT2; +#else + cif->flags = CIF_FLAGS_STRUCT1; +#endif + break; + case 2: + cif->flags = CIF_FLAGS_STRUCT2; + break; +#ifdef __MINT__ + case 3: +#endif + case 4: + cif->flags = CIF_FLAGS_INT; + break; +#ifdef __MINT__ + case 7: +#endif + case 8: + cif->flags = CIF_FLAGS_DINT; + break; + default: + cif->flags = 0; + break; + } + break; + + case FFI_TYPE_FLOAT: + cif->flags = CIF_FLAGS_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = CIF_FLAGS_DOUBLE; + break; + +#if (FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE) + case FFI_TYPE_LONGDOUBLE: +#ifdef __MINT__ + cif->flags = 0; +#else + cif->flags = CIF_FLAGS_LDOUBLE; +#endif + break; +#endif + + case FFI_TYPE_POINTER: + cif->flags = CIF_FLAGS_POINTER; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + case FFI_TYPE_SINT16: + cif->flags = CIF_FLAGS_SINT16; + break; + + case FFI_TYPE_SINT8: + cif->flags = CIF_FLAGS_SINT8; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. 
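The CIF_FLAGS_* classification above decides how small struct returns come back in registers; a hedged sketch of exercising that case through the public API (pair_t, make_pair and the wrapper are hypothetical, the ffi_type plumbing is standard):

#include <ffi.h>

typedef struct { int x; int y; } pair_t;

static pair_t make_pair (int x, int y)
{
  pair_t p = { x, y };
  return p;
}

void call_struct_return (void)
{
  ffi_type *pair_elements[3] = { &ffi_type_sint, &ffi_type_sint, NULL };
  ffi_type pair_type;
  ffi_type *args[2] = { &ffi_type_sint, &ffi_type_sint };
  int a = 1, b = 2;
  void *values[2] = { &a, &b };
  pair_t result;
  ffi_cif cif;

  /* size and alignment are filled in by ffi_prep_cif() from the
     element list. */
  pair_type.size = 0;
  pair_type.alignment = 0;
  pair_type.type = FFI_TYPE_STRUCT;
  pair_type.elements = pair_elements;

  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &pair_type, args) == FFI_OK)
    ffi_call (&cif, FFI_FN (make_pair), &result, values);
}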
*/ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (&ecif, cif->bytes, cif->flags, + ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +static void +ffi_prep_incoming_args_SYSV (char *stack, void **avalue, ffi_cif *cif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; +#ifdef __MINT__ + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 1 || z == 2)) + { + *p_argv = (void *) (argp + 2); + + z = 4; + } + else + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 3 || z == 4)) + { + *p_argv = (void *) (argp); + + z = 4; + } + else +#endif + if (z <= 4) + { + *p_argv = (void *) (argp + 4 - z); + + z = 4; + } + else + { + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } +} + +unsigned int +ffi_closure_SYSV_inner (ffi_closure *closure, void *resp, void *args) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_incoming_args_SYSV(args, arg_area, cif); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + *(unsigned short *)closure->tramp = 0x207c; + *(void **)(closure->tramp + 2) = codeloc; + *(unsigned short *)(closure->tramp + 6) = 0x4ef9; + + if ( +#ifdef __MINT__ + (cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((cif->rtype->type == FFI_TYPE_STRUCT) + && !cif->flags))) + *(void **)(closure->tramp + 8) = ffi_closure_struct_SYSV; + else + *(void **)(closure->tramp + 8) = ffi_closure_SYSV; + +#ifdef __rtems__ + rtems_cache_flush_multiple_data_lines( codeloc, FFI_TRAMPOLINE_SIZE ); +#elif defined(__MINT__) + Ssystem(S_FLUSHCACHE, codeloc, FFI_TRAMPOLINE_SIZE); +#else + syscall(SYS_cacheflush, codeloc, FLUSH_SCOPE_LINE, + FLUSH_CACHE_BOTH, FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget.h new file mode 100644 index 0000000..e81dde2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/ffitarget.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Motorola 68K. 
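ffi_prep_closure_loc above patches a move/jmp pair into the trampoline and flushes the instruction cache; a minimal sketch of driving it through the standard closure API (the handler and its printf body are hypothetical, ffi_closure_alloc/ffi_prep_closure_loc are the public libffi interface):

#include <stdio.h>
#include <ffi.h>

/* Called through the generated trampoline; returns printf's result. */
static void handler (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif;
  (void) user_data;
  *(ffi_arg *) ret = printf ("got %d\n", *(int *) args[0]);
}

int main (void)
{
  ffi_cif cif;
  ffi_type *arg_types[1] = { &ffi_type_sint };
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
  int (*fn) (int);

  if (closure != NULL
      && ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK
      && ffi_prep_closure_loc (closure, &cif, handler, NULL, code) == FFI_OK)
    {
      fn = (int (*) (int)) code;
      fn (42);   /* jumps through the trampoline into handler() */
    }

  ffi_closure_free (closure);
  return 0;
}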
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv.S new file mode 100644 index 0000000..ea40f11 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m68k/sysv.S @@ -0,0 +1,357 @@ +/* ----------------------------------------------------------------------- + + sysv.S - Copyright (c) 2012 Alan Hourihane + Copyright (c) 1998, 2012 Andreas Schwab + Copyright (c) 2008 Red Hat, Inc. + Copyright (c) 2012, 2016 Thorsten Glaser + + m68k Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef HAVE_AS_CFI_PSEUDO_OP +#define CFI_STARTPROC() .cfi_startproc +#define CFI_OFFSET(reg,off) .cfi_offset reg,off +#define CFI_DEF_CFA(reg,off) .cfi_def_cfa reg,off +#define CFI_ENDPROC() .cfi_endproc +#else +#define CFI_STARTPROC() +#define CFI_OFFSET(reg,off) +#define CFI_DEF_CFA(reg,off) +#define CFI_ENDPROC() +#endif + +#ifdef __MINT__ +#define CALLFUNC(funcname) _ ## funcname +#else +#define CALLFUNC(funcname) funcname +#endif + + .text + + .globl CALLFUNC(ffi_call_SYSV) + .type CALLFUNC(ffi_call_SYSV),@function + .align 4 + +CALLFUNC(ffi_call_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %d2,-(%sp) + CFI_OFFSET(2,-12) + + | Make room for all of the new args. + sub.l 12(%fp),%sp + + | Call ffi_prep_args + move.l 8(%fp),-(%sp) + pea 4(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_prep_args) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_prep_args@PLTPC) +#endif + addq.l #8,%sp + + | Pass pointer to struct value, if any +#ifdef __MINT__ + move.l %d0,%a1 +#else + move.l %a0,%a1 +#endif + + | Call the function + move.l 24(%fp),%a0 + jsr (%a0) + + | Remove the space we pushed for the args + add.l 12(%fp),%sp + + | Load the pointer to storage for the return value + move.l 20(%fp),%a1 + + | Load the return type code + move.l 16(%fp),%d2 + + | If the return value pointer is NULL, assume no return value. + | NOTE: On the mc68000, tst on an address register is not supported. +#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + cmp.w #0, %a1 +#else + tst.l %a1 +#endif + jbeq noretval + + btst #0,%d2 + jbeq retlongint + move.l %d0,(%a1) + jbra epilogue + +retlongint: + btst #1,%d2 + jbeq retfloat + move.l %d0,(%a1) + move.l %d1,4(%a1) + jbra epilogue + +retfloat: + btst #2,%d2 + jbeq retdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s %fp0,(%a1) +#else + move.l %d0,(%a1) +#endif + jbra epilogue + +retdouble: + btst #3,%d2 + jbeq retlongdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1) +#endif + jbra epilogue + +retlongdouble: + btst #4,%d2 + jbeq retpointer +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1)+ + move.l %d2,(%a1) +#endif + jbra epilogue + +retpointer: + btst #5,%d2 + jbeq retstruct1 +#ifdef __MINT__ + move.l %d0,(%a1) +#else + move.l %a0,(%a1) +#endif + jbra epilogue + +retstruct1: + btst #6,%d2 + jbeq retstruct2 + move.b %d0,(%a1) + jbra epilogue + +retstruct2: + btst #7,%d2 + jbeq retsint8 + move.w %d0,(%a1) + jbra epilogue + +retsint8: + btst #8,%d2 + jbeq retsint16 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + move.l %d0,(%a1) + jbra epilogue + +retsint16: + btst #9,%d2 + jbeq noretval + ext.l %d0 + move.l %d0,(%a1) + +noretval: +epilogue: + move.l (%sp)+,%d2 + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_call_SYSV),.-CALLFUNC(ffi_call_SYSV) + + .globl CALLFUNC(ffi_closure_SYSV) + .type CALLFUNC(ffi_closure_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_SYSV): + CFI_STARTPROC() + link %fp,#-12 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + pea -12(%fp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + + lsr.l #1,%d0 + jne 1f + jcc .Lcls_epilogue + | CIF_FLAGS_INT + move.l -12(%fp),%d0 +.Lcls_epilogue: + | no CIF_FLAGS_* + unlk %fp + rts +1: + lea -12(%fp),%a0 + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_float + | CIF_FLAGS_DINT + move.l (%a0)+,%d0 + move.l (%a0),%d1 + jra .Lcls_epilogue +.Lcls_ret_float: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s (%a0),%fp0 +#else + move.l (%a0),%d0 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_ldouble + | CIF_FLAGS_DOUBLE +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0),%d1 +#endif + jra .Lcls_epilogue +.Lcls_ret_ldouble: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0)+,%d1 + move.l (%a0),%d2 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_struct1 + | CIF_FLAGS_POINTER + move.l (%a0),%a0 + move.l %a0,%d0 + jra .Lcls_epilogue +.Lcls_ret_struct1: + move.b (%a0),%d0 + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_sint8 + | CIF_FLAGS_STRUCT2 + move.w (%a0),%d0 + jra .Lcls_epilogue +.Lcls_ret_sint8: + move.l (%a0),%d0 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + jra .Lcls_epilogue +1: + | CIF_FLAGS_SINT16 + move.l (%a0),%d0 + ext.l %d0 + jra .Lcls_epilogue + CFI_ENDPROC() + + .size CALLFUNC(ffi_closure_SYSV),.-CALLFUNC(ffi_closure_SYSV) + + .globl CALLFUNC(ffi_closure_struct_SYSV) + .type CALLFUNC(ffi_closure_struct_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_struct_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + move.l %a1,-(%sp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_closure_struct_SYSV),.-CALLFUNC(ffi_closure_struct_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi.c new file mode 100644 index 0000000..57b344f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffi.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. + * + * Only OpenBSD/m88k is currently supported; other platforms (such as + * Motorola's SysV/m88k) could be supported with the following tweaks: + * + * - non-OpenBSD systems use an `outgoing parameter area' as part of the + * 88BCS calling convention, which is not supported under OpenBSD from + * release 3.6 onwards. Supporting it should be as easy as taking it + * into account when adjusting the stack, in the assembly code. 
+ * + * - the logic deciding whether a function argument gets passed through + * registers, or on the stack, has changed several times in OpenBSD in + * edge cases (especially for structs larger than 32 bytes being passed + * by value). The code below attemps to match the logic used by the + * system compiler of OpenBSD 5.3, i.e. gcc 3.3.6 with many m88k backend + * fixes. + */ + +#include +#include + +#include +#include + +void ffi_call_OBSD (unsigned int, extended_cif *, unsigned int, void *, + void (*fn) ()); +void *ffi_prep_args (void *, extended_cif *); +void ffi_closure_OBSD (ffi_closure *); +void ffi_closure_struct_OBSD (ffi_closure *); +unsigned int ffi_closure_OBSD_inner (ffi_closure *, void *, unsigned int *, + char *); +void ffi_cacheflush_OBSD (unsigned int, unsigned int); + +#define CIF_FLAGS_INT (1 << 0) +#define CIF_FLAGS_DINT (1 << 1) + +/* + * Foreign Function Interface API + */ + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp, *stackp; + unsigned int *regp; + unsigned int regused; + ffi_type **p_arg; + void *struct_value_ptr; + + regp = (unsigned int *)stack; + stackp = (char *)(regp + 8); + regused = 0; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument can be passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + switch (t) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *(unsigned int *) argp = *(unsigned int *) *p_argv; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + + /* Align if necessary. */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are filled, and about to continue + on stack, regp == stackp. Therefore the check for regused as well. 
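The register-versus-stack rule spelled out in the comments here can be restated as a small predicate; this is a sketch for clarity only (not part of the port), with regused counting 32-bit argument-register words (r2..r9) already consumed:

/* Sketch: mirrors the decision made inline in ffi_prep_args() and in
   the closure argument walk. */
static int
m88k_arg_in_regs (const ffi_type *t, unsigned int *regused)
{
  if (t->type == FFI_TYPE_STRUCT)
    /* Only a register-sized, register-aligned struct rides in a register. */
    return t->size == sizeof (int)
	   && t->alignment == sizeof (int)
	   && *regused < 8;

  /* 64-bit scalars are bumped onto an even register pair first. */
  if (t->size > sizeof (int) && *regused < 8 - 1 && (*regused & 1))
    (*regused)++;

  return *regused < 8;
}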
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } + + return struct_value_ptr; +} + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size == sizeof (int) && + cif->rtype->alignment == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = 0; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && (cif->rtype->size != sizeof (int) + || cif->rtype->alignment != sizeof (int))) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_OBSD: + ffi_call_OBSD (cif->bytes, &ecif, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +static void +ffi_prep_closure_args_OBSD (ffi_cif *cif, void **avalue, unsigned int *regp, + char *stackp) +{ + unsigned int i; + void **p_argv; + char *argp; + unsigned int regused; + ffi_type **p_arg; + + regused = 0; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument has been passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + if (z < sizeof (int) && t != FFI_TYPE_STRUCT) + *p_argv = (void *) (argp + sizeof (int) - z); + else + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are exhausted, and about to fetch from + stack, regp == stackp. Therefore the check for regused as well. 
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } +} + +unsigned int +ffi_closure_OBSD_inner (ffi_closure *closure, void *resp, unsigned int *regp, + char *stackp) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_args_OBSD(cif, arg_area, regp, stackp); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, void *codeloc) +{ + unsigned int *tramp = (unsigned int *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_OBSD); + + if (cif->rtype->type == FFI_TYPE_STRUCT && !cif->flags) + fn = &ffi_closure_struct_OBSD; + else + fn = &ffi_closure_OBSD; + + /* or.u %r10, %r0, %hi16(fn) */ + tramp[0] = 0x5d400000 | (((unsigned int)fn) >> 16); + /* or.u %r13, %r0, %hi16(closure) */ + tramp[1] = 0x5da00000 | ((unsigned int)closure >> 16); + /* or %r10, %r10, %lo16(fn) */ + tramp[2] = 0x594a0000 | (((unsigned int)fn) & 0xffff); + /* jmp.n %r10 */ + tramp[3] = 0xf400c40a; + /* or %r13, %r13, %lo16(closure) */ + tramp[4] = 0x59ad0000 | ((unsigned int)closure & 0xffff); + + ffi_cacheflush_OBSD((unsigned int)codeloc, FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget.h new file mode 100644 index 0000000..e52bf9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/ffitarget.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/* + * m88k Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OBSD, + FFI_DEFAULT_ABI = FFI_OBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 0x14 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd.S new file mode 100644 index 0000000..1944a23 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/m88k/obsd.S @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * ffi_cacheflush_OBSD(unsigned int addr, %r2 + * unsigned int size); %r3 + */ + .align 4 + .globl ffi_cacheflush_OBSD + .type ffi_cacheflush_OBSD,@function +ffi_cacheflush_OBSD: + tb0 0, %r0, 451 + or %r0, %r0, %r0 + jmp %r1 + .size ffi_cacheflush_OBSD, . 
- ffi_cacheflush_OBSD + +/* + * ffi_call_OBSD(unsigned bytes, %r2 + * extended_cif *ecif, %r3 + * unsigned flags, %r4 + * void *rvalue, %r5 + * void (*fn)()); %r6 + */ + .align 4 + .globl ffi_call_OBSD + .type ffi_call_OBSD,@function +ffi_call_OBSD: + subu %r31, %r31, 32 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 32 + + | Save the few arguments we'll need after ffi_prep_args() + st.d %r4, %r31, 8 + st %r6, %r31, 16 + + | Allocate room for the image of r2-r9, and the stack space for + | the args (rounded to a 16-byte boundary) + addu %r2, %r2, (8 * 4) + 15 + clr %r2, %r2, 4<0> + subu %r31, %r31, %r2 + + | Fill register and stack image + or %r2, %r31, %r0 +#ifdef PIC + bsr ffi_prep_args#plt +#else + bsr ffi_prep_args +#endif + + | Save pointer to return struct address, if any + or %r12, %r2, %r0 + + | Get function pointer + subu %r4, %r30, 32 + ld %r1, %r4, 16 + + | Fetch the register arguments + ld.d %r2, %r31, (0 * 4) + ld.d %r4, %r31, (2 * 4) + ld.d %r6, %r31, (4 * 4) + ld.d %r8, %r31, (6 * 4) + addu %r31, %r31, (8 * 4) + + | Invoke the function + jsr %r1 + + | Restore stack now that we don't need the args anymore + subu %r31, %r30, 32 + + | Figure out what to return as the function's return value + ld %r5, %r31, 12 | rvalue + ld %r4, %r31, 8 | flags + + bcnd eq0, %r5, 9f + + bb0 0, %r4, 1f | CIF_FLAGS_INT + st %r2, %r5, 0 + br 9f + +1: + bb0 1, %r4, 1f | CIF_FLAGS_DINT + st.d %r2, %r5, 0 + br 9f + +1: +9: + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 32 + .size ffi_call_OBSD, . - ffi_call_OBSD + +/* + * ffi_closure_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_OBSD + .type ffi_closure_OBSD, @function +ffi_closure_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments and return + | value + subu %r31, %r31, (8 * 4) + (2 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + addu %r3, %r31, (8 * 4) | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + | Figure out what to return as the function's return value + bb0 0, %r2, 1f | CIF_FLAGS_INT + ld %r2, %r31, (8 * 4) + br 9f + +1: + bb0 1, %r2, 1f | CIF_FLAGS_DINT + ld.d %r2, %r31, (8 * 4) + br 9f + +1: +9: + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_OBSD,.-ffi_closure_OBSD + +/* + * ffi_closure_struct_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_struct_OBSD + .type ffi_closure_struct_OBSD, @function +ffi_closure_struct_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments + subu %r31, %r31, (8 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + or %r3, %r12, 0 | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_struct_OBSD,.-ffi_closure_struct_OBSD diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi.c new file mode 100644 index 0000000..3aecb0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffi.c @@ -0,0 +1,330 @@ +/* ---------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Imagination Technologies + + Meta Foreign Function Interface + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + `Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED `AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. +----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) + +/* + * ffi_prep_args is called by the assembly routine once stack space has been + * allocated for the function's arguments + */ + +unsigned int ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + /* Store return value */ + if ( ecif->cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *(void **) argp = ecif->rvalue; + } + + p_argv = ecif->avalue; + + /* point to next location */ + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; (i != 0); i--, p_arg++, p_argv++) + { + size_t z; + + /* Move argp to address of argument */ + z = (*p_arg)->size; + argp -= z; + + /* Align if necessary */ + argp = (char *) FFI_ALIGN_DOWN(FFI_ALIGN_DOWN(argp, (*p_arg)->alignment), 4); + + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + } + } else if ( z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + } + + /* return the size of the arguments to be passed in registers, + padded to an 8 byte boundary to preserve stack alignment */ + return FFI_ALIGN(MIN(stack - argp, 6*4), 8); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type **ptr; + unsigned i, bytes = 0; + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) 
{ + if ((*ptr)->size == 0) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type, do this + check after the initialization. */ + FFI_ASSERT_VALID_TYPE(*ptr); + + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN(bytes, (*ptr)->alignment); + + bytes += FFI_ALIGN((*ptr)->size, 4); + } + + /* Ensure arg space is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT) { + bytes += sizeof(void*); + + /* Ensure stack is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + } + + cif->bytes = bytes; + + /* Set the return type flag */ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) FFI_TYPE_SINT64; + break; + case FFI_TYPE_STRUCT: + /* Meta can store return values which are <= 64 bits */ + if (cif->rtype->size <= 4) + /* Returned to D0Re0 as 32-bit value */ + cif->flags = (unsigned)FFI_TYPE_INT; + else if ((cif->rtype->size > 4) && (cif->rtype->size <= 8)) + /* Returned valued is stored to D1Re0|R0Re0 */ + cif->flags = (unsigned)FFI_TYPE_DOUBLE; + else + /* value stored in memory */ + cif->flags = (unsigned)FFI_TYPE_STRUCT; + break; + default: + cif->flags = (unsigned)FFI_TYPE_INT; + break; + } + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*fn)(void), extended_cif *, unsigned, unsigned, double *); + +/* + * Exported in API. Entry point + * cif -> ffi_cif object + * fn -> function pointer + * rvalue -> pointer to return value + * avalue -> vector of void * pointers pointing to memory locations holding the + * arguments + */ +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + int small_struct = (((cif->flags == FFI_TYPE_INT) || (cif->flags == FFI_TYPE_DOUBLE)) && (cif->rtype->type == FFI_TYPE_STRUCT)); + ecif.cif = cif; + ecif.avalue = avalue; + + double temp; + + /* + * If the return value is a struct and we don't have a return value address + * then we need to make one + */ + + if ((rvalue == NULL ) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else if (small_struct) + ecif.rvalue = &temp; + else + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(fn, &ecif, cif->bytes, cif->flags, ecif.rvalue); + break; + default: + FFI_ASSERT(0); + break; + } + + if (small_struct) + memcpy (rvalue, &temp, cif->rtype->size); +} + +/* private members */ + +static void ffi_prep_incoming_args_SYSV (char *, void **, void **, + ffi_cif*, float *); + +void ffi_closure_SYSV (ffi_closure *); + +/* Do NOT change that without changing the FFI_TRAMPOLINE_SIZE */ +extern unsigned int ffi_metag_trampoline[10]; /* 10 instructions */ + +/* end of private members */ + +/* + * __tramp: trampoline memory location + * __fun: assembly routine + * __ctx: memory location for wrapper + * + * At this point, tramp[0] == __ctx ! 
+ */ +void ffi_init_trampoline(unsigned char *__tramp, unsigned int __fun, unsigned int __ctx) { + memcpy (__tramp, ffi_metag_trampoline, sizeof(ffi_metag_trampoline)); + *(unsigned int*) &__tramp[40] = __ctx; + *(unsigned int*) &__tramp[44] = __fun; + /* This will flush the instruction cache */ + __builtin_meta2_cachewd(&__tramp[0], 1); + __builtin_meta2_cachewd(&__tramp[47], 1); +} + + + +/* the cif must already be prepared */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + void (*closure_func)(ffi_closure*) = NULL; + + if (cif->abi == FFI_SYSV) + closure_func = &ffi_closure_SYSV; + else + return FFI_BAD_ABI; + + ffi_init_trampoline( + (unsigned char*)&closure->tramp[0], + (unsigned int)closure_func, + (unsigned int)codeloc); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +/* This function is jumped to by the trampoline */ +unsigned int ffi_closure_SYSV_inner (closure, respp, args, vfp_args) + ffi_closure *closure; + void **respp; + void *args; + void *vfp_args; +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void*)); + + /* + * This call will initialize ARG_AREA, such that each + * element in that array points to the corresponding + * value on the stack; and if the function returns + * a structure, it will re-set RESP to point to the + * structure return address. + */ + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif, vfp_args); + + (closure->fun) ( cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif, + float *vfp_stack) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + /* stack points to original arguments */ + argp = stack; + + /* Store return value */ + if ( cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *rvalue = *(void **) argp; + } + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) { + size_t z; + size_t alignment; + + alignment = (*p_arg)->alignment; + if (alignment < 4) + alignment = 4; + if ((alignment - 1) & (unsigned)argp) + argp = (char *) FFI_ALIGN(argp, alignment); + + z = (*p_arg)->size; + *p_argv = (void*) argp; + p_argv++; + argp -= z; + } + return; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget.h new file mode 100644 index 0000000..7b9dbeb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Imagination Technologies Ltd. 
+ Target configuration macros for Meta + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_DEFAULT_ABI = FFI_SYSV, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1, +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 48 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv.S new file mode 100644 index 0000000..b4b2a3b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/metag/sysv.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Imagination Technologies Ltd. + + Meta Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#define ENTRY(x) .globl CNAME(x); .type CNAME(x), %function; CNAME(x): +#endif + +#ifdef __ELF__ +#define LSYM(x) .x +#else +#define LSYM(x) x +#endif + +.macro call_reg x= + .text + .balign 4 + mov D1RtP, \x + swap D1RtP, PC +.endm + +! Save register arguments +.macro SAVE_ARGS + .text + .balign 4 + setl [A0StP++], D0Ar6, D1Ar5 + setl [A0StP++], D0Ar4, D1Ar3 + setl [A0StP++], D0Ar2, D1Ar1 +.endm + +! Save retrun, frame pointer and other regs +.macro SAVE_REGS regs= + .text + .balign 4 + setl [A0StP++], D0FrT, D1RtP + ! Needs to be a pair of regs + .ifnc "\regs","" + setl [A0StP++], \regs + .endif +.endm + +! Declare a global function +.macro METAG_FUNC_START name + .text + .balign 4 + ENTRY(\name) +.endm + +! Return registers from the stack. Reverse SAVE_REGS operation +.macro RET_REGS regs=, cond= + .ifnc "\regs", "" + getl \regs, [--A0StP] + .endif + getl D0FrT, D1RtP, [--A0StP] +.endm + +! Return arguments +.macro RET_ARGS + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] +.endm + + + ! D1Ar1: fn + ! D0Ar2: &ecif + ! D1Ar3: cif->bytes + ! D0Ar4: fig->flags + ! D1Ar5: ecif.rvalue + + ! This assumes we are using GNU as +METAG_FUNC_START ffi_call_SYSV + ! Save argument registers + + SAVE_ARGS + + ! new frame + mov D0FrT, A0FrP + add A0FrP, A0StP, #0 + + ! Preserve the old frame pointer + SAVE_REGS "D1.5, D0.5" + + ! Make room for new args. cifs->bytes is the total space for input + ! and return arguments + + add A0StP, A0StP, D1Ar3 + + ! Preserve cifs->bytes & fn + mov D0.5, D1Ar3 + mov D1.5, D1Ar1 + + ! Place all of the ffi_prep_args in position + mov D1Ar1, A0StP + + ! Call ffi_prep_args(stack, &ecif) +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_prep_args@PLT) +#else + callr D1RtP, CNAME(ffi_prep_args) +#endif + + ! Restore fn pointer + + ! The foreign stack should look like this + ! XXXXX XXXXXX <--- stack pointer + ! FnArgN rvalue + ! FnArgN+2 FnArgN+1 + ! FnArgN+4 FnArgN+3 + ! .... + ! + + ! A0StP now points to the first (or return) argument + 4 + + ! Preserve cif->bytes + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] + + ! Place A0StP to the first argument again + add A0StP, A0StP, #24 ! That's because we loaded 6 regs x 4 byte each + + ! A0FrP points to the initial stack without the reserved space for the + ! cifs->bytes, whilst A0StP points to the stack after the space allocation + + ! fn was the first argument of ffi_call_SYSV. + ! The stack at this point looks like this: + ! + ! A0StP(on entry to _SYSV) -> Arg6 Arg5 | low + ! Arg4 Arg3 | + ! Arg2 Arg1 | + ! A0FrP ----> D0FrtP D1RtP | + ! D1.5 D0.5 | + ! A0StP(bf prep_args) -> FnArgn FnArgn-1 | + ! FnArgn-2FnArgn-3 | + ! ................ | <= cifs->bytes + ! FnArg4 FnArg3 | + ! A0StP (prv_A0StP+cifs->bytes) FnArg2 FnArg1 | high + ! + ! fn was in Arg1 so it's located in in A0FrP+#-0xC + ! + + ! D0Re0 contains the size of arguments stored in registers + sub A0StP, A0StP, D0Re0 + + ! Arg1 is the function pointer for the foreign call. This has been + ! preserved in D1.5 + + ! Time to call (fn). Arguments should be like this: + ! Arg1-Arg6 are loaded to regs + ! 
The rest of the arguments are stored in stack pointed by A0StP + + call_reg D1.5 + + ! Reset stack. + + mov A0StP, A0FrP + + ! Load Arg1 with the pointer to storage for the return type + ! This was stored in Arg5 + + getd D1Ar1, [A0FrP+#-20] + + ! Load D0Ar2 with the return type code. This was stored in Arg4 (flags) + + getd D0Ar2, [A0FrP+#-16] + + ! We are ready to start processing the return value + ! D0Re0 (and D1Re0) hold the return value + + ! If the return value is NULL, assume no return value + cmp D1Ar1, #0 + beq LSYM(Lepilogue) + + ! return INT + cmp D0Ar2, #FFI_TYPE_INT + ! Sadly, there is no setd{cc} instruction so we need to workaround that + bne .INT64 + setd [D1Ar1], D0Re0 + b LSYM(Lepilogue) + + ! return INT64 +.INT64: + cmp D0Ar2, #FFI_TYPE_SINT64 + setleq [D1Ar1], D0Re0, D1Re0 + + ! return DOUBLE + cmp D0Ar2, #FFI_TYPE_DOUBLE + setl [D1AR1++], D0Re0, D1Re0 + +LSYM(Lepilogue): + ! At this point, the stack pointer points right after the argument + ! saved area. We need to restore 4 regs, therefore we need to move + ! 16 bytes ahead. + add A0StP, A0StP, #16 + RET_REGS "D1.5, D0.5" + RET_ARGS + getd D0Re0, [A0StP] + mov A0FrP, D0FrT + swap D1RtP, PC + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + +/* + (called by ffi_metag_trampoline) + void ffi_closure_SYSV (ffi_closure*) + + (called by ffi_closure_SYSV) + unsigned int FFI_HIDDEN + ffi_closure_SYSV_inner (closure,respp, args) + ffi_closure *closure; + void **respp; + void *args; +*/ + +METAG_FUNC_START ffi_closure_SYSV + ! We assume that D1Ar1 holds the address of the + ! ffi_closure struct. We will use that to fetch the + ! arguments. The stack pointer points to an empty space + ! and it is ready to store more data. + + ! D1Ar1 is ready + ! Allocate stack space for return value + add A0StP, A0StP, #8 + ! Store it to D0Ar2 + sub D0Ar2, A0StP, #8 + + sub D1Ar3, A0FrP, #4 + + ! D1Ar3 contains the address of the original D1Ar1 argument + ! We need to subtract #4 later on + + ! Preverve D0Ar2 + mov D0.5, D0Ar2 + +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_closure_SYSV_inner@PLT) +#else + callr D1RtP, CNAME(ffi_closure_SYSV_inner) +#endif + + ! Check the return value and store it to D0.5 + cmp D0Re0, #FFI_TYPE_INT + beq .Lretint + cmp D0Re0, #FFI_TYPE_DOUBLE + beq .Lretdouble +.Lclosure_epilogue: + sub A0StP, A0StP, #8 + RET_REGS "D1.5, D0.5" + RET_ARGS + swap D1RtP, PC + +.Lretint: + setd [D0.5], D0Re0 + b .Lclosure_epilogue +.Lretdouble: + setl [D0.5++], D0Re0, D1Re0 + b .Lclosure_epilogue +.ffi_closure_SYSV_end: +.size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + + +ENTRY(ffi_metag_trampoline) + SAVE_ARGS + ! New frame + mov A0FrP, A0StP + SAVE_REGS "D1.5, D0.5" + mov D0.5, PC + ! Load D1Ar1 the value of ffi_metag_trampoline + getd D1Ar1, [D0.5 + #8] + ! 
Jump to ffi_closure_SYSV + getd PC, [D0.5 + #12] diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi.c new file mode 100644 index 0000000..df6e33c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffi.c @@ -0,0 +1,321 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +extern void ffi_call_SYSV(void (*)(void*, extended_cif*), extended_cif*, + unsigned int, unsigned int, unsigned int*, void (*fn)(void), + unsigned int, unsigned int); + +extern void ffi_closure_SYSV(void); + +#define WORD_SIZE sizeof(unsigned int) +#define ARGS_REGISTER_SIZE (WORD_SIZE * 6) +#define WORD_FFI_ALIGN(x) FFI_ALIGN(x, WORD_SIZE) + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ +void ffi_prep_args(void* stack, extended_cif* ecif) +{ + unsigned int i; + ffi_type** p_arg; + void** p_argv; + void* stack_args_p = stack; + + if (ecif == NULL || ecif->cif == NULL) { + return; /* no description to prepare */ + } + + p_argv = ecif->avalue; + + if ((ecif->cif->rtype != NULL) && + (ecif->cif->rtype->type == FFI_TYPE_STRUCT)) + { + /* if return type is a struct which is referenced on the stack/reg5, + * by a pointer. Stored the return value pointer in r5. 
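+	 *
+	 * Editor's note (illustrative sketch, not part of the upstream file):
+	 * using only the public libffi API, a struct-by-value return reaches
+	 * this branch roughly as follows, with make_pair a hypothetical
+	 * "struct pair make_pair(int)":
+	 *
+	 *   ffi_type *fields[] = { &ffi_type_sint32, &ffi_type_sint32, NULL };
+	 *   ffi_type pair_type = { 0, 0, FFI_TYPE_STRUCT, fields };
+	 *   ffi_type *argt[]   = { &ffi_type_sint32 };
+	 *   ffi_cif cif;
+	 *   struct pair { int a, b; } out;
+	 *   int x = 7;
+	 *   void *argv[] = { &x };
+	 *
+	 *   if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &pair_type, argt) == FFI_OK)
+	 *       ffi_call(&cif, FFI_FN(make_pair), &out, argv);
+	 *
+	 * Here &out is ecif->rvalue, and its address is the word copied into
+	 * the first argument slot (r5) below.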
+ */ + char* addr = stack_args_p; + memcpy(addr, &(ecif->rvalue), WORD_SIZE); + stack_args_p += WORD_SIZE; + } + + if (ecif->avalue == NULL) { + return; /* no arguments to prepare */ + } + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t size = (*p_arg)->size; + int type = (*p_arg)->type; + void* value = p_argv[i]; + char* addr = stack_args_p; + int aligned_size = WORD_FFI_ALIGN(size); + + /* force word alignment on the stack */ + stack_args_p += aligned_size; + + switch (type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8*)(value); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8*)(value); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16*)(value); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16*)(value); + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * MicroBlaze toolchain appears to emit: + * bsrli r5, r5, 8 (caller) + * ... + * + * ... + * bslli r5, r5, 8 (callee) + * + * For structs like "struct a { uint8_t a[3]; };", when passed + * by value. + * + * Structs like "struct b { uint16_t a; };" are also expected + * to be packed strangely in registers. + * + * This appears to be because the microblaze toolchain expects + * "struct b == uint16_t", which is only any issue for big + * endian. + * + * The following is a work around for big-endian only, for the + * above mentioned case, it will re-align the contents of a + * <= 3-byte struct value. + */ + if (size < WORD_SIZE) + { + memcpy (addr + (WORD_SIZE - size), value, size); + break; + } +#endif + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + default: + memcpy(addr, value, aligned_size); + } + } +} + +ffi_status ffi_prep_cif_machdep(ffi_cif* cif) +{ + /* check ABI */ + switch (cif->abi) + { + case FFI_SYSV: + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} + +void ffi_call(ffi_cif* cif, void (*fn)(void), void* rvalue, void** avalue) +{ + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ecif.rvalue = alloca(cif->rtype->size); + } else { + ecif.rvalue = rvalue; + } + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, + ecif.rvalue, fn, cif->rtype->type, cif->rtype->size); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_call_SYSV(void* register_args, void* stack_args, + ffi_closure* closure, void* rvalue, + unsigned int* rtype, unsigned int* rsize) +{ + /* prepare arguments for closure call */ + ffi_cif* cif = closure->cif; + ffi_type** arg_types = cif->arg_types; + + /* re-allocate data for the args. This needs to be done in order to keep + * multi-word objects (e.g. structs) in contiguous memory. Callers are not + * required to store the value of args in the lower 6 words in the stack + * (although they are allocated in the stack). 
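+	 *
+	 * Editor's note: the clone rebuilt below mirrors the image produced by
+	 * ffi_prep_args on the caller side -- the six register words (r5-r10,
+	 * ARGS_REGISTER_SIZE = 24 bytes) come first, followed by any
+	 * stack-passed words, so every argument (including multi-word structs)
+	 * is contiguous again and can be handed to closure->fun as a plain
+	 * pointer into the clone.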
+ */ + char* stackclone = alloca(cif->bytes); + void** avalue = alloca(cif->nargs * sizeof(void*)); + void* struct_rvalue = NULL; + char* ptr = stackclone; + int i; + + /* copy registers into stack clone */ + int registers_used = cif->bytes; + if (registers_used > ARGS_REGISTER_SIZE) { + registers_used = ARGS_REGISTER_SIZE; + } + memcpy(stackclone, register_args, registers_used); + + /* copy stack allocated args into stack clone */ + if (cif->bytes > ARGS_REGISTER_SIZE) { + int stack_used = cif->bytes - ARGS_REGISTER_SIZE; + memcpy(stackclone + ARGS_REGISTER_SIZE, stack_args, stack_used); + } + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + struct_rvalue = *((void**)ptr); + ptr += WORD_SIZE; + } + + /* populate arg pointer list */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 3; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 2; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * Work around strange ABI behaviour. + * (see info in ffi_prep_args) + */ + if (arg_types[i]->size < WORD_SIZE) + { + memcpy (ptr, ptr + (WORD_SIZE - arg_types[i]->size), arg_types[i]->size); + } +#endif + avalue[i] = (void*)ptr; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + avalue[i] = ptr; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + default: + /* default 4-byte argument */ + avalue[i] = ptr; + break; + } + ptr += WORD_FFI_ALIGN(arg_types[i]->size); + } + + /* set the return type info passed back to the wrapper */ + *rsize = cif->rtype->size; + *rtype = cif->rtype->type; + if (struct_rvalue != NULL) { + closure->fun(cif, struct_rvalue, avalue, closure->user_data); + /* copy struct return pointer value into function return value */ + *((void**)rvalue) = struct_rvalue; + } else { + closure->fun(cif, rvalue, avalue, closure->user_data); + } +} + +ffi_status ffi_prep_closure_loc( + ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void* user_data, void* codeloc) +{ + unsigned long* tramp = (unsigned long*)&(closure->tramp[0]); + unsigned long cls = (unsigned long)codeloc; + unsigned long fn = 0; + unsigned long fn_closure_call_sysv = (unsigned long)ffi_closure_call_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + switch (cif->abi) + { + case FFI_SYSV: + fn = (unsigned long)ffi_closure_SYSV; + + /* load r11 (temp) with fn */ + /* imm fn(upper) */ + tramp[0] = 0xb0000000 | ((fn >> 16) & 0xffff); + /* addik r11, r0, fn(lower) */ + tramp[1] = 0x31600000 | (fn & 0xffff); + + /* load r12 (temp) with cls */ + /* imm cls(upper) */ + tramp[2] = 0xb0000000 | ((cls >> 16) & 0xffff); + /* addik r12, r0, cls(lower) */ + tramp[3] = 0x31800000 | (cls & 0xffff); + + /* load r3 (temp) with ffi_closure_call_SYSV */ + /* imm fn_closure_call_sysv(upper) */ + tramp[4] = 0xb0000000 | ((fn_closure_call_sysv >> 16) & 0xffff); + /* addik r3, r0, fn_closure_call_sysv(lower) */ + tramp[5] = 0x30600000 | (fn_closure_call_sysv & 0xffff); + /* branch/jump to address stored in r11 (fn) */ + tramp[6] = 0x98085800; /* bra r11 */ + + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget.h new file mode 100644 index 0000000..c6fa5a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/ffitarget.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012, 2013 Xilinx, Inc + + Target configuration macros for MicroBlaze. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#define FFI_TRAMPOLINE_SIZE (4*8) + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv.S new file mode 100644 index 0000000..ea43e9d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/microblaze/sysv.S @@ -0,0 +1,302 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* + * arg[0] (r5) = ffi_prep_args, + * arg[1] (r6) = &ecif, + * arg[2] (r7) = cif->bytes, + * arg[3] (r8) = cif->flags, + * arg[4] (r9) = ecif.rvalue, + * arg[5] (r10) = fn + * arg[6] (sp[0]) = cif->rtype->type + * arg[7] (sp[4]) = cif->rtype->size + */ + .text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + /* push callee saves */ + addik r1, r1, -20 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + swi r22, r1, 12 /* save for locals */ + swi r23, r1, 16 /* save for locals */ + + /* save the r5-r10 registers in the stack */ + addik r1, r1, -24 /* increment sp to store 6x 32-bit words */ + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* save function pointer */ + addik r3, r5, 0 /* copy ffi_prep_args into r3 */ + addik r22, r1, 0 /* save sp for unallocated args into r22 (callee-saved) */ + addik r23, r10, 0 /* save function address into r23 (callee-saved) */ + + /* prepare stack with allocation for n (bytes = r7) args */ + rsub r1, r7, r1 /* subtract bytes from sp */ + + /* prep args for ffi_prep_args call */ + addik r5, r1, 0 /* store stack pointer into arg[0] */ + /* r6 still holds ecif for arg[1] */ + + /* Call ffi_prep_args(stack, &ecif). */ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + /* returns calling stack pointer location */ + + /* prepare args for fn call, prep_args populates them onto the stack */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + + /* call (fn) (...). */ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r23 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + + /* Remove the space we pushed for the args. */ + addik r1, r22, 0 /* restore old SP */ + + /* restore this functions parameters */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + addik r1, r1, 24 /* decrement sp to de-allocate 6x 32-bit words */ + + /* If the return value pointer is NULL, assume no return value. 
*/ + beqi r9, ffi_call_SYSV_end + + lwi r22, r1, 48 /* get return type (20 for locals + 28 for arg[6]) */ + lwi r23, r1, 52 /* get return size (20 for locals + 32 for arg[7]) */ + + /* Check if return type is actually a struct, do nothing */ + rsubi r11, r22, FFI_TYPE_STRUCT + beqi r11, ffi_call_SYSV_end + + /* Return 8bit */ + rsubi r11, r23, 1 + beqi r11, ffi_call_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r23, 2 + beqi r11, ffi_call_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r23, 4 + beqi r11, ffi_call_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r23, 8 + beqi r11, ffi_call_SYSV_store64 + + /* Didn't match anything */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store64: + swi r3, r9, 0 /* store word r3 into return value */ + swi r4, r9, 4 /* store word r4 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store32: + swi r3, r9, 0 /* store word r3 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store16: +#ifdef __BIG_ENDIAN__ + shi r3, r9, 2 /* store half-word r3 into return value */ +#else + shi r3, r9, 0 /* store half-word r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_store8: +#ifdef __BIG_ENDIAN__ + sbi r3, r9, 3 /* store byte r3 into return value */ +#else + sbi r3, r9, 0 /* store byte r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_end: + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + lwi r22, r1, 12 + lwi r23, r1, 16 + addik r1, r1, 20 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_call_SYSV, . - ffi_call_SYSV + +/* ------------------------------------------------------------------------- */ + + /* + * args passed into this function, are passed down to the callee. + * this function is the target of the closure trampoline, as such r12 is + * a pointer to the closure object. 
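+ *
+ * Editor's note (not part of the upstream file): r11 (the address of this
+ * routine), r12 (the closure) and r3 (the C helper ffi_closure_call_SYSV
+ * branched to below) are loaded by the trampoline words assembled in
+ * ffi_prep_closure_loc() in ffi.c.  From user code this path is reached
+ * through the usual closure API -- roughly, with a hypothetical handler:
+ *
+ *   void *code;
+ *   ffi_closure *cl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ *   if (cl && ffi_prep_closure_loc(cl, &cif, handler, NULL, code) == FFI_OK)
+ *       ((int (*)(int))code)(42);  -- enters here via the trampoline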
+ */ + .text + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + /* push callee saves */ + addik r11, r1, 28 /* save stack args start location (excluding regs/link) */ + addik r1, r1, -12 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + + /* store register args on stack */ + addik r1, r1, -24 + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* setup args */ + addik r5, r1, 0 /* register_args */ + addik r6, r11, 0 /* stack_args */ + addik r7, r12, 0 /* closure object */ + addik r1, r1, -8 /* allocate return value */ + addik r8, r1, 0 /* void* rvalue */ + addik r1, r1, -8 /* allocate for return type/size values */ + addik r9, r1, 0 /* void* rtype */ + addik r10, r1, 4 /* void* rsize */ + + /* call the wrap_call function */ + addik r1, r1, -28 /* allocate args + link reg */ + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 28 /* restore the link register from the frame */ + +ffi_closure_SYSV_prepare_return: + lwi r9, r1, 0 /* rtype */ + lwi r10, r1, 4 /* rsize */ + addik r1, r1, 8 /* de-allocate return info values */ + + /* Check if return type is actually a struct, store 4 bytes */ + rsubi r11, r9, FFI_TYPE_STRUCT + beqi r11, ffi_closure_SYSV_store32 + + /* Return 8bit */ + rsubi r11, r10, 1 + beqi r11, ffi_closure_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r10, 2 + beqi r11, ffi_closure_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r10, 4 + beqi r11, ffi_closure_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r10, 8 + beqi r11, ffi_closure_SYSV_store64 + + /* Didn't match anything */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store64: + lwi r3, r1, 0 /* store word r3 into return value */ + lwi r4, r1, 4 /* store word r4 into return value */ + /* 64 bits == 2 words, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store32: + lwi r3, r1, 0 /* store word r3 into return value */ + /* 32 bits == 1 word, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store16: +#ifdef __BIG_ENDIAN__ + lhui r3, r1, 2 /* store half-word r3 into return value */ +#else + lhui r3, r1, 0 /* store half-word r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT16 + bnei r11, ffi_closure_SYSV_end + sext16 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store8: +#ifdef __BIG_ENDIAN__ + lbui r3, r1, 3 /* store byte r3 into return value */ +#else + lbui r3, r1, 0 /* store byte r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT8 + bnei r11, ffi_closure_SYSV_end + sext8 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_end: + addik r1, r1, 8 /* de-allocate return value */ + + /* de-allocate stored args */ + addik r1, r1, 24 + + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + addik r1, r1, 12 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_closure_SYSV, . 
- ffi_closure_SYSV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi.c new file mode 100644 index 0000000..057b046 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffi.c @@ -0,0 +1,1130 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2008 David Daney + Copyright (c) 1996, 2007, 2008, 2011 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#ifdef __GNUC__ +# if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) +# define USE__BUILTIN___CLEAR_CACHE 1 +# endif +#endif + +#ifndef USE__BUILTIN___CLEAR_CACHE +# if defined(__OpenBSD__) +# include +# else +# include +# endif +#endif + +#ifdef FFI_DEBUG +# define FFI_MIPS_STOP_HERE() ffi_stop_here() +#else +# define FFI_MIPS_STOP_HERE() do {} while(0) +#endif + +#ifdef FFI_MIPS_N32 +#define FIX_ARGP \ +FFI_ASSERT(argp <= &stack[bytes]); \ +if (argp == &stack[bytes]) \ +{ \ + argp = stack; \ + FFI_MIPS_STOP_HERE(); \ +} +#else +#define FIX_ARGP +#endif + + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +static void ffi_prep_args(char *stack, + extended_cif *ecif, + int bytes, + int flags) +{ + int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + +#ifdef FFI_MIPS_N32 + /* If more than 8 double words are used, the remainder go + on the stack. We reorder stuff on the stack here to + support this easily. */ + if (bytes > 8 * sizeof(ffi_arg)) + argp = &stack[bytes - (8 * sizeof(ffi_arg))]; + else + argp = stack; +#else + argp = stack; +#endif + + memset(stack, 0, bytes); + +#ifdef FFI_MIPS_N32 + if ( ecif->cif->rstruct_flag != 0 ) +#else + if ( ecif->cif->rtype->type == FFI_TYPE_STRUCT ) +#endif + { + *(ffi_arg *) argp = (ffi_arg) ecif->rvalue; + argp += sizeof(ffi_arg); + FIX_ARGP; + } + + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; i++, p_arg++) + { + size_t z; + unsigned int a; + + /* Align if necessary. 
*/ + a = (*p_arg)->alignment; + if (a < sizeof(ffi_arg)) + a = sizeof(ffi_arg); + + if ((a - 1) & (unsigned long) argp) + { + argp = (char *) FFI_ALIGN(argp, a); + FIX_ARGP; + } + + z = (*p_arg)->size; + if (z <= sizeof(ffi_arg)) + { + int type = (*p_arg)->type; + z = sizeof(ffi_arg); + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (ecif->cif->abi == FFI_N64 + || ecif->cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (i < 8 && (ecif->cif->abi == FFI_N32_SOFT_FLOAT + || ecif->cif->abi == FFI_N64_SOFT_FLOAT)) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_SINT8: + *(ffi_arg *)argp = *(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(ffi_arg *)argp = *(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(ffi_arg *)argp = *(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(ffi_arg *)argp = *(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_SINT32: + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); + break; + + case FFI_TYPE_UINT32: +#ifdef FFI_MIPS_N32 + /* The N32 ABI requires that 32-bit integers + be sign-extended to 64-bits, regardless of + whether they are signed or unsigned. */ + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); +#else + *(ffi_arg *)argp = *(UINT32 *)(* p_argv); +#endif + break; + + /* This can only happen with 64bit slots. */ + case FFI_TYPE_FLOAT: + *(float *) argp = *(float *)(* p_argv); + break; + + /* Handle structures. */ + default: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + } + } + else + { +#ifdef FFI_MIPS_O32 + memcpy(argp, *p_argv, z); +#else + { + unsigned long end = (unsigned long) argp + z; + unsigned long cap = (unsigned long) stack + bytes; + + /* Check if the data will fit within the register space. + Handle it if it doesn't. */ + + if (end <= cap) + memcpy(argp, *p_argv, z); + else + { + unsigned long portion = cap - (unsigned long)argp; + + memcpy(argp, *p_argv, portion); + argp = stack; + z -= portion; + memcpy(argp, (void*)((unsigned long)(*p_argv) + portion), + z); + } + } +#endif + } + p_argv++; + argp += z; + FIX_ARGP; + } +} + +#ifdef FFI_MIPS_N32 + +/* The n32 spec says that if "a chunk consists solely of a double + float field (but not a double, which is part of a union), it + is passed in a floating point register. Any other chunk is + passed in an integer register". This code traverses structure + definitions and generates the appropriate flags. */ + +static unsigned +calc_n32_struct_flags(int soft_float, ffi_type *arg, + unsigned *loc, unsigned *arg_reg) +{ + unsigned flags = 0; + unsigned index = 0; + + ffi_type *e; + + if (soft_float) + return 0; + + while ((e = arg->elements[index])) + { + /* Align this object. */ + *loc = FFI_ALIGN(*loc, e->alignment); + if (e->type == FFI_TYPE_DOUBLE) + { + /* Already aligned to FFI_SIZEOF_ARG. */ + *arg_reg = *loc / FFI_SIZEOF_ARG; + if (*arg_reg > 7) + break; + flags += (FFI_TYPE_DOUBLE << (*arg_reg * FFI_FLAG_BITS)); + *loc += e->size; + } + else + *loc += e->size; + index++; + } + /* Next Argument register at alignment of FFI_SIZEOF_ARG. */ + *arg_reg = FFI_ALIGN(*loc, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + + return flags; +} + +static unsigned +calc_n32_return_struct_flags(int soft_float, ffi_type *arg) +{ + unsigned flags = 0; + unsigned small = FFI_TYPE_SMALLSTRUCT; + ffi_type *e; + + /* Returning structures under n32 is a tricky thing. 
+ A struct with only one or two floating point fields + is returned in $f0 (and $f2 if necessary). Any other + struct results at most 128 bits are returned in $2 + (the first 64 bits) and $3 (remainder, if necessary). + Larger structs are handled normally. */ + + if (arg->size > 16) + return 0; + + if (arg->size > 8) + small = FFI_TYPE_SMALLSTRUCT2; + + e = arg->elements[0]; + + if (e->type == FFI_TYPE_DOUBLE) + flags = FFI_TYPE_DOUBLE; + else if (e->type == FFI_TYPE_FLOAT) + flags = FFI_TYPE_FLOAT; + + if (flags && (e = arg->elements[1])) + { + if (e->type == FFI_TYPE_DOUBLE) + flags += FFI_TYPE_DOUBLE << FFI_FLAG_BITS; + else if (e->type == FFI_TYPE_FLOAT) + flags += FFI_TYPE_FLOAT << FFI_FLAG_BITS; + else + return small; + + if (flags && (arg->elements[2])) + { + /* There are three arguments and the first two are + floats! This must be passed the old way. */ + return small; + } + if (soft_float) + flags += FFI_TYPE_STRUCT_SOFT; + } + else + if (!flags) + return small; + + return flags; +} + +#endif + +/* Perform machine dependent cif processing */ +static ffi_status ffi_prep_cif_machdep_int(ffi_cif *cif, unsigned nfixedargs) +{ + cif->flags = 0; + cif->mips_nfixedargs = nfixedargs; + +#ifdef FFI_MIPS_O32 + /* Set the flags necessary for O32 processing. FFI_O32_SOFT_FLOAT + * does not have special handling for floating point args. + */ + + if (cif->rtype->type != FFI_TYPE_STRUCT && cif->abi == FFI_O32) + { + if (cif->nargs > 0 && cif->nargs == nfixedargs) + { + switch ((cif->arg_types)[0]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[0]->type; + break; + + default: + break; + } + + if (cif->nargs > 1) + { + /* Only handle the second argument if the first + is a float or double. */ + if (cif->flags) + { + switch ((cif->arg_types)[1]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[1]->type << FFI_FLAG_BITS; + break; + + default: + break; + } + } + } + } + } + + /* Set the return type flag */ + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } + else + { + /* FFI_O32 */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } +#endif + +#ifdef FFI_MIPS_N32 + /* Set the flags necessary for N32 processing */ + { + int type; + unsigned arg_reg = 0; + unsigned loc = 0; + unsigned count = (cif->nargs < 8) ? cif->nargs : 8; + unsigned index = 0; + + unsigned struct_flags = 0; + int soft_float = (cif->abi == FFI_N32_SOFT_FLOAT + || cif->abi == FFI_N64_SOFT_FLOAT); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + struct_flags = calc_n32_return_struct_flags(soft_float, cif->rtype); + + if (struct_flags == 0) + { + /* This means that the structure is being passed as + a hidden argument */ + + arg_reg = 1; + count = (cif->nargs < 7) ? 
cif->nargs : 7; + + cif->rstruct_flag = !0; + } + else + cif->rstruct_flag = 0; + } + else + cif->rstruct_flag = 0; + + while (count-- > 0 && arg_reg < 8) + { + type = (cif->arg_types)[index]->type; + + // Pass variadic arguments in integer registers even if they're floats + if (soft_float || index >= nfixedargs) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += + ((cif->arg_types)[index]->type << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + break; + case FFI_TYPE_LONGDOUBLE: + /* Align it. */ + arg_reg = FFI_ALIGN(arg_reg, 2); + /* Treat it as two adjacent doubles. */ + if (soft_float || index >= nfixedargs) + { + arg_reg += 2; + } + else + { + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + } + break; + + case FFI_TYPE_STRUCT: + loc = arg_reg * FFI_SIZEOF_ARG; + cif->flags += calc_n32_struct_flags(soft_float || index >= nfixedargs, + (cif->arg_types)[index], + &loc, &arg_reg); + break; + + default: + arg_reg++; + break; + } + + index++; + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + { + if (struct_flags == 0) + { + /* The structure is returned through a hidden + first argument. Do nothing, 'cause FFI_TYPE_VOID + is 0 */ + } + else + { + /* The structure is returned via some tricky + mechanism */ + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += struct_flags << (4 + (FFI_FLAG_BITS * 8)); + } + break; + } + + case FFI_TYPE_VOID: + /* Do nothing, 'cause FFI_TYPE_VOID is 0 */ + break; + + case FFI_TYPE_POINTER: + if (cif->abi == FFI_N32_SOFT_FLOAT || cif->abi == FFI_N32) + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + else + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_FLOAT: + if (soft_float) + { + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + break; + } + /* else fall through */ + case FFI_TYPE_DOUBLE: + if (soft_float) + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + else + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_LONGDOUBLE: + /* Long double is returned as if it were a struct containing + two doubles. 
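+	     Editor's note: with FFI_FLAG_BITS == 2 the hard-float branch below
+	     adds (FFI_TYPE_STRUCT << 16) plus ((FFI_TYPE_DOUBLE +
+	     (FFI_TYPE_DOUBLE << 2)) << 20), so flags >> 16 becomes 0xFD (253),
+	     i.e. the hand-computed FFI_TYPE_STRUCT_DD constant from
+	     ffitarget.h that the n32.S return path recognises.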
*/ + if (soft_float) + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += FFI_TYPE_SMALLSTRUCT2 << (4 + (FFI_FLAG_BITS * 8)); + } + else + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += (FFI_TYPE_DOUBLE + + (FFI_TYPE_DOUBLE << FFI_FLAG_BITS)) + << (4 + (FFI_FLAG_BITS * 8)); + } + break; + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + } + } +#endif + + return FFI_OK; +} + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + return ffi_prep_cif_machdep_int(cif, cif->nargs); +} + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned nfixedargs, + unsigned ntotalargs MAYBE_UNUSED) +{ + return ffi_prep_cif_machdep_int(cif, nfixedargs); +} + +/* Low level routine for calling O32 functions */ +extern int ffi_call_O32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, unsigned *, void (*)(void), void *closure); + +/* Low level routine for calling N32 functions */ +extern int ffi_call_N32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, void *, void (*)(void), void *closure); + +void ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { +#ifdef FFI_MIPS_O32 + case FFI_O32: + case FFI_O32_SOFT_FLOAT: + ffi_call_O32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn, closure); + break; +#endif + +#ifdef FFI_MIPS_N32 + case FFI_N32: + case FFI_N32_SOFT_FLOAT: + case FFI_N64: + case FFI_N64_SOFT_FLOAT: + { + int copy_rvalue = 0; + int copy_offset = 0; + char *rvalue_copy = ecif.rvalue; + if (cif->rtype->type == FFI_TYPE_STRUCT && cif->rtype->size < 16) + { + /* For structures smaller than 16 bytes we clobber memory + in 8 byte increments. Make a copy so we don't clobber + the callers memory outside of the struct bounds. 
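+	     Editor's note: e.g. a 12-byte struct return would otherwise see a
+	     full 16 bytes written at ecif.rvalue; the alloca(16) bounce buffer
+	     below absorbs the overrun and only cif->rtype->size bytes are
+	     copied back afterwards.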
*/ + rvalue_copy = alloca(16); + copy_rvalue = 1; + } + else if (cif->rtype->type == FFI_TYPE_FLOAT + && (cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT)) + { + rvalue_copy = alloca (8); + copy_rvalue = 1; +#if defined(__MIPSEB__) || defined(_MIPSEB) + copy_offset = 4; +#endif + } + ffi_call_N32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, rvalue_copy, fn, closure); + if (copy_rvalue) + memcpy(ecif.rvalue, rvalue_copy + copy_offset, cif->rtype->size); + } + break; +#endif + + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +#if FFI_CLOSURES +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + void * fn; + char *clear_location = (char *) codeloc; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_closure_N32; +#endif /* FFI_MIPS_O32 */ + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned)fn >> 16); + /* ori $25,low(fn) */ + tramp[1] = 0x37390000 | ((unsigned)fn & 0xffff); + /* lui $12,high(codeloc) */ + tramp[2] = 0x3c0c0000 | ((unsigned)codeloc >> 16); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[3] = 0x03200008; +#else + tramp[3] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[4] = 0x358c0000 | ((unsigned)codeloc & 0xffff); +#else + /* N64 has a somewhat larger trampoline. 
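+     Editor's note: the O32/N32 sequence above is 5 instruction words
+     (20 bytes, matching its FFI_TRAMPOLINE_SIZE), while the N64 form
+     below builds both 64-bit constants 16 bits at a time and needs 13
+     words (52 bytes), which is why ffitarget.h reserves 56 bytes for it.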
*/ + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned long)fn >> 48); + /* lui $12,high(codeloc) */ + tramp[1] = 0x3c0c0000 | ((unsigned long)codeloc >> 48); + /* ori $25,mid-high(fn) */ + tramp[2] = 0x37390000 | (((unsigned long)fn >> 32 ) & 0xffff); + /* ori $12,mid-high(codeloc) */ + tramp[3] = 0x358c0000 | (((unsigned long)codeloc >> 32) & 0xffff); + /* dsll $25,$25,16 */ + tramp[4] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[5] = 0x000c6438; + /* ori $25,mid-low(fn) */ + tramp[6] = 0x37390000 | (((unsigned long)fn >> 16 ) & 0xffff); + /* ori $12,mid-low(codeloc) */ + tramp[7] = 0x358c0000 | (((unsigned long)codeloc >> 16) & 0xffff); + /* dsll $25,$25,16 */ + tramp[8] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[9] = 0x000c6438; + /* ori $25,low(fn) */ + tramp[10] = 0x37390000 | ((unsigned long)fn & 0xffff); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[11] = 0x03200008; +#else + tramp[11] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[12] = 0x358c0000 | ((unsigned long)codeloc & 0xffff); + +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#ifdef USE__BUILTIN___CLEAR_CACHE + __builtin___clear_cache(clear_location, clear_location + FFI_TRAMPOLINE_SIZE); +#else + cacheflush (clear_location, FFI_TRAMPOLINE_SIZE, ICACHE); +#endif + return FFI_OK; +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer arguments + * (and, depending upon the arguments, some floating-point arguments + * as well). FPR is a pointer to the area where floating point + * registers have been saved, if any. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return type. + * + * Based on the similar routine for sparc. 
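+ *
+ * Editor's note: the FFI_TYPE_* code returned here tells the O32
+ * assembly stub whether the result is to be picked up from the integer
+ * registers or from $f0, which is why the soft-float tail of this
+ * function remaps FFI_TYPE_FLOAT to FFI_TYPE_INT and FFI_TYPE_DOUBLE to
+ * FFI_TYPE_UINT64 before returning.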
+ */ +int +ffi_closure_mips_inner_O32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + double *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn, seen_int; + + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + seen_int = (cif->abi == FFI_O32_SOFT_FLOAT) || (cif->mips_nfixedargs != cif->nargs); + argn = 0; + + if ((cif->flags >> (FFI_FLAG_BITS * 2)) == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)ar[0]; + argn = 1; + seen_int = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->alignment == 8 && (argn & 0x1)) + argn++; + if (i < 2 && !seen_int && + (arg_types[i]->type == FFI_TYPE_FLOAT || + arg_types[i]->type == FFI_TYPE_DOUBLE || + arg_types[i]->type == FFI_TYPE_LONGDOUBLE)) + { +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT) + avaluep[i] = ((char *) &fpr[i]) + sizeof (float); + else +#endif + avaluep[i] = (char *) &fpr[i]; + } + else + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) ar[argn]; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) ar[argn]; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) ar[argn]; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) ar[argn]; + break; + + default: + avaluep[i] = (char *) &ar[argn]; + break; + } + seen_int = 1; + } + argn += FFI_ALIGN(arg_types[i]->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + i++; + } + + /* Invoke the closure. */ + fun(cif, rvalue, avaluep, user_data); + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_FLOAT: + return FFI_TYPE_INT; + case FFI_TYPE_DOUBLE: + return FFI_TYPE_UINT64; + default: + return cif->rtype->type; + } + } + else + { + return cif->rtype->type; + } +} + +#if defined(FFI_MIPS_N32) + +static void +copy_struct_N32(char *target, unsigned offset, ffi_abi abi, ffi_type *type, + int argn, unsigned arg_offset, ffi_arg *ar, + ffi_arg *fpr, int soft_float) +{ + ffi_type **elt_typep = type->elements; + while(*elt_typep) + { + ffi_type *elt_type = *elt_typep; + unsigned o; + char *tp; + char *argp; + char *fpp; + + o = FFI_ALIGN(offset, elt_type->alignment); + arg_offset += o - offset; + offset = o; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + + argp = (char *)(ar + argn); + fpp = (char *)(argn >= 8 ? ar + argn : fpr + argn); + + tp = target + offset; + + if (elt_type->type == FFI_TYPE_DOUBLE && !soft_float) + *(double *)tp = *(double *)fpp; + else + memcpy(tp, argp + arg_offset, elt_type->size); + + offset += elt_type->size; + arg_offset += elt_type->size; + elt_typep++; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + } +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer + * arguments. FPR is a pointer to the area where floating point + * registers have been saved. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return flags. 
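+ *
+ * Editor's note: unlike the O32 variant above, which returns a raw
+ * FFI_TYPE_* code, this routine returns cif->flags >> (FFI_FLAG_BITS * 8),
+ * i.e. the encoded return-type flags prepared by ffi_prep_cif_machdep.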
+ * + */ +int +ffi_closure_mips_inner_N32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + ffi_arg *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn; + int soft_float; + ffi_arg *argp; + + soft_float = cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT; + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + argn = 0; + + if (cif->rstruct_flag) + { +#if _MIPS_SIM==_ABIN32 + rvalue = (void *)(UINT32)ar[0]; +#else /* N64 */ + rvalue = (void *)ar[0]; +#endif + argn = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->type == FFI_TYPE_FLOAT + || arg_types[i]->type == FFI_TYPE_DOUBLE + || arg_types[i]->type == FFI_TYPE_LONGDOUBLE) + { + argp = (argn >= 8 || i >= cif->mips_nfixedargs || soft_float) ? ar + argn : fpr + argn; + if ((arg_types[i]->type == FFI_TYPE_LONGDOUBLE) && ((uintptr_t)argp & (arg_types[i]->alignment-1))) + { + argp=(ffi_arg*)FFI_ALIGN(argp,arg_types[i]->alignment); + argn++; + } +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT && argn < 8) + avaluep[i] = ((char *) argp) + sizeof (float); + else +#endif + avaluep[i] = (char *) argp; + } + else + { + unsigned type = arg_types[i]->type; + + if (arg_types[i]->alignment > sizeof(ffi_arg)) + argn = FFI_ALIGN(argn, arg_types[i]->alignment / sizeof(ffi_arg)); + + argp = ar + argn; + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (cif->abi == FFI_N64 || cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (soft_float && type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + + switch (type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) *argp; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) *argp; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) *argp; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) *argp; + break; + + case FFI_TYPE_SINT32: + avaluep[i] = &avalue[i]; + *(SINT32 *) &avalue[i] = (SINT32) *argp; + break; + + case FFI_TYPE_UINT32: + avaluep[i] = &avalue[i]; + *(UINT32 *) &avalue[i] = (UINT32) *argp; + break; + + case FFI_TYPE_STRUCT: + if (argn < 8) + { + /* Allocate space for the struct as at least part of + it was passed in registers. */ + avaluep[i] = alloca(arg_types[i]->size); + copy_struct_N32(avaluep[i], 0, cif->abi, arg_types[i], + argn, 0, ar, fpr, i >= cif->mips_nfixedargs || soft_float); + + break; + } + /* Else fall through. */ + default: + avaluep[i] = (char *) argp; + break; + } + } + argn += FFI_ALIGN(arg_types[i]->size, sizeof(ffi_arg)) / sizeof(ffi_arg); + i++; + } + + /* Invoke the closure. 
*/ + fun (cif, rvalue, avaluep, user_data); + + return cif->flags >> (FFI_FLAG_BITS * 8); +} + +#endif /* FFI_MIPS_N32 */ + +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void * fn; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_go_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_go_closure_N32; +#endif /* FFI_MIPS_O32 */ + + closure->tramp = (void *)fn; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* FFI_CLOSURES */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget.h new file mode 100644 index 0000000..fffdb97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/ffitarget.h @@ -0,0 +1,244 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for MIPS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifdef __linux__ +# include +#elif defined(__rtems__) +/* + * Subprogram calling convention - copied from sgidefs.h + */ +#define _MIPS_SIM_ABI32 1 +#define _MIPS_SIM_NABI32 2 +#define _MIPS_SIM_ABI64 3 +#elif !defined(__OpenBSD__) +# include +#endif + +# ifndef _ABIN32 +# define _ABIN32 _MIPS_SIM_NABI32 +# endif +# ifndef _ABI64 +# define _ABI64 _MIPS_SIM_ABI64 +# endif +# ifndef _ABIO32 +# define _ABIO32 _MIPS_SIM_ABI32 +# endif + +#if !defined(_MIPS_SIM) +# error -- something is very wrong -- +#else +# if (_MIPS_SIM==_ABIN32 && defined(_ABIN32)) || (_MIPS_SIM==_ABI64 && defined(_ABI64)) +# define FFI_MIPS_N32 +# else +# if (_MIPS_SIM==_ABIO32 && defined(_ABIO32)) +# define FFI_MIPS_O32 +# else +# error -- this is an unsupported platform -- +# endif +# endif +#endif + +#ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +# define FFI_SIZEOF_ARG 4 +#else +/* N32 and N64 frames have 64bit integer args */ +# define FFI_SIZEOF_ARG 8 +# if _MIPS_SIM == _ABIN32 +# define FFI_SIZEOF_JAVA_RAW 4 +# endif +#endif + +#define FFI_FLAG_BITS 2 + +/* SGI's strange assembler requires that we multiply by 4 rather + than shift left by FFI_FLAG_BITS */ + +#define FFI_ARGS_D FFI_TYPE_DOUBLE +#define FFI_ARGS_F FFI_TYPE_FLOAT +#define FFI_ARGS_DD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_DOUBLE +#define FFI_ARGS_FF FFI_TYPE_FLOAT * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_FD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_DF FFI_TYPE_FLOAT * 4 + FFI_TYPE_DOUBLE + +/* Needed for N32 structure returns */ +#define FFI_TYPE_SMALLSTRUCT FFI_TYPE_UINT8 +#define FFI_TYPE_SMALLSTRUCT2 FFI_TYPE_SINT8 + +#if 0 +/* The SGI assembler can't handle this.. */ +#define FFI_TYPE_STRUCT_DD (( FFI_ARGS_DD ) << 4) + FFI_TYPE_STRUCT +/* (and so on) */ +#else +/* ...so we calculate these by hand! 
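+   Editor's note, spelling the arithmetic out with the FFI_TYPE_* codes
+   from ffi.h (FFI_TYPE_FLOAT == 2, FFI_TYPE_DOUBLE == 3,
+   FFI_TYPE_STRUCT == 13):
+     FFI_TYPE_STRUCT_DD = (FFI_ARGS_DD << 4) + FFI_TYPE_STRUCT
+                        = ((3 * 4 + 3) << 4) + 13 = 240 + 13 = 253
+   and likewise D = (3 << 4) + 13 = 61,  F = (2 << 4) + 13 = 45,
+   FF = (10 << 4) + 13 = 173,  FD = (14 << 4) + 13 = 237,
+   DF = (11 << 4) + 13 = 189; the _SOFT variants add 16 << 4 = 256.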
*/ +#define FFI_TYPE_STRUCT_D 61 +#define FFI_TYPE_STRUCT_F 45 +#define FFI_TYPE_STRUCT_DD 253 +#define FFI_TYPE_STRUCT_FF 173 +#define FFI_TYPE_STRUCT_FD 237 +#define FFI_TYPE_STRUCT_DF 189 +#define FFI_TYPE_STRUCT_SMALL 93 +#define FFI_TYPE_STRUCT_SMALL2 109 + +/* and for n32 soft float, add 16 * 2^4 */ +#define FFI_TYPE_STRUCT_D_SOFT 317 +#define FFI_TYPE_STRUCT_F_SOFT 301 +#define FFI_TYPE_STRUCT_DD_SOFT 509 +#define FFI_TYPE_STRUCT_FF_SOFT 429 +#define FFI_TYPE_STRUCT_FD_SOFT 493 +#define FFI_TYPE_STRUCT_DF_SOFT 445 +#define FFI_TYPE_STRUCT_SOFT 16 +#endif + +#ifdef LIBFFI_ASM +#define v0 $2 +#define v1 $3 +#define a0 $4 +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define a4 $8 +#define a5 $9 +#define a6 $10 +#define a7 $11 +#define t0 $8 +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define t8 $24 +#define t9 $25 +#define ra $31 + +#ifdef FFI_MIPS_O32 +# define REG_L lw +# define REG_S sw +# define SUBU subu +# define ADDU addu +# define SRL srl +# define LI li +#else /* !FFI_MIPS_O32 */ +# define REG_L ld +# define REG_S sd +# define SUBU dsubu +# define ADDU daddu +# define SRL dsrl +# define LI dli +# if (_MIPS_SIM==_ABI64) +# define LA dla +# define EH_FRAME_ALIGN 3 +# define FDE_ADDR_BYTES .8byte +# else +# define LA la +# define EH_FRAME_ALIGN 2 +# define FDE_ADDR_BYTES .4byte +# endif /* _MIPS_SIM==_ABI64 */ +#endif /* !FFI_MIPS_O32 */ +#else /* !LIBFFI_ASM */ +# ifdef __GNUC__ +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__SI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__SI__))); +#else +/* N32 and N64 frames have 64bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__DI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__DI__))); +# endif +# else +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef __uint32_t ffi_arg; +typedef __int32_t ffi_sarg; +# else +/* N32 and N64 frames have 64bit integer args */ +typedef __uint64_t ffi_arg; +typedef __int64_t ffi_sarg; +# endif +# endif /* __GNUC__ */ + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_O32, + FFI_N32, + FFI_N64, + FFI_O32_SOFT_FLOAT, + FFI_N32_SOFT_FLOAT, + FFI_N64_SOFT_FLOAT, + FFI_LAST_ABI, + +#ifdef FFI_MIPS_O32 +#ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_O32_SOFT_FLOAT +#else + FFI_DEFAULT_ABI = FFI_O32 +#endif +#else +# if _MIPS_SIM==_ABI64 +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N64_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N64 +# endif +# else +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N32_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N32 +# endif +# endif +#endif +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS unsigned rstruct_flag; unsigned mips_nfixedargs +#define FFI_TARGET_SPECIFIC_VARIADIC +#endif /* !LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) +# define FFI_TRAMPOLINE_SIZE 20 +#else +# define FFI_TRAMPOLINE_SIZE 56 +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/n32.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/n32.S new file mode 100644 index 0000000..1a940b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/mips/n32.S @@ -0,0 +1,663 @@ +/* 
----------------------------------------------------------------------- + n32.S - Copyright (c) 1996, 1998, 2005, 2007, 2009, 2010 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Only build this code if we are compiling for n32 */ + +#if defined(FFI_MIPS_N32) + +#define callback a0 +#define bytes a2 +#define flags a3 +#define raddr a4 +#define fn a5 +#define closure a6 + +/* Note: to keep stack 16 byte aligned we need even number slots + used 9 slots here +*/ +#define SIZEOF_FRAME ( 10 * FFI_SIZEOF_ARG ) + +#ifdef __GNUC__ + .abicalls +#endif +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + .set mips4 +#endif + .text + .align 2 + .globl ffi_call_N32 + .ent ffi_call_N32 +ffi_call_N32: +.LFB0: + .frame $fp, SIZEOF_FRAME, ra + .mask 0xc0000000,-FFI_SIZEOF_ARG + .fmask 0x00000000,0 + + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +.LCFI00: + REG_S $fp, SIZEOF_FRAME - 2*FFI_SIZEOF_ARG($sp) # Save frame pointer + REG_S ra, SIZEOF_FRAME - 1*FFI_SIZEOF_ARG($sp) # Save return address +.LCFI01: + move $fp, $sp +.LCFI02: + move t9, callback # callback function pointer + REG_S bytes, 2*FFI_SIZEOF_ARG($fp) # bytes + REG_S flags, 3*FFI_SIZEOF_ARG($fp) # flags + REG_S raddr, 4*FFI_SIZEOF_ARG($fp) # raddr + REG_S fn, 5*FFI_SIZEOF_ARG($fp) # fn + REG_S closure, 6*FFI_SIZEOF_ARG($fp) # closure + + # Allocate at least 4 words in the argstack + move v0, bytes + bge bytes, 4 * FFI_SIZEOF_ARG, bigger + LI v0, 4 * FFI_SIZEOF_ARG + b sixteen + + bigger: + ADDU t4, v0, 2 * FFI_SIZEOF_ARG -1 # make sure it is aligned + and v0, t4, -2 * FFI_SIZEOF_ARG # to a proper boundry. + +sixteen: + SUBU $sp, $sp, v0 # move the stack pointer to reflect the + # arg space + + move a0, $sp # 4 * FFI_SIZEOF_ARG + ADDU a3, $fp, 3 * FFI_SIZEOF_ARG + + # Call ffi_prep_args + jal t9 + + # Copy the stack pointer to t9 + move t9, $sp + + # Fix the stack if there are more than 8 64bit slots worth + # of arguments. + + # Load the number of bytes + REG_L t6, 2*FFI_SIZEOF_ARG($fp) + + # Is it bigger than 8 * FFI_SIZEOF_ARG? + daddiu t8, t6, -(8 * FFI_SIZEOF_ARG) + bltz t8, loadregs + + ADDU t9, t9, t8 + +loadregs: + + REG_L t6, 3*FFI_SIZEOF_ARG($fp) # load the flags word into t6. 
+ +#ifdef __mips_soft_float + REG_L a0, 0*FFI_SIZEOF_ARG(t9) + REG_L a1, 1*FFI_SIZEOF_ARG(t9) + REG_L a2, 2*FFI_SIZEOF_ARG(t9) + REG_L a3, 3*FFI_SIZEOF_ARG(t9) + REG_L a4, 4*FFI_SIZEOF_ARG(t9) + REG_L a5, 5*FFI_SIZEOF_ARG(t9) + REG_L a6, 6*FFI_SIZEOF_ARG(t9) + REG_L a7, 7*FFI_SIZEOF_ARG(t9) +#else + and t4, t6, ((1< +#include + +/* Only build this code if we are compiling for o32 */ + +#if defined(FFI_MIPS_O32) + +#define callback a0 +#define bytes a2 +#define flags a3 + +#define SIZEOF_FRAME (4 * FFI_SIZEOF_ARG + 2 * FFI_SIZEOF_ARG) +#define A3_OFF (SIZEOF_FRAME + 3 * FFI_SIZEOF_ARG) +#define FP_OFF (SIZEOF_FRAME - 2 * FFI_SIZEOF_ARG) +#define RA_OFF (SIZEOF_FRAME - 1 * FFI_SIZEOF_ARG) + + .abicalls + .text + .align 2 + .globl ffi_call_O32 + .ent ffi_call_O32 +ffi_call_O32: +$LFB0: + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +$LCFI00: + REG_S $fp, FP_OFF($sp) # Save frame pointer +$LCFI01: + REG_S ra, RA_OFF($sp) # Save return address +$LCFI02: + move $fp, $sp + +$LCFI03: + move t9, callback # callback function pointer + REG_S flags, A3_OFF($fp) # flags + + # Allocate at least 4 words in the argstack + LI v0, 4 * FFI_SIZEOF_ARG + blt bytes, v0, sixteen + + ADDU v0, bytes, 7 # make sure it is aligned + and v0, -8 # to an 8 byte boundry + +sixteen: + SUBU $sp, v0 # move the stack pointer to reflect the + # arg space + + ADDU a0, $sp, 4 * FFI_SIZEOF_ARG + + jalr t9 + + REG_L t0, A3_OFF($fp) # load the flags word + SRL t2, t0, 4 # shift our arg info + and t0, ((1<<4)-1) # mask out the return type + + ADDU $sp, 4 * FFI_SIZEOF_ARG # adjust $sp to new args + +#ifndef __mips_soft_float + bnez t0, pass_d # make it quick for int +#endif + REG_L a0, 0*FFI_SIZEOF_ARG($sp) # just go ahead and load the + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # four regs. + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +#ifndef __mips_soft_float +pass_d: + bne t0, FFI_ARGS_D, pass_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a2, 2*FFI_SIZEOF_ARG($sp) # passing a double + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f: + bne t0, FFI_ARGS_F, pass_d_d + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # passing a float + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_d: + bne t0, FFI_ARGS_DD, pass_f_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing two doubles + b call_it + +pass_f_f: + bne t0, FFI_ARGS_FF, pass_d_f + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 1*FFI_SIZEOF_ARG($sp) # passing two floats + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_f: + bne t0, FFI_ARGS_DF, pass_f_d + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f_d: + # assume that the only other combination must be float then double + # bne t0, FFI_ARGS_F_D, call_it + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float +#endif + +call_it: + # Load the static chain pointer + REG_L t7, SIZEOF_FRAME + 6*FFI_SIZEOF_ARG($fp) + + # Load the function pointer + REG_L t9, SIZEOF_FRAME + 5*FFI_SIZEOF_ARG($fp) + + # If the return value pointer is NULL, assume no return value. 
+ REG_L t1, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + beqz t1, noretval + + bne t2, FFI_TYPE_INT, retlonglong + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v0, 0(t0) + b epilogue + +retlonglong: + # Really any 64-bit int, signed or not. + bne t2, FFI_TYPE_UINT64, retfloat + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v1, 4(t0) + REG_S v0, 0(t0) + b epilogue + +retfloat: + bne t2, FFI_TYPE_FLOAT, retdouble + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.s $f0, 0(t0) +#else + REG_S v0, 0(t0) +#endif + b epilogue + +retdouble: + bne t2, FFI_TYPE_DOUBLE, noretval + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.d $f0, 0(t0) +#else + REG_S v1, 4(t0) + REG_S v0, 0(t0) +#endif + b epilogue + +noretval: + jalr t9 + + # Epilogue +epilogue: + move $sp, $fp + REG_L $fp, FP_OFF($sp) # Restore frame pointer + REG_L ra, RA_OFF($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME # Fix stack pointer + j ra + +$LFE0: + .end ffi_call_O32 + + +/* ffi_closure_O32. Expects address of the passed-in ffi_closure + in t4 ($12). Stores any arguments passed in registers onto the + stack, then calls ffi_closure_mips_inner_O32, which + then decodes them. + + Stack layout: + + 3 - a3 save + 2 - a2 save + 1 - a1 save + 0 - a0 save, original sp + -1 - ra save + -2 - fp save + -3 - $16 (s0) save + -4 - cprestore + -5 - return value high (v1) + -6 - return value low (v0) + -7 - f14 (le high, be low) + -8 - f14 (le low, be high) + -9 - f12 (le high, be low) + -10 - f12 (le low, be high) + -11 - Called function a5 save + -12 - Called function a4 save + -13 - Called function a3 save + -14 - Called function a2 save + -15 - Called function a1 save + -16 - Called function a0 save, our sp and fp point here + */ + +#define SIZEOF_FRAME2 (16 * FFI_SIZEOF_ARG) +#define A3_OFF2 (SIZEOF_FRAME2 + 3 * FFI_SIZEOF_ARG) +#define A2_OFF2 (SIZEOF_FRAME2 + 2 * FFI_SIZEOF_ARG) +#define A1_OFF2 (SIZEOF_FRAME2 + 1 * FFI_SIZEOF_ARG) +#define A0_OFF2 (SIZEOF_FRAME2 + 0 * FFI_SIZEOF_ARG) +#define RA_OFF2 (SIZEOF_FRAME2 - 1 * FFI_SIZEOF_ARG) +#define FP_OFF2 (SIZEOF_FRAME2 - 2 * FFI_SIZEOF_ARG) +#define S0_OFF2 (SIZEOF_FRAME2 - 3 * FFI_SIZEOF_ARG) +#define GP_OFF2 (SIZEOF_FRAME2 - 4 * FFI_SIZEOF_ARG) +#define V1_OFF2 (SIZEOF_FRAME2 - 5 * FFI_SIZEOF_ARG) +#define V0_OFF2 (SIZEOF_FRAME2 - 6 * FFI_SIZEOF_ARG) +#define FA_1_1_OFF2 (SIZEOF_FRAME2 - 7 * FFI_SIZEOF_ARG) +#define FA_1_0_OFF2 (SIZEOF_FRAME2 - 8 * FFI_SIZEOF_ARG) +#define FA_0_1_OFF2 (SIZEOF_FRAME2 - 9 * FFI_SIZEOF_ARG) +#define FA_0_0_OFF2 (SIZEOF_FRAME2 - 10 * FFI_SIZEOF_ARG) +#define CALLED_A5_OFF2 (SIZEOF_FRAME2 - 11 * FFI_SIZEOF_ARG) +#define CALLED_A4_OFF2 (SIZEOF_FRAME2 - 12 * FFI_SIZEOF_ARG) + + .text + + .align 2 + .globl ffi_go_closure_O32 + .ent ffi_go_closure_O32 +ffi_go_closure_O32: +$LFB1: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI10: + + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI11: + + move $fp, $sp +$LCFI12: + + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 4($15) # cif + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. 
+ s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 4($15) # cif + REG_L a1, 8($15) # fun + move a2, $15 # user_data = go closure + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + + b $do_closure + +$LFE1: + .end ffi_go_closure_O32 + + .align 2 + .globl ffi_closure_O32 + .ent ffi_closure_O32 +ffi_closure_O32: +$LFB2: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI20: + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI21: + move $fp, $sp + +$LCFI22: + # Store all possible argument registers. If there are more than + # four arguments, then they are stored above where we put a3. + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 20($12) # cif pointer follows tramp. + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. + s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 20($12) # cif pointer follows tramp. + REG_L a1, 24($12) # fun + REG_L a2, 28($12) # user_data + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + +$do_closure: + la t9, ffi_closure_mips_inner_O32 + # Call ffi_closure_mips_inner_O32 to do the work. + jalr t9 + + # Load the return value into the appropriate register. + move $8, $2 + li $9, FFI_TYPE_VOID + beq $8, $9, closure_done + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp restore if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + li $9, FFI_TYPE_FLOAT + l.s $f0, V0_OFF2($fp) + beq $8, $9, closure_done + + li $9, FFI_TYPE_DOUBLE + l.d $f0, V0_OFF2($fp) + beq $8, $9, closure_done +#endif +1: + REG_L $3, V1_OFF2($fp) + REG_L $2, V0_OFF2($fp) + +closure_done: + # Epilogue + move $sp, $fp + REG_L $16, S0_OFF2($sp) # Restore s0 + REG_L $fp, FP_OFF2($sp) # Restore frame pointer + REG_L ra, RA_OFF2($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME2 + j ra +$LFE2: + .end ffi_closure_O32 + +/* DWARF-2 unwind info. 
*/ + + .section .eh_frame,"a",@progbits +$Lframe0: + .4byte $LECIE0-$LSCIE0 # Length of Common Information Entry +$LSCIE0: + .4byte 0x0 # CIE Identifier Tag + .byte 0x1 # CIE Version + .ascii "zR\0" # CIE Augmentation + .uleb128 0x1 # CIE Code Alignment Factor + .sleb128 4 # CIE Data Alignment Factor + .byte 0x1f # CIE RA Column + .uleb128 0x1 # Augmentation size + .byte 0x00 # FDE Encoding (absptr) + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1d + .uleb128 0x0 + .align 2 +$LECIE0: + +$LSFDE0: + .4byte $LEFDE0-$LASFDE0 # FDE Length +$LASFDE0: + .4byte $LASFDE0-$Lframe0 # FDE CIE offset + .4byte $LFB0 # FDE initial location + .4byte $LFE0-$LFB0 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI00-$LFB0 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 0x18 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI01-$LCFI00 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI02-$LCFI01 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x18 + .align 2 +$LEFDE0: + +$LSFDE1: + .4byte $LEFDE1-$LASFDE1 # FDE Length +$LASFDE1: + .4byte $LASFDE1-$Lframe0 # FDE CIE offset + .4byte $LFB1 # FDE initial location + .4byte $LFE1-$LFB1 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI10-$LFB1 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI11-$LCFI10 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI12-$LCFI11 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE1: + +$LSFDE2: + .4byte $LEFDE2-$LASFDE2 # FDE Length +$LASFDE2: + .4byte $LASFDE2-$Lframe0 # FDE CIE offset + .4byte $LFB2 # FDE initial location + .4byte $LFE2-$LFB2 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI20-$LFB2 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI21-$LCFI20 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI22-$LCFI21 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE2: + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi.S new file mode 100644 index 0000000..10cfb04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/eabi.S @@ -0,0 +1,101 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2012, 2013 Anthony Green + + Moxie Assembly glue. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # $r0 : ffi_prep_args + # $r1 : &ecif + # $r2 : cif->bytes + # $r3 : fig->flags + # $r4 : ecif.rvalue + # $r5 : fn + +ffi_call_EABI: + push $sp, $r6 + push $sp, $r7 + push $sp, $r8 + dec $sp, 24 + + /* Store incoming args on stack. */ + sto.l 0($sp), $r0 /* ffi_prep_args */ + sto.l 4($sp), $r1 /* ecif */ + sto.l 8($sp), $r2 /* bytes */ + sto.l 12($sp), $r3 /* flags */ + sto.l 16($sp), $r4 /* &rvalue */ + sto.l 20($sp), $r5 /* fn */ + + /* Call ffi_prep_args. */ + mov $r6, $r4 /* Save result buffer */ + mov $r7, $r5 /* Save the target fn */ + mov $r8, $r3 /* Save the flags */ + sub $sp, $r2 /* Allocate stack space */ + mov $r0, $sp /* We can stomp over $r0 */ + /* $r1 is already set up */ + jsra ffi_prep_args + + /* Load register arguments. */ + ldo.l $r0, 0($sp) + ldo.l $r1, 4($sp) + ldo.l $r2, 8($sp) + ldo.l $r3, 12($sp) + ldo.l $r4, 16($sp) + ldo.l $r5, 20($sp) + + /* Call the target function. */ + jsr $r7 + + ldi.l $r7, 0xffffffff + cmp $r8, $r7 + beq retstruct + + ldi.l $r7, 4 + cmp $r8, $r7 + bgt ret2reg + + st.l ($r6), $r0 + jmpa retdone + +ret2reg: + st.l ($r6), $r0 + sto.l 4($r6), $r1 + +retstruct: +retdone: + /* Return. 
*/ + ldo.l $r6, -4($fp) + ldo.l $r7, -8($fp) + ldo.l $r8, -12($fp) + ret + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi.c new file mode 100644 index 0000000..16d2bb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffi.c @@ -0,0 +1,285 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2012, 2013, 2018 Anthony Green + + Moxie Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 
24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in $r12. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("$r12"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("$fp"); + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) arg1; + + /* 6 words reserved for register args + 3 words from jsr */ + char *stack_args = frame_pointer + 9*4; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + char *register_args_ptr = (char *) register_args; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ptr += 4; + register_args_ptr = (char *)®ister_args[1]; + } + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + if (ptr == (char *) ®ister_args[5]) + { + /* The value is split across two locations */ + unsigned *ip = alloca(8); + avalue[i] = ip; + ip[0] = *(unsigned *) ptr; + ip[1] = *(unsigned *) stack_args; + } + else + { + avalue[i] = ptr; + } + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == (char *) ®ister_args[6]) + ptr = stack_args; + else if (ptr == (char *) ®ister_args[7]) + ptr = stack_args + 4; + } + + /* Invoke the closure. 
*/ + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + asm ("mov $r12, %0\n ld.l $r0, ($r12)\n ldo.l $r1, 4($r12)" : : "r" (&rvalue)); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; + + if (cif->abi != FFI_EABI) + return FFI_BAD_ABI; + + fn = (unsigned long) ffi_closure_eabi; + + tramp[0] = 0x01e0; /* ldi.l $r12, .... */ + tramp[1] = cls >> 16; + tramp[2] = cls & 0xffff; + tramp[3] = 0x1a00; /* jmpa .... */ + tramp[4] = fn >> 16; + tramp[5] = fn & 0xffff; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget.h new file mode 100644 index 0000000..623e3ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/moxie/ffitarget.h @@ -0,0 +1,52 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2013 Anthony Green + Target configuration macros for Moxie + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_DEFAULT_ABI = FFI_EABI, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +/* Trampolines are 12-bytes long. See ffi_prep_closure_loc. 
*/ +#define FFI_TRAMPOLINE_SIZE (12) + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi.c new file mode 100644 index 0000000..721080d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffi.c @@ -0,0 +1,304 @@ +/* libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#include +#include + +#include + +/* The Nios II Processor Reference Handbook defines the procedure call + ABI as follows. + + Arguments are passed as if a structure containing the types of + the arguments were constructed. The first 16 bytes are passed in r4 + through r7, the remainder on the stack. The first 16 bytes of a function + taking variable arguments are passed in r4-r7 in the same way. + + Return values of types up to 8 bytes are returned in r2 and r3. For + return values greater than 8 bytes, the caller must allocate memory for + the result and pass the address as if it were argument 0. + + While this isn't specified explicitly in the ABI documentation, GCC + promotes integral arguments smaller than int size to 32 bits. + + Also of note, the ABI specifies that all structure objects are + aligned to 32 bits even if all their fields have a smaller natural + alignment. See FFI_AGGREGATE_ALIGNMENT. */ + + +/* Declare the assembly language hooks. */ + +extern UINT64 ffi_call_sysv (void (*) (char *, extended_cif *), + extended_cif *, + unsigned, + void (*fn) (void)); +extern void ffi_closure_sysv (void); + +/* Perform machine-dependent cif processing. */ + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* We always want at least 16 bytes in the parameter block since it + simplifies the low-level call function. Also round the parameter + block size up to a multiple of 4 bytes to preserve + 32-bit alignment of the stack pointer. */ + if (cif->bytes < 16) + cif->bytes = 16; + else + cif->bytes = (cif->bytes + 3) & ~3; + + return FFI_OK; +} + + +/* ffi_prep_args is called by the assembly routine to transfer arguments + to the stack using the pointers in the ecif array. + Note that the stack buffer is big enough to fit all the arguments, + but the first 16 bytes will be copied to registers for the actual + call. 
*/ + +void ffi_prep_args (char *stack, extended_cif *ecif) +{ + char *argp = stack; + unsigned int i; + + /* The implicit return value pointer is passed as if it were a hidden + first argument. */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && ecif->cif->rtype->size > 8) + { + (*(void **) argp) = ecif->rvalue; + argp += 4; + } + + for (i = 0; i < ecif->cif->nargs; i++) + { + void *avalue = ecif->avalue[i]; + ffi_type *atype = ecif->cif->arg_types[i]; + size_t size = atype->size; + size_t alignment = atype->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Copy the argument, promoting integral types smaller than a + word to word size. */ + if (size < sizeof (int)) + { + size = sizeof (int); + switch (atype->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) avalue; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) avalue; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) avalue; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) avalue; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, avalue, atype->size); + break; + + default: + FFI_ASSERT(0); + } + } + else if (size == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) avalue; + else + memcpy (argp, avalue, size); + argp += size; + } +} + + +/* Call FN using the prepared CIF. RVALUE points to space allocated by + the caller for the return value, and AVALUE is an array of argument + pointers. */ + +void ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + + extended_cif ecif; + UINT64 result; + + /* If bigret is true, this is the case where a return value of larger + than 8 bytes is handled by being passed by reference as an implicit + argument. */ + int bigret = (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8); + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Allocate space for return value if this is the pass-by-reference case + and the caller did not provide a buffer. */ + if (rvalue == NULL && bigret) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + result = ffi_call_sysv (ffi_prep_args, &ecif, cif->bytes, fn); + + /* Now result contains the 64 bit contents returned from fn in + r2 and r3. Copy the value of the appropriate size to the user-provided + rvalue buffer. */ + if (rvalue && !bigret) + switch (cif->rtype->size) + { + case 1: + *(UINT8 *)rvalue = (UINT8) result; + break; + case 2: + *(UINT16 *)rvalue = (UINT16) result; + break; + case 4: + *(UINT32 *)rvalue = (UINT32) result; + break; + case 8: + *(UINT64 *)rvalue = (UINT64) result; + break; + default: + memcpy (rvalue, (void *)&result, cif->rtype->size); + break; + } +} + +/* This function is invoked from the closure trampoline to invoke + CLOSURE with argument block ARGS. Parse ARGS according to + CLOSURE->cfi and invoke CLOSURE->fun. */ + +static UINT64 +ffi_closure_helper (unsigned char *args, + ffi_closure *closure) +{ + ffi_cif *cif = closure->cif; + unsigned char *argp = args; + void **parsed_args = alloca (cif->nargs * sizeof (void *)); + UINT64 result; + void *retptr; + unsigned int i; + + /* First figure out what to do about the return type. If this is the + big-structure-return case, the first arg is the hidden return buffer + allocated by the caller. 
*/ + if (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + { + retptr = *((void **) argp); + argp += 4; + } + else + retptr = (void *) &result; + + /* Fill in the array of argument pointers. */ + for (i = 0; i < cif->nargs; i++) + { + size_t size = cif->arg_types[i]->size; + size_t alignment = cif->arg_types[i]->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Arguments smaller than an int are promoted to int. */ + if (size < sizeof (int)) + size = sizeof (int); + + /* Store the pointer. */ + parsed_args[i] = argp; + argp += size; + } + + /* Call the user-supplied function. */ + (closure->fun) (cif, retptr, parsed_args, closure->user_data); + return result; +} + + +/* Initialize CLOSURE with a trampoline to call FUN with + CIF and USER_DATA. */ +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun) (ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + int i; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + /* The trampoline looks like: + movhi r8, %hi(ffi_closure_sysv) + ori r8, r8, %lo(ffi_closure_sysv) + movhi r9, %hi(ffi_closure_helper) + ori r0, r9, %lo(ffi_closure_helper) + movhi r10, %hi(closure) + ori r10, r10, %lo(closure) + jmp r8 + and then ffi_closure_sysv retrieves the closure pointer out of r10 + in addition to the arguments passed in the normal way for the call, + and invokes ffi_closure_helper. We encode the pointer to + ffi_closure_helper in the trampoline because making a PIC call + to it in ffi_closure_sysv would be messy (it would have to indirect + through the GOT). */ + +#define HI(x) ((((unsigned int) (x)) >> 16) & 0xffff) +#define LO(x) (((unsigned int) (x)) & 0xffff) + tramp[0] = (0 << 27) | (8 << 22) | (HI (ffi_closure_sysv) << 6) | 0x34; + tramp[1] = (8 << 27) | (8 << 22) | (LO (ffi_closure_sysv) << 6) | 0x14; + tramp[2] = (0 << 27) | (9 << 22) | (HI (ffi_closure_helper) << 6) | 0x34; + tramp[3] = (9 << 27) | (9 << 22) | (LO (ffi_closure_helper) << 6) | 0x14; + tramp[4] = (0 << 27) | (10 << 22) | (HI (closure) << 6) | 0x34; + tramp[5] = (10 << 27) | (10 << 22) | (LO (closure) << 6) | 0x14; + tramp[6] = (8 << 27) | (0x0d << 11) | 0x3a; +#undef HI +#undef LO + + /* Flush the caches. + See Example 9-4 in the Nios II Software Developer's Handbook. */ + for (i = 0; i < 7; i++) + asm volatile ("flushd 0(%0); flushi %0" :: "r"(tramp + i) : "memory"); + asm volatile ("flushp" ::: "memory"); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget.h new file mode 100644 index 0000000..134d118 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/ffitarget.h @@ -0,0 +1,52 @@ +/* libffi target includes for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Structures have a 4-byte alignment even if all the fields have lesser + alignment requirements. */ +#define FFI_AGGREGATE_ALIGNMENT 4 + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 28 /* 7 instructions */ +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv.S new file mode 100644 index 0000000..75f442b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/nios2/sysv.S @@ -0,0 +1,136 @@ +/* Low-level libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +/* This function is declared on the C side as + + extern UINT64 ffi_call_sysv (void (*arghook) (char *, extended_cif *), + extended_cif *ecif, + unsigned nbytes, + void (*fn) (void)); + + On input, the arguments appear as + r4 = arghook + r5 = ecif + r6 = nbytes + r7 = fn +*/ + + .section .text + .align 2 + .global ffi_call_sysv + .type ffi_call_sysv, @function + +ffi_call_sysv: + .cfi_startproc + + /* Create the stack frame, saving r16 so we can use it locally. */ + addi sp, sp, -12 + .cfi_def_cfa_offset 12 + stw ra, 8(sp) + stw fp, 4(sp) + stw r16, 0(sp) + .cfi_offset 31, -4 + .cfi_offset 28, -8 + .cfi_offset 16, -12 + mov fp, sp + .cfi_def_cfa_register 28 + mov r16, r7 + + /* Adjust the stack pointer to create the argument buffer + nbytes long. */ + sub sp, sp, r6 + + /* Call the arghook function. */ + mov r2, r4 /* fn */ + mov r4, sp /* argbuffer */ + callr r2 /* r5 already contains ecif */ + + /* Pop off the first 16 bytes of the argument buffer on the stack, + transferring the contents to the argument registers. */ + ldw r4, 0(sp) + ldw r5, 4(sp) + ldw r6, 8(sp) + ldw r7, 12(sp) + addi sp, sp, 16 + + /* Call the user function, which leaves its result in r2 and r3. */ + callr r16 + + /* Pop off the stack frame. */ + mov sp, fp + ldw ra, 8(sp) + ldw fp, 4(sp) + ldw r16, 0(sp) + addi sp, sp, 12 + ret + .cfi_endproc + .size ffi_call_sysv, .-ffi_call_sysv + + +/* Closure trampolines jump here after putting the C helper address + in r9 and the closure pointer in r10. The user-supplied arguments + to the closure are in the normal places, in r4-r7 and on the + stack. Push the register arguments on the stack too and then call the + C helper function to deal with them. */ + + .section .text + .align 2 + .global ffi_closure_sysv + .type ffi_closure_sysv, @function + +ffi_closure_sysv: + .cfi_startproc + + /* Create the stack frame, pushing the register args on the stack + just below the stack args. This is the same trick illustrated + in Figure 7-3 in the Nios II Processor Reference Handbook, used + for variable arguments and structures passed by value. */ + addi sp, sp, -20 + .cfi_def_cfa_offset 20 + stw ra, 0(sp) + .cfi_offset 31, -20 + stw r4, 4(sp) + .cfi_offset 4, -16 + stw r5, 8(sp) + .cfi_offset 5, -12 + stw r6, 12(sp) + .cfi_offset 6, -8 + stw r7, 16(sp) + .cfi_offset 7, -4 + + /* Call the helper. + r4 = pointer to arguments on stack + r5 = closure pointer (loaded in r10 by the trampoline) + r9 = address of helper function (loaded by trampoline) */ + addi r4, sp, 4 + mov r5, r10 + callr r9 + + /* Pop the stack and return. 
*/ + ldw ra, 0(sp) + addi sp, sp, 20 + .cfi_def_cfa_offset -20 + ret + .cfi_endproc + .size ffi_closure_sysv, .-ffi_closure_sysv + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi.c new file mode 100644 index 0000000..2bad938 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffi.c @@ -0,0 +1,328 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include "ffi_common.h" + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void* ffi_prep_args(char *stack, extended_cif *ecif) +{ + char *stacktemp = stack; + int i, s; + ffi_type **arg; + int count = 0; + int nfixedargs; + + nfixedargs = ecif->cif->nfixedargs; + arg = ecif->cif->arg_types; + void **argv = ecif->avalue; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) stack = ecif->rvalue; + stack += 4; + count = 4; + } + for(i=0; icif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + count = 24; + stack = stacktemp + 24; + } + nfixedargs--; + + s = 4; + switch((*arg)->type) + { + case FFI_TYPE_STRUCT: + *(void **)stack = *argv; + break; + + case FFI_TYPE_SINT8: + *(signed int *) stack = (signed int)*(SINT8 *)(* argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) stack = (unsigned int)*(UINT8 *)(* argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) stack = (signed int)*(SINT16 *)(* argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) stack = (unsigned int)*(UINT16 *)(* argv); + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + *(int *)stack = *(int*)(*argv); + break; + + default: /* 8 byte types */ + if (count == 20) /* never split arguments */ + { + stack += 4; + count += 4; + } + s = (*arg)->size; + memcpy(stack, *argv, s); + break; + } + + stack += s; + count += s; + argv++; + arg++; + } + return stacktemp + ((count>24)?24:0); +} + +extern void ffi_call_SYSV(unsigned, + extended_cif *, + void *(*)(int *, extended_cif *), + unsigned *, + void (*fn)(void), + unsigned); + + +void ffi_call(ffi_cif *cif, void (*fn)(void), void 
*rvalue, void **avalue) +{ + int i; + int size; + ffi_type **arg; + + /* Calculate size to allocate on stack */ + + for(i = 0, arg = cif->arg_types, size=0; i < cif->nargs; i++, arg++) + { + if ((*arg)->type == FFI_TYPE_STRUCT) + size += 4; + else + if ((*arg)->size <= 4) + size += 4; + else + size += 8; + } + + /* for variadic functions more space is needed on the stack */ + if (cif->nargs != cif->nfixedargs) + size += 24; + + if (cif->rtype->type == FFI_TYPE_STRUCT) + size += 4; + + + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(size, &ecif, ffi_prep_args, rvalue, fn, cif->flags); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +void ffi_closure_SYSV(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, unsigned long r8) +{ + register int *sp __asm__ ("r17"); + register int *r13 __asm__ ("r13"); + + ffi_closure* closure = (ffi_closure*) r13; + char *stack_args = sp; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { r3, r4, r5, r6, r7, r8 }; + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) r3; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int count = 0; + int nfixedargs = cif->nfixedargs; + int i; + + /* preserve struct type return pointer passing */ + + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ptr += 4; + count = 4; + } + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + ptr = stack_args; + count = 24; + } + nfixedargs--; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + + default: + /* 8-byte values */ + + /* arguments are never splitted */ + if (ptr == ®ister_args[5]) + ptr = stack_args; + avalue[i] = ptr; + ptr += 4; + count += 4; + break; + } + ptr += 4; + count += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + + if (count == 24) + ptr = stack_args; + } + + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } else + { + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + if (cif->rtype) + asm ("l.ori r12, %0, 0x0\n l.lwz r11, 0(r12)\n l.lwz r12, 4(r12)" : : "r" (&rvalue)); + } +} + + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) closure->tramp; + unsigned long fn = (unsigned long) ffi_closure_SYSV; + unsigned long cls = (unsigned long) codeloc; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + /* write pointers to temporary registers */ + tramp[0] = (0x6 << 10) | (13 << 5); /* l.movhi r13, ... 
*/ + tramp[1] = cls >> 16; + tramp[2] = (0x2a << 10) | (13 << 5) | 13; /* l.ori r13, r13, ... */ + tramp[3] = cls & 0xFFFF; + + tramp[4] = (0x6 << 10) | (15 << 5); /* l.movhi r15, ... */ + tramp[5] = fn >> 16; + tramp[6] = (0x2a << 10) | (15 << 5) | 15; /* l.ori r15, r15 ... */ + tramp[7] = fn & 0xFFFF; + + tramp[8] = (0x11 << 10); /* l.jr r15 */ + tramp[9] = 15 << 11; + + tramp[10] = (0x2a << 10) | (17 << 5) | 1; /* l.ori r17, r1, ... */ + tramp[11] = 0x0; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + cif->flags = 0; + + /* structures are returned as pointers */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = FFI_TYPE_STRUCT; + else + if (cif->rtype->size > 4) + cif->flags = FFI_TYPE_UINT64; + + cif->nfixedargs = cif->nargs; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + ffi_status status; + + status = ffi_prep_cif_machdep (cif); + cif->nfixedargs = nfixedargs; + return status; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget.h new file mode 100644 index 0000000..e55da28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/ffitarget.h @@ -0,0 +1,58 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2014 Sebastian Macke + + OpenRISC Target configuration macros + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE (24) + +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs; + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv.S new file mode 100644 index 0000000..df6570b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/or1k/sysv.S @@ -0,0 +1,107 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
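FFI_CLOSURES and the 24-byte FFI_TRAMPOLINE_SIZE above describe the trampoline that ffi_prep_closure_loc fills in for this port. A minimal sketch of the portable calling sequence that exercises that code path, assuming closure support is compiled in; handler, bias and fn are illustrative names, while ffi_closure_alloc, ffi_prep_closure_loc and ffi_closure_free are the standard public API:

#include <ffi.h>
#include <stdio.h>

/* Handler invoked whenever the generated trampoline is called. */
static void handler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif;
  int a = *(int *) args[0];
  int b = *(int *) args[1];
  *(ffi_arg *) ret = a + b + *(int *) user_data;
}

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
  void *code;                       /* executable address of the trampoline */
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
  int bias = 100;
  int (*fn)(int, int);

  if (closure == NULL
      || ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK
      || ffi_prep_closure_loc(closure, &cif, handler, &bias, code) != FFI_OK)
    return 1;

  fn = (int (*)(int, int)) code;
  printf("%d\n", fn(2, 40));        /* prints 142 */

  ffi_closure_free(closure);
  return 0;
}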
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +/* + r3: size to allocate on stack + r4: extended cif structure + r5: function pointer ffi_prep_args + r6: ret address + r7: function to call + r8: flag for return type +*/ + +ffi_call_SYSV: + /* Store registers used on stack */ + l.sw -4(r1), r9 /* return address */ + l.sw -8(r1), r1 /* stack address */ + l.sw -12(r1), r14 /* callee saved registers */ + l.sw -16(r1), r16 + l.sw -20(r1), r18 + l.sw -24(r1), r20 + + l.ori r14, r1, 0x0 /* save stack pointer */ + l.addi r1, r1, -24 + + l.ori r16, r7, 0x0 /* save function address */ + l.ori r18, r6, 0x0 /* save ret address */ + l.ori r20, r8, 0x0 /* save flag */ + + l.sub r1, r1, r3 /* reserve space on stack */ + + /* Call ffi_prep_args */ + l.ori r3, r1, 0x0 /* first argument stack address, second already ecif */ + l.jalr r5 + l.nop + + /* Load register arguments and call*/ + + l.lwz r3, 0(r1) + l.lwz r4, 4(r1) + l.lwz r5, 8(r1) + l.lwz r6, 12(r1) + l.lwz r7, 16(r1) + l.lwz r8, 20(r1) + l.ori r1, r11, 0x0 /* new stack pointer */ + l.jalr r16 + l.nop + + /* handle return values */ + + l.sfeqi r20, FFI_TYPE_STRUCT + l.bf ret /* structs don't return an rvalue */ + l.nop + + /* copy ret address */ + + l.sfeqi r20, FFI_TYPE_UINT64 + l.bnf four_byte_ret /* 8 byte value is returned */ + l.nop + + l.sw 4(r18), r12 + +four_byte_ret: + l.sw 0(r18), r11 + +ret: + /* return */ + l.ori r1, r14, 0x0 /* reset stack pointer */ + l.lwz r9, -4(r1) + l.lwz r1, -8(r1) + l.lwz r14, -12(r1) + l.lwz r16, -16(r1) + l.lwz r18, -20(r1) + l.lwz r20, -24(r1) + l.jr r9 + l.nop + +.size ffi_call_SYSV, .-ffi_call_SYSV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi.c new file mode 100644 index 0000000..95e6694 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffi.c @@ -0,0 +1,674 @@ +/* ----------------------------------------------------------------------- + ffi.c - (c) 2011 Anthony Green + (c) 2008 Red Hat, Inc. + (c) 2006 Free Software Foundation, Inc. + (c) 2003-2004 Randolph Chung + + HPPA Foreign Function Interface + HP-UX PA ABI support + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
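ffi_call_SYSV above and the HPPA ffi_call below are both reached through the same portable entry points. A minimal sketch of that caller-side sequence, assuming a hosted C environment; add_ints is an illustrative callee:

#include <ffi.h>
#include <stdio.h>

static int add_ints(int a, int b) { return a + b; }

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
  void *arg_values[2];
  int a = 2, b = 40;
  ffi_arg result;  /* integer results are widened to ffi_arg */

  arg_values[0] = &a;
  arg_values[1] = &b;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
    return 1;

  ffi_call(&cif, FFI_FN(add_ints), &result, arg_values);
  printf("%d\n", (int) result);     /* prints 42 */
  return 0;
}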
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#define ROUND_UP(v, a) (((size_t)(v) + (a) - 1) & ~((a) - 1)) + +#define MIN_STACK_SIZE 64 +#define FIRST_ARG_SLOT 9 +#define DEBUG_LEVEL 0 + +#define fldw(addr, fpreg) \ + __asm__ volatile ("fldw 0(%0), %%" #fpreg "L" : : "r"(addr) : #fpreg) +#define fstw(fpreg, addr) \ + __asm__ volatile ("fstw %%" #fpreg "L, 0(%0)" : : "r"(addr)) +#define fldd(addr, fpreg) \ + __asm__ volatile ("fldd 0(%0), %%" #fpreg : : "r"(addr) : #fpreg) +#define fstd(fpreg, addr) \ + __asm__ volatile ("fstd %%" #fpreg "L, 0(%0)" : : "r"(addr)) + +#define debug(lvl, x...) do { if (lvl <= DEBUG_LEVEL) { printf(x); } } while (0) + +static inline int ffi_struct_type(ffi_type *t) +{ + size_t sz = t->size; + + /* Small structure results are passed in registers, + larger ones are passed by pointer. Note that + small structures of size 2, 4 and 8 differ from + the corresponding integer types in that they have + different alignment requirements. */ + + if (sz <= 1) + return FFI_TYPE_UINT8; + else if (sz == 2) + return FFI_TYPE_SMALL_STRUCT2; + else if (sz == 3) + return FFI_TYPE_SMALL_STRUCT3; + else if (sz == 4) + return FFI_TYPE_SMALL_STRUCT4; + else if (sz == 5) + return FFI_TYPE_SMALL_STRUCT5; + else if (sz == 6) + return FFI_TYPE_SMALL_STRUCT6; + else if (sz == 7) + return FFI_TYPE_SMALL_STRUCT7; + else if (sz <= 8) + return FFI_TYPE_SMALL_STRUCT8; + else + return FFI_TYPE_STRUCT; /* else, we pass it by pointer. */ +} + +/* PA has a downward growing stack, which looks like this: + + Offset + [ Variable args ] + SP = (4*(n+9)) arg word N + ... + SP-52 arg word 4 + [ Fixed args ] + SP-48 arg word 3 + SP-44 arg word 2 + SP-40 arg word 1 + SP-36 arg word 0 + [ Frame marker ] + ... + SP-20 RP + SP-4 previous SP + + The first four argument words on the stack are reserved for use by + the callee. Instead, the general and floating registers replace + the first four argument slots. Non FP arguments are passed solely + in the general registers. FP arguments are passed in both general + and floating registers when using libffi. + + Non-FP 32-bit args are passed in gr26, gr25, gr24 and gr23. + Non-FP 64-bit args are passed in register pairs, starting + on an odd numbered register (i.e. r25+r26 and r23+r24). + FP 32-bit arguments are passed in fr4L, fr5L, fr6L and fr7L. + FP 64-bit arguments are passed in fr5 and fr7. + + The registers are allocated in the same manner as stack slots. + This allows the callee to save its arguments on the stack if + necessary: + + arg word 3 -> gr23 or fr7L + arg word 2 -> gr24 or fr6L or fr7R + arg word 1 -> gr25 or fr5L + arg word 0 -> gr26 or fr4L or fr5R + + Note that fr4R and fr6R are never used for arguments (i.e., + doubles are not passed in fr4 or fr6). + + The rest of the arguments are passed on the stack starting at SP-52, + but 64-bit arguments need to be aligned to an 8-byte boundary + + This means we can have holes either in the register allocation, + or in the stack. */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments + + The following code will put everything into the stack frame + (which was allocated by the asm routine), and on return + the asm routine will load the arguments that should be + passed by register into the appropriate registers + + NOTE: We load floating point args in this function... that means we + assume gcc will not mess with fp regs in here. 
*/ + +void ffi_prep_args_pa32(UINT32 *stack, extended_cif *ecif, unsigned bytes) +{ + register unsigned int i; + register ffi_type **p_arg; + register void **p_argv; + unsigned int slot = FIRST_ARG_SLOT; + char *dest_cpy; + size_t len; + + debug(1, "%s: stack = %p, ecif = %p, bytes = %u\n", __FUNCTION__, stack, + ecif, bytes); + + p_arg = ecif->cif->arg_types; + p_argv = ecif->avalue; + + for (i = 0; i < ecif->cif->nargs; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + *(SINT32 *)(stack - slot) = *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT32 *)(stack - slot) = *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT32 *)(stack - slot) = *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT32 *)(stack - slot) = *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + debug(3, "Storing UINT32 %u in slot %u\n", *(UINT32 *)(*p_argv), + slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + break; + + case FFI_TYPE_FLOAT: + /* First 4 args go in fr4L - fr7L. */ + debug(3, "Storing UINT32(float) in slot %u\n", slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 4 args go in fr4L - fr7L. */ + case 0: fldw(stack - slot, fr4); break; + case 1: fldw(stack - slot, fr5); break; + case 2: fldw(stack - slot, fr6); break; + case 3: fldw(stack - slot, fr7); break; + } + break; + + case FFI_TYPE_DOUBLE: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + debug(3, "Storing UINT64(double) at slot %u\n", slot); + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 2 args go in fr5, fr7. */ + case 1: fldd(stack - slot, fr5); break; + case 3: fldd(stack - slot, fr7); break; + } + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are passed in the same manner as structures + larger than 8 bytes. */ + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; +#endif + + case FFI_TYPE_STRUCT: + + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + + len = (*p_arg)->size; + if (len <= 4) + { + dest_cpy = (char *)(stack - slot) + 4 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else if (len <= 8) + { + slot += (slot & 1) ? 1 : 2; + dest_cpy = (char *)(stack - slot) + 8 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + p_argv++; + } + + /* Make sure we didn't mess up and scribble on the stack. 
*/ + { + unsigned int n; + + debug(5, "Stack setup:\n"); + for (n = 0; n < (bytes + 3) / 4; n++) + { + if ((n%4) == 0) { debug(5, "\n%08x: ", (unsigned int)(stack - n)); } + debug(5, "%08x ", *(stack - n)); + } + debug(5, "\n"); + } + + FFI_ASSERT(slot * 4 <= bytes); + + return; +} + +static void ffi_size_stack_pa32(ffi_cif *cif) +{ + ffi_type **ptr; + int i; + int z = 0; /* # stack slots */ + + for (ptr = cif->arg_types, i = 0; i < cif->nargs; ptr++, i++) + { + int type = (*ptr)->type; + + switch (type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + z += 2 + (z & 1); /* must start on even regs, so we may waste one */ + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: +#endif + case FFI_TYPE_STRUCT: + z += 1; /* pass by ptr, callee will copy */ + break; + + default: /* <= 32-bit values */ + z++; + } + } + + /* We can fit up to 6 args in the default 64-byte stack frame, + if we need more, we need more stack. */ + if (z <= 6) + cif->bytes = MIN_STACK_SIZE; /* min stack size */ + else + cif->bytes = 64 + ROUND_UP((z - 6) * sizeof(UINT32), MIN_STACK_SIZE); + + debug(3, "Calculated stack size is %u bytes\n", cif->bytes); +} + +/* Perform machine dependent cif processing. */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a structure. */ + cif->flags = FFI_TYPE_STRUCT; + break; +#endif + + case FFI_TYPE_STRUCT: + /* For the return type we have to check the size of the structures. + If the size is smaller or equal 4 bytes, the result is given back + in one register. If the size is smaller or equal 8 bytes than we + return the result in two registers. But if the size is bigger than + 8 bytes, we work with pointers. */ + cif->flags = ffi_struct_type(cif->rtype); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + /* Lucky us, because of the unique PA ABI we get to do our + own stack sizing. */ + switch (cif->abi) + { + case FFI_PA32: + ffi_size_stack_pa32(cif); + break; + + default: + FFI_ASSERT(0); + break; + } + + return FFI_OK; +} + +extern void ffi_call_pa32(void (*)(UINT32 *, extended_cif *, unsigned), + extended_cif *, unsigned, unsigned, unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if (rvalue == NULL +#ifdef PA_HPUX + && (cif->rtype->type == FFI_TYPE_STRUCT + || cif->rtype->type == FFI_TYPE_LONGDOUBLE)) +#else + && cif->rtype->type == FFI_TYPE_STRUCT) +#endif + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_PA32: + debug(3, "Calling ffi_call_pa32: ecif=%p, bytes=%u, flags=%u, rvalue=%p, fn=%p\n", &ecif, cif->bytes, cif->flags, ecif.rvalue, (void *)fn); + ffi_call_pa32(ffi_prep_args_pa32, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT(0); + break; + } +} + +#if FFI_CLOSURES +/* This is more-or-less an inverse of ffi_call -- we have arguments on + the stack, and we need to fill them into a cif structure and invoke + the user function. 
This really ought to be in asm to make sure + the compiler doesn't do things we don't expect. */ +ffi_status ffi_closure_inner_pa32(ffi_closure *closure, UINT32 *stack) +{ + ffi_cif *cif; + void **avalue; + void *rvalue; + /* Functions can return up to 64-bits in registers. Return address + must be double word aligned. */ + union { double rd; UINT32 ret[2]; } u; + ffi_type **p_arg; + char *tmp; + int i, avn; + unsigned int slot = FIRST_ARG_SLOT; + register UINT32 r28 asm("r28"); + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + cif = closure->cif; + + /* If returning via structure, callee will write to our pointer. */ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = (void *)r28; + else + rvalue = &u; + + avalue = (void **)alloca(cif->nargs * FFI_SIZEOF_ARG); + avn = cif->nargs; + p_arg = cif->arg_types; + + for (i = 0; i < avn; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + avalue[i] = (char *)(stack - slot) + sizeof(UINT32) - (*p_arg)->size; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_FLOAT: +#ifdef PA_LINUX + /* The closure call is indirect. In Linux, floating point + arguments in indirect calls with a prototype are passed + in the floating point registers instead of the general + registers. So, we need to replace what was previously + stored in the current slot with the value in the + corresponding floating point register. */ + switch (slot - FIRST_ARG_SLOT) + { + case 0: fstw(fr4, (void *)(stack - slot)); break; + case 1: fstw(fr5, (void *)(stack - slot)); break; + case 2: fstw(fr6, (void *)(stack - slot)); break; + case 3: fstw(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_DOUBLE: + slot += (slot & 1) ? 1 : 2; +#ifdef PA_LINUX + /* See previous comment for FFI_TYPE_FLOAT. */ + switch (slot - FIRST_ARG_SLOT) + { + case 1: fstd(fr5, (void *)(stack - slot)); break; + case 3: fstd(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a big structure. */ + avalue[i] = (void *) *(stack - slot); + break; +#endif + + case FFI_TYPE_STRUCT: + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + if((*p_arg)->size <= 4) + { + avalue[i] = (void *)(stack - slot) + sizeof(UINT32) - + (*p_arg)->size; + } + else if ((*p_arg)->size <= 8) + { + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot) + sizeof(UINT64) - + (*p_arg)->size; + } + else + avalue[i] = (void *) *(stack - slot); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + } + + /* Invoke the closure. */ + (c->fun) (cif, rvalue, avalue, c->user_data); + + debug(3, "after calling function, ret[0] = %08x, ret[1] = %08x\n", u.ret[0], + u.ret[1]); + + /* Store the result using the lower 2 bytes of the flags. 
*/ + switch (cif->flags) + { + case FFI_TYPE_UINT8: + *(stack - FIRST_ARG_SLOT) = (UINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_SINT8: + *(stack - FIRST_ARG_SLOT) = (SINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_UINT16: + *(stack - FIRST_ARG_SLOT) = (UINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_SINT16: + *(stack - FIRST_ARG_SLOT) = (SINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + *(stack - FIRST_ARG_SLOT - 1) = u.ret[1]; + break; + + case FFI_TYPE_DOUBLE: + fldd(rvalue, fr4); + break; + + case FFI_TYPE_FLOAT: + fldw(rvalue, fr4); + break; + + case FFI_TYPE_STRUCT: + /* Don't need a return value, done by caller. */ + break; + + case FFI_TYPE_SMALL_STRUCT2: + case FFI_TYPE_SMALL_STRUCT3: + case FFI_TYPE_SMALL_STRUCT4: + tmp = (void*)(stack - FIRST_ARG_SLOT); + tmp += 4 - cif->rtype->size; + memcpy((void*)tmp, &u, cif->rtype->size); + break; + + case FFI_TYPE_SMALL_STRUCT5: + case FFI_TYPE_SMALL_STRUCT6: + case FFI_TYPE_SMALL_STRUCT7: + case FFI_TYPE_SMALL_STRUCT8: + { + unsigned int ret2[2]; + int off; + + /* Right justify ret[0] and ret[1] */ + switch (cif->flags) + { + case FFI_TYPE_SMALL_STRUCT5: off = 3; break; + case FFI_TYPE_SMALL_STRUCT6: off = 2; break; + case FFI_TYPE_SMALL_STRUCT7: off = 1; break; + default: off = 0; break; + } + + memset (ret2, 0, sizeof (ret2)); + memcpy ((char *)ret2 + off, &u, 8 - off); + + *(stack - FIRST_ARG_SLOT) = ret2[0]; + *(stack - FIRST_ARG_SLOT - 1) = ret2[1]; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_VOID: + break; + + default: + debug(0, "assert with cif->flags: %d\n",cif->flags); + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* Fill in a closure to refer to the specified fun and user_data. + cif specifies the argument and result types for fun. + The cif must already be prep'ed. */ + +extern void ffi_closure_pa32(void); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + /* The layout of a function descriptor. A function pointer with the PLABEL + bit set points to a function descriptor. */ + struct pa32_fd + { + UINT32 code_pointer; + UINT32 gp; + }; + + struct ffi_pa32_trampoline_struct + { + UINT32 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT32 fake_gp; /* Pointer to closure, installed as gp. */ + UINT32 real_gp; /* Real gp value. */ + }; + + struct ffi_pa32_trampoline_struct *tramp; + struct pa32_fd *fd; + + if (cif->abi != FFI_PA32) + return FFI_BAD_ABI; + + /* Get function descriptor address for ffi_closure_pa32. */ + fd = (struct pa32_fd *)((UINT32)ffi_closure_pa32 & ~3); + + /* Setup trampoline. 
*/ + tramp = (struct ffi_pa32_trampoline_struct *)c->tramp; + tramp->code_pointer = fd->code_pointer; + tramp->fake_gp = (UINT32)codeloc & ~3; + tramp->real_gp = fd->gp; + + c->cif = cif; + c->user_data = user_data; + c->fun = fun; + + return FFI_OK; +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget.h new file mode 100644 index 0000000..df1209e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/ffitarget.h @@ -0,0 +1,80 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for hppa. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
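The FFI_TYPE_SMALL_STRUCT* codes defined just below let the PA port tell aggregates that come back in registers apart from those returned through a hidden pointer (see ffi_struct_type above). A minimal sketch of how a caller describes such an aggregate to libffi, assuming the standard public API; struct point and make_point are illustrative names:

#include <ffi.h>
#include <stdio.h>

struct point { short x; short y; };       /* 4-byte aggregate */

static struct point make_point(short x, short y)
{
  struct point p = { x, y };
  return p;
}

int main(void)
{
  ffi_type point_type;
  ffi_type *point_elements[3] = { &ffi_type_sshort, &ffi_type_sshort, NULL };
  ffi_type *arg_types[2] = { &ffi_type_sshort, &ffi_type_sshort };
  void *arg_values[2];
  short x = 3, y = 4;
  struct point result;
  ffi_cif cif;

  /* Describe the aggregate; libffi fills in size and alignment. */
  point_type.size = 0;
  point_type.alignment = 0;
  point_type.type = FFI_TYPE_STRUCT;
  point_type.elements = point_elements;

  arg_values[0] = &x;
  arg_values[1] = &y;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &point_type, arg_types) != FFI_OK)
    return 1;

  ffi_call(&cif, FFI_FN(make_point), &result, arg_values);
  printf("(%d, %d)\n", result.x, result.y);
  return 0;
}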
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#ifdef PA_LINUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA_HPUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA64_HPUX +#error "PA64_HPUX FFI is not yet implemented" + FFI_PA64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA64 +#endif +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 12 + +#define FFI_TYPE_SMALL_STRUCT2 -1 +#define FFI_TYPE_SMALL_STRUCT3 -2 +#define FFI_TYPE_SMALL_STRUCT4 -3 +#define FFI_TYPE_SMALL_STRUCT5 -4 +#define FFI_TYPE_SMALL_STRUCT6 -5 +#define FFI_TYPE_SMALL_STRUCT7 -6 +#define FFI_TYPE_SMALL_STRUCT8 -7 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32.S new file mode 100644 index 0000000..d0e5f69 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/hpux32.S @@ -0,0 +1,370 @@ +/* ----------------------------------------------------------------------- + hpux32.S - Copyright (c) 2006 Free Software Foundation, Inc. + (c) 2008 Red Hat, Inc. + based on src/pa/linux.S + + HP-UX PA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .LEVEL 1.1 + .SPACE $PRIVATE$ + .IMPORT $global$,DATA + .IMPORT $$dyncall,MILLICODE + .SUBSPA $DATA$ + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,ENTRY,PRIV_LEV=3 + .import ffi_prep_args_pa32,CODE + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .align 4 + +L$FB1 +ffi_call_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI11 + copy %sp, %r3 +L$CFI12 + + /* Setup the stack for calling prep_args... 
+ We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +L$CFI13 + copy %sp, %r4 + + addl %arg2, %r4, %arg0 ; arg stack + stw %arg3, -48(%r3) ; save flags we need it later + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args are loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 ; %ret0 <- rvalue + ldw -56(%r3), %r22 ; %r22 <- function to call + bl $$dyncall, %r31 ; Call the user function + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 ; r21 <- flags + ldw -52(%r3), %r20 ; r20 <- rvalue + + /* Store the result according to the return type. The most + likely types should come first. */ + +L$checkint + comib,<>,n FFI_TYPE_INT, %r21, L$checkint8 + b L$done + stw %ret0, 0(%r20) + +L$checkint8 + comib,<>,n FFI_TYPE_UINT8, %r21, L$checkint16 + b L$done + stb %ret0, 0(%r20) + +L$checkint16 + comib,<>,n FFI_TYPE_UINT16, %r21, L$checkdbl + b L$done + sth %ret0, 0(%r20) + +L$checkdbl + comib,<>,n FFI_TYPE_DOUBLE, %r21, L$checkfloat + b L$done + fstd %fr4,0(%r20) + +L$checkfloat + comib,<>,n FFI_TYPE_FLOAT, %r21, L$checkll + b L$done + fstw %fr4L,0(%r20) + +L$checkll + comib,<>,n FFI_TYPE_UINT64, %r21, L$checksmst2 + stw %ret0, 0(%r20) + b L$done + stw %ret1, 4(%r20) + +L$checksmst2 + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, L$checksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst3 + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, L$checksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst4 + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, L$checksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst5 + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, L$checksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst6 + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, L$checksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst7 + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, L$checksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst8 + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, L$done + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +L$done + /* all done, return */ + copy %r4, %sp ; pop arg stack + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 ; .. and pop stack + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +L$FE1 + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .export ffi_closure_pa32,ENTRY,PRIV_LEV=3,RTNVAL=GR + .import ffi_closure_inner_pa32,CODE + .align 4 +L$FB2 +ffi_closure_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI21 + copy %sp, %r3 +L$CFI22 + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. 
*/ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%rp) + ldw -40(%sp), %ret1 + .exit + .procend +L$FE2: + + .SPACE $PRIVATE$ + .SUBSPA $DATA$ + + .align 4 + .EXPORT _GLOBAL__F_ffi_call_pa32,DATA +_GLOBAL__F_ffi_call_pa32 +L$frame1: + .word L$ECIE1-L$SCIE1 ;# Length of Common Information Entry +L$SCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version + .ascii "\0" ;# CIE Augmentation + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +L$ECIE1: +L$SFDE1: + .word L$EFDE1-L$ASFDE1 ;# FDE Length +L$ASFDE1: + .word L$ASFDE1-L$frame1 ;# FDE CIE offset + .word L$FB1 ;# FDE initial location + .word L$FE1-L$FB1 ;# FDE address range + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI11-L$FB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI12-L$CFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI13-L$CFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +L$EFDE1: + +L$SFDE2: + .word L$EFDE2-L$ASFDE2 ;# FDE Length +L$ASFDE2: + .word L$ASFDE2-L$frame1 ;# FDE CIE offset + .word L$FB2 ;# FDE initial location + .word L$FE2-L$FB2 ;# FDE address range + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI21-L$FB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI22-L$CFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +L$EFDE2: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux.S new file mode 100644 index 0000000..33ef0b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/pa/linux.S @@ -0,0 +1,380 @@ +/* ----------------------------------------------------------------------- + linux.S - (c) 2003-2004 Randolph Chung + (c) 2008 Red Hat, Inc. + + HPPA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
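The byte-store sequences above (hpux32.S) and below (linux.S) unpack small aggregates that the PA ABI returns right-justified across %ret0/%ret1. A standalone sketch of that packing rule in plain C, assuming the big-endian register image described in the comments; unpack_small_struct is an illustrative name, not part of libffi:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reassemble an n-byte (5..8) struct returned right-justified in two
   big-endian 32-bit result registers: ret0 holds the high-order bytes,
   ret1 the low-order bytes. */
static void unpack_small_struct(uint32_t ret0, uint32_t ret1,
                                size_t n, unsigned char *dst)
{
  unsigned char regs[8];
  size_t off = 8 - n;              /* value is right-justified */

  /* Lay the register pair out as it appears in big-endian memory. */
  for (int i = 0; i < 4; i++)
    {
      regs[i]     = (unsigned char) (ret0 >> (24 - 8 * i));
      regs[4 + i] = (unsigned char) (ret1 >> (24 - 8 * i));
    }
  memcpy(dst, regs + off, n);
}

int main(void)
{
  /* 5-byte value: ret0 = ??????aa, ret1 = bbccddee (per the comment). */
  unsigned char out[5];
  unpack_small_struct(0x000000aaU, 0xbbccddeeU, 5, out);
  for (size_t i = 0; i < 5; i++)
    printf("%02x ", out[i]);       /* prints: aa bb cc dd ee */
  printf("\n");
  return 0;
}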
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + .level 1.1 + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,code + .import ffi_prep_args_pa32,code + + .type ffi_call_pa32, @function +.LFB1: +ffi_call_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +.LCFI11: + + copy %sp, %r3 +.LCFI12: + + /* Setup the stack for calling prep_args... + We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +.LCFI13: + copy %sp, %r4 + + addl %arg2, %r4, %arg0 /* arg stack */ + stw %arg3, -48(%r3) /* save flags; we need it later */ + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args were loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 /* %ret0 <- rvalue */ + ldw -56(%r3), %r22 /* %r22 <- function to call */ + bl $$dyncall, %r31 /* Call the user function */ + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 /* r21 <- flags */ + ldw -52(%r3), %r20 /* r20 <- rvalue */ + + /* Store the result according to the return type. */ + +.Lcheckint: + comib,<>,n FFI_TYPE_INT, %r21, .Lcheckint8 + b .Ldone + stw %ret0, 0(%r20) + +.Lcheckint8: + comib,<>,n FFI_TYPE_UINT8, %r21, .Lcheckint16 + b .Ldone + stb %ret0, 0(%r20) + +.Lcheckint16: + comib,<>,n FFI_TYPE_UINT16, %r21, .Lcheckdbl + b .Ldone + sth %ret0, 0(%r20) + +.Lcheckdbl: + comib,<>,n FFI_TYPE_DOUBLE, %r21, .Lcheckfloat + b .Ldone + fstd %fr4,0(%r20) + +.Lcheckfloat: + comib,<>,n FFI_TYPE_FLOAT, %r21, .Lcheckll + b .Ldone + fstw %fr4L,0(%r20) + +.Lcheckll: + comib,<>,n FFI_TYPE_UINT64, %r21, .Lchecksmst2 + stw %ret0, 0(%r20) + b .Ldone + stw %ret1, 4(%r20) + +.Lchecksmst2: + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, .Lchecksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst3: + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, .Lchecksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst4: + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, .Lchecksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst5: + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, .Lchecksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst6: + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, .Lchecksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst7: + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, .Lchecksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst8: + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, .Ldone + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +.Ldone: + /* all done, return */ + copy %r4, %sp /* pop arg stack */ + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 /* .. and pop stack */ + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +.LFE1: + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + .export ffi_closure_pa32,code + .import ffi_closure_inner_pa32,code + + .type ffi_closure_pa32, @function +.LFB2: +ffi_closure_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) +.LCFI20: + copy %r3, %r1 +.LCFI21: + copy %sp, %r3 +.LCFI22: + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. */ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%r2) + ldw -40(%sp), %ret1 + + .exit + .procend +.LFE2: + + .section ".eh_frame",EH_FRAME_FLAGS,@progbits +.Lframe1: + .word .LECIE1-.LSCIE1 ;# Length of Common Information Entry +.LSCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version +#ifdef __PIC__ + .ascii "zR\0" ;# CIE Augmentation: 'z' - data, 'R' - DW_EH_PE_... 
data +#else + .ascii "\0" ;# CIE Augmentation +#endif + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column +#ifdef __PIC__ + .uleb128 0x1 ;# Augmentation size + .byte 0x1b ;# FDE Encoding (DW_EH_PE_pcrel|DW_EH_PE_sdata4) +#endif + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +.LECIE1: +.LSFDE1: + .word .LEFDE1-.LASFDE1 ;# FDE Length +.LASFDE1: + .word .LASFDE1-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB1-. ;# FDE initial location +#else + .word .LFB1 ;# FDE initial location +#endif + .word .LFE1-.LFB1 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI11-.LFB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI12-.LCFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI13-.LCFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +.LEFDE1: + +.LSFDE2: + .word .LEFDE2-.LASFDE2 ;# FDE Length +.LASFDE2: + .word .LASFDE2-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB2-. ;# FDE initial location +#else + .word .LFB2 ;# FDE initial location +#endif + .word .LFE2-.LFB2 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI21-.LFB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI22-.LCFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +.LEFDE2: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix.S new file mode 100644 index 0000000..7ba5415 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix.S @@ -0,0 +1,566 @@ +/* ----------------------------------------------------------------------- + aix.S - Copyright (c) 2002, 2009 Free Software Foundation, Inc. + based on darwin.S by John Hornkvist + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_prep_args + +#define LIBFFI_ASM +#include +#include +#define JUMPTARGET(name) name +#define L(x) x + .file "aix.S" + .toc + + /* void ffi_call_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const)); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_AIX + .globl .ffi_call_AIX +.csect ffi_call_AIX[DS] +ffi_call_AIX: +#ifdef __64BIT__ + .llong .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r0, 16(r1) +LCFI..0: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address. */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 16(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-32-(13*8)(r28) + lfd f2,-32-(12*8)(r28) + lfd f3,-32-(11*8)(r28) + lfd f4,-32-(10*8)(r28) + nop + lfd f5,-32-(9*8)(r28) + lfd f6,-32-(8*8)(r28) + lfd f7,-32-(7*8)(r28) + lfd f8,-32-(6*8)(r28) + nop + lfd f9,-32-(5*8)(r28) + lfd f10,-32-(4*8)(r28) + lfd f11,-32-(3*8)(r28) + lfd f12,-32-(2*8)(r28) + nop + lfd f13,-32-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + ld r2, 40(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + std r3, 0(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + ld r0, 16(r28) + ld r28, -32(r1) + mtlr r0 + ld r29, -24(r1) + ld r30, -16(r1) + ld r31, -8(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + bf 31, L(done_return_value) + stfd f2, 8(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#else /* ! 
__64BIT__ */ + + .long .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r0, 8(r1) +LCFI..0: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 8(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-16-(13*8)(r28) + lfd f2,-16-(12*8)(r28) + lfd f3,-16-(11*8)(r28) + lfd f4,-16-(10*8)(r28) + nop + lfd f5,-16-(9*8)(r28) + lfd f6,-16-(8*8)(r28) + lfd f7,-16-(7*8)(r28) + lfd f8,-16-(6*8)(r28) + nop + lfd f9,-16-(5*8)(r28) + lfd f10,-16-(4*8)(r28) + lfd f11,-16-(3*8)(r28) + lfd f12,-16-(2*8)(r28) + nop + lfd f13,-16-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + lwz r2, 20(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + stw r3, 0(r30) + bf 28, L(done_return_value) + stw r4, 4(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + lwz r0, 8(r28) + lwz r28,-16(r1) + mtlr r0 + lwz r29,-12(r1) + lwz r30, -8(r1) + lwz r31, -4(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_AIX) */ + + /* void ffi_call_go_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const), + * void *closure); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args, r9=closure + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_go_AIX + .globl .ffi_call_go_AIX +.csect ffi_call_go_AIX[DS] +ffi_call_go_AIX: +#ifdef __64BIT__ + .llong .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r9, 8(r1) /* closure, saved in cr field. */ + std r0, 16(r1) +LCFI..2: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 8(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. 
*/ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + + b L1 +LFE..1: +#else /* ! __64BIT__ */ + + .long .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 + /* Save registers we use. */ +LFB..1: + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r9, 4(r1) /* closure, saved in cr field. */ + stw r0, 8(r1) +LCFI..2: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 4(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + + b L1 +LFE..1: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_go_AIX) */ + +.csect .text[PR] + .align 2 + .globl ffi_call_DARWIN + .globl .ffi_call_DARWIN +.csect ffi_call_DARWIN[DS] +ffi_call_DARWIN: +#ifdef __64BIT__ + .llong .ffi_call_DARWIN, TOC[tc0], 0 +#else + .long .ffi_call_DARWIN, TOC[tc0], 0 +#endif + .csect .text[PR] +.ffi_call_DARWIN: + blr + .long 0 + .byte 0,0,0,0,0,0,0,0 +/* END(ffi_call_DARWIN) */ + +/* EH frame stuff. 
*/ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix +_GLOBAL__F_libffi_src_powerpc_aix: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte 0x41 /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure.S 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure.S new file mode 100644 index 0000000..132c785 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/aix_closure.S @@ -0,0 +1,694 @@ +/* ----------------------------------------------------------------------- + aix_closure.S - Copyright (c) 2002, 2003, 2009 Free Software Foundation, Inc. + based on darwin_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_closure_helper_DARWIN + .extern .ffi_go_closure_helper_DARWIN + +#define LIBFFI_ASM +#define JUMPTARGET(name) name +#define L(x) x + .file "aix_closure.S" + .toc +LC..60: + .tc L..60[TC],L..60 + .csect .text[PR] + .align 2 + +.csect .text[PR] + .align 2 + .globl ffi_closure_ASM + .globl .ffi_closure_ASM +.csect ffi_closure_ASM[DS] +ffi_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..0: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 
16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 10(r3) /* load type from return type */ + ld r4, LC..60(2) /* get address of jump table */ + sldi r3, r3, 4 /* now multiply return type by 16 */ + ld r0, 240+16(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. */ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_INT */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + lfd f2, 112+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 112+6(r1) + mtlr r0 +L..finish: + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 112+6(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT32 */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_POINTER */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr +LFE..0: + +#else /* ! 
__64BIT__ */ + + .long .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..0: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 6(r3) /* load type from return type */ + lwz r4, LC..60(2) /* get address of jump table */ + slwi r3, r3, 4 /* now multiply return type by 16 */ + lwz r0, 176+8(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. 
*/ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_INT */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + lfd f2, 56+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_SINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_POINTER */ + lwz r3, 56+0(r1) + mtlr r0 +L..finish: + addi r1, r1, 176 + blr +LFE..0: +#endif + .ef __LINE__ +/* END(ffi_closure_ASM) */ + + +.csect .text[PR] + .align 2 + .globl ffi_go_closure_ASM + .globl .ffi_go_closure_ASM +.csect ffi_go_closure_ASM[DS] +ffi_go_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..2: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, r11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: + +#else /* ! 
__64BIT__ */ + + .long .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..2: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, 11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: +#endif + .ef __LINE__ +/* END(ffi_go_closure_ASM) */ + +/* EH frame stuff. */ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define CFA_OFFSET 0xf0,0x01 /* LEB128 240 */ +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define CFA_OFFSET 0xb0,0x01 /* LEB128 176 */ +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix_closure +_GLOBAL__F_libffi_src_powerpc_aix_closure: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte LR_REGNO /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; 
Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix_closure /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm.h new file mode 100644 index 0000000..27b22f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/asm.h @@ -0,0 +1,125 @@ +/* ----------------------------------------------------------------------- + asm.h - Copyright (c) 1998 Geoffrey Keating + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define ASM_GLOBAL_DIRECTIVE .globl + + +#define C_SYMBOL_NAME(name) name +/* Macro for a label. */ +#ifdef __STDC__ +#define C_LABEL(name) name##: +#else +#define C_LABEL(name) name/**/: +#endif + +/* This seems to always be the case on PPC. */ +#define ALIGNARG(log2) log2 +/* For ELF we need the `.type' directive to make shared libs work right. */ +#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg; +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name + +/* If compiled for profiling, call `_mcount' at the start of each function. */ +#ifdef PROF +/* The mcount code relies on the return address being on the stack + to locate our caller and so it can restore it; so store one just + for its benefit. 
*/ +#ifdef PIC +#define CALL_MCOUNT \ + .pushsection; \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + stw %r0,4(%r1); \ + bl _GLOBAL_OFFSET_TABLE_@local-4; \ + mflr %r11; \ + lwz %r0,0b@got(%r11); \ + bl JUMPTARGET(_mcount); +#else /* PIC */ +#define CALL_MCOUNT \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + lis %r11,0b@ha; \ + stw %r0,4(%r1); \ + addi %r0,%r11,0b@l; \ + bl JUMPTARGET(_mcount); +#endif /* PIC */ +#else /* PROF */ +#define CALL_MCOUNT /* Do nothing. */ +#endif /* PROF */ + +#define ENTRY(name) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT + +#define EALIGN_W_0 /* No words to insert. */ +#define EALIGN_W_1 nop +#define EALIGN_W_2 nop;nop +#define EALIGN_W_3 nop;nop;nop +#define EALIGN_W_4 EALIGN_W_3;nop +#define EALIGN_W_5 EALIGN_W_4;nop +#define EALIGN_W_6 EALIGN_W_5;nop +#define EALIGN_W_7 EALIGN_W_6;nop + +/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes + past a 2^align boundary. */ +#ifdef PROF +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT \ + b 0f; \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + 0: +#else /* PROF */ +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + C_LABEL(name) +#endif + +#define END(name) \ + ASM_SIZE_DIRECTIVE(name) + +#ifdef PIC +#define JUMPTARGET(name) name##@plt +#else +#define JUMPTARGET(name) name +#endif + +/* Local labels stripped out by the linker. */ +#define L(x) .L##x diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin.S new file mode 100644 index 0000000..066eb82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin.S @@ -0,0 +1,378 @@ +/* ----------------------------------------------------------------------- + darwin.S - Copyright (c) 2000 John Hornkvist + Copyright (c) 2004, 2010 Free Software Foundation, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) +#define sgux MODE_CHOICE(stwux,stdux) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. +#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* If there is any FP stuff we make space for all of the regs. */ +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +#define RESULT_BYTES 16 + +/* This should be kept in step with the same value in ffi_darwin.c. */ +#define ASM_NEEDS_REGISTERS 4 +#define SAVE_REGS_SIZE (ASM_NEEDS_REGISTERS * GPR_BYTES) + +#include +#include + +#define JUMPTARGET(name) name +#define L(x) x + + .text + .align 2 + .globl _ffi_prep_args + + .align 2 + .globl _ffi_call_DARWIN + + /* We arrive here with: + r3 = ptr to extended cif. + r4 = -bytes. + r5 = cif flags. + r6 = ptr to return value. + r7 = fn pointer (user func). + r8 = fn pointer (ffi_prep_args). + r9 = ffi_type* for the ret val. */ + +_ffi_call_DARWIN: +Lstartcode: + mr r12,r8 /* We only need r12 until the call, + so it does not have to be saved. */ +LFB1: + /* Save the old stack pointer as AP. */ + mr r8,r1 +LCFI0: + + /* Save the retval type in parents frame. */ + sg r9,(LINKAGE_SIZE+6*GPR_BYTES)(r8) + + /* Allocate the stack space we need. */ + sgux r1,r1,r4 + + /* Save registers we use. */ + mflr r9 + sg r9,SAVED_LR_OFFSET(r8) + + sg r28,-(4 * GPR_BYTES)(r8) + sg r29,-(3 * GPR_BYTES)(r8) + sg r30,-(2 * GPR_BYTES)(r8) + sg r31,-( GPR_BYTES)(r8) + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + sg r2,(5 * GPR_BYTES)(r1) +#endif + +LCFI1: + + /* Save arguments over call. */ + mr r31,r5 /* flags, */ + mr r30,r6 /* rvalue, */ + mr r29,r7 /* function address, */ + mr r28,r8 /* our AP. */ +LCFI2: + /* Call ffi_prep_args. r3 = extended cif, r4 = stack ptr copy. */ + mr r4,r1 + li r9,0 + + mtctr r12 /* r12 holds address of _ffi_prep_args. */ + bctrl + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + lg r2,(5 * GPR_BYTES)(r1) +#endif + /* Now do the call. + Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,r31 + /* Get the address to call into CTR. */ + mtctr r29 + /* Load all those argument registers. + We have set up a nice stack frame, just load it into registers. */ + lg r3, (LINKAGE_SIZE )(r1) + lg r4, (LINKAGE_SIZE + GPR_BYTES)(r1) + lg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r1) + lg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r1) + nop + lg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r1) + lg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r1) + lg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r1) + lg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r1) + +L1: + /* ... Load all the FP registers. */ + bf 6,L2 /* No floats to load. 
*/ + lfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + lfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + lfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + lfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + lfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + lfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + lfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + lfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + lfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + lfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + lfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + lfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + lfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + +L2: + mr r12,r29 /* Put the target address in r12 as specified. */ + mtctr r12 + nop + nop + + /* Make the call. */ + bctrl + + /* Now, deal with the return value. */ + + /* m64 structure returns can occupy the same set of registers as + would be used to pass such a structure as arg0 - so take care + not to step on any possibly hot regs. */ + + /* Get the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + ; FLAG_RETURNS_NOTHING also covers struct ret-by-ref. + bt 30,L(done_return_value) ; FLAG_RETURNS_NOTHING + bf 27,L(scalar_return_value) ; not FLAG_RETURNS_STRUCT + + /* OK, so we have a struct. */ +#if defined(__ppc64__) + bt 31,L(maybe_return_128) ; FLAG_RETURNS_128BITS, special case + + /* OK, we have to map the return back to a mem struct. + We are about to trample the parents param area, so recover the + return type. r29 is free, since the call is done. */ + lg r29,(LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + + sg r3, (LINKAGE_SIZE )(r28) + sg r4, (LINKAGE_SIZE + GPR_BYTES)(r28) + sg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r28) + sg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r28) + nop + sg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r28) + sg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r28) + sg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + sg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + /* OK, so do the block move - we trust that memcpy will not trample + the fprs... */ + mr r3,r30 ; dest + addi r4,r28,LINKAGE_SIZE ; source + /* The size is a size_t, should be long. */ + lg r5,0(r29) + /* Figure out small structs */ + cmpi 0,r5,4 + bgt L3 ; 1, 2 and 4 bytes have special rules. + cmpi 0,r5,3 + beq L3 ; not 3 + addi r4,r4,8 + subf r4,r5,r4 +L3: + bl _memcpy + + /* ... do we need the FP registers? - recover the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + bf 29,L(done_return_value) /* No floats in the struct. */ + stfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + stfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + stfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + stfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + stfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + stfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + stfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + stfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + stfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + stfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + stfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + stfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + stfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + + mr r3,r29 ; ffi_type * + mr r4,r30 ; dest + addi r5,r28,-SAVE_REGS_SIZE-(13*FPR_SIZE) ; fprs + xor r6,r6,r6 + sg r6,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + addi r6,r28,(LINKAGE_SIZE + 7 * GPR_BYTES) ; point to a zeroed counter. + bl _darwin64_struct_floats_to_mem + + b L(done_return_value) +#else + stw r3,0(r30) ; m32 the only struct return in reg is 4 bytes. +#endif + b L(done_return_value) + +L(fp_return_value): + /* Do we have long double to store? 
*/ + bf 31,L(fd_return_value) ; FLAG_RETURNS_128BITS + stfd f1,0(r30) + stfd f2,FPR_SIZE(r30) + b L(done_return_value) + +L(fd_return_value): + /* Do we have double to store? */ + bf 28,L(float_return_value) + stfd f1,0(r30) + b L(done_return_value) + +L(float_return_value): + /* We only have a float to store. */ + stfs f1,0(r30) + b L(done_return_value) + +L(scalar_return_value): + bt 29,L(fp_return_value) ; FLAG_RETURNS_FP + ; ffi_arg is defined as unsigned long. + sg r3,0(r30) ; Save the reg. + bf 28,L(done_return_value) ; not FLAG_RETURNS_64BITS + +#if defined(__ppc64__) +L(maybe_return_128): + std r3,0(r30) + bf 31,L(done_return_value) ; not FLAG_RETURNS_128BITS + std r4,8(r30) +#else + stw r4,4(r30) +#endif + + /* Fall through. */ + /* We want this at the end to simplify eh epilog computation. */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lg r29,SAVED_LR_OFFSET(r28) + ; epilog + lg r31,-(1 * GPR_BYTES)(r28) + mtlr r29 + lg r30,-(2 * GPR_BYTES)(r28) + lg r29,-(3 * GPR_BYTES)(r28) + lg r28,-(4 * GPR_BYTES)(r28) + lg r1,0(r1) + blr +LFE1: + .align 1 +/* END(_ffi_call_DARWIN) */ + +/* Provide a null definition of _ffi_call_AIX. */ + .text + .globl _ffi_call_AIX + .align 2 +_ffi_call_AIX: + blr +/* END(_ffi_call_AIX) */ + +/* EH stuff. */ + +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + + .globl _ffi_call_DARWIN.eh +_ffi_call_DARWIN.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$3,LFE1-Lstartcode + .g_long L$set$3 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x08 ; uleb128 0x08 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$5,LCFI1-LCFI0 + .long L$set$5 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .byte 0x9f ; DW_CFA_offset, column 0x1f + .byte 0x1 ; uleb128 0x1 + .byte 0x9e ; DW_CFA_offset, column 0x1e + .byte 0x2 ; uleb128 0x2 + .byte 0x9d ; DW_CFA_offset, column 0x1d + .byte 0x3 ; uleb128 0x3 + .byte 0x9c ; DW_CFA_offset, column 0x1c + .byte 0x4 ; uleb128 0x4 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$6,LCFI2-LCFI1 + .long L$set$6 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x1c ; uleb128 0x1c + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure.S new file mode 100644 index 0000000..3121e6a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/darwin_closure.S @@ -0,0 +1,571 @@ +/* ----------------------------------------------------------------------- + darwin_closure.S - Copyright (c) 2002, 2003, 2004, 2010, + Free Software Foundation, Inc. + based on ppc_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#define L(x) x + +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. 
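As an orientation aside (not part of the vendored darwin_closure.S source), the SAVE_SIZE and PAD_SIZE macros defined just below reduce to fixed per-mode numbers. The following minimal C sketch, using a hypothetical frame_size helper of our own naming, reproduces that arithmetic from the constants quoted in the defines that follow.

#include <stdio.h>

/* Mirrors ((raw + 15) & -16): round the closure frame up to a 16-byte multiple. */
static unsigned frame_size (unsigned linkage, unsigned param_area,
                            unsigned fpr_save, unsigned result_bytes)
{
  unsigned raw = linkage + param_area + fpr_save + result_bytes;
  return (raw + 15u) & ~15u;
}

int main (void)
{
  /* m32: LINKAGE_SIZE 24, PARAM_AREA 32, 13 FPRs * 8 bytes, RESULT_BYTES 16. */
  printf ("m32 SAVE_SIZE = %u\n", frame_size (24, 32, 13 * 8, 16));   /* 176 */
  /* m64: LINKAGE_SIZE 48, PARAM_AREA 64, 13 FPRs * 8 bytes, RESULT_BYTES 176. */
  printf ("m64 SAVE_SIZE = %u\n", frame_size (48, 64, 13 * 8, 176));  /* 400 */
  return 0;
}

Under those constants this gives SAVE_SIZE = 176 with PAD_SIZE = 0 in m32 mode and SAVE_SIZE = 400 with PAD_SIZE = 8 in m64 mode, which lines up with the "/* 176, 400 */" note beside EH_FRAME_OFFSETA in the EH-frame section later in this file.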
+#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) + +#define SAVED_CR_OFFSET MODE_CHOICE(4,8) /* save position for CR */ +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* WARNING: if ffi_type is changed... here be monsters. + Offsets of items within the result type. */ +#define FFI_TYPE_TYPE MODE_CHOICE(6,10) +#define FFI_TYPE_ELEM MODE_CHOICE(8,16) + +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +/* biggest m64 struct ret is 8GPRS + 13FPRS = 168 bytes - rounded to 16bytes = 176. */ +#define RESULT_BYTES MODE_CHOICE(16,176) + +; The whole stack frame **MUST** be 16byte-aligned. +#define SAVE_SIZE (((LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)+15) & -16LL) +#define PAD_SIZE (SAVE_SIZE-(LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)) + +#define PARENT_PARM_BASE (SAVE_SIZE+LINKAGE_SIZE) +#define FP_SAVE_BASE (LINKAGE_SIZE+PARAM_AREA) + +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050 +; We no longer need the pic symbol stub for Darwin >= 9. +#define BLCLS_HELP _ffi_closure_helper_DARWIN +#define STRUCT_RETVALUE_P _darwin64_struct_ret_by_value_p +#define PASS_STR_FLOATS _darwin64_pass_struct_floats +#undef WANT_STUB +#else +#define BLCLS_HELP L_ffi_closure_helper_DARWIN$stub +#define STRUCT_RETVALUE_P L_darwin64_struct_ret_by_value_p$stub +#define PASS_STR_FLOATS L_darwin64_pass_struct_floats$stub +#define WANT_STUB +#endif + +/* m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee`s LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent`s frame. + |--------------------------------------------| <+ <<< on entry to + | Result Bytes 16/176 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callees LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< call. + +*/ + + .file "darwin_closure.S" + + .machine machine_choice + + .text + .globl _ffi_closure_ASM + .align LOG2_GPR_BYTES +_ffi_closure_ASM: +LFB1: +Lstartcode: + mflr r0 /* extract return address */ + sg r0,SAVED_LR_OFFSET(r1) /* save the return address */ +LCFI0: + sgu r1,-SAVE_SIZE(r1) /* skip over caller save area + keep stack aligned to 16. */ +LCFI1: + /* We want to build up an area for the parameters passed + in registers. 
(both floating point and integer) */ + + /* Put gpr 3 to gpr 10 in the parents outgoing area... + ... the remainder of any params that overflowed the regs will + follow here. */ + sg r3, (PARENT_PARM_BASE )(r1) + sg r4, (PARENT_PARM_BASE + GPR_BYTES )(r1) + sg r5, (PARENT_PARM_BASE + GPR_BYTES * 2)(r1) + sg r6, (PARENT_PARM_BASE + GPR_BYTES * 3)(r1) + sg r7, (PARENT_PARM_BASE + GPR_BYTES * 4)(r1) + sg r8, (PARENT_PARM_BASE + GPR_BYTES * 5)(r1) + sg r9, (PARENT_PARM_BASE + GPR_BYTES * 6)(r1) + sg r10,(PARENT_PARM_BASE + GPR_BYTES * 7)(r1) + + /* We save fpr 1 to fpr 14 in our own save frame. */ + stfd f1, (FP_SAVE_BASE )(r1) + stfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + stfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + stfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + stfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + stfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + stfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + stfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + stfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + stfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + stfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + stfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + stfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* Set up registers for the routine that actually does the work + get the context pointer from the trampoline. */ + mr r3,r11 + + /* Now load up the pointer to the result storage. */ + addi r4,r1,(SAVE_SIZE-RESULT_BYTES) + + /* Now load up the pointer to the saved gpr registers. */ + addi r5,r1,PARENT_PARM_BASE + + /* Now load up the pointer to the saved fpr registers. */ + addi r6,r1,FP_SAVE_BASE + + /* Make the call. */ + bl BLCLS_HELP + + /* r3 contains the rtype pointer... save it since we will need + it later. */ + sg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type + lg r0,0(r3) ; size => r0 + lhz r3,FFI_TYPE_TYPE(r3) ; type => r3 + + /* The helper will have intercepted structure returns and inserted + the caller`s destination address for structs returned by ref. */ + + /* r3 contains the return type so use it to look up in a table + so we know how to deal with each type. */ + + addi r5,r1,(SAVE_SIZE-RESULT_BYTES) /* Otherwise, our return is here. */ + bl Lget_ret_type0_addr /* Get pointer to Lret_type0 into LR. */ + mflr r4 /* Move to r4. */ + slwi r3,r3,4 /* Now multiply return type by 16. */ + add r3,r3,r4 /* Add contents of table to table address. */ + mtctr r3 + bctr /* Jump to it. */ +LFE1: +/* Each of the ret_typeX code fragments has to be exactly 16 bytes long + (4 instructions). For cache effectiveness we align to a 16 byte boundary + first. 
*/ + + .align 4 + + nop + nop + nop +Lget_ret_type0_addr: + blrl + +/* case FFI_TYPE_VOID */ +Lret_type0: + b Lfinish + nop + nop + nop + +/* case FFI_TYPE_INT */ +Lret_type1: + lg r3,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_FLOAT */ +Lret_type2: + lfs f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_DOUBLE */ +Lret_type3: + lfd f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_LONGDOUBLE */ +Lret_type4: + lfd f1,0(r5) + lfd f2,8(r5) + b Lfinish + nop + +/* case FFI_TYPE_UINT8 */ +Lret_type5: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT8 */ +Lret_type6: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + extsb r3,r3 + b Lfinish + nop + +/* case FFI_TYPE_UINT16 */ +Lret_type7: +#if defined(__ppc64__) + lhz r3,6(r5) +#else + lhz r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT16 */ +Lret_type8: +#if defined(__ppc64__) + lha r3,6(r5) +#else + lha r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT32 */ +Lret_type9: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT32 */ +Lret_type10: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT64 */ +Lret_type11: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_SINT64 */ +Lret_type12: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_STRUCT */ +Lret_type13: +#if defined(__ppc64__) + lg r3,0(r5) ; we need at least this... + cmpi 0,r0,4 + bgt Lstructend ; not a special small case + b Lsmallstruct ; see if we need more. +#else + cmpwi 0,r0,4 + bgt Lfinish ; not by value + lg r3,0(r5) + b Lfinish +#endif +/* case FFI_TYPE_POINTER */ +Lret_type14: + lg r3,0(r5) + b Lfinish + nop + nop + +#if defined(__ppc64__) +Lsmallstruct: + beq Lfour ; continuation of Lret13. + cmpi 0,r0,3 + beq Lfinish ; don`t adjust this - can`t be any floats here... + srdi r3,r3,48 + cmpi 0,r0,2 + beq Lfinish ; .. or here .. + srdi r3,r3,8 + b Lfinish ; .. or here. + +Lfour: + lg r6,LINKAGE_SIZE(r1) ; get the result type + lg r6,FFI_TYPE_ELEM(r6) ; elements array pointer + lg r6,0(r6) ; first element + lhz r0,FFI_TYPE_TYPE(r6) ; OK go the type + cmpi 0,r0,2 ; FFI_TYPE_FLOAT + bne Lfourint + lfs f1,0(r5) ; just one float in the struct. + b Lfinish + +Lfourint: + srdi r3,r3,32 ; four bytes. + b Lfinish + +Lstructend: + lg r3,LINKAGE_SIZE(r1) ; get the result type + bl STRUCT_RETVALUE_P + cmpi 0,r3,0 + beq Lfinish ; nope. + /* Recover a pointer to the results. */ + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we need at least this... + lg r4,8(r11) + cmpi 0,r0,16 + beq Lfinish ; special case 16 bytes we don't consider floats. + + /* OK, frustratingly, the process of saving the struct to mem might have + messed with the FPRs, so we have to re-load them :(. + We`ll use our FPRs space again - calling: + void darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) + We`ll temporarily pinch the first two slots of the param area for local + vars used by the routine. */ + xor r6,r6,r6 + addi r5,r1,PARENT_PARM_BASE ; some space + sg r6,0(r5) ; *nfpr zeroed. + addi r6,r5,8 ; **fprs + addi r3,r1,FP_SAVE_BASE ; pointer to FPRs space + sg r3,0(r6) + mr r4,r11 ; the struct is here... 
+ lg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type. + bl PASS_STR_FLOATS ; get struct floats into FPR save space. + /* See if we used any floats */ + lwz r0,(SAVE_SIZE-RESULT_BYTES)(r1) + cmpi 0,r0,0 + beq Lstructints ; nope. + /* OK load `em up... */ + lfd f1, (FP_SAVE_BASE )(r1) + lfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + lfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + lfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + lfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + lfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + lfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + lfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + lfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + lfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + lfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + lfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + lfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* point back at our saved struct. */ +Lstructints: + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we end up picking the + lg r4,8(r11) ; first two again. + lg r5,16(r11) + lg r6,24(r11) + lg r7,32(r11) + lg r8,40(r11) + lg r9,48(r11) + lg r10,56(r11) +#endif + +/* case done */ +Lfinish: + addi r1,r1,SAVE_SIZE /* Restore stack pointer. */ + lg r0,SAVED_LR_OFFSET(r1) /* Get return address. */ + mtlr r0 /* Reset link register. */ + blr +Lendcode: + .align 1 + +/* END(ffi_closure_ASM) */ + +/* EH frame stuff. */ +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) +/* 176, 400 */ +#define EH_FRAME_OFFSETA MODE_CHOICE(176,0x90) +#define EH_FRAME_OFFSETB MODE_CHOICE(1,3) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + .globl _ffi_closure_ASM.eh +_ffi_closure_ASM.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length + +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$2,LFE1-Lstartcode + .g_long L$set$2 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$3,LCFI1-LCFI0 + .long L$set$3 + .byte 0xe ; DW_CFA_def_cfa_offset + .byte EH_FRAME_OFFSETA,EH_FRAME_OFFSETB ; uleb128 176,1/190,3 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + +#ifdef WANT_STUB + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_ffi_closure_helper_DARWIN$stub: + .indirect_symbol _ffi_closure_helper_DARWIN + mflr r0 + bcl 20,31,"L1$spb" +"L1$spb": + mflr r11 + addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb") + mtlr r0 + lwzu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_ffi_closure_helper_DARWIN$lazy_ptr: + .indirect_symbol _ffi_closure_helper_DARWIN + .g_long dyld_stub_binding_helper + +#if defined(__ppc64__) + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_struct_ret_by_value_p$stub: + .indirect_symbol _darwin64_struct_ret_by_value_p + mflr r0 + bcl 20,31,"L2$spb" +"L2$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_struct_ret_by_value_p$lazy_ptr: + .indirect_symbol _darwin64_struct_ret_by_value_p + .g_long dyld_stub_binding_helper + + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_pass_struct_floats$stub: + .indirect_symbol _darwin64_pass_struct_floats + mflr r0 + bcl 20,31,"L3$spb" +"L3$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_pass_struct_floats$lazy_ptr: + .indirect_symbol _darwin64_pass_struct_floats + .g_long dyld_stub_binding_helper +# endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi.c new file mode 100644 index 0000000..a19bcbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi.c @@ -0,0 +1,175 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" +#include "ffi_common.h" +#include "ffi_powerpc.h" + +#if HAVE_LONG_DOUBLE_VARIANT +/* Adjust ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types (ffi_abi abi) +{ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# ifdef POWERPC64 + ffi_prep_types_linux64 (abi); +# else + ffi_prep_types_sysv (abi); +# endif +# endif +} +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64 (cif); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif *cif, + unsigned int nfixedargs MAYBE_UNUSED, + unsigned int ntotalargs MAYBE_UNUSED) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64_var (cif, nfixedargs, ntotalargs); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +static void +ffi_call_int (ffi_cif *cif, + void (*fn) (void), + void *rvalue, + void **avalue, + void *closure) +{ + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead returns + them in memory. + + We bounce-buffer SYSV small struct return values so that sysv.S + can write r3 and r4 to memory without worrying about struct size. + + For ELFv2 ABI, use a bounce buffer for homogeneous structs too, + for similar reasons. This bounce buffer must be aligned to 16 + bytes for use with homogeneous structs of vectors (float128). */ + float128 smst_buffer[8]; + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + ecif.rvalue = rvalue; + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + ecif.rvalue = smst_buffer; + /* Ensure that we have a valid struct return value. + FIXME: Isn't this just papering over a user problem? */ + else if (!rvalue && cif->rtype->type == FFI_TYPE_STRUCT) + ecif.rvalue = alloca (cif->rtype->size); + +#ifdef POWERPC64 + ffi_call_LINUX64 (&ecif, fn, ecif.rvalue, cif->flags, closure, + -(long) cif->bytes); +#else + ffi_call_SYSV (&ecif, fn, ecif.rvalue, cif->flags, closure, -cif->bytes); +#endif + + /* Check for a bounce-buffered return value */ + if (rvalue && ecif.rvalue == smst_buffer) + { + unsigned int rsize = cif->rtype->size; +#ifndef __LITTLE_ENDIAN__ + /* The SYSV ABI returns a structure of up to 4 bytes in size + left-padded in r3. */ +# ifndef POWERPC64 + if (rsize <= 4) + memcpy (rvalue, (char *) smst_buffer + 4 - rsize, rsize); + else +# endif + /* The SYSV ABI returns a structure of up to 8 bytes in size + left-padded in r3/r4, and the ELFv2 ABI similarly returns a + structure of up to 8 bytes in size left-padded in r3. But + note that a structure of a single float is not paddded. 
*/ + if (rsize <= 8 && (cif->flags & FLAG_RETURNS_FP) == 0) + memcpy (rvalue, (char *) smst_buffer + 8 - rsize, rsize); + else +#endif + memcpy (rvalue, smst_buffer, rsize); + } +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#ifdef POWERPC64 + return ffi_prep_closure_loc_linux64 (closure, cif, fun, user_data, codeloc); +#else + return ffi_prep_closure_loc_sysv (closure, cif, fun, user_data, codeloc); +#endif +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ +#ifdef POWERPC64 + closure->tramp = ffi_go_closure_linux64; +#else + closure->tramp = ffi_go_closure_sysv; +#endif + closure->cif = cif; + closure->fun = fun; + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c new file mode 100644 index 0000000..61a18c4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c @@ -0,0 +1,1440 @@ +/* ----------------------------------------------------------------------- + ffi_darwin.c + + Copyright (C) 1998 Geoffrey Keating + Copyright (C) 2001 John Hornkvist + Copyright (C) 2002, 2006, 2007, 2009, 2010 Free Software Foundation, Inc. + + FFI support for Darwin and AIX. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +extern void ffi_closure_ASM (void); +extern void ffi_go_closure_ASM (void); + +enum { + /* The assembly depends on these exact flags. + For Darwin64 (when FLAG_RETURNS_STRUCT is set): + FLAG_RETURNS_FP indicates that the structure embeds FP data. + FLAG_RETURNS_128BITS signals a special struct size that is not + expanded for float content. 
*/ + FLAG_RETURNS_128BITS = 1 << (31-31), /* These go in cr7 */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_64BITS = 1 << (31-28), + + FLAG_RETURNS_STRUCT = 1 << (31-27), /* This goes in cr6 */ + + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4) +}; + +/* About the DARWIN ABI. */ +enum { + NUM_GPR_ARG_REGISTERS = 8, + NUM_FPR_ARG_REGISTERS = 13, + LINKAGE_AREA_GPRS = 6 +}; + +enum { ASM_NEEDS_REGISTERS = 4 }; /* r28-r31 */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments. + + m32/m64 + + The stack layout we want looks like this: + + | Return address from ffi_call_DARWIN | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4/8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | ASM_NEEDS_REGISTERS=r28-r31 4*(4/8) | | ffi_call_DARWIN + |--------------------------------------------| | + | When we have any FP activity... the | | + | FPRs occupy NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 from high to low addr. | | + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_call_DARWIN + + */ + +#if defined(POWERPC_DARWIN64) +static void +darwin64_pass_struct_by_value + (ffi_type *, char *, unsigned, unsigned *, double **, unsigned long **); +#endif + +/* This depends on GPR_SIZE = sizeof (unsigned long) */ + +void +ffi_prep_args (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + const unsigned nargs = ecif->cif->nargs; +#if !defined(POWERPC_DARWIN64) + const ffi_abi abi = ecif->cif->abi; +#endif + + /* 'stacktop' points at the previous backchain pointer. */ + unsigned long *const stacktop = stack + (bytes / sizeof(unsigned long)); + + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + double *fpr_base = (double *) (stacktop - ASM_NEEDS_REGISTERS) - NUM_FPR_ARG_REGISTERS; + int gp_count = 0, fparg_count = 0; + + /* 'next_arg' grows up as we put parameters in it. */ + unsigned long *next_arg = stack + LINKAGE_AREA_GPRS; /* 6 reserved positions. */ + + int i; + double double_tmp; + void **p_argv = ecif->avalue; + unsigned long gprvalue; + ffi_type** ptr = ecif->cif->arg_types; +#if !defined(POWERPC_DARWIN64) + char *dest_cpy; +#endif + unsigned size_al = 0; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT(((unsigned) (char *) stack & 0xF) == 0); + FFI_ASSERT(((unsigned) (char *) stacktop & 0xF) == 0); + FFI_ASSERT((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. + Rule: + Return values are referenced by r3, so r4 is the first parameter. 
*/ + + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + for (i = nargs; i > 0; i--, ptr++, p_argv++) + { + switch ((*ptr)->type) + { + /* If a floating-point parameter appears before all of the general- + purpose registers are filled, the corresponding GPRs that match + the size of the floating-point parameter are skipped. */ + case FFI_TYPE_FLOAT: + double_tmp = *(float *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; +#if defined(POWERPC_DARWIN) + *(float *)next_arg = *(float *) *p_argv; +#else + *(double *)next_arg = double_tmp; +#endif + next_arg++; + gp_count++; + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_DOUBLE: + double_tmp = *(double *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *)next_arg = double_tmp; +#ifdef POWERPC64 + next_arg++; + gp_count++; +#else + next_arg += 2; + gp_count += 2; +#endif + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +# if defined(POWERPC64) && !defined(POWERPC_DARWIN64) + /* ??? This will exceed the regs count when the value starts at fp13 + and it will not put the extra bit on the stack. */ + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *(long double *) fpr_base++ = *(long double *) *p_argv; + else + *(long double *) next_arg = *(long double *) *p_argv; + next_arg += 2; + fparg_count += 2; +# else + double_tmp = ((double *) *p_argv)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; + double_tmp = ((double *) *p_argv)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; +# endif + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; +#endif + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + gprvalue = *(long long *) *p_argv; + goto putgpr; +#else + *(long long *) next_arg = *(long long *) *p_argv; + next_arg += 2; + gp_count += 2; +#endif + break; + case FFI_TYPE_POINTER: + gprvalue = *(unsigned long *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT8: + gprvalue = *(unsigned char *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = *(signed char *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = *(unsigned short *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = *(signed short *) *p_argv; + goto putgpr; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + next_arg = (unsigned long *)FFI_ALIGN((char *)next_arg, (*ptr)->alignment); + darwin64_pass_struct_by_value (*ptr, (char *) *p_argv, + (unsigned) size_al, + (unsigned int *) &fparg_count, + &fpr_base, &next_arg); +#else + dest_cpy = (char *) next_arg; + + /* If the first member of the struct is a double, then include enough + padding in the struct size to align it to double-word. 
*/ + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); + +# if defined(POWERPC64) + FFI_ASSERT (abi != FFI_DARWIN); + memcpy ((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. + Structures with 3 byte in size are padded upwards. */ + if (size_al < 3 && abi == FFI_DARWIN) + dest_cpy += 4 - size_al; + + memcpy((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = *(signed int *) *p_argv; + goto putgpr; + + case FFI_TYPE_UINT32: + gprvalue = *(unsigned int *) *p_argv; + putgpr: + *next_arg++ = gprvalue; + gp_count++; + break; + default: + break; + } + } + + /* Check that we didn't overrun the stack... */ + /* FFI_ASSERT(gpr_base <= stacktop - ASM_NEEDS_REGISTERS); + FFI_ASSERT((unsigned *)fpr_base + <= stacktop - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS); + FFI_ASSERT(flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4); */ +} + +#if defined(POWERPC_DARWIN64) + +/* See if we can put some of the struct into fprs. + This should not be called for structures of size 16 bytes, since these are not + broken out this way. */ +static void +darwin64_scan_struct_for_floats (ffi_type *s, unsigned *nfpr) +{ + int i; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p = s->elements[i]; + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_scan_struct_for_floats (p, nfpr); + break; + case FFI_TYPE_LONGDOUBLE: + (*nfpr) += 2; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + (*nfpr) += 1; + break; + default: + break; + } + } +} + +static int +darwin64_struct_size_exceeds_gprs_p (ffi_type *s, char *src, unsigned *nfpr) +{ + unsigned struct_offset=0, i; + + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + if (darwin64_struct_size_exceeds_gprs_p (p, item_base, nfpr)) + return 1; + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + default: + /* If we try and place any item, that is non-float, once we've + exceeded the 8 GPR mark, then we can't fit the struct. */ + if ((unsigned long)item_base >= 8*8) + return 1; + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return 0; +} + +/* Can this struct be returned by value? */ +int +darwin64_struct_ret_by_value_p (ffi_type *s) +{ + unsigned nfp = 0; + + FFI_ASSERT (s && s->type == FFI_TYPE_STRUCT); + + /* The largest structure we can return is 8long + 13 doubles. */ + if (s->size > 168) + return 0; + + /* We can't pass more than 13 floats. */ + darwin64_scan_struct_for_floats (s, &nfp); + if (nfp > 13) + return 0; + + /* If there are not too many floats, and the struct is + small enough to accommodate in the GPRs, then it must be OK. */ + if (s->size <= 64) + return 1; + + /* Well, we have to look harder. 
*/ + nfp = 0; + if (darwin64_struct_size_exceeds_gprs_p (s, NULL, &nfp)) + return 0; + + return 1; +} + +void +darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) +{ + int i; + double *fpr_base = *fprs; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_pass_struct_floats (p, item_base, nfpr, + &fpr_base); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = (double) *(float *)item_base; + (*nfpr) += 1; + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + /* Update the scores. */ + *fprs = fpr_base; +} + +/* Darwin64 special rules. + Break out a struct into params and float registers. */ +static void +darwin64_pass_struct_by_value (ffi_type *s, char *src, unsigned size, + unsigned *nfpr, double **fprs, unsigned long **arg) +{ + unsigned long *next_arg = *arg; + char *dest_cpy = (char *)next_arg; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + if (!size) + return; + + /* First... special cases. */ + if (size < 3 + || (size == 4 + && s->elements[0] + && s->elements[0]->type != FFI_TYPE_FLOAT)) + { + /* Must be at least one GPR, padding is unspecified in value, + let's make it zero. */ + *next_arg = 0UL; + dest_cpy += 8 - size; + memcpy ((char *) dest_cpy, src, size); + next_arg++; + } + else if (size == 16) + { + memcpy ((char *) dest_cpy, src, size); + next_arg += 2; + } + else + { + /* now the general case, we consider embedded floats. */ + memcpy ((char *) dest_cpy, src, size); + darwin64_pass_struct_floats (s, src, nfpr, fprs); + next_arg += (size+7)/8; + } + + *arg = next_arg; +} + +double * +darwin64_struct_floats_to_mem (ffi_type *s, char *dest, double *fprs, unsigned *nf) +{ + int i; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = dest + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + fprs = darwin64_struct_floats_to_mem (p, item_base, fprs, nf); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + break; + case FFI_TYPE_FLOAT: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(float *)item_base = (float) *fprs++ ; + (*nf) += 1; + } + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return fprs; +} + +#endif + +/* Adjust the size of S to be correct for Darwin. 
+ On Darwin m32, the first field of a structure has natural alignment. + On Darwin m64, all fields have natural alignment. */ + +static void +darwin_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + if (p->type == FFI_TYPE_STRUCT) + darwin_adjust_aggregate_sizes (p); +#if defined(POWERPC_DARWIN64) + /* Natural alignment for all items. */ + align = p->alignment; +#else + /* Natural alignment for the first item... */ + if (i == 0) + align = p->alignment; + else if (p->alignment == 16 || p->alignment < 4) + /* .. subsequent items with vector or align < 4 have natural align. */ + align = p->alignment; + else + /* .. or align is 4. */ + align = 4; +#endif + /* Pad, if necessary, before adding the current item. */ + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + /* This should not be necessary on m64, but harmless. */ + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Adjust the size of S to be correct for AIX. + Word-align double unless it is the first member of a structure. */ + +static void +aix_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + aix_adjust_aggregate_sizes (p); + align = p->alignment; + if (i != 0 && p->type == FFI_TYPE_DOUBLE) + align = 4; + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* All this is for the DARWIN ABI. */ + unsigned i; + ffi_type **ptr; + unsigned bytes; + unsigned fparg_count = 0, intarg_count = 0; + unsigned flags = 0; + unsigned size_al = 0; + + /* All the machine-independent calculation of cif->bytes will be wrong. + All the calculation of structure sizes will also be wrong. + Redo the calculation for DARWIN. */ + + if (cif->abi == FFI_DARWIN) + { + darwin_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + darwin_adjust_aggregate_sizes (cif->arg_types[i]); + } + + if (cif->abi == FFI_AIX) + { + aix_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + aix_adjust_aggregate_sizes (cif->arg_types[i]); + } + + /* Space for the frame pointer, callee's LR, CR, etc, and for + the asm's temp regs. */ + + bytes = (LINKAGE_AREA_GPRS + ASM_NEEDS_REGISTERS) * sizeof(unsigned long); + + /* Return value handling. + The rules m32 are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values [??? 
and structures between 5 and 8 bytes] are + returned in gpr3 and gpr4; + - Single/double FP values are returned in fpr1; + - Long double FP (if not equivalent to double) values are returned in + fpr1 and fpr2; + m64: + - 64-bit or smaller integral values are returned in GPR3 + - Single/double FP values are returned in fpr1; + - Long double FP values are returned in fpr1 and fpr2; + m64 Structures: + - If the structure could be accommodated in registers were it to be the + first argument to a routine, then it is returned in those registers. + m32/m64 structures otherwise: + - Larger structures values are allocated space and a pointer is passed + as the first argument. */ + switch (cif->rtype->type) + { + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + flags |= FLAG_RETURNS_FP; + break; +#endif + + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + case FFI_TYPE_POINTER: +#endif + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if defined(POWERPC_DARWIN64) + { + /* Can we fit the struct into regs? */ + if (darwin64_struct_ret_by_value_p (cif->rtype)) + { + unsigned nfpr = 0; + flags |= FLAG_RETURNS_STRUCT; + if (cif->rtype->size != 16) + darwin64_scan_struct_for_floats (cif->rtype, &nfpr) ; + else + flags |= FLAG_RETURNS_128BITS; + /* Will be 0 for 16byte struct. */ + if (nfpr) + flags |= FLAG_RETURNS_FP; + } + else /* By ref. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size <= 4) + flags |= FLAG_RETURNS_STRUCT; + else /* else by reference. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } +#else /* assume we pass by ref. */ + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; +#endif + break; + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. + ??? Structures are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. + For m64 the count is effectively of half-GPRs. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned align_words; + switch ((*ptr)->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + fparg_count++; +#if !defined(POWERPC_DARWIN64) + /* If this FP arg is going on the stack, it must be + 8-byte-aligned. */ + if (fparg_count > NUM_FPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0) + intarg_count++; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + fparg_count += 2; + /* If this FP arg is going on the stack, it must be + 16-byte-aligned. */ + if (fparg_count >= NUM_FPR_ARG_REGISTERS) +#if defined (POWERPC64) + intarg_count = FFI_ALIGN(intarg_count, 2); +#else + intarg_count = FFI_ALIGN(intarg_count, 4); +#endif + break; +#endif + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#if defined(POWERPC64) + intarg_count++; +#else + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. 
If they go on the stack, they must + be 8-byte-aligned. */ + if (intarg_count == NUM_GPR_ARG_REGISTERS-1 + || (intarg_count >= NUM_GPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0)) + intarg_count++; + intarg_count += 2; +#endif + break; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + align_words = (*ptr)->alignment >> 3; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* Base size of the struct. */ + intarg_count += (size_al + 7) / 8; + /* If 16 bytes then don't worry about floats. */ + if (size_al != 16) + /* Scan through for floats to be placed in regs. */ + darwin64_scan_struct_for_floats (*ptr, &fparg_count) ; +#else + align_words = (*ptr)->alignment >> 2; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* If the first member of the struct is a double, then align + the struct to double-word. + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); */ +# ifdef POWERPC64 + intarg_count += (size_al + 7) / 8; +# else + intarg_count += (size_al + 3) / 4; +# endif +#endif + break; + + default: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + break; + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + +#if defined(POWERPC_DARWIN64) + /* Space to image the FPR registers, if needed - which includes when they might be + used in a struct return. */ + if (fparg_count != 0 + || ((flags & FLAG_RETURNS_STRUCT) + && (flags & FLAG_RETURNS_FP))) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#else + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#endif + + /* Stack space. */ +#ifdef POWERPC64 + if ((intarg_count + fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + fparg_count) * sizeof(long); +#else + if ((intarg_count + 2 * fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + 2 * fparg_count) * sizeof(long); +#endif + else + bytes += NUM_GPR_ARG_REGISTERS * sizeof(long); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = FFI_ALIGN(bytes, 16) ; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void)); + +extern void ffi_call_go_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), void *closure); + +extern void ffi_call_DARWIN(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), ffi_type*); + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. 
*/ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args)); + break; + case FFI_DARWIN: + ffi_call_DARWIN(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), cif->rtype); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_go_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), closure); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void flush_icache(char *); +static void flush_range(char *, int); + +/* The layout of a function descriptor. A C function pointer really + points to one of these. */ + +typedef struct aix_fd_struct { + void *code_pointer; + void *toc; +} aix_fd; + +/* here I'd like to add the stack frame layout we use in darwin_closure.S + and aix_closure.S + + m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee's LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent's frame. + |--------------------------------------------| <+ <<< on entry to ffi_closure_ASM + | Result Bytes 16 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_closure_ASM. 
+ +*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + struct ffi_aix_trampoline_struct *tramp_aix; + aix_fd *fd; + + switch (cif->abi) + { + case FFI_DARWIN: + + FFI_ASSERT (cif->abi == FFI_DARWIN); + + tramp = (unsigned int *) &closure->tramp[0]; +#if defined(POWERPC_DARWIN64) + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0015; /* bcl- 20,4*cr7+so, +0x18 (L1) */ + /* We put the addresses here. */ + tramp[6] = 0x7d6802a6; /*L1: mflr r11 */ + tramp[7] = 0xe98b0000; /* ld r12,0(r11) function address */ + tramp[8] = 0x7c0803a6; /* mtlr r0 */ + tramp[9] = 0x7d8903a6; /* mtctr r12 */ + tramp[10] = 0xe96b0008; /* lwz r11,8(r11) static chain */ + tramp[11] = 0x4e800420; /* bctr */ + + *((unsigned long *)&tramp[2]) = (unsigned long) ffi_closure_ASM; /* function */ + *((unsigned long *)&tramp[4]) = (unsigned long) codeloc; /* context */ +#else + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f000d; /* bcl- 20,4*cr7+so,0x10 */ + tramp[4] = 0x7d6802a6; /* mflr r11 */ + tramp[5] = 0x818b0000; /* lwz r12,0(r11) function address */ + tramp[6] = 0x7c0803a6; /* mtlr r0 */ + tramp[7] = 0x7d8903a6; /* mtctr r12 */ + tramp[8] = 0x816b0004; /* lwz r11,4(r11) static chain */ + tramp[9] = 0x4e800420; /* bctr */ + tramp[2] = (unsigned long) ffi_closure_ASM; /* function */ + tramp[3] = (unsigned long) codeloc; /* context */ +#endif + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. Only necessary on Darwin. */ + flush_range(codeloc, FFI_TRAMPOLINE_SIZE); + + break; + + case FFI_AIX: + + tramp_aix = (struct ffi_aix_trampoline_struct *) (closure->tramp); + fd = (aix_fd *)(void *)ffi_closure_ASM; + + FFI_ASSERT (cif->abi == FFI_AIX); + + tramp_aix->code_pointer = fd->code_pointer; + tramp_aix->toc = fd->toc; + tramp_aix->static_chain = codeloc; + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + break; + + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_AIX: + + FFI_ASSERT (cif->abi == FFI_AIX); + + closure->tramp = (void *)ffi_go_closure_ASM; + closure->cif = cif; + closure->fun = fun; + return FFI_OK; + + // For now, ffi_prep_go_closure is only implemented for AIX, not for Darwin + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +static void +flush_icache(char *addr) +{ +#ifndef _AIX + __asm__ volatile ( + "dcbf 0,%0\n" + "\tsync\n" + "\ticbi 0,%0\n" + "\tsync\n" + "\tisync" + : : "r"(addr) : "memory"); +#endif +} + +static void +flush_range(char * addr1, int size) +{ +#define MIN_LINE_SIZE 32 + int i; + for (i = 0; i < size; i += MIN_LINE_SIZE) + flush_icache(addr1+i); + flush_icache(addr1+size-1); +} + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *, void *, + unsigned long *, ffi_dblfl *); + +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure*, void *, + unsigned long *, ffi_dblfl *); + +/* Basically the trampoline invokes ffi_closure_ASM, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_ASM invokes the + following helper function to do most of the work. 
*/ + +static ffi_type * +ffi_closure_helper_common (ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + /* rvalue is the pointer to space for return value in closure assembly + pgr is the pointer to where r3-r10 are stored in ffi_closure_ASM + pfr is the pointer to where f1-f13 are stored in ffi_closure_ASM. */ + + typedef double ldbits[2]; + + union ldu + { + ldbits lb; + long double ld; + }; + + void ** avalue; + ffi_type ** arg_types; + long i, avn; + ffi_dblfl * end_pfr = pfr + NUM_FPR_ARG_REGISTERS; + unsigned size_al; +#if defined(POWERPC_DARWIN64) + unsigned fpsused = 0; +#endif + + avalue = alloca (cif->nargs * sizeof(void *)); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { +#if defined(POWERPC_DARWIN64) + if (!darwin64_struct_ret_by_value_p (cif->rtype)) + { + /* Won't fit into the regs - return by ref. */ + rvalue = (void *) *pgr; + pgr++; + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size > 4) + { + rvalue = (void *) *pgr; + pgr++; + } +#else /* assume we return by ref. */ + rvalue = (void *) *pgr; + pgr++; +#endif + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 7; +#else + avalue[i] = (char *) pgr + 3; +#endif + pgr++; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 6; +#else + avalue[i] = (char *) pgr + 2; +#endif + pgr++; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 4; +#else + case FFI_TYPE_POINTER: + avalue[i] = pgr; +#endif + pgr++; + break; + + case FFI_TYPE_STRUCT: + size_al = arg_types[i]->size; +#if defined(POWERPC_DARWIN64) + pgr = (unsigned long *)FFI_ALIGN((char *)pgr, arg_types[i]->alignment); + if (size_al < 3 || size_al == 4) + { + avalue[i] = ((char *)pgr)+8-size_al; + if (arg_types[i]->elements[0]->type == FFI_TYPE_FLOAT + && fpsused < NUM_FPR_ARG_REGISTERS) + { + *(float *)pgr = (float) *(double *)pfr; + pfr++; + fpsused++; + } + } + else + { + if (size_al != 16) + pfr = (ffi_dblfl *) + darwin64_struct_floats_to_mem (arg_types[i], (char *)pgr, + (double *)pfr, &fpsused); + avalue[i] = pgr; + } + pgr += (size_al + 7) / 8; +#else + /* If the first member of the struct is a double, then align + the struct to double-word. */ + if (arg_types[i]->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN(arg_types[i]->size, 8); +# if defined(POWERPC64) + FFI_ASSERT (cif->abi != FFI_DARWIN); + avalue[i] = pgr; + pgr += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. */ + if (size_al < 3 && cif->abi == FFI_DARWIN) + avalue[i] = (char*) pgr + 4 - size_al; + else + avalue[i] = pgr; + pgr += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: +#if defined(POWERPC64) + case FFI_TYPE_POINTER: + avalue[i] = pgr; + pgr++; + break; +#else + /* Long long ints are passed in two gpr's. */ + avalue[i] = pgr; + pgr += 2; + break; +#endif + + case FFI_TYPE_FLOAT: + /* A float value consumes a GPR. + There are 13 64bit floating point registers. 
*/ + if (pfr < end_pfr) + { + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr++; + break; + + case FFI_TYPE_DOUBLE: + /* A double value consumes two GPRs. + There are 13 64bit floating point registers. */ + if (pfr < end_pfr) + { + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } +#ifdef POWERPC64 + pgr++; +#else + pgr += 2; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +#ifdef POWERPC64 + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr) + { + *pgr = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pgr; + } + pgr += 2; +#else /* POWERPC64 */ + /* A long double value consumes four GPRs and two FPRs. + There are 13 64bit floating point registers. */ + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + /* Here we have the situation where one part of the long double + is stored in fpr13 and the other part is already on the stack. + We use a union to pass the long double to avalue[i]. */ + else if (pfr + 1 == end_pfr) + { + union ldu temp_ld; + memcpy (&temp_ld.lb[0], pfr, sizeof(ldbits)); + memcpy (&temp_ld.lb[1], pgr + 2, sizeof(ldbits)); + avalue[i] = &temp_ld.ld; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr += 4; +#endif /* POWERPC64 */ + break; +#endif + default: + FFI_ASSERT(0); + } + i++; + } + + (fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_ASM to perform return type promotions. */ + return cif->rtype; +} + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure->user_data, rvalue, pgr, pfr); +} + +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure, rvalue, pgr, pfr); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c new file mode 100644 index 0000000..4d50878 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c @@ -0,0 +1,1153 @@ +/* ----------------------------------------------------------------------- + ffi_linux64.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifdef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the LINUX64 ABI. */ +enum { + NUM_GPR_ARG_REGISTERS64 = 8, + NUM_FPR_ARG_REGISTERS64 = 13, + NUM_VEC_ARG_REGISTERS64 = 12, +}; +enum { ASM_NEEDS_REGISTERS64 = 4 }; + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_linux64 (ffi_abi abi) +{ + if ((abi & (FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128)) == FFI_LINUX) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + + +static unsigned int +discover_homogeneous_aggregate (ffi_abi abi, + const ffi_type *t, + unsigned int *elnum) +{ + switch (t->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 64-bit long doubles are equivalent to doubles. */ + if ((abi & FFI_LINUX_LONG_DOUBLE_128) == 0) + { + *elnum = 1; + return FFI_TYPE_DOUBLE; + } + /* IBM extended precision values use unaligned pairs + of FPRs, but according to the ABI must be considered + distinct from doubles. They are also limited to a + maximum of four members in a homogeneous aggregate. */ + else if ((abi & FFI_LINUX_LONG_DOUBLE_IEEE128) == 0) + { + *elnum = 2; + return FFI_TYPE_LONGDOUBLE; + } + /* Fall through. */ +#endif + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + *elnum = 1; + return (int) t->type; + + case FFI_TYPE_STRUCT:; + { + unsigned int base_elt = 0, total_elnum = 0; + ffi_type **el = t->elements; + while (*el) + { + unsigned int el_elt, el_elnum = 0; + el_elt = discover_homogeneous_aggregate (abi, *el, &el_elnum); + if (el_elt == 0 + || (base_elt && base_elt != el_elt)) + return 0; + base_elt = el_elt; + total_elnum += el_elnum; +#if _CALL_ELF == 2 + if (total_elnum > 8) + return 0; +#else + if (total_elnum > 1) + return 0; +#endif + el++; + } + *elnum = total_elnum; + return base_elt; + } + + default: + return 0; + } +} + + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_linux64_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fparg_count = 0, intarg_count = 0, vecarg_count = 0; + unsigned flags = cif->flags; + unsigned elt, elnum, rtype; + +#if FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE + /* If compiled without long double support... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0 || + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#elif !defined(__VEC__) + /* If compiled without vector register support (used by assembly)... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#else + /* If the IEEE128 flag is set, but long double is only 64 bits wide... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) == 0 && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#endif + + /* The machine-independent calculation of cif->bytes doesn't work + for us. Redo the calculation. */ +#if _CALL_ELF == 2 + /* Space for backchain, CR, LR, TOC and the asm's temp regs. */ + bytes = (4 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the general registers. 
*/ + bytes += NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#else + /* Space for backchain, CR, LR, cc/ld doubleword, TOC and the asm's temp + regs. */ + bytes = (6 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the mandatory parm save area and general registers. */ + bytes += 2 * NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#endif + + /* Return value handling. */ + rtype = cif->rtype->type; +#if _CALL_ELF == 2 +homogeneous: +#endif + switch (rtype) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + flags |= FLAG_RETURNS_VEC; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if _CALL_ELF == 2 + elt = discover_homogeneous_aggregate (cif->abi, cif->rtype, &elnum); + if (elt) + { + flags |= FLAG_RETURNS_SMST; + rtype = elt; + goto homogeneous; + } + if (cif->rtype->size <= 16) + { + flags |= FLAG_RETURNS_SMST; + break; + } +#endif + intarg_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned int align; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count++; + /* Align to 16 bytes, plus the 16-byte argument. */ + intarg_count = (intarg_count + 3) & ~0x1; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + fparg_count++; + intarg_count++; + } + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + fparg_count++; + intarg_count++; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + align = align / 8; + if (align > 1) + intarg_count = FFI_ALIGN (intarg_count, align); + } + intarg_count += ((*ptr)->size + 7) / 8; + elt = discover_homogeneous_aggregate (cif->abi, *ptr, &elnum); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count += elnum; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + else +#endif + if (elt) + { + fparg_count += elnum; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + else + { + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 8-byte word in a GPR, either + the object itself or a pointer to it. 
*/ + intarg_count++; + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + default: + FFI_ASSERT (0); + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (intarg_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (vecarg_count != 0) + flags |= FLAG_VEC_ARGUMENTS; + + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS64 * sizeof (double); + /* Space for the vector registers, if needed, aligned to 16 bytes. */ + if (vecarg_count != 0) { + bytes = (bytes + 15) & ~0xF; + bytes += NUM_VEC_ARG_REGISTERS64 * sizeof (float128); + } + + /* Stack space. */ +#if _CALL_ELF == 2 + if ((flags & FLAG_ARG_NEEDS_PSAVE) != 0) + bytes += intarg_count * sizeof (long); +#else + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + bytes += (intarg_count - NUM_GPR_ARG_REGISTERS64) * sizeof (long); +#endif + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64 (ffi_cif *cif) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = cif->nargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. */ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; + return ffi_prep_cif_linux64_core (cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64_var (ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = nfixedargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. */ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; +#if _CALL_ELF == 2 + cif->flags |= FLAG_ARG_NEEDS_PSAVE; +#endif + return ffi_prep_cif_linux64_core (cif); +} + + +/* ffi_prep_args64 is called by the assembly routine once stack space + has been allocated for the function's arguments. 
+ + The stack layout we want looks like this: + + | Ret addr from ffi_call_LINUX64 8bytes | higher addresses + |--------------------------------------------| + | CR save area 8bytes | + |--------------------------------------------| + | Previous backchain pointer 8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*8 | | ffi_call_LINUX64 + |--------------------------------------------| | + | GPR registers r3-r10 8*8 | | + |--------------------------------------------| | + | FPR registers f1-f13 (optional) 13*8 | | + |--------------------------------------------| | + | VEC registers v2-v13 (optional) 12*16 | | + |--------------------------------------------| | + | Parameter save area | | + |--------------------------------------------| | + | TOC save area 8 | | + |--------------------------------------------| | stack | + | Linker doubleword 8 | | grows | + |--------------------------------------------| | down V + | Compiler doubleword 8 | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 8 | | + |--------------------------------------------| | + | CR save area 8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 8 |-/ during + |--------------------------------------------| <<< ffi_call_LINUX64 + +*/ + +void FFI_HIDDEN +ffi_prep_args64 (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned long bytes = ecif->cif->bytes; + const unsigned long flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'next_arg' points at the space for gpr3, and grows upwards as + we use GPR registers, then continues at rest. */ + valp gpr_base; + valp gpr_end; + valp rest; + valp next_arg; + + /* 'fpr_base' points at the space for f1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + unsigned int fparg_count; + + /* 'vec_base' points at the space for v2, and grows upwards as + we use vector registers. */ + valp vec_base; + unsigned int vecarg_count; + + unsigned int i, words, nargs, nfixedargs; + ffi_type **ptr; + double double_tmp; + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + signed int **si; + unsigned int **ui; + unsigned long **ul; + float **f; + double **d; + float128 **f128; + } p_argv; + unsigned long gprvalue; + unsigned long align; + + stacktop.c = (char *) stack + bytes; + gpr_base.ul = stacktop.ul - ASM_NEEDS_REGISTERS64 - NUM_GPR_ARG_REGISTERS64; + gpr_end.ul = gpr_base.ul + NUM_GPR_ARG_REGISTERS64; +#if _CALL_ELF == 2 + rest.ul = stack + 4 + NUM_GPR_ARG_REGISTERS64; +#else + rest.ul = stack + 6 + NUM_GPR_ARG_REGISTERS64; +#endif + fpr_base.d = gpr_base.d - NUM_FPR_ARG_REGISTERS64; + fparg_count = 0; + /* Place the vector args below the FPRs, if used, else the GPRs. */ + if (ecif->cif->flags & FLAG_FP_ARGUMENTS) + vec_base.p = fpr_base.p & ~0xF; + else + vec_base.p = gpr_base.p; + vec_base.f128 -= NUM_VEC_ARG_REGISTERS64; + vecarg_count = 0; + next_arg.ul = gpr_base.ul; + + /* Check that everything starts aligned properly. 
*/ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_base.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_end.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) vec_base.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg.ul++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + p_argv.v = ecif->avalue; + nargs = ecif->cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = ecif->cif->nfixedargs; + for (ptr = ecif->cif->arg_types, i = 0; + i < nargs; + i++, ptr++, p_argv.v++) + { + unsigned int elt, elnum; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + next_arg.p = FFI_ALIGN (next_arg.p, 16); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 && i < nfixedargs) + memcpy (vec_base.f128++, *p_argv.f128, sizeof (float128)); + else + memcpy (next_arg.f128, *p_argv.f128, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 113); + FFI_ASSERT (flags & FLAG_VEC_ARGUMENTS); + break; + } + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + double_tmp = (*p_argv.d)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + double_tmp = (*p_argv.d)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 106); + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + } + /* Fall through. 
*/ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + double_tmp = **p_argv.d; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +#endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + double_tmp = **p_argv.f; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } +#endif + } + else + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_STRUCT: + if ((ecif->cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + if (align > 1) + { + next_arg.p = FFI_ALIGN (next_arg.p, align); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + elt = discover_homogeneous_aggregate (ecif->cif->abi, *ptr, &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + float *f; + double *d; + float128 *f128; + } arg; + + arg.v = *p_argv.v; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 + && i < nfixedargs) + memcpy (vec_base.f128++, arg.f128, sizeof (float128)); + else + memcpy (next_arg.f128, arg.f128++, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + double_tmp = *arg.f++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 + && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.f = (float) double_tmp; + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + fparg_count++; + } + while (--elnum != 0); + if ((next_arg.p & 7) != 0) + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + } + else + do + { + double_tmp = *arg.d++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.d = double_tmp; + if (++next_arg.d == gpr_end.d) + next_arg.d = rest.d; + fparg_count++; + } + while (--elnum != 0); +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { + words = ((*ptr)->size + 7) / 8; + if (next_arg.ul >= gpr_base.ul && next_arg.ul + words > gpr_end.ul) + { + size_t first = gpr_end.c - next_arg.c; + memcpy (next_arg.c, *p_argv.c, first); + memcpy (rest.c, *p_argv.c + first, (*ptr)->size - first); + next_arg.c = rest.c + words * 8 - first; + } + else + { + char *where = next_arg.c; + +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
*/ + if ((*ptr)->size < 8) + where += 8 - (*ptr)->size; +#endif + memcpy (where, *p_argv.c, (*ptr)->size); + next_arg.ul += words; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + break; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + case FFI_TYPE_UINT32: + gprvalue = **p_argv.ui; + goto putgpr; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = **p_argv.si; + goto putgpr; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + gprvalue = **p_argv.ul; + putgpr: + *next_arg.ul++ = gprvalue; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + break; + } + } + + FFI_ASSERT (flags & FLAG_4_GPR_ARGUMENTS + || (next_arg.ul >= gpr_base.ul + && next_arg.ul <= gpr_base.ul + 4)); +} + + +#if _CALL_ELF == 2 +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} +#endif + + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_linux64 (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#if _CALL_ELF == 2 + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp[0] = 0xe96c0018; /* 0: ld 11,2f-0b(12) */ + tramp[1] = 0xe98c0010; /* ld 12,1f-0b(12) */ + tramp[2] = 0x7d8903a6; /* mtctr 12 */ + tramp[3] = 0x4e800420; /* bctr */ + /* 1: .quad function_addr */ + /* 2: .quad context */ + *(void **) &tramp[4] = (void *) ffi_closure_LINUX64; + *(void **) &tramp[6] = codeloc; + flush_icache ((char *) tramp, (char *) codeloc, 4 * 4); +#else + void **tramp = (void **) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* Copy function address and TOC from ffi_closure_LINUX64 OPD. */ + memcpy (&tramp[0], (void **) ffi_closure_LINUX64, sizeof (void *)); + tramp[1] = codeloc; + memcpy (&tramp[2], (void **) ffi_closure_LINUX64 + 1, sizeof (void *)); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + + +int FFI_HIDDEN +ffi_closure_helper_LINUX64 (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pst, + ffi_dblfl *pfr, + float128 *pvec) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pst is the pointer to parameter save area + (r3-r10 are stored into its first 8 slots by ffi_closure_LINUX64) */ + /* pfr is the pointer to where f1-f13 are stored in ffi_closure_LINUX64 */ + /* pvec is the pointer to where v2-v13 are stored in ffi_closure_LINUX64 */ + + void **avalue; + ffi_type **arg_types; + unsigned long i, avn, nfixedargs; + ffi_dblfl *end_pfr = pfr + NUM_FPR_ARG_REGISTERS64; + float128 *end_pvec = pvec + NUM_VEC_ARG_REGISTERS64; + unsigned long align; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the + closure returns the data directly to the caller. 
*/ + if (cif->rtype->type == FFI_TYPE_STRUCT + && (cif->flags & FLAG_RETURNS_SMST) == 0) + { + rvalue = (void *) *pst; + pst++; + } + + i = 0; + avn = cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((cif->flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = cif->nfixedargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + unsigned int elt, elnum; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 7; + pst++; + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 6; + pst++; + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; + pst++; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = arg_types[i]->alignment; + if (align > 16) + align = 16; + if (align > 1) + pst = (unsigned long *) FFI_ALIGN ((size_t) pst, align); + } + elt = discover_homogeneous_aggregate (cif->abi, arg_types[i], &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } to, from; + + /* Repackage the aggregate from its parts. The + aggregate size is not greater than the space taken by + the registers so store back to the register/parameter + save arrays. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (pvec + elnum <= end_pvec) + to.v = pvec; + else + to.v = pst; + } + else +#endif + if (pfr + elnum <= end_pfr) + to.v = pfr; + else + to.v = pst; + + avalue[i] = to.v; + from.ul = pst; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (pvec < end_pvec && i < nfixedargs) + memcpy (to.f128, pvec++, sizeof (float128)); + else + memcpy (to.f128, from.f128, sizeof (float128)); + to.f128++; + from.f128++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.f = (float) pfr->d; + pfr++; + } + else + *to.f = *from.f; + to.f++; + from.f++; + } + while (--elnum != 0); + } + else + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.d = pfr->d; + pfr++; + } + else + *to.d = *from.d; + to.d++; + from.d++; + } + while (--elnum != 0); + } +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
*/ + if (arg_types[i]->size < 8) + avalue[i] = (char *) pst + 8 - arg_types[i]->size; + else +#endif + avalue[i] = pst; + } + pst += (arg_types[i]->size + 7) / 8; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (((unsigned long) pst & 0xF) != 0) + ++pst; + if (pvec < end_pvec && i < nfixedargs) + avalue[i] = pvec++; + else + avalue[i] = pst; + pst += 2; + break; + } + else if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + if (pfr + 1 < end_pfr && i + 1 < nfixedargs) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr && i < nfixedargs) + { + /* Passed partly in f13 and partly on the stack. + Move it all to the stack. */ + *pst = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pst; + } + pst += 2; + break; + } + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + /* On the outgoing stack all values are aligned to 8 */ + /* there are 13 64bit floating point registers */ + + if (pfr < end_pfr && i < nfixedargs) + { + avalue[i] = pfr; + pfr++; + } + else + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + if (pfr < end_pfr && i < nfixedargs) + { + /* Float values are stored as doubles in the + ffi_closure_LINUX64 code. Fix them here. */ + pfr->f = (float) pfr->d; + avalue[i] = pfr; + pfr++; + } + else + { +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; +#else + avalue[i] = pst; +#endif + } + pst++; + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_LINUX64 how to perform return type promotions. */ + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + { + if ((cif->flags & (FLAG_RETURNS_FP | FLAG_RETURNS_VEC)) == 0) + return FFI_V2_TYPE_SMALL_STRUCT + cif->rtype->size - 1; + else if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR_HOMOG; + else if ((cif->flags & FLAG_RETURNS_64BITS) != 0) + return FFI_V2_TYPE_DOUBLE_HOMOG; + else + return FFI_V2_TYPE_FLOAT_HOMOG; + } + if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR; + return cif->rtype->type; +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h new file mode 100644 index 0000000..8e2f2f0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h @@ -0,0 +1,105 @@ +/* ----------------------------------------------------------------------- + ffi_powerpc.h - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +enum { + /* The assembly depends on these exact flags. */ + /* These go in cr7 */ + FLAG_RETURNS_SMST = 1 << (31-31), /* Used for FFI_SYSV small structs. */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_VEC = 1 << (31-28), + + /* These go in cr6 */ + FLAG_RETURNS_64BITS = 1 << (31-27), + FLAG_RETURNS_128BITS = 1 << (31-26), + + FLAG_COMPAT = 1 << (31- 8), /* Not used by assembly */ + + /* These go in cr1 */ + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), /* Used by sysv code */ + FLAG_ARG_NEEDS_PSAVE = FLAG_ARG_NEEDS_COPY, /* Used by linux64 code */ + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4), + FLAG_VEC_ARGUMENTS = 1 << (31- 3), +}; + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +#if defined(__FLOAT128_TYPE__) +typedef _Float128 float128; +#elif defined(__FLOAT128__) +typedef __float128 float128; +#else +typedef char float128[16] __attribute__((aligned(16))); +#endif + +void FFI_HIDDEN ffi_closure_SYSV (void); +void FFI_HIDDEN ffi_go_closure_sysv (void); +void FFI_HIDDEN ffi_call_SYSV(extended_cif *, void (*)(void), void *, + unsigned, void *, int); + +void FFI_HIDDEN ffi_prep_types_sysv (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_sysv (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_sysv (ffi_closure *, + ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_SYSV (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, unsigned long *, + ffi_dblfl *, unsigned long *); + +void FFI_HIDDEN ffi_call_LINUX64(extended_cif *, void (*) (void), void *, + unsigned long, void *, long); +void FFI_HIDDEN ffi_closure_LINUX64 (void); +void FFI_HIDDEN ffi_go_closure_linux64 (void); + +void FFI_HIDDEN ffi_prep_types_linux64 (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64 (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64_var (ffi_cif *, unsigned int, + unsigned int); +void FFI_HIDDEN ffi_prep_args64 (extended_cif *, unsigned long *const); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_linux64 (ffi_closure *, ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_LINUX64 (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, + unsigned long *, ffi_dblfl *, + float128 *); diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c new file mode 100644 index 0000000..4078e75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c @@ -0,0 +1,923 @@ +/* ----------------------------------------------------------------------- + ffi_sysv.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 
Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifndef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the SYSV ABI. */ +#define ASM_NEEDS_REGISTERS 6 +#define NUM_GPR_ARG_REGISTERS 8 +#define NUM_FPR_ARG_REGISTERS 8 + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_sysv (ffi_abi abi) +{ + if ((abi & (FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128)) == FFI_SYSV) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + +/* Transform long double, double and float to other types as per abi. */ +static int +translate_float (int abi, int type) +{ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (type == FFI_TYPE_LONGDOUBLE + && (abi & FFI_SYSV_LONG_DOUBLE_128) == 0) + type = FFI_TYPE_DOUBLE; +#endif + if ((abi & FFI_SYSV_SOFT_FLOAT) != 0) + { + if (type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + else if (type == FFI_TYPE_DOUBLE) + type = FFI_TYPE_UINT64; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + else if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_UINT128; + } + else if ((abi & FFI_SYSV_IBM_LONG_DOUBLE) == 0) + { + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + } + return type; +} + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_sysv_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fpr_count = 0, gpr_count = 0, stack_count = 0; + unsigned flags = cif->flags; + unsigned struct_copy_size = 0; + unsigned type = cif->rtype->type; + unsigned size = cif->rtype->size; + + /* The machine-independent calculation of cif->bytes doesn't work + for us. Redo the calculation. */ + + /* Space for the frame pointer, callee's LR, and the asm's temp regs. */ + bytes = (2 + ASM_NEEDS_REGISTERS) * sizeof (int); + + /* Space for the GPR registers. */ + bytes += NUM_GPR_ARG_REGISTERS * sizeof (int); + + /* Return value handling. 
The rules for SYSV are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - Structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values and structures between 5 and 8 bytes are returned + in gpr3 and gpr4; + - Larger structures are allocated space and a pointer is passed as + the first argument. + - Single/double FP values are returned in fpr1; + - long doubles (if not equivalent to double) are returned in + fpr1,fpr2 for Linux and as for large structs for SysV. */ + + type = translate_float (cif->abi, type); + + switch (type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead + returns them in memory. */ + if ((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + { + flags |= FLAG_RETURNS_SMST; + break; + } + gpr_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. Structures and long doubles (if not equivalent + to double) are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned short typenum = (*ptr)->type; + + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS - 1) + { + fpr_count = NUM_FPR_ARG_REGISTERS; + /* 8-byte align long doubles. */ + stack_count += stack_count & 1; + stack_count += 4; + } + else + fpr_count += 2; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; +#endif + + case FFI_TYPE_DOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + { + /* 8-byte align doubles. */ + stack_count += stack_count & 1; + stack_count += 2; + } + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_FLOAT: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + /* Yes, we don't follow the ABI, but neither does gcc. */ + stack_count += 1; + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + /* A long double in FFI_LINUX_SOFT_FLOAT can use only a set + of four consecutive gprs. If we do not have enough, we + have to adjust the gpr_count value. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS - 3) + gpr_count = NUM_GPR_ARG_REGISTERS; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 4; + else + gpr_count += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. If they go on the stack, they must + be 8-byte-aligned. 
+ + Also, only certain register pairs can be used for + passing long long int -- specifically (r3,r4), (r5,r6), + (r7,r8), (r9,r10). */ + gpr_count += gpr_count & 1; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + { + stack_count += stack_count & 1; + stack_count += 2; + } + else + gpr_count += 2; + break; + + case FFI_TYPE_STRUCT: + /* We must allocate space for a copy of these to enforce + pass-by-value. Pad the space up to a multiple of 16 + bytes (the maximum alignment required for anything under + the SYSV ABI). */ + struct_copy_size += ((*ptr)->size + 15) & ~0xF; + /* Fall through (allocate space for the pointer). */ + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 1; + else + gpr_count += 1; + break; + + default: + FFI_ASSERT (0); + } + } + + if (fpr_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (gpr_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (struct_copy_size != 0) + flags |= FLAG_ARG_NEEDS_COPY; + + /* Space for the FPR registers, if needed. */ + if (fpr_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof (double); + + /* Stack space. */ + bytes += stack_count * sizeof (int); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + /* Add in the space for the copied structures. */ + bytes += struct_copy_size; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_sysv (ffi_cif *cif) +{ + if ((cif->abi & FFI_SYSV) == 0) + { + /* This call is from old code. Translate to new ABI values. */ + cif->flags |= FLAG_COMPAT; + switch (cif->abi) + { + default: + return FFI_BAD_ABI; + + case FFI_COMPAT_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_STRUCT_RET | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_GCC_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_LINUX: + cif->abi = (FFI_SYSV | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + + case FFI_COMPAT_LINUX_SOFT_FLOAT: + cif->abi = (FFI_SYSV | FFI_SYSV_SOFT_FLOAT | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + } + } + return ffi_prep_cif_sysv_core (cif); +} + +/* ffi_prep_args_SYSV is called by the assembly routine once stack space + has been allocated for the function's arguments. 
+ + The stack layout we want looks like this: + + | Return address from ffi_call_SYSV 4bytes | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*4 | | ffi_call_SYSV + |--------------------------------------------| | + | GPR registers r3-r10 8*4 | | ffi_call_SYSV + |--------------------------------------------| | + | FPR registers f1-f8 (optional) 8*8 | | + |--------------------------------------------| | stack | + | Space for copied structures | | grows | + |--------------------------------------------| | down V + | Parameters that didn't fit in registers | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 4 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4 |-/ during + |--------------------------------------------| <<< ffi_call_SYSV + +*/ + +void FFI_HIDDEN +ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned *u; + long long *ll; + float *f; + double *d; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'gpr_base' points at the space for gpr3, and grows upwards as + we use GPR registers. */ + valp gpr_base; + valp gpr_end; + +#ifndef __NO_FPRS__ + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + valp fpr_end; +#endif + + /* 'copy_space' grows down as we put structures in it. It should + stay 16-byte aligned. */ + valp copy_space; + + /* 'next_arg' grows up as we put parameters in it. */ + valp next_arg; + + int i; + ffi_type **ptr; +#ifndef __NO_FPRS__ + double double_tmp; +#endif + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **ui; + long long **ll; + float **f; + double **d; + } p_argv; + size_t struct_copy_size; + unsigned gprvalue; + + stacktop.c = (char *) stack + bytes; + gpr_end.u = stacktop.u - ASM_NEEDS_REGISTERS; + gpr_base.u = gpr_end.u - NUM_GPR_ARG_REGISTERS; +#ifndef __NO_FPRS__ + fpr_end.d = gpr_base.d; + fpr_base.d = fpr_end.d - NUM_FPR_ARG_REGISTERS; + copy_space.c = ((flags & FLAG_FP_ARGUMENTS) ? fpr_base.c : gpr_base.c); +#else + copy_space.c = gpr_base.c; +#endif + next_arg.u = stack + 2; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) copy_space.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + FFI_ASSERT (copy_space.c >= next_arg.c); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *gpr_base.u++ = (unsigned) (char *) ecif->rvalue; + + /* Now for the arguments. 
*/ + p_argv.v = ecif->avalue; + for (ptr = ecif->cif->arg_types, i = ecif->cif->nargs; + i > 0; + i--, ptr++, p_argv.v++) + { + unsigned int typenum = (*ptr)->type; + + typenum = translate_float (ecif->cif->abi, typenum); + + /* Now test the translated value */ + switch (typenum) + { +#ifndef __NO_FPRS__ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + double_tmp = (*p_argv.d)[0]; + + if (fpr_base.d >= fpr_end.d - 1) + { + fpr_base.d = fpr_end.d; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + double_tmp = (*p_argv.d)[1]; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + { + *fpr_base.d++ = double_tmp; + double_tmp = (*p_argv.d)[1]; + *fpr_base.d++ = double_tmp; + } + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +# endif + case FFI_TYPE_DOUBLE: + double_tmp = **p_argv.d; + + if (fpr_base.d >= fpr_end.d) + { + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: + double_tmp = **p_argv.f; + if (fpr_base.d >= fpr_end.d) + { + *next_arg.f = (float) double_tmp; + next_arg.u += 1; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +#endif /* have FPRs */ + + case FFI_TYPE_UINT128: + /* The soft float ABI for long doubles works like this, a long double + is passed in four consecutive GPRs if available. A maximum of 2 + long doubles can be passed in gprs. If we do not have 4 GPRs + left, the long double is passed on the stack, 4-byte aligned. */ + if (gpr_base.u >= gpr_end.u - 3) + { + unsigned int ii; + gpr_base.u = gpr_end.u; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *next_arg.u++ = int_tmp; + } + } + else + { + unsigned int ii; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *gpr_base.u++ = int_tmp; + } + } + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (gpr_base.u >= gpr_end.u - 1) + { + gpr_base.u = gpr_end.u; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u++; + *next_arg.ll = **p_argv.ll; + next_arg.u += 2; + } + else + { + /* The abi states only certain register pairs can be + used for passing long long int specifically (r3,r4), + (r5,r6), (r7,r8), (r9,r10). If next arg is long long + but not correct starting register of pair then skip + until the proper starting register. */ + if (((gpr_end.u - gpr_base.u) & 1) != 0) + gpr_base.u++; + *gpr_base.ll++ = **p_argv.ll; + } + break; + + case FFI_TYPE_STRUCT: + struct_copy_size = ((*ptr)->size + 15) & ~0xF; + copy_space.c -= struct_copy_size; + memcpy (copy_space.c, *p_argv.c, (*ptr)->size); + + gprvalue = (unsigned long) copy_space.c; + + FFI_ASSERT (copy_space.c > next_arg.c); + FFI_ASSERT (flags & FLAG_ARG_NEEDS_COPY); + goto putgpr; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + + gprvalue = **p_argv.ui; + + putgpr: + if (gpr_base.u >= gpr_end.u) + *next_arg.u++ = gprvalue; + else + *gpr_base.u++ = gprvalue; + break; + } + } + + /* Check that we didn't overrun the stack... 
*/ + FFI_ASSERT (copy_space.c >= next_arg.c); + FFI_ASSERT (gpr_base.u <= gpr_end.u); +#ifndef __NO_FPRS__ + FFI_ASSERT (fpr_base.u <= fpr_end.u); +#endif + FFI_ASSERT (((flags & FLAG_4_GPR_ARGUMENTS) != 0) + == (gpr_end.u - gpr_base.u < 4)); +} + +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_sysv (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi < FFI_SYSV || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0005; /* bcl 20,31,.+4 */ + tramp[2] = 0x7d6802a6; /* mflr r11 */ + tramp[3] = 0x7c0803a6; /* mtlr r0 */ + tramp[4] = 0x800b0018; /* lwz r0,24(r11) */ + tramp[5] = 0x816b001c; /* lwz r11,28(r11) */ + tramp[6] = 0x7c0903a6; /* mtctr r0 */ + tramp[7] = 0x4e800420; /* bctr */ + *(void **) &tramp[8] = (void *) ffi_closure_SYSV; /* function */ + *(void **) &tramp[9] = codeloc; /* context */ + + /* Flush the icache. */ + flush_icache ((char *)tramp, (char *)codeloc, 8 * 4); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_SYSV invokes the + following helper function to do most of the work. */ + +int +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pgr, + ffi_dblfl *pfr, + unsigned long *pst) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pgr is the pointer to where r3-r10 are stored in ffi_closure_SYSV */ + /* pfr is the pointer to where f1-f8 are stored in ffi_closure_SYSV */ + /* pst is the pointer to outgoing parameter stack in original caller */ + + void ** avalue; + ffi_type ** arg_types; + long i, avn; +#ifndef __NO_FPRS__ + long nf = 0; /* number of floating registers already used */ +#endif + long ng = 0; /* number of general registers already used */ + + unsigned size = cif->rtype->size; + unsigned short rtypenum = cif->rtype->type; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* First translate for softfloat/nonlinux */ + rtypenum = translate_float (cif->abi, rtypenum); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. + For FFI_SYSV the result is passed in r3/r4 if the struct size is less + or equal 8 bytes. */ + if (rtypenum == FFI_TYPE_STRUCT + && !((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8)) + { + rvalue = (void *) *pgr; + ng++; + pgr++; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) { + unsigned short typenum = arg_types[i]->type; + + /* We may need to handle some values depending on ABI. 
*/ + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#ifndef __NO_FPRS__ + case FFI_TYPE_FLOAT: + /* Unfortunately float values are stored as doubles + in the ffi_closure_SYSV code (since we don't check + the type in that routine). */ + if (nf < NUM_FPR_ARG_REGISTERS) + { + /* FIXME? here we are really changing the values + stored in the original calling routines outgoing + parameter stack. This is probably a really + naughty thing to do but... */ + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + avalue[i] = pst; + pst += 1; + } + break; + + case FFI_TYPE_DOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS) + { + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + } + break; + +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS - 1) + { + avalue[i] = pfr; + pfr += 2; + nf += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 4; + nf = 8; + } + break; +# endif +#endif + + case FFI_TYPE_UINT128: + /* Test if for the whole long double, 4 gprs are available. + otherwise the stuff ends up on the stack. */ + if (ng < NUM_GPR_ARG_REGISTERS - 3) + { + avalue[i] = pgr; + pgr += 4; + ng += 4; + } + else + { + avalue[i] = pst; + pst += 4; + ng = 8+4; + } + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 3; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 3; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 2; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 2; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = pgr; + ng++; + pgr++; + } + else + { + avalue[i] = pst; + pst++; + } + break; + + case FFI_TYPE_STRUCT: + /* Structs are passed by reference. The address will appear in a + gpr if it is one of the first 8 arguments. */ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (void *) *pgr; + ng++; + pgr++; + } + else + { + avalue[i] = (void *) *pst; + pst++; + } + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passing long long ints are complex, they must + be passed in suitable register pairs such as + (r3,r4) or (r5,r6) or (r6,r7), or (r7,r8) or (r9,r10) + and if the entire pair aren't available then the outgoing + parameter stack is used for both but an alignment of 8 + must will be kept. So we must either look in pgr + or pst to find the correct address for this type + of parameter. */ + if (ng < NUM_GPR_ARG_REGISTERS - 1) + { + if (ng & 1) + { + /* skip r4, r6, r8 as starting points */ + ng++; + pgr++; + } + avalue[i] = pgr; + ng += 2; + pgr += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + ng = NUM_GPR_ARG_REGISTERS; + } + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. + Because the FFI_SYSV ABI returns the structures <= 8 bytes in + r3/r4 we have to tell ffi_closure_SYSV how to treat them. We + combine the base type FFI_SYSV_TYPE_SMALL_STRUCT with the size of + the struct less one. We never have a struct with size zero. + See the comment in ffitarget.h about ordering. 
*/ + if (rtypenum == FFI_TYPE_STRUCT + && (cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + return FFI_SYSV_TYPE_SMALL_STRUCT - 1 + size; + return rtypenum; +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget.h new file mode 100644 index 0000000..7fb9a93 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ffitarget.h @@ -0,0 +1,204 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (C) 2007, 2008, 2010 Free Software Foundation, Inc + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for PowerPC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined (POWERPC) && defined (__powerpc64__) /* linux64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#elif defined (POWERPC_DARWIN) && defined (__ppc64__) /* Darwin64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#ifndef POWERPC_DARWIN64 +#define POWERPC_DARWIN64 +#endif +#elif defined (POWERPC_AIX) && defined (__64BIT__) /* AIX64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#if defined (POWERPC_AIX) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_AIX, + FFI_LAST_ABI + +#elif defined (POWERPC_DARWIN) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_DARWIN, + FFI_LAST_ABI + +#else + /* The FFI_COMPAT values are used by old code. Since libffi may be + a shared library we have to support old values for backwards + compatibility. */ + FFI_COMPAT_SYSV, + FFI_COMPAT_GCC_SYSV, + FFI_COMPAT_LINUX64, + FFI_COMPAT_LINUX, + FFI_COMPAT_LINUX_SOFT_FLOAT, + +# if defined (POWERPC64) + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 64-bit linux. We + only need worry about FFI_COMPAT_LINUX64, but to be safe avoid + all old values. */ + FFI_LINUX = 8, + /* This and following bits can reuse FFI_COMPAT values. 
*/ + FFI_LINUX_STRUCT_ALIGN = 1, + FFI_LINUX_LONG_DOUBLE_128 = 2, + FFI_LINUX_LONG_DOUBLE_IEEE128 = 4, + FFI_DEFAULT_ABI = (FFI_LINUX +# ifdef __STRUCT_PARM_ALIGN__ + | FFI_LINUX_STRUCT_ALIGN +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_LINUX_LONG_DOUBLE_128 +# ifdef __LONG_DOUBLE_IEEE128__ + | FFI_LINUX_LONG_DOUBLE_IEEE128 +# endif +# endif + ), + FFI_LAST_ABI = 16 + +# else + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 32-bit linux/sysv/bsd. */ + FFI_SYSV = 8, + /* This and following bits can reuse FFI_COMPAT values. */ + FFI_SYSV_SOFT_FLOAT = 1, + FFI_SYSV_STRUCT_RET = 2, + FFI_SYSV_IBM_LONG_DOUBLE = 4, + FFI_SYSV_LONG_DOUBLE_128 = 16, + + FFI_DEFAULT_ABI = (FFI_SYSV +# ifdef __NO_FPRS__ + | FFI_SYSV_SOFT_FLOAT +# endif +# if (defined (__SVR4_STRUCT_RETURN) \ + || defined (POWERPC_FREEBSD) && !defined (__AIX_STRUCT_RETURN)) + | FFI_SYSV_STRUCT_RET +# endif +# if __LDBL_MANT_DIG__ == 106 + | FFI_SYSV_IBM_LONG_DOUBLE +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_SYSV_LONG_DOUBLE_128 +# endif + ), + FFI_LAST_ABI = 32 +# endif +#endif + +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#if defined (POWERPC) || defined (POWERPC_FREEBSD) +# define FFI_GO_CLOSURES 1 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs +#endif +#if defined (POWERPC_AIX) +# define FFI_GO_CLOSURES 1 +#endif + +/* ppc_closure.S and linux64_closure.S expect this. */ +#define FFI_PPC_TYPE_LAST FFI_TYPE_POINTER + +/* We define additional types below. If generic types are added that + must be supported by powerpc libffi then it is likely that + FFI_PPC_TYPE_LAST needs increasing *and* the jump tables in + ppc_closure.S and linux64_closure.S be extended. */ + +#if !(FFI_TYPE_LAST == FFI_PPC_TYPE_LAST \ + || (FFI_TYPE_LAST == FFI_TYPE_COMPLEX \ + && !defined FFI_TARGET_HAS_COMPLEX_TYPE)) +# error "You likely have a broken powerpc libffi" +#endif + +/* Needed for soft-float long-double-128 support. */ +#define FFI_TYPE_UINT128 (FFI_PPC_TYPE_LAST + 1) + +/* Needed for FFI_SYSV small structure returns. */ +#define FFI_SYSV_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 2) + +/* Used by ELFv2 for homogenous structure returns. 
*/ +#define FFI_V2_TYPE_VECTOR (FFI_PPC_TYPE_LAST + 1) +#define FFI_V2_TYPE_VECTOR_HOMOG (FFI_PPC_TYPE_LAST + 2) +#define FFI_V2_TYPE_FLOAT_HOMOG (FFI_PPC_TYPE_LAST + 3) +#define FFI_V2_TYPE_DOUBLE_HOMOG (FFI_PPC_TYPE_LAST + 4) +#define FFI_V2_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 5) + +#if _CALL_ELF == 2 +# define FFI_TRAMPOLINE_SIZE 32 +#else +# if defined(POWERPC64) || defined(POWERPC_AIX) +# if defined(POWERPC_DARWIN64) +# define FFI_TRAMPOLINE_SIZE 48 +# else +# define FFI_TRAMPOLINE_SIZE 24 +# endif +# else /* POWERPC || POWERPC_AIX */ +# define FFI_TRAMPOLINE_SIZE 40 +# endif +#endif + +#ifndef LIBFFI_ASM +#if defined(POWERPC_DARWIN) || defined(POWERPC_AIX) +struct ffi_aix_trampoline_struct { + void * code_pointer; /* Pointer to ffi_closure_ASM */ + void * toc; /* TOC */ + void * static_chain; /* Pointer to closure */ +}; +#endif +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64.S new file mode 100644 index 0000000..c99889c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64.S @@ -0,0 +1,283 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef POWERPC64 + .hidden ffi_call_LINUX64 + .globl ffi_call_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_call_LINUX64: + addis %r2, %r12, .TOC.-ffi_call_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_call_LINUX64@l + .localentry ffi_call_LINUX64, . - ffi_call_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_call_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_call_LINUX64,.TOC.@tocbase,0 + .type ffi_call_LINUX64,@function + .text +.L.ffi_call_LINUX64: +# else + .hidden .ffi_call_LINUX64 + .globl .ffi_call_LINUX64 + .quad .ffi_call_LINUX64,.TOC.@tocbase,0 + .size ffi_call_LINUX64,24 + .type .ffi_call_LINUX64,@function + .text +.ffi_call_LINUX64: +# endif +# endif + mflr %r0 + std %r28, -32(%r1) + std %r29, -24(%r1) + std %r30, -16(%r1) + std %r31, -8(%r1) + std %r7, 8(%r1) /* closure, saved in cr field. */ + std %r0, 16(%r1) + + mr %r28, %r1 /* our AP. 
*/ + .cfi_def_cfa_register 28 + .cfi_offset 65, 16 + .cfi_offset 31, -8 + .cfi_offset 30, -16 + .cfi_offset 29, -24 + .cfi_offset 28, -32 + + stdux %r1, %r1, %r8 + mr %r31, %r6 /* flags, */ + mr %r30, %r5 /* rvalue, */ + mr %r29, %r4 /* function address. */ +/* Save toc pointer, not for the ffi_prep_args64 call, but for the later + bctrl function call. */ +# if _CALL_ELF == 2 + std %r2, 24(%r1) +# else + std %r2, 40(%r1) +# endif + + /* Call ffi_prep_args64. */ + mr %r4, %r1 +# if defined _CALL_LINUX || _CALL_ELF == 2 + bl ffi_prep_args64 +# else + bl .ffi_prep_args64 +# endif + +# if _CALL_ELF == 2 + mr %r12, %r29 +# else + ld %r12, 0(%r29) + ld %r2, 8(%r29) +# endif + /* Now do the call. */ + /* Set up cr1 with bits 3-7 of the flags. */ + mtcrf 0xc0, %r31 + + /* Get the address to call into CTR. */ + mtctr %r12 + /* Load all those argument registers. */ + addi %r29, %r28, -32-(8*8) + ld %r3, (0*8)(%r29) + ld %r4, (1*8)(%r29) + ld %r5, (2*8)(%r29) + ld %r6, (3*8)(%r29) + bf- 5, 1f + ld %r7, (4*8)(%r29) + ld %r8, (5*8)(%r29) + ld %r9, (6*8)(%r29) + ld %r10, (7*8)(%r29) +1: + + /* Load all the FP registers. */ + bf- 6, 2f + addi %r29, %r29, -(14*8) + lfd %f1, ( 1*8)(%r29) + lfd %f2, ( 2*8)(%r29) + lfd %f3, ( 3*8)(%r29) + lfd %f4, ( 4*8)(%r29) + lfd %f5, ( 5*8)(%r29) + lfd %f6, ( 6*8)(%r29) + lfd %f7, ( 7*8)(%r29) + lfd %f8, ( 8*8)(%r29) + lfd %f9, ( 9*8)(%r29) + lfd %f10, (10*8)(%r29) + lfd %f11, (11*8)(%r29) + lfd %f12, (12*8)(%r29) + lfd %f13, (13*8)(%r29) +2: + + /* Load all the vector registers. */ + bf- 3, 3f + addi %r29, %r29, -16 + lvx %v13, 0, %r29 + addi %r29, %r29, -16 + lvx %v12, 0, %r29 + addi %r29, %r29, -16 + lvx %v11, 0, %r29 + addi %r29, %r29, -16 + lvx %v10, 0, %r29 + addi %r29, %r29, -16 + lvx %v9, 0, %r29 + addi %r29, %r29, -16 + lvx %v8, 0, %r29 + addi %r29, %r29, -16 + lvx %v7, 0, %r29 + addi %r29, %r29, -16 + lvx %v6, 0, %r29 + addi %r29, %r29, -16 + lvx %v5, 0, %r29 + addi %r29, %r29, -16 + lvx %v4, 0, %r29 + addi %r29, %r29, -16 + lvx %v3, 0, %r29 + addi %r29, %r29, -16 + lvx %v2, 0, %r29 +3: + + /* Make the call. */ + ld %r11, 8(%r28) + bctrl + + /* This must follow the call immediately, the unwinder + uses this to find out if r2 has been saved or not. */ +# if _CALL_ELF == 2 + ld %r2, 24(%r1) +# else + ld %r2, 40(%r1) +# endif + + /* Now, deal with the return value. */ + mtcrf 0x01, %r31 + bt 31, .Lstruct_return_value + bt 30, .Ldone_return_value + bt 29, .Lfp_return_value + bt 28, .Lvec_return_value + std %r3, 0(%r30) + /* Fall through... */ + +.Ldone_return_value: + /* Restore the registers we used and return. 
*/ + mr %r1, %r28 + .cfi_def_cfa_register 1 + ld %r0, 16(%r28) + ld %r28, -32(%r28) + mtlr %r0 + ld %r29, -24(%r1) + ld %r30, -16(%r1) + ld %r31, -8(%r1) + blr + +.Lvec_return_value: + stvx %v2, 0, %r30 + b .Ldone_return_value + +.Lfp_return_value: + .cfi_def_cfa_register 28 + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_return_value + stfd %f1, 0(%r30) + bf 26, .Ldone_return_value + stfd %f2, 8(%r30) + b .Ldone_return_value +.Lfloat_return_value: + stfs %f1, 0(%r30) + b .Ldone_return_value + +.Lstruct_return_value: + bf 29, .Lvec_homog_or_small_struct + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_homog_return_value + stfd %f1, 0(%r30) + stfd %f2, 8(%r30) + stfd %f3, 16(%r30) + stfd %f4, 24(%r30) + stfd %f5, 32(%r30) + stfd %f6, 40(%r30) + stfd %f7, 48(%r30) + stfd %f8, 56(%r30) + b .Ldone_return_value + +.Lfloat_homog_return_value: + stfs %f1, 0(%r30) + stfs %f2, 4(%r30) + stfs %f3, 8(%r30) + stfs %f4, 12(%r30) + stfs %f5, 16(%r30) + stfs %f6, 20(%r30) + stfs %f7, 24(%r30) + stfs %f8, 28(%r30) + b .Ldone_return_value + +.Lvec_homog_or_small_struct: + bf 28, .Lsmall_struct + stvx %v2, 0, %r30 + addi %r30, %r30, 16 + stvx %v3, 0, %r30 + addi %r30, %r30, 16 + stvx %v4, 0, %r30 + addi %r30, %r30, 16 + stvx %v5, 0, %r30 + addi %r30, %r30, 16 + stvx %v6, 0, %r30 + addi %r30, %r30, 16 + stvx %v7, 0, %r30 + addi %r30, %r30, 16 + stvx %v8, 0, %r30 + addi %r30, %r30, 16 + stvx %v9, 0, %r30 + b .Ldone_return_value + +.Lsmall_struct: + std %r3, 0(%r30) + std %r4, 8(%r30) + b .Ldone_return_value + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_call_LINUX64,.-ffi_call_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_call_LINUX64,.-.L.ffi_call_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,4,0,0 + .size .ffi_call_LINUX64,.-.ffi_call_LINUX64 +# endif +# endif + +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure.S new file mode 100644 index 0000000..d67e4bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/linux64_closure.S @@ -0,0 +1,552 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include + + .file "linux64_closure.S" + +#ifdef POWERPC64 + FFI_HIDDEN (ffi_closure_LINUX64) + .globl ffi_closure_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_closure_LINUX64: + addis %r2, %r12, .TOC.-ffi_closure_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_closure_LINUX64@l + .localentry ffi_closure_LINUX64, . - ffi_closure_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_closure_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_closure_LINUX64,.TOC.@tocbase,0 + .type ffi_closure_LINUX64,@function + .text +.L.ffi_closure_LINUX64: +# else + FFI_HIDDEN (.ffi_closure_LINUX64) + .globl .ffi_closure_LINUX64 + .quad .ffi_closure_LINUX64,.TOC.@tocbase,0 + .size ffi_closure_LINUX64,24 + .type .ffi_closure_LINUX64,@function + .text +.ffi_closure_LINUX64: +# endif +# endif + +# if _CALL_ELF == 2 +# ifdef __VEC__ +# 32 byte special reg save area + 64 byte parm save area +# + 128 byte retval area + 13*8 fpr save area + 12*16 vec save area + round to 16 +# define STACKFRAME 528 +# else +# 32 byte special reg save area + 64 byte parm save area +# + 64 byte retval area + 13*8 fpr save area + round to 16 +# define STACKFRAME 272 +# endif +# define PARMSAVE 32 +# define RETVAL PARMSAVE+64 +# else +# 48 bytes special reg save area + 64 bytes parm save area +# + 16 bytes retval area + 13*8 bytes fpr save area + round to 16 +# define STACKFRAME 240 +# define PARMSAVE 48 +# define RETVAL PARMSAVE+64 +# endif + +# if _CALL_ELF == 2 + ld %r12, FFI_TRAMPOLINE_SIZE(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + # copy r2 to r11 and load TOC into r2 + mr %r11, %r2 + ld %r2, 16(%r2) + + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + ld %r4, FFI_TRAMPOLINE_SIZE+8(%r11) + # closure->user_data + ld %r5, FFI_TRAMPOLINE_SIZE+16(%r11) + +.Ldoclosure: + # next save fpr 1 to fpr 13 + stfd %f1, -104+(0*8)(%r1) + stfd %f2, -104+(1*8)(%r1) + stfd %f3, -104+(2*8)(%r1) + stfd %f4, -104+(3*8)(%r1) + stfd %f5, -104+(4*8)(%r1) + stfd %f6, -104+(5*8)(%r1) + stfd %f7, -104+(6*8)(%r1) + stfd %f8, -104+(7*8)(%r1) + stfd %f9, -104+(8*8)(%r1) + stfd %f10, -104+(9*8)(%r1) + stfd %f11, -104+(10*8)(%r1) + stfd %f12, -104+(11*8)(%r1) + stfd %f13, -104+(12*8)(%r1) + + # load up the pointer to the saved fpr registers + addi %r8, %r1, -104 + +# ifdef __VEC__ + # load up the pointer to the saved vector registers + # 8 bytes padding for 16-byte alignment at -112(%r1) + addi %r9, %r8, -24 + stvx %v13, 0, %r9 + addi %r9, %r9, -16 + stvx %v12, 0, %r9 + addi %r9, %r9, -16 + stvx %v11, 0, %r9 + addi %r9, %r9, -16 + stvx %v10, 0, %r9 + addi %r9, %r9, -16 + stvx %v9, 0, %r9 + addi %r9, %r9, -16 + stvx %v8, 0, %r9 + addi %r9, %r9, -16 + stvx %v7, 0, %r9 + addi %r9, %r9, -16 + stvx %v6, 0, %r9 + addi %r9, %r9, -16 + stvx %v5, 0, %r9 + addi %r9, %r9, -16 + stvx %v4, 0, %r9 + addi %r9, %r9, -16 + stvx %v3, 0, %r9 + addi %r9, %r9, -16 + stvx %v2, 0, %r9 +# endif + + # load up the pointer to the result storage + addi %r6, %r1, -STACKFRAME+RETVAL + + stdu %r1, -STACKFRAME(%r1) + .cfi_def_cfa_offset STACKFRAME + .cfi_offset 65, 16 + + # make the call +# if defined _CALL_LINUX || _CALL_ELF == 2 + bl ffi_closure_helper_LINUX64 +# else + bl .ffi_closure_helper_LINUX64 +# endif +.Lret: + + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + ld %r0, STACKFRAME+16(%r1) + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + bge .Lsmall + mflr %r4 # move address of .Lret to r4 + sldi %r3, %r3, 4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + add %r3, %r3, %r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
+ .align 4 + +.Lret_type0: +# case FFI_TYPE_VOID + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_INT +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_FLOAT + lfs %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_DOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_LONGDOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + lfd %f2, RETVAL+8(%r1) + b .Lfinish +# case FFI_TYPE_UINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish +# case FFI_TYPE_UINT16 +# ifdef __LITTLE_ENDIAN__ + lhz %r3, RETVAL+0(%r1) +# else + lhz %r3, RETVAL+6(%r1) +# endif + mtlr %r0 +.Lfinish: + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT16 +# ifdef __LITTLE_ENDIAN__ + lha %r3, RETVAL+0(%r1) +# else + lha %r3, RETVAL+6(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT32 +# ifdef __LITTLE_ENDIAN__ + lwz %r3, RETVAL+0(%r1) +# else + lwz %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT32 +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_POINTER + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_V2_TYPE_VECTOR + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + mtlr %r0 + b .Lfinish +# case FFI_V2_TYPE_VECTOR_HOMOG + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + addi %r3, %r3, 16 + b .Lmorevector +# case FFI_V2_TYPE_FLOAT_HOMOG + lfs %f1, RETVAL+0(%r1) + lfs %f2, RETVAL+4(%r1) + lfs %f3, RETVAL+8(%r1) + b .Lmorefloat +# case FFI_V2_TYPE_DOUBLE_HOMOG + lfd %f1, RETVAL+0(%r1) + lfd %f2, RETVAL+8(%r1) + lfd %f3, RETVAL+16(%r1) + lfd %f4, RETVAL+24(%r1) + mtlr %r0 + lfd %f5, RETVAL+32(%r1) + lfd %f6, RETVAL+40(%r1) + lfd %f7, RETVAL+48(%r1) + lfd %f8, RETVAL+56(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorevector: + lvx %v3, 0, %r3 + addi %r3, %r3, 16 + lvx %v4, 0, %r3 + addi %r3, %r3, 16 + lvx %v5, 0, %r3 + mtlr %r0 + addi %r3, %r3, 16 + lvx %v6, 0, %r3 + addi %r3, %r3, 16 + lvx %v7, 0, %r3 + addi %r3, %r3, 16 + lvx %v8, 0, %r3 + addi %r3, %r3, 16 + 
lvx %v9, 0, %r3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorefloat: + lfs %f4, RETVAL+12(%r1) + mtlr %r0 + lfs %f5, RETVAL+16(%r1) + lfs %f6, RETVAL+20(%r1) + lfs %f7, RETVAL+24(%r1) + lfs %f8, RETVAL+28(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmall: +# ifdef __LITTLE_ENDIAN__ + ld %r3,RETVAL+0(%r1) + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr +# else + # A struct smaller than a dword is returned in the low bits of r3 + # ie. right justified. Larger structs are passed left justified + # in r3 and r4. The return value area on the stack will have + # the structs as they are usually stored in memory. + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + 7 # size 8 bytes? + neg %r5, %r3 + ld %r3,RETVAL+0(%r1) + blt .Lsmalldown + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmalldown: + addi %r5, %r5, FFI_V2_TYPE_SMALL_STRUCT + 7 + mtlr %r0 + sldi %r5, %r5, 3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + srd %r3, %r3, %r5 + blr +# endif + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_closure_LINUX64,.-ffi_closure_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_closure_LINUX64,.-.L.ffi_closure_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_closure_LINUX64,.-.ffi_closure_LINUX64 +# endif +# endif + + + FFI_HIDDEN (ffi_go_closure_linux64) + .globl ffi_go_closure_linux64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_go_closure_linux64: + addis %r2, %r12, .TOC.-ffi_go_closure_linux64@ha + addi %r2, %r2, .TOC.-ffi_go_closure_linux64@l + .localentry ffi_go_closure_linux64, . - ffi_go_closure_linux64 +# else + .section ".opd","aw" + .align 3 +ffi_go_closure_linux64: +# ifdef _CALL_LINUX + .quad .L.ffi_go_closure_linux64,.TOC.@tocbase,0 + .type ffi_go_closure_linux64,@function + .text +.L.ffi_go_closure_linux64: +# else + FFI_HIDDEN (.ffi_go_closure_linux64) + .globl .ffi_go_closure_linux64 + .quad .ffi_go_closure_linux64,.TOC.@tocbase,0 + .size ffi_go_closure_linux64,24 + .type .ffi_go_closure_linux64,@function + .text +.ffi_go_closure_linux64: +# endif +# endif + +# if _CALL_ELF == 2 + ld %r12, 8(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, 8(%r11) + # closure->fun + ld %r4, 16(%r11) + # user_data + mr %r5, %r11 + b .Ldoclosure + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_go_closure_linux64,.-ffi_go_closure_linux64 +# else +# ifdef _CALL_LINUX + .size ffi_go_closure_linux64,.-.L.ffi_go_closure_linux64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_go_closure_linux64,.-.ffi_go_closure_linux64 +# endif +# endif +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure.S new file mode 100644 index 0000000..b6d209d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/ppc_closure.S @@ -0,0 +1,397 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include +#include + + .file "ppc_closure.S" + +#ifndef POWERPC64 + +FFI_HIDDEN(ffi_closure_SYSV) +ENTRY(ffi_closure_SYSV) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + +# we want to build up an areas for the parameters passed +# in registers (both floating point and integer) + + # so first save gpr 3 to gpr 10 (aligned to 4) + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # set up registers for the routine that does the work + + # closure->cif + lwz %r3,FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + lwz %r4,FFI_TRAMPOLINE_SIZE+4(%r11) + # closure->user_data + lwz %r5,FFI_TRAMPOLINE_SIZE+8(%r11) + +.Ldoclosure: + stw %r6, 28(%r1) + stw %r7, 32(%r1) + stw %r8, 36(%r1) + stw %r9, 40(%r1) + stw %r10,44(%r1) + +#ifndef __NO_FPRS__ + # next save fpr 1 to fpr 8 (aligned to 8) + stfd %f1, 48(%r1) + stfd %f2, 56(%r1) + stfd %f3, 64(%r1) + stfd %f4, 72(%r1) + stfd %f5, 80(%r1) + stfd %f6, 88(%r1) + stfd %f7, 96(%r1) + stfd %f8, 104(%r1) +#endif + + # pointer to the result storage + addi %r6,%r1,112 + + # pointer to the saved gpr registers + addi %r7,%r1,16 + + # pointer to the saved fpr registers + addi %r8,%r1,48 + + # pointer to the outgoing parameter save area in the previous frame + # i.e. the previous frame pointer + 8 + addi %r9,%r1,152 + + # make the call + bl ffi_closure_helper_SYSV@local +.Lret: + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + + mflr %r4 # move address of .Lret to r4 + slwi %r3,%r3,4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + lwz %r0,148(%r1) + add %r3,%r3,%r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
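The .Lret dispatch just below is a hand-built jump table: the return type selects one 16-byte fragment that moves the callback's result from the scratch return area into the registers the 32-bit SysV ABI uses. A rough C rendering of the same idea, as a hedged sketch; copy_return, ret_area and gpr3/gpr4/fpr1 are illustrative names, and the sub-word cases (which additionally mask or sign-extend) are folded into the default branch:

#include <ffi.h>

static void copy_return(int ffi_type_code, const void *ret_area,
                        ffi_arg *gpr3, ffi_arg *gpr4, double *fpr1)
{
    switch (ffi_type_code) {
    case FFI_TYPE_VOID:
        break;
    case FFI_TYPE_FLOAT:
        *fpr1 = *(const float *)ret_area;           /* -> f1 */
        break;
    case FFI_TYPE_DOUBLE:
        *fpr1 = *(const double *)ret_area;          /* -> f1 */
        break;
    case FFI_TYPE_SINT64:
    case FFI_TYPE_UINT64:
        *gpr3 = ((const ffi_arg *)ret_area)[0];     /* -> r3:r4 pair */
        *gpr4 = ((const ffi_arg *)ret_area)[1];
        break;
    default:                                        /* word-sized ints, pointers */
        *gpr3 = *(const ffi_arg *)ret_area;
        break;
    }
}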
+ .align 4 +# case FFI_TYPE_VOID +.Lret_type0: + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_INT + lwz %r3,112+0(%r1) + mtlr %r0 +.Lfinish: + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_FLOAT +#ifndef __NO_FPRS__ + lfs %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_DOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_LONGDOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) + lfd %f2,112+8(%r1) + mtlr %r0 + b .Lfinish +#else + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop +#endif + +# case FFI_TYPE_UINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_UINT16 +#ifdef __LITTLE_ENDIAN__ + lhz %r3,112+0(%r1) +#else + lhz %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT16 +#ifdef __LITTLE_ENDIAN__ + lha %r3,112+0(%r1) +#else + lha %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_SINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_POINTER + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT128 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + lwz %r5,112+8(%r1) + b .Luint128 + +# The return types below are only used when the ABI type is FFI_SYSV. +# case FFI_SYSV_TYPE_SMALL_STRUCT + 1. One byte struct. + lbz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 2. Two byte struct. + lhz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 3. Three byte struct. + lwz %r3,112+0(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#else + srwi %r3,%r3,8 + mtlr %r0 + b .Lfinish +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 4. Four byte struct. + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 5. Five byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,24 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 6. Six byte struct. 
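On big-endian targets the five- to seven-byte cases branch to .Lstruct567 (below), which right-justifies a value held left-justified across the r3:r4 pair. The same arithmetic as a hedged C sketch over one 64-bit quantity; the function name is illustrative:

#include <stdint.h>

/* li %r5,24/16/8 followed by the .Lstruct567 pair shift is equivalent to
   shifting the big-endian r3:r4 value right by (8 - size) * 8 bits. */
static uint64_t right_justify_small_struct(uint64_t r3r4, unsigned size_bytes)
{
    return r3r4 >> ((8u - size_bytes) * 8u);   /* size_bytes in 5..7 */
}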
+ lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,16 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 7. Seven byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,8 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 8. Eight byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +#ifndef __LITTLE_ENDIAN__ +.Lstruct567: + subfic %r6,%r5,32 + srw %r4,%r4,%r5 + slw %r6,%r3,%r6 + srw %r3,%r3,%r5 + or %r4,%r6,%r4 + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#endif + +.Luint128: + lwz %r6,112+12(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_endproc +END(ffi_closure_SYSV) + + +FFI_HIDDEN(ffi_go_closure_sysv) +ENTRY(ffi_go_closure_sysv) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # closure->cif + lwz %r3,4(%r11) + # closure->fun + lwz %r4,8(%r11) + # user_data + mr %r5,%r11 + b .Ldoclosure + .cfi_endproc +END(ffi_go_closure_sysv) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv.S new file mode 100644 index 0000000..df97734 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/powerpc/sysv.S @@ -0,0 +1,173 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998 Geoffrey Keating + Copyright (C) 2007 Free Software Foundation, Inc + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include + +#ifndef POWERPC64 +FFI_HIDDEN(ffi_call_SYSV) +ENTRY(ffi_call_SYSV) + .cfi_startproc + /* Save the old stack pointer as AP. */ + mr %r10,%r1 + .cfi_def_cfa_register 10 + + /* Allocate the stack space we need. */ + stwux %r1,%r1,%r8 + /* Save registers we use. 
*/ + mflr %r9 + stw %r28,-16(%r10) + stw %r29,-12(%r10) + stw %r30, -8(%r10) + stw %r31, -4(%r10) + stw %r9, 4(%r10) + .cfi_offset 65, 4 + .cfi_offset 31, -4 + .cfi_offset 30, -8 + .cfi_offset 29, -12 + .cfi_offset 28, -16 + + /* Save arguments over call... */ + stw %r7, -20(%r10) /* closure, */ + mr %r31,%r6 /* flags, */ + mr %r30,%r5 /* rvalue, */ + mr %r29,%r4 /* function address, */ + mr %r28,%r10 /* our AP. */ + .cfi_def_cfa_register 28 + + /* Call ffi_prep_args_SYSV. */ + mr %r4,%r1 + bl ffi_prep_args_SYSV@local + + /* Now do the call. */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,%r31 + /* Get the address to call into CTR. */ + mtctr %r29 + /* Load all those argument registers. */ + lwz %r3,-24-(8*4)(%r28) + lwz %r4,-24-(7*4)(%r28) + lwz %r5,-24-(6*4)(%r28) + lwz %r6,-24-(5*4)(%r28) + bf- 5,1f + nop + lwz %r7,-24-(4*4)(%r28) + lwz %r8,-24-(3*4)(%r28) + lwz %r9,-24-(2*4)(%r28) + lwz %r10,-24-(1*4)(%r28) + nop +1: + +#ifndef __NO_FPRS__ + /* Load all the FP registers. */ + bf- 6,2f + lfd %f1,-24-(8*4)-(8*8)(%r28) + lfd %f2,-24-(8*4)-(7*8)(%r28) + lfd %f3,-24-(8*4)-(6*8)(%r28) + lfd %f4,-24-(8*4)-(5*8)(%r28) + nop + lfd %f5,-24-(8*4)-(4*8)(%r28) + lfd %f6,-24-(8*4)-(3*8)(%r28) + lfd %f7,-24-(8*4)-(2*8)(%r28) + lfd %f8,-24-(8*4)-(1*8)(%r28) +#endif +2: + + /* Make the call. */ + lwz %r11, -20(%r28) + bctrl + + /* Now, deal with the return value. */ + mtcrf 0x03,%r31 /* cr6-cr7 */ + bt- 31,L(small_struct_return_value) + bt- 30,L(done_return_value) +#ifndef __NO_FPRS__ + bt- 29,L(fp_return_value) +#endif + stw %r3,0(%r30) + bf+ 27,L(done_return_value) + stw %r4,4(%r30) + bf 26,L(done_return_value) + stw %r5,8(%r30) + stw %r6,12(%r30) + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lwz %r9, 4(%r28) + lwz %r31, -4(%r28) + mtlr %r9 + lwz %r30, -8(%r28) + lwz %r29,-12(%r28) + lwz %r28,-16(%r28) + .cfi_remember_state + /* At this point we don't have a cfa register. Say all our + saved regs have been restored. */ + .cfi_same_value 65 + .cfi_same_value 31 + .cfi_same_value 30 + .cfi_same_value 29 + .cfi_same_value 28 + /* Hopefully this works.. */ + .cfi_def_cfa_register 1 + .cfi_offset 1, 0 + lwz %r1,0(%r1) + .cfi_same_value 1 + blr + +#ifndef __NO_FPRS__ +L(fp_return_value): + .cfi_restore_state + bf 27,L(float_return_value) + stfd %f1,0(%r30) + bf 26,L(done_return_value) + stfd %f2,8(%r30) + b L(done_return_value) +L(float_return_value): + stfs %f1,0(%r30) + b L(done_return_value) +#endif + +L(small_struct_return_value): + /* + * The C code always allocates a properly-aligned 8-byte bounce + * buffer to make this assembly code very simple. Just write out + * r3 and r4 to the buffer to allow the C code to handle the rest. + */ + stw %r3, 0(%r30) + stw %r4, 4(%r30) + b L(done_return_value) + .cfi_endproc + +END(ffi_call_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c new file mode 100644 index 0000000..06c6544 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/prep_cif.c @@ -0,0 +1,263 @@ +/* ----------------------------------------------------------------------- + prep_cif.c - Copyright (c) 2011, 2012 Anthony Green + Copyright (c) 1996, 1998, 2007 Red Hat, Inc. 
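ffi_call_SYSV above is the 32-bit SysV back end behind the public ffi_call entry point. A minimal hedged sketch of that caller-side API, using sqrt purely as an example target:

#include <ffi.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[1] = { &ffi_type_double };
    double x = 2.0, result;
    void *arg_values[1] = { &x };

    /* Describe the callee's signature once... */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_double, arg_types) != FFI_OK)
        return 1;

    /* ...then ffi_call marshals registers and stack as the assembly above does. */
    ffi_call(&cif, FFI_FN(sqrt), &result, arg_values);
    printf("%f\n", result);   /* 1.414214 */
    return 0;
}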
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include + +/* Round up to FFI_SIZEOF_ARG. */ + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +/* Perform machine independent initialization of aggregate type + specifications. */ + +static ffi_status initialize_aggregate(ffi_type *arg, size_t *offsets) +{ + ffi_type **ptr; + + if (UNLIKELY(arg == NULL || arg->elements == NULL)) + return FFI_BAD_TYPEDEF; + + arg->size = 0; + arg->alignment = 0; + + ptr = &(arg->elements[0]); + + if (UNLIKELY(ptr == 0)) + return FFI_BAD_TYPEDEF; + + while ((*ptr) != NULL) + { + if (UNLIKELY(((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK))) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type */ + FFI_ASSERT_VALID_TYPE(*ptr); + + arg->size = FFI_ALIGN(arg->size, (*ptr)->alignment); + if (offsets) + *offsets++ = arg->size; + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + /* Structure size includes tail padding. This is important for + structures that fit in one register on ABIs like the PowerPC64 + Linux ABI that right justify small structs in a register. + It's also needed for nested structure layout, for example + struct A { long a; char b; }; struct B { struct A x; char y; }; + should find y at an offset of 2*sizeof(long) and result in a + total size of 3*sizeof(long). */ + arg->size = FFI_ALIGN (arg->size, arg->alignment); + + /* On some targets, the ABI defines that structures have an additional + alignment beyond the "natural" one based on their elements. */ +#ifdef FFI_AGGREGATE_ALIGNMENT + if (FFI_AGGREGATE_ALIGNMENT > arg->alignment) + arg->alignment = FFI_AGGREGATE_ALIGNMENT; +#endif + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +#ifndef __CRIS__ +/* The CRIS ABI specifies structure elements to have byte + alignment only, so it completely overrides this functions, + which assumes "natural" alignment and padding. */ + +/* Perform machine independent ffi_cif preparation, then call + machine dependent routine. */ + +/* For non variadic functions isvariadic should be 0 and + nfixedargs==ntotalargs. + + For variadic calls, isvariadic should be 1 and nfixedargs + and ntotalargs set as appropriate. 
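The nested-struct layout example in the comment above can be checked through ffi_get_struct_offsets, defined further down in this file. A hedged sketch, with illustrative type names and output stated for an LP64 target:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    /* struct A { long a; char b; }; */
    ffi_type *a_elems[] = { &ffi_type_slong, &ffi_type_schar, NULL };
    ffi_type a = { 0, 0, FFI_TYPE_STRUCT, a_elems };

    /* struct B { struct A x; char y; }; */
    ffi_type *b_elems[] = { &a, &ffi_type_schar, NULL };
    ffi_type b = { 0, 0, FFI_TYPE_STRUCT, b_elems };

    size_t offsets[2];
    if (ffi_get_struct_offsets(FFI_DEFAULT_ABI, &b, offsets) != FFI_OK)
        return 1;

    /* On LP64 this prints "0 16 24": y sits at 2*sizeof(long), and tail
       padding rounds the total size up to 3*sizeof(long). */
    printf("%zu %zu %zu\n", offsets[0], offsets[1], b.size);
    return 0;
}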
nfixedargs must always be >=1 */ + + +ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, ffi_type **atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT(cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; +#ifdef _M_ARM64 + cif->is_variadic = isvariadic; +#endif +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + /* Initialize the return type if necessary */ + if ((cif->rtype->size == 0) + && (initialize_aggregate(cif->rtype, NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if (rtype->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the return type */ + FFI_ASSERT_VALID_TYPE(cif->rtype); + + /* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. */ +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT +#ifdef TILE + && (cif->rtype->size > 10 * FFI_SIZEOF_ARG) +#endif +#ifdef XTENSA + && (cif->rtype->size > 16) +#endif +#ifdef NIOS2 + && (cif->rtype->size > 8) +#endif + ) + bytes = STACK_ARG_SIZE(sizeof(void*)); +#endif + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + + /* Initialize any uninitialized aggregate type definitions */ + if (((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if ((*ptr)->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the argument type, do this + check after the initialization. */ + FFI_ASSERT_VALID_TYPE(*ptr); + +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + { + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = (unsigned)FFI_ALIGN(bytes, (*ptr)->alignment); + +#ifdef TILE + if (bytes < 10 * FFI_SIZEOF_ARG && + bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG) + { + /* An argument is never split between the 10 parameter + registers and the stack. 
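Variadic calls reach ffi_prep_cif_core through ffi_prep_cif_var, with the fixed and total argument counts split out. A small hedged sketch calling printf; the format string and value are illustrative:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_pointer, &ffi_type_sint };
    char *fmt = "x = %d\n";
    int x = 42;
    void *arg_values[2] = { &fmt, &x };
    ffi_arg rc;   /* integer returns need ffi_arg-sized storage */

    /* printf has one fixed argument (the format) and two arguments in total here. */
    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
                         &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

    ffi_call(&cif, FFI_FN(printf), &rc, arg_values);
    return 0;
}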
*/ + bytes = 10 * FFI_SIZEOF_ARG; + } +#endif +#ifdef XTENSA + if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4) + bytes = 6*4; +#endif + + bytes += (unsigned int)STACK_ARG_SIZE((*ptr)->size); + } +#endif + } + + cif->bytes = bytes; + + /* Perform machine dependent cif processing */ +#ifdef FFI_TARGET_SPECIFIC_VARIADIC + if (isvariadic) + return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs); +#endif + + return ffi_prep_cif_machdep(cif); +} +#endif /* not __CRIS__ */ + +ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs, + ffi_type *rtype, ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes); +} + +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes); +} + +#if FFI_CLOSURES + +ffi_status +ffi_prep_closure (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +{ + return ffi_prep_closure_loc (closure, cif, fun, user_data, closure); +} + +#endif + +ffi_status +ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, size_t *offsets) +{ + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + if (struct_type->type != FFI_TYPE_STRUCT) + return FFI_BAD_TYPEDEF; + +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + return initialize_aggregate(struct_type, offsets); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c new file mode 100644 index 0000000..be15611 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/raw_api.c @@ -0,0 +1,267 @@ +/* ----------------------------------------------------------------------- + raw_api.c - Copyright (c) 1999, 2008 Red Hat, Inc. + + Author: Kresten Krab Thorup + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This file defines generic functions for use with the raw api. 
*/ + +#include +#include + +#if !FFI_NO_RAW_API + +size_t +ffi_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { +#if !FFI_NO_STRUCTS + if ((*at)->type == FFI_TYPE_STRUCT) + result += FFI_ALIGN (sizeof (void*), FFI_SIZEOF_ARG); + else +#endif + result += FFI_ALIGN ((*at)->size, FFI_SIZEOF_ARG); + } + + return result; +} + + +void +ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 1); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 2); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 4); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + *args = (raw++)->ptr; + break; +#endif + + case FFI_TYPE_COMPLEX: + *args = (raw++)->ptr; + break; + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + default: + *args = raw; + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if !FFI_NO_STRUCTS + if ((*tp)->type == FFI_TYPE_STRUCT) + { + *args = (raw++)->ptr; + } + else +#endif + if ((*tp)->type == FFI_TYPE_COMPLEX) + { + *args = (raw++)->ptr; + } + else + { + *args = (void*) raw; + raw += FFI_ALIGN ((*tp)->size, sizeof (void*)) / sizeof (void*); + } + } + +#else +#error "pdp endian not supported" +#endif /* ! PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + (raw++)->uint = *(UINT8*) (*args); + break; + + case FFI_TYPE_SINT8: + (raw++)->sint = *(SINT8*) (*args); + break; + + case FFI_TYPE_UINT16: + (raw++)->uint = *(UINT16*) (*args); + break; + + case FFI_TYPE_SINT16: + (raw++)->sint = *(SINT16*) (*args); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + (raw++)->uint = *(UINT32*) (*args); + break; + + case FFI_TYPE_SINT32: + (raw++)->sint = *(SINT32*) (*args); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + (raw++)->ptr = *args; + break; +#endif + + case FFI_TYPE_COMPLEX: + (raw++)->ptr = *args; + break; + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } +} + +#if !FFI_NATIVE_RAW_API + + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
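The translation helpers above feed ffi_raw_call, defined just below: arguments travel as one packed ffi_raw array instead of an array of pointers. A hedged usage sketch; the add helper is illustrative, each int is assumed to fit one FFI_SIZEOF_ARG slot, and this path only exists where FFI_NO_RAW_API is unset:

#include <ffi.h>
#include <stdio.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
    ffi_raw raw_args[2];   /* one slot per packed argument */
    ffi_arg result;

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

    raw_args[0].sint = 2;
    raw_args[1].sint = 3;
    ffi_raw_call(&cif, FFI_FN(add), &result, raw_args);
    printf("%d\n", (int)result);   /* 5 */
    return 0;
}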
*/ + +void ffi_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_raw *raw = (ffi_raw*)alloca (ffi_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, raw, cl->user_data); +} + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ + +#if FFI_CLOSURES + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_raw_closure (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data) +{ + return ffi_prep_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ + +#endif /* !FFI_NO_RAW_API */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi.c new file mode 100644 index 0000000..c910858 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffi.c @@ -0,0 +1,481 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + Based on MIPS N32/64 port + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#if __riscv_float_abi_double +#define ABI_FLEN 64 +#define ABI_FLOAT double +#elif __riscv_float_abi_single +#define ABI_FLEN 32 +#define ABI_FLOAT float +#endif + +#define NARGREG 8 +#define STKALIGN 16 +#define MAXCOPYARG (2 * sizeof(double)) + +typedef struct call_context +{ +#if ABI_FLEN + ABI_FLOAT fa[8]; +#endif + size_t a[8]; + /* used by the assembly code to in-place construct its own stack frame */ + char frame[16]; +} call_context; + +typedef struct call_builder +{ + call_context *aregs; + int used_integer; + int used_float; + size_t *used_stack; +} call_builder; + +/* integer (not pointer) less than ABI XLEN */ +/* FFI_TYPE_INT does not appear to be used */ +#if __SIZEOF_POINTER__ == 8 +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64) +#else +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32) +#endif + +#if ABI_FLEN +typedef struct { + char as_elements, type1, offset2, type2; +} float_struct_info; + +#if ABI_FLEN >= 64 +#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE) +#else +#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT) +#endif + +static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) { + int i; + if (out == out_end) return out; + if (in->type != FFI_TYPE_STRUCT) { + *(out++) = in; + } else { + for (i = 0; in->elements[i]; i++) + out = flatten_struct(in->elements[i], out, out_end); + } + return out; +} + +/* Structs with at most two fields after flattening, one of which is of + floating point type, are passed in multiple registers if sufficient + registers are available. */ +static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) { + float_struct_info ret = {0, 0, 0, 0}; + ffi_type *fields[3]; + int num_floats, num_ints; + int num_fields = flatten_struct(top, fields, fields + 3) - fields; + + if (num_fields == 1) { + if (IS_FLOAT(fields[0]->type)) { + ret.as_elements = 1; + ret.type1 = fields[0]->type; + } + } else if (num_fields == 2) { + num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type); + num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type); + if (num_floats == 0 || num_floats + num_ints != 2) + return ret; + if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG) + return ret; + if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type)) + return ret; + + ret.type1 = fields[0]->type; + ret.type2 = fields[1]->type; + ret.offset2 = FFI_ALIGN(fields[0]->size, fields[1]->alignment); + ret.as_elements = 1; + } + + return ret; +} +#endif + +/* allocates a single register, float register, or XLEN-sized stack slot to a datum */ +static void marshal_atom(call_builder *cb, int type, void *data) { + size_t value = 0; + switch (type) { + case FFI_TYPE_UINT8: value = *(uint8_t *)data; break; + case FFI_TYPE_SINT8: value = *(int8_t *)data; break; + case FFI_TYPE_UINT16: value = *(uint16_t *)data; break; + case FFI_TYPE_SINT16: value = *(int16_t *)data; break; + /* 32-bit quantities are always sign-extended in the ABI */ + case FFI_TYPE_UINT32: value = *(int32_t *)data; break; + case FFI_TYPE_SINT32: value = *(int32_t *)data; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: value = *(uint64_t *)data; break; + case FFI_TYPE_SINT64: value = *(int64_t *)data; break; +#endif + case FFI_TYPE_POINTER: value = *(size_t *)data; break; + + /* float values may be 
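A hedged illustration of the flattening rule that struct_passed_as_elements above implements, assuming the LP64D ABI (ABI_FLEN == 64) and that enough argument registers remain free; the struct names exist only for this sketch:

/* After flattening, a struct with at most two fields, at least one of them
   floating point, is split across argument registers field by field. */
struct fd  { float f; double d; };   /* -> fa0 + fa1                      */
struct fi  { float f; int i;    };   /* -> fa0 + a0                       */
struct fff { float a, b, c;     };   /* three fields -> integer-ABI rules */
struct big { double m[4];       };   /* 32 bytes > 2*XLEN -> by reference */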
recoded in an implementation-defined way + by hardware conforming to 2.1 or earlier, so use asm to + reinterpret floats as doubles */ +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data)); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data)); + return; +#endif + default: FFI_ASSERT(0); break; + } + + if (cb->used_integer == NARGREG) { + *cb->used_stack++ = value; + } else { + cb->aregs->a[cb->used_integer++] = value; + } +} + +static void unmarshal_atom(call_builder *cb, int type, void *data) { + size_t value; + switch (type) { +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif + } + + if (cb->used_integer == NARGREG) { + value = *cb->used_stack++; + } else { + value = cb->aregs->a[cb->used_integer++]; + } + + switch (type) { + case FFI_TYPE_UINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_SINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_UINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_SINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_UINT32: *(uint32_t *)data = value; break; + case FFI_TYPE_SINT32: *(uint32_t *)data = value; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: *(uint64_t *)data = value; break; + case FFI_TYPE_SINT64: *(uint64_t *)data = value; break; +#endif + case FFI_TYPE_POINTER: *(size_t *)data = value; break; + default: FFI_ASSERT(0); break; + } +} + +/* adds an argument to a call, or a not by reference return value */ +static void marshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + marshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + marshal_atom(cb, type->type, data); + return; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + marshal_atom(cb, FFI_TYPE_POINTER, &data); + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + marshal_atom(cb, type->type, data); + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + memcpy(realign, data, type->size); + if (type->size > 0) + marshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + marshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + } +} + +/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */ +static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + void *pointer; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + unmarshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + 
unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return data; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + unmarshal_atom(cb, type->type, data); + return data; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer); + return pointer; + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + unmarshal_atom(cb, type->type, data); + return data; + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + if (type->size > 0) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + memcpy(data, realign, type->size); + return data; + } +} + +static int passed_by_ref(call_builder *cb, ffi_type *type, int var) { +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) return 0; + } +#endif + + return type->size > 2 * __SIZEOF_POINTER__; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) { + cif->riscv_nfixedargs = cif->nargs; + return FFI_OK; +} + +/* Perform machine dependent cif processing when we have a variadic function */ + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) { + cif->riscv_nfixedargs = nfixedargs; + return FFI_OK; +} + +/* Low level routine for calling functions */ +extern void ffi_call_asm (void *stack, struct call_context *regs, + void (*fn) (void), void *closure) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + /* this is a conservative estimate, assuming a complex return value and + that all remaining arguments are long long / __int128 */ + size_t arg_bytes = cif->nargs <= 3 ? 
0 : + FFI_ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN); + size_t rval_bytes = 0; + if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__) + rval_bytes = FFI_ALIGN(cif->rtype->size, STKALIGN); + size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context); + + /* the assembly code will deallocate all stack data at lower addresses + than the argument region, so we need to allocate the frame and the + return value after the arguments in a single allocation */ + size_t alloc_base; + /* Argument region must be 16-byte aligned */ + if (_Alignof(max_align_t) >= STKALIGN) { + /* since sizeof long double is normally 16, the compiler will + guarantee alloca alignment to at least that much */ + alloc_base = (size_t)alloca(alloc_size); + } else { + alloc_base = FFI_ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN); + } + + if (rval_bytes) + rvalue = (void*)(alloc_base + arg_bytes); + + call_builder cb; + cb.used_float = cb.used_integer = 0; + cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes); + cb.used_stack = (void*)alloc_base; + + int return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + marshal(&cb, &ffi_type_pointer, 0, &rvalue); + + int i; + for (i = 0; i < cif->nargs; i++) + marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]); + + ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure); + + cb.used_float = cb.used_integer = 0; + if (!return_by_ref && rvalue) + unmarshal(&cb, cif->rtype, 0, rvalue); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +extern void ffi_closure_asm(void) FFI_HIDDEN; + +ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) &closure->tramp[0]; + uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm; + + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* we will call ffi_closure_inner with codeloc, not closure, but as long + as the memory is readable it should work */ + + tramp[0] = 0x00000317; /* auipc t1, 0 (i.e. t0 <- codeloc) */ +#if __SIZEOF_POINTER__ == 8 + tramp[1] = 0x01033383; /* ld t2, 16(t1) */ +#else + tramp[1] = 0x01032383; /* lw t2, 16(t1) */ +#endif + tramp[2] = 0x00038067; /* jr t2 */ + tramp[3] = 0x00000013; /* nop */ + tramp[4] = fn; + tramp[5] = fn >> 32; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + __builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE); + + return FFI_OK; +} + +extern void ffi_go_closure_asm (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + closure->tramp = (void *) ffi_go_closure_asm; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Called by the assembly code with aregs pointing to saved argument registers + and stack pointing to the stacked arguments. Return values passed in + registers will be reloaded from aregs. 
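ffi_prep_closure_loc above hand-assembles the 24-byte RV64 trampoline. A hedged little-endian sketch of the same layout; sketch_trampoline and entry are illustrative names, while the encodings are the ones the code above writes:

#include <stdint.h>
#include <string.h>

static void sketch_trampoline(uint32_t tramp[6], uint64_t entry)
{
    tramp[0] = 0x00000317;   /* auipc t1, 0      ; t1 = address of the trampoline  */
    tramp[1] = 0x01033383;   /* ld    t2, 16(t1) ; fetch the 8-byte target below   */
    tramp[2] = 0x00038067;   /* jr    t2         ; tail-jump to ffi_closure_asm    */
    tramp[3] = 0x00000013;   /* nop              ; pads so the target is 8-aligned */
    memcpy(&tramp[4], &entry, sizeof entry);   /* equivalent of fn / fn >> 32 on LE */
}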
*/ +void FFI_HIDDEN +ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stack, call_context *aregs) +{ + void **avalue = alloca(cif->nargs * sizeof(void*)); + /* storage for arguments which will be copied by unmarshal(). We could + theoretically avoid the copies in many cases and use at most 128 bytes + of memory, but allocating disjoint storage for each argument is + simpler. */ + char *astorage = alloca(cif->nargs * MAXCOPYARG); + void *rvalue; + call_builder cb; + int return_by_ref; + int i; + + cb.aregs = aregs; + cb.used_integer = cb.used_float = 0; + cb.used_stack = stack; + + return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + unmarshal(&cb, &ffi_type_pointer, 0, &rvalue); + else + rvalue = alloca(cif->rtype->size); + + for (i = 0; i < cif->nargs; i++) + avalue[i] = unmarshal(&cb, cif->arg_types[i], + i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG); + + fun (cif, rvalue, avalue, user_data); + + if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) { + cb.used_integer = cb.used_float = 0; + marshal(&cb, cif->rtype, 0, rvalue); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget.h new file mode 100644 index 0000000..75e6462 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/ffitarget.h @@ -0,0 +1,69 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - 2014 Michael Knyszek + + Target configuration macros for RISC-V. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef __riscv +#error "libffi was configured for a RISC-V target but this does not appear to be a RISC-V compiler." 
+#endif + +#ifndef LIBFFI_ASM + +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +/* FFI_UNUSED_NN and riscv_unused are to maintain ABI compatibility with a + distributed Berkeley patch from 2014, and can be removed at SONAME bump */ +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_UNUSED_1, + FFI_UNUSED_2, + FFI_UNUSED_3, + FFI_LAST_ABI, + + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#endif /* LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 +#define FFI_EXTRA_CIF_FIELDS unsigned riscv_nfixedargs; unsigned riscv_unused; +#define FFI_TARGET_SPECIFIC_VARIADIC + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv.S new file mode 100644 index 0000000..522d0b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/riscv/sysv.S @@ -0,0 +1,293 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Define aliases so that we can handle all ABIs uniformly */ + +#if __SIZEOF_POINTER__ == 8 +#define PTRS 8 +#define LARG ld +#define SARG sd +#else +#define PTRS 4 +#define LARG lw +#define SARG sw +#endif + +#if __riscv_float_abi_double +#define FLTS 8 +#define FLARG fld +#define FSARG fsd +#elif __riscv_float_abi_single +#define FLTS 4 +#define FLARG flw +#define FSARG fsw +#else +#define FLTS 0 +#endif + +#define fp s0 + + .text + .globl ffi_call_asm + .type ffi_call_asm, @function + .hidden ffi_call_asm +/* + struct call_context { + floatreg fa[8]; + intreg a[8]; + intreg pad[rv32 ? 2 : 0]; + intreg save_fp, save_ra; + } + void ffi_call_asm (size_t *stackargs, struct call_context *regargs, + void (*fn) (void), void *closure); +*/ + +#define FRAME_LEN (8 * FLTS + 8 * PTRS + 16) + +ffi_call_asm: + .cfi_startproc + + /* + We are NOT going to set up an ordinary stack frame. In order to pass + the stacked args to the called function, we adjust our stack pointer to + a0, which is in the _caller's_ alloca area. 
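FRAME_LEN above matches sizeof(call_context) on each configuration. A hedged compile-time check of the arithmetic; the FRAME_LEN_FOR macro exists only for this sketch:

#define FRAME_LEN_FOR(flts, ptrs) (8 * (flts) + 8 * (ptrs) + 16)

_Static_assert(FRAME_LEN_FOR(8, 8) == 144, "rv64 + double-float ABI");
_Static_assert(FRAME_LEN_FOR(8, 4) == 112, "rv32 + double-float ABI");
_Static_assert(FRAME_LEN_FOR(0, 4) ==  48, "rv32 soft-float");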
We establish our own stack + frame at the end of the call_context. + + Anything below the arguments will be freed at this point, although we + preserve the call_context so that it can be read back in the caller. + */ + + .cfi_def_cfa 11, FRAME_LEN # interim CFA based on a1 + SARG fp, FRAME_LEN - 2*PTRS(a1) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(a1) + .cfi_offset 1, -1*PTRS + + addi fp, a1, FRAME_LEN + mv sp, a0 + .cfi_def_cfa 8, 0 # our frame is fully set up + + # Load arguments + mv t1, a2 + mv t2, a3 + +#if FLTS + FLARG fa0, -FRAME_LEN+0*FLTS(fp) + FLARG fa1, -FRAME_LEN+1*FLTS(fp) + FLARG fa2, -FRAME_LEN+2*FLTS(fp) + FLARG fa3, -FRAME_LEN+3*FLTS(fp) + FLARG fa4, -FRAME_LEN+4*FLTS(fp) + FLARG fa5, -FRAME_LEN+5*FLTS(fp) + FLARG fa6, -FRAME_LEN+6*FLTS(fp) + FLARG fa7, -FRAME_LEN+7*FLTS(fp) +#endif + + LARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + LARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + LARG a2, -FRAME_LEN+8*FLTS+2*PTRS(fp) + LARG a3, -FRAME_LEN+8*FLTS+3*PTRS(fp) + LARG a4, -FRAME_LEN+8*FLTS+4*PTRS(fp) + LARG a5, -FRAME_LEN+8*FLTS+5*PTRS(fp) + LARG a6, -FRAME_LEN+8*FLTS+6*PTRS(fp) + LARG a7, -FRAME_LEN+8*FLTS+7*PTRS(fp) + + /* Call */ + jalr t1 + + /* Save return values - only a0/a1 (fa0/fa1) are used */ +#if FLTS + FSARG fa0, -FRAME_LEN+0*FLTS(fp) + FSARG fa1, -FRAME_LEN+1*FLTS(fp) +#endif + + SARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + SARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + + /* Restore and return */ + addi sp, fp, -FRAME_LEN + .cfi_def_cfa 2, FRAME_LEN + LARG ra, -1*PTRS(fp) + .cfi_restore 1 + LARG fp, -2*PTRS(fp) + .cfi_restore 8 + ret + .cfi_endproc + .size ffi_call_asm, .-ffi_call_asm + + +/* + ffi_closure_asm. Expects address of the passed-in ffi_closure in t1. + void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_closure_asm + .hidden ffi_closure_asm + .type ffi_closure_asm, @function +ffi_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, FFI_TRAMPOLINE_SIZE+0*PTRS(t1) + LARG a1, FFI_TRAMPOLINE_SIZE+1*PTRS(t1) + LARG a2, FFI_TRAMPOLINE_SIZE+2*PTRS(t1) + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_closure_asm, .-ffi_closure_asm + +/* + ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2. 
+ void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_go_closure_asm + .hidden ffi_go_closure_asm + .type ffi_go_closure_asm, @function +ffi_go_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, 1*PTRS(t2) + LARG a1, 2*PTRS(t2) + mv a2, t2 + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_go_closure_asm, .-ffi_go_closure_asm diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi.c new file mode 100644 index 0000000..4035b6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffi.c @@ -0,0 +1,756 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2000, 2007 Software AG + Copyright (c) 2008 Red Hat, Inc + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +/*====================================================================*/ +/* Includes */ +/* -------- */ +/*====================================================================*/ + +#include +#include +#include +#include "internal.h" + +/*====================== End of Includes =============================*/ + +/*====================================================================*/ +/* Defines */ +/* ------- */ +/*====================================================================*/ + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 5 + +/* Maximum number of FPRs available for argument passing. */ +#ifdef __s390x__ +#define MAX_FPRARGS 4 +#else +#define MAX_FPRARGS 2 +#endif + +/* Round to multiple of 16. */ +#define ROUND_SIZE(size) (((size) + 15) & ~15) + +/*===================== End of Defines ===============================*/ + +/*====================================================================*/ +/* Externals */ +/* --------- */ +/*====================================================================*/ + +struct call_frame +{ + void *back_chain; + void *eos; + unsigned long gpr_args[5]; + unsigned long gpr_save[9]; + unsigned long long fpr_args[4]; +}; + +extern void FFI_HIDDEN ffi_call_SYSV(struct call_frame *, unsigned, void *, + void (*fn)(void), void *); + +extern void ffi_closure_SYSV(void); +extern void ffi_go_closure_SYSV(void); + +/*====================== End of Externals ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_check_struct_type. */ +/* */ +/* Function - Determine if a structure can be passed within a */ +/* general purpose or floating point register. */ +/* */ +/*====================================================================*/ + +static int +ffi_check_struct_type (ffi_type *arg) +{ + size_t size = arg->size; + + /* If the struct has just one element, look at that element + to find out whether to consider the struct as floating point. */ + while (arg->type == FFI_TYPE_STRUCT + && arg->elements[0] && !arg->elements[1]) + arg = arg->elements[0]; + + /* Structs of size 1, 2, 4, and 8 are passed in registers, + just like the corresponding int/float types. */ + switch (size) + { + case 1: + return FFI_TYPE_UINT8; + + case 2: + return FFI_TYPE_UINT16; + + case 4: + if (arg->type == FFI_TYPE_FLOAT) + return FFI_TYPE_FLOAT; + else + return FFI_TYPE_UINT32; + + case 8: + if (arg->type == FFI_TYPE_DOUBLE) + return FFI_TYPE_DOUBLE; + else + return FFI_TYPE_UINT64; + + default: + break; + } + + /* Other structs are passed via a pointer to the data. */ + return FFI_TYPE_POINTER; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_cif_machdep. */ +/* */ +/* Function - Perform machine dependent CIF processing. */ +/* */ +/*====================================================================*/ + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t struct_size = 0; + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Determine return value handling. */ + + switch (cif->rtype->type) + { + /* Void is easy. */ + case FFI_TYPE_VOID: + cif->flags = FFI390_RET_VOID; + break; + + /* Structures and complex are returned via a hidden pointer. 
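A hedged illustration of what ffi_check_struct_type above decides for a few aggregates; the struct names exist only for this sketch:

struct two_shorts { short a, b;  };   /* size 4, not float -> FFI_TYPE_UINT32 */
struct one_float  { float f;     };   /* single element    -> FFI_TYPE_FLOAT  */
struct one_double { double d;    };   /* size 8, double    -> FFI_TYPE_DOUBLE */
struct three_ints { int a, b, c; };   /* size 12 -> FFI_TYPE_POINTER: copied,
                                         then passed by reference             */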
*/ + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; /* We need one GPR to pass the pointer. */ + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + cif->flags = FFI390_RET_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = FFI390_RET_DOUBLE; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; + break; +#endif + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI390_RET_INT64; + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* These are to be extended to word size. */ +#ifdef __s390x__ + cif->flags = FFI390_RET_INT64; +#else + cif->flags = FFI390_RET_INT32; +#endif + break; + + default: + FFI_ASSERT (0); + break; + } + + /* Now for the arguments. */ + + for (ptr = cif->arg_types, i = cif->nargs; + i > 0; + i--, ptr++) + { + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. */ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, we must reserve space + to copy its data for proper call-by-value semantics. */ + if (type == FFI_TYPE_POINTER) + struct_size += ROUND_SIZE ((*ptr)->size); + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + /* The first MAX_FPRARGS floating point arguments + go in FPRs, the rest overflow to the stack. */ + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov++; + break; + + /* On 31-bit machines, 64-bit integers are passed in GPR pairs, + if one is still available, or else on the stack. If only one + register is free, skip the register (it won't be used for any + subsequent argument either). */ + +#ifndef __s390x__ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + n_gpr += 2; + else + n_ov += 2; + break; +#endif + + /* Everything else is passed in GPRs (until MAX_GPRARGS + have been used) or overflows to the stack. */ + + default: + if (n_gpr < MAX_GPRARGS) + n_gpr++; + else + n_ov++; + break; + } + } + + /* Total stack space as required for overflow arguments + and temporary structure copies. */ + + cif->bytes = ROUND_SIZE (n_ov * sizeof (long)) + struct_size; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_call. */ +/* */ +/* Function - Call the FFI routine. 
*/ +/* */ +/*====================================================================*/ + +static void +ffi_call_int(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue, + void *closure) +{ + int ret_type = cif->flags; + size_t rsize = 0, bytes = cif->bytes; + unsigned char *stack, *p_struct; + struct call_frame *frame; + unsigned long *p_ov, *p_gpr; + unsigned long long *p_fpr; + int n_fpr, n_gpr, n_ov, i, n; + ffi_type **arg_types; + + FFI_ASSERT (cif->abi == FFI_SYSV); + + /* If we don't have a return value, we need to fake one. */ + if (rvalue == NULL) + { + if (ret_type & FFI390_RET_IN_MEM) + rsize = cif->rtype->size; + else + ret_type = FFI390_RET_VOID; + } + + /* The stack space will be filled with those areas: + + dummy structure return (highest addresses) + FPR argument register save area + GPR argument register save area + stack frame for ffi_call_SYSV + temporary struct copies + overflow argument area (lowest addresses) + + We set up the following pointers: + + p_fpr: bottom of the FPR area (growing upwards) + p_gpr: bottom of the GPR area (growing upwards) + p_ov: bottom of the overflow area (growing upwards) + p_struct: top of the struct copy area (growing downwards) + + All areas are kept aligned to twice the word size. + + Note that we're going to create the stack frame for both + ffi_call_SYSV _and_ the target function right here. This + works because we don't make any function calls with more + than 5 arguments (indeed only memcpy and ffi_call_SYSV), + and thus we don't have any stacked outgoing parameters. */ + + stack = alloca (bytes + sizeof(struct call_frame) + rsize); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + /* Link the new frame back to the one from this function. */ + frame->back_chain = __builtin_frame_address (0); + + /* Fill in all of the argument stuff. */ + p_ov = (unsigned long *)stack; + p_struct = (unsigned char *)frame; + p_gpr = frame->gpr_args; + p_fpr = frame->fpr_args; + n_fpr = n_gpr = n_ov = 0; + + /* If we returning a structure then we set the first parameter register + to the address of where we are returning this structure. */ + if (cif->flags & FFI390_RET_IN_MEM) + p_gpr[n_gpr++] = (uintptr_t) rvalue; + + /* Now for the arguments. */ + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + void *arg = avalue[i]; + int type = ty->type; + ffi_arg val; + + restart: + switch (type) + { + case FFI_TYPE_SINT8: + val = *(SINT8 *)arg; + goto do_int; + case FFI_TYPE_UINT8: + val = *(UINT8 *)arg; + goto do_int; + case FFI_TYPE_SINT16: + val = *(SINT16 *)arg; + goto do_int; + case FFI_TYPE_UINT16: + val = *(UINT16 *)arg; + goto do_int; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + val = *(SINT32 *)arg; + goto do_int; + case FFI_TYPE_UINT32: + val = *(UINT32 *)arg; + goto do_int; + case FFI_TYPE_POINTER: + val = *(uintptr_t *)arg; + do_int: + *(n_gpr < MAX_GPRARGS ? 
p_gpr + n_gpr++ : p_ov + n_ov++) = val; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + val = *(UINT64 *)arg; + goto do_int; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + p_gpr[n_gpr++] = ((UINT32 *) arg)[0], + p_gpr[n_gpr++] = ((UINT32 *) arg)[1]; + else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + break; + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = *(UINT64 *) arg; + else + { +#ifdef __s390x__ + p_ov[n_ov++] = *(UINT64 *) arg; +#else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + } + break; + + case FFI_TYPE_FLOAT: + val = *(UINT32 *)arg; + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = (UINT64)val << 32; + else + p_ov[n_ov++] = val; + break; + + case FFI_TYPE_STRUCT: + /* Check how a structure type is passed. */ + type = ffi_check_struct_type (ty); + /* Some structures are passed via a type they contain. */ + if (type != FFI_TYPE_POINTER) + goto restart; + /* ... otherwise, passed by reference. fallthru. */ + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 16-byte long double is passed via reference. */ +#endif + case FFI_TYPE_COMPLEX: + /* Complex types are passed via reference. */ + p_struct -= ROUND_SIZE (ty->size); + memcpy (p_struct, arg, ty->size); + val = (uintptr_t)p_struct; + goto do_int; + + default: + FFI_ASSERT (0); + break; + } + } + + ffi_call_SYSV (frame, ret_type & FFI360_RET_MASK, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_closure_helper_SYSV. */ +/* */ +/* Function - Call a FFI closure target function. */ +/* */ +/*====================================================================*/ + +void FFI_HIDDEN +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + unsigned long *p_gpr, + unsigned long long *p_fpr, + unsigned long *p_ov) +{ + unsigned long long ret_buffer; + + void *rvalue = &ret_buffer; + void **avalue; + void **p_arg; + + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Allocate buffer for argument list pointers. */ + p_arg = avalue = alloca (cif->nargs * sizeof (void *)); + + /* If we returning a structure, pass the structure address + directly to the target function. Otherwise, have the target + function store the return value to the GPR save area. */ + if (cif->flags & FFI390_RET_IN_MEM) + rvalue = (void *) p_gpr[n_gpr++]; + + /* Now for the arguments. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, p_arg++, ptr++) + { + int deref_struct_pointer = 0; + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. 
*/ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, remember to + retrieve the pointer later. */ + if (type == FFI_TYPE_POINTER) + deref_struct_pointer = 1; + } + + /* Pointers are passed like UINTs of the same size. */ + if (type == FFI_TYPE_POINTER) + { +#ifdef __s390x__ + type = FFI_TYPE_UINT64; +#else + type = FFI_TYPE_UINT32; +#endif + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = &p_ov[n_ov], + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr++]; + else + *p_arg = &p_ov[n_ov++]; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr], n_gpr += 2; + else + *p_arg = &p_ov[n_ov], n_ov += 2; +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 4; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 2; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 2; + break; + + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 1; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 1; + break; + + default: + FFI_ASSERT (0); + break; + } + + /* If this is a struct passed via pointer, we need to + actually retrieve that pointer. */ + if (deref_struct_pointer) + *p_arg = *(void **)*p_arg; + } + + + /* Call the target function. */ + (fun) (cif, rvalue, avalue, user_data); + + /* Convert the return value. */ + switch (cif->rtype->type) + { + /* Void is easy, and so is struct. */ + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: +#endif + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + p_fpr[0] = (long long) *(unsigned int *) rvalue << 32; + break; + + case FFI_TYPE_DOUBLE: + p_fpr[0] = *(unsigned long long *) rvalue; + break; + + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + p_gpr[0] = *(unsigned long *) rvalue; +#else + p_gpr[0] = ((unsigned long *) rvalue)[0], + p_gpr[1] = ((unsigned long *) rvalue)[1]; +#endif + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT8: + p_gpr[0] = *(unsigned long *) rvalue; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT8: + p_gpr[0] = *(signed long *) rvalue; + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_closure_loc. */ +/* */ +/* Function - Prepare a FFI closure. 
*/ +/* */ +/*====================================================================*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + static unsigned short const template[] = { + 0x0d10, /* basr %r1,0 */ +#ifndef __s390x__ + 0x9801, 0x1006, /* lm %r0,%r1,6(%r1) */ +#else + 0xeb01, 0x100e, 0x0004, /* lmg %r0,%r1,14(%r1) */ +#endif + 0x07f1 /* br %r1 */ + }; + + unsigned long *tramp = (unsigned long *)&closure->tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + memcpy (tramp, template, sizeof(template)); + tramp[2] = (unsigned long)codeloc; + tramp[3] = (unsigned long)&ffi_closure_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/* Build a Go language closure. */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_SYSV; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget.h new file mode 100644 index 0000000..d8a4ee4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/ffitarget.h @@ -0,0 +1,70 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for S390. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#if defined (__s390x__) +#ifndef S390X +#define S390X +#endif +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#ifdef S390X +#define FFI_TRAMPOLINE_SIZE 32 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal.h new file mode 100644 index 0000000..b875578 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/internal.h @@ -0,0 +1,11 @@ +/* If these values change, sysv.S must be adapted! */ +#define FFI390_RET_DOUBLE 0 +#define FFI390_RET_FLOAT 1 +#define FFI390_RET_INT64 2 +#define FFI390_RET_INT32 3 +#define FFI390_RET_VOID 4 + +#define FFI360_RET_MASK 7 +#define FFI390_RET_IN_MEM 8 + +#define FFI390_RET_STRUCT (FFI390_RET_VOID | FFI390_RET_IN_MEM) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv.S new file mode 100644 index 0000000..c4b5006 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/s390/sysv.S @@ -0,0 +1,325 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2000 Software AG + Copyright (c) 2008 Red Hat, Inc. + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + +#ifndef __s390x__ + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. 
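The ffi_call_SYSV entry points that follow are the architecture-specific back ends of libffi's portable ffi_call() API, which the s390 ffi.c earlier in this diff wires up. For orientation, a minimal caller-side sketch using the public API (standard libffi usage, not code from this diff; puts() is just an assumed convenient target function):

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[1] = { &ffi_type_pointer };
    char *s = "Hello from libffi";
    void *arg_values[1] = { &s };
    ffi_arg rc;

    /* Describe int puts(const char *).  ffi_prep_cif() runs the
       machine-dependent ffi_prep_cif_machdep() seen earlier to fill in
       cif->flags and cif->bytes for the assembly below. */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK)
        ffi_call(&cif, FFI_FN(puts), &rc, arg_values);

    return 0;
}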
+ .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + st %r6,44(%r2) # Save registers + stm %r12,%r14,48(%r2) + lr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 44 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_def_cfa_register r13 + st %r2,0(%r15) # Set up back chain + sla %r3,3 # ret_type *= 8 + lr %r12,%r4 # Save ret_addr + lr %r1,%r5 # Save fun + lr %r0,%r6 # Install static chain + + # Set return address, so that there is only one indirect jump. +#ifdef HAVE_AS_S390_ZARCH + larl %r14,.Ltable + ar %r14,%r3 +#else + basr %r14,0 +0: la %r14,.Ltable-0b(%r14,%r3) +#endif + + lm %r2,%r6,8(%r13) # Load arguments + ld %f0,64(%r13) + ld %f2,72(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_FLOAT + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + st %r3,4(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_INT32 + st %r2,0(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_VOID +.Ldone: + l %r14,56(%r13) + l %r12,48(%r13) + l %r6,44(%r13) + l %r13,52(%r13) + .cfi_restore 14 + .cfi_restore 13 + .cfi_restore 12 + .cfi_restore 6 + .cfi_def_cfa r15, 96 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Load closure -> user_data + l %r2,4(%r4) # ->cif + l %r3,8(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Closure + l %r2,16(%r4) # ->cif + l %r3,20(%r4) # ->fun + l %r4,24(%r4) # ->user_data +.Ldoclosure: + stm %r12,%r15,48(%r15) # Save registers + lr %r12,%r15 + .cfi_def_cfa_register r12 + .cfi_rel_offset r6, 24 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_rel_offset r15, 60 +#ifndef HAVE_AS_S390_ZARCH + basr %r13,0 # Set up base register +.Lcbase: + l %r1,.Lchelper-.Lcbase(%r13) # Get helper function +#endif + ahi %r15,-96-8 # Set up stack frame + st %r12,0(%r15) # Set up back chain + + std %f0,64(%r12) # Save fp arguments + std %f2,72(%r12) + + la %r5,96(%r12) # Overflow + st %r5,96(%r15) + la %r6,64(%r12) # FPRs + la %r5,8(%r12) # GPRs +#ifdef HAVE_AS_S390_ZARCH + brasl %r14,ffi_closure_helper_SYSV +#else + bas %r14,0(%r1,%r13) # Call helper +#endif + + lr %r15,%r12 + .cfi_def_cfa_register r15 + lm %r12,%r14,48(%r12) # Restore saved registers + l %r6,24(%r15) + ld %f0,64(%r15) # Load return registers + lm %r2,%r3,8(%r15) + br %r14 + .cfi_endproc + +#ifndef HAVE_AS_S390_ZARCH + .align 4 +.Lchelper: + .long ffi_closure_helper_SYSV-.Lcbase +#endif + + .size ffi_closure_SYSV,.-ffi_closure_SYSV + +#else + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. 
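The closure entry points above (ffi_closure_SYSV and ffi_go_closure_SYSV) are reached through the small trampoline written by ffi_prep_closure_loc() in the s390 ffi.c earlier in this diff. A hedged sketch of the public closure API that exercises that path, following the pattern documented by libffi (puts_binding is an assumed handler name, not part of the library):

#include <ffi.h>
#include <stdio.h>

/* Handler invoked by the closure trampoline: cif describes the call,
   ret receives the return value, args points at the incoming arguments. */
static void puts_binding(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void)cif; (void)user_data;
    *(ffi_arg *)ret = puts(*(char **)args[0]);
}

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[1] = { &ffi_type_pointer };
    void *codeloc;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);
    int rc = -1;

    if (closure != NULL)
    {
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK
            && ffi_prep_closure_loc(closure, &cif, puts_binding, NULL, codeloc) == FFI_OK)
        {
            /* codeloc is now callable as int (*)(char *); the generated
               trampoline lands in the arch-specific ffi_closure entry. */
            rc = ((int (*)(char *))codeloc)("Hello from a libffi closure");
        }
        ffi_closure_free(closure);
    }
    return rc >= 0 ? 0 : 1;
}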
+ .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + stg %r6,88(%r2) # Save registers + stmg %r12,%r14,96(%r2) + lgr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 88 + .cfi_rel_offset r12, 96 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_def_cfa_register r13 + stg %r2,0(%r15) # Set up back chain + larl %r14,.Ltable # Set up return address + slag %r3,%r3,3 # ret_type *= 8 + lgr %r12,%r4 # Save ret_addr + lgr %r1,%r5 # Save fun + lgr %r0,%r6 # Install static chain + agr %r14,%r3 + lmg %r2,%r6,16(%r13) # Load arguments + ld %f0,128(%r13) + ld %f2,136(%r13) + ld %f4,144(%r13) + ld %f6,152(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_DOUBLE + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + stg %r2,0(%r12) + + .balign 8 +# FFI390_RET_INT32 + # Never used, as we always store type ffi_arg. + # But the stg above is 6 bytes and we cannot + # jump around this case, so fall through. + nop + nop + + .balign 8 +# FFI390_RET_VOID +.Ldone: + lg %r14,112(%r13) + lg %r12,96(%r13) + lg %r6,88(%r13) + lg %r13,104(%r13) + .cfi_restore r14 + .cfi_restore r13 + .cfi_restore r12 + .cfi_restore r6 + .cfi_def_cfa r15, 160 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure -> user_data + lg %r2,8(%r4) # ->cif + lg %r3,16(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + .size ffi_go_closure_SYSV,.-ffi_go_closure_SYSV + + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure + lg %r2,32(%r4) # ->cif + lg %r3,40(%r4) # ->fun + lg %r4,48(%r4) # ->user_data +.Ldoclosure: + stmg %r13,%r15,104(%r15) # Save registers + lgr %r13,%r15 + .cfi_def_cfa_register r13 + .cfi_rel_offset r6, 48 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_rel_offset r15, 120 + aghi %r15,-160-16 # Set up stack frame + stg %r13,0(%r15) # Set up back chain + + std %f0,128(%r13) # Save fp arguments + std %f2,136(%r13) + std %f4,144(%r13) + std %f6,152(%r13) + la %r5,160(%r13) # Overflow + stg %r5,160(%r15) + la %r6,128(%r13) # FPRs + la %r5,16(%r13) # GPRs + brasl %r14,ffi_closure_helper_SYSV # Call helper + + lgr %r15,%r13 + .cfi_def_cfa_register r15 + lmg %r13,%r14,104(%r13) # Restore saved registers + lg %r6,48(%r15) + ld %f0,128(%r15) # Load return registers + lg %r2,16(%r15) + br %r14 + .cfi_endproc + .size ffi_closure_SYSV,.-ffi_closure_SYSV +#endif /* !s390x */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi.c new file mode 100644 index 0000000..9ec86bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffi.c @@ -0,0 +1,717 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2002-2008, 2012 Kaz Kojima + Copyright (c) 2008 Red Hat, Inc. 
+ + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 4 +#if defined(__SH4__) +#define NFREGARG 8 +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +/* If the structure has essentially an unique element, return its type. */ +static int +simple_type (ffi_type *arg) +{ + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + else if (arg->elements[1]) + return FFI_TYPE_STRUCT; + + return simple_type (arg->elements[0]); +} + +static int +return_type (ffi_type *arg) +{ + unsigned short type; + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + type = simple_type (arg->elements[0]); + if (! arg->elements[1]) + { + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + return FFI_TYPE_INT; + + default: + return type; + } + } + + /* gcc uses r0/r1 pair for some kind of structures. */ + if (arg->size <= 2 * sizeof (int)) + { + int i = 0; + ffi_type *e; + + while ((e = arg->elements[i++])) + { + type = simple_type (e); + switch (type) + { + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + return FFI_TYPE_UINT64; + + default: + break; + } + } + } + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register int tmp; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + int greg, ireg; +#if defined(__SH4__) + int freg = 0; +#endif + + tmp = 0; + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + ireg = STRUCT_VALUE_ADDRESS_WITH_ARG ? 1 : 0; + } + else + ireg = 0; + + /* Set arguments for registers. 
*/ + greg = ireg; + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + /* Set arguments on stack. */ + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! 
defined(__SH4__)) + else if (greg < NGREGARG) + { + greg = NGREGARG; + continue; + } +#endif + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; +#if defined(__SH4__) + int freg = 0; +#endif + + cif->flags = 0; + + greg = ((return_type (cif->rtype) == FFI_TYPE_STRUCT) && + STRUCT_VALUE_ADDRESS_WITH_ARG) ? 1 : 0; + +#if defined(__SH4__) + for (i = j = 0; i < cif->nargs && j < 12; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + if (freg >= NFREGARG) + continue; + freg++; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + case FFI_TYPE_DOUBLE: + if ((freg + 1) >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + default: + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 >= NGREGARG) + continue; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + break; + } + } +#else + for (i = j = 0; i < cif->nargs && j < 4; i++) + { + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + n = NGREGARG - greg; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + } +#endif + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags += (unsigned) (return_type (cif->rtype)) << 24; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += (unsigned) cif->rtype->type << 24; + break; + + default: + cif->flags += FFI_TYPE_INT << 24; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, + fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +#if defined(__SH4__) +extern void __ic_invalidate (void *line); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + unsigned int insn; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Set T bit if the function returns a struct pointed with R2. */ + insn = (return_type (cif->rtype) == FFI_TYPE_STRUCT + ? 
0x0018 /* sett */ + : 0x0008 /* clrt */); + +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0xd301d102; + tramp[1] = 0x0000412b | (insn << 16); +#else + tramp[0] = 0xd102d301; + tramp[1] = 0x412b0000 | insn; +#endif + *(void **) &tramp[2] = (void *)codeloc; /* ctx */ + *(void **) &tramp[3] = (void *)ffi_closure_SYSV; /* funaddr */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#if defined(__SH4__) + /* Flush the icache. */ + __ic_invalidate(codeloc); +#endif + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +#ifdef __LITTLE_ENDIAN__ +#define OFS_INT8 0 +#define OFS_INT16 0 +#else +#define OFS_INT8 3 +#define OFS_INT16 2 +#endif + +int +ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue, + unsigned long *pgr, unsigned long *pfr, + unsigned long *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int ireg, greg = 0; +#if defined(__SH4__) + int freg = 0; +#endif + ffi_cif *cif; + + cif = closure->cif; + avalue = alloca(cif->nargs * sizeof(void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (cif->rtype->type == FFI_TYPE_STRUCT && STRUCT_VALUE_ADDRESS_WITH_ARG) + { + rvalue = (void *) *pgr++; + ireg = 1; + } + else + ireg = 0; + + cif = closure->cif; + greg = ireg; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pgr) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pgr) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pgr; + break; + + default: + FFI_ASSERT(0); + } + pgr++; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + avalue[i] = pfr; + pfr++; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + avalue[i] = pgr; + pgr++; + } + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + if (freg & 1) + pfr++; + freg = (freg + 1) & ~1; + freg += 2; + avalue[i] = pfr; + pfr += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + avalue[i] = pgr; + pgr += n; + } + } + + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pst) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pst) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pst; + break; + + default: + FFI_ASSERT(0); + } + pst++; + } + else if (z == 
sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + avalue[i] = pst; + pst++; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + avalue[i] = pst; + pst += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! defined(__SH4__)) + else if (greg < NGREGARG) + { + greg += n; + pst += greg - NGREGARG; + continue; + } +#endif + avalue[i] = pst; + pst += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. */ + return return_type (cif->rtype); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget.h new file mode 100644 index 0000000..a36bf42 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/ffitarget.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv.S new file mode 100644 index 0000000..5be7516 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh/sysv.S @@ -0,0 +1,850 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2002, 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +.text + + # r4: ffi_prep_args + # r5: &ecif + # r6: bytes + # r7: flags + # sp+0: rvalue + # sp+4: fn + + # This assumes we are using gas. 
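The flag word handed to this routine in r7 is the encoding built by ffi_prep_cif_machdep() in sh/ffi.c above: on SH-4, each register-passed argument gets a two-bit code (FFI_TYPE_INT for an integer word, FFI_TYPE_FLOAT, or FFI_TYPE_DOUBLE), and the return-type code sits in bits 24 and up. A rough decoding sketch for illustration only; dump_sh_flags() is a hypothetical helper, not part of libffi:

#include <ffi.h>
#include <stdio.h>

static void dump_sh_flags(unsigned flags)
{
    printf("return type code: %u\n", flags >> 24);

    for (int slot = 0; slot < 12; slot++)
    {
        unsigned code = (flags >> (2 * slot)) & 3;
        if (code == 0)           /* unused slots stay zero */
            break;
        switch (code)
        {
        case FFI_TYPE_INT:    printf("slot %d: integer word (r4-r7)\n", slot); break;
        case FFI_TYPE_FLOAT:  printf("slot %d: float (fr4-fr11)\n", slot);     break;
        case FFI_TYPE_DOUBLE: printf("slot %d: double (fr pair)\n", slot);     break;
        }
    }
}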
+ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + mov.l r8,@-r15 +.LCFI0: + mov.l r9,@-r15 +.LCFI1: + mov.l r10,@-r15 +.LCFI2: + mov.l r12,@-r15 +.LCFI3: + mov.l r14,@-r15 +.LCFI4: + sts.l pr,@-r15 +.LCFI5: + mov r15,r14 +.LCFI6: +#if defined(__SH4__) + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r1 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + mov #4,r3 + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r1,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_f + + mov r3,r0 + and #1,r0 + tst r0,r0 + bt 1f + add #1,r3 +1: + mov #12,r0 + cmp/hs r0,r3 + bt/s 3f + shlr2 r1 + bsr L_pop_d + nop +3: + add #2,r3 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r3,r0 + add r0,r0 + add r3,r0 + add #-12,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_f: + cmp/eq #FFI_TYPE_FLOAT,r0 + bf L_pass_i + + mov #12,r0 + cmp/hs r0,r3 + bt/s 2f + shlr2 r1 + bsr L_pop_f + nop +2: + add #1,r3 + bra L_pass + add #-4,r8 + +L_pop_f: + mov r3,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r1 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr1,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr0,@r1 +#else + fmov.s fr0,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr1,@r1 +#endif + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_f + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_f: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bf L_ret_i + + mov.l @(24,r14),r1 + bra L_epilogue + fmov.s fr0,@r1 + +L_ret_i: + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue + + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#else + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r3 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r3,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_i + + mov r15,r0 + and #7,r0 + tst r0,r0 + bt 1f + add #4,r15 +1: + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_d + nop +2: + add #2,r2 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r2,r0 + add r0,r0 + add r2,r0 + add #-12,r0 + add r0,r0 + braf r0 + nop + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + rts + mov.l @r15+,r7 + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_i + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_i: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bt 1f + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue +1: + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#endif +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + +.globl ffi_closure_helper_SYSV + +ENTRY(ffi_closure_SYSV) +.LFB2: + mov.l r7,@-r15 +.LCFI7: + mov.l r6,@-r15 +.LCFI8: + mov.l r5,@-r15 +.LCFI9: + mov.l r4,@-r15 +.LCFIA: + mov.l r14,@-r15 +.LCFIB: + sts.l pr,@-r15 + + /* Stack layout: + xx bytes (on stack parameters) + 16 bytes (register parameters) + 4 bytes (saved frame pointer) + 4 bytes (saved return address) + 32 bytes (floating register parameters, SH-4 only) + 8 bytes (result) + 4 bytes (pad) + 4 bytes (5th arg) + <- new stack pointer + */ +.LCFIC: +#if defined(__SH4__) + add #-48,r15 +#else + add #-16,r15 +#endif +.LCFID: + mov r15,r14 +.LCFIE: + +#if defined(__SH4__) + mov r14,r1 + add #48,r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr10,@-r1 + fmov.s fr11,@-r1 + fmov.s fr8,@-r1 + fmov.s fr9,@-r1 + fmov.s fr6,@-r1 + fmov.s fr7,@-r1 + fmov.s fr4,@-r1 + fmov.s fr5,@-r1 +#else + fmov.s fr11,@-r1 + fmov.s fr10,@-r1 + fmov.s fr9,@-r1 + fmov.s fr8,@-r1 + fmov.s fr7,@-r1 + fmov.s fr6,@-r1 + fmov.s fr5,@-r1 + fmov.s fr4,@-r1 +#endif + mov r1,r7 + mov r14,r6 + add #56,r6 +#else + mov r14,r6 + add #24,r6 +#endif + + bt/s 10f + mov r2, r5 + mov r14,r1 + add #8,r1 + mov r1,r5 +10: + + mov r14,r1 +#if defined(__SH4__) + add #72,r1 +#else + add #40,r1 +#endif + mov.l r1,@r14 + +#ifdef PIC + mov.l L_got,r1 + mova L_got,r0 + add r0,r1 + mov.l L_helper,r0 + add r1,r0 +#else + mov.l L_helper,r0 +#endif + jsr @r0 + mov r3,r4 + + shll r0 + mov r0,r1 + mova L_table,r0 + add r1,r0 + mov.w @r0,r0 + mov r14,r2 + braf r0 + add #8,r2 +0: + .align 2 +#ifdef PIC +L_got: + .long _GLOBAL_OFFSET_TABLE_ +L_helper: + .long ffi_closure_helper_SYSV@GOTOFF +#else +L_helper: + .long ffi_closure_helper_SYSV +#endif +L_table: + .short L_case_v - 0b /* FFI_TYPE_VOID */ + .short L_case_i - 0b /* FFI_TYPE_INT */ +#if defined(__SH4__) + .short L_case_f - 0b /* FFI_TYPE_FLOAT */ + .short L_case_d - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_d - 0b /* FFI_TYPE_LONGDOUBLE */ +#else + .short L_case_i - 0b /* FFI_TYPE_FLOAT */ + .short L_case_ll - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_ll - 0b /* FFI_TYPE_LONGDOUBLE */ +#endif + .short L_case_uq - 0b /* FFI_TYPE_UINT8 */ + .short L_case_q - 0b /* FFI_TYPE_SINT8 */ + .short L_case_uh - 0b /* FFI_TYPE_UINT16 */ + .short L_case_h - 0b /* FFI_TYPE_SINT16 */ + .short L_case_i - 0b /* FFI_TYPE_UINT32 */ + .short L_case_i - 0b /* FFI_TYPE_SINT32 */ + .short L_case_ll - 0b /* FFI_TYPE_UINT64 */ + .short L_case_ll - 0b /* FFI_TYPE_SINT64 */ + .short L_case_v - 0b /* FFI_TYPE_STRUCT */ + .short L_case_i - 0b /* FFI_TYPE_POINTER */ + +#if defined(__SH4__) +L_case_d: +#ifdef __LITTLE_ENDIAN__ + fmov.s @r2+,fr1 + bra L_case_v + fmov.s @r2,fr0 +#else + fmov.s @r2+,fr0 + 
bra L_case_v + fmov.s @r2,fr1 +#endif + +L_case_f: + bra L_case_v + fmov.s @r2,fr0 +#endif + +L_case_ll: + mov.l @r2+,r0 + bra L_case_v + mov.l @r2,r1 + +L_case_i: + bra L_case_v + mov.l @r2,r0 + +L_case_q: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + bra L_case_v + mov.b @r2,r0 + +L_case_uq: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + mov.b @r2,r0 + bra L_case_v + extu.b r0,r0 + +L_case_h: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + bra L_case_v + mov.w @r2,r0 + +L_case_uh: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + mov.w @r2,r0 + extu.w r0,r0 + /* fall through */ + +L_case_v: +#if defined(__SH4__) + add #48,r15 +#else + add #16,r15 +#endif + lds.l @r15+,pr + mov.l @r15+,r14 + rts + add #16,r15 +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte 0x7c /* sleb128 -4; CIE Data Alignment Factor */ + .byte 0x11 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0xf /* uleb128 0xf */ + .byte 0x0 /* uleb128 0x0 */ + .align 2 +.LECIE1: +.LSFDE1: + .4byte .LEFDE1-.LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte .LASFDE1-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte .LFE1-.LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI0-.LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI1-.LCFI0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI2-.LCFI1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI3-.LCFI2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI4-.LCFI3 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI5-.LCFI4 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x8a /* DW_CFA_offset, column 0xa */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x89 /* DW_CFA_offset, column 0x9 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x88 /* DW_CFA_offset, column 0x8 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI6-.LCFI5 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte .LEFDE3-.LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte .LASFDE3-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB2-. 
/* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte .LFE2-.LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI7-.LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI8-.LCFI7 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI9-.LCFI8 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIA-.LCFI9 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIB-.LCFIA + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIC-.LCFIB + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFID-.LCFIC + .byte 0xe /* DW_CFA_def_cfa_offset */ +#if defined(__SH4__) + .byte 24+48 /* uleb128 24+48 */ +#else + .byte 24+16 /* uleb128 24+16 */ +#endif + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x84 /* DW_CFA_offset, column 0x4 */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x85 /* DW_CFA_offset, column 0x5 */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x86 /* DW_CFA_offset, column 0x6 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x87 /* DW_CFA_offset, column 0x7 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIE-.LCFID + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE3: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi.c new file mode 100644 index 0000000..123b87a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffi.c @@ -0,0 +1,469 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2003, 2004, 2006, 2007, 2012 Kaz Kojima + Copyright (c) 2008 Anthony Green + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 8 +#define NFREGARG 12 + +static int +return_type (ffi_type *arg) +{ + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + /* gcc uses r2 if the result can be packed in on register. */ + if (arg->size <= sizeof (UINT8)) + return FFI_TYPE_UINT8; + else if (arg->size <= sizeof (UINT16)) + return FFI_TYPE_UINT16; + else if (arg->size <= sizeof (UINT32)) + return FFI_TYPE_UINT32; + else if (arg->size <= sizeof (UINT64)) + return FFI_TYPE_UINT64; + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += sizeof (UINT64); + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + int align; + + z = (*p_arg)->size; + align = (*p_arg)->alignment; + if (z < sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(SINT64 *) argp = (SINT64) *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT64 *) argp = (UINT64) *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT64 *) argp = (SINT64) *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT64 *) argp = (UINT64) *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT(0); + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT32) && align == sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *(SINT64 *) argp = (SINT64) *(SINT32 *) (*p_argv); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_STRUCT: + *(UINT64 *) argp = (UINT64) *(UINT32 *) (*p_argv); + break; + + default: + FFI_ASSERT(0); + break; + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT64) + && align == sizeof (UINT64) + && ((int) *p_argv & (sizeof (UINT64) - 1)) == 0) + { + *(UINT64 *) argp = *(UINT64 *) (*p_argv); + argp += sizeof (UINT64); + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + memcpy (argp, *p_argv, z); + argp += n * sizeof (UINT64); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; + int freg; + int fpair = -1; + + greg = (return_type (cif->rtype) == FFI_TYPE_STRUCT ? 
1 : 0); + freg = 0; + cif->flags2 = 0; + + for (i = j = 0; i < cif->nargs; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + greg++; + cif->bytes += sizeof (UINT64) - sizeof (float); + if (freg >= NFREGARG - 1) + continue; + if (fpair < 0) + { + fpair = freg; + freg += 2; + } + else + fpair = -1; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + break; + + case FFI_TYPE_DOUBLE: + if (greg++ >= NGREGARG && (freg + 1) >= NFREGARG) + continue; + if ((freg + 1) < NFREGARG) + { + freg += 2; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + } + else + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + + default: + size = (cif->arg_types)[i]->size; + if (size < sizeof (UINT64)) + cif->bytes += sizeof (UINT64) - size; + n = (size + sizeof (UINT64) - 1) / sizeof (UINT64); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + greg = NGREGARG; + else + greg += n; + for (m = 0; m < n; m++) + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + } + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags = return_type (cif->rtype); + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +/*@-declundef@*/ +/*@-exportheader@*/ +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), + /*@out@*/ extended_cif *, + unsigned, unsigned, long long, + /*@out@*/ unsigned *, + void (*fn)(void)); +/*@=declundef@*/ +/*@=exportheader@*/ + +void ffi_call(/*@dependent@*/ ffi_cif *cif, + void (*fn)(void), + /*@out@*/ void *rvalue, + /*@dependent@*/ void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, cif->flags2, + ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +extern void __ic_invalidate (void *line); + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Since ffi_closure is an aligned object, the ffi trampoline is + called as an SHcompact code. Sigh. 
+ SHcompact part: + mova @(1,pc),r0; add #1,r0; jmp @r0; nop; + SHmedia part: + movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0 + movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */ +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0x7001c701; + tramp[1] = 0x0009402b; +#else + tramp[0] = 0xc7017001; + tramp[1] = 0x402b0009; +#endif + tramp[2] = 0xcc000010 | (((UINT32) ffi_closure_SYSV) >> 16) << 10; + tramp[3] = 0xc8000010 | (((UINT32) ffi_closure_SYSV) & 0xffff) << 10; + tramp[4] = 0x6bf10600; + tramp[5] = 0xcc000010 | (((UINT32) codeloc) >> 16) << 10; + tramp[6] = 0xc8000010 | (((UINT32) codeloc) & 0xffff) << 10; + tramp[7] = 0x4401fff0; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. */ + asm volatile ("ocbwb %0,0; synco; icbi %1,0; synci" : : "r" (tramp), + "r"(codeloc)); + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +int +ffi_closure_helper_SYSV (ffi_closure *closure, UINT64 *rvalue, + UINT64 *pgr, UINT64 *pfr, UINT64 *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int greg, freg; + ffi_cif *cif; + int fpair = -1; + + cif = closure->cif; + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (return_type (cif->rtype) == FFI_TYPE_STRUCT) + { + rvalue = (UINT64 *) *pgr; + greg = 1; + } + else + greg = 0; + + freg = 0; + cif = closure->cif; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + void *p; + + z = (*p_arg)->size; + if (z < sizeof (UINT32)) + { + p = pgr + greg++; + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_STRUCT: +#ifdef __LITTLE_ENDIAN__ + avalue[i] = p; +#else + avalue[i] = ((char *) p) + sizeof (UINT32) - z; +#endif + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (UINT32)) + { + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg < NFREGARG - 1) + { + if (fpair >= 0) + { + avalue[i] = (UINT32 *) pfr + fpair; + fpair = -1; + } + else + { +#ifdef __LITTLE_ENDIAN__ + fpair = freg; + avalue[i] = (UINT32 *) pfr + (1 ^ freg); +#else + fpair = 1 ^ freg; + avalue[i] = (UINT32 *) pfr + freg; +#endif + freg += 2; + } + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + greg++; + } + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + avalue[i] = pgr + greg; + else + { + avalue[i] = pfr + (freg >> 1); + freg += 2; + } + greg++; + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + avalue[i] = pgr + greg; + greg += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. 
*/ + return return_type (cif->rtype); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffitarget.h new file mode 100644 index 0000000..08a6fe9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/ffitarget.h @@ -0,0 +1,58 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH - SHmedia. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS long long flags2 +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 32 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/sysv.S new file mode 100644 index 0000000..c4587d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sh64/sysv.S @@ -0,0 +1,539 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#ifdef __LITTLE_ENDIAN__ +#define OFS_FLT 0 +#else +#define OFS_FLT 4 +#endif + + .section .text..SHmedia32,"ax" + + # r2: ffi_prep_args + # r3: &ecif + # r4: bytes + # r5: flags + # r6: flags2 + # r7: rvalue + # r8: fn + + # This assumes we are using gas. + .align 5 +ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + addi.l r15, -48, r15 +.LCFI0: + st.q r15, 40, r32 + st.q r15, 32, r31 + st.q r15, 24, r30 + st.q r15, 16, r29 + st.q r15, 8, r28 + st.l r15, 4, r18 + st.l r15, 0, r14 +.LCFI1: + add.l r15, r63, r14 +.LCFI2: +# add r4, r63, r28 + add r5, r63, r29 + add r6, r63, r30 + add r7, r63, r31 + add r8, r63, r32 + + addi r4, (64 + 7), r4 + andi r4, ~7, r4 + sub.l r15, r4, r15 + + ptabs/l r2, tr0 + add r15, r63, r2 + blink tr0, r18 + + addi r15, 64, r22 + movi 0, r0 + movi 0, r1 + movi -1, r23 + + pt/l 1f, tr1 + bnei/l r29, FFI_TYPE_STRUCT, tr1 + ld.l r15, 0, r19 + addi r15, 8, r15 + addi r0, 1, r0 +1: + +.L_pass: + andi r30, 3, r20 + shlri r30, 2, r30 + + pt/l .L_call_it, tr0 + pt/l .L_pass_i, tr1 + pt/l .L_pass_f, tr2 + + beqi/l r20, FFI_TYPE_VOID, tr0 + beqi/l r20, FFI_TYPE_INT, tr1 + beqi/l r20, FFI_TYPE_FLOAT, tr2 + +.L_pass_d: + addi r0, 1, r0 + pt/l 3f, tr0 + movi 12, r20 + bge/l r1, r20, tr0 + + pt/l .L_pop_d, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + addi r1, 2, r1 + blink tr0, r63 + +.L_pop_d: + pt/l .L_pop_d_tbl, tr1 + gettr tr1, r20 + shlli r1, 2, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_d_tbl: + fld.d r15, 0, dr0 + blink tr0, r63 + fld.d r15, 0, dr2 + blink tr0, r63 + fld.d r15, 0, dr4 + blink tr0, r63 + fld.d r15, 0, dr6 + blink tr0, r63 + fld.d r15, 0, dr8 + blink tr0, r63 + fld.d r15, 0, dr10 + blink tr0, r63 + +.L_pass_f: + addi r0, 1, r0 + pt/l 3f, tr0 + movi 12, r20 + bge/l r1, r20, tr0 + + pt/l .L_pop_f, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + blink tr0, r63 + +.L_pop_f: + pt/l .L_pop_f_tbl, tr1 + pt/l 5f, tr2 + gettr tr1, r20 + bge/l r23, r63, tr2 + add r1, r63, r23 + shlli r1, 3, r21 + addi r1, 2, r1 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 +5: + addi r23, 1, r21 + movi -1, r23 + shlli r21, 3, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_f_tbl: + fld.s r15, OFS_FLT, fr0 + blink tr0, r63 + fld.s r15, OFS_FLT, fr1 + blink tr0, r63 + fld.s r15, OFS_FLT, fr2 + blink tr0, r63 + fld.s r15, OFS_FLT, fr3 + blink tr0, r63 + fld.s r15, OFS_FLT, fr4 + blink tr0, r63 + fld.s r15, OFS_FLT, fr5 + blink tr0, r63 + fld.s r15, OFS_FLT, fr6 + blink tr0, r63 + fld.s r15, OFS_FLT, fr7 + blink tr0, r63 + fld.s r15, OFS_FLT, fr8 + blink tr0, r63 + fld.s r15, OFS_FLT, fr9 + blink tr0, r63 + fld.s r15, OFS_FLT, fr10 + blink tr0, r63 + fld.s r15, 
OFS_FLT, fr11 + blink tr0, r63 + +.L_pass_i: + pt/l 3f, tr0 + movi 8, r20 + bge/l r0, r20, tr0 + + pt/l .L_pop_i, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + addi r0, 1, r0 + blink tr0, r63 + +.L_pop_i: + pt/l .L_pop_i_tbl, tr1 + gettr tr1, r20 + shlli r0, 3, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_i_tbl: + ld.q r15, 0, r2 + blink tr0, r63 + ld.q r15, 0, r3 + blink tr0, r63 + ld.q r15, 0, r4 + blink tr0, r63 + ld.q r15, 0, r5 + blink tr0, r63 + ld.q r15, 0, r6 + blink tr0, r63 + ld.q r15, 0, r7 + blink tr0, r63 + ld.q r15, 0, r8 + blink tr0, r63 + ld.q r15, 0, r9 + blink tr0, r63 + +.L_call_it: + # call function + pt/l 1f, tr1 + bnei/l r29, FFI_TYPE_STRUCT, tr1 + add r19, r63, r2 +1: + add r22, r63, r15 + ptabs/l r32, tr0 + blink tr0, r18 + + pt/l .L_ret_i, tr0 + pt/l .L_ret_ll, tr1 + pt/l .L_ret_d, tr2 + pt/l .L_ret_f, tr3 + pt/l .L_epilogue, tr4 + + beqi/l r29, FFI_TYPE_INT, tr0 + beqi/l r29, FFI_TYPE_UINT32, tr0 + beqi/l r29, FFI_TYPE_SINT64, tr1 + beqi/l r29, FFI_TYPE_UINT64, tr1 + beqi/l r29, FFI_TYPE_DOUBLE, tr2 + beqi/l r29, FFI_TYPE_FLOAT, tr3 + + pt/l .L_ret_q, tr0 + pt/l .L_ret_h, tr1 + + beqi/l r29, FFI_TYPE_UINT8, tr0 + beqi/l r29, FFI_TYPE_UINT16, tr1 + blink tr4, r63 + +.L_ret_d: + fst.d r31, 0, dr0 + blink tr4, r63 + +.L_ret_ll: + st.q r31, 0, r2 + blink tr4, r63 + +.L_ret_f: + fst.s r31, OFS_FLT, fr0 + blink tr4, r63 + +.L_ret_q: + st.b r31, 0, r2 + blink tr4, r63 + +.L_ret_h: + st.w r31, 0, r2 + blink tr4, r63 + +.L_ret_i: + st.l r31, 0, r2 + # Fall + +.L_epilogue: + # Remove the space we pushed for the args + add r14, r63, r15 + + ld.l r15, 0, r14 + ld.l r15, 4, r18 + ld.q r15, 8, r28 + ld.q r15, 16, r29 + ld.q r15, 24, r30 + ld.q r15, 32, r31 + ld.q r15, 40, r32 + addi.l r15, 48, r15 + ptabs r18, tr0 + blink tr0, r63 + +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + .align 5 +ENTRY(ffi_closure_SYSV) +.LFB2: + addi.l r15, -136, r15 +.LCFI3: + st.l r15, 12, r18 + st.l r15, 8, r14 + st.l r15, 4, r12 +.LCFI4: + add r15, r63, r14 +.LCFI5: + /* Stack layout: + ... 
+ 64 bytes (register parameters) + 48 bytes (floating register parameters) + 8 bytes (result) + 4 bytes (r18) + 4 bytes (r14) + 4 bytes (r12) + 4 bytes (for align) + <- new stack pointer + */ + fst.d r14, 24, dr0 + fst.d r14, 32, dr2 + fst.d r14, 40, dr4 + fst.d r14, 48, dr6 + fst.d r14, 56, dr8 + fst.d r14, 64, dr10 + st.q r14, 72, r2 + st.q r14, 80, r3 + st.q r14, 88, r4 + st.q r14, 96, r5 + st.q r14, 104, r6 + st.q r14, 112, r7 + st.q r14, 120, r8 + st.q r14, 128, r9 + + add r1, r63, r2 + addi r14, 16, r3 + addi r14, 72, r4 + addi r14, 24, r5 + addi r14, 136, r6 +#ifdef PIC + movi (((datalabel _GLOBAL_OFFSET_TABLE_-(.LPCS0-.)) >> 16) & 65535), r12 + shori ((datalabel _GLOBAL_OFFSET_TABLE_-(.LPCS0-.)) & 65535), r12 +.LPCS0: ptrel/u r12, tr0 + movi ((ffi_closure_helper_SYSV@GOTPLT) & 65535), r1 + gettr tr0, r12 + ldx.l r1, r12, r1 + ptabs r1, tr0 +#else + pt/l ffi_closure_helper_SYSV, tr0 +#endif + blink tr0, r18 + + shlli r2, 1, r1 + movi (((datalabel .L_table) >> 16) & 65535), r2 + shori ((datalabel .L_table) & 65535), r2 + ldx.w r2, r1, r1 + add r1, r2, r1 + pt/l .L_case_v, tr1 + ptabs r1, tr0 + blink tr0, r63 + + .align 2 +.L_table: + .word .L_case_v - datalabel .L_table /* FFI_TYPE_VOID */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_INT */ + .word .L_case_f - datalabel .L_table /* FFI_TYPE_FLOAT */ + .word .L_case_d - datalabel .L_table /* FFI_TYPE_DOUBLE */ + .word .L_case_d - datalabel .L_table /* FFI_TYPE_LONGDOUBLE */ + .word .L_case_uq - datalabel .L_table /* FFI_TYPE_UINT8 */ + .word .L_case_q - datalabel .L_table /* FFI_TYPE_SINT8 */ + .word .L_case_uh - datalabel .L_table /* FFI_TYPE_UINT16 */ + .word .L_case_h - datalabel .L_table /* FFI_TYPE_SINT16 */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_UINT32 */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_SINT32 */ + .word .L_case_ll - datalabel .L_table /* FFI_TYPE_UINT64 */ + .word .L_case_ll - datalabel .L_table /* FFI_TYPE_SINT64 */ + .word .L_case_v - datalabel .L_table /* FFI_TYPE_STRUCT */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_POINTER */ + + .align 2 +.L_case_d: + fld.d r14, 16, dr0 + blink tr1, r63 +.L_case_f: + fld.s r14, 16, fr0 + blink tr1, r63 +.L_case_ll: + ld.q r14, 16, r2 + blink tr1, r63 +.L_case_i: + ld.l r14, 16, r2 + blink tr1, r63 +.L_case_q: + ld.b r14, 16, r2 + blink tr1, r63 +.L_case_uq: + ld.ub r14, 16, r2 + blink tr1, r63 +.L_case_h: + ld.w r14, 16, r2 + blink tr1, r63 +.L_case_uh: + ld.uw r14, 16, r2 + blink tr1, r63 +.L_case_v: + add.l r14, r63, r15 + ld.l r15, 4, r12 + ld.l r15, 8, r14 + ld.l r15, 12, r18 + addi.l r15, 136, r15 + ptabs r18, tr0 + blink tr0, r63 + +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .uleb128 0x1 /* CIE Code Alignment Factor */ + .sleb128 -4 /* CIE Data Alignment Factor */ + .byte 0x12 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .uleb128 0xf + .uleb128 0x0 + .align 2 +.LECIE1: +.LSFDE1: + .4byte datalabel .LEFDE1-datalabel .LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte datalabel 
.LASFDE1-datalabel __FRAME_BEGIN__ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte datalabel .LFE1-datalabel .LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI0-datalabel .LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .uleb128 0x30 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI1-datalabel .LCFI0 + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .uleb128 0xc + .byte 0x92 /* DW_CFA_offset, column 0x12 */ + .uleb128 0xb + .byte 0x9c /* DW_CFA_offset, column 0x1c */ + .uleb128 0xa + .byte 0x9d /* DW_CFA_offset, column 0x1d */ + .uleb128 0x8 + .byte 0x9e /* DW_CFA_offset, column 0x1e */ + .uleb128 0x6 + .byte 0x9f /* DW_CFA_offset, column 0x1f */ + .uleb128 0x4 + .byte 0xa0 /* DW_CFA_offset, column 0x20 */ + .uleb128 0x2 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI2-datalabel .LCFI1 + .byte 0xd /* DW_CFA_def_cfa_register */ + .uleb128 0xe + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte datalabel .LEFDE3-datalabel .LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte datalabel .LASFDE3-datalabel __FRAME_BEGIN__ +#ifdef PIC + .4byte .LFB2-. /* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte datalabel .LFE2-datalabel .LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI3-datalabel .LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .uleb128 0x88 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI4-datalabel .LCFI3 + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .uleb128 0x21 + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .uleb128 0x20 + .byte 0x92 /* DW_CFA_offset, column 0x12 */ + .uleb128 0x1f + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI5-datalabel .LCFI4 + .byte 0xd /* DW_CFA_def_cfa_register */ + .uleb128 0xe + .align 2 +.LEFDE3: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi.c new file mode 100644 index 0000000..9e406d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi.c @@ -0,0 +1,468 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + flags = (rtype->size & 0xfff) << SPARC_SIZEMASK_SHIFT; + flags |= SPARC_RET_STRUCT; + break; + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = SPARC_RET_INT64; + break; + case FFI_TYPE_COMPLEX: + rtt = rtype->elements[0]->type; + switch (rtt) + { + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_4; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_8; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = SPARC_RET_INT128; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = SPARC_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = SP_V8_RET_CPLX16; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = SP_V8_RET_CPLX8; + break; + default: + abort(); + } + break; + default: + abort(); + } + cif->flags = flags; + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + int tt = ty->type; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Passed by reference. */ + z = 4; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + /* FALLTHRU */ + + default: + z = FFI_ALIGN(z, 4); + } + bytes += z; + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 4) + bytes = 6 * 4; + + /* The ABI always requires space for the struct return pointer. */ + bytes += 4; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 2 * 4); + + /* Include the call frame to prep_args. 
*/ + bytes += 4*16 + 4*8; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_v8(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +int FFI_HIDDEN +ffi_prep_args_v8(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if ((flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. */ + flags = SPARC_RET_VOID; + } + } + + /* This could only really be done when we are returning a structure. + However, the space is reserved so we can do it unconditionally. */ + *argp++ = (unsigned long)rvalue; + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. */ + memset(argp, 0, 6*4); +#endif + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + int tt = ty->type; + size_t z; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + *argp++ = (unsigned long)a; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + memcpy(argp, a, 8); + argp += 2; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *argp++ = *(unsigned *)a; + break; + + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + z = ty->size; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + if (z < 4) + { + memcpy((char *)argp + 4 - z, a, z); + argp++; + } + else + { + memcpy(argp, a, z); + argp += z / 4; + } + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V8); + + /* If we've not got a return value, we need to create one if we've + got to pass the return value to the callee. Otherwise ignore it. 
*/ + if (rvalue == NULL + && (cif->flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + bytes += FFI_ALIGN (cif->rtype->size, 8); + + ffi_call_v8(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + /* SPARC v8 requires 5 instructions for flush to be visible */ + asm volatile ("iflush %0; iflush %0+8; nop; nop; nop; nop; nop" + : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v8(void) FFI_HIDDEN; +extern void ffi_go_closure_v8(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long ctx = (unsigned long) closure; + unsigned long fn = (unsigned long) ffi_closure_v8; + + if (cif->abi != FFI_V8) + return FFI_BAD_ABI; + + tramp[0] = 0x03000000 | fn >> 10; /* sethi %hi(fn), %g1 */ + tramp[1] = 0x05000000 | ctx >> 10; /* sethi %hi(ctx), %g2 */ + tramp[2] = 0x81c06000 | (fn & 0x3ff); /* jmp %g1+%lo(fn) */ + tramp[3] = 0x8410a000 | (ctx & 0x3ff);/* or %g2, %lo(ctx) */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V8) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v8; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v8(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *argp) +{ + ffi_type **arg_types; + void **avalue; + int i, nargs, flags; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. Also install it so we + can return the address in %o0. */ + if ((flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + { + void *new_rvalue = (void *)*argp; + *(void **)rvalue = new_rvalue; + rvalue = new_rvalue; + } + + /* Always skip the structure return address. */ + argp++; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++) + { + ffi_type *ty = arg_types[i]; + int tt = ty->type; + void *a = argp; + size_t z; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Straight copy of invisible reference. */ + a = (void *)*argp; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + if ((unsigned long)a & 7) + { + /* Align on a 8-byte boundary. 
*/ + UINT64 *tmp = alloca(8); + *tmp = ((UINT64)argp[0] << 32) | argp[1]; + a = tmp; + } + argp++; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 2; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 3; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + z = ty->size; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + if (z < 4) + a += 4 - z; + else if (z > 4) + argp++; + break; + + default: + abort(); + } + argp++; + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* !SPARC64 */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64.c new file mode 100644 index 0000000..9e04061 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffi64.c @@ -0,0 +1,608 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#ifdef SPARC64 + +/* Flatten the contents of a structure to the parts that are passed in + floating point registers. The return is a bit mask wherein bit N + set means bytes [4*n, 4*n+3] are passed in %fN. + + We encode both the (running) size (maximum 32) and mask (maxumum 255) + into one integer. The size is placed in the low byte, so that align + and addition work correctly. The mask is placed in the second byte. 
*/ + +static int +ffi_struct_float_mask (ffi_type *outer_type, int size_mask) +{ + ffi_type **elts; + ffi_type *t; + + if (outer_type->type == FFI_TYPE_COMPLEX) + { + int m = 0, tt = outer_type->elements[0]->type; + size_t z = outer_type->size; + + if (tt == FFI_TYPE_FLOAT + || tt == FFI_TYPE_DOUBLE + || tt == FFI_TYPE_LONGDOUBLE) + m = (1 << (z / 4)) - 1; + return (m << 8) | z; + } + FFI_ASSERT (outer_type->type == FFI_TYPE_STRUCT); + + for (elts = outer_type->elements; (t = *elts) != NULL; elts++) + { + size_t z = t->size; + int o, m, tt; + + size_mask = FFI_ALIGN(size_mask, t->alignment); + switch (t->type) + { + case FFI_TYPE_STRUCT: + size_mask = ffi_struct_float_mask (t, size_mask); + continue; + case FFI_TYPE_COMPLEX: + tt = t->elements[0]->type; + if (tt != FFI_TYPE_FLOAT + && tt != FFI_TYPE_DOUBLE + && tt != FFI_TYPE_LONGDOUBLE) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + m = (1 << (z / 4)) - 1; /* compute mask for type */ + o = (size_mask >> 2) & 0x3f; /* extract word offset */ + size_mask |= m << (o + 8); /* insert mask into place */ + break; + } + size_mask += z; + } + + size_mask = FFI_ALIGN(size_mask, outer_type->alignment); + FFI_ASSERT ((size_mask & 0xff) == outer_type->size); + + return size_mask; +} + +/* Merge floating point data into integer data. If the structure is + entirely floating point, simply return a pointer to the fp data. */ + +static void * +ffi_struct_float_merge (int size_mask, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + return vi; + else if (mask == (1 << n) - 1) + return vf; + else + { + unsigned int *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + if ((mask >> i) & 1) + wi[i] = wf[i]; + + return vi; + } +} + +/* Similar, but place the data into VD in the end. */ + +void FFI_HIDDEN +ffi_struct_float_copy (int size_mask, void *vd, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + ; + else if (mask == (1 << n) - 1) + vi = vf; + else + { + unsigned int *wd = vd, *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + wd[i] = ((mask >> i) & 1 ? wf : wi)[i]; + return; + } + memcpy (vd, vi, size); +} + +/* Perform machine dependent cif processing */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes = 0; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_4; + break; + + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + if (rtype->size > 32) + { + flags = SPARC_RET_VOID | SPARC_FLAG_RET_IN_MEM; + bytes = 8; + } + else + { + int size_mask = ffi_struct_float_mask (rtype, 0); + int word_size = (size_mask >> 2) & 0x3f; + int all_mask = (1 << word_size) - 1; + int fp_mask = size_mask >> 8; + + flags = (size_mask << SPARC_SIZEMASK_SHIFT) | SPARC_RET_STRUCT; + + /* For special cases of all-int or all-fp, we can return + the value directly without popping through a struct copy. 
*/ + if (fp_mask == 0) + { + if (rtype->alignment >= 8) + { + if (rtype->size == 8) + flags = SPARC_RET_INT64; + else if (rtype->size == 16) + flags = SPARC_RET_INT128; + } + } + else if (fp_mask == all_mask) + switch (word_size) + { + case 1: flags = SPARC_RET_F_1; break; + case 2: flags = SPARC_RET_F_2; break; + case 3: flags = SP_V9_RET_F_3; break; + case 4: flags = SPARC_RET_F_4; break; + /* 5 word structures skipped; handled via RET_STRUCT. */ + case 6: flags = SPARC_RET_F_6; break; + /* 7 word structures skipped; handled via RET_STRUCT. */ + case 8: flags = SPARC_RET_F_8; break; + } + } + break; + + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = SP_V9_RET_SINT32; + break; + case FFI_TYPE_UINT32: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + flags = SPARC_RET_INT64; + break; + + default: + abort(); + } + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + size_t a = ty->alignment; + + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + /* Large structs passed by reference. */ + if (z > 16) + { + a = z = 8; + break; + } + /* Small structs may be passed in integer or fp regs or both. */ + if (bytes >= 16*8) + break; + if ((ffi_struct_float_mask (ty, 0) & 0xff00) == 0) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + flags |= SPARC_FLAG_FP_ARGS; + break; + } + bytes = FFI_ALIGN(bytes, a); + bytes += FFI_ALIGN(z, 8); + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 8) + bytes = 6 * 8; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 16); + + /* Include the call frame to prep_args. */ + bytes += 8*16 + 8*8; + + cif->bytes = bytes; + cif->flags = flags; + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned nfixedargs, unsigned ntotalargs) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern void ffi_call_v9(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +int FFI_HIDDEN +ffi_prep_args_v9(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if (flags & SPARC_FLAG_RET_IN_MEM) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. */ + flags = SPARC_RET_VOID; + } + } + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. 
*/ + memset(argp, 0, 6*8); +#endif + + if (flags & SPARC_FLAG_RET_IN_MEM) + *argp++ = (unsigned long)rvalue; + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + size_t z; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *argp++ = *(SINT32 *)a; + break; + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + *argp++ = *(UINT32 *)a; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + *argp++ = *(UINT64 *)a; + break; + + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + { + /* For structures larger than 16 bytes we pass reference. */ + *argp++ = (unsigned long)a; + break; + } + if (((unsigned long)argp & 15) && ty->alignment > 8) + argp++; + memcpy(argp, a, z); + argp += FFI_ALIGN(z, 8) / 8; + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V9); + + if (rvalue == NULL && (cif->flags & SPARC_FLAG_RET_IN_MEM)) + bytes += FFI_ALIGN (cif->rtype->size, 16); + + ffi_call_v9(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + asm volatile ("flush %0; flush %0+8" : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v9(void) FFI_HIDDEN; +extern void ffi_go_closure_v9(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn; + + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + /* Trampoline address is equal to the closure address. We take advantage + of that to reduce the trampoline size by 8 bytes. 
*/ + fn = (unsigned long) ffi_closure_v9; + tramp[0] = 0x83414000; /* rd %pc, %g1 */ + tramp[1] = 0xca586010; /* ldx [%g1+16], %g5 */ + tramp[2] = 0x81c14000; /* jmp %g5 */ + tramp[3] = 0x01000000; /* nop */ + *((unsigned long *) &tramp[4]) = fn; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v9; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v9(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *gpr, unsigned long *fpr) +{ + ffi_type **arg_types; + void **avalue; + int i, argn, argx, nargs, flags, nfixedargs; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + nfixedargs = cif->nfixedargs; + + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. */ + if (flags & SPARC_FLAG_RET_IN_MEM) + { + rvalue = (void *) gpr[0]; + /* Skip the structure return address. */ + argn = 1; + } + else + argn = 0; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++, argn = argx) + { + int named = i < nfixedargs; + ffi_type *ty = arg_types[i]; + void *a = &gpr[argn]; + size_t z; + + argx = argn + 1; + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + a = *(void **)a; + else + { + argx = argn + FFI_ALIGN (z, 8) / 8; + if (named && argn < 16) + { + int size_mask = ffi_struct_float_mask (ty, 0); + int argn_mask = (0xffff00 >> argn) & 0xff00; + + /* Eliminate fp registers off the end. */ + size_mask = (size_mask & 0xff) | (size_mask & argn_mask); + a = ffi_struct_float_merge (size_mask, gpr+argn, fpr+argn); + } + } + break; + + case FFI_TYPE_LONGDOUBLE: + argn = FFI_ALIGN (argn, 2); + a = (named && argn < 16 ? fpr : gpr) + argn; + argx = argn + 2; + break; + case FFI_TYPE_DOUBLE: + if (named && argn < 16) + a = fpr + argn; + break; + case FFI_TYPE_FLOAT: + if (named && argn < 16) + a = fpr + argn; + a += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + a += 4; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 6; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 7; + break; + + default: + abort(); + } + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* SPARC64 */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget.h new file mode 100644 index 0000000..2f4cd9a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/ffitarget.h @@ -0,0 +1,81 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SPARC. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined(__arch64__) || defined(__sparcv9) +#ifndef SPARC64 +#define SPARC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, +#ifdef SPARC64 + FFI_V9, + FFI_DEFAULT_ABI = FFI_V9, +#else + FFI_V8, + FFI_DEFAULT_ABI = FFI_V8, +#endif + FFI_LAST_ABI +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION 1 +#define FFI_TARGET_HAS_COMPLEX_TYPE 1 + +#ifdef SPARC64 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned int nfixedargs +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef SPARC64 +#define FFI_TRAMPOLINE_SIZE 24 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal.h new file mode 100644 index 0000000..0a66472 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/internal.h @@ -0,0 +1,26 @@ +#define SPARC_RET_VOID 0 +#define SPARC_RET_STRUCT 1 +#define SPARC_RET_UINT8 2 +#define SPARC_RET_SINT8 3 +#define SPARC_RET_UINT16 4 +#define SPARC_RET_SINT16 5 +#define SPARC_RET_UINT32 6 +#define SP_V9_RET_SINT32 7 /* v9 only */ +#define SP_V8_RET_CPLX16 7 /* v8 only */ +#define SPARC_RET_INT64 8 +#define SPARC_RET_INT128 9 + +/* Note that F_7 is missing, and is handled by SPARC_RET_STRUCT. 
*/ +#define SPARC_RET_F_8 10 +#define SPARC_RET_F_6 11 +#define SPARC_RET_F_4 12 +#define SPARC_RET_F_2 13 +#define SP_V9_RET_F_3 14 /* v9 only */ +#define SP_V8_RET_CPLX8 14 /* v8 only */ +#define SPARC_RET_F_1 15 + +#define SPARC_FLAG_RET_MASK 15 +#define SPARC_FLAG_RET_IN_MEM 32 +#define SPARC_FLAG_FP_ARGS 64 + +#define SPARC_SIZEMASK_SHIFT 8 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8.S new file mode 100644 index 0000000..a2e4908 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v8.S @@ -0,0 +1,443 @@ +/* ----------------------------------------------------------------------- + v8.S - Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 1996, 1997, 2003, 2004, 2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + + .text + +#ifndef __GNUC__ + .align 8 + .globl C(ffi_flush_icache) + .type C(ffi_flush_icache),#function + FFI_HIDDEN(C(ffi_flush_icache)) + +C(ffi_flush_icache): +1: iflush %o0 + iflush %o+8 + nop + nop + nop + nop + nop + retl + nop + .size C(ffi_flush_icache), . - C(ffi_flush_icache) +#endif + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + + .align 8 + .globl C(ffi_call_v8) + .type C(ffi_call_v8),#function + FFI_HIDDEN(C(ffi_call_v8)) + +C(ffi_call_v8): +.LUW0: + ! Allocate a stack frame sized by ffi_call. + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, 64+32, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v8) + mov %i3, %o3 ! copy avalue + + add %sp, 32, %sp ! deallocate prep frame + and %o0, SPARC_FLAG_RET_MASK, %l0 ! save return type + srl %o0, SPARC_SIZEMASK_SHIFT, %l1 ! save return size + ld [%sp+64+4], %o0 ! load all argument registers + ld [%sp+64+8], %o1 + ld [%sp+64+12], %o2 + ld [%sp+64+16], %o3 + cmp %l0, SPARC_RET_STRUCT ! struct return needs an unimp 4 + ld [%sp+64+20], %o4 + be 8f + ld [%sp+64+24], %o5 + + ! Call foreign function + call %i1 + mov %i5, %g2 ! 
load static chain + +0: call 1f ! load pc in %o7 + sll %l0, 4, %l0 +1: add %o7, %l0, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + unimp +E(SPARC_RET_UINT8) + and %o0, 0xff, %o0 + st %o0, [%i2] + ret + restore +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + b 7f + sra %o0, 24, %o0 +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + b 7f + srl %o0, 16, %o0 +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + b 7f + sra %o0, 16, %o0 +E(SPARC_RET_UINT32) +7: st %o0, [%i2] + ret + restore +E(SP_V8_RET_CPLX16) + sth %o0, [%i2+2] + b 9f + srl %o0, 16, %o0 +E(SPARC_RET_INT64) + st %o0, [%i2] + st %o1, [%i2+4] + ret + restore +E(SPARC_RET_INT128) + std %o0, [%i2] + std %o2, [%i2+8] + ret + restore +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + st %f3, [%i2+3*4] + nop + st %f2, [%i2+2*4] + nop +E(SPARC_RET_F_2) + st %f1, [%i2+4] + st %f0, [%i2] + ret + restore +E(SP_V8_RET_CPLX8) + stb %o0, [%i2+1] + b 0f + srl %o0, 8, %o0 +E(SPARC_RET_F_1) + st %f0, [%i2] + ret + restore + + .align 8 +9: sth %o0, [%i2] + ret + restore + .align 8 +0: stb %o0, [%i2] + ret + restore + + ! Struct returning functions expect and skip the unimp here. + ! To make it worse, conforming callees examine the unimp and + ! make sure the low 12 bits of the unimp match the size of + ! the struct being returned. + .align 8 +8: call 1f ! load pc in %o7 + sll %l1, 2, %l0 ! size * 4 +1: sll %l1, 4, %l1 ! size * 16 + add %l0, %l1, %l0 ! size * 20 + add %o7, %l0, %o7 ! o7 = 8b + size*20 + jmp %o7+(2f-8b) + mov %i5, %g2 ! load static chain +2: + +/* The Sun assembler doesn't understand .rept 0x1000. */ +#define rept1 \ + call %i1; \ + nop; \ + unimp (. - 2b) / 20; \ + ret; \ + restore + +#define rept16 \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1 + +#define rept256 \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16 + + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + +.LUW2: + .size C(ffi_call_v8),. - C(ffi_call_v8) + + +/* 16*4 register window + 1*4 struct return + 6*4 args backing store + + 8*4 return storage + 1*4 alignment. */ +#define STACKFRAME (16*4 + 4 + 6*4 + 8*4 + 4) + +/* ffi_closure_v8(...) + + Receives the closure argument in %g2. */ + +#ifdef HAVE_AS_REGISTER_PSEUDO_OP + .register %g2, #scratch +#endif + + .align 8 + .globl C(ffi_go_closure_v8) + .type C(ffi_go_closure_v8),#function + FFI_HIDDEN(C(ffi_go_closure_v8)) + +C(ffi_go_closure_v8): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ld [%g2+4], %o0 ! load cif + ld [%g2+8], %o1 ! load fun + b 0f + mov %g2, %o2 ! load user_data +.LUW5: + .size C(ffi_go_closure_v8), . - C(ffi_go_closure_v8) + + .align 8 + .globl C(ffi_closure_v8) + .type C(ffi_closure_v8),#function + FFI_HIDDEN(C(ffi_closure_v8)) + +C(ffi_closure_v8): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ld [%g2+FFI_TRAMPOLINE_SIZE], %o0 ! load cif + ld [%g2+FFI_TRAMPOLINE_SIZE+4], %o1 ! load fun + ld [%g2+FFI_TRAMPOLINE_SIZE+8], %o2 ! load user_data +0: + ! Store all of the potential argument registers in va_list format. 
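+	! (Added clarifying comment: %fp+68 is the six-word spill area the
+	! SPARC v8 ABI reserves in the caller's frame for incoming register
+	! arguments, so spilling %i0-%i5 there leaves them contiguous with
+	! any further arguments already passed on the stack.)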
+ st %i0, [%fp+68+0] + st %i1, [%fp+68+4] + st %i2, [%fp+68+8] + st %i3, [%fp+68+12] + st %i4, [%fp+68+16] + st %i5, [%fp+68+20] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. + add %fp, -8*4, %o3 + call ffi_closure_sparc_inner_v8 + add %fp, 64, %o4 + +0: call 1f + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o0 = o0 * 16 + add %o7, %o0, %o7 ! o7 = 0b + o0*16 + jmp %o7+(2f-0b) + add %fp, -8*4, %i2 + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + ld [%i2], %i0 + jmp %i7+12 + restore +E(SPARC_RET_UINT8) + ldub [%i2+3], %i0 + ret + restore +E(SPARC_RET_SINT8) + ldsb [%i2+3], %i0 + ret + restore +E(SPARC_RET_UINT16) + lduh [%i2+2], %i0 + ret + restore +E(SPARC_RET_SINT16) + ldsh [%i2+2], %i0 + ret + restore +E(SPARC_RET_UINT32) + ld [%i2], %i0 + ret + restore +E(SP_V8_RET_CPLX16) + ld [%i2], %i0 + ret + restore +E(SPARC_RET_INT64) + ldd [%i2], %i0 + ret + restore +E(SPARC_RET_INT128) + ldd [%i2], %i0 + ldd [%i2+8], %i2 + ret + restore +E(SPARC_RET_F_8) + ld [%i2+7*4], %f7 + nop + ld [%i2+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [%i2+5*4], %f5 + nop + ld [%i2+4*4], %f4 + nop +E(SPARC_RET_F_4) + ld [%i2+3*4], %f3 + nop + ld [%i2+2*4], %f2 + nop +E(SPARC_RET_F_2) + ldd [%i2], %f0 + ret + restore +E(SP_V8_RET_CPLX8) + lduh [%i2], %i0 + ret + restore +E(SPARC_RET_F_1) + ld [%i2], %f0 + ret + restore + +.LUW8: + .size C(ffi_closure_v8), . - C(ffi_closure_v8) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_ADDR(X) %r_disp32(X) +#else +# define FDE_ADDR(X) X +#endif + + .align 4 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x7c ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0 ! DW_CFA_def_cfa, %o6, offset 0 + .align 4 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW0) ! Initial location + .long .LUW2 - .LUW0 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW3) ! Initial location + .long .LUW5 - .LUW3 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW6) ! Initial location + .long .LUW8 - .LUW6 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE3: + +#endif /* !SPARC64 */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9.S new file mode 100644 index 0000000..55f8f43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/sparc/v9.S @@ -0,0 +1,440 @@ +/* ----------------------------------------------------------------------- + v9.S - Copyright (c) 2000, 2003, 2004, 2008 Red Hat, Inc. + + SPARC 64-bit Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifdef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + +#define STACK_BIAS 2047 + + .text + .align 8 + .globl C(ffi_call_v9) + .type C(ffi_call_v9),#function + FFI_HIDDEN(C(ffi_call_v9)) + +C(ffi_call_v9): +.LUW0: + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, STACK_BIAS+128+48, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v9) + mov %i3, %o3 ! copy avalue + + andcc %o0, SPARC_FLAG_FP_ARGS, %g0 ! need fp regs? + add %sp, 48, %sp ! deallocate prep frame + be,pt %xcc, 1f + mov %o0, %l0 ! save flags + + ldd [%sp+STACK_BIAS+128], %f0 ! load all fp arg regs + ldd [%sp+STACK_BIAS+128+8], %f2 + ldd [%sp+STACK_BIAS+128+16], %f4 + ldd [%sp+STACK_BIAS+128+24], %f6 + ldd [%sp+STACK_BIAS+128+32], %f8 + ldd [%sp+STACK_BIAS+128+40], %f10 + ldd [%sp+STACK_BIAS+128+48], %f12 + ldd [%sp+STACK_BIAS+128+56], %f14 + ldd [%sp+STACK_BIAS+128+64], %f16 + ldd [%sp+STACK_BIAS+128+72], %f18 + ldd [%sp+STACK_BIAS+128+80], %f20 + ldd [%sp+STACK_BIAS+128+88], %f22 + ldd [%sp+STACK_BIAS+128+96], %f24 + ldd [%sp+STACK_BIAS+128+104], %f26 + ldd [%sp+STACK_BIAS+128+112], %f28 + ldd [%sp+STACK_BIAS+128+120], %f30 + +1: ldx [%sp+STACK_BIAS+128], %o0 ! 
load all int arg regs + ldx [%sp+STACK_BIAS+128+8], %o1 + ldx [%sp+STACK_BIAS+128+16], %o2 + ldx [%sp+STACK_BIAS+128+24], %o3 + ldx [%sp+STACK_BIAS+128+32], %o4 + ldx [%sp+STACK_BIAS+128+40], %o5 + call %i1 + mov %i5, %g5 ! load static chain + +0: call 1f ! load pc in %o7 + and %l0, SPARC_FLAG_RET_MASK, %l1 +1: sll %l1, 4, %l1 + add %o7, %l1, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + add %sp, STACK_BIAS-64+128+48, %l2 + sub %sp, 64, %sp + b 8f + stx %o0, [%l2] +E(SPARC_RET_UINT8) + and %o0, 0xff, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + sra %o0, 24, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + srl %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + sra %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT32) + srl %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SP_V9_RET_SINT32) + sra %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_INT64) + stx %o0, [%i2] + return %i7+8 + nop +E(SPARC_RET_INT128) + stx %o0, [%i2] + stx %o1, [%i2+8] + return %i7+8 + nop +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + std %f2, [%i2+2*4] + return %i7+8 + std %f0, [%o2] +E(SPARC_RET_F_2) + return %i7+8 + std %f0, [%o2] +E(SP_V9_RET_F_3) + st %f2, [%i2+2*4] + nop + st %f1, [%i2+1*4] + nop +E(SPARC_RET_F_1) + return %i7+8 + st %f0, [%o2] + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: stx %o1, [%l2+8] + stx %o2, [%l2+16] + stx %o3, [%l2+24] + std %f0, [%l2+32] + std %f2, [%l2+40] + std %f4, [%l2+48] + std %f6, [%l2+56] + + ! Copy the structure into place. + srl %l0, SPARC_SIZEMASK_SHIFT, %o0 ! load size_mask + mov %i2, %o1 ! load dst + mov %l2, %o2 ! load src_gp + call C(ffi_struct_float_copy) + add %l2, 32, %o3 ! load src_fp + + return %i7+8 + nop + +.LUW2: + .size C(ffi_call_v9), . - C(ffi_call_v9) + + +#undef STACKFRAME +#define STACKFRAME 336 /* 16*8 register window + + 6*8 args backing store + + 20*8 locals */ +#define FP %fp+STACK_BIAS + +/* ffi_closure_v9(...) + + Receives the closure argument in %g1. */ + + .align 8 + .globl C(ffi_go_closure_v9) + .type C(ffi_go_closure_v9),#function + FFI_HIDDEN(C(ffi_go_closure_v9)) + +C(ffi_go_closure_v9): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ldx [%g5+8], %o0 + ldx [%g5+16], %o1 + b 0f + mov %g5, %o2 + +.LUW5: + .size C(ffi_go_closure_v9), . - C(ffi_go_closure_v9) + + .align 8 + .globl C(ffi_closure_v9) + .type C(ffi_closure_v9),#function + FFI_HIDDEN(C(ffi_closure_v9)) + +C(ffi_closure_v9): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ldx [%g1+FFI_TRAMPOLINE_SIZE], %o0 + ldx [%g1+FFI_TRAMPOLINE_SIZE+8], %o1 + ldx [%g1+FFI_TRAMPOLINE_SIZE+16], %o2 +0: + ! Store all of the potential argument registers in va_list format. + stx %i0, [FP+128+0] + stx %i1, [FP+128+8] + stx %i2, [FP+128+16] + stx %i3, [FP+128+24] + stx %i4, [FP+128+32] + stx %i5, [FP+128+40] + + ! Store possible floating point argument registers too. + std %f0, [FP-128] + std %f2, [FP-120] + std %f4, [FP-112] + std %f6, [FP-104] + std %f8, [FP-96] + std %f10, [FP-88] + std %f12, [FP-80] + std %f14, [FP-72] + std %f16, [FP-64] + std %f18, [FP-56] + std %f20, [FP-48] + std %f22, [FP-40] + std %f24, [FP-32] + std %f26, [FP-24] + std %f28, [FP-16] + std %f30, [FP-8] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. 
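+	! (Added clarifying comment: %o0-%o2 already hold cif, fun and
+	! user_data; the three adds below pass the return-value scratch
+	! area, the spilled integer argument registers, and the spilled
+	! floating-point argument registers, respectively.)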
+ add %fp, STACK_BIAS-160, %o3 + add %fp, STACK_BIAS+128, %o4 + call C(ffi_closure_sparc_inner_v9) + add %fp, STACK_BIAS-128, %o5 + +0: call 1f ! load pc in %o7 + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o2 = i2 * 16 + add %o7, %o0, %o7 ! o7 = 0b + i2*16 + jmp %o7+(2f-0b) + nop + + ! Note that we cannot load the data in the delay slot of + ! the return insn because the data is in the stack frame + ! that is deallocated by the return. + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + ldx [FP-160], %i0 + ldd [FP-160], %f0 + b 8f + ldx [FP-152], %i1 +E(SPARC_RET_UINT8) + ldub [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT8) + ldsb [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT16) + lduh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT16) + ldsh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT32) + lduw [FP-160+4], %i0 + return %i7+8 + nop +E(SP_V9_RET_SINT32) + ldsw [FP-160+4], %i0 + return %i7+8 + nop +E(SPARC_RET_INT64) + ldx [FP-160], %i0 + return %i7+8 + nop +E(SPARC_RET_INT128) + ldx [FP-160], %i0 + ldx [FP-160+8], %i1 + return %i7+8 + nop +E(SPARC_RET_F_8) + ld [FP-160+7*4], %f7 + nop + ld [FP-160+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [FP-160+5*4], %f5 + nop + ld [FP-160+4*4], %f4 + nop +E(SPARC_RET_F_4) + ldd [FP-160], %f0 + ldd [FP-160+8], %f2 + return %i7+8 + nop +E(SPARC_RET_F_2) + ldd [FP-160], %f0 + return %i7+8 + nop +E(SP_V9_RET_F_3) + ld [FP-160+2*4], %f2 + nop + ld [FP-160+1*4], %f1 + nop +E(SPARC_RET_F_1) + ld [FP-160], %f0 + return %i7+8 + nop + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: ldd [FP-152], %f2 + ldx [FP-144], %i2 + ldd [FP-144], %f4 + ldx [FP-136], %i3 + ldd [FP-136], %f6 + return %i7+8 + nop + +.LUW8: + .size C(ffi_closure_v9), . - C(ffi_closure_v9) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_RANGE(B, E) .long %r_disp32(B), E - B +#else +# define FDE_RANGE(B, E) .align 8; .xword B, E - B +#endif + + .align 8 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x78 ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0xff, 0xf ! DW_CFA_def_cfa, %o6, offset 0x7ff + .align 8 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW0, .LUW2) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW3, .LUW5) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW6, .LUW8) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE3: + +#endif /* SPARC64 */ +#ifdef __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi.c new file mode 100644 index 0000000..3a94469 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffi.c @@ -0,0 +1,355 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Tilera Corp. + + TILE Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* The first 10 registers are used to pass arguments and return values. */ +#define NUM_ARG_REGS 10 + +/* Performs a raw function call with the given NUM_ARG_REGS register arguments + and the specified additional stack arguments (if any). */ +extern void ffi_call_tile(ffi_sarg reg_args[NUM_ARG_REGS], + const ffi_sarg *stack_args, + size_t stack_args_bytes, + void (*fnaddr)(void)) + FFI_HIDDEN; + +/* This handles the raw call from the closure stub, cleaning up the + parameters and delegating to ffi_closure_tile_inner. */ +extern void ffi_closure_tile(void) FFI_HIDDEN; + + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* We always allocate room for all registers. Even if we don't + use them as parameters, they get returned in the same array + as struct return values so we need to make room. */ + if (cif->bytes < NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->bytes = NUM_ARG_REGS * FFI_SIZEOF_ARG; + + if (cif->rtype->size > NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->flags = FFI_TYPE_STRUCT; + else + cif->flags = FFI_TYPE_INT; + + /* Nothing to do. */ + return FFI_OK; +} + + +static long +assign_to_ffi_arg(ffi_sarg *out, void *in, const ffi_type *type, + int write_to_reg) +{ + switch (type->type) + { + case FFI_TYPE_SINT8: + *out = *(SINT8 *)in; + return 1; + + case FFI_TYPE_UINT8: + *out = *(UINT8 *)in; + return 1; + + case FFI_TYPE_SINT16: + *out = *(SINT16 *)in; + return 1; + + case FFI_TYPE_UINT16: + *out = *(UINT16 *)in; + return 1; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LP64__ + case FFI_TYPE_POINTER: +#endif + /* Note that even unsigned 32-bit quantities are sign extended + on tilegx when stored in a register. 
*/ + *out = *(SINT32 *)in; + return 1; + + case FFI_TYPE_FLOAT: +#ifdef __tilegx__ + if (write_to_reg) + { + /* Properly sign extend the value. */ + union { float f; SINT32 s32; } val; + val.f = *(float *)in; + *out = val.s32; + } + else +#endif + { + *(float *)out = *(float *)in; + } + return 1; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: +#ifdef __LP64__ + case FFI_TYPE_POINTER: +#endif + *(UINT64 *)out = *(UINT64 *)in; + return sizeof(UINT64) / FFI_SIZEOF_ARG; + + case FFI_TYPE_STRUCT: + memcpy(out, in, type->size); + return (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + case FFI_TYPE_VOID: + /* Must be a return type. Nothing to do. */ + return 0; + + default: + FFI_ASSERT(0); + return -1; + } +} + + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_sarg * const arg_mem = alloca(cif->bytes); + ffi_sarg * const reg_args = arg_mem; + ffi_sarg * const stack_args = ®_args[NUM_ARG_REGS]; + ffi_sarg *argp = arg_mem; + ffi_type ** const arg_types = cif->arg_types; + const long num_args = cif->nargs; + long i; + + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Pass a hidden pointer to the return value. We make sure there + is scratch space for the callee to store the return value even if + our caller doesn't care about it. */ + *argp++ = (intptr_t)(rvalue ? rvalue : alloca(cif->rtype->size)); + + /* No more work needed to return anything. */ + rvalue = NULL; + } + + for (i = 0; i < num_args; i++) + { + ffi_type *type = arg_types[i]; + void * const arg_in = avalue[i]; + ptrdiff_t arg_word = argp - arg_mem; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (type->type == FFI_TYPE_STRUCT) + { + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + if (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS) + { + /* Args are not allowed to span registers and the stack. */ + argp = stack_args; + } + + memcpy(argp, arg_in, type->size); + argp += arg_size_in_words; + } + else + { + argp += assign_to_ffi_arg(argp, arg_in, arg_types[i], 1); + } + } + + /* Actually do the call. */ + ffi_call_tile(reg_args, stack_args, + cif->bytes - (NUM_ARG_REGS * FFI_SIZEOF_ARG), fn); + + if (rvalue != NULL) + assign_to_ffi_arg(rvalue, reg_args, cif->rtype, 0); +} + + +/* Template code for closure. */ +extern const UINT64 ffi_template_tramp_tile[] FFI_HIDDEN; + + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ +#ifdef __tilegx__ + /* TILE-Gx */ + SINT64 c; + SINT64 h; + int s; + UINT64 *out; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + + c = (intptr_t)closure; + h = (intptr_t)ffi_closure_tile; + s = 0; + + /* Find the smallest shift count that doesn't lose information + (i.e. no need to explicitly insert high bits of the address that + are just the sign extension of the low bits). */ + while ((c >> s) != (SINT16)(c >> s) || (h >> s) != (SINT16)(h >> s)) + s += 16; + +#define OPS(a, b, shift) \ + (create_Imm16_X0((a) >> (shift)) | create_Imm16_X1((b) >> (shift))) + + /* Emit the moveli. 
*/ + *out++ = ffi_template_tramp_tile[0] | OPS(c, h, s); + for (s -= 16; s >= 0; s -= 16) + *out++ = ffi_template_tramp_tile[1] | OPS(c, h, s); + +#undef OPS + + *out++ = ffi_template_tramp_tile[2]; + +#else + /* TILEPro */ + UINT64 *out; + intptr_t delta; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + delta = (intptr_t)ffi_closure_tile - (intptr_t)codeloc; + + *out++ = ffi_template_tramp_tile[0] | create_JOffLong_X1(delta >> 3); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + invalidate_icache(closure->tramp, (char *)out - closure->tramp, + getpagesize()); + + return FFI_OK; +} + + +/* This is called by the assembly wrapper for closures. This does + all of the work. On entry reg_args[0] holds the values the registers + had when the closure was invoked. On return reg_args[1] holds the register + values to be returned to the caller (many of which may be garbage). */ +void FFI_HIDDEN +ffi_closure_tile_inner(ffi_closure *closure, + ffi_sarg reg_args[2][NUM_ARG_REGS], + ffi_sarg *stack_args) +{ + ffi_cif * const cif = closure->cif; + void ** const avalue = alloca(cif->nargs * sizeof(void *)); + void *rvalue; + ffi_type ** const arg_types = cif->arg_types; + ffi_sarg * const reg_args_in = reg_args[0]; + ffi_sarg * const reg_args_out = reg_args[1]; + ffi_sarg * argp; + long i, arg_word, nargs = cif->nargs; + /* Use a union to guarantee proper alignment for double. */ + union { ffi_sarg arg[NUM_ARG_REGS]; double d; UINT64 u64; } closure_ret; + + /* Start out reading register arguments. */ + argp = reg_args_in; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. */ + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Return by reference via hidden pointer. */ + rvalue = (void *)(intptr_t)*argp++; + arg_word = 1; + } + else + { + /* Return the value in registers. */ + rvalue = &closure_ret; + arg_word = 0; + } + + /* Grab the addresses of the arguments. */ + for (i = 0; i < nargs; i++) + { + ffi_type * const type = arg_types[i]; + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (arg_word == NUM_ARG_REGS || + (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS)) + { + /* Switch to reading arguments from the stack. */ + argp = stack_args; + arg_word = NUM_ARG_REGS; + } + + avalue[i] = argp; + argp += arg_size_in_words; + arg_word += arg_size_in_words; + } + + /* Invoke the closure. */ + closure->fun(cif, rvalue, avalue, closure->user_data); + + if (cif->flags != FFI_TYPE_STRUCT) + { + /* Canonicalize for register representation. */ + assign_to_ffi_arg(reg_args_out, &closure_ret, cif->rtype, 1); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget.h new file mode 100644 index 0000000..679fb5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/ffitarget.h @@ -0,0 +1,65 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Tilera Corp. + Target configuration macros for TILE. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM + +#include + +typedef uint_reg_t ffi_arg; +typedef int_reg_t ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ +#define FFI_CLOSURES 1 + +#ifdef __tilegx__ +/* We always pass 8-byte values, even in -m32 mode. */ +# define FFI_SIZEOF_ARG 8 +# ifdef __LP64__ +# define FFI_TRAMPOLINE_SIZE (8 * 5) /* 5 bundles */ +# else +# define FFI_TRAMPOLINE_SIZE (8 * 3) /* 3 bundles */ +# endif +#else +# define FFI_SIZEOF_ARG 4 +# define FFI_TRAMPOLINE_SIZE 8 /* 1 bundle */ +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile.S new file mode 100644 index 0000000..d1f82cb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/tile/tile.S @@ -0,0 +1,360 @@ +/* ----------------------------------------------------------------------- + tile.S - Copyright (c) 2011 Tilera Corp. + + Tilera TILEPro and TILE-Gx Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Number of bytes in a register. */ +#define REG_SIZE FFI_SIZEOF_ARG + +/* Number of bytes in stack linkage area for backtracing. + + A note about the ABI: on entry to a procedure, sp points to a stack + slot where it must spill the return address if it's not a leaf. + REG_SIZE bytes beyond that is a slot owned by the caller which + contains the sp value that the caller had when it was originally + entered (i.e. the caller's frame pointer). */ +#define LINKAGE_SIZE (2 * REG_SIZE) + +/* The first 10 registers are used to pass arguments and return values. */ +#define NUM_ARG_REGS 10 + +#ifdef __tilegx__ +#define SW st +#define LW ld +#define BGZT bgtzt +#else +#define SW sw +#define LW lw +#define BGZT bgzt +#endif + + +/* void ffi_call_tile (int_reg_t reg_args[NUM_ARG_REGS], + const int_reg_t *stack_args, + unsigned long stack_args_bytes, + void (*fnaddr)(void)); + + On entry, REG_ARGS contain the outgoing register values, + and STACK_ARGS contains STACK_ARG_BYTES of additional values + to be passed on the stack. If STACK_ARG_BYTES is zero, then + STACK_ARGS is ignored. + + When the invoked function returns, the values of r0-r9 are + blindly stored back into REG_ARGS for the caller to examine. */ + + .section .text.ffi_call_tile, "ax", @progbits + .align 8 + .globl ffi_call_tile + FFI_HIDDEN(ffi_call_tile) +ffi_call_tile: + +/* Incoming arguments. */ +#define REG_ARGS r0 +#define INCOMING_STACK_ARGS r1 +#define STACK_ARG_BYTES r2 +#define ORIG_FNADDR r3 + +/* Temporary values. */ +#define FRAME_SIZE r10 +#define TMP r11 +#define TMP2 r12 +#define OUTGOING_STACK_ARGS r13 +#define REG_ADDR_PTR r14 +#define RETURN_REG_ADDR r15 +#define FNADDR r16 + + .cfi_startproc + { + /* Save return address. */ + SW sp, lr + .cfi_offset lr, 0 + /* Prepare to spill incoming r52. */ + addi TMP, sp, -REG_SIZE + /* Increase frame size to have room to spill r52 and REG_ARGS. + The +7 is to round up mod 8. */ + addi FRAME_SIZE, STACK_ARG_BYTES, \ + REG_SIZE + REG_SIZE + LINKAGE_SIZE + 7 + } + { + /* Round stack frame size to a multiple of 8 to satisfy ABI. */ + andi FRAME_SIZE, FRAME_SIZE, -8 + /* Compute where to spill REG_ARGS value. */ + addi TMP2, sp, -(REG_SIZE * 2) + } + { + /* Spill incoming r52. */ + SW TMP, r52 + .cfi_offset r52, -REG_SIZE + /* Set up our frame pointer. */ + move r52, sp + .cfi_def_cfa_register r52 + /* Push stack frame. */ + sub sp, sp, FRAME_SIZE + } + { + /* Prepare to set up stack linkage. */ + addi TMP, sp, REG_SIZE + /* Prepare to memcpy stack args. */ + addi OUTGOING_STACK_ARGS, sp, LINKAGE_SIZE + /* Save REG_ARGS which we will need after we call the subroutine. */ + SW TMP2, REG_ARGS + } + { + /* Set up linkage info to hold incoming stack pointer. */ + SW TMP, r52 + } + { + /* Skip stack args memcpy if we don't have any stack args (common). */ + blezt STACK_ARG_BYTES, .Ldone_stack_args_memcpy + } + +.Lmemcpy_stack_args: + { + /* Load incoming argument from stack_args. */ + LW TMP, INCOMING_STACK_ARGS + addi INCOMING_STACK_ARGS, INCOMING_STACK_ARGS, REG_SIZE + } + { + /* Store stack argument into outgoing stack argument area. 
*/ + SW OUTGOING_STACK_ARGS, TMP + addi OUTGOING_STACK_ARGS, OUTGOING_STACK_ARGS, REG_SIZE + addi STACK_ARG_BYTES, STACK_ARG_BYTES, -REG_SIZE + } + { + BGZT STACK_ARG_BYTES, .Lmemcpy_stack_args + } +.Ldone_stack_args_memcpy: + + { + /* Copy aside ORIG_FNADDR so we can overwrite its register. */ + move FNADDR, ORIG_FNADDR + /* Prepare to load argument registers. */ + addi REG_ADDR_PTR, r0, REG_SIZE + /* Load outgoing r0. */ + LW r0, r0 + } + + /* Load up argument registers from the REG_ARGS array. */ +#define LOAD_REG(REG, PTR) \ + { \ + LW REG, PTR ; \ + addi PTR, PTR, REG_SIZE \ + } + + LOAD_REG(r1, REG_ADDR_PTR) + LOAD_REG(r2, REG_ADDR_PTR) + LOAD_REG(r3, REG_ADDR_PTR) + LOAD_REG(r4, REG_ADDR_PTR) + LOAD_REG(r5, REG_ADDR_PTR) + LOAD_REG(r6, REG_ADDR_PTR) + LOAD_REG(r7, REG_ADDR_PTR) + LOAD_REG(r8, REG_ADDR_PTR) + LOAD_REG(r9, REG_ADDR_PTR) + + { + /* Call the subroutine. */ + jalr FNADDR + } + + { + /* Restore original lr. */ + LW lr, r52 + /* Prepare to recover ARGS, which we spilled earlier. */ + addi TMP, r52, -(2 * REG_SIZE) + } + { + /* Restore ARGS, so we can fill it in with the return regs r0-r9. */ + LW RETURN_REG_ADDR, TMP + /* Prepare to restore original r52. */ + addi TMP, r52, -REG_SIZE + } + + { + /* Pop stack frame. */ + move sp, r52 + /* Restore original r52. */ + LW r52, TMP + } + +#define STORE_REG(REG, PTR) \ + { \ + SW PTR, REG ; \ + addi PTR, PTR, REG_SIZE \ + } + + /* Return all register values by reference. */ + STORE_REG(r0, RETURN_REG_ADDR) + STORE_REG(r1, RETURN_REG_ADDR) + STORE_REG(r2, RETURN_REG_ADDR) + STORE_REG(r3, RETURN_REG_ADDR) + STORE_REG(r4, RETURN_REG_ADDR) + STORE_REG(r5, RETURN_REG_ADDR) + STORE_REG(r6, RETURN_REG_ADDR) + STORE_REG(r7, RETURN_REG_ADDR) + STORE_REG(r8, RETURN_REG_ADDR) + STORE_REG(r9, RETURN_REG_ADDR) + + { + jrp lr + } + + .cfi_endproc + .size ffi_call_tile, .-ffi_call_tile + +/* ffi_closure_tile(...) + + On entry, lr points to the closure plus 8 bytes, and r10 + contains the actual return address. + + This function simply dumps all register parameters into a stack array + and passes the closure, the registers array, and the stack arguments + to C code that does all of the actual closure processing. */ + + .section .text.ffi_closure_tile, "ax", @progbits + .align 8 + .globl ffi_closure_tile + FFI_HIDDEN(ffi_closure_tile) + + .cfi_startproc +/* Room to spill all NUM_ARG_REGS incoming registers, plus frame linkage. */ +#define CLOSURE_FRAME_SIZE (((NUM_ARG_REGS * REG_SIZE * 2 + LINKAGE_SIZE) + 7) & -8) +ffi_closure_tile: + { +#ifdef __tilegx__ + st sp, lr + .cfi_offset lr, 0 +#else + /* Save return address (in r10 due to closure stub wrapper). */ + SW sp, r10 + .cfi_return_column r10 + .cfi_offset r10, 0 +#endif + /* Compute address for stack frame linkage. */ + addli r10, sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + } + { + /* Save incoming stack pointer in linkage area. */ + SW r10, sp + .cfi_offset sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + /* Push a new stack frame. */ + addli sp, sp, -CLOSURE_FRAME_SIZE + .cfi_adjust_cfa_offset CLOSURE_FRAME_SIZE + } + + { + /* Create pointer to where to start spilling registers. */ + addi r10, sp, LINKAGE_SIZE + } + + /* Spill all the incoming registers. */ + STORE_REG(r0, r10) + STORE_REG(r1, r10) + STORE_REG(r2, r10) + STORE_REG(r3, r10) + STORE_REG(r4, r10) + STORE_REG(r5, r10) + STORE_REG(r6, r10) + STORE_REG(r7, r10) + STORE_REG(r8, r10) + { + /* Save r9. */ + SW r10, r9 +#ifdef __tilegx__ + /* Pointer to closure is passed in r11. */ + move r0, r11 +#else + /* Compute pointer to the closure object. 
Because the closure + starts with a "jal ffi_closure_tile", we can just take the + value of lr (a phony return address pointing into the closure) + and subtract 8. */ + addi r0, lr, -8 +#endif + /* Compute a pointer to the register arguments we just spilled. */ + addi r1, sp, LINKAGE_SIZE + } + { + /* Compute a pointer to the extra stack arguments (if any). */ + addli r2, sp, CLOSURE_FRAME_SIZE + LINKAGE_SIZE + /* Call C code to deal with all of the grotty details. */ + jal ffi_closure_tile_inner + } + { + addli r10, sp, CLOSURE_FRAME_SIZE + } + { + /* Restore the return address. */ + LW lr, r10 + /* Compute pointer to registers array. */ + addli r10, sp, LINKAGE_SIZE + (NUM_ARG_REGS * REG_SIZE) + } + /* Return all the register values, which C code may have set. */ + LOAD_REG(r0, r10) + LOAD_REG(r1, r10) + LOAD_REG(r2, r10) + LOAD_REG(r3, r10) + LOAD_REG(r4, r10) + LOAD_REG(r5, r10) + LOAD_REG(r6, r10) + LOAD_REG(r7, r10) + LOAD_REG(r8, r10) + LOAD_REG(r9, r10) + { + /* Pop the frame. */ + addli sp, sp, CLOSURE_FRAME_SIZE + jrp lr + } + + .cfi_endproc + .size ffi_closure_tile, . - ffi_closure_tile + + +/* What follows are code template instructions that get copied to the + closure trampoline by ffi_prep_closure_loc. The zeroed operands + get replaced by their proper values at runtime. */ + + .section .text.ffi_template_tramp_tile, "ax", @progbits + .align 8 + .globl ffi_template_tramp_tile + FFI_HIDDEN(ffi_template_tramp_tile) +ffi_template_tramp_tile: +#ifdef __tilegx__ + { + moveli r11, 0 /* backpatched to address of containing closure. */ + moveli r10, 0 /* backpatched to ffi_closure_tile. */ + } + /* Note: the following bundle gets generated multiple times + depending on the pointer value (esp. useful for -m32 mode). */ + { shl16insli r11, r11, 0 ; shl16insli r10, r10, 0 } + { info 2+8 /* for backtracer: -> pc in lr, frame size 0 */ ; jr r10 } +#else + /* 'jal .' yields a PC-relative offset of zero so we can OR in the + right offset at runtime. */ + { move r10, lr ; jal . /* ffi_closure_tile */ } +#endif + + .size ffi_template_tramp_tile, . - ffi_template_tramp_tile diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c new file mode 100644 index 0000000..9ec27f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/types.c @@ -0,0 +1,108 @@ +/* ----------------------------------------------------------------------- + types.c - Copyright (c) 1996, 1998 Red Hat, Inc. + + Predefined ffi_types needed by libffi. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* Hide the basic type definitions from the header file, so that we + can redefine them here as "const". */ +#define LIBFFI_HIDE_BASIC_TYPES + +#include +#include + +/* Type definitions */ + +#define FFI_TYPEDEF(name, type, id, maybe_const)\ +struct struct_align_##name { \ + char c; \ + type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_##name = { \ + sizeof(type), \ + offsetof(struct struct_align_##name, x), \ + id, NULL \ +} + +#define FFI_COMPLEX_TYPEDEF(name, type, maybe_const) \ +static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffi_type_##name), NULL \ +}; \ +struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ +} + +/* Size and alignment are fake here. They must not be 0. */ +FFI_EXTERN const ffi_type ffi_type_void = { + 1, 1, FFI_TYPE_VOID, NULL +}; + +FFI_TYPEDEF(uint8, UINT8, FFI_TYPE_UINT8, const); +FFI_TYPEDEF(sint8, SINT8, FFI_TYPE_SINT8, const); +FFI_TYPEDEF(uint16, UINT16, FFI_TYPE_UINT16, const); +FFI_TYPEDEF(sint16, SINT16, FFI_TYPE_SINT16, const); +FFI_TYPEDEF(uint32, UINT32, FFI_TYPE_UINT32, const); +FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const); +FFI_TYPEDEF(uint64, UINT64, FFI_TYPE_UINT64, const); +FFI_TYPEDEF(sint64, SINT64, FFI_TYPE_SINT64, const); + +FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const); + +FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const); +FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const); + +#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ +#define FFI_LDBL_CONST const +#else +#define FFI_LDBL_CONST +#endif + +#ifdef __alpha__ +/* Even if we're not configured to default to 128-bit long double, + maintain binary compatibility, as -mlong-double-128 can be used + at any time. */ +/* Validate the hard-coded number below. */ +# if defined(__LONG_DOUBLE_128__) && FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +const ffi_type ffi_type_longdouble = { 16, 16, 4, NULL }; +#elif FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_TYPEDEF(longdouble, long double, FFI_TYPE_LONGDOUBLE, FFI_LDBL_CONST); +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_COMPLEX_TYPEDEF(float, float, const); +FFI_COMPLEX_TYPEDEF(double, double, const); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_COMPLEX_TYPEDEF(longdouble, long double, FFI_LDBL_CONST); +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd.S new file mode 100644 index 0000000..01ca313 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/elfbsd.S @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * void * %r0 + * ffi_call_elfbsd(extended_cif *ecif, 4(%ap) + * unsigned bytes, 8(%ap) + * unsigned flags, 12(%ap) + * void *rvalue, 16(%ap) + * void (*fn)()); 20(%ap) + */ + .globl ffi_call_elfbsd + .type ffi_call_elfbsd,@function + .align 2 +ffi_call_elfbsd: + .word 0x00c # save R2 and R3 + + # Allocate stack space for the args + subl2 8(%ap), %sp + + # Call ffi_prep_args + pushl %sp + pushl 4(%ap) + calls $2, ffi_prep_args + + # Get function pointer + movl 20(%ap), %r1 + + # Build a CALLS frame + ashl $-2, 8(%ap), %r0 + pushl %r0 # argument stack usage + movl %sp, %r0 # future %ap + # saved registers + bbc $11, 0(%r1), 1f + pushl %r11 +1: bbc $10, 0(%r1), 1f + pushl %r10 +1: bbc $9, 0(%r1), 1f + pushl %r9 +1: bbc $8, 0(%r1), 1f + pushl %r8 +1: bbc $7, 0(%r1), 1f + pushl %r7 +1: bbc $6, 0(%r1), 1f + pushl %r6 +1: bbc $5, 0(%r1), 1f + pushl %r5 +1: bbc $4, 0(%r1), 1f + pushl %r4 +1: bbc $3, 0(%r1), 1f + pushl %r3 +1: bbc $2, 0(%r1), 1f + pushl %r2 +1: + pushal 9f + pushl %fp + pushl %ap + movl 16(%ap), %r3 # struct return address, if needed + movl %r0, %ap + movzwl 4(%fp), %r0 # previous PSW, without the saved registers mask + bisl2 $0x20000000, %r0 # calls frame + movzwl 0(%r1), %r2 + bicw2 $0xf003, %r2 # only keep R11-R2 + ashl $16, %r2, %r2 + bisl2 %r2, %r0 # saved register mask of the called function + pushl %r0 + pushl $0 + movl %sp, %fp + + # Invoke the function + pushal 2(%r1) # skip procedure entry mask + movl %r3, %r1 + bicpsw $0x000f + rsb + +9: + # Copy return value if necessary + tstl 16(%ap) + jeql 9f + movl 16(%ap), %r2 + + bbc $0, 12(%ap), 1f # CIF_FLAGS_CHAR + movb %r0, 0(%r2) + brb 9f +1: + bbc $1, 12(%ap), 1f # CIF_FLAGS_SHORT + movw %r0, 0(%r2) + brb 9f +1: + bbc $2, 12(%ap), 1f # CIF_FLAGS_INT + movl %r0, 0(%r2) + brb 9f +1: + bbc $3, 12(%ap), 1f # CIF_FLAGS_DINT + movq %r0, 0(%r2) + brb 9f +1: + movl %r1, %r0 # might have been a struct + #brb 9f + +9: + ret + +/* + * ffi_closure_elfbsd(void); + * invoked with %r0: ffi_closure *closure + */ + .globl ffi_closure_elfbsd + .type ffi_closure_elfbsd, @function + .align 2 +ffi_closure_elfbsd: + .word 0 + + # Allocate room on stack for return value + subl2 $8, %sp + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushal 4(%sp) # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + # Copy return 
value if necessary + bitb $1, %r0 # CIF_FLAGS_CHAR + beql 1f + movb 0(%sp), %r0 + brb 9f +1: + bitb $2, %r0 # CIF_FLAGS_SHORT + beql 1f + movw 0(%sp), %r0 + brb 9f +1: + bitb $4, %r0 # CIF_FLAGS_INT + beql 1f + movl 0(%sp), %r0 + brb 9f +1: + bitb $8, %r0 # CIF_FLAGS_DINT + beql 1f + movq 0(%sp), %r0 + #brb 9f +1: + +9: + ret + +/* + * ffi_closure_struct_elfbsd(void); + * invoked with %r0: ffi_closure *closure + * %r1: struct return address + */ + .globl ffi_closure_struct_elfbsd + .type ffi_closure_struct_elfbsd, @function + .align 2 +ffi_closure_struct_elfbsd: + .word 0 + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushl %r1 # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + ret diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi.c new file mode 100644 index 0000000..e52caec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffi.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. 
+ */ + +#include +#include + +#include +#include + +#define CIF_FLAGS_CHAR 1 /* for struct only */ +#define CIF_FLAGS_SHORT 2 /* for struct only */ +#define CIF_FLAGS_INT 4 +#define CIF_FLAGS_DINT 8 + +/* + * Foreign Function Interface API + */ + +void ffi_call_elfbsd (extended_cif *, unsigned, unsigned, void *, + void (*) ()); +void *ffi_prep_args (extended_cif *ecif, void *stack); + +void * +ffi_prep_args (extended_cif *ecif, void *stack) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + if (cif->rtype->size == sizeof (char)) + cif->flags = CIF_FLAGS_CHAR; + else if (cif->rtype->size == sizeof (short)) + cif->flags = CIF_FLAGS_SHORT; + else if (cif->rtype->size == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else if (cif->rtype->size == 2 * sizeof (int)) + cif->flags = CIF_FLAGS_DINT; + else + cif->flags = 0; + break; + + default: + if (cif->rtype->size <= sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = CIF_FLAGS_DINT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. 
*/ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->flags == 0) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ELFBSD: + ffi_call_elfbsd (&ecif, cif->bytes, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +void ffi_closure_elfbsd (void); +void ffi_closure_struct_elfbsd (void); +unsigned int ffi_closure_elfbsd_inner (ffi_closure *, void *, char *); + +static void +ffi_prep_closure_elfbsd (ffi_cif *cif, void **avalue, char *stackp) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + *p_argv = stackp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + stackp += z; + } +} + +unsigned int +ffi_closure_elfbsd_inner (ffi_closure *closure, void *resp, char *stack) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_elfbsd (cif, arg_area, stack); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + char *tramp = (char *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_ELFBSD); + + /* entry mask */ + *(unsigned short *)(tramp + 0) = 0x0000; + /* movl #closure, r0 */ + tramp[2] = 0xd0; + tramp[3] = 0x8f; + *(unsigned int *)(tramp + 4) = (unsigned int) closure; + tramp[8] = 0x50; + + if (cif->rtype->type == FFI_TYPE_STRUCT + && !cif->flags) + fn = &ffi_closure_struct_elfbsd; + else + fn = &ffi_closure_elfbsd; + + /* jmpl #fn */ + tramp[9] = 0x17; + tramp[10] = 0xef; + *(unsigned int *)(tramp + 11) = (unsigned int)fn + 2 - + (unsigned int)tramp - 9 - 6; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget.h new file mode 100644 index 0000000..2fc9488 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/vax/ffitarget.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_ELFBSD, + FFI_DEFAULT_ABI = FFI_ELFBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 15 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames.h new file mode 100644 index 0000000..7551021 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/asmnames.h @@ -0,0 +1,30 @@ +#ifndef ASMNAMES_H +#define ASMNAMES_H + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef __APPLE__ +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#if defined(__ELF__) && defined(__PIC__) +# define PLT(X) X@PLT +#else +# define PLT(X) X +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +#endif /* ASMNAMES_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi.c new file mode 100644 index 0000000..346e784 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi.c @@ -0,0 +1,764 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2017 Anthony Green + Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc. + Copyright (c) 2002 Ranjit Mathew + Copyright (c) 2002 Bo Thorsen + Copyright (c) 2002 Roger Sayle + Copyright (C) 2008, 2010 Free Software Foundation, Inc. + + x86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#if defined(__i386__) || defined(_M_IX86) +#include +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 80-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#if defined(__GNUC__) && !defined(__declspec) +# define __declspec(x) __attribute__((x)) +#endif + +#if defined(_MSC_VER) && defined(_M_IX86) +/* Stack is not 16-byte aligned on Windows. */ +#define STACK_ALIGN(bytes) (bytes) +#else +#define STACK_ALIGN(bytes) FFI_ALIGN (bytes, 16) +#endif + +/* Perform machine dependent cif processing. */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int i, n, flags, cabi = cif->abi; + + switch (cabi) + { + case FFI_SYSV: + case FFI_STDCALL: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + case FFI_PASCAL: + case FFI_REGISTER: + break; + default: + return FFI_BAD_ABI; + } + + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = X86_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = X86_RET_FLOAT; + break; + case FFI_TYPE_DOUBLE: + flags = X86_RET_DOUBLE; + break; + case FFI_TYPE_LONGDOUBLE: + flags = X86_RET_LDOUBLE; + break; + case FFI_TYPE_UINT8: + flags = X86_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = X86_RET_UINT16; + break; + case FFI_TYPE_SINT8: + flags = X86_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = X86_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = X86_RET_INT64; + break; + case FFI_TYPE_STRUCT: +#ifndef X86 + /* ??? This should be a different ABI rather than an ifdef. */ + if (cif->rtype->size == 1) + flags = X86_RET_STRUCT_1B; + else if (cif->rtype->size == 2) + flags = X86_RET_STRUCT_2B; + else if (cif->rtype->size == 4) + flags = X86_RET_INT32; + else if (cif->rtype->size == 8) + flags = X86_RET_INT64; + else +#endif + { + do_struct: + switch (cabi) + { + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_STDCALL: + case FFI_MS_CDECL: + flags = X86_RET_STRUCTARG; + break; + default: + flags = X86_RET_STRUCTPOP; + break; + } + /* Allocate space for return value pointer. 
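For orientation, the return-type flags computed above are what let the assembly widen a narrow integer result into a full register. A minimal, hedged sketch of the public libffi interface that exercises this path — the callee add8 and every other name here are illustrative, not part of this diff — is shown below; note that integral results narrower than ffi_arg must be read back through an ffi_arg-sized buffer, which is exactly what the X86_RET_SINT8/UINT16 widening supports.

    #include <ffi.h>
    #include <stdio.h>

    /* Hypothetical callee used only for illustration. */
    static signed char add8(signed char a, signed char b)
    {
      return (signed char)(a + b);
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[2] = { &ffi_type_sint8, &ffi_type_sint8 };
      signed char a = 100, b = -3;
      void *arg_values[2] = { &a, &b };
      ffi_arg result;   /* narrow integral returns need a full ffi_arg-sized buffer */

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint8, arg_types) != FFI_OK)
        return 1;

      ffi_call(&cif, FFI_FN(add8), &result, arg_values);
      printf("add8 -> %d\n", (int)(signed char)result);   /* prints 97 */
      return 0;
    }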
*/ + bytes += FFI_ALIGN (sizeof(void*), FFI_SIZEOF_ARG); + } + break; + case FFI_TYPE_COMPLEX: + switch (cif->rtype->elements[0]->type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + goto do_struct; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = X86_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = X86_RET_STRUCT_2B; + break; + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + cif->flags = flags; + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *t = cif->arg_types[i]; + + bytes = FFI_ALIGN (bytes, t->alignment); + bytes += FFI_ALIGN (t->size, FFI_SIZEOF_ARG); + } + cif->bytes = bytes; + + return FFI_OK; +} + +static ffi_arg +extend_basic_type(void *arg, int type) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)arg; + case FFI_TYPE_UINT8: + return *(UINT8 *)arg; + case FFI_TYPE_SINT16: + return *(SINT16 *)arg; + case FFI_TYPE_UINT16: + return *(UINT16 *)arg; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + return *(UINT32 *)arg; + + default: + abort(); + } +} + +struct call_frame +{ + void *ebp; /* 0 */ + void *retaddr; /* 4 */ + void (*fn)(void); /* 8 */ + int flags; /* 12 */ + void *rvalue; /* 16 */ + unsigned regs[3]; /* 20-28 */ +}; + +struct abi_params +{ + int dir; /* parameter growth direction */ + int static_chain; /* the static chain register used by gcc */ + int nregs; /* number of register parameters */ + int regs[3]; +}; + +static const struct abi_params abi_params[FFI_LAST_ABI] = { + [FFI_SYSV] = { 1, R_ECX, 0 }, + [FFI_THISCALL] = { 1, R_EAX, 1, { R_ECX } }, + [FFI_FASTCALL] = { 1, R_EAX, 2, { R_ECX, R_EDX } }, + [FFI_STDCALL] = { 1, R_ECX, 0 }, + [FFI_PASCAL] = { -1, R_ECX, 0 }, + /* ??? No defined static chain; gcc does not support REGISTER. */ + [FFI_REGISTER] = { -1, R_ECX, 3, { R_EAX, R_EDX, R_ECX } }, + [FFI_MS_CDECL] = { 1, R_ECX, 0 } +}; + +#ifdef HAVE_FASTCALL + #ifdef _MSC_VER + #define FFI_DECLARE_FASTCALL __fastcall + #else + #define FFI_DECLARE_FASTCALL __declspec(fastcall) + #endif +#else + #define FFI_DECLARE_FASTCALL +#endif + +extern void FFI_DECLARE_FASTCALL ffi_call_i386(struct call_frame *, char *) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, dir, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + dir = pabi->dir; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + stack = alloca(bytes + sizeof(*frame) + rsize); + argp = (dir < 0 ? 
stack + bytes : stack); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + frame->regs[pabi->static_chain] = (unsigned)closure; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. */ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + ffi_arg val = extend_basic_type (valp, t); + + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + frame->regs[pabi->regs[narg_reg++]] = val; + else if (dir < 0) + { + argp -= 4; + *(ffi_arg *)argp = val; + } + else + { + *(ffi_arg *)argp = val; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer paramters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + /* Alignment rules for arguments are quite complex. Vectors and + structures with 16 byte alignment get it. Note that long double + on Darwin does have 16 byte alignment, and does not get this + alignment if passed directly; a structure with a long double + inside, however, would get 16 byte alignment. Since libffi does + not support vectors, we need non concern ourselves with other + cases. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. */ + argp -= za; + memcpy (argp, valp, z); + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + memcpy (argp, valp, z); + argp += za; + } + } + } + FFI_ASSERT (dir > 0 || argp == stack); + + ffi_call_i386 (frame, stack); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +/** private members **/ + +void FFI_HIDDEN ffi_closure_i386(void); +void FFI_HIDDEN ffi_closure_STDCALL(void); +void FFI_HIDDEN ffi_closure_REGISTER(void); + +struct closure_frame +{ + unsigned rettemp[4]; /* 0 */ + unsigned regs[3]; /* 16-24 */ + ffi_cif *cif; /* 28 */ + void (*fun)(ffi_cif*,void*,void**,void*); /* 32 */ + void *user_data; /* 36 */ +}; + +int FFI_HIDDEN FFI_DECLARE_FASTCALL +ffi_closure_inner (struct closure_frame *frame, char *stack) +{ + ffi_cif *cif = frame->cif; + int cabi, i, n, flags, dir, narg_reg; + const struct abi_params *pabi; + ffi_type **arg_types; + char *argp; + void *rvalue; + void **avalue; + + cabi = cif->abi; + flags = cif->flags; + narg_reg = 0; + rvalue = frame->rettemp; + pabi = &abi_params[cabi]; + dir = pabi->dir; + argp = (dir < 0 ? 
stack + STACK_ALIGN (cif->bytes) : stack); + + switch (flags) + { + case X86_RET_STRUCTARG: + if (pabi->nregs > 0) + { + rvalue = (void *)frame->regs[pabi->regs[0]]; + narg_reg = 1; + frame->rettemp[0] = (unsigned)rvalue; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + rvalue = *(void **)argp; + argp += sizeof(void *); + frame->rettemp[0] = (unsigned)rvalue; + break; + } + + n = cif->nargs; + avalue = alloca(sizeof(void *) * n); + + arg_types = cif->arg_types; + for (i = 0; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + void *valp; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + valp = &frame->regs[pabi->regs[narg_reg++]]; + else if (dir < 0) + { + argp -= 4; + valp = argp; + } + else + { + valp = argp; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* See the comment in ffi_call_int. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer paramters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. */ + argp -= za; + valp = argp; + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + valp = argp; + argp += za; + } + } + + avalue[i] = valp; + } + + frame->fun (cif, rvalue, avalue, frame->user_data); + + if (cabi == FFI_STDCALL) + return flags + (cif->bytes << X86_RET_POP_SHIFT); + else + return flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int op = 0xb8; /* movl imm, %eax */ + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + dest = ffi_closure_i386; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_closure_STDCALL; + break; + case FFI_REGISTER: + dest = ffi_closure_REGISTER; + op = 0x68; /* pushl imm */ + break; + default: + return FFI_BAD_ABI; + } + + /* endbr32. */ + *(UINT32 *) tramp = 0xfb1e0ff3; + + /* movl or pushl immediate. 
*/ + tramp[4] = op; + *(void **)(tramp + 5) = codeloc; + + /* jmp dest */ + tramp[9] = 0xe9; + *(unsigned *)(tramp + 10) = (unsigned)dest - ((unsigned)codeloc + 14); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void FFI_HIDDEN ffi_go_closure_EAX(void); +void FFI_HIDDEN ffi_go_closure_ECX(void); +void FFI_HIDDEN ffi_go_closure_STDCALL(void); + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*dest)(void); + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_MS_CDECL: + dest = ffi_go_closure_ECX; + break; + case FFI_THISCALL: + case FFI_FASTCALL: + dest = ffi_go_closure_EAX; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_go_closure_STDCALL; + break; + case FFI_REGISTER: + default: + return FFI_BAD_ABI; + } + + closure->tramp = dest; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* ------- Native raw API support -------------------------------- */ + +#if !FFI_NO_RAW_API + +void FFI_HIDDEN ffi_closure_raw_SYSV(void); +void FFI_HIDDEN ffi_closure_raw_THISCALL(void); + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int i; + + /* We currently don't support certain kinds of arguments for raw + closures. This should be implemented by a separate assembly + language routine, since it would require argument processing, + something we don't do now for performance. */ + for (i = cif->nargs-1; i >= 0; i--) + switch (cif->arg_types[i]->type) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + return FFI_BAD_TYPEDEF; + } + + switch (cif->abi) + { + case FFI_THISCALL: + dest = ffi_closure_raw_THISCALL; + break; + case FFI_SYSV: + dest = ffi_closure_raw_SYSV; + break; + default: + return FFI_BAD_ABI; + } + + /* movl imm, %eax. */ + tramp[0] = 0xb8; + *(void **)(tramp + 1) = codeloc; + + /* jmp dest */ + tramp[5] = 0xe9; + *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void +ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *avalue) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + argp = stack = + (void *)((uintptr_t)alloca(bytes + sizeof(*frame) + rsize + 15) & ~16); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. 
*/ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + bytes -= sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; narg_reg < pabi->nregs && i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT && t != FFI_TYPE_FLOAT) + { + ffi_arg val = extend_basic_type (avalue, t); + frame->regs[pabi->regs[narg_reg++]] = val; + z = FFI_SIZEOF_ARG; + } + else + { + memcpy (argp, avalue, z); + z = FFI_ALIGN (z, FFI_SIZEOF_ARG); + argp += z; + } + avalue += z; + bytes -= z; + } + if (i < n) + memcpy (argp, avalue, bytes); + + ffi_call_i386 (frame, stack); +} +#endif /* !FFI_NO_RAW_API */ +#endif /* __i386__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64.c new file mode 100644 index 0000000..ed82e23 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffi64.c @@ -0,0 +1,888 @@ +/* ----------------------------------------------------------------------- + ffi64.c - Copyright (c) 2011, 2018 Anthony Green + Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 2008, 2010 Red Hat, Inc. + Copyright (c) 2002, 2007 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include "internal64.h" + +#ifdef __x86_64__ + +#define MAX_GPR_REGS 6 +#define MAX_SSE_REGS 8 + +#if defined(__INTEL_COMPILER) +#include "xmmintrin.h" +#define UINT128 __m128 +#else +#if defined(__SUNPRO_C) +#include +#define UINT128 __m128i +#else +#define UINT128 __int128_t +#endif +#endif + +union big_int_union +{ + UINT32 i32; + UINT64 i64; + UINT128 i128; +}; + +struct register_args +{ + /* Registers for argument passing. */ + UINT64 gpr[MAX_GPR_REGS]; + union big_int_union sse[MAX_SSE_REGS]; + UINT64 rax; /* ssecount */ + UINT64 r10; /* static chain */ +}; + +extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)) FFI_HIDDEN; + +/* All reference to register classes here is identical to the code in + gcc/config/i386/i386.c. Do *not* change one without the other. 
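The classification routines that follow decide how an aggregate travels. As a hedged sketch under the SysV rules implemented below, a 16-byte { double, int } aggregate splits into one SSE eightbyte and one INTEGER eightbyte, so it is passed in an XMM register plus a general-purpose register. The struct description uses only the documented ffi_type/elements convention; pair and scale are illustrative names, not part of this diff.

    #include <ffi.h>
    #include <stdio.h>

    struct pair { double d; int i; };   /* 16 bytes on x86-64: one SSE + one INTEGER eightbyte */

    static double scale(struct pair p) { return p.d * p.i; }

    int main(void)
    {
      /* Describe struct pair to libffi; size/alignment of 0 let ffi_prep_cif compute them. */
      ffi_type *pair_elems[] = { &ffi_type_double, &ffi_type_sint32, NULL };
      ffi_type pair_type = { 0, 0, FFI_TYPE_STRUCT, pair_elems };

      ffi_cif cif;
      ffi_type *arg_types[1] = { &pair_type };
      struct pair p = { 2.5, 4 };
      void *arg_values[1] = { &p };
      double result;

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_double, arg_types) != FFI_OK)
        return 1;

      ffi_call(&cif, FFI_FN(scale), &result, arg_values);
      printf("%f\n", result);   /* 10.0 */
      return 0;
    }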
*/ + +/* Register class used for passing given 64bit part of the argument. + These represent classes as documented by the PS ABI, with the + exception of SSESF, SSEDF classes, that are basically SSE class, + just gcc will use SF or DFmode move instead of DImode to avoid + reformatting penalties. + + Similary we play games with INTEGERSI_CLASS to use cheaper SImode moves + whenever possible (upper half does contain padding). */ +enum x86_64_reg_class + { + X86_64_NO_CLASS, + X86_64_INTEGER_CLASS, + X86_64_INTEGERSI_CLASS, + X86_64_SSE_CLASS, + X86_64_SSESF_CLASS, + X86_64_SSEDF_CLASS, + X86_64_SSEUP_CLASS, + X86_64_X87_CLASS, + X86_64_X87UP_CLASS, + X86_64_COMPLEX_X87_CLASS, + X86_64_MEMORY_CLASS + }; + +#define MAX_CLASSES 4 + +#define SSE_CLASS_P(X) ((X) >= X86_64_SSE_CLASS && X <= X86_64_SSEUP_CLASS) + +/* x86-64 register passing implementation. See x86-64 ABI for details. Goal + of this code is to classify each 8bytes of incoming argument by the register + class and assign registers accordingly. */ + +/* Return the union class of CLASS1 and CLASS2. + See the x86-64 PS ABI for details. */ + +static enum x86_64_reg_class +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) +{ + /* Rule #1: If both classes are equal, this is the resulting class. */ + if (class1 == class2) + return class1; + + /* Rule #2: If one of the classes is NO_CLASS, the resulting class is + the other class. */ + if (class1 == X86_64_NO_CLASS) + return class2; + if (class2 == X86_64_NO_CLASS) + return class1; + + /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ + if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */ + if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) + || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) + return X86_64_INTEGERSI_CLASS; + if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS + || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) + return X86_64_INTEGER_CLASS; + + /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, + MEMORY is used. */ + if (class1 == X86_64_X87_CLASS + || class1 == X86_64_X87UP_CLASS + || class1 == X86_64_COMPLEX_X87_CLASS + || class2 == X86_64_X87_CLASS + || class2 == X86_64_X87UP_CLASS + || class2 == X86_64_COMPLEX_X87_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #6: Otherwise class SSE is used. */ + return X86_64_SSE_CLASS; +} + +/* Classify the argument of type TYPE and mode MODE. + CLASSES will be filled by the register class used to pass each word + of the operand. The number of words is returned. In case the parameter + should be passed in memory, 0 is returned. As a special case for zero + sized containers, classes[0] will be NO_CLASS and 1 is returned. + + See the x86-64 PS ABI for details. 
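One concrete case of the classification described above: FFI_TYPE_LONGDOUBLE maps to the X87/X87UP pair, so (per examine_argument further down) a long double argument is passed in memory while a long double result comes back on the x87 stack (UNIX64_RET_X87). A hedged usage sketch, with half as an illustrative callee:

    #include <ffi.h>
    #include <stdio.h>

    static long double half(long double x) { return x / 2.0L; }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[1] = { &ffi_type_longdouble };
      long double x = 5.0L, result;
      void *arg_values[1] = { &x };

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_longdouble, arg_types) != FFI_OK)
        return 1;

      /* Argument goes on the stack (X87 class), result is read back from %st(0). */
      ffi_call(&cif, FFI_FN(half), &result, arg_values);
      printf("%Lf\n", result);   /* 2.5 */
      return 0;
    }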
+*/ +static size_t +classify_argument (ffi_type *type, enum x86_64_reg_class classes[], + size_t byte_offset) +{ + switch (type->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_integer: + { + size_t size = byte_offset + type->size; + + if (size <= 4) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (size <= 8) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (size <= 12) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else if (size <= 16) + { + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + else + FFI_ASSERT (0); + } + case FFI_TYPE_FLOAT: + if (!(byte_offset % 8)) + classes[0] = X86_64_SSESF_CLASS; + else + classes[0] = X86_64_SSE_CLASS; + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = X86_64_SSEDF_CLASS; + return 1; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_X87_CLASS; + classes[1] = X86_64_X87UP_CLASS; + return 2; +#endif + case FFI_TYPE_STRUCT: + { + const size_t UNITS_PER_WORD = 8; + size_t words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + ffi_type **ptr; + unsigned int i; + enum x86_64_reg_class subclasses[MAX_CLASSES]; + + /* If the struct is larger than 32 bytes, pass it on the stack. */ + if (type->size > 32) + return 0; + + for (i = 0; i < words; i++) + classes[i] = X86_64_NO_CLASS; + + /* Zero sized arrays or structures are NO_CLASS. We return 0 to + signalize memory class, so handle it as special case. */ + if (!words) + { + case FFI_TYPE_VOID: + classes[0] = X86_64_NO_CLASS; + return 1; + } + + /* Merge the fields of structure. */ + for (ptr = type->elements; *ptr != NULL; ptr++) + { + size_t num; + + byte_offset = FFI_ALIGN (byte_offset, (*ptr)->alignment); + + num = classify_argument (*ptr, subclasses, byte_offset % 8); + if (num == 0) + return 0; + for (i = 0; i < num; i++) + { + size_t pos = byte_offset / 8; + classes[i + pos] = + merge_classes (subclasses[i], classes[i + pos]); + } + + byte_offset += (*ptr)->size; + } + + if (words > 2) + { + /* When size > 16 bytes, if the first one isn't + X86_64_SSE_CLASS or any other ones aren't + X86_64_SSEUP_CLASS, everything should be passed in + memory. */ + if (classes[0] != X86_64_SSE_CLASS) + return 0; + + for (i = 1; i < words; i++) + if (classes[i] != X86_64_SSEUP_CLASS) + return 0; + } + + /* Final merger cleanup. */ + for (i = 0; i < words; i++) + { + /* If one class is MEMORY, everything should be passed in + memory. */ + if (classes[i] == X86_64_MEMORY_CLASS) + return 0; + + /* The X86_64_SSEUP_CLASS should be always preceded by + X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */ + if (i > 1 && classes[i] == X86_64_SSEUP_CLASS + && classes[i - 1] != X86_64_SSE_CLASS + && classes[i - 1] != X86_64_SSEUP_CLASS) + { + /* The first one should never be X86_64_SSEUP_CLASS. */ + FFI_ASSERT (i != 0); + classes[i] = X86_64_SSE_CLASS; + } + + /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS, + everything should be passed in memory. */ + if (i > 1 && classes[i] == X86_64_X87UP_CLASS + && (classes[i - 1] != X86_64_X87_CLASS)) + { + /* The first one should never be X86_64_X87UP_CLASS. 
*/ + FFI_ASSERT (i != 0); + return 0; + } + } + return words; + } + case FFI_TYPE_COMPLEX: + { + ffi_type *inner = type->elements[0]; + switch (inner->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + goto do_integer; + + case FFI_TYPE_FLOAT: + classes[0] = X86_64_SSE_CLASS; + if (byte_offset % 8) + { + classes[1] = X86_64_SSESF_CLASS; + return 2; + } + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = classes[1] = X86_64_SSEDF_CLASS; + return 2; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_COMPLEX_X87_CLASS; + return 1; +#endif + } + } + } + abort(); +} + +/* Examine the argument and return set number of register required in each + class. Return zero iff parameter should be passed in memory, otherwise + the number of registers. */ + +static size_t +examine_argument (ffi_type *type, enum x86_64_reg_class classes[MAX_CLASSES], + _Bool in_return, int *pngpr, int *pnsse) +{ + size_t n; + unsigned int i; + int ngpr, nsse; + + n = classify_argument (type, classes, 0); + if (n == 0) + return 0; + + ngpr = nsse = 0; + for (i = 0; i < n; ++i) + switch (classes[i]) + { + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + ngpr++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSESF_CLASS: + case X86_64_SSEDF_CLASS: + nsse++; + break; + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_X87_CLASS: + case X86_64_X87UP_CLASS: + case X86_64_COMPLEX_X87_CLASS: + return in_return != 0; + default: + abort (); + } + + *pngpr = ngpr; + *pnsse = nsse; + + return n; +} + +/* Perform machine dependent cif processing. */ + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_cif_machdep_efi64(ffi_cif *cif); +#endif + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int gprcount, ssecount, i, avn, ngpr, nsse; + unsigned flags; + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t bytes, n, rtype_size; + ffi_type *rtype; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_cif_machdep_efi64(cif); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + gprcount = ssecount = 0; + + rtype = cif->rtype; + rtype_size = rtype->size; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = UNIX64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = UNIX64_RET_UINT8; + break; + case FFI_TYPE_SINT8: + flags = UNIX64_RET_SINT8; + break; + case FFI_TYPE_UINT16: + flags = UNIX64_RET_UINT16; + break; + case FFI_TYPE_SINT16: + flags = UNIX64_RET_SINT16; + break; + case FFI_TYPE_UINT32: + flags = UNIX64_RET_UINT32; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = UNIX64_RET_SINT32; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM32; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_XMM64; + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87; + break; +#endif + case FFI_TYPE_STRUCT: + n = examine_argument (cif->rtype, classes, 1, &ngpr, &nsse); + if (n == 0) + { + /* The return value is passed in memory. A pointer to that + memory is the first argument. Allocate a register for it. */ + gprcount++; + /* We don't have to do anything in asm for the return. 
*/ + flags = UNIX64_RET_VOID | UNIX64_FLAG_RET_IN_MEM; + } + else + { + _Bool sse0 = SSE_CLASS_P (classes[0]); + + if (rtype_size == 4 && sse0) + flags = UNIX64_RET_XMM32; + else if (rtype_size == 8) + flags = sse0 ? UNIX64_RET_XMM64 : UNIX64_RET_INT64; + else + { + _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]); + if (sse0 && sse1) + flags = UNIX64_RET_ST_XMM0_XMM1; + else if (sse0) + flags = UNIX64_RET_ST_XMM0_RAX; + else if (sse1) + flags = UNIX64_RET_ST_RAX_XMM0; + else + flags = UNIX64_RET_ST_RAX_RDX; + flags |= rtype_size << UNIX64_SIZE_SHIFT; + } + } + break; + case FFI_TYPE_COMPLEX: + switch (rtype->elements[0]->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_ST_RAX_RDX | ((unsigned) rtype_size << UNIX64_SIZE_SHIFT); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM64; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_ST_XMM0_XMM1 | (16 << UNIX64_SIZE_SHIFT); + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87_2; + break; +#endif + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + + /* Go over all arguments and determine the way they should be passed. + If it's in a register and there is space for it, let that be so. If + not, add it's size to the stack byte count. */ + for (bytes = 0, i = 0, avn = cif->nargs; i < avn; i++) + { + if (examine_argument (cif->arg_types[i], classes, 0, &ngpr, &nsse) == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = cif->arg_types[i]->alignment; + + if (align < 8) + align = 8; + + bytes = FFI_ALIGN (bytes, align); + bytes += cif->arg_types[i]->size; + } + else + { + gprcount += ngpr; + ssecount += nsse; + } + } + if (ssecount) + flags |= UNIX64_FLAG_XMM_ARGS; + + cif->flags = flags; + cif->bytes = (unsigned) FFI_ALIGN (bytes, 8); + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + enum x86_64_reg_class classes[MAX_CLASSES]; + char *stack, *argp; + ffi_type **arg_types; + int gprcount, ssecount, ngpr, nsse, i, avn, flags; + struct register_args *reg_args; + + /* Can't call 32-bit mode from 64-bit mode. */ + FFI_ASSERT (cif->abi == FFI_UNIX64); + + /* If the return value is a struct and we don't have a return value + address then we need to make one. Otherwise we can ignore it. */ + flags = cif->flags; + if (rvalue == NULL) + { + if (flags & UNIX64_FLAG_RET_IN_MEM) + rvalue = alloca (cif->rtype->size); + else + flags = UNIX64_RET_VOID; + } + + /* Allocate the space for the arguments, plus 4 words of temp space. */ + stack = alloca (sizeof (struct register_args) + cif->bytes + 4*8); + reg_args = (struct register_args *) stack; + argp = stack + sizeof (struct register_args); + + reg_args->r10 = (uintptr_t) closure; + + gprcount = ssecount = 0; + + /* If the return value is passed in memory, add the pointer as the + first integer argument. 
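The UNIX64_FLAG_RET_IN_MEM path above is the one a caller hits for aggregates that fail register classification. In the hedged sketch below, the 32-byte, all-integer struct is returned in memory and libffi itself supplies the hidden pointer (the caller's rvalue buffer, placed in the first integer argument register, %rdi on SysV); make_big and the field names are illustrative only.

    #include <ffi.h>
    #include <stdio.h>

    struct big { long a, b, c, d; };   /* 32 bytes, all INTEGER class: returned via hidden pointer */

    static struct big make_big(long seed)
    {
      struct big r = { seed, seed + 1, seed + 2, seed + 3 };
      return r;
    }

    int main(void)
    {
      ffi_type *big_elems[] = { &ffi_type_slong, &ffi_type_slong,
                                &ffi_type_slong, &ffi_type_slong, NULL };
      ffi_type big_type = { 0, 0, FFI_TYPE_STRUCT, big_elems };

      ffi_cif cif;
      ffi_type *arg_types[1] = { &ffi_type_slong };
      long seed = 10;
      void *arg_values[1] = { &seed };
      struct big result;   /* caller-provided buffer; its address becomes the hidden argument */

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &big_type, arg_types) != FFI_OK)
        return 1;

      ffi_call(&cif, FFI_FN(make_big), &result, arg_values);
      printf("%ld %ld\n", result.a, result.d);   /* 10 13 */
      return 0;
    }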
*/ + if (flags & UNIX64_FLAG_RET_IN_MEM) + reg_args->gpr[gprcount++] = (unsigned long) rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0; i < avn; ++i) + { + size_t n, size = arg_types[i]->size; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + memcpy (argp, avalue[i], size); + argp += size; + } + else + { + /* The argument is passed entirely in registers. */ + char *a = (char *) avalue[i]; + unsigned int j; + + for (j = 0; j < n; j++, a += 8, size -= 8) + { + switch (classes[j]) + { + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + /* Sign-extend integer arguments passed in general + purpose registers, to cope with the fact that + LLVM incorrectly assumes that this will be done + (the x86-64 PS ABI does not specify this). */ + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a); + break; + case FFI_TYPE_SINT16: + reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a); + break; + case FFI_TYPE_SINT32: + reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a); + break; + default: + reg_args->gpr[gprcount] = 0; + memcpy (®_args->gpr[gprcount], a, size); + } + gprcount++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSEDF_CLASS: + memcpy (®_args->sse[ssecount++].i64, a, sizeof(UINT64)); + break; + case X86_64_SSESF_CLASS: + memcpy (®_args->sse[ssecount++].i32, a, sizeof(UINT32)); + break; + default: + abort(); + } + } + } + } + reg_args->rax = ssecount; + + ffi_call_unix64 (stack, cif->bytes + sizeof (struct register_args), + flags, rvalue, fn); +} + +#ifndef __ILP32__ +extern void +ffi_call_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue); +#endif + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_efi64(cif, fn, rvalue, avalue); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifndef __ILP32__ +extern void +ffi_call_go_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); +#endif + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_go_efi64(cif, fn, rvalue, avalue, closure); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_unix64(void) FFI_HIDDEN; +extern void ffi_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_closure_loc_efi64(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[24] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 
0x00 + }; + void (*dest)(void); + char *tramp = closure->tramp; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_closure_loc_efi64(closure, cif, fun, user_data, codeloc); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + if (cif->flags & UNIX64_FLAG_XMM_ARGS) + dest = ffi_closure_unix64_sse; + else + dest = ffi_closure_unix64; + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)dest; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_unix64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, + struct register_args *reg_args, + char *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn; + int gprcount, ssecount, ngpr, nsse; + int flags; + + avn = cif->nargs; + flags = cif->flags; + avalue = alloca(avn * sizeof(void *)); + gprcount = ssecount = 0; + + if (flags & UNIX64_FLAG_RET_IN_MEM) + { + /* On return, %rax will contain the address that was passed + by the caller in %rdi. */ + void *r = (void *)(uintptr_t)reg_args->gpr[gprcount++]; + *(void **)rvalue = r; + rvalue = r; + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + } + + arg_types = cif->arg_types; + for (i = 0; i < avn; ++i) + { + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t n; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + avalue[i] = argp; + argp += arg_types[i]->size; + } + /* If the argument is in a single register, or two consecutive + integer registers, then we can use that address directly. */ + else if (n == 1 + || (n == 2 && !(SSE_CLASS_P (classes[0]) + || SSE_CLASS_P (classes[1])))) + { + /* The argument is in a single register. */ + if (SSE_CLASS_P (classes[0])) + { + avalue[i] = ®_args->sse[ssecount]; + ssecount += n; + } + else + { + avalue[i] = ®_args->gpr[gprcount]; + gprcount += n; + } + } + /* Otherwise, allocate space to make them consecutive. */ + else + { + char *a = alloca (16); + unsigned int j; + + avalue[i] = a; + for (j = 0; j < n; j++, a += 8) + { + if (SSE_CLASS_P (classes[j])) + memcpy (a, ®_args->sse[ssecount++], 8); + else + memcpy (a, ®_args->gpr[gprcount++], 8); + } + } + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell assembly how to perform return type promotions. */ + return flags; +} + +extern void ffi_go_closure_unix64(void) FFI_HIDDEN; +extern void ffi_go_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_go_closure_efi64(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)); +#endif + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_go_closure_efi64(closure, cif, fun); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + closure->tramp = (cif->flags & UNIX64_FLAG_XMM_ARGS + ? 
ffi_go_closure_unix64_sse + : ffi_go_closure_unix64); + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget.h new file mode 100644 index 0000000..a34f3e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffitarget.h @@ -0,0 +1,160 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2014, 2018 Anthony Green + Copyright (c) 1996-2003, 2010 Red Hat, Inc. + Copyright (C) 2008 Free Software Foundation, Inc. + + Target configuration macros for x86 and x86-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +/* For code common to all platforms on x86 and x86_64. 
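The trampoline bytes and ffi_closure_unix64_inner above are normally reached through the documented closure API rather than built by hand. A hedged sketch of that flow — add_handler, bias and the rest are illustrative names — allocates the closure, binds it to a cif, and then calls the executable address like an ordinary function pointer.

    #include <ffi.h>
    #include <stdio.h>

    /* Closure handler: receives boxed arguments, writes the boxed result. */
    static void add_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
    {
      (void)cif;
      int bias = *(int *)user_data;
      /* Narrow integral results must be written as a full ffi_arg. */
      *(ffi_arg *)ret = *(int *)args[0] + *(int *)args[1] + bias;
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[2] = { &ffi_type_sint32, &ffi_type_sint32 };
      void *codeloc;
      ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);
      int bias = 100;
      int (*fn)(int, int);

      if (closure == NULL
          || ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint32, arg_types) != FFI_OK
          || ffi_prep_closure_loc(closure, &cif, add_handler, &bias, codeloc) != FFI_OK)
        return 1;

      fn = (int (*)(int, int))codeloc;   /* codeloc is the executable trampoline address */
      printf("%d\n", fn(2, 3));          /* 105 */

      ffi_closure_free(closure);
      return 0;
    }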
*/ +#define X86_ANY + +#if defined (X86_64) && defined (__i386__) +#undef X86_64 +#define X86 +#endif + +#ifdef X86_WIN64 +#define FFI_SIZEOF_ARG 8 +#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */ +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#ifndef _MSC_VER +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +#ifdef X86_WIN64 +#ifdef _MSC_VER +typedef unsigned __int64 ffi_arg; +typedef __int64 ffi_sarg; +#else +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#endif +#else +#if defined __x86_64__ && defined __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif +#endif + +typedef enum ffi_abi { +#if defined(X86_WIN64) + FFI_FIRST_ABI = 0, + FFI_WIN64, /* sizeof(long double) == 8 - microsoft compilers */ + FFI_GNUW64, /* sizeof(long double) == 16 - GNU compilers */ + FFI_LAST_ABI, +#ifdef __GNUC__ + FFI_DEFAULT_ABI = FFI_GNUW64 +#else + FFI_DEFAULT_ABI = FFI_WIN64 +#endif + +#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) + FFI_FIRST_ABI = 1, + FFI_UNIX64, + FFI_WIN64, + FFI_EFI64 = FFI_WIN64, + FFI_GNUW64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX64 + +#elif defined(X86_WIN32) + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_STDCALL = 2, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_MS_CDECL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_MS_CDECL +#else + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_STDCALL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_MS_CDECL = 8, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +#endif +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 + +#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1) +#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2) +#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3) +#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4) + +#if defined (X86_64) || defined(X86_WIN64) \ + || (defined (__x86_64__) && defined (X86_DARWIN)) +/* 4 bytes of ENDBR64 + 7 bytes of LEA + 6 bytes of JMP + 7 bytes of NOP + + 8 bytes of pointer. */ +# define FFI_TRAMPOLINE_SIZE 32 +# define FFI_NATIVE_RAW_API 0 +#else +/* 4 bytes of ENDBR32 + 5 bytes of MOV + 5 bytes of JMP + 2 unused + bytes. */ +# define FFI_TRAMPOLINE_SIZE 16 +# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */ +#endif + +#if !defined(GENERATE_LIBFFI_MAP) && defined(__ASSEMBLER__) \ + && defined(__CET__) +# include +# define _CET_NOTRACK notrack +#else +# define _CET_ENDBR +# define _CET_NOTRACK +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64.c new file mode 100644 index 0000000..034dffd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/ffiw64.c @@ -0,0 +1,313 @@ +/* ----------------------------------------------------------------------- + ffiw64.c - Copyright (c) 2018 Anthony Green + Copyright (c) 2014 Red Hat, Inc. 
+ + x86 win64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__x86_64__) || defined(_M_AMD64) +#include +#include +#include +#include + +#ifdef X86_WIN64 +#define EFI64(name) name +#else +#define EFI64(name) FFI_HIDDEN name##_efi64 +#endif + +struct win64_call_frame +{ + UINT64 rbp; /* 0 */ + UINT64 retaddr; /* 8 */ + UINT64 fn; /* 16 */ + UINT64 flags; /* 24 */ + UINT64 rvalue; /* 32 */ +}; + +extern void ffi_call_win64 (void *stack, struct win64_call_frame *, + void *closure) FFI_HIDDEN; + +ffi_status FFI_HIDDEN +EFI64(ffi_prep_cif_machdep)(ffi_cif *cif) +{ + int flags, n; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + flags = cif->rtype->type; + switch (flags) + { + default: + break; + case FFI_TYPE_LONGDOUBLE: + /* GCC returns long double values by reference, like a struct */ + if (cif->abi == FFI_GNUW64) + flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_COMPLEX: + flags = FFI_TYPE_STRUCT; + /* FALLTHRU */ + case FFI_TYPE_STRUCT: + switch (cif->rtype->size) + { + case 8: + flags = FFI_TYPE_UINT64; + break; + case 4: + flags = FFI_TYPE_SMALL_STRUCT_4B; + break; + case 2: + flags = FFI_TYPE_SMALL_STRUCT_2B; + break; + case 1: + flags = FFI_TYPE_SMALL_STRUCT_1B; + break; + } + break; + } + cif->flags = flags; + + /* Each argument either fits in a register, an 8 byte slot, or is + passed by reference with the pointer in the 8 byte slot. */ + n = cif->nargs; + n += (flags == FFI_TYPE_STRUCT); + if (n < 4) + n = 4; + cif->bytes = n * 8; + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + int i, j, n, flags; + UINT64 *stack; + size_t rsize; + struct win64_call_frame *frame; + + FFI_ASSERT(cif->abi == FFI_GNUW64 || cif->abi == FFI_WIN64); + + flags = cif->flags; + rsize = 0; + + /* If we have no return value for a structure, we need to create one. + Otherwise we can ignore the return type entirely. 
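The rvalue == NULL handling in ffi_call_int above allocates scratch space (or downgrades the call to a void return) so the assembly always has somewhere to store a result. From the caller's side, the straightforward portable pattern is to pass NULL only for a void return, as in this hedged sketch; log_value is an illustrative callee, not part of this diff.

    #include <ffi.h>
    #include <stdio.h>

    static void log_value(int v) { printf("value = %d\n", v); }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[1] = { &ffi_type_sint32 };
      int v = 42;
      void *arg_values[1] = { &v };

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, arg_types) != FFI_OK)
        return 1;

      /* void return: no result buffer is needed, so rvalue may simply be NULL. */
      ffi_call(&cif, FFI_FN(log_value), NULL, arg_values);
      return 0;
    }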
*/ + if (rvalue == NULL) + { + if (flags == FFI_TYPE_STRUCT) + rsize = cif->rtype->size; + else + flags = FFI_TYPE_VOID; + } + + stack = alloca(cif->bytes + sizeof(struct win64_call_frame) + rsize); + frame = (struct win64_call_frame *)((char *)stack + cif->bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = (uintptr_t)fn; + frame->flags = flags; + frame->rvalue = (uintptr_t)rvalue; + + j = 0; + if (flags == FFI_TYPE_STRUCT) + { + stack[0] = (uintptr_t)rvalue; + j = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++j) + { + switch (cif->arg_types[i]->size) + { + case 8: + stack[j] = *(UINT64 *)avalue[i]; + break; + case 4: + stack[j] = *(UINT32 *)avalue[i]; + break; + case 2: + stack[j] = *(UINT16 *)avalue[i]; + break; + case 1: + stack[j] = *(UINT8 *)avalue[i]; + break; + default: + stack[j] = (uintptr_t)avalue[i]; + break; + } + } + + ffi_call_win64 (stack, frame, closure); +} + +void +EFI64(ffi_call)(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +EFI64(ffi_call_go)(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_win64(void) FFI_HIDDEN; +extern void ffi_go_closure_win64(void) FFI_HIDDEN; + +ffi_status +EFI64(ffi_prep_closure_loc)(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[FFI_TRAMPOLINE_SIZE - 8] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + char *tramp = closure->tramp; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)ffi_closure_win64; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +EFI64(ffi_prep_go_closure)(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + closure->tramp = ffi_go_closure_win64; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +struct win64_closure_frame +{ + UINT64 rvalue[2]; + UINT64 fargs[4]; + UINT64 retaddr; + UINT64 args[]; +}; + +/* Force the inner function to use the MS ABI. When compiling on win64 + this is a nop. When compiling on unix, this simplifies the assembly, + and places the burden of saving the extra call-saved registers on + the compiler. */ +int FFI_HIDDEN __attribute__((ms_abi)) +ffi_closure_win64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + struct win64_closure_frame *frame) +{ + void **avalue; + void *rvalue; + int i, n, nreg, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + rvalue = frame->rvalue; + nreg = 0; + + /* When returning a structure, the address is in the first argument. + We must also be prepared to return the same address in eax, so + install that address in the frame and pretend we return a pointer. 
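The Win64 marshalling above reduces every argument to a single 8-byte slot: values of size 1, 2, 4 or 8 are copied into the slot, anything else is passed by reference. The caller-side API is unchanged either way, as in this hedged sketch: under FFI_WIN64/FFI_GNUW64 the 16-byte vec2 below travels by reference, while SysV would split it across two XMM registers. vec2 and dot are illustrative names.

    #include <ffi.h>
    #include <stdio.h>

    struct vec2 { double x, y; };   /* 16 bytes: not 1/2/4/8, so Win64 passes its address */

    static double dot(struct vec2 a, struct vec2 b) { return a.x * b.x + a.y * b.y; }

    int main(void)
    {
      ffi_type *vec2_elems[] = { &ffi_type_double, &ffi_type_double, NULL };
      ffi_type vec2_type = { 0, 0, FFI_TYPE_STRUCT, vec2_elems };

      ffi_cif cif;
      ffi_type *arg_types[2] = { &vec2_type, &vec2_type };
      struct vec2 a = { 1.0, 2.0 }, b = { 3.0, 4.0 };
      void *arg_values[2] = { &a, &b };
      double result;

      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_double, arg_types) != FFI_OK)
        return 1;

      ffi_call(&cif, FFI_FN(dot), &result, arg_values);
      printf("%f\n", result);   /* 11.0 */
      return 0;
    }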
*/ + flags = cif->flags; + if (flags == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)frame->args[0]; + frame->rvalue[0] = frame->args[0]; + nreg = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++nreg) + { + size_t size = cif->arg_types[i]->size; + size_t type = cif->arg_types[i]->type; + void *a; + + if (type == FFI_TYPE_DOUBLE || type == FFI_TYPE_FLOAT) + { + if (nreg < 4) + a = &frame->fargs[nreg]; + else + a = &frame->args[nreg]; + } + else if (size == 1 || size == 2 || size == 4 || size == 8) + a = &frame->args[nreg]; + else + a = (void *)(uintptr_t)frame->args[nreg]; + + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + return flags; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal.h new file mode 100644 index 0000000..09771ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal.h @@ -0,0 +1,29 @@ +#define X86_RET_FLOAT 0 +#define X86_RET_DOUBLE 1 +#define X86_RET_LDOUBLE 2 +#define X86_RET_SINT8 3 +#define X86_RET_SINT16 4 +#define X86_RET_UINT8 5 +#define X86_RET_UINT16 6 +#define X86_RET_INT64 7 +#define X86_RET_INT32 8 +#define X86_RET_VOID 9 +#define X86_RET_STRUCTPOP 10 +#define X86_RET_STRUCTARG 11 +#define X86_RET_STRUCT_1B 12 +#define X86_RET_STRUCT_2B 13 +#define X86_RET_UNUSED14 14 +#define X86_RET_UNUSED15 15 + +#define X86_RET_TYPE_MASK 15 +#define X86_RET_POP_SHIFT 4 + +#define R_EAX 0 +#define R_EDX 1 +#define R_ECX 2 + +#ifdef __PCC__ +# define HAVE_FASTCALL 0 +#else +# define HAVE_FASTCALL 1 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64.h new file mode 100644 index 0000000..512e955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/internal64.h @@ -0,0 +1,22 @@ +#define UNIX64_RET_VOID 0 +#define UNIX64_RET_UINT8 1 +#define UNIX64_RET_UINT16 2 +#define UNIX64_RET_UINT32 3 +#define UNIX64_RET_SINT8 4 +#define UNIX64_RET_SINT16 5 +#define UNIX64_RET_SINT32 6 +#define UNIX64_RET_INT64 7 +#define UNIX64_RET_XMM32 8 +#define UNIX64_RET_XMM64 9 +#define UNIX64_RET_X87 10 +#define UNIX64_RET_X87_2 11 +#define UNIX64_RET_ST_XMM0_RAX 12 +#define UNIX64_RET_ST_RAX_XMM0 13 +#define UNIX64_RET_ST_XMM0_XMM1 14 +#define UNIX64_RET_ST_RAX_RDX 15 + +#define UNIX64_RET_LAST 15 + +#define UNIX64_FLAG_RET_IN_MEM (1 << 10) +#define UNIX64_FLAG_XMM_ARGS (1 << 11) +#define UNIX64_SIZE_SHIFT 12 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv.S new file mode 100644 index 0000000..6d56483 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv.S @@ -0,0 +1,1138 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. 
+ + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __i386__ +#ifndef _MSC_VER + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef X86_DARWIN +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +/* Handle win32 fastcall name mangling. */ +#ifdef X86_WIN32 +# define ffi_call_i386 @ffi_call_i386@8 +# define ffi_closure_inner @ffi_closure_inner@8 +#else +# define ffi_call_i386 C(ffi_call_i386) +# define ffi_closure_inner C(ffi_closure_inner) +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + .balign 16 + .globl ffi_call_i386 + FFI_HIDDEN(ffi_call_i386) + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ffi_call_i386: +L(UW0): + # cfi_startproc + _CET_ENDBR +#if !HAVE_FASTCALL + movl 4(%esp), %ecx + movl 8(%esp), %edx +#endif + movl (%esp), %eax /* move the return address */ + movl %ebp, (%ecx) /* store %ebp into local frame */ + movl %eax, 4(%ecx) /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. 
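The hard-coded frame offsets in the assembly that follows (8(%ebp) for the function pointer, 12(%ebp) for the return-type flags, 16(%ebp) for the result address, 20+R_EAX*4(%ebp) for the register arguments) depend on the layout of struct call_frame defined in src/x86/ffi.c earlier in this diff. Below is a hedged, i386-only, compile-time sketch of that assumption; call_frame_mirror is a local copy made solely for the check and is not part of the library.

    /* Compile-time layout check (no runtime behavior); mirrors struct call_frame above. */
    #if defined(__i386__)
    #include <stddef.h>

    struct call_frame_mirror
    {
      void *ebp;          /* 0  */
      void *retaddr;      /* 4  */
      void (*fn)(void);   /* 8  */
      int flags;          /* 12 */
      void *rvalue;       /* 16 */
      unsigned regs[3];   /* 20, 24, 28: %eax, %edx, %ecx slots */
    };

    _Static_assert(offsetof(struct call_frame_mirror, fn)     == 8,  "fn slot");
    _Static_assert(offsetof(struct call_frame_mirror, flags)  == 12, "flags slot");
    _Static_assert(offsetof(struct call_frame_mirror, rvalue) == 16, "rvalue slot");
    _Static_assert(offsetof(struct call_frame_mirror, regs)   == 20, "register args start at 20");
    #endif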
*/ + movl %ecx, %ebp +L(UW1): + # cfi_def_cfa(%ebp, 8) + # cfi_rel_offset(%ebp, 0) + + movl %edx, %esp /* set outgoing argument stack */ + movl 20+R_EAX*4(%ebp), %eax /* set register arguments */ + movl 20+R_EDX*4(%ebp), %edx + movl 20+R_ECX*4(%ebp), %ecx + + call *8(%ebp) + + movl 12(%ebp), %ecx /* load return type code */ + movl %ebx, 8(%ebp) /* preserve %ebx */ +L(UW2): + # cfi_rel_offset(%ebx, 8) + + andl $X86_RET_TYPE_MASK, %ecx +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc1): + leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx +#else + leal L(store_table)(,%ecx, 8), %ebx +#endif + movl 16(%ebp), %ecx /* load result address */ + _CET_NOTRACK jmp *%ebx + + .balign 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstps (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstpl (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstpt (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movswl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzwl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_INT64) + movl %edx, 4(%ecx) + /* fallthru */ +E(L(store_table), X86_RET_INT32) + movl %eax, (%ecx) + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + movl 8(%ebp), %ebx + movl %ebp, %esp + popl %ebp +L(UW3): + # cfi_remember_state + # cfi_def_cfa(%esp, 4) + # cfi_restore(%ebx) + # cfi_restore(%ebp) + ret +L(UW4): + # cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + movb %al, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + movw %ax, (%ecx) + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + ud2 +E(L(store_table), X86_RET_UNUSED15) + ud2 + +L(UW5): + # cfi_endproc +ENDF(ffi_call_i386) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. 
*/ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +#define FFI_CLOSURE_SAVE_REGS \ + movl %eax, closure_CF+16+R_EAX*4(%esp); \ + movl %edx, closure_CF+16+R_EDX*4(%esp); \ + movl %ecx, closure_CF+16+R_ECX*4(%esp) + +#define FFI_CLOSURE_COPY_TRAMP_DATA \ + movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \ + movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \ + movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \ + movl %edx, closure_CF+28(%esp); \ + movl %ecx, closure_CF+32(%esp); \ + movl %eax, closure_CF+36(%esp) + +#if HAVE_FASTCALL +# define FFI_CLOSURE_PREP_CALL \ + movl %esp, %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ +#else +# define FFI_CLOSURE_PREP_CALL \ + leal closure_CF(%esp), %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ \ + movl %ecx, (%esp); \ + movl %edx, 4(%esp) +#endif + +#define FFI_CLOSURE_CALL_INNER(UWN) \ + call ffi_closure_inner + +#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))(, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx + +#ifdef __PIC__ +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + call C(__x86.get_pc_thunk.dx); \ +L(C1(pc,N)): \ + leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# else +# define FFI_CLOSURE_CALL_INNER_SAVE_EBX +# undef FFI_CLOSURE_CALL_INNER +# define FFI_CLOSURE_CALL_INNER(UWN) \ + movl %ebx, 40(%esp); /* save ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_rel_offset(%ebx, 40); */ \ + call C(__x86.get_pc_thunk.bx); /* load got register */ \ + addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \ + call ffi_closure_inner@PLT +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \ + movl 40(%esp), %ebx; /* restore ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_restore(%ebx); */ \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + + .balign 16 + .globl C(ffi_go_closure_EAX) + FFI_HIDDEN(C(ffi_go_closure_EAX)) +C(ffi_go_closure_EAX): +L(UW6): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW7): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%eax), %edx /* copy cif */ + movl 8(%eax), %ecx /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %ecx, closure_CF+32(%esp) + movl %eax, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + # cfi_endproc +ENDF(C(ffi_go_closure_EAX)) + + .balign 16 + .globl C(ffi_go_closure_ECX) + FFI_HIDDEN(C(ffi_go_closure_ECX)) +C(ffi_go_closure_ECX): +L(UW9): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW10): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + # cfi_endproc +ENDF(C(ffi_go_closure_ECX)) + +/* The closure entry points are reached from the ffi_closure trampoline. 
+ On entry, %eax contains the address of the ffi_closure. */ + + .balign 16 + .globl C(ffi_closure_i386) + FFI_HIDDEN(C(ffi_closure_i386)) + +C(ffi_closure_i386): +L(UW12): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW13): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. */ +L(do_closure_i386): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP(2, 15) + + .balign 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + flds closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fldl closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + addl $closure_FS, %esp +L(UW16): + # cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + addl $closure_FS, %esp +L(UW18): + # cfi_adjust_cfa_offset(-closure_FS) + ret $4 +L(UW19): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + ud2 +E(L(load_table2), X86_RET_UNUSED15) + ud2 + +L(UW20): + # cfi_endproc +ENDF(C(ffi_closure_i386)) + + .balign 16 + .globl C(ffi_go_closure_STDCALL) + FFI_HIDDEN(C(ffi_go_closure_STDCALL)) +C(ffi_go_closure_STDCALL): +L(UW21): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW22): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + # cfi_endproc +ENDF(C(ffi_go_closure_STDCALL)) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + + .balign 16 + .globl C(ffi_closure_REGISTER) + FFI_HIDDEN(C(ffi_closure_REGISTER)) +C(ffi_closure_REGISTER): +L(UW24): + # cfi_startproc + # cfi_def_cfa(%esp, 8) + # cfi_offset(%eip, -8) + _CET_ENDBR + subl $closure_FS-4, %esp +L(UW25): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl closure_FS-4(%esp), %ecx /* load retaddr */ + movl closure_FS(%esp), %eax /* load closure */ + movl %ecx, closure_FS(%esp) /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + # cfi_endproc +ENDF(C(ffi_closure_REGISTER)) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. 
*/ + + .balign 16 + .globl C(ffi_closure_STDCALL) + FFI_HIDDEN(C(ffi_closure_STDCALL)) +C(ffi_closure_STDCALL): +L(UW27): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW28): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER): + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + movl %eax, %ecx + shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */ + leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */ + movl closure_FS(%esp), %edx /* move return address */ + movl %edx, (%ecx) + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. + There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP(3, 30) + + .balign 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + flds closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_DOUBLE) + fldl closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT8) + movsbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT16) + movswl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT8) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT16) + movzwl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT32) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_VOID) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTPOP) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTARG) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzwl %ax, %eax + movl %ecx, %esp + ret + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table3), X86_RET_UNUSED14) + ud2 +E(L(load_table3), X86_RET_UNUSED15) + ud2 + +L(UW31): + # cfi_endproc +ENDF(C(ffi_closure_STDCALL)) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + + .balign 16 + .globl C(ffi_closure_raw_SYSV) + FFI_HIDDEN(C(ffi_closure_raw_SYSV)) +C(ffi_closure_raw_SYSV): +L(UW32): + # cfi_startproc + _CET_ENDBR + subl $raw_closure_S_FS, %esp +L(UW33): + # cfi_def_cfa_offset(raw_closure_S_FS + 4) + movl %ebx, raw_closure_S_FS-4(%esp) +L(UW34): + # cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc4): + leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +#else + leal L(load_table4)(,%eax, 8), %ecx +#endif + movl raw_closure_S_FS-4(%esp), %ebx +L(UW35): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e4) +E(L(load_table4), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + addl $raw_closure_S_FS, %esp +L(UW36): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + addl $raw_closure_S_FS, %esp +L(UW38): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret $4 +L(UW39): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + ud2 +E(L(load_table4), X86_RET_UNUSED15) + ud2 + +L(UW40): + # cfi_endproc +ENDF(C(ffi_closure_raw_SYSV)) + +#define raw_closure_T_FS (16+16+8) + + .balign 16 + .globl C(ffi_closure_raw_THISCALL) + FFI_HIDDEN(C(ffi_closure_raw_THISCALL)) +C(ffi_closure_raw_THISCALL): +L(UW41): + # cfi_startproc + _CET_ENDBR + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. 
*/ + popl %edx +L(UW42): + # cfi_def_cfa_offset(0) + # cfi_register(%eip, %edx) + pushl %ecx +L(UW43): + # cfi_adjust_cfa_offset(4) + pushl %edx +L(UW44): + # cfi_adjust_cfa_offset(4) + # cfi_rel_offset(%eip, 0) + subl $raw_closure_T_FS, %esp +L(UW45): + # cfi_adjust_cfa_offset(raw_closure_T_FS) + movl %ebx, raw_closure_T_FS-4(%esp) +L(UW46): + # cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc5): + leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +#else + leal L(load_table5)(,%eax, 8), %ecx +#endif + movl raw_closure_T_FS-4(%esp), %ebx +L(UW47): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e5) +E(L(load_table5), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + addl $raw_closure_T_FS, %esp +L(UW48): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret $4 +L(UW49): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + addl $raw_closure_T_FS, %esp +L(UW50): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret $8 +L(UW51): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table5), X86_RET_UNUSED14) + ud2 +E(L(load_table5), X86_RET_UNUSED15) + ud2 + +L(UW52): + # cfi_endproc +ENDF(C(ffi_closure_raw_THISCALL)) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + .globl X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +#if defined(__PIC__) + COMDAT(C(__x86.get_pc_thunk.bx)) +C(__x86.get_pc_thunk.bx): + movl (%esp), %ebx + ret +ENDF(C(__x86.get_pc_thunk.bx)) +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE + COMDAT(C(__x86.get_pc_thunk.dx)) +C(__x86.get_pc_thunk.dx): + movl (%esp), %edx + ret +ENDF(C(__x86.get_pc_thunk.dx)) +#endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. 
*/ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* 
DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + .globl @feat.00 +@feat.00 = 1 +#endif + +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_i386 */ + .long C(ffi_call_i386) + .set L1,L(UW5)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_EAX */ + .long C(ffi_go_closure_EAX) + .set L2,L(UW8)-L(UW6) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_ECX */ + .long C(ffi_go_closure_ECX) + .set L3,L(UW11)-L(UW9) + .long L3 + .long 0x04000000 /* use dwarf unwind 
info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_i386 */ + .long C(ffi_closure_i386) + .set L4,L(UW20)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_STDCALL */ + .long C(ffi_go_closure_STDCALL) + .set L5,L(UW23)-L(UW21) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_REGISTER */ + .long C(ffi_closure_REGISTER) + .set L6,L(UW26)-L(UW24) + .long L6 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_STDCALL */ + .long C(ffi_closure_STDCALL) + .set L7,L(UW31)-L(UW27) + .long L7 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_raw_SYSV */ + .long C(ffi_closure_raw_SYSV) + .set L8,L(UW40)-L(UW32) + .long L8 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_raw_THISCALL */ + .long C(ffi_closure_raw_THISCALL) + .set L9,L(UW52)-L(UW41) + .long L9 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 +#endif /* __APPLE__ */ + +#endif /* ifndef _MSC_VER */ +#endif /* ifdef __i386__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel.S new file mode 100644 index 0000000..3cafd71 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/sysv_intel.S @@ -0,0 +1,995 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. + + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef __x86_64__ +#ifdef _MSC_VER + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#define L(X) C1(L, X) +# define ENDF(X) X ENDP + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. 
*/ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .686P + .MODEL FLAT + +EXTRN @ffi_closure_inner@8:PROC +_TEXT SEGMENT + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ALIGN 16 +PUBLIC @ffi_call_i386@8 +@ffi_call_i386@8 PROC +L(UW0): + cfi_startproc + #if !HAVE_FASTCALL + mov ecx, [esp+4] + mov edx, [esp+8] + #endif + mov eax, [esp] /* move the return address */ + mov [ecx], ebp /* store ebp into local frame */ + mov [ecx+4], eax /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + mov ebp, ecx +L(UW1): + // cfi_def_cfa(%ebp, 8) + // cfi_rel_offset(%ebp, 0) + + mov esp, edx /* set outgoing argument stack */ + mov eax, [20+R_EAX*4+ebp] /* set register arguments */ + mov edx, [20+R_EDX*4+ebp] + mov ecx, [20+R_ECX*4+ebp] + + call dword ptr [ebp+8] + + mov ecx, [12+ebp] /* load return type code */ + mov [ebp+8], ebx /* preserve %ebx */ +L(UW2): + // cfi_rel_offset(%ebx, 8) + + and ecx, X86_RET_TYPE_MASK + lea ebx, [L(store_table) + ecx * 8] + mov ecx, [ebp+16] /* load result address */ + jmp ebx + + ALIGN 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstp DWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movsx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_INT64) + mov [ecx+4], edx + /* fallthru */ +E(L(store_table), X86_RET_int 32) + mov [ecx], eax + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + mov ebx, [ebp+8] + mov esp, ebp + pop ebp +L(UW3): + // cfi_remember_state + // cfi_def_cfa(%esp, 4) + // cfi_restore(%ebx) + // cfi_restore(%ebp) + ret +L(UW4): + // cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + mov [ecx], al + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + mov [ecx], ax + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + int 3 +E(L(store_table), X86_RET_UNUSED15) + int 3 + +L(UW5): + // cfi_endproc +ENDF(@ffi_call_i386@8) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. 
*/ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +FFI_CLOSURE_SAVE_REGS MACRO + mov [esp + closure_CF+16+R_EAX*4], eax + mov [esp + closure_CF+16+R_EDX*4], edx + mov [esp + closure_CF+16+R_ECX*4], ecx +ENDM + +FFI_CLOSURE_COPY_TRAMP_DATA MACRO + mov edx, [eax+FFI_TRAMPOLINE_SIZE] /* copy cif */ + mov ecx, [eax+FFI_TRAMPOLINE_SIZE+4] /* copy fun */ + mov eax, [eax+FFI_TRAMPOLINE_SIZE+8]; /* copy user_data */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax +ENDM + +#if HAVE_FASTCALL +FFI_CLOSURE_PREP_CALL MACRO + mov ecx, esp /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ +ENDM +#else +FFI_CLOSURE_PREP_CALL MACRO + lea ecx, [esp+closure_CF] /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ + mov [esp], ecx + mov [esp+4], edx +ENDM +#endif + +FFI_CLOSURE_CALL_INNER MACRO UWN + call @ffi_closure_inner@8 +ENDM + +FFI_CLOSURE_MASK_AND_JUMP MACRO LABEL + and eax, X86_RET_TYPE_MASK + lea edx, [LABEL+eax*8] + mov eax, [esp+closure_CF] /* optimiztic load */ + jmp edx +ENDM + +ALIGN 16 +PUBLIC ffi_go_closure_EAX +ffi_go_closure_EAX PROC C +L(UW6): + // cfi_startproc + sub esp, closure_FS +L(UW7): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [eax+4] /* copy cif */ + mov ecx, [eax +8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + // cfi_endproc +ENDF(ffi_go_closure_EAX) + +ALIGN 16 +PUBLIC ffi_go_closure_ECX +ffi_go_closure_ECX PROC C +L(UW9): + // cfi_startproc + sub esp, closure_FS +L(UW10): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + // cfi_endproc +ENDF(ffi_go_closure_ECX) + +/* The closure entry points are reached from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + +ALIGN 16 +PUBLIC ffi_closure_i386 +ffi_closure_i386 PROC C +L(UW12): + // cfi_startproc + sub esp, closure_FS +L(UW13): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. 
*/ +L(do_closure_i386):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,2)) + + ALIGN 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + fld dword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movsx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + add esp, closure_FS +L(UW16): + // cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + add esp, closure_FS +L(UW18): + // cfi_adjust_cfa_offset(-closure_FS) + ret 4 +L(UW19): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + int 3 +E(L(load_table2), X86_RET_UNUSED15) + int 3 + +L(UW20): + // cfi_endproc +ENDF(ffi_closure_i386) + +ALIGN 16 +PUBLIC ffi_go_closure_STDCALL +ffi_go_closure_STDCALL PROC C +L(UW21): + // cfi_startproc + sub esp, closure_FS +L(UW22): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + // cfi_endproc +ENDF(ffi_go_closure_STDCALL) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + +ALIGN 16 +PUBLIC ffi_closure_REGISTER +ffi_closure_REGISTER PROC C +L(UW24): + // cfi_startproc + // cfi_def_cfa(%esp, 8) + // cfi_offset(%eip, -8) + sub esp, closure_FS-4 +L(UW25): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov ecx, [esp+closure_FS-4] /* load retaddr */ + mov eax, [esp+closure_FS] /* load closure */ + mov [esp+closure_FS], ecx /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + // cfi_endproc +ENDF(ffi_closure_REGISTER) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + +ALIGN 16 +PUBLIC ffi_closure_STDCALL +ffi_closure_STDCALL PROC C +L(UW27): + // cfi_startproc + sub esp, closure_FS +L(UW28): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER):: + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + mov ecx, eax + shr ecx, X86_RET_POP_SHIFT /* isolate pop count */ + lea ecx, [esp+closure_FS+ecx] /* compute popped esp */ + mov edx, [esp+closure_FS] /* move return address */ + mov [ecx], edx + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. 
+ There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,3)) + + ALIGN 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + fld DWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_DOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_LDOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT8) + movsx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT16) + movsx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT8) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT16) + movzx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + mov esp, ecx + ret +E(L(load_table3), X86_RET_int 32) + mov esp, ecx + ret +E(L(load_table3), X86_RET_VOID) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTPOP) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTARG) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzx eax, ax + mov esp, ecx + ret + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table3), X86_RET_UNUSED14) + int 3 +E(L(load_table3), X86_RET_UNUSED15) + int 3 + +L(UW31): + // cfi_endproc +ENDF(ffi_closure_STDCALL) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + +ALIGN 16 +PUBLIC ffi_closure_raw_SYSV +ffi_closure_raw_SYSV PROC C +L(UW32): + // cfi_startproc + sub esp, raw_closure_S_FS +L(UW33): + // cfi_def_cfa_offset(raw_closure_S_FS + 4) + mov [esp+raw_closure_S_FS-4], ebx +L(UW34): + // cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_S_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc4): +// lea ecx, L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table4)+eax+8] +// #endif + mov ebx, [esp+raw_closure_S_FS-4] +L(UW35): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp dword ptr [ecx] + + ALIGN 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movsx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e4) +E(L(load_table4), X86_RET_int 32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + add esp, raw_closure_S_FS +L(UW36): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + // cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + add esp, raw_closure_S_FS +L(UW38): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret 4 +L(UW39): + // cfi_adjust_cfa_offset(raw_closure_S_FS) 
+E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + int 3 +E(L(load_table4), X86_RET_UNUSED15) + int 3 + +L(UW40): + // cfi_endproc +ENDF(ffi_closure_raw_SYSV) + +#define raw_closure_T_FS (16+16+8) + +ALIGN 16 +PUBLIC ffi_closure_raw_THISCALL +ffi_closure_raw_THISCALL PROC C +L(UW41): + // cfi_startproc + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. */ + pop edx +L(UW42): + // cfi_def_cfa_offset(0) + // cfi_register(%eip, %edx) + push ecx +L(UW43): + // cfi_adjust_cfa_offset(4) + push edx +L(UW44): + // cfi_adjust_cfa_offset(4) + // cfi_rel_offset(%eip, 0) + sub esp, raw_closure_T_FS +L(UW45): + // cfi_adjust_cfa_offset(raw_closure_T_FS) + mov [esp+raw_closure_T_FS-4], ebx +L(UW46): + // cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_T_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc5): +// leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table5)+eax*8] +//#endif + mov ebx, [esp+raw_closure_T_FS-4] +L(UW47): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp DWORD PTR [ecx] + + AlIGN 4 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fld QWORD PTR [esp+16] + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movsx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e5) +E(L(load_table5), X86_RET_int 32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + add esp, raw_closure_T_FS +L(UW48): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret 4 +L(UW49): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + add esp, raw_closure_T_FS +L(UW50): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret 8 +L(UW51): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table5), X86_RET_UNUSED14) + int 3 +E(L(load_table5), X86_RET_UNUSED15) + int 3 + +L(UW52): + // cfi_endproc +ENDF(ffi_closure_raw_THISCALL) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + PUBLIC X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +// #if defined(__PIC__) +// COMDAT(C(__x86.get_pc_thunk.bx)) +// C(__x86.get_pc_thunk.bx): +// movl (%esp), %ebx +// ret +// ENDF(C(__x86.get_pc_thunk.bx)) +// # if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +// COMDAT(C(__x86.get_pc_thunk.dx)) +// C(__x86.get_pc_thunk.dx): +// movl (%esp), %edx +// ret +// ENDF(C(__x86.get_pc_thunk.dx)) +// #endif /* DARWIN || HIDDEN */ +// #endif /* __PIC__ */ + +#if 0 +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE 
CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 
0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + PUBLIC @feat.00 +@feat.00 = 1 +#endif + +#endif /* ifndef _MSC_VER */ +#endif /* ifndef __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif + +END \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64.S new file mode 100644 index 0000000..ee3c04f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/unix64.S @@ -0,0 +1,622 @@ +/* ----------------------------------------------------------------------- + unix64.S - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 2008 Red Hat, Inc + - Copyright (c) 2002 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include "internal64.h" +#include "asmnames.h" + + .text + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# ifdef __CET__ +# define E(BASE, X) .balign 8; .org BASE + X * 16 +# else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +# endif +#endif + +/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)); + + Bit o trickiness here -- ARGS+BYTES is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .balign 8 + .globl C(ffi_call_unix64) + FFI_HIDDEN(C(ffi_call_unix64)) + +C(ffi_call_unix64): +L(UW0): + _CET_ENDBR + movq (%rsp), %r10 /* Load return address. */ + leaq (%rdi, %rsi), %rax /* Find local stack base. */ + movq %rdx, (%rax) /* Save flags. 
*/ + movq %rcx, 8(%rax) /* Save raddr. */ + movq %rbp, 16(%rax) /* Save old frame pointer. */ + movq %r10, 24(%rax) /* Relocate return address. */ + movq %rax, %rbp /* Finalize local stack frame. */ + + /* New stack frame based off rbp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-8, so from the + perspective of the unwind info, it hasn't moved. */ +L(UW1): + /* cfi_def_cfa(%rbp, 32) */ + /* cfi_rel_offset(%rbp, 16) */ + + movq %rdi, %r10 /* Save a copy of the register area. */ + movq %r8, %r11 /* Save a copy of the target fn. */ + movl %r9d, %eax /* Set number of SSE registers. */ + + /* Load up all argument registers. */ + movq (%r10), %rdi + movq 0x08(%r10), %rsi + movq 0x10(%r10), %rdx + movq 0x18(%r10), %rcx + movq 0x20(%r10), %r8 + movq 0x28(%r10), %r9 + movl 0xb0(%r10), %eax + testl %eax, %eax + jnz L(load_sse) +L(ret_from_load_sse): + + /* Deallocate the reg arg area, except for r10, then load via pop. */ + leaq 0xb8(%r10), %rsp + popq %r10 + + /* Call the user function. */ + call *%r11 + + /* Deallocate stack arg area; local stack frame in redzone. */ + leaq 24(%rbp), %rsp + + movq 0(%rbp), %rcx /* Reload flags. */ + movq 8(%rbp), %rdi /* Reload raddr. */ + movq 16(%rbp), %rbp /* Reload old frame pointer. */ +L(UW2): + /* cfi_remember_state */ + /* cfi_def_cfa(%rsp, 8) */ + /* cfi_restore(%rbp) */ + + /* The first byte of the flags contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %cl + movzbl %cl, %r10d + leaq L(store_table)(%rip), %r11 + ja L(sa) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. */ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + + /* Prep for the structure cases: scratch area in redzone. 
*/ + leaq -20(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(store_table): +E(L(store_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(store_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl %al, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl %ax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl %eax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbq %al, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswq %ax, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT32) + _CET_ENDBR + cltq + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_INT64) + _CET_ENDBR + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_X87) + _CET_ENDBR + fstpt (%rdi) + ret +E(L(store_table), UNIX64_RET_X87_2) + _CET_ENDBR + fstpt (%rdi) + fstpt 16(%rdi) + ret +E(L(store_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq %rax, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq %xmm0, 8(%rsi) + jmp L(s2) +E(L(store_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq %xmm1, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq %rdx, 8(%rsi) +L(s2): + movq %rax, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + .balign 8 +L(s3): + movq %xmm0, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + +L(sa): call PLT(C(abort)) + + /* Many times we can avoid loading any SSE registers at all. + It's not worth an indirect jump to load the exact set of + SSE registers needed; zero or all is a good compromise. */ + .balign 2 +L(UW3): + /* cfi_restore_state */ +L(load_sse): + movdqa 0x30(%r10), %xmm0 + movdqa 0x40(%r10), %xmm1 + movdqa 0x50(%r10), %xmm2 + movdqa 0x60(%r10), %xmm3 + movdqa 0x70(%r10), %xmm4 + movdqa 0x80(%r10), %xmm5 + movdqa 0x90(%r10), %xmm6 + movdqa 0xa0(%r10), %xmm7 + jmp L(ret_from_load_sse) + +L(UW4): +ENDF(C(ffi_call_unix64)) + +/* 6 general registers, 8 vector registers, + 32 bytes of rvalue, 8 bytes of alignment. */ +#define ffi_closure_OFS_G 0 +#define ffi_closure_OFS_V (6*8) +#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16) +#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8) + +/* The location of rvalue within the red zone after deallocating the frame. 
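+   With the offsets above, ffi_closure_RED_RVALUE works out to 176 - 216 = -40:
+   after the closure epilogue pops the frame, the return value still sits 40
+   bytes below %rsp, inside the 128-byte red zone the System V ABI reserves
+   below the stack pointer.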
*/ +#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS) + + .balign 2 + .globl C(ffi_closure_unix64_sse) + FFI_HIDDEN(C(ffi_closure_unix64_sse)) + +C(ffi_closure_unix64_sse): +L(UW5): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW6): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry1) + +L(UW7): +ENDF(C(ffi_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_closure_unix64) + FFI_HIDDEN(C(ffi_closure_unix64)) + +C(ffi_closure_unix64): +L(UW8): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW9): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry1): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */ + movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */ + movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */ +#else + movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */ +#endif +L(do_closure): + leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */ + movq %rsp, %r8 /* Load reg_args */ + leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */ + call PLT(C(ffi_closure_unix64_inner)) + + /* Deallocate stack frame early; return value is now in redzone. */ + addq $ffi_closure_FS, %rsp +L(UW10): + /* cfi_adjust_cfa_offset(-ffi_closure_FS) */ + + /* The first byte of the return value contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %al + movzbl %al, %r10d + leaq L(load_table)(%rip), %r11 + ja L(la) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. 
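+   Doubling %r10d below, combined with the scale-by-8 lea, steps through
+   those 16-byte slots.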
*/ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + leaq ffi_closure_RED_RVALUE(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(load_table): +E(L(load_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(load_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_INT64) + _CET_ENDBR + movq (%rsi), %rax + ret +E(L(load_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_X87) + _CET_ENDBR + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_X87_2) + _CET_ENDBR + fldt 16(%rsi) + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq 8(%rsi), %rax + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq 8(%rsi), %xmm0 + jmp L(l2) +E(L(load_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq 8(%rsi), %xmm1 + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq 8(%rsi), %rdx +L(l2): + movq (%rsi), %rax + ret + .balign 8 +L(l3): + movq (%rsi), %xmm0 + ret + +L(la): call PLT(C(abort)) + +L(UW11): +ENDF(C(ffi_closure_unix64)) + + .balign 2 + .globl C(ffi_go_closure_unix64_sse) + FFI_HIDDEN(C(ffi_go_closure_unix64_sse)) + +C(ffi_go_closure_unix64_sse): +L(UW12): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW13): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry2) + +L(UW14): +ENDF(C(ffi_go_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_go_closure_unix64) + FFI_HIDDEN(C(ffi_go_closure_unix64)) + +C(ffi_go_closure_unix64): +L(UW15): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW16): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry2): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl 4(%r10), %edi /* Load cif */ + movl 8(%r10), %esi /* Load fun */ + movl %r10d, %edx /* Load closure (user_data) */ +#else + movq 8(%r10), %rdi /* Load cif */ + movq 16(%r10), %rsi /* Load fun */ + movq %r10, %rdx /* Load closure (user_data) */ +#endif + jmp L(do_closure) + +L(UW17): +ENDF(C(ffi_go_closure_unix64)) + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,"a",@unwind +#else +.section .eh_frame,"a",@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. 
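+   (DW_CFA_advance_loc1 is opcode 0x02 followed by a one-byte code delta;
+   the CET variant below uses opcode 0x03, DW_CFA_advance_loc2, with a
+   two-byte delta, since the ENDBR64 padding can push label distances past
+   255 bytes.)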
*/ +#ifdef __CET__ +/* Use DW_CFA_advance_loc2 when IBT is enabled. */ +# define ADV(N, P) .byte 3; .2byte L(N)-L(P) +#else +# define ADV(N, P) .byte 2, L(N)-L(P) +#endif + + .balign 8 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x78 /* CIE Data Alignment Factor */ + .byte 0x10 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */ + .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */ + .balign 8 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW4)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */ + .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */ + ADV(UW2, UW1) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */ + .byte 0xc0+6 /* DW_CFA_restore, %rbp */ + ADV(UW3, UW2) + .byte 0xb /* DW_CFA_restore_state */ + .balign 8 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW5)) /* Initial location */ + .long L(UW7)-L(UW5) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW6, UW5) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW8)) /* Initial location */ + .long L(UW11)-L(UW8) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW9, UW8) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + ADV(UW10, UW9) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */ +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW14)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW15)) /* Initial location */ + .long L(UW17)-L(UW15) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW16, UW15) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE5): +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_unix64 */ + .quad C(ffi_call_unix64) + .set L1,L(UW4)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64_sse */ + .quad C(ffi_closure_unix64_sse) + .set L2,L(UW7)-L(UW5) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64 */ + .quad C(ffi_closure_unix64) + .set L3,L(UW11)-L(UW8) + .long L3 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + 
.quad 0 + + /* compact unwind for ffi_go_closure_unix64_sse */ + .quad C(ffi_go_closure_unix64_sse) + .set L4,L(UW14)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_go_closure_unix64 */ + .quad C(ffi_go_closure_unix64) + .set L5,L(UW17)-L(UW15) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 +#endif + +#endif /* __x86_64__ */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64.S new file mode 100644 index 0000000..57c0e65 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64.S @@ -0,0 +1,240 @@ +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 %rcx +#define arg1 %rdx +#define arg2 %r8 +#define arg3 %r9 +#else +#define SEH(...) +#define arg0 %rdi +#define arg1 %rsi +#define arg2 %rdx +#define arg3 %rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 8 + .globl C(ffi_call_win64) + FFI_HIDDEN(C(ffi_call_win64)) + + SEH(.seh_proc ffi_call_win64) +C(ffi_call_win64): + cfi_startproc + _CET_ENDBR + /* Set up the local stack frame and install it in rbp/rsp. */ + movq (%rsp), %rax + movq %rbp, (arg1) + movq %rax, 8(arg1) + movq arg1, %rbp + cfi_def_cfa(%rbp, 16) + cfi_rel_offset(%rbp, 0) + SEH(.seh_pushreg %rbp) + SEH(.seh_setframe %rbp, 0) + SEH(.seh_endprologue) + movq arg0, %rsp + + movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + movq (%rsp), %rcx + movsd (%rsp), %xmm0 + movq 8(%rsp), %rdx + movsd 8(%rsp), %xmm1 + movq 16(%rsp), %r8 + movsd 16(%rsp), %xmm2 + movq 24(%rsp), %r9 + movsd 24(%rsp), %xmm3 + + call *16(%rbp) + + movl 24(%rbp), %ecx + movq 32(%rbp), %r8 + leaq 0f(%rip), %r10 + cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + leaq (%r10, %rcx, 8), %r10 + ja 99f + _CET_NOTRACK jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). 
*/ +.macro epilogue + leaveq + cfi_remember_state + cfi_def_cfa(%rsp, 8) + cfi_restore(%rbp) + ret + cfi_restore_state +.endm + + .align 8 +0: +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzbl %al, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsbq %al, %rax + jmp 98f +E(0b, FFI_TYPE_UINT16) + movzwl %ax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movswq %ax, %rax + jmp 98f +E(0b, FFI_TYPE_UINT32) + movl %eax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +98: movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + movl %eax, (%r8) + epilogue + + .align 8 +99: call PLT(C(abort)) + + epilogue + + cfi_endproc + SEH(.seh_endproc) + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + .align 8 + .globl C(ffi_go_closure_win64) + FFI_HIDDEN(C(ffi_go_closure_win64)) + + SEH(.seh_proc ffi_go_closure_win64) +C(ffi_go_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq 8(%r10), %rcx /* load cif */ + movq 16(%r10), %rdx /* load fun */ + movq %r10, %r8 /* closure is user_data */ + jmp 0f + cfi_endproc + SEH(.seh_endproc) + + .align 8 + .globl C(ffi_closure_win64) + FFI_HIDDEN(C(ffi_closure_win64)) + + SEH(.seh_proc ffi_closure_win64) +C(ffi_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +0: + subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.seh_stackalloc ffi_clo_FS) + SEH(.seh_endprologue) + + /* Save all sse arguments into the stack frame. */ + movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + leaq ffi_clo_OFF_R(%rsp), %r9 + call PLT(C(ffi_closure_win64_inner)) + + /* Load the result into both possible result registers. 
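+   Win64 returns scalars in either RAX or XMM0 depending on the type, so the
+   same 8-byte result slot is loaded into both registers and the caller simply
+   uses whichever one its prototype makes live.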
*/ + movq ffi_clo_OFF_R(%rsp), %rax + movsd ffi_clo_OFF_R(%rsp), %xmm0 + + addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + SEH(.seh_endproc) +#endif /* __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel.S new file mode 100644 index 0000000..7df78b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/x86/win64_intel.S @@ -0,0 +1,237 @@ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 rcx +#define arg1 rdx +#define arg2 r8 +#define arg3 r9 +#else +#define SEH(...) +#define arg0 rdi +#define arg1 rsi +#define arg2 rdx +#define arg3 rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .CODE + extern PLT(C(abort)):near + extern C(ffi_closure_win64_inner):near + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + ALIGN 8 + PUBLIC C(ffi_call_win64) + + ; SEH(.safesh ffi_call_win64) +C(ffi_call_win64) proc SEH(frame) + cfi_startproc + /* Set up the local stack frame and install it in rbp/rsp. */ + mov RAX, [RSP] ; movq (%rsp), %rax + mov [arg1], RBP ; movq %rbp, (arg1) + mov [arg1 + 8], RAX; movq %rax, 8(arg1) + mov RBP, arg1; movq arg1, %rbp + cfi_def_cfa(rbp, 16) + cfi_rel_offset(rbp, 0) + SEH(.pushreg rbp) + SEH(.setframe rbp, 0) + SEH(.endprolog) + mov RSP, arg0 ; movq arg0, %rsp + + mov R10, arg2 ; movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + mov RCX, [RSP] ; movq (%rsp), %rcx + movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0 + mov RDX, [RSP + 8] ;movq 8(%rsp), %rdx + movsd XMM1, qword ptr [RSP + 8]; movsd 8(%rsp), %xmm1 + mov R8, [RSP + 16] ; movq 16(%rsp), %r8 + movsd XMM2, qword ptr [RSP + 16] ; movsd 16(%rsp), %xmm2 + mov R9, [RSP + 24] ; movq 24(%rsp), %r9 + movsd XMM3, qword ptr [RSP + 24] ;movsd 24(%rsp), %xmm3 + + CALL qword ptr [RBP + 16] ; call *16(%rbp) + + mov ECX, [RBP + 24] ; movl 24(%rbp), %ecx + mov R8, [RBP + 32] ; movq 32(%rbp), %r8 + LEA R10, ffi_call_win64_tab ; leaq 0f(%rip), %r10 + CMP ECX, FFI_TYPE_SMALL_STRUCT_4B ; cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + LEA R10, [R10 + RCX*8] ; leaq (%r10, %rcx, 8), %r10 + JA L99 ; ja 99f + JMP R10 ; jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). 
*/ +epilogue macro + LEAVE + cfi_remember_state + cfi_def_cfa(rsp, 8) + cfi_restore(rbp) + RET + cfi_restore_state +endm + + ALIGN 8 +ffi_call_win64_tab LABEL NEAR +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movsxd rax, eax ; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss dword ptr [r8], xmm0 ; movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd qword ptr[r8], xmm0; movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzx eax, al ;movzbl %al, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsx rax, al ; movsbq %al, %rax + jmp L98 +E(0b, FFI_TYPE_UINT16) + movzx eax, ax ; movzwl %ax, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movsx rax, ax; movswq %ax, %rax + jmp L98 +E(0b, FFI_TYPE_UINT32) + mov eax, eax; movl %eax, %eax + mov qword ptr[r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movsxd rax, eax; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +L98 LABEL near + mov qword ptr [r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + mov qword ptr [r8], rax;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + mov qword ptr [r8], rax ;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + mov byte ptr [r8], al ; movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + mov word ptr [r8], ax ; movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + mov dword ptr [r8], eax ; movl %eax, (%r8) + epilogue + + align 8 +L99 LABEL near + call PLT(C(abort)) + + epilogue + + cfi_endproc + C(ffi_call_win64) endp + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + align 8 + PUBLIC C(ffi_go_closure_win64) + +C(ffi_go_closure_win64) proc + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9 ;movq %r9, 32(%rsp) + + mov rcx, qword ptr [r10 + 8]; movq 8(%r10), %rcx /* load cif */ + mov rdx, qword ptr [r10 + 16]; movq 16(%r10), %rdx /* load fun */ + mov r8, r10 ; movq %r10, %r8 /* closure is user_data */ + jmp ffi_closure_win64_2 + cfi_endproc + C(ffi_go_closure_win64) endp + + align 8 + +PUBLIC C(ffi_closure_win64) +C(ffi_closure_win64) PROC FRAME + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. 
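+   (The Win64 ABI makes the caller reserve a 32-byte home area just above the
+   return address, so [rsp+8]..[rsp+32] can be used to spill RCX/RDX/R8/R9
+   without adjusting RSP first.)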
*/ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9; movq %r9, 32(%rsp) + + mov rcx, qword ptr [FFI_TRAMPOLINE_SIZE + r10] ;movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + mov rdx, qword ptr [FFI_TRAMPOLINE_SIZE + 8 + r10] ; movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + mov r8, qword ptr [FFI_TRAMPOLINE_SIZE+16+r10] ;movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +ffi_closure_win64_2 LABEL near + sub rsp, ffi_clo_FS ;subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.allocstack ffi_clo_FS) + SEH(.endprolog) + + /* Save all sse arguments into the stack frame. */ + movsd qword ptr [ffi_clo_OFF_X + rsp], xmm0 ; movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd qword ptr [ffi_clo_OFF_X+8+rsp], xmm1 ; movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd qword ptr [ffi_clo_OFF_X+16+rsp], xmm2 ; movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd qword ptr [ffi_clo_OFF_X+24+rsp], xmm3 ; movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + lea r9, [ffi_clo_OFF_R + rsp] ; leaq ffi_clo_OFF_R(%rsp), %r9 + call C(ffi_closure_win64_inner) + + /* Load the result into both possible result registers. */ + + mov rax, qword ptr [ffi_clo_OFF_R + rsp] ;movq ffi_clo_OFF_R(%rsp), %rax + movsd xmm0, qword ptr [rsp + ffi_clo_OFF_R] ;movsd ffi_clo_OFF_R(%rsp), %xmm0 + + add rsp, ffi_clo_FS ;addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + C(ffi_closure_win64) endp + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +_text ends +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi.c new file mode 100644 index 0000000..9a0575f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffi.c @@ -0,0 +1,298 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +/* + |----------------------------------------| + | | + on entry to ffi_call ----> |----------------------------------------| + | caller stack frame for registers a0-a3 | + |----------------------------------------| + | | + | additional arguments | + entry of the function ---> |----------------------------------------| + | copy of function arguments a2-a7 | + | - - - - - - - - - - - - - | + | | + + The area below the entry line becomes the new stack frame for the function. + +*/ + + +#define FFI_TYPE_STRUCT_REGS FFI_TYPE_LAST + + +extern void ffi_call_SYSV(void *rvalue, unsigned rsize, unsigned flags, + void(*fn)(void), unsigned nbytes, extended_cif*); +extern void ffi_closure_SYSV(void) FFI_HIDDEN; + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + switch(cif->rtype->type) { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = cif->rtype->type; + break; + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + cif->flags = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; // cif->rtype->type; + break; + case FFI_TYPE_STRUCT: + cif->flags = FFI_TYPE_STRUCT; //_REGS; + /* Up to 16 bytes are returned in registers */ + if (cif->rtype->size > 4 * 4) { + /* returned structure is referenced by a register; use 8 bytes + (including 4 bytes for potential additional alignment) */ + cif->flags = FFI_TYPE_STRUCT; + cif->bytes += 8; + } + break; + + default: + cif->flags = FFI_TYPE_UINT32; + break; + } + + /* Round the stack up to a full 4 register frame, just in case + (we use this size in movsp). This way, it's also a multiple of + 8 bytes for 64-bit arguments. 
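+     For example, an argument area of 20 bytes rounds up to 32 here.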
*/ + cif->bytes = FFI_ALIGN(cif->bytes, 16); + + return FFI_OK; +} + +void ffi_prep_args(extended_cif *ecif, unsigned char* stack) +{ + unsigned int i; + unsigned long *addr; + ffi_type **ptr; + + union { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **i; + long long **ll; + float **f; + double **d; + } p_argv; + + /* Verify that everything is aligned up properly */ + FFI_ASSERT (((unsigned long) stack & 0x7) == 0); + + p_argv.v = ecif->avalue; + addr = (unsigned long*)stack; + + /* structures with a size greater than 16 bytes are passed in memory */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 16) + { + *addr++ = (unsigned long)ecif->rvalue; + } + + for (i = ecif->cif->nargs, ptr = ecif->cif->arg_types; + i > 0; + i--, ptr++, p_argv.v++) + { + switch ((*ptr)->type) + { + case FFI_TYPE_SINT8: + *addr++ = **p_argv.sc; + break; + case FFI_TYPE_UINT8: + *addr++ = **p_argv.uc; + break; + case FFI_TYPE_SINT16: + *addr++ = **p_argv.ss; + break; + case FFI_TYPE_UINT16: + *addr++ = **p_argv.us; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *addr++ = **p_argv.i; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (((unsigned long)addr & 4) != 0) + addr++; + *(unsigned long long*)addr = **p_argv.ll; + addr += sizeof(unsigned long long) / sizeof (addr); + break; + + case FFI_TYPE_STRUCT: + { + unsigned long offs; + unsigned long size; + + if (((unsigned long)addr & 4) != 0 && (*ptr)->alignment > 4) + addr++; + + offs = (unsigned long) addr - (unsigned long) stack; + size = (*ptr)->size; + + /* Entire structure must fit the argument registers or referenced */ + if (offs < FFI_REGISTER_NARGS * 4 + && offs + size > FFI_REGISTER_NARGS * 4) + addr = (unsigned long*) (stack + FFI_REGISTER_NARGS * 4); + + memcpy((char*) addr, *p_argv.c, size); + addr += (size + 3) / 4; + break; + } + + default: + FFI_ASSERT(0); + } + } +} + + +void ffi_call(ffi_cif* cif, void(*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + unsigned long rsize = cif->rtype->size; + int flags = cif->flags; + void *alloc = NULL; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Note that for structures that are returned in registers (size <= 16 bytes) + we allocate a temporary buffer and use memcpy to copy it to the final + destination. The reason is that the target address might be misaligned or + the length not a multiple of 4 bytes. Handling all those cases would be + very complex. */ + + if (flags == FFI_TYPE_STRUCT && (rsize <= 16 || rvalue == NULL)) + { + alloc = alloca(FFI_ALIGN(rsize, 4)); + ecif.rvalue = alloc; + } + else + { + ecif.rvalue = rvalue; + } + + if (cif->abi != FFI_SYSV) + FFI_ASSERT(0); + + ffi_call_SYSV (ecif.rvalue, rsize, cif->flags, fn, cif->bytes, &ecif); + + if (alloc != NULL && rvalue != NULL) + memcpy(rvalue, alloc, rsize); +} + +extern void ffi_trampoline(); +extern void ffi_cacheflush(void* start, void* end); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + /* copye trampoline to stack and patch 'ffi_closure_SYSV' pointer */ + memcpy(closure->tramp, ffi_trampoline, FFI_TRAMPOLINE_SIZE); + *(unsigned int*)(&closure->tramp[8]) = (unsigned int)ffi_closure_SYSV; + + // Do we have this function? 
+ // __builtin___clear_cache(closer->tramp, closer->tramp + FFI_TRAMPOLINE_SIZE) + ffi_cacheflush(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + return FFI_OK; +} + + +long FFI_HIDDEN +ffi_closure_SYSV_inner(ffi_closure *closure, void **values, void *rvalue) +{ + ffi_cif *cif; + ffi_type **arg_types; + void **avalue; + int i, areg; + + cif = closure->cif; + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + areg = 0; + + int rtype = cif->rtype->type; + if (rtype == FFI_TYPE_STRUCT && cif->rtype->size > 4 * 4) + { + rvalue = *values; + areg++; + } + + cif = closure->cif; + arg_types = cif->arg_types; + avalue = alloca(cif->nargs * sizeof(void *)); + + for (i = 0; i < cif->nargs; i++) + { + if (arg_types[i]->alignment == 8 && (areg & 1) != 0) + areg++; + + // skip the entry 16,a1 framework, add 16 bytes (4 registers) + if (areg == FFI_REGISTER_NARGS) + areg += 4; + + if (arg_types[i]->type == FFI_TYPE_STRUCT) + { + int numregs = ((arg_types[i]->size + 3) & ~3) / 4; + if (areg < FFI_REGISTER_NARGS && areg + numregs > FFI_REGISTER_NARGS) + areg = FFI_REGISTER_NARGS + 4; + } + + avalue[i] = &values[areg]; + areg += (arg_types[i]->size + 3) / 4; + } + + (closure->fun)(cif, rvalue, avalue, closure->user_data); + + return rtype; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget.h new file mode 100644 index 0000000..0ba728b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Tensilica, Inc. + Target configuration macros for XTENSA. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_REGISTER_NARGS 6 + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 24 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv.S new file mode 100644 index 0000000..e942179 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/src/xtensa/sysv.S @@ -0,0 +1,258 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#define ENTRY(name) .text; .globl name; .type name,@function; .align 4; name: +#define END(name) .size name , . - name + +/* Assert that the table below is in sync with ffi.h. 
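+   (.Lstore below computes its jump target as store_calls + (type -
+   FFI_TYPE_UINT8) * 8, so the UINT8..UINT64 values must stay consecutive.)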
*/ + +#if FFI_TYPE_UINT8 != 5 \ + || FFI_TYPE_SINT8 != 6 \ + || FFI_TYPE_UINT16 != 7 \ + || FFI_TYPE_SINT16 != 8 \ + || FFI_TYPE_UINT32 != 9 \ + || FFI_TYPE_SINT32 != 10 \ + || FFI_TYPE_UINT64 != 11 +#error "xtensa/sysv.S out of sync with ffi.h" +#endif + + +/* ffi_call_SYSV (rvalue, rbytes, flags, (*fnaddr)(), bytes, ecif) + void *rvalue; a2 + unsigned long rbytes; a3 + unsigned flags; a4 + void (*fnaddr)(); a5 + unsigned long bytes; a6 + extended_cif* ecif) a7 +*/ + +ENTRY(ffi_call_SYSV) + + entry a1, 32 # 32 byte frame for using call8 below + + mov a10, a7 # a10(->arg0): ecif + sub a11, a1, a6 # a11(->arg1): stack pointer + mov a7, a1 # fp + movsp a1, a11 # set new sp = old_sp - bytes + + movi a8, ffi_prep_args + callx8 a8 # ffi_prep_args(ecif, stack) + + # prepare to move stack pointer back up to 6 arguments + # note that 'bytes' is already aligned + + movi a10, 6*4 + sub a11, a6, a10 + movgez a6, a10, a11 + add a6, a1, a6 + + + # we can pass up to 6 arguments in registers + # for simplicity, just load 6 arguments + # (the stack size is at least 32 bytes, so no risk to cross boundaries) + + l32i a10, a1, 0 + l32i a11, a1, 4 + l32i a12, a1, 8 + l32i a13, a1, 12 + l32i a14, a1, 16 + l32i a15, a1, 20 + + # move stack pointer + + movsp a1, a6 + + callx8 a5 # (*fn)(args...) + + # Handle return value(s) + + beqz a2, .Lexit + + movi a5, FFI_TYPE_STRUCT + bne a4, a5, .Lstore + movi a5, 16 + blt a5, a3, .Lexit + + s32i a10, a2, 0 + blti a3, 5, .Lexit + addi a3, a3, -1 + s32i a11, a2, 4 + blti a3, 8, .Lexit + s32i a12, a2, 8 + blti a3, 12, .Lexit + s32i a13, a2, 12 + +.Lexit: retw + +.Lstore: + addi a4, a4, -FFI_TYPE_UINT8 + bgei a4, 7, .Lexit # should never happen + movi a6, store_calls + add a4, a4, a4 + addx4 a6, a4, a6 # store_table + idx * 8 + jx a6 + + .align 8 +store_calls: + # UINT8 + s8i a10, a2, 0 + retw + + # SINT8 + .align 8 + s8i a10, a2, 0 + retw + + # UINT16 + .align 8 + s16i a10, a2, 0 + retw + + # SINT16 + .align 8 + s16i a10, a2, 0 + retw + + # UINT32 + .align 8 + s32i a10, a2, 0 + retw + + # SINT32 + .align 8 + s32i a10, a2, 0 + retw + + # UINT64 + .align 8 + s32i a10, a2, 0 + s32i a11, a2, 4 + retw + +END(ffi_call_SYSV) + + +/* + * void ffi_cacheflush (unsigned long start, unsigned long end) + */ + +#define EXTRA_ARGS_SIZE 24 + +ENTRY(ffi_cacheflush) + + entry a1, 16 + +1: +#if XCHAL_DCACHE_SIZE + dhwbi a2, 0 +#endif +#if XCHAL_ICACHE_SIZE + ihi a2, 0 +#endif + addi a2, a2, 4 + blt a2, a3, 1b + + retw + +END(ffi_cacheflush) + +/* ffi_trampoline is copied to the stack */ + +ENTRY(ffi_trampoline) + + entry a1, 16 + (FFI_REGISTER_NARGS * 4) + (4 * 4) # [ 0] + j 2f # [ 3] + .align 4 # [ 6] +1: .long 0 # [ 8] +2: l32r a15, 1b # [12] + _mov a14, a0 # [15] + callx0 a15 # [18] + # [21] +END(ffi_trampoline) + +/* + * ffi_closure() + * + * a0: closure + 21 + * a14: return address (a0) + */ + +ENTRY(ffi_closure_SYSV) + + /* intentionally omitting entry here */ + + # restore return address (a0) and move pointer to closure to a10 + addi a10, a0, -21 + mov a0, a14 + + # allow up to 4 arguments as return values + addi a11, a1, 4 * 4 + + # save up to 6 arguments to stack (allocated by entry below) + s32i a2, a11, 0 + s32i a3, a11, 4 + s32i a4, a11, 8 + s32i a5, a11, 12 + s32i a6, a11, 16 + s32i a7, a11, 20 + + movi a8, ffi_closure_SYSV_inner + mov a12, a1 + callx8 a8 # .._inner(*closure, **avalue, *rvalue) + + # load up to four return arguments + l32i a2, a1, 0 + l32i a3, a1, 4 + l32i a4, a1, 8 + l32i a5, a1, 12 + + # (sign-)extend return value + movi a11, FFI_TYPE_UINT8 + bne a10, a11, 1f + 
extui a2, a2, 0, 8 + retw + +1: movi a11, FFI_TYPE_SINT8 + bne a10, a11, 1f + sext a2, a2, 7 + retw + +1: movi a11, FFI_TYPE_UINT16 + bne a10, a11, 1f + extui a2, a2, 0, 16 + retw + +1: movi a11, FFI_TYPE_SINT16 + bne a10, a11, 1f + sext a2, a2, 15 + +1: retw + +END(ffi_closure_SYSV) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h.in new file mode 100644 index 0000000..9788f70 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/stamp-h.in @@ -0,0 +1 @@ +timestamp diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile.am new file mode 100644 index 0000000..bcfea57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/Makefile.am @@ -0,0 +1,122 @@ +## Process this file with automake to produce Makefile.in. + +AUTOMAKE_OPTIONS = foreign dejagnu + +EXTRA_DEJAGNU_SITE_CONFIG=../local.exp + +CLEANFILES = *.exe core* *.log *.sum + +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ +libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ 
+libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ +libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ 
+libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default.exp new file mode 100644 index 0000000..90967cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/config/default.exp @@ -0,0 +1 @@ +load_lib "standard.exp" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi.exp new file mode 100644 index 0000000..c02adf9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/libffi.exp @@ -0,0 +1,640 @@ +# Copyright (C) 2003, 2005, 2008, 2009, 2010, 2011, 2014, 2019 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +proc load_gcc_lib { filename } { + global srcdir + load_file $srcdir/lib/$filename +} + +load_lib dg.exp +load_lib libgloss.exp +load_gcc_lib target-libpath.exp +load_gcc_lib wrapper.exp + +# Return 1 if the target matches the effective target 'arg', 0 otherwise. +# This can be used with any check_* proc that takes no argument and +# returns only 1 or 0. It could be used with check_* procs that take +# arguments with keywords that pass particular arguments. + +proc is-effective-target { arg } { + global et_index + set selected 0 + if { ![info exists et_index] } { + # Initialize the effective target index that is used in some + # check_effective_target_* procs. + set et_index 0 + } + if { [info procs check_effective_target_${arg}] != [list] } { + set selected [check_effective_target_${arg}] + } else { + error "unknown effective target keyword `$arg'" + } + verbose "is-effective-target: $arg $selected" 2 + return $selected +} + +proc is-effective-target-keyword { arg } { + if { [info procs check_effective_target_${arg}] != [list] } { + return 1 + } else { + return 0 + } +} + +# Intercept the call to the DejaGnu version of dg-process-target to +# support use of an effective-target keyword in place of a list of +# target triplets to xfail or skip a test. 
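+# (For example, a directive like "{ dg-do run { target x86_64-*-linux* } }"
+# or "{ dg-do run { xfail arm*-*-* } }" in a test file is handled by the
+# replacement proc defined below; the triplets here are only illustrative.)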
+# +# The argument to dg-process-target is the keyword "target" or "xfail" +# followed by a selector: +# target-triplet-1 ... +# effective-target-keyword +# selector-expression +# +# For a target list the result is "S" if the target is selected, "N" otherwise. +# For an xfail list the result is "F" if the target is affected, "P" otherwise. + +# In contexts that allow either "target" or "xfail" the argument can be +# target selector1 xfail selector2 +# which returns "N" if selector1 is not selected, otherwise the result of +# "xfail selector2". +# +# A selector expression appears within curly braces and uses a single logical +# operator: !, &&, or ||. An operand is another selector expression, an +# effective-target keyword, or a list of target triplets within quotes or +# curly braces. + +if { [info procs saved-dg-process-target] == [list] } { + rename dg-process-target saved-dg-process-target + + # Evaluate an operand within a selector expression. + proc selector_opd { op } { + set selector "target" + lappend selector $op + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_opd: `$op' $answer" 2 + return $answer + } + + # Evaluate a target triplet list within a selector expression. + # Unlike other operands, this needs to be expanded from a list to + # the same string as "target". + proc selector_list { op } { + set selector "target [join $op]" + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_list: `$op' $answer" 2 + return $answer + } + + # Evaluate a selector expression. + proc selector_expression { exp } { + if { [llength $exp] == 2 } { + if [string match "!" [lindex $exp 0]] { + set op1 [lindex $exp 1] + set answer [expr { ! [selector_opd $op1] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } elseif { [llength $exp] == 3 } { + set op1 [lindex $exp 0] + set opr [lindex $exp 1] + set op2 [lindex $exp 2] + if [string match "&&" $opr] { + set answer [expr { [selector_opd $op1] && [selector_opd $op2] }] + } elseif [string match "||" $opr] { + set answer [expr { [selector_opd $op1] || [selector_opd $op2] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + + verbose "selector_expression: `$exp' $answer" 2 + return $answer + } + + # Evaluate "target selector" or "xfail selector". + + proc dg-process-target-1 { args } { + verbose "dg-process-target-1: `$args'" 2 + + # Extract the 'what' keyword from the argument list. + set selector [string trim [lindex $args 0]] + if [regexp "^xfail " $selector] { + set what "xfail" + } elseif [regexp "^target " $selector] { + set what "target" + } else { + error "syntax error in target selector \"$selector\"" + } + + # Extract the rest of the list, which might be a keyword. + regsub "^${what}" $selector "" rest + set rest [string trim $rest] + + if [is-effective-target-keyword $rest] { + # The selector is an effective target keyword. + if [is-effective-target $rest] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + if [string match "{*}" $rest] { + if [selector_expression [lindex $rest 0]] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + # The selector is not an effective-target keyword, so process + # the list of target triplets. 
+ return [saved-dg-process-target $selector] + } + + # Intercept calls to the DejaGnu function. In addition to + # processing "target selector" or "xfail selector", handle + # "target selector1 xfail selector2". + + proc dg-process-target { args } { + verbose "replacement dg-process-target: `$args'" 2 + + set selector [string trim [lindex $args 0]] + + # If the argument list contains both 'target' and 'xfail', + # process 'target' and, if that succeeds, process 'xfail'. + if [regexp "^target .* xfail .*" $selector] { + set xfail_index [string first "xfail" $selector] + set xfail_selector [string range $selector $xfail_index end] + set target_selector [string range $selector 0 [expr $xfail_index-1]] + set target_selector [string trim $target_selector] + if { [dg-process-target-1 $target_selector] == "N" } { + return "N" + } + return [dg-process-target-1 $xfail_selector] + + } + return [dg-process-target-1 $selector] + } +} + +# Define libffi callbacks for dg.exp. + +proc libffi-dg-test-1 { target_compile prog do_what extra_tool_flags } { + + # To get all \n in dg-output test strings to match printf output + # in a system that outputs it as \015\012 (i.e. not just \012), we + # need to change all \n into \r?\n. As there is no dejagnu flag + # or hook to do that, we simply change the text being tested. + # Unfortunately, we have to know that the variable is called + # dg-output-text and lives in the caller of libffi-dg-test, which + # is two calls up. Overriding proc dg-output would be longer and + # would necessarily have the same assumption. + upvar 2 dg-output-text output_match + + if { [llength $output_match] > 1 } { + regsub -all "\n" [lindex $output_match 1] "\r?\n" x + set output_match [lreplace $output_match 1 1 $x] + } + + # Set up the compiler flags, based on what we're going to do. + + set options [list] + switch $do_what { + "compile" { + set compile_type "assembly" + set output_file "[file rootname [file tail $prog]].s" + } + "link" { + set compile_type "executable" + set output_file "[file rootname [file tail $prog]].exe" + # The following line is needed for targets like the i960 where + # the default output file is b.out. Sigh. + } + "run" { + set compile_type "executable" + # FIXME: "./" is to cope with "." not being in $PATH. + # Should this be handled elsewhere? + # YES. + set output_file "./[file rootname [file tail $prog]].exe" + # This is the only place where we care if an executable was + # created or not. If it was, dg.exp will try to run it. + remote_file build delete $output_file; + } + default { + perror "$do_what: not a valid dg-do keyword" + return "" + } + } + + if { $extra_tool_flags != "" } { + lappend options "additional_flags=$extra_tool_flags" + } + + set comp_output [libffi_target_compile "$prog" "$output_file" "$compile_type" $options]; + + + return [list $comp_output $output_file] +} + + +proc libffi-dg-test { prog do_what extra_tool_flags } { + return [libffi-dg-test-1 target_compile $prog $do_what $extra_tool_flags] +} + +proc libffi-dg-prune { target_triplet text } { + # We get this with some qemu emulated systems (eg. 
ppc64le-linux-gnu) + regsub -all "(^|\n)\[^\n\]*unable to perform all requested operations" $text "" text + return $text +} + +proc libffi-init { args } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global objdir + global TOOL_OPTIONS + global tool + global libffi_include + global libffi_link_flags + global tool_root_dir + global ld_library_path + global compiler_vendor + + if ![info exists blddirffi] { + set blddirffi [pwd]/.. + } + + verbose "libffi $blddirffi" + + # Which compiler are we building with? + set tmp [grep "$blddirffi/config.log" "^ax_cv_c_compiler_vendor.*$"] + regexp -- {^[^=]*=(.*)$} $tmp nil compiler_vendor + + if { [string match $compiler_vendor "gnu"] } { + set gccdir [lookfor_file $tool_root_dir gcc/libgcc.a] + if {$gccdir != ""} { + set gccdir [file dirname $gccdir] + } + verbose "gccdir $gccdir" + + set ld_library_path "." + append ld_library_path ":${gccdir}" + + set compiler "${gccdir}/xgcc" + if { [is_remote host] == 0 && [which $compiler] != 0 } { + foreach i "[exec $compiler --print-multi-lib]" { + set mldir "" + regexp -- "\[a-z0-9=_/\.-\]*;" $i mldir + set mldir [string trimright $mldir "\;@"] + if { "$mldir" == "." } { + continue + } + if { [llength [glob -nocomplain ${gccdir}/${mldir}/libgcc_s*.so.*]] >= 1 } { + append ld_library_path ":${gccdir}/${mldir}" + } + } + } + } + + # add the library path for libffi. + append ld_library_path ":${blddirffi}/.libs" + + verbose "ld_library_path: $ld_library_path" + + # Point to the Libffi headers in libffi. + set libffi_include "${blddirffi}/include" + verbose "libffi_include $libffi_include" + + set libffi_dir "${blddirffi}/.libs" + verbose "libffi_dir $libffi_dir" + if { $libffi_dir != "" } { + set libffi_dir [file dirname ${libffi_dir}] + set libffi_link_flags "-L${libffi_dir}/.libs" + } + + set_ld_library_path_env_vars + libffi_maybe_build_wrapper "${objdir}/testglue.o" +} + +proc libffi_exit { } { + global gluefile; + + if [info exists gluefile] { + file_on_build delete $gluefile; + unset gluefile; + } +} + +proc libffi_target_compile { source dest type options } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global TOOL_OPTIONS + global libffi_link_flags + global libffi_include + global target_triplet + global compiler_vendor + + if { [target_info needs_status_wrapper]!="" && [info exists gluefile] } { + lappend options "libs=${gluefile}" + lappend options "ldflags=$wrap_flags" + } + + # TOOL_OPTIONS must come first, so that it doesn't override testcase + # specific options. + if [info exists TOOL_OPTIONS] { + lappend options "additional_flags=$TOOL_OPTIONS" + } + + # search for ffi_mips.h in srcdir, too + lappend options "additional_flags=-I${libffi_include} -I${srcdir}/../include -I${libffi_include}/.." + lappend options "additional_flags=${libffi_link_flags}" + + # Darwin needs a stack execution allowed flag. + + if { [istarget "*-*-darwin9*"] || [istarget "*-*-darwin1*"] + || [istarget "*-*-darwin2*"] } { + lappend options "additional_flags=-Wl,-allow_stack_execute" + } + + # If you're building the compiler with --prefix set to a place + # where it's not yet installed, then the linker won't be able to + # find the libgcc used by libffi.dylib. We could pass the + # -dylib_file option, but that's complicated, and it's much easier + # to just make the linker find libgcc using -L options. 
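+ # (Each "additional_flags=", "libs=" or "ldflags=" element appended to
+ # $options below, including the target-specific ones that follow, is a
+ # keyword/value pair that DejaGnu's target_compile merges into the final
+ # compile command line.)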
+ if { [string match "*-*-darwin*" $target_triplet] } { + lappend options "libs= -shared-libgcc" + } + + if { [string match "*-*-openbsd*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + lappend options "libs= -lffi" + + if { [string match "aarch64*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + # this may be required for g++, but just confused clang. + if { [string match "*.cc" $source] } { + lappend options "c++" + } + + if { [string match "arc*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + verbose "options: $options" + return [target_compile $source $dest $type $options] +} + +# TEST should be a preprocessor condition. Returns true if it holds. +proc libffi_feature_test { test } { + set src "ffitest[pid].c" + + set f [open $src "w"] + puts $f "#include " + puts $f $test + puts $f "/* OK */" + puts $f "#else" + puts $f "# error Failed $test" + puts $f "#endif" + close $f + + set lines [libffi_target_compile $src /dev/null assembly ""] + file delete $src + + return [string match "" $lines] +} + +# Utility routines. + +# +# search_for -- looks for a string match in a file +# +proc search_for { file pattern } { + set fd [open $file r] + while { [gets $fd cur_line]>=0 } { + if [string match "*$pattern*" $cur_line] then { + close $fd + return 1 + } + } + close $fd + return 0 +} + +# Modified dg-runtest that can cycle through a list of optimization options +# as c-torture does. +proc libffi-dg-runtest { testcases default-extra-flags } { + global runtests + + foreach test $testcases { + # If we're only testing specific files and this isn't one of + # them, skip it. + if ![runtest_file_p $runtests $test] { + continue + } + + # Look for a loop within the source code - if we don't find one, + # don't pass -funroll[-all]-loops. + global torture_with_loops torture_without_loops + if [expr [search_for $test "for*("]+[search_for $test "while*("]] { + set option_list $torture_with_loops + } else { + set option_list $torture_without_loops + } + + set nshort [file tail [file dirname $test]]/[file tail $test] + + foreach flags $option_list { + verbose "Testing $nshort, $flags" 1 + dg-test $test $flags ${default-extra-flags} + } + } +} + +proc run-many-tests { testcases extra_flags } { + global compiler_vendor + global env + switch $compiler_vendor { + "clang" { + set common "-W -Wall" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + "gnu" { + set common "-W -Wall -Wno-psabi" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + default { + # Assume we are using the vendor compiler. 
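+ # (For instance a proprietary system compiler. Its warning flags are
+ # unknown, so no -W options are added, and unless LIBFFI_TEST_OPTIMIZATION
+ # is set the compiler's default optimization level is used.)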
+ set common "" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "" } + } + } + } + + info exists env(LD_LIBRARY_PATH) + + set targetabis { "" } + if [string match $compiler_vendor "gnu"] { + if [libffi_feature_test "#ifdef __i386__"] { + set targetabis { + "" + "-DABI_NUM=FFI_STDCALL -DABI_ATTR=__STDCALL__" + "-DABI_NUM=FFI_THISCALL -DABI_ATTR=__THISCALL__" + "-DABI_NUM=FFI_FASTCALL -DABI_ATTR=__FASTCALL__" + } + } elseif { [istarget "x86_64-*-*"] \ + && [libffi_feature_test "#if !defined __ILP32__ \ + && !defined __i386__"] } { + set targetabis { + "" + "-DABI_NUM=FFI_GNUW64 -DABI_ATTR=__MSABI__" + } + } + } + + set common [ concat $common $extra_flags ] + foreach test $testcases { + set testname [file tail $test] + if [search_for $test "ABI_NUM"] { + set abis $targetabis + } else { + set abis { "" } + } + foreach opt $optimizations { + foreach abi $abis { + set options [concat $common $opt $abi] + verbose "Testing $testname, $options" 1 + dg-test $test $options "" + } + } + } +} + +# Like check_conditional_xfail, but callable from a dg test. + +proc dg-xfail-if { args } { + set args [lreplace $args 0 0] + set selector "target [join [lindex $args 1]]" + if { [dg-process-target $selector] == "S" } { + global compiler_conditional_xfail_data + set compiler_conditional_xfail_data $args + } +} + +proc check-flags { args } { + + # The args are within another list; pull them out. + set args [lindex $args 0] + + # The next two arguments are optional. If they were not specified, + # use the defaults. + if { [llength $args] == 2 } { + lappend $args [list "*"] + } + if { [llength $args] == 3 } { + lappend $args [list ""] + } + + # If the option strings are the defaults, or the same as the + # defaults, there is no need to call check_conditional_xfail to + # compare them to the actual options. + if { [string compare [lindex $args 2] "*"] == 0 + && [string compare [lindex $args 3] "" ] == 0 } { + set result 1 + } else { + # The target list might be an effective-target keyword, so replace + # the original list with "*-*-*", since we already know it matches. + set result [check_conditional_xfail [lreplace $args 1 1 "*-*-*"]] + } + + return $result +} + +proc dg-skip-if { args } { + # Verify the number of arguments. The last two are optional. + set args [lreplace $args 0 0] + if { [llength $args] < 2 || [llength $args] > 4 } { + error "dg-skip-if 2: need 2, 3, or 4 arguments" + } + + # Don't bother if we're already skipping the test. + upvar dg-do-what dg-do-what + if { [lindex ${dg-do-what} 1] == "N" } { + return + } + + set selector [list target [lindex $args 1]] + if { [dg-process-target $selector] == "S" } { + if [check-flags $args] { + upvar dg-do-what dg-do-what + set dg-do-what [list [lindex ${dg-do-what} 0] "N" "P"] + } + } +} + +# We need to make sure that additional_files and additional_sources +# are both cleared out after every test. It is not enough to clear +# them out *before* the next test run because gcc-target-compile gets +# run directly from some .exp files (outside of any test). (Those +# uses should eventually be eliminated.) + +# Because the DG framework doesn't provide a hook that is run at the +# end of a test, we must replace dg-test with a wrapper. 
+ +if { [info procs saved-dg-test] == [list] } { + rename dg-test saved-dg-test + + proc dg-test { args } { + global additional_files + global additional_sources + global errorInfo + + if { [ catch { eval saved-dg-test $args } errmsg ] } { + set saved_info $errorInfo + set additional_files "" + set additional_sources "" + error $errmsg $saved_info + } + set additional_files "" + set additional_sources "" + } +} + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp new file mode 100644 index 0000000..6b7beba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp @@ -0,0 +1,283 @@ +# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# This file was contributed by John David Anglin (dave.anglin@nrc-cnrc.gc.ca) + +set orig_environment_saved 0 +set orig_ld_library_path_saved 0 +set orig_ld_run_path_saved 0 +set orig_shlib_path_saved 0 +set orig_ld_libraryn32_path_saved 0 +set orig_ld_library64_path_saved 0 +set orig_ld_library_path_32_saved 0 +set orig_ld_library_path_64_saved 0 +set orig_dyld_library_path_saved 0 +set orig_path_saved 0 + +####################################### +# proc set_ld_library_path_env_vars { } +####################################### + +proc set_ld_library_path_env_vars { } { + global ld_library_path + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + global GCC_EXEC_PREFIX + + # Set the relocated compiler prefix, but only if the user hasn't specified one. + if { [info exists GCC_EXEC_PREFIX] && ![info exists env(GCC_EXEC_PREFIX)] } { + setenv GCC_EXEC_PREFIX "$GCC_EXEC_PREFIX" + } + + # Setting the ld library path causes trouble when testing cross-compilers. + if { [is_remote target] } { + return + } + + if { $orig_environment_saved == 0 } { + global env + + set orig_environment_saved 1 + + # Save the original environment. 
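+ # (The originals are saved at most once per run; a later call prepends
+ # $ld_library_path to the values captured here rather than to the current
+ # environment, so repeated calls do not accumulate path entries.)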
+ if [info exists env(LD_LIBRARY_PATH)] { + set orig_ld_library_path "$env(LD_LIBRARY_PATH)" + set orig_ld_library_path_saved 1 + } + if [info exists env(LD_RUN_PATH)] { + set orig_ld_run_path "$env(LD_RUN_PATH)" + set orig_ld_run_path_saved 1 + } + if [info exists env(SHLIB_PATH)] { + set orig_shlib_path "$env(SHLIB_PATH)" + set orig_shlib_path_saved 1 + } + if [info exists env(LD_LIBRARYN32_PATH)] { + set orig_ld_libraryn32_path "$env(LD_LIBRARYN32_PATH)" + set orig_ld_libraryn32_path_saved 1 + } + if [info exists env(LD_LIBRARY64_PATH)] { + set orig_ld_library64_path "$env(LD_LIBRARY64_PATH)" + set orig_ld_library64_path_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_32)] { + set orig_ld_library_path_32 "$env(LD_LIBRARY_PATH_32)" + set orig_ld_library_path_32_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_64)] { + set orig_ld_library_path_64 "$env(LD_LIBRARY_PATH_64)" + set orig_ld_library_path_64_saved 1 + } + if [info exists env(DYLD_LIBRARY_PATH)] { + set orig_dyld_library_path "$env(DYLD_LIBRARY_PATH)" + set orig_dyld_library_path_saved 1 + } + if [info exists env(PATH)] { + set orig_path "$env(PATH)" + set orig_path_saved 1 + } + } + + # We need to set ld library path in the environment. Currently, + # unix.exp doesn't set the environment correctly for all systems. + # It only sets SHLIB_PATH and LD_LIBRARY_PATH when it executes a + # program. We also need the environment set for compilations, etc. + # + # On IRIX 6, we have to set variables akin to LD_LIBRARY_PATH, but + # called LD_LIBRARYN32_PATH (for the N32 ABI) and LD_LIBRARY64_PATH + # (for the 64-bit ABI). The same applies to Darwin (DYLD_LIBRARY_PATH), + # Solaris 32 bit (LD_LIBRARY_PATH_32), Solaris 64 bit (LD_LIBRARY_PATH_64), + # and HP-UX (SHLIB_PATH). In some cases, the variables are independent + # of LD_LIBRARY_PATH, and in other cases LD_LIBRARY_PATH is used if the + # variable is not defined. + # + # Doing this is somewhat of a hack as ld_library_path gets repeated in + # SHLIB_PATH and LD_LIBRARY_PATH when unix_load sets these variables. + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH "$ld_library_path" + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$ld_library_path:$orig_ld_run_path" + } else { + setenv LD_RUN_PATH "$ld_library_path" + } + # The default shared library dynamic path search for 64-bit + # HP-UX executables searches LD_LIBRARY_PATH before SHLIB_PATH. + # LD_LIBRARY_PATH isn't used for 32-bit executables. Thus, we + # set LD_LIBRARY_PATH and SHLIB_PATH as if they were independent. 
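+ # (Below, LD_LIBRARYN32_PATH, LD_LIBRARY64_PATH, LD_LIBRARY_PATH_32 and
+ # LD_LIBRARY_PATH_64 additionally fall back to the saved LD_LIBRARY_PATH
+ # when they were not set originally, mirroring the "LD_LIBRARY_PATH is
+ # used if the variable is not defined" cases described above.)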
+ if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$ld_library_path:$orig_shlib_path" + } else { + setenv SHLIB_PATH "$ld_library_path" + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_libraryn32_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARYN32_PATH "$ld_library_path" + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library64_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY64_PATH "$ld_library_path" + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path_32" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_32 "$ld_library_path" + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path_64" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_64 "$ld_library_path" + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$ld_library_path:$orig_dyld_library_path" + } else { + setenv DYLD_LIBRARY_PATH "$ld_library_path" + } + if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } { + if { $orig_path_saved } { + setenv PATH "$ld_library_path:$orig_path" + } else { + setenv PATH "$ld_library_path" + } + } + + verbose -log "set_ld_library_path_env_vars: ld_library_path=$ld_library_path" +} + +####################################### +# proc restore_ld_library_path_env_vars { } +####################################### + +proc restore_ld_library_path_env_vars { } { + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + + if { $orig_environment_saved == 0 } { + return + } + + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$orig_ld_library_path" + } elseif [info exists env(LD_LIBRARY_PATH)] { + unsetenv LD_LIBRARY_PATH + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$orig_ld_run_path" + } elseif [info exists env(LD_RUN_PATH)] { + unsetenv LD_RUN_PATH + } + if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$orig_shlib_path" + } elseif [info exists env(SHLIB_PATH)] { + unsetenv SHLIB_PATH + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$orig_ld_libraryn32_path" + } elseif [info exists env(LD_LIBRARYN32_PATH)] { + unsetenv LD_LIBRARYN32_PATH + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$orig_ld_library64_path" + } elseif [info exists env(LD_LIBRARY64_PATH)] { + unsetenv LD_LIBRARY64_PATH + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$orig_ld_library_path_32" + } elseif [info exists env(LD_LIBRARY_PATH_32)] { + 
unsetenv LD_LIBRARY_PATH_32 + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$orig_ld_library_path_64" + } elseif [info exists env(LD_LIBRARY_PATH_64)] { + unsetenv LD_LIBRARY_PATH_64 + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$orig_dyld_library_path" + } elseif [info exists env(DYLD_LIBRARY_PATH)] { + unsetenv DYLD_LIBRARY_PATH + } + if { $orig_path_saved } { + setenv PATH "$orig_path" + } elseif [info exists env(PATH)] { + unsetenv PATH + } +} + +####################################### +# proc get_shlib_extension { } +####################################### + +proc get_shlib_extension { } { + global shlib_ext + + if { [ istarget *-*-darwin* ] } { + set shlib_ext "dylib" + } elseif { [ istarget *-*-cygwin* ] || [ istarget *-*-mingw* ] } { + set shlib_ext "dll" + } elseif { [ istarget hppa*-*-hpux* ] } { + set shlib_ext "sl" + } else { + set shlib_ext "so" + } + return $shlib_ext +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper.exp new file mode 100644 index 0000000..4e5ae43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/lib/wrapper.exp @@ -0,0 +1,45 @@ +# Copyright (C) 2004, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# This file contains GCC-specifics for status wrappers for test programs. + +# ${tool}_maybe_build_wrapper -- Build wrapper object if the target +# needs it. FILENAME is the path to the wrapper file. If there are +# additional arguments, they are command-line options to provide to +# the compiler when compiling FILENAME. + +proc ${tool}_maybe_build_wrapper { filename args } { + global gluefile wrap_flags + + if { [target_info needs_status_wrapper] != "" \ + && [target_info needs_status_wrapper] != "0" \ + && ![info exists gluefile] } { + set saved_wrap_compile_flags [target_info wrap_compile_flags] + set flags [join $args " "] + # The wrapper code may contain code that gcc objects on. This + # became true for dejagnu-1.4.4. The set of warnings and code + # that gcc objects on may change, so just make sure -w is always + # passed to turn off all warnings. 
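+ # (build_wrapper itself is provided by DejaGnu and compiles its status
+ # wrapper glue source; in this testsuite, libffi-init invokes this proc
+ # as libffi_maybe_build_wrapper "${objdir}/testglue.o".)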
+ set_currtarget_info wrap_compile_flags \
+ "$saved_wrap_compile_flags -w $flags"
+ set result [build_wrapper $filename]
+ set_currtarget_info wrap_compile_flags "$saved_wrap_compile_flags"
+ if { $result != "" } {
+ set gluefile [lindex $result 0]
+ set wrap_flags [lindex $result 1]
+ }
+ }
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile
new file mode 100644
index 0000000..3322de9
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile
@@ -0,0 +1,28 @@
+CC = gcc
+CFLAGS = -O2 -Wall
+prefix =
+includedir = $(prefix)/include
+libdir = $(prefix)/lib
+CPPFLAGS = -I$(includedir)
+LDFLAGS = -L$(libdir) -Wl,-rpath,$(libdir)
+
+all: check-call check-callback
+
+test-call: test-call.c testcases.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-call test-call.c -lffi
+
+test-callback: test-callback.c testcases.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-callback test-callback.c -lffi
+
+check-call: test-call
+	./test-call > test-call.out
+	LC_ALL=C uniq -u < test-call.out > failed-call
+	test '!' -s failed-call
+
+check-callback: test-callback
+	./test-callback > test-callback.out
+	LC_ALL=C uniq -u < test-callback.out > failed-callback
+	test '!' -s failed-callback
+
+clean:
+	rm -f test-call test-callback test-call.out test-callback.out failed-call failed-callback
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README
new file mode 100644
index 0000000..be8540b
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/README
@@ -0,0 +1,78 @@
+This package contains a test suite for libffi.
+
+This test suite can be compiled with a plain C compiler; it does not need
+'expect' or any other package that is often not installed.
+
+The test suite consists of 81 C functions, each with a different signature.
+* test-call verifies that calling each function directly produces the same
+  results as calling the function indirectly through 'ffi_call' (a sketch of
+  this comparison follows the "How to use the test suite" steps below).
+* test-callback verifies that calling each function directly produces the
+  same results as calling a callback function (an object built by
+  'ffi_prep_closure_loc') that simulates the original function.
+
+Each direct or indirect invocation should produce one line of output to
+stdout. A correct output consists of paired lines, such as
+
+void f(void):
+void f(void):
+int f(void):->99
+int f(void):->99
+int f(int):(1)->2
+int f(int):(1)->2
+int f(2*int):(1,2)->3
+int f(2*int):(1,2)->3
+...
+
+The Makefile then creates two files:
+* failed-call, which consists of the non-paired lines of output of
+  'test-call',
+* failed-callback, which consists of the non-paired lines of output of
+  'test-callback'.
+
+The test suite passes if both failed-call and failed-callback come out
+empty.
+
+
+How to use the test suite
+-------------------------
+
+1. Modify the Makefile's variables
+     prefix = the directory in which libffi was installed
+     CC = the C compiler, often with options such as "-m32" or "-m64"
+          that enforce a certain ABI,
+     CFLAGS = optimization options (they only need to be changed for
+              non-GCC compilers)
+2. Run "make". If it already fails in "test-call", also run
+   "make check-callback".
+3. If this failed, inspect the output files.
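+
+(For orientation: the comparison that test-call performs for each signature
+is conceptually equivalent to the sketch below. The function 'fn' and its
+int(int,int) signature are made up for illustration; the real tests are
+generated from testcases.c and cover 81 different signatures.)
+
+  /* Sketch only -- not part of the test suite. */
+  #include <stdio.h>
+  #include <ffi.h>
+
+  static int fn (int a, int b) { return a + b; }
+
+  int main (void)
+  {
+    ffi_cif cif;
+    ffi_type *argtypes[] = { &ffi_type_sint, &ffi_type_sint };
+    int a = 1, b = 2;
+    void *args[] = { &a, &b };
+    ffi_arg result;
+
+    printf ("int f(2*int):(%d,%d)->%d\n", a, b, fn (a, b));   /* direct */
+    if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2,
+                      &ffi_type_sint, argtypes) != FFI_OK)
+      return 1;
+    ffi_call (&cif, FFI_FN (fn), &result, args);              /* indirect */
+    printf ("int f(2*int):(%d,%d)->%d\n", a, b, (int) result);
+    return 0;
+  }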
+
+
+How to interpret the results
+----------------------------
+
+The failed-call and failed-callback files consist of paired lines: the first
+line of each pair is the result of the direct invocation, the second line is
+the result of the invocation through libffi.
+
+For example, this output
+
+uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->255
+uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->0
+
+indicates that the arguments were passed correctly, but the return
+value came out wrong.
+
+And this output
+
+float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,6,7,8,561,1105,1729,2465,2821,6601)->15319.1
+float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,-140443648,10,268042216,-72537980,-140443648,-140443648,-140443648,-140443648,-140443648)->-6.47158e+08
+
+indicates that integer arguments that come after 17 floating-point arguments
+were not passed correctly.
+
+
+Credits
+-------
+
+The test suite is based on that of GNU libffcall-2.0.
+Authors: Bill Triggs, Bruno Haible
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h
new file mode 100644
index 0000000..00604a5
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h
@@ -0,0 +1,50 @@
+/* Determine alignment of types.
+   Copyright (C) 2003-2004, 2006, 2009-2017 Free Software Foundation, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, see <https://www.gnu.org/licenses/>.  */
+
+#ifndef _ALIGNOF_H
+#define _ALIGNOF_H
+
+#include <stddef.h>
+
+/* alignof_slot (TYPE)
+   Determine the alignment of a structure slot (field) of a given type,
+   at compile time.  Note that the result depends on the ABI.
+   This is the same as alignof (TYPE) and _Alignof (TYPE), defined in
+   <stdalign.h> if __alignof_is_defined is 1.
+   Note: The result cannot be used as a value for an 'enum' constant,
+   due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. */
+#if defined __cplusplus
+  template <class type> struct alignof_helper { char __slot1; type __slot2; };
+# define alignof_slot(type) offsetof (alignof_helper<type>, __slot2)
+#else
+# define alignof_slot(type) offsetof (struct { char __slot1; type __slot2; }, __slot2)
+#endif
+
+/* alignof_type (TYPE)
+   Determine the good alignment of an object of the given type at compile time.
+   Note that this is not necessarily the same as alignof_slot(type).
+   For example, with GNU C on x86 platforms: alignof_type(double) = 8, but
+   - when -malign-double is not specified: alignof_slot(double) = 4,
+   - when -malign-double is specified: alignof_slot(double) = 8.
+   Note: The result cannot be used as a value for an 'enum' constant,
+   due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc.
*/ +#if defined __GNUC__ || defined __IBM__ALIGNOF__ +# define alignof_type __alignof__ +#else +# define alignof_type alignof_slot +#endif + +#endif /* _ALIGNOF_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp new file mode 100644 index 0000000..44aebc5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp @@ -0,0 +1,63 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2018 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir +global compiler_vendor + +# The conversion of this testsuite into a dejagnu compatible testsuite +# was done in a pretty lazy fashion, and requires the use of compiler +# flags to disable warnings for now. +if { [string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-unused-but-set-variable -Wno-uninitialized"; +} +if { [string match $compiler_vendor "microsoft"] } { + # -wd4996 suggest use of vsprintf_s instead of vsprintf + # -wd4116 unnamed type definition + # -wd4101 unreferenced local variable + # -wd4244 warning about implicit double to float conversion + set warning_options "-wd4996 -wd4116 -wd4101 -wd4244"; +} +if { ![string match $compiler_vendor "microsoft"] && ![string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-uninitialized"; +} + + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-call.c]] + +for {set i 1} {$i < 82} {incr i} { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-callback.c]] + +for {set i 1} {$i < 81} {incr i} { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] + } else { + foreach test $tlist { + unsupported [format "%s -DDGTEST=%d %s" $test $i $warning_options] + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c new file mode 100644 index 0000000..01a8a21 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c @@ -0,0 +1,1745 @@ +/** + Copyright 1993 Bill Triggs + Copyright 1995-2017 Bruno Haible + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +**/ + +/* { dg-do run } */ + +#include +#include +#include +#include +#include "alignof.h" +#include + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() +#define FFI_CALL(cif,fn,args,retaddr) \ + ffi_call(&(cif),(void(*)(void))(fn),retaddr,args) + +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +void + void_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 1 + v_v(); + clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + { + FFI_CALL(cif,v_v,NULL,NULL); + } + } +#endif + return; +} +void + int_tests (void) +{ + int ir; + ffi_arg retvalue; +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + { + FFI_CALL(cif,i_v,NULL,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + 
ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1 }; + FFI_CALL(cif,i_i,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2 }; + FFI_CALL(cif,i_i2,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4 }; + FFI_CALL(cif,i_i4,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8 }; + FFI_CALL(cif,i_i8,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8, &i9, &i10, &i11, &i12, &i13, &i14, &i15, &i16 }; + FFI_CALL(cif,i_i16,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + + return; +} +void + float_tests (void) +{ + float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1 }; + FFI_CALL(cif,f_f,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2 }; + FFI_CALL(cif,f_f2,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4 }; + FFI_CALL(cif,f_f4,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8 }; + FFI_CALL(cif,f_f8,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16 }; + FFI_CALL(cif,f_f16,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &f18, &f19, &f20, &f21, &f22, &f23, &f24 }; + FFI_CALL(cif,f_f24,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +} +void + double_tests (void) +{ + double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1 }; + FFI_CALL(cif,d_d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2 }; + FFI_CALL(cif,d_d2,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4 }; + FFI_CALL(cif,d_d4,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8 }; + FFI_CALL(cif,d_d8,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16 }; + FFI_CALL(cif,d_d16,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + pointer_tests (void) +{ + void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + { + void* puc1 = &uc1; + void* pd2 = &d2; + void* pstr3 = str3; + void* pI4 = &I4; + /*const*/ void* args[] = { &puc1, &pd2, &pstr3, &pI4 }; + FFI_CALL(cif,vp_vpdpcpsp,args,&vpr); + } + } + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + return; +} +void + mixed_number_tests (void) +{ + uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + + /* Unsigned types. + */ +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1, us2, ui3, ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + { + ffi_arg r; + /*const*/ void* args[] = { &uc1, &us2, &ui3, &ul4 }; + FFI_CALL(cif,uc_ucsil,args,&r); + ucr = (uchar) r; + } + } + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + /* Mixed int & float types. 
+ */ + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &d3, &d4 }; + FFI_CALL(cif,d_iidd,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &d4, &i5 }; + FFI_CALL(cif,d_iiidi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &d2, &i3, &d4 }; + FFI_CALL(cif,d_idid,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &f1, &d2, &i3 }; + FFI_CALL(cif,d_fdi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + { + ffi_arg rint; + /*const*/ void* args[] = { &c1, &d2, &c3, &d4 }; + FFI_CALL(cif,us_cdcd,args,&rint); + usr = (ushort) rint; + } + } + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + /* Long long types. 
+ */ + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &ll1, &i13 }; + FFI_CALL(cif,ll_iiilli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &f13, &ll1, &i13 }; + FFI_CALL(cif,ll_flli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &i9 }; + FFI_CALL(cif,f_fi,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &i9 }; + FFI_CALL(cif,f_f2i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &i9 }; + FFI_CALL(cif,f_f3i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &i9 }; + FFI_CALL(cif,f_f4i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &i9 }; + FFI_CALL(cif,f_f7i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &i9 }; + FFI_CALL(cif,f_f8i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f12i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &i9 }; + FFI_CALL(cif,f_f12i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &i9 }; + FFI_CALL(cif,f_f13i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &i9 }; + FFI_CALL(cif,d_di,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &i9 }; + FFI_CALL(cif,d_d2i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &i9 }; + FFI_CALL(cif,d_d3i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &i9 }; + FFI_CALL(cif,d_d4i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = 
d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &i9 }; + FFI_CALL(cif,d_d7i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &i9 }; + FFI_CALL(cif,d_d8i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &i9 }; + FFI_CALL(cif,d_d12i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 43 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &i9 }; + FFI_CALL(cif,d_d13i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + small_structure_return_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + { + FFI_CALL(cif,S1_v,NULL,&r); + } + } + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + 
ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + { + FFI_CALL(cif,S2_v,NULL,&r); + } + } + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size3 r = S3_v(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + { + FFI_CALL(cif,S3_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + { + FFI_CALL(cif,S4_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + { + FFI_CALL(cif,S7_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + { + FFI_CALL(cif,S8_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, 
&ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + { + FFI_CALL(cif,S12_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + { + FFI_CALL(cif,S15_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 52 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + { + FFI_CALL(cif,S16_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif +} +void + structure_tests (void) +{ + Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + { + /*const*/ void* args[] = { &I1, &I2, &I3 }; + FFI_CALL(cif,I_III,args,&Ir); + } + } + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 54 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + 
fflush(out); + Cr.x = '\0'; clear_traces(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + { + /*const*/ void* args[] = { &C1, &d2, &C3 }; + FFI_CALL(cif,C_CdC,args,&Cr); + } + } + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 55 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + { + /*const*/ void* args[] = { &F1, &f2, &d3 }; + FFI_CALL(cif,F_Ffd,args,&Fr); + } + } + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &f1, &D2, &d3 }; + FFI_CALL(cif,D_fDd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 57 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &D1, &f2, &d3 }; + FFI_CALL(cif,D_Dfd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 58 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + { + /*const*/ void* args[] = { &J1, &i2, &J2 }; + FFI_CALL(cif,J_JiJ,args,&Jr); + } + } + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 59 + Tr = T_TcT(T1,' ',T2); + 
fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + { + char space = ' '; + /*const*/ void* args[] = { &T1, &space, &T2 }; + FFI_CALL(cif,T_TcT,args,&Tr); + } + } + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 60 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + { + /*const*/ void* args[] = { &B1, &c2, &d3, &B2 }; + FFI_CALL(cif,X_BcdB,args,&Xr); + } + } + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif + + return; +} + +void + gpargs_boundary_tests (void) +{ + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &K1, &l9 }; + FFI_CALL(cif,l_l0K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &K1, &l9 }; + FFI_CALL(cif,l_l1K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &K1, &l9 }; + FFI_CALL(cif,l_l2K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { 
&ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &K1, &l9 }; + FFI_CALL(cif,l_l3K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &K1, &l9 }; + FFI_CALL(cif,l_l4K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &K1, &l9 }; + FFI_CALL(cif,l_l5K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 67 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &K1, &l9 }; + FFI_CALL(cif,l_l6K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 68 + fr = f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &l6, &l7, &l8, &L1 }; + FFI_CALL(cif,f_f17l3L,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 69 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16, &d17, &l6, &l7, &l8, &L1 }; + 
FFI_CALL(cif,d_d17l3L,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &ll1, &l9 }; + FFI_CALL(cif,ll_l2ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &ll1, &l9 }; + FFI_CALL(cif,ll_l3ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &ll1, &l9 }; + FFI_CALL(cif,ll_l4ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &ll1, &l9 }; + FFI_CALL(cif,ll_l5ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &ll1, &l9 }; + FFI_CALL(cif,ll_l6ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 75 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ 
void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &ll1, &l9 }; + FFI_CALL(cif,ll_l7ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l2d(l1,l2,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &d2, &l9 }; + FFI_CALL(cif,d_l2d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l3d(l1,l2,l3,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &d2, &l9 }; + FFI_CALL(cif,d_l3d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l4d(l1,l2,l3,l4,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &d2, &l9 }; + FFI_CALL(cif,d_l4d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l5d(l1,l2,l3,l4,l5,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &d2, &l9 }; + FFI_CALL(cif,d_l5d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l6d(l1,l2,l3,l4,l5,l6,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &d2, &l9 }; + FFI_CALL(cif,d_l6d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 81 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &d2, &l9 }; + FFI_CALL(cif,d_l7d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} + +int + main (void) +{ + ffi_type_char = (char)(-1) < 0 ? 
ffi_type_schar : ffi_type_uchar; + out = stdout; + + void_tests(); + int_tests(); + float_tests(); + double_tests(); + pointer_tests(); + mixed_number_tests(); + small_structure_return_tests(); + structure_tests(); + gpargs_boundary_tests(); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c new file mode 100644 index 0000000..c2633c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c @@ -0,0 +1,2885 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* { dg-do run } */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdarg.h> +#include "alignof.h" +#include <ffi.h> + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() + +#if defined(__sparc__) && defined(__sun) && defined(__SUNPRO_C) /* SUNWspro cc */ +/* SunPRO cc miscompiles the simulator function for X_BcdB: d.i[1] is + * temporarily stored in %l2 and put onto the stack from %l2, but in between + * the copy of X has used %l2 as a counter without saving and restoring its + * value. + */ +#define SKIP_X +#endif +#if defined(__mipsn32__) && !defined(__GNUC__) +/* The X test crashes for an unknown reason. */ +#define SKIP_X +#endif + + +/* These functions simulate the behaviour of the functions defined in testcases.c.
*/ + +/* void tests */ +void v_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&v_v) { fprintf(out,"wrong data for v_v\n"); exit(1); } + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +void i_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_v) { fprintf(out,"wrong data for i_v\n"); exit(1); } + {int r=99; + fprintf(out,"int f(void):"); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i) { fprintf(out,"wrong data for i_i\n"); exit(1); } + int a = *(int*)(*args++); + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + *(ffi_arg*)retp = r; +} +void i_i2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i2) { fprintf(out,"wrong data for i_i2\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i4) { fprintf(out,"wrong data for i_i4\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i8) { fprintf(out,"wrong data for i_i8\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i16) { fprintf(out,"wrong data for i_i16\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int i = *(int*)(*args++); + int j = *(int*)(*args++); + int k = *(int*)(*args++); + int l = *(int*)(*args++); + int m = *(int*)(*args++); + int n = *(int*)(*args++); + int o = *(int*)(*args++); + int p = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(ffi_arg*)retp = r; +}} + +/* float tests */ +void f_f_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f) { fprintf(out,"wrong data for f_f\n"); exit(1); } + {float a = *(float*)(*args++); + float r=a+1.0; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + *(float*)retp = r; +}} +void f_f2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2) { fprintf(out,"wrong data for f_f2\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + 
*(float*)retp = r; +}} +void f_f4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4) { fprintf(out,"wrong data for f_f4\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(float*)retp = r; +}} +void f_f8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8) { fprintf(out,"wrong data for f_f8\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(float*)retp = r; +}} +void f_f16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f16) { fprintf(out,"wrong data for f_f16\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(float*)retp = r; +}} +void f_f24_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f24) { fprintf(out,"wrong data for f_f24\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + float s = *(float*)(*args++); + float t = *(float*)(*args++); + float u = *(float*)(*args++); + float v = *(float*)(*args++); + float w = *(float*)(*args++); + float x = *(float*)(*args++); + float y = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + *(float*)retp = r; +}} + +/* double tests */ +void d_d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d) { fprintf(out,"wrong data for d_d\n"); exit(1); } + {double a = *(double*)(*args++); + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + *(double*)retp = r; +}} +void d_d2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != 
(void*)&d_d2) { fprintf(out,"wrong data for d_d2\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + *(double*)retp = r; +}} +void d_d4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4) { fprintf(out,"wrong data for d_d4\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_d8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8) { fprintf(out,"wrong data for d_d8\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(double*)retp = r; +}} +void d_d16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d16) { fprintf(out,"wrong data for d_d16\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(double*)retp = r; +}} + +/* pointer tests */ +void vp_vpdpcpsp_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&vp_vpdpcpsp) { fprintf(out,"wrong data for vp_vpdpcpsp\n"); exit(1); } + {void* a = *(void* *)(*args++); + double* b = *(double* *)(*args++); + char* c = *(char* *)(*args++); + Int* d = *(Int* *)(*args++); + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + *(void* *)retp = ret; +}} + +/* mixed number tests */ +void uc_ucsil_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&uc_ucsil) { fprintf(out,"wrong data for uc_ucsil\n"); exit(1); } + {uchar a = *(unsigned char *)(*args++); + ushort b = *(unsigned short *)(*args++); + uint c = *(unsigned int *)(*args++); + ulong d = *(unsigned long *)(*args++); + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void d_iidd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iidd) { fprintf(out,"wrong data for d_iidd\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + 
fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_iiidi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iiidi) { fprintf(out,"wrong data for d_iiidi\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + int e = *(int*)(*args++); + double r=a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + *(double*)retp = r; +}} +void d_idid_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_idid) { fprintf(out,"wrong data for d_idid\n"); exit(1); } + {int a = *(int*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_fdi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_fdi) { fprintf(out,"wrong data for d_fdi\n"); exit(1); } + {float a = *(float*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double r=a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + *(double*)retp = r; +}} +void us_cdcd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&us_cdcd) { fprintf(out,"wrong data for us_cdcd\n"); exit(1); } + {char a = *(char*)(*args++); + double b = *(double*)(*args++); + char c = *(char*)(*args++); + double d = *(double*)(*args++); + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void ll_iiilli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_iiilli) { fprintf(out,"wrong data for ll_iiilli\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + long long d = *(long long *)(*args++); + int e = *(int*)(*args++); + long long r = (long long)(int)a + (long long)(int)b + (long long)(int)c + d + (long long)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + *(long long *)retp = r; +}} +void ll_flli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_flli) { fprintf(out,"wrong data for ll_flli\n"); exit(1); } + {float a = *(float*)(*args++); + long long b = *(long long *)(*args++); + int c = *(int*)(*args++); + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + *(long long *)retp = r; +}} +void f_fi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_fi) { fprintf(out,"wrong data for f_fi\n"); exit(1); } + {float a = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + *(float*)retp = r; +}} +void f_f2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2i) { fprintf(out,"wrong data for f_f2i\n"); exit(1); } + {float a = *(float*)(*args++); + 
float b = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(float*)retp = r; +}} +void f_f3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f3i) { fprintf(out,"wrong data for f_f3i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(float*)retp = r; +}} +void f_f4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4i) { fprintf(out,"wrong data for f_f4i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(float*)retp = r; +}} +void f_f7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f7i) { fprintf(out,"wrong data for f_f7i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(float*)retp = r; +}} +void f_f8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8i) { fprintf(out,"wrong data for f_f8i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(float*)retp = r; +}} +void f_f12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f12i) { fprintf(out,"wrong data for f_f12i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(float*)retp = r; +}} +void f_f13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f13i) { fprintf(out,"wrong data for f_f13i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = 
*(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(float*)retp = r; +}} +void d_di_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_di) { fprintf(out,"wrong data for d_di\n"); exit(1); } + {double a = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + *(double*)retp = r; +}} +void d_d2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d2i) { fprintf(out,"wrong data for d_d2i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(double*)retp = r; +}} +void d_d3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d3i) { fprintf(out,"wrong data for d_d3i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(double*)retp = r; +}} +void d_d4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4i) { fprintf(out,"wrong data for d_d4i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(double*)retp = r; +}} +void d_d7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d7i) { fprintf(out,"wrong data for d_d7i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(double*)retp = r; +}} +void d_d8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8i) { fprintf(out,"wrong data for d_d8i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(double*)retp = r; +}} +void d_d12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d12i) { fprintf(out,"wrong data for d_d12i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = 
*(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(double*)retp = r; +}} +void d_d13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d13i) { fprintf(out,"wrong data for d_d13i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(double*)retp = r; +}} + +/* small structure return tests */ +void S1_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S1_v) { fprintf(out,"wrong data for S1_v\n"); exit(1); } + {Size1 r = Size1_1; + fprintf(out,"Size1 f(void):"); + fflush(out); + *(Size1*)retp = r; +}} +void S2_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S2_v) { fprintf(out,"wrong data for S2_v\n"); exit(1); } + {Size2 r = Size2_1; + fprintf(out,"Size2 f(void):"); + fflush(out); + *(Size2*)retp = r; +}} +void S3_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S3_v) { fprintf(out,"wrong data for S3_v\n"); exit(1); } + {Size3 r = Size3_1; + fprintf(out,"Size3 f(void):"); + fflush(out); + *(Size3*)retp = r; +}} +void S4_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S4_v) { fprintf(out,"wrong data for S4_v\n"); exit(1); } + {Size4 r = Size4_1; + fprintf(out,"Size4 f(void):"); + fflush(out); + *(Size4*)retp = r; +}} +void S7_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S7_v) { fprintf(out,"wrong data for S7_v\n"); exit(1); } + {Size7 r = Size7_1; + fprintf(out,"Size7 f(void):"); + fflush(out); + *(Size7*)retp = r; +}} +void S8_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S8_v) { fprintf(out,"wrong data for S8_v\n"); exit(1); } + {Size8 r = Size8_1; + fprintf(out,"Size8 f(void):"); + fflush(out); + *(Size8*)retp = r; +}} +void S12_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S12_v) { fprintf(out,"wrong data for S12_v\n"); exit(1); } + {Size12 r = Size12_1; + fprintf(out,"Size12 f(void):"); + fflush(out); + *(Size12*)retp = r; +}} +void S15_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S15_v) { fprintf(out,"wrong data for S15_v\n"); exit(1); } + {Size15 r = Size15_1; + fprintf(out,"Size15 f(void):"); + fflush(out); + *(Size15*)retp = r; +}} +void S16_v_simulator (ffi_cif* cif, void* retp, 
/*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S16_v) { fprintf(out,"wrong data for S16_v\n"); exit(1); } + {Size16 r = Size16_1; + fprintf(out,"Size16 f(void):"); + fflush(out); + *(Size16*)retp = r; +}} + +/* structure tests */ +void I_III_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&I_III) { fprintf(out,"wrong data for I_III\n"); exit(1); } + {Int a = *(Int*)(*args++); + Int b = *(Int*)(*args++); + Int c = *(Int*)(*args++); + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + *(Int*)retp = r; +}} +void C_CdC_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&C_CdC) { fprintf(out,"wrong data for C_CdC\n"); exit(1); } + {Char a = *(Char*)(*args++); + double b = *(double*)(*args++); + Char c = *(Char*)(*args++); + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + *(Char*)retp = r; +}} +void F_Ffd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&F_Ffd) { fprintf(out,"wrong data for F_Ffd\n"); exit(1); } + {Float a = *(Float*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Float r; + r.x = a.x + b + c; + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Float*)retp = r; +}} +void D_fDd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_fDd) { fprintf(out,"wrong data for D_fDd\n"); exit(1); } + {float a = *(float*)(*args++); + Double b = *(Double*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + *(Double*)retp = r; +}} +void D_Dfd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_Dfd) { fprintf(out,"wrong data for D_Dfd\n"); exit(1); } + {Double a = *(Double*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Double*)retp = r; +}} +void J_JiJ_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&J_JiJ) { fprintf(out,"wrong data for J_JiJ\n"); exit(1); } + {J a = *(J*)(*args++); + int b= *(int*)(*args++); + J c = *(J*)(*args++); + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + *(J*)retp = r; +}} +#ifndef SKIP_EXTRA_STRUCTS +void T_TcT_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&T_TcT) { fprintf(out,"wrong data for T_TcT\n"); exit(1); } + {T a = *(T*)(*args++); + char b = *(char*)(*args++); + T c = *(T*)(*args++); + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + *(T*)retp = r; +}} +void X_BcdB_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&X_BcdB) { fprintf(out,"wrong data for X_BcdB\n"); exit(1); } + {B a = *(B*)(*args++); + char b = *(char*)(*args++); + double c = *(double*)(*args++); + B d = *(B*)(*args++); + static X xr={"return val",'R'}; 
+ X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + *(X*)retp = r; +}} +#endif + +/* gpargs boundary tests */ +void l_l0K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l0K) { fprintf(out,"wrong data for l_l0K\n"); exit(1); } + {K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l1K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l1K) { fprintf(out,"wrong data for l_l1K\n"); exit(1); } + {long a1 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l2K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l2K) { fprintf(out,"wrong data for l_l2K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l3K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l3K) { fprintf(out,"wrong data for l_l3K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l4K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l4K) { fprintf(out,"wrong data for l_l4K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l5K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l5K) { fprintf(out,"wrong data for l_l5K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l6K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l6K) { fprintf(out,"wrong data for l_l6K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = 
*(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void f_f17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f17l3L) { fprintf(out,"wrong data for f_f17l3L\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(float*)retp = r; +}} +void d_d17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d17l3L) { fprintf(out,"wrong data for d_d17l3L\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double q = *(double*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(double*)retp = r; +}} +void ll_l2ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l2ll) { fprintf(out,"wrong data for ll_l2ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l3ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l3ll) { fprintf(out,"wrong data for ll_l3ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = 
*(long*)(*args++); + long a3 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l4ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l4ll) { fprintf(out,"wrong data for ll_l4ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l5ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l5ll) { fprintf(out,"wrong data for ll_l5ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l6ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l6ll) { fprintf(out,"wrong data for ll_l6ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l7ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l7ll) { fprintf(out,"wrong data for ll_l7ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void d_l2d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l2d) { fprintf(out,"wrong data for d_l2d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l3d_simulator 
(ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l3d) { fprintf(out,"wrong data for d_l3d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l4d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l4d) { fprintf(out,"wrong data for d_l4d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l5d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l5d) { fprintf(out,"wrong data for d_l5d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l6d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l6d) { fprintf(out,"wrong data for d_l6d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l7d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l7d) { fprintf(out,"wrong data for d_l7d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + *(double*)retp = r; +}} + + +/* + * The way we run these tests - first call the function directly, then + * through the libffi closure - there is the danger that arguments or results + * seem to be passed correctly, but what we are seeing are in fact the + * vestiges (traces) of the previous call. That could make a broken call path + * look correct and invalidate the test. + * Avoid this by clearing the registers between the first and the second call. 
+ */ +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +int main (void) +{ + void* callback_code; + void* callback_writable; +#define ALLOC_CALLBACK() \ + callback_writable = ffi_closure_alloc(sizeof(ffi_closure),&callback_code); \ + if (!callback_writable) abort() +#define PREP_CALLBACK(cif,simulator,data) \ + if (ffi_prep_closure_loc(callback_writable,&(cif),simulator,data,callback_code) != FFI_OK) abort() +#define FREE_CALLBACK() \ + ffi_closure_free(callback_writable) + + ffi_type_char = (char)(-1) < 0 ? ffi_type_schar : ffi_type_uchar; + out = stdout; + +#if (!defined(DGTEST)) || DGTEST == 1 + /* void tests */ + v_v(); + clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + PREP_CALLBACK(cif,v_v_simulator,(void*)&v_v); + ((void (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); +#endif + + /* int tests */ + { int ir; + +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + PREP_CALLBACK(cif,i_v_simulator,(void*)&i_v); + ir = ((int (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i_simulator,(void*)&i_i); + ir = ((int (ABI_ATTR *) (int)) callback_code) (i1); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i2_simulator,(void*)&i_i2); + ir = ((int (ABI_ATTR *) (int,int)) callback_code) (i1,i2); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i4_simulator,(void*)&i_i4); + ir = ((int (ABI_ATTR *) (int,int,int,int)) callback_code) (i1,i2,i3,i4); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + 
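+  /*
+   * Illustrative sketch: every DGTEST block below follows the same pattern --
+   * call the C function directly, print its result, zero the result and call
+   * clear_traces(), then repeat the call through a libffi closure bound to the
+   * matching *_simulator handler and print again; the two printed lines must
+   * match. Stripped of the test macros, the closure side of that pattern is
+   * roughly (handler and user_data are placeholder names, default ABI and an
+   * int(int) signature assumed):
+   *
+   *   void *code;
+   *   ffi_closure *cl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+   *   ffi_cif cif;
+   *   ffi_type *at[] = { &ffi_type_sint };
+   *   ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, at);
+   *   ffi_prep_closure_loc(cl, &cif, handler, user_data, code);
+   *   int r = ((int (*)(int)) code)(42);
+   *   ffi_closure_free(cl);
+   */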
ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i8_simulator,(void*)&i_i8); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i16_simulator,(void*)&i_i16); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int,int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + } + + /* float tests */ + { float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f_simulator,(void*)&f_f); + fr = ((float (ABI_ATTR *) (float)) callback_code) (f1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2_simulator,(void*)&f_f2); + fr = ((float (ABI_ATTR *) (float,float)) callback_code) (f1,f2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4_simulator,(void*)&f_f4); + fr = ((float (ABI_ATTR *) (float,float,float,float)) callback_code) (f1,f2,f3,f4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8_simulator,(void*)&f_f8); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f16_simulator,(void*)&f_f16); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f24_simulator,(void*)&f_f24); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + + } + + /* double tests */ + { double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d_simulator,(void*)&d_d); + dr = ((double (ABI_ATTR *) (double)) callback_code) (d1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2_simulator,(void*)&d_d2); + dr = ((double (ABI_ATTR *) (double,double)) callback_code) (d1,d2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4_simulator,(void*)&d_d4); + dr = ((double (ABI_ATTR *) (double,double,double,double)) callback_code) (d1,d2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8_simulator,(void*)&d_d8); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d16_simulator,(void*)&d_d16); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* pointer tests */ + { void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + PREP_CALLBACK(cif,vp_vpdpcpsp_simulator,(void*)&vp_vpdpcpsp); + vpr = ((void* (ABI_ATTR *) (void*,double*,char*,Int*)) callback_code) (&uc1,&d2,str3,&I4); + } + FREE_CALLBACK(); + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + } + + /* mixed number tests */ + { uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1,us2,ui3,ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + PREP_CALLBACK(cif,uc_ucsil_simulator,(void*)&uc_ucsil); + ucr = ((uchar (ABI_ATTR *) (uchar,ushort,uint,ulong)) callback_code) (uc1,us2,ui3,ul4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iidd_simulator,(void*)&d_iidd); + dr = ((double (ABI_ATTR *) (int,int,double,double)) callback_code) (i1,i2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + 
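+  /* Zeroing dr and calling clear_traces() scrubs the argument-passing
+     registers, so the closure call below cannot appear to succeed merely by
+     reusing values left behind by the direct call above. */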
ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iiidi_simulator,(void*)&d_iiidi); + dr = ((double (ABI_ATTR *) (int,int,int,double,int)) callback_code) (i1,i2,i3,d4,i5); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_idid_simulator,(void*)&d_idid); + dr = ((double (ABI_ATTR *) (int,double,int,double)) callback_code) (i1,d2,i3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_fdi_simulator,(void*)&d_fdi); + dr = ((double (ABI_ATTR *) (float,double,int)) callback_code) (f1,d2,i3); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + PREP_CALLBACK(cif,us_cdcd_simulator,(void*)&us_cdcd); + usr = ((ushort (ABI_ATTR *) (char,double,char,double)) callback_code) (c1,d2,c3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_iiilli_simulator,(void*)&ll_iiilli); + llr = ((long long (ABI_ATTR *) (int,int,int,long long,int)) callback_code) (i1,i2,i3,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_flli_simulator,(void*)&ll_flli); + llr = ((long long (ABI_ATTR *) (float,long long,int)) callback_code) (f13,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_fi_simulator,(void*)&f_fi); + fr = ((float (ABI_ATTR *) (float,int)) callback_code) (f1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2i_simulator,(void*)&f_f2i); + fr = ((float (ABI_ATTR *) (float,float,int)) callback_code) (f1,f2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f3i_simulator,(void*)&f_f3i); + fr = ((float (ABI_ATTR *) (float,float,float,int)) callback_code) (f1,f2,f3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4i_simulator,(void*)&f_f4i); + fr = ((float (ABI_ATTR *) (float,float,float,float,int)) callback_code) (f1,f2,f3,f4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f7i_simulator,(void*)&f_f7i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8i_simulator,(void*)&f_f8i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f13i_simulator,(void*)&f_f13i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_di_simulator,(void*)&d_di); + dr = ((double (ABI_ATTR *) (double,int)) callback_code) (d1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2i_simulator,(void*)&d_d2i); + dr = ((double (ABI_ATTR *) (double,double,int)) callback_code) (d1,d2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d3i_simulator,(void*)&d_d3i); + dr = ((double (ABI_ATTR *) (double,double,double,int)) callback_code) (d1,d2,d3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4i_simulator,(void*)&d_d4i); + dr = ((double (ABI_ATTR *) (double,double,double,double,int)) callback_code) (d1,d2,d3,d4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d7i_simulator,(void*)&d_d7i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + 
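+    /* FFI_PREP_CIF(cif,argtypes,rettype) is a helper macro defined near the
+       top of this file (not shown here); presumably it wraps ffi_prep_cif()
+       with the selected ABI and an argument count taken from
+       sizeof(argtypes) -- treat that expansion as an assumption. */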
FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8i_simulator,(void*)&d_d8i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d12i_simulator,(void*)&d_d12i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d13i_simulator,(void*)&d_d13i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* small structure return tests */ +#if (!defined(DGTEST)) || DGTEST == 43 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + PREP_CALLBACK(cif,S1_v_simulator,(void*)&S1_v); + r = ((Size1 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + PREP_CALLBACK(cif,S2_v_simulator,(void*)&S2_v); + r = ((Size2 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size3 r = S3_v(); + 
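+    /* The Size1..Size16 returns in this group probe the small-aggregate
+       boundary: on most ABIs the smaller sizes come back in registers while
+       the larger ones are returned through a hidden pointer, and the closure
+       path must reproduce whichever convention applies. */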
fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + PREP_CALLBACK(cif,S3_v_simulator,(void*)&S3_v); + r = ((Size3 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + PREP_CALLBACK(cif,S4_v_simulator,(void*)&S4_v); + r = ((Size4 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + PREP_CALLBACK(cif,S7_v_simulator,(void*)&S7_v); + r = ((Size7 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + PREP_CALLBACK(cif,S8_v_simulator,(void*)&S8_v); + r = ((Size8 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* 
ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + PREP_CALLBACK(cif,S12_v_simulator,(void*)&S12_v); + r = ((Size12 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + PREP_CALLBACK(cif,S15_v_simulator,(void*)&S15_v); + r = ((Size15 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + PREP_CALLBACK(cif,S16_v_simulator,(void*)&S16_v); + r = ((Size16 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif + + + /* structure tests */ + { Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 52 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = 
sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + PREP_CALLBACK(cif,I_III_simulator,(void*)&I_III); + Ir = ((Int (ABI_ATTR *) (Int,Int,Int)) callback_code) (I1,I2,I3); + } + FREE_CALLBACK(); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); + Cr.x = '\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + PREP_CALLBACK(cif,C_CdC_simulator,(void*)&C_CdC); + Cr = ((Char (ABI_ATTR *) (Char,double,Char)) callback_code) (C1,d2,C3); + } + FREE_CALLBACK(); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 54 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + PREP_CALLBACK(cif,F_Ffd_simulator,(void*)&F_Ffd); + Fr = ((Float (ABI_ATTR *) (Float,float,double)) callback_code) (F1,f2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 55 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_fDd_simulator,(void*)&D_fDd); + Dr = ((Double (ABI_ATTR *) (float,Double,double)) callback_code) (f1,D2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_Dfd_simulator,(void*)&D_Dfd); + Dr = ((Double (ABI_ATTR *) (Double,float,double)) callback_code) (D1,f2,d3); + } + 
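+  /* Illustrative aside: the struct descriptors above fill in size and
+     alignment by hand via sizeof/alignof_slot, but libffi can also compute
+     them -- leaving both fields 0 lets ffi_prep_cif() fill them in, e.g.
+       ffi_type *elems[] = { &ffi_type_double, NULL };
+       ffi_type tDouble = { 0, 0, FFI_TYPE_STRUCT, elems };
+     (elems/tDouble are illustrative names only, not used by this test). */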
FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 57 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + PREP_CALLBACK(cif,J_JiJ_simulator,(void*)&J_JiJ); + Jr = ((J (ABI_ATTR *) (J,int,J)) callback_code) (J1,i2,J2); + } + FREE_CALLBACK(); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif + +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 58 + Tr = T_TcT(T1,' ',T2); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + PREP_CALLBACK(cif,T_TcT_simulator,(void*)&T_TcT); + Tr = ((T (ABI_ATTR *) (T,char,T)) callback_code) (T1,' ',T2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif + +#ifndef SKIP_X +#if (!defined(DGTEST)) || DGTEST == 59 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + PREP_CALLBACK(cif,X_BcdB_simulator,(void*)&X_BcdB); + Xr = ((X (ABI_ATTR *) (B,char,double,B)) callback_code) (B1,c2,d3,B2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif +#endif + } + + + /* gpargs boundary tests */ + { + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 60 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l0K_simulator,(void*)l_l0K); + lr = ((long (ABI_ATTR 
*) (K,long)) callback_code) (K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l1K_simulator,(void*)l_l1K); + lr = ((long (ABI_ATTR *) (long,K,long)) callback_code) (l1,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l2K_simulator,(void*)l_l2K); + lr = ((long (ABI_ATTR *) (long,long,K,long)) callback_code) (l1,l2,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l3K_simulator,(void*)l_l3K); + lr = ((long (ABI_ATTR *) (long,long,long,K,long)) callback_code) (l1,l2,l3,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l4K_simulator,(void*)l_l4K); + lr = ((long (ABI_ATTR *) (long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l5K_simulator,(void*)l_l5K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l6K_simulator,(void*)l_l6K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,l6,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 67 + fr = 
f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f17l3L_simulator,(void*)&f_f17l3L); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,long,long,long,L)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 68 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d17l3L_simulator,(void*)&d_d17l3L); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,long,long,long,L)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 69 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l2ll_simulator,(void*)ll_l2ll); + llr = ((long long (ABI_ATTR *) (long,long,long long,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l3ll_simulator,(void*)ll_l3ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long long,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; 
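+  /* The 0x%lx%08lx prints in these long long tests split the 64-bit result
+     into two 32-bit halves, so the expected output does not depend on %llx
+     support in the host printf. */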
clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l4ll_simulator,(void*)ll_l4ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l5ll_simulator,(void*)ll_l5ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l6ll_simulator,(void*)ll_l6ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l7ll_simulator,(void*)ll_l7ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 75 + dr = d_l2d(l1,l2,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l2d_simulator,(void*)d_l2d); + dr = ((double (ABI_ATTR *) (long,long,double,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l3d(l1,l2,l3,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, 
&ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l3d_simulator,(void*)d_l3d); + dr = ((double (ABI_ATTR *) (long,long,long,double,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l4d(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l4d_simulator,(void*)d_l4d); + dr = ((double (ABI_ATTR *) (long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l5d(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l5d_simulator,(void*)d_l5d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l6d(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l6d_simulator,(void*)d_l6d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l7d_simulator,(void*)d_l7d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + + } + + exit(0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c new file mode 100644 index 0000000..d25ebf4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c @@ -0,0 +1,743 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the 
License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* This file defines test functions of selected signatures, that exercise + dark corners of the various ABIs. */ + +#include + +FILE* out; + +#define uchar unsigned char +#define ushort unsigned short +#define uint unsigned int +#define ulong unsigned long + +typedef struct { char x; } Char; +typedef struct { short x; } Short; +typedef struct { int x; } Int; +typedef struct { long x; } Long; +typedef struct { float x; } Float; +typedef struct { double x; } Double; +typedef struct { char c; float f; } A; +typedef struct { double d; int i[3]; } B; +typedef struct { long l1; long l2; } J; +typedef struct { long l1; long l2; long l3; long l4; } K; +typedef struct { long l1; long l2; long l3; long l4; long l5; long l6; } L; +typedef struct { char x1; } Size1; +typedef struct { char x1; char x2; } Size2; +typedef struct { char x1; char x2; char x3; } Size3; +typedef struct { char x1; char x2; char x3; char x4; } Size4; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; +} Size7; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; +} Size8; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; +} Size12; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; +} Size15; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; char x16; +} Size16; +typedef struct { char c[3]; } T; +typedef struct { char c[33],c1; } X; + +char c1='a', c2=127, c3=(char)128, c4=(char)255, c5=-1; +short s1=32767, s2=(short)32768, s3=3, s4=4, s5=5, s6=6, s7=7, s8=8, s9=9; +int i1=1, i2=2, i3=3, i4=4, i5=5, i6=6, i7=7, i8=8, i9=9, + i10=11, i11=12, i12=13, i13=14, i14=15, i15=16, i16=17; +long l1=1, l2=2, l3=3, l4=4, l5=5, l6=6, l7=7, l8=8, l9=9; +long long ll1 = 3875056143130689530LL; +float f1=0.1f, f2=0.2f, f3=0.3f, f4=0.4f, f5=0.5f, f6=0.6f, f7=0.7f, f8=0.8f, f9=0.9f, + f10=1.1f, f11=1.2f, f12=1.3f, f13=1.4f, f14=1.5f, f15=1.6f, f16=1.7f, f17=1.8f, + f18=1.9f, f19=2.1f, f20=2.2f, f21=2.3f, f22=2.4f, f23=2.5f, f24=2.6f; +double d1=0.1, d2=0.2, d3=0.3, d4=0.4, d5=0.5, d6=0.6, d7=0.7, d8=0.8, d9=0.9, + d10=1.1, d11=1.2, d12=1.3, d13=1.4, d14=1.5, d15=1.6, d16=1.7, d17=1.8; + +uchar uc1='a', uc2=127, uc3=128, uc4=255, uc5=(uchar)-1; +ushort us1=1, us2=2, us3=3, us4=4, us5=5, us6=6, us7=7, us8=8, us9=9; +uint ui1=1, ui2=2, ui3=3, ui4=4, ui5=5, ui6=6, ui7=7, ui8=8, ui9=9; +ulong ul1=1, ul2=2, ul3=3, ul4=4, ul5=5, ul6=6, ul7=7, ul8=8, ul9=9; + +char *str1="hello",str2[]="goodbye",*str3="still here?"; +Char C1={'A'}, C2={'B'}, C3={'C'}, C4={'\377'}, C5={(char)(-1)}; +Short S1={1}, S2={2}, S3={3}, S4={4}, S5={5}, S6={6}, S7={7}, S8={8}, S9={9}; +Int I1={1}, I2={2}, I3={3}, I4={4}, I5={5}, I6={6}, I7={7}, I8={8}, I9={9}; +Float F1={0.1f}, F2={0.2f}, F3={0.3f}, F4={0.4f}, F5={0.5f}, F6={0.6f}, F7={0.7f}, F8={0.8f}, F9={0.9f}; +Double D1={0.1}, 
D2={0.2}, D3={0.3}, D4={0.4}, D5={0.5}, D6={0.6}, D7={0.7}, D8={0.8}, D9={0.9}; + +A A1={'a',0.1f},A2={'b',0.2f},A3={'\377',0.3f}; +B B1={0.1,{1,2,3}},B2={0.2,{5,4,3}}; +J J1={47,11},J2={73,55}; +K K1={19,69,12,28}; +L L1={561,1105,1729,2465,2821,6601}; /* A002997 */ +Size1 Size1_1={'a'}; +Size2 Size2_1={'a','b'}; +Size3 Size3_1={'a','b','c'}; +Size4 Size4_1={'a','b','c','d'}; +Size7 Size7_1={'a','b','c','d','e','f','g'}; +Size8 Size8_1={'a','b','c','d','e','f','g','h'}; +Size12 Size12_1={'a','b','c','d','e','f','g','h','i','j','k','l'}; +Size15 Size15_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o'}; +Size16 Size16_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p'}; +T T1={{'t','h','e'}},T2={{'f','o','x'}}; +X X1={"abcdefghijklmnopqrstuvwxyzABCDEF",'G'}, X2={"123",'9'}, X3={"return-return-return",'R'}; + +#if defined(__GNUC__) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_ATTR +#define ABI_ATTR +#endif + +/* void tests */ +void ABI_ATTR v_v (void) +{ + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +int ABI_ATTR i_v (void) +{ + int r=99; + fprintf(out,"int f(void):"); + fflush(out); + return r; +} +int ABI_ATTR i_i (int a) +{ + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + return r; +} +int ABI_ATTR i_i2 (int a, int b) +{ + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + return r; +} +int ABI_ATTR i_i4 (int a, int b, int c, int d) +{ + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + return r; +} +int ABI_ATTR i_i8 (int a, int b, int c, int d, int e, int f, int g, int h) +{ + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +int ABI_ATTR i_i16 (int a, int b, int c, int d, int e, int f, int g, int h, + int i, int j, int k, int l, int m, int n, int o, int p) +{ + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* float tests */ +float ABI_ATTR f_f (float a) +{ + float r=a+1.0f; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + return r; +} +float ABI_ATTR f_f2 (float a, float b) +{ + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + return r; +} +float ABI_ATTR f_f4 (float a, float b, float c, float d) +{ + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +float ABI_ATTR f_f8 (float a, float b, float c, float d, float e, float f, + float g, float h) +{ + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +float ABI_ATTR f_f16 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, float p) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} +float ABI_ATTR f_f24 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, 
float p, + float q, float s, float t, float u, float v, float w, float x, float y) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + return r; +} + +/* double tests */ +double ABI_ATTR d_d (double a) +{ + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + return r; +} +double ABI_ATTR d_d2 (double a, double b) +{ + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + return r; +} +double ABI_ATTR d_d4 (double a, double b, double c, double d) +{ + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_d8 (double a, double b, double c, double d, double e, double f, + double g, double h) +{ + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +double ABI_ATTR d_d16 (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p) +{ + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* pointer tests */ +void* ABI_ATTR vp_vpdpcpsp (void* a, double* b, char* c, Int* d) +{ + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + return ret; +} + +/* mixed number tests */ +uchar ABI_ATTR uc_ucsil (uchar a, ushort b, uint c, ulong d) +{ + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iidd (int a, int b, double c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iiidi (int a, int b, int c, double d, int e) +{ + double r = a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + return r; +} +double ABI_ATTR d_idid (int a, double b, int c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_fdi (float a, double b, int c) +{ + double r = a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + return r; +} +ushort ABI_ATTR us_cdcd (char a, double b, char c, double d) +{ + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + return r; +} + +long long ABI_ATTR ll_iiilli (int a, int b, int c, long long d, int e) +{ + long long r = (long long)(int)a+(long long)(int)b+(long long)(int)c+d+(long long)(int)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + return r; +} +long long ABI_ATTR ll_flli (float a, long long b, int c) +{ + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + return r; +} + +float ABI_ATTR f_fi (float a, int z) +{ + float r = a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + return r; +} 
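+/* Illustrative sketch (an editorial addition, not upstream libffi code; kept
+   inside "#if 0" so the vendored file's behavior is unchanged): the f_fNi and
+   d_dNi functions in this section take N floating-point arguments followed by
+   one int, to check that the trailing integer is still passed correctly once
+   the floating-point argument registers run out.  The accompanying drivers
+   (see the callback blocks earlier in this diff) invoke each of them through
+   libffi roughly as follows, shown here for f_fi above; this assumes <ffi.h>
+   and its standard ffi_type_* descriptors. */
+#if 0
+#include <ffi.h>
+static void example_call_f_fi (void)
+{
+  ffi_cif cif;
+  ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint };
+  void* argvalues[2];
+  float a = 0.5f, result = 0.0f;
+  int z = 7;
+  argvalues[0] = &a;
+  argvalues[1] = &z;
+  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_float, argtypes) == FFI_OK)
+    ffi_call (&cif, FFI_FN (f_fi), &result, argvalues);  /* result == a + z */
+}
+#endif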
+float ABI_ATTR f_f2i (float a, float b, int z) +{ + float r = a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +float ABI_ATTR f_f3i (float a, float b, float c, int z) +{ + float r = a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +float ABI_ATTR f_f4i (float a, float b, float c, float d, int z) +{ + float r = a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +float ABI_ATTR f_f7i (float a, float b, float c, float d, float e, float f, float g, + int z) +{ + float r = a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +float ABI_ATTR f_f8i (float a, float b, float c, float d, float e, float f, float g, + float h, int z) +{ + float r = a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +float ABI_ATTR f_f12i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +float ABI_ATTR f_f13i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + +double ABI_ATTR d_di (double a, int z) +{ + double r = a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + return r; +} +double ABI_ATTR d_d2i (double a, double b, int z) +{ + double r = a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +double ABI_ATTR d_d3i (double a, double b, double c, int z) +{ + double r = a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +double ABI_ATTR d_d4i (double a, double b, double c, double d, int z) +{ + double r = a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +double ABI_ATTR d_d7i (double a, double b, double c, double d, double e, double f, + double g, int z) +{ + double r = a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +double ABI_ATTR d_d8i (double a, double b, double c, double d, double e, double f, + double g, double h, int z) +{ + double r = a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +double ABI_ATTR d_d12i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +double ABI_ATTR d_d13i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + 
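+/* Illustrative sketch (an editorial addition, not upstream libffi code; kept
+   inside "#if 0" so the vendored file's behavior is unchanged): the structure
+   tests that follow pass and return small aggregates such as Size1..Size16, J
+   and K.  Before a driver can call them through libffi, each aggregate needs
+   an FFI_TYPE_STRUCT descriptor built from its element types, e.g. for the
+   two-long struct J defined above (assumes <ffi.h>). */
+#if 0
+#include <ffi.h>
+static void example_describe_J (void)
+{
+  ffi_cif cif;
+  ffi_type j_type;
+  ffi_type* j_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL };
+  ffi_type* argtypes[] = { &j_type, &ffi_type_sint, &j_type };
+
+  j_type.size = 0;                /* size and alignment are computed by libffi */
+  j_type.alignment = 0;
+  j_type.type = FFI_TYPE_STRUCT;
+  j_type.elements = j_elements;
+
+  /* Matches the signature of J_JiJ further down in this file: J f(J,int,J). */
+  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 3, &j_type, argtypes) == FFI_OK)
+    {
+      /* ffi_call(&cif, FFI_FN(J_JiJ), &result, values) would perform the call. */
+    }
+}
+#endif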
+/* small structure return tests */ +Size1 ABI_ATTR S1_v (void) +{ + fprintf(out,"Size1 f(void):"); + fflush(out); + return Size1_1; +} +Size2 ABI_ATTR S2_v (void) +{ + fprintf(out,"Size2 f(void):"); + fflush(out); + return Size2_1; +} +Size3 ABI_ATTR S3_v (void) +{ + fprintf(out,"Size3 f(void):"); + fflush(out); + return Size3_1; +} +Size4 ABI_ATTR S4_v (void) +{ + fprintf(out,"Size4 f(void):"); + fflush(out); + return Size4_1; +} +Size7 ABI_ATTR S7_v (void) +{ + fprintf(out,"Size7 f(void):"); + fflush(out); + return Size7_1; +} +Size8 ABI_ATTR S8_v (void) +{ + fprintf(out,"Size8 f(void):"); + fflush(out); + return Size8_1; +} +Size12 ABI_ATTR S12_v (void) +{ + fprintf(out,"Size12 f(void):"); + fflush(out); + return Size12_1; +} +Size15 ABI_ATTR S15_v (void) +{ + fprintf(out,"Size15 f(void):"); + fflush(out); + return Size15_1; +} +Size16 ABI_ATTR S16_v (void) +{ + fprintf(out,"Size16 f(void):"); + fflush(out); + return Size16_1; +} + +/* structure tests */ +Int ABI_ATTR I_III (Int a, Int b, Int c) +{ + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + return r; +} +Char ABI_ATTR C_CdC (Char a, double b, Char c) +{ + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + return r; +} +Float ABI_ATTR F_Ffd (Float a, float b, double c) +{ + Float r; + r.x = (float) (a.x + b + c); + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +Double ABI_ATTR D_fDd (float a, Double b, double c) +{ + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + return r; +} +Double ABI_ATTR D_Dfd (Double a, float b, double c) +{ + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +J ABI_ATTR J_JiJ (J a, int b, J c) +{ + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + return r; +} +T ABI_ATTR T_TcT (T a, char b, T c) +{ + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + return r; +} +X ABI_ATTR X_BcdB (B a, char b, double c, B d) +{ + static X xr={"return val",'R'}; + X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + return r; +} + +/* Test for cases where some argument (especially structure, 'long long', or + 'double') may be passed partially in general-purpose argument registers + and partially on the stack. Different ABIs pass between 4 and 8 arguments + (or none) in general-purpose argument registers. 
*/ + +long ABI_ATTR l_l0K (K b, long c) +{ + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l1K (long a1, K b, long c) +{ + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l2K (long a1, long a2, K b, long c) +{ + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l3K (long a1, long a2, long a3, K b, long c) +{ + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l4K (long a1, long a2, long a3, long a4, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l5K (long a1, long a2, long a3, long a4, long a5, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +long ABI_ATTR l_l6K (long a1, long a2, long a3, long a4, long a5, long a6, K b, long c) +{ + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + return r; +} +/* These tests is crafted on the knowledge that for all known ABIs: + * 17 > number of floating-point argument registers, + * 3 < number of general-purpose argument registers < 3 + 6. 
*/ +float ABI_ATTR f_f17l3L (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p, float q, + long s, long t, long u, L z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} +double ABI_ATTR d_d17l3L (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p, double q, + long s, long t, long u, L z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} + +long long ABI_ATTR ll_l2ll (long a1, long a2, long long b, long c) +{ + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l3ll (long a1, long a2, long a3, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l4ll (long a1, long a2, long a3, long a4, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l5ll (long a1, long a2, long a3, long a4, long a5, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l6ll (long a1, long a2, long a3, long a4, long a5, long a6, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l7ll (long a1, long a2, long a3, long a4, long a5, long a6, long a7, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} + +double ABI_ATTR d_l2d (long a1, long a2, double b, long c) +{ + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l3d (long a1, long a2, long a3, double b, long c) +{ + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l4d (long a1, long a2, long a3, long a4, double b, long c) +{ + double r = 
(double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l5d (long a1, long a2, long a3, long a4, long a5, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l6d (long a1, long a2, long a3, long a4, long a5, long a6, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l7d (long a1, long a2, long a3, long a4, long a5, long a6, long a7, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + return r; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c new file mode 100644 index 0000000..5d4959c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. + Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]); + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c new file mode 100644 index 0000000..5e5cb86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. 
+ Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]);; + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call.exp new file mode 100644 index 0000000..13ba2bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/call.exp @@ -0,0 +1,54 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +run-many-tests $tlist $additional_options + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + run-many-tests $tlist $additional_options +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c new file mode 100644 index 0000000..bf60161 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c @@ -0,0 +1,26 @@ +/* Area: ffi_prep_cif + Purpose: Test error return for bad typedefs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +int main (void) +{ + ffi_cif cif; + ffi_type* arg_types[1]; + + ffi_type badType = ffi_type_void; + + arg_types[0] = NULL; + + badType.size = 0; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &badType, + arg_types) == FFI_BAD_TYPEDEF); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include +#endif + +#if defined HAVE_INTTYPES_H +#include +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match , which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in . */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match , which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. */ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. 
*/ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float.c new file mode 100644 index 0000000..fbc272d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int floating(int a, float b, double c, long double d) +{ + int i; + + i = (int) ((float)a/b + ((float)c/(float)d)); + + return i; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + float f; + signed int si1; + double d; + long double ld; + + args[0] = &ffi_type_sint; + values[0] = &si1; + args[1] = &ffi_type_float; + values[1] = &f; + args[2] = &ffi_type_double; + values[2] = &d; + args[3] = &ffi_type_longdouble; + values[3] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + si1 = 6; + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating (si1, f, d, ld); + + ffi_call(&cif, FFI_FN(floating), &rint, values); + + printf ("%d vs %d\n", (int)rint, floating (si1, f, d, ld)); + + CHECK((int)rint == floating(si1, f, d, ld)); + + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1.c new file mode 100644 index 0000000..c48493c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float1.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +#include "float.h" + +#include + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(float f) +{ + return f/3.0; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* These are not always the same!! Check for a reasonable delta */ + + CHECK(fabs(result[0].d - dblit(f)) < DBL_EPSILON); + + /* Check the canary. 
*/ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2.c new file mode 100644 index 0000000..57cd9e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float2.c @@ -0,0 +1,61 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static long double ldblit(float f) +{ + return (long double) (((long double) f)/ (long double) 3.0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + long double ld; + long double original; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_longdouble, args) == FFI_OK); + + f = 3.14159; + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf\n", ldblit(f)); +#endif + + ld = 666; + ffi_call(&cif, FFI_FN(ldblit), &ld, values); + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf, %Lf, %Lf, %Lf\n", ld, ldblit(f), ld - ldblit(f), LDBL_EPSILON); +#endif + + /* These are not always the same!! Check for a reasonable delta */ + original = ldblit(f); + if (((ld > original) ? (ld - original) : (original - ld)) < LDBL_EPSILON) + puts("long double return value tests ok!"); + else + CHECK(0); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3.c new file mode 100644 index 0000000..bab3206 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float3.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check float arguments with different orders. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static double floating_1(float a, double b, long double c) +{ + return (double) a + b + (double) c; +} + +static double floating_2(long double a, double b, float c) +{ + return (double) a + b + (double) c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double rd; + + float f; + double d; + long double ld; + + args[0] = &ffi_type_float; + values[0] = &f; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_longdouble; + values[2] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating_1 (f, d, ld); + + ffi_call(&cif, FFI_FN(floating_1), &rd, values); + + CHECK(fabs(rd - floating_1(f, d, ld)) < DBL_EPSILON); + + args[0] = &ffi_type_longdouble; + values[0] = &ld; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_float; + values[2] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + floating_2 (ld, d, f); + + ffi_call(&cif, FFI_FN(floating_2), &rd, values); + + CHECK(fabs(rd - floating_2(ld, d, f)) < DBL_EPSILON); + + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4.c new file mode 100644 index 0000000..0dd6d85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float4.c @@ -0,0 +1,62 @@ +/* Area: ffi_call + Purpose: Check denorm double value. + Limitations: none. + PR: PR26483. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +/* { dg-options "-mieee" { target alpha*-*-* } } */ + +#include "ffitest.h" +#include "float.h" + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(double d) +{ + return d; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double d; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_double; + values[0] = &d; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + d = DBL_MIN / 2; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* The standard delta check doesn't work for denorms. Since we didn't do + any arithmetic, we should get the original result back, and hence an + exact check should be OK here. */ + + CHECK(result[0].d == dblit(d)); + + /* Check the canary. 
*/ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float_va.c new file mode 100644 index 0000000..5acff91 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/float_va.c @@ -0,0 +1,107 @@ +/* Area: fp and variadics + Purpose: check fp inputs and returns work on variadics, even the fixed params + Limitations: None + PR: none + Originator: 2011-01-25 + + Intended to stress the difference in ABI on ARM vfp +*/ + +/* { dg-do run } */ + +#include + +#include "ffitest.h" + +/* prints out all the parameters, and returns the sum of them all. + * 'x' is the number of variadic parameters all of which are double in this test + */ +double float_va_fn(unsigned int x, double y,...) +{ + double total=0.0; + va_list ap; + unsigned int i; + + total+=(double)x; + total+=y; + + printf("%u: %.1f :", x, y); + + va_start(ap, y); + for(i=0;i +#include +#include + +static float ABI_ATTR many(float f1, float f2, float f3, float f4, float f5, float f6, float f7, float f8, float f9, float f10, float f11, float f12, float f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + float fa[13]; + float f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_float; + values[i] = &fa[i]; + fa[i] = (float) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 13, + &ffi_type_float, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2.c new file mode 100644 index 0000000..1c85746 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many2.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check uint8_t arguments. + Limitations: none. + PR: PR45677. 
+ Originator: Dan Witte 20100916 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +#define NARGS 7 + +typedef unsigned char u8; + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +uint8_t +foo (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return a + b + c + d + e + f + g; +} + +uint8_t ABI_ATTR +bar (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return foo (a, b, c, d, e, f, g); +} + +int +main (void) +{ + ffi_type *ffitypes[NARGS]; + int i; + ffi_cif cif; + ffi_arg result = 0; + uint8_t args[NARGS]; + void *argptrs[NARGS]; + + for (i = 0; i < NARGS; ++i) + ffitypes[i] = &ffi_type_uint8; + + CHECK (ffi_prep_cif (&cif, ABI_NUM, NARGS, + &ffi_type_uint8, ffitypes) == FFI_OK); + + for (i = 0; i < NARGS; ++i) + { + args[i] = i; + argptrs[i] = &args[i]; + } + ffi_call (&cif, FFI_FN (bar), &result, argptrs); + + CHECK (result == 21); + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c new file mode 100644 index 0000000..4ef8c8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c @@ -0,0 +1,70 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + double f3, + double f4, + double f5, + double f6, + double f7, + double f8, + double f9, + double f10, + double f11, + double f12, + double f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return ((f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + double fa[13]; + double f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_double; + values[i] = &fa[i]; + fa[i] = (double) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c new file mode 100644 index 0000000..85ec36e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c @@ -0,0 +1,78 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + long int i1, + double f3, + double f4, + long int i2, + double f5, + double f6, + long int i3, + double f7, + double f8, + long int i4, + double f9, + double f10, + long int i5, + double f11, + double f12, + long int i6, + double f13) +{ + return ((double) (i1 + i2 + i3 + i4 + i5 + i6) + (f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[19]; + void *values[19]; + double fa[19]; + long int la[19]; + double f, ff; + int i; + + for (i = 0; i < 19; i++) + { + if( (i - 2) % 3 == 0) { + args[i] = &ffi_type_slong; + la[i] = (long int) i; + values[i] = &la[i]; + } + else { + args[i] = &ffi_type_double; + fa[i] = (double) i; + values[i] = &fa[i]; + } + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 19, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], la[2], + fa[3], fa[4], la[5], + fa[6], fa[7], la[8], + fa[9], fa[10], la[11], + fa[12], fa[13], la[14], + fa[15], fa[16], la[17], + fa[18]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint.c new file mode 100644 index 0000000..6e2f26f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/negint.c @@ -0,0 +1,52 @@ +/* Area: ffi_call + Purpose: Check that negative integers are passed correctly. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int checking(int a, short b, signed char c) +{ + + return (a < 0 && b < 0 && c < 0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + + checking (si, ss, sc); + + ffi_call(&cif, FFI_FN(checking), &rint, values); + + printf ("%d vs %d\n", (int)rint, checking (si, ss, sc)); + + CHECK(rint != 0); + + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c new file mode 100644 index 0000000..23d88b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c @@ -0,0 +1,46 @@ +/* Area: Struct layout + Purpose: Test ffi_get_struct_offsets + Limitations: none. + PR: none. + Originator: Tom Tromey. 
*/ + +/* { dg-do run } */ +#include "ffitest.h" +#include + +struct test_1 +{ + char c; + float f; + char c2; + int i; +}; + +int +main (void) +{ + ffi_type test_1_type; + ffi_type *test_1_elements[5]; + size_t test_1_offsets[4]; + + test_1_elements[0] = &ffi_type_schar; + test_1_elements[1] = &ffi_type_float; + test_1_elements[2] = &ffi_type_schar; + test_1_elements[3] = &ffi_type_sint; + test_1_elements[4] = NULL; + + test_1_type.size = 0; + test_1_type.alignment = 0; + test_1_type.type = FFI_TYPE_STRUCT; + test_1_type.elements = test_1_elements; + + CHECK (ffi_get_struct_offsets (FFI_DEFAULT_ABI, &test_1_type, test_1_offsets) + == FFI_OK); + CHECK (test_1_type.size == sizeof (struct test_1)); + CHECK (offsetof (struct test_1, c) == test_1_offsets[0]); + CHECK (offsetof (struct test_1, f) == test_1_offsets[1]); + CHECK (offsetof (struct test_1, c2) == test_1_offsets[2]); + CHECK (offsetof (struct test_1, i) == test_1_offsets[3]); + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c new file mode 100644 index 0000000..7da1621 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c @@ -0,0 +1,127 @@ +/* Area: ffi_call + Purpose: Reproduce bug found in python ctypes + Limitations: none. + PR: Fedora 1174037 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct { + long x; + long y; +} POINT; + +typedef struct { + long left; + long top; + long right; + long bottom; +} RECT; + +static RECT ABI_ATTR pr_test(int i __UNUSED__, RECT ar __UNUSED__, + RECT* br __UNUSED__, POINT cp __UNUSED__, + RECT dr __UNUSED__, RECT *er __UNUSED__, + POINT fp, RECT gr __UNUSED__) +{ + RECT result; + + result.left = fp.x; + result.right = fp.y; + result.top = fp.x; + result.bottom = fp.y; + + return result; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type point_type, rect_type; + ffi_type *point_type_elements[3]; + ffi_type *rect_type_elements[5]; + + int i; + POINT cp, fp; + RECT ar, br, dr, er, gr; + RECT *p1, *p2; + + /* This is a hack to get a properly aligned result buffer */ + RECT *rect_result = + (RECT *) malloc (sizeof(RECT)); + + point_type.size = 0; + point_type.alignment = 0; + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = point_type_elements; + point_type_elements[0] = &ffi_type_slong; + point_type_elements[1] = &ffi_type_slong; + point_type_elements[2] = NULL; + + rect_type.size = 0; + rect_type.alignment = 0; + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = rect_type_elements; + rect_type_elements[0] = &ffi_type_slong; + rect_type_elements[1] = &ffi_type_slong; + rect_type_elements[2] = &ffi_type_slong; + rect_type_elements[3] = &ffi_type_slong; + rect_type_elements[4] = NULL; + + args[0] = &ffi_type_sint; + args[1] = &rect_type; + args[2] = &ffi_type_pointer; + args[3] = &point_type; + args[4] = &rect_type; + args[5] = &ffi_type_pointer; + args[6] = &point_type; + args[7] = &rect_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 8, &rect_type, args) == FFI_OK); + + i = 1; + ar.left = 2; + ar.right = 3; + ar.top = 4; + ar.bottom = 5; + br.left = 6; + br.right = 7; + br.top = 8; + br.bottom = 9; + cp.x = 10; + cp.y = 11; + dr.left = 12; + dr.right = 13; + dr.top = 14; + dr.bottom = 15; + er.left = 16; + er.right = 17; + er.top = 18; + er.bottom = 19; + fp.x = 20; + fp.y = 
21;
+  gr.left = 22;
+  gr.right = 23;
+  gr.top = 24;
+  gr.bottom = 25;
+
+  values[0] = &i;
+  values[1] = &ar;
+  p1 = &br;
+  values[2] = &p1;
+  values[3] = &cp;
+  values[4] = &dr;
+  p2 = &er;
+  values[5] = &p2;
+  values[6] = &fp;
+  values[7] = &gr;
+
+  ffi_call (&cif, FFI_FN(pr_test), rect_result, values);
+
+  CHECK(rect_result->top == 20);
+
+  free (rect_result);
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c
new file mode 100644
index 0000000..4456161
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c
@@ -0,0 +1,59 @@
+/* Area: ffi_call
+   Purpose: Promotion test.
+   Limitations: none.
+   PR: none.
+   Originator: From the original ffitest.c */
+
+/* { dg-do run } */
+#include "ffitest.h"
+static int promotion(signed char sc, signed short ss,
+                     unsigned char uc, unsigned short us)
+{
+  int r = (int) sc + (int) ss + (int) uc + (int) us;
+
+  return r;
+}
+
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  ffi_arg rint;
+  signed char sc;
+  unsigned char uc;
+  signed short ss;
+  unsigned short us;
+  unsigned long ul;
+
+  args[0] = &ffi_type_schar;
+  args[1] = &ffi_type_sshort;
+  args[2] = &ffi_type_uchar;
+  args[3] = &ffi_type_ushort;
+  values[0] = &sc;
+  values[1] = &ss;
+  values[2] = &uc;
+  values[3] = &us;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4,
+                     &ffi_type_sint, args) == FFI_OK);
+
+  us = 0;
+  ul = 0;
+
+  for (sc = (signed char) -127;
+       sc <= (signed char) 120; sc += 1)
+    for (ss = -30000; ss <= 30000; ss += 10000)
+      for (uc = (unsigned char) 0;
+           uc <= (unsigned char) 200; uc += 20)
+        for (us = 0; us <= 60000; us += 10000)
+          {
+            ul++;
+            ffi_call(&cif, FFI_FN(promotion), &rint, values);
+            CHECK((int)rint == (signed char) sc + (signed short) ss +
+                  (unsigned char) uc + (unsigned short) us);
+          }
+  printf("%lu promotion tests run\n", ul);
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c
new file mode 100644
index 0000000..e29bd6c
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c
@@ -0,0 +1,114 @@
+/* Area: ffi_call
+   Purpose: Check different structures.
+   Limitations: none.
+   PR: none.
+ Originator: Ronald Oussoren 20030824 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct Point { + float x; + float y; +} Point; + +typedef struct Size { + float h; + float w; +} Size; + +typedef struct Rect { + Point o; + Size s; +} Rect; + +int doit(int o, char* s, Point p, Rect r, int last) +{ + printf("CALLED WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, s, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, last); + return 42; +} + + +int main(void) +{ + ffi_type point_type; + ffi_type size_type; + ffi_type rect_type; + ffi_cif cif; + ffi_type* arglist[6]; + void* values[6]; + int r; + + /* + * First set up FFI types for the 3 struct types + */ + + point_type.size = 0; /*sizeof(Point);*/ + point_type.alignment = 0; /*__alignof__(Point);*/ + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = malloc(3 * sizeof(ffi_type*)); + point_type.elements[0] = &ffi_type_float; + point_type.elements[1] = &ffi_type_float; + point_type.elements[2] = NULL; + + size_type.size = 0;/* sizeof(Size);*/ + size_type.alignment = 0;/* __alignof__(Size);*/ + size_type.type = FFI_TYPE_STRUCT; + size_type.elements = malloc(3 * sizeof(ffi_type*)); + size_type.elements[0] = &ffi_type_float; + size_type.elements[1] = &ffi_type_float; + size_type.elements[2] = NULL; + + rect_type.size = 0;/*sizeof(Rect);*/ + rect_type.alignment =0;/* __alignof__(Rect);*/ + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = malloc(3 * sizeof(ffi_type*)); + rect_type.elements[0] = &point_type; + rect_type.elements[1] = &size_type; + rect_type.elements[2] = NULL; + + /* + * Create a CIF + */ + arglist[0] = &ffi_type_sint; + arglist[1] = &ffi_type_pointer; + arglist[2] = &point_type; + arglist[3] = &rect_type; + arglist[4] = &ffi_type_sint; + arglist[5] = NULL; + + r = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 5, &ffi_type_sint, arglist); + if (r != FFI_OK) { + abort(); + } + + + /* And call the function through the CIF */ + + { + Point p = { 1.0, 2.0 }; + Rect r = { { 9.0, 10.0}, { -1.0, -2.0 } }; + int o = 0; + int l = 42; + char* m = "myMethod"; + ffi_arg result; + + values[0] = &o; + values[1] = &m; + values[2] = &p; + values[3] = &r; + values[4] = &l; + values[5] = NULL; + + printf("CALLING WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, m, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, l); + + ffi_call(&cif, FFI_FN(doit), &result, values); + + printf ("The result is %d\n", (int)result); + + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c new file mode 100644 index 0000000..fd07e50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl) +{ + printf ("%f\n", dbl); + return 2 * dbl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl, rdbl; + + args[0] = &ffi_type_double; + values[0] = &dbl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + for (dbl = -127.3; dbl < 127; dbl++) + { + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl)); + CHECK(rdbl == 2 * dbl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c new file mode 100644 index 0000000..0ea5d50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c @@ -0,0 +1,43 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, float fl2, unsigned int in3, double dbl4) +{ + return dbl1 + fl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl4, rdbl; + float fl2; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + fl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, fl2, in3, dbl4)); + CHECK(rdbl == dbl1 + fl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c new file mode 100644 index 0000000..b3818f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, double dbl2, unsigned int in3, double dbl4) +{ + return dbl1 + dbl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl2, dbl4, rdbl; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_double; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &dbl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + dbl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4)); + CHECK(rdbl == dbl1 + dbl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c new file mode 100644 index 0000000..fb8a09e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c @@ -0,0 +1,35 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl) +{ + return 2 * fl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl, rfl; + + args[0] = &ffi_type_float; + values[0] = &fl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, args) == FFI_OK); + + for (fl = -127.0; fl < 127; fl++) + { + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl)); + CHECK(rfl == 2 * fl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c new file mode 100644 index 0000000..c3d92c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2) +{ + return fl1 + fl2; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, rfl; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2)); + CHECK(rfl == fl1 + fl2); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c new file mode 100644 index 0000000..ddb976c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +/* Use volatile float to avoid false negative on ix86. See PR target/323. */ +static float return_fl(float fl1, float fl2, float fl3, float fl4) +{ + volatile float sum; + + sum = fl1 + fl2 + fl3 + fl4; + return sum; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl3, fl4, rfl; + volatile float sum; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_float; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &fl3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + fl3 = 255.1; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, fl3, fl4)); + + sum = fl1 + fl2 + fl3 + fl4; + CHECK(rfl == sum); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c new file mode 100644 index 0000000..c37877b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2, unsigned int in3, float fl4) +{ + return fl1 + fl2 + in3 + fl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl4, rfl; + unsigned int in3; + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + in3 = 255; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, in3, fl4)); + CHECK(rfl == fl1 + fl2 + in3 + fl4); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c new file mode 100644 index 0000000..52a92fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c @@ -0,0 +1,34 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. 
+   Originator: 20071113 */
+/* { dg-do run } */
+
+#include "ffitest.h"
+
+static long double return_ldl(long double ldl)
+{
+  return 2*ldl;
+}
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  long double ldl, rldl;
+
+  args[0] = &ffi_type_longdouble;
+  values[0] = &ldl;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+                     &ffi_type_longdouble, args) == FFI_OK);
+
+  for (ldl = -127.0; ldl < 127.0; ldl++)
+    {
+      ffi_call(&cif, FFI_FN(return_ldl), &rldl, values);
+      CHECK(rldl == 2 * ldl);
+    }
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c
new file mode 100644
index 0000000..ea4a1e4
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c
@@ -0,0 +1,41 @@
+/* Area: ffi_call
+   Purpose: Check return value long long.
+   Limitations: none.
+   PR: none.
+   Originator: From the original ffitest.c */
+
+/* { dg-do run } */
+#include "ffitest.h"
+static long long return_ll(long long ll)
+{
+  return ll;
+}
+
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  long long rlonglong;
+  long long ll;
+
+  args[0] = &ffi_type_sint64;
+  values[0] = &ll;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+                     &ffi_type_sint64, args) == FFI_OK);
+
+  for (ll = 0LL; ll < 100LL; ll++)
+    {
+      ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values);
+      CHECK(rlonglong == ll);
+    }
+
+  for (ll = 55555555555000LL; ll < 55555555555100LL; ll++)
+    {
+      ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values);
+      CHECK(rlonglong == ll);
+    }
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c
new file mode 100644
index 0000000..593e8a3
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c
@@ -0,0 +1,43 @@
+/* Area: ffi_call
+   Purpose: Check if long long are passed in the corresponding regs on ppc.
+   Limitations: none.
+   PR: 20104.
+   Originator: 20050222 */
+
+/* { dg-do run } */
+/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */
+#include "ffitest.h"
+static long long return_ll(int ll0, long long ll1, int ll2)
+{
+  return ll0 + ll1 + ll2;
+}
+
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  long long rlonglong;
+  long long ll1;
+  unsigned ll0, ll2;
+
+  args[0] = &ffi_type_sint;
+  args[1] = &ffi_type_sint64;
+  args[2] = &ffi_type_sint;
+  values[0] = &ll0;
+  values[1] = &ll1;
+  values[2] = &ll2;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3,
+                     &ffi_type_sint64, args) == FFI_OK);
+
+  ll0 = 11111111;
+  ll1 = 11111111111000LL;
+  ll2 = 11111111;
+
+  ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values);
+  printf("res: %" PRIdLL ", %" PRIdLL "\n", rlonglong, ll0 + ll1 + ll2);
+  /* { dg-output "res: 11111133333222, 11111133333222" } */
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c
new file mode 100644
index 0000000..a36cf3e
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c
@@ -0,0 +1,36 @@
+/* Area: ffi_call
+   Purpose: Check return value signed char.
+   Limitations: none.
+   PR: none.
+   Originator: From the original ffitest.c */
+
+/* { dg-do run } */
+#include "ffitest.h"
+
+static signed char return_sc(signed char sc)
+{
+  return sc;
+}
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  ffi_arg rint;
+  signed char sc;
+
+  args[0] = &ffi_type_schar;
+  values[0] = &sc;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+                     &ffi_type_schar, args) == FFI_OK);
+
+  for (sc = (signed char) -127;
+       sc < (signed char) 127; sc++)
+    {
+      ffi_call(&cif, FFI_FN(return_sc), &rint, values);
+      CHECK((signed char)rint == sc);
+    }
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c
new file mode 100644
index 0000000..f0fd345
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c
@@ -0,0 +1,38 @@
+/* Area: ffi_call
+   Purpose: Check if long as return type is handled correctly.
+   Limitations: none.
+   PR: none.
+ */
+
+/* { dg-do run } */
+#include "ffitest.h"
+static long return_sl(long l1, long l2)
+{
+  return l1 - l2;
+}
+
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  ffi_arg res;
+  unsigned long l1, l2;
+
+  args[0] = &ffi_type_slong;
+  args[1] = &ffi_type_slong;
+  values[0] = &l1;
+  values[1] = &l2;
+
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
+                     &ffi_type_slong, args) == FFI_OK);
+
+  l1 = 1073741823L;
+  l2 = 1073741824L;
+
+  ffi_call(&cif, FFI_FN(return_sl), &res, values);
+  printf("res: %ld, %ld\n", (long)res, l1 - l2);
+  /* { dg-output "res: -1, -1" } */
+
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c
new file mode 100644
index 0000000..6fe5546
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c
@@ -0,0 +1,38 @@
+/* Area: ffi_call
+   Purpose: Check return value unsigned char.
+ Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static unsigned char return_uc(unsigned char uc) +{ + return uc; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + unsigned char uc; + + args[0] = &ffi_type_uchar; + values[0] = &uc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, args) == FFI_OK); + + for (uc = (unsigned char) '\x00'; + uc < (unsigned char) '\xff'; uc++) + { + ffi_call(&cif, FFI_FN(return_uc), &rint, values); + CHECK((unsigned char)rint == uc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c new file mode 100644 index 0000000..12b266f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if unsigned long as return type is handled correctly. + Limitations: none. + PR: none. + Originator: 20060724 */ + +/* { dg-do run } */ +#include "ffitest.h" +static unsigned long return_ul(unsigned long ul1, unsigned long ul2) +{ + return ul1 + ul2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long ul1, ul2; + + args[0] = &ffi_type_ulong; + args[1] = &ffi_type_ulong; + values[0] = &ul1; + values[1] = &ul2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ulong, args) == FFI_OK); + + ul1 = 1073741823L; + ul2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_ul), &res, values); + printf("res: %lu, %lu\n", (unsigned long)res, ul1 + ul2); + /* { dg-output "res: 2147483647, 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c new file mode 100644 index 0000000..35b70ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c @@ -0,0 +1,44 @@ +/* Area: ffi_call + Purpose: Check strlen function call. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static size_t ABI_ATTR my_strlen(char *s) +{ + return (strlen(s)); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + + args[0] = &ffi_type_pointer; + values[0] = (void*) &s; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 7); + + s = "1234567890123456789012345"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 25); + + exit (0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c new file mode 100644 index 0000000..96282bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. 
+ PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(char *s, float a) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[0] = &ffi_type_pointer; + args[1] = &ffi_type_float; + values[0] = (void*) &s; + values[1] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c new file mode 100644 index 0000000..beba86e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c new file mode 100644 index 0000000..d5d42b4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c @@ -0,0 +1,55 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s, int i) +{ + return (size_t) ((int) strlen(s) + (int) a + i); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + int v1; + float v2; + args[2] = &ffi_type_sint; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[2] = (void*) &v1; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 3, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v1 = 1; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 2); + + s = "1234567"; + v2 = -1.0; + v1 = -2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 4); + + s = "1234567890123456789012345"; + v2 = 1.0; + v1 = 2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 28); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c new file mode 100644 index 0000000..c13e23f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 ABI_ATTR struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + test_structure_1 ts1_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c new file mode 100644 index 0000000..17b1377 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: Sergei Trofimovich + + The test originally discovered in ruby's bindings + for ffi in https://bugs.gentoo.org/634190 */ + +/* { dg-do run } */ +#include "ffitest.h" + +struct s { + int s32; + float f32; + signed char s8; +}; + +struct s make_s(void) { + struct s r; + r.s32 = 0x1234; + r.f32 = 7.0; + r.s8 = 0x78; + return r; +} + +int main() { + ffi_cif cif; + struct s r; + ffi_type rtype; + ffi_type* s_fields[] = { + &ffi_type_sint, + &ffi_type_float, + &ffi_type_schar, + NULL, + }; + + rtype.size = 0; + rtype.alignment = 0, + rtype.type = FFI_TYPE_STRUCT, + rtype.elements = s_fields, + + r.s32 = 0xbad; + r.f32 = 999.999; + r.s8 = 0x51; + + // Here we emulate the following call: + //r = make_s(); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &rtype, NULL) == FFI_OK); + ffi_call(&cif, FFI_FN(make_s), &r, NULL); + + CHECK(r.s32 == 0x1234); + CHECK(r.f32 == 7.0); + CHECK(r.s8 == 0x78); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c new file mode 100644 index 0000000..5077a5e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + double d1; + double d2; +} test_structure_2; + +static test_structure_2 ABI_ATTR struct2(test_structure_2 ts) +{ + ts.d1--; + ts.d2--; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + test_structure_2 ts2_arg; + ffi_type ts2_type; + ffi_type *ts2_type_elements[3]; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_2 *ts2_result = + (test_structure_2 *) malloc (sizeof(test_structure_2)); + + ts2_type.size = 0; + ts2_type.alignment = 0; + ts2_type.type = FFI_TYPE_STRUCT; + ts2_type.elements = ts2_type_elements; + ts2_type_elements[0] = &ffi_type_double; + ts2_type_elements[1] = &ffi_type_double; + ts2_type_elements[2] = NULL; + + args[0] = &ts2_type; + values[0] = &ts2_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts2_type, args) == FFI_OK); + + ts2_arg.d1 = 5.55; + ts2_arg.d2 = 6.66; + + printf ("%g\n", ts2_arg.d1); + printf ("%g\n", ts2_arg.d2); + + ffi_call(&cif, FFI_FN(struct2), ts2_result, values); + + printf ("%g\n", ts2_result->d1); + printf ("%g\n", ts2_result->d2); + + CHECK(ts2_result->d1 == 5.55 - 1); + CHECK(ts2_result->d2 == 6.66 - 1); + + free (ts2_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c new file mode 100644 index 0000000..7eba0ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + int si; +} test_structure_3; + +static test_structure_3 ABI_ATTR struct3(test_structure_3 ts) +{ + ts.si = -(ts.si*2); + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + int compare_value; + ffi_type ts3_type; + ffi_type *ts3_type_elements[2]; + + test_structure_3 ts3_arg; + test_structure_3 *ts3_result = + (test_structure_3 *) malloc (sizeof(test_structure_3)); + + ts3_type.size = 0; + ts3_type.alignment = 0; + ts3_type.type = FFI_TYPE_STRUCT; + ts3_type.elements = ts3_type_elements; + ts3_type_elements[0] = &ffi_type_sint; + ts3_type_elements[1] = NULL; + + args[0] = &ts3_type; + values[0] = &ts3_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts3_type, args) == FFI_OK); + + ts3_arg.si = -123; + compare_value = ts3_arg.si; + + ffi_call(&cif, FFI_FN(struct3), ts3_result, values); + + printf ("%d %d\n", ts3_result->si, -(compare_value*2)); + + CHECK(ts3_result->si == -(compare_value*2)); + + free (ts3_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c new file mode 100644 index 0000000..66a9551 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned ui1; + unsigned ui2; + unsigned ui3; +} test_structure_4; + +static test_structure_4 ABI_ATTR struct4(test_structure_4 ts) +{ + ts.ui3 = ts.ui1 * ts.ui2 * ts.ui3; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts4_type; + ffi_type *ts4_type_elements[4]; + + test_structure_4 ts4_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_4 *ts4_result = + (test_structure_4 *) malloc (sizeof(test_structure_4)); + + ts4_type.size = 0; + ts4_type.alignment = 0; + ts4_type.type = FFI_TYPE_STRUCT; + ts4_type.elements = ts4_type_elements; + ts4_type_elements[0] = &ffi_type_uint; + ts4_type_elements[1] = &ffi_type_uint; + ts4_type_elements[2] = &ffi_type_uint; + ts4_type_elements[3] = NULL; + + args[0] = &ts4_type; + values[0] = &ts4_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts4_type, args) == FFI_OK); + + ts4_arg.ui1 = 2; + ts4_arg.ui2 = 3; + ts4_arg.ui3 = 4; + + ffi_call (&cif, FFI_FN(struct4), ts4_result, values); + + CHECK(ts4_result->ui3 == 2U * 3U * 4U); + + + free (ts4_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c new file mode 100644 index 0000000..23e2a3f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c @@ -0,0 +1,66 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + char c1; + char c2; +} test_structure_5; + +static test_structure_5 ABI_ATTR struct5(test_structure_5 ts1, test_structure_5 ts2) +{ + ts1.c1 += ts2.c1; + ts1.c2 -= ts2.c2; + + return ts1; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts5_type; + ffi_type *ts5_type_elements[3]; + + test_structure_5 ts5_arg1, ts5_arg2; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_5 *ts5_result = + (test_structure_5 *) malloc (sizeof(test_structure_5)); + + ts5_type.size = 0; + ts5_type.alignment = 0; + ts5_type.type = FFI_TYPE_STRUCT; + ts5_type.elements = ts5_type_elements; + ts5_type_elements[0] = &ffi_type_schar; + ts5_type_elements[1] = &ffi_type_schar; + ts5_type_elements[2] = NULL; + + args[0] = &ts5_type; + args[1] = &ts5_type; + values[0] = &ts5_arg1; + values[1] = &ts5_arg2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, &ts5_type, args) == FFI_OK); + + ts5_arg1.c1 = 2; + ts5_arg1.c2 = 6; + ts5_arg2.c1 = 5; + ts5_arg2.c2 = 3; + + ffi_call (&cif, FFI_FN(struct5), ts5_result, values); + + CHECK(ts5_result->c1 == 7); + CHECK(ts5_result->c2 == 3); + + + free (ts5_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c new file mode 100644 index 0000000..173c66e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f; + double d; +} test_structure_6; + +static test_structure_6 ABI_ATTR struct6 (test_structure_6 ts) +{ + ts.f += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts6_type; + ffi_type *ts6_type_elements[3]; + + test_structure_6 ts6_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_6 *ts6_result = + (test_structure_6 *) malloc (sizeof(test_structure_6)); + + ts6_type.size = 0; + ts6_type.alignment = 0; + ts6_type.type = FFI_TYPE_STRUCT; + ts6_type.elements = ts6_type_elements; + ts6_type_elements[0] = &ffi_type_float; + ts6_type_elements[1] = &ffi_type_double; + ts6_type_elements[2] = NULL; + + args[0] = &ts6_type; + values[0] = &ts6_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts6_type, args) == FFI_OK); + + ts6_arg.f = 5.55f; + ts6_arg.d = 6.66; + + printf ("%g\n", ts6_arg.f); + printf ("%g\n", ts6_arg.d); + + ffi_call(&cif, FFI_FN(struct6), ts6_result, values); + + CHECK(ts6_result->f == 5.55f + 1); + CHECK(ts6_result->d == 6.66 + 1); + + free (ts6_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c new file mode 100644 index 0000000..badc7e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + double d; +} test_structure_7; + +static test_structure_7 ABI_ATTR struct7 (test_structure_7 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts7_type; + ffi_type *ts7_type_elements[4]; + + test_structure_7 ts7_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_7 *ts7_result = + (test_structure_7 *) malloc (sizeof(test_structure_7)); + + ts7_type.size = 0; + ts7_type.alignment = 0; + ts7_type.type = FFI_TYPE_STRUCT; + ts7_type.elements = ts7_type_elements; + ts7_type_elements[0] = &ffi_type_float; + ts7_type_elements[1] = &ffi_type_float; + ts7_type_elements[2] = &ffi_type_double; + ts7_type_elements[3] = NULL; + + args[0] = &ts7_type; + values[0] = &ts7_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts7_type, args) == FFI_OK); + + ts7_arg.f1 = 5.55f; + ts7_arg.f2 = 55.5f; + ts7_arg.d = 6.66; + + printf ("%g\n", ts7_arg.f1); + printf ("%g\n", ts7_arg.f2); + printf ("%g\n", ts7_arg.d); + + ffi_call(&cif, FFI_FN(struct7), ts7_result, values); + + printf ("%g\n", ts7_result->f1); + printf ("%g\n", ts7_result->f2); + printf ("%g\n", ts7_result->d); + + CHECK(ts7_result->f1 == 5.55f + 1); + CHECK(ts7_result->f2 == 55.5f + 1); + CHECK(ts7_result->d == 6.66 + 1); + + free (ts7_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c new file mode 100644 index 0000000..ef204ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c @@ -0,0 +1,81 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + float f3; + float f4; +} test_structure_8; + +static test_structure_8 ABI_ATTR struct8 (test_structure_8 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.f3 += 1; + ts.f4 += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts8_type; + ffi_type *ts8_type_elements[5]; + + test_structure_8 ts8_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_8 *ts8_result = + (test_structure_8 *) malloc (sizeof(test_structure_8)); + + ts8_type.size = 0; + ts8_type.alignment = 0; + ts8_type.type = FFI_TYPE_STRUCT; + ts8_type.elements = ts8_type_elements; + ts8_type_elements[0] = &ffi_type_float; + ts8_type_elements[1] = &ffi_type_float; + ts8_type_elements[2] = &ffi_type_float; + ts8_type_elements[3] = &ffi_type_float; + ts8_type_elements[4] = NULL; + + args[0] = &ts8_type; + values[0] = &ts8_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts8_type, args) == FFI_OK); + + ts8_arg.f1 = 5.55f; + ts8_arg.f2 = 55.5f; + ts8_arg.f3 = -5.55f; + ts8_arg.f4 = -55.5f; + + printf ("%g\n", ts8_arg.f1); + printf ("%g\n", ts8_arg.f2); + printf ("%g\n", ts8_arg.f3); + printf ("%g\n", ts8_arg.f4); + + ffi_call(&cif, FFI_FN(struct8), ts8_result, values); + + printf ("%g\n", ts8_result->f1); + printf ("%g\n", ts8_result->f2); + printf ("%g\n", ts8_result->f3); + printf ("%g\n", ts8_result->f4); + + CHECK(ts8_result->f1 == 5.55f + 1); + CHECK(ts8_result->f2 == 55.5f + 1); + CHECK(ts8_result->f3 == -5.55f + 1); + CHECK(ts8_result->f4 == -55.5f + 1); + + free (ts8_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c new file mode 100644 index 0000000..4a13b81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c @@ -0,0 +1,68 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + float f; + int i; +} test_structure_9; + +static test_structure_9 ABI_ATTR struct9 (test_structure_9 ts) +{ + ts.f += 1; + ts.i += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts9_type; + ffi_type *ts9_type_elements[3]; + + test_structure_9 ts9_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_9 *ts9_result = + (test_structure_9 *) malloc (sizeof(test_structure_9)); + + ts9_type.size = 0; + ts9_type.alignment = 0; + ts9_type.type = FFI_TYPE_STRUCT; + ts9_type.elements = ts9_type_elements; + ts9_type_elements[0] = &ffi_type_float; + ts9_type_elements[1] = &ffi_type_sint; + ts9_type_elements[2] = NULL; + + args[0] = &ts9_type; + values[0] = &ts9_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts9_type, args) == FFI_OK); + + ts9_arg.f = 5.55f; + ts9_arg.i = 5; + + printf ("%g\n", ts9_arg.f); + printf ("%d\n", ts9_arg.i); + + ffi_call(&cif, FFI_FN(struct9), ts9_result, values); + + printf ("%g\n", ts9_result->f); + printf ("%d\n", ts9_result->i); + + CHECK(ts9_result->f == 5.55f + 1); + CHECK(ts9_result->i == 5 + 1); + + free (ts9_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c new file mode 100644 index 0000000..f00d830 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c @@ -0,0 +1,61 @@ +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + memset(&cif, 1, sizeof(cif)); + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + test_structure_1 ts1_arg; + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c new file mode 100644 index 0000000..59d085c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c @@ -0,0 +1,196 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* m68k-*-* alpha-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + float f; + double d; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + + uc = va_arg (ap, unsigned); + sc = va_arg (ap, signed); + + us = va_arg (ap, unsigned); + ss = va_arg (ap, signed); + + ui = va_arg (ap, unsigned int); + si = va_arg (ap, signed int); + + ul = va_arg (ap, unsigned long); + sl = va_arg (ap, signed long); + + f = va_arg (ap, double); /* C standard promotes float->double + when anonymous */ + d = va_arg (ap, double); + + printf ("%u %u %u %u %u %u %u %u %u uc=%u sc=%d %u %d %u %d %lu %ld %f %f\n", + s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b, + uc, sc, + us, ss, + ui, si, + ul, sl, + f, d); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[15]; + ffi_type* arg_types[15]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + double d1; + double f1; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = &ffi_type_uchar; + arg_types[5] = &ffi_type_schar; + arg_types[6] = &ffi_type_ushort; + arg_types[7] = &ffi_type_sshort; + arg_types[8] = &ffi_type_uint; + arg_types[9] = &ffi_type_sint; + arg_types[10] = &ffi_type_ulong; + arg_types[11] = &ffi_type_slong; + arg_types[12] = &ffi_type_double; + arg_types[13] = &ffi_type_double; + arg_types[14] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 14, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + uc = 9; + sc = 10; + us = 11; + ss = 12; + ui = 13; + si = 14; + ul = 15; + sl = 16; + f1 = 2.12; + d1 = 3.13; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = &uc; + args[5] = ≻ + args[6] = &us; + args[7] = &ss; + args[8] = &ui; + args[9] = &si; + args[10] = &ul; + args[11] = &sl; + args[12] = &f1; + args[13] = &d1; + args[14] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8 uc=9 sc=10 11 12 13 14 15 16 2.120000 
3.130000" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c new file mode 100644 index 0000000..e645206 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c @@ -0,0 +1,121 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c new file mode 100644 index 0000000..56f5b9c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c @@ -0,0 +1,123 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct small_tag +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + s1.a += s2.a; + s1.b += s2.b; + return s1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct small_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &s_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d\n", res.a, res.b); + /* { dg-output "\nres: 12 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c new file mode 100644 index 0000000..9a27e7f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c @@ -0,0 +1,125 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct large_tag +test_fn (int n, ...) 
+{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + l.a += s1.a; + l.b += s1.b; + l.c += s2.a; + l.d += s2.b; + return l; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct large_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &l_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d %d %d %d\n", res.a, res.b, res.c, res.d, res.e); + /* { dg-output "\nres: 15 17 19 21 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp new file mode 100644 index 0000000..ed4145c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp @@ -0,0 +1,67 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2019 Free Software Foundation, Inc. +# Copyright (C) 2019 Anthony Green + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
+ +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist $additional_options + } else { + foreach test $tlist { + unsupported "$test" + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c new file mode 100644 index 0000000..a579ff6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + void * code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = 
&ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c new file mode 100644 index 0000000..9123173 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c @@ -0,0 +1,81 @@ +/* Area: closure_call. + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + + +static void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = 
&ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c new file mode 100644 index 0000000..08ff9d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c @@ -0,0 +1,81 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn2(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(double *)args[0] +(int)(*(double *)args[1]) + + (int)(*(double *)args[2]) + (int)*(double *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(double *)args[0], (int)(*(double *)args[1]), + (int)(*(double *)args[2]), (int)*(double *)args[3], + (int)(*(signed short *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(int *)args[7]), + (int)(*(double*)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type2)(double, double, double, double, signed short, + double, double, int, double, int, int, float, + int, float, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = &ffi_type_double; + cl_arg_types[2] = &ffi_type_double; + cl_arg_types[3] = &ffi_type_double; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn2, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type2)code)) + (1, 2, 3, 4, 127, 5, 6, 8, 
9, 10, 11, 12.0, 13, + 19.0, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c new file mode 100644 index 0000000..9b54d80 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c @@ -0,0 +1,82 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn3(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(float *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(float *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(float *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(float *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(float *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(float *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); + + } + +typedef int (*closure_test_type3)(float, float, float, float, float, float, + float, float, double, int, float, float, int, + float, float, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_float; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_float; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_float; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn3, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type3)code)) + (1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9, 10, 11.11, 12.0, 13, + 19.19, 21.21, 1); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 19 21 1 3: 135" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 135" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c new file mode 100644 index 0000000..d4a1530 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20031026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(unsigned long long *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(unsigned long long *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11LL, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c new file mode 100644 index 0000000..9907442 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c @@ -0,0 +1,92 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Exceed the limit of gpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20031026 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn5(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(int *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(int *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + int, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 10; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[10] = &ffi_type_sint; + for (i = 11; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn5, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c new file mode 100644 index 0000000..73c54fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c @@ -0,0 +1,90 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different 
type. + Also, exceed the limit of gpr and fpr registers on PowerPC. + Limitations: none. + PR: PR23404 + Originator: 20050830 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + + (int)(*(unsigned long long *)args[1]) + + (int)(*(unsigned long long *)args[2]) + + (int)*(unsigned long long *)args[3] + + (int)(*(int *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(double *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(double *)args[14]) + (int)*(double *)args[15] + + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)(*(unsigned long long *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(unsigned long long *)args[3], + (int)(*(int *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(double *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(double *)args[14]), (int)(*(double *)args[15]), + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + int, double, double, float, double, double, + int, float, int, int, double, double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_uint64; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_uint64; + cl_arg_types[4] = &ffi_type_sint; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_double; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_double; + cl_arg_types[15] = &ffi_type_double; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1, 2, 3, 4, 127, 429., 7., 8., 9.5, 10., 11, 12., 13, + 19, 21., 1.); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c new file mode 100644 index 0000000..b3afa0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c @@ -0,0 +1,95 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_loc_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_loc_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + ffi_closure *pcl; + ffi_type * cl_arg_types[17]; + int res; + void *codeloc; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + pcl = ffi_closure_alloc(sizeof(ffi_closure), &codeloc); + CHECK(pcl != NULL); + CHECK(codeloc != NULL); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_loc_test_fn0, + (void *) 3 /* userdata */, codeloc) == FFI_OK); + + CHECK(memcmp(pcl, codeloc, sizeof(*pcl)) == 0); + + res = (*((closure_loc_test_type0)codeloc)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c new file mode 100644 index 0000000..5a4e728 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check simple closure handling with all ABIs + Limitations: none. + PR: none. 
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata) +{ + *(ffi_arg*)resp = + (int)*(int *)args[0] + (int)(*(int *)args[1]) + + (int)(*(int *)args[2]) + (int)(*(int *)args[3]) + + (int)(intptr_t)userdata; + + printf("%d %d %d %d: %d\n", + (int)*(int *)args[0], (int)(*(int *)args[1]), + (int)(*(int *)args[2]), (int)(*(int *)args[3]), + (int)*(ffi_arg *)resp); + +} + +typedef int (ABI_ATTR *closure_test_type0)(int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = &ffi_type_uint; + cl_arg_types[3] = &ffi_type_uint; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*(closure_test_type0)code)(0, 1, 2, 3); + /* { dg-output "0 1 2 3: 9" } */ + + printf("res: %d\n",res); + /* { dg-output "\nres: 9" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c new file mode 100644 index 0000000..ea0825d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c @@ -0,0 +1,94 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_12byte { + int a; + int b; + int c; +} cls_struct_12byte; + +cls_struct_12byte cls_struct_12byte_fn(struct cls_struct_12byte b1, + struct cls_struct_12byte b2) +{ + struct cls_struct_12byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_12byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args , void* userdata __UNUSED__) +{ + struct cls_struct_12byte b1, b2; + + b1 = *(struct cls_struct_12byte*)(args[0]); + b2 = *(struct cls_struct_12byte*)(args[1]); + + *(cls_struct_12byte*)resp = cls_struct_12byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_12byte h_dbl = { 7, 4, 9 }; + struct cls_struct_12byte j_dbl = { 1, 5, 3 }; + struct cls_struct_12byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + 
ffi_call(&cif, FFI_FN(cls_struct_12byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_12byte_gn, NULL, code) == FFI_OK); + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + res_dbl = ((cls_struct_12byte(*)(cls_struct_12byte, cls_struct_12byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c new file mode 100644 index 0000000..89a08a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte { + int a; + double b; + int c; +} cls_struct_16byte; + +cls_struct_16byte cls_struct_16byte_fn(struct cls_struct_16byte b1, + struct cls_struct_16byte b2) +{ + struct cls_struct_16byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_16byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_16byte b1, b2; + + b1 = *(struct cls_struct_16byte*)(args[0]); + b2 = *(struct cls_struct_16byte*)(args[1]); + + *(cls_struct_16byte*)resp = cls_struct_16byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte h_dbl = { 7, 8.0, 9 }; + struct cls_struct_16byte j_dbl = { 1, 9.0, 3 }; + struct cls_struct_16byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_16byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0.0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_16byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_16byte(*)(cls_struct_16byte, cls_struct_16byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 
12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c new file mode 100644 index 0000000..9f75da8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c @@ -0,0 +1,96 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. + Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_18byte { + double a; + unsigned char b; + unsigned char c; + double d; +} cls_struct_18byte; + +cls_struct_18byte cls_struct_18byte_fn(struct cls_struct_18byte a1, + struct cls_struct_18byte a2) +{ + struct cls_struct_18byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + + printf("%g %d %d %g %g %d %d %g: %g %d %d %g\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + return result; +} + +static void +cls_struct_18byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_18byte a1, a2; + + a1 = *(struct cls_struct_18byte*)(args[0]); + a2 = *(struct cls_struct_18byte*)(args[1]); + + *(cls_struct_18byte*)resp = cls_struct_18byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_18byte g_dbl = { 1.0, 127, 126, 3.0 }; + struct cls_struct_18byte f_dbl = { 4.0, 125, 124, 5.0 }; + struct cls_struct_18byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_18byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_18byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_18byte(*)(cls_struct_18byte, cls_struct_18byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c new file mode 100644 index 0000000..278794b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c @@ -0,0 
+1,102 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. + Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_19byte { + double a; + unsigned char b; + unsigned char c; + double d; + unsigned char e; +} cls_struct_19byte; + +cls_struct_19byte cls_struct_19byte_fn(struct cls_struct_19byte a1, + struct cls_struct_19byte a2) +{ + struct cls_struct_19byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + + printf("%g %d %d %g %d %g %d %d %g %d: %g %d %d %g %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + return result; +} + +static void +cls_struct_19byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_19byte a1, a2; + + a1 = *(struct cls_struct_19byte*)(args[0]); + a2 = *(struct cls_struct_19byte*)(args[1]); + + *(cls_struct_19byte*)resp = cls_struct_19byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_19byte g_dbl = { 1.0, 127, 126, 3.0, 120 }; + struct cls_struct_19byte f_dbl = { 4.0, 125, 124, 5.0, 119 }; + struct cls_struct_19byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_19byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_19byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_19byte(*)(cls_struct_19byte, cls_struct_19byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c new file mode 100644 index 0000000..82492c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c @@ -0,0 +1,89 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. 
+ Limitations: none. + PR: none. + Originator: 20030902 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_1_1byte { + unsigned char a; +} cls_struct_1_1byte; + +cls_struct_1_1byte cls_struct_1_1byte_fn(struct cls_struct_1_1byte a1, + struct cls_struct_1_1byte a2) +{ + struct cls_struct_1_1byte result; + + result.a = a1.a + a2.a; + + printf("%d %d: %d\n", a1.a, a2.a, result.a); + + return result; +} + +static void +cls_struct_1_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_1_1byte a1, a2; + + a1 = *(struct cls_struct_1_1byte*)(args[0]); + a2 = *(struct cls_struct_1_1byte*)(args[1]); + + *(cls_struct_1_1byte*)resp = cls_struct_1_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[2]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_1_1byte g_dbl = { 12 }; + struct cls_struct_1_1byte f_dbl = { 178 }; + struct cls_struct_1_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_1_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_1_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_1_1byte(*)(cls_struct_1_1byte, cls_struct_1_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c new file mode 100644 index 0000000..3f8bb28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + double a; + double b; + int c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%g %g %d %g %g %d: %g %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_20byte g_dbl = { 1.0, 2.0, 3 }; + struct cls_struct_20byte f_dbl = { 4.0, 5.0, 7 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c new file mode 100644 index 0000000..6562727 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + int a; + double b; + double c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %g %d %g %g: %d %g %g\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_20byte g_dbl = { 1, 2.0, 3.0 }; + struct cls_struct_20byte f_dbl = { 4, 5.0, 7.0 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c new file mode 100644 index 0000000..1d82f6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_24byte { + double a; + double b; + int c; + float d; +} cls_struct_24byte; + +cls_struct_24byte cls_struct_24byte_fn(struct cls_struct_24byte b0, + struct cls_struct_24byte b1, + struct cls_struct_24byte b2, + struct cls_struct_24byte b3) +{ + struct cls_struct_24byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + + printf("%g %g %d %g %g %g %d %g %g %g %d %g %g %g %d %g: %g %g %d %g\n", + b0.a, b0.b, b0.c, b0.d, + b1.a, b1.b, b1.c, b1.d, + b2.a, b2.b, b2.c, b2.d, + b3.a, b3.b, b3.c, b2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_24byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_24byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_24byte*)(args[0]); + b1 = *(struct cls_struct_24byte*)(args[1]); + b2 = *(struct cls_struct_24byte*)(args[2]); + b3 = *(struct cls_struct_24byte*)(args[3]); + + *(cls_struct_24byte*)resp = cls_struct_24byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_24byte e_dbl = { 9.0, 2.0, 6, 5.0 }; + struct cls_struct_24byte f_dbl = { 1.0, 2.0, 3, 7.0 }; + struct cls_struct_24byte g_dbl = { 4.0, 5.0, 7, 9.0 }; + struct cls_struct_24byte h_dbl = { 8.0, 6.0, 1, 4.0 }; + struct cls_struct_24byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = &ffi_type_float; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_24byte_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_24byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_24byte(*)(cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c new file mode 100644 index 0000000..81bb0a6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c @@ -0,0 +1,90 @@ 
+/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_2byte { + unsigned char a; + unsigned char b; +} cls_struct_2byte; + +cls_struct_2byte cls_struct_2byte_fn(struct cls_struct_2byte a1, + struct cls_struct_2byte a2) +{ + struct cls_struct_2byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_2byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_2byte a1, a2; + + a1 = *(struct cls_struct_2byte*)(args[0]); + a2 = *(struct cls_struct_2byte*)(args[1]); + + *(cls_struct_2byte*)resp = cls_struct_2byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_2byte g_dbl = { 12, 127 }; + struct cls_struct_2byte f_dbl = { 1, 13 }; + struct cls_struct_2byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_2byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_2byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_2byte(*)(cls_struct_2byte, cls_struct_2byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c new file mode 100644 index 0000000..b782746 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3_1byte { + unsigned char a; + unsigned char b; + unsigned char c; +} cls_struct_3_1byte; + +cls_struct_3_1byte cls_struct_3_1byte_fn(struct cls_struct_3_1byte a1, + struct cls_struct_3_1byte a2) +{ + struct cls_struct_3_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_3_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3_1byte a1, a2; + + a1 = *(struct cls_struct_3_1byte*)(args[0]); + a2 = *(struct cls_struct_3_1byte*)(args[1]); + + *(cls_struct_3_1byte*)resp = cls_struct_3_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3_1byte g_dbl = { 12, 13, 14 }; + struct cls_struct_3_1byte f_dbl = { 178, 179, 180 }; + struct cls_struct_3_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3_1byte(*)(cls_struct_3_1byte, cls_struct_3_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c new file mode 100644 index 0000000..a02c463 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte { + unsigned short a; + unsigned char b; +} cls_struct_3byte; + +cls_struct_3byte cls_struct_3byte_fn(struct cls_struct_3byte a1, + struct cls_struct_3byte a2) +{ + struct cls_struct_3byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte a1, a2; + + a1 = *(struct cls_struct_3byte*)(args[0]); + a2 = *(struct cls_struct_3byte*)(args[1]); + + *(cls_struct_3byte*)resp = cls_struct_3byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte g_dbl = { 12, 119 }; + struct cls_struct_3byte f_dbl = { 1, 15 }; + struct cls_struct_3byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte(*)(cls_struct_3byte, cls_struct_3byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c new file mode 100644 index 0000000..c7251ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
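The *_gn halves of these tests exercise the opposite direction: ffi_closure_alloc hands back a writable closure plus an executable code pointer, ffi_prep_closure_loc binds that code pointer to a C handler through the same cif, and the test then calls the code pointer as an ordinary function. Stripped of the struct plumbing, that flow reduces to the sketch below (again only <ffi.h>; add_ints_handler is an illustrative name, and unlike the tests this sketch also releases the closure with ffi_closure_free):

    #include <ffi.h>
    #include <stdio.h>

    static void add_ints_handler(ffi_cif *cif, void *ret, void **args,
                                 void *userdata)
    {
      (void) cif; (void) userdata;
      /* Integer results narrower than ffi_arg must be widened to ffi_arg. */
      *(ffi_arg *) ret = *(int *) args[0] + *(int *) args[1];
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *arg_types[] = { &ffi_type_sint, &ffi_type_sint };
      void *code;                    /* executable entry point */
      ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

      if (closure == NULL
          || ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint,
                          arg_types) != FFI_OK
          || ffi_prep_closure_loc(closure, &cif, add_ints_handler, NULL,
                                  code) != FFI_OK)
        return 1;

      /* `code` is now callable with the signature described by the cif. */
      int (*fn)(int, int) = (int (*)(int, int)) code;
      printf("%d\n", fn(20, 22));    /* prints "42" */

      ffi_closure_free(closure);
      return 0;
    }

Struct results, as in the tests here, are instead written to the result pointer at their natural size; only small integer returns need the ffi_arg widening shown in the handler.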
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte_1 { + unsigned char a; + unsigned short b; +} cls_struct_3byte_1; + +cls_struct_3byte_1 cls_struct_3byte_fn1(struct cls_struct_3byte_1 a1, + struct cls_struct_3byte_1 a2) +{ + struct cls_struct_3byte_1 result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte_1 a1, a2; + + a1 = *(struct cls_struct_3byte_1*)(args[0]); + a2 = *(struct cls_struct_3byte_1*)(args[1]); + + *(cls_struct_3byte_1*)resp = cls_struct_3byte_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte_1 g_dbl = { 15, 125 }; + struct cls_struct_3byte_1 f_dbl = { 9, 19 }; + struct cls_struct_3byte_1 res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn1), &res_dbl, args_dbl); + /* { dg-output "15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn1, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte_1(*)(cls_struct_3byte_1, cls_struct_3byte_1))(code))(g_dbl, f_dbl); + /* { dg-output "\n15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c new file mode 100644 index 0000000..48888f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations:>none. + PR: none. 
+ Originator: 20171026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_3float { + float f; + float g; + float h; +} cls_struct_3float; + +cls_struct_3float cls_struct_3float_fn(struct cls_struct_3float a1, + struct cls_struct_3float a2) +{ + struct cls_struct_3float result; + + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + result.h = a1.h + a2.h; + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.f, a1.g, a1.h, + a2.f, a2.g, a2.h, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_3float_gn(ffi_cif *cif __UNUSED__, void* resp, void **args, + void* userdata __UNUSED__) +{ + struct cls_struct_3float a1, a2; + + a1 = *(struct cls_struct_3float*)(args[0]); + a2 = *(struct cls_struct_3float*)(args[1]); + + *(cls_struct_3float*)resp = cls_struct_3float_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void *args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_3float g_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float f_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float res_dbl; + + cls_struct_fields[0] = &ffi_type_float; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_float; + cls_struct_fields[3] = NULL; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3float_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3float_gn, NULL, code) == + FFI_OK); + + res_dbl = ((cls_struct_3float(*)(cls_struct_3float, + cls_struct_3float))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c new file mode 100644 index 0000000..2d6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_4_1byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; +} cls_struct_4_1byte; + +cls_struct_4_1byte cls_struct_4_1byte_fn(struct cls_struct_4_1byte a1, + struct cls_struct_4_1byte a2) +{ + struct cls_struct_4_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_4_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4_1byte a1, a2; + + a1 = *(struct cls_struct_4_1byte*)(args[0]); + a2 = *(struct cls_struct_4_1byte*)(args[1]); + + *(cls_struct_4_1byte*)resp = cls_struct_4_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4_1byte g_dbl = { 12, 13, 14, 15 }; + struct cls_struct_4_1byte f_dbl = { 178, 179, 180, 181 }; + struct cls_struct_4_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4_1byte(*)(cls_struct_4_1byte, cls_struct_4_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c new file mode 100644 index 0000000..4ac3787 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_4byte { + unsigned short a; + unsigned short b; +} cls_struct_4byte; + +cls_struct_4byte cls_struct_4byte_fn(struct cls_struct_4byte a1, + struct cls_struct_4byte a2) +{ + struct cls_struct_4byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_4byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4byte a1, a2; + + a1 = *(struct cls_struct_4byte*)(args[0]); + a2 = *(struct cls_struct_4byte*)(args[1]); + + *(cls_struct_4byte*)resp = cls_struct_4byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4byte g_dbl = { 127, 120 }; + struct cls_struct_4byte f_dbl = { 12, 128 }; + struct cls_struct_4byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4byte(*)(cls_struct_4byte, cls_struct_4byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c new file mode 100644 index 0000000..ad9d51c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c @@ -0,0 +1,109 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + printf("%d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1, 3, 4 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9, 3, 4 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c new file mode 100644 index 0000000..4e0c000 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned short a; + unsigned short b; + unsigned char c; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c new file mode 100644 index 0000000..a55edc2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
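None of these files hard-codes a struct size: each zeroes size and alignment and lets ffi_prep_cif derive the layout from the element list, which is the detail the larger shapes such as the cls_64byte.c test starting here are probing. That computed layout can be checked against the C compiler directly; a sketch under the assumption of a C11 compiler (for _Alignof), reusing the field list of cls_struct_24byte from earlier in this testsuite:

    #include <ffi.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Same shape as cls_struct_24byte: two doubles, an int, a float. */
    typedef struct { double a; double b; int c; float d; } sample24;

    int main(void)
    {
      ffi_type *fields[] = { &ffi_type_double, &ffi_type_double,
                             &ffi_type_sint, &ffi_type_float, NULL };
      ffi_type sample_type;
      sample_type.size = 0;
      sample_type.alignment = 0;
      sample_type.type = FFI_TYPE_STRUCT;
      sample_type.elements = fields;

      ffi_type *arg_types[] = { &sample_type };
      ffi_cif cif;
      /* ffi_prep_cif walks the element list and fills in size/alignment. */
      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void,
                       arg_types) != FFI_OK)
        return 1;

      printf("libffi: size=%zu align=%u\n",
             sample_type.size, (unsigned) sample_type.alignment);
      printf("C:      size=%zu align=%zu\n",
             sizeof(sample24), _Alignof(sample24));
      /* On a typical 64-bit target both lines report size 24, alignment 8. */
      return 0;
    }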
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_64byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; +} cls_struct_64byte; + +cls_struct_64byte cls_struct_64byte_fn(struct cls_struct_64byte b0, + struct cls_struct_64byte b1, + struct cls_struct_64byte b2, + struct cls_struct_64byte b3) +{ + struct cls_struct_64byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + + printf("%g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_64byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_64byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_64byte*)(args[0]); + b1 = *(struct cls_struct_64byte*)(args[1]); + b2 = *(struct cls_struct_64byte*)(args[2]); + b3 = *(struct cls_struct_64byte*)(args[3]); + + *(cls_struct_64byte*)resp = cls_struct_64byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[9]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_64byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0 }; + struct cls_struct_64byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0 }; + struct cls_struct_64byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0 }; + struct cls_struct_64byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0 }; + struct cls_struct_64byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_64byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_64byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_64byte(*)(cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, 
res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c new file mode 100644 index 0000000..b4dcdba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, + result.a, result.b, result.c, result.d, result.e, result.f); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[7]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 3, 4, 5 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 3, 4, 5 }; + struct cls_struct_6byte res_dbl = { 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d 
%d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c new file mode 100644 index 0000000..7406780 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c @@ -0,0 +1,99 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned char d; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 128 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 127 }; + struct cls_struct_6byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c new file mode 100644 index 0000000..14a7e96 --- 
/dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c @@ -0,0 +1,117 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; + unsigned char g; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + result.a, result.b, result.c, result.d, result.e, result.f, result.g); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 3, 4, 5, 6 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 3, 4, 5, 6 }; + struct cls_struct_7byte res_dbl = { 0, 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = &ffi_type_uchar; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + res_dbl.g = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + exit(0); +} diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c new file mode 100644 index 0000000..1645cc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c @@ -0,0 +1,97 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned short d; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 254 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 255 }; + struct cls_struct_7byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_ushort; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c new file mode 100644 index 0000000..f6c1ea5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c @@ -0,0 +1,88 @@ +/* Area: ffi_call, closure_call + 
Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_8byte { + int a; + float b; +} cls_struct_8byte; + +cls_struct_8byte cls_struct_8byte_fn(struct cls_struct_8byte a1, + struct cls_struct_8byte a2) +{ + struct cls_struct_8byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %g %d %g: %d %g\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_8byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_8byte a1, a2; + + a1 = *(struct cls_struct_8byte*)(args[0]); + a2 = *(struct cls_struct_8byte*)(args[1]); + + *(cls_struct_8byte*)resp = cls_struct_8byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_8byte g_dbl = { 1, 2.0 }; + struct cls_struct_8byte f_dbl = { 4, 5.0 }; + struct cls_struct_8byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_8byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_8byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_8byte(*)(cls_struct_8byte, cls_struct_8byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c new file mode 100644 index 0000000..0b85722 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does not here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + int a; + double b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%d %g %d %g: %d %g\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7, 8.0}; + struct cls_struct_9byte j_dbl = { 1, 9.0}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c new file mode 100644 index 0000000..edf991d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + double a; + int b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%g %d %g %d: %g %d\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7.0, 8}; + struct cls_struct_9byte j_dbl = { 1.0, 9}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c new file mode 100644 index 0000000..aad5f3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of double. + Limitations: none. + PR: none. 
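The cls_align_* group beginning with this cls_align_double.c checks that libffi pads a struct such as { unsigned char; double; unsigned char; } exactly as the C compiler does. Recent libffi releases (3.3 and later) also expose the computed member offsets through ffi_get_struct_offsets, so the comparison can be made explicitly; the sketch below assumes the vendored libffi is new enough to provide that call, and the `padded` name is illustrative:

    #include <ffi.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Same shape as the cls_struct_align used by cls_align_double.c. */
    typedef struct { unsigned char a; double b; unsigned char c; } padded;

    int main(void)
    {
      ffi_type *fields[] = { &ffi_type_uchar, &ffi_type_double,
                             &ffi_type_uchar, NULL };
      ffi_type padded_type;
      padded_type.size = 0;
      padded_type.alignment = 0;
      padded_type.type = FFI_TYPE_STRUCT;
      padded_type.elements = fields;

      size_t offsets[3];
      /* Lays the struct out for the given ABI (filling in size and
         alignment) and reports each member's offset. */
      if (ffi_get_struct_offsets(FFI_DEFAULT_ABI, &padded_type,
                                 offsets) != FFI_OK)
        return 1;

      printf("libffi: offsets %zu %zu %zu, size %zu\n",
             offsets[0], offsets[1], offsets[2], padded_type.size);
      printf("C:      offsets %zu %zu %zu, size %zu\n",
             offsetof(padded, a), offsetof(padded, b), offsetof(padded, c),
             sizeof(padded));
      /* Wherever double is 8-byte aligned, expect 0 8 16 and size 24. */
      return 0;
    }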
+ Originator: 20031203 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c new file mode 100644 index 0000000..37e0855 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of float. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + float b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c new file mode 100644 index 0000000..b3322d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + long double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c new file mode 100644 index 0000000..cc1c43b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c @@ -0,0 +1,132 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + long double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +cls_struct_align cls_struct_align_fn2( + cls_struct_align a1) +{ + struct cls_struct_align r; + + r.a = a1.a + 1; + r.b = a1.b + 1; + r.c = a1.c + 1; + r.d = a1.d + 1; + r.e = a1.e + 1; + r.f = a1.f + 1; + r.g = a1.g + 1; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_longdouble; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} diff 
--git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c new file mode 100644 index 0000000..5d3bec0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c @@ -0,0 +1,115 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %g %Lg %Lg %Lg %Lg %Lg %Lg %g %Lg: " + "%Lg %Lg %Lg %Lg %Lg %g %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, 
res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c new file mode 100644 index 0000000..8fbf36a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of pointer. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + void *b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = (void *)((uintptr_t)a1.b + (uintptr_t)a2.b); + result.c = a1.c + a2.c; + + printf("%d %" PRIuPTR " %d %d %" PRIuPTR " %d: %d %" PRIuPTR " %d\n", + a1.a, (uintptr_t)a1.b, a1.c, + a2.a, (uintptr_t)a2.b, a2.c, + result.a, (uintptr_t)result.b, + result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, (void *)4951, 127 }; + struct cls_struct_align f_dbl = { 1, (void *)9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_pointer; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c new file mode 100644 index 0000000..039b874 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint16. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sshort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c new file mode 100644 index 0000000..c96c6d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c new file mode 100644 index 0000000..9aa7bdd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c new file mode 100644 index 0000000..97620b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint16. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c new file mode 100644 index 0000000..5766fad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c new file mode 100644 index 0000000..a52cb89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c new file mode 100644 index 0000000..e451dea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c @@ -0,0 +1,66 @@ +/* Area: ffi_call, closure_call + Purpose: Check double arguments in structs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/23/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct Dbls { + double x; + double y; +} Dbls; + +void +closure_test_fn(Dbls p) +{ + printf("%.1f %.1f\n", p.x, p.y); +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Dbls*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Dbls arg = { 1.0, 2.0 }; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &ffi_type_double; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + ((void*(*)(Dbls))(code))(arg); + /* { dg-output "1.0 2.0" } */ + + closure_test_fn(arg); + /* { dg-output "\n1.0 2.0" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c new file mode 100644 index 0000000..84ad4cb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(double *)resp = *(double *)args[0]; + + printf("%f: %f\n",*(double *)args[0], + *(double *)resp); + } +typedef double (*cls_ret_double)(double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + double res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_double)code))(21474.789); + /* { dg-output "21474.789000: 21474.789000" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: 21474.789000" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c new file mode 100644 index 0000000..e077f92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_double_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + double doubleValue = *(double*)args[1]; + + *(ffi_arg*)resp = printf(format, doubleValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1f\n"; + double doubleArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_double; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &doubleArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_double_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, doubleArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c new file mode 100644 index 0000000..0090fed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_float_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(float *)resp = *(float *)args[0]; + + printf("%g: %g\n",*(float *)args[0], + *(float *)resp); + } + +typedef float (*cls_ret_float)(float); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + float res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_float_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_float)code)(-2122.12))); + /* { dg-output "\\-2122.12: \\-2122.12" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: \-2122.120117" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c new file mode 100644 index 0000000..d24e72e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c @@ -0,0 +1,105 @@ +/* Area: ffi_call, closure_call + Purpose: Check long double arguments. + Limitations: none. + PR: none. + Originator: Blake Chaffin */ + +/* This test is known to PASS on armv7l-unknown-linux-gnueabihf, so I have + remove the xfail for arm*-*-* below, until we know more. 
*/ +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +long double cls_ldouble_fn( + long double a1, + long double a2, + long double a3, + long double a4, + long double a5, + long double a6, + long double a7, + long double a8) +{ + long double r = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: %Lg\n", + a1, a2, a3, a4, a5, a6, a7, a8, r); + + return r; +} + +static void +cls_ldouble_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + long double a1 = *(long double*)args[0]; + long double a2 = *(long double*)args[1]; + long double a3 = *(long double*)args[2]; + long double a4 = *(long double*)args[3]; + long double a5 = *(long double*)args[4]; + long double a6 = *(long double*)args[5]; + long double a7 = *(long double*)args[6]; + long double a8 = *(long double*)args[7]; + + *(long double*)resp = cls_ldouble_fn( + a1, a2, a3, a4, a5, a6, a7, a8); +} + +int main(void) +{ + ffi_cif cif; + void* code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[9]; + ffi_type* arg_types[9]; + long double res = 0; + + long double arg1 = 1; + long double arg2 = 2; + long double arg3 = 3; + long double arg4 = 4; + long double arg5 = 5; + long double arg6 = 6; + long double arg7 = 7; + long double arg8 = 8; + + arg_types[0] = &ffi_type_longdouble; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = &ffi_type_longdouble; + arg_types[3] = &ffi_type_longdouble; + arg_types[4] = &ffi_type_longdouble; + arg_types[5] = &ffi_type_longdouble; + arg_types[6] = &ffi_type_longdouble; + arg_types[7] = &ffi_type_longdouble; + arg_types[8] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 8, &ffi_type_longdouble, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = &arg3; + args[3] = &arg4; + args[4] = &arg5; + args[5] = &arg6; + args[6] = &arg7; + args[7] = &arg8; + args[8] = NULL; + + ffi_call(&cif, FFI_FN(cls_ldouble_fn), &res, args); + /* { dg-output "1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ldouble_gn, NULL, code) == FFI_OK); + + res = ((long double(*)(long double, long double, long double, long double, + long double, long double, long double, long double))(code))(arg1, arg2, + arg3, arg4, arg5, arg6, arg7, arg8); + /* { dg-output "\n1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c new file mode 100644 index 0000000..39b438b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test long doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* x86_64-*-mingw* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_longdouble_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + long double ldValue = *(long double*)args[1]; + + *(ffi_arg*)resp = printf(format, ldValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1Lf\n"; + long double ldArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &ldArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_longdouble_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, ldArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c new file mode 100644 index 0000000..7fd6c82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check closures called with many args of mixed types + Limitations: none. + PR: none. 
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + int i; + double r = 0; + double t; + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + { + t = *(long int *)args[i]; + CHECK(t == i+1); + } + else + { + t = *(double *)args[i]; + CHECK(fabs(t - ((i+1) * 0.1)) < FLT_EPSILON); + } + r += t; + } + *(double *)resp = r; +} +typedef double (*cls_ret_double)(double, double, double, double, long int, +double, double, double, double, long int, double, long int, double, long int, +double, long int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[NARGS]; + double res; + int i; + double expected = 64.9; + + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + cl_arg_types[i] = &ffi_type_slong; + else + cl_arg_types[i] = &ffi_type_double; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NARGS, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (((cls_ret_double)code))(0.1, 0.2, 0.3, 0.4, 5, 0.6, 0.7, 0.8, 0.9, 10, + 1.1, 12, 1.3, 14, 1.5, 16); + if (fabs(res - expected) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c new file mode 100644 index 0000000..62b0697 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check register allocation for closure calls with many float and double arguments + Limitations: none. + PR: none.
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_mixed_float_double_fn(ffi_cif* cif , void* ret, void** args, + void* userdata __UNUSED__) +{ + double r = 0; + unsigned int i; + double t; + for(i=0; i < cif->nargs; i++) + { + if(cif->arg_types[i] == &ffi_type_double) { + t = *(((double**)(args))[i]); + } else { + t = *(((float**)(args))[i]); + } + r += t; + } + *((double*)ret) = r; +} +typedef double (*cls_mixed)(double, float, double, double, double, double, double, float, float, double, float, float); + +int main (void) +{ + ffi_cif cif; + ffi_closure *closure; + void* code; + ffi_type *argtypes[12] = {&ffi_type_double, &ffi_type_float, &ffi_type_double, + &ffi_type_double, &ffi_type_double, &ffi_type_double, + &ffi_type_double, &ffi_type_float, &ffi_type_float, + &ffi_type_double, &ffi_type_float, &ffi_type_float}; + + + closure = ffi_closure_alloc(sizeof(ffi_closure), (void**)&code); + if(closure ==NULL) + abort(); + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 12, &ffi_type_double, argtypes) == FFI_OK); + CHECK(ffi_prep_closure_loc(closure, &cif, cls_mixed_float_double_fn, NULL, code) == FFI_OK); + double ret = ((cls_mixed)code)(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2); + ffi_closure_free(closure); + if(fabs(ret - 7.8) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c new file mode 100644 index 0000000..71df7b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed char values. + Limitations: none. + PR: PR13221.
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed char test_func_fn(signed char a1, signed char a2) +{ + signed char result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a2; + + a1 = *(signed char *)avals[0]; + a2 = *(signed char *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed char (*test_type)(signed char, signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + signed char a, b, res_closure; + + a = 2; + b = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_schar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 125: 127" } */ + printf("res: %d\n", (signed char)res_call); + /* { dg-output "\nres: 127" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 125); + /* { dg-output "\n2 125: 127" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c new file mode 100644 index 0000000..4c39153 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed short a1, signed short a2) +{ + signed short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed short a1, a2; + + a1 = *(signed short *)avals[0]; + a2 = *(signed short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed short (*test_type)(signed short, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c new file mode 100644 index 0000000..1c3aeb5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed char a1, signed short a2, + signed char a3, signed short a4) +{ + signed short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a3; + signed short a2, a4; + + a1 = *(signed char *)avals[0]; + a2 = *(signed short *)avals[1]; + a3 = *(signed char *)avals[2]; + a4 = *(signed short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef signed short (*test_type)(signed char, signed short, + signed char, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + signed char a, c; + signed short b, d, res_closure; + + a = 1; + b = 32765; + c = 127; + d = -128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = &ffi_type_schar; + cl_arg_types[3] = &ffi_type_sshort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 32765 127 -128: 32765" } */ + printf("res: %d\n", (signed short)res_call); + /* { dg-output "\nres: 32765" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 32765, 127, -128); + /* { dg-output "\n1 32765 127 -128: 32765" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32765" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c new file mode 100644 index 0000000..009c02c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned char test_func_fn(unsigned char a1, unsigned char a2, + unsigned char a3, unsigned char a4) +{ + unsigned char result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a2, a3, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned char *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned char *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned char (*test_type)(unsigned char, unsigned char, + unsigned char, unsigned char); + +void test_func(ffi_cif *cif __UNUSED__, void *rval __UNUSED__, void **avals, + void *data __UNUSED__) +{ + printf("%d %d %d %d\n", *(unsigned char *)avals[0], + *(unsigned char *)avals[1], *(unsigned char *)avals[2], + *(unsigned char *)avals[3]); +} +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, b, c, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_uchar; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 125: 255" } */ + printf("res: %d\n", (unsigned char)res_call); + /* { dg-output "\nres: 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 125); + /* { dg-output "\n1 2 127 125: 255" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c new file mode 100644 index 0000000..dd10ca7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned short a1, unsigned short a2) +{ + unsigned short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned short a1, a2; + + a1 = *(unsigned short *)avals[0]; + a2 = *(unsigned short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef unsigned short (*test_type)(unsigned short, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c new file mode 100644 index 0000000..2588e97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned char a1, unsigned short a2, + unsigned char a3, unsigned short a4) +{ + unsigned short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a3; + unsigned short a2, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned short *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned short (*test_type)(unsigned char, unsigned short, + unsigned char, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, c; + unsigned short b, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_ushort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 128: 258" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 258" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 128); + /* { dg-output "\n1 2 127 128: 258" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 258" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c new file mode 100644 index 0000000..d82a87a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +void* cls_pointer_fn(void* a1, void* a2) +{ + void* result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + *(void**)resp = cls_pointer_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x12345678; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_pointer_fn), &res, args); + /* { dg-output "0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + /* { dg-output "\n0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c new file mode 100644 index 0000000..1f1d915 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c @@ -0,0 +1,142 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments across multiple hideous stack frames. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/7/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +static long dummyVar; + +long dummy_func( + long double a1, char b1, + long double a2, char b2, + long double a3, char b3, + long double a4, char b4) +{ + return a1 + b1 + a2 + b2 + a3 + b3 + a4 + b4; +} + +void* cls_pointer_fn2(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +void* cls_pointer_fn1(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(intptr_t) a1, + (unsigned int)(intptr_t) a2, + (unsigned int)(intptr_t) result); + + result = cls_pointer_fn2(result, a1); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + *(void**)resp = cls_pointer_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x01234567; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + printf("\n"); + ffi_call(&cif, FFI_FN(cls_pointer_fn1), &res, args); + + printf("res: 0x%08x\n", (unsigned 
int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c new file mode 100644 index 0000000..82986b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Check return value schar. + Limitations: none. + PR: none. + Originator: 20031108 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_schar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed char *)args[0]; + printf("%d: %d\n",*(signed char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed char (*cls_ret_schar)(signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed char res; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_schar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_schar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c new file mode 100644 index 0000000..c7e13b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sint32. + Limitations: none. + PR: none. 
+ Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed int *)args[0]; + printf("%d: %d\n",*(signed int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed int (*cls_ret_sint)(signed int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed int res; + + cl_arg_types[0] = &ffi_type_sint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sint)code))(65534); + /* { dg-output "65534: 65534" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65534" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c new file mode 100644 index 0000000..846d57e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sshort. + Limitations: none. + PR: none. + Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sshort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed short *)args[0]; + printf("%d: %d\n",*(signed short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed short (*cls_ret_sshort)(signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed short res; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sshort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sshort)code))(255); + /* { dg-output "255: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c new file mode 100644 index 0000000..6d1fdae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c @@ -0,0 +1,114 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +#include "ffitest.h" + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static void +test_fn (ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + int n = *(int*)args[0]; + struct small_tag s1 = * (struct small_tag *) args[1]; + struct large_tag l1 = * (struct large_tag *) args[2]; + struct small_tag s2 = * (struct small_tag *) args[3]; + + printf ("%d %d %d %d %d %d %d %d %d %d\n", n, s1.a, s1.b, + l1.a, l1.b, l1.c, l1.d, l1.e, + s2.a, s2.b); + * (ffi_arg*) resp = 42; +} + +int +main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc (sizeof (ffi_closure), &code); + ffi_type* arg_types[5]; + + ffi_arg res = 0; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int si; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, + arg_types) == FFI_OK); + + si = 4; + s1.a = 5; + s1.b = 6; + + s2.a = 20; + s2.b = 21; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_fn, NULL, code) == FFI_OK); + + res = ((int (*)(int, ...))(code))(si, s1, l1, s2); + /* { dg-output "4 5 6 10 11 12 13 14 20 21" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c new file mode 100644 index 0000000..c1317e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value uchar. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uchar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned char *)args[0]; + printf("%d: %d\n",*(unsigned char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned char (*cls_ret_uchar)(unsigned char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned char res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uchar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uchar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c new file mode 100644 index 0000000..6491c5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned char argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned char T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c new file mode 100644 index 0000000..885cff5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value uint. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg *)resp = *(unsigned int *)args[0]; + + printf("%d: %d\n",*(unsigned int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned int (*cls_ret_uint)(unsigned int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uint)code))(2147483647); + /* { dg-output "2147483647: 2147483647" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c new file mode 100644 index 0000000..b04cfd1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned int argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned int T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)*(ffi_arg *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c new file mode 100644 index 0000000..0315082 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned long argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned long T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(T *)resp = *(T *)args[0]; + + printf("%ld: %ld %ld\n", *(T *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ulong; + cl_arg_types[1] = &ffi_type_ulong; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ulong, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %ld\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c new file mode 100644 index 0000000..62f2cae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c @@ -0,0 +1,47 @@ +/* Area: closure_call + Purpose: Check return value long long. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +static void cls_ret_ulonglong_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + *(unsigned long long *)resp= 0xfffffffffffffffLL ^ *(unsigned long long *)args[0]; + + printf("%" PRIuLL ": %" PRIuLL "\n",*(unsigned long long *)args[0], + *(unsigned long long *)(resp)); +} +typedef unsigned long long (*cls_ret_ulonglong)(unsigned long long); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned long long res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint64, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ulonglong_fn, NULL, code) == FFI_OK); + res = (*((cls_ret_ulonglong)code))(214LL); + /* { dg-output "214: 1152921504606846761" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 1152921504606846761" } */ + + res = (*((cls_ret_ulonglong)code))(9223372035854775808LL); + /* { dg-output "\n9223372035854775808: 8070450533247928831" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 8070450533247928831" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c new file mode 100644 index 0000000..a00100e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value ushort. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_ushort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned short *)args[0]; + + printf("%d: %d\n",*(unsigned short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned short (*cls_ret_ushort)(unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned short res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ushort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_ushort)code))(65535); + /* { dg-output "65535: 65535" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65535" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c new file mode 100644 index 0000000..37aa106 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned short argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned short T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c new file mode 100644 index 0000000..f5a7317 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c @@ -0,0 +1,36 @@ +/* Area: ffi_prep_cif, ffi_prep_closure + Purpose: Test error return for bad ABIs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +dummy_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* arg_types[1]; + + arg_types[0] = NULL; + + CHECK(ffi_prep_cif(&cif, 255, 0, &ffi_type_void, + arg_types) == FFI_BAD_ABI); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_void, + arg_types) == FFI_OK); + + cif.abi= 255; + + CHECK(ffi_prep_closure_loc(pcl, &cif, dummy_fn, NULL, code) == FFI_BAD_ABI); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h @@ -0,0 +1,138 @@ +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <fcntl.h> +#include <ffi.h> +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include <stdint.h> +#endif + +#if defined HAVE_INTTYPES_H +#include <inttypes.h> +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include <sys/mman.h> +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include <sys/mman.h> +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 <inttypes.h> provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match <inttypes.h>, which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in <sgidefs.h>. */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match <inttypes.h>, which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. 
*/ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. */ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c new file mode 100644 index 0000000..1915c3f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c @@ -0,0 +1,341 @@ +/* Area: ffi_call, closure_call + Purpose: Check large structure returns. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ +/* { dg-options -Wformat=0 { target moxie*-*-elf or1k-*-* } } */ + +#include "ffitest.h" + +typedef struct BigStruct{ + uint8_t a; + int8_t b; + uint16_t c; + int16_t d; + uint32_t e; + int32_t f; + uint64_t g; + int64_t h; + float i; + double j; + long double k; + char* l; + uint8_t m; + int8_t n; + uint16_t o; + int16_t p; + uint32_t q; + int32_t r; + uint64_t s; + int64_t t; + float u; + double v; + long double w; + char* x; + uint8_t y; + int8_t z; + uint16_t aa; + int16_t bb; + uint32_t cc; + int32_t dd; + uint64_t ee; + int64_t ff; + float gg; + double hh; + long double ii; + char* jj; + uint8_t kk; + int8_t ll; + uint16_t mm; + int16_t nn; + uint32_t oo; + int32_t pp; + uint64_t qq; + int64_t rr; + float ss; + double tt; + long double uu; + char* vv; + uint8_t ww; + int8_t xx; +} BigStruct; + +BigStruct +test_large_fn( + uint8_t ui8_1, + int8_t si8_1, + uint16_t ui16_1, + int16_t si16_1, + uint32_t ui32_1, + int32_t si32_1, + uint64_t ui64_1, + int64_t si64_1, + float f_1, + double d_1, + long double ld_1, + char* p_1, + uint8_t ui8_2, + int8_t si8_2, + uint16_t ui16_2, + int16_t si16_2, + uint32_t ui32_2, + int32_t si32_2, + uint64_t ui64_2, + int64_t si64_2, + float f_2, + double d_2, + long double ld_2, + char* p_2, + uint8_t ui8_3, + int8_t si8_3, + uint16_t ui16_3, + int16_t si16_3, + uint32_t ui32_3, + int32_t si32_3, + uint64_t ui64_3, + int64_t si64_3, + float f_3, + double d_3, + long double ld_3, + char* p_3, + uint8_t ui8_4, + int8_t si8_4, + uint16_t ui16_4, + int16_t si16_4, + uint32_t ui32_4, + int32_t si32_4, + uint64_t ui64_4, + int64_t si64_4, + float f_4, + double d_4, + long double ld_4, + char* p_4, + uint8_t ui8_5, + int8_t si8_5) +{ + BigStruct retVal = { + ui8_1 + 1, si8_1 + 1, ui16_1 + 1, si16_1 + 1, ui32_1 + 1, si32_1 + 1, + ui64_1 + 1, si64_1 + 1, f_1 + 1, d_1 + 1, ld_1 + 1, (char*)((intptr_t)p_1 + 1), + ui8_2 + 2, si8_2 + 2, ui16_2 + 2, si16_2 + 2, ui32_2 + 2, si32_2 + 2, + ui64_2 + 2, si64_2 + 2, f_2 + 2, d_2 + 2, ld_2 + 2, (char*)((intptr_t)p_2 + 2), + ui8_3 + 3, si8_3 + 3, ui16_3 + 3, si16_3 + 3, ui32_3 + 3, si32_3 + 3, + ui64_3 + 3, si64_3 + 3, f_3 + 3, d_3 + 3, ld_3 + 3, (char*)((intptr_t)p_3 + 3), + ui8_4 + 4, si8_4 + 4, ui16_4 + 4, si16_4 + 4, ui32_4 + 4, si32_4 + 4, + ui64_4 + 4, si64_4 + 4, f_4 + 4, d_4 + 4, ld_4 + 4, (char*)((intptr_t)p_4 + 4), + ui8_5 + 5, si8_5 + 5}; + + 
printf("%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 ": " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, (unsigned long)p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, (unsigned long)p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, (unsigned long)p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, (unsigned long)p_4, ui8_5, si8_5, + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + + return retVal; +} + +static void +cls_large_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + uint8_t ui8_1 = *(uint8_t*)args[0]; + int8_t si8_1 = *(int8_t*)args[1]; + uint16_t ui16_1 = *(uint16_t*)args[2]; + int16_t si16_1 = *(int16_t*)args[3]; + uint32_t ui32_1 = *(uint32_t*)args[4]; + int32_t si32_1 = *(int32_t*)args[5]; + uint64_t ui64_1 = *(uint64_t*)args[6]; + int64_t si64_1 = *(int64_t*)args[7]; + float f_1 = *(float*)args[8]; + double d_1 = *(double*)args[9]; + long double ld_1 = *(long double*)args[10]; + char* p_1 = *(char**)args[11]; + uint8_t ui8_2 = *(uint8_t*)args[12]; + int8_t si8_2 = *(int8_t*)args[13]; + uint16_t ui16_2 = *(uint16_t*)args[14]; + int16_t si16_2 = *(int16_t*)args[15]; + uint32_t ui32_2 = *(uint32_t*)args[16]; + int32_t si32_2 = *(int32_t*)args[17]; + uint64_t ui64_2 = *(uint64_t*)args[18]; + int64_t si64_2 = *(int64_t*)args[19]; + float f_2 = *(float*)args[20]; + double d_2 = *(double*)args[21]; + long double ld_2 = *(long double*)args[22]; + char* p_2 = *(char**)args[23]; + uint8_t ui8_3 = *(uint8_t*)args[24]; + int8_t si8_3 = *(int8_t*)args[25]; + uint16_t ui16_3 = *(uint16_t*)args[26]; + int16_t si16_3 = *(int16_t*)args[27]; + uint32_t ui32_3 = *(uint32_t*)args[28]; + int32_t si32_3 = *(int32_t*)args[29]; + uint64_t ui64_3 = *(uint64_t*)args[30]; + int64_t si64_3 = *(int64_t*)args[31]; + float f_3 = *(float*)args[32]; + double d_3 = *(double*)args[33]; + long double ld_3 = *(long double*)args[34]; + char* p_3 = *(char**)args[35]; + uint8_t ui8_4 = *(uint8_t*)args[36]; + int8_t si8_4 = *(int8_t*)args[37]; + uint16_t ui16_4 = *(uint16_t*)args[38]; + int16_t si16_4 = *(int16_t*)args[39]; + uint32_t ui32_4 = *(uint32_t*)args[40]; + int32_t si32_4 = *(int32_t*)args[41]; + uint64_t ui64_4 = 
*(uint64_t*)args[42]; + int64_t si64_4 = *(int64_t*)args[43]; + float f_4 = *(float*)args[44]; + double d_4 = *(double*)args[45]; + long double ld_4 = *(long double*)args[46]; + char* p_4 = *(char**)args[47]; + uint8_t ui8_5 = *(uint8_t*)args[48]; + int8_t si8_5 = *(int8_t*)args[49]; + + *(BigStruct*)resp = test_large_fn( + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, p_4, + ui8_5, si8_5); +} + +int +main(int argc __UNUSED__, const char** argv __UNUSED__) +{ + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + + ffi_cif cif; + ffi_type* argTypes[51]; + void* argValues[51]; + + ffi_type ret_struct_type; + ffi_type* st_fields[51]; + BigStruct retVal; + + uint8_t ui8 = 1; + int8_t si8 = 2; + uint16_t ui16 = 3; + int16_t si16 = 4; + uint32_t ui32 = 5; + int32_t si32 = 6; + uint64_t ui64 = 7; + int64_t si64 = 8; + float f = 9; + double d = 10; + long double ld = 11; + char* p = (char*)0x12345678; + + memset (&retVal, 0, sizeof(retVal)); + + ret_struct_type.size = 0; + ret_struct_type.alignment = 0; + ret_struct_type.type = FFI_TYPE_STRUCT; + ret_struct_type.elements = st_fields; + + st_fields[0] = st_fields[12] = st_fields[24] = st_fields[36] = st_fields[48] = &ffi_type_uint8; + st_fields[1] = st_fields[13] = st_fields[25] = st_fields[37] = st_fields[49] = &ffi_type_sint8; + st_fields[2] = st_fields[14] = st_fields[26] = st_fields[38] = &ffi_type_uint16; + st_fields[3] = st_fields[15] = st_fields[27] = st_fields[39] = &ffi_type_sint16; + st_fields[4] = st_fields[16] = st_fields[28] = st_fields[40] = &ffi_type_uint32; + st_fields[5] = st_fields[17] = st_fields[29] = st_fields[41] = &ffi_type_sint32; + st_fields[6] = st_fields[18] = st_fields[30] = st_fields[42] = &ffi_type_uint64; + st_fields[7] = st_fields[19] = st_fields[31] = st_fields[43] = &ffi_type_sint64; + st_fields[8] = st_fields[20] = st_fields[32] = st_fields[44] = &ffi_type_float; + st_fields[9] = st_fields[21] = st_fields[33] = st_fields[45] = &ffi_type_double; + st_fields[10] = st_fields[22] = st_fields[34] = st_fields[46] = &ffi_type_longdouble; + st_fields[11] = st_fields[23] = st_fields[35] = st_fields[47] = &ffi_type_pointer; + + st_fields[50] = NULL; + + argTypes[0] = argTypes[12] = argTypes[24] = argTypes[36] = argTypes[48] = &ffi_type_uint8; + argValues[0] = argValues[12] = argValues[24] = argValues[36] = argValues[48] = &ui8; + argTypes[1] = argTypes[13] = argTypes[25] = argTypes[37] = argTypes[49] = &ffi_type_sint8; + argValues[1] = argValues[13] = argValues[25] = argValues[37] = argValues[49] = &si8; + argTypes[2] = argTypes[14] = argTypes[26] = argTypes[38] = &ffi_type_uint16; + argValues[2] = argValues[14] = argValues[26] = argValues[38] = &ui16; + argTypes[3] = argTypes[15] = argTypes[27] = argTypes[39] = &ffi_type_sint16; + argValues[3] = argValues[15] = argValues[27] = argValues[39] = &si16; + argTypes[4] = argTypes[16] = argTypes[28] = argTypes[40] = &ffi_type_uint32; + argValues[4] = argValues[16] = argValues[28] = argValues[40] = &ui32; + argTypes[5] = argTypes[17] = argTypes[29] = argTypes[41] = &ffi_type_sint32; + argValues[5] = argValues[17] = argValues[29] = argValues[41] = &si32; + argTypes[6] = argTypes[18] = argTypes[30] = argTypes[42] = &ffi_type_uint64; + argValues[6] = argValues[18] = 
argValues[30] = argValues[42] = &ui64; + argTypes[7] = argTypes[19] = argTypes[31] = argTypes[43] = &ffi_type_sint64; + argValues[7] = argValues[19] = argValues[31] = argValues[43] = &si64; + argTypes[8] = argTypes[20] = argTypes[32] = argTypes[44] = &ffi_type_float; + argValues[8] = argValues[20] = argValues[32] = argValues[44] = &f; + argTypes[9] = argTypes[21] = argTypes[33] = argTypes[45] = &ffi_type_double; + argValues[9] = argValues[21] = argValues[33] = argValues[45] = &d; + argTypes[10] = argTypes[22] = argTypes[34] = argTypes[46] = &ffi_type_longdouble; + argValues[10] = argValues[22] = argValues[34] = argValues[46] = &ld; + argTypes[11] = argTypes[23] = argTypes[35] = argTypes[47] = &ffi_type_pointer; + argValues[11] = argValues[23] = argValues[35] = argValues[47] = &p; + + argTypes[50] = NULL; + argValues[50] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 50, &ret_struct_type, argTypes) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_large_fn), &retVal, argValues); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_large_fn, NULL, code) == FFI_OK); + + retVal = ((BigStruct(*)( + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t))(code))( + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 
0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c new file mode 100644 index 0000000..c15e3a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c @@ -0,0 +1,152 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + 
g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined)) + (code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c new file mode 100644 index 0000000..477a6b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c @@ -0,0 +1,161 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2, + struct cls_struct_16byte1 b3) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g %g %g %d: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + b3.a, b3.b, b3.c, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + struct cls_struct_16byte1 b3; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + b3 = *(struct cls_struct_16byte1*)(args[3]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct 
cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_16byte1 h_dbl = { 3.0, 2.0, 4}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined, + cls_struct_16byte1)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + /* CHECK( 1 == 0); */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c new file mode 100644 index 0000000..3cf2b44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c @@ -0,0 +1,134 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + unsigned char y; + struct A x; + unsigned int z; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b3.z + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + result.z = 0; + + printf("%d %d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, b3.z, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[4]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = { 99, {12LL , 127}, 255}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &ffi_type_uchar; + cls_struct_fields1[1] = &cls_struct_type; + cls_struct_fields1[2] = &ffi_type_uint; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c new file mode 100644 index 0000000..3510493 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c @@ -0,0 +1,121 @@ +/* Area: ffi_call, closure_call + Purpose: Check parameter passing with nested structs + of a single type. This tests the special cases + for homogeneous floating-point aggregates in the + AArch64 PCS. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + float a_x; + float a_y; +} A; + +typedef struct B { + float b_x; + float b_y; +} B; + +typedef struct C { + A a; + B b; +} C; + +static C C_fn (int x, int y, int z, C source, int i, int j, int k) +{ + C result; + result.a.a_x = source.a.a_x; + result.a.a_y = source.a.a_y; + result.b.b_x = source.b.b_x; + result.b.b_y = source.b.b_y; + + printf ("%d, %d, %d, %d, %d, %d\n", x, y, z, i, j, k); + + printf ("%.1f, %.1f, %.1f, %.1f, " + "%.1f, %.1f, %.1f, %.1f\n", + source.a.a_x, source.a.a_y, + source.b.b_x, source.b.b_y, + result.a.a_x, result.a.a_y, + result.b.b_x, result.b.b_y); + + return result; +} + +int main (void) +{ + ffi_cif cif; + + ffi_type* struct_fields_source_a[3]; + ffi_type* struct_fields_source_b[3]; + ffi_type* struct_fields_source_c[3]; + ffi_type* arg_types[8]; + + ffi_type struct_type_a, struct_type_b, struct_type_c; + + struct A source_fld_a = {1.0, 2.0}; + struct B source_fld_b = {4.0, 8.0}; + int k = 1; + + struct C result; + struct C source = {source_fld_a, source_fld_b}; + + struct_type_a.size = 0; + struct_type_a.alignment = 0; + struct_type_a.type = FFI_TYPE_STRUCT; + struct_type_a.elements = struct_fields_source_a; + + struct_type_b.size = 0; + struct_type_b.alignment = 0; + struct_type_b.type = FFI_TYPE_STRUCT; + struct_type_b.elements = struct_fields_source_b; + + struct_type_c.size = 0; + struct_type_c.alignment = 0; + struct_type_c.type = FFI_TYPE_STRUCT; + struct_type_c.elements = struct_fields_source_c; + + struct_fields_source_a[0] = &ffi_type_float; + struct_fields_source_a[1] = &ffi_type_float; + struct_fields_source_a[2] = NULL; + + struct_fields_source_b[0] = &ffi_type_float; + struct_fields_source_b[1] = &ffi_type_float; + struct_fields_source_b[2] = NULL; + + struct_fields_source_c[0] = &struct_type_a; + struct_fields_source_c[1] = &struct_type_b; + struct_fields_source_c[2] = NULL; + + arg_types[0] = &ffi_type_sint32; + arg_types[1] = &ffi_type_sint32; + arg_types[2] = &ffi_type_sint32; + arg_types[3] = &struct_type_c; + arg_types[4] = &ffi_type_sint32; + arg_types[5] = &ffi_type_sint32; + arg_types[6] = &ffi_type_sint32; + arg_types[7] = NULL; + + void *args[7]; + args[0] = &k; + args[1] = &k; + args[2] = &k; + args[3] = &source; + args[4] = &k; + args[5] = &k; + args[6] = &k; + CHECK (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 7, &struct_type_c, + arg_types) == FFI_OK); + + ffi_call (&cif, FFI_FN (C_fn), &result, args); + /* { dg-output "1, 1, 1, 1, 1, 1\n" } */ + /* { dg-output "1.0, 2.0, 4.0, 8.0, 1.0, 2.0, 4.0, 8.0" } */ + CHECK (result.a.a_x == source.a.a_x); + CHECK (result.a.a_y == source.a.a_y); + CHECK (result.b.b_x == source.b.b_x); + CHECK (result.b.b_y == source.b.b_y); + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c new file mode 100644 index 
0000000..69268cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c @@ -0,0 +1,110 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%lu %d %lu %d %d: %lu %d %d\n", b0.a, b0.b, b1.x.a, b1.x.b, b1.y, + result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1, 7}; + struct B f_dbl = {{12 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_ulong; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c new file mode 100644 index 0000000..ab18cad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. 
+ PR: none. + Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b0.a, b0.b, + (int)b1.x.a, b1.x.b, b1.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c new file mode 100644 index 0000000..2ffb4d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c new file mode 100644 index 0000000..6c79845 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c @@ -0,0 +1,112 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + long double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c new file mode 100644 index 0000000..59d3579 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_slong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c new file mode 100644 index 0000000..27595e6 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c new file mode 100644 index 0000000..0e6c682 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. 
+ PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c new file mode 100644 index 0000000..5f7ac67 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned char a; + unsigned long long b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", b2.a, (int)b2.b, + b3.x.a, (int)b3.x.b, b3.y, (int)b4.d, b4.e, + result.x.a, (int)result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1, 7LL}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_ulong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + 
f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c new file mode 100644 index 0000000..6a91555 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct my_ffi_struct { + double a; + double b; + double c; +} my_ffi_struct; + +my_ffi_struct callee(struct my_ffi_struct a1, struct my_ffi_struct a2) +{ + struct my_ffi_struct result; + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +void stub(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct my_ffi_struct a1; + struct my_ffi_struct a2; + + a1 = *(struct my_ffi_struct*)(args[0]); + a2 = *(struct my_ffi_struct*)(args[1]); + + *(my_ffi_struct *)resp = callee(a1, a2); +} + + +int main(void) +{ + ffi_type* my_ffi_struct_fields[4]; + ffi_type my_ffi_struct_type; + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[3]; + + struct my_ffi_struct g = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct f = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct res; + + my_ffi_struct_type.size = 0; + my_ffi_struct_type.alignment = 0; + my_ffi_struct_type.type = FFI_TYPE_STRUCT; + my_ffi_struct_type.elements = my_ffi_struct_fields; + + my_ffi_struct_fields[0] = &ffi_type_double; + my_ffi_struct_fields[1] = &ffi_type_double; + my_ffi_struct_fields[2] = &ffi_type_double; + my_ffi_struct_fields[3] = NULL; + + arg_types[0] = &my_ffi_struct_type; + arg_types[1] = &my_ffi_struct_type; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &my_ffi_struct_type, + arg_types) == FFI_OK); + + args[0] = &g; + args[1] = &f; + args[2] = NULL; + ffi_call(&cif, FFI_FN(callee), &res, args); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, stub, NULL, code) == FFI_OK); + + res = ((my_ffi_struct(*)(struct my_ffi_struct, struct my_ffi_struct))(code))(g, f); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0);; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c new file mode 100644 index 0000000..71c2469 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c @@ -0,0 +1,145 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_108byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + int n; +} struct_108byte; + +struct_108byte cls_struct_108byte_fn( + struct_108byte b0, + struct_108byte b1, + struct_108byte b2, + struct_108byte b3) +{ + struct_108byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n); + + return result; +} + +static void +cls_struct_108byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_108byte b0, b1, b2, b3; + + b0 = *(struct_108byte*)(args[0]); + b1 = *(struct_108byte*)(args[1]); + b2 = *(struct_108byte*)(args[2]); + b3 = *(struct_108byte*)(args[3]); + + *(struct_108byte*)resp = cls_struct_108byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[15]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_108byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 7 }; + struct_108byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 4 }; + struct_108byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 3 }; + struct_108byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 2 }; + struct_108byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_sint32; + cls_struct_fields[14] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + 
ffi_call(&cif, FFI_FN(cls_struct_108byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_108byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_108byte(*)(struct_108byte, struct_108byte, + struct_108byte, struct_108byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c new file mode 100644 index 0000000..d9c750e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c @@ -0,0 +1,148 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_116byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + double n; + int o; +} struct_116byte; + +struct_116byte cls_struct_116byte_fn( + struct_116byte b0, + struct_116byte b1, + struct_116byte b2, + struct_116byte b3) +{ + struct_116byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + result.o = b0.o + b1.o + b2.o + b3.o; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n, result.o); + + return result; +} + +static void +cls_struct_116byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_116byte b0, b1, b2, b3; + + b0 = *(struct_116byte*)(args[0]); + b1 = *(struct_116byte*)(args[1]); + b2 = *(struct_116byte*)(args[2]); + b3 = *(struct_116byte*)(args[3]); + + *(struct_116byte*)resp = cls_struct_116byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + 
void* args_dbl[5]; + ffi_type* cls_struct_fields[16]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_116byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 7 }; + struct_116byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 6.0, 4 }; + struct_116byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 7.0, 3 }; + struct_116byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 8.0, 2 }; + struct_116byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_double; + cls_struct_fields[14] = &ffi_type_sint32; + cls_struct_fields[15] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_116byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_116byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_116byte(*)(struct_116byte, struct_116byte, + struct_116byte, struct_116byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c new file mode 100644 index 0000000..973ee02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7.0 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3.0 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2.0 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, 
res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c new file mode 100644 index 0000000..84323d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c @@ -0,0 +1,125 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + long long i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %" PRIdLL "\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_sint64; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = 
&e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c new file mode 100644 index 0000000..ca31056 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: 41908. + Originator: 20091102 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_combined { + float a; + float b; + float c; + float d; +} cls_struct_combined; + +void cls_struct_combined_fn(struct cls_struct_combined arg) +{ + printf("%g %g %g %g\n", + arg.a, arg.b, + arg.c, arg.d); + fflush(stdout); +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_combined a0; + + a0 = *(struct cls_struct_combined*)(args[0]); + + cls_struct_combined_fn(a0); +} + + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cls_struct_fields0[5]; + ffi_type cls_struct_type0; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_combined g_dbl = {4.0, 5.0, 1.0, 8.0}; + + cls_struct_type0.size = 0; + cls_struct_type0.alignment = 0; + cls_struct_type0.type = FFI_TYPE_STRUCT; + cls_struct_type0.elements = cls_struct_fields0; + + cls_struct_fields0[0] = &ffi_type_float; + cls_struct_fields0[1] = &ffi_type_float; + cls_struct_fields0[2] = &ffi_type_float; + cls_struct_fields0[3] = &ffi_type_float; + cls_struct_fields0[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type0; + dbl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, + dbl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + ((void(*)(cls_struct_combined)) (code))(g_dbl); + /* { dg-output "4 5 1 8" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc new file mode 100644 index 0000000..e114565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc @@ -0,0 +1,117 @@ +/* Area: ffi_closure, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. 
+ Originator: Jeff Sturm */ + +/* { dg-do run { xfail x86_64-apple-darwin* moxie*-*-* } } */ + +#include "ffitest.h" + +void ABI_ATTR +closure_test_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{ + throw 9; +} + +typedef void (*closure_test_type)(); + +void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (int)(intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg*)resp); + + throw (int)*(ffi_arg*)resp; +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = (ffi_closure *)ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + + { + cl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, + &ffi_type_void, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn, NULL, code) == FFI_OK); + + try + { + (*((closure_test_type)(code)))(); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + + { + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_uint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_uint; + cl_arg_types[10] = &ffi_type_uint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_uint; + cl_arg_types[13] = &ffi_type_uint; + cl_arg_types[14] = &ffi_type_uint; + cl_arg_types[15] = &ffi_type_uint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + try + { + (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "\n1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + } catch (int exception_code) + { + CHECK(exception_code == 255); + } + printf("part two OK\n"); + /* { dg-output "\npart two OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc new file mode 100644 index 0000000..153d240 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc @@ -0,0 +1,54 @@ +/* Area: ffi_call, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. + Originator: Andreas Tobler 20061213 */ + +/* { dg-do run { xfail moxie*-*-* } } */ + +#include "ffitest.h" + +static int checking(int a __UNUSED__, short b __UNUSED__, + signed char c __UNUSED__) +{ + throw 9; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = &sc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + { + try + { + ffi_call(&cif, FFI_FN(checking), &rint, values); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc new file mode 100644 index 0000000..4a812ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc @@ -0,0 +1,91 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include <complex.h> + +typedef struct cls_struct_align { + unsigned char a; + _Complex T_C_TYPE b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + struct cls_struct_align a1, struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %f,%fi %d %d %f,%fi %d: %d %f,%fi %d\n", + a1.a, T_CONV creal (a1.b), T_CONV cimag (a1.b), a1.c, + a2.a, T_CONV creal (a2.b), T_CONV cimag (a2.b), a2.c, + result.a, T_CONV creal (result.b), T_CONV cimag (result.b), result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_c[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* c_arg_types[5]; + + struct cls_struct_align g_c = { 12, 4951 + 7 * I, 127 }; + struct cls_struct_align f_c = { 1, 9320 + 1 * I, 13 }; + struct cls_struct_align res_c; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &T_FFI_TYPE; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + c_arg_types[0] = &cls_struct_type; + c_arg_types[1] = &cls_struct_type; + c_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + c_arg_types) == 
FFI_OK); + + args_c[0] = &g_c; + args_c[1] = &f_c; + args_c[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_c, args_c); + /* { dg-output "12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_c = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_c, f_c); + /* { dg-output "\n12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c new file mode 100644 index 0000000..0dff23a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c new file mode 100644 index 0000000..0affbd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c new file mode 100644 index 0000000..7889ba8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc new file mode 100644 index 0000000..f937404 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc @@ -0,0 +1,42 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static void cls_ret_complex_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + _Complex T_C_TYPE *pa; + _Complex T_C_TYPE *pr; + pa = (_Complex T_C_TYPE *)args[0]; + pr = (_Complex T_C_TYPE *)resp; + *pr = *pa; + + printf("%.6f,%.6fi: %.6f,%.6fi\n", + T_CONV creal (*pa), T_CONV cimag (*pa), + T_CONV creal (*pr), T_CONV cimag (*pr)); + } +typedef _Complex T_C_TYPE (*cls_ret_complex)(_Complex T_C_TYPE); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + _Complex T_C_TYPE res; + + cl_arg_types[0] = &T_FFI_TYPE; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_complex_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_complex)code))(0.125 + 128.0 * I); + printf("res: %.6f,%.6fi\n", T_CONV creal (res), T_CONV cimag (res)); + CHECK (res == (0.125 + 128.0 * I)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c new file mode 100644 index 0000000..05e3534 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c new file mode 100644 index 0000000..5df7849 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c new file mode 100644 index 0000000..2b1c320 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc new file mode 100644 index 0000000..df8708d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc @@ -0,0 +1,71 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +typedef struct Cs { + _Complex T_C_TYPE x; + _Complex T_C_TYPE y; +} Cs; + +Cs gc; + +void +closure_test_fn(Cs p) +{ + printf("%.1f,%.1fi %.1f,%.1fi\n", + T_CONV creal (p.x), T_CONV cimag (p.x), + T_CONV creal (p.y), T_CONV cimag (p.y)); + gc = p; +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Cs*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type *cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Cs arg = { 1.0 + 11.0 * I, 2.0 + 22.0 * I}; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &T_FFI_TYPE; + ts1_type_elements[1] = &T_FFI_TYPE; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + ((void*(*)(Cs))(code))(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + closure_test_fn(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c new file mode 100644 index 0000000..ec71346 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c new file mode 100644 index 0000000..96fdf75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c new file mode 100644 index 0000000..005b467 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc new file mode 100644 index 0000000..8a3e15f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc @@ -0,0 +1,80 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include +#include +#include +#include + +static _Complex T_C_TYPE gComplexValue1 = 1 + 2 * I; +static _Complex T_C_TYPE gComplexValue2 = 3 + 4 * I; + +static int cls_variadic(const char *format, ...) +{ + va_list ap; + _Complex T_C_TYPE p1, p2; + + va_start (ap, format); + p1 = va_arg (ap, _Complex T_C_TYPE); + p2 = va_arg (ap, _Complex T_C_TYPE); + va_end (ap); + + return printf(format, T_CONV creal (p1), T_CONV cimag (p1), + T_CONV creal (p2), T_CONV cimag (p2)); +} + +static void +cls_complex_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + gComplexValue1 = *(_Complex T_C_TYPE*)args[1]; + gComplexValue2 = *(_Complex T_C_TYPE*)args[2]; + + *(ffi_arg*)resp = + printf(format, + T_CONV creal (gComplexValue1), T_CONV cimag (gComplexValue1), + T_CONV creal (gComplexValue2), T_CONV cimag (gComplexValue2)); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[4]; + char *format = "%.1f,%.1fi %.1f,%.1fi\n"; + + _Complex T_C_TYPE complexArg1 = 1.0 + 22.0 *I; + _Complex T_C_TYPE complexArg2 = 333.0 + 4444.0 *I; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &T_FFI_TYPE; + arg_types[2] = &T_FFI_TYPE; + arg_types[3] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &complexArg1; + args[2] = &complexArg2; + args[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_variadic), &res, args); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_complex_va_fn, NULL, code) + == FFI_OK); + + res = ((int(*)(char *, ...))(code))(format, complexArg1, complexArg2); + CHECK (gComplexValue1 == complexArg1); + CHECK (gComplexValue2 == complexArg2); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c new file mode 100644 index 0000000..879ccf3 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c new file mode 100644 index 0000000..2b17826 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c @@ -0,0 +1,16 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +/* Alpha splits _Complex into two arguments. It's illegal to pass + float through varargs, so _Complex float goes badly. In sort of + gets passed as _Complex double, but the compiler doesn't agree + with itself on this issue. */ +/* { dg-do run { xfail alpha*-*-* } } */ + +#include "complex_defs_float.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c new file mode 100644 index 0000000..6eca965 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp new file mode 100644 index 0000000..4631db2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
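+# Descriptive note (not part of upstream libffi): this driver only runs the complex-type tests when the feature probe for FFI_TARGET_HAS_COMPLEX_TYPE succeeds; on targets without complex support each test in $tlist is reported as unsupported rather than run.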
+ +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_TARGET_HAS_COMPLEX_TYPE"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc new file mode 100644 index 0000000..515ae3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc @@ -0,0 +1,51 @@ +/* -*-c-*-*/ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE f_complex(_Complex T_C_TYPE c, int x, int *py) +{ + c = -(2 * creal (c)) + (cimag (c) + 1)* I; + *py += x; + + return c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex T_C_TYPE tc_arg; + _Complex T_C_TYPE tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &T_FFI_TYPE, args) == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%f,%fi %f,%fi, x %d 1234, y %d 11110\n", + T_CONV creal (tc_result), T_CONV cimag (tc_result), + T_CONV creal (2.0), T_CONV creal (8.0), tc_int_arg_x, tc_y); + + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc new file mode 100644 index 0000000..3583e16 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_double +/* C type corresponding to the base type. */ +#define T_C_TYPE double +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc new file mode 100644 index 0000000..bbd9375 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_float +/* C type corresponding to the base type. */ +#define T_C_TYPE float +/* C cast for a value of type T_C_TYPE that is passed to printf. 
*/ +#define T_CONV (double) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc new file mode 100644 index 0000000..14b9f24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_longdouble +/* C type corresponding to the base type. */ +#define T_C_TYPE long double +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c new file mode 100644 index 0000000..8a3297b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c new file mode 100644 index 0000000..5044ebb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c new file mode 100644 index 0000000..bac3190 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c @@ -0,0 +1,86 @@ +/* Area: ffi_call + Purpose: Check non-standard complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "ffi.h" +#include + +_Complex int f_complex(_Complex int c, int x, int *py) +{ + __real__ c = -2 * __real__ c; + __imag__ c = __imag__ c + 1; + *py += x; + return c; +} + +/* + * This macro can be used to define new complex type descriptors + * in a platform independent way. + * + * name: Name of the new descriptor is ffi_type_complex_. + * type: The C base type of the complex type. 
+ */ +#define FFI_COMPLEX_TYPEDEF(name, type, ffitype) \ + static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffitype), NULL \ + }; \ + struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ + }; \ + ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ + } + +/* Define new complex type descriptors using the macro: */ +/* ffi_type_complex_sint */ +FFI_COMPLEX_TYPEDEF(sint, int, ffi_type_sint); +/* ffi_type_complex_uchar */ +FFI_COMPLEX_TYPEDEF(uchar, unsigned char, ffi_type_uint8); + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex int tc_arg; + _Complex int tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &ffi_type_complex_sint; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &ffi_type_complex_sint, args) + == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%d,%di %d,%di, x %d 1234, y %d 11110\n", + (int)tc_result, (int)(tc_result * -I), 2, 8, tc_int_arg_x, tc_y); + /* dg-output "-2,8i 2,8i, x 1234 1234, y 11110 11110" */ + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c new file mode 100644 index 0000000..7e78366 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc new file mode 100644 index 0000000..e37a774 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc @@ -0,0 +1,78 @@ +/* -*-c-*- */ +#include "ffitest.h" + +#include +#include + +static _Complex T_C_TYPE many(_Complex T_C_TYPE c1, + _Complex T_C_TYPE c2, + _Complex T_C_TYPE c3, + _Complex T_C_TYPE c4, + _Complex T_C_TYPE c5, + _Complex T_C_TYPE c6, + _Complex T_C_TYPE c7, + _Complex T_C_TYPE c8, + _Complex T_C_TYPE c9, + _Complex T_C_TYPE c10, + _Complex T_C_TYPE c11, + _Complex T_C_TYPE c12, + _Complex T_C_TYPE c13) +{ + printf("0 :%f,%fi\n" + "1 :%f,%fi\n" + "2 :%f,%fi\n" + "3 :%f,%fi\n" + "4 :%f,%fi\n" + "5 :%f,%fi\n" + "6 :%f,%fi\n" + "7 :%f,%fi\n" + "8 :%f,%fi\n" + "9 :%f,%fi\n" + "10:%f,%fi\n" + "11:%f,%fi\n" + "12:%f,%fi\n", + T_CONV creal (c1), T_CONV cimag (c1), + T_CONV creal (c2), T_CONV cimag (c2), + T_CONV creal (c3), T_CONV cimag (c3), + T_CONV creal (c4), T_CONV cimag (c4), + T_CONV creal (c5), T_CONV cimag (c5), + T_CONV creal (c6), T_CONV cimag (c6), + T_CONV creal (c7), T_CONV cimag (c7), + T_CONV creal (c8), T_CONV cimag (c8), + T_CONV creal (c9), T_CONV cimag (c9), + T_CONV creal (c10), T_CONV cimag (c10), + T_CONV creal (c11), T_CONV cimag (c11), + T_CONV creal (c12), T_CONV cimag (c12), + T_CONV creal (c13), T_CONV cimag (c13)); + + return (c1+c2-c3-c4+c5+c6+c7-c8-c9-c10-c11+c12+c13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + _Complex T_C_TYPE ca[13]; + _Complex T_C_TYPE c, cc; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &T_FFI_TYPE; + values[i] = &ca[i]; + ca[i] = i + (-20 - i) * I; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, &T_FFI_TYPE, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &c, values); + + cc = many(ca[0], ca[1], ca[2], ca[3], ca[4], ca[5], ca[6], ca[7], ca[8], + ca[9], ca[10], ca[11], ca[12]); + CHECK(creal (cc) == creal (c)); + CHECK(cimag (cc) == cimag (c)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c new file mode 100644 index 0000000..3fd53c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c new file mode 100644 index 0000000..c43d21c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c new file mode 100644 index 0000000..dbab723 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc new file mode 100644 index 0000000..8bf0c1f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc @@ -0,0 +1,37 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c) +{ + printf ("%f,%fi\n", T_CONV creal (c), T_CONV cimag (c)); + return 2 * c; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c, rc, rc2; + T_C_TYPE cr, ci; + + args[0] = &T_FFI_TYPE; + values[0] = &c; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, args) == FFI_OK); + + for (cr = -127.0; cr < 127; cr++) + { + ci = 1000.0 - cr; + c = cr + ci * I; + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == 2 * c); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc new file mode 100644 index 0000000..7cecc0f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc @@ -0,0 +1,41 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c1, float fl2, unsigned int in3, _Complex T_C_TYPE c4) +{ + return c1 + fl2 + in3 + c4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c4, rc, rc2; + float fl2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, 
FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + fl2 = 128.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, fl2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c new file mode 100644 index 0000000..727410d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c new file mode 100644 index 0000000..a2aeada --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c new file mode 100644 index 0000000..103504b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc new file mode 100644 index 0000000..265170b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc @@ -0,0 +1,44 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +_Complex T_C_TYPE +return_c(_Complex T_C_TYPE c1, _Complex T_C_TYPE c2, + unsigned int in3, _Complex T_C_TYPE c4) +{ + volatile _Complex T_C_TYPE r = c1 + c2 + in3 + c4; + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c2, c4, rc, rc2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &T_FFI_TYPE; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &c2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + c2 = 128.0 + 256.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, c2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c new file mode 100644 index 0000000..ab9efac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c new file mode 100644 index 0000000..d7f22c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c new file mode 100644 index 0000000..3edea62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c new file mode 100644 index 0000000..e2497cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c new file mode 100644 index 0000000..a35528f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c new file mode 100644 index 0000000..142d7be --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c new file mode 100644 index 0000000..b00c404 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c @@ -0,0 +1,34 @@ +/* { dg-do run } */ + +#include "static-chain.h" + +#if defined(__GNUC__) && !defined(__clang__) && defined(STATIC_CHAIN_REG) + +#include "ffitest.h" + +/* Blatent assumption here that the prologue doesn't clobber the + static chain for trivial functions. If this is not true, don't + define STATIC_CHAIN_REG, and we'll test what we can via other tests. 
*/ +void *doit(void) +{ + register void *chain __asm__(STATIC_CHAIN_REG); + return chain; +} + +int main() +{ + ffi_cif cif; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(doit), &result, NULL, &result); + + CHECK(result == &result); + + return 0; +} + +#else /* UNSUPPORTED */ +int main() { return 0; } +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c new file mode 100644 index 0000000..7b34afc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c @@ -0,0 +1,28 @@ +/* { dg-do run } */ + +#include "ffitest.h" + +void doit(ffi_cif *cif, void *rvalue, void **avalue, void *closure) +{ + (void)cif; + (void)avalue; + *(void **)rvalue = closure; +} + +typedef void * (*FN)(void); + +int main() +{ + ffi_cif cif; + ffi_go_closure cl; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + CHECK(ffi_prep_go_closure(&cl, &cif, doit) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(*(FN *)&cl), &result, NULL, &cl); + + CHECK(result == &cl); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go.exp new file mode 100644 index 0000000..100c5e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/go.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
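+# Descriptive note (not part of upstream libffi): the Go-closure tests in this directory (aa-direct.c, closure1.c) exercise ffi_call_go and ffi_prep_go_closure; the driver below runs them only when the probe finds FFI_GO_CLOSURES defined, otherwise each test is marked unsupported.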
+ +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_GO_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h new file mode 100644 index 0000000..3675b40 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h @@ -0,0 +1,19 @@ +#ifdef __aarch64__ +# define STATIC_CHAIN_REG "x18" +#elif defined(__alpha__) +# define STATIC_CHAIN_REG "$1" +#elif defined(__arm__) +# define STATIC_CHAIN_REG "ip" +#elif defined(__sparc__) +# if defined(__arch64__) || defined(__sparcv9) +# define STATIC_CHAIN_REG "g5" +# else +# define STATIC_CHAIN_REG "g2" +# endif +#elif defined(__x86_64__) +# define STATIC_CHAIN_REG "r10" +#elif defined(__i386__) +# ifndef ABI_NUM +# define STATIC_CHAIN_REG "ecx" /* FFI_DEFAULT_ABI only */ +# endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/rbffi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/rbffi.h new file mode 100644 index 0000000..89b3e32 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/rbffi.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_RBFFI_H +#define RBFFI_RBFFI_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_PARAMETERS (32) + +extern VALUE rbffi_FFIModule; + +extern void rbffi_Type_Init(VALUE ffiModule); +extern void rbffi_Buffer_Init(VALUE ffiModule); +extern void rbffi_Invoker_Init(VALUE ffiModule); +extern void rbffi_Variadic_Init(VALUE ffiModule); +extern VALUE rbffi_AbstractMemoryClass, rbffi_InvokerClass; +extern int rbffi_type_size(VALUE type); +extern void rbffi_Thread_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_RBFFI_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian.h new file mode 100644 index 0000000..ebb8420 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/rbffi_endian.h @@ -0,0 +1,59 @@ +#ifndef JFFI_ENDIAN_H +#define JFFI_ENDIAN_H + +#ifndef _MSC_VER +#include +#endif + +#include + +#if defined(__linux__) || defined(__CYGWIN__) || defined(__GNU__) || defined(__GLIBC__) || defined(__HAIKU__) +# include +# if !defined(LITTLE_ENDIAN) && defined(__LITTLE_ENDIAN) +# define LITTLE_ENDIAN __LITTLE_ENDIAN +# endif +# if !defined(BIG_ENDIAN) && defined(__BIG_ENDIAN) +# define BIG_ENDIAN __BIG_ENDIAN +# endif +# if !defined(BYTE_ORDER) && defined(__BYTE_ORDER) +# define BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#ifdef __sun +# include +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(_BIG_ENDIAN) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(_LITTLE_ENDIAN) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_AIX) && !defined(BYTE_ORDER) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(__BIG_ENDIAN__) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(__LITTLE_ENDIAN__) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_WIN32) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# define BYTE_ORDER LITTLE_ENDIAN +#endif + +#if !defined(BYTE_ORDER) || !defined(LITTLE_ENDIAN) || !defined(BIG_ENDIAN) +# error "Cannot determine the endian-ness of this platform" +#endif + +#endif /* JFFI_ENDIAN_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool.h new file mode 100644 index 0000000..9130a8b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdbool.h @@ -0,0 +1,8 @@ +#ifndef FFI_STDBOOL_H +#define FFI_STDBOOL_H + +typedef int bool; +#define true 1 +#define false 0 + +#endif /* FFI_STDBOOL_H */ \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint.h new file mode 100644 index 0000000..6ce7457 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ext/ffi_c/win32/stdint.h @@ -0,0 +1,201 @@ +/* stdint.h standard header */ +#if !defined(_MSC_VER) && !defined(INT8_MIN) +#pragma once +#ifndef _STDINT +#define _STDINT +#ifndef RC_INVOKED +#include + +/* NB: assumes + byte has 8 bits + long is 32 bits + pointer can convert to and from long long + long long is longest type + */ + +_C_STD_BEGIN + /* TYPE DEFINITIONS */ +typedef signed char int8_t; +typedef short int16_t; +typedef int int32_t; + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; + +typedef signed char 
int_least8_t; +typedef short int_least16_t; +typedef int int_least32_t; + +typedef unsigned char uint_least8_t; +typedef unsigned short uint_least16_t; +typedef unsigned int uint_least32_t; + +typedef char int_fast8_t; +typedef int int_fast16_t; +typedef int int_fast32_t; + +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef unsigned int uint_fast32_t; + +#ifndef _INTPTR_T_DEFINED + #define _INTPTR_T_DEFINED + #ifdef _WIN64 +typedef __int64 intptr_t; + #else /* _WIN64 */ +typedef _W64 int intptr_t; + #endif /* _WIN64 */ +#endif /* _INTPTR_T_DEFINED */ + +#ifndef _UINTPTR_T_DEFINED + #define _UINTPTR_T_DEFINED + #ifdef _WIN64 +typedef unsigned __int64 uintptr_t; + #else /* _WIN64 */ +typedef _W64 unsigned int uintptr_t; + #endif /* _WIN64 */ +#endif /* _UINTPTR_T_DEFINED */ + +typedef _Longlong int64_t; +typedef _ULonglong uint64_t; + +typedef _Longlong int_least64_t; +typedef _ULonglong uint_least64_t; + +typedef _Longlong int_fast64_t; +typedef _ULonglong uint_fast64_t; + +typedef _Longlong intmax_t; +typedef _ULonglong uintmax_t; + + /* LIMIT MACROS */ +#define INT8_MIN (-0x7f - _C2) +#define INT16_MIN (-0x7fff - _C2) +#define INT32_MIN (-0x7fffffff - _C2) + +#define INT8_MAX 0x7f +#define INT16_MAX 0x7fff +#define INT32_MAX 0x7fffffff +#define UINT8_MAX 0xff +#define UINT16_MAX 0xffff +#define UINT32_MAX 0xffffffff + +#define INT_LEAST8_MIN (-0x7f - _C2) +#define INT_LEAST16_MIN (-0x7fff - _C2) +#define INT_LEAST32_MIN (-0x7fffffff - _C2) + +#define INT_LEAST8_MAX 0x7f +#define INT_LEAST16_MAX 0x7fff +#define INT_LEAST32_MAX 0x7fffffff +#define UINT_LEAST8_MAX 0xff +#define UINT_LEAST16_MAX 0xffff +#define UINT_LEAST32_MAX 0xffffffff + +#define INT_FAST8_MIN (-0x7f - _C2) +#define INT_FAST16_MIN (-0x7fff - _C2) +#define INT_FAST32_MIN (-0x7fffffff - _C2) + +#define INT_FAST8_MAX 0x7f +#define INT_FAST16_MAX 0x7fff +#define INT_FAST32_MAX 0x7fffffff +#define UINT_FAST8_MAX 0xff +#define UINT_FAST16_MAX 0xffff +#define UINT_FAST32_MAX 0xffffffff + + #if _INTPTR == 0 || _INTPTR == 1 +#define INTPTR_MAX 0x7fffffff +#define INTPTR_MIN (-INTPTR_MAX - _C2) +#define UINTPTR_MAX 0xffffffff + + #else /* _INTPTR == 2 */ +#define INTPTR_MIN (-_LLONG_MAX - _C2) +#define INTPTR_MAX _LLONG_MAX +#define UINTPTR_MAX _ULLONG_MAX +#endif /* _INTPTR */ + +#define INT8_C(x) (x) +#define INT16_C(x) (x) +#define INT32_C(x) ((x) + (INT32_MAX - INT32_MAX)) + +#define UINT8_C(x) (x) +#define UINT16_C(x) (x) +#define UINT32_C(x) ((x) + (UINT32_MAX - UINT32_MAX)) + +#ifdef _WIN64 + #define PTRDIFF_MIN INT64_MIN + #define PTRDIFF_MAX INT64_MAX +#else /* _WIN64 */ + #define PTRDIFF_MIN INT32_MIN + #define PTRDIFF_MAX INT32_MAX +#endif /* _WIN64 */ + +#define SIG_ATOMIC_MIN INT32_MIN +#define SIG_ATOMIC_MAX INT32_MAX + +#ifndef SIZE_MAX + #ifdef _WIN64 + #define SIZE_MAX UINT64_MAX + #else /* _WIN64 */ + #define SIZE_MAX UINT32_MAX + #endif /* _WIN64 */ +#endif /* SIZE_MAX */ + +#define WCHAR_MIN 0x0000 +#define WCHAR_MAX 0xffff + +#define WINT_MIN 0x0000 +#define WINT_MAX 0xffff + + #define INT64_MIN (-0x7fffffffffffffff - _C2) + #define INT64_MAX 0x7fffffffffffffff + #define UINT64_MAX 0xffffffffffffffffU + + #define INT_LEAST64_MIN (-0x7fffffffffffffff - _C2) + #define INT_LEAST64_MAX 0x7fffffffffffffff + #define UINT_LEAST64_MAX 0xffffffffffffffffU + + #define INT_FAST64_MIN (-0x7fffffffffffffff - _C2) + #define INT_FAST64_MAX 0x7fffffffffffffff + #define UINT_FAST64_MAX 0xffffffffffffffffU + + #define INTMAX_MIN (-0x7fffffffffffffff - _C2) + #define INTMAX_MAX 0x7fffffffffffffff + 
#define UINTMAX_MAX 0xffffffffffffffffU + +#define INT64_C(x) ((x) + (INT64_MAX - INT64_MAX)) +#define UINT64_C(x) ((x) + (UINT64_MAX - UINT64_MAX)) +#define INTMAX_C(x) INT64_C(x) +#define UINTMAX_C(x) UINT64_C(x) +_C_STD_END +#endif /* RC_INVOKED */ +#endif /* _STDINT */ + + #if defined(_STD_USING) +using _CSTD int8_t; using _CSTD int16_t; +using _CSTD int32_t; using _CSTD int64_t; + +using _CSTD uint8_t; using _CSTD uint16_t; +using _CSTD uint32_t; using _CSTD uint64_t; + +using _CSTD int_least8_t; using _CSTD int_least16_t; +using _CSTD int_least32_t; using _CSTD int_least64_t; +using _CSTD uint_least8_t; using _CSTD uint_least16_t; +using _CSTD uint_least32_t; using _CSTD uint_least64_t; + +using _CSTD intmax_t; using _CSTD uintmax_t; + +using _CSTD uintptr_t; +using _CSTD intptr_t; + +using _CSTD int_fast8_t; using _CSTD int_fast16_t; +using _CSTD int_fast32_t; using _CSTD int_fast64_t; +using _CSTD uint_fast8_t; using _CSTD uint_fast16_t; +using _CSTD uint_fast32_t; using _CSTD uint_fast64_t; + #endif /* defined(_STD_USING) */ + +/* + * Copyright (c) 1992-2009 by P.J. Plauger. ALL RIGHTS RESERVED. + * Consult your license regarding permissions and restrictions. +V5.20:0009 */ +#endif /* !defined(_MSC_VER) && !defined(INT8_MIN) */ \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ffi.gemspec b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ffi.gemspec new file mode 100644 index 0000000..48964f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/ffi.gemspec @@ -0,0 +1,43 @@ +require File.expand_path("../lib/#{File.basename(__FILE__, '.gemspec')}/version", __FILE__) + +Gem::Specification.new do |s| + s.name = 'ffi' + s.version = FFI::VERSION + s.author = 'Wayne Meissner' + s.email = 'wmeissner@gmail.com' + s.homepage = 'https://github.com/ffi/ffi/wiki' + s.summary = 'Ruby FFI' + s.description = 'Ruby FFI library' + if s.respond_to?(:metadata) + s.metadata['bug_tracker_uri'] = 'https://github.com/ffi/ffi/issues' + s.metadata['changelog_uri'] = 'https://github.com/ffi/ffi/blob/master/CHANGELOG.md' + s.metadata['documentation_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['wiki_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['source_code_uri'] = 'https://github.com/ffi/ffi/' + s.metadata['mailing_list_uri'] = 'http://groups.google.com/group/ruby-ffi' + end + s.files = `git ls-files -z`.split("\x0").reject do |f| + f =~ /^(bench|gen|libtest|nbproject|spec)/ + end + + # Add libffi git files + lfs = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + # Add autoconf generated files of libffi + lfs += %w[ configure config.guess config.sub install-sh ltmain.sh missing fficonfig.h.in ] + # Add automake generated files of libffi + lfs += `git --git-dir ext/ffi_c/libffi/.git ls-files -z *.am */*.am`.gsub(".am\0", ".in\0").split("\x0") + s.files += lfs.map do |f| + File.join("ext/ffi_c/libffi", f) + end + + s.extensions << 'ext/ffi_c/extconf.rb' + s.rdoc_options = %w[--exclude=ext/ffi_c/.*\.o$ --exclude=ffi_c\.(bundle|so)$] + s.license = 'BSD-3-Clause' + s.require_paths << 'ext/ffi_c' + s.required_ruby_version = '>= 2.3' + s.add_development_dependency 'rake', '~> 13.0' + s.add_development_dependency 'rake-compiler', '~> 1.0' + s.add_development_dependency 'rake-compiler-dock', '~> 1.0' + s.add_development_dependency 'rspec', '~> 2.14.1' + s.add_development_dependency 'rubygems-tasks', "~> 0.2.4" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi.rb new file mode 
100644 index 0000000..e6c75a7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi.rb @@ -0,0 +1,28 @@ +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + Object.send(:remove_const, :FFI) if defined?(::FFI) + begin + require RUBY_VERSION.split('.')[0, 2].join('.') + '/ffi_c' + rescue Exception + require 'ffi_c' + end + + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'jruby' && Gem::Version.new(RUBY_ENGINE_VERSION) >= Gem::Version.new("9.3.pre") + JRuby::Util.load_ext("org.jruby.ext.ffi.FFIService") + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'truffleruby' && Gem::Version.new(RUBY_ENGINE_VERSION) >= Gem::Version.new("20.1.0-dev-a") + require 'truffleruby/ffi_backend' + require 'ffi/ffi' + +else + # Remove the ffi gem dir from the load path, then reload the internal ffi implementation + $LOAD_PATH.delete(File.dirname(__FILE__)) + $LOAD_PATH.delete(File.join(File.dirname(__FILE__), 'ffi')) + unless $LOADED_FEATURES.nil? + $LOADED_FEATURES.delete(__FILE__) + $LOADED_FEATURES.delete('ffi.rb') + end + require 'ffi.rb' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/autopointer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/autopointer.rb new file mode 100644 index 0000000..889a3e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/autopointer.rb @@ -0,0 +1,203 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + class AutoPointer < Pointer + extend DataConverter + + # @overload initialize(pointer, method) + # @param pointer [Pointer] + # @param method [Method] + # @return [self] + # The passed Method will be invoked at GC time. + # @overload initialize(pointer, proc) + # @param pointer [Pointer] + # @return [self] + # The passed Proc will be invoked at GC time (SEE WARNING BELOW!) + # @note WARNING: passing a proc _may_ cause your pointer to never be + # GC'd, unless you're careful to avoid trapping a reference to the + # pointer in the proc. 
See the test specs for examples. + # @overload initialize(pointer) { |p| ... } + # @param pointer [Pointer] + # @yieldparam [Pointer] p +pointer+ passed to the block + # @return [self] + # The passed block will be invoked at GC time. + # @note + # WARNING: passing a block will cause your pointer to never be GC'd. + # This is bad. + # @overload initialize(pointer) + # @param pointer [Pointer] + # @return [self] + # The pointer's release() class method will be invoked at GC time. + # + # @note The safest, and therefore preferred, calling + # idiom is to pass a Method as the second parameter. Example usage: + # + # class PointerHelper + # def self.release(pointer) + # ... + # end + # end + # + # p = AutoPointer.new(other_pointer, PointerHelper.method(:release)) + # + # The above code will cause PointerHelper#release to be invoked at GC time. + # + # @note + # The last calling idiom (only one parameter) is generally only + # going to be useful if you subclass {AutoPointer}, and override + # #release, which by default does nothing. + def initialize(ptr, proc=nil, &block) + super(ptr.type_size, ptr) + raise TypeError, "Invalid pointer" if ptr.nil? || !ptr.kind_of?(Pointer) \ + || ptr.kind_of?(MemoryPointer) || ptr.kind_of?(AutoPointer) + + @releaser = if proc + if not proc.respond_to?(:call) + raise RuntimeError.new("proc must be callable") + end + CallableReleaser.new(ptr, proc) + + else + if not self.class.respond_to?(:release) + raise RuntimeError.new("no release method defined") + end + DefaultReleaser.new(ptr, self.class) + end + + ObjectSpace.define_finalizer(self, @releaser) + self + end + + # @return [nil] + # Free the pointer. + def free + @releaser.free + end + + # @param [Boolean] autorelease + # @return [Boolean] +autorelease+ + # Set +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease=(autorelease) + @releaser.autorelease=(autorelease) + end + + # @return [Boolean] +autorelease+ + # Get +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease? + @releaser.autorelease + end + + # @abstract Base class for {AutoPointer}'s releasers. + # + # All subclasses of Releaser should define a +#release(ptr)+ method. + # A releaser is an object in charge of release an {AutoPointer}. + class Releaser + attr_accessor :autorelease + + # @param [Pointer] ptr + # @param [#call] proc + # @return [nil] + # A new instance of Releaser. + def initialize(ptr, proc) + @ptr = ptr + @proc = proc + @autorelease = true + end + + # @return [nil] + # Free pointer. + def free + if @ptr + release(@ptr) + @autorelease = false + @ptr = nil + @proc = nil + end + end + + # @param args + # Release pointer if +autorelease+ is set. + def call(*args) + release(@ptr) if @autorelease && @ptr + end + end + + # DefaultReleaser is a {Releaser} used when an {AutoPointer} is defined + # without Proc or Method. In this case, the pointer to release must be of + # a class derived from AutoPointer with a {release} class method. + class DefaultReleaser < Releaser + # @param [Pointer] ptr + # @return [nil] + # Release +ptr+ using the {release} class method of its class. + def release(ptr) + @proc.release(ptr) + end + end + + # CallableReleaser is a {Releaser} used when an {AutoPointer} is defined with a + # Proc or a Method. + class CallableReleaser < Releaser + # Release +ptr+ by using Proc or Method defined at +ptr+ + # {AutoPointer#initialize initialization}. 
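A concrete sketch of the release-by-Method idiom documented above, assuming a libc with malloc/free (module and variable names are illustrative):

  require 'ffi'

  module LibC
    extend FFI::Library
    ffi_lib FFI::Library::LIBC
    attach_function :malloc, [:size_t], :pointer
    attach_function :free,   [:pointer], :void
  end

  # free() runs when the AutoPointer is garbage collected (or on #free).
  ptr = FFI::AutoPointer.new(LibC.malloc(64), LibC.method(:free))
  ptr.autorelease?   # => true by default
  ptr.free           # release eagerly instead of waiting for GC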
+ # + # @param [Pointer] ptr + # @return [nil] + def release(ptr) + @proc.call(ptr) + end + end + + # Return native type of AutoPointer. + # + # Override {DataConverter#native_type}. + # @return [Type::POINTER] + # @raise {RuntimeError} if class does not implement a +#release+ method + def self.native_type + if not self.respond_to?(:release) + raise RuntimeError.new("no release method defined for #{self.inspect}") + end + Type::POINTER + end + + # Create a new AutoPointer. + # + # Override {DataConverter#from_native}. + # @overload self.from_native(ptr, ctx) + # @param [Pointer] ptr + # @param ctx not used. Please set +nil+. + # @return [AutoPointer] + def self.from_native(val, ctx) + self.new(val) + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/buffer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/buffer.rb new file mode 100644 index 0000000..449e45b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/buffer.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/buffer' in user code +# diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/callback.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/callback.rb new file mode 100644 index 0000000..32d52f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/callback.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/callback' in user code +# diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/data_converter.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/data_converter.rb new file mode 100644 index 0000000..1527588 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/data_converter.rb @@ -0,0 +1,67 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This module is used to extend somes classes and give then a common API. 
+ # + # Most of methods defined here must be overriden. + module DataConverter + # Get native type. + # + # @overload native_type(type) + # @param [String, Symbol, Type] type + # @return [Type] + # Get native type from +type+. + # + # @overload native_type + # @raise {NotImplementedError} This method must be overriden. + def native_type(type = nil) + if type + @native_type = FFI.find_type(type) + else + native_type = @native_type + unless native_type + raise NotImplementedError, 'native_type method not overridden and no native_type set' + end + native_type + end + end + + # Convert to a native type. + def to_native(value, ctx) + value + end + + # Convert from a native type. + def from_native(value, ctx) + value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/enum.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/enum.rb new file mode 100644 index 0000000..8fcb498 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/enum.rb @@ -0,0 +1,296 @@ +# +# Copyright (C) 2009, 2010 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # An instance of this class permits to manage {Enum}s. In fact, Enums is a collection of {Enum}s. + class Enums + + # @return [nil] + def initialize + @all_enums = Array.new + @tagged_enums = Hash.new + @symbol_map = Hash.new + end + + # @param [Enum] enum + # Add an {Enum} to the collection. + def <<(enum) + @all_enums << enum + @tagged_enums[enum.tag] = enum unless enum.tag.nil? + @symbol_map.merge!(enum.symbol_map) + end + + # @param query enum tag or part of an enum name + # @return [Enum] + # Find a {Enum} in collection. + def find(query) + if @tagged_enums.has_key?(query) + @tagged_enums[query] + else + @all_enums.detect { |enum| enum.symbols.include?(query) } + end + end + + # @param symbol a symbol to find in merge symbol maps of all enums. + # @return a symbol + def __map_symbol(symbol) + @symbol_map[symbol] + end + + end + + # Represents a C enum. 
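A minimal sketch of a custom converter built on the DataConverter hooks above; BoolConverter and :c_bool are made-up names mapping Ruby booleans to a C int:

  require 'ffi'

  module BoolConverter
    extend FFI::DataConverter
    native_type FFI::Type::INT         # underlying native representation

    def self.to_native(value, ctx)     # Ruby -> C
      value ? 1 : 0
    end

    def self.from_native(value, ctx)   # C -> Ruby
      value != 0
    end
  end

  module MyLib
    extend FFI::Library
    typedef BoolConverter, :c_bool     # registered as an FFI::Type::Mapped
  end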
+ # + # For a C enum: + # enum fruits { + # apple, + # banana, + # orange, + # pineapple + # }; + # are defined this vocabulary: + # * a _symbol_ is a word from the enumeration (ie. _apple_, by example); + # * a _value_ is the value of a symbol in the enumeration (by example, apple has value _0_ and banana _1_). + class Enum + include DataConverter + + attr_reader :tag + attr_reader :native_type + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info + # @param [nil, Symbol] tag enum tag + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Enum + # @param [nil, Enumerable] info symbols and values for new Enum + # @param [nil, Symbol] tag name of new Enum + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? + last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate enum key" if @kv_map.has_key?(i) + @kv_map[i] = value + last_cst = i + value += 1 + when Integer + @kv_map[last_cst] = i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # @return [Array] enum symbol names + def symbols + @kv_map.keys + end + + # Get a symbol or a value from the enum. + # @overload [](query) + # Get enum value from symbol. + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get enum symbol from value. + # @param [Integer] query + # @return [Symbol] + def [](query) + case query + when Symbol + @kv_map[query] + when Integer + @vk_map[query] + end + end + alias find [] + + # Get the symbol map. + # @return [Hash] + def symbol_map + @kv_map + end + + alias to_h symbol_map + alias to_hash symbol_map + + # @param [Symbol, Integer, #to_int] val + # @param ctx unused + # @return [Integer] value of a enum symbol + def to_native(val, ctx) + @kv_map[val] || if val.is_a?(Integer) + val + elsif val.respond_to?(:to_int) + val.to_int + else + raise ArgumentError, "invalid enum value, #{val.inspect}" + end + end + + # @param val + # @return symbol name if it exists for +val+. + def from_native(val, ctx) + @vk_map[val] || val + end + end + + # Represents a C enum whose values are power of 2 + # + # @example + # enum { + # red = (1<<0), + # green = (1<<1), + # blue = (1<<2) + # } + # + # Contrary to classical enums, bitmask values are usually combined + # when used. + class Bitmask < Enum + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Bitmask + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? 
+ last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate bitmask key" if @kv_map.has_key?(i) + @kv_map[i] = 1 << value + last_cst = i + value += 1 + when Integer + raise ArgumentError, "bitmask index should be positive" if i<0 + @kv_map[last_cst] = 1 << i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # Get a symbol list or a value from the bitmask + # @overload [](*query) + # Get bitmask value from symbol list + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get bitmaks value from symbol array + # @param [Array] query + # @return [Integer] + # @overload [](*query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Integer] query + # @return [Array] + # @overload [](query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Array] query + # @return [Array] + def [](*query) + flat_query = query.flatten + raise ArgumentError, "query should be homogeneous, #{query.inspect}" unless flat_query.all? { |o| o.is_a?(Symbol) } || flat_query.all? { |o| o.is_a?(Integer) || o.respond_to?(:to_int) } + case flat_query[0] + when Symbol + flat_query.inject(0) do |val, o| + v = @kv_map[o] + if v then val |= v else val end + end + when Integer, ->(o) { o.respond_to?(:to_int) } + val = flat_query.inject(0) { |mask, o| mask |= o.to_int } + @kv_map.select { |_, v| v & val != 0 }.keys + end + end + + # Get the native value of a bitmask + # @overload to_native(query, ctx) + # @param [Symbol, Integer, #to_int] query + # @param ctx unused + # @return [Integer] value of a bitmask + # @overload to_native(query, ctx) + # @param [Array] query + # @param ctx unused + # @return [Integer] value of a bitmask + def to_native(query, ctx) + return 0 if query.nil? + flat_query = [query].flatten + flat_query.inject(0) do |val, o| + case o + when Symbol + v = @kv_map[o] + raise ArgumentError, "invalid bitmask value, #{o.inspect}" unless v + val |= v + when Integer + val |= o + when ->(obj) { obj.respond_to?(:to_int) } + val |= o.to_int + else + raise ArgumentError, "invalid bitmask value, #{o.inspect}" + end + end + end + + # @param [Integer] val + # @param ctx unused + # @return [Array] list of symbol names corresponding to val, plus an optional remainder if some bits don't match any constant + def from_native(val, ctx) + list = @kv_map.select { |_, v| v & val != 0 }.keys + # If there are unmatch flags, + # return them in an integer, + # else information can be lost. + # Similar to Enum behavior. + remainder = val ^ list.inject(0) do |tmp, o| + v = @kv_map[o] + if v then tmp |= v else tmp end + end + list.push remainder unless remainder == 0 + return list + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/errno.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/errno.rb new file mode 100644 index 0000000..de82d89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/errno.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # @return (see FFI::LastError.error) + # @see FFI::LastError.error + def self.errno + FFI::LastError.error + end + # @param error (see FFI::LastError.error=) + # @return (see FFI::LastError.error=) + # @see FFI::LastError.error= + def self.errno=(error) + FFI::LastError.error = error + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/ffi.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/ffi.rb new file mode 100644 index 0000000..5201dae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/ffi.rb @@ -0,0 +1,46 @@ +# +# Copyright (C) 2008-2010 JRuby project +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
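A short sketch of the FFI.errno accessors defined above in ffi/errno.rb, assuming a POSIX libc (the failing chdir call is just an easy way to set errno):

  require 'ffi'

  module LibC
    extend FFI::Library
    ffi_lib FFI::Library::LIBC
    attach_function :chdir, [:string], :int
  end

  if LibC.chdir('/no/such/directory') == -1
    puts "chdir failed, errno = #{FFI.errno}"   # e.g. 2 (ENOENT)
  end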
+ +require 'ffi/platform' +require 'ffi/data_converter' +require 'ffi/types' +require 'ffi/library' +require 'ffi/errno' +require 'ffi/pointer' +require 'ffi/memorypointer' +require 'ffi/struct' +require 'ffi/union' +require 'ffi/managedstruct' +require 'ffi/callback' +require 'ffi/io' +require 'ffi/autopointer' +require 'ffi/variadic' +require 'ffi/enum' +require 'ffi/version' diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/io.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/io.rb new file mode 100644 index 0000000..7fa1cf7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/io.rb @@ -0,0 +1,62 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + + # This module implements a couple of class methods to play with IO. + module IO + # @param [Integer] fd file decriptor + # @param [String] mode mode string + # @return [::IO] + # Synonym for IO::for_fd. + def self.for_fd(fd, mode = "r") + ::IO.for_fd(fd, mode) + end + + # @param [#read] io io to read from + # @param [AbstractMemory] buf destination for data read from +io+ + # @param [nil, Numeric] len maximul number of bytes to read from +io+. If +nil+, + # read until end of file. + # @return [Numeric] length really read, in bytes + # + # A version of IO#read that reads data from an IO and put then into a native buffer. + # + # This will be optimized at some future time to eliminate the double copy. + # + def self.native_read(io, buf, len) + tmp = io.read(len) + return -1 unless tmp + buf.put_bytes(0, tmp) + tmp.length + end + + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/library.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/library.rb new file mode 100644 index 0000000..1a96c8e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/library.rb @@ -0,0 +1,592 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. 
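The FFI::IO.native_read helper shown above in ffi/io.rb copies from a Ruby IO into native memory; a small sketch reading this script into an FFI::Buffer:

  require 'ffi'

  buf = FFI::Buffer.new(64)                # 64 bytes of native memory
  File.open(__FILE__, 'rb') do |io|
    n = FFI::IO.native_read(io, buf, 64)   # bytes copied, or -1 at EOF
    puts buf.get_bytes(0, n) unless n == -1
  end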
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + CURRENT_PROCESS = USE_THIS_PROCESS_AS_LIBRARY = Object.new + + # @param [#to_s] lib library name + # @return [String] library name formatted for current platform + # Transform a generic library name to a platform library name + # @example + # # Linux + # FFI.map_library_name 'c' # -> "libc.so.6" + # FFI.map_library_name 'jpeg' # -> "libjpeg.so" + # # Windows + # FFI.map_library_name 'c' # -> "msvcrt.dll" + # FFI.map_library_name 'jpeg' # -> "jpeg.dll" + def self.map_library_name(lib) + # Mangle the library name to reflect the native library naming conventions + lib = Library::LIBC if lib == 'c' + + if lib && File.basename(lib) == lib + lib = Platform::LIBPREFIX + lib unless lib =~ /^#{Platform::LIBPREFIX}/ + r = Platform::IS_WINDOWS || Platform::IS_MAC ? "\\.#{Platform::LIBSUFFIX}$" : "\\.so($|\\.[1234567890]+)" + lib += ".#{Platform::LIBSUFFIX}" unless lib =~ /#{r}/ + end + + lib + end + + # Exception raised when a function is not found in libraries + class NotFoundError < LoadError + def initialize(function, *libraries) + super("Function '#{function}' not found in [#{libraries[0].nil? ? 'current process' : libraries.join(", ")}]") + end + end + + # This module is the base to use native functions. + # + # A basic usage may be: + # require 'ffi' + # + # module Hello + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function 'puts', [ :string ], :int + # end + # + # Hello.puts("Hello, World") + # + # + module Library + CURRENT_PROCESS = FFI::CURRENT_PROCESS + LIBC = FFI::Platform::LIBC + + # @param mod extended object + # @return [nil] + # @raise {RuntimeError} if +mod+ is not a Module + # Test if extended object is a Module. If not, raise RuntimeError. + def self.extended(mod) + raise RuntimeError.new("must only be extended by module") unless mod.kind_of?(Module) + end + + + # @param [Array] names names of libraries to load + # @return [Array] + # @raise {LoadError} if a library cannot be opened + # Load native libraries. 
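The ffi_lib call documented above also accepts an array of alternative names, tried in order until one loads; a sketch with hypothetical libcurl names:

  module Curl
    extend FFI::Library
    # Raises LoadError only if none of the candidates can be opened.
    ffi_lib ['curl', 'libcurl.so.4', 'libcurl.4.dylib']
  end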
+ def ffi_lib(*names) + raise LoadError.new("library names list must not be empty") if names.empty? + + lib_flags = defined?(@ffi_lib_flags) ? @ffi_lib_flags : FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL + ffi_libs = names.map do |name| + + if name == FFI::CURRENT_PROCESS + FFI::DynamicLibrary.open(nil, FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL) + + else + libnames = (name.is_a?(::Array) ? name : [ name ]).map(&:to_s).map { |n| [ n, FFI.map_library_name(n) ].uniq }.flatten.compact + lib = nil + errors = {} + + libnames.each do |libname| + begin + orig = libname + lib = FFI::DynamicLibrary.open(libname, lib_flags) + break if lib + + rescue Exception => ex + ldscript = false + if ex.message =~ /(([^ \t()])+\.so([^ \t:()])*):([ \t])*(invalid ELF header|file too short|invalid file format)/ + if File.read($1) =~ /(?:GROUP|INPUT) *\( *([^ \)]+)/ + libname = $1 + ldscript = true + end + end + + if ldscript + retry + else + # TODO better library lookup logic + unless libname.start_with?("/") || FFI::Platform.windows? + path = ['/usr/lib/','/usr/local/lib/','/opt/local/lib/'].find do |pth| + File.exist?(pth + libname) + end + if path + libname = path + libname + retry + end + end + + libr = (orig == libname ? orig : "#{orig} #{libname}") + errors[libr] = ex + end + end + end + + if lib.nil? + raise LoadError.new(errors.values.join(".\n")) + end + + # return the found lib + lib + end + end + + @ffi_libs = ffi_libs + end + + # Set the calling convention for {#attach_function} and {#callback} + # + # @see http://en.wikipedia.org/wiki/Stdcall#stdcall + # @note +:stdcall+ is typically used for attaching Windows API functions + # + # @param [Symbol] convention one of +:default+, +:stdcall+ + # @return [Symbol] the new calling convention + def ffi_convention(convention = nil) + @ffi_convention ||= :default + @ffi_convention = convention if convention + @ffi_convention + end + + # @see #ffi_lib + # @return [Array] array of currently loaded FFI libraries + # @raise [LoadError] if no libraries have been loaded (using {#ffi_lib}) + # Get FFI libraries loaded using {#ffi_lib}. + def ffi_libraries + raise LoadError.new("no library specified") if !defined?(@ffi_libs) || @ffi_libs.empty? + @ffi_libs + end + + # Flags used in {#ffi_lib}. + # + # This map allows you to supply symbols to {#ffi_lib_flags} instead of + # the actual constants. + FlagsMap = { + :global => DynamicLibrary::RTLD_GLOBAL, + :local => DynamicLibrary::RTLD_LOCAL, + :lazy => DynamicLibrary::RTLD_LAZY, + :now => DynamicLibrary::RTLD_NOW + } + + # Sets library flags for {#ffi_lib}. + # + # @example + # ffi_lib_flags(:lazy, :local) # => 5 + # + # @param [Symbol, …] flags (see {FlagsMap}) + # @return [Fixnum] the new value + def ffi_lib_flags(*flags) + @ffi_lib_flags = flags.inject(0) { |result, f| result | FlagsMap[f] } + end + + + ## + # @overload attach_function(func, args, returns, options = {}) + # @example attach function without an explicit name + # module Foo + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :malloc, [:size_t], :pointer + # end + # # now callable via Foo.malloc + # @overload attach_function(name, func, args, returns, options = {}) + # @example attach function with an explicit name + # module Bar + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :c_malloc, :malloc, [:size_t], :pointer + # end + # # now callable via Bar.c_malloc + # + # Attach C function +func+ to this module. 
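On Windows, ffi_lib_flags and ffi_convention above combine with attach_function (documented next) roughly as follows; kernel32/GetTickCount are standard Win32, but the sketch is only meaningful on a Windows host:

  module Kernel32
    extend FFI::Library
    ffi_lib_flags :lazy, :global     # must be set before ffi_lib
    ffi_lib 'kernel32'
    ffi_convention :stdcall          # Win32 API calling convention
    attach_function :GetTickCount, [], :uint
  end

  Kernel32.GetTickCount              # => milliseconds since boot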
+ # + # + # @param [#to_s] name name of ruby method to attach as + # @param [#to_s] func name of C function to attach + # @param [Array] args an array of types + # @param [Symbol] returns type of return value + # @option options [Boolean] :blocking (@blocking) set to true if the C function is a blocking call + # @option options [Symbol] :convention (:default) calling convention (see {#ffi_convention}) + # @option options [FFI::Enums] :enums + # @option options [Hash] :type_map + # + # @return [FFI::VariadicInvoker] + # + # @raise [FFI::NotFoundError] if +func+ cannot be found in the attached libraries (see {#ffi_lib}) + def attach_function(name, func, args, returns = nil, options = nil) + mname, a2, a3, a4, a5 = name, func, args, returns, options + cname, arg_types, ret_type, opts = (a4 && (a2.is_a?(String) || a2.is_a?(Symbol))) ? [ a2, a3, a4, a5 ] : [ mname.to_s, a2, a3, a4 ] + + # Convert :foo to the native type + arg_types = arg_types.map { |e| find_type(e) } + options = { + :convention => ffi_convention, + :type_map => defined?(@ffi_typedefs) ? @ffi_typedefs : nil, + :blocking => defined?(@blocking) && @blocking, + :enums => defined?(@ffi_enums) ? @ffi_enums : nil, + } + + @blocking = false + options.merge!(opts) if opts && opts.is_a?(Hash) + + # Try to locate the function in any of the libraries + invokers = [] + ffi_libraries.each do |lib| + if invokers.empty? + begin + function = nil + function_names(cname, arg_types).find do |fname| + function = lib.find_function(fname) + end + raise LoadError unless function + + invokers << if arg_types.length > 0 && arg_types[arg_types.length - 1] == FFI::NativeType::VARARGS + VariadicInvoker.new(function, arg_types, find_type(ret_type), options) + + else + Function.new(find_type(ret_type), arg_types, function, options) + end + + rescue LoadError + end + end + end + invoker = invokers.compact.shift + raise FFI::NotFoundError.new(cname.to_s, ffi_libraries.map { |lib| lib.name }) unless invoker + + invoker.attach(self, mname.to_s) + invoker + end + + # @param [#to_s] name function name + # @param [Array] arg_types function's argument types + # @return [Array] + # This function returns a list of possible names to lookup. + # @note Function names on windows may be decorated if they are using stdcall. See + # * http://en.wikipedia.org/wiki/Name_mangling#C_name_decoration_in_Microsoft_Windows + # * http://msdn.microsoft.com/en-us/library/zxk0tw93%28v=VS.100%29.aspx + # * http://en.wikibooks.org/wiki/X86_Disassembly/Calling_Conventions#STDCALL + # Note that decorated names can be overridden via def files. Also note that the + # windows api, although using, doesn't have decorated names. 
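One attach_function option worth a sketch: :blocking => true releases the Ruby GVL for the duration of the C call, here with libc sleep(3) standing in for any long-running function:

  module Sleeper
    extend FFI::Library
    ffi_lib FFI::Library::LIBC
    # Without blocking: true, other Ruby threads would stall during the call.
    attach_function :c_sleep, :sleep, [:uint], :uint, blocking: true
  end

  Sleeper.c_sleep(1)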
+ def function_names(name, arg_types) + result = [name.to_s] + if ffi_convention == :stdcall + # Get the size of each parameter + size = arg_types.inject(0) do |mem, arg| + size = arg.size + # The size must be a multiple of 4 + size += (4 - size) % 4 + mem + size + end + + result << "_#{name.to_s}@#{size}" # win32 + result << "#{name.to_s}@#{size}" # win64 + end + result + end + + # @overload attach_variable(mname, cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [#to_s] cname name of C variable to attach + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :c_myvar, :myvar, :long + # end + # # now callable via Bar.c_myvar + # @overload attach_variable(cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :myvar, :long + # end + # # now callable via Bar.myvar + # @return [DynamicLibrary::Symbol] + # @raise {FFI::NotFoundError} if +cname+ cannot be found in libraries + # + # Attach C variable +cname+ to this module. + def attach_variable(mname, a1, a2 = nil) + cname, type = a2 ? [ a1, a2 ] : [ mname.to_s, a1 ] + address = nil + ffi_libraries.each do |lib| + begin + address = lib.find_variable(cname.to_s) + break unless address.nil? + rescue LoadError + end + end + + raise FFI::NotFoundError.new(cname, ffi_libraries) if address.nil? || address.null? + if type.is_a?(Class) && type < FFI::Struct + # If it is a global struct, just attach directly to the pointer + s = s = type.new(address) # Assigning twice to suppress unused variable warning + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname} + end + code + + else + sc = Class.new(FFI::Struct) + sc.layout :gvar, find_type(type) + s = sc.new(address) + # + # Attach to this module as mname/mname= + # + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname}[:gvar] + end + def self.#{mname}=(value) + @@ffi_gvar_#{mname}[:gvar] = value + end + code + + end + + address + end + + + # @overload callback(name, params, ret) + # @param name callback name to add to type map + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @overload callback(params, ret) + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @return [FFI::CallbackInfo] + def callback(*args) + raise ArgumentError, "wrong number of arguments" if args.length < 2 || args.length > 3 + name, params, ret = if args.length == 3 + args + else + [ nil, args[0], args[1] ] + end + + native_params = params.map { |e| find_type(e) } + raise ArgumentError, "callbacks cannot have variadic parameters" if native_params.include?(FFI::Type::VARARGS) + options = Hash.new + options[:convention] = ffi_convention + options[:enums] = @ffi_enums if defined?(@ffi_enums) + ret_type = find_type(ret) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + cb = FFI::CallbackInfo.new(ret_type, native_params, options) + + # Add to the symbol -> type map (unless there was no name) + unless name.nil? 
+ typedef cb, name + end + + cb + end + + # Register or get an already registered type definition. + # + # To register a new type definition, +old+ should be a {FFI::Type}. +add+ + # is in this case the type definition. + # + # If +old+ is a {DataConverter}, a {Type::Mapped} is returned. + # + # If +old+ is +:enum+ + # * and +add+ is an +Array+, a call to {#enum} is made with +add+ as single parameter; + # * in others cases, +info+ is used to create a named enum. + # + # If +old+ is a key for type map, #typedef get +old+ type definition. + # + # @param [DataConverter, Symbol, Type] old + # @param [Symbol] add + # @param [Symbol] info + # @return [FFI::Enum, FFI::Type] + def typedef(old, add, info=nil) + @ffi_typedefs = Hash.new unless defined?(@ffi_typedefs) + + @ffi_typedefs[add] = if old.kind_of?(FFI::Type) + old + + elsif @ffi_typedefs.has_key?(old) + @ffi_typedefs[old] + + elsif old.is_a?(DataConverter) + FFI::Type::Mapped.new(old) + + elsif old == :enum + if add.kind_of?(Array) + self.enum(add) + else + self.enum(info, add) + end + + else + FFI.find_type(old) + end + end + + private + # Generic enum builder + # @param [Class] klass can be one of FFI::Enum or FFI::Bitmask + # @param args (see #enum or #bitmask) + def generic_enum(klass, *args) + native_type = args.first.kind_of?(FFI::Type) ? args.shift : nil + name, values = if args[0].kind_of?(Symbol) && args[1].kind_of?(Array) + [ args[0], args[1] ] + elsif args[0].kind_of?(Array) + [ nil, args[0] ] + else + [ nil, args ] + end + @ffi_enums = FFI::Enums.new unless defined?(@ffi_enums) + @ffi_enums << (e = native_type ? klass.new(native_type, values, name) : klass.new(values, name)) + + # If called with a name, add a typedef alias + typedef(e, name) if name + e + end + + public + # @overload enum(name, values) + # Create a named enum. + # @example + # enum :foo, [:zero, :one, :two] # named enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(*args) + # Create an unnamed enum. + # @example + # enum :zero, :one, :two # unnamed enum + # @param args values for enum + # @overload enum(values) + # Create an unnamed enum. + # @example + # enum [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [Array] values values for enum + # @overload enum(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :foo, [:zero, :one, :two] # named enum + # @param [FFI::Type] native_type native type for new enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(native_type, *args) + # Create an unnamed enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :zero, :one, :two # unnamed enum + # @param [FFI::Type] native_type native type for new enum + # @param args values for enum + # @overload enum(native_type, values) + # Create an unnamed enum and specify the native type. + # @example + # enum Type::UINT64, [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [FFI::Type] native_type native type for new enum + # @param [Array] values values for enum + # @return [FFI::Enum] + # Create a new {FFI::Enum}. 
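A short sketch of the enum overloads documented above, including the C-style value override (counting resumes after an explicit integer):

  module Compass
    extend FFI::Library
    enum :direction, [:north, :east, 2, :south, :west]
  end

  Compass.enum_type(:direction)[:north]   # => 0
  Compass.enum_value(:east)               # => 2 (explicit value)
  Compass.enum_type(:direction)[3]        # => :south
  Compass.enum_type(:direction)[:west]    # => 4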
+ def enum(*args) + generic_enum(FFI::Enum, *args) + end + + # @overload bitmask(name, values) + # Create a named bitmask + # @example + # bitmask :foo, [:red, :green, :blue] # bits 0,1,2 are used + # bitmask :foo, [:red, :green, 5, :blue] # bits 0,5,6 are used + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(*args) + # Create an unamed bitmask + # @example + # bm = bitmask :red, :green, :blue # bits 0,1,2 are used + # bm = bitmask :red, :green, 5, blue # bits 0,5,6 are used + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(values) + # Create an unamed bitmask + # @example + # bm = bitmask [:red, :green, :blue] # bits 0,1,2 are used + # bm = bitmask [:red, :green, 5, blue] # bits 0,5,6 are used + # @param [Array] values for new bitmask + # @overload bitmask(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, :foo, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(native_type, *args) + # @example + # bitmask FFI::Type::UINT64, :red, :green, :blue + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(native_type, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Array] values for new bitmask + # @return [FFI::Bitmask] + # Create a new FFI::Bitmask + def bitmask(*args) + generic_enum(FFI::Bitmask, *args) + end + + # @param name + # @return [FFI::Enum] + # Find an enum by name. + def enum_type(name) + @ffi_enums.find(name) if defined?(@ffi_enums) + end + + # @param symbol + # @return [FFI::Enum] + # Find an enum by a symbol it contains. + def enum_value(symbol) + @ffi_enums.__map_symbol(symbol) + end + + # @param [DataConverter, Type, Struct, Symbol] t type to find + # @return [Type] + # Find a type definition. + def find_type(t) + if t.kind_of?(Type) + t + + elsif defined?(@ffi_typedefs) && @ffi_typedefs.has_key?(t) + @ffi_typedefs[t] + + elsif t.is_a?(Class) && t < Struct + Type::POINTER + + elsif t.is_a?(DataConverter) + # Add a typedef so next time the converter is used, it hits the cache + typedef Type::Mapped.new(t), t + + end || FFI.find_type(t) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/managedstruct.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/managedstruct.rb new file mode 100644 index 0000000..0536280 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/managedstruct.rb @@ -0,0 +1,84 @@ +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + # + # FFI::ManagedStruct allows custom garbage-collection of your FFI::Structs. + # + # The typical use case would be when interacting with a library + # that has a nontrivial memory management design, such as a linked + # list or a binary tree. + # + # When the {Struct} instance is garbage collected, FFI::ManagedStruct will + # invoke the class's release() method during object finalization. + # + # @example Example usage: + # module MyLibrary + # ffi_lib "libmylibrary" + # attach_function :new_dlist, [], :pointer + # attach_function :destroy_dlist, [:pointer], :void + # end + # + # class DoublyLinkedList < FFI::ManagedStruct + # @@@ + # struct do |s| + # s.name 'struct dlist' + # s.include 'dlist.h' + # s.field :head, :pointer + # s.field :tail, :pointer + # end + # @@@ + # + # def self.release ptr + # MyLibrary.destroy_dlist(ptr) + # end + # end + # + # begin + # ptr = DoublyLinkedList.new(MyLibrary.new_dlist) + # # do something with the list + # end + # # struct is out of scope, and will be GC'd using DoublyLinkedList#release + # + # + class ManagedStruct < FFI::Struct + + # @overload initialize(pointer) + # @param [Pointer] pointer + # Create a new ManagedStruct which will invoke the class method #release on + # @overload initialize + # A new instance of FFI::ManagedStruct. + def initialize(pointer=nil) + raise NoMethodError, "release() not implemented for class #{self}" unless self.class.respond_to? :release + raise ArgumentError, "Must supply a pointer to memory for the Struct" unless pointer + super AutoPointer.new(pointer, self.class.method(:release)) + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/memorypointer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/memorypointer.rb new file mode 100644 index 0000000..9f07bc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/memorypointer.rb @@ -0,0 +1 @@ +# This class is now implemented in C diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform.rb new file mode 100644 index 0000000..ebc9d8b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform.rb @@ -0,0 +1,179 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +require 'rbconfig' +module FFI + class PlatformError < LoadError; end + + # This module defines different constants and class methods to play with + # various platforms. + module Platform + OS = case RbConfig::CONFIG['host_os'].downcase + when /linux/ + "linux" + when /darwin/ + "darwin" + when /freebsd/ + "freebsd" + when /netbsd/ + "netbsd" + when /openbsd/ + "openbsd" + when /dragonfly/ + "dragonflybsd" + when /sunos|solaris/ + "solaris" + when /mingw|mswin/ + "windows" + else + RbConfig::CONFIG['host_os'].downcase + end + + OSVERSION = RbConfig::CONFIG['host_os'].gsub(/[^\d]/, '').to_i + + CPU = RbConfig::CONFIG['host_cpu'] + + ARCH = case CPU.downcase + when /amd64|x86_64|x64/ + "x86_64" + when /i?86|x86|i86pc/ + "i386" + when /ppc64|powerpc64/ + "powerpc64" + when /ppc|powerpc/ + "powerpc" + when /sparcv9|sparc64/ + "sparcv9" + else + case RbConfig::CONFIG['host_cpu'] + when /^arm/ + "arm" + else + RbConfig::CONFIG['host_cpu'] + end + end + + private + # @param [String) os + # @return [Boolean] + # Test if current OS is +os+. + def self.is_os(os) + OS == os + end + + IS_GNU = defined?(GNU_LIBC) + IS_LINUX = is_os("linux") + IS_MAC = is_os("darwin") + IS_FREEBSD = is_os("freebsd") + IS_NETBSD = is_os("netbsd") + IS_OPENBSD = is_os("openbsd") + IS_DRAGONFLYBSD = is_os("dragonfly") + IS_SOLARIS = is_os("solaris") + IS_WINDOWS = is_os("windows") + IS_BSD = IS_MAC || IS_FREEBSD || IS_NETBSD || IS_OPENBSD || IS_DRAGONFLYBSD + + # Add the version for known ABI breaks + name_version = "12" if IS_FREEBSD && OSVERSION >= 12 # 64-bit inodes + + NAME = "#{ARCH}-#{OS}#{name_version}" + CONF_DIR = File.join(File.dirname(__FILE__), 'platform', NAME) + + public + + LIBPREFIX = case OS + when /windows|msys/ + '' + when /cygwin/ + 'cyg' + else + 'lib' + end + + LIBSUFFIX = case OS + when /darwin/ + 'dylib' + when /linux|bsd|solaris/ + 'so' + when /windows|cygwin|msys/ + 'dll' + else + # Punt and just assume a sane unix (i.e. 
anything but AIX) + 'so' + end + + LIBC = if IS_WINDOWS + if RbConfig::CONFIG['host_os'] =~ /mingw/i + RbConfig::CONFIG['RUBY_SO_NAME'].split('-')[-2] + '.dll' + else + "ucrtbase.dll" + end + elsif IS_GNU + GNU_LIBC + elsif OS == 'cygwin' + "cygwin1.dll" + elsif OS == 'msys' + # Not sure how msys 1.0 behaves, tested on MSYS2. + "msys-2.0.dll" + else + "#{LIBPREFIX}c.#{LIBSUFFIX}" + end + + # Test if current OS is a *BSD (include MAC) + # @return [Boolean] + def self.bsd? + IS_BSD + end + + # Test if current OS is Windows + # @return [Boolean] + def self.windows? + IS_WINDOWS + end + + # Test if current OS is Mac OS + # @return [Boolean] + def self.mac? + IS_MAC + end + + # Test if current OS is Solaris (Sun OS) + # @return [Boolean] + def self.solaris? + IS_SOLARIS + end + + # Test if current OS is a unix OS + # @return [Boolean] + def self.unix? + !IS_WINDOWS + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types.conf new file mode 100644 index 0000000..8255343 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = 
uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types.conf new file mode 100644 index 0000000..c296932 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-freebsd12/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = 
ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long 
+rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types.conf new file mode 100644 index 0000000..4cd2438 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/aarch64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong 
+rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types.conf new file mode 100644 index 0000000..cfbb90f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int 
+rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = 
long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-freebsd12/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long 
+rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int 
= uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types.conf new file mode 100644 index 0000000..a070d39 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/arm-linux/types.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long 
+rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types.conf new file mode 100644 index 0000000..93f6b86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-cygwin/types.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-darwin/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t 
= long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types.conf new file mode 100644 index 0000000..6c882d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long 
+rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long 
+rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-freebsd12/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long 
+rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types.conf new file mode 100644 index 
0000000..fa2fa8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-gnu/types.conf @@ -0,0 +1,107 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsid_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__pthread_key = int +rbx.platform.typedef.__pthread_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__sigset_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.fsid_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.pthread_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = 
long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sigset_t = ulong +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types.conf new file mode 100644 index 0000000..feb6bc4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-linux/types.conf @@ -0,0 +1,103 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clockid_t = int 
+rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types.conf new file mode 100644 index 0000000..a5aba89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-netbsd/types.conf @@ -0,0 +1,126 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = 
ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort 
+rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-openbsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint 
+rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types.conf new file mode 100644 index 0000000..22a2414 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-solaris/types.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int 
+rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint 
+rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types.conf new file mode 100644 index 0000000..a5d0b05 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/i386-windows/types.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = int +rbx.platform.typedef._sigset_t = ulong +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rsize_t = uint +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types.conf new file mode 100644 index 0000000..e0eeecc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/ia64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint 
+rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types.conf 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = 
ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int 
+rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mips64el-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int 
+rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsel-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint 
+rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong 
+rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa32r6el-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long 
+rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong 
+rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/mipsisa64r6el-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong 
+rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint 
= uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types.conf new file mode 100644 index 0000000..cbd20e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-aix/types.conf @@ -0,0 +1,180 @@ +rbx.platform.typedef.UTF32Char = uint +rbx.platform.typedef.UniChar = ushort +rbx.platform.typedef.__cptr32 = string +rbx.platform.typedef.__cptr64 = ulong_long +rbx.platform.typedef.__long32_t = long +rbx.platform.typedef.__long64_t = int +rbx.platform.typedef.__ptr32 = pointer +rbx.platform.typedef.__ptr64 = ulong_long +rbx.platform.typedef.__ulong32_t = ulong +rbx.platform.typedef.__ulong64_t = uint +rbx.platform.typedef.aptx_t = ushort +rbx.platform.typedef.blkcnt32_t = int +rbx.platform.typedef.blkcnt64_t = ulong_long +rbx.platform.typedef.blkcnt_t = int +rbx.platform.typedef.blksize32_t = int +rbx.platform.typedef.blksize64_t = ulong_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.boolean_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.chan_t = int +rbx.platform.typedef.class_id_t = uint +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = long_long +rbx.platform.typedef.cnt64_t = long_long +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.crid_t = int +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev32_t = uint +rbx.platform.typedef.dev64_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.esid_t = uint +rbx.platform.typedef.ext_t = int +rbx.platform.typedef.fpos64_t = long_long +rbx.platform.typedef.fpos_t = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino32_t = uint +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16 = short +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32 = int +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int32long64_t = int +rbx.platform.typedef.int64 = long_long +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8 = char +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intfast_t = int +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.krpn_t = int +rbx.platform.typedef.kvmhandle_t = ulong +rbx.platform.typedef.kvmid_t = long +rbx.platform.typedef.kvpn_t = int +rbx.platform.typedef.level_t = int +rbx.platform.typedef.liobn_t = uint +rbx.platform.typedef.long32int64_t = long +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.mid_t = pointer +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.mtyp_t = long +rbx.platform.typedef.nlink_t = short +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.paddr_t = 
long +rbx.platform.typedef.pdtx_t = int +rbx.platform.typedef.pid32_t = int +rbx.platform.typedef.pid64_t = ulong_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pshift_t = ushort +rbx.platform.typedef.psize_t = long_long +rbx.platform.typedef.psx_t = short +rbx.platform.typedef.ptex_t = ulong +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.rpn64_t = long_long +rbx.platform.typedef.rpn_t = int +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.signal_t = int +rbx.platform.typedef.size64_t = ulong_long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.slab_t[12] = char +rbx.platform.typedef.snidx_t = int +rbx.platform.typedef.socklen_t = ulong +rbx.platform.typedef.soff_t = int +rbx.platform.typedef.sshift_t = ushort +rbx.platform.typedef.ssize64_t = long_long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.swhatx_t = ulong +rbx.platform.typedef.tid32_t = int +rbx.platform.typedef.tid64_t = ulong_long +rbx.platform.typedef.tid_t = int +rbx.platform.typedef.time32_t = int +rbx.platform.typedef.time64_t = long_long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer32_t = int +rbx.platform.typedef.timer64_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16 = ushort +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32 = uint +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64 = ulong_long +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8 = uchar +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar = uchar +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint32long64_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintfast_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong32int64_t = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unidx_t = int +rbx.platform.typedef.unit_addr_t = ulong_long +rbx.platform.typedef.ureg_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.va_list = string +rbx.platform.typedef.vmfkey_t = uint +rbx.platform.typedef.vmhandle32_t = uint +rbx.platform.typedef.vmhandle_t = ulong +rbx.platform.typedef.vmhwkey_t = int +rbx.platform.typedef.vmid32_t = int +rbx.platform.typedef.vmid64_t = long_long +rbx.platform.typedef.vmid_t = long 
+rbx.platform.typedef.vmidx_t = int +rbx.platform.typedef.vmkey_t = int +rbx.platform.typedef.vmlpghandle_t = ulong +rbx.platform.typedef.vmm_lock_t = int +rbx.platform.typedef.vmnodeidx_t = int +rbx.platform.typedef.vmprkey_t = uint +rbx.platform.typedef.vmsize_t = int +rbx.platform.typedef.vpn_t = int +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = uint +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-darwin/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort 
+rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types.conf new file mode 100644 index 0000000..42eec13 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-linux/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long 
+rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = long diff 
--git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc-openbsd/types.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int 
+rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/powerpc64-linux/types.conf @@ -0,0 +1,104 @@ 
+rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long 
+rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types.conf new file mode 100644 index 0000000..291b032 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/s390-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint 
+rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types.conf new file mode 100644 index 0000000..32a7feb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/s390x-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar 
+rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types.conf new file mode 100644 index 0000000..aea09d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int 
+rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc-solaris/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = 
pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long 
+rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types.conf new file mode 100644 index 0000000..7626bfc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparc64-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = 
uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-openbsd/types.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int 
+rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong 
+rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/sparcv9-solaris/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short 
+rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar 
+rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types.conf new file mode 100644 index 0000000..5bef6d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-cygwin/types.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int64 +rbx.platform.typedef.size_t = uint64 +rbx.platform.typedef.ssize_t = int64 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types.conf new file mode 100644 index 0000000..68841bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-darwin/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = long +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_string_t[37] = char +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long 
+rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.rsize_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sae_associd_t = uint +rbx.platform.typedef.sae_connid_t = uint +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_off_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types.conf new file mode 100644 index 0000000..889f6a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-dragonflybsd/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = ulong +rbx.platform.typedef.__fd_mask = 
ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intlp_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintlp_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong 
+rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_daddr_t = uint +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.v_caddr_t = pointer +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types.conf new file mode 100644 index 0000000..8dbe370 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t 
= int +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types.conf 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types.conf new file mode 100644 index 0000000..31b1073 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-freebsd12/types.conf @@ -0,0 +1,158 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__char16_t = ushort +rbx.platform.typedef.__char32_t = uint +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = long +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__rman_res_t = ulong +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = long +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = ulong +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = ulong +rbx.platform.typedef.__vm_paddr_t = ulong +rbx.platform.typedef.__vm_size_t = ulong +rbx.platform.typedef.__wint_t = int 
+rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.cap_ioctl_t = ulong +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.ksize_t = ulong +rbx.platform.typedef.kvaddr_t = ulong +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.rman_res_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sbintime_t = long +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_register_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = ulong +rbx.platform.typedef.vm_ooffset_t = long +rbx.platform.typedef.vm_paddr_t = ulong +rbx.platform.typedef.vm_pindex_t = ulong +rbx.platform.typedef.vm_size_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types.conf new file mode 100644 index 0000000..060f161 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-linux/types.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long 
+rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = long +rbx.platform.typedef.int_fast32_t = long +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = 
long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ulong +rbx.platform.typedef.uint_fast32_t = ulong +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-netbsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int 
+rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types.conf 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types.conf new file mode 100644 index 0000000..6abc9c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-openbsd/types.conf @@ -0,0 +1,134 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long 
+rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types.conf new file mode 100644 index 0000000..a7890d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-solaris/types.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = int +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt64_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = int +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong +rbx.platform.typedef.ino_t 
= ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = int +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = uint +rbx.platform.typedef.minor_t = uint +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.poolid_t = int +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = int +rbx.platform.typedef.t_uscalar_t = uint +rbx.platform.typedef.taskid_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong +rbx.platform.typedef.use_t = uchar 
+rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types.conf new file mode 100644 index 0000000..5bdbbde --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/platform/x86_64-windows/types.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = long_long +rbx.platform.typedef._sigset_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long_long +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = long_long +rbx.platform.typedef.ptrdiff_t = long_long +rbx.platform.typedef.rsize_t = ulong_long +rbx.platform.typedef.size_t = ulong_long +rbx.platform.typedef.ssize_t = long_long +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong_long +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/pointer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/pointer.rb new file mode 100644 index 0000000..a5ee655 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/pointer.rb @@ -0,0 +1,167 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' + +# NOTE: all method definitions in this file are conditional on +# whether they are not already defined. This is needed because +# some Ruby implementations (e.g., TruffleRuby) might already +# provide these methods due to using FFI internally, and we +# should not override them to avoid warnings. + +module FFI + class Pointer + + # Pointer size + SIZE = Platform::ADDRESS_SIZE / 8 unless const_defined?(:SIZE) + + # Return the size of a pointer on the current platform, in bytes + # @return [Numeric] + def self.size + SIZE + end unless respond_to?(:size) + + # @param [nil,Numeric] len length of string to return + # @return [String] + # Read pointer's contents as a string, or the first +len+ bytes of the + # equivalent string if +len+ is not +nil+. + def read_string(len=nil) + if len + return ''.b if len == 0 + get_bytes(0, len) + else + get_string(0) + end + end unless method_defined?(:read_string) + + # @param [Numeric] len length of string to return + # @return [String] + # Read the first +len+ bytes of pointer's contents as a string. + # + # Same as: + # ptr.read_string(len) # with len not nil + def read_string_length(len) + get_bytes(0, len) + end unless method_defined?(:read_string_length) + + # @return [String] + # Read pointer's contents as a string. + # + # Same as: + # ptr.read_string # with no len + def read_string_to_null + get_string(0) + end unless method_defined?(:read_string_to_null) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +len+ first bytes of +str+ in pointer's contents. + # + # Same as: + # ptr.write_string(str, len) # with len not nil + def write_string_length(str, len) + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string_length) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +str+ in pointer's contents, or first +len+ bytes if + # +len+ is not +nil+. 
+ def write_string(str, len=nil) + len = str.bytesize unless len + # Write the string data without NUL termination + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string) + + # @param [Type] type type of data to read from pointer's contents + # @param [Symbol] reader method to send to +self+ to read +type+ + # @param [Numeric] length + # @return [Array] + # Read an array of +type+ of length +length+. + # @example + # ptr.read_array_of_type(TYPE_UINT8, :read_uint8, 4) # -> [1, 2, 3, 4] + def read_array_of_type(type, reader, length) + ary = [] + size = FFI.type_size(type) + tmp = self + length.times { |j| + ary << tmp.send(reader) + tmp += size unless j == length-1 # avoid OOB + } + ary + end unless method_defined?(:read_array_of_type) + + # @param [Type] type type of data to write to pointer's contents + # @param [Symbol] writer method to send to +self+ to write +type+ + # @param [Array] ary + # @return [self] + # Write +ary+ in pointer's contents as +type+. + # @example + # ptr.write_array_of_type(TYPE_UINT8, :put_uint8, [1, 2, 3 ,4]) + def write_array_of_type(type, writer, ary) + size = FFI.type_size(type) + ary.each_with_index { |val, i| + break unless i < self.size + self.send(writer, i * size, val) + } + self + end unless method_defined?(:write_array_of_type) + + # @return [self] + def to_ptr + self + end unless method_defined?(:to_ptr) + + # @param [Symbol,Type] type of data to read + # @return [Object] + # Read pointer's contents as +type+ + # + # Same as: + # ptr.get(type, 0) + def read(type) + get(type, 0) + end unless method_defined?(:read) + + # @param [Symbol,Type] type of data to read + # @param [Object] value to write + # @return [nil] + # Write +value+ of type +type+ to pointer's content + # + # Same as: + # ptr.put(type, 0) + def write(type, value) + put(type, 0, value) + end unless method_defined?(:write) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct.rb new file mode 100644 index 0000000..7028258 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct.rb @@ -0,0 +1,316 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' +require 'ffi/struct_layout' +require 'ffi/struct_layout_builder' +require 'ffi/struct_by_reference' + +module FFI + + class Struct + + # Get struct size + # @return [Numeric] + def size + self.class.size + end + + # @return [Fixnum] Struct alignment + def alignment + self.class.alignment + end + alias_method :align, :alignment + + # (see FFI::StructLayout#offset_of) + def offset_of(name) + self.class.offset_of(name) + end + + # (see FFI::StructLayout#members) + def members + self.class.members + end + + # @return [Array] + # Get array of values from Struct fields. + def values + members.map { |m| self[m] } + end + + # (see FFI::StructLayout#offsets) + def offsets + self.class.offsets + end + + # Clear the struct content. + # @return [self] + def clear + pointer.clear + self + end + + # Get {Pointer} to struct content. + # @return [AbstractMemory] + def to_ptr + pointer + end + + # Get struct size + # @return [Numeric] + def self.size + defined?(@layout) ? @layout.size : defined?(@size) ? @size : 0 + end + + # set struct size + # @param [Numeric] size + # @return [size] + def self.size=(size) + raise ArgumentError, "Size already set" if defined?(@size) || defined?(@layout) + @size = size + end + + # @return (see Struct#alignment) + def self.alignment + @layout.alignment + end + + # (see FFI::Type#members) + def self.members + @layout.members + end + + # (see FFI::StructLayout#offsets) + def self.offsets + @layout.offsets + end + + # (see FFI::StructLayout#offset_of) + def self.offset_of(name) + @layout.offset_of(name) + end + + def self.in + ptr(:in) + end + + def self.out + ptr(:out) + end + + def self.ptr(flags = :inout) + @ref_data_type ||= Type::Mapped.new(StructByReference.new(self)) + end + + def self.val + @val_data_type ||= StructByValue.new(self) + end + + def self.by_value + self.val + end + + def self.by_ref(flags = :inout) + self.ptr(flags) + end + + class ManagedStructConverter < StructByReference + + # @param [Struct] struct_class + def initialize(struct_class) + super(struct_class) + + raise NoMethodError, "release() not implemented for class #{struct_class}" unless struct_class.respond_to? :release + @method = struct_class.method(:release) + end + + # @param [Pointer] ptr + # @param [nil] ctx + # @return [Struct] + def from_native(ptr, ctx) + struct_class.new(AutoPointer.new(ptr, @method)) + end + end + + def self.auto_ptr + @managed_type ||= Type::Mapped.new(ManagedStructConverter.new(self)) + end + + + class << self + public + + # @return [StructLayout] + # @overload layout + # @return [StructLayout] + # Get struct layout. + # @overload layout(*spec) + # @param [Array,Array(Hash)] spec + # @return [StructLayout] + # Create struct layout from +spec+. 
+ # @example Creating a layout from an array +spec+ + # class MyStruct < Struct + # layout :field1, :int, + # :field2, :pointer, + # :field3, :string + # end + # @example Creating a layout from an array +spec+ with offset + # class MyStructWithOffset < Struct + # layout :field1, :int, + # :field2, :pointer, 6, # set offset to 6 for this field + # :field3, :string + # end + # @example Creating a layout from a hash +spec+ + # class MyStructFromHash < Struct + # layout :field1 => :int, + # :field2 => :pointer, + # :field3 => :string + # end + # @example Creating a layout with pointers to functions + # class MyFunctionTable < Struct + # layout :function1, callback([:int, :int], :int), + # :function2, callback([:pointer], :void), + # :field3, :string + # end + def layout(*spec) + warn "[DEPRECATION] Struct layout is already defined for class #{self.inspect}. Redefinition as in #{caller[0]} will be disallowed in ffi-2.0." if defined?(@layout) + return @layout if spec.size == 0 + + builder = StructLayoutBuilder.new + builder.union = self < Union + builder.packed = @packed if defined?(@packed) + builder.alignment = @min_alignment if defined?(@min_alignment) + + if spec[0].kind_of?(Hash) + hash_layout(builder, spec) + else + array_layout(builder, spec) + end + builder.size = @size if defined?(@size) && @size > builder.size + cspec = builder.build + @layout = cspec unless self == Struct + @size = cspec.size + return cspec + end + + + protected + + def callback(params, ret) + mod = enclosing_module + ret_type = find_type(ret, mod) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + FFI::CallbackInfo.new(ret_type, params.map { |e| find_type(e, mod) }) + end + + def packed(packed = 1) + @packed = packed + end + alias :pack :packed + + def aligned(alignment = 1) + @min_alignment = alignment + end + alias :align :aligned + + def enclosing_module + begin + mod = self.name.split("::")[0..-2].inject(Object) { |obj, c| obj.const_get(c) } + if mod.respond_to?(:find_type) && (mod.is_a?(FFI::Library) || mod < FFI::Struct) + mod + end + rescue Exception + nil + end + end + + + def find_field_type(type, mod = enclosing_module) + if type.kind_of?(Class) && type < Struct + FFI::Type::Struct.new(type) + + elsif type.kind_of?(Class) && type < FFI::StructLayout::Field + type + + elsif type.kind_of?(::Array) + FFI::Type::Array.new(find_field_type(type[0]), type[1]) + + else + find_type(type, mod) + end + end + + def find_type(type, mod = enclosing_module) + if mod + mod.find_type(type) + end || FFI.find_type(type) + end + + private + + # @param [StructLayoutBuilder] builder + # @param [Hash] spec + # @return [builder] + # Add hash +spec+ to +builder+. + def hash_layout(builder, spec) + spec[0].each do |name, type| + builder.add name, find_field_type(type), nil + end + end + + # @param [StructLayoutBuilder] builder + # @param [Array] spec + # @return [builder] + # Add array +spec+ to +builder+. 
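For example, `packed` changes how the layout above is computed. A small sketch with made-up field names (sizes assume a typical ABI, and `packed` has to be called before `layout`):

    require 'ffi'

    class PackedHeader < FFI::Struct
      packed 1                       # drop natural alignment inside this struct
      layout :tag,   :uint8,
             :value, :uint32
    end

    class PaddedHeader < FFI::Struct
      layout :tag,   :uint8,
             :value, :uint32
    end

    p PackedHeader.size   # => 5, fields are butted together
    p PaddedHeader.size   # => 8, three padding bytes precede :value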
+ def array_layout(builder, spec) + i = 0 + while i < spec.size + name, type = spec[i, 2] + i += 2 + + # If the next param is a Integer, it specifies the offset + if spec[i].kind_of?(Integer) + offset = spec[i] + i += 1 + else + offset = nil + end + + builder.add name, find_field_type(type), offset + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference.rb new file mode 100644 index 0000000..27f25ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_by_reference.rb @@ -0,0 +1,72 @@ +# +# Copyright (C) 2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This class includes the {FFI::DataConverter} module. + class StructByReference + include DataConverter + + attr_reader :struct_class + + # @param [Struct] struct_class + def initialize(struct_class) + unless Class === struct_class and struct_class < FFI::Struct + raise TypeError, 'wrong type (expected subclass of FFI::Struct)' + end + @struct_class = struct_class + end + + # Always get {FFI::Type}::POINTER. + def native_type + FFI::Type::POINTER + end + + # @param [nil, Struct] value + # @param [nil] ctx + # @return [AbstractMemory] Pointer on +value+. + def to_native(value, ctx) + return Pointer::NULL if value.nil? + + unless @struct_class === value + raise TypeError, "wrong argument type #{value.class} (expected #{@struct_class})" + end + + value.pointer + end + + # @param [AbstractMemory] value + # @param [nil] ctx + # @return [Struct] + # Create a struct from content of memory +value+. 
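In practice StructByReference is what backs `MyStruct.ptr` / `MyStruct.by_ref` parameter types. A hedged sketch against POSIX gettimeofday(2), with field types borrowed from the gettimeofday sample shipped with the gem:

    require 'ffi'

    class Timeval < FFI::Struct
      layout :tv_sec,  :ulong,
             :tv_usec, :ulong
    end

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # Timeval.ptr wraps the class in a StructByReference: callers pass a Timeval
      # (converted to its pointer by to_native) or nil (converted to Pointer::NULL).
      attach_function :gettimeofday, [Timeval.ptr, :pointer], :int
    end

    t = Timeval.new
    LibC.gettimeofday(t, nil)
    p t[:tv_sec]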
+ def from_native(value, ctx) + @struct_class.new(value) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_layout.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_layout.rb new file mode 100644 index 0000000..d5a78a7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_layout.rb @@ -0,0 +1,96 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + class StructLayout + + # @return [Array + # Get an array of tuples (field name, offset of the field). + def offsets + members.map { |m| [ m, self[m].offset ] } + end + + # @return [Numeric] + # Get the offset of a field. + def offset_of(field_name) + self[field_name].offset + end + + # An enum {Field} in a {StructLayout}. + class Enum < Field + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @return [Object] + # Get an object of type {#type} from memory pointed by +ptr+. + def get(ptr) + type.find(ptr.get_int(offset)) + end + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @param value + # @return [nil] + # Set +value+ into memory pointed by +ptr+. 
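The offset helpers and the Enum field class above can be exercised like this. Color and Pixel are invented names, and the enum-typed field relies on FFI::Enum acting as a DataConverter; offsets assume a typical ABI:

    require 'ffi'

    module MyLib
      extend FFI::Library
      Color = enum(:red, :green, :blue)     # red = 0, green = 1, blue = 2
    end

    class Pixel < FFI::Struct
      layout :color, MyLib::Color,          # stored as a C int
             :alpha, :uint8
    end

    p Pixel.offset_of(:alpha)               # => 4
    p Pixel.offsets                         # => [[:color, 0], [:alpha, 4]]

    px = Pixel.new
    px[:color] = :green                     # symbol mapped to its integer value on write
    p px[:color]                            # => :green on read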
+ def put(ptr, value) + ptr.put_int(offset, type.find(value)) + end + + end + + class InnerStruct < Field + def get(ptr) + type.struct_class.new(ptr.slice(self.offset, self.size)) + end + + def put(ptr, value) + raise TypeError, "wrong value type (expected #{type.struct_class})" unless value.is_a?(type.struct_class) + ptr.slice(self.offset, self.size).__copy_from__(value.pointer, self.size) + end + end + + class Mapped < Field + def initialize(name, offset, type, orig_field) + super(name, offset, type) + @orig_field = orig_field + end + + def get(ptr) + type.from_native(@orig_field.get(ptr), nil) + end + + def put(ptr, value) + @orig_field.put(ptr, type.to_native(value, nil)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder.rb new file mode 100644 index 0000000..4d6a464 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/struct_layout_builder.rb @@ -0,0 +1,227 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # Build a {StructLayout struct layout}. + class StructLayoutBuilder + attr_reader :size + attr_reader :alignment + + def initialize + @size = 0 + @alignment = 1 + @min_alignment = 1 + @packed = false + @union = false + @fields = Array.new + end + + # Set size attribute with +size+ only if +size+ is greater than attribute value. + # @param [Numeric] size + def size=(size) + @size = size if size > @size + end + + # Set alignment attribute with +align+ only if it is greater than attribute value. + # @param [Numeric] align + def alignment=(align) + @alignment = align if align > @alignment + @min_alignment = align + end + + # Set union attribute. + # Set to +true+ to build a {Union} instead of a {Struct}. + # @param [Boolean] is_union + # @return [is_union] + def union=(is_union) + @union = is_union + end + + # Building a {Union} or a {Struct} ? + # + # @return [Boolean] + # + def union? 
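The InnerStruct field above is what a nested (by-value) struct member compiles down to. A small sketch with invented struct names:

    require 'ffi'

    class Inner < FFI::Struct
      layout :a, :int,
             :b, :int
    end

    class Outer < FFI::Struct
      layout :header, Inner,        # embedded by value, not as a pointer
             :flags,  :uint
    end

    o = Outer.new
    o[:header][:a] = 42             # InnerStruct#get returns an Inner view over the same memory
    p o[:header][:a]                # => 42
    p Outer.size                    # => Inner.size + 4, plus any ABI padding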
+ @union + end + + # Set packed attribute + # @overload packed=(packed) Set alignment and packed attributes to + # +packed+. + # + # @param [Fixnum] packed + # + # @return [packed] + # @overload packed=(packed) Set packed attribute. + # @param packed + # + # @return [0,1] + # + def packed=(packed) + if packed.is_a?(0.class) + @alignment = packed + @packed = packed + else + @packed = packed ? 1 : 0 + end + end + + + # List of number types + NUMBER_TYPES = [ + Type::INT8, + Type::UINT8, + Type::INT16, + Type::UINT16, + Type::INT32, + Type::UINT32, + Type::LONG, + Type::ULONG, + Type::INT64, + Type::UINT64, + Type::FLOAT32, + Type::FLOAT64, + Type::LONGDOUBLE, + Type::BOOL, + ] + + # @param [String, Symbol] name name of the field + # @param [Array, DataConverter, Struct, StructLayout::Field, Symbol, Type] type type of the field + # @param [Numeric, nil] offset + # @return [self] + # Add a field to the builder. + # @note Setting +offset+ to +nil+ or +-1+ is equivalent to +0+. + def add(name, type, offset = nil) + + if offset.nil? || offset == -1 + offset = @union ? 0 : align(@size, @packed ? [ @packed, type.alignment ].min : [ @min_alignment, type.alignment ].max) + end + + # + # If a FFI::Type type was passed in as the field arg, try and convert to a StructLayout::Field instance + # + field = type.is_a?(StructLayout::Field) ? type : field_for_type(name, offset, type) + @fields << field + @alignment = [ @alignment, field.alignment ].max unless @packed + @size = [ @size, field.size + (@union ? 0 : field.offset) ].max + + return self + end + + # @param (see #add) + # @return (see #add) + # Same as {#add}. + # @see #add + def add_field(name, type, offset = nil) + add(name, type, offset) + end + + # @param (see #add) + # @return (see #add) + # Add a struct as a field to the builder. + def add_struct(name, type, offset = nil) + add(name, Type::Struct.new(type), offset) + end + + # @param name (see #add) + # @param type (see #add) + # @param [Numeric] count array length + # @param offset (see #add) + # @return (see #add) + # Add an array as a field to the builder. + def add_array(name, type, count, offset = nil) + add(name, Type::Array.new(type, count), offset) + end + + # @return [StructLayout] + # Build and return the struct layout. + def build + # Add tail padding if the struct is not packed + size = @packed ? @size : align(@size, @alignment) + + layout = StructLayout.new(@fields, size, @alignment) + layout.__union! 
if @union + layout + end + + private + + # @param [Numeric] offset + # @param [Numeric] align + # @return [Numeric] + def align(offset, align) + align + ((offset - 1) & ~(align - 1)); + end + + # @param (see #add) + # @return [StructLayout::Field] + def field_for_type(name, offset, type) + field_class = case + when type.is_a?(Type::Function) + StructLayout::Function + + when type.is_a?(Type::Struct) + StructLayout::InnerStruct + + when type.is_a?(Type::Array) + StructLayout::Array + + when type.is_a?(FFI::Enum) + StructLayout::Enum + + when NUMBER_TYPES.include?(type) + StructLayout::Number + + when type == Type::POINTER + StructLayout::Pointer + + when type == Type::STRING + StructLayout::String + + when type.is_a?(Class) && type < StructLayout::Field + type + + when type.is_a?(DataConverter) + return StructLayout::Mapped.new(name, offset, Type::Mapped.new(type), field_for_type(name, offset, type.native_type)) + + when type.is_a?(Type::Mapped) + return StructLayout::Mapped.new(name, offset, type, field_for_type(name, offset, type.native_type)) + + else + raise TypeError, "invalid struct field type #{type.inspect}" + end + + field_class.new(name, offset, type) + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator.rb new file mode 100644 index 0000000..fb34d94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/const_generator.rb @@ -0,0 +1,230 @@ +require 'tempfile' +require 'open3' + +module FFI + + # ConstGenerator turns C constants into ruby values. + # + # @example a simple example for stdio + # require 'ffi/tools/const_generator' + # cg = FFI::ConstGenerator.new('stdio') do |gen| + # gen.const(:SEEK_SET) + # gen.const('SEEK_CUR') + # gen.const('seek_end') # this constant does not exist + # end # #calculate called automatically at the end of the block + # + # cg['SEEK_SET'] # => 0 + # cg['SEEK_CUR'] # => 1 + # cg['seek_end'] # => nil + # cg.to_ruby # => "SEEK_SET = 0\nSEEK_CUR = 1\n# seek_end not available" + class ConstGenerator + @options = {} + attr_reader :constants + + # Creates a new constant generator that uses +prefix+ as a name, and an + # options hash. + # + # The only option is +:required+, which if set to +true+ raises an error if a + # constant you have requested was not found. + # + # @param [#to_s] prefix + # @param [Hash] options + # @return + # @option options [Boolean] :required + # @overload initialize(prefix, options) + # @overload initialize(prefix, options) { |gen| ... } + # @yieldparam [ConstGenerator] gen new generator is passed to the block + # When passed a block, {#calculate} is automatically called at the end of + # the block, otherwise you must call it yourself. + def initialize(prefix = nil, options = {}) + @includes = ['stdio.h', 'stddef.h'] + @constants = {} + @prefix = prefix + + @required = options[:required] + @options = options + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + # Set class options + # These options are merged with {#initialize} options when it is called with a block. + # @param [Hash] options + # @return [Hash] class options + def self.options=(options) + @options = options + end + # Get class options. + # @return [Hash] class options + def self.options + @options + end + # @param [String] name + # @return constant value (converted if a +converter+ was defined). + # Access a constant by name. 
+ def [](name) + @constants[name].converted_value + end + + # Request the value for C constant +name+. + # + # @param [#to_s] name C constant name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # + # @overload const(name, format=nil, cast='', ruby_name=nil, converter=nil) + # +converter+ is a Method or a Proc. + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + # @overload const(name, format=nil, cast='', ruby_name=nil) { |value| ... } + # Use a converter block. This block convert the value from a string to the + # appropriate type for {#to_ruby}. + # @yieldparam value constant value + def const(name, format = nil, cast = '', ruby_name = nil, converter = nil, + &converter_proc) + format ||= '%d' + cast ||= '' + + if converter_proc and converter then + raise ArgumentError, "Supply only converter or converter block" + end + + converter = converter_proc if converter.nil? + + const = Constant.new name, format, cast, ruby_name, converter + @constants[name.to_s] = const + return const + end + + # Calculate constants values. + # @param [Hash] options + # @option options [String] :cppflags flags for C compiler + # @return [nil] + # @raise if a constant is missing and +:required+ was set to +true+ (see {#initialize}) + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_const_gen_bin_#{Process.pid}" + + Tempfile.open("#{@prefix}.const_generator") do |f| + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + f.puts "\nint main(int argc, char **argv)\n{" + + @constants.each_value do |const| + f.puts <<-EOF + #ifdef #{const.name} + printf("#{const.name} #{const.format}\\n", #{const.cast}#{const.name}); + #endif + EOF + end + + f.puts "\n\treturn 0;\n}" + f.flush + + output = `gcc #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating constants #{@prefix}:\n#{output}" + end + end + + output = `#{binary}` + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + output.each_line do |line| + line =~ /^(\S+)\s(.*)$/ + const = @constants[$1] + const.value = $2 + end + + missing_constants = @constants.select do |name, constant| + constant.value.nil? + end.map { |name,| name } + + if @required and not missing_constants.empty? then + raise "Missing required constants for #{@prefix}: #{missing_constants.join ', '}" + end + end + + # Dump constants to +io+. + # @param [#puts] io + # @return [nil] + def dump_constants(io) + @constants.each do |name, constant| + name = [@prefix, name].join '.' if @prefix + io.puts "#{name} = #{constant.converted_value}" + end + end + + # Outputs values for discovered constants. If the constant's value was + # not discovered it is not omitted. + # @return [String] + def to_ruby + @constants.sort_by { |name,| name }.map do |name, constant| + if constant.value.nil? then + "# #{name} not available" + else + constant.to_ruby + end + end.join "\n" + end + + # Add additional C include file(s) to calculate constants from. 
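A hedged sketch of the ConstGenerator calls above. It shells out to gcc, so a C toolchain must be on PATH, and the printed values are platform dependent; the ruby name 'Errno_EINVAL' is made up:

    require 'ffi'
    require 'ffi/tools/const_generator'

    cg = FFI::ConstGenerator.new('errno') do |gen|
      gen.include 'errno.h'                                    # added to the default stdio.h/stddef.h
      gen.const :EAGAIN
      gen.const(:EINVAL, '%d', '', 'Errno_EINVAL', &:to_i)     # custom ruby name plus a converter
    end                                                        # calculate runs at the end of the block

    p cg['EAGAIN']      # => "11" on Linux; a String unless a converter was supplied
    puts cg.to_ruby     # => "EAGAIN = 11\nErrno_EINVAL = 22"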
+ # @note +stdio.h+ and +stddef.h+ automatically included + # @param [List, Array] i include file(s) + # @return [Array] array of include files + def include(*i) + @includes |= i.flatten + end + + end + + # This class hold constants for {ConstGenerator} + class ConstGenerator::Constant + + attr_reader :name, :format, :cast + attr_accessor :value + + # @param [#to_s] name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + def initialize(name, format, cast, ruby_name = nil, converter=nil) + @name = name + @format = format + @cast = cast + @ruby_name = ruby_name + @converter = converter + @value = nil + end + + # Return constant value (converted if a +converter+ was defined). + # @return constant value. + def converted_value + if @converter + @converter.call(@value) + else + @value + end + end + + # get constant ruby name + # @return [String] + def ruby_name + @ruby_name || @name + end + + # Get an evaluable string from constant. + # @return [String] + def to_ruby + "#{ruby_name} = #{converted_value}" + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/generator.rb new file mode 100644 index 0000000..5552ea5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/generator.rb @@ -0,0 +1,105 @@ +require 'ffi/tools/struct_generator' +require 'ffi/tools/const_generator' + +module FFI + + ## + # Generate files with C structs for FFI::Struct and C constants. + # + # == A simple example + # + # In file +zlib.rb.ffi+: + # module Zlib + # @@@ + # constants do |c| + # c.include "zlib.h" + # c.const :ZLIB_VERNUM + # end + # @@@ + # + # class ZStream < FFI::Struct + # + # struct do |s| + # s.name "struct z_stream_s" + # s.include "zlib.h" + # + # s.field :next_in, :pointer + # s.field :avail_in, :uint + # s.field :total_in, :ulong + # end + # @@@ + # end + # end + # + # Translate the file: + # require "ffi/tools/generator" + # FFI::Generator.new "zlib.rb.ffi", "zlib.rb" + # + # Generates the file +zlib.rb+ with constant values and offsets: + # module Zlib + # ZLIB_VERNUM = 4784 + # + # class ZStream < FFI::Struct + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 8, + # :total_in, :ulong, 16 + # end + # + # @see FFI::Generator::Task for easy integration in a Rakefile + class Generator + + def initialize(ffi_name, rb_name, options = {}) + @ffi_name = ffi_name + @rb_name = rb_name + @options = options + @name = File.basename rb_name, '.rb' + + file = File.read @ffi_name + + new_file = file.gsub(/^( *)@@@(.*?)@@@/m) do + @constants = [] + @structs = [] + + indent = $1 + original_lines = $2.count "\n" + + instance_eval $2, @ffi_name, $`.count("\n") + + new_lines = [] + @constants.each { |c| new_lines << c.to_ruby } + @structs.each { |s| new_lines << s.generate_layout } + + new_lines = new_lines.join("\n").split "\n" # expand multiline blocks + new_lines = new_lines.map { |line| indent + line } + + padding = original_lines - new_lines.length + new_lines += [nil] * padding if padding >= 0 + + new_lines.join "\n" + end + + open @rb_name, 'w' do |f| + f.puts "# This file is generated from `#{@ffi_name}'. Do not edit." 
+ f.puts + f.puts new_file + end + end + + def constants(options = {}, &block) + @constants << FFI::ConstGenerator.new(@name, @options.merge(options), &block) + end + + def struct(options = {}, &block) + @structs << FFI::StructGenerator.new(@name, @options.merge(options), &block) + end + + ## + # Utility converter for constants + + def to_s + proc { |obj| obj.to_s.inspect } + end + + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task.rb new file mode 100644 index 0000000..da72968 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/generator_task.rb @@ -0,0 +1,32 @@ +require 'ffi/tools/generator' +require 'rake' +require 'rake/tasklib' + +## +# Add Rake tasks that generate files with C structs for FFI::Struct and C constants. +# +# @example a simple example for your Rakefile +# require "ffi/tools/generator_task" +# # Add a task to generate my_object.rb out of my_object.rb.ffi +# FFI::Generator::Task.new ["my_object.rb"], cflags: "-I/usr/local/mylibrary" +# +# The generated files are also added to the 'clear' task. +# +# @see FFI::Generator for a description of the file content +class FFI::Generator::Task < Rake::TaskLib + + def initialize(rb_names, options={}) + task :clean do rm_f rb_names end + + rb_names.each do |rb_name| + ffi_name = "#{rb_name}.ffi" + + file rb_name => ffi_name do |t| + puts "Generating #{rb_name}..." if Rake.application.options.trace + + FFI::Generator.new ffi_name, rb_name, options + end + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator.rb new file mode 100644 index 0000000..7d2a6e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/struct_generator.rb @@ -0,0 +1,194 @@ +require 'tempfile' + +module FFI + + ## + # Generates an FFI Struct layout. + # + # Given the @@@ portion in: + # + # class Zlib::ZStream < FFI::Struct + # @@@ + # name "struct z_stream_s" + # include "zlib.h" + # + # field :next_in, :pointer + # field :avail_in, :uint + # field :total_in, :ulong + # + # # ... + # @@@ + # end + # + # StructGenerator will create the layout: + # + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 4, + # :total_in, :ulong, 8, + # # ... + # + # StructGenerator does its best to pad the layout it produces to preserve + # line numbers. Place the struct definition as close to the top of the file + # for best results. + + class StructGenerator + @options = {} + attr_accessor :size + attr_reader :fields + + def initialize(name, options = {}) + @name = name + @struct_name = nil + @includes = [] + @fields = [] + @found = false + @size = nil + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + def self.options=(options) + @options = options + end + def self.options + @options + end + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_struct_gen_bin_#{Process.pid}" + + raise "struct name not set" if @struct_name.nil? 
+ + Tempfile.open("#{@name}.struct_generator") do |f| + f.puts "#include " + + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + + f.puts "#include \n\n" + f.puts "int main(int argc, char **argv)\n{" + f.puts " #{@struct_name} s;" + f.puts %[ printf("sizeof(#{@struct_name}) %u\\n", (unsigned int) sizeof(#{@struct_name}));] + + @fields.each do |field| + f.puts <<-EOF + printf("#{field.name} %u %u\\n", (unsigned int) offsetof(#{@struct_name}, #{field.name}), + (unsigned int) sizeof(s.#{field.name})); + EOF + end + + f.puts "\n return 0;\n}" + f.flush + + output = `gcc #{options[:cppflags]} #{options[:cflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + @found = false + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating struct #{@name} (#{@struct_name}):\n#{output}" + end + end + + output = `#{binary}`.split "\n" + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + sizeof = output.shift + unless @size + m = /\s*sizeof\([^)]+\) (\d+)/.match sizeof + @size = m[1] + end + + line_no = 0 + output.each do |line| + md = line.match(/.+ (\d+) (\d+)/) + @fields[line_no].offset = md[1].to_i + @fields[line_no].size = md[2].to_i + + line_no += 1 + end + + @found = true + end + + def field(name, type=nil) + field = Field.new(name, type) + @fields << field + return field + end + + def found? + @found + end + + def dump_config(io) + io.puts "rbx.platform.#{@name}.sizeof = #{@size}" + + @fields.each { |field| io.puts field.to_config(@name) } + end + + def generate_layout + buf = "" + + @fields.each_with_index do |field, i| + if buf.empty? + buf << "layout :#{field.name}, :#{field.type}, #{field.offset}" + else + buf << " :#{field.name}, :#{field.type}, #{field.offset}" + end + + if i < @fields.length - 1 + buf << ",\n" + end + end + + buf + end + + def get_field(name) + @fields.find { |f| name == f.name } + end + + def include(i) + @includes << i + end + + def name(n) + @struct_name = n + end + + end + + ## + # A field in a Struct. 
+ + class StructGenerator::Field + + attr_reader :name + attr_reader :type + attr_reader :offset + attr_accessor :size + + def initialize(name, type) + @name = name + @type = type + @offset = nil + @size = nil + end + + def offset=(o) + @offset = o + end + + def to_config(name) + buf = [] + buf << "rbx.platform.#{name}.#{@name}.offset = #{@offset}" + buf << "rbx.platform.#{name}.#{@name}.size = #{@size}" + buf << "rbx.platform.#{name}.#{@name}.type = #{@type}" if @type + buf + end + + end + +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator.rb new file mode 100644 index 0000000..ba2d8c5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/tools/types_generator.rb @@ -0,0 +1,137 @@ +require 'tempfile' + +module FFI + + # @private + class TypesGenerator + + ## + # Maps different C types to the C type representations we use + + TYPE_MAP = { + "char" => :char, + "signed char" => :char, + "__signed char" => :char, + "unsigned char" => :uchar, + + "short" => :short, + "signed short" => :short, + "signed short int" => :short, + "unsigned short" => :ushort, + "unsigned short int" => :ushort, + + "int" => :int, + "signed int" => :int, + "unsigned int" => :uint, + + "long" => :long, + "long int" => :long, + "signed long" => :long, + "signed long int" => :long, + "unsigned long" => :ulong, + "unsigned long int" => :ulong, + "long unsigned int" => :ulong, + + "long long" => :long_long, + "long long int" => :long_long, + "signed long long" => :long_long, + "signed long long int" => :long_long, + "unsigned long long" => :ulong_long, + "unsigned long long int" => :ulong_long, + + "char *" => :string, + "void *" => :pointer, + } + + def self.generate(options = {}) + typedefs = nil + Tempfile.open 'ffi_types_generator' do |io| + io.puts <<-C +#include +#include +#include +#if !(defined(WIN32)) +#include +#include +#include +#endif + C + + io.close + cc = ENV['CC'] || 'gcc' + cmd = "#{cc} -E -x c #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -c" + if options[:input] + typedefs = File.read(options[:input]) + elsif options[:remote] + typedefs = `ssh #{options[:remote]} #{cmd} - < #{io.path}` + else + typedefs = `#{cmd} #{io.path}` + end + end + + code = [] + + typedefs.each_line do |type| + # We only care about single line typedef + next unless type =~ /typedef/ + # Ignore unions or structs + next if type =~ /union|struct/ + + # strip off the starting typedef and ending ; + type.gsub!(/^(.*typedef\s*)/, "") + type.gsub!(/\s*;\s*$/, "") + + parts = type.split(/\s+/) + def_type = parts.join(" ") + + # GCC does mapping with __attribute__ stuf, also see + # http://hal.cs.berkeley.edu/cil/cil016.html section 16.2.7. Problem + # with this is that the __attribute__ stuff can either occur before or + # after the new type that is defined... 
+ if type =~ /__attribute__/ + if parts.last =~ /__QI__|__HI__|__SI__|__DI__|__word__/ + + # In this case, the new type is BEFORE __attribute__ we need to + # find the final_type as the type before the part that starts with + # __attribute__ + final_type = "" + parts.each do |p| + break if p =~ /__attribute__/ + final_type = p + end + else + final_type = parts.pop + end + + def_type = case type + when /__QI__/ then "char" + when /__HI__/ then "short" + when /__SI__/ then "int" + when /__DI__/ then "long long" + when /__word__/ then "long" + else "int" + end + + def_type = "unsigned #{def_type}" if type =~ /unsigned/ + else + final_type = parts.pop + def_type = parts.join(" ") + end + + if type = TYPE_MAP[def_type] + code << "rbx.platform.typedef.#{final_type} = #{type}" + TYPE_MAP[final_type] = TYPE_MAP[def_type] + else + # Fallback to an ordinary pointer if we don't know the type + if def_type =~ /\*/ + code << "rbx.platform.typedef.#{final_type} = pointer" + TYPE_MAP[final_type] = :pointer + end + end + end + + code.sort.join("\n") + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/types.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/types.rb new file mode 100644 index 0000000..90f50c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/types.rb @@ -0,0 +1,194 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# see {file:README} +module FFI + + # @param [Type, DataConverter, Symbol] old type definition used by {FFI.find_type} + # @param [Symbol] add new type definition's name to add + # @return [Type] + # Add a definition type to type definitions. 
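A minimal sketch of running the generator above. It needs a C compiler on PATH, and the exact output depends on the platform, but each recognised typedef becomes one `rbx.platform.typedef.*` line, which is the format the types.conf loader in lib/ffi/types.rb parses:

    require 'ffi'
    require 'ffi/tools/types_generator'

    # Emits lines such as (values are examples, not guarantees):
    #   rbx.platform.typedef.pid_t = int
    #   rbx.platform.typedef.size_t = ulong
    puts FFI::TypesGenerator.generate(cppflags: ENV['CPPFLAGS'])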
+ def self.typedef(old, add) + TypeDefs[add] = self.find_type(old) + end + + # (see FFI.typedef) + def self.add_typedef(old, add) + typedef old, add + end + + + # @param [Type, DataConverter, Symbol] name + # @param [Hash] type_map if nil, {FFI::TypeDefs} is used + # @return [Type] + # Find a type in +type_map+ ({FFI::TypeDefs}, by default) from + # a type objet, a type name (symbol). If +name+ is a {DataConverter}, + # a new {Type::Mapped} is created. + def self.find_type(name, type_map = nil) + if name.is_a?(Type) + name + + elsif type_map && type_map.has_key?(name) + type_map[name] + + elsif TypeDefs.has_key?(name) + TypeDefs[name] + + elsif name.is_a?(DataConverter) + (type_map || TypeDefs)[name] = Type::Mapped.new(name) + else + raise TypeError, "unable to resolve type '#{name}'" + end + end + + # List of type definitions + TypeDefs.merge!({ + # The C void type; only useful for function return types + :void => Type::VOID, + + # C boolean type + :bool => Type::BOOL, + + # C nul-terminated string + :string => Type::STRING, + + # C signed char + :char => Type::CHAR, + # C unsigned char + :uchar => Type::UCHAR, + + # C signed short + :short => Type::SHORT, + # C unsigned short + :ushort => Type::USHORT, + + # C signed int + :int => Type::INT, + # C unsigned int + :uint => Type::UINT, + + # C signed long + :long => Type::LONG, + + # C unsigned long + :ulong => Type::ULONG, + + # C signed long long integer + :long_long => Type::LONG_LONG, + + # C unsigned long long integer + :ulong_long => Type::ULONG_LONG, + + # C single precision float + :float => Type::FLOAT, + + # C double precision float + :double => Type::DOUBLE, + + # C long double + :long_double => Type::LONGDOUBLE, + + # Native memory address + :pointer => Type::POINTER, + + # 8 bit signed integer + :int8 => Type::INT8, + # 8 bit unsigned integer + :uint8 => Type::UINT8, + + # 16 bit signed integer + :int16 => Type::INT16, + # 16 bit unsigned integer + :uint16 => Type::UINT16, + + # 32 bit signed integer + :int32 => Type::INT32, + # 32 bit unsigned integer + :uint32 => Type::UINT32, + + # 64 bit signed integer + :int64 => Type::INT64, + # 64 bit unsigned integer + :uint64 => Type::UINT64, + + :buffer_in => Type::BUFFER_IN, + :buffer_out => Type::BUFFER_OUT, + :buffer_inout => Type::BUFFER_INOUT, + + # Used in function prototypes to indicate the arguments are variadic + :varargs => Type::VARARGS, + }) + + # This will convert a pointer to a Ruby string (just like `:string`), but + # also allow to work with the pointer itself. This is useful when you want + # a Ruby string already containing a copy of the data, but also the pointer + # to the data for you to do something with it, like freeing it, in case the + # library handed the memory off to the caller (Ruby-FFI). + # + # It's {typedef}'d as +:strptr+. + class StrPtrConverter + extend DataConverter + native_type Type::POINTER + + # @param [Pointer] val + # @param ctx not used + # @return [Array(String, Pointer)] + # Returns a [ String, Pointer ] tuple so the C memory for the string can be freed + def self.from_native(val, ctx) + [ val.null? ? nil : val.get_string(0), val ] + end + end + + typedef(StrPtrConverter, :strptr) + + # @param type +type+ is an instance of class accepted by {FFI.find_type} + # @return [Numeric] + # Get +type+ size, in bytes. + def self.type_size(type) + find_type(type).size + end + + # Load all the platform dependent types + begin + File.open(File.join(Platform::CONF_DIR, 'types.conf'), "r") do |f| + prefix = "rbx.platform.typedef." 
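A hedged sketch of the typedef/find_type/type_size helpers and the :strptr converter above; :my_handle_t and the LibC module name are invented, while strdup/free come from the C library:

    require 'ffi'

    FFI.typedef :uint32, :my_handle_t          # alias usable anywhere a type name is accepted
    p FFI.find_type(:my_handle_t)              # the resolved FFI::Type::Builtin
    p FFI.type_size(:my_handle_t)              # => 4

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :strdup, [:string], :strptr   # :strptr is backed by StrPtrConverter
      attach_function :free,   [:pointer], :void
    end

    str, ptr = LibC.strdup("hello")            # [copied Ruby String, raw Pointer]
    p str                                      # => "hello"
    LibC.free(ptr)                             # the pointer lets us release the C buffer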
+ f.each_line { |line| + if line.index(prefix) == 0 + new_type, orig_type = line.chomp.slice(prefix.length..-1).split(/\s*=\s*/) + typedef(orig_type.to_sym, new_type.to_sym) + end + } + end + typedef :pointer, :caddr_t + rescue Errno::ENOENT + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/union.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/union.rb new file mode 100644 index 0000000..38414ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/union.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2009 Andrea Fazzi +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/struct' + +module FFI + + class Union < FFI::Struct + def self.builder + b = StructLayoutBuilder.new + b.union = true + b + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/variadic.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/variadic.rb new file mode 100644 index 0000000..2414055 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/variadic.rb @@ -0,0 +1,78 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + class VariadicInvoker + def init(arg_types, type_map) + @fixed = Array.new + @type_map = type_map + arg_types.each_with_index do |type, i| + @fixed << type unless type == Type::VARARGS + end + end + + + def call(*args, &block) + param_types = Array.new(@fixed) + param_values = Array.new + @fixed.each_with_index do |t, i| + param_values << args[i] + end + i = @fixed.length + while i < args.length + param_types << FFI.find_type(args[i], @type_map) + param_values << args[i + 1] + i += 2 + end + invoke(param_types, param_values, &block) + end + + # + # Attach the invoker to module +mod+ as +mname+ + # + def attach(mod, mname) + invoker = self + params = "*args" + call = "call" + mod.module_eval <<-code + @@#{mname} = invoker + def self.#{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + def #{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + code + invoker + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/version.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/version.rb new file mode 100644 index 0000000..c0b65cf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/lib/ffi/version.rb @@ -0,0 +1,3 @@ +module FFI + VERSION = '1.13.1' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/getlogin.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/getlogin.rb new file mode 100644 index 0000000..6713021 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/getlogin.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getlogin, [ ], :string +end +puts "getlogin=#{Foo.getlogin}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/getpid.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/getpid.rb new file mode 100644 index 0000000..1720635 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/getpid.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getpid, [ ], :int +end +puts "My pid=#{Foo.getpid}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/gettimeofday.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/gettimeofday.rb new file mode 100644 index 0000000..864bbb6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/gettimeofday.rb @@ -0,0 +1,18 @@ +require 'ffi' +require 'rbconfig' + +class Timeval < FFI::Struct + layout tv_sec: :ulong, tv_usec: :ulong +end +module LibC + extend FFI::Library + if FFI::Platform.windows? 
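The VariadicInvoker above is what `:varargs` parameters dispatch to. A minimal sketch against libc printf, where each variadic argument is passed at the call site as a type/value pair:

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :printf, [:string, :varargs], :int
    end

    LibC.printf("%s scored %d points (%.1f%%)\n",
                :string, "alice", :int, 97, :double, 96.5)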
+ ffi_lib RbConfig::CONFIG["LIBRUBY_SO"] + else + ffi_lib FFI::Library::LIBC + end + attach_function :gettimeofday, [ :pointer, :pointer ], :int +end +t = Timeval.new +LibC.gettimeofday(t.pointer, nil) +puts "t.tv_sec=#{t[:tv_sec]} t.tv_usec=#{t[:tv_usec]}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/hello.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/hello.rb new file mode 100644 index 0000000..f2ccf37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/hello.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function("cputs", "puts", [ :string ], :int) +end +Foo.cputs("Hello, World via libc puts using FFI on MRI ruby") diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/inotify.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/inotify.rb new file mode 100644 index 0000000..018d78c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/inotify.rb @@ -0,0 +1,60 @@ +require 'ffi' + +module Inotify + extend FFI::Library + ffi_lib FFI::Library::LIBC + class Event < FFI::Struct + layout \ + :wd, :int, + :mask, :uint, + :cookie, :uint, + :len, :uint + end + attach_function :init, :inotify_init, [ ], :int + attach_function :add_watch, :inotify_add_watch, [ :int, :string, :uint ], :int + attach_function :rm_watch, :inotify_rm_watch, [ :int, :uint ], :int + attach_function :read, [ :int, :buffer_out, :uint ], :int + IN_ACCESS=0x00000001 + IN_MODIFY=0x00000002 + IN_ATTRIB=0x00000004 + IN_CLOSE_WRITE=0x00000008 + IN_CLOSE_NOWRITE=0x00000010 + IN_CLOSE=(IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_OPEN=0x00000020 + IN_MOVED_FROM=0x00000040 + IN_MOVED_TO=0x00000080 + IN_MOVE= (IN_MOVED_FROM | IN_MOVED_TO) + IN_CREATE=0x00000100 + IN_DELETE=0x00000200 + IN_DELETE_SELF=0x00000400 + IN_MOVE_SELF=0x00000800 + # Events sent by the kernel. + IN_UNMOUNT=0x00002000 + IN_Q_OVERFLOW=0x00004000 + IN_IGNORED=0x00008000 + IN_ONLYDIR=0x01000000 + IN_DONT_FOLLOW=0x02000000 + IN_MASK_ADD=0x20000000 + IN_ISDIR=0x40000000 + IN_ONESHOT=0x80000000 + IN_ALL_EVENTS=(IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE \ + | IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM \ + | IN_MOVED_TO | IN_CREATE | IN_DELETE \ + | IN_DELETE_SELF | IN_MOVE_SELF) + +end +if $0 == __FILE__ + fd = Inotify.init + puts "fd=#{fd}" + wd = Inotify.add_watch(fd, "/tmp/", Inotify::IN_ALL_EVENTS) + fp = FFI::IO.for_fd(fd) + puts "wfp=#{fp}" + while true + buf = FFI::Buffer.alloc_out(Inotify::Event.size + 4096, 1, false) + ev = Inotify::Event.new buf + ready = IO.select([ fp ], nil, nil, nil) + n = Inotify.read(fd, buf, buf.total) + puts "Read #{n} bytes from inotify fd" + puts "event.wd=#{ev[:wd]} mask=#{ev[:mask]} len=#{ev[:len]} name=#{ev[:len] > 0 ? 
buf.get_string(16) : 'unknown'}" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/pty.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/pty.rb new file mode 100644 index 0000000..8b6b885 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/pty.rb @@ -0,0 +1,75 @@ +require 'ffi' + +module PTY + private + module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :forkpty, [ :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :openpty, [ :buffer_out, :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :login_tty, [ :int ], :int + attach_function :close, [ :int ], :int + attach_function :strerror, [ :int ], :string + attach_function :fork, [], :int + attach_function :execv, [ :string, :buffer_in ], :int + attach_function :execvp, [ :string, :buffer_in ], :int + attach_function :dup2, [ :int, :int ], :int + attach_function :dup, [ :int ], :int + end + Buffer = FFI::Buffer + def self.build_args(args) + cmd = args.shift + cmd_args = args.map do |arg| + MemoryPointer.from_string(arg) + end + exec_args = MemoryPointer.new(:pointer, 1 + cmd_args.length + 1) + exec_cmd = MemoryPointer.from_string(cmd) + exec_args[0].put_pointer(0, exec_cmd) + cmd_args.each_with_index do |arg, i| + exec_args[i + 1].put_pointer(0, arg) + end + [ cmd, exec_args ] + end + public + def self.getpty(*args) + mfdp = Buffer.new :int + name = Buffer.new 1024 + # + # All the execv setup is done in the parent, since doing anything other than + # execv in the child after fork is really flakey + # + exec_cmd, exec_args = build_args(args) + pid = LibC.forkpty(mfdp, name, nil, nil) + raise "forkpty failed: #{LibC.strerror(FFI.errno)}" if pid < 0 + if pid == 0 + LibC.execvp(exec_cmd, exec_args) + exit 1 + end + masterfd = mfdp.get_int(0) + rfp = FFI::IO.for_fd(masterfd, "r") + wfp = FFI::IO.for_fd(LibC.dup(masterfd), "w") + if block_given? + yield rfp, wfp, pid + rfp.close unless rfp.closed? + wfp.close unless wfp.closed? + else + [ rfp, wfp, pid ] + end + end + def self.spawn(*args, &block) + self.getpty("/bin/sh", "-c", args[0], &block) + end +end +module LibC + extend FFI::Library + attach_function :close, [ :int ], :int + attach_function :write, [ :int, :buffer_in, :ulong ], :long + attach_function :read, [ :int, :buffer_out, :ulong ], :long +end +PTY.getpty("/bin/ls", "-alR", "/") { |rfd, wfd, pid| +#PTY.spawn("ls -laR /") { |rfd, wfd, pid| + puts "child pid=#{pid}" + while !rfd.eof? && (buf = rfd.gets) + puts "child: '#{buf.strip}'" + end +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/qsort.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/qsort.rb new file mode 100644 index 0000000..58622c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.13.1/samples/qsort.rb @@ -0,0 +1,20 @@ +require 'ffi' + +module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + callback :qsort_cmp, [ :pointer, :pointer ], :int + attach_function :qsort, [ :pointer, :ulong, :ulong, :qsort_cmp ], :int +end + +p = FFI::MemoryPointer.new(:int, 2) +p.put_array_of_int32(0, [ 2, 1 ]) +puts "ptr=#{p.inspect}" +puts "Before qsort #{p.get_array_of_int32(0, 2).join(', ')}" +LibC.qsort(p, 2, 4) do |p1, p2| + i1 = p1.get_int32(0) + i2 = p2.get_int32(0) + puts "In block: comparing #{i1} and #{i2}" + i1 < i2 ? -1 : i1 > i2 ? 
1 : 0 +end +puts "After qsort #{p.get_array_of_int32(0, 2).join(', ')}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/CHANGELOG.md new file mode 100644 index 0000000..26b9ce7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/CHANGELOG.md @@ -0,0 +1,338 @@ +1.15.5 / 2022-01-10 +------------------- + +Fixed: +* Fix long double argument or return values on 32bit i686. #849 +* FFI::ConstGenerator: avoid usage of the same binary file simultaneously. #929 + +Added: +* Add Windows fat binary gem for Ruby-3.1 + +Removed: +* Remove Windows fat binary gem for Ruby < 2.4 + + +1.15.4 / 2021-09-01 +------------------- + +Fixed: +* Fix build for uClibc. #913 +* Correct module lookup when including `ffi-module` gem. #912 + +Changed: +* Use ruby code of the ffi gem in JRuby-9.2.20+. #915 + + +1.15.3 / 2021-06-16 +------------------- + +Fixed: +* Fix temporary packaging issue with libffi. #904 + + +1.15.2 / 2021-06-16 +------------------- + +Added: +* Add support for Windows MINGW-UCRT build. #903 +* Add `/opt/homebrew/lib/` to fallback search paths to improve homebrew support. #880 #882 + +Changed: +* Regenerate `types.conf` for FreeBSD12 aarch64. #902 + + +1.15.1 / 2021-05-22 +------------------- + +Fixed: +* Append -pthread to linker options. #893 +* Use arm or aarch64 to identify Apple ARM CPU arch. #899 +* Allow overriding `gcc` with the `CC` env var in `const_generator.rb` and `struct_generator.rb`. #897 + + +1.15.0 / 2021-03-05 +------------------- + +Fixed: +* Fix MSVC build +* Fix async callbacks in conjunction with fork(). #884 + +Added: +* Allow to pass callbacks in varargs. #885 +* Name the threads for FFI callback dispatcher and async thread calls for easier debugging. #883 + The name can be retrieved by Thread.name and is shown by Thread.list.inspect etc. + Even gdb shows the thread name on supported operating systems. +* Add types.conf for powerpc64le-linux +* Add types.conf for riscv64-linux +* More release automation of ffi gems + +Changed: +* Switch from rubygems-tasks to bundler/gem_helper + +Removed: +* Remove unused VariadicInvoker#init + + +1.14.2 / 2020-12-21 +------------------- + +Fixed: +* Fix builtin libffi on newer Ubuntu caused by an outdated Makefile.in . #863 + + +1.14.1 / 2020-12-19 +------------------- + +Changed: +* Revert changes to FFI::Pointer#write_string made in ffi-1.14.0. + It breaks compatibilty in a way that can cause hard to find errors. #857 + + +1.14.0 / 2020-12-18 +------------------- + +Added: +* Add types.conf for x86_64-msys, x86_64-haiku, aarch64-openbsd and aarch64-darwin (alias arm64-darwin) +* Add method AbstractMemory#size_limit? . #829 +* Add new extconf option --enable-libffi-alloc which is enabled per default on Apple M1 (arm64-darwin). + +Changed: +* Do NULL pointer check only when array length > 0 . #305 +* Raise an error on an unknown order argument. #830 +* Change FFI::Pointer#write_string to terminate with a NUL byte like other string methods. #805 +* Update bundled libffi to latest master. + +Removed: +* Remove win32/stdint.h and stdbool.h because of copyright issue. #693 + +Fixed: +* Fix possible UTF-8 load error in loader script interpretation. #792 +* Fix segfault on non-array argument to #write_array_of_* +* Fix memory leak in MethodHandle . #815 +* Fix possible segfault in combination with fiddle or other libffi using gems . #835 +* Fix possibility to use ffi ruby gem with JRuby-9.3 . 
#763 +* Fix a GC issue, when a callback Proc is used on more than 2 callback signatures. #820 + + +1.13.1 / 2020-06-09 +------------------- + +Changed: +* Revert use of `ucrtbase.dll` as default C library on Windows-MINGW. + `ucrtbase.dll` is still used on MSWIN target. #790 +* Test for `ffi_prep_closure_loc()` to make sure we can use this function. + This fixes incorrect use of system libffi on MacOS Mojave (10.14). #787 +* Update types.conf on x86_64-dragonflybsd + + +1.13.0 / 2020-06-01 +------------------- + +Added: +* Add TruffleRuby support. Almost all specs are running on TruffleRuby and succeed. #768 +* Add ruby source files to the java gem. This allows to ship the Ruby library code per platform java gem and add it as a default gem to JRuby. #763 +* Add FFI::Platform::LONG_DOUBLE_SIZE +* Add bounds checks for writing to an inline char[] . #756 +* Add long double as callback return value. #771 +* Update type definitions and add types from stdint.h and stddef.h on i386-windows, x86_64-windows, x86_64-darwin, x86_64-linux, arm-linux, powerpc-linux. #749 +* Add new type definitions for powerpc-openbsd and sparcv9-openbsd. #775, #778 + +Changed: +* Raise required ruby version to >= 2.3. +* Lots of cleanups and improvements in library, specs and benchmarks. +* Fix a lot of compiler warnings at the C-extension +* Fix several install issues on MacOS: + * Look for libffi in SDK paths, since recent versions of macOS removed it from `/usr/include` . #757 + * Fix error `ld: library not found for -lgcc_s.10.4` + * Don't built for i386 architecture as it is deprecated +* Several fixes for MSVC build on Windows. #779 +* Use `ucrtbase.dll` as default C library on Windows instead of old `msvcrt.dll`. #779 +* Update builtin libffi to fix a Powerpc issue with parameters of type long +* Allow unmodified sourcing of (the ruby code of) this gem in JRuby and TruffleRuby as a default gem. #747 +* Improve check to detect if a module has a #find_type method suitable for FFI. This fixes compatibility with stdlib `mkmf` . #776 + +Removed: +* Reject callback with `:string` return type at definition, because it didn't work so far and is not save to use. #751, #782 + + +1.12.2 / 2020-02-01 +------------------- + +* Fix possible segfault at FFI::Struct#[] and []= after GC.compact . #742 + + +1.12.1 / 2020-01-14 +------------------- + +Added: +* Add binary gem support for ruby-2.7 on Windows + + +1.12.0 / 2020-01-14 +------------------- + +Added: +* FFI::VERSION is defined as part of `require 'ffi'` now. + It is no longer necessary to `require 'ffi/version'` . + +Changed: +* Update libffi to latest master. + +Deprecated: +* Overwriting struct layouts is now warned and will be disallowed in ffi-2.0. #734, #735 + + +1.11.3 / 2019-11-25 +------------------- + +Removed: +* Remove support for tainted objects which cause deprecation warnings in ruby-2.7. #730 + + +1.11.2 / 2019-11-11 +------------------- + +Added: +* Add DragonFlyBSD as a platform. #724 + +Changed: +* Sort all types.conf files, so that files and changes are easier to compare. +* Regenerated type conf for freebsd12 and x86_64-linux targets. #722 +* Remove MACOSX_DEPLOYMENT_TARGET that was targeting very old version 10.4. #647 +* Fix library name mangling for non glibc Linux/UNIX. #727 +* Fix compiler warnings raised by ruby-2.7 +* Update libffi to latest master. + + +1.11.1 / 2019-05-20 +------------------- + +Changed: +* Raise required ruby version to >=2.0. #699, #700 +* Fix a possible linker error on ruby < 2.3 on Linux. 
+ + +1.11.0 / 2019-05-17 +------------------- +This version was yanked on 2019-05-20 to fix an install issue on ruby-1.9.3. #700 + +Added: +* Add ability to disable or force use of system libffi. #669 + Use like `gem inst ffi -- --enable-system-libffi` . +* Add ability to call FFI callbacks from outside of FFI call frame. #584 +* Add proper documentation to FFI::Generator and ::Task +* Add gemspec metadata. #696, #698 + +Changed: +* Fix stdcall on Win32. #649, #669 +* Fix load paths for FFI::Generator::Task +* Fix FFI::Pointer#read_string(0) to return a binary String. #692 +* Fix benchmark suite so that it runs on ruby-2.x +* Move FFI::Platform::CPU from C to Ruby. #663 +* Move FFI::StructByReference to Ruby. #681 +* Move FFI::DataConverter to Ruby (#661) +* Various cleanups and improvements of specs and benchmarks + +Removed: +* Remove ruby-1.8 and 1.9 compatibility code. #683 +* Remove unused spec files. #684 + + +1.10.0 / 2019-01-06 +------------------- + +Added: +* Add /opt/local/lib/ to ffi's fallback library search path. #638 +* Add binary gem support for ruby-2.6 on Windows +* Add FreeBSD on AArch64 and ARM support. #644 +* Add FFI::LastError.winapi_error on Windows native or Cygwin. #633 + +Changed: +* Update to rake-compiler-dock-0.7.0 +* Use 64-bit inodes on FreeBSD >= 12. #644 +* Switch time_t and suseconds_t types to long on FreeBSD. #627 +* Make register_t long_long on 64-bit FreeBSD. #644 +* Fix Pointer#write_array_of_type #637 + +Removed: +* Drop binary gem support for ruby-2.0 and 2.1 on Windows + + +1.9.25 / 2018-06-03 +------------------- + +Changed: +* Revert closures via libffi. + This re-adds ClosurePool and fixes compat with SELinux enabled systems. #621 + + +1.9.24 / 2018-06-02 +------------------- + +Security Note: + +This update addresses vulnerability CVE-2018-1000201: DLL loading issue which can be hijacked on Windows OS, when a Symbol is used as DLL name instead of a String. Found by Matthew Bush. + +Added: +* Added a CHANGELOG file +* Add mips64(eb) support, and mips r6 support. (#601) + +Changed: +* Update libffi to latest changes on master. +* Don't search in hardcoded /usr paths on Windows. +* Don't treat Symbol args different to Strings in ffi_lib. +* Make sure size_t is defined in Thread.c. Fixes #609 + + +1.9.23 / 2018-02-25 +------------------- + +Changed: +* Fix unnecessary rebuild of configure in darwin multi arch. Fixes #605 + + +1.9.22 / 2018-02-22 +------------------- + +Changed: +* Update libffi to latest changes on master. +* Update detection of system libffi to match new requirements. Fixes #617 +* Prefer bundled libffi over system libffi on Mac OS. +* Do closures via libffi. This removes ClosurePool and fixes compat with PaX. #540 +* Use a more deterministic gem packaging. +* Fix unnecessary update of autoconf files at gem install. + + +1.9.21 / 2018-02-06 +------------------- + +Added: +* Ruby-2.5 support by Windows binary gems. Fixes #598 +* Add missing win64 types. +* Added support for Bitmask. (#573) +* Add support for MSYS2 (#572) and Sparc64 Linux. (#574) + +Changed: +* Fix read_string to not throw an error on length 0. +* Don't use absolute paths for sh and env. Fixes usage on Adroid #528 +* Use Ruby implementation for `which` for better compat with Windows. Fixes #315 +* Fix compatibility with PPC64LE platform. (#577) +* Normalize sparc64 to sparcv9. (#575) + +Removed: +* Drop Ruby 1.8.7 support (#480) + + +1.9.18 / 2017-03-03 +------------------- + +Added: +* Add compatibility with Ruby-2.4. 
+ +Changed: +* Add missing shlwapi.h include to fix Windows build. +* Avoid undefined behaviour of LoadLibrary() on Windows. #553 + + +1.9.17 / 2017-01-13 +------------------- diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/COPYING b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/COPYING new file mode 100644 index 0000000..7622318 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/COPYING @@ -0,0 +1,49 @@ +Copyright (c) 2008-2013, Ruby FFI project contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Ruby FFI project nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +libffi, used by this project, is licensed under the MIT license: + +libffi - Copyright (c) 1996-2011 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/Gemfile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/Gemfile new file mode 100644 index 0000000..ad819ef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/Gemfile @@ -0,0 +1,14 @@ +source 'https://rubygems.org' + +group :development do + gem 'rake', '~> 13.0' + gem 'rake-compiler', '~> 1.0.3' + gem 'rake-compiler-dock', '~> 1.0' + gem 'rspec', '~> 3.0' + gem 'bundler', '>= 1.16', '< 3' +end + +group :doc do + gem 'kramdown' + gem 'yard', '~> 0.9' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/LICENSE new file mode 100644 index 0000000..20185fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2008-2016, Ruby FFI project contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Ruby FFI project nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/LICENSE.SPECS b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/LICENSE.SPECS new file mode 100644 index 0000000..5c9ffce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/LICENSE.SPECS @@ -0,0 +1,22 @@ +Copyright (c) 2008-2012 Ruby-FFI contributors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/README.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/README.md new file mode 100644 index 0000000..5845f26 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/README.md @@ -0,0 +1,136 @@ +# Ruby-FFI https://github.com/ffi/ffi/wiki [![Build Status](https://travis-ci.com/ffi/ffi.svg?branch=master)](https://travis-ci.com/ffi/ffi) [![Build status Windows](https://ci.appveyor.com/api/projects/status/r8wxn1sd4s794gg1/branch/master?svg=true)](https://ci.appveyor.com/project/larskanis/ffi-aofqa/branch/master) + +## Description + +Ruby-FFI is a gem for programmatically loading dynamically-linked native +libraries, binding functions within them, and calling those functions +from Ruby code. Moreover, a Ruby-FFI extension works without changes +on CRuby (MRI), JRuby, Rubinius and TruffleRuby. [Discover why you should write your next extension +using Ruby-FFI](https://github.com/ffi/ffi/wiki/why-use-ffi). + +## Features + +* Intuitive DSL +* Supports all C native types +* C structs (also nested), enums and global variables +* Callbacks from C to Ruby +* Automatic garbage collection of native memory + +## Synopsis + +```ruby +require 'ffi' + +module MyLib + extend FFI::Library + ffi_lib 'c' + attach_function :puts, [ :string ], :int +end + +MyLib.puts 'Hello, World using libc!' +``` + +For less minimalistic and more examples you may look at: + +* the `samples/` folder +* the examples on the [wiki](https://github.com/ffi/ffi/wiki) +* the projects using FFI listed on the wiki: https://github.com/ffi/ffi/wiki/projects-using-ffi + +## Requirements + +When installing the gem on CRuby (MRI), you will need: +* A C compiler (e.g., Xcode on macOS, `gcc` or `clang` on everything else) +Optionally (speeds up installation): +* The `libffi` library and development headers - this is commonly in the `libffi-dev` or `libffi-devel` packages + +The ffi gem comes with a builtin libffi version, which is used, when the system libffi library is not available or too old. +Use of the system libffi can be enforced by: +``` +gem install ffi -- --enable-system-libffi # to install the gem manually +bundle config build.ffi --enable-system-libffi # for bundle install +``` +or prevented by `--disable-system-libffi`. + +On Linux systems running with [PaX](https://en.wikipedia.org/wiki/PaX) (Gentoo, Alpine, etc.), FFI may trigger `mprotect` errors. You may need to disable [mprotect](https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options#Restrict_mprotect.28.29) for ruby (`paxctl -m [/path/to/ruby]`) for the time being until a solution is found. + +On FreeBSD systems pkgconf must be installed for the gem to be able to compile using clang. Install either via packages `pkg install pkgconf` or from ports via `devel/pkgconf`. + +On JRuby and TruffleRuby, there are no requirements to install the FFI gem, and `require 'ffi'` works even without installing the gem (i.e., the gem is preinstalled on these implementations). 
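+
+As an editorial illustration (a sketch modeled on the bundled `samples/qsort.rb`, whose output line appears earlier in this diff, and not part of the upstream README), the callback support from the feature list lets a Ruby block stand in for a C function pointer:
+
+```ruby
+require 'ffi'
+
+module LibC
+  extend FFI::Library
+  ffi_lib FFI::Library::LIBC
+
+  # qsort(3): sort +nel+ elements of +width+ bytes using a comparison callback
+  callback :qsort_cmp, [ :pointer, :pointer ], :int
+  attach_function :qsort, [ :pointer, :size_t, :size_t, :qsort_cmp ], :void
+end
+
+p = FFI::MemoryPointer.new(:int32, 2)
+p.put_array_of_int32(0, [ 2, 1 ])
+LibC.qsort(p, 2, p.type_size) do |a, b|
+  a.get_int32(0) <=> b.get_int32(0)
+end
+puts p.get_array_of_int32(0, 2).join(', ')   # => "1, 2"
+```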
+ +## Installation + +From rubygems: + + [sudo] gem install ffi + +From a Gemfile using git or GitHub + + gem 'ffi', github: 'ffi/ffi', submodules: true + +or from the git repository on github: + + git clone git://github.com/ffi/ffi.git + cd ffi + git submodule update --init --recursive + bundle install + rake install + +### Install options: + +* `--enable-system-libffi` : Force usage of system libffi +* `--disable-system-libffi` : Force usage of builtin libffi +* `--enable-libffi-alloc` : Force closure allocation by libffi +* `--disable-libffi-alloc` : Force closure allocation by builtin method + +## License + +The ffi library is covered by the BSD license, also see the LICENSE file. +The specs are covered by the same license as [ruby/spec](https://github.com/ruby/spec), the MIT license. + +## Credits + +The following people have submitted code, bug reports, or otherwise contributed to the success of this project: + +* Alban Peignier +* Aman Gupta +* Andrea Fazzi +* Andreas Niederl +* Andrew Cholakian +* Antonio Terceiro +* Benoit Daloze +* Brian Candler +* Brian D. Burns +* Bryan Kearney +* Charlie Savage +* Chikanaga Tomoyuki +* Hongli Lai +* Ian MacLeod +* Jake Douglas +* Jean-Dominique Morani +* Jeremy Hinegardner +* Jesús García Sáez +* Joe Khoobyar +* Jurij Smakov +* KISHIMOTO, Makoto +* Kim Burgestrand +* Lars Kanis +* Luc Heinrich +* Luis Lavena +* Matijs van Zuijlen +* Matthew King +* Mike Dalessio +* NARUSE, Yui +* Park Heesob +* Shin Yee +* Stephen Bannasch +* Suraj N. Kurapati +* Sylvain Daubert +* Victor Costan +* beoran@gmail.com +* ctide +* emboss +* hobophobe +* meh +* postmodern +* wycats@gmail.com +* Wayne Meissner diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/Rakefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/Rakefile new file mode 100644 index 0000000..7175060 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/Rakefile @@ -0,0 +1,191 @@ +require 'rbconfig' +require 'date' +require 'fileutils' +require 'rbconfig' +require 'rspec/core/rake_task' +require 'rubygems/package_task' +require 'rake/extensiontask' +require_relative "lib/ffi/version" +require_relative "rakelib/ffi_gem_helper" + +BUILD_DIR = "build" +BUILD_EXT_DIR = File.join(BUILD_DIR, "#{RbConfig::CONFIG['arch']}", 'ffi_c', RUBY_VERSION) + +gem_spec = Bundler.load_gemspec('ffi.gemspec') + +RSpec::Core::RakeTask.new(:spec => :compile) do |config| + config.rspec_opts = YAML.load_file 'spec/spec.opts' +end + +desc "Build all packages" +task :package => %w[ gem:java gem:native ] + +CLOBBER.include 'lib/ffi/types.conf' +CLOBBER.include 'pkg' +CLOBBER.include 'log' + +CLEAN.include 'build' +CLEAN.include 'conftest.dSYM' +CLEAN.include 'spec/ffi/fixtures/libtest.{dylib,so,dll}' +CLEAN.include 'spec/ffi/fixtures/*.o' +CLEAN.include 'spec/ffi/embed-test/ext/*.{o,def}' +CLEAN.include 'spec/ffi/embed-test/ext/Makefile' +CLEAN.include "pkg/ffi-*-{mingw32,java}" +CLEAN.include 'lib/1.*' +CLEAN.include 'lib/2.*' + +# clean all shipped files, that are not in git +CLEAN.include( + gem_spec.files - + `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0").map { |f| File.join("ext/ffi_c/libffi", f) } - + `git ls-files -z`.split("\x0") +) + +task :distclean => :clobber + +desc "Test the extension" +task :test => [ :spec ] + + +namespace :bench do + ITER = ENV['ITER'] ? 
ENV['ITER'].to_i : 100000 + bench_files = Dir["bench/bench_*.rb"].sort.reject { |f| f == "bench/bench_helper.rb" } + bench_files.each do |bench| + task File.basename(bench, ".rb")[6..-1] => :compile do + sh %{#{Gem.ruby} #{bench} #{ITER}} + end + end + task :all => :compile do + bench_files.each do |bench| + sh %{#{Gem.ruby} #{bench}} + end + end +end + +task 'spec:run' => :compile +task 'spec:specdoc' => :compile + +task :default => :spec + +namespace 'java' do + + java_gem_spec = gem_spec.dup.tap do |s| + s.files.reject! { |f| File.fnmatch?("ext/*", f) } + s.extensions = [] + s.platform = 'java' + end + + Gem::PackageTask.new(java_gem_spec) do |pkg| + pkg.need_zip = true + pkg.need_tar = true + pkg.package_dir = 'pkg' + end +end + +task 'gem:java' => 'java:gem' + +FfiGemHelper.install_tasks +# Register windows gems to be pushed to rubygems.org +Bundler::GemHelper.instance.cross_platforms = %w[x86-mingw32 x64-mingw-ucrt x64-mingw32] + +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + require 'rake/extensiontask' + Rake::ExtensionTask.new('ffi_c', gem_spec) do |ext| + ext.name = 'ffi_c' # indicate the name of the extension. + # ext.lib_dir = BUILD_DIR # put binaries into this folder. + ext.tmp_dir = BUILD_DIR # temporary folder used during compilation. + ext.cross_compile = true # enable cross compilation (requires cross compile toolchain) + ext.cross_platform = Bundler::GemHelper.instance.cross_platforms + ext.cross_compiling do |spec| + spec.files.reject! { |path| File.fnmatch?('ext/*', path) } + end + + end +else + task :compile do + STDERR.puts "Nothing to compile on #{RUBY_ENGINE}" + end +end + + +namespace "gem" do + task 'prepare' do + require 'rake_compiler_dock' + sh "bundle package --all" + end + + Bundler::GemHelper.instance.cross_platforms.each do |plat| + desc "Build all native binary gems in parallel" + multitask 'native' => plat + + desc "Build the native gem for #{plat}" + task plat => ['prepare', 'build'] do + RakeCompilerDock.sh <<-EOT, platform: plat + sudo apt-get update && + sudo apt-get install -y libltdl-dev && bundle --local && + rake cross native gem MAKE='nice make -j`nproc`' RUBY_CC_VERSION=${RUBY_CC_VERSION/:2.2.2/} + EOT + end + end +end + +directory "ext/ffi_c/libffi" +file "ext/ffi_c/libffi/autogen.sh" => "ext/ffi_c/libffi" do + warn "Downloading libffi ..." + sh "git submodule update --init --recursive" +end +task :libffi => "ext/ffi_c/libffi/autogen.sh" + +LIBFFI_GIT_FILES = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + +# Generate files which are in the gemspec but not in libffi's git repo by running autogen.sh +gem_spec.files.select do |f| + f =~ /ext\/ffi_c\/libffi\/(.*)/ && !LIBFFI_GIT_FILES.include?($1) +end.each do |f| + file f => "ext/ffi_c/libffi/autogen.sh" do + chdir "ext/ffi_c/libffi" do + sh "sh ./autogen.sh" + end + touch f + if gem_spec.files != Gem::Specification.load('./ffi.gemspec').files + warn "gemspec files have changed -> Please restart rake!" 
+ exit 1 + end + end +end + +# Make sure we have all gemspec files before packaging +task :build => gem_spec.files +task :gem => :build + + +require_relative "lib/ffi/platform" +types_conf = File.expand_path(File.join(FFI::Platform::CONF_DIR, 'types.conf')) +logfile = File.join(File.dirname(__FILE__), 'types_log') + +task types_conf do |task| + require 'fileutils' + require_relative "lib/ffi/tools/types_generator" + options = {} + FileUtils.mkdir_p(File.dirname(task.name), mode: 0755 ) + File.open(task.name, File::CREAT|File::TRUNC|File::RDWR, 0644) do |f| + f.puts FFI::TypesGenerator.generate(options) + end + File.open(logfile, 'w') do |log| + log.puts(types_conf) + end +end + +desc "Create or update type information for platform #{FFI::Platform::NAME}" +task :types_conf => types_conf + +begin + require 'yard' + + namespace :doc do + YARD::Rake::YardocTask.new do |yard| + end + end +rescue LoadError + warn "[warn] YARD unavailable" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/.sitearchdir.time b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/.sitearchdir.time new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.c new file mode 100644 index 0000000..1a7fcde --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.c @@ -0,0 +1,1104 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Jake Douglas + * Copyright (C) 2008 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#ifndef _MSC_VER +# include +#endif +#include +#include + +#include +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Function.h" +#include "LongDouble.h" + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +static inline char* memory_address(VALUE self); +VALUE rbffi_AbstractMemoryClass = Qnil; +static VALUE NullPointerErrorClass = Qnil; +static ID id_to_ptr = 0, id_plus = 0, id_call = 0; + +static VALUE +memory_allocate(VALUE klass) +{ + AbstractMemory* memory; + VALUE obj; + obj = Data_Make_Struct(klass, AbstractMemory, NULL, -1, memory); + memory->flags = MEM_RD | MEM_WR; + + return obj; +} +#define VAL(x, swap) (unlikely(((memory->flags & MEM_SWAP) != 0)) ? swap((x)) : (x)) + +#define NUM_OP(name, type, toNative, fromNative, swap) \ +static void memory_op_put_##name(AbstractMemory* memory, long off, VALUE value); \ +static void \ +memory_op_put_##name(AbstractMemory* memory, long off, VALUE value) \ +{ \ + type tmp = (type) VAL(toNative(value), swap); \ + checkWrite(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(memory->address + off, &tmp, sizeof(tmp)); \ +} \ +static VALUE memory_put_##name(VALUE self, VALUE offset, VALUE value); \ +static VALUE \ +memory_put_##name(VALUE self, VALUE offset, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, NUM2LONG(offset), value); \ + return self; \ +} \ +static VALUE memory_write_##name(VALUE self, VALUE value); \ +static VALUE \ +memory_write_##name(VALUE self, VALUE value) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + memory_op_put_##name(memory, 0, value); \ + return self; \ +} \ +static VALUE memory_op_get_##name(AbstractMemory* memory, long off); \ +static VALUE \ +memory_op_get_##name(AbstractMemory* memory, long off) \ +{ \ + type tmp; \ + checkRead(memory); \ + checkBounds(memory, off, sizeof(type)); \ + memcpy(&tmp, memory->address + off, sizeof(tmp)); \ + return fromNative(VAL(tmp, swap)); \ +} \ +static VALUE memory_get_##name(VALUE self, VALUE offset); \ +static VALUE \ +memory_get_##name(VALUE self, VALUE offset) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, NUM2LONG(offset)); \ +} \ +static VALUE memory_read_##name(VALUE self); \ +static VALUE \ +memory_read_##name(VALUE self) \ +{ \ + AbstractMemory* memory; \ + Data_Get_Struct(self, AbstractMemory, memory); \ + return memory_op_get_##name(memory, 0); \ +} \ +static MemoryOp memory_op_##name = { memory_op_get_##name, memory_op_put_##name }; \ +\ +static VALUE memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary); \ +static VALUE \ +memory_put_array_of_##name(VALUE self, VALUE offset, VALUE ary) \ +{ \ + long count; \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + long i; \ + Check_Type(ary, T_ARRAY); \ + count = RARRAY_LEN(ary); \ + if (likely(count > 0)) checkWrite(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; i++) { \ + type tmp = (type) VAL(toNative(RARRAY_PTR(ary)[i]), swap); \ + memcpy(memory->address + off + (i * sizeof(type)), &tmp, sizeof(tmp)); \ + } \ + return self; \ +} \ +static VALUE 
memory_write_array_of_##name(VALUE self, VALUE ary); \ +static VALUE \ +memory_write_array_of_##name(VALUE self, VALUE ary) \ +{ \ + return memory_put_array_of_##name(self, INT2FIX(0), ary); \ +} \ +static VALUE memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length); \ +static VALUE \ +memory_get_array_of_##name(VALUE self, VALUE offset, VALUE length) \ +{ \ + long count = NUM2LONG(length); \ + long off = NUM2LONG(offset); \ + AbstractMemory* memory = MEMORY(self); \ + VALUE retVal = rb_ary_new2(count); \ + long i; \ + if (likely(count > 0)) checkRead(memory); \ + checkBounds(memory, off, count * sizeof(type)); \ + for (i = 0; i < count; ++i) { \ + type tmp; \ + memcpy(&tmp, memory->address + off + (i * sizeof(type)), sizeof(tmp)); \ + rb_ary_push(retVal, fromNative(VAL(tmp, swap))); \ + } \ + return retVal; \ +} \ +static VALUE memory_read_array_of_##name(VALUE self, VALUE length); \ +static VALUE \ +memory_read_array_of_##name(VALUE self, VALUE length) \ +{ \ + return memory_get_array_of_##name(self, INT2FIX(0), length); \ +} + +#define NOSWAP(x) (x) +#define bswap16(x) (((x) >> 8) & 0xff) | (((x) << 8) & 0xff00); +static inline int16_t +SWAPS16(int16_t x) +{ + return bswap16(x); +} + +static inline uint16_t +SWAPU16(uint16_t x) +{ + return bswap16(x); +} + +#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#define bswap32(x) \ + (((x << 24) & 0xff000000) | \ + ((x << 8) & 0x00ff0000) | \ + ((x >> 8) & 0x0000ff00) | \ + ((x >> 24) & 0x000000ff)) + +#define bswap64(x) \ + (((x << 56) & 0xff00000000000000ULL) | \ + ((x << 40) & 0x00ff000000000000ULL) | \ + ((x << 24) & 0x0000ff0000000000ULL) | \ + ((x << 8) & 0x000000ff00000000ULL) | \ + ((x >> 8) & 0x00000000ff000000ULL) | \ + ((x >> 24) & 0x0000000000ff0000ULL) | \ + ((x >> 40) & 0x000000000000ff00ULL) | \ + ((x >> 56) & 0x00000000000000ffULL)) + +static inline int32_t +SWAPS32(int32_t x) +{ + return bswap32(x); +} + +static inline uint32_t +SWAPU32(uint32_t x) +{ + return bswap32(x); +} + +static inline int64_t +SWAPS64(int64_t x) +{ + return bswap64(x); +} + +static inline uint64_t +SWAPU64(uint64_t x) +{ + return bswap64(x); +} + +#else +# define SWAPS32(x) ((int32_t) __builtin_bswap32(x)) +# define SWAPU32(x) ((uint32_t) __builtin_bswap32(x)) +# define SWAPS64(x) ((int64_t) __builtin_bswap64(x)) +# define SWAPU64(x) ((uint64_t) __builtin_bswap64(x)) +#endif + +#if LONG_MAX > INT_MAX +# define SWAPSLONG SWAPS64 +# define SWAPULONG SWAPU64 +#else +# define SWAPSLONG SWAPS32 +# define SWAPULONG SWAPU32 +#endif + +NUM_OP(int8, int8_t, NUM2INT, INT2NUM, NOSWAP); +NUM_OP(uint8, uint8_t, NUM2UINT, UINT2NUM, NOSWAP); +NUM_OP(int16, int16_t, NUM2INT, INT2NUM, SWAPS16); +NUM_OP(uint16, uint16_t, NUM2UINT, UINT2NUM, SWAPU16); +NUM_OP(int32, int32_t, NUM2INT, INT2NUM, SWAPS32); +NUM_OP(uint32, uint32_t, NUM2UINT, UINT2NUM, SWAPU32); +NUM_OP(int64, int64_t, NUM2LL, LL2NUM, SWAPS64); +NUM_OP(uint64, uint64_t, NUM2ULL, ULL2NUM, SWAPU64); +NUM_OP(long, long, NUM2LONG, LONG2NUM, SWAPSLONG); +NUM_OP(ulong, unsigned long, NUM2ULONG, ULONG2NUM, SWAPULONG); +NUM_OP(float32, float, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(float64, double, NUM2DBL, rb_float_new, NOSWAP); +NUM_OP(longdouble, long double, rbffi_num2longdouble, rbffi_longdouble_new, NOSWAP); + +static inline void* +get_pointer_value(VALUE value) +{ + const int type = TYPE(value); + if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_PointerClass)) { + return memory_address(value); + } else if (type == T_NIL) { + return NULL; + } else if (type == 
T_FIXNUM) { + return (void *) (uintptr_t) FIX2ULONG(value); + } else if (type == T_BIGNUM) { + return (void *) (uintptr_t) NUM2ULL(value); + } else if (rb_respond_to(value, id_to_ptr)) { + return MEMORY_PTR(rb_funcall2(value, id_to_ptr, 0, NULL)); + } else { + rb_raise(rb_eArgError, "value is not a pointer"); + return NULL; + } +} + +NUM_OP(pointer, void *, get_pointer_value, rbffi_Pointer_NewInstance, NOSWAP); + +static inline uint8_t +rbffi_bool_value(VALUE value) +{ + return RTEST(value); +} + +static inline VALUE +rbffi_bool_new(uint8_t value) +{ + return (value & 1) != 0 ? Qtrue : Qfalse; +} + +NUM_OP(bool, unsigned char, rbffi_bool_value, rbffi_bool_new, NOSWAP); + + +/* + * call-seq: memory.clear + * Set the memory to all-zero. + * @return [self] + */ +static VALUE +memory_clear(VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + memset(ptr->address, 0, ptr->size); + return self; +} + +/* + * call-seq: memory.size + * Return memory size in bytes (alias: #total) + * @return [Numeric] + */ +static VALUE +memory_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return LONG2NUM(ptr->size); +} + +/* + * call-seq: memory.get(type, offset) + * Return data of given type contained in memory. + * @param [Symbol, Type] type_name type of data to get + * @param [Numeric] offset point in buffer to start from + * @return [Object] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_get(VALUE self, VALUE type_name, VALUE offset) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + return op->get(ptr, NUM2LONG(offset)); + +undefined_type: { + VALUE msg = rb_sprintf("undefined type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.put(type, offset, value) + * @param [Symbol, Type] type_name type of data to put + * @param [Numeric] offset point in buffer to start from + * @return [nil] + * @raise {ArgumentError} if type is not supported + */ +static VALUE +memory_put(VALUE self, VALUE type_name, VALUE offset, VALUE value) +{ + AbstractMemory* ptr; + VALUE nType; + Type *type; + + nType = rbffi_Type_Lookup(type_name); + if(NIL_P(nType)) goto undefined_type; + + Data_Get_Struct(self, AbstractMemory, ptr); + Data_Get_Struct(nType, Type, type); + + MemoryOp *op = get_memory_op(type); + if(op == NULL) goto undefined_type; + + op->put(ptr, NUM2LONG(offset), value); + return Qnil; + +undefined_type: { + VALUE msg = rb_sprintf("unsupported type '%" PRIsVALUE "'", type_name); + rb_exc_raise(rb_exc_new3(rb_eArgError, msg)); + return Qnil; + } +} + +/* + * call-seq: memory.get_string(offset, length=nil) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). 
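+ * @example Editorial illustration (not upstream documentation), assuming +memory+ has room for the string:
+ *   memory.put_string(0, "hello")
+ *   memory.get_string(0)     # => "hello"
+ *   memory.get_string(0, 3)  # => "hel"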
+ * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_string(int argc, VALUE* argv, VALUE self) +{ + VALUE length = Qnil, offset = Qnil; + AbstractMemory* ptr = MEMORY(self); + long off, len; + char* end; + int nargs = rb_scan_args(argc, argv, "11", &offset, &length); + + off = NUM2LONG(offset); + len = nargs > 1 && length != Qnil ? NUM2LONG(length) : (ptr->size - off); + checkRead(ptr); + checkBounds(ptr, off, len); + + end = memchr(ptr->address + off, 0, len); + return rb_str_new((char *) ptr->address + off, + (end != NULL ? end - ptr->address - off : len)); +} + +/* + * call-seq: memory.get_array_of_string(offset, count=nil) + * Return an array of strings contained in memory. + * @param [Numeric] offset point in memory to start from + * @param [Numeric] count number of strings to get. If nil, return all strings + * @return [Array] + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE offset = Qnil, countnum = Qnil, retVal = Qnil; + AbstractMemory* ptr; + long off; + int count; + + rb_scan_args(argc, argv, "11", &offset, &countnum); + off = NUM2LONG(offset); + count = (countnum == Qnil ? 0 : NUM2INT(countnum)); + retVal = rb_ary_new2(count); + + Data_Get_Struct(self, AbstractMemory, ptr); + checkRead(ptr); + + if (countnum != Qnil) { + int i; + + checkBounds(ptr, off, count * sizeof (char*)); + + for (i = 0; i < count; ++i) { + const char* strptr = *((const char**) (ptr->address + off) + i); + rb_ary_push(retVal, (strptr == NULL ? Qnil : rb_str_new2(strptr))); + } + + } else { + checkBounds(ptr, off, sizeof (char*)); + for ( ; off < ptr->size - (long) sizeof (void *); off += (long) sizeof (void *)) { + const char* strptr = *(const char**) (ptr->address + off); + if (strptr == NULL) { + break; + } + rb_ary_push(retVal, rb_str_new2(strptr)); + } + } + + return retVal; +} + +/* + * call-seq: memory.read_array_of_string(count=nil) + * Return an array of strings contained in memory. Same as: + * memory.get_array_of_string(0, count) + * @param [Numeric] count number of strings to get. If nil, return all strings + * @return [Array] + */ +static VALUE +memory_read_array_of_string(int argc, VALUE* argv, VALUE self) +{ + VALUE* rargv = ALLOCA_N(VALUE, argc + 1); + int i; + + rargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + rargv[i + 1] = argv[i]; + } + + return memory_get_array_of_string(argc + 1, rargv, self); +} + + +/* + * call-seq: memory.put_string(offset, str) + * @param [Numeric] offset + * @param [String] str + * @return [self] + * @raise {SecurityError} when writing unsafe string to memory + * @raise {IndexError} if +offset+ is too great + * @raise {NullPointerError} if memory not initialized + * Put a string in memory. + */ +static VALUE +memory_put_string(VALUE self, VALUE offset, VALUE str) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + Check_Type(str, T_STRING); + off = NUM2LONG(offset); + len = RSTRING_LEN(str); + + checkWrite(ptr); + checkBounds(ptr, off, len + 1); + + memcpy(ptr->address + off, RSTRING_PTR(str), len); + *((char *) ptr->address + off + len) = '\0'; + + return self; +} + +/* + * call-seq: memory.get_bytes(offset, length) + * Return string contained in memory. + * @param [Numeric] offset point in buffer to start from + * @param [Numeric] length string's length in bytes. 
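+ * @example Editorial illustration (not upstream documentation); unlike #get_string, no NUL scan is performed:
+ *   memory.put_bytes(0, "a\0b")
+ *   memory.get_bytes(0, 3)   # => "a\0b" (three raw bytes)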
+ * @return [String] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + */ +static VALUE +memory_get_bytes(VALUE self, VALUE offset, VALUE length) +{ + AbstractMemory* ptr = MEMORY(self); + long off, len; + + off = NUM2LONG(offset); + len = NUM2LONG(length); + + checkRead(ptr); + checkBounds(ptr, off, len); + + return rb_str_new((char *) ptr->address + off, len); +} + +/* + * call-seq: memory.put_bytes(offset, str, index=0, length=nil) + * Put a string in memory. + * @param [Numeric] offset point in buffer to start from + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [self] + * @raise {IndexError} if +length+ is too great + * @raise {NullPointerError} if memory not initialized + * @raise {RangeError} if +index+ is negative, or if index+length is greater than size of string + * @raise {SecurityError} when writing unsafe string to memory + */ +static VALUE +memory_put_bytes(int argc, VALUE* argv, VALUE self) +{ + AbstractMemory* ptr = MEMORY(self); + VALUE offset = Qnil, str = Qnil, rbIndex = Qnil, rbLength = Qnil; + long off, len, idx; + int nargs = rb_scan_args(argc, argv, "22", &offset, &str, &rbIndex, &rbLength); + + Check_Type(str, T_STRING); + + off = NUM2LONG(offset); + idx = nargs > 2 ? NUM2LONG(rbIndex) : 0; + if (idx < 0) { + rb_raise(rb_eRangeError, "index cannot be less than zero"); + return Qnil; + } + len = nargs > 3 ? NUM2LONG(rbLength) : (RSTRING_LEN(str) - idx); + if ((idx + len) > RSTRING_LEN(str)) { + rb_raise(rb_eRangeError, "index+length is greater than size of string"); + return Qnil; + } + + checkWrite(ptr); + checkBounds(ptr, off, len); + + memcpy(ptr->address + off, RSTRING_PTR(str) + idx, len); + + return self; +} + +/* + * call-seq: memory.read_bytes(length) + * @param [Numeric] length of string to return + * @return [String] + * equivalent to : + * memory.get_bytes(0, length) + */ +static VALUE +memory_read_bytes(VALUE self, VALUE length) +{ + return memory_get_bytes(self, INT2FIX(0), length); +} + +/* + * call-seq: memory.write_bytes(str, index=0, length=nil) + * @param [String] str string to put to memory + * @param [Numeric] index + * @param [Numeric] length string's length in bytes. If nil, a (memory size - offset) length string is returned). + * @return [self] + * equivalent to : + * memory.put_bytes(0, str, index, length) + */ +static VALUE +memory_write_bytes(int argc, VALUE* argv, VALUE self) +{ + VALUE* wargv = ALLOCA_N(VALUE, argc + 1); + int i; + + wargv[0] = INT2FIX(0); + for (i = 0; i < argc; i++) { + wargv[i + 1] = argv[i]; + } + + return memory_put_bytes(argc + 1, wargv, self); +} + +/* + * call-seq: memory.type_size + * @return [Numeric] type size in bytes + * Get the memory's type size. + */ +static VALUE +memory_type_size(VALUE self) +{ + AbstractMemory* ptr; + + Data_Get_Struct(self, AbstractMemory, ptr); + + return INT2NUM(ptr->typeSize); +} + +/* + * Document-method: [] + * call-seq: memory[idx] + * @param [Numeric] idx index to access in memory + * @return + * Memory read accessor. 
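+ * @example Editorial illustration (not upstream documentation): +memory[idx]+ returns a pointer advanced by idx * type_size bytes:
+ *   ptr = FFI::MemoryPointer.new(:int32, 4)
+ *   ptr[2].address - ptr.address   # => 8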
+ */ +static VALUE +memory_aref(VALUE self, VALUE idx) +{ + AbstractMemory* ptr; + VALUE rbOffset = Qnil; + + Data_Get_Struct(self, AbstractMemory, ptr); + + rbOffset = ULONG2NUM(NUM2ULONG(idx) * ptr->typeSize); + + return rb_funcall2(self, id_plus, 1, &rbOffset); +} + +static inline char* +memory_address(VALUE obj) +{ + return ((AbstractMemory *) DATA_PTR(obj))->address; +} + +static VALUE +memory_copy_from(VALUE self, VALUE rbsrc, VALUE rblen) +{ + AbstractMemory* dst; + + Data_Get_Struct(self, AbstractMemory, dst); + + memcpy(dst->address, rbffi_AbstractMemory_Cast(rbsrc, rbffi_AbstractMemoryClass)->address, NUM2INT(rblen)); + + return self; +} + +AbstractMemory* +rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass) +{ + if (rb_obj_is_kind_of(obj, klass)) { + AbstractMemory* memory; + Data_Get_Struct(obj, AbstractMemory, memory); + return memory; + } + + rb_raise(rb_eArgError, "Invalid Memory object"); + return NULL; +} + +void +rbffi_AbstractMemory_Error(AbstractMemory *mem, int op) +{ + VALUE rbErrorClass = mem->address == NULL ? NullPointerErrorClass : rb_eRuntimeError; + if (op == MEM_RD) { + rb_raise(rbErrorClass, "invalid memory read at address=%p", mem->address); + } else if (op == MEM_WR) { + rb_raise(rbErrorClass, "invalid memory write at address=%p", mem->address); + } else { + rb_raise(rbErrorClass, "invalid memory access at address=%p", mem->address); + } +} + +static VALUE +memory_op_get_strptr(AbstractMemory* ptr, long offset) +{ + void* tmp = NULL; + + if (ptr != NULL && ptr->address != NULL) { + checkRead(ptr); + checkBounds(ptr, offset, sizeof(tmp)); + memcpy(&tmp, ptr->address + offset, sizeof(tmp)); + } + + return tmp != NULL ? rb_str_new2(tmp) : Qnil; +} + +static void +memory_op_put_strptr(AbstractMemory* ptr, long offset, VALUE value) +{ + rb_raise(rb_eArgError, "Cannot set :string fields"); +} + +static MemoryOp memory_op_strptr = { memory_op_get_strptr, memory_op_put_strptr }; + + +MemoryOps rbffi_AbstractMemoryOps = { + &memory_op_int8, /*.int8 */ + &memory_op_uint8, /* .uint8 */ + &memory_op_int16, /* .int16 */ + &memory_op_uint16, /* .uint16 */ + &memory_op_int32, /* .int32 */ + &memory_op_uint32, /* .uint32 */ + &memory_op_int64, /* .int64 */ + &memory_op_uint64, /* .uint64 */ + &memory_op_long, /* .slong */ + &memory_op_ulong, /* .uslong */ + &memory_op_float32, /* .float32 */ + &memory_op_float64, /* .float64 */ + &memory_op_longdouble, /* .longdouble */ + &memory_op_pointer, /* .pointer */ + &memory_op_strptr, /* .strptr */ + &memory_op_bool /* .boolOp */ +}; + +void +rbffi_AbstractMemory_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::AbstractMemory + * + * {AbstractMemory} is the base class for many memory management classes such as {Buffer}. + * + * This class has a lot of methods to work with integers : + * * put_intsize(offset, value) + * * get_intsize(offset) + * * put_uintsize(offset, value) + * * get_uintsize(offset) + * * writeuintsize(value) + * * read_intsize + * * write_uintsize(value) + * * read_uintsize + * * put_array_of_intsize(offset, ary) + * * get_array_of_intsize(offset, length) + * * put_array_of_uintsize(offset, ary) + * * get_array_of_uintsize(offset, length) + * * write_array_of_intsize(ary) + * * read_array_of_intsize(length) + * * write_array_of_uintsize(ary) + * * read_array_of_uintsize(length) + * where _size_ is 8, 16, 32 or 64. Same methods exist for long type. + * + * Aliases exist : _char_ for _int8_, _short_ for _int16_, _int_ for _int32_ and long_long for _int64_. + * + * Others methods are listed below. 
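+ *
+ * @example Editorial illustration (not upstream documentation) of the generated accessors, assuming a 16-byte MemoryPointer:
+ *   mem = FFI::MemoryPointer.new(16)
+ *   mem.put_int32(4, -1)
+ *   mem.get_int32(4)      # => -1
+ *   mem.get_uint32(4)     # => 4294967295
+ *   mem.write_int64(42)   # same as mem.put_int64(0, 42)
+ *   mem.read_int64        # => 42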
+ */ + VALUE classMemory = rb_define_class_under(moduleFFI, "AbstractMemory", rb_cObject); + rbffi_AbstractMemoryClass = classMemory; + /* + * Document-variable: FFI::AbstractMemory + */ + rb_global_variable(&rbffi_AbstractMemoryClass); + rb_define_alloc_func(classMemory, memory_allocate); + + NullPointerErrorClass = rb_define_class_under(moduleFFI, "NullPointerError", rb_eRuntimeError); + /* Document-variable: NullPointerError */ + rb_global_variable(&NullPointerErrorClass); + + +#undef INT +#define INT(type) \ + rb_define_method(classMemory, "put_" #type, memory_put_##type, 2); \ + rb_define_method(classMemory, "get_" #type, memory_get_##type, 1); \ + rb_define_method(classMemory, "put_u" #type, memory_put_u##type, 2); \ + rb_define_method(classMemory, "get_u" #type, memory_get_u##type, 1); \ + rb_define_method(classMemory, "write_" #type, memory_write_##type, 1); \ + rb_define_method(classMemory, "read_" #type, memory_read_##type, 0); \ + rb_define_method(classMemory, "write_u" #type, memory_write_u##type, 1); \ + rb_define_method(classMemory, "read_u" #type, memory_read_u##type, 0); \ + rb_define_method(classMemory, "put_array_of_" #type, memory_put_array_of_##type, 2); \ + rb_define_method(classMemory, "get_array_of_" #type, memory_get_array_of_##type, 2); \ + rb_define_method(classMemory, "put_array_of_u" #type, memory_put_array_of_u##type, 2); \ + rb_define_method(classMemory, "get_array_of_u" #type, memory_get_array_of_u##type, 2); \ + rb_define_method(classMemory, "write_array_of_" #type, memory_write_array_of_##type, 1); \ + rb_define_method(classMemory, "read_array_of_" #type, memory_read_array_of_##type, 1); \ + rb_define_method(classMemory, "write_array_of_u" #type, memory_write_array_of_u##type, 1); \ + rb_define_method(classMemory, "read_array_of_u" #type, memory_read_array_of_u##type, 1); + + INT(int8); + INT(int16); + INT(int32); + INT(int64); + INT(long); + +#define ALIAS(name, old) \ + rb_define_alias(classMemory, "put_" #name, "put_" #old); \ + rb_define_alias(classMemory, "get_" #name, "get_" #old); \ + rb_define_alias(classMemory, "put_u" #name, "put_u" #old); \ + rb_define_alias(classMemory, "get_u" #name, "get_u" #old); \ + rb_define_alias(classMemory, "write_" #name, "write_" #old); \ + rb_define_alias(classMemory, "read_" #name, "read_" #old); \ + rb_define_alias(classMemory, "write_u" #name, "write_u" #old); \ + rb_define_alias(classMemory, "read_u" #name, "read_u" #old); \ + rb_define_alias(classMemory, "put_array_of_" #name, "put_array_of_" #old); \ + rb_define_alias(classMemory, "get_array_of_" #name, "get_array_of_" #old); \ + rb_define_alias(classMemory, "put_array_of_u" #name, "put_array_of_u" #old); \ + rb_define_alias(classMemory, "get_array_of_u" #name, "get_array_of_u" #old); \ + rb_define_alias(classMemory, "write_array_of_" #name, "write_array_of_" #old); \ + rb_define_alias(classMemory, "read_array_of_" #name, "read_array_of_" #old); \ + rb_define_alias(classMemory, "write_array_of_u" #name, "write_array_of_u" #old); \ + rb_define_alias(classMemory, "read_array_of_u" #name, "read_array_of_u" #old); + + ALIAS(char, int8); + ALIAS(short, int16); + ALIAS(int, int32); + ALIAS(long_long, int64); + + /* + * Document-method: put_float32 + * call-seq: memory.put_float32offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 32-bit float in memory at offset +offset+ (alias: #put_float). 
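+ * @example Editorial illustration (not upstream documentation), assuming +mem+ is a MemoryPointer of at least 4 bytes:
+ *   mem.put_float32(0, 1.5)   # 1.5 is exactly representable as a 32-bit float
+ *   mem.get_float32(0)        # => 1.5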
+ */ + rb_define_method(classMemory, "put_float32", memory_put_float32, 2); + /* + * Document-method: get_float32 + * call-seq: memory.get_float32(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 32-bit float from memory at offset +offset+ (alias: #get_float). + */ + rb_define_method(classMemory, "get_float32", memory_get_float32, 1); + rb_define_alias(classMemory, "put_float", "put_float32"); + rb_define_alias(classMemory, "get_float", "get_float32"); + /* + * Document-method: write_float + * call-seq: memory.write_float(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 32-bit float in memory. + * + * Same as: + * memory.put_float(0, value) + */ + rb_define_method(classMemory, "write_float", memory_write_float32, 1); + /* + * Document-method: read_float + * call-seq: memory.read_float + * @return [Float] + * Read a 32-bit float from memory. + * + * Same as: + * memory.get_float(0) + */ + rb_define_method(classMemory, "read_float", memory_read_float32, 0); + /* + * Document-method: put_array_of_float32 + * call-seq: memory.put_array_of_float32(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 32-bit floats in memory from offset +offset+ (alias: #put_array_of_float). + */ + rb_define_method(classMemory, "put_array_of_float32", memory_put_array_of_float32, 2); + /* + * Document-method: get_array_of_float32 + * call-seq: memory.get_array_of_float32(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 32-bit floats in memory from offset +offset+ (alias: #get_array_of_float). + */ + rb_define_method(classMemory, "get_array_of_float32", memory_get_array_of_float32, 2); + /* + * Document-method: write_array_of_float + * call-seq: memory.write_array_of_float(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 32-bit floats in memory. + * + * Same as: + * memory.put_array_of_float(0, ary) + */ + rb_define_method(classMemory, "write_array_of_float", memory_write_array_of_float32, 1); + /* + * Document-method: read_array_of_float + * call-seq: memory.read_array_of_float(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 32-bit floats from memory. + * + * Same as: + * memory.get_array_of_float(0, ary) + */ + rb_define_method(classMemory, "read_array_of_float", memory_read_array_of_float32, 1); + rb_define_alias(classMemory, "put_array_of_float", "put_array_of_float32"); + rb_define_alias(classMemory, "get_array_of_float", "get_array_of_float32"); + /* + * Document-method: put_float64 + * call-seq: memory.put_float64(offset, value) + * @param [Numeric] offset + * @param [Numeric] value + * @return [self] + * Put +value+ as a 64-bit float (double) in memory at offset +offset+ (alias: #put_double). + */ + rb_define_method(classMemory, "put_float64", memory_put_float64, 2); + /* + * Document-method: get_float64 + * call-seq: memory.get_float64(offset) + * @param [Numeric] offset + * @return [Float] + * Get a 64-bit float (double) from memory at offset +offset+ (alias: #get_double). + */ + rb_define_method(classMemory, "get_float64", memory_get_float64, 1); + rb_define_alias(classMemory, "put_double", "put_float64"); + rb_define_alias(classMemory, "get_double", "get_float64"); + /* + * Document-method: write_double + * call-seq: memory.write_double(value) + * @param [Numeric] value + * @return [self] + * Write +value+ as a 64-bit float (double) in memory. 
+ * + * Same as: + * memory.put_double(0, value) + */ + rb_define_method(classMemory, "write_double", memory_write_float64, 1); + /* + * Document-method: read_double + * call-seq: memory.read_double + * @return [Float] + * Read a 64-bit float (double) from memory. + * + * Same as: + * memory.get_double(0) + */ + rb_define_method(classMemory, "read_double", memory_read_float64, 0); + /* + * Document-method: put_array_of_float64 + * call-seq: memory.put_array_of_float64(offset, ary) + * @param [Numeric] offset + * @param [Array] ary + * @return [self] + * Put values from +ary+ as 64-bit floats (doubles) in memory from offset +offset+ (alias: #put_array_of_double). + */ + rb_define_method(classMemory, "put_array_of_float64", memory_put_array_of_float64, 2); + /* + * Document-method: get_array_of_float64 + * call-seq: memory.get_array_of_float64(offset, length) + * @param [Numeric] offset + * @param [Numeric] length number of Float to get + * @return [Array] + * Get 64-bit floats (doubles) in memory from offset +offset+ (alias: #get_array_of_double). + */ + rb_define_method(classMemory, "get_array_of_float64", memory_get_array_of_float64, 2); + /* + * Document-method: write_array_of_double + * call-seq: memory.write_array_of_double(ary) + * @param [Array] ary + * @return [self] + * Write values from +ary+ as 64-bit floats (doubles) in memory. + * + * Same as: + * memory.put_array_of_double(0, ary) + */ + rb_define_method(classMemory, "write_array_of_double", memory_write_array_of_float64, 1); + /* + * Document-method: read_array_of_double + * call-seq: memory.read_array_of_double(length) + * @param [Numeric] length number of Float to read + * @return [Array] + * Read 64-bit floats (doubles) from memory. + * + * Same as: + * memory.get_array_of_double(0, ary) + */ + rb_define_method(classMemory, "read_array_of_double", memory_read_array_of_float64, 1); + rb_define_alias(classMemory, "put_array_of_double", "put_array_of_float64"); + rb_define_alias(classMemory, "get_array_of_double", "get_array_of_float64"); + /* + * Document-method: put_pointer + * call-seq: memory.put_pointer(offset, value) + * @param [Numeric] offset + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Put +value+ in memory from +offset+.. + */ + rb_define_method(classMemory, "put_pointer", memory_put_pointer, 2); + /* + * Document-method: get_pointer + * call-seq: memory.get_pointer(offset) + * @param [Numeric] offset + * @return [Pointer] + * Get a {Pointer} to the memory from +offset+. + */ + rb_define_method(classMemory, "get_pointer", memory_get_pointer, 1); + /* + * Document-method: write_pointer + * call-seq: memory.write_pointer(value) + * @param [nil,Pointer, Integer, #to_ptr] value + * @return [self] + * Write +value+ in memory. + * + * Equivalent to: + * memory.put_pointer(0, value) + */ + rb_define_method(classMemory, "write_pointer", memory_write_pointer, 1); + /* + * Document-method: read_pointer + * call-seq: memory.read_pointer + * @return [Pointer] + * Get a {Pointer} to the memory from base address. + * + * Equivalent to: + * memory.get_pointer(0) + */ + rb_define_method(classMemory, "read_pointer", memory_read_pointer, 0); + /* + * Document-method: put_array_of_pointer + * call-seq: memory.put_array_of_pointer(offset, ary) + * @param [Numeric] offset + * @param [Array<#to_ptr>] ary + * @return [self] + * Put an array of {Pointer} into memory from +offset+. 
+ */ + rb_define_method(classMemory, "put_array_of_pointer", memory_put_array_of_pointer, 2); + /* + * Document-method: get_array_of_pointer + * call-seq: memory.get_array_of_pointer(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Array] + * Get an array of {Pointer} of length +length+ from +offset+. + */ + rb_define_method(classMemory, "get_array_of_pointer", memory_get_array_of_pointer, 2); + /* + * Document-method: write_array_of_pointer + * call-seq: memory.write_array_of_pointer(ary) + * @param [Array<#to_ptr>] ary + * @return [self] + * Write an array of {Pointer} into memory from +offset+. + * + * Same as : + * memory.put_array_of_pointer(0, ary) + */ + rb_define_method(classMemory, "write_array_of_pointer", memory_write_array_of_pointer, 1); + /* + * Document-method: read_array_of_pointer + * call-seq: memory.read_array_of_pointer(length) + * @param [Numeric] length + * @return [Array] + * Read an array of {Pointer} of length +length+. + * + * Same as: + * memory.get_array_of_pointer(0, length) + */ + rb_define_method(classMemory, "read_array_of_pointer", memory_read_array_of_pointer, 1); + + rb_define_method(classMemory, "get_string", memory_get_string, -1); + rb_define_method(classMemory, "put_string", memory_put_string, 2); + rb_define_method(classMemory, "get_bytes", memory_get_bytes, 2); + rb_define_method(classMemory, "put_bytes", memory_put_bytes, -1); + rb_define_method(classMemory, "read_bytes", memory_read_bytes, 1); + rb_define_method(classMemory, "write_bytes", memory_write_bytes, -1); + rb_define_method(classMemory, "get_array_of_string", memory_get_array_of_string, -1); + + rb_define_method(classMemory, "get", memory_get, 2); + rb_define_method(classMemory, "put", memory_put, 3); + + rb_define_method(classMemory, "clear", memory_clear, 0); + rb_define_method(classMemory, "total", memory_size, 0); + rb_define_alias(classMemory, "size", "total"); + rb_define_method(classMemory, "type_size", memory_type_size, 0); + rb_define_method(classMemory, "[]", memory_aref, 1); + rb_define_method(classMemory, "__copy_from__", memory_copy_from, 2); + + id_to_ptr = rb_intern("to_ptr"); + id_call = rb_intern("call"); + id_plus = rb_intern("+"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.h new file mode 100644 index 0000000..1119288 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_ABSTRACTMEMORY_H +#define RBFFI_ABSTRACTMEMORY_H + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _MSC_VER +#include +#endif + +#include "compat.h" +#include "Types.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#define MEM_RD 0x01 +#define MEM_WR 0x02 +#define MEM_CODE 0x04 +#define MEM_SWAP 0x08 +#define MEM_EMBED 0x10 + +typedef struct AbstractMemory_ AbstractMemory; + +typedef struct { + VALUE (*get)(AbstractMemory* ptr, long offset); + void (*put)(AbstractMemory* ptr, long offset, VALUE value); +} MemoryOp; + +typedef struct { + MemoryOp* int8; + MemoryOp* uint8; + MemoryOp* int16; + MemoryOp* uint16; + MemoryOp* int32; + MemoryOp* uint32; + MemoryOp* int64; + MemoryOp* uint64; + MemoryOp* slong; + MemoryOp* uslong; + MemoryOp* float32; + MemoryOp* float64; + MemoryOp* longdouble; + MemoryOp* pointer; + MemoryOp* strptr; + MemoryOp* boolOp; +} MemoryOps; + +struct AbstractMemory_ { + char* address; /* Use char* instead of void* to ensure adding to it works correctly */ + long size; + int flags; + int typeSize; +}; + + +extern VALUE rbffi_AbstractMemoryClass; +extern MemoryOps rbffi_AbstractMemoryOps; + +extern void rbffi_AbstractMemory_Init(VALUE ffiModule); + +extern AbstractMemory* rbffi_AbstractMemory_Cast(VALUE obj, VALUE klass); + +extern void rbffi_AbstractMemory_Error(AbstractMemory *, int op); + +static inline void +checkBounds(AbstractMemory* mem, long off, long len) +{ + if (unlikely((off | len | (off + len) | (mem->size - (off + len))) < 0)) { + rb_raise(rb_eIndexError, "Memory access offset=%ld size=%ld is out of bounds", + off, len); + } +} + +static inline void +checkRead(AbstractMemory* mem) +{ + if (unlikely((mem->flags & MEM_RD) == 0)) { + rbffi_AbstractMemory_Error(mem, MEM_RD); + } +} + +static inline void +checkWrite(AbstractMemory* mem) +{ + if (unlikely((mem->flags & MEM_WR) == 0)) { + rbffi_AbstractMemory_Error(mem, MEM_WR); + } +} + +static inline MemoryOp* +get_memory_op(Type* type) +{ + switch (type->nativeType) { + case NATIVE_INT8: + return rbffi_AbstractMemoryOps.int8; + case NATIVE_UINT8: + return rbffi_AbstractMemoryOps.uint8; + case NATIVE_INT16: + return rbffi_AbstractMemoryOps.int16; + case NATIVE_UINT16: + return rbffi_AbstractMemoryOps.uint16; + case NATIVE_INT32: + return rbffi_AbstractMemoryOps.int32; + case NATIVE_UINT32: + return rbffi_AbstractMemoryOps.uint32; + case NATIVE_INT64: + return rbffi_AbstractMemoryOps.int64; + case NATIVE_UINT64: + return rbffi_AbstractMemoryOps.uint64; + case NATIVE_LONG: + return rbffi_AbstractMemoryOps.slong; + case NATIVE_ULONG: + return rbffi_AbstractMemoryOps.uslong; + case NATIVE_FLOAT32: + return rbffi_AbstractMemoryOps.float32; + case NATIVE_FLOAT64: + return rbffi_AbstractMemoryOps.float64; + case NATIVE_LONGDOUBLE: + 
return rbffi_AbstractMemoryOps.longdouble; + case NATIVE_POINTER: + return rbffi_AbstractMemoryOps.pointer; + case NATIVE_STRING: + return rbffi_AbstractMemoryOps.strptr; + case NATIVE_BOOL: + return rbffi_AbstractMemoryOps.boolOp; + default: + return NULL; + } +} + +#define MEMORY(obj) rbffi_AbstractMemory_Cast((obj), rbffi_AbstractMemoryClass) +#define MEMORY_PTR(obj) MEMORY((obj))->address +#define MEMORY_LEN(obj) MEMORY((obj))->size + + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_ABSTRACTMEMORY_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.o new file mode 100644 index 0000000..8ef8d8e Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/AbstractMemory.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.c new file mode 100644 index 0000000..bfd666a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include "ArrayType.h" + +static VALUE array_type_s_allocate(VALUE klass); +static VALUE array_type_initialize(VALUE self, VALUE rbComponentType, VALUE rbLength); +static void array_type_mark(ArrayType *); +static void array_type_free(ArrayType *); + +VALUE rbffi_ArrayTypeClass = Qnil; + +static VALUE +array_type_s_allocate(VALUE klass) +{ + ArrayType* array; + VALUE obj; + + obj = Data_Make_Struct(klass, ArrayType, array_type_mark, array_type_free, array); + + array->base.nativeType = NATIVE_ARRAY; + array->base.ffiType = xcalloc(1, sizeof(*array->base.ffiType)); + array->base.ffiType->type = FFI_TYPE_STRUCT; + array->base.ffiType->size = 0; + array->base.ffiType->alignment = 0; + array->rbComponentType = Qnil; + + return obj; +} + +static void +array_type_mark(ArrayType *array) +{ + rb_gc_mark(array->rbComponentType); +} + +static void +array_type_free(ArrayType *array) +{ + xfree(array->base.ffiType); + xfree(array->ffiTypes); + xfree(array); +} + + +/* + * call-seq: initialize(component_type, length) + * @param [Type] component_type + * @param [Numeric] length + * @return [self] + * A new instance of ArrayType. + */ +static VALUE +array_type_initialize(VALUE self, VALUE rbComponentType, VALUE rbLength) +{ + ArrayType* array; + int i; + + Data_Get_Struct(self, ArrayType, array); + + array->length = NUM2UINT(rbLength); + array->rbComponentType = rbComponentType; + Data_Get_Struct(rbComponentType, Type, array->componentType); + + array->ffiTypes = xcalloc(array->length + 1, sizeof(*array->ffiTypes)); + array->base.ffiType->elements = array->ffiTypes; + array->base.ffiType->size = array->componentType->ffiType->size * array->length; + array->base.ffiType->alignment = array->componentType->ffiType->alignment; + + for (i = 0; i < array->length; ++i) { + array->ffiTypes[i] = array->componentType->ffiType; + } + + return self; +} + +/* + * call-seq: length + * @return [Numeric] + * Get array's length + */ +static VALUE +array_type_length(VALUE self) +{ + ArrayType* array; + + Data_Get_Struct(self, ArrayType, array); + + return UINT2NUM(array->length); +} + +/* + * call-seq: element_type + * @return [Type] + * Get element type. + */ +static VALUE +array_type_element_type(VALUE self) +{ + ArrayType* array; + + Data_Get_Struct(self, ArrayType, array); + + return array->rbComponentType; +} + +void +rbffi_ArrayType_Init(VALUE moduleFFI) +{ + VALUE ffi_Type; + + ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::ArrayType < FFI::Type + * + * This is a typed array. The type is a {NativeType native type}. 
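ArrayType is rarely constructed directly from Ruby; it usually appears behind fixed-length array members in an FFI::Struct layout. A minimal sketch (struct and field names are illustrative):

    require 'ffi'

    class Packet < FFI::Struct
      # [:uint8, 16] is backed by the ArrayType machinery defined here.
      layout :kind,    :uint32,
             :payload, [:uint8, 16]
    end

    pkt = Packet.new
    pkt[:kind] = 7
    pkt[:payload][0] = 0xFF    # element access on the inline array
    pkt[:payload].to_a.size    # => 16
    Packet.size                # => 20 on common platforms (4 + 16, no padding needed)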
+ */ + rbffi_ArrayTypeClass = rb_define_class_under(moduleFFI, "ArrayType", ffi_Type); + /* + * Document-variable: FFI::ArrayType + */ + rb_global_variable(&rbffi_ArrayTypeClass); + /* + * Document-constant: FFI::Type::Array + */ + rb_define_const(ffi_Type, "Array", rbffi_ArrayTypeClass); + + rb_define_alloc_func(rbffi_ArrayTypeClass, array_type_s_allocate); + rb_define_method(rbffi_ArrayTypeClass, "initialize", array_type_initialize, 2); + rb_define_method(rbffi_ArrayTypeClass, "length", array_type_length, 0); + rb_define_method(rbffi_ArrayTypeClass, "elem_type", array_type_element_type, 0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.h new file mode 100644 index 0000000..356ffb1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_ARRAYTYPE_H +#define RBFFI_ARRAYTYPE_H + +#include +#include +#include "Type.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct ArrayType_ { + Type base; + int length; + ffi_type** ffiTypes; + Type* componentType; + VALUE rbComponentType; +} ArrayType; + +extern void rbffi_ArrayType_Init(VALUE moduleFFI); +extern VALUE rbffi_ArrayTypeClass; + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_ARRAYTYPE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.o new file mode 100644 index 0000000..19c863a Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ArrayType.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Buffer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Buffer.c new file mode 100644 index 0000000..b5f39a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Buffer.c @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (C) 2009 Aman Gupta + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include "rbffi.h" +#include "rbffi_endian.h" +#include "AbstractMemory.h" + +#define BUFFER_EMBED_MAXLEN (8) +typedef struct Buffer { + AbstractMemory memory; + + union { + VALUE rbParent; /* link to parent buffer */ + char* storage; /* start of malloc area */ + long embed[BUFFER_EMBED_MAXLEN / sizeof(long)]; /* storage for tiny allocations */ + } data; +} Buffer; + +static VALUE buffer_allocate(VALUE klass); +static VALUE buffer_initialize(int argc, VALUE* argv, VALUE self); +static void buffer_release(Buffer* ptr); +static void buffer_mark(Buffer* ptr); +static VALUE buffer_free(VALUE self); + +static VALUE BufferClass = Qnil; + +static VALUE +buffer_allocate(VALUE klass) +{ + Buffer* buffer; + VALUE obj; + + obj = Data_Make_Struct(klass, Buffer, NULL, buffer_release, buffer); + buffer->data.rbParent = Qnil; + buffer->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +static void +buffer_release(Buffer* ptr) +{ + if ((ptr->memory.flags & MEM_EMBED) == 0 && ptr->data.storage != NULL) { + xfree(ptr->data.storage); + ptr->data.storage = NULL; + } + + xfree(ptr); +} + +/* + * call-seq: initialize(size, count=1, clear=false) + * @param [Integer, Symbol, #size] Type or size in bytes of a buffer cell + * @param [Fixnum] count number of cell in the Buffer + * @param [Boolean] clear if true, set the buffer to all-zero + * @return [self] + * @raise {NoMemoryError} if failed to allocate memory for Buffer + * A new instance of Buffer. + */ +static VALUE +buffer_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE rbSize = Qnil, rbCount = Qnil, rbClear = Qnil; + Buffer* p; + int nargs; + + Data_Get_Struct(self, Buffer, p); + + nargs = rb_scan_args(argc, argv, "12", &rbSize, &rbCount, &rbClear); + p->memory.typeSize = rbffi_type_size(rbSize); + p->memory.size = p->memory.typeSize * (nargs > 1 ? NUM2LONG(rbCount) : 1); + + if (p->memory.size > BUFFER_EMBED_MAXLEN) { + p->data.storage = xmalloc(p->memory.size + 7); + if (p->data.storage == NULL) { + rb_raise(rb_eNoMemError, "Failed to allocate memory size=%lu bytes", p->memory.size); + return Qnil; + } + + /* ensure the memory is aligned on at least a 8 byte boundary */ + p->memory.address = (void *) (((uintptr_t) p->data.storage + 0x7) & (uintptr_t) ~0x7ULL); + + if (p->memory.size > 0 && (nargs < 3 || RTEST(rbClear))) { + memset(p->memory.address, 0, p->memory.size); + } + + } else { + p->memory.flags |= MEM_EMBED; + p->memory.address = (void *) &p->data.embed[0]; + } + + if (rb_block_given_p()) { + return rb_ensure(rb_yield, self, buffer_free, self); + } + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [self] + * DO NOT CALL THIS METHOD. 
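The constructor documented above takes a cell type or size, an optional cell count, and an optional clear flag; the block form frees the backing storage when the block returns (see buffer_free below). A short sketch of typical use:

    require 'ffi'

    buf = FFI::Buffer.new(:int, 10, true)   # 10 int-sized cells, zero-filled
    buf.put_int(0, 123)
    buf.get_int(0)     # => 123
    buf.size           # => 40 where int is 4 bytes

    FFI::Buffer.new(:char, 256) do |scratch|
      scratch.put_string(0, "hello")        # storage is released after the block
    end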
+ */ +static VALUE +buffer_initialize_copy(VALUE self, VALUE other) +{ + AbstractMemory* src; + Buffer* dst; + + Data_Get_Struct(self, Buffer, dst); + src = rbffi_AbstractMemory_Cast(other, BufferClass); + if ((dst->memory.flags & MEM_EMBED) == 0 && dst->data.storage != NULL) { + xfree(dst->data.storage); + } + dst->data.storage = xmalloc(src->size + 7); + if (dst->data.storage == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate memory size=%lu bytes", src->size); + return Qnil; + } + + dst->memory.address = (void *) (((uintptr_t) dst->data.storage + 0x7) & (uintptr_t) ~0x7ULL); + dst->memory.size = src->size; + dst->memory.typeSize = src->typeSize; + + /* finally, copy the actual buffer contents */ + memcpy(dst->memory.address, src->address, src->size); + + return self; +} + +static VALUE +buffer_alloc_inout(int argc, VALUE* argv, VALUE klass) +{ + return buffer_initialize(argc, argv, buffer_allocate(klass)); +} + +static VALUE +slice(VALUE self, long offset, long len) +{ + Buffer* ptr; + Buffer* result; + VALUE obj = Qnil; + + Data_Get_Struct(self, Buffer, ptr); + checkBounds(&ptr->memory, offset, len); + + obj = Data_Make_Struct(BufferClass, Buffer, buffer_mark, -1, result); + result->memory.address = ptr->memory.address + offset; + result->memory.size = len; + result->memory.flags = ptr->memory.flags; + result->memory.typeSize = ptr->memory.typeSize; + result->data.rbParent = self; + + return obj; +} + +/* + * call-seq: + offset + * @param [Numeric] offset + * @return [Buffer] a new instance of Buffer pointing from offset until end of previous buffer. + * Add a Buffer with an offset + */ +static VALUE +buffer_plus(VALUE self, VALUE rbOffset) +{ + Buffer* ptr; + long offset = NUM2LONG(rbOffset); + + Data_Get_Struct(self, Buffer, ptr); + + return slice(self, offset, ptr->memory.size - offset); +} + +/* + * call-seq: slice(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Buffer] a new instance of Buffer + * Slice an existing Buffer. + */ +static VALUE +buffer_slice(VALUE self, VALUE rbOffset, VALUE rbLength) +{ + return slice(self, NUM2LONG(rbOffset), NUM2LONG(rbLength)); +} + +/* + * call-seq: inspect + * @return [String] + * Inspect a Buffer. + */ +static VALUE +buffer_inspect(VALUE self) +{ + char tmp[100]; + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + + snprintf(tmp, sizeof(tmp), "#", ptr, ptr->memory.address, ptr->memory.size); + + return rb_str_new2(tmp); +} + + +#if BYTE_ORDER == LITTLE_ENDIAN +# define SWAPPED_ORDER BIG_ENDIAN +#else +# define SWAPPED_ORDER LITTLE_ENDIAN +#endif + +/* + * Set or get endianness of Buffer. + * @overload order + * @return [:big, :little] + * Get endianness of Buffer. + * @overload order(order) + * @param [:big, :little, :network] order + * @return [self] + * Set endianness of Buffer (+:network+ is an alias for +:big+). + */ +static VALUE +buffer_order(int argc, VALUE* argv, VALUE self) +{ + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + if (argc == 0) { + int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER; + return order == BIG_ENDIAN ? 
ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little")); + } else { + VALUE rbOrder = Qnil; + int order = BYTE_ORDER; + + if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) { + rb_raise(rb_eArgError, "need byte order"); + } + if (SYMBOL_P(rbOrder)) { + ID id = SYM2ID(rbOrder); + if (id == rb_intern("little")) { + order = LITTLE_ENDIAN; + + } else if (id == rb_intern("big") || id == rb_intern("network")) { + order = BIG_ENDIAN; + } + } + if (order != BYTE_ORDER) { + Buffer* p2; + VALUE retval = slice(self, 0, ptr->memory.size); + + Data_Get_Struct(retval, Buffer, p2); + p2->memory.flags |= MEM_SWAP; + return retval; + } + + return self; + } +} + +/* Only used to free the buffer if the yield in the initializer throws an exception */ +static VALUE +buffer_free(VALUE self) +{ + Buffer* ptr; + + Data_Get_Struct(self, Buffer, ptr); + if ((ptr->memory.flags & MEM_EMBED) == 0 && ptr->data.storage != NULL) { + xfree(ptr->data.storage); + ptr->data.storage = NULL; + } + + return self; +} + +static void +buffer_mark(Buffer* ptr) +{ + rb_gc_mark(ptr->data.rbParent); +} + +void +rbffi_Buffer_Init(VALUE moduleFFI) +{ + VALUE ffi_AbstractMemory = rbffi_AbstractMemoryClass; + + /* + * Document-class: FFI::Buffer < FFI::AbstractMemory + * + * A Buffer is a function argument type. It should be use with functions playing with C arrays. + */ + BufferClass = rb_define_class_under(moduleFFI, "Buffer", ffi_AbstractMemory); + + /* + * Document-variable: FFI::Buffer + */ + rb_global_variable(&BufferClass); + rb_define_alloc_func(BufferClass, buffer_allocate); + + /* + * Document-method: alloc_inout + * call-seq: alloc_inout(*args) + * Create a new Buffer for in and out arguments (alias : new_inout). + */ + rb_define_singleton_method(BufferClass, "alloc_inout", buffer_alloc_inout, -1); + /* + * Document-method: alloc_out + * call-seq: alloc_out(*args) + * Create a new Buffer for out arguments (alias : new_out). + */ + rb_define_singleton_method(BufferClass, "alloc_out", buffer_alloc_inout, -1); + /* + * Document-method: alloc_in + * call-seq: alloc_in(*args) + * Create a new Buffer for in arguments (alias : new_in). 
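alloc_in, alloc_out and alloc_inout all allocate the same kind of Buffer; the name records the intended direction of the argument. A sketch of the usual out-parameter pattern, assuming a POSIX libc with pipe(2):

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # int pipe(int pipefd[2]); the callee writes two descriptors into the buffer
      attach_function :pipe, [:buffer_out], :int
    end

    fds = FFI::Buffer.alloc_out(:int, 2)
    if LibC.pipe(fds) == 0
      read_fd, write_fd = fds.get_array_of_int(0, 2)
    end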
+ */ + rb_define_singleton_method(BufferClass, "alloc_in", buffer_alloc_inout, -1); + rb_define_alias(rb_singleton_class(BufferClass), "new_in", "alloc_in"); + rb_define_alias(rb_singleton_class(BufferClass), "new_out", "alloc_out"); + rb_define_alias(rb_singleton_class(BufferClass), "new_inout", "alloc_inout"); + + rb_define_method(BufferClass, "initialize", buffer_initialize, -1); + rb_define_method(BufferClass, "initialize_copy", buffer_initialize_copy, 1); + rb_define_method(BufferClass, "order", buffer_order, -1); + rb_define_method(BufferClass, "inspect", buffer_inspect, 0); + rb_define_alias(BufferClass, "length", "total"); + rb_define_method(BufferClass, "+", buffer_plus, 1); + rb_define_method(BufferClass, "slice", buffer_slice, 2); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Buffer.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Buffer.o new file mode 100644 index 0000000..c91963b Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Buffer.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.c new file mode 100644 index 0000000..bd6c277 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.c @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Mike Dalessio + * Copyright (c) 2009, Aman Gupta. + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) +# include +# include +#endif +#include +#include "extconf.h" +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Struct.h" +#include "Function.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "MappedType.h" +#include "Thread.h" +#include "LongDouble.h" + +#ifdef USE_RAW +# ifndef __i386__ +# error "RAW argument packing only supported on i386" +# endif + +#define INT8_ADJ (4) +#define INT16_ADJ (4) +#define INT32_ADJ (4) +#define INT64_ADJ (8) +#define LONG_ADJ (sizeof(long)) +#define FLOAT32_ADJ (4) +#define FLOAT64_ADJ (8) +#define ADDRESS_ADJ (sizeof(void *)) +#define LONGDOUBLE_ADJ (ffi_type_longdouble.alignment > sizeof(long double) ? ffi_type_longdouble.alignment : sizeof(long double)) + +#endif /* USE_RAW */ + +#ifdef USE_RAW +# define ADJ(p, a) ((p) = (FFIStorage*) (((char *) p) + a##_ADJ)) +#else +# define ADJ(p, a) (++(p)) +#endif + +static void* callback_param(VALUE proc, VALUE cbinfo); +static inline void* getPointer(VALUE value, int type); + +static ID id_to_ptr, id_map_symbol, id_to_native; + +void +rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes, + FFIStorage* paramStorage, void** ffiValues, + VALUE* callbackParameters, int callbackCount, VALUE enums) +{ + VALUE callbackProc = Qnil; + FFIStorage* param = ¶mStorage[0]; + int i, argidx, cbidx, argCount; + + if (unlikely(paramCount != -1 && paramCount != argc)) { + if (argc == (paramCount - 1) && callbackCount == 1 && rb_block_given_p()) { + callbackProc = rb_block_proc(); + } else { + rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)", argc, paramCount); + } + } + + argCount = paramCount != -1 ? paramCount : argc; + + for (i = 0, argidx = 0, cbidx = 0; i < argCount; ++i) { + Type* paramType = paramTypes[i]; + int type; + + + if (unlikely(paramType->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { argv[argidx], Qnil }; + argv[argidx] = rb_funcall2(((MappedType *) paramType)->rbConverter, id_to_native, 2, values); + paramType = ((MappedType *) paramType)->type; + } + + type = argidx < argc ? 
TYPE(argv[argidx]) : T_NONE; + ffiValues[i] = param; + + switch (paramType->nativeType) { + + case NATIVE_INT8: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s8 = NUM2INT(value); + } else { + param->s8 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT8); + break; + + case NATIVE_INT16: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s16 = NUM2INT(value); + + } else { + param->s16 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT16); + break; + + case NATIVE_INT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->s32 = NUM2INT(value); + + } else { + param->s32 = NUM2INT(argv[argidx]); + } + + ++argidx; + ADJ(param, INT32); + break; + + case NATIVE_BOOL: + if (type != T_TRUE && type != T_FALSE) { + rb_raise(rb_eTypeError, "wrong argument type (expected a boolean parameter)"); + } + param->s8 = argv[argidx++] == Qtrue; + ADJ(param, INT8); + break; + + case NATIVE_UINT8: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u8 = NUM2UINT(value); + } else { + param->u8 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT8); + ++argidx; + break; + + case NATIVE_UINT16: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u16 = NUM2UINT(value); + } else { + param->u16 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT16); + ++argidx; + break; + + case NATIVE_UINT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u32 = NUM2UINT(value); + } else { + param->u32 = NUM2UINT(argv[argidx]); + } + + ADJ(param, INT32); + ++argidx; + break; + + case NATIVE_INT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->i64 = NUM2LL(value); + } else { + param->i64 = NUM2LL(argv[argidx]); + } + + ADJ(param, INT64); + ++argidx; + break; + + case NATIVE_UINT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->u64 = NUM2ULL(value); + } else { + param->u64 = NUM2ULL(argv[argidx]); + } + + ADJ(param, INT64); + ++argidx; + break; + + case NATIVE_LONG: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + *(ffi_sarg *) param = NUM2LONG(value); + } else { + *(ffi_sarg *) param = NUM2LONG(argv[argidx]); + } + + ADJ(param, LONG); + ++argidx; + break; + + case NATIVE_ULONG: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + *(ffi_arg *) param = NUM2ULONG(value); + } else { + *(ffi_arg *) param = NUM2ULONG(argv[argidx]); + } + + ADJ(param, LONG); + ++argidx; + break; + + case NATIVE_FLOAT32: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->f32 = (float) NUM2DBL(value); + } else { + param->f32 = (float) NUM2DBL(argv[argidx]); + } + + ADJ(param, FLOAT32); + ++argidx; + break; + + case NATIVE_FLOAT64: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->f64 = NUM2DBL(value); + } else { + param->f64 = 
NUM2DBL(argv[argidx]); + } + + ADJ(param, FLOAT64); + ++argidx; + break; + + case NATIVE_LONGDOUBLE: + if (unlikely(type == T_SYMBOL && enums != Qnil)) { + VALUE value = rb_funcall(enums, id_map_symbol, 1, argv[argidx]); + param->ld = rbffi_num2longdouble(value); + } else { + param->ld = rbffi_num2longdouble(argv[argidx]); + } + + ADJ(param, LONGDOUBLE); + ++argidx; + break; + + + case NATIVE_STRING: + if (type == T_NIL) { + param->ptr = NULL; + + } else { + param->ptr = StringValueCStr(argv[argidx]); + } + + ADJ(param, ADDRESS); + ++argidx; + break; + + case NATIVE_POINTER: + case NATIVE_BUFFER_IN: + case NATIVE_BUFFER_OUT: + case NATIVE_BUFFER_INOUT: + param->ptr = getPointer(argv[argidx++], type); + ADJ(param, ADDRESS); + break; + + + case NATIVE_FUNCTION: + if (callbackProc != Qnil) { + param->ptr = callback_param(callbackProc, callbackParameters[cbidx++]); + } else { + param->ptr = callback_param(argv[argidx], callbackParameters[cbidx++]); + ++argidx; + } + ADJ(param, ADDRESS); + break; + + case NATIVE_STRUCT: + ffiValues[i] = getPointer(argv[argidx++], type); + break; + + default: + rb_raise(rb_eArgError, "Invalid parameter type: %d", paramType->nativeType); + } + } +} + +static void * +call_blocking_function(void* data) +{ + rbffi_blocking_call_t* b = (rbffi_blocking_call_t *) data; + ffi_call(&b->cif, FFI_FN(b->function), b->retval, b->ffiValues); + + return NULL; +} + +VALUE +rbffi_do_blocking_call(VALUE data) +{ + rb_thread_call_without_gvl(call_blocking_function, (void*)data, (rb_unblock_function_t *) -1, NULL); + + return Qnil; +} + +VALUE +rbffi_save_frame_exception(VALUE data, VALUE exc) +{ + rbffi_frame_t* frame = (rbffi_frame_t *) data; + frame->exc = exc; + return Qnil; +} + +VALUE +rbffi_CallFunction(int argc, VALUE* argv, void* function, FunctionType* fnInfo) +{ + void* retval; + void** ffiValues; + FFIStorage* params; + VALUE rbReturnValue; + rbffi_frame_t frame = { 0 }; + + retval = alloca(MAX(fnInfo->ffi_cif.rtype->size, FFI_SIZEOF_ARG)); + + if (unlikely(fnInfo->blocking)) { + rbffi_blocking_call_t* bc; + + /* allocate information passed to the blocking function on the stack */ + ffiValues = ALLOCA_N(void *, fnInfo->parameterCount); + params = ALLOCA_N(FFIStorage, fnInfo->parameterCount); + bc = ALLOCA_N(rbffi_blocking_call_t, 1); + bc->retval = retval; + bc->cif = fnInfo->ffi_cif; + bc->function = function; + bc->ffiValues = ffiValues; + bc->params = params; + bc->frame = &frame; + + rbffi_SetupCallParams(argc, argv, + fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues, + fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums); + + rbffi_frame_push(&frame); + rb_rescue2(rbffi_do_blocking_call, (VALUE) bc, rbffi_save_frame_exception, (VALUE) &frame, rb_eException, (VALUE) 0); + rbffi_frame_pop(&frame); + + } else { + + ffiValues = ALLOCA_N(void *, fnInfo->parameterCount); + params = ALLOCA_N(FFIStorage, fnInfo->parameterCount); + + rbffi_SetupCallParams(argc, argv, + fnInfo->parameterCount, fnInfo->parameterTypes, params, ffiValues, + fnInfo->callbackParameters, fnInfo->callbackCount, fnInfo->rbEnums); + + rbffi_frame_push(&frame); + ffi_call(&fnInfo->ffi_cif, FFI_FN(function), retval, ffiValues); + rbffi_frame_pop(&frame); + } + + if (unlikely(!fnInfo->ignoreErrno)) { + rbffi_save_errno(); + } + + if (RTEST(frame.exc) && frame.exc != Qnil) { + rb_exc_raise(frame.exc); + } + + RB_GC_GUARD(rbReturnValue) = rbffi_NativeValue_ToRuby(fnInfo->returnType, fnInfo->rbReturnType, retval); + RB_GC_GUARD(fnInfo->rbReturnType); + + return 
rbReturnValue; +} + +static inline void* +getPointer(VALUE value, int type) +{ + if (likely(type == T_DATA && rb_obj_is_kind_of(value, rbffi_AbstractMemoryClass))) { + + return ((AbstractMemory *) DATA_PTR(value))->address; + + } else if (type == T_DATA && rb_obj_is_kind_of(value, rbffi_StructClass)) { + + AbstractMemory* memory = ((Struct *) DATA_PTR(value))->pointer; + return memory != NULL ? memory->address : NULL; + + } else if (type == T_STRING) { + + return StringValuePtr(value); + + } else if (type == T_NIL) { + + return NULL; + + } else if (rb_respond_to(value, id_to_ptr)) { + + VALUE ptr = rb_funcall2(value, id_to_ptr, 0, NULL); + if (rb_obj_is_kind_of(ptr, rbffi_AbstractMemoryClass) && TYPE(ptr) == T_DATA) { + return ((AbstractMemory *) DATA_PTR(ptr))->address; + } + rb_raise(rb_eArgError, "to_ptr returned an invalid pointer"); + } + + rb_raise(rb_eArgError, ":pointer argument is not a valid pointer"); + return NULL; +} + +Invoker +rbffi_GetInvoker(FunctionType *fnInfo) +{ + return rbffi_CallFunction; +} + + +static void* +callback_param(VALUE proc, VALUE cbInfo) +{ + VALUE callback ; + if (unlikely(proc == Qnil)) { + return NULL ; + } + + /* Handle Function pointers here */ + if (rb_obj_is_kind_of(proc, rbffi_FunctionClass)) { + AbstractMemory* ptr; + Data_Get_Struct(proc, AbstractMemory, ptr); + return ptr->address; + } + + callback = rbffi_Function_ForProc(cbInfo, proc); + RB_GC_GUARD(callback); + + return ((AbstractMemory *) DATA_PTR(callback))->address; +} + + +void +rbffi_Call_Init(VALUE moduleFFI) +{ + id_to_ptr = rb_intern("to_ptr"); + id_to_native = rb_intern("to_native"); + id_map_symbol = rb_intern("__map_symbol"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.h new file mode 100644 index 0000000..b892d85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Mike Dalessio + * Copyright (c) 2009, Aman Gupta. + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
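When a function is declared blocking, rbffi_CallFunction takes the branch above that hands the ffi_call to rb_thread_call_without_gvl, so other Ruby threads keep running during the native call. From Ruby this is opted into per function; a sketch assuming a POSIX libc:

    require 'ffi'

    module Nap
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # blocking: true releases the GVL around the native call
      attach_function :sleep, [:uint], :uint, blocking: true
    end

    t = Thread.new { Nap.sleep(1) }   # does not stall other Ruby threads
    t.join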
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_CALL_H +#define RBFFI_CALL_H + +#include "Thread.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__i386__) && \ + (defined(HAVE_RAW_API) || defined(USE_INTERNAL_LIBFFI)) && \ + !defined(_WIN32) && !defined(__WIN32__) +# define USE_RAW +#endif + +#if (defined(__i386__) || defined(__x86_64__)) && !(defined(_WIN32) || defined(__WIN32__)) +# define BYPASS_FFI 1 +#endif + +typedef union { +#ifdef USE_RAW + signed int s8, s16, s32; + unsigned int u8, u16, u32; +#else + signed char s8; + unsigned char u8; + signed short s16; + unsigned short u16; + signed int s32; + unsigned int u32; +#endif + signed long long i64; + unsigned long long u64; + signed long sl; + unsigned long ul; + void* ptr; + float f32; + double f64; + long double ld; +} FFIStorage; + +extern void rbffi_Call_Init(VALUE moduleFFI); + +extern void rbffi_SetupCallParams(int argc, VALUE* argv, int paramCount, Type** paramTypes, + FFIStorage* paramStorage, void** ffiValues, + VALUE* callbackParameters, int callbackCount, VALUE enums); + +struct FunctionType_; +extern VALUE rbffi_CallFunction(int argc, VALUE* argv, void* function, struct FunctionType_* fnInfo); + +typedef VALUE (*Invoker)(int argc, VALUE* argv, void* function, struct FunctionType_* fnInfo); + +Invoker rbffi_GetInvoker(struct FunctionType_* fnInfo); + +extern VALUE rbffi_GetEnumValue(VALUE enums, VALUE value); +extern int rbffi_GetSignedIntValue(VALUE value, int type, int minValue, int maxValue, const char* typeName, VALUE enums); + +typedef struct rbffi_blocking_call { + rbffi_frame_t* frame; + void* function; + ffi_cif cif; + void **ffiValues; + void* retval; + void* params; +} rbffi_blocking_call_t; + +VALUE rbffi_do_blocking_call(VALUE data); +VALUE rbffi_save_frame_exception(VALUE data, VALUE exc); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_CALL_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.o new file mode 100644 index 0000000..30a0e48 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Call.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.c new file mode 100644 index 0000000..cfdcf6c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +#endif +#include +#include +#include +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +#else +# include +# define _WINSOCKAPI_ +# include +#endif +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Function.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" + +#include "ClosurePool.h" + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +typedef struct Memory { + void* code; + void* data; + struct Memory* next; +} Memory; + +struct ClosurePool_ { + void* ctx; + int closureSize; + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize); + struct Memory* blocks; /* Keeps track of all the allocated memory for this pool */ + Closure* list; + long refcnt; +}; + +static long pageSize; + +static void* allocatePage(void); +static bool freePage(void *); +static bool protectPage(void *); + +ClosurePool* +rbffi_ClosurePool_New(int closureSize, + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize), + void* ctx) +{ + ClosurePool* pool; + + pool = xcalloc(1, sizeof(*pool)); + pool->closureSize = closureSize; + pool->ctx = ctx; + pool->prep = prep; + pool->refcnt = 1; + + return pool; +} + +void +cleanup_closure_pool(ClosurePool* pool) +{ + Memory* memory; + + for (memory = pool->blocks; memory != NULL; ) { + Memory* next = memory->next; +#if !USE_FFI_ALLOC + freePage(memory->code); +#else + ffi_closure_free(memory->code); +#endif + free(memory->data); + free(memory); + memory = next; + } + xfree(pool); +} + +void +rbffi_ClosurePool_Free(ClosurePool* pool) +{ + if (pool != NULL) { + long refcnt = --(pool->refcnt); + if (refcnt == 0) { + cleanup_closure_pool(pool); + } + } +} + +#if !USE_FFI_ALLOC + +Closure* +rbffi_Closure_Alloc(ClosurePool* pool) +{ + Closure *list = NULL; + Memory* block = NULL; + void *code = NULL; + char errmsg[256]; + int nclosures; + long trampolineSize; + int i; + + if (pool->list != NULL) { + Closure* closure = pool->list; + pool->list = pool->list->next; + pool->refcnt++; + + return closure; + } + + trampolineSize = roundup(pool->closureSize, 8); + nclosures = (int) (pageSize / trampolineSize); + block = calloc(1, sizeof(*block)); + list = calloc(nclosures, sizeof(*list)); + code = allocatePage(); + + if (block == NULL || list == NULL || code == NULL) { + snprintf(errmsg, sizeof(errmsg), "failed to allocate a page. 
errno=%d (%s)", errno, strerror(errno)); + goto error; + } + + for (i = 0; i < nclosures; ++i) { + Closure* closure = &list[i]; + closure->next = &list[i + 1]; + closure->pool = pool; + closure->code = ((char *)code + (i * trampolineSize)); + closure->pcl = closure->code; + + if (!(*pool->prep)(pool->ctx, closure->code, closure, errmsg, sizeof(errmsg))) { + goto error; + } + } + + if (!protectPage(code)) { + goto error; + } + + /* Track the allocated page + Closure memory area */ + block->data = list; + block->code = code; + block->next = pool->blocks; + pool->blocks = block; + + /* Thread the new block onto the free list, apart from the first one. */ + list[nclosures - 1].next = pool->list; + pool->list = list->next; + pool->refcnt++; + + /* Use the first one as the new handle */ + return list; + +error: + free(block); + free(list); + if (code != NULL) { + freePage(code); + } + + + rb_raise(rb_eRuntimeError, "%s", errmsg); + return NULL; +} + +#else + +Closure* +rbffi_Closure_Alloc(ClosurePool* pool) +{ + Closure *closure = NULL; + Memory* block = NULL; + void *code = NULL; + void *pcl = NULL; + char errmsg[256]; + + block = calloc(1, sizeof(*block)); + closure = calloc(1, sizeof(*closure)); + pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + + if (block == NULL || closure == NULL || pcl == NULL) { + snprintf(errmsg, sizeof(errmsg), "failed to allocate a page. errno=%d (%s)", errno, strerror(errno)); + goto error; + } + + closure->pool = pool; + closure->code = code; + closure->pcl = pcl; + + if (!(*pool->prep)(pool->ctx, closure->code, closure, errmsg, sizeof(errmsg))) { + goto error; + } + + /* Track the allocated page + Closure memory area */ + block->data = closure; + block->code = pcl; + pool->blocks = block; + + /* Thread the new block onto the free list, apart from the first one. */ + pool->refcnt++; + + return closure; + +error: + free(block); + free(closure); + if (pcl != NULL) { + ffi_closure_free(pcl); + } + + rb_raise(rb_eRuntimeError, "%s", errmsg); + return NULL; +} + +#endif /* !USE_FFI_ALLOC */ + +void +rbffi_Closure_Free(Closure* closure) +{ + if (closure != NULL) { + ClosurePool* pool = closure->pool; + long refcnt; + /* Just push it on the front of the free list */ + closure->next = pool->list; + pool->list = closure; + refcnt = --(pool->refcnt); + if (refcnt == 0) { + cleanup_closure_pool(pool); + } + } +} + +void* +rbffi_Closure_CodeAddress(Closure* handle) +{ + return handle->code; +} + + +static long +getPageSize() +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + return sysconf(_SC_PAGESIZE); +#endif +} + +#if !USE_FFI_ALLOC + +static void* +allocatePage(void) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + return VirtualAlloc(NULL, pageSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else + void *page = mmap(NULL, pageSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + return (page != (void *) -1) ? 
page : NULL; +#endif +} + +static bool +freePage(void *addr) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + return VirtualFree(addr, 0, MEM_RELEASE); +#else + return munmap(addr, pageSize) == 0; +#endif +} + +static bool +protectPage(void* page) +{ +#if !defined(__CYGWIN__) && (defined(_WIN32) || defined(__WIN32__)) + DWORD oldProtect; + return VirtualProtect(page, pageSize, PAGE_EXECUTE_READ, &oldProtect); +#else + return mprotect(page, pageSize, PROT_READ | PROT_EXEC) == 0; +#endif +} + +#endif /* !USE_FFI_ALLOC */ + +void +rbffi_ClosurePool_Init(VALUE module) +{ + pageSize = getPageSize(); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.h new file mode 100644 index 0000000..99e3a47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RUBYFFI_CLOSUREPOOL_H +#define RUBYFFI_CLOSUREPOOL_H + +typedef struct ClosurePool_ ClosurePool; +typedef struct Closure_ Closure; + +struct Closure_ { + void* info; /* opaque handle for storing closure-instance specific data */ + void* function; /* closure-instance specific function, called by custom trampoline */ + void* code; /* Executable address for the native trampoline code location */ + void* pcl; /* Writeable address for the native trampoline code location */ + + struct ClosurePool_* pool; + Closure* next; +}; + +void rbffi_ClosurePool_Init(VALUE module); + +ClosurePool* rbffi_ClosurePool_New(int closureSize, + bool (*prep)(void* ctx, void *code, Closure* closure, char* errbuf, size_t errbufsize), + void* ctx); + +void rbffi_ClosurePool_Free(ClosurePool *); + +Closure* rbffi_Closure_Alloc(ClosurePool *); +void rbffi_Closure_Free(Closure *); + +void* rbffi_Closure_GetCodeAddress(Closure *); + +#endif /* RUBYFFI_CLOSUREPOOL_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.o new file mode 100644 index 0000000..bce670d Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ClosurePool.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.c new file mode 100644 index 0000000..78b3de6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.c @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
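The closure pool is what backs Ruby callbacks: each Proc handed to native code is wrapped in a Closure whose executable trampoline re-enters Ruby. A usage sketch from the Ruby side, assuming a POSIX libc qsort:

    require 'ffi'

    module Sorter
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # int (*compar)(const void *, const void *)
      callback :compare, [:pointer, :pointer], :int
      attach_function :qsort, [:pointer, :size_t, :size_t, :compare], :void
    end

    values = FFI::MemoryPointer.new(:int, 4)
    values.put_array_of_int(0, [3, 1, 4, 2])

    # The block becomes a native function pointer via a pooled closure.
    Sorter.qsort(values, 4, values.type_size, proc { |a, b| a.read_int <=> b.read_int })
    values.get_array_of_int(0, 4)   # => [1, 2, 3, 4]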
+ */ + +#include +#include +#include +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +# include +# define _WINSOCKAPI_ +# include +# include +#else +# include +#endif +#include + +#include + +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "DynamicLibrary.h" + +typedef struct LibrarySymbol_ { + Pointer base; + VALUE library; + VALUE name; +} LibrarySymbol; + +static VALUE library_initialize(VALUE self, VALUE libname, VALUE libflags); +static void library_free(Library* lib); + + +static VALUE symbol_allocate(VALUE klass); +static VALUE symbol_new(VALUE library, void* address, VALUE name); +static void symbol_mark(LibrarySymbol* sym); + +static VALUE LibraryClass = Qnil, SymbolClass = Qnil; + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +static void* dl_open(const char* name, int flags); +static void dl_error(char* buf, int size); +#define dl_sym(handle, name) GetProcAddress(handle, name) +#define dl_close(handle) FreeLibrary(handle) +#else +# define dl_open(name, flags) dlopen(name, flags != 0 ? flags : RTLD_LAZY) +# define dl_error(buf, size) do { snprintf(buf, size, "%s", dlerror()); } while(0) +# define dl_sym(handle, name) dlsym(handle, name) +# define dl_close(handle) dlclose(handle) +#endif + +static VALUE +library_allocate(VALUE klass) +{ + Library* library; + return Data_Make_Struct(klass, Library, NULL, library_free, library); +} + +/* + * call-seq: DynamicLibrary.open(libname, libflags) + * @param libname (see #initialize) + * @param libflags (see #initialize) + * @return [FFI::DynamicLibrary] + * @raise {LoadError} if +libname+ cannot be opened + * Open a library. + */ +static VALUE +library_open(VALUE klass, VALUE libname, VALUE libflags) +{ + return library_initialize(library_allocate(klass), libname, libflags); +} + +/* + * call-seq: initialize(libname, libflags) + * @param [String] libname name of library to open + * @param [Fixnum] libflags flags for library to open + * @return [FFI::DynamicLibrary] + * @raise {LoadError} if +libname+ cannot be opened + * A new DynamicLibrary instance. + */ +static VALUE +library_initialize(VALUE self, VALUE libname, VALUE libflags) +{ + Library* library; + int flags; + + Check_Type(libflags, T_FIXNUM); + + Data_Get_Struct(self, Library, library); + flags = libflags != Qnil ? NUM2UINT(libflags) : 0; + + library->handle = dl_open(libname != Qnil ? StringValueCStr(libname) : NULL, flags); + if (library->handle == NULL) { + char errmsg[1024]; + dl_error(errmsg, sizeof(errmsg)); + rb_raise(rb_eLoadError, "Could not open library '%s': %s", + libname != Qnil ? StringValueCStr(libname) : "[current process]", + errmsg); + } +#ifdef __CYGWIN__ + // On Cygwin 1.7.17 "dlsym(dlopen(0,0), 'getpid')" fails. (dlerror: "No such process") + // As a workaround we can use "dlsym(RTLD_DEFAULT, 'getpid')" instead. + // Since 0 == RTLD_DEFAULT we won't call dl_close later. + if (libname == Qnil) { + dl_close(library->handle); + library->handle = RTLD_DEFAULT; + } +#endif + rb_iv_set(self, "@name", libname != Qnil ? libname : rb_str_new2("[current process]")); + return self; +} + +static VALUE +library_dlsym(VALUE self, VALUE name) +{ + Library* library; + void* address = NULL; + Check_Type(name, T_STRING); + + Data_Get_Struct(self, Library, library); + address = dl_sym(library->handle, StringValueCStr(name)); + + return address != NULL ? 
symbol_new(self, address, name) : Qnil; +} + +/* + * call-seq: last_error + * @return [String] library's last error string + */ +static VALUE +library_dlerror(VALUE self) +{ + char errmsg[1024]; + dl_error(errmsg, sizeof(errmsg)); + return rb_str_new2(errmsg); +} + +static void +library_free(Library* library) +{ + /* dlclose() on MacOS tends to segfault - avoid it */ +#ifndef __APPLE__ + if (library->handle != NULL) { + dl_close(library->handle); + } +#endif + xfree(library); +} + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(__CYGWIN__) +static void* +dl_open(const char* name, int flags) +{ + if (name == NULL) { + return GetModuleHandle(NULL); + } else { + DWORD dwFlags = PathIsRelativeA(name) ? 0 : LOAD_WITH_ALTERED_SEARCH_PATH; + return LoadLibraryExA(name, NULL, dwFlags); + } +} + +static void +dl_error(char* buf, int size) +{ + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), + 0, buf, size, NULL); +} +#endif + +static VALUE +symbol_allocate(VALUE klass) +{ + LibrarySymbol* sym; + VALUE obj = Data_Make_Struct(klass, LibrarySymbol, NULL, -1, sym); + sym->name = Qnil; + sym->library = Qnil; + sym->base.rbParent = Qnil; + + return obj; +} + + +/* + * call-seq: initialize_copy(other) + * @param [Object] other + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +symbol_initialize_copy(VALUE self, VALUE other) +{ + rb_raise(rb_eRuntimeError, "cannot duplicate symbol"); + return Qnil; +} + +static VALUE +symbol_new(VALUE library, void* address, VALUE name) +{ + LibrarySymbol* sym; + VALUE obj = Data_Make_Struct(SymbolClass, LibrarySymbol, symbol_mark, -1, sym); + + sym->base.memory.address = address; + sym->base.memory.size = LONG_MAX; + sym->base.memory.typeSize = 1; + sym->base.memory.flags = MEM_RD | MEM_WR; + sym->library = library; + sym->name = name; + + return obj; +} + +static void +symbol_mark(LibrarySymbol* sym) +{ + rb_gc_mark(sym->library); + rb_gc_mark(sym->name); +} + +/* + * call-seq: inspect + * @return [String] + * Inspect. + */ +static VALUE +symbol_inspect(VALUE self) +{ + LibrarySymbol* sym; + char buf[256]; + + Data_Get_Struct(self, LibrarySymbol, sym); + snprintf(buf, sizeof(buf), "#", + StringValueCStr(sym->name), sym->base.memory.address); + return rb_str_new2(buf); +} + +void +rbffi_DynamicLibrary_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::DynamicLibrary + */ + LibraryClass = rb_define_class_under(moduleFFI, "DynamicLibrary", rb_cObject); + rb_global_variable(&LibraryClass); + /* + * Document-class: FFI::DynamicLibrary::Symbol < FFI::Pointer + * + * An instance of this class represents a library symbol. It may be a {Pointer pointer} to + * a function or to a variable. 
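From Ruby, DynamicLibrary.open plus find_function gives raw access to symbols without going through attach_function. A sketch; the library name assumes a Linux-style libm and should be adjusted per platform:

    require 'ffi'

    flags = FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_GLOBAL
    libm  = FFI::DynamicLibrary.open('libm.so.6', flags)   # a nil name opens the current process

    sym = libm.find_function('cos')          # => FFI::DynamicLibrary::Symbol (a Pointer)
    cos = FFI::Function.new(:double, [:double], sym)
    cos.call(0.0)                            # => 1.0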
+ */ + SymbolClass = rb_define_class_under(LibraryClass, "Symbol", rbffi_PointerClass); + rb_global_variable(&SymbolClass); + + /* + * Document-const: FFI::NativeLibrary + * Backward compatibility for FFI::DynamicLibrary + */ + rb_define_const(moduleFFI, "NativeLibrary", LibraryClass); /* backwards compat library */ + rb_define_alloc_func(LibraryClass, library_allocate); + rb_define_singleton_method(LibraryClass, "open", library_open, 2); + rb_define_singleton_method(LibraryClass, "last_error", library_dlerror, 0); + rb_define_method(LibraryClass, "initialize", library_initialize, 2); + /* + * Document-method: find_symbol + * call-seq: find_symbol(name) + * @param [String] name library symbol's name + * @return [FFI::DynamicLibrary::Symbol] library symbol + */ + rb_define_method(LibraryClass, "find_symbol", library_dlsym, 1); + /* + * Document-method: find_function + * call-seq: find_function(name) + * @param [String] name library function's name + * @return [FFI::DynamicLibrary::Symbol] library function symbol + */ + rb_define_method(LibraryClass, "find_function", library_dlsym, 1); + /* + * Document-method: find_variable + * call-seq: find_variable(name) + * @param [String] name library variable's name + * @return [FFI::DynamicLibrary::Symbol] library variable symbol + */ + rb_define_method(LibraryClass, "find_variable", library_dlsym, 1); + rb_define_method(LibraryClass, "last_error", library_dlerror, 0); + rb_define_attr(LibraryClass, "name", 1, 0); + + rb_define_alloc_func(SymbolClass, symbol_allocate); + rb_undef_method(SymbolClass, "new"); + rb_define_method(SymbolClass, "inspect", symbol_inspect, 0); + rb_define_method(SymbolClass, "initialize_copy", symbol_initialize_copy, 1); + +#define DEF(x) rb_define_const(LibraryClass, "RTLD_" #x, UINT2NUM(RTLD_##x)) + DEF(LAZY); + DEF(NOW); + DEF(GLOBAL); + DEF(LOCAL); + DEF(NOLOAD); + DEF(NODELETE); + DEF(FIRST); + DEF(DEEPBIND); + DEF(MEMBER); + DEF(BINDING_MASK); + DEF(LOCATION_MASK); + DEF(ALL_MASK); +#undef DEF + +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.h new file mode 100644 index 0000000..97bf7bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LIBRARY_H +#define _LIBRARY_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* if these aren't defined (eg. windows), we need sensible defaults */ +#ifndef RTLD_LAZY +#define RTLD_LAZY 1 +#endif + +#ifndef RTLD_NOW +#define RTLD_NOW 2 +#endif + +#ifndef RTLD_LOCAL +#define RTLD_LOCAL 4 +#endif + +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 8 +#endif + +/* If these aren't defined, they're not supported so define as 0 */ +#ifndef RTLD_NOLOAD +#define RTLD_NOLOAD 0 +#endif + +#ifndef RTLD_NODELETE +#define RTLD_NODELETE 0 +#endif + +#ifndef RTLD_FIRST +#define RTLD_FIRST 0 +#endif + +#ifndef RTLD_DEEPBIND +#define RTLD_DEEPBIND 0 +#endif + +#ifndef RTLD_MEMBER +#define RTLD_MEMBER 0 +#endif + +/* convenience */ +#ifndef RTLD_BINDING_MASK +#define RTLD_BINDING_MASK (RTLD_LAZY | RTLD_NOW) +#endif + +#ifndef RTLD_LOCATION_MASK +#define RTLD_LOCATION_MASK (RTLD_LOCAL | RTLD_GLOBAL) +#endif + +#ifndef RTLD_ALL_MASK +#define RTLD_ALL_MASK (RTLD_BINDING_MASK | RTLD_LOCATION_MASK | RTLD_NOLOAD | RTLD_NODELETE | RTLD_FIRST | RTLD_DEEPBIND | RTLD_MEMBER) +#endif + +typedef struct Library { + void* handle; +} Library; + +extern void rbffi_DynamicLibrary_Init(VALUE ffiModule); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBRARY_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.o new file mode 100644 index 0000000..4c7a414 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/DynamicLibrary.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.c new file mode 100644 index 0000000..1a57591 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.c @@ -0,0 +1,917 @@ +/* + * Copyright (c) 2009-2011 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _WIN32 +# include +# include +#endif + +#include +#include +#include +#include +#include + +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) +#include +#endif +#include + +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Struct.h" +#include "Platform.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "ClosurePool.h" +#include "MappedType.h" +#include "Thread.h" +#include "LongDouble.h" +#include "MethodHandle.h" +#include "Function.h" + +typedef struct Function_ { + Pointer base; + FunctionType* info; + MethodHandle* methodHandle; + bool autorelease; + Closure* closure; + VALUE rbProc; + VALUE rbFunctionInfo; +} Function; + +static void function_mark(Function *); +static void function_free(Function *); +static VALUE function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc); +static void callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data); +static bool callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize); +static void* callback_with_gvl(void* data); +static VALUE invoke_callback(VALUE data); +static VALUE save_callback_exception(VALUE data, VALUE exc); + +#define DEFER_ASYNC_CALLBACK 1 + + +#if defined(DEFER_ASYNC_CALLBACK) +static VALUE async_cb_event(void *); +static VALUE async_cb_call(void *); +#endif + +extern int ruby_thread_has_gvl_p(void); +extern int ruby_native_thread_p(void); + +VALUE rbffi_FunctionClass = Qnil; + +#if defined(DEFER_ASYNC_CALLBACK) +static VALUE async_cb_thread = Qnil; +#endif + +static ID id_call = 0, id_to_native = 0, id_from_native = 0, id_cbtable = 0, id_cb_ref = 0; + +struct gvl_callback { + Closure* closure; + void* retval; + void** parameters; + bool done; + rbffi_frame_t *frame; +#if defined(DEFER_ASYNC_CALLBACK) + struct gvl_callback* next; +# ifndef _WIN32 + pthread_cond_t async_cond; + pthread_mutex_t async_mutex; +# else + HANDLE async_event; +# endif +#endif +}; + + +#if defined(DEFER_ASYNC_CALLBACK) +static struct gvl_callback* async_cb_list = NULL; +# ifndef _WIN32 + static pthread_mutex_t async_cb_mutex = PTHREAD_MUTEX_INITIALIZER; + static pthread_cond_t async_cb_cond = PTHREAD_COND_INITIALIZER; +# else + static HANDLE async_cb_cond; + static CRITICAL_SECTION async_cb_lock; +# endif +#endif + + +static VALUE +function_allocate(VALUE klass) +{ + Function *fn; + VALUE obj; + + obj = Data_Make_Struct(klass, Function, function_mark, function_free, fn); + + fn->base.memory.flags = MEM_RD; + fn->base.rbParent = Qnil; + fn->rbProc = Qnil; + fn->rbFunctionInfo = Qnil; + fn->autorelease = true; + + return obj; +} + +static void +function_mark(Function *fn) +{ + rb_gc_mark(fn->base.rbParent); + rb_gc_mark(fn->rbProc); + rb_gc_mark(fn->rbFunctionInfo); +} + +static void +function_free(Function *fn) +{ + if (fn->methodHandle != NULL) { + rbffi_MethodHandle_Free(fn->methodHandle); + } + + if (fn->closure != NULL && fn->autorelease) { + 
rbffi_Closure_Free(fn->closure); + } + + xfree(fn); +} + +/* + * @param [Type, Symbol] return_type return type for the function + * @param [Array] param_types array of parameters types + * @param [Hash] options see {FFI::FunctionType} for available options + * @return [self] + * A new Function instance. + * + * Define a function from a Proc or a block. + * + * @overload initialize(return_type, param_types, options = {}) { |i| ... } + * @yieldparam i parameters for the function + * @overload initialize(return_type, param_types, proc, options = {}) + * @param [Proc] proc + */ +static VALUE +function_initialize(int argc, VALUE* argv, VALUE self) +{ + + VALUE rbReturnType = Qnil, rbParamTypes = Qnil, rbProc = Qnil, rbOptions = Qnil; + VALUE rbFunctionInfo = Qnil; + VALUE infoArgv[3]; + int nargs; + + nargs = rb_scan_args(argc, argv, "22", &rbReturnType, &rbParamTypes, &rbProc, &rbOptions); + + /* + * Callback with block, + * e.g. Function.new(:int, [ :int ]) { |i| blah } + * or Function.new(:int, [ :int ], { :convention => :stdcall }) { |i| blah } + */ + if (rb_block_given_p()) { + if (nargs > 3) { + rb_raise(rb_eArgError, "cannot create function with both proc/address and block"); + } + rbOptions = rbProc; + rbProc = rb_block_proc(); + } else { + /* Callback with proc, or Function with address + * e.g. Function.new(:int, [ :int ], Proc.new { |i| }) + * Function.new(:int, [ :int ], Proc.new { |i| }, { :convention => :stdcall }) + * Function.new(:int, [ :int ], addr) + * Function.new(:int, [ :int ], addr, { :convention => :stdcall }) + */ + } + + infoArgv[0] = rbReturnType; + infoArgv[1] = rbParamTypes; + infoArgv[2] = rbOptions; + rbFunctionInfo = rb_class_new_instance(rbOptions != Qnil ? 3 : 2, infoArgv, rbffi_FunctionTypeClass); + + function_init(self, rbFunctionInfo, rbProc); + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +function_initialize_copy(VALUE self, VALUE other) +{ + rb_raise(rb_eRuntimeError, "cannot duplicate function instances"); + return Qnil; +} + +VALUE +rbffi_Function_NewInstance(VALUE rbFunctionInfo, VALUE rbProc) +{ + return function_init(function_allocate(rbffi_FunctionClass), rbFunctionInfo, rbProc); +} + +VALUE +rbffi_Function_ForProc(VALUE rbFunctionInfo, VALUE proc) +{ + VALUE callback, cbref, cbTable; + + cbref = RTEST(rb_ivar_defined(proc, id_cb_ref)) ? rb_ivar_get(proc, id_cb_ref) : Qnil; + /* If the first callback reference has the same function function signature, use it */ + if (cbref != Qnil && CLASS_OF(cbref) == rbffi_FunctionClass) { + Function* fp; + Data_Get_Struct(cbref, Function, fp); + if (fp->rbFunctionInfo == rbFunctionInfo) { + return cbref; + } + } + + cbTable = RTEST(rb_ivar_defined(proc, id_cbtable)) ? 
rb_ivar_get(proc, id_cbtable) : Qnil; + if (cbTable != Qnil && (callback = rb_hash_aref(cbTable, rbFunctionInfo)) != Qnil) { + return callback; + } + + /* No existing function for the proc with that signature, create a new one and cache it */ + callback = rbffi_Function_NewInstance(rbFunctionInfo, proc); + if (cbref == Qnil) { + /* If there is no other cb already cached for this proc, we can use the ivar slot */ + rb_ivar_set(proc, id_cb_ref, callback); + } else { + /* The proc instance has been used as more than one type of callback, store extras in a hash */ + if(cbTable == Qnil) { + cbTable = rb_hash_new(); + rb_ivar_set(proc, id_cbtable, cbTable); + } + rb_hash_aset(cbTable, rbFunctionInfo, callback); + } + + return callback; +} + +#if !defined(_WIN32) && defined(DEFER_ASYNC_CALLBACK) +static void +after_fork_callback(void) +{ + /* Ensure that a new dispatcher thread is started in a forked process */ + async_cb_thread = Qnil; + pthread_mutex_init(&async_cb_mutex, NULL); + pthread_cond_init(&async_cb_cond, NULL); +} +#endif + +static VALUE +function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc) +{ + Function* fn = NULL; + + Data_Get_Struct(self, Function, fn); + + fn->rbFunctionInfo = rbFunctionInfo; + + Data_Get_Struct(fn->rbFunctionInfo, FunctionType, fn->info); + + if (rb_obj_is_kind_of(rbProc, rbffi_PointerClass)) { + Pointer* orig; + Data_Get_Struct(rbProc, Pointer, orig); + fn->base.memory = orig->memory; + fn->base.rbParent = rbProc; + + } else if (rb_obj_is_kind_of(rbProc, rb_cProc) || rb_respond_to(rbProc, id_call)) { + if (fn->info->closurePool == NULL) { + fn->info->closurePool = rbffi_ClosurePool_New(sizeof(ffi_closure), callback_prep, fn->info); + if (fn->info->closurePool == NULL) { + rb_raise(rb_eNoMemError, "failed to create closure pool"); + } + } + +#if defined(DEFER_ASYNC_CALLBACK) + if (async_cb_thread == Qnil) { + +#if !defined(_WIN32) + if( pthread_atfork(NULL, NULL, after_fork_callback) ){ + rb_warn("FFI: unable to register fork callback"); + } +#endif + + async_cb_thread = rb_thread_create(async_cb_event, NULL); + /* Name thread, for better debugging */ + rb_funcall(async_cb_thread, rb_intern("name="), 1, rb_str_new2("FFI Callback Dispatcher")); + } +#endif + + fn->closure = rbffi_Closure_Alloc(fn->info->closurePool); + fn->closure->info = fn; + fn->base.memory.address = fn->closure->code; + fn->base.memory.size = sizeof(*fn->closure); + fn->autorelease = true; + + } else { + rb_raise(rb_eTypeError, "wrong argument type %s, expected pointer or proc", + rb_obj_classname(rbProc)); + } + + fn->rbProc = rbProc; + + return self; +} + +/* + * call-seq: call(*args) + * @param [Array] args function arguments + * @return [FFI::Type] + * Call the function + */ +static VALUE +function_call(int argc, VALUE* argv, VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + return (*fn->info->invoke)(argc, argv, fn->base.memory.address, fn->info); +} + +/* + * call-seq: attach(m, name) + * @param [Module] m + * @param [String] name + * @return [self] + * Attach a Function to the Module +m+ as +name+. 
+ */ +static VALUE +function_attach(VALUE self, VALUE module, VALUE name) +{ + Function* fn; + char var[1024]; + + Data_Get_Struct(self, Function, fn); + + if (fn->info->parameterCount == -1) { + rb_raise(rb_eRuntimeError, "cannot attach variadic functions"); + return Qnil; + } + + if (!rb_obj_is_kind_of(module, rb_cModule)) { + rb_raise(rb_eRuntimeError, "trying to attach function to non-module"); + return Qnil; + } + + if (fn->methodHandle == NULL) { + fn->methodHandle = rbffi_MethodHandle_Alloc(fn->info, fn->base.memory.address); + } + + /* + * Stash the Function in a module variable so it does not get garbage collected + */ + snprintf(var, sizeof(var), "@@%s", StringValueCStr(name)); + rb_cv_set(module, var, self); + + rb_define_singleton_method(module, StringValueCStr(name), + rbffi_MethodHandle_CodeAddress(fn->methodHandle), -1); + + + rb_define_method(module, StringValueCStr(name), + rbffi_MethodHandle_CodeAddress(fn->methodHandle), -1); + + return self; +} + +/* + * call-seq: autorelease = autorelease + * @param [Boolean] autorelease + * @return [self] + * Set +autorelease+ attribute (See {Pointer}). + */ +static VALUE +function_set_autorelease(VALUE self, VALUE autorelease) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + fn->autorelease = RTEST(autorelease); + + return self; +} + +static VALUE +function_autorelease_p(VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + return fn->autorelease ? Qtrue : Qfalse; +} + +/* + * call-seq: free + * @return [self] + * Free memory allocated by Function. + */ +static VALUE +function_release(VALUE self) +{ + Function* fn; + + Data_Get_Struct(self, Function, fn); + + if (fn->closure == NULL) { + rb_raise(rb_eRuntimeError, "cannot free function which was not allocated"); + } + + rbffi_Closure_Free(fn->closure); + fn->closure = NULL; + + return self; +} + +static void +callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data) +{ + struct gvl_callback cb = { 0 }; + + cb.closure = (Closure *) user_data; + cb.retval = retval; + cb.parameters = parameters; + cb.done = false; + cb.frame = rbffi_frame_current(); + + if (cb.frame != NULL) cb.frame->exc = Qnil; + + if (ruby_native_thread_p()) { + if(ruby_thread_has_gvl_p()) { + callback_with_gvl(&cb); + } else { + rb_thread_call_with_gvl(callback_with_gvl, &cb); + } +#if defined(DEFER_ASYNC_CALLBACK) && !defined(_WIN32) + } else { + bool empty = false; + + pthread_mutex_init(&cb.async_mutex, NULL); + pthread_cond_init(&cb.async_cond, NULL); + + /* Now signal the async callback thread */ + pthread_mutex_lock(&async_cb_mutex); + empty = async_cb_list == NULL; + cb.next = async_cb_list; + async_cb_list = &cb; + + pthread_cond_signal(&async_cb_cond); + pthread_mutex_unlock(&async_cb_mutex); + + /* Wait for the thread executing the ruby callback to signal it is done */ + pthread_mutex_lock(&cb.async_mutex); + while (!cb.done) { + pthread_cond_wait(&cb.async_cond, &cb.async_mutex); + } + pthread_mutex_unlock(&cb.async_mutex); + pthread_cond_destroy(&cb.async_cond); + pthread_mutex_destroy(&cb.async_mutex); + +#elif defined(DEFER_ASYNC_CALLBACK) && defined(_WIN32) + } else { + bool empty = false; + + cb.async_event = CreateEvent(NULL, FALSE, FALSE, NULL); + + /* Now signal the async callback thread */ + EnterCriticalSection(&async_cb_lock); + empty = async_cb_list == NULL; + cb.next = async_cb_list; + async_cb_list = &cb; + LeaveCriticalSection(&async_cb_lock); + + SetEvent(async_cb_cond); + + /* Wait for the thread executing the ruby callback to 
signal it is done */ + WaitForSingleObject(cb.async_event, INFINITE); + CloseHandle(cb.async_event); +#endif + } +} + +#if defined(DEFER_ASYNC_CALLBACK) +struct async_wait { + void* cb; + bool stop; +}; + +static void * async_cb_wait(void *); +static void async_cb_stop(void *); + +static VALUE +async_cb_event(void* unused) +{ + struct async_wait w = { 0 }; + + w.stop = false; + while (!w.stop) { + rb_thread_call_without_gvl(async_cb_wait, &w, async_cb_stop, &w); + if (w.cb != NULL) { + /* Start up a new ruby thread to run the ruby callback */ + VALUE new_thread = rb_thread_create(async_cb_call, w.cb); + /* Name thread, for better debugging */ + rb_funcall(new_thread, rb_intern("name="), 1, rb_str_new2("FFI Callback Runner")); + } + } + + return Qnil; +} + +#ifdef _WIN32 +static void * +async_cb_wait(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + w->cb = NULL; + + EnterCriticalSection(&async_cb_lock); + + while (!w->stop && async_cb_list == NULL) { + LeaveCriticalSection(&async_cb_lock); + WaitForSingleObject(async_cb_cond, INFINITE); + EnterCriticalSection(&async_cb_lock); + } + + if (async_cb_list != NULL) { + w->cb = async_cb_list; + async_cb_list = async_cb_list->next; + } + + LeaveCriticalSection(&async_cb_lock); + + return NULL; +} + +static void +async_cb_stop(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + EnterCriticalSection(&async_cb_lock); + w->stop = true; + LeaveCriticalSection(&async_cb_lock); + SetEvent(async_cb_cond); +} + +#else +static void * +async_cb_wait(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + w->cb = NULL; + + pthread_mutex_lock(&async_cb_mutex); + + while (!w->stop && async_cb_list == NULL) { + pthread_cond_wait(&async_cb_cond, &async_cb_mutex); + } + + if (async_cb_list != NULL) { + w->cb = async_cb_list; + async_cb_list = async_cb_list->next; + } + + pthread_mutex_unlock(&async_cb_mutex); + + return NULL; +} + +static void +async_cb_stop(void *data) +{ + struct async_wait* w = (struct async_wait *) data; + + pthread_mutex_lock(&async_cb_mutex); + w->stop = true; + pthread_cond_signal(&async_cb_cond); + pthread_mutex_unlock(&async_cb_mutex); +} +#endif + +static VALUE +async_cb_call(void *data) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + callback_with_gvl(data); + + /* Signal the original native thread that the ruby code has completed */ +#ifdef _WIN32 + SetEvent(cb->async_event); +#else + pthread_mutex_lock(&cb->async_mutex); + cb->done = true; + pthread_cond_signal(&cb->async_cond); + pthread_mutex_unlock(&cb->async_mutex); +#endif + + return Qnil; +} + +#endif + +static void * +callback_with_gvl(void* data) +{ + rb_rescue2(invoke_callback, (VALUE) data, save_callback_exception, (VALUE) data, rb_eException, (VALUE) 0); + return NULL; +} + +static VALUE +invoke_callback(VALUE data) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + Function* fn = (Function *) cb->closure->info; + FunctionType *cbInfo = fn->info; + Type* returnType = cbInfo->returnType; + void* retval = cb->retval; + void** parameters = cb->parameters; + VALUE* rbParams; + VALUE rbReturnType = cbInfo->rbReturnType; + VALUE rbReturnValue; + int i; + + rbParams = ALLOCA_N(VALUE, cbInfo->parameterCount); + for (i = 0; i < cbInfo->parameterCount; ++i) { + VALUE param; + Type* paramType = cbInfo->parameterTypes[i]; + VALUE rbParamType = rb_ary_entry(cbInfo->rbParameterTypes, i); + + if (unlikely(paramType->nativeType == NATIVE_MAPPED)) { + rbParamType = ((MappedType *) paramType)->rbType; + 
paramType = ((MappedType *) paramType)->type; + } + + switch (paramType->nativeType) { + case NATIVE_INT8: + param = INT2NUM(*(int8_t *) parameters[i]); + break; + case NATIVE_UINT8: + param = UINT2NUM(*(uint8_t *) parameters[i]); + break; + case NATIVE_INT16: + param = INT2NUM(*(int16_t *) parameters[i]); + break; + case NATIVE_UINT16: + param = UINT2NUM(*(uint16_t *) parameters[i]); + break; + case NATIVE_INT32: + param = INT2NUM(*(int32_t *) parameters[i]); + break; + case NATIVE_UINT32: + param = UINT2NUM(*(uint32_t *) parameters[i]); + break; + case NATIVE_INT64: + param = LL2NUM(*(int64_t *) parameters[i]); + break; + case NATIVE_UINT64: + param = ULL2NUM(*(uint64_t *) parameters[i]); + break; + case NATIVE_LONG: + param = LONG2NUM(*(long *) parameters[i]); + break; + case NATIVE_ULONG: + param = ULONG2NUM(*(unsigned long *) parameters[i]); + break; + case NATIVE_FLOAT32: + param = rb_float_new(*(float *) parameters[i]); + break; + case NATIVE_FLOAT64: + param = rb_float_new(*(double *) parameters[i]); + break; + case NATIVE_LONGDOUBLE: + param = rbffi_longdouble_new(*(long double *) parameters[i]); + break; + case NATIVE_STRING: + param = (*(void **) parameters[i] != NULL) ? rb_str_new2(*(char **) parameters[i]) : Qnil; + break; + case NATIVE_POINTER: + param = rbffi_Pointer_NewInstance(*(void **) parameters[i]); + break; + case NATIVE_BOOL: + param = (*(uint8_t *) parameters[i]) ? Qtrue : Qfalse; + break; + + case NATIVE_FUNCTION: + case NATIVE_STRUCT: + param = rbffi_NativeValue_ToRuby(paramType, rbParamType, parameters[i]); + break; + + default: + param = Qnil; + break; + } + + /* Convert the native value into a custom ruby value */ + if (unlikely(cbInfo->parameterTypes[i]->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { param, Qnil }; + param = rb_funcall2(((MappedType *) cbInfo->parameterTypes[i])->rbConverter, id_from_native, 2, values); + } + + rbParams[i] = param; + } + + rbReturnValue = rb_funcall2(fn->rbProc, id_call, cbInfo->parameterCount, rbParams); + + if (unlikely(returnType->nativeType == NATIVE_MAPPED)) { + VALUE values[] = { rbReturnValue, Qnil }; + rbReturnValue = rb_funcall2(((MappedType *) returnType)->rbConverter, id_to_native, 2, values); + rbReturnType = ((MappedType *) returnType)->rbType; + returnType = ((MappedType* ) returnType)->type; + } + + if (rbReturnValue == Qnil || TYPE(rbReturnValue) == T_NIL) { + memset(retval, 0, returnType->ffiType->size); + } else switch (returnType->nativeType) { + case NATIVE_INT8: + case NATIVE_INT16: + case NATIVE_INT32: + *((ffi_sarg *) retval) = NUM2INT(rbReturnValue); + break; + case NATIVE_UINT8: + case NATIVE_UINT16: + case NATIVE_UINT32: + *((ffi_arg *) retval) = NUM2UINT(rbReturnValue); + break; + case NATIVE_INT64: + *((int64_t *) retval) = NUM2LL(rbReturnValue); + break; + case NATIVE_UINT64: + *((uint64_t *) retval) = NUM2ULL(rbReturnValue); + break; + case NATIVE_LONG: + *((ffi_sarg *) retval) = NUM2LONG(rbReturnValue); + break; + case NATIVE_ULONG: + *((ffi_arg *) retval) = NUM2ULONG(rbReturnValue); + break; + case NATIVE_FLOAT32: + *((float *) retval) = (float) NUM2DBL(rbReturnValue); + break; + case NATIVE_FLOAT64: + *((double *) retval) = NUM2DBL(rbReturnValue); + break; + case NATIVE_LONGDOUBLE: + *((long double *) retval) = rbffi_num2longdouble(rbReturnValue); + break; + case NATIVE_POINTER: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) { + *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address; + } else { + /* Default to returning 
NULL if not a value pointer object. handles nil case as well */ + *((void **) retval) = NULL; + } + break; + + case NATIVE_BOOL: + *((ffi_arg *) retval) = rbReturnValue == Qtrue; + break; + + case NATIVE_FUNCTION: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) { + + *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address; + + } else if (rb_obj_is_kind_of(rbReturnValue, rb_cProc) || rb_respond_to(rbReturnValue, id_call)) { + VALUE function; + + function = rbffi_Function_ForProc(rbReturnType, rbReturnValue); + + *((void **) retval) = ((AbstractMemory *) DATA_PTR(function))->address; + } else { + *((void **) retval) = NULL; + } + break; + + case NATIVE_STRUCT: + if (TYPE(rbReturnValue) == T_DATA && rb_obj_is_kind_of(rbReturnValue, rbffi_StructClass)) { + AbstractMemory* memory = ((Struct *) DATA_PTR(rbReturnValue))->pointer; + + if (memory->address != NULL) { + memcpy(retval, memory->address, returnType->ffiType->size); + + } else { + memset(retval, 0, returnType->ffiType->size); + } + + } else { + memset(retval, 0, returnType->ffiType->size); + } + break; + + default: + *((ffi_arg *) retval) = 0; + break; + } + + return Qnil; +} + +static VALUE +save_callback_exception(VALUE data, VALUE exc) +{ + struct gvl_callback* cb = (struct gvl_callback *) data; + + memset(cb->retval, 0, ((Function *) cb->closure->info)->info->returnType->ffiType->size); + if (cb->frame != NULL) cb->frame->exc = exc; + + return Qnil; +} + +static bool +callback_prep(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + FunctionType* fnInfo = (FunctionType *) ctx; + ffi_status ffiStatus; + + ffiStatus = ffi_prep_closure_loc(closure->pcl, &fnInfo->ffi_cif, callback_invoke, closure, code); + if (ffiStatus != FFI_OK) { + snprintf(errmsg, errmsgsize, "ffi_prep_closure_loc failed. status=%#x", ffiStatus); + return false; + } + + return true; +} + +void +rbffi_Function_Init(VALUE moduleFFI) +{ + rbffi_FunctionInfo_Init(moduleFFI); + /* + * Document-class: FFI::Function < FFI::Pointer + */ + rbffi_FunctionClass = rb_define_class_under(moduleFFI, "Function", rbffi_PointerClass); + + rb_global_variable(&rbffi_FunctionClass); + rb_define_alloc_func(rbffi_FunctionClass, function_allocate); + + rb_define_method(rbffi_FunctionClass, "initialize", function_initialize, -1); + rb_define_method(rbffi_FunctionClass, "initialize_copy", function_initialize_copy, 1); + rb_define_method(rbffi_FunctionClass, "call", function_call, -1); + rb_define_method(rbffi_FunctionClass, "attach", function_attach, 2); + rb_define_method(rbffi_FunctionClass, "free", function_release, 0); + rb_define_method(rbffi_FunctionClass, "autorelease=", function_set_autorelease, 1); + /* + * call-seq: autorelease + * @return [Boolean] + * Get +autorelease+ attribute. + * Synonymous for {#autorelease?}. + */ + rb_define_method(rbffi_FunctionClass, "autorelease", function_autorelease_p, 0); + /* + * call-seq: autorelease? + * @return [Boolean] +autorelease+ attribute + * Get +autorelease+ attribute. 
+ */ + rb_define_method(rbffi_FunctionClass, "autorelease?", function_autorelease_p, 0); + + id_call = rb_intern("call"); + id_cbtable = rb_intern("@__ffi_callback_table__"); + id_cb_ref = rb_intern("@__ffi_callback__"); + id_to_native = rb_intern("to_native"); + id_from_native = rb_intern("from_native"); +#if defined(_WIN32) + InitializeCriticalSection(&async_cb_lock); + async_cb_cond = CreateEvent(NULL, FALSE, FALSE, NULL); +#endif +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.h new file mode 100644 index 0000000..406b4d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_FUNCTION_H +#define RBFFI_FUNCTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +# include + +#include + +typedef struct FunctionType_ FunctionType; + +#include "Type.h" +#include "Call.h" +#include "ClosurePool.h" + +struct FunctionType_ { + Type type; /* The native type of a FunctionInfo object */ + VALUE rbReturnType; + VALUE rbParameterTypes; + + Type* returnType; + Type** parameterTypes; + NativeType* nativeParameterTypes; + ffi_type* ffiReturnType; + ffi_type** ffiParameterTypes; + ffi_cif ffi_cif; + Invoker invoke; + ClosurePool* closurePool; + int parameterCount; + int flags; + ffi_abi abi; + int callbackCount; + VALUE* callbackParameters; + VALUE rbEnums; + bool ignoreErrno; + bool blocking; + bool hasStruct; +}; + +extern VALUE rbffi_FunctionTypeClass, rbffi_FunctionClass; + +void rbffi_Function_Init(VALUE moduleFFI); +VALUE rbffi_Function_NewInstance(VALUE functionInfo, VALUE proc); +VALUE rbffi_Function_ForProc(VALUE cbInfo, VALUE proc); +void rbffi_FunctionInfo_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_FUNCTION_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.o new file mode 100644 index 0000000..71e7da8 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Function.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/FunctionInfo.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/FunctionInfo.c new file mode 100644 index 0000000..64e9874 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/FunctionInfo.c @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (C) 2009 Andrea Fazzi + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +# include +#endif +#include +#include + +#include +#include + +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Types.h" +#include "Type.h" +#include "StructByValue.h" +#include "Function.h" + +static VALUE fntype_allocate(VALUE klass); +static VALUE fntype_initialize(int argc, VALUE* argv, VALUE self); +static void fntype_mark(FunctionType*); +static void fntype_free(FunctionType *); + +VALUE rbffi_FunctionTypeClass = Qnil; + +static VALUE +fntype_allocate(VALUE klass) +{ + FunctionType* fnInfo; + VALUE obj = Data_Make_Struct(klass, FunctionType, fntype_mark, fntype_free, fnInfo); + + fnInfo->type.ffiType = &ffi_type_pointer; + fnInfo->type.nativeType = NATIVE_FUNCTION; + fnInfo->rbReturnType = Qnil; + fnInfo->rbParameterTypes = Qnil; + fnInfo->rbEnums = Qnil; + fnInfo->invoke = rbffi_CallFunction; + fnInfo->closurePool = NULL; + + return obj; +} + +static void +fntype_mark(FunctionType* fnInfo) +{ + rb_gc_mark(fnInfo->rbReturnType); + rb_gc_mark(fnInfo->rbParameterTypes); + rb_gc_mark(fnInfo->rbEnums); + if (fnInfo->callbackCount > 0 && fnInfo->callbackParameters != NULL) { + rb_gc_mark_locations(&fnInfo->callbackParameters[0], &fnInfo->callbackParameters[fnInfo->callbackCount]); + } +} + +static void +fntype_free(FunctionType* fnInfo) +{ + xfree(fnInfo->parameterTypes); + xfree(fnInfo->ffiParameterTypes); + xfree(fnInfo->nativeParameterTypes); + xfree(fnInfo->callbackParameters); + if (fnInfo->closurePool != NULL) { + rbffi_ClosurePool_Free(fnInfo->closurePool); + } + xfree(fnInfo); +} + +/* + * call-seq: initialize(return_type, param_types, options={}) + * @param [Type, Symbol] return_type return type for the function + * @param [Array] param_types array of parameters types + * @param [Hash] options + * @option options [Boolean] :blocking set to true if the C function is a blocking call + * @option options [Symbol] :convention calling convention see {FFI::Library#calling_convention} + * @option options [FFI::Enums] :enums + * @return [self] + * A new FunctionType instance. 
+ */ +static VALUE +fntype_initialize(int argc, VALUE* argv, VALUE self) +{ + FunctionType *fnInfo; + ffi_status status; + VALUE rbReturnType = Qnil, rbParamTypes = Qnil, rbOptions = Qnil; + VALUE rbEnums = Qnil, rbConvention = Qnil, rbBlocking = Qnil; +#if defined(X86_WIN32) + VALUE rbConventionStr; +#endif + int i, nargs; + + nargs = rb_scan_args(argc, argv, "21", &rbReturnType, &rbParamTypes, &rbOptions); + if (nargs >= 3 && rbOptions != Qnil) { + rbConvention = rb_hash_aref(rbOptions, ID2SYM(rb_intern("convention"))); + rbEnums = rb_hash_aref(rbOptions, ID2SYM(rb_intern("enums"))); + rbBlocking = rb_hash_aref(rbOptions, ID2SYM(rb_intern("blocking"))); + } + + Check_Type(rbParamTypes, T_ARRAY); + + Data_Get_Struct(self, FunctionType, fnInfo); + fnInfo->parameterCount = (int) RARRAY_LEN(rbParamTypes); + fnInfo->parameterTypes = xcalloc(fnInfo->parameterCount, sizeof(*fnInfo->parameterTypes)); + fnInfo->ffiParameterTypes = xcalloc(fnInfo->parameterCount, sizeof(ffi_type *)); + fnInfo->nativeParameterTypes = xcalloc(fnInfo->parameterCount, sizeof(*fnInfo->nativeParameterTypes)); + fnInfo->rbParameterTypes = rb_ary_new2(fnInfo->parameterCount); + fnInfo->rbEnums = rbEnums; + fnInfo->blocking = RTEST(rbBlocking); + fnInfo->hasStruct = false; + + for (i = 0; i < fnInfo->parameterCount; ++i) { + VALUE entry = rb_ary_entry(rbParamTypes, i); + VALUE type = rbffi_Type_Lookup(entry); + + if (!RTEST(type)) { + VALUE typeName = rb_funcall2(entry, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid parameter type (%s)", RSTRING_PTR(typeName)); + } + + if (rb_obj_is_kind_of(type, rbffi_FunctionTypeClass)) { + REALLOC_N(fnInfo->callbackParameters, VALUE, fnInfo->callbackCount + 1); + fnInfo->callbackParameters[fnInfo->callbackCount++] = type; + } + + if (rb_obj_is_kind_of(type, rbffi_StructByValueClass)) { + fnInfo->hasStruct = true; + } + + rb_ary_push(fnInfo->rbParameterTypes, type); + Data_Get_Struct(type, Type, fnInfo->parameterTypes[i]); + fnInfo->ffiParameterTypes[i] = fnInfo->parameterTypes[i]->ffiType; + fnInfo->nativeParameterTypes[i] = fnInfo->parameterTypes[i]->nativeType; + } + + fnInfo->rbReturnType = rbffi_Type_Lookup(rbReturnType); + if (!RTEST(fnInfo->rbReturnType)) { + VALUE typeName = rb_funcall2(rbReturnType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid return type (%s)", RSTRING_PTR(typeName)); + } + + if (rb_obj_is_kind_of(fnInfo->rbReturnType, rbffi_StructByValueClass)) { + fnInfo->hasStruct = true; + } + + Data_Get_Struct(fnInfo->rbReturnType, Type, fnInfo->returnType); + fnInfo->ffiReturnType = fnInfo->returnType->ffiType; + +#if defined(X86_WIN32) + rbConventionStr = (rbConvention != Qnil) ? rb_funcall2(rbConvention, rb_intern("to_s"), 0, NULL) : Qnil; + fnInfo->abi = (rbConventionStr != Qnil && strcmp(StringValueCStr(rbConventionStr), "stdcall") == 0) + ? 
FFI_STDCALL : FFI_DEFAULT_ABI; +#else + fnInfo->abi = FFI_DEFAULT_ABI; +#endif + + status = ffi_prep_cif(&fnInfo->ffi_cif, fnInfo->abi, fnInfo->parameterCount, + fnInfo->ffiReturnType, fnInfo->ffiParameterTypes); + switch (status) { + case FFI_BAD_ABI: + rb_raise(rb_eArgError, "Invalid ABI specified"); + case FFI_BAD_TYPEDEF: + rb_raise(rb_eArgError, "Invalid argument type specified"); + case FFI_OK: + break; + default: + rb_raise(rb_eArgError, "Unknown FFI error"); + } + + fnInfo->invoke = rbffi_GetInvoker(fnInfo); + + return self; +} + +/* + * call-seq: result_type + * @return [Type] + * Get the return type of the function type + */ +static VALUE +fntype_result_type(VALUE self) +{ + FunctionType* ft; + + Data_Get_Struct(self, FunctionType, ft); + + return ft->rbReturnType; +} + +/* + * call-seq: param_types + * @return [Array] + * Get parameters types. + */ +static VALUE +fntype_param_types(VALUE self) +{ + FunctionType* ft; + + Data_Get_Struct(self, FunctionType, ft); + + return rb_ary_dup(ft->rbParameterTypes); +} + +void +rbffi_FunctionInfo_Init(VALUE moduleFFI) +{ + VALUE ffi_Type; + + ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::FunctionType < FFI::Type + */ + rbffi_FunctionTypeClass = rb_define_class_under(moduleFFI, "FunctionType",ffi_Type); + rb_global_variable(&rbffi_FunctionTypeClass); + /* + * Document-const: FFI::CallbackInfo = FFI::FunctionType + */ + rb_define_const(moduleFFI, "CallbackInfo", rbffi_FunctionTypeClass); + /* + * Document-const: FFI::FunctionInfo = FFI::FunctionType + */ + rb_define_const(moduleFFI, "FunctionInfo", rbffi_FunctionTypeClass); + /* + * Document-const: FFI::Type::Function = FFI::FunctionType + */ + rb_define_const(ffi_Type, "Function", rbffi_FunctionTypeClass); + + rb_define_alloc_func(rbffi_FunctionTypeClass, fntype_allocate); + rb_define_method(rbffi_FunctionTypeClass, "initialize", fntype_initialize, -1); + rb_define_method(rbffi_FunctionTypeClass, "result_type", fntype_result_type, 0); + rb_define_method(rbffi_FunctionTypeClass, "param_types", fntype_param_types, 0); + +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/FunctionInfo.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/FunctionInfo.o new file mode 100644 index 0000000..7c8af5f Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/FunctionInfo.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.c new file mode 100644 index 0000000..6beecef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Aman Gupta + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +#endif +#include +#include +#include +#include +#include +#include + +#include "LastError.h" + +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__) +# include +# define USE_PTHREAD_LOCAL +#endif + +#if defined(__CYGWIN__) +typedef uint32_t DWORD; +DWORD __stdcall GetLastError(void); +void __stdcall SetLastError(DWORD); +#endif + +typedef struct ThreadData { + int td_errno; +#if defined(_WIN32) || defined(__CYGWIN__) + DWORD td_winapi_errno; +#endif +} ThreadData; + +#if defined(USE_PTHREAD_LOCAL) +static pthread_key_t threadDataKey; +#endif + +static inline ThreadData* thread_data_get(void); + +#if defined(USE_PTHREAD_LOCAL) + +static ThreadData* +thread_data_init(void) +{ + ThreadData* td = xcalloc(1, sizeof(ThreadData)); + + pthread_setspecific(threadDataKey, td); + + return td; +} + + +static inline ThreadData* +thread_data_get(void) +{ + ThreadData* td = pthread_getspecific(threadDataKey); + return td != NULL ? td : thread_data_init(); +} + +static void +thread_data_free(void *ptr) +{ + xfree(ptr); +} + +#else +static ID id_thread_data; + +static ThreadData* +thread_data_init(void) +{ + ThreadData* td; + VALUE obj; + + obj = Data_Make_Struct(rb_cObject, ThreadData, NULL, -1, td); + rb_thread_local_aset(rb_thread_current(), id_thread_data, obj); + + return td; +} + +static inline ThreadData* +thread_data_get() +{ + VALUE obj = rb_thread_local_aref(rb_thread_current(), id_thread_data); + + if (obj != Qnil && TYPE(obj) == T_DATA) { + return (ThreadData *) DATA_PTR(obj); + } + + return thread_data_init(); +} + +#endif + + +/* + * call-seq: error + * @return [Numeric] + * Get +errno+ value. + */ +static VALUE +get_last_error(VALUE self) +{ + return INT2NUM(thread_data_get()->td_errno); +} + +#if defined(_WIN32) || defined(__CYGWIN__) +/* + * call-seq: winapi_error + * @return [Numeric] + * Get +GetLastError()+ value. Only Windows or Cygwin. + */ +static VALUE +get_last_winapi_error(VALUE self) +{ + return INT2NUM(thread_data_get()->td_winapi_errno); +} +#endif + + +/* + * call-seq: error(error) + * @param [Numeric] error + * @return [nil] + * Set +errno+ value. + */ +static VALUE +set_last_error(VALUE self, VALUE error) +{ + +#ifdef _WIN32 + SetLastError(NUM2INT(error)); +#else + errno = NUM2INT(error); +#endif + + return Qnil; +} + +#if defined(_WIN32) || defined(__CYGWIN__) +/* + * call-seq: error(error) + * @param [Numeric] error + * @return [nil] + * Set +GetLastError()+ value. Only on Windows and Cygwin. 
+ */ +static VALUE +set_last_winapi_error(VALUE self, VALUE error) +{ + SetLastError(NUM2INT(error)); + return Qnil; +} +#endif + + +void +rbffi_save_errno(void) +{ + int error = 0; +#ifdef _WIN32 + error = GetLastError(); +#else + error = errno; +#endif + +#if defined(_WIN32) || defined(__CYGWIN__) + DWORD winapi_error = GetLastError(); + thread_data_get()->td_winapi_errno = winapi_error; +#endif + + thread_data_get()->td_errno = error; +} + +void +rbffi_LastError_Init(VALUE moduleFFI) +{ + /* + * Document-module: FFI::LastError + * This module defines a couple of method to set and get +errno+ + * for current thread. + */ + VALUE moduleError = rb_define_module_under(moduleFFI, "LastError"); + + rb_define_module_function(moduleError, "error", get_last_error, 0); + rb_define_module_function(moduleError, "error=", set_last_error, 1); + +#if defined(_WIN32) || defined(__CYGWIN__) + rb_define_module_function(moduleError, "winapi_error", get_last_winapi_error, 0); + rb_define_module_function(moduleError, "winapi_error=", set_last_winapi_error, 1); +#endif + +#if defined(USE_PTHREAD_LOCAL) + pthread_key_create(&threadDataKey, thread_data_free); +#else + id_thread_data = rb_intern("ffi_thread_local_data"); +#endif /* USE_PTHREAD_LOCAL */ +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.h new file mode 100644 index 0000000..ee1dfbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_LASTERROR_H +#define RBFFI_LASTERROR_H + +#ifdef __cplusplus +extern "C" { +#endif + + +void rbffi_LastError_Init(VALUE moduleFFI); + +void rbffi_save_errno(void); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_LASTERROR_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.o new file mode 100644 index 0000000..3ff3f46 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LastError.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.c new file mode 100644 index 0000000..c95f2fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.c @@ -0,0 +1,65 @@ +#include "LongDouble.h" +#include +#include +#include + +#if defined (__CYGWIN__) || defined(__INTERIX) || defined(_MSC_VER) +# define strtold(str, endptr) ((long double) strtod((str), (endptr))) +#endif /* defined (__CYGWIN__) */ + +static VALUE rb_cBigDecimal = Qnil; +static VALUE bigdecimal_load(VALUE unused); +static VALUE bigdecimal_failed(VALUE value, VALUE exc); + +VALUE +rbffi_longdouble_new(long double ld) +{ + if (!RTEST(rb_cBigDecimal)) { + /* allow fallback if the bigdecimal library is unavailable in future ruby versions */ + rb_cBigDecimal = rb_rescue(bigdecimal_load, Qnil, bigdecimal_failed, rb_cObject); + } + + if (RTEST(rb_cBigDecimal) && rb_cBigDecimal != rb_cObject) { + char buf[128]; + return rb_funcall(rb_mKernel, rb_intern("BigDecimal"), 1, rb_str_new(buf, sprintf(buf, "%.35Le", ld))); + } + + /* Fall through to handling as a float */ + return rb_float_new(ld); +} + +long double +rbffi_num2longdouble(VALUE value) +{ + if (TYPE(value) == T_FLOAT) { + return rb_num2dbl(value); + } + + if (!RTEST(rb_cBigDecimal) && rb_const_defined(rb_cObject, rb_intern("BigDecimal"))) { + rb_cBigDecimal = rb_const_get(rb_cObject, rb_intern("BigDecimal")); + } + + if (RTEST(rb_cBigDecimal) && rb_cBigDecimal != rb_cObject && RTEST(rb_obj_is_kind_of(value, rb_cBigDecimal))) { + VALUE s = rb_funcall(value, rb_intern("to_s"), 1, rb_str_new2("E")); + long double ret = strtold(RSTRING_PTR(s), NULL); + RB_GC_GUARD(s); + return ret; + } + + /* Fall through to handling as a float */ + return rb_num2dbl(value); +} + + +static VALUE +bigdecimal_load(VALUE unused) +{ + rb_require("bigdecimal"); + return rb_const_get(rb_cObject, rb_intern("BigDecimal")); +} + +static VALUE +bigdecimal_failed(VALUE value, VALUE exc) +{ + return value; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.h new file mode 100644 index 0000000..079e890 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2012, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_LONGDOUBLE_H +#define RBFFI_LONGDOUBLE_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern VALUE rbffi_longdouble_new(long double ld); +extern long double rbffi_num2longdouble(VALUE value); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_LONGDOUBLE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.o new file mode 100644 index 0000000..20b7888 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/LongDouble.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Makefile new file mode 100644 index 0000000..c951e29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Makefile @@ -0,0 +1,270 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . 
+topdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 +hdrdir = $(topdir) +arch_hdrdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/Users/arthur/.rvm/rubies/ruby-3.0.0 +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20220301-14832-l5yuww +sitelibdir = $(DESTDIR)./.gem.20220301-14832-l5yuww +sitedir = $(rubylibprefix)/site_ruby +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(datarootdir)/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(datarootdir)/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(SDKROOT)/usr/include +includedir = $(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(prefix)/etc +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = gcc -fdeclspec +CXX = g++ -fdeclspec +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = extconf.h +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = +optflags = -O3 +debugflags = -ggdb3 +warnflags = +cppflags = +CCDLFLAGS = -fno-common +CFLAGS = $(CCDLFLAGS) -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) -I/Library/Developer/CommandLineTools/SDKs/MacOSX11.sdk/usr/include/ffi +DEFS = +CPPFLAGS = -DRUBY_EXTCONF_H=\"$(RUBY_EXTCONF_H)\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG) +ldflags = -L. 
-L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib -pthread +dldflags = -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -Wl,-undefined,dynamic_lookup -Wl,-multiply_defined,suppress +ARCH_FLAG = -m64 +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = ar +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.3.0 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = arm64-darwin21 +sitearch = $(arch) +ruby_version = 3.0.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h $(RUBY_EXTCONF_H) + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = /opt/homebrew/opt/coreutils/bin/gmkdir -p +INSTALL = /opt/homebrew/opt/coreutils/bin/ginstall -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) -lffi +ORIG_SRCS = AbstractMemory.c ArrayType.c Buffer.c Call.c ClosurePool.c DynamicLibrary.c Function.c FunctionInfo.c LastError.c LongDouble.c MappedType.c MemoryPointer.c MethodHandle.c Platform.c Pointer.c Struct.c StructByValue.c StructLayout.c Thread.c Type.c Types.c Variadic.c ffi.c +SRCS = $(ORIG_SRCS) +OBJS = AbstractMemory.o ArrayType.o Buffer.o Call.o ClosurePool.o DynamicLibrary.o Function.o FunctionInfo.o LastError.o LongDouble.o MappedType.o MemoryPointer.o MethodHandle.o Platform.o Pointer.o Struct.o StructByValue.o StructLayout.o Thread.o Type.o Types.o Variadic.o ffi.o +HDRS = $(srcdir)/AbstractMemory.h $(srcdir)/ArrayType.h $(srcdir)/Call.h $(srcdir)/ClosurePool.h $(srcdir)/DynamicLibrary.h $(srcdir)/Function.h $(srcdir)/LastError.h $(srcdir)/LongDouble.h $(srcdir)/MappedType.h $(srcdir)/MemoryPointer.h $(srcdir)/MethodHandle.h $(srcdir)/Platform.h $(srcdir)/Pointer.h $(srcdir)/Struct.h $(srcdir)/StructByValue.h $(srcdir)/Thread.h $(srcdir)/Type.h $(srcdir)/Types.h $(srcdir)/compat.h $(srcdir)/extconf.h $(srcdir)/rbffi.h $(srcdir)/rbffi_endian.h +LOCAL_HDRS = +TARGET = ffi_c +TARGET_NAME = ffi_c +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . 
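+# The RUBY*DIR variables below point install-so/install-rb at the temporary
+# .gem.20220301-* staging directory assigned to sitearchdir/sitelibdir above;
+# RubyGems builds the extension there and afterwards copies the resulting
+# ffi_c.bundle into the installed gem, which is why these paths look unlike
+# a normal site_ruby install.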
+BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object $(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +$(OBJS): $(HDRS) $(ruby_headers) +LIBFFI_HOST=--host= +LIBFFI_HOST=--host=aarch64-apple-darwin21 +include ${srcdir}/libffi.darwin.mk diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.c 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.c new file mode 100644 index 0000000..d1a4189 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2010, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include +#include "rbffi.h" + +#include "Type.h" +#include "MappedType.h" + + +static VALUE mapped_allocate(VALUE); +static VALUE mapped_initialize(VALUE, VALUE); +static void mapped_mark(MappedType *); +static ID id_native_type, id_to_native, id_from_native; + +VALUE rbffi_MappedTypeClass = Qnil; + +static VALUE +mapped_allocate(VALUE klass) +{ + MappedType* m; + + VALUE obj = Data_Make_Struct(klass, MappedType, mapped_mark, -1, m); + + m->rbConverter = Qnil; + m->rbType = Qnil; + m->type = NULL; + m->base.nativeType = NATIVE_MAPPED; + m->base.ffiType = &ffi_type_void; + + return obj; +} + +/* + * call-seq: initialize(converter) + * @param [#native_type, #to_native, #from_native] converter +converter+ must respond to + * all these methods + * @return [self] + */ +static VALUE +mapped_initialize(VALUE self, VALUE rbConverter) +{ + MappedType* m = NULL; + + if (!rb_respond_to(rbConverter, id_native_type)) { + rb_raise(rb_eNoMethodError, "native_type method not implemented"); + } + + if (!rb_respond_to(rbConverter, id_to_native)) { + rb_raise(rb_eNoMethodError, "to_native method not implemented"); + } + + if (!rb_respond_to(rbConverter, id_from_native)) { + rb_raise(rb_eNoMethodError, "from_native method not implemented"); + } + + Data_Get_Struct(self, MappedType, m); + m->rbType = rb_funcall2(rbConverter, id_native_type, 0, NULL); + if (!(rb_obj_is_kind_of(m->rbType, rbffi_TypeClass))) { + rb_raise(rb_eTypeError, "native_type did not return instance of FFI::Type"); + } + + m->rbConverter = rbConverter; + Data_Get_Struct(m->rbType, Type, m->type); + m->base.ffiType = m->type->ffiType; + + return self; +} + +static void +mapped_mark(MappedType* m) +{ + rb_gc_mark(m->rbType); + rb_gc_mark(m->rbConverter); +} + +/* + * call-seq: mapped_type.native_type + * @return [Type] + * Get native type of mapped type. 
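+ *
+ * Illustrative sketch (assuming the FFI::DataConverter helper shipped with
+ * the gem's Ruby layer): a converter only needs the three methods checked in
+ * mapped_initialize above.
+ *
+ * @example Wrapping a boolean-as-int converter in FFI::Type::Mapped
+ *   module BoolConverter
+ *     extend FFI::DataConverter
+ *     native_type :int                  # looked up via FFI.find_type
+ *
+ *     def self.to_native(value, ctx)
+ *       value ? 1 : 0
+ *     end
+ *
+ *     def self.from_native(value, ctx)
+ *       value != 0
+ *     end
+ *   end
+ *
+ *   mapped = FFI::Type::Mapped.new(BoolConverter)
+ *   mapped.native_type                  # => the underlying FFI::Type for :int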
+ */ +static VALUE +mapped_native_type(VALUE self) +{ + MappedType*m = NULL; + Data_Get_Struct(self, MappedType, m); + + return m->rbType; +} + +/* + * call-seq: mapped_type.to_native(*args) + * @param args depends on {FFI::DataConverter} used to initialize +self+ + */ +static VALUE +mapped_to_native(int argc, VALUE* argv, VALUE self) +{ + MappedType*m = NULL; + + Data_Get_Struct(self, MappedType, m); + + return rb_funcall2(m->rbConverter, id_to_native, argc, argv); +} + +/* + * call-seq: mapped_type.from_native(*args) + * @param args depends on {FFI::DataConverter} used to initialize +self+ + */ +static VALUE +mapped_from_native(int argc, VALUE* argv, VALUE self) +{ + MappedType*m = NULL; + + Data_Get_Struct(self, MappedType, m); + + return rb_funcall2(m->rbConverter, id_from_native, argc, argv); +} + +void +rbffi_MappedType_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::Type::Mapped < FFI::Type + */ + rbffi_MappedTypeClass = rb_define_class_under(rbffi_TypeClass, "Mapped", rbffi_TypeClass); + + rb_global_variable(&rbffi_MappedTypeClass); + + id_native_type = rb_intern("native_type"); + id_to_native = rb_intern("to_native"); + id_from_native = rb_intern("from_native"); + + rb_define_alloc_func(rbffi_MappedTypeClass, mapped_allocate); + rb_define_method(rbffi_MappedTypeClass, "initialize", mapped_initialize, 1); + rb_define_method(rbffi_MappedTypeClass, "type", mapped_native_type, 0); + rb_define_method(rbffi_MappedTypeClass, "native_type", mapped_native_type, 0); + rb_define_method(rbffi_MappedTypeClass, "to_native", mapped_to_native, -1); + rb_define_method(rbffi_MappedTypeClass, "from_native", mapped_from_native, -1); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.h new file mode 100644 index 0000000..4b26cc1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2010, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_MAPPEDTYPE_H +#define RBFFI_MAPPEDTYPE_H + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef struct MappedType_ { + Type base; + Type* type; + VALUE rbConverter; + VALUE rbType; + +} MappedType; + +void rbffi_MappedType_Init(VALUE moduleFFI); + +extern VALUE rbffi_MappedTypeClass; + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_MAPPEDTYPE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.o new file mode 100644 index 0000000..9635388 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MappedType.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.c new file mode 100644 index 0000000..1a64f2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.c @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include "rbffi.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" + + +static VALUE memptr_allocate(VALUE klass); +static void memptr_release(Pointer* ptr); +static VALUE memptr_malloc(VALUE self, long size, long count, bool clear); +static VALUE memptr_free(VALUE self); + +VALUE rbffi_MemoryPointerClass; + +#define MEMPTR(obj) ((MemoryPointer *) rbffi_AbstractMemory_Cast(obj, rbffi_MemoryPointerClass)) + +VALUE +rbffi_MemoryPointer_NewInstance(long size, long count, bool clear) +{ + return memptr_malloc(memptr_allocate(rbffi_MemoryPointerClass), size, count, clear); +} + +static VALUE +memptr_allocate(VALUE klass) +{ + Pointer* p; + VALUE obj = Data_Make_Struct(klass, Pointer, NULL, memptr_release, p); + p->rbParent = Qnil; + p->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +/* + * call-seq: initialize(size, count=1, clear=true) + * @param [Fixnum, Bignum, Symbol, FFI::Type] size size of a memory cell (in bytes, or type whom size will be used) + * @param [Numeric] count number of cells in memory + * @param [Boolean] clear set memory to all-zero if +true+ + * @return [self] + * A new instance of FFI::MemoryPointer. + */ +static VALUE +memptr_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE size = Qnil, count = Qnil, clear = Qnil; + int nargs = rb_scan_args(argc, argv, "12", &size, &count, &clear); + + memptr_malloc(self, rbffi_type_size(size), nargs > 1 ? NUM2LONG(count) : 1, + RTEST(clear) || clear == Qnil); + + if (rb_block_given_p()) { + return rb_ensure(rb_yield, self, memptr_free, self); + } + + return self; +} + +static VALUE +memptr_malloc(VALUE self, long size, long count, bool clear) +{ + Pointer* p; + unsigned long msize; + + Data_Get_Struct(self, Pointer, p); + + msize = size * count; + + p->storage = xmalloc(msize + 7); + if (p->storage == NULL) { + rb_raise(rb_eNoMemError, "Failed to allocate memory size=%ld bytes", msize); + return Qnil; + } + p->autorelease = true; + p->memory.typeSize = (int) size; + p->memory.size = msize; + /* ensure the memory is aligned on at least a 8 byte boundary */ + p->memory.address = (char *) (((uintptr_t) p->storage + 0x7) & (uintptr_t) ~0x7ULL); + p->allocated = true; + + if (clear && p->memory.size > 0) { + memset(p->memory.address, 0, p->memory.size); + } + + return self; +} + +static VALUE +memptr_free(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->allocated) { + if (ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + ptr->allocated = false; + } + + return self; +} + +static void +memptr_release(Pointer* ptr) +{ + if (ptr->autorelease && ptr->allocated && ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + xfree(ptr); +} + +/* + * call-seq: from_string(s) + * @param [String] s string + * @return [MemoryPointer] + * Create a {MemoryPointer} with +s+ inside. + */ +static VALUE +memptr_s_from_string(VALUE klass, VALUE to_str) +{ + VALUE s = StringValue(to_str); + VALUE args[] = { INT2FIX(1), LONG2NUM(RSTRING_LEN(s) + 1), Qfalse }; + VALUE obj = rb_class_new_instance(3, args, klass); + rb_funcall(obj, rb_intern("put_string"), 2, INT2FIX(0), s); + + return obj; +} + +void +rbffi_MemoryPointer_Init(VALUE moduleFFI) +{ + VALUE ffi_Pointer; + + ffi_Pointer = rbffi_PointerClass; + + /* + * Document-class: FFI::MemoryPointer < FFI::Pointer + * A MemoryPointer is a specific {Pointer}. It points to a memory composed of cells. All cells have the + * same size. 
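+ *
+ * When a block is given to .new, the memory is released as soon as the block
+ * returns (see #initialize); a hedged sketch, assuming the read/write array
+ * helpers defined on the Ruby side of the gem:
+ *
+ * @example Allocate, use and automatically free
+ *   FFI::MemoryPointer.new(:int, 4) do |ptr|
+ *     ptr.write_array_of_int([1, 2, 3, 4])
+ *     ptr.read_array_of_int(4)          # => [1, 2, 3, 4]
+ *   end                                 # memory freed here, before .new returns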
+ * + * @example Create a new MemoryPointer + * mp = FFI::MemoryPointer.new(:long, 16) # Create a pointer on a memory of 16 long ints. + * @example Create a new MemoryPointer from a String + * mp1 = FFI::MemoryPointer.from_string("this is a string") + * # same as: + * mp2 = FFI::MemoryPointer.new(:char,16) + * mp2.put_string("this is a string") + */ + rbffi_MemoryPointerClass = rb_define_class_under(moduleFFI, "MemoryPointer", ffi_Pointer); + rb_global_variable(&rbffi_MemoryPointerClass); + + rb_define_alloc_func(rbffi_MemoryPointerClass, memptr_allocate); + rb_define_method(rbffi_MemoryPointerClass, "initialize", memptr_initialize, -1); + rb_define_singleton_method(rbffi_MemoryPointerClass, "from_string", memptr_s_from_string, 1); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.h new file mode 100644 index 0000000..8106030 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2008, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_MEMORYPOINTER_H +#define RBFFI_MEMORYPOINTER_H + +# include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_MemoryPointer_Init(VALUE moduleFFI); + extern VALUE rbffi_MemoryPointerClass; + extern VALUE rbffi_MemoryPointer_NewInstance(long size, long count, bool clear); +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_MEMORYPOINTER_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.o new file mode 100644 index 0000000..a0fb74a Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MemoryPointer.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.c new file mode 100644 index 0000000..d047e10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2009, 2010 Wayne Meissner + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#ifndef _WIN32 +# include +#endif +#include +#include +#include +#ifndef _WIN32 +# include +#endif +#include +#include +#if defined(HAVE_NATIVETHREAD) && !defined(_WIN32) && !defined(__WIN32__) +# include +#endif + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Function.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "Call.h" +#include "ClosurePool.h" +#include "MethodHandle.h" + + +#define MAX_METHOD_FIXED_ARITY (6) + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +#ifdef USE_RAW +# define METHOD_CLOSURE ffi_raw_closure +# define METHOD_PARAMS ffi_raw* +#else +# define METHOD_CLOSURE ffi_closure +# define METHOD_PARAMS void** +#endif + + + +static bool prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize); +static long trampoline_size(void); + +#if defined(__x86_64__) && (defined(__linux__) || defined(__APPLE__)) +# define CUSTOM_TRAMPOLINE 1 +#endif + + +struct MethodHandle { + Closure* closure; +}; + +static ClosurePool* defaultClosurePool; + + +MethodHandle* +rbffi_MethodHandle_Alloc(FunctionType* fnInfo, void* function) +{ + MethodHandle* handle; + Closure* closure = rbffi_Closure_Alloc(defaultClosurePool); + if (closure == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate closure from pool"); + return NULL; + } + + handle = xcalloc(1, sizeof(*handle)); + handle->closure = closure; + closure->info = fnInfo; + closure->function = function; + + return handle; +} + +void +rbffi_MethodHandle_Free(MethodHandle* handle) +{ + if (handle != NULL) { + rbffi_Closure_Free(handle->closure); + xfree(handle); + } +} + +rbffi_function_anyargs rbffi_MethodHandle_CodeAddress(MethodHandle* handle) +{ + return (rbffi_function_anyargs) handle->closure->code; +} + +#ifndef CUSTOM_TRAMPOLINE +static void attached_method_invoke(ffi_cif* cif, void* retval, METHOD_PARAMS parameters, void* user_data); + +static ffi_type* methodHandleParamTypes[3]; + +static ffi_cif mh_cif; + +static bool +prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + ffi_status ffiStatus; + +#if defined(USE_RAW) + ffiStatus = ffi_prep_raw_closure(code, &mh_cif, attached_method_invoke, closure); +#else + ffiStatus = ffi_prep_closure_loc(closure->pcl, &mh_cif, attached_method_invoke, closure, code); +#endif + if (ffiStatus != FFI_OK) { + snprintf(errmsg, errmsgsize, "ffi_prep_closure_loc failed. 
status=%#x", ffiStatus); + return false; + } + + return true; +} + + +static long +trampoline_size(void) +{ + return sizeof(METHOD_CLOSURE); +} + +/* + * attached_method_invoke is used functions with more than 6 parameters, or + * with struct param or return values + */ +static void +attached_method_invoke(ffi_cif* cif, void* mretval, METHOD_PARAMS parameters, void* user_data) +{ + Closure* handle = (Closure *) user_data; + FunctionType* fnInfo = (FunctionType *) handle->info; + +#ifdef USE_RAW + int argc = parameters[0].sint; + VALUE* argv = *(VALUE **) ¶meters[1]; +#else + int argc = *(int *) parameters[0]; + VALUE* argv = *(VALUE **) parameters[1]; +#endif + + *(VALUE *) mretval = (*fnInfo->invoke)(argc, argv, handle->function, fnInfo); +} + +#endif + + + +#if defined(CUSTOM_TRAMPOLINE) +#if defined(__x86_64__) + +static VALUE custom_trampoline(int argc, VALUE* argv, VALUE self, Closure*); + +#define TRAMPOLINE_CTX_MAGIC (0xfee1deadcafebabe) +#define TRAMPOLINE_FUN_MAGIC (0xfeedfacebeeff00d) + +/* + * This is a hand-coded trampoline to speedup entry from ruby to the FFI translation + * layer for x86_64 arches. + * + * Since a ruby function has exactly 3 arguments, and the first 6 arguments are + * passed in registers for x86_64, we can tack on a context pointer by simply + * putting a value in %rcx, then jumping to the C trampoline code. + * + * This results in approx a 30% speedup for x86_64 FFI dispatch + */ +__asm__( + ".text\n\t" + ".globl ffi_trampoline\n\t" + ".globl _ffi_trampoline\n\t" + "ffi_trampoline:\n\t" + "_ffi_trampoline:\n\t" + "movabsq $0xfee1deadcafebabe, %rcx\n\t" + "movabsq $0xfeedfacebeeff00d, %r11\n\t" + "jmpq *%r11\n\t" + ".globl ffi_trampoline_end\n\t" + "ffi_trampoline_end:\n\t" + ".globl _ffi_trampoline_end\n\t" + "_ffi_trampoline_end:\n\t" +); + +static VALUE +custom_trampoline(int argc, VALUE* argv, VALUE self, Closure* handle) +{ + FunctionType* fnInfo = (FunctionType *) handle->info; + VALUE rbReturnValue; + + RB_GC_GUARD(rbReturnValue) = (*fnInfo->invoke)(argc, argv, handle->function, fnInfo); + RB_GC_GUARD(self); + + return rbReturnValue; +} + +#elif defined(__i386__) && 0 + +static VALUE custom_trampoline(void *args, Closure*); +#define TRAMPOLINE_CTX_MAGIC (0xfee1dead) +#define TRAMPOLINE_FUN_MAGIC (0xbeefcafe) + +/* + * This is a hand-coded trampoline to speed-up entry from ruby to the FFI translation + * layer for i386 arches. + * + * This does not make a discernible difference vs a raw closure, so for now, + * it is not enabled. 
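+ *
+ * In both variants the TRAMPOLINE_CTX_MAGIC and TRAMPOLINE_FUN_MAGIC
+ * placeholders are located once at init time by trampoline_offsets(), and
+ * prep_trampoline() then copies the stub into the closure's code page and
+ * overwrites the placeholders with the per-closure Closure pointer and the
+ * address of custom_trampoline(), so every attached method gets its own
+ * patched copy of the stub.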
+ */ +__asm__( + ".text\n\t" + ".globl ffi_trampoline\n\t" + ".globl _ffi_trampoline\n\t" + "ffi_trampoline:\n\t" + "_ffi_trampoline:\n\t" + "subl $12, %esp\n\t" + "leal 16(%esp), %eax\n\t" + "movl %eax, (%esp)\n\t" + "movl $0xfee1dead, 4(%esp)\n\t" + "movl $0xbeefcafe, %eax\n\t" + "call *%eax\n\t" + "addl $12, %esp\n\t" + "ret\n\t" + ".globl ffi_trampoline_end\n\t" + "ffi_trampoline_end:\n\t" + ".globl _ffi_trampoline_end\n\t" + "_ffi_trampoline_end:\n\t" +); + +static VALUE +custom_trampoline(void *args, Closure* handle) +{ + FunctionType* fnInfo = (FunctionType *) handle->info; + return (*fnInfo->invoke)(*(int *) args, *(VALUE **) (args + 4), handle->function, fnInfo); +} + +#endif /* __x86_64__ else __i386__ */ + +extern void ffi_trampoline(int argc, VALUE* argv, VALUE self); +extern void ffi_trampoline_end(void); +static int trampoline_offsets(long *, long *); + +static long trampoline_ctx_offset, trampoline_func_offset; + +static long +trampoline_offset(int off, const long value) +{ + char *ptr; + for (ptr = (char *) &ffi_trampoline + off; ptr < (char *) &ffi_trampoline_end; ++ptr) { + if (*(long *) ptr == value) { + return ptr - (char *) &ffi_trampoline; + } + } + + return -1; +} + +static int +trampoline_offsets(long* ctxOffset, long* fnOffset) +{ + *ctxOffset = trampoline_offset(0, TRAMPOLINE_CTX_MAGIC); + if (*ctxOffset == -1) { + return -1; + } + + *fnOffset = trampoline_offset(0, TRAMPOLINE_FUN_MAGIC); + if (*fnOffset == -1) { + return -1; + } + + return 0; +} + +static bool +prep_trampoline(void* ctx, void* code, Closure* closure, char* errmsg, size_t errmsgsize) +{ + memcpy(code, (void*) &ffi_trampoline, trampoline_size()); + /* Patch the context and function addresses into the stub code */ + *(intptr_t *)((char*)code + trampoline_ctx_offset) = (intptr_t) closure; + *(intptr_t *)((char*)code + trampoline_func_offset) = (intptr_t) custom_trampoline; + + return true; +} + +static long +trampoline_size(void) +{ + return (char *) &ffi_trampoline_end - (char *) &ffi_trampoline; +} + +#endif /* CUSTOM_TRAMPOLINE */ + + +void +rbffi_MethodHandle_Init(VALUE module) +{ +#ifndef CUSTOM_TRAMPOLINE + ffi_status ffiStatus; +#endif + + defaultClosurePool = rbffi_ClosurePool_New((int) trampoline_size(), prep_trampoline, NULL); + +#if defined(CUSTOM_TRAMPOLINE) + if (trampoline_offsets(&trampoline_ctx_offset, &trampoline_func_offset) != 0) { + rb_raise(rb_eFatal, "Could not locate offsets in trampoline code"); + } +#else + methodHandleParamTypes[0] = &ffi_type_sint; + methodHandleParamTypes[1] = &ffi_type_pointer; + methodHandleParamTypes[2] = &ffi_type_ulong; + + ffiStatus = ffi_prep_cif(&mh_cif, FFI_DEFAULT_ABI, 3, &ffi_type_ulong, + methodHandleParamTypes); + if (ffiStatus != FFI_OK) { + rb_raise(rb_eFatal, "ffi_prep_cif failed. status=%#x", ffiStatus); + } + +#endif +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.h new file mode 100644 index 0000000..0dcc058 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_METHODHANDLE_H +#define RBFFI_METHODHANDLE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "Function.h" + + +typedef struct MethodHandlePool MethodHandlePool; +typedef struct MethodHandle MethodHandle; +typedef VALUE (*rbffi_function_anyargs)(int argc, VALUE* argv, VALUE self); + + +MethodHandle* rbffi_MethodHandle_Alloc(FunctionType* fnInfo, void* function); +void rbffi_MethodHandle_Free(MethodHandle* handle); +rbffi_function_anyargs rbffi_MethodHandle_CodeAddress(MethodHandle* handle); +void rbffi_MethodHandle_Init(VALUE module); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_METHODHANDLE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.o new file mode 100644 index 0000000..f9df8d5 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/MethodHandle.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.c new file mode 100644 index 0000000..57f3219 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +# include +#endif +# include +#include +#include +#include +#include +#include "rbffi_endian.h" +#include "Platform.h" + +#if defined(__GNU__) || (defined(__GLIBC__) && !defined(__UCLIBC__)) +# include +#endif + +static VALUE PlatformModule = Qnil; + +static void +export_primitive_types(VALUE module) +{ +#define S(name, T) do { \ + typedef struct { char c; T v; } s; \ + rb_define_const(module, #name "_ALIGN", INT2NUM((sizeof(s) - sizeof(T)) * 8)); \ + rb_define_const(module, #name "_SIZE", INT2NUM(sizeof(T)* 8)); \ +} while(0) + S(INT8, char); + S(INT16, short); + S(INT32, int); + S(INT64, long long); + S(LONG, long); + S(FLOAT, float); + S(DOUBLE, double); + S(LONG_DOUBLE, long double); + S(ADDRESS, void*); +#undef S +} + +void +rbffi_Platform_Init(VALUE moduleFFI) +{ + PlatformModule = rb_define_module_under(moduleFFI, "Platform"); + rb_define_const(PlatformModule, "BYTE_ORDER", INT2FIX(BYTE_ORDER)); + rb_define_const(PlatformModule, "LITTLE_ENDIAN", INT2FIX(LITTLE_ENDIAN)); + rb_define_const(PlatformModule, "BIG_ENDIAN", INT2FIX(BIG_ENDIAN)); +#if defined(__GNU__) || (defined(__GLIBC__) && !defined(__UCLIBC__)) + rb_define_const(PlatformModule, "GNU_LIBC", rb_str_new2(LIBC_SO)); +#endif + export_primitive_types(PlatformModule); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.h new file mode 100644 index 0000000..5575e34 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_PLATFORM_H +#define RBFFI_PLATFORM_H + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_Platform_Init(VALUE moduleFFI); + + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_PLATFORM_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.o new file mode 100644 index 0000000..0351d67 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Platform.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.c new file mode 100644 index 0000000..153fff1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.c @@ -0,0 +1,507 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include "rbffi.h" +#include "rbffi_endian.h" +#include "AbstractMemory.h" +#include "Pointer.h" + +#define POINTER(obj) rbffi_AbstractMemory_Cast((obj), rbffi_PointerClass) + +VALUE rbffi_PointerClass = Qnil; +VALUE rbffi_NullPointerSingleton = Qnil; + +static void ptr_release(Pointer* ptr); +static void ptr_mark(Pointer* ptr); + +VALUE +rbffi_Pointer_NewInstance(void* addr) +{ + Pointer* p; + VALUE obj; + + if (addr == NULL) { + return rbffi_NullPointerSingleton; + } + + obj = Data_Make_Struct(rbffi_PointerClass, Pointer, NULL, -1, p); + p->memory.address = addr; + p->memory.size = LONG_MAX; + p->memory.flags = (addr == NULL) ? 0 : (MEM_RD | MEM_WR); + p->memory.typeSize = 1; + p->rbParent = Qnil; + + return obj; +} + +static VALUE +ptr_allocate(VALUE klass) +{ + Pointer* p; + VALUE obj; + + obj = Data_Make_Struct(klass, Pointer, ptr_mark, ptr_release, p); + p->rbParent = Qnil; + p->memory.flags = MEM_RD | MEM_WR; + + return obj; +} + +/* + * @overload initialize(pointer) + * @param [Pointer] pointer another pointer to initialize from + * Create a new pointer from another {Pointer}. + * @overload initialize(type, address) + * @param [Type] type type for pointer + * @param [Integer] address base address for pointer + * Create a new pointer from a {Type} and a base address + * @return [self] + * A new instance of Pointer. + */ +static VALUE +ptr_initialize(int argc, VALUE* argv, VALUE self) +{ + Pointer* p; + VALUE rbType = Qnil, rbAddress = Qnil; + int typeSize = 1; + + Data_Get_Struct(self, Pointer, p); + + switch (rb_scan_args(argc, argv, "11", &rbType, &rbAddress)) { + case 1: + rbAddress = rbType; + typeSize = 1; + break; + case 2: + typeSize = rbffi_type_size(rbType); + break; + default: + rb_raise(rb_eArgError, "Invalid arguments"); + } + + switch (TYPE(rbAddress)) { + case T_FIXNUM: + case T_BIGNUM: + p->memory.address = (void*) (uintptr_t) NUM2LL(rbAddress); + p->memory.size = LONG_MAX; + if (p->memory.address == NULL) { + p->memory.flags = 0; + } + break; + + default: + if (rb_obj_is_kind_of(rbAddress, rbffi_PointerClass)) { + Pointer* orig; + + p->rbParent = rbAddress; + Data_Get_Struct(rbAddress, Pointer, orig); + p->memory = orig->memory; + } else { + rb_raise(rb_eTypeError, "wrong argument type, expected Integer or FFI::Pointer"); + } + break; + } + + p->memory.typeSize = typeSize; + + return self; +} + +/* + * call-seq: ptr.initialize_copy(other) + * @param [Pointer] other source for cloning or dupping + * @return [self] + * @raise {RuntimeError} if +other+ is an unbounded memory area, or is unreadable/unwritable + * @raise {NoMemError} if failed to allocate memory for new object + * DO NOT CALL THIS METHOD. + * + * This method is internally used by #dup and #clone. Memory content is copied from +other+. 
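+ *
+ * @example What #dup does for a bounded pointer (illustrative sketch)
+ *   mem  = FFI::MemoryPointer.new(:char, 16)  # bounded, readable and writable
+ *   copy = mem.dup                            # fresh 16-byte allocation, contents memcpy()ed across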
+ */ +static VALUE +ptr_initialize_copy(VALUE self, VALUE other) +{ + AbstractMemory* src; + Pointer* dst; + + Data_Get_Struct(self, Pointer, dst); + src = POINTER(other); + if (src->size == LONG_MAX) { + rb_raise(rb_eRuntimeError, "cannot duplicate unbounded memory area"); + return Qnil; + } + + if ((dst->memory.flags & (MEM_RD | MEM_WR)) != (MEM_RD | MEM_WR)) { + rb_raise(rb_eRuntimeError, "cannot duplicate unreadable/unwritable memory area"); + return Qnil; + } + + if (dst->storage != NULL) { + xfree(dst->storage); + dst->storage = NULL; + } + + dst->storage = xmalloc(src->size + 7); + if (dst->storage == NULL) { + rb_raise(rb_eNoMemError, "failed to allocate memory size=%lu bytes", src->size); + return Qnil; + } + + dst->allocated = true; + dst->autorelease = true; + dst->memory.address = (void *) (((uintptr_t) dst->storage + 0x7) & (uintptr_t) ~0x7ULL); + dst->memory.size = src->size; + dst->memory.typeSize = src->typeSize; + + /* finally, copy the actual memory contents */ + memcpy(dst->memory.address, src->address, src->size); + + return self; +} + +static VALUE +slice(VALUE self, long offset, long size) +{ + AbstractMemory* ptr; + Pointer* p; + VALUE retval; + + Data_Get_Struct(self, AbstractMemory, ptr); + checkBounds(ptr, offset, size == LONG_MAX ? 1 : size); + + retval = Data_Make_Struct(rbffi_PointerClass, Pointer, ptr_mark, -1, p); + + p->memory.address = ptr->address + offset; + p->memory.size = size; + p->memory.flags = ptr->flags; + p->memory.typeSize = ptr->typeSize; + p->rbParent = self; + + return retval; +} + +/* + * Document-method: + + * call-seq: ptr + offset + * @param [Numeric] offset + * @return [Pointer] + * Return a new {Pointer} from an existing pointer and an +offset+. + */ +static VALUE +ptr_plus(VALUE self, VALUE offset) +{ + AbstractMemory* ptr; + long off = NUM2LONG(offset); + + Data_Get_Struct(self, AbstractMemory, ptr); + + return slice(self, off, ptr->size == LONG_MAX ? LONG_MAX : ptr->size - off); +} + +/* + * call-seq: ptr.slice(offset, length) + * @param [Numeric] offset + * @param [Numeric] length + * @return [Pointer] + * Return a new {Pointer} from an existing one. This pointer points on same contents + * from +offset+ for a length +length+. + */ +static VALUE +ptr_slice(VALUE self, VALUE rbOffset, VALUE rbLength) +{ + return slice(self, NUM2LONG(rbOffset), NUM2LONG(rbLength)); +} + +/* + * call-seq: ptr.inspect + * @return [String] + * Inspect pointer object. + */ +static VALUE +ptr_inspect(VALUE self) +{ + char buf[100]; + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->memory.size != LONG_MAX) { + snprintf(buf, sizeof(buf), "#<%s address=%p size=%lu>", + rb_obj_classname(self), ptr->memory.address, ptr->memory.size); + } else { + snprintf(buf, sizeof(buf), "#<%s address=%p>", rb_obj_classname(self), ptr->memory.address); + } + + return rb_str_new2(buf); +} + +/* + * Document-method: null? + * call-seq: ptr.null? + * @return [Boolean] + * Return +true+ if +self+ is a {NULL} pointer. + */ +static VALUE +ptr_null_p(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ptr->memory.address == NULL ? Qtrue : Qfalse; +} + +/* + * Document-method: == + * call-seq: ptr == other + * @param [Pointer] other + * Check equality between +self+ and +other+. Equality is tested on {#address}. + */ +static VALUE +ptr_equals(VALUE self, VALUE other) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (NIL_P(other)) { + return ptr->memory.address == NULL ? 
Qtrue : Qfalse; + } + + return ptr->memory.address == POINTER(other)->address ? Qtrue : Qfalse; +} + +/* + * call-seq: ptr.address + * @return [Numeric] pointer's base address + * Return +self+'s base address (alias: #to_i). + */ +static VALUE +ptr_address(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ULL2NUM((uintptr_t) ptr->memory.address); +} + +#if BYTE_ORDER == LITTLE_ENDIAN +# define SWAPPED_ORDER BIG_ENDIAN +#else +# define SWAPPED_ORDER LITTLE_ENDIAN +#endif + +/* + * Get or set +self+'s endianness + * @overload order + * @return [:big, :little] endianness of +self+ + * @overload order(order) + * @param [Symbol] order endianness to set (+:little+, +:big+ or +:network+). +:big+ and +:network+ + * are synonymous. + * @return a new pointer with the new order + */ +static VALUE +ptr_order(int argc, VALUE* argv, VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + if (argc == 0) { + int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER; + return order == BIG_ENDIAN ? ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little")); + } else { + VALUE rbOrder = Qnil; + int order = BYTE_ORDER; + + if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) { + rb_raise(rb_eArgError, "need byte order"); + } + if (SYMBOL_P(rbOrder)) { + ID id = SYM2ID(rbOrder); + if (id == rb_intern("little")) { + order = LITTLE_ENDIAN; + + } else if (id == rb_intern("big") || id == rb_intern("network")) { + order = BIG_ENDIAN; + } else { + rb_raise(rb_eArgError, "unknown byte order"); + } + } + if (order != BYTE_ORDER) { + Pointer* p2; + VALUE retval = slice(self, 0, ptr->memory.size); + + Data_Get_Struct(retval, Pointer, p2); + p2->memory.flags |= MEM_SWAP; + return retval; + } + + return self; + } +} + + +/* + * call-seq: ptr.free + * @return [self] + * Free memory pointed by +self+. + */ +static VALUE +ptr_free(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + if (ptr->allocated) { + if (ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + ptr->allocated = false; + + } else { + VALUE caller = rb_funcall(rb_funcall(Qnil, rb_intern("caller"), 0), rb_intern("first"), 0); + + rb_warn("calling free on non allocated pointer %s from %s", RSTRING_PTR(ptr_inspect(self)), RSTRING_PTR(rb_str_to_str(caller))); + } + + return self; +} + +static VALUE +ptr_type_size(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return INT2NUM(ptr->memory.typeSize); +} + +/* + * call-seq: ptr.autorelease = autorelease + * @param [Boolean] autorelease + * @return [Boolean] +autorelease+ + * Set +autorelease+ attribute. See also Autorelease section. + */ +static VALUE +ptr_autorelease(VALUE self, VALUE autorelease) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + ptr->autorelease = autorelease == Qtrue; + + return autorelease; +} + +/* + * call-seq: ptr.autorelease? + * @return [Boolean] + * Get +autorelease+ attribute. See also Autorelease section. + */ +static VALUE +ptr_autorelease_p(VALUE self) +{ + Pointer* ptr; + + Data_Get_Struct(self, Pointer, ptr); + + return ptr->autorelease ? 
Qtrue : Qfalse; +} + + +static void +ptr_release(Pointer* ptr) +{ + if (ptr->autorelease && ptr->allocated && ptr->storage != NULL) { + xfree(ptr->storage); + ptr->storage = NULL; + } + xfree(ptr); +} + +static void +ptr_mark(Pointer* ptr) +{ + rb_gc_mark(ptr->rbParent); +} + +void +rbffi_Pointer_Init(VALUE moduleFFI) +{ + VALUE rbNullAddress = ULL2NUM(0); + VALUE ffi_AbstractMemory = rbffi_AbstractMemoryClass; + + /* + * Document-class: FFI::Pointer < FFI::AbstractMemory + * Pointer class is used to manage C pointers with ease. A {Pointer} object is defined by his + * {#address} (as a C pointer). It permits additions with an integer for pointer arithmetic. + * + * == Autorelease + * By default a pointer object frees its content when it's garbage collected. + * Therefore it's usually not necessary to call {#free} explicit. + * This behaviour may be changed with {#autorelease=} method. + * If it's set to +false+, the memory isn't freed by the garbage collector, but stays valid until +free()+ is called on C level or when the process terminates. + */ + rbffi_PointerClass = rb_define_class_under(moduleFFI, "Pointer", ffi_AbstractMemory); + /* + * Document-variable: Pointer + */ + rb_global_variable(&rbffi_PointerClass); + + rb_define_alloc_func(rbffi_PointerClass, ptr_allocate); + rb_define_method(rbffi_PointerClass, "initialize", ptr_initialize, -1); + rb_define_method(rbffi_PointerClass, "initialize_copy", ptr_initialize_copy, 1); + rb_define_method(rbffi_PointerClass, "inspect", ptr_inspect, 0); + rb_define_method(rbffi_PointerClass, "to_s", ptr_inspect, 0); + rb_define_method(rbffi_PointerClass, "+", ptr_plus, 1); + rb_define_method(rbffi_PointerClass, "slice", ptr_slice, 2); + rb_define_method(rbffi_PointerClass, "null?", ptr_null_p, 0); + rb_define_method(rbffi_PointerClass, "address", ptr_address, 0); + rb_define_alias(rbffi_PointerClass, "to_i", "address"); + rb_define_method(rbffi_PointerClass, "==", ptr_equals, 1); + rb_define_method(rbffi_PointerClass, "order", ptr_order, -1); + rb_define_method(rbffi_PointerClass, "autorelease=", ptr_autorelease, 1); + rb_define_method(rbffi_PointerClass, "autorelease?", ptr_autorelease_p, 0); + rb_define_method(rbffi_PointerClass, "free", ptr_free, 0); + rb_define_method(rbffi_PointerClass, "type_size", ptr_type_size, 0); + + rbffi_NullPointerSingleton = rb_class_new_instance(1, &rbNullAddress, rbffi_PointerClass); + /* + * NULL pointer + */ + rb_define_const(rbffi_PointerClass, "NULL", rbffi_NullPointerSingleton); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.h new file mode 100644 index 0000000..b3d6c85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_POINTER_H +#define RBFFI_POINTER_H + +# include + +#ifdef __cplusplus +extern "C" { +#endif + +#include "AbstractMemory.h" + +extern void rbffi_Pointer_Init(VALUE moduleFFI); +extern VALUE rbffi_Pointer_NewInstance(void* addr); +extern VALUE rbffi_PointerClass; +extern VALUE rbffi_NullPointerSingleton; + +typedef struct Pointer { + AbstractMemory memory; + VALUE rbParent; + char* storage; /* start of malloc area */ + bool autorelease; + bool allocated; +} Pointer; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_POINTER_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.o new file mode 100644 index 0000000..65b80b5 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Pointer.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.c new file mode 100644 index 0000000..92731c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.c @@ -0,0 +1,822 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifndef _MSC_VER +# include +#endif +#include +#include +#include +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Function.h" +#include "Types.h" +#include "Function.h" +#include "StructByValue.h" +#include "ArrayType.h" +#include "MappedType.h" +#include "Struct.h" + +typedef struct InlineArray_ { + VALUE rbMemory; + VALUE rbField; + + AbstractMemory* memory; + StructField* field; + MemoryOp *op; + Type* componentType; + ArrayType* arrayType; + int length; +} InlineArray; + + +static void struct_mark(Struct *); +static void struct_free(Struct *); +static VALUE struct_class_layout(VALUE klass); +static void struct_malloc(Struct* s); +static void inline_array_mark(InlineArray *); +static void store_reference_value(StructField* f, Struct* s, VALUE value); + +VALUE rbffi_StructClass = Qnil; + +VALUE rbffi_StructInlineArrayClass = Qnil; +VALUE rbffi_StructLayoutCharArrayClass = Qnil; + +static ID id_pointer_ivar = 0, id_layout_ivar = 0; +static ID id_get = 0, id_put = 0, id_to_ptr = 0, id_to_s = 0, id_layout = 0; + +static inline char* +memory_address(VALUE self) +{ + return ((AbstractMemory *)DATA_PTR((self)))->address; +} + +static VALUE +struct_allocate(VALUE klass) +{ + Struct* s; + VALUE obj = Data_Make_Struct(klass, Struct, struct_mark, struct_free, s); + + s->rbPointer = Qnil; + s->rbLayout = Qnil; + + return obj; +} + +/* + * call-seq: initialize + * @overload initialize(pointer, *args) + * @param [AbstractMemory] pointer + * @param [Array] args + * @return [self] + */ +static VALUE +struct_initialize(int argc, VALUE* argv, VALUE self) +{ + Struct* s; + VALUE rbPointer = Qnil, rest = Qnil, klass = CLASS_OF(self); + int nargs; + + Data_Get_Struct(self, Struct, s); + + nargs = rb_scan_args(argc, argv, "01*", &rbPointer, &rest); + + /* Call up into ruby code to adjust the layout */ + if (nargs > 1) { + s->rbLayout = rb_funcall2(CLASS_OF(self), id_layout, (int) RARRAY_LEN(rest), RARRAY_PTR(rest)); + } else { + s->rbLayout = struct_class_layout(klass); + } + + if (!rb_obj_is_kind_of(s->rbLayout, rbffi_StructLayoutClass)) { + rb_raise(rb_eRuntimeError, "Invalid Struct layout"); + } + + Data_Get_Struct(s->rbLayout, StructLayout, s->layout); + + if (rbPointer != Qnil) { + s->pointer = MEMORY(rbPointer); + s->rbPointer = rbPointer; + } else { + struct_malloc(s); + } + + return self; +} + +/* + * call-seq: initialize_copy(other) + * @return [nil] + * DO NOT CALL THIS METHOD + */ +static VALUE +struct_initialize_copy(VALUE self, VALUE other) +{ + Struct* src; + Struct* dst; + + Data_Get_Struct(self, Struct, dst); + Data_Get_Struct(other, Struct, src); + if (dst == src) { + return self; + } + + dst->rbLayout = src->rbLayout; + dst->layout = src->layout; + + /* + * A new MemoryPointer instance is allocated here instead of just calling + * #dup on rbPointer, since the Pointer may not know its length, or may + * be longer than just this struct. 
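+ *
+ * An illustrative Ruby-level sketch of the resulting behaviour (the
+ * +MyStruct+ class here is hypothetical):
+ *
+ *   a = MyStruct.new
+ *   a[:value1] = 42
+ *   b = a.dup          # fresh memory is allocated and a's bytes are copied
+ *   a[:value1] = 7
+ *   b[:value1]         # => 42, unaffected by later writes to a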
+ */ + if (src->pointer->address != NULL) { + dst->rbPointer = rbffi_MemoryPointer_NewInstance(1, src->layout->size, false); + dst->pointer = MEMORY(dst->rbPointer); + memcpy(dst->pointer->address, src->pointer->address, src->layout->size); + } else { + dst->rbPointer = src->rbPointer; + dst->pointer = src->pointer; + } + + if (src->layout->referenceFieldCount > 0) { + dst->rbReferences = ALLOC_N(VALUE, dst->layout->referenceFieldCount); + memcpy(dst->rbReferences, src->rbReferences, dst->layout->referenceFieldCount * sizeof(VALUE)); + } + + return self; +} + +static VALUE +struct_class_layout(VALUE klass) +{ + VALUE layout; + if (!rb_ivar_defined(klass, id_layout_ivar)) { + rb_raise(rb_eRuntimeError, "no Struct layout configured for %s", rb_class2name(klass)); + } + + layout = rb_ivar_get(klass, id_layout_ivar); + if (!rb_obj_is_kind_of(layout, rbffi_StructLayoutClass)) { + rb_raise(rb_eRuntimeError, "invalid Struct layout for %s", rb_class2name(klass)); + } + + return layout; +} + +static StructLayout* +struct_layout(VALUE self) +{ + Struct* s = (Struct *) DATA_PTR(self); + if (s->layout != NULL) { + return s->layout; + } + + if (s->layout == NULL) { + s->rbLayout = struct_class_layout(CLASS_OF(self)); + Data_Get_Struct(s->rbLayout, StructLayout, s->layout); + } + + return s->layout; +} + +static Struct* +struct_validate(VALUE self) +{ + Struct* s; + Data_Get_Struct(self, Struct, s); + + if (struct_layout(self) == NULL) { + rb_raise(rb_eRuntimeError, "struct layout == null"); + } + + if (s->pointer == NULL) { + struct_malloc(s); + } + + return s; +} + +static void +struct_malloc(Struct* s) +{ + if (s->rbPointer == Qnil) { + s->rbPointer = rbffi_MemoryPointer_NewInstance(s->layout->size, 1, true); + + } else if (!rb_obj_is_kind_of(s->rbPointer, rbffi_AbstractMemoryClass)) { + rb_raise(rb_eRuntimeError, "invalid pointer in struct"); + } + + s->pointer = (AbstractMemory *) DATA_PTR(s->rbPointer); +} + +static void +struct_mark(Struct *s) +{ + rb_gc_mark(s->rbPointer); + rb_gc_mark(s->rbLayout); + if (s->rbReferences != NULL) { + rb_gc_mark_locations(&s->rbReferences[0], &s->rbReferences[s->layout->referenceFieldCount]); + } +} + +static void +struct_free(Struct* s) +{ + xfree(s->rbReferences); + xfree(s); +} + + +static void +store_reference_value(StructField* f, Struct* s, VALUE value) +{ + if (unlikely(f->referenceIndex == -1)) { + rb_raise(rb_eRuntimeError, "put_reference_value called for non-reference type"); + return; + } + if (s->rbReferences == NULL) { + int i; + s->rbReferences = ALLOC_N(VALUE, s->layout->referenceFieldCount); + for (i = 0; i < s->layout->referenceFieldCount; ++i) { + s->rbReferences[i] = Qnil; + } + } + + s->rbReferences[f->referenceIndex] = value; +} + + +static StructField * +struct_field(Struct* s, VALUE fieldName) +{ + StructLayout* layout = s->layout; + struct field_cache_entry *p_ce = FIELD_CACHE_LOOKUP(layout, fieldName); + + /* Do a hash lookup only if cache entry is empty or fieldName is unexpected? 
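(On a cache hit the rb_hash_aref below is skipped entirely; on a miss the hash lookup runs and the cache entry is refilled.)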
*/ + if (unlikely(!SYMBOL_P(fieldName) || !p_ce->fieldName || p_ce->fieldName != fieldName)) { + VALUE rbField = rb_hash_aref(layout->rbFieldMap, fieldName); + if (unlikely(NIL_P(rbField))) { + VALUE str = rb_funcall2(fieldName, id_to_s, 0, NULL); + rb_raise(rb_eArgError, "No such field '%s'", StringValueCStr(str)); + } + /* Write the retrieved coder to the cache */ + p_ce->fieldName = fieldName; + p_ce->field = (StructField *) DATA_PTR(rbField); + } + + return p_ce->field; +} + +/* + * call-seq: struct[field_name] + * @param field_name field to access + * Acces to a Struct field. + */ +static VALUE +struct_aref(VALUE self, VALUE fieldName) +{ + Struct* s; + StructField* f; + + s = struct_validate(self); + + f = struct_field(s, fieldName); + if (f->get != NULL) { + return (*f->get)(f, s); + + } else if (f->memoryOp != NULL) { + return (*f->memoryOp->get)(s->pointer, f->offset); + + } else { + VALUE rbField = rb_hash_aref(s->layout->rbFieldMap, fieldName); + /* call up to the ruby code to fetch the value */ + return rb_funcall2(rbField, id_get, 1, &s->rbPointer); + } +} + +/* + * call-seq: []=(field_name, value) + * @param field_name field to access + * @param value value to set to +field_name+ + * @return [value] + * Set a field in Struct. + */ +static VALUE +struct_aset(VALUE self, VALUE fieldName, VALUE value) +{ + Struct* s; + StructField* f; + + s = struct_validate(self); + + f = struct_field(s, fieldName); + if (f->put != NULL) { + (*f->put)(f, s, value); + + } else if (f->memoryOp != NULL) { + + (*f->memoryOp->put)(s->pointer, f->offset, value); + + } else { + VALUE rbField = rb_hash_aref(s->layout->rbFieldMap, fieldName); + /* call up to the ruby code to set the value */ + VALUE argv[2]; + argv[0] = s->rbPointer; + argv[1] = value; + rb_funcall2(rbField, id_put, 2, argv); + } + + if (f->referenceRequired) { + store_reference_value(f, s, value); + } + + return value; +} + +/* + * call-seq: pointer= pointer + * @param [AbstractMemory] pointer + * @return [self] + * Make Struct point to +pointer+. + */ +static VALUE +struct_set_pointer(VALUE self, VALUE pointer) +{ + Struct* s; + StructLayout* layout; + AbstractMemory* memory; + + if (!rb_obj_is_kind_of(pointer, rbffi_AbstractMemoryClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected Pointer or Buffer)", + rb_obj_classname(pointer)); + return Qnil; + } + + + Data_Get_Struct(self, Struct, s); + Data_Get_Struct(pointer, AbstractMemory, memory); + layout = struct_layout(self); + + if ((int) layout->base.ffiType->size > memory->size) { + rb_raise(rb_eArgError, "memory of %ld bytes too small for struct %s (expected at least %ld)", + memory->size, rb_obj_classname(self), (long) layout->base.ffiType->size); + } + + s->pointer = MEMORY(pointer); + s->rbPointer = pointer; + rb_ivar_set(self, id_pointer_ivar, pointer); + + return self; +} + +/* + * call-seq: pointer + * @return [AbstractMemory] + * Get pointer to Struct contents. + */ +static VALUE +struct_get_pointer(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->rbPointer; +} + +/* + * call-seq: layout= layout + * @param [StructLayout] layout + * @return [self] + * Set the Struct's layout. 
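+ *
+ * At the user level a layout is normally declared with the class-level
+ * +layout+ DSL rather than through this private setter; a rough,
+ * hypothetical example:
+ *
+ *   class MyStruct < FFI::Struct
+ *     layout :value1, :int,
+ *            :value2, :double
+ *   end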
+ */ +static VALUE +struct_set_layout(VALUE self, VALUE layout) +{ + Struct* s; + Data_Get_Struct(self, Struct, s); + + if (!rb_obj_is_kind_of(layout, rbffi_StructLayoutClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected %s)", + rb_obj_classname(layout), rb_class2name(rbffi_StructLayoutClass)); + return Qnil; + } + + Data_Get_Struct(layout, StructLayout, s->layout); + rb_ivar_set(self, id_layout_ivar, layout); + + return self; +} + +/* + * call-seq: layout + * @return [StructLayout] + * Get the Struct's layout. + */ +static VALUE +struct_get_layout(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->rbLayout; +} + +/* + * call-seq: null? + * @return [Boolean] + * Test if Struct's pointer is NULL + */ +static VALUE +struct_null_p(VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + + return s->pointer->address == NULL ? Qtrue : Qfalse; +} + +/* + * (see Pointer#order) + */ +static VALUE +struct_order(int argc, VALUE* argv, VALUE self) +{ + Struct* s; + + Data_Get_Struct(self, Struct, s); + if (argc == 0) { + return rb_funcall(s->rbPointer, rb_intern("order"), 0); + + } else { + VALUE retval = rb_obj_dup(self); + VALUE rbPointer = rb_funcall2(s->rbPointer, rb_intern("order"), argc, argv); + struct_set_pointer(retval, rbPointer); + + return retval; + } +} + +static VALUE +inline_array_allocate(VALUE klass) +{ + InlineArray* array; + VALUE obj; + + obj = Data_Make_Struct(klass, InlineArray, inline_array_mark, -1, array); + array->rbField = Qnil; + array->rbMemory = Qnil; + + return obj; +} + +static void +inline_array_mark(InlineArray* array) +{ + rb_gc_mark(array->rbField); + rb_gc_mark(array->rbMemory); +} + +/* + * Document-method: FFI::Struct::InlineArray#initialize + * call-seq: initialize(memory, field) + * @param [AbstractMemory] memory + * @param [StructField] field + * @return [self] + */ +static VALUE +inline_array_initialize(VALUE self, VALUE rbMemory, VALUE rbField) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + array->rbMemory = rbMemory; + array->rbField = rbField; + + Data_Get_Struct(rbMemory, AbstractMemory, array->memory); + Data_Get_Struct(rbField, StructField, array->field); + Data_Get_Struct(array->field->rbType, ArrayType, array->arrayType); + Data_Get_Struct(array->arrayType->rbComponentType, Type, array->componentType); + + array->op = get_memory_op(array->componentType); + if (array->op == NULL && array->componentType->nativeType == NATIVE_MAPPED) { + array->op = get_memory_op(((MappedType *) array->componentType)->type); + } + + array->length = array->arrayType->length; + + return self; +} + +/* + * call-seq: size + * @return [Numeric] + * Get size + */ +static VALUE +inline_array_size(VALUE self) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + return UINT2NUM(((ArrayType *) array->field->type)->length); +} + +static int +inline_array_offset(InlineArray* array, int index) +{ + if (index < 0 || (index >= array->length && array->length > 0)) { + rb_raise(rb_eIndexError, "index %d out of bounds", index); + } + + return (int) array->field->offset + (index * (int) array->componentType->ffiType->size); +} + +/* + * call-seq: [](index) + * @param [Numeric] index + * @return [Type, Struct] + */ +static VALUE +inline_array_aref(VALUE self, VALUE rbIndex) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + if (array->op != NULL) { + VALUE rbNativeValue = array->op->get(array->memory, + inline_array_offset(array, NUM2INT(rbIndex))); + if 
(unlikely(array->componentType->nativeType == NATIVE_MAPPED)) { + return rb_funcall(((MappedType *) array->componentType)->rbConverter, + rb_intern("from_native"), 2, rbNativeValue, Qnil); + } else { + return rbNativeValue; + } + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + VALUE rbOffset = INT2NUM(inline_array_offset(array, NUM2INT(rbIndex))); + VALUE rbLength = INT2NUM(array->componentType->ffiType->size); + VALUE rbPointer = rb_funcall(array->rbMemory, rb_intern("slice"), 2, rbOffset, rbLength); + + return rb_class_new_instance(1, &rbPointer, ((StructByValue *) array->componentType)->rbStructClass); + } else { + + rb_raise(rb_eArgError, "get not supported for %s", rb_obj_classname(array->arrayType->rbComponentType)); + return Qnil; + } +} + +/* + * call-seq: []=(index, value) + * @param [Numeric] index + * @param [Type, Struct] + * @return [value] + */ +static VALUE +inline_array_aset(VALUE self, VALUE rbIndex, VALUE rbValue) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + if (array->op != NULL) { + if (unlikely(array->componentType->nativeType == NATIVE_MAPPED)) { + rbValue = rb_funcall(((MappedType *) array->componentType)->rbConverter, + rb_intern("to_native"), 2, rbValue, Qnil); + } + array->op->put(array->memory, inline_array_offset(array, NUM2INT(rbIndex)), + rbValue); + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + int offset = inline_array_offset(array, NUM2INT(rbIndex)); + Struct* s; + + if (!rb_obj_is_kind_of(rbValue, rbffi_StructClass)) { + rb_raise(rb_eTypeError, "argument not an instance of struct"); + return Qnil; + } + + checkWrite(array->memory); + checkBounds(array->memory, offset, array->componentType->ffiType->size); + + Data_Get_Struct(rbValue, Struct, s); + checkRead(s->pointer); + checkBounds(s->pointer, 0, array->componentType->ffiType->size); + + memcpy(array->memory->address + offset, s->pointer->address, array->componentType->ffiType->size); + + } else { + ArrayType* arrayType; + Data_Get_Struct(array->field->rbType, ArrayType, arrayType); + + rb_raise(rb_eArgError, "set not supported for %s", rb_obj_classname(arrayType->rbComponentType)); + return Qnil; + } + + return rbValue; +} + +/* + * call-seq: each + * Yield block for each element of +self+. + */ +static VALUE +inline_array_each(VALUE self) +{ + InlineArray* array; + + int i; + + Data_Get_Struct(self, InlineArray, array); + + for (i = 0; i < array->length; ++i) { + rb_yield(inline_array_aref(self, INT2FIX(i))); + } + + return self; +} + +/* + * call-seq: to_a + * @return [Array] + * Convert +self+ to an array. + */ +static VALUE +inline_array_to_a(VALUE self) +{ + InlineArray* array; + VALUE obj; + int i; + + Data_Get_Struct(self, InlineArray, array); + obj = rb_ary_new2(array->length); + + + for (i = 0; i < array->length; ++i) { + rb_ary_push(obj, inline_array_aref(self, INT2FIX(i))); + } + + return obj; +} + +/* + * Document-method: FFI::StructLayout::CharArray#to_s + * call-seq: to_s + * @return [String] + * Convert +self+ to a string. 
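+ *
+ * Rough usage sketch (the +Person+ class and field names are hypothetical),
+ * assuming an inline char array declared as +[:char, 32]+:
+ *
+ *   class Person < FFI::Struct
+ *     layout :name, [:char, 32]
+ *   end
+ *
+ *   person = Person.new
+ *   person[:name] = "Alice"   # written into the inline char array
+ *   person[:name].to_s        # => "Alice"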
+ */ +static VALUE +inline_array_to_s(VALUE self) +{ + InlineArray* array; + VALUE argv[2]; + + Data_Get_Struct(self, InlineArray, array); + + if (array->componentType->nativeType != NATIVE_INT8 && array->componentType->nativeType != NATIVE_UINT8) { + VALUE dummy = Qnil; + return rb_call_super(0, &dummy); + } + + argv[0] = UINT2NUM(array->field->offset); + argv[1] = UINT2NUM(array->length); + + return rb_funcall2(array->rbMemory, rb_intern("get_string"), 2, argv); +} + +/* + * call-seq: to_ptr + * @return [AbstractMemory] + * Get pointer to +self+ content. + */ +static VALUE +inline_array_to_ptr(VALUE self) +{ + InlineArray* array; + + Data_Get_Struct(self, InlineArray, array); + + return rb_funcall(array->rbMemory, rb_intern("slice"), 2, + UINT2NUM(array->field->offset), UINT2NUM(array->arrayType->base.ffiType->size)); +} + + +void +rbffi_Struct_Init(VALUE moduleFFI) +{ + VALUE StructClass; + + rbffi_StructLayout_Init(moduleFFI); + + /* + * Document-class: FFI::Struct + * + * A FFI::Struct means to mirror a C struct. + * + * A Struct is defined as: + * class MyStruct < FFI::Struct + * layout :value1, :int, + * :value2, :double + * end + * and is used as: + * my_struct = MyStruct.new + * my_struct[:value1] = 12 + * + * For more information, see http://github.com/ffi/ffi/wiki/Structs + */ + rbffi_StructClass = rb_define_class_under(moduleFFI, "Struct", rb_cObject); + StructClass = rbffi_StructClass; // put on a line alone to help RDoc + rb_global_variable(&rbffi_StructClass); + + /* + * Document-class: FFI::Struct::InlineArray + */ + rbffi_StructInlineArrayClass = rb_define_class_under(rbffi_StructClass, "InlineArray", rb_cObject); + rb_global_variable(&rbffi_StructInlineArrayClass); + + /* + * Document-class: FFI::StructLayout::CharArray < FFI::Struct::InlineArray + */ + rbffi_StructLayoutCharArrayClass = rb_define_class_under(rbffi_StructLayoutClass, "CharArray", + rbffi_StructInlineArrayClass); + rb_global_variable(&rbffi_StructLayoutCharArrayClass); + + + rb_define_alloc_func(StructClass, struct_allocate); + rb_define_method(StructClass, "initialize", struct_initialize, -1); + rb_define_method(StructClass, "initialize_copy", struct_initialize_copy, 1); + rb_define_method(StructClass, "order", struct_order, -1); + + rb_define_alias(rb_singleton_class(StructClass), "alloc_in", "new"); + rb_define_alias(rb_singleton_class(StructClass), "alloc_out", "new"); + rb_define_alias(rb_singleton_class(StructClass), "alloc_inout", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_in", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_out", "new"); + rb_define_alias(rb_singleton_class(StructClass), "new_inout", "new"); + + rb_define_method(StructClass, "pointer", struct_get_pointer, 0); + rb_define_private_method(StructClass, "pointer=", struct_set_pointer, 1); + + rb_define_method(StructClass, "layout", struct_get_layout, 0); + rb_define_private_method(StructClass, "layout=", struct_set_layout, 1); + + rb_define_method(StructClass, "[]", struct_aref, 1); + rb_define_method(StructClass, "[]=", struct_aset, 2); + rb_define_method(StructClass, "null?", struct_null_p, 0); + + rb_include_module(rbffi_StructInlineArrayClass, rb_mEnumerable); + rb_define_alloc_func(rbffi_StructInlineArrayClass, inline_array_allocate); + rb_define_method(rbffi_StructInlineArrayClass, "initialize", inline_array_initialize, 2); + rb_define_method(rbffi_StructInlineArrayClass, "[]", inline_array_aref, 1); + rb_define_method(rbffi_StructInlineArrayClass, "[]=", inline_array_aset, 2); + 
rb_define_method(rbffi_StructInlineArrayClass, "each", inline_array_each, 0); + rb_define_method(rbffi_StructInlineArrayClass, "size", inline_array_size, 0); + rb_define_method(rbffi_StructInlineArrayClass, "to_a", inline_array_to_a, 0); + rb_define_method(rbffi_StructInlineArrayClass, "to_ptr", inline_array_to_ptr, 0); + + rb_define_method(rbffi_StructLayoutCharArrayClass, "to_s", inline_array_to_s, 0); + rb_define_alias(rbffi_StructLayoutCharArrayClass, "to_str", "to_s"); + + id_pointer_ivar = rb_intern("@pointer"); + id_layout_ivar = rb_intern("@layout"); + id_layout = rb_intern("layout"); + id_get = rb_intern("get"); + id_put = rb_intern("put"); + id_to_ptr = rb_intern("to_ptr"); + id_to_s = rb_intern("to_s"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.h new file mode 100644 index 0000000..eb6edf2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_STRUCT_H +#define RBFFI_STRUCT_H + +#include "extconf.h" +#include "AbstractMemory.h" +#include "Type.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + + extern void rbffi_Struct_Init(VALUE ffiModule); + extern void rbffi_StructLayout_Init(VALUE ffiModule); + typedef struct StructField_ StructField; + typedef struct StructLayout_ StructLayout; + typedef struct Struct_ Struct; + + struct StructField_ { + Type* type; + unsigned int offset; + + int referenceIndex; + + bool referenceRequired; + VALUE rbType; + VALUE rbName; + + VALUE (*get)(StructField* field, Struct* s); + void (*put)(StructField* field, Struct* s, VALUE value); + + MemoryOp* memoryOp; + }; + + struct StructLayout_ { + Type base; + StructField** fields; + int fieldCount; + int size; + int align; + ffi_type** ffiTypes; + + /* + * We use the fieldName's minor 8 Bits as index to a 256 entry cache. 
+ * This avoids full ruby hash lookups for repeated lookups. + */ + #define FIELD_CACHE_LOOKUP(this, sym) ( &(this)->cache_row[((sym) >> 8) & 0xff] ) + + struct field_cache_entry { + VALUE fieldName; + StructField *field; + } cache_row[0x100]; + + /** The number of reference tracking fields in this struct */ + int referenceFieldCount; + + VALUE rbFieldNames; + VALUE rbFieldMap; + VALUE rbFields; + }; + + struct Struct_ { + StructLayout* layout; + AbstractMemory* pointer; + VALUE* rbReferences; + + VALUE rbLayout; + VALUE rbPointer; + }; + + extern VALUE rbffi_StructClass, rbffi_StructLayoutClass; + extern VALUE rbffi_StructLayoutFieldClass, rbffi_StructLayoutFunctionFieldClass; + extern VALUE rbffi_StructLayoutArrayFieldClass; + extern VALUE rbffi_StructInlineArrayClass; + extern VALUE rbffi_StructLayoutCharArrayClass; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_STRUCT_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.o new file mode 100644 index 0000000..b431722 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Struct.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.c new file mode 100644 index 0000000..a3255f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.c @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +#include +#endif +#include +#include +#include +#include +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "Type.h" +#include "StructByValue.h" +#include "Struct.h" + +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) + +static VALUE sbv_allocate(VALUE); +static VALUE sbv_initialize(VALUE, VALUE); +static void sbv_mark(StructByValue *); +static void sbv_free(StructByValue *); + +VALUE rbffi_StructByValueClass = Qnil; + +static VALUE +sbv_allocate(VALUE klass) +{ + StructByValue* sbv; + + VALUE obj = Data_Make_Struct(klass, StructByValue, sbv_mark, sbv_free, sbv); + + sbv->rbStructClass = Qnil; + sbv->rbStructLayout = Qnil; + sbv->base.nativeType = NATIVE_STRUCT; + + sbv->base.ffiType = xcalloc(1, sizeof(*sbv->base.ffiType)); + sbv->base.ffiType->size = 0; + sbv->base.ffiType->alignment = 1; + sbv->base.ffiType->type = FFI_TYPE_STRUCT; + + return obj; +} + +static VALUE +sbv_initialize(VALUE self, VALUE rbStructClass) +{ + StructByValue* sbv = NULL; + StructLayout* layout = NULL; + VALUE rbLayout = Qnil; + + rbLayout = rb_ivar_get(rbStructClass, rb_intern("@layout")); + if (!rb_obj_is_instance_of(rbLayout, rbffi_StructLayoutClass)) { + rb_raise(rb_eTypeError, "wrong type in @layout ivar (expected FFI::StructLayout)"); + } + + Data_Get_Struct(rbLayout, StructLayout, layout); + Data_Get_Struct(self, StructByValue, sbv); + sbv->rbStructClass = rbStructClass; + sbv->rbStructLayout = rbLayout; + + /* We can just use everything from the ffi_type directly */ + *sbv->base.ffiType = *layout->base.ffiType; + + return self; +} + +static void +sbv_mark(StructByValue *sbv) +{ + rb_gc_mark(sbv->rbStructClass); + rb_gc_mark(sbv->rbStructLayout); +} + +static void +sbv_free(StructByValue *sbv) +{ + xfree(sbv->base.ffiType); + xfree(sbv); +} + + +static VALUE +sbv_layout(VALUE self) +{ + StructByValue* sbv; + + Data_Get_Struct(self, StructByValue, sbv); + return sbv->rbStructLayout; +} + +static VALUE +sbv_struct_class(VALUE self) +{ + StructByValue* sbv; + + Data_Get_Struct(self, StructByValue, sbv); + + return sbv->rbStructClass; +} + +void +rbffi_StructByValue_Init(VALUE moduleFFI) +{ + rbffi_StructByValueClass = rb_define_class_under(moduleFFI, "StructByValue", rbffi_TypeClass); + rb_global_variable(&rbffi_StructByValueClass); + rb_define_const(rbffi_TypeClass, "Struct", rbffi_StructByValueClass); + + rb_define_alloc_func(rbffi_StructByValueClass, sbv_allocate); + rb_define_method(rbffi_StructByValueClass, "initialize", sbv_initialize, 1); + rb_define_method(rbffi_StructByValueClass, "layout", sbv_layout, 0); + rb_define_method(rbffi_StructByValueClass, "struct_class", sbv_struct_class, 0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.h new file mode 100644 index 0000000..07b2763 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_STRUCTBYVALUE_H +#define RBFFI_STRUCTBYVALUE_H + +#include +#include "Type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct StructByValue_ { + Type base; + VALUE rbStructClass; + VALUE rbStructLayout; +} StructByValue; + +void rbffi_StructByValue_Init(VALUE moduleFFI); + +extern VALUE rbffi_StructByValueClass; + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_STRUCTBYVALUE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.o new file mode 100644 index 0000000..4ad0da9 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructByValue.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructLayout.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructLayout.c new file mode 100644 index 0000000..d318b8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructLayout.c @@ -0,0 +1,700 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#ifndef _MSC_VER +# include +#endif +#include +#include +#include +#include "rbffi.h" +#include "compat.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Function.h" +#include "Types.h" +#include "StructByValue.h" +#include "ArrayType.h" +#include "Function.h" +#include "MappedType.h" +#include "Struct.h" + +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) + +static void struct_layout_mark(StructLayout *); +static void struct_layout_free(StructLayout *); +static void struct_field_mark(StructField* ); + +VALUE rbffi_StructLayoutFieldClass = Qnil; +VALUE rbffi_StructLayoutNumberFieldClass = Qnil, rbffi_StructLayoutPointerFieldClass = Qnil; +VALUE rbffi_StructLayoutStringFieldClass = Qnil; +VALUE rbffi_StructLayoutFunctionFieldClass = Qnil, rbffi_StructLayoutArrayFieldClass = Qnil; + +VALUE rbffi_StructLayoutClass = Qnil; + + +static VALUE +struct_field_allocate(VALUE klass) +{ + StructField* field; + VALUE obj; + + obj = Data_Make_Struct(klass, StructField, struct_field_mark, -1, field); + field->rbType = Qnil; + field->rbName = Qnil; + + return obj; +} + +static void +struct_field_mark(StructField* f) +{ + rb_gc_mark(f->rbType); + rb_gc_mark(f->rbName); +} + +/* + * call-seq: initialize(name, offset, type) + * @param [String,Symbol] name + * @param [Fixnum] offset + * @param [FFI::Type] type + * @return [self] + * A new FFI::StructLayout::Field instance. + */ +static VALUE +struct_field_initialize(int argc, VALUE* argv, VALUE self) +{ + VALUE rbOffset = Qnil, rbName = Qnil, rbType = Qnil; + StructField* field; + int nargs; + + Data_Get_Struct(self, StructField, field); + + nargs = rb_scan_args(argc, argv, "3", &rbName, &rbOffset, &rbType); + + if (TYPE(rbName) != T_SYMBOL && TYPE(rbName) != T_STRING) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected Symbol/String)", + rb_obj_classname(rbName)); + } + + Check_Type(rbOffset, T_FIXNUM); + + if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) { + rb_raise(rb_eTypeError, "wrong argument type %s (expected FFI::Type)", + rb_obj_classname(rbType)); + } + + field->offset = NUM2UINT(rbOffset); + field->rbName = (TYPE(rbName) == T_SYMBOL) ? rbName : rb_str_intern(rbName); + field->rbType = rbType; + Data_Get_Struct(field->rbType, Type, field->type); + field->memoryOp = get_memory_op(field->type); + field->referenceIndex = -1; + + switch (field->type->nativeType == NATIVE_MAPPED ? ((MappedType *) field->type)->type->nativeType : field->type->nativeType) { + case NATIVE_FUNCTION: + case NATIVE_POINTER: + field->referenceRequired = true; + break; + + default: + field->referenceRequired = (rb_respond_to(self, rb_intern("reference_required?")) + && RTEST(rb_funcall2(self, rb_intern("reference_required?"), 0, NULL))) + || (rb_respond_to(rbType, rb_intern("reference_required?")) + && RTEST(rb_funcall2(rbType, rb_intern("reference_required?"), 0, NULL))); + break; + } + + return self; +} + +/* + * call-seq: offset + * @return [Numeric] + * Get the field offset. 
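+ *
+ * Illustrative sketch (hypothetical +MyStruct+ instance +s+ with a
+ * +:value2+ field):
+ *
+ *   s.layout[:value2].offset   # => byte offset of :value2 within the struct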
+ */ +static VALUE +struct_field_offset(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->offset); +} + +/* + * call-seq: size + * @return [Numeric] + * Get the field size. + */ +static VALUE +struct_field_size(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->type->ffiType->size); +} + +/* + * call-seq: alignment + * @return [Numeric] + * Get the field alignment. + */ +static VALUE +struct_field_alignment(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return UINT2NUM(field->type->ffiType->alignment); +} + +/* + * call-seq: type + * @return [Type] + * Get the field type. + */ +static VALUE +struct_field_type(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + + return field->rbType; +} + +/* + * call-seq: name + * @return [Symbol] + * Get the field name. + */ +static VALUE +struct_field_name(VALUE self) +{ + StructField* field; + Data_Get_Struct(self, StructField, field); + return field->rbName; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [Object] + * Get an object of type {#type} from memory pointed by +pointer+. + */ +static VALUE +struct_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + if (f->memoryOp == NULL) { + rb_raise(rb_eArgError, "get not supported for %s", rb_obj_classname(f->rbType)); + return Qnil; + } + + return (*f->memoryOp->get)(MEMORY(pointer), f->offset); +} + +/* + * call-seq: put(pointer, value) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @param [Object] value this object must be a kind of {#type} + * @return [self] + * Put an object to memory pointed by +pointer+. + */ +static VALUE +struct_field_put(VALUE self, VALUE pointer, VALUE value) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + if (f->memoryOp == NULL) { + rb_raise(rb_eArgError, "put not supported for %s", rb_obj_classname(f->rbType)); + return self; + } + + (*f->memoryOp->put)(MEMORY(pointer), f->offset, value); + + return self; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [Function] + * Get a {Function} from memory pointed by +pointer+. + */ +static VALUE +function_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + + Data_Get_Struct(self, StructField, f); + + return rbffi_Function_NewInstance(f->rbType, (*rbffi_AbstractMemoryOps.pointer->get)(MEMORY(pointer), f->offset)); +} + +/* + * call-seq: put(pointer, proc) + * @param [AbstractMemory] pointer pointer to a {Struct} + * @param [Function, Proc] proc + * @return [Function] + * Set a {Function} to memory pointed by +pointer+ as a function. + * + * If a Proc is submitted as +proc+, it is automatically transformed to a {Function}. 
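+ *
+ * Rough sketch, assuming +s+ is a Struct instance whose layout declares
+ * +:compare+ as an int-returning callback field (declaration omitted here):
+ *
+ *   s[:compare] = lambda { |a, b| a <=> b }   # wrapped via rbffi_Function_ForProc
+ *   s[:compare]                               # reads back as an FFI::Function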
+ */ +static VALUE +function_field_put(VALUE self, VALUE pointer, VALUE proc) +{ + StructField* f; + VALUE value = Qnil; + + Data_Get_Struct(self, StructField, f); + + if (NIL_P(proc) || rb_obj_is_kind_of(proc, rbffi_FunctionClass)) { + value = proc; + } else if (rb_obj_is_kind_of(proc, rb_cProc) || rb_respond_to(proc, rb_intern("call"))) { + value = rbffi_Function_ForProc(f->rbType, proc); + } else { + rb_raise(rb_eTypeError, "wrong type (expected Proc or Function)"); + } + + (*rbffi_AbstractMemoryOps.pointer->put)(MEMORY(pointer), f->offset, value); + + return self; +} + +static inline bool +isCharArray(ArrayType* arrayType) +{ + return arrayType->componentType->nativeType == NATIVE_INT8 + || arrayType->componentType->nativeType == NATIVE_UINT8; +} + +/* + * call-seq: get(pointer) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @return [FFI::StructLayout::CharArray, FFI::Struct::InlineArray] + * Get an array from a {Struct}. + */ +static VALUE +array_field_get(VALUE self, VALUE pointer) +{ + StructField* f; + ArrayType* array; + VALUE argv[2]; + + Data_Get_Struct(self, StructField, f); + Data_Get_Struct(f->rbType, ArrayType, array); + + argv[0] = pointer; + argv[1] = self; + + return rb_class_new_instance(2, argv, isCharArray(array) + ? rbffi_StructLayoutCharArrayClass : rbffi_StructInlineArrayClass); +} + +/* + * call-seq: put(pointer, value) + * @param [AbstractMemory] pointer pointer on a {Struct} + * @param [String, Array] value +value+ may be a String only if array's type is a kind of +int8+ + * @return [value] + * Set an array in a {Struct}. + */ +static VALUE +array_field_put(VALUE self, VALUE pointer, VALUE value) +{ + StructField* f; + ArrayType* array; + + + Data_Get_Struct(self, StructField, f); + Data_Get_Struct(f->rbType, ArrayType, array); + + if (isCharArray(array) && rb_obj_is_instance_of(value, rb_cString)) { + VALUE argv[2]; + + argv[0] = INT2FIX(f->offset); + argv[1] = value; + + if (RSTRING_LEN(value) < array->length) { + rb_funcall2(pointer, rb_intern("put_string"), 2, argv); + } else if (RSTRING_LEN(value) == array->length) { + rb_funcall2(pointer, rb_intern("put_bytes"), 2, argv); + } else { + rb_raise(rb_eIndexError, "String is longer (%ld bytes) than the char array (%d bytes)", RSTRING_LEN(value), array->length); + } + } else { +#ifdef notyet + MemoryOp* op; + int count = RARRAY_LEN(value); + int i; + AbstractMemory* memory = MEMORY(pointer); + + if (count > array->length) { + rb_raise(rb_eIndexError, "array too large"); + } + + /* clear the contents in case of a short write */ + checkWrite(memory); + checkBounds(memory, f->offset, f->type->ffiType->size); + if (count < array->length) { + memset(memory->address + f->offset + (count * array->componentType->ffiType->size), + 0, (array->length - count) * array->componentType->ffiType->size); + } + + /* now copy each element in */ + if ((op = get_memory_op(array->componentType)) != NULL) { + + for (i = 0; i < count; ++i) { + (*op->put)(memory, f->offset + (i * array->componentType->ffiType->size), rb_ary_entry(value, i)); + } + + } else if (array->componentType->nativeType == NATIVE_STRUCT) { + + for (i = 0; i < count; ++i) { + VALUE entry = rb_ary_entry(value, i); + Struct* s; + + if (!rb_obj_is_kind_of(entry, rbffi_StructClass)) { + rb_raise(rb_eTypeError, "array element not an instance of FFI::Struct"); + break; + } + + Data_Get_Struct(entry, Struct, s); + checkRead(s->pointer); + checkBounds(s->pointer, 0, array->componentType->ffiType->size); + + memcpy(memory->address + f->offset + (i * 
array->componentType->ffiType->size), + s->pointer->address, array->componentType->ffiType->size); + } + + } else { + rb_raise(rb_eNotImpError, "put not supported for arrays of type %s", rb_obj_classname(array->rbComponentType)); + } +#else + rb_raise(rb_eNotImpError, "cannot set array field"); +#endif + } + + return value; +} + + +static VALUE +struct_layout_allocate(VALUE klass) +{ + StructLayout* layout; + VALUE obj; + + obj = Data_Make_Struct(klass, StructLayout, struct_layout_mark, struct_layout_free, layout); + layout->rbFieldMap = Qnil; + layout->rbFieldNames = Qnil; + layout->rbFields = Qnil; + layout->base.ffiType = xcalloc(1, sizeof(*layout->base.ffiType)); + layout->base.ffiType->size = 0; + layout->base.ffiType->alignment = 0; + layout->base.ffiType->type = FFI_TYPE_STRUCT; + + return obj; +} + +/* + * call-seq: initialize(fields, size, align) + * @param [Array] fields + * @param [Numeric] size + * @param [Numeric] align + * @return [self] + * A new StructLayout instance. + */ +static VALUE +struct_layout_initialize(VALUE self, VALUE fields, VALUE size, VALUE align) +{ + StructLayout* layout; + ffi_type* ltype; + int i; + + Data_Get_Struct(self, StructLayout, layout); + layout->fieldCount = (int) RARRAY_LEN(fields); + layout->rbFieldMap = rb_hash_new(); + layout->rbFieldNames = rb_ary_new2(layout->fieldCount); + layout->size = (int) FFI_ALIGN(NUM2INT(size), NUM2INT(align)); + layout->align = NUM2INT(align); + layout->fields = xcalloc(layout->fieldCount, sizeof(StructField *)); + layout->ffiTypes = xcalloc(layout->fieldCount + 1, sizeof(ffi_type *)); + layout->rbFields = rb_ary_new2(layout->fieldCount); + layout->referenceFieldCount = 0; + layout->base.ffiType->elements = layout->ffiTypes; + layout->base.ffiType->size = layout->size; + layout->base.ffiType->alignment = layout->align; + + ltype = layout->base.ffiType; + for (i = 0; i < (int) layout->fieldCount; ++i) { + VALUE rbField = rb_ary_entry(fields, i); + VALUE rbName; + StructField* field; + ffi_type* ftype; + + + if (!rb_obj_is_kind_of(rbField, rbffi_StructLayoutFieldClass)) { + rb_raise(rb_eTypeError, "wrong type for field %d.", i); + } + rbName = rb_funcall2(rbField, rb_intern("name"), 0, NULL); + + Data_Get_Struct(rbField, StructField, field); + layout->fields[i] = field; + + if (field->type == NULL || field->type->ffiType == NULL) { + rb_raise(rb_eRuntimeError, "type of field %d not supported", i); + } + + ftype = field->type->ffiType; + if (ftype->size == 0 && i < ((int) layout->fieldCount - 1)) { + rb_raise(rb_eTypeError, "type of field %d has zero size", i); + } + + if (field->referenceRequired) { + field->referenceIndex = layout->referenceFieldCount++; + } + + + layout->ffiTypes[i] = ftype->size > 0 ? ftype : NULL; + rb_hash_aset(layout->rbFieldMap, rbName, rbField); + rb_ary_push(layout->rbFields, rbField); + rb_ary_push(layout->rbFieldNames, rbName); + } + + if (ltype->size == 0) { + rb_raise(rb_eRuntimeError, "Struct size is zero"); + } + + return self; +} + +/* + * call-seq: [](field) + * @param [Symbol] field + * @return [StructLayout::Field] + * Get a field from the layout. 
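+ *
+ * Introspection sketch on a hypothetical Struct instance +s+:
+ *
+ *   s.layout.members     # => [:value1, :value2]
+ *   s.layout[:value1]    # => the corresponding StructLayout::Field object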
+ */ +static VALUE +struct_layout_union_bang(VALUE self) +{ + const ffi_type *alignment_types[] = { &ffi_type_sint8, &ffi_type_sint16, &ffi_type_sint32, &ffi_type_sint64, + &ffi_type_float, &ffi_type_double, &ffi_type_longdouble, NULL }; + StructLayout* layout; + ffi_type *t = NULL; + int count, i; + + Data_Get_Struct(self, StructLayout, layout); + + for (i = 0; alignment_types[i] != NULL; ++i) { + if (alignment_types[i]->alignment == layout->align) { + t = (ffi_type *) alignment_types[i]; + break; + } + } + if (t == NULL) { + rb_raise(rb_eRuntimeError, "cannot create libffi union representation for alignment %d", layout->align); + return Qnil; + } + + count = (int) layout->size / (int) t->size; + xfree(layout->ffiTypes); + layout->ffiTypes = xcalloc(count + 1, sizeof(ffi_type *)); + layout->base.ffiType->elements = layout->ffiTypes; + + for (i = 0; i < count; ++i) { + layout->ffiTypes[i] = t; + } + + return self; +} + +static VALUE +struct_layout_aref(VALUE self, VALUE field) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_hash_aref(layout->rbFieldMap, field); +} + +/* + * call-seq: fields + * @return [Array] + * Get fields list. + */ +static VALUE +struct_layout_fields(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFields); +} + +/* + * call-seq: members + * @return [Array] + * Get list of field names. + */ +static VALUE +struct_layout_members(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFieldNames); +} + +/* + * call-seq: to_a + * @return [Array] + * Get an array of fields. + */ +static VALUE +struct_layout_to_a(VALUE self) +{ + StructLayout* layout; + + Data_Get_Struct(self, StructLayout, layout); + + return rb_ary_dup(layout->rbFields); +} + +static void +struct_layout_mark(StructLayout *layout) +{ + rb_gc_mark(layout->rbFieldMap); + rb_gc_mark(layout->rbFieldNames); + rb_gc_mark(layout->rbFields); + /* Clear the cache, to be safe from changes of fieldName VALUE by GC.compact. + * TODO: Move cache clearing to compactation callback provided by Ruby-2.7+. + */ + memset(&layout->cache_row, 0, sizeof(layout->cache_row)); +} + +static void +struct_layout_free(StructLayout *layout) +{ + xfree(layout->ffiTypes); + xfree(layout->base.ffiType); + xfree(layout->fields); + xfree(layout); +} + + +void +rbffi_StructLayout_Init(VALUE moduleFFI) +{ + VALUE ffi_Type = rbffi_TypeClass; + + /* + * Document-class: FFI::StructLayout < FFI::Type + * + * This class aims at defining a struct layout. + */ + rbffi_StructLayoutClass = rb_define_class_under(moduleFFI, "StructLayout", ffi_Type); + rb_global_variable(&rbffi_StructLayoutClass); + + /* + * Document-class: FFI::StructLayout::Field + * A field in a {StructLayout}. + */ + rbffi_StructLayoutFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Field", rb_cObject); + rb_global_variable(&rbffi_StructLayoutFieldClass); + + /* + * Document-class: FFI::StructLayout::Number + * A numeric {Field} in a {StructLayout}. + */ + rbffi_StructLayoutNumberFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Number", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutNumberFieldClass); + + /* + * Document-class: FFI::StructLayout::String + * A string {Field} in a {StructLayout}. 
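+ * (For example, a field declared as +layout :label, :string+ maps a C
+ * +char *+ member and is read back as a Ruby String; the declaration shown
+ * is hypothetical and illustrative only.)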
+ */ + rbffi_StructLayoutStringFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "String", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutStringFieldClass); + + /* + * Document-class: FFI::StructLayout::Pointer + * A pointer {Field} in a {StructLayout}. + */ + rbffi_StructLayoutPointerFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Pointer", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutPointerFieldClass); + + /* + * Document-class: FFI::StructLayout::Function + * A function pointer {Field} in a {StructLayout}. + */ + rbffi_StructLayoutFunctionFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Function", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutFunctionFieldClass); + + /* + * Document-class: FFI::StructLayout::Array + * An array {Field} in a {StructLayout}. + */ + rbffi_StructLayoutArrayFieldClass = rb_define_class_under(rbffi_StructLayoutClass, "Array", rbffi_StructLayoutFieldClass); + rb_global_variable(&rbffi_StructLayoutArrayFieldClass); + + rb_define_alloc_func(rbffi_StructLayoutFieldClass, struct_field_allocate); + rb_define_method(rbffi_StructLayoutFieldClass, "initialize", struct_field_initialize, -1); + rb_define_method(rbffi_StructLayoutFieldClass, "offset", struct_field_offset, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "size", struct_field_size, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "alignment", struct_field_alignment, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "name", struct_field_name, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "type", struct_field_type, 0); + rb_define_method(rbffi_StructLayoutFieldClass, "put", struct_field_put, 2); + rb_define_method(rbffi_StructLayoutFieldClass, "get", struct_field_get, 1); + + rb_define_method(rbffi_StructLayoutFunctionFieldClass, "put", function_field_put, 2); + rb_define_method(rbffi_StructLayoutFunctionFieldClass, "get", function_field_get, 1); + + rb_define_method(rbffi_StructLayoutArrayFieldClass, "get", array_field_get, 1); + rb_define_method(rbffi_StructLayoutArrayFieldClass, "put", array_field_put, 2); + + rb_define_alloc_func(rbffi_StructLayoutClass, struct_layout_allocate); + rb_define_method(rbffi_StructLayoutClass, "initialize", struct_layout_initialize, 3); + rb_define_method(rbffi_StructLayoutClass, "[]", struct_layout_aref, 1); + rb_define_method(rbffi_StructLayoutClass, "fields", struct_layout_fields, 0); + rb_define_method(rbffi_StructLayoutClass, "members", struct_layout_members, 0); + rb_define_method(rbffi_StructLayoutClass, "to_a", struct_layout_to_a, 0); + rb_define_method(rbffi_StructLayoutClass, "__union!", struct_layout_union_bang, 0); + +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructLayout.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructLayout.o new file mode 100644 index 0000000..9d69b77 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/StructLayout.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.c new file mode 100644 index 0000000..f6a8387 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.c @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#if defined(__CYGWIN__) || !defined(_WIN32) +# include +# include +# include +# include +#else +# include +# define _WINSOCKAPI_ +# include +#endif +#include +#include "Thread.h" + +#ifdef _WIN32 +static volatile DWORD frame_thread_key = TLS_OUT_OF_INDEXES; +#else +static pthread_key_t thread_data_key; +struct thread_data { + rbffi_frame_t* frame; +}; +static inline struct thread_data* thread_data_get(void); + +#endif + +rbffi_frame_t* +rbffi_frame_current(void) +{ +#ifdef _WIN32 + return (rbffi_frame_t *) TlsGetValue(frame_thread_key); +#else + struct thread_data* td = (struct thread_data *) pthread_getspecific(thread_data_key); + return td != NULL ? td->frame : NULL; +#endif +} + +void +rbffi_frame_push(rbffi_frame_t* frame) +{ + memset(frame, 0, sizeof(*frame)); + frame->exc = Qnil; + +#ifdef _WIN32 + frame->prev = TlsGetValue(frame_thread_key); + TlsSetValue(frame_thread_key, frame); +#else + frame->td = thread_data_get(); + frame->prev = frame->td->frame; + frame->td->frame = frame; +#endif +} + +void +rbffi_frame_pop(rbffi_frame_t* frame) +{ +#ifdef _WIN32 + TlsSetValue(frame_thread_key, frame->prev); +#else + frame->td->frame = frame->prev; +#endif +} + +#ifndef _WIN32 +static struct thread_data* thread_data_init(void); + +static inline struct thread_data* +thread_data_get(void) +{ + struct thread_data* td = (struct thread_data *) pthread_getspecific(thread_data_key); + return td != NULL ? 
td : thread_data_init(); +} + +static struct thread_data* +thread_data_init(void) +{ + struct thread_data* td = calloc(1, sizeof(struct thread_data)); + + pthread_setspecific(thread_data_key, td); + + return td; +} + +static void +thread_data_free(void *ptr) +{ + free(ptr); +} +#endif + +void +rbffi_Thread_Init(VALUE moduleFFI) +{ +#ifdef _WIN32 + frame_thread_key = TlsAlloc(); +#else + pthread_key_create(&thread_data_key, thread_data_free); +#endif +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.h new file mode 100644 index 0000000..8c335e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2010 Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_THREAD_H +#define RBFFI_THREAD_H + +#include +#include +#include "extconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifdef _WIN32 +# include +#else +# include +#endif + +typedef struct { +#ifdef _WIN32 + DWORD id; +#else + pthread_t id; +#endif + bool valid; + bool has_gvl; + VALUE exc; +} rbffi_thread_t; + +typedef struct rbffi_frame { +#ifndef _WIN32 + struct thread_data* td; +#endif + struct rbffi_frame* prev; + VALUE exc; +} rbffi_frame_t; + +rbffi_frame_t* rbffi_frame_current(void); +void rbffi_frame_push(rbffi_frame_t* frame); +void rbffi_frame_pop(rbffi_frame_t* frame); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_THREAD_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.o new file mode 100644 index 0000000..86fa82d Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Thread.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.c new file mode 100644 index 0000000..7776bb0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.c @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MSC_VER +# include +#endif + +#include +#include +#include +#include "rbffi.h" +#include "compat.h" +#include "Types.h" +#include "Type.h" + + +typedef struct BuiltinType_ { + Type type; + char* name; +} BuiltinType; + +static void builtin_type_free(BuiltinType *); + +VALUE rbffi_TypeClass = Qnil; + +static VALUE classBuiltinType = Qnil; +static VALUE moduleNativeType = Qnil; +static VALUE typeMap = Qnil, sizeMap = Qnil; +static ID id_find_type = 0, id_type_size = 0, id_size = 0; + +static VALUE +type_allocate(VALUE klass) +{ + Type* type; + VALUE obj = Data_Make_Struct(klass, Type, NULL, -1, type); + + type->nativeType = -1; + type->ffiType = &ffi_type_void; + + return obj; +} + +/* + * Document-method: initialize + * call-seq: initialize(value) + * @param [Fixnum,Type] value + * @return [self] + */ +static VALUE +type_initialize(VALUE self, VALUE value) +{ + Type* type; + Type* other; + + Data_Get_Struct(self, Type, type); + + if (FIXNUM_P(value)) { + type->nativeType = FIX2INT(value); + } else if (rb_obj_is_kind_of(value, rbffi_TypeClass)) { + Data_Get_Struct(value, Type, other); + type->nativeType = other->nativeType; + type->ffiType = other->ffiType; + } else { + rb_raise(rb_eArgError, "wrong type"); + } + + return self; +} + +/* + * call-seq: type.size + * @return [Fixnum] + * Return type's size, in bytes. + */ +static VALUE +type_size(VALUE self) +{ + Type *type; + + Data_Get_Struct(self, Type, type); + + return INT2FIX(type->ffiType->size); +} + +/* + * call-seq: type.alignment + * @return [Fixnum] + * Get Type alignment. + */ +static VALUE +type_alignment(VALUE self) +{ + Type *type; + + Data_Get_Struct(self, Type, type); + + return INT2FIX(type->ffiType->alignment); +} + +/* + * call-seq: type.inspect + * @return [String] + * Inspect {Type} object. + */ +static VALUE +type_inspect(VALUE self) +{ + char buf[100]; + Type *type; + + Data_Get_Struct(self, Type, type); + + snprintf(buf, sizeof(buf), "#<%s:%p size=%d alignment=%d>", + rb_obj_classname(self), type, (int) type->ffiType->size, (int) type->ffiType->alignment); + + return rb_str_new2(buf); +} + +static VALUE +builtin_type_new(VALUE klass, int nativeType, ffi_type* ffiType, const char* name) +{ + BuiltinType* type; + VALUE obj = Qnil; + + obj = Data_Make_Struct(klass, BuiltinType, NULL, builtin_type_free, type); + + type->name = strdup(name); + type->type.nativeType = nativeType; + type->type.ffiType = ffiType; + + return obj; +} + +static void +builtin_type_free(BuiltinType *type) +{ + free(type->name); + xfree(type); +} + +/* + * call-seq: type.inspect + * @return [String] + * Inspect {Type::Builtin} object. 
+ */ +static VALUE +builtin_type_inspect(VALUE self) +{ + char buf[100]; + BuiltinType *type; + + Data_Get_Struct(self, BuiltinType, type); + snprintf(buf, sizeof(buf), "#<%s:%s size=%d alignment=%d>", + rb_obj_classname(self), type->name, (int) type->type.ffiType->size, type->type.ffiType->alignment); + + return rb_str_new2(buf); +} + +int +rbffi_type_size(VALUE type) +{ + int t = TYPE(type); + + if (t == T_FIXNUM || t == T_BIGNUM) { + return NUM2INT(type); + + } else if (t == T_SYMBOL) { + /* + * Try looking up directly in the type and size maps + */ + VALUE nType; + if ((nType = rb_hash_lookup(typeMap, type)) != Qnil) { + if (rb_obj_is_kind_of(nType, rbffi_TypeClass)) { + Type* type; + Data_Get_Struct(nType, Type, type); + return (int) type->ffiType->size; + + } else if (rb_respond_to(nType, id_size)) { + return NUM2INT(rb_funcall2(nType, id_size, 0, NULL)); + } + } + + /* Not found - call up to the ruby version to resolve */ + return NUM2INT(rb_funcall2(rbffi_FFIModule, id_type_size, 1, &type)); + + } else { + return NUM2INT(rb_funcall2(type, id_size, 0, NULL)); + } +} + +VALUE +rbffi_Type_Lookup(VALUE name) +{ + int t = TYPE(name); + if (t == T_SYMBOL || t == T_STRING) { + /* + * Try looking up directly in the type Map + */ + VALUE nType; + if ((nType = rb_hash_lookup(typeMap, name)) != Qnil && rb_obj_is_kind_of(nType, rbffi_TypeClass)) { + return nType; + } + } else if (rb_obj_is_kind_of(name, rbffi_TypeClass)) { + + return name; + } + + /* Nothing found - let caller handle raising exceptions */ + return Qnil; +} + +void +rbffi_Type_Init(VALUE moduleFFI) +{ + /* + * Document-class: FFI::Type + * This class manages C types. + * + * It embbed {FFI::Type::Builtin} objects as constants (for names, + * see {FFI::NativeType}). + */ + rbffi_TypeClass = rb_define_class_under(moduleFFI, "Type", rb_cObject); + + /* + * Document-constant: FFI::TypeDefs + */ + rb_define_const(moduleFFI, "TypeDefs", typeMap = rb_hash_new()); + rb_define_const(moduleFFI, "SizeTypes", sizeMap = rb_hash_new()); + rb_global_variable(&typeMap); + rb_global_variable(&sizeMap); + id_find_type = rb_intern("find_type"); + id_type_size = rb_intern("type_size"); + id_size = rb_intern("size"); + + /* + * Document-class: FFI::Type::Builtin + * Class for Built-in types. + */ + classBuiltinType = rb_define_class_under(rbffi_TypeClass, "Builtin", rbffi_TypeClass); + /* + * Document-module: FFI::NativeType + * This module defines constants for native (C) types. + * + * ==Native type constants + * Native types are defined by constants : + * * INT8, SCHAR, CHAR + * * UINT8, UCHAR + * * INT16, SHORT, SSHORT + * * UINT16, USHORT + * * INT32,, INT, SINT + * * UINT32, UINT + * * INT64, LONG_LONG, SLONG_LONG + * * UINT64, ULONG_LONG + * * LONG, SLONG + * * ULONG + * * FLOAT32, FLOAT + * * FLOAT64, DOUBLE + * * POINTER + * * CALLBACK + * * FUNCTION + * * CHAR_ARRAY + * * BOOL + * * STRING (immutable string, nul terminated) + * * STRUCT (struct-b-value param or result) + * * ARRAY (array type definition) + * * MAPPED (custom native type) + * For function return type only : + * * VOID + * For function argument type only : + * * BUFFER_IN + * * BUFFER_OUT + * * VARARGS (function takes a variable number of arguments) + * + * All these constants are exported to {FFI} module prefixed with "TYPE_". + * They are objets from {FFI::Type::Builtin} class. 
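For orientation, the Type machinery in this file is what backs the ordinary Ruby-level type queries. A minimal sketch (values are typical for a 64-bit platform and are not part of this diff):

    require 'ffi'

    FFI.find_type(:int32)       # => FFI::Type::INT32, a Type::Builtin instance
    FFI.type_size(:int32)       # => 4, resolved through rbffi_type_size above
    FFI::Type::INT32.size       # => 4
    FFI::Type::INT32.alignment  # => 4
    FFI::TypeDefs[:int32]       # the typeMap hash registered by rbffi_Type_Init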
+ */ + moduleNativeType = rb_define_module_under(moduleFFI, "NativeType"); + + /* + * Document-global: FFI::Type + */ + rb_global_variable(&rbffi_TypeClass); + rb_global_variable(&classBuiltinType); + rb_global_variable(&moduleNativeType); + + rb_define_alloc_func(rbffi_TypeClass, type_allocate); + rb_define_method(rbffi_TypeClass, "initialize", type_initialize, 1); + rb_define_method(rbffi_TypeClass, "size", type_size, 0); + rb_define_method(rbffi_TypeClass, "alignment", type_alignment, 0); + rb_define_method(rbffi_TypeClass, "inspect", type_inspect, 0); + + /* Make Type::Builtin non-allocatable */ + rb_undef_method(CLASS_OF(classBuiltinType), "new"); + rb_define_method(classBuiltinType, "inspect", builtin_type_inspect, 0); + + rb_global_variable(&rbffi_TypeClass); + rb_global_variable(&classBuiltinType); + + /* Define all the builtin types */ + #define T(x, ffiType) do { \ + VALUE t = Qnil; \ + rb_define_const(rbffi_TypeClass, #x, t = builtin_type_new(classBuiltinType, NATIVE_##x, ffiType, #x)); \ + rb_define_const(moduleNativeType, #x, t); \ + rb_define_const(moduleFFI, "TYPE_" #x, t); \ + } while(0) + + #define A(old_type, new_type) do { \ + VALUE t = rb_const_get(rbffi_TypeClass, rb_intern(#old_type)); \ + rb_const_set(rbffi_TypeClass, rb_intern(#new_type), t); \ + } while(0) + + /* + * Document-constant: FFI::Type::Builtin::VOID + */ + T(VOID, &ffi_type_void); + T(INT8, &ffi_type_sint8); + A(INT8, SCHAR); + A(INT8, CHAR); + T(UINT8, &ffi_type_uint8); + A(UINT8, UCHAR); + + T(INT16, &ffi_type_sint16); + A(INT16, SHORT); + A(INT16, SSHORT); + T(UINT16, &ffi_type_uint16); + A(UINT16, USHORT); + T(INT32, &ffi_type_sint32); + A(INT32, INT); + A(INT32, SINT); + T(UINT32, &ffi_type_uint32); + A(UINT32, UINT); + T(INT64, &ffi_type_sint64); + A(INT64, LONG_LONG); + A(INT64, SLONG_LONG); + T(UINT64, &ffi_type_uint64); + A(UINT64, ULONG_LONG); + T(LONG, &ffi_type_slong); + A(LONG, SLONG); + T(ULONG, &ffi_type_ulong); + T(FLOAT32, &ffi_type_float); + A(FLOAT32, FLOAT); + T(FLOAT64, &ffi_type_double); + A(FLOAT64, DOUBLE); + T(LONGDOUBLE, &ffi_type_longdouble); + T(POINTER, &ffi_type_pointer); + T(STRING, &ffi_type_pointer); + T(BUFFER_IN, &ffi_type_pointer); + T(BUFFER_OUT, &ffi_type_pointer); + T(BUFFER_INOUT, &ffi_type_pointer); + T(BOOL, &ffi_type_uchar); + T(VARARGS, &ffi_type_void); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.h new file mode 100644 index 0000000..b81995a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * This file is part of ruby-ffi. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the Evan Phoenix nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
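The T()/A() macros above register every builtin under three namespaces and make each alias the very same object, which can be observed from Ruby (a small sketch, not part of the diff):

    require 'ffi'

    FFI::Type::CHAR.equal?(FFI::Type::INT8)        # => true, A(INT8, CHAR) reuses the same VALUE
    FFI::NativeType::INT8.equal?(FFI::Type::INT8)  # => true, defined by the T() macro
    FFI::TYPE_INT8.equal?(FFI::Type::INT8)         # => true, the "TYPE_" export on the FFI module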
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef RBFFI_TYPE_H +#define RBFFI_TYPE_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct Type_ Type; + +#include "Types.h" + +struct Type_ { + NativeType nativeType; + ffi_type* ffiType; +}; + +extern VALUE rbffi_TypeClass; +extern VALUE rbffi_Type_Lookup(VALUE type); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_TYPE_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.o new file mode 100644 index 0000000..b87f352 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Type.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.c new file mode 100644 index 0000000..77741e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * Copyright (c) 2009, Aman Gupta. + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include "Pointer.h" +#include "rbffi.h" +#include "Function.h" +#include "StructByValue.h" +#include "Types.h" +#include "Struct.h" +#include "MappedType.h" +#include "MemoryPointer.h" +#include "LongDouble.h" + +static ID id_from_native = 0; + + +VALUE +rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr) +{ + switch (type->nativeType) { + case NATIVE_VOID: + return Qnil; + case NATIVE_INT8: + return INT2NUM((signed char) *(ffi_sarg *) ptr); + case NATIVE_INT16: + return INT2NUM((signed short) *(ffi_sarg *) ptr); + case NATIVE_INT32: + return INT2NUM((signed int) *(ffi_sarg *) ptr); + case NATIVE_LONG: + return LONG2NUM((signed long) *(ffi_sarg *) ptr); + case NATIVE_INT64: + return LL2NUM(*(signed long long *) ptr); + + case NATIVE_UINT8: + return UINT2NUM((unsigned char) *(ffi_arg *) ptr); + case NATIVE_UINT16: + return UINT2NUM((unsigned short) *(ffi_arg *) ptr); + case NATIVE_UINT32: + return UINT2NUM((unsigned int) *(ffi_arg *) ptr); + case NATIVE_ULONG: + return ULONG2NUM((unsigned long) *(ffi_arg *) ptr); + case NATIVE_UINT64: + return ULL2NUM(*(unsigned long long *) ptr); + + case NATIVE_FLOAT32: + return rb_float_new(*(float *) ptr); + case NATIVE_FLOAT64: + return rb_float_new(*(double *) ptr); + + case NATIVE_LONGDOUBLE: + return rbffi_longdouble_new(*(long double *) ptr); + + case NATIVE_STRING: + return (*(void **) ptr != NULL) ? rb_str_new2(*(char **) ptr) : Qnil; + case NATIVE_POINTER: + return rbffi_Pointer_NewInstance(*(void **) ptr); + case NATIVE_BOOL: + return ((unsigned char) *(ffi_arg *) ptr) ? Qtrue : Qfalse; + + case NATIVE_FUNCTION: { + return *(void **) ptr != NULL + ? rbffi_Function_NewInstance(rbType, rbffi_Pointer_NewInstance(*(void **) ptr)) + : Qnil; + } + + case NATIVE_STRUCT: { + StructByValue* sbv = (StructByValue *)type; + AbstractMemory* mem; + VALUE rbMemory = rbffi_MemoryPointer_NewInstance(1, sbv->base.ffiType->size, false); + + Data_Get_Struct(rbMemory, AbstractMemory, mem); + memcpy(mem->address, ptr, sbv->base.ffiType->size); + RB_GC_GUARD(rbMemory); + RB_GC_GUARD(rbType); + + return rb_class_new_instance(1, &rbMemory, sbv->rbStructClass); + } + + case NATIVE_MAPPED: { + /* + * For mapped types, first convert to the real native type, then upcall to + * ruby to convert to the expected return type + */ + MappedType* m = (MappedType *) type; + VALUE values[2], rbReturnValue; + + values[0] = rbffi_NativeValue_ToRuby(m->type, m->rbType, ptr); + values[1] = Qnil; + + + rbReturnValue = rb_funcall2(m->rbConverter, id_from_native, 2, values); + RB_GC_GUARD(values[0]); + RB_GC_GUARD(rbType); + + return rbReturnValue; + } + + default: + rb_raise(rb_eRuntimeError, "Unknown type: %d", type->nativeType); + return Qnil; + } +} + +void +rbffi_Types_Init(VALUE moduleFFI) +{ + id_from_native = rb_intern("from_native"); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.h new file mode 100644 index 0000000..4b72320 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (c) 2009, Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. 
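rbffi_NativeValue_ToRuby above is the single point where C return values become Ruby objects: NUL-terminated strings become a String or nil, BOOL becomes true/false, and pointers come back as FFI::Pointer. A minimal sketch against libc (assumes a POSIX getenv; not part of this diff):

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :getenv, [:string], :string   # NATIVE_STRING return
    end

    LibC.getenv("HOME")              # => a Ruby String, e.g. the user's home directory
    LibC.getenv("NO_SUCH_VARIABLE")  # => nil, the NULL branch of the NATIVE_STRING case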
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_TYPES_H +#define RBFFI_TYPES_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + NATIVE_VOID, + NATIVE_INT8, + NATIVE_UINT8, + NATIVE_INT16, + NATIVE_UINT16, + NATIVE_INT32, + NATIVE_UINT32, + NATIVE_INT64, + NATIVE_UINT64, + NATIVE_LONG, + NATIVE_ULONG, + NATIVE_FLOAT32, + NATIVE_FLOAT64, + NATIVE_LONGDOUBLE, + NATIVE_POINTER, + NATIVE_FUNCTION, + NATIVE_BUFFER_IN, + NATIVE_BUFFER_OUT, + NATIVE_BUFFER_INOUT, + NATIVE_CHAR_ARRAY, + NATIVE_BOOL, + + /** An immutable string. Nul terminated, but only copies in to the native function */ + NATIVE_STRING, + + /** The function takes a variable number of arguments */ + NATIVE_VARARGS, + + /** Struct-by-value param or result */ + NATIVE_STRUCT, + + /** An array type definition */ + NATIVE_ARRAY, + + /** Custom native type */ + NATIVE_MAPPED, +} NativeType; + +#include +#include "Type.h" + +VALUE rbffi_NativeValue_ToRuby(Type* type, VALUE rbType, const void* ptr); +void rbffi_Types_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_TYPES_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.o new file mode 100644 index 0000000..4caa63e Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Types.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Variadic.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Variadic.c new file mode 100644 index 0000000..8ad38b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Variadic.c @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2008-2010 Wayne Meissner + * Copyright (C) 2009 Andrea Fazzi + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
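NATIVE_MAPPED in the enum above is the entry behind FFI::DataConverter: when a mapped type is returned, Types.c first converts the real native value and then upcalls from_native on the converter. A hedged sketch of a custom converter (illustrative only; the module name is made up):

    require 'ffi'

    module BoolFromInt
      extend FFI::DataConverter
      native_type FFI::Type::INT32

      # invoked through id_from_native in Types.c when a mapped value is returned
      def self.from_native(value, ctx)
        value != 0
      end

      def self.to_native(value, ctx)
        value ? 1 : 0
      end
    end
    # typically registered with FFI.typedef or a Library-level typedef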
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MSC_VER +#include +#endif +#include + +#include +#include +#include +#include + +#include +#include "rbffi.h" +#include "compat.h" + +#include "AbstractMemory.h" +#include "Pointer.h" +#include "Types.h" +#include "Type.h" +#include "LastError.h" +#include "MethodHandle.h" +#include "Call.h" +#include "Thread.h" + +typedef struct VariadicInvoker_ { + VALUE rbAddress; + VALUE rbReturnType; + VALUE rbEnums; + + Type* returnType; + ffi_abi abi; + void* function; + int paramCount; + bool blocking; +} VariadicInvoker; + + +static VALUE variadic_allocate(VALUE klass); +static VALUE variadic_initialize(VALUE self, VALUE rbFunction, VALUE rbParameterTypes, + VALUE rbReturnType, VALUE options); +static void variadic_mark(VariadicInvoker *); + +static VALUE classVariadicInvoker = Qnil; + + +static VALUE +variadic_allocate(VALUE klass) +{ + VariadicInvoker *invoker; + VALUE obj = Data_Make_Struct(klass, VariadicInvoker, variadic_mark, -1, invoker); + + invoker->rbAddress = Qnil; + invoker->rbEnums = Qnil; + invoker->rbReturnType = Qnil; + invoker->blocking = false; + + return obj; +} + +static void +variadic_mark(VariadicInvoker *invoker) +{ + rb_gc_mark(invoker->rbEnums); + rb_gc_mark(invoker->rbAddress); + rb_gc_mark(invoker->rbReturnType); +} + +static VALUE +variadic_initialize(VALUE self, VALUE rbFunction, VALUE rbParameterTypes, VALUE rbReturnType, VALUE options) +{ + VariadicInvoker* invoker = NULL; + VALUE retval = Qnil; + VALUE convention = Qnil; + VALUE fixed = Qnil; +#if defined(X86_WIN32) + VALUE rbConventionStr; +#endif + int i; + + Check_Type(options, T_HASH); + convention = rb_hash_aref(options, ID2SYM(rb_intern("convention"))); + + Data_Get_Struct(self, VariadicInvoker, invoker); + invoker->rbEnums = rb_hash_aref(options, ID2SYM(rb_intern("enums"))); + invoker->rbAddress = rbFunction; + invoker->function = rbffi_AbstractMemory_Cast(rbFunction, rbffi_PointerClass)->address; + invoker->blocking = RTEST(rb_hash_aref(options, ID2SYM(rb_intern("blocking")))); + +#if defined(X86_WIN32) + rbConventionStr = rb_funcall2(convention, rb_intern("to_s"), 0, NULL); + invoker->abi = (RTEST(convention) && strcmp(StringValueCStr(rbConventionStr), "stdcall") == 0) + ? 
FFI_STDCALL : FFI_DEFAULT_ABI; +#else + invoker->abi = FFI_DEFAULT_ABI; +#endif + + invoker->rbReturnType = rbffi_Type_Lookup(rbReturnType); + if (!RTEST(invoker->rbReturnType)) { + VALUE typeName = rb_funcall2(rbReturnType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid return type (%s)", RSTRING_PTR(typeName)); + } + + Data_Get_Struct(rbReturnType, Type, invoker->returnType); + + invoker->paramCount = -1; + + fixed = rb_ary_new2(RARRAY_LEN(rbParameterTypes) - 1); + for (i = 0; i < RARRAY_LEN(rbParameterTypes); ++i) { + VALUE entry = rb_ary_entry(rbParameterTypes, i); + VALUE rbType = rbffi_Type_Lookup(entry); + Type* type; + + if (!RTEST(rbType)) { + VALUE typeName = rb_funcall2(entry, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Invalid parameter type (%s)", RSTRING_PTR(typeName)); + } + Data_Get_Struct(rbType, Type, type); + if (type->nativeType != NATIVE_VARARGS) { + rb_ary_push(fixed, entry); + } + } + /* + * @fixed and @type_map are used by the parameter mangling ruby code + */ + rb_iv_set(self, "@fixed", fixed); + rb_iv_set(self, "@type_map", rb_hash_aref(options, ID2SYM(rb_intern("type_map")))); + + return retval; +} + +static VALUE +variadic_invoke(VALUE self, VALUE parameterTypes, VALUE parameterValues) +{ + VariadicInvoker* invoker; + FFIStorage* params; + void* retval; + ffi_cif cif; + void** ffiValues; + ffi_type** ffiParamTypes; + ffi_type* ffiReturnType; + Type** paramTypes; + VALUE* argv; + VALUE* callbackParameters; + int paramCount = 0, fixedCount = 0, callbackCount = 0, i; + ffi_status ffiStatus; + rbffi_frame_t frame = { 0 }; + + Check_Type(parameterTypes, T_ARRAY); + Check_Type(parameterValues, T_ARRAY); + + Data_Get_Struct(self, VariadicInvoker, invoker); + paramCount = (int) RARRAY_LEN(parameterTypes); + paramTypes = ALLOCA_N(Type *, paramCount); + ffiParamTypes = ALLOCA_N(ffi_type *, paramCount); + params = ALLOCA_N(FFIStorage, paramCount); + ffiValues = ALLOCA_N(void*, paramCount); + argv = ALLOCA_N(VALUE, paramCount); + callbackParameters = ALLOCA_N(VALUE, paramCount); + retval = alloca(MAX(invoker->returnType->ffiType->size, FFI_SIZEOF_ARG)); + + for (i = 0; i < paramCount; ++i) { + VALUE rbType = rb_ary_entry(parameterTypes, i); + + if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) { + rb_raise(rb_eTypeError, "wrong type. 
Expected (FFI::Type)"); + } + Data_Get_Struct(rbType, Type, paramTypes[i]); + + switch (paramTypes[i]->nativeType) { + case NATIVE_INT8: + case NATIVE_INT16: + case NATIVE_INT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("INT32")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + case NATIVE_UINT8: + case NATIVE_UINT16: + case NATIVE_UINT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("UINT32")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + + case NATIVE_FLOAT32: + rbType = rb_const_get(rbffi_TypeClass, rb_intern("DOUBLE")); + Data_Get_Struct(rbType, Type, paramTypes[i]); + break; + + case NATIVE_FUNCTION: + if (!rb_obj_is_kind_of(rbType, rbffi_FunctionTypeClass)) { + VALUE typeName = rb_funcall2(rbType, rb_intern("inspect"), 0, NULL); + rb_raise(rb_eTypeError, "Incorrect parameter type (%s)", RSTRING_PTR(typeName)); + } + callbackParameters[callbackCount++] = rbType; + break; + + default: + break; + } + + + ffiParamTypes[i] = paramTypes[i]->ffiType; + if (ffiParamTypes[i] == NULL) { + rb_raise(rb_eArgError, "Invalid parameter type #%x", paramTypes[i]->nativeType); + } + argv[i] = rb_ary_entry(parameterValues, i); + } + + ffiReturnType = invoker->returnType->ffiType; + if (ffiReturnType == NULL) { + rb_raise(rb_eArgError, "Invalid return type"); + } + + /*Get the number of fixed args from @fixed array*/ + fixedCount = RARRAY_LEN(rb_iv_get(self, "@fixed")); + +#ifdef HAVE_FFI_PREP_CIF_VAR + ffiStatus = ffi_prep_cif_var(&cif, invoker->abi, fixedCount, paramCount, ffiReturnType, ffiParamTypes); +#else + ffiStatus = ffi_prep_cif(&cif, invoker->abi, paramCount, ffiReturnType, ffiParamTypes); +#endif + switch (ffiStatus) { + case FFI_BAD_ABI: + rb_raise(rb_eArgError, "Invalid ABI specified"); + case FFI_BAD_TYPEDEF: + rb_raise(rb_eArgError, "Invalid argument type specified"); + case FFI_OK: + break; + default: + rb_raise(rb_eArgError, "Unknown FFI error"); + } + + rbffi_SetupCallParams(paramCount, argv, -1, paramTypes, params, + ffiValues, callbackParameters, callbackCount, invoker->rbEnums); + + rbffi_frame_push(&frame); + + if(unlikely(invoker->blocking)) { + rbffi_blocking_call_t* bc; + bc = ALLOCA_N(rbffi_blocking_call_t, 1); + bc->retval = retval; + bc->function = invoker->function; + bc->ffiValues = ffiValues; + bc->params = params; + bc->frame = &frame; + bc->cif = cif; + + rb_rescue2(rbffi_do_blocking_call, (VALUE) bc, rbffi_save_frame_exception, (VALUE) &frame, rb_eException, (VALUE) 0); + } else { + ffi_call(&cif, FFI_FN(invoker->function), retval, ffiValues); + } + + rbffi_frame_pop(&frame); + + rbffi_save_errno(); + + if (RTEST(frame.exc) && frame.exc != Qnil) { + rb_exc_raise(frame.exc); + } + + return rbffi_NativeValue_ToRuby(invoker->returnType, invoker->rbReturnType, retval); +} + + +void +rbffi_Variadic_Init(VALUE moduleFFI) +{ + classVariadicInvoker = rb_define_class_under(moduleFFI, "VariadicInvoker", rb_cObject); + rb_global_variable(&classVariadicInvoker); + + rb_define_alloc_func(classVariadicInvoker, variadic_allocate); + + rb_define_method(classVariadicInvoker, "initialize", variadic_initialize, 4); + rb_define_method(classVariadicInvoker, "invoke", variadic_invoke, 2); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Variadic.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Variadic.o new file mode 100644 index 0000000..910de64 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/Variadic.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/compat.h 
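VariadicInvoker above is what FFI::Library uses when a signature ends in :varargs; note how variadic_invoke promotes small integers to INT32/UINT32 and FLOAT32 to DOUBLE, matching C default argument promotion. From Ruby, variadic calls pass explicit type/value pairs (a sketch against libc printf; assumes a standard libc):

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :printf, [:string, :varargs], :int
    end

    # each vararg is given as a type symbol followed by its value
    LibC.printf("pid=%d name=%s\n", :int, 42, :string, "zephyr")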
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/compat.h new file mode 100644 index 0000000..3f7bbae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/compat.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RBFFI_COMPAT_H +#define RBFFI_COMPAT_H + +#include + +#ifndef RARRAY_LEN +# define RARRAY_LEN(ary) RARRAY(ary)->len +#endif + +#ifndef RARRAY_PTR +# define RARRAY_PTR(ary) RARRAY(ary)->ptr +#endif + +#ifndef RSTRING_LEN +# define RSTRING_LEN(s) RSTRING(s)->len +#endif + +#ifndef RSTRING_PTR +# define RSTRING_PTR(s) RSTRING(s)->ptr +#endif + +#ifndef NUM2ULL +# define NUM2ULL(x) rb_num2ull((VALUE)x) +#endif + +#ifndef roundup +# define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#endif + +#ifdef __GNUC__ +# define likely(x) __builtin_expect((x), 1) +# define unlikely(x) __builtin_expect((x), 0) +#else +# define likely(x) (x) +# define unlikely(x) (x) +#endif + +#ifdef _MSC_VER +#define ffi_type_longdouble ffi_type_double +#endif + +#ifndef MAX +# define MAX(a, b) ((a) < (b) ? (b) : (a)) +#endif +#ifndef MIN +# define MIN(a, b) ((a) < (b) ? 
(a) : (b)) +#endif + +#ifndef RB_GC_GUARD +# define RB_GC_GUARD(x) (x) +#endif + +#endif /* RBFFI_COMPAT_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/extconf.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/extconf.h new file mode 100644 index 0000000..ac2ec31 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/extconf.h @@ -0,0 +1,6 @@ +#ifndef EXTCONF_H +#define EXTCONF_H +#define HAVE_FFI_PREP_CIF_VAR 1 +#define USE_INTERNAL_LIBFFI 1 +#define USE_FFI_ALLOC 1 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/extconf.rb new file mode 100644 index 0000000..720fb06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/extconf.rb @@ -0,0 +1,98 @@ +#!/usr/bin/env ruby + +if RUBY_ENGINE == 'ruby' || RUBY_ENGINE == 'rbx' + require 'mkmf' + require 'rbconfig' + + def system_libffi_usable? + # We need pkg_config or ffi.h + libffi_ok = pkg_config("libffi") || + have_header("ffi.h") || + find_header("ffi.h", "/usr/local/include", "/usr/include/ffi", + "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/ffi", + "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/ffi") || + (find_header("ffi.h", `xcrun --sdk macosx --show-sdk-path`.strip + "/usr/include/ffi") rescue false) + + # Ensure we can link to ffi_prep_closure_loc + libffi_ok &&= have_library("ffi", "ffi_prep_closure_loc", [ "ffi.h" ]) || + have_library("libffi", "ffi_prep_closure_loc", [ "ffi.h" ]) || + have_library("libffi-8", "ffi_prep_closure_loc", [ "ffi.h" ]) + + if RbConfig::CONFIG['host_os'] =~ /mswin/ + have_library('libffi_convenience') + have_library('shlwapi') + end + + libffi_ok + end + + dir_config("ffi_c") + + # recent versions of ruby add restrictive ansi and warning flags on a whim - kill them all + $warnflags = '' + $CFLAGS.gsub!(/[\s+]-ansi/, '') + $CFLAGS.gsub!(/[\s+]-std=[^\s]+/, '') + # solaris 10 needs -c99 for + $CFLAGS << " -std=c99" if RbConfig::CONFIG['host_os'] =~ /solaris(!?2\.11)/ + + # Check whether we use system libffi + system_libffi = enable_config('system-libffi', :try) + + if system_libffi == :try + system_libffi = ENV['RUBY_CC_VERSION'].nil? && system_libffi_usable? + elsif system_libffi + abort "system libffi is not usable" unless system_libffi_usable? + end + + if system_libffi + have_func('ffi_prep_cif_var') + $defs << "-DHAVE_RAW_API" if have_func("ffi_raw_call") && have_func("ffi_prep_raw_closure") + else + $defs << "-DHAVE_FFI_PREP_CIF_VAR" + $defs << "-DUSE_INTERNAL_LIBFFI" + + # Ensure libffi symbols aren't exported when using static libffi. + # This is to avoid interference with other gems like fiddle. + # See https://github.com/ffi/ffi/issues/835 + append_ldflags "-Wl,--exclude-libs,ALL" + end + + # Some linux archs need explicit linking to pthread, see https://github.com/ffi/ffi/issues/893 + append_ldflags "-pthread" + + ffi_alloc_default = RbConfig::CONFIG['host_os'] =~ /darwin/i && RbConfig::CONFIG['host'] =~ /arm|aarch64/i + if enable_config('libffi-alloc', ffi_alloc_default) + $defs << "-DUSE_FFI_ALLOC" + end + + $defs << "-DHAVE_EXTCONF_H" if $defs.empty? 
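The generated extconf.h above records that this particular build compiled the bundled libffi (USE_INTERNAL_LIBFFI) rather than a system copy. That choice is driven by enable_config('system-libffi', :try) in extconf.rb and can be forced at install time (shown for reference, as documented for the ffi gem):

    gem install ffi -- --enable-system-libffi
    # or, when the gem is managed by Bundler:
    bundle config build.ffi --enable-system-libffi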
# needed so create_header works + + create_header + create_makefile("ffi_c") + + unless system_libffi + File.open("Makefile", "a") do |mf| + mf.puts "LIBFFI_HOST=--host=#{RbConfig::CONFIG['host_alias']}" if RbConfig::CONFIG.has_key?("host_alias") + if RbConfig::CONFIG['host_os'] =~ /darwin/i + if RbConfig::CONFIG['host'] =~ /arm|aarch64/i + mf.puts "LIBFFI_HOST=--host=aarch64-apple-#{RbConfig::CONFIG['host_os']}" + end + mf.puts "include ${srcdir}/libffi.darwin.mk" + elsif RbConfig::CONFIG['host_os'] =~ /bsd/i + mf.puts '.include "${srcdir}/libffi.bsd.mk"' + elsif RbConfig::CONFIG['host_os'] =~ /mswin64/i + mf.puts '!include $(srcdir)/libffi.vc64.mk' + elsif RbConfig::CONFIG['host_os'] =~ /mswin32/i + mf.puts '!include $(srcdir)/libffi.vc.mk' + else + mf.puts "include ${srcdir}/libffi.mk" + end + end + end + +else + File.open("Makefile", "w") do |mf| + mf.puts "# Dummy makefile for non-mri rubies" + mf.puts "all install::\n" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi.c new file mode 100644 index 0000000..22ea3bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * Copyright (C) 2009 Luc Heinrich + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include "rbffi.h" +#include "AbstractMemory.h" +#include "Pointer.h" +#include "MemoryPointer.h" +#include "Struct.h" +#include "StructByValue.h" +#include "DynamicLibrary.h" +#include "Platform.h" +#include "Types.h" +#include "LastError.h" +#include "Function.h" +#include "ClosurePool.h" +#include "MethodHandle.h" +#include "Call.h" +#include "ArrayType.h" +#include "MappedType.h" + +void Init_ffi_c(void); + +VALUE rbffi_FFIModule = Qnil; + +static VALUE moduleFFI = Qnil; + +void +Init_ffi_c(void) +{ + /* + * Document-module: FFI + * + * This module embbed type constants from {FFI::NativeType}. 
+ */ + rbffi_FFIModule = moduleFFI = rb_define_module("FFI"); + rb_global_variable(&rbffi_FFIModule); + + rbffi_Thread_Init(rbffi_FFIModule); + + /* FFI::Type needs to be initialized before most other classes */ + rbffi_Type_Init(moduleFFI); + + rbffi_ArrayType_Init(moduleFFI); + rbffi_LastError_Init(moduleFFI); + rbffi_Call_Init(moduleFFI); + rbffi_ClosurePool_Init(moduleFFI); + rbffi_MethodHandle_Init(moduleFFI); + rbffi_Platform_Init(moduleFFI); + rbffi_AbstractMemory_Init(moduleFFI); + rbffi_Pointer_Init(moduleFFI); + rbffi_Function_Init(moduleFFI); + rbffi_MemoryPointer_Init(moduleFFI); + rbffi_Buffer_Init(moduleFFI); + rbffi_StructByValue_Init(moduleFFI); + rbffi_Struct_Init(moduleFFI); + rbffi_DynamicLibrary_Init(moduleFFI); + rbffi_Variadic_Init(moduleFFI); + rbffi_Types_Init(moduleFFI); + rbffi_MappedType_Init(moduleFFI); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi.o new file mode 100644 index 0000000..4733f17 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi_c.bundle b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi_c.bundle new file mode 100755 index 0000000..8edc525 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/ffi_c.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.8.dylib b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.8.dylib new file mode 100755 index 0000000..6ca972f Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.8.dylib differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.dylib b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.dylib new file mode 120000 index 0000000..bd6312a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.dylib @@ -0,0 +1 @@ +libffi.8.dylib \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.la b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.la new file mode 120000 index 0000000..004dbb5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.la @@ -0,0 +1 @@ +../libffi.la \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.lai b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.lai new file mode 100644 index 0000000..db17c8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi.lai @@ -0,0 +1,41 @@ +# libffi.la - a libtool library file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='libffi.8.dylib' + +# Names of this library. +library_names='libffi.8.dylib libffi.dylib' + +# The name of the static archive. +old_library='' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags=' ' + +# Libraries that this one depends upon. 
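Init_ffi_c above is the extension entry point Ruby runs when ffi_c.bundle is loaded; everything under FFI:: exists only after it has run. A trivial check (not part of the diff):

    require 'ffi'          # loads ffi_c and runs Init_ffi_c

    FFI::Type              # defined by rbffi_Type_Init, deliberately initialized first
    FFI::Pointer           # rbffi_Pointer_Init
    FFI::Struct            # rbffi_Struct_Init
    FFI::VariadicInvoker   # rbffi_Variadic_Init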
+dependency_libs=' -L/usr/local/opt/libffi/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libffi. +current=9 +age=1 +revision=0 + +# Is this an already installed library? +installed=yes + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='/usr/local/lib' diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.a b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.a new file mode 100644 index 0000000..5565394 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.a differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.la b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.la new file mode 120000 index 0000000..0727452 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/.libs/libffi_convenience.la @@ -0,0 +1 @@ +../libffi_convenience.la \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/Makefile new file mode 100644 index 0000000..7f42c46 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/Makefile @@ -0,0 +1,2060 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + + + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = aarch64-apple-darwin21.3.0 +host_triplet = aarch64-apple-darwin21 +target_triplet = aarch64-apple-darwin21 +#am__append_1 = doc +#am__append_2 = src/debug.c +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +#am__append_3 = -DFFI_DEBUG +subdir = . 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(noinst_HEADERS) $(am__DIST_COMMON) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = fficonfig.h +CONFIG_CLEAN_FILES = libffi.pc +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \ + "$(DESTDIR)$(pkgconfigdir)" +LTLIBRARIES = $(noinst_LTLIBRARIES) $(toolexeclib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am__libffi_la_SOURCES_DIST = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c src/debug.c +am__dirstamp = $(am__leading_dot)dirstamp +#am__objects_1 = src/debug.lo +am_libffi_la_OBJECTS = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) +AM_V_lt = $(am__v_lt_$(V)) +am__v_lt_ = $(am__v_lt_$(AM_DEFAULT_VERBOSITY)) +am__v_lt_0 = --silent +am__v_lt_1 = +libffi_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(libffi_la_LDFLAGS) $(LDFLAGS) -o $@ +am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) +am__libffi_convenience_la_SOURCES_DIST = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c src/debug.c +am__objects_2 = src/prep_cif.lo src/types.lo src/raw_api.lo \ + src/java_raw_api.lo src/closures.lo $(am__objects_1) +am_libffi_convenience_la_OBJECTS = $(am__objects_2) +nodist_libffi_convenience_la_OBJECTS = +libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \ + $(nodist_libffi_convenience_la_OBJECTS) +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I. 
-I$(srcdir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = src/$(DEPDIR)/closures.Plo \ + src/$(DEPDIR)/debug.Plo src/$(DEPDIR)/java_raw_api.Plo \ + src/$(DEPDIR)/prep_cif.Plo src/$(DEPDIR)/raw_api.Plo \ + src/$(DEPDIR)/types.Plo src/aarch64/$(DEPDIR)/ffi.Plo \ + src/aarch64/$(DEPDIR)/sysv.Plo \ + src/aarch64/$(DEPDIR)/win64_armasm.Plo \ + src/alpha/$(DEPDIR)/ffi.Plo src/alpha/$(DEPDIR)/osf.Plo \ + src/arc/$(DEPDIR)/arcompact.Plo src/arc/$(DEPDIR)/ffi.Plo \ + src/arm/$(DEPDIR)/ffi.Plo src/arm/$(DEPDIR)/sysv.Plo \ + src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo \ + src/avr32/$(DEPDIR)/ffi.Plo src/avr32/$(DEPDIR)/sysv.Plo \ + src/bfin/$(DEPDIR)/ffi.Plo src/bfin/$(DEPDIR)/sysv.Plo \ + src/cris/$(DEPDIR)/ffi.Plo src/cris/$(DEPDIR)/sysv.Plo \ + src/csky/$(DEPDIR)/ffi.Plo src/csky/$(DEPDIR)/sysv.Plo \ + src/frv/$(DEPDIR)/eabi.Plo src/frv/$(DEPDIR)/ffi.Plo \ + src/ia64/$(DEPDIR)/ffi.Plo src/ia64/$(DEPDIR)/unix.Plo \ + src/kvx/$(DEPDIR)/ffi.Plo src/kvx/$(DEPDIR)/sysv.Plo \ + src/m32r/$(DEPDIR)/ffi.Plo src/m32r/$(DEPDIR)/sysv.Plo \ + src/m68k/$(DEPDIR)/ffi.Plo src/m68k/$(DEPDIR)/sysv.Plo \ + src/m88k/$(DEPDIR)/ffi.Plo src/m88k/$(DEPDIR)/obsd.Plo \ + src/metag/$(DEPDIR)/ffi.Plo src/metag/$(DEPDIR)/sysv.Plo \ + src/microblaze/$(DEPDIR)/ffi.Plo \ + src/microblaze/$(DEPDIR)/sysv.Plo src/mips/$(DEPDIR)/ffi.Plo \ + src/mips/$(DEPDIR)/n32.Plo src/mips/$(DEPDIR)/o32.Plo \ + src/moxie/$(DEPDIR)/eabi.Plo src/moxie/$(DEPDIR)/ffi.Plo \ + src/nios2/$(DEPDIR)/ffi.Plo src/nios2/$(DEPDIR)/sysv.Plo \ + src/or1k/$(DEPDIR)/ffi.Plo src/or1k/$(DEPDIR)/sysv.Plo \ + src/pa/$(DEPDIR)/ffi.Plo src/pa/$(DEPDIR)/hpux32.Plo \ + src/pa/$(DEPDIR)/linux.Plo src/powerpc/$(DEPDIR)/aix.Plo \ + src/powerpc/$(DEPDIR)/aix_closure.Plo \ + src/powerpc/$(DEPDIR)/darwin.Plo \ + src/powerpc/$(DEPDIR)/darwin_closure.Plo \ + src/powerpc/$(DEPDIR)/ffi.Plo \ + src/powerpc/$(DEPDIR)/ffi_darwin.Plo \ + src/powerpc/$(DEPDIR)/ffi_linux64.Plo \ + src/powerpc/$(DEPDIR)/ffi_sysv.Plo \ + src/powerpc/$(DEPDIR)/linux64.Plo \ + src/powerpc/$(DEPDIR)/linux64_closure.Plo \ + src/powerpc/$(DEPDIR)/ppc_closure.Plo \ + src/powerpc/$(DEPDIR)/sysv.Plo src/riscv/$(DEPDIR)/ffi.Plo \ + src/riscv/$(DEPDIR)/sysv.Plo src/s390/$(DEPDIR)/ffi.Plo \ + src/s390/$(DEPDIR)/sysv.Plo src/sh/$(DEPDIR)/ffi.Plo \ + src/sh/$(DEPDIR)/sysv.Plo src/sh64/$(DEPDIR)/ffi.Plo \ + src/sh64/$(DEPDIR)/sysv.Plo src/sparc/$(DEPDIR)/ffi.Plo \ + src/sparc/$(DEPDIR)/ffi64.Plo src/sparc/$(DEPDIR)/v8.Plo \ + src/sparc/$(DEPDIR)/v9.Plo src/tile/$(DEPDIR)/ffi.Plo \ + src/tile/$(DEPDIR)/tile.Plo src/vax/$(DEPDIR)/elfbsd.Plo \ + src/vax/$(DEPDIR)/ffi.Plo src/x86/$(DEPDIR)/ffi.Plo \ + src/x86/$(DEPDIR)/ffi64.Plo src/x86/$(DEPDIR)/ffiw64.Plo \ + src/x86/$(DEPDIR)/sysv.Plo src/x86/$(DEPDIR)/sysv_intel.Plo \ + src/x86/$(DEPDIR)/unix64.Plo src/x86/$(DEPDIR)/win64.Plo \ + src/x86/$(DEPDIR)/win64_intel.Plo src/xtensa/$(DEPDIR)/ffi.Plo \ + src/xtensa/$(DEPDIR)/sysv.Plo +am__mv = mv -f +CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) +LTCPPASCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CCASFLAGS) $(CCASFLAGS) +AM_V_CPPAS = $(am__v_CPPAS_$(V)) +am__v_CPPAS_ = $(am__v_CPPAS_$(AM_DEFAULT_VERBOSITY)) +am__v_CPPAS_0 = @echo " CPPAS " $@; +am__v_CPPAS_1 = +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_$(V)) +am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY)) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_$(V)) +am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY)) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libffi_la_SOURCES) $(EXTRA_libffi_la_SOURCES) \ + $(libffi_convenience_la_SOURCES) \ + $(EXTRA_libffi_convenience_la_SOURCES) \ + $(nodist_libffi_convenience_la_SOURCES) +DIST_SOURCES = $(am__libffi_la_SOURCES_DIST) \ + $(EXTRA_libffi_la_SOURCES) \ + $(am__libffi_convenience_la_SOURCES_DIST) \ + $(EXTRA_libffi_convenience_la_SOURCES) +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(pkgconfig_DATA) +HEADERS = $(noinst_HEADERS) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + cscope distdir distdir-am dist dist-all distcheck +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ + $(LISP)fficonfig.h.in +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +CSCOPE = cscope +DIST_SUBDIRS = include testsuite man doc +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/fficonfig.h.in \ + $(srcdir)/libffi.pc.in compile config.guess config.sub depcomp \ + install-sh ltmain.sh missing +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! 
-perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . -type f -print +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = : +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = nm +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = AARCH64 
+TARGETDIR = aarch64 +TARGET_OBJ = src/aarch64/ffi.lo src/aarch64/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21 +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = +ac_ct_CXX = g++ +ac_ct_DUMPBIN = link -dump +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = aarch64-apple-darwin21.3.0 +build_alias = +build_cpu = aarch64 +build_os = darwin21.3.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = aarch64-apple-darwin21 +host_alias = aarch64-apple-darwin21 +host_cpu = aarch64 +host_os = darwin21 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = aarch64-apple-darwin21 +target_alias = aarch64-apple-darwin21 +target_cpu = aarch64 +target_os = darwin21 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = +top_builddir = . 
+top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign subdir-objects +ACLOCAL_AMFLAGS = -I m4 +SUBDIRS = include testsuite man $(am__append_1) +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) +MAKEOVERRIDES = +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la +libffi_la_SOURCES = src/prep_cif.c src/types.c src/raw_api.c \ + src/java_raw_api.c src/closures.c $(am__append_2) +noinst_HEADERS = src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h src/bfin/ffitarget.h \ + src/cris/ffitarget.h src/csky/ffitarget.h src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h src/m68k/ffitarget.h \ + src/m88k/ffitarget.h src/metag/ffitarget.h \ + src/microblaze/ffitarget.h src/mips/ffitarget.h \ + src/moxie/ffitarget.h src/nios2/ffitarget.h \ + src/or1k/ffitarget.h src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h \ + src/powerpc/ffi_powerpc.h src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h src/sh/ffitarget.h \ + src/sh64/ffitarget.h src/sparc/ffitarget.h \ + src/sparc/internal.h src/tile/ffitarget.h src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h \ + src/x86/asmnames.h src/xtensa/ffitarget.h src/dlmalloc.c \ + src/kvx/ffitarget.h + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c \ + src/csky/ffi.c src/csky/sysv.S src/frv/eabi.S src/ia64/ffi.c \ + src/ia64/unix.S src/m32r/ffi.c src/m32r/sysv.S src/m68k/ffi.c \ + src/m68k/sysv.S src/m88k/ffi.c src/m88k/obsd.S \ + src/metag/ffi.c src/metag/sysv.S src/microblaze/ffi.c \ + src/microblaze/sysv.S src/mips/ffi.c src/mips/o32.S \ + src/mips/n32.S src/moxie/ffi.c src/moxie/eabi.S \ + src/nios2/ffi.c src/nios2/sysv.S src/or1k/ffi.c \ + src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S src/pa/hpux32.S \ + src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + 
src/xtensa/ffi.c src/xtensa/sysv.S src/kvx/ffi.c \ + src/kvx/sysv.S + +libffi_la_LIBADD = $(TARGET_OBJ) +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) +AM_CFLAGS = $(am__append_3) +libffi_version_script = +##libffi_version_script = -Wl,--version-script,libffi.map +##libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = +##libffi_version_dep = libffi.map +##libffi_version_dep = libffi.map-sun +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) +all: fficonfig.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + +.SUFFIXES: +.SUFFIXES: .S .c .lo .o .obj +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: # $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +fficonfig.h: stamp-h1 + @test -f $@ || rm -f stamp-h1 + @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 + +stamp-h1: $(srcdir)/fficonfig.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status fficonfig.h +$(srcdir)/fficonfig.h.in: # $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f fficonfig.h stamp-h1 +libffi.pc: $(top_builddir)/config.status $(srcdir)/libffi.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) 
'$(DESTDIR)$(toolexeclibdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \ + } + +uninstall-toolexeclibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \ + done + +clean-toolexeclibLTLIBRARIES: + -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES) + @list='$(toolexeclib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } +src/$(am__dirstamp): + @$(MKDIR_P) src + @: > src/$(am__dirstamp) +src/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/$(DEPDIR) + @: > src/$(DEPDIR)/$(am__dirstamp) +src/prep_cif.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/types.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/java_raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/closures.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/debug.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +src/aarch64/$(am__dirstamp): + @$(MKDIR_P) src/aarch64 + @: > src/aarch64/$(am__dirstamp) +src/aarch64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/aarch64/$(DEPDIR) + @: > src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/aarch64/win64_armasm.lo: src/aarch64/$(am__dirstamp) \ + src/aarch64/$(DEPDIR)/$(am__dirstamp) +src/alpha/$(am__dirstamp): + @$(MKDIR_P) src/alpha + @: > src/alpha/$(am__dirstamp) +src/alpha/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/alpha/$(DEPDIR) + @: > src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/ffi.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/alpha/osf.lo: src/alpha/$(am__dirstamp) \ + src/alpha/$(DEPDIR)/$(am__dirstamp) +src/arc/$(am__dirstamp): + @$(MKDIR_P) src/arc + @: > src/arc/$(am__dirstamp) +src/arc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arc/$(DEPDIR) + @: > src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/ffi.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arc/arcompact.lo: src/arc/$(am__dirstamp) \ + src/arc/$(DEPDIR)/$(am__dirstamp) +src/arm/$(am__dirstamp): + @$(MKDIR_P) src/arm + @: > src/arm/$(am__dirstamp) +src/arm/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/arm/$(DEPDIR) + @: > src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/ffi.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/arm/sysv_msvc_arm32.lo: src/arm/$(am__dirstamp) \ + src/arm/$(DEPDIR)/$(am__dirstamp) +src/avr32/$(am__dirstamp): + @$(MKDIR_P) src/avr32 + @: > src/avr32/$(am__dirstamp) +src/avr32/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/avr32/$(DEPDIR) + @: > 
src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/ffi.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/avr32/sysv.lo: src/avr32/$(am__dirstamp) \ + src/avr32/$(DEPDIR)/$(am__dirstamp) +src/bfin/$(am__dirstamp): + @$(MKDIR_P) src/bfin + @: > src/bfin/$(am__dirstamp) +src/bfin/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/bfin/$(DEPDIR) + @: > src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/ffi.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/bfin/sysv.lo: src/bfin/$(am__dirstamp) \ + src/bfin/$(DEPDIR)/$(am__dirstamp) +src/cris/$(am__dirstamp): + @$(MKDIR_P) src/cris + @: > src/cris/$(am__dirstamp) +src/cris/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/cris/$(DEPDIR) + @: > src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/ffi.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/cris/sysv.lo: src/cris/$(am__dirstamp) \ + src/cris/$(DEPDIR)/$(am__dirstamp) +src/frv/$(am__dirstamp): + @$(MKDIR_P) src/frv + @: > src/frv/$(am__dirstamp) +src/frv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/frv/$(DEPDIR) + @: > src/frv/$(DEPDIR)/$(am__dirstamp) +src/frv/ffi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/csky/$(am__dirstamp): + @$(MKDIR_P) src/csky + @: > src/csky/$(am__dirstamp) +src/csky/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/csky/$(DEPDIR) + @: > src/csky/$(DEPDIR)/$(am__dirstamp) +src/csky/ffi.lo: src/csky/$(am__dirstamp) \ + src/csky/$(DEPDIR)/$(am__dirstamp) +src/csky/sysv.lo: src/csky/$(am__dirstamp) \ + src/csky/$(DEPDIR)/$(am__dirstamp) +src/frv/eabi.lo: src/frv/$(am__dirstamp) \ + src/frv/$(DEPDIR)/$(am__dirstamp) +src/ia64/$(am__dirstamp): + @$(MKDIR_P) src/ia64 + @: > src/ia64/$(am__dirstamp) +src/ia64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/ia64/$(DEPDIR) + @: > src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/ffi.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/ia64/unix.lo: src/ia64/$(am__dirstamp) \ + src/ia64/$(DEPDIR)/$(am__dirstamp) +src/m32r/$(am__dirstamp): + @$(MKDIR_P) src/m32r + @: > src/m32r/$(am__dirstamp) +src/m32r/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m32r/$(DEPDIR) + @: > src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/ffi.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m32r/sysv.lo: src/m32r/$(am__dirstamp) \ + src/m32r/$(DEPDIR)/$(am__dirstamp) +src/m68k/$(am__dirstamp): + @$(MKDIR_P) src/m68k + @: > src/m68k/$(am__dirstamp) +src/m68k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m68k/$(DEPDIR) + @: > src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/ffi.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m68k/sysv.lo: src/m68k/$(am__dirstamp) \ + src/m68k/$(DEPDIR)/$(am__dirstamp) +src/m88k/$(am__dirstamp): + @$(MKDIR_P) src/m88k + @: > src/m88k/$(am__dirstamp) +src/m88k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/m88k/$(DEPDIR) + @: > src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/ffi.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/m88k/obsd.lo: src/m88k/$(am__dirstamp) \ + src/m88k/$(DEPDIR)/$(am__dirstamp) +src/metag/$(am__dirstamp): + @$(MKDIR_P) src/metag + @: > src/metag/$(am__dirstamp) +src/metag/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/metag/$(DEPDIR) + @: > src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/ffi.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/metag/sysv.lo: src/metag/$(am__dirstamp) \ + src/metag/$(DEPDIR)/$(am__dirstamp) +src/microblaze/$(am__dirstamp): + @$(MKDIR_P) src/microblaze + @: > src/microblaze/$(am__dirstamp) 
+src/microblaze/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/microblaze/$(DEPDIR) + @: > src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/ffi.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/microblaze/sysv.lo: src/microblaze/$(am__dirstamp) \ + src/microblaze/$(DEPDIR)/$(am__dirstamp) +src/mips/$(am__dirstamp): + @$(MKDIR_P) src/mips + @: > src/mips/$(am__dirstamp) +src/mips/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/mips/$(DEPDIR) + @: > src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/ffi.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/o32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/mips/n32.lo: src/mips/$(am__dirstamp) \ + src/mips/$(DEPDIR)/$(am__dirstamp) +src/moxie/$(am__dirstamp): + @$(MKDIR_P) src/moxie + @: > src/moxie/$(am__dirstamp) +src/moxie/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/moxie/$(DEPDIR) + @: > src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/ffi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/moxie/eabi.lo: src/moxie/$(am__dirstamp) \ + src/moxie/$(DEPDIR)/$(am__dirstamp) +src/nios2/$(am__dirstamp): + @$(MKDIR_P) src/nios2 + @: > src/nios2/$(am__dirstamp) +src/nios2/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/nios2/$(DEPDIR) + @: > src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/ffi.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/nios2/sysv.lo: src/nios2/$(am__dirstamp) \ + src/nios2/$(DEPDIR)/$(am__dirstamp) +src/or1k/$(am__dirstamp): + @$(MKDIR_P) src/or1k + @: > src/or1k/$(am__dirstamp) +src/or1k/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/or1k/$(DEPDIR) + @: > src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/ffi.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/or1k/sysv.lo: src/or1k/$(am__dirstamp) \ + src/or1k/$(DEPDIR)/$(am__dirstamp) +src/pa/$(am__dirstamp): + @$(MKDIR_P) src/pa + @: > src/pa/$(am__dirstamp) +src/pa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/pa/$(DEPDIR) + @: > src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/ffi.lo: src/pa/$(am__dirstamp) src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/linux.lo: src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/pa/hpux32.lo: src/pa/$(am__dirstamp) \ + src/pa/$(DEPDIR)/$(am__dirstamp) +src/powerpc/$(am__dirstamp): + @$(MKDIR_P) src/powerpc + @: > src/powerpc/$(am__dirstamp) +src/powerpc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/powerpc/$(DEPDIR) + @: > src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/sysv.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/linux64_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ppc_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/aix_closure.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \ + 
src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/powerpc/ffi_darwin.lo: src/powerpc/$(am__dirstamp) \ + src/powerpc/$(DEPDIR)/$(am__dirstamp) +src/riscv/$(am__dirstamp): + @$(MKDIR_P) src/riscv + @: > src/riscv/$(am__dirstamp) +src/riscv/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/riscv/$(DEPDIR) + @: > src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/ffi.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/riscv/sysv.lo: src/riscv/$(am__dirstamp) \ + src/riscv/$(DEPDIR)/$(am__dirstamp) +src/s390/$(am__dirstamp): + @$(MKDIR_P) src/s390 + @: > src/s390/$(am__dirstamp) +src/s390/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/s390/$(DEPDIR) + @: > src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/ffi.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/s390/sysv.lo: src/s390/$(am__dirstamp) \ + src/s390/$(DEPDIR)/$(am__dirstamp) +src/sh/$(am__dirstamp): + @$(MKDIR_P) src/sh + @: > src/sh/$(am__dirstamp) +src/sh/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh/$(DEPDIR) + @: > src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/ffi.lo: src/sh/$(am__dirstamp) src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh/sysv.lo: src/sh/$(am__dirstamp) \ + src/sh/$(DEPDIR)/$(am__dirstamp) +src/sh64/$(am__dirstamp): + @$(MKDIR_P) src/sh64 + @: > src/sh64/$(am__dirstamp) +src/sh64/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sh64/$(DEPDIR) + @: > src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/ffi.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sh64/sysv.lo: src/sh64/$(am__dirstamp) \ + src/sh64/$(DEPDIR)/$(am__dirstamp) +src/sparc/$(am__dirstamp): + @$(MKDIR_P) src/sparc + @: > src/sparc/$(am__dirstamp) +src/sparc/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/sparc/$(DEPDIR) + @: > src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/ffi64.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v8.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/sparc/v9.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) +src/tile/$(am__dirstamp): + @$(MKDIR_P) src/tile + @: > src/tile/$(am__dirstamp) +src/tile/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/tile/$(DEPDIR) + @: > src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/ffi.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/tile/tile.lo: src/tile/$(am__dirstamp) \ + src/tile/$(DEPDIR)/$(am__dirstamp) +src/vax/$(am__dirstamp): + @$(MKDIR_P) src/vax + @: > src/vax/$(am__dirstamp) +src/vax/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/vax/$(DEPDIR) + @: > src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/ffi.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/vax/elfbsd.lo: src/vax/$(am__dirstamp) \ + src/vax/$(DEPDIR)/$(am__dirstamp) +src/x86/$(am__dirstamp): + @$(MKDIR_P) src/x86 + @: > src/x86/$(am__dirstamp) +src/x86/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/x86/$(DEPDIR) + @: > src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffiw64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/ffi64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/unix64.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/sysv_intel.lo: src/x86/$(am__dirstamp) \ + 
src/x86/$(DEPDIR)/$(am__dirstamp) +src/x86/win64_intel.lo: src/x86/$(am__dirstamp) \ + src/x86/$(DEPDIR)/$(am__dirstamp) +src/xtensa/$(am__dirstamp): + @$(MKDIR_P) src/xtensa + @: > src/xtensa/$(am__dirstamp) +src/xtensa/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/xtensa/$(DEPDIR) + @: > src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/ffi.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/xtensa/sysv.lo: src/xtensa/$(am__dirstamp) \ + src/xtensa/$(DEPDIR)/$(am__dirstamp) +src/kvx/$(am__dirstamp): + @$(MKDIR_P) src/kvx + @: > src/kvx/$(am__dirstamp) +src/kvx/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/kvx/$(DEPDIR) + @: > src/kvx/$(DEPDIR)/$(am__dirstamp) +src/kvx/ffi.lo: src/kvx/$(am__dirstamp) \ + src/kvx/$(DEPDIR)/$(am__dirstamp) +src/kvx/sysv.lo: src/kvx/$(am__dirstamp) \ + src/kvx/$(DEPDIR)/$(am__dirstamp) + +libffi.la: $(libffi_la_OBJECTS) $(libffi_la_DEPENDENCIES) $(EXTRA_libffi_la_DEPENDENCIES) + $(AM_V_CCLD)$(libffi_la_LINK) -rpath $(toolexeclibdir) $(libffi_la_OBJECTS) $(libffi_la_LIBADD) $(LIBS) + +libffi_convenience.la: $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_DEPENDENCIES) $(EXTRA_libffi_convenience_la_DEPENDENCIES) + $(AM_V_CCLD)$(LINK) $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f src/*.$(OBJEXT) + -rm -f src/*.lo + -rm -f src/aarch64/*.$(OBJEXT) + -rm -f src/aarch64/*.lo + -rm -f src/alpha/*.$(OBJEXT) + -rm -f src/alpha/*.lo + -rm -f src/arc/*.$(OBJEXT) + -rm -f src/arc/*.lo + -rm -f src/arm/*.$(OBJEXT) + -rm -f src/arm/*.lo + -rm -f src/avr32/*.$(OBJEXT) + -rm -f src/avr32/*.lo + -rm -f src/bfin/*.$(OBJEXT) + -rm -f src/bfin/*.lo + -rm -f src/cris/*.$(OBJEXT) + -rm -f src/cris/*.lo + -rm -f src/csky/*.$(OBJEXT) + -rm -f src/csky/*.lo + -rm -f src/frv/*.$(OBJEXT) + -rm -f src/frv/*.lo + -rm -f src/ia64/*.$(OBJEXT) + -rm -f src/ia64/*.lo + -rm -f src/kvx/*.$(OBJEXT) + -rm -f src/kvx/*.lo + -rm -f src/m32r/*.$(OBJEXT) + -rm -f src/m32r/*.lo + -rm -f src/m68k/*.$(OBJEXT) + -rm -f src/m68k/*.lo + -rm -f src/m88k/*.$(OBJEXT) + -rm -f src/m88k/*.lo + -rm -f src/metag/*.$(OBJEXT) + -rm -f src/metag/*.lo + -rm -f src/microblaze/*.$(OBJEXT) + -rm -f src/microblaze/*.lo + -rm -f src/mips/*.$(OBJEXT) + -rm -f src/mips/*.lo + -rm -f src/moxie/*.$(OBJEXT) + -rm -f src/moxie/*.lo + -rm -f src/nios2/*.$(OBJEXT) + -rm -f src/nios2/*.lo + -rm -f src/or1k/*.$(OBJEXT) + -rm -f src/or1k/*.lo + -rm -f src/pa/*.$(OBJEXT) + -rm -f src/pa/*.lo + -rm -f src/powerpc/*.$(OBJEXT) + -rm -f src/powerpc/*.lo + -rm -f src/riscv/*.$(OBJEXT) + -rm -f src/riscv/*.lo + -rm -f src/s390/*.$(OBJEXT) + -rm -f src/s390/*.lo + -rm -f src/sh/*.$(OBJEXT) + -rm -f src/sh/*.lo + -rm -f src/sh64/*.$(OBJEXT) + -rm -f src/sh64/*.lo + -rm -f src/sparc/*.$(OBJEXT) + -rm -f src/sparc/*.lo + -rm -f src/tile/*.$(OBJEXT) + -rm -f src/tile/*.lo + -rm -f src/vax/*.$(OBJEXT) + -rm -f src/vax/*.lo + -rm -f src/x86/*.$(OBJEXT) + -rm -f src/x86/*.lo + -rm -f src/xtensa/*.$(OBJEXT) + -rm -f src/xtensa/*.lo + +distclean-compile: + -rm -f *.tab.c + +#include src/$(DEPDIR)/closures.Plo # am--include-marker +#include src/$(DEPDIR)/debug.Plo # am--include-marker +#include src/$(DEPDIR)/java_raw_api.Plo # am--include-marker +#include src/$(DEPDIR)/prep_cif.Plo # am--include-marker +#include src/$(DEPDIR)/raw_api.Plo # am--include-marker +#include src/$(DEPDIR)/types.Plo # am--include-marker +#include src/aarch64/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/aarch64/$(DEPDIR)/sysv.Plo # am--include-marker 
+#include src/aarch64/$(DEPDIR)/win64_armasm.Plo # am--include-marker +#include src/alpha/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/alpha/$(DEPDIR)/osf.Plo # am--include-marker +#include src/arc/$(DEPDIR)/arcompact.Plo # am--include-marker +#include src/arc/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/arm/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/arm/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo # am--include-marker +#include src/avr32/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/avr32/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/bfin/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/bfin/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/cris/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/cris/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/csky/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/csky/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/frv/$(DEPDIR)/eabi.Plo # am--include-marker +#include src/frv/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/ia64/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/ia64/$(DEPDIR)/unix.Plo # am--include-marker +#include src/kvx/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/kvx/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/m32r/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/m32r/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/m68k/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/m68k/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/m88k/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/m88k/$(DEPDIR)/obsd.Plo # am--include-marker +#include src/metag/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/metag/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/microblaze/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/microblaze/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/mips/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/mips/$(DEPDIR)/n32.Plo # am--include-marker +#include src/mips/$(DEPDIR)/o32.Plo # am--include-marker +#include src/moxie/$(DEPDIR)/eabi.Plo # am--include-marker +#include src/moxie/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/nios2/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/nios2/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/or1k/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/or1k/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/pa/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/pa/$(DEPDIR)/hpux32.Plo # am--include-marker +#include src/pa/$(DEPDIR)/linux.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/aix.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/aix_closure.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/darwin.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/darwin_closure.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/ffi_darwin.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/ffi_linux64.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/ffi_sysv.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/linux64.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/linux64_closure.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/ppc_closure.Plo # am--include-marker +#include src/powerpc/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/riscv/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/riscv/$(DEPDIR)/sysv.Plo # am--include-marker +#include 
src/s390/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/s390/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/sh/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/sh/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/sh64/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/sh64/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/sparc/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/sparc/$(DEPDIR)/ffi64.Plo # am--include-marker +#include src/sparc/$(DEPDIR)/v8.Plo # am--include-marker +#include src/sparc/$(DEPDIR)/v9.Plo # am--include-marker +#include src/tile/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/tile/$(DEPDIR)/tile.Plo # am--include-marker +#include src/vax/$(DEPDIR)/elfbsd.Plo # am--include-marker +#include src/vax/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/x86/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/x86/$(DEPDIR)/ffi64.Plo # am--include-marker +#include src/x86/$(DEPDIR)/ffiw64.Plo # am--include-marker +#include src/x86/$(DEPDIR)/sysv.Plo # am--include-marker +#include src/x86/$(DEPDIR)/sysv_intel.Plo # am--include-marker +#include src/x86/$(DEPDIR)/unix64.Plo # am--include-marker +#include src/x86/$(DEPDIR)/win64.Plo # am--include-marker +#include src/x86/$(DEPDIR)/win64_intel.Plo # am--include-marker +#include src/xtensa/$(DEPDIR)/ffi.Plo # am--include-marker +#include src/xtensa/$(DEPDIR)/sysv.Plo # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.S.o: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +# $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CPPAS)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(CPPASCOMPILE) -c -o $@ $< + +.S.obj: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +# $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CPPAS)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.S.lo: +# $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +# $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Plo +# $(AM_V_CPPAS)source='$<' object='$@' libtool=yes +# DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) + $(AM_V_CPPAS)$(LTCPPASCOMPILE) -c -o $@ $< + +.c.o: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +# $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CC)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(COMPILE) -c -o $@ $< + +.c.obj: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +# $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Po +# $(AM_V_CC)source='$<' object='$@' libtool=no +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + $(AM_V_CC)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.c.lo: +# $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +# $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +# $(am__mv) $$depbase.Tpo $$depbase.Plo +# $(AM_V_CC)source='$<' object='$@' libtool=yes +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) + 
$(AM_V_CC)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + -rm -rf src/.libs src/_libs + -rm -rf src/aarch64/.libs src/aarch64/_libs + -rm -rf src/alpha/.libs src/alpha/_libs + -rm -rf src/arc/.libs src/arc/_libs + -rm -rf src/arm/.libs src/arm/_libs + -rm -rf src/avr32/.libs src/avr32/_libs + -rm -rf src/bfin/.libs src/bfin/_libs + -rm -rf src/cris/.libs src/cris/_libs + -rm -rf src/csky/.libs src/csky/_libs + -rm -rf src/frv/.libs src/frv/_libs + -rm -rf src/ia64/.libs src/ia64/_libs + -rm -rf src/kvx/.libs src/kvx/_libs + -rm -rf src/m32r/.libs src/m32r/_libs + -rm -rf src/m68k/.libs src/m68k/_libs + -rm -rf src/m88k/.libs src/m88k/_libs + -rm -rf src/metag/.libs src/metag/_libs + -rm -rf src/microblaze/.libs src/microblaze/_libs + -rm -rf src/mips/.libs src/mips/_libs + -rm -rf src/moxie/.libs src/moxie/_libs + -rm -rf src/nios2/.libs src/nios2/_libs + -rm -rf src/or1k/.libs src/or1k/_libs + -rm -rf src/pa/.libs src/pa/_libs + -rm -rf src/powerpc/.libs src/powerpc/_libs + -rm -rf src/riscv/.libs src/riscv/_libs + -rm -rf src/s390/.libs src/s390/_libs + -rm -rf src/sh/.libs src/sh/_libs + -rm -rf src/sh64/.libs src/sh64/_libs + -rm -rf src/sparc/.libs src/sparc/_libs + -rm -rf src/tile/.libs src/tile/_libs + -rm -rf src/vax/.libs src/vax/_libs + -rm -rf src/x86/.libs src/x86/_libs + -rm -rf src/xtensa/.libs src/xtensa/_libs + +distclean-libtool: + -rm -f libtool config.lt +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! 
-s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-tarZ: distdir + @echo WARNING: "Support for distribution archives compressed with" \ + "legacy program 'compress' is deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + @echo WARNING: "Support for shar distribution archives is" \ + "deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. +distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build/sub \ + && ../../configure \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + --srcdir=../.. --prefix="$$dc_install_base" \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . ; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) fficonfig.h +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(pkgconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f src/$(DEPDIR)/$(am__dirstamp) + -rm -f src/$(am__dirstamp) + -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/aarch64/$(am__dirstamp) + -rm -f src/alpha/$(DEPDIR)/$(am__dirstamp) + -rm -f src/alpha/$(am__dirstamp) + -rm -f src/arc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arc/$(am__dirstamp) + -rm -f src/arm/$(DEPDIR)/$(am__dirstamp) + -rm -f src/arm/$(am__dirstamp) + -rm -f src/avr32/$(DEPDIR)/$(am__dirstamp) + -rm -f src/avr32/$(am__dirstamp) + -rm -f src/bfin/$(DEPDIR)/$(am__dirstamp) + -rm -f src/bfin/$(am__dirstamp) + -rm -f src/cris/$(DEPDIR)/$(am__dirstamp) + -rm -f src/cris/$(am__dirstamp) + -rm -f src/csky/$(DEPDIR)/$(am__dirstamp) + -rm -f src/csky/$(am__dirstamp) + -rm -f src/frv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/frv/$(am__dirstamp) + -rm -f src/ia64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/ia64/$(am__dirstamp) + -rm -f src/kvx/$(DEPDIR)/$(am__dirstamp) + -rm -f src/kvx/$(am__dirstamp) + -rm -f src/m32r/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m32r/$(am__dirstamp) + -rm -f src/m68k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m68k/$(am__dirstamp) + -rm -f src/m88k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/m88k/$(am__dirstamp) + -rm -f src/metag/$(DEPDIR)/$(am__dirstamp) + -rm -f src/metag/$(am__dirstamp) + -rm -f src/microblaze/$(DEPDIR)/$(am__dirstamp) + -rm -f src/microblaze/$(am__dirstamp) + -rm -f src/mips/$(DEPDIR)/$(am__dirstamp) + -rm -f src/mips/$(am__dirstamp) + -rm -f src/moxie/$(DEPDIR)/$(am__dirstamp) + -rm -f src/moxie/$(am__dirstamp) + -rm -f src/nios2/$(DEPDIR)/$(am__dirstamp) + -rm -f src/nios2/$(am__dirstamp) + -rm -f src/or1k/$(DEPDIR)/$(am__dirstamp) + -rm -f src/or1k/$(am__dirstamp) + -rm -f src/pa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/pa/$(am__dirstamp) + -rm -f src/powerpc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/powerpc/$(am__dirstamp) + -rm -f src/riscv/$(DEPDIR)/$(am__dirstamp) + -rm -f src/riscv/$(am__dirstamp) + -rm -f src/s390/$(DEPDIR)/$(am__dirstamp) + -rm -f src/s390/$(am__dirstamp) + -rm -f src/sh/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh/$(am__dirstamp) + -rm -f src/sh64/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sh64/$(am__dirstamp) + -rm -f src/sparc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sparc/$(am__dirstamp) + -rm -f src/tile/$(DEPDIR)/$(am__dirstamp) + -rm -f src/tile/$(am__dirstamp) + -rm -f src/vax/$(DEPDIR)/$(am__dirstamp) + -rm -f src/vax/$(am__dirstamp) + -rm -f src/x86/$(DEPDIR)/$(am__dirstamp) + -rm -f src/x86/$(am__dirstamp) + -rm -f src/xtensa/$(DEPDIR)/$(am__dirstamp) + -rm -f src/xtensa/$(am__dirstamp) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-recursive + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f src/$(DEPDIR)/closures.Plo + -rm -f src/$(DEPDIR)/debug.Plo + -rm -f src/$(DEPDIR)/java_raw_api.Plo + -rm -f src/$(DEPDIR)/prep_cif.Plo + -rm -f src/$(DEPDIR)/raw_api.Plo + -rm -f src/$(DEPDIR)/types.Plo + -rm -f src/aarch64/$(DEPDIR)/ffi.Plo + -rm -f src/aarch64/$(DEPDIR)/sysv.Plo + -rm -f src/aarch64/$(DEPDIR)/win64_armasm.Plo + -rm -f src/alpha/$(DEPDIR)/ffi.Plo + -rm -f src/alpha/$(DEPDIR)/osf.Plo + -rm -f src/arc/$(DEPDIR)/arcompact.Plo + -rm -f src/arc/$(DEPDIR)/ffi.Plo + -rm -f src/arm/$(DEPDIR)/ffi.Plo + -rm -f src/arm/$(DEPDIR)/sysv.Plo + -rm -f src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo + -rm -f src/avr32/$(DEPDIR)/ffi.Plo + -rm -f src/avr32/$(DEPDIR)/sysv.Plo + -rm -f src/bfin/$(DEPDIR)/ffi.Plo + -rm -f src/bfin/$(DEPDIR)/sysv.Plo + -rm -f src/cris/$(DEPDIR)/ffi.Plo + -rm -f src/cris/$(DEPDIR)/sysv.Plo + -rm -f src/csky/$(DEPDIR)/ffi.Plo + -rm -f src/csky/$(DEPDIR)/sysv.Plo + -rm -f src/frv/$(DEPDIR)/eabi.Plo + -rm -f src/frv/$(DEPDIR)/ffi.Plo + -rm -f src/ia64/$(DEPDIR)/ffi.Plo + -rm -f src/ia64/$(DEPDIR)/unix.Plo + -rm -f src/kvx/$(DEPDIR)/ffi.Plo + -rm -f src/kvx/$(DEPDIR)/sysv.Plo + -rm -f src/m32r/$(DEPDIR)/ffi.Plo + -rm -f src/m32r/$(DEPDIR)/sysv.Plo + -rm -f src/m68k/$(DEPDIR)/ffi.Plo + -rm -f src/m68k/$(DEPDIR)/sysv.Plo + -rm -f src/m88k/$(DEPDIR)/ffi.Plo + -rm -f src/m88k/$(DEPDIR)/obsd.Plo + -rm -f src/metag/$(DEPDIR)/ffi.Plo + -rm -f src/metag/$(DEPDIR)/sysv.Plo + -rm -f src/microblaze/$(DEPDIR)/ffi.Plo + -rm -f src/microblaze/$(DEPDIR)/sysv.Plo + -rm -f src/mips/$(DEPDIR)/ffi.Plo + -rm -f src/mips/$(DEPDIR)/n32.Plo + -rm -f src/mips/$(DEPDIR)/o32.Plo + -rm -f src/moxie/$(DEPDIR)/eabi.Plo + -rm -f src/moxie/$(DEPDIR)/ffi.Plo + -rm -f src/nios2/$(DEPDIR)/ffi.Plo + -rm -f src/nios2/$(DEPDIR)/sysv.Plo + -rm -f src/or1k/$(DEPDIR)/ffi.Plo + -rm -f src/or1k/$(DEPDIR)/sysv.Plo + -rm -f src/pa/$(DEPDIR)/ffi.Plo + -rm -f src/pa/$(DEPDIR)/hpux32.Plo + -rm -f src/pa/$(DEPDIR)/linux.Plo + -rm -f src/powerpc/$(DEPDIR)/aix.Plo + -rm -f src/powerpc/$(DEPDIR)/aix_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/darwin.Plo + -rm -f src/powerpc/$(DEPDIR)/darwin_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi_darwin.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi_linux64.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi_sysv.Plo + -rm -f src/powerpc/$(DEPDIR)/linux64.Plo + -rm -f src/powerpc/$(DEPDIR)/linux64_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/ppc_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/sysv.Plo + -rm -f src/riscv/$(DEPDIR)/ffi.Plo + -rm -f src/riscv/$(DEPDIR)/sysv.Plo + -rm -f src/s390/$(DEPDIR)/ffi.Plo + -rm -f src/s390/$(DEPDIR)/sysv.Plo + -rm -f src/sh/$(DEPDIR)/ffi.Plo + -rm -f src/sh/$(DEPDIR)/sysv.Plo + -rm -f src/sh64/$(DEPDIR)/ffi.Plo + -rm -f src/sh64/$(DEPDIR)/sysv.Plo + -rm -f src/sparc/$(DEPDIR)/ffi.Plo + -rm -f src/sparc/$(DEPDIR)/ffi64.Plo + -rm -f src/sparc/$(DEPDIR)/v8.Plo + -rm -f src/sparc/$(DEPDIR)/v9.Plo + -rm -f src/tile/$(DEPDIR)/ffi.Plo + -rm -f src/tile/$(DEPDIR)/tile.Plo + -rm -f src/vax/$(DEPDIR)/elfbsd.Plo + -rm -f src/vax/$(DEPDIR)/ffi.Plo + -rm -f src/x86/$(DEPDIR)/ffi.Plo + -rm -f src/x86/$(DEPDIR)/ffi64.Plo + -rm -f src/x86/$(DEPDIR)/ffiw64.Plo + -rm -f src/x86/$(DEPDIR)/sysv.Plo + -rm -f src/x86/$(DEPDIR)/sysv_intel.Plo + -rm -f src/x86/$(DEPDIR)/unix64.Plo + -rm -f src/x86/$(DEPDIR)/win64.Plo + -rm -f 
src/x86/$(DEPDIR)/win64_intel.Plo + -rm -f src/xtensa/$(DEPDIR)/ffi.Plo + -rm -f src/xtensa/$(DEPDIR)/sysv.Plo + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-pkgconfigDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: install-toolexeclibLTLIBRARIES + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f src/$(DEPDIR)/closures.Plo + -rm -f src/$(DEPDIR)/debug.Plo + -rm -f src/$(DEPDIR)/java_raw_api.Plo + -rm -f src/$(DEPDIR)/prep_cif.Plo + -rm -f src/$(DEPDIR)/raw_api.Plo + -rm -f src/$(DEPDIR)/types.Plo + -rm -f src/aarch64/$(DEPDIR)/ffi.Plo + -rm -f src/aarch64/$(DEPDIR)/sysv.Plo + -rm -f src/aarch64/$(DEPDIR)/win64_armasm.Plo + -rm -f src/alpha/$(DEPDIR)/ffi.Plo + -rm -f src/alpha/$(DEPDIR)/osf.Plo + -rm -f src/arc/$(DEPDIR)/arcompact.Plo + -rm -f src/arc/$(DEPDIR)/ffi.Plo + -rm -f src/arm/$(DEPDIR)/ffi.Plo + -rm -f src/arm/$(DEPDIR)/sysv.Plo + -rm -f src/arm/$(DEPDIR)/sysv_msvc_arm32.Plo + -rm -f src/avr32/$(DEPDIR)/ffi.Plo + -rm -f src/avr32/$(DEPDIR)/sysv.Plo + -rm -f src/bfin/$(DEPDIR)/ffi.Plo + -rm -f src/bfin/$(DEPDIR)/sysv.Plo + -rm -f src/cris/$(DEPDIR)/ffi.Plo + -rm -f src/cris/$(DEPDIR)/sysv.Plo + -rm -f src/csky/$(DEPDIR)/ffi.Plo + -rm -f src/csky/$(DEPDIR)/sysv.Plo + -rm -f src/frv/$(DEPDIR)/eabi.Plo + -rm -f src/frv/$(DEPDIR)/ffi.Plo + -rm -f src/ia64/$(DEPDIR)/ffi.Plo + -rm -f src/ia64/$(DEPDIR)/unix.Plo + -rm -f src/kvx/$(DEPDIR)/ffi.Plo + -rm -f src/kvx/$(DEPDIR)/sysv.Plo + -rm -f src/m32r/$(DEPDIR)/ffi.Plo + -rm -f src/m32r/$(DEPDIR)/sysv.Plo + -rm -f src/m68k/$(DEPDIR)/ffi.Plo + -rm -f src/m68k/$(DEPDIR)/sysv.Plo + -rm -f src/m88k/$(DEPDIR)/ffi.Plo + -rm -f src/m88k/$(DEPDIR)/obsd.Plo + -rm -f src/metag/$(DEPDIR)/ffi.Plo + -rm -f src/metag/$(DEPDIR)/sysv.Plo + -rm -f src/microblaze/$(DEPDIR)/ffi.Plo + -rm -f src/microblaze/$(DEPDIR)/sysv.Plo + -rm -f src/mips/$(DEPDIR)/ffi.Plo + -rm -f src/mips/$(DEPDIR)/n32.Plo + -rm -f src/mips/$(DEPDIR)/o32.Plo + -rm -f src/moxie/$(DEPDIR)/eabi.Plo + -rm -f src/moxie/$(DEPDIR)/ffi.Plo + -rm -f src/nios2/$(DEPDIR)/ffi.Plo + -rm -f src/nios2/$(DEPDIR)/sysv.Plo + -rm -f src/or1k/$(DEPDIR)/ffi.Plo + -rm -f src/or1k/$(DEPDIR)/sysv.Plo + -rm -f src/pa/$(DEPDIR)/ffi.Plo + -rm -f src/pa/$(DEPDIR)/hpux32.Plo + -rm -f src/pa/$(DEPDIR)/linux.Plo + -rm -f src/powerpc/$(DEPDIR)/aix.Plo + -rm -f src/powerpc/$(DEPDIR)/aix_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/darwin.Plo + -rm -f src/powerpc/$(DEPDIR)/darwin_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi_darwin.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi_linux64.Plo + -rm -f src/powerpc/$(DEPDIR)/ffi_sysv.Plo + -rm -f src/powerpc/$(DEPDIR)/linux64.Plo + -rm -f src/powerpc/$(DEPDIR)/linux64_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/ppc_closure.Plo + -rm -f src/powerpc/$(DEPDIR)/sysv.Plo + -rm -f src/riscv/$(DEPDIR)/ffi.Plo + -rm -f src/riscv/$(DEPDIR)/sysv.Plo + -rm -f src/s390/$(DEPDIR)/ffi.Plo + -rm -f src/s390/$(DEPDIR)/sysv.Plo + -rm -f src/sh/$(DEPDIR)/ffi.Plo + -rm -f 
src/sh/$(DEPDIR)/sysv.Plo + -rm -f src/sh64/$(DEPDIR)/ffi.Plo + -rm -f src/sh64/$(DEPDIR)/sysv.Plo + -rm -f src/sparc/$(DEPDIR)/ffi.Plo + -rm -f src/sparc/$(DEPDIR)/ffi64.Plo + -rm -f src/sparc/$(DEPDIR)/v8.Plo + -rm -f src/sparc/$(DEPDIR)/v9.Plo + -rm -f src/tile/$(DEPDIR)/ffi.Plo + -rm -f src/tile/$(DEPDIR)/tile.Plo + -rm -f src/vax/$(DEPDIR)/elfbsd.Plo + -rm -f src/vax/$(DEPDIR)/ffi.Plo + -rm -f src/x86/$(DEPDIR)/ffi.Plo + -rm -f src/x86/$(DEPDIR)/ffi64.Plo + -rm -f src/x86/$(DEPDIR)/ffiw64.Plo + -rm -f src/x86/$(DEPDIR)/sysv.Plo + -rm -f src/x86/$(DEPDIR)/sysv_intel.Plo + -rm -f src/x86/$(DEPDIR)/unix64.Plo + -rm -f src/x86/$(DEPDIR)/win64.Plo + -rm -f src/x86/$(DEPDIR)/win64_intel.Plo + -rm -f src/xtensa/$(DEPDIR)/ffi.Plo + -rm -f src/xtensa/$(DEPDIR)/sysv.Plo + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.MAKE: $(am__recursive_targets) all install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--depfiles am--refresh check check-am clean clean-cscope \ + clean-generic clean-libtool clean-noinstLTLIBRARIES \ + clean-toolexeclibLTLIBRARIES cscope cscopelist-am ctags \ + ctags-am dist dist-all dist-bzip2 dist-gzip dist-hook \ + dist-lzip dist-shar dist-tarZ dist-xz dist-zip distcheck \ + distclean distclean-compile distclean-generic distclean-hdr \ + distclean-libtool distclean-tags distcleancheck distdir \ + distuninstallcheck dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkgconfigDATA install-ps \ + install-ps-am install-strip install-toolexeclibLTLIBRARIES \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ + uninstall-pkgconfigDATA uninstall-toolexeclibLTLIBRARIES + +.PRECIOUS: Makefile + +##libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ +## $(libffi_la_OBJECTS) $(libffi_la_LIBADD) +## perl $(top_srcdir)/make_sunver.pl libffi.map \ +## `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ +## sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ +## > $@ || (rm -f $@ ; exit 1) + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $(top_srcdir)/libffi.map.in + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' ; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/config.log b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/config.log new file mode 100644 index 0000000..9c848e2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/config.log @@ -0,0 +1,2123 @@ +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libffi configure 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure --disable-static --with-pic=yes --disable-dependency-tracking --disable-docs --host=aarch64-apple-darwin21 + +## --------- ## +## Platform. ## +## --------- ## + +hostname = Arthurs-MacBook-Air.local +uname -m = arm64 +uname -r = 21.3.0 +uname -s = Darwin +uname -v = Darwin Kernel Version 21.3.0: Wed Jan 5 21:37:58 PST 2022; root:xnu-8019.80.24~20/RELEASE_ARM64_T8101 + +/usr/bin/uname -p = arm +/bin/uname -X = unknown + +/bin/arch = unknown +/usr/bin/arch -k = unknown +/usr/convex/getsysinfo = unknown +/usr/bin/hostinfo = Mach kernel version: + Darwin Kernel Version 21.3.0: Wed Jan 5 21:37:58 PST 2022; root:xnu-8019.80.24~20/RELEASE_ARM64_T8101 +Kernel configured for up to 8 processors. +8 processors are physically available. +8 processors are logically available. +Processor type: arm64e (ARM64E) +Processors active: 0 1 2 3 4 5 6 7 +Primary memory available: 16.00 gigabytes +Default processor set: 524 tasks, 2418 threads, 8 processors +Load average: 25.92, Mach factor: 0.45 +/bin/machine = unknown +/usr/bin/oslevel = unknown +/bin/universe = unknown + +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0/bin +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0@global/bin +PATH: /Users/arthur/.rvm/rubies/ruby-3.0.0/bin +PATH: /opt/homebrew/bin +PATH: /usr/local/bin +PATH: /usr/bin +PATH: /bin +PATH: /usr/sbin +PATH: /sbin +PATH: /usr/local/go/bin +PATH: /Library/Apple/usr/bin +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0/bin +PATH: /Users/arthur/.rvm/gems/ruby-3.0.0@global/bin +PATH: /Users/arthur/.rvm/rubies/ruby-3.0.0/bin +PATH: /Users/arthur/go/bin +PATH: /Users/arthur/.rvm/bin +PATH: /Users/arthur/.rvm/bin + + +## ----------- ## +## Core tests. 
## +## ----------- ## + +configure:2704: checking build system type +configure:2718: result: aarch64-apple-darwin21.3.0 +configure:2738: checking host system type +configure:2751: result: aarch64-apple-darwin21 +configure:2771: checking target system type +configure:2784: result: aarch64-apple-darwin21 +configure:2881: checking for gsed +configure:2912: result: sed +configure:2940: checking for a BSD-compatible install +configure:3008: result: /opt/homebrew/bin/ginstall -c +configure:3019: checking whether build environment is sane +configure:3074: result: yes +configure:3130: checking for aarch64-apple-darwin21-strip +configure:3160: result: no +configure:3170: checking for strip +configure:3186: found /usr/bin/strip +configure:3197: result: strip +configure:3222: checking for a thread-safe mkdir -p +configure:3261: result: /opt/homebrew/bin/gmkdir -p +configure:3268: checking for gawk +configure:3298: result: no +configure:3268: checking for mawk +configure:3298: result: no +configure:3268: checking for nawk +configure:3298: result: no +configure:3268: checking for awk +configure:3284: found /usr/bin/awk +configure:3295: result: awk +configure:3306: checking whether make sets $(MAKE) +configure:3328: result: yes +configure:3357: checking whether make supports nested variables +configure:3374: result: yes +configure:3519: checking for aarch64-apple-darwin21-gcc +configure:3546: result: gcc -fdeclspec +configure:3815: checking for C compiler version +configure:3824: gcc -fdeclspec --version >&5 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:3835: $? = 0 +configure:3824: gcc -fdeclspec -v >&5 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +clang: warning: argument unused during compilation: '-fdeclspec' [-Wunused-command-line-argument] +configure:3835: $? = 0 +configure:3824: gcc -fdeclspec -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:3835: $? = 1 +configure:3824: gcc -fdeclspec -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:3835: $? = 1 +configure:3855: checking whether the C compiler works +configure:3877: gcc -fdeclspec -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:3881: $? = 0 +configure:3929: result: yes +configure:3932: checking for C compiler default output file name +configure:3934: result: a.out +configure:3940: checking for suffix of executables +configure:3947: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. 
+ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:3951: $? = 0 +configure:3973: result: +configure:3995: checking whether we are cross compiling +configure:4003: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:4007: $? = 0 +configure:4014: ./conftest +configure:4018: $? = 0 +configure:4033: result: no +configure:4038: checking for suffix of object files +configure:4060: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4064: $? = 0 +configure:4085: result: o +configure:4089: checking whether we are using the GNU C compiler +configure:4108: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4108: $? = 0 +configure:4117: result: yes +configure:4126: checking whether gcc -fdeclspec accepts -g +configure:4146: gcc -fdeclspec -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4146: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4161: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4161: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. 
*/ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4177: gcc -fdeclspec -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4177: $? = 0 +configure:4187: result: yes +configure:4204: checking for gcc -fdeclspec option to accept ISO C89 +configure:4267: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4267: $? = 0 +configure:4280: result: none needed +configure:4305: checking whether gcc -fdeclspec understands -c and -o together +configure:4327: gcc -fdeclspec -c conftest.c -o conftest2.o +configure:4330: $? = 0 +configure:4327: gcc -fdeclspec -c conftest.c -o conftest2.o +configure:4330: $? = 0 +configure:4342: result: yes +configure:4362: checking whether make supports the include directive +configure:4377: make -f confmf.GNU && cat confinc.out +this is the am__doit target +configure:4380: $? = 0 +configure:4399: result: yes (GNU style) +configure:4424: checking dependency style of gcc -fdeclspec +configure:4535: result: none +configure:4564: checking for aarch64-apple-darwin21-g++ +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-c++ +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-gpp +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-aCC +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-CC +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-cxx +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-cc++ +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-cl.exe +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-FCC +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-KCC +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-RCC +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-xlC_r +configure:4594: result: no +configure:4564: checking for aarch64-apple-darwin21-xlC +configure:4594: result: no +configure:4608: checking for g++ +configure:4624: found /usr/bin/g++ +configure:4635: result: g++ +configure:4662: checking for C++ compiler version +configure:4671: g++ --version >&5 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/c++/4.2.1 +configure:4682: $? 
= 0 +configure:4671: g++ -v >&5 +Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/c++/4.2.1 +Apple clang version 13.0.0 (clang-1300.0.29.30) +Target: arm64-apple-darwin21.3.0 +Thread model: posix +InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin +configure:4682: $? = 0 +configure:4671: g++ -V >&5 +clang: error: argument to '-V' is missing (expected 1 value) +clang: error: no input files +configure:4682: $? = 1 +configure:4671: g++ -qversion >&5 +clang: error: unknown argument '-qversion'; did you mean '--version'? +clang: error: no input files +configure:4682: $? = 1 +configure:4686: checking whether we are using the GNU C++ compiler +configure:4705: g++ -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4705: $? = 0 +configure:4714: result: yes +configure:4723: checking whether g++ accepts -g +configure:4743: g++ -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4743: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4758: g++ -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4758: $? = 0 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:4774: g++ -c -g -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:4774: $? 
= 0 +configure:4784: result: yes +configure:4809: checking dependency style of g++ +configure:4920: result: none +configure:4950: checking dependency style of gcc -fdeclspec +configure:5059: result: none +configure:5121: checking how to print strings +configure:5148: result: printf +configure:5169: checking for a sed that does not truncate output +configure:5233: result: /usr/bin/sed +configure:5251: checking for grep that handles long lines and -e +configure:5309: result: /usr/bin/grep +configure:5314: checking for egrep +configure:5376: result: /usr/bin/grep -E +configure:5381: checking for fgrep +configure:5443: result: /usr/bin/grep -F +configure:5478: checking for ld used by gcc -fdeclspec +configure:5545: result: ld +configure:5552: checking if the linker (ld) is GNU ld +configure:5567: result: no +configure:5579: checking for BSD- or MS-compatible name lister (nm) +configure:5633: result: no +configure:5647: checking for aarch64-apple-darwin21-dumpbin +configure:5677: result: no +configure:5647: checking for aarch64-apple-darwin21-link +configure:5677: result: no +configure:5691: checking for dumpbin +configure:5721: result: no +configure:5691: checking for link +configure:5707: found /bin/link +configure:5718: result: link -dump +configure:5763: checking the name lister (nm) interface +configure:5770: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:5773: nm "conftest.o" +configure:5776: output +0000000000000000 S _some_variable +0000000000000000 t ltmp0 +0000000000000000 s ltmp1 +configure:5783: result: BSD nm +configure:5786: checking whether ln -s works +configure:5790: result: yes +configure:5798: checking the maximum length of command line arguments +configure:5929: result: 786432 +configure:5977: checking how to convert aarch64-apple-darwin21.3.0 file names to aarch64-apple-darwin21 format +configure:6017: result: func_convert_file_noop +configure:6024: checking how to convert aarch64-apple-darwin21.3.0 file names to toolchain format +configure:6044: result: func_convert_file_noop +configure:6051: checking for ld option to reload object files +configure:6058: result: -r +configure:6092: checking for aarch64-apple-darwin21-objdump +configure:6122: result: no +configure:6132: checking for objdump +configure:6148: found /usr/bin/objdump +configure:6159: result: objdump +configure:6191: checking how to recognize dependent libraries +configure:6391: result: pass_all +configure:6436: checking for aarch64-apple-darwin21-dlltool +configure:6466: result: no +configure:6476: checking for dlltool +configure:6506: result: no +configure:6536: checking how to associate runtime and link libraries +configure:6563: result: printf %s\n +configure:6580: checking for aarch64-apple-darwin21-ar +configure:6610: result: no +configure:6624: checking for ar +configure:6640: found /usr/bin/ar +configure:6651: result: ar +configure:6688: checking for archiver @FILE support +configure:6705: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning 
generated. +configure:6705: $? = 0 +configure:6708: ar cru libconftest.a @conftest.lst >&5 +ar: @conftest.lst: No such file or directory +configure:6711: $? = 1 +configure:6731: result: no +configure:6749: checking for aarch64-apple-darwin21-strip +configure:6776: result: strip +configure:6848: checking for aarch64-apple-darwin21-ranlib +configure:6878: result: no +configure:6888: checking for ranlib +configure:6904: found /usr/bin/ranlib +configure:6915: result: ranlib +configure:7017: checking command to parse nm output from gcc -fdeclspec object +configure:7170: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:7173: $? = 0 +configure:7177: nm conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:7180: $? = 0 +cannot find nm_test_var in conftest.nm +configure:7170: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:7173: $? = 0 +configure:7177: nm conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:7180: $? = 0 +configure:7246: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c conftstm.o >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:7249: $? = 0 +configure:7287: result: ok +configure:7334: checking for sysroot +configure:7364: result: no +configure:7371: checking for a working dd +configure:7409: result: /bin/dd +configure:7413: checking how to truncate binary pipes +configure:7428: result: /bin/dd bs=4096 count=1 +configure:7717: checking for aarch64-apple-darwin21-mt +configure:7747: result: no +configure:7757: checking for mt +configure:7787: result: no +configure:7807: checking if : is a manifest tool +configure:7813: : '-?' 
+configure:7821: result: no +configure:7837: checking for aarch64-apple-darwin21-dsymutil +configure:7867: result: no +configure:7877: checking for dsymutil +configure:7893: found /usr/bin/dsymutil +configure:7904: result: dsymutil +configure:7929: checking for aarch64-apple-darwin21-nmedit +configure:7959: result: no +configure:7969: checking for nmedit +configure:7985: found /usr/bin/nmedit +configure:7996: result: nmedit +configure:8021: checking for aarch64-apple-darwin21-lipo +configure:8051: result: no +configure:8061: checking for lipo +configure:8077: found /usr/bin/lipo +configure:8088: result: lipo +configure:8113: checking for aarch64-apple-darwin21-otool +configure:8143: result: no +configure:8153: checking for otool +configure:8169: found /usr/bin/otool +configure:8180: result: otool +configure:8205: checking for aarch64-apple-darwin21-otool64 +configure:8235: result: no +configure:8245: checking for otool64 +configure:8275: result: no +configure:8320: checking for -single_module linker flag +gcc -fdeclspec -L/usr/local/opt/libffi/lib -o libconftest.dylib -dynamiclib -Wl,-single_module conftest.c +configure:8353: result: yes +configure:8356: checking for -exported_symbols_list linker flag +configure:8376: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib -Wl,-exported_symbols_list,conftest.sym conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:8376: $? = 0 +configure:8386: result: yes +configure:8389: checking for -force_load linker flag +gcc -fdeclspec -c -o conftest.o conftest.c +ar cru libconftest.a conftest.o +ranlib libconftest.a +gcc -fdeclspec -L/usr/local/opt/libffi/lib -o conftest conftest.c -Wl,-force_load,./libconftest.a +configure:8421: result: yes +configure:8498: checking how to run the C preprocessor +configure:8529: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8529: $? = 0 +configure:8543: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:11:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:8543: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. 
*/ +| #include +configure:8568: result: gcc -fdeclspec -E +configure:8588: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8588: $? = 0 +configure:8602: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:11:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:8602: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| /* end confdefs.h. */ +| #include +configure:8631: checking for ANSI C header files +configure:8651: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8651: $? = 0 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8724: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:8724: $? = 0 +configure:8724: ./conftest +configure:8724: $? = 0 +configure:8735: result: yes +configure:8748: checking for sys/types.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for sys/stat.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? 
= 0 +configure:8748: result: yes +configure:8748: checking for stdlib.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for string.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for memory.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for strings.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for inttypes.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for stdint.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8748: checking for unistd.h +configure:8748: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:8748: $? = 0 +configure:8748: result: yes +configure:8762: checking for dlfcn.h +configure:8762: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. 
+configure:8762: $? = 0 +configure:8762: result: yes +configure:9029: checking for objdir +configure:9044: result: .libs +configure:9308: checking if gcc -fdeclspec supports -fno-rtti -fno-exceptions +configure:9326: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-rtti -fno-exceptions conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9330: $? = 0 +configure:9343: result: yes +configure:9701: checking for gcc -fdeclspec option to produce PIC +configure:9708: result: -fno-common -DPIC +configure:9716: checking if gcc -fdeclspec PIC flag -fno-common -DPIC works +configure:9734: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-common -DPIC -DPIC conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9738: $? = 0 +configure:9751: result: yes +configure:9780: checking if gcc -fdeclspec static flag -static works +configure:9808: result: no +configure:9823: checking if gcc -fdeclspec supports -c -o file.o +configure:9844: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -o out/conftest2.o conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:9848: $? = 0 +configure:9870: result: yes +configure:9878: checking if gcc -fdeclspec supports -c -o file.o +configure:9925: result: yes +configure:9958: checking whether the gcc -fdeclspec linker (ld) supports shared libraries +configure:11221: result: yes +configure:11461: checking dynamic linker characteristics +configure:12291: result: darwin21 dyld +configure:12413: checking how to hardcode library paths into programs +configure:12438: result: immediate +configure:12986: checking whether stripping libraries is possible +configure:13000: result: yes +configure:13026: checking if libtool supports shared libraries +configure:13028: result: yes +configure:13031: checking whether to build shared libraries +configure:13056: result: yes +configure:13059: checking whether to build static libraries +configure:13063: result: no +configure:13086: checking how to run the C++ preprocessor +configure:13113: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:13113: $? = 0 +configure:13127: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.cpp:23:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:13127: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| /* end confdefs.h. */ +| #include +configure:13152: result: g++ -E +configure:13172: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:13172: $? = 0 +configure:13186: g++ -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.cpp:23:10: fatal error: 'ac_nonexistent.h' file not found +#include + ^~~~~~~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:13186: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| /* end confdefs.h. */ +| #include +configure:13348: checking for ld used by g++ +configure:13415: result: ld +configure:13422: checking if the linker (ld) is GNU ld +configure:13437: result: no +configure:13492: checking whether the g++ linker (ld) supports shared libraries +configure:14565: result: yes +configure:14601: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:14604: $? = 0 +configure:15085: checking for g++ option to produce PIC +configure:15092: result: -fno-common -DPIC +configure:15100: checking if g++ PIC flag -fno-common -DPIC works +configure:15118: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fno-common -DPIC -DPIC conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:15122: $? 
= 0 +configure:15135: result: yes +configure:15158: checking if g++ static flag -static works +configure:15186: result: no +configure:15198: checking if g++ supports -c -o file.o +configure:15219: g++ -c -g -O2 -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -o out/conftest2.o conftest.cpp >&5 +In file included from :405: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:15223: $? = 0 +configure:15245: result: yes +configure:15250: checking if g++ supports -c -o file.o +configure:15297: result: yes +configure:15327: checking whether the g++ linker (ld) supports shared libraries +configure:15370: result: yes +configure:15511: checking dynamic linker characteristics +configure:16268: result: darwin21 dyld +configure:16333: checking how to hardcode library paths into programs +configure:16358: result: immediate +configure:16426: checking size of size_t +configure:16431: gcc -fdeclspec -o conftest -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:16431: $? = 0 +configure:16431: ./conftest +configure:16431: $? = 0 +configure:16445: result: 8 +configure:16456: checking for C compiler vendor +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__ICC) || defined(__ECC) || defined(__INTEL_COMPILER)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__xlc__) || defined(__xlC__) || defined(__IBMC__) || defined(__IBMCPP__)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:30:9: error: use of undeclared identifier 'thisisanerror' + thisisanerror; + ^ +1 warning and 1 error generated. +configure:16504: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| #if !(defined(__PATHCC__) || defined(__PATHSCALE__)) +| thisisanerror; +| #endif +| +| ; +| return 0; +| } +configure:16504: gcc -fdeclspec -c -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:16504: $? = 0 +configure:16512: result: clang +configure:17357: checking CFLAGS for maximum warnings +configure:17377: gcc -fdeclspec -c -warn all -warn all -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +clang: error: unknown argument: '-warn' +clang: error: unknown argument: '-warn' +clang: error: no such file or directory: 'all' +clang: error: no such file or directory: 'all' +configure:17377: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| +| int +| main () +| { +| +| ; +| return 0; +| } +configure:17377: gcc -fdeclspec -c -pedantic -Wall -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :391: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17377: $? = 0 +configure:17385: result: -Wall +configure:17405: : CFLAGS="$CFLAGS" +configure:17408: $? = 0 +configure:17445: checking whether to enable maintainer-specific portions of Makefiles +configure:17454: result: no +configure:17470: checking sys/memfd.h usability +configure:17470: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:57:10: fatal error: 'sys/memfd.h' file not found +#include + ^~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:17470: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. 
*/ +| #include <stdio.h> +| #ifdef HAVE_SYS_TYPES_H +| # include <sys/types.h> +| #endif +| #ifdef HAVE_SYS_STAT_H +| # include <sys/stat.h> +| #endif +| #ifdef STDC_HEADERS +| # include <stdlib.h> +| # include <stddef.h> +| #else +| # ifdef HAVE_STDLIB_H +| # include <stdlib.h> +| # endif +| #endif +| #ifdef HAVE_STRING_H +| # if !defined STDC_HEADERS && defined HAVE_MEMORY_H +| # include <memory.h> +| # endif +| # include <string.h> +| #endif +| #ifdef HAVE_STRINGS_H +| # include <strings.h> +| #endif +| #ifdef HAVE_INTTYPES_H +| # include <inttypes.h> +| #endif +| #ifdef HAVE_STDINT_H +| # include <stdint.h> +| #endif +| #ifdef HAVE_UNISTD_H +| # include <unistd.h> +| #endif +| #include <sys/memfd.h> +configure:17470: result: no +configure:17470: checking sys/memfd.h presence +configure:17470: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from <built-in>:391: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:24:10: fatal error: 'sys/memfd.h' file not found +#include <sys/memfd.h> + ^~~~~~~~~~~~~ +1 warning and 1 error generated. +configure:17470: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| #include <sys/memfd.h> +configure:17470: result: no +configure:17470: checking for sys/memfd.h +configure:17470: result: no +configure:17482: checking for memfd_create +configure:17482: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from <built-in>:392: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +Undefined symbols for architecture arm64: + "_memfd_create", referenced from: + _main in conftest-845fe3.o +ld: symbol(s) not found for architecture arm64 +clang: error: linker command failed with exit code 1 (use -v to see invocation) +configure:17482: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| /* end confdefs.h. */ +| /* Define memfd_create to an innocuous variant, in case <limits.h> declares memfd_create.
+| For example, HP-UX 11i <limits.h> declares gettimeofday. */ +| #define memfd_create innocuous_memfd_create +| +| /* System header to define __stub macros and hopefully few prototypes, +| which can conflict with char memfd_create (); below. +| Prefer <limits.h> to <assert.h> if __STDC__ is defined, since +| <limits.h> exists even on freestanding compilers. */ +| +| #ifdef __STDC__ +| # include <limits.h> +| #else +| # include <assert.h> +| #endif +| +| #undef memfd_create +| +| /* Override any GCC internal prototype to avoid an error. +| Use char because int might match the return type of a GCC +| builtin and then its argument prototype would still apply. */ +| #ifdef __cplusplus +| extern "C" +| #endif +| char memfd_create (); +| /* The GNU C library defines this for functions which it implements +| to always fail with ENOSYS. Some functions are actually named +| something starting with __ and the normal name is an alias. */ +| #if defined __stub_memfd_create || defined __stub___memfd_create +| choke me +| #endif +| +| int +| main () +| { +| return memfd_create (); +| ; +| return 0; +| } +configure:17482: result: no +configure:17494: checking sys/mman.h usability +configure:17494: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from <built-in>:392: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17494: $? = 0 +configure:17494: result: yes +configure:17494: checking sys/mman.h presence +configure:17494: gcc -fdeclspec -E -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c +In file included from <built-in>:391: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17494: $? = 0 +configure:17494: result: yes +configure:17494: checking for sys/mman.h +configure:17494: result: yes +configure:17507: checking for mmap +configure:17507: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from <built-in>:392: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17507: $? = 0 +configure:17507: result: yes +configure:17507: checking for mkostemp +configure:17507: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from <built-in>:392: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17507: $?
= 0 +configure:17507: result: yes +configure:17517: checking for sys/mman.h +configure:17517: result: yes +configure:17525: checking for mmap +configure:17525: result: yes +configure:17538: checking whether read-only mmap of a plain file works +configure:17555: result: yes +configure:17557: checking whether mmap from /dev/zero works +configure:17579: result: no +configure:17583: checking for MAP_ANON(YMOUS) +configure:17606: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:38:5: warning: unused variable 'n' [-Wunused-variable] +int n = MAP_ANONYMOUS; + ^ +2 warnings generated. +configure:17606: $? = 0 +configure:17613: result: yes +configure:17619: checking whether mmap with MAP_ANON(YMOUS) works +configure:17636: result: yes +configure:17679: checking for ANSI C header files +configure:17783: result: yes +configure:17793: checking for memcpy +configure:17793: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:53:6: warning: incompatible redeclaration of library function 'memcpy' [-Wincompatible-library-redeclaration] +char memcpy (); + ^ +conftest.c:53:6: note: 'memcpy' is a builtin with type 'void *(void *, const void *, unsigned long)' +2 warnings generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17793: $? = 0 +configure:17793: result: yes +configure:17802: checking for size_t +configure:17802: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:17802: $? = 0 +configure:17802: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:67:21: error: expected expression +if (sizeof ((size_t))) + ^ +1 warning and 1 error generated. +configure:17802: $? 
= 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| /* end confdefs.h. */ +| #include +| #ifdef HAVE_SYS_TYPES_H +| # include +| #endif +| #ifdef HAVE_SYS_STAT_H +| # include +| #endif +| #ifdef STDC_HEADERS +| # include +| # include +| #else +| # ifdef HAVE_STDLIB_H +| # include +| # endif +| #endif +| #ifdef HAVE_STRING_H +| # if !defined STDC_HEADERS && defined HAVE_MEMORY_H +| # include +| # endif +| # include +| #endif +| #ifdef HAVE_STRINGS_H +| # include +| #endif +| #ifdef HAVE_INTTYPES_H +| # include +| #endif +| #ifdef HAVE_STDINT_H +| # include +| #endif +| #ifdef HAVE_UNISTD_H +| # include +| #endif +| int +| main () +| { +| if (sizeof ((size_t))) +| return 0; +| ; +| return 0; +| } +configure:17802: result: yes +configure:17815: checking for working alloca.h +configure:17832: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17832: $? = 0 +configure:17840: result: yes +configure:17848: checking for alloca +configure:17885: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:17885: $? = 0 +configure:17893: result: yes +configure:18004: checking size of double +configure:18009: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:18009: $? = 0 +configure:18009: ./conftest +configure:18009: $? 
= 0 +configure:18023: result: 8 +configure:18037: checking size of long double +configure:18042: gcc -fdeclspec -o conftest -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -L/usr/local/opt/libffi/lib conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +ld: warning: directory not found for option '-L/usr/local/opt/libffi/lib' +configure:18042: $? = 0 +configure:18042: ./conftest +configure:18042: $? = 0 +configure:18056: result: 8 +configure:18089: checking whether byte ordering is bigendian +configure:18104: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18104: $? = 0 +configure:18149: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18149: $? = 0 +configure:18167: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:42:4: error: use of undeclared identifier 'not' + not big endian + ^ +1 warning and 1 error generated. +configure:18167: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. 
*/ +| #include <sys/types.h> +| #include <sys/param.h> +| +| int +| main () +| { +| #if BYTE_ORDER != BIG_ENDIAN +| not big endian +| #endif +| +| ; +| return 0; +| } +configure:18295: result: no +configure:18314: checking assembler .cfi pseudo-op support +configure:18332: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from <built-in>:392: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +<inline asm>:1:14: error: Expected an identifier +.cfi_sections + ^ +1 warning and 1 error generated. +configure:18332: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h. */ +| asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc"); +| int +| main () +| { +| +| ; +| return 0; +| } +configure:18340: result: no +configure:18475: checking whether compiler supports pointer authentication +configure:18503: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from <built-in>:392: +<command line>:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:47:3: error: Pointer authentication not supported +# error Pointer authentication not supported + ^ +1 warning and 1 error generated. +configure:18503: $? = 1 +configure: failed program was: +| /* confdefs.h */ +| #define PACKAGE_NAME "libffi" +| #define PACKAGE_TARNAME "libffi" +| #define PACKAGE_VERSION "3.3" +| #define PACKAGE_STRING "libffi 3.3" +| #define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +| #define PACKAGE_URL "" +| #define PACKAGE "libffi" +| #define VERSION "3.3" +| #define STDC_HEADERS 1 +| #define HAVE_SYS_TYPES_H 1 +| #define HAVE_SYS_STAT_H 1 +| #define HAVE_STDLIB_H 1 +| #define HAVE_STRING_H 1 +| #define HAVE_MEMORY_H 1 +| #define HAVE_STRINGS_H 1 +| #define HAVE_INTTYPES_H 1 +| #define HAVE_STDINT_H 1 +| #define HAVE_UNISTD_H 1 +| #define HAVE_DLFCN_H 1 +| #define LT_OBJDIR ".libs/" +| #define SIZEOF_SIZE_T 8 +| #define HAVE_SYS_MMAN_H 1 +| #define HAVE_MMAP 1 +| #define HAVE_MKOSTEMP 1 +| #define HAVE_MMAP_FILE 1 +| #define HAVE_MMAP_ANON 1 +| #define STDC_HEADERS 1 +| #define HAVE_MEMCPY 1 +| #define HAVE_ALLOCA_H 1 +| #define HAVE_ALLOCA 1 +| #define SIZEOF_DOUBLE 8 +| #define SIZEOF_LONG_DOUBLE 8 +| /* end confdefs.h.
*/ +| +| int +| main () +| { +| +| #ifdef __clang__ +| # if __has_feature(ptrauth_calls) +| # define HAVE_PTRAUTH 1 +| # endif +| #endif +| +| #ifndef HAVE_PTRAUTH +| # error Pointer authentication not supported +| #endif +| +| ; +| return 0; +| } +configure:18511: result: no +configure:18530: checking for _ prefix in compiled symbols +configure:18540: gcc -fdeclspec -c -Wall -fexceptions -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +conftest.c:2:12: warning: expression result unused [-Wunused-value] +int main(){nm_test_func;return 0;} + ^~~~~~~~~~~~ +2 warnings generated. +configure:18543: $? = 0 +configure:18547: nm conftest.o \| sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p' | sed '/ __gnu_lto/d' \> conftest.nm +configure:18550: $? = 0 +configure:18572: result: yes +configure:18647: checking whether C compiler accepts -fno-lto +configure:18666: gcc -fdeclspec -c -Wall -fexceptions -fno-lto -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT conftest.c >&5 +In file included from :392: +:1:25: warning: missing terminating '"' character [-Winvalid-pp-token] +#define RUBY_EXTCONF_H \"extconf.h\" + ^ +1 warning generated. +configure:18666: $? = 0 +configure:18674: result: yes +configure:18683: checking whether .eh_frame section should be read-only +configure:18699: result: yes +configure:18714: checking for __attribute__((visibility("hidden"))) +configure:18723: gcc -fdeclspec -Werror -S conftest.c -o conftest.s 1>&5 +configure:18726: $? = 0 +configure:18735: result: yes +configure:18874: checking for ld used by gcc -fdeclspec +configure:18941: result: ld +configure:18948: checking if the linker (ld) is GNU ld +configure:18963: result: no +configure:19276: versioning on shared library symbols is no +configure:19397: checking that generated files are newer than configure +configure:19403: result: done +configure:19471: creating ./config.status + +## ---------------------- ## +## Running config.status. ## +## ---------------------- ## + +This file was extended by libffi config.status 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = + CONFIG_HEADERS = + CONFIG_LINKS = + CONFIG_COMMANDS = + $ ./config.status + +on Arthurs-MacBook-Air.local + +config.status:1221: creating include/Makefile +config.status:1221: creating include/ffi.h +config.status:1221: creating Makefile +config.status:1221: creating testsuite/Makefile +config.status:1221: creating man/Makefile +config.status:1221: creating doc/Makefile +config.status:1221: creating libffi.pc +config.status:1221: creating fficonfig.h +config.status:1435: executing buildir commands +config.status:1448: skipping top_srcdir/Makefile - not created +config.status:1435: executing depfiles commands +config.status:1435: executing libtool commands +config.status:1435: executing include commands +config.status:1435: executing src commands + +## ---------------- ## +## Cache variables. 
## +## ---------------- ## + +ac_cv_build=aarch64-apple-darwin21.3.0 +ac_cv_c_bigendian=no +ac_cv_c_compiler_gnu=yes +ac_cv_cflags_warn_all=-Wall +ac_cv_cxx_compiler_gnu=yes +ac_cv_decl_map_anon=yes +ac_cv_env_CCASFLAGS_set= +ac_cv_env_CCASFLAGS_value= +ac_cv_env_CCAS_set= +ac_cv_env_CCAS_value= +ac_cv_env_CPPFLAGS_set=set +ac_cv_env_CPPFLAGS_value='-DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' +ac_cv_env_CPP_set= +ac_cv_env_CPP_value= +ac_cv_env_CXXCPP_set= +ac_cv_env_CXXCPP_value= +ac_cv_env_LT_SYS_LIBRARY_PATH_set= +ac_cv_env_LT_SYS_LIBRARY_PATH_value= +ac_cv_env_build_alias_set= +ac_cv_env_build_alias_value= +ac_cv_env_host_alias_set=set +ac_cv_env_host_alias_value=aarch64-apple-darwin21 +ac_cv_env_target_alias_set= +ac_cv_env_target_alias_value= +ac_cv_func_alloca_works=yes +ac_cv_func_memcpy=yes +ac_cv_func_memfd_create=no +ac_cv_func_mkostemp=yes +ac_cv_func_mmap=yes +ac_cv_func_mmap_anon=yes +ac_cv_func_mmap_dev_zero=no +ac_cv_func_mmap_file=yes +ac_cv_header_dlfcn_h=yes +ac_cv_header_inttypes_h=yes +ac_cv_header_memory_h=yes +ac_cv_header_stdc=yes +ac_cv_header_stdint_h=yes +ac_cv_header_stdlib_h=yes +ac_cv_header_string_h=yes +ac_cv_header_strings_h=yes +ac_cv_header_sys_memfd_h=no +ac_cv_header_sys_mman_h=yes +ac_cv_header_sys_stat_h=yes +ac_cv_header_sys_types_h=yes +ac_cv_header_unistd_h=yes +ac_cv_host=aarch64-apple-darwin21 +ac_cv_objext=o +ac_cv_path_EGREP='/usr/bin/grep -E' +ac_cv_path_FGREP='/usr/bin/grep -F' +ac_cv_path_GREP=/usr/bin/grep +ac_cv_path_SED=/usr/bin/sed +ac_cv_path_ax_enable_builddir_sed=sed +ac_cv_path_install='/opt/homebrew/bin/ginstall -c' +ac_cv_path_lt_DD=/bin/dd +ac_cv_path_mkdir=/opt/homebrew/bin/gmkdir +ac_cv_prog_AWK=awk +ac_cv_prog_CC='gcc -fdeclspec' +ac_cv_prog_CPP='gcc -fdeclspec -E' +ac_cv_prog_CXXCPP='g++ -E' +ac_cv_prog_STRIP=strip +ac_cv_prog_ac_ct_AR=ar +ac_cv_prog_ac_ct_CXX=g++ +ac_cv_prog_ac_ct_DSYMUTIL=dsymutil +ac_cv_prog_ac_ct_DUMPBIN='link -dump' +ac_cv_prog_ac_ct_LIPO=lipo +ac_cv_prog_ac_ct_NMEDIT=nmedit +ac_cv_prog_ac_ct_OBJDUMP=objdump +ac_cv_prog_ac_ct_OTOOL=otool +ac_cv_prog_ac_ct_RANLIB=ranlib +ac_cv_prog_ac_ct_STRIP=strip +ac_cv_prog_cc_c89= +ac_cv_prog_cc_g=yes +ac_cv_prog_cxx_g=yes +ac_cv_prog_make_make_set=yes +ac_cv_sizeof_double=8 +ac_cv_sizeof_long_double=8 +ac_cv_sizeof_size_t=8 +ac_cv_target=aarch64-apple-darwin21 +ac_cv_type_size_t=yes +ac_cv_working_alloca_h=yes +am_cv_CCAS_dependencies_compiler_type=none +am_cv_CC_dependencies_compiler_type=none +am_cv_CXX_dependencies_compiler_type=none +am_cv_make_support_nested_variables=yes +am_cv_prog_cc_c_o=yes +ax_cv_c_compiler_vendor=clang +ax_cv_check_cflags___fno_lto=yes +gcc_cv_as_cfi_pseudo_op=no +libffi_cv_as_ptrauth=no +libffi_cv_hidden_visibility_attribute=yes +libffi_cv_no_lto=-fno-lto +libffi_cv_ro_eh_frame=yes +lt_cv_apple_cc_single_mod=yes +lt_cv_ar_at_file=no +lt_cv_deplibs_check_method=pass_all +lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_ld_exported_symbols_list=yes +lt_cv_ld_force_load=yes +lt_cv_ld_reload_flag=-r +lt_cv_nm_interface='BSD nm' +lt_cv_objdir=.libs +lt_cv_path_LD=ld +lt_cv_path_LDCXX=ld +lt_cv_path_NM=no +lt_cv_path_mainfest_tool=no +lt_cv_prog_compiler_c_o=yes +lt_cv_prog_compiler_c_o_CXX=yes +lt_cv_prog_compiler_pic='-fno-common -DPIC' +lt_cv_prog_compiler_pic_CXX='-fno-common -DPIC' +lt_cv_prog_compiler_pic_works=yes +lt_cv_prog_compiler_pic_works_CXX=yes +lt_cv_prog_compiler_rtti_exceptions=yes 
+lt_cv_prog_compiler_static_works=no +lt_cv_prog_compiler_static_works_CXX=no +lt_cv_prog_gnu_ld=no +lt_cv_prog_gnu_ldcxx=no +lt_cv_sharedlib_from_linklib_cmd='printf %s\n' +lt_cv_sys_global_symbol_pipe='sed -n -e '\''s/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_c_name_address='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_cdecl='sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import= +lt_cv_sys_max_cmd_len=786432 +lt_cv_sys_symbol_underscore=yes +lt_cv_to_host_file_cmd=func_convert_file_noop +lt_cv_to_tool_file_cmd=func_convert_file_noop +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' + +## ----------------- ## +## Output variables. ## +## ----------------- ## + +ACLOCAL='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16' +ALLOCA='' +AMDEPBACKSLASH='' +AMDEP_FALSE='' +AMDEP_TRUE='#' +AMTAR='$${TAR-tar}' +AM_BACKSLASH='\' +AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +AM_DEFAULT_VERBOSITY='1' +AM_LTLDFLAGS='' +AM_RUNTESTFLAGS='' +AM_V='$(V)' +AR='ar' +AUTOCONF='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf' +AUTOHEADER='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader' +AUTOMAKE='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16' +AWK='awk' +BUILD_DOCS_FALSE='' +BUILD_DOCS_TRUE='#' +CC='gcc -fdeclspec' +CCAS='gcc -fdeclspec' +CCASDEPMODE='depmode=none' +CCASFLAGS='' +CCDEPMODE='depmode=none' +CFLAGS=' -Wall -fexceptions' +CPP='gcc -fdeclspec -E' +CPPFLAGS='-DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' +CXX='g++' +CXXCPP='g++ -E' +CXXDEPMODE='depmode=none' +CXXFLAGS='-g -O2' +CYGPATH_W='echo' +DEFS='-DHAVE_CONFIG_H' +DEPDIR='.deps' +DLLTOOL='false' +DSYMUTIL='dsymutil' +DUMPBIN=':' +ECHO_C='\c' +ECHO_N='' +ECHO_T='' +EGREP='/usr/bin/grep -E' +EXEEXT='' +FFI_DEBUG_FALSE='' +FFI_DEBUG_TRUE='#' +FFI_EXEC_TRAMPOLINE_TABLE='1' +FFI_EXEC_TRAMPOLINE_TABLE_FALSE='#' +FFI_EXEC_TRAMPOLINE_TABLE_TRUE='' +FGREP='/usr/bin/grep -F' +GREP='/usr/bin/grep' +HAVE_LONG_DOUBLE='0' +HAVE_LONG_DOUBLE_VARIANT='0' +INSTALL_DATA='${INSTALL} -m 644' +INSTALL_PROGRAM='${INSTALL}' +INSTALL_SCRIPT='${INSTALL}' +INSTALL_STRIP_PROGRAM='$(install_sh) -c -s' +LD='ld' +LDFLAGS='-L/usr/local/opt/libffi/lib' +LIBFFI_BUILD_VERSIONED_SHLIB_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE='#' +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE='' +LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE='#' +LIBFFI_BUILD_VERSIONED_SHLIB_TRUE='#' +LIBOBJS='' +LIBS='' +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +LIPO='lipo' +LN_S='ln -s' +LTLIBOBJS='' +LT_SYS_LIBRARY_PATH='' +MAINT='#' +MAINTAINER_MODE_FALSE='' +MAINTAINER_MODE_TRUE='#' +MAKEINFO='${SHELL} 
/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo' +MANIFEST_TOOL=':' +MKDIR_P='/opt/homebrew/bin/gmkdir -p' +NM='nm' +NMEDIT='nmedit' +OBJDUMP='objdump' +OBJEXT='o' +OPT_LDFLAGS='' +OTOOL64=':' +OTOOL='otool' +PACKAGE='libffi' +PACKAGE_BUGREPORT='http://github.com/libffi/libffi/issues' +PACKAGE_NAME='libffi' +PACKAGE_STRING='libffi 3.3' +PACKAGE_TARNAME='libffi' +PACKAGE_URL='' +PACKAGE_VERSION='3.3' +PATH_SEPARATOR=':' +PRTDIAG='' +RANLIB='ranlib' +SECTION_LDFLAGS='' +SED='/usr/bin/sed' +SET_MAKE='' +SHELL='/bin/sh' +STRIP='strip' +TARGET='AARCH64' +TARGETDIR='aarch64' +TARGET_OBJ=' src/aarch64/ffi.lo src/aarch64/sysv.lo' +TESTSUBDIR_FALSE='#' +TESTSUBDIR_TRUE='' +VERSION='3.3' +ac_ct_AR='ar' +ac_ct_CC='' +ac_ct_CXX='g++' +ac_ct_DUMPBIN='link -dump' +am__EXEEXT_FALSE='' +am__EXEEXT_TRUE='#' +am__fastdepCCAS_FALSE='' +am__fastdepCCAS_TRUE='#' +am__fastdepCC_FALSE='' +am__fastdepCC_TRUE='#' +am__fastdepCXX_FALSE='' +am__fastdepCXX_TRUE='#' +am__include='include' +am__isrc=' -I$(srcdir)' +am__leading_dot='.' +am__nodep='' +am__quote='' +am__tar='$${TAR-tar} chof - "$$tardir"' +am__untar='$${TAR-tar} xf -' +ax_enable_builddir_sed='sed' +bindir='${exec_prefix}/bin' +build='aarch64-apple-darwin21.3.0' +build_alias='' +build_cpu='aarch64' +build_os='darwin21.3.0' +build_vendor='apple' +datadir='${datarootdir}' +datarootdir='${prefix}/share' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +dvidir='${docdir}' +exec_prefix='${prefix}' +host='aarch64-apple-darwin21' +host_alias='aarch64-apple-darwin21' +host_cpu='aarch64' +host_os='darwin21' +host_vendor='apple' +htmldir='${docdir}' +includedir='${prefix}/include' +infodir='${datarootdir}/info' +install_sh='${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh' +libdir='${exec_prefix}/lib' +libexecdir='${exec_prefix}/libexec' +localedir='${datarootdir}/locale' +localstatedir='${prefix}/var' +mandir='${datarootdir}/man' +mkdir_p='$(MKDIR_P)' +oldincludedir='/usr/include' +pdfdir='${docdir}' +prefix='/usr/local' +program_transform_name='s,x,x,' +psdir='${docdir}' +runstatedir='${localstatedir}/run' +sbindir='${exec_prefix}/sbin' +sharedstatedir='${prefix}/com' +sys_symbol_underscore='yes' +sysconfdir='${prefix}/etc' +target='aarch64-apple-darwin21' +target_alias='aarch64-apple-darwin21' +target_cpu='aarch64' +target_os='darwin21' +target_vendor='apple' +toolexecdir='${libdir}/gcc-lib/$(target_alias)' +toolexeclibdir='${libdir}' + +## ----------- ## +## confdefs.h. 
## +## ----------- ## + +/* confdefs.h */ +#define PACKAGE_NAME "libffi" +#define PACKAGE_TARNAME "libffi" +#define PACKAGE_VERSION "3.3" +#define PACKAGE_STRING "libffi 3.3" +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" +#define PACKAGE_URL "" +#define PACKAGE "libffi" +#define VERSION "3.3" +#define STDC_HEADERS 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_STRINGS_H 1 +#define HAVE_INTTYPES_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_DLFCN_H 1 +#define LT_OBJDIR ".libs/" +#define SIZEOF_SIZE_T 8 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_MMAP 1 +#define HAVE_MKOSTEMP 1 +#define HAVE_MMAP_FILE 1 +#define HAVE_MMAP_ANON 1 +#define STDC_HEADERS 1 +#define HAVE_MEMCPY 1 +#define HAVE_ALLOCA_H 1 +#define HAVE_ALLOCA 1 +#define SIZEOF_DOUBLE 8 +#define SIZEOF_LONG_DOUBLE 8 +#define SYMBOL_UNDERSCORE 1 +#define FFI_EXEC_TRAMPOLINE_TABLE 1 +#define HAVE_RO_EH_FRAME 1 +#define EH_FRAME_FLAGS "a" +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +configure: exit 0 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/config.status b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/config.status new file mode 100755 index 0000000..2eb32bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/config.status @@ -0,0 +1,2400 @@ +#! /bin/sh +# Generated by configure. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
+as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libffi $as_me 3.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +# Files that config.status was made for. +config_files=" include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc" +config_headers=" fficonfig.h" +config_commands=" buildir depfiles libtool include src" + +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." 
+ +ac_cs_config="'--disable-static' '--with-pic=yes' '--disable-dependency-tracking' '--disable-docs' '--host=aarch64-apple-darwin21' 'host_alias=aarch64-apple-darwin21' 'CPPFLAGS=-DRUBY_EXTCONF_H=\\\"extconf.h\\\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT '" +ac_cs_version="\ +libffi config.status 3.3 +configured by /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure, generated by GNU Autoconf 2.69, + with options \"$ac_cs_config\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21' +srcdir='/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi' +INSTALL='/opt/homebrew/bin/ginstall -c' +MKDIR_P='/opt/homebrew/bin/gmkdir -p' +AWK='awk' +test -n "$AWK" || AWK=awk +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +if $ac_cs_recheck; then + set X /bin/sh '/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure' '--disable-static' '--with-pic=yes' '--disable-dependency-tracking' '--disable-docs' '--host=aarch64-apple-darwin21' 'host_alias=aarch64-apple-darwin21' 'CPPFLAGS=-DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT ' $ac_configure_extra_args --no-create --no-recursion + shift + $as_echo "running CONFIG_SHELL=/bin/sh $*" >&6 + CONFIG_SHELL='/bin/sh' + export CONFIG_SHELL + exec "$@" +fi + +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +# +# INIT-COMMANDS +# +ax_enable_builddir_srcdir="/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi" # /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +ax_enable_builddir_host="" # / aarch64-apple-darwin21 +ax_enable_builddir_version="3.3" # 3.3 +ax_enable_builddir_package="libffi" # libffi +ax_enable_builddir_auxdir="/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi" # +ax_enable_builddir_sed="sed" # /usr/bin/sed +ax_enable_builddir="." # + +AMDEP_TRUE="#" MAKE="make" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' +double_quote_subst='s/\(["`\\]\)/\\\1/g' +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' +macro_version='2.4.6' +macro_revision='2.4.6' +enable_shared='yes' +enable_static='no' +pic_mode='yes' +enable_fast_install='needless' +shared_archive_member_spec='' +SHELL='/bin/sh' +ECHO='printf %s\n' +PATH_SEPARATOR=':' +host_alias='aarch64-apple-darwin21' +host='aarch64-apple-darwin21' +host_os='darwin21' +build_alias='' +build='aarch64-apple-darwin21.3.0' +build_os='darwin21.3.0' +SED='/usr/bin/sed' +Xsed='/usr/bin/sed -e 1s/^X//' +GREP='/usr/bin/grep' +EGREP='/usr/bin/grep -E' +FGREP='/usr/bin/grep -F' +LD='ld' +NM='nm' +LN_S='ln -s' +max_cmd_len='786432' +ac_objext='o' +exeext='' +lt_unset='unset' +lt_SP2NL='tr \040 \012' +lt_NL2SP='tr \015\012 \040\040' +lt_cv_to_host_file_cmd='func_convert_file_noop' +lt_cv_to_tool_file_cmd='func_convert_file_noop' +reload_flag=' -r' +reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +OBJDUMP='objdump' +deplibs_check_method='pass_all' +file_magic_cmd='$MAGIC_CMD' +file_magic_glob='' +want_nocaseglob='no' +DLLTOOL='false' +sharedlib_from_linklib_cmd='printf %s\n' +AR='ar' +AR_FLAGS='cru' +archiver_list_spec='' +STRIP='strip' +RANLIB='ranlib' +old_postinstall_cmds='chmod 644 $oldlib~$RANLIB $tool_oldlib' +old_postuninstall_cmds='' +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +lock_old_archive_extraction='yes' +CC='gcc -fdeclspec' +CFLAGS=' -Wall -fexceptions' +compiler='g++' +GCC='yes' +lt_cv_sys_global_symbol_pipe='sed -n -e '\''s/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*_\([_A-Za-z][_A-Za-z0-9]*\)$/\1 _\2 \2/p'\'' | sed '\''/ __gnu_lto/d'\''' +lt_cv_sys_global_symbol_to_cdecl='sed -n -e '\''s/^T .* \(.*\)$/extern int \1();/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* 
\(.*\)$/extern char \1;/p'\''' +lt_cv_sys_global_symbol_to_import='' +lt_cv_sys_global_symbol_to_c_name_address='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"\1", (void *) \&\1},/p'\''' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='sed -n -e '\''s/^: \(.*\) .*$/ {"\1", (void *) 0},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(lib.*\)$/ {"\1", (void *) \&\1},/p'\'' -e '\''s/^[BCDEGRST][BCDEGRST]* .* \(.*\)$/ {"lib\1", (void *) \&\1},/p'\''' +lt_cv_nm_interface='BSD nm' +nm_file_list_spec='@' +lt_sysroot='' +lt_cv_truncate_bin='/bin/dd bs=4096 count=1' +objdir='.libs' +MAGIC_CMD='file' +lt_prog_compiler_no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions' +lt_prog_compiler_pic=' -fno-common -DPIC' +lt_prog_compiler_wl='-Wl,' +lt_prog_compiler_static='' +lt_cv_prog_compiler_c_o='yes' +need_locks='no' +MANIFEST_TOOL=':' +DSYMUTIL='dsymutil' +NMEDIT='nmedit' +LIPO='lipo' +OTOOL='otool' +OTOOL64=':' +libext='a' +shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +extract_expsyms_cmds='' +archive_cmds_need_lc='no' +enable_shared_with_static_runtimes='no' +export_dynamic_flag_spec='' +whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object='no' +old_archive_from_new_cmds='' +old_archive_from_expsyms_cmds='' +archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds='sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds='sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld='no' +allow_undefined_flag='$wl-flat_namespace $wl-undefined ${wl}suppress' +no_undefined_flag='' +hardcode_libdir_flag_spec='' +hardcode_libdir_separator='' +hardcode_direct='no' +hardcode_direct_absolute='no' +hardcode_minus_L='no' +hardcode_shlibpath_var='unsupported' +hardcode_automatic='yes' +inherit_rpath='no' +link_all_deplibs='yes' +always_export_symbols='no' +export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms='' +prelink_cmds='' +postlink_cmds='' +file_list_spec='' +variables_saved_for_relink='PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH' +need_lib_prefix='no' +need_version='no' +version_type='darwin' +runpath_var='' +shlibpath_var='DYLD_LIBRARY_PATH' +shlibpath_overrides_runpath='yes' +libname_spec='lib$name' +library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +soname_spec='$libname$release$major$shared_ext' +install_override_mode='' +postinstall_cmds='' +postuninstall_cmds='' +finish_cmds='' +finish_eval='' +hardcode_into_libs='no' +sys_lib_search_path_spec='/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/13.0.0 /usr/local/lib' 
+configure_time_dlsearch_path='/usr/local/lib /lib /usr/lib' +configure_time_lt_sys_library_path='' +hardcode_action='immediate' +enable_dlopen='unknown' +enable_dlopen_self='unknown' +enable_dlopen_self_static='unknown' +old_striplib='strip -S' +striplib='strip -x' +compiler_lib_search_dirs='' +predep_objects='' +postdep_objects='' +predeps='' +postdeps='' +compiler_lib_search_path='' +LD_CXX='ld' +reload_flag_CXX=' -r' +reload_cmds_CXX='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' +old_archive_cmds_CXX='$AR $AR_FLAGS $oldlib$oldobjs~$RANLIB $tool_oldlib' +compiler_CXX='g++' +GCC_CXX='yes' +lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' +lt_prog_compiler_pic_CXX=' -fno-common -DPIC' +lt_prog_compiler_wl_CXX='-Wl,' +lt_prog_compiler_static_CXX='' +lt_cv_prog_compiler_c_o_CXX='yes' +archive_cmds_need_lc_CXX='no' +enable_shared_with_static_runtimes_CXX='no' +export_dynamic_flag_spec_CXX='' +whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +compiler_needs_object_CXX='no' +old_archive_from_new_cmds_CXX='' +old_archive_from_expsyms_cmds_CXX='' +archive_cmds_CXX='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module' +archive_expsym_cmds_CXX='sed '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring $single_module $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags' +module_expsym_cmds_CXX='sed -e '\''s|^|_|'\'' < $export_symbols > $output_objdir/$libname-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs $compiler_flags $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +with_gnu_ld_CXX='no' +allow_undefined_flag_CXX='$wl-flat_namespace $wl-undefined ${wl}suppress' +no_undefined_flag_CXX='' +hardcode_libdir_flag_spec_CXX='' +hardcode_libdir_separator_CXX='' +hardcode_direct_CXX='no' +hardcode_direct_absolute_CXX='no' +hardcode_minus_L_CXX='no' +hardcode_shlibpath_var_CXX='unsupported' +hardcode_automatic_CXX='yes' +inherit_rpath_CXX='no' +link_all_deplibs_CXX='yes' +always_export_symbols_CXX='no' +export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' +include_expsyms_CXX='' +prelink_cmds_CXX='' +postlink_cmds_CXX='' +file_list_spec_CXX='' +hardcode_action_CXX='immediate' +compiler_lib_search_dirs_CXX='' +predep_objects_CXX='' +postdep_objects_CXX='' +predeps_CXX='' +postdeps_CXX='' +compiler_lib_search_path_CXX='' + +LTCC='gcc -fdeclspec' +LTCFLAGS='' +compiler='gcc -fdeclspec' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# Quote evaled strings. 
+for var in SHELL ECHO PATH_SEPARATOR SED GREP EGREP FGREP LD NM LN_S lt_SP2NL lt_NL2SP reload_flag OBJDUMP deplibs_check_method file_magic_cmd file_magic_glob want_nocaseglob DLLTOOL sharedlib_from_linklib_cmd AR AR_FLAGS archiver_list_spec STRIP RANLIB CC CFLAGS compiler lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl lt_cv_sys_global_symbol_to_import lt_cv_sys_global_symbol_to_c_name_address lt_cv_sys_global_symbol_to_c_name_address_lib_prefix lt_cv_nm_interface nm_file_list_spec lt_cv_truncate_bin lt_prog_compiler_no_builtin_flag lt_prog_compiler_pic lt_prog_compiler_wl lt_prog_compiler_static lt_cv_prog_compiler_c_o need_locks MANIFEST_TOOL DSYMUTIL NMEDIT LIPO OTOOL OTOOL64 shrext_cmds export_dynamic_flag_spec whole_archive_flag_spec compiler_needs_object with_gnu_ld allow_undefined_flag no_undefined_flag hardcode_libdir_flag_spec hardcode_libdir_separator exclude_expsyms include_expsyms file_list_spec variables_saved_for_relink libname_spec library_names_spec soname_spec install_override_mode finish_eval old_striplib striplib compiler_lib_search_dirs predep_objects postdep_objects predeps postdeps compiler_lib_search_path LD_CXX reload_flag_CXX compiler_CXX lt_prog_compiler_no_builtin_flag_CXX lt_prog_compiler_pic_CXX lt_prog_compiler_wl_CXX lt_prog_compiler_static_CXX lt_cv_prog_compiler_c_o_CXX export_dynamic_flag_spec_CXX whole_archive_flag_spec_CXX compiler_needs_object_CXX with_gnu_ld_CXX allow_undefined_flag_CXX no_undefined_flag_CXX hardcode_libdir_flag_spec_CXX hardcode_libdir_separator_CXX exclude_expsyms_CXX include_expsyms_CXX file_list_spec_CXX compiler_lib_search_dirs_CXX predep_objects_CXX postdep_objects_CXX predeps_CXX postdeps_CXX compiler_lib_search_path_CXX; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED \"\$sed_quote_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds old_postinstall_cmds old_postuninstall_cmds old_archive_cmds extract_expsyms_cmds old_archive_from_new_cmds old_archive_from_expsyms_cmds archive_cmds archive_expsym_cmds module_cmds module_expsym_cmds export_symbols_cmds prelink_cmds postlink_cmds postinstall_cmds postuninstall_cmds finish_cmds sys_lib_search_path_spec configure_time_dlsearch_path configure_time_lt_sys_library_path reload_cmds_CXX old_archive_cmds_CXX old_archive_from_new_cmds_CXX old_archive_from_expsyms_cmds_CXX archive_cmds_CXX archive_expsym_cmds_CXX module_cmds_CXX module_expsym_cmds_CXX export_symbols_cmds_CXX prelink_cmds_CXX postlink_cmds_CXX; do + case `eval \\$ECHO \\""\\$$var"\\"` in + *[\\\`\"\$]*) + eval "lt_$var=\\\"\`\$ECHO \"\$$var\" | \$SED -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_$var=\\\"\$$var\\\"" + ;; + esac +done + +ac_aux_dir='/Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='libffi' + VERSION='3.3' + RM='rm -f' + ofile='libtool' + + + + + +TARGETDIR="aarch64" + + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "fficonfig.h") CONFIG_HEADERS="$CONFIG_HEADERS fficonfig.h" ;; + "buildir") CONFIG_COMMANDS="$CONFIG_COMMANDS buildir" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "include") CONFIG_COMMANDS="$CONFIG_COMMANDS include" ;; + "src") CONFIG_COMMANDS="$CONFIG_COMMANDS src" ;; + "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; + "include/ffi.h") CONFIG_FILES="$CONFIG_FILES include/ffi.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "testsuite/Makefile") CONFIG_FILES="$CONFIG_FILES testsuite/Makefile" ;; + "man/Makefile") CONFIG_FILES="$CONFIG_FILES man/Makefile" ;; + "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; + "libffi.pc") CONFIG_FILES="$CONFIG_FILES libffi.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. 
+if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +cat >>"$ac_tmp/subs1.awk" <<\_ACAWK && +S["am__EXEEXT_FALSE"]="" +S["am__EXEEXT_TRUE"]="#" +S["LTLIBOBJS"]="" +S["LIBOBJS"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_SUN_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_SUN_TRUE"]="#" +S["LIBFFI_BUILD_VERSIONED_SHLIB_GNU_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_GNU_TRUE"]="#" +S["LIBFFI_BUILD_VERSIONED_SHLIB_FALSE"]="" +S["LIBFFI_BUILD_VERSIONED_SHLIB_TRUE"]="#" +S["OPT_LDFLAGS"]="" +S["SECTION_LDFLAGS"]="" +S["toolexeclibdir"]="${libdir}" +S["toolexecdir"]="${libdir}/gcc-lib/$(target_alias)" +S["FFI_DEBUG_FALSE"]="" +S["FFI_DEBUG_TRUE"]="#" +S["TARGET_OBJ"]=" src/aarch64/ffi.lo src/aarch64/sysv.lo" +S["TARGETDIR"]="aarch64" +S["TARGET"]="AARCH64" +S["BUILD_DOCS_FALSE"]="" +S["BUILD_DOCS_TRUE"]="#" +S["FFI_EXEC_TRAMPOLINE_TABLE"]="1" +S["FFI_EXEC_TRAMPOLINE_TABLE_FALSE"]="#" +S["FFI_EXEC_TRAMPOLINE_TABLE_TRUE"]="" +S["sys_symbol_underscore"]="yes" +S["HAVE_LONG_DOUBLE_VARIANT"]="0" +S["HAVE_LONG_DOUBLE"]="0" +S["ALLOCA"]="" +S["AM_LTLDFLAGS"]="" +S["AM_RUNTESTFLAGS"]="" +S["TESTSUBDIR_FALSE"]="#" +S["TESTSUBDIR_TRUE"]="" +S["MAINT"]="#" +S["MAINTAINER_MODE_FALSE"]="" +S["MAINTAINER_MODE_TRUE"]="#" +S["PRTDIAG"]="" +S["CXXCPP"]="g++ -E" +S["CPP"]="gcc -fdeclspec -E" +S["LT_SYS_LIBRARY_PATH"]="" +S["OTOOL64"]=":" +S["OTOOL"]="otool" +S["LIPO"]="lipo" +S["NMEDIT"]="nmedit" +S["DSYMUTIL"]="dsymutil" +S["MANIFEST_TOOL"]=":" +S["RANLIB"]="ranlib" +S["ac_ct_AR"]="ar" +S["AR"]="ar" +S["DLLTOOL"]="false" +S["OBJDUMP"]="objdump" +S["LN_S"]="ln -s" +S["NM"]="nm" +S["ac_ct_DUMPBIN"]="link -dump" +S["DUMPBIN"]=":" +S["LD"]="ld" +S["FGREP"]="/usr/bin/grep -F" +S["EGREP"]="/usr/bin/grep -E" +S["GREP"]="/usr/bin/grep" +S["SED"]="/usr/bin/sed" +S["LIBTOOL"]="$(SHELL) $(top_builddir)/libtool" +S["am__fastdepCCAS_FALSE"]="" +S["am__fastdepCCAS_TRUE"]="#" +S["CCASDEPMODE"]="depmode=none" +S["CCASFLAGS"]="" +S["CCAS"]="gcc -fdeclspec" +S["am__fastdepCXX_FALSE"]="" +S["am__fastdepCXX_TRUE"]="#" +S["CXXDEPMODE"]="depmode=none" +S["ac_ct_CXX"]="g++" +S["CXXFLAGS"]="-g -O2" +S["CXX"]="g++" +S["am__fastdepCC_FALSE"]="" +S["am__fastdepCC_TRUE"]="#" +S["CCDEPMODE"]="depmode=none" +S["am__nodep"]="" +S["AMDEPBACKSLASH"]="" +S["AMDEP_FALSE"]="" +S["AMDEP_TRUE"]="#" +S["am__include"]="include" +S["DEPDIR"]=".deps" +S["OBJEXT"]="o" +S["EXEEXT"]="" +S["ac_ct_CC"]="" +S["CPPFLAGS"]="-DRUBY_EXTCONF_H=\\\"extconf.h\\\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT " +S["LDFLAGS"]="-L/usr/local/opt/libffi/lib" +S["CFLAGS"]=" -Wall -fexceptions" +S["CC"]="gcc -fdeclspec" +S["AM_BACKSLASH"]="\\" +S["AM_DEFAULT_VERBOSITY"]="1" +S["AM_DEFAULT_V"]="$(AM_DEFAULT_VERBOSITY)" +S["AM_V"]="$(V)" +S["am__untar"]="$${TAR-tar} xf -" +S["am__tar"]="$${TAR-tar} chof - \"$$tardir\"" +S["AMTAR"]="$${TAR-tar}" +S["am__leading_dot"]="." 
+S["SET_MAKE"]="" +S["AWK"]="awk" +S["mkdir_p"]="$(MKDIR_P)" +S["MKDIR_P"]="/opt/homebrew/bin/gmkdir -p" +S["INSTALL_STRIP_PROGRAM"]="$(install_sh) -c -s" +S["STRIP"]="strip" +S["install_sh"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh" +S["MAKEINFO"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo" +S["AUTOHEADER"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader" +S["AUTOMAKE"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16" +S["AUTOCONF"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf" +S["ACLOCAL"]="${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16" +S["VERSION"]="3.3" +S["PACKAGE"]="libffi" +S["CYGPATH_W"]="echo" +S["am__isrc"]=" -I$(srcdir)" +S["INSTALL_DATA"]="${INSTALL} -m 644" +S["INSTALL_SCRIPT"]="${INSTALL}" +S["INSTALL_PROGRAM"]="${INSTALL}" +S["ax_enable_builddir_sed"]="sed" +S["target_os"]="darwin21" +S["target_vendor"]="apple" +S["target_cpu"]="aarch64" +S["target"]="aarch64-apple-darwin21" +S["host_os"]="darwin21" +S["host_vendor"]="apple" +S["host_cpu"]="aarch64" +S["host"]="aarch64-apple-darwin21" +S["build_os"]="darwin21.3.0" +S["build_vendor"]="apple" +S["build_cpu"]="aarch64" +S["build"]="aarch64-apple-darwin21.3.0" +S["target_alias"]="aarch64-apple-darwin21" +S["host_alias"]="aarch64-apple-darwin21" +S["build_alias"]="" +S["LIBS"]="" +S["ECHO_T"]="" +S["ECHO_N"]="" +S["ECHO_C"]="\\c" +S["DEFS"]="-DHAVE_CONFIG_H" +S["mandir"]="${datarootdir}/man" +S["localedir"]="${datarootdir}/locale" +S["libdir"]="${exec_prefix}/lib" +S["psdir"]="${docdir}" +S["pdfdir"]="${docdir}" +S["dvidir"]="${docdir}" +S["htmldir"]="${docdir}" +S["infodir"]="${datarootdir}/info" +S["docdir"]="${datarootdir}/doc/${PACKAGE_TARNAME}" +S["oldincludedir"]="/usr/include" +S["includedir"]="${prefix}/include" +S["runstatedir"]="${localstatedir}/run" +S["localstatedir"]="${prefix}/var" +S["sharedstatedir"]="${prefix}/com" +S["sysconfdir"]="${prefix}/etc" +S["datadir"]="${datarootdir}" +S["datarootdir"]="${prefix}/share" +S["libexecdir"]="${exec_prefix}/libexec" +S["sbindir"]="${exec_prefix}/sbin" +S["bindir"]="${exec_prefix}/bin" +S["program_transform_name"]="s,x,x," +S["prefix"]="/usr/local" +S["exec_prefix"]="${prefix}" +S["PACKAGE_URL"]="" +S["PACKAGE_BUGREPORT"]="http://github.com/libffi/libffi/issues" +S["PACKAGE_STRING"]="libffi 3.3" +S["PACKAGE_VERSION"]="3.3" +S["PACKAGE_TARNAME"]="libffi" +S["PACKAGE_NAME"]="libffi" +S["PATH_SEPARATOR"]=":" +S["SHELL"]="/bin/sh" +S["am__quote"]="" +_ACAWK +cat >>"$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +D["PACKAGE_NAME"]=" \"libffi\"" +D["PACKAGE_TARNAME"]=" \"libffi\"" +D["PACKAGE_VERSION"]=" \"3.3\"" +D["PACKAGE_STRING"]=" \"libffi 3.3\"" +D["PACKAGE_BUGREPORT"]=" \"http://github.com/libffi/libffi/issues\"" +D["PACKAGE_URL"]=" \"\"" +D["PACKAGE"]=" \"libffi\"" +D["VERSION"]=" \"3.3\"" +D["STDC_HEADERS"]=" 1" +D["HAVE_SYS_TYPES_H"]=" 1" +D["HAVE_SYS_STAT_H"]=" 1" +D["HAVE_STDLIB_H"]=" 1" +D["HAVE_STRING_H"]=" 1" +D["HAVE_MEMORY_H"]=" 1" +D["HAVE_STRINGS_H"]=" 1" +D["HAVE_INTTYPES_H"]=" 1" +D["HAVE_STDINT_H"]=" 1" +D["HAVE_UNISTD_H"]=" 1" +D["HAVE_DLFCN_H"]=" 1" +D["LT_OBJDIR"]=" \".libs/\"" +D["SIZEOF_SIZE_T"]=" 8" +D["HAVE_SYS_MMAN_H"]=" 1" +D["HAVE_MMAP"]=" 1" +D["HAVE_MKOSTEMP"]=" 1" +D["HAVE_MMAP_FILE"]=" 1" +D["HAVE_MMAP_ANON"]=" 1" +D["STDC_HEADERS"]=" 1" +D["HAVE_MEMCPY"]=" 1" +D["HAVE_ALLOCA_H"]=" 1" +D["HAVE_ALLOCA"]=" 1" +D["SIZEOF_DOUBLE"]=" 8" +D["SIZEOF_LONG_DOUBLE"]=" 8" +D["SYMBOL_UNDERSCORE"]=" 1" +D["FFI_EXEC_TRAMPOLINE_TABLE"]=" 1" +D["HAVE_RO_EH_FRAME"]=" 1" +D["EH_FRAME_FLAGS"]=" \"a\"" +D["HAVE_HIDDEN_VISIBILITY_ATTRIBUTE"]=" 1" + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*([\t (]|$)/ { + line = $ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. 
+ test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} + ac_datarootdir_hack=' + s&@datadir@&${datarootdir}&g + s&@docdir@&${datarootdir}/doc/${PACKAGE_TARNAME}&g + s&@infodir@&${datarootdir}/info&g + s&@localedir@&${datarootdir}/locale&g + s&@mandir@&${datarootdir}/man&g + s&\${datarootdir}&${prefix}/share&g' ;; +esac +ac_sed_extra=" + +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. +_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "buildir":C) ac_top_srcdir="$ax_enable_builddir_srcdir" + if test ".$ax_enable_builddir" = ".." ; then + if test -f "$top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - left untouched" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - left untouched" >&6;} + else + { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - not created" >&5 +$as_echo "$as_me: skipping top_srcdir/Makefile - not created" >&6;} + fi + else + if test -f "$ac_top_srcdir/Makefile" ; then + a=`grep "^VERSION " "$ac_top_srcdir/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$ac_top_srcdir/Makefile" + fi + if test -f "$ac_top_srcdir/Makefile" ; then + echo "$ac_top_srcdir/Makefile : $ac_top_srcdir/Makefile.in" > $tmp/conftemp.mk + echo " @ echo 'REMOVED,,,' >\$@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$ac_top_srcdir/Makefile" >/dev/null + then rm $ac_top_srcdir/Makefile ; fi + cp $tmp/conftemp.mk $ac_top_srcdir/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$ac_top_srcdir/Makefile" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: create top_srcdir/Makefile guessed from local Makefile" >&5 +$as_echo "$as_me: create top_srcdir/Makefile guessed from local Makefile" >&6;} + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[ ]*[\\#]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[:=]/!d +/^\\./d +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/ \\1 \\1-all\\2/g +s/^\\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/\\1 \\1-all\\2/ +s/ / /g +/^all all-all[ :]/i\\ +all-configured : all-all +s/ [a-zA-Z0-9-]*-all [a-zA-Z0-9-]*-all-all//g +/-all-all/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +/dist-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/dist-[a-zA-Z0-9]*-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +/distclean-all *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefile.sed~" ## DEBUGGING + $ax_enable_builddir_sed -f $tmp/conftemp.sed Makefile >$ac_top_srcdir/Makefile + if test -f "$ac_top_srcdir/Makefile.mk" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&5 +$as_echo "$as_me: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&6;} + cat $ac_top_srcdir/Makefile.mk >>$ac_top_srcdir/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$ac_top_srcdir/Makefile + # sanity check + if grep '^; echo "MAKE ' $ac_top_srcdir/Makefile >/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: buggy sed found - it deletes tab in \"a\" text parts" >&5 +$as_echo "$as_me: buggy sed found - it deletes tab in \"a\" text parts" >&6;} + $ax_enable_builddir_sed -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $ac_top_srcdir/Makefile \ + >$ac_top_srcdir/Makefile~ + (test -s $ac_top_srcdir/Makefile~ && mv $ac_top_srcdir/Makefile~ $ac_top_srcdir/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [^|]* | *$ax_enable_builddir *\$!$xxxx ...... $ax_enable_builddir!" 
>$tmp/conftemp.sed + $ax_enable_builddir_sed -f "$tmp/conftemp.sed" "$ac_top_srcdir/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$ac_top_srcdir/makefiles.out~" ## DEBUGGING + if cmp -s "$ac_top_srcdir/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: keeping top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: keeping top_srcdir/Makefile from earlier configure" >&6;} + rm "$tmp/mkfile.tmp" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: reusing top_srcdir/Makefile from earlier configure" >&5 +$as_echo "$as_me: reusing top_srcdir/Makefile from earlier configure" >&6;} + mv "$tmp/mkfile.tmp" "$ac_top_srcdir/Makefile" + fi + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&5 +$as_echo "$as_me: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&6;} + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$ax_enable_builddir" >>$ac_top_srcdir/Makefile + fi + ;; + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + case $CONFIG_FILES in #( + *\'*) : + eval set x "$CONFIG_FILES" ;; #( + *) : + set x $CONFIG_FILES ;; #( + *) : + ;; +esac + shift + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf + do + # Strip MF so we end up with the name of the file. + am_mf=`$as_echo "$am_mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`$as_dirname -- "$am_mf" || +$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$am_mf" : 'X\(//\)[^/]' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$am_mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + am_filepart=`$as_basename -- "$am_mf" || +$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$am_mf" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { echo "$as_me:$LINENO: cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles" >&5 + (cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } || am_rc=$? + done + if test $am_rc -ne 0; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. 
Try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking). +See \`config.log' for more details" "$LINENO" 5; } + fi + { am_dirpart=; unset am_dirpart;} + { am_filepart=; unset am_filepart;} + { am_mf=; unset am_mf;} + { am_rc=; unset am_rc;} + rm -f conftest-deps.mk +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec=$shared_archive_member_spec + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. 
+EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. 
+NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. 
+link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. 
+prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. 
+CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. 
+link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + "include":C) test -d include || mkdir include ;; + "src":C) +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/doc/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/doc/Makefile new file mode 100644 index 0000000..5924b9e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/doc/Makefile @@ -0,0 +1,815 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# doc/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = aarch64-apple-darwin21.3.0 +host_triplet = aarch64-apple-darwin21 +target_triplet = aarch64-apple-darwin21 +subdir = doc +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/version.texi \ + $(srcdir)/stamp-vti $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +AM_V_DVIPS = $(am__v_DVIPS_$(V)) +am__v_DVIPS_ = $(am__v_DVIPS_$(AM_DEFAULT_VERBOSITY)) +am__v_DVIPS_0 = @echo " DVIPS " $@; +am__v_DVIPS_1 = +AM_V_MAKEINFO = $(am__v_MAKEINFO_$(V)) +am__v_MAKEINFO_ = $(am__v_MAKEINFO_$(AM_DEFAULT_VERBOSITY)) +am__v_MAKEINFO_0 = @echo " 
MAKEINFO" $@; +am__v_MAKEINFO_1 = +AM_V_INFOHTML = $(am__v_INFOHTML_$(V)) +am__v_INFOHTML_ = $(am__v_INFOHTML_$(AM_DEFAULT_VERBOSITY)) +am__v_INFOHTML_0 = @echo " INFOHTML" $@; +am__v_INFOHTML_1 = +AM_V_TEXI2DVI = $(am__v_TEXI2DVI_$(V)) +am__v_TEXI2DVI_ = $(am__v_TEXI2DVI_$(AM_DEFAULT_VERBOSITY)) +am__v_TEXI2DVI_0 = @echo " TEXI2DVI" $@; +am__v_TEXI2DVI_1 = +AM_V_TEXI2PDF = $(am__v_TEXI2PDF_$(V)) +am__v_TEXI2PDF_ = $(am__v_TEXI2PDF_$(AM_DEFAULT_VERBOSITY)) +am__v_TEXI2PDF_0 = @echo " TEXI2PDF" $@; +am__v_TEXI2PDF_1 = +AM_V_texinfo = $(am__v_texinfo_$(V)) +am__v_texinfo_ = $(am__v_texinfo_$(AM_DEFAULT_VERBOSITY)) +am__v_texinfo_0 = -q +am__v_texinfo_1 = +AM_V_texidevnull = $(am__v_texidevnull_$(V)) +am__v_texidevnull_ = $(am__v_texidevnull_$(AM_DEFAULT_VERBOSITY)) +am__v_texidevnull_0 = > /dev/null +am__v_texidevnull_1 = +INFO_DEPS = $(srcdir)/libffi.info +am__TEXINFO_TEX_DIR = $(srcdir) +DVIS = libffi.dvi +PDFS = libffi.pdf +PSS = libffi.ps +HTMLS = libffi.html +TEXINFOS = libffi.texi +TEXI2DVI = texi2dvi +TEXI2PDF = $(TEXI2DVI) --pdf --batch +MAKEINFOHTML = $(MAKEINFO) --html +AM_MAKEINFOHTMLFLAGS = $(AM_MAKEINFOFLAGS) +DVIPS = dvips +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__installdirs = "$(DESTDIR)$(infodir)" +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in mdate-sh texinfo.tex +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = : +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = nm +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = AARCH64 +TARGETDIR = aarch64 +TARGET_OBJ = src/aarch64/ffi.lo src/aarch64/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/doc +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = +ac_ct_CXX = g++ +ac_ct_DUMPBIN = link -dump +am__include = include +am__leading_dot = . 
+am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = aarch64-apple-darwin21.3.0 +build_alias = +build_cpu = aarch64 +build_os = darwin21.3.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = aarch64-apple-darwin21 +host_alias = aarch64-apple-darwin21 +host_cpu = aarch64 +host_os = darwin21 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = aarch64-apple-darwin21 +target_alias = aarch64-apple-darwin21 +target_cpu = aarch64 +target_os = darwin21 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +info_TEXINFOS = libffi.texi +all: all-am + +.SUFFIXES: +.SUFFIXES: .dvi .html .info .pdf .ps .texi +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu doc/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +.texi.info: + $(AM_V_MAKEINFO)restore=: && backupdir="$(am__leading_dot)am$$$$" && \ + am__cwd=`pwd` && $(am__cd) $(srcdir) && \ + rm -rf $$backupdir && mkdir $$backupdir && \ + if ($(MAKEINFO) --version) >/dev/null 2>&1; then \ + for f in $@ $@-[0-9] $@-[0-9][0-9] $(@:.info=).i[0-9] $(@:.info=).i[0-9][0-9]; do \ + if test -f $$f; then mv $$f $$backupdir; restore=mv; else :; fi; \ + done; \ + else :; fi && \ + cd "$$am__cwd"; \ + if $(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) \ + -o $@ $<; \ + then \ + rc=0; \ + $(am__cd) $(srcdir); \ + else \ + rc=$$?; \ + $(am__cd) $(srcdir) && \ + $$restore $$backupdir/* `echo "./$@" | sed 's|[^/]*$$||'`; \ + fi; \ + rm -rf $$backupdir; exit $$rc + +.texi.dvi: + $(AM_V_TEXI2DVI)TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ + MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir)' \ + $(TEXI2DVI) $(AM_V_texinfo) --build-dir=$(@:.dvi=.t2d) -o $@ $(AM_V_texidevnull) \ + $< + +.texi.pdf: + $(AM_V_TEXI2PDF)TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ + MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir)' \ + $(TEXI2PDF) $(AM_V_texinfo) --build-dir=$(@:.pdf=.t2p) -o $@ $(AM_V_texidevnull) \ + $< + +.texi.html: + $(AM_V_MAKEINFO)rm -rf $(@:.html=.htp) + $(AM_V_at)if $(MAKEINFOHTML) $(AM_MAKEINFOHTMLFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) \ + -o $(@:.html=.htp) $<; \ + then \ + rm -rf $@ && mv $(@:.html=.htp) $@; \ + else \ + rm -rf $(@:.html=.htp); exit 1; \ + fi +$(srcdir)/libffi.info: libffi.texi $(srcdir)/version.texi +libffi.dvi: libffi.texi $(srcdir)/version.texi +libffi.pdf: libffi.texi $(srcdir)/version.texi +libffi.html: libffi.texi $(srcdir)/version.texi +$(srcdir)/version.texi: # $(srcdir)/stamp-vti +$(srcdir)/stamp-vti: libffi.texi $(top_srcdir)/configure + @(dir=.; test -f ./libffi.texi || dir=$(srcdir); \ + set `$(SHELL) $(srcdir)/mdate-sh $$dir/libffi.texi`; \ + echo "@set UPDATED $$1 $$2 $$3"; \ + echo "@set UPDATED-MONTH $$2 $$3"; \ + echo "@set EDITION $(VERSION)"; \ + echo "@set VERSION $(VERSION)") > vti.tmp$$$$ && \ + (cmp -s vti.tmp$$$$ $(srcdir)/version.texi \ + || (echo "Updating $(srcdir)/version.texi" && \ + cp vti.tmp$$$$ $(srcdir)/version.texi.tmp$$$$ && \ + mv $(srcdir)/version.texi.tmp$$$$ $(srcdir)/version.texi)) && \ + rm -f vti.tmp$$$$ $(srcdir)/version.texi.$$$$ + @cp $(srcdir)/version.texi $@ + +mostlyclean-vti: + -rm -f vti.tmp* $(srcdir)/version.texi.tmp* + +maintainer-clean-vti: +# -rm -f $(srcdir)/stamp-vti $(srcdir)/version.texi +.dvi.ps: + $(AM_V_DVIPS)TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ + $(DVIPS) $(AM_V_texinfo) -o $@ $< + +uninstall-dvi-am: + @$(NORMAL_UNINSTALL) + @list='$(DVIS)'; test -n "$(dvidir)" || list=; \ + for p in $$list; do \ + 
$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(dvidir)/$$f'"; \ + rm -f "$(DESTDIR)$(dvidir)/$$f"; \ + done + +uninstall-html-am: + @$(NORMAL_UNINSTALL) + @list='$(HTMLS)'; test -n "$(htmldir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " rm -rf '$(DESTDIR)$(htmldir)/$$f'"; \ + rm -rf "$(DESTDIR)$(htmldir)/$$f"; \ + done + +uninstall-info-am: + @$(PRE_UNINSTALL) + @if test -d '$(DESTDIR)$(infodir)' && $(am__can_run_installinfo); then \ + list='$(INFO_DEPS)'; \ + for file in $$list; do \ + relfile=`echo "$$file" | sed 's|^.*/||'`; \ + echo " install-info --info-dir='$(DESTDIR)$(infodir)' --remove '$(DESTDIR)$(infodir)/$$relfile'"; \ + if install-info --info-dir="$(DESTDIR)$(infodir)" --remove "$(DESTDIR)$(infodir)/$$relfile"; \ + then :; else test ! -f "$(DESTDIR)$(infodir)/$$relfile" || exit 1; fi; \ + done; \ + else :; fi + @$(NORMAL_UNINSTALL) + @list='$(INFO_DEPS)'; \ + for file in $$list; do \ + relfile=`echo "$$file" | sed 's|^.*/||'`; \ + relfile_i=`echo "$$relfile" | sed 's|\.info$$||;s|$$|.i|'`; \ + (if test -d "$(DESTDIR)$(infodir)" && cd "$(DESTDIR)$(infodir)"; then \ + echo " cd '$(DESTDIR)$(infodir)' && rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]"; \ + rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]; \ + else :; fi); \ + done + +uninstall-pdf-am: + @$(NORMAL_UNINSTALL) + @list='$(PDFS)'; test -n "$(pdfdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(pdfdir)/$$f'"; \ + rm -f "$(DESTDIR)$(pdfdir)/$$f"; \ + done + +uninstall-ps-am: + @$(NORMAL_UNINSTALL) + @list='$(PSS)'; test -n "$(psdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(psdir)/$$f'"; \ + rm -f "$(DESTDIR)$(psdir)/$$f"; \ + done + +dist-info: $(INFO_DEPS) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + list='$(INFO_DEPS)'; \ + for base in $$list; do \ + case $$base in \ + $(srcdir)/*) base=`echo "$$base" | sed "s|^$$srcdirstrip/||"`;; \ + esac; \ + if test -f $$base; then d=.; else d=$(srcdir); fi; \ + base_i=`echo "$$base" | sed 's|\.info$$||;s|$$|.i|'`; \ + for file in $$d/$$base $$d/$$base-[0-9] $$d/$$base-[0-9][0-9] $$d/$$base_i[0-9] $$d/$$base_i[0-9][0-9]; do \ + if test -f $$file; then \ + relfile=`expr "$$file" : "$$d/\(.*\)"`; \ + test -f "$(distdir)/$$relfile" || \ + cp -p $$file "$(distdir)/$$relfile"; \ + else :; fi; \ + done; \ + done + +mostlyclean-aminfo: + -rm -rf libffi.t2d libffi.t2p + +clean-aminfo: + -test -z "libffi.dvi libffi.pdf libffi.ps libffi.html" \ + || rm -rf libffi.dvi libffi.pdf libffi.ps libffi.html + +maintainer-clean-aminfo: + @list='$(INFO_DEPS)'; for i in $$list; do \ + i_i=`echo "$$i" | sed 's|\.info$$||;s|$$|.i|'`; \ + echo " rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]"; \ + rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]; \ + done +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do 
\ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-info +check-am: all-am +check: check-am +all-am: Makefile $(INFO_DEPS) +installdirs: + for dir in "$(DESTDIR)$(infodir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-aminfo clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: $(DVIS) + +html: html-am + +html-am: $(HTMLS) + +info: info-am + +info-am: $(INFO_DEPS) + +install-data-am: install-info-am + +install-dvi: install-dvi-am + +install-dvi-am: $(DVIS) + @$(NORMAL_INSTALL) + @list='$(DVIS)'; test -n "$(dvidir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(dvidir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(dvidir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(dvidir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(dvidir)" || exit $$?; \ + done +install-exec-am: + +install-html: install-html-am + +install-html-am: $(HTMLS) + @$(NORMAL_INSTALL) + @list='$(HTMLS)'; list2=; test -n "$(htmldir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(htmldir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p" || test -d "$$p"; then d=; else d="$(srcdir)/"; fi; \ + $(am__strip_dir) \ + d2=$$d$$p; \ + if test -d "$$d2"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)/$$f'"; \ + $(MKDIR_P) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \ + echo " $(INSTALL_DATA) '$$d2'/* '$(DESTDIR)$(htmldir)/$$f'"; \ + $(INSTALL_DATA) "$$d2"/* "$(DESTDIR)$(htmldir)/$$f" || exit $$?; \ + else \ + list2="$$list2 $$d2"; \ + fi; \ + done; \ + test -z "$$list2" || { echo "$$list2" | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(htmldir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(htmldir)" || exit $$?; \ + done; } +install-info: install-info-am + +install-info-am: $(INFO_DEPS) + @$(NORMAL_INSTALL) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + list='$(INFO_DEPS)'; test -n "$(infodir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(infodir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(infodir)" || exit 1; \ + fi; \ + for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + esac; \ + if test -f $$file; then d=.; else d=$(srcdir); fi; \ + file_i=`echo "$$file" | sed 's|\.info$$||;s|$$|.i|'`; \ + for ifile in $$d/$$file $$d/$$file-[0-9] $$d/$$file-[0-9][0-9] \ + $$d/$$file_i[0-9] $$d/$$file_i[0-9][0-9] ; do \ + if test -f $$ifile; then \ + echo "$$ifile"; \ + else : ; fi; \ + done; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(infodir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(infodir)" || exit $$?; done + @$(POST_INSTALL) + @if $(am__can_run_installinfo); then \ + list='$(INFO_DEPS)'; test -n "$(infodir)" || list=; \ + for file in $$list; do \ + relfile=`echo "$$file" | sed 's|^.*/||'`; \ + echo " install-info --info-dir='$(DESTDIR)$(infodir)' '$(DESTDIR)$(infodir)/$$relfile'";\ + install-info --info-dir="$(DESTDIR)$(infodir)" "$(DESTDIR)$(infodir)/$$relfile" || :;\ + done; \ + else : ; fi +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: $(PDFS) + @$(NORMAL_INSTALL) + @list='$(PDFS)'; test -n "$(pdfdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pdfdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pdfdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done 
| $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pdfdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pdfdir)" || exit $$?; done +install-ps: install-ps-am + +install-ps-am: $(PSS) + @$(NORMAL_INSTALL) + @list='$(PSS)'; test -n "$(psdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(psdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(psdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(psdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(psdir)" || exit $$?; done +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-aminfo \ + maintainer-clean-generic maintainer-clean-vti + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-aminfo mostlyclean-generic \ + mostlyclean-libtool mostlyclean-vti + +pdf: pdf-am + +pdf-am: $(PDFS) + +ps: ps-am + +ps-am: $(PSS) + +uninstall-am: uninstall-dvi-am uninstall-html-am uninstall-info-am \ + uninstall-pdf-am uninstall-ps-am + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-aminfo clean-generic \ + clean-libtool cscopelist-am ctags-am dist-info distclean \ + distclean-generic distclean-libtool distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-aminfo maintainer-clean-generic \ + maintainer-clean-vti mostlyclean mostlyclean-aminfo \ + mostlyclean-generic mostlyclean-libtool mostlyclean-vti pdf \ + pdf-am ps ps-am tags-am uninstall uninstall-am \ + uninstall-dvi-am uninstall-html-am uninstall-info-am \ + uninstall-pdf-am uninstall-ps-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/fficonfig.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/fficonfig.h new file mode 100644 index 0000000..484f26d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/fficonfig.h @@ -0,0 +1,224 @@ +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "a" + +/* Define this if you want extra debugging. 
*/ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +#define FFI_EXEC_TRAMPOLINE_TABLE 1 + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +/* #undef FFI_MMAP_EXEC_WRIT */ + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. */ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +/* #undef HAVE_AS_CFI_PSEUDO_OP */ + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. */ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +/* #undef HAVE_AS_X86_PCREL */ + +/* Define to 1 if you have the <dlfcn.h> header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the <inttypes.h> header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +/* #undef HAVE_LONG_DOUBLE */ + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the `memfd_create' function. */ +/* #undef HAVE_MEMFD_CREATE */ + +/* Define to 1 if you have the <memory.h> header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +#define HAVE_MKOSTEMP 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if your compiler supports pointer authentication. */ +/* #undef HAVE_PTRAUTH */ + +/* Define if .eh_frame sections should be read-only. */ +#define HAVE_RO_EH_FRAME 1 + +/* Define to 1 if you have the <stdint.h> header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the <stdlib.h> header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the <strings.h> header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the <string.h> header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the <sys/memfd.h> header file. */ +/* #undef HAVE_SYS_MEMFD_H */ + +/* Define to 1 if you have the <sys/mman.h> header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the <sys/types.h> header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the <unistd.h> header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. 
*/ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 8 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if <sys/types.h> does not define. */ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/Makefile new file mode 100644 index 0000000..1559f7d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/Makefile @@ -0,0 +1,608 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# include/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = aarch64-apple-darwin21.3.0 +host_triplet = aarch64-apple-darwin21 +target_triplet = aarch64-apple-darwin21 +subdir = include +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = ffi.h +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = 
$(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(includedir)" +HEADERS = $(nodist_include_HEADERS) $(noinst_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/ffi.h.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = : +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = nm +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = AARCH64 +TARGETDIR = aarch64 +TARGET_OBJ = src/aarch64/ffi.lo src/aarch64/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = +ac_ct_CXX = g++ +ac_ct_DUMPBIN = link -dump +am__include = include +am__leading_dot = . 
+am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = aarch64-apple-darwin21.3.0 +build_alias = +build_cpu = aarch64 +build_os = darwin21.3.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = aarch64-apple-darwin21 +host_alias = aarch64-apple-darwin21 +host_cpu = aarch64 +host_os = darwin21 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = aarch64-apple-darwin21 +target_alias = aarch64-apple-darwin21 +target_cpu = aarch64 +target_os = darwin21 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +DISTCLEANFILES = ffitarget.h +noinst_HEADERS = ffi_common.h ffi_cfi.h +EXTRA_DIST = ffi.h.in +nodist_include_HEADERS = ffi.h ffitarget.h +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign include/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +ffi.h: $(top_builddir)/config.status $(srcdir)/ffi.h.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-nodist_includeHEADERS: $(nodist_include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \ + done + +uninstall-nodist_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed 
'/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-nodist_includeHEADERS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-nodist_includeHEADERS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ + clean-libtool cscopelist-am ctags ctags-am distclean \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man \ + install-nodist_includeHEADERS install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ + uninstall-am uninstall-nodist_includeHEADERS + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/ffi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/ffi.h new file mode 100644 index 0000000..ad7a2fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/ffi.h @@ -0,0 +1,523 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3 - Copyright (c) 2011, 2014, 2019 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef AARCH64 +#define AARCH64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include <ffitarget.h> + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include <stddef.h> +#include <limits.h> + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 0 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 0 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +#if !FFI_NATIVE_RAW_API +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue) __attribute__((deprecated)); +#endif + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) __attribute__((deprecated)); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +#if defined(PA_LINUX) || defined(PA_HPUX) +#define FFI_CLOSURE_PTR(X) ((void *)((unsigned int)(X) | 2)) +#define FFI_RESTORE_PTR(X) ((void *)((unsigned int)(X) & ~3)) +#else +#define FFI_CLOSURE_PTR(X) (X) +#define FFI_RESTORE_PTR(X) (X) +#endif + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 1 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +#if !FFI_NATIVE_RAW_API +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) __attribute__((deprecated)); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) __attribute__((deprecated)); +#endif + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 0 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/ffitarget.h new file mode 100644 index 0000000..d5622e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/include/ffitarget.h @@ -0,0 +1,97 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +#ifdef __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#elif defined(_WIN32) +#define FFI_SIZEOF_ARG 8 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_WIN64, + FFI_LAST_ABI, +#if defined(_WIN32) + FFI_DEFAULT_ABI = FFI_WIN64 +#else + FFI_DEFAULT_ABI = FFI_SYSV +#endif + } ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#ifdef _WIN32 +#define FFI_EXTRA_CIF_FIELDS unsigned is_variadic +#endif +#define FFI_TARGET_SPECIFIC_VARIADIC + +/* ---- Internal ---- */ + +#if defined (__APPLE__) +#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs +#elif !defined(_WIN32) +/* iOS and Windows reserve x18 for the system. Disable Go closures until + a new static chain is chosen. */ +#define FFI_GO_CLOSURES 1 +#endif + +#ifndef _WIN32 +/* No complex type on Windows */ +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi.la b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi.la new file mode 100644 index 0000000..896b4cf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi.la @@ -0,0 +1,41 @@ +# libffi.la - a libtool library file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='libffi.8.dylib' + +# Names of this library. +library_names='libffi.8.dylib libffi.dylib' + +# The name of the static archive. +old_library='' + +# Linker flags that cannot go in dependency_libs. 
+inherited_linker_flags=' ' + +# Libraries that this one depends upon. +dependency_libs=' -L/usr/local/opt/libffi/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libffi. +current=9 +age=1 +revision=0 + +# Is this an already installed library? +installed=no + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='/usr/local/lib' diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi.pc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi.pc new file mode 100644 index 0000000..1f9d5ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi.pc @@ -0,0 +1,11 @@ +prefix=/usr/local +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +toolexeclibdir=${libdir} +includedir=${prefix}/include + +Name: libffi +Description: Library supporting Foreign Function Interfaces +Version: 3.3 +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi_convenience.la b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi_convenience.la new file mode 100644 index 0000000..aeb5cf0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libffi_convenience.la @@ -0,0 +1,41 @@ +# libffi_convenience.la - a libtool library file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='' + +# Names of this library. +library_names='' + +# The name of the static archive. +old_library='libffi_convenience.a' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags=' ' + +# Libraries that this one depends upon. +dependency_libs=' -L/usr/local/opt/libffi/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libffi_convenience. +current= +age= +revision= + +# Is this an already installed library? +installed=no + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='' diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libtool b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libtool new file mode 100755 index 0000000..4bc852a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/libtool @@ -0,0 +1,11909 @@ +#! /bin/sh +# Generated automatically by config.status (libffi) 3.3 +# Libtool was configured on host Arthurs-MacBook-Air.local: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# The names of the tagged configurations supported by this script. +available_tags='CXX ' + +# Configured defaults for sys_lib_dlsearch_path munging. +: ${LT_SYS_LIBRARY_PATH=""} + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=2.4.6 +macro_revision=2.4.6 + +# Whether or not to build shared libraries. +build_libtool_libs=yes + +# Whether or not to build static libraries. +build_old_libs=no + +# What type of objects to build. +pic_mode=yes + +# Whether or not to optimize for fast installation. +fast_install=needless + +# Shared archive member basename,for filename based shared library versioning on AIX. +shared_archive_member_spec= + +# Shell to use when invoking shell scripts. +SHELL="/bin/sh" + +# An echo program that protects backslashes. +ECHO="printf %s\\n" + +# The PATH separator for the build system. +PATH_SEPARATOR=":" + +# The host system. +host_alias=aarch64-apple-darwin21 +host=aarch64-apple-darwin21 +host_os=darwin21 + +# The build system. +build_alias= +build=aarch64-apple-darwin21.3.0 +build_os=darwin21.3.0 + +# A sed program that does not truncate output. +SED="/usr/bin/sed" + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP="/usr/bin/grep" + +# An ERE matcher. +EGREP="/usr/bin/grep -E" + +# A literal string matcher. +FGREP="/usr/bin/grep -F" + +# A BSD- or MS-compatible name lister. +NM="nm" + +# Whether we need soft or hard links. +LN_S="ln -s" + +# What is the maximum length of a command? +max_cmd_len=786432 + +# Object file suffix (normally "o"). +objext=o + +# Executable file suffix (normally ""). +exeext= + +# whether the shell understands "unset". +lt_unset=unset + +# turn spaces into newlines. +SP2NL="tr \\040 \\012" + +# turn newlines into spaces. +NL2SP="tr \\015\\012 \\040\\040" + +# convert $build file names to $host format. +to_host_file_cmd=func_convert_file_noop + +# convert $build files to toolchain format. +to_tool_file_cmd=func_convert_file_noop + +# An object symbol dumper. +OBJDUMP="objdump" + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method="pass_all" + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd="\$MAGIC_CMD" + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob="" + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob="no" + +# DLL creation program. +DLLTOOL="false" + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd="printf %s\\n" + +# The archiver. 
+AR="ar" + +# Flags to create an archive. +AR_FLAGS="cru" + +# How to feed a file listing to the archiver. +archiver_list_spec="" + +# A symbol stripping program. +STRIP="strip" + +# Commands used to install an old-style archive. +RANLIB="ranlib" +old_postinstall_cmds="chmod 644 \$oldlib~\$RANLIB \$tool_oldlib" +old_postuninstall_cmds="" + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=yes + +# A C compiler. +LTCC="gcc -fdeclspec" + +# LTCC compiler flags. +LTCFLAGS=" -Wall -fexceptions" + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe="sed -n -e 's/^.*[ ]\\([BCDEGRST][BCDEGRST]*\\)[ ][ ]*_\\([_A-Za-z][_A-Za-z0-9]*\\)\$/\\1 _\\2 \\2/p' | sed '/ __gnu_lto/d'" + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl="sed -n -e 's/^T .* \\(.*\\)\$/extern int \\1();/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/extern char \\1;/p'" + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import="" + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address="sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p'" + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \\(.*\\) .*\$/ {\"\\1\", (void *) 0},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(lib.*\\)\$/ {\"\\1\", (void *) \\&\\1},/p' -e 's/^[BCDEGRST][BCDEGRST]* .* \\(.*\\)\$/ {\"lib\\1\", (void *) \\&\\1},/p'" + +# The name lister interface. +nm_interface="BSD nm" + +# Specify filename containing input files for $NM. +nm_file_list_spec="@" + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot= + +# Command to truncate a binary pipe. +lt_truncate_bin="/bin/dd bs=4096 count=1" + +# The name of the directory that contains temporary libtool files. +objdir=.libs + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=file + +# Must we lock files when doing compilation? +need_locks="no" + +# Manifest tool. +MANIFEST_TOOL=":" + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL="dsymutil" + +# Tool to change global to local symbols on Mac OS X. +NMEDIT="nmedit" + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO="lipo" + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL="otool" + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=":" + +# Old archive suffix (normally "a"). +libext=a + +# Shared library suffix (normally ".so"). +shrext_cmds="\`test .\$module = .yes && echo .so || echo .dylib\`" + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds="" + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink="PATH DYLD_LIBRARY_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + +# Do we need the "lib" prefix for modules? +need_lib_prefix=no + +# Do we need a version for libraries? +need_version=no + +# Library versioning type. +version_type=darwin + +# Shared library runtime path variable. +runpath_var= + +# Shared library path variable. +shlibpath_var=DYLD_LIBRARY_PATH + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=yes + +# Format of library name prefix. 
+libname_spec="lib\$name" + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec="\$libname\$release\$major\$shared_ext \$libname\$shared_ext" + +# The coded name of the library, if different from the real name. +soname_spec="\$libname\$release\$major\$shared_ext" + +# Permission mode override for installation of shared libraries. +install_override_mode="" + +# Command to use after installation of a shared archive. +postinstall_cmds="" + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds="" + +# Commands used to finish a libtool library installation in a directory. +finish_cmds="" + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval="" + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=no + +# Compile-time system search path for libraries. +sys_lib_search_path_spec="/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/13.0.0 /usr/local/lib" + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec="/usr/local/lib /lib /usr/lib" + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path="" + +# Whether dlopen is supported. +dlopen_support=unknown + +# Whether dlopen of programs is supported. +dlopen_self=unknown + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=unknown + +# Commands to strip libraries. +old_striplib="strip -S" +striplib="strip -x" + + +# The linker used to build libraries. +LD="ld" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="gcc -fdeclspec" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin -fno-rtti -fno-exceptions" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. 
+archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-flat_namespace \$wl-undefined \${wl}suppress" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs="" + +# Dependencies to place before and after the objects being linked to +# create a shared library. 
+predep_objects="" +postdep_objects="" +predeps="" +postdeps="" + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path="" + +# ### END LIBTOOL CONFIG + + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +#! /bin/sh +## DO NOT EDIT - This file generated from ./build-aux/ltmain.in +## by inline-source v2014-01-03.01 + +# libtool (GNU libtool) 2.4.6 +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.6 Debian-2.4.6-14" +package_revision=2.4.6 + + +## ------ ## +## Usage. ## +## ------ ## + +# Run './libtool --help' for help with using this script from the +# command line. + + +## ------------------------------- ## +## User overridable command paths. 
## +## ------------------------------- ## + +# After configure completes, it has a better idea of some of the +# shell tools we need than the defaults used by the functions shared +# with bootstrap, so set those here where they can still be over- +# ridden by the user, but otherwise take precedence. + +: ${AUTOCONF="autoconf"} +: ${AUTOMAKE="automake"} + + +## -------------------------- ## +## Source external libraries. ## +## -------------------------- ## + +# Much of our low-level functionality needs to be sourced from external +# libraries, which are installed to $pkgauxdir. + +# Set a version string for this script. +scriptversion=2015-01-20.17; # UTC + +# General shell script boiler plate, and helper functions. +# Written by Gary V. Vaughan, 2004 + +# Copyright (C) 2004-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. + +# As a special exception to the GNU General Public License, if you distribute +# this file as part of a program or library that is built using GNU Libtool, +# you may include this file under the same distribution terms that you use +# for the rest of that program. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# Evaluate this file near the top of your script to gain access to +# the functions and variables defined here: +# +# . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh +# +# If you need to override any of the default environment variable +# settings, do that before evaluating this file. + + +## -------------------- ## +## Shell normalisation. ## +## -------------------- ## + +# Some shells need a little help to be as Bourne compatible as possible. +# Before doing anything else, make sure all that help has been provided! + +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac +fi + +# NLS nuisances: We save the old values in case they are required later. +_G_user_locale= +_G_safe_locale= +for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test set = \"\${$_G_var+set}\"; then + save_$_G_var=\$$_G_var + $_G_var=C + export $_G_var + _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" + _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" + fi" +done + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Make sure IFS has a sensible default +sp=' ' +nl=' +' +IFS="$sp $nl" + +# There are apparently some retarded systems that use ';' as a PATH separator! 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + + +## ------------------------- ## +## Locate command utilities. ## +## ------------------------- ## + + +# func_executable_p FILE +# ---------------------- +# Check that FILE is an executable regular file. +func_executable_p () +{ + test -f "$1" && test -x "$1" +} + + +# func_path_progs PROGS_LIST CHECK_FUNC [PATH] +# -------------------------------------------- +# Search for either a program that responds to --version with output +# containing "GNU", or else returned by CHECK_FUNC otherwise, by +# trying all the directories in PATH with each of the elements of +# PROGS_LIST. +# +# CHECK_FUNC should accept the path to a candidate program, and +# set $func_check_prog_result if it truncates its output less than +# $_G_path_prog_max characters. +func_path_progs () +{ + _G_progs_list=$1 + _G_check_func=$2 + _G_PATH=${3-"$PATH"} + + _G_path_prog_max=0 + _G_path_prog_found=false + _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} + for _G_dir in $_G_PATH; do + IFS=$_G_save_IFS + test -z "$_G_dir" && _G_dir=. + for _G_prog_name in $_G_progs_list; do + for _exeext in '' .EXE; do + _G_path_prog=$_G_dir/$_G_prog_name$_exeext + func_executable_p "$_G_path_prog" || continue + case `"$_G_path_prog" --version 2>&1` in + *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; + *) $_G_check_func $_G_path_prog + func_path_progs_result=$func_check_prog_result + ;; + esac + $_G_path_prog_found && break 3 + done + done + done + IFS=$_G_save_IFS + test -z "$func_path_progs_result" && { + echo "no acceptable sed could be found in \$PATH" >&2 + exit 1 + } +} + + +# We want to be able to use the functions in this file before configure +# has figured out where the best binaries are kept, which means we have +# to search for them ourselves - except when the results are already set +# where we skip the searches. + +# Unless the user overrides by setting SED, search the path for either GNU +# sed, or the sed that truncates its output the least. +test -z "$SED" && { + _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for _G_i in 1 2 3 4 5 6 7; do + _G_sed_script=$_G_sed_script$nl$_G_sed_script + done + echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed + _G_sed_script= + + func_check_prog_sed () + { + _G_path_prog=$1 + + _G_count=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo '' >> conftest.nl + "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin + rm -f conftest.sed + SED=$func_path_progs_result +} + + +# Unless the user overrides by setting GREP, search the path for either GNU +# grep, or the grep that truncates its output the least. 
+test -z "$GREP" && { + func_check_prog_grep () + { + _G_path_prog=$1 + + _G_count=0 + _G_path_prog_max=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo 'GREP' >> conftest.nl + "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin + GREP=$func_path_progs_result +} + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# All uppercase variable names are used for environment variables. These +# variables can be overridden by the user before calling a script that +# uses them if a suitable command of that name is not already available +# in the command search PATH. + +: ${CP="cp -f"} +: ${ECHO="printf %s\n"} +: ${EGREP="$GREP -E"} +: ${FGREP="$GREP -F"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} + + +## -------------------- ## +## Useful sed snippets. ## +## -------------------- ## + +sed_dirname='s|/[^/]*$||' +sed_basename='s|^.*/||' + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s|\([`"$\\]\)|\\\1|g' + +# Same as above, but do not quote variable references. +sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' + +# Sed substitution that converts a w32 file name or path +# that contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-'\' parameter expansions in output of sed_double_quote_subst that +# were '\'-ed in input to the same. If an odd number of '\' preceded a +# '$' in input to sed_double_quote_subst, that '$' was protected from +# expansion. Since each input '\' is now two '\'s, look for any number +# of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. +_G_bs='\\' +_G_bs2='\\\\' +_G_bs4='\\\\\\\\' +_G_dollar='\$' +sed_double_backslash="\ + s/$_G_bs4/&\\ +/g + s/^$_G_bs2$_G_dollar/$_G_bs&/ + s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g + s/\n//g" + + +## ----------------- ## +## Global variables. ## +## ----------------- ## + +# Except for the global variables explicitly listed below, the following +# functions in the '^func_' namespace, and the '^require_' namespace +# variables initialised in the 'Resource management' section, sourcing +# this file will not pollute your global namespace with anything +# else. There's no portable way to scope variables in Bourne shell +# though, so actually running these functions will sometimes place +# results into a variable named after the function, and often use +# temporary variables in the '^_G_' namespace. 
If you are careful to +# avoid using those namespaces casually in your sourcing script, things +# should continue to work as you expect. And, of course, you can freely +# overwrite any of the functions or variables defined here before +# calling anything to customize them. + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +# Allow overriding, eg assuming that you follow the convention of +# putting '$debug_cmd' at the start of all your functions, you can get +# bash to show function call trace with: +# +# debug_cmd='echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +debug_cmd=${debug_cmd-":"} +exit_cmd=: + +# By convention, finish your script with: +# +# exit $exit_status +# +# so that you can set exit_status to non-zero if you want to indicate +# something went wrong during execution without actually bailing out at +# the point of failure. +exit_status=$EXIT_SUCCESS + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath=$0 + +# The name of this program. +progname=`$ECHO "$progpath" |$SED "$sed_basename"` + +# Make sure we have an absolute progpath for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` + progdir=`cd "$progdir" && pwd` + progpath=$progdir/$progname + ;; + *) + _G_IFS=$IFS + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS=$_G_IFS + test -x "$progdir/$progname" && break + done + IFS=$_G_IFS + test -n "$progdir" || progdir=`pwd` + progpath=$progdir/$progname + ;; +esac + + +## ----------------- ## +## Standard options. ## +## ----------------- ## + +# The following options affect the operation of the functions defined +# below, and should be set appropriately depending on run-time para- +# meters passed on the command line. + +opt_dry_run=false +opt_quiet=false +opt_verbose=false + +# Categories 'all' and 'none' are always available. Append any others +# you will pass as the first argument to func_warning from your own +# code. +warning_categories= + +# By default, display warnings according to 'opt_warning_types'. Set +# 'warning_func' to ':' to elide all warnings, or func_fatal_error to +# treat the next displayed warning as a fatal error. +warning_func=func_warn_and_continue + +# Set to 'all' to display all warnings, 'none' to suppress all +# warnings, or a space delimited list of some subset of +# 'warning_categories' to display only the listed warnings. +opt_warning_types=all + + +## -------------------- ## +## Resource management. ## +## -------------------- ## + +# This section contains definitions for functions that each ensure a +# particular resource (a file, or a non-empty configuration variable for +# example) is available, and if appropriate to extract default values +# from pertinent package files. Call them using their associated +# 'require_*' variable to ensure that they are executed, at most, once. +# +# It's entirely deliberate that calling these functions can set +# variables that don't obey the namespace limitations obeyed by the rest +# of this file, in order that that they be as useful as possible to +# callers. 
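+# Editor's note (not part of upstream libtool): the 'require_*' idiom the
+# section above describes is easiest to see in a minimal sketch.  The names
+# below (require_example_resource, func_require_example_resource,
+# example_resource) are hypothetical; callers always dispatch through the
+# variable, so the lookup runs at most once per invocation:
+#
+#   # require_example_resource
+#   # ------------------------
+#   # Locate a hypothetical resource once; afterwards the dispatch variable
+#   # is replaced by ':' so repeat calls are no-ops.
+#   require_example_resource=func_require_example_resource
+#   func_require_example_resource ()
+#   {
+#     $debug_cmd
+#
+#     # Expensive lookup goes here; respect a value the caller already set.
+#     : ${example_resource="/dev/null"}
+#
+#     # Disable this function so the lookup never reruns.
+#     require_example_resource=:
+#   }
+#
+#   # Usage from a sourcing script:
+#   $require_example_resource
+#   echo "resource is $example_resource"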
+ + +# require_term_colors +# ------------------- +# Allow display of bold text on terminals that support it. +require_term_colors=func_require_term_colors +func_require_term_colors () +{ + $debug_cmd + + test -t 1 && { + # COLORTERM and USE_ANSI_COLORS environment variables take + # precedence, because most terminfo databases neglect to describe + # whether color sequences are supported. + test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} + + if test 1 = "$USE_ANSI_COLORS"; then + # Standard ANSI escape sequences + tc_reset='' + tc_bold=''; tc_standout='' + tc_red=''; tc_green='' + tc_blue=''; tc_cyan='' + else + # Otherwise trust the terminfo database after all. + test -n "`tput sgr0 2>/dev/null`" && { + tc_reset=`tput sgr0` + test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` + tc_standout=$tc_bold + test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` + test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` + test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` + test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` + test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` + } + fi + } + + require_term_colors=: +} + + +## ----------------- ## +## Function library. ## +## ----------------- ## + +# This section contains a variety of useful functions to call in your +# scripts. Take note of the portable wrappers for features provided by +# some modern shells, which will fall back to slower equivalents on +# less featureful shells. + + +# func_append VAR VALUE +# --------------------- +# Append VALUE onto the existing contents of VAR. + + # We should try to minimise forks, especially on Windows where they are + # unreasonably slow, so skip the feature probes when bash or zsh are + # being used: + if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then + : ${_G_HAVE_ARITH_OP="yes"} + : ${_G_HAVE_XSI_OPS="yes"} + # The += operator was introduced in bash 3.1 + case $BASH_VERSION in + [12].* | 3.0 | 3.0*) ;; + *) + : ${_G_HAVE_PLUSEQ_OP="yes"} + ;; + esac + fi + + # _G_HAVE_PLUSEQ_OP + # Can be empty, in which case the shell is probed, "yes" if += is + # useable or anything else if it does not work. + test -z "$_G_HAVE_PLUSEQ_OP" \ + && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ + && _G_HAVE_PLUSEQ_OP=yes + +if test yes = "$_G_HAVE_PLUSEQ_OP" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_append () + { + $debug_cmd + + eval "$1+=\$2" + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_append () + { + $debug_cmd + + eval "$1=\$$1\$2" + } +fi + + +# func_append_quoted VAR VALUE +# ---------------------------- +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +if test yes = "$_G_HAVE_PLUSEQ_OP"; then + eval 'func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1+=\\ \$func_quote_for_eval_result" + }' +else + func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1=\$$1\\ \$func_quote_for_eval_result" + } +fi + + +# func_append_uniq VAR VALUE +# -------------------------- +# Append unique VALUE onto the existing contents of VAR, assuming +# entries are delimited by the first character of VALUE. 
For example: +# +# func_append_uniq options " --another-option option-argument" +# +# will only append to $options if " --another-option option-argument " +# is not already present somewhere in $options already (note spaces at +# each end implied by leading space in second argument). +func_append_uniq () +{ + $debug_cmd + + eval _G_current_value='`$ECHO $'$1'`' + _G_delim=`expr "$2" : '\(.\)'` + + case $_G_delim$_G_current_value$_G_delim in + *"$2$_G_delim"*) ;; + *) func_append "$@" ;; + esac +} + + +# func_arith TERM... +# ------------------ +# Set func_arith_result to the result of evaluating TERMs. + test -z "$_G_HAVE_ARITH_OP" \ + && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ + && _G_HAVE_ARITH_OP=yes + +if test yes = "$_G_HAVE_ARITH_OP"; then + eval 'func_arith () + { + $debug_cmd + + func_arith_result=$(( $* )) + }' +else + func_arith () + { + $debug_cmd + + func_arith_result=`expr "$@"` + } +fi + + +# func_basename FILE +# ------------------ +# Set func_basename_result to FILE with everything up to and including +# the last / stripped. +if test yes = "$_G_HAVE_XSI_OPS"; then + # If this shell supports suffix pattern removal, then use it to avoid + # forking. Hide the definitions single quotes in case the shell chokes + # on unsupported syntax... + _b='func_basename_result=${1##*/}' + _d='case $1 in + */*) func_dirname_result=${1%/*}$2 ;; + * ) func_dirname_result=$3 ;; + esac' + +else + # ...otherwise fall back to using sed. + _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' + _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` + if test "X$func_dirname_result" = "X$1"; then + func_dirname_result=$3 + else + func_append func_dirname_result "$2" + fi' +fi + +eval 'func_basename () +{ + $debug_cmd + + '"$_b"' +}' + + +# func_dirname FILE APPEND NONDIR_REPLACEMENT +# ------------------------------------------- +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +eval 'func_dirname () +{ + $debug_cmd + + '"$_d"' +}' + + +# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT +# -------------------------------------------------------- +# Perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# For efficiency, we do not delegate to the functions above but instead +# duplicate the functionality here. +eval 'func_dirname_and_basename () +{ + $debug_cmd + + '"$_b"' + '"$_d"' +}' + + +# func_echo ARG... +# ---------------- +# Echo program name prefixed message. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_echo_all ARG... +# -------------------- +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + + +# func_echo_infix_1 INFIX ARG... +# ------------------------------ +# Echo program name, followed by INFIX on the first line, with any +# additional lines not showing INFIX. 
+func_echo_infix_1 () +{ + $debug_cmd + + $require_term_colors + + _G_infix=$1; shift + _G_indent=$_G_infix + _G_prefix="$progname: $_G_infix: " + _G_message=$* + + # Strip color escape sequences before counting printable length + for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" + do + test -n "$_G_tc" && { + _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` + _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` + } + done + _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes + + func_echo_infix_1_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_infix_1_IFS + $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 + _G_prefix=$_G_indent + done + IFS=$func_echo_infix_1_IFS +} + + +# func_error ARG... +# ----------------- +# Echo program name prefixed message to standard error. +func_error () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 +} + + +# func_fatal_error ARG... +# ----------------------- +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + $debug_cmd + + func_error "$*" + exit $EXIT_FAILURE +} + + +# func_grep EXPRESSION FILENAME +# ----------------------------- +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $debug_cmd + + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_len STRING +# --------------- +# Set func_len_result to the length of STRING. STRING may not +# start with a hyphen. + test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_len () + { + $debug_cmd + + func_len_result=${#1} + }' +else + func_len () + { + $debug_cmd + + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` + } +fi + + +# func_mkdir_p DIRECTORY-PATH +# --------------------------- +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + $debug_cmd + + _G_directory_path=$1 + _G_dir_list= + + if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then + + # Protect directory names starting with '-' + case $_G_directory_path in + -*) _G_directory_path=./$_G_directory_path ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$_G_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + _G_dir_list=$_G_directory_path:$_G_dir_list + + # If the last portion added has no slash in it, the list is done + case $_G_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` + done + _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` + + func_mkdir_p_IFS=$IFS; IFS=: + for _G_dir in $_G_dir_list; do + IFS=$func_mkdir_p_IFS + # mkdir can fail with a 'File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$_G_dir" 2>/dev/null || : + done + IFS=$func_mkdir_p_IFS + + # Bail out if we (or some other process) failed to create a directory. 
+ test -d "$_G_directory_path" || \ + func_fatal_error "Failed to create '$1'" + fi +} + + +# func_mktempdir [BASENAME] +# ------------------------- +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, BASENAME is the basename for that directory. +func_mktempdir () +{ + $debug_cmd + + _G_template=${TMPDIR-/tmp}/${1-$progname} + + if test : = "$opt_dry_run"; then + # Return a directory name, but don't create it in dry-run mode + _G_tmpdir=$_G_template-$$ + else + + # If mktemp works, use that first and foremost + _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` + + if test ! -d "$_G_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + _G_tmpdir=$_G_template-${RANDOM-0}$$ + + func_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$_G_tmpdir" + umask $func_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$_G_tmpdir" || \ + func_fatal_error "cannot create temporary directory '$_G_tmpdir'" + fi + + $ECHO "$_G_tmpdir" +} + + +# func_normal_abspath PATH +# ------------------------ +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +func_normal_abspath () +{ + $debug_cmd + + # These SED scripts presuppose an absolute path with a trailing slash. + _G_pathcar='s|^/\([^/]*\).*$|\1|' + _G_pathcdr='s|^/[^/]*||' + _G_removedotparts=':dotsl + s|/\./|/|g + t dotsl + s|/\.$|/|' + _G_collapseslashes='s|/\{1,\}|/|g' + _G_finalslash='s|/*$|/|' + + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` + while :; do + # Processed it all yet? + if test / = "$func_normal_abspath_tpath"; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result"; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. 
+ func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + + +# func_notquiet ARG... +# -------------------- +# Echo program name prefixed message only when not in quiet mode. +func_notquiet () +{ + $debug_cmd + + $opt_quiet || func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + + +# func_relative_path SRCDIR DSTDIR +# -------------------------------- +# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. +func_relative_path () +{ + $debug_cmd + + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=$func_dirname_result + if test -z "$func_relative_path_tlibdir"; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test -n "$func_stripname_result"; then + func_append func_relative_path_result "/$func_stripname_result" + fi + + # Normalisation. If bindir is libdir, return '.' else relative path. + if test -n "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + fi + + test -n "$func_relative_path_result" || func_relative_path_result=. + + : +} + + +# func_quote_for_eval ARG... +# -------------------------- +# Aesthetically quote ARGs to be evaled later. +# This function returns two values: +# i) func_quote_for_eval_result +# double-quoted, suitable for a subsequent eval +# ii) func_quote_for_eval_unquoted_result +# has all characters that are still active within double +# quotes backslashified. 
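+# An illustrative sketch (the variable 'compile_cmd' is hypothetical):
+#
+#   func_quote_for_eval "-DGREETING=\"hello world\""
+#   func_append compile_cmd " $func_quote_for_eval_result"
+#
+# keeps the embedded quotes and the space intact, so a later
+# 'eval "$compile_cmd"' still sees a single argument.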
+func_quote_for_eval () +{ + $debug_cmd + + func_quote_for_eval_unquoted_result= + func_quote_for_eval_result= + while test 0 -lt $#; do + case $1 in + *[\\\`\"\$]*) + _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; + *) + _G_unquoted_arg=$1 ;; + esac + if test -n "$func_quote_for_eval_unquoted_result"; then + func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" + else + func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" + fi + + case $_G_unquoted_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and variable expansion + # for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_quoted_arg=\"$_G_unquoted_arg\" + ;; + *) + _G_quoted_arg=$_G_unquoted_arg + ;; + esac + + if test -n "$func_quote_for_eval_result"; then + func_append func_quote_for_eval_result " $_G_quoted_arg" + else + func_append func_quote_for_eval_result "$_G_quoted_arg" + fi + shift + done +} + + +# func_quote_for_expand ARG +# ------------------------- +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + $debug_cmd + + case $1 in + *[\\\`\"]*) + _G_arg=`$ECHO "$1" | $SED \ + -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; + *) + _G_arg=$1 ;; + esac + + case $_G_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_arg=\"$_G_arg\" + ;; + esac + + func_quote_for_expand_result=$_G_arg +} + + +# func_stripname PREFIX SUFFIX NAME +# --------------------------------- +# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_stripname () + { + $debug_cmd + + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary variable first. + func_stripname_result=$3 + func_stripname_result=${func_stripname_result#"$1"} + func_stripname_result=${func_stripname_result%"$2"} + }' +else + func_stripname () + { + $debug_cmd + + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; + esac + } +fi + + +# func_show_eval CMD [FAIL_EXP] +# ----------------------------- +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + func_quote_for_expand "$_G_cmd" + eval "func_notquiet $func_quote_for_expand_result" + + $opt_dry_run || { + eval "$_G_cmd" + _G_status=$? + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_show_eval_locale CMD [FAIL_EXP] +# ------------------------------------ +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. 
If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + $opt_quiet || { + func_quote_for_expand "$_G_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + $opt_dry_run || { + eval "$_G_user_locale + $_G_cmd" + _G_status=$? + eval "$_G_safe_locale" + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_tr_sh +# ---------- +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + $debug_cmd + + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_verbose ARG... +# ------------------- +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $debug_cmd + + $opt_verbose && func_echo "$*" + + : +} + + +# func_warn_and_continue ARG... +# ----------------------------- +# Echo program name prefixed warning message to standard error. +func_warn_and_continue () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 +} + + +# func_warning CATEGORY ARG... +# ---------------------------- +# Echo program name prefixed warning message to standard error. Warning +# messages can be filtered according to CATEGORY, where this function +# elides messages where CATEGORY is not listed in the global variable +# 'opt_warning_types'. +func_warning () +{ + $debug_cmd + + # CATEGORY must be in the warning_categories list! + case " $warning_categories " in + *" $1 "*) ;; + *) func_internal_error "invalid warning category '$1'" ;; + esac + + _G_category=$1 + shift + + case " $opt_warning_types " in + *" $_G_category "*) $warning_func ${1+"$@"} ;; + esac +} + + +# func_sort_ver VER1 VER2 +# ----------------------- +# 'sort -V' is not generally available. +# Note this deviates from the version comparison in automake +# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a +# but this should suffice as we won't be specifying old +# version formats or redundant trailing .0 in bootstrap.conf. +# If we did want full compatibility then we should probably +# use m4_version_compare from autoconf. +func_sort_ver () +{ + $debug_cmd + + printf '%s\n%s\n' "$1" "$2" \ + | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n +} + +# func_lt_ver PREV CURR +# --------------------- +# Return true if PREV and CURR are in the correct order according to +# func_sort_ver, otherwise false. Use it like this: +# +# func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." +func_lt_ver () +{ + $debug_cmd + + test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: +#! /bin/sh + +# Set a version string for this script. +scriptversion=2015-10-07.11; # UTC + +# A portable, pluggable option parser for Bourne shell. +# Written by Gary V. Vaughan, 2010 + +# Copyright (C) 2010-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. 
There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# This file is a library for parsing options in your shell scripts along +# with assorted other useful supporting features that you can make use +# of too. +# +# For the simplest scripts you might need only: +# +# #!/bin/sh +# . relative/path/to/funclib.sh +# . relative/path/to/options-parser +# scriptversion=1.0 +# func_options ${1+"$@"} +# eval set dummy "$func_options_result"; shift +# ...rest of your script... +# +# In order for the '--version' option to work, you will need to have a +# suitably formatted comment like the one at the top of this file +# starting with '# Written by ' and ending with '# warranty; '. +# +# For '-h' and '--help' to work, you will also need a one line +# description of your script's purpose in a comment directly above the +# '# Written by ' line, like the one at the top of this file. +# +# The default options also support '--debug', which will turn on shell +# execution tracing (see the comment above debug_cmd below for another +# use), and '--verbose' and the func_verbose function to allow your script +# to display verbose messages only when your user has specified +# '--verbose'. +# +# After sourcing this file, you can plug processing for additional +# options by amending the variables from the 'Configuration' section +# below, and following the instructions in the 'Option parsing' +# section further down. + +## -------------- ## +## Configuration. ## +## -------------- ## + +# You should override these variables in your script after sourcing this +# file so that they reflect the customisations you have added to the +# option parser. + +# The usage line for option parsing errors and the start of '-h' and +# '--help' output messages. You can embed shell variables for delayed +# expansion at the time the message is displayed, but you will need to +# quote other shell meta-characters carefully to prevent them being +# expanded when the contents are evaled. +usage='$progpath [OPTION]...' + +# Short help message in response to '-h' and '--help'. Add to this or +# override it after sourcing this library to reflect the full set of +# options your script accepts. +usage_message="\ + --debug enable verbose shell tracing + -W, --warnings=CATEGORY + report the warnings falling in CATEGORY [all] + -v, --verbose verbosely report processing + --version print version information and exit + -h, --help print short or long help message and exit +" + +# Additional text appended to 'usage_message' in response to '--help'. +long_help_message=" +Warning categories include: + 'all' show all warnings + 'none' turn off all the warnings + 'error' warnings are treated as fatal errors" + +# Help message printed before fatal option parsing errors. 
+fatal_help="Try '\$progname --help' for more information." + + + +## ------------------------- ## +## Hook function management. ## +## ------------------------- ## + +# This section contains functions for adding, removing, and running hooks +# to the main code. A hook is just a named list of of function, that can +# be run in order later on. + +# func_hookable FUNC_NAME +# ----------------------- +# Declare that FUNC_NAME will run hooks added with +# 'func_add_hook FUNC_NAME ...'. +func_hookable () +{ + $debug_cmd + + func_append hookable_fns " $1" +} + + +# func_add_hook FUNC_NAME HOOK_FUNC +# --------------------------------- +# Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must +# first have been declared "hookable" by a call to 'func_hookable'. +func_add_hook () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not accept hook functions." ;; + esac + + eval func_append ${1}_hooks '" $2"' +} + + +# func_remove_hook FUNC_NAME HOOK_FUNC +# ------------------------------------ +# Remove HOOK_FUNC from the list of functions called by FUNC_NAME. +func_remove_hook () +{ + $debug_cmd + + eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' +} + + +# func_run_hooks FUNC_NAME [ARG]... +# --------------------------------- +# Run all hook functions registered to FUNC_NAME. +# It is assumed that the list of hook functions contains nothing more +# than a whitespace-delimited list of legal shell function names, and +# no effort is wasted trying to catch shell meta-characters or preserve +# whitespace. +func_run_hooks () +{ + $debug_cmd + + _G_rc_run_hooks=false + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not support hook funcions.n" ;; + esac + + eval _G_hook_fns=\$$1_hooks; shift + + for _G_hook in $_G_hook_fns; do + if eval $_G_hook '"$@"'; then + # store returned options list back into positional + # parameters for next 'cmd' execution. + eval _G_hook_result=\$${_G_hook}_result + eval set dummy "$_G_hook_result"; shift + _G_rc_run_hooks=: + fi + done + + $_G_rc_run_hooks && func_run_hooks_result=$_G_hook_result +} + + + +## --------------- ## +## Option parsing. ## +## --------------- ## + +# In order to add your own option parsing hooks, you must accept the +# full positional parameter list in your hook function, you may remove/edit +# any options that you action, and then pass back the remaining unprocessed +# options in '_result', escaped suitably for +# 'eval'. In this case you also must return $EXIT_SUCCESS to let the +# hook's caller know that it should pay attention to +# '_result'. Returning $EXIT_FAILURE signalizes that +# arguments are left untouched by the hook and therefore caller will ignore the +# result variable. +# +# Like this: +# +# my_options_prep () +# { +# $debug_cmd +# +# # Extend the existing usage message. +# usage_message=$usage_message' +# -s, --silent don'\''t print informational messages +# ' +# # No change in '$@' (ignored completely by this hook). There is +# # no need to do the equivalent (but slower) action: +# # func_quote_for_eval ${1+"$@"} +# # my_options_prep_result=$func_quote_for_eval_result +# false +# } +# func_add_hook func_options_prep my_options_prep +# +# +# my_silent_option () +# { +# $debug_cmd +# +# args_changed=false +# +# # Note that for efficiency, we parse as many options as we can +# # recognise in a loop before passing the remainder back to the +# # caller on the first unrecognised argument we encounter. 
+# while test $# -gt 0; do +# opt=$1; shift +# case $opt in +# --silent|-s) opt_silent=: +# args_changed=: +# ;; +# # Separate non-argument short options: +# -s*) func_split_short_opt "$_G_opt" +# set dummy "$func_split_short_opt_name" \ +# "-$func_split_short_opt_arg" ${1+"$@"} +# shift +# args_changed=: +# ;; +# *) # Make sure the first unrecognised option "$_G_opt" +# # is added back to "$@", we could need that later +# # if $args_changed is true. +# set dummy "$_G_opt" ${1+"$@"}; shift; break ;; +# esac +# done +# +# if $args_changed; then +# func_quote_for_eval ${1+"$@"} +# my_silent_option_result=$func_quote_for_eval_result +# fi +# +# $args_changed +# } +# func_add_hook func_parse_options my_silent_option +# +# +# my_option_validation () +# { +# $debug_cmd +# +# $opt_silent && $opt_verbose && func_fatal_help "\ +# '--silent' and '--verbose' options are mutually exclusive." +# +# false +# } +# func_add_hook func_validate_options my_option_validation +# +# You'll also need to manually amend $usage_message to reflect the extra +# options you parse. It's preferable to append if you can, so that +# multiple option parsing hooks can be added safely. + + +# func_options_finish [ARG]... +# ---------------------------- +# Finishing the option parse loop (call 'func_options' hooks ATM). +func_options_finish () +{ + $debug_cmd + + _G_func_options_finish_exit=false + if func_run_hooks func_options ${1+"$@"}; then + func_options_finish_result=$func_run_hooks_result + _G_func_options_finish_exit=: + fi + + $_G_func_options_finish_exit +} + + +# func_options [ARG]... +# --------------------- +# All the functions called inside func_options are hookable. See the +# individual implementations for details. +func_hookable func_options +func_options () +{ + $debug_cmd + + _G_rc_options=false + + for my_func in options_prep parse_options validate_options options_finish + do + if eval func_$my_func '${1+"$@"}'; then + eval _G_res_var='$'"func_${my_func}_result" + eval set dummy "$_G_res_var" ; shift + _G_rc_options=: + fi + done + + # Save modified positional parameters for caller. As a top-level + # options-parser function we always need to set the 'func_options_result' + # variable (regardless the $_G_rc_options value). + if $_G_rc_options; then + func_options_result=$_G_res_var + else + func_quote_for_eval ${1+"$@"} + func_options_result=$func_quote_for_eval_result + fi + + $_G_rc_options +} + + +# func_options_prep [ARG]... +# -------------------------- +# All initialisations required before starting the option parse loop. +# Note that when calling hook functions, we pass through the list of +# positional parameters. If a hook function modifies that list, and +# needs to propagate that back to rest of this script, then the complete +# modified list must be put in 'func_run_hooks_result' before +# returning $EXIT_SUCCESS (otherwise $EXIT_FAILURE is returned). +func_hookable func_options_prep +func_options_prep () +{ + $debug_cmd + + # Option defaults: + opt_verbose=false + opt_warning_types= + + _G_rc_options_prep=false + if func_run_hooks func_options_prep ${1+"$@"}; then + _G_rc_options_prep=: + # save modified positional parameters for caller + func_options_prep_result=$func_run_hooks_result + fi + + $_G_rc_options_prep +} + + +# func_parse_options [ARG]... +# --------------------------- +# The main option parsing loop. 
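+# For example (the command line is hypothetical), the arguments
+# '-vW error --' are handled below by splitting '-vW' into '-v' plus
+# '-W error': verbose mode is enabled, warnings become fatal errors,
+# and '--' stops option parsing.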
+func_hookable func_parse_options +func_parse_options () +{ + $debug_cmd + + func_parse_options_result= + + _G_rc_parse_options=false + # this just eases exit handling + while test $# -gt 0; do + # Defer to hook functions for initial option parsing, so they + # get priority in the event of reusing an option name. + if func_run_hooks func_parse_options ${1+"$@"}; then + eval set dummy "$func_run_hooks_result"; shift + _G_rc_parse_options=: + fi + + # Break out of the loop if we already parsed every option. + test $# -gt 0 || break + + _G_match_parse_options=: + _G_opt=$1 + shift + case $_G_opt in + --debug|-x) debug_cmd='set -x' + func_echo "enabling shell trace mode" + $debug_cmd + ;; + + --no-warnings|--no-warning|--no-warn) + set dummy --warnings none ${1+"$@"} + shift + ;; + + --warnings|--warning|-W) + if test $# = 0 && func_missing_arg $_G_opt; then + _G_rc_parse_options=: + break + fi + case " $warning_categories $1" in + *" $1 "*) + # trailing space prevents matching last $1 above + func_append_uniq opt_warning_types " $1" + ;; + *all) + opt_warning_types=$warning_categories + ;; + *none) + opt_warning_types=none + warning_func=: + ;; + *error) + opt_warning_types=$warning_categories + warning_func=func_fatal_error + ;; + *) + func_fatal_error \ + "unsupported warning category: '$1'" + ;; + esac + shift + ;; + + --verbose|-v) opt_verbose=: ;; + --version) func_version ;; + -\?|-h) func_usage ;; + --help) func_help ;; + + # Separate optargs to long options (plugins may need this): + --*=*) func_split_equals "$_G_opt" + set dummy "$func_split_equals_lhs" \ + "$func_split_equals_rhs" ${1+"$@"} + shift + ;; + + # Separate optargs to short options: + -W*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-v*|-x*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) _G_rc_parse_options=: ; break ;; + -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift + _G_match_parse_options=false + break + ;; + esac + + $_G_match_parse_options && _G_rc_parse_options=: + done + + + if $_G_rc_parse_options; then + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + func_parse_options_result=$func_quote_for_eval_result + fi + + $_G_rc_parse_options +} + + +# func_validate_options [ARG]... +# ------------------------------ +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +func_hookable func_validate_options +func_validate_options () +{ + $debug_cmd + + _G_rc_validate_options=false + + # Display all warnings if -W was not given. + test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" + + if func_run_hooks func_validate_options ${1+"$@"}; then + # save modified positional parameters for caller + func_validate_options_result=$func_run_hooks_result + _G_rc_validate_options=: + fi + + # Bail if the options were screwed! + $exit_cmd $EXIT_FAILURE + + $_G_rc_validate_options +} + + + +## ----------------- ## +## Helper functions. ## +## ----------------- ## + +# This section contains the helper functions used by the rest of the +# hookable option parser framework in ascii-betical order. + + +# func_fatal_help ARG... +# ---------------------- +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. 
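+# A minimal usage sketch (the message text is hypothetical):
+#
+#   func_fatal_help "unrecognised option: '--frobnicate'"
+#
+# prints the usage line, the help hint, and the error message, then
+# exits with $EXIT_FAILURE.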
+func_fatal_help () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + eval \$ECHO \""$fatal_help"\" + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + + +# func_help +# --------- +# Echo long help message to standard output and exit. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message" + exit 0 +} + + +# func_missing_arg ARGNAME +# ------------------------ +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $debug_cmd + + func_error "Missing argument for '$1'." + exit_cmd=exit +} + + +# func_split_equals STRING +# ------------------------ +# Set func_split_equals_lhs and func_split_equals_rhs shell variables after +# splitting STRING at the '=' sign. +test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=${1%%=*} + func_split_equals_rhs=${1#*=} + test "x$func_split_equals_lhs" = "x$1" \ + && func_split_equals_rhs= + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` + func_split_equals_rhs= + test "x$func_split_equals_lhs" = "x$1" \ + || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` + } +fi #func_split_equals + + +# func_split_short_opt SHORTOPT +# ----------------------------- +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"} + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` + func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` + } +fi #func_split_short_opt + + +# func_usage +# ---------- +# Echo short help message to standard output and exit. +func_usage () +{ + $debug_cmd + + func_usage_message + $ECHO "Run '$progname --help |${PAGER-more}' for full usage" + exit 0 +} + + +# func_usage_message +# ------------------ +# Echo short help message to standard output. +func_usage_message () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + echo + $SED -n 's|^# || + /^Written by/{ + x;p;x + } + h + /^Written by/q' < "$progpath" + echo + eval \$ECHO \""$usage_message"\" +} + + +# func_version +# ------------ +# Echo version message to standard output and exit. +func_version () +{ + $debug_cmd + + printf '%s\n' "$progname $scriptversion" + $SED -n ' + /(C)/!b go + :more + /\./!{ + N + s|\n# | | + b more + } + :go + /^# Written by /,/# warranty; / { + s|^# || + s|^# *$|| + s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| + p + } + /^# Written by / { + s|^# || + p + } + /^warranty; /q' < "$progpath" + + exit $? +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: + +# Set a version string. +scriptversion='(GNU libtool) 2.4.6' + + +# func_echo ARG... 
+# ---------------- +# Libtool also displays the current mode in messages, so override +# funclib.sh func_echo with this custom definition. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_warning ARG... +# ------------------- +# Libtool warnings are not categorized, so override funclib.sh +# func_warning with this simpler definition. +func_warning () +{ + $debug_cmd + + $warning_func ${1+"$@"} +} + + +## ---------------- ## +## Options parsing. ## +## ---------------- ## + +# Hook in the functions to make sure our own options are parsed during +# the option parsing loop. + +usage='$progpath [OPTION]... [MODE-ARG]...' + +# Short help message in response to '-h'. +usage_message="Options: + --config show all configuration variables + --debug enable verbose shell tracing + -n, --dry-run display commands without modifying any files + --features display basic configuration information and exit + --mode=MODE use operation mode MODE + --no-warnings equivalent to '-Wnone' + --preserve-dup-deps don't remove duplicate dependency libraries + --quiet, --silent don't print informational messages + --tag=TAG use configuration variables from tag TAG + -v, --verbose print more informational messages than default + --version print version information + -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] + -h, --help, --help-all print short, long, or detailed help message +" + +# Additional text appended to 'usage_message' in response to '--help'. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message + +MODE must be one of the following: + + clean remove files from the build directory + compile compile a source file into a libtool object + execute automatically set library path, then run a program + finish complete the installation of libtool libraries + install install libraries or executables + link create a library or an executable + uninstall remove libraries from an installed directory + +MODE-ARGS vary depending on the MODE. When passed as first option, +'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that. +Try '$progname --help --mode=MODE' for a more detailed description of MODE. + +When reporting a bug, please describe a test case to reproduce it and +include the following information: + + host-triplet: $host + shell: $SHELL + compiler: $LTCC + compiler flags: $LTCFLAGS + linker: $LD (gnu? $with_gnu_ld) + version: $progname $scriptversion Debian-2.4.6-14 + automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q` + autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q` + +Report bugs to . +GNU libtool home page: . +General help using GNU software: ." + exit 0 +} + + +# func_lo2o OBJECT-NAME +# --------------------- +# Transform OBJECT-NAME from a '.lo' suffix to the platform specific +# object suffix. + +lo2o=s/\\.lo\$/.$objext/ +o2lo=s/\\.$objext\$/.lo/ + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_lo2o () + { + case $1 in + *.lo) func_lo2o_result=${1%.lo}.$objext ;; + * ) func_lo2o_result=$1 ;; + esac + }' + + # func_xform LIBOBJ-OR-SOURCE + # --------------------------- + # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise) + # suffix to a '.lo' libtool-object suffix. + eval 'func_xform () + { + func_xform_result=${1%.*}.lo + }' +else + # ...otherwise fall back to using sed. 
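+ # (Either implementation performs the same mapping: with objext=o,
+ # func_lo2o turns 'foo.lo' into 'foo.o', and func_xform turns
+ # 'bar.cpp' into 'bar.lo'.  The file names here are only examples.)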
+ func_lo2o () + { + func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"` + } + + func_xform () + { + func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'` + } +fi + + +# func_fatal_configuration ARG... +# ------------------------------- +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func__fatal_error ${1+"$@"} \ + "See the $PACKAGE documentation for more information." \ + "Fatal configuration error." +} + + +# func_config +# ----------- +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + + +# func_features +# ------------- +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test yes = "$build_libtool_libs"; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test yes = "$build_old_libs"; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + + +# func_enable_tag TAGNAME +# ----------------------- +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname=$1 + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf=/$re_begincf/,/$re_endcf/p + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + + +# func_check_version_match +# ------------------------ +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. 
This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# libtool_options_prep [ARG]... +# ----------------------------- +# Preparation for options parsed by libtool. +libtool_options_prep () +{ + $debug_mode + + # Option defaults: + opt_config=false + opt_dlopen= + opt_dry_run=false + opt_help=false + opt_mode= + opt_preserve_dup_deps=false + opt_quiet=false + + nonopt= + preserve_args= + + _G_rc_lt_options_prep=: + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + *) + _G_rc_lt_options_prep=false + ;; + esac + + if $_G_rc_lt_options_prep; then + # Pass back the list of options. + func_quote_for_eval ${1+"$@"} + libtool_options_prep_result=$func_quote_for_eval_result + fi + + $_G_rc_lt_options_prep +} +func_add_hook func_options_prep libtool_options_prep + + +# libtool_parse_options [ARG]... +# --------------------------------- +# Provide handling for libtool specific options. +libtool_parse_options () +{ + $debug_cmd + + _G_rc_lt_parse_options=false + + # Perform our own loop to consume as many options as possible in + # each iteration. 
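+ # (For instance, '--mode compile' and '--tag CXX' are consumed here,
+ # while anything unrecognised is pushed back onto "$@" for the generic
+ # funclib parser above, which also splits '--tag=CXX' into
+ # '--tag CXX' for a later pass through this hook.)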
+ while test $# -gt 0; do + _G_match_lt_parse_options=: + _G_opt=$1 + shift + case $_G_opt in + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + + --config) func_config ;; + + --dlopen|-dlopen) + opt_dlopen="${opt_dlopen+$opt_dlopen +}$1" + shift + ;; + + --preserve-dup-deps) + opt_preserve_dup_deps=: ;; + + --features) func_features ;; + + --finish) set dummy --mode finish ${1+"$@"}; shift ;; + + --help) opt_help=: ;; + + --help-all) opt_help=': help-all' ;; + + --mode) test $# = 0 && func_missing_arg $_G_opt && break + opt_mode=$1 + case $1 in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $_G_opt" + exit_cmd=exit + break + ;; + esac + shift + ;; + + --no-silent|--no-quiet) + opt_quiet=false + func_append preserve_args " $_G_opt" + ;; + + --no-warnings|--no-warning|--no-warn) + opt_warning=false + func_append preserve_args " $_G_opt" + ;; + + --no-verbose) + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --silent|--quiet) + opt_quiet=: + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --tag) test $# = 0 && func_missing_arg $_G_opt && break + opt_tag=$1 + func_append preserve_args " $_G_opt $1" + func_enable_tag "$1" + shift + ;; + + --verbose|-v) opt_quiet=false + opt_verbose=: + func_append preserve_args " $_G_opt" + ;; + + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"} ; shift + _G_match_lt_parse_options=false + break + ;; + esac + $_G_match_lt_parse_options && _G_rc_lt_parse_options=: + done + + if $_G_rc_lt_parse_options; then + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + libtool_parse_options_result=$func_quote_for_eval_result + fi + + $_G_rc_lt_parse_options +} +func_add_hook func_parse_options libtool_parse_options + + + +# libtool_validate_options [ARG]... +# --------------------------------- +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +libtool_validate_options () +{ + # save first non-option argument + if test 0 -lt $#; then + nonopt=$1 + shift + fi + + # preserve --debug + test : = "$debug_cmd" || func_append preserve_args " --debug" + + case $host in + # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 + # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + test yes != "$build_libtool_libs" \ + && test yes != "$build_old_libs" \ + && func_fatal_configuration "not configured to build any kind of library" + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test execute != "$opt_mode"; then + func_error "unrecognized option '-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help=$help + help="Try '$progname --help --mode=$opt_mode' for more information." 
+ } + + # Pass back the unparsed argument list + func_quote_for_eval ${1+"$@"} + libtool_validate_options_result=$func_quote_for_eval_result +} +func_add_hook func_validate_options libtool_validate_options + + +# Process options as early as possible so that --help and --version +# can return quickly. +func_options ${1+"$@"} +eval set dummy "$func_options_result"; shift + + + +## ----------- ## +## Main. ## +## ----------- ## + +magic='%%%MAGIC variable%%%' +magic_exe='%%%MAGIC EXE variable%%%' + +# Global variables. +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# func_generated_by_libtool +# True iff stdin has been generated by Libtool. This function is only +# a basic sanity check; it will hardly flush out determined imposters. +func_generated_by_libtool_p () +{ + $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if 'file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case $lalib_p_line in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test yes = "$lalib_p" +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + test -f "$1" && + $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. 
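+# A usage sketch (the file name is hypothetical):
+#
+#   func_ltwrapper_p "$output_objdir/myprog" \
+#     && func_verbose "myprog is a libtool wrapper"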
+func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $debug_cmd + + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# 'FILE.' does not work on cygwin managed mounts. +func_source () +{ + $debug_cmd + + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case $lt_sysroot:$1 in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result='='$func_stripname_result + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $debug_cmd + + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. 
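+ # (For example, if a CXX tag was configured with CC='g++', a base
+ # compile command beginning 'g++ -c ...' selects tagname=CXX here
+ # even though no '--tag' option was given.  The compiler name is
+ # only an example.)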
+ tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with '--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=$1 + if test yes = "$build_libtool_libs"; then + write_lobj=\'$2\' + else + write_lobj=none + fi + + if test yes = "$build_old_libs"; then + write_oldobj=\'$3\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. +func_convert_core_path_wine_to_w32 () +{ + $debug_cmd + + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result= + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result"; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. 
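+# Illustrative only; the LT_CYGPATH value and path are hypothetical:
+#
+#   LT_CYGPATH=/usr/bin/cygpath
+#   func_cygpath -u 'C:/cygwin/tmp'   # func_cygpath_result='/tmp'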
+func_cygpath () +{ + $debug_cmd + + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" -ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $debug_cmd + + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $debug_cmd + + if test -z "$2" && test -n "$1"; then + func_error "Could not determine host file name corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result=$1 + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $debug_cmd + + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " '$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result=$3 + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $debug_cmd + + case $4 in + $1 ) func_to_host_path_result=$3$func_to_host_path_result + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via '$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $debug_cmd + + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. 
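+# Sketch of typical use (the LAZY value is an example): with
+# to_tool_file_cmd=func_convert_file_noop,
+#
+#   func_to_tool_file "$srcfile" func_convert_file_noop
+#
+# skips conversion, simply copying "$srcfile" into
+# $func_to_tool_file_result, because the conversion in use is listed
+# in LAZY.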
+func_to_tool_file () +{ + $debug_cmd + + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result=$1 +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result=$func_convert_core_msys_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result=$func_convert_core_file_wine_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via '$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. 
+# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $debug_cmd + + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd=func_convert_path_$func_stripname_result + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $debug_cmd + + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result=$1 +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. +func_convert_path_msys_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_msys_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. 
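+# (Illustrative note: with a default wine prefix, 'winepath -w /home/user/lib'
+# typically reports something like 'Z:\home\user\lib', because wine maps the
+# root of the *nix file system to the Z: drive.)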
+func_convert_path_nix_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_path_wine_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_path_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_dll_def_p FILE +# True iff FILE is a Windows DLL '.def' file. +# Keep in sync with _LT_DLL_DEF_P in libtool.m4 +func_dll_def_p () +{ + $debug_cmd + + func_dll_def_p_tmp=`$SED -n \ + -e 's/^[ ]*//' \ + -e '/^\(;.*\)*$/d' \ + -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ + -e q \ + "$1"` + test DEF = "$func_dll_def_p_tmp" +} + + +# func_mode_compile arg... +func_mode_compile () +{ + $debug_cmd + + # Get the compilation command and the source file. + base_compile= + srcfile=$nonopt # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg=$arg + arg_mode=normal + ;; + + target ) + libobj=$arg + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. 
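+      # Summary of the option handling below: '-o' names the libtool object,
+      # '-Xcompiler' passes the next argument to base_compile verbatim,
+      # '-Wc,a,b,c' is split on commas, and any other non-option argument
+      # becomes the new $srcfile (the previous candidate is appended to
+      # base_compile).  A typical invocation looks something like
+      #   libtool --mode=compile gcc -c foo.c
+      # which normally yields foo.lo together with the PIC and non-PIC
+      # objects built further down (illustrative command line, not taken
+      # from this script).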
+ case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify '-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs=$IFS; IFS=, + for arg in $args; do + IFS=$save_ifs + func_append_quoted lastarg "$arg" + done + IFS=$save_ifs + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg=$srcfile + srcfile=$arg + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with '-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj=$func_basename_result + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from '$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test yes = "$build_libtool_libs" \ + || func_fatal_configuration "cannot build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name '$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname=$func_basename_result + xdir=$func_dirname_result + lobj=$xdir$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. 
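+  # (With the default objdir of '.libs', compiling foo.c into foo.lo means
+  # removing any stale foo.o, .libs/foo.o, foo.lo and foo.loT here; foo.o is
+  # only listed when old/static libraries are being built as well.)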
+ if test yes = "$build_old_libs"; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test no = "$compiler_c_o"; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext + lockfile=$output_obj.lock + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test yes = "$need_locks"; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test warn = "$need_locks"; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test yes = "$build_libtool_libs"; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test no != "$pic_mode"; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. 
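+  # (Unless -no-suppress was given, the second, non-PIC compilation below has
+  # its output redirected to /dev/null so diagnostics are only shown once.)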
+ if test yes = "$suppress_opt"; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test yes = "$build_old_libs"; then + if test yes != "$pic_mode"; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test yes = "$compiler_c_o"; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test no != "$need_locks"; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test compile = "$opt_mode" && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a '.o' file suitable for static linking + -static only build a '.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a 'standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix '.c' with the +library object suffix, '.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. 
+ +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to '-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the '--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the 'install' or 'cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE use a list of object files found in FILE to specify objects + -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak 
LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with '-') are ignored. + +Every other argument is treated as a filename. Files ending in '.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in '.la', then a libtool library is created, +only library objects ('.lo' files) may be specified, and '-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created +using 'ar' and 'ranlib', or on Windows using 'lib'. + +If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode '$opt_mode'" + ;; + esac + + echo + $ECHO "Try '$progname --help' for more information about other modes." +} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test : = "$opt_help"; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | $SED -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + $SED '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $debug_cmd + + # The first argument is the command name. + cmd=$nonopt + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "'$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "'$file' was not linked with '-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir=$func_dirname_result + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." 
+ dir=$func_dirname_result + ;; + + *) + func_warning "'-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir=$absdir + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic=$magic + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file=$progdir/$program + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file=$progdir/$program + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if $opt_dry_run; then + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + else + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd=\$cmd$args + fi +} + +test execute = "$opt_mode" && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $debug_cmd + + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "'$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument '$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and '=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
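+  # ($opt_quiet is normally set by the global --quiet/--silent options; when
+  # it is set, the "Libraries have been installed in:" summary below is
+  # skipped.)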
+ $opt_quiet && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the '-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the '$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the '$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the '$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test finish = "$opt_mode" && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $debug_cmd + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac + then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=false + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=: ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test X-m = "X$prev" && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
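+    # ($install_prog always receives $arg; $install_shared_prog receives
+    # $arg2 instead when an '-m MODE' argument is being overridden by
+    # $install_override_mode, so shared libraries can be installed with a
+    # different mode.)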
+ func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the '$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=: + if $isdir; then + destdir=$dest + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir=$func_dirname_result + destname=$func_basename_result + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "'$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "'$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir=$func_dirname_result + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. 
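+          # (relink_command, as recorded in the .la file, contains an
+          # @inst_prefix_dir@ placeholder; substituting '-inst-prefix-dir'
+          # here lets the relink step resolve dependent libraries inside a
+          # DESTDIR-style staging area.)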
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking '$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname=$1 + shift + + srcname=$realname + test -n "$relink_command" && srcname=${realname}T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme=$stripme + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme= + ;; + esac + ;; + os2*) + case $realname in + *_dll.a) + tstripme= + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try 'ln -sf' first, because the 'ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib=$destdir/$realname + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name=$func_basename_result + instname=$dir/${name}i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest=$destfile + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to '$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test yes = "$build_old_libs"; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext= + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=.exe + fi + ;; + esac + + # Do a test to see if this is really a libtool program. 
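+      # (On cygwin/mingw the installed file may be the compiled wrapper
+      # executable; func_ltwrapper_scriptname locates its companion wrapper
+      # script under $objdir so it can be sourced below, while on other hosts
+      # the file itself is tested with func_ltwrapper_script_p.)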
+ case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script '$wrapper'" + + finalize=: + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "'$lib' has not been installed in '$libdir'" + finalize=false + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test no = "$fast_install" && test -n "$relink_command"; then + $opt_dry_run || { + if $finalize; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file=$func_basename_result + outputname=$tmpdir/$file + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_quiet || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink '$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file=$outputname + else + func_warning "cannot relink '$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name=$func_basename_result + + # Set up the ranlib parameters. + oldlib=$destdir/$name + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run '$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test install = "$opt_mode" && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. 
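+# The generated table is an array of {name, address} pairs named
+# lt_<originator>_LTX_preloaded_symbols (non-alphanumeric characters in
+# ORIGINATOR are mapped to '_'), which libltdl's dlpreopening support can
+# consult instead of the system dlopen.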
+func_generate_dlsyms () +{ + $debug_cmd + + my_outputname=$1 + my_originator=$2 + my_pic_p=${3-false} + my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms=${my_outputname}S.c + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist=$output_objdir/$my_outputname.nm + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. */\ +" + + if test yes = "$dlself"; then + func_verbose "generating symbol list for '$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
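+          # ($objs/$old_deplibs are rewritten through $lo2o first, so each
+          # foo.lo contributes its real foo.o object to the nm scan below.)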
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from '$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols=$output_objdir/$outputname.exp + $opt_dry_run || { + $RM $export_symbols + eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from '$dlprefile'" + func_basename "$dlprefile" + name=$func_basename_result + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename= + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname"; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename=$func_basename_result + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename"; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. 
+ test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + func_show_eval '$RM "${nlist}I"' + if test -n "$global_symbol_to_import"; then + eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I' + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[];\ +" + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ +static void lt_syminit(void) +{ + LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols; + for (; symbol->name; ++symbol) + {" + $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms" + echo >> "$output_objdir/$my_dlsyms" "\ + } +}" + fi + echo >> "$output_objdir/$my_dlsyms" "\ +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{ {\"$my_originator\", (void *) 0}," + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ + {\"@INIT@\", (void *) <_syminit}," + fi + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + $my_pic_p && pic_flag_for_symtable=" $pic_flag" + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' + + # Transform the symbol file into the correct name. 
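+      # (That is, each @SYMFILE@ placeholder in the compile/finalize commands
+      # becomes the freshly compiled ${my_outputname}S.$objext -- plus the
+      # generated .def file on cygwin/mingw/cegcc hosts, as handled below.)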
+ symfileobj=$output_objdir/${my_outputname}S.$objext + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for '$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $debug_cmd + + win32_libid_type=unknown + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + case $nm_interface in + "MS dumpbin") + if func_cygming_ms_implib_p "$1" || + func_cygming_gnu_implib_p "$1" + then + win32_nmres=import + else + win32_nmres= + fi + ;; + *) + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s|.*|import| + p + q + } + }'` + ;; + esac + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... 
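+    # require file(1) to also report an MS Windows PE image before
+    # classifying the file as a DLL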
+ case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $debug_cmd + + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $debug_cmd + + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive that possess that section. Heuristic: eliminate + # all those that have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. 
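+# Instead, the stringified contents of the import library's .idata section
+# (.idata$7 for GNU-style implibs, .idata$6 for MS-style ones) are scanned
+# with $OBJDUMP to recover the DLL name.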
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $debug_cmd + + if func_cygming_gnu_implib_p "$1"; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1"; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result= + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $debug_cmd + + f_ex_an_ar_dir=$1; shift + f_ex_an_ar_oldlib=$1 + if test yes = "$lock_old_archive_extraction"; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test yes = "$lock_old_archive_extraction"; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $debug_cmd + + my_gentop=$1; shift + my_oldlibs=${1+"$@"} + my_oldobjs= + my_xlib= + my_xabs= + my_xdir= + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib=$func_basename_result + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir=$my_gentop/$my_xlib_u + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
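+          # (For fat Mach-O archives, the loop below thins the archive into
+          # one copy per architecture with $LIPO, extracts each copy, and then
+          # re-creates fat objects with 'lipo -create' so the extracted
+          # members keep the original set of architectures.)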
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + func_basename "$darwin_archive" + darwin_base_archive=$func_basename_result + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches; do + func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" + $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" + cd "unfat-$$/$darwin_base_archive-$darwin_arch" + func_extract_an_archive "`pwd`" "$darwin_base_archive" + cd "$darwin_curdir" + $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result=$my_oldobjs +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory where it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ that is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options that match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
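+# Illustrative example: if the built wrapper is invoked as
+#   ./hello --lt-debug --verbose input.txt
+# the function below strips the --lt-debug option, so
+# func_exec_program_core receives only: --verbose input.txt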
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test yes = "$fast_install"; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + \$ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. 
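+ # For example, on a GNU/Linux host shlibpath_var is LD_LIBRARY_PATH, so
+ # when temp_rpath holds the uninstalled library directories the emitted
+ # wrapper ends up containing something like (illustrative path):
+ #   LD_LIBRARY_PATH="/build/proj/.libs:$LD_LIBRARY_PATH"
+ #   export LD_LIBRARY_PATH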
+ if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* declarations of non-ANSI functions */ +#if defined __MINGW32__ +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined __CYGWIN__ +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined other_platform || defined ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined _MSC_VER +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +#elif defined __MINGW32__ +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined __CYGWIN__ +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined other platforms ... 
*/ +#endif + +#if defined PATH_MAX +# define LT_PATHMAX PATH_MAX +#elif defined MAXPATHLEN +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ + defined __OS2__ +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free (stale); stale = 0; } \ +} while (0) + +#if defined LT_DEBUGWRAPPER +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + size_t tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined HAVE_DOS_BASED_FILE_SYSTEM + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined HAVE_DOS_BASED_FILE_SYSTEM + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = (size_t) (q - p); + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (STREQ (str, pat)) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + size_t len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + size_t orig_value_len = strlen (orig_value); + size_t add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + size_t len = strlen (new_value); + while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[--len] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $debug_cmd + + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_suncc_cstd_abi +# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! +# Several compiler flags select an ABI that is incompatible with the +# Cstd library. Avoid specifying it if any are in CXXFLAGS. 
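+# For example, if the expanded $compile_command contains -std=c++11,
+# -compat=g, -library=stdcxx4 or -library=stlport4, the function below
+# records suncc_use_cstd_abi=no; otherwise it leaves it at yes.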
+func_suncc_cstd_abi () +{ + $debug_cmd + + case " $compile_command " in + *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) + suncc_use_cstd_abi=no + ;; + *) + suncc_use_cstd_abi=yes + ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $debug_cmd + + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # what system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll that has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + os2dllname= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=false + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module=$wl-single_module + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test yes != "$build_libtool_libs" \ + && func_fatal_configuration "cannot build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg=$1 + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. 
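+ # Worked example: when the command line contains '-rpath /usr/local/lib',
+ # the iteration that sees -rpath only sets prev=rpath (further below);
+ # the next iteration enters this block, and the rpath|xrpath) branch
+ # appends /usr/local/lib to $rpath.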
+ if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir=$arg + prev= + continue + ;; + dlfiles|dlprefiles) + $preload || { + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=: + } + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test no = "$dlself"; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test dlprefiles = "$prev"; then + dlself=yes + elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test dlfiles = "$prev"; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols=$arg + test -f "$arg" \ + || func_fatal_error "symbol file '$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex=$arg + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir=$arg + prev= + continue + ;; + mllvm) + # Clang does not use LLVM to link, so we can simply discard any + # '-mllvm $arg' options when doing the link step. + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + if test none != "$pic_object"; then + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + fi + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file '$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + os2dllname) + os2dllname=$arg + prev= + continue + ;; + precious_regex) + precious_files_regex=$arg + prev= + continue + ;; + release) + release=-$arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test rpath = "$prev"; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds=$arg + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg=$arg + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "'-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test X-export-symbols = "X$arg"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between '-L' and '$1'" + else + func_fatal_error "need path for '-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
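+ # Illustrative example: '-Lstage/lib' passed while linking from
+ # /home/user/proj becomes '-L/home/user/proj/stage/lib' below; on
+ # Windows-style hosts both that directory and its stage/bin sibling are
+ # also added to $dllsearchpath so the wrapper can locate DLLs.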
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of '$dir'" + dir=$absdir + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test X-lc = "X$arg" || test X-lm = "X$arg"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test X-lc = "X$arg" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc due to us having libc/libc_r. + test X-lc = "X$arg" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test X-lc = "X$arg" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test X-lc = "X$arg" && continue + ;; + esac + elif test X-lc_r = "X$arg"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -mllvm) + prev=mllvm + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module=$wl-multi_module + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "'-no-install' is ignored for $host" + func_warning "assuming '-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -os2dllname) + prev=os2dllname + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # -fstack-protector* stack protector flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + # -specs=* GCC specs files + # -stdlib=* select c++ std lib with clang + # -fsanitize=* Clang/GCC memory and address 
sanitizer + # -fuse-ld=* Linker select flags for GCC + # -static-* direct GCC to link specific libraries statically + # -fcilkplus Cilk Plus language extension features for C/C++ + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ + -specs=*|-fsanitize=*|-fuse-ld=*|-static-*|-fcilkplus) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + -Z*) + if test os2 = "`expr $host : '.*\(os2\)'`"; then + # OS/2 uses -Zxxx to specify OS/2-specific options + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case $arg in + -Zlinker | -Zstack) + prev=xcompiler + ;; + esac + continue + else + # Otherwise treat like 'Some other compiler flag' below + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + fi + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + test none = "$pic_object" || { + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + } + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. 
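+ # A .la file is a small shell-parsable description; an illustrative
+ # (hypothetical) example of the fields that func_source reads later:
+ #   dlname='libfoo.so.1'
+ #   library_names='libfoo.so.1.2.3 libfoo.so.1 libfoo.so'
+ #   old_library='libfoo.a'
+ #   dependency_libs=' -L/usr/local/lib -lbar'
+ #   libdir='/usr/local/lib'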
+ + func_resolve_sysroot "$arg" + if test dlfiles = "$prev"; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test dlprefiles = "$prev"; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the '$prevarg' option requires an argument" + + if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname=$func_basename_result + libobjs_save=$libobjs + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + # Definition is injected by LT_CONFIG during libtool generation. + func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" + + func_dirname "$output" "/" "" + output_objdir=$func_dirname_result$objdir + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test lib = "$linkmode"; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
+ pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=false + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... + if test lib,link = "$linkmode,$pass"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs=$tmp_deplibs + fi + + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass"; then + libs=$deplibs + deplibs= + fi + if test prog = "$linkmode"; then + case $pass in + dlopen) libs=$dlfiles ;; + dlpreopen) libs=$dlprefiles ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test lib,dlpreopen = "$linkmode,$pass"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs=$dlprefiles + fi + if test dlopen = "$pass"; then + # Collect dlpreopened libraries + save_deplibs=$deplibs + deplibs= + fi + + for deplib in $libs; do + lib= + found=false + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test lib != "$linkmode" && test prog != "$linkmode"; then + func_warning "'-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test lib = "$linkmode"; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext 
.so .a; do + # Search the libtool library + lib=$searchdir/lib$name$search_ext + if test -f "$lib"; then + if test .la = "$search_ext"; then + found=: + else + found=false + fi + break 2 + fi + done + done + if $found; then + # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll=$l + done + if test "X$ll" = "X$old_library"; then # only static version available + found=false + func_dirname "$lib" "" "." + ladir=$func_dirname_result + lib=$ladir/$old_library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + else + # deplib doesn't seem to be a libtool library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + ;; # -l + *.ltframework) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test conv = "$pass" && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + if test scan = "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "'-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test link = "$pass"; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. 
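+ # (A convenience library is a libtool archive built without -rpath, e.g.
+ #   libtool --mode=link $CC -o libaux.la a.lo b.lo
+ # and is intended to be absorbed whole into another libtool library.)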
+ case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=false + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=: + fi + ;; + pass_all) + valid_a_lib=: + ;; + esac + if $valid_a_lib; then + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + else + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + fi + ;; + esac + continue + ;; + prog) + if test link != "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + elif test prog = "$linkmode"; then + if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=: + continue + ;; + esac # case $deplib + + $found || test -f "$lib" \ + || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "'$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir=$func_dirname_result + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass" || + { test prog != "$linkmode" && test lib != "$linkmode"; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test conv = "$pass"; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test prog != "$linkmode" && test lib != "$linkmode"; then + func_fatal_error "'$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test yes = "$prefer_static_libs" || + test built,no = "$prefer_static_libs,$installed"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib=$l + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + + # This library was specified with -dlopen. + if test dlopen = "$pass"; then + test -z "$libdir" \ + && func_fatal_error "cannot -dlopen a convenience library: '$lib'" + if test -z "$dlname" || + test yes != "$dlopen_support" || + test no = "$build_libtool_libs" + then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of '$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir=$ladir + fi + ;; + esac + func_basename "$lib" + laname=$func_basename_result + + # Find the relevant object directory and library name. + if test yes = "$installed"; then + if test ! 
-f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library '$lib' was moved." + dir=$ladir + absdir=$abs_ladir + libdir=$abs_ladir + else + dir=$lt_sysroot$libdir + absdir=$lt_sysroot$libdir + fi + test yes = "$hardcode_automatic" && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir=$ladir + absdir=$abs_ladir + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir=$ladir/$objdir + absdir=$abs_ladir/$objdir + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test dlpreopen = "$pass"; then + if test -z "$libdir" && test prog = "$linkmode"; then + func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" + fi + case $host in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test lib = "$linkmode"; then + deplibs="$dir/$old_library $deplibs" + elif test prog,link = "$linkmode,$pass"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test prog = "$linkmode" && test link != "$pass"; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=false + if test no != "$link_all_deplibs" || test -z "$library_names" || + test no = "$build_libtool_libs"; then + linkalldeplibs=: + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if $linkalldeplibs; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test prog,link = "$linkmode,$pass"; then + if test -n "$library_names" && + { { test no = "$prefer_static_libs" || + test built,yes = "$prefer_static_libs,$installed"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then + # Make sure the rpath contains only unique directories. + case $temp_rpath: in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if $alldeplibs && + { test pass_all = "$deplibs_check_method" || + { test yes = "$build_libtool_libs" && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test built = "$use_static_libs" && test yes = "$installed"; then + use_static_libs=no + fi + if test -n "$library_names" && + { test no = "$use_static_libs" || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc* | *os2*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test no = "$installed"; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule= + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule=$dlpremoduletest + break + fi + done + if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then + echo + if test prog = "$linkmode"; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test lib = "$linkmode" && + test yes = "$hardcode_into_libs"; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname=$1 + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname=$dlname + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc* | *os2*) + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + esac + eval soname=\"$soname_spec\" + else + soname=$realname + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot=$soname + func_basename "$soroot" + soname=$func_basename_result + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from '$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for '$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test prog = "$linkmode" || test relink != "$opt_mode"; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test no = "$hardcode_direct"; then + add=$dir/$linklib + case $host in + *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; + *-*-sysv4*uw2*) add_dir=-L$dir ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir=-L$dir ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we cannot + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library"; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add=$dir/$old_library + fi + elif test -n "$old_library"; then + add=$dir/$old_library + fi + fi + esac + elif test no = "$hardcode_minus_L"; then + case $host in + *-*-sunos*) add_shlibpath=$dir ;; + esac + add_dir=-L$dir + add=-l$name + elif test no = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + relink) + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$dir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$absdir + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test yes != "$lib_linked"; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test prog = "$linkmode"; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test yes != "$hardcode_direct" && + test yes != "$hardcode_minus_L" && + test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test prog = "$linkmode" || test relink = "$opt_mode"; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$libdir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$libdir + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add=-l$name + elif test yes = "$hardcode_automatic"; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib"; then + add=$inst_prefix_dir$libdir/$linklib + else + add=$libdir/$linklib + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir=-L$libdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + fi + + if test prog = "$linkmode"; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test prog = "$linkmode"; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test unsupported != "$hardcode_direct"; then + test -n "$old_library" && linklib=$old_library + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test yes = "$build_libtool_libs"; then + # Not a shared library + if test pass_all != "$deplibs_check_method"; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system cannot link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. 
But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test yes = "$module"; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test lib = "$linkmode"; then + if test -n "$dependency_libs" && + { test yes != "$hardcode_into_libs" || + test yes = "$build_old_libs" || + test yes = "$link_static"; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs=$temp_deplibs + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test no != "$link_all_deplibs"; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path=$deplib ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of '$dir'" + absdir=$dir + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names"; then + for tmp in $deplibrary_names; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl"; then + depdepl=$absdir/$objdir/$depdepl + darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" + func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" + path= + fi + fi + ;; + *) + path=-L$absdir/$objdir + ;; + esac + else + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "'$deplib' seems to be moved" + + path=-L$absdir + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test link = "$pass"; then + if test prog = "$linkmode"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs=$newdependency_libs + if test dlpreopen = "$pass"; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test dlopen != "$pass"; then + test conv = "$pass" || { + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + } + + if test prog,link = "$linkmode,$pass"; then + vars="compile_deplibs finalize_deplibs" + else + vars=deplibs + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + + # Add Sun CC postdeps if required: + test CXX = "$tagname" && { + case $host_os in + linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C++ 5.9 + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + + solaris*) + func_cc_basename "$CC" + case $func_cc_basename_result in + CC* | sunCC*) + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + esac + } + + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i= + ;; + esac + if test -n "$i"; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test prog = "$linkmode"; then + dlfiles=$newdlfiles + fi + if test prog = "$linkmode" || test lib = "$linkmode"; then + dlprefiles=$newdlprefiles + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "'-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "'-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs=$output + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form 'libNAME.la'. 
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test no = "$module" \ + && func_fatal_help "libtool library '$output' must begin with 'lib'" + + if test no != "$need_lib_prefix"; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test pass_all != "$deplibs_check_method"; then + func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test no = "$dlself" \ + || func_warning "'-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test 1 -lt "$#" \ + && func_warning "ignoring multiple '-rpath's for a libtool library" + + install_libdir=$1 + + oldlibs= + if test -z "$rpath"; then + if test yes = "$build_libtool_libs"; then + # Building a libtool convenience library. + # Some compilers have problems with a '.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "'-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs=$IFS; IFS=: + set dummy $vinfo 0 0 0 + shift + IFS=$save_ifs + + test -n "$7" && \ + func_fatal_help "too many parameters to '-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major=$1 + number_minor=$2 + number_revision=$3 + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # that has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|freebsd-elf|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_revision + ;; + freebsd-aout|qnx|sunos) + current=$number_major + revision=$number_minor + age=0 + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_minor + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type '$version_type'" + ;; + esac + ;; + no) + current=$1 + revision=$2 + age=$3 + ;; + esac + + # Check that each of the things are valid numbers. 
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT '$current' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION '$revision' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE '$age' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE '$age' is greater than the current interface number '$current'" + func_fatal_error "'$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + # On Darwin other compilers + case $CC in + nagfor*) + verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + ;; + *) + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + esac + ;; + + freebsd-aout) + major=.$current + versuffix=.$current.$revision + ;; + + freebsd-elf) + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + irix | nonstopux) + if test no = "$lt_irix_increment"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring=$verstring_prefix$major.$revision + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test 0 -ne "$loop"; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring_prefix$major.$iface:$verstring + done + + # Before this point, $major must not contain '.'. + major=.$major + versuffix=$major.$revision + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=.$current.$age.$revision + verstring=$current.$age.$revision + + # Add in all the interfaces that we are compatible with. + loop=$age + while test 0 -ne "$loop"; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring:$iface.0 + done + + # Make executables depend on our current version. 
+ func_append verstring ":$current.0" + ;; + + qnx) + major=.$current + versuffix=.$current + ;; + + sco) + major=.$current + versuffix=.$current + ;; + + sunos) + major=.$current + versuffix=.$current.$revision + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 file systems. + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + + *) + func_fatal_configuration "unknown library version type '$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring=0.0 + ;; + esac + if test no = "$need_version"; then + versuffix= + else + versuffix=.0.0 + fi + fi + + # Remove version info from name if versioning should be avoided + if test yes,no = "$avoid_version,$need_version"; then + major= + versuffix= + verstring= + fi + + # Check to see if the archive will have undefined symbols. + if test yes = "$allow_undefined"; then + if test unsupported = "$allow_undefined_flag"; then + if test yes = "$build_old_libs"; then + func_warning "undefined symbols not allowed in $host shared libraries; building static only" + build_libtool_libs=no + else + func_fatal_error "can't build $host shared library unless -no-undefined is specified" + fi + fi + else + # Don't allow undefined symbols. + allow_undefined_flag=$no_undefined_flag + fi + + fi + + func_generate_dlsyms "$libname" "$libname" : + func_append libobjs " $symfileobj" + test " " = "$libobjs" && libobjs= + + if test relink != "$opt_mode"; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) + if test -n "$precious_files_regex"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles=$dlfiles + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles=$dlprefiles + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test yes = "$build_libtool_libs"; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test yes = "$build_libtool_need_lc"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release= + versuffix= + major= + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib=$potent_lib + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | $SED 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; + *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib= + ;; + esac + fi + if test -n "$a_deplib"; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib=$potent_lib # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs= + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + for i in $predeps $postdeps; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test none = "$deplibs_check_method"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test yes = "$droppeddeps"; then + if test yes = "$module"; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test no = "$allow_undefined"; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs=$new_libs + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test yes = "$build_libtool_libs"; then + # Remove $wl instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test yes = "$hardcode_into_libs"; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath=$finalize_rpath + test relink = "$opt_mode" || rpath=$compile_rpath$rpath + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath=$finalize_shlibpath + test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname=$1 + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname=$realname + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib=$output_objdir/$realname + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols=$output_objdir/$libname.uexp + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + func_dll_def_p "$export_symbols" || { + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols=$export_symbols + export_symbols= + always_export_symbols=yes + } + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs=$IFS; IFS='~' + for cmd1 in $cmds; do + IFS=$save_ifs + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test yes = "$try_normal_branch" \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=$output_objdir/$output_la.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS=$save_ifs + if test -n "$export_symbols_regex" && test : != "$skipped_export"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test : != "$skipped_export" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs=$tmp_deplibs + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test yes = "$compiler_needs_object" && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test : != "$skipped_export" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then + output=$output_objdir/$output_la.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then + output=$output_objdir/$output_la.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test yes = "$compiler_needs_object"; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-$k.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test -z "$objlist" || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test 1 -eq "$k"; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-$k.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-$k.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + ${skipped_export-false} && { + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + } + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs=$IFS; IFS='~' + for cmd in $concat_cmds; do + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + ${skipped_export-false} && { + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + } + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. 
+ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs=$IFS; IFS='~' + for cmd in $cmds; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test yes = "$module" || test yes = "$export_dynamic"; then + # On all known operating systems, these are identical. + dlname=$soname + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "'-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object '$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj=$output + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # if reload_cmds runs $LD directly, get rid of -Wl from + # whole_archive_flag_spec and hope we can get by with turning comma + # into space. 
+ case $reload_cmds in + *\$LD[\ \$]*) wl= ;; + esac + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags + else + gentop=$output_objdir/${obj}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test yes = "$build_libtool_libs" || libobjs=$non_pic_objects + + # Create the old-style object. + reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs + + output=$obj + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + test yes = "$build_libtool_libs" || { + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + } + + if test -n "$pic_flag" || test default != "$pic_mode"; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output=$libobj + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "'-release' is ignored for programs" + + $preload \ + && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ + && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test CXX = "$tagname"; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " $wl-bind_at_load" + func_append finalize_command " $wl-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs=$new_libs + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath=$rpath + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath=$rpath + + if test -n "$libobjs" && test yes = "$build_old_libs"; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" false + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=: + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=false + ;; + *cygwin* | *mingw* ) + test yes = "$build_libtool_libs" || wrappers_required=false + ;; + *) + if test no = "$need_relink" || test yes != "$build_libtool_libs"; then + wrappers_required=false + fi + ;; + esac + $wrappers_required || { + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command=$compile_command$compile_rpath + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.$objext"; then + func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' + fi + + exit $exit_status + } + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test yes = "$no_install"; then + # We don't need to create a wrapper script. + link_command=$compile_var$compile_command$compile_rpath + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + case $hardcode_action,$fast_install in + relink,*) + # Fast installation is not supported + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "'$output' will be relinked during installation" + ;; + *,yes) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + ;; + *,no) + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + ;; + *,needless) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command= + ;; + esac + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource=$output_path/$objdir/lt-$output_name.c + cwrapper=$output_path/$output_name.exe + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. 
If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host"; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + case $build_libtool_libs in + convenience) + oldobjs="$libobjs_save $symfileobj" + addlibs=$convenience + build_libtool_libs=no + ;; + module) + oldobjs=$libobjs_save + addlibs=$old_convenience + build_libtool_libs=no + ;; + *) + oldobjs="$old_deplibs $non_pic_objects" + $preload && test -f "$symfileobj" \ + && func_append oldobjs " $symfileobj" + addlibs=$old_convenience + ;; + esac + + if test -n "$addlibs"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase=$func_basename_result + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! 
-f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj"; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test -z "$oldobjs"; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test yes = "$build_old_libs" && old_library=$libname.$libext + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test yes = "$hardcode_automatic"; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test yes = "$installed"; then + if test -z "$install_libdir"; then + break + fi + output=$output_objdir/${outputname}i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name=$func_basename_result + func_resolve_sysroot "$deplib" + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs=$newdependency_libs + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles=$newdlprefiles + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles=$newdlprefiles + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test -n "$bindir"; then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result/$dlname + else + # Otherwise fall back on heuristic. 
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test no,yes = "$installed,$need_relink"; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +if test link = "$opt_mode" || test relink = "$opt_mode"; then + func_mode_link ${1+"$@"} +fi + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $debug_cmd + + RM=$nonopt + files= + rmforce=false + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=: ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir=$func_dirname_result + if test . = "$dir"; then + odir=$objdir + else + odir=$dir/$objdir + fi + func_basename "$file" + name=$func_basename_result + test uninstall = "$opt_mode" && odir=$dir + + # Remember odir for removal later, being careful to avoid duplicates + if test clean = "$opt_mode"; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif $rmforce; then + continue + fi + + rmfiles=$file + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case $opt_mode in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && test none != "$pic_object"; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && test none != "$non_pic_object"; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test clean = "$opt_mode"; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.$objext" + if test yes = "$fast_install" && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name"; then + func_append rmfiles " $odir/lt-$noexename.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the $objdir's in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then + func_mode_uninstall ${1+"$@"} +fi + +test -z "$opt_mode" && { + help=$generic_help + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode '$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# where we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD="ld" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LTCC \$LTCFLAGS -nostdlib \$wl-r -o \$output\$reload_objs" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$tool_oldlib" + +# A language specific compiler. +CC="g++" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin" + +# Additional compiler flags for building library objects. +pic_flag=" -fno-common -DPIC" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Compiler flag to prevent dynamic linking. +link_static_flag="" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\`for conv in \$convenience\\\"\\\"; do test -n \\\"\$conv\\\" && new_convenience=\\\"\$new_convenience \$wl-force_load,\$conv\\\"; done; func_echo_all \\\"\$new_convenience\\\"\`" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. +archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module" +archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring \$single_module \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags" +module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags \$wl-exported_symbols_list,\$output_objdir/\$libname-symbols.expsym" + +# Whether we are building with GNU ld or not. +with_gnu_ld="no" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="\$wl-flat_namespace \$wl-undefined \${wl}suppress" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="" + +# Whether we need a single "-rpath" flag with a separated argument. 
+hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting $shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=yes + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=yes + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Commands necessary for finishing linking programs. +postlink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs="" + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects="" +postdep_objects="" +predeps="" +postdeps="" + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path="" + +# ### END LIBTOOL TAG CONFIG: CXX diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/local.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/local.exp new file mode 100644 index 0000000..2af97f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/local.exp @@ -0,0 +1,2 @@ +set CC_FOR_TARGET "gcc -fdeclspec" +set CXX_FOR_TARGET "g++" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/man/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/man/Makefile new file mode 100644 index 0000000..fca1178 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/man/Makefile @@ -0,0 +1,562 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# man/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = aarch64-apple-darwin21.3.0 +host_triplet = aarch64-apple-darwin21 +target_triplet = aarch64-apple-darwin21 +subdir = man +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = 
$(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +man3dir = $(mandir)/man3 +am__installdirs = "$(DESTDIR)$(man3dir)" +NROFF = nroff +MANS = $(man_MANS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = : +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool 
+LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = nm +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = AARCH64 +TARGETDIR = aarch64 +TARGET_OBJ = src/aarch64/ffi.lo src/aarch64/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/man +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = +ac_ct_CXX = g++ +ac_ct_DUMPBIN = link -dump +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = aarch64-apple-darwin21.3.0 +build_alias = +build_cpu = aarch64 +build_os = darwin21.3.0 +build_vendor = apple +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = aarch64-apple-darwin21 +host_alias = aarch64-apple-darwin21 +host_cpu = aarch64 +host_os = darwin21 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = aarch64-apple-darwin21 +target_alias = aarch64-apple-darwin21 +target_cpu = aarch64 +target_os = darwin21 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. 
+top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign man/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign man/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-man3: $(man_MANS) + @$(NORMAL_INSTALL) + @list1=''; \ + list2='$(man_MANS)'; \ + test -n "$(man3dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.3[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \ + done; } + +uninstall-man3: + @$(NORMAL_UNINSTALL) + @list=''; test -n "$(man3dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ + sed -n '/\.3[a-z]*$$/p'; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e 
"s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(MANS) +installdirs: + for dir in "$(DESTDIR)$(man3dir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-man + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: install-man3 + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-man + +uninstall-man: uninstall-man3 + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + cscopelist-am ctags-am distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-man3 install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags-am uninstall uninstall-am uninstall-man \ + uninstall-man3 + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.deps/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.deps/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/closures.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/closures.o new file mode 100644 index 0000000..1670e76 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/closures.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/java_raw_api.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/java_raw_api.o new file mode 100644 index 0000000..1dddb12 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/java_raw_api.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/prep_cif.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/prep_cif.o new file mode 100644 index 0000000..00229e1 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/prep_cif.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/raw_api.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/raw_api.o new file mode 100644 index 0000000..09676fb Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/raw_api.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/types.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/types.o new file mode 100644 index 0000000..76522d3 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/.libs/types.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.deps/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.deps/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.dirstamp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.dirstamp new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.libs/ffi.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.libs/ffi.o new file mode 100644 index 0000000..60cf237 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.libs/ffi.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.libs/sysv.o b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.libs/sysv.o new file mode 100644 index 0000000..9c02cec Binary files /dev/null and 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/.libs/sysv.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/ffi.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/ffi.lo new file mode 100644 index 0000000..ffe2cab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/ffi.lo @@ -0,0 +1,12 @@ +# src/aarch64/ffi.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/ffi.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/sysv.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/sysv.lo new file mode 100644 index 0000000..879d6e2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/aarch64/sysv.lo @@ -0,0 +1,12 @@ +# src/aarch64/sysv.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/sysv.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/closures.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/closures.lo new file mode 100644 index 0000000..b0b3791 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/closures.lo @@ -0,0 +1,12 @@ +# src/closures.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/closures.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/java_raw_api.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/java_raw_api.lo new file mode 100644 index 0000000..2e586c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/java_raw_api.lo @@ -0,0 +1,12 @@ +# src/java_raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/java_raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/prep_cif.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/prep_cif.lo new file mode 100644 index 0000000..63b28af --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/prep_cif.lo @@ -0,0 +1,12 @@ +# src/prep_cif.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. 
+pic_object='.libs/prep_cif.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/raw_api.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/raw_api.lo new file mode 100644 index 0000000..87ad90e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/raw_api.lo @@ -0,0 +1,12 @@ +# src/raw_api.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/raw_api.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/types.lo b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/types.lo new file mode 100644 index 0000000..a250ea4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/src/types.lo @@ -0,0 +1,12 @@ +# src/types.lo - a libtool object file +# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-14 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# Name of the PIC object. +pic_object='.libs/types.o' + +# Name of the non-PIC object +non_pic_object=none + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/stamp-h1 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/stamp-h1 new file mode 100644 index 0000000..e43c201 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/stamp-h1 @@ -0,0 +1 @@ +timestamp for fficonfig.h diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/testsuite/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/testsuite/Makefile new file mode 100644 index 0000000..5ab3b62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/testsuite/Makefile @@ -0,0 +1,648 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# testsuite/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + +VPATH = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/libffi +pkgincludedir = $(includedir)/libffi +pkglibdir = $(libdir)/libffi +pkglibexecdir = $(libexecdir)/libffi +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = aarch64-apple-darwin21.3.0 +host_triplet = aarch64-apple-darwin21 +target_triplet = aarch64-apple-darwin21 +subdir = testsuite +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \ + $(top_srcdir)/m4/ax_append_flag.m4 \ + $(top_srcdir)/m4/ax_cc_maxopt.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all.m4 \ + $(top_srcdir)/m4/ax_check_compile_flag.m4 \ + $(top_srcdir)/m4/ax_compiler_vendor.m4 \ + $(top_srcdir)/m4/ax_configure_args.m4 \ + $(top_srcdir)/m4/ax_enable_builddir.m4 \ + $(top_srcdir)/m4/ax_gcc_archflag.m4 \ + $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \ + $(top_srcdir)/m4/ax_require_defined.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/fficonfig.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_$(V)) +am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_$(V)) +am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_$(V)) +am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DEJATOOL = $(PACKAGE) +RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir +EXPECT = expect +RUNTEST = runtest 
+am__DIST_COMMON = $(srcdir)/Makefile.in +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing aclocal-1.16 +ALLOCA = +AMTAR = $${TAR-tar} +AM_DEFAULT_VERBOSITY = 1 +AM_LTLDFLAGS = +AM_RUNTESTFLAGS = +AR = ar +AUTOCONF = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoconf +AUTOHEADER = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing autoheader +AUTOMAKE = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing automake-1.16 +AWK = awk +CC = gcc -fdeclspec +CCAS = gcc -fdeclspec +CCASDEPMODE = depmode=none +CCASFLAGS = +CCDEPMODE = depmode=none +CFLAGS = -Wall -fexceptions +CPP = gcc -fdeclspec -E +CPPFLAGS = -DRUBY_EXTCONF_H=\"extconf.h\" -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=none +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DLLTOOL = false +DSYMUTIL = dsymutil +DUMPBIN = : +ECHO_C = \c +ECHO_N = +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +FFI_EXEC_TRAMPOLINE_TABLE = 1 +FGREP = /usr/bin/grep -F +GREP = /usr/bin/grep +HAVE_LONG_DOUBLE = 0 +HAVE_LONG_DOUBLE_VARIANT = 0 +INSTALL = /opt/homebrew/bin/ginstall -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +LD = ld +LDFLAGS = -L/usr/local/opt/libffi/lib +LIBOBJS = +LIBS = +LIBTOOL = $(SHELL) $(top_builddir)/libtool +LIPO = lipo +LN_S = ln -s +LTLIBOBJS = +LT_SYS_LIBRARY_PATH = +MAINT = # +MAKEINFO = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/missing makeinfo +MANIFEST_TOOL = : +MKDIR_P = /opt/homebrew/bin/gmkdir -p +NM = nm +NMEDIT = nmedit +OBJDUMP = objdump +OBJEXT = o +OPT_LDFLAGS = +OTOOL = otool +OTOOL64 = : +PACKAGE = libffi +PACKAGE_BUGREPORT = http://github.com/libffi/libffi/issues +PACKAGE_NAME = libffi +PACKAGE_STRING = libffi 3.3 +PACKAGE_TARNAME = libffi +PACKAGE_URL = +PACKAGE_VERSION = 3.3 +PATH_SEPARATOR = : +PRTDIAG = +RANLIB = ranlib +SECTION_LDFLAGS = +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +STRIP = strip +TARGET = AARCH64 +TARGETDIR = aarch64 +TARGET_OBJ = src/aarch64/ffi.lo src/aarch64/sysv.lo +VERSION = 3.3 +abs_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21/testsuite +abs_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite +abs_top_builddir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi-arm64-darwin21 +abs_top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +ac_ct_AR = ar +ac_ct_CC = +ac_ct_CXX = g++ +ac_ct_DUMPBIN = link -dump +am__include = include +am__leading_dot = . +am__quote = +am__tar = $${TAR-tar} chof - "$$tardir" +am__untar = $${TAR-tar} xf - +ax_enable_builddir_sed = sed +bindir = ${exec_prefix}/bin +build = aarch64-apple-darwin21.3.0 +build_alias = +build_cpu = aarch64 +build_os = darwin21.3.0 +build_vendor = apple +builddir = . 
+datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} +dvidir = ${docdir} +exec_prefix = ${prefix} +host = aarch64-apple-darwin21 +host_alias = aarch64-apple-darwin21 +host_cpu = aarch64 +host_os = darwin21 +host_vendor = apple +htmldir = ${docdir} +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = ${SHELL} /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mkdir_p = $(MKDIR_P) +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /usr/local +program_transform_name = s,x,x, +psdir = ${docdir} +runstatedir = ${localstatedir}/run +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite +sys_symbol_underscore = yes +sysconfdir = ${prefix}/etc +target = aarch64-apple-darwin21 +target_alias = aarch64-apple-darwin21 +target_cpu = aarch64 +target_os = darwin21 +target_vendor = apple +toolexecdir = ${libdir}/gcc-lib/$(target_alias) +toolexeclibdir = ${libdir} +top_build_prefix = ../ +top_builddir = .. +top_srcdir = /Users/arthur/Developer/zephyr/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi +AUTOMAKE_OPTIONS = foreign dejagnu +EXTRA_DEJAGNU_SITE_CONFIG = ../local.exp +CLEANFILES = *.exe core* *.log *.sum +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ 
+libffi.complex/return_complex2_double.c \ +libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ +libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c 
libffi.closures/cls_multi_ushort.c \ +libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ +libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign testsuite/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign testsuite/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: # $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): # $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +check-DEJAGNU: site.exp + srcdir='$(srcdir)'; export srcdir; \ + EXPECT=$(EXPECT); export EXPECT; \ + if $(SHELL) -c "$(RUNTEST) --version" > /dev/null 2>&1; then \ + exit_status=0; l='$(DEJATOOL)'; for tool in $$l; do \ + if $(RUNTEST) $(RUNTESTDEFAULTFLAGS) $(AM_RUNTESTFLAGS) $(RUNTESTFLAGS); \ + then :; else exit_status=1; fi; \ + done; \ + else echo "WARNING: could not find '$(RUNTEST)'" 1>&2; :;\ + fi; \ + exit $$exit_status +site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG) + @echo 'Making a new site.exp file ...' + @echo '## these variables are automatically generated by make ##' >site.tmp + @echo '# Do not edit here. 
If you wish to override these values' >>site.tmp + @echo '# edit the last section' >>site.tmp + @echo 'set srcdir "$(srcdir)"' >>site.tmp + @echo "set objdir `pwd`" >>site.tmp + @echo 'set build_alias "$(build_alias)"' >>site.tmp + @echo 'set build_triplet $(build_triplet)' >>site.tmp + @echo 'set host_alias "$(host_alias)"' >>site.tmp + @echo 'set host_triplet $(host_triplet)' >>site.tmp + @echo 'set target_alias "$(target_alias)"' >>site.tmp + @echo 'set target_triplet $(target_triplet)' >>site.tmp + @list='$(EXTRA_DEJAGNU_SITE_CONFIG)'; for f in $$list; do \ + echo "## Begin content included from file $$f. Do not modify. ##" \ + && cat `test -f "$$f" || echo '$(srcdir)/'`$$f \ + && echo "## End content included from file $$f. ##" \ + || exit 1; \ + done >> site.tmp + @echo "## End of auto-generated content; you can edit from here. ##" >> site.tmp + @if test -f site.exp; then \ + sed -e '1,/^## End of auto-generated content.*##/d' site.exp >> site.tmp; \ + fi + @-rm -f site.bak + @test ! -f site.exp || mv site.exp site.bak + @mv site.tmp site.exp + +distclean-DEJAGNU: + -rm -f site.exp site.bak + -l='$(DEJATOOL)'; for tool in $$l; do \ + rm -f $$tool.sum $$tool.log; \ + done + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-DEJAGNU +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-DEJAGNU distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: all all-am check check-DEJAGNU check-am clean clean-generic \ + clean-libtool cscopelist-am ctags-am distclean \ + distclean-DEJAGNU distclean-generic distclean-libtool distdir \ + dvi dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.bsd.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.bsd.mk new file mode 100644 index 0000000..ab66256 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.bsd.mk @@ -0,0 +1,40 @@ +# -*- makefile -*- +# +# Makefile for BSD systems +# + +LOCAL_LIBS += ${LIBFFI} -lpthread + +LIBFFI_CFLAGS = ${FFI_MMAP_EXEC} -pthread +LIBFFI_BUILD_DIR = ${.CURDIR}/libffi-${arch} +INCFLAGS := -I${LIBFFI_BUILD_DIR}/include -I${INCFLAGS} + +.if ${srcdir} == "." + LIBFFI_SRC_DIR := ${.CURDIR}/libffi +.else + LIBFFI_SRC_DIR := ${srcdir}/libffi +.endif + + +LIBFFI = ${LIBFFI_BUILD_DIR}/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = ${LIBFFI_SRC_DIR}/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): ${LIBFFI} + +$(LIBFFI): + @mkdir -p ${LIBFFI_BUILD_DIR} + @if [ ! -f $(LIBFFI_SRC_DIR)/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! 
-f ${LIBFFI_BUILD_DIR}/Makefile ]; then \ + echo "Configuring libffi"; \ + cd ${LIBFFI_BUILD_DIR} && \ + /usr/bin/env CC="${CC}" LD="${LD}" CFLAGS="${LIBFFI_CFLAGS}" GREP_OPTIONS="" \ + /bin/sh ${LIBFFI_CONFIGURE} ${LIBFFI_HOST} > /dev/null; \ + fi + @cd ${LIBFFI_BUILD_DIR} && ${MAKE} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.darwin.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.darwin.mk new file mode 100644 index 0000000..893a8e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.darwin.mk @@ -0,0 +1,105 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +CCACHE := $(shell type -p ccache) +BUILD_DIR := $(shell pwd) + +INCFLAGS += -I"$(BUILD_DIR)" + +# Work out which arches we need to compile the lib for +ARCHES := +ARCHFLAGS ?= $(filter -arch %, $(CFLAGS)) + +ifneq ($(findstring -arch ppc,$(ARCHFLAGS)),) + ARCHES += ppc +endif + +ifneq ($(findstring -arch i386,$(ARCHFLAGS)),) + ARCHES += i386 +endif + +ifneq ($(findstring -arch x86_64,$(ARCHFLAGS)),) + ARCHES += x86_64 +endif + +ifeq ($(strip $(ARCHES)),) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +# Just build the one (default) architecture +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$(@D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! -f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + /usr/bin/env CC="$(CC)" LD="$(LD)" CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + /bin/sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + cd "$(LIBFFI_BUILD_DIR)" && $(MAKE) + +else +LIBTARGETS = $(foreach arch,$(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + +# Build a fat binary and assemble +build_ffi = \ + mkdir -p "$(BUILD_DIR)"/libffi-$(1); \ + (if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi); \ + (if [ ! 
-f "$(BUILD_DIR)"/libffi-$(1)/Makefile ]; then \ + echo "Configuring libffi for $(1)"; \ + cd "$(BUILD_DIR)"/libffi-$(1) && \ + env CC="$(CCACHE) $(CC)" CFLAGS="-arch $(1) $(LIBFFI_CFLAGS)" LDFLAGS="-arch $(1)" \ + $(LIBFFI_CONFIGURE) --host=$(1)-apple-darwin > /dev/null; \ + fi); \ + $(MAKE) -C "$(BUILD_DIR)"/libffi-$(1) + +target_ffi = "$(BUILD_DIR)"/libffi-$(1)/.libs/libffi_convenience.a:; $(call build_ffi,$(1)) + +# Work out which arches we need to compile the lib for +ifneq ($(findstring ppc,$(ARCHES)),) + $(call target_ffi,ppc) +endif + +ifneq ($(findstring i386,$(ARCHES)),) + $(call target_ffi,i386) +endif + +ifneq ($(findstring x86_64,$(ARCHES)),) + $(call target_ffi,x86_64) +endif + + +$(LIBFFI): $(LIBTARGETS) + # Assemble into a FAT (x86_64, i386, ppc) library + @mkdir -p "$(@D)" + /usr/bin/libtool -static -o $@ \ + $(foreach arch, $(ARCHES),"$(BUILD_DIR)"/libffi-$(arch)/.libs/libffi_convenience.a) + @mkdir -p "$(LIBFFI_BUILD_DIR)"/include + $(RM) "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffi.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffi.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffi.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffi.h + @( \ + printf "#if defined(__i386__)\n"; \ + printf "#include \"libffi-i386/include/ffitarget.h\"\n"; \ + printf "#elif defined(__x86_64__)\n"; \ + printf "#include \"libffi-x86_64/include/ffitarget.h\"\n";\ + printf "#elif defined(__ppc__)\n"; \ + printf "#include \"libffi-ppc/include/ffitarget.h\"\n";\ + printf "#endif\n";\ + ) > "$(LIBFFI_BUILD_DIR)"/include/ffitarget.h + +endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.gnu.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.gnu.mk new file mode 100644 index 0000000..473b8fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.gnu.mk @@ -0,0 +1,32 @@ +# -*- makefile -*- +# +# Common definitions for all systems that use GNU make +# + + +# Tack the extra deps onto the autogenerated variables +LOCAL_LIBS += $(LIBFFI) +BUILD_DIR = $(shell pwd) +LIBFFI_CFLAGS = $(FFI_MMAP_EXEC) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi-$(arch) +INCFLAGS := -I"$(LIBFFI_BUILD_DIR)"/include $(INCFLAGS) + +ifeq ($(srcdir),.) + LIBFFI_SRC_DIR := $(shell pwd)/libffi +else ifeq ($(srcdir),..) + LIBFFI_SRC_DIR := $(shell pwd)/../libffi +else + LIBFFI_SRC_DIR := $(realpath $(srcdir)/libffi) +endif + +LIBFFI = "$(LIBFFI_BUILD_DIR)"/.libs/libffi_convenience.a +LIBFFI_AUTOGEN = ${LIBFFI_SRC_DIR}/autogen.sh +LIBFFI_CONFIGURE = "$(LIBFFI_SRC_DIR)"/configure --disable-static \ + --with-pic=yes --disable-dependency-tracking --disable-docs + +$(OBJS): $(LIBFFI) + +# +# libffi.mk or libffi.darwin.mk contains rules for building the actual library +# + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.mk new file mode 100644 index 0000000..3b58227 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.mk @@ -0,0 +1,18 @@ +# -*- makefile -*- + +include ${srcdir}/libffi.gnu.mk + +$(LIBFFI): + @mkdir -p "$(LIBFFI_BUILD_DIR)" "$@(D)" + @if [ ! -f "$(LIBFFI_SRC_DIR)"/configure ]; then \ + echo "Running autoreconf for libffi"; \ + cd "$(LIBFFI_SRC_DIR)" && \ + /bin/sh $(LIBFFI_AUTOGEN) > /dev/null; \ + fi + @if [ ! 
-f "$(LIBFFI_BUILD_DIR)"/Makefile ]; then \ + echo "Configuring libffi"; \ + cd "$(LIBFFI_BUILD_DIR)" && \ + env CFLAGS="$(LIBFFI_CFLAGS)" GREP_OPTIONS="" \ + sh $(LIBFFI_CONFIGURE) $(LIBFFI_HOST) > /dev/null; \ + fi + $(MAKE) -C "$(LIBFFI_BUILD_DIR)" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.vc.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.vc.mk new file mode 100644 index 0000000..8cd4603 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.vc.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.vc64.mk b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.vc64.mk new file mode 100644 index 0000000..6f3dbbc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi.vc64.mk @@ -0,0 +1,26 @@ +# -*- makefile -*- +# +# Makefile for msvc +# + +# Tack the extra deps onto the autogenerated variables +INCFLAGS = -I$(LIBFFI_BUILD_DIR)/include -I$(LIBFFI_BUILD_DIR)/src/x86 $(INCFLAGS) +LOCAL_LIBS = $(LOCAL_LIBS) $(LIBFFI) +BUILD_DIR = $(MAKEDIR) +LIBFFI_BUILD_DIR = $(BUILD_DIR)/libffi + +!IF "$(srcdir)" == "." +LIBFFI_SRC_DIR = $(MAKEDIR)/libffi +!ELSE +LIBFFI_SRC_DIR = $(srcdir)/libffi +!ENDIF + +LIBFFI = $(LIBFFI_BUILD_DIR)/.libs/libffi_convenience.lib + +$(OBJS): $(LIBFFI) + +$(LIBFFI): + @$(MAKEDIRS) $(LIBFFI_BUILD_DIR) + @cd $(LIBFFI_BUILD_DIR) && $(MAKE) -f Makefile.vc64 + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.appveyor.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.appveyor.yml new file mode 100644 index 0000000..ece8a94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.appveyor.yml @@ -0,0 +1,66 @@ +shallow_clone: true + +# We're currently only testing libffi built with Microsoft's +# tools. +# This matrix should be expanded to include at least: +# 32- and 64-bit gcc/cygwin +# 32- and 64-bit gcc/mingw +# 32- and 64-bit clang/mingw +# and perhaps more. 
+ +image: Visual Studio 2017 +platform: + - x64 + - x86 + - arm + - arm64 + +environment: + global: + CYG_ROOT: C:/cygwin + CYG_CACHE: C:/cygwin/var/cache/setup + CYG_MIRROR: http://mirrors.kernel.org/sourceware/cygwin/ + matrix: + - VSVER: 15 + +install: + - ps: >- + If ($env:Platform -Match "x86") { + $env:VCVARS_PLATFORM="x86" + $env:BUILD="i686-pc-cygwin" + $env:HOST="i686-pc-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh" + $env:SRC_ARCHITECTURE="x86" + } ElseIf ($env:Platform -Match "arm64") { + $env:VCVARS_PLATFORM="x86_arm64" + $env:BUILD="i686-pc-cygwin" + $env:HOST="aarch64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm64" + $env:SRC_ARCHITECTURE="aarch64" + } ElseIf ($env:Platform -Match "arm") { + $env:VCVARS_PLATFORM="x86_arm" + $env:BUILD="i686-pc-cygwin" + $env:HOST="arm-w32-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -marm" + $env:SRC_ARCHITECTURE="arm" + } Else { + $env:VCVARS_PLATFORM="amd64" + $env:BUILD="x86_64-w64-cygwin" + $env:HOST="x86_64-w64-cygwin" + $env:MSVCC="/cygdrive/c/projects/libffi/msvcc.sh -m64" + $env:SRC_ARCHITECTURE="x86" + } + - 'appveyor DownloadFile https://cygwin.com/setup-x86.exe -FileName setup.exe' + - 'setup.exe -qnNdO -R "%CYG_ROOT%" -s "%CYG_MIRROR%" -l "%CYG_CACHE%" -P dejagnu >NUL' + - '%CYG_ROOT%/bin/bash -lc "cygcheck -dc cygwin"' + - echo call VsDevCmd to set VS150COMNTOOLS + - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat" + - ps: $env:VSCOMNTOOLS=(Get-Content ("env:VS" + "$env:VSVER" + "0COMNTOOLS")) + - echo "Using Visual Studio %VSVER%.0 at %VSCOMNTOOLS%" + - call "%VSCOMNTOOLS%..\..\vc\Auxiliary\Build\vcvarsall.bat" %VCVARS_PLATFORM% + +build_script: + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./autogen.sh;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; ./configure CC='%MSVCC%' CXX='%MSVCC%' LD='link' CPP='cl -nologo -EP' CXXCPP='cl -nologo -EP' CPPFLAGS='-DFFI_BUILDING_DLL' AR='/cygdrive/c/projects/libffi/.travis/ar-lib lib' NM='dumpbin -symbols' STRIP=':' --build=$BUILD --host=$HOST;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp src/%SRC_ARCHITECTURE%/ffitarget.h include; make; find .;)" + - c:\cygwin\bin\sh -lc "(cd $OLDPWD; cp `find . 
-name 'libffi-?.dll'` $HOST/testsuite/; make check; cat `find ./ -name libffi.log`)" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.gitattributes b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.gitattributes new file mode 100644 index 0000000..f7d3833 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +*.sln text eol=crlf +*.vcxproj* text eol=crlf diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.github/issue_template.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.github/issue_template.md new file mode 100644 index 0000000..e197e2c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.github/issue_template.md @@ -0,0 +1,10 @@ +## System Details + + + + +## Problems Description + + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.gitignore b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.gitignore new file mode 100644 index 0000000..5d39689 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.gitignore @@ -0,0 +1,38 @@ +.libs +.deps +*.o +*.lo +.dirstamp +*.la +Makefile +!testsuite/libffi.bhaible/Makefile +Makefile.in +aclocal.m4 +compile +!.travis/compile +configure +depcomp +doc/libffi.info +*~ +fficonfig.h.in +fficonfig.h +include/ffi.h +include/ffitarget.h +install-sh +libffi.pc +libtool +libtool-ldflags +ltmain.sh +m4/libtool.m4 +m4/lt*.m4 +mdate-sh +missing +stamp-h1 +libffi*gz +autom4te.cache +libffi.xcodeproj/xcuserdata +libffi.xcodeproj/project.xcworkspace +build_*/ +darwin_*/ +src/arm/trampoline.S +**/texinfo.tex diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis.yml new file mode 100644 index 0000000..8db2ddf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis.yml @@ -0,0 +1,83 @@ +--- +sudo: required + +language: cpp + +# For qemu-powered targets, get the list of supported processors from +# travis by setting QEMU_CPU=help, then set -mcpu= for the compilers +# accordingly. + +matrix: + include: + - os: linux + env: HOST=powerpc-eabisim RUNTESTFLAGS="--target_board powerpc-eabisim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=or1k-elf RUNTESTFLAGS="--target_board or1k-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=m32r-elf RUNTESTFLAGS="--target_board m32r-sim" DEJAGNU="/opt/.travis/site.exp" + - os: linux + env: HOST=bfin-elf RUNTESTFLAGS="--target_board bfin-sim" DEJAGNU="/opt/.travis/site.exp" +# This configuration is still using the native x86 toolchain? 
+# - os: osx +# env: HOST=aarch64-apple-darwin13 + - os: osx + env: HOST=x86_64-apple-darwin10 + - os: linux + env: HOST=x86_64-w64-mingw32 MEVAL='export CC="x86_64-w64-mingw32-gcc" && CXX="x86_64-w64-mingw32-g++" RUNTESTFLAGS="--target_board wine-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" CONFIGURE_OPTIONS=--disable-shared LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=sh4-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/sh4-linux-gnu + - os: linux + env: HOST=alpha-linux-gnu CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/alpha-linux-gnu + - os: linux + env: HOST=m68k-linux-gnu MEVAL='export CC="m68k-linux-gnu-gcc-8 -mcpu=547x" && CXX="m68k-linux-gnu-g++-8 -mcpu=547x"' CONFIGURE_OPTIONS=--disable-shared QEMU_LD_PREFIX=/usr/m68k-linux-gnu QEMU_CPU=cfv4e + - os: linux + arch: s390x + env: HOST=s390x-linux-gnu + - os: linux + arch: ppc64le + env: HOST=ppc64le-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + - os: linux + arch: arm64 + env: HOST=aarch64-linux-gnu + compiler: clang + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O0" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2" + - os: linux + env: HOST=arm32v7-linux-gnu LIBFFI_TEST_OPTIMIZATION="-O2 -fomit-frame-pointer" +# The sparc64 linux system in the GCC compile farm is non-responsive. +# - os: linux +# env: HOST=sparc64-linux-gnu +# The mips64 linux system in the GCC compile farm is not allowing logins +# - os: linux +# env: HOST=mips64el-linux-gnu + - os: linux + compiler: gcc + env: HOST=i386-pc-linux-gnu MEVAL='export CC="$CC -m32" && CXX="$CXX -m32"' + - os: linux + compiler: gcc + - os: linux + compiler: gcc + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + compiler: clang + - os: linux + compiler: clang + env: CONFIGURE_OPTIONS=--disable-shared + - os: linux + env: HOST=moxie-elf MEVAL='export PATH=/opt/moxielogic/bin:$PATH && CC=moxie-elf-gcc && CXX=moxie-elf-g++' LDFLAGS=-Tsim.ld RUNTESTFLAGS="--target_board moxie-sim" DEJAGNU="$TRAVIS_BUILD_DIR/.travis/site.exp" + +before_install: + - if test x"$MEVAL" != x; then eval ${MEVAL}; fi + +install: + - travis_wait 30 ./.travis/install.sh + +script: + - if ! test x"$MEVAL" = x; then eval ${MEVAL}; fi + - travis_wait 115 sleep infinity & + - ./.travis/build.sh diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/ar-lib b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/ar-lib new file mode 100755 index 0000000..0baa4f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/ar-lib @@ -0,0 +1,270 @@ +#! /bin/sh +# Wrapper for Microsoft lib.exe + +me=ar-lib +scriptversion=2012-03-01.08; # UTC + +# Copyright (C) 2010-2018 Free Software Foundation, Inc. +# Written by Peter Rosin . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + + +# func_error message +func_error () +{ + echo "$me: $1" 1>&2 + exit 1 +} + +file_conv= + +# func_file_conv build_file +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv in + mingw) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_at_file at_file operation archive +# Iterate over all members in AT_FILE performing OPERATION on ARCHIVE +# for each of them. +# When interpreting the content of the @FILE, do NOT use func_file_conv, +# since the user would need to supply preconverted file names to +# binutils ar, at least for MinGW. +func_at_file () +{ + operation=$2 + archive=$3 + at_file_contents=`cat "$1"` + eval set x "$at_file_contents" + shift + + for member + do + $AR -NOLOGO $operation:"$member" "$archive" || exit $? + done +} + +case $1 in + '') + func_error "no command. Try '$0 --help' for more information." + ;; + -h | --h*) + cat < libffi.log + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git libffi.log + exit $? +} + +function build_linux() +{ + ./autogen.sh + ./configure ${HOST+--host=$HOST} ${CONFIGURE_OPTIONS} || cat */config.log + make + make dist + make check RUNTESTFLAGS="-a $RUNTESTFLAGS" + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_foreign_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" $2 bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_cross_linux() +{ + ${DOCKER} run --rm -t -i -v $(pwd):/opt ${SET_QEMU_CPU} -e HOST="${HOST}" -e CC="${HOST}-gcc-8 ${GCC_OPTIONS}" -e CXX="${HOST}-g++-8 ${GCC_OPTIONS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" moxielogic/cross-ci-build-container:latest bash -c /opt/.travis/build-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? 
+} + +function build_cross() +{ + ${DOCKER} pull quay.io/moxielogic/libffi-ci-${HOST} + ${DOCKER} run --rm -t -i -v $(pwd):/opt -e HOST="${HOST}" -e CC="${HOST}-gcc ${GCC_OPTIONS}" -e CXX="${HOST}-g++ ${GCC_OPTIONS}" -e TRAVIS_BUILD_DIR=/opt -e DEJAGNU="${DEJAGNU}" -e RUNTESTFLAGS="${RUNTESTFLAGS}" -e LIBFFI_TEST_OPTIMIZATION="${LIBFFI_TEST_OPTIMIZATION}" quay.io/moxielogic/libffi-ci-${HOST} bash -c /opt/.travis/build-cross-in-container.sh + + ./rlgl l --key=${RLGL_KEY} https://rl.gl + ID=$(./rlgl start) + ./rlgl e --id=$ID --policy=https://github.com/libffi/rlgl-policy.git */testsuite/libffi.log + exit $? +} + +function build_ios() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-ios + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-iOS" -configuration Release -sdk iphoneos11.4 + exit $? +} + +function build_macosx() +{ + which python +# export PYTHON_BIN=/usr/local/bin/python + ./generate-darwin-source-and-headers.py --only-osx + xcodebuild -showsdks + xcodebuild -project libffi.xcodeproj -target "libffi-Mac" -configuration Release -sdk macosx10.13 + echo "Finished build" + exit $? +} + +case "$HOST" in + arm-apple-darwin*) + ./autogen.sh + build_ios + ;; + x86_64-apple-darwin*) + ./autogen.sh + build_macosx + ;; + arm32v7-linux-gnu) + ./autogen.sh + build_foreign_linux arm moxielogic/arm32v7-ci-build-container:latest + ;; + mips64el-linux-gnu | sparc64-linux-gnu) + build_cfarm + ;; + bfin-elf ) + ./autogen.sh + GCC_OPTIONS=-msim build_cross + ;; + m32r-elf ) + ./autogen.sh + build_cross + ;; + or1k-elf ) + ./autogen.sh + build_cross + ;; + powerpc-eabisim ) + ./autogen.sh + build_cross + ;; + m68k-linux-gnu ) + ./autogen.sh + GCC_OPTIONS=-mcpu=547x build_cross_linux + ;; + alpha-linux-gnu | sh4-linux-gnu ) + ./autogen.sh + build_cross_linux + ;; + *) + ./autogen.sh + build_linux + ;; +esac diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/compile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/compile new file mode 100755 index 0000000..655932a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/compile @@ -0,0 +1,351 @@ +#! /bin/sh +# Wrapper for compilers which do not understand '-c -o'. + +scriptversion=2018-03-27.18; # UTC + +# Copyright (C) 1999-2018 Free Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. 
+IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -warn) + eat=1 + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. 
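+
+For example, when invoked from a Makefile rule such as
+  compile cc -c -o sub/foo.o foo.c
+this wrapper runs 'cc -c foo.c' and then moves the resulting foo.o to
+sub/foo.o (the file names above are only illustrative).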
+ +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? 
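+
+# The compiler has now written its output under its default name ($cofile);
+# the code below moves it to the name requested with '-o' and removes the
+# lock directory.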
+ +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/install.sh b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/install.sh new file mode 100755 index 0000000..2420245 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/install.sh @@ -0,0 +1,71 @@ +#!/bin/bash +set -x + +if [[ $TRAVIS_OS_NAME != 'linux' ]]; then + brew update > brew-update.log 2>&1 + # fix an issue with libtool on travis by reinstalling it + brew uninstall libtool; + brew install libtool dejagnu; + + # Download and extract the rlgl client + wget -qO - https://rl.gl/cli/rlgl-darwin-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + +else + # Download and extract the rlgl client + case $HOST in + aarch64-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-arm.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + ppc64le-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-ppc64le.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + s390x-linux-gnu) + wget -qO - https://rl.gl/cli/rlgl-linux-s390x.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + *) + wget -qO - https://rl.gl/cli/rlgl-linux-amd64.tgz | \ + tar --strip-components=2 -xvzf - ./rlgl/rlgl; + ;; + esac + + sudo apt-get clean # clear the cache + sudo apt-get update + case $HOST in + mips64el-linux-gnu | sparc64-linux-gnu) + ;; + alpha-linux-gnu | arm32v7-linux-gnu | m68k-linux-gnu | sh4-linux-gnu) + sudo apt-get install qemu-user-static + ;; + hppa-linux-gnu ) + sudo apt-get install -y qemu-user-static g++-5-hppa-linux-gnu + ;; + i386-pc-linux-gnu) + sudo apt-get install gcc-multilib g++-multilib; + ;; + moxie-elf) + echo 'deb https://repos.moxielogic.org:7114/MoxieLogic moxiedev main' | sudo tee -a /etc/apt/sources.list + sudo apt-get clean # clear the cache + sudo apt-get update ## -qq + sudo apt-get update + sudo apt-get install -y --allow-unauthenticated moxielogic-moxie-elf-gcc moxielogic-moxie-elf-gcc-c++ moxielogic-moxie-elf-gcc-libstdc++ moxielogic-moxie-elf-gdb-sim + ;; + x86_64-w64-mingw32) + sudo apt-get install gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 wine; + ;; + i686-w32-mingw32) + sudo apt-get install gcc-mingw-w64-i686 g++-mingw-w64-i686 wine; + ;; + esac + case $HOST in + arm32v7-linux-gnu) + # don't install host tools + ;; + *) + sudo apt-get install dejagnu texinfo sharutils + ;; + esac +fi diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/m32r-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/m32r-sim.exp new file mode 100644 index 0000000..c18123f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/m32r-sim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, MA 02110, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {m32r-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "m32r" is the name of the sim subdir in devo/sim. +setup_sim m32r + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/moxie-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/moxie-sim.exp new file mode 100644 index 0000000..32979ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/moxie-sim.exp @@ -0,0 +1,60 @@ +# Copyright (C) 2010 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, MA 02110, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {moxie-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "moxie" is the name of the sim subdir in devo/sim. +setup_sim moxie + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. 
We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" +# No linker script needed. +set_board_info ldscript "-Tsim.ld" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/or1k-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/or1k-sim.exp new file mode 100644 index 0000000..3920413 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/or1k-sim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, MA 02110, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {or1k-elf} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "or1k" is the name of the sim subdir in devo/sim. +setup_sim or1k + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. 
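+# (m32r-sim.exp, moxie-sim.exp, powerpc-eabisim.exp and wine-sim.exp in this
+# directory use the same value.)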
+set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp new file mode 100644 index 0000000..285fd4f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/powerpc-eabisim.exp @@ -0,0 +1,58 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, MA 02110, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {powerpc-eabisim} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +# basic-sim.exp is a basic description for the standard Cygnus simulator. +load_base_board_description "basic-sim" + +# "powerpc" is the name of the sim subdir in devo/sim. +setup_sim powerpc + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/site.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/site.exp new file mode 100644 index 0000000..644ec63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/site.exp @@ -0,0 +1,27 @@ +# Copyright (C) 2008, 2010, 2018, 2019 Anthony Green + +# Make sure we look in the right place for the board description files. 
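+# DejaGnu reads this global config file when the DEJAGNU environment variable
+# names it (the build_cross container invocation above passes DEJAGNU through).
+# The code below adds the .travis directory to the board search path and maps
+# each *-elf target triplet to its *-sim board description.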
+if ![info exists boards_dir] { + set boards_dir {} +} + +lappend boards_dir $::env(TRAVIS_BUILD_DIR)/.travis + +verbose "Global Config File: target_triplet is $target_triplet" 2 +global target_list + +case "$target_triplet" in { + { "bfin-elf" } { + set target_list "bfin-sim" + } + { "m32r-elf" } { + set target_list "m32r-sim" + } + { "moxie-elf" } { + set target_list "moxie-sim" + } + { "or1k-elf" } { + set target_list "or1k-sim" + } +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/wine-sim.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/wine-sim.exp new file mode 100644 index 0000000..1ad6038 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/.travis/wine-sim.exp @@ -0,0 +1,55 @@ +# Copyright (C) 2010, 2019 Free Software Foundation, Inc. +# +# This file is part of DejaGnu. +# +# DejaGnu is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# DejaGnu is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with DejaGnu; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, MA 02110, USA. + +# This is a list of toolchains that are supported on this board. +set_board_info target_install {i686-w64-mingw32} + +# Load the generic configuration for this board. This will define a basic set +# of routines needed by the tool to communicate with the board. +load_generic_config "sim" + +set_board_info sim "wineconsole --backend=curses" +set_board_info is_simulator 1 + +# No multilib options needed by default. +process_multilib_options "" + +# We only support newlib on this target. We assume that all multilib +# options have been specified before we get here. + +set_board_info compiler "[find_gcc]" +set_board_info cflags "[libgloss_include_flags] [newlib_include_flags]" +set_board_info ldflags "[libgloss_link_flags] [newlib_link_flags]" + +# Configuration settings for testsuites +set_board_info noargs 1 +set_board_info gdb,nosignals 1 +set_board_info gdb,noresults 1 +set_board_info gdb,cannot_call_functions 1 +set_board_info gdb,skip_float_tests 1 +set_board_info gdb,can_reverse 1 +set_board_info gdb,use_precord 1 + +# More time is needed +set_board_info gcc,timeout 800 +set_board_info gdb,timeout 60 + +# Used by a few gcc.c-torture testcases to delimit how large the stack can +# be. +set_board_info gcc,stack_size 5000 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/ChangeLog.old b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/ChangeLog.old new file mode 100644 index 0000000..8de1ca7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/ChangeLog.old @@ -0,0 +1,7407 @@ +Libffi change logs used to be maintained in separate ChangeLog files. +These days we generate them directly from the git commit messages. +The old ChangeLog files are saved here in order to maintain the historical +record. + +============================================================================= +From the old ChangeLog.libffi-3.1 file... 
+ +2014-03-16 Josh Triplett + + * ChangeLog: Archive to ChangeLog.libffi-3.1 and delete. Future + changelogs will come from git, with autogenerated snapshots shipped in + distributed tarballs. + +2014-03-16 Josh Triplett + + Add support for stdcall, thiscall, and fastcall on non-Windows + x86-32. + + Linux supports the stdcall calling convention, either via + functions explicitly declared with the stdcall attribute, or via + code compiled with -mrtd which effectively makes stdcall the + default. + + This introduces FFI_STDCALL, FFI_THISCALL, and FFI_FASTCALL on + non-Windows x86-32 platforms, as non-default calling conventions. + + * Makefile.am: Compile in src/x86/win32.S on non-Windows x86-32. + * src/x86/ffitarget.h: Add FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32. Increase trampoline size to + accomodate these calling conventions, and unify some ifdeffery. + * src/x86/ffi.c: Add support for FFI_STDCALL, FFI_THISCALL, and + FFI_FASTCALL on non-Windows x86-32 platforms; update ifdeffery. + * src/x86/win32.S: Support compiling on non-Windows x86-32 + platforms. On those platforms, avoid redefining the SYSV symbols + already provided by src/x86/sysv.S. + * testsuite/libffi.call/closure_stdcall.c: Run on non-Windows. + #define __stdcall if needed. + * testsuite/libffi.call/closure_thiscall.c: Run on non-Windows. + #define __fastcall if needed. + * testsuite/libffi.call/fastthis1_win32.c: Run on non-Windows. + * testsuite/libffi.call/fastthis2_win32.c: Ditto. + * testsuite/libffi.call/fastthis3_win32.c: Ditto. + * testsuite/libffi.call/many2_win32.c: Ditto. + * testsuite/libffi.call/many_win32.c: Ditto. + * testsuite/libffi.call/strlen2_win32.c: Ditto. + * testsuite/libffi.call/strlen_win32.c: Ditto. + * testsuite/libffi.call/struct1_win32.c: Ditto. + * testsuite/libffi.call/struct2_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * prep_cif.c: Remove unnecessary ifdef for X86_WIN32. + ffi_prep_cif_core had a special case for X86_WIN32, checking for + FFI_THISCALL in addition to the FFI_FIRST_ABI-to-FFI_LAST_ABI + range before returning FFI_BAD_ABI. However, on X86_WIN32, + FFI_THISCALL already falls in that range, making the special case + unnecessary. Remove it. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/closure_thiscall.c: Remove fragile stack + pointer checks. These files included inline assembly to save the + stack pointer before and after the call, and compare the values. + However, compilers can and do leave the stack in different states + for these two pieces of inline assembly, such as by saving a + temporary value on the stack across the call; observed with gcc + -Os, and verified as spurious through careful inspection of + disassembly. + +2014-03-16 Josh Triplett + + * testsuite/libffi.call/many.c: Avoid spurious failure due to + excess floating-point precision. + * testsuite/libffi.call/many_win32.c: Ditto. + +2014-03-16 Josh Triplett + + * libtool-ldflags: Re-add. + +2014-03-16 Josh Triplett + + * Makefile.in, aclocal.m4, compile, config.guess, config.sub, + configure, depcomp, include/Makefile.in, install-sh, + libtool-ldflags, ltmain.sh, m4/libtool.m4, m4/ltoptions.m4, + m4/ltsugar.m4, m4/ltversion.m4, m4/lt~obsolete.m4, + man/Makefile.in, mdate-sh, missing, testsuite/Makefile.in: Delete + autogenerated files from version control. + * .gitignore: Add autogenerated files. + * autogen.sh: New script to generate the autogenerated files. 
+ * README: Document requirement to run autogen.sh when building + directly from version control. + * .travis.yml: Run autogen.sh + +2014-03-14 Anthony Green + + * configure, Makefile.in: Rebuilt. + +2014-03-10 Mike Hommey + + * configure.ac: Allow building for mipsel with Android NDK r8. + * Makefile.am (AM_MAKEFLAGS): Replace double quotes with single + quotes. + +2014-03-10 Landry Breuil + + * configure.ac: Ensure the linker supports @unwind sections in libffi. + +2014-03-01 Anthony Green + + * Makefile.am (EXTRA_DIST): Replace old scripts with + generate-darwin-source-and-headers.py. + * Makefile.in: Rebuilt. + +2014-02-28 Anthony Green + + * Makefile.am (AM_CFLAGS): Reintroduce missing -DFFI_DEBUG for + --enable-debug builds. + * Makefile.in: Rebuilt. + +2014-02-28 Makoto Kato + + * src/closures.c: Fix build failure when using clang for Android. + +2014-02-28 Marcin Wojdyr + + * libffi.pc.in (toolexeclibdir): use -L${toolexeclibdir} instead + of -L${libdir}. + +2014-02-28 Paulo Pizarro + + * src/bfin/sysv.S: Calling functions in shared libraries requires + considering the GOT. + +2014-02-28 Josh Triplett + + * src/x86/ffi64.c (classify_argument): Handle case where + FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE. + +2014-02-28 Anthony Green + + * ltmain.sh: Generate with libtool-2.4.2.418. + * m4/libtool.m4, m4/ltoptions.m4, m4/ltversion.m4: Ditto. + * configure: Rebuilt. + +2014-02-28 Dominik Vogt + + * configure.ac (AC_ARG_ENABLE struct): Fix typo in help + message. + (AC_ARG_ENABLE raw_api): Ditto. + * configure, fficonfig.h.in: Rebuilt. + +2014-02-28 Will Newton + + * src/arm/sysv.S: Initialize IP register with FP. + +2014-02-28 Yufeng Zhang + + * src/aarch64/sysv.S (ffi_closure_SYSV): Use x29 as the + main CFA reg; update cfi_rel_offset. + +2014-02-15 Marcus Comstedt + + * src/powerpc/ffi_linux64.c, src/powerpc/linux64_closure.S: Remove + assumption on contents of r11 in closure. + +2014-02-09 Heiher + + * src/mips/n32.S: Fix call floating point va function. + +2014-01-21 Zachary Waldowski + + * src/aarch64/ffi.c: Fix missing semicolons on assertions under + debug mode. + +2013-12-30 Zachary Waldowski + + * .gitignore: Exclude darwin_* generated source and build_* trees. + * src/aarch64/ffi.c, src/arm/ffi.c, src/x86/ffi.c: Inhibit Clang + previous prototype warnings. + * src/arm/ffi.c: Prevent NULL dereference, fix short type warning + * src/dlmalloc.c: Fix warnings from set_segment_flags return type, + and the native use of size_t for malloc on platforms + * src/arm/sysv.S: Use unified syntax. Clang clean-ups for + ARM_FUNC_START. + * generate-osx-source-and-headers.py: Remove. + * build-ios.sh: Remove. + * libffi.xcodeproj/project.pbxproj: Rebuild targets. Include + x86_64+aarch64 pieces in library. Export headers properly. + * src/x86/ffi64.c: More Clang warning clean-ups. + * src/closures.c (open_temp_exec_file_dir): Use size_t. + * src/prep_cif.c (ffi_prep_cif_core): Cast ALIGN result. + * src/aarch64/sysv.S: Use CNAME for global symbols. Only use + .size for ELF targets. + * src/aarch64/ffi.c: Clean up for double == long double. Clean up + from Clang warnings. Use Clang cache invalidation builtin. Use + size_t in place of unsigned in many places. Accommodate for + differences in Apple AArch64 ABI. + +2013-12-02 Daniel Rodríguez Troitiño + + * generate-darwin-source-and-headers.py: Clean up, modernize, + merged version of previous scripts. 
+ +2013-11-21 Anthony Green + + * configure, Makefile.in, include/Makefile.in, include/ffi.h.in, + man/Makefile.in, testsuite/Makefile.in, fficonfig.h.in: Rebuilt. + +2013-11-21 Alan Modra + + * Makefile.am (EXTRA_DIST): Add new src/powerpc files. + (nodist_libffi_la_SOURCES ): Likewise. + * configure.ac (HAVE_LONG_DOUBLE_VARIANT): Define for powerpc. + * include/ffi.h.in (ffi_prep_types): Declare. + * src/prep_cif.c (ffi_prep_cif_core): Call ffi_prep_types. + * src/types.c (FFI_NONCONST_TYPEDEF): Define and use for + HAVE_LONG_DOUBLE_VARIANT. + * src/powerpc/ffi_powerpc.h: New file. + * src/powerpc/ffi.c: Split into.. + * src/powerpc/ffi_sysv.c: ..new file, and.. + * src/powerpc/ffi_linux64.c: ..new file, rewriting parts. + * src/powerpc/ffitarget.h (enum ffi_abi): Rewrite powerpc ABI + selection as bits controlling features. + * src/powerpc/linux64.S: For consistency, use POWERPC64 rather + than __powerpc64__. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. Move .note.FNU-stack + inside guard. + * src/powerpc/sysv.S: Likewise. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + +2013-11-20 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use + NUM_FPR_ARG_REGISTERS64 and NUM_GPR_ARG_REGISTERS64 not their + 32-bit versions for 64-bit code. + * src/powerpc/linux64_closure.S: Don't use the return value area + as a parameter save area on ELFv2. + +2013-11-18 Iain Sandoe + + * src/powerpc/darwin.S (EH): Correct use of pcrel FDE encoding. + * src/powerpc/darwin_closure.S (EH): Likewise. Modernise picbase + labels. + +2013-11-18 Anthony Green + + * src/arm/ffi.c (ffi_call): Hoist declaration of temp to top of + function. + * src/arm/ffi.c (ffi_closure_inner): Moderize function declaration + to appease compiler. + Thanks for Gregory P. Smith . + +2013-11-18 Anthony Green + + * README (tested): Mention PowerPC ELFv2. + +2013-11-16 Alan Modra + + * src/powerpc/ppc_closure.S: Move errant #endif to where it belongs. + Don't bl .Luint128. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep_core): Use #if _CALL_ELF + test to select parameter save sizing for ELFv2 vs. ELFv1. + * src/powerpc/ffitarget.h (FFI_V2_TYPE_FLOAT_HOMOG, + FFI_V2_TYPE_DOUBLE_HOMOG, FFI_V2_TYPE_SMALL_STRUCT): Define. + (FFI_TRAMPOLINE_SIZE): Define variant for ELFv2. + * src/powerpc/ffi.c (FLAG_ARG_NEEDS_PSAVE): Define. + (discover_homogeneous_aggregate): New function. + (ffi_prep_args64): Adjust start of param save area for ELFv2. + Handle homogenous floating point struct parms. + (ffi_prep_cif_machdep_core): Adjust space calculation for ELFv2. + Handle ELFv2 return values. Set FLAG_ARG_NEEDS_PSAVE. Handle + homogenous floating point structs. + (ffi_call): Increase size of smst_buffer for ELFv2. Handle ELFv2. + (flush_icache): Compile for ELFv2. + (ffi_prep_closure_loc): Set up ELFv2 trampoline. + (ffi_closure_helper_LINUX64): Don't return all structs directly + to caller. Handle homogenous floating point structs. Handle + ELFv2 struct return values. + * src/powerpc/linux64.S (ffi_call_LINUX64): Set up r2 for + ELFv2. Adjust toc save location. Call function pointer using + r12. Handle FLAG_RETURNS_SMST. Don't predict branches. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Set up r2 + for ELFv2. Define ELFv2 versions of STACKFRAME, PARMSAVE, and + RETVAL. Handle possibly missing parameter save area. Handle + ELFv2 return values. + (.note.GNU-stack): Move inside outer #ifdef. 
+ +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Revert 2013-02-08 + change. Do not consume an int arg when returning a small struct + for FFI_SYSV ABI. + (ffi_call): Only use bounce buffer when FLAG_RETURNS_SMST. + Properly copy bounce buffer to destination. + * src/powerpc/sysv.S: Revert 2013-02-08 change. + * src/powerpc/ppc_closure.S: Remove stray '+'. + +2013-11-16 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Align struct parameters + according to __STRUCT_PARM_ALIGN__. + (ffi_prep_cif_machdep_core): Likewise. + (ffi_closure_helper_LINUX64): Likewise. + +2013-11-16 Alan Modra + + * src/powerpc/linux64.S (ffi_call_LINUX64): Tweak restore of r28. + (.note.GNU-stack): Move inside outer #ifdef. + * src/powerpc/linux64_closure.S (STACKFRAME, PARMSAVE, + RETVAL): Define and use throughout. + (ffi_closure_LINUX64): Save fprs before buying stack. + (.note.GNU-stack): Move inside outer #ifdef. + +2013-11-16 Alan Modra + + * src/powerpc/ffitarget.h (FFI_TARGET_SPECIFIC_VARIADIC): Define. + (FFI_EXTRA_CIF_FIELDS): Define. + * src/powerpc/ffi.c (ffi_prep_args64): Save fprs as per the + ABI, not to both fpr and param save area. + (ffi_prep_cif_machdep_core): Renamed from ffi_prep_cif_machdep. + Keep initial flags. Formatting. Remove dead FFI_LINUX_SOFT_FLOAT + code. + (ffi_prep_cif_machdep, ffi_prep_cif_machdep_var): New functions. + (ffi_closure_helper_LINUX64): Pass floating point as per ABI, + not to both fpr and parameter save areas. + + * libffi/testsuite/libffi.call/cls_double_va.c (main): Correct + function cast and don't call ffi_prep_cif. + * libffi/testsuite/libffi.call/cls_longdouble_va.c (main): Likewise. + +2013-11-15 Andrew Haley + + * doc/libffi.texi (Closure Example): Fix the sample code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-15 Andrew Haley + + * testsuite/libffi.call/va_struct1.c (main): Fix broken test. + * testsuite/libffi.call/cls_uint_va.c (cls_ret_T_fn): Likewise + * testsuite/libffi.call/cls_struct_va1.c (test_fn): Likewise. + * testsuite/libffi.call/va_1.c (main): Likewise. + +2013-11-14 David Schneider + + * src/arm/ffi.c: Fix register allocation for mixed float and + doubles. + * testsuite/libffi.call/cls_many_mixed_float_double.c: Testcase + for many mixed float and double arguments. + +2013-11-13 Alan Modra + + * doc/libffi.texi (Simple Example): Correct example code. + * doc/libffi.info, doc/stamp-vti, doc/version.texi: Rebuilt. + +2013-11-13 Anthony Green + + * include/ffi_common.h: Respect HAVE_ALLOCA_H for GNU compiler + based build. (Thanks to tmr111116 on github) + +2013-11-09 Anthony Green + + * m4/libtool.m4: Refresh. + * configure, Makefile.in: Rebuilt. + * README: Add more notes about next release. + +2013-11-09 Shigeharu TAKENO + + * m4/ax_gcc_archflag.m4 (ax_gcc_arch): Don't recognize + UltraSPARC-IIi as ultrasparc3. + +2013-11-06 Mark Kettenis + + * src/x86/freebsd.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2013-11-06 Konstantin Belousov + + * src/x86/freebsd.S (ffi_closure_raw_SYSV): Mark the assembler + source as not requiring executable stack. + +2013-11-02 Anthony Green + + * doc/libffi.texi (The Basics): Clarify return value buffer size + requirements. Also, NULL result buffer pointers are no longer + supported. + * doc/libffi.info: Rebuilt. + +2013-11-02 Mischa Jonker + + * Makefile.am (nodist_libffi_la_SOURCES): Fix build error. + * Makefile.in: Rebuilt. 
+ +2013-11-02 David Schneider + + * src/arm/ffi.c: more robust argument handling for closures on arm hardfloat + * testsuite/libffi.call/many_mixed.c: New file. + * testsuite/libffi.call/cls_many_mixed_args.c: More tests. + +2013-11-02 Vitaly Budovski + + * src/x86/ffi.c (ffi_prep_cif_machdep): Don't align stack for win32. + +2013-10-23 Mark H Weaver + + * src/mips/ffi.c: Fix handling of uint32_t arguments on the + MIPS N32 ABI. + +2013-10-13 Sandra Loosemore + + * README: Add Nios II to table of supported platforms. + * Makefile.am (EXTRA_DIST): Add nios2 files. + (nodist_libffi_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + * configure.ac (nios2*-linux*): New host. + (NIOS2): Add AM_CONDITIONAL. + * configure: Regenerated. + * src/nios2/ffi.c: New. + * src/nios2/ffitarget.h: New. + * src/nios2/sysv.S: New. + * src/prep_cif.c (initialize_aggregate): Handle extra structure + alignment via FFI_AGGREGATE_ALIGNMENT. + (ffi_prep_cif_core): Conditionalize structure return for NIOS2. + +2013-10-10 Sandra Loosemore + + * testsuite/libffi.call/cls_many_mixed_args.c (cls_ret_double_fn): + Fix uninitialized variable. + +2013-10-11 Marcus Shawcroft + + * testsuite/libffi.call/many.c (many): Replace * with +. + +2013-10-08 Ondřej Bílka + + * src/aarch64/ffi.c, src/aarch64/sysv.S, src/arm/ffi.c, + src/arm/gentramp.sh, src/bfin/sysv.S, src/closures.c, + src/dlmalloc.c, src/ia64/ffi.c, src/microblaze/ffi.c, + src/microblaze/sysv.S, src/powerpc/darwin_closure.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/sh/ffi.c, + src/tile/tile.S, testsuite/libffi.call/nested_struct11.c: Fix + spelling errors. + +2013-10-08 Anthony Green + + * aclocal.m4, compile, config.guess, config.sub, depcomp, + install-sh, mdate-sh, missing, texinfo.tex: Update from upstream. + * configure.ac: Update version to 3.0.14-rc0. + * Makefile.in, configure, Makefile.in, include/Makefile.in, + man/Makefile.in, testsuite/Makefile.in: Rebuilt. + * README: Mention M88K and VAX. + +2013-07-15 Miod Vallat + + * Makefile.am, + configure.ac, + src/m88k/ffi.c, + src/m88k/ffitarget.h, + src/m88k/obsd.S, + src/vax/elfbsd.S, + src/vax/ffi.c, + src/vax/ffitarget.h: Add m88k and vax support. + +2013-06-24 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Move var declaration + before statements. + (ffi_prep_args64): Support little-endian. + (ffi_closure_helper_SYSV, ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Likewise. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Likewise. + +2013-06-12 Mischa Jonker + + * configure.ac: Add support for ARC. + * Makefile.am: Likewise. + * README: Add ARC details. + * src/arc/arcompact.S: New. + * src/arc/ffi.c: Likewise. + * src/arc/ffitarget.h: Likewise. + +2013-03-28 David Schneider + + * src/arm/ffi.c: Fix support for ARM hard-float calling convention. + * src/arm/sysv.S: call different methods for SYSV and VFP ABIs. + * testsuite/libffi.call/cls_many_mixed_args.c: testcase for a closure with + mixed arguments, many doubles. + * testsuite/libffi.call/many_double.c: testcase for calling a function using + more than 8 doubles. + * testcase/libffi.call/many.c: use absolute value to check result against an + epsilon + +2013-03-17 Anthony Green + + * README: Update for 3.0.13. + * configure.ac: Ditto. + * configure: Rebuilt. + * doc/*: Update version. + +2013-03-17 Dave Korn + + * src/closures.c (is_emutramp_enabled + [!FFI_MMAP_EXEC_EMUTRAMP_PAX]): Move default definition outside + enclosing #if scope. 
+ +2013-03-17 Anthony Green + + * configure.ac: Only modify toolexecdir in certain cases. + * configure: Rebuilt. + +2013-03-16 Gilles Talis + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Don't use + fparg_count,etc on __NO_FPRS__ targets. + +2013-03-16 Alan Hourihane + + * src/m68k/sysv.S (epilogue): Don't use extb instruction on + m680000 machines. + +2013-03-16 Alex Gaynor + + * src/x86/ffi.c (ffi_prep_cif_machdep): Always align stack. + +2013-03-13 Markos Chandras + + * configure.ac: Add support for Imagination Technologies Meta. + * Makefile.am: Likewise. + * README: Add Imagination Technologies Meta details. + * src/metag/ffi.c: New. + * src/metag/ffitarget.h: Likewise. + * src/metag/sysv.S: Likewise. + +2013-02-24 Andreas Schwab + + * doc/libffi.texi (Structures): Fix missing category argument of + @deftp. + +2013-02-11 Anthony Green + + * configure.ac: Update release number to 3.0.12. + * configure: Rebuilt. + * README: Update release info. + +2013-02-10 Anthony Green + + * README: Add Moxie. + * src/moxie/ffi.c: Created. + * src/moxie/eabi.S: Created. + * src/moxie/ffitarget.h: Created. + * Makefile.am (nodist_libffi_la_SOURCES): Add Moxie. + * Makefile.in: Rebuilt. + * configure.ac: Add Moxie. + * configure: Rebuilt. + * testsuite/libffi.call/huge_struct.c: Disable format string + warnings for moxie*-*-elf tests. + +2013-02-10 Anthony Green + + * Makefile.am (LTLDFLAGS): Fix reference. + * Makefile.in: Rebuilt. + +2013-02-10 Anthony Green + + * README: Update supported platforms. Update test results link. + +2013-02-09 Anthony Green + + * testsuite/libffi.call/negint.c: Remove forced -O2. + * testsuite/libffi.call/many2.c (foo): Remove GCCism. + * testsuite/libffi.call/ffitest.h: Add default PRIuPTR definition. + + * src/sparc/v8.S (ffi_closure_v8): Import ancient ulonglong + closure return type fix developed by Martin v. Löwis for cpython + fork. + +2013-02-08 Andreas Tobler + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix small struct + support. + * src/powerpc/sysv.S: Ditto. + +2013-02-08 Anthony Green + + * testsuite/libffi.call/cls_longdouble.c: Remove xfail for + arm*-*-*. + +2013-02-08 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Fix cache flushing for GCC. + +2013-02-08 Matthias Klose + + * man/ffi_prep_cif.3: Clean up for debian linter. + +2013-02-08 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Account for FP args pushed + on the stack. + +2013-02-08 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am (EXTRA_DIST): Ditto. + * Makefile.in: Rebuilt. + +2013-02-08 Anthony Green + + * configure.ac: Move sparc asm config checks to within functions + for compatibility with sun tools. + * configure: Rebuilt. + * src/sparc/ffi.c (ffi_prep_closure_loc): Flush cache on v9 + systems. + * src/sparc/v8.S (ffi_flush_icache): Implement a sparc v9 cache + flusher. + +2013-02-08 Nathan Rossi + + * src/microblaze/ffi.c (ffi_closure_call_SYSV): Fix handling of + small big-endian structures. + (ffi_prep_args): Ditto. + +2013-02-07 Anthony Green + + * src/sparc/v8.S (ffi_call_v8): Fix typo from last patch + (effectively hiding ffi_call_v8). + +2013-02-07 Anthony Green + + * configure.ac: Update bug reporting address. + * configure.in: Rebuild. + + * src/sparc/v8.S (ffi_flush_icache): Out-of-line cache flusher for + Sun compiler. + * src/sparc/ffi.c (ffi_call): Remove warning. + Call ffi_flush_icache for non-GCC builds. + (ffi_prep_closure_loc): Use ffi_flush_icache. + + * Makefile.am (EXTRA_DIST): Add libtool-ldflags. 
+ * Makefile.in: Rebuilt. + * libtool-ldflags: New file. + +2013-02-07 Daniel Schepler + + * configure.ac: Correctly identify x32 systems as 64-bit. + * m4/libtool.m4: Remove libtool expr error. + * aclocal.m4, configure: Rebuilt. + +2013-02-07 Anthony Green + + * configure.ac: Fix GCC usage test. + * configure: Rebuilt. + * README: Mention LLVM/GCC x86_64 issue. + * testsuite/Makefile.in: Rebuilt. + +2013-02-07 Anthony Green + + * testsuite/libffi.call/cls_double_va.c (main): Replace // style + comments with /* */ for xlc compiler. + * testsuite/libffi.call/stret_large.c (main): Ditto. + * testsuite/libffi.call/stret_large2.c (main): Ditto. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/huge_struct.c (main): Ditto. + * testsuite/libffi.call/float_va.c (main): Ditto. + * testsuite/libffi.call/cls_struct_va1.c (main): Ditto. + * testsuite/libffi.call/cls_pointer_stack.c (main): Ditto. + * testsuite/libffi.call/cls_pointer.c (main): Ditto. + * testsuite/libffi.call/cls_longdouble_va.c (main): Ditto. + +2013-02-06 Anthony Green + + * man/ffi_prep_cif.3: Clean up for debian lintian checker. + +2013-02-06 Anthony Green + + * Makefile.am (pkgconfigdir): Add missing pkgconfig install bits. + * Makefile.in: Rebuild. + +2013-02-02 Mark H Weaver + + * src/x86/ffi64.c (ffi_call): Sign-extend integer arguments passed + via general purpose registers. + +2013-01-21 Nathan Rossi + + * README: Add MicroBlaze details. + * Makefile.am: Add MicroBlaze support. + * configure.ac: Likewise. + * src/microblaze/ffi.c: New. + * src/microblaze/ffitarget.h: Likewise. + * src/microblaze/sysv.S: Likewise. + +2013-01-21 Nathan Rossi + * testsuite/libffi.call/return_uc.c: Fixed issue. + +2013-01-21 Chris Zankel + + * README: Add Xtensa support. + * Makefile.am: Likewise. + * configure.ac: Likewise. + * Makefile.in Regenerate. + * configure: Likewise. + * src/prep_cif.c: Handle Xtensa. + * src/xtensa: New directory. + * src/xtensa/ffi.c: New file. + * src/xtensa/ffitarget.h: Ditto. + * src/xtensa/sysv.S: Ditto. + +2013-01-11 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Replace // style + comments with /* */ for xlc compiler. + * src/powerpc/aix.S (ffi_call_AIX): Ditto. + * testsuite/libffi.call/ffitest.h (allocate_mmap): Delete + deprecated inline function. + * testsuite/libffi.special/ffitestcxx.h: Ditto. + * README: Add update for AIX support. + +2013-01-11 Anthony Green + + * configure.ac: Robustify pc relative reloc check. + * m4/ax_cc_maxopt.m4: Don't -malign-double. This is an ABI + changing option for 32-bit x86. + * aclocal.m4, configure: Rebuilt. + * README: Update supported target list. + +2013-01-10 Anthony Green + + * README (tested): Add Compiler column to table. + +2013-01-10 Anthony Green + + * src/x86/ffi64.c (struct register_args): Make sse array and array + of unions for sunpro compiler compatibility. + +2013-01-10 Anthony Green + + * configure.ac: Test target platform size_t size. Handle both 32 + and 64-bit builds for x86_64-* and i?86-* targets (allowing for + CFLAG option to change default settings). + * configure, aclocal.m4: Rebuilt. + +2013-01-10 Anthony Green + + * testsuite/libffi.special/special.exp: Only run exception + handling tests when using GNU compiler. + + * m4/ax_compiler_vendor.m4: New file. + * configure.ac: Test for compiler vendor and don't use + AX_CFLAGS_WARN_ALL with the sun compiler. + * aclocal.m4, configure: Rebuilt. 
+ +2013-01-10 Anthony Green + + * include/ffi_common.h: Don't use GCCisms to define types when + building with the SUNPRO compiler. + +2013-01-10 Anthony Green + + * configure.ac: Put local.exp in the right place. + * configure: Rebuilt. + + * src/x86/ffi.c: Update comment about regparm function attributes. + * src/x86/sysv.S (ffi_closure_SYSV): The SUNPRO compiler requires + that all function arguments be passed on the stack (no regparm + support). + +2013-01-08 Anthony Green + + * configure.ac: Generate local.exp. This sets CC_FOR_TARGET + when we are using the vendor compiler. + * testsuite/Makefile.am (EXTRA_DEJAGNU_SITE_CONFIG): Point to + ../local.exp. + * configure, testsuite/Makefile.in: Rebuilt. + + * testsuite/libffi.call/call.exp: Run tests with different + options, depending on whether or not we are using gcc or the + vendor compiler. + * testsuite/lib/libffi.exp (libffi-init): Set using_gcc based on + whether or not we are building/testing with gcc. + +2013-01-08 Anthony Green + + * configure.ac: Switch x86 solaris target to X86 by default. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * configure.ac: Fix test for read-only eh_frame. + * configure: Rebuilt. + +2013-01-08 Anthony Green + + * src/x86/sysv.S, src/x86/unix64.S: Only emit DWARF unwind info + when building with the GNU toolchain. + * testsuite/libffi.call/ffitest.h (CHECK): Fix for Solaris vendor + compiler. + +2013-01-07 Thorsten Glaser + + * testsuite/libffi.call/cls_uchar_va.c, + testsuite/libffi.call/cls_ushort_va.c, + testsuite/libffi.call/va_1.c: Testsuite fixes. + +2013-01-07 Thorsten Glaser + + * src/m68k/ffi.c (CIF_FLAGS_SINT8, CIF_FLAGS_SINT16): Define. + (ffi_prep_cif_machdep): Fix 8-bit and 16-bit signed calls. + * src/m68k/sysv.S (ffi_call_SYSV, ffi_closure_SYSV): Ditto. + +2013-01-04 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't automatically add -fexceptions + and -Wall. This is set in the configure script after testing for + GCC. + * Makefile.in: Rebuilt. + +2013-01-02 rofl0r + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix build error on ppc + when long double == double. + +2013-01-02 Reini Urban + + * Makefile.am (libffi_la_LDFLAGS): Add -no-undefined to LDFLAGS + (required for shared libs on cygwin/mingw). + * Makefile.in: Rebuilt. + +2012-10-31 Alan Modra + + * src/powerpc/linux64_closure.S: Add new ABI support. + * src/powerpc/linux64.S: Likewise. + +2012-10-30 Magnus Granberg + Pavel Labushev + + * configure.ac: New options pax_emutramp + * configure, fficonfig.h.in: Regenerated + * src/closures.c: New function emutramp_enabled_check() and + checks. + +2012-10-30 Frederick Cheung + + * configure.ac: Enable FFI_MAP_EXEC_WRIT for Darwin 12 (mountain + lion) and future version. + * configure: Rebuild. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * README: Add details of aarch64 port. + * src/aarch64/ffi.c: New. + * src/aarch64/ffitarget.h: Likewise. + * src/aarch64/sysv.S: Likewise. + * Makefile.am: Support aarch64. + * configure.ac: Support aarch64. + * Makefile.in, configure: Rebuilt. + +2012-10-30 James Greenhalgh + Marcus Shawcroft + + * testsuite/lib/libffi.exp: Add support for aarch64. + * testsuite/libffi.call/cls_struct_va1.c: New. + * testsuite/libffi.call/cls_uchar_va.c: Likewise. + * testsuite/libffi.call/cls_uint_va.c: Likewise. + * testsuite/libffi.call/cls_ulong_va.c: Likewise. + * testsuite/libffi.call/cls_ushort_va.c: Likewise. + * testsuite/libffi.call/nested_struct11.c: Likewise. + * testsuite/libffi.call/uninitialized.c: Likewise. 
+ * testsuite/libffi.call/va_1.c: Likewise. + * testsuite/libffi.call/va_struct1.c: Likewise. + * testsuite/libffi.call/va_struct2.c: Likewise. + * testsuite/libffi.call/va_struct3.c: Likewise. + +2012-10-12 Walter Lee + + * Makefile.am: Add TILE-Gx/TILEPro support. + * configure.ac: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + * src/prep_cif.c (ffi_prep_cif_core): Handle TILE-Gx/TILEPro. + * src/tile: New directory. + * src/tile/ffi.c: New file. + * src/tile/ffitarget.h: Ditto. + * src/tile/tile.S: Ditto. + +2012-10-12 Matthias Klose + + * generate-osx-source-and-headers.py: Normalize whitespace. + +2012-09-14 David Edelsohn + + * configure: Regenerated. + +2012-08-26 Andrew Pinski + + PR libffi/53014 + * src/mips/ffi.c (ffi_prep_closure_loc): Allow n32 with soft-float and n64 with + soft-float. + +2012-08-08 Uros Bizjak + + * src/s390/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-07-18 H.J. Lu + + PR libffi/53982 + PR libffi/53973 + * src/x86/ffitarget.h: Check __ILP32__ instead of __LP64__ for x32. + (FFI_SIZEOF_JAVA_RAW): Defined to 4 for x32. + +2012-05-16 H.J. Lu + + * configure: Regenerated. + +2012-05-05 Nicolas Lelong + + * libffi.xcodeproj/project.pbxproj: Fixes. + * README: Update for iOS builds. + +2012-04-23 Alexandre Keunecke I. de Mendonca + + * configure.ac: Add Blackfin/sysv support + * Makefile.am: Add Blackfin/sysv support + * src/bfin/ffi.c: Add Blackfin/sysv support + * src/bfin/ffitarget.h: Add Blackfin/sysv support + +2012-04-11 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new script. + * Makefile.in: Rebuilt. + +2012-04-11 Zachary Waldowski + + * generate-ios-source-and-headers.py, + libffi.xcodeproj/project.pbxproj: Support a Mac static library via + Xcode. Set iOS compatibility to 4.0. Move iOS trampoline + generation into an Xcode "run script" phase. Include both as + Xcode build scripts. Don't always regenerate config files. + +2012-04-10 Anthony Green + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Add missing semicolon. + +2012-04-06 Anthony Green + + * Makefile.am (EXTRA_DIST): Add new iOS/xcode files. + * Makefile.in: Rebuilt. + +2012-04-06 Mike Lewis + + * generate-ios-source-and-headers.py: New file. + * libffi.xcodeproj/project.pbxproj: New file. + * README: Update instructions on building iOS binary. + * build-ios.sh: Delete. + +2012-04-06 Anthony Green + + * src/x86/ffi64.c (UINT128): Define differently for Intel and GNU + compilers, then use it. + +2012-04-06 H.J. Lu + + * m4/libtool.m4 (_LT_ENABLE_LOCK): Support x32. + +2012-04-06 Anthony Green + + * testsuite/Makefile.am (EXTRA_DIST): Add missing test cases. + * testsuite/Makefile.in: Rebuilt. + +2012-04-05 Zachary Waldowski + + * include/ffi.h.in: Add missing trampoline table fields. + * src/arm/sysv.S: Fix ENTRY definition, and wrap symbol references + in CNAME. + * src/x86/ffi.c: Wrap Windows specific code in ifdefs. + +2012-04-02 Peter Bergner + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Declare double_tmp. + Silence casting pointer to integer of different size warning. + Delete goto to previously deleted label. + (ffi_call): Silence possibly undefined warning. + (ffi_closure_helper_SYSV): Declare variable type. + +2012-04-02 Peter Rosin + + * src/x86/win32.S (ffi_call_win32): Sign/zero extend the return + value in the Intel version as is already done for the AT&T version. + (ffi_closure_SYSV): Likewise. + (ffi_closure_raw_SYSV): Likewise. + (ffi_closure_STDCALL): Likewise. 
+ +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_THISCALL): Unify the frame + generation, fix the ENDP label and remove the surplus third arg + from the 'lea' insn. + +2012-03-29 Peter Rosin + + * src/x86/win32.S (ffi_closure_raw_SYSV): Make the 'stubraw' label + visible outside the PROC, so that ffi_closure_raw_THISCALL can see + it. Also instruct the assembler to add a frame to the function. + +2012-03-23 Peter Rosin + + * Makefile.am (AM_CPPFLAGS): Add -DFFI_BUILDING. + * Makefile.in: Rebuilt. + * include/ffi.h.in [MSVC]: Add __declspec(dllimport) decorations + to all data exports, when compiling libffi clients using MSVC. + +2012-03-29 Peter Rosin + + * src/x86/ffitarget.h (ffi_abi): Add new ABI FFI_MS_CDECL and + make it the default for MSVC. + (FFI_TYPE_MS_STRUCT): New structure return convention. + * src/x86/ffi.c (ffi_prep_cif_machdep): Tweak the structure + return convention for FFI_MS_CDECL to be FFI_TYPE_MS_STRUCT + instead of an ordinary FFI_TYPE_STRUCT. + (ffi_prep_args): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_call): Likewise. + (ffi_prep_incoming_args_SYSV): Likewise. + (ffi_raw_call): Likewise. + (ffi_prep_closure_loc): Treat FFI_MS_CDECL as FFI_SYSV. + * src/x86/win32.S (ffi_closure_SYSV): For FFI_TYPE_MS_STRUCT, + return a pointer to the result structure in eax and don't pop + that pointer from the stack, the caller takes care of it. + (ffi_call_win32): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT. + (ffi_closure_raw_SYSV): Likewise. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/closure_stdcall.c [MSVC]: Add inline + assembly version with Intel syntax. + * testsuite/libffi.call/closure_thiscall.c [MSVC]: Likewise. + +2012-03-23 Peter Rosin + + * testsuite/libffi.call/ffitest.h: Provide abstration of + __attribute__((fastcall)) in the form of a __FASTCALL__ + define. Define it to __fastcall for MSVC. + * testsuite/libffi.call/fastthis1_win32.c: Use the above. + * testsuite/libffi.call/fastthis2_win32.c: Likewise. + * testsuite/libffi.call/fastthis3_win32.c: Likewise. + * testsuite/libffi.call/strlen2_win32.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + +2012-03-22 Peter Rosin + + * src/x86/win32.S [MSVC] (ffi_closure_THISCALL): Remove the manual + frame on function entry, MASM adds one automatically. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/ffitest.h [MSVC]: Add kludge for missing + bits in the MSVC headers. + +2012-03-22 Peter Rosin + + * testsuite/libffi.call/cls_12byte.c: Adjust to the C89 style + with no declarations after statements. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5_1_byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. 
+ * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_dbls_struct.c: Likewise. + * testsuite/libffi.call/cls_pointer_stack.c: Likewise. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct10.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct1_win32.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct2_win32.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.call/testclosure.c: Likewise. + +2012-03-21 Peter Rosin + + * testsuite/libffi.call/float_va.c (float_va_fn): Use %f when + printing doubles (%lf is for long doubles). + (main): Likewise. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-21 Peter Rosin + + * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*] + (set_ld_library_path_env_vars): Add the library search dir to PATH + (and save PATH for later). + (restore_ld_library_path_env_vars): Restore PATH. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. + * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-20 Peter Rosin + + * testsuite/libffi.call/strlen2_win32.c (main): Remove bug. 
+ * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label + visible outside the PROC, so that ffi_closure_THISCALL can see it. + +2012-03-19 Alan Hourihane + + * src/m68k/ffi.c: Add MINT support. + * src/m68k/sysv.S: Ditto. + +2012-03-06 Chung-Lin Tang + + * src/arm/ffi.c (ffi_call): Add __ARM_EABI__ guard around call to + ffi_call_VFP(). + (ffi_prep_closure_loc): Add __ARM_EABI__ guard around use of + ffi_closure_VFP. + * src/arm/sysv.S: Add __ARM_EABI__ guard around VFP code. + +2012-03-19 chennam + + * src/powerpc/ffi_darwin.c (ffi_prep_closure_loc): Fix AIX closure + support. + +2012-03-13 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/sh64/ffi.c (ffi_prep_closure_loc): Ditto. + +2012-03-09 David Edelsohn + + * src/powerpc/aix_closure.S (ffi_closure_ASM): Adjust for Darwin64 + change to return value of ffi_closure_helper_DARWIN and load type + from return type. + +2012-03-03 H.J. Lu + + * src/x86/ffi64.c (ffi_call): Cast the return value to unsigned + long. + (ffi_prep_closure_loc): Cast to 64bit address in trampoline. + (ffi_closure_unix64_inner): Cast return pointer to unsigned long + first. + + * src/x86/ffitarget.h (FFI_SIZEOF_ARG): Defined to 8 for x32. + (ffi_arg): Set to unsigned long long for x32. + (ffi_sarg): Set to long long for x32. + +2012-03-03 H.J. Lu + + * src/prep_cif.c (ffi_prep_cif_core): Properly check bad ABI. + +2012-03-03 Andoni Morales Alastruey + + * configure.ac: Add -no-undefined for both 32- and 64-bit x86 + windows-like hosts. + * configure: Rebuilt. + +2012-02-27 Mikael Pettersson + + PR libffi/52223 + * Makefile.am (FLAGS_TO_PASS): Define. + * Makefile.in: Regenerate. + +2012-02-23 Anthony Green + + * src/*/ffitarget.h: Ensure that users never include ffitarget.h + directly. + +2012-02-23 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_closure_raw_THISCALL): New + prototype. + (ffi_prep_raw_closure_loc): Use ffi_closure_raw_THISCALL for + thiscall-convention. + (ffi_raw_call): Use ffi_prep_args_raw. + * src/x86/win32.S (ffi_closure_raw_THISCALL): Add + implementation for stub. + +2012-02-10 Kai Tietz + + * configure.ac (AM_LTLDFLAGS): Add -no-undefine for x64 + windows target. + * configure: Regenerated. + +2012-02-08 Kai Tietz + + * src/prep_cif.c (ffi_prep_cif): Allow for X86_WIN32 + also FFI_THISCALL. + * src/x86/ffi.c (ffi_closure_THISCALL): Add prototype. + (FFI_INIT_TRAMPOLINE_THISCALL): New trampoline code. + (ffi_prep_closure_loc): Add FFI_THISCALL support. + * src/x86/ffitarget.h (FFI_TRAMPOLINE_SIZE): Adjust size. + * src/x86/win32.S (ffi_closure_THISCALL): New closure code + for thiscall-calling convention. + * testsuite/libffi.call/closure_thiscall.c: New test. + +2012-01-28 Kai Tietz + + * src/libffi/src/x86/ffi.c (ffi_call_win32): Add new + argument to prototype for specify calling-convention. + (ffi_call): Add support for stdcall/thiscall convention. + (ffi_prep_args): Likewise. + (ffi_raw_call): Likewise. + * src/x86/ffitarget.h (ffi_abi): Add FFI_THISCALL and + FFI_FASTCALL. + * src/x86/win32.S (_ffi_call_win32): Add support for + fastcall/thiscall calling-convention calls. + * testsuite/libffi.call/fastthis1_win32.c: New test. + * testsuite/libffi.call/fastthis2_win32.c: New test. + * testsuite/libffi.call/fastthis3_win32.c: New test. + * testsuite/libffi.call/strlen2_win32.c: New test. + * testsuite/libffi.call/many2_win32.c: New test. + * testsuite/libffi.call/struct1_win32.c: New test. 
+ * testsuite/libffi.call/struct2_win32.c: New test. + +2012-01-23 Uros Bizjak + + * src/alpha/ffi.c (ffi_prep_closure_loc): Check for bad ABI. + +2012-01-23 Anthony Green + Chris Young + + * configure.ac: Add Amiga support. + * configure: Rebuilt. + +2012-01-23 Dmitry Nadezhin + + * include/ffi_common.h (LIKELY, UNLIKELY): Fix definitions. + +2012-01-23 Andreas Schwab + + * src/m68k/sysv.S (ffi_call_SYSV): Properly test for plain + mc68000. Test for __HAVE_68881__ in addition to __MC68881__. + +2012-01-19 Jakub Jelinek + + PR rtl-optimization/48496 + * src/ia64/ffi.c (ffi_call): Fix up aliasing violations. + +2012-01-09 Rainer Orth + + * configure.ac (i?86-*-*): Set TARGET to X86_64. + * configure: Regenerate. + +2011-12-07 Andrew Pinski + + PR libffi/50051 + * src/mips/n32.S: Add ".set mips4". + +2011-11-21 Andreas Tobler + + * configure: Regenerate. + +2011-11-12 David Gilbert + + * doc/libffi.texi, include/ffi.h.in, include/ffi_common.h, + man/Makefile.am, man/ffi.3, man/ffi_prep_cif.3, + man/ffi_prep_cif_var.3, src/arm/ffi.c, src/arm/ffitarget.h, + src/cris/ffi.c, src/prep_cif.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/float_va.c: Many changes to support variadic + function calls. + +2011-11-12 Kyle Moffett + + * src/powerpc/ffi.c, src/powerpc/ffitarget.h, + src/powerpc/ppc_closure.S, src/powerpc/sysv.S: Many changes for + softfloat powerpc variants. + +2011-11-12 Petr Salinger + + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Fix kfreebsd support. + * configure: Rebuilt. + +2011-11-12 Timothy Wall + + * src/arm/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): Max + alignment of 4 for wince on ARM. + +2011-11-12 Kyle Moffett + Anthony Green + + * src/ppc/sysv.S, src/ppc/ffi.c: Remove use of ppc string + instructions (not available on some cores, like the PPC440). + +2011-11-12 Kimura Wataru + + * m4/ax_enable_builddir: Change from string comparison to numeric + comparison for wc output. + * configure.ac: Enable FFI_MMAP_EXEC_WRIT for darwin11 aka Mac OS + X 10.7. + * configure: Rebuilt. + +2011-11-12 Anthony Green + + * Makefile.am (AM_CCASFLAGS): Add -g option to build assembly + files with debug info. + * Makefile.in: Rebuilt. + +2011-11-12 Jasper Lievisse Adriaanse + + * README: Update list of supported OpenBSD systems. + +2011-11-12 Anthony Green + + * libtool-version: Update. + * Makefile.am (nodist_libffi_la_SOURCES): Add src/debug.c if + FFI_DEBUG. + (libffi_la_SOURCES): Remove src/debug.c + (EXTRA_DIST): Add src/debug.c + * Makefile.in: Rebuilt. + * README: Update for 3.0.11. + +2011-11-10 Richard Henderson + + * configure.ac (GCC_AS_CFI_PSEUDO_OP): Use it instead of inline check. + * configure, aclocal.m4: Rebuild. + +2011-09-04 Iain Sandoe + + PR libffi/49594 + * src/powerpc/darwin_closure.S (stubs): Make the stub binding + helper reference track the architecture pointer size. + +2011-08-25 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Remove hard-coded assembly + instructions. + * src/arm/sysv.S (ffi_arm_trampoline): Put them here instead. + +2011-07-11 Andrew Haley + + * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Clear icache. + +2011-06-29 Rainer Orth + + * testsuite/libffi.call/cls_double_va.c: Move PR number to comment. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. + +2011-06-29 Rainer Orth + + PR libffi/46660 + * testsuite/libffi.call/cls_double_va.c: xfail dg-output on + mips-sgi-irix6*. + * testsuite/libffi.call/cls_longdouble_va.c: Likewise. 
+ +2011-06-14 Rainer Orth + + * testsuite/libffi.call/huge_struct.c (test_large_fn): Use PRIu8, + PRId8 instead of %hhu, %hhd. + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRId8, + PRIu8): Define. + [__sgi__] (PRId8, PRIu8): Define. + +2011-04-29 Rainer Orth + + * src/alpha/osf.S (UA_SI, FDE_ENCODING, FDE_ENCODE, FDE_ARANGE): + Define. + Use them to handle ELF vs. ECOFF differences. + [__osf__] (_GLOBAL__F_ffi_call_osf): Define. + +2011-03-30 Timothy Wall + + * src/powerpc/darwin.S: Fix unknown FDE encoding. + * src/powerpc/darwin_closure.S: ditto. + +2011-02-25 Anthony Green + + * src/powerpc/ffi.c (ffi_prep_closure_loc): Allow for more + 32-bit ABIs. + +2011-02-15 Anthony Green + + * m4/ax_cc_maxopt.m4: Don't -malign-double or use -ffast-math. + * configure: Rebuilt. + +2011-02-13 Ralf Wildenhues + + * configure: Regenerate. + +2011-02-13 Anthony Green + + * include/ffi_common.h (UNLIKELY, LIKELY): Define. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Remove definition. + * src/prep_cif.c (UNLIKELY, LIKELY): Remove definition. + + * src/prep_cif.c (initialize_aggregate): Convert assertion into + FFI_BAD_TYPEDEF return. Initialize arg size and alignment to 0. + + * src/pa/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + * src/arm/ffi.c (ffi_prep_closure_loc): Ditto. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Ditto. + * src/mips/ffi.c (ffi_prep_closure_loc): Ditto. + * src/ia64/ffi.c (ffi_prep_closure_loc): Ditto. + * src/avr32/ffi.c (ffi_prep_closure_loc): Ditto. + +2011-02-11 Anthony Green + + * src/sparc/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test, + just return FFI_BAD_ABI when things are wrong. + +2012-02-11 Eric Botcazou + + * src/sparc/v9.S (STACKFRAME): Bump to 176. + +2011-02-09 Stuart Shelton + + http://bugs.gentoo.org/show_bug.cgi?id=286911 + * src/mips/ffitarget.h: Clean up error messages. + * src/java_raw_api.c (ffi_java_translate_args): Cast raw arg to + ffi_raw*. + * include/ffi.h.in: Add pragma for SGI compiler. + +2011-02-09 Anthony Green + + * configure.ac: Add powerpc64-*-darwin* support. + +2011-02-09 Anthony Green + + * README: Mention Interix. + +2011-02-09 Jonathan Callen + + * configure.ac: Add Interix to win32/cygwin/mingw case. + * configure: Ditto. + * src/closures.c: Treat Interix like Cygwin, instead of as a + generic win32. + +2011-02-09 Anthony Green + + * testsuite/libffi.call/err_bad_typedef.c: Remove xfail. + * testsuite/libffi.call/err_bad_abi.c: Remove xfail. + * src/x86/ffi64.c (UNLIKELY, LIKELY): Define. + (ffi_prep_closure_loc): Check for bad ABI. + * src/prep_cif.c (UNLIKELY, LIKELY): Define. + (initialize_aggregate): Check for bad types. + +2011-02-09 Landon Fuller + + * Makefile.am (EXTRA_DIST): Add build-ios.sh, src/arm/gentramp.sh, + src/arm/trampoline.S. + (nodist_libffi_la_SOURCES): Add src/arc/trampoline.S. + * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Define. + * src/arm/ffi.c (ffi_trampoline_table) + (ffi_closure_trampoline_table_page, ffi_trampoline_table_entry) + (FFI_TRAMPOLINE_CODELOC_CONFIG, FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET) + (FFI_TRAMPOLINE_COUNT, ffi_trampoline_lock, ffi_trampoline_tables) + (ffi_trampoline_table_alloc, ffi_closure_alloc, ffi_closure_free): + Define for FFI_EXEC_TRAMPOLINE_TABLE case (iOS). + (ffi_prep_closure_loc): Handl FFI_EXEC_TRAMPOLINE_TABLE case + separately. + * src/arm/sysv.S: Handle Apple iOS host. + * src/closures.c: Handle FFI_EXEC_TRAMPOLINE_TABLE case. + * build-ios.sh: New file. 
+ * fficonfig.h.in, configure, Makefile.in: Rebuilt. + * README: Mention ARM iOS. + +2011-02-08 Oren Held + + * src/dlmalloc.c (_STRUCT_MALLINFO): Define in order to avoid + redefinition of mallinfo on HP-UX. + +2011-02-08 Ginn Chen + + * src/sparc/ffi.c (ffi_call): Make compatible with Solaris Studio + aggregate return ABI. Flush cache. + (ffi_prep_closure_loc): Flush cache. + +2011-02-11 Anthony Green + + From Tom Honermann : + * src/powerpc/aix.S (ffi_call_AIX): Support for xlc toolchain on + AIX. Declare .ffi_prep_args. Insert nops after branch + instructions so that the AIX linker can insert TOC reload + instructions. + * src/powerpc/aix_closure.S: Declare .ffi_closure_helper_DARWIN. + +2011-02-08 Ed + + * src/powerpc/asm.h: Fix grammar nit in comment. + +2011-02-08 Uli Link + + * include/ffi.h.in (FFI_64_BIT_MAX): Define and use. + +2011-02-09 Rainer Orth + + PR libffi/46661 + * testsuite/libffi.call/cls_pointer.c (main): Cast void * to + uintptr_t first. + * testsuite/libffi.call/cls_pointer_stack.c (main): Likewise. + +2011-02-08 Rafael Avila de Espindola + + * configure.ac: Fix x86 test for pc related relocs. + * configure: Rebuilt. + +2011-02-07 Joel Sherrill + + * libffi/src/m68k/ffi.c: Add RTEMS support for cache flushing. + Handle case when CPU variant does not have long double support. + * libffi/src/m68k/sysv.S: Add support for mc68000, Coldfire, + and cores with soft floating point. + +2011-02-07 Joel Sherrill + + * configure.ac: Add mips*-*-rtems* support. + * configure: Regenerate. + * src/mips/ffitarget.h: Ensure needed constants are available + for targets which do not have sgidefs.h. + +2011-01-26 Dave Korn + + PR target/40125 + * configure.ac (AM_LTLDFLAGS): Add -bindir option for windows DLLs. + * configure: Regenerate. + +2010-12-18 Iain Sandoe + + PR libffi/29152 + PR libffi/42378 + * src/powerpc/darwin_closure.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffitarget.h (POWERPC_DARWIN64): New, + (FFI_TRAMPOLINE_SIZE): Update for Darwin64. + * src/powerpc/darwin.S: Provide Darwin64 implementation, + update comments. + * src/powerpc/ffi_darwin.c: Likewise. + +2010-12-06 Rainer Orth + + * configure.ac (libffi_cv_as_ascii_pseudo_op): Use double + backslashes. + (libffi_cv_as_string_pseudo_op): Likewise. + * configure: Regenerate. + +2010-12-03 Chung-Lin Tang + + * src/arm/sysv.S (ffi_closure_SYSV): Add UNWIND to .pad directive. + (ffi_closure_VFP): Same. + (ffi_call_VFP): Move down to before ffi_closure_VFP. Add '.fpu vfp' + directive. + +2010-12-01 Rainer Orth + + * testsuite/libffi.call/ffitest.h [__sgi] (PRId64, PRIu64): Define. + (PRIuPTR): Define. + +2010-11-29 Richard Henderson + Rainer Orth + + * src/x86/sysv.S (FDE_ENCODING, FDE_ENCODE): Define. + (.eh_frame): Use FDE_ENCODING. + (.LASFDE1, .LASFDE2, LASFDE3): Simplify with FDE_ENCODE. + +2010-11-22 Jacek Caban + + * configure.ac: Check for symbol underscores on mingw-w64. + * configure: Rebuilt. + * src/x86/win64.S: Correctly access extern symbols in respect to + underscores. + +2010-11-15 Rainer Orth + + * testsuite/lib/libffi-dg.exp: Rename ... + * testsuite/lib/libffi.exp: ... to this. + * libffi/testsuite/libffi.call/call.exp: Don't load libffi-dg.exp. + * libffi/testsuite/libffi.special/special.exp: Likewise. + +2010-10-28 Chung-Lin Tang + + * src/arm/ffi.c (ffi_prep_args): Add VFP register argument handling + code, new parameter, and return value. Update comments. + (ffi_prep_cif_machdep): Add case for VFP struct return values. Add + call to layout_vfp_args(). 
+ (ffi_call_SYSV): Update declaration. + (ffi_call_VFP): New declaration. + (ffi_call): Add VFP struct return conditions. Call ffi_call_VFP() + when ABI is FFI_VFP. + (ffi_closure_VFP): New declaration. + (ffi_closure_SYSV_inner): Add new vfp_args parameter, update call to + ffi_prep_incoming_args_SYSV(). + (ffi_prep_incoming_args_SYSV): Update parameters. Add VFP argument + case handling. + (ffi_prep_closure_loc): Pass ffi_closure_VFP to trampoline + construction under VFP hard-float. + (rec_vfp_type_p): New function. + (vfp_type_p): Same. + (place_vfp_arg): Same. + (layout_vfp_args): Same. + * src/arm/ffitarget.h (ffi_abi): Add FFI_VFP. Define FFI_DEFAULT_ABI + based on __ARM_PCS_VFP. + (FFI_EXTRA_CIF_FIELDS): Define for adding VFP hard-float specific + fields. + (FFI_TYPE_STRUCT_VFP_FLOAT): Define internally used type code. + (FFI_TYPE_STRUCT_VFP_DOUBLE): Same. + * src/arm/sysv.S (ffi_call_SYSV): Change call of ffi_prep_args() to + direct call. Move function pointer load upwards. + (ffi_call_VFP): New function. + (ffi_closure_VFP): Same. + + * testsuite/lib/libffi-dg.exp (check-flags): New function. + (dg-skip-if): New function. + * testsuite/libffi.call/cls_double_va.c: Skip if target is arm*-*-* + and compiler options include -mfloat-abi=hard. + * testsuite/libffi.call/cls_longdouble_va.c: Same. + +2010-10-01 Jakub Jelinek + + PR libffi/45677 + * src/x86/ffi64.c (ffi_prep_cif_machdep): Ensure cif->bytes is + a multiple of 8. + * testsuite/libffi.call/many2.c: New test. + +2010-08-20 Mark Wielaard + + * src/closures.c (open_temp_exec_file_mnt): Check if getmntent_r + returns NULL. + +2010-08-09 Andreas Tobler + + * configure.ac: Add target powerpc64-*-freebsd*. + * configure: Regenerate. + * testsuite/libffi.call/cls_align_longdouble_split.c: Pass + -mlong-double-128 only to linux targets. + * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise. + * testsuite/libffi.call/cls_longdouble.c: Likewise. + * testsuite/libffi.call/huge_struct.c: Likewise. + +2010-08-05 Dan Witte + + * Makefile.am: Pass FFI_DEBUG define to msvcc.sh for linking to the + debug CRT when --enable-debug is given. + * configure.ac: Define it. + * msvcc.sh: Translate -g and -DFFI_DEBUG appropriately. + +2010-08-04 Dan Witte + + * src/x86/ffitarget.h: Add X86_ANY define for all x86/x86_64 + platforms. + * src/x86/ffi.c: Remove redundant ifdef checks. + * src/prep_cif.c: Push stack space computation into src/x86/ffi.c + for X86_ANY so return value space doesn't get added twice. + +2010-08-03 Neil Rashbrooke + + * msvcc.sh: Don't pass -safeseh to ml64 because behavior is buggy. + +2010-07-22 Dan Witte + + * src/*/ffitarget.h: Make FFI_LAST_ABI one past the last valid ABI. + * src/prep_cif.c: Fix ABI assertion. + * src/cris/ffi.c: Ditto. + +2010-07-10 Evan Phoenix + + * src/closures.c (selinux_enabled_check): Fix strncmp usage bug. + +2010-07-07 Dan Horák + + * include/ffi.h.in: Protect #define with #ifndef. + * src/powerpc/ffitarget.h: Ditto. + * src/s390/ffitarget.h: Ditto. + * src/sparc/ffitarget.h: Ditto. + +2010-07-07 Neil Roberts + + * src/x86/sysv.S (ffi_call_SYSV): Align the stack pointer to + 16-bytes. + +2010-07-02 Jakub Jelinek + + * Makefile.am (AM_MAKEFLAGS): Pass also mandir to submakes. + * Makefile.in: Regenerated. + +2010-05-19 Rainer Orth + + * configure.ac (libffi_cv_as_x86_pcrel): Check for illegal in as + output, too. + (libffi_cv_as_ascii_pseudo_op): Check for .ascii. + (libffi_cv_as_string_pseudo_op): Check for .string. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. 
+ * src/x86/sysv.S (.eh_frame): Use .ascii, .string or error. + +2010-05-11 Dan Witte + + * doc/libffi.tex: Document previous change. + +2010-05-11 Makoto Kato + + * src/x86/ffi.c (ffi_call): Don't copy structs passed by value. + +2010-05-05 Michael Kohler + + * src/dlmalloc.c (dlfree): Fix spelling. + * src/ia64/ffi.c (ffi_prep_cif_machdep): Ditto. + * configure.ac: Ditto. + * configure: Rebuilt. + +2010-04-13 Dan Witte + + * msvcc.sh: Build with -W3 instead of -Wall. + * src/powerpc/ffi_darwin.c: Remove build warnings. + * src/x86/ffi.c: Ditto. + * src/x86/ffitarget.h: Ditto. + +2010-04-12 Dan Witte + Walter Meinl + + * configure.ac: Add OS/2 support. + * configure: Rebuilt. + * src/closures.c: Ditto. + * src/dlmalloc.c: Ditto. + * src/x86/win32.S: Ditto. + +2010-04-07 Jakub Jelinek + + * testsuite/libffi.call/err_bad_abi.c: Remove unused args variable. + +2010-04-02 Ralf Wildenhues + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2010-03-30 Dan Witte + + * msvcc.sh: Disable build warnings. + * README (tested): Clarify windows build procedure. + +2010-03-15 Rainer Orth + + * configure.ac (libffi_cv_as_x86_64_unwind_section_type): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * libffi/src/x86/unix64.S (.eh_frame) + [HAVE_AS_X86_64_UNWIND_SECTION_TYPE]: Use @unwind section type. + +2010-03-14 Matthias Klose + + * src/x86/ffi64.c: Fix typo in comment. + * src/x86/ffi.c: Use /* ... */ comment style. + +2010-02-24 Rainer Orth + + * doc/libffi.texi (The Closure API): Fix typo. + * doc/libffi.info: Remove. + +2010-02-15 Matthias Klose + + * src/arm/sysv.S (__ARM_ARCH__): Define for processor + __ARM_ARCH_7EM__. + +2010-01-15 Anthony Green + + * README: Add notes on building with Microsoft Visual C++. + +2010-01-15 Daniel Witte + + * msvcc.sh: New file. + + * src/x86/win32.S: Port assembly routines to MSVC and #ifdef. + * src/x86/ffi.c: Tweak function declaration and remove excess + parens. + * include/ffi.h.in: Add __declspec(align(8)) to typedef struct + ffi_closure. + + * src/x86/ffi.c: Merge ffi_call_SYSV and ffi_call_STDCALL into new + function ffi_call_win32 on X86_WIN32. + * src/x86/win32.S (ffi_call_SYSV): Rename to ffi_call_win32. + (ffi_call_STDCALL): Remove. + + * src/prep_cif.c (ffi_prep_cif): Move stack space allocation code + to ffi_prep_cif_machdep for x86. + * src/x86/ffi.c (ffi_prep_cif_machdep): To here. + +2010-01-15 Oliver Kiddle + + * src/x86/ffitarget.h (ffi_abi): Check for __i386 and __amd64 for + Sun Studio compiler compatibility. + +2010-01-12 Conrad Irwin + + * doc/libffi.texi: Add closure example. + +2010-01-07 Rainer Orth + + PR libffi/40701 + * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRIdLL, + PRIuLL, PRId64, PRIu64, PRIuPTR): Define. + * testsuite/libffi.call/cls_align_sint64.c: Add -Wno-format on + alpha*-dec-osf*. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/return_ll1.c: Likewise. + * testsuite/libffi.call/stret_medium2.c: Likewise. + * testsuite/libffi.special/ffitestcxx.h (allocate_mmap): Cast + MAP_FAILED to char *. + +2010-01-06 Rainer Orth + + * src/mips/n32.S: Use .abicalls and .eh_frame with __GNUC__. + +2009-12-31 Anthony Green + + * README: Update for libffi 3.0.9. + +2009-12-27 Matthias Klose + + * configure.ac (HAVE_LONG_DOUBLE): Define for mips when + appropriate. + * configure: Rebuilt. 
+ +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_longdouble_va.c: Mark as xfail for + avr32*-*-*. + * testsuite/libffi.call/cls_double_va.c: Ditto. + +2009-12-26 Andreas Tobler + + * testsuite/libffi.call/ffitest.h: Conditionally include stdint.h + and inttypes.h. + * testsuite/libffi.special/unwindtest.cc: Ditto. + +2009-12-26 Andreas Tobler + + * configure.ac: Add amd64-*-openbsd*. + * configure: Rebuilt. + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Link + openbsd programs with -lpthread. + +2009-12-26 Anthony Green + + * testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: Remove xfail for + mips*-*-* and arm*-*-*. + * testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c: Remove xfail for arm*-*-*. + +2009-12-31 Kay Tietz + + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRuLL): Fix + definitions. + +2009-12-31 Carlo Bramini + + * configure.ac (AM_LTLDFLAGS): Define for windows hosts. + * Makefile.am (libffi_la_LDFLAGS): Add AM_LTLDFLAGS. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + +2009-12-31 Anthony Green + Blake Chaffin. + + * testsuite/libffi.call/huge_struct.c: New test case from Blake + Chaffin @ Apple. + +2009-12-28 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Copy abi and nargs to + local variables. + (aix_adjust_aggregate_sizes): New function. + (ffi_prep_cif_machdep): Call it. + +2009-12-26 Andreas Tobler + + * configure.ac: Define FFI_MMAP_EXEC_WRIT for the given targets. + * configure: Regenerate. + * fficonfig.h.in: Likewise. + * src/closures.c: Remove the FFI_MMAP_EXEC_WRIT definition for + Solaris/x86. + +2009-12-26 Andreas Schwab + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Advance intarg_count + when a float arguments is passed in memory. + (ffi_closure_helper_SYSV): Mark general registers as used up when + a 64bit or soft-float long double argument is passed in memory. + +2009-12-25 Matthias Klose + + * man/ffi_call.3: Fix #include in examples. + * doc/libffi.texi: Add dircategory. + +2009-12-25 Frank Everdij + + * include/ffi.h.in: Placed '__GNUC__' ifdef around + '__attribute__((aligned(8)))' in ffi_closure, fixes compile for + IRIX MIPSPro c99. + * include/ffi_common.h: Added '__sgi' define to non + '__attribute__((__mode__()))' integer typedefs. + * src/mips/ffi.c (ffi_call, ffi_closure_mips_inner_O32, + ffi_closure_mips_inner_N32): Added 'defined(_MIPSEB)' to BE check. + (ffi_closure_mips_inner_O32, ffi_closure_mips_inner_N32): Added + FFI_LONGDOUBLE support and alignment(N32 only). + * src/mips/ffitarget.h: Corrected '#include ' for IRIX and + fixed non '__attribute__((__mode__()))' integer typedefs. + * src/mips/n32.S: Put '#ifdef linux' around '.abicalls' and '.eh_frame' + since they are Linux/GNU Assembler specific. + +2009-12-25 Bradley Smith + + * configure.ac, Makefile.am, src/avr32/ffi.c, + src/avr32/ffitarget.h, + src/avr32/sysv.S: Add AVR32 port. + * configure, Makefile.in: Rebuilt. + +2009-12-21 Andreas Tobler + + * configure.ac: Make i?86 build on FreeBSD and OpenBSD. + * configure: Regenerate. + +2009-12-15 John David Anglin + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on PA HP-UX. 
+ +2009-12-13 John David Anglin + + * src/pa/ffi.c (ffi_closure_inner_pa32): Handle FFI_TYPE_LONGDOUBLE + type on HP-UX. + +2012-02-13 Kai Tietz + + PR libffi/52221 + * src/x86/ffi.c (ffi_prep_raw_closure_loc): Add thiscall + support for X86_WIN32. + (FFI_INIT_TRAMPOLINE_THISCALL): Fix displacement. + +2009-12-11 Eric Botcazou + + * src/sparc/ffi.c (ffi_closure_sparc_inner_v9): Properly align 'long + double' arguments. + +2009-12-11 Eric Botcazou + + * testsuite/libffi.call/ffitest.h: Define PRIuPTR on Solaris < 10. + +2009-12-10 Rainer Orth + + PR libffi/40700 + * src/closures.c [X86_64 && __sun__ && __svr4__] + (FFI_MMAP_EXEC_WRIT): Define. + +2009-12-08 David Daney + + * testsuite/libffi.call/stret_medium.c: Remove xfail for mips*-*-* + * testsuite/libffi.call/cls_align_longdouble_split2.c: Same. + * testsuite/libffi.call/stret_large.c: Same. + * testsuite/libffi.call/cls_align_longdouble_split.c: Same. + * testsuite/libffi.call/stret_large2.c: Same. + * testsuite/libffi.call/stret_medium2.c: Same. + +2009-12-07 David Edelsohn + + * src/powerpc/aix_closure.S (libffi_closure_ASM): Fix tablejump + typo. + +2009-12-05 David Edelsohn + + * src/powerpc/aix.S: Update AIX32 code to be consistent with AIX64 + code. + * src/powerpc/aix_closure.S: Same. + +2009-12-05 Ralf Wildenhues + + * Makefile.in: Regenerate. + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2009-12-04 David Edelsohn + + * src/powerpc/aix_closure.S: Reorganize 64-bit code to match + linux64_closure.S. + +2009-12-04 Uros Bizjak + + PR libffi/41908 + * src/x86/ffi64.c (classify_argument): Update from + gcc/config/i386/i386.c. + (ffi_closure_unix64_inner): Do not use the address of two consecutive + SSE registers directly. + * testsuite/libffi.call/cls_dbls_struct.c (main): Remove xfail + for x86_64 linux targets. + +2009-12-04 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_closure_helper_DARWIN): Increment + pfr for long double split between fpr13 and stack. + +2009-12-03 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Increment next_arg and + fparg_count twice for long double. + +2009-12-03 David Edelsohn + + PR libffi/42243 + * src/powerpc/ffi_darwin.c (ffi_prep_args): Remove extra parentheses. + +2009-12-03 Uros Bizjak + + * testsuite/libffi.call/cls_longdouble_va.c (main): Fix format string. + Remove xfails for x86 linux targets. + +2009-12-02 David Edelsohn + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Fix typo in INT64 + case. + +2009-12-01 David Edelsohn + + * src/powerpc/aix.S (ffi_call_AIX): Convert to more standard + register usage. Call ffi_prep_args directly. Add long double + return value support. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Double arg increment + applies to FFI_TYPE_DOUBLE. Correct fpr_base increment typo. + Separate FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases. + (ffi_prep_cif_machdep): Only 16 byte stack alignment in 64 bit + mode. + (ffi_closure_helper_DARWIN): Remove nf and ng counters. Move temp + into case. + * src/powerpc/aix_closure.S: Maintain 16 byte stack alignment. + Allocate result area between params and FPRs. + +2009-11-30 David Edelsohn + + PR target/35484 + * src/powerpc/ffitarget.h (POWERPC64): Define for PPC64 Linux and + AIX64. + * src/powerpc/aix.S: Implement AIX64 version. + * src/powerpc/aix_closure.S: Implement AIX64 version. + (ffi_closure_ASM): Use extsb, lha and displament addresses. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Implement AIX64 + support. 
+ (ffi_prep_cif_machdep): Same. + (ffi_call): Same. + (ffi_closure_helper_DARWIN): Same. + +2009-11-02 Andreas Tobler + + PR libffi/41908 + * testsuite/libffi.call/testclosure.c: New test. + +2009-09-28 Kai Tietz + + * src/x86/win64.S (_ffi_call_win64 stack): Remove for gnu + assembly version use of ___chkstk. + +2009-09-23 Matthias Klose + + PR libffi/40242, PR libffi/41443 + * src/arm/sysv.S (__ARM_ARCH__): Define for processors + __ARM_ARCH_6T2__, __ARM_ARCH_6M__, __ARM_ARCH_7__, + __ARM_ARCH_7A__, __ARM_ARCH_7R__, __ARM_ARCH_7M__. + Change the conditionals to __SOFTFP__ || __ARM_EABI__ + for -mfloat-abi=softfp to work. + +2009-09-17 Loren J. Rittle + + PR testsuite/32843 (strikes again) + * src/x86/ffi.c (ffi_prep_cif_machdep): Add X86_FREEBSD to + enable proper extension on char and short. + +2009-09-15 David Daney + + * src/java_raw_api.c (ffi_java_raw_to_rvalue): Remove special + handling for FFI_TYPE_POINTER. + * src/mips/ffitarget.h (FFI_TYPE_STRUCT_D_SOFT, + FFI_TYPE_STRUCT_F_SOFT, FFI_TYPE_STRUCT_DD_SOFT, + FFI_TYPE_STRUCT_FF_SOFT, FFI_TYPE_STRUCT_FD_SOFT, + FFI_TYPE_STRUCT_DF_SOFT, FFI_TYPE_STRUCT_SOFT): New defines. + (FFI_N32_SOFT_FLOAT, FFI_N64_SOFT_FLOAT): New ffi_abi enumerations. + (enum ffi_abi): Set FFI_DEFAULT_ABI for soft-float. + * src/mips/n32.S (ffi_call_N32): Add handling for soft-float + structure and pointer returns. + (ffi_closure_N32): Add handling for pointer returns. + * src/mips/ffi.c (ffi_prep_args, calc_n32_struct_flags, + calc_n32_return_struct_flags): Handle soft-float. + (ffi_prep_cif_machdep): Handle soft-float, fix pointer handling. + (ffi_call_N32): Declare proper argument types. + (ffi_call, copy_struct_N32, ffi_closure_mips_inner_N32): Handle + soft-float. + +2009-08-24 Ralf Wildenhues + + * configure.ac (AC_PREREQ): Bump to 2.64. + +2009-08-22 Ralf Wildenhues + + * Makefile.am (install-html, install-pdf): Remove. + * Makefile.in: Regenerate. + + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * include/Makefile.in: Regenerate. + * man/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2011-08-22 Jasper Lievisse Adriaanse + + * configure.ac: Add OpenBSD/hppa and OpenBSD/powerpc support. + * configure: Rebuilt. + +2009-07-30 Ralf Wildenhues + + * configure.ac (_AC_ARG_VAR_PRECIOUS): Use m4_rename_force. + +2009-07-24 Dave Korn + + PR libffi/40807 + * src/x86/ffi.c (ffi_prep_cif_machdep): Also use sign/zero-extending + return types for X86_WIN32. + * src/x86/win32.S (_ffi_call_SYSV): Handle omitted return types. + (_ffi_call_STDCALL, _ffi_closure_SYSV, _ffi_closure_raw_SYSV, + _ffi_closure_STDCALL): Likewise. + + * src/closures.c (is_selinux_enabled): Define to const 0 for Cygwin. + (dlmmap, dlmunmap): Also use these functions on Cygwin. + +2009-07-11 Richard Sandiford + + PR testsuite/40699 + PR testsuite/40707 + PR testsuite/40709 + * testsuite/lib/libffi-dg.exp: Revert 2009-07-02, 2009-07-01 and + 2009-06-30 commits. + +2009-07-01 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Set ld_library_path + to "" before adding paths. (This reinstates an assignment that + was removed by my 2009-06-30 commit, but changes the initial + value from "." to "".) + +2009-07-01 H.J. Lu + + PR testsuite/40601 + * testsuite/lib/libffi-dg.exp (libffi-init): Properly set + gccdir. Adjust ld_library_path for gcc only if gccdir isn't + empty. + +2009-06-30 Richard Sandiford + + * testsuite/lib/libffi-dg.exp (libffi-init): Don't add "." + to ld_library_path. 
Use add_path. Add just find_libgcc_s + to ld_library_path, not every libgcc multilib directory. + +2009-06-16 Wim Lewis + + * src/powerpc/ffi.c: Avoid clobbering cr3 and cr4, which are + supposed to be callee-saved. + * src/powerpc/sysv.S (small_struct_return_value): Fix overrun of + return buffer for odd-size structs. + +2009-06-16 Andreas Tobler + + PR libffi/40444 + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Add + allow_stack_execute for Darwin. + +2009-06-16 Andrew Haley + + * configure.ac (TARGETDIR): Add missing blank lines. + * configure: Regenerate. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-15 Andrew Haley + + * testsuite/libffi.call/err_bad_typedef.c: xfail everywhere. + * testsuite/libffi.call/err_bad_abi.c: Likewise. + +2009-06-12 Andrew Haley + + * Makefile.am: Remove info_TEXINFOS. + +2009-06-12 Andrew Haley + + * ChangeLog.libffi: testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. 
+ + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + 
testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-11 Kaz Kojima + + * testsuite/libffi.call/cls_longdouble_va.c: Add xfail sh*-*-linux-*. + * testsuite/libffi.call/err_bad_abi.c: Add xfail sh*-*-*. + * testsuite/libffi.call/err_bad_typedef.c: Likewise. + +2009-06-09 Andrew Haley + + * src/x86/freebsd.S: Add missing file. + +2009-06-08 Andrew Haley + + Import from libffi 3.0.8: + + * doc/libffi.texi: New file. + * doc/libffi.info: Likewise. + * doc/stamp-vti: Likewise. + * man/Makefile.am: New file. + * man/ffi_call.3: New file. + + * Makefile.am (EXTRA_DIST): Add src/x86/darwin64.S, + src/dlmalloc.c. + (nodist_libffi_la_SOURCES): Add X86_FREEBSD. + + * configure.ac: Bump version to 3.0.8. + parisc*-*-linux*: Add. + i386-*-freebsd* | i386-*-openbsd*: Add. + powerpc-*-beos*: Add. + AM_CONDITIONAL X86_FREEBSD: Add. + AC_CONFIG_FILES: Add man/Makefile. + + * include/ffi.h.in (FFI_FN): Change void (*)() to void (*)(void). + +2009-06-08 Andrew Haley + + * README: Import from libffi 3.0.8. + +2009-06-08 Andrew Haley + + * testsuite/libffi.call/err_bad_abi.c: Add xfails. + * testsuite/libffi.call/cls_longdouble_va.c: Add xfails. + * testsuite/libffi.call/cls_dbls_struct.c: Add xfail x86_64-*-linux-*. + * testsuite/libffi.call/err_bad_typedef.c: Add xfails. + + * testsuite/libffi.call/stret_medium2.c: Add __UNUSED__ to args. + * testsuite/libffi.call/stret_medium.c: Likewise. + * testsuite/libffi.call/stret_large2.c: Likewise. + * testsuite/libffi.call/stret_large.c: Likewise. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. 
+ +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2009-06-05 Andrew Haley + + * src/x86/ffitarget.h, src/x86/ffi.c: Merge stdcall changes from + libffi. + +2009-06-04 Andrew Haley + + * src/x86/ffitarget.h, src/x86/win32.S, src/x86/ffi.c: Back out + stdcall changes. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2009-06-04 Andrew Haley + + * include/ffi.h.in: Change void (*)() to void (*)(void). + * src/x86/ffi.c: Likewise. + +2009-06-04 Andrew Haley + + * src/powerpc/ppc_closure.S: Insert licence header. + * src/powerpc/linux64_closure.S: Likewise. + * src/m68k/sysv.S: Likewise. + + * src/sh64/ffi.c: Change void (*)() to void (*)(void). + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/m32r/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/alpha/ffi.c: Likewise. + * src/alpha/osf.S: Likewise. + * src/frv/ffi.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/pa/hpux32.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/ia64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2009-06-04 Andrew Haley + + include/ffi.h.in, + src/arm/ffitarget.h, + src/arm/ffi.c, + src/arm/sysv.S, + src/powerpc/ffitarget.h, + src/closures.c, + src/sh64/ffitarget.h, + src/sh64/ffi.c, + src/sh64/sysv.S, + src/types.c, + src/x86/ffi64.c, + src/x86/ffitarget.h, + src/x86/win32.S, + src/x86/darwin.S, + src/x86/ffi.c, + src/x86/sysv.S, + src/x86/unix64.S, + src/alpha/ffitarget.h, + src/alpha/ffi.c, + src/alpha/osf.S, + src/m68k/ffitarget.h, + src/frv/ffitarget.h, + src/frv/ffi.c, + src/s390/ffitarget.h, + src/s390/sysv.S, + src/cris/ffitarget.h, + src/pa/linux.S, + src/pa/ffitarget.h, + src/pa/ffi.c, + src/raw_api.c, + src/ia64/ffitarget.h, + src/ia64/unix.S, + src/ia64/ffi.c, + src/ia64/ia64_flags.h, + src/java_raw_api.c, + src/debug.c, + src/sparc/v9.S, + src/sparc/ffitarget.h, + src/sparc/ffi.c, + src/sparc/v8.S, + src/mips/ffitarget.h, + src/mips/n32.S, + src/mips/o32.S, + src/mips/ffi.c, + src/prep_cif.c, + src/sh/ffitarget.h, + src/sh/ffi.c, + src/sh/sysv.S: Update license text. 
+ +2009-05-22 Dave Korn + + * src/x86/win32.S (_ffi_closure_STDCALL): New function. + (.eh_frame): Add FDE for it. + +2009-05-22 Dave Korn + + * configure.ac: Also check if assembler supports pc-relative + relocs on X86_WIN32 targets. + * configure: Regenerate. + * src/x86/win32.S (ffi_prep_args): Declare extern, not global. + (_ffi_call_SYSV): Add missing function type symbol .def and + add EH markup labels. + (_ffi_call_STDCALL): Likewise. + (_ffi_closure_SYSV): Likewise. + (_ffi_closure_raw_SYSV): Likewise. + (.eh_frame): Add hand-crafted EH data. + +2009-04-09 Jakub Jelinek + + * testsuite/lib/libffi-dg.exp: Change copyright header to refer to + version 3 of the GNU General Public License and to point readers + at the COPYING3 file and the FSF's license web page. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.special/special.exp: Likewise. + +2009-03-01 Ralf Wildenhues + + * configure: Regenerate. + +2008-12-18 Rainer Orth + + PR libffi/26048 + * configure.ac (HAVE_AS_X86_PCREL): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/x86/sysv.S [!FFI_NO_RAW_API]: Precalculate + RAW_CLOSURE_CIF_OFFSET, RAW_CLOSURE_FUN_OFFSET, + RAW_CLOSURE_USER_DATA_OFFSET for the Solaris 10/x86 assembler. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + * src/x86/unix64.S (.Lstore_table): Move to .text section. + (.Lload_table): Likewise. + (.eh_frame): Only use SYMBOL-. iff HAVE_AS_X86_PCREL. + +2008-12-18 Ralf Wildenhues + + * configure: Regenerate. + +2008-11-21 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_cif_machdep): Add support for + signed/unsigned int8/16 return values. + * src/sparc/v8.S (ffi_call_v8): Likewise. + (ffi_closure_v8): Likewise. + +2008-09-26 Peter O'Gorman + Steve Ellcey + + * configure: Regenerate for new libtool. + * Makefile.in: Ditto. + * include/Makefile.in: Ditto. + * aclocal.m4: Ditto. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. + (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-06-17 Ralf Wildenhues + + * configure: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2008-06-07 Joseph Myers + + * configure.ac (parisc*-*-linux*, powerpc-*-sysv*, + powerpc-*-beos*): Remove. + * configure: Regenerate. + +2008-05-09 Julian Brown + + * Makefile.am (LTLDFLAGS): New. + (libffi_la_LDFLAGS): Use above. + * Makefile.in: Regenerate. + +2008-04-18 Paolo Bonzini + + PR bootstrap/35457 + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2008-03-26 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-03-26 Daniel Jacobowitz + + * src/arm/sysv.S: Fix ARM comment marker. + +2008-03-26 Jakub Jelinek + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. 
+ * src/m68k/sysv.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-03-16 Ralf Wildenhues + + * aclocal.m4: Regenerate. + * configure: Likewise. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2008-02-12 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-24 David Edelsohn + + * configure: Regenerate. + +2008-01-06 Andreas Tobler + + * src/x86/ffi.c (ffi_prep_cif_machdep): Fix thinko. + +2008-01-05 Andreas Tobler + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): Add code for + signed/unsigned int8/16 for X86_DARWIN. + Updated copyright info. + Handle one and two byte structs with special cif->flags. + * src/x86/ffitarget.h: Add special types for one and two byte structs. + Updated copyright info. + * src/x86/darwin.S (ffi_call_SYSV): Rewrite to use a jump table like + sysv.S + Remove code to pop args from the stack after call. + Special-case signed/unsigned for int8/16, one and two byte structs. + (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + Updated copyright info. + +2007-12-08 David Daney + + * src/mips/n32.S (ffi_call_N32): Replace dadd with ADDU, dsub with + SUBU, add with ADDU and use smaller code sequences. + +2007-12-07 David Daney + + * src/mips/ffi.c (ffi_prep_cif_machdep): Handle long double return + type. + +2007-12-06 David Daney + + * include/ffi.h.in (FFI_SIZEOF_JAVA_RAW): Define if not already + defined. + (ffi_java_raw): New typedef. + (ffi_java_raw_call, ffi_java_ptrarray_to_raw, + ffi_java_raw_to_ptrarray): Change parameter types from ffi_raw to + ffi_java_raw. + (ffi_java_raw_closure) : Same. + (ffi_prep_java_raw_closure, ffi_prep_java_raw_closure_loc): Change + parameter types. + * src/java_raw_api.c (ffi_java_raw_size): Replace FFI_SIZEOF_ARG with + FFI_SIZEOF_JAVA_RAW. + (ffi_java_raw_to_ptrarray): Change type of raw to ffi_java_raw. + Replace FFI_SIZEOF_ARG with FFI_SIZEOF_JAVA_RAW. Use + sizeof(ffi_java_raw) for alignment calculations. + (ffi_java_ptrarray_to_raw): Same. + (ffi_java_rvalue_to_raw): Add special handling for FFI_TYPE_POINTER + if FFI_SIZEOF_JAVA_RAW == 4. + (ffi_java_raw_to_rvalue): Same. + (ffi_java_raw_call): Change type of raw to ffi_java_raw. + (ffi_java_translate_args): Same. + (ffi_prep_java_raw_closure_loc, ffi_prep_java_raw_closure): Change + parameter types. + * src/mips/ffitarget.h (FFI_SIZEOF_JAVA_RAW): Define for N32 ABI. + +2007-12-06 David Daney + + * src/mips/n32.S (ffi_closure_N32): Use 64-bit add instruction on + pointer values. + +2007-12-01 Andreas Tobler + + PR libffi/31937 + * src/powerpc/ffitarget.h: Introduce new ABI FFI_LINUX_SOFT_FLOAT. + Add local FFI_TYPE_UINT128 to handle soft-float long-double-128. + * src/powerpc/ffi.c: Distinguish between __NO_FPRS__ and not and + set the NUM_FPR_ARG_REGISTERS according to. + Add support for potential soft-float support under hard-float + architecture. + (ffi_prep_args_SYSV): Set NUM_FPR_ARG_REGISTERS to 0 in case of + FFI_LINUX_SOFT_FLOAT, handle float, doubles and long-doubles according + to the FFI_LINUX_SOFT_FLOAT ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_SYSV): Likewise. 
+ * src/powerpc/ppc_closure.S: Make sure not to store float/double
+ on archs where __NO_FPRS__ is true.
+ Add FFI_TYPE_UINT128 support.
+ * src/powerpc/sysv.S: Add support for soft-float long-double-128.
+ Adjust copyright notice.
+
+2007-11-25 Andreas Tobler
+
+ * src/closures.c: Move definition of MAYBE_UNUSED from here to ...
+ * include/ffi_common.h: ... here.
+ Update copyright.
+
+2007-11-17 Andreas Tobler
+
+ * src/powerpc/sysv.S: Load correct cr to compare if we have long double.
+ * src/powerpc/linux64.S: Likewise.
+ * src/powerpc/ffi.c: Add a comment to show which part goes into cr6.
+ * testsuite/libffi.call/return_ldl.c: New test.
+
+2007-09-04
+
+ * src/arm/sysv.S (UNWIND): New.
+ (Whole file): Conditionally compile unwinder directives.
+ * src/arm/sysv.S: Add unwinder directives.
+
+ * src/arm/ffi.c (ffi_prep_args): Align structs by at least 4 bytes.
+ Only treat r0 as a struct address if we're actually returning a
+ struct by address.
+ Only copy the bytes that are actually within a struct.
+ (ffi_prep_cif_machdep): A Composite Type not larger than 4 bytes
+ is returned in r0, not passed by address.
+ (ffi_call): Allocate a word-sized temporary for the case where
+ a composite is returned in r0.
+ (ffi_prep_incoming_args_SYSV): Align as necessary.
+
+2007-08-05 Steven Newbury
+
+ * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Use __clear_cache instead of
+ directly using the sys_cacheflush syscall.
+
+2007-07-27 Andrew Haley
+
+ * src/arm/sysv.S (ffi_closure_SYSV): Add soft-float.
+
+2007-09-03 Maciej W. Rozycki
+
+ * Makefile.am: Unify MIPS_IRIX and MIPS_LINUX into MIPS.
+ * configure.ac: Likewise.
+ * Makefile.in: Regenerate.
+ * include/Makefile.in: Likewise.
+ * testsuite/Makefile.in: Likewise.
+ * configure: Likewise.
+
+2007-08-24 David Daney
+
+ * testsuite/libffi.call/return_sl.c: New test.
+ +2007-08-10 David Daney + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Remove xfail for mips64*-*-*. + +2007-08-10 David Daney + + PR libffi/28313 + * configure.ac: Don't treat mips64 as a special case. + * Makefile.am (nodist_libffi_la_SOURCES): Add n32.S. + * configure: Regenerate + * Makefile.in: Ditto. + * fficonfig.h.in: Ditto. + * src/mips/ffitarget.h (REG_L, REG_S, SUBU, ADDU, SRL, LI): Indent. + (LA, EH_FRAME_ALIGN, FDE_ADDR_BYTES): New preprocessor macros. + (FFI_DEFAULT_ABI): Set for n64 case. + (FFI_CLOSURES, FFI_TRAMPOLINE_SIZE): Define for n32 and n64 cases. + * src/mips/n32.S (ffi_call_N32): Add debug macros and labels for FDE. + (ffi_closure_N32): New function. + (.eh_frame): New section + * src/mips/o32.S: Clean up comments. 
+ (ffi_closure_O32): Pass ffi_closure parameter in $12. + * src/mips/ffi.c: Use FFI_MIPS_N32 instead of + _MIPS_SIM == _ABIN32 throughout. + (FFI_MIPS_STOP_HERE): New, use in place of + ffi_stop_here. + (ffi_prep_args): Use unsigned long to hold pointer values. Rewrite + to support n32/n64 ABIs. + (calc_n32_struct_flags): Rewrite. + (calc_n32_return_struct_flags): Remove unused variable. Reverse + position of flag bits. + (ffi_prep_cif_machdep): Rewrite n32 portion. + (ffi_call): Enable for n64. Add special handling for small structure + return values. + (ffi_prep_closure_loc): Add n32 and n64 support. + (ffi_closure_mips_inner_O32): Add cast to silence warning. + (copy_struct_N32, ffi_closure_mips_inner_N32): New functions. + +2007-08-08 David Daney + + * testsuite/libffi.call/ffitest.h (ffi_type_mylong): Remove definition. + * testsuite/libffi.call/cls_align_uint16.c (main): Use correct type + specifiers. + * testsuite/libffi.call/nested_struct1.c (main): Ditto. + * testsuite/libffi.call/cls_sint.c (main): Ditto. + * testsuite/libffi.call/nested_struct9.c (main): Ditto. + * testsuite/libffi.call/cls_20byte1.c (main): Ditto. + * testsuite/libffi.call/cls_9byte1.c (main): Ditto. + * testsuite/libffi.call/closure_fn1.c (main): Ditto. + * testsuite/libffi.call/closure_fn3.c (main): Ditto. + * testsuite/libffi.call/return_dbl2.c (main): Ditto. + * testsuite/libffi.call/cls_sshort.c (main): Ditto. + * testsuite/libffi.call/return_fl3.c (main): Ditto. + * testsuite/libffi.call/closure_fn5.c (main): Ditto. + * testsuite/libffi.call/nested_struct.c (main): Ditto. + * testsuite/libffi.call/nested_struct10.c (main): Ditto. + * testsuite/libffi.call/return_ll1.c (main): Ditto. + * testsuite/libffi.call/cls_8byte.c (main): Ditto. + * testsuite/libffi.call/cls_align_uint32.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint16.c (main): Ditto. + * testsuite/libffi.call/cls_20byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct2.c (main): Ditto. + * testsuite/libffi.call/cls_24byte.c (main): Ditto. + * testsuite/libffi.call/nested_struct6.c (main): Ditto. + * testsuite/libffi.call/cls_uint.c (main): Ditto. + * testsuite/libffi.call/cls_12byte.c (main): Ditto. + * testsuite/libffi.call/cls_16byte.c (main): Ditto. + * testsuite/libffi.call/closure_fn0.c (main): Ditto. + * testsuite/libffi.call/cls_9byte2.c (main): Ditto. + * testsuite/libffi.call/closure_fn2.c (main): Ditto. + * testsuite/libffi.call/return_dbl1.c (main): Ditto. + * testsuite/libffi.call/closure_fn4.c (main): Ditto. + * testsuite/libffi.call/closure_fn6.c (main): Ditto. + * testsuite/libffi.call/cls_align_sint32.c (main): Ditto. + +2007-08-07 Andrew Haley + + * src/x86/sysv.S (ffi_closure_raw_SYSV): Fix typo in previous + checkin. + +2007-08-06 Andrew Haley + + PR testsuite/32843 + * src/x86/sysv.S (ffi_closure_raw_SYSV): Handle FFI_TYPE_UINT8, + FFI_TYPE_SINT8, FFI_TYPE_UINT16, FFI_TYPE_SINT16, FFI_TYPE_UINT32, + FFI_TYPE_SINT32. + +2007-08-02 David Daney + + * testsuite/libffi.call/return_ul.c (main): Define return type as + ffi_arg. Use proper printf conversion specifier. + +2007-07-30 Andrew Haley + + PR testsuite/32843 + * src/x86/ffi.c (ffi_prep_cif_machdep): in x86 case, add code for + signed/unsigned int8/16. + * src/x86/sysv.S (ffi_call_SYSV): Rewrite to: + Use a jump table. + Remove code to pop args from the stack after call. + Special-case signed/unsigned int8/16. + * testsuite/libffi.call/return_sc.c (main): Revert. 
+ +2007-07-26 Richard Guenther + + PR testsuite/32843 + * testsuite/libffi.call/return_sc.c (main): Verify call + result as signed char, not ffi_arg. + +2007-07-16 Rainer Orth + + * configure.ac (i?86-*-solaris2.1[0-9]): Set TARGET to X86_64. + * configure: Regenerate. + +2007-07-11 David Daney + + * src/mips/ffi.c: Don't include sys/cachectl.h. + (ffi_prep_closure_loc): Use __builtin___clear_cache() instead of + cacheflush(). + +2007-05-18 Aurelien Jarno + + * src/arm/ffi.c (ffi_prep_closure_loc): Renamed and ajusted + from (ffi_prep_closure): ... this. + (FFI_INIT_TRAMPOLINE): Adjust. + +2005-12-31 Phil Blundell + + * src/arm/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner, ffi_prep_closure): New, add closure support. + * src/arm/sysv.S(ffi_closure_SYSV): Likewise. + * src/arm/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-07-03 Andrew Haley + + * testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_uchar.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/cls_5byte.c, + 
testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.special/unwindtest_ffi_call.cc, + testsuite/libffi.special/unwindtest.cc: Enable for ARM. + +2007-07-05 H.J. Lu + + * aclocal.m4: Regenerated. + +2007-06-02 Paolo Bonzini + + * configure: Regenerate. + +2007-05-23 Steve Ellcey + + * Makefile.in: Regenerate. + * configure: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_incoming_args_SYSV, + ffi_closure_SYSV_inner,ffi_prep_closure): New, add closure support. + * src/m68k/sysv.S(ffi_closure_SYSV,ffi_closure_struct_SYSV): Likewise. + * src/m68k/ffitarget.h (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_CLOSURES): Enable closure support. + +2007-05-10 Roman Zippel + + * configure.ac (HAVE_AS_CFI_PSEUDO_OP): New test. + * configure: Regenerate. + * fficonfig.h.in: Regenerate. + * src/m68k/sysv.S (CFI_STARTPROC,CFI_ENDPROC, + CFI_OFFSET,CFI_DEF_CFA): New macros. + (ffi_call_SYSV): Add callframe annotation. + +2007-05-10 Roman Zippel + + * src/m68k/ffi.c (ffi_prep_args,ffi_prep_cif_machdep): Fix + numerous test suite failures. + * src/m68k/sysv.S (ffi_call_SYSV): Likewise. + +2007-04-11 Paolo Bonzini + + * Makefile.am (EXTRA_DIST): Bring up to date. + * Makefile.in: Regenerate. + * src/frv/eabi.S: Remove RCS keyword. + +2007-04-06 Richard Henderson + + * configure.ac: Tidy target case. + (HAVE_LONG_DOUBLE): Allow the target to override. + * configure: Regenerate. + * include/ffi.h.in: Don't define ffi_type_foo if + LIBFFI_HIDE_BASIC_TYPES is defined. + (ffi_type_longdouble): If not HAVE_LONG_DOUBLE, define + to ffi_type_double. + * types.c (LIBFFI_HIDE_BASIC_TYPES): Define. + (FFI_TYPEDEF, ffi_type_void): Mark the data const. + (ffi_type_longdouble): Special case for Alpha. Don't define + if long double == double. + + * src/alpha/ffi.c (FFI_TYPE_LONGDOUBLE): Assert unique value. + (ffi_prep_cif_machdep): Handle it as the 128-bit type. + (ffi_call, ffi_closure_osf_inner): Likewise. + (ffi_closure_osf_inner): Likewise. Mark hidden. + (ffi_call_osf, ffi_closure_osf): Mark hidden. + * src/alpha/ffitarget.h (FFI_LAST_ABI): Tidy definition. + * src/alpha/osf.S (ffi_call_osf, ffi_closure_osf): Mark hidden. + (load_table): Handle 128-bit long double. + + * testsuite/libffi.call/float4.c: Add -mieee for alpha. + +2007-04-06 Tom Tromey + + PR libffi/31491: + * README: Fixed bug in example. + +2007-04-03 Jakub Jelinek + + * src/closures.c: Include sys/statfs.h. + (_GNU_SOURCE): Define on Linux. + (FFI_MMAP_EXEC_SELINUX): Define. + (selinux_enabled): New variable. + (selinux_enabled_check): New function. + (is_selinux_enabled): Define. + (dlmmap): Use it. + +2007-03-24 Uros Bizjak + + * testsuite/libffi.call/return_fl2.c (return_fl): Mark as static. + Use 'volatile float sum' to create sum of floats to avoid false + negative due to excess precision on ix86 targets. + (main): Ditto. + +2007-03-08 Alexandre Oliva + + * src/powerpc/ffi.c (flush_icache): Fix left-over from previous + patch. + (ffi_prep_closure_loc): Remove unneeded casts. Add needed ones. + +2007-03-07 Alexandre Oliva + + * include/ffi.h.in (ffi_closure_alloc, ffi_closure_free): New. + (ffi_prep_closure_loc): New. + (ffi_prep_raw_closure_loc): New. + (ffi_prep_java_raw_closure_loc): New. + * src/closures.c: New file. + * src/dlmalloc.c [FFI_MMAP_EXEC_WRIT] (struct malloc_segment): + Replace sflags with exec_offset. 
+ [FFI_MMAP_EXEC_WRIT] (mmap_exec_offset, add_segment_exec_offset, + sub_segment_exec_offset): New macros. + (get_segment_flags, set_segment_flags, check_segment_merge): New + macros. + (is_mmapped_segment, is_extern_segment): Use get_segment_flags. + (add_segment, sys_alloc, create_mspace, create_mspace_with_base, + destroy_mspace): Use new macros. + (sys_alloc): Silence warning. + * Makefile.am (libffi_la_SOURCES): Add src/closures.c. + * Makefile.in: Rebuilt. + * src/prep_cif [FFI_CLOSURES] (ffi_prep_closure): Implement in + terms of ffi_prep_closure_loc. + * src/raw_api.c (ffi_prep_raw_closure_loc): Renamed and adjusted + from... + (ffi_prep_raw_closure): ... this. Re-implement in terms of the + renamed version. + * src/java_raw_api (ffi_prep_java_raw_closure_loc): Renamed and + adjusted from... + (ffi_prep_java_raw_closure): ... this. Re-implement in terms of + the renamed version. + * src/alpha/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + * src/pa/ffi.c: Likewise. + * src/cris/ffi.c: Likewise. Adjust. + * src/frv/ffi.c: Likewise. + * src/ia64/ffi.c: Likewise. + * src/mips/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/s390/ffi.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/sparc/ffi.c: Likewise. + * src/x86/ffi64.c: Likewise. + * src/x86/ffi.c: Likewise. + (FFI_INIT_TRAMPOLINE): Adjust. + (ffi_prep_raw_closure_loc): Renamed and adjusted from... + (ffi_prep_raw_closure): ... this. + * src/powerpc/ffi.c (ffi_prep_closure_loc): Renamed from + (ffi_prep_closure): ... this. + (flush_icache): Adjust. + +2007-03-07 Alexandre Oliva + + * src/dlmalloc.c: New file, imported version 2.8.3 of Doug + Lea's malloc. + +2007-03-01 Brooks Moses + + * Makefile.am: Add dummy install-pdf target. + * Makefile.in: Regenerate + +2007-02-13 Andreas Krebbel + + * src/s390/ffi.c (ffi_prep_args, ffi_prep_cif_machdep, + ffi_closure_helper_SYSV): Add long double handling. + +2007-02-02 Jakub Jelinek + + * src/powerpc/linux64.S (ffi_call_LINUX64): Move restore of r2 + immediately after bctrl instruction. + +2007-01-18 Alexandre Oliva + + * Makefile.am (all-recursive, install-recursive, + mostlyclean-recursive, clean-recursive, distclean-recursive, + maintainer-clean-recursive): Add missing targets. + * Makefile.in: Rebuilt. + +2006-12-14 Andreas Tobler + + * configure.ac: Add TARGET for x86_64-*-darwin*. + * Makefile.am (nodist_libffi_la_SOURCES): Add rules for 64-bit sources + for X86_DARWIN. + * src/x86/ffitarget.h: Set trampoline size for x86_64-*-darwin*. + * src/x86/darwin64.S: New file for x86_64-*-darwin* support. + * configure: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + * testsuite/libffi.special/unwindtest_ffi_call.cc: New test case for + ffi_call only. + +2006-12-13 Andreas Tobler + + * aclocal.m4: Regenerate with aclocal -I .. as written in the + Makefile.am. + +2006-10-31 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (darwin_adjust_aggregate_sizes): New. + (ffi_prep_cif_machdep): Call darwin_adjust_aggregate_sizes for + Darwin. + * testsuite/libffi.call/nested_struct4.c: Remove Darwin XFAIL. + * testsuite/libffi.call/nested_struct6.c: Remove Darwin XFAIL. + +2006-10-10 Paolo Bonzini + Sandro Tolaini + + * configure.ac [i*86-*-darwin*]: Set X86_DARWIN symbol and + conditional. + * configure: Regenerated. + * Makefile.am (nodist_libffi_la_SOURCES) [X86_DARWIN]: New case. + (EXTRA_DIST): Add src/x86/darwin.S. + * Makefile.in: Regenerated. 
+ * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + + * src/x86/ffi.c (ffi_prep_cif_machdep) [X86_DARWIN]: Treat like + X86_WIN32, and additionally align stack to 16 bytes. + * src/x86/darwin.S: New, based on sysv.S. + * src/prep_cif.c (ffi_prep_cif) [X86_DARWIN]: Align > 8-byte structs. + +2006-09-12 David Daney + + PR libffi/23935 + * include/Makefile.am: Install both ffi.h and ffitarget.h in + $(libdir)/gcc/$(target_alias)/$(gcc_version)/include. + * aclocal.m4: Regenerated for automake 1.9.6. + * Makefile.in: Regenerated. + * include/Makefile.in: Regenerated. + * testsuite/Makefile.in: Regenerated. + +2006-08-17 Andreas Tobler + + * include/ffi_common.h (struct): Revert accidental commit. + +2006-08-15 Andreas Tobler + + * include/ffi_common.h: Remove lint directives. + * include/ffi.h.in: Likewise. + +2006-07-25 Torsten Schoenfeld + + * include/ffi.h.in (ffi_type_ulong, ffi_type_slong): Define correctly + for 32-bit architectures. + * testsuite/libffi.call/return_ul.c: New test case. + +2006-07-19 David Daney + + * testsuite/libffi.call/closure_fn6.c: Remove xfail for mips, + xfail remains for mips64. + +2006-05-23 Carlos O'Donell + + * Makefile.am: Add install-html target. Add install-html to .PHONY + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2006-05-18 John David Anglin + + * pa/ffi.c (ffi_prep_args_pa32): Load floating point arguments from + stack slot. + +2006-04-22 Andreas Tobler + + * README: Remove notice about 'Crazy Comments'. + * src/debug.c: Remove lint directives. Cleanup white spaces. + * src/java_raw_api.c: Likewise. + * src/prep_cif.c: Likewise. + * src/raw_api.c: Likewise. + * src/ffitest.c: Delete. No longer needed, all test cases migrated + to the testsuite. + * src/arm/ffi.c: Remove lint directives. + * src/m32r/ffi.c: Likewise. + * src/pa/ffi.c: Likewise. + * src/powerpc/ffi.c: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + * src/sh/ffi.c: Likewise. + * src/sh64/ffi.c: Likewise. + * src/x86/ffi.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + +2006-04-13 Andreas Tobler + + * src/pa/hpux32.S: Correct unwind offset calculation for + ffi_closure_pa32. + * src/pa/linux.S: Likewise. + +2006-04-12 James E Wilson + + PR libgcj/26483 + * src/ia64/ffi.c (stf_spill, ldf_fill): Rewrite as macros. + (hfa_type_load): Call stf_spill. + (hfa_type_store): Call ldf_fill. + (ffi_call): Adjust calls to above routines. Add local temps for + macro result. + +2006-04-10 Matthias Klose + + * testsuite/lib/libffi-dg.exp (libffi-init): Recognize multilib + directory names containing underscores. + +2006-04-07 James E Wilson + + * testsuite/libffi.call/float4.c: New testcase. + +2006-04-05 John David Anglin + Andreas Tobler + + * Makefile.am: Add PA_HPUX port. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add PA_HPUX rules. + * configure: Regenerate. + * src/pa/ffitarget.h: Rename linux target to PA_LINUX. + Add PA_HPUX and PA64_HPUX. + Rename FFI_LINUX ABI to FFI_PA32 ABI. + (FFI_TRAMPOLINE_SIZE): Define for 32-bit HP-UX targets. + (FFI_TYPE_SMALL_STRUCT2): Define. + (FFI_TYPE_SMALL_STRUCT4): Likewise. + (FFI_TYPE_SMALL_STRUCT8): Likewise. + (FFI_TYPE_SMALL_STRUCT3): Redefine. + (FFI_TYPE_SMALL_STRUCT5): Likewise. + (FFI_TYPE_SMALL_STRUCT6): Likewise. + (FFI_TYPE_SMALL_STRUCT7): Likewise. 
+ * src/pa/ffi.c (ROUND_DOWN): Delete. + (fldw, fstw, fldd, fstd): Use '__asm__'. + (ffi_struct_type): Add support for FFI_TYPE_SMALL_STRUCT2, + FFI_TYPE_SMALL_STRUCT4 and FFI_TYPE_SMALL_STRUCT8. + (ffi_prep_args_LINUX): Rename to ffi_prep_args_pa32. Update comment. + Simplify incrementing of stack slot variable. Change type of local + 'n' to unsigned int. + (ffi_size_stack_LINUX): Rename to ffi_size_stack_pa32. Handle long + double on PA_HPUX. + (ffi_prep_cif_machdep): Likewise. + (ffi_call): Likewise. + (ffi_closure_inner_LINUX): Rename to ffi_closure_inner_pa32. Change + return type to ffi_status. Simplify incrementing of stack slot + variable. Only copy floating point argument registers when PA_LINUX + is true. Reformat debug statement. + Add support for FFI_TYPE_SMALL_STRUCT2, FFI_TYPE_SMALL_STRUCT4 and + FFI_TYPE_SMALL_STRUCT8. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Add 'extern' to + declaration. + (ffi_prep_closure): Make linux trampoline conditional on PA_LINUX. + Add nops to cache flush. Add trampoline for PA_HPUX. + * src/pa/hpux32.S: New file. + * src/pa/linux.S (ffi_call_LINUX): Rename to ffi_call_pa32. Rename + ffi_prep_args_LINUX to ffi_prep_args_pa32. + Localize labels. Add support for 2, 4 and 8-byte small structs. Handle + unaligned destinations in 3, 5, 6 and 7-byte small structs. Order + argument type checks so that common argument types appear first. + (ffi_closure_LINUX): Rename to ffi_closure_pa32. Rename + ffi_closure_inner_LINUX to ffi_closure_inner_pa32. + +2006-03-24 Alan Modra + + * src/powerpc/ffitarget.h (enum ffi_abi): Add FFI_LINUX. Default + for 32-bit using IBM extended double format. Fix FFI_LAST_ABI. + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Handle linux variant of + FFI_TYPE_LONGDOUBLE. + (ffi_prep_args64): Assert using IBM extended double. + (ffi_prep_cif_machdep): Don't munge FFI_TYPE_LONGDOUBLE type. + Handle FFI_LINUX FFI_TYPE_LONGDOUBLE return and args. + (ffi_call): Handle FFI_LINUX. + (ffi_closure_helper_SYSV): Non FFI_LINUX long double return needs + gpr3 return pointer as for struct return. Handle FFI_LINUX + FFI_TYPE_LONGDOUBLE return and args. Don't increment "nf" + unnecessarily. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Load both f1 and f2 + for FFI_TYPE_LONGDOUBLE. Move epilogue insns into case table. + Don't use r6 as pointer to results, instead use sp offset. Don't + make a special call to load lr with case table address, instead + use offset from previous call. + * src/powerpc/sysv.S (ffi_call_SYSV): Save long double return. + * src/powerpc/linux64.S (ffi_call_LINUX64): Simplify long double + return. + +2006-03-15 Kaz Kojima + + * src/sh64/ffi.c (ffi_prep_cif_machdep): Handle float arguments + passed with FP registers correctly. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S: Likewise. + +2006-03-01 Andreas Tobler + + * testsuite/libffi.special/unwindtest.cc (closure_test_fn): Mark cif, + args and userdata unused. + (closure_test_fn1): Mark cif and userdata unused. + (main): Remove unused res. + +2006-02-28 Andreas Tobler + + * testsuite/libffi.call/call.exp: Adjust FSF address. Add test runs for + -O2, -O3, -Os and the warning flags -W -Wall. + * testsuite/libffi.special/special.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Add an __UNUSED__ macro to mark + unused parameter unused for gcc or else do nothing. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.call/cls_12byte.c (cls_struct_12byte_gn): Mark cif + and userdata unused. 
+ * testsuite/libffi.call/cls_16byte.c (cls_struct_16byte_gn): Likewise. + * testsuite/libffi.call/cls_18byte.c (cls_struct_18byte_gn): Likewise. + * testsuite/libffi.call/cls_19byte.c (cls_struct_19byte_gn): Likewise. + * testsuite/libffi.call/cls_1_1byte.c (cls_struct_1_1byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_20byte1.c (cls_struct_20byte_gn): Likewise. + * testsuite/libffi.call/cls_24byte.c (cls_struct_24byte_gn): Likewise. + * testsuite/libffi.call/cls_2byte.c (cls_struct_2byte_gn): Likewise. + * testsuite/libffi.call/cls_3_1byte.c (cls_struct_3_1byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte1.c (cls_struct_3byte_gn): Likewise. + * testsuite/libffi.call/cls_3byte2.c (cls_struct_3byte_gn1): Likewise. + * testsuite/libffi.call/cls_4_1byte.c (cls_struct_4_1byte_gn): Likewise. + * testsuite/libffi.call/cls_4byte.c (cls_struct_4byte_gn): Likewise. + * testsuite/libffi.call/cls_5_1_byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_5byte.c (cls_struct_5byte_gn): Likewise. + * testsuite/libffi.call/cls_64byte.c (cls_struct_64byte_gn): Likewise. + * testsuite/libffi.call/cls_6_1_byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_6byte.c (cls_struct_6byte_gn): Likewise. + * testsuite/libffi.call/cls_7_1_byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_7byte.c (cls_struct_7byte_gn): Likewise. + * testsuite/libffi.call/cls_8byte.c (cls_struct_8byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte1.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_9byte2.c (cls_struct_9byte_gn): Likewise. + * testsuite/libffi.call/cls_align_double.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_float.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_longdouble.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_pointer.c (cls_struct_align_fn): Cast + void* to avoid compiler warning. + (main): Likewise. + (cls_struct_align_gn): Mark cif and userdata unused. + * testsuite/libffi.call/cls_align_sint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_sint64.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint16.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_align_uint32.c (cls_struct_align_gn): + Likewise. + * testsuite/libffi.call/cls_double.c (cls_ret_double_fn): Likewise. + * testsuite/libffi.call/cls_float.c (cls_ret_float_fn): Likewise. + * testsuite/libffi.call/cls_multi_schar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_sshortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_uchar.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushort.c (test_func_gn): Mark cif and + data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_multi_ushortchar.c (test_func_gn): Mark cif + and data unused. + (main): Cast res_call to silence gcc. + * testsuite/libffi.call/cls_schar.c (cls_ret_schar_fn): Mark cif and + userdata unused. 
+ (cls_ret_schar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sint.c (cls_ret_sint_fn): Mark cif and + userdata unused. + (cls_ret_sint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_sshort.c (cls_ret_sshort_fn): Mark cif and + userdata unused. + (cls_ret_sshort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uchar.c (cls_ret_uchar_fn): Mark cif and + userdata unused. + (cls_ret_uchar_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Mark cif and + userdata unused. + (cls_ret_uint_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/cls_ulonglong.c (cls_ret_ulonglong_fn): Mark cif + and userdata unused. + * testsuite/libffi.call/cls_ushort.c (cls_ret_ushort_fn): Mark cif and + userdata unused. + (cls_ret_ushort_fn): Cast printf parameter to silence gcc. + * testsuite/libffi.call/float.c (floating): Remove unused parameter e. + * testsuite/libffi.call/float1.c (main): Remove unused variable i. + Cleanup white spaces. + * testsuite/libffi.call/negint.c (checking): Remove unused variable i. + * testsuite/libffi.call/nested_struct.c (cls_struct_combined_gn): Mark + cif and userdata unused. + * testsuite/libffi.call/nested_struct1.c (cls_struct_combined_gn): + Likewise. + * testsuite/libffi.call/nested_struct10.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct2.c (B_fn): Adjust printf + formatters to silence gcc. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct3.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct4.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct5.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct6.c: Mention related PR. + (B_gn): Mark cif and userdata unused. + * testsuite/libffi.call/nested_struct7.c (B_gn): Mark cif and userdata + unused. + * testsuite/libffi.call/nested_struct8.c (B_gn): Likewise. + * testsuite/libffi.call/nested_struct9.c (B_gn): Likewise. + * testsuite/libffi.call/problem1.c (stub): Likewise. + * testsuite/libffi.call/pyobjc-tc.c (main): Cast the result to silence + gcc. + * testsuite/libffi.call/return_fl2.c (return_fl): Add the note mentioned + in the last commit for this test case in the test case itself. + * testsuite/libffi.call/closure_fn0.c (closure_test_fn0): Mark cif as + unused. + * testsuite/libffi.call/closure_fn1.c (closure_test_fn1): Likewise. + * testsuite/libffi.call/closure_fn2.c (closure_test_fn2): Likewise. + * testsuite/libffi.call/closure_fn3.c (closure_test_fn3): Likewise. + * testsuite/libffi.call/closure_fn4.c (closure_test_fn0): Likewise. + * testsuite/libffi.call/closure_fn5.c (closure_test_fn5): Likewise. + * testsuite/libffi.call/closure_fn6.c (closure_test_fn0): Likewise. + +2006-02-22 Kaz Kojima + + * src/sh/sysv.S: Fix register numbers in the FDE for + ffi_closure_SYSV. + +2006-02-20 Andreas Tobler + + * testsuite/libffi.call/return_fl2.c (return_fl): Remove static + declaration to avoid a false negative on ix86. See PR323. + +2006-02-18 Kaz Kojima + + * src/sh/ffi.c (ffi_closure_helper_SYSV): Remove unused variable + and cast integer to void * if needed. Update the pointer to + the FP register saved area correctly. + +2006-02-17 Andreas Tobler + + * testsuite/libffi.call/nested_struct6.c: XFAIL this test until PR25630 + is fixed. + * testsuite/libffi.call/nested_struct4.c: Likewise. 
+ +2006-02-16 Andreas Tobler + + * testsuite/libffi.call/return_dbl.c: New test case. + * testsuite/libffi.call/return_dbl1.c: Likewise. + * testsuite/libffi.call/return_dbl2.c: Likewise. + * testsuite/libffi.call/return_fl.c: Likewise. + * testsuite/libffi.call/return_fl1.c: Likewise. + * testsuite/libffi.call/return_fl2.c: Likewise. + * testsuite/libffi.call/return_fl3.c: Likewise. + * testsuite/libffi.call/closure_fn6.c: Likewise. + + * testsuite/libffi.call/nested_struct2.c: Remove ffi_type_mylong + definition. + * testsuite/libffi.call/ffitest.h: Add ffi_type_mylong definition + here to be used by other test cases too. + + * testsuite/libffi.call/nested_struct10.c: New test case. + * testsuite/libffi.call/nested_struct9.c: Likewise. + * testsuite/libffi.call/nested_struct8.c: Likewise. + * testsuite/libffi.call/nested_struct7.c: Likewise. + * testsuite/libffi.call/nested_struct6.c: Likewise. + * testsuite/libffi.call/nested_struct5.c: Likewise. + * testsuite/libffi.call/nested_struct4.c: Likewise. + +2006-01-21 Andreas Tobler + + * configure.ac: Enable libffi for sparc64-*-freebsd*. + * configure: Rebuilt. + +2006-01-18 Jakub Jelinek + + * src/powerpc/sysv.S (smst_two_register): Don't call __ashldi3, + instead do the shifting inline. + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't compute %r5 + shift count unconditionally. Simplify load sequences for 1, 2, 3, 4 + and 8 byte structs, for the remaining struct sizes don't call + __lshrdi3, instead do the shifting inline. + +2005-12-07 Thiemo Seufer + + * src/mips/ffitarget.h: Remove obsolete sgidefs.h include. Add + missing parentheses. + * src/mips/o32.S (ffi_call_O32): Code formatting. Define + and use A3_OFF, FP_OFF, RA_OFF. Micro-optimizations. + (ffi_closure_O32): Likewise, but with newly defined A3_OFF2, + A2_OFF2, A1_OFF2, A0_OFF2, RA_OFF2, FP_OFF2, S0_OFF2, GP_OFF2, + V1_OFF2, V0_OFF2, FA_1_1_OFF2, FA_1_0_OFF2, FA_0_1_OFF2, + FA_0_0_OFF2. + * src/mips/ffi.c (ffi_prep_args): Code formatting. Fix + endianness bugs. + (ffi_prep_closure): Improve trampoline instruction scheduling. + (ffi_closure_mips_inner_O32): Fix endianness bugs. + +2005-12-03 Alan Modra + + * src/powerpc/ffi.c: Formatting. + (ffi_prep_args_SYSV): Avoid possible aliasing problems by using unions. + (ffi_prep_args64): Likewise. + +2005-09-30 Geoffrey Keating + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): For + darwin, use -shared-libgcc not -lgcc_s, and explain why. + +2005-09-26 Tom Tromey + + * testsuite/libffi.call/float1.c (value_type): New typedef. + (CANARY): New define. + (main): Check for result buffer overflow. + * src/powerpc/linux64.S: Handle linux64 long double returns. + * src/powerpc/ffi.c (FLAG_RETURNS_128BITS): New constant. + (ffi_prep_cif_machdep): Handle linux64 long double returns. + +2005-08-25 Alan Modra + + PR target/23404 + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Correct placement of stack + homed fp args. + (ffi_status ffi_prep_cif_machdep): Correct stack sizing for same. + +2005-08-11 Jakub Jelinek + + * configure.ac (HAVE_HIDDEN_VISIBILITY_ATTRIBUTE): New test. + (AH_BOTTOM): Add FFI_HIDDEN definition. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * src/powerpc/ffi.c (hidden): Remove. + (ffi_closure_LINUX64, ffi_prep_args64, ffi_call_LINUX64, + ffi_closure_helper_LINUX64): Use FFI_HIDDEN instead of hidden. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64, + .ffi_closure_LINUX64): Use FFI_HIDDEN instead of .hidden. 
+ * src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV): Remove, + add FFI_HIDDEN to its prototype. + (ffi_closure_SYSV_inner): New. + * src/x86/sysv.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + * src/x86/win32.S (ffi_closure_SYSV, ffi_closure_raw_SYSV): New. + +2005-08-10 Alfred M. Szmidt + + PR libffi/21819: + * configure: Rebuilt. + * configure.ac: Handle i*86-*-gnu*. + +2005-08-09 Jakub Jelinek + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Use + DW_CFA_offset_extended_sf rather than + DW_CFA_GNU_negative_offset_extended. + * src/powerpc/sysv.S (ffi_call_SYSV): Likewise. + +2005-07-22 SUGIOKA Toshinobu + + * src/sh/sysv.S (ffi_call_SYSV): Stop argument popping correctly + on sh3. + (ffi_closure_SYSV): Change the stack layout for sh3 struct argument. + * src/sh/ffi.c (ffi_prep_args): Fix sh3 argument copy, when it is + partially on register. + (ffi_closure_helper_SYSV): Likewise. + (ffi_prep_cif_machdep): Don't set too many cif->flags. + +2005-07-20 Kaz Kojima + + * src/sh/ffi.c (ffi_call): Handle small structures correctly. + Remove empty line. + * src/sh64/ffi.c (simple_type): Remove. + (return_type): Handle small structures correctly. + (ffi_prep_args): Likewise. + (ffi_call): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * src/sh64/sysv.S (ffi_call_SYSV): Handle 1, 2 and 4-byte return. + Emit position independent code if PIC and remove wrong datalabel + prefixes from EH data. + +2005-07-19 Andreas Tobler + + * Makefile.am (nodist_libffi_la_SOURCES): Add POWERPC_FREEBSD. + * Makefile.in: Regenerate. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * configure.ac: Add POWERPC_FREEBSD rules. + * configure: Regenerate. + * src/powerpc/ffitarget.h: Add POWERPC_FREEBSD rules. + (FFI_SYSV_TYPE_SMALL_STRUCT): Define. + * src/powerpc/ffi.c: Add flags to handle small structure returns + in ffi_call_SYSV. + (ffi_prep_cif_machdep): Handle small structures for SYSV 4 ABI. + Aka FFI_SYSV. + (ffi_closure_helper_SYSV): Likewise. + * src/powerpc/ppc_closure.S: Add return types for small structures. + * src/powerpc/sysv.S: Add bits to handle small structures for + final SYSV 4 ABI. + +2005-07-10 Andreas Tobler + + * testsuite/libffi.call/cls_5_1_byte.c: New test file. + * testsuite/libffi.call/cls_6_1_byte.c: Likewise. + * testsuite/libffi.call/cls_7_1_byte.c: Likewise. + +2005-07-05 Randolph Chung + + * src/pa/ffi.c (ffi_struct_type): Rename FFI_TYPE_SMALL_STRUCT1 + as FFI_TYPE_SMALL_STRUCT3. Break out handling for 5-7 byte + structures. Kill compilation warnings. + (ffi_closure_inner_LINUX): Print return values as hex in debug + message. Rename FFI_TYPE_SMALL_STRUCT1 as FFI_TYPE_SMALL_STRUCT3. + Properly handle 5-7 byte structure returns. + * src/pa/ffitarget.h (FFI_TYPE_SMALL_STRUCT1) + (FFI_TYPE_SMALL_STRUCT2): Remove. + (FFI_TYPE_SMALL_STRUCT3, FFI_TYPE_SMALL_STRUCT5) + (FFI_TYPE_SMALL_STRUCT6, FFI_TYPE_SMALL_STRUCT7): Define. + * src/pa/linux.S: Mark source file as using PA1.1 assembly. + (checksmst1, checksmst2): Remove. + (checksmst3): Optimize handling of 3-byte struct returns. + (checksmst567): Properly handle 5-7 byte struct returns. + +2005-06-15 Rainer Orth + + PR libgcj/21943 + * src/mips/n32.S: Enforce PIC code. + * src/mips/o32.S: Likewise. + +2005-06-15 Rainer Orth + + * configure.ac: Treat i*86-*-solaris2.10 and up as X86_64. + * configure: Regenerate. + +2005-06-01 Alan Modra + + * src/powerpc/ppc_closure.S (ffi_closure_SYSV): Don't use JUMPTARGET + to call ffi_closure_helper_SYSV. Append @local instead. 
+ * src/powerpc/sysv.S (ffi_call_SYSV): Likewise for ffi_prep_args_SYSV. + +2005-05-17 Kelley Cook + + * configure.ac: Use AC_C_BIGENDIAN instead of AC_C_BIGENDIAN_CROSS. + Use AC_CHECK_SIZEOF instead of AC_COMPILE_CHECK_SIZEOF. + * Makefile.am (ACLOCAL_AMFLAGS): Remove -I ../config. + * aclocal.m4, configure, fficonfig.h.in, Makefile.in, + include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2005-05-09 Mike Stump + + * configure: Regenerate. + +2005-05-08 Richard Henderson + + PR libffi/21285 + * src/alpha/osf.S: Update unwind into to match code. + +2005-05-04 Andreas Degert + Richard Henderson + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Save sse-used flag in + bit 11 of flags. + (ffi_call): Mask return type field. Pass ssecount to ffi_call_unix64. + (ffi_prep_closure): Set carry bit if sse-used flag set. + * src/x86/unix64.S (ffi_call_unix64): Add ssecount argument. + Only load sse registers if ssecount non-zero. + (ffi_closure_unix64): Only save sse registers if carry set on entry. + +2005-04-29 Ralf Corsepius + + * configure.ac: Add i*86-*-rtems*, sparc*-*-rtems*, + powerpc-*rtems*, arm*-*-rtems*, sh-*-rtems*. + * configure: Regenerate. + +2005-04-20 Hans-Peter Nilsson + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): In regsub use, + have Tcl8.3-compatible intermediate variable. + +2005-04-18 Simon Posnjak + Hans-Peter Nilsson + + * Makefile.am: Add CRIS support. + * configure.ac: Likewise. + * Makefile.in, configure, testsuite/Makefile.in, + include/Makefile.in: Regenerate. + * src/cris: New directory. + * src/cris/ffi.c, src/cris/sysv.S, src/cris/ffitarget.h: New files. + * src/prep_cif.c (ffi_prep_cif): Wrap in #ifndef __CRIS__. + + * testsuite/lib/libffi-dg.exp (libffi-dg-test-1): Replace \n with + \r?\n in output tests. + +2005-04-12 Mike Stump + + * configure: Regenerate. + +2005-03-30 Hans Boehm + + * src/ia64/ffitarget.h (ffi_arg): Use long long instead of DI. + +2005-03-30 Steve Ellcey + + * src/ia64/ffitarget.h (ffi_arg) ADD DI attribute. + (ffi_sarg) Ditto. + * src/ia64/unix.S (ffi_closure_unix): Extend gp + to 64 bits in ILP32 mode. + Load 64 bits even for short data. + +2005-03-23 Mike Stump + + * src/powerpc/darwin.S: Update for -m64 multilib. + * src/powerpc/darwin_closure.S: Likewise. + +2005-03-21 Zack Weinberg + + * configure.ac: Do not invoke TL_AC_GCC_VERSION. + Do not set tool_include_dir. + * aclocal.m4, configure, Makefile.in, testsuite/Makefile.in: + Regenerate. + * include/Makefile.am: Set gcc_version and toollibffidir. + * include/Makefile.in: Regenerate. + +2005-02-22 Andrew Haley + + * src/powerpc/ffi.c (ffi_prep_cif_machdep): Bump alignment to + odd-numbered register pairs for 64-bit integer types. + +2005-02-23 Andreas Tobler + + PR libffi/20104 + * testsuite/libffi.call/return_ll1.c: New test case. + +2005-02-11 Janis Johnson + + * testsuite/libffi.call/cls_align_longdouble.c: Remove dg-options. + * testsuite/libffi.call/float.c: Ditto. + * testsuite/libffi.call/float2.c: Ditto. + * testsuite/libffi.call/float3.c: Ditto. + +2005-02-08 Andreas Tobler + + * src/frv/ffitarget.h: Remove PPC stuff which does not belong to frv. + +2005-01-12 Eric Botcazou + + * testsuite/libffi.special/special.exp (cxx_options): Add + -shared-libgcc. + +2004-12-31 Richard Henderson + + * src/types.c (FFI_AGGREGATE_TYPEDEF): Remove. + (FFI_TYPEDEF): Rename from FFI_INTEGRAL_TYPEDEF. Replace size and + offset parameters with a type parameter; deduce size and structure + alignment. Update all users. 
+ +2004-12-31 Richard Henderson + + * src/types.c (FFI_TYPE_POINTER): Define with sizeof. + (FFI_TYPE_LONGDOUBLE): Fix for ia64. + * src/ia64/ffitarget.h (struct ffi_ia64_trampoline_struct): Move + into ffi_prep_closure. + * src/ia64/ia64_flags.h, src/ia64/ffi.c, src/ia64/unix.S: Rewrite + from scratch. + +2004-12-27 Richard Henderson + + * src/x86/unix64.S: Fix typo in unwind info. + +2004-12-25 Richard Henderson + + * src/x86/ffi64.c (struct register_args): Rename from stackLayout. + (enum x86_64_reg_class): Add X86_64_COMPLEX_X87_CLASS. + (merge_classes): Check for it. + (SSE_CLASS_P): New. + (classify_argument): Pass byte_offset by value; perform all updates + inside struct case. + (examine_argument): Add classes argument; handle + X86_64_COMPLEX_X87_CLASS. + (ffi_prep_args): Merge into ... + (ffi_call): ... here. Share stack frame with ffi_call_unix64. + (ffi_prep_cif_machdep): Setup cif->flags for proper structure return. + (ffi_fill_return_value): Remove. + (ffi_prep_closure): Remove dead assert. + (ffi_closure_unix64_inner): Rename from ffi_closure_UNIX64_inner. + Rewrite to use struct register_args instead of va_list. Create + flags for handling structure returns. + * src/x86/unix64.S: Remove dead strings. + (ffi_call_unix64): Rename from ffi_call_UNIX64. Rewrite to share + stack frame with ffi_call. Handle structure returns properly. + (float2sse, floatfloat2sse, double2sse): Remove. + (sse2float, sse2double, sse2floatfloat): Remove. + (ffi_closure_unix64): Rename from ffi_closure_UNIX64. Rewrite + to handle structure returns properly. + +2004-12-08 David Edelsohn + + * Makefile.am (AM_MAKEFLAGS): Remove duplicate LIBCFLAGS and + PICFLAG. + * Makefile.in: Regenerated. + +2004-12-02 Richard Sandiford + + * configure.ac: Use TL_AC_GCC_VERSION to set gcc_version. + * configure, aclocal.m4, Makefile.in: Regenerate. + * include/Makefile.in, testsuite/Makefile.in: Regenerate. + +2004-11-29 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-25 Kelley Cook + + * configure: Regenerate for libtool reversion. + +2004-11-24 Kelley Cook + + * configure: Regenerate for libtool change. + +2004-11-23 John David Anglin + + * testsuite/lib/libffi-dg.exp: Use new procs in target-libpath.exp. + +2004-11-23 Richard Sandiford + + * src/mips/o32.S (ffi_call_O32, ffi_closure_O32): Use jalr instead + of jal. Use an absolute encoding for the frame information. + +2004-11-23 Kelley Cook + + * Makefile.am: Remove no-dependencies. Add ACLOCAL_AMFLAGS. + * acinclude.m4: Delete logic for sincludes. + * aclocal.m4, Makefile.in, configure: Regenerate. + * include/Makefile: Likewise. + * testsuite/Makefile: Likewise. + +2004-11-22 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_closure): Align doubles and 64-bit integers + on a 8-byte boundary. + * src/sparc/v8.S (ffi_closure_v8): Reserve frame space for arguments. + +2004-10-27 Richard Earnshaw + + * src/arm/ffi.c (ffi_prep_cif_machdep): Handle functions that return + long long values. Round stack allocation to a multiple of 8 bytes + for ATPCS compatibility. + * src/arm/sysv.S (ffi_call_SYSV): Rework to avoid use of APCS register + names. Handle returning long long types. Add Thumb and interworking + support. Improve soft-float code. + +2004-10-27 Richard Earnshaw + + * testsuite/lib/libffi-db.exp (load_gcc_lib): New function. + (libffi_exit): New function. + (libffi_init): Build the testglue wrapper if needed. + +2004-10-25 Eric Botcazou + + PR other/18138 + * testsuite/lib/libffi-dg.exp: Accept more than one multilib libgcc. 
+
+2004-10-25 Kazuhiro Inaoka
+
+ * src/m32r/libffitarget.h (FFI_CLOSURES): Set to 0.
+
+2004-10-20 Kaz Kojima
+
+ * src/sh/sysv.S (ffi_call_SYSV): Don't align for double data.
+ * testsuite/libffi.call/float3.c: New test case.
+
+2004-10-18 Kaz Kojima
+
+ * src/sh/ffi.c (ffi_prep_closure): Set T bit in trampoline for
+ the function returning a structure pointed with R2.
+ * src/sh/sysv.S (ffi_closure_SYSV): Use R2 as the pointer to
+ the structure return value if T bit set. Emit position
+ independent code and EH data if PIC.
+
+2004-10-13 Kazuhiro Inaoka
+
+ * Makefile.am: Add m32r support.
+ * configure.ac: Likewise.
+ * Makefile.in: Regenerate.
+ * configure: Regenerate.
+ * src/types.c: Add m32r port to FFI_INTERNAL_TYPEDEF
+ (uint64, sint64, double, longdouble)
+ * src/m32r: New directory.
+ * src/m32r/ffi.c: New file.
+ * src/m32r/sysv.S: Likewise.
+ * src/m32r/ffitarget.h: Likewise.
+
+2004-10-02 Kaz Kojima
+
+ * testsuite/libffi.call/negint.c: New test case.
+
+2004-09-14 H.J. Lu
+
+ PR libgcj/17465
+ * testsuite/lib/libffi-dg.exp: Don't use global ld_library_path.
+ Set up LD_LIBRARY_PATH, SHLIB_PATH, LD_LIBRARYN32_PATH,
+ LD_LIBRARY64_PATH, LD_LIBRARY_PATH_32, LD_LIBRARY_PATH_64 and
+ DYLD_LIBRARY_PATH.
+
+2004-09-05 Andreas Tobler
+
+ * testsuite/libffi.call/many_win32.c: Remove whitespaces.
+ * testsuite/libffi.call/promotion.c: Likewise.
+ * testsuite/libffi.call/return_ll.c: Remove unused var. Cleanup
+ whitespaces.
+ * testsuite/libffi.call/return_sc.c: Likewise.
+ * testsuite/libffi.call/return_uc.c: Likewise.
+
+2004-09-05 Andreas Tobler
+
+ * src/powerpc/darwin.S: Fix comments and indentation.
+ * src/powerpc/darwin_closure.S: Likewise.
+
+2004-09-02 Andreas Tobler
+
+ * src/powerpc/ffi_darwin.c: Add flag for longdouble return values.
+ (ffi_prep_args): Handle longdouble arguments.
+ (ffi_prep_cif_machdep): Set flags for longdouble. Calculate space for
+ longdouble.
+ (ffi_closure_helper_DARWIN): Add closure handling for longdouble.
+ * src/powerpc/darwin.S (_ffi_call_DARWIN): Add handling of longdouble
+ values.
+ * src/powerpc/darwin_closure.S (_ffi_closure_ASM): Likewise.
+ * src/types.c: Defined longdouble size and alignment for darwin.
+
+2004-09-02 Andreas Tobler
+
+ * src/powerpc/aix.S: Remove whitespaces.
+ * src/powerpc/aix_closure.S: Likewise.
+ * src/powerpc/asm.h: Likewise.
+ * src/powerpc/ffi.c: Likewise.
+ * src/powerpc/ffitarget.h: Likewise.
+ * src/powerpc/linux64.S: Likewise.
+ * src/powerpc/linux64_closure.S: Likewise.
+ * src/powerpc/ppc_closure.S: Likewise.
+ * src/powerpc/sysv.S: Likewise.
+
+2004-08-30 Anthony Green
+
+ * Makefile.am: Add frv support.
+ * Makefile.in, testsuite/Makefile.in: Rebuilt.
+ * configure.ac: Read configure.host.
+ * configure.in: Read configure.host.
+ * configure.host: New file. frv-elf needs libgloss.
+ * include/ffi.h.in: Force ffi_closure to have a nice big (8)
+ alignment. This is needed to frv and shouldn't harm the others.
+ * include/ffi_common.h (ALIGN_DOWN): New macro.
+ * src/frv/ffi.c, src/frv/ffitarget.h, src/frv/eabi.S: New files.
+
+2004-08-24 David Daney
+
+ * testsuite/libffi.call/closure_fn0.c: Xfail mips64* instead of mips*.
+ * testsuite/libffi.call/closure_fn1.c: Likewise.
+ * testsuite/libffi.call/closure_fn2.c: Likewise.
+ * testsuite/libffi.call/closure_fn3.c: Likewise.
+ * testsuite/libffi.call/closure_fn4.c: Likewise.
+ * testsuite/libffi.call/closure_fn5.c: Likewise.
+ * testsuite/libffi.call/cls_18byte.c: Likewise.
+ * testsuite/libffi.call/cls_19byte.c: Likewise.
+ * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint64.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_sshort.c: Likewise. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise and set return value + to zero. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + +2004-08-23 David Daney + + PR libgcj/13141 + * src/mips/ffitarget.h (FFI_O32_SOFT_FLOAT): New ABI. + * src/mips/ffi.c (ffi_prep_args): Fix alignment calculation. + (ffi_prep_cif_machdep): Handle FFI_O32_SOFT_FLOAT floating point + parameters and return types. + (ffi_call): Handle FFI_O32_SOFT_FLOAT ABI. + (ffi_prep_closure): Ditto. + (ffi_closure_mips_inner_O32): Handle FFI_O32_SOFT_FLOAT ABI, fix + alignment calculations. + * src/mips/o32.S (ffi_closure_O32): Don't use floating point + instructions if FFI_O32_SOFT_FLOAT, make stack frame ABI compliant. + +2004-08-14 Casey Marshall + + * src/mips/ffi.c (ffi_pref_cif_machdep): set `cif->flags' to + contain `FFI_TYPE_UINT64' as return type for any 64-bit + integer (O32 ABI only). + (ffi_prep_closure): new function. + (ffi_closure_mips_inner_O32): new function. 
+ * src/mips/ffitarget.h: Define `FFI_CLOSURES' and + `FFI_TRAMPOLINE_SIZE' appropriately if the ABI is o32. + * src/mips/o32.S (ffi_call_O32): add labels for .eh_frame. Return + 64 bit integers correctly. + (ffi_closure_O32): new function. + Added DWARF-2 unwind info for both functions. + +2004-08-10 Andrew Haley + + * src/x86/ffi64.c (ffi_prep_args ): 8-align all stack arguments. + +2004-08-01 Robert Millan + + * configure.ac: Detect knetbsd-gnu and kfreebsd-gnu. + * configure: Regenerate. + +2004-07-30 Maciej W. Rozycki + + * acinclude.m4 (AC_FUNC_MMAP_BLACKLIST): Check for + and mmap() explicitly instead of relying on preset autoconf cache + variables. + * aclocal.m4: Regenerate. + * configure: Regenerate. + +2004-07-11 Ulrich Weigand + + * src/s390/ffi.c (ffi_prep_args): Fix C aliasing violation. + (ffi_check_float_struct): Remove unused prototype. + +2004-06-30 Geoffrey Keating + + * src/powerpc/ffi_darwin.c (flush_icache): ';' is a comment + character on Darwin, use '\n\t' instead. + +2004-06-26 Matthias Klose + + * libtool-version: Fix typo in revision/age. + +2004-06-17 Matthias Klose + + * libtool-version: New. + * Makefile.am (libffi_la_LDFLAGS): Use -version-info for soname. + * Makefile.in: Regenerate. + +2004-06-15 Paolo Bonzini + + * Makefile.am: Remove useless multilib rules. + * Makefile.in: Regenerate. + * aclocal.m4: Regenerate with automake 1.8.5. + * configure.ac: Remove useless multilib configury. + * configure: Regenerate. + +2004-06-15 Paolo Bonzini + + * .cvsignore: New file. + +2004-06-10 Jakub Jelinek + + * src/ia64/unix.S (ffi_call_unix): Insert group barrier break + fp_done. + (ffi_closure_UNIX): Fix f14/f15 adjustment if FLOAT_SZ is ever + changed from 8. + +2004-06-06 Sean McNeil + + * configure.ac: Add x86_64-*-freebsd* support. + * configure: Regenerate. + +2004-04-26 Joe Buck + + Bug 15093 + * configure.ac: Test for existence of mmap and sys/mman.h before + checking blacklist. Fix suggested by Jim Wilson. + * configure: Regenerate. + +2004-04-26 Matt Austern + + * src/powerpc/darwin.S: Go through a non-lazy pointer for initial + FDE location. + * src/powerpc/darwin_closure.S: Likewise. + +2004-04-24 Andreas Tobler + + * testsuite/libffi.call/cls_multi_schar.c (main): Fix initialization + error. Reported by Thomas Heller . + * testsuite/libffi.call/cls_multi_sshort.c (main): Likewise. + * testsuite/libffi.call/cls_multi_ushort.c (main): Likewise. + +2004-03-20 Matthias Klose + + * src/pa/linux.S: Fix typo. + +2004-03-19 Matthias Klose + + * Makefile.am: Update. + * Makefile.in: Regenerate. + * src/pa/ffi.h.in: Remove. + * src/pa/ffitarget.h: New file. + +2004-02-10 Randolph Chung + + * Makefile.am: Add PA support. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * configure.ac: Add PA target. + * configure: Regenerate. + * src/pa/ffi.c: New file. + * src/pa/ffi.h.in: Add PA support. + * src/pa/linux.S: New file. + * prep_cif.c: Add PA support. + +2004-03-16 Hosaka Yuji + + * src/types.c: Fix alignment size of X86_WIN32 case int64 and + double. + * src/x86/ffi.c (ffi_prep_args): Replace ecif->cif->rtype->type + with ecif->cif->flags. + (ffi_call, ffi_prep_incoming_args_SYSV): Replace cif->rtype->type + with cif->flags. + (ffi_prep_cif_machdep): Add X86_WIN32 struct case. + (ffi_closure_SYSV): Add 1 or 2-bytes struct case for X86_WIN32. + * src/x86/win32.S (retstruct1b, retstruct2b, sc_retstruct1b, + sc_retstruct2b): Add for 1 or 2-bytes struct case. + +2004-03-15 Kelley Cook + + * configure.in: Rename file to ... + * configure.ac: ... 
this. + * fficonfig.h.in: Regenerate. + * Makefile.in: Regenerate. + * include/Makefile.in: Regenerate. + * testsuite/Makefile.in: Regenerate. + +2004-03-12 Matt Austern + + * src/powerpc/darwin.S: Fix EH information so it corresponds to + changes in EH format resulting from addition of linkonce support. + * src/powerpc/darwin_closure.S: Likewise. + +2004-03-11 Andreas Tobler + Paolo Bonzini + + * Makefile.am (AUTOMAKE_OPTIONS): Set them. + Remove VPATH. Remove rules for object files. Remove multilib support. + (AM_CCASFLAGS): Add. + * configure.in (AC_CONFIG_HEADERS): Relace AM_CONFIG_HEADER. + (AC_PREREQ): Bump version to 2.59. + (AC_INIT): Fill with version info and bug address. + (ORIGINAL_LD_FOR_MULTILIBS): Remove. + (AM_ENABLE_MULTILIB): Use this instead of AC_ARG_ENABLE. + De-precious CC so that the right flags are passed down to multilibs. + (AC_MSG_ERROR): Replace obsolete macro AC_ERROR. + (AC_CONFIG_FILES): Replace obsolete macro AC_LINK_FILES. + (AC_OUTPUT): Reorganize the output with AC_CONFIG_COMMANDS. + * configure: Rebuilt. + * aclocal.m4: Likewise. + * Makefile.in, include/Makefile.in, testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2004-03-11 Andreas Schwab + + * src/ia64/ffi.c (ffi_prep_incoming_args_UNIX): Get floating point + arguments from fp registers only for the first 8 parameter slots. + Don't convert a float parameter when passed in memory. + +2004-03-09 Hans-Peter Nilsson + + * configure: Regenerate for config/accross.m4 correction. + +2004-02-25 Matt Kraai + + * src/powerpc/ffi.c (ffi_prep_args_SYSV): Change + ecif->cif->bytes to bytes. + (ffi_prep_cif_machdep): Add braces around nested if statement. + +2004-02-09 Alan Modra + + * src/types.c (pointer): POWERPC64 has 8 byte pointers. + + * src/powerpc/ffi.c (ffi_prep_args64): Correct long double handling. + (ffi_closure_helper_LINUX64): Fix typo. + * testsuite/libffi.call/cls_align_longdouble.c: Pass -mlong-double-128 + for powerpc64-*-*. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + +2004-02-08 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_cif_machdep ): Correct + long double function return and long double arg handling. + (ffi_closure_helper_LINUX64): Formatting. Delete unused "ng" var. + Use "end_pfr" instead of "nf". Correct long double handling. + Localise "temp". + * src/powerpc/linux64.S (ffi_call_LINUX64): Save f2 long double + return value. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Allocate + space for long double return value. Adjust stack frame and offsets. + Load f2 long double return. + +2004-02-07 Alan Modra + + * src/types.c: Use 16 byte long double for POWERPC64. + +2004-01-25 Eric Botcazou + + * src/sparc/ffi.c (ffi_prep_args_v9): Shift the parameter array + when the structure return address is passed in %o0. + (ffi_V9_return_struct): Rename into ffi_v9_layout_struct. + (ffi_v9_layout_struct): Align the field following a nested structure + on a word boundary. Use memmove instead of memcpy. + (ffi_call): Update call to ffi_V9_return_struct. + (ffi_prep_closure): Define 'ctx' only for V8. + (ffi_closure_sparc_inner): Clone into ffi_closure_sparc_inner_v8 + and ffi_closure_sparc_inner_v9. + (ffi_closure_sparc_inner_v8): Return long doubles by reference. + Always skip the structure return address. For structures and long + doubles, copy the argument directly. + (ffi_closure_sparc_inner_v9): Skip the structure return address only + if required. Shift the maximum floating-point slot accordingly. 
For + big structures, copy the argument directly; otherwise, left-justify the + argument and call ffi_v9_layout_struct to lay out the structure on + the stack. + * src/sparc/v8.S: Undef STACKFRAME before defining it. + (ffi_closure_v8): Pass the structure return address. Update call to + ffi_closure_sparc_inner_v8. Short-circuit FFI_TYPE_INT handling. + Skip the 'unimp' insn when returning long doubles and structures. + * src/sparc/v9.S: Undef STACKFRAME before defining it. + (ffi_closure_v9): Increase the frame size by 2 words. Short-circuit + FFI_TYPE_INT handling. Load structures both in integers and + floating-point registers on return. + * README: Update status of the SPARC port. + +2004-01-24 Andreas Tobler + + * testsuite/libffi.call/pyobjc-tc.c (main): Treat result value + as of type ffi_arg. + * testsuite/libffi.call/struct3.c (main): Fix CHECK. + +2004-01-22 Ulrich Weigand + + * testsuite/libffi.call/cls_uint.c (cls_ret_uint_fn): Treat result + value as of type ffi_arg, not unsigned int. + +2004-01-21 Michael Ritzert + + * ffi64.c (ffi_prep_args): Cast the RHS of an assignment instead + of the LHS. + +2004-01-12 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_32 for + Solaris. + +2004-01-08 Rainer Orth + + * testsuite/libffi.call/ffitest.h (allocate_mmap): Cast MAP_FAILED + to void *. + +2003-12-10 Richard Henderson + + * testsuite/libffi.call/cls_align_pointer.c: Cast pointers to + size_t instead of int. + +2003-12-04 Hosaka Yuji + + * testsuite/libffi.call/many_win32.c: Include . + * testsuite/libffi.call/many_win32.c (main): Replace variable + int i with unsigned long ul. + + * testsuite/libffi.call/cls_align_uint64.c: New test case. + * testsuite/libffi.call/cls_align_sint64.c: Likewise. + * testsuite/libffi.call/cls_align_uint32.c: Likewise. + * testsuite/libffi.call/cls_align_sint32.c: Likewise. + * testsuite/libffi.call/cls_align_uint16.c: Likewise. + * testsuite/libffi.call/cls_align_sint16.c: Likewise. + * testsuite/libffi.call/cls_align_float.c: Likewise. + * testsuite/libffi.call/cls_align_double.c: Likewise. + * testsuite/libffi.call/cls_align_longdouble.c: Likewise. + * testsuite/libffi.call/cls_align_pointer.c: Likewise. + +2003-12-02 Hosaka Yuji + + PR other/13221 + * src/x86/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): + Align arguments to 32 bits. + +2003-12-01 Andreas Tobler + + PR other/13221 + * testsuite/libffi.call/cls_multi_sshort.c: New test case. + * testsuite/libffi.call/cls_multi_sshortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_uchar.c: Likewise. + * testsuite/libffi.call/cls_multi_schar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushortchar.c: Likewise. + * testsuite/libffi.call/cls_multi_ushort.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Cosmetics. + +2003-11-26 Kaveh R. Ghazi + + * testsuite/libffi.call/ffitest.h: Include . + * testsuite/libffi.special/ffitestcxx.h: Likewise. + +2003-11-22 Andreas Tobler + + * Makefile.in: Rebuilt. + * configure: Likewise. + * testsuite/libffi.special/unwindtest.cc: Convert the mmap to + the right type. + +2003-11-21 Andreas Jaeger + Andreas Tobler + + * acinclude.m4: Add AC_FUNC_MMAP_BLACKLIST. + * configure.in: Call AC_FUNC_MMAP_BLACKLIST. + * Makefile.in: Rebuilt. + * aclocal.m4: Likewise. + * configure: Likewise. + * fficonfig.h.in: Likewise. + * testsuite/lib/libffi-dg.exp: Add include dir. + * testsuite/libffi.call/ffitest.h: Add MMAP definitions. + * testsuite/libffi.special/ffitestcxx.h: Likewise. 
+ * testsuite/libffi.call/closure_fn0.c: Use MMAP functionality + for ffi_closure if available. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + +2003-11-20 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Make the -lgcc_s conditional. + +2003-11-19 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Add DYLD_LIBRARY_PATH for darwin. + Add -lgcc_s to additional flags. + +2003-11-12 Andreas Tobler + + * configure.in, include/Makefile.am: PR libgcj/11147, install + the ffitarget.h header file in a gcc versioned and target + dependent place. + * configure: Regenerated. + * Makefile.in, include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + +2003-11-09 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Print result and check + with dg-output to make debugging easier. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. 
+ * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_9byte2.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + + * testsuite/libffi.special/unwindtest.cc: Make ffi_closure + static. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_9byte2.c: New test case. + * testsuite/libffi.call/cls_9byte1.c: Likewise. + * testsuite/libffi.call/cls_64byte.c: Likewise. + * testsuite/libffi.call/cls_20byte1.c: Likewise. + * testsuite/libffi.call/cls_19byte.c: Likewise. + * testsuite/libffi.call/cls_18byte.c: Likewise. + * testsuite/libffi.call/closure_fn4.c: Likewise. + * testsuite/libffi.call/closure_fn5.c: Likewise. + * testsuite/libffi.call/cls_schar.c: Likewise. + * testsuite/libffi.call/cls_sint.c: Likewise. + * testsuite/libffi.call/cls_sshort.c: Likewise. + * testsuite/libffi.call/nested_struct2.c: Likewise. + * testsuite/libffi.call/nested_struct3.c: Likewise. + +2003-11-08 Andreas Tobler + + * testsuite/libffi.call/cls_double.c: Do a check on the result. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/return_sc.c: Cleanup whitespaces. + +2003-11-06 Andreas Tobler + + * src/prep_cif.c (ffi_prep_cif): Move the validity check after + the initialization. + +2003-10-23 Andreas Tobler + + * src/java_raw_api.c (ffi_java_ptrarray_to_raw): Replace + FFI_ASSERT(FALSE) with FFI_ASSERT(0). + +2003-10-22 David Daney + + * src/mips/ffitarget.h: Replace undefined UINT32 and friends with + __attribute__((__mode__(__SI__))) and friends. + +2003-10-22 Andreas Schwab + + * src/ia64/ffi.c: Replace FALSE/TRUE with false/true. + +2003-10-21 Andreas Tobler + + * configure.in: AC_LINK_FILES(ffitarget.h). + * configure: Regenerate. + * Makefile.in: Likewise. + * include/Makefile.in: Likewise. + * testsuite/Makefile.in: Likewise. + * fficonfig.h.in: Likewise. + +2003-10-21 Paolo Bonzini + Richard Henderson + + Avoid that ffi.h includes fficonfig.h. + + * Makefile.am (EXTRA_DIST): Include ffitarget.h files + (TARGET_SRC_MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (TARGET_SRC_MIPS_SGI): Removed. + (MIPS_GCC): Renamed to TARGET_SRC_MIPS_IRIX. + (MIPS_SGI): Removed. + (CLEANFILES): Removed. + (mostlyclean-am, clean-am, mostlyclean-sub, clean-sub): New + targets. + * acconfig.h: Removed. + * configure.in: Compute sizeofs only for double and long double. + Use them to define and subst HAVE_LONG_DOUBLE. Include comments + into AC_DEFINE instead of using acconfig.h. 
Create + include/ffitarget.h instead of include/fficonfig.h. Rename + MIPS_GCC to MIPS_IRIX, drop MIPS_SGI since we are in gcc's tree. + AC_DEFINE EH_FRAME_FLAGS. + * include/Makefile.am (DISTCLEANFILES): New automake macro. + (hack_DATA): Add ffitarget.h. + * include/ffi.h.in: Remove all system specific definitions. + Declare raw API even if it is not installed, why bother? + Use limits.h instead of SIZEOF_* to define ffi_type_*. Do + not define EH_FRAME_FLAGS, it is in fficonfig.h now. Include + ffitarget.h instead of fficonfig.h. Remove ALIGN macro. + (UINT_ARG, INT_ARG): Removed, use ffi_arg and ffi_sarg instead. + * include/ffi_common.h (bool): Do not define. + (ffi_assert): Accept failed assertion. + (ffi_type_test): Return void and accept file/line. + (FFI_ASSERT): Pass stringized failed assertion. + (FFI_ASSERT_AT): New macro. + (FFI_ASSERT_VALID_TYPE): New macro. + (UINT8, SINT8, UINT16, SINT16, UINT32, SINT32, + UINT64, SINT64): Define here with gcc's __attribute__ macro + instead of in ffi.h + (FLOAT32, ALIGN): Define here instead of in ffi.h + * include/ffi-mips.h: Removed. Its content moved to + src/mips/ffitarget.h after separating assembly and C sections. + * src/alpha/ffi.c, src/alpha/ffi.c, src/java_raw_api.c + src/prep_cif.c, src/raw_api.c, src/ia64/ffi.c, + src/mips/ffi.c, src/mips/n32.S, src/mips/o32.S, + src/mips/ffitarget.h, src/sparc/ffi.c, src/x86/ffi64.c: + SIZEOF_ARG -> FFI_SIZEOF_ARG. + * src/ia64/ffi.c: Include stdbool.h (provided by GCC 2.95+). + * src/debug.c (ffi_assert): Accept stringized failed assertion. + (ffi_type_test): Rewritten. + * src/prep-cif.c (initialize_aggregate, ffi_prep_cif): Call + FFI_ASSERT_VALID_TYPE. + * src/alpha/ffitarget.h, src/arm/ffitarget.h, + src/ia64/ffitarget.h, src/m68k/ffitarget.h, + src/mips/ffitarget.h, src/powerpc/ffitarget.h, + src/s390/ffitarget.h, src/sh/ffitarget.h, + src/sh64/ffitarget.h, src/sparc/ffitarget.h, + src/x86/ffitarget.h: New files. + * src/alpha/osf.S, src/arm/sysv.S, src/ia64/unix.S, + src/m68k/sysv.S, src/mips/n32.S, src/mips/o32.S, + src/powerpc/aix.S, src/powerpc/darwin.S, + src/powerpc/ffi_darwin.c, src/powerpc/linux64.S, + src/powerpc/linux64_closure.S, src/powerpc/ppc_closure.S, + src/powerpc/sysv.S, src/s390/sysv.S, src/sh/sysv.S, + src/sh64/sysv.S, src/sparc/v8.S, src/sparc/v9.S, + src/x86/sysv.S, src/x86/unix64.S, src/x86/win32.S: + include fficonfig.h + +2003-10-20 Rainer Orth + + * src/mips/ffi.c: Use _ABIN32, _ABIO32 instead of external + _MIPS_SIM_NABI32, _MIPS_SIM_ABI32. + +2003-10-19 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Declare bytes again. + Used when FFI_DEBUG = 1. + +2003-10-14 Alan Modra + + * src/types.c (double, longdouble): Default POWERPC64 to 8 byte size + and align. + +2003-10-06 Rainer Orth + + * include/ffi_mips.h: Define FFI_MIPS_N32 for N32/N64 ABIs, + FFI_MIPS_O32 for O32 ABI. + +2003-10-01 Andreas Tobler + + * testsuite/lib/libffi-dg.exp: Set LD_LIBRARY_PATH_64 for + SPARC64. Cleanup whitespaces. + +2003-09-19 Andreas Tobler + + * testsuite/libffi.call/closure_fn0.c: Xfail mips, arm, + strongarm, xscale. Cleanup whitespaces. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. 
+ * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Cleanup whitespaces. + +2003-09-18 David Edelsohn + + * src/powerpc/aix.S: Cleanup whitespaces. + * src/powerpc/aix_closure.S: Likewise. + +2003-09-18 Andreas Tobler + + * src/powerpc/darwin.S: Cleanup whitespaces, comment formatting. + * src/powerpc/darwin_closure.S: Likewise. + * src/powerpc/ffi_darwin.c: Likewise. + +2003-09-18 Andreas Tobler + David Edelsohn + + * src/types.c (double): Add AIX and Darwin to the right TYPEDEF. + * src/powerpc/aix_closure.S: Remove the pointer to the outgoing + parameter stack. + * src/powerpc/darwin_closure.S: Likewise. + * src/powerpc/ffi_darwin.c (ffi_prep_args): Handle structures + according to the Darwin/AIX ABI. + (ffi_prep_cif_machdep): Likewise. + (ffi_closure_helper_DARWIN): Likewise. + Remove the outgoing parameter stack logic. Simplify the evaluation + of the different CASE types. + (ffi_prep_clousure): Avoid the casts on lvalues. Change the branch + statement in the trampoline code. + +2003-09-18 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_args): Take account into the alignement + for the register size. + (ffi_closure_helper_SYSV): Handle the structure return value + address correctly. + (ffi_closure_helper_SYSV): Return the appropriate type when + the registers are used for the structure return value. + * src/sh/sysv.S (ffi_closure_SYSV): Fix the stack layout for + the 64-bit return value. Update copyright years. + +2003-09-17 Rainer Orth + + * testsuite/lib/libffi-dg.exp (libffi_target_compile): Search in + srcdir for ffi_mips.h. + +2003-09-12 Alan Modra + + * src/prep_cif.c (initialize_aggregate): Include tail padding in + structure size. + * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Correct + placement of float result. + * testsuite/libffi.special/unwindtest.cc (closure_test_fn1): Correct + cast of "resp" for big-endian 64 bit machines. + +2003-09-11 Alan Modra + + * src/types.c (double, longdouble): Merge identical SH and ARM + typedefs, and add POWERPC64. + * src/powerpc/ffi.c (ffi_prep_args64): Correct next_arg calc for + struct split over gpr and rest. + (ffi_prep_cif_machdep): Correct intarg_count for structures. + * src/powerpc/linux64.S (ffi_call_LINUX64): Fix gpr offsets. + +2003-09-09 Andreas Tobler + + * src/powerpc/ffi.c (ffi_closure_helper_SYSV) Handle struct + passing correctly. + +2003-09-09 Alan Modra + + * configure: Regenerate. + +2003-09-04 Andreas Tobler + + * Makefile.am: Remove build rules for ffitest. + * Makefile.in: Rebuilt. 
+ +2003-09-04 Andreas Tobler + + * src/java_raw_api.c: Include to fix compiler warning + about implicit declaration of abort(). + +2003-09-04 Andreas Tobler + + * Makefile.am: Add dejagnu test framework. Fixes PR other/11411. + * Makefile.in: Rebuilt. + * configure.in: Add dejagnu test framework. + * configure: Rebuilt. + + * testsuite/Makefile.am: New file. + * testsuite/Makefile.in: Built + * testsuite/lib/libffi-dg.exp: New file. + * testsuite/config/default.exp: Likewise. + * testsuite/libffi.call/call.exp: Likewise. + * testsuite/libffi.call/ffitest.h: Likewise. + * testsuite/libffi.call/closure_fn0.c: Likewise. + * testsuite/libffi.call/closure_fn1.c: Likewise. + * testsuite/libffi.call/closure_fn2.c: Likewise. + * testsuite/libffi.call/closure_fn3.c: Likewise. + * testsuite/libffi.call/cls_1_1byte.c: Likewise. + * testsuite/libffi.call/cls_3_1byte.c: Likewise. + * testsuite/libffi.call/cls_4_1byte.c: Likewise. + * testsuite/libffi.call/cls_2byte.c: Likewise. + * testsuite/libffi.call/cls_3byte1.c: Likewise. + * testsuite/libffi.call/cls_3byte2.c: Likewise. + * testsuite/libffi.call/cls_4byte.c: Likewise. + * testsuite/libffi.call/cls_5byte.c: Likewise. + * testsuite/libffi.call/cls_6byte.c: Likewise. + * testsuite/libffi.call/cls_7byte.c: Likewise. + * testsuite/libffi.call/cls_8byte.c: Likewise. + * testsuite/libffi.call/cls_12byte.c: Likewise. + * testsuite/libffi.call/cls_16byte.c: Likewise. + * testsuite/libffi.call/cls_20byte.c: Likewise. + * testsuite/libffi.call/cls_24byte.c: Likewise. + * testsuite/libffi.call/cls_double.c: Likewise. + * testsuite/libffi.call/cls_float.c: Likewise. + * testsuite/libffi.call/cls_uchar.c: Likewise. + * testsuite/libffi.call/cls_uint.c: Likewise. + * testsuite/libffi.call/cls_ulonglong.c: Likewise. + * testsuite/libffi.call/cls_ushort.c: Likewise. + * testsuite/libffi.call/float.c: Likewise. + * testsuite/libffi.call/float1.c: Likewise. + * testsuite/libffi.call/float2.c: Likewise. + * testsuite/libffi.call/many.c: Likewise. + * testsuite/libffi.call/many_win32.c: Likewise. + * testsuite/libffi.call/nested_struct.c: Likewise. + * testsuite/libffi.call/nested_struct1.c: Likewise. + * testsuite/libffi.call/pyobjc-tc.c: Likewise. + * testsuite/libffi.call/problem1.c: Likewise. + * testsuite/libffi.call/promotion.c: Likewise. + * testsuite/libffi.call/return_ll.c: Likewise. + * testsuite/libffi.call/return_sc.c: Likewise. + * testsuite/libffi.call/return_uc.c: Likewise. + * testsuite/libffi.call/strlen.c: Likewise. + * testsuite/libffi.call/strlen_win32.c: Likewise. + * testsuite/libffi.call/struct1.c: Likewise. + * testsuite/libffi.call/struct2.c: Likewise. + * testsuite/libffi.call/struct3.c: Likewise. + * testsuite/libffi.call/struct4.c: Likewise. + * testsuite/libffi.call/struct5.c: Likewise. + * testsuite/libffi.call/struct6.c: Likewise. + * testsuite/libffi.call/struct7.c: Likewise. + * testsuite/libffi.call/struct8.c: Likewise. + * testsuite/libffi.call/struct9.c: Likewise. + * testsuite/libffi.special/special.exp: New file. + * testsuite/libffi.special/ffitestcxx.h: Likewise. + * testsuite/libffi.special/unwindtest.cc: Likewise. + + +2003-08-13 Kaz Kojima + + * src/sh/ffi.c (OFS_INT16): Set 0 for little endian case. Update + copyright years. + +2003-08-02 Alan Modra + + * src/powerpc/ffi.c (ffi_prep_args64): Modify for changed gcc + structure passing. + (ffi_closure_helper_LINUX64): Likewise. + * src/powerpc/linux64.S: Remove code writing to parm save area. 
+ * src/powerpc/linux64_closure.S (ffi_closure_LINUX64): Use return + address in lr from ffi_closure_helper_LINUX64 call to calculate + table address. Optimize function tail. + +2003-07-28 Andreas Tobler + + * src/sparc/ffi.c: Handle all floating point registers. + * src/sparc/v9.S: Likewise. Fixes second part of PR target/11410. + +2003-07-11 Gerald Pfeifer + + * README: Note that libffi is not part of GCC. Update the project + URL and status. + +2003-06-19 Franz Sirl + + * src/powerpc/ppc_closure.S: Include ffi.h. + +2003-06-13 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .uleb128/.sleb128 directives. + Use C style comments. + +2003-06-13 Kaz Kojima + + * Makefile.am: Add SHmedia support. Fix a typo of SH support. + * Makefile.in: Regenerate. + * configure.in (sh64-*-linux*, sh5*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SHmedia support. + * src/sh64/ffi.c: New file. + * src/sh64/sysv.S: New file. + +2003-05-16 Jakub Jelinek + + * configure.in (HAVE_RO_EH_FRAME): Check whether .eh_frame section + should be read-only. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + * include/ffi.h.in (EH_FRAME_FLAGS): Define. + * src/alpha/osf.S: Use EH_FRAME_FLAGS. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. Include ffi.h. + * src/powerpc/sysv.S: Use EH_FRAME_FLAGS. Use pcrel encoding + if -fpic/-fPIC/-mrelocatable. + * src/powerpc/powerpc_closure.S: Likewise. + * src/sparc/v8.S: If HAVE_RO_EH_FRAME is defined, don't include + #write in .eh_frame flags. + * src/sparc/v9.S: Likewise. + * src/x86/unix64.S: Use EH_FRAME_FLAGS. + * src/x86/sysv.S: Likewise. Use pcrel encoding if -fpic/-fPIC. + * src/s390/sysv.S: Use EH_FRAME_FLAGS. Include ffi.h. + +2003-05-07 Jeff Sturm + + Fixes PR bootstrap/10656 + * configure.in (HAVE_AS_REGISTER_PSEUDO_OP): Test assembler + support for .register pseudo-op. + * src/sparc/v8.S: Use it. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2003-04-18 Jakub Jelinek + + * include/ffi.h.in (POWERPC64): Define if 64-bit. + (enum ffi_abi): Add FFI_LINUX64 on POWERPC. + Make it the default on POWERPC64. + (FFI_TRAMPOLINE_SIZE): Define to 24 on POWERPC64. + * configure.in: Change powerpc-*-linux* into powerpc*-*-linux*. + * configure: Rebuilt. + * src/powerpc/ffi.c (hidden): Define. + (ffi_prep_args_SYSV): Renamed from + ffi_prep_args. Cast pointers to unsigned long to shut up warnings. + (NUM_GPR_ARG_REGISTERS64, NUM_FPR_ARG_REGISTERS64, + ASM_NEEDS_REGISTERS64): New. + (ffi_prep_args64): New function. + (ffi_prep_cif_machdep): Handle FFI_LINUX64 ABI. + (ffi_call): Likewise. + (ffi_prep_closure): Likewise. + (flush_icache): Surround by #ifndef POWERPC64. + (ffi_dblfl): New union type. + (ffi_closure_helper_SYSV): Use it to avoid aliasing problems. + (ffi_closure_helper_LINUX64): New function. + * src/powerpc/ppc_closure.S: Surround whole file by #ifndef + __powerpc64__. + * src/powerpc/sysv.S: Likewise. + (ffi_call_SYSV): Rename ffi_prep_args to ffi_prep_args_SYSV. + * src/powerpc/linux64.S: New file. + * src/powerpc/linux64_closure.S: New file. + * Makefile.am (EXTRA_DIST): Add src/powerpc/linux64.S and + src/powerpc/linux64_closure.S. + (TARGET_SRC_POWERPC): Likewise. + + * src/ffitest.c (closure_test_fn, closure_test_fn1, closure_test_fn2, + closure_test_fn3): Fix result printing on big-endian 64-bit + machines. + (main): Print tst2_arg instead of uninitialized tst2_result. + + * src/ffitest.c (main): Hide what closure pointer really points to + from the compiler. 
+ +2003-04-16 Richard Earnshaw + + * configure.in (arm-*-netbsdelf*): Add configuration. + (configure): Regenerated. + +2003-04-04 Loren J. Rittle + + * include/Makefile.in: Regenerate. + +2003-03-21 Zdenek Dvorak + + * libffi/include/ffi.h.in: Define X86 instead of X86_64 in 32 + bit mode. + * libffi/src/x86/ffi.c (ffi_closure_SYSV, ffi_closure_raw_SYSV): + Receive closure pointer through parameter, read args using + __builtin_dwarf_cfa. + (FFI_INIT_TRAMPOLINE): Send closure reference through eax. + +2003-03-12 Andreas Schwab + + * configure.in: Avoid trailing /. in toolexeclibdir. + * configure: Rebuilt. + +2003-03-03 Andreas Tobler + + * src/powerpc/darwin_closure.S: Recode to fit dynamic libraries. + +2003-02-06 Andreas Tobler + + * libffi/src/powerpc/darwin_closure.S: + Fix alignement bug, allocate 8 bytes for the result. + * libffi/src/powerpc/aix_closure.S: + Likewise. + * libffi/src/powerpc/ffi_darwin.c: + Update stackframe description for aix/darwin_closure.S. + +2003-02-06 Jakub Jelinek + + * src/s390/ffi.c (ffi_closure_helper_SYSV): Add hidden visibility + attribute. + +2003-01-31 Christian Cornelssen , + Andreas Schwab + + * configure.in: Adjust command to source config-ml.in to account + for changes to the libffi_basedir definition. + (libffi_basedir): Remove ${srcdir} from value and include trailing + slash if nonempty. + + * configure: Regenerate. + +2003-01-29 Franz Sirl + + * src/powerpc/ppc_closure.S: Recode to fit shared libs. + +2003-01-28 Andrew Haley + + * include/ffi.h.in: Enable FFI_CLOSURES for x86_64. + * src/x86/ffi64.c (ffi_prep_closure): New. + (ffi_closure_UNIX64_inner): New. + * src/x86/unix64.S (ffi_closure_UNIX64): New. + +2003-01-27 Alexandre Oliva + + * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST. + Remove USE_LIBDIR conditional. + * Makefile.am (toolexecdir, toolexeclibdir): Don't override. + * Makefile.in, configure: Rebuilt. + +2003-01027 David Edelsohn + + * Makefile.am (TARGET_SRC_POWERPC_AIX): Fix typo. + * Makefile.in: Regenerate. + +2003-01-22 Andrew Haley + + * src/powerpc/darwin.S (_ffi_call_AIX): Add Augmentation size to + unwind info. + +2003-01-21 Andreas Tobler + + * src/powerpc/darwin.S: Add unwind info. + * src/powerpc/darwin_closure.S: Likewise. + +2003-01-14 Andrew Haley + + * src/x86/ffi64.c (ffi_prep_args): Check for void retval. + (ffi_prep_cif_machdep): Likewise. + * src/x86/unix64.S: Add unwind info. + +2003-01-14 Andreas Jaeger + + * src/ffitest.c (main): Only use ffi_closures if those are + supported. + +2003-01-13 Andreas Tobler + + * libffi/src/ffitest.c + add closure testcases + +2003-01-13 Kevin B. Hendricks + + * libffi/src/powerpc/ffi.c + fix alignment bug for float (4 byte aligned iso 8 byte) + +2003-01-09 Geoffrey Keating + + * src/powerpc/ffi_darwin.c: Remove RCS version string. + * src/powerpc/darwin.S: Remove RCS version string. + +2003-01-03 Jeff Sturm + + * include/ffi.h.in: Add closure defines for SPARC, SPARC64. + * src/ffitest.c (main): Use static storage for closure. + * src/sparc/ffi.c (ffi_prep_closure, ffi_closure_sparc_inner): New. + * src/sparc/v8.S (ffi_closure_v8): New. + * src/sparc/v9.S (ffi_closure_v9): New. + +2002-11-10 Ranjit Mathew + + * include/ffi.h.in: Added FFI_STDCALL ffi_type + enumeration for X86_WIN32. + * src/x86/win32.S: Added ffi_call_STDCALL function + definition. + * src/x86/ffi.c (ffi_call/ffi_raw_call): Added + switch cases for recognising FFI_STDCALL and + calling ffi_call_STDCALL if target is X86_WIN32. 
+ * src/ffitest.c (my_stdcall_strlen/stdcall_many): + stdcall versions of the "my_strlen" and "many" + test functions (for X86_WIN32). + Added test cases to test stdcall invocation using + these functions. + +2002-12-02 Kaz Kojima + + * src/sh/sysv.S: Add DWARF2 unwind info. + +2002-11-27 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Make section read-only. + +2002-11-26 Jim Wilson + + * src/types.c (FFI_TYPE_POINTER): Has size 8 on IA64. + +2002-11-23 H.J. Lu + + * acinclude.m4: Add dummy AM_PROG_LIBTOOL. + Include ../config/accross.m4. + * aclocal.m4; Rebuild. + * configure: Likewise. + +2002-11-15 Ulrich Weigand + + * src/s390/sysv.S (.eh_frame section): Adapt to pcrel FDE encoding. + +2002-11-11 DJ Delorie + + * configure.in: Look for common files in the right place. + +2002-10-08 Ulrich Weigand + + * src/java_raw_api.c (ffi_java_raw_to_ptrarray): Interpret + raw data as _Jv_word values, not ffi_raw. + (ffi_java_ptrarray_to_raw): Likewise. + (ffi_java_rvalue_to_raw): New function. + (ffi_java_raw_call): Call it. + (ffi_java_raw_to_rvalue): New function. + (ffi_java_translate_args): Call it. + * src/ffitest.c (closure_test_fn): Interpret return value + as ffi_arg, not int. + * src/s390/ffi.c (ffi_prep_cif_machdep): Add missing + FFI_TYPE_POINTER case. + (ffi_closure_helper_SYSV): Likewise. Also, assume return + values extended to word size. + +2002-10-02 Andreas Jaeger + + * src/x86/ffi64.c (ffi_prep_cif_machdep): Remove debug output. + +2002-10-01 Bo Thorsen + + * include/ffi.h.in: Fix i386 win32 compilation. + +2002-09-30 Ulrich Weigand + + * configure.in: Add s390x-*-linux-* target. + * configure: Regenerate. + * include/ffi.h.in: Define S390X for s390x targets. + (FFI_CLOSURES): Define for s390/s390x. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * src/prep_cif.c (ffi_prep_cif): Do not compute stack space for s390. + * src/types.c (FFI_TYPE_POINTER): Use 8-byte pointers on s390x. + * src/s390/ffi.c: Major rework of existing code. Add support for + s390x targets. Add closure support. + * src/s390/sysv.S: Likewise. + +2002-09-29 Richard Earnshaw + + * src/arm/sysv.S: Fix typo. + +2002-09-28 Richard Earnshaw + + * src/arm/sysv.S: If we don't have machine/asm.h and the pre-processor + has defined __USER_LABEL_PREFIX__, then use it in CNAME. + (ffi_call_SYSV): Handle soft-float. + +2002-09-27 Bo Thorsen + + * include/ffi.h.in: Fix multilib x86-64 support. + +2002-09-22 Kaveh R. Ghazi + + * Makefile.am (all-multi): Fix multilib parallel build. + +2002-07-19 Kaz Kojima + + * configure.in (sh[34]*-*-linux*): Add brackets. + * configure: Regenerate. + +2002-07-18 Kaz Kojima + + * Makefile.am: Add SH support. + * Makefile.in: Regenerate. + * configure.in (sh-*-linux*, sh[34]*-*-linux*): Add target. + * configure: Regenerate. + * include/ffi.h.in: Add SH support. + * src/sh/ffi.c: New file. + * src/sh/sysv.S: New file. + * src/types.c: Add SH support. + +2002-07-16 Bo Thorsen + + * src/x86/ffi64.c: New file that adds x86-64 support. + * src/x86/unix64.S: New file that handles argument setup for + x86-64. + * src/x86/sysv.S: Don't use this on x86-64. + * src/x86/ffi.c: Don't use this on x86-64. + Remove unused vars. + * src/prep_cif.c (ffi_prep_cif): Don't do stack size calculation + for x86-64. + * src/ffitest.c (struct6): New test that tests a special case in + the x86-64 ABI. + (struct7): Likewise. + (struct8): Likewise. + (struct9): Likewise. + (closure_test_fn): Silence warning about this when it's not used. + (main): Add the new tests. 
+ (main): Fix a couple of wrong casts and silence some compiler warnings. + * include/ffi.h.in: Add x86-64 ABI definition. + * fficonfig.h.in: Regenerate. + * Makefile.am: Add x86-64 support. + * configure.in: Likewise. + * Makefile.in: Regenerate. + * configure: Likewise. + +2002-06-24 Bo Thorsen + + * src/types.c: Merge settings for similar architectures. + Add x86-64 sizes and alignments. + +2002-06-23 Bo Thorsen + + * src/arm/ffi.c (ffi_prep_args): Remove unused vars. + * src/sparc/ffi.c (ffi_prep_args_v8): Likewise. + * src/mips/ffi.c (ffi_prep_args): Likewise. + * src/m68k/ffi.c (ffi_prep_args): Likewise. + +2002-07-18 H.J. Lu (hjl@gnu.org) + + * Makefile.am (TARGET_SRC_MIPS_LINUX): New. + (libffi_la_SOURCES): Support MIPS_LINUX. + (libffi_convenience_la_SOURCES): Likewise. + * Makefile.in: Regenerated. + + * configure.in (mips64*-*): Skip. + (mips*-*-linux*): New. + * configure: Regenerated. + + * src/mips/ffi.c: Include . + +2002-06-06 Ulrich Weigand + + * src/s390/sysv.S: Save/restore %r6. Add DWARF-2 unwind info. + +2002-05-27 Roger Sayle + + * src/x86/ffi.c (ffi_prep_args): Remove reference to avn. + +2002-05-27 Bo Thorsen + + * src/x86/ffi.c (ffi_prep_args): Remove unused variable and + fix formatting. + +2002-05-13 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_closure): Declare fd at + beginning of function (for older apple cc). + +2002-05-08 Alexandre Oliva + + * configure.in (ORIGINAL_LD_FOR_MULTILIBS): Preserve LD at + script entry, and set LD to it when configuring multilibs. + * configure: Rebuilt. + +2002-05-05 Jason Thorpe + + * configure.in (sparc64-*-netbsd*): Add target. + (sparc-*-netbsdelf*): Likewise. + * configure: Regenerate. + +2002-04-28 David S. Miller + + * configure.in, configure: Fix SPARC test in previous change. + +2002-04-29 Gerhard Tonn + + * Makefile.am: Add Linux for S/390 support. + * Makefile.in: Regenerate. + * configure.in: Add Linux for S/390 support. + * configure: Regenerate. + * include/ffi.h.in: Add Linux for S/390 support. + * src/s390/ffi.c: New file from libffi CVS tree. + * src/s390/sysv.S: New file from libffi CVS tree. + +2002-04-28 Jakub Jelinek + + * configure.in (HAVE_AS_SPARC_UA_PCREL): Check for working + %r_disp32(). + * src/sparc/v8.S: Use it. + * src/sparc/v9.S: Likewise. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + +2002-04-08 Hans Boehm + + * src/java_raw_api.c (ffi_java_raw_size): Handle FFI_TYPE_DOUBLE + correctly. + * src/ia64/unix.S: Add unwind information. Fix comments. + Save sp in a way that's compatible with unwind info. + (ffi_call_unix): Correctly restore sp in all cases. + * src/ia64/ffi.c: Add, fix comments. + +2002-04-08 Jakub Jelinek + + * src/sparc/v8.S: Make .eh_frame dependent on target word size. + +2002-04-06 Jason Thorpe + + * configure.in (alpha*-*-netbsd*): Add target. + * configure: Regenerate. + +2002-04-04 Jeff Sturm + + * src/sparc/v8.S: Add unwind info. + * src/sparc/v9.S: Likewise. + +2002-03-30 Krister Walfridsson + + * configure.in: Enable i*86-*-netbsdelf*. + * configure: Rebuilt. + +2002-03-29 David Billinghurst + + PR other/2620 + * src/mips/n32.s: Delete + * src/mips/o32.s: Delete + +2002-03-21 Loren J. Rittle + + * configure.in: Enable alpha*-*-freebsd*. + * configure: Rebuilt. + +2002-03-17 Bryce McKinlay + + * Makefile.am: libfficonvenience -> libffi_convenience. + * Makefile.in: Rebuilt. + + * Makefile.am: Define ffitest_OBJECTS. + * Makefile.in: Rebuilt. + +2002-03-07 Andreas Tobler + David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX closure files. 
+ (TARGET_SRC_POWERPC_AIX): Add aix_closure.S. + (TARGET_SRC_POWERPC_DARWIN): Add darwin_closure.S. + * Makefile.in: Regenerate. + * include/ffi.h.in: Add AIX and Darwin closure definitions. + * src/powerpc/ffi_darwin.c (ffi_prep_closure): New function. + (flush_icache, flush_range): New functions. + (ffi_closure_helper_DARWIN): New function. + * src/powerpc/aix_closure.S: New file. + * src/powerpc/darwin_closure.S: New file. + +2002-02-24 Jeff Sturm + + * include/ffi.h.in: Add typedef for ffi_arg. + * src/ffitest.c (main): Declare rint with ffi_arg. + +2002-02-21 Andreas Tobler + + * src/powerpc/ffi_darwin.c (ffi_prep_args): Skip appropriate + number of GPRs for floating-point arguments. + +2002-01-31 Anthony Green + + * configure: Rebuilt. + * configure.in: Replace CHECK_SIZEOF and endian tests with + cross-compiler friendly macros. + * aclocal.m4 (AC_COMPILE_CHECK_SIZEOF, AC_C_BIGENDIAN_CROSS): New + macros. + +2002-01-18 David Edelsohn + + * src/powerpc/darwin.S (_ffi_call_AIX): New. + * src/powerpc/aix.S (ffi_call_DARWIN): New. + +2002-01-17 David Edelsohn + + * Makefile.am (EXTRA_DIST): Add Darwin and AIX files. + (TARGET_SRC_POWERPC_AIX): New. + (POWERPC_AIX): New stanza. + * Makefile.in: Regenerate. + * configure.in: Add AIX case. + * configure: Regenerate. + * include/ffi.h.in (ffi_abi): Add FFI_AIX. + * src/powerpc/ffi_darwin.c (ffi_status): Use "long" to scale frame + size. Fix "long double" support. + (ffi_call): Add FFI_AIX case. + * src/powerpc/aix.S: New. + +2001-10-09 John Hornkvist + + Implement Darwin PowerPC ABI. + * configure.in: Handle powerpc-*-darwin*. + * Makefile.am: Set source files for POWERPC_DARWIN. + * configure: Rebuilt. + * Makefile.in: Rebuilt. + * include/ffi.h.in: Define FFI_DARWIN and FFI_DEFAULT_ABI for + POWERPC_DARWIN. + * src/powerpc/darwin.S: New file. + * src/powerpc/ffi_darwin.c: New file. + +2001-10-07 Joseph S. Myers + + * src/x86/ffi.c: Fix spelling error of "separate" as "seperate". + +2001-07-16 Rainer Orth + + * src/x86/sysv.S: Avoid gas-only .balign directive. + Use C style comments. + +2001-07-16 Rainer Orth + + * src/alpha/ffi.c (ffi_prep_closure): Avoid gas-only mnemonic. + Fixes PR bootstrap/3563. + +2001-06-26 Rainer Orth + + * src/alpha/osf.S (ffi_closure_osf): Use .rdata for ECOFF. + +2001-06-25 Rainer Orth + + * configure.in: Recognize sparc*-sun-* host. + * configure: Regenerate. + +2001-06-06 Andrew Haley + + * src/alpha/osf.S (__FRAME_BEGIN__): Conditionalize for ELF. + +2001-06-03 Andrew Haley + + * src/alpha/osf.S: Add unwind info. + * src/powerpc/sysv.S: Add unwind info. + * src/powerpc/ppc_closure.S: Likewise. + +2000-05-31 Jeff Sturm + + * configure.in: Fix AC_ARG_ENABLE usage. + * configure: Rebuilt. + +2001-05-06 Bryce McKinlay + + * configure.in: Remove warning about beta code. + * configure: Rebuilt. + +2001-04-25 Hans Boehm + + * src/ia64/unix.S: Restore stack pointer when returning from + ffi_closure_UNIX. + * src/ia64/ffi.c: Fix typo in comment. + +2001-04-18 Jim Wilson + + * src/ia64/unix.S: Delete unnecessary increment and decrement of loc2 + to eliminate RAW DV. + +2001-04-12 Bryce McKinlay + + * Makefile.am: Make a libtool convenience library. + * Makefile.in: Rebuilt. + +2001-03-29 Bryce McKinlay + + * configure.in: Use different syntax for subdirectory creation. + * configure: Rebuilt. + +2001-03-27 Jon Beniston + + * configure.in: Added X86_WIN32 target (Win32, CygWin, MingW). + * configure: Rebuilt. + * Makefile.am: Added X86_WIN32 target support. + * Makefile.in: Rebuilt. 
+ + * include/ffi.h.in: Added X86_WIN32 target support. + + * src/ffitest.c: Doesn't run structure tests for X86_WIN32 targets. + * src/types.c: Added X86_WIN32 target support. + + * src/x86/win32.S: New file. Based on sysv.S, but with EH + stuff removed and made to work with CygWin's gas. + +2001-03-26 Bryce McKinlay + + * configure.in: Make target subdirectory in build dir. + * Makefile.am: Override suffix based rules to specify correct output + subdirectory. + * Makefile.in: Rebuilt. + * configure: Rebuilt. + +2001-03-23 Kevin B Hendricks + + * src/powerpc/ppc_closure.S: New file. + * src/powerpc/ffi.c (ffi_prep_args): Fixed ABI compatibility bug + involving long long and register pairs. + (ffi_prep_closure): New function. + (flush_icache): Likewise. + (ffi_closure_helper_SYSV): Likewise. + * include/ffi.h.in (FFI_CLOSURES): Define on PPC. + (FFI_TRAMPOLINE_SIZE): Likewise. + (FFI_NATIVE_RAW_API): Likewise. + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Added src/powerpc/ppc_closure.S. + (TARGET_SRC_POWERPC): Likewise. + +2001-03-19 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (ffitest_LDFLAGS): New macro. + +2001-03-02 Nick Clifton + + * include/ffi.h.in: Remove RCS ident string. + * include/ffi_mips.h: Remove RCS ident string. + * src/debug.c: Remove RCS ident string. + * src/ffitest.c: Remove RCS ident string. + * src/prep_cif.c: Remove RCS ident string. + * src/types.c: Remove RCS ident string. + * src/alpha/ffi.c: Remove RCS ident string. + * src/alpha/osf.S: Remove RCS ident string. + * src/arm/ffi.c: Remove RCS ident string. + * src/arm/sysv.S: Remove RCS ident string. + * src/mips/ffi.c: Remove RCS ident string. + * src/mips/n32.S: Remove RCS ident string. + * src/mips/o32.S: Remove RCS ident string. + * src/sparc/ffi.c: Remove RCS ident string. + * src/sparc/v8.S: Remove RCS ident string. + * src/sparc/v9.S: Remove RCS ident string. + * src/x86/ffi.c: Remove RCS ident string. + * src/x86/sysv.S: Remove RCS ident string. + +2001-02-08 Joseph S. Myers + + * include/ffi.h.in: Change sourceware.cygnus.com references to + gcc.gnu.org. + +2000-12-09 Richard Henderson + + * src/alpha/ffi.c (ffi_call): Simplify struct return test. + (ffi_closure_osf_inner): Index rather than increment avalue + and arg_types. Give ffi_closure_osf the raw return value type. + * src/alpha/osf.S (ffi_closure_osf): Handle return value type + promotion. + +2000-12-07 Richard Henderson + + * src/raw_api.c (ffi_translate_args): Fix typo. + (ffi_prep_closure): Likewise. + + * include/ffi.h.in [ALPHA]: Define FFI_CLOSURES and + FFI_TRAMPOLINE_SIZE. + * src/alpha/ffi.c (ffi_prep_cif_machdep): Adjust minimal + cif->bytes for new ffi_call_osf implementation. + (ffi_prep_args): Absorb into ... + (ffi_call): ... here. Do all stack allocation here and + avoid a callback function. + (ffi_prep_closure, ffi_closure_osf_inner): New. + * src/alpha/osf.S (ffi_call_osf): Reimplement with no callback. + (ffi_closure_osf): New. + +2000-09-10 Alexandre Oliva + + * config.guess, config.sub, install-sh: Removed. + * ltconfig, ltmain.sh, missing, mkinstalldirs: Likewise. + * Makefile.in: Rebuilt. + + * acinclude.m4: Include libtool macros from the top level. + * aclocal.m4, configure: Rebuilt. + +2000-08-22 Alexandre Oliva + + * configure.in [i*86-*-freebsd*] (TARGET, TARGETDIR): Set. + * configure: Rebuilt. + +2000-05-11 Scott Bambrough + + * libffi/src/arm/sysv.S (ffi_call_SYSV): Doubles are not saved to + memory correctly. Use conditional instructions, not branches where + possible. 
+ +2000-05-04 Tom Tromey + + * configure: Rebuilt. + * configure.in: Match `arm*-*-linux-*'. + From Chris Dornan . + +2000-04-28 Jakub Jelinek + + * Makefile.am (SUBDIRS): Define. + (AM_MAKEFLAGS): Likewise. + (Multilib support.): Add section. + * Makefile.in: Rebuilt. + * ltconfig (extra_compiler_flags, extra_compiler_flags_value): + New variables. Set for gcc using -print-multi-lib. Export them + to libtool. + (sparc64-*-linux-gnu*): Use libsuff 64 for search paths. + * ltmain.sh (B|b|V): Don't throw away gcc's -B, -b and -V options + for -shared links. + (extra_compiler_flags_value, extra_compiler_flags): Check these + for extra compiler options which need to be passed down in + compiler_flags. + +2000-04-16 Anthony Green + + * configure: Rebuilt. + * configure.in: Change i*86-pc-linux* to i*86-*-linux*. + +2000-04-14 Jakub Jelinek + + * include/ffi.h.in (SPARC64): Define for 64bit SPARC builds. + Set SPARC FFI_DEFAULT_ABI based on SPARC64 define. + * src/sparc/ffi.c (ffi_prep_args_v8): Renamed from ffi_prep_args. + Replace all void * sizeofs with sizeof(int). + Only compare type with FFI_TYPE_LONGDOUBLE if LONGDOUBLE is + different than DOUBLE. + Remove FFI_TYPE_SINT32 and FFI_TYPE_UINT32 cases (handled elsewhere). + (ffi_prep_args_v9): New function. + (ffi_prep_cif_machdep): Handle V9 ABI and long long on V8. + (ffi_V9_return_struct): New function. + (ffi_call): Handle FFI_V9 ABI from 64bit code and FFI_V8 ABI from + 32bit code (not yet cross-arch calls). + * src/sparc/v8.S: Add struct return delay nop. + Handle long long. + * src/sparc/v9.S: New file. + * src/prep_cif.c (ffi_prep_cif): Return structure pointer + is used on sparc64 only for structures larger than 32 bytes. + Pass by reference for structures is done for structure arguments + larger than 16 bytes. + * src/ffitest.c (main): Use 64bit rint on sparc64. + Run long long tests on sparc. + * src/types.c (FFI_TYPE_POINTER): Pointer is 64bit on alpha and + sparc64. + (FFI_TYPE_LONGDOUBLE): long double is 128 bit aligned to 128 bits + on sparc64. + * configure.in (sparc-*-linux*): New supported target. + (sparc64-*-linux*): Likewise. + * configure: Rebuilt. + * Makefile.am: Add v9.S to SPARC files. + * Makefile.in: Likewise. + (LINK): Surround $(CCLD) into double quotes, so that multilib + compiles work correctly. + +2000-04-04 Alexandre Petit-Bianco + + * configure: Rebuilt. + * configure.in: (i*86-*-solaris*): New libffi target. Patch + proposed by Bryce McKinlay. + +2000-03-20 Tom Tromey + + * Makefile.in: Hand edit for java_raw_api.lo. + +2000-03-08 Bryce McKinlay + + * config.guess, config.sub: Update from the gcc tree. + Fix for PR libgcj/168. + +2000-03-03 Tom Tromey + + * Makefile.in: Fixed ia64 by hand. + + * configure: Rebuilt. + * configure.in (--enable-multilib): New option. + (libffi_basedir): New subst. + (AC_OUTPUT): Added multilib code. + +2000-03-02 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (TARGET_SRC_IA64): Use `ia64', not `alpha', as + directory name. + +2000-02-25 Hans Boehm + + * src/ia64/ffi.c, src/ia64/ia64_flags.h, src/ia64/unix.S: New + files. + * src/raw_api.c (ffi_translate_args): Fixed typo in argument + list. + (ffi_prep_raw_closure): Use ffi_translate_args, not + ffi_closure_translate. + * src/java_raw_api.c: New file. + * src/ffitest.c (closure_test_fn): New function. + (main): Define `rint' as long long on IA64. Added new test when + FFI_CLOSURES is defined. + * include/ffi.h.in (ALIGN): Use size_t, not unsigned. + (ffi_abi): Recognize IA64. + (ffi_raw): Added `flt' field. 
+ Added "Java raw API" code. + * configure.in: Recognize ia64. + * Makefile.am (TARGET_SRC_IA64): New macro. + (libffi_la_common_SOURCES): Added java_raw_api.c. + (libffi_la_SOURCES): Define in IA64 case. + +2000-01-04 Tom Tromey + + * Makefile.in: Rebuilt with newer automake. + +1999-12-31 Tom Tromey + + * Makefile.am (INCLUDES): Added -I$(top_srcdir)/src. + +1999-09-01 Tom Tromey + + * include/ffi.h.in: Removed PACKAGE and VERSION defines and + undefs. + * fficonfig.h.in: Rebuilt. + * configure: Rebuilt. + * configure.in: Pass 3rd argument to AM_INIT_AUTOMAKE. + Use AM_PROG_LIBTOOL (automake 1.4 compatibility). + * acconfig.h: Don't #undef PACKAGE or VERSION. + +1999-08-09 Anthony Green + + * include/ffi.h.in: Try to work around messy header problem + with PACKAGE and VERSION. + + * configure: Rebuilt. + * configure.in: Change version to 2.00-beta. + + * fficonfig.h.in: Rebuilt. + * acconfig.h (FFI_NO_STRUCTS, FFI_NO_RAW_API): Define. + + * src/x86/ffi.c (ffi_raw_call): Rename. + +1999-08-02 Kresten Krab Thorup + + * src/x86/ffi.c (ffi_closure_SYSV): New function. + (ffi_prep_incoming_args_SYSV): Ditto. + (ffi_prep_closure): Ditto. + (ffi_closure_raw_SYSV): Ditto. + (ffi_prep_raw_closure): More ditto. + (ffi_call_raw): Final ditto. + + * include/ffi.h.in: Add definitions for closure and raw API. + + * src/x86/ffi.c (ffi_prep_cif_machdep): Added case for + FFI_TYPE_UINT64. + + * Makefile.am (libffi_la_common_SOURCES): Added raw_api.c + + * src/raw_api.c: New file. + + * include/ffi.h.in (ffi_raw): New type. + (UINT_ARG, SINT_ARG): New defines. + (ffi_closure, ffi_raw_closure): New types. + (ffi_prep_closure, ffi_prep_raw_closure): New declarations. + + * configure.in: Add check for endianness and sizeof void*. + + * src/x86/sysv.S (ffi_call_SYSV): Call fixup routine via argument, + instead of directly. + + * configure: Rebuilt. + +Thu Jul 8 14:28:42 1999 Anthony Green + + * configure.in: Add x86 and powerpc BeOS configurations. + From Makoto Kato . + +1999-05-09 Anthony Green + + * configure.in: Add warning about this being beta code. + Remove src/Makefile.am from the picture. + * configure: Rebuilt. + + * Makefile.am: Move logic from src/Makefile.am. Add changes + to support libffi as a target library. + * Makefile.in: Rebuilt. + + * aclocal.m4, config.guess, config.sub, ltconfig, ltmain.sh: + Upgraded to new autoconf, automake, libtool. + + * README: Tweaks. + + * LICENSE: Update copyright date. + + * src/Makefile.am, src/Makefile.in: Removed. + +1998-11-29 Anthony Green + + * include/ChangeLog: Removed. + * src/ChangeLog: Removed. + * src/mips/ChangeLog: Removed. + * src/sparc/ChangeLog: Remboved. + * src/x86/ChangeLog: Removed. + + * ChangeLog.v1: Created. + +============================================================================= +From the old ChangeLog.libffi file.... + +2011-02-08 Andreas Tobler + + * testsuite/lib/libffi.exp: Tweak for stand-alone mode. + +2009-12-25 Samuli Suominen + + * configure.ac: Undefine _AC_ARG_VAR_PRECIOUS for autoconf 2.64. + * configure: Rebuilt. + * fficonfig.h.in: Rebuilt. + +2009-06-16 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + * testsuite/libffi.call/huge_struct.c: Ad x86 XFAILs. + * testsuite/libffi.call/float2.c: Fix dg-excess-errors. 
+ * testsuite/libffi.call/ffitest.h, + testsuite/libffi.special/ffitestcxx.h (PRIdLL, PRIuLL): Define. + +2009-06-12 Andrew Haley + + * testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_medium2.c: Fix printf format + specifiers. + testsuite/libffi.special/unwindtest.cc: include stdint.h. + +2009-06-11 Timothy Wall + + * Makefile.am, + configure.ac, + include/ffi.h.in, + include/ffi_common.h, + src/closures.c, + src/dlmalloc.c, + src/x86/ffi.c, + src/x86/ffitarget.h, + src/x86/win64.S (new), + README: Added win64 support (mingw or MSVC) + * Makefile.in, + include/Makefile.in, + man/Makefile.in, + testsuite/Makefile.in, + configure, + aclocal.m4: Regenerated + * ltcf-c.sh: properly escape cygwin/w32 path + * man/ffi_call.3: Clarify size requirements for return value. + * src/x86/ffi64.c: Fix filename in comment. + * src/x86/win32.S: Remove unused extern. + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_12byte.c, + testsuite/libffi.call/cls_16byte.c, + testsuite/libffi.call/cls_18byte.c, + testsuite/libffi.call/cls_19byte.c, + testsuite/libffi.call/cls_1_1byte.c, + testsuite/libffi.call/cls_20byte.c, + testsuite/libffi.call/cls_20byte1.c, + testsuite/libffi.call/cls_24byte.c, + testsuite/libffi.call/cls_2byte.c, + testsuite/libffi.call/cls_3_1byte.c, + testsuite/libffi.call/cls_3byte1.c, + testsuite/libffi.call/cls_3byte2.c, + testsuite/libffi.call/cls_4_1byte.c, + testsuite/libffi.call/cls_4byte.c, + testsuite/libffi.call/cls_5_1_byte.c, + testsuite/libffi.call/cls_5byte.c, + testsuite/libffi.call/cls_64byte.c, + testsuite/libffi.call/cls_6_1_byte.c, + testsuite/libffi.call/cls_6byte.c, + testsuite/libffi.call/cls_7_1_byte.c, + testsuite/libffi.call/cls_7byte.c, + testsuite/libffi.call/cls_8byte.c, + testsuite/libffi.call/cls_9byte1.c, + testsuite/libffi.call/cls_9byte2.c, + testsuite/libffi.call/cls_align_double.c, + testsuite/libffi.call/cls_align_float.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_align_sint16.c, + testsuite/libffi.call/cls_align_sint32.c, + testsuite/libffi.call/cls_align_sint64.c, + testsuite/libffi.call/cls_align_uint16.c, + testsuite/libffi.call/cls_align_uint32.c, + testsuite/libffi.call/cls_align_uint64.c, + testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_float.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_multi_schar.c, + testsuite/libffi.call/cls_multi_sshort.c, + testsuite/libffi.call/cls_multi_sshortchar.c, + testsuite/libffi.call/cls_multi_uchar.c, + testsuite/libffi.call/cls_multi_ushort.c, + testsuite/libffi.call/cls_multi_ushortchar.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/cls_schar.c, + testsuite/libffi.call/cls_sint.c, + testsuite/libffi.call/cls_sshort.c, + testsuite/libffi.call/cls_uchar.c, + 
testsuite/libffi.call/cls_uint.c, + testsuite/libffi.call/cls_ulonglong.c, + testsuite/libffi.call/cls_ushort.c, + testsuite/libffi.call/err_bad_abi.c, + testsuite/libffi.call/err_bad_typedef.c, + testsuite/libffi.call/float2.c, + testsuite/libffi.call/huge_struct.c, + testsuite/libffi.call/nested_struct.c, + testsuite/libffi.call/nested_struct1.c, + testsuite/libffi.call/nested_struct10.c, + testsuite/libffi.call/nested_struct2.c, + testsuite/libffi.call/nested_struct3.c, + testsuite/libffi.call/nested_struct4.c, + testsuite/libffi.call/nested_struct5.c, + testsuite/libffi.call/nested_struct6.c, + testsuite/libffi.call/nested_struct7.c, + testsuite/libffi.call/nested_struct8.c, + testsuite/libffi.call/nested_struct9.c, + testsuite/libffi.call/problem1.c, + testsuite/libffi.call/return_ldl.c, + testsuite/libffi.call/return_ll1.c, + testsuite/libffi.call/stret_large.c, + testsuite/libffi.call/stret_large2.c, + testsuite/libffi.call/stret_medium.c, + testsuite/libffi.call/stret_medium2.c, + testsuite/libffi.special/unwindtest.cc: use ffi_closure_alloc instead + of checking for MMAP. Use intptr_t instead of long casts. + +2009-06-04 Andrew Haley + + * src/powerpc/ffitarget.h: Fix misapplied merge from gcc. + +2009-06-04 Andrew Haley + + * src/mips/o32.S, + src/mips/n32.S: Fix licence formatting. + +2009-06-04 Andrew Haley + + * src/x86/darwin.S: Fix licence formatting. + src/x86/win32.S: Likewise. + src/sh64/sysv.S: Likewise. + src/sh/sysv.S: Likewise. + +2009-06-04 Andrew Haley + + * src/sh64/ffi.c: Remove lint directives. Was missing from merge + of Andreas Tobler's patch from 2006-04-22. + +2009-06-04 Andrew Haley + + * src/sh/ffi.c: Apply missing hunk from Alexandre Oliva's patch of + 2007-03-07. + +2008-12-26 Timothy Wall + + * testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_align_longdouble.c, + testsuite/libffi.call/cls_align_longdouble_split.c, + testsuite/libffi.call/cls_align_longdouble_split2.c: mark expected + failures on x86_64 cygwin/mingw. + +2008-12-22 Timothy Wall + + * testsuite/libffi.call/closure_fn0.c, + testsuite/libffi.call/closure_fn1.c, + testsuite/libffi.call/closure_fn2.c, + testsuite/libffi.call/closure_fn3.c, + testsuite/libffi.call/closure_fn4.c, + testsuite/libffi.call/closure_fn5.c, + testsuite/libffi.call/closure_fn6.c, + testsuite/libffi.call/closure_loc_fn0.c, + testsuite/libffi.call/closure_stdcall.c, + testsuite/libffi.call/cls_align_pointer.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c: use portable cast from + pointer to integer (intptr_t). + * testsuite/libffi.call/cls_longdouble.c: disable for win64. + +2008-12-19 Anthony Green + + * configure.ac: Bump version to 3.0.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-11-11 Anthony Green + + * configure.ac: Bump version to 3.0.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-08-25 Andreas Tobler + + * src/powerpc/ffitarget.h (ffi_abi): Add FFI_LINUX and + FFI_LINUX_SOFT_FLOAT to the POWERPC_FREEBSD enum. + Add note about flag bits used for FFI_SYSV_TYPE_SMALL_STRUCT. + Adjust copyright notice. + * src/powerpc/ffi.c: Add two new flags to indicate if we have one + register or two register to use for FFI_SYSV structs. + (ffi_prep_cif_machdep): Pass the right register flag introduced above. 
+ (ffi_closure_helper_SYSV): Fix the return type for + FFI_SYSV_TYPE_SMALL_STRUCT. Comment. + Adjust copyright notice. + +2008-07-24 Anthony Green + + * testsuite/libffi.call/cls_dbls_struct.c, + testsuite/libffi.call/cls_double_va.c, + testsuite/libffi.call/cls_longdouble.c, + testsuite/libffi.call/cls_longdouble_va.c, + testsuite/libffi.call/cls_pointer.c, + testsuite/libffi.call/cls_pointer_stack.c, + testsuite/libffi.call/err_bad_abi.c: Clean up failures from + compiler warnings. + +2008-07-17 Anthony Green + + * configure.ac: Bump version to 3.0.6. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. Add documentation. + * README: Update for new release. + +2008-07-16 Kaz Kojima + + * src/sh/ffi.c (ffi_prep_closure_loc): Turn INSN into an unsigned + int. + +2008-07-16 Kaz Kojima + + * src/sh/sysv.S: Add .note.GNU-stack on Linux. + * src/sh64/sysv.S: Likewise. + +2008-04-03 Anthony Green + + * libffi.pc.in (Libs): Add -L${libdir}. + * configure.ac: Bump version to 3.0.5. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-04-03 Anthony Green + Xerces Ranby + + * include/ffi.h.in: Wrap definition of target architecture to + protect from double definitions. + +2008-03-22 Moriyoshi Koizumi + + * src/x86/ffi.c (ffi_prep_closure_loc): Fix for bug revealed in + closure_loc_fn0.c. + * testsuite/libffi.call/closure_loc_fn0.c (closure_loc_test_fn0): + New test. + +2008-03-04 Anthony Green + Blake Chaffin + hos@tamanegi.org + + * testsuite/libffi.call/cls_align_longdouble_split2.c + testsuite/libffi.call/cls_align_longdouble_split.c + testsuite/libffi.call/cls_dbls_struct.c + testsuite/libffi.call/cls_double_va.c + testsuite/libffi.call/cls_longdouble.c + testsuite/libffi.call/cls_longdouble_va.c + testsuite/libffi.call/cls_pointer.c + testsuite/libffi.call/cls_pointer_stack.c + testsuite/libffi.call/err_bad_abi.c + testsuite/libffi.call/err_bad_typedef.c + testsuite/libffi.call/huge_struct.c + testsuite/libffi.call/stret_large2.c + testsuite/libffi.call/stret_large.c + testsuite/libffi.call/stret_medium2.c + testsuite/libffi.call/stret_medium.c: New tests from Apple. + +2008-02-26 Jakub Jelinek + Anthony Green + + * src/alpha/osf.S: Add .note.GNU-stack on Linux. + * src/s390/sysv.S: Likewise. + * src/powerpc/linux64.S: Likewise. + * src/powerpc/linux64_closure.S: Likewise. + * src/powerpc/ppc_closure.S: Likewise. + * src/powerpc/sysv.S: Likewise. + * src/x86/unix64.S: Likewise. + * src/x86/sysv.S: Likewise. + * src/sparc/v8.S: Likewise. + * src/sparc/v9.S: Likewise. + * src/m68k/sysv.S: Likewise. + * src/ia64/unix.S: Likewise. + * src/arm/sysv.S: Likewise. + +2008-02-26 Anthony Green + Thomas Heller + + * src/x86/ffi.c (ffi_closure_SYSV_inner): Change C++ comment to C + comment. + +2008-02-26 Anthony Green + Thomas Heller + + * include/ffi.h.in: Change void (*)() to void (*)(void). + +2008-02-26 Anthony Green + Thomas Heller + + * src/alpha/ffi.c: Change void (*)() to void (*)(void). + src/alpha/osf.S, src/arm/ffi.c, src/frv/ffi.c, src/ia64/ffi.c, + src/ia64/unix.S, src/java_raw_api.c, src/m32r/ffi.c, + src/mips/ffi.c, src/pa/ffi.c, src/pa/hpux32.S, src/pa/linux.S, + src/powerpc/ffi.c, src/powerpc/ffi_darwin.c, src/raw_api.c, + src/s390/ffi.c, src/sh/ffi.c, src/sh64/ffi.c, src/sparc/ffi.c, + src/x86/ffi.c, src/x86/unix64.S, src/x86/darwin64.S, + src/x86/ffi64.c: Ditto. + +2008-02-24 Anthony Green + + * configure.ac: Accept openbsd*, not just openbsd. 
+ Bump version to 3.0.4. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-22 Anthony Green + + * README: Clean up list of tested platforms. + +2008-02-22 Anthony Green + + * configure.ac: Bump version to 3.0.3. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. Clean up test docs. + +2008-02-22 Bjoern Koenig + Andreas Tobler + + * configure.ac: Add amd64-*-freebsd* target. + * configure: Regenerate. + +2008-02-22 Thomas Heller + + * configure.ac: Add x86 OpenBSD support. + * configure: Rebuilt. + +2008-02-21 Thomas Heller + + * README: Change "make test" to "make check". + +2008-02-21 Anthony Green + + * configure.ac: Bump version to 3.0.2. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-21 Björn König + + * src/x86/freebsd.S: New file. + * configure.ac: Add x86 FreeBSD support. + * Makefile.am: Ditto. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.1. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * libtool-version: Increment revision. + * README: Update for new release. + +2008-02-15 David Daney + + * src/mips/ffi.c: Remove extra '>' from include directive. + (ffi_prep_closure_loc): Use clear_location instead of tramp. + +2008-02-15 Anthony Green + + * configure.ac: Bump version to 3.0.0. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 David Daney + + * src/mips/ffi.c (USE__BUILTIN___CLEAR_CACHE): + Define (conditionally), and use it to include cachectl.h. + (ffi_prep_closure_loc): Fix cache flushing. + * src/mips/ffitarget.h (_ABIN32, _ABI64, _ABIO32): Define. + +2008-02-15 Anthony Green + + * man/ffi_call.3, man/ffi_prep_cif.3, man/ffi.3: + Update dates and remove all references to ffi_prep_closure. + * configure.ac: Bump version to 2.99.9. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-15 Anthony Green + + * man/ffi_prep_closure.3: Delete. + * man/Makefile.am (EXTRA_DIST): Remove ffi_prep_closure.3. + (man_MANS): Ditto. + * man/Makefile.in: Rebuilt. + * configure.ac: Bump version to 2.99.8. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.7. + * configure, doc/stamp-vti, doc/version.texi: Rebuilt. + * include/ffi.h.in LICENSE src/debug.c src/closures.c + src/ffitest.c src/s390/sysv.S src/s390/ffitarget.h + src/types.c src/m68k/ffitarget.h src/raw_api.c src/frv/ffi.c + src/frv/ffitarget.h src/sh/ffi.c src/sh/sysv.S + src/sh/ffitarget.h src/powerpc/ffitarget.h src/pa/ffi.c + src/pa/ffitarget.h src/pa/linux.S src/java_raw_api.c + src/cris/ffitarget.h src/x86/ffi.c src/x86/sysv.S + src/x86/unix64.S src/x86/win32.S src/x86/ffitarget.h + src/x86/ffi64.c src/x86/darwin.S src/ia64/ffi.c + src/ia64/ffitarget.h src/ia64/ia64_flags.h src/ia64/unix.S + src/sparc/ffi.c src/sparc/v9.S src/sparc/ffitarget.h + src/sparc/v8.S src/alpha/ffi.c src/alpha/ffitarget.h + src/alpha/osf.S src/sh64/ffi.c src/sh64/sysv.S + src/sh64/ffitarget.h src/mips/ffi.c src/mips/ffitarget.h + src/mips/n32.S src/mips/o32.S src/arm/ffi.c src/arm/sysv.S + src/arm/ffitarget.h src/prep_cif.c: Update license text. + +2008-02-14 Anthony Green + + * README: Update tested platforms. + * configure.ac: Bump version to 2.99.6. + * configure: Rebuilt. 
+ +2008-02-14 Anthony Green + + * configure.ac: Bump version to 2.99.5. + * configure: Rebuilt. + * Makefile.am (EXTRA_DIST): Add darwin64.S + * Makefile.in: Rebuilt. + * testsuite/lib/libffi-dg.exp: Remove libstdc++ bits from GCC tree. + * LICENSE: Update WARRANTY. + +2008-02-14 Anthony Green + + * libffi.pc.in (libdir): Fix libdir definition. + * configure.ac: Bump version to 2.99.4. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * README: Update. + * libffi.info: New file. + * doc/stamp-vti: New file. + * configure.ac: Bump version to 2.99.3. + * configure: Rebuilt. + +2008-02-14 Anthony Green + + * Makefile.am (SUBDIRS): Add man dir. + * Makefile.in: Rebuilt. + * configure.ac: Create Makefile. + * configure: Rebuilt. + * man/ffi_call.3 man/ffi_prep_cif.3 man/ffi_prep_closure.3 + man/Makefile.am man/Makefile.in: New files. + +2008-02-14 Tom Tromey + + * aclocal.m4, Makefile.in, configure, fficonfig.h.in: Rebuilt. + * mdate-sh, texinfo.tex: New files. + * Makefile.am (info_TEXINFOS): New variable. + * doc/libffi.texi: New file. + * doc/version.texi: Likewise. + +2008-02-14 Anthony Green + + * Makefile.am (AM_CFLAGS): Don't compile with -D$(TARGET). + (lib_LTLIBRARIES): Define. + (toolexeclib_LIBRARIES): Undefine. + * Makefile.in: Rebuilt. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + +2008-02-14 Anthony Green + + * libffi.pc.in: Use @PACKAGE_NAME@ and @PACKAGE_VERSION@. + * configure.ac: Reset version to 2.99.1. + * configure.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Add ChangeLog.libffi. + * Makefile.in: Rebuilt. + * LICENSE: Update copyright notice. + +2008-02-14 Anthony Green + + * include/Makefile.am (nodist_includes_HEADERS): Define. Don't + distribute ffitarget.h or ffi.h from the build include dir. + * Makefile.in: Rebuilt. + +2008-02-14 Anthony Green + + * include/Makefile.am (includesdir): Install headers under libdir. + (pkgconfigdir): Define. Install libffi.pc. + * include/Makefile.in: Rebuilt. + * libffi.pc.in: Create. + * libtool-version: Increment CURRENT + * configure.ac: Add libffi.pc.in + * configure: Rebuilt. + +2008-02-03 Anthony Green + + * include/Makefile.am (includesdir): Fix header install with + DESTDIR. + * include/Makefile.in: Rebuilt. + +2008-02-03 Timothy Wall + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL): Calculate jump return + offset based on code pointer, not data pointer. + +2008-02-01 Anthony Green + + * include/Makefile.am: Fix header installs. + * Makefile.am: Ditto. + * include/Makefile.in: Rebuilt. + * Makefile.in: Ditto. + +2008-02-01 Anthony Green + + * src/x86/ffi.c (FFI_INIT_TRAMPOLINE_STDCALL, + FFI_INIT_TRAMPOLINE): Revert my broken changes to twall's last + patch. + +2008-01-31 Anthony Green + + * Makefile.am (EXTRA_DIST): Add missing files. + * testsuite/Makefile.am: Ditto. + * Makefile.in, testsuite/Makefile.in: Rebuilt. + +2008-01-31 Timothy Wall + + * testsuite/libffi.call/closure_stdcall.c: Add test for stdcall + closures. + * src/x86/ffitarget.h: Increase size of trampoline for stdcall + closures. + * src/x86/win32.S: Add assembly for stdcall closure. + * src/x86/ffi.c: Initialize stdcall closure trampoline. + +2008-01-30 H.J. Lu + + PR libffi/34612 + * src/x86/sysv.S (ffi_closure_SYSV): Pop 4 byte from stack when + returning struct. + + * testsuite/libffi.call/call.exp: Add "-O2 -fomit-frame-pointer" + tests. + +2008-01-30 Anthony Green + + * Makefile.am, include/Makefile.am: Move headers to + libffi_la_SOURCES for new automake. + * Makefile.in, include/Makefile.in: Rebuilt. 
+ + * testsuite/lib/wrapper.exp: Copied from gcc tree to allow for + execution outside of gcc tree. + * testsuite/lib/target-libpath.exp: Ditto. + + * testsuite/lib/libffi-dg.exp: Many changes to allow for execution + outside of gcc tree. + + +============================================================================= +From the old ChangeLog.libgcj file.... + +2004-01-14 Kelley Cook + + * configure.in: Add in AC_PREREQ(2.13) + +2003-02-20 Alexandre Oliva + + * configure.in: Propagate ORIGINAL_LD_FOR_MULTILIBS to + config.status. + * configure: Rebuilt. + +2002-01-27 Alexandre Oliva + + * configure.in (toolexecdir, toolexeclibdir): Set and AC_SUBST. + Remove USE_LIBDIR conditional. + * Makefile.am (toolexecdir, toolexeclibdir): Don't override. + * Makefile.in, configure: Rebuilt. + +Mon Aug 9 18:33:38 1999 Rainer Orth + + * include/Makefile.in: Rebuilt. + * Makefile.in: Rebuilt + * Makefile.am (toolexeclibdir): Add $(MULTISUBDIR) even for native + builds. + Use USE_LIBDIR. + + * configure: Rebuilt. + * configure.in (USE_LIBDIR): Define for native builds. + Use lowercase in configure --help explanations. + +1999-08-08 Anthony Green + + * include/ffi.h.in (FFI_FN): Remove `...'. + +1999-08-08 Anthony Green + + * Makefile.in: Rebuilt. + * Makefile.am (AM_CFLAGS): Compile with -fexceptions. + + * src/x86/sysv.S: Add exception handling metadata. + + +============================================================================= + +The libffi version 1 ChangeLog archive. + +Version 1 of libffi had per-directory ChangeLogs. Current and future +versions have a single ChangeLog file in the root directory. The +version 1 ChangeLogs have all been concatenated into this file for +future reference only. + +--- libffi ---------------------------------------------------------------- + +Mon Oct 5 02:17:50 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +Mon Oct 5 01:03:03 1998 Anthony Green + + * configure.in: Boosted rev. + * configure, Makefile.in, aclocal.m4: Rebuilt. + * README: Boosted rev and updated release notes. + +1998-07-25 Andreas Schwab + + * m68k/ffi.c (ffi_prep_cif_machdep): Use bitmask for cif->flags. + Correctly handle small structures. + (ffi_prep_args): Also handle small structures. + (ffi_call): Pass size of return type to ffi_call_SYSV. + * m68k/sysv.S: Adjust for above changes. Correctly align small + structures in the return value. + + * types.c (uint64, sint64) [M68K]: Change alignment to 4. + +Fri Apr 17 17:26:58 1998 Anthony Green + + * configure.in: Boosted rev. + * configure,Makefile.in,aclocal.m4: Rebuilt. + * README: Boosted rev and added release notes. + +Sun Feb 22 00:50:41 1998 Geoff Keating + + * configure.in: Add PowerPC config bits. + +1998-02-14 Andreas Schwab + + * configure.in: Add m68k config bits. Change AC_CANONICAL_SYSTEM + to AC_CANONICAL_HOST, this is not a compiler. Use $host instead + of $target. Remove AC_CHECK_SIZEOF(char), we already know the + result. Fix argument of AC_ARG_ENABLE. + * configure, fficonfig.h.in: Rebuilt. + +Tue Feb 10 20:53:40 1998 Richard Henderson + + * configure.in: Add Alpha config bits. + +Tue May 13 13:39:20 1997 Anthony Green + + * README: Updated dates and reworded Irix comments. + + * configure.in: Removed AC_PROG_RANLIB. + + * Makefile.in, aclocal.m4, config.guess, config.sub, configure, + ltmain.sh, */Makefile.in: libtoolized again and rebuilt with + automake and autoconf. 
+ +Sat May 10 18:44:50 1997 Tom Tromey + + * configure, aclocal.m4: Rebuilt. + * configure.in: Don't compute EXTRADIST; now handled in + src/Makefile.in. Removed macros implied by AM_INIT_AUTOMAKE. + Don't run AM_MAINTAINER_MODE. + +Thu May 8 14:34:05 1997 Anthony Green + + * missing, ltmain.sh, ltconfig.sh: Created. These are new files + required by automake and libtool. + + * README: Boosted rev to 1.14. Added notes. + + * acconfig.h: Moved PACKAGE and VERSION for new automake. + + * configure.in: Changes for libtool. + + * Makefile.am (check): make test now make check. Uses libtool now. + + * Makefile.in, configure.in, aclocal.h, fficonfig.h.in: Rebuilt. + +Thu May 1 16:27:07 1997 Anthony Green + + * missing: Added file required by new automake. + +Tue Nov 26 14:10:42 1996 Anthony Green + + * acconfig.h: Added USING_PURIFY flag. This is defined when + --enable-purify-safety was used at configure time. + + * configure.in (allsources): Added --enable-purify-safety switch. + (VERSION): Boosted rev to 1.13. + * configure: Rebuilt. + +Fri Nov 22 06:46:12 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.12. + Removed special CFLAGS hack for gcc. + * configure: Rebuilt. + + * README: Boosted rev to 1.12. Added notes. + + * Many files: Cygnus Support changed to Cygnus Solutions. + +Wed Oct 30 11:15:25 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.11. + * configure: Rebuilt. + + * README: Boosted rev to 1.11. Added notes about GNU make. + +Tue Oct 29 12:25:12 1996 Anthony Green + + * configure.in: Fixed -Wall trick. + (VERSION): Boosted rev. + * configure: Rebuilt + + * acconfig.h: Needed for --enable-debug configure switch. + + * README: Boosted rev to 1.09. Added more notes on building + libffi, and LCLint. + + * configure.in: Added --enable-debug switch. Boosted rev to + 1.09. + * configure: Rebuilt + +Tue Oct 15 13:11:28 1996 Anthony Green + + * configure.in (VERSION): Boosted rev to 1.08 + * configure: Rebuilt. + + * README: Added n32 bug fix notes. + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + +Mon Oct 14 10:54:46 1996 Anthony Green + + * README: Added web page reference. + + * configure.in, README: Boosted rev to 1.05 + * configure: Rebuilt. + + * README: Fixed n32 sample code. + +Fri Oct 11 17:09:28 1996 Anthony Green + + * README: Added sparc notes. + + * configure.in, README: Boosted rev to 1.04. + * configure: Rebuilt. + +Thu Oct 10 10:31:03 1996 Anthony Green + + * configure.in, README: Boosted rev to 1.03. + * configure: Rebuilt. + + * README: Added struct notes. + + * Makefile.am (EXTRA_DIST): Added LICENSE to distribution. + * Makefile.in: Rebuilt. + + * README: Removed Linux section. No special notes now + because aggregates arg/return types work. + +Wed Oct 9 16:16:42 1996 Anthony Green + + * README, configure.in (VERSION): Boosted rev to 1.02 + * configure: Rebuilt. + +Tue Oct 8 11:56:33 1996 Anthony Green + + * README (NOTE): Added n32 notes. + + * Makefile.am: Added test production. + * Makefile: Rebuilt + + * README: spell checked! + + * configure.in (VERSION): Boosted rev to 1.01 + * configure: Rebuilt. + +Mon Oct 7 15:50:22 1996 Anthony Green + + * configure.in: Added nasty bit to support SGI tools. + * configure: Rebuilt. + + * README: Added SGI notes. Added note about automake bug. + +Mon Oct 7 11:00:28 1996 Anthony Green + + * README: Rewrote intro, and fixed examples. + +Fri Oct 4 10:19:55 1996 Anthony Green + + * configure.in: -D$TARGET is no longer used as a compiler switch. 
+ It is now inserted into ffi.h at configure time. + * configure: Rebuilt. + + * FFI_ABI and FFI_STATUS are now ffi_abi and ffi_status. + +Thu Oct 3 13:47:34 1996 Anthony Green + + * README, LICENSE: Created. Wrote some docs. + + * configure.in: Don't barf on i586-unknown-linuxaout. + Added EXTRADIST code for "make dist". + * configure: Rebuilt. + + * */Makefile.in: Rebuilt with patched automake. + +Tue Oct 1 17:12:25 1996 Anthony Green + + * Makefile.am, aclocal.m4, config.guess, config.sub, + configure.in, fficonfig.h.in, install-sh, mkinstalldirs, + stamp-h.in: Created + * Makefile.in, configure: Generated + +--- libffi/include -------------------------------------------------------- + +Tue Feb 24 13:09:36 1998 Anthony Green + + * ffi_mips.h: Updated FFI_TYPE_STRUCT_* values based on + ffi.h.in changes. This is a work-around for SGI's "simple" + assembler. + +Sun Feb 22 00:51:55 1998 Geoff Keating + + * ffi.h.in: PowerPC support. + +1998-02-14 Andreas Schwab + + * ffi.h.in: Add m68k support. + (FFI_TYPE_LONGDOUBLE): Make it a separate value. + +Tue Feb 10 20:55:16 1998 Richard Henderson + + * ffi.h.in (SIZEOF_ARG): Use a pointer type by default. + + * ffi.h.in: Alpha support. + +Fri Nov 22 06:48:45 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Cygnus Support -> Cygnus Solutions. + +Wed Nov 20 22:31:01 1996 Anthony Green + + * ffi.h.in: Added ffi_type_void definition. + +Tue Oct 29 12:22:40 1996 Anthony Green + + * Makefile.am (hack_DATA): Always install ffi_mips.h. + + * ffi.h.in: Removed FFI_DEBUG. It's now in the correct + place (acconfig.h). + Added #include for size_t definition. + +Tue Oct 15 17:23:35 1996 Anthony Green + + * ffi.h.in, ffi_common.h, ffi_mips.h: More clean up. + Commented out #define of FFI_DEBUG. + +Tue Oct 15 13:01:06 1996 Anthony Green + + * ffi_common.h: Added bool definition. + + * ffi.h.in, ffi_common.h: Clean up based on LCLint output. + Added funny /*@...@*/ comments to annotate source. + +Mon Oct 14 12:29:23 1996 Anthony Green + + * ffi.h.in: Interface changes based on feedback from Jim + Blandy. + +Fri Oct 11 16:49:35 1996 Anthony Green + + * ffi.h.in: Small change for sparc support. + +Thu Oct 10 14:53:37 1996 Anthony Green + + * ffi_mips.h: Added FFI_TYPE_STRUCT_* definitions for + special structure return types. + +Wed Oct 9 13:55:57 1996 Anthony Green + + * ffi.h.in: Added SIZEOF_ARG definition for X86 + +Tue Oct 8 11:40:36 1996 Anthony Green + + * ffi.h.in (FFI_FN): Added macro for eliminating compiler warnings. + Use it to case your function pointers to the proper type. + + * ffi_mips.h (SIZEOF_ARG): Added magic to fix type promotion bug. + + * Makefile.am (EXTRA_DIST): Added ffi_mips.h to EXTRA_DIST. + * Makefile: Rebuilt. + + * ffi_mips.h: Created. Moved all common mips definitions here. + +Mon Oct 7 10:58:12 1996 Anthony Green + + * ffi.h.in: The SGI assember is very picky about parens. Redefined + some macros to avoid problems. + + * ffi.h.in: Added FFI_DEFAULT_ABI definitions. Also added + externs for pointer, and 64bit integral ffi_types. + +Fri Oct 4 09:51:37 1996 Anthony Green + + * ffi.h.in: Added FFI_ABI member to ffi_cif and changed + function prototypes accordingly. + Added #define @TARGET@. Now programs including ffi.h don't + have to specify this themselves. + +Thu Oct 3 15:36:44 1996 Anthony Green + + * ffi.h.in: Changed ffi_prep_cif's values from void* to void** + + * Makefile.am (EXTRA_DIST): Added EXTRA_DIST for "make dist" + to work. + * Makefile.in: Regenerated. 
+ +Wed Oct 2 10:16:59 1996 Anthony Green + + * Makefile.am: Created + * Makefile.in: Generated + + * ffi_common.h: Added rcsid comment + +Tue Oct 1 17:13:51 1996 Anthony Green + + * ffi.h.in, ffi_common.h: Created + +--- libffi/src ------------------------------------------------------------ + +Mon Oct 5 02:17:50 1998 Anthony Green + + * arm/ffi.c, arm/sysv.S: Created. + + * Makefile.am: Added arm files. + * Makefile.in: Rebuilt. + +Mon Oct 5 01:41:38 1998 Anthony Green + + * Makefile.am (libffi_la_LDFLAGS): Incremented revision. + +Sun Oct 4 16:27:17 1998 Anthony Green + + * alpha/osf.S (ffi_call_osf): Patch for DU assembler. + + * ffitest.c (main): long long and long double return values work + for x86. + +Fri Apr 17 11:50:58 1998 Anthony Green + + * Makefile.in: Rebuilt. + + * ffitest.c (main): Floating point tests not executed for systems + with broken lond double (SunOS 4 w/ GCC). + + * types.c: Fixed x86 alignment info for long long types. + +Thu Apr 16 07:15:28 1998 Anthony Green + + * ffitest.c: Added more notes about GCC bugs under Irix 6. + +Wed Apr 15 08:42:22 1998 Anthony Green + + * ffitest.c (struct5): New test function. + (main): New test with struct5. + +Thu Mar 5 10:48:11 1998 Anthony Green + + * prep_cif.c (initialize_aggregate): Fix assertion for + nested structures. + +Tue Feb 24 16:33:41 1998 Anthony Green + + * prep_cif.c (ffi_prep_cif): Added long double support for sparc. + +Sun Feb 22 00:52:18 1998 Geoff Keating + + * powerpc/asm.h: New file. + * powerpc/ffi.c: New file. + * powerpc/sysv.S: New file. + * Makefile.am: PowerPC port. + * ffitest.c (main): Allow all tests to run even in presence of gcc + bug on PowerPC. + +1998-02-17 Anthony Green + + * mips/ffi.c: Fixed comment typo. + + * x86/ffi.c (ffi_prep_cif_machdep), x86/sysv.S (retfloat): + Fixed x86 long double return handling. + + * types.c: Fixed x86 long double alignment info. + +1998-02-14 Andreas Schwab + + * types.c: Add m68k support. + + * ffitest.c (floating): Add long double parameter. + (return_ll, ldblit): New functions to test long long and long + double return value. + (main): Fix type error in assignment of ts[1-4]_type.elements. + Add tests for long long and long double arguments and return + values. + + * prep_cif.c (ffi_prep_cif) [M68K]: Don't allocate argument for + struct value pointer. + + * m68k/ffi.c, m68k/sysv.S: New files. + * Makefile.am: Add bits for m68k port. Add kludge to work around + automake deficiency. + (test): Don't require "." in $PATH. + * Makefile.in: Rebuilt. + +Wed Feb 11 07:36:50 1998 Anthony Green + + * Makefile.in: Rebuilt. + +Tue Feb 10 20:56:00 1998 Richard Henderson + + * alpha/ffi.c, alpha/osf.S: New files. + * Makefile.am: Alpha port. + +Tue Nov 18 14:12:07 1997 Anthony Green + + * mips/ffi.c (ffi_prep_cif_machdep): Initialize rstruct_flag + for n32. + +Tue Jun 3 17:18:20 1997 Anthony Green + + * ffitest.c (main): Added hack to get structure tests working + correctly. + +Sat May 10 19:06:42 1997 Tom Tromey + + * Makefile.in: Rebuilt. + * Makefile.am (EXTRA_DIST): Explicitly list all distributable + files in subdirs. + (VERSION, CC): Removed. + +Thu May 8 17:19:01 1997 Anthony Green + + * Makefile.am: Many changes for new automake and libtool. + * Makefile.in: Rebuilt. + +Fri Nov 22 06:57:56 1996 Anthony Green + + * ffitest.c (main): Fixed test case for non mips machines. + +Wed Nov 20 22:31:59 1996 Anthony Green + + * types.c: Added ffi_type_void declaration. + +Tue Oct 29 13:07:19 1996 Anthony Green + + * ffitest.c (main): Fixed character constants. 
+ (main): Emit warning for structure test 3 failure on Sun. + + * Makefile.am (VPATH): Fixed VPATH def'n so automake won't + strip it out. + Moved distdir hack from libffi to automake. + (ffitest): Added missing -c for $(COMPILE) (change in automake). + * Makefile.in: Rebuilt. + +Tue Oct 15 13:08:20 1996 Anthony Green + + * Makefile.am: Added "make lint" production. + * Makefile.in: Rebuilt. + + * prep_cif.c (STACK_ARG_SIZE): Improved STACK_ARG_SIZE macro. + Clean up based on LCLint output. Added funny /*@...@*/ comments to + annotate source. + + * ffitest.c, debug.c: Cleaned up code. + +Mon Oct 14 12:26:56 1996 Anthony Green + + * ffitest.c: Changes based on interface changes. + + * prep_cif.c (ffi_prep_cif): Cleaned up interface based on + feedback from Jim Blandy. + +Fri Oct 11 15:53:18 1996 Anthony Green + + * ffitest.c: Reordered tests while porting to sparc. + Made changes to handle lame structure passing for sparc. + Removed calls to fflush(). + + * prep_cif.c (ffi_prep_cif): Added special case for sparc + aggregate type arguments. + +Thu Oct 10 09:56:51 1996 Anthony Green + + * ffitest.c (main): Added structure passing/returning tests. + + * prep_cif.c (ffi_prep_cif): Perform proper initialization + of structure return types if needed. + (initialize_aggregate): Bug fix + +Wed Oct 9 16:04:20 1996 Anthony Green + + * types.c: Added special definitions for x86 (double doesn't + need double word alignment). + + * ffitest.c: Added many tests + +Tue Oct 8 09:19:22 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Fixed assertion. + + * debug.c (ffi_assert): Must return a non void now. + + * Makefile.am: Added test production. + * Makefile: Rebuilt. + + * ffitest.c (main): Created. + + * types.c: Created. Stripped common code out of */ffi.c. + + * prep_cif.c: Added missing stdlib.h include. + + * debug.c (ffi_type_test): Used "a" to eliminate compiler + warnings in non-debug builds. Included ffi_common.h. + +Mon Oct 7 15:36:42 1996 Anthony Green + + * Makefile.am: Added a rule for .s -> .o + This is required by the SGI compiler. + * Makefile: Rebuilt. + +Fri Oct 4 09:51:08 1996 Anthony Green + + * prep_cif.c (initialize_aggregate): Moved abi specification + to ffi_prep_cif(). + +Thu Oct 3 15:37:37 1996 Anthony Green + + * prep_cif.c (ffi_prep_cif): Changed values from void* to void**. + (initialize_aggregate): Fixed aggregate type initialization. + + * Makefile.am (EXTRA_DIST): Added support code for "make dist". + * Makefile.in: Regenerated. + +Wed Oct 2 11:41:57 1996 Anthony Green + + * debug.c, prep_cif: Created. + + * Makefile.am: Added debug.o and prep_cif.o to OBJ. + * Makefile.in: Regenerated. + + * Makefile.am (INCLUDES): Added missing -I../include + * Makefile.in: Regenerated. + +Tue Oct 1 17:11:51 1996 Anthony Green + + * error.c, Makefile.am: Created. + * Makefile.in: Generated. + +--- libffi/src/x86 -------------------------------------------------------- + +Sun Oct 4 16:27:17 1998 Anthony Green + + * sysv.S (retlongdouble): Fixed long long return value support. + * ffi.c (ffi_prep_cif_machdep): Ditto. + +Wed May 13 04:30:33 1998 Anthony Green + + * ffi.c (ffi_prep_cif_machdep): Fixed long double return value + support. + +Wed Apr 15 08:43:20 1998 Anthony Green + + * ffi.c (ffi_prep_args): small struct support was missing. + +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Mon Dec 2 15:12:58 1996 Tom Tromey + + * sysv.S: Use .balign, for a.out Linux boxes. + +Tue Oct 15 13:06:50 1996 Anthony Green + + * ffi.c: Clean up based on LCLint output. 
+ Added funny /*@...@*/ comments to annotate source. + +Fri Oct 11 16:43:38 1996 Anthony Green + + * ffi.c (ffi_call): Added assertion for bad ABIs. + +Wed Oct 9 13:57:27 1996 Anthony Green + + * sysv.S (retdouble): Fixed double return problems. + + * ffi.c (ffi_call): Corrected fn arg definition. + (ffi_prep_cif_machdep): Fixed double return problems + +Tue Oct 8 12:12:49 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + +Mon Oct 7 15:53:06 1996 Anthony Green + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:54:53 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 10:07:05 1996 Anthony Green + + * ffi.c, sysv.S, objects.mak: Created. + (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep(). + +--- libffi/src/mips ------------------------------------------------------- + +Tue Feb 17 17:18:07 1998 Anthony Green + + * o32.S: Fixed typo in comment. + + * ffi.c (ffi_prep_cif_machdep): Fixed argument processing. + +Thu May 8 16:53:58 1997 Anthony Green + + * o32.s, n32.s: Wrappers for SGI tool support. + + * objects.mak: Removed. + +Tue Oct 29 14:37:45 1996 Anthony Green + + * ffi.c (ffi_prep_args): Changed int z to size_t z. + +Tue Oct 15 13:17:25 1996 Anthony Green + + * n32.S: Fixed bad stack munging. + + * ffi.c: Moved prototypes for ffi_call_?32() to here from + ffi_mips.h because extended_cif is not defined in ffi_mips.h. + +Mon Oct 14 12:42:02 1996 Anthony Green + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 11:22:16 1996 Anthony Green + + * n32.S, ffi.c: Lots of changes to support passing and + returning structures with the n32 calling convention. + + * n32.S: Fixed fn pointer bug. + + * ffi.c (ffi_prep_cif_machdep): Fix for o32 structure + return values. + (ffi_prep_args): Fixed n32 structure passing when structures + partially fit in registers. + +Wed Oct 9 13:49:25 1996 Anthony Green + + * objects.mak: Added n32.o. + + * n32.S: Created. + + * ffi.c (ffi_prep_args): Added magic to support proper + n32 processing. + +Tue Oct 8 10:37:35 1996 Anthony Green + + * ffi.c: Moved ffi_type definitions to types.c. + (ffi_prep_args): Fixed type promotion bug. + + * o32.S: This code is only built for o32 compiles. + A lot of the #define cruft has moved to ffi_mips.h. + + * ffi.c (ffi_prep_cif_machdep): Fixed arg flags. Second arg + is only processed if the first is either a float or double. + +Mon Oct 7 15:33:59 1996 Anthony Green + + * o32.S: Modified to compile under each of o32, n32 and n64. + + * ffi.c (FFI_*_TYPEDEF): Removed redundant ';' + +Fri Oct 4 09:53:25 1996 Anthony Green + + * ffi.c (ffi_call): Removed FFI_ABI arg, and swapped + remaining args. + +Wed Oct 2 17:41:22 1996 Anthony Green + + * o32.S: Removed crufty definitions. + +Wed Oct 2 12:53:42 1996 Anthony Green + + * ffi.c (ffi_prep_cif): cif->rvalue no longer initialized to NULL. + (ffi_prep_cif_machdep): Moved all machine independent cif processing + to src/prep_cif.c. Introduced ffi_prep_cif_machdep. Return types + of FFI_TYPE_STRUCT are no different than FFI_TYPE_INT. + +Tue Oct 1 17:11:02 1996 Anthony Green + + * ffi.c, o32.S, object.mak: Created + +--- libffi/src/sparc ------------------------------------------------------ + +Tue Feb 24 16:33:18 1998 Anthony Green + + * ffi.c (ffi_prep_args): Added long double support. 
+ +Thu May 8 16:53:58 1997 Anthony Green + + * objects.mak: Removed. + +Thu May 1 16:07:56 1997 Anthony Green + + * v8.S: Fixed minor portability problem reported by + Russ McManus . + +Tue Nov 26 14:12:43 1996 Anthony Green + + * v8.S: Used STACKFRAME define elsewhere. + + * ffi.c (ffi_prep_args): Zero out space when USING_PURIFY + is set. + (ffi_prep_cif_machdep): Allocate the correct stack frame + space for functions with < 6 args. + +Tue Oct 29 15:08:55 1996 Anthony Green + + * ffi.c (ffi_prep_args): int z is now size_t z. + +Mon Oct 14 13:31:24 1996 Anthony Green + + * v8.S (ffi_call_V8): Gordon rewrites this again. It looks + great now. + + * ffi.c (ffi_call): The comment about hijacked registers + is no longer valid after gordoni hacked v8.S. + + * v8.S (ffi_call_V8): Rewrote with gordoni. Much simpler. + + * v8.S, ffi.c: ffi_call() had changed to accept more than + two args, so v8.S had to change (because it hijacks incoming + arg registers). + + * ffi.c: Interface changes based on feedback from Jim Blandy. + +Thu Oct 10 17:48:16 1996 Anthony Green + + * ffi.c, v8.S, objects.mak: Created. + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/LICENSE new file mode 100644 index 0000000..4f0b762 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/LICENSE @@ -0,0 +1,21 @@ +libffi - Copyright (c) 1996-2020 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/LICENSE-BUILDTOOLS b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/LICENSE-BUILDTOOLS new file mode 100644 index 0000000..d1d626e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/LICENSE-BUILDTOOLS @@ -0,0 +1,353 @@ +The libffi source distribution contains certain code that is not part +of libffi, and is only used as tooling to assist with the building and +testing of libffi. This includes the msvcc.sh script used to wrap the +Microsoft compiler with GNU compatible command-line options, +make_sunver.pl, and the libffi test code distributed in the +testsuite/libffi.bhaible directory. This code is distributed with +libffi for the purpose of convenience only, and libffi is in no way +derived from this code. + +msvcc.sh an testsuite/libffi.bhaible are both distributed under the +terms of the GNU GPL version 2, as below. 
+ + + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) 
Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/Makefile.am new file mode 100644 index 0000000..7654bf5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/Makefile.am @@ -0,0 +1,150 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS = foreign subdir-objects + +ACLOCAL_AMFLAGS = -I m4 + +SUBDIRS = include testsuite man +if BUILD_DOCS +## This hack is needed because it doesn't seem possible to make a +## conditional info_TEXINFOS in Automake. At least Automake 1.14 +## either gives errors -- if this attempted in the most +## straightforward way -- or simply unconditionally tries to build the +## info file. +SUBDIRS += doc +endif + +EXTRA_DIST = LICENSE ChangeLog.old \ + m4/libtool.m4 m4/lt~obsolete.m4 \ + m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/ltversion.m4 src/debug.c msvcc.sh \ + generate-darwin-source-and-headers.py \ + libffi.xcodeproj/project.pbxproj \ + libtool-ldflags libtool-version configure.host README.md \ + libffi.map.in LICENSE-BUILDTOOLS msvc_build make_sunver.pl + +# local.exp is generated by configure +DISTCLEANFILES = local.exp + +# Subdir rules rely on $(FLAGS_TO_PASS) +FLAGS_TO_PASS = $(AM_MAKEFLAGS) + +MAKEOVERRIDES= + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libffi.pc + +toolexeclib_LTLIBRARIES = libffi.la +noinst_LTLIBRARIES = libffi_convenience.la + +libffi_la_SOURCES = src/prep_cif.c src/types.c \ + src/raw_api.c src/java_raw_api.c src/closures.c + +if FFI_DEBUG +libffi_la_SOURCES += src/debug.c +endif + +noinst_HEADERS = src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/alpha/ffitarget.h src/alpha/internal.h \ + src/arc/ffitarget.h src/arm/ffitarget.h src/arm/internal.h \ + src/avr32/ffitarget.h src/bfin/ffitarget.h \ + src/cris/ffitarget.h src/csky/ffitarget.h src/frv/ffitarget.h \ + src/ia64/ffitarget.h src/ia64/ia64_flags.h \ + src/m32r/ffitarget.h src/m68k/ffitarget.h \ + src/m88k/ffitarget.h src/metag/ffitarget.h \ + src/microblaze/ffitarget.h src/mips/ffitarget.h \ + src/moxie/ffitarget.h src/nios2/ffitarget.h \ + src/or1k/ffitarget.h src/pa/ffitarget.h \ + src/powerpc/ffitarget.h src/powerpc/asm.h \ + src/powerpc/ffi_powerpc.h src/riscv/ffitarget.h \ + src/s390/ffitarget.h src/s390/internal.h src/sh/ffitarget.h \ + src/sh64/ffitarget.h src/sparc/ffitarget.h \ + src/sparc/internal.h src/tile/ffitarget.h src/vax/ffitarget.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h \ + src/x86/asmnames.h src/xtensa/ffitarget.h src/dlmalloc.c \ + src/kvx/ffitarget.h + +EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c src/aarch64/sysv.S \ + src/aarch64/win64_armasm.S src/alpha/ffi.c src/alpha/osf.S \ + src/arc/ffi.c src/arc/arcompact.S src/arm/ffi.c \ + src/arm/sysv.S src/arm/ffi.c src/arm/sysv_msvc_arm32.S \ + src/avr32/ffi.c src/avr32/sysv.S src/bfin/ffi.c \ + src/bfin/sysv.S src/cris/ffi.c src/cris/sysv.S src/frv/ffi.c \ + 
src/csky/ffi.c src/csky/sysv.S src/frv/eabi.S src/ia64/ffi.c \ + src/ia64/unix.S src/m32r/ffi.c src/m32r/sysv.S src/m68k/ffi.c \ + src/m68k/sysv.S src/m88k/ffi.c src/m88k/obsd.S \ + src/metag/ffi.c src/metag/sysv.S src/microblaze/ffi.c \ + src/microblaze/sysv.S src/mips/ffi.c src/mips/o32.S \ + src/mips/n32.S src/moxie/ffi.c src/moxie/eabi.S \ + src/nios2/ffi.c src/nios2/sysv.S src/or1k/ffi.c \ + src/or1k/sysv.S src/pa/ffi.c src/pa/linux.S src/pa/hpux32.S \ + src/powerpc/ffi.c src/powerpc/ffi_sysv.c \ + src/powerpc/ffi_linux64.c src/powerpc/sysv.S \ + src/powerpc/linux64.S src/powerpc/linux64_closure.S \ + src/powerpc/ppc_closure.S src/powerpc/aix.S \ + src/powerpc/darwin.S src/powerpc/aix_closure.S \ + src/powerpc/darwin_closure.S src/powerpc/ffi_darwin.c \ + src/riscv/ffi.c src/riscv/sysv.S src/s390/ffi.c \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ + src/xtensa/ffi.c src/xtensa/sysv.S src/kvx/ffi.c \ + src/kvx/sysv.S + +TARGET_OBJ = @TARGET_OBJ@ +libffi_la_LIBADD = $(TARGET_OBJ) + +libffi_convenience_la_SOURCES = $(libffi_la_SOURCES) +EXTRA_libffi_convenience_la_SOURCES = $(EXTRA_libffi_la_SOURCES) +libffi_convenience_la_LIBADD = $(libffi_la_LIBADD) +libffi_convenience_la_DEPENDENCIES = $(libffi_la_DEPENDENCIES) +nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES) + +LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS)) + +AM_CFLAGS = +if FFI_DEBUG +# Build debug. Define FFI_DEBUG on the commandline so that, when building with +# MSVC, it can link against the debug CRT. +AM_CFLAGS += -DFFI_DEBUG +endif + +if LIBFFI_BUILD_VERSIONED_SHLIB +if LIBFFI_BUILD_VERSIONED_SHLIB_GNU +libffi_version_script = -Wl,--version-script,libffi.map +libffi_version_dep = libffi.map +endif +if LIBFFI_BUILD_VERSIONED_SHLIB_SUN +libffi_version_script = -Wl,-M,libffi.map-sun +libffi_version_dep = libffi.map-sun +libffi.map-sun : libffi.map $(top_srcdir)/make_sunver.pl \ + $(libffi_la_OBJECTS) $(libffi_la_LIBADD) + perl $(top_srcdir)/make_sunver.pl libffi.map \ + `echo $(libffi_la_OBJECTS) $(libffi_la_LIBADD) | \ + sed 's,\([^/ ]*\)\.l\([ao]\),.libs/\1.\2,g'` \ + > $@ || (rm -f $@ ; exit 1) +endif +else +libffi_version_script = +libffi_version_dep = +endif +libffi_version_info = -version-info `grep -v '^\#' $(srcdir)/libtool-version` + +libffi.map: $(top_srcdir)/libffi.map.in + $(COMPILE) -D$(TARGET) -DGENERATE_LIBFFI_MAP \ + -E -x assembler-with-cpp -o $@ $(top_srcdir)/libffi.map.in + +libffi_la_LDFLAGS = -no-undefined $(libffi_version_info) $(libffi_version_script) $(LTLDFLAGS) $(AM_LTLDFLAGS) +libffi_la_DEPENDENCIES = $(libffi_la_LIBADD) $(libffi_version_dep) + +AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src +AM_CCASFLAGS = $(AM_CPPFLAGS) + +dist-hook: + d=`(cd $(distdir); pwd)`; (cd doc; make pdf; cp *.pdf $$d/doc) + if [ -d $(top_srcdir)/.git ] ; then (cd $(top_srcdir); git log --no-decorate) ; else echo 'See git log for history.' 
; fi > $(distdir)/ChangeLog + s=`awk '/was released on/{ print NR; exit}' $(top_srcdir)/README.md`; tail -n +$$(($$s-1)) $(top_srcdir)/README.md > $(distdir)/README.md + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/README.md b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/README.md new file mode 100644 index 0000000..4225d85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/README.md @@ -0,0 +1,486 @@ +Status +====== + +[![Build Status](https://travis-ci.org/libffi/libffi.svg?branch=master)](https://travis-ci.org/libffi/libffi) +[![Build status](https://ci.appveyor.com/api/projects/status/8lko9vagbx4w2kxq?svg=true)](https://ci.appveyor.com/project/atgreen/libffi) + +libffi-3.4 was released on TBD. Check the libffi web +page for updates: . + + +What is libffi? +=============== + +Compilers for high level languages generate code that follow certain +conventions. These conventions are necessary, in part, for separate +compilation to work. One such convention is the "calling +convention". The "calling convention" is essentially a set of +assumptions made by the compiler about where function arguments will +be found on entry to a function. A "calling convention" also specifies +where the return value for a function is found. + +Some programs may not know at the time of compilation what arguments +are to be passed to a function. For instance, an interpreter may be +told at run-time about the number and types of arguments used to call +a given function. Libffi can be used in such programs to provide a +bridge from the interpreter program to compiled code. + +The libffi library provides a portable, high level programming +interface to various calling conventions. This allows a programmer to +call any function specified by a call interface description at run +time. + +FFI stands for Foreign Function Interface. A foreign function +interface is the popular name for the interface that allows code +written in one language to call code written in another language. The +libffi library really only provides the lowest, machine dependent +layer of a fully featured foreign function interface. A layer must +exist above libffi that handles type conversions for values passed +between the two languages. + + +Supported Platforms +=================== + +Libffi has been ported to many different platforms. 
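To make the call-interface description in the vendored README above concrete, here is a minimal C sketch of describing and invoking a compiled function at run time with ffi_prep_cif and ffi_call. It is an illustrative aside, not part of the diffed README.md: the choice of puts, the string value, and the variable names are assumptions for the example only.

    /* Minimal sketch (illustrative, not from the vendored file): an
       interpreter-style caller describes a function's signature at run
       time and invokes it through libffi's generic call interface. */
    #include <stdio.h>
    #include <ffi.h>

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[1] = { &ffi_type_pointer };
        char *text = "hello from libffi";   /* illustrative argument value */
        void *arg_values[1] = { &text };
        ffi_arg result;                     /* small integer returns are widened to ffi_arg */

        /* Build the call interface: default ABI, one pointer argument, int return. */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                         &ffi_type_sint, arg_types) == FFI_OK)
        {
            /* Call puts() through the generic interface. */
            ffi_call(&cif, FFI_FN(puts), &result, arg_values);
            printf("puts returned %d\n", (int) result);
        }
        return 0;
    }

Building such a program would simply require linking against the installed library (typically -lffi) after the configure/make steps described below.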
+ +At the time of release, the following basic configurations have been +tested: + +| Architecture | Operating System | Compiler | +| --------------- | ---------------- | ----------------------- | +| AArch64 (ARM64) | iOS | Clang | +| AArch64 | Linux | GCC | +| AArch64 | Windows | MSVC | +| Alpha | Linux | GCC | +| Alpha | Tru64 | GCC | +| ARC | Linux | GCC | +| ARM | Linux | GCC | +| ARM | iOS | GCC | +| ARM | Windows | MSVC | +| AVR32 | Linux | GCC | +| Blackfin | uClinux | GCC | +| CSKY | Linux | GCC | +| HPPA | HPUX | GCC | +| KVX | Linux | GCC | +| IA-64 | Linux | GCC | +| M68K | FreeMiNT | GCC | +| M68K | Linux | GCC | +| M68K | RTEMS | GCC | +| M88K | OpenBSD/mvme88k | GCC | +| Meta | Linux | GCC | +| MicroBlaze | Linux | GCC | +| MIPS | IRIX | GCC | +| MIPS | Linux | GCC | +| MIPS | RTEMS | GCC | +| MIPS64 | Linux | GCC | +| Moxie | Bare metal | GCC | +| Nios II | Linux | GCC | +| OpenRISC | Linux | GCC | +| PowerPC 32-bit | AIX | IBM XL C | +| PowerPC 64-bit | AIX | IBM XL C | +| PowerPC | AMIGA | GCC | +| PowerPC | Linux | GCC | +| PowerPC | Mac OSX | GCC | +| PowerPC | FreeBSD | GCC | +| PowerPC 64-bit | FreeBSD | GCC | +| PowerPC 64-bit | Linux ELFv1 | GCC | +| PowerPC 64-bit | Linux ELFv2 | GCC | +| RISC-V 32-bit | Linux | GCC | +| RISC-V 64-bit | Linux | GCC | +| S390 | Linux | GCC | +| S390X | Linux | GCC | +| SPARC | Linux | GCC | +| SPARC | Solaris | GCC | +| SPARC | Solaris | Oracle Solaris Studio C | +| SPARC64 | Linux | GCC | +| SPARC64 | FreeBSD | GCC | +| SPARC64 | Solaris | Oracle Solaris Studio C | +| TILE-Gx/TILEPro | Linux | GCC | +| VAX | OpenBSD/vax | GCC | +| X86 | FreeBSD | GCC | +| X86 | GNU HURD | GCC | +| X86 | Interix | GCC | +| X86 | kFreeBSD | GCC | +| X86 | Linux | GCC | +| X86 | OpenBSD | GCC | +| X86 | OS/2 | GCC | +| X86 | Solaris | GCC | +| X86 | Solaris | Oracle Solaris Studio C | +| X86 | Windows/Cygwin | GCC | +| X86 | Windows/MingW | GCC | +| X86-64 | FreeBSD | GCC | +| X86-64 | Linux | GCC | +| X86-64 | Linux/x32 | GCC | +| X86-64 | OpenBSD | GCC | +| X86-64 | Solaris | Oracle Solaris Studio C | +| X86-64 | Windows/Cygwin | GCC | +| X86-64 | Windows/MingW | GCC | +| X86-64 | Mac OSX | GCC | +| Xtensa | Linux | GCC | + +Please send additional platform test results to +libffi-discuss@sourceware.org. + +Installing libffi +================= + +First you must configure the distribution for your particular +system. Go to the directory you wish to build libffi in and run the +"configure" program found in the root directory of the libffi source +distribution. Note that building libffi requires a C99 compatible +compiler. + +If you're building libffi directly from git hosted sources, configure +won't exist yet; run ./autogen.sh first. This will require that you +install autoconf, automake and libtool. + +You may want to tell configure where to install the libffi library and +header files. To do that, use the ``--prefix`` configure switch. Libffi +will install under /usr/local by default. + +If you want to enable extra run-time debugging checks use the the +``--enable-debug`` configure switch. This is useful when your program dies +mysteriously while using libffi. + +Another useful configure switch is ``--enable-purify-safety``. Using this +will add some extra code which will suppress certain warnings when you +are using Purify with libffi. Only use this switch when using +Purify, as it will slow down the library. + +If you don't want to build documentation, use the ``--disable-docs`` +configure switch. 
+ +It's also possible to build libffi on Windows platforms with +Microsoft's Visual C++ compiler. In this case, use the msvcc.sh +wrapper script during configuration like so: + + path/to/configure CC=path/to/msvcc.sh CXX=path/to/msvcc.sh LD=link CPP="cl -nologo -EP" CPPFLAGS="-DFFI_BUILDING_DLL" + +For 64-bit Windows builds, use ``CC="path/to/msvcc.sh -m64"`` and +``CXX="path/to/msvcc.sh -m64"``. You may also need to specify +``--build`` appropriately. + +It is also possible to build libffi on Windows platforms with the LLVM +project's clang-cl compiler, like below: + + path/to/configure CC="path/to/msvcc.sh -clang-cl" CXX="path/to/msvcc.sh -clang-cl" LD=link CPP="clang-cl -EP" + +When building with MSVC under a MingW environment, you may need to +remove the line in configure that sets 'fix_srcfile_path' to a 'cygpath' +command. ('cygpath' is not present in MingW, and is not required when +using MingW-style paths.) + +To build static library for ARM64 with MSVC using visual studio solution, msvc_build folder have + aarch64/Ffi_staticLib.sln + required header files in aarch64/aarch64_include/ + + +SPARC Solaris builds require the use of the GNU assembler and linker. +Point ``AS`` and ``LD`` environment variables at those tool prior to +configuration. + +For iOS builds, the ``libffi.xcodeproj`` Xcode project is available. + +Configure has many other options. Use ``configure --help`` to see them all. + +Once configure has finished, type "make". Note that you must be using +GNU make. You can ftp GNU make from ftp.gnu.org:/pub/gnu/make . + +To ensure that libffi is working as advertised, type "make check". +This will require that you have DejaGNU installed. + +To install the library and header files, type ``make install``. + + +History +======= + +See the git log for details at http://github.com/libffi/libffi. + + 3.4 TBD + Add support for Alibaba's CSKY architecture. + Add support for Intel Control-flow Enforcement Technology (CET). + Add support for ARM Pointer Authentication (PA). + Fix 32-bit PPC regression. + Fix MIPS soft-float problem. + + 3.3 Nov-23-19 + Add RISC-V support. + New API in support of GO closures. + Add IEEE754 binary128 long double support for 64-bit Power + Default to Microsoft's 64 bit long double ABI with Visual C++. + GNU compiler uses 80 bits (128 in memory) FFI_GNUW64 ABI. + Add Windows on ARM64 (WOA) support. + Add Windows 32-bit ARM support. + Raw java (gcj) API deprecated. + Add pre-built PDF documentation to source distribution. + Many new test cases and bug fixes. + + 3.2.1 Nov-12-14 + Build fix for non-iOS AArch64 targets. + + 3.2 Nov-11-14 + Add C99 Complex Type support (currently only supported on + s390). + Add support for PASCAL and REGISTER calling conventions on x86 + Windows/Linux. + Add OpenRISC and Cygwin-64 support. + Bug fixes. + + 3.1 May-19-14 + Add AArch64 (ARM64) iOS support. + Add Nios II support. + Add m88k and DEC VAX support. + Add support for stdcall, thiscall, and fastcall on non-Windows + 32-bit x86 targets such as Linux. + Various Android, MIPS N32, x86, FreeBSD and UltraSPARC IIi + fixes. + Make the testsuite more robust: eliminate several spurious + failures, and respect the $CC and $CXX environment variables. + Archive off the manually maintained ChangeLog in favor of git + log. + + 3.0.13 Mar-17-13 + Add Meta support. + Add missing Moxie bits. + Fix stack alignment bug on 32-bit x86. + Build fix for m68000 targets. + Build fix for soft-float Power targets. 
+ Fix the install dir location for some platforms when building + with GCC (OS X, Solaris). + Fix Cygwin regression. + + 3.0.12 Feb-11-13 + Add Moxie support. + Add AArch64 support. + Add Blackfin support. + Add TILE-Gx/TILEPro support. + Add MicroBlaze support. + Add Xtensa support. + Add support for PaX enabled kernels with MPROTECT. + Add support for native vendor compilers on + Solaris and AIX. + Work around LLVM/GCC interoperability issue on x86_64. + + 3.0.11 Apr-11-12 + Lots of build fixes. + Add support for variadic functions (ffi_prep_cif_var). + Add Linux/x32 support. + Add thiscall, fastcall and MSVC cdecl support on Windows. + Add Amiga and newer MacOS support. + Add m68k FreeMiNT support. + Integration with iOS' xcode build tools. + Fix Octeon and MC68881 support. + Fix code pessimizations. + + 3.0.10 Aug-23-11 + Add support for Apple's iOS. + Add support for ARM VFP ABI. + Add RTEMS support for MIPS and M68K. + Fix instruction cache clearing problems on + ARM and SPARC. + Fix the N64 build on mips-sgi-irix6.5. + Enable builds with Microsoft's compiler. + Enable x86 builds with Oracle's Solaris compiler. + Fix support for calling code compiled with Oracle's Sparc + Solaris compiler. + Testsuite fixes for Tru64 Unix. + Additional platform support. + + 3.0.9 Dec-31-09 + Add AVR32 and win64 ports. Add ARM softfp support. + Many fixes for AIX, Solaris, HP-UX, *BSD. + Several PowerPC and x86-64 bug fixes. + Build DLL for windows. + + 3.0.8 Dec-19-08 + Add *BSD, BeOS, and PA-Linux support. + + 3.0.7 Nov-11-08 + Fix for ppc FreeBSD. + (thanks to Andreas Tobler) + + 3.0.6 Jul-17-08 + Fix for closures on sh. + Mark the sh/sh64 stack as non-executable. + (both thanks to Kaz Kojima) + + 3.0.5 Apr-3-08 + Fix libffi.pc file. + Fix #define ARM for IcedTea users. + Fix x86 closure bug. + + 3.0.4 Feb-24-08 + Fix x86 OpenBSD configury. + + 3.0.3 Feb-22-08 + Enable x86 OpenBSD thanks to Thomas Heller, and + x86-64 FreeBSD thanks to Björn König and Andreas Tobler. + Clean up test instruction in README. + + 3.0.2 Feb-21-08 + Improved x86 FreeBSD support. + Thanks to Björn König. + + 3.0.1 Feb-15-08 + Fix instruction cache flushing bug on MIPS. + Thanks to David Daney. + + 3.0.0 Feb-15-08 + Many changes, mostly thanks to the GCC project. + Cygnus Solutions is now Red Hat. + + [10 years go by...] + + 1.20 Oct-5-98 + Raffaele Sena produces ARM port. + + 1.19 Oct-5-98 + Fixed x86 long double and long long return support. + m68k bug fixes from Andreas Schwab. + Patch for DU assembler compatibility for the Alpha from Richard + Henderson. + + 1.18 Apr-17-98 + Bug fixes and MIPS configuration changes. + + 1.17 Feb-24-98 + Bug fixes and m68k port from Andreas Schwab. PowerPC port from + Geoffrey Keating. Various bug x86, Sparc and MIPS bug fixes. + + 1.16 Feb-11-98 + Richard Henderson produces Alpha port. + + 1.15 Dec-4-97 + Fixed an n32 ABI bug. New libtool, auto* support. + + 1.14 May-13-97 + libtool is now used to generate shared and static libraries. + Fixed a minor portability problem reported by Russ McManus + . + + 1.13 Dec-2-96 + Added --enable-purify-safety to keep Purify from complaining + about certain low level code. + Sparc fix for calling functions with < 6 args. + Linux x86 a.out fix. + + 1.12 Nov-22-96 + Added missing ffi_type_void, needed for supporting void return + types. Fixed test case for non MIPS machines. Cygnus Support + is now Cygnus Solutions. + + 1.11 Oct-30-96 + Added notes about GNU make. + + 1.10 Oct-29-96 + Added configuration fix for non GNU compilers. 
+ + 1.09 Oct-29-96 + Added --enable-debug configure switch. Clean-ups based on LCLint + feedback. ffi_mips.h is always installed. Many configuration + fixes. Fixed ffitest.c for sparc builds. + + 1.08 Oct-15-96 + Fixed n32 problem. Many clean-ups. + + 1.07 Oct-14-96 + Gordon Irlam rewrites v8.S again. Bug fixes. + + 1.06 Oct-14-96 + Gordon Irlam improved the sparc port. + + 1.05 Oct-14-96 + Interface changes based on feedback. + + 1.04 Oct-11-96 + Sparc port complete (modulo struct passing bug). + + 1.03 Oct-10-96 + Passing struct args, and returning struct values works for + all architectures/calling conventions. Expanded tests. + + 1.02 Oct-9-96 + Added SGI n32 support. Fixed bugs in both o32 and Linux support. + Added "make test". + + 1.01 Oct-8-96 + Fixed float passing bug in mips version. Restructured some + of the code. Builds cleanly with SGI tools. + + 1.00 Oct-7-96 + First release. No public announcement. + +Authors & Credits +================= + +libffi was originally written by Anthony Green . + +The developers of the GNU Compiler Collection project have made +innumerable valuable contributions. See the ChangeLog file for +details. + +Some of the ideas behind libffi were inspired by Gianni Mariani's free +gencall library for Silicon Graphics machines. + +The closure mechanism was designed and implemented by Kresten Krab +Thorup. + +Major processor architecture ports were contributed by the following +developers: + + aarch64 Marcus Shawcroft, James Greenhalgh + alpha Richard Henderson + arc Hackers at Synopsis + arm Raffaele Sena + avr32 Bradley Smith + blackfin Alexandre Keunecke I. de Mendonca + cris Simon Posnjak, Hans-Peter Nilsson + csky Ma Jun, Zhang Wenmeng + frv Anthony Green + ia64 Hans Boehm + m32r Kazuhiro Inaoka + m68k Andreas Schwab + m88k Miod Vallat + metag Hackers at Imagination Technologies + microblaze Nathan Rossi + mips Anthony Green, Casey Marshall + mips64 David Daney + moxie Anthony Green + nios ii Sandra Loosemore + openrisc Sebastian Macke + pa Randolph Chung, Dave Anglin, Andreas Tobler + powerpc Geoffrey Keating, Andreas Tobler, + David Edelsohn, John Hornkvist + powerpc64 Jakub Jelinek + riscv Michael Knyszek, Andrew Waterman, Stef O'Rear + s390 Gerhard Tonn, Ulrich Weigand + sh Kaz Kojima + sh64 Kaz Kojima + sparc Anthony Green, Gordon Irlam + tile-gx/tilepro Walter Lee + vax Miod Vallat + x86 Anthony Green, Jon Beniston + x86-64 Bo Thorsen + xtensa Chris Zankel + +Jesper Skov and Andrew Haley both did more than their fair share of +stepping through the code and tracking down bugs. + +Thanks also to Tom Tromey for bug fixes, documentation and +configuration help. + +Thanks to Jim Blandy, who provided some useful feedback on the libffi +interface. + +Andreas Tobler has done a tremendous amount of work on the testsuite. + +Alex Oliva solved the executable page problem for SElinux. + +The list above is almost certainly incomplete and inaccurate. I'm +happy to make corrections or additions upon request. + +If you have a problem, or have found a bug, please send a note to the +author at green@moxielogic.com, or the project mailing list at +libffi-discuss@sourceware.org. diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/acinclude.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/acinclude.m4 new file mode 100644 index 0000000..1a70efb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/acinclude.m4 @@ -0,0 +1,479 @@ +# mmap(2) blacklisting. 
Some platforms provide the mmap library routine +# but don't support all of the features we need from it. +AC_DEFUN([AC_FUNC_MMAP_BLACKLIST], +[ +AC_CHECK_HEADER([sys/mman.h], + [libffi_header_sys_mman_h=yes], [libffi_header_sys_mman_h=no]) +AC_CHECK_FUNC([mmap], [libffi_func_mmap=yes], [libffi_func_mmap=no]) +if test "$libffi_header_sys_mman_h" != yes \ + || test "$libffi_func_mmap" != yes; then + ac_cv_func_mmap_file=no + ac_cv_func_mmap_dev_zero=no + ac_cv_func_mmap_anon=no +else + AC_CACHE_CHECK([whether read-only mmap of a plain file works], + ac_cv_func_mmap_file, + [# Add a system to this blacklist if + # mmap(0, stat_size, PROT_READ, MAP_PRIVATE, fd, 0) doesn't return a + # memory area containing the same data that you'd get if you applied + # read() to the same fd. The only system known to have a problem here + # is VMS, where text files have record structure. + case "$host_os" in + vms* | ultrix*) + ac_cv_func_mmap_file=no ;; + *) + ac_cv_func_mmap_file=yes;; + esac]) + AC_CACHE_CHECK([whether mmap from /dev/zero works], + ac_cv_func_mmap_dev_zero, + [# Add a system to this blacklist if it has mmap() but /dev/zero + # does not exist, or if mmapping /dev/zero does not give anonymous + # zeroed pages with both the following properties: + # 1. If you map N consecutive pages in with one call, and then + # unmap any subset of those pages, the pages that were not + # explicitly unmapped remain accessible. + # 2. If you map two adjacent blocks of memory and then unmap them + # both at once, they must both go away. + # Systems known to be in this category are Windows (all variants), + # VMS, and Darwin. + case "$host_os" in + vms* | cygwin* | pe | mingw* | darwin* | ultrix* | hpux10* | hpux11.00) + ac_cv_func_mmap_dev_zero=no ;; + *) + ac_cv_func_mmap_dev_zero=yes;; + esac]) + + # Unlike /dev/zero, the MAP_ANON(YMOUS) defines can be probed for. + AC_CACHE_CHECK([for MAP_ANON(YMOUS)], ac_cv_decl_map_anon, + [AC_TRY_COMPILE( +[#include +#include +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +], +[int n = MAP_ANONYMOUS;], + ac_cv_decl_map_anon=yes, + ac_cv_decl_map_anon=no)]) + + if test $ac_cv_decl_map_anon = no; then + ac_cv_func_mmap_anon=no + else + AC_CACHE_CHECK([whether mmap with MAP_ANON(YMOUS) works], + ac_cv_func_mmap_anon, + [# Add a system to this blacklist if it has mmap() and MAP_ANON or + # MAP_ANONYMOUS, but using mmap(..., MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) + # doesn't give anonymous zeroed pages with the same properties listed + # above for use of /dev/zero. + # Systems known to be in this category are Windows, VMS, and SCO Unix. + case "$host_os" in + vms* | cygwin* | pe | mingw* | sco* | udk* ) + ac_cv_func_mmap_anon=no ;; + *) + ac_cv_func_mmap_anon=yes;; + esac]) + fi +fi + +if test $ac_cv_func_mmap_file = yes; then + AC_DEFINE(HAVE_MMAP_FILE, 1, + [Define if read-only mmap of a plain file works.]) +fi +if test $ac_cv_func_mmap_dev_zero = yes; then + AC_DEFINE(HAVE_MMAP_DEV_ZERO, 1, + [Define if mmap of /dev/zero works.]) +fi +if test $ac_cv_func_mmap_anon = yes; then + AC_DEFINE(HAVE_MMAP_ANON, 1, + [Define if mmap with MAP_ANON(YMOUS) works.]) +fi +]) + +dnl ---------------------------------------------------------------------- +dnl This whole bit snagged from libstdc++-v3, via libatomic. 
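For readers unfamiliar with the probe above, the following sketch shows the
anonymous-mmap pattern whose availability AC_FUNC_MMAP_BLACKLIST is testing
(it guards the HAVE_MMAP_ANON define). It is illustrative only and is not
part of the vendored macro file; it assumes a POSIX system where the only
portability wrinkle is the MAP_ANONYMOUS vs. MAP_ANON spelling, which is
exactly what the ac_cv_decl_map_anon check above covers.

    /* Illustrative sketch: request zeroed, private, anonymous pages,
       i.e. the capability probed for by HAVE_MMAP_ANON above. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MAP_ANONYMOUS
    #define MAP_ANONYMOUS MAP_ANON   /* same fallback the configure probe uses */
    #endif

    int main(void)
    {
      size_t len = 4096;
      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
      }
      memset(p, 0xAB, len);          /* the pages must be writable ...     */
      munmap(p, len);                /* ... and go away when unmapped      */
      return 0;
    }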
+ +dnl +dnl LIBFFI_ENABLE +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, permit a|b|c) +dnl (FEATURE, DEFAULT, HELP-ARG, HELP-STRING, SHELL-CODE-HANDLER) +dnl +dnl See docs/html/17_intro/configury.html#enable for documentation. +dnl +m4_define([LIBFFI_ENABLE],[dnl +m4_define([_g_switch],[--enable-$1])dnl +m4_define([_g_help],[AC_HELP_STRING(_g_switch$3,[$4 @<:@default=$2@:>@])])dnl + AC_ARG_ENABLE($1,_g_help, + m4_bmatch([$5], + [^permit ], + [[ + case "$enableval" in + m4_bpatsubst([$5],[permit ])) ;; + *) AC_MSG_ERROR(Unknown argument to enable/disable $1) ;; + dnl Idea for future: generate a URL pointing to + dnl "onlinedocs/configopts.html#whatever" + esac + ]], + [^$], + [[ + case "$enableval" in + yes|no) ;; + *) AC_MSG_ERROR(Argument to enable/disable $1 must be yes or no) ;; + esac + ]], + [[$5]]), + [enable_]m4_bpatsubst([$1],-,_)[=][$2]) +m4_undefine([_g_switch])dnl +m4_undefine([_g_help])dnl +]) + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. '{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. 
+ # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl If GNU ld is in use, check to see if tricky linker opts can be used. If +dnl the native linker is in use, all variables will be defined to something +dnl safe (like an empty string). +dnl +dnl Defines: +dnl SECTION_LDFLAGS='-Wl,--gc-sections' if possible +dnl OPT_LDFLAGS='-Wl,-O1' if possible +dnl LD (as a side effect of testing) +dnl Sets: +dnl with_gnu_ld +dnl libat_ld_is_gold (possibly) +dnl libat_gnu_ld_version (possibly) +dnl +dnl The last will be a single integer, e.g., version 1.23.45.0.67.89 will +dnl set libat_gnu_ld_version to 12345. Zeros cause problems. +dnl +AC_DEFUN([LIBFFI_CHECK_LINKER_FEATURES], [ + # If we're not using GNU ld, then there's no point in even trying these + # tests. Check for that first. We should have already tested for gld + # by now (in libtool), but require it now just to be safe... + test -z "$SECTION_LDFLAGS" && SECTION_LDFLAGS='' + test -z "$OPT_LDFLAGS" && OPT_LDFLAGS='' + AC_REQUIRE([AC_PROG_LD]) + AC_REQUIRE([AC_PROG_AWK]) + + # The name set by libtool depends on the version of libtool. Shame on us + # for depending on an impl detail, but c'est la vie. Older versions used + # ac_cv_prog_gnu_ld, but now it's lt_cv_prog_gnu_ld, and is copied back on + # top of with_gnu_ld (which is also set by --with-gnu-ld, so that actually + # makes sense). We'll test with_gnu_ld everywhere else, so if that isn't + # set (hence we're using an older libtool), then set it. + if test x${with_gnu_ld+set} != xset; then + if test x${ac_cv_prog_gnu_ld+set} != xset; then + # We got through "ac_require(ac_prog_ld)" and still not set? Huh? + with_gnu_ld=no + else + with_gnu_ld=$ac_cv_prog_gnu_ld + fi + fi + + # Start by getting the version number. I think the libtool test already + # does some of this, but throws away the result. + libat_ld_is_gold=no + if $LD --version 2>/dev/null | grep 'GNU gold'> /dev/null 2>&1; then + libat_ld_is_gold=yes + fi + libat_ld_is_lld=no + if $LD --version 2>/dev/null | grep 'LLD '> /dev/null 2>&1; then + libat_ld_is_lld=yes + fi + changequote(,) + ldver=`$LD --version 2>/dev/null | + sed -e 's/GNU gold /GNU ld /;s/GNU ld version /GNU ld /;s/GNU ld ([^)]*) /GNU ld /;s/GNU ld \([0-9.][0-9.]*\).*/\1/; q'` + changequote([,]) + libat_gnu_ld_version=`echo $ldver | \ + $AWK -F. 
'{ if (NF<3) [$]3=0; print ([$]1*100+[$]2)*100+[$]3 }'` + + # Set --gc-sections. + if test "$with_gnu_ld" = "notbroken"; then + # GNU ld it is! Joy and bunny rabbits! + + # All these tests are for C++; save the language and the compiler flags. + # Need to do this so that g++ won't try to link in libstdc++ + ac_test_CFLAGS="${CFLAGS+set}" + ac_save_CFLAGS="$CFLAGS" + CFLAGS='-x c++ -Wl,--gc-sections' + + # Check for -Wl,--gc-sections + # XXX This test is broken at the moment, as symbols required for linking + # are now in libsupc++ (not built yet). In addition, this test has + # cored on solaris in the past. In addition, --gc-sections doesn't + # really work at the moment (keeps on discarding used sections, first + # .eh_frame and now some of the glibc sections for iconv). + # Bzzzzt. Thanks for playing, maybe next time. + AC_MSG_CHECKING([for ld that supports -Wl,--gc-sections]) + AC_TRY_RUN([ + int main(void) + { + try { throw 1; } + catch (...) { }; + return 0; + } + ], [ac_sectionLDflags=yes],[ac_sectionLDflags=no], [ac_sectionLDflags=yes]) + if test "$ac_test_CFLAGS" = set; then + CFLAGS="$ac_save_CFLAGS" + else + # this is the suspicious part + CFLAGS='' + fi + if test "$ac_sectionLDflags" = "yes"; then + SECTION_LDFLAGS="-Wl,--gc-sections $SECTION_LDFLAGS" + fi + AC_MSG_RESULT($ac_sectionLDflags) + fi + + # Set linker optimization flags. + if test x"$with_gnu_ld" = x"yes"; then + OPT_LDFLAGS="-Wl,-O1 $OPT_LDFLAGS" + fi + + AC_SUBST(SECTION_LDFLAGS) + AC_SUBST(OPT_LDFLAGS) +]) + + +dnl +dnl Add version tags to symbols in shared library (or not), additionally +dnl marking other symbols as private/local (or not). +dnl +dnl --enable-symvers=style adds a version script to the linker call when +dnl creating the shared library. The choice of version script is +dnl controlled by 'style'. +dnl --disable-symvers does not. +dnl + Usage: LIBFFI_ENABLE_SYMVERS[(DEFAULT)] +dnl Where DEFAULT is either 'yes' or 'no'. Passing `yes' tries to +dnl choose a default style based on linker characteristics. Passing +dnl 'no' disables versioning. +dnl +AC_DEFUN([LIBFFI_ENABLE_SYMVERS], [ + +LIBFFI_ENABLE(symvers,yes,[=STYLE], + [enables symbol versioning of the shared library], + [permit yes|no|gnu*|sun]) + +# If we never went through the LIBFFI_CHECK_LINKER_FEATURES macro, then we +# don't know enough about $LD to do tricks... +AC_REQUIRE([LIBFFI_CHECK_LINKER_FEATURES]) + +# Turn a 'yes' into a suitable default. +if test x$enable_symvers = xyes ; then + # FIXME The following test is too strict, in theory. + if test $enable_shared = no || test "x$LD" = x; then + enable_symvers=no + else + if test $with_gnu_ld = yes ; then + enable_symvers=gnu + else + case ${target_os} in + # Sun symbol versioning exists since Solaris 2.5. + solaris2.[[5-9]]* | solaris2.1[[0-9]]*) + enable_symvers=sun ;; + *) + enable_symvers=no ;; + esac + fi + fi +fi + +# Check if 'sun' was requested on non-Solaris 2 platforms. +if test x$enable_symvers = xsun ; then + case ${target_os} in + solaris2*) + # All fine. + ;; + *) + # Unlikely to work. + AC_MSG_WARN([=== You have requested Sun symbol versioning, but]) + AC_MSG_WARN([=== you are not targetting Solaris 2.]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + ;; + esac +fi + +# Check to see if libgcc_s exists, indicating that shared libgcc is possible. 
+if test $enable_symvers != no; then + AC_MSG_CHECKING([for shared libgcc]) + ac_save_CFLAGS="$CFLAGS" + CFLAGS=' -lgcc_s' + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes, libat_shared_libgcc=no) + CFLAGS="$ac_save_CFLAGS" + if test $libat_shared_libgcc = no; then + cat > conftest.c <&1 >/dev/null \ + | sed -n 's/^.* -lgcc_s\([^ ]*\) .*$/\1/p'` +changequote([,])dnl + rm -f conftest.c conftest.so + if test x${libat_libgcc_s_suffix+set} = xset; then + CFLAGS=" -lgcc_s$libat_libgcc_s_suffix" + AC_TRY_LINK(, [return 0;], libat_shared_libgcc=yes) + CFLAGS="$ac_save_CFLAGS" + fi + fi + AC_MSG_RESULT($libat_shared_libgcc) +fi + +# For GNU ld, we need at least this version. The format is described in +# LIBFFI_CHECK_LINKER_FEATURES above. +libat_min_gnu_ld_version=21400 +# XXXXXXXXXXX libat_gnu_ld_version=21390 + +# Check to see if unspecified "yes" value can win, given results above. +# Change "yes" into either "no" or a style name. +if test $enable_symvers != no && test $libat_shared_libgcc = yes; then + if test $with_gnu_ld = yes; then + if test $libat_gnu_ld_version -ge $libat_min_gnu_ld_version ; then + enable_symvers=gnu + elif test $libat_ld_is_gold = yes ; then + enable_symvers=gnu + elif test $libat_ld_is_lld = yes ; then + enable_symvers=gnu + else + # The right tools, the right setup, but too old. Fallbacks? + AC_MSG_WARN(=== Linker version $libat_gnu_ld_version is too old for) + AC_MSG_WARN(=== full symbol versioning support in this release of GCC.) + AC_MSG_WARN(=== You would need to upgrade your binutils to version) + AC_MSG_WARN(=== $libat_min_gnu_ld_version or later and rebuild GCC.) + if test $libat_gnu_ld_version -ge 21200 ; then + # Globbing fix is present, proper block support is not. + dnl AC_MSG_WARN([=== Dude, you are soooo close. Maybe we can fake it.]) + dnl enable_symvers=??? + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + else + # 2.11 or older. + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi + fi + elif test $enable_symvers = sun; then + : All interesting versions of Sun ld support sun style symbol versioning. 
+ else + # just fail for now + AC_MSG_WARN([=== You have requested some kind of symbol versioning, but]) + AC_MSG_WARN([=== either you are not using a supported linker, or you are]) + AC_MSG_WARN([=== not building a shared libgcc_s (which is required).]) + AC_MSG_WARN([=== Symbol versioning will be disabled.]) + enable_symvers=no + fi +fi +if test $enable_symvers = gnu; then + AC_DEFINE(LIBFFI_GNU_SYMBOL_VERSIONING, 1, + [Define to 1 if GNU symbol versioning is used for libatomic.]) +fi + +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB, test $enable_symvers != no) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_GNU, test $enable_symvers = gnu) +AM_CONDITIONAL(LIBFFI_BUILD_VERSIONED_SHLIB_SUN, test $enable_symvers = sun) +AC_MSG_NOTICE(versioning on shared library symbols is $enable_symvers) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/autogen.sh b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/autogen.sh new file mode 100755 index 0000000..fb014a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/autogen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec autoreconf -v -i diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/config.guess b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/config.guess new file mode 100644 index 0000000..e94095c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/config.guess @@ -0,0 +1,1687 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2020 Free Software Foundation, Inc. + +timestamp='2020-07-12' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2020 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." 
+ +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +tmp= +# shellcheck disable=SC2172 +trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 + +set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 + : "${TMPDIR=/tmp}" + # shellcheck disable=SC2039 + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } + dummy=$tmp/dummy + case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" + for driver in cc gcc c89 c99 ; do + if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD="$driver" + break + fi + done + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; + esac +} + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if test -f /.attbin/uname ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "$UNAME_SYSTEM" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + set_cc_for_build + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + + # If ldd exists, use it to detect musl libc. + if command -v ldd >/dev/null && \ + ldd --version 2>&1 | grep -q ^musl + then + LIBC=musl + fi + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. 
+ # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ + echo unknown)` + case "$UNAME_MACHINE_ARCH" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "$UNAME_MACHINE_ARCH" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "$UNAME_MACHINE_ARCH" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "$UNAME_VERSION" in + Debian*) + release='-gnu' + ;; + *) + release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "$machine-${os}${release}${abi-}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" + exit ;; + *:ekkoBSD:*:*) + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" + exit ;; + *:SolidBSD:*:*) + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" + exit ;; + *:OS108:*:*) + echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE" + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:MirBSD:*:*) + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Twizzler:*:*) + echo "$UNAME_MACHINE"-unknown-twizzler + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox + exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. 
This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix"$UNAME_RELEASE" + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux"$UNAME_RELEASE" + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos"$UNAME_RELEASE" + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos"$UNAME_RELEASE" + ;; + sun4) + echo sparc-sun-sunos"$UNAME_RELEASE" + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos"$UNAME_RELEASE" + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint"$UNAME_RELEASE" + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint"$UNAME_RELEASE" + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint"$UNAME_RELEASE" + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten"$UNAME_RELEASE" + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten"$UNAME_RELEASE" + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix"$UNAME_RELEASE" + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix"$UNAME_RELEASE" + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix"$UNAME_RELEASE" + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=`echo "$UNAME_RELEASE" | sed -n 
's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`"$dummy" "$dummyarg"` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos"$UNAME_RELEASE" + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] + then + if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ + [ "$TARGET_BINARY_INTERFACE"x = x ] + then + echo m88k-dg-dgux"$UNAME_RELEASE" + else + echo m88k-dg-dguxbcs"$UNAME_RELEASE" + fi + else + echo i586-dg-dgux"$UNAME_RELEASE" + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + fi + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + fi + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + case "$UNAME_MACHINE" in + 9000/31?) HP_ARCH=m68000 ;; + 9000/[34]??) 
HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "$sc_cpu_version" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "$sc_kernel_bits" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "$HP_ARCH" = "" ]; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ "$HP_ARCH" = hppa2.0w ] + then + set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux"$HPUX_REV" + exit ;; + 3050*:HI-UX:*:*) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo "$UNAME_MACHINE"-unknown-osf1mk + else + echo "$UNAME_MACHINE"-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi"$UNAME_RELEASE" + exit ;; + *:BSD/OS:*:*) + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" + exit ;; + arm:FreeBSD:*:*) + UNAME_PROCESSOR=`uname -p` + set_cc_for_build + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi + else + echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 
's/[-(].*//'`"-gnueabihf + fi + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case "$UNAME_PROCESSOR" in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" + exit ;; + i*:CYGWIN*:*) + echo "$UNAME_MACHINE"-pc-cygwin + exit ;; + *:MINGW64*:*) + echo "$UNAME_MACHINE"-pc-mingw64 + exit ;; + *:MINGW*:*) + echo "$UNAME_MACHINE"-pc-mingw32 + exit ;; + *:MSYS*:*) + echo "$UNAME_MACHINE"-pc-msys + exit ;; + i*:PW*:*) + echo "$UNAME_MACHINE"-pc-pw32 + exit ;; + *:Interix*:*) + case "$UNAME_MACHINE" in + x86) + echo i586-pc-interix"$UNAME_RELEASE" + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix"$UNAME_RELEASE" + exit ;; + IA64) + echo ia64-unknown-interix"$UNAME_RELEASE" + exit ;; + esac ;; + i*:UWIN*:*) + echo "$UNAME_MACHINE"-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-pc-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" + exit ;; + *:GNU:*:*) + # the GNU system + echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" + exit ;; + *:Minix:*:*) + echo "$UNAME_MACHINE"-unknown-minix + exit ;; + aarch64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arm*:Linux:*:*) + set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi + else + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + cris:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + crisv32:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + frv:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + hexagon:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:Linux:*:*) + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" + exit ;; + ia64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m32r*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m68*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + set_cc_for_build + IS_GLIBC=0 + test x"${LIBC}" = xgnu && IS_GLIBC=1 + sed 's/^ //' << EOF > "$dummy.c" + #undef CPU + #undef mips + #undef mipsel + #undef mips64 + #undef mips64el + #if ${IS_GLIBC} && defined(_ABI64) + LIBCABI=gnuabi64 + #else + #if ${IS_GLIBC} && defined(_ABIN32) + LIBCABI=gnuabin32 + #else + LIBCABI=${LIBC} + #endif + #endif + + #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa64r6 + #else + #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa32r6 + #else + #if defined(__mips64) + CPU=mips64 + #else + CPU=mips + #endif + #endif + #endif + + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + MIPS_ENDIAN=el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + MIPS_ENDIAN= + #else + MIPS_ENDIAN= + #endif + #endif +EOF + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'`" + test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } + ;; + mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-"$LIBC" + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-"$LIBC" + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-"$LIBC" + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-"$LIBC" + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-"$LIBC" + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-"$LIBC" + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + 
exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" + exit ;; + sh64*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + sh*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + tile*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + vax:Linux:*:*) + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" + exit ;; + x86_64:Linux:*:*) + set_cc_for_build + LIBCABI=$LIBC + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_X32 >/dev/null + then + LIBCABI="$LIBC"x32 + fi + fi + echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI" + exit ;; + xtensa*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo "$UNAME_MACHINE"-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo "$UNAME_MACHINE"-unknown-stop + exit ;; + i*86:atheos:*:*) + echo "$UNAME_MACHINE"-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo "$UNAME_MACHINE"-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos"$UNAME_RELEASE" + exit ;; + i*86:*DOS:*:*) + echo "$UNAME_MACHINE"-pc-msdosdjgpp + exit ;; + i*86:*:4.*:*) + UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" + else + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}" + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" + else + echo "$UNAME_MACHINE"-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. 
+ echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos"$UNAME_RELEASE" + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos"$UNAME_RELEASE" + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv"$UNAME_RELEASE" + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo "$UNAME_MACHINE"-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo "$UNAME_MACHINE"-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux"$UNAME_RELEASE" + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv"$UNAME_RELEASE" + else + echo mips-unknown-sysv"$UNAME_RELEASE" + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. 
+ echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. + echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux"$UNAME_RELEASE" + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux"$UNAME_RELEASE" + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux"$UNAME_RELEASE" + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody"$UNAME_RELEASE" + exit ;; + *:Rhapsody:*:*) + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" + exit ;; + arm64:Darwin:*:*) + echo aarch64-apple-darwin"$UNAME_RELEASE" + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` + case $UNAME_PROCESSOR in + unknown) UNAME_PROCESSOR=powerpc ;; + esac + if command -v xcode-select > /dev/null 2> /dev/null && \ + ! xcode-select --print-path > /dev/null 2> /dev/null ; then + # Avoid executing cc if there is no toolchain installed as + # cc will be a stub that puts up a graphical alert + # prompting the user to install developer tools. + CC_FOR_BUILD=no_compiler_found + else + set_cc_for_build + fi + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # uname -m returns i386 or x86_64 + UNAME_PROCESSOR=$UNAME_MACHINE + fi + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ # shellcheck disable=SC2154 + if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo "$UNAME_MACHINE"-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux"$UNAME_RELEASE" + exit ;; + *:DragonFly:*:*) + echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "$UNAME_MACHINE" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" + exit ;; + i*86:rdos:*:*) + echo "$UNAME_MACHINE"-pc-rdos + exit ;; + i*86:AROS:*:*) + echo "$UNAME_MACHINE"-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo "$UNAME_MACHINE"-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; + *:Unleashed:*:*) + echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE" + exit ;; +esac + +# No uname command or uname output not recognized. +set_cc_for_build +cat > "$dummy.c" < +#include +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#include +#if defined(_SIZE_T_) || defined(SIGLOST) +#include +#endif +#endif +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... */ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); +#endif + +#if defined (vax) +#if !defined (ultrix) +#include +#if defined (BSD) +#if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +#else +#if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#endif +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#else +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname un; + uname (&un); + printf ("vax-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("vax-dec-ultrix\n"); exit (0); +#endif +#endif +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname *un; + uname (&un); + printf ("mips-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("mips-dec-ultrix\n"); exit (0); +#endif +#endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. +test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } + +echo "$0: unable to guess system type" >&2 + +case "$UNAME_MACHINE:$UNAME_SYSTEM" in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 <&2 <&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" +EOF +fi + +exit 1 + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/config.sub b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/config.sub new file mode 100644 index 0000000..14b5150 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/config.sub @@ -0,0 +1,1851 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2020 Free Software Foundation, Inc. + +timestamp='2020-08-05' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2020 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + + *local*) + # First pass through any local machine types. 
+ echo "$1" + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Split fields of configuration type +# shellcheck disable=SC2162 +IFS="-" read field1 field2 field3 field4 <&2 + exit 1 + ;; + *-*-*-*) + basic_machine=$field1-$field2 + basic_os=$field3-$field4 + ;; + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + basic_os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + basic_os=linux-android + ;; + *) + basic_machine=$field1-$field2 + basic_os=$field3 + ;; + esac + ;; + *-*) + # A lone config we happen to match not fitting any pattern + case $field1-$field2 in + decstation-3100) + basic_machine=mips-dec + basic_os= + ;; + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + basic_machine=$field1 + basic_os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* | 3100* \ + | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ + | ultra | tti* | harris | dolphin | highlevel | gould \ + | cbm | ns | masscomp | apple | axis | knuth | cray \ + | microblaze* | sim | cisco \ + | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + basic_os= + ;; + *) + basic_machine=$field1 + basic_os=$field2 + ;; + esac + ;; + esac + ;; + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. 
+ case $field1 in + 386bsd) + basic_machine=i386-pc + basic_os=bsd + ;; + a29khif) + basic_machine=a29k-amd + basic_os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + basic_os=scout + ;; + alliant) + basic_machine=fx80-alliant + basic_os= + ;; + altos | altos3068) + basic_machine=m68k-altos + basic_os= + ;; + am29k) + basic_machine=a29k-none + basic_os=bsd + ;; + amdahl) + basic_machine=580-amdahl + basic_os=sysv + ;; + amiga) + basic_machine=m68k-unknown + basic_os= + ;; + amigaos | amigados) + basic_machine=m68k-unknown + basic_os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + basic_os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + basic_os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + basic_os=bsd + ;; + aros) + basic_machine=i386-pc + basic_os=aros + ;; + aux) + basic_machine=m68k-apple + basic_os=aux + ;; + balance) + basic_machine=ns32k-sequent + basic_os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + basic_os=linux + ;; + cegcc) + basic_machine=arm-unknown + basic_os=cegcc + ;; + convex-c1) + basic_machine=c1-convex + basic_os=bsd + ;; + convex-c2) + basic_machine=c2-convex + basic_os=bsd + ;; + convex-c32) + basic_machine=c32-convex + basic_os=bsd + ;; + convex-c34) + basic_machine=c34-convex + basic_os=bsd + ;; + convex-c38) + basic_machine=c38-convex + basic_os=bsd + ;; + cray) + basic_machine=j90-cray + basic_os=unicos + ;; + crds | unos) + basic_machine=m68k-crds + basic_os= + ;; + da30) + basic_machine=m68k-da30 + basic_os= + ;; + decstation | pmax | pmin | dec3100 | decstatn) + basic_machine=mips-dec + basic_os= + ;; + delta88) + basic_machine=m88k-motorola + basic_os=sysv3 + ;; + dicos) + basic_machine=i686-pc + basic_os=dicos + ;; + djgpp) + basic_machine=i586-pc + basic_os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + basic_os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + basic_os=ose + ;; + gmicro) + basic_machine=tron-gmicro + basic_os=sysv + ;; + go32) + basic_machine=i386-pc + basic_os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + basic_os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + basic_os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + basic_os=hms + ;; + harris) + basic_machine=m88k-harris + basic_os=sysv3 + ;; + hp300 | hp300hpux) + basic_machine=m68k-hp + basic_os=hpux + ;; + hp300bsd) + basic_machine=m68k-hp + basic_os=bsd + ;; + hppaosf) + basic_machine=hppa1.1-hp + basic_os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + basic_os=proelf + ;; + i386mach) + basic_machine=i386-mach + basic_os=mach + ;; + isi68 | isi) + basic_machine=m68k-isi + basic_os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + basic_os=linux + ;; + magnum | m3230) + basic_machine=mips-mips + basic_os=sysv + ;; + merlin) + basic_machine=ns32k-utek + basic_os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + basic_os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + basic_os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + basic_os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + basic_os=coff + ;; + morphos) + basic_machine=powerpc-unknown + basic_os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + basic_os=moxiebox + ;; + msdos) + basic_machine=i386-pc + basic_os=msdos + ;; + msys) + basic_machine=i686-pc + basic_os=msys + ;; + mvs) + basic_machine=i370-ibm + basic_os=mvs + ;; + nacl) + basic_machine=le32-unknown + basic_os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + basic_os=sysv4 + ;; + netbsd386) + basic_machine=i386-pc + basic_os=netbsd + ;; + 
netwinder) + basic_machine=armv4l-rebel + basic_os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + basic_os=newsos + ;; + news1000) + basic_machine=m68030-sony + basic_os=newsos + ;; + necv70) + basic_machine=v70-nec + basic_os=sysv + ;; + nh3000) + basic_machine=m68k-harris + basic_os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + basic_os=cxux + ;; + nindy960) + basic_machine=i960-intel + basic_os=nindy + ;; + mon960) + basic_machine=i960-intel + basic_os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + basic_os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + basic_os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + basic_os=ose + ;; + os68k) + basic_machine=m68k-none + basic_os=os68k + ;; + paragon) + basic_machine=i860-intel + basic_os=osf + ;; + parisc) + basic_machine=hppa-unknown + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp + ;; + pw32) + basic_machine=i586-unknown + basic_os=pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + basic_os=rdos + ;; + rdos32) + basic_machine=i386-pc + basic_os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + basic_os=coff + ;; + sa29200) + basic_machine=a29k-amd + basic_os=udi + ;; + sei) + basic_machine=mips-sei + basic_os=seiux + ;; + sequent) + basic_machine=i386-sequent + basic_os= + ;; + sps7) + basic_machine=m68k-bull + basic_os=sysv2 + ;; + st2000) + basic_machine=m68k-tandem + basic_os= + ;; + stratus) + basic_machine=i860-stratus + basic_os=sysv4 + ;; + sun2) + basic_machine=m68000-sun + basic_os= + ;; + sun2os3) + basic_machine=m68000-sun + basic_os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + basic_os=sunos4 + ;; + sun3) + basic_machine=m68k-sun + basic_os= + ;; + sun3os3) + basic_machine=m68k-sun + basic_os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + basic_os=sunos4 + ;; + sun4) + basic_machine=sparc-sun + basic_os= + ;; + sun4os3) + basic_machine=sparc-sun + basic_os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + basic_os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + basic_os=solaris2 + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + basic_os= + ;; + sv1) + basic_machine=sv1-cray + basic_os=unicos + ;; + symmetry) + basic_machine=i386-sequent + basic_os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + basic_os=unicos + ;; + t90) + basic_machine=t90-cray + basic_os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + basic_os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + basic_os=tpf + ;; + udi29k) + basic_machine=a29k-amd + basic_os=udi + ;; + ultra3) + basic_machine=a29k-nyu + basic_os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + basic_os=none + ;; + vaxv) + basic_machine=vax-dec + basic_os=sysv + ;; + vms) + basic_machine=vax-dec + basic_os=vms + ;; + vsta) + basic_machine=i386-pc + basic_os=vsta + ;; + vxworks960) + basic_machine=i960-wrs + basic_os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + basic_os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + basic_os=vxworks + ;; + xbox) + basic_machine=i686-pc + basic_os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + basic_os=unicos + ;; + *) + basic_machine=$1 + basic_os= + ;; + esac + ;; +esac + +# Decode 1-component or ad-hoc basic machines +case $basic_machine in + # Here we handle the default manufacturer of certain CPU types. It is in + # some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + cpu=hppa1.1 + vendor=winbond + ;; + op50n) + cpu=hppa1.1 + vendor=oki + ;; + op60c) + cpu=hppa1.1 + vendor=oki + ;; + ibm*) + cpu=i370 + vendor=ibm + ;; + orion105) + cpu=clipper + vendor=highlevel + ;; + mac | mpw | mac-mpw) + cpu=m68k + vendor=apple + ;; + pmac | pmac-mpw) + cpu=powerpc + vendor=apple + ;; + + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + cpu=m68000 + vendor=att + ;; + 3b*) + cpu=we32k + vendor=att + ;; + bluegene*) + cpu=powerpc + vendor=ibm + basic_os=cnk + ;; + decsystem10* | dec10*) + cpu=pdp10 + vendor=dec + basic_os=tops10 + ;; + decsystem20* | dec20*) + cpu=pdp10 + vendor=dec + basic_os=tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + cpu=m68k + vendor=motorola + ;; + dpx2*) + cpu=m68k + vendor=bull + basic_os=sysv3 + ;; + encore | umax | mmax) + cpu=ns32k + vendor=encore + ;; + elxsi) + cpu=elxsi + vendor=elxsi + basic_os=${basic_os:-bsd} + ;; + fx2800) + cpu=i860 + vendor=alliant + ;; + genix) + cpu=ns32k + vendor=ns + ;; + h3050r* | hiux*) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + cpu=m68000 + vendor=hp + ;; + hp9k3[2-9][0-9]) + cpu=m68k + vendor=hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + i*86v32) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=sysv32 + ;; + i*86v4*) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=sysv4 + ;; + i*86v) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=sysv + ;; + i*86sol2) + cpu=`echo "$1" | sed -e 's/86.*/86/'` + vendor=pc + basic_os=solaris2 + ;; + j90 | j90-cray) + cpu=j90 + vendor=cray + basic_os=${basic_os:-unicos} + ;; + iris | iris4d) + cpu=mips + vendor=sgi + case $basic_os in + irix*) + ;; + *) + basic_os=irix4 + ;; + esac + ;; + miniframe) + cpu=m68000 + vendor=convergent + ;; + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) + cpu=m68k + vendor=atari + basic_os=mint + ;; + news-3600 | risc-news) + cpu=mips + vendor=sony + basic_os=newsos + ;; + next | m*-next) + cpu=m68k + vendor=next + case $basic_os in + openstep*) + ;; + nextstep*) + ;; + ns2*) + basic_os=nextstep2 + ;; + *) + basic_os=nextstep3 + ;; + esac + ;; + np1) + cpu=np1 + vendor=gould + ;; + op50n-* | op60c-*) + cpu=hppa1.1 + vendor=oki + basic_os=proelf + ;; + pa-hitachi) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + pbd) + cpu=sparc + vendor=tti + ;; + pbb) + cpu=m68k + vendor=tti + ;; + pc532) + cpu=ns32k + vendor=pc532 + ;; + pn) + cpu=pn + vendor=gould + ;; + power) + cpu=power + vendor=ibm + ;; + ps2) + cpu=i386 + vendor=ibm + ;; + rm[46]00) + cpu=mips + vendor=siemens + ;; + rtpc | rtpc-*) + cpu=romp + vendor=ibm + ;; + sde) + cpu=mipsisa32 + vendor=sde + basic_os=${basic_os:-elf} + ;; + simso-wrs) + cpu=sparclite + vendor=wrs + basic_os=vxworks + ;; + tower | tower-32) + cpu=m68k + vendor=ncr + ;; + vpp*|vx|vx-*) + cpu=f301 + 
vendor=fujitsu + ;; + w65) + cpu=w65 + vendor=wdc + ;; + w89k-*) + cpu=hppa1.1 + vendor=winbond + basic_os=proelf + ;; + none) + cpu=none + vendor=none + ;; + leon|leon[3-9]) + cpu=sparc + vendor=$basic_machine + ;; + leon-*|leon[3-9]-*) + cpu=sparc + vendor=`echo "$basic_machine" | sed 's/-.*//'` + ;; + + *-*) + # shellcheck disable=SC2162 + IFS="-" read cpu vendor <&2 + exit 1 + ;; + esac + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $vendor in + digital*) + vendor=dec + ;; + commodore*) + vendor=cbm + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x$basic_os != x ] +then + +# First recognize some ad-hoc caes, or perhaps split kernel-os, or else just +# set os. +case $basic_os in + gnu/linux*) + kernel=linux + os=`echo $basic_os | sed -e 's|gnu/linux|gnu|'` + ;; + nto-qnx*) + kernel=nto + os=`echo $basic_os | sed -e 's|nto-qnx|qnx|'` + ;; + *-*) + # shellcheck disable=SC2162 + IFS="-" read kernel os <&2 + exit 1 + ;; +esac + +# As a final step for OS-related things, validate the OS-kernel combination +# (given a valid OS), if there is a kernel. +case $kernel-$os in + linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* ) + ;; + -dietlibc* | -newlib* | -musl* | -uclibc* ) + # These are just libc implementations, not actual OSes, and thus + # require a kernel. + echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 + exit 1 + ;; + kfreebsd*-gnu* | kopensolaris*-gnu*) + ;; + nto-qnx*) + ;; + *-eabi* | *-gnueabi*) + ;; + -*) + # Blank kernel with real OS is always fine. + ;; + *-*) + echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 + exit 1 + ;; +esac + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+case $vendor in + unknown) + case $cpu-$os in + *-riscix*) + vendor=acorn + ;; + *-sunos*) + vendor=sun + ;; + *-cnk* | *-aix*) + vendor=ibm + ;; + *-beos*) + vendor=be + ;; + *-hpux*) + vendor=hp + ;; + *-mpeix*) + vendor=hp + ;; + *-hiux*) + vendor=hitachi + ;; + *-unos*) + vendor=crds + ;; + *-dgux*) + vendor=dg + ;; + *-luna*) + vendor=omron + ;; + *-genix*) + vendor=ns + ;; + *-clix*) + vendor=intergraph + ;; + *-mvs* | *-opened*) + vendor=ibm + ;; + *-os400*) + vendor=ibm + ;; + s390-* | s390x-*) + vendor=ibm + ;; + *-ptx*) + vendor=sequent + ;; + *-tpf*) + vendor=ibm + ;; + *-vxsim* | *-vxworks* | *-windiss*) + vendor=wrs + ;; + *-aux*) + vendor=apple + ;; + *-hms*) + vendor=hitachi + ;; + *-mpw* | *-macos*) + vendor=apple + ;; + *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) + vendor=atari + ;; + *-vos*) + vendor=stratus + ;; + esac + ;; +esac + +echo "$cpu-$vendor-${kernel:+$kernel-}$os" +exit + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure.ac b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure.ac new file mode 100644 index 0000000..093b87d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure.ac @@ -0,0 +1,415 @@ +dnl Process this with autoconf to create configure + +AC_PREREQ(2.68) + +AC_INIT([libffi], [3.3], [http://github.com/libffi/libffi/issues]) +AC_CONFIG_HEADERS([fficonfig.h]) + +AC_CANONICAL_SYSTEM +target_alias=${target_alias-$host_alias} + +case "${host}" in + frv*-elf) + LDFLAGS=`echo $LDFLAGS | sed "s/\-B[^ ]*libgloss\/frv\///"`\ -B`pwd`/../libgloss/frv/ + ;; +esac + +AX_ENABLE_BUILDDIR + +AM_INIT_AUTOMAKE + +# The same as in boehm-gc and libstdc++. Have to borrow it from there. +# We must force CC to /not/ be precious variables; otherwise +# the wrong, non-multilib-adjusted value will be used in multilibs. +# As a side effect, we have to subst CFLAGS ourselves. +# Also save and restore CFLAGS, since AC_PROG_CC will come up with +# defaults of its own if none are provided. + +m4_rename([_AC_ARG_VAR_PRECIOUS],[real_PRECIOUS]) +m4_define([_AC_ARG_VAR_PRECIOUS],[]) +save_CFLAGS=$CFLAGS +AC_PROG_CC +AC_PROG_CXX +CFLAGS=$save_CFLAGS +m4_undefine([_AC_ARG_VAR_PRECIOUS]) +m4_rename_force([real_PRECIOUS],[_AC_ARG_VAR_PRECIOUS]) + +AC_SUBST(CFLAGS) + +AM_PROG_AS +AM_PROG_CC_C_O +AC_PROG_LIBTOOL +AC_CONFIG_MACRO_DIR([m4]) + +# Test for 64-bit build. +AC_CHECK_SIZEOF([size_t]) + +AX_COMPILER_VENDOR +AX_CC_MAXOPT +# The AX_CFLAGS_WARN_ALL macro doesn't currently work for sunpro +# compiler. 
+if test "$ax_cv_c_compiler_vendor" != "sun"; then + AX_CFLAGS_WARN_ALL +fi + +if test "x$GCC" = "xyes"; then + CFLAGS="$CFLAGS -fexceptions" +fi + +cat > local.exp < conftest.s + if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then + libffi_cv_as_x86_pcrel=yes + fi + ]) + if test "x$libffi_cv_as_x86_pcrel" = xyes; then + AC_DEFINE(HAVE_AS_X86_PCREL, 1, + [Define if your assembler supports PC relative relocs.]) + fi + ;; + + S390) + AC_CACHE_CHECK([compiler uses zarch features], + libffi_cv_as_s390_zarch, [ + libffi_cv_as_s390_zarch=no + echo 'void foo(void) { bar(); bar(); }' > conftest.c + if $CC $CFLAGS -S conftest.c > /dev/null 2>&1; then + if grep -q brasl conftest.s; then + libffi_cv_as_s390_zarch=yes + fi + fi + ]) + if test "x$libffi_cv_as_s390_zarch" = xyes; then + AC_DEFINE(HAVE_AS_S390_ZARCH, 1, + [Define if the compiler uses zarch features.]) + fi + ;; +esac + +AC_CACHE_CHECK([whether compiler supports pointer authentication], + libffi_cv_as_ptrauth, [ + libffi_cv_as_ptrauth=unknown + AC_TRY_COMPILE(,[ +#ifdef __clang__ +# if __has_feature(ptrauth_calls) +# define HAVE_PTRAUTH 1 +# endif +#endif + +#ifndef HAVE_PTRAUTH +# error Pointer authentication not supported +#endif + ], + [libffi_cv_as_ptrauth=yes], + [libffi_cv_as_ptrauth=no]) +]) +if test "x$libffi_cv_as_ptrauth" = xyes; then + AC_DEFINE(HAVE_PTRAUTH, 1, + [Define if your compiler supports pointer authentication.]) +fi + +# On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. +AC_ARG_ENABLE(pax_emutramp, + [ --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC], + if test "$enable_pax_emutramp" = "yes"; then + AC_DEFINE(FFI_MMAP_EXEC_EMUTRAMP_PAX, 1, + [Define this if you want to enable pax emulated trampolines]) + fi) + +LT_SYS_SYMBOL_USCORE +if test "x$sys_symbol_underscore" = xyes; then + AC_DEFINE(SYMBOL_UNDERSCORE, 1, [Define if symbols are underscored.]) +fi + +FFI_EXEC_TRAMPOLINE_TABLE=0 +case "$target" in + *arm*-apple-* | aarch64-apple-*) + FFI_EXEC_TRAMPOLINE_TABLE=1 + AC_DEFINE(FFI_EXEC_TRAMPOLINE_TABLE, 1, + [Cannot use PROT_EXEC on this target, so, we revert to + alternative means]) + ;; + *-apple-* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris* | *-linux-android*) + AC_DEFINE(FFI_MMAP_EXEC_WRIT, 1, + [Cannot use malloc on this target, so, we revert to + alternative means]) + ;; +esac +AM_CONDITIONAL(FFI_EXEC_TRAMPOLINE_TABLE, test x$FFI_EXEC_TRAMPOLINE_TABLE = x1) +AC_SUBST(FFI_EXEC_TRAMPOLINE_TABLE) + +if test x$TARGET = xX86_64; then + AC_CACHE_CHECK([toolchain supports unwind section type], + libffi_cv_as_x86_64_unwind_section_type, [ + cat > conftest1.s << EOF +.text +.globl foo +foo: +jmp bar +.section .eh_frame,"a",@unwind +bar: +EOF + + cat > conftest2.c << EOF +extern void foo(); +int main(){foo();} +EOF + + libffi_cv_as_x86_64_unwind_section_type=no + # we ensure that we can compile _and_ link an assembly file containing an @unwind section + # since the compiler can support it and not the linker (ie old binutils) + if $CC -Wa,--fatal-warnings $CFLAGS -c conftest1.s > /dev/null 2>&1 && \ + $CC conftest2.c conftest1.o > /dev/null 2>&1 ; then + libffi_cv_as_x86_64_unwind_section_type=yes + fi + ]) + if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then + AC_DEFINE(HAVE_AS_X86_64_UNWIND_SECTION_TYPE, 1, + [Define if your assembler supports unwind section type.]) + fi +fi + +if test "x$GCC" = "xyes"; then + AX_CHECK_COMPILE_FLAG(-fno-lto, libffi_cv_no_lto=-fno-lto) + + AC_CACHE_CHECK([whether .eh_frame section should be 
read-only], + libffi_cv_ro_eh_frame, [ + libffi_cv_ro_eh_frame=yes + echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c + if $CC $CFLAGS -c -fpic -fexceptions $libffi_cv_no_lto -o conftest.o conftest.c > /dev/null 2>&1; then + if readelf -WS conftest.o | grep -q -n 'eh_frame .* WA'; then + libffi_cv_ro_eh_frame=no + fi + fi + rm -f conftest.* + ]) + if test "x$libffi_cv_ro_eh_frame" = xyes; then + AC_DEFINE(HAVE_RO_EH_FRAME, 1, + [Define if .eh_frame sections should be read-only.]) + AC_DEFINE(EH_FRAME_FLAGS, "a", + [Define to the flags needed for the .section .eh_frame directive. ]) + else + AC_DEFINE(EH_FRAME_FLAGS, "aw", + [Define to the flags needed for the .section .eh_frame directive. ]) + fi + + AC_CACHE_CHECK([for __attribute__((visibility("hidden")))], + libffi_cv_hidden_visibility_attribute, [ + echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c + libffi_cv_hidden_visibility_attribute=no + if AC_TRY_COMMAND(${CC-cc} -Werror -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then + if egrep '(\.hidden|\.private_extern).*foo' conftest.s >/dev/null; then + libffi_cv_hidden_visibility_attribute=yes + fi + fi + rm -f conftest.* + ]) + if test $libffi_cv_hidden_visibility_attribute = yes; then + AC_DEFINE(HAVE_HIDDEN_VISIBILITY_ATTRIBUTE, 1, + [Define if __attribute__((visibility("hidden"))) is supported.]) + fi +fi + +AC_ARG_ENABLE(docs, + AC_HELP_STRING([--disable-docs], + [Disable building of docs (default: no)]), + [enable_docs=no], + [enable_docs=yes]) +AM_CONDITIONAL(BUILD_DOCS, [test x$enable_docs = xyes]) + +AH_BOTTOM([ +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif +]) + +AC_SUBST(TARGET) +AC_SUBST(TARGETDIR) + +changequote(<,>) +TARGET_OBJ= +for i in $SOURCES; do + TARGET_OBJ="${TARGET_OBJ} src/${TARGETDIR}/"`echo $i | sed 's/[cS]$/lo/'` +done +changequote([,]) +AC_SUBST(TARGET_OBJ) + +AC_SUBST(SHELL) + +AC_ARG_ENABLE(debug, +[ --enable-debug debugging mode], + if test "$enable_debug" = "yes"; then + AC_DEFINE(FFI_DEBUG, 1, [Define this if you want extra debugging.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(structs, +[ --disable-structs omit code for struct support], + if test "$enable_structs" = "no"; then + AC_DEFINE(FFI_NO_STRUCTS, 1, [Define this if you do not want support for aggregate types.]) + fi) +AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes") + +AC_ARG_ENABLE(raw-api, +[ --disable-raw-api make the raw api unavailable], + if test "$enable_raw_api" = "no"; then + AC_DEFINE(FFI_NO_RAW_API, 1, [Define this if you do not want support for the raw API.]) + fi) + +AC_ARG_ENABLE(purify-safety, +[ --enable-purify-safety purify-safe mode], + if test "$enable_purify_safety" = "yes"; then + AC_DEFINE(USING_PURIFY, 1, [Define this if you are using Purify and want to suppress spurious messages.]) + fi) + +AC_ARG_ENABLE(multi-os-directory, +[ --disable-multi-os-directory + disable use of gcc --print-multi-os-directory to change the library installation directory]) + +# These variables are only ever used when we cross-build to X86_WIN32. +# And we only support this with GCC, so... 
+if test "x$GCC" = "xyes"; then + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + toolexecdir='${exec_prefix}'/'$(target_alias)' + toolexeclibdir='${toolexecdir}'/lib + else + toolexecdir='${libdir}'/gcc-lib/'$(target_alias)' + toolexeclibdir='${libdir}' + fi + if test x"$enable_multi_os_directory" != x"no"; then + multi_os_directory=`$CC $CFLAGS -print-multi-os-directory` + case $multi_os_directory in + .) ;; # Avoid trailing /. + ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;; + esac + fi + AC_SUBST(toolexecdir) +else + toolexeclibdir='${libdir}' +fi +AC_SUBST(toolexeclibdir) + +# Check linker support. +LIBFFI_ENABLE_SYMVERS + +AC_CONFIG_COMMANDS(include, [test -d include || mkdir include]) +AC_CONFIG_COMMANDS(src, [ +test -d src || mkdir src +test -d src/$TARGETDIR || mkdir src/$TARGETDIR +], [TARGETDIR="$TARGETDIR"]) + +AC_CONFIG_FILES(include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile doc/Makefile libffi.pc) + +AC_OUTPUT + +# Copy this file instead of using AC_CONFIG_LINK in order to support +# compiling with MSVC, which won't understand cygwin style symlinks. +cp ${srcdir}/src/$TARGETDIR/ffitarget.h include/ffitarget.h diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure.host b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure.host new file mode 100644 index 0000000..257b784 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/configure.host @@ -0,0 +1,318 @@ +# configure.host +# +# This shell script handles all host based configuration for libffi. +# + +# THIS TABLE IS SORTED. KEEP IT THAT WAY. +# Most of the time we can define all the variables all at once... +case "${host}" in + aarch64*-*-cygwin* | aarch64*-*-mingw* | aarch64*-*-win* ) + TARGET=ARM_WIN64; TARGETDIR=aarch64 + if test "${ax_cv_c_compiler_vendor}" = "microsoft"; then + MSVC=1 + fi + ;; + + aarch64*-*-*) + TARGET=AARCH64; TARGETDIR=aarch64 + SOURCES="ffi.c sysv.S" + ;; + + alpha*-*-*) + TARGET=ALPHA; TARGETDIR=alpha; + # Support 128-bit long double, changeable via command-line switch. 
+ HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)' + SOURCES="ffi.c osf.S" + ;; + + arc*-*-*) + TARGET=ARC; TARGETDIR=arc + SOURCES="ffi.c arcompact.S" + ;; + + arm*-*-cygwin* | arm*-*-mingw* | arm*-*-win* ) + TARGET=ARM_WIN32; TARGETDIR=arm + MSVC=1 + ;; + + arm*-*-*) + TARGET=ARM; TARGETDIR=arm + SOURCES="ffi.c sysv.S" + ;; + + avr32*-*-*) + TARGET=AVR32; TARGETDIR=avr32 + SOURCES="ffi.c sysv.S" + ;; + + bfin*) + TARGET=BFIN; TARGETDIR=bfin + SOURCES="ffi.c sysv.S" + ;; + + cris-*-*) + TARGET=LIBFFI_CRIS; TARGETDIR=cris + SOURCES="ffi.c sysv.S" + ;; + + csky-*-*) + TARGET=CSKY; TARGETDIR=csky + SOURCES="ffi.c sysv.S" + ;; + + frv-*-*) + TARGET=FRV; TARGETDIR=frv + SOURCES="ffi.c eabi.S" + ;; + + hppa*-*-linux* | parisc*-*-linux* | hppa*-*-openbsd*) + TARGET=PA_LINUX; TARGETDIR=pa + SOURCES="ffi.c linux.S" + ;; + hppa*64-*-hpux*) + TARGET=PA64_HPUX; TARGETDIR=pa + ;; + hppa*-*-hpux*) + TARGET=PA_HPUX; TARGETDIR=pa + SOURCES="ffi.c hpux32.S" + ;; + + i?86-*-freebsd* | i?86-*-openbsd*) + TARGET=X86_FREEBSD; TARGETDIR=x86 + ;; + + i?86-*-cygwin* | i?86-*-mingw* | i?86-*-win* | i?86-*-os2* | i?86-*-interix* \ + | x86_64-*-cygwin* | x86_64-*-mingw* | x86_64-*-win* ) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_WIN32 + else + TARGET=X86_WIN64 + fi + if test "${ax_cv_c_compiler_vendor}" = "microsoft"; then + MSVC=1 + fi + # All mingw/cygwin/win32 builds require -no-undefined for sharedlib. + # We must also check with_cross_host to decide if this is a native + # or cross-build and select where to install dlls appropriately. + if test -n "$with_cross_host" && + test x"$with_cross_host" != x"no"; then + AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"'; + else + AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"'; + fi + ;; + + i?86-*-darwin* | x86_64-*-darwin* | i?86-*-ios | x86_64-*-ios) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + TARGET=X86_DARWIN + else + TARGET=X86_64 + fi + ;; + + i?86-*-* | x86_64-*-* | amd64-*) + TARGETDIR=x86 + if test $ac_cv_sizeof_size_t = 4; then + echo 'int foo (void) { return __x86_64__; }' > conftest.c + if $CC $CFLAGS -Werror -S conftest.c -o conftest.s > /dev/null 2>&1; then + TARGET_X32=yes + TARGET=X86_64 + else + TARGET=X86; + fi + rm -f conftest.* + else + TARGET=X86_64; + fi + ;; + + ia64*-*-*) + TARGET=IA64; TARGETDIR=ia64 + SOURCES="ffi.c unix.S" + ;; + + kvx-*-*) + TARGET=KVX; TARGETDIR=kvx + SOURCES="ffi.c sysv.S" + ;; + + m32r*-*-*) + TARGET=M32R; TARGETDIR=m32r + SOURCES="ffi.c sysv.S" + ;; + + m68k-*-*) + TARGET=M68K; TARGETDIR=m68k + SOURCES="ffi.c sysv.S" + ;; + + m88k-*-*) + TARGET=M88K; TARGETDIR=m88k + SOURCES="ffi.c obsd.S" + ;; + + microblaze*-*-*) + TARGET=MICROBLAZE; TARGETDIR=microblaze + SOURCES="ffi.c sysv.S" + ;; + + moxie-*-*) + TARGET=MOXIE; TARGETDIR=moxie + SOURCES="ffi.c eabi.S" + ;; + + metag-*-*) + TARGET=METAG; TARGETDIR=metag + SOURCES="ffi.c sysv.S" + ;; + + mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*) + TARGET=MIPS; TARGETDIR=mips + ;; + mips*-*linux* | mips*-*-openbsd* | mips*-*-freebsd*) + # Support 128-bit long double for NewABI. 
+ HAVE_LONG_DOUBLE='defined(__mips64)' + TARGET=MIPS; TARGETDIR=mips + ;; + + nios2*-linux*) + TARGET=NIOS2; TARGETDIR=nios2 + SOURCES="ffi.c sysv.S" + ;; + + or1k*-*-*) + TARGET=OR1K; TARGETDIR=or1k + SOURCES="ffi.c sysv.S" + ;; + + powerpc*-*-linux* | powerpc-*-sysv*) + TARGET=POWERPC; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpc-*-amigaos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-eabi*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-beos*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc-*-darwin* | powerpc64-*-darwin*) + TARGET=POWERPC_DARWIN; TARGETDIR=powerpc + ;; + powerpc-*-aix* | rs6000-*-aix*) + TARGET=POWERPC_AIX; TARGETDIR=powerpc + ;; + powerpc-*-freebsd* | powerpc-*-openbsd* | powerpc-*-netbsd*) + TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc + HAVE_LONG_DOUBLE_VARIANT=1 + ;; + powerpcspe-*-freebsd*) + TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc + CFLAGS="$CFLAGS -D__NO_FPRS__" + ;; + powerpc64-*-freebsd* | powerpc64le-*-freebsd*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + powerpc*-*-rtems*) + TARGET=POWERPC; TARGETDIR=powerpc + ;; + + riscv*-*) + TARGET=RISCV; TARGETDIR=riscv + SOURCES="ffi.c sysv.S" + ;; + + s390-*-* | s390x-*-*) + TARGET=S390; TARGETDIR=s390 + SOURCES="ffi.c sysv.S" + ;; + + sh-*-* | sh[34]*-*-*) + TARGET=SH; TARGETDIR=sh + SOURCES="ffi.c sysv.S" + ;; + sh64-*-* | sh5*-*-*) + TARGET=SH64; TARGETDIR=sh64 + SOURCES="ffi.c sysv.S" + ;; + + sparc*-*-*) + TARGET=SPARC; TARGETDIR=sparc + SOURCES="ffi.c ffi64.c v8.S v9.S" + ;; + + tile*-*) + TARGET=TILE; TARGETDIR=tile + SOURCES="ffi.c tile.S" + ;; + + vax-*-*) + TARGET=VAX; TARGETDIR=vax + SOURCES="ffi.c elfbsd.S" + ;; + + xtensa*-*) + TARGET=XTENSA; TARGETDIR=xtensa + SOURCES="ffi.c sysv.S" + ;; +esac + +# ... but some of the cases above share configury. +case "${TARGET}" in + ARM_WIN32) + SOURCES="ffi.c sysv_msvc_arm32.S" + ;; + ARM_WIN64) + if test "$MSVC" = 1; then + SOURCES="ffi.c win64_armasm.S" + else + SOURCES="ffi.c sysv.S" + fi + ;; + MIPS) + SOURCES="ffi.c o32.S n32.S" + ;; + POWERPC) + SOURCES="ffi.c ffi_sysv.c ffi_linux64.c sysv.S ppc_closure.S" + SOURCES="${SOURCES} linux64.S linux64_closure.S" + ;; + POWERPC_AIX) + SOURCES="ffi_darwin.c aix.S aix_closure.S" + ;; + POWERPC_DARWIN) + SOURCES="ffi_darwin.c darwin.S darwin_closure.S" + ;; + POWERPC_FREEBSD) + SOURCES="ffi.c ffi_sysv.c sysv.S ppc_closure.S" + ;; + X86 | X86_DARWIN | X86_FREEBSD | X86_WIN32) + if test "$MSVC" = 1; then + SOURCES="ffi.c sysv_intel.S" + else + SOURCES="ffi.c sysv.S" + fi + ;; + X86_64) + if test x"$TARGET_X32" = xyes; then + SOURCES="ffi64.c unix64.S" + else + SOURCES="ffi64.c unix64.S ffiw64.c win64.S" + fi + ;; + X86_WIN64) + if test "$MSVC" = 1; then + SOURCES="ffiw64.c win64_intel.S" + else + SOURCES="ffiw64.c win64.S" + fi + ;; +esac + +# If we failed to configure SOURCES, we can't do anything. 
+if test -z "${SOURCES}"; then + UNSUPPORTED=1 +fi diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/Makefile.am new file mode 100644 index 0000000..43b650a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/Makefile.am @@ -0,0 +1,3 @@ +## Process this with automake to create Makefile.in + +info_TEXINFOS = libffi.texi diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/libffi.texi b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/libffi.texi new file mode 100644 index 0000000..bd30593 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/libffi.texi @@ -0,0 +1,997 @@ +\input texinfo @c -*-texinfo-*- +@c %**start of header +@setfilename libffi.info +@include version.texi +@settitle libffi: the portable foreign function interface library +@setchapternewpage off +@c %**end of header + +@c Merge the standard indexes into a single one. +@syncodeindex fn cp +@syncodeindex vr cp +@syncodeindex ky cp +@syncodeindex pg cp +@syncodeindex tp cp + +@copying + +This manual is for libffi, a portable foreign function interface +library. + +Copyright @copyright{} 2008--2019 Anthony Green and Red Hat, Inc. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@end copying + +@dircategory Development +@direntry +* libffi: (libffi). Portable foreign function interface library. +@end direntry + +@titlepage +@title libffi: a foreign function interface library +@subtitle For Version @value{VERSION} of libffi +@author Anthony Green +@page +@vskip 0pt plus 1filll +@insertcopying +@end titlepage + + +@ifnottex +@node Top +@top libffi + +@insertcopying + +@menu +* Introduction:: What is libffi? +* Using libffi:: How to use libffi. +* Missing Features:: Things libffi can't do. +* Index:: Index. +@end menu + +@end ifnottex + + +@node Introduction +@chapter What is libffi? + +Compilers for high level languages generate code that follow certain +conventions. These conventions are necessary, in part, for separate +compilation to work. One such convention is the @dfn{calling +convention}. The calling convention is a set of assumptions made by +the compiler about where function arguments will be found on entry to +a function. A calling convention also specifies where the return +value for a function is found. The calling convention is also +sometimes called the @dfn{ABI} or @dfn{Application Binary Interface}. 
+@cindex calling convention +@cindex ABI +@cindex Application Binary Interface + +Some programs may not know at the time of compilation what arguments +are to be passed to a function. For instance, an interpreter may be +told at run-time about the number and types of arguments used to call +a given function. @samp{Libffi} can be used in such programs to +provide a bridge from the interpreter program to compiled code. + +The @samp{libffi} library provides a portable, high level programming +interface to various calling conventions. This allows a programmer to +call any function specified by a call interface description at run +time. + +@acronym{FFI} stands for Foreign Function Interface. A foreign +function interface is the popular name for the interface that allows +code written in one language to call code written in another language. +The @samp{libffi} library really only provides the lowest, machine +dependent layer of a fully featured foreign function interface. A +layer must exist above @samp{libffi} that handles type conversions for +values passed between the two languages. +@cindex FFI +@cindex Foreign Function Interface + + +@node Using libffi +@chapter Using libffi + +@menu +* The Basics:: The basic libffi API. +* Simple Example:: A simple example. +* Types:: libffi type descriptions. +* Multiple ABIs:: Different passing styles on one platform. +* The Closure API:: Writing a generic function. +* Closure Example:: A closure example. +* Thread Safety:: Thread safety. +@end menu + + +@node The Basics +@section The Basics + +@samp{Libffi} assumes that you have a pointer to the function you wish +to call and that you know the number and types of arguments to pass +it, as well as the return type of the function. + +The first thing you must do is create an @code{ffi_cif} object that +matches the signature of the function you wish to call. This is a +separate step because it is common to make multiple calls using a +single @code{ffi_cif}. The @dfn{cif} in @code{ffi_cif} stands for +Call InterFace. To prepare a call interface object, use the function +@code{ffi_prep_cif}. +@cindex cif + +@findex ffi_prep_cif +@defun ffi_status ffi_prep_cif (ffi_cif *@var{cif}, ffi_abi @var{abi}, unsigned int @var{nargs}, ffi_type *@var{rtype}, ffi_type **@var{argtypes}) +This initializes @var{cif} according to the given parameters. + +@var{abi} is the ABI to use; normally @code{FFI_DEFAULT_ABI} is what +you want. @ref{Multiple ABIs} for more information. + +@var{nargs} is the number of arguments that this function accepts. + +@var{rtype} is a pointer to an @code{ffi_type} structure that +describes the return type of the function. @xref{Types}. + +@var{argtypes} is a vector of @code{ffi_type} pointers. +@var{argtypes} must have @var{nargs} elements. If @var{nargs} is 0, +this argument is ignored. + +@code{ffi_prep_cif} returns a @code{libffi} status code, of type +@code{ffi_status}. This will be either @code{FFI_OK} if everything +worked properly; @code{FFI_BAD_TYPEDEF} if one of the @code{ffi_type} +objects is incorrect; or @code{FFI_BAD_ABI} if the @var{abi} parameter +is invalid. +@end defun + +If the function being called is variadic (varargs) then +@code{ffi_prep_cif_var} must be used instead of @code{ffi_prep_cif}. 
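As a concrete sketch of the variadic case just described (this illustration is not part of the vendored libffi.texi; it assumes only the documented ffi_prep_cif_var and ffi_call entry points plus the FFI_FN cast helper shipped in ffi.h), preparing a call interface for printf("%d\n", 42) with one fixed argument and one variadic argument might look like:

#include <stdio.h>
#include <ffi.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *args[2];
  void *values[2];
  char *fmt = "%d\n";
  int n = 42;
  ffi_arg rc;                      /* return value widened to ffi_arg, as described above */

  args[0] = &ffi_type_pointer;     /* fixed argument: the format string */
  args[1] = &ffi_type_sint;        /* variadic argument, already promoted to int */
  values[0] = &fmt;
  values[1] = &n;

  /* One fixed argument, two arguments in total. */
  if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
                       &ffi_type_sint, args) == FFI_OK)
    ffi_call(&cif, FFI_FN(printf), &rc, values);  /* FFI_FN casts printf to ffi_call's fn type */

  return 0;
}

Consistent with the note below, the same cif may be reused for further calls that pass the same number and types of arguments; a call with a different variadic argument list needs a freshly prepared cif.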
+
+@findex ffi_prep_cif_var
+@defun ffi_status ffi_prep_cif_var (ffi_cif *@var{cif}, ffi_abi @var{abi}, unsigned int @var{nfixedargs}, unsigned int @var{ntotalargs}, ffi_type *@var{rtype}, ffi_type **@var{argtypes})
+This initializes @var{cif} according to the given parameters for
+a call to a variadic function. In general its operation is the
+same as for @code{ffi_prep_cif} except that:
+
+@var{nfixedargs} is the number of fixed arguments, prior to any
+variadic arguments. It must be greater than zero.
+
+@var{ntotalargs} is the total number of arguments, including variadic
+and fixed arguments. @var{argtypes} must have this many elements.
+
+Note that different @code{ffi_cif} objects must be prepped for calls
+to the same function when different numbers of arguments are passed.
+
+Also note that a call to @code{ffi_prep_cif_var} with
+@var{nfixedargs}=@var{ntotalargs} is NOT equivalent to a call to
+@code{ffi_prep_cif}.
+
+@end defun
+
+Note that the resulting @code{ffi_cif} holds pointers to all the
+@code{ffi_type} objects that were used during initialization. You
+must ensure that these type objects have a lifetime at least as long
+as that of the @code{ffi_cif}.
+
+To call a function using an initialized @code{ffi_cif}, use the
+@code{ffi_call} function:
+
+@findex ffi_call
+@defun void ffi_call (ffi_cif *@var{cif}, void *@var{fn}, void *@var{rvalue}, void **@var{avalues})
+This calls the function @var{fn} according to the description given in
+@var{cif}. @var{cif} must have already been prepared using
+@code{ffi_prep_cif}.
+
+@var{rvalue} is a pointer to a chunk of memory that will hold the
+result of the function call. This must be large enough to hold the
+result, no smaller than the system register size (generally 32 or 64
+bits), and must be suitably aligned; it is the caller's responsibility
+to ensure this. If @var{cif} declares that the function returns
+@code{void} (using @code{ffi_type_void}), then @var{rvalue} is
+ignored.
+
+In most situations, @samp{libffi} will handle promotion according to
+the ABI. However, for historical reasons, there is a special case
+with return values that must be handled by your code. In particular,
+for integral (not @code{struct}) types that are narrower than the
+system register size, the return value will be widened by
+@samp{libffi}. @samp{libffi} provides a type, @code{ffi_arg}, that
+can be used as the return type. For example, if the CIF was defined
+with a return type of @code{char}, @samp{libffi} will try to store a
+full @code{ffi_arg} into the return value.
+
+@var{avalues} is a vector of @code{void *} pointers that point to the
+memory locations holding the argument values for a call. If @var{cif}
+declares that the function has no arguments (i.e., @var{nargs} was 0),
+then @var{avalues} is ignored. Note that argument values may be
+modified by the callee (for instance, structs passed by value); the
+burden of copying pass-by-value arguments is placed on the caller.
+
+Note that while the return value must be register-sized, arguments
+should exactly match their declared type. For example, if an argument
+is a @code{short}, then the entry in @var{avalues} should point to an
+object declared as @code{short}; but if the return type is
+@code{short}, then @var{rvalue} should point to an object declared as
+a larger type -- usually @code{ffi_arg}.
+@end defun
+
+
+@node Simple Example
+@section Simple Example
+
+Here is a trivial example that calls @code{puts} a few times.
+
+@example
+#include <stdio.h>
+#include <ffi.h>
+
+int main()
+@{
+  ffi_cif cif;
+  ffi_type *args[1];
+  void *values[1];
+  char *s;
+  ffi_arg rc;
+
+  /* Initialize the argument info vectors */
+  args[0] = &ffi_type_pointer;
+  values[0] = &s;
+
+  /* Initialize the cif */
+  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+                   &ffi_type_sint, args) == FFI_OK)
+    @{
+      s = "Hello World!";
+      ffi_call(&cif, puts, &rc, values);
+      /* rc now holds the result of the call to puts */
+
+      /* values holds a pointer to the function's arg, so to
+         call puts() again all we need to do is change the
+         value of s */
+      s = "This is cool!";
+      ffi_call(&cif, puts, &rc, values);
+    @}
+
+  return 0;
+@}
+@end example
+
+
+@node Types
+@section Types
+
+@menu
+* Primitive Types::        Built-in types.
+* Structures::             Structure types.
+* Size and Alignment::     Size and alignment of types.
+* Arrays Unions Enums::    Arrays, unions, and enumerations.
+* Type Example::           Structure type example.
+* Complex::                Complex types.
+* Complex Type Example::   Complex type example.
+@end menu
+
+@node Primitive Types
+@subsection Primitive Types
+
+@code{Libffi} provides a number of built-in type descriptors that can
+be used to describe argument and return types:
+
+@table @code
+@item ffi_type_void
+@tindex ffi_type_void
+The type @code{void}. This cannot be used for argument types, only
+for return values.
+
+@item ffi_type_uint8
+@tindex ffi_type_uint8
+An unsigned, 8-bit integer type.
+
+@item ffi_type_sint8
+@tindex ffi_type_sint8
+A signed, 8-bit integer type.
+
+@item ffi_type_uint16
+@tindex ffi_type_uint16
+An unsigned, 16-bit integer type.
+
+@item ffi_type_sint16
+@tindex ffi_type_sint16
+A signed, 16-bit integer type.
+
+@item ffi_type_uint32
+@tindex ffi_type_uint32
+An unsigned, 32-bit integer type.
+
+@item ffi_type_sint32
+@tindex ffi_type_sint32
+A signed, 32-bit integer type.
+
+@item ffi_type_uint64
+@tindex ffi_type_uint64
+An unsigned, 64-bit integer type.
+
+@item ffi_type_sint64
+@tindex ffi_type_sint64
+A signed, 64-bit integer type.
+
+@item ffi_type_float
+@tindex ffi_type_float
+The C @code{float} type.
+
+@item ffi_type_double
+@tindex ffi_type_double
+The C @code{double} type.
+
+@item ffi_type_uchar
+@tindex ffi_type_uchar
+The C @code{unsigned char} type.
+
+@item ffi_type_schar
+@tindex ffi_type_schar
+The C @code{signed char} type. (Note that there is not an exact
+equivalent to the C @code{char} type in @code{libffi}; ordinarily you
+should either use @code{ffi_type_schar} or @code{ffi_type_uchar}
+depending on whether @code{char} is signed.)
+
+@item ffi_type_ushort
+@tindex ffi_type_ushort
+The C @code{unsigned short} type.
+
+@item ffi_type_sshort
+@tindex ffi_type_sshort
+The C @code{short} type.
+
+@item ffi_type_uint
+@tindex ffi_type_uint
+The C @code{unsigned int} type.
+
+@item ffi_type_sint
+@tindex ffi_type_sint
+The C @code{int} type.
+
+@item ffi_type_ulong
+@tindex ffi_type_ulong
+The C @code{unsigned long} type.
+
+@item ffi_type_slong
+@tindex ffi_type_slong
+The C @code{long} type.
+
+@item ffi_type_longdouble
+@tindex ffi_type_longdouble
+On platforms that have a C @code{long double} type, this is defined.
+On other platforms, it is not.
+
+@item ffi_type_pointer
+@tindex ffi_type_pointer
+A generic @code{void *} pointer. You should use this for all
+pointers, regardless of their real type.
+
+@item ffi_type_complex_float
+@tindex ffi_type_complex_float
+The C @code{_Complex float} type.
+
+@item ffi_type_complex_double
+@tindex ffi_type_complex_double
+The C @code{_Complex double} type.
+ +@item ffi_type_complex_longdouble +@tindex ffi_type_complex_longdouble +The C @code{_Complex long double} type. +On platforms that have a C @code{long double} type, this is defined. +On other platforms, it is not. +@end table + +Each of these is of type @code{ffi_type}, so you must take the address +when passing to @code{ffi_prep_cif}. + + +@node Structures +@subsection Structures + +@samp{libffi} is perfectly happy passing structures back and forth. +You must first describe the structure to @samp{libffi} by creating a +new @code{ffi_type} object for it. + +@tindex ffi_type +@deftp {Data type} ffi_type +The @code{ffi_type} has the following members: +@table @code +@item size_t size +This is set by @code{libffi}; you should initialize it to zero. + +@item unsigned short alignment +This is set by @code{libffi}; you should initialize it to zero. + +@item unsigned short type +For a structure, this should be set to @code{FFI_TYPE_STRUCT}. + +@item ffi_type **elements +This is a @samp{NULL}-terminated array of pointers to @code{ffi_type} +objects. There is one element per field of the struct. + +Note that @samp{libffi} has no special support for bit-fields. You +must manage these manually. +@end table +@end deftp + +The @code{size} and @code{alignment} fields will be filled in by +@code{ffi_prep_cif} or @code{ffi_prep_cif_var}, as needed. + +@node Size and Alignment +@subsection Size and Alignment + +@code{libffi} will set the @code{size} and @code{alignment} fields of +an @code{ffi_type} object for you. It does so using its knowledge of +the ABI. + +You might expect that you can simply read these fields for a type that +has been laid out by @code{libffi}. However, there are some caveats. + +@itemize @bullet +@item +The size or alignment of some of the built-in types may vary depending +on the chosen ABI. + +@item +The size and alignment of a new structure type will not be set by +@code{libffi} until it has been passed to @code{ffi_prep_cif} or +@code{ffi_get_struct_offsets}. + +@item +A structure type cannot be shared across ABIs. Instead each ABI needs +its own copy of the structure type. +@end itemize + +So, before examining these fields, it is safest to pass the +@code{ffi_type} object to @code{ffi_prep_cif} or +@code{ffi_get_struct_offsets} first. This function will do all the +needed setup. + +@example +ffi_type *desired_type; +ffi_abi desired_abi; +@dots{} +ffi_cif cif; +if (ffi_prep_cif (&cif, desired_abi, 0, desired_type, NULL) == FFI_OK) + @{ + size_t size = desired_type->size; + unsigned short alignment = desired_type->alignment; + @} +@end example + +@code{libffi} also provides a way to get the offsets of the members of +a structure. + +@findex ffi_get_struct_offsets +@defun ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, size_t *offsets) +Compute the offset of each element of the given structure type. +@var{abi} is the ABI to use; this is needed because in some cases the +layout depends on the ABI. + +@var{offsets} is an out parameter. The caller is responsible for +providing enough space for all the results to be written -- one +element per element type in @var{struct_type}. If @var{offsets} is +@code{NULL}, then the type will be laid out but not otherwise +modified. This can be useful for accessing the type's size or layout, +as mentioned above. + +This function returns @code{FFI_OK} on success; @code{FFI_BAD_ABI} if +@var{abi} is invalid; or @code{FFI_BAD_TYPEDEF} if @var{struct_type} +is invalid in some way. 
Note that only @code{FFI_TYPE_STRUCT} types are valid here.
+@end defun
+
+@node Arrays Unions Enums
+@subsection Arrays, Unions, and Enumerations
+
+@subsubsection Arrays
+
+@samp{libffi} does not have direct support for arrays or unions.
+However, they can be emulated using structures.
+
+To emulate an array, simply create an @code{ffi_type} using
+@code{FFI_TYPE_STRUCT} with as many members as there are elements in
+the array.
+
+@example
+ffi_type array_type;
+ffi_type **elements;
+int i;
+
+elements = malloc ((n + 1) * sizeof (ffi_type *));
+for (i = 0; i < n; ++i)
+  elements[i] = array_element_type;
+elements[n] = NULL;
+
+array_type.size = array_type.alignment = 0;
+array_type.type = FFI_TYPE_STRUCT;
+array_type.elements = elements;
+@end example
+
+Note that arrays cannot be passed or returned by value in C --
+structure types created like this should only be used to refer to
+members of real @code{FFI_TYPE_STRUCT} objects.
+
+However, a phony array type like this will not cause any errors from
+@samp{libffi} if you use it as an argument or return type. This may
+be confusing.
+
+@subsubsection Unions
+
+A union can also be emulated using @code{FFI_TYPE_STRUCT}. In this
+case, however, you must make sure that the size and alignment match
+the real requirements of the union.
+
+One simple way to do this is to ensure that each element type is laid
+out. Then give the new structure type a single element, the size of
+the largest element, and the largest alignment seen as well.
+
+This example uses the @code{ffi_prep_cif} trick to ensure that each
+element type is laid out.
+
+@example
+ffi_abi desired_abi;
+ffi_type union_type;
+ffi_type **union_elements;
+
+int i;
+ffi_type *element_types[2];
+
+element_types[0] = element_types[1] = NULL;
+
+union_type.size = union_type.alignment = 0;
+union_type.type = FFI_TYPE_STRUCT;
+union_type.elements = element_types;
+
+for (i = 0; union_elements[i]; ++i)
+  @{
+    ffi_cif cif;
+    if (ffi_prep_cif (&cif, desired_abi, 0, union_elements[i], NULL) == FFI_OK)
+      @{
+        if (union_elements[i]->size > union_type.size)
+          @{
+            union_type.size = union_elements[i]->size;
+            element_types[0] = union_elements[i];
+          @}
+        if (union_elements[i]->alignment > union_type.alignment)
+          union_type.alignment = union_elements[i]->alignment;
+      @}
+  @}
+@end example
+
+@subsubsection Enumerations
+
+@code{libffi} does not have any special support for C @code{enum}s.
+Although any given @code{enum} is implemented using a specific
+underlying integral type, exactly which type will be used cannot be
+determined by @code{libffi} -- it may depend on the values in the
+enumeration or on compiler flags such as @option{-fshort-enums}.
+@xref{Structures unions enumerations and bit-fields implementation, , , gcc},
+for more information about how GCC handles enumerations.
+
+@node Type Example
+@subsection Type Example
+
+The following example initializes an @code{ffi_type} object
+representing the @code{tm} struct from Linux's @file{time.h}.
+
+Here is how the struct is defined:
+
+@example
+struct tm @{
+    int tm_sec;
+    int tm_min;
+    int tm_hour;
+    int tm_mday;
+    int tm_mon;
+    int tm_year;
+    int tm_wday;
+    int tm_yday;
+    int tm_isdst;
+    /* Those are for future use. */
+    long int __tm_gmtoff__;
+    __const char *__tm_zone__;
+@};
+@end example
+
+Here is the corresponding code to describe this struct to
+@code{libffi}:
+
+@example
+  @{
+    ffi_type tm_type;
+    ffi_type *tm_type_elements[12];
+    int i;
+
+    tm_type.size = tm_type.alignment = 0;
+    tm_type.type = FFI_TYPE_STRUCT;
+    tm_type.elements = tm_type_elements;
+
+    for (i = 0; i < 9; i++)
+      tm_type_elements[i] = &ffi_type_sint;
+
+    tm_type_elements[9] = &ffi_type_slong;
+    tm_type_elements[10] = &ffi_type_pointer;
+    tm_type_elements[11] = NULL;
+
+    /* tm_type can now be used to represent tm argument types and
+       return types for ffi_prep_cif() */
+  @}
+@end example
+
+@node Complex
+@subsection Complex Types
+
+@samp{libffi} supports the complex types defined by the C99
+standard (@code{_Complex float}, @code{_Complex double} and
+@code{_Complex long double}) with the built-in type descriptors
+@code{ffi_type_complex_float}, @code{ffi_type_complex_double} and
+@code{ffi_type_complex_longdouble}.
+
+Custom complex types like @code{_Complex int} can also be used.
+An @code{ffi_type} object has to be defined to describe the
+complex type to @samp{libffi}.
+
+@tindex ffi_type
+@deftp {Data type} ffi_type
+@table @code
+@item size_t size
+This must be manually set to the size of the complex type.
+
+@item unsigned short alignment
+This must be manually set to the alignment of the complex type.
+
+@item unsigned short type
+For a complex type, this must be set to @code{FFI_TYPE_COMPLEX}.
+
+@item ffi_type **elements
+
+This is a @samp{NULL}-terminated array of pointers to
+@code{ffi_type} objects. The first element is set to the
+@code{ffi_type} of the complex's base type. The second element
+must be set to @code{NULL}.
+@end table
+@end deftp
+
+The section @ref{Complex Type Example} shows a way to determine
+the @code{size} and @code{alignment} members in a platform
+independent way.
+
+For platforms that have no complex support in @code{libffi} yet,
+the functions @code{ffi_prep_cif} and @code{ffi_prep_args} abort
+the program if they encounter a complex type.
+
+@node Complex Type Example
+@subsection Complex Type Example
+
+This example demonstrates how to use complex types:
+
+@example
+#include <stdio.h>
+#include <ffi.h>
+#include <complex.h>
+
+void complex_fn(_Complex float cf,
+                _Complex double cd,
+                _Complex long double cld)
+@{
+  printf("cf=%f+%fi\ncd=%f+%fi\ncld=%f+%fi\n",
+         (float)creal (cf), (float)cimag (cf),
+         (float)creal (cd), (float)cimag (cd),
+         (float)creal (cld), (float)cimag (cld));
+@}
+
+int main()
+@{
+  ffi_cif cif;
+  ffi_type *args[3];
+  void *values[3];
+  _Complex float cf;
+  _Complex double cd;
+  _Complex long double cld;
+
+  /* Initialize the argument info vectors */
+  args[0] = &ffi_type_complex_float;
+  args[1] = &ffi_type_complex_double;
+  args[2] = &ffi_type_complex_longdouble;
+  values[0] = &cf;
+  values[1] = &cd;
+  values[2] = &cld;
+
+  /* Initialize the cif */
+  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3,
+                   &ffi_type_void, args) == FFI_OK)
+    @{
+      cf = 1.0 + 20.0 * I;
+      cd = 300.0 + 4000.0 * I;
+      cld = 50000.0 + 600000.0 * I;
+      /* Call the function */
+      ffi_call(&cif, (void (*)(void))complex_fn, 0, values);
+    @}
+
+  return 0;
+@}
+@end example
+
+This is an example of defining a custom complex type descriptor
+for compilers that support complex types:
+
+@example
+/*
+ * This macro can be used to define new complex type descriptors
+ * in a platform independent way.
+ *
+ * name: Name of the new descriptor is ffi_type_complex_<name>.
+ * type: The C base type of the complex type.
+ */ +#define FFI_COMPLEX_TYPEDEF(name, type, ffitype) \ + static ffi_type *ffi_elements_complex_##name [2] = @{ \ + (ffi_type *)(&ffitype), NULL \ + @}; \ + struct struct_align_complex_##name @{ \ + char c; \ + _Complex type x; \ + @}; \ + ffi_type ffi_type_complex_##name = @{ \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ + @} + +/* Define new complex type descriptors using the macro: */ +/* ffi_type_complex_sint */ +FFI_COMPLEX_TYPEDEF(sint, int, ffi_type_sint); +/* ffi_type_complex_uchar */ +FFI_COMPLEX_TYPEDEF(uchar, unsigned char, ffi_type_uint8); +@end example + +The new type descriptors can then be used like one of the built-in +type descriptors in the previous example. + +@node Multiple ABIs +@section Multiple ABIs + +A given platform may provide multiple different ABIs at once. For +instance, the x86 platform has both @samp{stdcall} and @samp{fastcall} +functions. + +@code{libffi} provides some support for this. However, this is +necessarily platform-specific. + +@c FIXME: document the platforms + +@node The Closure API +@section The Closure API + +@code{libffi} also provides a way to write a generic function -- a +function that can accept and decode any combination of arguments. +This can be useful when writing an interpreter, or to provide wrappers +for arbitrary functions. + +This facility is called the @dfn{closure API}. Closures are not +supported on all platforms; you can check the @code{FFI_CLOSURES} +define to determine whether they are supported on the current +platform. +@cindex closures +@cindex closure API +@findex FFI_CLOSURES + +Because closures work by assembling a tiny function at runtime, they +require special allocation on platforms that have a non-executable +heap. Memory management for closures is handled by a pair of +functions: + +@findex ffi_closure_alloc +@defun void *ffi_closure_alloc (size_t @var{size}, void **@var{code}) +Allocate a chunk of memory holding @var{size} bytes. This returns a +pointer to the writable address, and sets *@var{code} to the +corresponding executable address. + +@var{size} should be sufficient to hold a @code{ffi_closure} object. +@end defun + +@findex ffi_closure_free +@defun void ffi_closure_free (void *@var{writable}) +Free memory allocated using @code{ffi_closure_alloc}. The argument is +the writable address that was returned. +@end defun + + +Once you have allocated the memory for a closure, you must construct a +@code{ffi_cif} describing the function call. Finally you can prepare +the closure function: + +@findex ffi_prep_closure_loc +@defun ffi_status ffi_prep_closure_loc (ffi_closure *@var{closure}, ffi_cif *@var{cif}, void (*@var{fun}) (ffi_cif *@var{cif}, void *@var{ret}, void **@var{args}, void *@var{user_data}), void *@var{user_data}, void *@var{codeloc}) +Prepare a closure function. The arguments to +@code{ffi_prep_closure_loc} are: + +@table @var +@item closure +The address of a @code{ffi_closure} object; this is the writable +address returned by @code{ffi_closure_alloc}. + +@item cif +The @code{ffi_cif} describing the function parameters. Note that this +object, and the types to which it refers, must be kept alive until the +closure itself is freed. + +@item user_data +An arbitrary datum that is passed, uninterpreted, to your closure +function. + +@item codeloc +The executable address returned by @code{ffi_closure_alloc}. + +@item fun +The function which will be called when the closure is invoked. 
It is called with the arguments:
+
+@table @var
+@item cif
+The @code{ffi_cif} passed to @code{ffi_prep_closure_loc}.
+
+@item ret
+A pointer to the memory used for the function's return value.
+
+If the function is declared as returning @code{void}, then this value
+is garbage and should not be used.
+
+Otherwise, @var{fun} must fill the object to which this points,
+following the same special promotion behavior as @code{ffi_call}.
+That is, in most cases, @var{ret} points to an object of exactly the
+size of the type specified when @var{cif} was constructed. However,
+integral types narrower than the system register size are widened. In
+these cases your program may assume that @var{ret} points to an
+@code{ffi_arg} object.
+
+@item args
+A vector of pointers to memory holding the arguments to the function.
+
+@item user_data
+The same @var{user_data} that was passed to
+@code{ffi_prep_closure_loc}.
+@end table
+@end table
+
+@code{ffi_prep_closure_loc} will return @code{FFI_OK} if everything
+went OK, and one of the other @code{ffi_status} values on error.
+
+After calling @code{ffi_prep_closure_loc}, you can cast @var{codeloc}
+to the appropriate pointer-to-function type.
+@end defun
+
+You may see old code referring to @code{ffi_prep_closure}. This
+function is deprecated, as it cannot handle the need for separate
+writable and executable addresses.
+
+@node Closure Example
+@section Closure Example
+
+Here is a trivial example that creates a new @code{puts} by binding
+@code{fputs} with @code{stdout}.
+
+@example
+#include <stdio.h>
+#include <ffi.h>
+
+/* Acts like puts with the file given at time of enclosure. */
+void puts_binding(ffi_cif *cif, void *ret, void* args[],
+                  void *stream)
+@{
+  *(ffi_arg *)ret = fputs(*(char **)args[0], (FILE *)stream);
+@}
+
+typedef int (*puts_t)(char *);
+
+int main()
+@{
+  ffi_cif cif;
+  ffi_type *args[1];
+  ffi_closure *closure;
+
+  void *bound_puts;
+  int rc;
+
+  /* Allocate closure and bound_puts */
+  closure = ffi_closure_alloc(sizeof(ffi_closure), &bound_puts);
+
+  if (closure)
+    @{
+      /* Initialize the argument info vectors */
+      args[0] = &ffi_type_pointer;
+
+      /* Initialize the cif */
+      if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+                       &ffi_type_sint, args) == FFI_OK)
+        @{
+          /* Initialize the closure, setting stream to stdout */
+          if (ffi_prep_closure_loc(closure, &cif, puts_binding,
+                                   stdout, bound_puts) == FFI_OK)
+            @{
+              rc = ((puts_t)bound_puts)("Hello World!");
+              /* rc now holds the result of the call to fputs */
+            @}
+        @}
+    @}
+
+  /* Deallocate both closure and bound_puts */
+  ffi_closure_free(closure);
+
+  return 0;
+@}
+
+@end example
+
+@node Thread Safety
+@section Thread Safety
+
+@code{libffi} is not completely thread-safe. However, many parts are,
+and if you follow some simple rules, you can use it safely in a
+multi-threaded program.
+
+@itemize @bullet
+@item
+@code{ffi_prep_cif} may modify the @code{ffi_type} objects passed to
+it. It is best to ensure that only a single thread prepares a given
+@code{ffi_cif} at a time.
+
+@item
+On some platforms, @code{ffi_prep_cif} may modify the size and
+alignment of some types, depending on the chosen ABI. On these
+platforms, if you switch between ABIs, you must ensure that there is
+only one call to @code{ffi_prep_cif} at a time.
+
+Currently the only affected platform is PowerPC and the only affected
+type is @code{long double}.
+@end itemize
+
+@node Missing Features
+@chapter Missing Features
+
+@code{libffi} is missing a few features. We welcome patches to add
+support for these.
+ +@itemize @bullet +@item +Variadic closures. + +@item +There is no support for bit fields in structures. + +@item +The ``raw'' API is undocumented. +@c anything else? + +@item +The Go API is undocumented. +@end itemize + +Note that variadic support is very new and tested on a relatively +small number of platforms. + +@node Index +@unnumbered Index + +@printindex cp + +@bye diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/version.texi b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/version.texi new file mode 100644 index 0000000..d9d5094 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/doc/version.texi @@ -0,0 +1,4 @@ +@set UPDATED 22 November 2019 +@set UPDATED-MONTH November 2019 +@set EDITION 3.3 +@set VERSION 3.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/generate-darwin-source-and-headers.py b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/generate-darwin-source-and-headers.py new file mode 100755 index 0000000..516464f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/generate-darwin-source-and-headers.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +import subprocess +import os +import errno +import collections +import glob +import argparse + +class Platform(object): + pass + +class simulator_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'i386' + triple = 'i386-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class simulator64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphonesimulator' + arch = 'x86_64' + triple = 'x86_64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +class device_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'armv7' + triple = 'arm-apple-darwin11' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm__\n\n" + suffix = "\n\n#endif" + src_dir = 'arm' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class device64_platform(Platform): + directory = 'darwin_ios' + sdk = 'iphoneos' + arch = 'arm64' + triple = 'aarch64-apple-darwin13' + version_min = '-miphoneos-version-min=7.0' + + prefix = "#ifdef __arm64__\n\n" + suffix = "\n\n#endif" + src_dir = 'aarch64' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + +class desktop32_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'i386' + triple = 'i386-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + src_dir = 'x86' + src_files = ['sysv.S', 'ffi.c', 'internal.h'] + + prefix = "#ifdef __i386__\n\n" + suffix = "\n\n#endif" + + +class desktop64_platform(Platform): + directory = 'darwin_osx' + sdk = 'macosx' + arch = 'x86_64' + triple = 'x86_64-apple-darwin10' + version_min = '-mmacosx-version-min=10.6' + + prefix = "#ifdef __x86_64__\n\n" + suffix = "\n\n#endif" + src_dir = 'x86' + src_files = ['unix64.S', 'ffi64.c', 'ffiw64.c', 'win64.S', 'internal64.h', 'asmnames.h'] + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: # Python >2.5 + if exc.errno != errno.EEXIST: + raise + + +def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''): + mkdir_p(dst_dir) + out_filename = filename + + if 
file_suffix: + if filename in ['internal64.h', 'asmnames.h', 'internal.h']: + out_filename = filename + else: + split_name = os.path.splitext(filename) + out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1]) + + with open(os.path.join(src_dir, filename)) as in_file: + with open(os.path.join(dst_dir, out_filename), 'w') as out_file: + if prefix: + out_file.write(prefix) + + out_file.write(in_file.read()) + + if suffix: + out_file.write(suffix) + + +def list_files(src_dir, pattern=None, filelist=None): + if pattern: filelist = glob.iglob(os.path.join(src_dir, pattern)) + for file in filelist: + yield os.path.basename(file) + + +def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None): + for filename in list_files(src_dir, pattern=pattern, filelist=filelist): + move_file(src_dir, dst_dir, filename, file_suffix=file_suffix, prefix=prefix, suffix=suffix) + + +def copy_src_platform_files(platform): + src_dir = os.path.join('src', platform.src_dir) + dst_dir = os.path.join(platform.directory, 'src', platform.src_dir) + copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix) + + +def build_target(platform, platform_headers): + def xcrun_cmd(cmd): + return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch) + + tag='%s-%s' % (platform.sdk, platform.arch) + build_dir = 'build_%s' % tag + mkdir_p(build_dir) + env = dict(CC=xcrun_cmd('clang'), + LD=xcrun_cmd('ld'), + CFLAGS='%s -fembed-bitcode' % (platform.version_min)) + working_dir = os.getcwd() + try: + os.chdir(build_dir) + subprocess.check_call(['../configure', '-host', platform.triple], env=env) + finally: + os.chdir(working_dir) + + for src_dir in [build_dir, os.path.join(build_dir, 'include')]: + copy_files(src_dir, + os.path.join(platform.directory, 'include'), + pattern='*.h', + file_suffix=platform.arch, + prefix=platform.prefix, + suffix=platform.suffix) + + for filename in list_files(src_dir, pattern='*.h'): + platform_headers[filename].add((platform.prefix, platform.arch, platform.suffix)) + + +def generate_source_and_headers(generate_osx=True, generate_ios=True): + copy_files('src', 'darwin_common/src', pattern='*.c') + copy_files('include', 'darwin_common/include', pattern='*.h') + + if generate_ios: + copy_src_platform_files(simulator_platform) + copy_src_platform_files(simulator64_platform) + copy_src_platform_files(device_platform) + copy_src_platform_files(device64_platform) + if generate_osx: + copy_src_platform_files(desktop64_platform) + + platform_headers = collections.defaultdict(set) + + if generate_ios: + build_target(simulator_platform, platform_headers) + build_target(simulator64_platform, platform_headers) + build_target(device_platform, platform_headers) + build_target(device64_platform, platform_headers) + if generate_osx: + build_target(desktop64_platform, platform_headers) + + mkdir_p('darwin_common/include') + for header_name, tag_tuples in platform_headers.items(): + basename, suffix = os.path.splitext(header_name) + with open(os.path.join('darwin_common/include', header_name), 'w') as header: + for tag_tuple in tag_tuples: + header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2])) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--only-ios', action='store_true', default=False) + parser.add_argument('--only-osx', action='store_true', default=False) + args = 
parser.parse_args() + + generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/Makefile.am new file mode 100644 index 0000000..c59df9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/Makefile.am @@ -0,0 +1,9 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +DISTCLEANFILES=ffitarget.h +noinst_HEADERS=ffi_common.h ffi_cfi.h +EXTRA_DIST=ffi.h.in + +nodist_include_HEADERS = ffi.h ffitarget.h diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi.h.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi.h.in new file mode 100644 index 0000000..38885b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi.h.in @@ -0,0 +1,523 @@ +/* -----------------------------------------------------------------*-C-*- + libffi @VERSION@ - Copyright (c) 2011, 2014, 2019 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef @TARGET@ +#define @TARGET@ +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. 
*/ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. */ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. 
*/ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if @HAVE_LONG_DOUBLE@ +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if @HAVE_LONG_DOUBLE@ +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. */ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. 
*/ + +#if !FFI_NATIVE_RAW_API +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue) __attribute__((deprecated)); +#endif + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) __attribute__((deprecated)); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) __attribute__((deprecated)); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif) __attribute__((deprecated)); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +#if defined(PA_LINUX) || defined(PA_HPUX) +#define FFI_CLOSURE_PTR(X) ((void *)((unsigned int)(X) | 2)) +#define FFI_RESTORE_PTR(X) ((void *)((unsigned int)(X) & ~3)) +#else +#define FFI_CLOSURE_PTR(X) (X) +#define FFI_RESTORE_PTR(X) (X) +#endif + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if @FFI_EXEC_TRAMPOLINE_TABLE@ + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +#if !FFI_NATIVE_RAW_API +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) __attribute__((deprecated)); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) __attribute__((deprecated)); +#endif + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if @HAVE_LONG_DOUBLE@ +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi_cfi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi_cfi.h new file mode 100644 index 0000000..244ce57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi_cfi.h @@ -0,0 +1,55 @@ +/* ----------------------------------------------------------------------- + ffi_cfi.h - Copyright (c) 2014 Red Hat, Inc. + + Conditionally assemble cfi directives. Only necessary for building libffi. 
+ ----------------------------------------------------------------------- */ + +#ifndef FFI_CFI_H +#define FFI_CFI_H + +#ifdef HAVE_AS_CFI_PSEUDO_OP + +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp +# define cfi_escape(...) .cfi_escape __VA_ARGS__ + +#else + +# define cfi_startproc +# define cfi_endproc +# define cfi_def_cfa(reg, off) +# define cfi_def_cfa_register(reg) +# define cfi_def_cfa_offset(off) +# define cfi_adjust_cfa_offset(off) +# define cfi_offset(reg, off) +# define cfi_rel_offset(reg, off) +# define cfi_register(r1, r2) +# define cfi_return_column(reg) +# define cfi_restore(reg) +# define cfi_same_value(reg) +# define cfi_undefined(reg) +# define cfi_remember_state +# define cfi_restore_state +# define cfi_window_save +# define cfi_personality(enc, exp) +# define cfi_lsda(enc, exp) +# define cfi_escape(...) + +#endif /* HAVE_AS_CFI_PSEUDO_OP */ +#endif /* FFI_CFI_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi_common.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi_common.h new file mode 100644 index 0000000..76b9dd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/include/ffi_common.h @@ -0,0 +1,153 @@ +/* ----------------------------------------------------------------------- + ffi_common.h - Copyright (C) 2011, 2012, 2013 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc + Copyright (c) 1996 Red Hat, Inc. + + Common internal definitions and macros. Only necessary for building + libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_COMMON_H +#define FFI_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Do not move this. Some versions of AIX are very picky about where + this is positioned. */ +#ifdef __GNUC__ +# if HAVE_ALLOCA_H +# include +# else + /* mingw64 defines this already in malloc.h. */ +# ifndef alloca +# define alloca __builtin_alloca +# endif +# endif +# define MAYBE_UNUSED __attribute__((__unused__)) +#else +# define MAYBE_UNUSED +# if HAVE_ALLOCA_H +# include +# else +# ifdef _AIX +# pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +# ifdef _MSC_VER +# define alloca _alloca +# else +char *alloca (); +# endif +# endif +# endif +# endif +#endif + +/* Check for the existence of memcpy. 
*/ +#if STDC_HEADERS +# include +#else +# ifndef HAVE_MEMCPY +# define memcpy(d, s, n) bcopy ((s), (d), (n)) +# endif +#endif + +#if defined(FFI_DEBUG) +#include +#endif + +#ifdef FFI_DEBUG +void ffi_assert(char *expr, char *file, int line); +void ffi_stop_here(void); +void ffi_type_test(ffi_type *a, char *file, int line); + +#define FFI_ASSERT(x) ((x) ? (void)0 : ffi_assert(#x, __FILE__,__LINE__)) +#define FFI_ASSERT_AT(x, f, l) ((x) ? 0 : ffi_assert(#x, (f), (l))) +#define FFI_ASSERT_VALID_TYPE(x) ffi_type_test (x, __FILE__, __LINE__) +#else +#define FFI_ASSERT(x) +#define FFI_ASSERT_AT(x, f, l) +#define FFI_ASSERT_VALID_TYPE(x) +#endif + +/* v cast to size_t and aligned up to a multiple of a */ +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) +/* v cast to size_t and aligned down to a multiple of a */ +#define FFI_ALIGN_DOWN(v, a) (((size_t) (v)) & -a) + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif); +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs); + + +#if HAVE_LONG_DOUBLE_VARIANT +/* Used to adjust size/alignment of ffi types. */ +void ffi_prep_types (ffi_abi abi); +#endif + +/* Used internally, but overridden by some architectures */ +ffi_status ffi_prep_cif_core(ffi_cif *cif, + ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +/* Translate a data pointer to a code pointer. Needed for closures on + some targets. */ +void *ffi_data_to_code_pointer (void *data) FFI_HIDDEN; + +/* Extended cif, used in callback from assembly routine */ +typedef struct +{ + ffi_cif *cif; + void *rvalue; + void **avalue; +} extended_cif; + +/* Terse sized type definitions. */ +#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C) +typedef unsigned char UINT8; +typedef signed char SINT8; +typedef unsigned short UINT16; +typedef signed short SINT16; +typedef unsigned int UINT32; +typedef signed int SINT32; +# ifdef _MSC_VER +typedef unsigned __int64 UINT64; +typedef signed __int64 SINT64; +# else +# include +typedef uint64_t UINT64; +typedef int64_t SINT64; +# endif +#else +typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); +typedef signed int SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); +typedef signed int SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); +typedef signed int SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); +typedef signed int SINT64 __attribute__((__mode__(__DI__))); +#endif + +typedef float FLOAT32; + +#ifndef __GNUC__ +#define __builtin_expect(x, expected_value) (x) +#endif +#define LIKELY(x) __builtin_expect(!!(x),1) +#define UNLIKELY(x) __builtin_expect((x)!=0,0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.map.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.map.in new file mode 100644 index 0000000..de8778a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.map.in @@ -0,0 +1,76 @@ +#define LIBFFI_ASM +#define LIBFFI_H +#include +#include + +/* These version numbers correspond to the libtool-version abi numbers, + not to the libffi release numbers. */ + +LIBFFI_BASE_8.0 { + global: + /* Exported data variables. 
*/ + ffi_type_void; + ffi_type_uint8; + ffi_type_sint8; + ffi_type_uint16; + ffi_type_sint16; + ffi_type_uint32; + ffi_type_sint32; + ffi_type_uint64; + ffi_type_sint64; + ffi_type_float; + ffi_type_double; + ffi_type_longdouble; + ffi_type_pointer; + + /* Exported functions. */ + ffi_call; + ffi_prep_cif; + ffi_prep_cif_var; + + ffi_raw_call; + ffi_ptrarray_to_raw; + ffi_raw_to_ptrarray; + ffi_raw_size; + + ffi_java_raw_call; + ffi_java_ptrarray_to_raw; + ffi_java_raw_to_ptrarray; + ffi_java_raw_size; + + ffi_get_struct_offsets; + local: + *; +}; + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +LIBFFI_COMPLEX_8.0 { + global: + /* Exported data variables. */ + ffi_type_complex_float; + ffi_type_complex_double; + ffi_type_complex_longdouble; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_CLOSURES +LIBFFI_CLOSURE_8.0 { + global: + ffi_closure_alloc; + ffi_closure_free; + ffi_prep_closure; + ffi_prep_closure_loc; + ffi_prep_raw_closure; + ffi_prep_raw_closure_loc; + ffi_prep_java_raw_closure; + ffi_prep_java_raw_closure_loc; +} LIBFFI_BASE_8.0; +#endif + +#if FFI_GO_CLOSURES +LIBFFI_GO_CLOSURE_8.0 { + global: + ffi_call_go; + ffi_prep_go_closure; +} LIBFFI_CLOSURE_8.0; +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.pc.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.pc.in new file mode 100644 index 0000000..6fad83b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +toolexeclibdir=@toolexeclibdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Description: Library supporting Foreign Function Interfaces +Version: @PACKAGE_VERSION@ +Libs: -L${toolexeclibdir} -lffi +Cflags: -I${includedir} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj new file mode 100644 index 0000000..480c4a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libffi.xcodeproj/project.pbxproj @@ -0,0 +1,997 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + DBFA714A187F1D8600A76262 /* ffi.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA713F187F1D8600A76262 /* ffi_common.h */; }; + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7140187F1D8600A76262 /* fficonfig.h */; }; + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + DBFA714E187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA714F187F1D8600A76262 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + DBFA715A187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA715B187F1D8600A76262 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */; }; + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + 
FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */; }; + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */; }; + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716F187F1D9B00A76262 /* ffi_armv7.c */; }; + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7170187F1D9B00A76262 /* sysv_armv7.S */; }; + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */; }; + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716D187F1D9B00A76262 /* sysv_arm64.S */; }; + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA716C187F1D9B00A76262 /* ffi_arm64.c */; }; + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */; }; + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715F187F1D9B00A76262 /* ffi_armv7.h */; }; + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA715E187F1D9B00A76262 /* ffi_arm64.h */; }; + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */; }; + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */; }; + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */; }; + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = 
PBXBuildFile; fileRef = DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */; }; + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA713E187F1D8600A76262 /* ffi.h */; }; + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7183187F1DA100A76262 /* ffi_x86_64.h */; }; + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7141187F1D8600A76262 /* ffitarget.h */; }; + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */ = {isa = PBXBuildFile; fileRef = DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */; }; + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */; }; + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7147187F1D8600A76262 /* prep_cif.c */; }; + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */; }; + FDDB2F4F1F5D846400EF414E /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7149187F1D8600A76262 /* types.c */; }; + FDDB2F501F5D846400EF414E /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7148187F1D8600A76262 /* raw_api.c */; }; + FDDB2F511F5D846400EF414E /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = DBFA7143187F1D8600A76262 /* closures.c */; }; + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = DBFA718A187F1DA100A76262 /* unix64_x86_64.S */; }; + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */ = {isa = PBXBuildFile; fileRef = FDDB2F441F5D68C900EF414E /* win64_x86_64.S */; }; +/* End PBXBuildFile section */ + +/* Begin PBXCopyFilesBuildPhase section */ + DB13B1641849DF1E0010F42D /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD01F614A8B00AA92E6 /* ffi.h in CopyFiles */, + FDB52FD11F614AA700AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD21F614AAB00AA92E6 /* ffi_armv7.h in CopyFiles */, + FDB52FD41F614AB500AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FD81F614B8700AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FD91F614B8E00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FDA1F614B9300AA92E6 /* ffitarget_armv7.h in CopyFiles */, + FDB52FDD1F614BA900AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FC11F6144FA00AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 12; + dstPath = "include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FD51F614AE200AA92E6 /* ffi.h in CopyFiles */, + FDB52FD61F614AEA00AA92E6 /* ffi_arm64.h in CopyFiles */, + FDB52FD71F614AED00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FDE1F6155E300AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FDF1F6155EA00AA92E6 /* ffitarget_arm64.h in CopyFiles */, + FDB52FE01F6155EF00AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FE11F6156E000AA92E6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = 
"include/$(PRODUCT_NAME)"; + dstSubfolderSpec = 16; + files = ( + FDB52FE21F6156FA00AA92E6 /* ffi.h in CopyFiles */, + FDB52FE31F61571A00AA92E6 /* ffi_x86_64.h in CopyFiles */, + FDB52FE41F61571D00AA92E6 /* ffitarget.h in CopyFiles */, + FDB52FE61F61573100AA92E6 /* ffitarget_x86_64.h in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + 43E9A5DA1D35373600926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DB1D35374400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DC1D35375400926A8F /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + 43E9A5DD1D35375400926A8F /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + DB13B1661849DF1E0010F42D /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + DB13B1911849DF510010F42D /* ffi.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = ffi.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + DBFA713E187F1D8600A76262 /* ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi.h; sourceTree = ""; }; + DBFA713F187F1D8600A76262 /* ffi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_common.h; sourceTree = ""; }; + DBFA7140187F1D8600A76262 /* fficonfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig.h; sourceTree = ""; }; + DBFA7141187F1D8600A76262 /* ffitarget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget.h; sourceTree = ""; }; + DBFA7143187F1D8600A76262 /* closures.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = closures.c; sourceTree = ""; }; + DBFA7145187F1D8600A76262 /* dlmalloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dlmalloc.c; sourceTree = ""; }; + DBFA7147187F1D8600A76262 /* prep_cif.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = prep_cif.c; sourceTree = ""; }; + DBFA7148187F1D8600A76262 /* raw_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = raw_api.c; sourceTree = ""; }; + DBFA7149187F1D8600A76262 /* types.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = types.c; sourceTree = ""; }; + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = ffi_arm64.h; sourceTree = ""; }; + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_armv7.h; sourceTree = ""; }; + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_arm64.h; sourceTree = ""; }; + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_armv7.h; sourceTree = ""; }; + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_arm64.h; sourceTree = ""; }; + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_armv7.h; sourceTree = ""; }; + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA716C187F1D9B00A76262 /* ffi_arm64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_arm64.c; sourceTree = ""; }; + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_arm64.S; sourceTree = ""; }; + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi_armv7.c; sourceTree = ""; }; + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv_armv7.S; sourceTree = ""; }; + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = ""; }; + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = ""; }; + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = ""; }; + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = unix64_x86_64.S; sourceTree = ""; }; + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; indentWidth = 2; lastKnownFileType = sourcecode.c.c; path = ffi64_x86_64.c; sourceTree = ""; }; + FDB52FC51F6144FA00AA92E6 /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asmnames.h; sourceTree 
= ""; }; + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffiw64_x86_64.c; sourceTree = ""; }; + FDDB2F421F5D68C900EF414E /* internal64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal64.h; sourceTree = ""; }; + FDDB2F431F5D68C900EF414E /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = win64_x86_64.S; sourceTree = ""; }; + FDDB2F621F5D846400EF414E /* libffi.a */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXGroup section */ + DB13B15B1849DEB70010F42D = { + isa = PBXGroup; + children = ( + DBFA713C187F1D8600A76262 /* darwin_common */, + DBFA715C187F1D9B00A76262 /* darwin_ios */, + DBFA7180187F1DA100A76262 /* darwin_osx */, + DB13B1671849DF1E0010F42D /* Products */, + ); + sourceTree = ""; + }; + DB13B1671849DF1E0010F42D /* Products */ = { + isa = PBXGroup; + children = ( + DB13B1661849DF1E0010F42D /* libffi.a */, + DB13B1911849DF510010F42D /* ffi.dylib */, + FDDB2F621F5D846400EF414E /* libffi.a */, + FDB52FC51F6144FA00AA92E6 /* libffi.a */, + ); + name = Products; + sourceTree = ""; + }; + DBFA713C187F1D8600A76262 /* darwin_common */ = { + isa = PBXGroup; + children = ( + DBFA713D187F1D8600A76262 /* include */, + DBFA7142187F1D8600A76262 /* src */, + ); + path = darwin_common; + sourceTree = ""; + }; + DBFA713D187F1D8600A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA713E187F1D8600A76262 /* ffi.h */, + DBFA713F187F1D8600A76262 /* ffi_common.h */, + DBFA7140187F1D8600A76262 /* fficonfig.h */, + DBFA7141187F1D8600A76262 /* ffitarget.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7142187F1D8600A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7143187F1D8600A76262 /* closures.c */, + DBFA7145187F1D8600A76262 /* dlmalloc.c */, + DBFA7147187F1D8600A76262 /* prep_cif.c */, + DBFA7148187F1D8600A76262 /* raw_api.c */, + DBFA7149187F1D8600A76262 /* types.c */, + ); + path = src; + sourceTree = ""; + }; + DBFA715C187F1D9B00A76262 /* darwin_ios */ = { + isa = PBXGroup; + children = ( + DBFA715D187F1D9B00A76262 /* include */, + DBFA716A187F1D9B00A76262 /* src */, + ); + path = darwin_ios; + sourceTree = ""; + }; + DBFA715D187F1D9B00A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA715E187F1D9B00A76262 /* ffi_arm64.h */, + DBFA715F187F1D9B00A76262 /* ffi_armv7.h */, + DBFA7161187F1D9B00A76262 /* ffi_x86_64.h */, + DBFA7162187F1D9B00A76262 /* fficonfig_arm64.h */, + DBFA7163187F1D9B00A76262 /* fficonfig_armv7.h */, + DBFA7165187F1D9B00A76262 /* fficonfig_x86_64.h */, + DBFA7166187F1D9B00A76262 /* ffitarget_arm64.h */, + DBFA7167187F1D9B00A76262 /* ffitarget_armv7.h */, + DBFA7169187F1D9B00A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA716A187F1D9B00A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA716B187F1D9B00A76262 /* aarch64 */, + DBFA716E187F1D9B00A76262 /* arm */, + DBFA7172187F1D9B00A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA716B187F1D9B00A76262 /* aarch64 */ = { + isa = PBXGroup; + children = ( + 43E9A5DA1D35373600926A8F /* internal.h */, + DBFA716C187F1D9B00A76262 /* 
ffi_arm64.c */, + DBFA716D187F1D9B00A76262 /* sysv_arm64.S */, + ); + path = aarch64; + sourceTree = ""; + }; + DBFA716E187F1D9B00A76262 /* arm */ = { + isa = PBXGroup; + children = ( + 43E9A5DB1D35374400926A8F /* internal.h */, + DBFA716F187F1D9B00A76262 /* ffi_armv7.c */, + DBFA7170187F1D9B00A76262 /* sysv_armv7.S */, + ); + path = arm; + sourceTree = ""; + }; + DBFA7172187F1D9B00A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + 43E9A5DC1D35375400926A8F /* internal.h */, + 43E9A5DD1D35375400926A8F /* internal64.h */, + DBFA7175187F1D9B00A76262 /* ffi64_x86_64.c */, + 43B5D3F71D35473200D1E1FD /* ffiw64_x86_64.c */, + 43E9A5C61D352C1500926A8F /* unix64_x86_64.S */, + 43B5D3F91D3547CE00D1E1FD /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; + DBFA7180187F1DA100A76262 /* darwin_osx */ = { + isa = PBXGroup; + children = ( + DBFA7181187F1DA100A76262 /* include */, + DBFA7188187F1DA100A76262 /* src */, + ); + path = darwin_osx; + sourceTree = ""; + }; + DBFA7181187F1DA100A76262 /* include */ = { + isa = PBXGroup; + children = ( + DBFA7183187F1DA100A76262 /* ffi_x86_64.h */, + DBFA7185187F1DA100A76262 /* fficonfig_x86_64.h */, + DBFA7187187F1DA100A76262 /* ffitarget_x86_64.h */, + ); + path = include; + sourceTree = ""; + }; + DBFA7188187F1DA100A76262 /* src */ = { + isa = PBXGroup; + children = ( + DBFA7189187F1DA100A76262 /* x86 */, + ); + path = src; + sourceTree = ""; + }; + DBFA7189187F1DA100A76262 /* x86 */ = { + isa = PBXGroup; + children = ( + FDDB2F431F5D68C900EF414E /* internal.h */, + FDDB2F421F5D68C900EF414E /* internal64.h */, + FDDB2F3E1F5D61BC00EF414E /* asmnames.h */, + DBFA718C187F1DA100A76262 /* ffi64_x86_64.c */, + FDDB2F3F1F5D666900EF414E /* ffiw64_x86_64.c */, + DBFA718A187F1DA100A76262 /* unix64_x86_64.S */, + FDDB2F441F5D68C900EF414E /* win64_x86_64.S */, + ); + path = x86; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + DB13B18F1849DF510010F42D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA714C187F1D8600A76262 /* fficonfig.h in Headers */, + DBFA714D187F1D8600A76262 /* ffitarget.h in Headers */, + DBFA714A187F1D8600A76262 /* ffi.h in Headers */, + DBFA718F187F1DA100A76262 /* ffi_x86_64.h in Headers */, + DBFA7191187F1DA100A76262 /* fficonfig_x86_64.h in Headers */, + DBFA714B187F1D8600A76262 /* ffi_common.h in Headers */, + DBFA7193187F1DA100A76262 /* ffitarget_x86_64.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + DB13B1651849DF1E0010F42D /* libffi-iOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */; + buildPhases = ( + 43B5D3FB1D35480D00D1E1FD /* Run Script */, + DB13B1621849DF1E0010F42D /* Sources */, + DB13B1641849DF1E0010F42D /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-iOS"; + productName = ffi; + productReference = DB13B1661849DF1E0010F42D /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + DB13B1901849DF510010F42D /* libffi-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */; + buildPhases = ( + DB13B3061849E0490010F42D /* ShellScript */, + DB13B18D1849DF510010F42D /* Sources */, + DB13B18F1849DF510010F42D /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = 
"libffi-Mac"; + productName = ffi; + productReference = DB13B1911849DF510010F42D /* ffi.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */; + buildPhases = ( + FDB52FB11F6144FA00AA92E6 /* Run Script */, + FDB52FB21F6144FA00AA92E6 /* Sources */, + FDB52FC11F6144FA00AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-tvOS"; + productName = ffi; + productReference = FDB52FC51F6144FA00AA92E6 /* libffi.a */; + productType = "com.apple.product-type.library.static"; + }; + FDDB2F471F5D846400EF414E /* libffi-static-Mac */ = { + isa = PBXNativeTarget; + buildConfigurationList = FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */; + buildPhases = ( + FDDB2F481F5D846400EF414E /* ShellScript */, + FDDB2F491F5D846400EF414E /* Sources */, + FDB52FE11F6156E000AA92E6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libffi-static-Mac"; + productName = ffi; + productReference = FDDB2F621F5D846400EF414E /* libffi.a */; + productType = "com.apple.product-type.library.dynamic"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + DB13B15C1849DEB70010F42D /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0830; + }; + buildConfigurationList = DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = DB13B15B1849DEB70010F42D; + productRefGroup = DB13B1671849DF1E0010F42D /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + DB13B1651849DF1E0010F42D /* libffi-iOS */, + FDB52FB01F6144FA00AA92E6 /* libffi-tvOS */, + DB13B1901849DF510010F42D /* libffi-Mac */, + FDDB2F471F5D846400EF414E /* libffi-static-Mac */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 43B5D3FB1D35480D00D1E1FD /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + DB13B3061849E0490010F42D /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; + FDB52FB11F6144FA00AA92E6 /* Run Script */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Run Script"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! 
-f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-ios\nfi"; + }; + FDDB2F481F5D846400EF414E /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "if [ ! -f \"./compile\" ]\nthen\nautoreconf -i -f -v\nif [ -f \"../ltmain.sh\" ]\nthen\necho \"fixing ltmain.sh for some reason\"\nmv ../ltmain.sh ./\nautoreconf -i -f -v\nfi\n/usr/bin/python generate-darwin-source-and-headers.py --only-osx\nfi"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + DB13B1621849DF1E0010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 43E9A5C81D352C1500926A8F /* unix64_x86_64.S in Sources */, + DBFA717E187F1D9B00A76262 /* ffi64_x86_64.c in Sources */, + DBFA7179187F1D9B00A76262 /* ffi_armv7.c in Sources */, + DBFA714E187F1D8600A76262 /* closures.c in Sources */, + DBFA717A187F1D9B00A76262 /* sysv_armv7.S in Sources */, + 43B5D3F81D35473200D1E1FD /* ffiw64_x86_64.c in Sources */, + DBFA7156187F1D8600A76262 /* prep_cif.c in Sources */, + DBFA7158187F1D8600A76262 /* raw_api.c in Sources */, + DBFA7178187F1D9B00A76262 /* sysv_arm64.S in Sources */, + DBFA715A187F1D8600A76262 /* types.c in Sources */, + DBFA7177187F1D9B00A76262 /* ffi_arm64.c in Sources */, + 43B5D3FA1D3547CE00D1E1FD /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + DB13B18D1849DF510010F42D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + DBFA7196187F1DA100A76262 /* ffi64_x86_64.c in Sources */, + DBFA7157187F1D8600A76262 /* prep_cif.c in Sources */, + FDDB2F411F5D66E200EF414E /* ffiw64_x86_64.c in Sources */, + DBFA715B187F1D8600A76262 /* types.c in Sources */, + DBFA7159187F1D8600A76262 /* raw_api.c in Sources */, + DBFA714F187F1D8600A76262 /* closures.c in Sources */, + DBFA7194187F1DA100A76262 /* unix64_x86_64.S in Sources */, + FDDB2F461F5D691E00EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDB52FB21F6144FA00AA92E6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDB52FB31F6144FA00AA92E6 /* unix64_x86_64.S in Sources */, + FDB52FB51F6144FA00AA92E6 /* ffi64_x86_64.c in Sources */, + FDB52FB61F6144FA00AA92E6 /* ffi_armv7.c in Sources */, + FDB52FB71F6144FA00AA92E6 /* closures.c in Sources */, + FDB52FB81F6144FA00AA92E6 /* sysv_armv7.S in Sources */, + FDB52FB91F6144FA00AA92E6 /* ffiw64_x86_64.c in Sources */, + FDB52FBA1F6144FA00AA92E6 /* prep_cif.c in Sources */, + FDB52FBC1F6144FA00AA92E6 /* raw_api.c in Sources */, + FDB52FBD1F6144FA00AA92E6 /* sysv_arm64.S in Sources */, + FDB52FBE1F6144FA00AA92E6 /* types.c in Sources */, + FDB52FBF1F6144FA00AA92E6 /* ffi_arm64.c in Sources */, + FDB52FC01F6144FA00AA92E6 /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FDDB2F491F5D846400EF414E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FDDB2F4A1F5D846400EF414E /* ffi64_x86_64.c in Sources */, + FDDB2F4C1F5D846400EF414E /* prep_cif.c in Sources */, + FDDB2F4E1F5D846400EF414E /* ffiw64_x86_64.c in Sources */, + FDDB2F4F1F5D846400EF414E /* types.c in Sources */, + 
FDDB2F501F5D846400EF414E /* raw_api.c in Sources */, + FDDB2F511F5D846400EF414E /* closures.c in Sources */, + FDDB2F521F5D846400EF414E /* unix64_x86_64.S in Sources */, + FDDB2F531F5D846400EF414E /* win64_x86_64.S in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + DB13B1601849DEB70010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + ONLY_ACTIVE_ARCH = YES; + }; + name = Debug; + }; + DB13B1611849DEB70010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_common/include, + ); + }; + name = Release; + }; + DB13B1871849DF1E0010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DSTROOT = /tmp/ffi.dst; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Debug; + }; + DB13B1881849DF1E0010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + 
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + DSTROOT = /tmp/ffi.dst; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + IPHONEOS_DEPLOYMENT_TARGET = 8.0; + PRODUCT_NAME = ffi; + SDKROOT = iphoneos; + SKIP_INSTALL = YES; + VALIDATE_PRODUCT = YES; + VALID_ARCHS = "arm64 armv7 armv7s x86_64"; + }; + name = Release; + }; + DB13B1B11849DF520010F42D /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + DB13B1B21849DF520010F42D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_LDFLAGS = "-Wl,-no_compact_unwind"; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; + FDB52FC31F6144FA00AA92E6 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + 
CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + }; + name = Debug; + }; + FDB52FC41F6144FA00AA92E6 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_ios/include, + ); + PRODUCT_NAME = ffi; + SDKROOT = appletvos; + SKIP_INSTALL = YES; + TVOS_DEPLOYMENT_TARGET = 9.0; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + FDDB2F601F5D846400EF414E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Debug; + }; + FDDB2F611F5D846400EF414E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = 
YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + EXECUTABLE_EXTENSION = a; + EXECUTABLE_PREFIX = lib; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + darwin_osx/include, + ); + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + PRODUCT_NAME = ffi; + SDKROOT = macosx; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + DB13B15F1849DEB70010F42D /* Build configuration list for PBXProject "libffi" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1601849DEB70010F42D /* Debug */, + DB13B1611849DEB70010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B18B1849DF1E0010F42D /* Build configuration list for PBXNativeTarget "libffi-iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1871849DF1E0010F42D /* Debug */, + DB13B1881849DF1E0010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + DB13B1B01849DF520010F42D /* Build configuration list for PBXNativeTarget "libffi-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DB13B1B11849DF520010F42D /* Debug */, + DB13B1B21849DF520010F42D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDB52FC21F6144FA00AA92E6 /* Build configuration list for PBXNativeTarget "libffi-tvOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDB52FC31F6144FA00AA92E6 /* Debug */, + FDB52FC41F6144FA00AA92E6 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FDDB2F5F1F5D846400EF414E /* Build configuration list for PBXNativeTarget "libffi-static-Mac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FDDB2F601F5D846400EF414E /* Debug */, + FDDB2F611F5D846400EF414E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = DB13B15C1849DEB70010F42D /* Project object */; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libtool-version b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libtool-version new file mode 100644 index 0000000..607fee5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/libtool-version @@ -0,0 +1,29 @@ +# This file is used to maintain libtool version info for libffi. See +# the libtool manual to understand the meaning of the fields. This is +# a separate file so that version updates don't involve re-running +# automake. +# +# Here are a set of rules to help you update your library version +# information: +# +# 1. Start with version information of `0:0:0' for each libtool library. +# +# 2. Update the version information only immediately before a public +# release of your software. More frequent updates are unnecessary, +# and only guarantee that the current interface number gets larger +# faster. +# +# 3. 
If the library source code has changed at all since the last +# update, then increment revision (`c:r:a' becomes `c:r+1:a'). +# +# 4. If any interfaces have been added, removed, or changed since the +# last update, increment current, and set revision to 0. +# +# 5. If any interfaces have been added since the last public release, +# then increment age. +# +# 6. If any interfaces have been removed since the last public +# release, then set age to 0. +# +# CURRENT:REVISION:AGE +9:0:1 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/asmcfi.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/asmcfi.m4 new file mode 100644 index 0000000..3e28602 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/asmcfi.m4 @@ -0,0 +1,13 @@ +AC_DEFUN([GCC_AS_CFI_PSEUDO_OP], +[AC_CACHE_CHECK([assembler .cfi pseudo-op support], + gcc_cv_as_cfi_pseudo_op, [ + gcc_cv_as_cfi_pseudo_op=unknown + AC_TRY_COMPILE([asm (".cfi_sections\n\t.cfi_startproc\n\t.cfi_endproc");],, + [gcc_cv_as_cfi_pseudo_op=yes], + [gcc_cv_as_cfi_pseudo_op=no]) + ]) + if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then + AC_DEFINE(HAVE_AS_CFI_PSEUDO_OP, 1, + [Define if your assembler supports .cfi_* directives.]) + fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_append_flag.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_append_flag.m4 new file mode 100644 index 0000000..dd6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_append_flag.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
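The AX_APPEND_FLAG macro defined next appends FLAG to the named FLAGS variable only when it is not already present, and sets the variable to exactly FLAG when it is unset. A minimal plain-shell sketch of the same idea (CFLAGS and -O3 are chosen here purely for illustration):

    flag="-O3"                                  # example flag, illustration only
    case " $CFLAGS " in
      *" $flag "*) ;;                           # already present: leave CFLAGS unchanged
      *) CFLAGS="${CFLAGS:+$CFLAGS }$flag" ;;   # append with one space, or set exactly to the flag if unset
    esac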
+ +#serial 8 + +AC_DEFUN([AX_APPEND_FLAG], +[dnl +AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_SET_IF +AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])]) +AS_VAR_SET_IF(FLAGS,[ + AS_CASE([" AS_VAR_GET(FLAGS) "], + [*" $1 "*], [AC_RUN_LOG([: FLAGS already contains $1])], + [ + AS_VAR_APPEND(FLAGS,[" $1"]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) + ], + [ + AS_VAR_SET(FLAGS,[$1]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) +AS_VAR_POPDEF([FLAGS])dnl +])dnl AX_APPEND_FLAG diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 new file mode 100644 index 0000000..9e7f1ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_cc_maxopt.m4 @@ -0,0 +1,194 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cc_maxopt.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CC_MAXOPT +# +# DESCRIPTION +# +# Try to turn on "good" C optimization flags for various compilers and +# architectures, for some definition of "good". (In our case, good for +# FFTW and hopefully for other scientific codes. Modify as needed.) +# +# The user can override the flags by setting the CFLAGS environment +# variable. The user can also specify --enable-portable-binary in order to +# disable any optimization flags that might result in a binary that only +# runs on the host architecture. +# +# Note also that the flags assume that ANSI C aliasing rules are followed +# by the code (e.g. for gcc's -fstrict-aliasing), and that floating-point +# computations can be re-ordered as needed. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_COMPILER_VENDOR, +# AX_GCC_ARCHFLAG, AX_GCC_X86_CPUID. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
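As the description of AX_CC_MAXOPT above notes, the macro only guesses flags when CFLAGS was not set by the user, and --enable-portable-binary turns off host-specific tuning. Hypothetical invocations of a configure script that uses it:

    ./configure                               # let AX_CC_MAXOPT choose optimization flags
    ./configure --enable-portable-binary      # avoid flags tied to the build host's CPU
    ./configure CFLAGS="-O2 -g"               # explicit CFLAGS are respected and left untouched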
+ +#serial 17 + +AC_DEFUN([AX_CC_MAXOPT], +[ +AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AX_COMPILER_VENDOR]) +AC_REQUIRE([AC_CANONICAL_HOST]) + +AC_ARG_ENABLE(portable-binary, [AS_HELP_STRING([--enable-portable-binary], [disable compiler optimizations that would produce unportable binaries])], + acx_maxopt_portable=$enableval, acx_maxopt_portable=no) + +# Try to determine "good" native compiler flags if none specified via CFLAGS +if test "$ac_test_CFLAGS" != "set"; then + CFLAGS="" + case $ax_cv_c_compiler_vendor in + dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host" + if test "x$acx_maxopt_portable" = xno; then + CFLAGS="$CFLAGS -arch host" + fi;; + + sun) CFLAGS="-native -fast -xO5 -dalign" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS -xarch=generic" + fi;; + + hp) CFLAGS="+Oall +Optrs_ansi +DSnative" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS +DAportable" + fi;; + + ibm) if test "x$acx_maxopt_portable" = xno; then + xlc_opt="-qarch=auto -qtune=auto" + else + xlc_opt="-qtune=auto" + fi + AX_CHECK_COMPILE_FLAG($xlc_opt, + CFLAGS="-O3 -qansialias -w $xlc_opt", + [CFLAGS="-O3 -qansialias -w" + echo "******************************************************" + echo "* You seem to have the IBM C compiler. It is *" + echo "* recommended for best performance that you use: *" + echo "* *" + echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *" + echo "* ^^^ ^^^ *" + echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *" + echo "* CPU you have. (Set the CFLAGS environment var. *" + echo "* and re-run configure.) For more info, man cc. *" + echo "******************************************************"]) + ;; + + intel) CFLAGS="-O3 -ansi_alias" + if test "x$acx_maxopt_portable" = xno; then + icc_archflag=unknown + icc_flags="" + case $host_cpu in + i686*|x86_64*) + # icc accepts gcc assembly syntax, so these should work: + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) icc_flags="-xK" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) icc_flags="-xSSE2 -xB -xK" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) icc_flags="-xSSE3 -xP -xO -xB -xK" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) icc_flags="-xSSSE3 -xT -xB -xK" ;; + *1?6[[7d]]?:*:*:*) icc_flags="-xSSE4.1 -xS -xT -xB -xK" ;; + *1?6[[aef]]?:*:*:*|*2?6[[5cef]]?:*:*:*) icc_flags="-xSSE4.2 -xS -xT -xB -xK" ;; + *2?6[[ad]]?:*:*:*) icc_flags="-xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[ae]]?:*:*:*) icc_flags="-xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) icc_flags="-xCORE-AVX2 -xCORE-AVX-I -xAVX -SSE4.2 -xS -xT -xB -xK" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) icc_flags="-xSSE3 -xP -xO -xN -xW -xK" ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) icc_flags="-xSSE2 -xN -xW -xK" ;; + esac ;; + esac ;; + esac + if test "x$icc_flags" != x; then + for flag in $icc_flags; do + AX_CHECK_COMPILE_FLAG($flag, [icc_archflag=$flag; break]) + done + fi + AC_MSG_CHECKING([for icc architecture flag]) + AC_MSG_RESULT($icc_archflag) + if test "x$icc_archflag" != xunknown; then + CFLAGS="$CFLAGS $icc_archflag" + fi + fi + ;; + + gnu) + # default optimization flags for gcc on all systems + CFLAGS="-O3 -fomit-frame-pointer" + + # -malign-double for x86 systems + # libffi local change -- don't align double, as it changes the ABI + # 
AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double") + + # -fstrict-aliasing for gcc-2.95+ + AX_CHECK_COMPILE_FLAG(-fstrict-aliasing, + CFLAGS="$CFLAGS -fstrict-aliasing") + + # note that we enable "unsafe" fp optimization with other compilers, too + AX_CHECK_COMPILE_FLAG(-ffast-math, CFLAGS="$CFLAGS -ffast-math") + + AX_GCC_ARCHFLAG($acx_maxopt_portable) + ;; + + microsoft) + # default optimization flags for MSVC opt builds + CFLAGS="-O2" + ;; + esac + + if test -z "$CFLAGS"; then + echo "" + echo "********************************************************" + echo "* WARNING: Don't know the best CFLAGS for this system *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "* (otherwise, a default of CFLAGS=-O3 will be used) *" + echo "********************************************************" + echo "" + CFLAGS="-O3" + fi + + AX_CHECK_COMPILE_FLAG($CFLAGS, [], [ + echo "" + echo "********************************************************" + echo "* WARNING: The guessed CFLAGS don't seem to work with *" + echo "* your compiler. *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "********************************************************" + echo "" + CFLAGS="" + ]) + +fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 new file mode 100644 index 0000000..094577e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_cflags_warn_all.m4 @@ -0,0 +1,122 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_cflags_warn_all.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_CXXFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# AX_FCFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] +# +# DESCRIPTION +# +# Try to find a compiler option that enables most reasonable warnings. +# +# For the GNU compiler it will be -Wall (and -ansi -pedantic) The result +# is added to the shellvar being CFLAGS, CXXFLAGS, or FCFLAGS by default. +# +# Currently this macro knows about the GCC, Solaris, Digital Unix, AIX, +# HP-UX, IRIX, NEC SX-5 (Super-UX 10), Cray J90 (Unicos 10.0.0.8), and +# Intel compilers. For a given compiler, the Fortran flags are much more +# experimental than their C equivalents. +# +# - $1 shell-variable-to-add-to : CFLAGS, CXXFLAGS, or FCFLAGS +# - $2 add-value-if-not-found : nothing +# - $3 action-if-found : add value to shellvariable +# - $4 action-if-not-found : nothing +# +# NOTE: These macros depend on AX_APPEND_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2010 Rhys Ulerich +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 16 + +AC_DEFUN([AX_FLAGS_WARN_ALL],[dnl +AS_VAR_PUSHDEF([FLAGS],[_AC_LANG_PREFIX[]FLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_[]_AC_LANG_ABBREV[]flags_warn_all])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum warnings], +VAR,[VAR="no, unknown" +ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-warn all % -warn all" dnl Intel + "-pedantic % -Wall" dnl GCC + "-xstrconst % -v" dnl Solaris C + "-std1 % -verbose -w0 -warnprotos" dnl Digital Unix + "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX + "-ansi -ansiE % -fullwarn" dnl IRIX + "+ESlit % +w1" dnl HP-UX C + "-Xc % -pvctl[,]fullmsg" dnl NEC SX-5 (Super-UX 10) + "-h conform % -h msglevel 2" dnl Cray C (Unicos) + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done +FLAGS="$ac_save_[]FLAGS" +]) +AS_VAR_POPDEF([FLAGS])dnl +AX_REQUIRE_DEFINED([AX_APPEND_FLAG]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_default($4,[m4_ifval($2,[AX_APPEND_FLAG([$2], [$1])])]) ;; + *) m4_default($3,[AX_APPEND_FLAG([$VAR], [$1])]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +])dnl AX_FLAGS_WARN_ALL +dnl implementation tactics: +dnl the for-argument contains a list of options. The first part of +dnl these does only exist to detect the compiler - usually it is +dnl a global option to enable -ansi or -extrawarnings. All other +dnl compilers will fail about it. That was needed since a lot of +dnl compilers will give false positives for some option-syntax +dnl like -Woption or -Xoption as they think of it is a pass-through +dnl to later compile stages or something. The "%" is used as a +dnl delimiter. A non-option comment can be given after "%%" marks +dnl which will be shown but not added to the respective C/CXXFLAGS. 
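The tactics note above is the key to reading the option list in AX_FLAGS_WARN_ALL: the text before each "%" only has to be accepted by the intended compiler, and the text after it is what actually gets appended. A rough plain-shell rendering of that probe, limited to the Intel and GCC entries and assuming cc plus a throwaway conftest.c:

    printf 'int main(void) { return 0; }\n' > conftest.c
    found=
    for pair in "-warn all % -warn all" "-pedantic % -Wall"; do
      detect=${pair%%\%*}                     # detection flags (before the %)
      warn=${pair#*% }                        # flags that would be appended (after the %)
      if cc $detect -c conftest.c -o conftest.o 2>/dev/null; then
        found=$warn; break
      fi
    done
    test -n "$found" && CFLAGS="${CFLAGS:+$CFLAGS }$found"
    rm -f conftest.c conftest.o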
+ +AC_DEFUN([AX_CFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C]) +]) + +AC_DEFUN([AX_CXXFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([C++]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([C++]) +]) + +AC_DEFUN([AX_FCFLAGS_WARN_ALL],[dnl +AC_LANG_PUSH([Fortran]) +AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) +AC_LANG_POP([Fortran]) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 new file mode 100644 index 0000000..bd753b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_check_compile_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 new file mode 100644 index 0000000..73efdb0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_compiler_vendor.m4 @@ -0,0 +1,88 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_COMPILER_VENDOR +# +# DESCRIPTION +# +# Determine the vendor of the C/C++ compiler, e.g., gnu, intel, ibm, sun, +# hp, borland, comeau, dec, cray, kai, lcc, metrowerks, sgi, microsoft, +# watcom, etc. The vendor is returned in the cache variable +# $ax_cv_c_compiler_vendor for C and $ax_cv_cxx_compiler_vendor for C++. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
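AX_COMPILER_VENDOR, described above, identifies the compiler by testing vendor-specific predefined macros, with gcc deliberately checked late because other compilers also define __GNUC__. The same idea can be sketched directly in C (only a few vendors shown; this is an illustration, not the macro's full table):

/* Sketch of the vendor test AX_COMPILER_VENDOR performs at configure
   time: each candidate vendor maps to a set of predefined macros, and
   the first set that is defined wins.  Intel and clang are checked
   before gnu because they also define __GNUC__. */
#include <stdio.h>

int main(void)
{
#if defined(__INTEL_COMPILER) || defined(__ICC)
    puts("intel");
#elif defined(__clang__)
    puts("clang");
#elif defined(_MSC_VER)
    puts("microsoft");
#elif defined(__GNUC__)
    puts("gnu");
#else
    puts("unknown");
#endif
    return 0;
}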
+ +#serial 17 + +AC_DEFUN([AX_COMPILER_VENDOR], +[AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, + dnl Please add if possible support to ax_compiler_version.m4 + [# note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + cray: _CRAYC + fujitsu: __FUJITSU + sdcc: SDCC, __SDCC + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__CODEGEARC__,__TURBOC__ + comeau: __COMO__ + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + tcc: __TINYC__ + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ + #if !($vencpp) + thisisanerror; + #endif + ])], [break]) + done + ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1` + ]) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_configure_args.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_configure_args.m4 new file mode 100644 index 0000000..9237efe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_configure_args.m4 @@ -0,0 +1,49 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_configure_args.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CONFIGURE_ARGS +# +# DESCRIPTION +# +# Helper macro for AX_ENABLE_BUILDDIR. +# +# The traditional way of starting a subdir-configure is running the script +# with ${1+"$@"} but since autoconf 2.60 this is broken. Instead we have +# to rely on eval'ing $ac_configure_args however some old autoconf +# versions do not provide that. To ensure maximum portability of autoconf +# extension macros this helper can be AC_REQUIRE'd so that +# $ac_configure_args will always be present. +# +# Sadly, the traditional "exec $SHELL" of the enable_builddir macros is +# spoiled now and must be replaced by "eval + exit $?". +# +# Example: +# +# AC_DEFUN([AX_ENABLE_SUBDIR],[dnl +# AC_REQUIRE([AX_CONFIGURE_ARGS])dnl +# eval $SHELL $ac_configure_args || exit $? +# ...]) +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 14 + +AC_DEFUN([AX_CONFIGURE_ARGS],[ + # [$]@ is unusable in 2.60+ but earlier autoconf had no ac_configure_args + if test "${ac_configure_args+set}" != "set" ; then + ac_configure_args= + for ac_arg in ${1+"[$]@"}; do + ac_configure_args="$ac_configure_args '$ac_arg'" + done + fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 new file mode 100644 index 0000000..710384d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_enable_builddir.m4 @@ -0,0 +1,302 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_enable_builddir.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_ENABLE_BUILDDIR [(dirstring-or-command [,Makefile.mk [,-all]])] +# +# DESCRIPTION +# +# If the current configure was run within the srcdir then we move all +# configure-files into a subdir and let the configure steps continue +# there. We provide an option --disable-builddir to suppress the move into +# a separate builddir. +# +# Defaults: +# +# $1 = $host (overridden with $HOST) +# $2 = Makefile.mk +# $3 = -all +# +# This macro must be called before AM_INIT_AUTOMAKE. It creates a default +# toplevel srcdir Makefile from the information found in the created +# toplevel builddir Makefile. It just copies the variables and +# rule-targets, each extended with a default rule-execution that recurses +# into the build directory of the current "HOST". You can override the +# auto-detection through `config.guess` and build-time of course, as in +# +# make HOST=i386-mingw-cross +# +# which can of course set at configure time as well using +# +# configure --host=i386-mingw-cross +# +# After the default has been created, additional rules can be appended +# that will not just recurse into the subdirectories and only ever exist +# in the srcdir toplevel makefile - these parts are read from the $2 = +# Makefile.mk file +# +# The automatic rules are usually scanning the toplevel Makefile for lines +# like '#### $host |$builddir' to recognize the place where to recurse +# into. Usually, the last one is the only one used. However, almost all +# targets have an additional "*-all" rule which makes the script to +# recurse into _all_ variants of the current HOST (!!) setting. The "-all" +# suffix can be overridden for the macro as well. +# +# a special rule is only given for things like "dist" that will copy the +# tarball from the builddir to the sourcedir (or $(PUB)) for reason of +# convenience. +# +# LICENSE +# +# Copyright (c) 2009 Guido U. Draheim +# Copyright (c) 2009 Alan Jenkins +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 30 + +AC_DEFUN([AX_ENABLE_BUILDDIR],[ +AC_REQUIRE([AC_CANONICAL_HOST])[]dnl +AC_REQUIRE([AC_CANONICAL_TARGET])[]dnl +AC_REQUIRE([AX_CONFIGURE_ARGS])[]dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])[]dnl +AC_BEFORE([$0],[AM_INIT_AUTOMAKE])dnl +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +SUB="." +AC_ARG_ENABLE([builddir], AS_HELP_STRING( + [--disable-builddir],[disable automatic build in subdir of sources]) + ,[SUB="$enableval"], [SUB="auto"]) +if test ".$ac_srcdir_defaulted" != ".no" ; then +if test ".$srcdir" = ".." ; then + if test -f config.status ; then + AC_MSG_NOTICE(toplevel srcdir already configured... skipping subdir build) + else + test ".$SUB" = "." && SUB="." + test ".$SUB" = ".no" && SUB="." + test ".$TARGET" = "." && TARGET="$target" + test ".$SUB" = ".auto" && SUB="m4_ifval([$1], [$1],[$TARGET])" + if test ".$SUB" != ".." ; then # we know where to go and + AS_MKDIR_P([$SUB]) + echo __.$SUB.__ > $SUB/conftest.tmp + cd $SUB + if grep __.$SUB.__ conftest.tmp >/dev/null 2>/dev/null ; then + rm conftest.tmp + AC_MSG_RESULT([continue configure in default builddir "./$SUB"]) + else + AC_MSG_ERROR([could not change to default builddir "./$SUB"]) + fi + srcdir=`echo "$SUB" | + sed -e 's,^\./,,;s,[[^/]]$,&/,;s,[[^/]]*/,../,g;s,[[/]]$,,;'` + # going to restart from subdirectory location + test -f $srcdir/config.log && mv $srcdir/config.log . + test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h . + test -f $srcdir/conftest.log && mv $srcdir/conftest.log . + test -f $srcdir/$cache_file && mv $srcdir/$cache_file . + AC_MSG_RESULT(....exec $SHELL $srcdir/[$]0 "--srcdir=$srcdir" "--enable-builddir=$SUB" ${1+"[$]@"}) + case "[$]0" in # restart + [[\\/]]* | ?:[[\\/]]*) # Absolute name + eval $SHELL "'[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + *) eval $SHELL "'$srcdir/[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;; + esac ; exit $? + fi + fi +fi fi +test ".$SUB" = ".auto" && SUB="." +dnl ac_path_prog uses "set dummy" to override $@ which would defeat the "exec" +AC_PATH_PROG(SED,gsed sed, sed) +AUX="$am_aux_dir" +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SUB])dnl +AC_CONFIG_COMMANDS([buildir],[dnl .............. config.status .............. +AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl +AS_VAR_PUSHDEF([TOP],[top_srcdir])dnl +AS_VAR_PUSHDEF([SRC],[ac_top_srcdir])dnl +AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl +AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl +pushdef([END],[Makefile.mk])dnl +pushdef([_ALL],[ifelse([$3],,[-all],[$3])])dnl + SRC="$ax_enable_builddir_srcdir" + if test ".$SUB" = ".." 
; then + if test -f "$TOP/Makefile" ; then + AC_MSG_NOTICE([skipping TOP/Makefile - left untouched]) + else + AC_MSG_NOTICE([skipping TOP/Makefile - not created]) + fi + else + if test -f "$SRC/Makefile" ; then + a=`grep "^VERSION " "$SRC/Makefile"` ; b=`grep "^VERSION " Makefile` + test "$a" != "$b" && rm "$SRC/Makefile" + fi + if test -f "$SRC/Makefile" ; then + echo "$SRC/Makefile : $SRC/Makefile.in" > $tmp/conftemp.mk + echo " []@ echo 'REMOVED,,,' >\$[]@" >> $tmp/conftemp.mk + eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null" + if grep '^REMOVED,,,' "$SRC/Makefile" >/dev/null + then rm $SRC/Makefile ; fi + cp $tmp/conftemp.mk $SRC/makefiles.mk~ ## DEBUGGING + fi + if test ! -f "$SRC/Makefile" ; then + AC_MSG_NOTICE([create TOP/Makefile guessed from local Makefile]) + x='`' ; cat >$tmp/conftemp.sed <<_EOF +/^\$/n +x +/^\$/bS +x +/\\\\\$/{H;d;} +{H;s/.*//;x;} +bM +:S +x +/\\\\\$/{h;d;} +{h;s/.*//;x;} +:M +s/\\(\\n\\) /\\1 /g +/^ /d +/^[[ ]]*[[\\#]]/d +/^VPATH *=/d +s/^srcdir *=.*/srcdir = ./ +s/^top_srcdir *=.*/top_srcdir = ./ +/[[:=]]/!d +/^\\./d +dnl Now handle rules (i.e. lines containing ":" but not " = "). +/ = /b +/ .= /b +/:/!b +s/:.*/:/ +s/ / /g +s/ \\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/ \\1 \\1[]_ALL\\2/g +s/^\\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/\\1 \\1[]_ALL\\2/ +s/ / /g +/^all all[]_ALL[[ :]]/i\\ +all-configured : all[]_ALL +dnl dist-all exists... and would make for dist-all-all +s/ [[a-zA-Z0-9-]]*[]_ALL [[a-zA-Z0-9-]]*[]_ALL[]_ALL//g +/[]_ALL[]_ALL/d +a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done +dnl special rule add-on: "dist" copies the tarball to $(PUB). (source tree) +/dist[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "dist-foo" copies all the archives to $(PUB). (source tree) +/dist-[[a-zA-Z0-9]]*[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\ + ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\ + ; found=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\ + ; if test "\$\$found" -eq "0" ; then : \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\ + ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." 
&& continue \\\\\\ + ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\ + ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done +dnl special rule add-on: "distclean" removes all local builddirs completely +/distclean[]_ALL *:/a\\ + @ HOST="\$(HOST)\" \\\\\\ + ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\ + ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\ + ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\ + ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\ + ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\ + ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\ + ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\ + ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\ + ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile +_EOF + cp "$tmp/conftemp.sed" "$SRC/makefile.sed~" ## DEBUGGING + $SED -f $tmp/conftemp.sed Makefile >$SRC/Makefile + if test -f "$SRC/m4_ifval([$2],[$2],[END])" ; then + AC_MSG_NOTICE([extend TOP/Makefile with TOP/m4_ifval([$2],[$2],[END])]) + cat $SRC/END >>$SRC/Makefile + fi ; xxxx="####" + echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$SRC/Makefile + # sanity check + if grep '^; echo "MAKE ' $SRC/Makefile >/dev/null ; then + AC_MSG_NOTICE([buggy sed found - it deletes tab in "a" text parts]) + $SED -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $SRC/Makefile \ + >$SRC/Makefile~ + (test -s $SRC/Makefile~ && mv $SRC/Makefile~ $SRC/Makefile) 2>/dev/null + fi + else + xxxx="\\#\\#\\#\\#" + # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed + echo "s!^$xxxx [[^|]]* | *$SUB *\$!$xxxx ...... $SUB!" 
>$tmp/conftemp.sed + $SED -f "$tmp/conftemp.sed" "$SRC/Makefile" >$tmp/mkfile.tmp + cp "$tmp/conftemp.sed" "$SRC/makefiles.sed~" ## DEBUGGING + cp "$tmp/mkfile.tmp" "$SRC/makefiles.out~" ## DEBUGGING + if cmp -s "$SRC/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then + AC_MSG_NOTICE([keeping TOP/Makefile from earlier configure]) + rm "$tmp/mkfile.tmp" + else + AC_MSG_NOTICE([reusing TOP/Makefile from earlier configure]) + mv "$tmp/mkfile.tmp" "$SRC/Makefile" + fi + fi + AC_MSG_NOTICE([build in $SUB (HOST=$ax_enable_builddir_host)]) + xxxx="####" + echo "$xxxx" "$ax_enable_builddir_host" "|$SUB" >>$SRC/Makefile + fi +popdef([END])dnl +AS_VAR_POPDEF([SED])dnl +AS_VAR_POPDEF([AUX])dnl +AS_VAR_POPDEF([SRC])dnl +AS_VAR_POPDEF([TOP])dnl +AS_VAR_POPDEF([SUB])dnl +],[dnl +ax_enable_builddir_srcdir="$srcdir" # $srcdir +ax_enable_builddir_host="$HOST" # $HOST / $host +ax_enable_builddir_version="$VERSION" # $VERSION +ax_enable_builddir_package="$PACKAGE" # $PACKAGE +ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX +ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED +ax_enable_builddir="$ax_enable_builddir" # $SUB +])dnl +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 new file mode 100644 index 0000000..c52b9b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_gcc_archflag.m4 @@ -0,0 +1,267 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_archflag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_ARCHFLAG([PORTABLE?], [ACTION-SUCCESS], [ACTION-FAILURE]) +# +# DESCRIPTION +# +# This macro tries to guess the "native" arch corresponding to the target +# architecture for use with gcc's -march=arch or -mtune=arch flags. If +# found, the cache variable $ax_cv_gcc_archflag is set to this flag and +# ACTION-SUCCESS is executed; otherwise $ax_cv_gcc_archflag is set to +# "unknown" and ACTION-FAILURE is executed. The default ACTION-SUCCESS is +# to add $ax_cv_gcc_archflag to the end of $CFLAGS. +# +# PORTABLE? should be either [yes] (default) or [no]. In the former case, +# the flag is set to -mtune (or equivalent) so that the architecture is +# only used for tuning, but the instruction set used is still portable. In +# the latter case, the flag is set to -march (or equivalent) so that +# architecture-specific instructions are enabled. +# +# The user can specify --with-gcc-arch= in order to override the +# macro's choice of architecture, or --without-gcc-arch to disable this. +# +# When cross-compiling, or if $CC is not gcc, then ACTION-FAILURE is +# called unless the user specified --with-gcc-arch manually. +# +# Requires macros: AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID +# +# (The main emphasis here is on recent CPUs, on the principle that doing +# high-performance computing on old hardware is uncommon.) +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2014 Tsukasa Oi +# Copyright (c) 2017-2018 Alexey Kopytov +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. 
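AX_GCC_ARCHFLAG, described above, makes a configure-time guess at the host CPU so that a -march or -mtune flag can be baked into CFLAGS. As a point of comparison only (this is a different, runtime technique and not something the macro does), GCC and clang on x86 also expose builtins for querying the running CPU:

/* Runtime counterpart to the compile-time -march guess: x86-only
   GCC/clang builtins that query the CPU the program is running on.
   Shown purely for comparison; AX_GCC_ARCHFLAG works at configure time. */
#include <stdio.h>

int main(void)
{
    __builtin_cpu_init();
    if (__builtin_cpu_supports("avx2"))
        puts("this CPU supports AVX2");
    if (__builtin_cpu_is("haswell"))
        puts("this CPU identifies as Haswell");
    return 0;
}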
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 22 + +AC_DEFUN([AX_GCC_ARCHFLAG], +[AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_REQUIRE([AC_PROG_SED]) +AC_REQUIRE([AX_COMPILER_VENDOR]) + +AC_ARG_WITH(gcc-arch, [AS_HELP_STRING([--with-gcc-arch=], [use architecture for gcc -march/-mtune, instead of guessing])], + ax_gcc_arch=$withval, ax_gcc_arch=yes) + +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT([]) +AC_CACHE_VAL(ax_cv_gcc_archflag, +[ +ax_cv_gcc_archflag="unknown" + +if test "$GCC" = yes; then + +if test "x$ax_gcc_arch" = xyes; then +ax_gcc_arch="" +if test "$cross_compiling" = no; then +case $host_cpu in + i[[3456]]86*|x86_64*|amd64*) # use cpuid codes + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in + *:756e6547:6c65746e:49656e69) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *5[[4578]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;; + *5[[123]]?:*:*:*) ax_gcc_arch=pentium ;; + *0?61?:*:*:*|?61?:*:*:*|61?:*:*:*) ax_gcc_arch=pentiumpro ;; + *0?6[[356]]?:*:*:*|?6[[356]]?:*:*:*|6[[356]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;; + *0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) ax_gcc_arch="yonah pentium-m pentium3 pentiumpro" ;; + *0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[7d]]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;; + *1?6[[aef]]?:*:*:*|*2?6e?:*:*:*) ax_gcc_arch="nehalem corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[5cf]]?:*:*:*) ax_gcc_arch="westmere corei7 core2 pentium-m pentium3 pentiumpro" ;; + *2?6[[ad]]?:*:*:*) ax_gcc_arch="sandybridge corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[ae]]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *3?6d?:*:*:*|*4?6[[7f]]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;; + *1?6c?:*:*:*|*2?6[[67]]?:*:*:*|*3?6[[56]]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;; + 
*3?67?:*:*:*|*[[45]]?6[[ad]]?:*:*:*) ax_gcc_arch="silvermont atom core2 pentium-m pentium3 pentiumpro" ;; + *000?f[[012]]?:*:*:*|?f[[012]]?:*:*:*|f[[012]]?:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + *000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) ax_gcc_arch="nocona prescott pentium4 pentiumpro" ;; + # fallback + *5??:*:*:*) ax_gcc_arch=pentium ;; + *??6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;; + *6??:*:*:*) ax_gcc_arch=pentiumpro ;; + *00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;; + esac ;; + *:68747541:444d4163:69746e65) # AMD + case $ax_cv_gcc_x86_cpuid_1 in + *5[[67]]?:*:*:*) ax_gcc_arch=k6 ;; + *5[[8]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;; + *5[[9d]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;; + *6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;; + *6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;; + *6[[678a]]?:*:*:*) ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;; + *000?f[[4578bcef]]?:*:*:*|?f[[4578bcef]]?:*:*:*|f[[4578bcef]]?:*:*:*|*001?f[[4578bcf]]?:*:*:*|1?f[[4578bcf]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;; + *002?f[[13457bcf]]?:*:*:*|2?f[[13457bcf]]?:*:*:*|*004?f[[138bcf]]?:*:*:*|4?f[[138bcf]]?:*:*:*|*005?f[[df]]?:*:*:*|5?f[[df]]?:*:*:*|*006?f[[8bcf]]?:*:*:*|6?f[[8bcf]]?:*:*:*|*007?f[[cf]]?:*:*:*|7?f[[cf]]?:*:*:*|*00c?f1?:*:*:*|c?f1?:*:*:*|*020?f3?:*:*:*|20?f3?:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *010?f[[245689a]]?:*:*:*|10?f[[245689a]]?:*:*:*|*030?f1?:*:*:*|30?f1?:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *050?f[[12]]?:*:*:*|50?f[[12]]?:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f1?:*:*:*|60?f1?:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *060?f2?:*:*:*|60?f2?:*:*:*|*061?f[[03]]?:*:*:*|61?f[[03]]?:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *063?f0?:*:*:*|63?f0?:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *07[[03]]?f0?:*:*:*|7[[03]]?f0?:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + # fallback + *0[[13]]??f??:*:*:*|[[13]]??f??:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;; + *020?f??:*:*:*|20?f??:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;; + *05??f??:*:*:*|5??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;; + *060?f??:*:*:*|60?f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;; + *061?f??:*:*:*|61?f??:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;; + *06??f??:*:*:*|6??f??:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;; + *070?f??:*:*:*|70?f??:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;; + *???f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;; + esac ;; + *:746e6543:736c7561:48727561) # IDT / VIA (Centaur) + case $ax_cv_gcc_x86_cpuid_1 in + *54?:*:*:*) ax_gcc_arch=winchip-c6 ;; + *5[[89]]?:*:*:*) ax_gcc_arch=winchip2 ;; + *66?:*:*:*) ax_gcc_arch=winchip2 ;; + *6[[78]]?:*:*:*) ax_gcc_arch=c3 ;; + *6[[9adf]]?:*:*:*) ax_gcc_arch="c3-2 c3" ;; + esac ;; + esac + if test x"$ax_gcc_arch" = x; then # fallback + case $host_cpu in + i586*) ax_gcc_arch=pentium ;; + i686*) ax_gcc_arch=pentiumpro ;; + esac + fi + ;; + + sparc*) + AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/]) + cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null` + cputype=`echo "$cputype" | tr -d ' -' | $SED 's/SPARCIIi/SPARCII/' |tr $as_cr_LETTERS $as_cr_letters` + case $cputype in + *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;; + *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;; + *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;; + 
*supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;; + *hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;; + *cypress*) ax_gcc_arch=cypress ;; + esac ;; + + alphaev5) ax_gcc_arch=ev5 ;; + alphaev56) ax_gcc_arch=ev56 ;; + alphapca56) ax_gcc_arch="pca56 ev56" ;; + alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;; + alphaev6) ax_gcc_arch=ev6 ;; + alphaev67) ax_gcc_arch=ev67 ;; + alphaev68) ax_gcc_arch="ev68 ev67" ;; + alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;; + alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;; + alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;; + + powerpc*) + cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | $SED 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null` + cputype=`echo $cputype | $SED -e 's/ppc//g;s/ *//g'` + case $cputype in + *750*) ax_gcc_arch="750 G3" ;; + *740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;; + *74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;; + *74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;; + *970*) ax_gcc_arch="970 G5 power4";; + *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";; + *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";; + 603ev|8240) ax_gcc_arch="$cputype 603e 603";; + *POWER7*) ax_gcc_arch="power7";; + *POWER8*) ax_gcc_arch="power8";; + *POWER9*) ax_gcc_arch="power9";; + *POWER10*) ax_gcc_arch="power10";; + *) ax_gcc_arch=$cputype ;; + esac + ax_gcc_arch="$ax_gcc_arch powerpc" + ;; + aarch64) + cpuimpl=`grep 'CPU implementer' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuarch=`grep 'CPU architecture' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + cpuvar=`grep 'CPU variant' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1` + case $cpuimpl in + 0x42) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + 0x43) case $cpuarch in + 8) case $cpuvar in + 0x0) ax_gcc_arch="thunderx armv8-a native" ;; + 0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;; + esac + ;; + esac + ;; + esac + ;; +esac +fi # not cross-compiling +fi # guess arch + +if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then +if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code + flag_prefixes="-mtune=" + if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then flag_prefixes="-march="; fi + # -mcpu=$arch and m$arch generate nonportable code on every arch except + # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr. 
+ case $host_cpu in i*86|x86_64*|amd64*) flag_prefixes="$flag_prefixes -mcpu= -m";; esac +else + flag_prefixes="-march= -mcpu= -m" +fi +for flag_prefix in $flag_prefixes; do + for arch in $ax_gcc_arch; do + flag="$flag_prefix$arch" + AX_CHECK_COMPILE_FLAG($flag, [if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then + if test "x[]m4_default([$1],yes)" = xyes; then + if test "x$flag" = "x-march=$arch"; then flag=-mtune=$arch; fi + fi + fi; ax_cv_gcc_archflag=$flag; break]) + done + test "x$ax_cv_gcc_archflag" = xunknown || break +done +fi + +fi # $GCC=yes +]) +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT($ax_cv_gcc_archflag) +if test "x$ax_cv_gcc_archflag" = xunknown; then + m4_default([$3],:) +else + m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"]) +fi +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 new file mode 100644 index 0000000..df95465 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_gcc_x86_cpuid.m4 @@ -0,0 +1,89 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_gcc_x86_cpuid.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_GCC_X86_CPUID(OP) +# AX_GCC_X86_CPUID_COUNT(OP, COUNT) +# +# DESCRIPTION +# +# On Pentium and later x86 processors, with gcc or a compiler that has a +# compatible syntax for inline assembly instructions, run a small program +# that executes the cpuid instruction with input OP. This can be used to +# detect the CPU type. AX_GCC_X86_CPUID_COUNT takes an additional COUNT +# parameter that gets passed into register ECX before calling cpuid. +# +# On output, the values of the eax, ebx, ecx, and edx registers are stored +# as hexadecimal strings as "eax:ebx:ecx:edx" in the cache variable +# ax_cv_gcc_x86_cpuid_OP. +# +# If the cpuid instruction fails (because you are running a +# cross-compiler, or because you are not using gcc, or because you are on +# a processor that doesn't have this instruction), ax_cv_gcc_x86_cpuid_OP +# is set to the string "unknown". +# +# This macro mainly exists to be used in AX_GCC_ARCHFLAG. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2015 Michael Petch +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. 
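The AX_GCC_X86_CPUID probe described above compiles and runs a tiny conftest program and caches its output as "eax:ebx:ecx:edx". A standalone sketch of the same idea, using the __get_cpuid() helper from GCC/clang's <cpuid.h> instead of the macro's hand-written inline assembly:

/* Same idea as the AX_GCC_X86_CPUID conftest program, but via the
   __get_cpuid() helper shipped with GCC and clang; prints the four
   registers in the macro's cache-variable format. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 0: highest supported leaf in eax, vendor string in ebx/edx/ecx. */
    if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
        return 1;               /* cpuid not available */
    printf("%x:%x:%x:%x\n", eax, ebx, ecx, edx);
    return 0;
}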
The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 10 + +AC_DEFUN([AX_GCC_X86_CPUID], +[AX_GCC_X86_CPUID_COUNT($1, 0) +]) + +AC_DEFUN([AX_GCC_X86_CPUID_COUNT], +[AC_REQUIRE([AC_PROG_CC]) +AC_LANG_PUSH([C]) +AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1, + [AC_RUN_IFELSE([AC_LANG_PROGRAM([#include ], [ + int op = $1, level = $2, eax, ebx, ecx, edx; + FILE *f; + __asm__ __volatile__ ("xchg %%ebx, %1\n" + "cpuid\n" + "xchg %%ebx, %1\n" + : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op), "2" (level)); + + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; +])], + [ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown])]) +AC_LANG_POP([C]) +]) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_require_defined.m4 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_require_defined.m4 new file mode 100644 index 0000000..17c3eab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/m4/ax_require_defined.m4 @@ -0,0 +1,37 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_REQUIRE_DEFINED], [dnl + m4_ifndef([$1], [m4_fatal([macro ]$1[ is not defined; is a m4 file missing?])]) +])dnl AX_REQUIRE_DEFINED diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/make_sunver.pl b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/make_sunver.pl new file mode 100644 index 0000000..8a90b1f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/make_sunver.pl @@ -0,0 +1,333 @@ +#!/usr/bin/perl -w + +# make_sunver.pl +# +# This script takes at least two arguments, a GNU style version script and +# a list of object and archive files, and generates a corresponding Sun +# style version script as follows: +# +# Each glob pattern, C++ mangled pattern or literal in the input script is +# matched against all global symbols in the input objects, emitting those +# that matched (or nothing if no match was found). 
+# A comment with the original pattern and its type is left in the output +# file to make it easy to understand the matches. +# +# It uses elfdump when present (native), GNU readelf otherwise. +# It depends on the GNU version of c++filt, since it must understand the +# GNU mangling style. + +use FileHandle; +use IPC::Open2; + +# Enforce C locale. +$ENV{'LC_ALL'} = "C"; +$ENV{'LANG'} = "C"; + +# Input version script, GNU style. +my $symvers = shift; + +########## +# Get all the symbols from the library, match them, and add them to a hash. + +my %sym_hash = (); + +# List of objects and archives to process. +my @OBJECTS = (); + +# List of shared objects to omit from processing. +my @SHAREDOBJS = (); + +# Filter out those input archives that have corresponding shared objects to +# avoid adding all symbols matched in the archive to the output map. +foreach $file (@ARGV) { + if (($so = $file) =~ s/\.a$/.so/ && -e $so) { + printf STDERR "omitted $file -> $so\n"; + push (@SHAREDOBJS, $so); + } else { + push (@OBJECTS, $file); + } +} + +# We need to detect and ignore hidden symbols. Solaris nm can only detect +# this in the harder to parse default output format, and GNU nm not at all, +# so use elfdump -s in the native case and GNU readelf -s otherwise. +# GNU objdump -t cannot be used since it produces a variable number of +# columns. + +# The path to elfdump. +my $elfdump = "/usr/ccs/bin/elfdump"; + +if (-f $elfdump) { + open ELFDUMP,$elfdump.' -s '.(join ' ',@OBJECTS).'|' or die $!; + my $skip_arsym = 0; + + while () { + chomp; + + # Ignore empty lines. + if (/^$/) { + # End of archive symbol table, stop skipping. + $skip_arsym = 0 if $skip_arsym; + next; + } + + # Keep skipping until end of archive symbol table. + next if ($skip_arsym); + + # Ignore object name header for individual objects and archives. + next if (/:$/); + + # Ignore table header lines. + next if (/^Symbol Table Section:/); + next if (/index.*value.*size/); + + # Start of archive symbol table: start skipping. + if (/^Symbol Table: \(archive/) { + $skip_arsym = 1; + next; + } + + # Split table. + (undef, undef, undef, undef, $bind, $oth, undef, $shndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCL"); + # Ignore hidden symbols. + next if ($oth eq "H"); + # Ignore undefined symbols. + next if ($shndx eq "UNDEF"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOB|WEAK)/ or $oth ne "D") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. + $sym_hash{$name}++; + } + close ELFDUMP or die "$elfdump error"; +} else { + open READELF, 'readelf -s -W '.(join ' ',@OBJECTS).'|' or die $!; + # Process each symbol. + while () { + chomp; + + # Ignore empty lines. + next if (/^$/); + + # Ignore object name header. + next if (/^File: .*$/); + + # Ignore table header lines. + next if (/^Symbol table.*contains.*:/); + next if (/Num:.*Value.*Size/); + + # Split table. + (undef, undef, undef, undef, $bind, $vis, $ndx, $name) = split; + + # Error out for unknown input. + die "unknown input line:\n$_" unless defined($bind); + + # Ignore local symbols. + next if ($bind eq "LOCAL"); + # Ignore hidden symbols. + next if ($vis eq "HIDDEN"); + # Ignore undefined symbols. + next if ($ndx eq "UND"); + # Error out for unhandled cases. + if ($bind !~ /^(GLOBAL|WEAK)/ or $vis ne "DEFAULT") { + die "unhandled symbol:\n$_"; + } + + # Remember symbol. 
+ $sym_hash{$name}++; + } + close READELF or die "readelf error"; +} + +########## +# The various types of glob patterns. +# +# A glob pattern that is to be applied to the demangled name: 'cxx'. +# A glob patterns that applies directly to the name in the .o files: 'glob'. +# This pattern is ignored; used for local variables (usually just '*'): 'ign'. + +# The type of the current pattern. +my $glob = 'glob'; + +# We're currently inside `extern "C++"', which Sun ld doesn't understand. +my $in_extern = 0; + +# The c++filt command to use. This *must* be GNU c++filt; the Sun Studio +# c++filt doesn't handle the GNU mangling style. +my $cxxfilt = $ENV{'CXXFILT'} || "c++filt"; + +# The current version name. +my $current_version = ""; + +# Was there any attempt to match a symbol to this version? +my $matches_attempted; + +# The number of versions which matched this symbol. +my $matched_symbols; + +open F,$symvers or die $!; + +# Print information about generating this file +print "# This file was generated by make_sunver.pl. DO NOT EDIT!\n"; +print "# It was generated by:\n"; +printf "# %s %s %s\n", $0, $symvers, (join ' ',@ARGV); +printf "# Omitted archives with corresponding shared libraries: %s\n", + (join ' ', @SHAREDOBJS) if $#SHAREDOBJS >= 0; +print "#\n\n"; + +while () { + # Lines of the form '};' + if (/^([ \t]*)(\}[ \t]*;[ \t]*)$/) { + $glob = 'glob'; + if ($in_extern) { + $in_extern--; + print "$1##$2\n"; + } else { + print; + } + next; + } + + # Lines of the form '} SOME_VERSION_NAME_1.0;' + if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) { + $glob = 'glob'; + # We tried to match symbols agains this version, but none matched. + # Emit dummy hidden symbol to avoid marking this version WEAK. + if ($matches_attempted && $matched_symbols == 0) { + print " hidden:\n"; + print " .force_WEAK_off_$current_version = DATA S0x0 V0x0;\n"; + } + print; next; + } + + # Comment and blank lines + if (/^[ \t]*\#/) { print; next; } + if (/^[ \t]*$/) { print; next; } + + # Lines of the form '{' + if (/^([ \t]*){$/) { + if ($in_extern) { + print "$1##{\n"; + } else { + print; + } + next; + } + + # Lines of the form 'SOME_VERSION_NAME_1.1 {' + if (/^([A-Z0-9_.]+)[ \t]+{$/) { + # Record version name. + $current_version = $1; + # Reset match attempts, #matched symbols for this version. + $matches_attempted = 0; + $matched_symbols = 0; + print; + next; + } + + # Ignore 'global:' + if (/^[ \t]*global:$/) { print; next; } + + # After 'local:', globs should be ignored, they won't be exported. + if (/^[ \t]*local:$/) { + $glob = 'ign'; + print; + next; + } + + # After 'extern "C++"', globs are C++ patterns + if (/^([ \t]*)(extern \"C\+\+\"[ \t]*)$/) { + $in_extern++; + $glob = 'cxx'; + # Need to comment, Sun ld cannot handle this. + print "$1##$2\n"; next; + } + + # Chomp newline now we're done with passing through the input file. + chomp; + + # Catch globs. Note that '{}' is not allowed in globs by this script, + # so only '*' and '[]' are available. + if (/^([ \t]*)([^ \t;{}#]+);?[ \t]*$/) { + my $ws = $1; + my $ptn = $2; + # Turn the glob into a regex by replacing '*' with '.*', '?' with '.'. + # Keep $ptn so we can still print the original form. + ($pattern = $ptn) =~ s/\*/\.\*/g; + $pattern =~ s/\?/\./g; + + if ($glob eq 'ign') { + # We're in a local: * section; just continue. + print "$_\n"; + next; + } + + # Print the glob commented for human readers. + print "$ws##$ptn ($glob)\n"; + # We tried to match a symbol to this version. 
+ $matches_attempted++; + + if ($glob eq 'glob') { + my %ptn_syms = (); + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # Maybe it matches one of the patterns based on the symbol in + # the .o file. + $ptn_syms{$sym}++ if ($sym =~ /^$pattern$/); + } + + foreach my $sym (sort keys(%ptn_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } elsif ($glob eq 'cxx') { + my %dem_syms = (); + + # Verify that we're actually using GNU c++filt. Other versions + # most likely cannot handle GNU style symbol mangling. + my $cxxout = `$cxxfilt --version 2>&1`; + $cxxout =~ m/GNU/ or die "$0 requires GNU c++filt to function"; + + # Talk to c++filt through a pair of file descriptors. + # Need to start a fresh instance per pattern, otherwise the + # process grows to 500+ MB. + my $pid = open2(*FILTIN, *FILTOUT, $cxxfilt) or die $!; + + # Match ptn against symbols in %sym_hash. + foreach my $sym (keys %sym_hash) { + # No? Well, maybe its demangled form matches one of those + # patterns. + printf FILTOUT "%s\n",$sym; + my $dem = ; + chomp $dem; + $dem_syms{$sym}++ if ($dem =~ /^$pattern$/); + } + + close FILTOUT or die "c++filt error"; + close FILTIN or die "c++filt error"; + # Need to wait for the c++filt process to avoid lots of zombies. + waitpid $pid, 0; + + foreach my $sym (sort keys(%dem_syms)) { + $matched_symbols++; + print "$ws$sym;\n"; + } + } else { + # No? Well, then ignore it. + } + next; + } + # Important sanity check. This script can't handle lots of formats + # that GNU ld can, so be sure to error out if one is seen! + die "strange line `$_'"; +} +close F; diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/Makefile.am new file mode 100644 index 0000000..afcbfb6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/Makefile.am @@ -0,0 +1,8 @@ +## Process this with automake to create Makefile.in + +AUTOMAKE_OPTIONS=foreign + +EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + +man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi.3 new file mode 100644 index 0000000..1f1d303 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi.3 @@ -0,0 +1,41 @@ +.Dd February 15, 2008 +.Dt FFI 3 +.Sh NAME +.Nm FFI +.Nd Foreign Function Interface +.Sh LIBRARY +libffi, -lffi +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The foreign function interface provides a mechanism by which a function can +generate a call to another function at runtime without requiring knowledge of +the called function's interface at compile time. 
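The pages that follow document each entry point in detail. As a quick orientation, a minimal self-contained sketch that prepares a call interface and invokes the C library's puts() through libffi (assumes libffi is installed and the program is linked with -lffi):

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[1] = { &ffi_type_pointer };
    const char *msg = "Hello from libffi";
    void *arg_values[1] = { &msg };
    ffi_arg result;   /* puts() returns int; widened into an ffi_arg */

    /* Describe a call taking one pointer argument and returning int. */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                     &ffi_type_sint, arg_types) == FFI_OK)
        ffi_call(&cif, FFI_FN(puts), &result, arg_values);

    return 0;
}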
+.Sh SEE ALSO +.Xr ffi_prep_cif 3 , +.Xr ffi_prep_cif_var 3 , +.Xr ffi_call 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_call.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_call.3 new file mode 100644 index 0000000..5351513 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_call.3 @@ -0,0 +1,103 @@ +.Dd February 15, 2008 +.Dt ffi_call 3 +.Sh NAME +.Nm ffi_call +.Nd Invoke a foreign function. +.Sh SYNOPSIS +.In ffi.h +.Ft void +.Fo ffi_call +.Fa "ffi_cif *cif" +.Fa "void (*fn)(void)" +.Fa "void *rvalue" +.Fa "void **avalue" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_call +function provides a simple mechanism for invoking a function without +requiring knowledge of the function's interface at compile time. +.Fa fn +is called with the values retrieved from the pointers in the +.Fa avalue +array. The return value from +.Fa fn +is placed in storage pointed to by +.Fa rvalue . +.Fa cif +contains information describing the data types, sizes and alignments of the +arguments to and return value from +.Fa fn , +and must be initialized with +.Nm ffi_prep_cif +before it is used with +.Nm ffi_call . +.Pp +.Fa rvalue +must point to storage that is sizeof(ffi_arg) or larger for non-floating point +types. For smaller-sized return value types, the +.Nm ffi_arg +or +.Nm ffi_sarg +integral type must be used to hold +the return value. +.Sh EXAMPLES +.Bd -literal +#include +#include + +unsigned char +foo(unsigned int, float); + +int +main(int argc, const char **argv) +{ + ffi_cif cif; + ffi_type *arg_types[2]; + void *arg_values[2]; + ffi_status status; + + // Because the return value from foo() is smaller than sizeof(long), it + // must be passed as ffi_arg or ffi_sarg. + ffi_arg result; + + // Specify the data type of each argument. Available types are defined + // in . + arg_types[0] = &ffi_type_uint; + arg_types[1] = &ffi_type_float; + + // Prepare the ffi_cif structure. + if ((status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 2, &ffi_type_uint8, arg_types)) != FFI_OK) + { + // Handle the ffi_status error. + } + + // Specify the values of each argument. + unsigned int arg1 = 42; + float arg2 = 5.1; + + arg_values[0] = &arg1; + arg_values[1] = &arg2; + + // Invoke the function. + ffi_call(&cif, FFI_FN(foo), &result, arg_values); + + // The ffi_arg 'result' now contains the unsigned char returned from foo(), + // which can be accessed by a typecast. + printf("result is %hhu", (unsigned char)result); + + return 0; +} + +// The target function. +unsigned char +foo(unsigned int x, float y) +{ + unsigned char result = x - y; + return result; +} +.Ed +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_prep_cif.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_prep_cif.3 new file mode 100644 index 0000000..ab2be8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_prep_cif.3 @@ -0,0 +1,68 @@ +.Dd February 15, 2008 +.Dt ffi_prep_cif 3 +.Sh NAME +.Nm ffi_prep_cif +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +. +.Fa abi +specifies a set of calling conventions to use. 
+.Fa atypes +is an array of +.Fa nargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. Note that to call a variadic function +.Nm ffi_prep_cif_var +must be used instead. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm . +.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif_var 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 new file mode 100644 index 0000000..7e19d0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/man/ffi_prep_cif_var.3 @@ -0,0 +1,73 @@ +.Dd January 25, 2011 +.Dt ffi_prep_cif_var 3 +.Sh NAME +.Nm ffi_prep_cif_var +.Nd Prepare a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Sh SYNOPSIS +.In ffi.h +.Ft ffi_status +.Fo ffi_prep_cif_var +.Fa "ffi_cif *cif" +.Fa "ffi_abi abi" +.Fa "unsigned int nfixedargs" +.Fa "unsigned int ntotalargs" +.Fa "ffi_type *rtype" +.Fa "ffi_type **atypes" +.Fc +.Sh DESCRIPTION +The +.Nm ffi_prep_cif_var +function prepares a +.Nm ffi_cif +structure for use with +.Nm ffi_call +for variadic functions. +.Fa abi +specifies a set of calling conventions to use. +.Fa atypes +is an array of +.Fa ntotalargs +pointers to +.Nm ffi_type +structs that describe the data type, size and alignment of each argument. +.Fa rtype +points to an +.Nm ffi_type +that describes the data type, size and alignment of the +return value. +.Fa nfixedargs +must contain the number of fixed (non-variadic) arguments. +Note that to call a non-variadic function +.Nm ffi_prep_cif +must be used. +.Sh RETURN VALUES +Upon successful completion, +.Nm ffi_prep_cif_var +returns +.Nm FFI_OK . +It will return +.Nm FFI_BAD_TYPEDEF +if +.Fa cif +is +.Nm NULL +or +.Fa atypes +or +.Fa rtype +is malformed. If +.Fa abi +does not refer to a valid ABI, +.Nm FFI_BAD_ABI +will be returned. Available ABIs are +defined in +.Nm +. 
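Since this page stops short of a worked example, here is a minimal sketch of the variadic case: printf() has one fixed argument (the format string) and, in this particular call, three arguments in total. It assumes linking with -lffi; variadic arguments smaller than int, or of type float, must be described with their promoted types.

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *arg_types[3] = { &ffi_type_pointer, &ffi_type_pointer,
                               &ffi_type_sint };
    const char *fmt  = "%s: %d\n";
    const char *name = "answer";
    int value = 42;
    void *arg_values[3] = { &fmt, &name, &value };
    ffi_arg result;   /* printf() returns int */

    /* 1 fixed argument, 3 total arguments for this call. */
    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3,
                         &ffi_type_sint, arg_types) == FFI_OK)
        ffi_call(&cif, FFI_FN(printf), &result, arg_values);

    return 0;
}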
+.Sh SEE ALSO +.Xr ffi 3 , +.Xr ffi_call 3 , +.Xr ffi_prep_cif 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln new file mode 100644 index 0000000..d9119df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.sln @@ -0,0 +1,33 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.28302.56 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Ffi_staticLib_arm64", "Ffi_staticLib.vcxproj", "{115502C0-BE05-4767-BF19-5C87D805FAD6}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|ARM64 = Debug|ARM64 + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|ARM64 = Release|ARM64 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|ARM64.Build.0 = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x64.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Debug|x86.ActiveCfg = Debug|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|ARM64.Build.0 = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x64.ActiveCfg = Release|ARM64 + {115502C0-BE05-4767-BF19-5C87D805FAD6}.Release|x86.ActiveCfg = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {241C54C7-20DD-4897-9376-E6B6D1B43BD5} + EndGlobalSection +EndGlobal diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj new file mode 100644 index 0000000..3187699 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj @@ -0,0 +1,130 @@ + + + + + Debug + ARM64 + + + Release + ARM64 + + + + 15.0 + {115502C0-BE05-4767-BF19-5C87D805FAD6} + Win32Proj + FfistaticLib + 10.0.17763.0 + Ffi_staticLib_arm64 + + + + StaticLibrary + true + v141 + Unicode + + + StaticLibrary + false + v141 + true + Unicode + + + + + + + + + + + + + + + true + + + false + + + + NotUsing + Level3 + Disabled + true + FFI_BUILDING_DLL;_DEBUG;_LIB;USE_DL_PREFIX;ARM64;_M_ARM64;NDEBUG;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + false + true + + + false + + + Windows + true + + + + + NotUsing + Level3 + MaxSpeed + true + true + true + FFI_BUILDING_DLL;USE_DL_PREFIX;ARM64;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + ..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories) + true + Speed + true + ..\..\src;..\..\src\aarch64;%(AdditionalUsingDirectories) + + + Windows + true + true + true + + + true + + + + + + + + + + + + + + + + + + + + + + + cl /FA /EP /nologo /I"..\..\include" /I".\aarch64_include" /I"..\..\src\aarch64" "%(FullPath)" > $(IntDir)win64_armasm.i + armasm64 $(IntDir)win64_armasm.i /I"src\" /I"..\..\include" /I"..\..\src\aarch64" -o 
"$(IntDir)win64_armasm.obj" + + win64_armasm.obj;%(Outputs) + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters new file mode 100644 index 0000000..1f8c6e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.filters @@ -0,0 +1,57 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user new file mode 100644 index 0000000..be25078 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/Ffi_staticLib.vcxproj.user @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h new file mode 100644 index 0000000..02f26a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvc_build/aarch64/aarch64_include/ffi.h @@ -0,0 +1,511 @@ +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. 
Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef AARCH64 +#define AARCH64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#ifndef _M_ARM64 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. */ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#ifndef _M_ARM64 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvcc.sh b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvcc.sh new file mode 100755 index 0000000..7cfc509 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/msvcc.sh @@ -0,0 +1,353 @@ +#!/bin/sh + +# ***** BEGIN LICENSE BLOCK ***** +# Version: MPL 1.1/GPL 2.0/LGPL 2.1 +# +# The contents of this file are subject to the Mozilla Public License Version +# 1.1 (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the License +# for the specific language governing rights and limitations under the +# License. +# +# The Original Code is the MSVC wrappificator. +# +# The Initial Developer of the Original Code is +# Timothy Wall . +# Portions created by the Initial Developer are Copyright (C) 2009 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Daniel Witte +# +# Alternatively, the contents of this file may be used under the terms of +# either the GNU General Public License Version 2 or later (the "GPL"), or +# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +# in which case the provisions of the GPL or the LGPL are applicable instead +# of those above. If you wish to allow use of your version of this file only +# under the terms of either the GPL or the LGPL, and not to allow others to +# use your version of this file under the terms of the MPL, indicate your +# decision by deleting the provisions above and replace them with the notice +# and other provisions required by the GPL or the LGPL. If you do not delete +# the provisions above, a recipient may use your version of this file under +# the terms of any one of the MPL, the GPL or the LGPL. +# +# ***** END LICENSE BLOCK ***** + +# +# GCC-compatible wrapper for cl.exe and ml.exe. Arguments are given in GCC +# format and translated into something sensible for cl or ml. +# + +args_orig=$@ +args="-nologo -W3" +linkargs= +static_crt= +debug_crt= +cl="cl" +ml="ml" +safeseh="-safeseh" +output= +libpaths= +libversion=7 +verbose= + +while [ $# -gt 0 ] +do + case $1 + in + --verbose) + verbose=1 + shift 1 + ;; + --version) + args="-help" + shift 1 + ;; + -fexceptions) + # Don't enable exceptions for now. + #args="$args -EHac" + shift 1 + ;; + -m32) + shift 1 + ;; + -m64) + ml="ml64" # "$MSVC/x86_amd64/ml64" + safeseh= + shift 1 + ;; + -marm) + ml='armasm' + safeseh= + shift 1 + ;; + -marm64) + ml='armasm64' + safeseh= + shift 1 + ;; + -clang-cl) + cl="clang-cl" + shift 1 + ;; + -O0) + args="$args -Od" + shift 1 + ;; + -O*) + # Runtime error checks (enabled by setting -RTC1 in the -DFFI_DEBUG + # case below) are not compatible with optimization flags and will + # cause the build to fail. Therefore, drop the optimization flag if + # -DFFI_DEBUG is also set. + case $args_orig in + *-DFFI_DEBUG*) + args="$args" + ;; + *) + # The ax_cc_maxopt.m4 macro from the upstream autoconf-archive + # project doesn't support MSVC and therefore ends up trying to + # use -O3. Use the equivalent "max optimization" flag for MSVC + # instead of erroring out. + case $1 in + -O3) + args="$args -O2" + ;; + *) + args="$args $1" + ;; + esac + opt="true" + ;; + esac + shift 1 + ;; + -g) + # Enable debug symbol generation. + args="$args -Zi" + shift 1 + ;; + -DFFI_DEBUG) + # Enable runtime error checks. + args="$args -RTC1" + defines="$defines $1" + shift 1 + ;; + -DUSE_STATIC_RTL) + # Link against static CRT. + static_crt=1 + shift 1 + ;; + -DUSE_DEBUG_RTL) + # Link against debug CRT. 
+ debug_crt=1 + shift 1 + ;; + -c) + args="$args -c" + args="$(echo $args | sed 's%/Fe%/Fo%g')" + single="-c" + shift 1 + ;; + -D*=*) + name="$(echo $1|sed 's/-D\([^=][^=]*\)=.*/\1/g')" + value="$(echo $1|sed 's/-D[^=][^=]*=//g')" + args="$args -D${name}='$value'" + defines="$defines -D${name}='$value'" + shift 1 + ;; + -D*) + args="$args $1" + defines="$defines $1" + shift 1 + ;; + -I) + p=$(cygpath -ma "$2") + args="$args -I\"$p\"" + includes="$includes -I\"$p\"" + shift 2 + ;; + -I*) + p=$(cygpath -ma "${1#-I}") + args="$args -I\"$p\"" + includes="$includes -I\"$p\"" + shift 1 + ;; + -L) + p=$(cygpath -ma $2) + linkargs="$linkargs -LIBPATH:$p" + shift 2 + ;; + -L*) + p=$(cygpath -ma ${1#-L}) + linkargs="$linkargs -LIBPATH:$p" + shift 1 + ;; + -link) + # add next argument verbatim to linker args + linkargs="$linkargs $2" + shift 2 + ;; + -l*) + case $1 + in + -lffi) + linkargs="$linkargs lib${1#-l}-${libversion}.lib" + ;; + *) + # ignore other libraries like -lm, hope they are + # covered by MSVCRT + # linkargs="$linkargs ${1#-l}.lib" + ;; + esac + shift 1 + ;; + -W|-Wextra) + # TODO map extra warnings + shift 1 + ;; + -Wall) + # -Wall on MSVC is overzealous, and we already build with -W3. Nothing + # to do here. + shift 1 + ;; + -pedantic) + # libffi tests -pedantic with -Wall, so drop it also. + shift 1 + ;; + -warn) + # ignore -warn all from libtool as well. + if test "$2" = "all"; then + shift 2 + else + args="$args -warn" + shift 1 + fi + ;; + -Werror) + args="$args -WX" + shift 1 + ;; + -W*) + # TODO map specific warnings + shift 1 + ;; + -S) + args="$args -FAs" + shift 1 + ;; + -o) + outdir="$(dirname $2)" + base="$(basename $2|sed 's/\.[^.]*//g')" + if [ -n "$single" ]; then + output="-Fo$2" + else + output="-Fe$2" + fi + armasm_output="-o $2" + if [ -n "$assembly" ]; then + args="$args $output" + else + args="$args $output -Fd$outdir/$base -Fp$outdir/$base -Fa$outdir/$base" + fi + shift 2 + ;; + *.S) + src="$(cygpath -ma $1)" + assembly="true" + shift 1 + ;; + *.c) + args="$args $(cygpath -ma $1)" + shift 1 + ;; + *) + # Assume it's an MSVC argument, and pass it through. + args="$args $1" + shift 1 + ;; + esac +done + +if [ -n "$linkargs" ]; then + + # If -Zi is specified, certain optimizations are implicitly disabled + # by MSVC. Add back those optimizations if this is an optimized build. + # NOTE: These arguments must come after all others. + if [ -n "$opt" ]; then + linkargs="$linkargs -OPT:REF -OPT:ICF -INCREMENTAL:NO" + fi + + args="$args -link $linkargs" +fi + +if [ -n "$static_crt" ]; then + md=-MT +else + md=-MD +fi + +if [ -n "$debug_crt" ]; then + md="${md}d" +fi + +if [ -n "$assembly" ]; then + if [ -z "$outdir" ]; then + outdir="." + fi + ppsrc="$outdir/$(basename $src|sed 's/.S$/.asm/g')" + + if [ $ml = "armasm" ]; then + defines="$defines -D_M_ARM" + fi + + if [ $ml = "armasm64" ]; then + defines="$defines -D_M_ARM64" + fi + + if test -n "$verbose"; then + echo "$cl -nologo -EP $includes $defines $src > $ppsrc" + fi + + eval "\"$cl\" -nologo -EP $includes $defines $src" > $ppsrc || exit $? + output="$(echo $output | sed 's%/F[dpa][^ ]*%%g')" + if [ $ml = "armasm" ]; then + args="-nologo -g -oldit $armasm_output $ppsrc -errorReport:prompt" + elif [ $ml = "armasm64" ]; then + args="-nologo -g $armasm_output $ppsrc -errorReport:prompt" + else + args="-nologo $safeseh $single $output $ppsrc" + fi + + if test -n "$verbose"; then + echo "$ml $args" + fi + + eval "\"$ml\" $args" + result=$? + + # required to fix ml64 broken output? 
+ #mv *.obj $outdir +else + args="$md $args" + + if test -n "$verbose"; then + echo "$cl $args" + fi + # Return an error code of 1 if an invalid command line parameter is passed + # instead of just ignoring it. Any output that is not a warning or an + # error is filtered so this command behaves more like gcc. cl.exe prints + # the name of the compiled file otherwise, which breaks the dejagnu checks + # for excess warnings and errors. + eval "(\"$cl\" $args 2>&1 1>&3 | \ + awk '{print \$0} /D9002/ {error=1} END{exit error}' >&2) 3>&1 | \ + awk '/warning|error/'" + result=$? +fi + +exit $result + +# vim: noai:ts=4:sw=4 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffi.c new file mode 100644 index 0000000..ef09f4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffi.c @@ -0,0 +1,1025 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__)|| defined (_M_ARM64) +#include +#include +#include +#include +#include +#include +#include "internal.h" +#ifdef _WIN32 +#include /* FlushInstructionCache */ +#endif + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +union _d +{ + UINT64 d; + UINT32 s[2]; +}; + +struct _v +{ + union _d d[2] __attribute__((aligned(16))); +}; + +struct call_context +{ + struct _v v[N_V_ARG_REG]; + UINT64 x[N_X_ARG_REG]; +}; + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#endif + +#else + +#if defined (__clang__) && defined (__APPLE__) +extern void sys_icache_invalidate (void *start, size_t len); +#endif + +static inline void +ffi_clear_cache (void *start, void *end) +{ +#if defined (__clang__) && defined (__APPLE__) + sys_icache_invalidate (start, (char *)end - (char *)start); +#elif defined (__GNUC__) + __builtin___clear_cache (start, end); +#elif defined (_WIN32) + FlushInstructionCache(GetCurrentProcess(), start, (char*)end - (char*)start); +#else +#error "Missing builtin to flush instruction cache" +#endif +} + +#endif + +/* A subroutine of is_vfp_type. 
Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of is_vfp_type. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY may be allocated to the FP registers. This is both an + fp scalar type as well as an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is an fp scalar. + + Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_* + constant for the type. */ + +static int +is_vfp_type (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (candidate) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + switch (candidate) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 2; + goto done; + } + return 0; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. */ + size = ty->size; + if (size < 4 || size > 64) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + case FFI_TYPE_LONGDOUBLE: + ele_count = size / sizeof(long double); + if (size != ele_count * sizeof(long double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return candidate * 4 + (4 - (int)ele_count); +} + +/* Representation of the procedure call argument marshalling + state. 
+ + The terse state variable names match the names used in the AARCH64 + PCS. */ + +struct arg_state +{ + unsigned ngrn; /* Next general-purpose register number. */ + unsigned nsrn; /* Next vector register number. */ + size_t nsaa; /* Next stack offset. */ + +#if defined (__APPLE__) + unsigned allocating_variadic; +#endif +}; + +/* Initialize a procedure call argument marshalling state. */ +static void +arg_init (struct arg_state *state) +{ + state->ngrn = 0; + state->nsrn = 0; + state->nsaa = 0; +#if defined (__APPLE__) + state->allocating_variadic = 0; +#endif +} + +/* Allocate an aligned slot on the stack and return a pointer to it. */ +static void * +allocate_to_stack (struct arg_state *state, void *stack, + size_t alignment, size_t size) +{ + size_t nsaa = state->nsaa; + + /* Round up the NSAA to the larger of 8 or the natural + alignment of the argument's type. */ +#if defined (__APPLE__) + if (state->allocating_variadic && alignment < 8) + alignment = 8; +#else + if (alignment < 8) + alignment = 8; +#endif + + nsaa = FFI_ALIGN (nsaa, alignment); + state->nsaa = nsaa + size; + + return (char *)stack + nsaa; +} + +static ffi_arg +extend_integer_type (void *source, int type) +{ + switch (type) + { + case FFI_TYPE_UINT8: + return *(UINT8 *) source; + case FFI_TYPE_SINT8: + return *(SINT8 *) source; + case FFI_TYPE_UINT16: + return *(UINT16 *) source; + case FFI_TYPE_SINT16: + return *(SINT16 *) source; + case FFI_TYPE_UINT32: + return *(UINT32 *) source; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + return *(SINT32 *) source; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + return *(UINT64 *) source; + break; + case FFI_TYPE_POINTER: + return *(uintptr_t *) source; + default: + abort(); + } +} + +#if defined(_MSC_VER) +void extend_hfa_type (void *dest, void *src, int h); +#else +static void +extend_hfa_type (void *dest, void *src, int h) +{ + ssize_t f = h - AARCH64_RET_S4; + void *x0; + + asm volatile ( + "adr %0, 0f\n" +" add %0, %0, %1\n" +" br %0\n" +"0: ldp s16, s17, [%3]\n" /* S4 */ +" ldp s18, s19, [%3, #8]\n" +" b 4f\n" +" ldp s16, s17, [%3]\n" /* S3 */ +" ldr s18, [%3, #8]\n" +" b 3f\n" +" ldp s16, s17, [%3]\n" /* S2 */ +" b 2f\n" +" nop\n" +" ldr s16, [%3]\n" /* S1 */ +" b 1f\n" +" nop\n" +" ldp d16, d17, [%3]\n" /* D4 */ +" ldp d18, d19, [%3, #16]\n" +" b 4f\n" +" ldp d16, d17, [%3]\n" /* D3 */ +" ldr d18, [%3, #16]\n" +" b 3f\n" +" ldp d16, d17, [%3]\n" /* D2 */ +" b 2f\n" +" nop\n" +" ldr d16, [%3]\n" /* D1 */ +" b 1f\n" +" nop\n" +" ldp q16, q17, [%3]\n" /* Q4 */ +" ldp q18, q19, [%3, #32]\n" +" b 4f\n" +" ldp q16, q17, [%3]\n" /* Q3 */ +" ldr q18, [%3, #32]\n" +" b 3f\n" +" ldp q16, q17, [%3]\n" /* Q2 */ +" b 2f\n" +" nop\n" +" ldr q16, [%3]\n" /* Q1 */ +" b 1f\n" +"4: str q19, [%2, #48]\n" +"3: str q18, [%2, #32]\n" +"2: str q17, [%2, #16]\n" +"1: str q16, [%2]" + : "=&r"(x0) + : "r"(f * 12), "r"(dest), "r"(src) + : "memory", "v16", "v17", "v18", "v19"); +} +#endif + +#if defined(_MSC_VER) +void* compress_hfa_type (void *dest, void *src, int h); +#else +static void * +compress_hfa_type (void *dest, void *reg, int h) +{ + switch (h) + { + case AARCH64_RET_S1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 12; +#endif + } + else + *(float *)dest = *(float *)reg; + break; + case AARCH64_RET_S2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.s, v17.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_S3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.s, v17.s, v18.s }[0], [%0]" + : : "r"(dest), 
"r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_S4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + case AARCH64_RET_D1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 8; +#endif + } + else + *(double *)dest = *(double *)reg; + break; + case AARCH64_RET_D2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.d, v17.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_D3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.d, v17.d, v18.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_D4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.d, v17.d, v18.d, v19.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + default: + if (dest != reg) + return memcpy (dest, reg, 16 * (4 - (h & 3))); + break; + } + return dest; +} +#endif + +/* Either allocate an appropriate register for the argument type, or if + none are available, allocate a stack slot and return a pointer + to the allocated space. */ + +static void * +allocate_int_to_reg_or_stack (struct call_context *context, + struct arg_state *state, + void *stack, size_t size) +{ + if (state->ngrn < N_X_ARG_REG) + return &context->x[state->ngrn++]; + + state->ngrn = N_X_ARG_REG; + return allocate_to_stack (state, stack, size, size); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + size_t bytes = cif->bytes; + int flags, i, n; + + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = AARCH64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = AARCH64_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = AARCH64_RET_UINT16; + break; + case FFI_TYPE_UINT32: + flags = AARCH64_RET_UINT32; + break; + case FFI_TYPE_SINT8: + flags = AARCH64_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = AARCH64_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = AARCH64_RET_SINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = AARCH64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + flags = is_vfp_type (rtype); + if (flags == 0) + { + size_t s = rtype->size; + if (s > 16) + { + flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM; + bytes += 8; + } + else if (s == 16) + flags = AARCH64_RET_INT128; + else if (s == 8) + flags = AARCH64_RET_INT64; + else + flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY; + } + break; + + default: + abort(); + } + + for (i = 0, n = cif->nargs; i < n; i++) + if (is_vfp_type (cif->arg_types[i])) + { + flags |= AARCH64_FLAG_ARG_V; + break; + } + + /* Round the stack up to a multiple of the stack alignment requirement. 
*/ + cif->bytes = (unsigned) FFI_ALIGN(bytes, 16); + cif->flags = flags; +#if defined (__APPLE__) + cif->aarch64_nfixedargs = 0; +#endif + + return FFI_OK; +} + +#if defined (__APPLE__) +/* Perform Apple-specific cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, + unsigned int ntotalargs) +{ + ffi_status status = ffi_prep_cif_machdep (cif); + cif->aarch64_nfixedargs = nfixedargs; + return status; +} +#else +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) +{ + ffi_status status = ffi_prep_cif_machdep (cif); + cif->flags |= AARCH64_FLAG_VARARG; + return status; +} +#endif /* __APPLE__ */ + +extern void ffi_call_SYSV (struct call_context *context, void *frame, + void (*fn)(void), void *rvalue, int flags, + void *closure) FFI_HIDDEN; + +/* Call a function with the provided arguments and capture the return + value. */ +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue, + void **avalue, void *closure) +{ + struct call_context *context; + void *stack, *frame, *rvalue; + struct arg_state state; + size_t stack_bytes, rtype_size, rsize; + int i, nargs, flags, isvariadic = 0; + ffi_type *rtype; + + flags = cif->flags; + rtype = cif->rtype; + rtype_size = rtype->size; + stack_bytes = cif->bytes; + + if (flags & AARCH64_FLAG_VARARG) + { + isvariadic = 1; + flags &= ~AARCH64_FLAG_VARARG; + } + + /* If the target function returns a structure via hidden pointer, + then we cannot allow a null rvalue. Otherwise, mash a null + rvalue to void return type. */ + rsize = 0; + if (flags & AARCH64_RET_IN_MEM) + { + if (orig_rvalue == NULL) + rsize = rtype_size; + } + else if (orig_rvalue == NULL) + flags &= AARCH64_FLAG_ARG_V; + else if (flags & AARCH64_RET_NEED_COPY) + rsize = 16; + + /* Allocate consectutive stack for everything we'll need. */ + context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize); + stack = context + 1; + frame = (void*)((uintptr_t)stack + (uintptr_t)stack_bytes); + rvalue = (rsize ? (void*)((uintptr_t)frame + 32) : orig_rvalue); + + arg_init (&state); + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + size_t s = ty->size; + void *a = avalue[i]; + int h, t; + + t = ty->type; + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + /* If the argument is a basic type the argument is allocated to an + appropriate register, or if none are available, to the stack. */ + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_pointer: + { + ffi_arg ext = extend_integer_type (a, t); + if (state.ngrn < N_X_ARG_REG) + context->x[state.ngrn++] = ext; + else + { + void *d = allocate_to_stack (&state, stack, ty->alignment, s); + state.ngrn = N_X_ARG_REG; + /* Note that the default abi extends each argument + to a full 64-bit slot, while the iOS abi allocates + only enough space. 
*/ +#ifdef __APPLE__ + memcpy(d, a, s); +#else + *(ffi_arg *)d = ext; +#endif + } + } + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + { + void *dest; + + h = is_vfp_type (ty); + if (h) + { + int elems = 4 - (h & 3); + if (cif->abi == FFI_WIN64 && isvariadic) + { + if (state.ngrn + elems <= N_X_ARG_REG) + { + dest = &context->x[state.ngrn]; + state.ngrn += elems; + extend_hfa_type(dest, a, h); + break; + } + state.nsrn = N_X_ARG_REG; + dest = allocate_to_stack(&state, stack, ty->alignment, s); + } + else + { + if (state.nsrn + elems <= N_V_ARG_REG) + { + dest = &context->v[state.nsrn]; + state.nsrn += elems; + extend_hfa_type (dest, a, h); + break; + } + state.nsrn = N_V_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + } + else if (s > 16) + { + /* If the argument is a composite type that is larger than 16 + bytes, then the argument has been copied to memory, and + the argument is replaced by a pointer to the copy. */ + a = &avalue[i]; + t = FFI_TYPE_POINTER; + s = sizeof (void *); + goto do_pointer; + } + else + { + size_t n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + /* If the argument is a composite type and the size in + double-words is not more than the number of available + X registers, then the argument is copied into + consecutive X registers. */ + dest = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + /* Otherwise, there are insufficient X registers. Further + X register allocations are prevented, the NSAA is + adjusted and the argument is copied to memory at the + adjusted NSAA. */ + state.ngrn = N_X_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + } + memcpy (dest, a, s); + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + ffi_call_SYSV (context, frame, fn, rvalue, flags, closure); + + if (flags & AARCH64_RET_NEED_COPY) + memcpy (orig_rvalue, rvalue, rtype_size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif /* FFI_GO_CLOSURES */ + +/* Build a trampoline. 
*/ + +extern void ffi_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + void (*start)(void); + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_closure_SYSV_V; + else + start = ffi_closure_SYSV; + +#if FFI_EXEC_TRAMPOLINE_TABLE +#ifdef __MACH__ +#ifdef HAVE_PTRAUTH + codeloc = ptrauth_strip (codeloc, ptrauth_key_asia); +#endif + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = start; +#endif +#else + static const unsigned char trampoline[16] = { + 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */ + 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */ + 0x00, 0x02, 0x1f, 0xd6 /* br x16 */ + }; + char *tramp = closure->tramp; + + memcpy (tramp, trampoline, sizeof(trampoline)); + + *(UINT64 *)(tramp + 16) = (uintptr_t)start; + + ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE); + + /* Also flush the cache for code mapping. */ +#ifdef _WIN32 + // Not using dlmalloc.c for Windows ARM64 builds + // so calling ffi_data_to_code_pointer() isn't necessary + unsigned char *tramp_code = tramp; + #else + unsigned char *tramp_code = ffi_data_to_code_pointer (tramp); + #endif + ffi_clear_cache (tramp_code, tramp_code + FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +extern void ffi_go_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*start)(void); + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_go_closure_SYSV_V; + else + start = ffi_go_closure_SYSV; + + closure->tramp = start; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif /* FFI_GO_CLOSURES */ + +/* Primary handler to setup and invoke a function within a closure. + + A closure when invoked enters via the assembler wrapper + ffi_closure_SYSV(). The wrapper allocates a call context on the + stack, saves the interesting registers (from the perspective of + the calling convention) into the context then passes control to + ffi_closure_SYSV_inner() passing the saved context and a pointer to + the stack at the point ffi_closure_SYSV() was invoked. + + On the return path the assembler wrapper will reload call context + registers. + + ffi_closure_SYSV_inner() marshalls the call context into ffi value + descriptors, invokes the wrapped function, then marshalls the return + value back into the call context. 
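   As a point of reference, the user-visible sequence that eventually lands in
   this handler is roughly the sketch below. It uses only the public closure
   API declared in ffi.h (ffi_closure_alloc, ffi_prep_closure_loc,
   ffi_closure_free); the add_offset handler and its bound offset argument are
   hypothetical and exist only for illustration.

     #include <ffi.h>

     // Handler invoked on every call through the closure. Integral results
     // narrower than ffi_arg must be written back widened to ffi_arg.
     static void add_offset (ffi_cif *cif, void *ret, void **args, void *user_data)
     {
       *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
     }

     static int call_through_closure (void)
     {
       void *code;
       ffi_closure *cl = ffi_closure_alloc (sizeof (ffi_closure), &code);
       ffi_cif cif;
       ffi_type *atypes[1] = { &ffi_type_sint };
       static int offset = 40;
       int r = -1;

       if (cl
           && ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1,
                            &ffi_type_sint, atypes) == FFI_OK
           && ffi_prep_closure_loc (cl, &cif, add_offset, &offset, code) == FFI_OK)
         {
           int (*fn) (int) = (int (*) (int)) code;
           r = fn (2);   // enters the closure trampoline, then this handler; r == 42
         }
       ffi_closure_free (cl);
       return r;
     }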
*/ + +int FFI_HIDDEN +ffi_closure_SYSV_inner (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + struct call_context *context, + void *stack, void *rvalue, void *struct_rvalue) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + int i, h, nargs, flags; + struct arg_state state; + + arg_init (&state); + + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + int t = ty->type; + size_t n, s = ty->size; + + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + h = is_vfp_type (ty); + if (h) + { + n = 4 - (h & 3); +#ifdef _WIN32 /* for handling armasm calling convention */ + if (cif->is_variadic) + { + if (state.ngrn + n <= N_X_ARG_REG) + { + void *reg = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + + /* Eeek! We need a pointer to the structure, however the + homogeneous float elements are being passed in individual + registers, therefore for float and double the structure + is not represented as a contiguous sequence of bytes in + our saved register context. We don't need the original + contents of the register storage, so we reformat the + structure into the same memory. */ + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + else + { +#endif /* for handling armasm calling convention */ + if (state.nsrn + n <= N_V_ARG_REG) + { + void *reg = &context->v[state.nsrn]; + state.nsrn += (unsigned int)n; + avalue[i] = compress_hfa_type(reg, reg, h); + } + else + { + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } +#ifdef _WIN32 /* for handling armasm calling convention */ + } +#endif /* for handling armasm calling convention */ + } + else if (s > 16) + { + /* Replace Composite type of size greater than 16 with a + pointer. 
*/ + avalue[i] = *(void **) + allocate_int_to_reg_or_stack (context, &state, stack, + sizeof (void *)); + } + else + { + n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + avalue[i] = &context->x[state.ngrn]; + state.ngrn += (unsigned int)n; + } + else + { + state.ngrn = N_X_ARG_REG; + avalue[i] = allocate_to_stack(&state, stack, + ty->alignment, s); + } + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + flags = cif->flags; + if (flags & AARCH64_RET_IN_MEM) + rvalue = struct_rvalue; + + fun (cif, rvalue, avalue, user_data); + + return flags; +} + +#endif /* (__aarch64__) || defined(__arm64__)|| defined (_M_ARM64)*/ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffitarget.h new file mode 100644 index 0000000..d5622e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/ffitarget.h @@ -0,0 +1,97 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +#ifdef __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#elif defined(_WIN32) +#define FFI_SIZEOF_ARG 8 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_WIN64, + FFI_LAST_ABI, +#if defined(_WIN32) + FFI_DEFAULT_ABI = FFI_WIN64 +#else + FFI_DEFAULT_ABI = FFI_SYSV +#endif + } ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#ifdef _WIN32 +#define FFI_EXTRA_CIF_FIELDS unsigned is_variadic +#endif +#define FFI_TARGET_SPECIFIC_VARIADIC + +/* ---- Internal ---- */ + +#if defined (__APPLE__) +#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs +#elif !defined(_WIN32) +/* iOS and Windows reserve x18 for the system. Disable Go closures until + a new static chain is chosen. */ +#define FFI_GO_CLOSURES 1 +#endif + +#ifndef _WIN32 +/* No complex type on Windows */ +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/internal.h new file mode 100644 index 0000000..3d4d035 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/internal.h @@ -0,0 +1,68 @@ +/* +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define AARCH64_RET_VOID 0 +#define AARCH64_RET_INT64 1 +#define AARCH64_RET_INT128 2 + +#define AARCH64_RET_UNUSED3 3 +#define AARCH64_RET_UNUSED4 4 +#define AARCH64_RET_UNUSED5 5 +#define AARCH64_RET_UNUSED6 6 +#define AARCH64_RET_UNUSED7 7 + +/* Note that FFI_TYPE_FLOAT == 2, _DOUBLE == 3, _LONGDOUBLE == 4, + so _S4 through _Q1 are layed out as (TYPE * 4) + (4 - COUNT). 
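   For example, with FFI_TYPE_FLOAT == 2 and FFI_TYPE_DOUBLE == 3 this gives
   AARCH64_RET_S1 = 2 * 4 + (4 - 1) = 11 and AARCH64_RET_D2 = 3 * 4 + (4 - 2) = 14,
   matching the values below; is_vfp_type () in ffi.c computes exactly this
   encoding as candidate * 4 + (4 - ele_count).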
*/ +#define AARCH64_RET_S4 8 +#define AARCH64_RET_S3 9 +#define AARCH64_RET_S2 10 +#define AARCH64_RET_S1 11 + +#define AARCH64_RET_D4 12 +#define AARCH64_RET_D3 13 +#define AARCH64_RET_D2 14 +#define AARCH64_RET_D1 15 + +#define AARCH64_RET_Q4 16 +#define AARCH64_RET_Q3 17 +#define AARCH64_RET_Q2 18 +#define AARCH64_RET_Q1 19 + +/* Note that each of the sub-64-bit integers gets two entries. */ +#define AARCH64_RET_UINT8 20 +#define AARCH64_RET_UINT16 22 +#define AARCH64_RET_UINT32 24 + +#define AARCH64_RET_SINT8 26 +#define AARCH64_RET_SINT16 28 +#define AARCH64_RET_SINT32 30 + +#define AARCH64_RET_MASK 31 + +#define AARCH64_RET_IN_MEM (1 << 5) +#define AARCH64_RET_NEED_COPY (1 << 6) + +#define AARCH64_FLAG_ARG_V_BIT 7 +#define AARCH64_FLAG_ARG_V (1 << AARCH64_FLAG_ARG_V_BIT) +#define AARCH64_FLAG_VARARG (1 << 8) + +#define N_X_ARG_REG 8 +#define N_V_ARG_REG 8 +#define CALL_CONTEXT_SIZE (N_V_ARG_REG * 16 + N_X_ARG_REG * 8) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/sysv.S new file mode 100644 index 0000000..b720a92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/sysv.S @@ -0,0 +1,451 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__aarch64__) || defined(__arm64__) +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#endif + +#ifdef __AARCH64EB__ +# define BE(X) X +#else +# define BE(X) 0 +#endif + +#ifdef __ILP32__ +#define PTR_REG(n) w##n +#else +#define PTR_REG(n) x##n +#endif + +#ifdef __ILP32__ +#define PTR_SIZE 4 +#else +#define PTR_SIZE 8 +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) +# define BR(r) braaz r +# define BLR(r) blraaz r +#else +# define BR(r) br r +# define BLR(r) blr r +#endif + + .text + .align 4 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + + Therefore on entry we have: + + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + cfi_startproc +CNAME(ffi_call_SYSV): + /* Use a stack frame allocated by our caller. 
*/ + cfi_def_cfa(x1, 32); + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + cfi_def_cfa_register(x29) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + mov x18, x5 /* install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] +1: + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + BLR(x9) /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + cfi_def_cfa_register (sp) + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, 0f + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. */ + .align 4 +0: ret /* VOID */ + nop +1: str x0, [x3] /* INT64 */ + ret +2: stp x0, x1, [x3] /* INT128 */ + ret +3: brk #1000 /* UNUSED */ + ret +4: brk #1000 /* UNUSED */ + ret +5: brk #1000 /* UNUSED */ + ret +6: brk #1000 /* UNUSED */ + ret +7: brk #1000 /* UNUSED */ + ret +8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret +9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret +10: stp s0, s1, [x3] /* S2 */ + ret +11: str s0, [x3] /* S1 */ + ret +12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret +13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret +14: stp d0, d1, [x3] /* D2 */ + ret +15: str d0, [x3] /* D1 */ + ret +16: str q3, [x3, #48] /* Q4 */ + nop +17: str q2, [x3, #32] /* Q3 */ + nop +18: stp q0, q1, [x3] /* Q2 */ + ret +19: str q0, [x3] /* Q1 */ + ret +20: uxtb w0, w0 /* UINT8 */ + str x0, [x3] +21: ret /* reserved */ + nop +22: uxth w0, w0 /* UINT16 */ + str x0, [x3] +23: ret /* reserved */ + nop +24: mov w0, w0 /* UINT32 */ + str x0, [x3] +25: ret /* reserved */ + nop +26: sxtb x0, w0 /* SINT8 */ + str x0, [x3] +27: ret /* reserved */ + nop +28: sxth x0, w0 /* SINT16 */ + str x0, [x3] +29: ret /* reserved */ + nop +30: sxtw x0, w0 /* SINT32 */ + str x0, [x3] +31: ret /* reserved */ + nop + + cfi_endproc + + .globl CNAME(ffi_call_SYSV) + FFI_HIDDEN(CNAME(ffi_call_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_call_SYSV), #function + .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV) +#endif + +/* ffi_closure_SYSV + + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. 
+*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + .align 4 +CNAME(ffi_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV_V), #function + .size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ +.Ldo_closure: + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + bl CNAME(ffi_closure_SYSV_inner) + + /* Load the return value as directed. */ +#if FFI_EXEC_TRAMPOLINE_TABLE && defined(__MACH__) && defined(HAVE_PTRAUTH) + autiza x1 +#endif + adr x1, 0f + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. */ + .align 4 +0: b 99f /* VOID */ + nop +1: ldr x0, [x3] /* INT64 */ + b 99f +2: ldp x0, x1, [x3] /* INT128 */ + b 99f +3: brk #1000 /* UNUSED */ + nop +4: brk #1000 /* UNUSED */ + nop +5: brk #1000 /* UNUSED */ + nop +6: brk #1000 /* UNUSED */ + nop +7: brk #1000 /* UNUSED */ + nop +8: ldr s3, [x3, #12] /* S4 */ + nop +9: ldr s2, [x3, #8] /* S3 */ + nop +10: ldp s0, s1, [x3] /* S2 */ + b 99f +11: ldr s0, [x3] /* S1 */ + b 99f +12: ldr d3, [x3, #24] /* D4 */ + nop +13: ldr d2, [x3, #16] /* D3 */ + nop +14: ldp d0, d1, [x3] /* D2 */ + b 99f +15: ldr d0, [x3] /* D1 */ + b 99f +16: ldr q3, [x3, #48] /* Q4 */ + nop +17: ldr q2, [x3, #32] /* Q3 */ + nop +18: ldp q0, q1, [x3] /* Q2 */ + b 99f +19: ldr q0, [x3] /* Q1 */ + b 99f +20: ldrb w0, [x3, #BE(7)] /* UINT8 */ + b 99f +21: brk #1000 /* reserved */ + nop +22: ldrh w0, [x3, #BE(6)] /* UINT16 */ + b 99f +23: brk #1000 /* reserved */ + nop +24: ldr w0, [x3, #BE(4)] /* UINT32 */ + b 99f +25: brk #1000 /* reserved */ + nop +26: ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b 99f +27: brk #1000 /* reserved */ + nop +28: ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b 99f +29: brk #1000 /* reserved */ + nop +30: ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop +31: /* reserved */ +99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS + cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS) + cfi_restore (x29) + cfi_restore (x30) + ret + cfi_endproc + + .globl CNAME(ffi_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV), #function + .size CNAME(ffi_closure_SYSV), . 
- CNAME(ffi_closure_SYSV) +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + .align PAGE_MAX_SHIFT +CNAME(ffi_closure_trampoline_table_page): + .rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr x16, -PAGE_MAX_SIZE + ldp x17, x16, [x16] + BR(x16) + nop /* each entry in the trampoline config page is 2*sizeof(void*) so the trampoline itself cannot be smaller than 16 bytes */ + .endr + + .globl CNAME(ffi_closure_trampoline_table_page) + FFI_HIDDEN(CNAME(ffi_closure_trampoline_table_page)) + #ifdef __ELF__ + .type CNAME(ffi_closure_trampoline_table_page), #function + .size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page) + #endif +#endif + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ + +#ifdef FFI_GO_CLOSURES + .align 4 +CNAME(ffi_go_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV_V) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV_V)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV_V), #function + .size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_go_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b .Ldo_closure + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV) + FFI_HIDDEN(CNAME(ffi_go_closure_SYSV)) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV), #function + .size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV) +#endif +#endif /* FFI_GO_CLOSURES */ +#endif /* __arm64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/win64_armasm.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/win64_armasm.S new file mode 100644 index 0000000..7fc185b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/aarch64/win64_armasm.S @@ -0,0 +1,506 @@ +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + OPT 2 /*disable listing */ +/* For some macros to add unwind information */ +#include "ksarm64.h" + OPT 1 /*re-enable listing */ + +#define BE(X) 0 +#define PTR_REG(n) x##n +#define PTR_SIZE 8 + + IMPORT ffi_closure_SYSV_inner + EXPORT ffi_call_SYSV + EXPORT ffi_closure_SYSV_V + EXPORT ffi_closure_SYSV + EXPORT extend_hfa_type + EXPORT compress_hfa_type +#ifdef FFI_GO_CLOSURES + EXPORT ffi_go_closure_SYSV_V + EXPORT ffi_go_closure_SYSV +#endif + + TEXTAREA, ALIGN=8 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + Therefore on entry we have: + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + NESTED_ENTRY ffi_call_SYSV_fake + + /* For unwind information, Windows has to store fp and lr */ + PROLOG_SAVE_REG_PAIR x29, x30, #-32! + + ALTERNATE_ENTRY ffi_call_SYSV + /* Use a stack frame allocated by our caller. */ + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + /*mov x18, x5 install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz x4, #AARCH64_FLAG_ARG_V_BIT, ffi_call_SYSV_L1 + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] + +ffi_call_SYSV_L1 + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + blr x9 /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. */ + mov sp, x29 + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, ffi_call_SYSV_return + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. 
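+
+   As a worked example using the AARCH64_RET_* constants from internal.h:
+   a signed 8-bit return has a masked flags value of AARCH64_RET_SINT8 = 26,
+   so the computed branch target is ffi_call_SYSV_return + (26 << 3), i.e.
+   208 bytes into the table, landing on the sxtb/str pair; execution then
+   falls through into the ret of the adjacent reserved slot, which is why
+   the sub-64-bit integer types are allocated two consecutive entries.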
*/ + ALIGN 4 +ffi_call_SYSV_return + ret /* VOID */ + nop + str x0, [x3] /* INT64 */ + ret + stp x0, x1, [x3] /* INT128 */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + brk #1000 /* UNUSED */ + ret + st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret + st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret + stp s0, s1, [x3] /* S2 */ + ret + str s0, [x3] /* S1 */ + ret + st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret + st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret + stp d0, d1, [x3] /* D2 */ + ret + str d0, [x3] /* D1 */ + ret + str q3, [x3, #48] /* Q4 */ + nop + str q2, [x3, #32] /* Q3 */ + nop + stp q0, q1, [x3] /* Q2 */ + ret + str q0, [x3] /* Q1 */ + ret + uxtb w0, w0 /* UINT8 */ + str x0, [x3] + ret /* reserved */ + nop + uxth w0, w0 /* UINT16 */ + str x0, [x3] + ret /* reserved */ + nop + mov w0, w0 /* UINT32 */ + str x0, [x3] + ret /* reserved */ + nop + sxtb x0, w0 /* SINT8 */ + str x0, [x3] + ret /* reserved */ + nop + sxth x0, w0 /* SINT16 */ + str x0, [x3] + ret /* reserved */ + nop + sxtw x0, w0 /* SINT32 */ + str x0, [x3] + ret /* reserved */ + nop + + + NESTED_END ffi_call_SYSV_fake + + +/* ffi_closure_SYSV + Closure invocation glue. This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + NESTED_ENTRY ffi_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + + b ffi_closure_SYSV_save_argument + NESTED_END ffi_closure_SYSV_V + + NESTED_ENTRY ffi_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ + +do_closure + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + + bl ffi_closure_SYSV_inner + + /* Load the return value as directed. */ + adr x1, ffi_closure_SYSV_return_base + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. 
*/ + ALIGN 8 +ffi_closure_SYSV_return_base + b ffi_closure_SYSV_epilog /* VOID */ + nop + ldr x0, [x3] /* INT64 */ + b ffi_closure_SYSV_epilog + ldp x0, x1, [x3] /* INT128 */ + b ffi_closure_SYSV_epilog + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + brk #1000 /* UNUSED */ + nop + ldr s3, [x3, #12] /* S4 */ + nop + ldr s2, [x3, #8] /* S3 */ + nop + ldp s0, s1, [x3] /* S2 */ + b ffi_closure_SYSV_epilog + ldr s0, [x3] /* S1 */ + b ffi_closure_SYSV_epilog + ldr d3, [x3, #24] /* D4 */ + nop + ldr d2, [x3, #16] /* D3 */ + nop + ldp d0, d1, [x3] /* D2 */ + b ffi_closure_SYSV_epilog + ldr d0, [x3] /* D1 */ + b ffi_closure_SYSV_epilog + ldr q3, [x3, #48] /* Q4 */ + nop + ldr q2, [x3, #32] /* Q3 */ + nop + ldp q0, q1, [x3] /* Q2 */ + b ffi_closure_SYSV_epilog + ldr q0, [x3] /* Q1 */ + b ffi_closure_SYSV_epilog + ldrb w0, [x3, #BE(7)] /* UINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrh w0, [x3, #BE(6)] /* UINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldr w0, [x3, #BE(4)] /* UINT32 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b ffi_closure_SYSV_epilog + brk #1000 /* reserved */ + nop + ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop + /* reserved */ + +ffi_closure_SYSV_epilog + EPILOG_RESTORE_REG_PAIR x29, x30, #ffi_closure_SYSV_FS! + EPILOG_RETURN + NESTED_END ffi_closure_SYSV + + +#ifdef FFI_GO_CLOSURES + NESTED_ENTRY ffi_go_closure_SYSV_V + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b ffi_go_closure_SYSV_save_argument + NESTED_END ffi_go_closure_SYSV_V + + NESTED_ENTRY ffi_go_closure_SYSV + PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! + +ffi_go_closure_SYSV_save_argument + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. 
*/ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b do_closure + NESTED_END ffi_go_closure_SYSV + +#endif /* FFI_GO_CLOSURES */ + + +/* void extend_hfa_type (void *dest, void *src, int h) */ + + LEAF_ENTRY extend_hfa_type + + adr x3, extend_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +extend_hfa_type_jump_base + ldp s16, s17, [x1] /* S4 */ + ldp s18, s19, [x1, #8] + b extend_hfa_type_store_4 + nop + + ldp s16, s17, [x1] /* S3 */ + ldr s18, [x1, #8] + b extend_hfa_type_store_3 + nop + + ldp s16, s17, [x1] /* S2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr s16, [x1] /* S1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp d16, d17, [x1] /* D4 */ + ldp d18, d19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp d16, d17, [x1] /* D3 */ + ldr d18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp d16, d17, [x1] /* D2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr d16, [x1] /* D1 */ + b extend_hfa_type_store_1 + nop + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #16] + b extend_hfa_type_store_4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #16] + b extend_hfa_type_store_3 + nop + + ldp q16, q17, [x1] /* Q2 */ + b extend_hfa_type_store_2 + nop + nop + + ldr q16, [x1] /* Q1 */ + b extend_hfa_type_store_1 + +extend_hfa_type_store_4 + str q19, [x0, #48] +extend_hfa_type_store_3 + str q18, [x0, #32] +extend_hfa_type_store_2 + str q17, [x0, #16] +extend_hfa_type_store_1 + str q16, [x0] + ret + + LEAF_END extend_hfa_type + + +/* void compress_hfa_type (void *dest, void *reg, int h) */ + + LEAF_ENTRY compress_hfa_type + + adr x3, compress_hfa_type_jump_base + and w2, w2, #AARCH64_RET_MASK + sub x2, x2, #AARCH64_RET_S4 + add x3, x3, x2, lsl #4 + br x3 + + ALIGN 4 +compress_hfa_type_jump_base + ldp q16, q17, [x1] /* S4 */ + ldp q18, q19, [x1, #32] + st4 { v16.s, v17.s, v18.s, v19.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S3 */ + ldr q18, [x1, #32] + st3 { v16.s, v17.s, v18.s }[0], [x0] + ret + + ldp q16, q17, [x1] /* S2 */ + st2 { v16.s, v17.s }[0], [x0] + ret + nop + + ldr q16, [x1] /* S1 */ + st1 { v16.s }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* D4 */ + ldp q18, q19, [x1, #32] + st4 { v16.d, v17.d, v18.d, v19.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D3 */ + ldr q18, [x1, #32] + st3 { v16.d, v17.d, v18.d }[0], [x0] + ret + + ldp q16, q17, [x1] /* D2 */ + st2 { v16.d, v17.d }[0], [x0] + ret + nop + + ldr q16, [x1] /* D1 */ + st1 { v16.d }[0], [x0] + ret + nop + + ldp q16, q17, [x1] /* Q4 */ + ldp q18, q19, [x1, #32] + b compress_hfa_type_store_q4 + nop + + ldp q16, q17, [x1] /* Q3 */ + ldr q18, [x1, #32] + b compress_hfa_type_store_q3 + nop + + ldp q16, q17, [x1] /* Q2 */ + stp q16, q17, [x0] + ret + nop + + ldr q16, [x1] /* Q1 */ + str q16, [x0] + ret + +compress_hfa_type_store_q4 + str q19, [x0, #48] +compress_hfa_type_store_q3 + str q18, [x0, #32] + stp q16, q17, [x0] + ret + + LEAF_END compress_hfa_type + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/ffi.c new file mode 100644 index 0000000..7a95e97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/ffi.c @@ -0,0 +1,521 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Anthony Green + Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc. 
+ + Alpha Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if defined(__LONG_DOUBLE_128__) +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +extern void ffi_call_osf(void *stack, void *frame, unsigned flags, + void *raddr, void (*fn)(void), void *closure) + FFI_HIDDEN; +extern void ffi_closure_osf(void) FFI_HIDDEN; +extern void ffi_go_closure_osf(void) FFI_HIDDEN; + +/* Promote a float value to its in-register double representation. + Unlike actually casting to double, this does not trap on NaN. */ +static inline UINT64 lds(void *ptr) +{ + UINT64 ret; + asm("lds %0,%1" : "=f"(ret) : "m"(*(UINT32 *)ptr)); + return ret; +} + +/* And the reverse. */ +static inline void sts(void *ptr, UINT64 val) +{ + asm("sts %1,%0" : "=m"(*(UINT32 *)ptr) : "f"(val)); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int flags, i, avn; + ffi_type *rtype, *itype; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + /* Compute the size of the argument area. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + itype = cif->arg_types[i]; + switch (itype->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + /* All take one 8 byte slot. */ + bytes += 8; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + /* Passed by value in N slots. */ + bytes += FFI_ALIGN(itype->size, FFI_SIZEOF_ARG); + break; + + case FFI_TYPE_COMPLEX: + /* _Complex long double passed by reference; others in 2 slots. 
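+
+     Concretely, a _Complex float or _Complex double argument adds two
+     8-byte slots (16 bytes), while a _Complex long double adds only the
+     8 bytes for the reference.  For instance a call taking
+     (int, _Complex double, long double) reserves 8 + 16 + 8 = 32 bytes
+     here, which is then raised to the 6-slot (48-byte) minimum applied
+     just below so that ffi_call_osf can always pop a full register area.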
*/ + if (itype->elements[0]->type == FFI_TYPE_LONGDOUBLE) + bytes += 8; + else + bytes += 16; + break; + + default: + abort(); + } + } + + /* Set the return type flag */ + rtype = cif->rtype; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = ALPHA_FLAGS(ALPHA_ST_VOID, ALPHA_LD_VOID); + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT32); + break; + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_FLOAT, ALPHA_LD_FLOAT); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_DOUBLE, ALPHA_LD_DOUBLE); + break; + case FFI_TYPE_UINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT8); + break; + case FFI_TYPE_SINT8: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT8); + break; + case FFI_TYPE_UINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_UINT16); + break; + case FFI_TYPE_SINT16: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_SINT16); + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + /* Passed in memory, with a hidden pointer. */ + flags = ALPHA_RET_IN_MEM; + break; + case FFI_TYPE_COMPLEX: + itype = rtype->elements[0]; + switch (itype->type) + { + case FFI_TYPE_FLOAT: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXF, ALPHA_LD_CPLXF); + break; + case FFI_TYPE_DOUBLE: + flags = ALPHA_FLAGS(ALPHA_ST_CPLXD, ALPHA_LD_CPLXD); + break; + default: + if (rtype->size <= 8) + flags = ALPHA_FLAGS(ALPHA_ST_INT, ALPHA_LD_INT64); + else + flags = ALPHA_RET_IN_MEM; + break; + } + break; + default: + abort(); + } + cif->flags = flags; + + /* Include the hidden structure pointer in args requirement. */ + if (flags == ALPHA_RET_IN_MEM) + bytes += 8; + /* Minimum size is 6 slots, so that ffi_call_osf can pop them. */ + if (bytes < 6*8) + bytes = 6*8; + cif->bytes = bytes; + + return FFI_OK; +} + +static unsigned long +extend_basic_type(void *valp, int type, int argn) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)valp; + case FFI_TYPE_UINT8: + return *(UINT8 *)valp; + case FFI_TYPE_SINT16: + return *(SINT16 *)valp; + case FFI_TYPE_UINT16: + return *(UINT16 *)valp; + + case FFI_TYPE_FLOAT: + if (argn < 6) + return lds(valp); + /* FALLTHRU */ + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Note that unsigned 32-bit quantities are sign extended. */ + return *(SINT32 *)valp; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + return *(UINT64 *)valp; + + default: + abort(); + } +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + unsigned long *argp; + long i, avn, argn, flags = cif->flags; + ffi_type **arg_types; + void *frame; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + if (rvalue == NULL && flags == ALPHA_RET_IN_MEM) + rvalue = alloca(cif->rtype->size); + + /* Allocate the space for the arguments, plus 4 words of temp + space for ffi_call_osf. 
*/ + argp = frame = alloca(cif->bytes + 4*FFI_SIZEOF_ARG); + frame += cif->bytes; + + argn = 0; + if (flags == ALPHA_RET_IN_MEM) + argp[argn++] = (unsigned long)rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + int type = ty->type; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + argp[argn] = extend_basic_type(valp, type, argn); + argn++; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Note that 128-bit long double is passed by reference. */ + argp[argn++] = (unsigned long)valp; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + memcpy(argp + argn, valp, size); + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + if (type == FFI_TYPE_LONGDOUBLE) + goto by_reference; + + /* Most complex types passed as two separate arguments. */ + size = ty->elements[0]->size; + argp[argn] = extend_basic_type(valp, type, argn); + argp[argn + 1] = extend_basic_type(valp + size, type, argn + 1); + argn += 2; + break; + + default: + abort(); + } + } + + flags = (flags >> ALPHA_ST_SHIFT) & 0xff; + ffi_call_osf(argp, frame, flags, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x47fb0401; /* mov $27,$1 */ + tramp[1] = 0xa77b0010; /* ldq $27,16($27) */ + tramp[2] = 0x6bfb0000; /* jmp $31,($27),0 */ + tramp[3] = 0x47ff041f; /* nop */ + *(void **) &tramp[4] = ffi_closure_osf; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the Icache. + + Tru64 UNIX as doesn't understand the imb mnemonic, so use call_pal + instead, since both Compaq as and gas can handle it. + + 0x86 is PAL_imb in Tru64 UNIX . */ + asm volatile ("call_pal 0x86" : : : "memory"); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_OSF) + return FFI_BAD_ABI; + + closure->tramp = (void *)ffi_go_closure_osf; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +long FFI_HIDDEN +ffi_closure_osf_inner (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, unsigned long *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn, argn, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + flags = cif->flags; + argn = 0; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. 
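+
+     As a concrete case: for a closure whose CIF describes
+     "struct { double a, b; } f (int x)", cif->flags is ALPHA_RET_IN_MEM,
+     so argp[0] carries the caller-supplied return buffer, rvalue is
+     redirected to it, and the first real argument x is read from argp[1].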
*/ + if (flags == ALPHA_RET_IN_MEM) + { + rvalue = (void *) argp[0]; + argn = 1; + } + + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, avn = cif->nargs; i < avn; i++) + { + ffi_type *ty = arg_types[i]; + int type = ty->type; + void *valp = &argp[argn]; + size_t size; + + switch (type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + argn += 1; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + size = ty->size; + argn += FFI_ALIGN(size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + break; + + case FFI_TYPE_FLOAT: + /* Floats coming from registers need conversion from double + back to float format. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + argn += 1; + break; + + case FFI_TYPE_DOUBLE: + if (argn < 6) + valp = &argp[argn - 6]; + argn += 1; + break; + + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* 128-bit long double is passed by reference. */ + valp = (void *)argp[argn]; + argn += 1; + break; + + case FFI_TYPE_COMPLEX: + type = ty->elements[0]->type; + switch (type) + { + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passed as separate arguments, but they wind up sequential. */ + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + /* Passed as separate arguments. Disjoint, but there's room + enough in one slot to hold the pair. */ + size = ty->elements[0]->size; + memcpy(valp + size, valp + 8, size); + break; + + case FFI_TYPE_FLOAT: + /* Passed as separate arguments. Disjoint, and each piece + may need conversion back to float. */ + if (argn < 6) + { + valp = &argp[argn - 6]; + sts(valp, argp[argn - 6]); + } + if (argn + 1 < 6) + sts(valp + 4, argp[argn + 1 - 6]); + else + *(UINT32 *)(valp + 4) = argp[argn + 1]; + break; + + case FFI_TYPE_DOUBLE: + /* Passed as separate arguments. Only disjoint if one part + is in fp regs and the other is on the stack. */ + if (argn < 5) + valp = &argp[argn - 6]; + else if (argn == 5) + { + valp = alloca(16); + ((UINT64 *)valp)[0] = argp[5 - 6]; + ((UINT64 *)valp)[1] = argp[6]; + } + break; + + case FFI_TYPE_LONGDOUBLE: + goto by_reference; + + default: + abort(); + } + argn += 2; + break; + + default: + abort (); + } + + avalue[i] = valp; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_osf how to perform return type promotions. */ + return (flags >> ALPHA_LD_SHIFT) & 0xff; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/ffitarget.h new file mode 100644 index 0000000..a02dbd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/ffitarget.h @@ -0,0 +1,57 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Alpha. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OSF, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_OSF +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/internal.h new file mode 100644 index 0000000..44da192 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/internal.h @@ -0,0 +1,23 @@ +#define ALPHA_ST_VOID 0 +#define ALPHA_ST_INT 1 +#define ALPHA_ST_FLOAT 2 +#define ALPHA_ST_DOUBLE 3 +#define ALPHA_ST_CPLXF 4 +#define ALPHA_ST_CPLXD 5 + +#define ALPHA_LD_VOID 0 +#define ALPHA_LD_INT64 1 +#define ALPHA_LD_INT32 2 +#define ALPHA_LD_UINT16 3 +#define ALPHA_LD_SINT16 4 +#define ALPHA_LD_UINT8 5 +#define ALPHA_LD_SINT8 6 +#define ALPHA_LD_FLOAT 7 +#define ALPHA_LD_DOUBLE 8 +#define ALPHA_LD_CPLXF 9 +#define ALPHA_LD_CPLXD 10 + +#define ALPHA_ST_SHIFT 0 +#define ALPHA_LD_SHIFT 8 +#define ALPHA_RET_IN_MEM 0x10000 +#define ALPHA_FLAGS(S, L) (((L) << ALPHA_LD_SHIFT) | (S)) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/osf.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/osf.S new file mode 100644 index 0000000..b031828 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/alpha/osf.S @@ -0,0 +1,282 @@ +/* ----------------------------------------------------------------------- + osf.S - Copyright (c) 1998, 2001, 2007, 2008, 2011, 2014 Red Hat + + Alpha/OSF Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, 
and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + + .arch ev6 + .text + +/* Aid in building a direct addressed jump table, 4 insns per entry. */ +.macro E index + .align 4 + .org 99b + \index * 16 +.endm + +/* ffi_call_osf (void *stack, void *frame, unsigned flags, + void *raddr, void (*fnaddr)(void), void *closure) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 4 + .globl ffi_call_osf + .ent ffi_call_osf + FFI_HIDDEN(ffi_call_osf) + +ffi_call_osf: + cfi_startproc + cfi_def_cfa($17, 32) + mov $16, $30 + stq $26, 0($17) + stq $15, 8($17) + mov $17, $15 + .prologue 0 + cfi_def_cfa_register($15) + cfi_rel_offset($26, 0) + cfi_rel_offset($15, 8) + + stq $18, 16($17) # save flags into frame + stq $19, 24($17) # save rvalue into frame + mov $20, $27 # fn into place for call + mov $21, $1 # closure into static chain + + # Load up all of the (potential) argument registers. + ldq $16, 0($30) + ldt $f16, 0($30) + ldt $f17, 8($30) + ldq $17, 8($30) + ldt $f18, 16($30) + ldq $18, 16($30) + ldt $f19, 24($30) + ldq $19, 24($30) + ldt $f20, 32($30) + ldq $20, 32($30) + ldt $f21, 40($30) + ldq $21, 40($30) + + # Deallocate the register argument area. + lda $30, 48($30) + + jsr $26, ($27), 0 +0: + ldah $29, 0($26) !gpdisp!1 + ldq $2, 24($15) # reload rvalue + lda $29, 0($29) !gpdisp!1 + ldq $3, 16($15) # reload flags + lda $1, 99f-0b($26) + ldq $26, 0($15) + ldq $15, 8($15) + cfi_restore($26) + cfi_restore($15) + cfi_def_cfa($sp, 0) + cmoveq $2, ALPHA_ST_VOID, $3 # mash null rvalue to void + addq $3, $3, $3 + s8addq $3, $1, $1 # 99f + stcode * 16 + jmp $31, ($1), $st_int + + .align 4 +99: +E ALPHA_ST_VOID + ret +E ALPHA_ST_INT +$st_int: + stq $0, 0($2) + ret +E ALPHA_ST_FLOAT + sts $f0, 0($2) + ret +E ALPHA_ST_DOUBLE + stt $f0, 0($2) + ret +E ALPHA_ST_CPLXF + sts $f0, 0($2) + sts $f1, 4($2) + ret +E ALPHA_ST_CPLXD + stt $f0, 0($2) + stt $f1, 8($2) + ret + + cfi_endproc + .end ffi_call_osf + +/* ffi_closure_osf(...) + + Receives the closure argument in $1. 
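+
+   That register is set up by the trampoline emitted by ffi_prep_closure_loc
+   above: the trampoline begins executing with $27 holding its own address
+   (the standard procedure-value register), and the closure object starts
+   with the trampoline itself, so
+
+       mov  $27,$1        # $1 <- address of the ffi_closure
+       ldq  $27,16($27)   # fetch the stored ffi_closure_osf pointer
+       jmp  $31,($27),0   # jump here with $1 still pointing at the closure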
*/ + +#define CLOSURE_FS (16*8) + + .align 4 + .globl ffi_go_closure_osf + .ent ffi_go_closure_osf + FFI_HIDDEN(ffi_go_closure_osf) + +ffi_go_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 8($1) # load cif + ldq $17, 16($1) # load fun + mov $1, $18 # closure is user_data + br $do_closure + + cfi_endproc + .end ffi_go_closure_osf + + .align 4 + .globl ffi_closure_osf + .ent ffi_closure_osf + FFI_HIDDEN(ffi_closure_osf) + +ffi_closure_osf: + cfi_startproc + ldgp $29, 0($27) + subq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(CLOSURE_FS) + stq $26, 0($30) + .prologue 1 + cfi_rel_offset($26, 0) + + # Store all of the potential argument registers in va_list format. + stq $16, 10*8($30) + stq $17, 11*8($30) + stq $18, 12*8($30) + + ldq $16, 24($1) # load cif + ldq $17, 32($1) # load fun + ldq $18, 40($1) # load user_data + +$do_closure: + stq $19, 13*8($30) + stq $20, 14*8($30) + stq $21, 15*8($30) + stt $f16, 4*8($30) + stt $f17, 5*8($30) + stt $f18, 6*8($30) + stt $f19, 7*8($30) + stt $f20, 8*8($30) + stt $f21, 9*8($30) + + # Call ffi_closure_osf_inner to do the bulk of the work. + lda $19, 2*8($30) + lda $20, 10*8($30) + jsr $26, ffi_closure_osf_inner +0: + ldah $29, 0($26) !gpdisp!2 + lda $2, 99f-0b($26) + s4addq $0, 0, $1 # ldcode * 4 + ldq $0, 16($30) # preload return value + s4addq $1, $2, $1 # 99f + ldcode * 16 + lda $29, 0($29) !gpdisp!2 + ldq $26, 0($30) + cfi_restore($26) + jmp $31, ($1), $load_32 + +.macro epilogue + addq $30, CLOSURE_FS, $30 + cfi_adjust_cfa_offset(-CLOSURE_FS) + ret + .align 4 + cfi_adjust_cfa_offset(CLOSURE_FS) +.endm + + .align 4 +99: +E ALPHA_LD_VOID + epilogue + +E ALPHA_LD_INT64 + epilogue + +E ALPHA_LD_INT32 +$load_32: + sextl $0, $0 + epilogue + +E ALPHA_LD_UINT16 + zapnot $0, 3, $0 + epilogue + +E ALPHA_LD_SINT16 +#ifdef __alpha_bwx__ + sextw $0, $0 +#else + sll $0, 48, $0 + sra $0, 48, $0 +#endif + epilogue + +E ALPHA_LD_UINT8 + and $0, 0xff, $0 + epilogue + +E ALPHA_LD_SINT8 +#ifdef __alpha_bwx__ + sextb $0, $0 +#else + sll $0, 56, $0 + sra $0, 56, $0 +#endif + epilogue + +E ALPHA_LD_FLOAT + lds $f0, 16($sp) + epilogue + +E ALPHA_LD_DOUBLE + ldt $f0, 16($sp) + epilogue + +E ALPHA_LD_CPLXF + lds $f0, 16($sp) + lds $f1, 20($sp) + epilogue + +E ALPHA_LD_CPLXD + ldt $f0, 16($sp) + ldt $f1, 24($sp) + epilogue + + cfi_endproc + .end ffi_closure_osf + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/arcompact.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/arcompact.S new file mode 100644 index 0000000..03715fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/arcompact.S @@ -0,0 +1,135 @@ +/* ----------------------------------------------------------------------- + arcompact.S - Copyright (c) 2013 Synposys, Inc. 
(www.synopsys.com) + + ARCompact Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)` .type CNAME(x),%function` CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* R4: ecif.rvalue */ + /* R5: fn */ +ENTRY(ffi_call_ARCompact) + /* Save registers. */ + st.a fp, [sp, -4] /* fp + 20, fp */ + push_s blink /* fp + 16, blink */ + st.a r4, [sp, -4] /* fp + 12, ecif.rvalue */ + push_s r3 /* fp + 8, fig->flags */ + st.a r5, [sp, -4] /* fp + 4, fn */ + push_s r2 /* fp + 0, cif->bytes */ + mov fp, sp + + /* Make room for all of the new args. */ + sub sp, sp, r2 + + /* Place all of the ffi_prep_args in position. */ + /* ffi_prep_args(char *stack, extended_cif *ecif) */ + /* R1 already set. */ + + /* And call. */ + jl_s.d [r0] + mov_s r0, sp + + ld.ab r12, [fp, 4] /* cif->bytes */ + ld.ab r11, [fp, 4] /* fn */ + + /* Move first 8 parameters in registers... */ + ld_s r0, [sp] + ld_s r1, [sp, 4] + ld_s r2, [sp, 8] + ld_s r3, [sp, 12] + ld r4, [sp, 16] + ld r5, [sp, 20] + ld r6, [sp, 24] + ld r7, [sp, 28] + + /* ...and adjust the stack. */ + min r12, r12, 32 + + /* Call the function. */ + jl.d [r11] + add sp, sp, r12 + + mov sp, fp + pop_s r3 /* fig->flags, return type */ + pop_s r2 /* ecif.rvalue, pointer for return value */ + + /* If the return value pointer is NULL, assume no return value. */ + breq.d r2, 0, epilogue + pop_s blink + + /* Return INT. 
*/ + brne r3, FFI_TYPE_INT, return_double + b.d epilogue + st_s r0, [r2] + +return_double: + brne r3, FFI_TYPE_DOUBLE, epilogue + st_s r0, [r2] + st_s r1, [r2,4] + +epilogue: + j_s.d [blink] + ld.ab fp, [sp, 4] + +ENTRY(ffi_closure_ARCompact) + st.a r0, [sp, -32] + st_s r1, [sp, 4] + st_s r2, [sp, 8] + st_s r3, [sp, 12] + st r4, [sp, 16] + st r5, [sp, 20] + st r6, [sp, 24] + st r7, [sp, 28] + + /* pointer to arguments */ + mov_s r2, sp + + /* return value goes here */ + sub sp, sp, 8 + mov_s r1, sp + + push_s blink + + bl.d ffi_closure_inner_ARCompact + mov_s r0, r8 /* codeloc, set by trampoline */ + + pop_s blink + + /* set return value to r1:r0 */ + pop_s r0 + pop_s r1 + j_s.d [blink] + add_s sp, sp, 32 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/ffi.c new file mode 100644 index 0000000..4d10b21 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/ffi.c @@ -0,0 +1,266 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + + ARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#include + +/* for little endian ARC, the code is in fact stored as mixed endian for + performance reasons */ +#if __BIG_ENDIAN__ +#define CODE_ENDIAN(x) (x) +#else +#define CODE_ENDIAN(x) ( (((uint32_t) (x)) << 16) | (((uint32_t) (x)) >> 16)) +#endif + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. */ + +void +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_arg)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
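+
+        The expression (((*p_arg)->alignment - 1) | 3) + 1 used above just
+        rounds the type's natural alignment up to a minimum of 4: values
+        of 1 or 2 become 4, while 4 and 8 are left unchanged, so nothing
+        is placed at less than 32-bit (register-word) alignment.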
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) (*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, (*p_arg)->size); + break; + + default: + FFI_ASSERT (0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + memcpy (argp, *p_argv, z); + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_ARCompact (void (*)(char *, extended_cif *), + extended_cif *, unsigned, unsigned, + unsigned *, void (*fn) (void)); + +void +ffi_call (ffi_cif * cif, void (*fn) (void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ARCOMPACT: + ffi_call_ARCompact (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +int +ffi_closure_inner_ARCompact (ffi_closure * closure, void *rvalue, + ffi_arg * args) +{ + void **arg_area, **p_argv; + ffi_cif *cif = closure->cif; + char *argp = (char *) args; + ffi_type **p_argt; + int i; + + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + /* handle hidden argument */ + if (cif->flags == FFI_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + + p_argv = arg_area; + + for (i = 0, p_argt = cif->arg_types; i < cif->nargs; + i++, p_argt++, p_argv++) + { + size_t z; + int alignment; + + /* align alignment to 4 */ + alignment = (((*p_argt)->alignment - 1) | 3) + 1; + + /* Align if necessary. 
*/ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + z = (*p_argt)->size; + *p_argv = (void *) argp; + argp += z; + } + + (closure->fun) (cif, rvalue, arg_area, closure->user_data); + + return cif->flags; +} + +extern void ffi_closure_ARCompact (void); + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) & (closure->tramp[0]); + + switch (cif->abi) + { + case FFI_ARCOMPACT: + FFI_ASSERT (tramp == codeloc); + tramp[0] = CODE_ENDIAN (0x200a1fc0); /* mov r8, pcl */ + tramp[1] = CODE_ENDIAN (0x20200f80); /* j [long imm] */ + tramp[2] = CODE_ENDIAN (ffi_closure_ARCompact); + break; + + default: + return FFI_BAD_ABI; + } + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + cacheflush (codeloc, FFI_TRAMPOLINE_SIZE, BCACHE); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/ffitarget.h new file mode 100644 index 0000000..bf8311b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arc/ffitarget.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) + Target configuration macros for ARC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi +{ + FFI_FIRST_ABI = 0, + FFI_ARCOMPACT, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_ARCOMPACT +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/ffi.c new file mode 100644 index 0000000..0058390 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/ffi.c @@ -0,0 +1,876 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Timothy Wall + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2011 Anthony Green + Copyright (c) 2011 Free Software Foundation + Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__arm__) || defined(_M_ARM) +#include +#include +#include +#include +#include +#include "internal.h" + +#if defined(_MSC_VER) && defined(_M_ARM) +#define WIN32_LEAN_AND_MEAN +#include +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include +#endif + +#else +#ifndef _M_ARM +extern unsigned int ffi_arm_trampoline[2] FFI_HIDDEN; +#else +extern unsigned int ffi_arm_trampoline[3] FFI_HIDDEN; +#endif +#endif + +#if defined(__FreeBSD__) && defined(__arm__) +#include +#include +#endif + +/* Forward declares. 
*/ +static int vfp_type_p (const ffi_type *); +static void layout_vfp_args (ffi_cif *); + +static void * +ffi_align (ffi_type *ty, void *p) +{ + /* Align if necessary */ + size_t alignment; +#ifdef _WIN32_WCE + alignment = 4; +#else + alignment = ty->alignment; + if (alignment < 4) + alignment = 4; +#endif + return (void *) FFI_ALIGN (p, alignment); +} + +static size_t +ffi_put_arg (ffi_type *ty, void *src, void *dst) +{ + size_t z = ty->size; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *(UINT32 *)dst = *(SINT8 *)src; + break; + case FFI_TYPE_UINT8: + *(UINT32 *)dst = *(UINT8 *)src; + break; + case FFI_TYPE_SINT16: + *(UINT32 *)dst = *(SINT16 *)src; + break; + case FFI_TYPE_UINT16: + *(UINT32 *)dst = *(UINT16 *)src; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: +#ifndef _MSC_VER + case FFI_TYPE_FLOAT: +#endif + *(UINT32 *)dst = *(UINT32 *)src; + break; + +#ifdef _MSC_VER + // casting a float* to a UINT32* doesn't work on Windows + case FFI_TYPE_FLOAT: + *(uintptr_t *)dst = 0; + *(float *)dst = *(float *)src; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + *(UINT64 *)dst = *(UINT64 *)src; + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + memcpy (dst, src, z); + break; + + default: + abort(); + } + + return FFI_ALIGN (z, 4); +} + +/* ffi_prep_args is called once stack space has been allocated + for the function's arguments. + + The vfp_space parameter is the load area for VFP regs, the return + value is cif->vfp_used (word bitset of VFP regs used for passing + arguments). These are only used for the VFP hard-float ABI. +*/ +static void +ffi_prep_args_SYSV (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *argp) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (flags == ARM_TYPE_STRUCT) + { + *(void **) argp = rvalue; + argp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, avalue[i], argp); + } +} + +static void +ffi_prep_args_VFP (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *stack, char *vfp_space) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char stack_used = 0; + char done_with_regs = 0; + + /* The first 4 words on the stack are used for values + passed in core registers. */ + regp = stack; + eo_regp = argp = regp + 16; + + /* If the function returns an FFI_TYPE_STRUCT in memory, + that address is passed in r0 to the function. */ + if (flags == ARM_TYPE_STRUCT) + { + *(void **) regp = rvalue; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *a = avalue[i]; + int is_vfp_type = vfp_type_p (ty); + + /* Allocated in VFP registers. */ + if (vi < cif->vfp_nargs && is_vfp_type) + { + char *vfp_slot = vfp_space + cif->vfp_args[vi++] * 4; + ffi_put_arg (ty, a, vfp_slot); + continue; + } + /* Try allocating in core registers. */ + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + size_t size = ty->size; + size = (size < 4) ? 4 : size; // pad + /* Check if there is space left in the aligned register + area to place the argument. 
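+
+      For example, when three ints have already filled r0-r2, an 8-byte
+      struct with 4-byte alignment is started in r3 and its second word
+      spills onto the stack (the split case handled below), whereas an
+      8-byte-aligned int64_t in the same position cannot fit in the
+      remaining register word, skips r3 and is passed wholly on the stack.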
*/ + if (tregp + size <= eo_regp) + { + regp = tregp + ffi_put_arg (ty, a, tregp); + done_with_regs = (regp == argp); + // ensure we did not write into the stack area + FFI_ASSERT (regp <= argp); + continue; + } + /* In case there are no arguments in the stack area yet, + the argument is passed in the remaining core registers + and on the stack. */ + else if (!stack_used) + { + stack_used = 1; + done_with_regs = 1; + argp = tregp + ffi_put_arg (ty, a, tregp); + FFI_ASSERT (eo_regp < argp); + continue; + } + } + /* Base case, arguments are passed on the stack */ + stack_used = 1; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, a, argp); + } +} + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int flags = 0, cabi = cif->abi; + size_t bytes = cif->bytes; + + /* Map out the register placements of VFP register args. The VFP + hard-float calling conventions are slightly more sophisticated + than the base calling conventions, so we do it here instead of + in ffi_prep_args(). */ + if (cabi == FFI_VFP) + layout_vfp_args (cif); + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = ARM_TYPE_VOID; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + flags = ARM_TYPE_INT; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = ARM_TYPE_INT64; + break; + + case FFI_TYPE_FLOAT: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_S : ARM_TYPE_INT); + break; + case FFI_TYPE_DOUBLE: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_D : ARM_TYPE_INT64); + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + if (cabi == FFI_VFP) + { + int h = vfp_type_p (cif->rtype); + + flags = ARM_TYPE_VFP_N; + if (h == 0x100 + FFI_TYPE_FLOAT) + flags = ARM_TYPE_VFP_S; + if (h == 0x100 + FFI_TYPE_DOUBLE) + flags = ARM_TYPE_VFP_D; + if (h != 0) + break; + } + + /* A Composite Type not larger than 4 bytes is returned in r0. + A Composite Type larger than 4 bytes, or whose size cannot + be determined statically ... is stored in memory at an + address passed [in r0]. */ + if (cif->rtype->size <= 4) + flags = ARM_TYPE_INT; + else + { + flags = ARM_TYPE_STRUCT; + bytes += 4; + } + break; + + default: + abort(); + } + + /* Round the stack up to a multiple of 8 bytes. This isn't needed + everywhere, but it is on some platforms, and it doesn't harm anything + when it isn't needed. */ + bytes = FFI_ALIGN (bytes, 8); + + /* Minimum stack space is the 4 register arguments that we pop. */ + if (bytes < 4*4) + bytes = 4*4; + + cif->bytes = bytes; + cif->flags = flags; + + return FFI_OK; +} + +/* Perform machine dependent cif processing for variadic calls */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif * cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + /* VFP variadic calls actually use the SYSV ABI */ + if (cif->abi == FFI_VFP) + cif->abi = FFI_SYSV; + + return ffi_prep_cif_machdep (cif); +} + +/* Prototypes for assembly functions, in sysv.S. 
*/ + +struct call_frame +{ + void *fp; + void *lr; + void *rvalue; + int flags; + void *closure; +}; + +extern void ffi_call_SYSV (void *stack, struct call_frame *, + void (*fn) (void)) FFI_HIDDEN; +extern void ffi_call_VFP (void *vfp_space, struct call_frame *, + void (*fn) (void), unsigned vfp_used) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif * cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + int flags = cif->flags; + ffi_type *rtype = cif->rtype; + size_t bytes, rsize, vfp_size; + char *stack, *vfp_space, *new_rvalue; + struct call_frame *frame; + + rsize = 0; + if (rvalue == NULL) + { + /* If the return value is a struct and we don't have a return + value address then we need to make one. Otherwise the return + value is in registers and we can ignore them. */ + if (flags == ARM_TYPE_STRUCT) + rsize = rtype->size; + else + flags = ARM_TYPE_VOID; + } + else if (flags == ARM_TYPE_VFP_N) + { + /* Largest case is double x 4. */ + rsize = 32; + } + else if (flags == ARM_TYPE_INT && rtype->type == FFI_TYPE_STRUCT) + rsize = 4; + + /* Largest case. */ + vfp_size = (cif->abi == FFI_VFP && cif->vfp_used ? 8*8: 0); + + bytes = cif->bytes; + stack = alloca (vfp_size + bytes + sizeof(struct call_frame) + rsize); + + vfp_space = NULL; + if (vfp_size) + { + vfp_space = stack; + stack += vfp_size; + } + + frame = (struct call_frame *)(stack + bytes); + + new_rvalue = rvalue; + if (rsize) + new_rvalue = (void *)(frame + 1); + + frame->rvalue = new_rvalue; + frame->flags = flags; + frame->closure = closure; + + if (vfp_space) + { + ffi_prep_args_VFP (cif, flags, new_rvalue, avalue, stack, vfp_space); + ffi_call_VFP (vfp_space, frame, fn, cif->vfp_used); + } + else + { + ffi_prep_args_SYSV (cif, flags, new_rvalue, avalue, stack); + ffi_call_SYSV (stack, frame, fn); + } + + if (rvalue && rvalue != new_rvalue) + memcpy (rvalue, new_rvalue, rtype->size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif + +static void * +ffi_prep_incoming_args_SYSV (ffi_cif *cif, void *rvalue, + char *argp, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + else + { + if (cif->rtype->size && cif->rtype->size < 4) + *(uint32_t *) rvalue = 0; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +static void * +ffi_prep_incoming_args_VFP (ffi_cif *cif, void *rvalue, char *stack, + char *vfp_space, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char done_with_regs = 0; + char stack_used = 0; + + regp = stack; + eo_regp = argp = regp + 16; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) regp; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + int is_vfp_type = vfp_type_p (ty); + size_t z = ty->size; + + if (vi < cif->vfp_nargs && is_vfp_type) + { + avalue[i] = vfp_space + cif->vfp_args[vi++] * 4; + continue; + } + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + + z = (z < 4) ? 
4 : z; // pad + + /* If the arguments either fits into the registers or uses registers + and stack, while we haven't read other things from the stack */ + if (tregp + z <= eo_regp || !stack_used) + { + /* Because we're little endian, this is what it turns into. */ + avalue[i] = (void *) tregp; + regp = tregp + z; + + /* If we read past the last core register, make sure we + have not read from the stack before and continue + reading after regp. */ + if (regp > eo_regp) + { + FFI_ASSERT (!stack_used); + argp = regp; + } + if (regp >= eo_regp) + { + done_with_regs = 1; + stack_used = 1; + } + continue; + } + } + + stack_used = 1; + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +struct closure_frame +{ + char vfp_space[8*8] __attribute__((aligned(8))); + char result[8*4]; + char argp[]; +}; + +int FFI_HIDDEN +ffi_closure_inner_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_SYSV (cif, frame->result, + frame->argp, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +int FFI_HIDDEN +ffi_closure_inner_VFP (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_VFP (cif, frame->result, frame->argp, + frame->vfp_space, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +void ffi_closure_SYSV (void) FFI_HIDDEN; +void ffi_closure_VFP (void) FFI_HIDDEN; + +#ifdef FFI_GO_CLOSURES +void ffi_go_closure_SYSV (void) FFI_HIDDEN; +void ffi_go_closure_VFP (void) FFI_HIDDEN; +#endif + +/* the cif must already be prep'ed */ + +#if defined(__FreeBSD__) && defined(__arm__) +#define __clear_cache(start, end) do { \ + struct arm_sync_icache_args ua; \ + \ + ua.addr = (uintptr_t)(start); \ + ua.len = (char *)(end) - (char *)start; \ + sysarch(ARM_SYNC_ICACHE, &ua); \ + } while (0); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, + ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + void (*closure_func) (void) = ffi_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. 
*/ + if (cif->vfp_used) + closure_func = ffi_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + +#if FFI_EXEC_TRAMPOLINE_TABLE + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = closure_func; +#else + +#ifndef _M_ARM + memcpy(closure->tramp, ffi_arm_trampoline, 8); +#else + // cast away function type so MSVC doesn't set the lower bit of the function pointer + memcpy(closure->tramp, (void*)((uintptr_t)ffi_arm_trampoline & 0xFFFFFFFE), FFI_TRAMPOLINE_CLOSURE_OFFSET); +#endif + +#if defined (__QNX__) + msync(closure->tramp, 8, 0x1000000); /* clear data map */ + msync(codeloc, 8, 0x1000000); /* clear insn map */ +#elif defined(_MSC_VER) + FlushInstructionCache(GetCurrentProcess(), closure->tramp, FFI_TRAMPOLINE_SIZE); +#else + __clear_cache(closure->tramp, closure->tramp + 8); /* clear data map */ + __clear_cache(codeloc, codeloc + 8); /* clear insn map */ +#endif +#ifdef _M_ARM + *(void(**)(void))(closure->tramp + FFI_TRAMPOLINE_CLOSURE_FUNCTION) = closure_func; +#else + *(void (**)(void))(closure->tramp + 8) = closure_func; +#endif +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + void (*closure_func) (void) = ffi_go_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_go_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = closure_func; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif + +/* Below are routines for VFP hard-float support. */ + +/* A subroutine of vfp_type_p. Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of vfp_type_p. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY is an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is a floating point scalar. + + Returns non-zero iff TY is an HFA. The result is an encoded value where + bits 0-7 contain the type code, and bits 8-10 contain the element count. */ + +static int +vfp_type_p (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. 
*/ + candidate = ty->type; + switch (ty->type) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + if (candidate != FFI_TYPE_FLOAT && candidate != FFI_TYPE_DOUBLE) + return 0; + ele_count = 2; + goto done; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 32 bytes. */ + size = ty->size; + if (size < 4 || size > 32) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return (ele_count << 8) | candidate; +} + +static int +place_vfp_arg (ffi_cif *cif, int h) +{ + unsigned short reg = cif->vfp_reg_free; + int align = 1, nregs = h >> 8; + + if ((h & 0xff) == FFI_TYPE_DOUBLE) + align = 2, nregs *= 2; + + /* Align register number. */ + if ((reg & 1) && align == 2) + reg++; + + while (reg + nregs <= 16) + { + int s, new_used = 0; + for (s = reg; s < reg + nregs; s++) + { + new_used |= (1 << s); + if (cif->vfp_used & (1 << s)) + { + reg += align; + goto next_reg; + } + } + /* Found regs to allocate. */ + cif->vfp_used |= new_used; + cif->vfp_args[cif->vfp_nargs++] = (signed char)reg; + + /* Update vfp_reg_free. */ + if (cif->vfp_used & (1 << cif->vfp_reg_free)) + { + reg += nregs; + while (cif->vfp_used & (1 << reg)) + reg += 1; + cif->vfp_reg_free = reg; + } + return 0; + next_reg:; + } + // done, mark all regs as used + cif->vfp_reg_free = 16; + cif->vfp_used = 0xFFFF; + return 1; +} + +static void +layout_vfp_args (ffi_cif * cif) +{ + unsigned int i; + /* Init VFP fields */ + cif->vfp_used = 0; + cif->vfp_nargs = 0; + cif->vfp_reg_free = 0; + memset (cif->vfp_args, -1, 16); /* Init to -1. */ + + for (i = 0; i < cif->nargs; i++) + { + int h = vfp_type_p (cif->arg_types[i]); + if (h && place_vfp_arg (cif, h) == 1) + break; + } +} + +#endif /* __arm__ or _M_ARM */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/ffitarget.h new file mode 100644 index 0000000..cb57b84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/ffitarget.h @@ -0,0 +1,89 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#if defined(__ARM_PCS_VFP) || defined(_M_ARM) + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#ifndef _M_ARM +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#ifdef _MSC_VER +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_FUNCTION 12 +#else +#define FFI_TRAMPOLINE_SIZE 12 +#endif +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/internal.h new file mode 100644 index 0000000..6cf0b2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/internal.h @@ -0,0 +1,7 @@ +#define ARM_TYPE_VFP_S 0 +#define ARM_TYPE_VFP_D 1 +#define ARM_TYPE_VFP_N 2 +#define ARM_TYPE_INT64 3 +#define ARM_TYPE_INT 4 +#define ARM_TYPE_VOID 5 +#define ARM_TYPE_STRUCT 6 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/sysv.S new file mode 100644 index 0000000..74bc53f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/sysv.S @@ -0,0 +1,385 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. 
+ + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __arm__ +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. */ +#ifndef __ARM_ARCH +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ + || defined(__ARM_ARCH_6M__) +# define __ARM_ARCH 6 +# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \ + || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# else +# define __ARM_ARCH 4 +# endif +#endif + +/* Conditionally compile unwinder directives. */ +#ifdef __ARM_EABI__ +# define UNWIND(...) __VA_ARGS__ +#else +# define UNWIND(...) +#endif + +#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__) + .cfi_sections .debug_frame +#endif + +#define CONCAT(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +#ifdef __USER_LABEL_PREFIX__ +# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X) +#else +# define CNAME(X) X +#endif +#ifdef __ELF__ +# define SIZE(X) .size CNAME(X), . - CNAME(X) +# define TYPE(X, Y) .type CNAME(X), Y +#else +# define SIZE(X) +# define TYPE(X, Y) +#endif + +#define ARM_FUNC_START_LOCAL(name) \ + .align 3; \ + TYPE(CNAME(name), %function); \ + CNAME(name): + +#define ARM_FUNC_START(name) \ + .globl CNAME(name); \ + FFI_HIDDEN(CNAME(name)); \ + ARM_FUNC_START_LOCAL(name) + +#define ARM_FUNC_END(name) \ + SIZE(name) + +/* Aid in defining a jump table with 8 bytes between entries. */ +/* ??? The clang assembler doesn't handle .if with symbolic expressions. */ +#ifdef __clang__ +# define E(index) +#else +# define E(index) \ + .if . - 0b - 8*index; \ + .error "type table out of sync"; \ + .endif +#endif + + .text + .syntax unified + .arm + +#ifndef __clang__ + /* We require interworking on LDM, which implies ARMv5T, + which implies the existance of BLX. */ + .arch armv5t +#endif + + /* Note that we use STC and LDC to encode VFP instructions, + so that we do not need ".fpu vfp", nor get that added to + the object file attributes. These will not be executed + unless the FFI_VFP abi is used. 
*/ + + @ r0: stack + @ r1: frame + @ r2: fn + @ r3: vfp_used + +ARM_FUNC_START(ffi_call_VFP) + UNWIND(.fnstart) + cfi_startproc + + cmp r3, #3 @ load only d0 if possible +#ifdef __clang__ + vldrle d0, [r0] + vldmgt r0, {d0-d7} +#else + ldcle p11, cr0, [r0] @ vldrle d0, [r0] + ldcgt p11, cr0, [r0], {16} @ vldmgt r0, {d0-d7} +#endif + add r0, r0, #64 @ discard the vfp register args + /* FALLTHRU */ +ARM_FUNC_END(ffi_call_VFP) + +ARM_FUNC_START(ffi_call_SYSV) + stm r1, {fp, lr} + mov fp, r1 + + @ This is a bit of a lie wrt the origin of the unwind info, but + @ now we've got the usual frame pointer and two saved registers. + UNWIND(.save {fp,lr}) + UNWIND(.setfp fp, sp) + cfi_def_cfa(fp, 8) + cfi_rel_offset(fp, 0) + cfi_rel_offset(lr, 4) + + mov sp, r0 @ install the stack pointer + mov lr, r2 @ move the fn pointer out of the way + ldr ip, [fp, #16] @ install the static chain + ldmia sp!, {r0-r3} @ move first 4 parameters in registers. + blx lr @ call fn + + @ Load r2 with the pointer to storage for the return value + @ Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + @ Deallocate the stack with the arguments. + mov sp, fp + cfi_def_cfa_register(sp) + + @ Store values stored in registers. + .align 3 + add pc, pc, r3, lsl #3 + nop +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vstr s0, [r2] +#else + stc p10, cr0, [r2] @ vstr s0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vstr d0, [r2] +#else + stc p11, cr0, [r2] @ vstr d0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vstm r2, {d0-d3} +#else + stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3} +#endif + pop {fp,pc} +E(ARM_TYPE_INT64) + str r1, [r2, #4] + nop +E(ARM_TYPE_INT) + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID) + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT) + pop {fp,pc} + + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_call_SYSV) + + +/* + int ffi_closure_inner_* (cif, fun, user_data, frame) +*/ + +ARM_FUNC_START(ffi_go_closure_SYSV) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_SYSV) + +ARM_FUNC_START(ffi_closure_SYSV) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 @ compute entry sp + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) + stmdb sp!, {ip,lr} + + /* Remember that EABI unwind info only applies at call sites. + We need do nothing except note the save of the stack pointer + and the link registers. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_SYSV) + + @ Load values returned in registers. 
+ add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_SYSV) + +ARM_FUNC_START(ffi_go_closure_VFP) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_VFP) + +ARM_FUNC_START(ffi_closure_VFP) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) +#ifdef __clang__ + vstm sp, {d0-d7} +#else + stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7} +#endif + stmdb sp!, {ip,lr} + + /* See above. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_VFP) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_VFP) + +/* Load values returned in registers for both closure entry points. + Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. */ + +ARM_FUNC_START_LOCAL(ffi_closure_ret) + cfi_startproc + cfi_rel_offset(sp, 0) + cfi_rel_offset(lr, 4) +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vldr s0, [r2] +#else + ldc p10, cr0, [r2] @ vldr s0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vldr d0, [r2] +#else + ldc p11, cr0, [r2] @ vldr d0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vldm r2, {d0-d3} +#else + ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3} +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_INT64) + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT) + ldr r0, [r2] + ldm sp, {sp,pc} +E(ARM_TYPE_VOID) + ldm sp, {sp,pc} + nop +E(ARM_TYPE_STRUCT) + ldm sp, {sp,pc} + cfi_endproc +ARM_FUNC_END(ffi_closure_ret) + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + +.align PAGE_MAX_SHIFT +ARM_FUNC_START(ffi_closure_trampoline_table_page) +.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr ip, #-PAGE_MAX_SIZE @ the config page is PAGE_MAX_SIZE behind the trampoline page + sub ip, #8 @ account for pc bias + ldr pc, [ip, #4] @ jump to ffi_closure_SYSV or ffi_closure_VFP +.endr +ARM_FUNC_END(ffi_closure_trampoline_table_page) +#endif + +#else + +ARM_FUNC_START(ffi_arm_trampoline) +0: adr ip, 0b + ldr pc, 1f +1: .long 0 +ARM_FUNC_END(ffi_arm_trampoline) + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ +#endif /* __arm__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S new file mode 100644 index 0000000..5c99d02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/arm/sysv_msvc_arm32.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. 
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2019 Microsoft Corporation. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" +#include "ksarm.h" + + + ; 8 byte aligned AREA to support 8 byte aligned jump tables + MACRO + NESTED_ENTRY_FFI $FuncName, $AreaName, $ExceptHandler + + ; compute the function's labels + __DeriveFunctionLabels $FuncName + + ; determine the area we will put the function into +__FuncArea SETS "|.text|" + IF "$AreaName" != "" +__FuncArea SETS "$AreaName" + ENDIF + + ; set up the exception handler itself +__FuncExceptionHandler SETS "" + IF "$ExceptHandler" != "" +__FuncExceptionHandler SETS "|$ExceptHandler|" + ENDIF + + ; switch to the specified area, jump tables require 8 byte alignment + AREA $__FuncArea,CODE,CODEALIGN,ALIGN=3,READONLY + + ; export the function name + __ExportProc $FuncName + + ; flush any pending literal pool stuff + ROUT + + ; reset the state of the unwind code tracking + __ResetUnwindState + + MEND + +; MACRO +; TABLE_ENTRY $Type, $Table +;$Type_$Table +; MEND + +#define E(index,table) return_##index##_##table + + ; r0: stack + ; r1: frame + ; r2: fn + ; r3: vfp_used + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_VFP + cmp r3, #3 ; load only d0 if possible + vldrle d0, [r0] + vldmgt r0, {d0-d7} + add r0, r0, #64 ; discard the vfp register args + b ffi_call_SYSV + NESTED_END ffi_call_VFP_fake + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_call_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_call_SYSV + stm r1, {fp, lr} + mov fp, r1 + + mov sp, r0 ; install the stack pointer + mov lr, r2 ; move the fn pointer out of the way + ldr ip, [fp, #16] ; install the static chain + ldmia sp!, {r0-r3} ; move first 4 parameters in registers. + blx lr ; call fn + + ; Load r2 with the pointer to storage for the return value + ; Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + ; Deallocate the stack with the arguments. + mov sp, fp + + ; Store values stored in registers. 
+ ALIGN 8 + lsl r3, #3 + add r3, r3, pc + add r3, #8 + mov pc, r3 + + +E(ARM_TYPE_VFP_S, ffi_call) + ALIGN 8 + vstr s0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_D, ffi_call) + ALIGN 8 + vstr d0, [r2] + pop {fp,pc} +E(ARM_TYPE_VFP_N, ffi_call) + ALIGN 8 + vstm r2, {d0-d3} + pop {fp,pc} +E(ARM_TYPE_INT64, ffi_call) + ALIGN 8 + str r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_call) + ALIGN 8 + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID, ffi_call) + ALIGN 8 + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT, ffi_call) + ALIGN 8 + cmp r3, #ARM_TYPE_STRUCT + pop {fp,pc} + NESTED_END ffi_call_SYSV_fake + + IMPORT |ffi_closure_inner_SYSV| + /* + int ffi_closure_inner_SYSV + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_SYSV + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_SYSV_0 + NESTED_END ffi_go_closure_SYSV + + ; r3: ffi_closure + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + NESTED_ENTRY_FFI ffi_closure_SYSV_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + ALTERNATE_ENTRY ffi_closure_SYSV + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; ffi_closure->cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; ffi_closure->fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; ffi_closure->user_data + + ALTERNATE_ENTRY ffi_go_closure_SYSV_0 + add ip, sp, #16 ; compute entry sp + + sub sp, sp, #64+32 ; allocate frame parameter (sizeof(vfp_space) = 64, sizeof(result) = 32) + mov r3, sp ; set frame parameter + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_SYSV ; call the Python closure + + ; Load values returned in registers. + add r2, sp, #64+8 ; address of closure_frame->result + bl ffi_closure_ret ; move result to correct register or memory for type + + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_SYSV_fake + + IMPORT |ffi_closure_inner_VFP| + /* + int ffi_closure_inner_VFP + ( + cif, ; r0 + fun, ; r1 + user_data, ; r2 + frame ; r3 + ) + */ + + NESTED_ENTRY_FFI ffi_go_closure_VFP + stmdb sp!, {r0-r3} ; save argument regs + ldr r0, [ip, #4] ; load cif + ldr r1, [ip, #8] ; load fun + mov r2, ip ; load user_data + b ffi_go_closure_VFP_0 + NESTED_END ffi_go_closure_VFP + + ; fake entry point exists only to generate exists only to + ; generate .pdata for exception unwinding + ; r3: closure + NESTED_ENTRY_FFI ffi_closure_VFP_fake + PROLOG_PUSH {r11, lr} ; save fp and lr for unwind + + ALTERNATE_ENTRY ffi_closure_VFP + ldmfd sp!, {ip,r0} ; restore fp (r0 is used for stack alignment) + stmdb sp!, {r0-r3} ; save argument regs + + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] ; load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] ; load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] ; load user_data + + ALTERNATE_ENTRY ffi_go_closure_VFP_0 + add ip, sp, #16 ; compute entry sp + sub sp, sp, #32 ; save space for closure_frame->result + vstmdb sp!, {d0-d7} ; push closure_frame->vfp_space + + mov r3, sp ; save closure_frame + stmdb sp!, {ip,lr} + + bl ffi_closure_inner_VFP + + ; Load values returned in registers. + add r2, sp, #64+8 ; load result + bl ffi_closure_ret + ldmfd sp!, {ip,lr} + mov sp, ip ; restore stack pointer + mov pc, lr + NESTED_END ffi_closure_VFP_fake + +/* Load values returned in registers for both closure entry points. 
+ Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. */ + + NESTED_ENTRY_FFI ffi_closure_ret + stmdb sp!, {fp,lr} + + ALIGN 8 + lsl r0, #3 + add r0, r0, pc + add r0, #8 + mov pc, r0 + +E(ARM_TYPE_VFP_S, ffi_closure) + ALIGN 8 + vldr s0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_D, ffi_closure) + ALIGN 8 + vldr d0, [r2] + b call_epilogue +E(ARM_TYPE_VFP_N, ffi_closure) + ALIGN 8 + vldm r2, {d0-d3} + b call_epilogue +E(ARM_TYPE_INT64, ffi_closure) + ALIGN 8 + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT, ffi_closure) + ALIGN 8 + ldr r0, [r2] + b call_epilogue +E(ARM_TYPE_VOID, ffi_closure) + ALIGN 8 + b call_epilogue + nop +E(ARM_TYPE_STRUCT, ffi_closure) + ALIGN 8 + b call_epilogue +call_epilogue + ldmfd sp!, {fp,pc} + NESTED_END ffi_closure_ret + + AREA |.trampoline|, DATA, THUMB, READONLY + EXPORT |ffi_arm_trampoline| +|ffi_arm_trampoline| DATA +thisproc adr ip, thisproc + stmdb sp!, {ip, r0} + ldr pc, [pc, #0] + DCD 0 + ;ENDP + + END \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/ffi.c new file mode 100644 index 0000000..3d43397 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/ffi.c @@ -0,0 +1,423 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include + +/* #define DEBUG */ + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned int, unsigned int, unsigned int*, unsigned int, + void (*fn)(void)); +extern void ffi_closure_SYSV (ffi_closure *); + +unsigned int pass_struct_on_stack(ffi_type *type) +{ + if(type->type != FFI_TYPE_STRUCT) + return 0; + + if(type->alignment < type->size && + !(type->size == 4 || type->size == 8) && + !(type->size == 8 && type->alignment >= 4)) + return 1; + + if(type->size == 3 || type->size == 5 || type->size == 6 || + type->size == 7) + return 1; + + return 0; +} + +/* ffi_prep_args is called by the assembly routine once stack space + * has been allocated for the function's arguments + * + * This is annoyingly complex since we need to keep track of used + * registers. 
+ */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + char *reg_base = stack; + char *stack_base = stack + 20; + unsigned int stack_offset = 0; + unsigned int reg_mask = 0; + + p_argv = ecif->avalue; + + /* If cif->flags is struct then we know it's not passed in registers */ + if(ecif->cif->flags == FFI_TYPE_STRUCT) + { + *(void**)reg_base = ecif->rvalue; + reg_mask |= 1; + } + + for(i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + int type = (*p_arg)->type; + char *addr = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + else if(z == sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + addr = reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + addr = reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + addr = reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!addr) + { + addr = stack_base + stack_offset; + stack_offset += z; + } + + if(type == FFI_TYPE_STRUCT && (*p_arg)->elements[1] == NULL) + type = (*p_arg)->elements[0]->type; + + switch(type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8 *)(*p_argv); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8 *)(*p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16 *)(*p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16 *)(*p_argv); + break; + default: + memcpy(addr, *p_argv, z); + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < 5; i++) + { + if((reg_mask & (1 << i)) == 0) + printf("r%d: (unused)\n", 12 - i); + else + printf("r%d: 0x%08x\n", 12 - i, ((unsigned int*)reg_base)[i]); + } + + for(i = 0; i < stack_offset / 4; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack_base)[i]); + } +#endif +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Round the stack up to a multiple of 8 bytes. This isn't needed + * everywhere, but it is on some platforms, and it doesn't harm + * anything when it isn't needed. 
*/ + cif->bytes = (cif->bytes + 7) & ~7; + + /* Flag to indicate that he return value is in fact a struct */ + cif->rstruct_flag = 0; + + /* Set the return type flag */ + switch(cif->rtype->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + cif->flags = (unsigned)FFI_TYPE_UINT8; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = (unsigned)FFI_TYPE_UINT16; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + cif->flags = (unsigned)FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned)FFI_TYPE_UINT64; + break; + case FFI_TYPE_STRUCT: + cif->rstruct_flag = 1; + if(!pass_struct_on_stack(cif->rtype)) + { + if(cif->rtype->size <= 1) + cif->flags = (unsigned)FFI_TYPE_UINT8; + else if(cif->rtype->size <= 2) + cif->flags = (unsigned)FFI_TYPE_UINT16; + else if(cif->rtype->size <= 4) + cif->flags = (unsigned)FFI_TYPE_UINT32; + else if(cif->rtype->size <= 8) + cif->flags = (unsigned)FFI_TYPE_UINT64; + else + cif->flags = (unsigned)cif->rtype->type; + } + else + cif->flags = (unsigned)cif->rtype->type; + break; + default: + cif->flags = (unsigned)cif->rtype->type; + break; + } + + return FFI_OK; +} + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + unsigned int size = 0, i = 0; + ffi_type **p_arg; + + ecif.cif = cif; + ecif.avalue = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + /* If the return value is a struct and we don't have a return value + * address then we need to make one */ + + /* If cif->flags is struct then it's not suitable for registers */ + if((rvalue == NULL) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch(cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, size, cif->flags, + ecif.rvalue, cif->rstruct_flag, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif) +{ + register unsigned int i, reg_mask = 0; + register void **p_argv; + register ffi_type **p_arg; + register char *reg_base = stack; + register char *stack_base = stack + 20; + register unsigned int stack_offset = 0; + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs + 7; i++) + { + printf("sp+%d: 0x%08x\n", i*4, ((unsigned int*)stack)[i]); + } +#endif + + /* If cif->flags is struct then we know it's not passed in registers */ + if(cif->flags == FFI_TYPE_STRUCT) + { + *rvalue = *(void **)reg_base; + reg_mask |= 1; + } + + p_argv = avalue; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + { + size_t z = (*p_arg)->size; + int alignment = (*p_arg)->alignment; + + *p_argv = 0; + + if(z % 4 != 0) + z += (4 - z % 4); + + if(reg_mask != 0x1f) + { + if(pass_struct_on_stack(*p_arg)) + { + *p_argv = (void*)stack_base + stack_offset; + stack_offset += z; + } + else if(z <= sizeof(int)) + { + char index = 0; + + while((reg_mask >> index) & 1) + index++; + + *p_argv = (void*)reg_base + (index * 4); + reg_mask |= (1 << index); + } + else if(z == 2 * sizeof(int)) + { + if(!((reg_mask >> 1) & 1)) + { + *p_argv = (void*)reg_base + 4; + reg_mask |= (3 << 1); + } + else if(!((reg_mask >> 3) & 1)) + { + *p_argv = (void*)reg_base + 12; + reg_mask |= (3 << 3); + } + } + } + + if(!*p_argv) + { + *p_argv = (void*)stack_base + stack_offset; + 
stack_offset += z; + } + + if((*p_arg)->type != FFI_TYPE_STRUCT || + (*p_arg)->elements[1] == NULL) + { + if(alignment == 1) + **(unsigned int**)p_argv <<= 24; + else if(alignment == 2) + **(unsigned int**)p_argv <<= 16; + } + + p_argv++; + } + +#ifdef DEBUG + /* Debugging */ + for(i = 0; i < cif->nargs; i++) + { + printf("sp+%d: 0x%08x\n", i*4, *(((unsigned int**)avalue)[i])); + } +#endif +} + +/* This function is jumped to by the trampoline */ + +unsigned int ffi_closure_SYSV_inner(ffi_closure *closure, void **respp, + void *args) +{ + ffi_cif *cif; + void **arg_area; + unsigned int i, size = 0; + ffi_type **p_arg; + + cif = closure->cif; + + for(i = 0, p_arg = cif->arg_types; i < cif->nargs; i++, p_arg++) + size += (*p_arg)->size + (4 - (*p_arg)->size % 4); + + arg_area = (void **)alloca(size); + + /* this call will initialize ARG_AREA, such that each element in that + * array points to the corresponding value on the stack; and if the + * function returns a structure, it will re-set RESP to point to the + * structure return address. */ + + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif); + + (closure->fun)(cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status ffi_prep_closure_loc(ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + unsigned char *__tramp = (unsigned char*)(&closure->tramp[0]); + unsigned int __fun = (unsigned int)(&ffi_closure_SYSV); + unsigned int __ctx = (unsigned int)(codeloc); + unsigned int __rstruct_flag = (unsigned int)(cif->rstruct_flag); + unsigned int __inner = (unsigned int)(&ffi_closure_SYSV_inner); + *(unsigned int*) &__tramp[0] = 0xebcd1f00; /* pushm r8-r12 */ + *(unsigned int*) &__tramp[4] = 0xfefc0010; /* ld.w r12, pc[16] */ + *(unsigned int*) &__tramp[8] = 0xfefb0010; /* ld.w r11, pc[16] */ + *(unsigned int*) &__tramp[12] = 0xfefa0010; /* ld.w r10, pc[16] */ + *(unsigned int*) &__tramp[16] = 0xfeff0010; /* ld.w pc, pc[16] */ + *(unsigned int*) &__tramp[20] = __ctx; + *(unsigned int*) &__tramp[24] = __rstruct_flag; + *(unsigned int*) &__tramp[28] = __inner; + *(unsigned int*) &__tramp[32] = __fun; + syscall(__NR_cacheflush, 0, (&__tramp[0]), 36); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/ffitarget.h new file mode 100644 index 0000000..d0c7586 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/ffitarget.h @@ -0,0 +1,55 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2009 Bradley Smith + Target configuration macros for AVR32. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS unsigned int rstruct_flag + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 36 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/sysv.S new file mode 100644 index 0000000..a984b3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/avr32/sysv.S @@ -0,0 +1,208 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2009 Bradley Smith + + AVR32 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + --------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* r12: ffi_prep_args + * r11: &ecif + * r10: size + * r9: cif->flags + * r8: ecif.rvalue + * sp+0: cif->rstruct_flag + * sp+4: fn */ + + .text + .align 1 + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + stm --sp, r0,r1,lr + stm --sp, r8-r12 + mov r0, sp + + /* Make room for all of the new args. */ + sub sp, r10 + /* Pad to make way for potential skipped registers */ + sub sp, 20 + + /* Call ffi_prep_args(stack, &ecif). */ + /* r11 already set */ + mov r1, r12 + mov r12, sp + icall r1 + + /* Save new argument size */ + mov r1, r12 + + /* Move first 5 parameters in registers. */ + ldm sp++, r8-r12 + + /* call (fn) (...). */ + ld.w r1, r0[36] + icall r1 + + /* Remove the space we pushed for the args. 
*/ + mov sp, r0 + + /* Load r1 with the rstruct flag. */ + ld.w r1, sp[32] + + /* Load r9 with the return type code. */ + ld.w r9, sp[12] + + /* Load r8 with the return value pointer. */ + ld.w r8, sp[16] + + /* If the return value pointer is NULL, assume no return value. */ + cp.w r8, 0 + breq .Lend + + /* Check if return type is actually a struct */ + cp.w r1, 0 + breq 1f + + /* Return 8bit */ + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore8 + + /* Return 16bit */ + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore16 + +1: + /* Return 32bit */ + cp.w r9, FFI_TYPE_UINT32 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT16 + breq .Lstore32 + cp.w r9, FFI_TYPE_UINT8 + breq .Lstore32 + + /* Return 64bit */ + cp.w r9, FFI_TYPE_UINT64 + breq .Lstore64 + + /* Didn't match anything */ + bral .Lend + +.Lstore64: + st.w r8[0], r11 + st.w r8[4], r10 + bral .Lend + +.Lstore32: + st.w r8[0], r12 + bral .Lend + +.Lstore16: + st.h r8[0], r12 + bral .Lend + +.Lstore8: + st.b r8[0], r12 + bral .Lend + +.Lend: + sub sp, -20 + ldm sp++, r0,r1,pc + + .size ffi_call_SYSV, . - ffi_call_SYSV + + + /* r12: __ctx + * r11: __rstruct_flag + * r10: __inner */ + + .align 1 + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + stm --sp, r0,lr + mov r0, r11 + mov r8, r10 + sub r10, sp, -8 + sub sp, 12 + st.w sp[8], sp + sub r11, sp, -8 + icall r8 + + /* Check if return type is actually a struct */ + cp.w r0, 0 + breq 1f + + /* Return 8bit */ + cp.w r12, FFI_TYPE_UINT8 + breq .Lget8 + + /* Return 16bit */ + cp.w r12, FFI_TYPE_UINT16 + breq .Lget16 + +1: + /* Return 32bit */ + cp.w r12, FFI_TYPE_UINT32 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT16 + breq .Lget32 + cp.w r12, FFI_TYPE_UINT8 + breq .Lget32 + + /* Return 64bit */ + cp.w r12, FFI_TYPE_UINT64 + breq .Lget64 + + /* Didn't match anything */ + bral .Lclend + +.Lget64: + ld.w r11, sp[0] + ld.w r10, sp[4] + bral .Lclend + +.Lget32: + ld.w r12, sp[0] + bral .Lclend + +.Lget16: + ld.uh r12, sp[0] + bral .Lclend + +.Lget8: + ld.ub r12, sp[0] + bral .Lclend + +.Lclend: + sub sp, -12 + ldm sp++, r0,lr + sub sp, -20 + mov pc, lr + + .size ffi_closure_SYSV, . - ffi_closure_SYSV + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/ffi.c new file mode 100644 index 0000000..22a2acd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/ffi.c @@ -0,0 +1,196 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Alexandre K. I. de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ +#include +#include + +#include +#include + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 3 + +/* + * Return types + */ +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 + +/*====================================================================*/ +/* PROTOTYPE * + /*====================================================================*/ +void ffi_prep_args(unsigned char *, extended_cif *); + +/*====================================================================*/ +/* Externals */ +/* (Assembly) */ +/*====================================================================*/ + +extern void ffi_call_SYSV(unsigned, extended_cif *, void(*)(unsigned char *, extended_cif *), unsigned, void *, void(*fn)(void)); + +/*====================================================================*/ +/* Implementation */ +/* */ +/*====================================================================*/ + + +/* + * This function calculates the return type (size) based on type. + */ + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* --------------------------------------* + * Return handling * + * --------------------------------------*/ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + cif->flags = FFIBFIN_RET_VOID; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + cif->flags = FFIBFIN_RET_HALFWORD; + break; + case FFI_TYPE_UINT8: + cif->flags = FFIBFIN_RET_BYTE; + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_SINT8: + cif->flags = FFIBFIN_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFIBFIN_RET_INT64; + break; + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4){ + cif->flags = FFIBFIN_RET_INT32; + }else if (cif->rtype->size == 8){ + cif->flags = FFIBFIN_RET_INT64; + }else{ + //it will return via a hidden pointer in P0 + cif->flags = FFIBFIN_RET_VOID; + } + break; + default: + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* + * This will prepare the arguments and will call the assembly routine + * cif = the call interface + * fn = the function to be called + * rvalue = the return value + * avalue = the arguments + */ +void ffi_call(ffi_cif *cif, void(*fn)(void), void *rvalue, void **avalue) +{ + int ret_type = cif->flags; + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(cif->bytes, &ecif, ffi_prep_args, ret_type, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +/* +* This function prepares the parameters (copies them from the ecif to the stack) +* to call the function (ffi_prep_args is called by the assembly routine in file +* sysv.S, which also calls the actual function) +*/ +void ffi_prep_args(unsigned char *stack, extended_cif *ecif) +{ + register unsigned int i = 0; + void **p_argv; + unsigned char *argp; + ffi_type **p_arg; + argp = stack; + p_argv = ecif->avalue; + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, 
p_arg++) { + size_t z; + z = (*p_arg)->size; + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) { + case FFI_TYPE_SINT8: { + signed char v = *(SINT8 *)(* p_argv); + signed int t = v; + *(signed int *) argp = t; + } + break; + case FFI_TYPE_UINT8: { + unsigned char v = *(UINT8 *)(* p_argv); + unsigned int t = v; + *(unsigned int *) argp = t; + } + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) * (SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) * (UINT16 *)(* p_argv); + break; + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + break; + } + } else if (z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int) * (UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + } +} + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/ffitarget.h new file mode 100644 index 0000000..2175c01 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/ffitarget.h @@ -0,0 +1,43 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012 Alexandre K. I. de Mendonca + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/sysv.S new file mode 100644 index 0000000..f4278be --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/bfin/sysv.S @@ -0,0 +1,179 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012 Alexandre K. I. 
de Mendonca , + Paulo Pizarro + + Blackfin Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text +.align 4 + + /* + There is a "feature" in the bfin toolchain that it puts a _ before function names + that's why the function here it's called _ffi_call_SYSV and not ffi_call_SYSV + */ + .global _ffi_call_SYSV; + .type _ffi_call_SYSV, STT_FUNC; + .func ffi_call_SYSV + + /* + cif->bytes = R0 (fp+8) + &ecif = R1 (fp+12) + ffi_prep_args = R2 (fp+16) + ret_type = stack (fp+20) + ecif.rvalue = stack (fp+24) + fn = stack (fp+28) + got (fp+32) + + There is room for improvement here (we can use temporary registers + instead of saving the values in the memory) + REGS: + P5 => Stack pointer (function arguments) + R5 => cif->bytes + R4 => ret->type + + FP-20 = P3 + FP-16 = SP (parameters area) + FP-12 = SP (temp) + FP-08 = function return part 1 [R0] + FP-04 = function return part 2 [R1] + */ + +_ffi_call_SYSV: +.prologue: + LINK 20; + [FP-20] = P3; + [FP+8] = R0; + [FP+12] = R1; + [FP+16] = R2; + +.allocate_stack: + //alocate cif->bytes into the stack + R1 = [FP+8]; + R0 = SP; + R0 = R0 - R1; + R1 = 4; + R0 = R0 - R1; + [FP-12] = SP; + SP = R0; + [FP-16] = SP; + +.call_prep_args: + //get the addr of prep_args + P0 = [P3 + _ffi_prep_args@FUNCDESC_GOT17M4]; + P1 = [P0]; + P3 = [P0+4]; + R0 = [FP-16];//SP (parameter area) + R1 = [FP+12];//ecif + call (P1); + +.call_user_function: + //ajust SP so as to allow the user function access the parameters on the stack + SP = [FP-16]; //point to function parameters + R0 = [SP]; + R1 = [SP+4]; + R2 = [SP+8]; + //load user function address + P0 = FP; + P0 +=28; + P1 = [P0]; + P1 = [P1]; + P3 = [P0+4]; + /* + For functions returning aggregate values (struct) occupying more than 8 bytes, + the caller allocates the return value object on the stack and the address + of this object is passed to the callee as a hidden argument in register P0. 
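The comment above describes the Blackfin rule for large aggregate returns: the caller allocates the result object and its address reaches the callee as a hidden argument in P0. Through the libffi API the caller never touches that register; it simply hands ffi_call a buffer for the result. A minimal sketch of such a call, assuming only a standard libffi installation (this usage code is not part of the patch):

#include <ffi.h>
#include <stdio.h>

struct triple { long a, b, c; };                /* larger than 8 bytes, so returned via the hidden pointer */

static struct triple make_triple(int x) {       /* the function we will call through libffi */
  struct triple t = { x, x + 1, x + 2 };
  return t;
}

int main(void) {
  /* Describe struct triple to libffi. */
  ffi_type *fields[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL };
  ffi_type triple_type;
  triple_type.size = 0;                         /* filled in by ffi_prep_cif */
  triple_type.alignment = 0;
  triple_type.type = FFI_TYPE_STRUCT;
  triple_type.elements = fields;

  ffi_cif cif;
  ffi_type *argtypes[] = { &ffi_type_sint };
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &triple_type, argtypes) != FFI_OK)
    return 1;

  int x = 7;
  void *argvalues[] = { &x };
  struct triple result;                         /* caller-provided return storage */
  ffi_call(&cif, FFI_FN(make_triple), &result, argvalues);

  printf("%ld %ld %ld\n", result.a, result.b, result.c);   /* 7 8 9 */
  return 0;
}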
+ */ + P0 = [FP+24]; + + call (P1); + SP = [FP-12]; +.compute_return: + P2 = [FP-20]; + [FP-8] = R0; + [FP-4] = R1; + + R0 = [FP+20]; + R1 = R0 << 2; + + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R1 = [P2]; + + P2 = [FP+-20]; + R0 = [P2+.rettable@GOT17M4]; + R0 = R1 + R0; + P2 = R0; + R0 = [FP-8]; + R1 = [FP-4]; + jump (P2); + +/* +#define FFIBFIN_RET_VOID 0 +#define FFIBFIN_RET_BYTE 1 +#define FFIBFIN_RET_HALFWORD 2 +#define FFIBFIN_RET_INT64 3 +#define FFIBFIN_RET_INT32 4 +*/ +.align 4 +.align 4 +.rettable: + .dd .epilogue - .rettable + .dd .rbyte - .rettable; + .dd .rhalfword - .rettable; + .dd .rint64 - .rettable; + .dd .rint32 - .rettable; + +.rbyte: + P0 = [FP+24]; + R0 = R0.B (Z); + [P0] = R0; + JUMP .epilogue +.rhalfword: + P0 = [FP+24]; + R0 = R0.L; + [P0] = R0; + JUMP .epilogue +.rint64: + P0 = [FP+24];// &rvalue + [P0] = R0; + [P0+4] = R1; + JUMP .epilogue +.rint32: + P0 = [FP+24]; + [P0] = R0; +.epilogue: + R0 = [FP+8]; + R1 = [FP+12]; + R2 = [FP+16]; + P3 = [FP-20]; + UNLINK; + RTS; + +.size _ffi_call_SYSV,.-_ffi_call_SYSV; +.endfunc diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/closures.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/closures.c new file mode 100644 index 0000000..dfc2f68 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/closures.c @@ -0,0 +1,1021 @@ +/* ----------------------------------------------------------------------- + closures.c - Copyright (c) 2019 Anthony Green + Copyright (c) 2007, 2009, 2010 Red Hat, Inc. + Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + Code to allocate and deallocate memory for closures. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined __linux__ && !defined _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif + +#include +#include +#include + +#ifdef __NetBSD__ +#include +#endif + +#if __NetBSD_Version__ - 0 >= 799007200 +/* NetBSD with PROT_MPROTECT */ +#include + +#include +#include +#ifdef HAVE_SYS_MEMFD_H +#include +#endif + +static const size_t overhead = + (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ? 
+ sizeof(max_align_t) + : sizeof(void *) + sizeof(size_t); + +#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d))) + +void * +ffi_closure_alloc (size_t size, void **code) +{ + static size_t page_size; + size_t rounded_size; + void *codeseg, *dataseg; + int prot; + + /* Expect that PAX mprotect is active and a separate code mapping is necessary. */ + if (!code) + return NULL; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */ + rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1); + + /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */ + prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC); + dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0); + if (dataseg == MAP_FAILED) + return NULL; + + /* Create secondary mapping and switch it to RX. */ + codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP); + if (codeseg == MAP_FAILED) { + munmap(dataseg, rounded_size); + return NULL; + } + if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) { + munmap(codeseg, rounded_size); + munmap(dataseg, rounded_size); + return NULL; + } + + /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */ + memcpy(dataseg, &rounded_size, sizeof(rounded_size)); + memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *)); + *code = ADD_TO_POINTER(codeseg, overhead); + return ADD_TO_POINTER(dataseg, overhead); +} + +void +ffi_closure_free (void *ptr) +{ + void *codeseg, *dataseg; + size_t rounded_size; + + dataseg = ADD_TO_POINTER(ptr, -overhead); + memcpy(&rounded_size, dataseg, sizeof(rounded_size)); + memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *)); + munmap(dataseg, rounded_size); + munmap(codeseg, rounded_size); +} +#else /* !NetBSD with PROT_MPROTECT */ + +#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE +# if __linux__ && !defined(__ANDROID__) +/* This macro indicates it may be forbidden to map anonymous memory + with both write and execute permission. Code compiled when this + option is defined will attempt to map such pages once, but if it + fails, it falls back to creating a temporary file in a writable and + executable filesystem and mapping pages from it into separate + locations in the virtual memory space, one location writable and + another executable. */ +# define FFI_MMAP_EXEC_WRIT 1 +# define HAVE_MNTENT 1 +# endif +# if defined(_WIN32) || defined(__OS2__) +/* Windows systems may have Data Execution Protection (DEP) enabled, + which requires the use of VirtualMalloc/VirtualFree to alloc/free + executable memory. */ +# define FFI_MMAP_EXEC_WRIT 1 +# endif +#endif + +#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX +# if defined(__linux__) && !defined(__ANDROID__) +/* When defined to 1 check for SELinux and if SELinux is active, + don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that + might cause audit messages. 
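The FFI_MMAP_EXEC_WRIT comment above describes a try-once-then-fall-back strategy: attempt a single anonymous writable+executable mapping, and switch to the temporary-file scheme only when the kernel refuses it (dlmmap further down checks for EPERM or EACCES). A stand-alone version of that probe, as a sketch assuming POSIX mmap plus the common MAP_ANONYMOUS extension (not libffi code):

#define _DEFAULT_SOURCE 1
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>

/* Returns 1 if anonymous W+X mappings work, 0 if the kernel forbids them,
   -1 on an unrelated mmap failure. */
static int wx_anon_allowed(void) {
  size_t len = (size_t) sysconf(_SC_PAGESIZE);
  void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return (errno == EPERM || errno == EACCES) ? 0 : -1;
  munmap(p, len);
  return 1;
}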
*/ +# define FFI_MMAP_EXEC_SELINUX 1 +# endif +#endif + +#if FFI_CLOSURES + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ + +#include +#include +#ifdef HAVE_PTRAUTH +#include +#endif +#include +#include + +extern void *ffi_closure_trampoline_table_page; + +typedef struct ffi_trampoline_table ffi_trampoline_table; +typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry; + +struct ffi_trampoline_table +{ + /* contiguous writable and executable pages */ + vm_address_t config_page; + vm_address_t trampoline_page; + + /* free list tracking */ + uint16_t free_count; + ffi_trampoline_table_entry *free_list; + ffi_trampoline_table_entry *free_list_pool; + + ffi_trampoline_table *prev; + ffi_trampoline_table *next; +}; + +struct ffi_trampoline_table_entry +{ + void *(*trampoline) (void); + ffi_trampoline_table_entry *next; +}; + +/* Total number of trampolines that fit in one trampoline table */ +#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE) + +static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER; +static ffi_trampoline_table *ffi_trampoline_tables = NULL; + +static ffi_trampoline_table * +ffi_trampoline_table_alloc (void) +{ + ffi_trampoline_table *table; + vm_address_t config_page; + vm_address_t trampoline_page; + vm_address_t trampoline_page_template; + vm_prot_t cur_prot; + vm_prot_t max_prot; + kern_return_t kt; + uint16_t i; + + /* Allocate two pages -- a config page and a placeholder page */ + config_page = 0x0; + kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2, + VM_FLAGS_ANYWHERE); + if (kt != KERN_SUCCESS) + return NULL; + + /* Remap the trampoline table on top of the placeholder page */ + trampoline_page = config_page + PAGE_MAX_SIZE; + trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page; +#ifdef __arm__ + /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */ + trampoline_page_template &= ~1UL; +#endif + kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, + VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template, + FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE); + if (kt != KERN_SUCCESS) + { + vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2); + return NULL; + } + + /* We have valid trampoline and config pages */ + table = calloc (1, sizeof (ffi_trampoline_table)); + table->free_count = FFI_TRAMPOLINE_COUNT; + table->config_page = config_page; + table->trampoline_page = trampoline_page; + + /* Create and initialize the free list */ + table->free_list_pool = + calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry)); + + for (i = 0; i < table->free_count; i++) + { + ffi_trampoline_table_entry *entry = &table->free_list_pool[i]; + entry->trampoline = + (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE)); + + if (i < table->free_count - 1) + entry->next = &table->free_list_pool[i + 1]; + } + + table->free_list = table->free_list_pool; + + return table; +} + +static void +ffi_trampoline_table_free (ffi_trampoline_table *table) +{ + /* Remove from the list */ + if (table->prev != NULL) + table->prev->next = table->next; + + if (table->next != NULL) + table->next->prev = table->prev; + + /* Deallocate pages */ + vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2); + + /* Deallocate free list */ + free (table->free_list_pool); + free (table); +} + +void * +ffi_closure_alloc (size_t size, void **code) +{ + /* Create the closure */ + ffi_closure *closure = malloc (size); + if (closure == 
NULL) + return NULL; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Check for an active trampoline table with available entries. */ + ffi_trampoline_table *table = ffi_trampoline_tables; + if (table == NULL || table->free_list == NULL) + { + table = ffi_trampoline_table_alloc (); + if (table == NULL) + { + pthread_mutex_unlock (&ffi_trampoline_lock); + free (closure); + return NULL; + } + + /* Insert the new table at the top of the list */ + table->next = ffi_trampoline_tables; + if (table->next != NULL) + table->next->prev = table; + + ffi_trampoline_tables = table; + } + + /* Claim the free entry */ + ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list; + ffi_trampoline_tables->free_list = entry->next; + ffi_trampoline_tables->free_count--; + entry->next = NULL; + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Initialize the return values */ + *code = entry->trampoline; +#ifdef HAVE_PTRAUTH + *code = ptrauth_sign_unauthenticated (*code, ptrauth_key_asia, 0); +#endif + closure->trampoline_table = table; + closure->trampoline_table_entry = entry; + + return closure; +} + +void +ffi_closure_free (void *ptr) +{ + ffi_closure *closure = ptr; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Fetch the table and entry references */ + ffi_trampoline_table *table = closure->trampoline_table; + ffi_trampoline_table_entry *entry = closure->trampoline_table_entry; + + /* Return the entry to the free list */ + entry->next = table->free_list; + table->free_list = entry; + table->free_count++; + + /* If all trampolines within this table are free, and at least one other table exists, deallocate + * the table */ + if (table->free_count == FFI_TRAMPOLINE_COUNT + && ffi_trampoline_tables != table) + { + ffi_trampoline_table_free (table); + } + else if (ffi_trampoline_tables != table) + { + /* Otherwise, bump this table to the top of the list */ + table->prev = NULL; + table->next = ffi_trampoline_tables; + if (ffi_trampoline_tables != NULL) + ffi_trampoline_tables->prev = table; + + ffi_trampoline_tables = table; + } + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Free the closure */ + free (closure); +} + +#endif + +// Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations. + +#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */ + +#define USE_LOCKS 1 +#define USE_DL_PREFIX 1 +#ifdef __GNUC__ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 1 +#endif +#endif + +/* We need to use mmap, not sbrk. */ +#define HAVE_MORECORE 0 + +/* We could, in theory, support mremap, but it wouldn't buy us anything. */ +#define HAVE_MREMAP 0 + +/* We have no use for this, so save some code and data. */ +#define NO_MALLINFO 1 + +/* We need all allocations to be in regular segments, otherwise we + lose track of the corresponding code address. */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + +/* Don't allocate more than a page unless needed. */ +#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize) + +#include +#include +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +#if !defined(_WIN32) +#ifdef HAVE_MNTENT +#include +#endif /* HAVE_MNTENT */ +#include +#include + +/* We don't want sys/mman.h to be included after we redefine mmap and + dlmunmap. 
*/ +#include +#define LACKS_SYS_MMAN_H 1 + +#if FFI_MMAP_EXEC_SELINUX +#include +#include + +static int selinux_enabled = -1; + +static int +selinux_enabled_check (void) +{ + struct statfs sfs; + FILE *f; + char *buf = NULL; + size_t len = 0; + + if (statfs ("/selinux", &sfs) >= 0 + && (unsigned int) sfs.f_type == 0xf97cff8cU) + return 1; + f = fopen ("/proc/mounts", "r"); + if (f == NULL) + return 0; + while (getline (&buf, &len, f) >= 0) + { + char *p = strchr (buf, ' '); + if (p == NULL) + break; + p = strchr (p + 1, ' '); + if (p == NULL) + break; + if (strncmp (p + 1, "selinuxfs ", 10) == 0) + { + free (buf); + fclose (f); + return 1; + } + } + free (buf); + fclose (f); + return 0; +} + +#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \ + : (selinux_enabled = selinux_enabled_check ())) + +#else + +#define is_selinux_enabled() 0 + +#endif /* !FFI_MMAP_EXEC_SELINUX */ + +/* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */ +#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX +#include + +static int emutramp_enabled = -1; + +static int +emutramp_enabled_check (void) +{ + char *buf = NULL; + size_t len = 0; + FILE *f; + int ret; + f = fopen ("/proc/self/status", "r"); + if (f == NULL) + return 0; + ret = 0; + + while (getline (&buf, &len, f) != -1) + if (!strncmp (buf, "PaX:", 4)) + { + char emutramp; + if (sscanf (buf, "%*s %*c%c", &emutramp) == 1) + ret = (emutramp == 'E'); + break; + } + free (buf); + fclose (f); + return ret; +} + +#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \ + : (emutramp_enabled = emutramp_enabled_check ())) +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +#elif defined (__CYGWIN__) || defined(__INTERIX) + +#include + +/* Cygwin is Linux-like, but not quite that Linux-like. */ +#define is_selinux_enabled() 0 + +#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */ + +#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX +#define is_emutramp_enabled() 0 +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Declare all functions defined in dlmalloc.c as static. */ +static void *dlmalloc(size_t); +static void dlfree(void*); +static void *dlcalloc(size_t, size_t) MAYBE_UNUSED; +static void *dlrealloc(void *, size_t) MAYBE_UNUSED; +static void *dlmemalign(size_t, size_t) MAYBE_UNUSED; +static void *dlvalloc(size_t) MAYBE_UNUSED; +static int dlmallopt(int, int) MAYBE_UNUSED; +static size_t dlmalloc_footprint(void) MAYBE_UNUSED; +static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED; +static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED; +static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED; +static void *dlpvalloc(size_t) MAYBE_UNUSED; +static int dlmalloc_trim(size_t) MAYBE_UNUSED; +static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED; +static void dlmalloc_stats(void) MAYBE_UNUSED; + +#if !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) +/* Use these for mmap and munmap within dlmalloc.c. */ +static void *dlmmap(void *, size_t, int, int, int, off_t); +static int dlmunmap(void *, size_t); +#endif /* !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +#define mmap dlmmap +#define munmap dlmunmap + +#include "dlmalloc.c" + +#undef mmap +#undef munmap + +#if !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) + +/* A mutex used to synchronize access to *exec* variables in this file. 
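Both is_selinux_enabled() and is_emutramp_enabled() above rely on the same idiom: a static int starts at -1 and caches the result of a /proc probe so the probe runs at most once per process. The idiom in isolation (a sketch; the probe body is a stand-in, not libffi's):

#include <stdio.h>

/* Stand-in for an expensive one-time check, e.g. parsing /proc/self/status. */
static int feature_check(void) {
  puts("probing...");                 /* printed only on the first query */
  return 1;
}

static int feature_enabled = -1;      /* -1 = not probed yet, 0/1 = cached answer */
#define is_feature_enabled() \
  (feature_enabled >= 0 ? feature_enabled : (feature_enabled = feature_check()))

int main(void) {
  printf("%d %d\n", is_feature_enabled(), is_feature_enabled());
  return 0;
}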
*/ +static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* A file descriptor of a temporary file from which we'll map + executable pages. */ +static int execfd = -1; + +/* The amount of space already allocated from the temporary file. */ +static size_t execsize = 0; + +#ifdef HAVE_MEMFD_CREATE +/* Open a temporary file name, and immediately unlink it. */ +static int +open_temp_exec_file_memfd (const char *name) +{ + int fd; + fd = memfd_create (name, MFD_CLOEXEC); + return fd; +} +#endif + +/* Open a temporary file name, and immediately unlink it. */ +static int +open_temp_exec_file_name (char *name, int flags) +{ + int fd; + +#ifdef HAVE_MKOSTEMP + fd = mkostemp (name, flags); +#else + fd = mkstemp (name); +#endif + + if (fd != -1) + unlink (name); + + return fd; +} + +/* Open a temporary file in the named directory. */ +static int +open_temp_exec_file_dir (const char *dir) +{ + static const char suffix[] = "/ffiXXXXXX"; + int lendir, flags; + char *tempname; +#ifdef O_TMPFILE + int fd; +#endif + +#ifdef O_CLOEXEC + flags = O_CLOEXEC; +#else + flags = 0; +#endif + +#ifdef O_TMPFILE + fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700); + /* If the running system does not support the O_TMPFILE flag then retry without it. */ + if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) { + return fd; + } else { + errno = 0; + } +#endif + + lendir = (int) strlen (dir); + tempname = __builtin_alloca (lendir + sizeof (suffix)); + + if (!tempname) + return -1; + + memcpy (tempname, dir, lendir); + memcpy (tempname + lendir, suffix, sizeof (suffix)); + + return open_temp_exec_file_name (tempname, flags); +} + +/* Open a temporary file in the directory in the named environment + variable. */ +static int +open_temp_exec_file_env (const char *envvar) +{ + const char *value = getenv (envvar); + + if (!value) + return -1; + + return open_temp_exec_file_dir (value); +} + +#ifdef HAVE_MNTENT +/* Open a temporary file in an executable and writable mount point + listed in the mounts file. Subsequent calls with the same mounts + keep searching for mount points in the same file. Providing NULL + as the mounts file closes the file. */ +static int +open_temp_exec_file_mnt (const char *mounts) +{ + static const char *last_mounts; + static FILE *last_mntent; + + if (mounts != last_mounts) + { + if (last_mntent) + endmntent (last_mntent); + + last_mounts = mounts; + + if (mounts) + last_mntent = setmntent (mounts, "r"); + else + last_mntent = NULL; + } + + if (!last_mntent) + return -1; + + for (;;) + { + int fd; + struct mntent mnt; + char buf[MAXPATHLEN * 3]; + + if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL) + return -1; + + if (hasmntopt (&mnt, "ro") + || hasmntopt (&mnt, "noexec") + || access (mnt.mnt_dir, W_OK)) + continue; + + fd = open_temp_exec_file_dir (mnt.mnt_dir); + + if (fd != -1) + return fd; + } +} +#endif /* HAVE_MNTENT */ + +/* Instructions to look for a location to hold a temporary file that + can be mapped in for execution. 
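Every helper above is after the same thing: a file descriptor for a zero-length file with no remaining name in the filesystem, whose pages can later be mapped executable without leaving an artifact on disk. A condensed sketch of the two main strategies, O_TMPFILE where the kernel supports it and mkstemp-plus-unlink otherwise (error handling trimmed; not libffi code):

#define _GNU_SOURCE 1
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Return an unnamed temporary file descriptor rooted in `dir`, or -1. */
static int open_unlinked_tmp(const char *dir) {
#ifdef O_TMPFILE
  int fd = open(dir, O_TMPFILE | O_RDWR | O_EXCL | O_CLOEXEC, 0700);
  if (fd != -1)
    return fd;                        /* kernel and filesystem support O_TMPFILE */
#endif
  char path[4096];
  snprintf(path, sizeof path, "%s/ffi-demoXXXXXX", dir);
  int fd2 = mkstemp(path);            /* create a named file ...                 */
  if (fd2 != -1)
    unlink(path);                     /* ... and immediately drop its name       */
  return fd2;
}

int main(void) {
  int fd = open_unlinked_tmp("/tmp");
  printf("fd = %d\n", fd);
  if (fd != -1)
    close(fd);
  return 0;
}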
*/ +static struct +{ + int (*func)(const char *); + const char *arg; + int repeat; +} open_temp_exec_file_opts[] = { +#ifdef HAVE_MEMFD_CREATE + { open_temp_exec_file_memfd, "libffi", 0 }, +#endif + { open_temp_exec_file_env, "TMPDIR", 0 }, + { open_temp_exec_file_dir, "/tmp", 0 }, + { open_temp_exec_file_dir, "/var/tmp", 0 }, + { open_temp_exec_file_dir, "/dev/shm", 0 }, + { open_temp_exec_file_env, "HOME", 0 }, +#ifdef HAVE_MNTENT + { open_temp_exec_file_mnt, "/etc/mtab", 1 }, + { open_temp_exec_file_mnt, "/proc/mounts", 1 }, +#endif /* HAVE_MNTENT */ +}; + +/* Current index into open_temp_exec_file_opts. */ +static int open_temp_exec_file_opts_idx = 0; + +/* Reset a current multi-call func, then advances to the next entry. + If we're at the last, go back to the first and return nonzero, + otherwise return zero. */ +static int +open_temp_exec_file_opts_next (void) +{ + if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL); + + open_temp_exec_file_opts_idx++; + if (open_temp_exec_file_opts_idx + == (sizeof (open_temp_exec_file_opts) + / sizeof (*open_temp_exec_file_opts))) + { + open_temp_exec_file_opts_idx = 0; + return 1; + } + + return 0; +} + +/* Return a file descriptor of a temporary zero-sized file in a + writable and executable filesystem. */ +static int +open_temp_exec_file (void) +{ + int fd; + + do + { + fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func + (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg); + + if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat + || fd == -1) + { + if (open_temp_exec_file_opts_next ()) + break; + } + } + while (fd == -1); + + return fd; +} + +/* We need to allocate space in a file that will be backing a writable + mapping. Several problems exist with the usual approaches: + - fallocate() is Linux-only + - posix_fallocate() is not available on all platforms + - ftruncate() does not allocate space on filesystems with sparse files + Failure to allocate the space will cause SIGBUS to be thrown when + the mapping is subsequently written to. */ +static int +allocate_space (int fd, off_t offset, off_t len) +{ + static size_t page_size; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + unsigned char buf[page_size]; + memset (buf, 0, page_size); + + while (len > 0) + { + off_t to_write = (len < page_size) ? len : page_size; + if (write (fd, buf, to_write) < to_write) + return -1; + len -= to_write; + } + + return 0; +} + +/* Map in a chunk of memory from the temporary exec file into separate + locations in the virtual memory address space, one writable and one + executable. Returns the address of the writable portion, after + storing an offset to the corresponding executable portion at the + last word of the requested chunk. 
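The comment above is the heart of the fallback: the same file pages get mapped twice, once PROT_READ|PROT_WRITE for the allocator and once PROT_READ|PROT_EXEC for the generated code, and the offset between the two views is recorded so either address can be derived from the other. A self-contained Linux illustration of that dual-view idea, using memfd_create in place of the temporary-file search (assumes x86-64 machine code and glibc 2.27 or newer; a demonstration, not libffi's implementation):

#define _GNU_SOURCE 1
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  size_t len = (size_t) sysconf(_SC_PAGESIZE);

  int fd = memfd_create("wx-demo", MFD_CLOEXEC);      /* anonymous in-memory file */
  if (fd == -1 || ftruncate(fd, (off_t) len) == -1)
    return 1;

  /* Two views of the same pages: one writable, one executable. */
  unsigned char *rw = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  unsigned char *rx = mmap(NULL, len, PROT_READ | PROT_EXEC,  MAP_SHARED, fd, 0);
  if (rw == MAP_FAILED || rx == MAP_FAILED)
    return 1;

  /* x86-64 only: mov eax, 42 ; ret */
  static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
  memcpy(rw, code, sizeof code);                      /* write through the RW view */

  int (*fn)(void) = (int (*)(void)) (void *) rx;      /* run through the RX view   */
  printf("rw=%p rx=%p fn()=%d\n", (void *) rw, (void *) rx, fn());
  return 0;
}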
*/ +static void * +dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset) +{ + void *ptr; + + if (execfd == -1) + { + open_temp_exec_file_opts_idx = 0; + retry_open: + execfd = open_temp_exec_file (); + if (execfd == -1) + return MFAIL; + } + + offset = execsize; + + if (allocate_space (execfd, offset, length)) + return MFAIL; + + flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS); + flags |= MAP_SHARED; + + ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC, + flags, execfd, offset); + if (ptr == MFAIL) + { + if (!offset) + { + close (execfd); + goto retry_open; + } + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + + return MFAIL; + } + else if (!offset + && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts_next (); + + start = mmap (start, length, prot, flags, execfd, offset); + + if (start == MFAIL) + { + munmap (ptr, length); + if (ftruncate (execfd, offset) != 0) + { + /* Fixme : Error logs can be added here. Returning an error for + * ftruncte() will not add any advantage as it is being + * validating in the error case. */ + } + return start; + } + + mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start; + + execsize += length; + + return start; +} + +/* Map in a writable and executable chunk of memory if possible. + Failing that, fall back to dlmmap_locked. */ +static void * +dlmmap (void *start, size_t length, int prot, + int flags, int fd, off_t offset) +{ + void *ptr; + + assert (start == NULL && length % malloc_getpagesize == 0 + && prot == (PROT_READ | PROT_WRITE) + && flags == (MAP_PRIVATE | MAP_ANONYMOUS) + && fd == -1 && offset == 0); + + if (execfd == -1 && is_emutramp_enabled ()) + { + ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset); + return ptr; + } + + if (execfd == -1 && !is_selinux_enabled ()) + { + ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset); + + if (ptr != MFAIL || (errno != EPERM && errno != EACCES)) + /* Cool, no need to mess with separate segments. */ + return ptr; + + /* If MREMAP_DUP is ever introduced and implemented, try mmap + with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with + MREMAP_DUP and prot at this point. */ + } + + if (execsize == 0 || execfd == -1) + { + pthread_mutex_lock (&open_temp_exec_file_mutex); + ptr = dlmmap_locked (start, length, prot, flags, offset); + pthread_mutex_unlock (&open_temp_exec_file_mutex); + + return ptr; + } + + return dlmmap_locked (start, length, prot, flags, offset); +} + +/* Release memory at the given address, as well as the corresponding + executable page if it's separate. */ +static int +dlmunmap (void *start, size_t length) +{ + /* We don't bother decreasing execsize or truncating the file, since + we can't quite tell whether we're unmapping the end of the file. + We don't expect frequent deallocation anyway. If we did, we + could locate pages in the file by writing to the pages being + deallocated and checking that the file contents change. + Yuck. */ + msegmentptr seg = segment_holding (gm, start); + void *code; + + if (seg && (code = add_segment_exec_offset (start, seg)) != start) + { + int ret = munmap (code, length); + if (ret) + return ret; + } + + return munmap (start, length); +} + +#if FFI_CLOSURE_FREE_CODE +/* Return segment holding given code address. 
*/ +static msegmentptr +segment_holding_code (mstate m, char* addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= add_segment_exec_offset (sp->base, sp) + && addr < add_segment_exec_offset (sp->base, sp) + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} +#endif + +#endif /* !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +/* Allocate a chunk of memory with the given size. Returns a pointer + to the writable address, and sets *CODE to the executable + corresponding virtual address. */ +void * +ffi_closure_alloc (size_t size, void **code) +{ + void *ptr; + + if (!code) + return NULL; + + ptr = FFI_CLOSURE_PTR (dlmalloc (size)); + + if (ptr) + { + msegmentptr seg = segment_holding (gm, ptr); + + *code = add_segment_exec_offset (ptr, seg); + } + + return ptr; +} + +void * +ffi_data_to_code_pointer (void *data) +{ + msegmentptr seg = segment_holding (gm, data); + /* We expect closures to be allocated with ffi_closure_alloc(), in + which case seg will be non-NULL. However, some users take on the + burden of managing this memory themselves, in which case this + we'll just return data. */ + if (seg) + return add_segment_exec_offset (data, seg); + else + return data; +} + +/* Release a chunk of memory allocated with ffi_closure_alloc. If + FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the + writable or the executable address given. Otherwise, only the + writable address can be provided here. */ +void +ffi_closure_free (void *ptr) +{ +#if FFI_CLOSURE_FREE_CODE + msegmentptr seg = segment_holding_code (gm, ptr); + + if (seg) + ptr = sub_segment_exec_offset (ptr, seg); +#endif + + dlfree (FFI_RESTORE_PTR (ptr)); +} + +# else /* ! FFI_MMAP_EXEC_WRIT */ + +/* On many systems, memory returned by malloc is writable and + executable, so just use it. */ + +#include + +void * +ffi_closure_alloc (size_t size, void **code) +{ + if (!code) + return NULL; + + return *code = FFI_CLOSURE_PTR (malloc (size)); +} + +void +ffi_closure_free (void *ptr) +{ + free (FFI_RESTORE_PTR (ptr)); +} + +void * +ffi_data_to_code_pointer (void *data) +{ + return data; +} + +# endif /* ! FFI_MMAP_EXEC_WRIT */ +#endif /* FFI_CLOSURES */ + +#endif /* NetBSD with PROT_MPROTECT */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/ffi.c new file mode 100644 index 0000000..9011fde --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/ffi.c @@ -0,0 +1,386 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998 Cygnus Solutions + Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + Copyright (C) 2007 Free Software Foundation, Inc. + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
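Every allocator variant in closures.c sits behind the same pair of public entry points, ffi_closure_alloc and ffi_closure_free, which callers combine with ffi_prep_closure_loc and the code address returned through the out-parameter. A minimal end-to-end sketch of that API, assuming a standard libffi installation (this usage code is not part of the patch):

#include <ffi.h>
#include <stdio.h>

/* Closure handler: returns its int argument plus one. */
static void add_one(ffi_cif *cif, void *ret, void **args, void *userdata) {
  (void) cif;
  (void) userdata;
  *(ffi_arg *) ret = *(int *) args[0] + 1;    /* small integer returns widen to ffi_arg */
}

int main(void) {
  void *code;                                 /* executable address of the trampoline */
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
  if (closure == NULL)
    return 1;

  ffi_cif cif;
  ffi_type *argtypes[] = { &ffi_type_sint };
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) != FFI_OK ||
      ffi_prep_closure_loc(closure, &cif, add_one, NULL, code) != FFI_OK) {
    ffi_closure_free(closure);
    return 1;
  }

  int (*fn)(int) = (int (*)(int)) code;       /* call through the code mapping  */
  printf("%d\n", fn(41));                     /* prints 42                      */

  ffi_closure_free(closure);                  /* free via the writable address  */
  return 0;
}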
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +static ffi_status +initialize_aggregate_packed_struct (ffi_type * arg) +{ + ffi_type **ptr; + + FFI_ASSERT (arg != NULL); + + FFI_ASSERT (arg->elements != NULL); + FFI_ASSERT (arg->size == 0); + FFI_ASSERT (arg->alignment == 0); + + ptr = &(arg->elements[0]); + + while ((*ptr) != NULL) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT (ffi_type_test ((*ptr))); + + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +int +ffi_prep_args (char *stack, extended_cif * ecif) +{ + unsigned int i; + unsigned int struct_count = 0; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); i--, p_arg++) + { + size_t z; + + switch ((*p_arg)->type) + { + case FFI_TYPE_STRUCT: + { + z = (*p_arg)->size; + if (z <= 4) + { + memcpy (argp, *p_argv, z); + z = 4; + } + else if (z <= 8) + { + memcpy (argp, *p_argv, z); + z = 8; + } + else + { + unsigned int uiLocOnStack; + z = sizeof (void *); + uiLocOnStack = 4 * ecif->cif->nargs + struct_count; + struct_count = struct_count + (*p_arg)->size; + *(unsigned int *) argp = + (unsigned int) (UINT32 *) (stack + uiLocOnStack); + memcpy ((stack + uiLocOnStack), *p_argv, (*p_arg)->size); + } + break; + } + default: + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) (*p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = + (unsigned int) *(UINT8 *) (*p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) (*p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = + (unsigned int) *(UINT16 *) (*p_argv); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else if (z == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) (*p_argv); + else + memcpy (argp, *p_argv, z); + break; + } + p_argv++; + argp += z; + } + + return (struct_count); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_core (ffi_cif * cif, + ffi_abi abi, unsigned int isvariadic, + unsigned int nfixedargs, unsigned int ntotalargs, + ffi_type * rtype, ffi_type ** atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT (cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + FFI_ASSERT (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI); + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; + + if ((cif->rtype->size == 0) + && (initialize_aggregate_packed_struct (cif->rtype) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + 
FFI_ASSERT_VALID_TYPE (cif->rtype); + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + if (((*ptr)->size == 0) + && (initialize_aggregate_packed_struct ((*ptr)) != FFI_OK)) + return FFI_BAD_TYPEDEF; + + FFI_ASSERT_VALID_TYPE (*ptr); + + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN (bytes, (*ptr)->alignment); + if ((*ptr)->type == FFI_TYPE_STRUCT) + { + if ((*ptr)->size > 8) + { + bytes += (*ptr)->size; + bytes += sizeof (void *); + } + else + { + if ((*ptr)->size > 4) + bytes += 8; + else + bytes += 4; + } + } + else + bytes += STACK_ARG_SIZE ((*ptr)->size); + } + + cif->bytes = bytes; + + return ffi_prep_cif_machdep (cif); +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif * cif) +{ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV (int (*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, unsigned *, void (*fn) ()) + __attribute__ ((__visibility__ ("hidden"))); + +void +ffi_call (ffi_cif * cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT (0); + break; + } +} + +/* Because the following variables are not exported outside libffi, we + mark them hidden. */ + +/* Assembly code for the jump stub. */ +extern const char ffi_cris_trampoline_template[] + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + ffi_prep_closure_inner function. */ +extern const int ffi_cris_trampoline_fn_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* Offset into ffi_cris_trampoline_template of where to put the + closure data. */ +extern const int ffi_cris_trampoline_closure_offset + __attribute__ ((__visibility__ ("hidden"))); + +/* This function is sibling-called (jumped to) by the closure + trampoline. We get R10..R13 at PARAMS[0..3] and a copy of [SP] at + PARAMS[4] to simplify handling of a straddling parameter. A copy + of R9 is at PARAMS[5] and SP at PARAMS[6]. These parameters are + put at the appropriate place in CLOSURE which is then executed and + the return value is passed back to the caller. */ + +static unsigned long long +ffi_prep_closure_inner (void **params, ffi_closure* closure) +{ + char *register_args = (char *) params; + void *struct_ret = params[5]; + char *stack_args = params[6]; + char *ptr = register_args; + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + + /* Max room needed is number of arguments as 64-bit values. */ + void **avalue = alloca (closure->cif->nargs * sizeof(void *)); + int i; + int doing_regs; + long long llret = 0; + + /* Find the address of each argument. */ + for (i = 0, doing_regs = 1; i < cif->nargs; i++) + { + /* Types up to and including 8 bytes go by-value. 
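ffi_prep_cif_core above sizes the argument block by walking the argument types, rounding the running byte count up to each argument's alignment before adding its stack-padded size. The rounding step on its own (a sketch of the usual power-of-two round-up; libffi's FFI_ALIGN lives in its private headers and may be spelled differently):

#include <stddef.h>
#include <stdio.h>

/* Round `v` up to the next multiple of `align`, which must be a power of two. */
static size_t align_up(size_t v, size_t align) {
  return (v + align - 1) & ~(align - 1);
}

int main(void) {
  size_t bytes = 0;
  bytes = align_up(bytes, 4) + 4;   /* 4-byte int            -> 4  */
  bytes = align_up(bytes, 8) + 8;   /* 8-byte double, padded -> 16 */
  bytes = align_up(bytes, 2) + 2;   /* 2-byte short          -> 18 */
  printf("argument block: %zu bytes\n", bytes);
  return 0;
}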
*/ + if (arg_types[i]->size <= 4) + { + avalue[i] = ptr; + ptr += 4; + } + else if (arg_types[i]->size <= 8) + { + avalue[i] = ptr; + ptr += 8; + } + else + { + FFI_ASSERT (arg_types[i]->type == FFI_TYPE_STRUCT); + + /* Passed by-reference, so copy the pointer. */ + avalue[i] = *(void **) ptr; + ptr += 4; + } + + /* If we've handled more arguments than fit in registers, start + looking at the those passed on the stack. Step over the + first one if we had a straddling parameter. */ + if (doing_regs && ptr >= register_args + 4*4) + { + ptr = stack_args + ((ptr > register_args + 4*4) ? 4 : 0); + doing_regs = 0; + } + } + + /* Invoke the closure. */ + (closure->fun) (cif, + + cif->rtype->type == FFI_TYPE_STRUCT + /* The caller allocated space for the return + structure, and passed a pointer to this space in + R9. */ + ? struct_ret + + /* We take advantage of being able to ignore that + the high part isn't set if the return value is + not in R10:R11, but in R10 only. */ + : (void *) &llret, + + avalue, closure->user_data); + + return llret; +} + +/* API function: Prepare the trampoline. */ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif *, void *, void **, void*), + void *user_data, + void *codeloc) +{ + void *innerfn = ffi_prep_closure_inner; + FFI_ASSERT (cif->abi == FFI_SYSV); + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + memcpy (closure->tramp, ffi_cris_trampoline_template, + FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE); + memcpy (closure->tramp + ffi_cris_trampoline_fn_offset, + &innerfn, sizeof (void *)); + memcpy (closure->tramp + ffi_cris_trampoline_closure_offset, + &codeloc, sizeof (void *)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/ffitarget.h new file mode 100644 index 0000000..b837e97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/ffitarget.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for CRIS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. 
Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE 36 +#define FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE (7*4) +#define FFI_TRAMPOLINE_SIZE \ + (FFI_CRIS_TRAMPOLINE_CODE_PART_SIZE + FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE) +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/sysv.S new file mode 100644 index 0000000..79abaee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/cris/sysv.S @@ -0,0 +1,215 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Simon Posnjak + Copyright (c) 2005 Axis Communications AB + + CRIS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#define CONCAT(x,y) x ## y +#define XCONCAT(x,y) CONCAT (x, y) +#define L(x) XCONCAT (__USER_LABEL_PREFIX__, x) + + .text + + ;; OK, when we get called we should have this (according to + ;; AXIS ETRAX 100LX Programmer's Manual chapter 6.3). + ;; + ;; R10: ffi_prep_args (func. pointer) + ;; R11: &ecif + ;; R12: cif->bytes + ;; R13: fig->flags + ;; sp+0: ecif.rvalue + ;; sp+4: fn (function pointer to the function that we need to call) + + .globl L(ffi_call_SYSV) + .type L(ffi_call_SYSV),@function + .hidden L(ffi_call_SYSV) + +L(ffi_call_SYSV): + ;; Save the regs to the stack. + push $srp + ;; Used for stack pointer saving. + push $r6 + ;; Used for function address pointer. + push $r7 + ;; Used for stack pointer saving. + push $r8 + ;; We save fig->flags to stack we will need them after we + ;; call The Function. + push $r13 + + ;; Saving current stack pointer. + move.d $sp,$r8 + move.d $sp,$r6 + + ;; Move address of ffi_prep_args to r13. + move.d $r10,$r13 + + ;; Make room on the stack for the args of fn. + sub.d $r12,$sp + + ;; Function void ffi_prep_args(char *stack, extended_cif *ecif) parameters are: + ;; r10 <-- stack pointer + ;; r11 <-- &ecif (already there) + move.d $sp,$r10 + + ;; Call the function. 
+ jsr $r13 + + ;; Save the size of the structures which are passed on stack. + move.d $r10,$r7 + + ;; Move first four args in to r10..r13. + move.d [$sp+0],$r10 + move.d [$sp+4],$r11 + move.d [$sp+8],$r12 + move.d [$sp+12],$r13 + + ;; Adjust the stack and check if any parameters are given on stack. + addq 16,$sp + sub.d $r7,$r6 + cmp.d $sp,$r6 + + bpl go_on + nop + +go_on_no_params_on_stack: + move.d $r6,$sp + +go_on: + ;; Discover if we need to put rval address in to r9. + move.d [$r8+0],$r7 + cmpq FFI_TYPE_STRUCT,$r7 + bne call_now + nop + + ;; Move rval address to $r9. + move.d [$r8+20],$r9 + +call_now: + ;; Move address of The Function in to r7. + move.d [$r8+24],$r7 + + ;; Call The Function. + jsr $r7 + + ;; Reset stack. + move.d $r8,$sp + + ;; Load rval type (fig->flags) in to r13. + pop $r13 + + ;; Detect rval type. + cmpq FFI_TYPE_VOID,$r13 + beq epilogue + + cmpq FFI_TYPE_STRUCT,$r13 + beq epilogue + + cmpq FFI_TYPE_DOUBLE,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_UINT64,$r13 + beq return_double_or_longlong + + cmpq FFI_TYPE_SINT64,$r13 + beq return_double_or_longlong + nop + + ;; Just return the 32 bit value. + ba return + nop + +return_double_or_longlong: + ;; Load half of the rval to r10 and the other half to r11. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + addq 4,$r13 + move.d $r11,[$r13] + ba epilogue + nop + +return: + ;; Load the rval to r10. + move.d [$sp+16],$r13 + move.d $r10,[$r13] + +epilogue: + pop $r8 + pop $r7 + pop $r6 + Jump [$sp+] + + .size ffi_call_SYSV,.-ffi_call_SYSV + +/* Save R10..R13 into an array, somewhat like varargs. Copy the next + argument too, to simplify handling of any straddling parameter. + Save R9 and SP after those. Jump to function handling the rest. + Since this is a template, copied and the main function filled in by + the user. */ + + .globl L(ffi_cris_trampoline_template) + .type L(ffi_cris_trampoline_template),@function + .hidden L(ffi_cris_trampoline_template) + +L(ffi_cris_trampoline_template): +0: + /* The value we get for "PC" is right after the prefix instruction, + two bytes from the beginning, i.e. 0b+2. */ + move.d $r10,[$pc+2f-(0b+2)] + move.d $pc,$r10 +1: + addq 2f-1b+4,$r10 + move.d $r11,[$r10+] + move.d $r12,[$r10+] + move.d $r13,[$r10+] + move.d [$sp],$r11 + move.d $r11,[$r10+] + move.d $r9,[$r10+] + move.d $sp,[$r10+] + subq FFI_CRIS_TRAMPOLINE_DATA_PART_SIZE,$r10 + move.d 0,$r11 +3: + jump 0 +2: + .size ffi_cris_trampoline_template,.-0b + +/* This macro create a constant usable as "extern const int \name" in + C from within libffi, when \name has no prefix decoration. */ + + .macro const name,value + .globl \name + .type \name,@object + .hidden \name +\name: + .dword \value + .size \name,4 + .endm + +/* Constants for offsets within the trampoline. We could do this with + just symbols, avoiding memory contents and memory accesses, but the + C usage code would look a bit stranger. 
*/ + + const L(ffi_cris_trampoline_fn_offset),2b-4-0b + const L(ffi_cris_trampoline_closure_offset),3b-4-0b diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/ffi.c new file mode 100644 index 0000000..af50b7c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/ffi.c @@ -0,0 +1,395 @@ +/* ----------------------------------------------------------------------- + ffi.c + + CSKY Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments +*/ +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + if ( ecif->cif->flags == FFI_TYPE_STRUCT ) { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + size_t alignment; + + /* Align if necessary */ + alignment = (*p_arg)->alignment; +#ifdef __CSKYABIV1__ + /* + * Adapt ABIV1 bug. + * If struct's size is larger than 8 bytes, then it always alignment as 4 bytes. 
+ */ + if (((*p_arg)->type == FFI_TYPE_STRUCT) && ((*p_arg)->size > 8) && (alignment == 8)) { + alignment = 4; + } +#endif + + if ((alignment - 1) & (unsigned) argp) { + argp = (char *) FFI_ALIGN(argp, alignment); + } + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + argp = (char *) FFI_ALIGN(argp, 4); + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: +#ifdef __CSKYBE__ + memcpy((argp + 4 - (*p_arg)->size), *p_argv, (*p_arg)->size); +#else + memcpy(argp, *p_argv, (*p_arg)->size); +#endif + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Round the stack up to a multiple of 8 bytes. This isn't needed + everywhere, but it is on some platforms, and it doesn't hcsky anything + when it isn't needed. */ + cif->bytes = (cif->bytes + 7) & ~7; + + /* Set the return type flag */ + switch (cif->rtype->type) + { + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) FFI_TYPE_SINT64; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4) + /* A Composite Type not larger than 4 bytes is returned in r0. */ + cif->flags = (unsigned)FFI_TYPE_INT; + else if (cif->rtype->size <= 8) + /* A Composite Type not larger than 8 bytes is returned in r0, r1. */ + cif->flags = (unsigned)FFI_TYPE_SINT64; + else + /* A Composite Type larger than 8 bytes, or whose size cannot + be determined statically ... is stored in memory at an + address passed [in r0]. 
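ffi_prep_cif_machdep here maps the CSKY return convention onto three flag values: a composite of at most 4 bytes comes back in r0, one of at most 8 bytes in the r0/r1 pair, and anything larger is written through an address the caller passes in r0. The same decision written out as a small helper (illustrative only; the names are hypothetical, not libffi's):

#include <stddef.h>
#include <stdio.h>

enum csky_ret_class {
  RET_IN_R0,        /* aggregate fits in one 32-bit register         */
  RET_IN_R0_R1,     /* aggregate fits in the r0/r1 register pair     */
  RET_VIA_MEMORY    /* caller passes a result address in r0 instead  */
};

/* Mirrors the size thresholds used in the switch for FFI_TYPE_STRUCT. */
static enum csky_ret_class classify_aggregate_return(size_t size) {
  if (size <= 4)
    return RET_IN_R0;
  if (size <= 8)
    return RET_IN_R0_R1;
  return RET_VIA_MEMORY;
}

int main(void) {
  printf("%d %d %d\n",
         classify_aggregate_return(4),     /* 0 */
         classify_aggregate_return(8),     /* 1 */
         classify_aggregate_return(12));   /* 2 */
  return 0;
}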
*/ + cif->flags = (unsigned)FFI_TYPE_STRUCT; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +/* Perform machine dependent cif processing for variadic calls */ +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs) +{ + return ffi_prep_cif_machdep(cif); +} + +/* Prototypes for assembly functions, in sysv.S */ +extern void ffi_call_SYSV (void (*fn)(void), extended_cif *, unsigned, unsigned, unsigned *); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + int small_struct = (cif->flags == FFI_TYPE_INT + && cif->rtype->type == FFI_TYPE_STRUCT); + + ecif.cif = cif; + ecif.avalue = avalue; + + unsigned int temp; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->flags == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else if (small_struct) + ecif.rvalue = &temp; + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (fn, &ecif, cif->bytes, cif->flags, ecif.rvalue); + break; + + default: + FFI_ASSERT(0); + break; + } + if (small_struct) +#ifdef __CSKYBE__ + memcpy (rvalue, ((unsigned char *)&temp + (4 - cif->rtype->size)), cif->rtype->size); +#else + memcpy (rvalue, &temp, cif->rtype->size); +#endif +} + +/** private members **/ + +static void ffi_prep_incoming_args_SYSV (char *stack, void **ret, + void** args, ffi_cif* cif); + +void ffi_closure_SYSV (ffi_closure *); + +/* This function is jumped to by the trampoline */ + +unsigned int +ffi_closure_SYSV_inner (closure, respp, args) + ffi_closure *closure; + void **respp; + void *args; +{ + // our various things... + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void*)); + + /* this call will initialize ARG_AREA, such that each + * element in that array points to the corresponding + * value on the stack; and if the function returns + * a structure, it will re-set RESP to point to the + * structure return address. */ + + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif); + + (closure->fun) (cif, *respp, arg_area, closure->user_data); + +#ifdef __CSKYBE__ + if (cif->flags == FFI_TYPE_INT && cif->rtype->type == FFI_TYPE_STRUCT) { + unsigned int tmp = 0; + tmp = *(unsigned int *)(*respp); + *(unsigned int *)(*respp) = (tmp >> ((4 - cif->rtype->size) * 8)); + } +#endif + + return cif->flags; +} + + +static void +ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + if ( cif->flags == FFI_TYPE_STRUCT ) { + *rvalue = *(void **) argp; + argp += 4; + } + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) + { + size_t z; + size_t alignment; + + alignment = (*p_arg)->alignment; + if (alignment < 4) + alignment = 4; + +#ifdef __CSKYABIV1__ + /* + * Adapt ABIV1 bug. + * If struct's size is larger than 8 bytes, then it always alignment as 4 bytes. 
+ */ + if (((*p_arg)->type == FFI_TYPE_STRUCT) && ((*p_arg)->size > 8) && (alignment == 8)) { + alignment = 4; + } +#endif + + /* Align if necessary */ + if ((alignment - 1) & (unsigned) argp) { + argp = (char *) FFI_ALIGN(argp, alignment); + } + + z = (*p_arg)->size; + +#ifdef __CSKYBE__ + unsigned int tmp = 0; + if ((*p_arg)->size < 4) { + tmp = *(unsigned int *)argp; + memcpy(argp, ((unsigned char *)&tmp + (4 - (*p_arg)->size)), (*p_arg)->size); + } +#else + /* because we're little endian, this is what it turns into. */ +#endif + *p_argv = (void*) argp; + + p_argv++; + argp += z; + } + + return; +} + +/* How to make a trampoline. */ + +extern unsigned char ffi_csky_trampoline[TRAMPOLINE_SIZE]; + +/* + * Since there is no __clear_cache in libgcc in csky toolchain. + * define ffi_csky_cacheflush in sysv.S. + * void ffi_csky_cacheflush(uint32 start_addr, uint32 size, int cache) + */ +#define CACHEFLUSH_IN_FFI 1 +#if CACHEFLUSH_IN_FFI +extern void ffi_csky_cacheflush(unsigned char *__tramp, unsigned int k, + int i); +#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX) \ +({ unsigned char *__tramp = (unsigned char*)(TRAMP); \ + unsigned int __fun = (unsigned int)(FUN); \ + unsigned int __ctx = (unsigned int)(CTX); \ + unsigned char *insns = (unsigned char *)(CTX); \ + memcpy (__tramp, ffi_csky_trampoline, TRAMPOLINE_SIZE); \ + *(unsigned int*) &__tramp[TRAMPOLINE_SIZE] = __ctx; \ + *(unsigned int*) &__tramp[TRAMPOLINE_SIZE + 4] = __fun; \ + ffi_csky_cacheflush(&__tramp[0], TRAMPOLINE_SIZE, 3); /* Clear data mapping. */ \ + ffi_csky_cacheflush(insns, TRAMPOLINE_SIZE, 3); \ + /* Clear instruction \ + mapping. */ \ + }) +#else +#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX) \ +({ unsigned char *__tramp = (unsigned char*)(TRAMP); \ + unsigned int __fun = (unsigned int)(FUN); \ + unsigned int __ctx = (unsigned int)(CTX); \ + unsigned char *insns = (unsigned char *)(CTX); \ + memcpy (__tramp, ffi_csky_trampoline, TRAMPOLINE_SIZE); \ + *(unsigned int*) &__tramp[TRAMPOLINE_SIZE] = __ctx; \ + *(unsigned int*) &__tramp[TRAMPOLINE_SIZE + 4] = __fun; \ + __clear_cache((&__tramp[0]), (&__tramp[TRAMPOLINE_SIZE-1])); /* Clear data mapping. */ \ + __clear_cache(insns, insns + TRAMPOLINE_SIZE); \ + /* Clear instruction \ + mapping. */ \ + }) +#endif + +/* the cif must already be prep'ed */ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + void (*closure_func)(ffi_closure*) = NULL; + + if (cif->abi == FFI_SYSV) + closure_func = &ffi_closure_SYSV; + else + return FFI_BAD_ABI; + + FFI_INIT_TRAMPOLINE (&closure->tramp[0], \ + closure_func, \ + codeloc); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/ffitarget.h new file mode 100644 index 0000000..f770aac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/ffitarget.h @@ -0,0 +1,63 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for CSKY. 
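For orientation, here is a minimal sketch of how the closure support above (ffi_prep_closure_loc plus the FFI_INIT_TRAMPOLINE macro) is normally exercised through the portable libffi API. The code is illustrative, not part of the vendored sources, and puts_handler is a made-up name.

    #include <ffi.h>
    #include <stdio.h>

    /* Handler with the signature ffi_prep_closure_loc expects. */
    static void puts_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
    {
        (void) cif; (void) user_data;
        *(ffi_arg *) ret = puts(*(char **) args[0]);
    }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[1] = { &ffi_type_pointer };
        void *code;                                   /* executable codeloc */
        ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
        int (*fn)(char *);

        if (closure == NULL
            || ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) != FFI_OK
            || ffi_prep_closure_loc(closure, &cif, puts_handler, NULL, code) != FFI_OK)
            return 1;                                 /* cleanup skipped for brevity */

        fn = (int (*)(char *)) code;                  /* call through the trampoline */
        fn("hello from a libffi closure");
        ffi_closure_free(closure);
        return 0;
    }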
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV, +} ffi_abi; +#endif + +#ifdef __CSKYABIV2__ +#define FFI_ASM_ARGREG_SIZE 16 +#define TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_SIZE 24 +#else +#define FFI_ASM_ARGREG_SIZE 24 +#define TRAMPOLINE_SIZE 20 +#define FFI_TRAMPOLINE_SIZE 28 +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/sysv.S new file mode 100644 index 0000000..21670bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/csky/sysv.S @@ -0,0 +1,371 @@ +/* ----------------------------------------------------------------------- + sysv.S + + CSKY Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
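The sysv.S file that follows implements ffi_call_SYSV, the assembly half of ffi_call. For reference, a minimal sketch of the portable calling sequence that eventually reaches it (standard libffi usage; add is an illustrative function, not taken from this diff):

    #include <ffi.h>
    #include <stdio.h>

    static int add(int a, int b) { return a + b; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
        int a = 2, b = 3;
        void *arg_values[2] = { &a, &b };
        ffi_arg result;   /* integer returns narrower than ffi_arg are widened */

        /* ffi_prep_cif ultimately calls ffi_prep_cif_machdep above
           to fill in cif->flags and the rounded cif->bytes. */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
            return 1;

        ffi_call(&cif, FFI_FN(add), &result, arg_values);
        printf("%d\n", (int) result);   /* prints 5 */
        return 0;
    }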
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.macro CSKY_FUNC_START name + .text + .align 2 + .globl \name + .type \name, @function + \name: +.endm + +#ifdef __CSKYABIV2__ + + /* + * a0: fn + * a1: &ecif + * a2: cif->bytes + * a3: fig->flags + * sp+0: ecif.rvalue + */ +CSKY_FUNC_START ffi_call_SYSV + /* Save registers */ + .cfi_startproc + subi sp, 28 + .cfi_def_cfa_offset 28 + stw a0, (sp, 0x0) + .cfi_offset 0, -28 + stw a1, (sp, 0x4) + .cfi_offset 1, -24 + stw a2, (sp, 0x8) + .cfi_offset 2, -20 + stw a3, (sp, 0xC) + .cfi_offset 3, -16 + stw l0, (sp, 0x10) + .cfi_offset 4, -12 + stw l1, (sp, 0x14) + .cfi_offset 5, -8 + stw lr, (sp, 0x18) + .cfi_offset 15, -4 + + mov l0, sp + .cfi_def_cfa_register 4 + + /* Make room for all of the new args. */ + subu sp, sp, a2 + + /* Place all of the ffi_prep_args in position */ + mov a0, sp + /* a1 already set */ + + /* Call ffi_prep_args(stack, &ecif) */ + jsri ffi_prep_args + + /* move first 4 parameters in registers */ + ldw a0, (sp, 0x0) + ldw a1, (sp, 0x4) + ldw a2, (sp, 0x8) + ldw a3, (sp, 0xC) + + /* and adjust stack */ + subu lr, l0, sp /* cif->bytes == l0 - sp */ + cmphsi lr, 16 + movi l1, 16 + movt lr, l1 + addu sp, sp, lr + + ldw l1, (l0, 0) /* load fn() in advance */ + + /* call (fn) (...) */ + jsr l1 + + /* Remove the space we pushed for the args */ + mov sp, l0 + + /* Load r2 with the pointer to storage for the return value */ + ldw a2, (sp, 0x1C) + + /* Load r3 with the return type code */ + ldw a3, (sp, 0xC) + + /* If the return value pointer is NULL, assume no return value. */ + cmpnei a2, 0 + bf .Lepilogue + + cmpnei a3, FFI_TYPE_STRUCT + bf .Lepilogue + + /* return INT64 */ + cmpnei a3, FFI_TYPE_SINT64 + bt .Lretint + /* stw a0, (a2, 0x0) at .Lretint */ + stw a1, (a2, 0x4) + +.Lretint: + /* return INT */ + stw a0, (a2, 0x0) + +.Lepilogue: + ldw a0, (sp, 0x0) + ldw a1, (sp, 0x4) + ldw a2, (sp, 0x8) + ldw a3, (sp, 0xC) + ldw l0, (sp, 0x10) + ldw l1, (sp, 0x14) + ldw lr, (sp, 0x18) + addi sp, sp, 28 + rts + .cfi_endproc + .size ffi_call_SYSV, .-ffi_call_SYSV + + + /* + * unsigned int FFI_HIDDEN + * ffi_closure_SYSV_inner (closure, respp, args) + * ffi_closure *closure; + * void **respp; + * void *args; + */ +CSKY_FUNC_START ffi_closure_SYSV + .cfi_startproc + mov a2, sp + addi a1, sp, 16 + subi sp, sp, 24 + .cfi_def_cfa_offset 40 + stw a1, (sp, 0x10) + .cfi_offset 1, -24 + stw lr, (sp, 0x14) + .cfi_offset 15, -20 + stw sp, (sp, 0x8) + addi a1, sp, 8 + jsri ffi_closure_SYSV_inner + ldw a0, (sp, 0x0) + /* + * if FFI_TYPE_SINT64, need a1. + * if FFI_TYPE_INT, ignore a1. 
+ */ + ldw a1, (sp, 0x4) + + ldw lr, (sp, 0x14) + addi sp, sp, 40 + rts + .cfi_endproc + .size ffi_closure_SYSV, .-ffi_closure_SYSV + +CSKY_FUNC_START ffi_csky_trampoline + subi sp, sp, 16 + stw a0, (sp, 0x0) + stw a1, (sp, 0x4) + stw a2, (sp, 0x8) + stw a3, (sp, 0xC) + lrw a0, [.Lctx] + lrw a1, [.Lfun] + jmp a1 +.Lctx: + mov a0, a0 + mov a0, a0 +.Lfun: + + .size ffi_csky_trampoline, .-ffi_csky_trampoline + +CSKY_FUNC_START ffi_csky_cacheflush + mov t0, r7 + movi r7, 123 + trap 0 + mov r7, t0 + rts + + .size ffi_csky_cacheflush, .-ffi_csky_cacheflush + +#else /* !__CSKYABIV2__ */ + + /* + * a0: fn + * a1: &ecif + * a2: cif->bytes + * a3: fig->flags + * a4: ecif.rvalue + */ +CSKY_FUNC_START ffi_call_SYSV + /* Save registers */ + .cfi_startproc + subi sp, 32 + subi sp, 8 + .cfi_def_cfa_offset 40 + stw a0, (sp, 0x0) + .cfi_offset 2, -40 + stw a1, (sp, 0x4) + .cfi_offset 3, -36 + stw a2, (sp, 0x8) + .cfi_offset 4, -32 + stw a3, (sp, 0xC) + .cfi_offset 5, -28 + stw a4, (sp, 0x10) + .cfi_offset 6, -24 + stw a5, (sp, 0x14) + .cfi_offset 7, -20 + stw l0, (sp, 0x18) + .cfi_offset 8, -16 + stw l1, (sp, 0x1C) + .cfi_offset 9, -12 + stw lr, (sp, 0x20) + .cfi_offset 15, -8 + + mov l0, sp + .cfi_def_cfa_register 8 + + /* Make room for all of the new args. */ + subu sp, sp, a2 + + /* Place all of the ffi_prep_args in position */ + mov a0, sp + /* a1 already set */ + + /* Call ffi_prep_args(stack, &ecif) */ + jsri ffi_prep_args + + /* move first 4 parameters in registers */ + ldw a0, (sp, 0x0) + ldw a1, (sp, 0x4) + ldw a2, (sp, 0x8) + ldw a3, (sp, 0xC) + ldw a4, (sp, 0x10) + ldw a5, (sp, 0x14) + + /* and adjust stack */ + mov lr, l0 + subu lr, sp /* cif->bytes == l0 - sp */ + movi l1, 24 + cmphs lr, l1 + movt lr, l1 + addu sp, sp, lr + + ldw l1, (l0, 0) /* load fn() in advance */ + + /* call (fn) (...) */ + jsr l1 + + /* Remove the space we pushed for the args */ + mov sp, l0 + + /* Load r2 with the pointer to storage for the return value */ + ldw a2, (sp, 0x10) + + /* Load r3 with the return type code */ + ldw a3, (sp, 0xC) + + /* If the return value pointer is NULL, assume no return value. */ + cmpnei a2, 0 + bf .Lepilogue + + cmpnei a3, FFI_TYPE_STRUCT + bf .Lepilogue + + /* return INT64 */ + cmpnei a3, FFI_TYPE_SINT64 + bt .Lretint + /* stw a0, (a2, 0x0) at .Lretint */ + stw a1, (a2, 0x4) + +.Lretint: + /* return INT */ + stw a0, (a2, 0x0) + +.Lepilogue: + ldw a0, (sp, 0x0) + ldw a1, (sp, 0x4) + ldw a2, (sp, 0x8) + ldw a3, (sp, 0xC) + ldw a4, (sp, 0x10) + ldw a5, (sp, 0x14) + ldw l0, (sp, 0x18) + ldw l1, (sp, 0x1C) + ldw lr, (sp, 0x20) + addi sp, sp, 32 + addi sp, sp, 8 + rts + .cfi_endproc + + .size ffi_call_SYSV, .-ffi_call_SYSV + + + /* + * unsigned int FFI_HIDDEN + * ffi_closure_SYSV_inner (closure, respp, args) + * ffi_closure *closure; + * void **respp; + * void *args; + */ +CSKY_FUNC_START ffi_closure_SYSV + .cfi_startproc + mov a2, sp + mov a1, sp + addi a1, 24 + subi sp, sp, 24 + .cfi_def_cfa_offset 48 + stw a1, (sp, 0x10) + .cfi_offset 3, -32 + stw lr, (sp, 0x14) + .cfi_offset 15, -28 + stw sp, (sp, 0x8) + mov a1, sp + addi a1, 8 + jsri ffi_closure_SYSV_inner + ldw a0, (sp, 0x0) + /* + * if FFI_TYPE_SINT64, need a1. + * if FFI_TYPE_INT, ignore a1. 
+ */ + ldw a1, (sp, 0x4) + + ldw lr, (sp, 0x14) + addi sp, sp, 24 + addi sp, sp, 24 + rts + .cfi_endproc + + .size ffi_closure_SYSV, .-ffi_closure_SYSV + +CSKY_FUNC_START ffi_csky_trampoline + subi sp, 24 + stw a0, (sp, 0x0) + stw a1, (sp, 0x4) + stw a2, (sp, 0x8) + stw a3, (sp, 0xC) + stw a4, (sp, 0x10) + stw a5, (sp, 0x14) + lrw a0, [.Lctx] + lrw a1, [.Lfun] + jmp a1 +.Lctx: + mov a0, a0 + mov a0, a0 +.Lfun: + + .size ffi_csky_trampoline, .-ffi_csky_trampoline + +CSKY_FUNC_START ffi_csky_cacheflush + lrw r1, 123 + trap 0 + rts + + .size ffi_csky_cacheflush, .-ffi_csky_cacheflush + +#endif /* __CSKYABIV2__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/debug.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/debug.c new file mode 100644 index 0000000..f3172b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/debug.c @@ -0,0 +1,64 @@ +/* ----------------------------------------------------------------------- + debug.c - Copyright (c) 1996 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include + +/* General debugging routines */ + +void ffi_stop_here(void) +{ + /* This function is only useful for debugging purposes. + Place a breakpoint on ffi_stop_here to be notified of + significant events. 
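     For example, a gdb breakpoint set with "break ffi_stop_here" halts the
     process whenever an FFI_ASSERT fails, because ffi_assert below calls
     this function just before aborting.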
*/ +} + +/* This function should only be called via the FFI_ASSERT() macro */ + +void ffi_assert(char *expr, char *file, int line) +{ + fprintf(stderr, "ASSERTION FAILURE: %s at %s:%d\n", expr, file, line); + ffi_stop_here(); + abort(); +} + +/* Perform a sanity check on an ffi_type structure */ + +void ffi_type_test(ffi_type *a, char *file, int line) +{ + FFI_ASSERT_AT(a != NULL, file, line); + + FFI_ASSERT_AT(a->type <= FFI_TYPE_LAST, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->size > 0, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->alignment > 0, file, line); + FFI_ASSERT_AT((a->type != FFI_TYPE_STRUCT && a->type != FFI_TYPE_COMPLEX) + || a->elements != NULL, file, line); + FFI_ASSERT_AT(a->type != FFI_TYPE_COMPLEX + || (a->elements != NULL + && a->elements[0] != NULL && a->elements[1] == NULL), + file, line); + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/dlmalloc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/dlmalloc.c new file mode 100644 index 0000000..1aba657 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/dlmalloc.c @@ -0,0 +1,5166 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. 
+ + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. (Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with either a pthread mutex or a win32 + spinlock (depending on WIN32). This is not especially fast, and + can be a major bottleneck. It is designed only to provide + minimal protection in concurrent environments, and to provide a + basis for extensions. 
If you are using malloc in a concurrent + program, consider instead using ptmalloc, which is derived from + a version of this malloc. (See http://www.malloc.de). + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. + + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... 
+ static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. + +MALLOC_ALIGNMENT default: (size_t)8 + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. (If set true, this can be + overridden on a per-mspace basis for mspace versions.) + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. + +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. 
And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. + However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. + +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. See + near the end of this file for guidelines for creating a custom + version of MORECORE. + +MORECORE_CONTIGUOUS default: 1 (true) + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. 
Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. + +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 on unix + True if mmap clears memory so calloc doesn't need to. This is true + for standard unix mmap using /dev/zero. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. (On most x86s, the asm version is only + slightly faster than the C version.) + +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). + +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. 
Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. 
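As a hedged sketch of how the options documented above are typically applied, one common pattern is a small wrapper translation unit that sets the desired macros before pulling in this file. The file and function names below are hypothetical and the values are only examples.

    /* my_dlmalloc.c -- hypothetical wrapper around the vendored dlmalloc.c */
    #define USE_DL_PREFIX                 /* expose dlmalloc/dlfree, keep the system malloc */
    #define USE_LOCKS 1                   /* serialize public calls with a mutex            */
    #define DEFAULT_MMAP_THRESHOLD ((size_t)1024U * (size_t)1024U)  /* mmap chunks >= 1 MiB */
    #include "dlmalloc.c"

    /* Callers then use the dl-prefixed entry points selected above. */
    void *arena_alloc_example(size_t n)   /* hypothetical helper */
    {
        return dlmalloc(n);
    }

The same effect can be had by passing the macros on the compiler command line (for example -DUSE_DL_PREFIX) instead of wrapping the file.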
+ +*/ + +#if defined __linux__ && !defined _GNU_SOURCE +/* mremap() on Linux requires this via sys/mman.h */ +#define _GNU_SOURCE 1 +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define MALLOC_FAILURE_ACTION +#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ +#endif /* WIN32 */ + +#ifdef __OS2__ +#define INCL_DOS +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_SYS_MMAN_H +#endif /* __OS2__ */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)8U) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#ifndef MORECORE +#define MORECORE sbrk +#endif /* MORECORE */ +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if MORECORE_CONTIGUOUS +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* 
DEFAULT_MMAP_THRESHOLD */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. +*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. 
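For the three mallopt parameters defined above, a brief runtime-tuning sketch; it assumes the declarations from this file are in scope and USE_DL_PREFIX is in effect (so the entry point is dlmallopt), and the helper name is hypothetical:

    /* Returns 1 if both adjustments were accepted, 0 otherwise. */
    int tune_allocator_example(void)
    {
        int ok = 1;
        ok &= dlmallopt(M_TRIM_THRESHOLD, 1024 * 1024);  /* trim when > 1 MiB of unused top space  */
        ok &= dlmallopt(M_MMAP_THRESHOLD, 512 * 1024);   /* route requests >= 512 KiB through mmap */
        return ok;
    }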
+*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ + +/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO + +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; + +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. 
realloc with a size + argument of zero (re)allocates a minimum-sized chunk. + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ + +void* dlrealloc(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +void* dlmemalign(size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. 
("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use regular calloc and assign pointers into this + space to represent elements. (In this case though, you cannot + independently free elements.) + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. 
If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use a single regular malloc, and assign pointers at + particular offsets in the aggregate space. (In this case though, you + cannot independently free elements.) + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. 
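The build_list sketch in the independent_calloc comment a little earlier is missing a closing parenthesis and never declares its loop index; a compilable version of the same idea is sketched below. It assumes the dl-prefixed entry points this file declares (dlindependent_calloc is declared above; dlfree is assumed to be the matching dl-prefixed free).

    #include <stddef.h>

    void** dlindependent_calloc(size_t, size_t, void**);  /* declared above */
    void   dlfree(void*);                                 /* assumed dl-prefixed free */

    struct Node { int item; struct Node* next; };

    static struct Node* build_list(size_t n) {
      struct Node** pool;
      struct Node*  first;
      size_t i;
      if (n == 0) return NULL;
      pool = (struct Node**)dlindependent_calloc(n, sizeof(struct Node), NULL);
      if (pool == NULL) return NULL;
      first = pool[0];
      for (i = 0; i + 1 < n; ++i)   /* chain the zeroed nodes; the last next stays NULL */
        pool[i]->next = pool[i + 1];
      dlfree(pool);                 /* frees only the pointer array, not the nodes */
      return first;
    }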
malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. 
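The mspace API just described (create_mspace, mspace_malloc, mspace_free, destroy_mspace) is only compiled when MSPACES is nonzero; a minimal usage sketch, assuming such a build, could look like this (the prototypes simply restate the declarations in this file):

    #include <stddef.h>
    #include <stdio.h>

    typedef void* mspace;
    mspace create_mspace(size_t capacity, int locked);
    void*  mspace_malloc(mspace msp, size_t bytes);
    void   mspace_free(mspace msp, void* mem);
    size_t destroy_mspace(mspace msp);

    int main(void) {
      /* capacity 0 => default granularity; locked != 0 => per-space lock */
      mspace arena = create_mspace(0, 1);
      if (arena == NULL) return 1;

      char* buf = (char*)mspace_malloc(arena, 1024);
      if (buf != NULL)
        mspace_free(arena, buf);

      /* Tearing down the space releases everything it still holds. */
      printf("released %zu bytes\n", destroy_mspace(arena));
      return 0;
    }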
+*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. 
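As a usage sketch for the mallopt parameters documented earlier (M_TRIM_THRESHOLD, M_GRANULARITY, M_MMAP_THRESHOLD), the following assumes a build exposing the dl-prefixed entry points declared above; the parameter numbers mirror the table given there, and the chosen values are arbitrary examples rather than recommendations.

    /* Parameter numbers as documented in the mallopt comment above. */
    #define M_TRIM_THRESHOLD (-1)
    #define M_GRANULARITY    (-2)
    #define M_MMAP_THRESHOLD (-3)

    int dlmallopt(int, int);   /* declared earlier in this file */

    static void tune_allocator(void) {
      /* Keep up to 8 MiB of trailing free space before trimming back. */
      dlmallopt(M_TRIM_THRESHOLD, 8 * 1024 * 1024);   /* returns 1 on success, 0 otherwise */
      /* Send requests of 1 MiB and larger straight to mmap. */
      dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);
    }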
+ ======================================================================== +*/ + +/* #include "malloc.h" */ + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef _MSC_VER +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ + +#include <stdio.h> /* for printing in malloc_stats */ + +#ifndef LACKS_ERRNO_H +#include <errno.h> /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ +#if FOOTERS +#include <time.h> /* for magic initialization */ +#endif /* FOOTERS */ +#ifndef LACKS_STDLIB_H +#include <stdlib.h> /* for abort() */ +#endif /* LACKS_STDLIB_H */ +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include <assert.h> +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#define assert(x) +#endif /* DEBUG */ +#ifndef LACKS_STRING_H +#include <string.h> /* for memset etc */ +#endif /* LACKS_STRING_H */ +#if USE_BUILTIN_FFS +#ifndef LACKS_STRINGS_H +#include <strings.h> /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ +#if HAVE_MMAP +#ifndef LACKS_SYS_MMAN_H +#include <sys/mman.h> /* for mmap */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include <fcntl.h> +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ +#if HAVE_MORECORE +#ifndef LACKS_UNISTD_H +#include <unistd.h> /* for sbrk */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ +#endif /* HAVE_MORECORE */ + +#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include <sys/param.h> +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)?
0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if !HAVE_MMAP +#define IS_MMAPPED_BIT (SIZE_T_ZERO) +#define USE_MMAP_BIT (SIZE_T_ZERO) +#define CALL_MMAP(s) MFAIL +#define CALL_MUNMAP(a, s) (-1) +#define DIRECT_MMAP(s) MFAIL + +#else /* HAVE_MMAP */ +#define IS_MMAPPED_BIT (SIZE_T_ONE) +#define USE_MMAP_BIT (SIZE_T_ONE) + +#if !defined(WIN32) && !defined (__OS2__) +#define CALL_MUNMAP(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP(s) CALL_MMAP(s) + +#elif defined(__OS2__) + +/* OS/2 MMAP via DosAllocMem */ +static void* os2mmap(size_t size) { + void* ptr; + if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) && + DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE)) + return MFAIL; + return ptr; +} + +#define os2direct_mmap(n) os2mmap(n) + +/* This function supports releasing coalesed segments */ +static int os2munmap(void* ptr, size_t size) { + while (size) { + ULONG ulSize = size; + ULONG ulFlags = 0; + if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0) + return -1; + if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 || + ulSize > size) + return -1; + if (DosFreeMem(ptr) != 0) + return -1; + ptr = ( void * ) ( ( char * ) ptr + ulSize ); + size -= ulSize; + } + return 0; +} + +#define CALL_MMAP(s) os2mmap(s) +#define CALL_MUNMAP(a, s) os2munmap((a), (s)) +#define DIRECT_MMAP(s) os2direct_mmap(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_EXECUTE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL; +} + +/* This function supports releasing coalesced segments */ +static int win32munmap(void* ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + return 0; +} + +#define CALL_MMAP(s) win32mmap(s) +#define CALL_MUNMAP(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ + +#if HAVE_MMAP && HAVE_MREMAP +#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) +#else /* HAVE_MMAP && HAVE_MREMAP */ +#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ + +#if HAVE_MORECORE +#define CALL_MORECORE(S) MORECORE(S) +#else /* HAVE_MORECORE */ +#define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE */ + +/* mstate bit set if contiguous morecore disabled or failed */ +#define USE_NONCONTIGUOUS_BIT (4U) + +/* segment bit set in create_mspace_with_base */ +#define EXTERN_BIT (8U) + + +/* --------------------------- Lock preliminaries ------------------------ */ + +#if USE_LOCKS + +/* + When locks are defined, there are up to two global locks: + + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to + MORECORE. In many cases sys_alloc requires two calls, that should + not be interleaved with calls by other threads. This does not + protect against direct calls to MORECORE by other threads not + using this lock, so there is still code to cope as best we can with + interference. + + * magic_init_mutex ensures that mparams.magic and other + unique mparams values are initialized only once. +*/ + +#if !defined(WIN32) && !defined(__OS2__) +/* By default use posix locks */ +#include <pthread.h> +#define MLOCK_T pthread_mutex_t +#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL) +#define ACQUIRE_LOCK(l) pthread_mutex_lock(l) +#define RELEASE_LOCK(l) pthread_mutex_unlock(l) + +#if HAVE_MORECORE +static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif /* HAVE_MORECORE */ + +static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER; + +#elif defined(__OS2__) +#define MLOCK_T HMTX +#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE) +#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT) +#define RELEASE_LOCK(l) DosReleaseMutexSem(*l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; + +#else /* WIN32 */ +/* + Because lock-protected regions have bounded times, and there + are no recursive lock calls, we can use simple spinlocks.
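The only contract the plumbing above imposes on MORECORE and MMAP is that a failed request evaluate to MFAIL. As a hedged sketch, a user-supplied MORECORE backed by a fixed static arena might look like the following; the function name, arena size, and the decision not to support trimming are all illustrative, and the build would define MORECORE=my_morecore (MORECORE and MORECORE_CANNOT_TRIM are standard dlmalloc configuration macros).

    #include <stddef.h>

    #define ARENA_BYTES (1 << 20)          /* 1 MiB backing store (arbitrary) */
    static char   arena[ARENA_BYTES];
    static size_t arena_used = 0;

    /* sbrk-like: grow contiguously, return the old break, or an all-ones
       address (the MFAIL convention) on failure.  Negative increments
       (trimming) are refused, matching MORECORE_CANNOT_TRIM. */
    void* my_morecore(ptrdiff_t increment) {
      if (increment >= 0 && (size_t)increment <= ARENA_BYTES - arena_used) {
        void* old_break = arena + arena_used;
        arena_used += (size_t)increment;
        return old_break;
      }
      return (void*)(~(size_t)0);
    }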
+*/ + +#define MLOCK_T long +static int win32_acquire_lock (MLOCK_T *sl) { + for (;;) { +#ifdef InterlockedCompareExchangePointer + if (!InterlockedCompareExchange(sl, 1, 0)) + return 0; +#else /* Use older void* version */ + if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0)) + return 0; +#endif /* InterlockedCompareExchangePointer */ + Sleep (0); + } +} + +static void win32_release_lock (MLOCK_T *sl) { + InterlockedExchange (sl, 0); +} + +#define INITIAL_LOCK(l) *(l)=0 +#define ACQUIRE_LOCK(l) win32_acquire_lock(l) +#define RELEASE_LOCK(l) win32_release_lock(l) +#if HAVE_MORECORE +static MLOCK_T morecore_mutex; +#endif /* HAVE_MORECORE */ +static MLOCK_T magic_init_mutex; +#endif /* WIN32 */ + +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS && HAVE_MORECORE +#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex); +#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex); +#else /* USE_LOCKS && HAVE_MORECORE */ +#define ACQUIRE_MORECORE_LOCK() +#define RELEASE_MORECORE_LOCK() +#endif /* USE_LOCKS && HAVE_MORECORE */ + +#if USE_LOCKS +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MAGIC_INIT_LOCK() +#define RELEASE_MAGIC_INIT_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse. This redundancy enables usage checks within free and realloc, + and reduces indirection when freeing and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. 
The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, which have the lowest-order bit + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set + PINUSE_BIT in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* MMapped chunks need a second word of overhead ... */ +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use. If the chunk was obtained with mmap, the prev_foot field has + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the + mmapped region to the base of the chunk. 
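To make the size arithmetic and head-word encoding above concrete, here is a small stand-alone sketch. It assumes a 64-bit build with 8-byte size_t, MALLOC_ALIGNMENT of 16, and FOOTERS disabled (so CHUNK_OVERHEAD is one size_t); the numbers are illustrative, not normative.

    #include <assert.h>
    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN_MASK ((size_t)15)   /* CHUNK_ALIGN_MASK for 16-byte alignment */
    #define OVERHEAD   ((size_t)8)    /* CHUNK_OVERHEAD without FOOTERS         */
    #define MIN_CHUNK  ((size_t)32)   /* sizeof(struct malloc_chunk), aligned   */
    #define PINUSE     ((size_t)1)
    #define CINUSE     ((size_t)2)

    /* Mirror of request2size(): pad the request and enforce the minimum. */
    static size_t request2size_demo(size_t req) {
      size_t padded = (req + OVERHEAD + ALIGN_MASK) & ~ALIGN_MASK;
      return (req < MIN_CHUNK - OVERHEAD - 1) ? MIN_CHUNK : padded;
    }

    int main(void) {
      printf("%zu %zu %zu\n",
             request2size_demo(1),     /* 32: minimum chunk      */
             request2size_demo(24),    /* 32: padding absorbs it */
             request2size_demo(25));   /* 48                     */

      /* The head word packs the chunk size with the two in-use bits. */
      size_t head = request2size_demo(25) | PINUSE | CINUSE;
      assert((head & ~(PINUSE | CINUSE)) == 48);  /* chunksize() */
      assert((head & CINUSE) && (head & PINUSE)); /* both in use */
      return 0;
    }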
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
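The "leftmost leaf" walk described above (follow the left child, and take the right child only when the left one is null) is how the smallest chunk in a treebin is located. A stand-alone sketch over a node shaped like malloc_tree_chunk, keeping only the fields the walk needs and assuming the tree is populated elsewhere:

    #include <stddef.h>

    struct tnode {                 /* stands in for malloc_tree_chunk   */
      size_t        size;          /* stands in for chunksize(t)        */
      struct tnode* child[2];      /* left = child[0], right = child[1] */
    };

    /* Track the smallest size seen while walking toward the leftmost leaf,
       taking the right child whenever the left one is null. */
    static struct tnode* smallest_in_tree(struct tnode* t) {
      struct tnode* best = t;
      while (t != NULL) {
        if (t->size < best->size)
          best = t;
        t = (t->child[0] != NULL) ? t->child[0] : t->child[1];
      }
      return best;
    }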
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
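Segment lookup, as described above, is a plain linear scan over the singly linked segment list. A stand-alone sketch of that scan over a record shaped like malloc_segment (field names as above) is essentially what segment_holding does further down in this file:

    #include <stddef.h>

    struct seg {             /* shaped like malloc_segment */
      char*       base;
      size_t      size;
      struct seg* next;
    };

    /* Return the segment whose [base, base+size) range holds addr,
       or NULL if no segment does. */
    static struct seg* holding(struct seg* s, char* addr) {
      for (; s != NULL; s = s->next)
        if (addr >= s->base && addr < s->base + s->size)
          return s;
      return NULL;
    }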
+*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ +#if FFI_MMAP_EXEC_WRIT + /* The mmap magic is supposed to store the address of the executable + segment at the very end of the requested block. */ + +# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t))) + + /* We can only merge segments if their corresponding executable + segments are at identical offsets. */ +# define check_segment_merge(S,b,s) \ + (mmap_exec_offset((b),(s)) == (S)->exec_offset) + +# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset) +# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset) + + /* The removal of sflags only works with HAVE_MORECORE == 0. */ + +# define get_segment_flags(S) (IS_MMAPPED_BIT) +# define set_segment_flags(S,v) \ + (((v) != IS_MMAPPED_BIT) ? (ABORT, (v)) : \ + (((S)->exec_offset = \ + mmap_exec_offset((S)->base, (S)->size)), \ + (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \ + (S)->exec_offset) ? (ABORT, (v)) : \ + (mmap_exec_offset((S)->base, (S)->size) = 0), (v))) + + /* We use an offset here, instead of a pointer, because then, when + base changes, we don't have to modify this. On architectures + with segmented addresses, this might not work. */ + ptrdiff_t exec_offset; +#else + +# define get_segment_flags(S) ((S)->sflags) +# define set_segment_flags(S,v) ((S)->sflags = (v)) +# define check_segment_merge(S,b,s) (1) + + flag_t sflags; /* mmap and extern flag */ +#endif +}; + +#define is_mmapped_segment(S) (get_segment_flags(S) & IS_MMAPPED_BIT) +#define is_extern_segment(S) (get_segment_flags(S) & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). 
Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. 
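With SMALLBIN_SHIFT and TREEBIN_SHIFT as defined above, small bins cover chunk sizes below MIN_LARGE_SIZE (1 << TREEBIN_SHIFT = 256 bytes), indexed by size >> SMALLBIN_SHIFT. A quick stand-alone check of that arithmetic (constants restated for the sketch):

    #include <stdio.h>
    #include <stddef.h>

    #define SMALLBIN_SHIFT 3
    #define TREEBIN_SHIFT  8
    #define MIN_LARGE      ((size_t)1 << TREEBIN_SHIFT)   /* 256 */

    int main(void) {
      size_t sz;
      for (sz = 32; sz < MIN_LARGE; sz += 32) {
        size_t idx = sz >> SMALLBIN_SHIFT;                /* small_index(sz)       */
        printf("chunk %3zu bytes -> smallbin %2zu (bin size %zu)\n",
               sz, idx, idx << SMALLBIN_SHIFT);           /* small_index2size(idx) */
      }
      return 0;
    }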
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +/* Ensure locks are initialized */ +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) + +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. 
+ USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. 
See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#if defined(__GNUC__) && defined(__i386__) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* index corresponding to given bit */ + +#if defined(__GNUC__) && defined(__i386__) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ + I = (bindex_t)J;\ +} + +#else /* GNUC */ +#if USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = __builtin_ffs(X)-1 + +#else /* USE_BUILTIN_FFS */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* USE_BUILTIN_FFS */ +#endif /* GNUC */ + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). 
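The compute_tree_index and compute_bit2idx macros above are bit tricks for "index of the highest / lowest set bit". On GCC or Clang (an LP64 target assumed, so size_t is unsigned long) the same indices can be obtained with builtins, which is essentially what the __i386__ asm paths do with bsrl and bsfl:

    #include <stddef.h>

    #define TREEBIN_SHIFT 8
    #define NTREEBINS     32

    /* Same result as compute_tree_index(S, I), using __builtin_clzl in place
       of the portable shift cascade. */
    static unsigned tree_index_for(size_t s) {
      size_t x = s >> TREEBIN_SHIFT;
      if (x == 0) return 0;
      if (x > 0xFFFF) return NTREEBINS - 1;
      {
        unsigned k = (unsigned)(sizeof(size_t) * 8 - 1) - (unsigned)__builtin_clzl(x);
        return (k << 1) + (unsigned)((s >> (k + TREEBIN_SHIFT - 1)) & 1);
      }
    }

    /* Same result as compute_bit2idx(X, I): index of the least set bit. */
    static unsigned bit2idx_for(unsigned x) {
      return (unsigned)__builtin_ctz(x);   /* x must be nonzero, as in the macro */
    }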
In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probablistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. +*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has its cinuse bit on */ +#define ok_cinuse(p) cinuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_cinuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = 
(s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +/* Initialize mparams */ +static int init_mparams(void) { + if (mparams.page_size == 0) { + size_t s; + + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + s = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ + s = (size_t)(time(0) ^ (size_t)0x55555555U); + + s |= (size_t)8U; /* ensure nonzero */ + s &= ~(size_t)7U; /* improve chances of fault for bad values */ + + } +#else /* (FOOTERS && !INSECURE) */ + s = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + ACQUIRE_MAGIC_INIT_LOCK(); + if (mparams.magic == 0) { + mparams.magic = s; + /* Set up lock for main malloc area */ + INITIAL_LOCK(&gm->mutex); + gm->mflags = mparams.default_mflags; + } + RELEASE_MAGIC_INIT_LOCK(); + +#if !defined(WIN32) && !defined(__OS2__) + mparams.page_size = malloc_getpagesize; + mparams.granularity = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : mparams.page_size); +#elif defined (__OS2__) + /* if low-memory is used, os2munmap() would break + if it were anything other than 64k */ + mparams.page_size = 4096u; + mparams.granularity = 65536u; +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + mparams.page_size = system_info.dwPageSize; + mparams.granularity = system_info.dwAllocationGranularity; + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. 
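The FOOTERS machinery above stores, in each inuse chunk's footer, the xor of its owning mstate with the magic value seeded here in init_mparams; get_mstate_for simply xors again to recover the owner, which ok_magic then validates. A minimal standalone sketch of that round trip (all names below are local to the example, not dlmalloc's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  struct fake_mstate { int dummy; } state;     /* stands in for a malloc_state */
  uintptr_t magic = (uintptr_t)0x55555558u;    /* stand-in for mparams.magic; the real
                                                  seed comes from /dev/urandom or time */

  /* mark_inuse_foot: the footer holds (owning mstate) xor magic */
  uintptr_t foot = (uintptr_t)&state ^ magic;

  /* get_mstate_for: xor with the same magic recovers the owner */
  struct fake_mstate *owner = (struct fake_mstate *)(foot ^ magic);

  assert(owner == &state);
  printf("footer round-trip ok\n");
  return 0;
}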
+ */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) + ABORT; + } + return 0; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (size_t)value; + init_mparams(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = chunksize(p); + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!next_pinuse(p)); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, 
p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. */ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
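do_check_free_chunk and do_check_smallbin above rely on the invariant of a circular doubly linked bin: for every linked chunk p, p->fd->bk == p and p->bk->fd == p. A toy standalone model of that list discipline, mirroring insert_small_chunk/unlink_small_chunk in miniature (struct and function names here are illustrative only):

#include <assert.h>

/* the bin header is itself a node; an empty bin points at itself */
struct node { struct node *fd, *bk; };

static void bin_init(struct node *b) { b->fd = b->bk = b; }

/* insert_small_chunk in miniature: push P right after the bin header */
static void bin_insert(struct node *b, struct node *p) {
  struct node *f = b->fd;
  b->fd = p; f->bk = p;
  p->fd = f; p->bk = b;
}

/* unlink_small_chunk in miniature */
static void bin_unlink(struct node *p) {
  p->fd->bk = p->bk;
  p->bk->fd = p->fd;
}

int main(void) {
  struct node bin, a, b;
  bin_init(&bin);
  bin_insert(&bin, &a);
  bin_insert(&bin, &b);
  assert(a.fd->bk == &a && a.bk->fd == &a);   /* the checked invariants */
  assert(b.fd->bk == &b && b.bk->fd == &b);
  bin_unlink(&a);
  assert(bin.fd == &b && bin.bk == &b);
  return 0;
}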
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + assert(m->topsize == chunksize(m->top)); + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while 
(s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. 
If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendants + correspond properly to bit masks. We use the rightmost descendant + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + (m == gm)? dlmalloc(b) : mspace_malloc(m, b) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. 
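The direct-mmap bookkeeping described just below can be shown in isolation: the chunk records its offset from the start of the mapped region in prev_foot, so free can reconstruct the exact base address to hand back to MUNMAP. A small standalone sketch (the fake_* names are illustrative; a heap buffer stands in for the mmapped region):

#include <assert.h>
#include <stdlib.h>

#define FAKE_IS_MMAPPED_BIT ((size_t)1)   /* plays the role of IS_MMAPPED_BIT */

struct fake_chunk { size_t prev_foot; size_t head; };

int main(void) {
  char *region = malloc(4096);            /* stands in for the DIRECT_MMAP'd region */
  size_t offset = 16;                     /* stands in for align_offset(chunk2mem(mm)) */
  struct fake_chunk *p;
  size_t off;

  if (region == NULL) return 1;
  p = (struct fake_chunk *)(region + offset);
  p->prev_foot = offset | FAKE_IS_MMAPPED_BIT;   /* what mmap_alloc records */
  p->head = 0;

  /* at free time: strip the flag bit and step back to the region base */
  off = p->prev_foot & ~FAKE_IS_MMAPPED_BIT;
  assert((char *)p - off == region);             /* the argument handed to MUNMAP */

  free(region);
  return 0;
}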
+*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. 
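mmap_alloc just below rounds the padded request up to a multiple of the allocation granularity and rejects requests that wrap past zero (the mmsize > nb test). A standalone sketch of that rounding and the wrap-around guard, assuming the usual power-of-two granularity and the (n + (g-1)) & ~(g-1) form of granularity_align defined earlier in the file:

#include <assert.h>
#include <stddef.h>

/* round n up to a multiple of the power-of-two granularity g */
static size_t round_up(size_t n, size_t g) {
  return (n + (g - 1)) & ~(g - 1);
}

int main(void) {
  size_t g = 65536;                        /* an assumed 64K allocation granularity */
  size_t nb, mmsize;

  assert(round_up(1, g) == g);
  assert(round_up(g, g) == g);
  assert(round_up(g + 1, g) == 2 * g);

  /* wrap-around guard: a near-SIZE_MAX request overflows back past zero,
     so the "mmsize > nb" test would reject it */
  nb = (size_t)-1 - 100;
  mmsize = round_up(nb + 256, g);          /* wraps (unsigned arithmetic) */
  assert(!(mmsize > nb));
  return 0;
}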
*/ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmapped); + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + init_mparams(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. 
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + size_t asize = 0; + ACQUIRE_MORECORE_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + asize += (page_align((size_t)base) - (size_t)base); + /* Can't call MORECORE if size is negative when treated as signed */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == base) { + tbase = base; + tsize = asize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Use mem here only if it did continuously extend old space */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { + tbase = br; + tsize = asize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (asize < HALF_MAX_SIZE_T && + asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { + size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + asize += esize; + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-asize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = asize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MORECORE_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (rsize > nb) { /* Fail if wraps around zero */ + char* mp = (char*)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + mmap_flag = IS_MMAPPED_BIT; + } + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MORECORE_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MORECORE_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + m->seg.base = m->least_addr = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmap_flag); + m->magic = mparams.magic; 
+ init_bins(m); + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if 
(HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MORECORE_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MORECORE_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if 
(rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. 
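The aligned-spot computation described in this comment (and performed just below on mem) is ordinary round-up-to-alignment arithmetic: for a power-of-two alignment a, (addr + a - 1) & -a. A standalone illustration (the function name is ours, not dlmalloc's):

#include <assert.h>
#include <stdint.h>

/* round an address up to a power-of-two alignment; for unsigned a, -a == ~(a-1) */
static uintptr_t align_up(uintptr_t addr, uintptr_t alignment) {
  return (addr + alignment - 1) & -alignment;
}

int main(void) {
  assert(align_up(0x1001, 0x1000) == 0x2000);
  assert(align_up(0x2000, 0x1000) == 0x2000);   /* already aligned: unchanged */
  assert(align_up(0x2fff, 64)     == 0x3000);
  return 0;
}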
+ */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? + br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. + + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
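ialloc is the engine behind the independent_X entry points declared later (dlindependent_calloc and dlindependent_comalloc): one aggregate chunk is carved into n individually freeable pieces, with the pointer array either caller-supplied or appended to the chunk. A hedged usage sketch against dlindependent_comalloc as defined further down (assumed build: this file compiled with ONLY_MSPACES unset and linked into the program):

#include <stdio.h>
#include <stddef.h>

extern void **dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[]);
extern void   dlfree(void *mem);

int main(void) {
  size_t sizes[3] = { 64, 100 * sizeof(int), 100 * sizeof(double) };
  void  *pieces[3];

  if (dlindependent_comalloc(3, sizes, pieces) == NULL)   /* one aggregate chunk */
    return 1;
  printf("header %p, ints %p, doubles %p\n", pieces[0], pieces[1], pieces[2]);

  /* each piece is an ordinary chunk and may be freed independently */
  dlfree(pieces[1]);
  dlfree(pieces[2]);
  dlfree(pieces[0]);
  return 0;
}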
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
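The allocation strategy summarized in the comment below is exercised through the public dl* wrappers defined in this section. A hedged usage sketch (assumed build: link against this file with ONLY_MSPACES unset; the request sizes are arbitrary):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

extern void  *dlmalloc(size_t bytes);
extern void  *dlrealloc(void *oldmem, size_t bytes);
extern void   dlfree(void *mem);
extern size_t dlmalloc_usable_size(void *mem);

int main(void) {
  char *p = dlmalloc(100);          /* small request: smallbin / dv / top path */
  char *q;
  if (p == NULL) return 1;
  strcpy(p, "hello");

  q = dlrealloc(p, 300000);         /* large request: treebin fit or direct mmap */
  if (q == NULL) { dlfree(p); return 1; }
  p = q;

  printf("%s (usable size %zu)\n", p, dlmalloc_usable_size(p));
  dlfree(p);
  return 0;
}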
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
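dlcalloc, defined a little further down, guards the n_elements * elem_size product with a cheap filter (the division is skipped when both factors fit in 16 bits) followed by a division-based wrap check, forcing MAX_SIZE_T on overflow so the request fails downstream. A standalone sketch of that guard:

#include <assert.h>
#include <stddef.h>

/* overflow-checked multiplication in the style of dlcalloc / mspace_calloc */
static size_t checked_mul(size_t n_elements, size_t elem_size) {
  size_t req = 0;
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = (size_t)-1;                     /* MAX_SIZE_T: guaranteed failure */
  }
  return req;
}

int main(void) {
  assert(checked_mul(1000, 8) == 8000);              /* small factors: no division */
  assert(checked_mul((size_t)-1, 2) == (size_t)-1);  /* wraps, forced to MAX */
  assert(checked_mul(0, 123456) == 0);               /* zero elements: zero request */
  return 0;
}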
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->mflags = mparams.default_mflags; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + set_segment_flags(&m->seg, IS_MMAPPED_BIT); + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + set_segment_flags(&m->seg, EXTERN_BIT); + set_lock(m, locked); + } + return (mspace)m; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = get_segment_flags(sp); + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
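A hedged usage sketch for the mspace API in this section: create an independent malloc space, allocate and free within it, then tear the whole thing down with destroy_mspace. It assumes this file is compiled with MSPACES enabled and linked in, and that mspace is the void* handle it is typedef'd to elsewhere in the file; capacity 0 asks for the default granularity, locked 0 skips the mutex.

#include <stdio.h>
#include <stddef.h>

typedef void *mspace;                       /* assumed: mirrors the file's typedef */
extern mspace create_mspace(size_t capacity, int locked);
extern size_t destroy_mspace(mspace msp);
extern void  *mspace_malloc(mspace msp, size_t bytes);
extern void   mspace_free(mspace msp, void *mem);

int main(void) {
  mspace arena = create_mspace(0, 0);       /* 0 capacity: start at default granularity */
  void *p;
  size_t freed;

  if (arena == NULL) return 1;
  p = mspace_malloc(arena, 1024);
  if (p != NULL) mspace_free(arena, p);

  freed = destroy_mspace(arena);            /* releases every mmapped segment at once */
  printf("destroy_mspace released %zu bytes\n", freed);
  return 0;
}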
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != 
elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. 
+ * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. + + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. + * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. + + V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) + * Fix improper #endif nesting in C++ + * Add explicit casts needed for C++ + + V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) + * Use trees for large bins + * Support mspaces + * Use segments to unify sbrk-based and mmap-based system allocation, + removing need for emulation on most platforms without sbrk. + * Default safety checks + * Optional footer checks. Thanks to William Robertson for the idea. + * Internal code refactoring + * Incorporate suggestions and platform-specific changes. + Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, + Aaron Bachmann, Emery Berger, and others. + * Speed up non-fastbin processing enough to remove fastbins. + * Remove useless cfree() to avoid conflicts with other apps. + * Remove internal memcpy, memset. 
Compilers handle builtins better. + * Remove some options that no one ever used and rename others. + + V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + * Fix malloc_state bitmap array misdeclaration + + V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) + * Allow tuning of FIRST_SORTED_BIN_SIZE + * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. + * Better detection and support for non-contiguousness of MORECORE. + Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger + * Bypass most of malloc if no frees. Thanks To Emery Berger. + * Fix freeing of old top non-contiguous chunk im sysmalloc. + * Raised default trim and map thresholds to 256K. + * Fix mmap-related #defines. Thanks to Lubos Lunak. + * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. + * Branch-free bin calculation + * Default trim and mmap thresholds now 256K. + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. 
+ * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occurring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) 
+ +*/ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/eabi.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/eabi.S new file mode 100644 index 0000000..379ea4b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/eabi.S @@ -0,0 +1,128 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2004 Anthony Green + + FR-V Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # gr8 : ffi_prep_args + # gr9 : &ecif + # gr10: cif->bytes + # gr11: fig->flags + # gr12: ecif.rvalue + # gr13: fn + +ffi_call_EABI: + addi sp, #-80, sp + sti fp, @(sp, #24) + addi sp, #24, fp + movsg lr, gr5 + + /* Make room for the new arguments. */ + /* subi sp, fp, gr10 */ + + /* Store return address and incoming args on stack. */ + sti gr5, @(fp, #8) + sti gr8, @(fp, #-4) + sti gr9, @(fp, #-8) + sti gr10, @(fp, #-12) + sti gr11, @(fp, #-16) + sti gr12, @(fp, #-20) + sti gr13, @(fp, #-24) + + sub sp, gr10, sp + + /* Call ffi_prep_args. */ + ldi @(fp, #-4), gr4 + addi sp, #0, gr8 + ldi @(fp, #-8), gr9 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* ffi_prep_args returns the new stack pointer. */ + mov gr8, gr4 + + ldi @(sp, #0), gr8 + ldi @(sp, #4), gr9 + ldi @(sp, #8), gr10 + ldi @(sp, #12), gr11 + ldi @(sp, #16), gr12 + ldi @(sp, #20), gr13 + + /* Always copy the return value pointer into the hidden + parameter register. This is only strictly necessary + when we're returning an aggregate type, but it doesn't + hurt to do this all the time, and it saves a branch. */ + ldi @(fp, #-20), gr3 + + /* Use the ffi_prep_args return value for the new sp. */ + mov gr4, sp + + /* Call the target function. */ + ldi @(fp, -24), gr4 +#ifdef __FRV_FDPIC__ + ldd @(gr4, gr0), gr14 + calll @(gr14, gr0) +#else + calll @(gr4, gr0) +#endif + + /* Store the result. */ + ldi @(fp, #-16), gr10 /* fig->flags */ + ldi @(fp, #-20), gr4 /* ecif.rvalue */ + + /* Is the return value stored in two registers? */ + cmpi gr10, #8, icc0 + bne icc0, 0, .L2 + /* Yes, save them. */ + sti gr8, @(gr4, #0) + sti gr9, @(gr4, #4) + bra .L3 +.L2: + /* Is the return value a structure? */ + cmpi gr10, #-1, icc0 + beq icc0, 0, .L3 + /* No, save a 4 byte return value. 
*/ + sti gr8, @(gr4, #0) +.L3: + + /* Restore the stack, and return. */ + ldi @(fp, 8), gr5 + ld @(fp, gr0), fp + addi sp,#80,sp + jmpl @(gr5,gr0) + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/ffi.c new file mode 100644 index 0000000..ed1c65a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/ffi.c @@ -0,0 +1,292 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2004 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2008 Red Hat, Inc. + + FR-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + /* if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (count > 24) + { + // This is going on the stack. Turn it into a double. + *(double *) argp = (double) *(float*)(* p_argv); + z = sizeof(double); + } + else + *(void **) argp = *(void **)(* p_argv); + } */ + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 
24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in gr7. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("gr7"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("fp"); + char *stack_args = frame_pointer + 16; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + avalue[i] = ptr; + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == ((char *)register_args + (6*4))) + ptr = stack_args; + } + + /* Invoke the closure. */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + /* The caller allocates space for the return structure, and + passes a pointer to this space in gr3. Use this value directly + as the return value. */ + register void *return_struct_ptr __asm__("gr3"); + (closure->fun) (cif, return_struct_ptr, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + + /* Functions return 4-byte or smaller results in gr8. 8-byte + values also use gr9. We fill the both, even for small return + values, just to avoid a branch. 
*/ + asm ("ldi @(%0, #0), gr8" : : "r" (&rvalue)); + asm ("ldi @(%0, #0), gr9" : : "r" (&((int *) &rvalue)[1])); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; +#ifdef __FRV_FDPIC__ + register void *got __asm__("gr15"); +#endif + int i; + + fn = (unsigned long) ffi_closure_eabi; + +#ifdef __FRV_FDPIC__ + tramp[0] = &((unsigned int *)codeloc)[2]; + tramp[1] = got; + tramp[2] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[3] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[4] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[5] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[6] = 0x9cc86000; /* ldi @(gr6, #0), gr14 */ + tramp[7] = 0x8030e000; /* jmpl @(gr14, gr0) */ +#else + tramp[0] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */ + tramp[1] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */ + tramp[2] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */ + tramp[3] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */ + tramp[4] = 0x80300006; /* jmpl @(gr0, gr6) */ +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Cache flushing. */ + for (i = 0; i < FFI_TRAMPOLINE_SIZE; i++) + __asm__ volatile ("dcf @(%0,%1)\n\tici @(%2,%1)" :: "r" (tramp), "r" (i), + "r" (codeloc)); + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/ffitarget.h new file mode 100644 index 0000000..d42540e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/frv/ffitarget.h @@ -0,0 +1,62 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2004 Red Hat, Inc. + Target configuration macros for FR-V + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_EABI +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef __FRV_FDPIC__ +/* Trampolines are 8 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (8*4) +#else +/* Trampolines are 5 4-byte instructions long. */ +#define FFI_TRAMPOLINE_SIZE (5*4) +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ffi.c new file mode 100644 index 0000000..b1d04c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ffi.c @@ -0,0 +1,604 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + Copyright (c) 2011 Anthony Green + + IA64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include + +#include "ia64_flags.h" + +/* A 64-bit pointer value. In LP64 mode, this is effectively a plain + pointer. In ILP32 mode, it's a pointer that's been extended to + 64 bits by "addp4". */ +typedef void *PTR64 __attribute__((mode(DI))); + +/* Memory image of fp register contents. This is the implementation + specific format used by ldf.fill/stf.spill. All we care about is + that it wants a 16 byte aligned slot. */ +typedef struct +{ + UINT64 x[2] __attribute__((aligned(16))); +} fpreg; + + +/* The stack layout given to ffi_call_unix and ffi_closure_unix_inner. */ + +struct ia64_args +{ + fpreg fp_regs[8]; /* Contents of 8 fp arg registers. */ + UINT64 gp_regs[8]; /* Contents of 8 gp arg registers. */ + UINT64 other_args[]; /* Arguments passed on stack, variable size. */ +}; + + +/* Adjust ADDR, a pointer to an 8 byte slot, to point to the low LEN bytes. */ + +static inline void * +endian_adjust (void *addr, size_t len) +{ +#ifdef __BIG_ENDIAN__ + return addr + (8 - len); +#else + return addr; +#endif +} + +/* Store VALUE to ADDR in the current cpu implementation's fp spill format. 
+ This is a macro instead of a function, so that it works for all 3 floating + point types without type conversions. Type conversion to long double breaks + the denorm support. */ + +#define stf_spill(addr, value) \ + asm ("stf.spill %0 = %1%P0" : "=m" (*addr) : "f"(value)); + +/* Load a value from ADDR, which is in the current cpu implementation's + fp spill format. As above, this must also be a macro. */ + +#define ldf_fill(result, addr) \ + asm ("ldf.fill %0 = %1%P1" : "=f"(result) : "m"(*addr)); + +/* Return the size of the C type associated with with TYPE. Which will + be one of the FFI_IA64_TYPE_HFA_* values. */ + +static size_t +hfa_type_size (int type) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + return sizeof(float); + case FFI_IA64_TYPE_HFA_DOUBLE: + return sizeof(double); + case FFI_IA64_TYPE_HFA_LDOUBLE: + return sizeof(__float80); + default: + abort (); + } +} + +/* Load from ADDR a value indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. */ + +static void +hfa_type_load (fpreg *fpaddr, int type, void *addr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + stf_spill (fpaddr, *(float *) addr); + return; + case FFI_IA64_TYPE_HFA_DOUBLE: + stf_spill (fpaddr, *(double *) addr); + return; + case FFI_IA64_TYPE_HFA_LDOUBLE: + stf_spill (fpaddr, *(__float80 *) addr); + return; + default: + abort (); + } +} + +/* Load VALUE into ADDR as indicated by TYPE. Which will be one of + the FFI_IA64_TYPE_HFA_* values. */ + +static void +hfa_type_store (int type, void *addr, fpreg *fpaddr) +{ + switch (type) + { + case FFI_IA64_TYPE_HFA_FLOAT: + { + float result; + ldf_fill (result, fpaddr); + *(float *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_DOUBLE: + { + double result; + ldf_fill (result, fpaddr); + *(double *) addr = result; + break; + } + case FFI_IA64_TYPE_HFA_LDOUBLE: + { + __float80 result; + ldf_fill (result, fpaddr); + *(__float80 *) addr = result; + break; + } + default: + abort (); + } +} + +/* Is TYPE a struct containing floats, doubles, or extended doubles, + all of the same fp type? If so, return the element type. Return + FFI_TYPE_VOID if not. */ + +static int +hfa_element_type (ffi_type *type, int nested) +{ + int element = FFI_TYPE_VOID; + + switch (type->type) + { + case FFI_TYPE_FLOAT: + /* We want to return VOID for raw floating-point types, but the + synthetic HFA type if we're nested within an aggregate. */ + if (nested) + element = FFI_IA64_TYPE_HFA_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + /* Similarly. */ + if (nested) + element = FFI_IA64_TYPE_HFA_DOUBLE; + break; + + case FFI_TYPE_LONGDOUBLE: + /* Similarly, except that that HFA is true for double extended, + but not quad precision. Both have sizeof == 16, so tell the + difference based on the precision. */ + if (LDBL_MANT_DIG == 64 && nested) + element = FFI_IA64_TYPE_HFA_LDOUBLE; + break; + + case FFI_TYPE_STRUCT: + { + ffi_type **ptr = &type->elements[0]; + + for (ptr = &type->elements[0]; *ptr ; ptr++) + { + int sub_element = hfa_element_type (*ptr, 1); + if (sub_element == FFI_TYPE_VOID) + return FFI_TYPE_VOID; + + if (element == FFI_TYPE_VOID) + element = sub_element; + else if (element != sub_element) + return FFI_TYPE_VOID; + } + } + break; + + default: + return FFI_TYPE_VOID; + } + + return element; +} + + +/* Perform machine dependent cif processing. */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + int flags; + + /* Adjust cif->bytes to include space for the bits of the ia64_args frame + that precedes the integer register portion. 
The estimate that the + generic bits did for the argument space required is good enough for the + integer component. */ + cif->bytes += offsetof(struct ia64_args, gp_regs[0]); + if (cif->bytes < sizeof(struct ia64_args)) + cif->bytes = sizeof(struct ia64_args); + + /* Set the return type flag. */ + flags = cif->rtype->type; + switch (cif->rtype->type) + { + case FFI_TYPE_LONGDOUBLE: + /* Leave FFI_TYPE_LONGDOUBLE as meaning double extended precision, + and encode quad precision as a two-word integer structure. */ + if (LDBL_MANT_DIG != 64) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (16 << 8); + break; + + case FFI_TYPE_STRUCT: + { + size_t size = cif->rtype->size; + int hfa_type = hfa_element_type (cif->rtype, 0); + + if (hfa_type != FFI_TYPE_VOID) + { + size_t nelts = size / hfa_type_size (hfa_type); + if (nelts <= 8) + flags = hfa_type | (size << 8); + } + else + { + if (size <= 32) + flags = FFI_IA64_TYPE_SMALL_STRUCT | (size << 8); + } + } + break; + + default: + break; + } + cif->flags = flags; + + return FFI_OK; +} + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status +ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern int ffi_call_unix (struct ia64_args *, PTR64, void (*)(void), UINT64); + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + struct ia64_args *stack; + long i, avn, gpcount, fpcount; + ffi_type **p_arg; + + FFI_ASSERT (cif->abi == FFI_UNIX); + + /* If we have no spot for a return value, make one. */ + if (rvalue == NULL && cif->rtype->type != FFI_TYPE_VOID) + rvalue = alloca (cif->rtype->size); + + /* Allocate the stack frame. 
*/ + stack = alloca (cif->bytes); + + gpcount = fpcount = 0; + avn = cif->nargs; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + stack->gp_regs[gpcount++] = *(SINT8 *)avalue[i]; + break; + case FFI_TYPE_UINT8: + stack->gp_regs[gpcount++] = *(UINT8 *)avalue[i]; + break; + case FFI_TYPE_SINT16: + stack->gp_regs[gpcount++] = *(SINT16 *)avalue[i]; + break; + case FFI_TYPE_UINT16: + stack->gp_regs[gpcount++] = *(UINT16 *)avalue[i]; + break; + case FFI_TYPE_SINT32: + stack->gp_regs[gpcount++] = *(SINT32 *)avalue[i]; + break; + case FFI_TYPE_UINT32: + stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i]; + break; + + case FFI_TYPE_POINTER: + stack->gp_regs[gpcount++] = (UINT64)(PTR64) *(void **)avalue[i]; + break; + + case FFI_TYPE_FLOAT: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]); + { + UINT32 tmp; + memcpy (&tmp, avalue[i], sizeof (UINT32)); + stack->gp_regs[gpcount++] = tmp; + } + break; + + case FFI_TYPE_DOUBLE: + if (gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]); + memcpy (&stack->gp_regs[gpcount++], avalue[i], sizeof (UINT64)); + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8) + stf_spill (&stack->fp_regs[fpcount++], *(__float80 *)avalue[i]); + memcpy (&stack->gp_regs[gpcount], avalue[i], 16); + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_load (&stack->fp_regs[fpcount], hfa_type, + avalue[i] + offset); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + } + + memcpy (&stack->gp_regs[gpcount], avalue[i], size); + gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + ffi_call_unix (stack, rvalue, fn, cif->flags); +} + +/* Closures represent a pair consisting of a function pointer, and + some user data. A closure is invoked by reinterpreting the closure + as a function pointer, and branching to it. Thus we can make an + interpreted function callable as a C function: We turn the + interpreter itself, together with a pointer specifying the + interpreted procedure, into a closure. + + For IA64, function pointer are already pairs consisting of a code + pointer, and a gp pointer. The latter is needed to access global + variables. Here we set up such a pair as the first two words of + the closure (in the "trampoline" area), but we replace the gp + pointer with a pointer to the closure itself. We also add the real + gp pointer to the closure. This allows the function entry code to + both retrieve the user data, and to restore the correct gp pointer. */ + +extern void ffi_closure_unix (); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + /* The layout of a function descriptor. A C function pointer really + points to one of these. 
*/ + struct ia64_fd + { + UINT64 code_pointer; + UINT64 gp; + }; + + struct ffi_ia64_trampoline_struct + { + UINT64 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT64 fake_gp; /* Pointer to closure, installed as gp. */ + UINT64 real_gp; /* Real gp value. */ + }; + + struct ffi_ia64_trampoline_struct *tramp; + struct ia64_fd *fd; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp; + fd = (struct ia64_fd *)(void *)ffi_closure_unix; + + tramp->code_pointer = fd->code_pointer; + tramp->real_gp = fd->gp; + tramp->fake_gp = (UINT64)(PTR64)codeloc; + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +UINT64 +ffi_closure_unix_inner (ffi_closure *closure, struct ia64_args *stack, + void *rvalue, void *r8) +{ + ffi_cif *cif; + void **avalue; + ffi_type **p_arg; + long i, avn, gpcount, fpcount, nfixedargs; + + cif = closure->cif; + avn = cif->nargs; + nfixedargs = cif->nfixedargs; + avalue = alloca (avn * sizeof (void *)); + + /* If the structure return value is passed in memory get that location + from r8 so as to pass the value directly back to the caller. */ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = r8; + + gpcount = fpcount = 0; + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + int named = i < nfixedargs; + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 1); + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 2); + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 4); + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + avalue[i] = &stack->gp_regs[gpcount++]; + break; + case FFI_TYPE_POINTER: + avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], sizeof(void*)); + break; + + case FFI_TYPE_FLOAT: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + float result; + avalue[i] = addr; + ldf_fill (result, addr); + *(float *)addr = result; + } + else + avalue[i] = endian_adjust(&stack->gp_regs[gpcount], 4); + gpcount++; + break; + + case FFI_TYPE_DOUBLE: + if (named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + double result; + avalue[i] = addr; + ldf_fill (result, addr); + *(double *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount++; + break; + + case FFI_TYPE_LONGDOUBLE: + if (gpcount & 1) + gpcount++; + if (LDBL_MANT_DIG == 64 && named && gpcount < 8 && fpcount < 8) + { + fpreg *addr = &stack->fp_regs[fpcount++]; + __float80 result; + avalue[i] = addr; + ldf_fill (result, addr); + *(__float80 *)addr = result; + } + else + avalue[i] = &stack->gp_regs[gpcount]; + gpcount += 2; + break; + + case FFI_TYPE_STRUCT: + { + size_t size = (*p_arg)->size; + size_t align = (*p_arg)->alignment; + int hfa_type = hfa_element_type (*p_arg, 0); + + FFI_ASSERT (align <= 16); + if (align == 16 && (gpcount & 1)) + gpcount++; + + if (hfa_type != FFI_TYPE_VOID) + { + size_t hfa_size = hfa_type_size (hfa_type); + size_t offset = 0; + size_t gp_offset = gpcount * 8; + void *addr = alloca (size); + + avalue[i] = addr; + + while (fpcount < 8 + && offset < size + && gp_offset < 8 * 8) + { + hfa_type_store (hfa_type, addr + offset, + &stack->fp_regs[fpcount]); + offset += hfa_size; + gp_offset += hfa_size; + fpcount += 1; + } + + if (offset < size) + memcpy (addr + offset, 
(char *)stack->gp_regs + gp_offset, + size - offset); + } + else + avalue[i] = &stack->gp_regs[gpcount]; + + gpcount += (size + 7) / 8; + } + break; + + default: + abort (); + } + } + + closure->fun (cif, rvalue, avalue, closure->user_data); + + return cif->flags; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ffitarget.h new file mode 100644 index 0000000..fd5b9a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ffitarget.h @@ -0,0 +1,56 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for IA-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, /* Linux and all Unix variants use the same conventions */ + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 /* Really the following struct, which */ + /* can be interpreted as a C function */ + /* descriptor: */ +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ia64_flags.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ia64_flags.h new file mode 100644 index 0000000..9d652ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/ia64_flags.h @@ -0,0 +1,40 @@ +/* ----------------------------------------------------------------------- + ia64_flags.h - Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Original author: Hans Boehm, HP Labs + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* "Type" codes used between assembly and C. When used as a part of + a cfi->flags value, the low byte will be these extra type codes, + and bits 8-31 will be the actual size of the type. */ + +/* Small structures containing N words in integer registers. */ +#define FFI_IA64_TYPE_SMALL_STRUCT (FFI_TYPE_LAST + 1) + +/* Homogeneous Floating Point Aggregates (HFAs) which are returned + in FP registers. */ +#define FFI_IA64_TYPE_HFA_FLOAT (FFI_TYPE_LAST + 2) +#define FFI_IA64_TYPE_HFA_DOUBLE (FFI_TYPE_LAST + 3) +#define FFI_IA64_TYPE_HFA_LDOUBLE (FFI_TYPE_LAST + 4) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/unix.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/unix.S new file mode 100644 index 0000000..e2547e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/ia64/unix.S @@ -0,0 +1,567 @@ +/* ----------------------------------------------------------------------- + unix.S - Copyright (c) 1998, 2008 Red Hat, Inc. + Copyright (c) 2000 Hewlett Packard Company + + IA64/unix Foreign Function Interface + + Primary author: Hans Boehm, HP Labs + + Loosely modeled on Cygnus code for other platforms. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "ia64_flags.h" + + .pred.safe_across_calls p1-p5,p16-p63 +.text + +/* int ffi_call_unix (struct ia64_args *stack, PTR64 rvalue, + void (*fn)(void), int flags); + */ + + .align 16 + .global ffi_call_unix + .proc ffi_call_unix +ffi_call_unix: + .prologue + /* Bit o trickiness. We actually share a stack frame with ffi_call. + Rely on the fact that ffi_call uses a vframe and don't bother + tracking one here at all. */ + .fframe 0 + .save ar.pfs, r36 // loc0 + alloc loc0 = ar.pfs, 4, 3, 8, 0 + .save rp, loc1 + mov loc1 = b0 + .body + add r16 = 16, in0 + mov loc2 = gp + mov r8 = in1 + ;; + + /* Load up all of the argument registers. */ + ldf.fill f8 = [in0], 32 + ldf.fill f9 = [r16], 32 + ;; + ldf.fill f10 = [in0], 32 + ldf.fill f11 = [r16], 32 + ;; + ldf.fill f12 = [in0], 32 + ldf.fill f13 = [r16], 32 + ;; + ldf.fill f14 = [in0], 32 + ldf.fill f15 = [r16], 24 + ;; + ld8 out0 = [in0], 16 + ld8 out1 = [r16], 16 + ;; + ld8 out2 = [in0], 16 + ld8 out3 = [r16], 16 + ;; + ld8 out4 = [in0], 16 + ld8 out5 = [r16], 16 + ;; + ld8 out6 = [in0] + ld8 out7 = [r16] + ;; + + /* Deallocate the register save area from the stack frame. */ + mov sp = in0 + + /* Call the target function. */ + ld8 r16 = [in2], 8 + ;; + ld8 gp = [in2] + mov b6 = r16 + br.call.sptk.many b0 = b6 + ;; + + /* Dispatch to handle return value. 
*/ + mov gp = loc2 + zxt1 r16 = in3 + ;; + mov ar.pfs = loc0 + addl r18 = @ltoffx(.Lst_table), gp + ;; + ld8.mov r18 = [r18], .Lst_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + ;; + ld8 r17 = [r18] + shr in3 = in3, 8 + ;; + add r17 = r17, r18 + ;; + mov b6 = r17 + br b6 + ;; + +.Lst_void: + br.ret.sptk.many b0 + ;; +.Lst_uint8: + zxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint8: + sxt1 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint16: + zxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint16: + sxt2 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_uint32: + zxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_sint32: + sxt4 r8 = r8 + ;; + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_int64: + st8 [in1] = r8 + br.ret.sptk.many b0 + ;; +.Lst_float: + stfs [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_double: + stfd [in1] = f8 + br.ret.sptk.many b0 + ;; +.Lst_ldouble: + stfe [in1] = f8 + br.ret.sptk.many b0 + ;; + +.Lst_small_struct: + cmp.lt p6, p0 = 8, in3 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; + add r16 = 8, sp + add r17 = 16, sp + add r18 = 24, sp + ;; + st8 [sp] = r8 +(p6) st8 [r16] = r9 + mov out0 = in1 +(p7) st8 [r17] = r10 +(p8) st8 [r18] = r11 + mov out1 = sp + mov out2 = in3 + ;; + // ia64 software calling convention requires + // top 16 bytes of stack to be scratch space + // PLT resolver uses that scratch space at + // 'memcpy' symbol reolution time + add sp = -16, sp + br.call.sptk.many b0 = memcpy# + ;; + mov ar.pfs = loc0 + mov b0 = loc1 + mov gp = loc2 + br.ret.sptk.many b0 + +.Lst_hfa_float: + add r16 = 4, in1 + cmp.lt p6, p0 = 4, in3 + ;; + stfs [in1] = f8, 8 +(p6) stfs [r16] = f9, 8 + cmp.lt p7, p0 = 8, in3 + cmp.lt p8, p0 = 12, in3 + ;; +(p7) stfs [in1] = f10, 8 +(p8) stfs [r16] = f11, 8 + cmp.lt p9, p0 = 16, in3 + cmp.lt p10, p0 = 20, in3 + ;; +(p9) stfs [in1] = f12, 8 +(p10) stfs [r16] = f13, 8 + cmp.lt p6, p0 = 24, in3 + cmp.lt p7, p0 = 28, in3 + ;; +(p6) stfs [in1] = f14 +(p7) stfs [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_double: + add r16 = 8, in1 + cmp.lt p6, p0 = 8, in3 + ;; + stfd [in1] = f8, 16 +(p6) stfd [r16] = f9, 16 + cmp.lt p7, p0 = 16, in3 + cmp.lt p8, p0 = 24, in3 + ;; +(p7) stfd [in1] = f10, 16 +(p8) stfd [r16] = f11, 16 + cmp.lt p9, p0 = 32, in3 + cmp.lt p10, p0 = 40, in3 + ;; +(p9) stfd [in1] = f12, 16 +(p10) stfd [r16] = f13, 16 + cmp.lt p6, p0 = 48, in3 + cmp.lt p7, p0 = 56, in3 + ;; +(p6) stfd [in1] = f14 +(p7) stfd [r16] = f15 + br.ret.sptk.many b0 + ;; + +.Lst_hfa_ldouble: + add r16 = 16, in1 + cmp.lt p6, p0 = 16, in3 + ;; + stfe [in1] = f8, 32 +(p6) stfe [r16] = f9, 32 + cmp.lt p7, p0 = 32, in3 + cmp.lt p8, p0 = 48, in3 + ;; +(p7) stfe [in1] = f10, 32 +(p8) stfe [r16] = f11, 32 + cmp.lt p9, p0 = 64, in3 + cmp.lt p10, p0 = 80, in3 + ;; +(p9) stfe [in1] = f12, 32 +(p10) stfe [r16] = f13, 32 + cmp.lt p6, p0 = 96, in3 + cmp.lt p7, p0 = 112, in3 + ;; +(p6) stfe [in1] = f14 +(p7) stfe [r16] = f15 + br.ret.sptk.many b0 + ;; + + .endp ffi_call_unix + + .align 16 + .global ffi_closure_unix + .proc ffi_closure_unix + +#define FRAME_SIZE (8*16 + 8*8 + 8*16) + +ffi_closure_unix: + .prologue + .save ar.pfs, r40 // loc0 + alloc loc0 = ar.pfs, 8, 4, 4, 0 + .fframe FRAME_SIZE + add r12 = -FRAME_SIZE, r12 + .save rp, loc1 + mov loc1 = b0 + .save ar.unat, loc2 + mov loc2 = ar.unat + .body + + /* Retrieve closure pointer and real gp. 
*/ +#ifdef _ILP32 + addp4 out0 = 0, gp + addp4 gp = 16, gp +#else + mov out0 = gp + add gp = 16, gp +#endif + ;; + ld8 gp = [gp] + + /* Spill all of the possible argument registers. */ + add r16 = 16 + 8*16, sp + add r17 = 16 + 8*16 + 16, sp + ;; + stf.spill [r16] = f8, 32 + stf.spill [r17] = f9, 32 + mov loc3 = gp + ;; + stf.spill [r16] = f10, 32 + stf.spill [r17] = f11, 32 + ;; + stf.spill [r16] = f12, 32 + stf.spill [r17] = f13, 32 + ;; + stf.spill [r16] = f14, 32 + stf.spill [r17] = f15, 24 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in0, 16 + .mem.offset 8, 0 + st8.spill [r17] = in1, 16 + add out1 = 16 + 8*16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in2, 16 + .mem.offset 8, 0 + st8.spill [r17] = in3, 16 + add out2 = 16, sp + ;; + .mem.offset 0, 0 + st8.spill [r16] = in4, 16 + .mem.offset 8, 0 + st8.spill [r17] = in5, 16 + mov out3 = r8 + ;; + .mem.offset 0, 0 + st8.spill [r16] = in6 + .mem.offset 8, 0 + st8.spill [r17] = in7 + + /* Invoke ffi_closure_unix_inner for the hard work. */ + br.call.sptk.many b0 = ffi_closure_unix_inner + ;; + + /* Dispatch to handle return value. */ + mov gp = loc3 + zxt1 r16 = r8 + ;; + addl r18 = @ltoffx(.Lld_table), gp + mov ar.pfs = loc0 + ;; + ld8.mov r18 = [r18], .Lld_table + mov b0 = loc1 + ;; + shladd r18 = r16, 3, r18 + mov ar.unat = loc2 + ;; + ld8 r17 = [r18] + shr r8 = r8, 8 + ;; + add r17 = r17, r18 + add r16 = 16, sp + ;; + mov b6 = r17 + br b6 + ;; + .label_state 1 + +.Lld_void: + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_int: + .body + .copy_state 1 + ld8 r8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_float: + .body + .copy_state 1 + ldfs f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_double: + .body + .copy_state 1 + ldfd f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; +.Lld_ldouble: + .body + .copy_state 1 + ldfe f8 = [r16] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_small_struct: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; + ld8 r8 = [r16], 16 +(p6) ld8 r9 = [r17], 16 + ;; +(p7) ld8 r10 = [r16] +(p8) ld8 r11 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_float: + .body + .copy_state 1 + add r17 = 4, r16 + cmp.lt p6, p0 = 4, r8 + ;; + ldfs f8 = [r16], 8 +(p6) ldfs f9 = [r17], 8 + cmp.lt p7, p0 = 8, r8 + cmp.lt p8, p0 = 12, r8 + ;; +(p7) ldfs f10 = [r16], 8 +(p8) ldfs f11 = [r17], 8 + cmp.lt p9, p0 = 16, r8 + cmp.lt p10, p0 = 20, r8 + ;; +(p9) ldfs f12 = [r16], 8 +(p10) ldfs f13 = [r17], 8 + cmp.lt p6, p0 = 24, r8 + cmp.lt p7, p0 = 28, r8 + ;; +(p6) ldfs f14 = [r16] +(p7) ldfs f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_double: + .body + .copy_state 1 + add r17 = 8, r16 + cmp.lt p6, p0 = 8, r8 + ;; + ldfd f8 = [r16], 16 +(p6) ldfd f9 = [r17], 16 + cmp.lt p7, p0 = 16, r8 + cmp.lt p8, p0 = 24, r8 + ;; +(p7) ldfd f10 = [r16], 16 +(p8) ldfd f11 = [r17], 16 + cmp.lt p9, p0 = 32, r8 + cmp.lt p10, p0 = 40, r8 + ;; +(p9) ldfd f12 = [r16], 16 +(p10) ldfd f13 = [r17], 16 + cmp.lt p6, p0 = 48, r8 + cmp.lt p7, p0 = 56, r8 + ;; +(p6) ldfd f14 = [r16] +(p7) ldfd f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + +.Lld_hfa_ldouble: + .body + .copy_state 1 + add r17 = 16, r16 + cmp.lt p6, p0 = 16, r8 + ;; + ldfe f8 = [r16], 32 +(p6) ldfe f9 = [r17], 32 + cmp.lt p7, p0 = 32, r8 + cmp.lt p8, p0 = 
48, r8 + ;; +(p7) ldfe f10 = [r16], 32 +(p8) ldfe f11 = [r17], 32 + cmp.lt p9, p0 = 64, r8 + cmp.lt p10, p0 = 80, r8 + ;; +(p9) ldfe f12 = [r16], 32 +(p10) ldfe f13 = [r17], 32 + cmp.lt p6, p0 = 96, r8 + cmp.lt p7, p0 = 112, r8 + ;; +(p6) ldfe f14 = [r16] +(p7) ldfe f15 = [r17] + .restore sp + add sp = FRAME_SIZE, sp + br.ret.sptk.many b0 + ;; + + .endp ffi_closure_unix + + .section .rodata + .align 8 +.Lst_table: + data8 @pcrel(.Lst_void) // FFI_TYPE_VOID + data8 @pcrel(.Lst_sint32) // FFI_TYPE_INT + data8 @pcrel(.Lst_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lst_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lst_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lst_uint8) // FFI_TYPE_UINT8 + data8 @pcrel(.Lst_sint8) // FFI_TYPE_SINT8 + data8 @pcrel(.Lst_uint16) // FFI_TYPE_UINT16 + data8 @pcrel(.Lst_sint16) // FFI_TYPE_SINT16 + data8 @pcrel(.Lst_uint32) // FFI_TYPE_UINT32 + data8 @pcrel(.Lst_sint32) // FFI_TYPE_SINT32 + data8 @pcrel(.Lst_int64) // FFI_TYPE_UINT64 + data8 @pcrel(.Lst_int64) // FFI_TYPE_SINT64 + data8 @pcrel(.Lst_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lst_int64) // FFI_TYPE_POINTER + data8 @pcrel(.Lst_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lst_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lst_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lst_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lst_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +.Lld_table: + data8 @pcrel(.Lld_void) // FFI_TYPE_VOID + data8 @pcrel(.Lld_int) // FFI_TYPE_INT + data8 @pcrel(.Lld_float) // FFI_TYPE_FLOAT + data8 @pcrel(.Lld_double) // FFI_TYPE_DOUBLE + data8 @pcrel(.Lld_ldouble) // FFI_TYPE_LONGDOUBLE + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT8 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT16 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT32 + data8 @pcrel(.Lld_int) // FFI_TYPE_UINT64 + data8 @pcrel(.Lld_int) // FFI_TYPE_SINT64 + data8 @pcrel(.Lld_void) // FFI_TYPE_STRUCT + data8 @pcrel(.Lld_int) // FFI_TYPE_POINTER + data8 @pcrel(.Lld_void) // FFI_TYPE_COMPLEX (not implemented) + data8 @pcrel(.Lld_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT + data8 @pcrel(.Lld_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT + data8 @pcrel(.Lld_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE + data8 @pcrel(.Lld_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c new file mode 100644 index 0000000..114d3e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/java_raw_api.c @@ -0,0 +1,374 @@ +/* ----------------------------------------------------------------------- + java_raw_api.c - Copyright (c) 1999, 2007, 2008 Red Hat, Inc. + + Cloned from raw_api.c + + Raw_api.c author: Kresten Krab Thorup + Java_raw_api.c author: Hans-J. 
Boehm + + $Id $ + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This defines a Java- and 64-bit specific variant of the raw API. */ +/* It assumes that "raw" argument blocks look like Java stacks on a */ +/* 64-bit machine. Arguments that can be stored in a single stack */ +/* stack slots (longs, doubles) occupy 128 bits, but only the first */ +/* 64 bits are actually used. */ + +#include +#include +#include + +#if !defined(NO_JAVA_RAW_API) + +size_t +ffi_java_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { + switch((*at) -> type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + result += 2 * FFI_SIZEOF_JAVA_RAW; + break; + case FFI_TYPE_STRUCT: + /* No structure parameters in Java. */ + abort(); + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + result += FFI_SIZEOF_JAVA_RAW; + } + } + + return result; +} + + +void +ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + 3); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + 2); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void *)raw; + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + *args = raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if FFI_SIZEOF_JAVA_RAW == 8 + switch((*tp)->type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void*) raw; + raw += 2; + break; + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + *args = (void*) raw++; + } +#else /* FFI_SIZEOF_JAVA_RAW != 8 */ + *args = (void*) raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif /* FFI_SIZEOF_JAVA_RAW == 8 */ + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT8*) (*args); +#else + (raw++)->uint = *(UINT8*) (*args); +#endif + break; + + case FFI_TYPE_SINT8: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT8*) (*args); +#else + (raw++)->sint = *(SINT8*) (*args); +#endif + break; + + case FFI_TYPE_UINT16: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT16*) (*args); +#else + (raw++)->uint = *(UINT16*) (*args); +#endif + break; + + case FFI_TYPE_SINT16: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT16*) (*args); +#else + (raw++)->sint = *(SINT16*) (*args); +#endif + break; + + case FFI_TYPE_UINT32: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT32*) (*args); +#else + (raw++)->uint = *(UINT32*) (*args); +#endif + break; + + case FFI_TYPE_SINT32: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT32*) (*args); +#else + (raw++)->sint = *(SINT32*) (*args); +#endif + break; + + case FFI_TYPE_FLOAT: + (raw++)->flt = *(FLOAT32*) (*args); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + raw->uint = *(UINT64*) (*args); + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: +#if FFI_SIZEOF_JAVA_RAW == 8 + FFI_ASSERT(0); /* Should have covered all cases */ +#else + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif + } + } +} + +#if !FFI_NATIVE_RAW_API + +static void +ffi_java_rvalue_to_raw (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: +#if FFI_SIZEOF_JAVA_RAW == 4 + case FFI_TYPE_POINTER: +#endif + *(SINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +static void +ffi_java_raw_to_rvalue (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: + *(SINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_java_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, + ffi_java_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_java_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); + ffi_java_rvalue_to_raw (cif, rvalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_java_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_java_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data); + ffi_java_raw_to_rvalue (cif, rvalue); +} + +ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_java_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) +{ + return ffi_prep_java_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ +#endif /* !NO_JAVA_RAW_API */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/asm.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/asm.h new file mode 100644 index 0000000..4edba41 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/asm.h @@ -0,0 +1,5 @@ +/* args are passed on registers from r0 up to r11 => 12*8 bytes */ +#define REG_ARGS_SIZE (12*8) +#define KVX_REGISTER_SIZE (8) +#define KVX_ABI_SLOT_SIZE (KVX_REGISTER_SIZE) +#define KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE (4*KVX_ABI_SLOT_SIZE) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/ffi.c new file mode 100644 index 0000000..58f6aef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/ffi.c @@ -0,0 +1,273 @@ +/* Copyright (c) 2020 Kalray + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__kvx__) +#include +#include +#include +#include +#include +#include "ffi_common.h" +#include "asm.h" + +#define ALIGN(x, a) ALIGN_MASK(x, (typeof(x))(a) - 1) +#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define KVX_ABI_STACK_ALIGNMENT (32) +#define KVX_ABI_STACK_ARG_ALIGNMENT (8) +#define max(a,b) ((a) > (b) ? (a) : (b)) + +#ifdef FFI_DEBUG +#define DEBUG_PRINT(...) do{ fprintf( stderr, __VA_ARGS__ ); } while(0) +#else +#define DEBUG_PRINT(...) +#endif + +struct ret_value { + unsigned long int r0; + unsigned long int r1; + unsigned long int r2; + unsigned long int r3; +}; + +extern struct ret_value ffi_call_SYSV(unsigned total_size, + unsigned size, + extended_cif *ecif, + unsigned *rvalue_addr, + void *fn, + unsigned int_ext_method); + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->flags = cif->rtype->size; + return FFI_OK; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, unsigned int arg_slots_size, extended_cif *ecif) +{ + char *stacktemp = stack; + char *current_arg_passed_by_value = stack + arg_slots_size; + int i, s; + ffi_type **arg; + int count = 0; + ffi_cif *cif = ecif->cif; + void **argv = ecif->avalue; + + arg = cif->arg_types; + + DEBUG_PRINT("stack: %p\n", stack); + DEBUG_PRINT("arg_slots_size: %u\n", arg_slots_size); + DEBUG_PRINT("current_arg_passed_by_value: %p\n", current_arg_passed_by_value); + DEBUG_PRINT("ecif: %p\n", ecif); + DEBUG_PRINT("ecif->avalue: %p\n", ecif->avalue); + + for (i = 0; i < cif->nargs; i++) { + + s = KVX_ABI_SLOT_SIZE; + switch((*arg)->type) { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + DEBUG_PRINT("INT64/32/16/8/FLOAT/DOUBLE or POINTER @%p\n", stack); + *(uint64_t *) stack = *(uint64_t *)(* argv); + break; + + case FFI_TYPE_COMPLEX: + if ((*arg)->size == 8) + *(_Complex float *) stack = *(_Complex float *)(* argv); + else if ((*arg)->size == 16) { + *(_Complex double *) stack = *(_Complex double *)(* argv); + s = 16; + } else + abort(); + break; + case FFI_TYPE_STRUCT: { + char *value; + unsigned int written_size = 0; + DEBUG_PRINT("struct by value @%p\n", stack); + if ((*arg)->size > KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE) { + DEBUG_PRINT("big struct\n"); + *(uint64_t *) stack = (uintptr_t)current_arg_passed_by_value; + value = current_arg_passed_by_value; + current_arg_passed_by_value += (*arg)->size; + written_size = KVX_ABI_SLOT_SIZE; + } else { + value = stack; + written_size = (*arg)->size; + } + memcpy(value, *argv, (*arg)->size); + s = ALIGN(written_size, KVX_ABI_STACK_ARG_ALIGNMENT); + break; + } + default: + printf("Error: unsupported arg type %d\n", (*arg)->type); + abort(); + break; + + } + stack += s; + count += s; + argv++; + arg++; + } +#ifdef FFI_DEBUG + FFI_ASSERT(((intptr_t)(stacktemp + REG_ARGS_SIZE) & (KVX_ABI_STACK_ALIGNMENT-1)) == 0); +#endif + return stacktemp + REG_ARGS_SIZE; +} + +/* Perform machine dependent cif processing when we have a variadic 
function */ + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, + unsigned int ntotalargs) +{ + cif->flags = cif->rtype->size; + return FFI_OK; +} + +static unsigned long handle_small_int_ext(kvx_intext_method *int_ext_method, + const ffi_type *rtype) +{ + switch (rtype->type) { + case FFI_TYPE_SINT8: + *int_ext_method = KVX_RET_SXBD; + return KVX_REGISTER_SIZE; + + case FFI_TYPE_SINT16: + *int_ext_method = KVX_RET_SXHD; + return KVX_REGISTER_SIZE; + + case FFI_TYPE_SINT32: + *int_ext_method = KVX_RET_SXWD; + return KVX_REGISTER_SIZE; + + case FFI_TYPE_UINT8: + *int_ext_method = KVX_RET_ZXBD; + return KVX_REGISTER_SIZE; + + case FFI_TYPE_UINT16: + *int_ext_method = KVX_RET_ZXHD; + return KVX_REGISTER_SIZE; + + case FFI_TYPE_UINT32: + *int_ext_method = KVX_RET_ZXWD; + return KVX_REGISTER_SIZE; + + default: + *int_ext_method = KVX_RET_NONE; + return rtype->size; + } +} + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + int i; + unsigned long int slot_fitting_args_size = 0; + unsigned long int total_size = 0; + unsigned long int big_struct_size = 0; + kvx_intext_method int_extension_method; + ffi_type **arg; + struct ret_value local_rvalue = {0}; + size_t wb_size; + + + /* Calculate size to allocate on stack */ + for (i = 0, arg = cif->arg_types; i < cif->nargs; i++, arg++) { + DEBUG_PRINT("argument %d, type %d, size %lu\n", i, (*arg)->type, (*arg)->size); + if (((*arg)->type == FFI_TYPE_STRUCT) || ((*arg)->type == FFI_TYPE_COMPLEX)) { + if ((*arg)->size <= KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE) { + slot_fitting_args_size += ALIGN((*arg)->size, KVX_ABI_SLOT_SIZE); + } else { + slot_fitting_args_size += KVX_ABI_SLOT_SIZE; /* aggregate passed by reference */ + big_struct_size += ALIGN((*arg)->size, KVX_ABI_SLOT_SIZE); + } + } else if ((*arg)->size <= KVX_ABI_SLOT_SIZE) { + slot_fitting_args_size += KVX_ABI_SLOT_SIZE; + } else { + printf("Error: unsupported arg size %ld arg type %d\n", (*arg)->size, (*arg)->type); + abort(); /* should never happen? */ + } + } + + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + /* This implementation allocates anyway for all register based args */ + slot_fitting_args_size = max(slot_fitting_args_size, REG_ARGS_SIZE); + total_size = slot_fitting_args_size + big_struct_size; + total_size = ALIGN(total_size, KVX_ABI_STACK_ALIGNMENT); + + /* wb_size: write back size, the size we will need to write back to user + * provided buffer. In theory it should always be cif->flags which is + * cif->rtype->size. But libffi API mandates that for integral types + * of size <= system register size, then we *MUST* write back + * the size of system register size. + * in our case, if size <= 8 bytes we must write back 8 bytes. + * floats, complex and structs are not affected, only integrals. 
+ */ + wb_size = handle_small_int_ext(&int_extension_method, cif->rtype); + + switch (cif->abi) { + case FFI_SYSV: + DEBUG_PRINT("total_size: %lu\n", total_size); + DEBUG_PRINT("slot fitting args size: %lu\n", slot_fitting_args_size); + DEBUG_PRINT("rvalue: %p\n", rvalue); + DEBUG_PRINT("fn: %p\n", fn); + DEBUG_PRINT("rsize: %u\n", cif->flags); + DEBUG_PRINT("wb_size: %u\n", wb_size); + DEBUG_PRINT("int_extension_method: %u\n", int_extension_method); + local_rvalue = ffi_call_SYSV(total_size, slot_fitting_args_size, + &ecif, rvalue, fn, int_extension_method); + if ((cif->flags <= KVX_ABI_MAX_AGGREGATE_IN_REG_SIZE) + && (cif->rtype->type != FFI_TYPE_VOID)) + memcpy(rvalue, &local_rvalue, wb_size); + break; + default: + abort(); + break; + } +} + +/* Closures not supported yet */ +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + return FFI_BAD_ABI; +} + +#endif /* (__kvx__) */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/ffitarget.h new file mode 100644 index 0000000..8df8735 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/ffitarget.h @@ -0,0 +1,75 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2020 Kalray + + KVX Target configuration macros + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +/* Those values are set depending on return type + * they are used in the assembly code in sysv.S + */ +typedef enum kvx_intext_method { + KVX_RET_NONE = 0, + KVX_RET_SXBD = 1, + KVX_RET_SXHD = 2, + KVX_RET_SXWD = 3, + KVX_RET_ZXBD = 4, + KVX_RET_ZXHD = 5, + KVX_RET_ZXWD = 6 +} kvx_intext_method; + +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +/* This is only to allow Python to compile + * but closures are not supported yet + */ +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 0 + +#define FFI_NATIVE_RAW_API 0 +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_TARGET_HAS_COMPLEX_TYPE + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/sysv.S new file mode 100644 index 0000000..952afc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/kvx/sysv.S @@ -0,0 +1,127 @@ +/* Copyright (c) 2020 Kalray + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#if defined(__kvx__) +#define LIBFFI_ASM +#include +#include +#include +#include + +.text +.global ffi_call_SYSV +.type ffi_call_SYSV, @function +.type ffi_prep_args, @function +.align 8 + +/* ffi_call_SYSV + + r0: total size to allocate on stack + r1: size of arg slots + r2: extended cif structure, DO NOT REMOVE: it is used by ffi_prep_args() + r3: return value address + r4: function to call + r5: integer sign extension method to be used +*/ +ffi_call_SYSV: + addd $r12 = $r12, -64 + so (-32)[$r12] = $r20r21r22r23 + ;; + sd (0)[$r12] = $r24 + ;; + get $r23 = $ra + copyd $r20 = $r12 + sbfd $r12 = $r0, $r12 + ;; + copyd $r0 = $r12 + copyd $r21 = $r3 + copyd $r22 = $r4 + copyd $r24 = $r5 + call ffi_prep_args + ;; + lo $r8r9r10r11 = (64)[$r12] + ;; + lo $r4r5r6r7 = (32)[$r12] + ;; + lo $r0r1r2r3 = (0)[$r12] + copyd $r12 = $r0 + /* $r15 is the register used by the ABI to return big (>32 bytes) + * structs by value. + * It is also referred to as the "struct register" in the ABI. + */ + copyd $r15 = $r21 + icall $r22 + ;; + pcrel $r4 = @pcrel(.Ltable) + cb.deqz $r24 ? 
.Lend + ;; + addx8d $r24 = $r24, $r4 + ;; + igoto $r24 + ;; +.Ltable: +0: /* we should never arrive here */ + goto .Lerror + nop + ;; +1: /* Sign extend byte to double */ + sxbd $r0 = $r0 + goto .Lend + ;; +2: /* Sign extend half to double */ + sxhd $r0 = $r0 + goto .Lend + ;; +3: /* Sign extend word to double */ + sxwd $r0 = $r0 + goto .Lend + ;; +4: /* Zero extend byte to double */ + zxbd $r0 = $r0 + goto .Lend + ;; +5: /* Zero extend half to double */ + zxhd $r0 = $r0 + goto .Lend + ;; +6: /* Zero extend word to double */ + zxwd $r0 = $r0 + /* Fallthrough to .Lend */ + ;; +.Lend: + ld $r24 = (0)[$r12] + ;; + set $ra = $r23 + lo $r20r21r22r23 = (32)[$r20] + addd $r12 = $r20, 64 + ;; + ret + ;; +.Lerror: + errop + ;; + +#endif /* __kvx__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/ffi.c new file mode 100644 index 0000000..ab8fc4e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/ffi.c @@ -0,0 +1,232 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2004 Renesas Technology + Copyright (c) 2008 Red Hat, Inc. + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack + space has been allocated for the function's arguments. */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + unsigned int i; + int tmp; + unsigned int avn; + void **p_argv; + char *argp; + ffi_type **p_arg; + + tmp = 0; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 8) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0) && (avn != 0); + i--, p_arg++) + { + size_t z; + + /* Align if necessary. 
*/ + if (((*p_arg)->alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, (*p_arg)->alignment); + + if (avn != 0) + { + avn--; + z = (*p_arg)->size; + if (z < sizeof (int)) + { + z = sizeof (int); + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + z = (*p_arg)->size; + if ((*p_arg)->alignment != 1) + memcpy (argp, *p_argv, z); + else + memcpy (argp + 4 - z, *p_argv, z); + z = sizeof (int); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + if (z > 8) + { + *(unsigned int *) argp = (unsigned int)(void *)(* p_argv); + z = sizeof(void *); + } + else + { + memcpy(argp, *p_argv, z); + z = 8; + } + } + else + { + /* Double or long long 64bit. */ + memcpy (argp, *p_argv, z); + } + } + p_argv++; + argp += z; + } + } + + return; +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag. */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_DOUBLE; + + else + cif->flags = (unsigned) cif->rtype->type; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags = FFI_TYPE_DOUBLE; + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have + a return value address then we need to make one. 
*/ + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + int size = cif->rtype->size; + int align = cif->rtype->alignment; + + if (size < 4) + { + if (align == 1) + *(unsigned long *)(ecif.rvalue) <<= (4 - size) * 8; + } + else if (4 < size && size < 8) + { + if (align == 1) + { + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + else if (align == 2) + { + if (size & 1) + size += 1; + + if (size != 8) + memcpy (ecif.rvalue, ecif.rvalue + 8-size, size); + } + } + } + break; + + default: + FFI_ASSERT(0); + break; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/ffitarget.h new file mode 100644 index 0000000..6c34801 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2004 Renesas Technology. + Target configuration macros for M32R. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +#define FFI_CLOSURES 0 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/sysv.S new file mode 100644 index 0000000..06b75c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m32r/sysv.S @@ -0,0 +1,121 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2004 Renesas Technology + + M32R Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x)! .type CNAME(x),%function! CNAME(x): +#endif + +.text + + /* R0: ffi_prep_args */ + /* R1: &ecif */ + /* R2: cif->bytes */ + /* R3: fig->flags */ + /* sp+0: ecif.rvalue */ + /* sp+4: fn */ + + /* This assumes we are using gas. */ +ENTRY(ffi_call_SYSV) + /* Save registers. */ + push fp + push lr + push r3 + push r2 + push r1 + push r0 + mv fp, sp + + /* Make room for all of the new args. */ + sub sp, r2 + + /* Place all of the ffi_prep_args in position. */ + mv lr, r0 + mv r0, sp + /* R1 already set. */ + + /* And call. */ + jl lr + + /* Move first 4 parameters in registers... */ + ld r0, @(0,sp) + ld r1, @(4,sp) + ld r2, @(8,sp) + ld r3, @(12,sp) + + /* ...and adjust the stack. */ + ld lr, @(8,fp) + cmpi lr, #16 + bc adjust_stack + ldi lr, #16 +adjust_stack: + add sp, lr + + /* Call the function. */ + ld lr, @(28,fp) + jl lr + + /* Remove the space we pushed for the args. */ + mv sp, fp + + /* Load R2 with the pointer to storage for the return value. */ + ld r2, @(24,sp) + + /* Load R3 with the return type code. */ + ld r3, @(12,sp) + + /* If the return value pointer is NULL, assume no return value. */ + beqz r2, epilogue + + /* Return INT. */ + ldi r4, #FFI_TYPE_INT + bne r3, r4, return_double + st r0, @r2 + bra epilogue + +return_double: + /* Return DOUBLE or LONGDOUBLE. 
*/ + ldi r4, #FFI_TYPE_DOUBLE + bne r3, r4, epilogue + st r0, @r2 + st r1, @(4,r2) + +epilogue: + pop r0 + pop r1 + pop r2 + pop r3 + pop lr + pop fp + jmp lr + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/ffi.c new file mode 100644 index 0000000..0330184 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/ffi.c @@ -0,0 +1,362 @@ +/* ----------------------------------------------------------------------- + ffi.c + + m68k Foreign Function Interface + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#ifdef __rtems__ +void rtems_cache_flush_multiple_data_lines( const void *, size_t ); +#else +#include +#ifdef __MINT__ +#include +#include +#else +#include +#endif +#endif + +void ffi_call_SYSV (extended_cif *, + unsigned, unsigned, + void *, void (*fn) ()); +void *ffi_prep_args (void *stack, extended_cif *ecif); +void ffi_closure_SYSV (ffi_closure *); +void ffi_closure_struct_SYSV (ffi_closure *); +unsigned int ffi_closure_SYSV_inner (ffi_closure *closure, + void *resp, void *args); + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if ( +#ifdef __MINT__ + (ecif->cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((ecif->cif->rtype->type == FFI_TYPE_STRUCT) + && !ecif->cif->flags))) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z = (*p_arg)->size; + int type = (*p_arg)->type; + + if (z < sizeof (int)) + { + switch (type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: +#ifdef __MINT__ + if (z == 1 || z == 2) + memcpy (argp + 2, *p_argv, z); + else + memcpy (argp, *p_argv, z); +#else + memcpy (argp + sizeof (int) - z, *p_argv, z); +#endif + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. 
*/ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +#define CIF_FLAGS_INT 1 +#define CIF_FLAGS_DINT 2 +#define CIF_FLAGS_FLOAT 4 +#define CIF_FLAGS_DOUBLE 8 +#define CIF_FLAGS_LDOUBLE 16 +#define CIF_FLAGS_POINTER 32 +#define CIF_FLAGS_STRUCT1 64 +#define CIF_FLAGS_STRUCT2 128 +#define CIF_FLAGS_SINT8 256 +#define CIF_FLAGS_SINT16 512 + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + switch (cif->rtype->size) + { + case 1: +#ifdef __MINT__ + cif->flags = CIF_FLAGS_STRUCT2; +#else + cif->flags = CIF_FLAGS_STRUCT1; +#endif + break; + case 2: + cif->flags = CIF_FLAGS_STRUCT2; + break; +#ifdef __MINT__ + case 3: +#endif + case 4: + cif->flags = CIF_FLAGS_INT; + break; +#ifdef __MINT__ + case 7: +#endif + case 8: + cif->flags = CIF_FLAGS_DINT; + break; + default: + cif->flags = 0; + break; + } + break; + + case FFI_TYPE_FLOAT: + cif->flags = CIF_FLAGS_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = CIF_FLAGS_DOUBLE; + break; + +#if (FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE) + case FFI_TYPE_LONGDOUBLE: +#ifdef __MINT__ + cif->flags = 0; +#else + cif->flags = CIF_FLAGS_LDOUBLE; +#endif + break; +#endif + + case FFI_TYPE_POINTER: + cif->flags = CIF_FLAGS_POINTER; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + case FFI_TYPE_SINT16: + cif->flags = CIF_FLAGS_SINT16; + break; + + case FFI_TYPE_SINT8: + cif->flags = CIF_FLAGS_SINT8; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. 
*/ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV (&ecif, cif->bytes, cif->flags, + ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +static void +ffi_prep_incoming_args_SYSV (char *stack, void **avalue, ffi_cif *cif) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + + argp = stack; + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; +#ifdef __MINT__ + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 1 || z == 2)) + { + *p_argv = (void *) (argp + 2); + + z = 4; + } + else + if (cif->flags && + cif->rtype->type == FFI_TYPE_STRUCT && + (z == 3 || z == 4)) + { + *p_argv = (void *) (argp); + + z = 4; + } + else +#endif + if (z <= 4) + { + *p_argv = (void *) (argp + 4 - z); + + z = 4; + } + else + { + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } +} + +unsigned int +ffi_closure_SYSV_inner (ffi_closure *closure, void *resp, void *args) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_incoming_args_SYSV(args, arg_area, cif); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + *(unsigned short *)closure->tramp = 0x207c; + *(void **)(closure->tramp + 2) = codeloc; + *(unsigned short *)(closure->tramp + 6) = 0x4ef9; + + if ( +#ifdef __MINT__ + (cif->rtype->type == FFI_TYPE_LONGDOUBLE) || +#endif + (((cif->rtype->type == FFI_TYPE_STRUCT) + && !cif->flags))) + *(void **)(closure->tramp + 8) = ffi_closure_struct_SYSV; + else + *(void **)(closure->tramp + 8) = ffi_closure_SYSV; + +#ifdef __rtems__ + rtems_cache_flush_multiple_data_lines( codeloc, FFI_TRAMPOLINE_SIZE ); +#elif defined(__MINT__) + Ssystem(S_FLUSHCACHE, codeloc, FFI_TRAMPOLINE_SIZE); +#else + syscall(SYS_cacheflush, codeloc, FLUSH_SCOPE_LINE, + FLUSH_CACHE_BOTH, FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/ffitarget.h new file mode 100644 index 0000000..e81dde2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/ffitarget.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for Motorola 68K. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/sysv.S new file mode 100644 index 0000000..ea40f11 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m68k/sysv.S @@ -0,0 +1,357 @@ +/* ----------------------------------------------------------------------- + + sysv.S - Copyright (c) 2012 Alan Hourihane + Copyright (c) 1998, 2012 Andreas Schwab + Copyright (c) 2008 Red Hat, Inc. + Copyright (c) 2012, 2016 Thorsten Glaser + + m68k Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef HAVE_AS_CFI_PSEUDO_OP +#define CFI_STARTPROC() .cfi_startproc +#define CFI_OFFSET(reg,off) .cfi_offset reg,off +#define CFI_DEF_CFA(reg,off) .cfi_def_cfa reg,off +#define CFI_ENDPROC() .cfi_endproc +#else +#define CFI_STARTPROC() +#define CFI_OFFSET(reg,off) +#define CFI_DEF_CFA(reg,off) +#define CFI_ENDPROC() +#endif + +#ifdef __MINT__ +#define CALLFUNC(funcname) _ ## funcname +#else +#define CALLFUNC(funcname) funcname +#endif + + .text + + .globl CALLFUNC(ffi_call_SYSV) + .type CALLFUNC(ffi_call_SYSV),@function + .align 4 + +CALLFUNC(ffi_call_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %d2,-(%sp) + CFI_OFFSET(2,-12) + + | Make room for all of the new args. + sub.l 12(%fp),%sp + + | Call ffi_prep_args + move.l 8(%fp),-(%sp) + pea 4(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_prep_args) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_prep_args@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_prep_args@PLTPC) +#endif + addq.l #8,%sp + + | Pass pointer to struct value, if any +#ifdef __MINT__ + move.l %d0,%a1 +#else + move.l %a0,%a1 +#endif + + | Call the function + move.l 24(%fp),%a0 + jsr (%a0) + + | Remove the space we pushed for the args + add.l 12(%fp),%sp + + | Load the pointer to storage for the return value + move.l 20(%fp),%a1 + + | Load the return type code + move.l 16(%fp),%d2 + + | If the return value pointer is NULL, assume no return value. + | NOTE: On the mc68000, tst on an address register is not supported. +#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + cmp.w #0, %a1 +#else + tst.l %a1 +#endif + jbeq noretval + + btst #0,%d2 + jbeq retlongint + move.l %d0,(%a1) + jbra epilogue + +retlongint: + btst #1,%d2 + jbeq retfloat + move.l %d0,(%a1) + move.l %d1,4(%a1) + jbra epilogue + +retfloat: + btst #2,%d2 + jbeq retdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s %fp0,(%a1) +#else + move.l %d0,(%a1) +#endif + jbra epilogue + +retdouble: + btst #3,%d2 + jbeq retlongdouble +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1) +#endif + jbra epilogue + +retlongdouble: + btst #4,%d2 + jbeq retpointer +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x %fp0,(%a1) +#else + move.l %d0,(%a1)+ + move.l %d1,(%a1)+ + move.l %d2,(%a1) +#endif + jbra epilogue + +retpointer: + btst #5,%d2 + jbeq retstruct1 +#ifdef __MINT__ + move.l %d0,(%a1) +#else + move.l %a0,(%a1) +#endif + jbra epilogue + +retstruct1: + btst #6,%d2 + jbeq retstruct2 + move.b %d0,(%a1) + jbra epilogue + +retstruct2: + btst #7,%d2 + jbeq retsint8 + move.w %d0,(%a1) + jbra epilogue + +retsint8: + btst #8,%d2 + jbeq retsint16 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + move.l %d0,(%a1) + jbra epilogue + +retsint16: + btst #9,%d2 + jbeq noretval + ext.l %d0 + move.l %d0,(%a1) + +noretval: +epilogue: + move.l (%sp)+,%d2 + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_call_SYSV),.-CALLFUNC(ffi_call_SYSV) + + .globl CALLFUNC(ffi_closure_SYSV) + .type CALLFUNC(ffi_closure_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_SYSV): + CFI_STARTPROC() + link %fp,#-12 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + pea -12(%fp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + + lsr.l #1,%d0 + jne 1f + jcc .Lcls_epilogue + | CIF_FLAGS_INT + move.l -12(%fp),%d0 +.Lcls_epilogue: + | no CIF_FLAGS_* + unlk %fp + rts +1: + lea -12(%fp),%a0 + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_float + | CIF_FLAGS_DINT + move.l (%a0)+,%d0 + move.l (%a0),%d1 + jra .Lcls_epilogue +.Lcls_ret_float: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.s (%a0),%fp0 +#else + move.l (%a0),%d0 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_ldouble + | CIF_FLAGS_DOUBLE +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.d (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0),%d1 +#endif + jra .Lcls_epilogue +.Lcls_ret_ldouble: +#if defined(__MC68881__) || defined(__HAVE_68881__) + fmove.x (%a0),%fp0 +#else + move.l (%a0)+,%d0 + move.l (%a0)+,%d1 + move.l (%a0),%d2 +#endif + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_struct1 + | CIF_FLAGS_POINTER + move.l (%a0),%a0 + move.l %a0,%d0 + jra .Lcls_epilogue +.Lcls_ret_struct1: + move.b (%a0),%d0 + jra .Lcls_epilogue +1: + lsr.l #2,%d0 + jne 1f + jcs .Lcls_ret_sint8 + | CIF_FLAGS_STRUCT2 + move.w (%a0),%d0 + jra .Lcls_epilogue +.Lcls_ret_sint8: + move.l (%a0),%d0 + | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32. 
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__) + ext.w %d0 + ext.l %d0 +#else + extb.l %d0 +#endif + jra .Lcls_epilogue +1: + | CIF_FLAGS_SINT16 + move.l (%a0),%d0 + ext.l %d0 + jra .Lcls_epilogue + CFI_ENDPROC() + + .size CALLFUNC(ffi_closure_SYSV),.-CALLFUNC(ffi_closure_SYSV) + + .globl CALLFUNC(ffi_closure_struct_SYSV) + .type CALLFUNC(ffi_closure_struct_SYSV), @function + .align 4 + +CALLFUNC(ffi_closure_struct_SYSV): + CFI_STARTPROC() + link %fp,#0 + CFI_OFFSET(14,-8) + CFI_DEF_CFA(14,8) + move.l %sp,-12(%fp) + pea 8(%fp) + move.l %a1,-(%sp) + move.l %a0,-(%sp) +#if !defined __PIC__ + jsr CALLFUNC(ffi_closure_SYSV_inner) +#elif defined(__uClinux__) && defined(__ID_SHARED_LIBRARY__) + move.l _current_shared_library_a5_offset_(%a5),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#elif defined(__mcoldfire__) && !defined(__mcfisab__) && !defined(__mcfisac__) + move.l #_GLOBAL_OFFSET_TABLE_@GOTPC,%a0 + lea (-6,%pc,%a0),%a0 + move.l CALLFUNC(ffi_closure_SYSV_inner@GOT)(%a0),%a0 + jsr (%a0) +#else + bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC) +#endif + unlk %fp + rts + CFI_ENDPROC() + .size CALLFUNC(ffi_closure_struct_SYSV),.-CALLFUNC(ffi_closure_struct_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/ffi.c new file mode 100644 index 0000000..57b344f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/ffi.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. + * + * Only OpenBSD/m88k is currently supported; other platforms (such as + * Motorola's SysV/m88k) could be supported with the following tweaks: + * + * - non-OpenBSD systems use an `outgoing parameter area' as part of the + * 88BCS calling convention, which is not supported under OpenBSD from + * release 3.6 onwards. Supporting it should be as easy as taking it + * into account when adjusting the stack, in the assembly code. 
+ * + * - the logic deciding whether a function argument gets passed through + * registers, or on the stack, has changed several times in OpenBSD in + * edge cases (especially for structs larger than 32 bytes being passed + * by value). The code below attemps to match the logic used by the + * system compiler of OpenBSD 5.3, i.e. gcc 3.3.6 with many m88k backend + * fixes. + */ + +#include +#include + +#include +#include + +void ffi_call_OBSD (unsigned int, extended_cif *, unsigned int, void *, + void (*fn) ()); +void *ffi_prep_args (void *, extended_cif *); +void ffi_closure_OBSD (ffi_closure *); +void ffi_closure_struct_OBSD (ffi_closure *); +unsigned int ffi_closure_OBSD_inner (ffi_closure *, void *, unsigned int *, + char *); +void ffi_cacheflush_OBSD (unsigned int, unsigned int); + +#define CIF_FLAGS_INT (1 << 0) +#define CIF_FLAGS_DINT (1 << 1) + +/* + * Foreign Function Interface API + */ + +/* ffi_prep_args is called by the assembly routine once stack space has + been allocated for the function's arguments. */ + +void * +ffi_prep_args (void *stack, extended_cif *ecif) +{ + unsigned int i; + void **p_argv; + char *argp, *stackp; + unsigned int *regp; + unsigned int regused; + ffi_type **p_arg; + void *struct_value_ptr; + + regp = (unsigned int *)stack; + stackp = (char *)(regp + 8); + regused = 0; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument can be passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + switch (t) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *(unsigned int *) argp = *(unsigned int *) *p_argv; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + + /* Align if necessary. */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are filled, and about to continue + on stack, regp == stackp. Therefore the check for regused as well. 
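+	 The first eight 32-bit words of the block prepared here are the image
+	 of the argument registers (r2-r9) that the assembly stub reloads
+	 before the call; regused counts how many of those words are taken,
+	 and stackp starts immediately after them.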
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } + + return struct_value_ptr; +} + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->size == sizeof (int) && + cif->rtype->alignment == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = 0; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = CIF_FLAGS_DINT; + break; + + default: + cif->flags = CIF_FLAGS_INT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. */ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && (cif->rtype->size != sizeof (int) + || cif->rtype->alignment != sizeof (int))) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_OBSD: + ffi_call_OBSD (cif->bytes, &ecif, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +static void +ffi_prep_closure_args_OBSD (ffi_cif *cif, void **avalue, unsigned int *regp, + char *stackp) +{ + unsigned int i; + void **p_argv; + char *argp; + unsigned int regused; + ffi_type **p_arg; + + regused = 0; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + unsigned short t, a; + + z = (*p_arg)->size; + t = (*p_arg)->type; + a = (*p_arg)->alignment; + + /* + * Figure out whether the argument has been passed through registers + * or on the stack. + * The rule is that registers can only receive simple types not larger + * than 64 bits, or structs the exact size of a register and aligned to + * the size of a register. + */ + if (t == FFI_TYPE_STRUCT) + { + if (z == sizeof (int) && a == sizeof (int) && regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + else + { + if (z > sizeof (int) && regused < 8 - 1) + { + /* align to an even register pair */ + if (regused & 1) + { + regp++; + regused++; + } + } + if (regused < 8) + argp = (char *)regp; + else + argp = stackp; + } + + /* Enforce proper stack alignment of 64-bit types */ + if (argp == stackp && a > sizeof (int)) + { + stackp = (char *) FFI_ALIGN(stackp, a); + argp = stackp; + } + + if (z < sizeof (int) && t != FFI_TYPE_STRUCT) + *p_argv = (void *) (argp + sizeof (int) - z); + else + *p_argv = (void *) argp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + + /* Be careful, once all registers are exhausted, and about to fetch from + stack, regp == stackp. Therefore the check for regused as well. 
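+	 In the closure path regp points at the copy of r2-r9 saved by
+	 ffi_closure_OBSD on entry and stackp at the caller's stack arguments,
+	 so the same bookkeeping walks both areas in the order the arguments
+	 were passed.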
*/ + if (argp == (char *)regp && regused < 8) + { + regp += z / sizeof (int); + regused += z / sizeof (int); + } + else + stackp += z; + } +} + +unsigned int +ffi_closure_OBSD_inner (ffi_closure *closure, void *resp, unsigned int *regp, + char *stackp) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_args_OBSD(cif, arg_area, regp, stackp); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, void *codeloc) +{ + unsigned int *tramp = (unsigned int *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_OBSD); + + if (cif->rtype->type == FFI_TYPE_STRUCT && !cif->flags) + fn = &ffi_closure_struct_OBSD; + else + fn = &ffi_closure_OBSD; + + /* or.u %r10, %r0, %hi16(fn) */ + tramp[0] = 0x5d400000 | (((unsigned int)fn) >> 16); + /* or.u %r13, %r0, %hi16(closure) */ + tramp[1] = 0x5da00000 | ((unsigned int)closure >> 16); + /* or %r10, %r10, %lo16(fn) */ + tramp[2] = 0x594a0000 | (((unsigned int)fn) & 0xffff); + /* jmp.n %r10 */ + tramp[3] = 0xf400c40a; + /* or %r13, %r13, %lo16(closure) */ + tramp[4] = 0x59ad0000 | ((unsigned int)closure & 0xffff); + + ffi_cacheflush_OBSD((unsigned int)codeloc, FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/ffitarget.h new file mode 100644 index 0000000..e52bf9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/ffitarget.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/* + * m88k Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_OBSD, + FFI_DEFAULT_ABI = FFI_OBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 0x14 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/obsd.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/obsd.S new file mode 100644 index 0000000..1944a23 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/m88k/obsd.S @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * m88k Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * ffi_cacheflush_OBSD(unsigned int addr, %r2 + * unsigned int size); %r3 + */ + .align 4 + .globl ffi_cacheflush_OBSD + .type ffi_cacheflush_OBSD,@function +ffi_cacheflush_OBSD: + tb0 0, %r0, 451 + or %r0, %r0, %r0 + jmp %r1 + .size ffi_cacheflush_OBSD, . 
- ffi_cacheflush_OBSD + +/* + * ffi_call_OBSD(unsigned bytes, %r2 + * extended_cif *ecif, %r3 + * unsigned flags, %r4 + * void *rvalue, %r5 + * void (*fn)()); %r6 + */ + .align 4 + .globl ffi_call_OBSD + .type ffi_call_OBSD,@function +ffi_call_OBSD: + subu %r31, %r31, 32 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 32 + + | Save the few arguments we'll need after ffi_prep_args() + st.d %r4, %r31, 8 + st %r6, %r31, 16 + + | Allocate room for the image of r2-r9, and the stack space for + | the args (rounded to a 16-byte boundary) + addu %r2, %r2, (8 * 4) + 15 + clr %r2, %r2, 4<0> + subu %r31, %r31, %r2 + + | Fill register and stack image + or %r2, %r31, %r0 +#ifdef PIC + bsr ffi_prep_args#plt +#else + bsr ffi_prep_args +#endif + + | Save pointer to return struct address, if any + or %r12, %r2, %r0 + + | Get function pointer + subu %r4, %r30, 32 + ld %r1, %r4, 16 + + | Fetch the register arguments + ld.d %r2, %r31, (0 * 4) + ld.d %r4, %r31, (2 * 4) + ld.d %r6, %r31, (4 * 4) + ld.d %r8, %r31, (6 * 4) + addu %r31, %r31, (8 * 4) + + | Invoke the function + jsr %r1 + + | Restore stack now that we don't need the args anymore + subu %r31, %r30, 32 + + | Figure out what to return as the function's return value + ld %r5, %r31, 12 | rvalue + ld %r4, %r31, 8 | flags + + bcnd eq0, %r5, 9f + + bb0 0, %r4, 1f | CIF_FLAGS_INT + st %r2, %r5, 0 + br 9f + +1: + bb0 1, %r4, 1f | CIF_FLAGS_DINT + st.d %r2, %r5, 0 + br 9f + +1: +9: + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 32 + .size ffi_call_OBSD, . - ffi_call_OBSD + +/* + * ffi_closure_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_OBSD + .type ffi_closure_OBSD, @function +ffi_closure_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments and return + | value + subu %r31, %r31, (8 * 4) + (2 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + addu %r3, %r31, (8 * 4) | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + | Figure out what to return as the function's return value + bb0 0, %r2, 1f | CIF_FLAGS_INT + ld %r2, %r31, (8 * 4) + br 9f + +1: + bb0 1, %r2, 1f | CIF_FLAGS_DINT + ld.d %r2, %r31, (8 * 4) + br 9f + +1: +9: + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_OBSD,.-ffi_closure_OBSD + +/* + * ffi_closure_struct_OBSD(ffi_closure *closure); %r13 + */ + .align 4 + .globl ffi_closure_struct_OBSD + .type ffi_closure_struct_OBSD, @function +ffi_closure_struct_OBSD: + subu %r31, %r31, 16 + st %r30, %r31, 4 + st %r1, %r31, 0 + addu %r30, %r31, 16 + + | Make room on the stack for saved register arguments + subu %r31, %r31, (8 * 4) + st.d %r2, %r31, (0 * 4) + st.d %r4, %r31, (2 * 4) + st.d %r6, %r31, (4 * 4) + st.d %r8, %r31, (6 * 4) + + | Invoke the closure function + or %r5, %r30, 0 | calling stack + addu %r4, %r31, 0 | saved registers + or %r3, %r12, 0 | return value + or %r2, %r13, %r0 | closure +#ifdef PIC + bsr ffi_closure_OBSD_inner#plt +#else + bsr ffi_closure_OBSD_inner +#endif + + subu %r31, %r30, 16 + ld %r1, %r31, 0 + ld %r30, %r31, 4 + jmp.n %r1 + addu %r31, %r31, 16 + .size ffi_closure_struct_OBSD,.-ffi_closure_struct_OBSD diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/ffi.c new file mode 100644 index 0000000..3aecb0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/ffi.c @@ -0,0 +1,330 @@ +/* ---------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Imagination Technologies + + Meta Foreign Function Interface + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + `Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED `AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. +----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) + +/* + * ffi_prep_args is called by the assembly routine once stack space has been + * allocated for the function's arguments + */ + +unsigned int ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + /* Store return value */ + if ( ecif->cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *(void **) argp = ecif->rvalue; + } + + p_argv = ecif->avalue; + + /* point to next location */ + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; (i != 0); i--, p_arg++, p_argv++) + { + size_t z; + + /* Move argp to address of argument */ + z = (*p_arg)->size; + argp -= z; + + /* Align if necessary */ + argp = (char *) FFI_ALIGN_DOWN(FFI_ALIGN_DOWN(argp, (*p_arg)->alignment), 4); + + if (z < sizeof(int)) { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + case FFI_TYPE_STRUCT: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + default: + FFI_ASSERT(0); + } + } else if ( z == sizeof(int)) { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } else { + memcpy(argp, *p_argv, z); + } + } + + /* return the size of the arguments to be passed in registers, + padded to an 8 byte boundary to preserve stack alignment */ + return FFI_ALIGN(MIN(stack - argp, 6*4), 8); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type **ptr; + unsigned i, bytes = 0; + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) 
{ + if ((*ptr)->size == 0) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type, do this + check after the initialization. */ + FFI_ASSERT_VALID_TYPE(*ptr); + + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = FFI_ALIGN(bytes, (*ptr)->alignment); + + bytes += FFI_ALIGN((*ptr)->size, 4); + } + + /* Ensure arg space is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT) { + bytes += sizeof(void*); + + /* Ensure stack is aligned to an 8-byte boundary */ + bytes = FFI_ALIGN(bytes, 8); + } + + cif->bytes = bytes; + + /* Set the return type flag */ + switch (cif->rtype->type) { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = (unsigned) FFI_TYPE_SINT64; + break; + case FFI_TYPE_STRUCT: + /* Meta can store return values which are <= 64 bits */ + if (cif->rtype->size <= 4) + /* Returned to D0Re0 as 32-bit value */ + cif->flags = (unsigned)FFI_TYPE_INT; + else if ((cif->rtype->size > 4) && (cif->rtype->size <= 8)) + /* Returned valued is stored to D1Re0|R0Re0 */ + cif->flags = (unsigned)FFI_TYPE_DOUBLE; + else + /* value stored in memory */ + cif->flags = (unsigned)FFI_TYPE_STRUCT; + break; + default: + cif->flags = (unsigned)FFI_TYPE_INT; + break; + } + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*fn)(void), extended_cif *, unsigned, unsigned, double *); + +/* + * Exported in API. Entry point + * cif -> ffi_cif object + * fn -> function pointer + * rvalue -> pointer to return value + * avalue -> vector of void * pointers pointing to memory locations holding the + * arguments + */ +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + int small_struct = (((cif->flags == FFI_TYPE_INT) || (cif->flags == FFI_TYPE_DOUBLE)) && (cif->rtype->type == FFI_TYPE_STRUCT)); + ecif.cif = cif; + ecif.avalue = avalue; + + double temp; + + /* + * If the return value is a struct and we don't have a return value address + * then we need to make one + */ + + if ((rvalue == NULL ) && (cif->flags == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else if (small_struct) + ecif.rvalue = &temp; + else + ecif.rvalue = rvalue; + + switch (cif->abi) { + case FFI_SYSV: + ffi_call_SYSV(fn, &ecif, cif->bytes, cif->flags, ecif.rvalue); + break; + default: + FFI_ASSERT(0); + break; + } + + if (small_struct) + memcpy (rvalue, &temp, cif->rtype->size); +} + +/* private members */ + +static void ffi_prep_incoming_args_SYSV (char *, void **, void **, + ffi_cif*, float *); + +void ffi_closure_SYSV (ffi_closure *); + +/* Do NOT change that without changing the FFI_TRAMPOLINE_SIZE */ +extern unsigned int ffi_metag_trampoline[10]; /* 10 instructions */ + +/* end of private members */ + +/* + * __tramp: trampoline memory location + * __fun: assembly routine + * __ctx: memory location for wrapper + * + * At this point, tramp[0] == __ctx ! 
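+ *
+ * The finished trampoline is FFI_TRAMPOLINE_SIZE (48) bytes: the ten
+ * instructions copied from ffi_metag_trampoline occupy bytes 0-39, __ctx
+ * is stored at offset 40 and __fun at offset 44, and the two
+ * __builtin_meta2_cachewd calls flush that range so the rewritten code is
+ * visible to instruction fetch.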
+ */ +void ffi_init_trampoline(unsigned char *__tramp, unsigned int __fun, unsigned int __ctx) { + memcpy (__tramp, ffi_metag_trampoline, sizeof(ffi_metag_trampoline)); + *(unsigned int*) &__tramp[40] = __ctx; + *(unsigned int*) &__tramp[44] = __fun; + /* This will flush the instruction cache */ + __builtin_meta2_cachewd(&__tramp[0], 1); + __builtin_meta2_cachewd(&__tramp[47], 1); +} + + + +/* the cif must already be prepared */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + void (*closure_func)(ffi_closure*) = NULL; + + if (cif->abi == FFI_SYSV) + closure_func = &ffi_closure_SYSV; + else + return FFI_BAD_ABI; + + ffi_init_trampoline( + (unsigned char*)&closure->tramp[0], + (unsigned int)closure_func, + (unsigned int)codeloc); + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} + + +/* This function is jumped to by the trampoline */ +unsigned int ffi_closure_SYSV_inner (closure, respp, args, vfp_args) + ffi_closure *closure; + void **respp; + void *args; + void *vfp_args; +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void**) alloca (cif->nargs * sizeof (void*)); + + /* + * This call will initialize ARG_AREA, such that each + * element in that array points to the corresponding + * value on the stack; and if the function returns + * a structure, it will re-set RESP to point to the + * structure return address. + */ + ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif, vfp_args); + + (closure->fun) ( cif, *respp, arg_area, closure->user_data); + + return cif->flags; +} + +static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, + void **avalue, ffi_cif *cif, + float *vfp_stack) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + /* stack points to original arguments */ + argp = stack; + + /* Store return value */ + if ( cif->flags == FFI_TYPE_STRUCT ) { + argp -= 4; + *rvalue = *(void **) argp; + } + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) { + size_t z; + size_t alignment; + + alignment = (*p_arg)->alignment; + if (alignment < 4) + alignment = 4; + if ((alignment - 1) & (unsigned)argp) + argp = (char *) FFI_ALIGN(argp, alignment); + + z = (*p_arg)->size; + *p_argv = (void*) argp; + p_argv++; + argp -= z; + } + return; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/ffitarget.h new file mode 100644 index 0000000..7b9dbeb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Imagination Technologies Ltd. 
+ Target configuration macros for Meta + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_DEFAULT_ABI = FFI_SYSV, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1, +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 48 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/sysv.S new file mode 100644 index 0000000..b4b2a3b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/metag/sysv.S @@ -0,0 +1,311 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Imagination Technologies Ltd. + + Meta Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#define ENTRY(x) .globl CNAME(x); .type CNAME(x), %function; CNAME(x): +#endif + +#ifdef __ELF__ +#define LSYM(x) .x +#else +#define LSYM(x) x +#endif + +.macro call_reg x= + .text + .balign 4 + mov D1RtP, \x + swap D1RtP, PC +.endm + +! Save register arguments +.macro SAVE_ARGS + .text + .balign 4 + setl [A0StP++], D0Ar6, D1Ar5 + setl [A0StP++], D0Ar4, D1Ar3 + setl [A0StP++], D0Ar2, D1Ar1 +.endm + +! Save retrun, frame pointer and other regs +.macro SAVE_REGS regs= + .text + .balign 4 + setl [A0StP++], D0FrT, D1RtP + ! Needs to be a pair of regs + .ifnc "\regs","" + setl [A0StP++], \regs + .endif +.endm + +! Declare a global function +.macro METAG_FUNC_START name + .text + .balign 4 + ENTRY(\name) +.endm + +! Return registers from the stack. Reverse SAVE_REGS operation +.macro RET_REGS regs=, cond= + .ifnc "\regs", "" + getl \regs, [--A0StP] + .endif + getl D0FrT, D1RtP, [--A0StP] +.endm + +! Return arguments +.macro RET_ARGS + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] +.endm + + + ! D1Ar1: fn + ! D0Ar2: &ecif + ! D1Ar3: cif->bytes + ! D0Ar4: fig->flags + ! D1Ar5: ecif.rvalue + + ! This assumes we are using GNU as +METAG_FUNC_START ffi_call_SYSV + ! Save argument registers + + SAVE_ARGS + + ! new frame + mov D0FrT, A0FrP + add A0FrP, A0StP, #0 + + ! Preserve the old frame pointer + SAVE_REGS "D1.5, D0.5" + + ! Make room for new args. cifs->bytes is the total space for input + ! and return arguments + + add A0StP, A0StP, D1Ar3 + + ! Preserve cifs->bytes & fn + mov D0.5, D1Ar3 + mov D1.5, D1Ar1 + + ! Place all of the ffi_prep_args in position + mov D1Ar1, A0StP + + ! Call ffi_prep_args(stack, &ecif) +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_prep_args@PLT) +#else + callr D1RtP, CNAME(ffi_prep_args) +#endif + + ! Restore fn pointer + + ! The foreign stack should look like this + ! XXXXX XXXXXX <--- stack pointer + ! FnArgN rvalue + ! FnArgN+2 FnArgN+1 + ! FnArgN+4 FnArgN+3 + ! .... + ! + + ! A0StP now points to the first (or return) argument + 4 + + ! Preserve cif->bytes + getl D0Ar2, D1Ar1, [--A0StP] + getl D0Ar4, D1Ar3, [--A0StP] + getl D0Ar6, D1Ar5, [--A0StP] + + ! Place A0StP to the first argument again + add A0StP, A0StP, #24 ! That's because we loaded 6 regs x 4 byte each + + ! A0FrP points to the initial stack without the reserved space for the + ! cifs->bytes, whilst A0StP points to the stack after the space allocation + + ! fn was the first argument of ffi_call_SYSV. + ! The stack at this point looks like this: + ! + ! A0StP(on entry to _SYSV) -> Arg6 Arg5 | low + ! Arg4 Arg3 | + ! Arg2 Arg1 | + ! A0FrP ----> D0FrtP D1RtP | + ! D1.5 D0.5 | + ! A0StP(bf prep_args) -> FnArgn FnArgn-1 | + ! FnArgn-2FnArgn-3 | + ! ................ | <= cifs->bytes + ! FnArg4 FnArg3 | + ! A0StP (prv_A0StP+cifs->bytes) FnArg2 FnArg1 | high + ! + ! fn was in Arg1 so it's located in in A0FrP+#-0xC + ! + + ! D0Re0 contains the size of arguments stored in registers + sub A0StP, A0StP, D0Re0 + + ! Arg1 is the function pointer for the foreign call. This has been + ! preserved in D1.5 + + ! Time to call (fn). Arguments should be like this: + ! Arg1-Arg6 are loaded to regs + ! 
The rest of the arguments are stored in stack pointed by A0StP + + call_reg D1.5 + + ! Reset stack. + + mov A0StP, A0FrP + + ! Load Arg1 with the pointer to storage for the return type + ! This was stored in Arg5 + + getd D1Ar1, [A0FrP+#-20] + + ! Load D0Ar2 with the return type code. This was stored in Arg4 (flags) + + getd D0Ar2, [A0FrP+#-16] + + ! We are ready to start processing the return value + ! D0Re0 (and D1Re0) hold the return value + + ! If the return value is NULL, assume no return value + cmp D1Ar1, #0 + beq LSYM(Lepilogue) + + ! return INT + cmp D0Ar2, #FFI_TYPE_INT + ! Sadly, there is no setd{cc} instruction so we need to workaround that + bne .INT64 + setd [D1Ar1], D0Re0 + b LSYM(Lepilogue) + + ! return INT64 +.INT64: + cmp D0Ar2, #FFI_TYPE_SINT64 + setleq [D1Ar1], D0Re0, D1Re0 + + ! return DOUBLE + cmp D0Ar2, #FFI_TYPE_DOUBLE + setl [D1AR1++], D0Re0, D1Re0 + +LSYM(Lepilogue): + ! At this point, the stack pointer points right after the argument + ! saved area. We need to restore 4 regs, therefore we need to move + ! 16 bytes ahead. + add A0StP, A0StP, #16 + RET_REGS "D1.5, D0.5" + RET_ARGS + getd D0Re0, [A0StP] + mov A0FrP, D0FrT + swap D1RtP, PC + +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + +/* + (called by ffi_metag_trampoline) + void ffi_closure_SYSV (ffi_closure*) + + (called by ffi_closure_SYSV) + unsigned int FFI_HIDDEN + ffi_closure_SYSV_inner (closure,respp, args) + ffi_closure *closure; + void **respp; + void *args; +*/ + +METAG_FUNC_START ffi_closure_SYSV + ! We assume that D1Ar1 holds the address of the + ! ffi_closure struct. We will use that to fetch the + ! arguments. The stack pointer points to an empty space + ! and it is ready to store more data. + + ! D1Ar1 is ready + ! Allocate stack space for return value + add A0StP, A0StP, #8 + ! Store it to D0Ar2 + sub D0Ar2, A0StP, #8 + + sub D1Ar3, A0FrP, #4 + + ! D1Ar3 contains the address of the original D1Ar1 argument + ! We need to subtract #4 later on + + ! Preverve D0Ar2 + mov D0.5, D0Ar2 + +#ifdef __PIC__ + callr D1RtP, CNAME(ffi_closure_SYSV_inner@PLT) +#else + callr D1RtP, CNAME(ffi_closure_SYSV_inner) +#endif + + ! Check the return value and store it to D0.5 + cmp D0Re0, #FFI_TYPE_INT + beq .Lretint + cmp D0Re0, #FFI_TYPE_DOUBLE + beq .Lretdouble +.Lclosure_epilogue: + sub A0StP, A0StP, #8 + RET_REGS "D1.5, D0.5" + RET_ARGS + swap D1RtP, PC + +.Lretint: + setd [D0.5], D0Re0 + b .Lclosure_epilogue +.Lretdouble: + setl [D0.5++], D0Re0, D1Re0 + b .Lclosure_epilogue +.ffi_closure_SYSV_end: +.size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + + +ENTRY(ffi_metag_trampoline) + SAVE_ARGS + ! New frame + mov A0FrP, A0StP + SAVE_REGS "D1.5, D0.5" + mov D0.5, PC + ! Load D1Ar1 the value of ffi_metag_trampoline + getd D1Ar1, [D0.5 + #8] + ! 
Jump to ffi_closure_SYSV + getd PC, [D0.5 + #12] diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/ffi.c new file mode 100644 index 0000000..df6e33c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/ffi.c @@ -0,0 +1,321 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +extern void ffi_call_SYSV(void (*)(void*, extended_cif*), extended_cif*, + unsigned int, unsigned int, unsigned int*, void (*fn)(void), + unsigned int, unsigned int); + +extern void ffi_closure_SYSV(void); + +#define WORD_SIZE sizeof(unsigned int) +#define ARGS_REGISTER_SIZE (WORD_SIZE * 6) +#define WORD_FFI_ALIGN(x) FFI_ALIGN(x, WORD_SIZE) + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ +void ffi_prep_args(void* stack, extended_cif* ecif) +{ + unsigned int i; + ffi_type** p_arg; + void** p_argv; + void* stack_args_p = stack; + + if (ecif == NULL || ecif->cif == NULL) { + return; /* no description to prepare */ + } + + p_argv = ecif->avalue; + + if ((ecif->cif->rtype != NULL) && + (ecif->cif->rtype->type == FFI_TYPE_STRUCT)) + { + /* if return type is a struct which is referenced on the stack/reg5, + * by a pointer. Stored the return value pointer in r5. 
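+		 * On MicroBlaze r5 is the first argument register, so this
+		 * hidden pointer takes the first argument slot and the real
+		 * arguments start one word later.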
+ */ + char* addr = stack_args_p; + memcpy(addr, &(ecif->rvalue), WORD_SIZE); + stack_args_p += WORD_SIZE; + } + + if (ecif->avalue == NULL) { + return; /* no arguments to prepare */ + } + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; + i++, p_arg++) + { + size_t size = (*p_arg)->size; + int type = (*p_arg)->type; + void* value = p_argv[i]; + char* addr = stack_args_p; + int aligned_size = WORD_FFI_ALIGN(size); + + /* force word alignment on the stack */ + stack_args_p += aligned_size; + + switch (type) + { + case FFI_TYPE_UINT8: + *(unsigned int *)addr = (unsigned int)*(UINT8*)(value); + break; + case FFI_TYPE_SINT8: + *(signed int *)addr = (signed int)*(SINT8*)(value); + break; + case FFI_TYPE_UINT16: + *(unsigned int *)addr = (unsigned int)*(UINT16*)(value); + break; + case FFI_TYPE_SINT16: + *(signed int *)addr = (signed int)*(SINT16*)(value); + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * MicroBlaze toolchain appears to emit: + * bsrli r5, r5, 8 (caller) + * ... + * + * ... + * bslli r5, r5, 8 (callee) + * + * For structs like "struct a { uint8_t a[3]; };", when passed + * by value. + * + * Structs like "struct b { uint16_t a; };" are also expected + * to be packed strangely in registers. + * + * This appears to be because the microblaze toolchain expects + * "struct b == uint16_t", which is only any issue for big + * endian. + * + * The following is a work around for big-endian only, for the + * above mentioned case, it will re-align the contents of a + * <= 3-byte struct value. + */ + if (size < WORD_SIZE) + { + memcpy (addr + (WORD_SIZE - size), value, size); + break; + } +#endif + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + default: + memcpy(addr, value, aligned_size); + } + } +} + +ffi_status ffi_prep_cif_machdep(ffi_cif* cif) +{ + /* check ABI */ + switch (cif->abi) + { + case FFI_SYSV: + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} + +void ffi_call(ffi_cif* cif, void (*fn)(void), void* rvalue, void** avalue) +{ + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ecif.rvalue = alloca(cif->rtype->size); + } else { + ecif.rvalue = rvalue; + } + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, + ecif.rvalue, fn, cif->rtype->type, cif->rtype->size); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_call_SYSV(void* register_args, void* stack_args, + ffi_closure* closure, void* rvalue, + unsigned int* rtype, unsigned int* rsize) +{ + /* prepare arguments for closure call */ + ffi_cif* cif = closure->cif; + ffi_type** arg_types = cif->arg_types; + + /* re-allocate data for the args. This needs to be done in order to keep + * multi-word objects (e.g. structs) in contiguous memory. Callers are not + * required to store the value of args in the lower 6 words in the stack + * (although they are allocated in the stack). 
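+	 * The clone below therefore begins with up to ARGS_REGISTER_SIZE
+	 * bytes copied from the saved register area (r5-r10) and continues
+	 * with the caller's stack arguments, giving one contiguous image for
+	 * the pointer-population loop to walk.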
+ */ + char* stackclone = alloca(cif->bytes); + void** avalue = alloca(cif->nargs * sizeof(void*)); + void* struct_rvalue = NULL; + char* ptr = stackclone; + int i; + + /* copy registers into stack clone */ + int registers_used = cif->bytes; + if (registers_used > ARGS_REGISTER_SIZE) { + registers_used = ARGS_REGISTER_SIZE; + } + memcpy(stackclone, register_args, registers_used); + + /* copy stack allocated args into stack clone */ + if (cif->bytes > ARGS_REGISTER_SIZE) { + int stack_used = cif->bytes - ARGS_REGISTER_SIZE; + memcpy(stackclone + ARGS_REGISTER_SIZE, stack_args, stack_used); + } + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + struct_rvalue = *((void**)ptr); + ptr += WORD_SIZE; + } + + /* populate arg pointer list */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 3; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifdef __BIG_ENDIAN__ + avalue[i] = ptr + 2; +#else + avalue[i] = ptr; +#endif + break; + case FFI_TYPE_STRUCT: +#if __BIG_ENDIAN__ + /* + * Work around strange ABI behaviour. + * (see info in ffi_prep_args) + */ + if (arg_types[i]->size < WORD_SIZE) + { + memcpy (ptr, ptr + (WORD_SIZE - arg_types[i]->size), arg_types[i]->size); + } +#endif + avalue[i] = (void*)ptr; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + avalue[i] = ptr; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + default: + /* default 4-byte argument */ + avalue[i] = ptr; + break; + } + ptr += WORD_FFI_ALIGN(arg_types[i]->size); + } + + /* set the return type info passed back to the wrapper */ + *rsize = cif->rtype->size; + *rtype = cif->rtype->type; + if (struct_rvalue != NULL) { + closure->fun(cif, struct_rvalue, avalue, closure->user_data); + /* copy struct return pointer value into function return value */ + *((void**)rvalue) = struct_rvalue; + } else { + closure->fun(cif, rvalue, avalue, closure->user_data); + } +} + +ffi_status ffi_prep_closure_loc( + ffi_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void* user_data, void* codeloc) +{ + unsigned long* tramp = (unsigned long*)&(closure->tramp[0]); + unsigned long cls = (unsigned long)codeloc; + unsigned long fn = 0; + unsigned long fn_closure_call_sysv = (unsigned long)ffi_closure_call_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + switch (cif->abi) + { + case FFI_SYSV: + fn = (unsigned long)ffi_closure_SYSV; + + /* load r11 (temp) with fn */ + /* imm fn(upper) */ + tramp[0] = 0xb0000000 | ((fn >> 16) & 0xffff); + /* addik r11, r0, fn(lower) */ + tramp[1] = 0x31600000 | (fn & 0xffff); + + /* load r12 (temp) with cls */ + /* imm cls(upper) */ + tramp[2] = 0xb0000000 | ((cls >> 16) & 0xffff); + /* addik r12, r0, cls(lower) */ + tramp[3] = 0x31800000 | (cls & 0xffff); + + /* load r3 (temp) with ffi_closure_call_SYSV */ + /* imm fn_closure_call_sysv(upper) */ + tramp[4] = 0xb0000000 | ((fn_closure_call_sysv >> 16) & 0xffff); + /* addik r3, r0, fn_closure_call_sysv(lower) */ + tramp[5] = 0x30600000 | (fn_closure_call_sysv & 0xffff); + /* branch/jump to address stored in r11 (fn) */ + tramp[6] = 0x98085800; /* bra r11 */ + + break; + default: + return FFI_BAD_ABI; + } + return FFI_OK; +} diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/ffitarget.h new file mode 100644 index 0000000..c6fa5a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/ffitarget.h @@ -0,0 +1,53 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2012, 2013 Xilinx, Inc + + Target configuration macros for MicroBlaze. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Definitions for closures */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#define FFI_TRAMPOLINE_SIZE (4*8) + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/sysv.S new file mode 100644 index 0000000..ea43e9d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/microblaze/sysv.S @@ -0,0 +1,302 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2012, 2013 Xilinx, Inc + + MicroBlaze Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + /* + * arg[0] (r5) = ffi_prep_args, + * arg[1] (r6) = &ecif, + * arg[2] (r7) = cif->bytes, + * arg[3] (r8) = cif->flags, + * arg[4] (r9) = ecif.rvalue, + * arg[5] (r10) = fn + * arg[6] (sp[0]) = cif->rtype->type + * arg[7] (sp[4]) = cif->rtype->size + */ + .text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +ffi_call_SYSV: + /* push callee saves */ + addik r1, r1, -20 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + swi r22, r1, 12 /* save for locals */ + swi r23, r1, 16 /* save for locals */ + + /* save the r5-r10 registers in the stack */ + addik r1, r1, -24 /* increment sp to store 6x 32-bit words */ + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* save function pointer */ + addik r3, r5, 0 /* copy ffi_prep_args into r3 */ + addik r22, r1, 0 /* save sp for unallocated args into r22 (callee-saved) */ + addik r23, r10, 0 /* save function address into r23 (callee-saved) */ + + /* prepare stack with allocation for n (bytes = r7) args */ + rsub r1, r7, r1 /* subtract bytes from sp */ + + /* prep args for ffi_prep_args call */ + addik r5, r1, 0 /* store stack pointer into arg[0] */ + /* r6 still holds ecif for arg[1] */ + + /* Call ffi_prep_args(stack, &ecif). */ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + /* returns calling stack pointer location */ + + /* prepare args for fn call, prep_args populates them onto the stack */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + + /* call (fn) (...). */ + addik r1, r1, -4 + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r23 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 4 /* restore the link register from the frame */ + + /* Remove the space we pushed for the args. */ + addik r1, r22, 0 /* restore old SP */ + + /* restore this functions parameters */ + lwi r5, r1, 0 /* arg[0] */ + lwi r6, r1, 4 /* arg[1] */ + lwi r7, r1, 8 /* arg[2] */ + lwi r8, r1, 12 /* arg[3] */ + lwi r9, r1, 16 /* arg[4] */ + lwi r10, r1, 20 /* arg[5] */ + addik r1, r1, 24 /* decrement sp to de-allocate 6x 32-bit words */ + + /* If the return value pointer is NULL, assume no return value. 
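+	   r9 still holds ecif.rvalue (arg[4]), reloaded just above, so the
+	   beqi below skips every store path when no destination was given.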
*/ + beqi r9, ffi_call_SYSV_end + + lwi r22, r1, 48 /* get return type (20 for locals + 28 for arg[6]) */ + lwi r23, r1, 52 /* get return size (20 for locals + 32 for arg[7]) */ + + /* Check if return type is actually a struct, do nothing */ + rsubi r11, r22, FFI_TYPE_STRUCT + beqi r11, ffi_call_SYSV_end + + /* Return 8bit */ + rsubi r11, r23, 1 + beqi r11, ffi_call_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r23, 2 + beqi r11, ffi_call_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r23, 4 + beqi r11, ffi_call_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r23, 8 + beqi r11, ffi_call_SYSV_store64 + + /* Didn't match anything */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store64: + swi r3, r9, 0 /* store word r3 into return value */ + swi r4, r9, 4 /* store word r4 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store32: + swi r3, r9, 0 /* store word r3 into return value */ + bri ffi_call_SYSV_end + +ffi_call_SYSV_store16: +#ifdef __BIG_ENDIAN__ + shi r3, r9, 2 /* store half-word r3 into return value */ +#else + shi r3, r9, 0 /* store half-word r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_store8: +#ifdef __BIG_ENDIAN__ + sbi r3, r9, 3 /* store byte r3 into return value */ +#else + sbi r3, r9, 0 /* store byte r3 into return value */ +#endif + bri ffi_call_SYSV_end + +ffi_call_SYSV_end: + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + lwi r22, r1, 12 + lwi r23, r1, 16 + addik r1, r1, 20 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_call_SYSV, . - ffi_call_SYSV + +/* ------------------------------------------------------------------------- */ + + /* + * args passed into this function, are passed down to the callee. + * this function is the target of the closure trampoline, as such r12 is + * a pointer to the closure object. 
+ */ + .text + .globl ffi_closure_SYSV + .type ffi_closure_SYSV, @function +ffi_closure_SYSV: + /* push callee saves */ + addik r11, r1, 28 /* save stack args start location (excluding regs/link) */ + addik r1, r1, -12 + swi r19, r1, 0 /* Frame Pointer */ + swi r20, r1, 4 /* PIC register */ + swi r21, r1, 8 /* PIC register */ + + /* store register args on stack */ + addik r1, r1, -24 + swi r5, r1, 0 + swi r6, r1, 4 + swi r7, r1, 8 + swi r8, r1, 12 + swi r9, r1, 16 + swi r10, r1, 20 + + /* setup args */ + addik r5, r1, 0 /* register_args */ + addik r6, r11, 0 /* stack_args */ + addik r7, r12, 0 /* closure object */ + addik r1, r1, -8 /* allocate return value */ + addik r8, r1, 0 /* void* rvalue */ + addik r1, r1, -8 /* allocate for return type/size values */ + addik r9, r1, 0 /* void* rtype */ + addik r10, r1, 4 /* void* rsize */ + + /* call the wrap_call function */ + addik r1, r1, -28 /* allocate args + link reg */ + swi r15, r1, 0 /* store the link register in the frame */ + brald r15, r3 + nop /* branch has delay slot */ + lwi r15, r1, 0 + addik r1, r1, 28 /* restore the link register from the frame */ + +ffi_closure_SYSV_prepare_return: + lwi r9, r1, 0 /* rtype */ + lwi r10, r1, 4 /* rsize */ + addik r1, r1, 8 /* de-allocate return info values */ + + /* Check if return type is actually a struct, store 4 bytes */ + rsubi r11, r9, FFI_TYPE_STRUCT + beqi r11, ffi_closure_SYSV_store32 + + /* Return 8bit */ + rsubi r11, r10, 1 + beqi r11, ffi_closure_SYSV_store8 + + /* Return 16bit */ + rsubi r11, r10, 2 + beqi r11, ffi_closure_SYSV_store16 + + /* Return 32bit */ + rsubi r11, r10, 4 + beqi r11, ffi_closure_SYSV_store32 + + /* Return 64bit */ + rsubi r11, r10, 8 + beqi r11, ffi_closure_SYSV_store64 + + /* Didn't match anything */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store64: + lwi r3, r1, 0 /* store word r3 into return value */ + lwi r4, r1, 4 /* store word r4 into return value */ + /* 64 bits == 2 words, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store32: + lwi r3, r1, 0 /* store word r3 into return value */ + /* 32 bits == 1 word, no sign extend occurs */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store16: +#ifdef __BIG_ENDIAN__ + lhui r3, r1, 2 /* store half-word r3 into return value */ +#else + lhui r3, r1, 0 /* store half-word r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT16 + bnei r11, ffi_closure_SYSV_end + sext16 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_store8: +#ifdef __BIG_ENDIAN__ + lbui r3, r1, 3 /* store byte r3 into return value */ +#else + lbui r3, r1, 0 /* store byte r3 into return value */ +#endif + rsubi r11, r9, FFI_TYPE_SINT8 + bnei r11, ffi_closure_SYSV_end + sext8 r3, r3 /* fix sign extend of sint8 */ + bri ffi_closure_SYSV_end + +ffi_closure_SYSV_end: + addik r1, r1, 8 /* de-allocate return value */ + + /* de-allocate stored args */ + addik r1, r1, 24 + + /* callee restores */ + lwi r19, r1, 0 /* frame pointer */ + lwi r20, r1, 4 /* PIC register */ + lwi r21, r1, 8 /* PIC register */ + addik r1, r1, 12 + + /* return from sub-routine (with delay slot) */ + rtsd r15, 8 + nop + + .size ffi_closure_SYSV, . 
- ffi_closure_SYSV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/ffi.c new file mode 100644 index 0000000..979ca49 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/ffi.c @@ -0,0 +1,1134 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Anthony Green + Copyright (c) 2008 David Daney + Copyright (c) 1996, 2007, 2008, 2011 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#ifdef __GNUC__ +# if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) +# define USE__BUILTIN___CLEAR_CACHE 1 +# endif +#endif + +#ifndef USE__BUILTIN___CLEAR_CACHE +# if defined(__FreeBSD__) +# include +# elif defined(__OpenBSD__) +# include +# else +# include +# endif +#endif + +#ifdef FFI_DEBUG +# define FFI_MIPS_STOP_HERE() ffi_stop_here() +#else +# define FFI_MIPS_STOP_HERE() do {} while(0) +#endif + +#ifdef FFI_MIPS_N32 +#define FIX_ARGP \ +FFI_ASSERT(argp <= &stack[bytes]); \ +if (argp == &stack[bytes]) \ +{ \ + argp = stack; \ + FFI_MIPS_STOP_HERE(); \ +} +#else +#define FIX_ARGP +#endif + + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +static void ffi_prep_args(char *stack, + extended_cif *ecif, + int bytes, + int flags) +{ + int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + +#ifdef FFI_MIPS_N32 + /* If more than 8 double words are used, the remainder go + on the stack. We reorder stuff on the stack here to + support this easily. */ + if (bytes > 8 * sizeof(ffi_arg)) + argp = &stack[bytes - (8 * sizeof(ffi_arg))]; + else + argp = stack; +#else + argp = stack; +#endif + + memset(stack, 0, bytes); + +#ifdef FFI_MIPS_N32 + if ( ecif->cif->rstruct_flag != 0 ) +#else + if ( ecif->cif->rtype->type == FFI_TYPE_STRUCT ) +#endif + { + *(ffi_arg *) argp = (ffi_arg) ecif->rvalue; + argp += sizeof(ffi_arg); + FIX_ARGP; + } + + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs; i++, p_arg++) + { + size_t z; + unsigned int a; + + /* Align if necessary. 
*/ + a = (*p_arg)->alignment; + if (a < sizeof(ffi_arg)) + a = sizeof(ffi_arg); + + if ((a - 1) & (unsigned long) argp) + { + argp = (char *) FFI_ALIGN(argp, a); + FIX_ARGP; + } + + z = (*p_arg)->size; + if (z <= sizeof(ffi_arg)) + { + int type = (*p_arg)->type; + z = sizeof(ffi_arg); + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (ecif->cif->abi == FFI_N64 + || ecif->cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (i < 8 && (ecif->cif->abi == FFI_N32_SOFT_FLOAT + || ecif->cif->abi == FFI_N64_SOFT_FLOAT)) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_SINT8: + *(ffi_arg *)argp = *(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(ffi_arg *)argp = *(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(ffi_arg *)argp = *(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(ffi_arg *)argp = *(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_SINT32: + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); + break; + + case FFI_TYPE_UINT32: +#ifdef FFI_MIPS_N32 + /* The N32 ABI requires that 32-bit integers + be sign-extended to 64-bits, regardless of + whether they are signed or unsigned. */ + *(ffi_arg *)argp = *(SINT32 *)(* p_argv); +#else + *(ffi_arg *)argp = *(UINT32 *)(* p_argv); +#endif + break; + + /* This can only happen with 64bit slots. */ + case FFI_TYPE_FLOAT: + *(float *) argp = *(float *)(* p_argv); + break; + + /* Handle structures. */ + default: + memcpy(argp, *p_argv, (*p_arg)->size); + break; + } + } + else + { +#ifdef FFI_MIPS_O32 + memcpy(argp, *p_argv, z); +#else + { + unsigned long end = (unsigned long) argp + z; + unsigned long cap = (unsigned long) stack + bytes; + + /* Check if the data will fit within the register space. + Handle it if it doesn't. */ + + if (end <= cap) + memcpy(argp, *p_argv, z); + else + { + unsigned long portion = cap - (unsigned long)argp; + + memcpy(argp, *p_argv, portion); + argp = stack; + z -= portion; + memcpy(argp, (void*)((unsigned long)(*p_argv) + portion), + z); + } + } +#endif + } + p_argv++; + argp += z; + FIX_ARGP; + } +} + +#ifdef FFI_MIPS_N32 + +/* The n32 spec says that if "a chunk consists solely of a double + float field (but not a double, which is part of a union), it + is passed in a floating point register. Any other chunk is + passed in an integer register". This code traverses structure + definitions and generates the appropriate flags. */ + +static unsigned +calc_n32_struct_flags(int soft_float, ffi_type *arg, + unsigned *loc, unsigned *arg_reg) +{ + unsigned flags = 0; + unsigned index = 0; + + ffi_type *e; + + if (soft_float) + return 0; + + while ((e = arg->elements[index])) + { + /* Align this object. */ + *loc = FFI_ALIGN(*loc, e->alignment); + if (e->type == FFI_TYPE_DOUBLE) + { + /* Already aligned to FFI_SIZEOF_ARG. */ + *arg_reg = *loc / FFI_SIZEOF_ARG; + if (*arg_reg > 7) + break; + flags += (FFI_TYPE_DOUBLE << (*arg_reg * FFI_FLAG_BITS)); + *loc += e->size; + } + else + *loc += e->size; + index++; + } + /* Next Argument register at alignment of FFI_SIZEOF_ARG. */ + *arg_reg = FFI_ALIGN(*loc, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + + return flags; +} + +static unsigned +calc_n32_return_struct_flags(int soft_float, ffi_type *arg) +{ + unsigned flags = 0; + unsigned small = FFI_TYPE_SMALLSTRUCT; + ffi_type *e; + + /* Returning structures under n32 is a tricky thing. 
+ A struct with only one or two floating point fields + is returned in $f0 (and $f2 if necessary). Any other + struct results at most 128 bits are returned in $2 + (the first 64 bits) and $3 (remainder, if necessary). + Larger structs are handled normally. */ + + if (arg->size > 16) + return 0; + + if (arg->size > 8) + small = FFI_TYPE_SMALLSTRUCT2; + + e = arg->elements[0]; + + if (e->type == FFI_TYPE_DOUBLE) + flags = FFI_TYPE_DOUBLE; + else if (e->type == FFI_TYPE_FLOAT) + flags = FFI_TYPE_FLOAT; + + if (flags && (e = arg->elements[1])) + { + if (e->type == FFI_TYPE_DOUBLE) + flags += FFI_TYPE_DOUBLE << FFI_FLAG_BITS; + else if (e->type == FFI_TYPE_FLOAT) + flags += FFI_TYPE_FLOAT << FFI_FLAG_BITS; + else + return small; + + if (flags && (arg->elements[2])) + { + /* There are three arguments and the first two are + floats! This must be passed the old way. */ + return small; + } + if (soft_float) + flags += FFI_TYPE_STRUCT_SOFT; + } + else + if (!flags) + return small; + + return flags; +} + +#endif + +/* Perform machine dependent cif processing */ +static ffi_status ffi_prep_cif_machdep_int(ffi_cif *cif, unsigned nfixedargs) +{ + cif->flags = 0; + cif->mips_nfixedargs = nfixedargs; + +#ifdef FFI_MIPS_O32 + /* Set the flags necessary for O32 processing. FFI_O32_SOFT_FLOAT + * does not have special handling for floating point args. + */ + + if (cif->rtype->type != FFI_TYPE_STRUCT && cif->abi == FFI_O32) + { + if (cif->nargs > 0 && cif->nargs == nfixedargs) + { + switch ((cif->arg_types)[0]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[0]->type; + break; + + default: + break; + } + + if (cif->nargs > 1) + { + /* Only handle the second argument if the first + is a float or double. */ + if (cif->flags) + { + switch ((cif->arg_types)[1]->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += (cif->arg_types)[1]->type << FFI_FLAG_BITS; + break; + + default: + break; + } + } + } + } + } + + /* Set the return type flag */ + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_FLOAT: + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } + else + { + /* FFI_O32 */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 2); + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += FFI_TYPE_UINT64 << (FFI_FLAG_BITS * 2); + break; + + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 2); + break; + } + } +#endif + +#ifdef FFI_MIPS_N32 + /* Set the flags necessary for N32 processing */ + { + int type; + unsigned arg_reg = 0; + unsigned loc = 0; + unsigned count = (cif->nargs < 8) ? cif->nargs : 8; + unsigned index = 0; + + unsigned struct_flags = 0; + int soft_float = (cif->abi == FFI_N32_SOFT_FLOAT + || cif->abi == FFI_N64_SOFT_FLOAT); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { + struct_flags = calc_n32_return_struct_flags(soft_float, cif->rtype); + + if (struct_flags == 0) + { + /* This means that the structure is being passed as + a hidden argument */ + + arg_reg = 1; + count = (cif->nargs < 7) ? 
cif->nargs : 7; + + cif->rstruct_flag = !0; + } + else + cif->rstruct_flag = 0; + } + else + cif->rstruct_flag = 0; + + while (count-- > 0 && arg_reg < 8) + { + type = (cif->arg_types)[index]->type; + + // Pass variadic arguments in integer registers even if they're floats + if (soft_float || index >= nfixedargs) + { + switch (type) + { + case FFI_TYPE_FLOAT: + type = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + type = FFI_TYPE_UINT64; + break; + default: + break; + } + } + switch (type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags += + ((cif->arg_types)[index]->type << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + break; + case FFI_TYPE_LONGDOUBLE: + /* Align it. */ + arg_reg = FFI_ALIGN(arg_reg, 2); + /* Treat it as two adjacent doubles. */ + if (soft_float || index >= nfixedargs) + { + arg_reg += 2; + } + else + { + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + cif->flags += + (FFI_TYPE_DOUBLE << (arg_reg * FFI_FLAG_BITS)); + arg_reg++; + } + break; + + case FFI_TYPE_STRUCT: + loc = arg_reg * FFI_SIZEOF_ARG; + cif->flags += calc_n32_struct_flags(soft_float || index >= nfixedargs, + (cif->arg_types)[index], + &loc, &arg_reg); + break; + + default: + arg_reg++; + break; + } + + index++; + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + { + if (struct_flags == 0) + { + /* The structure is returned through a hidden + first argument. Do nothing, 'cause FFI_TYPE_VOID + is 0 */ + } + else + { + /* The structure is returned via some tricky + mechanism */ + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += struct_flags << (4 + (FFI_FLAG_BITS * 8)); + } + break; + } + + case FFI_TYPE_VOID: + /* Do nothing, 'cause FFI_TYPE_VOID is 0 */ + break; + + case FFI_TYPE_POINTER: + if (cif->abi == FFI_N32_SOFT_FLOAT || cif->abi == FFI_N32) + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + else + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_FLOAT: + if (soft_float) + { + cif->flags += FFI_TYPE_SINT32 << (FFI_FLAG_BITS * 8); + break; + } + /* else fall through */ + case FFI_TYPE_DOUBLE: + if (soft_float) + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + else + cif->flags += cif->rtype->type << (FFI_FLAG_BITS * 8); + break; + + case FFI_TYPE_LONGDOUBLE: + /* Long double is returned as if it were a struct containing + two doubles. 
*/ + if (soft_float) + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += FFI_TYPE_SMALLSTRUCT2 << (4 + (FFI_FLAG_BITS * 8)); + } + else + { + cif->flags += FFI_TYPE_STRUCT << (FFI_FLAG_BITS * 8); + cif->flags += (FFI_TYPE_DOUBLE + + (FFI_TYPE_DOUBLE << FFI_FLAG_BITS)) + << (4 + (FFI_FLAG_BITS * 8)); + } + break; + default: + cif->flags += FFI_TYPE_INT << (FFI_FLAG_BITS * 8); + break; + } + } +#endif + + return FFI_OK; +} + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + return ffi_prep_cif_machdep_int(cif, cif->nargs); +} + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned nfixedargs, + unsigned ntotalargs MAYBE_UNUSED) +{ + return ffi_prep_cif_machdep_int(cif, nfixedargs); +} + +/* Low level routine for calling O32 functions */ +extern int ffi_call_O32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, unsigned *, void (*)(void), void *closure); + +/* Low level routine for calling N32 functions */ +extern int ffi_call_N32(void (*)(char *, extended_cif *, int, int), + extended_cif *, unsigned, + unsigned, void *, void (*)(void), void *closure); + +void ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + ecif.rvalue = alloca(cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { +#ifdef FFI_MIPS_O32 + case FFI_O32: + case FFI_O32_SOFT_FLOAT: + ffi_call_O32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn, closure); + break; +#endif + +#ifdef FFI_MIPS_N32 + case FFI_N32: + case FFI_N32_SOFT_FLOAT: + case FFI_N64: + case FFI_N64_SOFT_FLOAT: + { + int copy_rvalue = 0; + int copy_offset = 0; + char *rvalue_copy = ecif.rvalue; + if (cif->rtype->type == FFI_TYPE_STRUCT && cif->rtype->size < 16) + { + /* For structures smaller than 16 bytes we clobber memory + in 8 byte increments. Make a copy so we don't clobber + the callers memory outside of the struct bounds. 
*/ + rvalue_copy = alloca(16); + copy_rvalue = 1; + } + else if (cif->rtype->type == FFI_TYPE_FLOAT + && (cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT)) + { + rvalue_copy = alloca (8); + copy_rvalue = 1; +#if defined(__MIPSEB__) || defined(_MIPSEB) + copy_offset = 4; +#endif + } + ffi_call_N32(ffi_prep_args, &ecif, cif->bytes, + cif->flags, rvalue_copy, fn, closure); + if (copy_rvalue) + memcpy(ecif.rvalue, rvalue_copy + copy_offset, cif->rtype->size); + } + break; +#endif + + default: + FFI_ASSERT(0); + break; + } +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +#if FFI_CLOSURES +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + void * fn; + char *clear_location = (char *) codeloc; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_closure_N32; +#endif /* FFI_MIPS_O32 */ + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned)fn >> 16); + /* ori $25,low(fn) */ + tramp[1] = 0x37390000 | ((unsigned)fn & 0xffff); + /* lui $12,high(codeloc) */ + tramp[2] = 0x3c0c0000 | ((unsigned)codeloc >> 16); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[3] = 0x03200008; +#else + tramp[3] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[4] = 0x358c0000 | ((unsigned)codeloc & 0xffff); +#else + /* N64 has a somewhat larger trampoline. 
*/ + /* lui $25,high(fn) */ + tramp[0] = 0x3c190000 | ((unsigned long)fn >> 48); + /* lui $12,high(codeloc) */ + tramp[1] = 0x3c0c0000 | ((unsigned long)codeloc >> 48); + /* ori $25,mid-high(fn) */ + tramp[2] = 0x37390000 | (((unsigned long)fn >> 32 ) & 0xffff); + /* ori $12,mid-high(codeloc) */ + tramp[3] = 0x358c0000 | (((unsigned long)codeloc >> 32) & 0xffff); + /* dsll $25,$25,16 */ + tramp[4] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[5] = 0x000c6438; + /* ori $25,mid-low(fn) */ + tramp[6] = 0x37390000 | (((unsigned long)fn >> 16 ) & 0xffff); + /* ori $12,mid-low(codeloc) */ + tramp[7] = 0x358c0000 | (((unsigned long)codeloc >> 16) & 0xffff); + /* dsll $25,$25,16 */ + tramp[8] = 0x0019cc38; + /* dsll $12,$12,16 */ + tramp[9] = 0x000c6438; + /* ori $25,low(fn) */ + tramp[10] = 0x37390000 | ((unsigned long)fn & 0xffff); + /* jr $25 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + tramp[11] = 0x03200008; +#else + tramp[11] = 0x03200009; +#endif + /* ori $12,low(codeloc) */ + tramp[12] = 0x358c0000 | ((unsigned long)codeloc & 0xffff); + +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#if !defined(__FreeBSD__) +#ifdef USE__BUILTIN___CLEAR_CACHE + __builtin___clear_cache(clear_location, clear_location + FFI_TRAMPOLINE_SIZE); +#else + cacheflush (clear_location, FFI_TRAMPOLINE_SIZE, ICACHE); +#endif +#endif /* ! __FreeBSD__ */ + return FFI_OK; +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer arguments + * (and, depending upon the arguments, some floating-point arguments + * as well). FPR is a pointer to the area where floating point + * registers have been saved, if any. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return type. + * + * Based on the similar routine for sparc. 
+ */ +int +ffi_closure_mips_inner_O32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + double *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn, seen_int; + + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + seen_int = (cif->abi == FFI_O32_SOFT_FLOAT) || (cif->mips_nfixedargs != cif->nargs); + argn = 0; + + if ((cif->flags >> (FFI_FLAG_BITS * 2)) == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)ar[0]; + argn = 1; + seen_int = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->alignment == 8 && (argn & 0x1)) + argn++; + if (i < 2 && !seen_int && + (arg_types[i]->type == FFI_TYPE_FLOAT || + arg_types[i]->type == FFI_TYPE_DOUBLE || + arg_types[i]->type == FFI_TYPE_LONGDOUBLE)) + { +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT) + avaluep[i] = ((char *) &fpr[i]) + sizeof (float); + else +#endif + avaluep[i] = (char *) &fpr[i]; + } + else + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) ar[argn]; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) ar[argn]; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) ar[argn]; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) ar[argn]; + break; + + default: + avaluep[i] = (char *) &ar[argn]; + break; + } + seen_int = 1; + } + argn += FFI_ALIGN(arg_types[i]->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + i++; + } + + /* Invoke the closure. */ + fun(cif, rvalue, avaluep, user_data); + + if (cif->abi == FFI_O32_SOFT_FLOAT) + { + switch (cif->rtype->type) + { + case FFI_TYPE_FLOAT: + return FFI_TYPE_INT; + case FFI_TYPE_DOUBLE: + return FFI_TYPE_UINT64; + default: + return cif->rtype->type; + } + } + else + { + return cif->rtype->type; + } +} + +#if defined(FFI_MIPS_N32) + +static void +copy_struct_N32(char *target, unsigned offset, ffi_abi abi, ffi_type *type, + int argn, unsigned arg_offset, ffi_arg *ar, + ffi_arg *fpr, int soft_float) +{ + ffi_type **elt_typep = type->elements; + while(*elt_typep) + { + ffi_type *elt_type = *elt_typep; + unsigned o; + char *tp; + char *argp; + char *fpp; + + o = FFI_ALIGN(offset, elt_type->alignment); + arg_offset += o - offset; + offset = o; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + + argp = (char *)(ar + argn); + fpp = (char *)(argn >= 8 ? ar + argn : fpr + argn); + + tp = target + offset; + + if (elt_type->type == FFI_TYPE_DOUBLE && !soft_float) + *(double *)tp = *(double *)fpp; + else + memcpy(tp, argp + arg_offset, elt_type->size); + + offset += elt_type->size; + arg_offset += elt_type->size; + elt_typep++; + argn += arg_offset / sizeof(ffi_arg); + arg_offset = arg_offset % sizeof(ffi_arg); + } +} + +/* + * Decodes the arguments to a function, which will be stored on the + * stack. AR is the pointer to the beginning of the integer + * arguments. FPR is a pointer to the area where floating point + * registers have been saved. + * + * RVALUE is the location where the function return value will be + * stored. CLOSURE is the prepared closure to invoke. + * + * This function should only be called from assembly, which is in + * turn called from a trampoline. + * + * Returns the function return flags. 
+ * + */ +int +ffi_closure_mips_inner_N32 (ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, ffi_arg *ar, + ffi_arg *fpr) +{ + void **avaluep; + ffi_arg *avalue; + ffi_type **arg_types; + int i, avn, argn; + int soft_float; + ffi_arg *argp; + + soft_float = cif->abi == FFI_N64_SOFT_FLOAT + || cif->abi == FFI_N32_SOFT_FLOAT; + avalue = alloca (cif->nargs * sizeof (ffi_arg)); + avaluep = alloca (cif->nargs * sizeof (ffi_arg)); + + argn = 0; + + if (cif->rstruct_flag) + { +#if _MIPS_SIM==_ABIN32 + rvalue = (void *)(UINT32)ar[0]; +#else /* N64 */ + rvalue = (void *)ar[0]; +#endif + argn = 1; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + while (i < avn) + { + if (arg_types[i]->type == FFI_TYPE_FLOAT + || arg_types[i]->type == FFI_TYPE_DOUBLE + || arg_types[i]->type == FFI_TYPE_LONGDOUBLE) + { + argp = (argn >= 8 || i >= cif->mips_nfixedargs || soft_float) ? ar + argn : fpr + argn; + if ((arg_types[i]->type == FFI_TYPE_LONGDOUBLE) && ((uintptr_t)argp & (arg_types[i]->alignment-1))) + { + argp=(ffi_arg*)FFI_ALIGN(argp,arg_types[i]->alignment); + argn++; + } +#if defined(__MIPSEB__) || defined(_MIPSEB) + if (arg_types[i]->type == FFI_TYPE_FLOAT && argn < 8) + avaluep[i] = ((char *) argp) + sizeof (float); + else +#endif + avaluep[i] = (char *) argp; + } + else + { + unsigned type = arg_types[i]->type; + + if (arg_types[i]->alignment > sizeof(ffi_arg)) + argn = FFI_ALIGN(argn, arg_types[i]->alignment / sizeof(ffi_arg)); + + argp = ar + argn; + + /* The size of a pointer depends on the ABI */ + if (type == FFI_TYPE_POINTER) + type = (cif->abi == FFI_N64 || cif->abi == FFI_N64_SOFT_FLOAT) + ? FFI_TYPE_SINT64 : FFI_TYPE_SINT32; + + if (soft_float && type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + + switch (type) + { + case FFI_TYPE_SINT8: + avaluep[i] = &avalue[i]; + *(SINT8 *) &avalue[i] = (SINT8) *argp; + break; + + case FFI_TYPE_UINT8: + avaluep[i] = &avalue[i]; + *(UINT8 *) &avalue[i] = (UINT8) *argp; + break; + + case FFI_TYPE_SINT16: + avaluep[i] = &avalue[i]; + *(SINT16 *) &avalue[i] = (SINT16) *argp; + break; + + case FFI_TYPE_UINT16: + avaluep[i] = &avalue[i]; + *(UINT16 *) &avalue[i] = (UINT16) *argp; + break; + + case FFI_TYPE_SINT32: + avaluep[i] = &avalue[i]; + *(SINT32 *) &avalue[i] = (SINT32) *argp; + break; + + case FFI_TYPE_UINT32: + avaluep[i] = &avalue[i]; + *(UINT32 *) &avalue[i] = (UINT32) *argp; + break; + + case FFI_TYPE_STRUCT: + if (argn < 8) + { + /* Allocate space for the struct as at least part of + it was passed in registers. */ + avaluep[i] = alloca(arg_types[i]->size); + copy_struct_N32(avaluep[i], 0, cif->abi, arg_types[i], + argn, 0, ar, fpr, i >= cif->mips_nfixedargs || soft_float); + + break; + } + /* Else fall through. */ + default: + avaluep[i] = (char *) argp; + break; + } + } + argn += FFI_ALIGN(arg_types[i]->size, sizeof(ffi_arg)) / sizeof(ffi_arg); + i++; + } + + /* Invoke the closure. 
*/ + fun (cif, rvalue, avaluep, user_data); + + return cif->flags >> (FFI_FLAG_BITS * 8); +} + +#endif /* FFI_MIPS_N32 */ + +#if defined(FFI_MIPS_O32) +extern void ffi_closure_O32(void); +extern void ffi_go_closure_O32(void); +#else +extern void ffi_closure_N32(void); +extern void ffi_go_closure_N32(void); +#endif /* FFI_MIPS_O32 */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void * fn; + +#if defined(FFI_MIPS_O32) + if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT) + return FFI_BAD_ABI; + fn = ffi_go_closure_O32; +#else +#if _MIPS_SIM ==_ABIN32 + if (cif->abi != FFI_N32 + && cif->abi != FFI_N32_SOFT_FLOAT) + return FFI_BAD_ABI; +#else + if (cif->abi != FFI_N64 + && cif->abi != FFI_N64_SOFT_FLOAT) + return FFI_BAD_ABI; +#endif + fn = ffi_go_closure_N32; +#endif /* FFI_MIPS_O32 */ + + closure->tramp = (void *)fn; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* FFI_CLOSURES */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/ffitarget.h new file mode 100644 index 0000000..fdd5ca9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/ffitarget.h @@ -0,0 +1,244 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for MIPS. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
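Stepping back from the MIPS-specific plumbing above: ffi_prep_closure_loc, ffi_prep_go_closure and ffi_closure_mips_inner_O32/N32 are reached through libffi's public closure API. The following is a minimal caller-side sketch using only documented libffi entry points; the add_handler function itself is a hypothetical example, not code from this tree.

    #include <ffi.h>
    #include <stdio.h>

    /* Handler with the fun(cif, rvalue, avalue, user_data) signature that
       ffi_closure_mips_inner_O32/N32 invoke after decoding the arguments.
       Integral results narrower than a register are written as a full
       ffi_arg, matching the widening performed by the inner routines. */
    static void add_handler(ffi_cif *cif, void *rvalue, void **avalue, void *user_data)
    {
      (void) cif; (void) user_data;
      *(ffi_arg *) rvalue = *(int *) avalue[0] + *(int *) avalue[1];
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *args[2] = { &ffi_type_sint, &ffi_type_sint };
      void *code;
      ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

      if (closure == NULL
          || ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, args) != FFI_OK
          || ffi_prep_closure_loc(closure, &cif, add_handler, NULL, code) != FFI_OK)
        return 1;

      /* 'code' points at the architecture-specific trampoline written by
         ffi_prep_closure_loc (on MIPS, the lui/ori sequence shown above). */
      int (*fn)(int, int) = (int (*)(int, int)) code;
      printf("2 + 3 = %d\n", fn(2, 3));

      ffi_closure_free(closure);
      return 0;
    }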
+#endif + +#ifdef __linux__ +# include +#elif defined(__rtems__) +/* + * Subprogram calling convention - copied from sgidefs.h + */ +#define _MIPS_SIM_ABI32 1 +#define _MIPS_SIM_NABI32 2 +#define _MIPS_SIM_ABI64 3 +#elif !defined(__OpenBSD__) && !defined(__FreeBSD__) +# include +#endif + +# ifndef _ABIN32 +# define _ABIN32 _MIPS_SIM_NABI32 +# endif +# ifndef _ABI64 +# define _ABI64 _MIPS_SIM_ABI64 +# endif +# ifndef _ABIO32 +# define _ABIO32 _MIPS_SIM_ABI32 +# endif + +#if !defined(_MIPS_SIM) +# error -- something is very wrong -- +#else +# if (_MIPS_SIM==_ABIN32 && defined(_ABIN32)) || (_MIPS_SIM==_ABI64 && defined(_ABI64)) +# define FFI_MIPS_N32 +# else +# if (_MIPS_SIM==_ABIO32 && defined(_ABIO32)) +# define FFI_MIPS_O32 +# else +# error -- this is an unsupported platform -- +# endif +# endif +#endif + +#ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +# define FFI_SIZEOF_ARG 4 +#else +/* N32 and N64 frames have 64bit integer args */ +# define FFI_SIZEOF_ARG 8 +# if _MIPS_SIM == _ABIN32 +# define FFI_SIZEOF_JAVA_RAW 4 +# endif +#endif + +#define FFI_FLAG_BITS 2 + +/* SGI's strange assembler requires that we multiply by 4 rather + than shift left by FFI_FLAG_BITS */ + +#define FFI_ARGS_D FFI_TYPE_DOUBLE +#define FFI_ARGS_F FFI_TYPE_FLOAT +#define FFI_ARGS_DD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_DOUBLE +#define FFI_ARGS_FF FFI_TYPE_FLOAT * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_FD FFI_TYPE_DOUBLE * 4 + FFI_TYPE_FLOAT +#define FFI_ARGS_DF FFI_TYPE_FLOAT * 4 + FFI_TYPE_DOUBLE + +/* Needed for N32 structure returns */ +#define FFI_TYPE_SMALLSTRUCT FFI_TYPE_UINT8 +#define FFI_TYPE_SMALLSTRUCT2 FFI_TYPE_SINT8 + +#if 0 +/* The SGI assembler can't handle this.. */ +#define FFI_TYPE_STRUCT_DD (( FFI_ARGS_DD ) << 4) + FFI_TYPE_STRUCT +/* (and so on) */ +#else +/* ...so we calculate these by hand! 
*/ +#define FFI_TYPE_STRUCT_D 61 +#define FFI_TYPE_STRUCT_F 45 +#define FFI_TYPE_STRUCT_DD 253 +#define FFI_TYPE_STRUCT_FF 173 +#define FFI_TYPE_STRUCT_FD 237 +#define FFI_TYPE_STRUCT_DF 189 +#define FFI_TYPE_STRUCT_SMALL 93 +#define FFI_TYPE_STRUCT_SMALL2 109 + +/* and for n32 soft float, add 16 * 2^4 */ +#define FFI_TYPE_STRUCT_D_SOFT 317 +#define FFI_TYPE_STRUCT_F_SOFT 301 +#define FFI_TYPE_STRUCT_DD_SOFT 509 +#define FFI_TYPE_STRUCT_FF_SOFT 429 +#define FFI_TYPE_STRUCT_FD_SOFT 493 +#define FFI_TYPE_STRUCT_DF_SOFT 445 +#define FFI_TYPE_STRUCT_SOFT 16 +#endif + +#ifdef LIBFFI_ASM +#define v0 $2 +#define v1 $3 +#define a0 $4 +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define a4 $8 +#define a5 $9 +#define a6 $10 +#define a7 $11 +#define t0 $8 +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define t8 $24 +#define t9 $25 +#define ra $31 + +#ifdef FFI_MIPS_O32 +# define REG_L lw +# define REG_S sw +# define SUBU subu +# define ADDU addu +# define SRL srl +# define LI li +#else /* !FFI_MIPS_O32 */ +# define REG_L ld +# define REG_S sd +# define SUBU dsubu +# define ADDU daddu +# define SRL dsrl +# define LI dli +# if (_MIPS_SIM==_ABI64) +# define LA dla +# define EH_FRAME_ALIGN 3 +# define FDE_ADDR_BYTES .8byte +# else +# define LA la +# define EH_FRAME_ALIGN 2 +# define FDE_ADDR_BYTES .4byte +# endif /* _MIPS_SIM==_ABI64 */ +#endif /* !FFI_MIPS_O32 */ +#else /* !LIBFFI_ASM */ +# ifdef __GNUC__ +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__SI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__SI__))); +#else +/* N32 and N64 frames have 64bit integer args */ +typedef unsigned int ffi_arg __attribute__((__mode__(__DI__))); +typedef signed int ffi_sarg __attribute__((__mode__(__DI__))); +# endif +# else +# ifdef FFI_MIPS_O32 +/* O32 stack frames have 32bit integer args */ +typedef __uint32_t ffi_arg; +typedef __int32_t ffi_sarg; +# else +/* N32 and N64 frames have 64bit integer args */ +typedef __uint64_t ffi_arg; +typedef __int64_t ffi_sarg; +# endif +# endif /* __GNUC__ */ + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_O32, + FFI_N32, + FFI_N64, + FFI_O32_SOFT_FLOAT, + FFI_N32_SOFT_FLOAT, + FFI_N64_SOFT_FLOAT, + FFI_LAST_ABI, + +#ifdef FFI_MIPS_O32 +#ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_O32_SOFT_FLOAT +#else + FFI_DEFAULT_ABI = FFI_O32 +#endif +#else +# if _MIPS_SIM==_ABI64 +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N64_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N64 +# endif +# else +# ifdef __mips_soft_float + FFI_DEFAULT_ABI = FFI_N32_SOFT_FLOAT +# else + FFI_DEFAULT_ABI = FFI_N32 +# endif +# endif +#endif +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS unsigned rstruct_flag; unsigned mips_nfixedargs +#define FFI_TARGET_SPECIFIC_VARIADIC +#endif /* !LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined(FFI_MIPS_O32) || (_MIPS_SIM ==_ABIN32) +# define FFI_TRAMPOLINE_SIZE 20 +#else +# define FFI_TRAMPOLINE_SIZE 56 +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/n32.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/n32.S new file mode 100644 index 0000000..1a940b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/mips/n32.S @@ -0,0 +1,663 @@ +/* 
----------------------------------------------------------------------- + n32.S - Copyright (c) 1996, 1998, 2005, 2007, 2009, 2010 Red Hat, Inc. + + MIPS Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Only build this code if we are compiling for n32 */ + +#if defined(FFI_MIPS_N32) + +#define callback a0 +#define bytes a2 +#define flags a3 +#define raddr a4 +#define fn a5 +#define closure a6 + +/* Note: to keep stack 16 byte aligned we need even number slots + used 9 slots here +*/ +#define SIZEOF_FRAME ( 10 * FFI_SIZEOF_ARG ) + +#ifdef __GNUC__ + .abicalls +#endif +#if !defined(__mips_isa_rev) || (__mips_isa_rev<6) + .set mips4 +#endif + .text + .align 2 + .globl ffi_call_N32 + .ent ffi_call_N32 +ffi_call_N32: +.LFB0: + .frame $fp, SIZEOF_FRAME, ra + .mask 0xc0000000,-FFI_SIZEOF_ARG + .fmask 0x00000000,0 + + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +.LCFI00: + REG_S $fp, SIZEOF_FRAME - 2*FFI_SIZEOF_ARG($sp) # Save frame pointer + REG_S ra, SIZEOF_FRAME - 1*FFI_SIZEOF_ARG($sp) # Save return address +.LCFI01: + move $fp, $sp +.LCFI02: + move t9, callback # callback function pointer + REG_S bytes, 2*FFI_SIZEOF_ARG($fp) # bytes + REG_S flags, 3*FFI_SIZEOF_ARG($fp) # flags + REG_S raddr, 4*FFI_SIZEOF_ARG($fp) # raddr + REG_S fn, 5*FFI_SIZEOF_ARG($fp) # fn + REG_S closure, 6*FFI_SIZEOF_ARG($fp) # closure + + # Allocate at least 4 words in the argstack + move v0, bytes + bge bytes, 4 * FFI_SIZEOF_ARG, bigger + LI v0, 4 * FFI_SIZEOF_ARG + b sixteen + + bigger: + ADDU t4, v0, 2 * FFI_SIZEOF_ARG -1 # make sure it is aligned + and v0, t4, -2 * FFI_SIZEOF_ARG # to a proper boundry. + +sixteen: + SUBU $sp, $sp, v0 # move the stack pointer to reflect the + # arg space + + move a0, $sp # 4 * FFI_SIZEOF_ARG + ADDU a3, $fp, 3 * FFI_SIZEOF_ARG + + # Call ffi_prep_args + jal t9 + + # Copy the stack pointer to t9 + move t9, $sp + + # Fix the stack if there are more than 8 64bit slots worth + # of arguments. + + # Load the number of bytes + REG_L t6, 2*FFI_SIZEOF_ARG($fp) + + # Is it bigger than 8 * FFI_SIZEOF_ARG? + daddiu t8, t6, -(8 * FFI_SIZEOF_ARG) + bltz t8, loadregs + + ADDU t9, t9, t8 + +loadregs: + + REG_L t6, 3*FFI_SIZEOF_ARG($fp) # load the flags word into t6. 
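As a side note on the flags word just loaded into t6: the following stand-alone C sketch (illustrative only, not libffi code) shows how ffi_prep_cif_machdep_int in ffi.c packs it for a hard-float N32 call such as double f(float, double). FFI_FLAG_BITS is 2 per ffitarget.h; the FFI_TYPE_FLOAT/FFI_TYPE_DOUBLE codes (2 and 3) are reproduced here from ffi.h rather than included.

    #include <stdio.h>

    /* Mirrors the N32 packing: one type code per floating-point argument
       register in FFI_FLAG_BITS-wide fields, return type above the eight
       argument fields. */
    #define FLAG_BITS   2
    #define TYPE_FLOAT  2
    #define TYPE_DOUBLE 3

    int main(void)
    {
      unsigned flags = 0;

      flags += TYPE_FLOAT  << (0 * FLAG_BITS);   /* arg 0 -> $f12 */
      flags += TYPE_DOUBLE << (1 * FLAG_BITS);   /* arg 1 -> $f13 */
      flags += TYPE_DOUBLE << (FLAG_BITS * 8);   /* return-type field */

      /* The assembly masks off the low 8 * FFI_FLAG_BITS bits to recover the
         per-register argument flags, then shifts to get the return type. */
      unsigned arg_mask = (1u << (FLAG_BITS * 8)) - 1;
      printf("arg flags = 0x%x, return type = %u\n",
             flags & arg_mask, flags >> (FLAG_BITS * 8));
      return 0;
    }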
+ +#ifdef __mips_soft_float + REG_L a0, 0*FFI_SIZEOF_ARG(t9) + REG_L a1, 1*FFI_SIZEOF_ARG(t9) + REG_L a2, 2*FFI_SIZEOF_ARG(t9) + REG_L a3, 3*FFI_SIZEOF_ARG(t9) + REG_L a4, 4*FFI_SIZEOF_ARG(t9) + REG_L a5, 5*FFI_SIZEOF_ARG(t9) + REG_L a6, 6*FFI_SIZEOF_ARG(t9) + REG_L a7, 7*FFI_SIZEOF_ARG(t9) +#else + and t4, t6, ((1< +#include + +/* Only build this code if we are compiling for o32 */ + +#if defined(FFI_MIPS_O32) + +#define callback a0 +#define bytes a2 +#define flags a3 + +#define SIZEOF_FRAME (4 * FFI_SIZEOF_ARG + 2 * FFI_SIZEOF_ARG) +#define A3_OFF (SIZEOF_FRAME + 3 * FFI_SIZEOF_ARG) +#define FP_OFF (SIZEOF_FRAME - 2 * FFI_SIZEOF_ARG) +#define RA_OFF (SIZEOF_FRAME - 1 * FFI_SIZEOF_ARG) + + .abicalls + .text + .align 2 + .globl ffi_call_O32 + .ent ffi_call_O32 +ffi_call_O32: +$LFB0: + # Prologue + SUBU $sp, SIZEOF_FRAME # Frame size +$LCFI00: + REG_S $fp, FP_OFF($sp) # Save frame pointer +$LCFI01: + REG_S ra, RA_OFF($sp) # Save return address +$LCFI02: + move $fp, $sp + +$LCFI03: + move t9, callback # callback function pointer + REG_S flags, A3_OFF($fp) # flags + + # Allocate at least 4 words in the argstack + LI v0, 4 * FFI_SIZEOF_ARG + blt bytes, v0, sixteen + + ADDU v0, bytes, 7 # make sure it is aligned + and v0, -8 # to an 8 byte boundry + +sixteen: + SUBU $sp, v0 # move the stack pointer to reflect the + # arg space + + ADDU a0, $sp, 4 * FFI_SIZEOF_ARG + + jalr t9 + + REG_L t0, A3_OFF($fp) # load the flags word + SRL t2, t0, 4 # shift our arg info + and t0, ((1<<4)-1) # mask out the return type + + ADDU $sp, 4 * FFI_SIZEOF_ARG # adjust $sp to new args + +#ifndef __mips_soft_float + bnez t0, pass_d # make it quick for int +#endif + REG_L a0, 0*FFI_SIZEOF_ARG($sp) # just go ahead and load the + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # four regs. + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +#ifndef __mips_soft_float +pass_d: + bne t0, FFI_ARGS_D, pass_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a2, 2*FFI_SIZEOF_ARG($sp) # passing a double + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f: + bne t0, FFI_ARGS_F, pass_d_d + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + REG_L a1, 1*FFI_SIZEOF_ARG($sp) # passing a float + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_d: + bne t0, FFI_ARGS_DD, pass_f_f + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing two doubles + b call_it + +pass_f_f: + bne t0, FFI_ARGS_FF, pass_d_f + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 1*FFI_SIZEOF_ARG($sp) # passing two floats + REG_L a2, 2*FFI_SIZEOF_ARG($sp) + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_d_f: + bne t0, FFI_ARGS_DF, pass_f_d + l.d $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.s $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float + REG_L a3, 3*FFI_SIZEOF_ARG($sp) + b call_it + +pass_f_d: + # assume that the only other combination must be float then double + # bne t0, FFI_ARGS_F_D, call_it + l.s $f12, 0*FFI_SIZEOF_ARG($sp) # load $fp regs from args + l.d $f14, 2*FFI_SIZEOF_ARG($sp) # passing double and float +#endif + +call_it: + # Load the static chain pointer + REG_L t7, SIZEOF_FRAME + 6*FFI_SIZEOF_ARG($fp) + + # Load the function pointer + REG_L t9, SIZEOF_FRAME + 5*FFI_SIZEOF_ARG($fp) + + # If the return value pointer is NULL, assume no return value. 
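The branch ladder that follows is easier to read next to a rough C analogue (an illustration under stated assumptions, not code libffi uses): the return-type field extracted into t2 from the flags word selects how $2/$3 or $f0 are stored through the return-value pointer. Here v0/v1 stand in for $2/$3 and f0_s/f0_d for $f0; hard-float case only.

    #include <ffi.h>
    #include <string.h>
    #include <stdint.h>

    void store_o32_return(unsigned rtype, void *rvalue,
                          uint32_t v0, uint32_t v1,
                          float f0_s, double f0_d)
    {
      if (rvalue == NULL)                 /* "assume no return value" */
        return;

      switch (rtype)
        {
        case FFI_TYPE_INT:                /* 32-bit integers and pointers */
          memcpy(rvalue, &v0, 4);
          break;
        case FFI_TYPE_UINT64:             /* any 64-bit integer: $2 at 0, $3 at 4 */
          memcpy(rvalue, &v0, 4);
          memcpy((char *) rvalue + 4, &v1, 4);
          break;
        case FFI_TYPE_FLOAT:              /* s.s $f0, 0(t0) */
          memcpy(rvalue, &f0_s, 4);
          break;
        case FFI_TYPE_DOUBLE:             /* s.d $f0, 0(t0) */
          memcpy(rvalue, &f0_d, 8);
          break;
        default:                          /* void / struct: nothing to store */
          break;
        }
    }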
+ REG_L t1, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + beqz t1, noretval + + bne t2, FFI_TYPE_INT, retlonglong + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v0, 0(t0) + b epilogue + +retlonglong: + # Really any 64-bit int, signed or not. + bne t2, FFI_TYPE_UINT64, retfloat + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) + REG_S v1, 4(t0) + REG_S v0, 0(t0) + b epilogue + +retfloat: + bne t2, FFI_TYPE_FLOAT, retdouble + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.s $f0, 0(t0) +#else + REG_S v0, 0(t0) +#endif + b epilogue + +retdouble: + bne t2, FFI_TYPE_DOUBLE, noretval + jalr t9 + REG_L t0, SIZEOF_FRAME + 4*FFI_SIZEOF_ARG($fp) +#ifndef __mips_soft_float + s.d $f0, 0(t0) +#else + REG_S v1, 4(t0) + REG_S v0, 0(t0) +#endif + b epilogue + +noretval: + jalr t9 + + # Epilogue +epilogue: + move $sp, $fp + REG_L $fp, FP_OFF($sp) # Restore frame pointer + REG_L ra, RA_OFF($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME # Fix stack pointer + j ra + +$LFE0: + .end ffi_call_O32 + + +/* ffi_closure_O32. Expects address of the passed-in ffi_closure + in t4 ($12). Stores any arguments passed in registers onto the + stack, then calls ffi_closure_mips_inner_O32, which + then decodes them. + + Stack layout: + + 3 - a3 save + 2 - a2 save + 1 - a1 save + 0 - a0 save, original sp + -1 - ra save + -2 - fp save + -3 - $16 (s0) save + -4 - cprestore + -5 - return value high (v1) + -6 - return value low (v0) + -7 - f14 (le high, be low) + -8 - f14 (le low, be high) + -9 - f12 (le high, be low) + -10 - f12 (le low, be high) + -11 - Called function a5 save + -12 - Called function a4 save + -13 - Called function a3 save + -14 - Called function a2 save + -15 - Called function a1 save + -16 - Called function a0 save, our sp and fp point here + */ + +#define SIZEOF_FRAME2 (16 * FFI_SIZEOF_ARG) +#define A3_OFF2 (SIZEOF_FRAME2 + 3 * FFI_SIZEOF_ARG) +#define A2_OFF2 (SIZEOF_FRAME2 + 2 * FFI_SIZEOF_ARG) +#define A1_OFF2 (SIZEOF_FRAME2 + 1 * FFI_SIZEOF_ARG) +#define A0_OFF2 (SIZEOF_FRAME2 + 0 * FFI_SIZEOF_ARG) +#define RA_OFF2 (SIZEOF_FRAME2 - 1 * FFI_SIZEOF_ARG) +#define FP_OFF2 (SIZEOF_FRAME2 - 2 * FFI_SIZEOF_ARG) +#define S0_OFF2 (SIZEOF_FRAME2 - 3 * FFI_SIZEOF_ARG) +#define GP_OFF2 (SIZEOF_FRAME2 - 4 * FFI_SIZEOF_ARG) +#define V1_OFF2 (SIZEOF_FRAME2 - 5 * FFI_SIZEOF_ARG) +#define V0_OFF2 (SIZEOF_FRAME2 - 6 * FFI_SIZEOF_ARG) +#define FA_1_1_OFF2 (SIZEOF_FRAME2 - 7 * FFI_SIZEOF_ARG) +#define FA_1_0_OFF2 (SIZEOF_FRAME2 - 8 * FFI_SIZEOF_ARG) +#define FA_0_1_OFF2 (SIZEOF_FRAME2 - 9 * FFI_SIZEOF_ARG) +#define FA_0_0_OFF2 (SIZEOF_FRAME2 - 10 * FFI_SIZEOF_ARG) +#define CALLED_A5_OFF2 (SIZEOF_FRAME2 - 11 * FFI_SIZEOF_ARG) +#define CALLED_A4_OFF2 (SIZEOF_FRAME2 - 12 * FFI_SIZEOF_ARG) + + .text + + .align 2 + .globl ffi_go_closure_O32 + .ent ffi_go_closure_O32 +ffi_go_closure_O32: +$LFB1: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI10: + + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI11: + + move $fp, $sp +$LCFI12: + + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 4($15) # cif + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. 
+ s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 4($15) # cif + REG_L a1, 8($15) # fun + move a2, $15 # user_data = go closure + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + + b $do_closure + +$LFE1: + .end ffi_go_closure_O32 + + .align 2 + .globl ffi_closure_O32 + .ent ffi_closure_O32 +ffi_closure_O32: +$LFB2: + # Prologue + .frame $fp, SIZEOF_FRAME2, ra + .set noreorder + .cpload t9 + .set reorder + SUBU $sp, SIZEOF_FRAME2 + .cprestore GP_OFF2 +$LCFI20: + REG_S $16, S0_OFF2($sp) # Save s0 + REG_S $fp, FP_OFF2($sp) # Save frame pointer + REG_S ra, RA_OFF2($sp) # Save return address +$LCFI21: + move $fp, $sp + +$LCFI22: + # Store all possible argument registers. If there are more than + # four arguments, then they are stored above where we put a3. + REG_S a0, A0_OFF2($fp) + REG_S a1, A1_OFF2($fp) + REG_S a2, A2_OFF2($fp) + REG_S a3, A3_OFF2($fp) + + # Load ABI enum to s0 + REG_L $16, 20($12) # cif pointer follows tramp. + REG_L $16, 0($16) # abi is first member. + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp save if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + # Store all possible float/double registers. + s.d $f12, FA_0_0_OFF2($fp) + s.d $f14, FA_1_0_OFF2($fp) +#endif +1: + # prepare arguments for ffi_closure_mips_inner_O32 + REG_L a0, 20($12) # cif pointer follows tramp. + REG_L a1, 24($12) # fun + REG_L a2, 28($12) # user_data + addu a3, $fp, V0_OFF2 # rvalue + + addu t9, $fp, A0_OFF2 # ar + REG_S t9, CALLED_A4_OFF2($fp) + + addu t9, $fp, FA_0_0_OFF2 #fpr + REG_S t9, CALLED_A5_OFF2($fp) + +$do_closure: + la t9, ffi_closure_mips_inner_O32 + # Call ffi_closure_mips_inner_O32 to do the work. + jalr t9 + + # Load the return value into the appropriate register. + move $8, $2 + li $9, FFI_TYPE_VOID + beq $8, $9, closure_done + + li $13, 1 # FFI_O32 + bne $16, $13, 1f # Skip fp restore if FFI_O32_SOFT_FLOAT + +#ifndef __mips_soft_float + li $9, FFI_TYPE_FLOAT + l.s $f0, V0_OFF2($fp) + beq $8, $9, closure_done + + li $9, FFI_TYPE_DOUBLE + l.d $f0, V0_OFF2($fp) + beq $8, $9, closure_done +#endif +1: + REG_L $3, V1_OFF2($fp) + REG_L $2, V0_OFF2($fp) + +closure_done: + # Epilogue + move $sp, $fp + REG_L $16, S0_OFF2($sp) # Restore s0 + REG_L $fp, FP_OFF2($sp) # Restore frame pointer + REG_L ra, RA_OFF2($sp) # Restore return address + ADDU $sp, SIZEOF_FRAME2 + j ra +$LFE2: + .end ffi_closure_O32 + +/* DWARF-2 unwind info. 
*/ + + .section .eh_frame,"a",@progbits +$Lframe0: + .4byte $LECIE0-$LSCIE0 # Length of Common Information Entry +$LSCIE0: + .4byte 0x0 # CIE Identifier Tag + .byte 0x1 # CIE Version + .ascii "zR\0" # CIE Augmentation + .uleb128 0x1 # CIE Code Alignment Factor + .sleb128 4 # CIE Data Alignment Factor + .byte 0x1f # CIE RA Column + .uleb128 0x1 # Augmentation size + .byte 0x00 # FDE Encoding (absptr) + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1d + .uleb128 0x0 + .align 2 +$LECIE0: + +$LSFDE0: + .4byte $LEFDE0-$LASFDE0 # FDE Length +$LASFDE0: + .4byte $LASFDE0-$Lframe0 # FDE CIE offset + .4byte $LFB0 # FDE initial location + .4byte $LFE0-$LFB0 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI00-$LFB0 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 0x18 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI01-$LCFI00 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI02-$LCFI01 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x18 + .align 2 +$LEFDE0: + +$LSFDE1: + .4byte $LEFDE1-$LASFDE1 # FDE Length +$LASFDE1: + .4byte $LASFDE1-$Lframe0 # FDE CIE offset + .4byte $LFB1 # FDE initial location + .4byte $LFE1-$LFB1 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI10-$LFB1 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI11-$LCFI10 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI12-$LCFI11 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE1: + +$LSFDE2: + .4byte $LEFDE2-$LASFDE2 # FDE Length +$LASFDE2: + .4byte $LASFDE2-$Lframe0 # FDE CIE offset + .4byte $LFB2 # FDE initial location + .4byte $LFE2-$LFB2 # FDE address range + .uleb128 0x0 # Augmentation size + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI20-$LFB2 + .byte 0xe # DW_CFA_def_cfa_offset + .uleb128 SIZEOF_FRAME2 + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI21-$LCFI20 + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x10 # $16 + .sleb128 -3 # SIZEOF_FRAME2 - 3*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1e # $fp + .sleb128 -2 # SIZEOF_FRAME2 - 2*FFI_SIZEOF_ARG($sp) + .byte 0x11 # DW_CFA_offset_extended_sf + .uleb128 0x1f # $ra + .sleb128 -1 # SIZEOF_FRAME2 - 1*FFI_SIZEOF_ARG($sp) + .byte 0x4 # DW_CFA_advance_loc4 + .4byte $LCFI22-$LCFI21 + .byte 0xc # DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 SIZEOF_FRAME2 + .align 2 +$LEFDE2: + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/eabi.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/eabi.S new file mode 100644 index 0000000..10cfb04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/eabi.S @@ -0,0 +1,101 @@ +/* ----------------------------------------------------------------------- + eabi.S - Copyright (c) 2012, 2013 Anthony Green + + Moxie Assembly glue. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .globl ffi_prep_args_EABI + + .text + .p2align 4 + .globl ffi_call_EABI + .type ffi_call_EABI, @function + + # $r0 : ffi_prep_args + # $r1 : &ecif + # $r2 : cif->bytes + # $r3 : fig->flags + # $r4 : ecif.rvalue + # $r5 : fn + +ffi_call_EABI: + push $sp, $r6 + push $sp, $r7 + push $sp, $r8 + dec $sp, 24 + + /* Store incoming args on stack. */ + sto.l 0($sp), $r0 /* ffi_prep_args */ + sto.l 4($sp), $r1 /* ecif */ + sto.l 8($sp), $r2 /* bytes */ + sto.l 12($sp), $r3 /* flags */ + sto.l 16($sp), $r4 /* &rvalue */ + sto.l 20($sp), $r5 /* fn */ + + /* Call ffi_prep_args. */ + mov $r6, $r4 /* Save result buffer */ + mov $r7, $r5 /* Save the target fn */ + mov $r8, $r3 /* Save the flags */ + sub $sp, $r2 /* Allocate stack space */ + mov $r0, $sp /* We can stomp over $r0 */ + /* $r1 is already set up */ + jsra ffi_prep_args + + /* Load register arguments. */ + ldo.l $r0, 0($sp) + ldo.l $r1, 4($sp) + ldo.l $r2, 8($sp) + ldo.l $r3, 12($sp) + ldo.l $r4, 16($sp) + ldo.l $r5, 20($sp) + + /* Call the target function. */ + jsr $r7 + + ldi.l $r7, 0xffffffff + cmp $r8, $r7 + beq retstruct + + ldi.l $r7, 4 + cmp $r8, $r7 + bgt ret2reg + + st.l ($r6), $r0 + jmpa retdone + +ret2reg: + st.l ($r6), $r0 + sto.l 4($r6), $r1 + +retstruct: +retdone: + /* Return. 
*/ + ldo.l $r6, -4($fp) + ldo.l $r7, -8($fp) + ldo.l $r8, -12($fp) + ret + .size ffi_call_EABI, .-ffi_call_EABI + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/ffi.c new file mode 100644 index 0000000..16d2bb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/ffi.c @@ -0,0 +1,285 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2012, 2013, 2018 Anthony Green + + Moxie Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void *ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + register int count = 0; + + p_argv = ecif->avalue; + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + } + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + (i != 0); + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + + if ((*p_arg)->type == FFI_TYPE_STRUCT) + { + z = sizeof(void*); + *(void **) argp = *p_argv; + } + else if (z < sizeof(int)) + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof(int)) + { + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + } + else + { + memcpy(argp, *p_argv, z); + } + p_argv++; + argp += z; + count += z; + } + + return (stack + ((count > 24) ? 
24 : FFI_ALIGN_DOWN(count, 8))); +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = -1; + else + cif->flags = cif->rtype->size; + + cif->bytes = FFI_ALIGN (cif->bytes, 8); + + return FFI_OK; +} + +extern void ffi_call_EABI(void *(*)(char *, extended_cif *), + extended_cif *, + unsigned, unsigned, + unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_EABI: + ffi_call_EABI(ffi_prep_args, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } +} + +void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3, + unsigned arg4, unsigned arg5, unsigned arg6) +{ + /* This function is called by a trampoline. The trampoline stows a + pointer to the ffi_closure object in $r12. We must save this + pointer in a place that will persist while we do our work. */ + register ffi_closure *creg __asm__ ("$r12"); + ffi_closure *closure = creg; + + /* Arguments that don't fit in registers are found on the stack + at a fixed offset above the current frame pointer. */ + register char *frame_pointer __asm__ ("$fp"); + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) arg1; + + /* 6 words reserved for register args + 3 words from jsr */ + char *stack_args = frame_pointer + 9*4; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { arg1, arg2, arg3, arg4, arg5, arg6 }; + char *register_args_ptr = (char *) register_args; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int i; + + /* preserve struct type return pointer passing */ + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) { + ptr += 4; + register_args_ptr = (char *)®ister_args[1]; + } + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + default: + /* This is an 8-byte value. */ + if (ptr == (char *) ®ister_args[5]) + { + /* The value is split across two locations */ + unsigned *ip = alloca(8); + avalue[i] = ip; + ip[0] = *(unsigned *) ptr; + ip[1] = *(unsigned *) stack_args; + } + else + { + avalue[i] = ptr; + } + ptr += 4; + break; + } + ptr += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + if (ptr == (char *) ®ister_args[6]) + ptr = stack_args; + else if (ptr == (char *) ®ister_args[7]) + ptr = stack_args + 4; + } + + /* Invoke the closure. 
*/ + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } + else + { + /* Allocate space for the return value and call the function. */ + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + asm ("mov $r12, %0\n ld.l $r0, ($r12)\n ldo.l $r1, 4($r12)" : : "r" (&rvalue)); + } +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) &closure->tramp[0]; + unsigned long fn = (long) ffi_closure_eabi; + unsigned long cls = (long) codeloc; + + if (cif->abi != FFI_EABI) + return FFI_BAD_ABI; + + fn = (unsigned long) ffi_closure_eabi; + + tramp[0] = 0x01e0; /* ldi.l $r12, .... */ + tramp[1] = cls >> 16; + tramp[2] = cls & 0xffff; + tramp[3] = 0x1a00; /* jmpa .... */ + tramp[4] = fn >> 16; + tramp[5] = fn & 0xffff; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/ffitarget.h new file mode 100644 index 0000000..623e3ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/moxie/ffitarget.h @@ -0,0 +1,52 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2013 Anthony Green + Target configuration macros for Moxie + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_EABI, + FFI_DEFAULT_ABI = FFI_EABI, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +/* Trampolines are 12-bytes long. See ffi_prep_closure_loc. 
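   For reference, ffi_prep_closure_loc packs those 12 bytes as six 16-bit words:

       tramp[0]    0x01e0                ldi.l $r12, ...   (opcode)
       tramp[1-2]  closure/codeloc address, high then low halfword
       tramp[3]    0x1a00                jmpa ...          (opcode)
       tramp[4-5]  ffi_closure_eabi address, high then low halfword

   i.e. the trampoline loads the closure pointer into $r12 and jumps to the common
   closure entry point.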
*/ +#define FFI_TRAMPOLINE_SIZE (12) + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/ffi.c new file mode 100644 index 0000000..721080d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/ffi.c @@ -0,0 +1,304 @@ +/* libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#include +#include + +#include + +/* The Nios II Processor Reference Handbook defines the procedure call + ABI as follows. + + Arguments are passed as if a structure containing the types of + the arguments were constructed. The first 16 bytes are passed in r4 + through r7, the remainder on the stack. The first 16 bytes of a function + taking variable arguments are passed in r4-r7 in the same way. + + Return values of types up to 8 bytes are returned in r2 and r3. For + return values greater than 8 bytes, the caller must allocate memory for + the result and pass the address as if it were argument 0. + + While this isn't specified explicitly in the ABI documentation, GCC + promotes integral arguments smaller than int size to 32 bits. + + Also of note, the ABI specifies that all structure objects are + aligned to 32 bits even if all their fields have a smaller natural + alignment. See FFI_AGGREGATE_ALIGNMENT. */ + + +/* Declare the assembly language hooks. */ + +extern UINT64 ffi_call_sysv (void (*) (char *, extended_cif *), + extended_cif *, + unsigned, + void (*fn) (void)); +extern void ffi_closure_sysv (void); + +/* Perform machine-dependent cif processing. */ + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* We always want at least 16 bytes in the parameter block since it + simplifies the low-level call function. Also round the parameter + block size up to a multiple of 4 bytes to preserve + 32-bit alignment of the stack pointer. */ + if (cif->bytes < 16) + cif->bytes = 16; + else + cif->bytes = (cif->bytes + 3) & ~3; + + return FFI_OK; +} + + +/* ffi_prep_args is called by the assembly routine to transfer arguments + to the stack using the pointers in the ecif array. + Note that the stack buffer is big enough to fit all the arguments, + but the first 16 bytes will be copied to registers for the actual + call. 
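   For orientation only, a minimal, self-contained sketch of the public libffi call
   sequence that ends up in this marshalling code (the target function add_ints and all
   values are illustrative, not taken from this source):

       #include <ffi.h>
       #include <stdio.h>

       static int add_ints(int a, int b) { return a + b; }   /* illustrative target */

       int main(void)
       {
         ffi_cif cif;
         ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
         int a = 2, b = 40;
         void *argvalues[2] = { &a, &b };
         ffi_arg result;   /* small integer results are widened to ffi_arg */

         if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) != FFI_OK)
           return 1;
         /* ffi_call reserves cif->bytes of argument space and lets the port's
            prep-args routine (ffi_prep_args here) lay the values out in it. */
         ffi_call(&cif, FFI_FN(add_ints), &result, argvalues);
         printf("%d\n", (int) result);
         return 0;
       }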
*/ + +void ffi_prep_args (char *stack, extended_cif *ecif) +{ + char *argp = stack; + unsigned int i; + + /* The implicit return value pointer is passed as if it were a hidden + first argument. */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && ecif->cif->rtype->size > 8) + { + (*(void **) argp) = ecif->rvalue; + argp += 4; + } + + for (i = 0; i < ecif->cif->nargs; i++) + { + void *avalue = ecif->avalue[i]; + ffi_type *atype = ecif->cif->arg_types[i]; + size_t size = atype->size; + size_t alignment = atype->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Copy the argument, promoting integral types smaller than a + word to word size. */ + if (size < sizeof (int)) + { + size = sizeof (int); + switch (atype->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) avalue; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) avalue; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) avalue; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) avalue; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, avalue, atype->size); + break; + + default: + FFI_ASSERT(0); + } + } + else if (size == sizeof (int)) + *(unsigned int *) argp = (unsigned int) *(UINT32 *) avalue; + else + memcpy (argp, avalue, size); + argp += size; + } +} + + +/* Call FN using the prepared CIF. RVALUE points to space allocated by + the caller for the return value, and AVALUE is an array of argument + pointers. */ + +void ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + + extended_cif ecif; + UINT64 result; + + /* If bigret is true, this is the case where a return value of larger + than 8 bytes is handled by being passed by reference as an implicit + argument. */ + int bigret = (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8); + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Allocate space for return value if this is the pass-by-reference case + and the caller did not provide a buffer. */ + if (rvalue == NULL && bigret) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + result = ffi_call_sysv (ffi_prep_args, &ecif, cif->bytes, fn); + + /* Now result contains the 64 bit contents returned from fn in + r2 and r3. Copy the value of the appropriate size to the user-provided + rvalue buffer. */ + if (rvalue && !bigret) + switch (cif->rtype->size) + { + case 1: + *(UINT8 *)rvalue = (UINT8) result; + break; + case 2: + *(UINT16 *)rvalue = (UINT16) result; + break; + case 4: + *(UINT32 *)rvalue = (UINT32) result; + break; + case 8: + *(UINT64 *)rvalue = (UINT64) result; + break; + default: + memcpy (rvalue, (void *)&result, cif->rtype->size); + break; + } +} + +/* This function is invoked from the closure trampoline to invoke + CLOSURE with argument block ARGS. Parse ARGS according to + CLOSURE->cfi and invoke CLOSURE->fun. */ + +static UINT64 +ffi_closure_helper (unsigned char *args, + ffi_closure *closure) +{ + ffi_cif *cif = closure->cif; + unsigned char *argp = args; + void **parsed_args = alloca (cif->nargs * sizeof (void *)); + UINT64 result; + void *retptr; + unsigned int i; + + /* First figure out what to do about the return type. If this is the + big-structure-return case, the first arg is the hidden return buffer + allocated by the caller. 
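   Concretely: for a cif whose return type is, say, a hypothetical
   struct big { char b[12]; } (size 12 > 8), the caller allocates the result object and
   passes its address as an implicit word in front of the real arguments, so the helper
   peels that word off here and hands it to the user handler as retptr. Results of
   8 bytes or less are instead accumulated in the local UINT64 and travel back to
   ffi_closure_sysv in r2/r3, as described in the ABI notes at the top of this file.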
*/ + if (cif->rtype->type == FFI_TYPE_STRUCT + && cif->rtype->size > 8) + { + retptr = *((void **) argp); + argp += 4; + } + else + retptr = (void *) &result; + + /* Fill in the array of argument pointers. */ + for (i = 0; i < cif->nargs; i++) + { + size_t size = cif->arg_types[i]->size; + size_t alignment = cif->arg_types[i]->alignment; + + /* Align argp as appropriate for the argument type. */ + if ((alignment - 1) & (unsigned) argp) + argp = (char *) FFI_ALIGN (argp, alignment); + + /* Arguments smaller than an int are promoted to int. */ + if (size < sizeof (int)) + size = sizeof (int); + + /* Store the pointer. */ + parsed_args[i] = argp; + argp += size; + } + + /* Call the user-supplied function. */ + (closure->fun) (cif, retptr, parsed_args, closure->user_data); + return result; +} + + +/* Initialize CLOSURE with a trampoline to call FUN with + CIF and USER_DATA. */ +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun) (ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + int i; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + /* The trampoline looks like: + movhi r8, %hi(ffi_closure_sysv) + ori r8, r8, %lo(ffi_closure_sysv) + movhi r9, %hi(ffi_closure_helper) + ori r0, r9, %lo(ffi_closure_helper) + movhi r10, %hi(closure) + ori r10, r10, %lo(closure) + jmp r8 + and then ffi_closure_sysv retrieves the closure pointer out of r10 + in addition to the arguments passed in the normal way for the call, + and invokes ffi_closure_helper. We encode the pointer to + ffi_closure_helper in the trampoline because making a PIC call + to it in ffi_closure_sysv would be messy (it would have to indirect + through the GOT). */ + +#define HI(x) ((((unsigned int) (x)) >> 16) & 0xffff) +#define LO(x) (((unsigned int) (x)) & 0xffff) + tramp[0] = (0 << 27) | (8 << 22) | (HI (ffi_closure_sysv) << 6) | 0x34; + tramp[1] = (8 << 27) | (8 << 22) | (LO (ffi_closure_sysv) << 6) | 0x14; + tramp[2] = (0 << 27) | (9 << 22) | (HI (ffi_closure_helper) << 6) | 0x34; + tramp[3] = (9 << 27) | (9 << 22) | (LO (ffi_closure_helper) << 6) | 0x14; + tramp[4] = (0 << 27) | (10 << 22) | (HI (closure) << 6) | 0x34; + tramp[5] = (10 << 27) | (10 << 22) | (LO (closure) << 6) | 0x14; + tramp[6] = (8 << 27) | (0x0d << 11) | 0x3a; +#undef HI +#undef LO + + /* Flush the caches. + See Example 9-4 in the Nios II Software Developer's Handbook. */ + for (i = 0; i < 7; i++) + asm volatile ("flushd 0(%0); flushi %0" :: "r"(tramp + i) : "memory"); + asm volatile ("flushp" ::: "memory"); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/ffitarget.h new file mode 100644 index 0000000..134d118 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/ffitarget.h @@ -0,0 +1,52 @@ +/* libffi target includes for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* Structures have a 4-byte alignment even if all the fields have lesser + alignment requirements. */ +#define FFI_AGGREGATE_ALIGNMENT 4 + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 28 /* 7 instructions */ +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/sysv.S new file mode 100644 index 0000000..75f442b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/nios2/sysv.S @@ -0,0 +1,136 @@ +/* Low-level libffi support for Altera Nios II. + + Copyright (c) 2013 Mentor Graphics. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +/* This function is declared on the C side as + + extern UINT64 ffi_call_sysv (void (*arghook) (char *, extended_cif *), + extended_cif *ecif, + unsigned nbytes, + void (*fn) (void)); + + On input, the arguments appear as + r4 = arghook + r5 = ecif + r6 = nbytes + r7 = fn +*/ + + .section .text + .align 2 + .global ffi_call_sysv + .type ffi_call_sysv, @function + +ffi_call_sysv: + .cfi_startproc + + /* Create the stack frame, saving r16 so we can use it locally. */ + addi sp, sp, -12 + .cfi_def_cfa_offset 12 + stw ra, 8(sp) + stw fp, 4(sp) + stw r16, 0(sp) + .cfi_offset 31, -4 + .cfi_offset 28, -8 + .cfi_offset 16, -12 + mov fp, sp + .cfi_def_cfa_register 28 + mov r16, r7 + + /* Adjust the stack pointer to create the argument buffer + nbytes long. */ + sub sp, sp, r6 + + /* Call the arghook function. */ + mov r2, r4 /* fn */ + mov r4, sp /* argbuffer */ + callr r2 /* r5 already contains ecif */ + + /* Pop off the first 16 bytes of the argument buffer on the stack, + transferring the contents to the argument registers. */ + ldw r4, 0(sp) + ldw r5, 4(sp) + ldw r6, 8(sp) + ldw r7, 12(sp) + addi sp, sp, 16 + + /* Call the user function, which leaves its result in r2 and r3. */ + callr r16 + + /* Pop off the stack frame. */ + mov sp, fp + ldw ra, 8(sp) + ldw fp, 4(sp) + ldw r16, 0(sp) + addi sp, sp, 12 + ret + .cfi_endproc + .size ffi_call_sysv, .-ffi_call_sysv + + +/* Closure trampolines jump here after putting the C helper address + in r9 and the closure pointer in r10. The user-supplied arguments + to the closure are in the normal places, in r4-r7 and on the + stack. Push the register arguments on the stack too and then call the + C helper function to deal with them. */ + + .section .text + .align 2 + .global ffi_closure_sysv + .type ffi_closure_sysv, @function + +ffi_closure_sysv: + .cfi_startproc + + /* Create the stack frame, pushing the register args on the stack + just below the stack args. This is the same trick illustrated + in Figure 7-3 in the Nios II Processor Reference Handbook, used + for variable arguments and structures passed by value. */ + addi sp, sp, -20 + .cfi_def_cfa_offset 20 + stw ra, 0(sp) + .cfi_offset 31, -20 + stw r4, 4(sp) + .cfi_offset 4, -16 + stw r5, 8(sp) + .cfi_offset 5, -12 + stw r6, 12(sp) + .cfi_offset 6, -8 + stw r7, 16(sp) + .cfi_offset 7, -4 + + /* Call the helper. + r4 = pointer to arguments on stack + r5 = closure pointer (loaded in r10 by the trampoline) + r9 = address of helper function (loaded by trampoline) */ + addi r4, sp, 4 + mov r5, r10 + callr r9 + + /* Pop the stack and return. 
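   (The helper's 64-bit result is already sitting in r2/r3 per the ABI notes in ffi.c,
   so it is left untouched and simply flows back to the original caller.)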
*/ + ldw ra, 0(sp) + addi sp, sp, 20 + .cfi_def_cfa_offset -20 + ret + .cfi_endproc + .size ffi_closure_sysv, .-ffi_closure_sysv + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/ffi.c new file mode 100644 index 0000000..2bad938 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/ffi.c @@ -0,0 +1,328 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include "ffi_common.h" + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void* ffi_prep_args(char *stack, extended_cif *ecif) +{ + char *stacktemp = stack; + int i, s; + ffi_type **arg; + int count = 0; + int nfixedargs; + + nfixedargs = ecif->cif->nfixedargs; + arg = ecif->cif->arg_types; + void **argv = ecif->avalue; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + { + *(void **) stack = ecif->rvalue; + stack += 4; + count = 4; + } + for(i=0; icif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + count = 24; + stack = stacktemp + 24; + } + nfixedargs--; + + s = 4; + switch((*arg)->type) + { + case FFI_TYPE_STRUCT: + *(void **)stack = *argv; + break; + + case FFI_TYPE_SINT8: + *(signed int *) stack = (signed int)*(SINT8 *)(* argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) stack = (unsigned int)*(UINT8 *)(* argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) stack = (signed int)*(SINT16 *)(* argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) stack = (unsigned int)*(UINT16 *)(* argv); + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + *(int *)stack = *(int*)(*argv); + break; + + default: /* 8 byte types */ + if (count == 20) /* never split arguments */ + { + stack += 4; + count += 4; + } + s = (*arg)->size; + memcpy(stack, *argv, s); + break; + } + + stack += s; + count += s; + argv++; + arg++; + } + return stacktemp + ((count>24)?24:0); +} + +extern void ffi_call_SYSV(unsigned, + extended_cif *, + void *(*)(int *, extended_cif *), + unsigned *, + void (*fn)(void), + unsigned); + + +void ffi_call(ffi_cif *cif, void (*fn)(void), void 
*rvalue, void **avalue) +{ + int i; + int size; + ffi_type **arg; + + /* Calculate size to allocate on stack */ + + for(i = 0, arg = cif->arg_types, size=0; i < cif->nargs; i++, arg++) + { + if ((*arg)->type == FFI_TYPE_STRUCT) + size += 4; + else + if ((*arg)->size <= 4) + size += 4; + else + size += 8; + } + + /* for variadic functions more space is needed on the stack */ + if (cif->nargs != cif->nfixedargs) + size += 24; + + if (cif->rtype->type == FFI_TYPE_STRUCT) + size += 4; + + + extended_cif ecif; + ecif.cif = cif; + ecif.avalue = avalue; + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(size, &ecif, ffi_prep_args, rvalue, fn, cif->flags); + break; + default: + FFI_ASSERT(0); + break; + } +} + + +void ffi_closure_SYSV(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, unsigned long r8) +{ + register int *sp __asm__ ("r17"); + register int *r13 __asm__ ("r13"); + + ffi_closure* closure = (ffi_closure*) r13; + char *stack_args = sp; + + /* Lay the register arguments down in a continuous chunk of memory. */ + unsigned register_args[6] = + { r3, r4, r5, r6, r7, r8 }; + + /* Pointer to a struct return value. */ + void *struct_rvalue = (void *) r3; + + ffi_cif *cif = closure->cif; + ffi_type **arg_types = cif->arg_types; + void **avalue = alloca (cif->nargs * sizeof(void *)); + char *ptr = (char *) register_args; + int count = 0; + int nfixedargs = cif->nfixedargs; + int i; + + /* preserve struct type return pointer passing */ + + if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ptr += 4; + count = 4; + } + + /* Find the address of each argument. */ + for (i = 0; i < cif->nargs; i++) + { + + /* variadic args are saved on stack */ + if ((nfixedargs == 0) && (count < 24)) + { + ptr = stack_args; + count = 24; + } + nfixedargs--; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = ptr + 3; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = ptr + 2; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + avalue[i] = ptr; + break; + + case FFI_TYPE_STRUCT: + avalue[i] = *(void**)ptr; + break; + + default: + /* 8-byte values */ + + /* arguments are never splitted */ + if (ptr == ®ister_args[5]) + ptr = stack_args; + avalue[i] = ptr; + ptr += 4; + count += 4; + break; + } + ptr += 4; + count += 4; + + /* If we've handled more arguments than fit in registers, + start looking at the those passed on the stack. */ + + if (count == 24) + ptr = stack_args; + } + + if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT)) + { + (closure->fun) (cif, struct_rvalue, avalue, closure->user_data); + } else + { + long long rvalue; + (closure->fun) (cif, &rvalue, avalue, closure->user_data); + if (cif->rtype) + asm ("l.ori r12, %0, 0x0\n l.lwz r11, 0(r12)\n l.lwz r12, 4(r12)" : : "r" (&rvalue)); + } +} + + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + unsigned short *tramp = (unsigned short *) closure->tramp; + unsigned long fn = (unsigned long) ffi_closure_SYSV; + unsigned long cls = (unsigned long) codeloc; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + /* write pointers to temporary registers */ + tramp[0] = (0x6 << 10) | (13 << 5); /* l.movhi r13, ... 
*/ + tramp[1] = cls >> 16; + tramp[2] = (0x2a << 10) | (13 << 5) | 13; /* l.ori r13, r13, ... */ + tramp[3] = cls & 0xFFFF; + + tramp[4] = (0x6 << 10) | (15 << 5); /* l.movhi r15, ... */ + tramp[5] = fn >> 16; + tramp[6] = (0x2a << 10) | (15 << 5) | 15; /* l.ori r15, r15 ... */ + tramp[7] = fn & 0xFFFF; + + tramp[8] = (0x11 << 10); /* l.jr r15 */ + tramp[9] = 15 << 11; + + tramp[10] = (0x2a << 10) | (17 << 5) | 1; /* l.ori r17, r1, ... */ + tramp[11] = 0x0; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep (ffi_cif *cif) +{ + cif->flags = 0; + + /* structures are returned as pointers */ + if (cif->rtype->type == FFI_TYPE_STRUCT) + cif->flags = FFI_TYPE_STRUCT; + else + if (cif->rtype->size > 4) + cif->flags = FFI_TYPE_UINT64; + + cif->nfixedargs = cif->nargs; + + return FFI_OK; +} + + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + ffi_status status; + + status = ffi_prep_cif_machdep (cif); + cif->nfixedargs = nfixedargs; + return status; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/ffitarget.h new file mode 100644 index 0000000..e55da28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/ffitarget.h @@ -0,0 +1,58 @@ +/* ----------------------------------------------------------------------- + ffitarget.h - Copyright (c) 2014 Sebastian Macke + + OpenRISC Target configuration macros + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE (24) + +#define FFI_TARGET_SPECIFIC_VARIADIC 1 +#define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs; + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/sysv.S new file mode 100644 index 0000000..df6570b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/or1k/sysv.S @@ -0,0 +1,107 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2014 Sebastian Macke + + OpenRISC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +.text + .globl ffi_call_SYSV + .type ffi_call_SYSV, @function +/* + r3: size to allocate on stack + r4: extended cif structure + r5: function pointer ffi_prep_args + r6: ret address + r7: function to call + r8: flag for return type +*/ + +ffi_call_SYSV: + /* Store registers used on stack */ + l.sw -4(r1), r9 /* return address */ + l.sw -8(r1), r1 /* stack address */ + l.sw -12(r1), r14 /* callee saved registers */ + l.sw -16(r1), r16 + l.sw -20(r1), r18 + l.sw -24(r1), r20 + + l.ori r14, r1, 0x0 /* save stack pointer */ + l.addi r1, r1, -24 + + l.ori r16, r7, 0x0 /* save function address */ + l.ori r18, r6, 0x0 /* save ret address */ + l.ori r20, r8, 0x0 /* save flag */ + + l.sub r1, r1, r3 /* reserve space on stack */ + + /* Call ffi_prep_args */ + l.ori r3, r1, 0x0 /* first argument stack address, second already ecif */ + l.jalr r5 + l.nop + + /* Load register arguments and call*/ + + l.lwz r3, 0(r1) + l.lwz r4, 4(r1) + l.lwz r5, 8(r1) + l.lwz r6, 12(r1) + l.lwz r7, 16(r1) + l.lwz r8, 20(r1) + l.ori r1, r11, 0x0 /* new stack pointer */ + l.jalr r16 + l.nop + + /* handle return values */ + + l.sfeqi r20, FFI_TYPE_STRUCT + l.bf ret /* structs don't return an rvalue */ + l.nop + + /* copy ret address */ + + l.sfeqi r20, FFI_TYPE_UINT64 + l.bnf four_byte_ret /* 8 byte value is returned */ + l.nop + + l.sw 4(r18), r12 + +four_byte_ret: + l.sw 0(r18), r11 + +ret: + /* return */ + l.ori r1, r14, 0x0 /* reset stack pointer */ + l.lwz r9, -4(r1) + l.lwz r1, -8(r1) + l.lwz r14, -12(r1) + l.lwz r16, -16(r1) + l.lwz r18, -20(r1) + l.lwz r20, -24(r1) + l.jr r9 + l.nop + +.size ffi_call_SYSV, .-ffi_call_SYSV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/ffi.c new file mode 100644 index 0000000..95e6694 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/ffi.c @@ -0,0 +1,674 @@ +/* ----------------------------------------------------------------------- + ffi.c - (c) 2011 Anthony Green + (c) 2008 Red Hat, Inc. + (c) 2006 Free Software Foundation, Inc. + (c) 2003-2004 Randolph Chung + + HPPA Foreign Function Interface + HP-UX PA ABI support + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#define ROUND_UP(v, a) (((size_t)(v) + (a) - 1) & ~((a) - 1)) + +#define MIN_STACK_SIZE 64 +#define FIRST_ARG_SLOT 9 +#define DEBUG_LEVEL 0 + +#define fldw(addr, fpreg) \ + __asm__ volatile ("fldw 0(%0), %%" #fpreg "L" : : "r"(addr) : #fpreg) +#define fstw(fpreg, addr) \ + __asm__ volatile ("fstw %%" #fpreg "L, 0(%0)" : : "r"(addr)) +#define fldd(addr, fpreg) \ + __asm__ volatile ("fldd 0(%0), %%" #fpreg : : "r"(addr) : #fpreg) +#define fstd(fpreg, addr) \ + __asm__ volatile ("fstd %%" #fpreg "L, 0(%0)" : : "r"(addr)) + +#define debug(lvl, x...) do { if (lvl <= DEBUG_LEVEL) { printf(x); } } while (0) + +static inline int ffi_struct_type(ffi_type *t) +{ + size_t sz = t->size; + + /* Small structure results are passed in registers, + larger ones are passed by pointer. Note that + small structures of size 2, 4 and 8 differ from + the corresponding integer types in that they have + different alignment requirements. */ + + if (sz <= 1) + return FFI_TYPE_UINT8; + else if (sz == 2) + return FFI_TYPE_SMALL_STRUCT2; + else if (sz == 3) + return FFI_TYPE_SMALL_STRUCT3; + else if (sz == 4) + return FFI_TYPE_SMALL_STRUCT4; + else if (sz == 5) + return FFI_TYPE_SMALL_STRUCT5; + else if (sz == 6) + return FFI_TYPE_SMALL_STRUCT6; + else if (sz == 7) + return FFI_TYPE_SMALL_STRUCT7; + else if (sz <= 8) + return FFI_TYPE_SMALL_STRUCT8; + else + return FFI_TYPE_STRUCT; /* else, we pass it by pointer. */ +} + +/* PA has a downward growing stack, which looks like this: + + Offset + [ Variable args ] + SP = (4*(n+9)) arg word N + ... + SP-52 arg word 4 + [ Fixed args ] + SP-48 arg word 3 + SP-44 arg word 2 + SP-40 arg word 1 + SP-36 arg word 0 + [ Frame marker ] + ... + SP-20 RP + SP-4 previous SP + + The first four argument words on the stack are reserved for use by + the callee. Instead, the general and floating registers replace + the first four argument slots. Non FP arguments are passed solely + in the general registers. FP arguments are passed in both general + and floating registers when using libffi. + + Non-FP 32-bit args are passed in gr26, gr25, gr24 and gr23. + Non-FP 64-bit args are passed in register pairs, starting + on an odd numbered register (i.e. r25+r26 and r23+r24). + FP 32-bit arguments are passed in fr4L, fr5L, fr6L and fr7L. + FP 64-bit arguments are passed in fr5 and fr7. + + The registers are allocated in the same manner as stack slots. + This allows the callee to save its arguments on the stack if + necessary: + + arg word 3 -> gr23 or fr7L + arg word 2 -> gr24 or fr6L or fr7R + arg word 1 -> gr25 or fr5L + arg word 0 -> gr26 or fr4L or fr5R + + Note that fr4R and fr6R are never used for arguments (i.e., + doubles are not passed in fr4 or fr6). + + The rest of the arguments are passed on the stack starting at SP-52, + but 64-bit arguments need to be aligned to an 8-byte boundary + + This means we can have holes either in the register allocation, + or in the stack. */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments + + The following code will put everything into the stack frame + (which was allocated by the asm routine), and on return + the asm routine will load the arguments that should be + passed by register into the appropriate registers + + NOTE: We load floating point args in this function... that means we + assume gcc will not mess with fp regs in here. 
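   To make the slot bookkeeping concrete, take a hypothetical prototype
   (illustration only):

       extern void g(int a, double d, int b);

   With FIRST_ARG_SLOT = 9, ffi_prep_args_pa32 assigns

       a -> slot 9       -> arg word 0    -> gr26
       d -> slots 11-12  -> arg words 2-3 -> fr7   (bumped up by the 8-byte alignment rule)
       b -> slot 13      -> arg word 4    -> stack at SP-52

   and arg word 1 (gr25 / fr5L) is left unused, which is one of the holes in the
   register allocation mentioned above.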
*/ + +void ffi_prep_args_pa32(UINT32 *stack, extended_cif *ecif, unsigned bytes) +{ + register unsigned int i; + register ffi_type **p_arg; + register void **p_argv; + unsigned int slot = FIRST_ARG_SLOT; + char *dest_cpy; + size_t len; + + debug(1, "%s: stack = %p, ecif = %p, bytes = %u\n", __FUNCTION__, stack, + ecif, bytes); + + p_arg = ecif->cif->arg_types; + p_argv = ecif->avalue; + + for (i = 0; i < ecif->cif->nargs; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + *(SINT32 *)(stack - slot) = *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT32 *)(stack - slot) = *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT32 *)(stack - slot) = *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT32 *)(stack - slot) = *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + debug(3, "Storing UINT32 %u in slot %u\n", *(UINT32 *)(*p_argv), + slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + break; + + case FFI_TYPE_FLOAT: + /* First 4 args go in fr4L - fr7L. */ + debug(3, "Storing UINT32(float) in slot %u\n", slot); + *(UINT32 *)(stack - slot) = *(UINT32 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 4 args go in fr4L - fr7L. */ + case 0: fldw(stack - slot, fr4); break; + case 1: fldw(stack - slot, fr5); break; + case 2: fldw(stack - slot, fr6); break; + case 3: fldw(stack - slot, fr7); break; + } + break; + + case FFI_TYPE_DOUBLE: + /* Align slot for 64-bit type. */ + slot += (slot & 1) ? 1 : 2; + debug(3, "Storing UINT64(double) at slot %u\n", slot); + *(UINT64 *)(stack - slot) = *(UINT64 *)(*p_argv); + switch (slot - FIRST_ARG_SLOT) + { + /* First 2 args go in fr5, fr7. */ + case 1: fldd(stack - slot, fr5); break; + case 3: fldd(stack - slot, fr7); break; + } + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are passed in the same manner as structures + larger than 8 bytes. */ + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; +#endif + + case FFI_TYPE_STRUCT: + + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + + len = (*p_arg)->size; + if (len <= 4) + { + dest_cpy = (char *)(stack - slot) + 4 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else if (len <= 8) + { + slot += (slot & 1) ? 1 : 2; + dest_cpy = (char *)(stack - slot) + 8 - len; + memcpy(dest_cpy, (char *)*p_argv, len); + } + else + *(UINT32 *)(stack - slot) = (UINT32)(*p_argv); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + p_argv++; + } + + /* Make sure we didn't mess up and scribble on the stack. 
*/ + { + unsigned int n; + + debug(5, "Stack setup:\n"); + for (n = 0; n < (bytes + 3) / 4; n++) + { + if ((n%4) == 0) { debug(5, "\n%08x: ", (unsigned int)(stack - n)); } + debug(5, "%08x ", *(stack - n)); + } + debug(5, "\n"); + } + + FFI_ASSERT(slot * 4 <= bytes); + + return; +} + +static void ffi_size_stack_pa32(ffi_cif *cif) +{ + ffi_type **ptr; + int i; + int z = 0; /* # stack slots */ + + for (ptr = cif->arg_types, i = 0; i < cif->nargs; ptr++, i++) + { + int type = (*ptr)->type; + + switch (type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + z += 2 + (z & 1); /* must start on even regs, so we may waste one */ + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: +#endif + case FFI_TYPE_STRUCT: + z += 1; /* pass by ptr, callee will copy */ + break; + + default: /* <= 32-bit values */ + z++; + } + } + + /* We can fit up to 6 args in the default 64-byte stack frame, + if we need more, we need more stack. */ + if (z <= 6) + cif->bytes = MIN_STACK_SIZE; /* min stack size */ + else + cif->bytes = 64 + ROUND_UP((z - 6) * sizeof(UINT32), MIN_STACK_SIZE); + + debug(3, "Calculated stack size is %u bytes\n", cif->bytes); +} + +/* Perform machine dependent cif processing. */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + cif->flags = (unsigned) cif->rtype->type; + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a structure. */ + cif->flags = FFI_TYPE_STRUCT; + break; +#endif + + case FFI_TYPE_STRUCT: + /* For the return type we have to check the size of the structures. + If the size is smaller or equal 4 bytes, the result is given back + in one register. If the size is smaller or equal 8 bytes than we + return the result in two registers. But if the size is bigger than + 8 bytes, we work with pointers. */ + cif->flags = ffi_struct_type(cif->rtype); + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + /* Lucky us, because of the unique PA ABI we get to do our + own stack sizing. */ + switch (cif->abi) + { + case FFI_PA32: + ffi_size_stack_pa32(cif); + break; + + default: + FFI_ASSERT(0); + break; + } + + return FFI_OK; +} + +extern void ffi_call_pa32(void (*)(UINT32 *, extended_cif *, unsigned), + extended_cif *, unsigned, unsigned, unsigned *, + void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if (rvalue == NULL +#ifdef PA_HPUX + && (cif->rtype->type == FFI_TYPE_STRUCT + || cif->rtype->type == FFI_TYPE_LONGDOUBLE)) +#else + && cif->rtype->type == FFI_TYPE_STRUCT) +#endif + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + + switch (cif->abi) + { + case FFI_PA32: + debug(3, "Calling ffi_call_pa32: ecif=%p, bytes=%u, flags=%u, rvalue=%p, fn=%p\n", &ecif, cif->bytes, cif->flags, ecif.rvalue, (void *)fn); + ffi_call_pa32(ffi_prep_args_pa32, &ecif, cif->bytes, + cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT(0); + break; + } +} + +#if FFI_CLOSURES +/* This is more-or-less an inverse of ffi_call -- we have arguments on + the stack, and we need to fill them into a cif structure and invoke + the user function. 
This really ought to be in asm to make sure + the compiler doesn't do things we don't expect. */ +ffi_status ffi_closure_inner_pa32(ffi_closure *closure, UINT32 *stack) +{ + ffi_cif *cif; + void **avalue; + void *rvalue; + /* Functions can return up to 64-bits in registers. Return address + must be double word aligned. */ + union { double rd; UINT32 ret[2]; } u; + ffi_type **p_arg; + char *tmp; + int i, avn; + unsigned int slot = FIRST_ARG_SLOT; + register UINT32 r28 asm("r28"); + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + cif = closure->cif; + + /* If returning via structure, callee will write to our pointer. */ + if (cif->flags == FFI_TYPE_STRUCT) + rvalue = (void *)r28; + else + rvalue = &u; + + avalue = (void **)alloca(cif->nargs * FFI_SIZEOF_ARG); + avn = cif->nargs; + p_arg = cif->arg_types; + + for (i = 0; i < avn; i++) + { + int type = (*p_arg)->type; + + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + avalue[i] = (char *)(stack - slot) + sizeof(UINT32) - (*p_arg)->size; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_FLOAT: +#ifdef PA_LINUX + /* The closure call is indirect. In Linux, floating point + arguments in indirect calls with a prototype are passed + in the floating point registers instead of the general + registers. So, we need to replace what was previously + stored in the current slot with the value in the + corresponding floating point register. */ + switch (slot - FIRST_ARG_SLOT) + { + case 0: fstw(fr4, (void *)(stack - slot)); break; + case 1: fstw(fr5, (void *)(stack - slot)); break; + case 2: fstw(fr6, (void *)(stack - slot)); break; + case 3: fstw(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + + case FFI_TYPE_DOUBLE: + slot += (slot & 1) ? 1 : 2; +#ifdef PA_LINUX + /* See previous comment for FFI_TYPE_FLOAT. */ + switch (slot - FIRST_ARG_SLOT) + { + case 1: fstd(fr5, (void *)(stack - slot)); break; + case 3: fstd(fr7, (void *)(stack - slot)); break; + } +#endif + avalue[i] = (void *)(stack - slot); + break; + +#ifdef PA_HPUX + case FFI_TYPE_LONGDOUBLE: + /* Long doubles are treated like a big structure. */ + avalue[i] = (void *) *(stack - slot); + break; +#endif + + case FFI_TYPE_STRUCT: + /* Structs smaller or equal than 4 bytes are passed in one + register. Structs smaller or equal 8 bytes are passed in two + registers. Larger structures are passed by pointer. */ + if((*p_arg)->size <= 4) + { + avalue[i] = (void *)(stack - slot) + sizeof(UINT32) - + (*p_arg)->size; + } + else if ((*p_arg)->size <= 8) + { + slot += (slot & 1) ? 1 : 2; + avalue[i] = (void *)(stack - slot) + sizeof(UINT64) - + (*p_arg)->size; + } + else + avalue[i] = (void *) *(stack - slot); + break; + + default: + FFI_ASSERT(0); + } + + slot++; + p_arg++; + } + + /* Invoke the closure. */ + (c->fun) (cif, rvalue, avalue, c->user_data); + + debug(3, "after calling function, ret[0] = %08x, ret[1] = %08x\n", u.ret[0], + u.ret[1]); + + /* Store the result using the lower 2 bytes of the flags. 
*/ + switch (cif->flags) + { + case FFI_TYPE_UINT8: + *(stack - FIRST_ARG_SLOT) = (UINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_SINT8: + *(stack - FIRST_ARG_SLOT) = (SINT8)(u.ret[0] >> 24); + break; + case FFI_TYPE_UINT16: + *(stack - FIRST_ARG_SLOT) = (UINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_SINT16: + *(stack - FIRST_ARG_SLOT) = (SINT16)(u.ret[0] >> 16); + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + *(stack - FIRST_ARG_SLOT) = u.ret[0]; + *(stack - FIRST_ARG_SLOT - 1) = u.ret[1]; + break; + + case FFI_TYPE_DOUBLE: + fldd(rvalue, fr4); + break; + + case FFI_TYPE_FLOAT: + fldw(rvalue, fr4); + break; + + case FFI_TYPE_STRUCT: + /* Don't need a return value, done by caller. */ + break; + + case FFI_TYPE_SMALL_STRUCT2: + case FFI_TYPE_SMALL_STRUCT3: + case FFI_TYPE_SMALL_STRUCT4: + tmp = (void*)(stack - FIRST_ARG_SLOT); + tmp += 4 - cif->rtype->size; + memcpy((void*)tmp, &u, cif->rtype->size); + break; + + case FFI_TYPE_SMALL_STRUCT5: + case FFI_TYPE_SMALL_STRUCT6: + case FFI_TYPE_SMALL_STRUCT7: + case FFI_TYPE_SMALL_STRUCT8: + { + unsigned int ret2[2]; + int off; + + /* Right justify ret[0] and ret[1] */ + switch (cif->flags) + { + case FFI_TYPE_SMALL_STRUCT5: off = 3; break; + case FFI_TYPE_SMALL_STRUCT6: off = 2; break; + case FFI_TYPE_SMALL_STRUCT7: off = 1; break; + default: off = 0; break; + } + + memset (ret2, 0, sizeof (ret2)); + memcpy ((char *)ret2 + off, &u, 8 - off); + + *(stack - FIRST_ARG_SLOT) = ret2[0]; + *(stack - FIRST_ARG_SLOT - 1) = ret2[1]; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_VOID: + break; + + default: + debug(0, "assert with cif->flags: %d\n",cif->flags); + FFI_ASSERT(0); + break; + } + return FFI_OK; +} + +/* Fill in a closure to refer to the specified fun and user_data. + cif specifies the argument and result types for fun. + The cif must already be prep'ed. */ + +extern void ffi_closure_pa32(void); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + ffi_closure *c = (ffi_closure *)FFI_RESTORE_PTR (closure); + + /* The layout of a function descriptor. A function pointer with the PLABEL + bit set points to a function descriptor. */ + struct pa32_fd + { + UINT32 code_pointer; + UINT32 gp; + }; + + struct ffi_pa32_trampoline_struct + { + UINT32 code_pointer; /* Pointer to ffi_closure_unix. */ + UINT32 fake_gp; /* Pointer to closure, installed as gp. */ + UINT32 real_gp; /* Real gp value. */ + }; + + struct ffi_pa32_trampoline_struct *tramp; + struct pa32_fd *fd; + + if (cif->abi != FFI_PA32) + return FFI_BAD_ABI; + + /* Get function descriptor address for ffi_closure_pa32. */ + fd = (struct pa32_fd *)((UINT32)ffi_closure_pa32 & ~3); + + /* Setup trampoline. 
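   For context, callers normally obtain the closure/codeloc pair and reach this routine
   through the public API; a minimal sketch (handler and types are illustrative only):

       #include <ffi.h>
       #include <stdio.h>

       /* Handler with the void (*)(ffi_cif *, void *, void **, void *) type used above. */
       static void add_handler(ffi_cif *cif, void *ret, void **args, void *user_data)
       {
         (void) cif; (void) user_data;
         *(ffi_arg *) ret = *(int *) args[0] + *(int *) args[1];
       }

       int main(void)
       {
         ffi_cif cif;
         ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
         void *codeloc = NULL;
         ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);
         int rc = 1;

         if (closure
             && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) == FFI_OK
             && ffi_prep_closure_loc(closure, &cif, add_handler, NULL, codeloc) == FFI_OK)
           {
             int (*fn)(int, int) = (int (*)(int, int)) codeloc;  /* enters via the trampoline */
             printf("%d\n", fn(2, 40));
             rc = 0;
           }
         if (closure)
           ffi_closure_free(closure);
         return rc;
       }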
*/ + tramp = (struct ffi_pa32_trampoline_struct *)c->tramp; + tramp->code_pointer = fd->code_pointer; + tramp->fake_gp = (UINT32)codeloc & ~3; + tramp->real_gp = fd->gp; + + c->cif = cif; + c->user_data = user_data; + c->fun = fun; + + return FFI_OK; +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/ffitarget.h new file mode 100644 index 0000000..df1209e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/ffitarget.h @@ -0,0 +1,80 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for hppa. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#ifdef PA_LINUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA_HPUX + FFI_PA32, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA32 +#endif + +#ifdef PA64_HPUX +#error "PA64_HPUX FFI is not yet implemented" + FFI_PA64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_PA64 +#endif +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 12 + +#define FFI_TYPE_SMALL_STRUCT2 -1 +#define FFI_TYPE_SMALL_STRUCT3 -2 +#define FFI_TYPE_SMALL_STRUCT4 -3 +#define FFI_TYPE_SMALL_STRUCT5 -4 +#define FFI_TYPE_SMALL_STRUCT6 -5 +#define FFI_TYPE_SMALL_STRUCT7 -6 +#define FFI_TYPE_SMALL_STRUCT8 -7 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/hpux32.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/hpux32.S new file mode 100644 index 0000000..d0e5f69 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/hpux32.S @@ -0,0 +1,370 @@ +/* ----------------------------------------------------------------------- + hpux32.S - Copyright (c) 2006 Free Software Foundation, Inc. + (c) 2008 Red Hat, Inc. + based on src/pa/linux.S + + HP-UX PA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .LEVEL 1.1 + .SPACE $PRIVATE$ + .IMPORT $global$,DATA + .IMPORT $$dyncall,MILLICODE + .SUBSPA $DATA$ + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,ENTRY,PRIV_LEV=3 + .import ffi_prep_args_pa32,CODE + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .align 4 + +L$FB1 +ffi_call_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI11 + copy %sp, %r3 +L$CFI12 + + /* Setup the stack for calling prep_args... 
+ We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +L$CFI13 + copy %sp, %r4 + + addl %arg2, %r4, %arg0 ; arg stack + stw %arg3, -48(%r3) ; save flags we need it later + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args are loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 ; %ret0 <- rvalue + ldw -56(%r3), %r22 ; %r22 <- function to call + bl $$dyncall, %r31 ; Call the user function + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 ; r21 <- flags + ldw -52(%r3), %r20 ; r20 <- rvalue + + /* Store the result according to the return type. The most + likely types should come first. */ + +L$checkint + comib,<>,n FFI_TYPE_INT, %r21, L$checkint8 + b L$done + stw %ret0, 0(%r20) + +L$checkint8 + comib,<>,n FFI_TYPE_UINT8, %r21, L$checkint16 + b L$done + stb %ret0, 0(%r20) + +L$checkint16 + comib,<>,n FFI_TYPE_UINT16, %r21, L$checkdbl + b L$done + sth %ret0, 0(%r20) + +L$checkdbl + comib,<>,n FFI_TYPE_DOUBLE, %r21, L$checkfloat + b L$done + fstd %fr4,0(%r20) + +L$checkfloat + comib,<>,n FFI_TYPE_FLOAT, %r21, L$checkll + b L$done + fstw %fr4L,0(%r20) + +L$checkll + comib,<>,n FFI_TYPE_UINT64, %r21, L$checksmst2 + stw %ret0, 0(%r20) + b L$done + stw %ret1, 4(%r20) + +L$checksmst2 + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, L$checksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst3 + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, L$checksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst4 + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, L$checksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret0, 0(%r20) + +L$checksmst5 + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, L$checksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst6 + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, L$checksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst7 + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, L$checksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b L$done + stb %ret1, 0(%r20) + +L$checksmst8 + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, L$done + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +L$done + /* all done, return */ + copy %r4, %sp ; pop arg stack + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 ; .. and pop stack + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +L$FE1 + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .export ffi_closure_pa32,ENTRY,PRIV_LEV=3,RTNVAL=GR + .import ffi_closure_inner_pa32,CODE + .align 4 +L$FB2 +ffi_closure_pa32 + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) + copy %r3, %r1 +L$CFI21 + copy %sp, %r3 +L$CFI22 + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. 
*/ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%rp) + ldw -40(%sp), %ret1 + .exit + .procend +L$FE2: + + .SPACE $PRIVATE$ + .SUBSPA $DATA$ + + .align 4 + .EXPORT _GLOBAL__F_ffi_call_pa32,DATA +_GLOBAL__F_ffi_call_pa32 +L$frame1: + .word L$ECIE1-L$SCIE1 ;# Length of Common Information Entry +L$SCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version + .ascii "\0" ;# CIE Augmentation + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +L$ECIE1: +L$SFDE1: + .word L$EFDE1-L$ASFDE1 ;# FDE Length +L$ASFDE1: + .word L$ASFDE1-L$frame1 ;# FDE CIE offset + .word L$FB1 ;# FDE initial location + .word L$FE1-L$FB1 ;# FDE address range + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI11-L$FB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI12-L$CFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI13-L$CFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +L$EFDE1: + +L$SFDE2: + .word L$EFDE2-L$ASFDE2 ;# FDE Length +L$ASFDE2: + .word L$ASFDE2-L$frame1 ;# FDE CIE offset + .word L$FB2 ;# FDE initial location + .word L$FE2-L$FB2 ;# FDE address range + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI21-L$FB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word L$CFI22-L$CFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +L$EFDE2: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/linux.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/linux.S new file mode 100644 index 0000000..33ef0b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/pa/linux.S @@ -0,0 +1,380 @@ +/* ----------------------------------------------------------------------- + linux.S - (c) 2003-2004 Randolph Chung + (c) 2008 Red Hat, Inc. + + HPPA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL RENESAS TECHNOLOGY BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + .level 1.1 + .align 4 + + /* void ffi_call_pa32(void (*)(char *, extended_cif *), + extended_cif *ecif, + unsigned bytes, + unsigned flags, + unsigned *rvalue, + void (*fn)(void)); + */ + + .export ffi_call_pa32,code + .import ffi_prep_args_pa32,code + + .type ffi_call_pa32, @function +.LFB1: +ffi_call_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4 + .entry + stw %rp, -20(%sp) + copy %r3, %r1 +.LCFI11: + + copy %sp, %r3 +.LCFI12: + + /* Setup the stack for calling prep_args... + We want the stack to look like this: + + [ Previous stack ] <- %r3 + + [ 64-bytes register save area ] <- %r4 + + [ Stack space for actual call, passed as ] <- %arg0 + [ arg0 to ffi_prep_args_pa32 ] + + [ Stack for calling prep_args ] <- %sp + */ + + stwm %r1, 64(%sp) + stw %r4, 12(%r3) +.LCFI13: + copy %sp, %r4 + + addl %arg2, %r4, %arg0 /* arg stack */ + stw %arg3, -48(%r3) /* save flags; we need it later */ + + /* Call prep_args: + %arg0(stack) -- set up above + %arg1(ecif) -- same as incoming param + %arg2(bytes) -- same as incoming param */ + bl ffi_prep_args_pa32,%r2 + ldo 64(%arg0), %sp + ldo -64(%sp), %sp + + /* now %sp should point where %arg0 was pointing. */ + + /* Load the arguments that should be passed in registers + The fp args were loaded by the prep_args function. */ + ldw -36(%sp), %arg0 + ldw -40(%sp), %arg1 + ldw -44(%sp), %arg2 + ldw -48(%sp), %arg3 + + /* in case the function is going to return a structure + we need to give it a place to put the result. */ + ldw -52(%r3), %ret0 /* %ret0 <- rvalue */ + ldw -56(%r3), %r22 /* %r22 <- function to call */ + bl $$dyncall, %r31 /* Call the user function */ + copy %r31, %rp + + /* Prepare to store the result; we need to recover flags and rvalue. */ + ldw -48(%r3), %r21 /* r21 <- flags */ + ldw -52(%r3), %r20 /* r20 <- rvalue */ + + /* Store the result according to the return type. */ + +.Lcheckint: + comib,<>,n FFI_TYPE_INT, %r21, .Lcheckint8 + b .Ldone + stw %ret0, 0(%r20) + +.Lcheckint8: + comib,<>,n FFI_TYPE_UINT8, %r21, .Lcheckint16 + b .Ldone + stb %ret0, 0(%r20) + +.Lcheckint16: + comib,<>,n FFI_TYPE_UINT16, %r21, .Lcheckdbl + b .Ldone + sth %ret0, 0(%r20) + +.Lcheckdbl: + comib,<>,n FFI_TYPE_DOUBLE, %r21, .Lcheckfloat + b .Ldone + fstd %fr4,0(%r20) + +.Lcheckfloat: + comib,<>,n FFI_TYPE_FLOAT, %r21, .Lcheckll + b .Ldone + fstw %fr4L,0(%r20) + +.Lcheckll: + comib,<>,n FFI_TYPE_UINT64, %r21, .Lchecksmst2 + stw %ret0, 0(%r20) + b .Ldone + stw %ret1, 4(%r20) + +.Lchecksmst2: + comib,<>,n FFI_TYPE_SMALL_STRUCT2, %r21, .Lchecksmst3 + /* 2-byte structs are returned in ret0 as ????xxyy. */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst3: + comib,<>,n FFI_TYPE_SMALL_STRUCT3, %r21, .Lchecksmst4 + /* 3-byte structs are returned in ret0 as ??xxyyzz. */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst4: + comib,<>,n FFI_TYPE_SMALL_STRUCT4, %r21, .Lchecksmst5 + /* 4-byte structs are returned in ret0 as wwxxyyzz. 
*/ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret0, 0(%r20) + +.Lchecksmst5: + comib,<>,n FFI_TYPE_SMALL_STRUCT5, %r21, .Lchecksmst6 + /* 5 byte values are returned right justified: + ret0 ret1 + 5: ??????aa bbccddee */ + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst6: + comib,<>,n FFI_TYPE_SMALL_STRUCT6, %r21, .Lchecksmst7 + /* 6 byte values are returned right justified: + ret0 ret1 + 6: ????aabb ccddeeff */ + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst7: + comib,<>,n FFI_TYPE_SMALL_STRUCT7, %r21, .Lchecksmst8 + /* 7 byte values are returned right justified: + ret0 ret1 + 7: ??aabbcc ddeeffgg */ + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + b .Ldone + stb %ret1, 0(%r20) + +.Lchecksmst8: + comib,<>,n FFI_TYPE_SMALL_STRUCT8, %r21, .Ldone + /* 8 byte values are returned right justified: + ret0 ret1 + 8: aabbccdd eeffgghh */ + extru %ret0, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret0, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stbs,ma %ret0, 1(%r20) + extru %ret1, 7, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 15, 8, %r22 + stbs,ma %r22, 1(%r20) + extru %ret1, 23, 8, %r22 + stbs,ma %r22, 1(%r20) + stb %ret1, 0(%r20) + +.Ldone: + /* all done, return */ + copy %r4, %sp /* pop arg stack */ + ldw 12(%r3), %r4 + ldwm -64(%sp), %r3 /* .. and pop stack */ + ldw -20(%sp), %rp + bv %r0(%rp) + nop + .exit + .procend +.LFE1: + + /* void ffi_closure_pa32(void); + Called with closure argument in %r19 */ + .export ffi_closure_pa32,code + .import ffi_closure_inner_pa32,code + + .type ffi_closure_pa32, @function +.LFB2: +ffi_closure_pa32: + .proc + .callinfo FRAME=64,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3 + .entry + + stw %rp, -20(%sp) +.LCFI20: + copy %r3, %r1 +.LCFI21: + copy %sp, %r3 +.LCFI22: + stwm %r1, 64(%sp) + + /* Put arguments onto the stack and call ffi_closure_inner. */ + stw %arg0, -36(%r3) + stw %arg1, -40(%r3) + stw %arg2, -44(%r3) + stw %arg3, -48(%r3) + + /* Retrieve closure pointer and real gp. */ + copy %r19, %arg0 + ldw 8(%r19), %r19 + bl ffi_closure_inner_pa32, %r2 + copy %r3, %arg1 + + ldwm -64(%sp), %r3 + ldw -20(%sp), %rp + ldw -36(%sp), %ret0 + bv %r0(%r2) + ldw -40(%sp), %ret1 + + .exit + .procend +.LFE2: + + .section ".eh_frame",EH_FRAME_FLAGS,@progbits +.Lframe1: + .word .LECIE1-.LSCIE1 ;# Length of Common Information Entry +.LSCIE1: + .word 0x0 ;# CIE Identifier Tag + .byte 0x1 ;# CIE Version +#ifdef __PIC__ + .ascii "zR\0" ;# CIE Augmentation: 'z' - data, 'R' - DW_EH_PE_... 
data +#else + .ascii "\0" ;# CIE Augmentation +#endif + .uleb128 0x1 ;# CIE Code Alignment Factor + .sleb128 4 ;# CIE Data Alignment Factor + .byte 0x2 ;# CIE RA Column +#ifdef __PIC__ + .uleb128 0x1 ;# Augmentation size + .byte 0x1b ;# FDE Encoding (DW_EH_PE_pcrel|DW_EH_PE_sdata4) +#endif + .byte 0xc ;# DW_CFA_def_cfa + .uleb128 0x1e + .uleb128 0x0 + .align 4 +.LECIE1: +.LSFDE1: + .word .LEFDE1-.LASFDE1 ;# FDE Length +.LASFDE1: + .word .LASFDE1-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB1-. ;# FDE initial location +#else + .word .LFB1 ;# FDE initial location +#endif + .word .LFE1-.LFB1 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI11-.LFB1 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf; save r2 at [r30-20] + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI12-.LCFI11 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI13-.LCFI12 + .byte 0x84 ;# DW_CFA_offset, column 0x4 + .uleb128 0x3 + + .align 4 +.LEFDE1: + +.LSFDE2: + .word .LEFDE2-.LASFDE2 ;# FDE Length +.LASFDE2: + .word .LASFDE2-.Lframe1 ;# FDE CIE offset +#ifdef __PIC__ + .word .LFB2-. ;# FDE initial location +#else + .word .LFB2 ;# FDE initial location +#endif + .word .LFE2-.LFB2 ;# FDE address range +#ifdef __PIC__ + .uleb128 0x0 ;# Augmentation size: no data +#endif + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI21-.LFB2 + .byte 0x83 ;# DW_CFA_offset, column 0x3 + .uleb128 0x0 + .byte 0x11 ;# DW_CFA_offset_extended_sf + .uleb128 0x2 + .sleb128 -5 + + .byte 0x4 ;# DW_CFA_advance_loc4 + .word .LCFI22-.LCFI21 + .byte 0xd ;# DW_CFA_def_cfa_register = r3 + .uleb128 0x3 + + .align 4 +.LEFDE2: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/aix.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/aix.S new file mode 100644 index 0000000..7ba5415 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/aix.S @@ -0,0 +1,566 @@ +/* ----------------------------------------------------------------------- + aix.S - Copyright (c) 2002, 2009 Free Software Foundation, Inc. + based on darwin.S by John Hornkvist + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_prep_args + +#define LIBFFI_ASM +#include +#include +#define JUMPTARGET(name) name +#define L(x) x + .file "aix.S" + .toc + + /* void ffi_call_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const)); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_AIX + .globl .ffi_call_AIX +.csect ffi_call_AIX[DS] +ffi_call_AIX: +#ifdef __64BIT__ + .llong .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r0, 16(r1) +LCFI..0: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address. */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 16(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-32-(13*8)(r28) + lfd f2,-32-(12*8)(r28) + lfd f3,-32-(11*8)(r28) + lfd f4,-32-(10*8)(r28) + nop + lfd f5,-32-(9*8)(r28) + lfd f6,-32-(8*8)(r28) + lfd f7,-32-(7*8)(r28) + lfd f8,-32-(6*8)(r28) + nop + lfd f9,-32-(5*8)(r28) + lfd f10,-32-(4*8)(r28) + lfd f11,-32-(3*8)(r28) + lfd f12,-32-(2*8)(r28) + nop + lfd f13,-32-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + ld r2, 40(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + std r3, 0(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + ld r0, 16(r28) + ld r28, -32(r1) + mtlr r0 + ld r29, -24(r1) + ld r30, -16(r1) + ld r31, -8(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + bf 31, L(done_return_value) + stfd f2, 8(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#else /* ! 
__64BIT__ */ + + .long .ffi_call_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_AIX: + .function .ffi_call_AIX,.ffi_call_AIX,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: + /* Save registers we use. */ + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r0, 8(r1) +LCFI..0: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..1: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 8(r29) + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + +L1: + /* Load all the FP registers. */ + bf 6,L2 /* 2f + 0x18 */ + lfd f1,-16-(13*8)(r28) + lfd f2,-16-(12*8)(r28) + lfd f3,-16-(11*8)(r28) + lfd f4,-16-(10*8)(r28) + nop + lfd f5,-16-(9*8)(r28) + lfd f6,-16-(8*8)(r28) + lfd f7,-16-(7*8)(r28) + lfd f8,-16-(6*8)(r28) + nop + lfd f9,-16-(5*8)(r28) + lfd f10,-16-(4*8)(r28) + lfd f11,-16-(3*8)(r28) + lfd f12,-16-(2*8)(r28) + nop + lfd f13,-16-(1*8)(r28) + +L2: + /* Make the call. */ + bctrl + lwz r2, 20(r1) + + /* Now, deal with the return value. */ + mtcrf 0x01, r31 + + bt 30, L(done_return_value) + bt 29, L(fp_return_value) + stw r3, 0(r30) + bf 28, L(done_return_value) + stw r4, 4(r30) + + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + mr r1, r28 + lwz r0, 8(r28) + lwz r28,-16(r1) + mtlr r0 + lwz r29,-12(r1) + lwz r30, -8(r1) + lwz r31, -4(r1) + blr + +L(fp_return_value): + bf 28, L(float_return_value) + stfd f1, 0(r30) + b L(done_return_value) +L(float_return_value): + stfs f1, 0(r30) + b L(done_return_value) +LFE..0: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_AIX) */ + + /* void ffi_call_go_AIX(extended_cif *ecif, unsigned long bytes, + * unsigned int flags, unsigned int *rvalue, + * void (*fn)(), + * void (*prep_args)(extended_cif*, unsigned *const), + * void *closure); + * r3=ecif, r4=bytes, r5=flags, r6=rvalue, r7=fn, r8=prep_args, r9=closure + */ + +.csect .text[PR] + .align 2 + .globl ffi_call_go_AIX + .globl .ffi_call_go_AIX +.csect ffi_call_go_AIX[DS] +ffi_call_go_AIX: +#ifdef __64BIT__ + .llong .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: + /* Save registers we use. */ + mflr r0 + + std r28,-32(r1) + std r29,-24(r1) + std r30,-16(r1) + std r31, -8(r1) + + std r9, 8(r1) /* closure, saved in cr field. */ + std r0, 16(r1) +LCFI..2: + mr r28, r1 /* our AP. */ + stdux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + std r2, 40(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + ld r0, 0(r29) + ld r2, 8(r29) + ld r11, 8(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. 
*/ + ld r3, 40+(1*8)(r1) + ld r4, 40+(2*8)(r1) + ld r5, 40+(3*8)(r1) + ld r6, 40+(4*8)(r1) + nop + ld r7, 40+(5*8)(r1) + ld r8, 40+(6*8)(r1) + ld r9, 40+(7*8)(r1) + ld r10,40+(8*8)(r1) + + b L1 +LFE..1: +#else /* ! __64BIT__ */ + + .long .ffi_call_go_AIX, TOC[tc0], 0 + .csect .text[PR] +.ffi_call_go_AIX: + .function .ffi_call_go_AIX,.ffi_call_go_AIX,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 + /* Save registers we use. */ +LFB..1: + mflr r0 + + stw r28,-16(r1) + stw r29,-12(r1) + stw r30, -8(r1) + stw r31, -4(r1) + + stw r9, 4(r1) /* closure, saved in cr field. */ + stw r0, 8(r1) +LCFI..2: + mr r28, r1 /* out AP. */ + stwux r1, r1, r4 +LCFI..3: + + /* Save arguments over call... */ + mr r31, r5 /* flags, */ + mr r30, r6 /* rvalue, */ + mr r29, r7 /* function address, */ + stw r2, 20(r1) + + /* Call ffi_prep_args. */ + mr r4, r1 + bl .ffi_prep_args + nop + + /* Now do the call. */ + lwz r0, 0(r29) + lwz r2, 4(r29) + lwz r11, 4(r28) /* closure */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40, r31 + mtctr r0 + /* Load all those argument registers. */ + /* We have set up a nice stack frame, just load it into registers. */ + lwz r3, 20+(1*4)(r1) + lwz r4, 20+(2*4)(r1) + lwz r5, 20+(3*4)(r1) + lwz r6, 20+(4*4)(r1) + nop + lwz r7, 20+(5*4)(r1) + lwz r8, 20+(6*4)(r1) + lwz r9, 20+(7*4)(r1) + lwz r10,20+(8*4)(r1) + + b L1 +LFE..1: +#endif + .ef __LINE__ + .long 0 + .byte 0,0,0,1,128,4,0,0 +/* END(ffi_call_go_AIX) */ + +.csect .text[PR] + .align 2 + .globl ffi_call_DARWIN + .globl .ffi_call_DARWIN +.csect ffi_call_DARWIN[DS] +ffi_call_DARWIN: +#ifdef __64BIT__ + .llong .ffi_call_DARWIN, TOC[tc0], 0 +#else + .long .ffi_call_DARWIN, TOC[tc0], 0 +#endif + .csect .text[PR] +.ffi_call_DARWIN: + blr + .long 0 + .byte 0,0,0,0,0,0,0,0 +/* END(ffi_call_DARWIN) */ + +/* EH frame stuff. 
*/ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix +_GLOBAL__F_libffi_src_powerpc_aix: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte 0x41 /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_def_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .byte 0x9f /* DW_CFA_offset Register r31 */ + .byte 0x1 /* uleb128 0x1; Offset 1 (-4/-8) */ + .byte 0x9e /* DW_CFA_offset Register r30 */ + .byte 0x2 /* uleb128 0x2; Offset 2 (-8/-16) */ + .byte 0x9d /* DW_CFA_offset Register r29 */ + .byte 0x3 /* uleb128 0x3; Offset 3 (-12/-24) */ + .byte 0x9c /* DW_CFA_offset Register r28 */ + .byte 0x4 /* uleb128 0x4; Offset 4 (-16/-32) */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0x1c /* uleb128 28; Register r28 */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/aix_closure.S 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/aix_closure.S new file mode 100644 index 0000000..132c785 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/aix_closure.S @@ -0,0 +1,694 @@ +/* ----------------------------------------------------------------------- + aix_closure.S - Copyright (c) 2002, 2003, 2009 Free Software Foundation, Inc. + based on darwin_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + + .set r0,0 + .set r1,1 + .set r2,2 + .set r3,3 + .set r4,4 + .set r5,5 + .set r6,6 + .set r7,7 + .set r8,8 + .set r9,9 + .set r10,10 + .set r11,11 + .set r12,12 + .set r13,13 + .set r14,14 + .set r15,15 + .set r16,16 + .set r17,17 + .set r18,18 + .set r19,19 + .set r20,20 + .set r21,21 + .set r22,22 + .set r23,23 + .set r24,24 + .set r25,25 + .set r26,26 + .set r27,27 + .set r28,28 + .set r29,29 + .set r30,30 + .set r31,31 + .set f0,0 + .set f1,1 + .set f2,2 + .set f3,3 + .set f4,4 + .set f5,5 + .set f6,6 + .set f7,7 + .set f8,8 + .set f9,9 + .set f10,10 + .set f11,11 + .set f12,12 + .set f13,13 + .set f14,14 + .set f15,15 + .set f16,16 + .set f17,17 + .set f18,18 + .set f19,19 + .set f20,20 + .set f21,21 + + .extern .ffi_closure_helper_DARWIN + .extern .ffi_go_closure_helper_DARWIN + +#define LIBFFI_ASM +#define JUMPTARGET(name) name +#define L(x) x + .file "aix_closure.S" + .toc +LC..60: + .tc L..60[TC],L..60 + .csect .text[PR] + .align 2 + +.csect .text[PR] + .align 2 + .globl ffi_closure_ASM + .globl .ffi_closure_ASM +.csect ffi_closure_ASM[DS] +ffi_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..0: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 
16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 10(r3) /* load type from return type */ + ld r4, LC..60(2) /* get address of jump table */ + sldi r3, r3, 4 /* now multiply return type by 16 */ + ld r0, 240+16(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. */ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_INT */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 112+0(r1) + mtlr r0 + lfd f2, 112+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 112+7(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 112+6(r1) + mtlr r0 +L..finish: + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 112+6(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT32 */ + lwa r3, 112+4(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_UINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_SINT64 */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 240 + blr + nop + +/* case FFI_TYPE_POINTER */ + ld r3, 112+0(r1) + mtlr r0 + addi r1, r1, 240 + blr +LFE..0: + +#else /* ! 
__64BIT__ */ + + .long .ffi_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_closure_ASM: + .function .ffi_closure_ASM,.ffi_closure_ASM,16,044,LFE..0-LFB..0 + .bf __LINE__ + .line 1 +LFB..0: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..0: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..1: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + /* get the context pointer from the trampoline */ + mr r3, r11 + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_closure_helper_DARWIN + nop + +.Ldoneclosure: + + /* now r3 contains the return type */ + /* so use it to look up in a table */ + /* so we know how to deal with each type */ + + /* look up the proper starting point in table */ + /* by using return type as offset */ + lhz r3, 6(r3) /* load type from return type */ + lwz r4, LC..60(2) /* get address of jump table */ + slwi r3, r3, 4 /* now multiply return type by 16 */ + lwz r0, 176+8(r1) /* load return address */ + add r3, r3, r4 /* add contents of table to table address */ + mtctr r3 + bctr /* jump to it */ + +/* Each fragment must be exactly 16 bytes long (4 instructions). + Align to 16 byte boundary for cache and dispatch efficiency. 
*/ + .align 4 + +L..60: +/* case FFI_TYPE_VOID */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_INT */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_FLOAT */ + lfs f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_DOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_LONGDOUBLE */ + lfd f1, 56+0(r1) + mtlr r0 + lfd f2, 56+8(r1) + b L..finish + +/* case FFI_TYPE_UINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT8 */ + lbz r3, 56+3(r1) + mtlr r0 + extsb r3, r3 + b L..finish + +/* case FFI_TYPE_UINT16 */ + lhz r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT16 */ + lha r3, 56+2(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_SINT32 */ + lwz r3, 56+0(r1) + mtlr r0 + addi r1, r1, 176 + blr + +/* case FFI_TYPE_UINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_SINT64 */ + lwz r3, 56+0(r1) + mtlr r0 + lwz r4, 56+4(r1) + b L..finish + +/* case FFI_TYPE_STRUCT */ + mtlr r0 + addi r1, r1, 176 + blr + nop + +/* case FFI_TYPE_POINTER */ + lwz r3, 56+0(r1) + mtlr r0 +L..finish: + addi r1, r1, 176 + blr +LFE..0: +#endif + .ef __LINE__ +/* END(ffi_closure_ASM) */ + + +.csect .text[PR] + .align 2 + .globl ffi_go_closure_ASM + .globl .ffi_go_closure_ASM +.csect ffi_go_closure_ASM[DS] +ffi_go_closure_ASM: +#ifdef __64BIT__ + .llong .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + std r3, 48+(0*8)(r1) + std r4, 48+(1*8)(r1) + std r5, 48+(2*8)(r1) + std r6, 48+(3*8)(r1) + mflr r0 + + std r7, 48+(4*8)(r1) + std r8, 48+(5*8)(r1) + std r9, 48+(6*8)(r1) + std r10, 48+(7*8)(r1) + std r0, 16(r1) /* save the return address */ +LCFI..2: + /* 48 Bytes (Linkage Area) */ + /* 64 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 8 Bytes (alignment) */ + /* 240 Bytes */ + + stdu r1, -240(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 128+(0*8)(r1) + stfd f2, 128+(1*8)(r1) + stfd f3, 128+(2*8)(r1) + stfd f4, 128+(3*8)(r1) + stfd f5, 128+(4*8)(r1) + stfd f6, 128+(5*8)(r1) + stfd f7, 128+(6*8)(r1) + stfd f8, 128+(7*8)(r1) + stfd f9, 128+(8*8)(r1) + stfd f10, 128+(9*8)(r1) + stfd f11, 128+(10*8)(r1) + stfd f12, 128+(11*8)(r1) + stfd f13, 128+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, r11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 112 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 288 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 128 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: + +#else /* ! 
__64BIT__ */ + + .long .ffi_go_closure_ASM, TOC[tc0], 0 + .csect .text[PR] +.ffi_go_closure_ASM: + .function .ffi_go_closure_ASM,.ffi_go_closure_ASM,16,044,LFE..1-LFB..1 + .bf __LINE__ + .line 1 +LFB..1: +/* we want to build up an area for the parameters passed */ +/* in registers (both floating point and integer) */ + + /* we store gpr 3 to gpr 10 (aligned to 4) + in the parents outgoing area */ + stw r3, 24+(0*4)(r1) + stw r4, 24+(1*4)(r1) + stw r5, 24+(2*4)(r1) + stw r6, 24+(3*4)(r1) + mflr r0 + + stw r7, 24+(4*4)(r1) + stw r8, 24+(5*4)(r1) + stw r9, 24+(6*4)(r1) + stw r10, 24+(7*4)(r1) + stw r0, 8(r1) +LCFI..2: + /* 24 Bytes (Linkage Area) */ + /* 32 Bytes (params) */ + /* 16 Bytes (result) */ + /* 104 Bytes (13*8 from FPR) */ + /* 176 Bytes */ + + stwu r1, -176(r1) /* skip over caller save area + keep stack aligned to 16 */ +LCFI..3: + + /* next save fpr 1 to fpr 13 (aligned to 8) */ + stfd f1, 72+(0*8)(r1) + stfd f2, 72+(1*8)(r1) + stfd f3, 72+(2*8)(r1) + stfd f4, 72+(3*8)(r1) + stfd f5, 72+(4*8)(r1) + stfd f6, 72+(5*8)(r1) + stfd f7, 72+(6*8)(r1) + stfd f8, 72+(7*8)(r1) + stfd f9, 72+(8*8)(r1) + stfd f10, 72+(9*8)(r1) + stfd f11, 72+(10*8)(r1) + stfd f12, 72+(11*8)(r1) + stfd f13, 72+(12*8)(r1) + + /* set up registers for the routine that actually does the work */ + mr r3, 11 /* go closure */ + + /* now load up the pointer to the result storage */ + addi r4, r1, 56 + + /* now load up the pointer to the saved gpr registers */ + addi r5, r1, 200 + + /* now load up the pointer to the saved fpr registers */ + addi r6, r1, 72 + + /* make the call */ + bl .ffi_go_closure_helper_DARWIN + nop + + b .Ldoneclosure +LFE..1: +#endif + .ef __LINE__ +/* END(ffi_go_closure_ASM) */ + +/* EH frame stuff. */ + +#define LR_REGNO 0x41 /* Link Register (65), see rs6000.md */ +#ifdef __64BIT__ +#define PTRSIZE 8 +#define LOG2_PTRSIZE 3 +#define CFA_OFFSET 0xf0,0x01 /* LEB128 240 */ +#define FDE_ENCODING 0x1c /* DW_EH_PE_pcrel|DW_EH_PE_sdata8 */ +#define EH_DATA_ALIGN_FACT 0x78 /* LEB128 -8 */ +#else +#define PTRSIZE 4 +#define LOG2_PTRSIZE 2 +#define CFA_OFFSET 0xb0,0x01 /* LEB128 176 */ +#define FDE_ENCODING 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */ +#define EH_DATA_ALIGN_FACT 0x7c /* LEB128 -4 */ +#endif + + .csect _unwind.ro_[RO],4 + .align LOG2_PTRSIZE + .globl _GLOBAL__F_libffi_src_powerpc_aix_closure +_GLOBAL__F_libffi_src_powerpc_aix_closure: +Lframe..1: + .vbyte 4,LECIE..1-LSCIE..1 /* CIE Length */ +LSCIE..1: + .vbyte 4,0 /* CIE Identifier Tag */ + .byte 0x3 /* CIE Version */ + .byte "zR" /* CIE Augmentation */ + .byte 0 + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte EH_DATA_ALIGN_FACT /* leb128 -4/-8; CIE Data Alignment Factor */ + .byte LR_REGNO /* CIE RA Column */ + .byte 0x1 /* uleb128 0x1; Augmentation size */ + .byte FDE_ENCODING /* FDE Encoding (pcrel|sdata4/8) */ + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0x1 /* uleb128 0x1; Register r1 */ + .byte 0 /* uleb128 0x0; Offset 0 */ + .align LOG2_PTRSIZE +LECIE..1: +LSFDE..1: + .vbyte 4,LEFDE..1-LASFDE..1 /* FDE Length */ +LASFDE..1: + .vbyte 4,LASFDE..1-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..0-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..0-LFB..0 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..1-LCFI..0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..0-LFB..0 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; 
Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..1: +LSFDE..2: + .vbyte 4,LEFDE..2-LASFDE..2 /* FDE Length */ +LASFDE..2: + .vbyte 4,LASFDE..2-Lframe..1 /* FDE CIE offset */ + .vbyte PTRSIZE,LFB..1-$ /* FDE initial location */ + .vbyte PTRSIZE,LFE..1-LFB..1 /* FDE address range */ + .byte 0 /* uleb128 0x0; Augmentation size */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..3-LCFI..2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte CFA_OFFSET /* uleb128 176/240 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .vbyte 4,LCFI..2-LFB..1 + .byte 0x11 /* DW_CFA_offset_extended_sf */ + .byte LR_REGNO /* uleb128 LR_REGNO; Register LR */ + .byte 0x7e /* leb128 -2; Offset -2 (8/16) */ + .align LOG2_PTRSIZE +LEFDE..2: + .vbyte 4,0 /* End of FDEs */ + + .csect .text[PR] + .ref _GLOBAL__F_libffi_src_powerpc_aix_closure /* Prevents garbage collection by AIX linker */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/asm.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/asm.h new file mode 100644 index 0000000..27b22f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/asm.h @@ -0,0 +1,125 @@ +/* ----------------------------------------------------------------------- + asm.h - Copyright (c) 1998 Geoffrey Keating + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define ASM_GLOBAL_DIRECTIVE .globl + + +#define C_SYMBOL_NAME(name) name +/* Macro for a label. */ +#ifdef __STDC__ +#define C_LABEL(name) name##: +#else +#define C_LABEL(name) name/**/: +#endif + +/* This seems to always be the case on PPC. */ +#define ALIGNARG(log2) log2 +/* For ELF we need the `.type' directive to make shared libs work right. */ +#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg; +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name + +/* If compiled for profiling, call `_mcount' at the start of each function. */ +#ifdef PROF +/* The mcount code relies on the return address being on the stack + to locate our caller and so it can restore it; so store one just + for its benefit. 
*/ +#ifdef PIC +#define CALL_MCOUNT \ + .pushsection; \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + stw %r0,4(%r1); \ + bl _GLOBAL_OFFSET_TABLE_@local-4; \ + mflr %r11; \ + lwz %r0,0b@got(%r11); \ + bl JUMPTARGET(_mcount); +#else /* PIC */ +#define CALL_MCOUNT \ + .section ".data"; \ + .align ALIGNARG(2); \ +0:.long 0; \ + .previous; \ + mflr %r0; \ + lis %r11,0b@ha; \ + stw %r0,4(%r1); \ + addi %r0,%r11,0b@l; \ + bl JUMPTARGET(_mcount); +#endif /* PIC */ +#else /* PROF */ +#define CALL_MCOUNT /* Do nothing. */ +#endif /* PROF */ + +#define ENTRY(name) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT + +#define EALIGN_W_0 /* No words to insert. */ +#define EALIGN_W_1 nop +#define EALIGN_W_2 nop;nop +#define EALIGN_W_3 nop;nop;nop +#define EALIGN_W_4 EALIGN_W_3;nop +#define EALIGN_W_5 EALIGN_W_4;nop +#define EALIGN_W_6 EALIGN_W_5;nop +#define EALIGN_W_7 EALIGN_W_6;nop + +/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes + past a 2^align boundary. */ +#ifdef PROF +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(2); \ + C_LABEL(name) \ + CALL_MCOUNT \ + b 0f; \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + 0: +#else /* PROF */ +#define EFFI_ALIGN(name, alignt, words) \ + ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \ + ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \ + .align ALIGNARG(alignt); \ + EALIGN_W_##words; \ + C_LABEL(name) +#endif + +#define END(name) \ + ASM_SIZE_DIRECTIVE(name) + +#ifdef PIC +#define JUMPTARGET(name) name##@plt +#else +#define JUMPTARGET(name) name +#endif + +/* Local labels stripped out by the linker. */ +#define L(x) .L##x diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/darwin.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/darwin.S new file mode 100644 index 0000000..066eb82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/darwin.S @@ -0,0 +1,378 @@ +/* ----------------------------------------------------------------------- + darwin.S - Copyright (c) 2000 John Hornkvist + Copyright (c) 2004, 2010 Free Software Foundation, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) +#define sgux MODE_CHOICE(stwux,stdux) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. +#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* If there is any FP stuff we make space for all of the regs. */ +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +#define RESULT_BYTES 16 + +/* This should be kept in step with the same value in ffi_darwin.c. */ +#define ASM_NEEDS_REGISTERS 4 +#define SAVE_REGS_SIZE (ASM_NEEDS_REGISTERS * GPR_BYTES) + +#include +#include + +#define JUMPTARGET(name) name +#define L(x) x + + .text + .align 2 + .globl _ffi_prep_args + + .align 2 + .globl _ffi_call_DARWIN + + /* We arrive here with: + r3 = ptr to extended cif. + r4 = -bytes. + r5 = cif flags. + r6 = ptr to return value. + r7 = fn pointer (user func). + r8 = fn pointer (ffi_prep_args). + r9 = ffi_type* for the ret val. */ + +_ffi_call_DARWIN: +Lstartcode: + mr r12,r8 /* We only need r12 until the call, + so it does not have to be saved. */ +LFB1: + /* Save the old stack pointer as AP. */ + mr r8,r1 +LCFI0: + + /* Save the retval type in parents frame. */ + sg r9,(LINKAGE_SIZE+6*GPR_BYTES)(r8) + + /* Allocate the stack space we need. */ + sgux r1,r1,r4 + + /* Save registers we use. */ + mflr r9 + sg r9,SAVED_LR_OFFSET(r8) + + sg r28,-(4 * GPR_BYTES)(r8) + sg r29,-(3 * GPR_BYTES)(r8) + sg r30,-(2 * GPR_BYTES)(r8) + sg r31,-( GPR_BYTES)(r8) + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + sg r2,(5 * GPR_BYTES)(r1) +#endif + +LCFI1: + + /* Save arguments over call. */ + mr r31,r5 /* flags, */ + mr r30,r6 /* rvalue, */ + mr r29,r7 /* function address, */ + mr r28,r8 /* our AP. */ +LCFI2: + /* Call ffi_prep_args. r3 = extended cif, r4 = stack ptr copy. */ + mr r4,r1 + li r9,0 + + mtctr r12 /* r12 holds address of _ffi_prep_args. */ + bctrl + +#if !defined(POWERPC_DARWIN) + /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */ + lg r2,(5 * GPR_BYTES)(r1) +#endif + /* Now do the call. + Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,r31 + /* Get the address to call into CTR. */ + mtctr r29 + /* Load all those argument registers. + We have set up a nice stack frame, just load it into registers. */ + lg r3, (LINKAGE_SIZE )(r1) + lg r4, (LINKAGE_SIZE + GPR_BYTES)(r1) + lg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r1) + lg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r1) + nop + lg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r1) + lg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r1) + lg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r1) + lg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r1) + +L1: + /* ... Load all the FP registers. */ + bf 6,L2 /* No floats to load. 
*/ + lfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + lfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + lfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + lfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + lfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + lfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + lfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + lfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + lfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + lfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + lfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + lfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + lfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + +L2: + mr r12,r29 /* Put the target address in r12 as specified. */ + mtctr r12 + nop + nop + + /* Make the call. */ + bctrl + + /* Now, deal with the return value. */ + + /* m64 structure returns can occupy the same set of registers as + would be used to pass such a structure as arg0 - so take care + not to step on any possibly hot regs. */ + + /* Get the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + ; FLAG_RETURNS_NOTHING also covers struct ret-by-ref. + bt 30,L(done_return_value) ; FLAG_RETURNS_NOTHING + bf 27,L(scalar_return_value) ; not FLAG_RETURNS_STRUCT + + /* OK, so we have a struct. */ +#if defined(__ppc64__) + bt 31,L(maybe_return_128) ; FLAG_RETURNS_128BITS, special case + + /* OK, we have to map the return back to a mem struct. + We are about to trample the parents param area, so recover the + return type. r29 is free, since the call is done. */ + lg r29,(LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + + sg r3, (LINKAGE_SIZE )(r28) + sg r4, (LINKAGE_SIZE + GPR_BYTES)(r28) + sg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r28) + sg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r28) + nop + sg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r28) + sg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r28) + sg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r28) + sg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + /* OK, so do the block move - we trust that memcpy will not trample + the fprs... */ + mr r3,r30 ; dest + addi r4,r28,LINKAGE_SIZE ; source + /* The size is a size_t, should be long. */ + lg r5,0(r29) + /* Figure out small structs */ + cmpi 0,r5,4 + bgt L3 ; 1, 2 and 4 bytes have special rules. + cmpi 0,r5,3 + beq L3 ; not 3 + addi r4,r4,8 + subf r4,r5,r4 +L3: + bl _memcpy + + /* ... do we need the FP registers? - recover the flags.. */ + mtcrf 0x03,r31 ; we need c6 & cr7 now. + bf 29,L(done_return_value) /* No floats in the struct. */ + stfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28) + stfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28) + stfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28) + stfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28) + nop + stfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28) + stfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28) + stfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28) + stfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28) + nop + stfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28) + stfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28) + stfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28) + stfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28) + nop + stfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28) + + mr r3,r29 ; ffi_type * + mr r4,r30 ; dest + addi r5,r28,-SAVE_REGS_SIZE-(13*FPR_SIZE) ; fprs + xor r6,r6,r6 + sg r6,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28) + addi r6,r28,(LINKAGE_SIZE + 7 * GPR_BYTES) ; point to a zeroed counter. + bl _darwin64_struct_floats_to_mem + + b L(done_return_value) +#else + stw r3,0(r30) ; m32 the only struct return in reg is 4 bytes. +#endif + b L(done_return_value) + +L(fp_return_value): + /* Do we have long double to store? 
*/ + bf 31,L(fd_return_value) ; FLAG_RETURNS_128BITS + stfd f1,0(r30) + stfd f2,FPR_SIZE(r30) + b L(done_return_value) + +L(fd_return_value): + /* Do we have double to store? */ + bf 28,L(float_return_value) + stfd f1,0(r30) + b L(done_return_value) + +L(float_return_value): + /* We only have a float to store. */ + stfs f1,0(r30) + b L(done_return_value) + +L(scalar_return_value): + bt 29,L(fp_return_value) ; FLAG_RETURNS_FP + ; ffi_arg is defined as unsigned long. + sg r3,0(r30) ; Save the reg. + bf 28,L(done_return_value) ; not FLAG_RETURNS_64BITS + +#if defined(__ppc64__) +L(maybe_return_128): + std r3,0(r30) + bf 31,L(done_return_value) ; not FLAG_RETURNS_128BITS + std r4,8(r30) +#else + stw r4,4(r30) +#endif + + /* Fall through. */ + /* We want this at the end to simplify eh epilog computation. */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lg r29,SAVED_LR_OFFSET(r28) + ; epilog + lg r31,-(1 * GPR_BYTES)(r28) + mtlr r29 + lg r30,-(2 * GPR_BYTES)(r28) + lg r29,-(3 * GPR_BYTES)(r28) + lg r28,-(4 * GPR_BYTES)(r28) + lg r1,0(r1) + blr +LFE1: + .align 1 +/* END(_ffi_call_DARWIN) */ + +/* Provide a null definition of _ffi_call_AIX. */ + .text + .globl _ffi_call_AIX + .align 2 +_ffi_call_AIX: + blr +/* END(_ffi_call_AIX) */ + +/* EH stuff. */ + +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + + .globl _ffi_call_DARWIN.eh +_ffi_call_DARWIN.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$3,LFE1-Lstartcode + .g_long L$set$3 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x08 ; uleb128 0x08 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$5,LCFI1-LCFI0 + .long L$set$5 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .byte 0x9f ; DW_CFA_offset, column 0x1f + .byte 0x1 ; uleb128 0x1 + .byte 0x9e ; DW_CFA_offset, column 0x1e + .byte 0x2 ; uleb128 0x2 + .byte 0x9d ; DW_CFA_offset, column 0x1d + .byte 0x3 ; uleb128 0x3 + .byte 0x9c ; DW_CFA_offset, column 0x1c + .byte 0x4 ; uleb128 0x4 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$6,LCFI2-LCFI1 + .long L$set$6 + .byte 0xd ; DW_CFA_def_cfa_register + .byte 0x1c ; uleb128 0x1c + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/darwin_closure.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/darwin_closure.S new file mode 100644 index 0000000..3121e6a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/darwin_closure.S @@ -0,0 +1,571 @@ +/* ----------------------------------------------------------------------- + darwin_closure.S - Copyright (c) 2002, 2003, 2004, 2010, + Free Software Foundation, Inc. + based on ppc_closure.S + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#define L(x) x + +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define machine_choice MODE_CHOICE(ppc7400,ppc64) + +; Define some pseudo-opcodes for size-independent load & store of GPRs ... +#define lgu MODE_CHOICE(lwzu, ldu) +#define lg MODE_CHOICE(lwz,ld) +#define sg MODE_CHOICE(stw,std) +#define sgu MODE_CHOICE(stwu,stdu) + +; ... and the size of GPRs and their storage indicator. +#define GPR_BYTES MODE_CHOICE(4,8) +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04. 
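+; (For orientation: LINKAGE_SIZE is the fixed six-word linkage area of
+;  the parent frame, i.e. backchain, saved CR, saved LR, two reserved
+;  words and the TOC save slot; PARAM_AREA is the minimum eight-GPR
+;  parameter image.  The two values in each MODE_CHOICE are the m32 and
+;  m64 byte counts.)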
+#define LINKAGE_SIZE MODE_CHOICE(24,48) +#define PARAM_AREA MODE_CHOICE(32,64) + +#define SAVED_CR_OFFSET MODE_CHOICE(4,8) /* save position for CR */ +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */ + +/* WARNING: if ffi_type is changed... here be monsters. + Offsets of items within the result type. */ +#define FFI_TYPE_TYPE MODE_CHOICE(6,10) +#define FFI_TYPE_ELEM MODE_CHOICE(8,16) + +#define SAVED_FPR_COUNT 13 +#define FPR_SIZE 8 +/* biggest m64 struct ret is 8GPRS + 13FPRS = 168 bytes - rounded to 16bytes = 176. */ +#define RESULT_BYTES MODE_CHOICE(16,176) + +; The whole stack frame **MUST** be 16byte-aligned. +#define SAVE_SIZE (((LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)+15) & -16LL) +#define PAD_SIZE (SAVE_SIZE-(LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)) + +#define PARENT_PARM_BASE (SAVE_SIZE+LINKAGE_SIZE) +#define FP_SAVE_BASE (LINKAGE_SIZE+PARAM_AREA) + +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050 +; We no longer need the pic symbol stub for Darwin >= 9. +#define BLCLS_HELP _ffi_closure_helper_DARWIN +#define STRUCT_RETVALUE_P _darwin64_struct_ret_by_value_p +#define PASS_STR_FLOATS _darwin64_pass_struct_floats +#undef WANT_STUB +#else +#define BLCLS_HELP L_ffi_closure_helper_DARWIN$stub +#define STRUCT_RETVALUE_P L_darwin64_struct_ret_by_value_p$stub +#define PASS_STR_FLOATS L_darwin64_pass_struct_floats$stub +#define WANT_STUB +#endif + +/* m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee`s LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent`s frame. + |--------------------------------------------| <+ <<< on entry to + | Result Bytes 16/176 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callees LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< call. + +*/ + + .file "darwin_closure.S" + + .machine machine_choice + + .text + .globl _ffi_closure_ASM + .align LOG2_GPR_BYTES +_ffi_closure_ASM: +LFB1: +Lstartcode: + mflr r0 /* extract return address */ + sg r0,SAVED_LR_OFFSET(r1) /* save the return address */ +LCFI0: + sgu r1,-SAVE_SIZE(r1) /* skip over caller save area + keep stack aligned to 16. */ +LCFI1: + /* We want to build up an area for the parameters passed + in registers. 
(both floating point and integer) */ + + /* Put gpr 3 to gpr 10 in the parents outgoing area... + ... the remainder of any params that overflowed the regs will + follow here. */ + sg r3, (PARENT_PARM_BASE )(r1) + sg r4, (PARENT_PARM_BASE + GPR_BYTES )(r1) + sg r5, (PARENT_PARM_BASE + GPR_BYTES * 2)(r1) + sg r6, (PARENT_PARM_BASE + GPR_BYTES * 3)(r1) + sg r7, (PARENT_PARM_BASE + GPR_BYTES * 4)(r1) + sg r8, (PARENT_PARM_BASE + GPR_BYTES * 5)(r1) + sg r9, (PARENT_PARM_BASE + GPR_BYTES * 6)(r1) + sg r10,(PARENT_PARM_BASE + GPR_BYTES * 7)(r1) + + /* We save fpr 1 to fpr 14 in our own save frame. */ + stfd f1, (FP_SAVE_BASE )(r1) + stfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + stfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + stfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + stfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + stfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + stfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + stfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + stfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + stfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + stfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + stfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + stfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* Set up registers for the routine that actually does the work + get the context pointer from the trampoline. */ + mr r3,r11 + + /* Now load up the pointer to the result storage. */ + addi r4,r1,(SAVE_SIZE-RESULT_BYTES) + + /* Now load up the pointer to the saved gpr registers. */ + addi r5,r1,PARENT_PARM_BASE + + /* Now load up the pointer to the saved fpr registers. */ + addi r6,r1,FP_SAVE_BASE + + /* Make the call. */ + bl BLCLS_HELP + + /* r3 contains the rtype pointer... save it since we will need + it later. */ + sg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type + lg r0,0(r3) ; size => r0 + lhz r3,FFI_TYPE_TYPE(r3) ; type => r3 + + /* The helper will have intercepted structure returns and inserted + the caller`s destination address for structs returned by ref. */ + + /* r3 contains the return type so use it to look up in a table + so we know how to deal with each type. */ + + addi r5,r1,(SAVE_SIZE-RESULT_BYTES) /* Otherwise, our return is here. */ + bl Lget_ret_type0_addr /* Get pointer to Lret_type0 into LR. */ + mflr r4 /* Move to r4. */ + slwi r3,r3,4 /* Now multiply return type by 16. */ + add r3,r3,r4 /* Add contents of table to table address. */ + mtctr r3 + bctr /* Jump to it. */ +LFE1: +/* Each of the ret_typeX code fragments has to be exactly 16 bytes long + (4 instructions). For cache effectiveness we align to a 16 byte boundary + first. 
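+   (The dispatcher above computes Lret_type0 + 16 * type with the
+   slwi r3,r3,4 / add / mtctr / bctr sequence; FFI_TYPE_DOUBLE, for
+   example, is 3, so it lands 48 bytes in, at Lret_type3.)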
*/ + + .align 4 + + nop + nop + nop +Lget_ret_type0_addr: + blrl + +/* case FFI_TYPE_VOID */ +Lret_type0: + b Lfinish + nop + nop + nop + +/* case FFI_TYPE_INT */ +Lret_type1: + lg r3,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_FLOAT */ +Lret_type2: + lfs f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_DOUBLE */ +Lret_type3: + lfd f1,0(r5) + b Lfinish + nop + nop + +/* case FFI_TYPE_LONGDOUBLE */ +Lret_type4: + lfd f1,0(r5) + lfd f2,8(r5) + b Lfinish + nop + +/* case FFI_TYPE_UINT8 */ +Lret_type5: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT8 */ +Lret_type6: +#if defined(__ppc64__) + lbz r3,7(r5) +#else + lbz r3,3(r5) +#endif + extsb r3,r3 + b Lfinish + nop + +/* case FFI_TYPE_UINT16 */ +Lret_type7: +#if defined(__ppc64__) + lhz r3,6(r5) +#else + lhz r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT16 */ +Lret_type8: +#if defined(__ppc64__) + lha r3,6(r5) +#else + lha r3,2(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT32 */ +Lret_type9: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_SINT32 */ +Lret_type10: +#if defined(__ppc64__) + lwz r3,4(r5) +#else + lwz r3,0(r5) +#endif + b Lfinish + nop + nop + +/* case FFI_TYPE_UINT64 */ +Lret_type11: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_SINT64 */ +Lret_type12: +#if defined(__ppc64__) + lg r3,0(r5) + b Lfinish + nop +#else + lwz r3,0(r5) + lwz r4,4(r5) + b Lfinish +#endif + nop + +/* case FFI_TYPE_STRUCT */ +Lret_type13: +#if defined(__ppc64__) + lg r3,0(r5) ; we need at least this... + cmpi 0,r0,4 + bgt Lstructend ; not a special small case + b Lsmallstruct ; see if we need more. +#else + cmpwi 0,r0,4 + bgt Lfinish ; not by value + lg r3,0(r5) + b Lfinish +#endif +/* case FFI_TYPE_POINTER */ +Lret_type14: + lg r3,0(r5) + b Lfinish + nop + nop + +#if defined(__ppc64__) +Lsmallstruct: + beq Lfour ; continuation of Lret13. + cmpi 0,r0,3 + beq Lfinish ; don`t adjust this - can`t be any floats here... + srdi r3,r3,48 + cmpi 0,r0,2 + beq Lfinish ; .. or here .. + srdi r3,r3,8 + b Lfinish ; .. or here. + +Lfour: + lg r6,LINKAGE_SIZE(r1) ; get the result type + lg r6,FFI_TYPE_ELEM(r6) ; elements array pointer + lg r6,0(r6) ; first element + lhz r0,FFI_TYPE_TYPE(r6) ; OK go the type + cmpi 0,r0,2 ; FFI_TYPE_FLOAT + bne Lfourint + lfs f1,0(r5) ; just one float in the struct. + b Lfinish + +Lfourint: + srdi r3,r3,32 ; four bytes. + b Lfinish + +Lstructend: + lg r3,LINKAGE_SIZE(r1) ; get the result type + bl STRUCT_RETVALUE_P + cmpi 0,r3,0 + beq Lfinish ; nope. + /* Recover a pointer to the results. */ + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we need at least this... + lg r4,8(r11) + cmpi 0,r0,16 + beq Lfinish ; special case 16 bytes we don't consider floats. + + /* OK, frustratingly, the process of saving the struct to mem might have + messed with the FPRs, so we have to re-load them :(. + We`ll use our FPRs space again - calling: + void darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) + We`ll temporarily pinch the first two slots of the param area for local + vars used by the routine. */ + xor r6,r6,r6 + addi r5,r1,PARENT_PARM_BASE ; some space + sg r6,0(r5) ; *nfpr zeroed. + addi r6,r5,8 ; **fprs + addi r3,r1,FP_SAVE_BASE ; pointer to FPRs space + sg r3,0(r6) + mr r4,r11 ; the struct is here... 
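+	; (after the next load, r3..r6 line up with the prototype quoted
+	;  above: darwin64_pass_struct_floats (s, src, nfpr, fprs).)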
+ lg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type. + bl PASS_STR_FLOATS ; get struct floats into FPR save space. + /* See if we used any floats */ + lwz r0,(SAVE_SIZE-RESULT_BYTES)(r1) + cmpi 0,r0,0 + beq Lstructints ; nope. + /* OK load `em up... */ + lfd f1, (FP_SAVE_BASE )(r1) + lfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1) + lfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1) + lfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1) + lfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1) + lfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1) + lfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1) + lfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1) + lfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1) + lfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1) + lfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1) + lfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1) + lfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1) + + /* point back at our saved struct. */ +Lstructints: + addi r11,r1,(SAVE_SIZE-RESULT_BYTES) + lg r3,0(r11) ; we end up picking the + lg r4,8(r11) ; first two again. + lg r5,16(r11) + lg r6,24(r11) + lg r7,32(r11) + lg r8,40(r11) + lg r9,48(r11) + lg r10,56(r11) +#endif + +/* case done */ +Lfinish: + addi r1,r1,SAVE_SIZE /* Restore stack pointer. */ + lg r0,SAVED_LR_OFFSET(r1) /* Get return address. */ + mtlr r0 /* Reset link register. */ + blr +Lendcode: + .align 1 + +/* END(ffi_closure_ASM) */ + +/* EH frame stuff. */ +#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78) +/* 176, 400 */ +#define EH_FRAME_OFFSETA MODE_CHOICE(176,0x90) +#define EH_FRAME_OFFSETB MODE_CHOICE(1,3) + + .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EH_frame1: + .set L$set$0,LECIE1-LSCIE1 + .long L$set$0 ; Length of Common Information Entry +LSCIE1: + .long 0x0 ; CIE Identifier Tag + .byte 0x1 ; CIE Version + .ascii "zR\0" ; CIE Augmentation + .byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor + .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor + .byte 0x41 ; CIE RA Column + .byte 0x1 ; uleb128 0x1; Augmentation size + .byte 0x10 ; FDE Encoding (pcrel) + .byte 0xc ; DW_CFA_def_cfa + .byte 0x1 ; uleb128 0x1 + .byte 0x0 ; uleb128 0x0 + .align LOG2_GPR_BYTES +LECIE1: + .globl _ffi_closure_ASM.eh +_ffi_closure_ASM.eh: +LSFDE1: + .set L$set$1,LEFDE1-LASFDE1 + .long L$set$1 ; FDE Length + +LASFDE1: + .long LASFDE1-EH_frame1 ; FDE CIE offset + .g_long Lstartcode-. 
; FDE initial location + .set L$set$2,LFE1-Lstartcode + .g_long L$set$2 ; FDE address range + .byte 0x0 ; uleb128 0x0; Augmentation size + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$3,LCFI1-LCFI0 + .long L$set$3 + .byte 0xe ; DW_CFA_def_cfa_offset + .byte EH_FRAME_OFFSETA,EH_FRAME_OFFSETB ; uleb128 176,1/190,3 + .byte 0x4 ; DW_CFA_advance_loc4 + .set L$set$4,LCFI0-Lstartcode + .long L$set$4 + .byte 0x11 ; DW_CFA_offset_extended_sf + .byte 0x41 ; uleb128 0x41 + .byte 0x7e ; sleb128 -2 + .align LOG2_GPR_BYTES +LEFDE1: + .align 1 + +#ifdef WANT_STUB + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_ffi_closure_helper_DARWIN$stub: + .indirect_symbol _ffi_closure_helper_DARWIN + mflr r0 + bcl 20,31,"L1$spb" +"L1$spb": + mflr r11 + addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb") + mtlr r0 + lwzu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L1$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_ffi_closure_helper_DARWIN$lazy_ptr: + .indirect_symbol _ffi_closure_helper_DARWIN + .g_long dyld_stub_binding_helper + +#if defined(__ppc64__) + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_struct_ret_by_value_p$stub: + .indirect_symbol _darwin64_struct_ret_by_value_p + mflr r0 + bcl 20,31,"L2$spb" +"L2$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L2$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_struct_ret_by_value_p$lazy_ptr: + .indirect_symbol _darwin64_struct_ret_by_value_p + .g_long dyld_stub_binding_helper + + .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 5 +L_darwin64_pass_struct_floats$stub: + .indirect_symbol _darwin64_pass_struct_floats + mflr r0 + bcl 20,31,"L3$spb" +"L3$spb": + mflr r11 + addis r11,r11,ha16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb") + mtlr r0 + lwzu r12,lo16(L_darwin64_pass_struct_floats$lazy_ptr-"L3$spb")(r11) + mtctr r12 + bctr + .lazy_symbol_pointer +L_darwin64_pass_struct_floats$lazy_ptr: + .indirect_symbol _darwin64_pass_struct_floats + .g_long dyld_stub_binding_helper +# endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi.c new file mode 100644 index 0000000..a19bcbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi.c @@ -0,0 +1,175 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" +#include "ffi_common.h" +#include "ffi_powerpc.h" + +#if HAVE_LONG_DOUBLE_VARIANT +/* Adjust ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types (ffi_abi abi) +{ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# ifdef POWERPC64 + ffi_prep_types_linux64 (abi); +# else + ffi_prep_types_sysv (abi); +# endif +# endif +} +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64 (cif); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var (ffi_cif *cif, + unsigned int nfixedargs MAYBE_UNUSED, + unsigned int ntotalargs MAYBE_UNUSED) +{ +#ifdef POWERPC64 + return ffi_prep_cif_linux64_var (cif, nfixedargs, ntotalargs); +#else + return ffi_prep_cif_sysv (cif); +#endif +} + +static void +ffi_call_int (ffi_cif *cif, + void (*fn) (void), + void *rvalue, + void **avalue, + void *closure) +{ + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead returns + them in memory. + + We bounce-buffer SYSV small struct return values so that sysv.S + can write r3 and r4 to memory without worrying about struct size. + + For ELFv2 ABI, use a bounce buffer for homogeneous structs too, + for similar reasons. This bounce buffer must be aligned to 16 + bytes for use with homogeneous structs of vectors (float128). */ + float128 smst_buffer[8]; + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + ecif.rvalue = rvalue; + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + ecif.rvalue = smst_buffer; + /* Ensure that we have a valid struct return value. + FIXME: Isn't this just papering over a user problem? */ + else if (!rvalue && cif->rtype->type == FFI_TYPE_STRUCT) + ecif.rvalue = alloca (cif->rtype->size); + +#ifdef POWERPC64 + ffi_call_LINUX64 (&ecif, fn, ecif.rvalue, cif->flags, closure, + -(long) cif->bytes); +#else + ffi_call_SYSV (&ecif, fn, ecif.rvalue, cif->flags, closure, -cif->bytes); +#endif + + /* Check for a bounce-buffered return value */ + if (rvalue && ecif.rvalue == smst_buffer) + { + unsigned int rsize = cif->rtype->size; +#ifndef __LITTLE_ENDIAN__ + /* The SYSV ABI returns a structure of up to 4 bytes in size + left-padded in r3. */ +# ifndef POWERPC64 + if (rsize <= 4) + memcpy (rvalue, (char *) smst_buffer + 4 - rsize, rsize); + else +# endif + /* The SYSV ABI returns a structure of up to 8 bytes in size + left-padded in r3/r4, and the ELFv2 ABI similarly returns a + structure of up to 8 bytes in size left-padded in r3. But + note that a structure of a single float is not paddded. 
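+	 (Concretely: a 3-byte struct makes the 32-bit branch above copy
+	 from smst_buffer + 4 - 3 = smst_buffer + 1, and, when the FP flag
+	 is clear, a 6-byte struct makes the branch below copy from
+	 smst_buffer + 8 - 6 = smst_buffer + 2.)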
*/ + if (rsize <= 8 && (cif->flags & FLAG_RETURNS_FP) == 0) + memcpy (rvalue, (char *) smst_buffer + 8 - rsize, rsize); + else +#endif + memcpy (rvalue, smst_buffer, rsize); + } +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#ifdef POWERPC64 + return ffi_prep_closure_loc_linux64 (closure, cif, fun, user_data, codeloc); +#else + return ffi_prep_closure_loc_sysv (closure, cif, fun, user_data, codeloc); +#endif +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ +#ifdef POWERPC64 + closure->tramp = ffi_go_closure_linux64; +#else + closure->tramp = ffi_go_closure_sysv; +#endif + closure->cif = cif; + closure->fun = fun; + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c new file mode 100644 index 0000000..64bb94d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_darwin.c @@ -0,0 +1,1452 @@ +/* ----------------------------------------------------------------------- + ffi_darwin.c + + Copyright (C) 1998 Geoffrey Keating + Copyright (C) 2001 John Hornkvist + Copyright (C) 2002, 2006, 2007, 2009, 2010 Free Software Foundation, Inc. + + FFI support for Darwin and AIX. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +extern void ffi_closure_ASM (void); + +#if defined (FFI_GO_CLOSURES) +extern void ffi_go_closure_ASM (void); +#endif + +enum { + /* The assembly depends on these exact flags. + For Darwin64 (when FLAG_RETURNS_STRUCT is set): + FLAG_RETURNS_FP indicates that the structure embeds FP data. + FLAG_RETURNS_128BITS signals a special struct size that is not + expanded for float content. 
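+     As a worked illustration (inferred from ffi_prep_cif_machdep below,
+     not taken from the ABI document): a Darwin64 function returning the
+     24-byte  struct { double x, y, z; }  is flagged
+     FLAG_RETURNS_STRUCT | FLAG_RETURNS_FP, while any 16-byte struct
+     return is flagged FLAG_RETURNS_STRUCT | FLAG_RETURNS_128BITS and is
+     copied straight back from r3/r4.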
*/ + FLAG_RETURNS_128BITS = 1 << (31-31), /* These go in cr7 */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_64BITS = 1 << (31-28), + + FLAG_RETURNS_STRUCT = 1 << (31-27), /* This goes in cr6 */ + + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4) +}; + +/* About the DARWIN ABI. */ +enum { + NUM_GPR_ARG_REGISTERS = 8, + NUM_FPR_ARG_REGISTERS = 13, + LINKAGE_AREA_GPRS = 6 +}; + +enum { ASM_NEEDS_REGISTERS = 4 }; /* r28-r31 */ + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments. + + m32/m64 + + The stack layout we want looks like this: + + | Return address from ffi_call_DARWIN | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4/8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | ASM_NEEDS_REGISTERS=r28-r31 4*(4/8) | | ffi_call_DARWIN + |--------------------------------------------| | + | When we have any FP activity... the | | + | FPRs occupy NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 from high to low addr. | | + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_call_DARWIN + + */ + +#if defined(POWERPC_DARWIN64) +static void +darwin64_pass_struct_by_value + (ffi_type *, char *, unsigned, unsigned *, double **, unsigned long **); +#endif + +/* This depends on GPR_SIZE = sizeof (unsigned long) */ + +void +ffi_prep_args (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + const unsigned nargs = ecif->cif->nargs; +#if !defined(POWERPC_DARWIN64) + const ffi_abi abi = ecif->cif->abi; +#endif + + /* 'stacktop' points at the previous backchain pointer. */ + unsigned long *const stacktop = stack + (bytes / sizeof(unsigned long)); + + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + double *fpr_base = (double *) (stacktop - ASM_NEEDS_REGISTERS) - NUM_FPR_ARG_REGISTERS; + int gp_count = 0, fparg_count = 0; + + /* 'next_arg' grows up as we put parameters in it. */ + unsigned long *next_arg = stack + LINKAGE_AREA_GPRS; /* 6 reserved positions. */ + + int i; + double double_tmp; + void **p_argv = ecif->avalue; + unsigned long gprvalue; + ffi_type** ptr = ecif->cif->arg_types; +#if !defined(POWERPC_DARWIN64) + char *dest_cpy; +#endif + unsigned size_al = 0; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT(((unsigned) (char *) stack & 0xF) == 0); + FFI_ASSERT(((unsigned) (char *) stacktop & 0xF) == 0); + FFI_ASSERT((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. + Rule: + Return values are referenced by r3, so r4 is the first parameter. 
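+   (Illustratively, for a hypothetical  big_t f (int a)  whose struct
+   result is returned by reference, the callee sees r3 = &return-buffer
+   and a in r4; in the image built below this means the rvalue pointer
+   goes into the first slot at stack + LINKAGE_AREA_GPRS and the C
+   arguments follow it.)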
*/ + + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + for (i = nargs; i > 0; i--, ptr++, p_argv++) + { + switch ((*ptr)->type) + { + /* If a floating-point parameter appears before all of the general- + purpose registers are filled, the corresponding GPRs that match + the size of the floating-point parameter are skipped. */ + case FFI_TYPE_FLOAT: + double_tmp = *(float *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; +#if defined(POWERPC_DARWIN) + *(float *)next_arg = *(float *) *p_argv; +#else + *(double *)next_arg = double_tmp; +#endif + next_arg++; + gp_count++; + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_DOUBLE: + double_tmp = *(double *) *p_argv; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *)next_arg = double_tmp; +#ifdef POWERPC64 + next_arg++; + gp_count++; +#else + next_arg += 2; + gp_count += 2; +#endif + fparg_count++; + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +# if defined(POWERPC64) && !defined(POWERPC_DARWIN64) + /* ??? This will exceed the regs count when the value starts at fp13 + and it will not put the extra bit on the stack. */ + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *(long double *) fpr_base++ = *(long double *) *p_argv; + else + *(long double *) next_arg = *(long double *) *p_argv; + next_arg += 2; + fparg_count += 2; +# else + double_tmp = ((double *) *p_argv)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; + double_tmp = ((double *) *p_argv)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = double_tmp; + *(double *) next_arg = double_tmp; +# if defined(POWERPC_DARWIN64) + next_arg++; + gp_count++; +# else + next_arg += 2; + gp_count += 2; +# endif + fparg_count++; +# endif + FFI_ASSERT(flags & FLAG_FP_ARGUMENTS); + break; +#endif + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + gprvalue = *(long long *) *p_argv; + goto putgpr; +#else + *(long long *) next_arg = *(long long *) *p_argv; + next_arg += 2; + gp_count += 2; +#endif + break; + case FFI_TYPE_POINTER: + gprvalue = *(unsigned long *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT8: + gprvalue = *(unsigned char *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = *(signed char *) *p_argv; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = *(unsigned short *) *p_argv; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = *(signed short *) *p_argv; + goto putgpr; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + next_arg = (unsigned long *)FFI_ALIGN((char *)next_arg, (*ptr)->alignment); + darwin64_pass_struct_by_value (*ptr, (char *) *p_argv, + (unsigned) size_al, + (unsigned int *) &fparg_count, + &fpr_base, &next_arg); +#else + dest_cpy = (char *) next_arg; + + /* If the first member of the struct is a double, then include enough + padding in the struct size to align it to double-word. 
*/ + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); + +# if defined(POWERPC64) + FFI_ASSERT (abi != FFI_DARWIN); + memcpy ((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. + Structures with 3 byte in size are padded upwards. */ + if (size_al < 3 && abi == FFI_DARWIN) + dest_cpy += 4 - size_al; + + memcpy((char *) dest_cpy, (char *) *p_argv, size_al); + next_arg += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = *(signed int *) *p_argv; + goto putgpr; + + case FFI_TYPE_UINT32: + gprvalue = *(unsigned int *) *p_argv; + putgpr: + *next_arg++ = gprvalue; + gp_count++; + break; + default: + break; + } + } + + /* Check that we didn't overrun the stack... */ + /* FFI_ASSERT(gpr_base <= stacktop - ASM_NEEDS_REGISTERS); + FFI_ASSERT((unsigned *)fpr_base + <= stacktop - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS); + FFI_ASSERT(flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4); */ +} + +#if defined(POWERPC_DARWIN64) + +/* See if we can put some of the struct into fprs. + This should not be called for structures of size 16 bytes, since these are not + broken out this way. */ +static void +darwin64_scan_struct_for_floats (ffi_type *s, unsigned *nfpr) +{ + int i; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p = s->elements[i]; + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_scan_struct_for_floats (p, nfpr); + break; + case FFI_TYPE_LONGDOUBLE: + (*nfpr) += 2; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + (*nfpr) += 1; + break; + default: + break; + } + } +} + +static int +darwin64_struct_size_exceeds_gprs_p (ffi_type *s, char *src, unsigned *nfpr) +{ + unsigned struct_offset=0, i; + + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + if (darwin64_struct_size_exceeds_gprs_p (p, item_base, nfpr)) + return 1; + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr >= NUM_FPR_ARG_REGISTERS) + return 1; + (*nfpr) += 1; + break; + default: + /* If we try and place any item, that is non-float, once we've + exceeded the 8 GPR mark, then we can't fit the struct. */ + if ((unsigned long)item_base >= 8*8) + return 1; + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return 0; +} + +/* Can this struct be returned by value? */ +int +darwin64_struct_ret_by_value_p (ffi_type *s) +{ + unsigned nfp = 0; + + FFI_ASSERT (s && s->type == FFI_TYPE_STRUCT); + + /* The largest structure we can return is 8long + 13 doubles. */ + if (s->size > 168) + return 0; + + /* We can't pass more than 13 floats. */ + darwin64_scan_struct_for_floats (s, &nfp); + if (nfp > 13) + return 0; + + /* If there are not too many floats, and the struct is + small enough to accommodate in the GPRs, then it must be OK. */ + if (s->size <= 64) + return 1; + + /* Well, we have to look harder. 
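+     For instance (illustrative only): the 168-byte
+     struct { long l[8]; double d[13]; } still comes back by value,
+     filling all eight GPRs and all thirteen FPRs, whereas 72 bytes of
+     nine longs does not, because the ninth long would fall outside the
+     eight-GPR image.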
*/ + nfp = 0; + if (darwin64_struct_size_exceeds_gprs_p (s, NULL, &nfp)) + return 0; + + return 1; +} + +void +darwin64_pass_struct_floats (ffi_type *s, char *src, + unsigned *nfpr, double **fprs) +{ + int i; + double *fpr_base = *fprs; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = src + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + darwin64_pass_struct_floats (p, item_base, nfpr, + &fpr_base); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = *(double *)item_base; + (*nfpr) += 1; + break; + case FFI_TYPE_FLOAT: + if (*nfpr < NUM_FPR_ARG_REGISTERS) + *fpr_base++ = (double) *(float *)item_base; + (*nfpr) += 1; + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + /* Update the scores. */ + *fprs = fpr_base; +} + +/* Darwin64 special rules. + Break out a struct into params and float registers. */ +static void +darwin64_pass_struct_by_value (ffi_type *s, char *src, unsigned size, + unsigned *nfpr, double **fprs, unsigned long **arg) +{ + unsigned long *next_arg = *arg; + char *dest_cpy = (char *)next_arg; + + FFI_ASSERT (s->type == FFI_TYPE_STRUCT) + + if (!size) + return; + + /* First... special cases. */ + if (size < 3 + || (size == 4 + && s->elements[0] + && s->elements[0]->type != FFI_TYPE_FLOAT)) + { + /* Must be at least one GPR, padding is unspecified in value, + let's make it zero. */ + *next_arg = 0UL; + dest_cpy += 8 - size; + memcpy ((char *) dest_cpy, src, size); + next_arg++; + } + else if (size == 16) + { + memcpy ((char *) dest_cpy, src, size); + next_arg += 2; + } + else + { + /* now the general case, we consider embedded floats. */ + memcpy ((char *) dest_cpy, src, size); + darwin64_pass_struct_floats (s, src, nfpr, fprs); + next_arg += (size+7)/8; + } + + *arg = next_arg; +} + +double * +darwin64_struct_floats_to_mem (ffi_type *s, char *dest, double *fprs, unsigned *nf) +{ + int i; + unsigned struct_offset = 0; + + /* We don't assume anything about the alignment of the source. */ + for (i = 0; s->elements[i] != NULL; i++) + { + char *item_base; + ffi_type *p = s->elements[i]; + /* Find the start of this item (0 for the first one). */ + if (i > 0) + struct_offset = FFI_ALIGN(struct_offset, p->alignment); + item_base = dest + struct_offset; + + switch (p->type) + { + case FFI_TYPE_STRUCT: + fprs = darwin64_struct_floats_to_mem (p, item_base, fprs, nf); + break; + case FFI_TYPE_LONGDOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + item_base += 8; + /* FALL THROUGH */ + case FFI_TYPE_DOUBLE: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(double *)item_base = *fprs++ ; + (*nf) += 1; + } + break; + case FFI_TYPE_FLOAT: + if (*nf < NUM_FPR_ARG_REGISTERS) + { + *(float *)item_base = (float) *fprs++ ; + (*nf) += 1; + } + break; + default: + break; + } + /* now count the size of what we just used. */ + struct_offset += p->size; + } + return fprs; +} + +#endif + +/* Adjust the size of S to be correct for Darwin. 
+ On Darwin m32, the first field of a structure has natural alignment. + On Darwin m64, all fields have natural alignment. */ + +static void +darwin_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + if (p->type == FFI_TYPE_STRUCT) + darwin_adjust_aggregate_sizes (p); +#if defined(POWERPC_DARWIN64) + /* Natural alignment for all items. */ + align = p->alignment; +#else + /* Natural alignment for the first item... */ + if (i == 0) + align = p->alignment; + else if (p->alignment == 16 || p->alignment < 4) + /* .. subsequent items with vector or align < 4 have natural align. */ + align = p->alignment; + else + /* .. or align is 4. */ + align = 4; +#endif + /* Pad, if necessary, before adding the current item. */ + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + /* This should not be necessary on m64, but harmless. */ + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Adjust the size of S to be correct for AIX. + Word-align double unless it is the first member of a structure. */ + +static void +aix_adjust_aggregate_sizes (ffi_type *s) +{ + int i; + + if (s->type != FFI_TYPE_STRUCT) + return; + + s->size = 0; + for (i = 0; s->elements[i] != NULL; i++) + { + ffi_type *p; + int align; + + p = s->elements[i]; + aix_adjust_aggregate_sizes (p); + align = p->alignment; + if (i != 0 && p->type == FFI_TYPE_DOUBLE) + align = 4; + s->size = FFI_ALIGN(s->size, align) + p->size; + } + + s->size = FFI_ALIGN(s->size, s->alignment); + + if (s->elements[0]->type == FFI_TYPE_UINT64 + || s->elements[0]->type == FFI_TYPE_SINT64 + || s->elements[0]->type == FFI_TYPE_DOUBLE + || s->elements[0]->alignment == 8) + s->alignment = s->alignment > 8 ? s->alignment : 8; + /* Do not add additional tail padding. */ +} + +/* Perform machine dependent cif processing. */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* All this is for the DARWIN ABI. */ + unsigned i; + ffi_type **ptr; + unsigned bytes; + unsigned fparg_count = 0, intarg_count = 0; + unsigned flags = 0; + unsigned size_al = 0; + + /* All the machine-independent calculation of cif->bytes will be wrong. + All the calculation of structure sizes will also be wrong. + Redo the calculation for DARWIN. */ + + if (cif->abi == FFI_DARWIN) + { + darwin_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + darwin_adjust_aggregate_sizes (cif->arg_types[i]); + } + + if (cif->abi == FFI_AIX) + { + aix_adjust_aggregate_sizes (cif->rtype); + for (i = 0; i < cif->nargs; i++) + aix_adjust_aggregate_sizes (cif->arg_types[i]); + } + + /* Space for the frame pointer, callee's LR, CR, etc, and for + the asm's temp regs. */ + + bytes = (LINKAGE_AREA_GPRS + ASM_NEEDS_REGISTERS) * sizeof(unsigned long); + + /* Return value handling. + The rules m32 are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values [??? 
and structures between 5 and 8 bytes] are + returned in gpr3 and gpr4; + - Single/double FP values are returned in fpr1; + - Long double FP (if not equivalent to double) values are returned in + fpr1 and fpr2; + m64: + - 64-bit or smaller integral values are returned in GPR3 + - Single/double FP values are returned in fpr1; + - Long double FP values are returned in fpr1 and fpr2; + m64 Structures: + - If the structure could be accommodated in registers were it to be the + first argument to a routine, then it is returned in those registers. + m32/m64 structures otherwise: + - Larger structures values are allocated space and a pointer is passed + as the first argument. */ + switch (cif->rtype->type) + { + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + flags |= FLAG_RETURNS_FP; + break; +#endif + + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef POWERPC64 + case FFI_TYPE_POINTER: +#endif + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if defined(POWERPC_DARWIN64) + { + /* Can we fit the struct into regs? */ + if (darwin64_struct_ret_by_value_p (cif->rtype)) + { + unsigned nfpr = 0; + flags |= FLAG_RETURNS_STRUCT; + if (cif->rtype->size != 16) + darwin64_scan_struct_for_floats (cif->rtype, &nfpr) ; + else + flags |= FLAG_RETURNS_128BITS; + /* Will be 0 for 16byte struct. */ + if (nfpr) + flags |= FLAG_RETURNS_FP; + } + else /* By ref. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size <= 4) + flags |= FLAG_RETURNS_STRUCT; + else /* else by reference. */ + { + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; + } +#else /* assume we pass by ref. */ + flags |= FLAG_RETVAL_REFERENCE; + flags |= FLAG_RETURNS_NOTHING; + intarg_count++; +#endif + break; + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. + ??? Structures are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. + For m64 the count is effectively of half-GPRs. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned align_words; + switch ((*ptr)->type) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + fparg_count++; +#if !defined(POWERPC_DARWIN64) + /* If this FP arg is going on the stack, it must be + 8-byte-aligned. */ + if (fparg_count > NUM_FPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0) + intarg_count++; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + fparg_count += 2; + /* If this FP arg is going on the stack, it must be + 16-byte-aligned. */ + if (fparg_count >= NUM_FPR_ARG_REGISTERS) +#if defined (POWERPC64) + intarg_count = FFI_ALIGN(intarg_count, 2); +#else + intarg_count = FFI_ALIGN(intarg_count, 4); +#endif + break; +#endif + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#if defined(POWERPC64) + intarg_count++; +#else + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. 
If they go on the stack, they must + be 8-byte-aligned. */ + if (intarg_count == NUM_GPR_ARG_REGISTERS-1 + || (intarg_count >= NUM_GPR_ARG_REGISTERS + && (intarg_count & 0x01) != 0)) + intarg_count++; + intarg_count += 2; +#endif + break; + + case FFI_TYPE_STRUCT: + size_al = (*ptr)->size; +#if defined(POWERPC_DARWIN64) + align_words = (*ptr)->alignment >> 3; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* Base size of the struct. */ + intarg_count += (size_al + 7) / 8; + /* If 16 bytes then don't worry about floats. */ + if (size_al != 16) + /* Scan through for floats to be placed in regs. */ + darwin64_scan_struct_for_floats (*ptr, &fparg_count) ; +#else + align_words = (*ptr)->alignment >> 2; + if (align_words) + intarg_count = FFI_ALIGN(intarg_count, align_words); + /* If the first member of the struct is a double, then align + the struct to double-word. + if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN((*ptr)->size, 8); */ +# ifdef POWERPC64 + intarg_count += (size_al + 7) / 8; +# else + intarg_count += (size_al + 3) / 4; +# endif +#endif + break; + + default: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + break; + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + +#if defined(POWERPC_DARWIN64) + /* Space to image the FPR registers, if needed - which includes when they might be + used in a struct return. */ + if (fparg_count != 0 + || ((flags & FLAG_RETURNS_STRUCT) + && (flags & FLAG_RETURNS_FP))) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#else + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof(double); +#endif + + /* Stack space. */ +#ifdef POWERPC64 + if ((intarg_count + fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + fparg_count) * sizeof(long); +#else + if ((intarg_count + 2 * fparg_count) > NUM_GPR_ARG_REGISTERS) + bytes += (intarg_count + 2 * fparg_count) * sizeof(long); +#endif + else + bytes += NUM_GPR_ARG_REGISTERS * sizeof(long); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = FFI_ALIGN(bytes, 16) ; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void)); + +#if defined (FFI_GO_CLOSURES) +extern void ffi_call_go_AIX(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), void *closure); +#endif + +extern void ffi_call_DARWIN(extended_cif *, long, unsigned, unsigned *, + void (*fn)(void), void (*fn2)(void), ffi_type*); + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. 
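+     For reference, a typical caller does pass a real return buffer; a
+     minimal sketch (illustrative only; sin() and the local names are
+     assumptions, not anything defined in this file):
+
+       ffi_cif cif;
+       ffi_type *argt[1] = { &ffi_type_double };
+       double x = 2.0, r;
+       void *argv[1] = { &x };
+       if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1,
+                         &ffi_type_double, argt) == FFI_OK)
+         ffi_call (&cif, FFI_FN (sin), &r, argv);
+
+     The alloca below only has to step in when rvalue is NULL for a
+     struct-returning cif, so the callee always has somewhere to write.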
*/ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args)); + break; + case FFI_DARWIN: + ffi_call_DARWIN(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), cif->rtype); + break; + default: + FFI_ASSERT(0); + break; + } +} + +#if defined (FFI_GO_CLOSURES) +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return + value address then we need to make one. */ + + if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca (cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_AIX: + ffi_call_go_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn, + FFI_FN(ffi_prep_args), closure); + break; + default: + FFI_ASSERT(0); + break; + } +} +#endif + +static void flush_icache(char *); +static void flush_range(char *, int); + +/* The layout of a function descriptor. A C function pointer really + points to one of these. */ + +typedef struct aix_fd_struct { + void *code_pointer; + void *toc; +} aix_fd; + +/* here I'd like to add the stack frame layout we use in darwin_closure.S + and aix_closure.S + + m32/m64 + + The stack layout looks like this: + + | Additional params... | | Higher address + ~ ~ ~ + | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | + | Reserved 2*4/8 | | + |--------------------------------------------| | + | Space for callee's LR 4/8 | | + |--------------------------------------------| | + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | + | Current backchain pointer 4/8 |-/ Parent's frame. + |--------------------------------------------| <+ <<< on entry to ffi_closure_ASM + | Result Bytes 16 | | + |--------------------------------------------| | + ~ padding to 16-byte alignment ~ ~ + |--------------------------------------------| | + | NUM_FPR_ARG_REGISTERS slots | | + | here fp13 .. fp1 13*8 | | + |--------------------------------------------| | + | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS + |--------------------------------------------| | + | TOC=R2 (AIX) Reserved (Darwin) 4/8 | | + |--------------------------------------------| | stack | + | Reserved [compiler,binder] 2*4/8 | | grows | + |--------------------------------------------| | down V + | Space for callee's LR 4/8 | | + |--------------------------------------------| | lower addresses + | Saved CR [low word for m64] 4/8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4/8 |-/ during + |--------------------------------------------| <<< ffi_closure_ASM. 
+ +*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + struct ffi_aix_trampoline_struct *tramp_aix; + aix_fd *fd; + + switch (cif->abi) + { + case FFI_DARWIN: + + FFI_ASSERT (cif->abi == FFI_DARWIN); + + tramp = (unsigned int *) &closure->tramp[0]; +#if defined(POWERPC_DARWIN64) + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0015; /* bcl- 20,4*cr7+so, +0x18 (L1) */ + /* We put the addresses here. */ + tramp[6] = 0x7d6802a6; /*L1: mflr r11 */ + tramp[7] = 0xe98b0000; /* ld r12,0(r11) function address */ + tramp[8] = 0x7c0803a6; /* mtlr r0 */ + tramp[9] = 0x7d8903a6; /* mtctr r12 */ + tramp[10] = 0xe96b0008; /* lwz r11,8(r11) static chain */ + tramp[11] = 0x4e800420; /* bctr */ + + *((unsigned long *)&tramp[2]) = (unsigned long) ffi_closure_ASM; /* function */ + *((unsigned long *)&tramp[4]) = (unsigned long) codeloc; /* context */ +#else + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f000d; /* bcl- 20,4*cr7+so,0x10 */ + tramp[4] = 0x7d6802a6; /* mflr r11 */ + tramp[5] = 0x818b0000; /* lwz r12,0(r11) function address */ + tramp[6] = 0x7c0803a6; /* mtlr r0 */ + tramp[7] = 0x7d8903a6; /* mtctr r12 */ + tramp[8] = 0x816b0004; /* lwz r11,4(r11) static chain */ + tramp[9] = 0x4e800420; /* bctr */ + tramp[2] = (unsigned long) ffi_closure_ASM; /* function */ + tramp[3] = (unsigned long) codeloc; /* context */ +#endif + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. Only necessary on Darwin. */ + flush_range(codeloc, FFI_TRAMPOLINE_SIZE); + + break; + + case FFI_AIX: + + tramp_aix = (struct ffi_aix_trampoline_struct *) (closure->tramp); + fd = (aix_fd *)(void *)ffi_closure_ASM; + + FFI_ASSERT (cif->abi == FFI_AIX); + + tramp_aix->code_pointer = fd->code_pointer; + tramp_aix->toc = fd->toc; + tramp_aix->static_chain = codeloc; + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + break; + + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} + +#if defined (FFI_GO_CLOSURES) +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_AIX: + + FFI_ASSERT (cif->abi == FFI_AIX); + + closure->tramp = (void *)ffi_go_closure_ASM; + closure->cif = cif; + closure->fun = fun; + return FFI_OK; + + // For now, ffi_prep_go_closure is only implemented for AIX, not for Darwin + default: + return FFI_BAD_ABI; + break; + } + return FFI_OK; +} +#endif + +static void +flush_icache(char *addr) +{ +#ifndef _AIX + __asm__ volatile ( + "dcbf 0,%0\n" + "\tsync\n" + "\ticbi 0,%0\n" + "\tsync\n" + "\tisync" + : : "r"(addr) : "memory"); +#endif +} + +static void +flush_range(char * addr1, int size) +{ +#define MIN_LINE_SIZE 32 + int i; + for (i = 0; i < size; i += MIN_LINE_SIZE) + flush_icache(addr1+i); + flush_icache(addr1+size-1); +} + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *, void *, + unsigned long *, ffi_dblfl *); + +#if defined (FFI_GO_CLOSURES) +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure*, void *, + unsigned long *, ffi_dblfl *); +#endif + +/* Basically the trampoline invokes ffi_closure_ASM, and on + entry, r11 holds the address of the closure. 
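+   (r11 is established by the trampoline itself: the code words written
+   by ffi_prep_closure_loc above use a bcl / mflr sequence to find the
+   data embedded in the trampoline and load r11 from it.)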
+ After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_ASM invokes the + following helper function to do most of the work. */ + +static ffi_type * +ffi_closure_helper_common (ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + /* rvalue is the pointer to space for return value in closure assembly + pgr is the pointer to where r3-r10 are stored in ffi_closure_ASM + pfr is the pointer to where f1-f13 are stored in ffi_closure_ASM. */ + + typedef double ldbits[2]; + + union ldu + { + ldbits lb; + long double ld; + }; + + void ** avalue; + ffi_type ** arg_types; + long i, avn; + ffi_dblfl * end_pfr = pfr + NUM_FPR_ARG_REGISTERS; + unsigned size_al; +#if defined(POWERPC_DARWIN64) + unsigned fpsused = 0; +#endif + + avalue = alloca (cif->nargs * sizeof(void *)); + + if (cif->rtype->type == FFI_TYPE_STRUCT) + { +#if defined(POWERPC_DARWIN64) + if (!darwin64_struct_ret_by_value_p (cif->rtype)) + { + /* Won't fit into the regs - return by ref. */ + rvalue = (void *) *pgr; + pgr++; + } +#elif defined(DARWIN_PPC) + if (cif->rtype->size > 4) + { + rvalue = (void *) *pgr; + pgr++; + } +#else /* assume we return by ref. */ + rvalue = (void *) *pgr; + pgr++; +#endif + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 7; +#else + avalue[i] = (char *) pgr + 3; +#endif + pgr++; + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 6; +#else + avalue[i] = (char *) pgr + 2; +#endif + pgr++; + break; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#if defined(POWERPC64) + avalue[i] = (char *) pgr + 4; +#else + case FFI_TYPE_POINTER: + avalue[i] = pgr; +#endif + pgr++; + break; + + case FFI_TYPE_STRUCT: + size_al = arg_types[i]->size; +#if defined(POWERPC_DARWIN64) + pgr = (unsigned long *)FFI_ALIGN((char *)pgr, arg_types[i]->alignment); + if (size_al < 3 || size_al == 4) + { + avalue[i] = ((char *)pgr)+8-size_al; + if (arg_types[i]->elements[0]->type == FFI_TYPE_FLOAT + && fpsused < NUM_FPR_ARG_REGISTERS) + { + *(float *)pgr = (float) *(double *)pfr; + pfr++; + fpsused++; + } + } + else + { + if (size_al != 16) + pfr = (ffi_dblfl *) + darwin64_struct_floats_to_mem (arg_types[i], (char *)pgr, + (double *)pfr, &fpsused); + avalue[i] = pgr; + } + pgr += (size_al + 7) / 8; +#else + /* If the first member of the struct is a double, then align + the struct to double-word. */ + if (arg_types[i]->elements[0]->type == FFI_TYPE_DOUBLE) + size_al = FFI_ALIGN(arg_types[i]->size, 8); +# if defined(POWERPC64) + FFI_ASSERT (cif->abi != FFI_DARWIN); + avalue[i] = pgr; + pgr += (size_al + 7) / 8; +# else + /* Structures that match the basic modes (QI 1 byte, HI 2 bytes, + SI 4 bytes) are aligned as if they were those modes. */ + if (size_al < 3 && cif->abi == FFI_DARWIN) + avalue[i] = (char*) pgr + 4 - size_al; + else + avalue[i] = pgr; + pgr += (size_al + 3) / 4; +# endif +#endif + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: +#if defined(POWERPC64) + case FFI_TYPE_POINTER: + avalue[i] = pgr; + pgr++; + break; +#else + /* Long long ints are passed in two gpr's. 
*/ + avalue[i] = pgr; + pgr += 2; + break; +#endif + + case FFI_TYPE_FLOAT: + /* A float value consumes a GPR. + There are 13 64bit floating point registers. */ + if (pfr < end_pfr) + { + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr++; + break; + + case FFI_TYPE_DOUBLE: + /* A double value consumes two GPRs. + There are 13 64bit floating point registers. */ + if (pfr < end_pfr) + { + avalue[i] = pfr; + pfr++; + } + else + { + avalue[i] = pgr; + } +#ifdef POWERPC64 + pgr++; +#else + pgr += 2; +#endif + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + + case FFI_TYPE_LONGDOUBLE: +#ifdef POWERPC64 + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr) + { + *pgr = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pgr; + } + pgr += 2; +#else /* POWERPC64 */ + /* A long double value consumes four GPRs and two FPRs. + There are 13 64bit floating point registers. */ + if (pfr + 1 < end_pfr) + { + avalue[i] = pfr; + pfr += 2; + } + /* Here we have the situation where one part of the long double + is stored in fpr13 and the other part is already on the stack. + We use a union to pass the long double to avalue[i]. */ + else if (pfr + 1 == end_pfr) + { + union ldu temp_ld; + memcpy (&temp_ld.lb[0], pfr, sizeof(ldbits)); + memcpy (&temp_ld.lb[1], pgr + 2, sizeof(ldbits)); + avalue[i] = &temp_ld.ld; + pfr++; + } + else + { + avalue[i] = pgr; + } + pgr += 4; +#endif /* POWERPC64 */ + break; +#endif + default: + FFI_ASSERT(0); + } + i++; + } + + (fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_ASM to perform return type promotions. */ + return cif->rtype; +} + +ffi_type * +ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure->user_data, rvalue, pgr, pfr); +} + +#if defined (FFI_GO_CLOSURES) +ffi_type * +ffi_go_closure_helper_DARWIN (ffi_go_closure *closure, void *rvalue, + unsigned long *pgr, ffi_dblfl *pfr) +{ + return ffi_closure_helper_common (closure->cif, closure->fun, + closure, rvalue, pgr, pfr); +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c new file mode 100644 index 0000000..4d50878 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_linux64.c @@ -0,0 +1,1153 @@ +/* ----------------------------------------------------------------------- + ffi_linux64.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifdef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the LINUX64 ABI. */ +enum { + NUM_GPR_ARG_REGISTERS64 = 8, + NUM_FPR_ARG_REGISTERS64 = 13, + NUM_VEC_ARG_REGISTERS64 = 12, +}; +enum { ASM_NEEDS_REGISTERS64 = 4 }; + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_linux64 (ffi_abi abi) +{ + if ((abi & (FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128)) == FFI_LINUX) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + + +static unsigned int +discover_homogeneous_aggregate (ffi_abi abi, + const ffi_type *t, + unsigned int *elnum) +{ + switch (t->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 64-bit long doubles are equivalent to doubles. */ + if ((abi & FFI_LINUX_LONG_DOUBLE_128) == 0) + { + *elnum = 1; + return FFI_TYPE_DOUBLE; + } + /* IBM extended precision values use unaligned pairs + of FPRs, but according to the ABI must be considered + distinct from doubles. They are also limited to a + maximum of four members in a homogeneous aggregate. */ + else if ((abi & FFI_LINUX_LONG_DOUBLE_IEEE128) == 0) + { + *elnum = 2; + return FFI_TYPE_LONGDOUBLE; + } + /* Fall through. */ +#endif + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + *elnum = 1; + return (int) t->type; + + case FFI_TYPE_STRUCT:; + { + unsigned int base_elt = 0, total_elnum = 0; + ffi_type **el = t->elements; + while (*el) + { + unsigned int el_elt, el_elnum = 0; + el_elt = discover_homogeneous_aggregate (abi, *el, &el_elnum); + if (el_elt == 0 + || (base_elt && base_elt != el_elt)) + return 0; + base_elt = el_elt; + total_elnum += el_elnum; +#if _CALL_ELF == 2 + if (total_elnum > 8) + return 0; +#else + if (total_elnum > 1) + return 0; +#endif + el++; + } + *elnum = total_elnum; + return base_elt; + } + + default: + return 0; + } +} + + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_linux64_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fparg_count = 0, intarg_count = 0, vecarg_count = 0; + unsigned flags = cif->flags; + unsigned elt, elnum, rtype; + +#if FFI_TYPE_LONGDOUBLE == FFI_TYPE_DOUBLE + /* If compiled without long double support... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0 || + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#elif !defined(__VEC__) + /* If compiled without vector register support (used by assembly)... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#else + /* If the IEEE128 flag is set, but long double is only 64 bits wide... */ + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) == 0 && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + return FFI_BAD_ABI; +#endif + + /* The machine-independent calculation of cif->bytes doesn't work + for us. 
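To make discover_homogeneous_aggregate concrete: under ELFv2 a struct of up to eight identical floats (or doubles) counts as a homogeneous aggregate and travels in FPRs while enough are free, whereas mixing element types makes the function return 0 and the struct falls back to GPRs/memory. A sketch of describing and calling with such a type, assuming a hypothetical vec4/vec4_sum (ffi_prep_cif fills in the struct's size and alignment):

#include <ffi.h>
#include <stdio.h>

typedef struct { float x, y, z, w; } vec4;   /* base_elt = FFI_TYPE_FLOAT, elnum = 4 */

static float vec4_sum(vec4 v) { return v.x + v.y + v.z + v.w; }

int main(void)
{
  ffi_type *elems[5] = { &ffi_type_float, &ffi_type_float,
                         &ffi_type_float, &ffi_type_float, NULL };
  ffi_type vec4_type = { 0, 0, FFI_TYPE_STRUCT, elems };  /* size/alignment set by prep */
  ffi_type *argt[1] = { &vec4_type };
  ffi_cif cif;
  vec4 v = { 1, 2, 3, 4 };
  void *argv[1] = { &v };
  float out = 0;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_float, argt) != FFI_OK)
    return 1;
  ffi_call(&cif, FFI_FN(vec4_sum), &out, argv);
  printf("%f\n", out);                       /* 10.000000 */
  return 0;
}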
Redo the calculation. */ +#if _CALL_ELF == 2 + /* Space for backchain, CR, LR, TOC and the asm's temp regs. */ + bytes = (4 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the general registers. */ + bytes += NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#else + /* Space for backchain, CR, LR, cc/ld doubleword, TOC and the asm's temp + regs. */ + bytes = (6 + ASM_NEEDS_REGISTERS64) * sizeof (long); + + /* Space for the mandatory parm save area and general registers. */ + bytes += 2 * NUM_GPR_ARG_REGISTERS64 * sizeof (long); +#endif + + /* Return value handling. */ + rtype = cif->rtype->type; +#if _CALL_ELF == 2 +homogeneous: +#endif + switch (rtype) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + flags |= FLAG_RETURNS_VEC; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: +#if _CALL_ELF == 2 + elt = discover_homogeneous_aggregate (cif->abi, cif->rtype, &elnum); + if (elt) + { + flags |= FLAG_RETURNS_SMST; + rtype = elt; + goto homogeneous; + } + if (cif->rtype->size <= 16) + { + flags |= FLAG_RETURNS_SMST; + break; + } +#endif + intarg_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned int align; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count++; + /* Align to 16 bytes, plus the 16-byte argument. */ + intarg_count = (intarg_count + 3) & ~0x1; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + fparg_count++; + intarg_count++; + } + /* Fall through. 
*/ +#endif + case FFI_TYPE_DOUBLE: + case FFI_TYPE_FLOAT: + fparg_count++; + intarg_count++; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + align = align / 8; + if (align > 1) + intarg_count = FFI_ALIGN (intarg_count, align); + } + intarg_count += ((*ptr)->size + 7) / 8; + elt = discover_homogeneous_aggregate (cif->abi, *ptr, &elnum); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + vecarg_count += elnum; + if (vecarg_count > NUM_VEC_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + } + else +#endif + if (elt) + { + fparg_count += elnum; + if (fparg_count > NUM_FPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + else + { + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + } + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 8-byte word in a GPR, either + the object itself or a pointer to it. */ + intarg_count++; + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + flags |= FLAG_ARG_NEEDS_PSAVE; + break; + default: + FFI_ASSERT (0); + } + } + + if (fparg_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (intarg_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (vecarg_count != 0) + flags |= FLAG_VEC_ARGUMENTS; + + /* Space for the FPR registers, if needed. */ + if (fparg_count != 0) + bytes += NUM_FPR_ARG_REGISTERS64 * sizeof (double); + /* Space for the vector registers, if needed, aligned to 16 bytes. */ + if (vecarg_count != 0) { + bytes = (bytes + 15) & ~0xF; + bytes += NUM_VEC_ARG_REGISTERS64 * sizeof (float128); + } + + /* Stack space. */ +#if _CALL_ELF == 2 + if ((flags & FLAG_ARG_NEEDS_PSAVE) != 0) + bytes += intarg_count * sizeof (long); +#else + if (intarg_count > NUM_GPR_ARG_REGISTERS64) + bytes += (intarg_count - NUM_GPR_ARG_REGISTERS64) * sizeof (long); +#endif + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64 (ffi_cif *cif) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = cif->nargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. */ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; + return ffi_prep_cif_linux64_core (cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_linux64_var (ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs MAYBE_UNUSED) +{ + if ((cif->abi & FFI_LINUX) != 0) + cif->nfixedargs = nfixedargs; +#if _CALL_ELF != 2 + else if (cif->abi == FFI_COMPAT_LINUX64) + { + /* This call is from old code. Don't touch cif->nfixedargs + since old code will be using a smaller cif. */ + cif->flags |= FLAG_COMPAT; + /* Translate to new abi value. 
*/ + cif->abi = FFI_LINUX | FFI_LINUX_LONG_DOUBLE_128; + } +#endif + else + return FFI_BAD_ABI; +#if _CALL_ELF == 2 + cif->flags |= FLAG_ARG_NEEDS_PSAVE; +#endif + return ffi_prep_cif_linux64_core (cif); +} + + +/* ffi_prep_args64 is called by the assembly routine once stack space + has been allocated for the function's arguments. + + The stack layout we want looks like this: + + | Ret addr from ffi_call_LINUX64 8bytes | higher addresses + |--------------------------------------------| + | CR save area 8bytes | + |--------------------------------------------| + | Previous backchain pointer 8 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*8 | | ffi_call_LINUX64 + |--------------------------------------------| | + | GPR registers r3-r10 8*8 | | + |--------------------------------------------| | + | FPR registers f1-f13 (optional) 13*8 | | + |--------------------------------------------| | + | VEC registers v2-v13 (optional) 12*16 | | + |--------------------------------------------| | + | Parameter save area | | + |--------------------------------------------| | + | TOC save area 8 | | + |--------------------------------------------| | stack | + | Linker doubleword 8 | | grows | + |--------------------------------------------| | down V + | Compiler doubleword 8 | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 8 | | + |--------------------------------------------| | + | CR save area 8 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 8 |-/ during + |--------------------------------------------| <<< ffi_call_LINUX64 + +*/ + +void FFI_HIDDEN +ffi_prep_args64 (extended_cif *ecif, unsigned long *const stack) +{ + const unsigned long bytes = ecif->cif->bytes; + const unsigned long flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'next_arg' points at the space for gpr3, and grows upwards as + we use GPR registers, then continues at rest. */ + valp gpr_base; + valp gpr_end; + valp rest; + valp next_arg; + + /* 'fpr_base' points at the space for f1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + unsigned int fparg_count; + + /* 'vec_base' points at the space for v2, and grows upwards as + we use vector registers. */ + valp vec_base; + unsigned int vecarg_count; + + unsigned int i, words, nargs, nfixedargs; + ffi_type **ptr; + double double_tmp; + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + signed int **si; + unsigned int **ui; + unsigned long **ul; + float **f; + double **d; + float128 **f128; + } p_argv; + unsigned long gprvalue; + unsigned long align; + + stacktop.c = (char *) stack + bytes; + gpr_base.ul = stacktop.ul - ASM_NEEDS_REGISTERS64 - NUM_GPR_ARG_REGISTERS64; + gpr_end.ul = gpr_base.ul + NUM_GPR_ARG_REGISTERS64; +#if _CALL_ELF == 2 + rest.ul = stack + 4 + NUM_GPR_ARG_REGISTERS64; +#else + rest.ul = stack + 6 + NUM_GPR_ARG_REGISTERS64; +#endif + fpr_base.d = gpr_base.d - NUM_FPR_ARG_REGISTERS64; + fparg_count = 0; + /* Place the vector args below the FPRs, if used, else the GPRs. 
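The i < nfixedargs tests that run through ffi_prep_args64 are what make variadic calls work: anonymous arguments are kept out of the FPRs and vector registers so the callee's va_arg finds them in the GPRs/parameter save area. A caller-side sketch using the public variadic interface, with printf as the callee:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *argt[3] = { &ffi_type_pointer, &ffi_type_sint32, &ffi_type_double };
  char *fmt = "%d %f\n";
  int n = 7;
  double d = 1.5;
  void *argv[3] = { &fmt, &n, &d };
  ffi_arg rc;

  /* One fixed argument (the format string), three in total: the int and the
     double are anonymous, so ffi_prep_args64 keeps the double out of the
     FPRs and printf's va_arg sees it in the parameter save area. */
  if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3, &ffi_type_sint32, argt) != FFI_OK)
    return 1;
  ffi_call(&cif, FFI_FN(printf), &rc, argv);
  return 0;
}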
*/ + if (ecif->cif->flags & FLAG_FP_ARGUMENTS) + vec_base.p = fpr_base.p & ~0xF; + else + vec_base.p = gpr_base.p; + vec_base.f128 -= NUM_VEC_ARG_REGISTERS64; + vecarg_count = 0; + next_arg.ul = gpr_base.ul; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_base.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) gpr_end.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) vec_base.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *next_arg.ul++ = (unsigned long) (char *) ecif->rvalue; + + /* Now for the arguments. */ + p_argv.v = ecif->avalue; + nargs = ecif->cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = ecif->cif->nfixedargs; + for (ptr = ecif->cif->arg_types, i = 0; + i < nargs; + i++, ptr++, p_argv.v++) + { + unsigned int elt, elnum; + + switch ((*ptr)->type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + next_arg.p = FFI_ALIGN (next_arg.p, 16); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 && i < nfixedargs) + memcpy (vec_base.f128++, *p_argv.f128, sizeof (float128)); + else + memcpy (next_arg.f128, *p_argv.f128, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 113); + FFI_ASSERT (flags & FLAG_VEC_ARGUMENTS); + break; + } + if ((ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + double_tmp = (*p_argv.d)[0]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + double_tmp = (*p_argv.d)[1]; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +# if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +# endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (__LDBL_MANT_DIG__ == 106); + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + } + /* Fall through. 
*/ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + double_tmp = **p_argv.d; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + *next_arg.d = double_tmp; +#endif + } + else + *next_arg.d = double_tmp; + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + double_tmp = **p_argv.f; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + { + *fpr_base.d++ = double_tmp; +#if _CALL_ELF != 2 + if ((flags & FLAG_COMPAT) != 0) + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } +#endif + } + else + { +# ifndef __LITTLE_ENDIAN__ + next_arg.f[1] = (float) double_tmp; +# else + next_arg.f[0] = (float) double_tmp; +# endif + } + if (++next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + fparg_count++; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_STRUCT: + if ((ecif->cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = (*ptr)->alignment; + if (align > 16) + align = 16; + if (align > 1) + { + next_arg.p = FFI_ALIGN (next_arg.p, align); + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + elt = discover_homogeneous_aggregate (ecif->cif->abi, *ptr, &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + float *f; + double *d; + float128 *f128; + } arg; + + arg.v = *p_argv.v; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (ecif->cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (vecarg_count < NUM_VEC_ARG_REGISTERS64 + && i < nfixedargs) + memcpy (vec_base.f128++, arg.f128, sizeof (float128)); + else + memcpy (next_arg.f128, arg.f128++, sizeof (float128)); + if (++next_arg.f128 == gpr_end.f128) + next_arg.f128 = rest.f128; + vecarg_count++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + double_tmp = *arg.f++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 + && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.f = (float) double_tmp; + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + fparg_count++; + } + while (--elnum != 0); + if ((next_arg.p & 7) != 0) + if (++next_arg.f == gpr_end.f) + next_arg.f = rest.f; + } + else + do + { + double_tmp = *arg.d++; + if (fparg_count < NUM_FPR_ARG_REGISTERS64 && i < nfixedargs) + *fpr_base.d++ = double_tmp; + else + *next_arg.d = double_tmp; + if (++next_arg.d == gpr_end.d) + next_arg.d = rest.d; + fparg_count++; + } + while (--elnum != 0); +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { + words = ((*ptr)->size + 7) / 8; + if (next_arg.ul >= gpr_base.ul && next_arg.ul + words > gpr_end.ul) + { + size_t first = gpr_end.c - next_arg.c; + memcpy (next_arg.c, *p_argv.c, first); + memcpy (rest.c, *p_argv.c + first, (*ptr)->size - first); + next_arg.c = rest.c + words * 8 - first; + } + else + { + char *where = next_arg.c; + +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
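The left-padding above is easiest to see with numbers: for a 3-byte struct the copy starts at offset 8 - 3 = 5, so the value ends up in the low-order bytes of its 8-byte slot, which is where a big-endian callee expects it. A standalone sketch of that placement (names are illustrative):

#include <stdio.h>
#include <string.h>

/* Right-justify a small (size < 8) struct in its 8-byte argument slot,
   as ffi_prep_args64 does for big-endian targets. */
static void store_small_struct_be(unsigned char slot[8], const void *src, size_t size)
{
  unsigned char *where = slot;
  if (size < 8)
    where += 8 - size;              /* size 3 -> copy starts at offset 5 */
  memcpy(where, src, size);
}

int main(void)
{
  unsigned char slot[8] = { 0 };
  unsigned char rgb[3] = { 0x11, 0x22, 0x33 };   /* stands in for a 3-byte struct */
  store_small_struct_be(slot, rgb, sizeof rgb);
  for (int i = 0; i < 8; i++)
    printf("%02x ", slot[i]);       /* 00 00 00 00 00 11 22 33 */
  printf("\n");
  return 0;
}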
*/ + if ((*ptr)->size < 8) + where += 8 - (*ptr)->size; +#endif + memcpy (where, *p_argv.c, (*ptr)->size); + next_arg.ul += words; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + } + } + break; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + case FFI_TYPE_UINT32: + gprvalue = **p_argv.ui; + goto putgpr; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + gprvalue = **p_argv.si; + goto putgpr; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + gprvalue = **p_argv.ul; + putgpr: + *next_arg.ul++ = gprvalue; + if (next_arg.ul == gpr_end.ul) + next_arg.ul = rest.ul; + break; + } + } + + FFI_ASSERT (flags & FLAG_4_GPR_ARGUMENTS + || (next_arg.ul >= gpr_base.ul + && next_arg.ul <= gpr_base.ul + 4)); +} + + +#if _CALL_ELF == 2 +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} +#endif + + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_linux64 (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ +#if _CALL_ELF == 2 + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp[0] = 0xe96c0018; /* 0: ld 11,2f-0b(12) */ + tramp[1] = 0xe98c0010; /* ld 12,1f-0b(12) */ + tramp[2] = 0x7d8903a6; /* mtctr 12 */ + tramp[3] = 0x4e800420; /* bctr */ + /* 1: .quad function_addr */ + /* 2: .quad context */ + *(void **) &tramp[4] = (void *) ffi_closure_LINUX64; + *(void **) &tramp[6] = codeloc; + flush_icache ((char *) tramp, (char *) codeloc, 4 * 4); +#else + void **tramp = (void **) &closure->tramp[0]; + + if (cif->abi < FFI_LINUX || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* Copy function address and TOC from ffi_closure_LINUX64 OPD. */ + memcpy (&tramp[0], (void **) ffi_closure_LINUX64, sizeof (void *)); + tramp[1] = codeloc; + memcpy (&tramp[2], (void **) ffi_closure_LINUX64 + 1, sizeof (void *)); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + + +int FFI_HIDDEN +ffi_closure_helper_LINUX64 (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pst, + ffi_dblfl *pfr, + float128 *pvec) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pst is the pointer to parameter save area + (r3-r10 are stored into its first 8 slots by ffi_closure_LINUX64) */ + /* pfr is the pointer to where f1-f13 are stored in ffi_closure_LINUX64 */ + /* pvec is the pointer to where v2-v13 are stored in ffi_closure_LINUX64 */ + + void **avalue; + ffi_type **arg_types; + unsigned long i, avn, nfixedargs; + ffi_dblfl *end_pfr = pfr + NUM_FPR_ARG_REGISTERS64; + float128 *end_pvec = pvec + NUM_VEC_ARG_REGISTERS64; + unsigned long align; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the + closure returns the data directly to the caller. 
*/ + if (cif->rtype->type == FFI_TYPE_STRUCT + && (cif->flags & FLAG_RETURNS_SMST) == 0) + { + rvalue = (void *) *pst; + pst++; + } + + i = 0; + avn = cif->nargs; +#if _CALL_ELF != 2 + nfixedargs = (unsigned) -1; + if ((cif->flags & FLAG_COMPAT) == 0) +#endif + nfixedargs = cif->nfixedargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) + { + unsigned int elt, elnum; + + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 7; + pst++; + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 6; + pst++; + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; + pst++; + break; +#endif + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_STRUCT: + if ((cif->abi & FFI_LINUX_STRUCT_ALIGN) != 0) + { + align = arg_types[i]->alignment; + if (align > 16) + align = 16; + if (align > 1) + pst = (unsigned long *) FFI_ALIGN ((size_t) pst, align); + } + elt = discover_homogeneous_aggregate (cif->abi, arg_types[i], &elnum); + if (elt) + { +#if _CALL_ELF == 2 + union { + void *v; + unsigned long *ul; + float *f; + double *d; + float128 *f128; + size_t p; + } to, from; + + /* Repackage the aggregate from its parts. The + aggregate size is not greater than the space taken by + the registers so store back to the register/parameter + save arrays. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (pvec + elnum <= end_pvec) + to.v = pvec; + else + to.v = pst; + } + else +#endif + if (pfr + elnum <= end_pfr) + to.v = pfr; + else + to.v = pst; + + avalue[i] = to.v; + from.ul = pst; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (elt == FFI_TYPE_LONGDOUBLE && + (cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + do + { + if (pvec < end_pvec && i < nfixedargs) + memcpy (to.f128, pvec++, sizeof (float128)); + else + memcpy (to.f128, from.f128, sizeof (float128)); + to.f128++; + from.f128++; + } + while (--elnum != 0); + } + else +#endif + if (elt == FFI_TYPE_FLOAT) + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.f = (float) pfr->d; + pfr++; + } + else + *to.f = *from.f; + to.f++; + from.f++; + } + while (--elnum != 0); + } + else + { + do + { + if (pfr < end_pfr && i < nfixedargs) + { + *to.d = pfr->d; + pfr++; + } + else + *to.d = *from.d; + to.d++; + from.d++; + } + while (--elnum != 0); + } +#else + if (elt == FFI_TYPE_FLOAT) + goto do_float; + else + goto do_double; +#endif + } + else + { +#ifndef __LITTLE_ENDIAN__ + /* Structures with size less than eight bytes are passed + left-padded. 
*/ + if (arg_types[i]->size < 8) + avalue[i] = (char *) pst + 8 - arg_types[i]->size; + else +#endif + avalue[i] = pst; + } + pst += (arg_types[i]->size + 7) / 8; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if ((cif->abi & FFI_LINUX_LONG_DOUBLE_IEEE128) != 0) + { + if (((unsigned long) pst & 0xF) != 0) + ++pst; + if (pvec < end_pvec && i < nfixedargs) + avalue[i] = pvec++; + else + avalue[i] = pst; + pst += 2; + break; + } + else if ((cif->abi & FFI_LINUX_LONG_DOUBLE_128) != 0) + { + if (pfr + 1 < end_pfr && i + 1 < nfixedargs) + { + avalue[i] = pfr; + pfr += 2; + } + else + { + if (pfr < end_pfr && i < nfixedargs) + { + /* Passed partly in f13 and partly on the stack. + Move it all to the stack. */ + *pst = *(unsigned long *) pfr; + pfr++; + } + avalue[i] = pst; + } + pst += 2; + break; + } + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: +#if _CALL_ELF != 2 + do_double: +#endif + /* On the outgoing stack all values are aligned to 8 */ + /* there are 13 64bit floating point registers */ + + if (pfr < end_pfr && i < nfixedargs) + { + avalue[i] = pfr; + pfr++; + } + else + avalue[i] = pst; + pst++; + break; + + case FFI_TYPE_FLOAT: +#if _CALL_ELF != 2 + do_float: +#endif + if (pfr < end_pfr && i < nfixedargs) + { + /* Float values are stored as doubles in the + ffi_closure_LINUX64 code. Fix them here. */ + pfr->f = (float) pfr->d; + avalue[i] = pfr; + pfr++; + } + else + { +#ifndef __LITTLE_ENDIAN__ + avalue[i] = (char *) pst + 4; +#else + avalue[i] = pst; +#endif + } + pst++; + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_LINUX64 how to perform return type promotions. */ + if ((cif->flags & FLAG_RETURNS_SMST) != 0) + { + if ((cif->flags & (FLAG_RETURNS_FP | FLAG_RETURNS_VEC)) == 0) + return FFI_V2_TYPE_SMALL_STRUCT + cif->rtype->size - 1; + else if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR_HOMOG; + else if ((cif->flags & FLAG_RETURNS_64BITS) != 0) + return FFI_V2_TYPE_DOUBLE_HOMOG; + else + return FFI_V2_TYPE_FLOAT_HOMOG; + } + if ((cif->flags & FLAG_RETURNS_VEC) != 0) + return FFI_V2_TYPE_VECTOR; + return cif->rtype->type; +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h new file mode 100644 index 0000000..960a5c4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_powerpc.h @@ -0,0 +1,105 @@ +/* ----------------------------------------------------------------------- + ffi_powerpc.h - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +enum { + /* The assembly depends on these exact flags. */ + /* These go in cr7 */ + FLAG_RETURNS_SMST = 1 << (31-31), /* Used for FFI_SYSV small structs. */ + FLAG_RETURNS_NOTHING = 1 << (31-30), + FLAG_RETURNS_FP = 1 << (31-29), + FLAG_RETURNS_VEC = 1 << (31-28), + + /* These go in cr6 */ + FLAG_RETURNS_64BITS = 1 << (31-27), + FLAG_RETURNS_128BITS = 1 << (31-26), + + FLAG_COMPAT = 1 << (31- 8), /* Not used by assembly */ + + /* These go in cr1 */ + FLAG_ARG_NEEDS_COPY = 1 << (31- 7), /* Used by sysv code */ + FLAG_ARG_NEEDS_PSAVE = FLAG_ARG_NEEDS_COPY, /* Used by linux64 code */ + FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */ + FLAG_4_GPR_ARGUMENTS = 1 << (31- 5), + FLAG_RETVAL_REFERENCE = 1 << (31- 4), + FLAG_VEC_ARGUMENTS = 1 << (31- 3), +}; + +typedef union +{ + float f; + double d; +} ffi_dblfl; + +#if defined(__FLOAT128_TYPE__) && defined(__HAVE_FLOAT128) +typedef _Float128 float128; +#elif defined(__FLOAT128__) +typedef __float128 float128; +#else +typedef char float128[16] __attribute__((aligned(16))); +#endif + +void FFI_HIDDEN ffi_closure_SYSV (void); +void FFI_HIDDEN ffi_go_closure_sysv (void); +void FFI_HIDDEN ffi_call_SYSV(extended_cif *, void (*)(void), void *, + unsigned, void *, int); + +void FFI_HIDDEN ffi_prep_types_sysv (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_sysv (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_sysv (ffi_closure *, + ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_SYSV (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, unsigned long *, + ffi_dblfl *, unsigned long *); + +void FFI_HIDDEN ffi_call_LINUX64(extended_cif *, void (*) (void), void *, + unsigned long, void *, long); +void FFI_HIDDEN ffi_closure_LINUX64 (void); +void FFI_HIDDEN ffi_go_closure_linux64 (void); + +void FFI_HIDDEN ffi_prep_types_linux64 (ffi_abi); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64 (ffi_cif *); +ffi_status FFI_HIDDEN ffi_prep_cif_linux64_var (ffi_cif *, unsigned int, + unsigned int); +void FFI_HIDDEN ffi_prep_args64 (extended_cif *, unsigned long *const); +ffi_status FFI_HIDDEN ffi_prep_closure_loc_linux64 (ffi_closure *, ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *); +int FFI_HIDDEN ffi_closure_helper_LINUX64 (ffi_cif *, + void (*) (ffi_cif *, void *, + void **, void *), + void *, void *, + unsigned long *, ffi_dblfl *, + float128 *); diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c new file mode 100644 index 0000000..4078e75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffi_sysv.c @@ -0,0 +1,923 @@ +/* ----------------------------------------------------------------------- + ffi_sysv.c - Copyright (C) 2013 IBM + Copyright (C) 2011 Anthony Green + Copyright (C) 2011 Kyle Moffett + Copyright (C) 2008 Red Hat, Inc + 
Copyright (C) 2007, 2008 Free Software Foundation, Inc + Copyright (c) 1998 Geoffrey Keating + + PowerPC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include "ffi.h" + +#ifndef POWERPC64 +#include "ffi_common.h" +#include "ffi_powerpc.h" + + +/* About the SYSV ABI. */ +#define ASM_NEEDS_REGISTERS 6 +#define NUM_GPR_ARG_REGISTERS 8 +#define NUM_FPR_ARG_REGISTERS 8 + + +#if HAVE_LONG_DOUBLE_VARIANT && FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +/* Adjust size of ffi_type_longdouble. */ +void FFI_HIDDEN +ffi_prep_types_sysv (ffi_abi abi) +{ + if ((abi & (FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128)) == FFI_SYSV) + { + ffi_type_longdouble.size = 8; + ffi_type_longdouble.alignment = 8; + } + else + { + ffi_type_longdouble.size = 16; + ffi_type_longdouble.alignment = 16; + } +} +#endif + +/* Transform long double, double and float to other types as per abi. */ +static int +translate_float (int abi, int type) +{ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + if (type == FFI_TYPE_LONGDOUBLE + && (abi & FFI_SYSV_LONG_DOUBLE_128) == 0) + type = FFI_TYPE_DOUBLE; +#endif + if ((abi & FFI_SYSV_SOFT_FLOAT) != 0) + { + if (type == FFI_TYPE_FLOAT) + type = FFI_TYPE_UINT32; + else if (type == FFI_TYPE_DOUBLE) + type = FFI_TYPE_UINT64; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + else if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_UINT128; + } + else if ((abi & FFI_SYSV_IBM_LONG_DOUBLE) == 0) + { + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + } + return type; +} + +/* Perform machine dependent cif processing */ +static ffi_status +ffi_prep_cif_sysv_core (ffi_cif *cif) +{ + ffi_type **ptr; + unsigned bytes; + unsigned i, fpr_count = 0, gpr_count = 0, stack_count = 0; + unsigned flags = cif->flags; + unsigned struct_copy_size = 0; + unsigned type = cif->rtype->type; + unsigned size = cif->rtype->size; + + /* The machine-independent calculation of cif->bytes doesn't work + for us. Redo the calculation. */ + + /* Space for the frame pointer, callee's LR, and the asm's temp regs. */ + bytes = (2 + ASM_NEEDS_REGISTERS) * sizeof (int); + + /* Space for the GPR registers. */ + bytes += NUM_GPR_ARG_REGISTERS * sizeof (int); + + /* Return value handling. 
The rules for SYSV are as follows: + - 32-bit (or less) integer values are returned in gpr3; + - Structures of size <= 4 bytes also returned in gpr3; + - 64-bit integer values and structures between 5 and 8 bytes are returned + in gpr3 and gpr4; + - Larger structures are allocated space and a pointer is passed as + the first argument. + - Single/double FP values are returned in fpr1; + - long doubles (if not equivalent to double) are returned in + fpr1,fpr2 for Linux and as for large structs for SysV. */ + + type = translate_float (cif->abi, type); + + switch (type) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ +#endif + case FFI_TYPE_DOUBLE: + flags |= FLAG_RETURNS_64BITS; + /* Fall through. */ + case FFI_TYPE_FLOAT: + flags |= FLAG_RETURNS_FP; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + flags |= FLAG_RETURNS_128BITS; + /* Fall through. */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags |= FLAG_RETURNS_64BITS; + break; + + case FFI_TYPE_STRUCT: + /* The final SYSV ABI says that structures smaller or equal 8 bytes + are returned in r3/r4. A draft ABI used by linux instead + returns them in memory. */ + if ((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + { + flags |= FLAG_RETURNS_SMST; + break; + } + gpr_count++; + flags |= FLAG_RETVAL_REFERENCE; + /* Fall through. */ + case FFI_TYPE_VOID: + flags |= FLAG_RETURNS_NOTHING; + break; + + default: + /* Returns 32-bit integer, or similar. Nothing to do here. */ + break; + } + + /* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the + first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest + goes on the stack. Structures and long doubles (if not equivalent + to double) are passed as a pointer to a copy of the structure. + Stuff on the stack needs to keep proper alignment. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + unsigned short typenum = (*ptr)->type; + + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS - 1) + { + fpr_count = NUM_FPR_ARG_REGISTERS; + /* 8-byte align long doubles. */ + stack_count += stack_count & 1; + stack_count += 4; + } + else + fpr_count += 2; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; +#endif + + case FFI_TYPE_DOUBLE: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + { + /* 8-byte align doubles. */ + stack_count += stack_count & 1; + stack_count += 2; + } + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_FLOAT: + if (fpr_count >= NUM_FPR_ARG_REGISTERS) + /* Yes, we don't follow the ABI, but neither does gcc. */ + stack_count += 1; + else + fpr_count += 1; +#ifdef __NO_FPRS__ + return FFI_BAD_ABI; +#endif + break; + + case FFI_TYPE_UINT128: + /* A long double in FFI_LINUX_SOFT_FLOAT can use only a set + of four consecutive gprs. If we do not have enough, we + have to adjust the gpr_count value. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS - 3) + gpr_count = NUM_GPR_ARG_REGISTERS; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 4; + else + gpr_count += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + /* 'long long' arguments are passed as two words, but + either both words must fit in registers or both go + on the stack. If they go on the stack, they must + be 8-byte-aligned. 
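As a worked example of the pairing restriction spelled out just below: for void f(int a, long long b, int c, long long d) the assignment is a in r3, b in (r5,r6), c in r7 and d in (r9,r10), with r4 and r8 left unused. A sketch of the counting in isolation (overflow to the stack is ignored here):

#include <stdio.h>

int main(void)
{
  const int words[] = { 1, 2, 1, 2 };   /* int, long long, int, long long */
  unsigned gpr = 0;                     /* 0 maps to r3, 7 maps to r10 */
  for (int i = 0; i < 4; i++)
    {
      if (words[i] == 2)
        gpr += gpr & 1;                 /* long long must start an even slot (r3, r5, ...) */
      printf("arg %d -> r%u\n", i, 3 + gpr);
      gpr += words[i];
    }
  return 0;                             /* prints r3, r5, r7, r9 */
}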
+ + Also, only certain register pairs can be used for + passing long long int -- specifically (r3,r4), (r5,r6), + (r7,r8), (r9,r10). */ + gpr_count += gpr_count & 1; + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + { + stack_count += stack_count & 1; + stack_count += 2; + } + else + gpr_count += 2; + break; + + case FFI_TYPE_STRUCT: + /* We must allocate space for a copy of these to enforce + pass-by-value. Pad the space up to a multiple of 16 + bytes (the maximum alignment required for anything under + the SYSV ABI). */ + struct_copy_size += ((*ptr)->size + 15) & ~0xF; + /* Fall through (allocate space for the pointer). */ + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* Everything else is passed as a 4-byte word in a GPR, either + the object itself or a pointer to it. */ + if (gpr_count >= NUM_GPR_ARG_REGISTERS) + stack_count += 1; + else + gpr_count += 1; + break; + + default: + FFI_ASSERT (0); + } + } + + if (fpr_count != 0) + flags |= FLAG_FP_ARGUMENTS; + if (gpr_count > 4) + flags |= FLAG_4_GPR_ARGUMENTS; + if (struct_copy_size != 0) + flags |= FLAG_ARG_NEEDS_COPY; + + /* Space for the FPR registers, if needed. */ + if (fpr_count != 0) + bytes += NUM_FPR_ARG_REGISTERS * sizeof (double); + + /* Stack space. */ + bytes += stack_count * sizeof (int); + + /* The stack space allocated needs to be a multiple of 16 bytes. */ + bytes = (bytes + 15) & ~0xF; + + /* Add in the space for the copied structures. */ + bytes += struct_copy_size; + + cif->flags = flags; + cif->bytes = bytes; + + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_sysv (ffi_cif *cif) +{ + if ((cif->abi & FFI_SYSV) == 0) + { + /* This call is from old code. Translate to new ABI values. */ + cif->flags |= FLAG_COMPAT; + switch (cif->abi) + { + default: + return FFI_BAD_ABI; + + case FFI_COMPAT_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_STRUCT_RET | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_GCC_SYSV: + cif->abi = FFI_SYSV | FFI_SYSV_LONG_DOUBLE_128; + break; + + case FFI_COMPAT_LINUX: + cif->abi = (FFI_SYSV | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + + case FFI_COMPAT_LINUX_SOFT_FLOAT: + cif->abi = (FFI_SYSV | FFI_SYSV_SOFT_FLOAT | FFI_SYSV_IBM_LONG_DOUBLE + | FFI_SYSV_LONG_DOUBLE_128); + break; + } + } + return ffi_prep_cif_sysv_core (cif); +} + +/* ffi_prep_args_SYSV is called by the assembly routine once stack space + has been allocated for the function's arguments. 
+ + The stack layout we want looks like this: + + | Return address from ffi_call_SYSV 4bytes | higher addresses + |--------------------------------------------| + | Previous backchain pointer 4 | stack pointer here + |--------------------------------------------|<+ <<< on entry to + | Saved r28-r31 4*4 | | ffi_call_SYSV + |--------------------------------------------| | + | GPR registers r3-r10 8*4 | | ffi_call_SYSV + |--------------------------------------------| | + | FPR registers f1-f8 (optional) 8*8 | | + |--------------------------------------------| | stack | + | Space for copied structures | | grows | + |--------------------------------------------| | down V + | Parameters that didn't fit in registers | | + |--------------------------------------------| | lower addresses + | Space for callee's LR 4 | | + |--------------------------------------------| | stack pointer here + | Current backchain pointer 4 |-/ during + |--------------------------------------------| <<< ffi_call_SYSV + +*/ + +void FFI_HIDDEN +ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack) +{ + const unsigned bytes = ecif->cif->bytes; + const unsigned flags = ecif->cif->flags; + + typedef union + { + char *c; + unsigned *u; + long long *ll; + float *f; + double *d; + } valp; + + /* 'stacktop' points at the previous backchain pointer. */ + valp stacktop; + + /* 'gpr_base' points at the space for gpr3, and grows upwards as + we use GPR registers. */ + valp gpr_base; + valp gpr_end; + +#ifndef __NO_FPRS__ + /* 'fpr_base' points at the space for fpr1, and grows upwards as + we use FPR registers. */ + valp fpr_base; + valp fpr_end; +#endif + + /* 'copy_space' grows down as we put structures in it. It should + stay 16-byte aligned. */ + valp copy_space; + + /* 'next_arg' grows up as we put parameters in it. */ + valp next_arg; + + int i; + ffi_type **ptr; +#ifndef __NO_FPRS__ + double double_tmp; +#endif + union + { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **ui; + long long **ll; + float **f; + double **d; + } p_argv; + size_t struct_copy_size; + unsigned gprvalue; + + stacktop.c = (char *) stack + bytes; + gpr_end.u = stacktop.u - ASM_NEEDS_REGISTERS; + gpr_base.u = gpr_end.u - NUM_GPR_ARG_REGISTERS; +#ifndef __NO_FPRS__ + fpr_end.d = gpr_base.d; + fpr_base.d = fpr_end.d - NUM_FPR_ARG_REGISTERS; + copy_space.c = ((flags & FLAG_FP_ARGUMENTS) ? fpr_base.c : gpr_base.c); +#else + copy_space.c = gpr_base.c; +#endif + next_arg.u = stack + 2; + + /* Check that everything starts aligned properly. */ + FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0); + FFI_ASSERT (((unsigned long) copy_space.c & 0xF) == 0); + FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0); + FFI_ASSERT ((bytes & 0xF) == 0); + FFI_ASSERT (copy_space.c >= next_arg.c); + + /* Deal with return values that are actually pass-by-reference. */ + if (flags & FLAG_RETVAL_REFERENCE) + *gpr_base.u++ = (unsigned) (char *) ecif->rvalue; + + /* Now for the arguments. 
*/ + p_argv.v = ecif->avalue; + for (ptr = ecif->cif->arg_types, i = ecif->cif->nargs; + i > 0; + i--, ptr++, p_argv.v++) + { + unsigned int typenum = (*ptr)->type; + + typenum = translate_float (ecif->cif->abi, typenum); + + /* Now test the translated value */ + switch (typenum) + { +#ifndef __NO_FPRS__ +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + double_tmp = (*p_argv.d)[0]; + + if (fpr_base.d >= fpr_end.d - 1) + { + fpr_base.d = fpr_end.d; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + double_tmp = (*p_argv.d)[1]; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + { + *fpr_base.d++ = double_tmp; + double_tmp = (*p_argv.d)[1]; + *fpr_base.d++ = double_tmp; + } + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +# endif + case FFI_TYPE_DOUBLE: + double_tmp = **p_argv.d; + + if (fpr_base.d >= fpr_end.d) + { + if (((next_arg.u - stack) & 1) != 0) + next_arg.u += 1; + *next_arg.d = double_tmp; + next_arg.u += 2; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; + + case FFI_TYPE_FLOAT: + double_tmp = **p_argv.f; + if (fpr_base.d >= fpr_end.d) + { + *next_arg.f = (float) double_tmp; + next_arg.u += 1; + } + else + *fpr_base.d++ = double_tmp; + FFI_ASSERT (flags & FLAG_FP_ARGUMENTS); + break; +#endif /* have FPRs */ + + case FFI_TYPE_UINT128: + /* The soft float ABI for long doubles works like this, a long double + is passed in four consecutive GPRs if available. A maximum of 2 + long doubles can be passed in gprs. If we do not have 4 GPRs + left, the long double is passed on the stack, 4-byte aligned. */ + if (gpr_base.u >= gpr_end.u - 3) + { + unsigned int ii; + gpr_base.u = gpr_end.u; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *next_arg.u++ = int_tmp; + } + } + else + { + unsigned int ii; + for (ii = 0; ii < 4; ii++) + { + unsigned int int_tmp = (*p_argv.ui)[ii]; + *gpr_base.u++ = int_tmp; + } + } + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (gpr_base.u >= gpr_end.u - 1) + { + gpr_base.u = gpr_end.u; + if (((next_arg.u - stack) & 1) != 0) + next_arg.u++; + *next_arg.ll = **p_argv.ll; + next_arg.u += 2; + } + else + { + /* The abi states only certain register pairs can be + used for passing long long int specifically (r3,r4), + (r5,r6), (r7,r8), (r9,r10). If next arg is long long + but not correct starting register of pair then skip + until the proper starting register. */ + if (((gpr_end.u - gpr_base.u) & 1) != 0) + gpr_base.u++; + *gpr_base.ll++ = **p_argv.ll; + } + break; + + case FFI_TYPE_STRUCT: + struct_copy_size = ((*ptr)->size + 15) & ~0xF; + copy_space.c -= struct_copy_size; + memcpy (copy_space.c, *p_argv.c, (*ptr)->size); + + gprvalue = (unsigned long) copy_space.c; + + FFI_ASSERT (copy_space.c > next_arg.c); + FFI_ASSERT (flags & FLAG_ARG_NEEDS_COPY); + goto putgpr; + + case FFI_TYPE_UINT8: + gprvalue = **p_argv.uc; + goto putgpr; + case FFI_TYPE_SINT8: + gprvalue = **p_argv.sc; + goto putgpr; + case FFI_TYPE_UINT16: + gprvalue = **p_argv.us; + goto putgpr; + case FFI_TYPE_SINT16: + gprvalue = **p_argv.ss; + goto putgpr; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + + gprvalue = **p_argv.ui; + + putgpr: + if (gpr_base.u >= gpr_end.u) + *next_arg.u++ = gprvalue; + else + *gpr_base.u++ = gprvalue; + break; + } + } + + /* Check that we didn't overrun the stack... 
*/ + FFI_ASSERT (copy_space.c >= next_arg.c); + FFI_ASSERT (gpr_base.u <= gpr_end.u); +#ifndef __NO_FPRS__ + FFI_ASSERT (fpr_base.u <= fpr_end.u); +#endif + FFI_ASSERT (((flags & FLAG_4_GPR_ARGUMENTS) != 0) + == (gpr_end.u - gpr_base.u < 4)); +} + +#define MIN_CACHE_LINE_SIZE 8 + +static void +flush_icache (char *wraddr, char *xaddr, int size) +{ + int i; + for (i = 0; i < size; i += MIN_CACHE_LINE_SIZE) + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" + : : "r" (xaddr + i), "r" (wraddr + i) : "memory"); + __asm__ volatile ("icbi 0,%0;" "dcbf 0,%1;" "sync;" "isync;" + : : "r"(xaddr + size - 1), "r"(wraddr + size - 1) + : "memory"); +} + +ffi_status FFI_HIDDEN +ffi_prep_closure_loc_sysv (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi < FFI_SYSV || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + tramp[0] = 0x7c0802a6; /* mflr r0 */ + tramp[1] = 0x429f0005; /* bcl 20,31,.+4 */ + tramp[2] = 0x7d6802a6; /* mflr r11 */ + tramp[3] = 0x7c0803a6; /* mtlr r0 */ + tramp[4] = 0x800b0018; /* lwz r0,24(r11) */ + tramp[5] = 0x816b001c; /* lwz r11,28(r11) */ + tramp[6] = 0x7c0903a6; /* mtctr r0 */ + tramp[7] = 0x4e800420; /* bctr */ + *(void **) &tramp[8] = (void *) ffi_closure_SYSV; /* function */ + *(void **) &tramp[9] = codeloc; /* context */ + + /* Flush the icache. */ + flush_icache ((char *)tramp, (char *)codeloc, 8 * 4); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + entry, r11 holds the address of the closure. + After storing the registers that could possibly contain + parameters to be passed into the stack frame and setting + up space for a return value, ffi_closure_SYSV invokes the + following helper function to do most of the work. */ + +int +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *rvalue, + unsigned long *pgr, + ffi_dblfl *pfr, + unsigned long *pst) +{ + /* rvalue is the pointer to space for return value in closure assembly */ + /* pgr is the pointer to where r3-r10 are stored in ffi_closure_SYSV */ + /* pfr is the pointer to where f1-f8 are stored in ffi_closure_SYSV */ + /* pst is the pointer to outgoing parameter stack in original caller */ + + void ** avalue; + ffi_type ** arg_types; + long i, avn; +#ifndef __NO_FPRS__ + long nf = 0; /* number of floating registers already used */ +#endif + long ng = 0; /* number of general registers already used */ + + unsigned size = cif->rtype->size; + unsigned short rtypenum = cif->rtype->type; + + avalue = alloca (cif->nargs * sizeof (void *)); + + /* First translate for softfloat/nonlinux */ + rtypenum = translate_float (cif->abi, rtypenum); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. + For FFI_SYSV the result is passed in r3/r4 if the struct size is less + or equal 8 bytes. */ + if (rtypenum == FFI_TYPE_STRUCT + && !((cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8)) + { + rvalue = (void *) *pgr; + ng++; + pgr++; + } + + i = 0; + avn = cif->nargs; + arg_types = cif->arg_types; + + /* Grab the addresses of the arguments from the stack frame. */ + while (i < avn) { + unsigned short typenum = arg_types[i]->type; + + /* We may need to handle some values depending on ABI. 
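A note on the magic offsets in the trampoline built by ffi_prep_closure_loc_sysv above: bcl 20,31,.+4 leaves the address of tramp[2] in the link register, so after mflr r11 the loads at 24(r11) and 28(r11) land on tramp[8] and tramp[9], which is exactly where ffi_closure_SYSV and codeloc were stored. The arithmetic spelled out (illustrative only):

#include <stdio.h>

int main(void)
{
  /* r11 holds the address of tramp[2] (byte offset 8 into the trampoline). */
  unsigned r11_offset = 2 * 4;
  /* lwz r0,24(r11) and lwz r11,28(r11) therefore read these words: */
  unsigned fn_word  = (r11_offset + 24) / 4;   /* = 8 -> ffi_closure_SYSV  */
  unsigned ctx_word = (r11_offset + 28) / 4;   /* = 9 -> codeloc (context) */
  printf("function pointer in tramp[%u], context in tramp[%u]\n", fn_word, ctx_word);
  return 0;
}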
*/ + typenum = translate_float (cif->abi, typenum); + + switch (typenum) + { +#ifndef __NO_FPRS__ + case FFI_TYPE_FLOAT: + /* Unfortunately float values are stored as doubles + in the ffi_closure_SYSV code (since we don't check + the type in that routine). */ + if (nf < NUM_FPR_ARG_REGISTERS) + { + /* FIXME? here we are really changing the values + stored in the original calling routines outgoing + parameter stack. This is probably a really + naughty thing to do but... */ + double temp = pfr->d; + pfr->f = (float) temp; + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + avalue[i] = pst; + pst += 1; + } + break; + + case FFI_TYPE_DOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS) + { + avalue[i] = pfr; + nf++; + pfr++; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + } + break; + +# if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + if (nf < NUM_FPR_ARG_REGISTERS - 1) + { + avalue[i] = pfr; + pfr += 2; + nf += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 4; + nf = 8; + } + break; +# endif +#endif + + case FFI_TYPE_UINT128: + /* Test if for the whole long double, 4 gprs are available. + otherwise the stuff ends up on the stack. */ + if (ng < NUM_GPR_ARG_REGISTERS - 3) + { + avalue[i] = pgr; + pgr += 4; + ng += 4; + } + else + { + avalue[i] = pst; + pst += 4; + ng = 8+4; + } + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 3; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 3; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: +#ifndef __LITTLE_ENDIAN__ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (char *) pgr + 2; + ng++; + pgr++; + } + else + { + avalue[i] = (char *) pst + 2; + pst++; + } + break; +#endif + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = pgr; + ng++; + pgr++; + } + else + { + avalue[i] = pst; + pst++; + } + break; + + case FFI_TYPE_STRUCT: + /* Structs are passed by reference. The address will appear in a + gpr if it is one of the first 8 arguments. */ + if (ng < NUM_GPR_ARG_REGISTERS) + { + avalue[i] = (void *) *pgr; + ng++; + pgr++; + } + else + { + avalue[i] = (void *) *pst; + pst++; + } + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + /* Passing long long ints are complex, they must + be passed in suitable register pairs such as + (r3,r4) or (r5,r6) or (r6,r7), or (r7,r8) or (r9,r10) + and if the entire pair aren't available then the outgoing + parameter stack is used for both but an alignment of 8 + must will be kept. So we must either look in pgr + or pst to find the correct address for this type + of parameter. */ + if (ng < NUM_GPR_ARG_REGISTERS - 1) + { + if (ng & 1) + { + /* skip r4, r6, r8 as starting points */ + ng++; + pgr++; + } + avalue[i] = pgr; + ng += 2; + pgr += 2; + } + else + { + if (((long) pst) & 4) + pst++; + avalue[i] = pst; + pst += 2; + ng = NUM_GPR_ARG_REGISTERS; + } + break; + + default: + FFI_ASSERT (0); + } + + i++; + } + + (*fun) (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. + Because the FFI_SYSV ABI returns the structures <= 8 bytes in + r3/r4 we have to tell ffi_closure_SYSV how to treat them. We + combine the base type FFI_SYSV_TYPE_SMALL_STRUCT with the size of + the struct less one. We never have a struct with size zero. + See the comment in ffitarget.h about ordering. 
*/ + if (rtypenum == FFI_TYPE_STRUCT + && (cif->abi & FFI_SYSV_STRUCT_RET) != 0 && size <= 8) + return FFI_SYSV_TYPE_SMALL_STRUCT - 1 + size; + return rtypenum; +} +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffitarget.h new file mode 100644 index 0000000..7fb9a93 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ffitarget.h @@ -0,0 +1,204 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (C) 2007, 2008, 2010 Free Software Foundation, Inc + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for PowerPC. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined (POWERPC) && defined (__powerpc64__) /* linux64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#elif defined (POWERPC_DARWIN) && defined (__ppc64__) /* Darwin64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#ifndef POWERPC_DARWIN64 +#define POWERPC_DARWIN64 +#endif +#elif defined (POWERPC_AIX) && defined (__64BIT__) /* AIX64 */ +#ifndef POWERPC64 +#define POWERPC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + +#if defined (POWERPC_AIX) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_AIX, + FFI_LAST_ABI + +#elif defined (POWERPC_DARWIN) + FFI_AIX, + FFI_DARWIN, + FFI_DEFAULT_ABI = FFI_DARWIN, + FFI_LAST_ABI + +#else + /* The FFI_COMPAT values are used by old code. Since libffi may be + a shared library we have to support old values for backwards + compatibility. */ + FFI_COMPAT_SYSV, + FFI_COMPAT_GCC_SYSV, + FFI_COMPAT_LINUX64, + FFI_COMPAT_LINUX, + FFI_COMPAT_LINUX_SOFT_FLOAT, + +# if defined (POWERPC64) + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 64-bit linux. We + only need worry about FFI_COMPAT_LINUX64, but to be safe avoid + all old values. */ + FFI_LINUX = 8, + /* This and following bits can reuse FFI_COMPAT values. 
*/ + FFI_LINUX_STRUCT_ALIGN = 1, + FFI_LINUX_LONG_DOUBLE_128 = 2, + FFI_LINUX_LONG_DOUBLE_IEEE128 = 4, + FFI_DEFAULT_ABI = (FFI_LINUX +# ifdef __STRUCT_PARM_ALIGN__ + | FFI_LINUX_STRUCT_ALIGN +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_LINUX_LONG_DOUBLE_128 +# ifdef __LONG_DOUBLE_IEEE128__ + | FFI_LINUX_LONG_DOUBLE_IEEE128 +# endif +# endif + ), + FFI_LAST_ABI = 16 + +# else + /* This bit, always set in new code, must not be set in any of the + old FFI_COMPAT values that might be used for 32-bit linux/sysv/bsd. */ + FFI_SYSV = 8, + /* This and following bits can reuse FFI_COMPAT values. */ + FFI_SYSV_SOFT_FLOAT = 1, + FFI_SYSV_STRUCT_RET = 2, + FFI_SYSV_IBM_LONG_DOUBLE = 4, + FFI_SYSV_LONG_DOUBLE_128 = 16, + + FFI_DEFAULT_ABI = (FFI_SYSV +# ifdef __NO_FPRS__ + | FFI_SYSV_SOFT_FLOAT +# endif +# if (defined (__SVR4_STRUCT_RETURN) \ + || defined (POWERPC_FREEBSD) && !defined (__AIX_STRUCT_RETURN)) + | FFI_SYSV_STRUCT_RET +# endif +# if __LDBL_MANT_DIG__ == 106 + | FFI_SYSV_IBM_LONG_DOUBLE +# endif +# ifdef __LONG_DOUBLE_128__ + | FFI_SYSV_LONG_DOUBLE_128 +# endif + ), + FFI_LAST_ABI = 32 +# endif +#endif + +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#if defined (POWERPC) || defined (POWERPC_FREEBSD) +# define FFI_GO_CLOSURES 1 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned nfixedargs +#endif +#if defined (POWERPC_AIX) +# define FFI_GO_CLOSURES 1 +#endif + +/* ppc_closure.S and linux64_closure.S expect this. */ +#define FFI_PPC_TYPE_LAST FFI_TYPE_POINTER + +/* We define additional types below. If generic types are added that + must be supported by powerpc libffi then it is likely that + FFI_PPC_TYPE_LAST needs increasing *and* the jump tables in + ppc_closure.S and linux64_closure.S be extended. */ + +#if !(FFI_TYPE_LAST == FFI_PPC_TYPE_LAST \ + || (FFI_TYPE_LAST == FFI_TYPE_COMPLEX \ + && !defined FFI_TARGET_HAS_COMPLEX_TYPE)) +# error "You likely have a broken powerpc libffi" +#endif + +/* Needed for soft-float long-double-128 support. */ +#define FFI_TYPE_UINT128 (FFI_PPC_TYPE_LAST + 1) + +/* Needed for FFI_SYSV small structure returns. */ +#define FFI_SYSV_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 2) + +/* Used by ELFv2 for homogenous structure returns. 
*/ +#define FFI_V2_TYPE_VECTOR (FFI_PPC_TYPE_LAST + 1) +#define FFI_V2_TYPE_VECTOR_HOMOG (FFI_PPC_TYPE_LAST + 2) +#define FFI_V2_TYPE_FLOAT_HOMOG (FFI_PPC_TYPE_LAST + 3) +#define FFI_V2_TYPE_DOUBLE_HOMOG (FFI_PPC_TYPE_LAST + 4) +#define FFI_V2_TYPE_SMALL_STRUCT (FFI_PPC_TYPE_LAST + 5) + +#if _CALL_ELF == 2 +# define FFI_TRAMPOLINE_SIZE 32 +#else +# if defined(POWERPC64) || defined(POWERPC_AIX) +# if defined(POWERPC_DARWIN64) +# define FFI_TRAMPOLINE_SIZE 48 +# else +# define FFI_TRAMPOLINE_SIZE 24 +# endif +# else /* POWERPC || POWERPC_AIX */ +# define FFI_TRAMPOLINE_SIZE 40 +# endif +#endif + +#ifndef LIBFFI_ASM +#if defined(POWERPC_DARWIN) || defined(POWERPC_AIX) +struct ffi_aix_trampoline_struct { + void * code_pointer; /* Pointer to ffi_closure_ASM */ + void * toc; /* TOC */ + void * static_chain; /* Pointer to closure */ +}; +#endif +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/linux64.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/linux64.S new file mode 100644 index 0000000..e92d64a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/linux64.S @@ -0,0 +1,291 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#ifdef POWERPC64 + .hidden ffi_call_LINUX64 + .globl ffi_call_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_call_LINUX64: +# ifndef __PCREL__ + addis %r2, %r12, .TOC.-ffi_call_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_call_LINUX64@l +# endif + .localentry ffi_call_LINUX64, . - ffi_call_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_call_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_call_LINUX64,.TOC.@tocbase,0 + .type ffi_call_LINUX64,@function + .text +.L.ffi_call_LINUX64: +# else + .hidden .ffi_call_LINUX64 + .globl .ffi_call_LINUX64 + .quad .ffi_call_LINUX64,.TOC.@tocbase,0 + .size ffi_call_LINUX64,24 + .type .ffi_call_LINUX64,@function + .text +.ffi_call_LINUX64: +# endif +# endif + mflr %r0 + std %r28, -32(%r1) + std %r29, -24(%r1) + std %r30, -16(%r1) + std %r31, -8(%r1) + std %r7, 8(%r1) /* closure, saved in cr field. */ + std %r0, 16(%r1) + + mr %r28, %r1 /* our AP. 
*/ + .cfi_def_cfa_register 28 + .cfi_offset 65, 16 + .cfi_offset 31, -8 + .cfi_offset 30, -16 + .cfi_offset 29, -24 + .cfi_offset 28, -32 + + stdux %r1, %r1, %r8 + mr %r31, %r6 /* flags, */ + mr %r30, %r5 /* rvalue, */ + mr %r29, %r4 /* function address. */ +/* Save toc pointer, not for the ffi_prep_args64 call, but for the later + bctrl function call. */ +# if _CALL_ELF == 2 + std %r2, 24(%r1) +# else + std %r2, 40(%r1) +# endif + + /* Call ffi_prep_args64. */ + mr %r4, %r1 +# if defined _CALL_LINUX || _CALL_ELF == 2 +# ifdef __PCREL__ + bl ffi_prep_args64@notoc +# else + bl ffi_prep_args64 + nop +# endif +# else + bl .ffi_prep_args64 + nop +# endif + +# if _CALL_ELF == 2 + mr %r12, %r29 +# else + ld %r12, 0(%r29) + ld %r2, 8(%r29) +# endif + /* Now do the call. */ + /* Set up cr1 with bits 3-7 of the flags. */ + mtcrf 0xc0, %r31 + + /* Get the address to call into CTR. */ + mtctr %r12 + /* Load all those argument registers. */ + addi %r29, %r28, -32-(8*8) + ld %r3, (0*8)(%r29) + ld %r4, (1*8)(%r29) + ld %r5, (2*8)(%r29) + ld %r6, (3*8)(%r29) + bf- 5, 1f + ld %r7, (4*8)(%r29) + ld %r8, (5*8)(%r29) + ld %r9, (6*8)(%r29) + ld %r10, (7*8)(%r29) +1: + + /* Load all the FP registers. */ + bf- 6, 2f + addi %r29, %r29, -(14*8) + lfd %f1, ( 1*8)(%r29) + lfd %f2, ( 2*8)(%r29) + lfd %f3, ( 3*8)(%r29) + lfd %f4, ( 4*8)(%r29) + lfd %f5, ( 5*8)(%r29) + lfd %f6, ( 6*8)(%r29) + lfd %f7, ( 7*8)(%r29) + lfd %f8, ( 8*8)(%r29) + lfd %f9, ( 9*8)(%r29) + lfd %f10, (10*8)(%r29) + lfd %f11, (11*8)(%r29) + lfd %f12, (12*8)(%r29) + lfd %f13, (13*8)(%r29) +2: + + /* Load all the vector registers. */ + bf- 3, 3f + addi %r29, %r29, -16 + lvx %v13, 0, %r29 + addi %r29, %r29, -16 + lvx %v12, 0, %r29 + addi %r29, %r29, -16 + lvx %v11, 0, %r29 + addi %r29, %r29, -16 + lvx %v10, 0, %r29 + addi %r29, %r29, -16 + lvx %v9, 0, %r29 + addi %r29, %r29, -16 + lvx %v8, 0, %r29 + addi %r29, %r29, -16 + lvx %v7, 0, %r29 + addi %r29, %r29, -16 + lvx %v6, 0, %r29 + addi %r29, %r29, -16 + lvx %v5, 0, %r29 + addi %r29, %r29, -16 + lvx %v4, 0, %r29 + addi %r29, %r29, -16 + lvx %v3, 0, %r29 + addi %r29, %r29, -16 + lvx %v2, 0, %r29 +3: + + /* Make the call. */ + ld %r11, 8(%r28) + bctrl + + /* This must follow the call immediately, the unwinder + uses this to find out if r2 has been saved or not. */ +# if _CALL_ELF == 2 + ld %r2, 24(%r1) +# else + ld %r2, 40(%r1) +# endif + + /* Now, deal with the return value. */ + mtcrf 0x01, %r31 + bt 31, .Lstruct_return_value + bt 30, .Ldone_return_value + bt 29, .Lfp_return_value + bt 28, .Lvec_return_value + std %r3, 0(%r30) + /* Fall through... */ + +.Ldone_return_value: + /* Restore the registers we used and return. 
*/ + mr %r1, %r28 + .cfi_def_cfa_register 1 + ld %r0, 16(%r28) + ld %r28, -32(%r28) + mtlr %r0 + ld %r29, -24(%r1) + ld %r30, -16(%r1) + ld %r31, -8(%r1) + blr + +.Lvec_return_value: + stvx %v2, 0, %r30 + b .Ldone_return_value + +.Lfp_return_value: + .cfi_def_cfa_register 28 + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_return_value + stfd %f1, 0(%r30) + bf 26, .Ldone_return_value + stfd %f2, 8(%r30) + b .Ldone_return_value +.Lfloat_return_value: + stfs %f1, 0(%r30) + b .Ldone_return_value + +.Lstruct_return_value: + bf 29, .Lvec_homog_or_small_struct + mtcrf 0x02, %r31 /* cr6 */ + bf 27, .Lfloat_homog_return_value + stfd %f1, 0(%r30) + stfd %f2, 8(%r30) + stfd %f3, 16(%r30) + stfd %f4, 24(%r30) + stfd %f5, 32(%r30) + stfd %f6, 40(%r30) + stfd %f7, 48(%r30) + stfd %f8, 56(%r30) + b .Ldone_return_value + +.Lfloat_homog_return_value: + stfs %f1, 0(%r30) + stfs %f2, 4(%r30) + stfs %f3, 8(%r30) + stfs %f4, 12(%r30) + stfs %f5, 16(%r30) + stfs %f6, 20(%r30) + stfs %f7, 24(%r30) + stfs %f8, 28(%r30) + b .Ldone_return_value + +.Lvec_homog_or_small_struct: + bf 28, .Lsmall_struct + stvx %v2, 0, %r30 + addi %r30, %r30, 16 + stvx %v3, 0, %r30 + addi %r30, %r30, 16 + stvx %v4, 0, %r30 + addi %r30, %r30, 16 + stvx %v5, 0, %r30 + addi %r30, %r30, 16 + stvx %v6, 0, %r30 + addi %r30, %r30, 16 + stvx %v7, 0, %r30 + addi %r30, %r30, 16 + stvx %v8, 0, %r30 + addi %r30, %r30, 16 + stvx %v9, 0, %r30 + b .Ldone_return_value + +.Lsmall_struct: + std %r3, 0(%r30) + std %r4, 8(%r30) + b .Ldone_return_value + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_call_LINUX64,.-ffi_call_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_call_LINUX64,.-.L.ffi_call_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,4,0,0 + .size .ffi_call_LINUX64,.-.ffi_call_LINUX64 +# endif +# endif + +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/linux64_closure.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/linux64_closure.S new file mode 100644 index 0000000..3469a2c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/linux64_closure.S @@ -0,0 +1,564 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC64 Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include + + .file "linux64_closure.S" + +#ifdef POWERPC64 + FFI_HIDDEN (ffi_closure_LINUX64) + .globl ffi_closure_LINUX64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_closure_LINUX64: +# ifndef __PCREL__ + addis %r2, %r12, .TOC.-ffi_closure_LINUX64@ha + addi %r2, %r2, .TOC.-ffi_closure_LINUX64@l +# endif + .localentry ffi_closure_LINUX64, . - ffi_closure_LINUX64 +# else + .section ".opd","aw" + .align 3 +ffi_closure_LINUX64: +# ifdef _CALL_LINUX + .quad .L.ffi_closure_LINUX64,.TOC.@tocbase,0 + .type ffi_closure_LINUX64,@function + .text +.L.ffi_closure_LINUX64: +# else + FFI_HIDDEN (.ffi_closure_LINUX64) + .globl .ffi_closure_LINUX64 + .quad .ffi_closure_LINUX64,.TOC.@tocbase,0 + .size ffi_closure_LINUX64,24 + .type .ffi_closure_LINUX64,@function + .text +.ffi_closure_LINUX64: +# endif +# endif + +# if _CALL_ELF == 2 +# ifdef __VEC__ +# 32 byte special reg save area + 64 byte parm save area +# + 128 byte retval area + 13*8 fpr save area + 12*16 vec save area + round to 16 +# define STACKFRAME 528 +# else +# 32 byte special reg save area + 64 byte parm save area +# + 64 byte retval area + 13*8 fpr save area + round to 16 +# define STACKFRAME 272 +# endif +# define PARMSAVE 32 +# define RETVAL PARMSAVE+64 +# else +# 48 bytes special reg save area + 64 bytes parm save area +# + 16 bytes retval area + 13*8 bytes fpr save area + round to 16 +# define STACKFRAME 240 +# define PARMSAVE 48 +# define RETVAL PARMSAVE+64 +# endif + +# if _CALL_ELF == 2 + ld %r12, FFI_TRAMPOLINE_SIZE(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + # copy r2 to r11 and load TOC into r2 + mr %r11, %r2 + ld %r2, 16(%r2) + + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
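Editorial note: the ffi_closure_LINUX64 entry above is the target a closure trampoline branches to; user code never calls it directly. As a minimal, hedged sketch of the public closure API whose calls end up routed through this code (standard ffi.h functions; the handler name and the bias value are invented for illustration):

#include <ffi.h>
#include <stdio.h>

/* Invoked by libffi whenever the closure's generated code pointer is called. */
static void add_bias(ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif;
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
}

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[1] = { &ffi_type_sint };
  void *code;
  ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
  int bias = 42;

  if (closure
      && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK
      && ffi_prep_closure_loc(closure, &cif, add_bias, &bias, code) == FFI_OK)
    {
      int (*fn)(int) = (int (*)(int)) code;    /* call through the trampoline */
      printf("%d\n", fn(1));                   /* expected to print 43 */
    }

  if (closure)
    ffi_closure_free(closure);
  return 0;
}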
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + ld %r4, FFI_TRAMPOLINE_SIZE+8(%r11) + # closure->user_data + ld %r5, FFI_TRAMPOLINE_SIZE+16(%r11) + +.Ldoclosure: + # next save fpr 1 to fpr 13 + stfd %f1, -104+(0*8)(%r1) + stfd %f2, -104+(1*8)(%r1) + stfd %f3, -104+(2*8)(%r1) + stfd %f4, -104+(3*8)(%r1) + stfd %f5, -104+(4*8)(%r1) + stfd %f6, -104+(5*8)(%r1) + stfd %f7, -104+(6*8)(%r1) + stfd %f8, -104+(7*8)(%r1) + stfd %f9, -104+(8*8)(%r1) + stfd %f10, -104+(9*8)(%r1) + stfd %f11, -104+(10*8)(%r1) + stfd %f12, -104+(11*8)(%r1) + stfd %f13, -104+(12*8)(%r1) + + # load up the pointer to the saved fpr registers + addi %r8, %r1, -104 + +# ifdef __VEC__ + # load up the pointer to the saved vector registers + # 8 bytes padding for 16-byte alignment at -112(%r1) + addi %r9, %r8, -24 + stvx %v13, 0, %r9 + addi %r9, %r9, -16 + stvx %v12, 0, %r9 + addi %r9, %r9, -16 + stvx %v11, 0, %r9 + addi %r9, %r9, -16 + stvx %v10, 0, %r9 + addi %r9, %r9, -16 + stvx %v9, 0, %r9 + addi %r9, %r9, -16 + stvx %v8, 0, %r9 + addi %r9, %r9, -16 + stvx %v7, 0, %r9 + addi %r9, %r9, -16 + stvx %v6, 0, %r9 + addi %r9, %r9, -16 + stvx %v5, 0, %r9 + addi %r9, %r9, -16 + stvx %v4, 0, %r9 + addi %r9, %r9, -16 + stvx %v3, 0, %r9 + addi %r9, %r9, -16 + stvx %v2, 0, %r9 +# endif + + # load up the pointer to the result storage + addi %r6, %r1, -STACKFRAME+RETVAL + + stdu %r1, -STACKFRAME(%r1) + .cfi_def_cfa_offset STACKFRAME + .cfi_offset 65, 16 + + # make the call +# if defined _CALL_LINUX || _CALL_ELF == 2 +# ifdef __PCREL__ + bl ffi_closure_helper_LINUX64@notoc +.Lret: +# else + bl ffi_closure_helper_LINUX64 +.Lret: + nop +# endif +# else + bl .ffi_closure_helper_LINUX64 +.Lret: + nop +# endif + + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + ld %r0, STACKFRAME+16(%r1) + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + bge .Lsmall + mflr %r4 # move address of .Lret to r4 + sldi %r3, %r3, 4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + add %r3, %r3, %r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
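Editorial note: the dispatch described just above avoids a memory lookup entirely. Because every ret_typeX fragment is padded to exactly 16 bytes, the branch target is pure arithmetic on the return-type code. A rough C equivalent of that address computation (illustrative only; the names are invented):

#include <stdint.h>

#define RET_SLOT_SIZE 16  /* one 4-instruction fragment per return type */

/* Mirrors "sldi %r3,%r3,4; add %r3,%r3,%r4; mtctr %r3; bctr" above. */
static uintptr_t ret_dispatch_target(uintptr_t ret_type0_addr, unsigned return_type)
{
  return ret_type0_addr + ((uintptr_t) return_type << 4);  /* << 4 == * RET_SLOT_SIZE */
}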
+ .align 4 + +.Lret_type0: +# case FFI_TYPE_VOID + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_INT +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_FLOAT + lfs %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_DOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_LONGDOUBLE + lfd %f1, RETVAL+0(%r1) + mtlr %r0 + lfd %f2, RETVAL+8(%r1) + b .Lfinish +# case FFI_TYPE_UINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT8 +# ifdef __LITTLE_ENDIAN__ + lbz %r3, RETVAL+0(%r1) +# else + lbz %r3, RETVAL+7(%r1) +# endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish +# case FFI_TYPE_UINT16 +# ifdef __LITTLE_ENDIAN__ + lhz %r3, RETVAL+0(%r1) +# else + lhz %r3, RETVAL+6(%r1) +# endif + mtlr %r0 +.Lfinish: + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT16 +# ifdef __LITTLE_ENDIAN__ + lha %r3, RETVAL+0(%r1) +# else + lha %r3, RETVAL+6(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT32 +# ifdef __LITTLE_ENDIAN__ + lwz %r3, RETVAL+0(%r1) +# else + lwz %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT32 +# ifdef __LITTLE_ENDIAN__ + lwa %r3, RETVAL+0(%r1) +# else + lwa %r3, RETVAL+4(%r1) +# endif + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_UINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_SINT64 + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME + nop +# case FFI_TYPE_POINTER + ld %r3, RETVAL+0(%r1) + mtlr %r0 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +# case FFI_V2_TYPE_VECTOR + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + mtlr %r0 + b .Lfinish +# case FFI_V2_TYPE_VECTOR_HOMOG + addi %r3, %r1, RETVAL + lvx %v2, 0, %r3 + addi %r3, %r3, 16 + b .Lmorevector +# case FFI_V2_TYPE_FLOAT_HOMOG + lfs %f1, RETVAL+0(%r1) + lfs %f2, RETVAL+4(%r1) + lfs %f3, RETVAL+8(%r1) + b .Lmorefloat +# case FFI_V2_TYPE_DOUBLE_HOMOG + lfd %f1, RETVAL+0(%r1) + lfd %f2, RETVAL+8(%r1) + lfd %f3, RETVAL+16(%r1) + lfd %f4, RETVAL+24(%r1) + mtlr %r0 + lfd %f5, RETVAL+32(%r1) + lfd %f6, RETVAL+40(%r1) + lfd %f7, RETVAL+48(%r1) + lfd %f8, RETVAL+56(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorevector: + lvx %v3, 0, %r3 + addi %r3, %r3, 16 + lvx %v4, 0, %r3 + addi %r3, %r3, 16 + lvx %v5, 0, %r3 + mtlr %r0 + addi %r3, %r3, 16 + lvx %v6, 0, %r3 + addi %r3, %r3, 16 + lvx %v7, 0, %r3 + addi %r3, %r3, 16 + lvx %v8, 0, %r3 + addi %r3, %r3, 16 + 
lvx %v9, 0, %r3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lmorefloat: + lfs %f4, RETVAL+12(%r1) + mtlr %r0 + lfs %f5, RETVAL+16(%r1) + lfs %f6, RETVAL+20(%r1) + lfs %f7, RETVAL+24(%r1) + lfs %f8, RETVAL+28(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmall: +# ifdef __LITTLE_ENDIAN__ + ld %r3,RETVAL+0(%r1) + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr +# else + # A struct smaller than a dword is returned in the low bits of r3 + # ie. right justified. Larger structs are passed left justified + # in r3 and r4. The return value area on the stack will have + # the structs as they are usually stored in memory. + cmpldi %r3, FFI_V2_TYPE_SMALL_STRUCT + 7 # size 8 bytes? + neg %r5, %r3 + ld %r3,RETVAL+0(%r1) + blt .Lsmalldown + mtlr %r0 + ld %r4,RETVAL+8(%r1) + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset STACKFRAME +.Lsmalldown: + addi %r5, %r5, FFI_V2_TYPE_SMALL_STRUCT + 7 + mtlr %r0 + sldi %r5, %r5, 3 + addi %r1, %r1, STACKFRAME + .cfi_def_cfa_offset 0 + srd %r3, %r3, %r5 + blr +# endif + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_closure_LINUX64,.-ffi_closure_LINUX64 +# else +# ifdef _CALL_LINUX + .size ffi_closure_LINUX64,.-.L.ffi_closure_LINUX64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_closure_LINUX64,.-.ffi_closure_LINUX64 +# endif +# endif + + + FFI_HIDDEN (ffi_go_closure_linux64) + .globl ffi_go_closure_linux64 + .text + .cfi_startproc +# if _CALL_ELF == 2 +ffi_go_closure_linux64: +# ifndef __PCREL__ + addis %r2, %r12, .TOC.-ffi_go_closure_linux64@ha + addi %r2, %r2, .TOC.-ffi_go_closure_linux64@l +# endif + .localentry ffi_go_closure_linux64, . - ffi_go_closure_linux64 +# else + .section ".opd","aw" + .align 3 +ffi_go_closure_linux64: +# ifdef _CALL_LINUX + .quad .L.ffi_go_closure_linux64,.TOC.@tocbase,0 + .type ffi_go_closure_linux64,@function + .text +.L.ffi_go_closure_linux64: +# else + FFI_HIDDEN (.ffi_go_closure_linux64) + .globl .ffi_go_closure_linux64 + .quad .ffi_go_closure_linux64,.TOC.@tocbase,0 + .size ffi_go_closure_linux64,24 + .type .ffi_go_closure_linux64,@function + .text +.ffi_go_closure_linux64: +# endif +# endif + +# if _CALL_ELF == 2 + ld %r12, 8(%r11) # closure->cif + mflr %r0 + lwz %r12, 28(%r12) # cif->flags + mtcrf 0x40, %r12 + addi %r12, %r1, PARMSAVE + bt 7, 0f + # Our caller has not allocated a parameter save area. + # We need to allocate one here and use it to pass gprs to + # ffi_closure_helper_LINUX64. + addi %r12, %r1, -STACKFRAME+PARMSAVE +0: + # Save general regs into parm save area + std %r3, 0(%r12) + std %r4, 8(%r12) + std %r5, 16(%r12) + std %r6, 24(%r12) + std %r7, 32(%r12) + std %r8, 40(%r12) + std %r9, 48(%r12) + std %r10, 56(%r12) + + # load up the pointer to the parm save area + mr %r7, %r12 +# else + mflr %r0 + # Save general regs into parm save area + # This is the parameter save area set up by our caller. 
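Editorial note: the .Lsmalldown path earlier in this return sequence right-justifies a sub-doubleword struct on big-endian targets. Judging from the "size 8 bytes?" compare, the helper's return code appears to encode the struct size as FFI_V2_TYPE_SMALL_STRUCT + size - 1, and the loaded doubleword is shifted right by 8*(8 - size) bits. A tiny illustrative C equivalent of that shift (assumption: size_in_bytes is between 1 and 7):

#include <stdint.h>

/* The doubleword read from the return buffer holds the struct bytes
   left-justified on a big-endian target; shift them down so they sit
   in the low bits of the register, as the ABI expects. */
static uint64_t right_justify_small_struct(uint64_t left_justified, unsigned size_in_bytes)
{
  return left_justified >> (8u * (8u - size_in_bytes));
}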
+ std %r3, PARMSAVE+0(%r1) + std %r4, PARMSAVE+8(%r1) + std %r5, PARMSAVE+16(%r1) + std %r6, PARMSAVE+24(%r1) + std %r7, PARMSAVE+32(%r1) + std %r8, PARMSAVE+40(%r1) + std %r9, PARMSAVE+48(%r1) + std %r10, PARMSAVE+56(%r1) + + # load up the pointer to the parm save area + addi %r7, %r1, PARMSAVE +# endif + std %r0, 16(%r1) + + # closure->cif + ld %r3, 8(%r11) + # closure->fun + ld %r4, 16(%r11) + # user_data + mr %r5, %r11 + b .Ldoclosure + + .cfi_endproc +# if _CALL_ELF == 2 + .size ffi_go_closure_linux64,.-ffi_go_closure_linux64 +# else +# ifdef _CALL_LINUX + .size ffi_go_closure_linux64,.-.L.ffi_go_closure_linux64 +# else + .long 0 + .byte 0,12,0,1,128,0,0,0 + .size .ffi_go_closure_linux64,.-.ffi_go_closure_linux64 +# endif +# endif +#endif + +#if (defined __ELF__ && defined __linux__) || _CALL_ELF == 2 + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ppc_closure.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ppc_closure.S new file mode 100644 index 0000000..b6d209d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/ppc_closure.S @@ -0,0 +1,397 @@ +/* ----------------------------------------------------------------------- + sysv.h - Copyright (c) 2003 Jakub Jelinek + Copyright (c) 2008 Red Hat, Inc. + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +#define LIBFFI_ASM +#include +#include +#include + + .file "ppc_closure.S" + +#ifndef POWERPC64 + +FFI_HIDDEN(ffi_closure_SYSV) +ENTRY(ffi_closure_SYSV) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + +# we want to build up an areas for the parameters passed +# in registers (both floating point and integer) + + # so first save gpr 3 to gpr 10 (aligned to 4) + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # set up registers for the routine that does the work + + # closure->cif + lwz %r3,FFI_TRAMPOLINE_SIZE(%r11) + # closure->fun + lwz %r4,FFI_TRAMPOLINE_SIZE+4(%r11) + # closure->user_data + lwz %r5,FFI_TRAMPOLINE_SIZE+8(%r11) + +.Ldoclosure: + stw %r6, 28(%r1) + stw %r7, 32(%r1) + stw %r8, 36(%r1) + stw %r9, 40(%r1) + stw %r10,44(%r1) + +#ifndef __NO_FPRS__ + # next save fpr 1 to fpr 8 (aligned to 8) + stfd %f1, 48(%r1) + stfd %f2, 56(%r1) + stfd %f3, 64(%r1) + stfd %f4, 72(%r1) + stfd %f5, 80(%r1) + stfd %f6, 88(%r1) + stfd %f7, 96(%r1) + stfd %f8, 104(%r1) +#endif + + # pointer to the result storage + addi %r6,%r1,112 + + # pointer to the saved gpr registers + addi %r7,%r1,16 + + # pointer to the saved fpr registers + addi %r8,%r1,48 + + # pointer to the outgoing parameter save area in the previous frame + # i.e. the previous frame pointer + 8 + addi %r9,%r1,152 + + # make the call + bl ffi_closure_helper_SYSV@local +.Lret: + # now r3 contains the return type + # so use it to look up in a table + # so we know how to deal with each type + + # look up the proper starting point in table + # by using return type as offset + + mflr %r4 # move address of .Lret to r4 + slwi %r3,%r3,4 # now multiply return type by 16 + addi %r4, %r4, .Lret_type0 - .Lret + lwz %r0,148(%r1) + add %r3,%r3,%r4 # add contents of table to table address + mtctr %r3 + bctr # jump to it + +# Each of the ret_typeX code fragments has to be exactly 16 bytes long +# (4 instructions). For cache effectiveness we align to a 16 byte boundary +# first. 
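Editorial note: the FFI_SYSV_TYPE_SMALL_STRUCT + n return cases that follow are what this closure path selects when a by-value struct of n bytes comes back in r3/r4. For orientation, a hedged sketch of how such a call looks through the public API (the struct, function, and variable names are invented; the ffi_type/ffi_prep_cif/ffi_call usage is the standard documented pattern):

#include <ffi.h>
#include <stdio.h>

struct pair { char a; char b; };          /* 2 bytes: small enough to come back in a register */

static struct pair make_pair(char a, char b)
{
  struct pair p = { a, b };
  return p;
}

int main(void)
{
  ffi_type *fields[] = { &ffi_type_schar, &ffi_type_schar, NULL };
  ffi_type pair_type = { 0, 0, FFI_TYPE_STRUCT, fields };  /* size/alignment filled in by ffi_prep_cif */

  ffi_type *arg_types[] = { &ffi_type_schar, &ffi_type_schar };
  ffi_cif cif;
  char a = 'x', b = 'y';
  void *args[] = { &a, &b };
  struct pair result;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &pair_type, arg_types) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(make_pair), &result, args);
      printf("%c %c\n", result.a, result.b);
    }
  return 0;
}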
+ .align 4 +# case FFI_TYPE_VOID +.Lret_type0: + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_INT + lwz %r3,112+0(%r1) + mtlr %r0 +.Lfinish: + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_FLOAT +#ifndef __NO_FPRS__ + lfs %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_DOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) +#else + nop +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_LONGDOUBLE +#ifndef __NO_FPRS__ + lfd %f1,112+0(%r1) + lfd %f2,112+8(%r1) + mtlr %r0 + b .Lfinish +#else + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop +#endif + +# case FFI_TYPE_UINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT8 +#ifdef __LITTLE_ENDIAN__ + lbz %r3,112+0(%r1) +#else + lbz %r3,112+3(%r1) +#endif + extsb %r3,%r3 + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_UINT16 +#ifdef __LITTLE_ENDIAN__ + lhz %r3,112+0(%r1) +#else + lhz %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT16 +#ifdef __LITTLE_ENDIAN__ + lha %r3,112+0(%r1) +#else + lha %r3,112+2(%r1) +#endif + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_SINT32 + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_SINT64 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +# case FFI_TYPE_STRUCT + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + nop + +# case FFI_TYPE_POINTER + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_TYPE_UINT128 + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + lwz %r5,112+8(%r1) + b .Luint128 + +# The return types below are only used when the ABI type is FFI_SYSV. +# case FFI_SYSV_TYPE_SMALL_STRUCT + 1. One byte struct. + lbz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 2. Two byte struct. + lhz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 3. Three byte struct. + lwz %r3,112+0(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#else + srwi %r3,%r3,8 + mtlr %r0 + b .Lfinish +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 4. Four byte struct. + lwz %r3,112+0(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 5. Five byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,24 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 6. Six byte struct. 
+ lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,16 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 7. Seven byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) +#ifdef __LITTLE_ENDIAN__ + mtlr %r0 + b .Lfinish +#else + li %r5,8 + b .Lstruct567 +#endif + +# case FFI_SYSV_TYPE_SMALL_STRUCT + 8. Eight byte struct. + lwz %r3,112+0(%r1) + lwz %r4,112+4(%r1) + mtlr %r0 + b .Lfinish + +#ifndef __LITTLE_ENDIAN__ +.Lstruct567: + subfic %r6,%r5,32 + srw %r4,%r4,%r5 + slw %r6,%r3,%r6 + srw %r3,%r3,%r5 + or %r4,%r6,%r4 + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_def_cfa_offset 144 +#endif + +.Luint128: + lwz %r6,112+12(%r1) + mtlr %r0 + addi %r1,%r1,144 + .cfi_def_cfa_offset 0 + blr + .cfi_endproc +END(ffi_closure_SYSV) + + +FFI_HIDDEN(ffi_go_closure_sysv) +ENTRY(ffi_go_closure_sysv) + .cfi_startproc + stwu %r1,-144(%r1) + .cfi_def_cfa_offset 144 + mflr %r0 + stw %r0,148(%r1) + .cfi_offset 65, 4 + + stw %r3, 16(%r1) + stw %r4, 20(%r1) + stw %r5, 24(%r1) + + # closure->cif + lwz %r3,4(%r11) + # closure->fun + lwz %r4,8(%r11) + # user_data + mr %r5,%r11 + b .Ldoclosure + .cfi_endproc +END(ffi_go_closure_sysv) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/sysv.S new file mode 100644 index 0000000..df97734 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/powerpc/sysv.S @@ -0,0 +1,173 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998 Geoffrey Keating + Copyright (C) 2007 Free Software Foundation, Inc + + PowerPC Assembly glue. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include + +#ifndef POWERPC64 +FFI_HIDDEN(ffi_call_SYSV) +ENTRY(ffi_call_SYSV) + .cfi_startproc + /* Save the old stack pointer as AP. */ + mr %r10,%r1 + .cfi_def_cfa_register 10 + + /* Allocate the stack space we need. */ + stwux %r1,%r1,%r8 + /* Save registers we use. 
*/ + mflr %r9 + stw %r28,-16(%r10) + stw %r29,-12(%r10) + stw %r30, -8(%r10) + stw %r31, -4(%r10) + stw %r9, 4(%r10) + .cfi_offset 65, 4 + .cfi_offset 31, -4 + .cfi_offset 30, -8 + .cfi_offset 29, -12 + .cfi_offset 28, -16 + + /* Save arguments over call... */ + stw %r7, -20(%r10) /* closure, */ + mr %r31,%r6 /* flags, */ + mr %r30,%r5 /* rvalue, */ + mr %r29,%r4 /* function address, */ + mr %r28,%r10 /* our AP. */ + .cfi_def_cfa_register 28 + + /* Call ffi_prep_args_SYSV. */ + mr %r4,%r1 + bl ffi_prep_args_SYSV@local + + /* Now do the call. */ + /* Set up cr1 with bits 4-7 of the flags. */ + mtcrf 0x40,%r31 + /* Get the address to call into CTR. */ + mtctr %r29 + /* Load all those argument registers. */ + lwz %r3,-24-(8*4)(%r28) + lwz %r4,-24-(7*4)(%r28) + lwz %r5,-24-(6*4)(%r28) + lwz %r6,-24-(5*4)(%r28) + bf- 5,1f + nop + lwz %r7,-24-(4*4)(%r28) + lwz %r8,-24-(3*4)(%r28) + lwz %r9,-24-(2*4)(%r28) + lwz %r10,-24-(1*4)(%r28) + nop +1: + +#ifndef __NO_FPRS__ + /* Load all the FP registers. */ + bf- 6,2f + lfd %f1,-24-(8*4)-(8*8)(%r28) + lfd %f2,-24-(8*4)-(7*8)(%r28) + lfd %f3,-24-(8*4)-(6*8)(%r28) + lfd %f4,-24-(8*4)-(5*8)(%r28) + nop + lfd %f5,-24-(8*4)-(4*8)(%r28) + lfd %f6,-24-(8*4)-(3*8)(%r28) + lfd %f7,-24-(8*4)-(2*8)(%r28) + lfd %f8,-24-(8*4)-(1*8)(%r28) +#endif +2: + + /* Make the call. */ + lwz %r11, -20(%r28) + bctrl + + /* Now, deal with the return value. */ + mtcrf 0x03,%r31 /* cr6-cr7 */ + bt- 31,L(small_struct_return_value) + bt- 30,L(done_return_value) +#ifndef __NO_FPRS__ + bt- 29,L(fp_return_value) +#endif + stw %r3,0(%r30) + bf+ 27,L(done_return_value) + stw %r4,4(%r30) + bf 26,L(done_return_value) + stw %r5,8(%r30) + stw %r6,12(%r30) + /* Fall through... */ + +L(done_return_value): + /* Restore the registers we used and return. */ + lwz %r9, 4(%r28) + lwz %r31, -4(%r28) + mtlr %r9 + lwz %r30, -8(%r28) + lwz %r29,-12(%r28) + lwz %r28,-16(%r28) + .cfi_remember_state + /* At this point we don't have a cfa register. Say all our + saved regs have been restored. */ + .cfi_same_value 65 + .cfi_same_value 31 + .cfi_same_value 30 + .cfi_same_value 29 + .cfi_same_value 28 + /* Hopefully this works.. */ + .cfi_def_cfa_register 1 + .cfi_offset 1, 0 + lwz %r1,0(%r1) + .cfi_same_value 1 + blr + +#ifndef __NO_FPRS__ +L(fp_return_value): + .cfi_restore_state + bf 27,L(float_return_value) + stfd %f1,0(%r30) + bf 26,L(done_return_value) + stfd %f2,8(%r30) + b L(done_return_value) +L(float_return_value): + stfs %f1,0(%r30) + b L(done_return_value) +#endif + +L(small_struct_return_value): + /* + * The C code always allocates a properly-aligned 8-byte bounce + * buffer to make this assembly code very simple. Just write out + * r3 and r4 to the buffer to allow the C code to handle the rest. + */ + stw %r3, 0(%r30) + stw %r4, 4(%r30) + b L(done_return_value) + .cfi_endproc + +END(ffi_call_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/prep_cif.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/prep_cif.c new file mode 100644 index 0000000..1db3804 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/prep_cif.c @@ -0,0 +1,263 @@ +/* ----------------------------------------------------------------------- + prep_cif.c - Copyright (c) 2011, 2012 Anthony Green + Copyright (c) 1996, 1998, 2007 Red Hat, Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include + +/* Round up to FFI_SIZEOF_ARG. */ + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +/* Perform machine independent initialization of aggregate type + specifications. */ + +static ffi_status initialize_aggregate(ffi_type *arg, size_t *offsets) +{ + ffi_type **ptr; + + if (UNLIKELY(arg == NULL || arg->elements == NULL)) + return FFI_BAD_TYPEDEF; + + arg->size = 0; + arg->alignment = 0; + + ptr = &(arg->elements[0]); + + if (UNLIKELY(ptr == 0)) + return FFI_BAD_TYPEDEF; + + while ((*ptr) != NULL) + { + if (UNLIKELY(((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK))) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type */ + FFI_ASSERT_VALID_TYPE(*ptr); + + arg->size = FFI_ALIGN(arg->size, (*ptr)->alignment); + if (offsets) + *offsets++ = arg->size; + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + /* Structure size includes tail padding. This is important for + structures that fit in one register on ABIs like the PowerPC64 + Linux ABI that right justify small structs in a register. + It's also needed for nested structure layout, for example + struct A { long a; char b; }; struct B { struct A x; char y; }; + should find y at an offset of 2*sizeof(long) and result in a + total size of 3*sizeof(long). */ + arg->size = FFI_ALIGN (arg->size, arg->alignment); + + /* On some targets, the ABI defines that structures have an additional + alignment beyond the "natural" one based on their elements. */ +#ifdef FFI_AGGREGATE_ALIGNMENT + if (FFI_AGGREGATE_ALIGNMENT > arg->alignment) + arg->alignment = FFI_AGGREGATE_ALIGNMENT; +#endif + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +#ifndef __CRIS__ +/* The CRIS ABI specifies structure elements to have byte + alignment only, so it completely overrides this functions, + which assumes "natural" alignment and padding. */ + +/* Perform machine independent ffi_cif preparation, then call + machine dependent routine. */ + +/* For non variadic functions isvariadic should be 0 and + nfixedargs==ntotalargs. + + For variadic calls, isvariadic should be 1 and nfixedargs + and ntotalargs set as appropriate. 
nfixedargs must always be >=1 */ + + +ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, ffi_type **atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT(cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; +#if (defined(_M_ARM64) || defined(__aarch64__)) && defined(_WIN32) + cif->is_variadic = isvariadic; +#endif +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + /* Initialize the return type if necessary */ + if ((cif->rtype->size == 0) + && (initialize_aggregate(cif->rtype, NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if (rtype->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the return type */ + FFI_ASSERT_VALID_TYPE(cif->rtype); + + /* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. */ +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT +#ifdef TILE + && (cif->rtype->size > 10 * FFI_SIZEOF_ARG) +#endif +#ifdef XTENSA + && (cif->rtype->size > 16) +#endif +#ifdef NIOS2 + && (cif->rtype->size > 8) +#endif + ) + bytes = STACK_ARG_SIZE(sizeof(void*)); +#endif + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + + /* Initialize any uninitialized aggregate type definitions */ + if (((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if ((*ptr)->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the argument type, do this + check after the initialization. */ + FFI_ASSERT_VALID_TYPE(*ptr); + +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + { + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = (unsigned)FFI_ALIGN(bytes, (*ptr)->alignment); + +#ifdef TILE + if (bytes < 10 * FFI_SIZEOF_ARG && + bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG) + { + /* An argument is never split between the 10 parameter + registers and the stack. 
*/ + bytes = 10 * FFI_SIZEOF_ARG; + } +#endif +#ifdef XTENSA + if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4) + bytes = 6*4; +#endif + + bytes += (unsigned int)STACK_ARG_SIZE((*ptr)->size); + } +#endif + } + + cif->bytes = bytes; + + /* Perform machine dependent cif processing */ +#ifdef FFI_TARGET_SPECIFIC_VARIADIC + if (isvariadic) + return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs); +#endif + + return ffi_prep_cif_machdep(cif); +} +#endif /* not __CRIS__ */ + +ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs, + ffi_type *rtype, ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes); +} + +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes); +} + +#if FFI_CLOSURES + +ffi_status +ffi_prep_closure (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +{ + return ffi_prep_closure_loc (closure, cif, fun, user_data, closure); +} + +#endif + +ffi_status +ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, size_t *offsets) +{ + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + if (struct_type->type != FFI_TYPE_STRUCT) + return FFI_BAD_TYPEDEF; + +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + return initialize_aggregate(struct_type, offsets); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/raw_api.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/raw_api.c new file mode 100644 index 0000000..be15611 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/raw_api.c @@ -0,0 +1,267 @@ +/* ----------------------------------------------------------------------- + raw_api.c - Copyright (c) 1999, 2008 Red Hat, Inc. + + Author: Kresten Krab Thorup + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This file defines generic functions for use with the raw api. 
*/ + +#include +#include + +#if !FFI_NO_RAW_API + +size_t +ffi_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { +#if !FFI_NO_STRUCTS + if ((*at)->type == FFI_TYPE_STRUCT) + result += FFI_ALIGN (sizeof (void*), FFI_SIZEOF_ARG); + else +#endif + result += FFI_ALIGN ((*at)->size, FFI_SIZEOF_ARG); + } + + return result; +} + + +void +ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 1); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 2); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 4); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + *args = (raw++)->ptr; + break; +#endif + + case FFI_TYPE_COMPLEX: + *args = (raw++)->ptr; + break; + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + default: + *args = raw; + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if !FFI_NO_STRUCTS + if ((*tp)->type == FFI_TYPE_STRUCT) + { + *args = (raw++)->ptr; + } + else +#endif + if ((*tp)->type == FFI_TYPE_COMPLEX) + { + *args = (raw++)->ptr; + } + else + { + *args = (void*) raw; + raw += FFI_ALIGN ((*tp)->size, sizeof (void*)) / sizeof (void*); + } + } + +#else +#error "pdp endian not supported" +#endif /* ! PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + (raw++)->uint = *(UINT8*) (*args); + break; + + case FFI_TYPE_SINT8: + (raw++)->sint = *(SINT8*) (*args); + break; + + case FFI_TYPE_UINT16: + (raw++)->uint = *(UINT16*) (*args); + break; + + case FFI_TYPE_SINT16: + (raw++)->sint = *(SINT16*) (*args); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + (raw++)->uint = *(UINT32*) (*args); + break; + + case FFI_TYPE_SINT32: + (raw++)->sint = *(SINT32*) (*args); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + (raw++)->ptr = *args; + break; +#endif + + case FFI_TYPE_COMPLEX: + (raw++)->ptr = *args; + break; + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } +} + +#if !FFI_NATIVE_RAW_API + + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_raw *raw = (ffi_raw*)alloca (ffi_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, raw, cl->user_data); +} + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ + +#if FFI_CLOSURES + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_raw_closure (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data) +{ + return ffi_prep_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ + +#endif /* !FFI_NO_RAW_API */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/ffi.c new file mode 100644 index 0000000..c910858 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/ffi.c @@ -0,0 +1,481 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + Based on MIPS N32/64 port + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include + +#if __riscv_float_abi_double +#define ABI_FLEN 64 +#define ABI_FLOAT double +#elif __riscv_float_abi_single +#define ABI_FLEN 32 +#define ABI_FLOAT float +#endif + +#define NARGREG 8 +#define STKALIGN 16 +#define MAXCOPYARG (2 * sizeof(double)) + +typedef struct call_context +{ +#if ABI_FLEN + ABI_FLOAT fa[8]; +#endif + size_t a[8]; + /* used by the assembly code to in-place construct its own stack frame */ + char frame[16]; +} call_context; + +typedef struct call_builder +{ + call_context *aregs; + int used_integer; + int used_float; + size_t *used_stack; +} call_builder; + +/* integer (not pointer) less than ABI XLEN */ +/* FFI_TYPE_INT does not appear to be used */ +#if __SIZEOF_POINTER__ == 8 +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64) +#else +#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32) +#endif + +#if ABI_FLEN +typedef struct { + char as_elements, type1, offset2, type2; +} float_struct_info; + +#if ABI_FLEN >= 64 +#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE) +#else +#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT) +#endif + +static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) { + int i; + if (out == out_end) return out; + if (in->type != FFI_TYPE_STRUCT) { + *(out++) = in; + } else { + for (i = 0; in->elements[i]; i++) + out = flatten_struct(in->elements[i], out, out_end); + } + return out; +} + +/* Structs with at most two fields after flattening, one of which is of + floating point type, are passed in multiple registers if sufficient + registers are available. */ +static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) { + float_struct_info ret = {0, 0, 0, 0}; + ffi_type *fields[3]; + int num_floats, num_ints; + int num_fields = flatten_struct(top, fields, fields + 3) - fields; + + if (num_fields == 1) { + if (IS_FLOAT(fields[0]->type)) { + ret.as_elements = 1; + ret.type1 = fields[0]->type; + } + } else if (num_fields == 2) { + num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type); + num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type); + if (num_floats == 0 || num_floats + num_ints != 2) + return ret; + if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG) + return ret; + if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type)) + return ret; + + ret.type1 = fields[0]->type; + ret.type2 = fields[1]->type; + ret.offset2 = FFI_ALIGN(fields[0]->size, fields[1]->alignment); + ret.as_elements = 1; + } + + return ret; +} +#endif + +/* allocates a single register, float register, or XLEN-sized stack slot to a datum */ +static void marshal_atom(call_builder *cb, int type, void *data) { + size_t value = 0; + switch (type) { + case FFI_TYPE_UINT8: value = *(uint8_t *)data; break; + case FFI_TYPE_SINT8: value = *(int8_t *)data; break; + case FFI_TYPE_UINT16: value = *(uint16_t *)data; break; + case FFI_TYPE_SINT16: value = *(int16_t *)data; break; + /* 32-bit quantities are always sign-extended in the ABI */ + case FFI_TYPE_UINT32: value = *(int32_t *)data; break; + case FFI_TYPE_SINT32: value = *(int32_t *)data; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: value = *(uint64_t *)data; break; + case FFI_TYPE_SINT64: value = *(int64_t *)data; break; +#endif + case FFI_TYPE_POINTER: value = *(size_t *)data; break; + + /* float values may be 
recoded in an implementation-defined way + by hardware conforming to 2.1 or earlier, so use asm to + reinterpret floats as doubles */ +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data)); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data)); + return; +#endif + default: FFI_ASSERT(0); break; + } + + if (cb->used_integer == NARGREG) { + *cb->used_stack++ = value; + } else { + cb->aregs->a[cb->used_integer++] = value; + } +} + +static void unmarshal_atom(call_builder *cb, int type, void *data) { + size_t value; + switch (type) { +#if ABI_FLEN >= 32 + case FFI_TYPE_FLOAT: + asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif +#if ABI_FLEN >= 64 + case FFI_TYPE_DOUBLE: + asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++])); + return; +#endif + } + + if (cb->used_integer == NARGREG) { + value = *cb->used_stack++; + } else { + value = cb->aregs->a[cb->used_integer++]; + } + + switch (type) { + case FFI_TYPE_UINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_SINT8: *(uint8_t *)data = value; break; + case FFI_TYPE_UINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_SINT16: *(uint16_t *)data = value; break; + case FFI_TYPE_UINT32: *(uint32_t *)data = value; break; + case FFI_TYPE_SINT32: *(uint32_t *)data = value; break; +#if __SIZEOF_POINTER__ == 8 + case FFI_TYPE_UINT64: *(uint64_t *)data = value; break; + case FFI_TYPE_SINT64: *(uint64_t *)data = value; break; +#endif + case FFI_TYPE_POINTER: *(size_t *)data = value; break; + default: FFI_ASSERT(0); break; + } +} + +/* adds an argument to a call, or a not by reference return value */ +static void marshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + marshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + marshal_atom(cb, type->type, data); + return; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + marshal_atom(cb, FFI_TYPE_POINTER, &data); + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + marshal_atom(cb, type->type, data); + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + memcpy(realign, data, type->size); + if (type->size > 0) + marshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + marshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + } +} + +/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */ +static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) { + size_t realign[2]; + void *pointer; + +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) { + unmarshal_atom(cb, fsi.type1, data); + if (fsi.offset2) + 
unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); + return data; + } + } + + if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { + unmarshal_atom(cb, type->type, data); + return data; + } +#endif + + if (type->size > 2 * __SIZEOF_POINTER__) { + /* pass by reference */ + unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer); + return pointer; + } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { + unmarshal_atom(cb, type->type, data); + return data; + } else { + /* overlong integers, soft-float floats, and structs without special + float handling are treated identically from this point on */ + + /* variadics are aligned even in registers */ + if (type->alignment > __SIZEOF_POINTER__) { + if (var) + cb->used_integer = FFI_ALIGN(cb->used_integer, 2); + cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); + } + + if (type->size > 0) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign); + if (type->size > __SIZEOF_POINTER__) + unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1); + memcpy(data, realign, type->size); + return data; + } +} + +static int passed_by_ref(call_builder *cb, ffi_type *type, int var) { +#if ABI_FLEN + if (!var && type->type == FFI_TYPE_STRUCT) { + float_struct_info fsi = struct_passed_as_elements(cb, type); + if (fsi.as_elements) return 0; + } +#endif + + return type->size > 2 * __SIZEOF_POINTER__; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) { + cif->riscv_nfixedargs = cif->nargs; + return FFI_OK; +} + +/* Perform machine dependent cif processing when we have a variadic function */ + +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) { + cif->riscv_nfixedargs = nfixedargs; + return FFI_OK; +} + +/* Low level routine for calling functions */ +extern void ffi_call_asm (void *stack, struct call_context *regs, + void (*fn) (void), void *closure) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, + void *closure) +{ + /* this is a conservative estimate, assuming a complex return value and + that all remaining arguments are long long / __int128 */ + size_t arg_bytes = cif->nargs <= 3 ? 
0 : + FFI_ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN); + size_t rval_bytes = 0; + if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__) + rval_bytes = FFI_ALIGN(cif->rtype->size, STKALIGN); + size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context); + + /* the assembly code will deallocate all stack data at lower addresses + than the argument region, so we need to allocate the frame and the + return value after the arguments in a single allocation */ + size_t alloc_base; + /* Argument region must be 16-byte aligned */ + if (_Alignof(max_align_t) >= STKALIGN) { + /* since sizeof long double is normally 16, the compiler will + guarantee alloca alignment to at least that much */ + alloc_base = (size_t)alloca(alloc_size); + } else { + alloc_base = FFI_ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN); + } + + if (rval_bytes) + rvalue = (void*)(alloc_base + arg_bytes); + + call_builder cb; + cb.used_float = cb.used_integer = 0; + cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes); + cb.used_stack = (void*)alloc_base; + + int return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + marshal(&cb, &ffi_type_pointer, 0, &rvalue); + + int i; + for (i = 0; i < cif->nargs; i++) + marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]); + + ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure); + + cb.used_float = cb.used_integer = 0; + if (!return_by_ref && rvalue) + unmarshal(&cb, cif->rtype, 0, rvalue); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +extern void ffi_closure_asm(void) FFI_HIDDEN; + +ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc) +{ + uint32_t *tramp = (uint32_t *) &closure->tramp[0]; + uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm; + + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + /* we will call ffi_closure_inner with codeloc, not closure, but as long + as the memory is readable it should work */ + + tramp[0] = 0x00000317; /* auipc t1, 0 (i.e. t0 <- codeloc) */ +#if __SIZEOF_POINTER__ == 8 + tramp[1] = 0x01033383; /* ld t2, 16(t1) */ +#else + tramp[1] = 0x01032383; /* lw t2, 16(t1) */ +#endif + tramp[2] = 0x00038067; /* jr t2 */ + tramp[3] = 0x00000013; /* nop */ + tramp[4] = fn; + tramp[5] = fn >> 32; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + __builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE); + + return FFI_OK; +} + +extern void ffi_go_closure_asm (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) + return FFI_BAD_ABI; + + closure->tramp = (void *) ffi_go_closure_asm; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Called by the assembly code with aregs pointing to saved argument registers + and stack pointing to the stacked arguments. Return values passed in + registers will be reloaded from aregs. 
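+
+   (The trampoline written by ffi_prep_closure_loc above jumps to
+   ffi_closure_asm in sysv.S, which saves a0-a7 and, when present,
+   fa0-fa7 into a call_context-shaped area on its stack frame before
+   calling this function.)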
*/ +void FFI_HIDDEN +ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stack, call_context *aregs) +{ + void **avalue = alloca(cif->nargs * sizeof(void*)); + /* storage for arguments which will be copied by unmarshal(). We could + theoretically avoid the copies in many cases and use at most 128 bytes + of memory, but allocating disjoint storage for each argument is + simpler. */ + char *astorage = alloca(cif->nargs * MAXCOPYARG); + void *rvalue; + call_builder cb; + int return_by_ref; + int i; + + cb.aregs = aregs; + cb.used_integer = cb.used_float = 0; + cb.used_stack = stack; + + return_by_ref = passed_by_ref(&cb, cif->rtype, 0); + if (return_by_ref) + unmarshal(&cb, &ffi_type_pointer, 0, &rvalue); + else + rvalue = alloca(cif->rtype->size); + + for (i = 0; i < cif->nargs; i++) + avalue[i] = unmarshal(&cb, cif->arg_types[i], + i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG); + + fun (cif, rvalue, avalue, user_data); + + if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) { + cb.used_integer = cb.used_float = 0; + marshal(&cb, cif->rtype, 0, rvalue); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/ffitarget.h new file mode 100644 index 0000000..75e6462 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/ffitarget.h @@ -0,0 +1,69 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - 2014 Michael Knyszek + + Target configuration macros for RISC-V. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef __riscv +#error "libffi was configured for a RISC-V target but this does not appear to be a RISC-V compiler." 
+#endif + +#ifndef LIBFFI_ASM + +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +/* FFI_UNUSED_NN and riscv_unused are to maintain ABI compatibility with a + distributed Berkeley patch from 2014, and can be removed at SONAME bump */ +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_UNUSED_1, + FFI_UNUSED_2, + FFI_UNUSED_3, + FFI_LAST_ABI, + + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#endif /* LIBFFI_ASM */ + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_NATIVE_RAW_API 0 +#define FFI_EXTRA_CIF_FIELDS unsigned riscv_nfixedargs; unsigned riscv_unused; +#define FFI_TARGET_SPECIFIC_VARIADIC + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/sysv.S new file mode 100644 index 0000000..522d0b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/riscv/sysv.S @@ -0,0 +1,293 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2015 Michael Knyszek + 2015 Andrew Waterman + 2018 Stef O'Rear + + RISC-V Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Define aliases so that we can handle all ABIs uniformly */ + +#if __SIZEOF_POINTER__ == 8 +#define PTRS 8 +#define LARG ld +#define SARG sd +#else +#define PTRS 4 +#define LARG lw +#define SARG sw +#endif + +#if __riscv_float_abi_double +#define FLTS 8 +#define FLARG fld +#define FSARG fsd +#elif __riscv_float_abi_single +#define FLTS 4 +#define FLARG flw +#define FSARG fsw +#else +#define FLTS 0 +#endif + +#define fp s0 + + .text + .globl ffi_call_asm + .type ffi_call_asm, @function + .hidden ffi_call_asm +/* + struct call_context { + floatreg fa[8]; + intreg a[8]; + intreg pad[rv32 ? 2 : 0]; + intreg save_fp, save_ra; + } + void ffi_call_asm (size_t *stackargs, struct call_context *regargs, + void (*fn) (void), void *closure); +*/ + +#define FRAME_LEN (8 * FLTS + 8 * PTRS + 16) + +ffi_call_asm: + .cfi_startproc + + /* + We are NOT going to set up an ordinary stack frame. In order to pass + the stacked args to the called function, we adjust our stack pointer to + a0, which is in the _caller's_ alloca area. 
We establish our own stack + frame at the end of the call_context. + + Anything below the arguments will be freed at this point, although we + preserve the call_context so that it can be read back in the caller. + */ + + .cfi_def_cfa 11, FRAME_LEN # interim CFA based on a1 + SARG fp, FRAME_LEN - 2*PTRS(a1) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(a1) + .cfi_offset 1, -1*PTRS + + addi fp, a1, FRAME_LEN + mv sp, a0 + .cfi_def_cfa 8, 0 # our frame is fully set up + + # Load arguments + mv t1, a2 + mv t2, a3 + +#if FLTS + FLARG fa0, -FRAME_LEN+0*FLTS(fp) + FLARG fa1, -FRAME_LEN+1*FLTS(fp) + FLARG fa2, -FRAME_LEN+2*FLTS(fp) + FLARG fa3, -FRAME_LEN+3*FLTS(fp) + FLARG fa4, -FRAME_LEN+4*FLTS(fp) + FLARG fa5, -FRAME_LEN+5*FLTS(fp) + FLARG fa6, -FRAME_LEN+6*FLTS(fp) + FLARG fa7, -FRAME_LEN+7*FLTS(fp) +#endif + + LARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + LARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + LARG a2, -FRAME_LEN+8*FLTS+2*PTRS(fp) + LARG a3, -FRAME_LEN+8*FLTS+3*PTRS(fp) + LARG a4, -FRAME_LEN+8*FLTS+4*PTRS(fp) + LARG a5, -FRAME_LEN+8*FLTS+5*PTRS(fp) + LARG a6, -FRAME_LEN+8*FLTS+6*PTRS(fp) + LARG a7, -FRAME_LEN+8*FLTS+7*PTRS(fp) + + /* Call */ + jalr t1 + + /* Save return values - only a0/a1 (fa0/fa1) are used */ +#if FLTS + FSARG fa0, -FRAME_LEN+0*FLTS(fp) + FSARG fa1, -FRAME_LEN+1*FLTS(fp) +#endif + + SARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp) + SARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp) + + /* Restore and return */ + addi sp, fp, -FRAME_LEN + .cfi_def_cfa 2, FRAME_LEN + LARG ra, -1*PTRS(fp) + .cfi_restore 1 + LARG fp, -2*PTRS(fp) + .cfi_restore 8 + ret + .cfi_endproc + .size ffi_call_asm, .-ffi_call_asm + + +/* + ffi_closure_asm. Expects address of the passed-in ffi_closure in t1. + void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_closure_asm + .hidden ffi_closure_asm + .type ffi_closure_asm, @function +ffi_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, FFI_TRAMPOLINE_SIZE+0*PTRS(t1) + LARG a1, FFI_TRAMPOLINE_SIZE+1*PTRS(t1) + LARG a2, FFI_TRAMPOLINE_SIZE+2*PTRS(t1) + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_closure_asm, .-ffi_closure_asm + +/* + ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2. 
+ void ffi_closure_inner (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + size_t *stackargs, struct call_context *regargs) +*/ + + .globl ffi_go_closure_asm + .hidden ffi_go_closure_asm + .type ffi_go_closure_asm, @function +ffi_go_closure_asm: + .cfi_startproc + + addi sp, sp, -FRAME_LEN + .cfi_def_cfa_offset FRAME_LEN + + /* make a frame */ + SARG fp, FRAME_LEN - 2*PTRS(sp) + .cfi_offset 8, -2*PTRS + SARG ra, FRAME_LEN - 1*PTRS(sp) + .cfi_offset 1, -1*PTRS + addi fp, sp, FRAME_LEN + + /* save arguments */ +#if FLTS + FSARG fa0, 0*FLTS(sp) + FSARG fa1, 1*FLTS(sp) + FSARG fa2, 2*FLTS(sp) + FSARG fa3, 3*FLTS(sp) + FSARG fa4, 4*FLTS(sp) + FSARG fa5, 5*FLTS(sp) + FSARG fa6, 6*FLTS(sp) + FSARG fa7, 7*FLTS(sp) +#endif + + SARG a0, 8*FLTS+0*PTRS(sp) + SARG a1, 8*FLTS+1*PTRS(sp) + SARG a2, 8*FLTS+2*PTRS(sp) + SARG a3, 8*FLTS+3*PTRS(sp) + SARG a4, 8*FLTS+4*PTRS(sp) + SARG a5, 8*FLTS+5*PTRS(sp) + SARG a6, 8*FLTS+6*PTRS(sp) + SARG a7, 8*FLTS+7*PTRS(sp) + + /* enter C */ + LARG a0, 1*PTRS(t2) + LARG a1, 2*PTRS(t2) + mv a2, t2 + addi a3, sp, FRAME_LEN + mv a4, sp + + call ffi_closure_inner + + /* return values */ +#if FLTS + FLARG fa0, 0*FLTS(sp) + FLARG fa1, 1*FLTS(sp) +#endif + + LARG a0, 8*FLTS+0*PTRS(sp) + LARG a1, 8*FLTS+1*PTRS(sp) + + /* restore and return */ + LARG ra, FRAME_LEN-1*PTRS(sp) + .cfi_restore 1 + LARG fp, FRAME_LEN-2*PTRS(sp) + .cfi_restore 8 + addi sp, sp, FRAME_LEN + .cfi_def_cfa_offset 0 + ret + .cfi_endproc + .size ffi_go_closure_asm, .-ffi_go_closure_asm diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/ffi.c new file mode 100644 index 0000000..4035b6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/ffi.c @@ -0,0 +1,756 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2000, 2007 Software AG + Copyright (c) 2008 Red Hat, Inc + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ +/*====================================================================*/ +/* Includes */ +/* -------- */ +/*====================================================================*/ + +#include +#include +#include +#include "internal.h" + +/*====================== End of Includes =============================*/ + +/*====================================================================*/ +/* Defines */ +/* ------- */ +/*====================================================================*/ + +/* Maximum number of GPRs available for argument passing. */ +#define MAX_GPRARGS 5 + +/* Maximum number of FPRs available for argument passing. */ +#ifdef __s390x__ +#define MAX_FPRARGS 4 +#else +#define MAX_FPRARGS 2 +#endif + +/* Round to multiple of 16. */ +#define ROUND_SIZE(size) (((size) + 15) & ~15) + +/*===================== End of Defines ===============================*/ + +/*====================================================================*/ +/* Externals */ +/* --------- */ +/*====================================================================*/ + +struct call_frame +{ + void *back_chain; + void *eos; + unsigned long gpr_args[5]; + unsigned long gpr_save[9]; + unsigned long long fpr_args[4]; +}; + +extern void FFI_HIDDEN ffi_call_SYSV(struct call_frame *, unsigned, void *, + void (*fn)(void), void *); + +extern void ffi_closure_SYSV(void); +extern void ffi_go_closure_SYSV(void); + +/*====================== End of Externals ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_check_struct_type. */ +/* */ +/* Function - Determine if a structure can be passed within a */ +/* general purpose or floating point register. */ +/* */ +/*====================================================================*/ + +static int +ffi_check_struct_type (ffi_type *arg) +{ + size_t size = arg->size; + + /* If the struct has just one element, look at that element + to find out whether to consider the struct as floating point. */ + while (arg->type == FFI_TYPE_STRUCT + && arg->elements[0] && !arg->elements[1]) + arg = arg->elements[0]; + + /* Structs of size 1, 2, 4, and 8 are passed in registers, + just like the corresponding int/float types. */ + switch (size) + { + case 1: + return FFI_TYPE_UINT8; + + case 2: + return FFI_TYPE_UINT16; + + case 4: + if (arg->type == FFI_TYPE_FLOAT) + return FFI_TYPE_FLOAT; + else + return FFI_TYPE_UINT32; + + case 8: + if (arg->type == FFI_TYPE_DOUBLE) + return FFI_TYPE_DOUBLE; + else + return FFI_TYPE_UINT64; + + default: + break; + } + + /* Other structs are passed via a pointer to the data. */ + return FFI_TYPE_POINTER; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_cif_machdep. */ +/* */ +/* Function - Perform machine dependent CIF processing. */ +/* */ +/*====================================================================*/ + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t struct_size = 0; + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Determine return value handling. */ + + switch (cif->rtype->type) + { + /* Void is easy. */ + case FFI_TYPE_VOID: + cif->flags = FFI390_RET_VOID; + break; + + /* Structures and complex are returned via a hidden pointer. 
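+	 FFI390_RET_STRUCT is defined in internal.h as
+	 FFI390_RET_VOID | FFI390_RET_IN_MEM, so once the hidden buffer has
+	 been filled the assembly return path treats the call like void.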
*/ + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; /* We need one GPR to pass the pointer. */ + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + cif->flags = FFI390_RET_FLOAT; + break; + + case FFI_TYPE_DOUBLE: + cif->flags = FFI390_RET_DOUBLE; + break; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + cif->flags = FFI390_RET_STRUCT; + n_gpr++; + break; +#endif + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI390_RET_INT64; + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + /* These are to be extended to word size. */ +#ifdef __s390x__ + cif->flags = FFI390_RET_INT64; +#else + cif->flags = FFI390_RET_INT32; +#endif + break; + + default: + FFI_ASSERT (0); + break; + } + + /* Now for the arguments. */ + + for (ptr = cif->arg_types, i = cif->nargs; + i > 0; + i--, ptr++) + { + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. */ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, we must reserve space + to copy its data for proper call-by-value semantics. */ + if (type == FFI_TYPE_POINTER) + struct_size += ROUND_SIZE ((*ptr)->size); + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + /* The first MAX_FPRARGS floating point arguments + go in FPRs, the rest overflow to the stack. */ + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + n_fpr++; + else + n_ov++; + break; + + /* On 31-bit machines, 64-bit integers are passed in GPR pairs, + if one is still available, or else on the stack. If only one + register is free, skip the register (it won't be used for any + subsequent argument either). */ + +#ifndef __s390x__ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + n_gpr += 2; + else + n_ov += 2; + break; +#endif + + /* Everything else is passed in GPRs (until MAX_GPRARGS + have been used) or overflows to the stack. */ + + default: + if (n_gpr < MAX_GPRARGS) + n_gpr++; + else + n_ov++; + break; + } + } + + /* Total stack space as required for overflow arguments + and temporary structure copies. */ + + cif->bytes = ROUND_SIZE (n_ov * sizeof (long)) + struct_size; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_call. */ +/* */ +/* Function - Call the FFI routine. 
*/ +/* */ +/*====================================================================*/ + +static void +ffi_call_int(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue, + void *closure) +{ + int ret_type = cif->flags; + size_t rsize = 0, bytes = cif->bytes; + unsigned char *stack, *p_struct; + struct call_frame *frame; + unsigned long *p_ov, *p_gpr; + unsigned long long *p_fpr; + int n_fpr, n_gpr, n_ov, i, n; + ffi_type **arg_types; + + FFI_ASSERT (cif->abi == FFI_SYSV); + + /* If we don't have a return value, we need to fake one. */ + if (rvalue == NULL) + { + if (ret_type & FFI390_RET_IN_MEM) + rsize = cif->rtype->size; + else + ret_type = FFI390_RET_VOID; + } + + /* The stack space will be filled with those areas: + + dummy structure return (highest addresses) + FPR argument register save area + GPR argument register save area + stack frame for ffi_call_SYSV + temporary struct copies + overflow argument area (lowest addresses) + + We set up the following pointers: + + p_fpr: bottom of the FPR area (growing upwards) + p_gpr: bottom of the GPR area (growing upwards) + p_ov: bottom of the overflow area (growing upwards) + p_struct: top of the struct copy area (growing downwards) + + All areas are kept aligned to twice the word size. + + Note that we're going to create the stack frame for both + ffi_call_SYSV _and_ the target function right here. This + works because we don't make any function calls with more + than 5 arguments (indeed only memcpy and ffi_call_SYSV), + and thus we don't have any stacked outgoing parameters. */ + + stack = alloca (bytes + sizeof(struct call_frame) + rsize); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + /* Link the new frame back to the one from this function. */ + frame->back_chain = __builtin_frame_address (0); + + /* Fill in all of the argument stuff. */ + p_ov = (unsigned long *)stack; + p_struct = (unsigned char *)frame; + p_gpr = frame->gpr_args; + p_fpr = frame->fpr_args; + n_fpr = n_gpr = n_ov = 0; + + /* If we returning a structure then we set the first parameter register + to the address of where we are returning this structure. */ + if (cif->flags & FFI390_RET_IN_MEM) + p_gpr[n_gpr++] = (uintptr_t) rvalue; + + /* Now for the arguments. */ + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + void *arg = avalue[i]; + int type = ty->type; + ffi_arg val; + + restart: + switch (type) + { + case FFI_TYPE_SINT8: + val = *(SINT8 *)arg; + goto do_int; + case FFI_TYPE_UINT8: + val = *(UINT8 *)arg; + goto do_int; + case FFI_TYPE_SINT16: + val = *(SINT16 *)arg; + goto do_int; + case FFI_TYPE_UINT16: + val = *(UINT16 *)arg; + goto do_int; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + val = *(SINT32 *)arg; + goto do_int; + case FFI_TYPE_UINT32: + val = *(UINT32 *)arg; + goto do_int; + case FFI_TYPE_POINTER: + val = *(uintptr_t *)arg; + do_int: + *(n_gpr < MAX_GPRARGS ? 
p_gpr + n_gpr++ : p_ov + n_ov++) = val; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + val = *(UINT64 *)arg; + goto do_int; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + p_gpr[n_gpr++] = ((UINT32 *) arg)[0], + p_gpr[n_gpr++] = ((UINT32 *) arg)[1]; + else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + break; + + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = *(UINT64 *) arg; + else + { +#ifdef __s390x__ + p_ov[n_ov++] = *(UINT64 *) arg; +#else + p_ov[n_ov++] = ((UINT32 *) arg)[0], + p_ov[n_ov++] = ((UINT32 *) arg)[1]; +#endif + } + break; + + case FFI_TYPE_FLOAT: + val = *(UINT32 *)arg; + if (n_fpr < MAX_FPRARGS) + p_fpr[n_fpr++] = (UINT64)val << 32; + else + p_ov[n_ov++] = val; + break; + + case FFI_TYPE_STRUCT: + /* Check how a structure type is passed. */ + type = ffi_check_struct_type (ty); + /* Some structures are passed via a type they contain. */ + if (type != FFI_TYPE_POINTER) + goto restart; + /* ... otherwise, passed by reference. fallthru. */ + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + /* 16-byte long double is passed via reference. */ +#endif + case FFI_TYPE_COMPLEX: + /* Complex types are passed via reference. */ + p_struct -= ROUND_SIZE (ty->size); + memcpy (p_struct, arg, ty->size); + val = (uintptr_t)p_struct; + goto do_int; + + default: + FFI_ASSERT (0); + break; + } + } + + ffi_call_SYSV (frame, ret_type & FFI360_RET_MASK, rvalue, fn, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_closure_helper_SYSV. */ +/* */ +/* Function - Call a FFI closure target function. */ +/* */ +/*====================================================================*/ + +void FFI_HIDDEN +ffi_closure_helper_SYSV (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + unsigned long *p_gpr, + unsigned long long *p_fpr, + unsigned long *p_ov) +{ + unsigned long long ret_buffer; + + void *rvalue = &ret_buffer; + void **avalue; + void **p_arg; + + int n_gpr = 0; + int n_fpr = 0; + int n_ov = 0; + + ffi_type **ptr; + int i; + + /* Allocate buffer for argument list pointers. */ + p_arg = avalue = alloca (cif->nargs * sizeof (void *)); + + /* If we returning a structure, pass the structure address + directly to the target function. Otherwise, have the target + function store the return value to the GPR save area. */ + if (cif->flags & FFI390_RET_IN_MEM) + rvalue = (void *) p_gpr[n_gpr++]; + + /* Now for the arguments. */ + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, p_arg++, ptr++) + { + int deref_struct_pointer = 0; + int type = (*ptr)->type; + +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + /* 16-byte long double is passed like a struct. */ + if (type == FFI_TYPE_LONGDOUBLE) + type = FFI_TYPE_STRUCT; +#endif + + /* Check how a structure type is passed. 
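+	 ffi_check_struct_type maps structs of size 1, 2, 4 or 8 onto the
+	 corresponding integer or float type; anything larger arrives as a
+	 pointer, which deref_struct_pointer dereferences further below.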
*/ + if (type == FFI_TYPE_STRUCT || type == FFI_TYPE_COMPLEX) + { + if (type == FFI_TYPE_COMPLEX) + type = FFI_TYPE_POINTER; + else + type = ffi_check_struct_type (*ptr); + + /* If we pass the struct via pointer, remember to + retrieve the pointer later. */ + if (type == FFI_TYPE_POINTER) + deref_struct_pointer = 1; + } + + /* Pointers are passed like UINTs of the same size. */ + if (type == FFI_TYPE_POINTER) + { +#ifdef __s390x__ + type = FFI_TYPE_UINT64; +#else + type = FFI_TYPE_UINT32; +#endif + } + + /* Now handle all primitive int/float data types. */ + switch (type) + { + case FFI_TYPE_DOUBLE: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = &p_ov[n_ov], + n_ov += sizeof (double) / sizeof (long); + break; + + case FFI_TYPE_FLOAT: + if (n_fpr < MAX_FPRARGS) + *p_arg = &p_fpr[n_fpr++]; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr++]; + else + *p_arg = &p_ov[n_ov++]; +#else + if (n_gpr == MAX_GPRARGS-1) + n_gpr = MAX_GPRARGS; + if (n_gpr < MAX_GPRARGS) + *p_arg = &p_gpr[n_gpr], n_gpr += 2; + else + *p_arg = &p_ov[n_ov], n_ov += 2; +#endif + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 4; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 4; + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 2; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 2; + break; + + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + if (n_gpr < MAX_GPRARGS) + *p_arg = (char *)&p_gpr[n_gpr++] + sizeof (long) - 1; + else + *p_arg = (char *)&p_ov[n_ov++] + sizeof (long) - 1; + break; + + default: + FFI_ASSERT (0); + break; + } + + /* If this is a struct passed via pointer, we need to + actually retrieve that pointer. */ + if (deref_struct_pointer) + *p_arg = *(void **)*p_arg; + } + + + /* Call the target function. */ + (fun) (cif, rvalue, avalue, user_data); + + /* Convert the return value. */ + switch (cif->rtype->type) + { + /* Void is easy, and so is struct. */ + case FFI_TYPE_VOID: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: +#endif + break; + + /* Floating point values are returned in fpr 0. */ + case FFI_TYPE_FLOAT: + p_fpr[0] = (long long) *(unsigned int *) rvalue << 32; + break; + + case FFI_TYPE_DOUBLE: + p_fpr[0] = *(unsigned long long *) rvalue; + break; + + /* Integer values are returned in gpr 2 (and gpr 3 + for 64-bit values on 31-bit machines). */ + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: +#ifdef __s390x__ + p_gpr[0] = *(unsigned long *) rvalue; +#else + p_gpr[0] = ((unsigned long *) rvalue)[0], + p_gpr[1] = ((unsigned long *) rvalue)[1]; +#endif + break; + + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT8: + p_gpr[0] = *(unsigned long *) rvalue; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT8: + p_gpr[0] = *(signed long *) rvalue; + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/*======================== End of Routine ============================*/ + +/*====================================================================*/ +/* */ +/* Name - ffi_prep_closure_loc. */ +/* */ +/* Function - Prepare a FFI closure. 
*/ +/* */ +/*====================================================================*/ + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + void *codeloc) +{ + static unsigned short const template[] = { + 0x0d10, /* basr %r1,0 */ +#ifndef __s390x__ + 0x9801, 0x1006, /* lm %r0,%r1,6(%r1) */ +#else + 0xeb01, 0x100e, 0x0004, /* lmg %r0,%r1,14(%r1) */ +#endif + 0x07f1 /* br %r1 */ + }; + + unsigned long *tramp = (unsigned long *)&closure->tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + memcpy (tramp, template, sizeof(template)); + tramp[2] = (unsigned long)codeloc; + tramp[3] = (unsigned long)&ffi_closure_SYSV; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +/*======================== End of Routine ============================*/ + +/* Build a Go language closure. */ + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_SYSV; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/ffitarget.h new file mode 100644 index 0000000..d8a4ee4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/ffitarget.h @@ -0,0 +1,70 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for S390. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#if defined (__s390x__) +#ifndef S390X +#define S390X +#endif +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#ifdef S390X +#define FFI_TRAMPOLINE_SIZE 32 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/internal.h new file mode 100644 index 0000000..b875578 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/internal.h @@ -0,0 +1,11 @@ +/* If these values change, sysv.S must be adapted! */ +#define FFI390_RET_DOUBLE 0 +#define FFI390_RET_FLOAT 1 +#define FFI390_RET_INT64 2 +#define FFI390_RET_INT32 3 +#define FFI390_RET_VOID 4 + +#define FFI360_RET_MASK 7 +#define FFI390_RET_IN_MEM 8 + +#define FFI390_RET_STRUCT (FFI390_RET_VOID | FFI390_RET_IN_MEM) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/sysv.S new file mode 100644 index 0000000..c4b5006 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/s390/sysv.S @@ -0,0 +1,325 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2000 Software AG + Copyright (c) 2008 Red Hat, Inc. + + S390 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + + .text + +#ifndef __s390x__ + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. 
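+	# Return handling uses a small jump table: ret_type (see internal.h)
+	# is scaled by 8 and added to the address of .Ltable, so each
+	# FFI390_RET_* case below occupies one 8-byte slot.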
+ .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + st %r6,44(%r2) # Save registers + stm %r12,%r14,48(%r2) + lr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 44 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_def_cfa_register r13 + st %r2,0(%r15) # Set up back chain + sla %r3,3 # ret_type *= 8 + lr %r12,%r4 # Save ret_addr + lr %r1,%r5 # Save fun + lr %r0,%r6 # Install static chain + + # Set return address, so that there is only one indirect jump. +#ifdef HAVE_AS_S390_ZARCH + larl %r14,.Ltable + ar %r14,%r3 +#else + basr %r14,0 +0: la %r14,.Ltable-0b(%r14,%r3) +#endif + + lm %r2,%r6,8(%r13) # Load arguments + ld %f0,64(%r13) + ld %f2,72(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_FLOAT + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + st %r3,4(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_INT32 + st %r2,0(%r12) + nop + # fallthru + + .balign 8 +# FFI390_RET_VOID +.Ldone: + l %r14,56(%r13) + l %r12,48(%r13) + l %r6,44(%r13) + l %r13,52(%r13) + .cfi_restore 14 + .cfi_restore 13 + .cfi_restore 12 + .cfi_restore 6 + .cfi_def_cfa r15, 96 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Load closure -> user_data + l %r2,4(%r4) # ->cif + l %r3,8(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stm %r2,%r6,8(%r15) # Save arguments + lr %r4,%r0 # Closure + l %r2,16(%r4) # ->cif + l %r3,20(%r4) # ->fun + l %r4,24(%r4) # ->user_data +.Ldoclosure: + stm %r12,%r15,48(%r15) # Save registers + lr %r12,%r15 + .cfi_def_cfa_register r12 + .cfi_rel_offset r6, 24 + .cfi_rel_offset r12, 48 + .cfi_rel_offset r13, 52 + .cfi_rel_offset r14, 56 + .cfi_rel_offset r15, 60 +#ifndef HAVE_AS_S390_ZARCH + basr %r13,0 # Set up base register +.Lcbase: + l %r1,.Lchelper-.Lcbase(%r13) # Get helper function +#endif + ahi %r15,-96-8 # Set up stack frame + st %r12,0(%r15) # Set up back chain + + std %f0,64(%r12) # Save fp arguments + std %f2,72(%r12) + + la %r5,96(%r12) # Overflow + st %r5,96(%r15) + la %r6,64(%r12) # FPRs + la %r5,8(%r12) # GPRs +#ifdef HAVE_AS_S390_ZARCH + brasl %r14,ffi_closure_helper_SYSV +#else + bas %r14,0(%r1,%r13) # Call helper +#endif + + lr %r15,%r12 + .cfi_def_cfa_register r15 + lm %r12,%r14,48(%r12) # Restore saved registers + l %r6,24(%r15) + ld %f0,64(%r15) # Load return registers + lm %r2,%r3,8(%r15) + br %r14 + .cfi_endproc + +#ifndef HAVE_AS_S390_ZARCH + .align 4 +.Lchelper: + .long ffi_closure_helper_SYSV-.Lcbase +#endif + + .size ffi_closure_SYSV,.-ffi_closure_SYSV + +#else + + # r2: frame + # r3: ret_type + # r4: ret_addr + # r5: fun + # r6: closure + + # This assumes we are using gas. 
+ .balign 8 + .globl ffi_call_SYSV + FFI_HIDDEN(ffi_call_SYSV) + .type ffi_call_SYSV,%function +ffi_call_SYSV: + .cfi_startproc + stg %r6,88(%r2) # Save registers + stmg %r12,%r14,96(%r2) + lgr %r13,%r2 # Install frame pointer + .cfi_rel_offset r6, 88 + .cfi_rel_offset r12, 96 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_def_cfa_register r13 + stg %r2,0(%r15) # Set up back chain + larl %r14,.Ltable # Set up return address + slag %r3,%r3,3 # ret_type *= 8 + lgr %r12,%r4 # Save ret_addr + lgr %r1,%r5 # Save fun + lgr %r0,%r6 # Install static chain + agr %r14,%r3 + lmg %r2,%r6,16(%r13) # Load arguments + ld %f0,128(%r13) + ld %f2,136(%r13) + ld %f4,144(%r13) + ld %f6,152(%r13) + br %r1 # ... and call function + + .balign 8 +.Ltable: +# FFI390_RET_DOUBLE + std %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_DOUBLE + ste %f0,0(%r12) + j .Ldone + + .balign 8 +# FFI390_RET_INT64 + stg %r2,0(%r12) + + .balign 8 +# FFI390_RET_INT32 + # Never used, as we always store type ffi_arg. + # But the stg above is 6 bytes and we cannot + # jump around this case, so fall through. + nop + nop + + .balign 8 +# FFI390_RET_VOID +.Ldone: + lg %r14,112(%r13) + lg %r12,96(%r13) + lg %r6,88(%r13) + lg %r13,104(%r13) + .cfi_restore r14 + .cfi_restore r13 + .cfi_restore r12 + .cfi_restore r6 + .cfi_def_cfa r15, 160 + br %r14 + .cfi_endproc + .size ffi_call_SYSV,.-ffi_call_SYSV + + + .balign 8 + .globl ffi_go_closure_SYSV + FFI_HIDDEN(ffi_go_closure_SYSV) + .type ffi_go_closure_SYSV,%function +ffi_go_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure -> user_data + lg %r2,8(%r4) # ->cif + lg %r3,16(%r4) # ->fun + j .Ldoclosure + .cfi_endproc + .size ffi_go_closure_SYSV,.-ffi_go_closure_SYSV + + + .balign 8 + .globl ffi_closure_SYSV + FFI_HIDDEN(ffi_closure_SYSV) + .type ffi_closure_SYSV,%function +ffi_closure_SYSV: + .cfi_startproc + stmg %r2,%r6,16(%r15) # Save arguments + lgr %r4,%r0 # Load closure + lg %r2,32(%r4) # ->cif + lg %r3,40(%r4) # ->fun + lg %r4,48(%r4) # ->user_data +.Ldoclosure: + stmg %r13,%r15,104(%r15) # Save registers + lgr %r13,%r15 + .cfi_def_cfa_register r13 + .cfi_rel_offset r6, 48 + .cfi_rel_offset r13, 104 + .cfi_rel_offset r14, 112 + .cfi_rel_offset r15, 120 + aghi %r15,-160-16 # Set up stack frame + stg %r13,0(%r15) # Set up back chain + + std %f0,128(%r13) # Save fp arguments + std %f2,136(%r13) + std %f4,144(%r13) + std %f6,152(%r13) + la %r5,160(%r13) # Overflow + stg %r5,160(%r15) + la %r6,128(%r13) # FPRs + la %r5,16(%r13) # GPRs + brasl %r14,ffi_closure_helper_SYSV # Call helper + + lgr %r15,%r13 + .cfi_def_cfa_register r15 + lmg %r13,%r14,104(%r13) # Restore saved registers + lg %r6,48(%r15) + ld %f0,128(%r15) # Load return registers + lg %r2,16(%r15) + br %r14 + .cfi_endproc + .size ffi_closure_SYSV,.-ffi_closure_SYSV +#endif /* !s390x */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/ffi.c new file mode 100644 index 0000000..9ec86bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/ffi.c @@ -0,0 +1,717 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2002-2008, 2012 Kaz Kojima + Copyright (c) 2008 Red Hat, Inc. 
+ + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 4 +#if defined(__SH4__) +#define NFREGARG 8 +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +/* If the structure has essentially an unique element, return its type. */ +static int +simple_type (ffi_type *arg) +{ + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + else if (arg->elements[1]) + return FFI_TYPE_STRUCT; + + return simple_type (arg->elements[0]); +} + +static int +return_type (ffi_type *arg) +{ + unsigned short type; + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + type = simple_type (arg->elements[0]); + if (! arg->elements[1]) + { + switch (type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + return FFI_TYPE_INT; + + default: + return type; + } + } + + /* gcc uses r0/r1 pair for some kind of structures. */ + if (arg->size <= 2 * sizeof (int)) + { + int i = 0; + ffi_type *e; + + while ((e = arg->elements[i++])) + { + type = simple_type (e); + switch (type) + { + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + return FFI_TYPE_UINT64; + + default: + break; + } + } + } + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register int tmp; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + int greg, ireg; +#if defined(__SH4__) + int freg = 0; +#endif + + tmp = 0; + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += 4; + ireg = STRUCT_VALUE_ADDRESS_WITH_ARG ? 1 : 0; + } + else + ireg = 0; + + /* Set arguments for registers. 
*/ + greg = ireg; + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + /* Set arguments on stack. */ + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; + + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; + + default: + FFI_ASSERT(0); + } + argp += z; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + argp += z; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + memcpy (argp, *p_argv, z); + argp += z; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! 
defined(__SH4__)) + else if (greg < NGREGARG) + { + greg = NGREGARG; + continue; + } +#endif + memcpy (argp, *p_argv, z); + argp += n * sizeof (int); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; +#if defined(__SH4__) + int freg = 0; +#endif + + cif->flags = 0; + + greg = ((return_type (cif->rtype) == FFI_TYPE_STRUCT) && + STRUCT_VALUE_ADDRESS_WITH_ARG) ? 1 : 0; + +#if defined(__SH4__) + for (i = j = 0; i < cif->nargs && j < 12; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + if (freg >= NFREGARG) + continue; + freg++; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + case FFI_TYPE_DOUBLE: + if ((freg + 1) >= NFREGARG) + continue; + freg = (freg + 1) & ~1; + freg += 2; + cif->flags += ((cif->arg_types)[i]->type) << (2 * j); + j++; + break; + + default: + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 >= NGREGARG) + continue; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + break; + } + } +#else + for (i = j = 0; i < cif->nargs && j < 4; i++) + { + size = (cif->arg_types)[i]->size; + n = (size + sizeof (int) - 1) / sizeof (int); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + n = NGREGARG - greg; + greg += n; + for (m = 0; m < n; m++) + cif->flags += FFI_TYPE_INT << (2 * j++); + } +#endif + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags += (unsigned) (return_type (cif->rtype)) << 24; + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags += (unsigned) cif->rtype->type << 24; + break; + + default: + cif->flags += FFI_TYPE_INT << 24; + break; + } + + return FFI_OK; +} + +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *, + unsigned, unsigned, unsigned *, void (*fn)(void)); + +void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, + fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +#if defined(__SH4__) +extern void __ic_invalidate (void *line); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + unsigned int insn; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Set T bit if the function returns a struct pointed with R2. */ + insn = (return_type (cif->rtype) == FFI_TYPE_STRUCT + ? 
0x0018 /* sett */ + : 0x0008 /* clrt */); + +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0xd301d102; + tramp[1] = 0x0000412b | (insn << 16); +#else + tramp[0] = 0xd102d301; + tramp[1] = 0x412b0000 | insn; +#endif + *(void **) &tramp[2] = (void *)codeloc; /* ctx */ + *(void **) &tramp[3] = (void *)ffi_closure_SYSV; /* funaddr */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + +#if defined(__SH4__) + /* Flush the icache. */ + __ic_invalidate(codeloc); +#endif + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +#ifdef __LITTLE_ENDIAN__ +#define OFS_INT8 0 +#define OFS_INT16 0 +#else +#define OFS_INT8 3 +#define OFS_INT16 2 +#endif + +int +ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue, + unsigned long *pgr, unsigned long *pfr, + unsigned long *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int ireg, greg = 0; +#if defined(__SH4__) + int freg = 0; +#endif + ffi_cif *cif; + + cif = closure->cif; + avalue = alloca(cif->nargs * sizeof(void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (cif->rtype->type == FFI_TYPE_STRUCT && STRUCT_VALUE_ADDRESS_WITH_ARG) + { + rvalue = (void *) *pgr++; + ireg = 1; + } + else + ireg = 0; + + cif = closure->cif; + greg = ireg; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ >= NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pgr) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pgr) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pgr; + break; + + default: + FFI_ASSERT(0); + } + pgr++; + } + else if (z == sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ >= NFREGARG) + continue; + avalue[i] = pfr; + pfr++; + } + else +#endif + { + if (greg++ >= NGREGARG) + continue; + avalue[i] = pgr; + pgr++; + } + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + continue; + if (freg & 1) + pfr++; + freg = (freg + 1) & ~1; + freg += 2; + avalue[i] = pfr; + pfr += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); +#if defined(__SH4__) + if (greg + n - 1 >= NGREGARG) + continue; +#else + if (greg >= NGREGARG) + continue; +#endif + greg += n; + avalue[i] = pgr; + pgr += n; + } + } + + greg = ireg; +#if defined(__SH4__) + freg = 0; +#endif + + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof(int)) + { + if (greg++ < NGREGARG) + continue; + + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + avalue[i] = (((char *)pst) + OFS_INT8); + break; + + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + avalue[i] = (((char *)pst) + OFS_INT16); + break; + + case FFI_TYPE_STRUCT: + avalue[i] = pst; + break; + + default: + FFI_ASSERT(0); + } + pst++; + } + else if (z == 
sizeof(int)) + { +#if defined(__SH4__) + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg++ < NFREGARG) + continue; + } + else +#endif + { + if (greg++ < NGREGARG) + continue; + } + avalue[i] = pst; + pst++; + } +#if defined(__SH4__) + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 < NFREGARG) + { + freg = (freg + 1) & ~1; + freg += 2; + continue; + } + avalue[i] = pst; + pst += 2; + } +#endif + else + { + int n = (z + sizeof (int) - 1) / sizeof (int); + if (greg + n - 1 < NGREGARG) + { + greg += n; + continue; + } +#if (! defined(__SH4__)) + else if (greg < NGREGARG) + { + greg += n; + pst += greg - NGREGARG; + continue; + } +#endif + avalue[i] = pst; + pst += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. */ + return return_type (cif->rtype); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/ffitarget.h new file mode 100644 index 0000000..a36bf42 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/ffitarget.h @@ -0,0 +1,54 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/sysv.S new file mode 100644 index 0000000..5be7516 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh/sysv.S @@ -0,0 +1,850 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2002, 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#if defined(__HITACHI__) +#define STRUCT_VALUE_ADDRESS_WITH_ARG 1 +#else +#define STRUCT_VALUE_ADDRESS_WITH_ARG 0 +#endif + +.text + + # r4: ffi_prep_args + # r5: &ecif + # r6: bytes + # r7: flags + # sp+0: rvalue + # sp+4: fn + + # This assumes we are using gas. 
+ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + mov.l r8,@-r15 +.LCFI0: + mov.l r9,@-r15 +.LCFI1: + mov.l r10,@-r15 +.LCFI2: + mov.l r12,@-r15 +.LCFI3: + mov.l r14,@-r15 +.LCFI4: + sts.l pr,@-r15 +.LCFI5: + mov r15,r14 +.LCFI6: +#if defined(__SH4__) + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r1 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + mov #4,r3 + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r1,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_f + + mov r3,r0 + and #1,r0 + tst r0,r0 + bt 1f + add #1,r3 +1: + mov #12,r0 + cmp/hs r0,r3 + bt/s 3f + shlr2 r1 + bsr L_pop_d + nop +3: + add #2,r3 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r3,r0 + add r0,r0 + add r3,r0 + add #-12,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_f: + cmp/eq #FFI_TYPE_FLOAT,r0 + bf L_pass_i + + mov #12,r0 + cmp/hs r0,r3 + bt/s 2f + shlr2 r1 + bsr L_pop_f + nop +2: + add #1,r3 + bra L_pass + add #-4,r8 + +L_pop_f: + mov r3,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop +#ifdef __LITTLE_ENDIAN__ + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr11 + rts + fmov.s @r15+,fr10 +#else + rts + fmov.s @r15+,fr4 + rts + fmov.s @r15+,fr5 + rts + fmov.s @r15+,fr6 + rts + fmov.s @r15+,fr7 + rts + fmov.s @r15+,fr8 + rts + fmov.s @r15+,fr9 + rts + fmov.s @r15+,fr10 + rts + fmov.s @r15+,fr11 +#endif + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r1 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr1,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr0,@r1 +#else + fmov.s fr0,@r1 + add #4,r1 + bra L_epilogue + fmov.s fr1,@r1 +#endif + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_f + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_f: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bf L_ret_i + + mov.l @(24,r14),r1 + bra L_epilogue + fmov.s fr0,@r1 + +L_ret_i: + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue + + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#else + mov r6,r8 + mov r7,r9 + + sub r6,r15 + add #-16,r15 + mov #~7,r0 + and r0,r15 + + mov r4,r0 + jsr @r0 + mov r15,r4 + + mov r9,r3 + shlr8 r9 + shlr8 r9 + shlr8 r9 + + mov #FFI_TYPE_STRUCT,r2 + cmp/eq r2,r9 + bf 1f +#if STRUCT_VALUE_ADDRESS_WITH_ARG + mov.l @r15+,r4 + bra 2f + mov #5,r2 +#else + mov.l @r15+,r10 +#endif +1: + mov #4,r2 +2: + +L_pass: + cmp/pl r8 + bf L_call_it + + mov r3,r0 + and #3,r0 + +L_pass_d: + cmp/eq #FFI_TYPE_DOUBLE,r0 + bf L_pass_i + + mov r15,r0 + and #7,r0 + tst r0,r0 + bt 1f + add #4,r15 +1: + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_d + nop +2: + add #2,r2 + bra L_pass + add #-8,r8 + +L_pop_d: + mov r2,r0 + add r0,r0 + add r2,r0 + add #-12,r0 + add r0,r0 + braf r0 + nop + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + rts + mov.l @r15+,r7 + +L_pass_i: + cmp/eq #FFI_TYPE_INT,r0 + bf L_call_it + + mov #8,r0 + cmp/hs r0,r2 + bt/s 2f + shlr2 r3 + bsr L_pop_i + nop +2: + add #1,r2 + bra L_pass + add #-4,r8 + +L_pop_i: + mov r2,r0 + shll2 r0 + add #-16,r0 + braf r0 + nop + rts + mov.l @r15+,r4 + rts + mov.l @r15+,r5 + rts + mov.l @r15+,r6 + rts + mov.l @r15+,r7 + +L_call_it: + # call function +#if (! 
STRUCT_VALUE_ADDRESS_WITH_ARG) + mov r10, r2 +#endif + mov.l @(28,r14),r1 + jsr @r1 + nop + +L_ret_d: + mov #FFI_TYPE_DOUBLE,r2 + cmp/eq r2,r9 + bf L_ret_ll + + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_ll: + mov #FFI_TYPE_SINT64,r2 + cmp/eq r2,r9 + bt/s 1f + mov #FFI_TYPE_UINT64,r2 + cmp/eq r2,r9 + bf L_ret_i + +1: + mov.l @(24,r14),r2 + mov.l r0,@r2 + bra L_epilogue + mov.l r1,@(4,r2) + +L_ret_i: + mov #FFI_TYPE_FLOAT,r2 + cmp/eq r2,r9 + bt 1f + mov #FFI_TYPE_INT,r2 + cmp/eq r2,r9 + bf L_epilogue +1: + mov.l @(24,r14),r1 + bra L_epilogue + mov.l r0,@r1 + +L_epilogue: + # Remove the space we pushed for the args + mov r14,r15 + + lds.l @r15+,pr + mov.l @r15+,r14 + mov.l @r15+,r12 + mov.l @r15+,r10 + mov.l @r15+,r9 + rts + mov.l @r15+,r8 +#endif +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + +.globl ffi_closure_helper_SYSV + +ENTRY(ffi_closure_SYSV) +.LFB2: + mov.l r7,@-r15 +.LCFI7: + mov.l r6,@-r15 +.LCFI8: + mov.l r5,@-r15 +.LCFI9: + mov.l r4,@-r15 +.LCFIA: + mov.l r14,@-r15 +.LCFIB: + sts.l pr,@-r15 + + /* Stack layout: + xx bytes (on stack parameters) + 16 bytes (register parameters) + 4 bytes (saved frame pointer) + 4 bytes (saved return address) + 32 bytes (floating register parameters, SH-4 only) + 8 bytes (result) + 4 bytes (pad) + 4 bytes (5th arg) + <- new stack pointer + */ +.LCFIC: +#if defined(__SH4__) + add #-48,r15 +#else + add #-16,r15 +#endif +.LCFID: + mov r15,r14 +.LCFIE: + +#if defined(__SH4__) + mov r14,r1 + add #48,r1 +#ifdef __LITTLE_ENDIAN__ + fmov.s fr10,@-r1 + fmov.s fr11,@-r1 + fmov.s fr8,@-r1 + fmov.s fr9,@-r1 + fmov.s fr6,@-r1 + fmov.s fr7,@-r1 + fmov.s fr4,@-r1 + fmov.s fr5,@-r1 +#else + fmov.s fr11,@-r1 + fmov.s fr10,@-r1 + fmov.s fr9,@-r1 + fmov.s fr8,@-r1 + fmov.s fr7,@-r1 + fmov.s fr6,@-r1 + fmov.s fr5,@-r1 + fmov.s fr4,@-r1 +#endif + mov r1,r7 + mov r14,r6 + add #56,r6 +#else + mov r14,r6 + add #24,r6 +#endif + + bt/s 10f + mov r2, r5 + mov r14,r1 + add #8,r1 + mov r1,r5 +10: + + mov r14,r1 +#if defined(__SH4__) + add #72,r1 +#else + add #40,r1 +#endif + mov.l r1,@r14 + +#ifdef PIC + mov.l L_got,r1 + mova L_got,r0 + add r0,r1 + mov.l L_helper,r0 + add r1,r0 +#else + mov.l L_helper,r0 +#endif + jsr @r0 + mov r3,r4 + + shll r0 + mov r0,r1 + mova L_table,r0 + add r1,r0 + mov.w @r0,r0 + mov r14,r2 + braf r0 + add #8,r2 +0: + .align 2 +#ifdef PIC +L_got: + .long _GLOBAL_OFFSET_TABLE_ +L_helper: + .long ffi_closure_helper_SYSV@GOTOFF +#else +L_helper: + .long ffi_closure_helper_SYSV +#endif +L_table: + .short L_case_v - 0b /* FFI_TYPE_VOID */ + .short L_case_i - 0b /* FFI_TYPE_INT */ +#if defined(__SH4__) + .short L_case_f - 0b /* FFI_TYPE_FLOAT */ + .short L_case_d - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_d - 0b /* FFI_TYPE_LONGDOUBLE */ +#else + .short L_case_i - 0b /* FFI_TYPE_FLOAT */ + .short L_case_ll - 0b /* FFI_TYPE_DOUBLE */ + .short L_case_ll - 0b /* FFI_TYPE_LONGDOUBLE */ +#endif + .short L_case_uq - 0b /* FFI_TYPE_UINT8 */ + .short L_case_q - 0b /* FFI_TYPE_SINT8 */ + .short L_case_uh - 0b /* FFI_TYPE_UINT16 */ + .short L_case_h - 0b /* FFI_TYPE_SINT16 */ + .short L_case_i - 0b /* FFI_TYPE_UINT32 */ + .short L_case_i - 0b /* FFI_TYPE_SINT32 */ + .short L_case_ll - 0b /* FFI_TYPE_UINT64 */ + .short L_case_ll - 0b /* FFI_TYPE_SINT64 */ + .short L_case_v - 0b /* FFI_TYPE_STRUCT */ + .short L_case_i - 0b /* FFI_TYPE_POINTER */ + +#if defined(__SH4__) +L_case_d: +#ifdef __LITTLE_ENDIAN__ + fmov.s @r2+,fr1 + bra L_case_v + fmov.s @r2,fr0 +#else + fmov.s @r2+,fr0 + 
bra L_case_v + fmov.s @r2,fr1 +#endif + +L_case_f: + bra L_case_v + fmov.s @r2,fr0 +#endif + +L_case_ll: + mov.l @r2+,r0 + bra L_case_v + mov.l @r2,r1 + +L_case_i: + bra L_case_v + mov.l @r2,r0 + +L_case_q: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + bra L_case_v + mov.b @r2,r0 + +L_case_uq: +#ifdef __LITTLE_ENDIAN__ +#else + add #3,r2 +#endif + mov.b @r2,r0 + bra L_case_v + extu.b r0,r0 + +L_case_h: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + bra L_case_v + mov.w @r2,r0 + +L_case_uh: +#ifdef __LITTLE_ENDIAN__ +#else + add #2,r2 +#endif + mov.w @r2,r0 + extu.w r0,r0 + /* fall through */ + +L_case_v: +#if defined(__SH4__) + add #48,r15 +#else + add #16,r15 +#endif + lds.l @r15+,pr + mov.l @r15+,r14 + rts + add #16,r15 +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */ + .byte 0x7c /* sleb128 -4; CIE Data Alignment Factor */ + .byte 0x11 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .byte 0xf /* uleb128 0xf */ + .byte 0x0 /* uleb128 0x0 */ + .align 2 +.LECIE1: +.LSFDE1: + .4byte .LEFDE1-.LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte .LASFDE1-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte .LFE1-.LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI0-.LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI1-.LCFI0 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI2-.LCFI1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI3-.LCFI2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI4-.LCFI3 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI5-.LCFI4 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x8a /* DW_CFA_offset, column 0xa */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x89 /* DW_CFA_offset, column 0x9 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x88 /* DW_CFA_offset, column 0x8 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI6-.LCFI5 + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte .LEFDE3-.LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte .LASFDE3-__FRAME_BEGIN__ /* FDE CIE offset */ +#ifdef PIC + .4byte .LFB2-. 
/* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte .LFE2-.LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI7-.LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI8-.LCFI7 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x8 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFI9-.LCFI8 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0xc /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIA-.LCFI9 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x10 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIB-.LCFIA + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x14 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIC-.LCFIB + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte 0x18 /* uleb128 0x4 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFID-.LCFIC + .byte 0xe /* DW_CFA_def_cfa_offset */ +#if defined(__SH4__) + .byte 24+48 /* uleb128 24+48 */ +#else + .byte 24+16 /* uleb128 24+16 */ +#endif + .byte 0x91 /* DW_CFA_offset, column 0x11 */ + .byte 0x6 /* uleb128 0x6 */ + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .byte 0x5 /* uleb128 0x5 */ + .byte 0x84 /* DW_CFA_offset, column 0x4 */ + .byte 0x4 /* uleb128 0x4 */ + .byte 0x85 /* DW_CFA_offset, column 0x5 */ + .byte 0x3 /* uleb128 0x3 */ + .byte 0x86 /* DW_CFA_offset, column 0x6 */ + .byte 0x2 /* uleb128 0x2 */ + .byte 0x87 /* DW_CFA_offset, column 0x7 */ + .byte 0x1 /* uleb128 0x1 */ + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte .LCFIE-.LCFID + .byte 0xd /* DW_CFA_def_cfa_register */ + .byte 0xe /* uleb128 0xe */ + .align 2 +.LEFDE3: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/ffi.c new file mode 100644 index 0000000..123b87a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/ffi.c @@ -0,0 +1,469 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2003, 2004, 2006, 2007, 2012 Kaz Kojima + Copyright (c) 2008 Anthony Green + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include + +#include + +#define NGREGARG 8 +#define NFREGARG 12 + +static int +return_type (ffi_type *arg) +{ + + if (arg->type != FFI_TYPE_STRUCT) + return arg->type; + + /* gcc uses r2 if the result can be packed in on register. */ + if (arg->size <= sizeof (UINT8)) + return FFI_TYPE_UINT8; + else if (arg->size <= sizeof (UINT16)) + return FFI_TYPE_UINT16; + else if (arg->size <= sizeof (UINT32)) + return FFI_TYPE_UINT32; + else if (arg->size <= sizeof (UINT64)) + return FFI_TYPE_UINT64; + + return FFI_TYPE_STRUCT; +} + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +void ffi_prep_args(char *stack, extended_cif *ecif) +{ + register unsigned int i; + register unsigned int avn; + register void **p_argv; + register char *argp; + register ffi_type **p_arg; + + argp = stack; + + if (return_type (ecif->cif->rtype) == FFI_TYPE_STRUCT) + { + *(void **) argp = ecif->rvalue; + argp += sizeof (UINT64); + } + + avn = ecif->cif->nargs; + p_argv = ecif->avalue; + + for (i = 0, p_arg = ecif->cif->arg_types; i < avn; i++, p_arg++, p_argv++) + { + size_t z; + int align; + + z = (*p_arg)->size; + align = (*p_arg)->alignment; + if (z < sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(SINT64 *) argp = (SINT64) *(SINT8 *)(*p_argv); + break; + + case FFI_TYPE_UINT8: + *(UINT64 *) argp = (UINT64) *(UINT8 *)(*p_argv); + break; + + case FFI_TYPE_SINT16: + *(SINT64 *) argp = (SINT64) *(SINT16 *)(*p_argv); + break; + + case FFI_TYPE_UINT16: + *(UINT64 *) argp = (UINT64) *(UINT16 *)(*p_argv); + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT(0); + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT32) && align == sizeof (UINT32)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *(SINT64 *) argp = (SINT64) *(SINT32 *) (*p_argv); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_POINTER: + case FFI_TYPE_UINT32: + case FFI_TYPE_STRUCT: + *(UINT64 *) argp = (UINT64) *(UINT32 *) (*p_argv); + break; + + default: + FFI_ASSERT(0); + break; + } + argp += sizeof (UINT64); + } + else if (z == sizeof (UINT64) + && align == sizeof (UINT64) + && ((int) *p_argv & (sizeof (UINT64) - 1)) == 0) + { + *(UINT64 *) argp = *(UINT64 *) (*p_argv); + argp += sizeof (UINT64); + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + memcpy (argp, *p_argv, z); + argp += n * sizeof (UINT64); + } + } + + return; +} + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + int i, j; + int size, type; + int n, m; + int greg; + int freg; + int fpair = -1; + + greg = (return_type (cif->rtype) == FFI_TYPE_STRUCT ? 
1 : 0); + freg = 0; + cif->flags2 = 0; + + for (i = j = 0; i < cif->nargs; i++) + { + type = (cif->arg_types)[i]->type; + switch (type) + { + case FFI_TYPE_FLOAT: + greg++; + cif->bytes += sizeof (UINT64) - sizeof (float); + if (freg >= NFREGARG - 1) + continue; + if (fpair < 0) + { + fpair = freg; + freg += 2; + } + else + fpair = -1; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + break; + + case FFI_TYPE_DOUBLE: + if (greg++ >= NGREGARG && (freg + 1) >= NFREGARG) + continue; + if ((freg + 1) < NFREGARG) + { + freg += 2; + cif->flags2 += ((cif->arg_types)[i]->type) << (2 * j++); + } + else + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + + default: + size = (cif->arg_types)[i]->size; + if (size < sizeof (UINT64)) + cif->bytes += sizeof (UINT64) - size; + n = (size + sizeof (UINT64) - 1) / sizeof (UINT64); + if (greg >= NGREGARG) + continue; + else if (greg + n - 1 >= NGREGARG) + greg = NGREGARG; + else + greg += n; + for (m = 0; m < n; m++) + cif->flags2 += FFI_TYPE_INT << (2 * j++); + break; + } + } + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_STRUCT: + cif->flags = return_type (cif->rtype); + break; + + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + cif->flags = cif->rtype->type; + break; + + default: + cif->flags = FFI_TYPE_INT; + break; + } + + return FFI_OK; +} + +/*@-declundef@*/ +/*@-exportheader@*/ +extern void ffi_call_SYSV(void (*)(char *, extended_cif *), + /*@out@*/ extended_cif *, + unsigned, unsigned, long long, + /*@out@*/ unsigned *, + void (*fn)(void)); +/*@=declundef@*/ +/*@=exportheader@*/ + +void ffi_call(/*@dependent@*/ ffi_cif *cif, + void (*fn)(void), + /*@out@*/ void *rvalue, + /*@dependent@*/ void **avalue) +{ + extended_cif ecif; + UINT64 trvalue; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ + + if (cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + ecif.rvalue = &trvalue; + else if ((rvalue == NULL) && + (cif->rtype->type == FFI_TYPE_STRUCT)) + { + ecif.rvalue = alloca(cif->rtype->size); + } + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_SYSV: + ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, cif->flags2, + ecif.rvalue, fn); + break; + default: + FFI_ASSERT(0); + break; + } + + if (rvalue + && cif->rtype->type == FFI_TYPE_STRUCT + && return_type (cif->rtype) != FFI_TYPE_STRUCT) + memcpy (rvalue, &trvalue, cif->rtype->size); +} + +extern void ffi_closure_SYSV (void); +extern void __ic_invalidate (void *line); + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp; + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + tramp = (unsigned int *) &closure->tramp[0]; + /* Since ffi_closure is an aligned object, the ffi trampoline is + called as an SHcompact code. Sigh. 
+ SHcompact part: + mova @(1,pc),r0; add #1,r0; jmp @r0; nop; + SHmedia part: + movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0 + movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */ +#ifdef __LITTLE_ENDIAN__ + tramp[0] = 0x7001c701; + tramp[1] = 0x0009402b; +#else + tramp[0] = 0xc7017001; + tramp[1] = 0x402b0009; +#endif + tramp[2] = 0xcc000010 | (((UINT32) ffi_closure_SYSV) >> 16) << 10; + tramp[3] = 0xc8000010 | (((UINT32) ffi_closure_SYSV) & 0xffff) << 10; + tramp[4] = 0x6bf10600; + tramp[5] = 0xcc000010 | (((UINT32) codeloc) >> 16) << 10; + tramp[6] = 0xc8000010 | (((UINT32) codeloc) & 0xffff) << 10; + tramp[7] = 0x4401fff0; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + /* Flush the icache. */ + asm volatile ("ocbwb %0,0; synco; icbi %1,0; synci" : : "r" (tramp), + "r"(codeloc)); + + return FFI_OK; +} + +/* Basically the trampoline invokes ffi_closure_SYSV, and on + * entry, r3 holds the address of the closure. + * After storing the registers that could possibly contain + * parameters to be passed into the stack frame and setting + * up space for a return value, ffi_closure_SYSV invokes the + * following helper function to do most of the work. + */ + +int +ffi_closure_helper_SYSV (ffi_closure *closure, UINT64 *rvalue, + UINT64 *pgr, UINT64 *pfr, UINT64 *pst) +{ + void **avalue; + ffi_type **p_arg; + int i, avn; + int greg, freg; + ffi_cif *cif; + int fpair = -1; + + cif = closure->cif; + avalue = alloca (cif->nargs * sizeof (void *)); + + /* Copy the caller's structure return value address so that the closure + returns the data directly to the caller. */ + if (return_type (cif->rtype) == FFI_TYPE_STRUCT) + { + rvalue = (UINT64 *) *pgr; + greg = 1; + } + else + greg = 0; + + freg = 0; + cif = closure->cif; + avn = cif->nargs; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++) + { + size_t z; + void *p; + + z = (*p_arg)->size; + if (z < sizeof (UINT32)) + { + p = pgr + greg++; + + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + case FFI_TYPE_STRUCT: +#ifdef __LITTLE_ENDIAN__ + avalue[i] = p; +#else + avalue[i] = ((char *) p) + sizeof (UINT32) - z; +#endif + break; + + default: + FFI_ASSERT(0); + } + } + else if (z == sizeof (UINT32)) + { + if ((*p_arg)->type == FFI_TYPE_FLOAT) + { + if (freg < NFREGARG - 1) + { + if (fpair >= 0) + { + avalue[i] = (UINT32 *) pfr + fpair; + fpair = -1; + } + else + { +#ifdef __LITTLE_ENDIAN__ + fpair = freg; + avalue[i] = (UINT32 *) pfr + (1 ^ freg); +#else + fpair = 1 ^ freg; + avalue[i] = (UINT32 *) pfr + freg; +#endif + freg += 2; + } + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + } + else +#ifdef __LITTLE_ENDIAN__ + avalue[i] = pgr + greg; +#else + avalue[i] = (UINT32 *) (pgr + greg) + 1; +#endif + greg++; + } + else if ((*p_arg)->type == FFI_TYPE_DOUBLE) + { + if (freg + 1 >= NFREGARG) + avalue[i] = pgr + greg; + else + { + avalue[i] = pfr + (freg >> 1); + freg += 2; + } + greg++; + } + else + { + int n = (z + sizeof (UINT64) - 1) / sizeof (UINT64); + + avalue[i] = pgr + greg; + greg += n; + } + } + + (closure->fun) (cif, rvalue, avalue, closure->user_data); + + /* Tell ffi_closure_SYSV how to perform return type promotions. 
*/ + return return_type (cif->rtype); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/ffitarget.h new file mode 100644 index 0000000..08a6fe9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/ffitarget.h @@ -0,0 +1,58 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SuperH - SHmedia. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; + +#define FFI_EXTRA_CIF_FIELDS long long flags2 +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 32 +#define FFI_NATIVE_RAW_API 0 + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/sysv.S new file mode 100644 index 0000000..c4587d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sh64/sysv.S @@ -0,0 +1,539 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2003, 2004, 2006, 2008 Kaz Kojima + + SuperH SHmedia Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#ifdef HAVE_MACHINE_ASM_H +#include +#else +/* XXX these lose for some platforms, I'm sure. */ +#define CNAME(x) x +#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x): +#endif + +#ifdef __LITTLE_ENDIAN__ +#define OFS_FLT 0 +#else +#define OFS_FLT 4 +#endif + + .section .text..SHmedia32,"ax" + + # r2: ffi_prep_args + # r3: &ecif + # r4: bytes + # r5: flags + # r6: flags2 + # r7: rvalue + # r8: fn + + # This assumes we are using gas. + .align 5 +ENTRY(ffi_call_SYSV) + # Save registers +.LFB1: + addi.l r15, -48, r15 +.LCFI0: + st.q r15, 40, r32 + st.q r15, 32, r31 + st.q r15, 24, r30 + st.q r15, 16, r29 + st.q r15, 8, r28 + st.l r15, 4, r18 + st.l r15, 0, r14 +.LCFI1: + add.l r15, r63, r14 +.LCFI2: +# add r4, r63, r28 + add r5, r63, r29 + add r6, r63, r30 + add r7, r63, r31 + add r8, r63, r32 + + addi r4, (64 + 7), r4 + andi r4, ~7, r4 + sub.l r15, r4, r15 + + ptabs/l r2, tr0 + add r15, r63, r2 + blink tr0, r18 + + addi r15, 64, r22 + movi 0, r0 + movi 0, r1 + movi -1, r23 + + pt/l 1f, tr1 + bnei/l r29, FFI_TYPE_STRUCT, tr1 + ld.l r15, 0, r19 + addi r15, 8, r15 + addi r0, 1, r0 +1: + +.L_pass: + andi r30, 3, r20 + shlri r30, 2, r30 + + pt/l .L_call_it, tr0 + pt/l .L_pass_i, tr1 + pt/l .L_pass_f, tr2 + + beqi/l r20, FFI_TYPE_VOID, tr0 + beqi/l r20, FFI_TYPE_INT, tr1 + beqi/l r20, FFI_TYPE_FLOAT, tr2 + +.L_pass_d: + addi r0, 1, r0 + pt/l 3f, tr0 + movi 12, r20 + bge/l r1, r20, tr0 + + pt/l .L_pop_d, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + addi r1, 2, r1 + blink tr0, r63 + +.L_pop_d: + pt/l .L_pop_d_tbl, tr1 + gettr tr1, r20 + shlli r1, 2, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_d_tbl: + fld.d r15, 0, dr0 + blink tr0, r63 + fld.d r15, 0, dr2 + blink tr0, r63 + fld.d r15, 0, dr4 + blink tr0, r63 + fld.d r15, 0, dr6 + blink tr0, r63 + fld.d r15, 0, dr8 + blink tr0, r63 + fld.d r15, 0, dr10 + blink tr0, r63 + +.L_pass_f: + addi r0, 1, r0 + pt/l 3f, tr0 + movi 12, r20 + bge/l r1, r20, tr0 + + pt/l .L_pop_f, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + blink tr0, r63 + +.L_pop_f: + pt/l .L_pop_f_tbl, tr1 + pt/l 5f, tr2 + gettr tr1, r20 + bge/l r23, r63, tr2 + add r1, r63, r23 + shlli r1, 3, r21 + addi r1, 2, r1 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 +5: + addi r23, 1, r21 + movi -1, r23 + shlli r21, 3, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_f_tbl: + fld.s r15, OFS_FLT, fr0 + blink tr0, r63 + fld.s r15, OFS_FLT, fr1 + blink tr0, r63 + fld.s r15, OFS_FLT, fr2 + blink tr0, r63 + fld.s r15, OFS_FLT, fr3 + blink tr0, r63 + fld.s r15, OFS_FLT, fr4 + blink tr0, r63 + fld.s r15, OFS_FLT, fr5 + blink tr0, r63 + fld.s r15, OFS_FLT, fr6 + blink tr0, r63 + fld.s r15, OFS_FLT, fr7 + blink tr0, r63 + fld.s r15, OFS_FLT, fr8 + blink tr0, r63 + fld.s r15, OFS_FLT, fr9 + blink tr0, r63 + fld.s r15, OFS_FLT, fr10 + blink tr0, r63 + fld.s r15, 
OFS_FLT, fr11 + blink tr0, r63 + +.L_pass_i: + pt/l 3f, tr0 + movi 8, r20 + bge/l r0, r20, tr0 + + pt/l .L_pop_i, tr1 + pt/l 2f, tr0 + blink tr1, r63 +2: + addi.l r15, 8, r15 +3: + pt/l .L_pass, tr0 + addi r0, 1, r0 + blink tr0, r63 + +.L_pop_i: + pt/l .L_pop_i_tbl, tr1 + gettr tr1, r20 + shlli r0, 3, r21 + add r20, r21, r20 + ptabs/l r20, tr1 + blink tr1, r63 + +.L_pop_i_tbl: + ld.q r15, 0, r2 + blink tr0, r63 + ld.q r15, 0, r3 + blink tr0, r63 + ld.q r15, 0, r4 + blink tr0, r63 + ld.q r15, 0, r5 + blink tr0, r63 + ld.q r15, 0, r6 + blink tr0, r63 + ld.q r15, 0, r7 + blink tr0, r63 + ld.q r15, 0, r8 + blink tr0, r63 + ld.q r15, 0, r9 + blink tr0, r63 + +.L_call_it: + # call function + pt/l 1f, tr1 + bnei/l r29, FFI_TYPE_STRUCT, tr1 + add r19, r63, r2 +1: + add r22, r63, r15 + ptabs/l r32, tr0 + blink tr0, r18 + + pt/l .L_ret_i, tr0 + pt/l .L_ret_ll, tr1 + pt/l .L_ret_d, tr2 + pt/l .L_ret_f, tr3 + pt/l .L_epilogue, tr4 + + beqi/l r29, FFI_TYPE_INT, tr0 + beqi/l r29, FFI_TYPE_UINT32, tr0 + beqi/l r29, FFI_TYPE_SINT64, tr1 + beqi/l r29, FFI_TYPE_UINT64, tr1 + beqi/l r29, FFI_TYPE_DOUBLE, tr2 + beqi/l r29, FFI_TYPE_FLOAT, tr3 + + pt/l .L_ret_q, tr0 + pt/l .L_ret_h, tr1 + + beqi/l r29, FFI_TYPE_UINT8, tr0 + beqi/l r29, FFI_TYPE_UINT16, tr1 + blink tr4, r63 + +.L_ret_d: + fst.d r31, 0, dr0 + blink tr4, r63 + +.L_ret_ll: + st.q r31, 0, r2 + blink tr4, r63 + +.L_ret_f: + fst.s r31, OFS_FLT, fr0 + blink tr4, r63 + +.L_ret_q: + st.b r31, 0, r2 + blink tr4, r63 + +.L_ret_h: + st.w r31, 0, r2 + blink tr4, r63 + +.L_ret_i: + st.l r31, 0, r2 + # Fall + +.L_epilogue: + # Remove the space we pushed for the args + add r14, r63, r15 + + ld.l r15, 0, r14 + ld.l r15, 4, r18 + ld.q r15, 8, r28 + ld.q r15, 16, r29 + ld.q r15, 24, r30 + ld.q r15, 32, r31 + ld.q r15, 40, r32 + addi.l r15, 48, r15 + ptabs r18, tr0 + blink tr0, r63 + +.LFE1: +.ffi_call_SYSV_end: + .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV) + + .align 5 +ENTRY(ffi_closure_SYSV) +.LFB2: + addi.l r15, -136, r15 +.LCFI3: + st.l r15, 12, r18 + st.l r15, 8, r14 + st.l r15, 4, r12 +.LCFI4: + add r15, r63, r14 +.LCFI5: + /* Stack layout: + ... 
+ 64 bytes (register parameters) + 48 bytes (floating register parameters) + 8 bytes (result) + 4 bytes (r18) + 4 bytes (r14) + 4 bytes (r12) + 4 bytes (for align) + <- new stack pointer + */ + fst.d r14, 24, dr0 + fst.d r14, 32, dr2 + fst.d r14, 40, dr4 + fst.d r14, 48, dr6 + fst.d r14, 56, dr8 + fst.d r14, 64, dr10 + st.q r14, 72, r2 + st.q r14, 80, r3 + st.q r14, 88, r4 + st.q r14, 96, r5 + st.q r14, 104, r6 + st.q r14, 112, r7 + st.q r14, 120, r8 + st.q r14, 128, r9 + + add r1, r63, r2 + addi r14, 16, r3 + addi r14, 72, r4 + addi r14, 24, r5 + addi r14, 136, r6 +#ifdef PIC + movi (((datalabel _GLOBAL_OFFSET_TABLE_-(.LPCS0-.)) >> 16) & 65535), r12 + shori ((datalabel _GLOBAL_OFFSET_TABLE_-(.LPCS0-.)) & 65535), r12 +.LPCS0: ptrel/u r12, tr0 + movi ((ffi_closure_helper_SYSV@GOTPLT) & 65535), r1 + gettr tr0, r12 + ldx.l r1, r12, r1 + ptabs r1, tr0 +#else + pt/l ffi_closure_helper_SYSV, tr0 +#endif + blink tr0, r18 + + shlli r2, 1, r1 + movi (((datalabel .L_table) >> 16) & 65535), r2 + shori ((datalabel .L_table) & 65535), r2 + ldx.w r2, r1, r1 + add r1, r2, r1 + pt/l .L_case_v, tr1 + ptabs r1, tr0 + blink tr0, r63 + + .align 2 +.L_table: + .word .L_case_v - datalabel .L_table /* FFI_TYPE_VOID */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_INT */ + .word .L_case_f - datalabel .L_table /* FFI_TYPE_FLOAT */ + .word .L_case_d - datalabel .L_table /* FFI_TYPE_DOUBLE */ + .word .L_case_d - datalabel .L_table /* FFI_TYPE_LONGDOUBLE */ + .word .L_case_uq - datalabel .L_table /* FFI_TYPE_UINT8 */ + .word .L_case_q - datalabel .L_table /* FFI_TYPE_SINT8 */ + .word .L_case_uh - datalabel .L_table /* FFI_TYPE_UINT16 */ + .word .L_case_h - datalabel .L_table /* FFI_TYPE_SINT16 */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_UINT32 */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_SINT32 */ + .word .L_case_ll - datalabel .L_table /* FFI_TYPE_UINT64 */ + .word .L_case_ll - datalabel .L_table /* FFI_TYPE_SINT64 */ + .word .L_case_v - datalabel .L_table /* FFI_TYPE_STRUCT */ + .word .L_case_i - datalabel .L_table /* FFI_TYPE_POINTER */ + + .align 2 +.L_case_d: + fld.d r14, 16, dr0 + blink tr1, r63 +.L_case_f: + fld.s r14, 16, fr0 + blink tr1, r63 +.L_case_ll: + ld.q r14, 16, r2 + blink tr1, r63 +.L_case_i: + ld.l r14, 16, r2 + blink tr1, r63 +.L_case_q: + ld.b r14, 16, r2 + blink tr1, r63 +.L_case_uq: + ld.ub r14, 16, r2 + blink tr1, r63 +.L_case_h: + ld.w r14, 16, r2 + blink tr1, r63 +.L_case_uh: + ld.uw r14, 16, r2 + blink tr1, r63 +.L_case_v: + add.l r14, r63, r15 + ld.l r15, 4, r12 + ld.l r15, 8, r14 + ld.l r15, 12, r18 + addi.l r15, 136, r15 + ptabs r18, tr0 + blink tr0, r63 + +.LFE2: +.ffi_closure_SYSV_end: + .size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + .section ".eh_frame","aw",@progbits +__FRAME_BEGIN__: + .4byte .LECIE1-.LSCIE1 /* Length of Common Information Entry */ +.LSCIE1: + .4byte 0x0 /* CIE Identifier Tag */ + .byte 0x1 /* CIE Version */ +#ifdef PIC + .ascii "zR\0" /* CIE Augmentation */ +#else + .byte 0x0 /* CIE Augmentation */ +#endif + .uleb128 0x1 /* CIE Code Alignment Factor */ + .sleb128 -4 /* CIE Data Alignment Factor */ + .byte 0x12 /* CIE RA Column */ +#ifdef PIC + .uleb128 0x1 /* Augmentation size */ + .byte 0x10 /* FDE Encoding (pcrel) */ +#endif + .byte 0xc /* DW_CFA_def_cfa */ + .uleb128 0xf + .uleb128 0x0 + .align 2 +.LECIE1: +.LSFDE1: + .4byte datalabel .LEFDE1-datalabel .LASFDE1 /* FDE Length */ +.LASFDE1: + .4byte datalabel 
.LASFDE1-datalabel __FRAME_BEGIN__ +#ifdef PIC + .4byte .LFB1-. /* FDE initial location */ +#else + .4byte .LFB1 /* FDE initial location */ +#endif + .4byte datalabel .LFE1-datalabel .LFB1 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI0-datalabel .LFB1 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .uleb128 0x30 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI1-datalabel .LCFI0 + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .uleb128 0xc + .byte 0x92 /* DW_CFA_offset, column 0x12 */ + .uleb128 0xb + .byte 0x9c /* DW_CFA_offset, column 0x1c */ + .uleb128 0xa + .byte 0x9d /* DW_CFA_offset, column 0x1d */ + .uleb128 0x8 + .byte 0x9e /* DW_CFA_offset, column 0x1e */ + .uleb128 0x6 + .byte 0x9f /* DW_CFA_offset, column 0x1f */ + .uleb128 0x4 + .byte 0xa0 /* DW_CFA_offset, column 0x20 */ + .uleb128 0x2 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI2-datalabel .LCFI1 + .byte 0xd /* DW_CFA_def_cfa_register */ + .uleb128 0xe + .align 2 +.LEFDE1: + +.LSFDE3: + .4byte datalabel .LEFDE3-datalabel .LASFDE3 /* FDE Length */ +.LASFDE3: + .4byte datalabel .LASFDE3-datalabel __FRAME_BEGIN__ +#ifdef PIC + .4byte .LFB2-. /* FDE initial location */ +#else + .4byte .LFB2 /* FDE initial location */ +#endif + .4byte datalabel .LFE2-datalabel .LFB2 /* FDE address range */ +#ifdef PIC + .uleb128 0x0 /* Augmentation size */ +#endif + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI3-datalabel .LFB2 + .byte 0xe /* DW_CFA_def_cfa_offset */ + .uleb128 0x88 + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI4-datalabel .LCFI3 + .byte 0x8c /* DW_CFA_offset, column 0xc */ + .uleb128 0x21 + .byte 0x8e /* DW_CFA_offset, column 0xe */ + .uleb128 0x20 + .byte 0x92 /* DW_CFA_offset, column 0x12 */ + .uleb128 0x1f + .byte 0x4 /* DW_CFA_advance_loc4 */ + .4byte datalabel .LCFI5-datalabel .LCFI4 + .byte 0xd /* DW_CFA_def_cfa_register */ + .uleb128 0xe + .align 2 +.LEFDE3: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffi.c new file mode 100644 index 0000000..9e406d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffi.c @@ -0,0 +1,468 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +/* Perform machine dependent cif processing */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + flags = (rtype->size & 0xfff) << SPARC_SIZEMASK_SHIFT; + flags |= SPARC_RET_STRUCT; + break; + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = SPARC_RET_INT64; + break; + case FFI_TYPE_COMPLEX: + rtt = rtype->elements[0]->type; + switch (rtt) + { + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_4; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_8; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = SPARC_RET_INT128; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = SPARC_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = SP_V8_RET_CPLX16; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = SP_V8_RET_CPLX8; + break; + default: + abort(); + } + break; + default: + abort(); + } + cif->flags = flags; + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + int tt = ty->type; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Passed by reference. */ + z = 4; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + /* FALLTHRU */ + + default: + z = FFI_ALIGN(z, 4); + } + bytes += z; + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 4) + bytes = 6 * 4; + + /* The ABI always requires space for the struct return pointer. */ + bytes += 4; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 2 * 4); + + /* Include the call frame to prep_args. 
*/ + bytes += 4*16 + 4*8; + cif->bytes = bytes; + + return FFI_OK; +} + +extern void ffi_call_v8(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +int FFI_HIDDEN +ffi_prep_args_v8(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if ((flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. */ + flags = SPARC_RET_VOID; + } + } + + /* This could only really be done when we are returning a structure. + However, the space is reserved so we can do it unconditionally. */ + *argp++ = (unsigned long)rvalue; + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. */ + memset(argp, 0, 6*4); +#endif + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + int tt = ty->type; + size_t z; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + *argp++ = (unsigned long)a; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + memcpy(argp, a, 8); + argp += 2; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *argp++ = *(unsigned *)a; + break; + + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + z = ty->size; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + if (z < 4) + { + memcpy((char *)argp + 4 - z, a, z); + argp++; + } + else + { + memcpy(argp, a, z); + argp += z / 4; + } + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V8); + + /* If we've not got a return value, we need to create one if we've + got to pass the return value to the callee. Otherwise ignore it. 
*/ + if (rvalue == NULL + && (cif->flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + bytes += FFI_ALIGN (cif->rtype->size, 8); + + ffi_call_v8(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + /* SPARC v8 requires 5 instructions for flush to be visible */ + asm volatile ("iflush %0; iflush %0+8; nop; nop; nop; nop; nop" + : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v8(void) FFI_HIDDEN; +extern void ffi_go_closure_v8(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long ctx = (unsigned long) closure; + unsigned long fn = (unsigned long) ffi_closure_v8; + + if (cif->abi != FFI_V8) + return FFI_BAD_ABI; + + tramp[0] = 0x03000000 | fn >> 10; /* sethi %hi(fn), %g1 */ + tramp[1] = 0x05000000 | ctx >> 10; /* sethi %hi(ctx), %g2 */ + tramp[2] = 0x81c06000 | (fn & 0x3ff); /* jmp %g1+%lo(fn) */ + tramp[3] = 0x8410a000 | (ctx & 0x3ff);/* or %g2, %lo(ctx) */ + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V8) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v8; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v8(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *argp) +{ + ffi_type **arg_types; + void **avalue; + int i, nargs, flags; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. Also install it so we + can return the address in %o0. */ + if ((flags & SPARC_FLAG_RET_MASK) == SPARC_RET_STRUCT) + { + void *new_rvalue = (void *)*argp; + *(void **)rvalue = new_rvalue; + rvalue = new_rvalue; + } + + /* Always skip the structure return address. */ + argp++; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++) + { + ffi_type *ty = arg_types[i]; + int tt = ty->type; + void *a = argp; + size_t z; + + switch (tt) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + by_reference: + /* Straight copy of invisible reference. */ + a = (void *)*argp; + break; + + case FFI_TYPE_DOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + if ((unsigned long)a & 7) + { + /* Align on a 8-byte boundary. 
*/ + UINT64 *tmp = alloca(8); + *tmp = ((UINT64)argp[0] << 32) | argp[1]; + a = tmp; + } + argp++; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_FLOAT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 2; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 3; + break; + + case FFI_TYPE_COMPLEX: + tt = ty->elements[0]->type; + z = ty->size; + if (tt == FFI_TYPE_FLOAT || z > 8) + goto by_reference; + if (z < 4) + a += 4 - z; + else if (z > 4) + argp++; + break; + + default: + abort(); + } + argp++; + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* !SPARC64 */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffi64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffi64.c new file mode 100644 index 0000000..9e04061 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffi64.c @@ -0,0 +1,608 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011, 2013 Anthony Green + Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#ifdef SPARC64 + +/* Flatten the contents of a structure to the parts that are passed in + floating point registers. The return is a bit mask wherein bit N + set means bytes [4*n, 4*n+3] are passed in %fN. + + We encode both the (running) size (maximum 32) and mask (maxumum 255) + into one integer. The size is placed in the low byte, so that align + and addition work correctly. The mask is placed in the second byte. 
*/ + +static int +ffi_struct_float_mask (ffi_type *outer_type, int size_mask) +{ + ffi_type **elts; + ffi_type *t; + + if (outer_type->type == FFI_TYPE_COMPLEX) + { + int m = 0, tt = outer_type->elements[0]->type; + size_t z = outer_type->size; + + if (tt == FFI_TYPE_FLOAT + || tt == FFI_TYPE_DOUBLE + || tt == FFI_TYPE_LONGDOUBLE) + m = (1 << (z / 4)) - 1; + return (m << 8) | z; + } + FFI_ASSERT (outer_type->type == FFI_TYPE_STRUCT); + + for (elts = outer_type->elements; (t = *elts) != NULL; elts++) + { + size_t z = t->size; + int o, m, tt; + + size_mask = FFI_ALIGN(size_mask, t->alignment); + switch (t->type) + { + case FFI_TYPE_STRUCT: + size_mask = ffi_struct_float_mask (t, size_mask); + continue; + case FFI_TYPE_COMPLEX: + tt = t->elements[0]->type; + if (tt != FFI_TYPE_FLOAT + && tt != FFI_TYPE_DOUBLE + && tt != FFI_TYPE_LONGDOUBLE) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + m = (1 << (z / 4)) - 1; /* compute mask for type */ + o = (size_mask >> 2) & 0x3f; /* extract word offset */ + size_mask |= m << (o + 8); /* insert mask into place */ + break; + } + size_mask += z; + } + + size_mask = FFI_ALIGN(size_mask, outer_type->alignment); + FFI_ASSERT ((size_mask & 0xff) == outer_type->size); + + return size_mask; +} + +/* Merge floating point data into integer data. If the structure is + entirely floating point, simply return a pointer to the fp data. */ + +static void * +ffi_struct_float_merge (int size_mask, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + return vi; + else if (mask == (1 << n) - 1) + return vf; + else + { + unsigned int *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + if ((mask >> i) & 1) + wi[i] = wf[i]; + + return vi; + } +} + +/* Similar, but place the data into VD in the end. */ + +void FFI_HIDDEN +ffi_struct_float_copy (int size_mask, void *vd, void *vi, void *vf) +{ + int size = size_mask & 0xff; + int mask = size_mask >> 8; + int n = size >> 2; + + if (mask == 0) + ; + else if (mask == (1 << n) - 1) + vi = vf; + else + { + unsigned int *wd = vd, *wi = vi, *wf = vf; + int i; + + for (i = 0; i < n; ++i) + wd[i] = ((mask >> i) & 1 ? wf : wi)[i]; + return; + } + memcpy (vd, vi, size); +} + +/* Perform machine dependent cif processing */ + +static ffi_status +ffi_prep_cif_machdep_core(ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + int rtt = rtype->type; + size_t bytes = 0; + int i, n, flags; + + /* Set the return type flag */ + switch (rtt) + { + case FFI_TYPE_VOID: + flags = SPARC_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = SPARC_RET_F_1; + break; + case FFI_TYPE_DOUBLE: + flags = SPARC_RET_F_2; + break; + case FFI_TYPE_LONGDOUBLE: + flags = SPARC_RET_F_4; + break; + + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + if (rtype->size > 32) + { + flags = SPARC_RET_VOID | SPARC_FLAG_RET_IN_MEM; + bytes = 8; + } + else + { + int size_mask = ffi_struct_float_mask (rtype, 0); + int word_size = (size_mask >> 2) & 0x3f; + int all_mask = (1 << word_size) - 1; + int fp_mask = size_mask >> 8; + + flags = (size_mask << SPARC_SIZEMASK_SHIFT) | SPARC_RET_STRUCT; + + /* For special cases of all-int or all-fp, we can return + the value directly without popping through a struct copy. 
*/ + if (fp_mask == 0) + { + if (rtype->alignment >= 8) + { + if (rtype->size == 8) + flags = SPARC_RET_INT64; + else if (rtype->size == 16) + flags = SPARC_RET_INT128; + } + } + else if (fp_mask == all_mask) + switch (word_size) + { + case 1: flags = SPARC_RET_F_1; break; + case 2: flags = SPARC_RET_F_2; break; + case 3: flags = SP_V9_RET_F_3; break; + case 4: flags = SPARC_RET_F_4; break; + /* 5 word structures skipped; handled via RET_STRUCT. */ + case 6: flags = SPARC_RET_F_6; break; + /* 7 word structures skipped; handled via RET_STRUCT. */ + case 8: flags = SPARC_RET_F_8; break; + } + } + break; + + case FFI_TYPE_SINT8: + flags = SPARC_RET_SINT8; + break; + case FFI_TYPE_UINT8: + flags = SPARC_RET_UINT8; + break; + case FFI_TYPE_SINT16: + flags = SPARC_RET_SINT16; + break; + case FFI_TYPE_UINT16: + flags = SPARC_RET_UINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = SP_V9_RET_SINT32; + break; + case FFI_TYPE_UINT32: + flags = SPARC_RET_UINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + flags = SPARC_RET_INT64; + break; + + default: + abort(); + } + + bytes = 0; + for (i = 0, n = cif->nargs; i < n; ++i) + { + ffi_type *ty = cif->arg_types[i]; + size_t z = ty->size; + size_t a = ty->alignment; + + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + /* Large structs passed by reference. */ + if (z > 16) + { + a = z = 8; + break; + } + /* Small structs may be passed in integer or fp regs or both. */ + if (bytes >= 16*8) + break; + if ((ffi_struct_float_mask (ty, 0) & 0xff00) == 0) + break; + /* FALLTHRU */ + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + flags |= SPARC_FLAG_FP_ARGS; + break; + } + bytes = FFI_ALIGN(bytes, a); + bytes += FFI_ALIGN(z, 8); + } + + /* Sparc call frames require that space is allocated for 6 args, + even if they aren't used. Make that space if necessary. */ + if (bytes < 6 * 8) + bytes = 6 * 8; + + /* The stack must be 2 word aligned, so round bytes up appropriately. */ + bytes = FFI_ALIGN(bytes, 16); + + /* Include the call frame to prep_args. */ + bytes += 8*16 + 8*8; + + cif->bytes = bytes; + cif->flags = flags; + return FFI_OK; +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + cif->nfixedargs = cif->nargs; + return ffi_prep_cif_machdep_core(cif); +} + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned nfixedargs, unsigned ntotalargs) +{ + cif->nfixedargs = nfixedargs; + return ffi_prep_cif_machdep_core(cif); +} + +extern void ffi_call_v9(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, size_t bytes, void *closure) FFI_HIDDEN; + +/* ffi_prep_args is called by the assembly routine once stack space + has been allocated for the function's arguments */ + +int FFI_HIDDEN +ffi_prep_args_v9(ffi_cif *cif, unsigned long *argp, void *rvalue, void **avalue) +{ + ffi_type **p_arg; + int flags = cif->flags; + int i, nargs; + + if (rvalue == NULL) + { + if (flags & SPARC_FLAG_RET_IN_MEM) + { + /* Since we pass the pointer to the callee, we need a value. + We allowed for this space in ffi_call, before ffi_call_v8 + alloca'd the space. */ + rvalue = (char *)argp + cif->bytes; + } + else + { + /* Otherwise, we can ignore the return value. */ + flags = SPARC_RET_VOID; + } + } + +#ifdef USING_PURIFY + /* Purify will probably complain in our assembly routine, + unless we zero out this memory. 
*/ + memset(argp, 0, 6*8); +#endif + + if (flags & SPARC_FLAG_RET_IN_MEM) + *argp++ = (unsigned long)rvalue; + + p_arg = cif->arg_types; + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = p_arg[i]; + void *a = avalue[i]; + size_t z; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *argp++ = *(SINT8 *)a; + break; + case FFI_TYPE_UINT8: + *argp++ = *(UINT8 *)a; + break; + case FFI_TYPE_SINT16: + *argp++ = *(SINT16 *)a; + break; + case FFI_TYPE_UINT16: + *argp++ = *(UINT16 *)a; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + *argp++ = *(SINT32 *)a; + break; + case FFI_TYPE_UINT32: + case FFI_TYPE_FLOAT: + *argp++ = *(UINT32 *)a; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_POINTER: + case FFI_TYPE_DOUBLE: + *argp++ = *(UINT64 *)a; + break; + + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + { + /* For structures larger than 16 bytes we pass reference. */ + *argp++ = (unsigned long)a; + break; + } + if (((unsigned long)argp & 15) && ty->alignment > 8) + argp++; + memcpy(argp, a, z); + argp += FFI_ALIGN(z, 8) / 8; + break; + + default: + abort(); + } + } + + return flags; +} + +static void +ffi_call_int(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t bytes = cif->bytes; + + FFI_ASSERT (cif->abi == FFI_V9); + + if (rvalue == NULL && (cif->flags & SPARC_FLAG_RET_IN_MEM)) + bytes += FFI_ALIGN (cif->rtype->size, 16); + + ffi_call_v9(cif, fn, rvalue, avalue, -bytes, closure); +} + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int(cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int(cif, fn, rvalue, avalue, closure); +} + +#ifdef __GNUC__ +static inline void +ffi_flush_icache (void *p) +{ + asm volatile ("flush %0; flush %0+8" : : "r" (p) : "memory"); +} +#else +extern void ffi_flush_icache (void *) FFI_HIDDEN; +#endif + +extern void ffi_closure_v9(void) FFI_HIDDEN; +extern void ffi_go_closure_v9(void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + unsigned int *tramp = (unsigned int *) &closure->tramp[0]; + unsigned long fn; + + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + /* Trampoline address is equal to the closure address. We take advantage + of that to reduce the trampoline size by 8 bytes. 
*/ + fn = (unsigned long) ffi_closure_v9; + tramp[0] = 0x83414000; /* rd %pc, %g1 */ + tramp[1] = 0xca586010; /* ldx [%g1+16], %g5 */ + tramp[2] = 0x81c14000; /* jmp %g5 */ + tramp[3] = 0x01000000; /* nop */ + *((unsigned long *) &tramp[4]) = fn; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + ffi_flush_icache (closure); + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + if (cif->abi != FFI_V9) + return FFI_BAD_ABI; + + closure->tramp = ffi_go_closure_v9; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_sparc_inner_v9(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, void *rvalue, + unsigned long *gpr, unsigned long *fpr) +{ + ffi_type **arg_types; + void **avalue; + int i, argn, argx, nargs, flags, nfixedargs; + + arg_types = cif->arg_types; + nargs = cif->nargs; + flags = cif->flags; + nfixedargs = cif->nfixedargs; + + avalue = alloca(nargs * sizeof(void *)); + + /* Copy the caller's structure return address so that the closure + returns the data directly to the caller. */ + if (flags & SPARC_FLAG_RET_IN_MEM) + { + rvalue = (void *) gpr[0]; + /* Skip the structure return address. */ + argn = 1; + } + else + argn = 0; + + /* Grab the addresses of the arguments from the stack frame. */ + for (i = 0; i < nargs; i++, argn = argx) + { + int named = i < nfixedargs; + ffi_type *ty = arg_types[i]; + void *a = &gpr[argn]; + size_t z; + + argx = argn + 1; + switch (ty->type) + { + case FFI_TYPE_COMPLEX: + case FFI_TYPE_STRUCT: + z = ty->size; + if (z > 16) + a = *(void **)a; + else + { + argx = argn + FFI_ALIGN (z, 8) / 8; + if (named && argn < 16) + { + int size_mask = ffi_struct_float_mask (ty, 0); + int argn_mask = (0xffff00 >> argn) & 0xff00; + + /* Eliminate fp registers off the end. */ + size_mask = (size_mask & 0xff) | (size_mask & argn_mask); + a = ffi_struct_float_merge (size_mask, gpr+argn, fpr+argn); + } + } + break; + + case FFI_TYPE_LONGDOUBLE: + argn = FFI_ALIGN (argn, 2); + a = (named && argn < 16 ? fpr : gpr) + argn; + argx = argn + 2; + break; + case FFI_TYPE_DOUBLE: + if (named && argn < 16) + a = fpr + argn; + break; + case FFI_TYPE_FLOAT: + if (named && argn < 16) + a = fpr + argn; + a += 4; + break; + + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + break; + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + a += 4; + break; + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + a += 6; + break; + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + a += 7; + break; + + default: + abort(); + } + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell ffi_closure_sparc how to perform return type promotions. */ + return flags; +} +#endif /* SPARC64 */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffitarget.h new file mode 100644 index 0000000..2f4cd9a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/ffitarget.h @@ -0,0 +1,81 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 1996-2003 Red Hat, Inc. + Target configuration macros for SPARC. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +#if defined(__arch64__) || defined(__sparcv9) +#ifndef SPARC64 +#define SPARC64 +#endif +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, +#ifdef SPARC64 + FFI_V9, + FFI_DEFAULT_ABI = FFI_V9, +#else + FFI_V8, + FFI_DEFAULT_ABI = FFI_V8, +#endif + FFI_LAST_ABI +} ffi_abi; +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION 1 +#define FFI_TARGET_HAS_COMPLEX_TYPE 1 + +#ifdef SPARC64 +# define FFI_TARGET_SPECIFIC_VARIADIC 1 +# define FFI_EXTRA_CIF_FIELDS unsigned int nfixedargs +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#ifdef SPARC64 +#define FFI_TRAMPOLINE_SIZE 24 +#else +#define FFI_TRAMPOLINE_SIZE 16 +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/internal.h new file mode 100644 index 0000000..0a66472 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/internal.h @@ -0,0 +1,26 @@ +#define SPARC_RET_VOID 0 +#define SPARC_RET_STRUCT 1 +#define SPARC_RET_UINT8 2 +#define SPARC_RET_SINT8 3 +#define SPARC_RET_UINT16 4 +#define SPARC_RET_SINT16 5 +#define SPARC_RET_UINT32 6 +#define SP_V9_RET_SINT32 7 /* v9 only */ +#define SP_V8_RET_CPLX16 7 /* v8 only */ +#define SPARC_RET_INT64 8 +#define SPARC_RET_INT128 9 + +/* Note that F_7 is missing, and is handled by SPARC_RET_STRUCT. 
*/ +#define SPARC_RET_F_8 10 +#define SPARC_RET_F_6 11 +#define SPARC_RET_F_4 12 +#define SPARC_RET_F_2 13 +#define SP_V9_RET_F_3 14 /* v9 only */ +#define SP_V8_RET_CPLX8 14 /* v8 only */ +#define SPARC_RET_F_1 15 + +#define SPARC_FLAG_RET_MASK 15 +#define SPARC_FLAG_RET_IN_MEM 32 +#define SPARC_FLAG_FP_ARGS 64 + +#define SPARC_SIZEMASK_SHIFT 8 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/v8.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/v8.S new file mode 100644 index 0000000..a2e4908 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/v8.S @@ -0,0 +1,443 @@ +/* ----------------------------------------------------------------------- + v8.S - Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 1996, 1997, 2003, 2004, 2008 Red Hat, Inc. + + SPARC Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifndef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + + .text + +#ifndef __GNUC__ + .align 8 + .globl C(ffi_flush_icache) + .type C(ffi_flush_icache),#function + FFI_HIDDEN(C(ffi_flush_icache)) + +C(ffi_flush_icache): +1: iflush %o0 + iflush %o+8 + nop + nop + nop + nop + nop + retl + nop + .size C(ffi_flush_icache), . - C(ffi_flush_icache) +#endif + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + + .align 8 + .globl C(ffi_call_v8) + .type C(ffi_call_v8),#function + FFI_HIDDEN(C(ffi_call_v8)) + +C(ffi_call_v8): +.LUW0: + ! Allocate a stack frame sized by ffi_call. + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, 64+32, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v8) + mov %i3, %o3 ! copy avalue + + add %sp, 32, %sp ! deallocate prep frame + and %o0, SPARC_FLAG_RET_MASK, %l0 ! save return type + srl %o0, SPARC_SIZEMASK_SHIFT, %l1 ! save return size + ld [%sp+64+4], %o0 ! load all argument registers + ld [%sp+64+8], %o1 + ld [%sp+64+12], %o2 + ld [%sp+64+16], %o3 + cmp %l0, SPARC_RET_STRUCT ! struct return needs an unimp 4 + ld [%sp+64+20], %o4 + be 8f + ld [%sp+64+24], %o5 + + ! Call foreign function + call %i1 + mov %i5, %g2 ! 
load static chain + +0: call 1f ! load pc in %o7 + sll %l0, 4, %l0 +1: add %o7, %l0, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + unimp +E(SPARC_RET_UINT8) + and %o0, 0xff, %o0 + st %o0, [%i2] + ret + restore +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + b 7f + sra %o0, 24, %o0 +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + b 7f + srl %o0, 16, %o0 +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + b 7f + sra %o0, 16, %o0 +E(SPARC_RET_UINT32) +7: st %o0, [%i2] + ret + restore +E(SP_V8_RET_CPLX16) + sth %o0, [%i2+2] + b 9f + srl %o0, 16, %o0 +E(SPARC_RET_INT64) + st %o0, [%i2] + st %o1, [%i2+4] + ret + restore +E(SPARC_RET_INT128) + std %o0, [%i2] + std %o2, [%i2+8] + ret + restore +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + st %f3, [%i2+3*4] + nop + st %f2, [%i2+2*4] + nop +E(SPARC_RET_F_2) + st %f1, [%i2+4] + st %f0, [%i2] + ret + restore +E(SP_V8_RET_CPLX8) + stb %o0, [%i2+1] + b 0f + srl %o0, 8, %o0 +E(SPARC_RET_F_1) + st %f0, [%i2] + ret + restore + + .align 8 +9: sth %o0, [%i2] + ret + restore + .align 8 +0: stb %o0, [%i2] + ret + restore + + ! Struct returning functions expect and skip the unimp here. + ! To make it worse, conforming callees examine the unimp and + ! make sure the low 12 bits of the unimp match the size of + ! the struct being returned. + .align 8 +8: call 1f ! load pc in %o7 + sll %l1, 2, %l0 ! size * 4 +1: sll %l1, 4, %l1 ! size * 16 + add %l0, %l1, %l0 ! size * 20 + add %o7, %l0, %o7 ! o7 = 8b + size*20 + jmp %o7+(2f-8b) + mov %i5, %g2 ! load static chain +2: + +/* The Sun assembler doesn't understand .rept 0x1000. */ +#define rept1 \ + call %i1; \ + nop; \ + unimp (. - 2b) / 20; \ + ret; \ + restore + +#define rept16 \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1; \ + rept1; rept1; rept1; rept1 + +#define rept256 \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16; \ + rept16; rept16; rept16; rept16 + + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + rept256; rept256; rept256; rept256 + +.LUW2: + .size C(ffi_call_v8),. - C(ffi_call_v8) + + +/* 16*4 register window + 1*4 struct return + 6*4 args backing store + + 8*4 return storage + 1*4 alignment. */ +#define STACKFRAME (16*4 + 4 + 6*4 + 8*4 + 4) + +/* ffi_closure_v8(...) + + Receives the closure argument in %g2. */ + +#ifdef HAVE_AS_REGISTER_PSEUDO_OP + .register %g2, #scratch +#endif + + .align 8 + .globl C(ffi_go_closure_v8) + .type C(ffi_go_closure_v8),#function + FFI_HIDDEN(C(ffi_go_closure_v8)) + +C(ffi_go_closure_v8): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ld [%g2+4], %o0 ! load cif + ld [%g2+8], %o1 ! load fun + b 0f + mov %g2, %o2 ! load user_data +.LUW5: + .size C(ffi_go_closure_v8), . - C(ffi_go_closure_v8) + + .align 8 + .globl C(ffi_closure_v8) + .type C(ffi_closure_v8),#function + FFI_HIDDEN(C(ffi_closure_v8)) + +C(ffi_closure_v8): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ld [%g2+FFI_TRAMPOLINE_SIZE], %o0 ! load cif + ld [%g2+FFI_TRAMPOLINE_SIZE+4], %o1 ! load fun + ld [%g2+FFI_TRAMPOLINE_SIZE+8], %o2 ! load user_data +0: + ! Store all of the potential argument registers in va_list format. 
+ st %i0, [%fp+68+0] + st %i1, [%fp+68+4] + st %i2, [%fp+68+8] + st %i3, [%fp+68+12] + st %i4, [%fp+68+16] + st %i5, [%fp+68+20] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. + add %fp, -8*4, %o3 + call ffi_closure_sparc_inner_v8 + add %fp, 64, %o4 + +0: call 1f + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o0 = o0 * 16 + add %o7, %o0, %o7 ! o7 = 0b + o0*16 + jmp %o7+(2f-0b) + add %fp, -8*4, %i2 + + ! Note that each entry is 4 insns, enforced by the E macro. + .align 16 +2: +E(SPARC_RET_VOID) + ret + restore +E(SPARC_RET_STRUCT) + ld [%i2], %i0 + jmp %i7+12 + restore +E(SPARC_RET_UINT8) + ldub [%i2+3], %i0 + ret + restore +E(SPARC_RET_SINT8) + ldsb [%i2+3], %i0 + ret + restore +E(SPARC_RET_UINT16) + lduh [%i2+2], %i0 + ret + restore +E(SPARC_RET_SINT16) + ldsh [%i2+2], %i0 + ret + restore +E(SPARC_RET_UINT32) + ld [%i2], %i0 + ret + restore +E(SP_V8_RET_CPLX16) + ld [%i2], %i0 + ret + restore +E(SPARC_RET_INT64) + ldd [%i2], %i0 + ret + restore +E(SPARC_RET_INT128) + ldd [%i2], %i0 + ldd [%i2+8], %i2 + ret + restore +E(SPARC_RET_F_8) + ld [%i2+7*4], %f7 + nop + ld [%i2+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [%i2+5*4], %f5 + nop + ld [%i2+4*4], %f4 + nop +E(SPARC_RET_F_4) + ld [%i2+3*4], %f3 + nop + ld [%i2+2*4], %f2 + nop +E(SPARC_RET_F_2) + ldd [%i2], %f0 + ret + restore +E(SP_V8_RET_CPLX8) + lduh [%i2], %i0 + ret + restore +E(SPARC_RET_F_1) + ld [%i2], %f0 + ret + restore + +.LUW8: + .size C(ffi_closure_v8), . - C(ffi_closure_v8) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_ADDR(X) %r_disp32(X) +#else +# define FDE_ADDR(X) X +#endif + + .align 4 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x7c ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0 ! DW_CFA_def_cfa, %o6, offset 0 + .align 4 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW0) ! Initial location + .long .LUW2 - .LUW0 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW3) ! Initial location + .long .LUW5 - .LUW3 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + .long FDE_ADDR(.LUW6) ! Initial location + .long .LUW8 - .LUW6 ! Address range + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 4 +.LEFDE3: + +#endif /* !SPARC64 */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/v9.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/v9.S new file mode 100644 index 0000000..55f8f43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/sparc/v9.S @@ -0,0 +1,440 @@ +/* ----------------------------------------------------------------------- + v9.S - Copyright (c) 2000, 2003, 2004, 2008 Red Hat, Inc. + + SPARC 64-bit Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#ifdef SPARC64 + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) + +#ifdef __USER_LABEL_PREFIX__ +# define C(Y) C1(__USER_LABEL_PREFIX__, Y) +#else +# define C(Y) Y +#endif +#define L(Y) C1(.L, Y) + +#if defined(__sun__) && defined(__svr4__) +# define E(INDEX) .align 16 +#else +# define E(INDEX) .align 16; .org 2b + INDEX * 16 +#endif + +#define STACK_BIAS 2047 + + .text + .align 8 + .globl C(ffi_call_v9) + .type C(ffi_call_v9),#function + FFI_HIDDEN(C(ffi_call_v9)) + +C(ffi_call_v9): +.LUW0: + save %sp, %o4, %sp +.LUW1: + mov %i0, %o0 ! copy cif + add %sp, STACK_BIAS+128+48, %o1 ! load args area + mov %i2, %o2 ! copy rvalue + call C(ffi_prep_args_v9) + mov %i3, %o3 ! copy avalue + + andcc %o0, SPARC_FLAG_FP_ARGS, %g0 ! need fp regs? + add %sp, 48, %sp ! deallocate prep frame + be,pt %xcc, 1f + mov %o0, %l0 ! save flags + + ldd [%sp+STACK_BIAS+128], %f0 ! load all fp arg regs + ldd [%sp+STACK_BIAS+128+8], %f2 + ldd [%sp+STACK_BIAS+128+16], %f4 + ldd [%sp+STACK_BIAS+128+24], %f6 + ldd [%sp+STACK_BIAS+128+32], %f8 + ldd [%sp+STACK_BIAS+128+40], %f10 + ldd [%sp+STACK_BIAS+128+48], %f12 + ldd [%sp+STACK_BIAS+128+56], %f14 + ldd [%sp+STACK_BIAS+128+64], %f16 + ldd [%sp+STACK_BIAS+128+72], %f18 + ldd [%sp+STACK_BIAS+128+80], %f20 + ldd [%sp+STACK_BIAS+128+88], %f22 + ldd [%sp+STACK_BIAS+128+96], %f24 + ldd [%sp+STACK_BIAS+128+104], %f26 + ldd [%sp+STACK_BIAS+128+112], %f28 + ldd [%sp+STACK_BIAS+128+120], %f30 + +1: ldx [%sp+STACK_BIAS+128], %o0 ! 
load all int arg regs + ldx [%sp+STACK_BIAS+128+8], %o1 + ldx [%sp+STACK_BIAS+128+16], %o2 + ldx [%sp+STACK_BIAS+128+24], %o3 + ldx [%sp+STACK_BIAS+128+32], %o4 + ldx [%sp+STACK_BIAS+128+40], %o5 + call %i1 + mov %i5, %g5 ! load static chain + +0: call 1f ! load pc in %o7 + and %l0, SPARC_FLAG_RET_MASK, %l1 +1: sll %l1, 4, %l1 + add %o7, %l1, %o7 ! o7 = 0b + ret_type*16 + jmp %o7+(2f-0b) + nop + + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + add %sp, STACK_BIAS-64+128+48, %l2 + sub %sp, 64, %sp + b 8f + stx %o0, [%l2] +E(SPARC_RET_UINT8) + and %o0, 0xff, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT8) + sll %o0, 24, %o0 + sra %o0, 24, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT16) + sll %o0, 16, %o0 + srl %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_SINT16) + sll %o0, 16, %o0 + sra %o0, 16, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_UINT32) + srl %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SP_V9_RET_SINT32) + sra %o0, 0, %i0 + return %i7+8 + stx %o0, [%o2] +E(SPARC_RET_INT64) + stx %o0, [%i2] + return %i7+8 + nop +E(SPARC_RET_INT128) + stx %o0, [%i2] + stx %o1, [%i2+8] + return %i7+8 + nop +E(SPARC_RET_F_8) + st %f7, [%i2+7*4] + nop + st %f6, [%i2+6*4] + nop +E(SPARC_RET_F_6) + st %f5, [%i2+5*4] + nop + st %f4, [%i2+4*4] + nop +E(SPARC_RET_F_4) + std %f2, [%i2+2*4] + return %i7+8 + std %f0, [%o2] +E(SPARC_RET_F_2) + return %i7+8 + std %f0, [%o2] +E(SP_V9_RET_F_3) + st %f2, [%i2+2*4] + nop + st %f1, [%i2+1*4] + nop +E(SPARC_RET_F_1) + return %i7+8 + st %f0, [%o2] + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: stx %o1, [%l2+8] + stx %o2, [%l2+16] + stx %o3, [%l2+24] + std %f0, [%l2+32] + std %f2, [%l2+40] + std %f4, [%l2+48] + std %f6, [%l2+56] + + ! Copy the structure into place. + srl %l0, SPARC_SIZEMASK_SHIFT, %o0 ! load size_mask + mov %i2, %o1 ! load dst + mov %l2, %o2 ! load src_gp + call C(ffi_struct_float_copy) + add %l2, 32, %o3 ! load src_fp + + return %i7+8 + nop + +.LUW2: + .size C(ffi_call_v9), . - C(ffi_call_v9) + + +#undef STACKFRAME +#define STACKFRAME 336 /* 16*8 register window + + 6*8 args backing store + + 20*8 locals */ +#define FP %fp+STACK_BIAS + +/* ffi_closure_v9(...) + + Receives the closure argument in %g1. */ + + .align 8 + .globl C(ffi_go_closure_v9) + .type C(ffi_go_closure_v9),#function + FFI_HIDDEN(C(ffi_go_closure_v9)) + +C(ffi_go_closure_v9): +.LUW3: + save %sp, -STACKFRAME, %sp +.LUW4: + ldx [%g5+8], %o0 + ldx [%g5+16], %o1 + b 0f + mov %g5, %o2 + +.LUW5: + .size C(ffi_go_closure_v9), . - C(ffi_go_closure_v9) + + .align 8 + .globl C(ffi_closure_v9) + .type C(ffi_closure_v9),#function + FFI_HIDDEN(C(ffi_closure_v9)) + +C(ffi_closure_v9): +.LUW6: + save %sp, -STACKFRAME, %sp +.LUW7: + ldx [%g1+FFI_TRAMPOLINE_SIZE], %o0 + ldx [%g1+FFI_TRAMPOLINE_SIZE+8], %o1 + ldx [%g1+FFI_TRAMPOLINE_SIZE+16], %o2 +0: + ! Store all of the potential argument registers in va_list format. + stx %i0, [FP+128+0] + stx %i1, [FP+128+8] + stx %i2, [FP+128+16] + stx %i3, [FP+128+24] + stx %i4, [FP+128+32] + stx %i5, [FP+128+40] + + ! Store possible floating point argument registers too. + std %f0, [FP-128] + std %f2, [FP-120] + std %f4, [FP-112] + std %f6, [FP-104] + std %f8, [FP-96] + std %f10, [FP-88] + std %f12, [FP-80] + std %f14, [FP-72] + std %f16, [FP-64] + std %f18, [FP-56] + std %f20, [FP-48] + std %f22, [FP-40] + std %f24, [FP-32] + std %f26, [FP-24] + std %f28, [FP-16] + std %f30, [FP-8] + + ! Call ffi_closure_sparc_inner to do the bulk of the work. 
+ add %fp, STACK_BIAS-160, %o3 + add %fp, STACK_BIAS+128, %o4 + call C(ffi_closure_sparc_inner_v9) + add %fp, STACK_BIAS-128, %o5 + +0: call 1f ! load pc in %o7 + and %o0, SPARC_FLAG_RET_MASK, %o0 +1: sll %o0, 4, %o0 ! o2 = i2 * 16 + add %o7, %o0, %o7 ! o7 = 0b + i2*16 + jmp %o7+(2f-0b) + nop + + ! Note that we cannot load the data in the delay slot of + ! the return insn because the data is in the stack frame + ! that is deallocated by the return. + .align 16 +2: +E(SPARC_RET_VOID) + return %i7+8 + nop +E(SPARC_RET_STRUCT) + ldx [FP-160], %i0 + ldd [FP-160], %f0 + b 8f + ldx [FP-152], %i1 +E(SPARC_RET_UINT8) + ldub [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT8) + ldsb [FP-160+7], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT16) + lduh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_SINT16) + ldsh [FP-160+6], %i0 + return %i7+8 + nop +E(SPARC_RET_UINT32) + lduw [FP-160+4], %i0 + return %i7+8 + nop +E(SP_V9_RET_SINT32) + ldsw [FP-160+4], %i0 + return %i7+8 + nop +E(SPARC_RET_INT64) + ldx [FP-160], %i0 + return %i7+8 + nop +E(SPARC_RET_INT128) + ldx [FP-160], %i0 + ldx [FP-160+8], %i1 + return %i7+8 + nop +E(SPARC_RET_F_8) + ld [FP-160+7*4], %f7 + nop + ld [FP-160+6*4], %f6 + nop +E(SPARC_RET_F_6) + ld [FP-160+5*4], %f5 + nop + ld [FP-160+4*4], %f4 + nop +E(SPARC_RET_F_4) + ldd [FP-160], %f0 + ldd [FP-160+8], %f2 + return %i7+8 + nop +E(SPARC_RET_F_2) + ldd [FP-160], %f0 + return %i7+8 + nop +E(SP_V9_RET_F_3) + ld [FP-160+2*4], %f2 + nop + ld [FP-160+1*4], %f1 + nop +E(SPARC_RET_F_1) + ld [FP-160], %f0 + return %i7+8 + nop + + ! Finish the SPARC_RET_STRUCT sequence. + .align 8 +8: ldd [FP-152], %f2 + ldx [FP-144], %i2 + ldd [FP-144], %f4 + ldx [FP-136], %i3 + ldd [FP-136], %f6 + return %i7+8 + nop + +.LUW8: + .size C(ffi_closure_v9), . - C(ffi_closure_v9) + +#ifdef HAVE_RO_EH_FRAME + .section ".eh_frame",#alloc +#else + .section ".eh_frame",#alloc,#write +#endif + +#ifdef HAVE_AS_SPARC_UA_PCREL +# define FDE_RANGE(B, E) .long %r_disp32(B), E - B +#else +# define FDE_RANGE(B, E) .align 8; .xword B, E - B +#endif + + .align 8 +.LCIE: + .long .LECIE - .LSCIE ! CIE Length +.LSCIE: + .long 0 ! CIE Identifier Tag + .byte 1 ! CIE Version + .ascii "zR\0" ! CIE Augmentation + .byte 4 ! CIE Code Alignment Factor + .byte 0x78 ! CIE Data Alignment Factor + .byte 15 ! CIE RA Column + .byte 1 ! Augmentation size +#ifdef HAVE_AS_SPARC_UA_PCREL + .byte 0x1b ! FDE Encoding (pcrel sdata4) +#else + .byte 0x50 ! FDE Encoding (aligned absolute) +#endif + .byte 0xc, 14, 0xff, 0xf ! DW_CFA_def_cfa, %o6, offset 0x7ff + .align 8 +.LECIE: + + .long .LEFDE1 - .LSFDE1 ! FDE Length +.LSFDE1: + .long .LSFDE1 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW0, .LUW2) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE1: + + .long .LEFDE2 - .LSFDE2 ! FDE Length +.LSFDE2: + .long .LSFDE2 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW3, .LUW5) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE2: + + .long .LEFDE3 - .LSFDE3 ! FDE Length +.LSFDE3: + .long .LSFDE3 - .LCIE ! FDE CIE offset + FDE_RANGE(.LUW6, .LUW8) + .byte 0 ! Augmentation size + .byte 0x40+1 ! DW_CFA_advance_loc 4 + .byte 0xd, 30 ! DW_CFA_def_cfa_register, %i6 + .byte 0x2d ! DW_CFA_GNU_window_save + .byte 0x9, 15, 31 ! 
DW_CFA_register, %o7, %i7 + .align 8 +.LEFDE3: + +#endif /* SPARC64 */ +#ifdef __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/ffi.c new file mode 100644 index 0000000..3a94469 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/ffi.c @@ -0,0 +1,355 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2012 Tilera Corp. + + TILE Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* The first 10 registers are used to pass arguments and return values. */ +#define NUM_ARG_REGS 10 + +/* Performs a raw function call with the given NUM_ARG_REGS register arguments + and the specified additional stack arguments (if any). */ +extern void ffi_call_tile(ffi_sarg reg_args[NUM_ARG_REGS], + const ffi_sarg *stack_args, + size_t stack_args_bytes, + void (*fnaddr)(void)) + FFI_HIDDEN; + +/* This handles the raw call from the closure stub, cleaning up the + parameters and delegating to ffi_closure_tile_inner. */ +extern void ffi_closure_tile(void) FFI_HIDDEN; + + +ffi_status +ffi_prep_cif_machdep(ffi_cif *cif) +{ + /* We always allocate room for all registers. Even if we don't + use them as parameters, they get returned in the same array + as struct return values so we need to make room. */ + if (cif->bytes < NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->bytes = NUM_ARG_REGS * FFI_SIZEOF_ARG; + + if (cif->rtype->size > NUM_ARG_REGS * FFI_SIZEOF_ARG) + cif->flags = FFI_TYPE_STRUCT; + else + cif->flags = FFI_TYPE_INT; + + /* Nothing to do. */ + return FFI_OK; +} + + +static long +assign_to_ffi_arg(ffi_sarg *out, void *in, const ffi_type *type, + int write_to_reg) +{ + switch (type->type) + { + case FFI_TYPE_SINT8: + *out = *(SINT8 *)in; + return 1; + + case FFI_TYPE_UINT8: + *out = *(UINT8 *)in; + return 1; + + case FFI_TYPE_SINT16: + *out = *(SINT16 *)in; + return 1; + + case FFI_TYPE_UINT16: + *out = *(UINT16 *)in; + return 1; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: +#ifndef __LP64__ + case FFI_TYPE_POINTER: +#endif + /* Note that even unsigned 32-bit quantities are sign extended + on tilegx when stored in a register. 
*/ + *out = *(SINT32 *)in; + return 1; + + case FFI_TYPE_FLOAT: +#ifdef __tilegx__ + if (write_to_reg) + { + /* Properly sign extend the value. */ + union { float f; SINT32 s32; } val; + val.f = *(float *)in; + *out = val.s32; + } + else +#endif + { + *(float *)out = *(float *)in; + } + return 1; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: +#ifdef __LP64__ + case FFI_TYPE_POINTER: +#endif + *(UINT64 *)out = *(UINT64 *)in; + return sizeof(UINT64) / FFI_SIZEOF_ARG; + + case FFI_TYPE_STRUCT: + memcpy(out, in, type->size); + return (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + case FFI_TYPE_VOID: + /* Must be a return type. Nothing to do. */ + return 0; + + default: + FFI_ASSERT(0); + return -1; + } +} + + +void +ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_sarg * const arg_mem = alloca(cif->bytes); + ffi_sarg * const reg_args = arg_mem; + ffi_sarg * const stack_args = ®_args[NUM_ARG_REGS]; + ffi_sarg *argp = arg_mem; + ffi_type ** const arg_types = cif->arg_types; + const long num_args = cif->nargs; + long i; + + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Pass a hidden pointer to the return value. We make sure there + is scratch space for the callee to store the return value even if + our caller doesn't care about it. */ + *argp++ = (intptr_t)(rvalue ? rvalue : alloca(cif->rtype->size)); + + /* No more work needed to return anything. */ + rvalue = NULL; + } + + for (i = 0; i < num_args; i++) + { + ffi_type *type = arg_types[i]; + void * const arg_in = avalue[i]; + ptrdiff_t arg_word = argp - arg_mem; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (type->type == FFI_TYPE_STRUCT) + { + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + + if (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS) + { + /* Args are not allowed to span registers and the stack. */ + argp = stack_args; + } + + memcpy(argp, arg_in, type->size); + argp += arg_size_in_words; + } + else + { + argp += assign_to_ffi_arg(argp, arg_in, arg_types[i], 1); + } + } + + /* Actually do the call. */ + ffi_call_tile(reg_args, stack_args, + cif->bytes - (NUM_ARG_REGS * FFI_SIZEOF_ARG), fn); + + if (rvalue != NULL) + assign_to_ffi_arg(rvalue, reg_args, cif->rtype, 0); +} + + +/* Template code for closure. */ +extern const UINT64 ffi_template_tramp_tile[] FFI_HIDDEN; + + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ +#ifdef __tilegx__ + /* TILE-Gx */ + SINT64 c; + SINT64 h; + int s; + UINT64 *out; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + + c = (intptr_t)closure; + h = (intptr_t)ffi_closure_tile; + s = 0; + + /* Find the smallest shift count that doesn't lose information + (i.e. no need to explicitly insert high bits of the address that + are just the sign extension of the low bits). */ + while ((c >> s) != (SINT16)(c >> s) || (h >> s) != (SINT16)(h >> s)) + s += 16; + +#define OPS(a, b, shift) \ + (create_Imm16_X0((a) >> (shift)) | create_Imm16_X1((b) >> (shift))) + + /* Emit the moveli. 
*/ + *out++ = ffi_template_tramp_tile[0] | OPS(c, h, s); + for (s -= 16; s >= 0; s -= 16) + *out++ = ffi_template_tramp_tile[1] | OPS(c, h, s); + +#undef OPS + + *out++ = ffi_template_tramp_tile[2]; + +#else + /* TILEPro */ + UINT64 *out; + intptr_t delta; + + if (cif->abi != FFI_UNIX) + return FFI_BAD_ABI; + + out = (UINT64 *)closure->tramp; + delta = (intptr_t)ffi_closure_tile - (intptr_t)codeloc; + + *out++ = ffi_template_tramp_tile[0] | create_JOffLong_X1(delta >> 3); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + invalidate_icache(closure->tramp, (char *)out - closure->tramp, + getpagesize()); + + return FFI_OK; +} + + +/* This is called by the assembly wrapper for closures. This does + all of the work. On entry reg_args[0] holds the values the registers + had when the closure was invoked. On return reg_args[1] holds the register + values to be returned to the caller (many of which may be garbage). */ +void FFI_HIDDEN +ffi_closure_tile_inner(ffi_closure *closure, + ffi_sarg reg_args[2][NUM_ARG_REGS], + ffi_sarg *stack_args) +{ + ffi_cif * const cif = closure->cif; + void ** const avalue = alloca(cif->nargs * sizeof(void *)); + void *rvalue; + ffi_type ** const arg_types = cif->arg_types; + ffi_sarg * const reg_args_in = reg_args[0]; + ffi_sarg * const reg_args_out = reg_args[1]; + ffi_sarg * argp; + long i, arg_word, nargs = cif->nargs; + /* Use a union to guarantee proper alignment for double. */ + union { ffi_sarg arg[NUM_ARG_REGS]; double d; UINT64 u64; } closure_ret; + + /* Start out reading register arguments. */ + argp = reg_args_in; + + /* Copy the caller's structure return address to that the closure + returns the data directly to the caller. */ + if (cif->flags == FFI_TYPE_STRUCT) + { + /* Return by reference via hidden pointer. */ + rvalue = (void *)(intptr_t)*argp++; + arg_word = 1; + } + else + { + /* Return the value in registers. */ + rvalue = &closure_ret; + arg_word = 0; + } + + /* Grab the addresses of the arguments. */ + for (i = 0; i < nargs; i++) + { + ffi_type * const type = arg_types[i]; + const size_t arg_size_in_words = + (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG; + +#ifndef __tilegx__ + /* Doubleword-aligned values are always in an even-number register + pair, or doubleword-aligned stack slot if out of registers. */ + long align = arg_word & (type->alignment > FFI_SIZEOF_ARG); + argp += align; + arg_word += align; +#endif + + if (arg_word == NUM_ARG_REGS || + (arg_word < NUM_ARG_REGS && + arg_word + arg_size_in_words > NUM_ARG_REGS)) + { + /* Switch to reading arguments from the stack. */ + argp = stack_args; + arg_word = NUM_ARG_REGS; + } + + avalue[i] = argp; + argp += arg_size_in_words; + arg_word += arg_size_in_words; + } + + /* Invoke the closure. */ + closure->fun(cif, rvalue, avalue, closure->user_data); + + if (cif->flags != FFI_TYPE_STRUCT) + { + /* Canonicalize for register representation. */ + assign_to_ffi_arg(reg_args_out, &closure_ret, cif->rtype, 1); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/ffitarget.h new file mode 100644 index 0000000..679fb5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/ffitarget.h @@ -0,0 +1,65 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Tilera Corp. + Target configuration macros for TILE. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM + +#include + +typedef uint_reg_t ffi_arg; +typedef int_reg_t ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_UNIX, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ +#define FFI_CLOSURES 1 + +#ifdef __tilegx__ +/* We always pass 8-byte values, even in -m32 mode. */ +# define FFI_SIZEOF_ARG 8 +# ifdef __LP64__ +# define FFI_TRAMPOLINE_SIZE (8 * 5) /* 5 bundles */ +# else +# define FFI_TRAMPOLINE_SIZE (8 * 3) /* 3 bundles */ +# endif +#else +# define FFI_SIZEOF_ARG 4 +# define FFI_TRAMPOLINE_SIZE 8 /* 1 bundle */ +#endif +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/tile.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/tile.S new file mode 100644 index 0000000..d1f82cb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/tile/tile.S @@ -0,0 +1,360 @@ +/* ----------------------------------------------------------------------- + tile.S - Copyright (c) 2011 Tilera Corp. + + Tilera TILEPro and TILE-Gx Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +/* Number of bytes in a register. */ +#define REG_SIZE FFI_SIZEOF_ARG + +/* Number of bytes in stack linkage area for backtracing. + + A note about the ABI: on entry to a procedure, sp points to a stack + slot where it must spill the return address if it's not a leaf. + REG_SIZE bytes beyond that is a slot owned by the caller which + contains the sp value that the caller had when it was originally + entered (i.e. the caller's frame pointer). */ +#define LINKAGE_SIZE (2 * REG_SIZE) + +/* The first 10 registers are used to pass arguments and return values. */ +#define NUM_ARG_REGS 10 + +#ifdef __tilegx__ +#define SW st +#define LW ld +#define BGZT bgtzt +#else +#define SW sw +#define LW lw +#define BGZT bgzt +#endif + + +/* void ffi_call_tile (int_reg_t reg_args[NUM_ARG_REGS], + const int_reg_t *stack_args, + unsigned long stack_args_bytes, + void (*fnaddr)(void)); + + On entry, REG_ARGS contain the outgoing register values, + and STACK_ARGS contains STACK_ARG_BYTES of additional values + to be passed on the stack. If STACK_ARG_BYTES is zero, then + STACK_ARGS is ignored. + + When the invoked function returns, the values of r0-r9 are + blindly stored back into REG_ARGS for the caller to examine. */ + + .section .text.ffi_call_tile, "ax", @progbits + .align 8 + .globl ffi_call_tile + FFI_HIDDEN(ffi_call_tile) +ffi_call_tile: + +/* Incoming arguments. */ +#define REG_ARGS r0 +#define INCOMING_STACK_ARGS r1 +#define STACK_ARG_BYTES r2 +#define ORIG_FNADDR r3 + +/* Temporary values. */ +#define FRAME_SIZE r10 +#define TMP r11 +#define TMP2 r12 +#define OUTGOING_STACK_ARGS r13 +#define REG_ADDR_PTR r14 +#define RETURN_REG_ADDR r15 +#define FNADDR r16 + + .cfi_startproc + { + /* Save return address. */ + SW sp, lr + .cfi_offset lr, 0 + /* Prepare to spill incoming r52. */ + addi TMP, sp, -REG_SIZE + /* Increase frame size to have room to spill r52 and REG_ARGS. + The +7 is to round up mod 8. */ + addi FRAME_SIZE, STACK_ARG_BYTES, \ + REG_SIZE + REG_SIZE + LINKAGE_SIZE + 7 + } + { + /* Round stack frame size to a multiple of 8 to satisfy ABI. */ + andi FRAME_SIZE, FRAME_SIZE, -8 + /* Compute where to spill REG_ARGS value. */ + addi TMP2, sp, -(REG_SIZE * 2) + } + { + /* Spill incoming r52. */ + SW TMP, r52 + .cfi_offset r52, -REG_SIZE + /* Set up our frame pointer. */ + move r52, sp + .cfi_def_cfa_register r52 + /* Push stack frame. */ + sub sp, sp, FRAME_SIZE + } + { + /* Prepare to set up stack linkage. */ + addi TMP, sp, REG_SIZE + /* Prepare to memcpy stack args. */ + addi OUTGOING_STACK_ARGS, sp, LINKAGE_SIZE + /* Save REG_ARGS which we will need after we call the subroutine. */ + SW TMP2, REG_ARGS + } + { + /* Set up linkage info to hold incoming stack pointer. */ + SW TMP, r52 + } + { + /* Skip stack args memcpy if we don't have any stack args (common). */ + blezt STACK_ARG_BYTES, .Ldone_stack_args_memcpy + } + +.Lmemcpy_stack_args: + { + /* Load incoming argument from stack_args. */ + LW TMP, INCOMING_STACK_ARGS + addi INCOMING_STACK_ARGS, INCOMING_STACK_ARGS, REG_SIZE + } + { + /* Store stack argument into outgoing stack argument area. 
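ffi_call above packs the register image and overflow stack area that ffi_call_tile then loads; callers never build that layout themselves but go through ffi_prep_cif and ffi_call from libffi's portable API (defined in the generic part of libffi, not in this file). A minimal, hypothetical caller-side sketch, with puts as a convenient target:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *argtypes[1] = { &ffi_type_pointer };
  const char *msg = "Hello from libffi";
  void *argvalues[1] = { &msg };   /* avalue holds the address of each argument */
  ffi_arg rc;                      /* integral returns narrower than a register use ffi_arg */

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(puts), &rc, argvalues);
      printf("puts returned %d\n", (int)rc);
    }
  return 0;
}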
*/ + SW OUTGOING_STACK_ARGS, TMP + addi OUTGOING_STACK_ARGS, OUTGOING_STACK_ARGS, REG_SIZE + addi STACK_ARG_BYTES, STACK_ARG_BYTES, -REG_SIZE + } + { + BGZT STACK_ARG_BYTES, .Lmemcpy_stack_args + } +.Ldone_stack_args_memcpy: + + { + /* Copy aside ORIG_FNADDR so we can overwrite its register. */ + move FNADDR, ORIG_FNADDR + /* Prepare to load argument registers. */ + addi REG_ADDR_PTR, r0, REG_SIZE + /* Load outgoing r0. */ + LW r0, r0 + } + + /* Load up argument registers from the REG_ARGS array. */ +#define LOAD_REG(REG, PTR) \ + { \ + LW REG, PTR ; \ + addi PTR, PTR, REG_SIZE \ + } + + LOAD_REG(r1, REG_ADDR_PTR) + LOAD_REG(r2, REG_ADDR_PTR) + LOAD_REG(r3, REG_ADDR_PTR) + LOAD_REG(r4, REG_ADDR_PTR) + LOAD_REG(r5, REG_ADDR_PTR) + LOAD_REG(r6, REG_ADDR_PTR) + LOAD_REG(r7, REG_ADDR_PTR) + LOAD_REG(r8, REG_ADDR_PTR) + LOAD_REG(r9, REG_ADDR_PTR) + + { + /* Call the subroutine. */ + jalr FNADDR + } + + { + /* Restore original lr. */ + LW lr, r52 + /* Prepare to recover ARGS, which we spilled earlier. */ + addi TMP, r52, -(2 * REG_SIZE) + } + { + /* Restore ARGS, so we can fill it in with the return regs r0-r9. */ + LW RETURN_REG_ADDR, TMP + /* Prepare to restore original r52. */ + addi TMP, r52, -REG_SIZE + } + + { + /* Pop stack frame. */ + move sp, r52 + /* Restore original r52. */ + LW r52, TMP + } + +#define STORE_REG(REG, PTR) \ + { \ + SW PTR, REG ; \ + addi PTR, PTR, REG_SIZE \ + } + + /* Return all register values by reference. */ + STORE_REG(r0, RETURN_REG_ADDR) + STORE_REG(r1, RETURN_REG_ADDR) + STORE_REG(r2, RETURN_REG_ADDR) + STORE_REG(r3, RETURN_REG_ADDR) + STORE_REG(r4, RETURN_REG_ADDR) + STORE_REG(r5, RETURN_REG_ADDR) + STORE_REG(r6, RETURN_REG_ADDR) + STORE_REG(r7, RETURN_REG_ADDR) + STORE_REG(r8, RETURN_REG_ADDR) + STORE_REG(r9, RETURN_REG_ADDR) + + { + jrp lr + } + + .cfi_endproc + .size ffi_call_tile, .-ffi_call_tile + +/* ffi_closure_tile(...) + + On entry, lr points to the closure plus 8 bytes, and r10 + contains the actual return address. + + This function simply dumps all register parameters into a stack array + and passes the closure, the registers array, and the stack arguments + to C code that does all of the actual closure processing. */ + + .section .text.ffi_closure_tile, "ax", @progbits + .align 8 + .globl ffi_closure_tile + FFI_HIDDEN(ffi_closure_tile) + + .cfi_startproc +/* Room to spill all NUM_ARG_REGS incoming registers, plus frame linkage. */ +#define CLOSURE_FRAME_SIZE (((NUM_ARG_REGS * REG_SIZE * 2 + LINKAGE_SIZE) + 7) & -8) +ffi_closure_tile: + { +#ifdef __tilegx__ + st sp, lr + .cfi_offset lr, 0 +#else + /* Save return address (in r10 due to closure stub wrapper). */ + SW sp, r10 + .cfi_return_column r10 + .cfi_offset r10, 0 +#endif + /* Compute address for stack frame linkage. */ + addli r10, sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + } + { + /* Save incoming stack pointer in linkage area. */ + SW r10, sp + .cfi_offset sp, -(CLOSURE_FRAME_SIZE - REG_SIZE) + /* Push a new stack frame. */ + addli sp, sp, -CLOSURE_FRAME_SIZE + .cfi_adjust_cfa_offset CLOSURE_FRAME_SIZE + } + + { + /* Create pointer to where to start spilling registers. */ + addi r10, sp, LINKAGE_SIZE + } + + /* Spill all the incoming registers. */ + STORE_REG(r0, r10) + STORE_REG(r1, r10) + STORE_REG(r2, r10) + STORE_REG(r3, r10) + STORE_REG(r4, r10) + STORE_REG(r5, r10) + STORE_REG(r6, r10) + STORE_REG(r7, r10) + STORE_REG(r8, r10) + { + /* Save r9. */ + SW r10, r9 +#ifdef __tilegx__ + /* Pointer to closure is passed in r11. */ + move r0, r11 +#else + /* Compute pointer to the closure object. 
Because the closure + starts with a "jal ffi_closure_tile", we can just take the + value of lr (a phony return address pointing into the closure) + and subtract 8. */ + addi r0, lr, -8 +#endif + /* Compute a pointer to the register arguments we just spilled. */ + addi r1, sp, LINKAGE_SIZE + } + { + /* Compute a pointer to the extra stack arguments (if any). */ + addli r2, sp, CLOSURE_FRAME_SIZE + LINKAGE_SIZE + /* Call C code to deal with all of the grotty details. */ + jal ffi_closure_tile_inner + } + { + addli r10, sp, CLOSURE_FRAME_SIZE + } + { + /* Restore the return address. */ + LW lr, r10 + /* Compute pointer to registers array. */ + addli r10, sp, LINKAGE_SIZE + (NUM_ARG_REGS * REG_SIZE) + } + /* Return all the register values, which C code may have set. */ + LOAD_REG(r0, r10) + LOAD_REG(r1, r10) + LOAD_REG(r2, r10) + LOAD_REG(r3, r10) + LOAD_REG(r4, r10) + LOAD_REG(r5, r10) + LOAD_REG(r6, r10) + LOAD_REG(r7, r10) + LOAD_REG(r8, r10) + LOAD_REG(r9, r10) + { + /* Pop the frame. */ + addli sp, sp, CLOSURE_FRAME_SIZE + jrp lr + } + + .cfi_endproc + .size ffi_closure_tile, . - ffi_closure_tile + + +/* What follows are code template instructions that get copied to the + closure trampoline by ffi_prep_closure_loc. The zeroed operands + get replaced by their proper values at runtime. */ + + .section .text.ffi_template_tramp_tile, "ax", @progbits + .align 8 + .globl ffi_template_tramp_tile + FFI_HIDDEN(ffi_template_tramp_tile) +ffi_template_tramp_tile: +#ifdef __tilegx__ + { + moveli r11, 0 /* backpatched to address of containing closure. */ + moveli r10, 0 /* backpatched to ffi_closure_tile. */ + } + /* Note: the following bundle gets generated multiple times + depending on the pointer value (esp. useful for -m32 mode). */ + { shl16insli r11, r11, 0 ; shl16insli r10, r10, 0 } + { info 2+8 /* for backtracer: -> pc in lr, frame size 0 */ ; jr r10 } +#else + /* 'jal .' yields a PC-relative offset of zero so we can OR in the + right offset at runtime. */ + { move r10, lr ; jal . /* ffi_closure_tile */ } +#endif + + .size ffi_template_tramp_tile, . - ffi_template_tramp_tile diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/types.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/types.c new file mode 100644 index 0000000..9ec27f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/types.c @@ -0,0 +1,108 @@ +/* ----------------------------------------------------------------------- + types.c - Copyright (c) 1996, 1998 Red Hat, Inc. + + Predefined ffi_types needed by libffi. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* Hide the basic type definitions from the header file, so that we + can redefine them here as "const". */ +#define LIBFFI_HIDE_BASIC_TYPES + +#include +#include + +/* Type definitions */ + +#define FFI_TYPEDEF(name, type, id, maybe_const)\ +struct struct_align_##name { \ + char c; \ + type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_##name = { \ + sizeof(type), \ + offsetof(struct struct_align_##name, x), \ + id, NULL \ +} + +#define FFI_COMPLEX_TYPEDEF(name, type, maybe_const) \ +static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffi_type_##name), NULL \ +}; \ +struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ +} + +/* Size and alignment are fake here. They must not be 0. */ +FFI_EXTERN const ffi_type ffi_type_void = { + 1, 1, FFI_TYPE_VOID, NULL +}; + +FFI_TYPEDEF(uint8, UINT8, FFI_TYPE_UINT8, const); +FFI_TYPEDEF(sint8, SINT8, FFI_TYPE_SINT8, const); +FFI_TYPEDEF(uint16, UINT16, FFI_TYPE_UINT16, const); +FFI_TYPEDEF(sint16, SINT16, FFI_TYPE_SINT16, const); +FFI_TYPEDEF(uint32, UINT32, FFI_TYPE_UINT32, const); +FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const); +FFI_TYPEDEF(uint64, UINT64, FFI_TYPE_UINT64, const); +FFI_TYPEDEF(sint64, SINT64, FFI_TYPE_SINT64, const); + +FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const); + +FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const); +FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const); + +#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ +#define FFI_LDBL_CONST const +#else +#define FFI_LDBL_CONST +#endif + +#ifdef __alpha__ +/* Even if we're not configured to default to 128-bit long double, + maintain binary compatibility, as -mlong-double-128 can be used + at any time. */ +/* Validate the hard-coded number below. */ +# if defined(__LONG_DOUBLE_128__) && FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +const ffi_type ffi_type_longdouble = { 16, 16, 4, NULL }; +#elif FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_TYPEDEF(longdouble, long double, FFI_TYPE_LONGDOUBLE, FFI_LDBL_CONST); +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_COMPLEX_TYPEDEF(float, float, const); +FFI_COMPLEX_TYPEDEF(double, double, const); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_COMPLEX_TYPEDEF(longdouble, long double, FFI_LDBL_CONST); +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/elfbsd.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/elfbsd.S new file mode 100644 index 0000000..01ca313 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/elfbsd.S @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + */ + +#define LIBFFI_ASM +#include +#include + + .text + +/* + * void * %r0 + * ffi_call_elfbsd(extended_cif *ecif, 4(%ap) + * unsigned bytes, 8(%ap) + * unsigned flags, 12(%ap) + * void *rvalue, 16(%ap) + * void (*fn)()); 20(%ap) + */ + .globl ffi_call_elfbsd + .type ffi_call_elfbsd,@function + .align 2 +ffi_call_elfbsd: + .word 0x00c # save R2 and R3 + + # Allocate stack space for the args + subl2 8(%ap), %sp + + # Call ffi_prep_args + pushl %sp + pushl 4(%ap) + calls $2, ffi_prep_args + + # Get function pointer + movl 20(%ap), %r1 + + # Build a CALLS frame + ashl $-2, 8(%ap), %r0 + pushl %r0 # argument stack usage + movl %sp, %r0 # future %ap + # saved registers + bbc $11, 0(%r1), 1f + pushl %r11 +1: bbc $10, 0(%r1), 1f + pushl %r10 +1: bbc $9, 0(%r1), 1f + pushl %r9 +1: bbc $8, 0(%r1), 1f + pushl %r8 +1: bbc $7, 0(%r1), 1f + pushl %r7 +1: bbc $6, 0(%r1), 1f + pushl %r6 +1: bbc $5, 0(%r1), 1f + pushl %r5 +1: bbc $4, 0(%r1), 1f + pushl %r4 +1: bbc $3, 0(%r1), 1f + pushl %r3 +1: bbc $2, 0(%r1), 1f + pushl %r2 +1: + pushal 9f + pushl %fp + pushl %ap + movl 16(%ap), %r3 # struct return address, if needed + movl %r0, %ap + movzwl 4(%fp), %r0 # previous PSW, without the saved registers mask + bisl2 $0x20000000, %r0 # calls frame + movzwl 0(%r1), %r2 + bicw2 $0xf003, %r2 # only keep R11-R2 + ashl $16, %r2, %r2 + bisl2 %r2, %r0 # saved register mask of the called function + pushl %r0 + pushl $0 + movl %sp, %fp + + # Invoke the function + pushal 2(%r1) # skip procedure entry mask + movl %r3, %r1 + bicpsw $0x000f + rsb + +9: + # Copy return value if necessary + tstl 16(%ap) + jeql 9f + movl 16(%ap), %r2 + + bbc $0, 12(%ap), 1f # CIF_FLAGS_CHAR + movb %r0, 0(%r2) + brb 9f +1: + bbc $1, 12(%ap), 1f # CIF_FLAGS_SHORT + movw %r0, 0(%r2) + brb 9f +1: + bbc $2, 12(%ap), 1f # CIF_FLAGS_INT + movl %r0, 0(%r2) + brb 9f +1: + bbc $3, 12(%ap), 1f # CIF_FLAGS_DINT + movq %r0, 0(%r2) + brb 9f +1: + movl %r1, %r0 # might have been a struct + #brb 9f + +9: + ret + +/* + * ffi_closure_elfbsd(void); + * invoked with %r0: ffi_closure *closure + */ + .globl ffi_closure_elfbsd + .type ffi_closure_elfbsd, @function + .align 2 +ffi_closure_elfbsd: + .word 0 + + # Allocate room on stack for return value + subl2 $8, %sp + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushal 4(%sp) # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + # Copy return 
value if necessary + bitb $1, %r0 # CIF_FLAGS_CHAR + beql 1f + movb 0(%sp), %r0 + brb 9f +1: + bitb $2, %r0 # CIF_FLAGS_SHORT + beql 1f + movw 0(%sp), %r0 + brb 9f +1: + bitb $4, %r0 # CIF_FLAGS_INT + beql 1f + movl 0(%sp), %r0 + brb 9f +1: + bitb $8, %r0 # CIF_FLAGS_DINT + beql 1f + movq 0(%sp), %r0 + #brb 9f +1: + +9: + ret + +/* + * ffi_closure_struct_elfbsd(void); + * invoked with %r0: ffi_closure *closure + * %r1: struct return address + */ + .globl ffi_closure_struct_elfbsd + .type ffi_closure_struct_elfbsd, @function + .align 2 +ffi_closure_struct_elfbsd: + .word 0 + + # Invoke the closure function + pushal 4(%ap) # calling stack + pushl %r1 # return value + pushl %r0 # closure + calls $3, ffi_closure_elfbsd_inner + + ret diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/ffi.c new file mode 100644 index 0000000..e52caec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/ffi.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + * + * This file attempts to provide all the FFI entry points which can reliably + * be implemented in C. 
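The CIF_FLAGS_* values defined just below classify small aggregate returns by size so the assembly above can copy the right width back out of r0/r1. A hedged caller-side sketch of the two-int case (CIF_FLAGS_DINT), with make_point as a hypothetical target:

#include <ffi.h>
#include <stdio.h>

struct point { int x, y; };

static struct point make_point(int x, int y)
{
  struct point p = { x, y };
  return p;
}

int main(void)
{
  /* Describe struct point to libffi; size/alignment are filled in by ffi_prep_cif. */
  ffi_type *point_elements[3] = { &ffi_type_sint, &ffi_type_sint, NULL };
  ffi_type point_type = { 0, 0, FFI_TYPE_STRUCT, point_elements };

  ffi_cif cif;
  ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
  int x = 3, y = 4;
  void *argvalues[2] = { &x, &y };
  struct point result;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &point_type, argtypes) == FFI_OK)
    {
      ffi_call(&cif, FFI_FN(make_point), &result, argvalues);
      printf("(%d, %d)\n", result.x, result.y);
    }
  return 0;
}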
+ */ + +#include +#include + +#include +#include + +#define CIF_FLAGS_CHAR 1 /* for struct only */ +#define CIF_FLAGS_SHORT 2 /* for struct only */ +#define CIF_FLAGS_INT 4 +#define CIF_FLAGS_DINT 8 + +/* + * Foreign Function Interface API + */ + +void ffi_call_elfbsd (extended_cif *, unsigned, unsigned, void *, + void (*) ()); +void *ffi_prep_args (extended_cif *ecif, void *stack); + +void * +ffi_prep_args (extended_cif *ecif, void *stack) +{ + unsigned int i; + void **p_argv; + char *argp; + ffi_type **p_arg; + void *struct_value_ptr; + + argp = stack; + + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT + && !ecif->cif->flags) + struct_value_ptr = ecif->rvalue; + else + struct_value_ptr = NULL; + + p_argv = ecif->avalue; + + for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; + i != 0; + i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + if (z < sizeof (int)) + { + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int) *(SINT8 *) *p_argv; + break; + + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int) *(UINT8 *) *p_argv; + break; + + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int) *(SINT16 *) *p_argv; + break; + + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int) *(UINT16 *) *p_argv; + break; + + case FFI_TYPE_STRUCT: + memcpy (argp, *p_argv, z); + break; + + default: + FFI_ASSERT (0); + } + z = sizeof (int); + } + else + { + memcpy (argp, *p_argv, z); + + /* Align if necessary. */ + if ((sizeof(int) - 1) & z) + z = FFI_ALIGN(z, sizeof(int)); + } + + p_argv++; + argp += z; + } + + return struct_value_ptr; +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + cif->flags = 0; + break; + + case FFI_TYPE_STRUCT: + if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT && + cif->rtype->elements[1]) + { + cif->flags = 0; + break; + } + + if (cif->rtype->size == sizeof (char)) + cif->flags = CIF_FLAGS_CHAR; + else if (cif->rtype->size == sizeof (short)) + cif->flags = CIF_FLAGS_SHORT; + else if (cif->rtype->size == sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else if (cif->rtype->size == 2 * sizeof (int)) + cif->flags = CIF_FLAGS_DINT; + else + cif->flags = 0; + break; + + default: + if (cif->rtype->size <= sizeof (int)) + cif->flags = CIF_FLAGS_INT; + else + cif->flags = CIF_FLAGS_DINT; + break; + } + + return FFI_OK; +} + +void +ffi_call (ffi_cif *cif, void (*fn) (), void *rvalue, void **avalue) +{ + extended_cif ecif; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* If the return value is a struct and we don't have a return value + address then we need to make one. 
*/ + + if (rvalue == NULL + && cif->rtype->type == FFI_TYPE_STRUCT + && cif->flags == 0) + ecif.rvalue = alloca (cif->rtype->size); + else + ecif.rvalue = rvalue; + + switch (cif->abi) + { + case FFI_ELFBSD: + ffi_call_elfbsd (&ecif, cif->bytes, cif->flags, ecif.rvalue, fn); + break; + + default: + FFI_ASSERT (0); + break; + } +} + +/* + * Closure API + */ + +void ffi_closure_elfbsd (void); +void ffi_closure_struct_elfbsd (void); +unsigned int ffi_closure_elfbsd_inner (ffi_closure *, void *, char *); + +static void +ffi_prep_closure_elfbsd (ffi_cif *cif, void **avalue, char *stackp) +{ + unsigned int i; + void **p_argv; + ffi_type **p_arg; + + p_argv = avalue; + + for (i = cif->nargs, p_arg = cif->arg_types; i != 0; i--, p_arg++) + { + size_t z; + + z = (*p_arg)->size; + *p_argv = stackp; + + /* Align if necessary */ + if ((sizeof (int) - 1) & z) + z = FFI_ALIGN(z, sizeof (int)); + + p_argv++; + stackp += z; + } +} + +unsigned int +ffi_closure_elfbsd_inner (ffi_closure *closure, void *resp, char *stack) +{ + ffi_cif *cif; + void **arg_area; + + cif = closure->cif; + arg_area = (void **) alloca (cif->nargs * sizeof (void *)); + + ffi_prep_closure_elfbsd (cif, arg_area, stack); + + (closure->fun) (cif, resp, arg_area, closure->user_data); + + return cif->flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif, + void (*fun)(ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + char *tramp = (char *) codeloc; + void *fn; + + FFI_ASSERT (cif->abi == FFI_ELFBSD); + + /* entry mask */ + *(unsigned short *)(tramp + 0) = 0x0000; + /* movl #closure, r0 */ + tramp[2] = 0xd0; + tramp[3] = 0x8f; + *(unsigned int *)(tramp + 4) = (unsigned int) closure; + tramp[8] = 0x50; + + if (cif->rtype->type == FFI_TYPE_STRUCT + && !cif->flags) + fn = &ffi_closure_struct_elfbsd; + else + fn = &ffi_closure_elfbsd; + + /* jmpl #fn */ + tramp[9] = 0x17; + tramp[10] = 0xef; + *(unsigned int *)(tramp + 11) = (unsigned int)fn + 2 - + (unsigned int)tramp - 9 - 6; + + closure->cif = cif; + closure->user_data = user_data; + closure->fun = fun; + + return FFI_OK; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/ffitarget.h new file mode 100644 index 0000000..2fc9488 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/vax/ffitarget.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013 Miodrag Vallat. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * ``Software''), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * vax Foreign Function Interface + */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_ELFBSD, + FFI_DEFAULT_ABI = FFI_ELFBSD, + FFI_LAST_ABI = FFI_DEFAULT_ABI + 1 +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_TRAMPOLINE_SIZE 15 +#define FFI_NATIVE_RAW_API 0 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/asmnames.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/asmnames.h new file mode 100644 index 0000000..7551021 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/asmnames.h @@ -0,0 +1,30 @@ +#ifndef ASMNAMES_H +#define ASMNAMES_H + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef __APPLE__ +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#if defined(__ELF__) && defined(__PIC__) +# define PLT(X) X@PLT +#else +# define PLT(X) X +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +#endif /* ASMNAMES_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffi.c new file mode 100644 index 0000000..5f7fd81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffi.c @@ -0,0 +1,770 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2017 Anthony Green + Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc. + Copyright (c) 2002 Ranjit Mathew + Copyright (c) 2002 Bo Thorsen + Copyright (c) 2002 Roger Sayle + Copyright (C) 2008, 2010 Free Software Foundation, Inc. + + x86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
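asmnames.h above exists so the x86 assembly sources can spell symbol names portably across object formats. A few illustrative expansions; the Mach-O column assumes __USER_LABEL_PREFIX__ is `_`, which this patch does not spell out:

/* ELF, no user-label prefix:
     C(ffi_call_i386)    -> ffi_call_i386
     L(store)            -> .Lstore
     PLT(ffi_call_i386)  -> ffi_call_i386@PLT          (only when __PIC__ is set)
     ENDF(ffi_call_i386) -> .type ffi_call_i386,@function; .size ffi_call_i386, . - ffi_call_i386

   Apple/Mach-O (assumed prefix `_`):
     C(ffi_call_i386)    -> _ffi_call_i386
     L(store)            -> Lstore                      (assembler-local label)
     ENDF(ffi_call_i386) -> (nothing; .type/.size are ELF directives) */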
+ ----------------------------------------------------------------------- */ + +#if defined(__i386__) || defined(_M_IX86) +#include +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 80-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#if defined(__GNUC__) && !defined(__declspec) +# define __declspec(x) __attribute__((x)) +#endif + +#if defined(_MSC_VER) && defined(_M_IX86) +/* Stack is not 16-byte aligned on Windows. */ +#define STACK_ALIGN(bytes) (bytes) +#else +#define STACK_ALIGN(bytes) FFI_ALIGN (bytes, 16) +#endif + +/* Perform machine dependent cif processing. */ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int i, n, flags, cabi = cif->abi; + + switch (cabi) + { + case FFI_SYSV: + case FFI_STDCALL: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + case FFI_PASCAL: + case FFI_REGISTER: + break; + default: + return FFI_BAD_ABI; + } + + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = X86_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = X86_RET_FLOAT; + break; + case FFI_TYPE_DOUBLE: + flags = X86_RET_DOUBLE; + break; + case FFI_TYPE_LONGDOUBLE: + flags = X86_RET_LDOUBLE; + break; + case FFI_TYPE_UINT8: + flags = X86_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = X86_RET_UINT16; + break; + case FFI_TYPE_SINT8: + flags = X86_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = X86_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = X86_RET_INT64; + break; + case FFI_TYPE_STRUCT: +#ifndef X86 + /* ??? This should be a different ABI rather than an ifdef. */ + if (cif->rtype->size == 1) + flags = X86_RET_STRUCT_1B; + else if (cif->rtype->size == 2) + flags = X86_RET_STRUCT_2B; + else if (cif->rtype->size == 4) + flags = X86_RET_INT32; + else if (cif->rtype->size == 8) + flags = X86_RET_INT64; + else +#endif + { + do_struct: + switch (cabi) + { + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_STDCALL: + case FFI_MS_CDECL: + flags = X86_RET_STRUCTARG; + break; + default: + flags = X86_RET_STRUCTPOP; + break; + } + /* Allocate space for return value pointer. 
*/ + bytes += FFI_ALIGN (sizeof(void*), FFI_SIZEOF_ARG); + } + break; + case FFI_TYPE_COMPLEX: + switch (cif->rtype->elements[0]->type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + goto do_struct; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = X86_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = X86_RET_STRUCT_2B; + break; + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + cif->flags = flags; + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *t = cif->arg_types[i]; + + bytes = FFI_ALIGN (bytes, t->alignment); + bytes += FFI_ALIGN (t->size, FFI_SIZEOF_ARG); + } + cif->bytes = bytes; + + return FFI_OK; +} + +static ffi_arg +extend_basic_type(void *arg, int type) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)arg; + case FFI_TYPE_UINT8: + return *(UINT8 *)arg; + case FFI_TYPE_SINT16: + return *(SINT16 *)arg; + case FFI_TYPE_UINT16: + return *(UINT16 *)arg; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + return *(UINT32 *)arg; + + default: + abort(); + } +} + +struct call_frame +{ + void *ebp; /* 0 */ + void *retaddr; /* 4 */ + void (*fn)(void); /* 8 */ + int flags; /* 12 */ + void *rvalue; /* 16 */ + unsigned regs[3]; /* 20-28 */ +}; + +struct abi_params +{ + int dir; /* parameter growth direction */ + int static_chain; /* the static chain register used by gcc */ + int nregs; /* number of register parameters */ + int regs[3]; +}; + +static const struct abi_params abi_params[FFI_LAST_ABI] = { + [FFI_SYSV] = { 1, R_ECX, 0 }, + [FFI_THISCALL] = { 1, R_EAX, 1, { R_ECX } }, + [FFI_FASTCALL] = { 1, R_EAX, 2, { R_ECX, R_EDX } }, + [FFI_STDCALL] = { 1, R_ECX, 0 }, + [FFI_PASCAL] = { -1, R_ECX, 0 }, + /* ??? No defined static chain; gcc does not support REGISTER. */ + [FFI_REGISTER] = { -1, R_ECX, 3, { R_EAX, R_EDX, R_ECX } }, + [FFI_MS_CDECL] = { 1, R_ECX, 0 } +}; + +#ifdef HAVE_FASTCALL + #ifdef _MSC_VER + #define FFI_DECLARE_FASTCALL __fastcall + #else + #define FFI_DECLARE_FASTCALL __declspec(fastcall) + #endif +#else + #define FFI_DECLARE_FASTCALL +#endif + +extern void FFI_DECLARE_FASTCALL ffi_call_i386(struct call_frame *, char *) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, dir, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + dir = pabi->dir; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + stack = alloca(bytes + sizeof(*frame) + rsize); + argp = (dir < 0 ? 
stack + bytes : stack); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + frame->regs[pabi->static_chain] = (unsigned)closure; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. */ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + ffi_arg val = extend_basic_type (valp, t); + + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + frame->regs[pabi->regs[narg_reg++]] = val; + else if (dir < 0) + { + argp -= 4; + *(ffi_arg *)argp = val; + } + else + { + *(ffi_arg *)argp = val; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer parameters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + /* Alignment rules for arguments are quite complex. Vectors and + structures with 16 byte alignment get it. Note that long double + on Darwin does have 16 byte alignment, and does not get this + alignment if passed directly; a structure with a long double + inside, however, would get 16 byte alignment. Since libffi does + not support vectors, we need non concern ourselves with other + cases. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. */ + argp -= za; + memcpy (argp, valp, z); + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + memcpy (argp, valp, z); + argp += za; + } + } + } + FFI_ASSERT (dir > 0 || argp == stack); + + ffi_call_i386 (frame, stack); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif + +/** private members **/ + +void FFI_HIDDEN ffi_closure_i386(void); +void FFI_HIDDEN ffi_closure_STDCALL(void); +void FFI_HIDDEN ffi_closure_REGISTER(void); + +struct closure_frame +{ + unsigned rettemp[4]; /* 0 */ + unsigned regs[3]; /* 16-24 */ + ffi_cif *cif; /* 28 */ + void (*fun)(ffi_cif*,void*,void**,void*); /* 32 */ + void *user_data; /* 36 */ +}; + +int FFI_HIDDEN FFI_DECLARE_FASTCALL +ffi_closure_inner (struct closure_frame *frame, char *stack) +{ + ffi_cif *cif = frame->cif; + int cabi, i, n, flags, dir, narg_reg; + const struct abi_params *pabi; + ffi_type **arg_types; + char *argp; + void *rvalue; + void **avalue; + + cabi = cif->abi; + flags = cif->flags; + narg_reg = 0; + rvalue = frame->rettemp; + pabi = &abi_params[cabi]; + dir = pabi->dir; + argp = (dir < 0 ? 
stack + STACK_ALIGN (cif->bytes) : stack); + + switch (flags) + { + case X86_RET_STRUCTARG: + if (pabi->nregs > 0) + { + rvalue = (void *)frame->regs[pabi->regs[0]]; + narg_reg = 1; + frame->rettemp[0] = (unsigned)rvalue; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + rvalue = *(void **)argp; + argp += sizeof(void *); + frame->rettemp[0] = (unsigned)rvalue; + break; + } + + n = cif->nargs; + avalue = alloca(sizeof(void *) * n); + + arg_types = cif->arg_types; + for (i = 0; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + void *valp; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + valp = &frame->regs[pabi->regs[narg_reg++]]; + else if (dir < 0) + { + argp -= 4; + valp = argp; + } + else + { + valp = argp; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* See the comment in ffi_call_int. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + /* Issue 434: For thiscall and fastcall, if the paramter passed + as 64-bit integer or struct, all following integer parameters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. */ + argp -= za; + valp = argp; + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + valp = argp; + argp += za; + } + } + + avalue[i] = valp; + } + + frame->fun (cif, rvalue, avalue, frame->user_data); + + if (cabi == FFI_STDCALL) + return flags + (cif->bytes << X86_RET_POP_SHIFT); + else + return flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int op = 0xb8; /* movl imm, %eax */ + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + dest = ffi_closure_i386; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_closure_STDCALL; + break; + case FFI_REGISTER: + dest = ffi_closure_REGISTER; + op = 0x68; /* pushl imm */ + break; + default: + return FFI_BAD_ABI; + } + + /* endbr32. */ + *(UINT32 *) tramp = 0xfb1e0ff3; + + /* movl or pushl immediate. 
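The stores around this point assemble 14 bytes of instructions in place; read back from the code, the finished FFI_SYSV trampoline looks roughly like this (addresses are placeholders, and ffi_closure_i386 itself lives in the port's assembly, not in this file):

/* offset  bytes          instruction
   0       f3 0f 1e fb    endbr32
   4       b8 <codeloc>   movl  $codeloc, %eax     (op is 68 = pushl for FFI_REGISTER)
   9       e9 <rel32>     jmp   ffi_closure_i386   (rel32 = dest - (codeloc + 14))

   ffi_closure_i386 is expected to recover the ffi_closure from %eax and
   forward to ffi_closure_inner above. */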
*/ + tramp[4] = op; + *(void **)(tramp + 5) = codeloc; + + /* jmp dest */ + tramp[9] = 0xe9; + *(unsigned *)(tramp + 10) = (unsigned)dest - ((unsigned)codeloc + 14); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES + +void FFI_HIDDEN ffi_go_closure_EAX(void); +void FFI_HIDDEN ffi_go_closure_ECX(void); +void FFI_HIDDEN ffi_go_closure_STDCALL(void); + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*dest)(void); + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_MS_CDECL: + dest = ffi_go_closure_ECX; + break; + case FFI_THISCALL: + case FFI_FASTCALL: + dest = ffi_go_closure_EAX; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_go_closure_STDCALL; + break; + case FFI_REGISTER: + default: + return FFI_BAD_ABI; + } + + closure->tramp = dest; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* FFI_GO_CLOSURES */ + +/* ------- Native raw API support -------------------------------- */ + +#if !FFI_NO_RAW_API + +void FFI_HIDDEN ffi_closure_raw_SYSV(void); +void FFI_HIDDEN ffi_closure_raw_THISCALL(void); + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int i; + + /* We currently don't support certain kinds of arguments for raw + closures. This should be implemented by a separate assembly + language routine, since it would require argument processing, + something we don't do now for performance. */ + for (i = cif->nargs-1; i >= 0; i--) + switch (cif->arg_types[i]->type) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + return FFI_BAD_TYPEDEF; + } + + switch (cif->abi) + { + case FFI_THISCALL: + dest = ffi_closure_raw_THISCALL; + break; + case FFI_SYSV: + dest = ffi_closure_raw_SYSV; + break; + default: + return FFI_BAD_ABI; + } + + /* movl imm, %eax. */ + tramp[0] = 0xb8; + *(void **)(tramp + 1) = codeloc; + + /* jmp dest */ + tramp[5] = 0xe9; + *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void +ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *avalue) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = STACK_ALIGN (cif->bytes); + argp = stack = + (void *)((uintptr_t)alloca(bytes + sizeof(*frame) + rsize + 15) & ~16); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. 
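ffi_raw_call above consumes a flat ffi_raw array rather than an array of argument pointers. A hedged sketch of driving it; the ffi_raw union and the ffi_ptrarray_to_raw helper come from libffi's public header rather than from this patch, so treat their use here as an assumption:

#include <ffi.h>
#include <stdio.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
  ffi_cif cif;
  ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
  int a = 40, b = 2;
  void *argvalues[2] = { &a, &b };
  ffi_raw raw[2];   /* two word-sized ints fit in two raw slots */
  ffi_arg rc;

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) == FFI_OK)
    {
      ffi_ptrarray_to_raw(&cif, argvalues, raw);   /* repack into the flat layout */
      ffi_raw_call(&cif, FFI_FN(add), &rc, raw);
      printf("%d\n", (int)rc);                     /* prints 42 */
    }
  return 0;
}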
*/ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + bytes -= sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; narg_reg < pabi->nregs && i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT && t != FFI_TYPE_FLOAT) + { + ffi_arg val = extend_basic_type (avalue, t); + frame->regs[pabi->regs[narg_reg++]] = val; + z = FFI_SIZEOF_ARG; + } + else + { + memcpy (argp, avalue, z); + z = FFI_ALIGN (z, FFI_SIZEOF_ARG); + argp += z; + } + avalue += z; + bytes -= z; + } + if (i < n) + memcpy (argp, avalue, bytes); + + ffi_call_i386 (frame, stack); +} +#endif /* !FFI_NO_RAW_API */ +#endif /* __i386__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffi64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffi64.c new file mode 100644 index 0000000..39f9598 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffi64.c @@ -0,0 +1,895 @@ +/* ----------------------------------------------------------------------- + ffi64.c - Copyright (c) 2011, 2018 Anthony Green + Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 2008, 2010 Red Hat, Inc. + Copyright (c) 2002, 2007 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include "internal64.h" + +#ifdef __x86_64__ + +#define MAX_GPR_REGS 6 +#define MAX_SSE_REGS 8 + +#if defined(__INTEL_COMPILER) +#include "xmmintrin.h" +#define UINT128 __m128 +#else +#if defined(__SUNPRO_C) +#include +#define UINT128 __m128i +#else +#define UINT128 __int128_t +#endif +#endif + +union big_int_union +{ + UINT32 i32; + UINT64 i64; + UINT128 i128; +}; + +struct register_args +{ + /* Registers for argument passing. */ + UINT64 gpr[MAX_GPR_REGS]; + union big_int_union sse[MAX_SSE_REGS]; + UINT64 rax; /* ssecount */ + UINT64 r10; /* static chain */ +}; + +extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)) FFI_HIDDEN; + +/* All reference to register classes here is identical to the code in + gcc/config/i386/i386.c. Do *not* change one without the other. 
*/ + +/* Register class used for passing given 64bit part of the argument. + These represent classes as documented by the PS ABI, with the + exception of SSESF, SSEDF classes, that are basically SSE class, + just gcc will use SF or DFmode move instead of DImode to avoid + reformatting penalties. + + Similary we play games with INTEGERSI_CLASS to use cheaper SImode moves + whenever possible (upper half does contain padding). */ +enum x86_64_reg_class + { + X86_64_NO_CLASS, + X86_64_INTEGER_CLASS, + X86_64_INTEGERSI_CLASS, + X86_64_SSE_CLASS, + X86_64_SSESF_CLASS, + X86_64_SSEDF_CLASS, + X86_64_SSEUP_CLASS, + X86_64_X87_CLASS, + X86_64_X87UP_CLASS, + X86_64_COMPLEX_X87_CLASS, + X86_64_MEMORY_CLASS + }; + +#define MAX_CLASSES 4 + +#define SSE_CLASS_P(X) ((X) >= X86_64_SSE_CLASS && X <= X86_64_SSEUP_CLASS) + +/* x86-64 register passing implementation. See x86-64 ABI for details. Goal + of this code is to classify each 8bytes of incoming argument by the register + class and assign registers accordingly. */ + +/* Return the union class of CLASS1 and CLASS2. + See the x86-64 PS ABI for details. */ + +static enum x86_64_reg_class +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) +{ + /* Rule #1: If both classes are equal, this is the resulting class. */ + if (class1 == class2) + return class1; + + /* Rule #2: If one of the classes is NO_CLASS, the resulting class is + the other class. */ + if (class1 == X86_64_NO_CLASS) + return class2; + if (class2 == X86_64_NO_CLASS) + return class1; + + /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ + if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */ + if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) + || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) + return X86_64_INTEGERSI_CLASS; + if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS + || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) + return X86_64_INTEGER_CLASS; + + /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, + MEMORY is used. */ + if (class1 == X86_64_X87_CLASS + || class1 == X86_64_X87UP_CLASS + || class1 == X86_64_COMPLEX_X87_CLASS + || class2 == X86_64_X87_CLASS + || class2 == X86_64_X87UP_CLASS + || class2 == X86_64_COMPLEX_X87_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #6: Otherwise class SSE is used. */ + return X86_64_SSE_CLASS; +} + +/* Classify the argument of type TYPE and mode MODE. + CLASSES will be filled by the register class used to pass each word + of the operand. The number of words is returned. In case the parameter + should be passed in memory, 0 is returned. As a special case for zero + sized containers, classes[0] will be NO_CLASS and 1 is returned. + + See the x86-64 PS ABI for details. 
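As a worked instance of the rules that classify_argument implements below, consider a hypothetical struct { int a; float b; } described through libffi's aggregate type machinery:

#include <ffi.h>

/* - `a` sits at offset 0, 4 bytes  -> X86_64_INTEGERSI_CLASS
   - `b` sits at offset 4; FFI_TYPE_FLOAT with byte_offset % 8 != 0
                                    -> X86_64_SSE_CLASS
   - both land in eightbyte 0; merge_classes(SSE, INTEGERSI) falls through to
     rule #4 and yields X86_64_INTEGER_CLASS, so the whole 8-byte struct is
     passed in a single general-purpose register. */
static ffi_type *mix_elements[3] = { &ffi_type_sint, &ffi_type_float, NULL };
static ffi_type mix_type = { 0, 0, FFI_TYPE_STRUCT, mix_elements };
/* size/alignment stay 0 here; ffi_prep_cif computes them before classification. */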
+*/ +static size_t +classify_argument (ffi_type *type, enum x86_64_reg_class classes[], + size_t byte_offset) +{ + switch (type->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_integer: + { + size_t size = byte_offset + type->size; + + if (size <= 4) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (size <= 8) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (size <= 12) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else if (size <= 16) + { + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + else + FFI_ASSERT (0); + } + case FFI_TYPE_FLOAT: + if (!(byte_offset % 8)) + classes[0] = X86_64_SSESF_CLASS; + else + classes[0] = X86_64_SSE_CLASS; + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = X86_64_SSEDF_CLASS; + return 1; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_X87_CLASS; + classes[1] = X86_64_X87UP_CLASS; + return 2; +#endif + case FFI_TYPE_STRUCT: + { + const size_t UNITS_PER_WORD = 8; + size_t words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + ffi_type **ptr; + unsigned int i; + enum x86_64_reg_class subclasses[MAX_CLASSES]; + + /* If the struct is larger than 32 bytes, pass it on the stack. */ + if (type->size > 32) + return 0; + + for (i = 0; i < words; i++) + classes[i] = X86_64_NO_CLASS; + + /* Zero sized arrays or structures are NO_CLASS. We return 0 to + signalize memory class, so handle it as special case. */ + if (!words) + { + case FFI_TYPE_VOID: + classes[0] = X86_64_NO_CLASS; + return 1; + } + + /* Merge the fields of structure. */ + for (ptr = type->elements; *ptr != NULL; ptr++) + { + size_t num; + + byte_offset = FFI_ALIGN (byte_offset, (*ptr)->alignment); + + num = classify_argument (*ptr, subclasses, byte_offset % 8); + if (num == 0) + return 0; + for (i = 0; i < num; i++) + { + size_t pos = byte_offset / 8; + classes[i + pos] = + merge_classes (subclasses[i], classes[i + pos]); + } + + byte_offset += (*ptr)->size; + } + + if (words > 2) + { + /* When size > 16 bytes, if the first one isn't + X86_64_SSE_CLASS or any other ones aren't + X86_64_SSEUP_CLASS, everything should be passed in + memory. */ + if (classes[0] != X86_64_SSE_CLASS) + return 0; + + for (i = 1; i < words; i++) + if (classes[i] != X86_64_SSEUP_CLASS) + return 0; + } + + /* Final merger cleanup. */ + for (i = 0; i < words; i++) + { + /* If one class is MEMORY, everything should be passed in + memory. */ + if (classes[i] == X86_64_MEMORY_CLASS) + return 0; + + /* The X86_64_SSEUP_CLASS should be always preceded by + X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */ + if (i > 1 && classes[i] == X86_64_SSEUP_CLASS + && classes[i - 1] != X86_64_SSE_CLASS + && classes[i - 1] != X86_64_SSEUP_CLASS) + { + /* The first one should never be X86_64_SSEUP_CLASS. */ + FFI_ASSERT (i != 0); + classes[i] = X86_64_SSE_CLASS; + } + + /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS, + everything should be passed in memory. */ + if (i > 1 && classes[i] == X86_64_X87UP_CLASS + && (classes[i - 1] != X86_64_X87_CLASS)) + { + /* The first one should never be X86_64_X87UP_CLASS. 
*/ + FFI_ASSERT (i != 0); + return 0; + } + } + return words; + } + case FFI_TYPE_COMPLEX: + { + ffi_type *inner = type->elements[0]; + switch (inner->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + goto do_integer; + + case FFI_TYPE_FLOAT: + classes[0] = X86_64_SSE_CLASS; + if (byte_offset % 8) + { + classes[1] = X86_64_SSESF_CLASS; + return 2; + } + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = classes[1] = X86_64_SSEDF_CLASS; + return 2; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_COMPLEX_X87_CLASS; + return 1; +#endif + } + } + } + abort(); +} + +/* Examine the argument and return set number of register required in each + class. Return zero iff parameter should be passed in memory, otherwise + the number of registers. */ + +static size_t +examine_argument (ffi_type *type, enum x86_64_reg_class classes[MAX_CLASSES], + _Bool in_return, int *pngpr, int *pnsse) +{ + size_t n; + unsigned int i; + int ngpr, nsse; + + n = classify_argument (type, classes, 0); + if (n == 0) + return 0; + + ngpr = nsse = 0; + for (i = 0; i < n; ++i) + switch (classes[i]) + { + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + ngpr++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSESF_CLASS: + case X86_64_SSEDF_CLASS: + nsse++; + break; + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_X87_CLASS: + case X86_64_X87UP_CLASS: + case X86_64_COMPLEX_X87_CLASS: + return in_return != 0; + default: + abort (); + } + + *pngpr = ngpr; + *pnsse = nsse; + + return n; +} + +/* Perform machine dependent cif processing. */ + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_cif_machdep_efi64(ffi_cif *cif); +#endif + +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int gprcount, ssecount, i, avn, ngpr, nsse; + unsigned flags; + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t bytes, n, rtype_size; + ffi_type *rtype; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_cif_machdep_efi64(cif); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + gprcount = ssecount = 0; + + rtype = cif->rtype; + rtype_size = rtype->size; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = UNIX64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = UNIX64_RET_UINT8; + break; + case FFI_TYPE_SINT8: + flags = UNIX64_RET_SINT8; + break; + case FFI_TYPE_UINT16: + flags = UNIX64_RET_UINT16; + break; + case FFI_TYPE_SINT16: + flags = UNIX64_RET_SINT16; + break; + case FFI_TYPE_UINT32: + flags = UNIX64_RET_UINT32; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = UNIX64_RET_SINT32; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM32; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_XMM64; + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87; + break; +#endif + case FFI_TYPE_STRUCT: + n = examine_argument (cif->rtype, classes, 1, &ngpr, &nsse); + if (n == 0) + { + /* The return value is passed in memory. A pointer to that + memory is the first argument. Allocate a register for it. */ + gprcount++; + /* We don't have to do anything in asm for the return. 
*/ + flags = UNIX64_RET_VOID | UNIX64_FLAG_RET_IN_MEM; + } + else + { + _Bool sse0 = SSE_CLASS_P (classes[0]); + + if (rtype_size == 4 && sse0) + flags = UNIX64_RET_XMM32; + else if (rtype_size == 8) + flags = sse0 ? UNIX64_RET_XMM64 : UNIX64_RET_INT64; + else + { + _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]); + if (sse0 && sse1) + flags = UNIX64_RET_ST_XMM0_XMM1; + else if (sse0) + flags = UNIX64_RET_ST_XMM0_RAX; + else if (sse1) + flags = UNIX64_RET_ST_RAX_XMM0; + else + flags = UNIX64_RET_ST_RAX_RDX; + flags |= rtype_size << UNIX64_SIZE_SHIFT; + } + } + break; + case FFI_TYPE_COMPLEX: + switch (rtype->elements[0]->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_ST_RAX_RDX | ((unsigned) rtype_size << UNIX64_SIZE_SHIFT); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM64; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_ST_XMM0_XMM1 | (16 << UNIX64_SIZE_SHIFT); + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87_2; + break; +#endif + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + + /* Go over all arguments and determine the way they should be passed. + If it's in a register and there is space for it, let that be so. If + not, add it's size to the stack byte count. */ + for (bytes = 0, i = 0, avn = cif->nargs; i < avn; i++) + { + if (examine_argument (cif->arg_types[i], classes, 0, &ngpr, &nsse) == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = cif->arg_types[i]->alignment; + + if (align < 8) + align = 8; + + bytes = FFI_ALIGN (bytes, align); + bytes += cif->arg_types[i]->size; + } + else + { + gprcount += ngpr; + ssecount += nsse; + } + } + if (ssecount) + flags |= UNIX64_FLAG_XMM_ARGS; + + cif->flags = flags; + cif->bytes = (unsigned) FFI_ALIGN (bytes, 8); + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + enum x86_64_reg_class classes[MAX_CLASSES]; + char *stack, *argp; + ffi_type **arg_types; + int gprcount, ssecount, ngpr, nsse, i, avn, flags; + struct register_args *reg_args; + + /* Can't call 32-bit mode from 64-bit mode. */ + FFI_ASSERT (cif->abi == FFI_UNIX64); + + /* If the return value is a struct and we don't have a return value + address then we need to make one. Otherwise we can ignore it. */ + flags = cif->flags; + if (rvalue == NULL) + { + if (flags & UNIX64_FLAG_RET_IN_MEM) + rvalue = alloca (cif->rtype->size); + else + flags = UNIX64_RET_VOID; + } + + /* Allocate the space for the arguments, plus 4 words of temp space. */ + stack = alloca (sizeof (struct register_args) + cif->bytes + 4*8); + reg_args = (struct register_args *) stack; + argp = stack + sizeof (struct register_args); + + reg_args->r10 = (uintptr_t) closure; + + gprcount = ssecount = 0; + + /* If the return value is passed in memory, add the pointer as the + first integer argument. 
*/ + if (flags & UNIX64_FLAG_RET_IN_MEM) + reg_args->gpr[gprcount++] = (unsigned long) rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0; i < avn; ++i) + { + size_t n, size = arg_types[i]->size; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + memcpy (argp, avalue[i], size); + argp += size; + } + else + { + /* The argument is passed entirely in registers. */ + char *a = (char *) avalue[i]; + unsigned int j; + + for (j = 0; j < n; j++, a += 8, size -= 8) + { + switch (classes[j]) + { + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + /* Sign-extend integer arguments passed in general + purpose registers, to cope with the fact that + LLVM incorrectly assumes that this will be done + (the x86-64 PS ABI does not specify this). */ + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a); + break; + case FFI_TYPE_SINT16: + reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a); + break; + case FFI_TYPE_SINT32: + reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a); + break; + default: + reg_args->gpr[gprcount] = 0; + memcpy (&reg_args->gpr[gprcount], a, size); + } + gprcount++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSEDF_CLASS: + memcpy (&reg_args->sse[ssecount++].i64, a, sizeof(UINT64)); + break; + case X86_64_SSESF_CLASS: + memcpy (&reg_args->sse[ssecount++].i32, a, sizeof(UINT32)); + break; + default: + abort(); + } + } + } + } + reg_args->rax = ssecount; + + ffi_call_unix64 (stack, cif->bytes + sizeof (struct register_args), + flags, rvalue, fn); +} + +#ifndef __ILP32__ +extern void +ffi_call_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue); +#endif + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_efi64(cif, fn, rvalue, avalue); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES + +#ifndef __ILP32__ +extern void +ffi_call_go_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); +#endif + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_go_efi64(cif, fn, rvalue, avalue, closure); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +#endif /* FFI_GO_CLOSURES */ + +extern void ffi_closure_unix64(void) FFI_HIDDEN; +extern void ffi_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_closure_loc_efi64(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[24] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, +
/* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + void (*dest)(void); + char *tramp = closure->tramp; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_closure_loc_efi64(closure, cif, fun, user_data, codeloc); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + if (cif->flags & UNIX64_FLAG_XMM_ARGS) + dest = ffi_closure_unix64_sse; + else + dest = ffi_closure_unix64; + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)dest; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_unix64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, + struct register_args *reg_args, + char *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn; + int gprcount, ssecount, ngpr, nsse; + int flags; + + avn = cif->nargs; + flags = cif->flags; + avalue = alloca(avn * sizeof(void *)); + gprcount = ssecount = 0; + + if (flags & UNIX64_FLAG_RET_IN_MEM) + { + /* On return, %rax will contain the address that was passed + by the caller in %rdi. */ + void *r = (void *)(uintptr_t)reg_args->gpr[gprcount++]; + *(void **)rvalue = r; + rvalue = r; + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + } + + arg_types = cif->arg_types; + for (i = 0; i < avn; ++i) + { + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t n; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + avalue[i] = argp; + argp += arg_types[i]->size; + } + /* If the argument is in a single register, or two consecutive + integer registers, then we can use that address directly. */ + else if (n == 1 + || (n == 2 && !(SSE_CLASS_P (classes[0]) + || SSE_CLASS_P (classes[1])))) + { + /* The argument is in a single register. */ + if (SSE_CLASS_P (classes[0])) + { + avalue[i] = &reg_args->sse[ssecount]; + ssecount += n; + } + else + { + avalue[i] = &reg_args->gpr[gprcount]; + gprcount += n; + } + } + /* Otherwise, allocate space to make them consecutive. */ + else + { + char *a = alloca (16); + unsigned int j; + + avalue[i] = a; + for (j = 0; j < n; j++, a += 8) + { + if (SSE_CLASS_P (classes[j])) + memcpy (a, &reg_args->sse[ssecount++], 8); + else + memcpy (a, &reg_args->gpr[gprcount++], 8); + } + } + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell assembly how to perform return type promotions. */ + return flags; +} + +#ifdef FFI_GO_CLOSURES + +extern void ffi_go_closure_unix64(void) FFI_HIDDEN; +extern void ffi_go_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_go_closure_efi64(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)); +#endif + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_go_closure_efi64(closure, cif, fun); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + closure->tramp = (cif->flags & UNIX64_FLAG_XMM_ARGS + ?
ffi_go_closure_unix64_sse + : ffi_go_closure_unix64); + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* FFI_GO_CLOSURES */ + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffitarget.h new file mode 100644 index 0000000..a34f3e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffitarget.h @@ -0,0 +1,160 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2014, 2018 Anthony Green + Copyright (c) 1996-2003, 2010 Red Hat, Inc. + Copyright (C) 2008 Free Software Foundation, Inc. + + Target configuration macros for x86 and x86-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +/* For code common to all platforms on x86 and x86_64. 
*/ +#define X86_ANY + +#if defined (X86_64) && defined (__i386__) +#undef X86_64 +#define X86 +#endif + +#ifdef X86_WIN64 +#define FFI_SIZEOF_ARG 8 +#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */ +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#ifndef _MSC_VER +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +#ifdef X86_WIN64 +#ifdef _MSC_VER +typedef unsigned __int64 ffi_arg; +typedef __int64 ffi_sarg; +#else +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#endif +#else +#if defined __x86_64__ && defined __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif +#endif + +typedef enum ffi_abi { +#if defined(X86_WIN64) + FFI_FIRST_ABI = 0, + FFI_WIN64, /* sizeof(long double) == 8 - microsoft compilers */ + FFI_GNUW64, /* sizeof(long double) == 16 - GNU compilers */ + FFI_LAST_ABI, +#ifdef __GNUC__ + FFI_DEFAULT_ABI = FFI_GNUW64 +#else + FFI_DEFAULT_ABI = FFI_WIN64 +#endif + +#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) + FFI_FIRST_ABI = 1, + FFI_UNIX64, + FFI_WIN64, + FFI_EFI64 = FFI_WIN64, + FFI_GNUW64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX64 + +#elif defined(X86_WIN32) + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_STDCALL = 2, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_MS_CDECL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_MS_CDECL +#else + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_STDCALL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_MS_CDECL = 8, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +#endif +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 + +#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1) +#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2) +#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3) +#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4) + +#if defined (X86_64) || defined(X86_WIN64) \ + || (defined (__x86_64__) && defined (X86_DARWIN)) +/* 4 bytes of ENDBR64 + 7 bytes of LEA + 6 bytes of JMP + 7 bytes of NOP + + 8 bytes of pointer. */ +# define FFI_TRAMPOLINE_SIZE 32 +# define FFI_NATIVE_RAW_API 0 +#else +/* 4 bytes of ENDBR32 + 5 bytes of MOV + 5 bytes of JMP + 2 unused + bytes. */ +# define FFI_TRAMPOLINE_SIZE 16 +# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */ +#endif + +#if !defined(GENERATE_LIBFFI_MAP) && defined(__ASSEMBLER__) \ + && defined(__CET__) +# include <cet.h> +# define _CET_NOTRACK notrack +#else +# define _CET_ENDBR +# define _CET_NOTRACK +#endif + +#endif + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffiw64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffiw64.c new file mode 100644 index 0000000..a43a9eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/ffiw64.c @@ -0,0 +1,318 @@ +/* ----------------------------------------------------------------------- + ffiw64.c - Copyright (c) 2018 Anthony Green + Copyright (c) 2014 Red Hat, Inc.
+ + x86 win64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined(__x86_64__) || defined(_M_AMD64) +#include <ffi.h> +#include <ffi_common.h> +#include <stdlib.h> +#include <stdint.h> + +#ifdef X86_WIN64 +#define EFI64(name) name +#else +#define EFI64(name) FFI_HIDDEN name##_efi64 +#endif + +struct win64_call_frame +{ + UINT64 rbp; /* 0 */ + UINT64 retaddr; /* 8 */ + UINT64 fn; /* 16 */ + UINT64 flags; /* 24 */ + UINT64 rvalue; /* 32 */ +}; + +extern void ffi_call_win64 (void *stack, struct win64_call_frame *, + void *closure) FFI_HIDDEN; + +ffi_status FFI_HIDDEN +EFI64(ffi_prep_cif_machdep)(ffi_cif *cif) +{ + int flags, n; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + flags = cif->rtype->type; + switch (flags) + { + default: + break; + case FFI_TYPE_LONGDOUBLE: + /* GCC returns long double values by reference, like a struct */ + if (cif->abi == FFI_GNUW64) + flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_COMPLEX: + flags = FFI_TYPE_STRUCT; + /* FALLTHRU */ + case FFI_TYPE_STRUCT: + switch (cif->rtype->size) + { + case 8: + flags = FFI_TYPE_UINT64; + break; + case 4: + flags = FFI_TYPE_SMALL_STRUCT_4B; + break; + case 2: + flags = FFI_TYPE_SMALL_STRUCT_2B; + break; + case 1: + flags = FFI_TYPE_SMALL_STRUCT_1B; + break; + } + break; + } + cif->flags = flags; + + /* Each argument either fits in a register, an 8 byte slot, or is + passed by reference with the pointer in the 8 byte slot. */ + n = cif->nargs; + n += (flags == FFI_TYPE_STRUCT); + if (n < 4) + n = 4; + cif->bytes = n * 8; + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + int i, j, n, flags; + UINT64 *stack; + size_t rsize; + struct win64_call_frame *frame; + + FFI_ASSERT(cif->abi == FFI_GNUW64 || cif->abi == FFI_WIN64); + + flags = cif->flags; + rsize = 0; + + /* If we have no return value for a structure, we need to create one. + Otherwise we can ignore the return type entirely.
*/ + if (rvalue == NULL) + { + if (flags == FFI_TYPE_STRUCT) + rsize = cif->rtype->size; + else + flags = FFI_TYPE_VOID; + } + + stack = alloca(cif->bytes + sizeof(struct win64_call_frame) + rsize); + frame = (struct win64_call_frame *)((char *)stack + cif->bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = (uintptr_t)fn; + frame->flags = flags; + frame->rvalue = (uintptr_t)rvalue; + + j = 0; + if (flags == FFI_TYPE_STRUCT) + { + stack[0] = (uintptr_t)rvalue; + j = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++j) + { + switch (cif->arg_types[i]->size) + { + case 8: + stack[j] = *(UINT64 *)avalue[i]; + break; + case 4: + stack[j] = *(UINT32 *)avalue[i]; + break; + case 2: + stack[j] = *(UINT16 *)avalue[i]; + break; + case 1: + stack[j] = *(UINT8 *)avalue[i]; + break; + default: + stack[j] = (uintptr_t)avalue[i]; + break; + } + } + + ffi_call_win64 (stack, frame, closure); +} + +void +EFI64(ffi_call)(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +EFI64(ffi_call_go)(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_win64(void) FFI_HIDDEN; + +#ifdef FFI_GO_CLOSURES +extern void ffi_go_closure_win64(void) FFI_HIDDEN; +#endif + +ffi_status +EFI64(ffi_prep_closure_loc)(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[FFI_TRAMPOLINE_SIZE - 8] = { + /* endbr64 */ + 0xf3, 0x0f, 0x1e, 0xfa, + /* leaq -0xb(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf5, 0xff, 0xff, 0xff, + /* jmpq *0x7(%rip) # 0x18 */ + 0xff, 0x25, 0x07, 0x00, 0x00, 0x00, + /* nopl 0(%rax) */ + 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 + }; + char *tramp = closure->tramp; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + sizeof (trampoline)) = (uintptr_t)ffi_closure_win64; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +ffi_status +EFI64(ffi_prep_go_closure)(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + closure->tramp = ffi_go_closure_win64; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif + +struct win64_closure_frame +{ + UINT64 rvalue[2]; + UINT64 fargs[4]; + UINT64 retaddr; + UINT64 args[]; +}; + +/* Force the inner function to use the MS ABI. When compiling on win64 + this is a nop. When compiling on unix, this simplifies the assembly, + and places the burden of saving the extra call-saved registers on + the compiler. */ +int FFI_HIDDEN __attribute__((ms_abi)) +ffi_closure_win64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + struct win64_closure_frame *frame) +{ + void **avalue; + void *rvalue; + int i, n, nreg, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + rvalue = frame->rvalue; + nreg = 0; + + /* When returning a structure, the address is in the first argument. + We must also be prepared to return the same address in eax, so + install that address in the frame and pretend we return a pointer. 
*/ + flags = cif->flags; + if (flags == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)frame->args[0]; + frame->rvalue[0] = frame->args[0]; + nreg = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++nreg) + { + size_t size = cif->arg_types[i]->size; + size_t type = cif->arg_types[i]->type; + void *a; + + if (type == FFI_TYPE_DOUBLE || type == FFI_TYPE_FLOAT) + { + if (nreg < 4) + a = &frame->fargs[nreg]; + else + a = &frame->args[nreg]; + } + else if (size == 1 || size == 2 || size == 4 || size == 8) + a = &frame->args[nreg]; + else + a = (void *)(uintptr_t)frame->args[nreg]; + + avalue[i] = a; + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + return flags; +} + +#endif /* __x86_64__ */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/internal.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/internal.h new file mode 100644 index 0000000..09771ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/internal.h @@ -0,0 +1,29 @@ +#define X86_RET_FLOAT 0 +#define X86_RET_DOUBLE 1 +#define X86_RET_LDOUBLE 2 +#define X86_RET_SINT8 3 +#define X86_RET_SINT16 4 +#define X86_RET_UINT8 5 +#define X86_RET_UINT16 6 +#define X86_RET_INT64 7 +#define X86_RET_INT32 8 +#define X86_RET_VOID 9 +#define X86_RET_STRUCTPOP 10 +#define X86_RET_STRUCTARG 11 +#define X86_RET_STRUCT_1B 12 +#define X86_RET_STRUCT_2B 13 +#define X86_RET_UNUSED14 14 +#define X86_RET_UNUSED15 15 + +#define X86_RET_TYPE_MASK 15 +#define X86_RET_POP_SHIFT 4 + +#define R_EAX 0 +#define R_EDX 1 +#define R_ECX 2 + +#ifdef __PCC__ +# define HAVE_FASTCALL 0 +#else +# define HAVE_FASTCALL 1 +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/internal64.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/internal64.h new file mode 100644 index 0000000..512e955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/internal64.h @@ -0,0 +1,22 @@ +#define UNIX64_RET_VOID 0 +#define UNIX64_RET_UINT8 1 +#define UNIX64_RET_UINT16 2 +#define UNIX64_RET_UINT32 3 +#define UNIX64_RET_SINT8 4 +#define UNIX64_RET_SINT16 5 +#define UNIX64_RET_SINT32 6 +#define UNIX64_RET_INT64 7 +#define UNIX64_RET_XMM32 8 +#define UNIX64_RET_XMM64 9 +#define UNIX64_RET_X87 10 +#define UNIX64_RET_X87_2 11 +#define UNIX64_RET_ST_XMM0_RAX 12 +#define UNIX64_RET_ST_RAX_XMM0 13 +#define UNIX64_RET_ST_XMM0_XMM1 14 +#define UNIX64_RET_ST_RAX_RDX 15 + +#define UNIX64_RET_LAST 15 + +#define UNIX64_FLAG_RET_IN_MEM (1 << 10) +#define UNIX64_FLAG_XMM_ARGS (1 << 11) +#define UNIX64_SIZE_SHIFT 12 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/sysv.S new file mode 100644 index 0000000..d8ab4b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/sysv.S @@ -0,0 +1,1138 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. 
+ + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __i386__ +#ifndef _MSC_VER + +#define LIBFFI_ASM +#include <fficonfig.h> +#include <ffi.h> +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef X86_DARWIN +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +/* Handle win32 fastcall name mangling. */ +#ifdef X86_WIN32 +# define ffi_call_i386 "@ffi_call_i386@8" +# define ffi_closure_inner "@ffi_closure_inner@8" +#else +# define ffi_call_i386 C(ffi_call_i386) +# define ffi_closure_inner C(ffi_closure_inner) +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + .balign 16 + .globl ffi_call_i386 + FFI_HIDDEN(ffi_call_i386) + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ffi_call_i386: +L(UW0): + # cfi_startproc + _CET_ENDBR +#if !HAVE_FASTCALL + movl 4(%esp), %ecx + movl 8(%esp), %edx +#endif + movl (%esp), %eax /* move the return address */ + movl %ebp, (%ecx) /* store %ebp into local frame */ + movl %eax, 4(%ecx) /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved.
*/ + movl %ecx, %ebp +L(UW1): + # cfi_def_cfa(%ebp, 8) + # cfi_rel_offset(%ebp, 0) + + movl %edx, %esp /* set outgoing argument stack */ + movl 20+R_EAX*4(%ebp), %eax /* set register arguments */ + movl 20+R_EDX*4(%ebp), %edx + movl 20+R_ECX*4(%ebp), %ecx + + call *8(%ebp) + + movl 12(%ebp), %ecx /* load return type code */ + movl %ebx, 8(%ebp) /* preserve %ebx */ +L(UW2): + # cfi_rel_offset(%ebx, 8) + + andl $X86_RET_TYPE_MASK, %ecx +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc1): + leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx +#else + leal L(store_table)(,%ecx, 8), %ebx +#endif + movl 16(%ebp), %ecx /* load result address */ + _CET_NOTRACK jmp *%ebx + + .balign 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstps (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstpl (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstpt (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movswl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzwl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_INT64) + movl %edx, 4(%ecx) + /* fallthru */ +E(L(store_table), X86_RET_INT32) + movl %eax, (%ecx) + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + movl 8(%ebp), %ebx + movl %ebp, %esp + popl %ebp +L(UW3): + # cfi_remember_state + # cfi_def_cfa(%esp, 4) + # cfi_restore(%ebx) + # cfi_restore(%ebp) + ret +L(UW4): + # cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + movb %al, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + movw %ax, (%ecx) + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + ud2 +E(L(store_table), X86_RET_UNUSED15) + ud2 + +L(UW5): + # cfi_endproc +ENDF(ffi_call_i386) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. 
*/ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +#define FFI_CLOSURE_SAVE_REGS \ + movl %eax, closure_CF+16+R_EAX*4(%esp); \ + movl %edx, closure_CF+16+R_EDX*4(%esp); \ + movl %ecx, closure_CF+16+R_ECX*4(%esp) + +#define FFI_CLOSURE_COPY_TRAMP_DATA \ + movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \ + movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \ + movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \ + movl %edx, closure_CF+28(%esp); \ + movl %ecx, closure_CF+32(%esp); \ + movl %eax, closure_CF+36(%esp) + +#if HAVE_FASTCALL +# define FFI_CLOSURE_PREP_CALL \ + movl %esp, %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ +#else +# define FFI_CLOSURE_PREP_CALL \ + leal closure_CF(%esp), %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ \ + movl %ecx, (%esp); \ + movl %edx, 4(%esp) +#endif + +#define FFI_CLOSURE_CALL_INNER(UWN) \ + call ffi_closure_inner + +#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))(, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx + +#ifdef __PIC__ +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + call C(__x86.get_pc_thunk.dx); \ +L(C1(pc,N)): \ + leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# else +# define FFI_CLOSURE_CALL_INNER_SAVE_EBX +# undef FFI_CLOSURE_CALL_INNER +# define FFI_CLOSURE_CALL_INNER(UWN) \ + movl %ebx, 40(%esp); /* save ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_rel_offset(%ebx, 40); */ \ + call C(__x86.get_pc_thunk.bx); /* load got register */ \ + addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \ + call ffi_closure_inner@PLT +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \ + movl 40(%esp), %ebx; /* restore ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_restore(%ebx); */ \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + _CET_NOTRACK jmp *%edx +# endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + + .balign 16 + .globl C(ffi_go_closure_EAX) + FFI_HIDDEN(C(ffi_go_closure_EAX)) +C(ffi_go_closure_EAX): +L(UW6): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW7): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%eax), %edx /* copy cif */ + movl 8(%eax), %ecx /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %ecx, closure_CF+32(%esp) + movl %eax, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + # cfi_endproc +ENDF(C(ffi_go_closure_EAX)) + + .balign 16 + .globl C(ffi_go_closure_ECX) + FFI_HIDDEN(C(ffi_go_closure_ECX)) +C(ffi_go_closure_ECX): +L(UW9): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW10): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + # cfi_endproc +ENDF(C(ffi_go_closure_ECX)) + +/* The closure entry points are reached from the ffi_closure trampoline. 
+ On entry, %eax contains the address of the ffi_closure. */ + + .balign 16 + .globl C(ffi_closure_i386) + FFI_HIDDEN(C(ffi_closure_i386)) + +C(ffi_closure_i386): +L(UW12): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW13): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. */ +L(do_closure_i386): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP(2, 15) + + .balign 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + flds closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fldl closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + addl $closure_FS, %esp +L(UW16): + # cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + addl $closure_FS, %esp +L(UW18): + # cfi_adjust_cfa_offset(-closure_FS) + ret $4 +L(UW19): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + ud2 +E(L(load_table2), X86_RET_UNUSED15) + ud2 + +L(UW20): + # cfi_endproc +ENDF(C(ffi_closure_i386)) + + .balign 16 + .globl C(ffi_go_closure_STDCALL) + FFI_HIDDEN(C(ffi_go_closure_STDCALL)) +C(ffi_go_closure_STDCALL): +L(UW21): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW22): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + # cfi_endproc +ENDF(C(ffi_go_closure_STDCALL)) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + + .balign 16 + .globl C(ffi_closure_REGISTER) + FFI_HIDDEN(C(ffi_closure_REGISTER)) +C(ffi_closure_REGISTER): +L(UW24): + # cfi_startproc + # cfi_def_cfa(%esp, 8) + # cfi_offset(%eip, -8) + _CET_ENDBR + subl $closure_FS-4, %esp +L(UW25): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl closure_FS-4(%esp), %ecx /* load retaddr */ + movl closure_FS(%esp), %eax /* load closure */ + movl %ecx, closure_FS(%esp) /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + # cfi_endproc +ENDF(C(ffi_closure_REGISTER)) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. 
*/ + + .balign 16 + .globl C(ffi_closure_STDCALL) + FFI_HIDDEN(C(ffi_closure_STDCALL)) +C(ffi_closure_STDCALL): +L(UW27): + # cfi_startproc + _CET_ENDBR + subl $closure_FS, %esp +L(UW28): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER): + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + movl %eax, %ecx + shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */ + leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */ + movl closure_FS(%esp), %edx /* move return address */ + movl %edx, (%ecx) + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. + There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP(3, 30) + + .balign 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + flds closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_DOUBLE) + fldl closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT8) + movsbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT16) + movswl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT8) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT16) + movzwl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT32) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_VOID) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTPOP) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTARG) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzwl %ax, %eax + movl %ecx, %esp + ret + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table3), X86_RET_UNUSED14) + ud2 +E(L(load_table3), X86_RET_UNUSED15) + ud2 + +L(UW31): + # cfi_endproc +ENDF(C(ffi_closure_STDCALL)) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + + .balign 16 + .globl C(ffi_closure_raw_SYSV) + FFI_HIDDEN(C(ffi_closure_raw_SYSV)) +C(ffi_closure_raw_SYSV): +L(UW32): + # cfi_startproc + _CET_ENDBR + subl $raw_closure_S_FS, %esp +L(UW33): + # cfi_def_cfa_offset(raw_closure_S_FS + 4) + movl %ebx, raw_closure_S_FS-4(%esp) +L(UW34): + # cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc4): + leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +#else + leal L(load_table4)(,%eax, 8), %ecx +#endif + movl raw_closure_S_FS-4(%esp), %ebx +L(UW35): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e4) +E(L(load_table4), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + addl $raw_closure_S_FS, %esp +L(UW36): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + addl $raw_closure_S_FS, %esp +L(UW38): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret $4 +L(UW39): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + ud2 +E(L(load_table4), X86_RET_UNUSED15) + ud2 + +L(UW40): + # cfi_endproc +ENDF(C(ffi_closure_raw_SYSV)) + +#define raw_closure_T_FS (16+16+8) + + .balign 16 + .globl C(ffi_closure_raw_THISCALL) + FFI_HIDDEN(C(ffi_closure_raw_THISCALL)) +C(ffi_closure_raw_THISCALL): +L(UW41): + # cfi_startproc + _CET_ENDBR + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. 
*/ + popl %edx +L(UW42): + # cfi_def_cfa_offset(0) + # cfi_register(%eip, %edx) + pushl %ecx +L(UW43): + # cfi_adjust_cfa_offset(4) + pushl %edx +L(UW44): + # cfi_adjust_cfa_offset(4) + # cfi_rel_offset(%eip, 0) + subl $raw_closure_T_FS, %esp +L(UW45): + # cfi_adjust_cfa_offset(raw_closure_T_FS) + movl %ebx, raw_closure_T_FS-4(%esp) +L(UW46): + # cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc5): + leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +#else + leal L(load_table5)(,%eax, 8), %ecx +#endif + movl raw_closure_T_FS-4(%esp), %ebx +L(UW47): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e5) +E(L(load_table5), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + addl $raw_closure_T_FS, %esp +L(UW48): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret $4 +L(UW49): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + addl $raw_closure_T_FS, %esp +L(UW50): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret $8 +L(UW51): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table5), X86_RET_UNUSED14) + ud2 +E(L(load_table5), X86_RET_UNUSED15) + ud2 + +L(UW52): + # cfi_endproc +ENDF(C(ffi_closure_raw_THISCALL)) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + .globl X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +#if defined(__PIC__) + COMDAT(C(__x86.get_pc_thunk.bx)) +C(__x86.get_pc_thunk.bx): + movl (%esp), %ebx + ret +ENDF(C(__x86.get_pc_thunk.bx)) +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE + COMDAT(C(__x86.get_pc_thunk.dx)) +C(__x86.get_pc_thunk.dx): + movl (%esp), %edx + ret +ENDF(C(__x86.get_pc_thunk.dx)) +#endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. 
*/ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* 
DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + .globl @feat.00 +@feat.00 = 1 +#endif + +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_i386 */ + .long C(ffi_call_i386) + .set L1,L(UW5)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_EAX */ + .long C(ffi_go_closure_EAX) + .set L2,L(UW8)-L(UW6) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_ECX */ + .long C(ffi_go_closure_ECX) + .set L3,L(UW11)-L(UW9) + .long L3 + .long 0x04000000 /* use dwarf unwind 
info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_i386 */ + .long C(ffi_closure_i386) + .set L4,L(UW20)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_STDCALL */ + .long C(ffi_go_closure_STDCALL) + .set L5,L(UW23)-L(UW21) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_REGISTER */ + .long C(ffi_closure_REGISTER) + .set L6,L(UW26)-L(UW24) + .long L6 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_STDCALL */ + .long C(ffi_closure_STDCALL) + .set L7,L(UW31)-L(UW27) + .long L7 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_raw_SYSV */ + .long C(ffi_closure_raw_SYSV) + .set L8,L(UW40)-L(UW32) + .long L8 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_raw_THISCALL */ + .long C(ffi_closure_raw_THISCALL) + .set L9,L(UW52)-L(UW41) + .long L9 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 +#endif /* __APPLE__ */ + +#endif /* ifndef _MSC_VER */ +#endif /* ifdef __i386__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/sysv_intel.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/sysv_intel.S new file mode 100644 index 0000000..3cafd71 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/sysv_intel.S @@ -0,0 +1,995 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. + + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef __x86_64__ +#ifdef _MSC_VER + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#define L(X) C1(L, X) +# define ENDF(X) X ENDP + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. 
*/ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + X * 8 +#endif + + .686P + .MODEL FLAT + +EXTRN @ffi_closure_inner@8:PROC +_TEXT SEGMENT + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ALIGN 16 +PUBLIC @ffi_call_i386@8 +@ffi_call_i386@8 PROC +L(UW0): + cfi_startproc + #if !HAVE_FASTCALL + mov ecx, [esp+4] + mov edx, [esp+8] + #endif + mov eax, [esp] /* move the return address */ + mov [ecx], ebp /* store ebp into local frame */ + mov [ecx+4], eax /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is an itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + mov ebp, ecx +L(UW1): + // cfi_def_cfa(%ebp, 8) + // cfi_rel_offset(%ebp, 0) + + mov esp, edx /* set outgoing argument stack */ + mov eax, [20+R_EAX*4+ebp] /* set register arguments */ + mov edx, [20+R_EDX*4+ebp] + mov ecx, [20+R_ECX*4+ebp] + + call dword ptr [ebp+8] + + mov ecx, [12+ebp] /* load return type code */ + mov [ebp+8], ebx /* preserve %ebx */ +L(UW2): + // cfi_rel_offset(%ebx, 8) + + and ecx, X86_RET_TYPE_MASK + lea ebx, [L(store_table) + ecx * 8] + mov ecx, [ebp+16] /* load result address */ + jmp ebx + + ALIGN 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstp DWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstp QWORD PTR [ecx] + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movsx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzx eax, al + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzx eax, ax + mov [ecx], eax + jmp L(e1) +E(L(store_table), X86_RET_INT64) + mov [ecx+4], edx + /* fallthru */ +E(L(store_table), X86_RET_INT32) + mov [ecx], eax + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + mov ebx, [ebp+8] + mov esp, ebp + pop ebp +L(UW3): + // cfi_remember_state + // cfi_def_cfa(%esp, 4) + // cfi_restore(%ebx) + // cfi_restore(%ebp) + ret +L(UW4): + // cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + mov [ecx], al + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + mov [ecx], ax + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + int 3 +E(L(store_table), X86_RET_UNUSED15) + int 3 + +L(UW5): + // cfi_endproc +ENDF(@ffi_call_i386@8) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute__((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. 
*/ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +FFI_CLOSURE_SAVE_REGS MACRO + mov [esp + closure_CF+16+R_EAX*4], eax + mov [esp + closure_CF+16+R_EDX*4], edx + mov [esp + closure_CF+16+R_ECX*4], ecx +ENDM + +FFI_CLOSURE_COPY_TRAMP_DATA MACRO + mov edx, [eax+FFI_TRAMPOLINE_SIZE] /* copy cif */ + mov ecx, [eax+FFI_TRAMPOLINE_SIZE+4] /* copy fun */ + mov eax, [eax+FFI_TRAMPOLINE_SIZE+8]; /* copy user_data */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax +ENDM + +#if HAVE_FASTCALL +FFI_CLOSURE_PREP_CALL MACRO + mov ecx, esp /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ +ENDM +#else +FFI_CLOSURE_PREP_CALL MACRO + lea ecx, [esp+closure_CF] /* load closure_data */ + lea edx, [esp+closure_FS+4] /* load incoming stack */ + mov [esp], ecx + mov [esp+4], edx +ENDM +#endif + +FFI_CLOSURE_CALL_INNER MACRO UWN + call @ffi_closure_inner@8 +ENDM + +FFI_CLOSURE_MASK_AND_JUMP MACRO LABEL + and eax, X86_RET_TYPE_MASK + lea edx, [LABEL+eax*8] + mov eax, [esp+closure_CF] /* optimiztic load */ + jmp edx +ENDM + +ALIGN 16 +PUBLIC ffi_go_closure_EAX +ffi_go_closure_EAX PROC C +L(UW6): + // cfi_startproc + sub esp, closure_FS +L(UW7): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [eax+4] /* copy cif */ + mov ecx, [eax +8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], ecx + mov [esp+closure_CF+36], eax /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + // cfi_endproc +ENDF(ffi_go_closure_EAX) + +ALIGN 16 +PUBLIC ffi_go_closure_ECX +ffi_go_closure_ECX PROC C +L(UW9): + // cfi_startproc + sub esp, closure_FS +L(UW10): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + // cfi_endproc +ENDF(ffi_go_closure_ECX) + +/* The closure entry points are reached from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + +ALIGN 16 +PUBLIC ffi_closure_i386 +ffi_closure_i386 PROC C +L(UW12): + // cfi_startproc + sub esp, closure_FS +L(UW13): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. 
*/ +L(do_closure_i386):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,2)) + + ALIGN 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + fld dword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fld qword ptr [esp+closure_CF] + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movsx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzx eax, ax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + add esp, closure_FS +L(UW16): + // cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + add esp, closure_FS +L(UW18): + // cfi_adjust_cfa_offset(-closure_FS) + ret 4 +L(UW19): + // cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table2), X86_RET_UNUSED14) + int 3 +E(L(load_table2), X86_RET_UNUSED15) + int 3 + +L(UW20): + // cfi_endproc +ENDF(ffi_closure_i386) + +ALIGN 16 +PUBLIC ffi_go_closure_STDCALL +ffi_go_closure_STDCALL PROC C +L(UW21): + // cfi_startproc + sub esp, closure_FS +L(UW22): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov edx, [ecx+4] /* copy cif */ + mov eax, [ecx+8] /* copy fun */ + mov [esp+closure_CF+28], edx + mov [esp+closure_CF+32], eax + mov [esp+closure_CF+36], ecx /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + // cfi_endproc +ENDF(ffi_go_closure_STDCALL) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + +ALIGN 16 +PUBLIC ffi_closure_REGISTER +ffi_closure_REGISTER PROC C +L(UW24): + // cfi_startproc + // cfi_def_cfa(%esp, 8) + // cfi_offset(%eip, -8) + sub esp, closure_FS-4 +L(UW25): + // cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + mov ecx, [esp+closure_FS-4] /* load retaddr */ + mov eax, [esp+closure_FS] /* load closure */ + mov [esp+closure_FS], ecx /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + // cfi_endproc +ENDF(ffi_closure_REGISTER) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + +ALIGN 16 +PUBLIC ffi_closure_STDCALL +ffi_closure_STDCALL PROC C +L(UW27): + // cfi_startproc + sub esp, closure_FS +L(UW28): + // cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER):: + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. */ +L(do_closure_STDCALL):: + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + mov ecx, eax + shr ecx, X86_RET_POP_SHIFT /* isolate pop count */ + lea ecx, [esp+closure_FS+ecx] /* compute popped esp */ + mov edx, [esp+closure_FS] /* move return address */ + mov [ecx], edx + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. 
+ There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP L(C1(load_table,3)) + + ALIGN 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + fld DWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_DOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_LDOUBLE) + fld QWORD PTR [esp+closure_CF] + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT8) + movsx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_SINT16) + movsx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT8) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_UINT16) + movzx eax, ax + mov esp, ecx + ret +E(L(load_table3), X86_RET_INT64) + mov edx, [esp+closure_CF+4] + mov esp, ecx + ret +E(L(load_table3), X86_RET_INT32) + mov esp, ecx + ret +E(L(load_table3), X86_RET_VOID) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTPOP) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCTARG) + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzx eax, al + mov esp, ecx + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzx eax, ax + mov esp, ecx + ret + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table3), X86_RET_UNUSED14) + int 3 +E(L(load_table3), X86_RET_UNUSED15) + int 3 + +L(UW31): + // cfi_endproc +ENDF(ffi_closure_STDCALL) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + +ALIGN 16 +PUBLIC ffi_closure_raw_SYSV +ffi_closure_raw_SYSV PROC C +L(UW32): + // cfi_startproc + sub esp, raw_closure_S_FS +L(UW33): + // cfi_def_cfa_offset(raw_closure_S_FS + 4) + mov [esp+raw_closure_S_FS-4], ebx +L(UW34): + // cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_S_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc4): +// lea ecx, L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table4)+eax*8] +// #endif + mov ebx, [esp+raw_closure_S_FS-4] +L(UW35): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp dword ptr [ecx] + + ALIGN 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fld QWORD PTR [esp +16] + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movsx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzx eax, ax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e4) +E(L(load_table4), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + add esp, raw_closure_S_FS +L(UW36): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret +L(UW37): + // cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + add esp, raw_closure_S_FS +L(UW38): + // cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret 4 +L(UW39): + // cfi_adjust_cfa_offset(raw_closure_S_FS) 
+E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + int 3 +E(L(load_table4), X86_RET_UNUSED15) + int 3 + +L(UW40): + // cfi_endproc +ENDF(ffi_closure_raw_SYSV) + +#define raw_closure_T_FS (16+16+8) + +ALIGN 16 +PUBLIC ffi_closure_raw_THISCALL +ffi_closure_raw_THISCALL PROC C +L(UW41): + // cfi_startproc + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. */ + pop edx +L(UW42): + // cfi_def_cfa_offset(0) + // cfi_register(%eip, %edx) + push ecx +L(UW43): + // cfi_adjust_cfa_offset(4) + push edx +L(UW44): + // cfi_adjust_cfa_offset(4) + // cfi_rel_offset(%eip, 0) + sub esp, raw_closure_T_FS +L(UW45): + // cfi_adjust_cfa_offset(raw_closure_T_FS) + mov [esp+raw_closure_T_FS-4], ebx +L(UW46): + // cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + mov edx, [eax+FFI_TRAMPOLINE_SIZE+8] /* load cl->user_data */ + mov [esp+12], edx + lea edx, [esp+raw_closure_T_FS+4] /* load raw_args */ + mov [esp+8], edx + lea edx, [esp+16] /* load &res */ + mov [esp+4], edx + mov ebx, [eax+FFI_TRAMPOLINE_SIZE] /* load cl->cif */ + mov [esp], ebx + call DWORD PTR [eax+FFI_TRAMPOLINE_SIZE+4] /* call cl->fun */ + + mov eax, [ebx+20] /* load cif->flags */ + and eax, X86_RET_TYPE_MASK +// #ifdef __PIC__ +// call __x86.get_pc_thunk.bx +// L(pc5): +// leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +// #else + lea ecx, [L(load_table5)+eax*8] +//#endif + mov ebx, [esp+raw_closure_T_FS-4] +L(UW47): + // cfi_restore(%ebx) + mov eax, [esp+16] /* Optimistic load */ + jmp DWORD PTR [ecx] + + ALIGN 8 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + fld DWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fld QWORD PTR [esp +16] + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fld QWORD PTR [esp+16] + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movsx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzx eax, ax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + mov edx, [esp+16+4] + jmp L(e5) +E(L(load_table5), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + add esp, raw_closure_T_FS +L(UW48): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret 4 +L(UW49): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + add esp, raw_closure_T_FS +L(UW50): + // cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret 8 +L(UW51): + // cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzx eax, al + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzx eax, ax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table5), X86_RET_UNUSED14) + int 3 +E(L(load_table5), X86_RET_UNUSED15) + int 3 + +L(UW52): + // cfi_endproc +ENDF(ffi_closure_raw_THISCALL) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + PUBLIC X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +// #if defined(__PIC__) +// COMDAT(C(__x86.get_pc_thunk.bx)) +// C(__x86.get_pc_thunk.bx): +// movl (%esp), %ebx +// ret +// ENDF(C(__x86.get_pc_thunk.bx)) +// # if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +// COMDAT(C(__x86.get_pc_thunk.dx)) +// C(__x86.get_pc_thunk.dx): +// movl (%esp), %edx +// ret +// ENDF(C(__x86.get_pc_thunk.dx)) +// #endif /* DARWIN || HIDDEN */ +// #endif /* __PIC__ */ + +#if 0 +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE 
CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 
0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + PUBLIC @feat.00 +@feat.00 = 1 +#endif + +#endif /* ifndef _MSC_VER */ +#endif /* ifndef __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +#endif + +END \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/unix64.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/unix64.S new file mode 100644 index 0000000..89d7db1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/unix64.S @@ -0,0 +1,621 @@ +/* ----------------------------------------------------------------------- + unix64.S - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 2008 Red Hat, Inc + - Copyright (c) 2002 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include "internal64.h" +#include "asmnames.h" + + .text + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# ifdef __CET__ +# define E(BASE, X) .balign 8; .org BASE + X * 16 +# else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +# endif +#endif + +/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)); + + Bit o trickiness here -- ARGS+BYTES is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .balign 8 + .globl C(ffi_call_unix64) + FFI_HIDDEN(C(ffi_call_unix64)) + +C(ffi_call_unix64): +L(UW0): + _CET_ENDBR + movq (%rsp), %r10 /* Load return address. */ + leaq (%rdi, %rsi), %rax /* Find local stack base. */ + movq %rdx, (%rax) /* Save flags. 
*/ + movq %rcx, 8(%rax) /* Save raddr. */ + movq %rbp, 16(%rax) /* Save old frame pointer. */ + movq %r10, 24(%rax) /* Relocate return address. */ + movq %rax, %rbp /* Finalize local stack frame. */ + + /* New stack frame based off rbp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-8, so from the + perspective of the unwind info, it hasn't moved. */ +L(UW1): + /* cfi_def_cfa(%rbp, 32) */ + /* cfi_rel_offset(%rbp, 16) */ + + movq %rdi, %r10 /* Save a copy of the register area. */ + movq %r8, %r11 /* Save a copy of the target fn. */ + + /* Load up all argument registers. */ + movq (%r10), %rdi + movq 0x08(%r10), %rsi + movq 0x10(%r10), %rdx + movq 0x18(%r10), %rcx + movq 0x20(%r10), %r8 + movq 0x28(%r10), %r9 + movl 0xb0(%r10), %eax /* Set number of SSE registers. */ + testl %eax, %eax + jnz L(load_sse) +L(ret_from_load_sse): + + /* Deallocate the reg arg area, except for r10, then load via pop. */ + leaq 0xb8(%r10), %rsp + popq %r10 + + /* Call the user function. */ + call *%r11 + + /* Deallocate stack arg area; local stack frame in redzone. */ + leaq 24(%rbp), %rsp + + movq 0(%rbp), %rcx /* Reload flags. */ + movq 8(%rbp), %rdi /* Reload raddr. */ + movq 16(%rbp), %rbp /* Reload old frame pointer. */ +L(UW2): + /* cfi_remember_state */ + /* cfi_def_cfa(%rsp, 8) */ + /* cfi_restore(%rbp) */ + + /* The first byte of the flags contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %cl + movzbl %cl, %r10d + leaq L(store_table)(%rip), %r11 + ja L(sa) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. */ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + + /* Prep for the structure cases: scratch area in redzone. 
*/ + leaq -20(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(store_table): +E(L(store_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(store_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl %al, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl %ax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl %eax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbq %al, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswq %ax, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT32) + _CET_ENDBR + cltq + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_INT64) + _CET_ENDBR + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_X87) + _CET_ENDBR + fstpt (%rdi) + ret +E(L(store_table), UNIX64_RET_X87_2) + _CET_ENDBR + fstpt (%rdi) + fstpt 16(%rdi) + ret +E(L(store_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq %rax, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq %xmm0, 8(%rsi) + jmp L(s2) +E(L(store_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq %xmm1, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq %rdx, 8(%rsi) +L(s2): + movq %rax, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + .balign 8 +L(s3): + movq %xmm0, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + +L(sa): call PLT(C(abort)) + + /* Many times we can avoid loading any SSE registers at all. + It's not worth an indirect jump to load the exact set of + SSE registers needed; zero or all is a good compromise. */ + .balign 2 +L(UW3): + /* cfi_restore_state */ +L(load_sse): + movdqa 0x30(%r10), %xmm0 + movdqa 0x40(%r10), %xmm1 + movdqa 0x50(%r10), %xmm2 + movdqa 0x60(%r10), %xmm3 + movdqa 0x70(%r10), %xmm4 + movdqa 0x80(%r10), %xmm5 + movdqa 0x90(%r10), %xmm6 + movdqa 0xa0(%r10), %xmm7 + jmp L(ret_from_load_sse) + +L(UW4): +ENDF(C(ffi_call_unix64)) + +/* 6 general registers, 8 vector registers, + 32 bytes of rvalue, 8 bytes of alignment. */ +#define ffi_closure_OFS_G 0 +#define ffi_closure_OFS_V (6*8) +#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16) +#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8) + +/* The location of rvalue within the red zone after deallocating the frame. 
*/ +#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS) + + .balign 2 + .globl C(ffi_closure_unix64_sse) + FFI_HIDDEN(C(ffi_closure_unix64_sse)) + +C(ffi_closure_unix64_sse): +L(UW5): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW6): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry1) + +L(UW7): +ENDF(C(ffi_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_closure_unix64) + FFI_HIDDEN(C(ffi_closure_unix64)) + +C(ffi_closure_unix64): +L(UW8): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW9): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry1): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */ + movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */ + movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */ +#else + movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */ +#endif +L(do_closure): + leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */ + movq %rsp, %r8 /* Load reg_args */ + leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */ + call PLT(C(ffi_closure_unix64_inner)) + + /* Deallocate stack frame early; return value is now in redzone. */ + addq $ffi_closure_FS, %rsp +L(UW10): + /* cfi_adjust_cfa_offset(-ffi_closure_FS) */ + + /* The first byte of the return value contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %al + movzbl %al, %r10d + leaq L(load_table)(%rip), %r11 + ja L(la) +#ifdef __CET__ + /* NB: Originally, each slot is 8 byte. 4 bytes of ENDBR64 + + 4 bytes NOP padding double slot size to 16 bytes. 
*/ + addl %r10d, %r10d +#endif + leaq (%r11, %r10, 8), %r10 + leaq ffi_closure_RED_RVALUE(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(load_table): +E(L(load_table), UNIX64_RET_VOID) + _CET_ENDBR + ret +E(L(load_table), UNIX64_RET_UINT8) + _CET_ENDBR + movzbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT16) + _CET_ENDBR + movzwl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT8) + _CET_ENDBR + movsbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT16) + _CET_ENDBR + movswl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT32) + _CET_ENDBR + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_INT64) + _CET_ENDBR + movq (%rsi), %rax + ret +E(L(load_table), UNIX64_RET_XMM32) + _CET_ENDBR + movd (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_XMM64) + _CET_ENDBR + movq (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_X87) + _CET_ENDBR + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_X87_2) + _CET_ENDBR + fldt 16(%rsi) + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_ST_XMM0_RAX) + _CET_ENDBR + movq 8(%rsi), %rax + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_XMM0) + _CET_ENDBR + movq 8(%rsi), %xmm0 + jmp L(l2) +E(L(load_table), UNIX64_RET_ST_XMM0_XMM1) + _CET_ENDBR + movq 8(%rsi), %xmm1 + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_RDX) + _CET_ENDBR + movq 8(%rsi), %rdx +L(l2): + movq (%rsi), %rax + ret + .balign 8 +L(l3): + movq (%rsi), %xmm0 + ret + +L(la): call PLT(C(abort)) + +L(UW11): +ENDF(C(ffi_closure_unix64)) + + .balign 2 + .globl C(ffi_go_closure_unix64_sse) + FFI_HIDDEN(C(ffi_go_closure_unix64_sse)) + +C(ffi_go_closure_unix64_sse): +L(UW12): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW13): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry2) + +L(UW14): +ENDF(C(ffi_go_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_go_closure_unix64) + FFI_HIDDEN(C(ffi_go_closure_unix64)) + +C(ffi_go_closure_unix64): +L(UW15): + _CET_ENDBR + subq $ffi_closure_FS, %rsp +L(UW16): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry2): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl 4(%r10), %edi /* Load cif */ + movl 8(%r10), %esi /* Load fun */ + movl %r10d, %edx /* Load closure (user_data) */ +#else + movq 8(%r10), %rdi /* Load cif */ + movq 16(%r10), %rsi /* Load fun */ + movq %r10, %rdx /* Load closure (user_data) */ +#endif + jmp L(do_closure) + +L(UW17): +ENDF(C(ffi_go_closure_unix64)) + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,"a",@unwind +#else +.section .eh_frame,"a",@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. 
*/ +#ifdef __CET__ +/* Use DW_CFA_advance_loc2 when IBT is enabled. */ +# define ADV(N, P) .byte 3; .2byte L(N)-L(P) +#else +# define ADV(N, P) .byte 2, L(N)-L(P) +#endif + + .balign 8 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x78 /* CIE Data Alignment Factor */ + .byte 0x10 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */ + .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */ + .balign 8 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW4)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */ + .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */ + ADV(UW2, UW1) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */ + .byte 0xc0+6 /* DW_CFA_restore, %rbp */ + ADV(UW3, UW2) + .byte 0xb /* DW_CFA_restore_state */ + .balign 8 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW5)) /* Initial location */ + .long L(UW7)-L(UW5) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW6, UW5) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW8)) /* Initial location */ + .long L(UW11)-L(UW8) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW9, UW8) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + ADV(UW10, UW9) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */ +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW14)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW15)) /* Initial location */ + .long L(UW17)-L(UW15) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW16, UW15) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE5): +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_unix64 */ + .quad C(ffi_call_unix64) + .set L1,L(UW4)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64_sse */ + .quad C(ffi_closure_unix64_sse) + .set L2,L(UW7)-L(UW5) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64 */ + .quad C(ffi_closure_unix64) + .set L3,L(UW11)-L(UW8) + .long L3 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + 
.quad 0 + + /* compact unwind for ffi_go_closure_unix64_sse */ + .quad C(ffi_go_closure_unix64_sse) + .set L4,L(UW14)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_go_closure_unix64 */ + .quad C(ffi_go_closure_unix64) + .set L5,L(UW17)-L(UW15) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 +#endif + +#endif /* __x86_64__ */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/win64.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/win64.S new file mode 100644 index 0000000..8315e8b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/win64.S @@ -0,0 +1,241 @@ +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 %rcx +#define arg1 %rdx +#define arg2 %r8 +#define arg3 %r9 +#else +#define SEH(...) +#define arg0 %rdi +#define arg1 %rsi +#define arg2 %rdx +#define arg3 %rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + (X) * 8 +#endif + + .text + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 8 + .globl C(ffi_call_win64) + FFI_HIDDEN(C(ffi_call_win64)) + + SEH(.seh_proc ffi_call_win64) +C(ffi_call_win64): + cfi_startproc + _CET_ENDBR + /* Set up the local stack frame and install it in rbp/rsp. */ + movq (%rsp), %rax + movq %rbp, (arg1) + movq %rax, 8(arg1) + movq arg1, %rbp + cfi_def_cfa(%rbp, 16) + cfi_rel_offset(%rbp, 0) + SEH(.seh_pushreg %rbp) + SEH(.seh_setframe %rbp, 0) + SEH(.seh_endprologue) + movq arg0, %rsp + + movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + movq (%rsp), %rcx + movsd (%rsp), %xmm0 + movq 8(%rsp), %rdx + movsd 8(%rsp), %xmm1 + movq 16(%rsp), %r8 + movsd 16(%rsp), %xmm2 + movq 24(%rsp), %r9 + movsd 24(%rsp), %xmm3 + + call *16(%rbp) + + movl 24(%rbp), %ecx + movq 32(%rbp), %r8 + leaq 0f(%rip), %r10 + cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + leaq (%r10, %rcx, 8), %r10 + ja 99f + _CET_NOTRACK jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */ +.macro epilogue + leaveq + cfi_remember_state + cfi_def_cfa(%rsp, 8) + cfi_restore(%rbp) + ret + cfi_restore_state +.endm + + .align 8 +0: +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd %xmm0, (%r8) + epilogue +// FFI_TYPE_LONGDOUBLE may be FFI_TYPE_DOUBLE but we need a different value here. 
+E(0b, FFI_TYPE_DOUBLE + 1) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzbl %al, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsbq %al, %rax + jmp 98f +E(0b, FFI_TYPE_UINT16) + movzwl %ax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movswq %ax, %rax + jmp 98f +E(0b, FFI_TYPE_UINT32) + movl %eax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +98: movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + movl %eax, (%r8) + epilogue + + .align 8 +99: call PLT(C(abort)) + + epilogue + + cfi_endproc + SEH(.seh_endproc) + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + .align 8 + .globl C(ffi_go_closure_win64) + FFI_HIDDEN(C(ffi_go_closure_win64)) + + SEH(.seh_proc ffi_go_closure_win64) +C(ffi_go_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq 8(%r10), %rcx /* load cif */ + movq 16(%r10), %rdx /* load fun */ + movq %r10, %r8 /* closure is user_data */ + jmp 0f + cfi_endproc + SEH(.seh_endproc) + + .align 8 + .globl C(ffi_closure_win64) + FFI_HIDDEN(C(ffi_closure_win64)) + + SEH(.seh_proc ffi_closure_win64) +C(ffi_closure_win64): + cfi_startproc + _CET_ENDBR + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +0: + subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.seh_stackalloc ffi_clo_FS) + SEH(.seh_endprologue) + + /* Save all sse arguments into the stack frame. */ + movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + leaq ffi_clo_OFF_R(%rsp), %r9 + call PLT(C(ffi_closure_win64_inner)) + + /* Load the result into both possible result registers. */ + movq ffi_clo_OFF_R(%rsp), %rax + movsd ffi_clo_OFF_R(%rsp), %xmm0 + + addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + SEH(.seh_endproc) +#endif /* __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/win64_intel.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/win64_intel.S new file mode 100644 index 0000000..970a4f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/x86/win64_intel.S @@ -0,0 +1,238 @@ +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) 
__VA_ARGS__ +#define arg0 rcx +#define arg1 rdx +#define arg2 r8 +#define arg3 r9 +#else +#define SEH(...) +#define arg0 rdi +#define arg1 rsi +#define arg2 rdx +#define arg3 rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) ALIGN 8 +#else +# define E(BASE, X) ALIGN 8; ORG BASE + (X) * 8 +#endif + + .CODE + extern PLT(C(abort)):near + extern C(ffi_closure_win64_inner):near + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + ALIGN 8 + PUBLIC C(ffi_call_win64) + + ; SEH(.safesh ffi_call_win64) +C(ffi_call_win64) proc SEH(frame) + cfi_startproc + /* Set up the local stack frame and install it in rbp/rsp. */ + mov RAX, [RSP] ; movq (%rsp), %rax + mov [arg1], RBP ; movq %rbp, (arg1) + mov [arg1 + 8], RAX; movq %rax, 8(arg1) + mov RBP, arg1; movq arg1, %rbp + cfi_def_cfa(rbp, 16) + cfi_rel_offset(rbp, 0) + SEH(.pushreg rbp) + SEH(.setframe rbp, 0) + SEH(.endprolog) + mov RSP, arg0 ; movq arg0, %rsp + + mov R10, arg2 ; movq arg2, %r10 + + /* Load all slots into both general and xmm registers. */ + mov RCX, [RSP] ; movq (%rsp), %rcx + movsd XMM0, qword ptr [RSP] ; movsd (%rsp), %xmm0 + mov RDX, [RSP + 8] ;movq 8(%rsp), %rdx + movsd XMM1, qword ptr [RSP + 8]; movsd 8(%rsp), %xmm1 + mov R8, [RSP + 16] ; movq 16(%rsp), %r8 + movsd XMM2, qword ptr [RSP + 16] ; movsd 16(%rsp), %xmm2 + mov R9, [RSP + 24] ; movq 24(%rsp), %r9 + movsd XMM3, qword ptr [RSP + 24] ;movsd 24(%rsp), %xmm3 + + CALL qword ptr [RBP + 16] ; call *16(%rbp) + + mov ECX, [RBP + 24] ; movl 24(%rbp), %ecx + mov R8, [RBP + 32] ; movq 32(%rbp), %r8 + LEA R10, ffi_call_win64_tab ; leaq 0f(%rip), %r10 + CMP ECX, FFI_TYPE_SMALL_STRUCT_4B ; cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + LEA R10, [R10 + RCX*8] ; leaq (%r10, %rcx, 8), %r10 + JA L99 ; ja 99f + JMP R10 ; jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */ +epilogue macro + LEAVE + cfi_remember_state + cfi_def_cfa(rsp, 8) + cfi_restore(rbp) + RET + cfi_restore_state +endm + + ALIGN 8 +ffi_call_win64_tab LABEL NEAR +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movsxd rax, eax ; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss dword ptr [r8], xmm0 ; movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd qword ptr[r8], xmm0; movsd %xmm0, (%r8) + epilogue +// FFI_TYPE_LONGDOUBLE may be FFI_TYPE_DOUBLE but we need a different value here. 
+E(0b, FFI_TYPE_DOUBLE + 1) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzx eax, al ;movzbl %al, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsx rax, al ; movsbq %al, %rax + jmp L98 +E(0b, FFI_TYPE_UINT16) + movzx eax, ax ; movzwl %ax, %eax + mov qword ptr[r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movsx rax, ax; movswq %ax, %rax + jmp L98 +E(0b, FFI_TYPE_UINT32) + mov eax, eax; movl %eax, %eax + mov qword ptr[r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movsxd rax, eax; movslq %eax, %rax + mov qword ptr [r8], rax; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +L98 LABEL near + mov qword ptr [r8], rax ; movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + mov qword ptr [r8], rax;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + mov qword ptr [r8], rax ;movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + mov byte ptr [r8], al ; movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + mov word ptr [r8], ax ; movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + mov dword ptr [r8], eax ; movl %eax, (%r8) + epilogue + + align 8 +L99 LABEL near + call PLT(C(abort)) + + epilogue + + cfi_endproc + C(ffi_call_win64) endp + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. */ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + align 8 + PUBLIC C(ffi_go_closure_win64) + +C(ffi_go_closure_win64) proc + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9 ;movq %r9, 32(%rsp) + + mov rcx, qword ptr [r10 + 8]; movq 8(%r10), %rcx /* load cif */ + mov rdx, qword ptr [r10 + 16]; movq 16(%r10), %rdx /* load fun */ + mov r8, r10 ; movq %r10, %r8 /* closure is user_data */ + jmp ffi_closure_win64_2 + cfi_endproc + C(ffi_go_closure_win64) endp + + align 8 + +PUBLIC C(ffi_closure_win64) +C(ffi_closure_win64) PROC FRAME + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + mov qword ptr [rsp + 8], rcx; movq %rcx, 8(%rsp) + mov qword ptr [rsp + 16], rdx; movq %rdx, 16(%rsp) + mov qword ptr [rsp + 24], r8; movq %r8, 24(%rsp) + mov qword ptr [rsp + 32], r9; movq %r9, 32(%rsp) + + mov rcx, qword ptr [FFI_TRAMPOLINE_SIZE + r10] ;movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + mov rdx, qword ptr [FFI_TRAMPOLINE_SIZE + 8 + r10] ; movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + mov r8, qword ptr [FFI_TRAMPOLINE_SIZE+16+r10] ;movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +ffi_closure_win64_2 LABEL near + sub rsp, ffi_clo_FS ;subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.allocstack ffi_clo_FS) + SEH(.endprolog) + + /* Save all sse arguments into the stack frame. 
*/ + movsd qword ptr [ffi_clo_OFF_X + rsp], xmm0 ; movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd qword ptr [ffi_clo_OFF_X+8+rsp], xmm1 ; movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd qword ptr [ffi_clo_OFF_X+16+rsp], xmm2 ; movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd qword ptr [ffi_clo_OFF_X+24+rsp], xmm3 ; movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + lea r9, [ffi_clo_OFF_R + rsp] ; leaq ffi_clo_OFF_R(%rsp), %r9 + call C(ffi_closure_win64_inner) + + /* Load the result into both possible result registers. */ + + mov rax, qword ptr [ffi_clo_OFF_R + rsp] ;movq ffi_clo_OFF_R(%rsp), %rax + movsd xmm0, qword ptr [rsp + ffi_clo_OFF_R] ;movsd ffi_clo_OFF_R(%rsp), %xmm0 + + add rsp, ffi_clo_FS ;addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + C(ffi_closure_win64) endp + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif +_text ends +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/ffi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/ffi.c new file mode 100644 index 0000000..9a0575f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/ffi.c @@ -0,0 +1,298 @@ +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +/* + |----------------------------------------| + | | + on entry to ffi_call ----> |----------------------------------------| + | caller stack frame for registers a0-a3 | + |----------------------------------------| + | | + | additional arguments | + entry of the function ---> |----------------------------------------| + | copy of function arguments a2-a7 | + | - - - - - - - - - - - - - | + | | + + The area below the entry line becomes the new stack frame for the function. 
+ +*/ + + +#define FFI_TYPE_STRUCT_REGS FFI_TYPE_LAST + + +extern void ffi_call_SYSV(void *rvalue, unsigned rsize, unsigned flags, + void(*fn)(void), unsigned nbytes, extended_cif*); +extern void ffi_closure_SYSV(void) FFI_HIDDEN; + +ffi_status ffi_prep_cif_machdep(ffi_cif *cif) +{ + switch(cif->rtype->type) { + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + cif->flags = cif->rtype->type; + break; + case FFI_TYPE_VOID: + case FFI_TYPE_FLOAT: + cif->flags = FFI_TYPE_UINT32; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + cif->flags = FFI_TYPE_UINT64; // cif->rtype->type; + break; + case FFI_TYPE_STRUCT: + cif->flags = FFI_TYPE_STRUCT; //_REGS; + /* Up to 16 bytes are returned in registers */ + if (cif->rtype->size > 4 * 4) { + /* returned structure is referenced by a register; use 8 bytes + (including 4 bytes for potential additional alignment) */ + cif->flags = FFI_TYPE_STRUCT; + cif->bytes += 8; + } + break; + + default: + cif->flags = FFI_TYPE_UINT32; + break; + } + + /* Round the stack up to a full 4 register frame, just in case + (we use this size in movsp). This way, it's also a multiple of + 8 bytes for 64-bit arguments. */ + cif->bytes = FFI_ALIGN(cif->bytes, 16); + + return FFI_OK; +} + +void ffi_prep_args(extended_cif *ecif, unsigned char* stack) +{ + unsigned int i; + unsigned long *addr; + ffi_type **ptr; + + union { + void **v; + char **c; + signed char **sc; + unsigned char **uc; + signed short **ss; + unsigned short **us; + unsigned int **i; + long long **ll; + float **f; + double **d; + } p_argv; + + /* Verify that everything is aligned up properly */ + FFI_ASSERT (((unsigned long) stack & 0x7) == 0); + + p_argv.v = ecif->avalue; + addr = (unsigned long*)stack; + + /* structures with a size greater than 16 bytes are passed in memory */ + if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 16) + { + *addr++ = (unsigned long)ecif->rvalue; + } + + for (i = ecif->cif->nargs, ptr = ecif->cif->arg_types; + i > 0; + i--, ptr++, p_argv.v++) + { + switch ((*ptr)->type) + { + case FFI_TYPE_SINT8: + *addr++ = **p_argv.sc; + break; + case FFI_TYPE_UINT8: + *addr++ = **p_argv.uc; + break; + case FFI_TYPE_SINT16: + *addr++ = **p_argv.ss; + break; + case FFI_TYPE_UINT16: + *addr++ = **p_argv.us; + break; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + *addr++ = **p_argv.i; + break; + case FFI_TYPE_DOUBLE: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + if (((unsigned long)addr & 4) != 0) + addr++; + *(unsigned long long*)addr = **p_argv.ll; + addr += sizeof(unsigned long long) / sizeof (addr); + break; + + case FFI_TYPE_STRUCT: + { + unsigned long offs; + unsigned long size; + + if (((unsigned long)addr & 4) != 0 && (*ptr)->alignment > 4) + addr++; + + offs = (unsigned long) addr - (unsigned long) stack; + size = (*ptr)->size; + + /* Entire structure must fit the argument registers or referenced */ + if (offs < FFI_REGISTER_NARGS * 4 + && offs + size > FFI_REGISTER_NARGS * 4) + addr = (unsigned long*) (stack + FFI_REGISTER_NARGS * 4); + + memcpy((char*) addr, *p_argv.c, size); + addr += (size + 3) / 4; + break; + } + + default: + FFI_ASSERT(0); + } + } +} + + +void ffi_call(ffi_cif* cif, void(*fn)(void), void *rvalue, void **avalue) +{ + extended_cif ecif; + unsigned long rsize = cif->rtype->size; + int flags = cif->flags; + void *alloc = NULL; + + ecif.cif = cif; + ecif.avalue = avalue; + + /* Note that for 
structures that are returned in registers (size <= 16 bytes) + we allocate a temporary buffer and use memcpy to copy it to the final + destination. The reason is that the target address might be misaligned or + the length not a multiple of 4 bytes. Handling all those cases would be + very complex. */ + + if (flags == FFI_TYPE_STRUCT && (rsize <= 16 || rvalue == NULL)) + { + alloc = alloca(FFI_ALIGN(rsize, 4)); + ecif.rvalue = alloc; + } + else + { + ecif.rvalue = rvalue; + } + + if (cif->abi != FFI_SYSV) + FFI_ASSERT(0); + + ffi_call_SYSV (ecif.rvalue, rsize, cif->flags, fn, cif->bytes, &ecif); + + if (alloc != NULL && rvalue != NULL) + memcpy(rvalue, alloc, rsize); +} + +extern void ffi_trampoline(); +extern void ffi_cacheflush(void* start, void* end); + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + /* copy trampoline to stack and patch 'ffi_closure_SYSV' pointer */ + memcpy(closure->tramp, ffi_trampoline, FFI_TRAMPOLINE_SIZE); + *(unsigned int*)(&closure->tramp[8]) = (unsigned int)ffi_closure_SYSV; + + // Do we have this function? + // __builtin___clear_cache(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE) + ffi_cacheflush(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + return FFI_OK; +} + + +long FFI_HIDDEN +ffi_closure_SYSV_inner(ffi_closure *closure, void **values, void *rvalue) +{ + ffi_cif *cif; + ffi_type **arg_types; + void **avalue; + int i, areg; + + cif = closure->cif; + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + areg = 0; + + int rtype = cif->rtype->type; + if (rtype == FFI_TYPE_STRUCT && cif->rtype->size > 4 * 4) + { + rvalue = *values; + areg++; + } + + cif = closure->cif; + arg_types = cif->arg_types; + avalue = alloca(cif->nargs * sizeof(void *)); + + for (i = 0; i < cif->nargs; i++) + { + if (arg_types[i]->alignment == 8 && (areg & 1) != 0) + areg++; + + // skip the entry 16,a1 framework, add 16 bytes (4 registers) + if (areg == FFI_REGISTER_NARGS) + areg += 4; + + if (arg_types[i]->type == FFI_TYPE_STRUCT) + { + int numregs = ((arg_types[i]->size + 3) & ~3) / 4; + if (areg < FFI_REGISTER_NARGS && areg + numregs > FFI_REGISTER_NARGS) + areg = FFI_REGISTER_NARGS + 4; + } + + avalue[i] = &values[areg]; + areg += (arg_types[i]->size + 3) / 4; + } + + (closure->fun)(cif, rvalue, avalue, closure->user_data); + + return rtype; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/ffitarget.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/ffitarget.h new file mode 100644 index 0000000..0ba728b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/ffitarget.h @@ -0,0 +1,53 @@ +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2013 Tensilica, Inc. + Target configuration macros for XTENSA.
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +} ffi_abi; +#endif + +#define FFI_REGISTER_NARGS 6 + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 +#define FFI_TRAMPOLINE_SIZE 24 + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/sysv.S b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/sysv.S new file mode 100644 index 0000000..e942179 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/src/xtensa/sysv.S @@ -0,0 +1,258 @@ +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2013 Tensilica, Inc. + + XTENSA Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include + +#define ENTRY(name) .text; .globl name; .type name,@function; .align 4; name: +#define END(name) .size name , . 
- name + +/* Assert that the table below is in sync with ffi.h. */ + +#if FFI_TYPE_UINT8 != 5 \ + || FFI_TYPE_SINT8 != 6 \ + || FFI_TYPE_UINT16 != 7 \ + || FFI_TYPE_SINT16 != 8 \ + || FFI_TYPE_UINT32 != 9 \ + || FFI_TYPE_SINT32 != 10 \ + || FFI_TYPE_UINT64 != 11 +#error "xtensa/sysv.S out of sync with ffi.h" +#endif + + +/* ffi_call_SYSV (rvalue, rbytes, flags, (*fnaddr)(), bytes, ecif) + void *rvalue; a2 + unsigned long rbytes; a3 + unsigned flags; a4 + void (*fnaddr)(); a5 + unsigned long bytes; a6 + extended_cif* ecif) a7 +*/ + +ENTRY(ffi_call_SYSV) + + entry a1, 32 # 32 byte frame for using call8 below + + mov a10, a7 # a10(->arg0): ecif + sub a11, a1, a6 # a11(->arg1): stack pointer + mov a7, a1 # fp + movsp a1, a11 # set new sp = old_sp - bytes + + movi a8, ffi_prep_args + callx8 a8 # ffi_prep_args(ecif, stack) + + # prepare to move stack pointer back up to 6 arguments + # note that 'bytes' is already aligned + + movi a10, 6*4 + sub a11, a6, a10 + movgez a6, a10, a11 + add a6, a1, a6 + + + # we can pass up to 6 arguments in registers + # for simplicity, just load 6 arguments + # (the stack size is at least 32 bytes, so no risk to cross boundaries) + + l32i a10, a1, 0 + l32i a11, a1, 4 + l32i a12, a1, 8 + l32i a13, a1, 12 + l32i a14, a1, 16 + l32i a15, a1, 20 + + # move stack pointer + + movsp a1, a6 + + callx8 a5 # (*fn)(args...) + + # Handle return value(s) + + beqz a2, .Lexit + + movi a5, FFI_TYPE_STRUCT + bne a4, a5, .Lstore + movi a5, 16 + blt a5, a3, .Lexit + + s32i a10, a2, 0 + blti a3, 5, .Lexit + addi a3, a3, -1 + s32i a11, a2, 4 + blti a3, 8, .Lexit + s32i a12, a2, 8 + blti a3, 12, .Lexit + s32i a13, a2, 12 + +.Lexit: retw + +.Lstore: + addi a4, a4, -FFI_TYPE_UINT8 + bgei a4, 7, .Lexit # should never happen + movi a6, store_calls + add a4, a4, a4 + addx4 a6, a4, a6 # store_table + idx * 8 + jx a6 + + .align 8 +store_calls: + # UINT8 + s8i a10, a2, 0 + retw + + # SINT8 + .align 8 + s8i a10, a2, 0 + retw + + # UINT16 + .align 8 + s16i a10, a2, 0 + retw + + # SINT16 + .align 8 + s16i a10, a2, 0 + retw + + # UINT32 + .align 8 + s32i a10, a2, 0 + retw + + # SINT32 + .align 8 + s32i a10, a2, 0 + retw + + # UINT64 + .align 8 + s32i a10, a2, 0 + s32i a11, a2, 4 + retw + +END(ffi_call_SYSV) + + +/* + * void ffi_cacheflush (unsigned long start, unsigned long end) + */ + +#define EXTRA_ARGS_SIZE 24 + +ENTRY(ffi_cacheflush) + + entry a1, 16 + +1: +#if XCHAL_DCACHE_SIZE + dhwbi a2, 0 +#endif +#if XCHAL_ICACHE_SIZE + ihi a2, 0 +#endif + addi a2, a2, 4 + blt a2, a3, 1b + + retw + +END(ffi_cacheflush) + +/* ffi_trampoline is copied to the stack */ + +ENTRY(ffi_trampoline) + + entry a1, 16 + (FFI_REGISTER_NARGS * 4) + (4 * 4) # [ 0] + j 2f # [ 3] + .align 4 # [ 6] +1: .long 0 # [ 8] +2: l32r a15, 1b # [12] + _mov a14, a0 # [15] + callx0 a15 # [18] + # [21] +END(ffi_trampoline) + +/* + * ffi_closure() + * + * a0: closure + 21 + * a14: return address (a0) + */ + +ENTRY(ffi_closure_SYSV) + + /* intentionally omitting entry here */ + + # restore return address (a0) and move pointer to closure to a10 + addi a10, a0, -21 + mov a0, a14 + + # allow up to 4 arguments as return values + addi a11, a1, 4 * 4 + + # save up to 6 arguments to stack (allocated by entry below) + s32i a2, a11, 0 + s32i a3, a11, 4 + s32i a4, a11, 8 + s32i a5, a11, 12 + s32i a6, a11, 16 + s32i a7, a11, 20 + + movi a8, ffi_closure_SYSV_inner + mov a12, a1 + callx8 a8 # .._inner(*closure, **avalue, *rvalue) + + # load up to four return arguments + l32i a2, a1, 0 + l32i a3, a1, 4 + l32i a4, a1, 8 + l32i a5, a1, 12 + + # 
(sign-)extend return value + movi a11, FFI_TYPE_UINT8 + bne a10, a11, 1f + extui a2, a2, 0, 8 + retw + +1: movi a11, FFI_TYPE_SINT8 + bne a10, a11, 1f + sext a2, a2, 7 + retw + +1: movi a11, FFI_TYPE_UINT16 + bne a10, a11, 1f + extui a2, a2, 0, 16 + retw + +1: movi a11, FFI_TYPE_SINT16 + bne a10, a11, 1f + sext a2, a2, 15 + +1: retw + +END(ffi_closure_SYSV) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/stamp-h.in b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/stamp-h.in new file mode 100644 index 0000000..9788f70 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/stamp-h.in @@ -0,0 +1 @@ +timestamp diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/Makefile.am b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/Makefile.am new file mode 100644 index 0000000..bcfea57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/Makefile.am @@ -0,0 +1,122 @@ +## Process this file with automake to produce Makefile.in. + +AUTOMAKE_OPTIONS = foreign dejagnu + +EXTRA_DEJAGNU_SITE_CONFIG=../local.exp + +CLEANFILES = *.exe core* *.log *.sum + +EXTRA_DIST = lib/target-libpath.exp lib/libffi.exp lib/wrapper.exp \ +libffi.call/strlen4.c libffi.call/struct10.c libffi.call/many_mixed.c \ +libffi.call/float.c libffi.call/struct5.c libffi.call/return_fl3.c \ +libffi.call/return_fl1.c libffi.call/call.exp libffi.call/pyobjc-tc.c \ +libffi.call/float_va.c libffi.call/struct8.c libffi.call/pr1172638.c \ +libffi.call/return_sc.c libffi.call/va_struct1.c \ +libffi.call/align_stdcall.c libffi.call/struct9.c libffi.call/va_1.c \ +libffi.call/va_struct2.c libffi.call/return_fl2.c \ +libffi.call/align_mixed.c libffi.call/ffitest.h libffi.call/struct4.c \ +libffi.call/return_ldl.c libffi.call/float3.c libffi.call/return_sl.c \ +libffi.call/return_dbl1.c libffi.call/err_bad_typedef.c \ +libffi.call/return_ll1.c libffi.call/return_dbl2.c \ +libffi.call/negint.c libffi.closures/nested_struct3.c \ +libffi.call/struct2.c libffi.call/struct3.c libffi.call/return_fl.c \ +libffi.call/offsets.c libffi.call/struct7.c libffi.call/va_struct3.c \ +libffi.call/float1.c libffi.call/uninitialized.c libffi.call/many2.c \ +libffi.call/struct6.c libffi.call/strlen2.c libffi.call/float2.c \ +libffi.call/return_ul.c libffi.call/struct1.c libffi.call/strlen3.c \ +libffi.call/return_dbl.c libffi.call/float4.c libffi.call/many.c \ +libffi.call/strlen.c libffi.call/return_uc.c libffi.call/many_double.c \ +libffi.call/return_ll.c libffi.call/promotion.c \ +libffi.complex/complex_defs_longdouble.inc \ +libffi.complex/cls_align_complex_float.c \ +libffi.complex/cls_complex_va_float.c \ +libffi.complex/cls_complex_struct_float.c \ +libffi.complex/return_complex2_longdouble.c \ +libffi.complex/cls_complex_float.c \ +libffi.complex/return_complex_longdouble.c \ +libffi.complex/return_complex2_float.c libffi.complex/cls_complex.inc \ +libffi.complex/cls_complex_va_longdouble.c \ +libffi.complex/return_complex_double.c \ +libffi.complex/return_complex.inc libffi.complex/many_complex.inc \ +libffi.complex/complex_float.c libffi.complex/cls_align_complex.inc \ +libffi.complex/return_complex2_double.c \ +libffi.complex/many_complex_float.c libffi.complex/ffitest.h \ +libffi.complex/return_complex1_double.c \ +libffi.complex/cls_complex_struct_longdouble.c \ +libffi.complex/complex_defs_double.inc \ +libffi.complex/cls_complex_va_double.c \ +libffi.complex/many_complex_double.c \ 
+libffi.complex/return_complex2.inc \ +libffi.complex/return_complex1_float.c \ +libffi.complex/complex_longdouble.c \ +libffi.complex/complex_defs_float.inc \ +libffi.complex/cls_complex_double.c \ +libffi.complex/cls_align_complex_double.c \ +libffi.complex/cls_align_complex_longdouble.c \ +libffi.complex/complex_double.c libffi.complex/cls_complex_va.inc \ +libffi.complex/many_complex_longdouble.c libffi.complex/complex.inc \ +libffi.complex/return_complex1_longdouble.c \ +libffi.complex/complex_int.c libffi.complex/cls_complex_longdouble.c \ +libffi.complex/cls_complex_struct_double.c \ +libffi.complex/return_complex1.inc libffi.complex/complex.exp \ +libffi.complex/cls_complex_struct.inc \ +libffi.complex/return_complex_float.c libffi.go/closure1.c \ +libffi.go/aa-direct.c libffi.go/ffitest.h libffi.go/go.exp \ +libffi.go/static-chain.h libffi.bhaible/bhaible.exp \ +libffi.bhaible/test-call.c libffi.bhaible/alignof.h \ +libffi.bhaible/testcases.c libffi.bhaible/test-callback.c \ +libffi.bhaible/Makefile libffi.bhaible/README config/default.exp \ +libffi.closures/cls_multi_sshort.c \ +libffi.closures/cls_align_longdouble_split2.c \ +libffi.closures/cls_1_1byte.c libffi.closures/cls_uint_va.c \ +libffi.closures/cls_3_1byte.c libffi.closures/cls_many_mixed_args.c \ +libffi.closures/cls_20byte1.c libffi.closures/cls_pointer_stack.c \ +libffi.closures/cls_align_float.c libffi.closures/cls_5_1_byte.c \ +libffi.closures/cls_9byte1.c libffi.closures/cls_align_uint32.c \ +libffi.closures/stret_medium.c libffi.closures/cls_3byte1.c \ +libffi.closures/cls_align_uint64.c libffi.closures/cls_longdouble_va.c \ +libffi.closures/cls_align_pointer.c libffi.closures/cls_19byte.c \ +libffi.closures/cls_ushort.c libffi.closures/cls_align_sint32.c \ +libffi.closures/cls_ulonglong.c libffi.closures/cls_struct_va1.c \ +libffi.closures/cls_9byte2.c libffi.closures/closure_fn5.c \ +libffi.closures/cls_5byte.c libffi.closures/cls_3float.c \ +libffi.closures/closure.exp libffi.closures/cls_schar.c \ +libffi.closures/closure_fn4.c libffi.closures/cls_uchar_va.c \ +libffi.closures/closure_fn0.c libffi.closures/huge_struct.c \ +libffi.closures/cls_ushort_va.c \ +libffi.closures/cls_64byte.c libffi.closures/cls_longdouble.c \ +libffi.closures/cls_ulong_va.c libffi.closures/cls_6_1_byte.c \ +libffi.closures/cls_align_uint16.c libffi.closures/closure_fn2.c \ +libffi.closures/unwindtest_ffi_call.cc \ +libffi.closures/cls_multi_ushortchar.c libffi.closures/cls_8byte.c \ +libffi.closures/ffitest.h libffi.closures/nested_struct8.c \ +libffi.closures/cls_pointer.c libffi.closures/nested_struct2.c \ +libffi.closures/nested_struct.c libffi.closures/cls_multi_schar.c \ +libffi.closures/cls_align_longdouble_split.c \ +libffi.closures/cls_uchar.c libffi.closures/nested_struct9.c \ +libffi.closures/cls_float.c libffi.closures/stret_medium2.c \ +libffi.closures/closure_loc_fn0.c libffi.closures/cls_6byte.c \ +libffi.closures/closure_simple.c libffi.closures/cls_align_double.c \ +libffi.closures/cls_multi_uchar.c libffi.closures/cls_4_1byte.c \ +libffi.closures/closure_fn3.c libffi.closures/cls_align_sint64.c \ +libffi.closures/nested_struct1.c libffi.closures/unwindtest.cc \ +libffi.closures/nested_struct5.c libffi.closures/cls_multi_ushort.c \ +libffi.closures/nested_struct11.c \ +libffi.closures/cls_multi_sshortchar.c \ +libffi.closures/cls_align_longdouble.c \ +libffi.closures/cls_dbls_struct.c \ +libffi.closures/cls_many_mixed_float_double.c \ +libffi.closures/stret_large.c libffi.closures/stret_large2.c \ 
+libffi.closures/cls_align_sint16.c libffi.closures/cls_2byte.c \ +libffi.closures/nested_struct4.c libffi.closures/problem1.c \ +libffi.closures/testclosure.c libffi.closures/nested_struct6.c \ +libffi.closures/cls_4byte.c libffi.closures/cls_24byte.c \ +libffi.closures/nested_struct10.c libffi.closures/cls_uint.c \ +libffi.closures/cls_12byte.c libffi.closures/cls_sint.c \ +libffi.closures/cls_7_1_byte.c libffi.closures/cls_sshort.c \ +libffi.closures/cls_16byte.c libffi.closures/nested_struct7.c \ +libffi.closures/cls_double_va.c libffi.closures/cls_3byte2.c \ +libffi.closures/cls_double.c libffi.closures/cls_7byte.c \ +libffi.closures/closure_fn6.c libffi.closures/closure_fn1.c \ +libffi.closures/cls_20byte.c libffi.closures/cls_18byte.c \ +libffi.closures/err_bad_abi.c diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/config/default.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/config/default.exp new file mode 100644 index 0000000..90967cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/config/default.exp @@ -0,0 +1 @@ +load_lib "standard.exp" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/libffi.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/libffi.exp new file mode 100644 index 0000000..d3c17db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/libffi.exp @@ -0,0 +1,660 @@ +# Copyright (C) 2003, 2005, 2008, 2009, 2010, 2011, 2014, 2019 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +proc load_gcc_lib { filename } { + global srcdir + load_file $srcdir/lib/$filename +} + +load_lib dg.exp +load_lib libgloss.exp +load_gcc_lib target-libpath.exp +load_gcc_lib wrapper.exp + +proc check_effective_target_gccbug { } { + global has_gccbug + return $has_gccbug +} + +# Return 1 if the target matches the effective target 'arg', 0 otherwise. +# This can be used with any check_* proc that takes no argument and +# returns only 1 or 0. It could be used with check_* procs that take +# arguments with keywords that pass particular arguments. + +proc is-effective-target { arg } { + global et_index + set selected 0 + if { ![info exists et_index] } { + # Initialize the effective target index that is used in some + # check_effective_target_* procs. 
+ set et_index 0 + } + if { [info procs check_effective_target_${arg}] != [list] } { + set selected [check_effective_target_${arg}] + } else { + error "unknown effective target keyword `$arg'" + } + verbose "is-effective-target: $arg $selected" 2 + return $selected +} + +proc is-effective-target-keyword { arg } { + if { [info procs check_effective_target_${arg}] != [list] } { + return 1 + } else { + return 0 + } +} + +# Intercept the call to the DejaGnu version of dg-process-target to +# support use of an effective-target keyword in place of a list of +# target triplets to xfail or skip a test. +# +# The argument to dg-process-target is the keyword "target" or "xfail" +# followed by a selector: +# target-triplet-1 ... +# effective-target-keyword +# selector-expression +# +# For a target list the result is "S" if the target is selected, "N" otherwise. +# For an xfail list the result is "F" if the target is affected, "P" otherwise. + +# In contexts that allow either "target" or "xfail" the argument can be +# target selector1 xfail selector2 +# which returns "N" if selector1 is not selected, otherwise the result of +# "xfail selector2". +# +# A selector expression appears within curly braces and uses a single logical +# operator: !, &&, or ||. An operand is another selector expression, an +# effective-target keyword, or a list of target triplets within quotes or +# curly braces. + +if { [info procs saved-dg-process-target] == [list] } { + rename dg-process-target saved-dg-process-target + + # Evaluate an operand within a selector expression. + proc selector_opd { op } { + set selector "target" + lappend selector $op + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_opd: `$op' $answer" 2 + return $answer + } + + # Evaluate a target triplet list within a selector expression. + # Unlike other operands, this needs to be expanded from a list to + # the same string as "target". + proc selector_list { op } { + set selector "target [join $op]" + set answer [ expr { [dg-process-target $selector] == "S" } ] + verbose "selector_list: `$op' $answer" 2 + return $answer + } + + # Evaluate a selector expression. + proc selector_expression { exp } { + if { [llength $exp] == 2 } { + if [string match "!" [lindex $exp 0]] { + set op1 [lindex $exp 1] + set answer [expr { ! [selector_opd $op1] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } elseif { [llength $exp] == 3 } { + set op1 [lindex $exp 0] + set opr [lindex $exp 1] + set op2 [lindex $exp 2] + if [string match "&&" $opr] { + set answer [expr { [selector_opd $op1] && [selector_opd $op2] }] + } elseif [string match "||" $opr] { + set answer [expr { [selector_opd $op1] || [selector_opd $op2] }] + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + } else { + # Assume it's a list of target triplets. + set answer [selector_list $exp] + } + + verbose "selector_expression: `$exp' $answer" 2 + return $answer + } + + # Evaluate "target selector" or "xfail selector". + + proc dg-process-target-1 { args } { + verbose "dg-process-target-1: `$args'" 2 + + # Extract the 'what' keyword from the argument list. + set selector [string trim [lindex $args 0]] + if [regexp "^xfail " $selector] { + set what "xfail" + } elseif [regexp "^target " $selector] { + set what "target" + } else { + error "syntax error in target selector \"$selector\"" + } + + # Extract the rest of the list, which might be a keyword. 
+ regsub "^${what}" $selector "" rest + set rest [string trim $rest] + + if [is-effective-target-keyword $rest] { + # The selector is an effective target keyword. + if [is-effective-target $rest] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + if [string match "{*}" $rest] { + if [selector_expression [lindex $rest 0]] { + return [expr { $what == "xfail" ? "F" : "S" }] + } else { + return [expr { $what == "xfail" ? "P" : "N" }] + } + } + + # The selector is not an effective-target keyword, so process + # the list of target triplets. + return [saved-dg-process-target $selector] + } + + # Intercept calls to the DejaGnu function. In addition to + # processing "target selector" or "xfail selector", handle + # "target selector1 xfail selector2". + + proc dg-process-target { args } { + verbose "replacement dg-process-target: `$args'" 2 + + set selector [string trim [lindex $args 0]] + + # If the argument list contains both 'target' and 'xfail', + # process 'target' and, if that succeeds, process 'xfail'. + if [regexp "^target .* xfail .*" $selector] { + set xfail_index [string first "xfail" $selector] + set xfail_selector [string range $selector $xfail_index end] + set target_selector [string range $selector 0 [expr $xfail_index-1]] + set target_selector [string trim $target_selector] + if { [dg-process-target-1 $target_selector] == "N" } { + return "N" + } + return [dg-process-target-1 $xfail_selector] + + } + return [dg-process-target-1 $selector] + } +} + +# Define libffi callbacks for dg.exp. + +proc libffi-dg-test-1 { target_compile prog do_what extra_tool_flags } { + + # To get all \n in dg-output test strings to match printf output + # in a system that outputs it as \015\012 (i.e. not just \012), we + # need to change all \n into \r?\n. As there is no dejagnu flag + # or hook to do that, we simply change the text being tested. + # Unfortunately, we have to know that the variable is called + # dg-output-text and lives in the caller of libffi-dg-test, which + # is two calls up. Overriding proc dg-output would be longer and + # would necessarily have the same assumption. + upvar 2 dg-output-text output_match + + if { [llength $output_match] > 1 } { + regsub -all "\n" [lindex $output_match 1] "\r?\n" x + set output_match [lreplace $output_match 1 1 $x] + } + + # Set up the compiler flags, based on what we're going to do. + + set options [list] + switch $do_what { + "compile" { + set compile_type "assembly" + set output_file "[file rootname [file tail $prog]].s" + } + "link" { + set compile_type "executable" + set output_file "[file rootname [file tail $prog]].exe" + # The following line is needed for targets like the i960 where + # the default output file is b.out. Sigh. + } + "run" { + set compile_type "executable" + # FIXME: "./" is to cope with "." not being in $PATH. + # Should this be handled elsewhere? + # YES. + set output_file "./[file rootname [file tail $prog]].exe" + # This is the only place where we care if an executable was + # created or not. If it was, dg.exp will try to run it. 
+ remote_file build delete $output_file; + } + default { + perror "$do_what: not a valid dg-do keyword" + return "" + } + } + + if { $extra_tool_flags != "" } { + lappend options "additional_flags=$extra_tool_flags" + } + + set comp_output [libffi_target_compile "$prog" "$output_file" "$compile_type" $options]; + + + return [list $comp_output $output_file] +} + + +proc libffi-dg-test { prog do_what extra_tool_flags } { + return [libffi-dg-test-1 target_compile $prog $do_what $extra_tool_flags] +} + +proc libffi-dg-prune { target_triplet text } { + # We get this with some qemu emulated systems (eg. ppc64le-linux-gnu) + regsub -all "(^|\n)\[^\n\]*unable to perform all requested operations" $text "" text + return $text +} + +proc libffi-init { args } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global objdir + global TOOL_OPTIONS + global tool + global libffi_include + global libffi_link_flags + global tool_root_dir + global ld_library_path + global compiler_vendor + + if ![info exists blddirffi] { + set blddirffi [pwd]/.. + } + + verbose "libffi $blddirffi" + + # Which compiler are we building with? + set tmp [grep "$blddirffi/config.log" "^ax_cv_c_compiler_vendor.*$"] + regexp -- {^[^=]*=(.*)$} $tmp nil compiler_vendor + + if { [string match $compiler_vendor "gnu"] } { + set gccdir [lookfor_file $tool_root_dir gcc/libgcc.a] + if {$gccdir != ""} { + set gccdir [file dirname $gccdir] + } + verbose "gccdir $gccdir" + + set ld_library_path "." + append ld_library_path ":${gccdir}" + + set compiler "${gccdir}/xgcc" + if { [is_remote host] == 0 && [which $compiler] != 0 } { + foreach i "[exec $compiler --print-multi-lib]" { + set mldir "" + regexp -- "\[a-z0-9=_/\.-\]*;" $i mldir + set mldir [string trimright $mldir "\;@"] + if { "$mldir" == "." } { + continue + } + if { [llength [glob -nocomplain ${gccdir}/${mldir}/libgcc_s*.so.*]] >= 1 } { + append ld_library_path ":${gccdir}/${mldir}" + } + } + } + } + + # add the library path for libffi. + append ld_library_path ":${blddirffi}/.libs" + + verbose "ld_library_path: $ld_library_path" + + # Point to the Libffi headers in libffi. + set libffi_include "${blddirffi}/include" + verbose "libffi_include $libffi_include" + + set libffi_dir "${blddirffi}/.libs" + verbose "libffi_dir $libffi_dir" + if { $libffi_dir != "" } { + set libffi_dir [file dirname ${libffi_dir}] + set libffi_link_flags "-L${libffi_dir}/.libs" + } + + set_ld_library_path_env_vars + libffi_maybe_build_wrapper "${objdir}/testglue.o" +} + +proc libffi_exit { } { + global gluefile; + + if [info exists gluefile] { + file_on_build delete $gluefile; + unset gluefile; + } +} + +proc libffi_target_compile { source dest type options } { + global gluefile wrap_flags; + global srcdir + global blddirffi + global TOOL_OPTIONS + global libffi_link_flags + global libffi_include + global target_triplet + global compiler_vendor + + if { [target_info needs_status_wrapper]!="" && [info exists gluefile] } { + lappend options "libs=${gluefile}" + lappend options "ldflags=$wrap_flags" + } + + # TOOL_OPTIONS must come first, so that it doesn't override testcase + # specific options. + if [info exists TOOL_OPTIONS] { + lappend options "additional_flags=$TOOL_OPTIONS" + } + + # search for ffi_mips.h in srcdir, too + lappend options "additional_flags=-I${libffi_include} -I${srcdir}/../include -I${libffi_include}/.." + lappend options "additional_flags=${libffi_link_flags}" + + # Darwin needs a stack execution allowed flag. 
+ + if { [istarget "*-*-darwin9*"] || [istarget "*-*-darwin1*"] + || [istarget "*-*-darwin2*"] } { + lappend options "additional_flags=-Wl,-allow_stack_execute" + } + + # If you're building the compiler with --prefix set to a place + # where it's not yet installed, then the linker won't be able to + # find the libgcc used by libffi.dylib. We could pass the + # -dylib_file option, but that's complicated, and it's much easier + # to just make the linker find libgcc using -L options. + if { [string match "*-*-darwin*" $target_triplet] } { + lappend options "libs= -shared-libgcc" + } + + if { [string match "*-*-openbsd*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + lappend options "libs= -lffi" + + if { [string match "aarch64*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + # this may be required for g++, but just confused clang. + if { [string match "*.cc" $source] } { + lappend options "c++" + } + + if { [string match "arc*-*-linux*" $target_triplet] } { + lappend options "libs= -lpthread" + } + + verbose "options: $options" + return [target_compile $source $dest $type $options] +} + +# TEST should be a preprocessor condition. Returns true if it holds. +proc libffi_feature_test { test } { + set src "ffitest[pid].c" + + set f [open $src "w"] + puts $f "#include " + puts $f $test + puts $f "/* OK */" + puts $f "#else" + puts $f "# error Failed $test" + puts $f "#endif" + close $f + + set lines [libffi_target_compile $src /dev/null assembly ""] + file delete $src + + return [string match "" $lines] +} + +# Utility routines. + +# +# search_for -- looks for a string match in a file +# +proc search_for { file pattern } { + set fd [open $file r] + while { [gets $fd cur_line]>=0 } { + if [string match "*$pattern*" $cur_line] then { + close $fd + return 1 + } + } + close $fd + return 0 +} + +# Modified dg-runtest that can cycle through a list of optimization options +# as c-torture does. +proc libffi-dg-runtest { testcases default-extra-flags } { + global runtests + + foreach test $testcases { + # If we're only testing specific files and this isn't one of + # them, skip it. + if ![runtest_file_p $runtests $test] { + continue + } + + # Look for a loop within the source code - if we don't find one, + # don't pass -funroll[-all]-loops. + global torture_with_loops torture_without_loops + if [expr [search_for $test "for*("]+[search_for $test "while*("]] { + set option_list $torture_with_loops + } else { + set option_list $torture_without_loops + } + + set nshort [file tail [file dirname $test]]/[file tail $test] + + foreach flags $option_list { + verbose "Testing $nshort, $flags" 1 + dg-test $test $flags ${default-extra-flags} + } + } +} + +proc run-many-tests { testcases extra_flags } { + global compiler_vendor + global has_gccbug + global env + switch $compiler_vendor { + "clang" { + set common "-W -Wall" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + "gnu" { + set common "-W -Wall -Wno-psabi" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "-O0" "-O2" } + } + } + default { + # Assume we are using the vendor compiler. 
+ set common "" + if [info exists env(LIBFFI_TEST_OPTIMIZATION)] { + set optimizations [ list $env(LIBFFI_TEST_OPTIMIZATION) ] + } else { + set optimizations { "" } + } + } + } + + info exists env(LD_LIBRARY_PATH) + + set targetabis { "" } + if [string match $compiler_vendor "gnu"] { + if [libffi_feature_test "#ifdef __i386__"] { + set targetabis { + "" + "-DABI_NUM=FFI_STDCALL -DABI_ATTR=__STDCALL__" + "-DABI_NUM=FFI_THISCALL -DABI_ATTR=__THISCALL__" + "-DABI_NUM=FFI_FASTCALL -DABI_ATTR=__FASTCALL__" + } + } elseif { [istarget "x86_64-*-*"] \ + && [libffi_feature_test "#if !defined __ILP32__ \ + && !defined __i386__"] } { + set targetabis { + "" + "-DABI_NUM=FFI_GNUW64 -DABI_ATTR=__MSABI__" + } + } + } + + set common [ concat $common $extra_flags ] + foreach test $testcases { + set testname [file tail $test] + if [search_for $test "ABI_NUM"] { + set abis $targetabis + } else { + set abis { "" } + } + foreach opt $optimizations { + foreach abi $abis { + set options [concat $common $opt $abi] + set has_gccbug false; + if { [string match $compiler_vendor "gnu"] \ + && [string match "*MSABI*" $abi] \ + && ( ( [string match "*DGTEST=57 *" $common] \ + && [string match "*call.c*" $testname] ) \ + || ( [string match "*DGTEST=54 *" $common] \ + && [string match "*callback*" $testname] ) \ + || [string match "*DGTEST=55 *" $common] \ + || [string match "*DGTEST=56 *" $common] ) } then { + if [libffi_feature_test "#if (__GNUC__ < 9) || ((__GNUC__ == 9) && (__GNUC_MINOR__ < 3))"] { + set has_gccbug true; + } + } + verbose "Testing $testname, $options" 1 + verbose "has_gccbug = $has_gccbug" 1 + dg-test $test $options "" + } + } + } +} + +# Like check_conditional_xfail, but callable from a dg test. + +proc dg-xfail-if { args } { + set args [lreplace $args 0 0] + set selector "target [join [lindex $args 1]]" + if { [dg-process-target $selector] == "S" } { + global compiler_conditional_xfail_data + set compiler_conditional_xfail_data $args + } +} + +proc check-flags { args } { + + # The args are within another list; pull them out. + set args [lindex $args 0] + + # The next two arguments are optional. If they were not specified, + # use the defaults. + if { [llength $args] == 2 } { + lappend $args [list "*"] + } + if { [llength $args] == 3 } { + lappend $args [list ""] + } + + # If the option strings are the defaults, or the same as the + # defaults, there is no need to call check_conditional_xfail to + # compare them to the actual options. + if { [string compare [lindex $args 2] "*"] == 0 + && [string compare [lindex $args 3] "" ] == 0 } { + set result 1 + } else { + # The target list might be an effective-target keyword, so replace + # the original list with "*-*-*", since we already know it matches. + set result [check_conditional_xfail [lreplace $args 1 1 "*-*-*"]] + } + + return $result +} + +proc dg-skip-if { args } { + # Verify the number of arguments. The last two are optional. + set args [lreplace $args 0 0] + if { [llength $args] < 2 || [llength $args] > 4 } { + error "dg-skip-if 2: need 2, 3, or 4 arguments" + } + + # Don't bother if we're already skipping the test. + upvar dg-do-what dg-do-what + if { [lindex ${dg-do-what} 1] == "N" } { + return + } + + set selector [list target [lindex $args 1]] + if { [dg-process-target $selector] == "S" } { + if [check-flags $args] { + upvar dg-do-what dg-do-what + set dg-do-what [list [lindex ${dg-do-what} 0] "N" "P"] + } + } +} + +# We need to make sure that additional_files and additional_sources +# are both cleared out after every test. 
It is not enough to clear +# them out *before* the next test run because gcc-target-compile gets +# run directly from some .exp files (outside of any test). (Those +# uses should eventually be eliminated.) + +# Because the DG framework doesn't provide a hook that is run at the +# end of a test, we must replace dg-test with a wrapper. + +if { [info procs saved-dg-test] == [list] } { + rename dg-test saved-dg-test + + proc dg-test { args } { + global additional_files + global additional_sources + global errorInfo + + if { [ catch { eval saved-dg-test $args } errmsg ] } { + set saved_info $errorInfo + set additional_files "" + set additional_sources "" + error $errmsg $saved_info + } + set additional_files "" + set additional_sources "" + } +} + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp new file mode 100644 index 0000000..6b7beba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/target-libpath.exp @@ -0,0 +1,283 @@ +# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# This file was contributed by John David Anglin (dave.anglin@nrc-cnrc.gc.ca) + +set orig_environment_saved 0 +set orig_ld_library_path_saved 0 +set orig_ld_run_path_saved 0 +set orig_shlib_path_saved 0 +set orig_ld_libraryn32_path_saved 0 +set orig_ld_library64_path_saved 0 +set orig_ld_library_path_32_saved 0 +set orig_ld_library_path_64_saved 0 +set orig_dyld_library_path_saved 0 +set orig_path_saved 0 + +####################################### +# proc set_ld_library_path_env_vars { } +####################################### + +proc set_ld_library_path_env_vars { } { + global ld_library_path + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + global GCC_EXEC_PREFIX + + # Set the relocated compiler prefix, but only if the user hasn't specified one. + if { [info exists GCC_EXEC_PREFIX] && ![info exists env(GCC_EXEC_PREFIX)] } { + setenv GCC_EXEC_PREFIX "$GCC_EXEC_PREFIX" + } + + # Setting the ld library path causes trouble when testing cross-compilers. 
+ if { [is_remote target] } { + return + } + + if { $orig_environment_saved == 0 } { + global env + + set orig_environment_saved 1 + + # Save the original environment. + if [info exists env(LD_LIBRARY_PATH)] { + set orig_ld_library_path "$env(LD_LIBRARY_PATH)" + set orig_ld_library_path_saved 1 + } + if [info exists env(LD_RUN_PATH)] { + set orig_ld_run_path "$env(LD_RUN_PATH)" + set orig_ld_run_path_saved 1 + } + if [info exists env(SHLIB_PATH)] { + set orig_shlib_path "$env(SHLIB_PATH)" + set orig_shlib_path_saved 1 + } + if [info exists env(LD_LIBRARYN32_PATH)] { + set orig_ld_libraryn32_path "$env(LD_LIBRARYN32_PATH)" + set orig_ld_libraryn32_path_saved 1 + } + if [info exists env(LD_LIBRARY64_PATH)] { + set orig_ld_library64_path "$env(LD_LIBRARY64_PATH)" + set orig_ld_library64_path_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_32)] { + set orig_ld_library_path_32 "$env(LD_LIBRARY_PATH_32)" + set orig_ld_library_path_32_saved 1 + } + if [info exists env(LD_LIBRARY_PATH_64)] { + set orig_ld_library_path_64 "$env(LD_LIBRARY_PATH_64)" + set orig_ld_library_path_64_saved 1 + } + if [info exists env(DYLD_LIBRARY_PATH)] { + set orig_dyld_library_path "$env(DYLD_LIBRARY_PATH)" + set orig_dyld_library_path_saved 1 + } + if [info exists env(PATH)] { + set orig_path "$env(PATH)" + set orig_path_saved 1 + } + } + + # We need to set ld library path in the environment. Currently, + # unix.exp doesn't set the environment correctly for all systems. + # It only sets SHLIB_PATH and LD_LIBRARY_PATH when it executes a + # program. We also need the environment set for compilations, etc. + # + # On IRIX 6, we have to set variables akin to LD_LIBRARY_PATH, but + # called LD_LIBRARYN32_PATH (for the N32 ABI) and LD_LIBRARY64_PATH + # (for the 64-bit ABI). The same applies to Darwin (DYLD_LIBRARY_PATH), + # Solaris 32 bit (LD_LIBRARY_PATH_32), Solaris 64 bit (LD_LIBRARY_PATH_64), + # and HP-UX (SHLIB_PATH). In some cases, the variables are independent + # of LD_LIBRARY_PATH, and in other cases LD_LIBRARY_PATH is used if the + # variable is not defined. + # + # Doing this is somewhat of a hack as ld_library_path gets repeated in + # SHLIB_PATH and LD_LIBRARY_PATH when unix_load sets these variables. + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH "$ld_library_path" + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$ld_library_path:$orig_ld_run_path" + } else { + setenv LD_RUN_PATH "$ld_library_path" + } + # The default shared library dynamic path search for 64-bit + # HP-UX executables searches LD_LIBRARY_PATH before SHLIB_PATH. + # LD_LIBRARY_PATH isn't used for 32-bit executables. Thus, we + # set LD_LIBRARY_PATH and SHLIB_PATH as if they were independent. 
+ if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$ld_library_path:$orig_shlib_path" + } else { + setenv SHLIB_PATH "$ld_library_path" + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_libraryn32_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARYN32_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARYN32_PATH "$ld_library_path" + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library64_path" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY64_PATH "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY64_PATH "$ld_library_path" + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path_32" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_32 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_32 "$ld_library_path" + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path_64" + } elseif { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH_64 "$ld_library_path:$orig_ld_library_path" + } else { + setenv LD_LIBRARY_PATH_64 "$ld_library_path" + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$ld_library_path:$orig_dyld_library_path" + } else { + setenv DYLD_LIBRARY_PATH "$ld_library_path" + } + if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } { + if { $orig_path_saved } { + setenv PATH "$ld_library_path:$orig_path" + } else { + setenv PATH "$ld_library_path" + } + } + + verbose -log "set_ld_library_path_env_vars: ld_library_path=$ld_library_path" +} + +####################################### +# proc restore_ld_library_path_env_vars { } +####################################### + +proc restore_ld_library_path_env_vars { } { + global orig_environment_saved + global orig_ld_library_path_saved + global orig_ld_run_path_saved + global orig_shlib_path_saved + global orig_ld_libraryn32_path_saved + global orig_ld_library64_path_saved + global orig_ld_library_path_32_saved + global orig_ld_library_path_64_saved + global orig_dyld_library_path_saved + global orig_path_saved + global orig_ld_library_path + global orig_ld_run_path + global orig_shlib_path + global orig_ld_libraryn32_path + global orig_ld_library64_path + global orig_ld_library_path_32 + global orig_ld_library_path_64 + global orig_dyld_library_path + global orig_path + + if { $orig_environment_saved == 0 } { + return + } + + if { $orig_ld_library_path_saved } { + setenv LD_LIBRARY_PATH "$orig_ld_library_path" + } elseif [info exists env(LD_LIBRARY_PATH)] { + unsetenv LD_LIBRARY_PATH + } + if { $orig_ld_run_path_saved } { + setenv LD_RUN_PATH "$orig_ld_run_path" + } elseif [info exists env(LD_RUN_PATH)] { + unsetenv LD_RUN_PATH + } + if { $orig_shlib_path_saved } { + setenv SHLIB_PATH "$orig_shlib_path" + } elseif [info exists env(SHLIB_PATH)] { + unsetenv SHLIB_PATH + } + if { $orig_ld_libraryn32_path_saved } { + setenv LD_LIBRARYN32_PATH "$orig_ld_libraryn32_path" + } elseif [info exists env(LD_LIBRARYN32_PATH)] { + unsetenv LD_LIBRARYN32_PATH + } + if { $orig_ld_library64_path_saved } { + setenv LD_LIBRARY64_PATH "$orig_ld_library64_path" + } elseif [info exists env(LD_LIBRARY64_PATH)] { + unsetenv LD_LIBRARY64_PATH + } + if { $orig_ld_library_path_32_saved } { + setenv LD_LIBRARY_PATH_32 "$orig_ld_library_path_32" + } elseif [info exists env(LD_LIBRARY_PATH_32)] { + 
unsetenv LD_LIBRARY_PATH_32 + } + if { $orig_ld_library_path_64_saved } { + setenv LD_LIBRARY_PATH_64 "$orig_ld_library_path_64" + } elseif [info exists env(LD_LIBRARY_PATH_64)] { + unsetenv LD_LIBRARY_PATH_64 + } + if { $orig_dyld_library_path_saved } { + setenv DYLD_LIBRARY_PATH "$orig_dyld_library_path" + } elseif [info exists env(DYLD_LIBRARY_PATH)] { + unsetenv DYLD_LIBRARY_PATH + } + if { $orig_path_saved } { + setenv PATH "$orig_path" + } elseif [info exists env(PATH)] { + unsetenv PATH + } +} + +####################################### +# proc get_shlib_extension { } +####################################### + +proc get_shlib_extension { } { + global shlib_ext + + if { [ istarget *-*-darwin* ] } { + set shlib_ext "dylib" + } elseif { [ istarget *-*-cygwin* ] || [ istarget *-*-mingw* ] } { + set shlib_ext "dll" + } elseif { [ istarget hppa*-*-hpux* ] } { + set shlib_ext "sl" + } else { + set shlib_ext "so" + } + return $shlib_ext +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/wrapper.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/wrapper.exp new file mode 100644 index 0000000..4e5ae43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/lib/wrapper.exp @@ -0,0 +1,45 @@ +# Copyright (C) 2004, 2007 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +# This file contains GCC-specifics for status wrappers for test programs. + +# ${tool}_maybe_build_wrapper -- Build wrapper object if the target +# needs it. FILENAME is the path to the wrapper file. If there are +# additional arguments, they are command-line options to provide to +# the compiler when compiling FILENAME. + +proc ${tool}_maybe_build_wrapper { filename args } { + global gluefile wrap_flags + + if { [target_info needs_status_wrapper] != "" \ + && [target_info needs_status_wrapper] != "0" \ + && ![info exists gluefile] } { + set saved_wrap_compile_flags [target_info wrap_compile_flags] + set flags [join $args " "] + # The wrapper code may contain code that gcc objects on. This + # became true for dejagnu-1.4.4. The set of warnings and code + # that gcc objects on may change, so just make sure -w is always + # passed to turn off all warnings. 
+ set_currtarget_info wrap_compile_flags \ + "$saved_wrap_compile_flags -w $flags" + set result [build_wrapper $filename] + set_currtarget_info wrap_compile_flags "$saved_wrap_compile_flags" + if { $result != "" } { + set gluefile [lindex $result 0] + set wrap_flags [lindex $result 1] + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile new file mode 100644 index 0000000..3322de9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/Makefile @@ -0,0 +1,28 @@ +CC = gcc +CFLAGS = -O2 -Wall +prefix = +includedir = $(prefix)/include +libdir = $(prefix)/lib +CPPFLAGS = -I$(includedir) +LDFLAGS = -L$(libdir) -Wl,-rpath,$(libdir) + +all: check-call check-callback + +test-call: test-call.c testcases.c + $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-call test-call.c -lffi + +test-callback: test-callback.c testcases.c + $(CC) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o test-callback test-callback.c -lffi + +check-call: test-call + ./test-call > test-call.out + LC_ALL=C uniq -u < test-call.out > failed-call + test '!' -s failed-call + +check-callback: test-callback + ./test-callback > test-callback.out + LC_ALL=C uniq -u < test-callback.out > failed-callback + test '!' -s failed-callback + +clean: + rm -f test-call test-callback test-call.out test-callback.out failed-call failed-callback diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/README b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/README new file mode 100644 index 0000000..be8540b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/README @@ -0,0 +1,78 @@ +This package contains a test suite for libffi. + +This test suite can be compiled with a C compiler. No need for 'expect' +or some other package that is often not installed. + +The test suite consists of 81 C functions, each with a different signature. +* test-call verifies that calling each function directly produces the same + results as calling the function indirectly through 'ffi_call'. +* test-callback verifies that calling each function directly produces the same + results as calling a function that is a callback (object build by + 'ffi_prep_closure_loc') and simulates the original function. + +Each direct or indirect invocation should produce one line of output to +stdout. A correct output consists of paired lines, such as + +void f(void): +void f(void): +int f(void):->99 +int f(void):->99 +int f(int):(1)->2 +int f(int):(1)->2 +int f(2*int):(1,2)->3 +int f(2*int):(1,2)->3 +... + +The Makefile then creates two files: +* failed-call, which consists of the non-paired lines of output of + 'test-call', +* failed-callback, which consists of the non-paired lines of output of + 'test-callback'. + +The test suite passes if both failed-call and failed-callback come out +as empty. + + +How to use the test suite +------------------------- + +1. Modify the Makefile's variables + prefix = the directory in which libffi was installed + CC = the C compiler, often with options such as "-m32" or "-m64" + that enforce a certain ABI, + CFLAGS = optimization options (need to change them only for non-GCC + compilers) +2. Run "make". If it fails already in "test-call", run also + "make check-callback". +3. If this failed, inspect the output files. 
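The paired-output convention described in the vendored README above boils down to calling each test function directly and then again through ffi_call, printing one line per invocation. As a rough sketch of that pattern only (not code from the suite; the function add1 and its signature are made up for illustration), a standalone check against the public libffi API might look like this:

#include <stdio.h>
#include <ffi.h>

/* Hypothetical test function; the real suite generates 81 of these. */
static int add1(int x) { return x + 1; }

int main(void)
{
  ffi_cif cif;
  ffi_type *argtypes[1] = { &ffi_type_sint };
  int arg = 1;
  void *argvalues[1] = { &arg };
  ffi_arg ret;                       /* integer returns are widened to ffi_arg */

  int direct = add1(arg);            /* direct invocation */

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) != FFI_OK)
    return 1;
  ffi_call(&cif, FFI_FN(add1), &ret, argvalues);
  int indirect = (int) ret;          /* invocation through libffi */

  /* One line per invocation; identical pairs mean the call matched. */
  printf("int f(int):(%d)->%d\n", arg, direct);
  printf("int f(int):(%d)->%d\n", arg, indirect);
  return direct == indirect ? 0 : 1;
}

Compiled with the same -I/-L settings as the Makefile above and linked with -lffi, this prints a pair of identical lines on success; running uniq -u over such output is what isolates mismatches into the failed-call file.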
+ + +How to interpret the results +---------------------------- + +The failed-call and failed-callback files consist of paired lines: +The first line is the result of the direct invocation. +The second line is the result of invocation through libffi. + +For example, this output + +uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->255 +uchar f(uchar,ushort,uint,ulong):(97,2,3,4)->0 + +indicates that the arguments were passed correctly, but the return +value came out wrong. + +And this output + +float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,6,7,8,561,1105,1729,2465,2821,6601)->15319.1 +float f(17*float,3*int,L):(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,-140443648,10,268042216,-72537980,-140443648,-140443648,-140443648,-140443648,-140443648)->-6.47158e+08 + +indicates that integer arguments that come after 17 floating-point arguments +were not passed correctly. + + +Credits +------- + +The test suite is based on the one of GNU libffcall-2.0. +Authors: Bill Triggs, Bruno Haible diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h new file mode 100644 index 0000000..00604a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/alignof.h @@ -0,0 +1,50 @@ +/* Determine alignment of types. + Copyright (C) 2003-2004, 2006, 2009-2017 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see . */ + +#ifndef _ALIGNOF_H +#define _ALIGNOF_H + +#include + +/* alignof_slot (TYPE) + Determine the alignment of a structure slot (field) of a given type, + at compile time. Note that the result depends on the ABI. + This is the same as alignof (TYPE) and _Alignof (TYPE), defined in + if __alignof_is_defined is 1. + Note: The result cannot be used as a value for an 'enum' constant, + due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. */ +#if defined __cplusplus + template struct alignof_helper { char __slot1; type __slot2; }; +# define alignof_slot(type) offsetof (alignof_helper, __slot2) +#else +# define alignof_slot(type) offsetof (struct { char __slot1; type __slot2; }, __slot2) +#endif + +/* alignof_type (TYPE) + Determine the good alignment of an object of the given type at compile time. + Note that this is not necessarily the same as alignof_slot(type). + For example, with GNU C on x86 platforms: alignof_type(double) = 8, but + - when -malign-double is not specified: alignof_slot(double) = 4, + - when -malign-double is specified: alignof_slot(double) = 8. + Note: The result cannot be used as a value for an 'enum' constant, + due to bugs in HP-UX 10.20 cc and AIX 3.2.5 xlc. 
*/ +#if defined __GNUC__ || defined __IBM__ALIGNOF__ +# define alignof_type __alignof__ +#else +# define alignof_type alignof_slot +#endif + +#endif /* _ALIGNOF_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp new file mode 100644 index 0000000..44aebc5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/bhaible.exp @@ -0,0 +1,63 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2018 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir +global compiler_vendor + +# The conversion of this testsuite into a dejagnu compatible testsuite +# was done in a pretty lazy fashion, and requires the use of compiler +# flags to disable warnings for now. +if { [string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-unused-but-set-variable -Wno-uninitialized"; +} +if { [string match $compiler_vendor "microsoft"] } { + # -wd4996 suggest use of vsprintf_s instead of vsprintf + # -wd4116 unnamed type definition + # -wd4101 unreferenced local variable + # -wd4244 warning about implicit double to float conversion + set warning_options "-wd4996 -wd4116 -wd4101 -wd4244"; +} +if { ![string match $compiler_vendor "microsoft"] && ![string match $compiler_vendor "gnu"] } { + set warning_options "-Wno-unused-variable -Wno-unused-parameter -Wno-uninitialized"; +} + + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-call.c]] + +for {set i 1} {$i < 82} {incr i} { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/test-callback.c]] + +for {set i 1} {$i < 81} {incr i} { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist [format "-DDGTEST=%d %s" $i $warning_options] + } else { + foreach test $tlist { + unsupported [format "%s -DDGTEST=%d %s" $test $i $warning_options] + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c new file mode 100644 index 0000000..cf9219e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-call.c @@ -0,0 +1,1745 @@ +/** + Copyright 1993 Bill Triggs + Copyright 1995-2017 Bruno Haible + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +**/ + +/* { dg-do run { xfail gccbug } } */ + +#include +#include +#include +#include +#include "alignof.h" +#include + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() +#define FFI_CALL(cif,fn,args,retaddr) \ + ffi_call(&(cif),(void(*)(void))(fn),retaddr,args) + +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +void + void_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 1 + v_v(); + clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + { + FFI_CALL(cif,v_v,NULL,NULL); + } + } +#endif + return; +} +void + int_tests (void) +{ + int ir; + ffi_arg retvalue; +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + { + FFI_CALL(cif,i_v,NULL,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; 
clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1 }; + FFI_CALL(cif,i_i,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2 }; + FFI_CALL(cif,i_i2,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4 }; + FFI_CALL(cif,i_i4,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8 }; + FFI_CALL(cif,i_i8,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &i4, &i5, &i6, &i7, &i8, &i9, &i10, &i11, &i12, &i13, &i14, &i15, &i16 }; + FFI_CALL(cif,i_i16,args,&retvalue); + ir = retvalue; + } + } + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + + return; +} +void + float_tests (void) +{ + float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1 }; + FFI_CALL(cif,f_f,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2 }; + FFI_CALL(cif,f_f2,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4 }; + FFI_CALL(cif,f_f4,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8 }; + FFI_CALL(cif,f_f8,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16 }; + FFI_CALL(cif,f_f16,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &f18, &f19, &f20, &f21, &f22, &f23, &f24 }; + FFI_CALL(cif,f_f24,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +} +void + double_tests (void) +{ + double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1 }; + FFI_CALL(cif,d_d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2 }; + FFI_CALL(cif,d_d2,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4 }; + FFI_CALL(cif,d_d4,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8 }; + FFI_CALL(cif,d_d8,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16 }; + FFI_CALL(cif,d_d16,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + pointer_tests (void) +{ + void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + { + void* puc1 = &uc1; + void* pd2 = &d2; + void* pstr3 = str3; + void* pI4 = &I4; + /*const*/ void* args[] = { &puc1, &pd2, &pstr3, &pI4 }; + FFI_CALL(cif,vp_vpdpcpsp,args,&vpr); + } + } + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + return; +} +void + mixed_number_tests (void) +{ + uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + + /* Unsigned types. + */ +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1, us2, ui3, ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + { + ffi_arg r; + /*const*/ void* args[] = { &uc1, &us2, &ui3, &ul4 }; + FFI_CALL(cif,uc_ucsil,args,&r); + ucr = (uchar) r; + } + } + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + /* Mixed int & float types. 
+ */ + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &d3, &d4 }; + FFI_CALL(cif,d_iidd,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &d4, &i5 }; + FFI_CALL(cif,d_iiidi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &i1, &d2, &i3, &d4 }; + FFI_CALL(cif,d_idid,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &f1, &d2, &i3 }; + FFI_CALL(cif,d_fdi,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + { + ffi_arg rint; + /*const*/ void* args[] = { &c1, &d2, &c3, &d4 }; + FFI_CALL(cif,us_cdcd,args,&rint); + usr = (ushort) rint; + } + } + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + /* Long long types. 
+ */ + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &i1, &i2, &i3, &ll1, &i13 }; + FFI_CALL(cif,ll_iiilli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &f13, &ll1, &i13 }; + FFI_CALL(cif,ll_flli,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &i9 }; + FFI_CALL(cif,f_fi,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &i9 }; + FFI_CALL(cif,f_f2i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &i9 }; + FFI_CALL(cif,f_f3i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &i9 }; + FFI_CALL(cif,f_f4i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &i9 }; + FFI_CALL(cif,f_f7i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &i9 }; + FFI_CALL(cif,f_f8i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f12i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &i9 }; + FFI_CALL(cif,f_f12i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &i9 }; + FFI_CALL(cif,f_f13i,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &i9 }; + FFI_CALL(cif,d_di,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &i9 }; + FFI_CALL(cif,d_d2i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &i9 }; + FFI_CALL(cif,d_d3i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &i9 }; + FFI_CALL(cif,d_d4i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = 
d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &i9 }; + FFI_CALL(cif,d_d7i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &i9 }; + FFI_CALL(cif,d_d8i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &i9 }; + FFI_CALL(cif,d_d12i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 43 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &i9 }; + FFI_CALL(cif,d_d13i,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} +void + small_structure_return_tests (void) +{ +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + { + FFI_CALL(cif,S1_v,NULL,&r); + } + } + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + 
ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + { + FFI_CALL(cif,S2_v,NULL,&r); + } + } + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size3 r = S3_v(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + { + FFI_CALL(cif,S3_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + { + FFI_CALL(cif,S4_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + { + FFI_CALL(cif,S7_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + { + FFI_CALL(cif,S8_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, 
&ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + { + FFI_CALL(cif,S12_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + { + FFI_CALL(cif,S15_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif +#if (!defined(DGTEST)) || DGTEST == 52 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + { + FFI_CALL(cif,S16_v,NULL,&r); + } + } + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif +} +void + structure_tests (void) +{ + Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + { + /*const*/ void* args[] = { &I1, &I2, &I3 }; + FFI_CALL(cif,I_III,args,&Ir); + } + } + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 54 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + 
fflush(out); + Cr.x = '\0'; clear_traces(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + { + /*const*/ void* args[] = { &C1, &d2, &C3 }; + FFI_CALL(cif,C_CdC,args,&Cr); + } + } + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 55 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + { + /*const*/ void* args[] = { &F1, &f2, &d3 }; + FFI_CALL(cif,F_Ffd,args,&Fr); + } + } + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &f1, &D2, &d3 }; + FFI_CALL(cif,D_fDd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 57 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + { + /*const*/ void* args[] = { &D1, &f2, &d3 }; + FFI_CALL(cif,D_Dfd,args,&Dr); + } + } + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 58 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + { + /*const*/ void* args[] = { &J1, &i2, &J2 }; + FFI_CALL(cif,J_JiJ,args,&Jr); + } + } + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 59 + Tr = T_TcT(T1,' ',T2); + 
fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + { + char space = ' '; + /*const*/ void* args[] = { &T1, &space, &T2 }; + FFI_CALL(cif,T_TcT,args,&Tr); + } + } + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 60 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + { + /*const*/ void* args[] = { &B1, &c2, &d3, &B2 }; + FFI_CALL(cif,X_BcdB,args,&Xr); + } + } + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif + + return; +} + +void + gpargs_boundary_tests (void) +{ + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &K1, &l9 }; + FFI_CALL(cif,l_l0K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &K1, &l9 }; + FFI_CALL(cif,l_l1K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &K1, &l9 }; + FFI_CALL(cif,l_l2K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { 
&ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &K1, &l9 }; + FFI_CALL(cif,l_l3K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &K1, &l9 }; + FFI_CALL(cif,l_l4K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &K1, &l9 }; + FFI_CALL(cif,l_l5K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 67 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &K1, &l9 }; + FFI_CALL(cif,l_l6K,args,&lr); + } + } + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 68 + fr = f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + { + /*const*/ void* args[] = { &f1, &f2, &f3, &f4, &f5, &f6, &f7, &f8, &f9, &f10, &f11, &f12, &f13, &f14, &f15, &f16, &f17, &l6, &l7, &l8, &L1 }; + FFI_CALL(cif,f_f17l3L,args,&fr); + } + } + fprintf(out,"->%g\n",fr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 69 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &d1, &d2, &d3, &d4, &d5, &d6, &d7, &d8, &d9, &d10, &d11, &d12, &d13, &d14, &d15, &d16, &d17, &l6, &l7, &l8, &L1 }; + 
FFI_CALL(cif,d_d17l3L,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &ll1, &l9 }; + FFI_CALL(cif,ll_l2ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &ll1, &l9 }; + FFI_CALL(cif,ll_l3ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &ll1, &l9 }; + FFI_CALL(cif,ll_l4ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &ll1, &l9 }; + FFI_CALL(cif,ll_l5ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &ll1, &l9 }; + FFI_CALL(cif,ll_l6ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 75 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + { + /*const*/ 
void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &ll1, &l9 }; + FFI_CALL(cif,ll_l7ll,args,&llr); + } + } + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l2d(l1,l2,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &d2, &l9 }; + FFI_CALL(cif,d_l2d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l3d(l1,l2,l3,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &d2, &l9 }; + FFI_CALL(cif,d_l3d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l4d(l1,l2,l3,l4,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &d2, &l9 }; + FFI_CALL(cif,d_l4d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l5d(l1,l2,l3,l4,l5,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &d2, &l9 }; + FFI_CALL(cif,d_l5d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l6d(l1,l2,l3,l4,l5,l6,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &d2, &l9 }; + FFI_CALL(cif,d_l6d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif +#if (!defined(DGTEST)) || DGTEST == 81 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,d2,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + { + /*const*/ void* args[] = { &l1, &l2, &l3, &l4, &l5, &l6, &l7, &d2, &l9 }; + FFI_CALL(cif,d_l7d,args,&dr); + } + } + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + return; +} + +int + main (void) +{ + ffi_type_char = (char)(-1) < 0 ? 
ffi_type_schar : ffi_type_uchar; + out = stdout; + + void_tests(); + int_tests(); + float_tests(); + double_tests(); + pointer_tests(); + mixed_number_tests(); + small_structure_return_tests(); + structure_tests(); + gpargs_boundary_tests(); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c new file mode 100644 index 0000000..0b16799 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/test-callback.c @@ -0,0 +1,2885 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* { dg-do run { xfail gccbug } } */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include "alignof.h" +#include <ffi.h> + +/* libffi testsuite local changes -------------------------------- */ +#ifdef DGTEST +/* Redefine exit(1) as a test failure */ +#define exit(V) (void)((V) ? (abort(), 1) : exit(0)) +int count = 0; +char rbuf1[2048]; +char rbuf2[2048]; +int _fprintf(FILE *stream, const char *format, ...) +{ + va_list args; + va_start(args, format); + + switch (count++) + { + case 0: + case 1: + vsprintf(&rbuf1[strlen(rbuf1)], format, args); + break; + case 2: + printf("%s", rbuf1); + vsprintf(rbuf2, format, args); + break; + case 3: + vsprintf(&rbuf2[strlen(rbuf2)], format, args); + printf("%s", rbuf2); + if (strcmp (rbuf1, rbuf2)) abort(); + break; + } + + va_end(args); + + return 0; +} +#define fprintf _fprintf +#endif +/* --------------------------------------------------------------- */ + +#include "testcases.c" + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#endif + +/* Definitions that ought to be part of libffi. */ +static ffi_type ffi_type_char; +#define ffi_type_slonglong ffi_type_sint64 +#define ffi_type_ulonglong ffi_type_uint64 + +/* libffi does not support arrays inside structs. */ +#define SKIP_EXTRA_STRUCTS + +#define FFI_PREP_CIF(cif,argtypes,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,sizeof(argtypes)/sizeof(argtypes[0]),&rettype,argtypes) != FFI_OK) abort() +#define FFI_PREP_CIF_NOARGS(cif,rettype) \ + if (ffi_prep_cif(&(cif),ABI_NUM,0,&rettype,NULL) != FFI_OK) abort() + +#if defined(__sparc__) && defined(__sun) && defined(__SUNPRO_C) /* SUNWspro cc */ +/* SunPRO cc miscompiles the simulator function for X_BcdB: d.i[1] is + * temporarily stored in %l2 and put onto the stack from %l2, but in between + * the copy of X has used %l2 as a counter without saving and restoring its + * value. + */ +#define SKIP_X +#endif +#if defined(__mipsn32__) && !defined(__GNUC__) +/* The X test crashes for an unknown reason. */ +#define SKIP_X +#endif + + +/* These functions simulate the behaviour of the functions defined in testcases.c.
*/ + +/* void tests */ +void v_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&v_v) { fprintf(out,"wrong data for v_v\n"); exit(1); } + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +void i_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_v) { fprintf(out,"wrong data for i_v\n"); exit(1); } + {int r=99; + fprintf(out,"int f(void):"); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i) { fprintf(out,"wrong data for i_i\n"); exit(1); } + int a = *(int*)(*args++); + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + *(ffi_arg*)retp = r; +} +void i_i2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i2) { fprintf(out,"wrong data for i_i2\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i4) { fprintf(out,"wrong data for i_i4\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i8) { fprintf(out,"wrong data for i_i8\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + *(ffi_arg*)retp = r; +}} +void i_i16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&i_i16) { fprintf(out,"wrong data for i_i16\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + int d = *(int*)(*args++); + int e = *(int*)(*args++); + int f = *(int*)(*args++); + int g = *(int*)(*args++); + int h = *(int*)(*args++); + int i = *(int*)(*args++); + int j = *(int*)(*args++); + int k = *(int*)(*args++); + int l = *(int*)(*args++); + int m = *(int*)(*args++); + int n = *(int*)(*args++); + int o = *(int*)(*args++); + int p = *(int*)(*args++); + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(ffi_arg*)retp = r; +}} + +/* float tests */ +void f_f_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f) { fprintf(out,"wrong data for f_f\n"); exit(1); } + {float a = *(float*)(*args++); + float r=a+1.0; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + *(float*)retp = r; +}} +void f_f2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2) { fprintf(out,"wrong data for f_f2\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + 
*(float*)retp = r; +}} +void f_f4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4) { fprintf(out,"wrong data for f_f4\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(float*)retp = r; +}} +void f_f8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8) { fprintf(out,"wrong data for f_f8\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(float*)retp = r; +}} +void f_f16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f16) { fprintf(out,"wrong data for f_f16\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(float*)retp = r; +}} +void f_f24_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f24) { fprintf(out,"wrong data for f_f24\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + float s = *(float*)(*args++); + float t = *(float*)(*args++); + float u = *(float*)(*args++); + float v = *(float*)(*args++); + float w = *(float*)(*args++); + float x = *(float*)(*args++); + float y = *(float*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + *(float*)retp = r; +}} + +/* double tests */ +void d_d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d) { fprintf(out,"wrong data for d_d\n"); exit(1); } + {double a = *(double*)(*args++); + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + *(double*)retp = r; +}} +void d_d2_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != 
(void*)&d_d2) { fprintf(out,"wrong data for d_d2\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + *(double*)retp = r; +}} +void d_d4_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4) { fprintf(out,"wrong data for d_d4\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_d8_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8) { fprintf(out,"wrong data for d_d8\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + *(double*)retp = r; +}} +void d_d16_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d16) { fprintf(out,"wrong data for d_d16\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + *(double*)retp = r; +}} + +/* pointer tests */ +void vp_vpdpcpsp_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&vp_vpdpcpsp) { fprintf(out,"wrong data for vp_vpdpcpsp\n"); exit(1); } + {void* a = *(void* *)(*args++); + double* b = *(double* *)(*args++); + char* c = *(char* *)(*args++); + Int* d = *(Int* *)(*args++); + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + *(void* *)retp = ret; +}} + +/* mixed number tests */ +void uc_ucsil_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&uc_ucsil) { fprintf(out,"wrong data for uc_ucsil\n"); exit(1); } + {uchar a = *(unsigned char *)(*args++); + ushort b = *(unsigned short *)(*args++); + uint c = *(unsigned int *)(*args++); + ulong d = *(unsigned long *)(*args++); + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void d_iidd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iidd) { fprintf(out,"wrong data for d_iidd\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + 
fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_iiidi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_iiidi) { fprintf(out,"wrong data for d_iiidi\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + int e = *(int*)(*args++); + double r=a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + *(double*)retp = r; +}} +void d_idid_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_idid) { fprintf(out,"wrong data for d_idid\n"); exit(1); } + {int a = *(int*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double d = *(double*)(*args++); + double r=a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + *(double*)retp = r; +}} +void d_fdi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_fdi) { fprintf(out,"wrong data for d_fdi\n"); exit(1); } + {float a = *(float*)(*args++); + double b = *(double*)(*args++); + int c = *(int*)(*args++); + double r=a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + *(double*)retp = r; +}} +void us_cdcd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&us_cdcd) { fprintf(out,"wrong data for us_cdcd\n"); exit(1); } + {char a = *(char*)(*args++); + double b = *(double*)(*args++); + char c = *(char*)(*args++); + double d = *(double*)(*args++); + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + *(ffi_arg *)retp = r; +}} +void ll_iiilli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_iiilli) { fprintf(out,"wrong data for ll_iiilli\n"); exit(1); } + {int a = *(int*)(*args++); + int b = *(int*)(*args++); + int c = *(int*)(*args++); + long long d = *(long long *)(*args++); + int e = *(int*)(*args++); + long long r = (long long)(int)a + (long long)(int)b + (long long)(int)c + d + (long long)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + *(long long *)retp = r; +}} +void ll_flli_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_flli) { fprintf(out,"wrong data for ll_flli\n"); exit(1); } + {float a = *(float*)(*args++); + long long b = *(long long *)(*args++); + int c = *(int*)(*args++); + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + *(long long *)retp = r; +}} +void f_fi_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_fi) { fprintf(out,"wrong data for f_fi\n"); exit(1); } + {float a = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + *(float*)retp = r; +}} +void f_f2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f2i) { fprintf(out,"wrong data for f_f2i\n"); exit(1); } + {float a = *(float*)(*args++); + 
float b = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(float*)retp = r; +}} +void f_f3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f3i) { fprintf(out,"wrong data for f_f3i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(float*)retp = r; +}} +void f_f4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f4i) { fprintf(out,"wrong data for f_f4i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(float*)retp = r; +}} +void f_f7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f7i) { fprintf(out,"wrong data for f_f7i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(float*)retp = r; +}} +void f_f8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f8i) { fprintf(out,"wrong data for f_f8i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(float*)retp = r; +}} +void f_f12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f12i) { fprintf(out,"wrong data for f_f12i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(float*)retp = r; +}} +void f_f13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f13i) { fprintf(out,"wrong data for f_f13i\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = 
*(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + int z = *(int*)(*args++); + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(float*)retp = r; +}} +void d_di_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_di) { fprintf(out,"wrong data for d_di\n"); exit(1); } + {double a = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + *(double*)retp = r; +}} +void d_d2i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d2i) { fprintf(out,"wrong data for d_d2i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + *(double*)retp = r; +}} +void d_d3i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d3i) { fprintf(out,"wrong data for d_d3i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + *(double*)retp = r; +}} +void d_d4i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d4i) { fprintf(out,"wrong data for d_d4i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + *(double*)retp = r; +}} +void d_d7i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d7i) { fprintf(out,"wrong data for d_d7i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + *(double*)retp = r; +}} +void d_d8i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d8i) { fprintf(out,"wrong data for d_d8i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + *(double*)retp = r; +}} +void d_d12i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d12i) { fprintf(out,"wrong data for d_d12i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = 
*(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + *(double*)retp = r; +}} +void d_d13i_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d13i) { fprintf(out,"wrong data for d_d13i\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + int z = *(int*)(*args++); + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + *(double*)retp = r; +}} + +/* small structure return tests */ +void S1_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S1_v) { fprintf(out,"wrong data for S1_v\n"); exit(1); } + {Size1 r = Size1_1; + fprintf(out,"Size1 f(void):"); + fflush(out); + *(Size1*)retp = r; +}} +void S2_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S2_v) { fprintf(out,"wrong data for S2_v\n"); exit(1); } + {Size2 r = Size2_1; + fprintf(out,"Size2 f(void):"); + fflush(out); + *(Size2*)retp = r; +}} +void S3_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S3_v) { fprintf(out,"wrong data for S3_v\n"); exit(1); } + {Size3 r = Size3_1; + fprintf(out,"Size3 f(void):"); + fflush(out); + *(Size3*)retp = r; +}} +void S4_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S4_v) { fprintf(out,"wrong data for S4_v\n"); exit(1); } + {Size4 r = Size4_1; + fprintf(out,"Size4 f(void):"); + fflush(out); + *(Size4*)retp = r; +}} +void S7_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S7_v) { fprintf(out,"wrong data for S7_v\n"); exit(1); } + {Size7 r = Size7_1; + fprintf(out,"Size7 f(void):"); + fflush(out); + *(Size7*)retp = r; +}} +void S8_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S8_v) { fprintf(out,"wrong data for S8_v\n"); exit(1); } + {Size8 r = Size8_1; + fprintf(out,"Size8 f(void):"); + fflush(out); + *(Size8*)retp = r; +}} +void S12_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S12_v) { fprintf(out,"wrong data for S12_v\n"); exit(1); } + {Size12 r = Size12_1; + fprintf(out,"Size12 f(void):"); + fflush(out); + *(Size12*)retp = r; +}} +void S15_v_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S15_v) { fprintf(out,"wrong data for S15_v\n"); exit(1); } + {Size15 r = Size15_1; + fprintf(out,"Size15 f(void):"); + fflush(out); + *(Size15*)retp = r; +}} +void S16_v_simulator (ffi_cif* cif, void* retp, 
/*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&S16_v) { fprintf(out,"wrong data for S16_v\n"); exit(1); } + {Size16 r = Size16_1; + fprintf(out,"Size16 f(void):"); + fflush(out); + *(Size16*)retp = r; +}} + +/* structure tests */ +void I_III_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&I_III) { fprintf(out,"wrong data for I_III\n"); exit(1); } + {Int a = *(Int*)(*args++); + Int b = *(Int*)(*args++); + Int c = *(Int*)(*args++); + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + *(Int*)retp = r; +}} +void C_CdC_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&C_CdC) { fprintf(out,"wrong data for C_CdC\n"); exit(1); } + {Char a = *(Char*)(*args++); + double b = *(double*)(*args++); + Char c = *(Char*)(*args++); + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + *(Char*)retp = r; +}} +void F_Ffd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&F_Ffd) { fprintf(out,"wrong data for F_Ffd\n"); exit(1); } + {Float a = *(Float*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Float r; + r.x = a.x + b + c; + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Float*)retp = r; +}} +void D_fDd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_fDd) { fprintf(out,"wrong data for D_fDd\n"); exit(1); } + {float a = *(float*)(*args++); + Double b = *(Double*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + *(Double*)retp = r; +}} +void D_Dfd_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&D_Dfd) { fprintf(out,"wrong data for D_Dfd\n"); exit(1); } + {Double a = *(Double*)(*args++); + float b = *(float*)(*args++); + double c = *(double*)(*args++); + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + *(Double*)retp = r; +}} +void J_JiJ_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&J_JiJ) { fprintf(out,"wrong data for J_JiJ\n"); exit(1); } + {J a = *(J*)(*args++); + int b= *(int*)(*args++); + J c = *(J*)(*args++); + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + *(J*)retp = r; +}} +#ifndef SKIP_EXTRA_STRUCTS +void T_TcT_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&T_TcT) { fprintf(out,"wrong data for T_TcT\n"); exit(1); } + {T a = *(T*)(*args++); + char b = *(char*)(*args++); + T c = *(T*)(*args++); + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + *(T*)retp = r; +}} +void X_BcdB_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&X_BcdB) { fprintf(out,"wrong data for X_BcdB\n"); exit(1); } + {B a = *(B*)(*args++); + char b = *(char*)(*args++); + double c = *(double*)(*args++); + B d = *(B*)(*args++); + static X xr={"return val",'R'}; 
+ X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + *(X*)retp = r; +}} +#endif + +/* gpargs boundary tests */ +void l_l0K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l0K) { fprintf(out,"wrong data for l_l0K\n"); exit(1); } + {K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l1K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l1K) { fprintf(out,"wrong data for l_l1K\n"); exit(1); } + {long a1 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l2K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l2K) { fprintf(out,"wrong data for l_l2K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l3K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l3K) { fprintf(out,"wrong data for l_l3K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l4K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l4K) { fprintf(out,"wrong data for l_l4K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l5K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l5K) { fprintf(out,"wrong data for l_l5K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void l_l6K_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&l_l6K) { fprintf(out,"wrong data for l_l6K\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = 
*(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + K b = *(K*)(*args++); + long c = *(long*)(*args++); + long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c; + fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c); + fflush(out); + *(ffi_arg*)retp = r; +}} +void f_f17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&f_f17l3L) { fprintf(out,"wrong data for f_f17l3L\n"); exit(1); } + {float a = *(float*)(*args++); + float b = *(float*)(*args++); + float c = *(float*)(*args++); + float d = *(float*)(*args++); + float e = *(float*)(*args++); + float f = *(float*)(*args++); + float g = *(float*)(*args++); + float h = *(float*)(*args++); + float i = *(float*)(*args++); + float j = *(float*)(*args++); + float k = *(float*)(*args++); + float l = *(float*)(*args++); + float m = *(float*)(*args++); + float n = *(float*)(*args++); + float o = *(float*)(*args++); + float p = *(float*)(*args++); + float q = *(float*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(float*)retp = r; +}} +void d_d17l3L_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_d17l3L) { fprintf(out,"wrong data for d_d17l3L\n"); exit(1); } + {double a = *(double*)(*args++); + double b = *(double*)(*args++); + double c = *(double*)(*args++); + double d = *(double*)(*args++); + double e = *(double*)(*args++); + double f = *(double*)(*args++); + double g = *(double*)(*args++); + double h = *(double*)(*args++); + double i = *(double*)(*args++); + double j = *(double*)(*args++); + double k = *(double*)(*args++); + double l = *(double*)(*args++); + double m = *(double*)(*args++); + double n = *(double*)(*args++); + double o = *(double*)(*args++); + double p = *(double*)(*args++); + double q = *(double*)(*args++); + long s = *(long*)(*args++); + long t = *(long*)(*args++); + long u = *(long*)(*args++); + L z = *(L*)(*args++); + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + *(double*)retp = r; +}} +void ll_l2ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l2ll) { fprintf(out,"wrong data for ll_l2ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l3ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l3ll) { fprintf(out,"wrong data for ll_l3ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = 
*(long*)(*args++); + long a3 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l4ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l4ll) { fprintf(out,"wrong data for ll_l4ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l5ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l5ll) { fprintf(out,"wrong data for ll_l5ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l6ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l6ll) { fprintf(out,"wrong data for ll_l6ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void ll_l7ll_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&ll_l7ll) { fprintf(out,"wrong data for ll_l7ll\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + long long b = *(long long *)(*args++); + long c = *(long*)(*args++); + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + *(long long *)retp = r; +}} +void d_l2d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l2d) { fprintf(out,"wrong data for d_l2d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l3d_simulator 
(ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l3d) { fprintf(out,"wrong data for d_l3d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l4d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l4d) { fprintf(out,"wrong data for d_l4d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l5d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l5d) { fprintf(out,"wrong data for d_l5d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l6d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l6d) { fprintf(out,"wrong data for d_l6d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + *(double*)retp = r; +}} +void d_l7d_simulator (ffi_cif* cif, void* retp, /*const*/ void* /*const*/ *args, void* data) +{ + if (data != (void*)&d_l7d) { fprintf(out,"wrong data for d_l7d\n"); exit(1); } + {long a1 = *(long*)(*args++); + long a2 = *(long*)(*args++); + long a3 = *(long*)(*args++); + long a4 = *(long*)(*args++); + long a5 = *(long*)(*args++); + long a6 = *(long*)(*args++); + long a7 = *(long*)(*args++); + double b = *(double*)(*args++); + long c = *(long*)(*args++); + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + *(double*)retp = r; +}} + + +/* + * The way we run these tests - first call the function directly, then + * through vacall() - there is the danger that arguments or results seem + * to be passed correctly, but what we are seeing are in fact the vestiges + * (traces) or the previous call. This may seriously fake the test. + * Avoid this by clearing the registers between the first and the second call. 
+ */ +long clear_traces_i (long a, long b, long c, long d, long e, long f, long g, long h, + long i, long j, long k, long l, long m, long n, long o, long p) +{ return 0; } +float clear_traces_f (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p) +{ return 0.0; } +double clear_traces_d (double a, double b, double c, double d, double e, double f, double g, + double h, double i, double j, double k, double l, double m, double n, + double o, double p) +{ return 0.0; } +J clear_traces_J (void) +{ J j; j.l1 = j.l2 = 0; return j; } +void clear_traces (void) +{ clear_traces_i(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); + clear_traces_f(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_d(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0); + clear_traces_J(); +} + +int main (void) +{ + void* callback_code; + void* callback_writable; +#define ALLOC_CALLBACK() \ + callback_writable = ffi_closure_alloc(sizeof(ffi_closure),&callback_code); \ + if (!callback_writable) abort() +#define PREP_CALLBACK(cif,simulator,data) \ + if (ffi_prep_closure_loc(callback_writable,&(cif),simulator,data,callback_code) != FFI_OK) abort() +#define FREE_CALLBACK() \ + ffi_closure_free(callback_writable) + + ffi_type_char = (char)(-1) < 0 ? ffi_type_schar : ffi_type_uchar; + out = stdout; + +#if (!defined(DGTEST)) || DGTEST == 1 + /* void tests */ + v_v(); + clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_void); + PREP_CALLBACK(cif,v_v_simulator,(void*)&v_v); + ((void (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); +#endif + + /* int tests */ + { int ir; + +#if (!defined(DGTEST)) || DGTEST == 2 + ir = i_v(); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_sint); + PREP_CALLBACK(cif,i_v_simulator,(void*)&i_v); + ir = ((int (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 3 + ir = i_i(i1); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i_simulator,(void*)&i_i); + ir = ((int (ABI_ATTR *) (int)) callback_code) (i1); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 4 + ir = i_i2(i1,i2); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i2_simulator,(void*)&i_i2); + ir = ((int (ABI_ATTR *) (int,int)) callback_code) (i1,i2); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 5 + ir = i_i4(i1,i2,i3,i4); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i4_simulator,(void*)&i_i4); + ir = ((int (ABI_ATTR *) (int,int,int,int)) callback_code) (i1,i2,i3,i4); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 6 + 
ir = i_i8(i1,i2,i3,i4,i5,i6,i7,i8); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i8_simulator,(void*)&i_i8); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 7 + ir = i_i16(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + fprintf(out,"->%d\n",ir); + fflush(out); + ir = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_sint); + PREP_CALLBACK(cif,i_i16_simulator,(void*)&i_i16); + ir = ((int (ABI_ATTR *) (int,int,int,int,int,int,int,int,int,int,int,int,int,int,int,int)) callback_code) (i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16); + } + FREE_CALLBACK(); + fprintf(out,"->%d\n",ir); + fflush(out); +#endif + } + + /* float tests */ + { float fr; + +#if (!defined(DGTEST)) || DGTEST == 8 + fr = f_f(f1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f_simulator,(void*)&f_f); + fr = ((float (ABI_ATTR *) (float)) callback_code) (f1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 9 + fr = f_f2(f1,f2); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2_simulator,(void*)&f_f2); + fr = ((float (ABI_ATTR *) (float,float)) callback_code) (f1,f2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 10 + fr = f_f4(f1,f2,f3,f4); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4_simulator,(void*)&f_f4); + fr = ((float (ABI_ATTR *) (float,float,float,float)) callback_code) (f1,f2,f3,f4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 11 + fr = f_f8(f1,f2,f3,f4,f5,f6,f7,f8); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8_simulator,(void*)&f_f8); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 12 + fr = f_f16(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f16_simulator,(void*)&f_f16); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 13 + fr = f_f24(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f24_simulator,(void*)&f_f24); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + + } + + /* double tests */ + { double dr; + +#if (!defined(DGTEST)) || DGTEST == 14 + dr = d_d(d1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d_simulator,(void*)&d_d); + dr = ((double (ABI_ATTR *) (double)) callback_code) (d1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 15 + dr = d_d2(d1,d2); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2_simulator,(void*)&d_d2); + dr = ((double (ABI_ATTR *) (double,double)) callback_code) (d1,d2); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 16 + dr = d_d4(d1,d2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4_simulator,(void*)&d_d4); + dr = ((double (ABI_ATTR *) (double,double,double,double)) callback_code) (d1,d2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if 
(!defined(DGTEST)) || DGTEST == 17 + dr = d_d8(d1,d2,d3,d4,d5,d6,d7,d8); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8_simulator,(void*)&d_d8); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 18 + dr = d_d16(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d16_simulator,(void*)&d_d16); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* pointer tests */ + { void* vpr; + +#if (!defined(DGTEST)) || DGTEST == 19 + vpr = vp_vpdpcpsp(&uc1,&d2,str3,&I4); + fprintf(out,"->0x%p\n",vpr); + fflush(out); + vpr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer, &ffi_type_pointer }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_pointer); + PREP_CALLBACK(cif,vp_vpdpcpsp_simulator,(void*)&vp_vpdpcpsp); + vpr = ((void* (ABI_ATTR *) (void*,double*,char*,Int*)) callback_code) (&uc1,&d2,str3,&I4); + } + FREE_CALLBACK(); + fprintf(out,"->0x%p\n",vpr); + fflush(out); +#endif + } + + /* mixed number tests */ + { uchar ucr; + ushort usr; + float fr; + double dr; + long long llr; + +#if (!defined(DGTEST)) || DGTEST == 20 + ucr = uc_ucsil(uc1,us2,ui3,ul4); + fprintf(out,"->%u\n",ucr); + fflush(out); + ucr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_uchar, &ffi_type_ushort, &ffi_type_uint, &ffi_type_ulong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_uchar); + PREP_CALLBACK(cif,uc_ucsil_simulator,(void*)&uc_ucsil); + ucr = ((uchar (ABI_ATTR *) (uchar,ushort,uint,ulong)) callback_code) (uc1,us2,ui3,ul4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",ucr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 21 + dr = d_iidd(i1,i2,d3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iidd_simulator,(void*)&d_iidd); + dr = ((double (ABI_ATTR *) (int,int,double,double)) callback_code) (i1,i2,d3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 22 + dr = d_iiidi(i1,i2,i3,d4,i5); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + 
ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_iiidi_simulator,(void*)&d_iiidi); + dr = ((double (ABI_ATTR *) (int,int,int,double,int)) callback_code) (i1,i2,i3,d4,i5); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 23 + dr = d_idid(i1,d2,i3,d4); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_double, &ffi_type_sint, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_idid_simulator,(void*)&d_idid); + dr = ((double (ABI_ATTR *) (int,double,int,double)) callback_code) (i1,d2,i3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 24 + dr = d_fdi(f1,d2,i3); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_fdi_simulator,(void*)&d_fdi); + dr = ((double (ABI_ATTR *) (float,double,int)) callback_code) (f1,d2,i3); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 25 + usr = us_cdcd(c1,d2,c3,d4); + fprintf(out,"->%u\n",usr); + fflush(out); + usr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_char, &ffi_type_double, &ffi_type_char, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_ushort); + PREP_CALLBACK(cif,us_cdcd_simulator,(void*)&us_cdcd); + usr = ((ushort (ABI_ATTR *) (char,double,char,double)) callback_code) (c1,d2,c3,d4); + } + FREE_CALLBACK(); + fprintf(out,"->%u\n",usr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 26 + llr = ll_iiilli(i1,i2,i3,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_sint, &ffi_type_sint, &ffi_type_sint, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_iiilli_simulator,(void*)&ll_iiilli); + llr = ((long long (ABI_ATTR *) (int,int,int,long long,int)) callback_code) (i1,i2,i3,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 27 + llr = ll_flli(f13,ll1,i13); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_slonglong, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_flli_simulator,(void*)&ll_flli); + llr = ((long long (ABI_ATTR *) (float,long long,int)) callback_code) (f13,ll1,i13); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 28 + fr = f_fi(f1,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_fi_simulator,(void*)&f_fi); + fr = ((float (ABI_ATTR *) (float,int)) callback_code) (f1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 29 + fr = f_f2i(f1,f2,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f2i_simulator,(void*)&f_f2i); + fr = ((float (ABI_ATTR *) (float,float,int)) callback_code) (f1,f2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 30 + fr = f_f3i(f1,f2,f3,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f3i_simulator,(void*)&f_f3i); + fr = ((float (ABI_ATTR *) (float,float,float,int)) callback_code) (f1,f2,f3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 31 + fr = f_f4i(f1,f2,f3,f4,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f4i_simulator,(void*)&f_f4i); + fr = ((float (ABI_ATTR *) (float,float,float,float,int)) callback_code) (f1,f2,f3,f4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 32 + fr = f_f7i(f1,f2,f3,f4,f5,f6,f7,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f7i_simulator,(void*)&f_f7i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 33 + fr = f_f8i(f1,f2,f3,f4,f5,f6,f7,f8,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f8i_simulator,(void*)&f_f8i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 34 + fr = f_f13i(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, 
&ffi_type_float, &ffi_type_float, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f13i_simulator,(void*)&f_f13i); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,int)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 35 + dr = d_di(d1,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_di_simulator,(void*)&d_di); + dr = ((double (ABI_ATTR *) (double,int)) callback_code) (d1,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 36 + dr = d_d2i(d1,d2,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d2i_simulator,(void*)&d_d2i); + dr = ((double (ABI_ATTR *) (double,double,int)) callback_code) (d1,d2,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 37 + dr = d_d3i(d1,d2,d3,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d3i_simulator,(void*)&d_d3i); + dr = ((double (ABI_ATTR *) (double,double,double,int)) callback_code) (d1,d2,d3,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 38 + dr = d_d4i(d1,d2,d3,d4,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d4i_simulator,(void*)&d_d4i); + dr = ((double (ABI_ATTR *) (double,double,double,double,int)) callback_code) (d1,d2,d3,d4,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 39 + dr = d_d7i(d1,d2,d3,d4,d5,d6,d7,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d7i_simulator,(void*)&d_d7i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 40 + dr = d_d8i(d1,d2,d3,d4,d5,d6,d7,d8,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + 
FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d8i_simulator,(void*)&d_d8i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 41 + dr = d_d12i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d12i_simulator,(void*)&d_d12i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 42 + dr = d_d13i(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_sint }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d13i_simulator,(void*)&d_d13i); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,int)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,i9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + } + + /* small structure return tests */ +#if (!defined(DGTEST)) || DGTEST == 43 + { + Size1 r = S1_v(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size1_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Size1; + ffi_type_Size1.type = FFI_TYPE_STRUCT; + ffi_type_Size1.size = sizeof(Size1); + ffi_type_Size1.alignment = alignof_slot(Size1); + ffi_type_Size1.elements = ffi_type_Size1_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size1); + PREP_CALLBACK(cif,S1_v_simulator,(void*)&S1_v); + r = ((Size1 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c}\n",r.x1); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 44 + { + Size2 r = S2_v(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size2_elements[] = { &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size2; + ffi_type_Size2.type = FFI_TYPE_STRUCT; + ffi_type_Size2.size = sizeof(Size2); + ffi_type_Size2.alignment = alignof_slot(Size2); + ffi_type_Size2.elements = ffi_type_Size2_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size2); + PREP_CALLBACK(cif,S2_v_simulator,(void*)&S2_v); + r = ((Size2 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c}\n",r.x1,r.x2); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 45 + { + Size3 r = S3_v(); + 
fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size3_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size3; + ffi_type_Size3.type = FFI_TYPE_STRUCT; + ffi_type_Size3.size = sizeof(Size3); + ffi_type_Size3.alignment = alignof_slot(Size3); + ffi_type_Size3.elements = ffi_type_Size3_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size3); + PREP_CALLBACK(cif,S3_v_simulator,(void*)&S3_v); + r = ((Size3 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c}\n",r.x1,r.x2,r.x3); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 46 + { + Size4 r = S4_v(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size4_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size4; + ffi_type_Size4.type = FFI_TYPE_STRUCT; + ffi_type_Size4.size = sizeof(Size4); + ffi_type_Size4.alignment = alignof_slot(Size4); + ffi_type_Size4.elements = ffi_type_Size4_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size4); + PREP_CALLBACK(cif,S4_v_simulator,(void*)&S4_v); + r = ((Size4 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 47 + { + Size7 r = S7_v(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size7_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size7; + ffi_type_Size7.type = FFI_TYPE_STRUCT; + ffi_type_Size7.size = sizeof(Size7); + ffi_type_Size7.alignment = alignof_slot(Size7); + ffi_type_Size7.elements = ffi_type_Size7_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size7); + PREP_CALLBACK(cif,S7_v_simulator,(void*)&S7_v); + r = ((Size7 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 48 + { + Size8 r = S8_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size8_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size8; + ffi_type_Size8.type = FFI_TYPE_STRUCT; + ffi_type_Size8.size = sizeof(Size8); + ffi_type_Size8.alignment = alignof_slot(Size8); + ffi_type_Size8.elements = ffi_type_Size8_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size8); + PREP_CALLBACK(cif,S8_v_simulator,(void*)&S8_v); + r = ((Size8 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 49 + { + Size12 r = S12_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* 
ffi_type_Size12_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size12; + ffi_type_Size12.type = FFI_TYPE_STRUCT; + ffi_type_Size12.size = sizeof(Size12); + ffi_type_Size12.alignment = alignof_slot(Size12); + ffi_type_Size12.elements = ffi_type_Size12_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size12); + PREP_CALLBACK(cif,S12_v_simulator,(void*)&S12_v); + r = ((Size12 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 50 + { + Size15 r = S15_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size15_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size15; + ffi_type_Size15.type = FFI_TYPE_STRUCT; + ffi_type_Size15.size = sizeof(Size15); + ffi_type_Size15.alignment = alignof_slot(Size15); + ffi_type_Size15.elements = ffi_type_Size15_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size15); + PREP_CALLBACK(cif,S15_v_simulator,(void*)&S15_v); + r = ((Size15 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15); + fflush(out); + } +#endif + +#if (!defined(DGTEST)) || DGTEST == 51 + { + Size16 r = S16_v(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + memset(&r,0,sizeof(r)); clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Size16_elements[] = { &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, &ffi_type_char, NULL }; + ffi_type ffi_type_Size16; + ffi_type_Size16.type = FFI_TYPE_STRUCT; + ffi_type_Size16.size = sizeof(Size16); + ffi_type_Size16.alignment = alignof_slot(Size16); + ffi_type_Size16.elements = ffi_type_Size16_elements; + ffi_cif cif; + FFI_PREP_CIF_NOARGS(cif,ffi_type_Size16); + PREP_CALLBACK(cif,S16_v_simulator,(void*)&S16_v); + r = ((Size16 (ABI_ATTR *) (void)) callback_code) (); + } + FREE_CALLBACK(); + fprintf(out,"->{%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c}\n",r.x1,r.x2,r.x3,r.x4,r.x5,r.x6,r.x7,r.x8,r.x9,r.x10,r.x11,r.x12,r.x13,r.x14,r.x15,r.x16); + fflush(out); + } +#endif + + + /* structure tests */ + { Int Ir; + Char Cr; + Float Fr; + Double Dr; + J Jr; +#ifndef SKIP_EXTRA_STRUCTS + T Tr; + X Xr; +#endif + +#if (!defined(DGTEST)) || DGTEST == 52 + Ir = I_III(I1,I2,I3); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); + Ir.x = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Int_elements[] = { &ffi_type_sint, NULL }; + ffi_type ffi_type_Int; + ffi_type_Int.type = FFI_TYPE_STRUCT; + ffi_type_Int.size = 
sizeof(Int); + ffi_type_Int.alignment = alignof_slot(Int); + ffi_type_Int.elements = ffi_type_Int_elements; + ffi_type* argtypes[] = { &ffi_type_Int, &ffi_type_Int, &ffi_type_Int }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Int); + PREP_CALLBACK(cif,I_III_simulator,(void*)&I_III); + Ir = ((Int (ABI_ATTR *) (Int,Int,Int)) callback_code) (I1,I2,I3); + } + FREE_CALLBACK(); + fprintf(out,"->{%d}\n",Ir.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 53 + Cr = C_CdC(C1,d2,C3); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); + Cr.x = '\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Char_elements[] = { &ffi_type_char, NULL }; + ffi_type ffi_type_Char; + ffi_type_Char.type = FFI_TYPE_STRUCT; + ffi_type_Char.size = sizeof(Char); + ffi_type_Char.alignment = alignof_slot(Char); + ffi_type_Char.elements = ffi_type_Char_elements; + ffi_type* argtypes[] = { &ffi_type_Char, &ffi_type_double, &ffi_type_Char }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Char); + PREP_CALLBACK(cif,C_CdC_simulator,(void*)&C_CdC); + Cr = ((Char (ABI_ATTR *) (Char,double,Char)) callback_code) (C1,d2,C3); + } + FREE_CALLBACK(); + fprintf(out,"->{'%c'}\n",Cr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 54 + Fr = F_Ffd(F1,f2,d3); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); + Fr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Float_elements[] = { &ffi_type_float, NULL }; + ffi_type ffi_type_Float; + ffi_type_Float.type = FFI_TYPE_STRUCT; + ffi_type_Float.size = sizeof(Float); + ffi_type_Float.alignment = alignof_slot(Float); + ffi_type_Float.elements = ffi_type_Float_elements; + ffi_type* argtypes[] = { &ffi_type_Float, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Float); + PREP_CALLBACK(cif,F_Ffd_simulator,(void*)&F_Ffd); + Fr = ((Float (ABI_ATTR *) (Float,float,double)) callback_code) (F1,f2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Fr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 55 + Dr = D_fDd(f1,D2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_Double, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_fDd_simulator,(void*)&D_fDd); + Dr = ((Double (ABI_ATTR *) (float,Double,double)) callback_code) (f1,D2,d3); + } + FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 56 + Dr = D_Dfd(D1,f2,d3); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); + Dr.x = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_Double_elements[] = { &ffi_type_double, NULL }; + ffi_type ffi_type_Double; + ffi_type_Double.type = FFI_TYPE_STRUCT; + ffi_type_Double.size = sizeof(Double); + ffi_type_Double.alignment = alignof_slot(Double); + ffi_type_Double.elements = ffi_type_Double_elements; + ffi_type* argtypes[] = { &ffi_type_Double, &ffi_type_float, &ffi_type_double }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_Double); + PREP_CALLBACK(cif,D_Dfd_simulator,(void*)&D_Dfd); + Dr = ((Double (ABI_ATTR *) (Double,float,double)) callback_code) (D1,f2,d3); + } + 
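/* Note that single-field wrappers such as Int, Char, Float and Double are
   still described to libffi as FFI_TYPE_STRUCT aggregates, with a
   NULL-terminated elements array plus explicit size and alignment
   (alignof_slot is a harness helper, not shown in this hunk), rather than
   being collapsed to the underlying scalar ffi_type; distinguishing the two
   cases is part of what these structure tests exercise. */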
FREE_CALLBACK(); + fprintf(out,"->{%g}\n",Dr.x); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 57 + Jr = J_JiJ(J1,i2,J2); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); + Jr.l1 = Jr.l2 = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_J_elements[] = { &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_J; + ffi_type_J.type = FFI_TYPE_STRUCT; + ffi_type_J.size = sizeof(J); + ffi_type_J.alignment = alignof_slot(J); + ffi_type_J.elements = ffi_type_J_elements; + ffi_type* argtypes[] = { &ffi_type_J, &ffi_type_sint, &ffi_type_J }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_J); + PREP_CALLBACK(cif,J_JiJ_simulator,(void*)&J_JiJ); + Jr = ((J (ABI_ATTR *) (J,int,J)) callback_code) (J1,i2,J2); + } + FREE_CALLBACK(); + fprintf(out,"->{%ld,%ld}\n",Jr.l1,Jr.l2); + fflush(out); +#endif + +#ifndef SKIP_EXTRA_STRUCTS +#if (!defined(DGTEST)) || DGTEST == 58 + Tr = T_TcT(T1,' ',T2); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); + Tr.c[0] = Tr.c[1] = Tr.c[2] = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_T_elements[] = { ??, NULL }; + ffi_type ffi_type_T; + ffi_type_T.type = FFI_TYPE_STRUCT; + ffi_type_T.size = sizeof(T); + ffi_type_T.alignment = alignof_slot(T); + ffi_type_T.elements = ffi_type_T_elements; + ffi_type* argtypes[] = { &ffi_type_T, &ffi_type_char, &ffi_type_T }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_T); + PREP_CALLBACK(cif,T_TcT_simulator,(void*)&T_TcT); + Tr = ((T (ABI_ATTR *) (T,char,T)) callback_code) (T1,' ',T2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%c%c%c\"}\n",Tr.c[0],Tr.c[1],Tr.c[2]); + fflush(out); +#endif + +#ifndef SKIP_X +#if (!defined(DGTEST)) || DGTEST == 59 + Xr = X_BcdB(B1,c2,d3,B2); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); + Xr.c[0]=Xr.c1='\0'; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* ffi_type_X_elements[] = { ??, NULL }; + ffi_type ffi_type_X; + ffi_type_X.type = FFI_TYPE_STRUCT; + ffi_type_X.size = sizeof(X); + ffi_type_X.alignment = alignof_slot(X); + ffi_type_X.elements = ffi_type_X_elements; + ffi_type* argtypes[] = { &ffi_type_X, &ffi_type_char, &ffi_type_double, &ffi_type_X }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_X); + PREP_CALLBACK(cif,X_BcdB_simulator,(void*)&X_BcdB); + Xr = ((X (ABI_ATTR *) (B,char,double,B)) callback_code) (B1,c2,d3,B2); + } + FREE_CALLBACK(); + fprintf(out,"->{\"%s\",'%c'}\n",Xr.c,Xr.c1); + fflush(out); +#endif +#endif +#endif + } + + + /* gpargs boundary tests */ + { + ffi_type* ffi_type_K_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_K; + ffi_type* ffi_type_L_elements[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, NULL }; + ffi_type ffi_type_L; + long lr; + long long llr; + float fr; + double dr; + + ffi_type_K.type = FFI_TYPE_STRUCT; + ffi_type_K.size = sizeof(K); + ffi_type_K.alignment = alignof_slot(K); + ffi_type_K.elements = ffi_type_K_elements; + + ffi_type_L.type = FFI_TYPE_STRUCT; + ffi_type_L.size = sizeof(L); + ffi_type_L.alignment = alignof_slot(L); + ffi_type_L.elements = ffi_type_L_elements; + +#if (!defined(DGTEST)) || DGTEST == 60 + lr = l_l0K(K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l0K_simulator,(void*)l_l0K); + lr = ((long (ABI_ATTR 
*) (K,long)) callback_code) (K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 61 + lr = l_l1K(l1,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l1K_simulator,(void*)l_l1K); + lr = ((long (ABI_ATTR *) (long,K,long)) callback_code) (l1,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 62 + lr = l_l2K(l1,l2,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l2K_simulator,(void*)l_l2K); + lr = ((long (ABI_ATTR *) (long,long,K,long)) callback_code) (l1,l2,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 63 + lr = l_l3K(l1,l2,l3,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l3K_simulator,(void*)l_l3K); + lr = ((long (ABI_ATTR *) (long,long,long,K,long)) callback_code) (l1,l2,l3,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 64 + lr = l_l4K(l1,l2,l3,l4,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l4K_simulator,(void*)l_l4K); + lr = ((long (ABI_ATTR *) (long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 65 + lr = l_l5K(l1,l2,l3,l4,l5,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l5K_simulator,(void*)l_l5K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 66 + lr = l_l6K(l1,l2,l3,l4,l5,l6,K1,l9); + fprintf(out,"->%ld\n",lr); + fflush(out); + lr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_K, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slong); + PREP_CALLBACK(cif,l_l6K_simulator,(void*)l_l6K); + lr = ((long (ABI_ATTR *) (long,long,long,long,long,long,K,long)) callback_code) (l1,l2,l3,l4,l5,l6,K1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%ld\n",lr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 67 + fr = 
f_f17l3L(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + fprintf(out,"->%g\n",fr); + fflush(out); + fr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_float, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_float); + PREP_CALLBACK(cif,f_f17l3L_simulator,(void*)&f_f17l3L); + fr = ((float (ABI_ATTR *) (float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,float,long,long,long,L)) callback_code) (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",fr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 68 + dr = d_d17l3L(d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_double, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_L }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_d17l3L_simulator,(void*)&d_d17l3L); + dr = ((double (ABI_ATTR *) (double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,double,long,long,long,L)) callback_code) (d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17,l6,l7,l8,L1); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 69 + llr = ll_l2ll(l1,l2,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l2ll_simulator,(void*)ll_l2ll); + llr = ((long long (ABI_ATTR *) (long,long,long long,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 70 + llr = ll_l3ll(l1,l2,l3,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l3ll_simulator,(void*)ll_l3ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long long,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 71 + llr = ll_l4ll(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; 
clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l4ll_simulator,(void*)ll_l4ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 72 + llr = ll_l5ll(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l5ll_simulator,(void*)ll_l5ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 73 + llr = ll_l6ll(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l6ll_simulator,(void*)ll_l6ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 74 + llr = ll_l7ll(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); + llr = 0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slonglong, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_slonglong); + PREP_CALLBACK(cif,ll_l7ll_simulator,(void*)ll_l7ll); + llr = ((long long (ABI_ATTR *) (long,long,long,long,long,long,long,long long,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->0x%lx%08lx\n",(long)(llr>>32),(long)(llr&0xffffffff)); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 75 + dr = d_l2d(l1,l2,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l2d_simulator,(void*)d_l2d); + dr = ((double (ABI_ATTR *) (long,long,double,long)) callback_code) (l1,l2,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 76 + dr = d_l3d(l1,l2,l3,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, 
&ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l3d_simulator,(void*)d_l3d); + dr = ((double (ABI_ATTR *) (long,long,long,double,long)) callback_code) (l1,l2,l3,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 77 + dr = d_l4d(l1,l2,l3,l4,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l4d_simulator,(void*)d_l4d); + dr = ((double (ABI_ATTR *) (long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 78 + dr = d_l5d(l1,l2,l3,l4,l5,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l5d_simulator,(void*)d_l5d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 79 + dr = d_l6d(l1,l2,l3,l4,l5,l6,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l6d_simulator,(void*)d_l6d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + +#if (!defined(DGTEST)) || DGTEST == 80 + dr = d_l7d(l1,l2,l3,l4,l5,l6,l7,ll1,l9); + fprintf(out,"->%g\n",dr); + fflush(out); + dr = 0.0; clear_traces(); + ALLOC_CALLBACK(); + { + ffi_type* argtypes[] = { &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_slong, &ffi_type_double, &ffi_type_slong }; + ffi_cif cif; + FFI_PREP_CIF(cif,argtypes,ffi_type_double); + PREP_CALLBACK(cif,d_l7d_simulator,(void*)d_l7d); + dr = ((double (ABI_ATTR *) (long,long,long,long,long,long,long,double,long)) callback_code) (l1,l2,l3,l4,l5,l6,l7,ll1,l9); + } + FREE_CALLBACK(); + fprintf(out,"->%g\n",dr); + fflush(out); +#endif + + } + + exit(0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c new file mode 100644 index 0000000..d25ebf4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.bhaible/testcases.c @@ -0,0 +1,743 @@ +/* + * Copyright 1993 Bill Triggs + * Copyright 1995-2017 Bruno Haible + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the 
License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* This file defines test functions of selected signatures, that exercise + dark corners of the various ABIs. */ + +#include + +FILE* out; + +#define uchar unsigned char +#define ushort unsigned short +#define uint unsigned int +#define ulong unsigned long + +typedef struct { char x; } Char; +typedef struct { short x; } Short; +typedef struct { int x; } Int; +typedef struct { long x; } Long; +typedef struct { float x; } Float; +typedef struct { double x; } Double; +typedef struct { char c; float f; } A; +typedef struct { double d; int i[3]; } B; +typedef struct { long l1; long l2; } J; +typedef struct { long l1; long l2; long l3; long l4; } K; +typedef struct { long l1; long l2; long l3; long l4; long l5; long l6; } L; +typedef struct { char x1; } Size1; +typedef struct { char x1; char x2; } Size2; +typedef struct { char x1; char x2; char x3; } Size3; +typedef struct { char x1; char x2; char x3; char x4; } Size4; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; +} Size7; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; +} Size8; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; +} Size12; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; +} Size15; +typedef struct { + char x1; char x2; char x3; char x4; char x5; char x6; char x7; char x8; + char x9; char x10; char x11; char x12; char x13; char x14; char x15; char x16; +} Size16; +typedef struct { char c[3]; } T; +typedef struct { char c[33],c1; } X; + +char c1='a', c2=127, c3=(char)128, c4=(char)255, c5=-1; +short s1=32767, s2=(short)32768, s3=3, s4=4, s5=5, s6=6, s7=7, s8=8, s9=9; +int i1=1, i2=2, i3=3, i4=4, i5=5, i6=6, i7=7, i8=8, i9=9, + i10=11, i11=12, i12=13, i13=14, i14=15, i15=16, i16=17; +long l1=1, l2=2, l3=3, l4=4, l5=5, l6=6, l7=7, l8=8, l9=9; +long long ll1 = 3875056143130689530LL; +float f1=0.1f, f2=0.2f, f3=0.3f, f4=0.4f, f5=0.5f, f6=0.6f, f7=0.7f, f8=0.8f, f9=0.9f, + f10=1.1f, f11=1.2f, f12=1.3f, f13=1.4f, f14=1.5f, f15=1.6f, f16=1.7f, f17=1.8f, + f18=1.9f, f19=2.1f, f20=2.2f, f21=2.3f, f22=2.4f, f23=2.5f, f24=2.6f; +double d1=0.1, d2=0.2, d3=0.3, d4=0.4, d5=0.5, d6=0.6, d7=0.7, d8=0.8, d9=0.9, + d10=1.1, d11=1.2, d12=1.3, d13=1.4, d14=1.5, d15=1.6, d16=1.7, d17=1.8; + +uchar uc1='a', uc2=127, uc3=128, uc4=255, uc5=(uchar)-1; +ushort us1=1, us2=2, us3=3, us4=4, us5=5, us6=6, us7=7, us8=8, us9=9; +uint ui1=1, ui2=2, ui3=3, ui4=4, ui5=5, ui6=6, ui7=7, ui8=8, ui9=9; +ulong ul1=1, ul2=2, ul3=3, ul4=4, ul5=5, ul6=6, ul7=7, ul8=8, ul9=9; + +char *str1="hello",str2[]="goodbye",*str3="still here?"; +Char C1={'A'}, C2={'B'}, C3={'C'}, C4={'\377'}, C5={(char)(-1)}; +Short S1={1}, S2={2}, S3={3}, S4={4}, S5={5}, S6={6}, S7={7}, S8={8}, S9={9}; +Int I1={1}, I2={2}, I3={3}, I4={4}, I5={5}, I6={6}, I7={7}, I8={8}, I9={9}; +Float F1={0.1f}, F2={0.2f}, F3={0.3f}, F4={0.4f}, F5={0.5f}, F6={0.6f}, F7={0.7f}, F8={0.8f}, F9={0.9f}; +Double D1={0.1}, 
D2={0.2}, D3={0.3}, D4={0.4}, D5={0.5}, D6={0.6}, D7={0.7}, D8={0.8}, D9={0.9}; + +A A1={'a',0.1f},A2={'b',0.2f},A3={'\377',0.3f}; +B B1={0.1,{1,2,3}},B2={0.2,{5,4,3}}; +J J1={47,11},J2={73,55}; +K K1={19,69,12,28}; +L L1={561,1105,1729,2465,2821,6601}; /* A002997 */ +Size1 Size1_1={'a'}; +Size2 Size2_1={'a','b'}; +Size3 Size3_1={'a','b','c'}; +Size4 Size4_1={'a','b','c','d'}; +Size7 Size7_1={'a','b','c','d','e','f','g'}; +Size8 Size8_1={'a','b','c','d','e','f','g','h'}; +Size12 Size12_1={'a','b','c','d','e','f','g','h','i','j','k','l'}; +Size15 Size15_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o'}; +Size16 Size16_1={'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p'}; +T T1={{'t','h','e'}},T2={{'f','o','x'}}; +X X1={"abcdefghijklmnopqrstuvwxyzABCDEF",'G'}, X2={"123",'9'}, X3={"return-return-return",'R'}; + +#if defined(__GNUC__) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_ATTR +#define ABI_ATTR +#endif + +/* void tests */ +void ABI_ATTR v_v (void) +{ + fprintf(out,"void f(void):\n"); + fflush(out); +} + +/* int tests */ +int ABI_ATTR i_v (void) +{ + int r=99; + fprintf(out,"int f(void):"); + fflush(out); + return r; +} +int ABI_ATTR i_i (int a) +{ + int r=a+1; + fprintf(out,"int f(int):(%d)",a); + fflush(out); + return r; +} +int ABI_ATTR i_i2 (int a, int b) +{ + int r=a+b; + fprintf(out,"int f(2*int):(%d,%d)",a,b); + fflush(out); + return r; +} +int ABI_ATTR i_i4 (int a, int b, int c, int d) +{ + int r=a+b+c+d; + fprintf(out,"int f(4*int):(%d,%d,%d,%d)",a,b,c,d); + fflush(out); + return r; +} +int ABI_ATTR i_i8 (int a, int b, int c, int d, int e, int f, int g, int h) +{ + int r=a+b+c+d+e+f+g+h; + fprintf(out,"int f(8*int):(%d,%d,%d,%d,%d,%d,%d,%d)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +int ABI_ATTR i_i16 (int a, int b, int c, int d, int e, int f, int g, int h, + int i, int j, int k, int l, int m, int n, int o, int p) +{ + int r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"int f(16*int):(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)", + a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* float tests */ +float ABI_ATTR f_f (float a) +{ + float r=a+1.0f; + fprintf(out,"float f(float):(%g)",a); + fflush(out); + return r; +} +float ABI_ATTR f_f2 (float a, float b) +{ + float r=a+b; + fprintf(out,"float f(2*float):(%g,%g)",a,b); + fflush(out); + return r; +} +float ABI_ATTR f_f4 (float a, float b, float c, float d) +{ + float r=a+b+c+d; + fprintf(out,"float f(4*float):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +float ABI_ATTR f_f8 (float a, float b, float c, float d, float e, float f, + float g, float h) +{ + float r=a+b+c+d+e+f+g+h; + fprintf(out,"float f(8*float):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +float ABI_ATTR f_f16 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, float p) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"float f(16*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} +float ABI_ATTR f_f24 (float a, float b, float c, float d, float e, float f, float g, float h, + float i, float j, float k, float l, float m, float n, float o, 
float p, + float q, float s, float t, float u, float v, float w, float x, float y) +{ + float r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+v+w+x+y; + fprintf(out,"float f(24*float):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,v,w,x,y); + fflush(out); + return r; +} + +/* double tests */ +double ABI_ATTR d_d (double a) +{ + double r=a+1.0; + fprintf(out,"double f(double):(%g)",a); + fflush(out); + return r; +} +double ABI_ATTR d_d2 (double a, double b) +{ + double r=a+b; + fprintf(out,"double f(2*double):(%g,%g)",a,b); + fflush(out); + return r; +} +double ABI_ATTR d_d4 (double a, double b, double c, double d) +{ + double r=a+b+c+d; + fprintf(out,"double f(4*double):(%g,%g,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_d8 (double a, double b, double c, double d, double e, double f, + double g, double h) +{ + double r=a+b+c+d+e+f+g+h; + fprintf(out,"double f(8*double):(%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h); + fflush(out); + return r; +} +double ABI_ATTR d_d16 (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p) +{ + double r=a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p; + fprintf(out,"double f(16*double):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p); + fflush(out); + return r; +} + +/* pointer tests */ +void* ABI_ATTR vp_vpdpcpsp (void* a, double* b, char* c, Int* d) +{ + void* ret = (char*)b + 1; + fprintf(out,"void* f(void*,double*,char*,Int*):(0x%p,0x%p,0x%p,0x%p)",a,b,c,d); + fflush(out); + return ret; +} + +/* mixed number tests */ +uchar ABI_ATTR uc_ucsil (uchar a, ushort b, uint c, ulong d) +{ + uchar r = (uchar)-1; + fprintf(out,"uchar f(uchar,ushort,uint,ulong):(%u,%u,%u,%lu)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iidd (int a, int b, double c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,int,double,double):(%d,%d,%g,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_iiidi (int a, int b, int c, double d, int e) +{ + double r = a+b+c+d+e; + fprintf(out,"double f(int,int,int,double,int):(%d,%d,%d,%g,%d)",a,b,c,d,e); + fflush(out); + return r; +} +double ABI_ATTR d_idid (int a, double b, int c, double d) +{ + double r = a+b+c+d; + fprintf(out,"double f(int,double,int,double):(%d,%g,%d,%g)",a,b,c,d); + fflush(out); + return r; +} +double ABI_ATTR d_fdi (float a, double b, int c) +{ + double r = a+b+c; + fprintf(out,"double f(float,double,int):(%g,%g,%d)",a,b,c); + fflush(out); + return r; +} +ushort ABI_ATTR us_cdcd (char a, double b, char c, double d) +{ + ushort r = (ushort)(a + b + c + d); + fprintf(out,"ushort f(char,double,char,double):('%c',%g,'%c',%g)",a,b,c,d); + fflush(out); + return r; +} + +long long ABI_ATTR ll_iiilli (int a, int b, int c, long long d, int e) +{ + long long r = (long long)(int)a+(long long)(int)b+(long long)(int)c+d+(long long)(int)e; + fprintf(out,"long long f(int,int,int,long long,int):(%d,%d,%d,0x%lx%08lx,%d)",a,b,c,(long)(d>>32),(long)(d&0xffffffff),e); + fflush(out); + return r; +} +long long ABI_ATTR ll_flli (float a, long long b, int c) +{ + long long r = (long long)(int)a + b + (long long)c; + fprintf(out,"long long f(float,long long,int):(%g,0x%lx%08lx,0x%lx)",a,(long)(b>>32),(long)(b&0xffffffff),(long)c); + fflush(out); + return r; +} + +float ABI_ATTR f_fi (float a, int z) +{ + float r = a+z; + fprintf(out,"float f(float,int):(%g,%d)",a,z); + fflush(out); + return r; +} 
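/* For orientation, a minimal standalone sketch of driving one of the
   functions above through libffi's forward-call interface (the drivers
   earlier in this diff also exercise the closure direction).  It assumes the
   definitions in this file are compiled alongside <ffi.h> and that ABI_ATTR
   is empty, so FFI_DEFAULT_ABI applies; the harness's FFI_PREP_CIF macro
   presumably wraps the same ffi_prep_cif call.  Kept under #if 0 so it does
   not interfere with the real drivers. */
#if 0
#include <stdio.h>
#include <ffi.h>

int main (void)
{
  ffi_cif cif;
  ffi_type* argtypes[] = { &ffi_type_float, &ffi_type_sint };
  float a = 0.1f;
  int z = 9;
  void* argvalues[] = { &a, &z };
  float result;

  out = stdout;                               /* f_fi prints its trace to 'out' */
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_float, argtypes) != FFI_OK)
    return 1;
  ffi_call(&cif, FFI_FN(f_fi), &result, argvalues);   /* float f(float,int) */
  printf("->%g\n", result);
  return 0;
}
#endif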
+float ABI_ATTR f_f2i (float a, float b, int z) +{ + float r = a+b+z; + fprintf(out,"float f(2*float,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +float ABI_ATTR f_f3i (float a, float b, float c, int z) +{ + float r = a+b+c+z; + fprintf(out,"float f(3*float,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +float ABI_ATTR f_f4i (float a, float b, float c, float d, int z) +{ + float r = a+b+c+d+z; + fprintf(out,"float f(4*float,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +float ABI_ATTR f_f7i (float a, float b, float c, float d, float e, float f, float g, + int z) +{ + float r = a+b+c+d+e+f+g+z; + fprintf(out,"float f(7*float,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +float ABI_ATTR f_f8i (float a, float b, float c, float d, float e, float f, float g, + float h, int z) +{ + float r = a+b+c+d+e+f+g+h+z; + fprintf(out,"float f(8*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +float ABI_ATTR f_f12i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"float f(12*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +float ABI_ATTR f_f13i (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, int z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"float f(13*float,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + +double ABI_ATTR d_di (double a, int z) +{ + double r = a+z; + fprintf(out,"double f(double,int):(%g,%d)",a,z); + fflush(out); + return r; +} +double ABI_ATTR d_d2i (double a, double b, int z) +{ + double r = a+b+z; + fprintf(out,"double f(2*double,int):(%g,%g,%d)",a,b,z); + fflush(out); + return r; +} +double ABI_ATTR d_d3i (double a, double b, double c, int z) +{ + double r = a+b+c+z; + fprintf(out,"double f(3*double,int):(%g,%g,%g,%d)",a,b,c,z); + fflush(out); + return r; +} +double ABI_ATTR d_d4i (double a, double b, double c, double d, int z) +{ + double r = a+b+c+d+z; + fprintf(out,"double f(4*double,int):(%g,%g,%g,%g,%d)",a,b,c,d,z); + fflush(out); + return r; +} +double ABI_ATTR d_d7i (double a, double b, double c, double d, double e, double f, + double g, int z) +{ + double r = a+b+c+d+e+f+g+z; + fprintf(out,"double f(7*double,int):(%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,z); + fflush(out); + return r; +} +double ABI_ATTR d_d8i (double a, double b, double c, double d, double e, double f, + double g, double h, int z) +{ + double r = a+b+c+d+e+f+g+h+z; + fprintf(out,"double f(8*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,z); + fflush(out); + return r; +} +double ABI_ATTR d_d12i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+z; + fprintf(out,"double f(12*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,z); + fflush(out); + return r; +} +double ABI_ATTR d_d13i (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, int z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+z; + fprintf(out,"double f(13*double,int):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%d)",a,b,c,d,e,f,g,h,i,j,k,l,m,z); + fflush(out); + return r; +} + 
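/* The Size1..Size16 return functions that follow are driven, in the test
   drivers earlier in this diff, through aggregate ffi_types.  A standalone
   sketch of that pattern, again kept under #if 0: note that stock libffi only
   provides ffi_type_schar / ffi_type_uchar, so the drivers' ffi_type_char is
   presumably a harness macro mapping to one of them, and that leaving size
   and alignment at 0 lets ffi_prep_cif compute them, whereas the drivers fill
   them in explicitly with sizeof and alignof_slot. */
#if 0
#include <stdio.h>
#include <ffi.h>

Size2 ABI_ATTR S2_v (void);                   /* defined just below */

int main (void)
{
  ffi_type* size2_elements[] = { &ffi_type_schar, &ffi_type_schar, NULL };
  ffi_type ffi_type_Size2;
  ffi_type* noargs[] = { NULL };
  ffi_cif cif;
  Size2 r;

  ffi_type_Size2.type = FFI_TYPE_STRUCT;
  ffi_type_Size2.size = 0;                    /* 0 makes ffi_prep_cif fill these in */
  ffi_type_Size2.alignment = 0;
  ffi_type_Size2.elements = size2_elements;

  out = stdout;
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_Size2, noargs) != FFI_OK)
    return 1;
  ffi_call(&cif, FFI_FN(S2_v), &r, NULL);     /* two-char struct returned into r */
  printf("->{%c%c}\n", r.x1, r.x2);
  return 0;
}
#endif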
+/* small structure return tests */ +Size1 ABI_ATTR S1_v (void) +{ + fprintf(out,"Size1 f(void):"); + fflush(out); + return Size1_1; +} +Size2 ABI_ATTR S2_v (void) +{ + fprintf(out,"Size2 f(void):"); + fflush(out); + return Size2_1; +} +Size3 ABI_ATTR S3_v (void) +{ + fprintf(out,"Size3 f(void):"); + fflush(out); + return Size3_1; +} +Size4 ABI_ATTR S4_v (void) +{ + fprintf(out,"Size4 f(void):"); + fflush(out); + return Size4_1; +} +Size7 ABI_ATTR S7_v (void) +{ + fprintf(out,"Size7 f(void):"); + fflush(out); + return Size7_1; +} +Size8 ABI_ATTR S8_v (void) +{ + fprintf(out,"Size8 f(void):"); + fflush(out); + return Size8_1; +} +Size12 ABI_ATTR S12_v (void) +{ + fprintf(out,"Size12 f(void):"); + fflush(out); + return Size12_1; +} +Size15 ABI_ATTR S15_v (void) +{ + fprintf(out,"Size15 f(void):"); + fflush(out); + return Size15_1; +} +Size16 ABI_ATTR S16_v (void) +{ + fprintf(out,"Size16 f(void):"); + fflush(out); + return Size16_1; +} + +/* structure tests */ +Int ABI_ATTR I_III (Int a, Int b, Int c) +{ + Int r; + r.x = a.x + b.x + c.x; + fprintf(out,"Int f(Int,Int,Int):({%d},{%d},{%d})",a.x,b.x,c.x); + fflush(out); + return r; +} +Char ABI_ATTR C_CdC (Char a, double b, Char c) +{ + Char r; + r.x = (a.x + c.x)/2; + fprintf(out,"Char f(Char,double,Char):({'%c'},%g,{'%c'})",a.x,b,c.x); + fflush(out); + return r; +} +Float ABI_ATTR F_Ffd (Float a, float b, double c) +{ + Float r; + r.x = (float) (a.x + b + c); + fprintf(out,"Float f(Float,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +Double ABI_ATTR D_fDd (float a, Double b, double c) +{ + Double r; + r.x = a + b.x + c; + fprintf(out,"Double f(float,Double,double):(%g,{%g},%g)",a,b.x,c); + fflush(out); + return r; +} +Double ABI_ATTR D_Dfd (Double a, float b, double c) +{ + Double r; + r.x = a.x + b + c; + fprintf(out,"Double f(Double,float,double):({%g},%g,%g)",a.x,b,c); + fflush(out); + return r; +} +J ABI_ATTR J_JiJ (J a, int b, J c) +{ + J r; + r.l1 = a.l1+c.l1; r.l2 = a.l2+b+c.l2; + fprintf(out,"J f(J,int,J):({%ld,%ld},%d,{%ld,%ld})",a.l1,a.l2,b,c.l1,c.l2); + fflush(out); + return r; +} +T ABI_ATTR T_TcT (T a, char b, T c) +{ + T r; + r.c[0]='b'; r.c[1]=c.c[1]; r.c[2]=c.c[2]; + fprintf(out,"T f(T,char,T):({\"%c%c%c\"},'%c',{\"%c%c%c\"})",a.c[0],a.c[1],a.c[2],b,c.c[0],c.c[1],c.c[2]); + fflush(out); + return r; +} +X ABI_ATTR X_BcdB (B a, char b, double c, B d) +{ + static X xr={"return val",'R'}; + X r; + r = xr; + r.c1 = b; + fprintf(out,"X f(B,char,double,B):({%g,{%d,%d,%d}},'%c',%g,{%g,{%d,%d,%d}})", + a.d,a.i[0],a.i[1],a.i[2],b,c,d.d,d.i[0],d.i[1],d.i[2]); + fflush(out); + return r; +} + +/* Test for cases where some argument (especially structure, 'long long', or + 'double') may be passed partially in general-purpose argument registers + and partially on the stack. Different ABIs pass between 4 and 8 arguments + (or none) in general-purpose argument registers. 
*/
+
+long ABI_ATTR l_l0K (K b, long c)
+{
+  long r = b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(K,long):(%ld,%ld,%ld,%ld,%ld)",b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+long ABI_ATTR l_l1K (long a1, K b, long c)
+{
+  long r = a1 + b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld)",a1,b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+long ABI_ATTR l_l2K (long a1, long a2, K b, long c)
+{
+  long r = a1 + a2 + b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(2*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+long ABI_ATTR l_l3K (long a1, long a2, long a3, K b, long c)
+{
+  long r = a1 + a2 + a3 + b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(3*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+long ABI_ATTR l_l4K (long a1, long a2, long a3, long a4, K b, long c)
+{
+  long r = a1 + a2 + a3 + a4 + b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(4*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+long ABI_ATTR l_l5K (long a1, long a2, long a3, long a4, long a5, K b, long c)
+{
+  long r = a1 + a2 + a3 + a4 + a5 + b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(5*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+long ABI_ATTR l_l6K (long a1, long a2, long a3, long a4, long a5, long a6, K b, long c)
+{
+  long r = a1 + a2 + a3 + a4 + a5 + a6 + b.l1 + b.l2 + b.l3 + b.l4 + c;
+  fprintf(out,"long f(6*long,K,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a1,a2,a3,a4,a5,a6,b.l1,b.l2,b.l3,b.l4,c);
+  fflush(out);
+  return r;
+}
+/* These tests are crafted on the knowledge that for all known ABIs:
+ * 17 > number of floating-point argument registers,
+ * 3 < number of general-purpose argument registers < 3 + 6.
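 * Consequently, the 17 floats/doubles in f_f17l3L and d_d17l3L below always
 * overflow the floating-point argument registers, and after the three leading
 * 'long' arguments at most five general-purpose registers remain -- too few
 * for the six-long struct L -- so some of these arguments always end up on
 * the stack.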
*/ +float ABI_ATTR f_f17l3L (float a, float b, float c, float d, float e, float f, float g, + float h, float i, float j, float k, float l, float m, float n, + float o, float p, float q, + long s, long t, long u, L z) +{ + float r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"float f(17*float,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} +double ABI_ATTR d_d17l3L (double a, double b, double c, double d, double e, double f, + double g, double h, double i, double j, double k, double l, + double m, double n, double o, double p, double q, + long s, long t, long u, L z) +{ + double r = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+s+t+u+z.l1+z.l2+z.l3+z.l4+z.l5+z.l6; + fprintf(out,"double f(17*double,3*int,L):(%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld)",a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,s,t,u,z.l1,z.l2,z.l3,z.l4,z.l5,z.l6); + fflush(out); + return r; +} + +long long ABI_ATTR ll_l2ll (long a1, long a2, long long b, long c) +{ + long long r = (long long) (a1 + a2) + b + c; + fprintf(out,"long long f(2*long,long long,long):(%ld,%ld,0x%lx%08lx,%ld)",a1,a2,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l3ll (long a1, long a2, long a3, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3) + b + c; + fprintf(out,"long long f(3*long,long long,long):(%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l4ll (long a1, long a2, long a3, long a4, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"long long f(4*long,long long,long):(%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l5ll (long a1, long a2, long a3, long a4, long a5, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"long long f(5*long,long long,long):(%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l6ll (long a1, long a2, long a3, long a4, long a5, long a6, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"long long f(6*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} +long long ABI_ATTR ll_l7ll (long a1, long a2, long a3, long a4, long a5, long a6, long a7, long long b, long c) +{ + long long r = (long long) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"long long f(7*long,long long,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,0x%lx%08lx,%ld)",a1,a2,a3,a4,a5,a6,a7,(long)(b>>32),(long)(b&0xffffffff),c); + fflush(out); + return r; +} + +double ABI_ATTR d_l2d (long a1, long a2, double b, long c) +{ + double r = (double) (a1 + a2) + b + c; + fprintf(out,"double f(2*long,double,long):(%ld,%ld,%g,%ld)",a1,a2,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l3d (long a1, long a2, long a3, double b, long c) +{ + double r = (double) (a1 + a2 + a3) + b + c; + fprintf(out,"double f(3*long,double,long):(%ld,%ld,%ld,%g,%ld)",a1,a2,a3,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l4d (long a1, long a2, long a3, long a4, double b, long c) +{ + double r = 
(double) (a1 + a2 + a3 + a4) + b + c; + fprintf(out,"double f(4*long,double,long):(%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l5d (long a1, long a2, long a3, long a4, long a5, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5) + b + c; + fprintf(out,"double f(5*long,double,long):(%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l6d (long a1, long a2, long a3, long a4, long a5, long a6, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6) + b + c; + fprintf(out,"double f(6*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,b,c); + fflush(out); + return r; +} +double ABI_ATTR d_l7d (long a1, long a2, long a3, long a4, long a5, long a6, long a7, double b, long c) +{ + double r = (double) (a1 + a2 + a3 + a4 + a5 + a6 + a7) + b + c; + fprintf(out,"double f(7*long,double,long):(%ld,%ld,%ld,%ld,%ld,%ld,%ld,%g,%ld)",a1,a2,a3,a4,a5,a6,a7,b,c); + fflush(out); + return r; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c new file mode 100644 index 0000000..5d4959c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/align_mixed.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. + Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]); + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c new file mode 100644 index 0000000..5e5cb86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/align_stdcall.c @@ -0,0 +1,46 @@ +/* Area: ffi_call + Purpose: Check for proper argument alignment. + Limitations: none. + PR: none. 
+ Originator: (from many_win32.c) */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static float ABI_ATTR align_arguments(int i1, + double f2, + int i3, + double f4) +{ + return i1+f2+i3+f4; +} + +int main(void) +{ + ffi_cif cif; + ffi_type *args[4] = { + &ffi_type_sint, + &ffi_type_double, + &ffi_type_sint, + &ffi_type_double + }; + double fa[2] = {1,2}; + int ia[2] = {1,2}; + void *values[4] = {&ia[0], &fa[0], &ia[1], &fa[1]}; + float f, ff; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_float, args) == FFI_OK); + + ff = align_arguments(ia[0], fa[0], ia[1], fa[1]);; + + ffi_call(&cif, FFI_FN(align_arguments), &f, values); + + if (f == ff) + printf("align arguments tests ok!\n"); + else + CHECK(0); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/call.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/call.exp new file mode 100644 index 0000000..13ba2bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/call.exp @@ -0,0 +1,54 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . + +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +run-many-tests $tlist $additional_options + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + run-many-tests $tlist $additional_options +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c new file mode 100644 index 0000000..bf60161 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/err_bad_typedef.c @@ -0,0 +1,26 @@ +/* Area: ffi_prep_cif + Purpose: Test error return for bad typedefs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +int main (void) +{ + ffi_cif cif; + ffi_type* arg_types[1]; + + ffi_type badType = ffi_type_void; + + arg_types[0] = NULL; + + badType.size = 0; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &badType, + arg_types) == FFI_BAD_TYPEDEF); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/ffitest.h @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include +#endif + +#if defined HAVE_INTTYPES_H +#include +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match , which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in . */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match , which has "u" here, but the arguments + are uintptr_t, which is always unsigned long. */ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. 
*/ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float.c new file mode 100644 index 0000000..fbc272d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int floating(int a, float b, double c, long double d) +{ + int i; + + i = (int) ((float)a/b + ((float)c/(float)d)); + + return i; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + float f; + signed int si1; + double d; + long double ld; + + args[0] = &ffi_type_sint; + values[0] = &si1; + args[1] = &ffi_type_float; + values[1] = &f; + args[2] = &ffi_type_double; + values[2] = &d; + args[3] = &ffi_type_longdouble; + values[3] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + si1 = 6; + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating (si1, f, d, ld); + + ffi_call(&cif, FFI_FN(floating), &rint, values); + + printf ("%d vs %d\n", (int)rint, floating (si1, f, d, ld)); + + CHECK((int)rint == floating(si1, f, d, ld)); + + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float1.c new file mode 100644 index 0000000..c48493c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float1.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +#include "float.h" + +#include + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(float f) +{ + return f/3.0; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* These are not always the same!! Check for a reasonable delta */ + + CHECK(fabs(result[0].d - dblit(f)) < DBL_EPSILON); + + /* Check the canary. 
*/ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float2.c new file mode 100644 index 0000000..57cd9e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float2.c @@ -0,0 +1,61 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static long double ldblit(float f) +{ + return (long double) (((long double) f)/ (long double) 3.0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float f; + long double ld; + long double original; + + args[0] = &ffi_type_float; + values[0] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_longdouble, args) == FFI_OK); + + f = 3.14159; + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf\n", ldblit(f)); +#endif + + ld = 666; + ffi_call(&cif, FFI_FN(ldblit), &ld, values); + +#if defined(__sun) && defined(__GNUC__) + /* long double support under SunOS/gcc is pretty much non-existent. + You'll get the odd bus error in library routines like printf() */ +#else + printf ("%Lf, %Lf, %Lf, %Lf\n", ld, ldblit(f), ld - ldblit(f), LDBL_EPSILON); +#endif + + /* These are not always the same!! Check for a reasonable delta */ + original = ldblit(f); + if (((ld > original) ? (ld - original) : (original - ld)) < LDBL_EPSILON) + puts("long double return value tests ok!"); + else + CHECK(0); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float3.c new file mode 100644 index 0000000..bab3206 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float3.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check float arguments with different orders. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "float.h" + +#include + +static double floating_1(float a, double b, long double c) +{ + return (double) a + b + (double) c; +} + +static double floating_2(long double a, double b, float c) +{ + return (double) a + b + (double) c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double rd; + + float f; + double d; + long double ld; + + args[0] = &ffi_type_float; + values[0] = &f; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_longdouble; + values[2] = &ld; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + f = 3.14159; + d = (double)1.0/(double)3.0; + ld = 2.71828182846L; + + floating_1 (f, d, ld); + + ffi_call(&cif, FFI_FN(floating_1), &rd, values); + + CHECK(fabs(rd - floating_1(f, d, ld)) < DBL_EPSILON); + + args[0] = &ffi_type_longdouble; + values[0] = &ld; + args[1] = &ffi_type_double; + values[1] = &d; + args[2] = &ffi_type_float; + values[2] = &f; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_double, args) == FFI_OK); + + floating_2 (ld, d, f); + + ffi_call(&cif, FFI_FN(floating_2), &rd, values); + + CHECK(fabs(rd - floating_2(ld, d, f)) < DBL_EPSILON); + + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float4.c new file mode 100644 index 0000000..0dd6d85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float4.c @@ -0,0 +1,62 @@ +/* Area: ffi_call + Purpose: Check denorm double value. + Limitations: none. + PR: PR26483. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +/* { dg-options "-mieee" { target alpha*-*-* } } */ + +#include "ffitest.h" +#include "float.h" + +typedef union +{ + double d; + unsigned char c[sizeof (double)]; +} value_type; + +#define CANARY 0xba + +static double dblit(double d) +{ + return d; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double d; + value_type result[2]; + unsigned int i; + + args[0] = &ffi_type_double; + values[0] = &d; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + d = DBL_MIN / 2; + + /* Put a canary in the return array. This is a regression test for + a buffer overrun. */ + memset(result[1].c, CANARY, sizeof (double)); + + ffi_call(&cif, FFI_FN(dblit), &result[0].d, values); + + /* The standard delta check doesn't work for denorms. Since we didn't do + any arithmetic, we should get the original result back, and hence an + exact check should be OK here. */ + + CHECK(result[0].d == dblit(d)); + + /* Check the canary. 
*/ + for (i = 0; i < sizeof (double); ++i) + CHECK(result[1].c[i] == CANARY); + + exit(0); + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float_va.c new file mode 100644 index 0000000..5acff91 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/float_va.c @@ -0,0 +1,107 @@ +/* Area: fp and variadics + Purpose: check fp inputs and returns work on variadics, even the fixed params + Limitations: None + PR: none + Originator: 2011-01-25 + + Intended to stress the difference in ABI on ARM vfp +*/ + +/* { dg-do run } */ + +#include + +#include "ffitest.h" + +/* prints out all the parameters, and returns the sum of them all. + * 'x' is the number of variadic parameters all of which are double in this test + */ +double float_va_fn(unsigned int x, double y,...) +{ + double total=0.0; + va_list ap; + unsigned int i; + + total+=(double)x; + total+=y; + + printf("%u: %.1f :", x, y); + + va_start(ap, y); + for(i=0;i +#include +#include + +static float ABI_ATTR many(float f1, float f2, float f3, float f4, float f5, float f6, float f7, float f8, float f9, float f10, float f11, float f12, float f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + float fa[13]; + float f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_float; + values[i] = &fa[i]; + fa[i] = (float) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 13, + &ffi_type_float, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many2.c new file mode 100644 index 0000000..1c85746 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many2.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check uint8_t arguments. + Limitations: none. + PR: PR45677. 
+ Originator: Dan Witte 20100916 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +#define NARGS 7 + +typedef unsigned char u8; + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +uint8_t +foo (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return a + b + c + d + e + f + g; +} + +uint8_t ABI_ATTR +bar (uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g) +{ + return foo (a, b, c, d, e, f, g); +} + +int +main (void) +{ + ffi_type *ffitypes[NARGS]; + int i; + ffi_cif cif; + ffi_arg result = 0; + uint8_t args[NARGS]; + void *argptrs[NARGS]; + + for (i = 0; i < NARGS; ++i) + ffitypes[i] = &ffi_type_uint8; + + CHECK (ffi_prep_cif (&cif, ABI_NUM, NARGS, + &ffi_type_uint8, ffitypes) == FFI_OK); + + for (i = 0; i < NARGS; ++i) + { + args[i] = i; + argptrs[i] = &args[i]; + } + ffi_call (&cif, FFI_FN (bar), &result, argptrs); + + CHECK (result == 21); + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c new file mode 100644 index 0000000..4ef8c8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many_double.c @@ -0,0 +1,70 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + double f3, + double f4, + double f5, + double f6, + double f7, + double f8, + double f9, + double f10, + double f11, + double f12, + double f13) +{ +#if 0 + printf("%f %f %f %f %f %f %f %f %f %f %f %f %f\n", + (double) f1, (double) f2, (double) f3, (double) f4, (double) f5, + (double) f6, (double) f7, (double) f8, (double) f9, (double) f10, + (double) f11, (double) f12, (double) f13); +#endif + + return ((f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + double fa[13]; + double f, ff; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &ffi_type_double; + values[i] = &fa[i]; + fa[i] = (double) i; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], + fa[2], fa[3], + fa[4], fa[5], + fa[6], fa[7], + fa[8], fa[9], + fa[10],fa[11],fa[12]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c new file mode 100644 index 0000000..85ec36e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/many_mixed.c @@ -0,0 +1,78 @@ +/* Area: ffi_call + Purpose: Check return value double, with many arguments + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +#include +#include +#include + +static double many(double f1, + double f2, + long int i1, + double f3, + double f4, + long int i2, + double f5, + double f6, + long int i3, + double f7, + double f8, + long int i4, + double f9, + double f10, + long int i5, + double f11, + double f12, + long int i6, + double f13) +{ + return ((double) (i1 + i2 + i3 + i4 + i5 + i6) + (f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[19]; + void *values[19]; + double fa[19]; + long int la[19]; + double f, ff; + int i; + + for (i = 0; i < 19; i++) + { + if( (i - 2) % 3 == 0) { + args[i] = &ffi_type_slong; + la[i] = (long int) i; + values[i] = &la[i]; + } + else { + args[i] = &ffi_type_double; + fa[i] = (double) i; + values[i] = &fa[i]; + } + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 19, + &ffi_type_double, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &f, values); + + ff = many(fa[0], fa[1], la[2], + fa[3], fa[4], la[5], + fa[6], fa[7], la[8], + fa[9], fa[10], la[11], + fa[12], fa[13], la[14], + fa[15], fa[16], la[17], + fa[18]); + if (fabs(f - ff) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/negint.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/negint.c new file mode 100644 index 0000000..6e2f26f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/negint.c @@ -0,0 +1,52 @@ +/* Area: ffi_call + Purpose: Check that negative integers are passed correctly. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static int checking(int a, short b, signed char c) +{ + + return (a < 0 && b < 0 && c < 0); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + + checking (si, ss, sc); + + ffi_call(&cif, FFI_FN(checking), &rint, values); + + printf ("%d vs %d\n", (int)rint, checking (si, ss, sc)); + + CHECK(rint != 0); + + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c new file mode 100644 index 0000000..23d88b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/offsets.c @@ -0,0 +1,46 @@ +/* Area: Struct layout + Purpose: Test ffi_get_struct_offsets + Limitations: none. + PR: none. + Originator: Tom Tromey. 
*/ + +/* { dg-do run } */ +#include "ffitest.h" +#include + +struct test_1 +{ + char c; + float f; + char c2; + int i; +}; + +int +main (void) +{ + ffi_type test_1_type; + ffi_type *test_1_elements[5]; + size_t test_1_offsets[4]; + + test_1_elements[0] = &ffi_type_schar; + test_1_elements[1] = &ffi_type_float; + test_1_elements[2] = &ffi_type_schar; + test_1_elements[3] = &ffi_type_sint; + test_1_elements[4] = NULL; + + test_1_type.size = 0; + test_1_type.alignment = 0; + test_1_type.type = FFI_TYPE_STRUCT; + test_1_type.elements = test_1_elements; + + CHECK (ffi_get_struct_offsets (FFI_DEFAULT_ABI, &test_1_type, test_1_offsets) + == FFI_OK); + CHECK (test_1_type.size == sizeof (struct test_1)); + CHECK (offsetof (struct test_1, c) == test_1_offsets[0]); + CHECK (offsetof (struct test_1, f) == test_1_offsets[1]); + CHECK (offsetof (struct test_1, c2) == test_1_offsets[2]); + CHECK (offsetof (struct test_1, i) == test_1_offsets[3]); + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c new file mode 100644 index 0000000..7da1621 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/pr1172638.c @@ -0,0 +1,127 @@ +/* Area: ffi_call + Purpose: Reproduce bug found in python ctypes + Limitations: none. + PR: Fedora 1174037 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct { + long x; + long y; +} POINT; + +typedef struct { + long left; + long top; + long right; + long bottom; +} RECT; + +static RECT ABI_ATTR pr_test(int i __UNUSED__, RECT ar __UNUSED__, + RECT* br __UNUSED__, POINT cp __UNUSED__, + RECT dr __UNUSED__, RECT *er __UNUSED__, + POINT fp, RECT gr __UNUSED__) +{ + RECT result; + + result.left = fp.x; + result.right = fp.y; + result.top = fp.x; + result.bottom = fp.y; + + return result; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type point_type, rect_type; + ffi_type *point_type_elements[3]; + ffi_type *rect_type_elements[5]; + + int i; + POINT cp, fp; + RECT ar, br, dr, er, gr; + RECT *p1, *p2; + + /* This is a hack to get a properly aligned result buffer */ + RECT *rect_result = + (RECT *) malloc (sizeof(RECT)); + + point_type.size = 0; + point_type.alignment = 0; + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = point_type_elements; + point_type_elements[0] = &ffi_type_slong; + point_type_elements[1] = &ffi_type_slong; + point_type_elements[2] = NULL; + + rect_type.size = 0; + rect_type.alignment = 0; + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = rect_type_elements; + rect_type_elements[0] = &ffi_type_slong; + rect_type_elements[1] = &ffi_type_slong; + rect_type_elements[2] = &ffi_type_slong; + rect_type_elements[3] = &ffi_type_slong; + rect_type_elements[4] = NULL; + + args[0] = &ffi_type_sint; + args[1] = &rect_type; + args[2] = &ffi_type_pointer; + args[3] = &point_type; + args[4] = &rect_type; + args[5] = &ffi_type_pointer; + args[6] = &point_type; + args[7] = &rect_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 8, &rect_type, args) == FFI_OK); + + i = 1; + ar.left = 2; + ar.right = 3; + ar.top = 4; + ar.bottom = 5; + br.left = 6; + br.right = 7; + br.top = 8; + br.bottom = 9; + cp.x = 10; + cp.y = 11; + dr.left = 12; + dr.right = 13; + dr.top = 14; + dr.bottom = 15; + er.left = 16; + er.right = 17; + er.top = 18; + er.bottom = 19; + fp.x = 20; + fp.y = 
21; + gr.left = 22; + gr.right = 23; + gr.top = 24; + gr.bottom = 25; + + values[0] = &i; + values[1] = &ar; + p1 = &br; + values[2] = &p1; + values[3] = &cp; + values[4] = &dr; + p2 = &er; + values[5] = &p2; + values[6] = &fp; + values[7] = &gr; + + ffi_call (&cif, FFI_FN(pr_test), rect_result, values); + + CHECK(rect_result->top == 20); + + free (rect_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c new file mode 100644 index 0000000..4456161 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/promotion.c @@ -0,0 +1,59 @@ +/* Area: ffi_call + Purpose: Promotion test. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +static int promotion(signed char sc, signed short ss, + unsigned char uc, unsigned short us) +{ + int r = (int) sc + (int) ss + (int) uc + (int) us; + + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + signed char sc; + unsigned char uc; + signed short ss; + unsigned short us; + unsigned long ul; + + args[0] = &ffi_type_schar; + args[1] = &ffi_type_sshort; + args[2] = &ffi_type_uchar; + args[3] = &ffi_type_ushort; + values[0] = ≻ + values[1] = &ss; + values[2] = &uc; + values[3] = &us; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sint, args) == FFI_OK); + + us = 0; + ul = 0; + + for (sc = (signed char) -127; + sc <= (signed char) 120; sc += 1) + for (ss = -30000; ss <= 30000; ss += 10000) + for (uc = (unsigned char) 0; + uc <= (unsigned char) 200; uc += 20) + for (us = 0; us <= 60000; us += 10000) + { + ul++; + ffi_call(&cif, FFI_FN(promotion), &rint, values); + CHECK((int)rint == (signed char) sc + (signed short) ss + + (unsigned char) uc + (unsigned short) us); + } + printf("%lu promotion tests run\n", ul); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c new file mode 100644 index 0000000..e29bd6c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/pyobjc-tc.c @@ -0,0 +1,114 @@ +/* Area: ffi_call + Purpose: Check different structures. + Limitations: none. + PR: none. 
+ Originator: Ronald Oussoren 20030824 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct Point { + float x; + float y; +} Point; + +typedef struct Size { + float h; + float w; +} Size; + +typedef struct Rect { + Point o; + Size s; +} Rect; + +int doit(int o, char* s, Point p, Rect r, int last) +{ + printf("CALLED WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, s, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, last); + return 42; +} + + +int main(void) +{ + ffi_type point_type; + ffi_type size_type; + ffi_type rect_type; + ffi_cif cif; + ffi_type* arglist[6]; + void* values[6]; + int r; + + /* + * First set up FFI types for the 3 struct types + */ + + point_type.size = 0; /*sizeof(Point);*/ + point_type.alignment = 0; /*__alignof__(Point);*/ + point_type.type = FFI_TYPE_STRUCT; + point_type.elements = malloc(3 * sizeof(ffi_type*)); + point_type.elements[0] = &ffi_type_float; + point_type.elements[1] = &ffi_type_float; + point_type.elements[2] = NULL; + + size_type.size = 0;/* sizeof(Size);*/ + size_type.alignment = 0;/* __alignof__(Size);*/ + size_type.type = FFI_TYPE_STRUCT; + size_type.elements = malloc(3 * sizeof(ffi_type*)); + size_type.elements[0] = &ffi_type_float; + size_type.elements[1] = &ffi_type_float; + size_type.elements[2] = NULL; + + rect_type.size = 0;/*sizeof(Rect);*/ + rect_type.alignment =0;/* __alignof__(Rect);*/ + rect_type.type = FFI_TYPE_STRUCT; + rect_type.elements = malloc(3 * sizeof(ffi_type*)); + rect_type.elements[0] = &point_type; + rect_type.elements[1] = &size_type; + rect_type.elements[2] = NULL; + + /* + * Create a CIF + */ + arglist[0] = &ffi_type_sint; + arglist[1] = &ffi_type_pointer; + arglist[2] = &point_type; + arglist[3] = &rect_type; + arglist[4] = &ffi_type_sint; + arglist[5] = NULL; + + r = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, + 5, &ffi_type_sint, arglist); + if (r != FFI_OK) { + abort(); + } + + + /* And call the function through the CIF */ + + { + Point p = { 1.0, 2.0 }; + Rect r = { { 9.0, 10.0}, { -1.0, -2.0 } }; + int o = 0; + int l = 42; + char* m = "myMethod"; + ffi_arg result; + + values[0] = &o; + values[1] = &m; + values[2] = &p; + values[3] = &r; + values[4] = &l; + values[5] = NULL; + + printf("CALLING WITH %d %s {%f %f} {{%f %f} {%f %f}} %d\n", + o, m, p.x, p.y, r.o.x, r.o.y, r.s.h, r.s.w, l); + + ffi_call(&cif, FFI_FN(doit), &result, values); + + printf ("The result is %d\n", (int)result); + + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c new file mode 100644 index 0000000..fd07e50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl) +{ + printf ("%f\n", dbl); + return 2 * dbl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl, rdbl; + + args[0] = &ffi_type_double; + values[0] = &dbl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, args) == FFI_OK); + + for (dbl = -127.3; dbl < 127; dbl++) + { + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl)); + CHECK(rdbl == 2 * dbl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c new file mode 100644 index 0000000..0ea5d50 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl1.c @@ -0,0 +1,43 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, float fl2, unsigned int in3, double dbl4) +{ + return dbl1 + fl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl4, rdbl; + float fl2; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + fl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, fl2, in3, dbl4)); + CHECK(rdbl == dbl1 + fl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c new file mode 100644 index 0000000..b3818f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_dbl2.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value double. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static double return_dbl(double dbl1, double dbl2, unsigned int in3, double dbl4) +{ + return dbl1 + dbl2 + in3 + dbl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + double dbl1, dbl2, dbl4, rdbl; + unsigned int in3; + args[0] = &ffi_type_double; + args[1] = &ffi_type_double; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_double; + values[0] = &dbl1; + values[1] = &dbl2; + values[2] = &in3; + values[3] = &dbl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_double, args) == FFI_OK); + dbl1 = 127.0; + dbl2 = 128.0; + in3 = 255; + dbl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values); + printf ("%f vs %f\n", rdbl, return_dbl(dbl1, dbl2, in3, dbl4)); + CHECK(rdbl == dbl1 + dbl2 + in3 + dbl4); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c new file mode 100644 index 0000000..fb8a09e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl.c @@ -0,0 +1,35 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl) +{ + return 2 * fl; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl, rfl; + + args[0] = &ffi_type_float; + values[0] = &fl; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, args) == FFI_OK); + + for (fl = -127.0; fl < 127; fl++) + { + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl)); + CHECK(rfl == 2 * fl); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c new file mode 100644 index 0000000..c3d92c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl1.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2) +{ + return fl1 + fl2; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, rfl; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2)); + CHECK(rfl == fl1 + fl2); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c new file mode 100644 index 0000000..ddb976c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. 
+ Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +/* Use volatile float to avoid false negative on ix86. See PR target/323. */ +static float return_fl(float fl1, float fl2, float fl3, float fl4) +{ + volatile float sum; + + sum = fl1 + fl2 + fl3 + fl4; + return sum; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl3, fl4, rfl; + volatile float sum; + + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_float; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &fl3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + fl3 = 255.1; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, fl3, fl4)); + + sum = fl1 + fl2 + fl3 + fl4; + CHECK(rfl == sum); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c new file mode 100644 index 0000000..c37877b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_fl3.c @@ -0,0 +1,42 @@ +/* Area: ffi_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20050212 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static float return_fl(float fl1, float fl2, unsigned int in3, float fl4) +{ + return fl1 + fl2 + in3 + fl4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + float fl1, fl2, fl4, rfl; + unsigned int in3; + args[0] = &ffi_type_float; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &ffi_type_float; + values[0] = &fl1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &fl4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_float, args) == FFI_OK); + fl1 = 127.0; + fl2 = 128.0; + in3 = 255; + fl4 = 512.7; + + ffi_call(&cif, FFI_FN(return_fl), &rfl, values); + printf ("%f vs %f\n", rfl, return_fl(fl1, fl2, in3, fl4)); + CHECK(rfl == fl1 + fl2 + in3 + fl4); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c new file mode 100644 index 0000000..52a92fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ldl.c @@ -0,0 +1,34 @@ +/* Area: ffi_call + Purpose: Check return value long double. + Limitations: none. + PR: none. 
+   Originator: 20071113  */
+/* { dg-do run } */
+
+#include "ffitest.h"
+
+static long double return_ldl(long double ldl)
+{
+  return 2*ldl;
+}
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  long double ldl, rldl;
+
+  args[0] = &ffi_type_longdouble;
+  values[0] = &ldl;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+		     &ffi_type_longdouble, args) == FFI_OK);
+
+  for (ldl = -127.0; ldl < 127.0; ldl++)
+    {
+      ffi_call(&cif, FFI_FN(return_ldl), &rldl, values);
+      CHECK(rldl == 2 * ldl);
+    }
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c
new file mode 100644
index 0000000..ea4a1e4
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ll.c
@@ -0,0 +1,41 @@
+/* Area: ffi_call
+   Purpose: Check return value long long.
+   Limitations: none.
+   PR: none.
+   Originator: From the original ffitest.c  */
+
+/* { dg-do run } */
+#include "ffitest.h"
+static long long return_ll(long long ll)
+{
+  return ll;
+}
+
+int main (void)
+{
+  ffi_cif cif;
+  ffi_type *args[MAX_ARGS];
+  void *values[MAX_ARGS];
+  long long rlonglong;
+  long long ll;
+
+  args[0] = &ffi_type_sint64;
+  values[0] = &ll;
+
+  /* Initialize the cif */
+  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+		     &ffi_type_sint64, args) == FFI_OK);
+
+  for (ll = 0LL; ll < 100LL; ll++)
+    {
+      ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values);
+      CHECK(rlonglong == ll);
+    }
+
+  for (ll = 55555555555000LL; ll < 55555555555100LL; ll++)
+    {
+      ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values);
+      CHECK(rlonglong == ll);
+    }
+  exit(0);
+}
diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c
new file mode 100644
index 0000000..593e8a3
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ll1.c
@@ -0,0 +1,43 @@
+/* Area: ffi_call
+   Purpose: Check if long long are passed in the corresponding regs on ppc.
+   Limitations: none.
+   PR: 20104.
+ Originator: 20050222 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" +static long long return_ll(int ll0, long long ll1, int ll2) +{ + return ll0 + ll1 + ll2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + long long rlonglong; + long long ll1; + unsigned ll0, ll2; + + args[0] = &ffi_type_sint; + args[1] = &ffi_type_sint64; + args[2] = &ffi_type_sint; + values[0] = &ll0; + values[1] = &ll1; + values[2] = &ll2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint64, args) == FFI_OK); + + ll0 = 11111111; + ll1 = 11111111111000LL; + ll2 = 11111111; + + ffi_call(&cif, FFI_FN(return_ll), &rlonglong, values); + printf("res: %" PRIdLL ", %" PRIdLL "\n", rlonglong, ll0 + ll1 + ll2); + /* { dg-output "res: 11111133333222, 11111133333222" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c new file mode 100644 index 0000000..a36cf3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_sc.c @@ -0,0 +1,36 @@ +/* Area: ffi_call + Purpose: Check return value signed char. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static signed char return_sc(signed char sc) +{ + return sc; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + signed char sc; + + args[0] = &ffi_type_schar; + values[0] = ≻ + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, args) == FFI_OK); + + for (sc = (signed char) -127; + sc < (signed char) 127; sc++) + { + ffi_call(&cif, FFI_FN(return_sc), &rint, values); + CHECK((signed char)rint == sc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c new file mode 100644 index 0000000..f0fd345 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_sl.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if long as return type is handled correctly. + Limitations: none. + PR: none. + */ + +/* { dg-do run } */ +#include "ffitest.h" +static long return_sl(long l1, long l2) +{ + return l1 - l2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long l1, l2; + + args[0] = &ffi_type_slong; + args[1] = &ffi_type_slong; + values[0] = &l1; + values[1] = &l2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_slong, args) == FFI_OK); + + l1 = 1073741823L; + l2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_sl), &res, values); + printf("res: %ld, %ld\n", (long)res, l1 - l2); + /* { dg-output "res: -1, -1" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c new file mode 100644 index 0000000..6fe5546 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_uc.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check return value unsigned char. 
+ Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static unsigned char return_uc(unsigned char uc) +{ + return uc; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + unsigned char uc; + + args[0] = &ffi_type_uchar; + values[0] = &uc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, args) == FFI_OK); + + for (uc = (unsigned char) '\x00'; + uc < (unsigned char) '\xff'; uc++) + { + ffi_call(&cif, FFI_FN(return_uc), &rint, values); + CHECK((unsigned char)rint == uc); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c new file mode 100644 index 0000000..12b266f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/return_ul.c @@ -0,0 +1,38 @@ +/* Area: ffi_call + Purpose: Check if unsigned long as return type is handled correctly. + Limitations: none. + PR: none. + Originator: 20060724 */ + +/* { dg-do run } */ +#include "ffitest.h" +static unsigned long return_ul(unsigned long ul1, unsigned long ul2) +{ + return ul1 + ul2; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg res; + unsigned long ul1, ul2; + + args[0] = &ffi_type_ulong; + args[1] = &ffi_type_ulong; + values[0] = &ul1; + values[1] = &ul2; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ulong, args) == FFI_OK); + + ul1 = 1073741823L; + ul2 = 1073741824L; + + ffi_call(&cif, FFI_FN(return_ul), &res, values); + printf("res: %lu, %lu\n", (unsigned long)res, ul1 + ul2); + /* { dg-output "res: 2147483647, 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c new file mode 100644 index 0000000..35b70ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen.c @@ -0,0 +1,44 @@ +/* Area: ffi_call + Purpose: Check strlen function call. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +static size_t ABI_ATTR my_strlen(char *s) +{ + return (strlen(s)); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + + args[0] = &ffi_type_pointer; + values[0] = (void*) &s; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 7); + + s = "1234567890123456789012345"; + ffi_call(&cif, FFI_FN(my_strlen), &rint, values); + CHECK(rint == 25); + + exit (0); +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c new file mode 100644 index 0000000..96282bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen2.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. 
+ PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(char *s, float a) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[0] = &ffi_type_pointer; + args[1] = &ffi_type_float; + values[0] = (void*) &s; + values[1] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c new file mode 100644 index 0000000..beba86e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen3.c @@ -0,0 +1,49 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s) +{ + return (size_t) ((int) strlen(s) + (int) a); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + float v2; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 1); + + s = "1234567"; + v2 = -1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 6); + + s = "1234567890123456789012345"; + v2 = 1.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 26); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c new file mode 100644 index 0000000..d5d42b4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/strlen4.c @@ -0,0 +1,55 @@ +/* Area: ffi_call + Purpose: Check strlen function call with additional arguments. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static size_t ABI_ATTR my_f(float a, char *s, int i) +{ + return (size_t) ((int) strlen(s) + (int) a + i); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + char *s; + int v1; + float v2; + args[2] = &ffi_type_sint; + args[1] = &ffi_type_pointer; + args[0] = &ffi_type_float; + values[2] = (void*) &v1; + values[1] = (void*) &s; + values[0] = (void*) &v2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 3, + &ffi_type_sint, args) == FFI_OK); + + s = "a"; + v1 = 1; + v2 = 0.0; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 2); + + s = "1234567"; + v2 = -1.0; + v1 = -2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 4); + + s = "1234567890123456789012345"; + v2 = 1.0; + v1 = 2; + ffi_call(&cif, FFI_FN(my_f), &rint, values); + CHECK(rint == 28); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c new file mode 100644 index 0000000..c13e23f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct1.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 ABI_ATTR struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + test_structure_1 ts1_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c new file mode 100644 index 0000000..17b1377 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct10.c @@ -0,0 +1,57 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: Sergei Trofimovich + + The test originally discovered in ruby's bindings + for ffi in https://bugs.gentoo.org/634190 */ + +/* { dg-do run } */ +#include "ffitest.h" + +struct s { + int s32; + float f32; + signed char s8; +}; + +struct s make_s(void) { + struct s r; + r.s32 = 0x1234; + r.f32 = 7.0; + r.s8 = 0x78; + return r; +} + +int main() { + ffi_cif cif; + struct s r; + ffi_type rtype; + ffi_type* s_fields[] = { + &ffi_type_sint, + &ffi_type_float, + &ffi_type_schar, + NULL, + }; + + rtype.size = 0; + rtype.alignment = 0, + rtype.type = FFI_TYPE_STRUCT, + rtype.elements = s_fields, + + r.s32 = 0xbad; + r.f32 = 999.999; + r.s8 = 0x51; + + // Here we emulate the following call: + //r = make_s(); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &rtype, NULL) == FFI_OK); + ffi_call(&cif, FFI_FN(make_s), &r, NULL); + + CHECK(r.s32 == 0x1234); + CHECK(r.f32 == 7.0); + CHECK(r.s8 == 0x78); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c new file mode 100644 index 0000000..5077a5e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct2.c @@ -0,0 +1,67 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + double d1; + double d2; +} test_structure_2; + +static test_structure_2 ABI_ATTR struct2(test_structure_2 ts) +{ + ts.d1--; + ts.d2--; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + test_structure_2 ts2_arg; + ffi_type ts2_type; + ffi_type *ts2_type_elements[3]; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_2 *ts2_result = + (test_structure_2 *) malloc (sizeof(test_structure_2)); + + ts2_type.size = 0; + ts2_type.alignment = 0; + ts2_type.type = FFI_TYPE_STRUCT; + ts2_type.elements = ts2_type_elements; + ts2_type_elements[0] = &ffi_type_double; + ts2_type_elements[1] = &ffi_type_double; + ts2_type_elements[2] = NULL; + + args[0] = &ts2_type; + values[0] = &ts2_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts2_type, args) == FFI_OK); + + ts2_arg.d1 = 5.55; + ts2_arg.d2 = 6.66; + + printf ("%g\n", ts2_arg.d1); + printf ("%g\n", ts2_arg.d2); + + ffi_call(&cif, FFI_FN(struct2), ts2_result, values); + + printf ("%g\n", ts2_result->d1); + printf ("%g\n", ts2_result->d2); + + CHECK(ts2_result->d1 == 5.55 - 1); + CHECK(ts2_result->d2 == 6.66 - 1); + + free (ts2_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c new file mode 100644 index 0000000..7eba0ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct3.c @@ -0,0 +1,60 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + int si; +} test_structure_3; + +static test_structure_3 ABI_ATTR struct3(test_structure_3 ts) +{ + ts.si = -(ts.si*2); + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + int compare_value; + ffi_type ts3_type; + ffi_type *ts3_type_elements[2]; + + test_structure_3 ts3_arg; + test_structure_3 *ts3_result = + (test_structure_3 *) malloc (sizeof(test_structure_3)); + + ts3_type.size = 0; + ts3_type.alignment = 0; + ts3_type.type = FFI_TYPE_STRUCT; + ts3_type.elements = ts3_type_elements; + ts3_type_elements[0] = &ffi_type_sint; + ts3_type_elements[1] = NULL; + + args[0] = &ts3_type; + values[0] = &ts3_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, + &ts3_type, args) == FFI_OK); + + ts3_arg.si = -123; + compare_value = ts3_arg.si; + + ffi_call(&cif, FFI_FN(struct3), ts3_result, values); + + printf ("%d %d\n", ts3_result->si, -(compare_value*2)); + + CHECK(ts3_result->si == -(compare_value*2)); + + free (ts3_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c new file mode 100644 index 0000000..66a9551 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct4.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned ui1; + unsigned ui2; + unsigned ui3; +} test_structure_4; + +static test_structure_4 ABI_ATTR struct4(test_structure_4 ts) +{ + ts.ui3 = ts.ui1 * ts.ui2 * ts.ui3; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts4_type; + ffi_type *ts4_type_elements[4]; + + test_structure_4 ts4_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_4 *ts4_result = + (test_structure_4 *) malloc (sizeof(test_structure_4)); + + ts4_type.size = 0; + ts4_type.alignment = 0; + ts4_type.type = FFI_TYPE_STRUCT; + ts4_type.elements = ts4_type_elements; + ts4_type_elements[0] = &ffi_type_uint; + ts4_type_elements[1] = &ffi_type_uint; + ts4_type_elements[2] = &ffi_type_uint; + ts4_type_elements[3] = NULL; + + args[0] = &ts4_type; + values[0] = &ts4_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts4_type, args) == FFI_OK); + + ts4_arg.ui1 = 2; + ts4_arg.ui2 = 3; + ts4_arg.ui3 = 4; + + ffi_call (&cif, FFI_FN(struct4), ts4_result, values); + + CHECK(ts4_result->ui3 == 2U * 3U * 4U); + + + free (ts4_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c new file mode 100644 index 0000000..23e2a3f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct5.c @@ -0,0 +1,66 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + char c1; + char c2; +} test_structure_5; + +static test_structure_5 ABI_ATTR struct5(test_structure_5 ts1, test_structure_5 ts2) +{ + ts1.c1 += ts2.c1; + ts1.c2 -= ts2.c2; + + return ts1; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts5_type; + ffi_type *ts5_type_elements[3]; + + test_structure_5 ts5_arg1, ts5_arg2; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_5 *ts5_result = + (test_structure_5 *) malloc (sizeof(test_structure_5)); + + ts5_type.size = 0; + ts5_type.alignment = 0; + ts5_type.type = FFI_TYPE_STRUCT; + ts5_type.elements = ts5_type_elements; + ts5_type_elements[0] = &ffi_type_schar; + ts5_type_elements[1] = &ffi_type_schar; + ts5_type_elements[2] = NULL; + + args[0] = &ts5_type; + args[1] = &ts5_type; + values[0] = &ts5_arg1; + values[1] = &ts5_arg2; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 2, &ts5_type, args) == FFI_OK); + + ts5_arg1.c1 = 2; + ts5_arg1.c2 = 6; + ts5_arg2.c1 = 5; + ts5_arg2.c2 = 3; + + ffi_call (&cif, FFI_FN(struct5), ts5_result, values); + + CHECK(ts5_result->c1 == 7); + CHECK(ts5_result->c2 == 3); + + + free (ts5_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c new file mode 100644 index 0000000..173c66e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct6.c @@ -0,0 +1,64 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. + Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f; + double d; +} test_structure_6; + +static test_structure_6 ABI_ATTR struct6 (test_structure_6 ts) +{ + ts.f += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts6_type; + ffi_type *ts6_type_elements[3]; + + test_structure_6 ts6_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_6 *ts6_result = + (test_structure_6 *) malloc (sizeof(test_structure_6)); + + ts6_type.size = 0; + ts6_type.alignment = 0; + ts6_type.type = FFI_TYPE_STRUCT; + ts6_type.elements = ts6_type_elements; + ts6_type_elements[0] = &ffi_type_float; + ts6_type_elements[1] = &ffi_type_double; + ts6_type_elements[2] = NULL; + + args[0] = &ts6_type; + values[0] = &ts6_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts6_type, args) == FFI_OK); + + ts6_arg.f = 5.55f; + ts6_arg.d = 6.66; + + printf ("%g\n", ts6_arg.f); + printf ("%g\n", ts6_arg.d); + + ffi_call(&cif, FFI_FN(struct6), ts6_result, values); + + CHECK(ts6_result->f == 5.55f + 1); + CHECK(ts6_result->d == 6.66 + 1); + + free (ts6_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c new file mode 100644 index 0000000..badc7e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct7.c @@ -0,0 +1,74 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + double d; +} test_structure_7; + +static test_structure_7 ABI_ATTR struct7 (test_structure_7 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.d += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts7_type; + ffi_type *ts7_type_elements[4]; + + test_structure_7 ts7_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_7 *ts7_result = + (test_structure_7 *) malloc (sizeof(test_structure_7)); + + ts7_type.size = 0; + ts7_type.alignment = 0; + ts7_type.type = FFI_TYPE_STRUCT; + ts7_type.elements = ts7_type_elements; + ts7_type_elements[0] = &ffi_type_float; + ts7_type_elements[1] = &ffi_type_float; + ts7_type_elements[2] = &ffi_type_double; + ts7_type_elements[3] = NULL; + + args[0] = &ts7_type; + values[0] = &ts7_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts7_type, args) == FFI_OK); + + ts7_arg.f1 = 5.55f; + ts7_arg.f2 = 55.5f; + ts7_arg.d = 6.66; + + printf ("%g\n", ts7_arg.f1); + printf ("%g\n", ts7_arg.f2); + printf ("%g\n", ts7_arg.d); + + ffi_call(&cif, FFI_FN(struct7), ts7_result, values); + + printf ("%g\n", ts7_result->f1); + printf ("%g\n", ts7_result->f2); + printf ("%g\n", ts7_result->d); + + CHECK(ts7_result->f1 == 5.55f + 1); + CHECK(ts7_result->f2 == 55.5f + 1); + CHECK(ts7_result->d == 6.66 + 1); + + free (ts7_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c new file mode 100644 index 0000000..ef204ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct8.c @@ -0,0 +1,81 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" +typedef struct +{ + float f1; + float f2; + float f3; + float f4; +} test_structure_8; + +static test_structure_8 ABI_ATTR struct8 (test_structure_8 ts) +{ + ts.f1 += 1; + ts.f2 += 1; + ts.f3 += 1; + ts.f4 += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts8_type; + ffi_type *ts8_type_elements[5]; + + test_structure_8 ts8_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_8 *ts8_result = + (test_structure_8 *) malloc (sizeof(test_structure_8)); + + ts8_type.size = 0; + ts8_type.alignment = 0; + ts8_type.type = FFI_TYPE_STRUCT; + ts8_type.elements = ts8_type_elements; + ts8_type_elements[0] = &ffi_type_float; + ts8_type_elements[1] = &ffi_type_float; + ts8_type_elements[2] = &ffi_type_float; + ts8_type_elements[3] = &ffi_type_float; + ts8_type_elements[4] = NULL; + + args[0] = &ts8_type; + values[0] = &ts8_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts8_type, args) == FFI_OK); + + ts8_arg.f1 = 5.55f; + ts8_arg.f2 = 55.5f; + ts8_arg.f3 = -5.55f; + ts8_arg.f4 = -55.5f; + + printf ("%g\n", ts8_arg.f1); + printf ("%g\n", ts8_arg.f2); + printf ("%g\n", ts8_arg.f3); + printf ("%g\n", ts8_arg.f4); + + ffi_call(&cif, FFI_FN(struct8), ts8_result, values); + + printf ("%g\n", ts8_result->f1); + printf ("%g\n", ts8_result->f2); + printf ("%g\n", ts8_result->f3); + printf ("%g\n", ts8_result->f4); + + CHECK(ts8_result->f1 == 5.55f + 1); + CHECK(ts8_result->f2 == 55.5f + 1); + CHECK(ts8_result->f3 == -5.55f + 1); + CHECK(ts8_result->f4 == -55.5f + 1); + + free (ts8_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c new file mode 100644 index 0000000..4a13b81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/struct9.c @@ -0,0 +1,68 @@ +/* Area: ffi_call + Purpose: Check structures. + Limitations: none. + PR: none. 
+ Originator: From the original ffitest.c */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + float f; + int i; +} test_structure_9; + +static test_structure_9 ABI_ATTR struct9 (test_structure_9 ts) +{ + ts.f += 1; + ts.i += 1; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts9_type; + ffi_type *ts9_type_elements[3]; + + test_structure_9 ts9_arg; + + /* This is a hack to get a properly aligned result buffer */ + test_structure_9 *ts9_result = + (test_structure_9 *) malloc (sizeof(test_structure_9)); + + ts9_type.size = 0; + ts9_type.alignment = 0; + ts9_type.type = FFI_TYPE_STRUCT; + ts9_type.elements = ts9_type_elements; + ts9_type_elements[0] = &ffi_type_float; + ts9_type_elements[1] = &ffi_type_sint; + ts9_type_elements[2] = NULL; + + args[0] = &ts9_type; + values[0] = &ts9_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 1, &ts9_type, args) == FFI_OK); + + ts9_arg.f = 5.55f; + ts9_arg.i = 5; + + printf ("%g\n", ts9_arg.f); + printf ("%d\n", ts9_arg.i); + + ffi_call(&cif, FFI_FN(struct9), ts9_result, values); + + printf ("%g\n", ts9_result->f); + printf ("%d\n", ts9_result->i); + + CHECK(ts9_result->f == 5.55f + 1); + CHECK(ts9_result->i == 5 + 1); + + free (ts9_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c new file mode 100644 index 0000000..f00d830 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/uninitialized.c @@ -0,0 +1,61 @@ +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct +{ + unsigned char uc; + double d; + unsigned int ui; +} test_structure_1; + +static test_structure_1 struct1(test_structure_1 ts) +{ + ts.uc++; + ts.d--; + ts.ui++; + + return ts; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_type ts1_type; + ffi_type *ts1_type_elements[4]; + + memset(&cif, 1, sizeof(cif)); + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + ts1_type_elements[0] = &ffi_type_uchar; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = &ffi_type_uint; + ts1_type_elements[3] = NULL; + + test_structure_1 ts1_arg; + /* This is a hack to get a properly aligned result buffer */ + test_structure_1 *ts1_result = + (test_structure_1 *) malloc (sizeof(test_structure_1)); + + args[0] = &ts1_type; + values[0] = &ts1_arg; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ts1_type, args) == FFI_OK); + + ts1_arg.uc = '\x01'; + ts1_arg.d = 3.14159; + ts1_arg.ui = 555; + + ffi_call(&cif, FFI_FN(struct1), ts1_result, values); + + CHECK(ts1_result->ui == 556); + CHECK(ts1_result->d == 3.14159 - 1); + + free (ts1_result); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c new file mode 100644 index 0000000..59d085c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_1.c @@ -0,0 +1,196 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* m68k-*-* alpha-*-* } } */ + +#include "ffitest.h" +#include <stdarg.h> + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + float f; + double d; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + + uc = va_arg (ap, unsigned); + sc = va_arg (ap, signed); + + us = va_arg (ap, unsigned); + ss = va_arg (ap, signed); + + ui = va_arg (ap, unsigned int); + si = va_arg (ap, signed int); + + ul = va_arg (ap, unsigned long); + sl = va_arg (ap, signed long); + + f = va_arg (ap, double); /* C standard promotes float->double + when anonymous */ + d = va_arg (ap, double); + + printf ("%u %u %u %u %u %u %u %u %u uc=%u sc=%d %u %d %u %d %lu %ld %f %f\n", + s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b, + uc, sc, + us, ss, + ui, si, + ul, sl, + f, d); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[15]; + ffi_type* arg_types[15]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + unsigned char uc; + signed char sc; + unsigned short us; + signed short ss; + unsigned int ui; + signed int si; + unsigned long ul; + signed long sl; + double d1; + double f1; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = &ffi_type_uchar; + arg_types[5] = &ffi_type_schar; + arg_types[6] = &ffi_type_ushort; + arg_types[7] = &ffi_type_sshort; + arg_types[8] = &ffi_type_uint; + arg_types[9] = &ffi_type_sint; + arg_types[10] = &ffi_type_ulong; + arg_types[11] = &ffi_type_slong; + arg_types[12] = &ffi_type_double; + arg_types[13] = &ffi_type_double; + arg_types[14] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 14, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + uc = 9; + sc = 10; + us = 11; + ss = 12; + ui = 13; + si = 14; + ul = 15; + sl = 16; + f1 = 2.12; + d1 = 3.13; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = &uc; + args[5] = &sc; + args[6] = &us; + args[7] = &ss; + args[8] = &ui; + args[9] = &si; + args[10] = &ul; + args[11] = &sl; + args[12] = &f1; + args[13] = &d1; + args[14] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8 uc=9 sc=10 11 12 13 14 15 16 2.120000
3.130000" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c new file mode 100644 index 0000000..e645206 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct1.c @@ -0,0 +1,121 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static int +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + return n + 1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + ffi_arg res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c new file mode 100644 index 0000000..56f5b9c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct2.c @@ -0,0 +1,123 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include <stdarg.h> + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct small_tag +test_fn (int n, ...) +{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + s1.a += s2.a; + s1.b += s2.b; + return s1; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct small_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &s_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d\n", res.a, res.b); + /* { dg-output "\nres: 12 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c new file mode 100644 index 0000000..9a27e7f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.call/va_struct3.c @@ -0,0 +1,125 @@ +/* Area: ffi_call + Purpose: Test passing struct in variable argument lists. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ + +#include "ffitest.h" +#include <stdarg.h> + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static struct large_tag +test_fn (int n, ...)
+{ + va_list ap; + struct small_tag s1; + struct small_tag s2; + struct large_tag l; + + va_start (ap, n); + s1 = va_arg (ap, struct small_tag); + l = va_arg (ap, struct large_tag); + s2 = va_arg (ap, struct small_tag); + printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e, + s2.a, s2.b); + va_end (ap); + l.a += s1.a; + l.b += s1.b; + l.c += s2.a; + l.d += s2.b; + return l; +} + +int +main (void) +{ + ffi_cif cif; + void* args[5]; + ffi_type* arg_types[5]; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int n; + struct large_tag res; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &l_type, arg_types) == FFI_OK); + + s1.a = 5; + s1.b = 6; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + s2.a = 7; + s2.b = 8; + + n = 41; + + args[0] = &n; + args[1] = &s1; + args[2] = &l1; + args[3] = &s2; + args[4] = NULL; + + ffi_call(&cif, FFI_FN(test_fn), &res, args); + /* { dg-output "5 6 10 11 12 13 14 7 8" } */ + printf("res: %d %d %d %d %d\n", res.a, res.b, res.c, res.d, res.e); + /* { dg-output "\nres: 15 17 19 21 14" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp new file mode 100644 index 0000000..ed4145c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure.exp @@ -0,0 +1,67 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014, 2019 Free Software Foundation, Inc. +# Copyright (C) 2019 Anthony Green + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# <http://www.gnu.org/licenses/>.
+ +dg-init +libffi-init + +global srcdir subdir + +if { [string match $compiler_vendor "microsoft"] } { + # -wd4005 macro redefinition + # -wd4244 implicit conversion to type of smaller size + # -wd4305 truncation to smaller type + # -wd4477 printf %lu of uintptr_t + # -wd4312 implicit conversion to type of greater size + # -wd4311 pointer truncation to unsigned long + # -EHsc C++ Exception Handling (no SEH exceptions) + set additional_options "-wd4005 -wd4244 -wd4305 -wd4477 -wd4312 -wd4311 -EHsc"; +} else { + set additional_options ""; +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.c]] + +if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.cc]] + +# No C++ for or1k +if { [istarget "or1k-*-*"] } { + foreach test $tlist { + unsupported "$test" + } +} else { + if { [libffi_feature_test "#if FFI_CLOSURES"] } { + run-many-tests $tlist $additional_options + } else { + foreach test $tlist { + unsupported "$test" + } + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c new file mode 100644 index 0000000..a579ff6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn0.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + void * code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = 
&ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c new file mode 100644 index 0000000..9123173 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn1.c @@ -0,0 +1,81 @@ +/* Area: closure_call. + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + + +static void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = 
&ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c new file mode 100644 index 0000000..08ff9d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn2.c @@ -0,0 +1,81 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn2(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(double *)args[0] +(int)(*(double *)args[1]) + + (int)(*(double *)args[2]) + (int)*(double *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(double *)args[0], (int)(*(double *)args[1]), + (int)(*(double *)args[2]), (int)*(double *)args[3], + (int)(*(signed short *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(int *)args[7]), + (int)(*(double*)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); +} + +typedef int (*closure_test_type2)(double, double, double, double, signed short, + double, double, int, double, int, int, float, + int, float, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = &ffi_type_double; + cl_arg_types[2] = &ffi_type_double; + cl_arg_types[3] = &ffi_type_double; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn2, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type2)code)) + (1, 2, 3, 4, 127, 5, 6, 8, 
9, 10, 11, 12.0, 13, + 19.0, 21, 1); + /* { dg-output "1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c new file mode 100644 index 0000000..9b54d80 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn3.c @@ -0,0 +1,82 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void closure_test_fn3(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(float *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(float *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(float *)args[13]) + + (int)(*(float *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(float *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(float *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(float *)args[13]), + (int)(*(float *)args[14]), *(int *)args[15], (int)(intptr_t)userdata, + (int)*(ffi_arg *)resp); + + } + +typedef int (*closure_test_type3)(float, float, float, float, float, float, + float, float, double, int, float, float, int, + float, float, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_float; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_float; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_float; + cl_arg_types[14] = &ffi_type_float; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn3, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type3)code)) + (1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9, 10, 11.11, 12.0, 13, + 19.19, 21.21, 1); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 19 21 1 3: 135" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 135" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c new file mode 100644 index 0000000..d4a1530 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn4.c @@ -0,0 +1,89 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20031026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(unsigned long long *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(unsigned long long *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11LL, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c new file mode 100644 index 0000000..9907442 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn5.c @@ -0,0 +1,92 @@ +/* Area: closure_call + Purpose: Check multiple long long values passing. + Exceed the limit of gpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. + Originator: 20031026 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn5(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)*(unsigned long long *)args[1] + + (int)*(unsigned long long *)args[2] + (int)*(unsigned long long *)args[3] + + (int)*(unsigned long long *)args[4] + (int)*(unsigned long long *)args[5] + + (int)*(unsigned long long *)args[6] + (int)*(unsigned long long *)args[7] + + (int)*(unsigned long long *)args[8] + (int)*(unsigned long long *)args[9] + + (int)*(int *)args[10] + + (int)*(unsigned long long *)args[11] + + (int)*(unsigned long long *)args[12] + + (int)*(unsigned long long *)args[13] + + (int)*(unsigned long long *)args[14] + + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)*(unsigned long long *)args[1], + (int)*(unsigned long long *)args[2], + (int)*(unsigned long long *)args[3], + (int)*(unsigned long long *)args[4], + (int)*(unsigned long long *)args[5], + (int)*(unsigned long long *)args[6], + (int)*(unsigned long long *)args[7], + (int)*(unsigned long long *)args[8], + (int)*(unsigned long long *)args[9], + (int)*(int *)args[10], + (int)*(unsigned long long *)args[11], + (int)*(unsigned long long *)args[12], + (int)*(unsigned long long *)args[13], + (int)*(unsigned long long *)args[14], + *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, unsigned long long, + int, unsigned long long, + unsigned long long, unsigned long long, + unsigned long long, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int i, res; + + for (i = 0; i < 10; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[10] = &ffi_type_sint; + for (i = 11; i < 15; i++) { + cl_arg_types[i] = &ffi_type_uint64; + } + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn5, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1LL, 2LL, 3LL, 4LL, 127LL, 429LL, 7LL, 8LL, 9LL, 10LL, 11, 12LL, + 13LL, 19LL, 21LL, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c new file mode 100644 index 0000000..73c54fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_fn6.c @@ -0,0 +1,90 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different 
type. + Also, exceed the limit of gpr and fpr registers on PowerPC. + Limitations: none. + PR: PR23404 + Originator: 20050830 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + + (int)(*(unsigned long long *)args[1]) + + (int)(*(unsigned long long *)args[2]) + + (int)*(unsigned long long *)args[3] + + (int)(*(int *)args[4]) + (int)(*(double *)args[5]) + + (int)*(double *)args[6] + (int)(*(float *)args[7]) + + (int)(*(double *)args[8]) + (int)*(double *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(double *)args[14]) + (int)*(double *)args[15] + + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], + (int)(*(unsigned long long *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(unsigned long long *)args[3], + (int)(*(int *)args[4]), (int)(*(double *)args[5]), + (int)*(double *)args[6], (int)(*(float *)args[7]), + (int)(*(double *)args[8]), (int)*(double *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(double *)args[14]), (int)(*(double *)args[15]), + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_test_type0)(unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + int, double, double, float, double, double, + int, float, int, int, double, double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_uint64; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_uint64; + cl_arg_types[4] = &ffi_type_sint; + cl_arg_types[5] = &ffi_type_double; + cl_arg_types[6] = &ffi_type_double; + cl_arg_types[7] = &ffi_type_float; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_double; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_double; + cl_arg_types[15] = &ffi_type_double; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn0, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*((closure_test_type0)code)) + (1, 2, 3, 4, 127, 429., 7., 8., 9.5, 10., 11, 12., 13, + 19, 21., 1.); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c new file mode 100644 index 0000000..b3afa0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_loc_fn0.c @@ -0,0 +1,95 @@ +/* Area: closure_call + Purpose: Check multiple values passing from different type. + Also, exceed the limit of gpr and fpr registers on PowerPC + Darwin. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_loc_test_fn0(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata) +{ + *(ffi_arg*)resp = + (int)*(unsigned long long *)args[0] + (int)(*(int *)args[1]) + + (int)(*(unsigned long long *)args[2]) + (int)*(int *)args[3] + + (int)(*(signed short *)args[4]) + + (int)(*(unsigned long long *)args[5]) + + (int)*(int *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double *)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(unsigned long long *)args[0], (int)(*(int *)args[1]), + (int)(*(unsigned long long *)args[2]), + (int)*(int *)args[3], (int)(*(signed short *)args[4]), + (int)(*(unsigned long long *)args[5]), + (int)*(int *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]),*(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg *)resp); + +} + +typedef int (*closure_loc_test_type0)(unsigned long long, int, unsigned long long, + int, signed short, unsigned long long, int, + int, double, int, int, float, int, int, + int, int); + +int main (void) +{ + ffi_cif cif; + ffi_closure *pcl; + ffi_type * cl_arg_types[17]; + int res; + void *codeloc; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = &ffi_type_sint; + cl_arg_types[2] = &ffi_type_uint64; + cl_arg_types[3] = &ffi_type_sint; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_uint64; + cl_arg_types[6] = &ffi_type_sint; + cl_arg_types[7] = &ffi_type_sint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_sint; + cl_arg_types[10] = &ffi_type_sint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_sint; + cl_arg_types[13] = &ffi_type_sint; + cl_arg_types[14] = &ffi_type_sint; + cl_arg_types[15] = &ffi_type_sint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + pcl = ffi_closure_alloc(sizeof(ffi_closure), &codeloc); + CHECK(pcl != NULL); + CHECK(codeloc != NULL); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_loc_test_fn0, + (void *) 3 /* userdata */, codeloc) == FFI_OK); + + CHECK(memcmp(pcl, codeloc, sizeof(*pcl)) == 0); + + res = (*((closure_loc_test_type0)codeloc)) + (1LL, 2, 3LL, 4, 127, 429LL, 7, 8, 9.5, 10, 11, 12, 13, + 19, 21, 1); + /* { dg-output "1 2 3 4 127 429 7 8 9 10 11 12 13 19 21 1 3: 680" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 680" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c new file mode 100644 index 0000000..5a4e728 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/closure_simple.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check simple closure handling with all ABIs + Limitations: none. + PR: none. 
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void +closure_test(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata) +{ + *(ffi_arg*)resp = + (int)*(int *)args[0] + (int)(*(int *)args[1]) + + (int)(*(int *)args[2]) + (int)(*(int *)args[3]) + + (int)(intptr_t)userdata; + + printf("%d %d %d %d: %d\n", + (int)*(int *)args[0], (int)(*(int *)args[1]), + (int)(*(int *)args[2]), (int)(*(int *)args[3]), + (int)*(ffi_arg *)resp); + +} + +typedef int (ABI_ATTR *closure_test_type0)(int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = &ffi_type_uint; + cl_arg_types[3] = &ffi_type_uint; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, ABI_NUM, 4, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test, + (void *) 3 /* userdata */, code) == FFI_OK); + + res = (*(closure_test_type0)code)(0, 1, 2, 3); + /* { dg-output "0 1 2 3: 9" } */ + + printf("res: %d\n",res); + /* { dg-output "\nres: 9" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c new file mode 100644 index 0000000..ea0825d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_12byte.c @@ -0,0 +1,94 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_12byte { + int a; + int b; + int c; +} cls_struct_12byte; + +cls_struct_12byte cls_struct_12byte_fn(struct cls_struct_12byte b1, + struct cls_struct_12byte b2) +{ + struct cls_struct_12byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_12byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args , void* userdata __UNUSED__) +{ + struct cls_struct_12byte b1, b2; + + b1 = *(struct cls_struct_12byte*)(args[0]); + b2 = *(struct cls_struct_12byte*)(args[1]); + + *(cls_struct_12byte*)resp = cls_struct_12byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_12byte h_dbl = { 7, 4, 9 }; + struct cls_struct_12byte j_dbl = { 1, 5, 3 }; + struct cls_struct_12byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + 
ffi_call(&cif, FFI_FN(cls_struct_12byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_12byte_gn, NULL, code) == FFI_OK); + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + res_dbl = ((cls_struct_12byte(*)(cls_struct_12byte, cls_struct_12byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 4 9 1 5 3: 8 9 12" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 9 12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c new file mode 100644 index 0000000..89a08a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_16byte.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte { + int a; + double b; + int c; +} cls_struct_16byte; + +cls_struct_16byte cls_struct_16byte_fn(struct cls_struct_16byte b1, + struct cls_struct_16byte b2) +{ + struct cls_struct_16byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + result.c = b1.c + b2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", b1.a, b1.b, b1.c, b2.a, b2.b, b2.c, + result.a, result.b, result.c); + + return result; +} + +static void cls_struct_16byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_16byte b1, b2; + + b1 = *(struct cls_struct_16byte*)(args[0]); + b2 = *(struct cls_struct_16byte*)(args[1]); + + *(cls_struct_16byte*)resp = cls_struct_16byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte h_dbl = { 7, 8.0, 9 }; + struct cls_struct_16byte j_dbl = { 1, 9.0, 3 }; + struct cls_struct_16byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_16byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0.0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_16byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_16byte(*)(cls_struct_16byte, cls_struct_16byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 9 1 9 3: 8 17 12" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 8 17 
12" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c new file mode 100644 index 0000000..9f75da8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_18byte.c @@ -0,0 +1,96 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. + Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_18byte { + double a; + unsigned char b; + unsigned char c; + double d; +} cls_struct_18byte; + +cls_struct_18byte cls_struct_18byte_fn(struct cls_struct_18byte a1, + struct cls_struct_18byte a2) +{ + struct cls_struct_18byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + + printf("%g %d %d %g %g %d %d %g: %g %d %d %g\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + return result; +} + +static void +cls_struct_18byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_18byte a1, a2; + + a1 = *(struct cls_struct_18byte*)(args[0]); + a2 = *(struct cls_struct_18byte*)(args[1]); + + *(cls_struct_18byte*)resp = cls_struct_18byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_18byte g_dbl = { 1.0, 127, 126, 3.0 }; + struct cls_struct_18byte f_dbl = { 4.0, 125, 124, 5.0 }; + struct cls_struct_18byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_18byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_18byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_18byte(*)(cls_struct_18byte, cls_struct_18byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 4 125 124 5: 5 252 250 8" } */ + printf("res: %g %d %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 5 252 250 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c new file mode 100644 index 0000000..278794b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_19byte.c @@ -0,0 
+1,102 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Double alignment check on darwin. + Limitations: none. + PR: none. + Originator: 20030915 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_19byte { + double a; + unsigned char b; + unsigned char c; + double d; + unsigned char e; +} cls_struct_19byte; + +cls_struct_19byte cls_struct_19byte_fn(struct cls_struct_19byte a1, + struct cls_struct_19byte a2) +{ + struct cls_struct_19byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + + printf("%g %d %d %g %d %g %d %d %g %d: %g %d %d %g %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + return result; +} + +static void +cls_struct_19byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_19byte a1, a2; + + a1 = *(struct cls_struct_19byte*)(args[0]); + a2 = *(struct cls_struct_19byte*)(args[1]); + + *(cls_struct_19byte*)resp = cls_struct_19byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_19byte g_dbl = { 1.0, 127, 126, 3.0, 120 }; + struct cls_struct_19byte f_dbl = { 4.0, 125, 124, 5.0, 119 }; + struct cls_struct_19byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_19byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_19byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_19byte(*)(cls_struct_19byte, cls_struct_19byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 127 126 3 120 4 125 124 5 119: 5 252 250 8 239" } */ + printf("res: %g %d %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 5 252 250 8 239" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c new file mode 100644 index 0000000..82492c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_1_1byte.c @@ -0,0 +1,89 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. 
+ Limitations: none. + PR: none. + Originator: 20030902 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_1_1byte { + unsigned char a; +} cls_struct_1_1byte; + +cls_struct_1_1byte cls_struct_1_1byte_fn(struct cls_struct_1_1byte a1, + struct cls_struct_1_1byte a2) +{ + struct cls_struct_1_1byte result; + + result.a = a1.a + a2.a; + + printf("%d %d: %d\n", a1.a, a2.a, result.a); + + return result; +} + +static void +cls_struct_1_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_1_1byte a1, a2; + + a1 = *(struct cls_struct_1_1byte*)(args[0]); + a2 = *(struct cls_struct_1_1byte*)(args[1]); + + *(cls_struct_1_1byte*)resp = cls_struct_1_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[2]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_1_1byte g_dbl = { 12 }; + struct cls_struct_1_1byte f_dbl = { 178 }; + struct cls_struct_1_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_1_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_1_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_1_1byte(*)(cls_struct_1_1byte, cls_struct_1_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 178: 190" } */ + printf("res: %d\n", res_dbl.a); + /* { dg-output "\nres: 190" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c new file mode 100644 index 0000000..3f8bb28 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + double a; + double b; + int c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%g %g %d %g %g %d: %g %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_20byte g_dbl = { 1.0, 2.0, 3 }; + struct cls_struct_20byte f_dbl = { 4.0, 5.0, 7 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %g %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c new file mode 100644 index 0000000..6562727 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_20byte1.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_20byte { + int a; + double b; + double c; +} cls_struct_20byte; + +cls_struct_20byte cls_struct_20byte_fn(struct cls_struct_20byte a1, + struct cls_struct_20byte a2) +{ + struct cls_struct_20byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %g %d %g %g: %d %g %g\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, + result.a, result.b, result.c); + return result; +} + +static void +cls_struct_20byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_20byte a1, a2; + + a1 = *(struct cls_struct_20byte*)(args[0]); + a2 = *(struct cls_struct_20byte*)(args[1]); + + *(cls_struct_20byte*)resp = cls_struct_20byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_20byte g_dbl = { 1, 2.0, 3.0 }; + struct cls_struct_20byte f_dbl = { 4, 5.0, 7.0 }; + struct cls_struct_20byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_20byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_20byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_20byte(*)(cls_struct_20byte, cls_struct_20byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 7: 5 7 10" } */ + printf("res: %d %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 5 7 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c new file mode 100644 index 0000000..1d82f6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_24byte.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_24byte { + double a; + double b; + int c; + float d; +} cls_struct_24byte; + +cls_struct_24byte cls_struct_24byte_fn(struct cls_struct_24byte b0, + struct cls_struct_24byte b1, + struct cls_struct_24byte b2, + struct cls_struct_24byte b3) +{ + struct cls_struct_24byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + + printf("%g %g %d %g %g %g %d %g %g %g %d %g %g %g %d %g: %g %g %d %g\n", + b0.a, b0.b, b0.c, b0.d, + b1.a, b1.b, b1.c, b1.d, + b2.a, b2.b, b2.c, b2.d, + b3.a, b3.b, b3.c, b2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_24byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_24byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_24byte*)(args[0]); + b1 = *(struct cls_struct_24byte*)(args[1]); + b2 = *(struct cls_struct_24byte*)(args[2]); + b3 = *(struct cls_struct_24byte*)(args[3]); + + *(cls_struct_24byte*)resp = cls_struct_24byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_24byte e_dbl = { 9.0, 2.0, 6, 5.0 }; + struct cls_struct_24byte f_dbl = { 1.0, 2.0, 3, 7.0 }; + struct cls_struct_24byte g_dbl = { 4.0, 5.0, 7, 9.0 }; + struct cls_struct_24byte h_dbl = { 8.0, 6.0, 1, 4.0 }; + struct cls_struct_24byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = &ffi_type_float; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_24byte_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_24byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_24byte(*)(cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte, + cls_struct_24byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 5 1 2 3 7 4 5 7 9 8 6 1 9: 22 15 17 25" } */ + printf("res: %g %g %d %g\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 22 15 17 25" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c new file mode 100644 index 0000000..81bb0a6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_2byte.c @@ -0,0 +1,90 @@ 
+/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_2byte { + unsigned char a; + unsigned char b; +} cls_struct_2byte; + +cls_struct_2byte cls_struct_2byte_fn(struct cls_struct_2byte a1, + struct cls_struct_2byte a2) +{ + struct cls_struct_2byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_2byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_2byte a1, a2; + + a1 = *(struct cls_struct_2byte*)(args[0]); + a2 = *(struct cls_struct_2byte*)(args[1]); + + *(cls_struct_2byte*)resp = cls_struct_2byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_2byte g_dbl = { 12, 127 }; + struct cls_struct_2byte f_dbl = { 1, 13 }; + struct cls_struct_2byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_2byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_2byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_2byte(*)(cls_struct_2byte, cls_struct_2byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 127 1 13: 13 140" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c new file mode 100644 index 0000000..b782746 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3_1byte.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3_1byte { + unsigned char a; + unsigned char b; + unsigned char c; +} cls_struct_3_1byte; + +cls_struct_3_1byte cls_struct_3_1byte_fn(struct cls_struct_3_1byte a1, + struct cls_struct_3_1byte a2) +{ + struct cls_struct_3_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_3_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3_1byte a1, a2; + + a1 = *(struct cls_struct_3_1byte*)(args[0]); + a2 = *(struct cls_struct_3_1byte*)(args[1]); + + *(cls_struct_3_1byte*)resp = cls_struct_3_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3_1byte g_dbl = { 12, 13, 14 }; + struct cls_struct_3_1byte f_dbl = { 178, 179, 180 }; + struct cls_struct_3_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3_1byte(*)(cls_struct_3_1byte, cls_struct_3_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 178 179 180: 190 192 194" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 190 192 194" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c new file mode 100644 index 0000000..a02c463 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte { + unsigned short a; + unsigned char b; +} cls_struct_3byte; + +cls_struct_3byte cls_struct_3byte_fn(struct cls_struct_3byte a1, + struct cls_struct_3byte a2) +{ + struct cls_struct_3byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte a1, a2; + + a1 = *(struct cls_struct_3byte*)(args[0]); + a2 = *(struct cls_struct_3byte*)(args[1]); + + *(cls_struct_3byte*)resp = cls_struct_3byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte g_dbl = { 12, 119 }; + struct cls_struct_3byte f_dbl = { 1, 15 }; + struct cls_struct_3byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte(*)(cls_struct_3byte, cls_struct_3byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 119 1 15: 13 134" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 13 134" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c new file mode 100644 index 0000000..c7251ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3byte2.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_3byte_1 { + unsigned char a; + unsigned short b; +} cls_struct_3byte_1; + +cls_struct_3byte_1 cls_struct_3byte_fn1(struct cls_struct_3byte_1 a1, + struct cls_struct_3byte_1 a2) +{ + struct cls_struct_3byte_1 result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_3byte_gn1(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_3byte_1 a1, a2; + + a1 = *(struct cls_struct_3byte_1*)(args[0]); + a2 = *(struct cls_struct_3byte_1*)(args[1]); + + *(cls_struct_3byte_1*)resp = cls_struct_3byte_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_3byte_1 g_dbl = { 15, 125 }; + struct cls_struct_3byte_1 f_dbl = { 9, 19 }; + struct cls_struct_3byte_1 res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3byte_fn1), &res_dbl, args_dbl); + /* { dg-output "15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3byte_gn1, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_3byte_1(*)(cls_struct_3byte_1, cls_struct_3byte_1))(code))(g_dbl, f_dbl); + /* { dg-output "\n15 125 9 19: 24 144" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 24 144" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c new file mode 100644 index 0000000..48888f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_3float.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations:>none. + PR: none. 
+ Originator: 20171026 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_3float { + float f; + float g; + float h; +} cls_struct_3float; + +cls_struct_3float cls_struct_3float_fn(struct cls_struct_3float a1, + struct cls_struct_3float a2) +{ + struct cls_struct_3float result; + + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + result.h = a1.h + a2.h; + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.f, a1.g, a1.h, + a2.f, a2.g, a2.h, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_3float_gn(ffi_cif *cif __UNUSED__, void* resp, void **args, + void* userdata __UNUSED__) +{ + struct cls_struct_3float a1, a2; + + a1 = *(struct cls_struct_3float*)(args[0]); + a2 = *(struct cls_struct_3float*)(args[1]); + + *(cls_struct_3float*)resp = cls_struct_3float_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void *args_dbl[3]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_3float g_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float f_dbl = { 1.0f, 2.0f, 3.0f }; + struct cls_struct_3float res_dbl; + + cls_struct_fields[0] = &ffi_type_float; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_float; + cls_struct_fields[3] = NULL; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_3float_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_3float_gn, NULL, code) == + FFI_OK); + + res_dbl = ((cls_struct_3float(*)(cls_struct_3float, + cls_struct_3float))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c new file mode 100644 index 0000000..2d6d8b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4_1byte.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Especially with small structures which may fit in one + register. Depending on the ABI. + Limitations: none. + PR: none. 
+ Originator: 20030902 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_4_1byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; +} cls_struct_4_1byte; + +cls_struct_4_1byte cls_struct_4_1byte_fn(struct cls_struct_4_1byte a1, + struct cls_struct_4_1byte a2) +{ + struct cls_struct_4_1byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_4_1byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4_1byte a1, a2; + + a1 = *(struct cls_struct_4_1byte*)(args[0]); + a2 = *(struct cls_struct_4_1byte*)(args[1]); + + *(cls_struct_4_1byte*)resp = cls_struct_4_1byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4_1byte g_dbl = { 12, 13, 14, 15 }; + struct cls_struct_4_1byte f_dbl = { 178, 179, 180, 181 }; + struct cls_struct_4_1byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4_1byte_fn), &res_dbl, args_dbl); + /* { dg-output "12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4_1byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4_1byte(*)(cls_struct_4_1byte, cls_struct_4_1byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 13 14 15 178 179 180 181: 190 192 194 196" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 190 192 194 196" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c new file mode 100644 index 0000000..4ac3787 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_4byte.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_4byte { + unsigned short a; + unsigned short b; +} cls_struct_4byte; + +cls_struct_4byte cls_struct_4byte_fn(struct cls_struct_4byte a1, + struct cls_struct_4byte a2) +{ + struct cls_struct_4byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %d %d %d: %d %d\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_4byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_4byte a1, a2; + + a1 = *(struct cls_struct_4byte*)(args[0]); + a2 = *(struct cls_struct_4byte*)(args[1]); + + *(cls_struct_4byte*)resp = cls_struct_4byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_4byte g_dbl = { 127, 120 }; + struct cls_struct_4byte f_dbl = { 12, 128 }; + struct cls_struct_4byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_4byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_4byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_4byte(*)(cls_struct_4byte, cls_struct_4byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 12 128: 139 248" } */ + printf("res: %d %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 139 248" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c new file mode 100644 index 0000000..ad9d51c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5_1_byte.c @@ -0,0 +1,109 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + + printf("%d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, + a2.a, a2.b, a2.c, a2.d, a2.e, + result.a, result.b, result.c, result.d, result.e); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[6]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1, 3, 4 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9, 3, 4 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 12 128 9 3 4: 139 248 10 6 8" } */ + printf("res: %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e); + /* { dg-output "\nres: 139 248 10 6 8" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c new file mode 100644 index 0000000..4e0c000 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_5byte.c @@ -0,0 +1,98 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_5byte { + unsigned short a; + unsigned short b; + unsigned char c; +} cls_struct_5byte; + +cls_struct_5byte cls_struct_5byte_fn(struct cls_struct_5byte a1, + struct cls_struct_5byte a2) +{ + struct cls_struct_5byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, + result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_5byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_5byte a1, a2; + + a1 = *(struct cls_struct_5byte*)(args[0]); + a2 = *(struct cls_struct_5byte*)(args[1]); + + *(cls_struct_5byte*)resp = cls_struct_5byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_5byte g_dbl = { 127, 120, 1 }; + struct cls_struct_5byte f_dbl = { 12, 128, 9 }; + struct cls_struct_5byte res_dbl = { 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_5byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_5byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_5byte(*)(cls_struct_5byte, cls_struct_5byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 12 128 9: 139 248 10" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 139 248 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c new file mode 100644 index 0000000..a55edc2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_64byte.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_64byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; +} cls_struct_64byte; + +cls_struct_64byte cls_struct_64byte_fn(struct cls_struct_64byte b0, + struct cls_struct_64byte b1, + struct cls_struct_64byte b2, + struct cls_struct_64byte b3) +{ + struct cls_struct_64byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + + printf("%g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h); + + return result; +} + +static void +cls_struct_64byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_64byte b0, b1, b2, b3; + + b0 = *(struct cls_struct_64byte*)(args[0]); + b1 = *(struct cls_struct_64byte*)(args[1]); + b2 = *(struct cls_struct_64byte*)(args[2]); + b3 = *(struct cls_struct_64byte*)(args[3]); + + *(cls_struct_64byte*)resp = cls_struct_64byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[9]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_64byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0 }; + struct cls_struct_64byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0 }; + struct cls_struct_64byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0 }; + struct cls_struct_64byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0 }; + struct cls_struct_64byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_64byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_64byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_64byte(*)(cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte, + cls_struct_64byte)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18" } */ + printf("res: %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, 
res_dbl.g, res_dbl.h); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c new file mode 100644 index 0000000..b4dcdba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6_1_byte.c @@ -0,0 +1,113 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, + result.a, result.b, result.c, result.d, result.e, result.f); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[7]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 3, 4, 5 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 3, 4, 5 }; + struct cls_struct_6byte res_dbl = { 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 12 128 9 3 4 5: 139 248 10 6 8 10" } */ + printf("res: %d %d %d %d %d 
%d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f); + /* { dg-output "\nres: 139 248 10 6 8 10" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c new file mode 100644 index 0000000..7406780 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_6byte.c @@ -0,0 +1,99 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_6byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned char d; +} cls_struct_6byte; + +cls_struct_6byte cls_struct_6byte_fn(struct cls_struct_6byte a1, + struct cls_struct_6byte a2) +{ + struct cls_struct_6byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_6byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_6byte a1, a2; + + a1 = *(struct cls_struct_6byte*)(args[0]); + a2 = *(struct cls_struct_6byte*)(args[1]); + + *(cls_struct_6byte*)resp = cls_struct_6byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_6byte g_dbl = { 127, 120, 1, 128 }; + struct cls_struct_6byte f_dbl = { 12, 128, 9, 127 }; + struct cls_struct_6byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_6byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_6byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_6byte(*)(cls_struct_6byte, cls_struct_6byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 128 12 128 9 127: 139 248 10 255" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 255" } */ + + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c new file mode 100644 index 0000000..14a7e96 --- 
/dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7_1_byte.c @@ -0,0 +1,117 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20050708 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned char a; + unsigned char b; + unsigned char c; + unsigned char d; + unsigned char e; + unsigned char f; + unsigned char g; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + result.e = a1.e + a2.e; + result.f = a1.f + a2.f; + result.g = a1.g + a2.g; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d %d %d %d %d %d %d\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + result.a, result.b, result.c, result.d, result.e, result.f, result.g); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 3, 4, 5, 6 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 3, 4, 5, 6 }; + struct cls_struct_7byte res_dbl = { 0, 0, 0, 0, 0, 0, 0 }; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_uchar; + cls_struct_fields[4] = &ffi_type_uchar; + cls_struct_fields[5] = &ffi_type_uchar; + cls_struct_fields[6] = &ffi_type_uchar; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + res_dbl.a = 0; + res_dbl.b = 0; + res_dbl.c = 0; + res_dbl.d = 0; + res_dbl.e = 0; + res_dbl.f = 0; + res_dbl.g = 0; + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 3 4 5 6 12 128 9 3 4 5 6: 139 248 10 6 8 10 12" } */ + printf("res: %d %d %d %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 139 248 10 6 8 10 12" } */ + + exit(0); +} diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c new file mode 100644 index 0000000..1645cc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_7byte.c @@ -0,0 +1,97 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_7byte { + unsigned short a; + unsigned short b; + unsigned char c; + unsigned short d; +} cls_struct_7byte; + +cls_struct_7byte cls_struct_7byte_fn(struct cls_struct_7byte a1, + struct cls_struct_7byte a2) +{ + struct cls_struct_7byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + result.d = a1.d + a2.d; + + printf("%d %d %d %d %d %d %d %d: %d %d %d %d\n", a1.a, a1.b, a1.c, a1.d, + a2.a, a2.b, a2.c, a2.d, + result.a, result.b, result.c, result.d); + + return result; +} + +static void +cls_struct_7byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_7byte a1, a2; + + a1 = *(struct cls_struct_7byte*)(args[0]); + a2 = *(struct cls_struct_7byte*)(args[1]); + + *(cls_struct_7byte*)resp = cls_struct_7byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_7byte g_dbl = { 127, 120, 1, 254 }; + struct cls_struct_7byte f_dbl = { 12, 128, 9, 255 }; + struct cls_struct_7byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_ushort; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = &ffi_type_ushort; + cls_struct_fields[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_7byte_fn), &res_dbl, args_dbl); + /* { dg-output "127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_7byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_7byte(*)(cls_struct_7byte, cls_struct_7byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n127 120 1 254 12 128 9 255: 139 248 10 509" } */ + printf("res: %d %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c, res_dbl.d); + /* { dg-output "\nres: 139 248 10 509" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c new file mode 100644 index 0000000..f6c1ea5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_8byte.c @@ -0,0 +1,88 @@ +/* Area: ffi_call, closure_call + 
Purpose: Check structure passing with different structure size. + Depending on the ABI. Check overlapping. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_8byte { + int a; + float b; +} cls_struct_8byte; + +cls_struct_8byte cls_struct_8byte_fn(struct cls_struct_8byte a1, + struct cls_struct_8byte a2) +{ + struct cls_struct_8byte result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + + printf("%d %g %d %g: %d %g\n", a1.a, a1.b, a2.a, a2.b, result.a, result.b); + + return result; +} + +static void +cls_struct_8byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_8byte a1, a2; + + a1 = *(struct cls_struct_8byte*)(args[0]); + a2 = *(struct cls_struct_8byte*)(args[1]); + + *(cls_struct_8byte*)resp = cls_struct_8byte_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_8byte g_dbl = { 1, 2.0 }; + struct cls_struct_8byte f_dbl = { 4, 5.0 }; + struct cls_struct_8byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_8byte_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_8byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_8byte(*)(cls_struct_8byte, cls_struct_8byte))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 4 5: 5 7" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 5 7" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c new file mode 100644 index 0000000..0b85722 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does not here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + int a; + double b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%d %g %d %g: %d %g\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7, 8.0}; + struct cls_struct_9byte j_dbl = { 1, 9.0}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_sint; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %d %g\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c new file mode 100644 index 0000000..edf991d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_9byte2.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Depending on the ABI. Darwin/AIX do double-word + alignment of the struct if the first element is a double. + Check that it does here. + Limitations: none. + PR: none. 
+ Originator: 20030914 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_9byte { + double a; + int b; +} cls_struct_9byte; + +cls_struct_9byte cls_struct_9byte_fn(struct cls_struct_9byte b1, + struct cls_struct_9byte b2) +{ + struct cls_struct_9byte result; + + result.a = b1.a + b2.a; + result.b = b1.b + b2.b; + + printf("%g %d %g %d: %g %d\n", b1.a, b1.b, b2.a, b2.b, + result.a, result.b); + + return result; +} + +static void cls_struct_9byte_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_9byte b1, b2; + + b1 = *(struct cls_struct_9byte*)(args[0]); + b2 = *(struct cls_struct_9byte*)(args[1]); + + *(cls_struct_9byte*)resp = cls_struct_9byte_fn(b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_9byte h_dbl = { 7.0, 8}; + struct cls_struct_9byte j_dbl = { 1.0, 9}; + struct cls_struct_9byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &h_dbl; + args_dbl[1] = &j_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_9byte_fn), &res_dbl, args_dbl); + /* { dg-output "7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_9byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_9byte(*)(cls_struct_9byte, cls_struct_9byte))(code))(h_dbl, j_dbl); + /* { dg-output "\n7 8 1 9: 8 17" } */ + printf("res: %g %d\n", res_dbl.a, res_dbl.b); + /* { dg-output "\nres: 8 17" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c new file mode 100644 index 0000000..aad5f3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_double.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c new file mode 100644 index 0000000..37e0855 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_float.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of float. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + float b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c new file mode 100644 index 0000000..b3322d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + long double b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %g %d %d %g %d: %d %g %d\n", a1.a, (double)a1.b, a1.c, a2.a, (double)a2.b, a2.c, result.a, (double)result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %g %d\n", res_dbl.a, (double)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c new file mode 100644 index 0000000..cc1c43b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split.c @@ -0,0 +1,132 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + long double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +cls_struct_align cls_struct_align_fn2( + cls_struct_align a1) +{ + struct cls_struct_align r; + + r.a = a1.a + 1; + r.b = a1.b + 1; + r.c = a1.c + 1; + r.d = a1.d + 1; + r.e = a1.e + 1; + r.f = a1.f + 1; + r.g = a1.g + 1; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg: " + "%Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_longdouble; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %Lg %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} diff 
--git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c new file mode 100644 index 0000000..5d3bec0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_longdouble_split2.c @@ -0,0 +1,115 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of long double. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +typedef struct cls_struct_align { + long double a; + long double b; + long double c; + long double d; + long double e; + double f; + long double g; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + cls_struct_align a1, + cls_struct_align a2) +{ + struct cls_struct_align r; + + r.a = a1.a + a2.a; + r.b = a1.b + a2.b; + r.c = a1.c + a2.c; + r.d = a1.d + a2.d; + r.e = a1.e + a2.e; + r.f = a1.f + a2.f; + r.g = a1.g + a2.g; + + printf("%Lg %Lg %Lg %Lg %Lg %g %Lg %Lg %Lg %Lg %Lg %Lg %g %Lg: " + "%Lg %Lg %Lg %Lg %Lg %g %Lg\n", + a1.a, a1.b, a1.c, a1.d, a1.e, a1.f, a1.g, + a2.a, a2.b, a2.c, a2.d, a2.e, a2.f, a2.g, + r.a, r.b, r.c, r.d, r.e, r.f, r.g); + + return r; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[8]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[3]; + + struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 }; + struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_longdouble; + cls_struct_fields[2] = &ffi_type_longdouble; + cls_struct_fields[3] = &ffi_type_longdouble; + cls_struct_fields[4] = &ffi_type_longdouble; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_longdouble; + cls_struct_fields[7] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 12 13 14: 9 11 13 15 17 19 21" } */ + printf("res: %Lg %Lg %Lg %Lg %Lg %g %Lg\n", res_dbl.a, 
res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g); + /* { dg-output "\nres: 9 11 13 15 17 19 21" } */ + + exit(0); +} + + + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c new file mode 100644 index 0000000..8fbf36a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_pointer.c @@ -0,0 +1,95 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of pointer. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + void *b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = (void *)((uintptr_t)a1.b + (uintptr_t)a2.b); + result.c = a1.c + a2.c; + + printf("%d %" PRIuPTR " %d %d %" PRIuPTR " %d: %d %" PRIuPTR " %d\n", + a1.a, (uintptr_t)a1.b, a1.c, + a2.a, (uintptr_t)a2.b, a2.c, + result.a, (uintptr_t)result.b, + result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, (void *)4951, 127 }; + struct cls_struct_align f_dbl = { 1, (void *)9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_pointer; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIuPTR " %d\n", res_dbl.a, (uintptr_t)res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c new file mode 100644 index 0000000..039b874 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint16.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint16. + Limitations: none. + PR: none. + Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sshort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c new file mode 100644 index 0000000..c96c6d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint32.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c new file mode 100644 index 0000000..9aa7bdd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_sint64.c @@ -0,0 +1,92 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of sint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + signed long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_sint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c new file mode 100644 index 0000000..97620b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint16.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint16. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned short b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_ushort; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c new file mode 100644 index 0000000..5766fad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint32.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint32. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned int b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c new file mode 100644 index 0000000..a52cb89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_align_uint64.c @@ -0,0 +1,93 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of uint64. + Limitations: none. + PR: none. 
+ Originator: 20031203 */ + + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct cls_struct_align { + unsigned char a; + unsigned long long b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn(struct cls_struct_align a1, + struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %" PRIdLL " %d %d %" PRIdLL " %d: %d %" PRIdLL " %d\n", a1.a, a1.b, a1.c, a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_align g_dbl = { 12, 4951, 127 }; + struct cls_struct_align f_dbl = { 1, 9320, 13 }; + struct cls_struct_align res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &g_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl); + /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_dbl, f_dbl); + /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */ + printf("res: %d %" PRIdLL " %d\n", res_dbl.a, res_dbl.b, res_dbl.c); + /* { dg-output "\nres: 13 14271 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c new file mode 100644 index 0000000..e451dea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_dbls_struct.c @@ -0,0 +1,66 @@ +/* Area: ffi_call, closure_call + Purpose: Check double arguments in structs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/23/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef struct Dbls { + double x; + double y; +} Dbls; + +void +closure_test_fn(Dbls p) +{ + printf("%.1f %.1f\n", p.x, p.y); +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Dbls*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Dbls arg = { 1.0, 2.0 }; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &ffi_type_double; + ts1_type_elements[1] = &ffi_type_double; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + ((void*(*)(Dbls))(code))(arg); + /* { dg-output "1.0 2.0" } */ + + closure_test_fn(arg); + /* { dg-output "\n1.0 2.0" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c new file mode 100644 index 0000000..84ad4cb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value double. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(double *)resp = *(double *)args[0]; + + printf("%f: %f\n",*(double *)args[0], + *(double *)resp); + } +typedef double (*cls_ret_double)(double); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + double res; + + cl_arg_types[0] = &ffi_type_double; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_double)code))(21474.789); + /* { dg-output "21474.789000: 21474.789000" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: 21474.789000" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c new file mode 100644 index 0000000..e077f92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_double_va.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_double_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + double doubleValue = *(double*)args[1]; + + *(ffi_arg*)resp = printf(format, doubleValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1f\n"; + double doubleArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_double; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &doubleArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_double_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, doubleArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c new file mode 100644 index 0000000..0090fed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_float.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_float_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(float *)resp = *(float *)args[0]; + + printf("%g: %g\n",*(float *)args[0], + *(float *)resp); + } + +typedef float (*cls_ret_float)(float); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + float res; + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_float, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_float_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_float)code)(-2122.12))); + /* { dg-output "\\-2122.12: \\-2122.12" } */ + printf("res: %.6f\n", res); + /* { dg-output "\nres: \-2122.120117" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c new file mode 100644 index 0000000..d24e72e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble.c @@ -0,0 +1,105 @@ +/* Area: ffi_call, closure_call + Purpose: Check long double arguments. + Limitations: none. + PR: none. + Originator: Blake Chaffin */ + +/* This test is known to PASS on armv7l-unknown-linux-gnueabihf, so I have + remove the xfail for arm*-*-* below, until we know more. 
*/ +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ + +#include "ffitest.h" + +long double cls_ldouble_fn( + long double a1, + long double a2, + long double a3, + long double a4, + long double a5, + long double a6, + long double a7, + long double a8) +{ + long double r = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8; + + printf("%Lg %Lg %Lg %Lg %Lg %Lg %Lg %Lg: %Lg\n", + a1, a2, a3, a4, a5, a6, a7, a8, r); + + return r; +} + +static void +cls_ldouble_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + long double a1 = *(long double*)args[0]; + long double a2 = *(long double*)args[1]; + long double a3 = *(long double*)args[2]; + long double a4 = *(long double*)args[3]; + long double a5 = *(long double*)args[4]; + long double a6 = *(long double*)args[5]; + long double a7 = *(long double*)args[6]; + long double a8 = *(long double*)args[7]; + + *(long double*)resp = cls_ldouble_fn( + a1, a2, a3, a4, a5, a6, a7, a8); +} + +int main(void) +{ + ffi_cif cif; + void* code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[9]; + ffi_type* arg_types[9]; + long double res = 0; + + long double arg1 = 1; + long double arg2 = 2; + long double arg3 = 3; + long double arg4 = 4; + long double arg5 = 5; + long double arg6 = 6; + long double arg7 = 7; + long double arg8 = 8; + + arg_types[0] = &ffi_type_longdouble; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = &ffi_type_longdouble; + arg_types[3] = &ffi_type_longdouble; + arg_types[4] = &ffi_type_longdouble; + arg_types[5] = &ffi_type_longdouble; + arg_types[6] = &ffi_type_longdouble; + arg_types[7] = &ffi_type_longdouble; + arg_types[8] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 8, &ffi_type_longdouble, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = &arg3; + args[3] = &arg4; + args[4] = &arg5; + args[5] = &arg6; + args[6] = &arg7; + args[7] = &arg8; + args[8] = NULL; + + ffi_call(&cif, FFI_FN(cls_ldouble_fn), &res, args); + /* { dg-output "1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ldouble_gn, NULL, code) == FFI_OK); + + res = ((long double(*)(long double, long double, long double, long double, + long double, long double, long double, long double))(code))(arg1, arg2, + arg3, arg4, arg5, arg6, arg7, arg8); + /* { dg-output "\n1 2 3 4 5 6 7 8: 36" } */ + printf("res: %Lg\n", res); + /* { dg-output "\nres: 36" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c new file mode 100644 index 0000000..39b438b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_longdouble_va.c @@ -0,0 +1,61 @@ +/* Area: ffi_call, closure_call + Purpose: Test long doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-output "" { xfail avr32*-*-* x86_64-*-mingw* } } */ +/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */ + +#include "ffitest.h" + +static void +cls_longdouble_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + long double ldValue = *(long double*)args[1]; + + *(ffi_arg*)resp = printf(format, ldValue); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + ffi_type* arg_types[3]; + + char* format = "%.1Lf\n"; + long double ldArg = 7; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_longdouble; + arg_types[2] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &ldArg; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(printf), &res, args); + /* { dg-output "7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_longdouble_va_fn, NULL, + code) == FFI_OK); + + res = ((int(*)(char*, ...))(code))(format, ldArg); + /* { dg-output "\n7.0" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 4" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c new file mode 100644 index 0000000..7fd6c82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_args.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check closures called with many args of mixed types + Limitations: none. + PR: none. 
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_ret_double_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + int i; + double r = 0; + double t; + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + { + t = *(long int *)args[i]; + CHECK(t == i+1); + } + else + { + t = *(double *)args[i]; + CHECK(fabs(t - ((i+1) * 0.1)) < FLT_EPSILON); + } + r += t; + } + *(double *)resp = r; +} +typedef double (*cls_ret_double)(double, double, double, double, long int, +double, double, double, double, long int, double, long int, double, long int, +double, long int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[NARGS]; + double res; + int i; + double expected = 64.9; + + for(i = 0; i < NARGS; i++) + { + if(i == 4 || i == 9 || i == 11 || i == 13 || i == 15) + cl_arg_types[i] = &ffi_type_slong; + else + cl_arg_types[i] = &ffi_type_double; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NARGS, + &ffi_type_double, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_double_fn, NULL, code) == FFI_OK); + + res = (((cls_ret_double)code))(0.1, 0.2, 0.3, 0.4, 5, 0.6, 0.7, 0.8, 0.9, 10, + 1.1, 12, 1.3, 14, 1.5, 16); + if (fabs(res - expected) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c new file mode 100644 index 0000000..62b0697 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_many_mixed_float_double.c @@ -0,0 +1,55 @@ +/* Area: closure_call + Purpose: Check register allocation for closure calls with many float and double arguments + Limitations: none. + PR: none.
+ Originator: */ + +/* { dg-do run } */ +#include "ffitest.h" +#include <float.h> +#include <math.h> + +#define NARGS 16 + +static void cls_mixed_float_double_fn(ffi_cif* cif , void* ret, void** args, + void* userdata __UNUSED__) +{ + double r = 0; + unsigned int i; + double t; + for(i=0; i < cif->nargs; i++) + { + if(cif->arg_types[i] == &ffi_type_double) { + t = *(((double**)(args))[i]); + } else { + t = *(((float**)(args))[i]); + } + r += t; + } + *((double*)ret) = r; +} +typedef double (*cls_mixed)(double, float, double, double, double, double, double, float, float, double, float, float); + +int main (void) +{ + ffi_cif cif; + ffi_closure *closure; + void* code; + ffi_type *argtypes[12] = {&ffi_type_double, &ffi_type_float, &ffi_type_double, + &ffi_type_double, &ffi_type_double, &ffi_type_double, + &ffi_type_double, &ffi_type_float, &ffi_type_float, + &ffi_type_double, &ffi_type_float, &ffi_type_float}; + + + closure = ffi_closure_alloc(sizeof(ffi_closure), (void**)&code); + if(closure ==NULL) + abort(); + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 12, &ffi_type_double, argtypes) == FFI_OK); + CHECK(ffi_prep_closure_loc(closure, &cif, cls_mixed_float_double_fn, NULL, code) == FFI_OK); + double ret = ((cls_mixed)code)(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2); + ffi_closure_free(closure); + if(fabs(ret - 7.8) < FLT_EPSILON) + exit(0); + else + abort(); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c new file mode 100644 index 0000000..71df7b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_schar.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed char values. + Limitations: none. + PR: PR13221.
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed char test_func_fn(signed char a1, signed char a2) +{ + signed char result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a2; + + a1 = *(signed char *)avals[0]; + a2 = *(signed char *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed char (*test_type)(signed char, signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + signed char a, b, res_closure; + + a = 2; + b = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_schar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 125: 127" } */ + printf("res: %d\n", (signed char)res_call); + /* { dg-output "\nres: 127" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 125); + /* { dg-output "\n2 125: 127" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c new file mode 100644 index 0000000..4c39153 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshort.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed short a1, signed short a2) +{ + signed short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed short a1, a2; + + a1 = *(signed short *)avals[0]; + a2 = *(signed short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef signed short (*test_type)(signed short, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c new file mode 100644 index 0000000..1c3aeb5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_sshortchar.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple signed short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +signed short test_func_fn(signed char a1, signed short a2, + signed char a3, signed short a4) +{ + signed short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + signed char a1, a3; + signed short a2, a4; + + a1 = *(signed char *)avals[0]; + a2 = *(signed short *)avals[1]; + a3 = *(signed char *)avals[2]; + a4 = *(signed short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef signed short (*test_type)(signed char, signed short, + signed char, signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + signed char a, c; + signed short b, d, res_closure; + + a = 1; + b = 32765; + c = 127; + d = -128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = &ffi_type_sshort; + cl_arg_types[2] = &ffi_type_schar; + cl_arg_types[3] = &ffi_type_sshort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 32765 127 -128: 32765" } */ + printf("res: %d\n", (signed short)res_call); + /* { dg-output "\nres: 32765" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 32765, 127, -128); + /* { dg-output "\n1 32765 127 -128: 32765" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32765" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c new file mode 100644 index 0000000..009c02c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_uchar.c @@ -0,0 +1,91 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned char test_func_fn(unsigned char a1, unsigned char a2, + unsigned char a3, unsigned char a4) +{ + unsigned char result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a2, a3, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned char *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned char *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned char (*test_type)(unsigned char, unsigned char, + unsigned char, unsigned char); + +void test_func(ffi_cif *cif __UNUSED__, void *rval __UNUSED__, void **avals, + void *data __UNUSED__) +{ + printf("%d %d %d %d\n", *(unsigned char *)avals[0], + *(unsigned char *)avals[1], *(unsigned char *)avals[2], + *(unsigned char *)avals[3]); +} +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, b, c, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 125; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_uchar; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 125: 255" } */ + printf("res: %d\n", (unsigned char)res_call); + /* { dg-output "\nres: 255" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 125); + /* { dg-output "\n1 2 127 125: 255" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c new file mode 100644 index 0000000..dd10ca7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushort.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned short a1, unsigned short a2) +{ + unsigned short result; + + result = a1 + a2; + + printf("%d %d: %d\n", a1, a2, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned short a1, a2; + + a1 = *(unsigned short *)avals[0]; + a2 = *(unsigned short *)avals[1]; + + *(ffi_arg *)rval = test_func_fn(a1, a2); + +} + +typedef unsigned short (*test_type)(unsigned short, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[3]; + ffi_type * cl_arg_types[3]; + ffi_arg res_call; + unsigned short a, b, res_closure; + + a = 2; + b = 32765; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = NULL; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "2 32765: 32767" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 32767" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(2, 32765); + /* { dg-output "\n2 32765: 32767" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 32767" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c new file mode 100644 index 0000000..2588e97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_multi_ushortchar.c @@ -0,0 +1,86 @@ +/* Area: ffi_call, closure_call + Purpose: Check passing of multiple unsigned short/char values. + Limitations: none. + PR: PR13221. 
+ Originator: 20031129 */ + +/* { dg-do run } */ +#include "ffitest.h" + +unsigned short test_func_fn(unsigned char a1, unsigned short a2, + unsigned char a3, unsigned short a4) +{ + unsigned short result; + + result = a1 + a2 + a3 + a4; + + printf("%d %d %d %d: %d\n", a1, a2, a3, a4, result); + + return result; + +} + +static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals, + void *data __UNUSED__) +{ + unsigned char a1, a3; + unsigned short a2, a4; + + a1 = *(unsigned char *)avals[0]; + a2 = *(unsigned short *)avals[1]; + a3 = *(unsigned char *)avals[2]; + a4 = *(unsigned short *)avals[3]; + + *(ffi_arg *)rval = test_func_fn(a1, a2, a3, a4); + +} + +typedef unsigned short (*test_type)(unsigned char, unsigned short, + unsigned char, unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void * args_dbl[5]; + ffi_type * cl_arg_types[5]; + ffi_arg res_call; + unsigned char a, c; + unsigned short b, d, res_closure; + + a = 1; + b = 2; + c = 127; + d = 128; + + args_dbl[0] = &a; + args_dbl[1] = &b; + args_dbl[2] = &c; + args_dbl[3] = &d; + args_dbl[4] = NULL; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = &ffi_type_uchar; + cl_arg_types[3] = &ffi_type_ushort; + cl_arg_types[4] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_func_fn), &res_call, args_dbl); + /* { dg-output "1 2 127 128: 258" } */ + printf("res: %d\n", (unsigned short)res_call); + /* { dg-output "\nres: 258" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK); + + res_closure = (*((test_type)code))(1, 2, 127, 128); + /* { dg-output "\n1 2 127 128: 258" } */ + printf("res: %d\n", res_closure); + /* { dg-output "\nres: 258" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c new file mode 100644 index 0000000..d82a87a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer.c @@ -0,0 +1,74 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +void* cls_pointer_fn(void* a1, void* a2) +{ + void* result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + *(void**)resp = cls_pointer_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x12345678; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_pointer_fn), &res, args); + /* { dg-output "0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + /* { dg-output "\n0x12345678 0x89abcdef: 0x9be02467" } */ + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\nres: 0x9be02467" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c new file mode 100644 index 0000000..1f1d915 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_pointer_stack.c @@ -0,0 +1,142 @@ +/* Area: ffi_call, closure_call + Purpose: Check pointer arguments across multiple hideous stack frames. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/7/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +static long dummyVar; + +long dummy_func( + long double a1, char b1, + long double a2, char b2, + long double a3, char b3, + long double a4, char b4) +{ + return a1 + b1 + a2 + b2 + a3 + b3 + a4 + b4; +} + +void* cls_pointer_fn2(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(uintptr_t) a1, + (unsigned int)(uintptr_t) a2, + (unsigned int)(uintptr_t) result); + + return result; +} + +void* cls_pointer_fn1(void* a1, void* a2) +{ + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + void* result; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + result = (void*)((intptr_t)a1 + (intptr_t)a2); + + printf("0x%08x 0x%08x: 0x%08x\n", + (unsigned int)(intptr_t) a1, + (unsigned int)(intptr_t) a2, + (unsigned int)(intptr_t) result); + + result = cls_pointer_fn2(result, a1); + + return result; +} + +static void +cls_pointer_gn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + void* a1 = *(void**)(args[0]); + void* a2 = *(void**)(args[1]); + + long double trample1 = (intptr_t)a1 + (intptr_t)a2; + char trample2 = ((char*)&a1)[0] + ((char*)&a2)[0]; + long double trample3 = (intptr_t)trample1 + (intptr_t)a1; + char trample4 = trample2 + ((char*)&a1)[1]; + long double trample5 = (intptr_t)trample3 + (intptr_t)a2; + char trample6 = trample4 + ((char*)&a2)[1]; + long double trample7 = (intptr_t)trample5 + (intptr_t)trample1; + char trample8 = trample6 + trample2; + + dummyVar = dummy_func(trample1, trample2, trample3, trample4, + trample5, trample6, trample7, trample8); + + *(void**)resp = cls_pointer_fn1(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[3]; + /* ffi_type cls_pointer_type; */ + ffi_type* arg_types[3]; + +/* cls_pointer_type.size = sizeof(void*); + cls_pointer_type.alignment = 0; + cls_pointer_type.type = FFI_TYPE_POINTER; + cls_pointer_type.elements = NULL;*/ + + void* arg1 = (void*)0x01234567; + void* arg2 = (void*)0x89abcdef; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &ffi_type_pointer; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_pointer, + arg_types) == FFI_OK); + + args[0] = &arg1; + args[1] = &arg2; + args[2] = NULL; + + printf("\n"); + ffi_call(&cif, FFI_FN(cls_pointer_fn1), &res, args); + + printf("res: 0x%08x\n", (unsigned 
int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK); + + res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2); + + printf("res: 0x%08x\n", (unsigned int) res); + /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */ + /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */ + /* { dg-output "\nres: 0x8bf258bd" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c new file mode 100644 index 0000000..82986b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_schar.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Check return value schar. + Limitations: none. + PR: none. + Originator: 20031108 */ + + + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_schar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed char *)args[0]; + printf("%d: %d\n",*(signed char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed char (*cls_ret_schar)(signed char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed char res; + + cl_arg_types[0] = &ffi_type_schar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_schar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_schar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_schar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c new file mode 100644 index 0000000..c7e13b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sint.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sint32. + Limitations: none. + PR: none. 
+ Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed int *)args[0]; + printf("%d: %d\n",*(signed int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed int (*cls_ret_sint)(signed int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed int res; + + cl_arg_types[0] = &ffi_type_sint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sint)code))(65534); + /* { dg-output "65534: 65534" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65534" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c new file mode 100644 index 0000000..846d57e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_sshort.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value sshort. + Limitations: none. + PR: none. + Originator: 20031108 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_sshort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(signed short *)args[0]; + printf("%d: %d\n",*(signed short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef signed short (*cls_ret_sshort)(signed short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + signed short res; + + cl_arg_types[0] = &ffi_type_sshort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_sshort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_sshort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_sshort)code))(255); + /* { dg-output "255: 255" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 255" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c new file mode 100644 index 0000000..6d1fdae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_struct_va1.c @@ -0,0 +1,114 @@ +/* Area: ffi_call, closure_call + Purpose: Test doubles passed in variable argument lists. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ +/* { dg-output "" { xfail avr32*-*-* } } */ +#include "ffitest.h" + +struct small_tag +{ + unsigned char a; + unsigned char b; +}; + +struct large_tag +{ + unsigned a; + unsigned b; + unsigned c; + unsigned d; + unsigned e; +}; + +static void +test_fn (ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + int n = *(int*)args[0]; + struct small_tag s1 = * (struct small_tag *) args[1]; + struct large_tag l1 = * (struct large_tag *) args[2]; + struct small_tag s2 = * (struct small_tag *) args[3]; + + printf ("%d %d %d %d %d %d %d %d %d %d\n", n, s1.a, s1.b, + l1.a, l1.b, l1.c, l1.d, l1.e, + s2.a, s2.b); + * (ffi_arg*) resp = 42; +} + +int +main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc (sizeof (ffi_closure), &code); + ffi_type* arg_types[5]; + + ffi_arg res = 0; + + ffi_type s_type; + ffi_type *s_type_elements[3]; + + ffi_type l_type; + ffi_type *l_type_elements[6]; + + struct small_tag s1; + struct small_tag s2; + struct large_tag l1; + + int si; + + s_type.size = 0; + s_type.alignment = 0; + s_type.type = FFI_TYPE_STRUCT; + s_type.elements = s_type_elements; + + s_type_elements[0] = &ffi_type_uchar; + s_type_elements[1] = &ffi_type_uchar; + s_type_elements[2] = NULL; + + l_type.size = 0; + l_type.alignment = 0; + l_type.type = FFI_TYPE_STRUCT; + l_type.elements = l_type_elements; + + l_type_elements[0] = &ffi_type_uint; + l_type_elements[1] = &ffi_type_uint; + l_type_elements[2] = &ffi_type_uint; + l_type_elements[3] = &ffi_type_uint; + l_type_elements[4] = &ffi_type_uint; + l_type_elements[5] = NULL; + + arg_types[0] = &ffi_type_sint; + arg_types[1] = &s_type; + arg_types[2] = &l_type; + arg_types[3] = &s_type; + arg_types[4] = NULL; + + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, + arg_types) == FFI_OK); + + si = 4; + s1.a = 5; + s1.b = 6; + + s2.a = 20; + s2.b = 21; + + l1.a = 10; + l1.b = 11; + l1.c = 12; + l1.d = 13; + l1.e = 14; + + CHECK(ffi_prep_closure_loc(pcl, &cif, test_fn, NULL, code) == FFI_OK); + + res = ((int (*)(int, ...))(code))(si, s1, l1, s2); + /* { dg-output "4 5 6 10 11 12 13 14 20 21" } */ + printf("res: %d\n", (int) res); + /* { dg-output "\nres: 42" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c new file mode 100644 index 0000000..c1317e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar.c @@ -0,0 +1,42 @@ +/* Area: closure_call + Purpose: Check return value uchar. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uchar_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned char *)args[0]; + printf("%d: %d\n",*(unsigned char *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned char (*cls_ret_uchar)(unsigned char); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned char res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uchar_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uchar)code))(127); + /* { dg-output "127: 127" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 127" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c new file mode 100644 index 0000000..6491c5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uchar_va.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned char argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned char T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uchar; + cl_arg_types[1] = &ffi_type_uchar; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uchar, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c new file mode 100644 index 0000000..885cff5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value uint. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_uint_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg *)resp = *(unsigned int *)args[0]; + + printf("%d: %d\n",*(unsigned int *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned int (*cls_ret_uint)(unsigned int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned int res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_uint_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_uint)code))(2147483647); + /* { dg-output "2147483647: 2147483647" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 2147483647" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c new file mode 100644 index 0000000..b04cfd1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_uint_va.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned int argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned int T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)*(ffi_arg *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_uint; + cl_arg_types[1] = &ffi_type_uint; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_uint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c new file mode 100644 index 0000000..0315082 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulong_va.c @@ -0,0 +1,45 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned long argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. 
*/ + +/* { dg-do run } */ + +#include "ffitest.h" + +typedef unsigned long T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(T *)resp = *(T *)args[0]; + + printf("%ld: %ld %ld\n", *(T *)resp, *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ulong; + cl_arg_types[1] = &ffi_type_ulong; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ulong, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %ld\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c new file mode 100644 index 0000000..62f2cae --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ulonglong.c @@ -0,0 +1,47 @@ +/* Area: closure_call + Purpose: Check return value long long. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +static void cls_ret_ulonglong_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + *(unsigned long long *)resp= 0xfffffffffffffffLL ^ *(unsigned long long *)args[0]; + + printf("%" PRIuLL ": %" PRIuLL "\n",*(unsigned long long *)args[0], + *(unsigned long long *)(resp)); +} +typedef unsigned long long (*cls_ret_ulonglong)(unsigned long long); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned long long res; + + cl_arg_types[0] = &ffi_type_uint64; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_uint64, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ulonglong_fn, NULL, code) == FFI_OK); + res = (*((cls_ret_ulonglong)code))(214LL); + /* { dg-output "214: 1152921504606846761" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 1152921504606846761" } */ + + res = (*((cls_ret_ulonglong)code))(9223372035854775808LL); + /* { dg-output "\n9223372035854775808: 8070450533247928831" } */ + printf("res: %" PRIdLL "\n", res); + /* { dg-output "\nres: 8070450533247928831" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c new file mode 100644 index 0000000..a00100e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort.c @@ -0,0 +1,43 @@ +/* Area: closure_call + Purpose: Check return value ushort. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +static void cls_ret_ushort_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + *(ffi_arg*)resp = *(unsigned short *)args[0]; + + printf("%d: %d\n",*(unsigned short *)args[0], + (int)*(ffi_arg *)(resp)); +} +typedef unsigned short (*cls_ret_ushort)(unsigned short); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + unsigned short res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ushort_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_ushort)code))(65535); + /* { dg-output "65535: 65535" } */ + printf("res: %d\n",res); + /* { dg-output "\nres: 65535" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c new file mode 100644 index 0000000..37aa106 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/cls_ushort_va.c @@ -0,0 +1,44 @@ +/* Area: closure_call + Purpose: Test anonymous unsigned short argument. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef unsigned short T; + +static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + *(ffi_arg *)resp = *(T *)args[0]; + + printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]); + } + +typedef T (*cls_ret_T)(T, ...); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[3]; + T res; + + cl_arg_types[0] = &ffi_type_ushort; + cl_arg_types[1] = &ffi_type_ushort; + cl_arg_types[2] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, + &ffi_type_ushort, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK); + res = ((((cls_ret_T)code)(67, 4))); + /* { dg-output "67: 67 4" } */ + printf("res: %d\n", res); + /* { dg-output "\nres: 67" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c new file mode 100644 index 0000000..f5a7317 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/err_bad_abi.c @@ -0,0 +1,36 @@ +/* Area: ffi_prep_cif, ffi_prep_closure + Purpose: Test error return for bad ABIs. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/6/2007 */ + +/* { dg-do run } */ + +#include "ffitest.h" + +static void +dummy_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* arg_types[1]; + + arg_types[0] = NULL; + + CHECK(ffi_prep_cif(&cif, 255, 0, &ffi_type_void, + arg_types) == FFI_BAD_ABI); + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_void, + arg_types) == FFI_OK); + + cif.abi= 255; + + CHECK(ffi_prep_closure_loc(pcl, &cif, dummy_fn, NULL, code) == FFI_BAD_ABI); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h new file mode 100644 index 0000000..cfce1ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/ffitest.h @@ -0,0 +1,138 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <fficonfig.h> +#include <ffi.h> +#include "fficonfig.h" + +#if defined HAVE_STDINT_H +#include <stdint.h> +#endif + +#if defined HAVE_INTTYPES_H +#include <inttypes.h> +#endif + +#define MAX_ARGS 256 + +#define CHECK(x) (void)(!(x) ? (abort(), 1) : 0) + +/* Define macros so that compilers other than gcc can run the tests. */ +#undef __UNUSED__ +#if defined(__GNUC__) +#define __UNUSED__ __attribute__((__unused__)) +#define __STDCALL__ __attribute__((stdcall)) +#define __THISCALL__ __attribute__((thiscall)) +#define __FASTCALL__ __attribute__((fastcall)) +#define __MSABI__ __attribute__((ms_abi)) +#else +#define __UNUSED__ +#define __STDCALL__ __stdcall +#define __THISCALL__ __thiscall +#define __FASTCALL__ __fastcall +#endif + +#ifndef ABI_NUM +#define ABI_NUM FFI_DEFAULT_ABI +#define ABI_ATTR +#endif + +/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a + file open. */ +#ifdef HAVE_MMAP_ANON +# undef HAVE_MMAP_DEV_ZERO + +# include <sys/mman.h> +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) +# define MAP_ANONYMOUS MAP_ANON +# endif +# define USING_MMAP + +#endif + +#ifdef HAVE_MMAP_DEV_ZERO + +# include <sys/mman.h> +# ifndef MAP_FAILED +# define MAP_FAILED -1 +# endif +# define USING_MMAP + +#endif + +/* MinGW kludge. */ +#if defined(_WIN64) | defined(_WIN32) +#define PRIdLL "I64d" +#define PRIuLL "I64u" +#else +#define PRIdLL "lld" +#define PRIuLL "llu" +#endif + +/* Tru64 UNIX kludge. */ +#if defined(__alpha__) && defined(__osf__) +/* Tru64 UNIX V4.0 doesn't support %lld/%lld, but long is 64-bit. */ +#undef PRIdLL +#define PRIdLL "ld" +#undef PRIuLL +#define PRIuLL "lu" +#define PRId8 "hd" +#define PRIu8 "hu" +#define PRId64 "ld" +#define PRIu64 "lu" +#define PRIuPTR "lu" +#endif + +/* PA HP-UX kludge. */ +#if defined(__hppa__) && defined(__hpux__) && !defined(PRIuPTR) +#define PRIuPTR "lu" +#endif + +/* IRIX kludge. */ +#if defined(__sgi) +/* IRIX 6.5 provides all definitions, but only for C99 + compilations. */ +#define PRId8 "hhd" +#define PRIu8 "hhu" +#if (_MIPS_SZLONG == 32) +#define PRId64 "lld" +#define PRIu64 "llu" +#endif +/* This doesn't match <inttypes.h>, which always has "lld" here, but the + arguments are uint64_t, int64_t, which are unsigned long, long for + 64-bit in <inttypes.h>. */ +#if (_MIPS_SZLONG == 64) +#define PRId64 "ld" +#define PRIu64 "lu" +#endif +/* This doesn't match <inttypes.h>, which has "u" here, but the arguments + are uintptr_t, which is always unsigned long.
*/ +#define PRIuPTR "lu" +#endif + +/* Solaris < 10 kludge. */ +#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR) +#if defined(__arch64__) || defined (__x86_64__) +#define PRIuPTR "lu" +#else +#define PRIuPTR "u" +#endif +#endif + +/* MSVC kludge. */ +#if defined _MSC_VER +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) +#define PRIuPTR "lu" +#define PRIu8 "u" +#define PRId8 "d" +#define PRIu64 "I64u" +#define PRId64 "I64d" +#endif +#endif + +#ifndef PRIuPTR +#define PRIuPTR "u" +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c new file mode 100644 index 0000000..e8e1d86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/huge_struct.c @@ -0,0 +1,343 @@ +/* Area: ffi_call, closure_call + Purpose: Check large structure returns. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/18/2007 +*/ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */ +/* { dg-options -Wformat=0 { target moxie*-*-elf or1k-*-* } } */ + +#include <inttypes.h> + +#include "ffitest.h" + +typedef struct BigStruct{ + uint8_t a; + int8_t b; + uint16_t c; + int16_t d; + uint32_t e; + int32_t f; + uint64_t g; + int64_t h; + float i; + double j; + long double k; + char* l; + uint8_t m; + int8_t n; + uint16_t o; + int16_t p; + uint32_t q; + int32_t r; + uint64_t s; + int64_t t; + float u; + double v; + long double w; + char* x; + uint8_t y; + int8_t z; + uint16_t aa; + int16_t bb; + uint32_t cc; + int32_t dd; + uint64_t ee; + int64_t ff; + float gg; + double hh; + long double ii; + char* jj; + uint8_t kk; + int8_t ll; + uint16_t mm; + int16_t nn; + uint32_t oo; + int32_t pp; + uint64_t qq; + int64_t rr; + float ss; + double tt; + long double uu; + char* vv; + uint8_t ww; + int8_t xx; +} BigStruct; + +BigStruct +test_large_fn( + uint8_t ui8_1, + int8_t si8_1, + uint16_t ui16_1, + int16_t si16_1, + uint32_t ui32_1, + int32_t si32_1, + uint64_t ui64_1, + int64_t si64_1, + float f_1, + double d_1, + long double ld_1, + char* p_1, + uint8_t ui8_2, + int8_t si8_2, + uint16_t ui16_2, + int16_t si16_2, + uint32_t ui32_2, + int32_t si32_2, + uint64_t ui64_2, + int64_t si64_2, + float f_2, + double d_2, + long double ld_2, + char* p_2, + uint8_t ui8_3, + int8_t si8_3, + uint16_t ui16_3, + int16_t si16_3, + uint32_t ui32_3, + int32_t si32_3, + uint64_t ui64_3, + int64_t si64_3, + float f_3, + double d_3, + long double ld_3, + char* p_3, + uint8_t ui8_4, + int8_t si8_4, + uint16_t ui16_4, + int16_t si16_4, + uint32_t ui32_4, + int32_t si32_4, + uint64_t ui64_4, + int64_t si64_4, + float f_4, + double d_4, + long double ld_4, + char* p_4, + uint8_t ui8_5, + int8_t si8_5) +{ + BigStruct retVal = { + ui8_1 + 1, si8_1 + 1, ui16_1 + 1, si16_1 + 1, ui32_1 + 1, si32_1 + 1, + ui64_1 + 1, si64_1 + 1, f_1 + 1, d_1 + 1, ld_1 + 1, (char*)((intptr_t)p_1 + 1), + ui8_2 + 2, si8_2 + 2, ui16_2 + 2, si16_2 + 2, ui32_2 + 2, si32_2 + 2, + ui64_2 + 2, si64_2 + 2, f_2 + 2, d_2 + 2, ld_2 + 2, (char*)((intptr_t)p_2 + 2), + ui8_3 + 3, si8_3 + 3, ui16_3 + 3, si16_3 + 3, ui32_3 + 3, si32_3 + 3, + ui64_3 + 3, si64_3 + 3, f_3 + 3, d_3 + 3, ld_3 + 3, (char*)((intptr_t)p_3 + 3), + ui8_4 + 4, si8_4 + 4, ui16_4 + 4, si16_4 + 4, ui32_4 + 4, si32_4 + 4, + ui64_4 + 4, si64_4 + 4, f_4 + 4, d_4 + 4, ld_4 + 4, (char*)((intptr_t)p_4 + 4), + ui8_5 + 5, si8_5 + 5};
+ + printf("%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 ": " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, (unsigned long)p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, (unsigned long)p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, (unsigned long)p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, (unsigned long)p_4, ui8_5, si8_5, + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + + return retVal; +} + +static void +cls_large_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + uint8_t ui8_1 = *(uint8_t*)args[0]; + int8_t si8_1 = *(int8_t*)args[1]; + uint16_t ui16_1 = *(uint16_t*)args[2]; + int16_t si16_1 = *(int16_t*)args[3]; + uint32_t ui32_1 = *(uint32_t*)args[4]; + int32_t si32_1 = *(int32_t*)args[5]; + uint64_t ui64_1 = *(uint64_t*)args[6]; + int64_t si64_1 = *(int64_t*)args[7]; + float f_1 = *(float*)args[8]; + double d_1 = *(double*)args[9]; + long double ld_1 = *(long double*)args[10]; + char* p_1 = *(char**)args[11]; + uint8_t ui8_2 = *(uint8_t*)args[12]; + int8_t si8_2 = *(int8_t*)args[13]; + uint16_t ui16_2 = *(uint16_t*)args[14]; + int16_t si16_2 = *(int16_t*)args[15]; + uint32_t ui32_2 = *(uint32_t*)args[16]; + int32_t si32_2 = *(int32_t*)args[17]; + uint64_t ui64_2 = *(uint64_t*)args[18]; + int64_t si64_2 = *(int64_t*)args[19]; + float f_2 = *(float*)args[20]; + double d_2 = *(double*)args[21]; + long double ld_2 = *(long double*)args[22]; + char* p_2 = *(char**)args[23]; + uint8_t ui8_3 = *(uint8_t*)args[24]; + int8_t si8_3 = *(int8_t*)args[25]; + uint16_t ui16_3 = *(uint16_t*)args[26]; + int16_t si16_3 = *(int16_t*)args[27]; + uint32_t ui32_3 = *(uint32_t*)args[28]; + int32_t si32_3 = *(int32_t*)args[29]; + uint64_t ui64_3 = *(uint64_t*)args[30]; + int64_t si64_3 = *(int64_t*)args[31]; + float f_3 = *(float*)args[32]; + double d_3 = *(double*)args[33]; + long double ld_3 = *(long double*)args[34]; + char* p_3 = *(char**)args[35]; + uint8_t ui8_4 = *(uint8_t*)args[36]; + int8_t si8_4 = *(int8_t*)args[37]; + uint16_t ui16_4 = *(uint16_t*)args[38]; + int16_t si16_4 = *(int16_t*)args[39]; + uint32_t ui32_4 = *(uint32_t*)args[40]; + int32_t si32_4 = *(int32_t*)args[41]; + uint64_t ui64_4 = 
*(uint64_t*)args[42]; + int64_t si64_4 = *(int64_t*)args[43]; + float f_4 = *(float*)args[44]; + double d_4 = *(double*)args[45]; + long double ld_4 = *(long double*)args[46]; + char* p_4 = *(char**)args[47]; + uint8_t ui8_5 = *(uint8_t*)args[48]; + int8_t si8_5 = *(int8_t*)args[49]; + + *(BigStruct*)resp = test_large_fn( + ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, p_1, + ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, p_2, + ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, p_3, + ui8_4, si8_4, ui16_4, si16_4, ui32_4, si32_4, ui64_4, si64_4, f_4, d_4, ld_4, p_4, + ui8_5, si8_5); +} + +int +main(int argc __UNUSED__, const char** argv __UNUSED__) +{ + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + + ffi_cif cif; + ffi_type* argTypes[51]; + void* argValues[51]; + + ffi_type ret_struct_type; + ffi_type* st_fields[51]; + BigStruct retVal; + + uint8_t ui8 = 1; + int8_t si8 = 2; + uint16_t ui16 = 3; + int16_t si16 = 4; + uint32_t ui32 = 5; + int32_t si32 = 6; + uint64_t ui64 = 7; + int64_t si64 = 8; + float f = 9; + double d = 10; + long double ld = 11; + char* p = (char*)0x12345678; + + memset (&retVal, 0, sizeof(retVal)); + + ret_struct_type.size = 0; + ret_struct_type.alignment = 0; + ret_struct_type.type = FFI_TYPE_STRUCT; + ret_struct_type.elements = st_fields; + + st_fields[0] = st_fields[12] = st_fields[24] = st_fields[36] = st_fields[48] = &ffi_type_uint8; + st_fields[1] = st_fields[13] = st_fields[25] = st_fields[37] = st_fields[49] = &ffi_type_sint8; + st_fields[2] = st_fields[14] = st_fields[26] = st_fields[38] = &ffi_type_uint16; + st_fields[3] = st_fields[15] = st_fields[27] = st_fields[39] = &ffi_type_sint16; + st_fields[4] = st_fields[16] = st_fields[28] = st_fields[40] = &ffi_type_uint32; + st_fields[5] = st_fields[17] = st_fields[29] = st_fields[41] = &ffi_type_sint32; + st_fields[6] = st_fields[18] = st_fields[30] = st_fields[42] = &ffi_type_uint64; + st_fields[7] = st_fields[19] = st_fields[31] = st_fields[43] = &ffi_type_sint64; + st_fields[8] = st_fields[20] = st_fields[32] = st_fields[44] = &ffi_type_float; + st_fields[9] = st_fields[21] = st_fields[33] = st_fields[45] = &ffi_type_double; + st_fields[10] = st_fields[22] = st_fields[34] = st_fields[46] = &ffi_type_longdouble; + st_fields[11] = st_fields[23] = st_fields[35] = st_fields[47] = &ffi_type_pointer; + + st_fields[50] = NULL; + + argTypes[0] = argTypes[12] = argTypes[24] = argTypes[36] = argTypes[48] = &ffi_type_uint8; + argValues[0] = argValues[12] = argValues[24] = argValues[36] = argValues[48] = &ui8; + argTypes[1] = argTypes[13] = argTypes[25] = argTypes[37] = argTypes[49] = &ffi_type_sint8; + argValues[1] = argValues[13] = argValues[25] = argValues[37] = argValues[49] = &si8; + argTypes[2] = argTypes[14] = argTypes[26] = argTypes[38] = &ffi_type_uint16; + argValues[2] = argValues[14] = argValues[26] = argValues[38] = &ui16; + argTypes[3] = argTypes[15] = argTypes[27] = argTypes[39] = &ffi_type_sint16; + argValues[3] = argValues[15] = argValues[27] = argValues[39] = &si16; + argTypes[4] = argTypes[16] = argTypes[28] = argTypes[40] = &ffi_type_uint32; + argValues[4] = argValues[16] = argValues[28] = argValues[40] = &ui32; + argTypes[5] = argTypes[17] = argTypes[29] = argTypes[41] = &ffi_type_sint32; + argValues[5] = argValues[17] = argValues[29] = argValues[41] = &si32; + argTypes[6] = argTypes[18] = argTypes[30] = argTypes[42] = &ffi_type_uint64; + argValues[6] = argValues[18] = 
argValues[30] = argValues[42] = &ui64; + argTypes[7] = argTypes[19] = argTypes[31] = argTypes[43] = &ffi_type_sint64; + argValues[7] = argValues[19] = argValues[31] = argValues[43] = &si64; + argTypes[8] = argTypes[20] = argTypes[32] = argTypes[44] = &ffi_type_float; + argValues[8] = argValues[20] = argValues[32] = argValues[44] = &f; + argTypes[9] = argTypes[21] = argTypes[33] = argTypes[45] = &ffi_type_double; + argValues[9] = argValues[21] = argValues[33] = argValues[45] = &d; + argTypes[10] = argTypes[22] = argTypes[34] = argTypes[46] = &ffi_type_longdouble; + argValues[10] = argValues[22] = argValues[34] = argValues[46] = &ld; + argTypes[11] = argTypes[23] = argTypes[35] = argTypes[47] = &ffi_type_pointer; + argValues[11] = argValues[23] = argValues[35] = argValues[47] = &p; + + argTypes[50] = NULL; + argValues[50] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 50, &ret_struct_type, argTypes) == FFI_OK); + + ffi_call(&cif, FFI_FN(test_large_fn), &retVal, argValues); + /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_large_fn, NULL, code) == FFI_OK); + + retVal = ((BigStruct(*)( + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, uint64_t, int64_t, float, double, long double, char*, + uint8_t, int8_t))(code))( + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p, + ui8, si8); + /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 
0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx " + "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n", + retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f, + retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l, + retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r, + retVal.s, retVal.t, retVal.u, retVal.v, retVal.w, (unsigned long)retVal.x, + retVal.y, retVal.z, retVal.aa, retVal.bb, retVal.cc, retVal.dd, + retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj, + retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp, + retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx); + /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */ + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c new file mode 100644 index 0000000..c15e3a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct.c @@ -0,0 +1,152 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. 
+ Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + 
g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined)) + (code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c new file mode 100644 index 0000000..477a6b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct1.c @@ -0,0 +1,161 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_16byte1 { + double a; + float b; + int c; +} cls_struct_16byte1; + +typedef struct cls_struct_16byte2 { + int ii; + double dd; + float ff; +} cls_struct_16byte2; + +typedef struct cls_struct_combined { + cls_struct_16byte1 d; + cls_struct_16byte2 e; +} cls_struct_combined; + +cls_struct_combined cls_struct_combined_fn(struct cls_struct_16byte1 b0, + struct cls_struct_16byte2 b1, + struct cls_struct_combined b2, + struct cls_struct_16byte1 b3) +{ + struct cls_struct_combined result; + + result.d.a = b0.a + b1.dd + b2.d.a; + result.d.b = b0.b + b1.ff + b2.d.b; + result.d.c = b0.c + b1.ii + b2.d.c; + result.e.ii = b0.c + b1.ii + b2.e.ii; + result.e.dd = b0.a + b1.dd + b2.e.dd; + result.e.ff = b0.b + b1.ff + b2.e.ff; + + printf("%g %g %d %d %g %g %g %g %d %d %g %g %g %g %d: %g %g %d %d %g %g\n", + b0.a, b0.b, b0.c, + b1.ii, b1.dd, b1.ff, + b2.d.a, b2.d.b, b2.d.c, + b2.e.ii, b2.e.dd, b2.e.ff, + b3.a, b3.b, b3.c, + result.d.a, result.d.b, result.d.c, + result.e.ii, result.e.dd, result.e.ff); + + return result; +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct cls_struct_16byte1 b0; + struct cls_struct_16byte2 b1; + struct cls_struct_combined b2; + struct cls_struct_16byte1 b3; + + b0 = *(struct cls_struct_16byte1*)(args[0]); + b1 = *(struct cls_struct_16byte2*)(args[1]); + b2 = *(struct cls_struct_combined*)(args[2]); + b3 = *(struct cls_struct_16byte1*)(args[3]); + + + *(cls_struct_combined*)resp = cls_struct_combined_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[5]; + ffi_type* cls_struct_fields1[5]; + ffi_type* cls_struct_fields2[5]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6}; + struct 
cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0}; + struct cls_struct_combined g_dbl = {{4.0, 5.0, 6}, + {3, 1.0, 8.0}}; + struct cls_struct_16byte1 h_dbl = { 3.0, 2.0, 4}; + struct cls_struct_combined res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_float; + cls_struct_fields[2] = &ffi_type_sint; + cls_struct_fields[3] = NULL; + + cls_struct_fields1[0] = &ffi_type_sint; + cls_struct_fields1[1] = &ffi_type_double; + cls_struct_fields1[2] = &ffi_type_float; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &cls_struct_type; + cls_struct_fields2[1] = &cls_struct_type1; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type2, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_combined_fn), &res_dbl, args_dbl); + /* { dg-output "9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + res_dbl = ((cls_struct_combined(*)(cls_struct_16byte1, + cls_struct_16byte2, + cls_struct_combined, + cls_struct_16byte1)) + (code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n9 2 6 1 2 3 4 5 6 3 1 8 3 2 4: 15 10 13 10 12 13" } */ + CHECK( res_dbl.d.a == (e_dbl.a + f_dbl.dd + g_dbl.d.a)); + CHECK( res_dbl.d.b == (e_dbl.b + f_dbl.ff + g_dbl.d.b)); + CHECK( res_dbl.d.c == (e_dbl.c + f_dbl.ii + g_dbl.d.c)); + CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii)); + CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd)); + CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff)); + /* CHECK( 1 == 0); */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c new file mode 100644 index 0000000..3cf2b44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct10.c @@ -0,0 +1,134 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + unsigned char y; + struct A x; + unsigned int z; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b3.z + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + result.z = 0; + + printf("%d %d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, b3.z, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[4]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = { 99, {12LL , 127}, 255}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &ffi_type_uchar; + cls_struct_fields1[1] = &cls_struct_type; + cls_struct_fields1[2] = &ffi_type_uint; + cls_struct_fields1[3] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 255 2 9: 270 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + f_dbl.z + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c new file mode 100644 index 0000000..3510493 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct11.c @@ -0,0 +1,121 @@ +/* Area: ffi_call, closure_call + Purpose: Check parameter passing with nested structs + of a single type. This tests the special cases + for homogeneous floating-point aggregates in the + AArch64 PCS. + Limitations: none. + PR: none. + Originator: ARM Ltd. */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + float a_x; + float a_y; +} A; + +typedef struct B { + float b_x; + float b_y; +} B; + +typedef struct C { + A a; + B b; +} C; + +static C C_fn (int x, int y, int z, C source, int i, int j, int k) +{ + C result; + result.a.a_x = source.a.a_x; + result.a.a_y = source.a.a_y; + result.b.b_x = source.b.b_x; + result.b.b_y = source.b.b_y; + + printf ("%d, %d, %d, %d, %d, %d\n", x, y, z, i, j, k); + + printf ("%.1f, %.1f, %.1f, %.1f, " + "%.1f, %.1f, %.1f, %.1f\n", + source.a.a_x, source.a.a_y, + source.b.b_x, source.b.b_y, + result.a.a_x, result.a.a_y, + result.b.b_x, result.b.b_y); + + return result; +} + +int main (void) +{ + ffi_cif cif; + + ffi_type* struct_fields_source_a[3]; + ffi_type* struct_fields_source_b[3]; + ffi_type* struct_fields_source_c[3]; + ffi_type* arg_types[8]; + + ffi_type struct_type_a, struct_type_b, struct_type_c; + + struct A source_fld_a = {1.0, 2.0}; + struct B source_fld_b = {4.0, 8.0}; + int k = 1; + + struct C result; + struct C source = {source_fld_a, source_fld_b}; + + struct_type_a.size = 0; + struct_type_a.alignment = 0; + struct_type_a.type = FFI_TYPE_STRUCT; + struct_type_a.elements = struct_fields_source_a; + + struct_type_b.size = 0; + struct_type_b.alignment = 0; + struct_type_b.type = FFI_TYPE_STRUCT; + struct_type_b.elements = struct_fields_source_b; + + struct_type_c.size = 0; + struct_type_c.alignment = 0; + struct_type_c.type = FFI_TYPE_STRUCT; + struct_type_c.elements = struct_fields_source_c; + + struct_fields_source_a[0] = &ffi_type_float; + struct_fields_source_a[1] = &ffi_type_float; + struct_fields_source_a[2] = NULL; + + struct_fields_source_b[0] = &ffi_type_float; + struct_fields_source_b[1] = &ffi_type_float; + struct_fields_source_b[2] = NULL; + + struct_fields_source_c[0] = &struct_type_a; + struct_fields_source_c[1] = &struct_type_b; + struct_fields_source_c[2] = NULL; + + arg_types[0] = &ffi_type_sint32; + arg_types[1] = &ffi_type_sint32; + arg_types[2] = &ffi_type_sint32; + arg_types[3] = &struct_type_c; + arg_types[4] = &ffi_type_sint32; + arg_types[5] = &ffi_type_sint32; + arg_types[6] = &ffi_type_sint32; + arg_types[7] = NULL; + + void *args[7]; + args[0] = &k; + args[1] = &k; + args[2] = &k; + args[3] = &source; + args[4] = &k; + args[5] = &k; + args[6] = &k; + CHECK (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 7, &struct_type_c, + arg_types) == FFI_OK); + + ffi_call (&cif, FFI_FN (C_fn), &result, args); + /* { dg-output "1, 1, 1, 1, 1, 1\n" } */ + /* { dg-output "1.0, 2.0, 4.0, 8.0, 1.0, 2.0, 4.0, 8.0" } */ + CHECK (result.a.a_x == source.a.a_x); + CHECK (result.a.a_y == source.a.a_y); + CHECK (result.b.b_x == source.b.b_x); + CHECK (result.b.b_y == source.b.b_y); + exit (0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c new file mode 100644 index 
0000000..69268cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct2.c @@ -0,0 +1,110 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%lu %d %lu %d %d: %lu %d %d\n", b0.a, b0.b, b1.x.a, b1.x.b, b1.y, + result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1, 7}; + struct B f_dbl = {{12 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_ulong; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c new file mode 100644 index 0000000..ab18cad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct3.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. 
+ PR: none. + Originator: 20030911 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +B B_fn(struct A b0, struct B b1) +{ + struct B result; + + result.x.a = b0.a + b1.x.a; + result.x.b = b0.b + b1.x.b + b1.y; + result.y = b0.b + b1.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b0.a, b0.b, + (int)b1.x.a, b1.x.b, b1.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c new file mode 100644 index 0000000..2ffb4d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct4.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c new file mode 100644 index 0000000..6c79845 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct5.c @@ -0,0 +1,112 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + long double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_longdouble; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c new file mode 100644 index 0000000..59d3579 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct6.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: PR 25630. 
+ Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + double a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1.0, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_slong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c new file mode 100644 index 0000000..27595e6 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct7.c @@ -0,0 +1,111 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +static B B_fn(struct A b2, struct B b3) +{ + struct B result; + + result.x.a = b2.a + b3.x.a; + result.x.b = b2.b + b3.x.b + b3.y; + result.y = b2.b + b3.x.b; + + printf("%d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + + *(B*)resp = B_fn(b0, b1); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[3]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type cls_struct_type, cls_struct_type1; + ffi_type* dbl_arg_types[3]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12.0 , 127}, 99}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B))(code))(e_dbl, f_dbl); + /* { dg-output "\n1 7 12 127 99: 13 233 134" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c new file mode 100644 index 0000000..0e6c682 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct8.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. 
+ PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned long long a; + unsigned char b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", (int)b2.a, b2.b, + (int)b3.x.a, b3.x.b, b3.y, (int)b4.d, b4.e, + (int)result.x.a, result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1LL, 7}; + struct B f_dbl = {{12LL , 127}, 99}; + struct C g_dbl = { 2LL, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uint64; + cls_struct_fields[1] = &ffi_type_uchar; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_uint64; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c new file mode 100644 index 0000000..5f7ac67 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/nested_struct9.c @@ -0,0 +1,131 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Contains structs as parameter of the struct itself. + Sample taken from Alan Modras patch to src/prep_cif.c. + Limitations: none. + PR: none. + Originator: 20051010 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct A { + unsigned char a; + unsigned long long b; +} A; + +typedef struct B { + struct A x; + unsigned char y; +} B; + +typedef struct C { + unsigned long d; + unsigned char e; +} C; + +static B B_fn(struct A b2, struct B b3, struct C b4) +{ + struct B result; + + result.x.a = b2.a + b3.x.a + b4.d; + result.x.b = b2.b + b3.x.b + b3.y + b4.e; + result.y = b2.b + b3.x.b + b4.e; + + printf("%d %d %d %d %d %d %d: %d %d %d\n", b2.a, (int)b2.b, + b3.x.a, (int)b3.x.b, b3.y, (int)b4.d, b4.e, + result.x.a, (int)result.x.b, result.y); + + return result; +} + +static void +B_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct A b0; + struct B b1; + struct C b2; + + b0 = *(struct A*)(args[0]); + b1 = *(struct B*)(args[1]); + b2 = *(struct C*)(args[2]); + + *(B*)resp = B_fn(b0, b1, b2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[4]; + ffi_type* cls_struct_fields[3]; + ffi_type* cls_struct_fields1[3]; + ffi_type* cls_struct_fields2[3]; + ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2; + ffi_type* dbl_arg_types[4]; + + struct A e_dbl = { 1, 7LL}; + struct B f_dbl = {{12.0 , 127}, 99}; + struct C g_dbl = { 2, 9}; + + struct B res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_type1.size = 0; + cls_struct_type1.alignment = 0; + cls_struct_type1.type = FFI_TYPE_STRUCT; + cls_struct_type1.elements = cls_struct_fields1; + + cls_struct_type2.size = 0; + cls_struct_type2.alignment = 0; + cls_struct_type2.type = FFI_TYPE_STRUCT; + cls_struct_type2.elements = cls_struct_fields2; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &ffi_type_uint64; + cls_struct_fields[2] = NULL; + + cls_struct_fields1[0] = &cls_struct_type; + cls_struct_fields1[1] = &ffi_type_uchar; + cls_struct_fields1[2] = NULL; + + cls_struct_fields2[0] = &ffi_type_ulong; + cls_struct_fields2[1] = &ffi_type_uchar; + cls_struct_fields2[2] = NULL; + + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type1; + dbl_arg_types[2] = &cls_struct_type2; + dbl_arg_types[3] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &cls_struct_type1, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = NULL; + + ffi_call(&cif, FFI_FN(B_fn), &res_dbl, args_dbl); + /* { dg-output "1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + f_dbl.x.b + g_dbl.e)); + + CHECK(ffi_prep_closure_loc(pcl, &cif, B_gn, NULL, code) == FFI_OK); + + res_dbl = ((B(*)(A, B, C))(code))(e_dbl, f_dbl, g_dbl); + /* { dg-output "\n1 7 12 127 99 2 9: 15 242 143" } */ + CHECK( res_dbl.x.a == (e_dbl.a + f_dbl.x.a + g_dbl.d)); + CHECK( res_dbl.x.b == (e_dbl.b + f_dbl.x.b + f_dbl.y + g_dbl.e)); + CHECK( res_dbl.y == (e_dbl.b + 
f_dbl.x.b + g_dbl.e)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c new file mode 100644 index 0000000..6a91555 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/problem1.c @@ -0,0 +1,90 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure passing with different structure size. + Limitations: none. + PR: none. + Originator: 20030828 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct my_ffi_struct { + double a; + double b; + double c; +} my_ffi_struct; + +my_ffi_struct callee(struct my_ffi_struct a1, struct my_ffi_struct a2) +{ + struct my_ffi_struct result; + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + + printf("%g %g %g %g %g %g: %g %g %g\n", a1.a, a1.b, a1.c, + a2.a, a2.b, a2.c, result.a, result.b, result.c); + + return result; +} + +void stub(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + struct my_ffi_struct a1; + struct my_ffi_struct a2; + + a1 = *(struct my_ffi_struct*)(args[0]); + a2 = *(struct my_ffi_struct*)(args[1]); + + *(my_ffi_struct *)resp = callee(a1, a2); +} + + +int main(void) +{ + ffi_type* my_ffi_struct_fields[4]; + ffi_type my_ffi_struct_type; + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[3]; + + struct my_ffi_struct g = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct f = { 1.0, 2.0, 3.0 }; + struct my_ffi_struct res; + + my_ffi_struct_type.size = 0; + my_ffi_struct_type.alignment = 0; + my_ffi_struct_type.type = FFI_TYPE_STRUCT; + my_ffi_struct_type.elements = my_ffi_struct_fields; + + my_ffi_struct_fields[0] = &ffi_type_double; + my_ffi_struct_fields[1] = &ffi_type_double; + my_ffi_struct_fields[2] = &ffi_type_double; + my_ffi_struct_fields[3] = NULL; + + arg_types[0] = &my_ffi_struct_type; + arg_types[1] = &my_ffi_struct_type; + arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &my_ffi_struct_type, + arg_types) == FFI_OK); + + args[0] = &g; + args[1] = &f; + args[2] = NULL; + ffi_call(&cif, FFI_FN(callee), &res, args); + /* { dg-output "1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, stub, NULL, code) == FFI_OK); + + res = ((my_ffi_struct(*)(struct my_ffi_struct, struct my_ffi_struct))(code))(g, f); + /* { dg-output "\n1 2 3 1 2 3: 2 4 6" } */ + printf("res: %g %g %g\n", res.a, res.b, res.c); + /* { dg-output "\nres: 2 4 6" } */ + + exit(0);; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c new file mode 100644 index 0000000..71c2469 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large.c @@ -0,0 +1,145 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_108byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + int n; +} struct_108byte; + +struct_108byte cls_struct_108byte_fn( + struct_108byte b0, + struct_108byte b1, + struct_108byte b2, + struct_108byte b3) +{ + struct_108byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n); + + return result; +} + +static void +cls_struct_108byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_108byte b0, b1, b2, b3; + + b0 = *(struct_108byte*)(args[0]); + b1 = *(struct_108byte*)(args[1]); + b2 = *(struct_108byte*)(args[2]); + b3 = *(struct_108byte*)(args[3]); + + *(struct_108byte*)resp = cls_struct_108byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[15]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_108byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 7 }; + struct_108byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 4 }; + struct_108byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 3 }; + struct_108byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 2 }; + struct_108byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_sint32; + cls_struct_fields[14] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + 
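+ /* Descriptive note: the cif prepared above describes a call taking four struct_108byte arguments by value and returning a struct_108byte; the ffi_call below invokes the native cls_struct_108byte_fn through that description, reading each argument via the pointers gathered in args_dbl and writing the returned struct into res_dbl. */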
ffi_call(&cif, FFI_FN(cls_struct_108byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_108byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_108byte(*)(struct_108byte, struct_108byte, + struct_108byte, struct_108byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c new file mode 100644 index 0000000..d9c750e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_large2.c @@ -0,0 +1,148 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +/* 13 FPRs: 104 bytes */ +/* 14 FPRs: 112 bytes */ + +typedef struct struct_116byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; + double j; + double k; + double l; + double m; + double n; + int o; +} struct_116byte; + +struct_116byte cls_struct_116byte_fn( + struct_116byte b0, + struct_116byte b1, + struct_116byte b2, + struct_116byte b3) +{ + struct_116byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + result.j = b0.j + b1.j + b2.j + b3.j; + result.k = b0.k + b1.k + b2.k + b3.k; + result.l = b0.l + b1.l + b2.l + b3.l; + result.m = b0.m + b1.m + b2.m + b3.m; + result.n = b0.n + b1.n + b2.n + b3.n; + result.o = b0.o + b1.o + b2.o + b3.o; + + printf("%g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i, + result.j, result.k, result.l, result.m, result.n, result.o); + + return result; +} + +static void +cls_struct_116byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_116byte b0, b1, b2, b3; + + b0 = *(struct_116byte*)(args[0]); + b1 = *(struct_116byte*)(args[1]); + b2 = *(struct_116byte*)(args[2]); + b3 = *(struct_116byte*)(args[3]); + + *(struct_116byte*)resp = cls_struct_116byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + 
void* args_dbl[5]; + ffi_type* cls_struct_fields[16]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_116byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 7 }; + struct_116byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 6.0, 4 }; + struct_116byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 7.0, 3 }; + struct_116byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 8.0, 2 }; + struct_116byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = &ffi_type_double; + cls_struct_fields[10] = &ffi_type_double; + cls_struct_fields[11] = &ffi_type_double; + cls_struct_fields[12] = &ffi_type_double; + cls_struct_fields[13] = &ffi_type_double; + cls_struct_fields[14] = &ffi_type_sint32; + cls_struct_fields[15] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_116byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_116byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_116byte(*)(struct_116byte, struct_116byte, + struct_116byte, struct_116byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g %g %g %g %g %g %d\n", res_dbl.a, res_dbl.b, + res_dbl.c, res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i, + res_dbl.j, res_dbl.k, res_dbl.l, res_dbl.m, res_dbl.n, res_dbl.o); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 22 15 17 25 6 26 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c new file mode 100644 index 0000000..973ee02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium.c @@ -0,0 +1,124 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. 
+ Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + double i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %g\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7.0 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3.0 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2.0 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_double; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = &e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %g\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, 
res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c new file mode 100644 index 0000000..84323d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/stret_medium2.c @@ -0,0 +1,125 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure returning with different structure size. + Depending on the ABI. Check bigger struct which overlaps + the gp and fp register count on Darwin/AIX/ppc64. + Limitations: none. + PR: none. + Originator: Blake Chaffin 6/21/2007 */ + +/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */ +/* { dg-options "-Wno-format" { target alpha*-dec-osf* } } */ +#include "ffitest.h" + +typedef struct struct_72byte { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + long long i; +} struct_72byte; + +struct_72byte cls_struct_72byte_fn( + struct_72byte b0, + struct_72byte b1, + struct_72byte b2, + struct_72byte b3) +{ + struct_72byte result; + + result.a = b0.a + b1.a + b2.a + b3.a; + result.b = b0.b + b1.b + b2.b + b3.b; + result.c = b0.c + b1.c + b2.c + b3.c; + result.d = b0.d + b1.d + b2.d + b3.d; + result.e = b0.e + b1.e + b2.e + b3.e; + result.f = b0.f + b1.f + b2.f + b3.f; + result.g = b0.g + b1.g + b2.g + b3.g; + result.h = b0.h + b1.h + b2.h + b3.h; + result.i = b0.i + b1.i + b2.i + b3.i; + + printf("%g %g %g %g %g %g %g %g %" PRIdLL "\n", result.a, result.b, result.c, + result.d, result.e, result.f, result.g, result.h, result.i); + + return result; +} + +static void +cls_struct_72byte_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, void* userdata __UNUSED__) +{ + struct_72byte b0, b1, b2, b3; + + b0 = *(struct_72byte*)(args[0]); + b1 = *(struct_72byte*)(args[1]); + b2 = *(struct_72byte*)(args[2]); + b3 = *(struct_72byte*)(args[3]); + + *(struct_72byte*)resp = cls_struct_72byte_fn(b0, b1, b2, b3); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_dbl[5]; + ffi_type* cls_struct_fields[10]; + ffi_type cls_struct_type; + ffi_type* dbl_arg_types[5]; + + struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7 }; + struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4 }; + struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3 }; + struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2 }; + struct_72byte res_dbl; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_double; + cls_struct_fields[1] = &ffi_type_double; + cls_struct_fields[2] = &ffi_type_double; + cls_struct_fields[3] = &ffi_type_double; + cls_struct_fields[4] = &ffi_type_double; + cls_struct_fields[5] = &ffi_type_double; + cls_struct_fields[6] = &ffi_type_double; + cls_struct_fields[7] = &ffi_type_double; + cls_struct_fields[8] = &ffi_type_sint64; + cls_struct_fields[9] = NULL; + + dbl_arg_types[0] = &cls_struct_type; + dbl_arg_types[1] = &cls_struct_type; + dbl_arg_types[2] = &cls_struct_type; + dbl_arg_types[3] = &cls_struct_type; + dbl_arg_types[4] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, &cls_struct_type, + dbl_arg_types) == FFI_OK); + + args_dbl[0] = 
&e_dbl; + args_dbl[1] = &f_dbl; + args_dbl[2] = &g_dbl; + args_dbl[3] = &h_dbl; + args_dbl[4] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_72byte_fn), &res_dbl, args_dbl); + /* { dg-output "22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_72byte_gn, NULL, code) == FFI_OK); + + res_dbl = ((struct_72byte(*)(struct_72byte, struct_72byte, + struct_72byte, struct_72byte))(code))(e_dbl, f_dbl, g_dbl, h_dbl); + /* { dg-output "\n22 15 17 25 6 13 19 18 16" } */ + printf("res: %g %g %g %g %g %g %g %g %" PRIdLL "\n", res_dbl.a, res_dbl.b, res_dbl.c, + res_dbl.d, res_dbl.e, res_dbl.f, res_dbl.g, res_dbl.h, res_dbl.i); + /* { dg-output "\nres: 22 15 17 25 6 13 19 18 16" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c new file mode 100644 index 0000000..ca31056 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/testclosure.c @@ -0,0 +1,70 @@ +/* Area: closure_call + Purpose: Check return value float. + Limitations: none. + PR: 41908. + Originator: 20091102 */ + +/* { dg-do run } */ +#include "ffitest.h" + +typedef struct cls_struct_combined { + float a; + float b; + float c; + float d; +} cls_struct_combined; + +void cls_struct_combined_fn(struct cls_struct_combined arg) +{ + printf("%g %g %g %g\n", + arg.a, arg.b, + arg.c, arg.d); + fflush(stdout); +} + +static void +cls_struct_combined_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + struct cls_struct_combined a0; + + a0 = *(struct cls_struct_combined*)(args[0]); + + cls_struct_combined_fn(a0); +} + + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type* cls_struct_fields0[5]; + ffi_type cls_struct_type0; + ffi_type* dbl_arg_types[5]; + + struct cls_struct_combined g_dbl = {4.0, 5.0, 1.0, 8.0}; + + cls_struct_type0.size = 0; + cls_struct_type0.alignment = 0; + cls_struct_type0.type = FFI_TYPE_STRUCT; + cls_struct_type0.elements = cls_struct_fields0; + + cls_struct_fields0[0] = &ffi_type_float; + cls_struct_fields0[1] = &ffi_type_float; + cls_struct_fields0[2] = &ffi_type_float; + cls_struct_fields0[3] = &ffi_type_float; + cls_struct_fields0[4] = NULL; + + dbl_arg_types[0] = &cls_struct_type0; + dbl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_void, + dbl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_combined_gn, NULL, code) == FFI_OK); + + ((void(*)(cls_struct_combined)) (code))(g_dbl); + /* { dg-output "4 5 1 8" } */ + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc new file mode 100644 index 0000000..e114565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest.cc @@ -0,0 +1,117 @@ +/* Area: ffi_closure, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. 
+ Originator: Jeff Sturm */ + +/* { dg-do run { xfail x86_64-apple-darwin* moxie*-*-* } } */ + +#include "ffitest.h" + +void ABI_ATTR +closure_test_fn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args __UNUSED__, void* userdata __UNUSED__) +{ + throw 9; +} + +typedef void (*closure_test_type)(); + +void closure_test_fn1(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) + { + *(ffi_arg*)resp = + (int)*(float *)args[0] +(int)(*(float *)args[1]) + + (int)(*(float *)args[2]) + (int)*(float *)args[3] + + (int)(*(signed short *)args[4]) + (int)(*(float *)args[5]) + + (int)*(float *)args[6] + (int)(*(int *)args[7]) + + (int)(*(double*)args[8]) + (int)*(int *)args[9] + + (int)(*(int *)args[10]) + (int)(*(float *)args[11]) + + (int)*(int *)args[12] + (int)(*(int *)args[13]) + + (int)(*(int *)args[14]) + *(int *)args[15] + (int)(intptr_t)userdata; + + printf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d: %d\n", + (int)*(float *)args[0], (int)(*(float *)args[1]), + (int)(*(float *)args[2]), (int)*(float *)args[3], + (int)(*(signed short *)args[4]), (int)(*(float *)args[5]), + (int)*(float *)args[6], (int)(*(int *)args[7]), + (int)(*(double *)args[8]), (int)*(int *)args[9], + (int)(*(int *)args[10]), (int)(*(float *)args[11]), + (int)*(int *)args[12], (int)(*(int *)args[13]), + (int)(*(int *)args[14]), *(int *)args[15], + (int)(intptr_t)userdata, (int)*(ffi_arg*)resp); + + throw (int)*(ffi_arg*)resp; +} + +typedef int (*closure_test_type1)(float, float, float, float, signed short, + float, float, int, double, int, int, float, + int, int, int, int); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = (ffi_closure *)ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[17]; + + { + cl_arg_types[1] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, + &ffi_type_void, cl_arg_types) == FFI_OK); + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn, NULL, code) == FFI_OK); + + try + { + (*((closure_test_type)(code)))(); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + + { + + cl_arg_types[0] = &ffi_type_float; + cl_arg_types[1] = &ffi_type_float; + cl_arg_types[2] = &ffi_type_float; + cl_arg_types[3] = &ffi_type_float; + cl_arg_types[4] = &ffi_type_sshort; + cl_arg_types[5] = &ffi_type_float; + cl_arg_types[6] = &ffi_type_float; + cl_arg_types[7] = &ffi_type_uint; + cl_arg_types[8] = &ffi_type_double; + cl_arg_types[9] = &ffi_type_uint; + cl_arg_types[10] = &ffi_type_uint; + cl_arg_types[11] = &ffi_type_float; + cl_arg_types[12] = &ffi_type_uint; + cl_arg_types[13] = &ffi_type_uint; + cl_arg_types[14] = &ffi_type_uint; + cl_arg_types[15] = &ffi_type_uint; + cl_arg_types[16] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 16, + &ffi_type_sint, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_fn1, + (void *) 3 /* userdata */, code) == FFI_OK); + try + { + (*((closure_test_type1)code)) + (1.1, 2.2, 3.3, 4.4, 127, 5.5, 6.6, 8, 9, 10, 11, 12.0, 13, + 19, 21, 1); + /* { dg-output "\n1 2 3 4 127 5 6 8 9 10 11 12 13 19 21 1 3: 255" } */ + } catch (int exception_code) + { + CHECK(exception_code == 255); + } + printf("part two OK\n"); + /* { dg-output "\npart two OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc new file mode 100644 index 0000000..153d240 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.closures/unwindtest_ffi_call.cc @@ -0,0 +1,54 @@ +/* Area: ffi_call, unwind info + Purpose: Check if the unwind information is passed correctly. + Limitations: none. + PR: none. + Originator: Andreas Tobler 20061213 */ + +/* { dg-do run { xfail moxie*-*-* } } */ + +#include "ffitest.h" + +static int checking(int a __UNUSED__, short b __UNUSED__, + signed char c __UNUSED__) +{ + throw 9; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + ffi_arg rint; + + signed int si; + signed short ss; + signed char sc; + + args[0] = &ffi_type_sint; + values[0] = &si; + args[1] = &ffi_type_sshort; + values[1] = &ss; + args[2] = &ffi_type_schar; + values[2] = &sc; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &ffi_type_sint, args) == FFI_OK); + + si = -6; + ss = -12; + sc = -1; + { + try + { + ffi_call(&cif, FFI_FN(checking), &rint, values); + } catch (int exception_code) + { + CHECK(exception_code == 9); + } + printf("part one OK\n"); + /* { dg-output "part one OK" } */ + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc new file mode 100644 index 0000000..4a812ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex.inc @@ -0,0 +1,91 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include <complex.h> + +typedef struct cls_struct_align { + unsigned char a; + _Complex T_C_TYPE b; + unsigned char c; +} cls_struct_align; + +cls_struct_align cls_struct_align_fn( + struct cls_struct_align a1, struct cls_struct_align a2) +{ + struct cls_struct_align result; + + result.a = a1.a + a2.a; + result.b = a1.b + a2.b; + result.c = a1.c + a2.c; + + printf("%d %f,%fi %d %d %f,%fi %d: %d %f,%fi %d\n", + a1.a, T_CONV creal (a1.b), T_CONV cimag (a1.b), a1.c, + a2.a, T_CONV creal (a2.b), T_CONV cimag (a2.b), a2.c, + result.a, T_CONV creal (result.b), T_CONV cimag (result.b), result.c); + + return result; +} + +static void +cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) +{ + + struct cls_struct_align a1, a2; + + a1 = *(struct cls_struct_align*)(args[0]); + a2 = *(struct cls_struct_align*)(args[1]); + + *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args_c[5]; + ffi_type* cls_struct_fields[4]; + ffi_type cls_struct_type; + ffi_type* c_arg_types[5]; + + struct cls_struct_align g_c = { 12, 4951 + 7 * I, 127 }; + struct cls_struct_align f_c = { 1, 9320 + 1 * I, 13 }; + struct cls_struct_align res_c; + + cls_struct_type.size = 0; + cls_struct_type.alignment = 0; + cls_struct_type.type = FFI_TYPE_STRUCT; + cls_struct_type.elements = cls_struct_fields; + + cls_struct_fields[0] = &ffi_type_uchar; + cls_struct_fields[1] = &T_FFI_TYPE; + cls_struct_fields[2] = &ffi_type_uchar; + cls_struct_fields[3] = NULL; + + c_arg_types[0] = &cls_struct_type; + c_arg_types[1] = &cls_struct_type; + c_arg_types[2] = NULL; + + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type, + c_arg_types) == 
FFI_OK); + + args_c[0] = &g_c; + args_c[1] = &f_c; + args_c[2] = NULL; + + ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_c, args_c); + /* { dg-output "12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_struct_align_gn, NULL, code) == FFI_OK); + + res_c = ((cls_struct_align(*)(cls_struct_align, cls_struct_align))(code))(g_c, f_c); + /* { dg-output "\n12 4951,7i 127 1 9320,1i 13: 13 14271,8i 140" } */ + printf("res: %d %f,%fi %d\n", + res_c.a, T_CONV creal (res_c.b), T_CONV cimag (res_c.b), res_c.c); + /* { dg-output "\nres: 13 14271,8i 140" } */ + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c new file mode 100644 index 0000000..0dff23a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c new file mode 100644 index 0000000..0affbd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c new file mode 100644 index 0000000..7889ba8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_align_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check structure alignment of complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_align_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc new file mode 100644 index 0000000..f937404 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex.inc @@ -0,0 +1,42 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static void cls_ret_complex_fn(ffi_cif* cif __UNUSED__, void* resp, void** args, + void* userdata __UNUSED__) + { + _Complex T_C_TYPE *pa; + _Complex T_C_TYPE *pr; + pa = (_Complex T_C_TYPE *)args[0]; + pr = (_Complex T_C_TYPE *)resp; + *pr = *pa; + + printf("%.6f,%.6fi: %.6f,%.6fi\n", + T_CONV creal (*pa), T_CONV cimag (*pa), + T_CONV creal (*pr), T_CONV cimag (*pr)); + } +typedef _Complex T_C_TYPE (*cls_ret_complex)(_Complex T_C_TYPE); + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type * cl_arg_types[2]; + _Complex T_C_TYPE res; + + cl_arg_types[0] = &T_FFI_TYPE; + cl_arg_types[1] = NULL; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_complex_fn, NULL, code) == FFI_OK); + + res = (*((cls_ret_complex)code))(0.125 + 128.0 * I); + printf("res: %.6f,%.6fi\n", T_CONV creal (res), T_CONV cimag (res)); + CHECK (res == (0.125 + 128.0 * I)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c new file mode 100644 index 0000000..05e3534 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_double.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c new file mode 100644 index 0000000..5df7849 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_float.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c new file mode 100644 index 0000000..2b1c320 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: closure_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc new file mode 100644 index 0000000..df8708d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct.inc @@ -0,0 +1,71 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +typedef struct Cs { + _Complex T_C_TYPE x; + _Complex T_C_TYPE y; +} Cs; + +Cs gc; + +void +closure_test_fn(Cs p) +{ + printf("%.1f,%.1fi %.1f,%.1fi\n", + T_CONV creal (p.x), T_CONV cimag (p.x), + T_CONV creal (p.y), T_CONV cimag (p.y)); + gc = p; +} + +void +closure_test_gn(ffi_cif* cif __UNUSED__, void* resp __UNUSED__, + void** args, void* userdata __UNUSED__) +{ + closure_test_fn(*(Cs*)args[0]); +} + +int main(int argc __UNUSED__, char** argv __UNUSED__) +{ + ffi_cif cif; + + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + ffi_type *cl_arg_types[1]; + + ffi_type ts1_type; + ffi_type* ts1_type_elements[4]; + + Cs arg = { 1.0 + 11.0 * I, 2.0 + 22.0 * I}; + + ts1_type.size = 0; + ts1_type.alignment = 0; + ts1_type.type = FFI_TYPE_STRUCT; + ts1_type.elements = ts1_type_elements; + + ts1_type_elements[0] = &T_FFI_TYPE; + ts1_type_elements[1] = &T_FFI_TYPE; + ts1_type_elements[2] = NULL; + + cl_arg_types[0] = &ts1_type; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &ffi_type_void, cl_arg_types) == FFI_OK); + + CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_gn, NULL, code) == FFI_OK); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + ((void*(*)(Cs))(code))(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + gc.x = 0.0 + 0.0 * I; + gc.y = 0.0 + 0.0 * I; + closure_test_fn(arg); + /* { dg-output "1.0,11.0i 2.0,22.0i\n" } */ + CHECK (gc.x == arg.x && gc.y == arg.y); + + return 0; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c new file mode 100644 index 0000000..ec71346 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c new file mode 100644 index 0000000..96fdf75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c new file mode 100644 index 0000000..005b467 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_struct_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Check complex arguments in structs. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_struct.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc new file mode 100644 index 0000000..8a3e15f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va.inc @@ -0,0 +1,80 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include +#include +#include +#include + +static _Complex T_C_TYPE gComplexValue1 = 1 + 2 * I; +static _Complex T_C_TYPE gComplexValue2 = 3 + 4 * I; + +static int cls_variadic(const char *format, ...) +{ + va_list ap; + _Complex T_C_TYPE p1, p2; + + va_start (ap, format); + p1 = va_arg (ap, _Complex T_C_TYPE); + p2 = va_arg (ap, _Complex T_C_TYPE); + va_end (ap); + + return printf(format, T_CONV creal (p1), T_CONV cimag (p1), + T_CONV creal (p2), T_CONV cimag (p2)); +} + +static void +cls_complex_va_fn(ffi_cif* cif __UNUSED__, void* resp, + void** args, void* userdata __UNUSED__) +{ + char* format = *(char**)args[0]; + gComplexValue1 = *(_Complex T_C_TYPE*)args[1]; + gComplexValue2 = *(_Complex T_C_TYPE*)args[2]; + + *(ffi_arg*)resp = + printf(format, + T_CONV creal (gComplexValue1), T_CONV cimag (gComplexValue1), + T_CONV creal (gComplexValue2), T_CONV cimag (gComplexValue2)); +} + +int main (void) +{ + ffi_cif cif; + void *code; + ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code); + void* args[4]; + ffi_type* arg_types[4]; + char *format = "%.1f,%.1fi %.1f,%.1fi\n"; + + _Complex T_C_TYPE complexArg1 = 1.0 + 22.0 *I; + _Complex T_C_TYPE complexArg2 = 333.0 + 4444.0 *I; + ffi_arg res = 0; + + arg_types[0] = &ffi_type_pointer; + arg_types[1] = &T_FFI_TYPE; + arg_types[2] = &T_FFI_TYPE; + arg_types[3] = NULL; + + /* This printf call is variadic */ + CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 3, &ffi_type_sint, + arg_types) == FFI_OK); + + args[0] = &format; + args[1] = &complexArg1; + args[2] = &complexArg2; + args[3] = NULL; + + ffi_call(&cif, FFI_FN(cls_variadic), &res, args); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + CHECK(ffi_prep_closure_loc(pcl, &cif, cls_complex_va_fn, NULL, code) + == FFI_OK); + + res = ((int(*)(char *, ...))(code))(format, complexArg1, complexArg2); + CHECK (gComplexValue1 == complexArg1); + CHECK (gComplexValue2 == complexArg2); + printf("res: %d\n", (int) res); + CHECK (res == 24); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c new file mode 100644 index 0000000..879ccf3 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c new file mode 100644 index 0000000..2b17826 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_float.c @@ -0,0 +1,16 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +/* Alpha splits _Complex into two arguments. It's illegal to pass + float through varargs, so _Complex float goes badly. In sort of + gets passed as _Complex double, but the compiler doesn't agree + with itself on this issue. */ +/* { dg-do run { xfail alpha*-*-* } } */ + +#include "complex_defs_float.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c new file mode 100644 index 0000000..6eca965 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/cls_complex_va_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call, closure_call + Purpose: Test complex' passed in variable argument lists. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "cls_complex_va.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp new file mode 100644 index 0000000..4631db2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
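+# Descriptive note: the directives below collect every .c/.cc source in this
+# directory and run the whole list via run-many-tests only when
+# libffi_feature_test reports FFI_TARGET_HAS_COMPLEX_TYPE for the target;
+# otherwise each test is recorded as unsupported rather than failed.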
+ +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_TARGET_HAS_COMPLEX_TYPE"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc new file mode 100644 index 0000000..515ae3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex.inc @@ -0,0 +1,51 @@ +/* -*-c-*-*/ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE f_complex(_Complex T_C_TYPE c, int x, int *py) +{ + c = -(2 * creal (c)) + (cimag (c) + 1)* I; + *py += x; + + return c; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex T_C_TYPE tc_arg; + _Complex T_C_TYPE tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, + &T_FFI_TYPE, args) == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%f,%fi %f,%fi, x %d 1234, y %d 11110\n", + T_CONV creal (tc_result), T_CONV cimag (tc_result), + T_CONV creal (2.0), T_CONV creal (8.0), tc_int_arg_x, tc_y); + + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc new file mode 100644 index 0000000..3583e16 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_double.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_double +/* C type corresponding to the base type. */ +#define T_C_TYPE double +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc new file mode 100644 index 0000000..bbd9375 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_float.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_float +/* C type corresponding to the base type. */ +#define T_C_TYPE float +/* C cast for a value of type T_C_TYPE that is passed to printf. 
*/ +#define T_CONV (double) diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc new file mode 100644 index 0000000..14b9f24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_defs_longdouble.inc @@ -0,0 +1,7 @@ +/* -*-c-*- */ +/* Complex base type. */ +#define T_FFI_TYPE ffi_type_complex_longdouble +/* C type corresponding to the base type. */ +#define T_C_TYPE long double +/* C cast for a value of type T_C_TYPE that is passed to printf. */ +#define T_CONV diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c new file mode 100644 index 0000000..8a3297b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c new file mode 100644 index 0000000..5044ebb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c new file mode 100644 index 0000000..bac3190 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_int.c @@ -0,0 +1,86 @@ +/* Area: ffi_call + Purpose: Check non-standard complex types. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "ffitest.h" +#include "ffi.h" +#include + +_Complex int f_complex(_Complex int c, int x, int *py) +{ + __real__ c = -2 * __real__ c; + __imag__ c = __imag__ c + 1; + *py += x; + return c; +} + +/* + * This macro can be used to define new complex type descriptors + * in a platform independent way. + * + * name: Name of the new descriptor is ffi_type_complex_. + * type: The C base type of the complex type. 
+ */ +#define FFI_COMPLEX_TYPEDEF(name, type, ffitype) \ + static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffitype), NULL \ + }; \ + struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ + }; \ + ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ + } + +/* Define new complex type descriptors using the macro: */ +/* ffi_type_complex_sint */ +FFI_COMPLEX_TYPEDEF(sint, int, ffi_type_sint); +/* ffi_type_complex_uchar */ +FFI_COMPLEX_TYPEDEF(uchar, unsigned char, ffi_type_uint8); + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + + _Complex int tc_arg; + _Complex int tc_result; + int tc_int_arg_x; + int tc_y; + int *tc_ptr_arg_y = &tc_y; + + args[0] = &ffi_type_complex_sint; + args[1] = &ffi_type_sint; + args[2] = &ffi_type_pointer; + values[0] = &tc_arg; + values[1] = &tc_int_arg_x; + values[2] = &tc_ptr_arg_y; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 3, &ffi_type_complex_sint, args) + == FFI_OK); + + tc_arg = 1 + 7 * I; + tc_int_arg_x = 1234; + tc_y = 9876; + ffi_call(&cif, FFI_FN(f_complex), &tc_result, values); + + printf ("%d,%di %d,%di, x %d 1234, y %d 11110\n", + (int)tc_result, (int)(tc_result * -I), 2, 8, tc_int_arg_x, tc_y); + /* dg-output "-2,8i 2,8i, x 1234 1234, y 11110 11110" */ + CHECK (creal (tc_result) == -2); + CHECK (cimag (tc_result) == 8); + CHECK (tc_int_arg_x == 1234); + CHECK (*tc_ptr_arg_y == 11110); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c new file mode 100644 index 0000000..7e78366 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check complex types. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/ffitest.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc new file mode 100644 index 0000000..e37a774 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex.inc @@ -0,0 +1,78 @@ +/* -*-c-*- */ +#include "ffitest.h" + +#include +#include + +static _Complex T_C_TYPE many(_Complex T_C_TYPE c1, + _Complex T_C_TYPE c2, + _Complex T_C_TYPE c3, + _Complex T_C_TYPE c4, + _Complex T_C_TYPE c5, + _Complex T_C_TYPE c6, + _Complex T_C_TYPE c7, + _Complex T_C_TYPE c8, + _Complex T_C_TYPE c9, + _Complex T_C_TYPE c10, + _Complex T_C_TYPE c11, + _Complex T_C_TYPE c12, + _Complex T_C_TYPE c13) +{ + printf("0 :%f,%fi\n" + "1 :%f,%fi\n" + "2 :%f,%fi\n" + "3 :%f,%fi\n" + "4 :%f,%fi\n" + "5 :%f,%fi\n" + "6 :%f,%fi\n" + "7 :%f,%fi\n" + "8 :%f,%fi\n" + "9 :%f,%fi\n" + "10:%f,%fi\n" + "11:%f,%fi\n" + "12:%f,%fi\n", + T_CONV creal (c1), T_CONV cimag (c1), + T_CONV creal (c2), T_CONV cimag (c2), + T_CONV creal (c3), T_CONV cimag (c3), + T_CONV creal (c4), T_CONV cimag (c4), + T_CONV creal (c5), T_CONV cimag (c5), + T_CONV creal (c6), T_CONV cimag (c6), + T_CONV creal (c7), T_CONV cimag (c7), + T_CONV creal (c8), T_CONV cimag (c8), + T_CONV creal (c9), T_CONV cimag (c9), + T_CONV creal (c10), T_CONV cimag (c10), + T_CONV creal (c11), T_CONV cimag (c11), + T_CONV creal (c12), T_CONV cimag (c12), + T_CONV creal (c13), T_CONV cimag (c13)); + + return (c1+c2-c3-c4+c5+c6+c7-c8-c9-c10-c11+c12+c13); +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[13]; + void *values[13]; + _Complex T_C_TYPE ca[13]; + _Complex T_C_TYPE c, cc; + int i; + + for (i = 0; i < 13; i++) + { + args[i] = &T_FFI_TYPE; + values[i] = &ca[i]; + ca[i] = i + (-20 - i) * I; + } + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 13, &T_FFI_TYPE, args) == FFI_OK); + + ffi_call(&cif, FFI_FN(many), &c, values); + + cc = many(ca[0], ca[1], ca[2], ca[3], ca[4], ca[5], ca[6], ca[7], ca[8], + ca[9], ca[10], ca[11], ca[12]); + CHECK(creal (cc) == creal (c)); + CHECK(cimag (cc) == cimag (c)); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c new file mode 100644 index 0000000..3fd53c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c new file mode 100644 index 0000000..c43d21c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c new file mode 100644 index 0000000..dbab723 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/many_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex, with many arguments + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "many_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc new file mode 100644 index 0000000..8bf0c1f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex.inc @@ -0,0 +1,37 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c) +{ + printf ("%f,%fi\n", T_CONV creal (c), T_CONV cimag (c)); + return 2 * c; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c, rc, rc2; + T_C_TYPE cr, ci; + + args[0] = &T_FFI_TYPE; + values[0] = &c; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, + &T_FFI_TYPE, args) == FFI_OK); + + for (cr = -127.0; cr < 127; cr++) + { + ci = 1000.0 - cr; + c = cr + ci * I; + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == 2 * c); + } + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc new file mode 100644 index 0000000..7cecc0f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1.inc @@ -0,0 +1,41 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +static _Complex T_C_TYPE return_c(_Complex T_C_TYPE c1, float fl2, unsigned int in3, _Complex T_C_TYPE c4) +{ + return c1 + fl2 + in3 + c4; +} +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c4, rc, rc2; + float fl2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &ffi_type_float; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &fl2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, 
FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + fl2 = 128.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, fl2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c new file mode 100644 index 0000000..727410d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c new file mode 100644 index 0000000..a2aeada --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c new file mode 100644 index 0000000..103504b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex1_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex1.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc new file mode 100644 index 0000000..265170b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2.inc @@ -0,0 +1,44 @@ +/* -*-c-*- */ +#include "ffitest.h" +#include + +_Complex T_C_TYPE +return_c(_Complex T_C_TYPE c1, _Complex T_C_TYPE c2, + unsigned int in3, _Complex T_C_TYPE c4) +{ + volatile _Complex T_C_TYPE r = c1 + c2 + in3 + c4; + return r; +} + +int main (void) +{ + ffi_cif cif; + ffi_type *args[MAX_ARGS]; + void *values[MAX_ARGS]; + _Complex T_C_TYPE c1, c2, c4, rc, rc2; + unsigned int in3; + args[0] = &T_FFI_TYPE; + args[1] = &T_FFI_TYPE; + args[2] = &ffi_type_uint; + args[3] = &T_FFI_TYPE; + values[0] = &c1; + values[1] = &c2; + values[2] = &in3; + values[3] = &c4; + + /* Initialize the cif */ + CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 4, + &T_FFI_TYPE, args) == FFI_OK); + c1 = 127.0 + 255.0 * I; + c2 = 128.0 + 256.0; + in3 = 255; + c4 = 512.7 + 1024.1 * I; + + ffi_call(&cif, FFI_FN(return_c), &rc, values); + rc2 = return_c(c1, c2, in3, c4); + printf ("%f,%fi vs %f,%fi\n", + T_CONV creal (rc), T_CONV cimag (rc), + T_CONV creal (rc2), T_CONV cimag (rc2)); + CHECK(rc == rc2); + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c new file mode 100644 index 0000000..ab9efac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c new file mode 100644 index 0000000..d7f22c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c new file mode 100644 index 0000000..3edea62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex2_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . 
*/ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex2.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c new file mode 100644 index 0000000..e2497cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_double.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_double.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c new file mode 100644 index 0000000..a35528f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_float.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_float.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c new file mode 100644 index 0000000..142d7be --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.complex/return_complex_longdouble.c @@ -0,0 +1,10 @@ +/* Area: ffi_call + Purpose: Check return value complex. + Limitations: none. + PR: none. + Originator: . */ + +/* { dg-do run } */ + +#include "complex_defs_longdouble.inc" +#include "return_complex.inc" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c new file mode 100644 index 0000000..b00c404 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/aa-direct.c @@ -0,0 +1,34 @@ +/* { dg-do run } */ + +#include "static-chain.h" + +#if defined(__GNUC__) && !defined(__clang__) && defined(STATIC_CHAIN_REG) + +#include "ffitest.h" + +/* Blatent assumption here that the prologue doesn't clobber the + static chain for trivial functions. If this is not true, don't + define STATIC_CHAIN_REG, and we'll test what we can via other tests. 
*/ +void *doit(void) +{ + register void *chain __asm__(STATIC_CHAIN_REG); + return chain; +} + +int main() +{ + ffi_cif cif; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(doit), &result, NULL, &result); + + CHECK(result == &result); + + return 0; +} + +#else /* UNSUPPORTED */ +int main() { return 0; } +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c new file mode 100644 index 0000000..7b34afc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/closure1.c @@ -0,0 +1,28 @@ +/* { dg-do run } */ + +#include "ffitest.h" + +void doit(ffi_cif *cif, void *rvalue, void **avalue, void *closure) +{ + (void)cif; + (void)avalue; + *(void **)rvalue = closure; +} + +typedef void * (*FN)(void); + +int main() +{ + ffi_cif cif; + ffi_go_closure cl; + void *result; + + CHECK(ffi_prep_cif(&cif, ABI_NUM, 0, &ffi_type_pointer, NULL) == FFI_OK); + CHECK(ffi_prep_go_closure(&cl, &cif, doit) == FFI_OK); + + ffi_call_go(&cif, FFI_FN(*(FN *)&cl), &result, NULL, &cl); + + CHECK(result == &cl); + + exit(0); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h new file mode 100644 index 0000000..d27d362 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/ffitest.h @@ -0,0 +1 @@ +#include "../libffi.call/ffitest.h" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/go.exp b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/go.exp new file mode 100644 index 0000000..100c5e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/go.exp @@ -0,0 +1,36 @@ +# Copyright (C) 2003, 2006, 2009, 2010, 2014 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; see the file COPYING3. If not see +# . 
+ +dg-init +libffi-init + +global srcdir subdir + +set tlist [lsort [glob -nocomplain -- $srcdir/$subdir/*.{c,cc}]] + +if { [libffi_feature_test "#ifdef FFI_GO_CLOSURES"] } { + run-many-tests $tlist "" +} else { + foreach test $tlist { + unsupported "$test" + } +} + +dg-finish + +# Local Variables: +# tcl-indent-level:4 +# End: diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h new file mode 100644 index 0000000..3675b40 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/libffi/testsuite/libffi.go/static-chain.h @@ -0,0 +1,19 @@ +#ifdef __aarch64__ +# define STATIC_CHAIN_REG "x18" +#elif defined(__alpha__) +# define STATIC_CHAIN_REG "$1" +#elif defined(__arm__) +# define STATIC_CHAIN_REG "ip" +#elif defined(__sparc__) +# if defined(__arch64__) || defined(__sparcv9) +# define STATIC_CHAIN_REG "g5" +# else +# define STATIC_CHAIN_REG "g2" +# endif +#elif defined(__x86_64__) +# define STATIC_CHAIN_REG "r10" +#elif defined(__i386__) +# ifndef ABI_NUM +# define STATIC_CHAIN_REG "ecx" /* FFI_DEFAULT_ABI only */ +# endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/rbffi.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/rbffi.h new file mode 100644 index 0000000..89b3e32 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/rbffi.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2008, 2009, Wayne Meissner + * + * Copyright (c) 2008-2013, Ruby FFI project contributors + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Ruby FFI project nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RBFFI_RBFFI_H +#define RBFFI_RBFFI_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_PARAMETERS (32) + +extern VALUE rbffi_FFIModule; + +extern void rbffi_Type_Init(VALUE ffiModule); +extern void rbffi_Buffer_Init(VALUE ffiModule); +extern void rbffi_Invoker_Init(VALUE ffiModule); +extern void rbffi_Variadic_Init(VALUE ffiModule); +extern VALUE rbffi_AbstractMemoryClass, rbffi_InvokerClass; +extern int rbffi_type_size(VALUE type); +extern void rbffi_Thread_Init(VALUE moduleFFI); + +#ifdef __cplusplus +} +#endif + +#endif /* RBFFI_RBFFI_H */ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/rbffi_endian.h b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/rbffi_endian.h new file mode 100644 index 0000000..ebb8420 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ext/ffi_c/rbffi_endian.h @@ -0,0 +1,59 @@ +#ifndef JFFI_ENDIAN_H +#define JFFI_ENDIAN_H + +#ifndef _MSC_VER +#include +#endif + +#include + +#if defined(__linux__) || defined(__CYGWIN__) || defined(__GNU__) || defined(__GLIBC__) || defined(__HAIKU__) +# include +# if !defined(LITTLE_ENDIAN) && defined(__LITTLE_ENDIAN) +# define LITTLE_ENDIAN __LITTLE_ENDIAN +# endif +# if !defined(BIG_ENDIAN) && defined(__BIG_ENDIAN) +# define BIG_ENDIAN __BIG_ENDIAN +# endif +# if !defined(BYTE_ORDER) && defined(__BYTE_ORDER) +# define BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#ifdef __sun +# include +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(_BIG_ENDIAN) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(_LITTLE_ENDIAN) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_AIX) && !defined(BYTE_ORDER) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# if defined(__BIG_ENDIAN__) +# define BYTE_ORDER BIG_ENDIAN +# elif defined(__LITTLE_ENDIAN__) +# define BYTE_ORDER LITTLE_ENDIAN +# else +# error "Cannot determine endian-ness" +# endif +#endif + +#if defined(_WIN32) +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# define BYTE_ORDER LITTLE_ENDIAN +#endif + +#if !defined(BYTE_ORDER) || !defined(LITTLE_ENDIAN) || !defined(BIG_ENDIAN) +# error "Cannot determine the endian-ness of this platform" +#endif + +#endif /* JFFI_ENDIAN_H */ + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ffi.gemspec b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ffi.gemspec new file mode 100644 index 0000000..eaae515 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/ffi.gemspec @@ -0,0 +1,42 @@ +require File.expand_path("../lib/#{File.basename(__FILE__, '.gemspec')}/version", __FILE__) + +Gem::Specification.new do |s| + s.name = 'ffi' + s.version = FFI::VERSION + s.author = 'Wayne Meissner' + s.email = 'wmeissner@gmail.com' + s.homepage = 'https://github.com/ffi/ffi/wiki' + s.summary = 'Ruby FFI' + s.description = 'Ruby FFI library' + if s.respond_to?(:metadata) + s.metadata['bug_tracker_uri'] = 'https://github.com/ffi/ffi/issues' + s.metadata['changelog_uri'] = 'https://github.com/ffi/ffi/blob/master/CHANGELOG.md' + s.metadata['documentation_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['wiki_uri'] = 'https://github.com/ffi/ffi/wiki' + s.metadata['source_code_uri'] = 'https://github.com/ffi/ffi/' + s.metadata['mailing_list_uri'] = 'http://groups.google.com/group/ruby-ffi' + end + s.files = `git ls-files -z`.split("\x0").reject do |f| + f =~ /^(\.|bench|gen|libtest|nbproject|spec)/ + end + + # Add libffi git files + lfs = `git --git-dir ext/ffi_c/libffi/.git ls-files -z`.split("\x0") + # 
Add autoconf generated files of libffi + lfs += %w[ configure config.guess config.sub install-sh ltmain.sh missing fficonfig.h.in ] + # Add automake generated files of libffi + lfs += `git --git-dir ext/ffi_c/libffi/.git ls-files -z *.am */*.am`.gsub(".am\0", ".in\0").split("\x0") + s.files += lfs.map do |f| + File.join("ext/ffi_c/libffi", f) + end + + s.extensions << 'ext/ffi_c/extconf.rb' + s.rdoc_options = %w[--exclude=ext/ffi_c/.*\.o$ --exclude=ffi_c\.(bundle|so)$] + s.license = 'BSD-3-Clause' + s.require_paths << 'ext/ffi_c' + s.required_ruby_version = '>= 2.3' + s.add_development_dependency 'rake', '~> 13.0' + s.add_development_dependency 'rake-compiler', '~> 1.0' + s.add_development_dependency 'rake-compiler-dock', '~> 1.0' + s.add_development_dependency 'rspec', '~> 2.14.1' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi.rb new file mode 100644 index 0000000..3fb20a8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi.rb @@ -0,0 +1,27 @@ +if RUBY_ENGINE == 'ruby' + begin + require RUBY_VERSION.split('.')[0, 2].join('.') + '/ffi_c' + rescue Exception + require 'ffi_c' + end + + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'jruby' && (RUBY_ENGINE_VERSION.split('.').map(&:to_i) <=> [9, 2, 20]) >= 0 + JRuby::Util.load_ext("org.jruby.ext.ffi.FFIService") + require 'ffi/ffi' + +elsif RUBY_ENGINE == 'truffleruby' && (RUBY_ENGINE_VERSION.split('.').map(&:to_i) <=> [20, 1, 0]) >= 0 + require 'truffleruby/ffi_backend' + require 'ffi/ffi' + +else + # Remove the ffi gem dir from the load path, then reload the internal ffi implementation + $LOAD_PATH.delete(File.dirname(__FILE__)) + $LOAD_PATH.delete(File.join(File.dirname(__FILE__), 'ffi')) + unless $LOADED_FEATURES.nil? + $LOADED_FEATURES.delete(__FILE__) + $LOADED_FEATURES.delete('ffi.rb') + end + require 'ffi.rb' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/abstract_memory.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/abstract_memory.rb new file mode 100644 index 0000000..e0aa221 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/abstract_memory.rb @@ -0,0 +1,44 @@ +# +# Copyright (C) 2020 Lars Kanis +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + + +module FFI + class AbstractMemory + LONG_MAX = FFI::Pointer.new(1).size + private_constant :LONG_MAX + + # Return +true+ if +self+ has a size limit. + # + # @return [Boolean] + def size_limit? + size != LONG_MAX + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/autopointer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/autopointer.rb new file mode 100644 index 0000000..679d7e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/autopointer.rb @@ -0,0 +1,203 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + class AutoPointer < Pointer + extend DataConverter + + # @overload initialize(pointer, method) + # @param pointer [Pointer] + # @param method [Method] + # @return [self] + # The passed Method will be invoked at GC time. + # @overload initialize(pointer, proc) + # @param pointer [Pointer] + # @return [self] + # The passed Proc will be invoked at GC time (SEE WARNING BELOW!) + # @note WARNING: passing a proc _may_ cause your pointer to never be + # GC'd, unless you're careful to avoid trapping a reference to the + # pointer in the proc. See the test specs for examples. + # @overload initialize(pointer) { |p| ... } + # @param pointer [Pointer] + # @yieldparam [Pointer] p +pointer+ passed to the block + # @return [self] + # The passed block will be invoked at GC time. 
+ # @note + # WARNING: passing a block will cause your pointer to never be GC'd. + # This is bad. + # @overload initialize(pointer) + # @param pointer [Pointer] + # @return [self] + # The pointer's release() class method will be invoked at GC time. + # + # @note The safest, and therefore preferred, calling + # idiom is to pass a Method as the second parameter. Example usage: + # + # class PointerHelper + # def self.release(pointer) + # ... + # end + # end + # + # p = AutoPointer.new(other_pointer, PointerHelper.method(:release)) + # + # The above code will cause PointerHelper#release to be invoked at GC time. + # + # @note + # The last calling idiom (only one parameter) is generally only + # going to be useful if you subclass {AutoPointer}, and override + # #release, which by default does nothing. + def initialize(ptr, proc=nil, &block) + super(ptr.type_size, ptr) + raise TypeError, "Invalid pointer" if ptr.nil? || !ptr.kind_of?(Pointer) \ + || ptr.kind_of?(MemoryPointer) || ptr.kind_of?(AutoPointer) + + @releaser = if proc + if not proc.respond_to?(:call) + raise RuntimeError.new("proc must be callable") + end + CallableReleaser.new(ptr, proc) + + else + if not self.class.respond_to?(:release) + raise RuntimeError.new("no release method defined") + end + DefaultReleaser.new(ptr, self.class) + end + + ObjectSpace.define_finalizer(self, @releaser) + self + end + + # @return [nil] + # Free the pointer. + def free + @releaser.free + end + + # @param [Boolean] autorelease + # @return [Boolean] +autorelease+ + # Set +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease=(autorelease) + @releaser.autorelease=(autorelease) + end + + # @return [Boolean] +autorelease+ + # Get +autorelease+ property. See {Pointer Autorelease section at Pointer}. + def autorelease? + @releaser.autorelease + end + + # @abstract Base class for {AutoPointer}'s releasers. + # + # All subclasses of Releaser should define a +#release(ptr)+ method. + # A releaser is an object in charge of release an {AutoPointer}. + class Releaser + attr_accessor :autorelease + + # @param [Pointer] ptr + # @param [#call] proc + # @return [nil] + # A new instance of Releaser. + def initialize(ptr, proc) + @ptr = ptr + @proc = proc + @autorelease = true + end + + # @return [nil] + # Free pointer. + def free + if @ptr + release(@ptr) + @autorelease = false + @ptr = nil + @proc = nil + end + end + + # @param args + # Release pointer if +autorelease+ is set. + def call(*args) + release(@ptr) if @autorelease && @ptr + end + end + + # DefaultReleaser is a {Releaser} used when an {AutoPointer} is defined + # without Proc or Method. In this case, the pointer to release must be of + # a class derived from AutoPointer with a {release} class method. + class DefaultReleaser < Releaser + # @param [Pointer] ptr + # @return [nil] + # Release +ptr+ using the {release} class method of its class. + def release(ptr) + @proc.release(ptr) + end + end + + # CallableReleaser is a {Releaser} used when an {AutoPointer} is defined with a + # Proc or a Method. + class CallableReleaser < Releaser + # Release +ptr+ by using Proc or Method defined at +ptr+ + # {AutoPointer#initialize initialization}. + # + # @param [Pointer] ptr + # @return [nil] + def release(ptr) + @proc.call(ptr) + end + end + + # Return native type of AutoPointer. + # + # Override {DataConverter#native_type}. 
+ # @return [Type::POINTER] + # @raise {RuntimeError} if class does not implement a +#release+ method + def self.native_type + if not self.respond_to?(:release) + raise RuntimeError.new("no release method defined for #{self.inspect}") + end + Type::POINTER + end + + # Create a new AutoPointer. + # + # Override {DataConverter#from_native}. + # @overload self.from_native(ptr, ctx) + # @param [Pointer] ptr + # @param ctx not used. Please set +nil+. + # @return [AutoPointer] + def self.from_native(val, ctx) + self.new(val) + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/buffer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/buffer.rb new file mode 100644 index 0000000..449e45b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/buffer.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/buffer' in user code +# diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/callback.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/callback.rb new file mode 100644 index 0000000..32d52f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/callback.rb @@ -0,0 +1,4 @@ +# +# All the code from this file is now implemented in C. This file remains +# to satisfy any leftover require 'ffi/callback' in user code +# diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/data_converter.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/data_converter.rb new file mode 100644 index 0000000..1527588 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/data_converter.rb @@ -0,0 +1,67 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This module is used to extend somes classes and give then a common API. + # + # Most of methods defined here must be overriden. + module DataConverter + # Get native type. 
+ # + # @overload native_type(type) + # @param [String, Symbol, Type] type + # @return [Type] + # Get native type from +type+. + # + # @overload native_type + # @raise {NotImplementedError} This method must be overriden. + def native_type(type = nil) + if type + @native_type = FFI.find_type(type) + else + native_type = @native_type + unless native_type + raise NotImplementedError, 'native_type method not overridden and no native_type set' + end + native_type + end + end + + # Convert to a native type. + def to_native(value, ctx) + value + end + + # Convert from a native type. + def from_native(value, ctx) + value + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/enum.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/enum.rb new file mode 100644 index 0000000..8fcb498 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/enum.rb @@ -0,0 +1,296 @@ +# +# Copyright (C) 2009, 2010 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # An instance of this class permits to manage {Enum}s. In fact, Enums is a collection of {Enum}s. + class Enums + + # @return [nil] + def initialize + @all_enums = Array.new + @tagged_enums = Hash.new + @symbol_map = Hash.new + end + + # @param [Enum] enum + # Add an {Enum} to the collection. + def <<(enum) + @all_enums << enum + @tagged_enums[enum.tag] = enum unless enum.tag.nil? + @symbol_map.merge!(enum.symbol_map) + end + + # @param query enum tag or part of an enum name + # @return [Enum] + # Find a {Enum} in collection. + def find(query) + if @tagged_enums.has_key?(query) + @tagged_enums[query] + else + @all_enums.detect { |enum| enum.symbols.include?(query) } + end + end + + # @param symbol a symbol to find in merge symbol maps of all enums. + # @return a symbol + def __map_symbol(symbol) + @symbol_map[symbol] + end + + end + + # Represents a C enum. 
+ # + # For a C enum: + # enum fruits { + # apple, + # banana, + # orange, + # pineapple + # }; + # are defined this vocabulary: + # * a _symbol_ is a word from the enumeration (ie. _apple_, by example); + # * a _value_ is the value of a symbol in the enumeration (by example, apple has value _0_ and banana _1_). + class Enum + include DataConverter + + attr_reader :tag + attr_reader :native_type + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info + # @param [nil, Symbol] tag enum tag + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Enum + # @param [nil, Enumerable] info symbols and values for new Enum + # @param [nil, Symbol] tag name of new Enum + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? + last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate enum key" if @kv_map.has_key?(i) + @kv_map[i] = value + last_cst = i + value += 1 + when Integer + @kv_map[last_cst] = i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # @return [Array] enum symbol names + def symbols + @kv_map.keys + end + + # Get a symbol or a value from the enum. + # @overload [](query) + # Get enum value from symbol. + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get enum symbol from value. + # @param [Integer] query + # @return [Symbol] + def [](query) + case query + when Symbol + @kv_map[query] + when Integer + @vk_map[query] + end + end + alias find [] + + # Get the symbol map. + # @return [Hash] + def symbol_map + @kv_map + end + + alias to_h symbol_map + alias to_hash symbol_map + + # @param [Symbol, Integer, #to_int] val + # @param ctx unused + # @return [Integer] value of a enum symbol + def to_native(val, ctx) + @kv_map[val] || if val.is_a?(Integer) + val + elsif val.respond_to?(:to_int) + val.to_int + else + raise ArgumentError, "invalid enum value, #{val.inspect}" + end + end + + # @param val + # @return symbol name if it exists for +val+. + def from_native(val, ctx) + @vk_map[val] || val + end + end + + # Represents a C enum whose values are power of 2 + # + # @example + # enum { + # red = (1<<0), + # green = (1<<1), + # blue = (1<<2) + # } + # + # Contrary to classical enums, bitmask values are usually combined + # when used. + class Bitmask < Enum + + # @overload initialize(info, tag=nil) + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + # @overload initialize(native_type, info, tag=nil) + # @param [FFI::Type] native_type Native type for new Bitmask + # @param [nil, Enumerable] info symbols and bit rank for new Bitmask + # @param [nil, Symbol] tag name of new Bitmask + def initialize(*args) + @native_type = args.first.kind_of?(FFI::Type) ? args.shift : Type::INT + info, @tag = *args + @kv_map = Hash.new + unless info.nil? 
+ last_cst = nil + value = 0 + info.each do |i| + case i + when Symbol + raise ArgumentError, "duplicate bitmask key" if @kv_map.has_key?(i) + @kv_map[i] = 1 << value + last_cst = i + value += 1 + when Integer + raise ArgumentError, "bitmask index should be positive" if i<0 + @kv_map[last_cst] = 1 << i + value = i+1 + end + end + end + @vk_map = @kv_map.invert + end + + # Get a symbol list or a value from the bitmask + # @overload [](*query) + # Get bitmask value from symbol list + # @param [Symbol] query + # @return [Integer] + # @overload [](query) + # Get bitmaks value from symbol array + # @param [Array] query + # @return [Integer] + # @overload [](*query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Integer] query + # @return [Array] + # @overload [](query) + # Get a list of bitmask symbols corresponding to + # the or reduction of a list of integer + # @param [Array] query + # @return [Array] + def [](*query) + flat_query = query.flatten + raise ArgumentError, "query should be homogeneous, #{query.inspect}" unless flat_query.all? { |o| o.is_a?(Symbol) } || flat_query.all? { |o| o.is_a?(Integer) || o.respond_to?(:to_int) } + case flat_query[0] + when Symbol + flat_query.inject(0) do |val, o| + v = @kv_map[o] + if v then val |= v else val end + end + when Integer, ->(o) { o.respond_to?(:to_int) } + val = flat_query.inject(0) { |mask, o| mask |= o.to_int } + @kv_map.select { |_, v| v & val != 0 }.keys + end + end + + # Get the native value of a bitmask + # @overload to_native(query, ctx) + # @param [Symbol, Integer, #to_int] query + # @param ctx unused + # @return [Integer] value of a bitmask + # @overload to_native(query, ctx) + # @param [Array] query + # @param ctx unused + # @return [Integer] value of a bitmask + def to_native(query, ctx) + return 0 if query.nil? + flat_query = [query].flatten + flat_query.inject(0) do |val, o| + case o + when Symbol + v = @kv_map[o] + raise ArgumentError, "invalid bitmask value, #{o.inspect}" unless v + val |= v + when Integer + val |= o + when ->(obj) { obj.respond_to?(:to_int) } + val |= o.to_int + else + raise ArgumentError, "invalid bitmask value, #{o.inspect}" + end + end + end + + # @param [Integer] val + # @param ctx unused + # @return [Array] list of symbol names corresponding to val, plus an optional remainder if some bits don't match any constant + def from_native(val, ctx) + list = @kv_map.select { |_, v| v & val != 0 }.keys + # If there are unmatch flags, + # return them in an integer, + # else information can be lost. + # Similar to Enum behavior. + remainder = val ^ list.inject(0) do |tmp, o| + v = @kv_map[o] + if v then tmp |= v else tmp end + end + list.push remainder unless remainder == 0 + return list + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/errno.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/errno.rb new file mode 100644 index 0000000..de82d89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/errno.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # @return (see FFI::LastError.error) + # @see FFI::LastError.error + def self.errno + FFI::LastError.error + end + # @param error (see FFI::LastError.error=) + # @return (see FFI::LastError.error=) + # @see FFI::LastError.error= + def self.errno=(error) + FFI::LastError.error = error + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/ffi.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/ffi.rb new file mode 100644 index 0000000..dfffa8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/ffi.rb @@ -0,0 +1,47 @@ +# +# Copyright (C) 2008-2010 JRuby project +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +require 'ffi/platform' +require 'ffi/data_converter' +require 'ffi/types' +require 'ffi/library' +require 'ffi/errno' +require 'ffi/abstract_memory' +require 'ffi/pointer' +require 'ffi/memorypointer' +require 'ffi/struct' +require 'ffi/union' +require 'ffi/managedstruct' +require 'ffi/callback' +require 'ffi/io' +require 'ffi/autopointer' +require 'ffi/variadic' +require 'ffi/enum' +require 'ffi/version' diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/io.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/io.rb new file mode 100644 index 0000000..e1bb955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/io.rb @@ -0,0 +1,62 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + + # This module implements a couple of class methods to play with IO. + module IO + # @param [Integer] fd file decriptor + # @param [String] mode mode string + # @return [::IO] + # Synonym for IO::for_fd. + def self.for_fd(fd, mode = "r") + ::IO.for_fd(fd, mode) + end + + # @param [#read] io io to read from + # @param [AbstractMemory] buf destination for data read from +io+ + # @param [nil, Numeric] len maximul number of bytes to read from +io+. If +nil+, + # read until end of file. + # @return [Numeric] length really read, in bytes + # + # A version of IO#read that reads data from an IO and put then into a native buffer. + # + # This will be optimized at some future time to eliminate the double copy. + # + def self.native_read(io, buf, len) + tmp = io.read(len) + return -1 unless tmp + buf.put_bytes(0, tmp) + tmp.length + end + + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/library.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/library.rb new file mode 100644 index 0000000..43b2bfe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/library.rb @@ -0,0 +1,592 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. 
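FFI::IO.native_read above simply reads from any IO-like Ruby object and copies the bytes into a native buffer; a short sketch, with StringIO standing in for a real IO:

    require 'ffi'
    require 'stringio'

    io  = StringIO.new('hello native world')
    buf = FFI::MemoryPointer.new(:char, 32)

    n = FFI::IO.native_read(io, buf, 5)             # copies at most 5 bytes into buf
    buf.read_string(n)                              # => "hello"
    FFI::IO.native_read(StringIO.new(''), buf, 4)   # => -1 at end of file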
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + CURRENT_PROCESS = USE_THIS_PROCESS_AS_LIBRARY = Object.new + + # @param [#to_s] lib library name + # @return [String] library name formatted for current platform + # Transform a generic library name to a platform library name + # @example + # # Linux + # FFI.map_library_name 'c' # -> "libc.so.6" + # FFI.map_library_name 'jpeg' # -> "libjpeg.so" + # # Windows + # FFI.map_library_name 'c' # -> "msvcrt.dll" + # FFI.map_library_name 'jpeg' # -> "jpeg.dll" + def self.map_library_name(lib) + # Mangle the library name to reflect the native library naming conventions + lib = Library::LIBC if lib == 'c' + + if lib && File.basename(lib) == lib + lib = Platform::LIBPREFIX + lib unless lib =~ /^#{Platform::LIBPREFIX}/ + r = Platform::IS_WINDOWS || Platform::IS_MAC ? "\\.#{Platform::LIBSUFFIX}$" : "\\.so($|\\.[1234567890]+)" + lib += ".#{Platform::LIBSUFFIX}" unless lib =~ /#{r}/ + end + + lib + end + + # Exception raised when a function is not found in libraries + class NotFoundError < LoadError + def initialize(function, *libraries) + super("Function '#{function}' not found in [#{libraries[0].nil? ? 'current process' : libraries.join(", ")}]") + end + end + + # This module is the base to use native functions. + # + # A basic usage may be: + # require 'ffi' + # + # module Hello + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function 'puts', [ :string ], :int + # end + # + # Hello.puts("Hello, World") + # + # + module Library + CURRENT_PROCESS = FFI::CURRENT_PROCESS + LIBC = FFI::Platform::LIBC + + # @param mod extended object + # @return [nil] + # @raise {RuntimeError} if +mod+ is not a Module + # Test if extended object is a Module. If not, raise RuntimeError. + def self.extended(mod) + raise RuntimeError.new("must only be extended by module") unless mod.kind_of?(::Module) + end + + + # @param [Array] names names of libraries to load + # @return [Array] + # @raise {LoadError} if a library cannot be opened + # Load native libraries. 
+ def ffi_lib(*names) + raise LoadError.new("library names list must not be empty") if names.empty? + + lib_flags = defined?(@ffi_lib_flags) ? @ffi_lib_flags : FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL + ffi_libs = names.map do |name| + + if name == FFI::CURRENT_PROCESS + FFI::DynamicLibrary.open(nil, FFI::DynamicLibrary::RTLD_LAZY | FFI::DynamicLibrary::RTLD_LOCAL) + + else + libnames = (name.is_a?(::Array) ? name : [ name ]).map(&:to_s).map { |n| [ n, FFI.map_library_name(n) ].uniq }.flatten.compact + lib = nil + errors = {} + + libnames.each do |libname| + begin + orig = libname + lib = FFI::DynamicLibrary.open(libname, lib_flags) + break if lib + + rescue Exception => ex + ldscript = false + if ex.message =~ /(([^ \t()])+\.so([^ \t:()])*):([ \t])*(invalid ELF header|file too short|invalid file format)/ + if File.binread($1) =~ /(?:GROUP|INPUT) *\( *([^ \)]+)/ + libname = $1 + ldscript = true + end + end + + if ldscript + retry + else + # TODO better library lookup logic + unless libname.start_with?("/") || FFI::Platform.windows? + path = ['/usr/lib/','/usr/local/lib/','/opt/local/lib/', '/opt/homebrew/lib/'].find do |pth| + File.exist?(pth + libname) + end + if path + libname = path + libname + retry + end + end + + libr = (orig == libname ? orig : "#{orig} #{libname}") + errors[libr] = ex + end + end + end + + if lib.nil? + raise LoadError.new(errors.values.join(".\n")) + end + + # return the found lib + lib + end + end + + @ffi_libs = ffi_libs + end + + # Set the calling convention for {#attach_function} and {#callback} + # + # @see http://en.wikipedia.org/wiki/Stdcall#stdcall + # @note +:stdcall+ is typically used for attaching Windows API functions + # + # @param [Symbol] convention one of +:default+, +:stdcall+ + # @return [Symbol] the new calling convention + def ffi_convention(convention = nil) + @ffi_convention ||= :default + @ffi_convention = convention if convention + @ffi_convention + end + + # @see #ffi_lib + # @return [Array] array of currently loaded FFI libraries + # @raise [LoadError] if no libraries have been loaded (using {#ffi_lib}) + # Get FFI libraries loaded using {#ffi_lib}. + def ffi_libraries + raise LoadError.new("no library specified") if !defined?(@ffi_libs) || @ffi_libs.empty? + @ffi_libs + end + + # Flags used in {#ffi_lib}. + # + # This map allows you to supply symbols to {#ffi_lib_flags} instead of + # the actual constants. + FlagsMap = { + :global => DynamicLibrary::RTLD_GLOBAL, + :local => DynamicLibrary::RTLD_LOCAL, + :lazy => DynamicLibrary::RTLD_LAZY, + :now => DynamicLibrary::RTLD_NOW + } + + # Sets library flags for {#ffi_lib}. + # + # @example + # ffi_lib_flags(:lazy, :local) # => 5 + # + # @param [Symbol, …] flags (see {FlagsMap}) + # @return [Fixnum] the new value + def ffi_lib_flags(*flags) + @ffi_lib_flags = flags.inject(0) { |result, f| result | FlagsMap[f] } + end + + + ## + # @overload attach_function(func, args, returns, options = {}) + # @example attach function without an explicit name + # module Foo + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :malloc, [:size_t], :pointer + # end + # # now callable via Foo.malloc + # @overload attach_function(name, func, args, returns, options = {}) + # @example attach function with an explicit name + # module Bar + # extend FFI::Library + # ffi_lib FFI::Library::LIBC + # attach_function :c_malloc, :malloc, [:size_t], :pointer + # end + # # now callable via Bar.c_malloc + # + # Attach C function +func+ to this module. 
+ # + # + # @param [#to_s] name name of ruby method to attach as + # @param [#to_s] func name of C function to attach + # @param [Array] args an array of types + # @param [Symbol] returns type of return value + # @option options [Boolean] :blocking (@blocking) set to true if the C function is a blocking call + # @option options [Symbol] :convention (:default) calling convention (see {#ffi_convention}) + # @option options [FFI::Enums] :enums + # @option options [Hash] :type_map + # + # @return [FFI::VariadicInvoker] + # + # @raise [FFI::NotFoundError] if +func+ cannot be found in the attached libraries (see {#ffi_lib}) + def attach_function(name, func, args, returns = nil, options = nil) + mname, a2, a3, a4, a5 = name, func, args, returns, options + cname, arg_types, ret_type, opts = (a4 && (a2.is_a?(String) || a2.is_a?(Symbol))) ? [ a2, a3, a4, a5 ] : [ mname.to_s, a2, a3, a4 ] + + # Convert :foo to the native type + arg_types = arg_types.map { |e| find_type(e) } + options = { + :convention => ffi_convention, + :type_map => defined?(@ffi_typedefs) ? @ffi_typedefs : nil, + :blocking => defined?(@blocking) && @blocking, + :enums => defined?(@ffi_enums) ? @ffi_enums : nil, + } + + @blocking = false + options.merge!(opts) if opts && opts.is_a?(Hash) + + # Try to locate the function in any of the libraries + invokers = [] + ffi_libraries.each do |lib| + if invokers.empty? + begin + function = nil + function_names(cname, arg_types).find do |fname| + function = lib.find_function(fname) + end + raise LoadError unless function + + invokers << if arg_types.length > 0 && arg_types[arg_types.length - 1] == FFI::NativeType::VARARGS + VariadicInvoker.new(function, arg_types, find_type(ret_type), options) + + else + Function.new(find_type(ret_type), arg_types, function, options) + end + + rescue LoadError + end + end + end + invoker = invokers.compact.shift + raise FFI::NotFoundError.new(cname.to_s, ffi_libraries.map { |lib| lib.name }) unless invoker + + invoker.attach(self, mname.to_s) + invoker + end + + # @param [#to_s] name function name + # @param [Array] arg_types function's argument types + # @return [Array] + # This function returns a list of possible names to lookup. + # @note Function names on windows may be decorated if they are using stdcall. See + # * http://en.wikipedia.org/wiki/Name_mangling#C_name_decoration_in_Microsoft_Windows + # * http://msdn.microsoft.com/en-us/library/zxk0tw93%28v=VS.100%29.aspx + # * http://en.wikibooks.org/wiki/X86_Disassembly/Calling_Conventions#STDCALL + # Note that decorated names can be overridden via def files. Also note that the + # windows api, although using, doesn't have decorated names. 
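Putting ffi_lib_flags, ffi_lib and attach_function together, a small sketch; the libm names vary by platform ('libm.so.6' is a Linux-specific guess, while 'm' maps to libm.dylib on macOS), which is why the alternatives are passed as a single Array element:

    require 'ffi'

    module LibM
      extend FFI::Library
      ffi_lib_flags :lazy, :local
      # An Array element lists alternative names for ONE library; the first
      # name that opens successfully wins. Separate arguments would each be loaded.
      ffi_lib ['m', 'libm.so.6']
      attach_function :pow, [:double, :double], :double
      attach_function :c_floor, :floor, [:double], :double   # explicit Ruby-side name
    end

    LibM.pow(2.0, 10.0)   # => 1024.0
    LibM.c_floor(3.7)     # => 3.0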
+ def function_names(name, arg_types) + result = [name.to_s] + if ffi_convention == :stdcall + # Get the size of each parameter + size = arg_types.inject(0) do |mem, arg| + size = arg.size + # The size must be a multiple of 4 + size += (4 - size) % 4 + mem + size + end + + result << "_#{name.to_s}@#{size}" # win32 + result << "#{name.to_s}@#{size}" # win64 + end + result + end + + # @overload attach_variable(mname, cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [#to_s] cname name of C variable to attach + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :c_myvar, :myvar, :long + # end + # # now callable via Bar.c_myvar + # @overload attach_variable(cname, type) + # @param [#to_s] mname name of ruby method to attach as + # @param [DataConverter, Struct, Symbol, Type] type C variable's type + # @example + # module Bar + # extend FFI::Library + # ffi_lib 'my_lib' + # attach_variable :myvar, :long + # end + # # now callable via Bar.myvar + # @return [DynamicLibrary::Symbol] + # @raise {FFI::NotFoundError} if +cname+ cannot be found in libraries + # + # Attach C variable +cname+ to this module. + def attach_variable(mname, a1, a2 = nil) + cname, type = a2 ? [ a1, a2 ] : [ mname.to_s, a1 ] + address = nil + ffi_libraries.each do |lib| + begin + address = lib.find_variable(cname.to_s) + break unless address.nil? + rescue LoadError + end + end + + raise FFI::NotFoundError.new(cname, ffi_libraries) if address.nil? || address.null? + if type.is_a?(Class) && type < FFI::Struct + # If it is a global struct, just attach directly to the pointer + s = s = type.new(address) # Assigning twice to suppress unused variable warning + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname} + end + code + + else + sc = Class.new(FFI::Struct) + sc.layout :gvar, find_type(type) + s = sc.new(address) + # + # Attach to this module as mname/mname= + # + self.module_eval <<-code, __FILE__, __LINE__ + @@ffi_gvar_#{mname} = s + def self.#{mname} + @@ffi_gvar_#{mname}[:gvar] + end + def self.#{mname}=(value) + @@ffi_gvar_#{mname}[:gvar] = value + end + code + + end + + address + end + + + # @overload callback(name, params, ret) + # @param name callback name to add to type map + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @overload callback(params, ret) + # @param [Array] params array of parameters' types + # @param [DataConverter, Struct, Symbol, Type] ret callback return type + # @return [FFI::CallbackInfo] + def callback(*args) + raise ArgumentError, "wrong number of arguments" if args.length < 2 || args.length > 3 + name, params, ret = if args.length == 3 + args + else + [ nil, args[0], args[1] ] + end + + native_params = params.map { |e| find_type(e) } + raise ArgumentError, "callbacks cannot have variadic parameters" if native_params.include?(FFI::Type::VARARGS) + options = Hash.new + options[:convention] = ffi_convention + options[:enums] = @ffi_enums if defined?(@ffi_enums) + ret_type = find_type(ret) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + cb = FFI::CallbackInfo.new(ret_type, native_params, options) + + # Add to the symbol -> type map (unless there was no name) + unless name.nil? 
+ typedef cb, name + end + + cb + end + + # Register or get an already registered type definition. + # + # To register a new type definition, +old+ should be a {FFI::Type}. +add+ + # is in this case the type definition. + # + # If +old+ is a {DataConverter}, a {Type::Mapped} is returned. + # + # If +old+ is +:enum+ + # * and +add+ is an +Array+, a call to {#enum} is made with +add+ as single parameter; + # * in others cases, +info+ is used to create a named enum. + # + # If +old+ is a key for type map, #typedef get +old+ type definition. + # + # @param [DataConverter, Symbol, Type] old + # @param [Symbol] add + # @param [Symbol] info + # @return [FFI::Enum, FFI::Type] + def typedef(old, add, info=nil) + @ffi_typedefs = Hash.new unless defined?(@ffi_typedefs) + + @ffi_typedefs[add] = if old.kind_of?(FFI::Type) + old + + elsif @ffi_typedefs.has_key?(old) + @ffi_typedefs[old] + + elsif old.is_a?(DataConverter) + FFI::Type::Mapped.new(old) + + elsif old == :enum + if add.kind_of?(Array) + self.enum(add) + else + self.enum(info, add) + end + + else + FFI.find_type(old) + end + end + + private + # Generic enum builder + # @param [Class] klass can be one of FFI::Enum or FFI::Bitmask + # @param args (see #enum or #bitmask) + def generic_enum(klass, *args) + native_type = args.first.kind_of?(FFI::Type) ? args.shift : nil + name, values = if args[0].kind_of?(Symbol) && args[1].kind_of?(Array) + [ args[0], args[1] ] + elsif args[0].kind_of?(Array) + [ nil, args[0] ] + else + [ nil, args ] + end + @ffi_enums = FFI::Enums.new unless defined?(@ffi_enums) + @ffi_enums << (e = native_type ? klass.new(native_type, values, name) : klass.new(values, name)) + + # If called with a name, add a typedef alias + typedef(e, name) if name + e + end + + public + # @overload enum(name, values) + # Create a named enum. + # @example + # enum :foo, [:zero, :one, :two] # named enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(*args) + # Create an unnamed enum. + # @example + # enum :zero, :one, :two # unnamed enum + # @param args values for enum + # @overload enum(values) + # Create an unnamed enum. + # @example + # enum [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [Array] values values for enum + # @overload enum(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :foo, [:zero, :one, :two] # named enum + # @param [FFI::Type] native_type native type for new enum + # @param [Symbol] name name for new enum + # @param [Array] values values for enum + # @overload enum(native_type, *args) + # Create an unnamed enum and specify the native type. + # @example + # enum FFI::Type::UINT64, :zero, :one, :two # unnamed enum + # @param [FFI::Type] native_type native type for new enum + # @param args values for enum + # @overload enum(native_type, values) + # Create an unnamed enum and specify the native type. + # @example + # enum Type::UINT64, [:zero, :one, :two] # unnamed enum, equivalent to above example + # @param [FFI::Type] native_type native type for new enum + # @param [Array] values values for enum + # @return [FFI::Enum] + # Create a new {FFI::Enum}. 
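A sketch of callback and attach_function working together, using libc's qsort (resolved via FFI::Library::LIBC; the :qsort_cmp name is arbitrary):

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      # int (*compar)(const void *a, const void *b)
      callback :qsort_cmp, [:pointer, :pointer], :int
      attach_function :qsort, [:pointer, :size_t, :size_t, :qsort_cmp], :void
    end

    ints = FFI::MemoryPointer.new(:int, 3)
    ints.write_array_of_int([3, 1, 2])
    cmp = proc { |a, b| a.read_int <=> b.read_int }   # a Proc is accepted for a callback parameter
    LibC.qsort(ints, 3, FFI.type_size(:int), cmp)
    ints.read_array_of_int(3)   # => [1, 2, 3]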
+ def enum(*args) + generic_enum(FFI::Enum, *args) + end + + # @overload bitmask(name, values) + # Create a named bitmask + # @example + # bitmask :foo, [:red, :green, :blue] # bits 0,1,2 are used + # bitmask :foo, [:red, :green, 5, :blue] # bits 0,5,6 are used + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(*args) + # Create an unamed bitmask + # @example + # bm = bitmask :red, :green, :blue # bits 0,1,2 are used + # bm = bitmask :red, :green, 5, blue # bits 0,5,6 are used + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(values) + # Create an unamed bitmask + # @example + # bm = bitmask [:red, :green, :blue] # bits 0,1,2 are used + # bm = bitmask [:red, :green, 5, blue] # bits 0,5,6 are used + # @param [Array] values for new bitmask + # @overload bitmask(native_type, name, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, :foo, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol] name for new bitmask + # @param [Array] values for new bitmask + # @overload bitmask(native_type, *args) + # @example + # bitmask FFI::Type::UINT64, :red, :green, :blue + # @param [FFI::Type] native_type native type for new bitmask + # @param [Symbol, Integer] args values for new bitmask + # @overload bitmask(native_type, values) + # Create a named enum and specify the native type. + # @example + # bitmask FFI::Type::UINT64, [:red, :green, :blue] + # @param [FFI::Type] native_type native type for new bitmask + # @param [Array] values for new bitmask + # @return [FFI::Bitmask] + # Create a new FFI::Bitmask + def bitmask(*args) + generic_enum(FFI::Bitmask, *args) + end + + # @param name + # @return [FFI::Enum] + # Find an enum by name. + def enum_type(name) + @ffi_enums.find(name) if defined?(@ffi_enums) + end + + # @param symbol + # @return [FFI::Enum] + # Find an enum by a symbol it contains. + def enum_value(symbol) + @ffi_enums.__map_symbol(symbol) + end + + # @param [DataConverter, Type, Struct, Symbol] t type to find + # @return [Type] + # Find a type definition. + def find_type(t) + if t.kind_of?(Type) + t + + elsif defined?(@ffi_typedefs) && @ffi_typedefs.has_key?(t) + @ffi_typedefs[t] + + elsif t.is_a?(Class) && t < Struct + Type::POINTER + + elsif t.is_a?(DataConverter) + # Add a typedef so next time the converter is used, it hits the cache + typedef Type::Mapped.new(t), t + + end || FFI.find_type(t) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/managedstruct.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/managedstruct.rb new file mode 100644 index 0000000..b5ec8a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/managedstruct.rb @@ -0,0 +1,84 @@ +# Copyright (C) 2008 Mike Dalessio +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
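The enum helper above keeps a two-way mapping between symbols and integers, mirroring C enum semantics where an integer re-assigns the preceding symbol; a short sketch (the :state name and values are illustrative):

    require 'ffi'

    module States
      extend FFI::Library
      # idle = 0, running = 5, stopped = 6
      enum :state, [:idle, :running, 5, :stopped]
    end

    States.enum_type(:state)[:stopped]   # => 6
    States.enum_type(:state)[5]          # => :running
    States.enum_value(:running)          # => 5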
+# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module FFI + # + # FFI::ManagedStruct allows custom garbage-collection of your FFI::Structs. + # + # The typical use case would be when interacting with a library + # that has a nontrivial memory management design, such as a linked + # list or a binary tree. + # + # When the {Struct} instance is garbage collected, FFI::ManagedStruct will + # invoke the class's release() method during object finalization. + # + # @example Example usage: + # module MyLibrary + # ffi_lib "libmylibrary" + # attach_function :new_dlist, [], :pointer + # attach_function :destroy_dlist, [:pointer], :void + # end + # + # class DoublyLinkedList < FFI::ManagedStruct + # @@@ + # struct do |s| + # s.name 'struct dlist' + # s.include 'dlist.h' + # s.field :head, :pointer + # s.field :tail, :pointer + # end + # @@@ + # + # def self.release ptr + # MyLibrary.destroy_dlist(ptr) + # end + # end + # + # begin + # ptr = DoublyLinkedList.new(MyLibrary.new_dlist) + # # do something with the list + # end + # # struct is out of scope, and will be GC'd using DoublyLinkedList#release + # + # + class ManagedStruct < FFI::Struct + + # @overload initialize(pointer) + # @param [Pointer] pointer + # Create a new ManagedStruct which will invoke the class method #release on + # @overload initialize + # A new instance of FFI::ManagedStruct. + def initialize(pointer=nil) + raise NoMethodError, "release() not implemented for class #{self}" unless self.class.respond_to? :release + raise ArgumentError, "Must supply a pointer to memory for the Struct" unless pointer + super AutoPointer.new(pointer, self.class.method(:release)) + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/memorypointer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/memorypointer.rb new file mode 100644 index 0000000..9f07bc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/memorypointer.rb @@ -0,0 +1 @@ +# This class is now implemented in C diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform.rb new file mode 100644 index 0000000..bf01a27 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform.rb @@ -0,0 +1,185 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. 
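ManagedStruct above only needs a class-level release hook; a runnable sketch pairing it with libc's malloc/free (the Point class and its layout are made up):

    require 'ffi'

    module LibC
      extend FFI::Library
      ffi_lib FFI::Library::LIBC
      attach_function :malloc, [:size_t], :pointer
      attach_function :free, [:pointer], :void
    end

    class Point < FFI::ManagedStruct
      layout :x, :int, :y, :int

      # invoked via AutoPointer when the Ruby wrapper is garbage collected
      def self.release(ptr)
        LibC.free(ptr)
      end
    end

    pt = Point.new(LibC.malloc(Point.size))
    pt[:x] = 3
    pt[:y] = 4
    [pt[:x], pt[:y]]   # => [3, 4]; the malloc'd memory is freed when pt is GC'd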
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +require 'rbconfig' +module FFI + class PlatformError < LoadError; end + + # This module defines different constants and class methods to play with + # various platforms. + module Platform + OS = case RbConfig::CONFIG['host_os'].downcase + when /linux/ + "linux" + when /darwin/ + "darwin" + when /freebsd/ + "freebsd" + when /netbsd/ + "netbsd" + when /openbsd/ + "openbsd" + when /dragonfly/ + "dragonflybsd" + when /sunos|solaris/ + "solaris" + when /mingw|mswin/ + "windows" + else + RbConfig::CONFIG['host_os'].downcase + end + + OSVERSION = RbConfig::CONFIG['host_os'].gsub(/[^\d]/, '').to_i + + CPU = RbConfig::CONFIG['host_cpu'] + + ARCH = case CPU.downcase + when /amd64|x86_64|x64/ + "x86_64" + when /i\d86|x86|i86pc/ + "i386" + when /ppc64|powerpc64/ + "powerpc64" + when /ppc|powerpc/ + "powerpc" + when /sparcv9|sparc64/ + "sparcv9" + when /arm64|aarch64/ # MacOS calls it "arm64", other operating systems "aarch64" + "aarch64" + when /^arm/ + if OS == "darwin" # Ruby before 3.0 reports "arm" instead of "arm64" as host_cpu on darwin + "aarch64" + else + "arm" + end + else + RbConfig::CONFIG['host_cpu'] + end + + private + # @param [String) os + # @return [Boolean] + # Test if current OS is +os+. 
+ def self.is_os(os) + OS == os + end + + IS_GNU = defined?(GNU_LIBC) + IS_LINUX = is_os("linux") + IS_MAC = is_os("darwin") + IS_FREEBSD = is_os("freebsd") + IS_NETBSD = is_os("netbsd") + IS_OPENBSD = is_os("openbsd") + IS_DRAGONFLYBSD = is_os("dragonfly") + IS_SOLARIS = is_os("solaris") + IS_WINDOWS = is_os("windows") + IS_BSD = IS_MAC || IS_FREEBSD || IS_NETBSD || IS_OPENBSD || IS_DRAGONFLYBSD + + # Add the version for known ABI breaks + name_version = "12" if IS_FREEBSD && OSVERSION >= 12 # 64-bit inodes + + NAME = "#{ARCH}-#{OS}#{name_version}" + CONF_DIR = File.join(File.dirname(__FILE__), 'platform', NAME) + + public + + LIBPREFIX = case OS + when /windows|msys/ + '' + when /cygwin/ + 'cyg' + else + 'lib' + end + + LIBSUFFIX = case OS + when /darwin/ + 'dylib' + when /linux|bsd|solaris/ + 'so' + when /windows|cygwin|msys/ + 'dll' + else + # Punt and just assume a sane unix (i.e. anything but AIX) + 'so' + end + + LIBC = if IS_WINDOWS + crtname = RbConfig::CONFIG["RUBY_SO_NAME"][/msvc\w+/] || 'ucrtbase' + "#{crtname}.dll" + elsif IS_GNU + GNU_LIBC + elsif OS == 'cygwin' + "cygwin1.dll" + elsif OS == 'msys' + # Not sure how msys 1.0 behaves, tested on MSYS2. + "msys-2.0.dll" + else + "#{LIBPREFIX}c.#{LIBSUFFIX}" + end + + LITTLE_ENDIAN = 1234 unless defined?(LITTLE_ENDIAN) + BIG_ENDIAN = 4321 unless defined?(BIG_ENDIAN) + unless defined?(BYTE_ORDER) + BYTE_ORDER = [0x12345678].pack("I") == [0x12345678].pack("N") ? BIG_ENDIAN : LITTLE_ENDIAN + end + + # Test if current OS is a *BSD (include MAC) + # @return [Boolean] + def self.bsd? + IS_BSD + end + + # Test if current OS is Windows + # @return [Boolean] + def self.windows? + IS_WINDOWS + end + + # Test if current OS is Mac OS + # @return [Boolean] + def self.mac? + IS_MAC + end + + # Test if current OS is Solaris (Sun OS) + # @return [Boolean] + def self.solaris? + IS_SOLARIS + end + + # Test if current OS is a unix OS + # @return [Boolean] + def self.unix? 
+ !IS_WINDOWS + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-darwin/types.conf new file mode 100644 index 0000000..68841bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-darwin/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = long +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_string_t[37] = char +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int 
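The Platform constants above and the per-platform types.conf tables that follow are what let generic type names resolve at runtime; a quick sketch (exact values depend on the machine):

    require 'ffi'

    p FFI::Platform::OS, FFI::Platform::ARCH   # e.g. "darwin", "aarch64"
    p FFI::Platform::NAME                      # e.g. "aarch64-darwin"; picks the platform directory
    p FFI::Platform::LIBC                      # e.g. "libc.dylib" or "libc.so.6"
    p FFI::Platform.windows?                   # => false on the platforms above

    p FFI.type_size(:size_t)                   # => 8 on the 64-bit platforms listed here
    p FFI.find_type(:ssize_t)                  # resolved through the loaded typedefs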
+rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.rsize_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sae_associd_t = uint +rbx.platform.typedef.sae_connid_t = uint +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_off_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-freebsd/types.conf new file mode 100644 index 0000000..8d111e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-freebsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int 
+rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort 
+rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-freebsd12/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-freebsd12/types.conf new file mode 100644 index 0000000..3ccb8f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-freebsd12/types.conf @@ -0,0 +1,181 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = uint +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__char16_t = ushort +rbx.platform.typedef.__char32_t = uint +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = long +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__daddr_t = long +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__rman_res_t = ulong +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = long +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = ulong +rbx.platform.typedef.__uid_t = uint 
+rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = ulong +rbx.platform.typedef.__vm_paddr_t = ulong +rbx.platform.typedef.__vm_size_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.cap_ioctl_t = ulong +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.kpaddr_t = ulong +rbx.platform.typedef.ksize_t = ulong +rbx.platform.typedef.kssize_t = long +rbx.platform.typedef.kvaddr_t = ulong +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.rman_res_t = ulong +rbx.platform.typedef.rsize_t = ulong +rbx.platform.typedef.rune_t = int +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sbintime_t = long +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = 
uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_register_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = ulong +rbx.platform.typedef.vm_ooffset_t = ulong +rbx.platform.typedef.vm_paddr_t = ulong +rbx.platform.typedef.vm_pindex_t = ulong +rbx.platform.typedef.vm_size_t = ulong +rbx.platform.typedef.wchar_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-linux/types.conf new file mode 100644 index 0000000..4cd2438 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong 
+rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-openbsd/types.conf new file mode 100644 index 0000000..6abc9c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/aarch64-openbsd/types.conf @@ -0,0 +1,134 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long 
+rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar 
+rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-freebsd/types.conf new file mode 100644 index 0000000..cfbb90f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-freebsd/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long 
+rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint 
+rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-freebsd12/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-freebsd12/types.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-freebsd12/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint 
+rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-linux/types.conf new file mode 100644 index 0000000..a070d39 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/arm-linux/types.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long_long 
+rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long 
+rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-cygwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-cygwin/types.conf new file mode 100644 index 0000000..93f6b86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-cygwin/types.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-darwin/types.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-darwin/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int 
+rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-freebsd/types.conf new file mode 100644 index 0000000..6c882d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-freebsd/types.conf @@ -0,0 +1,152 @@ 
+ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string 
+rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-freebsd12/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-freebsd12/types.conf new file mode 100644 index 0000000..523370d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-freebsd12/types.conf @@ -0,0 +1,152 @@ + +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = uint +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpumask_t = uint +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = int +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint 
+rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long_long +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = int +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong_long +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = int +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = long_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = uint +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__u_register_t = uint +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = uint +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = uint +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = uint +rbx.platform.typedef.__vm_ooffset_t = long_long +rbx.platform.typedef.__vm_paddr_t = uint +rbx.platform.typedef.__vm_pindex_t = ulong_long +rbx.platform.typedef.__vm_size_t = uint +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = uint +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpumask_t = uint +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint 
+rbx.platform.typedef.id_t = long_long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = long_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_register_t = uint +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = uint +rbx.platform.typedef.vm_ooffset_t = long_long +rbx.platform.typedef.vm_paddr_t = uint +rbx.platform.typedef.vm_pindex_t = ulong_long +rbx.platform.typedef.vm_size_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-gnu/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-gnu/types.conf new file mode 100644 index 0000000..fa2fa8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-gnu/types.conf @@ -0,0 +1,107 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsid_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = 
long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__pthread_key = int +rbx.platform.typedef.__pthread_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__sigset_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.fsid_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.pthread_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sigset_t = ulong +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-linux/types.conf new file mode 100644 index 0000000..feb6bc4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-linux/types.conf @@ -0,0 +1,103 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = 
long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint 
+rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-netbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-netbsd/types.conf new file mode 100644 index 0000000..a5aba89 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-netbsd/types.conf @@ -0,0 +1,126 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int 
+rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-openbsd/types.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-openbsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char 
+rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar 
+rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-solaris/types.conf new file mode 100644 index 0000000..22a2414 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-solaris/types.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long 
+rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-windows/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-windows/types.conf new file mode 100644 index 0000000..a5d0b05 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/i386-windows/types.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint +rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = int +rbx.platform.typedef._sigset_t = ulong +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int 
+rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rsize_t = uint +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/ia64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/ia64-linux/types.conf new file mode 100644 index 0000000..e0eeecc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/ia64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong 
+rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint 
+rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips64-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long 
+rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint 
+rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips64el-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips64el-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mips64el-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char 
+rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsel-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsel-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsel-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort 
+rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa32r6-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa32r6-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa32r6-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint 
+rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa32r6el-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa32r6el-linux/types.conf new file mode 100644 index 0000000..24e8202 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa32r6el-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = 
long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort 
+rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa64r6-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa64r6-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa64r6-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t 
= long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa64r6el-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa64r6el-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/mipsisa64r6el-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint 
+rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-aix/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-aix/types.conf new file mode 100644 index 0000000..cbd20e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-aix/types.conf @@ -0,0 +1,180 @@ +rbx.platform.typedef.UTF32Char = uint +rbx.platform.typedef.UniChar = ushort +rbx.platform.typedef.__cptr32 = string +rbx.platform.typedef.__cptr64 = ulong_long +rbx.platform.typedef.__long32_t = long +rbx.platform.typedef.__long64_t = int +rbx.platform.typedef.__ptr32 = pointer +rbx.platform.typedef.__ptr64 = ulong_long +rbx.platform.typedef.__ulong32_t = ulong +rbx.platform.typedef.__ulong64_t = uint +rbx.platform.typedef.aptx_t = ushort +rbx.platform.typedef.blkcnt32_t = int +rbx.platform.typedef.blkcnt64_t = ulong_long +rbx.platform.typedef.blkcnt_t = int +rbx.platform.typedef.blksize32_t = int +rbx.platform.typedef.blksize64_t = ulong_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.boolean_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.chan_t = int +rbx.platform.typedef.class_id_t = uint +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = long_long +rbx.platform.typedef.cnt64_t = long_long +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.crid_t = 
int +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev32_t = uint +rbx.platform.typedef.dev64_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.esid_t = uint +rbx.platform.typedef.ext_t = int +rbx.platform.typedef.fpos64_t = long_long +rbx.platform.typedef.fpos_t = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino32_t = uint +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16 = short +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32 = int +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int32long64_t = int +rbx.platform.typedef.int64 = long_long +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8 = char +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intfast_t = int +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.krpn_t = int +rbx.platform.typedef.kvmhandle_t = ulong +rbx.platform.typedef.kvmid_t = long +rbx.platform.typedef.kvpn_t = int +rbx.platform.typedef.level_t = int +rbx.platform.typedef.liobn_t = uint +rbx.platform.typedef.long32int64_t = long +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.mid_t = pointer +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.mtyp_t = long +rbx.platform.typedef.nlink_t = short +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.paddr_t = long +rbx.platform.typedef.pdtx_t = int +rbx.platform.typedef.pid32_t = int +rbx.platform.typedef.pid64_t = ulong_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pshift_t = ushort +rbx.platform.typedef.psize_t = long_long +rbx.platform.typedef.psx_t = short +rbx.platform.typedef.ptex_t = ulong +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.rpn64_t = long_long +rbx.platform.typedef.rpn_t = int +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.signal_t = int +rbx.platform.typedef.size64_t = ulong_long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.slab_t[12] = char +rbx.platform.typedef.snidx_t = int +rbx.platform.typedef.socklen_t = ulong +rbx.platform.typedef.soff_t = int +rbx.platform.typedef.sshift_t = ushort +rbx.platform.typedef.ssize64_t = long_long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.swhatx_t = ulong +rbx.platform.typedef.tid32_t = int +rbx.platform.typedef.tid64_t = ulong_long +rbx.platform.typedef.tid_t = int +rbx.platform.typedef.time32_t = int +rbx.platform.typedef.time64_t = long_long +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer32_t = int +rbx.platform.typedef.timer64_t = long_long 
+rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16 = ushort +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32 = uint +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64 = ulong_long +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8 = uchar +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar = uchar +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint32long64_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintfast_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong32int64_t = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unidx_t = int +rbx.platform.typedef.unit_addr_t = ulong_long +rbx.platform.typedef.ureg_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.va_list = string +rbx.platform.typedef.vmfkey_t = uint +rbx.platform.typedef.vmhandle32_t = uint +rbx.platform.typedef.vmhandle_t = ulong +rbx.platform.typedef.vmhwkey_t = int +rbx.platform.typedef.vmid32_t = int +rbx.platform.typedef.vmid64_t = long_long +rbx.platform.typedef.vmid_t = long +rbx.platform.typedef.vmidx_t = int +rbx.platform.typedef.vmkey_t = int +rbx.platform.typedef.vmlpghandle_t = ulong +rbx.platform.typedef.vmm_lock_t = int +rbx.platform.typedef.vmnodeidx_t = int +rbx.platform.typedef.vmprkey_t = uint +rbx.platform.typedef.vmsize_t = int +rbx.platform.typedef.vpn_t = int +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = uint +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-darwin/types.conf new file mode 100644 index 0000000..ae100f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-darwin/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint 
+rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = int +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort diff --git 
a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-linux/types.conf new file mode 100644 index 0000000..42eec13 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-linux/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = int +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int 
+rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-openbsd/types.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc-openbsd/types.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long 
+rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long 
+rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc64-linux/types.conf new file mode 100644 index 0000000..b61f4f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int 
+rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc64le-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc64le-linux/types.conf new file mode 100644 index 0000000..9ac9c2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/powerpc64le-linux/types.conf @@ -0,0 +1,100 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong 
+rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/riscv64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/riscv64-linux/types.conf new file mode 100644 index 0000000..ede0f98 --- 
/dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/riscv64-linux/types.conf @@ -0,0 +1,104 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint 
+rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/s390-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/s390-linux/types.conf new file mode 100644 index 0000000..291b032 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/s390-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long 
+rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/s390x-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/s390x-linux/types.conf new file mode 100644 index 0000000..32a7feb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/s390x-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = long 
+rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc-linux/types.conf new file mode 100644 index 0000000..aea09d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long_long +rbx.platform.typedef.__blkcnt64_t = long_long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong_long +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong_long +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong_long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long 
+rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = int +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long_long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long_long +rbx.platform.typedef.__rlim64_t = ulong_long +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = int +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong_long +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong_long +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc-solaris/types.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc-solaris/types.conf @@ -0,0 
+1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short 
+rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc64-linux/types.conf new file mode 100644 index 0000000..7626bfc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparc64-linux/types.conf @@ -0,0 +1,102 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.*__qaddr_t = long +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = 
long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparcv9-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparcv9-openbsd/types.conf new file mode 100644 index 0000000..aea7955 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparcv9-openbsd/types.conf @@ -0,0 +1,156 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = 
int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong 
+rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong +rbx.platform.typedef.wchar_t = int +rbx.platform.typedef.wint_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparcv9-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparcv9-solaris/types.conf new file mode 100644 index 0000000..a1548e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/sparcv9-solaris/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.() = pointer +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.Psocklen_t = pointer +rbx.platform.typedef.avl_index_t = uint +rbx.platform.typedef.blkcnt64_t = long_long +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong_long +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt64_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = long +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int) = pointer 
+rbx.platform.typedef.int) = pointer +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = int +rbx.platform.typedef.ipaddr_t = uint +rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.kid_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = ulong +rbx.platform.typedef.minor_t = ulong +rbx.platform.typedef.mode_t = ulong +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long_long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = long +rbx.platform.typedef.poolid_t = long +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = long +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = int +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = uint +rbx.platform.typedef.size_t) = pointer +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = long +rbx.platform.typedef.t_uscalar_t = ulong +rbx.platform.typedef.taskid_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.ts_t = long_long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = long +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = uint +rbx.platform.typedef.ulong = ulong 
+rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong_long +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = long diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-cygwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-cygwin/types.conf new file mode 100644 index 0000000..5bef6d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-cygwin/types.conf @@ -0,0 +1,3 @@ +rbx.platform.typedef.ptrdiff_t = int64 +rbx.platform.typedef.size_t = uint64 +rbx.platform.typedef.ssize_t = int64 diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-darwin/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-darwin/types.conf new file mode 100644 index 0000000..68841bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-darwin/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.__darwin_blkcnt_t = long_long +rbx.platform.typedef.__darwin_blksize_t = int +rbx.platform.typedef.__darwin_clock_t = ulong +rbx.platform.typedef.__darwin_ct_rune_t = int +rbx.platform.typedef.__darwin_dev_t = int +rbx.platform.typedef.__darwin_fsblkcnt_t = uint +rbx.platform.typedef.__darwin_fsfilcnt_t = uint +rbx.platform.typedef.__darwin_gid_t = uint +rbx.platform.typedef.__darwin_id_t = uint +rbx.platform.typedef.__darwin_ino64_t = ulong_long +rbx.platform.typedef.__darwin_ino_t = ulong_long +rbx.platform.typedef.__darwin_intptr_t = long +rbx.platform.typedef.__darwin_mach_port_name_t = uint +rbx.platform.typedef.__darwin_mach_port_t = uint +rbx.platform.typedef.__darwin_mode_t = ushort +rbx.platform.typedef.__darwin_natural_t = uint +rbx.platform.typedef.__darwin_off_t = long_long +rbx.platform.typedef.__darwin_pid_t = int +rbx.platform.typedef.__darwin_pthread_key_t = ulong +rbx.platform.typedef.__darwin_ptrdiff_t = long +rbx.platform.typedef.__darwin_rune_t = int +rbx.platform.typedef.__darwin_sigset_t = uint +rbx.platform.typedef.__darwin_size_t = ulong +rbx.platform.typedef.__darwin_socklen_t = uint +rbx.platform.typedef.__darwin_ssize_t = long +rbx.platform.typedef.__darwin_suseconds_t = int +rbx.platform.typedef.__darwin_time_t = long +rbx.platform.typedef.__darwin_uid_t = uint +rbx.platform.typedef.__darwin_useconds_t = uint +rbx.platform.typedef.__darwin_uuid_string_t[37] = char +rbx.platform.typedef.__darwin_uuid_t[16] = uchar +rbx.platform.typedef.__darwin_wchar_t = int +rbx.platform.typedef.__darwin_wint_t = int +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.fd_mask = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = uint +rbx.platform.typedef.fsfilcnt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint 
+rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino64_t = ulong_long +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.rsize_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sae_associd_t = uint +rbx.platform.typedef.sae_connid_t = uint +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.syscall_arg_t = ulong_long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.user_addr_t = ulong_long +rbx.platform.typedef.user_long_t = long_long +rbx.platform.typedef.user_off_t = long_long +rbx.platform.typedef.user_size_t = ulong_long +rbx.platform.typedef.user_ssize_t = long_long +rbx.platform.typedef.user_time_t = long_long +rbx.platform.typedef.user_ulong_t = ulong_long +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-dragonflybsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-dragonflybsd/types.conf new file mode 100644 index 0000000..889f6a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-dragonflybsd/types.conf @@ -0,0 +1,130 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__clock_t = 
ulong +rbx.platform.typedef.__clockid_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intlp_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintlp_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = ulong +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.sa_family_t = uchar 
+rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_daddr_t = uint +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.v_caddr_t = pointer +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-freebsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-freebsd/types.conf new file mode 100644 index 0000000..8dbe370 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-freebsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long_long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long 
+rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long_long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-freebsd12/types.conf 
b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-freebsd12/types.conf new file mode 100644 index 0000000..31b1073 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-freebsd12/types.conf @@ -0,0 +1,158 @@ +rbx.platform.typedef.*) = pointer +rbx.platform.typedef.___wchar_t = int +rbx.platform.typedef.__accmode_t = int +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__char16_t = ushort +rbx.platform.typedef.__char32_t = uint +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpulevel_t = int +rbx.platform.typedef.__cpusetid_t = int +rbx.platform.typedef.__cpuwhich_t = int +rbx.platform.typedef.__critical_t = long +rbx.platform.typedef.__ct_rune_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = ulong +rbx.platform.typedef.__fflags_t = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = long +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intfptr_t = long +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__lwpid_t = int +rbx.platform.typedef.__mode_t = ushort +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = long +rbx.platform.typedef.__rman_res_t = ulong +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = long +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__u_register_t = ulong +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintfptr_t = ulong +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vm_offset_t = ulong +rbx.platform.typedef.__vm_paddr_t = ulong +rbx.platform.typedef.__vm_size_t = ulong +rbx.platform.typedef.__wint_t = int 
+rbx.platform.typedef.accmode_t = int +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.c_caddr_t = pointer +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.cap_ioctl_t = ulong +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpulevel_t = int +rbx.platform.typedef.cpusetid_t = int +rbx.platform.typedef.cpuwhich_t = int +rbx.platform.typedef.critical_t = long +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fflags_t = uint +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = long +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.ksize_t = ulong +rbx.platform.typedef.kvaddr_t = ulong +rbx.platform.typedef.lwpid_t = int +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = long +rbx.platform.typedef.rman_res_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sbintime_t = long +rbx.platform.typedef.segsz_t = long +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_register_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = ulong +rbx.platform.typedef.vm_ooffset_t = long +rbx.platform.typedef.vm_paddr_t = ulong +rbx.platform.typedef.vm_pindex_t = ulong +rbx.platform.typedef.vm_size_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-haiku/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-haiku/types.conf new file mode 100644 index 0000000..d5ddfa7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-haiku/types.conf @@ -0,0 +1,117 @@ +rbx.platform.typedef.__haiku_addr_t = ulong +rbx.platform.typedef.__haiku_generic_addr_t = ulong +rbx.platform.typedef.__haiku_int16 = short +rbx.platform.typedef.__haiku_int32 = int 
+rbx.platform.typedef.__haiku_int64 = long +rbx.platform.typedef.__haiku_int8 = char +rbx.platform.typedef.__haiku_phys_addr_t = ulong +rbx.platform.typedef.__haiku_phys_saddr_t = long +rbx.platform.typedef.__haiku_saddr_t = long +rbx.platform.typedef.__haiku_std_int16 = short +rbx.platform.typedef.__haiku_std_int32 = int +rbx.platform.typedef.__haiku_std_int64 = long +rbx.platform.typedef.__haiku_std_int8 = char +rbx.platform.typedef.__haiku_std_uint16 = ushort +rbx.platform.typedef.__haiku_std_uint32 = uint +rbx.platform.typedef.__haiku_std_uint64 = ulong +rbx.platform.typedef.__haiku_std_uint8 = uchar +rbx.platform.typedef.__haiku_uint16 = ushort +rbx.platform.typedef.__haiku_uint32 = uint +rbx.platform.typedef.__haiku_uint64 = ulong +rbx.platform.typedef.__haiku_uint8 = uchar +rbx.platform.typedef.addr_t = ulong +rbx.platform.typedef.bigtime_t = long +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = pointer +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = int +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fd_mask = uint +rbx.platform.typedef.fsblkcnt_t = long +rbx.platform.typedef.fsfilcnt_t = long +rbx.platform.typedef.generic_addr_t = ulong +rbx.platform.typedef.generic_size_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = int +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = long +rbx.platform.typedef.int16 = short +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32 = int +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64 = long +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8 = char +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = int +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nanotime_t = long +rbx.platform.typedef.nlink_t = int +rbx.platform.typedef.off_t = long +rbx.platform.typedef.perform_code = uint +rbx.platform.typedef.phys_addr_t = ulong +rbx.platform.typedef.phys_size_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.sig_atomic_t = int +rbx.platform.typedef.sigset_t = ulong +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.status_t = int +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.type_code = uint +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16 = ushort +rbx.platform.typedef.uint16_t = ushort 
+rbx.platform.typedef.uint32 = uint +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64 = ulong +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8 = uchar +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uint +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.umode_t = uint +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.unichar = ushort +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.void*) = pointer +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-linux/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-linux/types.conf new file mode 100644 index 0000000..060f161 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-linux/types.conf @@ -0,0 +1,132 @@ +rbx.platform.typedef.*__caddr_t = char +rbx.platform.typedef.__blkcnt64_t = long +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = long +rbx.platform.typedef.__clock_t = long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__daddr_t = int +rbx.platform.typedef.__dev_t = ulong +rbx.platform.typedef.__fd_mask = long +rbx.platform.typedef.__fsblkcnt64_t = ulong +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt64_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__fsword_t = long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino64_t = ulong +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = int +rbx.platform.typedef.__loff_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = ulong +rbx.platform.typedef.__off64_t = long +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__priority_which_t = int +rbx.platform.typedef.__quad_t = long +rbx.platform.typedef.__rlim64_t = ulong +rbx.platform.typedef.__rlim_t = ulong +rbx.platform.typedef.__rlimit_resource_t = int +rbx.platform.typedef.__rusage_who_t = int +rbx.platform.typedef.__sig_atomic_t = int +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__syscall_slong_t = long +rbx.platform.typedef.__syscall_ulong_t = ulong +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = pointer +rbx.platform.typedef.__u_char = uchar +rbx.platform.typedef.__u_int = uint +rbx.platform.typedef.__u_long = ulong +rbx.platform.typedef.__u_quad_t = ulong +rbx.platform.typedef.__u_short = ushort +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uintmax_t = 
ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = long +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = long +rbx.platform.typedef.int_fast32_t = long +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = int +rbx.platform.typedef.loff_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ulong +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_once_t = int +rbx.platform.typedef.pthread_t = ulong +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.quad_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = pointer +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ulong +rbx.platform.typedef.uint_fast32_t = ulong +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.wchar_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-msys/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-msys/types.conf new file mode 100644 index 0000000..aa827c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-msys/types.conf @@ -0,0 +1,119 @@ +rbx.platform.typedef.*addr_t = char +rbx.platform.typedef.__ULong = uint +rbx.platform.typedef.__blkcnt_t = long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = ulong +rbx.platform.typedef.__clockid_t = ulong 
+rbx.platform.typedef.__cpu_mask = ulong +rbx.platform.typedef.__dev_t = uint +rbx.platform.typedef.__fsblkcnt_t = ulong +rbx.platform.typedef.__fsfilcnt_t = ulong +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__ino_t = ulong +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long_long +rbx.platform.typedef.__loff_t = long_long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nl_item = int +rbx.platform.typedef.__nlink_t = ushort +rbx.platform.typedef.__off_t = long +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__sigset_t = ulong +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = int +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__time_t = long +rbx.platform.typedef.__timer_t = ulong +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = ulong +rbx.platform.typedef._fpos64_t = long_long +rbx.platform.typedef._fpos_t = long +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = ulong +rbx.platform.typedef.clockid_t = ulong +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.fd_mask = ulong +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = long +rbx.platform.typedef.int_fast32_t = long +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long_long +rbx.platform.typedef.loff_t = long_long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = ushort +rbx.platform.typedef.off_t = long +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sbintime_t = long +rbx.platform.typedef.sig_atomic_t = int +rbx.platform.typedef.sigset_t = ulong +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = int +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = ulong +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong +rbx.platform.typedef.u_int8_t = uchar 
+rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_register_t = ulong +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ulong +rbx.platform.typedef.uint_fast32_t = ulong +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.useconds_t = ulong +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vm_offset_t = ulong +rbx.platform.typedef.vm_size_t = ulong +rbx.platform.typedef.wint_t = uint diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-netbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-netbsd/types.conf new file mode 100644 index 0000000..15a0d61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-netbsd/types.conf @@ -0,0 +1,128 @@ +rbx.platform.typedef.__clock_t = int +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = int +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = uint +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = int +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = int +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = int +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort 
+rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = int +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr64_t = long_long +rbx.platform.typedef.daddr_t = int +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = uint +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = int +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = int +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = int +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-openbsd/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-openbsd/types.conf new file mode 100644 index 0000000..6abc9c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-openbsd/types.conf @@ -0,0 +1,134 @@ +rbx.platform.typedef.__blkcnt_t = long_long +rbx.platform.typedef.__blksize_t = int +rbx.platform.typedef.__clock_t = long_long +rbx.platform.typedef.__clockid_t = int +rbx.platform.typedef.__cpuid_t = ulong +rbx.platform.typedef.__dev_t = int +rbx.platform.typedef.__fd_mask = uint +rbx.platform.typedef.__fixpt_t = uint +rbx.platform.typedef.__fsblkcnt_t = 
ulong_long +rbx.platform.typedef.__fsfilcnt_t = ulong_long +rbx.platform.typedef.__gid_t = uint +rbx.platform.typedef.__id_t = uint +rbx.platform.typedef.__in_addr_t = uint +rbx.platform.typedef.__in_port_t = ushort +rbx.platform.typedef.__ino_t = ulong_long +rbx.platform.typedef.__int16_t = short +rbx.platform.typedef.__int32_t = int +rbx.platform.typedef.__int64_t = long_long +rbx.platform.typedef.__int8_t = char +rbx.platform.typedef.__int_fast16_t = int +rbx.platform.typedef.__int_fast32_t = int +rbx.platform.typedef.__int_fast64_t = long_long +rbx.platform.typedef.__int_fast8_t = int +rbx.platform.typedef.__int_least16_t = short +rbx.platform.typedef.__int_least32_t = int +rbx.platform.typedef.__int_least64_t = long_long +rbx.platform.typedef.__int_least8_t = char +rbx.platform.typedef.__intmax_t = long_long +rbx.platform.typedef.__intptr_t = long +rbx.platform.typedef.__key_t = long +rbx.platform.typedef.__mode_t = uint +rbx.platform.typedef.__nlink_t = uint +rbx.platform.typedef.__off_t = long_long +rbx.platform.typedef.__paddr_t = ulong +rbx.platform.typedef.__pid_t = int +rbx.platform.typedef.__psize_t = ulong +rbx.platform.typedef.__ptrdiff_t = long +rbx.platform.typedef.__register_t = long +rbx.platform.typedef.__rlim_t = ulong_long +rbx.platform.typedef.__rune_t = int +rbx.platform.typedef.__sa_family_t = uchar +rbx.platform.typedef.__segsz_t = int +rbx.platform.typedef.__size_t = ulong +rbx.platform.typedef.__socklen_t = uint +rbx.platform.typedef.__ssize_t = long +rbx.platform.typedef.__suseconds_t = long +rbx.platform.typedef.__swblk_t = int +rbx.platform.typedef.__time_t = long_long +rbx.platform.typedef.__timer_t = int +rbx.platform.typedef.__uid_t = uint +rbx.platform.typedef.__uint16_t = ushort +rbx.platform.typedef.__uint32_t = uint +rbx.platform.typedef.__uint64_t = ulong_long +rbx.platform.typedef.__uint8_t = uchar +rbx.platform.typedef.__uint_fast16_t = uint +rbx.platform.typedef.__uint_fast32_t = uint +rbx.platform.typedef.__uint_fast64_t = ulong_long +rbx.platform.typedef.__uint_fast8_t = uint +rbx.platform.typedef.__uint_least16_t = ushort +rbx.platform.typedef.__uint_least32_t = uint +rbx.platform.typedef.__uint_least64_t = ulong_long +rbx.platform.typedef.__uint_least8_t = uchar +rbx.platform.typedef.__uintmax_t = ulong_long +rbx.platform.typedef.__uintptr_t = ulong +rbx.platform.typedef.__useconds_t = uint +rbx.platform.typedef.__vaddr_t = ulong +rbx.platform.typedef.__vsize_t = ulong +rbx.platform.typedef.__wchar_t = int +rbx.platform.typedef.__wctrans_t = pointer +rbx.platform.typedef.__wctype_t = pointer +rbx.platform.typedef.__wint_t = int +rbx.platform.typedef.blkcnt_t = long_long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.caddr_t = string +rbx.platform.typedef.clock_t = long_long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cpuid_t = ulong +rbx.platform.typedef.daddr32_t = int +rbx.platform.typedef.daddr_t = long_long +rbx.platform.typedef.dev_t = int +rbx.platform.typedef.fixpt_t = uint +rbx.platform.typedef.fsblkcnt_t = ulong_long +rbx.platform.typedef.fsfilcnt_t = ulong_long +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.id_t = uint +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.ino_t = ulong_long +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.key_t = long +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.nlink_t = uint 
+rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.paddr_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.psize_t = ulong +rbx.platform.typedef.qaddr_t = pointer +rbx.platform.typedef.quad_t = long_long +rbx.platform.typedef.register_t = long +rbx.platform.typedef.rlim_t = ulong_long +rbx.platform.typedef.sa_family_t = uchar +rbx.platform.typedef.segsz_t = int +rbx.platform.typedef.sigset_t = uint +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.swblk_t = int +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_int16_t = ushort +rbx.platform.typedef.u_int32_t = uint +rbx.platform.typedef.u_int64_t = ulong_long +rbx.platform.typedef.u_int8_t = uchar +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_quad_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.vaddr_t = ulong +rbx.platform.typedef.vsize_t = ulong diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-solaris/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-solaris/types.conf new file mode 100644 index 0000000..a7890d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-solaris/types.conf @@ -0,0 +1,122 @@ +rbx.platform.typedef.*caddr_t = char +rbx.platform.typedef.blkcnt64_t = long +rbx.platform.typedef.blkcnt_t = long +rbx.platform.typedef.blksize_t = int +rbx.platform.typedef.clock_t = long +rbx.platform.typedef.clockid_t = int +rbx.platform.typedef.cnt_t = short +rbx.platform.typedef.cpu_flag_t = ushort +rbx.platform.typedef.ctid_t = int +rbx.platform.typedef.daddr_t = long +rbx.platform.typedef.datalink_id_t = uint +rbx.platform.typedef.dev_t = ulong +rbx.platform.typedef.diskaddr_t = ulong_long +rbx.platform.typedef.disp_lock_t = uchar +rbx.platform.typedef.fd_mask = long +rbx.platform.typedef.fds_mask = long +rbx.platform.typedef.fsblkcnt64_t = ulong +rbx.platform.typedef.fsblkcnt_t = ulong +rbx.platform.typedef.fsfilcnt64_t = ulong +rbx.platform.typedef.fsfilcnt_t = ulong +rbx.platform.typedef.gid_t = uint +rbx.platform.typedef.hrtime_t = long_long +rbx.platform.typedef.id_t = int +rbx.platform.typedef.in_addr_t = uint +rbx.platform.typedef.in_port_t = ushort +rbx.platform.typedef.index_t = short +rbx.platform.typedef.ino64_t = ulong +rbx.platform.typedef.ino_t = ulong +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = int +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long +rbx.platform.typedef.intptr_t = long +rbx.platform.typedef.ipaddr_t = uint 
+rbx.platform.typedef.k_fltset_t = uint +rbx.platform.typedef.key_t = int +rbx.platform.typedef.len_t = ulong_long +rbx.platform.typedef.lgrp_id_t = int +rbx.platform.typedef.lock_t = uchar +rbx.platform.typedef.longlong_t = long_long +rbx.platform.typedef.major_t = uint +rbx.platform.typedef.minor_t = uint +rbx.platform.typedef.mode_t = uint +rbx.platform.typedef.model_t = uint +rbx.platform.typedef.nfds_t = ulong +rbx.platform.typedef.nlink_t = uint +rbx.platform.typedef.o_dev_t = short +rbx.platform.typedef.o_gid_t = ushort +rbx.platform.typedef.o_ino_t = ushort +rbx.platform.typedef.o_mode_t = ushort +rbx.platform.typedef.o_nlink_t = short +rbx.platform.typedef.o_pid_t = short +rbx.platform.typedef.o_uid_t = ushort +rbx.platform.typedef.off64_t = long +rbx.platform.typedef.off_t = long +rbx.platform.typedef.offset_t = long_long +rbx.platform.typedef.pad64_t = long +rbx.platform.typedef.pfn_t = ulong +rbx.platform.typedef.pgcnt_t = ulong +rbx.platform.typedef.pid_t = int +rbx.platform.typedef.poolid_t = int +rbx.platform.typedef.pri_t = short +rbx.platform.typedef.projid_t = int +rbx.platform.typedef.pthread_key_t = uint +rbx.platform.typedef.pthread_t = uint +rbx.platform.typedef.ptrdiff_t = long +rbx.platform.typedef.rlim64_t = ulong_long +rbx.platform.typedef.rlim_t = ulong +rbx.platform.typedef.sa_family_t = ushort +rbx.platform.typedef.size_t = ulong +rbx.platform.typedef.socklen_t = uint +rbx.platform.typedef.spgcnt_t = long +rbx.platform.typedef.ssize_t = long +rbx.platform.typedef.suseconds_t = long +rbx.platform.typedef.sysid_t = short +rbx.platform.typedef.t_scalar_t = int +rbx.platform.typedef.t_uscalar_t = uint +rbx.platform.typedef.taskid_t = int +rbx.platform.typedef.time_t = long +rbx.platform.typedef.timer_t = int +rbx.platform.typedef.u_char = uchar +rbx.platform.typedef.u_int = uint +rbx.platform.typedef.u_long = ulong +rbx.platform.typedef.u_longlong_t = ulong_long +rbx.platform.typedef.u_offset_t = ulong_long +rbx.platform.typedef.u_short = ushort +rbx.platform.typedef.uchar_t = uchar +rbx.platform.typedef.uid_t = uint +rbx.platform.typedef.uint = uint +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint32_t = uint +rbx.platform.typedef.uint64_t = ulong +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = uint +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least32_t = uint +rbx.platform.typedef.uint_least64_t = ulong +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uint_t = uint +rbx.platform.typedef.uintmax_t = ulong +rbx.platform.typedef.uintptr_t = ulong +rbx.platform.typedef.ulong = ulong +rbx.platform.typedef.ulong_t = ulong +rbx.platform.typedef.unchar = uchar +rbx.platform.typedef.upad64_t = ulong +rbx.platform.typedef.use_t = uchar +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.ushort = ushort +rbx.platform.typedef.ushort_t = ushort +rbx.platform.typedef.zoneid_t = int diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-windows/types.conf b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-windows/types.conf new file mode 100644 index 0000000..5bdbbde --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/platform/x86_64-windows/types.conf @@ -0,0 +1,52 @@ +rbx.platform.typedef.__time32_t = long +rbx.platform.typedef.__time64_t = long_long +rbx.platform.typedef._dev_t = uint 
+rbx.platform.typedef._ino_t = ushort +rbx.platform.typedef._mode_t = ushort +rbx.platform.typedef._off64_t = long_long +rbx.platform.typedef._off_t = long +rbx.platform.typedef._pid_t = long_long +rbx.platform.typedef._sigset_t = ulong_long +rbx.platform.typedef.dev_t = uint +rbx.platform.typedef.errno_t = int +rbx.platform.typedef.ino_t = ushort +rbx.platform.typedef.int16_t = short +rbx.platform.typedef.int32_t = int +rbx.platform.typedef.int64_t = long_long +rbx.platform.typedef.int8_t = char +rbx.platform.typedef.int_fast16_t = short +rbx.platform.typedef.int_fast32_t = int +rbx.platform.typedef.int_fast64_t = long_long +rbx.platform.typedef.int_fast8_t = char +rbx.platform.typedef.int_least16_t = short +rbx.platform.typedef.int_least32_t = int +rbx.platform.typedef.int_least64_t = long_long +rbx.platform.typedef.int_least8_t = char +rbx.platform.typedef.intmax_t = long_long +rbx.platform.typedef.intptr_t = long_long +rbx.platform.typedef.mode_t = ushort +rbx.platform.typedef.off32_t = long +rbx.platform.typedef.off64_t = long_long +rbx.platform.typedef.off_t = long_long +rbx.platform.typedef.pid_t = long_long +rbx.platform.typedef.ptrdiff_t = long_long +rbx.platform.typedef.rsize_t = ulong_long +rbx.platform.typedef.size_t = ulong_long +rbx.platform.typedef.ssize_t = long_long +rbx.platform.typedef.time_t = long_long +rbx.platform.typedef.uint16_t = ushort +rbx.platform.typedef.uint64_t = ulong_long +rbx.platform.typedef.uint8_t = uchar +rbx.platform.typedef.uint_fast16_t = ushort +rbx.platform.typedef.uint_fast32_t = uint +rbx.platform.typedef.uint_fast64_t = ulong_long +rbx.platform.typedef.uint_fast8_t = uchar +rbx.platform.typedef.uint_least16_t = ushort +rbx.platform.typedef.uint_least64_t = ulong_long +rbx.platform.typedef.uint_least8_t = uchar +rbx.platform.typedef.uintmax_t = ulong_long +rbx.platform.typedef.uintptr_t = ulong_long +rbx.platform.typedef.useconds_t = uint +rbx.platform.typedef.wchar_t = ushort +rbx.platform.typedef.wctype_t = ushort +rbx.platform.typedef.wint_t = ushort diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/pointer.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/pointer.rb new file mode 100644 index 0000000..37a054b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/pointer.rb @@ -0,0 +1,167 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' + +# NOTE: all method definitions in this file are conditional on +# whether they are not already defined. This is needed because +# some Ruby implementations (e.g., TruffleRuby) might already +# provide these methods due to using FFI internally, and we +# should not override them to avoid warnings. + +module FFI + class Pointer + + # Pointer size + SIZE = Platform::ADDRESS_SIZE / 8 unless const_defined?(:SIZE) + + # Return the size of a pointer on the current platform, in bytes + # @return [Numeric] + def self.size + SIZE + end unless respond_to?(:size) + + # @param [nil,Numeric] len length of string to return + # @return [String] + # Read pointer's contents as a string, or the first +len+ bytes of the + # equivalent string if +len+ is not +nil+. + def read_string(len=nil) + if len + return ''.b if len == 0 + get_bytes(0, len) + else + get_string(0) + end + end unless method_defined?(:read_string) + + # @param [Numeric] len length of string to return + # @return [String] + # Read the first +len+ bytes of pointer's contents as a string. + # + # Same as: + # ptr.read_string(len) # with len not nil + def read_string_length(len) + get_bytes(0, len) + end unless method_defined?(:read_string_length) + + # @return [String] + # Read pointer's contents as a string. + # + # Same as: + # ptr.read_string # with no len + def read_string_to_null + get_string(0) + end unless method_defined?(:read_string_to_null) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +len+ first bytes of +str+ in pointer's contents. + # + # Same as: + # ptr.write_string(str, len) # with len not nil + def write_string_length(str, len) + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string_length) + + # @param [String] str string to write + # @param [Numeric] len length of string to return + # @return [self] + # Write +str+ in pointer's contents, or first +len+ bytes if + # +len+ is not +nil+. + def write_string(str, len=nil) + len = str.bytesize unless len + # Write the string data without NUL termination + put_bytes(0, str, 0, len) + end unless method_defined?(:write_string) + + # @param [Type] type type of data to read from pointer's contents + # @param [Symbol] reader method to send to +self+ to read +type+ + # @param [Numeric] length + # @return [Array] + # Read an array of +type+ of length +length+. + # @example + # ptr.read_array_of_type(TYPE_UINT8, :read_uint8, 4) # -> [1, 2, 3, 4] + def read_array_of_type(type, reader, length) + ary = [] + size = FFI.type_size(type) + tmp = self + length.times { |j| + ary << tmp.send(reader) + tmp += size unless j == length-1 # avoid OOB + } + ary + end unless method_defined?(:read_array_of_type) + + # @param [Type] type type of data to write to pointer's contents + # @param [Symbol] writer method to send to +self+ to write +type+ + # @param [Array] ary + # @return [self] + # Write +ary+ in pointer's contents as +type+. 
+ # @example + # ptr.write_array_of_type(TYPE_UINT8, :put_uint8, [1, 2, 3 ,4]) + def write_array_of_type(type, writer, ary) + size = FFI.type_size(type) + ary.each_with_index { |val, i| + break unless i < self.size + self.send(writer, i * size, val) + } + self + end unless method_defined?(:write_array_of_type) + + # @return [self] + def to_ptr + self + end unless method_defined?(:to_ptr) + + # @param [Symbol,Type] type of data to read + # @return [Object] + # Read pointer's contents as +type+ + # + # Same as: + # ptr.get(type, 0) + def read(type) + get(type, 0) + end unless method_defined?(:read) + + # @param [Symbol,Type] type of data to read + # @param [Object] value to write + # @return [nil] + # Write +value+ of type +type+ to pointer's content + # + # Same as: + # ptr.put(type, 0) + def write(type, value) + put(type, 0, value) + end unless method_defined?(:write) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct.rb new file mode 100644 index 0000000..7028258 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct.rb @@ -0,0 +1,316 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/platform' +require 'ffi/struct_layout' +require 'ffi/struct_layout_builder' +require 'ffi/struct_by_reference' + +module FFI + + class Struct + + # Get struct size + # @return [Numeric] + def size + self.class.size + end + + # @return [Fixnum] Struct alignment + def alignment + self.class.alignment + end + alias_method :align, :alignment + + # (see FFI::StructLayout#offset_of) + def offset_of(name) + self.class.offset_of(name) + end + + # (see FFI::StructLayout#members) + def members + self.class.members + end + + # @return [Array] + # Get array of values from Struct fields. 
+ def values + members.map { |m| self[m] } + end + + # (see FFI::StructLayout#offsets) + def offsets + self.class.offsets + end + + # Clear the struct content. + # @return [self] + def clear + pointer.clear + self + end + + # Get {Pointer} to struct content. + # @return [AbstractMemory] + def to_ptr + pointer + end + + # Get struct size + # @return [Numeric] + def self.size + defined?(@layout) ? @layout.size : defined?(@size) ? @size : 0 + end + + # set struct size + # @param [Numeric] size + # @return [size] + def self.size=(size) + raise ArgumentError, "Size already set" if defined?(@size) || defined?(@layout) + @size = size + end + + # @return (see Struct#alignment) + def self.alignment + @layout.alignment + end + + # (see FFI::Type#members) + def self.members + @layout.members + end + + # (see FFI::StructLayout#offsets) + def self.offsets + @layout.offsets + end + + # (see FFI::StructLayout#offset_of) + def self.offset_of(name) + @layout.offset_of(name) + end + + def self.in + ptr(:in) + end + + def self.out + ptr(:out) + end + + def self.ptr(flags = :inout) + @ref_data_type ||= Type::Mapped.new(StructByReference.new(self)) + end + + def self.val + @val_data_type ||= StructByValue.new(self) + end + + def self.by_value + self.val + end + + def self.by_ref(flags = :inout) + self.ptr(flags) + end + + class ManagedStructConverter < StructByReference + + # @param [Struct] struct_class + def initialize(struct_class) + super(struct_class) + + raise NoMethodError, "release() not implemented for class #{struct_class}" unless struct_class.respond_to? :release + @method = struct_class.method(:release) + end + + # @param [Pointer] ptr + # @param [nil] ctx + # @return [Struct] + def from_native(ptr, ctx) + struct_class.new(AutoPointer.new(ptr, @method)) + end + end + + def self.auto_ptr + @managed_type ||= Type::Mapped.new(ManagedStructConverter.new(self)) + end + + + class << self + public + + # @return [StructLayout] + # @overload layout + # @return [StructLayout] + # Get struct layout. + # @overload layout(*spec) + # @param [Array,Array(Hash)] spec + # @return [StructLayout] + # Create struct layout from +spec+. + # @example Creating a layout from an array +spec+ + # class MyStruct < Struct + # layout :field1, :int, + # :field2, :pointer, + # :field3, :string + # end + # @example Creating a layout from an array +spec+ with offset + # class MyStructWithOffset < Struct + # layout :field1, :int, + # :field2, :pointer, 6, # set offset to 6 for this field + # :field3, :string + # end + # @example Creating a layout from a hash +spec+ + # class MyStructFromHash < Struct + # layout :field1 => :int, + # :field2 => :pointer, + # :field3 => :string + # end + # @example Creating a layout with pointers to functions + # class MyFunctionTable < Struct + # layout :function1, callback([:int, :int], :int), + # :function2, callback([:pointer], :void), + # :field3, :string + # end + def layout(*spec) + warn "[DEPRECATION] Struct layout is already defined for class #{self.inspect}. Redefinition as in #{caller[0]} will be disallowed in ffi-2.0." 
if defined?(@layout) + return @layout if spec.size == 0 + + builder = StructLayoutBuilder.new + builder.union = self < Union + builder.packed = @packed if defined?(@packed) + builder.alignment = @min_alignment if defined?(@min_alignment) + + if spec[0].kind_of?(Hash) + hash_layout(builder, spec) + else + array_layout(builder, spec) + end + builder.size = @size if defined?(@size) && @size > builder.size + cspec = builder.build + @layout = cspec unless self == Struct + @size = cspec.size + return cspec + end + + + protected + + def callback(params, ret) + mod = enclosing_module + ret_type = find_type(ret, mod) + if ret_type == Type::STRING + raise TypeError, ":string is not allowed as return type of callbacks" + end + FFI::CallbackInfo.new(ret_type, params.map { |e| find_type(e, mod) }) + end + + def packed(packed = 1) + @packed = packed + end + alias :pack :packed + + def aligned(alignment = 1) + @min_alignment = alignment + end + alias :align :aligned + + def enclosing_module + begin + mod = self.name.split("::")[0..-2].inject(Object) { |obj, c| obj.const_get(c) } + if mod.respond_to?(:find_type) && (mod.is_a?(FFI::Library) || mod < FFI::Struct) + mod + end + rescue Exception + nil + end + end + + + def find_field_type(type, mod = enclosing_module) + if type.kind_of?(Class) && type < Struct + FFI::Type::Struct.new(type) + + elsif type.kind_of?(Class) && type < FFI::StructLayout::Field + type + + elsif type.kind_of?(::Array) + FFI::Type::Array.new(find_field_type(type[0]), type[1]) + + else + find_type(type, mod) + end + end + + def find_type(type, mod = enclosing_module) + if mod + mod.find_type(type) + end || FFI.find_type(type) + end + + private + + # @param [StructLayoutBuilder] builder + # @param [Hash] spec + # @return [builder] + # Add hash +spec+ to +builder+. + def hash_layout(builder, spec) + spec[0].each do |name, type| + builder.add name, find_field_type(type), nil + end + end + + # @param [StructLayoutBuilder] builder + # @param [Array] spec + # @return [builder] + # Add array +spec+ to +builder+. + def array_layout(builder, spec) + i = 0 + while i < spec.size + name, type = spec[i, 2] + i += 2 + + # If the next param is a Integer, it specifies the offset + if spec[i].kind_of?(Integer) + offset = spec[i] + i += 1 + else + offset = nil + end + + builder.add name, find_field_type(type), offset + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_by_reference.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_by_reference.rb new file mode 100644 index 0000000..27f25ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_by_reference.rb @@ -0,0 +1,72 @@ +# +# Copyright (C) 2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.# + +module FFI + # This class includes the {FFI::DataConverter} module. + class StructByReference + include DataConverter + + attr_reader :struct_class + + # @param [Struct] struct_class + def initialize(struct_class) + unless Class === struct_class and struct_class < FFI::Struct + raise TypeError, 'wrong type (expected subclass of FFI::Struct)' + end + @struct_class = struct_class + end + + # Always get {FFI::Type}::POINTER. + def native_type + FFI::Type::POINTER + end + + # @param [nil, Struct] value + # @param [nil] ctx + # @return [AbstractMemory] Pointer on +value+. + def to_native(value, ctx) + return Pointer::NULL if value.nil? + + unless @struct_class === value + raise TypeError, "wrong argument type #{value.class} (expected #{@struct_class})" + end + + value.pointer + end + + # @param [AbstractMemory] value + # @param [nil] ctx + # @return [Struct] + # Create a struct from content of memory +value+. + def from_native(value, ctx) + @struct_class.new(value) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_layout.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_layout.rb new file mode 100644 index 0000000..d5a78a7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_layout.rb @@ -0,0 +1,96 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (C) 2008, 2009 Andrea Fazzi +# Copyright (C) 2008, 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + class StructLayout + + # @return [Array + # Get an array of tuples (field name, offset of the field). + def offsets + members.map { |m| [ m, self[m].offset ] } + end + + # @return [Numeric] + # Get the offset of a field. + def offset_of(field_name) + self[field_name].offset + end + + # An enum {Field} in a {StructLayout}. + class Enum < Field + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @return [Object] + # Get an object of type {#type} from memory pointed by +ptr+. + def get(ptr) + type.find(ptr.get_int(offset)) + end + + # @param [AbstractMemory] ptr pointer on a {Struct} + # @param value + # @return [nil] + # Set +value+ into memory pointed by +ptr+. + def put(ptr, value) + ptr.put_int(offset, type.find(value)) + end + + end + + class InnerStruct < Field + def get(ptr) + type.struct_class.new(ptr.slice(self.offset, self.size)) + end + + def put(ptr, value) + raise TypeError, "wrong value type (expected #{type.struct_class})" unless value.is_a?(type.struct_class) + ptr.slice(self.offset, self.size).__copy_from__(value.pointer, self.size) + end + end + + class Mapped < Field + def initialize(name, offset, type, orig_field) + super(name, offset, type) + @orig_field = orig_field + end + + def get(ptr) + type.from_native(@orig_field.get(ptr), nil) + end + + def put(ptr, value) + @orig_field.put(ptr, type.to_native(value, nil)) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_layout_builder.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_layout_builder.rb new file mode 100644 index 0000000..4d6a464 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/struct_layout_builder.rb @@ -0,0 +1,227 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + + # Build a {StructLayout struct layout}. + class StructLayoutBuilder + attr_reader :size + attr_reader :alignment + + def initialize + @size = 0 + @alignment = 1 + @min_alignment = 1 + @packed = false + @union = false + @fields = Array.new + end + + # Set size attribute with +size+ only if +size+ is greater than attribute value. + # @param [Numeric] size + def size=(size) + @size = size if size > @size + end + + # Set alignment attribute with +align+ only if it is greater than attribute value. + # @param [Numeric] align + def alignment=(align) + @alignment = align if align > @alignment + @min_alignment = align + end + + # Set union attribute. + # Set to +true+ to build a {Union} instead of a {Struct}. + # @param [Boolean] is_union + # @return [is_union] + def union=(is_union) + @union = is_union + end + + # Building a {Union} or a {Struct} ? + # + # @return [Boolean] + # + def union? + @union + end + + # Set packed attribute + # @overload packed=(packed) Set alignment and packed attributes to + # +packed+. + # + # @param [Fixnum] packed + # + # @return [packed] + # @overload packed=(packed) Set packed attribute. + # @param packed + # + # @return [0,1] + # + def packed=(packed) + if packed.is_a?(0.class) + @alignment = packed + @packed = packed + else + @packed = packed ? 1 : 0 + end + end + + + # List of number types + NUMBER_TYPES = [ + Type::INT8, + Type::UINT8, + Type::INT16, + Type::UINT16, + Type::INT32, + Type::UINT32, + Type::LONG, + Type::ULONG, + Type::INT64, + Type::UINT64, + Type::FLOAT32, + Type::FLOAT64, + Type::LONGDOUBLE, + Type::BOOL, + ] + + # @param [String, Symbol] name name of the field + # @param [Array, DataConverter, Struct, StructLayout::Field, Symbol, Type] type type of the field + # @param [Numeric, nil] offset + # @return [self] + # Add a field to the builder. + # @note Setting +offset+ to +nil+ or +-1+ is equivalent to +0+. + def add(name, type, offset = nil) + + if offset.nil? || offset == -1 + offset = @union ? 0 : align(@size, @packed ? [ @packed, type.alignment ].min : [ @min_alignment, type.alignment ].max) + end + + # + # If a FFI::Type type was passed in as the field arg, try and convert to a StructLayout::Field instance + # + field = type.is_a?(StructLayout::Field) ? type : field_for_type(name, offset, type) + @fields << field + @alignment = [ @alignment, field.alignment ].max unless @packed + @size = [ @size, field.size + (@union ? 0 : field.offset) ].max + + return self + end + + # @param (see #add) + # @return (see #add) + # Same as {#add}. + # @see #add + def add_field(name, type, offset = nil) + add(name, type, offset) + end + + # @param (see #add) + # @return (see #add) + # Add a struct as a field to the builder. + def add_struct(name, type, offset = nil) + add(name, Type::Struct.new(type), offset) + end + + # @param name (see #add) + # @param type (see #add) + # @param [Numeric] count array length + # @param offset (see #add) + # @return (see #add) + # Add an array as a field to the builder. 
+ def add_array(name, type, count, offset = nil) + add(name, Type::Array.new(type, count), offset) + end + + # @return [StructLayout] + # Build and return the struct layout. + def build + # Add tail padding if the struct is not packed + size = @packed ? @size : align(@size, @alignment) + + layout = StructLayout.new(@fields, size, @alignment) + layout.__union! if @union + layout + end + + private + + # @param [Numeric] offset + # @param [Numeric] align + # @return [Numeric] + def align(offset, align) + align + ((offset - 1) & ~(align - 1)); + end + + # @param (see #add) + # @return [StructLayout::Field] + def field_for_type(name, offset, type) + field_class = case + when type.is_a?(Type::Function) + StructLayout::Function + + when type.is_a?(Type::Struct) + StructLayout::InnerStruct + + when type.is_a?(Type::Array) + StructLayout::Array + + when type.is_a?(FFI::Enum) + StructLayout::Enum + + when NUMBER_TYPES.include?(type) + StructLayout::Number + + when type == Type::POINTER + StructLayout::Pointer + + when type == Type::STRING + StructLayout::String + + when type.is_a?(Class) && type < StructLayout::Field + type + + when type.is_a?(DataConverter) + return StructLayout::Mapped.new(name, offset, Type::Mapped.new(type), field_for_type(name, offset, type.native_type)) + + when type.is_a?(Type::Mapped) + return StructLayout::Mapped.new(name, offset, type, field_for_type(name, offset, type.native_type)) + + else + raise TypeError, "invalid struct field type #{type.inspect}" + end + + field_class.new(name, offset, type) + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/const_generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/const_generator.rb new file mode 100644 index 0000000..70ba9c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/const_generator.rb @@ -0,0 +1,232 @@ +require 'tempfile' +require 'open3' + +module FFI + + # ConstGenerator turns C constants into ruby values. + # + # @example a simple example for stdio + # require 'ffi/tools/const_generator' + # cg = FFI::ConstGenerator.new('stdio') do |gen| + # gen.const(:SEEK_SET) + # gen.const('SEEK_CUR') + # gen.const('seek_end') # this constant does not exist + # end # #calculate called automatically at the end of the block + # + # cg['SEEK_SET'] # => 0 + # cg['SEEK_CUR'] # => 1 + # cg['seek_end'] # => nil + # cg.to_ruby # => "SEEK_SET = 0\nSEEK_CUR = 1\n# seek_end not available" + class ConstGenerator + @options = {} + attr_reader :constants + + # Creates a new constant generator that uses +prefix+ as a name, and an + # options hash. + # + # The only option is +:required+, which if set to +true+ raises an error if a + # constant you have requested was not found. + # + # @param [#to_s] prefix + # @param [Hash] options + # @return + # @option options [Boolean] :required + # @overload initialize(prefix, options) + # @overload initialize(prefix, options) { |gen| ... } + # @yieldparam [ConstGenerator] gen new generator is passed to the block + # When passed a block, {#calculate} is automatically called at the end of + # the block, otherwise you must call it yourself. + def initialize(prefix = nil, options = {}) + @includes = ['stdio.h', 'stddef.h'] + @constants = {} + @prefix = prefix + + @required = options[:required] + @options = options + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + # Set class options + # These options are merged with {#initialize} options when it is called with a block. 
+ # @param [Hash] options + # @return [Hash] class options + def self.options=(options) + @options = options + end + # Get class options. + # @return [Hash] class options + def self.options + @options + end + # @param [String] name + # @return constant value (converted if a +converter+ was defined). + # Access a constant by name. + def [](name) + @constants[name].converted_value + end + + # Request the value for C constant +name+. + # + # @param [#to_s] name C constant name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # + # @overload const(name, format=nil, cast='', ruby_name=nil, converter=nil) + # +converter+ is a Method or a Proc. + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + # @overload const(name, format=nil, cast='', ruby_name=nil) { |value| ... } + # Use a converter block. This block convert the value from a string to the + # appropriate type for {#to_ruby}. + # @yieldparam value constant value + def const(name, format = nil, cast = '', ruby_name = nil, converter = nil, + &converter_proc) + format ||= '%d' + cast ||= '' + + if converter_proc and converter then + raise ArgumentError, "Supply only converter or converter block" + end + + converter = converter_proc if converter.nil? + + const = Constant.new name, format, cast, ruby_name, converter + @constants[name.to_s] = const + return const + end + + # Calculate constants values. + # @param [Hash] options + # @option options [String] :cppflags flags for C compiler + # @return [nil] + # @raise if a constant is missing and +:required+ was set to +true+ (see {#initialize}) + def calculate(options = {}) + binary_path = nil + + Tempfile.open("#{@prefix}.const_generator") do |f| + binary_path = f.path + ".bin" + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + f.puts "\nint main(int argc, char **argv)\n{" + + @constants.each_value do |const| + f.puts <<-EOF + #ifdef #{const.name} + printf("#{const.name} #{const.format}\\n", #{const.cast}#{const.name}); + #endif + EOF + end + + f.puts "\n\treturn 0;\n}" + f.flush + + cc = ENV['CC'] || 'gcc' + output = `#{cc} #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary_path} 2>&1` + + unless $?.success? then + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating constants #{@prefix}:\n#{output}" + end + end + + output = `#{binary_path}` + File.unlink(binary_path + (FFI::Platform.windows? ? ".exe" : "")) + output.each_line do |line| + line =~ /^(\S+)\s(.*)$/ + const = @constants[$1] + const.value = $2 + end + + missing_constants = @constants.select do |name, constant| + constant.value.nil? + end.map { |name,| name } + + if @required and not missing_constants.empty? then + raise "Missing required constants for #{@prefix}: #{missing_constants.join ', '}" + end + end + + # Dump constants to +io+. + # @param [#puts] io + # @return [nil] + def dump_constants(io) + @constants.each do |name, constant| + name = [@prefix, name].join '.' if @prefix + io.puts "#{name} = #{constant.converted_value}" + end + end + + # Outputs values for discovered constants. If the constant's value was + # not discovered it is not omitted. + # @return [String] + def to_ruby + @constants.sort_by { |name,| name }.map do |name, constant| + if constant.value.nil? 
then + "# #{name} not available" + else + constant.to_ruby + end + end.join "\n" + end + + # Add additional C include file(s) to calculate constants from. + # @note +stdio.h+ and +stddef.h+ automatically included + # @param [List, Array] i include file(s) + # @return [Array] array of include files + def include(*i) + @includes |= i.flatten + end + + end + + # This class hold constants for {ConstGenerator} + class ConstGenerator::Constant + + attr_reader :name, :format, :cast + attr_accessor :value + + # @param [#to_s] name + # @param [String] format a printf format string to print the value out + # @param [String] cast a C cast for the value + # @param ruby_name alternate ruby name for {#to_ruby} + # @param [#call] converter convert the value from a string to the appropriate + # type for {#to_ruby}. + def initialize(name, format, cast, ruby_name = nil, converter=nil) + @name = name + @format = format + @cast = cast + @ruby_name = ruby_name + @converter = converter + @value = nil + end + + # Return constant value (converted if a +converter+ was defined). + # @return constant value. + def converted_value + if @converter + @converter.call(@value) + else + @value + end + end + + # get constant ruby name + # @return [String] + def ruby_name + @ruby_name || @name + end + + # Get an evaluable string from constant. + # @return [String] + def to_ruby + "#{ruby_name} = #{converted_value}" + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/generator.rb new file mode 100644 index 0000000..5552ea5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/generator.rb @@ -0,0 +1,105 @@ +require 'ffi/tools/struct_generator' +require 'ffi/tools/const_generator' + +module FFI + + ## + # Generate files with C structs for FFI::Struct and C constants. 
+ # + # == A simple example + # + # In file +zlib.rb.ffi+: + # module Zlib + # @@@ + # constants do |c| + # c.include "zlib.h" + # c.const :ZLIB_VERNUM + # end + # @@@ + # + # class ZStream < FFI::Struct + # + # struct do |s| + # s.name "struct z_stream_s" + # s.include "zlib.h" + # + # s.field :next_in, :pointer + # s.field :avail_in, :uint + # s.field :total_in, :ulong + # end + # @@@ + # end + # end + # + # Translate the file: + # require "ffi/tools/generator" + # FFI::Generator.new "zlib.rb.ffi", "zlib.rb" + # + # Generates the file +zlib.rb+ with constant values and offsets: + # module Zlib + # ZLIB_VERNUM = 4784 + # + # class ZStream < FFI::Struct + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 8, + # :total_in, :ulong, 16 + # end + # + # @see FFI::Generator::Task for easy integration in a Rakefile + class Generator + + def initialize(ffi_name, rb_name, options = {}) + @ffi_name = ffi_name + @rb_name = rb_name + @options = options + @name = File.basename rb_name, '.rb' + + file = File.read @ffi_name + + new_file = file.gsub(/^( *)@@@(.*?)@@@/m) do + @constants = [] + @structs = [] + + indent = $1 + original_lines = $2.count "\n" + + instance_eval $2, @ffi_name, $`.count("\n") + + new_lines = [] + @constants.each { |c| new_lines << c.to_ruby } + @structs.each { |s| new_lines << s.generate_layout } + + new_lines = new_lines.join("\n").split "\n" # expand multiline blocks + new_lines = new_lines.map { |line| indent + line } + + padding = original_lines - new_lines.length + new_lines += [nil] * padding if padding >= 0 + + new_lines.join "\n" + end + + open @rb_name, 'w' do |f| + f.puts "# This file is generated from `#{@ffi_name}'. Do not edit." + f.puts + f.puts new_file + end + end + + def constants(options = {}, &block) + @constants << FFI::ConstGenerator.new(@name, @options.merge(options), &block) + end + + def struct(options = {}, &block) + @structs << FFI::StructGenerator.new(@name, @options.merge(options), &block) + end + + ## + # Utility converter for constants + + def to_s + proc { |obj| obj.to_s.inspect } + end + + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/generator_task.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/generator_task.rb new file mode 100644 index 0000000..da72968 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/generator_task.rb @@ -0,0 +1,32 @@ +require 'ffi/tools/generator' +require 'rake' +require 'rake/tasklib' + +## +# Add Rake tasks that generate files with C structs for FFI::Struct and C constants. +# +# @example a simple example for your Rakefile +# require "ffi/tools/generator_task" +# # Add a task to generate my_object.rb out of my_object.rb.ffi +# FFI::Generator::Task.new ["my_object.rb"], cflags: "-I/usr/local/mylibrary" +# +# The generated files are also added to the 'clear' task. +# +# @see FFI::Generator for a description of the file content +class FFI::Generator::Task < Rake::TaskLib + + def initialize(rb_names, options={}) + task :clean do rm_f rb_names end + + rb_names.each do |rb_name| + ffi_name = "#{rb_name}.ffi" + + file rb_name => ffi_name do |t| + puts "Generating #{rb_name}..." 
if Rake.application.options.trace + + FFI::Generator.new ffi_name, rb_name, options + end + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/struct_generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/struct_generator.rb new file mode 100644 index 0000000..3a951c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/struct_generator.rb @@ -0,0 +1,195 @@ +require 'tempfile' + +module FFI + + ## + # Generates an FFI Struct layout. + # + # Given the @@@ portion in: + # + # class Zlib::ZStream < FFI::Struct + # @@@ + # name "struct z_stream_s" + # include "zlib.h" + # + # field :next_in, :pointer + # field :avail_in, :uint + # field :total_in, :ulong + # + # # ... + # @@@ + # end + # + # StructGenerator will create the layout: + # + # layout :next_in, :pointer, 0, + # :avail_in, :uint, 4, + # :total_in, :ulong, 8, + # # ... + # + # StructGenerator does its best to pad the layout it produces to preserve + # line numbers. Place the struct definition as close to the top of the file + # for best results. + + class StructGenerator + @options = {} + attr_accessor :size + attr_reader :fields + + def initialize(name, options = {}) + @name = name + @struct_name = nil + @includes = [] + @fields = [] + @found = false + @size = nil + + if block_given? then + yield self + calculate self.class.options.merge(options) + end + end + def self.options=(options) + @options = options + end + def self.options + @options + end + def calculate(options = {}) + binary = File.join Dir.tmpdir, "rb_struct_gen_bin_#{Process.pid}" + + raise "struct name not set" if @struct_name.nil? + + Tempfile.open("#{@name}.struct_generator") do |f| + f.puts "#include " + + @includes.each do |inc| + f.puts "#include <#{inc}>" + end + + f.puts "#include \n\n" + f.puts "int main(int argc, char **argv)\n{" + f.puts " #{@struct_name} s;" + f.puts %[ printf("sizeof(#{@struct_name}) %u\\n", (unsigned int) sizeof(#{@struct_name}));] + + @fields.each do |field| + f.puts <<-EOF + printf("#{field.name} %u %u\\n", (unsigned int) offsetof(#{@struct_name}, #{field.name}), + (unsigned int) sizeof(s.#{field.name})); + EOF + end + + f.puts "\n return 0;\n}" + f.flush + + cc = ENV['CC'] || 'gcc' + output = `#{cc} #{options[:cppflags]} #{options[:cflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -x c -Wall -Werror #{f.path} -o #{binary} 2>&1` + + unless $?.success? then + @found = false + output = output.split("\n").map { |l| "\t#{l}" }.join "\n" + raise "Compilation error generating struct #{@name} (#{@struct_name}):\n#{output}" + end + end + + output = `#{binary}`.split "\n" + File.unlink(binary + (FFI::Platform.windows? ? ".exe" : "")) + sizeof = output.shift + unless @size + m = /\s*sizeof\([^)]+\) (\d+)/.match sizeof + @size = m[1] + end + + line_no = 0 + output.each do |line| + md = line.match(/.+ (\d+) (\d+)/) + @fields[line_no].offset = md[1].to_i + @fields[line_no].size = md[2].to_i + + line_no += 1 + end + + @found = true + end + + def field(name, type=nil) + field = Field.new(name, type) + @fields << field + return field + end + + def found? + @found + end + + def dump_config(io) + io.puts "rbx.platform.#{@name}.sizeof = #{@size}" + + @fields.each { |field| io.puts field.to_config(@name) } + end + + def generate_layout + buf = "" + + @fields.each_with_index do |field, i| + if buf.empty? 
+ buf << "layout :#{field.name}, :#{field.type}, #{field.offset}" + else + buf << " :#{field.name}, :#{field.type}, #{field.offset}" + end + + if i < @fields.length - 1 + buf << ",\n" + end + end + + buf + end + + def get_field(name) + @fields.find { |f| name == f.name } + end + + def include(i) + @includes << i + end + + def name(n) + @struct_name = n + end + + end + + ## + # A field in a Struct. + + class StructGenerator::Field + + attr_reader :name + attr_reader :type + attr_reader :offset + attr_accessor :size + + def initialize(name, type) + @name = name + @type = type + @offset = nil + @size = nil + end + + def offset=(o) + @offset = o + end + + def to_config(name) + buf = [] + buf << "rbx.platform.#{name}.#{@name}.offset = #{@offset}" + buf << "rbx.platform.#{name}.#{@name}.size = #{@size}" + buf << "rbx.platform.#{name}.#{@name}.type = #{@type}" if @type + buf + end + + end + +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/types_generator.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/types_generator.rb new file mode 100644 index 0000000..ba2d8c5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/tools/types_generator.rb @@ -0,0 +1,137 @@ +require 'tempfile' + +module FFI + + # @private + class TypesGenerator + + ## + # Maps different C types to the C type representations we use + + TYPE_MAP = { + "char" => :char, + "signed char" => :char, + "__signed char" => :char, + "unsigned char" => :uchar, + + "short" => :short, + "signed short" => :short, + "signed short int" => :short, + "unsigned short" => :ushort, + "unsigned short int" => :ushort, + + "int" => :int, + "signed int" => :int, + "unsigned int" => :uint, + + "long" => :long, + "long int" => :long, + "signed long" => :long, + "signed long int" => :long, + "unsigned long" => :ulong, + "unsigned long int" => :ulong, + "long unsigned int" => :ulong, + + "long long" => :long_long, + "long long int" => :long_long, + "signed long long" => :long_long, + "signed long long int" => :long_long, + "unsigned long long" => :ulong_long, + "unsigned long long int" => :ulong_long, + + "char *" => :string, + "void *" => :pointer, + } + + def self.generate(options = {}) + typedefs = nil + Tempfile.open 'ffi_types_generator' do |io| + io.puts <<-C +#include +#include +#include +#if !(defined(WIN32)) +#include +#include +#include +#endif + C + + io.close + cc = ENV['CC'] || 'gcc' + cmd = "#{cc} -E -x c #{options[:cppflags]} -D_DARWIN_USE_64_BIT_INODE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -c" + if options[:input] + typedefs = File.read(options[:input]) + elsif options[:remote] + typedefs = `ssh #{options[:remote]} #{cmd} - < #{io.path}` + else + typedefs = `#{cmd} #{io.path}` + end + end + + code = [] + + typedefs.each_line do |type| + # We only care about single line typedef + next unless type =~ /typedef/ + # Ignore unions or structs + next if type =~ /union|struct/ + + # strip off the starting typedef and ending ; + type.gsub!(/^(.*typedef\s*)/, "") + type.gsub!(/\s*;\s*$/, "") + + parts = type.split(/\s+/) + def_type = parts.join(" ") + + # GCC does mapping with __attribute__ stuf, also see + # http://hal.cs.berkeley.edu/cil/cil016.html section 16.2.7. Problem + # with this is that the __attribute__ stuff can either occur before or + # after the new type that is defined... 
+ if type =~ /__attribute__/ + if parts.last =~ /__QI__|__HI__|__SI__|__DI__|__word__/ + + # In this case, the new type is BEFORE __attribute__ we need to + # find the final_type as the type before the part that starts with + # __attribute__ + final_type = "" + parts.each do |p| + break if p =~ /__attribute__/ + final_type = p + end + else + final_type = parts.pop + end + + def_type = case type + when /__QI__/ then "char" + when /__HI__/ then "short" + when /__SI__/ then "int" + when /__DI__/ then "long long" + when /__word__/ then "long" + else "int" + end + + def_type = "unsigned #{def_type}" if type =~ /unsigned/ + else + final_type = parts.pop + def_type = parts.join(" ") + end + + if type = TYPE_MAP[def_type] + code << "rbx.platform.typedef.#{final_type} = #{type}" + TYPE_MAP[final_type] = TYPE_MAP[def_type] + else + # Fallback to an ordinary pointer if we don't know the type + if def_type =~ /\*/ + code << "rbx.platform.typedef.#{final_type} = pointer" + TYPE_MAP[final_type] = :pointer + end + end + end + + code.sort.join("\n") + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/types.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/types.rb new file mode 100644 index 0000000..90f50c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/types.rb @@ -0,0 +1,194 @@ +# +# Copyright (C) 2008-2010 Wayne Meissner +# Copyright (c) 2007, 2008 Evan Phoenix +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# see {file:README} +module FFI + + # @param [Type, DataConverter, Symbol] old type definition used by {FFI.find_type} + # @param [Symbol] add new type definition's name to add + # @return [Type] + # Add a definition type to type definitions. 
+ def self.typedef(old, add) + TypeDefs[add] = self.find_type(old) + end + + # (see FFI.typedef) + def self.add_typedef(old, add) + typedef old, add + end + + + # @param [Type, DataConverter, Symbol] name + # @param [Hash] type_map if nil, {FFI::TypeDefs} is used + # @return [Type] + # Find a type in +type_map+ ({FFI::TypeDefs}, by default) from + # a type objet, a type name (symbol). If +name+ is a {DataConverter}, + # a new {Type::Mapped} is created. + def self.find_type(name, type_map = nil) + if name.is_a?(Type) + name + + elsif type_map && type_map.has_key?(name) + type_map[name] + + elsif TypeDefs.has_key?(name) + TypeDefs[name] + + elsif name.is_a?(DataConverter) + (type_map || TypeDefs)[name] = Type::Mapped.new(name) + else + raise TypeError, "unable to resolve type '#{name}'" + end + end + + # List of type definitions + TypeDefs.merge!({ + # The C void type; only useful for function return types + :void => Type::VOID, + + # C boolean type + :bool => Type::BOOL, + + # C nul-terminated string + :string => Type::STRING, + + # C signed char + :char => Type::CHAR, + # C unsigned char + :uchar => Type::UCHAR, + + # C signed short + :short => Type::SHORT, + # C unsigned short + :ushort => Type::USHORT, + + # C signed int + :int => Type::INT, + # C unsigned int + :uint => Type::UINT, + + # C signed long + :long => Type::LONG, + + # C unsigned long + :ulong => Type::ULONG, + + # C signed long long integer + :long_long => Type::LONG_LONG, + + # C unsigned long long integer + :ulong_long => Type::ULONG_LONG, + + # C single precision float + :float => Type::FLOAT, + + # C double precision float + :double => Type::DOUBLE, + + # C long double + :long_double => Type::LONGDOUBLE, + + # Native memory address + :pointer => Type::POINTER, + + # 8 bit signed integer + :int8 => Type::INT8, + # 8 bit unsigned integer + :uint8 => Type::UINT8, + + # 16 bit signed integer + :int16 => Type::INT16, + # 16 bit unsigned integer + :uint16 => Type::UINT16, + + # 32 bit signed integer + :int32 => Type::INT32, + # 32 bit unsigned integer + :uint32 => Type::UINT32, + + # 64 bit signed integer + :int64 => Type::INT64, + # 64 bit unsigned integer + :uint64 => Type::UINT64, + + :buffer_in => Type::BUFFER_IN, + :buffer_out => Type::BUFFER_OUT, + :buffer_inout => Type::BUFFER_INOUT, + + # Used in function prototypes to indicate the arguments are variadic + :varargs => Type::VARARGS, + }) + + # This will convert a pointer to a Ruby string (just like `:string`), but + # also allow to work with the pointer itself. This is useful when you want + # a Ruby string already containing a copy of the data, but also the pointer + # to the data for you to do something with it, like freeing it, in case the + # library handed the memory off to the caller (Ruby-FFI). + # + # It's {typedef}'d as +:strptr+. + class StrPtrConverter + extend DataConverter + native_type Type::POINTER + + # @param [Pointer] val + # @param ctx not used + # @return [Array(String, Pointer)] + # Returns a [ String, Pointer ] tuple so the C memory for the string can be freed + def self.from_native(val, ctx) + [ val.null? ? nil : val.get_string(0), val ] + end + end + + typedef(StrPtrConverter, :strptr) + + # @param type +type+ is an instance of class accepted by {FFI.find_type} + # @return [Numeric] + # Get +type+ size, in bytes. + def self.type_size(type) + find_type(type).size + end + + # Load all the platform dependent types + begin + File.open(File.join(Platform::CONF_DIR, 'types.conf'), "r") do |f| + prefix = "rbx.platform.typedef." 
+ f.each_line { |line| + if line.index(prefix) == 0 + new_type, orig_type = line.chomp.slice(prefix.length..-1).split(/\s*=\s*/) + typedef(orig_type.to_sym, new_type.to_sym) + end + } + end + typedef :pointer, :caddr_t + rescue Errno::ENOENT + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/union.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/union.rb new file mode 100644 index 0000000..38414ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/union.rb @@ -0,0 +1,43 @@ +# +# Copyright (C) 2009 Andrea Fazzi +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +require 'ffi/struct' + +module FFI + + class Union < FFI::Struct + def self.builder + b = StructLayoutBuilder.new + b.union = true + b + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/variadic.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/variadic.rb new file mode 100644 index 0000000..743ce7f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/variadic.rb @@ -0,0 +1,69 @@ +# +# Copyright (C) 2008, 2009 Wayne Meissner +# Copyright (C) 2009 Luc Heinrich +# +# This file is part of ruby-ffi. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the Ruby FFI project nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +module FFI + class VariadicInvoker + def call(*args, &block) + param_types = Array.new(@fixed) + param_values = Array.new + @fixed.each_with_index do |t, i| + param_values << args[i] + end + i = @fixed.length + while i < args.length + param_types << FFI.find_type(args[i], @type_map) + param_values << args[i + 1] + i += 2 + end + invoke(param_types, param_values, &block) + end + + # + # Attach the invoker to module +mod+ as +mname+ + # + def attach(mod, mname) + invoker = self + params = "*args" + call = "call" + mod.module_eval <<-code + @@#{mname} = invoker + def self.#{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + def #{mname}(#{params}) + @@#{mname}.#{call}(#{params}) + end + code + invoker + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/version.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/version.rb new file mode 100644 index 0000000..3027569 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi/version.rb @@ -0,0 +1,3 @@ +module FFI + VERSION = '1.15.5' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi_c.bundle b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi_c.bundle new file mode 100755 index 0000000..8edc525 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/lib/ffi_c.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/rakelib/ffi_gem_helper.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/rakelib/ffi_gem_helper.rb new file mode 100644 index 0000000..74be131 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/rakelib/ffi_gem_helper.rb @@ -0,0 +1,65 @@ +require 'bundler' +require 'bundler/gem_helper' + +class FfiGemHelper < Bundler::GemHelper + attr_accessor :cross_platforms + + def install + super + + task "release:guard_clean" => ["release:update_history"] + + task "release:update_history" do + update_history + end + + task "release:rubygem_push" => ["gem:native", "gem:java"] + end + + def hfile + "CHANGELOG.md" + end + + def headline + '([^\w]*)(\d+\.\d+\.\d+(?:\.\w+)?)([^\w]+)([2Y][0Y][0-9Y][0-9Y]-[0-1M][0-9M]-[0-3D][0-9D])([^\w]*|$)' + end + + def reldate + Time.now.strftime("%Y-%m-%d") + end + + def update_history + hin = File.read(hfile) + hout = hin.sub(/#{headline}/) do + raise "#{hfile} isn't up-to-date for version #{version}" unless $2==version.to_s + $1 + $2 + $3 + reldate + $5 + end + if hout != hin + Bundler.ui.confirm "Updating #{hfile} for release." 
+ File.write(hfile, hout) + Rake::FileUtilsExt.sh "git", "commit", hfile, "-m", "Update release date in #{hfile}" + end + end + + def tag_version + Bundler.ui.confirm "Tag release with annotation:" + m = File.read(hfile).match(/(?#{headline}.*?)#{headline}/m) || raise("Unable to find release notes in #{hfile}") + Bundler.ui.info(m[:annotation].gsub(/^/, " ")) + IO.popen(["git", "tag", "--file=-", version_tag], "w") do |fd| + fd.write m[:annotation] + end + yield if block_given? + rescue + Bundler.ui.error "Untagging #{version_tag} due to error." + sh_with_code "git tag -d #{version_tag}" + raise + end + + def rubygem_push(path) + cross_platforms.each do |ruby_platform| + super(path.gsub(/\.gem\z/, "-#{ruby_platform}.gem")) + end + super(path.gsub(/\.gem\z/, "-java.gem")) + super(path) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/getlogin.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/getlogin.rb new file mode 100644 index 0000000..6713021 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/getlogin.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getlogin, [ ], :string +end +puts "getlogin=#{Foo.getlogin}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/getpid.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/getpid.rb new file mode 100644 index 0000000..1720635 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/getpid.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :getpid, [ ], :int +end +puts "My pid=#{Foo.getpid}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/gettimeofday.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/gettimeofday.rb new file mode 100644 index 0000000..864bbb6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/gettimeofday.rb @@ -0,0 +1,18 @@ +require 'ffi' +require 'rbconfig' + +class Timeval < FFI::Struct + layout tv_sec: :ulong, tv_usec: :ulong +end +module LibC + extend FFI::Library + if FFI::Platform.windows? 
+ ffi_lib RbConfig::CONFIG["LIBRUBY_SO"] + else + ffi_lib FFI::Library::LIBC + end + attach_function :gettimeofday, [ :pointer, :pointer ], :int +end +t = Timeval.new +LibC.gettimeofday(t.pointer, nil) +puts "t.tv_sec=#{t[:tv_sec]} t.tv_usec=#{t[:tv_usec]}" diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/hello.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/hello.rb new file mode 100644 index 0000000..f2ccf37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/hello.rb @@ -0,0 +1,8 @@ +require 'ffi' + +module Foo + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function("cputs", "puts", [ :string ], :int) +end +Foo.cputs("Hello, World via libc puts using FFI on MRI ruby") diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/inotify.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/inotify.rb new file mode 100644 index 0000000..018d78c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/inotify.rb @@ -0,0 +1,60 @@ +require 'ffi' + +module Inotify + extend FFI::Library + ffi_lib FFI::Library::LIBC + class Event < FFI::Struct + layout \ + :wd, :int, + :mask, :uint, + :cookie, :uint, + :len, :uint + end + attach_function :init, :inotify_init, [ ], :int + attach_function :add_watch, :inotify_add_watch, [ :int, :string, :uint ], :int + attach_function :rm_watch, :inotify_rm_watch, [ :int, :uint ], :int + attach_function :read, [ :int, :buffer_out, :uint ], :int + IN_ACCESS=0x00000001 + IN_MODIFY=0x00000002 + IN_ATTRIB=0x00000004 + IN_CLOSE_WRITE=0x00000008 + IN_CLOSE_NOWRITE=0x00000010 + IN_CLOSE=(IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_OPEN=0x00000020 + IN_MOVED_FROM=0x00000040 + IN_MOVED_TO=0x00000080 + IN_MOVE= (IN_MOVED_FROM | IN_MOVED_TO) + IN_CREATE=0x00000100 + IN_DELETE=0x00000200 + IN_DELETE_SELF=0x00000400 + IN_MOVE_SELF=0x00000800 + # Events sent by the kernel. + IN_UNMOUNT=0x00002000 + IN_Q_OVERFLOW=0x00004000 + IN_IGNORED=0x00008000 + IN_ONLYDIR=0x01000000 + IN_DONT_FOLLOW=0x02000000 + IN_MASK_ADD=0x20000000 + IN_ISDIR=0x40000000 + IN_ONESHOT=0x80000000 + IN_ALL_EVENTS=(IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE \ + | IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM \ + | IN_MOVED_TO | IN_CREATE | IN_DELETE \ + | IN_DELETE_SELF | IN_MOVE_SELF) + +end +if $0 == __FILE__ + fd = Inotify.init + puts "fd=#{fd}" + wd = Inotify.add_watch(fd, "/tmp/", Inotify::IN_ALL_EVENTS) + fp = FFI::IO.for_fd(fd) + puts "wfp=#{fp}" + while true + buf = FFI::Buffer.alloc_out(Inotify::Event.size + 4096, 1, false) + ev = Inotify::Event.new buf + ready = IO.select([ fp ], nil, nil, nil) + n = Inotify.read(fd, buf, buf.total) + puts "Read #{n} bytes from inotify fd" + puts "event.wd=#{ev[:wd]} mask=#{ev[:mask]} len=#{ev[:len]} name=#{ev[:len] > 0 ? 
buf.get_string(16) : 'unknown'}" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/pty.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/pty.rb new file mode 100644 index 0000000..8b6b885 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/pty.rb @@ -0,0 +1,75 @@ +require 'ffi' + +module PTY + private + module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + attach_function :forkpty, [ :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :openpty, [ :buffer_out, :buffer_out, :buffer_out, :buffer_in, :buffer_in ], :int + attach_function :login_tty, [ :int ], :int + attach_function :close, [ :int ], :int + attach_function :strerror, [ :int ], :string + attach_function :fork, [], :int + attach_function :execv, [ :string, :buffer_in ], :int + attach_function :execvp, [ :string, :buffer_in ], :int + attach_function :dup2, [ :int, :int ], :int + attach_function :dup, [ :int ], :int + end + Buffer = FFI::Buffer + def self.build_args(args) + cmd = args.shift + cmd_args = args.map do |arg| + MemoryPointer.from_string(arg) + end + exec_args = MemoryPointer.new(:pointer, 1 + cmd_args.length + 1) + exec_cmd = MemoryPointer.from_string(cmd) + exec_args[0].put_pointer(0, exec_cmd) + cmd_args.each_with_index do |arg, i| + exec_args[i + 1].put_pointer(0, arg) + end + [ cmd, exec_args ] + end + public + def self.getpty(*args) + mfdp = Buffer.new :int + name = Buffer.new 1024 + # + # All the execv setup is done in the parent, since doing anything other than + # execv in the child after fork is really flakey + # + exec_cmd, exec_args = build_args(args) + pid = LibC.forkpty(mfdp, name, nil, nil) + raise "forkpty failed: #{LibC.strerror(FFI.errno)}" if pid < 0 + if pid == 0 + LibC.execvp(exec_cmd, exec_args) + exit 1 + end + masterfd = mfdp.get_int(0) + rfp = FFI::IO.for_fd(masterfd, "r") + wfp = FFI::IO.for_fd(LibC.dup(masterfd), "w") + if block_given? + yield rfp, wfp, pid + rfp.close unless rfp.closed? + wfp.close unless wfp.closed? + else + [ rfp, wfp, pid ] + end + end + def self.spawn(*args, &block) + self.getpty("/bin/sh", "-c", args[0], &block) + end +end +module LibC + extend FFI::Library + attach_function :close, [ :int ], :int + attach_function :write, [ :int, :buffer_in, :ulong ], :long + attach_function :read, [ :int, :buffer_out, :ulong ], :long +end +PTY.getpty("/bin/ls", "-alR", "/") { |rfd, wfd, pid| +#PTY.spawn("ls -laR /") { |rfd, wfd, pid| + puts "child pid=#{pid}" + while !rfd.eof? && (buf = rfd.gets) + puts "child: '#{buf.strip}'" + end +} diff --git a/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/qsort.rb b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/qsort.rb new file mode 100644 index 0000000..58622c1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ffi-1.15.5/samples/qsort.rb @@ -0,0 +1,20 @@ +require 'ffi' + +module LibC + extend FFI::Library + ffi_lib FFI::Library::LIBC + callback :qsort_cmp, [ :pointer, :pointer ], :int + attach_function :qsort, [ :pointer, :ulong, :ulong, :qsort_cmp ], :int +end + +p = FFI::MemoryPointer.new(:int, 2) +p.put_array_of_int32(0, [ 2, 1 ]) +puts "ptr=#{p.inspect}" +puts "Before qsort #{p.get_array_of_int32(0, 2).join(', ')}" +LibC.qsort(p, 2, 4) do |p1, p2| + i1 = p1.get_int32(0) + i2 = p2.get_int32(0) + puts "In block: comparing #{i1} and #{i2}" + i1 < i2 ? -1 : i1 > i2 ? 
1 : 0 +end +puts "After qsort #{p.get_array_of_int32(0, 2).join(', ')}" diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.gitignore b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.gitignore new file mode 100644 index 0000000..9106b2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.gitignore @@ -0,0 +1,8 @@ +/.bundle/ +/.yardoc +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.rubocop.yml new file mode 100644 index 0000000..a91c832 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.rubocop.yml @@ -0,0 +1,30 @@ +Metrics/AbcSize: + Max: 35 + +Metrics/CyclomaticComplexity: + Max: 10 + +Metrics/LineLength: + Max: 105 + +Metrics/MethodLength: + Max: 30 + +Style/AsciiComments: + Exclude: + - 'lib/fourflusher/executable.rb' + +Style/ClassVars: + Exclude: + - 'lib/fourflusher/find.rb' + +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + - 'lib/fourflusher/version.rb' + - 'lib/fourflusher/compat.rb' + +Lint/Void: + Exclude: + - 'spec/unit_spec.rb' diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.travis.yml new file mode 100644 index 0000000..5163910 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/.travis.yml @@ -0,0 +1,8 @@ +language: ruby +rvm: + - 2.0.0-p648 + - 2.5.1 +cache: bundler +before_install: gem install bundler -v "~> 1.17" +script: + - bundle exec rake diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/CHANGELOG.md new file mode 100644 index 0000000..eaee477 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/CHANGELOG.md @@ -0,0 +1,142 @@ + +## 2.3.1 (2019-06-18) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed crash with very long devices list/slow computers. + [Martin Fiebig](https://github.com/mfiebig) + [#17](https://github.com/CocoaPods/fourflusher/issues/17) + + +## 2.3.0 (2019-06-05) + +##### Enhancements + +* Update simctl interface for Xcode 11. + [Colin Humber](https://github.com/colinhumber) + [#22](https://github.com/CocoaPods/fourflusher/pull/22) + +##### Bug Fixes + +* None. + + +## 2.2.0 (2019-01-28) + +##### Enhancements + +* Update simctl interface for Xcode 10.2. + [Jeff Kelley](https://github.com/SlaunchaMan) + [CocoaPods#8458](https://github.com/CocoaPods/CocoaPods/issues/8458) + +##### Bug Fixes + +* None. + + +## 2.1.0 (2018-10-17) + +##### Enhancements + +* Support Xcode 10.1. + +##### Bug Fixes + +* None. + + +## 2.0.1 (2016-10-16) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed crash with simulators comparison, when device model is undefined + [Roman Truba](https://github.com/dreddik) + [#9](https://github.com/CocoaPods/fourflusher/pull/9) + + +## v2.0.0 (2016-10-02) + +##### Breaking + +* Use JSON output from `xcrun simctl list`. 
Drop support for Xcode 6 (doesn't support JSON output) + [Ben Asher](https://github.com/benasher44) + [#6](https://github.com/CocoaPods/fourflusher/pull/6) + +##### Enhancements + +* Update simulator count for Travis + [Boris Bügling](https://github.com/neonichu) + [#5](https://github.com/CocoaPods/fourflusher/pull/5) + +* A more helpful error message for missing simulators + [Radek Pietruszewski](https://github.com/radex) + [#4](https://github.com/CocoaPods/fourflusher/pull/4) + +##### Bug Fixes + +* None + +## v1.0.1 (2016-06-25) + +##### Enhancements + +* Show better error for `:oldest` simulator search + [Boris Bügling](https://github.com/neonichu) + +## v1.0.0 (2016-06-24) + +##### Enhancements + +* Improve finding simulators + [Boris Bügling](https://github.com/neonichu) + +## v0.3.2 (2016-06-16) + +##### Enhancements + +* Support for parsing iOS 10.0 and tvOS 10.0 + [Boris Bügling](https://github.com/neonichu) + +##### Bug Fixes + +* Rubocop fixes + [Boris Bügling](https://github.com/neonichu) + +## v0.3.1 (2016-05-30) + +##### Enhancements + +* Travis CI improvements + [Boris Bügling](https://github.com/neonichu) + [#1](https://github.com/CocoaPods/fourflusher/pull/1) + +* Handle missing Xcode more gracefully + [Boris Bügling](https://github.com/neonichu) + +## v0.3.0 (2015-12-29) + +##### Enhancements + +* Allow specifying constraints for `destination()` + [Boris Bügling](https://github.com/neonichu) + +## v0.2.0 (2015-12-29) + +##### Enhancements + +* Allow constraining simulators by OS version + [Boris Bügling](https://github.com/neonichu) + +## v0.1.0 (2015-12-18) + +* List usable simulators. Find simulators by name. Get the destination setting for the simulator to pass to Xcodebuild + [Boris Bügling](https://github.com/neonichu) + diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Gemfile b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Gemfile new file mode 100644 index 0000000..fa75df1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Gemfile @@ -0,0 +1,3 @@ +source 'https://rubygems.org' + +gemspec diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Gemfile.lock new file mode 100644 index 0000000..76a65ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Gemfile.lock @@ -0,0 +1,47 @@ +PATH + remote: . + specs: + fourflusher (2.3.1) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.3.0) + astrolabe (1.3.1) + parser (~> 2.2) + diff-lcs (1.2.5) + parser (2.3.1.4) + ast (~> 2.2) + powerpack (0.1.1) + rainbow (2.1.0) + rake (10.5.0) + rspec (2.99.0) + rspec-core (~> 2.99.0) + rspec-expectations (~> 2.99.0) + rspec-mocks (~> 2.99.0) + rspec-core (2.99.2) + rspec-expectations (2.99.2) + diff-lcs (>= 1.1.3, < 2.0) + rspec-mocks (2.99.4) + rubocop (0.35.1) + astrolabe (~> 1.3) + parser (>= 2.2.3.0, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.7) + tins (<= 1.6.0) + ruby-progressbar (1.8.1) + tins (1.6.0) + +PLATFORMS + ruby + +DEPENDENCIES + bundler (~> 1.11) + fourflusher! 
+ rake (~> 10.0) + rspec (~> 2) + rubocop (~> 0.35.0) + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/LICENSE.txt new file mode 100644 index 0000000..0d5eb7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Boris Bügling + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/README.md b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/README.md new file mode 100644 index 0000000..d4dd384 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/README.md @@ -0,0 +1,33 @@ +# four-flusher + +A library for interacting with Xcode simulators. + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'fourflusher' +``` + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install fourflusher + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/CocoaPods/fourflusher. + +## License + +The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT). 
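The README above covers installation and development but not day-to-day usage. Since the gem's sources are vendored later in this diff (`lib/fourflusher/find.rb`, `lib/fourflusher/simctl.rb`, `lib/fourflusher/xcodebuild.rb`), here is a short, hedged sketch of how the `SimControl` API they define can be driven. The device, OS version, and printed output are illustrative assumptions, and the snippet only does anything useful on a macOS machine with Xcode's `xcrun simctl` available.

```ruby
require 'fourflusher'

sim_control = Fourflusher::SimControl.new

# Oldest installed iOS simulator that is at least iOS 9.0 (values are examples).
sim = sim_control.simulator(:oldest, :ios, '9.0')
puts sim
# => e.g. "iPhone 5s (38EAE7BD-90C3-4C3D-A672-3AF683EEC5A2) - iOS 9.3"

# The xcodebuild arguments a caller (such as CocoaPods) would pass on:
p sim_control.destination(:oldest, :ios, '9.0')
# => ["-destination", "id=38EAE7BD-90C3-4C3D-A672-3AF683EEC5A2"]
```

The UDID shown is copied from the sample `simctl` output embedded in `find.rb`; on a real machine it will differ.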
diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Rakefile b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Rakefile new file mode 100644 index 0000000..05f4f23 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/Rakefile @@ -0,0 +1,9 @@ +require 'bundler/gem_tasks' + +require 'rspec/core/rake_task' +RSpec::Core::RakeTask.new + +require 'rubocop/rake_task' +RuboCop::RakeTask.new(:rubocop) + +task default: [:spec, :rubocop] diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/bin/console b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/bin/console new file mode 100755 index 0000000..7124d1b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/bin/console @@ -0,0 +1,14 @@ +#!/usr/bin/env ruby + +require 'bundler/setup' +require 'fourflusher' + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require 'irb' +IRB.start diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/bin/setup b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/bin/setup new file mode 100755 index 0000000..dce67d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/bin/setup @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' +set -vx + +bundle install + +# Do any other automated setup that you need to do here diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/fourflusher.gemspec b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/fourflusher.gemspec new file mode 100644 index 0000000..b3003eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/fourflusher.gemspec @@ -0,0 +1,27 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'fourflusher/version' + +Gem::Specification.new do |spec| + spec.name = 'fourflusher' + spec.version = Fourflusher::VERSION + spec.authors = ['Boris Bügling'] + spec.email = ['boris@icculus.org'] + + spec.summary = 'A library for interacting with Xcode simulators.' + spec.homepage = 'https://github.com/neonichu/fourflusher' + spec.license = 'MIT' + + spec.files = `git ls-files -z`.split("\x0").reject { |f| + f.match(%r{^(test|spec|features)/}) + } + spec.bindir = 'exe' + spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } + spec.require_paths = ['lib'] + + spec.add_development_dependency 'bundler', '~> 1.11' + spec.add_development_dependency 'rake', '~> 10.0' + spec.add_development_dependency 'rspec', '~> 2' + spec.add_development_dependency 'rubocop', '~> 0.35.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher.rb new file mode 100644 index 0000000..16e1178 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher.rb @@ -0,0 +1,3 @@ +require 'fourflusher/find' +require 'fourflusher/version' +require 'fourflusher/xcodebuild' diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/compat.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/compat.rb new file mode 100644 index 0000000..f210d19 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/compat.rb @@ -0,0 +1,24 @@ +module Fourflusher + class Config + def self.instance + @instance || new + end + + def verbose? 
+ false + end + end + + class Informative < StandardError + end + + class UI + def self.indentation_level + 0 + end + + def self.message(message) + print(message) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/executable.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/executable.rb new file mode 100644 index 0000000..fa03642 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/executable.rb @@ -0,0 +1,239 @@ +require 'fourflusher/compat' + +# This file is a verbatim copy from CocoaPods, a project licensed under the MIT license. + +# Copyright (c) 2011 - 2015 Eloy Durán , +# Fabio Pelosin , +# Samuel Giddins , +# Marius Rackwitz , +# Kyle Fuller , +# Boris Bügling , +# Orta Therox , and +# Olivier Halligon . + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +module Fourflusher + # Module which provides support for running executables. + # + # In a class it can be used as: + # + # extend Executable + # executable :git + # + # This will create two methods `git` and `git!` both accept a command but + # the later will raise on non successful executions. The methods return the + # output of the command. + # + module Executable + # Creates the methods for the executable with the given name. + # + # @param [Symbol] name + # the name of the executable. + # + # @return [void] + # + def executable(name) + define_method(name) do |*command| + Executable.execute_command(name, Array(command).flatten, false) + end + + define_method(name.to_s + '!') do |*command| + Executable.execute_command(name, Array(command).flatten, true) + end + end + + # Executes the given command displaying it if in verbose mode. + # + # @param [String] bin + # The binary to use. + # + # @param [Array<#to_s>] command + # The command to send to the binary. + # + # @param [Bool] raise_on_failure + # Whether it should raise if the command fails. + # + # @raise If the executable could not be located. + # + # @raise If the command fails and the `raise_on_failure` is set to true. + # + # @return [String] the output of the command (STDOUT and STDERR). + # + def self.execute_command(executable, command, raise_on_failure = true) + bin = which(executable) + fail Fourflusher::Informative, "Unable to locate the executable `#{executable}`" unless bin + + command = command.map(&:to_s) + full_command = "#{bin} #{command.join(' ')}" + + if Config.instance.verbose? 
+ UI.message("$ #{full_command}") + stdout = Indenter.new(STDOUT) + stderr = Indenter.new(STDERR) + else + stdout = Indenter.new + stderr = Indenter.new + end + + status = popen3(bin, command, stdout, stderr) + stdout = stdout.join + stderr = stderr.join + output = stdout + stderr + unless status.success? + if raise_on_failure + fail Fourflusher::Informative, "#{full_command}\n\n#{output}" + else + UI.message("[!] Failed: #{full_command}".red) + end + end + + output + end + + # Returns the absolute path to the binary with the given name on the current + # `PATH`, or `nil` if none is found. + # + # @param [String] program + # The name of the program being searched for. + # + # @return [String,Nil] The absolute path to the given program, or `nil` if + # it wasn't found in the current `PATH`. + # + def self.which(program) + program = program.to_s + ENV['PATH'].split(File::PATH_SEPARATOR).each do |path| + bin = File.expand_path(program, path) + return bin if File.file?(bin) && File.executable?(bin) + end + nil + end + + # Runs the given command, capturing the desired output. + # + # @param [String] bin + # The binary to use. + # + # @param [Array<#to_s>] command + # The command to send to the binary. + # + # @param [Symbol] capture + # Whether it should raise if the command fails. + # + # @raise If the executable could not be located. + # + # @return [(String, Process::Status)] + # The desired captured output from the command, and the status from + # running the command. + # + def self.capture_command(executable, command, capture: :merge) + bin = which(executable) + fail Fourflusher::Informative, "Unable to locate the executable `#{executable}`" unless bin + + require 'open3' + command = command.map(&:to_s) + case capture + when :merge then Open3.capture2e(bin, *command) + when :both then Open3.capture3(bin, *command) + when :out then Open3.capture2(bin, *command) + when :err then Open3.capture3(bin, *command).drop(1) + when :none then Open3.capture2(bin, *command).last + end + end + + private + + def self.popen3(bin, command, stdout, stderr) + require 'open3' + Open3.popen3(bin, *command) do |i, o, e, t| + stdout_thread = reader(o, stdout) + stderr_thread = reader(e, stderr) + i.close + + status = t.value + + o.flush + e.flush + + # wait for both threads to process the streams + stdout_thread.join + stderr_thread.join + + status + end + end + + def self.reader(input, output) + Thread.new do + buf = '' + begin + loop do + buf << input.readpartial(4096) + loop do + string, separator, buf = buf.partition(/[\r\n]/) + if separator.empty? + buf = string + break + end + output << (string << separator) + end + end + rescue EOFError + output << (buf << $INPUT_RECORD_SEPARATOR) unless buf.empty? + end + end + end + + #-------------------------------------------------------------------------# + + # Helper class that allows to write to an {IO} instance taking into account + # the UI indentation level. + # + class Indenter < ::Array + # @return [Fixnum] The indentation level of the UI. + # + attr_accessor :indent + + # @return [IO] the {IO} to which the output should be printed. + # + attr_accessor :io + + # Init a new Indenter + # + # @param [IO] io @see io + # + def initialize(io = nil) + @io = io + @indent = ' ' * UI.indentation_level + end + + # Stores a portion of the output and prints it to the {IO} instance. + # + # @param [String] value + # the output to print. 
+ # + # @return [void] + # + def <<(value) + super + io << "#{indent}#{value}" if io + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/find.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/find.rb new file mode 100644 index 0000000..f841b44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/find.rb @@ -0,0 +1,152 @@ +require 'fourflusher/simctl' +require 'json' +require 'rubygems/version' + +module Fourflusher + # Metadata about an installed Xcode simulator + class Simulator + attr_reader :id + attr_reader :name + attr_reader :os_version + + def os_name + @os_name.downcase.to_sym + end + + def compatible?(other_version) + other_version <= os_version + end + + def to_s + "#{@name} (#{@id}) - #{@os_name} #{@os_version}" + end + + # Compare function for sorting simulators in order by + # - OS Name: ascending + # - OS Version: descending + # - Device type: iPhone first, then ascending + # - Model: ascending + def sim_list_compare(other) + return os_name.to_s <=> other.os_name.to_s unless os_name == other.os_name + return other.os_version <=> os_version unless os_version == other.os_version + device1, model1 = device_and_model + device2, model2 = other.device_and_model + return device_compare(device1, device2) unless device1 == device2 + return model1 <=> model2 unless model1.nil? || model2.nil? + model2.nil? ? 1 : -1 + end + + def device_compare(my_device, other_device) + return -1 if my_device == 'iPhone' + return 1 if other_device == 'iPhone' + return my_device <=> other_device unless my_device.nil? || other_device.nil? + other_device.nil? ? 1 : -1 + end + + # Returns the [device, model] for use during sorting + # Examples: [iPhone, 5s], [iPhone, 6s Plus], [Apple Watch Series 2, 38mm] + def device_and_model + if os_name == :watchos + # Sample string: Apple Watch Series 2 - 38mm + name.split ' - ' + else + # Sample string: "iPhone 5s" or "iPhone 6 Plus" or "iPad Air 2" + if name.start_with? 'Apple TV' + # The last part is the model, and the rest is the device + parts = name.rpartition(' ').reject { |str| str.strip.empty? } + [parts[0...-1].join(' '), parts.drop(parts.count - 1).join(' ')].map(&:strip) + else + # The first part is device, and the rest is the model + name.split ' ', 2 + end + end + end + + private + + def initialize(device_json, os_name, os_version) + @id = device_json['udid'] + @name = device_json['name'] + @os_name = os_name + @os_version = Gem::Version.new os_version + end + end + + # { + # "devices" : { + # "iOS 10.0" : [ + # { + # "state" : "Shutdown", + # "availability" : "(available)", + # "name" : "iPhone 5", + # "udid" : "B7D21008-CC16-47D6-A9A9-885FE1FC47A8" + # }, + # { + # "state" : "Shutdown", + # "availability" : "(available)", + # "name" : "iPhone 5s", + # "udid" : "38EAE7BD-90C3-4C3D-A672-3AF683EEC5A2" + # }, + # ] + # } + # } + + # Executes `simctl` commands + class SimControl + def simulator(filter, os_name = :ios, minimum_version = '1.0') + usable_simulators(filter, os_name, minimum_version).first + end + + def usable_simulators(filter = nil, os = :ios, minimum_version = '1.0') + sims = fetch_sims + oses = sims.map(&:os_name).uniq + os = os.downcase.to_sym + + unless oses.include?(os) + fail "Could not find a `#{os}` simulator (valid values: #{oses.join(', ')}). Ensure that "\ + "Xcode -> Window -> Devices has at least one `#{os}` simulator listed or otherwise add one." + end + + return sims if filter.nil? 
+ minimum_version = Gem::Version.new(minimum_version) + sims = sims.select { |sim| sim.os_name == os && sim.compatible?(minimum_version) } + + return [sims.min_by(&:os_version)] if filter == :oldest + + found_sims = sims.select { |sim| sim.name == filter } + return found_sims if found_sims.count > 0 + sims.select { |sim| sim.name.start_with?(filter) } + end + + private + + # Gets the simulators and transforms the simctl json into Simulator objects + def fetch_sims + device_list = JSON.parse(list(['-j', 'devices']))['devices'] + unless device_list.is_a?(Hash) + msg = "Expected devices to be of type Hash but instated found #{device_list.class}" + fail Fourflusher::Informative, msg + end + device_list.flat_map do |runtime_str, devices| + # This format changed with Xcode 10.2. + if runtime_str.start_with?('com.apple.CoreSimulator.SimRuntime.') + # Sample string: com.apple.CoreSimulator.SimRuntime.iOS-12-2 + _unused, os_info = runtime_str.split 'com.apple.CoreSimulator.SimRuntime.' + os_name, os_major_version, os_minor_version = os_info.split '-' + os_version = "#{os_major_version}.#{os_minor_version}" + else + # Sample string: iOS 9.3 + os_name, os_version = runtime_str.split ' ' + end + + devices.map do |device| + device_is_available = device['isAvailable'] == 'YES' || device['isAvailable'] == true + + if device['availability'] == '(available)' || device_is_available + Simulator.new(device, os_name, os_version) + end + end + end.compact.sort(&:sim_list_compare) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/simctl.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/simctl.rb new file mode 100644 index 0000000..f0d32da --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/simctl.rb @@ -0,0 +1,19 @@ +require 'fourflusher/executable' + +module Fourflusher + # Executes `simctl` commands + class SimControl + extend Executable + executable :xcrun + + def list(args) + simctl!(['list'] + args) + end + + private + + def simctl!(args) + xcrun!(['simctl'] + args) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/version.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/version.rb new file mode 100644 index 0000000..cb57686 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/version.rb @@ -0,0 +1,3 @@ +module Fourflusher + VERSION = '2.3.1' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/xcodebuild.rb b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/xcodebuild.rb new file mode 100644 index 0000000..246c9c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fourflusher-2.3.1/lib/fourflusher/xcodebuild.rb @@ -0,0 +1,13 @@ +require 'fourflusher/find' + +module Fourflusher + # Executes `simctl` commands + class SimControl + def destination(filter, os = :ios, minimum_version = '1.0') + sim = simulator(filter, os, minimum_version) + filter = "for #{os} #{minimum_version}" if filter == :oldest + fail "Simulator #{filter} is not available." if sim.nil? 
+ ['-destination', "id=#{sim.id}"] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/.gitignore b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/.gitignore new file mode 100644 index 0000000..ee73f85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/.gitignore @@ -0,0 +1,24 @@ +## MAC OS +.DS_Store + +## TEXTMATE +*.tmproj +tmtags + +## EMACS +*~ +\#* +.\#* + +## VIM +*.swp + +## PROJECT::GENERAL +coverage +doc +.yardoc +pkg + +## PROJECT::SPECIFIC +Gemfile.lock +*.gem diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/.rspec b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/.rspec new file mode 100644 index 0000000..5f16476 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/.rspec @@ -0,0 +1,2 @@ +--color +--format progress diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/CHANGELOG b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/CHANGELOG new file mode 100644 index 0000000..a1d6c74 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/CHANGELOG @@ -0,0 +1,93 @@ +2.0.4 / 2013-09-19 + +* Bug fixes + + * Restore 1.8 compatibility per @zhaocai in #6 + +2.0.3 / 2013-08-11 + +* Enhancements + + * If you pass a non-String to #find, the :read will be automatically applied + +2.0.2 / 2013-08-11 + +* Enhancements + + * You can pass identity rules as procs that take record1 and record2 in their original forms (note they should return nil if not sure) + +2.0.1 / 2013-06-06 + +* Bug fixes + + * Fix Ruby 1.8 compatibility - thanks @trishume @fuzzy76 ! https://github.com/seamusabshere/fuzzy_match/issues/5 + +2.0.0 / 2013-05-22 + +* Breaking changes + + * normalizers removed - use groupings instead + * first_grouping_decides removed + * FuzzyMatch#free gone + +* Enhancements + + * chained groupings! + * faster and simpler structure + * FuzzyMatch#find_with_score returns [record, dice_score, lev_score] + +1.5.0 / 2013-04-03 + +* Breaking changes + + * No longer automatically calls to_regexp on rules - you must pass Regexps to normalizers, groupings, etc. + +* Enhancements + + * FuzzyMatch#find_best returns all top results with the same score - thanks @ihough ! + * Doesn't require to_regexp gem for you - you can still use it if you want to convert strings into regexps safely, if you want, tho + +1.4.1 / 2013-01-17 + +* Bug fixes + + * Don't die when you're comparing a string of length 1 and another string of length less than three (thanks @ihough !) + +* Enhancements + + * '2A' is allowed to match '2 A'... funky stuff with pair distance and short strings + * FuzzyMatch#find_all_with_score returns a sorted array of records with their scores - thanks @brycesenz! (https://github.com/seamusabshere/fuzzy_match/issues/3) + +1.4.0 / 2012-09-07 + +* Breaking changes + + * Option keys are no longer symbolized automatically - make sure you do that if there's any chance they'll be strings + * active_record_inline_schema is no longer a runtime dependency - add it to your Gemfile if you use FuzzyMatch::CachedResult + +* Enhancements + + * Tiny bit better #explain(needle) + * Remove dependency on ActiveSupport + +1.3.3 / 2012-04-13 + +* Enhancements + + * Now you must require 'fuzzy_match/cached_result' if you want to use it. + * Use active_record_inline_schema to create the FuzzyMatch::CachedResult table + * Test against CohortAnalysis, the replacement for CohortScope + * Fix some other random deprecations (like set_primary_key) + +1.3.2 / 2012-02-24 + +* Enhancements + + * Start keeping a changelog! 
+ * renamed blockings to groupings + * cleaned up tests + +* Bug fixes + + * better handling for one-letter similiarities like 'X foo' vs 'X bar' which couldn't be detected by pair distance + * take deprecated option :tighteners as :normalizers diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/Gemfile b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/Gemfile new file mode 100644 index 0000000..e3e0762 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/Gemfile @@ -0,0 +1,11 @@ +source :rubygems + +gemspec + +# bin/fuzzy_match development +gem 'activesupport' +gem 'remote_table' +gem 'thor' +gem 'to_regexp' +gem 'perftools.rb' +gem 'pry' diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/LICENSE b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/LICENSE new file mode 100644 index 0000000..968ab96 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/LICENSE @@ -0,0 +1,20 @@ +Copyright 2011 Brighter Planet, Inc. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/README.markdown b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/README.markdown new file mode 100644 index 0000000..03d29c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/README.markdown @@ -0,0 +1,191 @@ +## Top 3 reasons you should use FuzzyMatch + +1. *intelligent defaults*: it uses a combination of Pair Distance (2-gram) and Levenshtein Edit Distance to effectively match many examples with no configuration +2. *all-vs-all*: it takes care of finding the optimal match by comparing everything against everything else (when that's necessary) +3. *refinable*: you might get to 90% with no configuration, but if you need to go beyond you can use regexps, grouping, and stop words + +It solves many mid-range matching problems — if your haystack is ~10k records — if you can winnow down the initial possibilities at the database level and only bring good contenders into app memory — why not give it a shot? + +# FuzzyMatch + +Find a needle in a haystack based on string similarity and regular expression rules. + +Replaces [`loose_tight_dictionary`](https://github.com/seamusabshere/loose_tight_dictionary) because that was a confusing name. + +Warning! `normalizers` are gone in version 2 and above! See the CHANGELOG and check out enhanced (and hopefully more intuitive) `groupings`. 
+ +![diagram of matching process](https://raw.github.com/seamusabshere/fuzzy_match/master/highlevel.png) + +## Quickstart + + >> require 'fuzzy_match' + => true + >> FuzzyMatch.new(['seamus', 'andy', 'ben']).find('Shamus') + => "seamus" + +See also the blog post [Fuzzy match in Ruby](http://numbers.brighterplanet.com/2012/01/18/fuzzy-match-in-ruby/). + +## Default matching (string similarity) + +At the core, and even if you configure nothing else, string similarity (calculated by "pair distance" aka Dice's Coefficient) is used to compare records. + +You can tell `FuzzyMatch` what field or method to use via the `:read` option... for example, let's say you want to match a `Country` object like `#` + + >> fz = FuzzyMatch.new(Country.all, :read => :name) + => # + >> fz.find('youruguay') + => # + +## Optional rules (regular expressions) + +You can improve the default matchings with rules. There are 3 different kinds of rules. Each rule is a regular expression. + +We suggest that you **first try without any rules** and only define them to improve matching, prevent false positives, etc. + +### Groupings + +Group records together. The two laws of groupings: + +1. If a needle matches a grouping, only compare it with straws in the same grouping; (the "buddies vs buddies" rule) +2. If a needle doesn't match any grouping, only compare it with straws that also don't match ANY grouping (the "misfits vs misfits" rule) + +The two laws of chained groupings: (new in v2.0 and rather important) + +1. Sub-groupings (e.g., `/plaza/i` below) only match if their primary (e.g., `/ramada/i`) does +2. In final grouping decisions, sub-groupings win over primaries (so "Ramada Inn" is NOT grouped with "Ramada Plaza", but if you removed `/plaza/i` sub-grouping, then they would be grouped together) + +Hopefully they are rather intuitive once you start using them. + +[![screenshot of spreadsheet of groupings](https://raw.github.com/seamusabshere/fuzzy_match/master/groupings-screenshot.png)](https://docs.google.com/spreadsheet/pub?key=0AkCJNpm9Ks6JdG4xSWhfWFlOV1RsZ2NCeU9seGx6cnc&single=true&gid=0&output=html) + +That will... + +* separate "Orient Express Hotel" and "Ramada Conference Center Mandarin" from real Mandarin Oriental hotels +* keep "Trump Hotel Collection" away from "Luxury Collection" (another real hotel brand) without messing with the word "Luxury" +* make sure that "Ramada Plaza" are always grouped with other RPs—and not with plain old Ramadas—and vice versa +* splits out Hyatts into their different brands +* and more + +You specify chained groupings as arrays of regexps: + + groupings = [ + /mandarin/i, + /trump/i, + [ /ramada/i, /plaza/i ], + ... + ] + fz = FuzzyMatch.new(haystack, groupings: groupings) + +This way of specifying groupings is meant to be easy to load from a CSV, like `bin/fuzzy_match` does. + +Formerly called "blockings," but that was jargon that confused people. + +### Identities + +Prevent impossible matches. Can be very confusing—see if you can make things work with groupings first. + +Adding an identity like `/(f)-?(\d50)/i` ensures that "Ford F-150" and "Ford F-250" never match. + +Note that identities do not establish certainty. They just say whether two records **could** be identical... then string similarity takes over. + +### Stop words + +Ignore common and/or meaningless words when doing string similarity. 
+ +Adding a stop word like `THE` ensures that it is not taken into account when comparing "THE CAT", "THE DAT", and "THE CATT" + +Stop words are NOT removed when checking `:must_match_at_least_one_word` and when doing identities and groupings. + +## Find options + +* `read`: how to interpret each record in the 'haystack', either a Proc or a symbol +* `must_match_grouping`: don't return a match unless the needle fits into one of the groupings you specified +* `must_match_at_least_one_word`: don't return a match unless the needle shares at least one word with the match. Note that "Foo's" is treated like one word (so that it won't match "'s") and "Bolivia," is treated as just "bolivia" +* `gather_last_result`: enable `last_result` + +## Case sensitivity + +String similarity is case-insensitive. Everything is downcased before scoring. This is a change from previous versions. + +Be careful with uppercase letters in your rules; in general, things are downcased before comparing. + +## String similarity algorithm + +The algorithm is [Dice's Coefficient](http://en.wikipedia.org/wiki/Dice's_coefficient) (aka Pair Distance) because it seemed to work better than Longest Substring, Hamming, Jaro Winkler, Levenshtein (although see edge case below) etc. + +Here's a great explanation copied from [the wikipedia entry](http://en.wikipedia.org/wiki/Dice%27s_coefficient): + + to calculate the similarity between: + + night + nacht + + We would find the set of bigrams in each word: + + {ni,ig,gh,ht} + {na,ac,ch,ht} + + Each set has four elements, and the intersection of these two sets has only one element: ht. + + Inserting these numbers into the formula, we calculate, s = (2 · 1) / (4 + 4) = 0.25. + +### Edge case: when Dice's fails, use Levenshtein + +In edge cases where Dice's finds that two strings are equally similar to a third string, then Levenshtein distance is used. For example, pair distance considers "RATZ" and "CATZ" to be equally similar to "RITZ" so we invoke Levenshtein. + + >> 'RITZ'.pair_distance_similar 'RATZ' + => 0.3333333333333333 + >> 'RITZ'.pair_distance_similar 'CATZ' + => 0.3333333333333333 # pair distance can't tell the difference, so we fall back to levenshtein... + >> 'RITZ'.levenshtein_similar 'RATZ' + => 0.75 + >> 'RITZ'.levenshtein_similar 'CATZ' + => 0.5 # which properly shows that RATZ should win + +## Cached results + +Make sure you add active\_record\_inline\_schema to your gemfile. + +TODO write documentation. For now, please see how [we manually cache matches between aircraft and flight segments](https://github.com/brighterplanet/earth/blob/master/lib/earth/air/aircraft.rb). + +## Glossary + +The admittedly imperfect metaphor is "look for a needle in a haystack" + +* needle: the search term +* haystack: the records you are searching (your result will be an object from here) + +## Using amatch to make it faster + +You can optionally use [`amatch`](http://flori.github.com/amatch/) by [Florian Frank](https://github.com/flori) (thanks Flori!) to make string similarity calculations in a C extension. + + require 'fuzzy_match' + require 'amatch' # note that you have to require this... fuzzy_match won't require it for you + FuzzyMatch.engine = :amatch + +Otherwise, pure ruby versions of the string similarity algorithms derived from the [answer to a StackOverflow question](http://stackoverflow.com/questions/653157/a-better-similarity-ranking-algorithm-for-variable-length-strings) and [the text gem](https://github.com/threedaymonk/text/blob/master/lib/text/levenshtein.rb) are used. 
Thanks [marzagao](http://stackoverflow.com/users/10997/marzagao) and [threedaymonk](https://github.com/threedaymonk)! + +## Real-world usage + +

[Brighter Planet logo]

+ +We use `fuzzy_match` for [data science at Brighter Planet](http://brighterplanet.com/research) and in production at + +* [Brighter Planet's impact estimate web service](http://impact.brighterplanet.com) +* [Brighter Planet's reference data web service](http://data.brighterplanet.com) + +We often combine it with [`remote_table`](https://github.com/seamusabshere/remote_table) and [`errata`](https://github.com/seamusabshere/errata): + +- download table with `remote_table` +- correct serious or repeated errors with `errata` +- `fuzzy_match` the rest + +## Authors + +* Seamus Abshere +* Ian Hough +* Andy Rossmeissl + +## Copyright + +Copyright 2013 Seamus Abshere diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/Rakefile b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/Rakefile new file mode 100644 index 0000000..7ed4815 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/Rakefile @@ -0,0 +1,5 @@ +require 'bundler' +Bundler::GemHelper.install_tasks + +require 'yard' +YARD::Rake::YardocTask.new diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/THANKS-WILLIAM-JAMES.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/THANKS-WILLIAM-JAMES.rb new file mode 100644 index 0000000..57ac8c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/THANKS-WILLIAM-JAMES.rb @@ -0,0 +1,37 @@ +#!/usr/bin/env ruby + +# Thanks William James! +# http://www.ruby-forum.com/topic/95519#200484 +def cart_prod(*args) + args.inject([[]]){|old,lst| + new = [] + lst.each{|e| new += old.map{|c| c.dup << e }} + new + } +end + +require 'benchmark' + +a = [1,2,3] +b = [4,5] +Benchmark.bmbm do |x| + x.report("native") do + 500_000.times { a.product(b) } + end + x.report("william-james") do |x| + 500_000.times { cart_prod(a, b) } + end +end + +# results: +# $ ruby foo.rb +# Rehearsal ------------------------------------------------- +# native 0.720000 0.000000 0.720000 ( 0.729319) +# william-james 3.620000 0.010000 3.630000 ( 3.629198) +# ---------------------------------------- total: 4.350000sec +# +# user system total real +# native 0.710000 0.000000 0.710000 ( 0.708620) +# william-james 3.800000 0.000000 3.800000 ( 3.792538) + +# thanks for all the fish! 
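Looking back at the fuzzy_match README vendored above, its rules (groupings, identities, stop words) and find options are each described separately; a single consolidated sketch may help when reviewing how they combine. This is not taken from the gem's documentation: the haystack, the rules, the `identities`/`stop_words` option keys, and the indicated results are assumptions based on the README sections and CHANGELOG entries earlier in this diff.

```ruby
require 'fuzzy_match'

haystack = ['Mandarin Oriental Hotel', 'Ramada Inn', 'Ramada Plaza', 'Orient Express Hotel']

fz = FuzzyMatch.new(
  haystack,
  groupings:  [/mandarin/i, [/ramada/i, /plaza/i]], # chained grouping keeps Ramada Plaza away from plain Ramada
  identities: [/(f)-?(\d50)/i],                     # the Ford F-150 / F-250 identity from the README
  stop_words: ['Hotel']                             # ignored during string-similarity scoring
)

fz.find('Ramda Plasa')            # => likely "Ramada Plaza"
fz.find_with_score('Ramda Plasa') # => [record, dice_score, levenshtein_score] per the 2.0.0 CHANGELOG entry
```

Per the 1.4.0 CHANGELOG note above, option keys must be symbols; they are not symbolized automatically.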
diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before-with-free.txt b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before-with-free.txt new file mode 100644 index 0000000..b5729bb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before-with-free.txt @@ -0,0 +1,283 @@ + 1962 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/object/blank.rb:68:String + 1957 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1632:String + 342 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:String + 326 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Array + 325 benchmark/memory.rb:21:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/hasher.rb:20:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:22:ActiveSupport::OrderedHash + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:65:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/ordered_hash.rb:39:Array + 325 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:FuzzyMatch::Similarity + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:57:Array + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:25:FuzzyMatch::Score + 325 ./benchmark/../lib/fuzzy_match/score.rb:13:Float + 325 ./benchmark/../lib/fuzzy_match.rb:35:FuzzyMatch::Wrapper + 320 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:28:String + 303 ./benchmark/../lib/fuzzy_match/similarity.rb:21:Float + 201 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:String + 184 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:String + 140 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:__node__ + 41 ./benchmark/../lib/fuzzy_match/similarity.rb:49:__node__ + 31 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:Regexp + 28 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:19:__node__ + 22 ./benchmark/../lib/fuzzy_match/similarity.rb:57:__node__ + 22 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:__node__ + 21 ./benchmark/../lib/fuzzy_match.rb:199:FuzzyMatch::Grouping + 17 ./benchmark/../lib/fuzzy_match/similarity.rb:21:__node__ + 16 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:Class + 14 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:__node__ + 14 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__node__ + 13 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:__node__ + 13 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:__node__ + 11 ./benchmark/../lib/fuzzy_match/identity.rb:18:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:55:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:39:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__node__ + 10 ./benchmark/../lib/fuzzy_match.rb:193:FuzzyMatch::Identity + 9 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:String + 9 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:49:String + 9 
./benchmark/../lib/fuzzy_match/similarity.rb:42:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:41:__node__ + 8 ./benchmark/../lib/fuzzy_match/wrapper.rb:31:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:__node__ + 8 ./benchmark/../lib/fuzzy_match/similarity.rb:38:__node__ + 8 ./benchmark/../lib/fuzzy_match/score.rb:13:__node__ + 8 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:__node__ + 8 ./benchmark/../lib/fuzzy_match/grouping.rb:24:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:7:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:6:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:5:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:9:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:__node__ + 7 ./benchmark/../lib/fuzzy_match/similarity.rb:45:__node__ + 7 ./benchmark/../lib/fuzzy_match/score.rb:17:__node__ + 7 ./benchmark/../lib/fuzzy_match/identity.rb:19:__node__ + 7 ./benchmark/../lib/fuzzy_match/grouping.rb:27:__node__ + 7 ./benchmark/../lib/fuzzy_match.rb:209:String + 6 ./benchmark/../lib/fuzzy_match/wrapper.rb:8:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:44:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:13:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:25:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:21:__node__ + 6 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:__node__ + 6 ./benchmark/../lib/fuzzy_match/grouping.rb:22:__node__ + 5 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1640:String + 5 ./benchmark/../lib/fuzzy_match/wrapper.rb:34:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:8:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:33:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__node__ + 5 ./benchmark/../lib/fuzzy_match/score.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/result.rb:16:__node__ + 5 ./benchmark/../lib/fuzzy_match/identity.rb:10:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:26:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:25:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:15:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:33:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:30:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:59:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:54:__node__ + 4 ./benchmark/../lib/fuzzy_match/score.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:9:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:7:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:6:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:4:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:3:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:13:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:12:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:11:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:10:__node__ + 4 
./benchmark/../lib/fuzzy_match/extract_regexp.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:22:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:21:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:String + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:11:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:8:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:26:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:13:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:6:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:58:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:56:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:48:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:36:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:32:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:11:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:16:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:12:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:21:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:19:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:15:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:9:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:17:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:16:String + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:33:__node__ + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match.rb:77:Array + 2 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:387:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:String + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:35:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:20:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:16:__node__ + 2 
./benchmark/../lib/fuzzy_match/wrapper.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:28:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:String + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:24:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:15:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:10:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:60:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:50:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:46:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/score.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:18:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:17:String + 2 ./benchmark/../lib/fuzzy_match/score.rb:14:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:13:String + 2 ./benchmark/../lib/fuzzy_match/result.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/result.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/identity.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:21:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:String + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:9:Class + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:29:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match.rb:86:Array + 1 benchmark/memory.rb:50:String + 1 benchmark/memory.rb:49:FuzzyMatch + 1 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:492:URI::Generic + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:19:Process::Status + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:10:Bignum + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:63:Array + 1 
/Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:121:RemoteTable::Transformer + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:116:RemoteTable::Format::Delimited + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:111:RemoteTable::Properties + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:106:RemoteTable::LocalFile + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Regexp + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__node__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:String + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:Module + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/hash/keys.rb:18:Hash + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:1:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:13:Array + 1 ./benchmark/../lib/fuzzy_match/score.rb:10:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/result.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:Module + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:28:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:Regexp + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:17:Hash + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:String + 1 
./benchmark/../lib/fuzzy_match/grouping.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:10:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match.rb:62:FuzzyMatch::Wrapper + 1 ./benchmark/../lib/fuzzy_match.rb:39:String + 1 ./benchmark/../lib/fuzzy_match.rb:39:FuzzyMatch::Result + 1 ./benchmark/../lib/fuzzy_match.rb:35:String + 1 ./benchmark/../lib/fuzzy_match.rb:209:Array + 1 ./benchmark/../lib/fuzzy_match.rb:199:String + 1 ./benchmark/../lib/fuzzy_match.rb:198:Array + 1 ./benchmark/../lib/fuzzy_match.rb:193:String + 1 ./benchmark/../lib/fuzzy_match.rb:192:Array + 1 ./benchmark/../lib/fuzzy_match.rb:187:String + 1 ./benchmark/../lib/fuzzy_match.rb:186:Array + 1 ./benchmark/../lib/fuzzy_match.rb:101:Array diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before-without-last-result.txt b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before-without-last-result.txt new file mode 100644 index 0000000..d6a7bb8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before-without-last-result.txt @@ -0,0 +1,257 @@ + 8393 ./benchmark/../lib/fuzzy_match/score.rb:13:String + 3349 ./benchmark/../lib/fuzzy_match/score.rb:13:Array + 3276 ./benchmark/../lib/fuzzy_match.rb:187:Array + 3042 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__varmap__ + 2752 ./benchmark/../lib/fuzzy_match/similarity.rb:57:Array + 1962 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/object/blank.rb:68:String + 1957 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1632:String + 1883 ./benchmark/../lib/fuzzy_match/score.rb:13:Float + 1674 ./benchmark/../lib/fuzzy_match/score.rb:13:Amatch::PairDistance + 1017 ./benchmark/../lib/fuzzy_match/similarity.rb:37:Array + 779 ./benchmark/../lib/fuzzy_match/similarity.rb:42:FuzzyMatch::Score + 779 ./benchmark/../lib/fuzzy_match/similarity.rb:41:FuzzyMatch::Score + 676 benchmark/memory.rb:21:String + 607 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:String + 444 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Array + 342 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/hasher.rb:20:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:22:ActiveSupport::OrderedHash + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:65:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/ordered_hash.rb:39:Array + 325 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:FuzzyMatch::Similarity + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:25:FuzzyMatch::Score + 325 ./benchmark/../lib/fuzzy_match.rb:35:FuzzyMatch::Wrapper + 320 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:28:String + 303 ./benchmark/../lib/fuzzy_match/similarity.rb:21:Float + 234 ./benchmark/../lib/fuzzy_match/similarity.rb:56:Array + 234 ./benchmark/../lib/fuzzy_match/similarity.rb:55:Array + 184 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:String + 140 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:__node__ + 129 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__node__ + 127 ./benchmark/../lib/fuzzy_match/similarity.rb:55:__node__ + 119 ./benchmark/../lib/fuzzy_match.rb:101:__scope__ + 118 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Hash + 118 
./benchmark/../lib/fuzzy_match/similarity.rb:37:__scope__ + 118 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__scope__ + 118 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__scope__ + 118 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__scope__ + 117 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__scope__ + 117 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:Array + 102 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:Array + 102 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:MatchData + 101 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:MatchData + 41 ./benchmark/../lib/fuzzy_match/similarity.rb:49:__node__ + 36 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:Regexp + 28 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:19:__node__ + 26 ./benchmark/../lib/fuzzy_match.rb:187:FuzzyMatch::Normalizer + 22 ./benchmark/../lib/fuzzy_match/similarity.rb:57:__node__ + 22 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:__node__ + 17 ./benchmark/../lib/fuzzy_match/similarity.rb:21:__node__ + 16 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:Class + 14 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__node__ + 13 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:__node__ + 13 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:__node__ + 11 ./benchmark/../lib/fuzzy_match/identity.rb:18:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:__node__ + 10 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:String + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:39:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__node__ + 10 ./benchmark/../lib/fuzzy_match.rb:193:FuzzyMatch::Identity + 9 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:49:String + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:42:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:41:__node__ + 8 ./benchmark/../lib/fuzzy_match/wrapper.rb:31:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:6:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:__node__ + 8 ./benchmark/../lib/fuzzy_match/similarity.rb:38:__node__ + 8 ./benchmark/../lib/fuzzy_match/score.rb:13:__node__ + 8 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:9:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:__node__ + 7 ./benchmark/../lib/fuzzy_match/similarity.rb:45:__node__ + 7 ./benchmark/../lib/fuzzy_match/score.rb:17:__node__ + 7 ./benchmark/../lib/fuzzy_match/identity.rb:19:__node__ + 6 ./benchmark/../lib/fuzzy_match/wrapper.rb:8:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:28:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:24:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/normalizer.rb:10:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:44:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:13:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:25:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:21:__node__ + 6 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:__node__ + 5 
/Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1640:String + 5 ./benchmark/../lib/fuzzy_match/wrapper.rb:34:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:8:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:33:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__node__ + 5 ./benchmark/../lib/fuzzy_match/score.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/identity.rb:10:__node__ + 4 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:7:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:33:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:30:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:59:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:54:__node__ + 4 ./benchmark/../lib/fuzzy_match/score.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:22:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:21:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:String + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:11:__node__ + 4 ./benchmark/../lib/fuzzy_match.rb:77:Array + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:30:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:29:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:26:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:13:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:6:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:58:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:56:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:48:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:36:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:32:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:11:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:16:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:12:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:9:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:17:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:16:String + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:15:String + 3 
./benchmark/../lib/fuzzy_match.rb:86:Array + 2 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:387:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:String + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:35:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:20:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:String + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:1:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:60:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:50:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:46:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/score.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:18:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:17:String + 2 ./benchmark/../lib/fuzzy_match/score.rb:14:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/identity.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:21:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:String + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match.rb:101:Array + 1 benchmark/memory.rb:50:__scope__ + 1 benchmark/memory.rb:50:String + 1 benchmark/memory.rb:49:FuzzyMatch + 1 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:492:URI::Generic + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:19:Process::Status + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:10:Bignum + 1 
/Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:63:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:121:RemoteTable::Transformer + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:116:RemoteTable::Format::Delimited + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:111:RemoteTable::Properties + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:106:RemoteTable::LocalFile + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__scope__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__node__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:String + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:Module + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/hash/keys.rb:18:Hash + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:1:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:10:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:Module + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:28:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:Regexp + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:17:Hash + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:String + 1 ./benchmark/../lib/fuzzy_match.rb:62:FuzzyMatch::Wrapper + 1 ./benchmark/../lib/fuzzy_match.rb:62:Hash + 1 ./benchmark/../lib/fuzzy_match.rb:35:String + 1 ./benchmark/../lib/fuzzy_match.rb:35:Array + 1 ./benchmark/../lib/fuzzy_match.rb:193:String + 1 ./benchmark/../lib/fuzzy_match.rb:192:Array + 1 ./benchmark/../lib/fuzzy_match.rb:187:String + 1 
./benchmark/../lib/fuzzy_match.rb:186:Array diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before.txt b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before.txt new file mode 100644 index 0000000..85a142b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/before.txt @@ -0,0 +1,304 @@ + 8642 ./benchmark/../lib/fuzzy_match/score.rb:13:String + 3465 ./benchmark/../lib/fuzzy_match/score.rb:13:Array + 3388 ./benchmark/../lib/fuzzy_match/similarity.rb:25:Array + 3146 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__varmap__ + 2842 ./benchmark/../lib/fuzzy_match/similarity.rb:57:Array + 1962 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/object/blank.rb:68:String + 1957 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1632:String + 1936 ./benchmark/../lib/fuzzy_match/score.rb:13:Float + 1732 ./benchmark/../lib/fuzzy_match/score.rb:13:Amatch::PairDistance + 1054 ./benchmark/../lib/fuzzy_match/similarity.rb:37:Array + 806 ./benchmark/../lib/fuzzy_match/similarity.rb:41:FuzzyMatch::Score + 805 ./benchmark/../lib/fuzzy_match/similarity.rb:42:FuzzyMatch::Score + 688 benchmark/memory.rb:21:String + 639 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:String + 448 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Array + 342 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/hasher.rb:20:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:22:ActiveSupport::OrderedHash + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:65:String + 325 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/ordered_hash.rb:39:Array + 325 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:FuzzyMatch::Similarity + 325 ./benchmark/../lib/fuzzy_match/similarity.rb:25:FuzzyMatch::Score + 325 ./benchmark/../lib/fuzzy_match.rb:35:FuzzyMatch::Wrapper + 320 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/format/delimited.rb:28:String + 303 ./benchmark/../lib/fuzzy_match/similarity.rb:21:Float + 242 ./benchmark/../lib/fuzzy_match/similarity.rb:56:Array + 242 ./benchmark/../lib/fuzzy_match/similarity.rb:55:Array + 184 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:String + 140 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:__node__ + 133 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__node__ + 131 ./benchmark/../lib/fuzzy_match/similarity.rb:55:__node__ + 123 ./benchmark/../lib/fuzzy_match.rb:101:__scope__ + 122 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:Hash + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__scope__ + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__scope__ + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__scope__ + 122 ./benchmark/../lib/fuzzy_match/similarity.rb:12:__scope__ + 121 ./benchmark/../lib/fuzzy_match/wrapper.rb:29:__scope__ + 121 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:Array + 110 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:Array + 110 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:MatchData + 109 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:MatchData + 57 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:Regexp + 41 ./benchmark/../lib/fuzzy_match/similarity.rb:49:__node__ + 28 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:19:__node__ + 26 
./benchmark/../lib/fuzzy_match.rb:187:FuzzyMatch::Normalizer + 22 ./benchmark/../lib/fuzzy_match/similarity.rb:57:__node__ + 22 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:__node__ + 21 ./benchmark/../lib/fuzzy_match.rb:199:FuzzyMatch::Grouping + 17 ./benchmark/../lib/fuzzy_match/similarity.rb:21:__node__ + 16 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:Class + 14 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:__node__ + 14 ./benchmark/../lib/fuzzy_match/similarity.rb:37:__node__ + 13 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:__node__ + 13 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:27:__node__ + 12 ./benchmark/../lib/fuzzy_match/wrapper.rb:19:__node__ + 11 ./benchmark/../lib/fuzzy_match/identity.rb:18:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:__node__ + 11 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:__node__ + 10 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch.bundle:0:String + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:39:__node__ + 10 ./benchmark/../lib/fuzzy_match/similarity.rb:25:__node__ + 10 ./benchmark/../lib/fuzzy_match.rb:193:FuzzyMatch::Identity + 9 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:49:String + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:42:__node__ + 9 ./benchmark/../lib/fuzzy_match/similarity.rb:41:__node__ + 8 ./benchmark/../lib/fuzzy_match/wrapper.rb:31:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:__node__ + 8 ./benchmark/../lib/fuzzy_match/normalizer.rb:14:__node__ + 8 ./benchmark/../lib/fuzzy_match/similarity.rb:38:__node__ + 8 ./benchmark/../lib/fuzzy_match/score.rb:13:__node__ + 8 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:__node__ + 8 ./benchmark/../lib/fuzzy_match/grouping.rb:24:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:7:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:6:__node__ + 7 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:5:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:9:__node__ + 7 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:__node__ + 7 ./benchmark/../lib/fuzzy_match/similarity.rb:45:__node__ + 7 ./benchmark/../lib/fuzzy_match/score.rb:17:__node__ + 7 ./benchmark/../lib/fuzzy_match/identity.rb:19:__node__ + 7 ./benchmark/../lib/fuzzy_match/grouping.rb:27:__node__ + 6 ./benchmark/../lib/fuzzy_match/wrapper.rb:8:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:44:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:15:__node__ + 6 ./benchmark/../lib/fuzzy_match/similarity.rb:13:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:25:__node__ + 6 ./benchmark/../lib/fuzzy_match/score.rb:21:__node__ + 6 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:__node__ + 6 ./benchmark/../lib/fuzzy_match/grouping.rb:22:__node__ + 5 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/fastercsv-1.5.4/lib/faster_csv.rb:1640:String + 5 ./benchmark/../lib/fuzzy_match/wrapper.rb:34:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/normalizer.rb:19:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:8:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:33:__node__ + 5 ./benchmark/../lib/fuzzy_match/similarity.rb:29:__node__ + 5 
./benchmark/../lib/fuzzy_match/similarity.rb:12:__node__ + 5 ./benchmark/../lib/fuzzy_match/score.rb:9:__node__ + 5 ./benchmark/../lib/fuzzy_match/result.rb:16:__node__ + 5 ./benchmark/../lib/fuzzy_match/identity.rb:10:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:26:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:25:__node__ + 5 ./benchmark/../lib/fuzzy_match/grouping.rb:15:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:33:__node__ + 4 ./benchmark/../lib/fuzzy_match/wrapper.rb:30:__node__ + 4 ./benchmark/../lib/fuzzy_match/normalizer.rb:20:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:59:__node__ + 4 ./benchmark/../lib/fuzzy_match/similarity.rb:54:__node__ + 4 ./benchmark/../lib/fuzzy_match/score.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:9:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:7:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:6:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:5:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:4:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:3:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:13:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:12:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:11:__node__ + 4 ./benchmark/../lib/fuzzy_match/result.rb:10:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:8:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:22:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:21:__node__ + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:20:String + 4 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:11:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:8:__node__ + 3 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/wrapper.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:26:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/normalizer.rb:13:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:6:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:58:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:56:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:48:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:36:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:32:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:28:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/similarity.rb:11:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:8:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:7:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:24:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:20:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:16:__node__ + 3 ./benchmark/../lib/fuzzy_match/score.rb:12:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:21:__node__ + 3 ./benchmark/../lib/fuzzy_match/result.rb:19:__node__ + 3 
./benchmark/../lib/fuzzy_match/result.rb:15:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:9:__node__ + 3 ./benchmark/../lib/fuzzy_match/identity.rb:17:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:3:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:18:__node__ + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:16:String + 3 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:15:String + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:33:__node__ + 3 ./benchmark/../lib/fuzzy_match/grouping.rb:14:__node__ + 3 ./benchmark/../lib/fuzzy_match.rb:86:Array + 3 ./benchmark/../lib/fuzzy_match.rb:77:Array + 2 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:387:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:3:String + 2 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:35:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:20:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/wrapper.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:Class + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:28:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:27:String + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:24:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:15:__node__ + 2 ./benchmark/../lib/fuzzy_match/normalizer.rb:10:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:60:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:50:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:4:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:46:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:3:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/similarity.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/score.rb:26:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:22:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:18:__node__ + 2 ./benchmark/../lib/fuzzy_match/score.rb:17:String + 2 ./benchmark/../lib/fuzzy_match/score.rb:14:__node__ + 2 ./benchmark/../lib/fuzzy_match/result.rb:2:Class + 2 ./benchmark/../lib/fuzzy_match/result.rb:17:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:4:Class + 2 ./benchmark/../lib/fuzzy_match/identity.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:22:__node__ + 2 
./benchmark/../lib/fuzzy_match/identity.rb:21:__node__ + 2 ./benchmark/../lib/fuzzy_match/identity.rb:11:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:7:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:6:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:5:__node__ + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:String + 2 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:9:Class + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:34:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:32:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:30:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:29:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:23:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:16:__node__ + 2 ./benchmark/../lib/fuzzy_match/grouping.rb:12:__node__ + 2 ./benchmark/../lib/fuzzy_match.rb:101:Array + 1 benchmark/memory.rb:50:__scope__ + 1 benchmark/memory.rb:50:String + 1 benchmark/memory.rb:49:FuzzyMatch + 1 /Users/seamus/.rvm/rubies/ruby-1.8.7-p334/lib/ruby/1.8/uri/common.rb:492:URI::Generic + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:19:Process::Status + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table/executor.rb:10:Bignum + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:63:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:121:RemoteTable::Transformer + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:116:RemoteTable::Format::Delimited + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:111:RemoteTable::Properties + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/remote_table-1.1.6/lib/remote_table.rb:106:RemoteTable::LocalFile + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Regexp + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:4:Array + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:__scope__ + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:String + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/amatch-0.2.5/lib/amatch/version.rb:1:Module + 1 /Users/seamus/.rvm/gems/ruby-1.8.7-p334/gems/activesupport-3.0.5/lib/active_support/core_ext/hash/keys.rb:18:Hash + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/wrapper.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:4:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:3:String + 1 ./benchmark/../lib/fuzzy_match/normalizer.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/similarity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:4:String + 1 
./benchmark/../lib/fuzzy_match/score.rb:3:__node__ + 1 ./benchmark/../lib/fuzzy_match/score.rb:1:String + 1 ./benchmark/../lib/fuzzy_match/score.rb:10:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/result.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/result.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:5:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/identity.rb:4:String + 1 ./benchmark/../lib/fuzzy_match/identity.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:4:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:2:Module + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:28:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:26:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:25:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:24:String + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:23:Regexp + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:17:Hash + 1 ./benchmark/../lib/fuzzy_match/extract_regexp.rb:10:String + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:9:String + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:1:__node__ + 1 ./benchmark/../lib/fuzzy_match/grouping.rb:10:FuzzyMatch::ExtractRegexp + 1 ./benchmark/../lib/fuzzy_match.rb:62:FuzzyMatch::Wrapper + 1 ./benchmark/../lib/fuzzy_match.rb:39:String + 1 ./benchmark/../lib/fuzzy_match.rb:39:FuzzyMatch::Result + 1 ./benchmark/../lib/fuzzy_match.rb:35:String + 1 ./benchmark/../lib/fuzzy_match.rb:35:Array + 1 ./benchmark/../lib/fuzzy_match.rb:199:String + 1 ./benchmark/../lib/fuzzy_match.rb:198:Array + 1 ./benchmark/../lib/fuzzy_match.rb:193:String + 1 ./benchmark/../lib/fuzzy_match.rb:192:Array + 1 ./benchmark/../lib/fuzzy_match.rb:187:String + 1 ./benchmark/../lib/fuzzy_match.rb:186:Array diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/memory.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/memory.rb new file mode 100644 index 0000000..4514d9a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/benchmark/memory.rb @@ -0,0 +1,53 @@ +#!/usr/bin/env ruby + +require 'rubygems' +require 'memprof' +require 'bundler' +Bundler.setup +require 'remote_table' +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) +require 'fuzzy_match' + +# messily stolen from the bts example + +# The records that your dictionary will return. +# (Example) A table of aircraft as defined by the U.S. Bureau of Transportation Statistics +HAYSTACK = RemoteTable.new :url => "file://#{File.expand_path('../../examples/bts_aircraft/number_260.csv', __FILE__)}", :select => lambda { |record| record['Aircraft Type'].to_i.between?(1, 998) and record['Manufacturer'].present? } + +# A reader used to convert every record (which could be an object of any type) into a string that will be used for similarity. +# (Example) Combine the make and model into something like "boeing 747" +# Note the downcase! 
+HAYSTACK_READER = lambda { |record| "#{record['Manufacturer']} #{record['Long Name']}".downcase } + +# Whether to even bother trying to find a match for something without an explicit group +# (Example) False, which is the default, which means we have more work to do +MUST_MATCH_GROUPING = false + +# Groupings +# (Example) We made these by trial and error +GROUPINGS = RemoteTable.new(:url => "file://#{File.expand_path("../../examples/bts_aircraft/groupings.csv", __FILE__)}", :headers => :first_row).map { |row| row['regexp'] } + +# Normalizers +# (Example) We made these by trial and error +NORMALIZERS = RemoteTable.new(:url => "file://#{File.expand_path("../../examples/bts_aircraft/normalizers.csv", __FILE__)}", :headers => :first_row).map { |row| row['regexp'] } + +# Identities +# (Example) We made these by trial and error +IDENTITIES = RemoteTable.new(:url => "file://#{File.expand_path("../../examples/bts_aircraft/identities.csv", __FILE__)}", :headers => :first_row).map { |row| row['regexp'] } + +FINAL_OPTIONS = { + :read => HAYSTACK_READER, + :must_match_grouping => MUST_MATCH_GROUPING, + :normalizers => NORMALIZERS, + :identities => IDENTITIES, + :groupings => GROUPINGS +} + +Memprof.start + +d = FuzzyMatch.new HAYSTACK, FINAL_OPTIONS +record = d.find('boeing 707(100)', :gather_last_result => false) + +Memprof.stats +Memprof.stop diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/bin/fuzzy_match b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/bin/fuzzy_match new file mode 100755 index 0000000..b6a6b34 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/bin/fuzzy_match @@ -0,0 +1,106 @@ +#!/usr/bin/env ruby + +if File.exist?('Gemfile') + require 'bundler/setup' +end +if ENV['PROFILE'] == 'true' + require 'perftools' +end +# PerfTools::CpuProfiler.start("profile_data") do +require 'fuzzy_match' +require 'fuzzy_match/version' + +require 'active_support/core_ext' +require 'remote_table' +require 'thor' +require 'to_regexp' + +class FuzzyMatch + class Cli < ::Thor + desc :match, "Print out matches between A and B, where A is haystack and B is a bunch of needles." + method_option :csv, :default => false, :type => :boolean, :desc => "CSV output" + method_option :a_col, :default => 0, :type => :string, :desc => "Column name in A. Defaults to first column." + method_option :b_col, :default => 0, :type => :string, :desc => "Column name in B. Defaults to first column." + method_option :downcase, :default => true, :type => :boolean, :desc => "Whether to downcase everything (except regexes, where you have to do /foo/i)" + method_option :groupings, :default => nil, :type => :string, :desc => "Spreadsheet with groupings - no headers, multi-part groupings on the same row" + method_option :rules, :default => nil, :type => :string, :desc => "Spreadsheet with headers: stop_words, identities, find_options. Listing a find_option like must_match_grouping makes it true." + method_option :explain, :default => false, :type => :boolean + method_option :grep, :default => nil, :type => :string + method_option :limit, :default => 1.0/0, :type => :numeric + def match(a_url, b_url) + puts "Checking matches using fuzzy_match version #{FuzzyMatch::VERSION}..." 
+ fz = mkfz a_url + b = load_b b_url + if ENV['PROFILE'] == 'true' + require 'perftools' + PerfTools::CpuProfiler.start("profile.bin") { report(fz, b) } + system "pprof.rb --text profile.bin" + `pprof.rb --gif profile.bin > profile.gif` + else + report fz, b + end + end + + private + + def report(fz, b) + b.each do |b_val| + if options.explain + fz.explain + else + a_val = fz.find b_val + if options.csv + # puts [ b_val.ljust(50), a_val ].join('-> ') + puts [ b_val, a_val ].to_csv + else + puts %{\nB: #{b_val}\nA: #{a_val}} + end + end + end + end + + def load_b(b_url) + b_options = options.b_col.is_a?(String) ? { headers: :first_row } : { headers: false } + if options[:grep] + regexp = options[:grep].to_regexp(detect: true) + b_options[:select] = lambda { |row| regexp =~ row[options.b_col] } + end + b = RemoteTable.new(b_url, b_options).to_a + limit = [options.limit, b.length].min + b.first(limit).map do |row| + b_val = row[options.b_col] + b_val.downcase! if options.downcase + b_val + end + end + + def mkfz(a_url) + a_options = options.a_col.is_a?(String) ? { headers: :first_row } : { headers: false } + a = RemoteTable.new(a_url, a_options).map { |row| row[options.a_col] } + a.map!(&:downcase) if options.downcase + FuzzyMatch.new a, fz_options + end + + def fz_options + memo = {} + if options.groupings + memo[:groupings] = RemoteTable.new(options.groupings, :headers => false).map do |row| + row.to_a.select(&:present?).map { |v| v.to_regexp(detect: true) } + end + end + if options.rules + t = RemoteTable.new(options.rules, :headers => :first_row) + find_options = t.rows.map { |row| row['find_options'] } + memo.merge!( + identities: t.rows.map { |row| row['identities'] }.select(&:present?).map { |v| v.to_regexp(detect: true) }, + stop_words: t.rows.map { |row| row['stop_words'] }.select(&:present?).map { |v| v.to_regexp(detect: true) }, + must_match_grouping: find_options.include?('must_match_grouping'), + must_match_at_least_one_word: find_options.include?('must_match_at_least_one_word'), + ) + end + memo + end + end +end + +FuzzyMatch::Cli.start diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/fuzzy_match.gemspec b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/fuzzy_match.gemspec new file mode 100644 index 0000000..5f782d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/fuzzy_match.gemspec @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +require File.expand_path("../lib/fuzzy_match/version", __FILE__) + +Gem::Specification.new do |s| + s.name = "fuzzy_match" + s.version = FuzzyMatch::VERSION + s.authors = ["Seamus Abshere"] + s.email = ["seamus@abshere.net"] + s.homepage = "https://github.com/seamusabshere/fuzzy_match" + s.summary = %Q{Find a needle in a haystack using string similarity and (optionally) regexp rules. Replaces loose_tight_dictionary.} + s.description = %Q{Find a needle in a haystack using string similarity and (optionally) regexp rules. 
Replaces loose_tight_dictionary.} + + s.rubyforge_project = "fuzzy_match" + + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- test/*`.split("\n") + s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } + s.require_paths = ["lib"] + + # needed if you use FuzzyMatch::CachedResult + s.add_development_dependency 'active_record_inline_schema', '>=0.4.0' + + # development dependencies + s.add_development_dependency 'pry' + s.add_development_dependency 'rspec-core' + s.add_development_dependency 'rspec-expectations' + s.add_development_dependency 'rspec-mocks' + s.add_development_dependency 'activerecord', '>=3' + s.add_development_dependency 'mysql2' + s.add_development_dependency 'cohort_analysis' + s.add_development_dependency 'weighted_average' + s.add_development_dependency 'yard' + s.add_development_dependency 'amatch' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/groupings-screenshot.png b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/groupings-screenshot.png new file mode 100644 index 0000000..23e68bd Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/groupings-screenshot.png differ diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/highlevel.graffle b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/highlevel.graffle new file mode 100644 index 0000000..6f974a5 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/highlevel.graffle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/highlevel.png b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/highlevel.png new file mode 100644 index 0000000..b39970e Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/highlevel.png differ diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match.rb new file mode 100644 index 0000000..0f87c22 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match.rb @@ -0,0 +1,335 @@ +require 'fuzzy_match/rule' +require 'fuzzy_match/rule/grouping' +require 'fuzzy_match/rule/identity' +require 'fuzzy_match/result' +require 'fuzzy_match/record' +require 'fuzzy_match/similarity' +require 'fuzzy_match/score' + +# See the README for more information. +class FuzzyMatch + class << self + def engine + @engine + end + + def engine=(alt_engine) + @engine = alt_engine + end + + def score_class + case engine + when :pure_ruby + Score::PureRuby + when :amatch + Score::Amatch + else + raise ::ArgumentError, "[fuzzy_match] #{engine.inspect} is not a recognized engine." 
+ end + end + end + + DEFAULT_ENGINE = :pure_ruby + + #TODO refactor at least all the :find_X things + DEFAULT_OPTIONS = { + :must_match_grouping => false, + :must_match_at_least_one_word => false, + :gather_last_result => false, + :find_all => false, + :find_all_with_score => false, + :threshold => 0, + :find_best => false, + :find_with_score => false, + } + + self.engine = DEFAULT_ENGINE + + attr_reader :haystack + attr_reader :groupings + attr_reader :identities + attr_reader :stop_words + attr_accessor :read + attr_reader :default_options + + # haystack - a bunch of records that will compete to see who best matches the needle + # + # Rules (can only be specified at initialization or by using a setter) + # * :identities - regexps + # * :groupings - regexps + # * :stop_words - regexps + # * :read - how to interpret each record in the 'haystack', either a Proc or a symbol + # + # Options (can be specified at initialization or when calling #find) + # * :must_match_grouping - don't return a match unless the needle fits into one of the groupings you specified + # * :must_match_at_least_one_word - don't return a match unless the needle shares at least one word with the match + # * :gather_last_result - enable last_result + # * :threshold - set a score threshold below which not to return results (not generally recommended - please test the results of setting a threshold thoroughly - one set of results and their scores probably won't be enough to determine the appropriate number). Only checked against the Pair Distance score and ignored when one string or the other is of length 1. + def initialize(haystack, options_and_rules = {}) + o = options_and_rules.dup + + # rules + @read = o.delete(:read) || o.delete(:haystack_reader) + @groupings = (o.delete(:groupings) || o.delete(:blockings) || []).map { |regexp| Rule::Grouping.make(regexp) }.flatten + @identities = (o.delete(:identities) || []).map { |regexp| Rule::Identity.new(regexp) } + @stop_words = o.delete(:stop_words) || [] + + # options + if deprecated = o.delete(:must_match_blocking) + o[:must_match_grouping] = deprecated + end + @default_options = DEFAULT_OPTIONS.merge(o).freeze + + @haystack = haystack.map { |original| Record.new original, :stop_words => @stop_words, :read => @read } + end + + def last_result + @last_result or raise("You can't access the last result until you've run a find with :gather_last_result => true") + end + + # Return everything in sorted order + def find_all(needle, options = {}) + options = options.merge(:find_all => true) + find needle, options + end + + # Return the top results with the same score + def find_best(needle, options = {}) + options = options.merge(:find_best => true) + find needle, options + end + + # Return everything in sorted order with score + def find_all_with_score(needle, options = {}) + options = options.merge(:find_all_with_score => true) + find needle, options + end + + # Return one with score + def find_with_score(needle, options = {}) + options = options.merge(:find_with_score => true) + find needle, options + end + + def find(needle, options = {}) + options = default_options.merge options + + threshold = options[:threshold] + gather_last_result = options[:gather_last_result] + is_find_all_with_score = options[:find_all_with_score] + is_find_with_score = options[:find_with_score] + is_find_best = options[:find_best] + is_find_all = options[:find_all] || is_find_all_with_score || is_find_best + must_match_grouping = options[:must_match_grouping] + must_match_at_least_one_word = 
options[:must_match_at_least_one_word] + + if gather_last_result + @last_result = Result.new + last_result.read = read + last_result.haystack = haystack + last_result.options = options + end + + if gather_last_result + last_result.identities = identities + last_result.groupings = groupings + last_result.stop_words = stop_words + end + + needle = case needle + when String + Record.new needle + else + Record.new needle, :read => read + end + + if gather_last_result + last_result.needle = needle + end + + if groupings.any? + first_grouping = groupings.detect { |grouping| grouping.xmatch? needle } + if gather_last_result + if first_grouping + last_result.timeline << "Grouping: #{first_grouping.inspect}" + else + last_result.timeline << "No grouping." + end + end + end + + if must_match_grouping and not first_grouping + if gather_last_result + last_result.timeline << <<-EOS +The needle didn't match any of the #{groupings.length} groupings, which was a requirement. +\t#{groupings.map(&:inspect).join("\n\t")} +EOS + end + if is_find_all + return [] + else + return nil + end + end + + if groupings.any? and not first_grouping + passed_grouping_requirement = haystack.reject do |straw| + groupings.any? { |grouping| grouping.xmatch? straw } + end + else + passed_grouping_requirement = haystack + end + + if must_match_at_least_one_word + passed_word_requirement = passed_grouping_requirement.select do |straw| + (needle.words & straw.words).any? + end + if gather_last_result + last_result.timeline << <<-EOS +Since :must_match_at_least_one_word => true, the competition was reduced to records sharing at least one word with the needle. +\tNeedle words: #{needle.words.map(&:inspect).join(', ')} +\tPassed (first 3): #{passed_word_requirement[0,3].map(&:inspect).join(', ')} +\tFailed (first 3): #{(passed_grouping_requirement-passed_word_requirement)[0,3].map(&:inspect).join(', ')} +EOS + end + else + passed_word_requirement = passed_grouping_requirement + end + + if first_grouping + joint = passed_word_requirement.select do |straw| + first_grouping.xjoin? needle, straw + end + # binding.pry + if gather_last_result + last_result.timeline << <<-EOS +Since there were groupings, the competition was reduced to #{joint.length} records in the same group as the needle. +\t#{joint.map(&:inspect).join("\n\t")} +EOS + end + else + joint = passed_word_requirement.dup + end + + if joint.none? + if must_match_grouping + if gather_last_result + last_result.timeline << <<-EOS +Since :must_match_at_least_one_word => true and none of the competition was in the same group as the needle, the search stopped. +EOS + end + if is_find_all + return [] + else + return nil + end + else + joint = passed_word_requirement.dup + end + end + + if identities.any? + possibly_identical = joint.select do |straw| + identities.all? do |identity| + answer = identity.identical? needle, straw + answer.nil? 
or answer == true + end + end + if gather_last_result + last_result.timeline << <<-EOS +Since there were identities, the competition was reduced to records that might be identical to the needle (in other words, are not certainly different) +\tIdentities (first 10 of #{identities.length}): #{identities[0,9].map(&:inspect).join(', ')} +\tPassed (first 10 of #{possibly_identical.length}): #{possibly_identical[0,9].map(&:inspect).join(', ')} +\tFailed (first 10 of #{(joint-possibly_identical).length}): #{(joint-possibly_identical)[0,9].map(&:inspect).join(', ')} +EOS + end + else + possibly_identical = joint.dup + end + + similarities = possibly_identical.map { |straw| needle.similarity straw }.sort.reverse + + if gather_last_result + last_result.timeline << <<-EOS +The competition was sorted in order of similarity to the needle. +\t#{similarities[0,9].map { |s| "#{s.record2.similarity(needle).inspect}" }.join("\n\t")} +EOS + end + + if is_find_all_with_score + memo = [] + similarities.each do |similarity| + if similarity.satisfy?(needle, threshold) + bs = similarity.best_score + memo << [similarity.record2.original, bs.dices_coefficient_similar, bs.levenshtein_similar] + end + end + return memo + end + + if is_find_best + memo = [] + best_bs = nil + similarities.each do |similarity| + if similarity.satisfy?(needle, threshold) + bs = similarity.best_score + best_bs ||= bs + if bs >= best_bs + memo << similarity.record2.original + else + break + end + end + end + return memo + end + + if is_find_all + memo = [] + similarities.each do |similarity| + if similarity.satisfy?(needle, threshold) + memo << similarity.record2.original + end + end + return memo + end + + best_similarity = similarities.first + winner = nil + + if best_similarity and best_similarity.satisfy?(needle, threshold) + winner = best_similarity.record2.original + if gather_last_result + last_result.winner = winner + last_result.score = best_similarity.best_score.dices_coefficient_similar + last_result.timeline << <<-EOS +A winner was determined because the Dice's Coefficient similarity (#{'%0.5f' % best_similarity.best_score.dices_coefficient_similar}) is greater than zero or because it shared a word with the needle. +EOS + end + if is_find_with_score + bs = best_similarity.best_score + return [winner, bs.dices_coefficient_similar, bs.levenshtein_similar] + else + return winner + end + elsif gather_last_result + best_similarity_record = if best_similarity and best_similarity.record2 + best_similarity.record2.original + end + last_result.timeline << <<-EOS +No winner assigned because the score of the best similarity (#{best_similarity_record.inspect}) was zero and it didn't match any words with the needle (#{needle.inspect}). +EOS + end + + nil # ugly + end + + # Explain is like mysql's EXPLAIN command. You give it a needle and it tells you about how it was located (successfully or not) in the haystack. 
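
All of the convenience finders above funnel into `find` by merging in one of the option flags read at the top of the method. A brief sketch of how those flags change the return shape, using haystacks and expected values taken from the gem's own `fuzzy_match_spec.rb`:

```ruby
require 'fuzzy_match'

matcher = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ],
  :groupings => [ /X/, /Y/ ], :must_match_grouping => true

matcher.find 'X'                 # => "X"
matcher.find_all 'X'             # => ["X", "X22"]  (sorted, best first)
matcher.find_with_score 'X'      # => ["X", 1, 1]   (winner, Dice's coefficient, Levenshtein)
matcher.find_all_with_score 'X'  # => [["X", 1, 1], ["X22", 0, 0.333...]]

# :gather_last_result keeps a FuzzyMatch::Result around for inspection
cars = FuzzyMatch.new %w{ NISSAN HONDA }
cars.find 'MISSAM', :gather_last_result => true
cars.last_result.winner # => "NISSAN"
cars.last_result.score  # => 0.6
```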
+ # + # d = FuzzyMatch.new ['737', '747', '757' ] + # d.explain 'boeing 737-100' + def explain(needle, options = {}) + find needle, options.merge(:gather_last_result => true) + last_result.explain + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/cached_result.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/cached_result.rb new file mode 100644 index 0000000..e9d46bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/cached_result.rb @@ -0,0 +1,79 @@ +require 'active_record_inline_schema' + +class FuzzyMatch + class CachedResult < ::ActiveRecord::Base + if ::ActiveRecord::VERSION::STRING >= '3.2' + self.table_name = :fuzzy_match_cached_results + else + set_table_name :fuzzy_match_cached_results + end + + class << self + def setup(from_scratch = false) + if from_scratch + connection.drop_table :fuzzy_match_cached_results rescue nil + end + auto_upgrade! + end + end + + col :a_class + col :a + col :b_class + col :b + add_index [:a_class, :b_class, :a], :name => 'aba' + add_index [:a_class, :b_class, :b], :name => 'abb' + add_index [:a_class, :b_class, :a, :b], :name => 'abab' + + module ActiveRecordBaseExtension + # required options: + # :primary_key - what to call on this class + # :foreign_key - what to call on the other class + def cache_fuzzy_match_with(other_active_record_class, options) + other = other_active_record_class.to_s.singularize.camelcase + me = name + if me < other + a = me + b = other + primary_key = :a + foreign_key = :b + else + a = other + b = me + primary_key = :b + foreign_key = :a + end + + # def aircraft + define_method other.underscore.pluralize do + other.constantize.where options[:foreign_key] => send("#{other.underscore.pluralize}_foreign_keys") + end + + # def flight_segments_foreign_keys + define_method "#{other.underscore.pluralize}_foreign_keys" do + fz = ::FuzzyMatch::CachedResult.arel_table + sql = fz.project(fz[foreign_key]).where(fz["#{primary_key}_class".to_sym].eq(self.class.name).and(fz["#{foreign_key}_class".to_sym].eq(other)).and(fz[primary_key].eq(send(options[:primary_key])))).to_sql + connection.select_values sql + end + + # def cache_aircraft! + define_method "cache_#{other.underscore.pluralize}!" do + other_class = other.constantize + primary_key_value = send options[:primary_key] + other_class.fuzzy_match.find_all(primary_key_value).each do |other_instance| + attrs = {} + attrs[primary_key] = primary_key_value + attrs["#{primary_key}_class"] = self.class.name + attrs[foreign_key] = other_instance.send options[:foreign_key] + attrs["#{foreign_key}_class"] = other + unless ::FuzzyMatch::CachedResult.exists? attrs + ::FuzzyMatch::CachedResult.create! attrs + end + end + end + end + end + end +end + +::ActiveRecord::Base.extend ::FuzzyMatch::CachedResult::ActiveRecordBaseExtension diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/record.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/record.rb new file mode 100644 index 0000000..db13b12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/record.rb @@ -0,0 +1,58 @@ +class FuzzyMatch + # Records are the tokens that are passed around when doing scoring and optimizing. 
+ class Record #:nodoc: all + # "Foo's" is one word + # "North-west" is just one word + # "Bolivia," is just Bolivia + WORD_BOUNDARY = %r{\W*(?:\s+|$)} + EMPTY = [].freeze + BLANK = ''.freeze + + attr_reader :original + attr_reader :read + attr_reader :stop_words + + def initialize(original, options = {}) + @original = original + @read = options[:read] + @stop_words = options.fetch(:stop_words, EMPTY) + end + + def inspect + "w(#{clean.inspect})" + end + + def clean + @clean ||= begin + memo = whole.dup + stop_words.each do |stop_word| + memo.gsub! stop_word, BLANK + end + memo.strip.freeze + end + end + + def words + @words ||= clean.downcase.split(WORD_BOUNDARY).freeze + end + + def similarity(other) + Similarity.new self, other + end + + def whole + @whole ||= case read + when ::NilClass + original + when ::Numeric, ::String + original[read] + when ::Proc + read.call original + when ::Symbol + original.respond_to?(read) ? original.send(read) : original[read] + else + raise "Expected nil, a proc, or a symbol, got #{read.inspect}" + end.to_s.strip.freeze + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/result.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/result.rb new file mode 100644 index 0000000..bfaded9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/result.rb @@ -0,0 +1,45 @@ +# encoding: utf-8 +require 'erb' +require 'pp' + +class FuzzyMatch + class Result #:nodoc: all + EXPLANATION = <<-ERB +##################################################### +# SUMMARY +##################################################### + +Needle: <%= needle.inspect %> +Match: <%= winner.inspect %> + +##################################################### +# OPTIONS +##################################################### + +<%= PP.pp(options, '') %> + +<% timeline.each_with_index do |event, index| %> +(<%= index+1 %>) <%= event %> +<% end %> +ERB + + attr_accessor :needle + attr_accessor :read + attr_accessor :haystack + attr_accessor :options + attr_accessor :groupings + attr_accessor :identities + attr_accessor :stop_words + attr_accessor :winner + attr_accessor :score + attr_reader :timeline + + def initialize + @timeline = [] + end + + def explain + $stdout.puts ::ERB.new(EXPLANATION, 0, '%<').result(binding) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule.rb new file mode 100644 index 0000000..400c35c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule.rb @@ -0,0 +1,17 @@ +class FuzzyMatch + # A rule characterized by a regexp. Abstract. + class Rule + attr_reader :regexp + + def initialize(regexp) + unless regexp.is_a?(::Regexp) + raise ArgumentError, "[FuzzyMatch] Rules must be set with Regexp objects, but got #{regexp.inspect} (#{regexp.class.name})" + end + @regexp = regexp + end + + def ==(other) + other.class == self.class and regexp == other.regexp + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/grouping.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/grouping.rb new file mode 100644 index 0000000..2bcc4aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/grouping.rb @@ -0,0 +1,90 @@ +# require 'pry' +class FuzzyMatch + class Rule + # "Record linkage typically involves two main steps: grouping and scoring..." 
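
The case statement in `Record#whole` above is what lets a haystack hold plain strings, arrays, hashes or arbitrary objects. A small sketch of the four supported `:read` shapes (the hotel strings here are made up for illustration, and the gem is assumed to be loaded):

```ruby
require 'fuzzy_match'

# nil reader: the record itself is the string
FuzzyMatch::Record.new('Grand Hotel').whole                              # => "Grand Hotel"

# Numeric or String reader: used as an index into the record
FuzzyMatch::Record.new(['Grand Hotel', 'Berlin'], :read => 0).whole      # => "Grand Hotel"

# Symbol reader: sent as a method if the record responds to it, otherwise used as a key
FuzzyMatch::Record.new({ :name => 'Grand Hotel' }, :read => :name).whole # => "Grand Hotel"

# Proc reader: called with the record
FuzzyMatch::Record.new({ :name => 'Grand Hotel' },
                       :read => lambda { |r| r[:name] }).whole           # => "Grand Hotel"
```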
+ # http://en.wikipedia.org/wiki/Record_linkage + # + # Groupings effectively divide up the haystack into groups that match a pattern + # + # A grouping (formerly known as a blocking) comes into effect when a str matches. + # Then the needle must also match the grouping's regexp. + class Grouping < Rule + class << self + def make(regexps) + case regexps + when ::Regexp + new regexps + when ::Array + chain = regexps.flatten.map { |regexp| new regexp } + if chain.length == 1 + chain[0] # not really a chain after all + else + chain.each { |grouping| grouping.chain = chain } + chain + end + else + raise ArgumentError, "[fuzzy_match] Groupings should be specified as single regexps or an array of regexps (got #{regexps.inspect})" + end + end + end + + attr_accessor :chain + + def inspect + memo = [] + memo << "#{regexp.inspect}" + if chain + memo << "(#{chain.find_index(self)} of #{chain.length})" + end + memo.join ' ' + end + + def xmatch?(record) + if primary? + match?(record) and subs.none? { |sub| sub.match?(record) } + else + match?(record) and primary.match?(record) + end + end + + def xjoin?(needle, straw) + if primary? + join?(needle, straw) and subs.none? { |sub| sub.match?(straw) } # maybe xmatch here? + else + join?(needle, straw) and primary.match?(straw) + end + end + + protected + + def primary? + chain ? (primary == self) : true + # not chain or primary == self + end + + def primary + chain ? chain[0] : self + end + + def subs + chain ? chain[1..-1] : [] + end + + def match?(record) + !!(regexp.match(record.whole)) + end + + def join?(needle, straw) + if straw_match_data = regexp.match(straw.whole) + if needle_match_data = regexp.match(needle.whole) + straw_match_data.captures.join.downcase == needle_match_data.captures.join.downcase + else + false + end + else + nil + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/identity.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/identity.rb new file mode 100644 index 0000000..cf08e61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/rule/identity.rb @@ -0,0 +1,40 @@ +class FuzzyMatch + class Rule + # Identities take effect when needle and haystack both match a regexp + # Then the captured part of the regexp has to match exactly + class Identity < Rule + attr_reader :proc + + def initialize(regexp_or_proc) + case regexp_or_proc + when Regexp + @regexp = regexp_or_proc + when Proc + @proc = regexp_or_proc + else + raise ArgumentError, "[FuzzyMatch] Identity must be set with either Regexp objects or Procs, but got #{regexp_or_proc.inspect} (#{regexp_or_proc.class.name})" + end + end + + def ==(other) + other.class == self.class and (regexp ? regexp == other.regexp : proc == other.proc) + end + + # Two strings are "identical" if they both match this identity and the captures are equal. + # + # Only returns true/false if both strings match the regexp. + # Otherwise returns nil. 
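
`Grouping.make` turns an array of regexps into a chain whose first member is the primary group and whose remaining members are subgroups, and `Identity#identical?` answers true, false or nil as described in the comment above. A short sketch mirroring the behaviour exercised in the gem's `grouping_spec.rb` and `identity_spec.rb`:

```ruby
require 'fuzzy_match'

r = lambda { |str| FuzzyMatch::Record.new(str) }

# An array of regexps becomes a chain: /hyatt/ is primary, /grand/ and /garden/ are subgroups
hyatt, grand, garden = FuzzyMatch::Rule::Grouping.make([/hyatt/, /grand/, /garden/])

hyatt.xmatch?(r.call('hyatt garden'))                          # => false (a subgroup also matches, so the primary defers)
garden.xjoin?(r.call('hyatt garden'), r.call('hyatt garden'))  # => true
grand.xjoin?(r.call('grund hyatt'), r.call('grand hyatt'))     # => false

# Identities compare the captured parts of the regexp on both sides
identity = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)}
identity.identical?(r.call('A1'), r.call('A 1foobar'))  # => true
identity.identical?(r.call('A1'), r.call('A 2foobar'))  # => false
identity.identical?(r.call('B1'), r.call('A 2foobar'))  # => nil (no information either way)
```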
+ def identical?(record1, record2) + if regexp + if (str1_match_data = regexp.match(record1.whole)) and (str2_match_data = regexp.match(record2.whole)) + str1_match_data.captures.join.downcase == str2_match_data.captures.join.downcase + else + nil + end + else + proc.call record1.original, record2.original + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score.rb new file mode 100644 index 0000000..e982464 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score.rb @@ -0,0 +1,30 @@ +require 'fuzzy_match/score/pure_ruby' +require 'fuzzy_match/score/amatch' + +class FuzzyMatch + class Score + include Comparable + + attr_reader :str1 + attr_reader :str2 + + def initialize(str1, str2) + @str1 = str1.downcase + @str2 = str2.downcase + end + + def inspect + %{(dice=#{"%0.5f" % dices_coefficient_similar},lev=#{"%0.5f" % levenshtein_similar})} + end + + def <=>(other) + a = dices_coefficient_similar + b = other.dices_coefficient_similar + if a.nan? or b.nan? or (by_dices_coefficient = (a <=> b)) == 0 + levenshtein_similar <=> other.levenshtein_similar + else + by_dices_coefficient + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/amatch.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/amatch.rb new file mode 100644 index 0000000..25180b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/amatch.rb @@ -0,0 +1,21 @@ +class FuzzyMatch + class Score + # be sure to `require 'amatch'` before you use this class + class Amatch < Score + + def dices_coefficient_similar + @dices_coefficient_similar ||= if str1 == str2 + 1.0 + elsif str1.length == 1 and str2.length == 1 + 0.0 + else + str1.pair_distance_similar str2 + end + end + + def levenshtein_similar + @levenshtein_similar ||= str1.levenshtein_similar str2 + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/pure_ruby.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/pure_ruby.rb new file mode 100644 index 0000000..55963da --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/score/pure_ruby.rb @@ -0,0 +1,94 @@ +class FuzzyMatch + class Score + class PureRuby < Score + + SPACE = ' ' + + # http://stackoverflow.com/questions/653157/a-better-similarity-ranking-algorithm-for-variable-length-strings + def dices_coefficient_similar + @dices_coefficient_similar ||= begin + if str1 == str2 + 1.0 + elsif str1.length == 1 and str2.length == 1 + 0.0 + else + pairs1 = (0..str1.length-2).map do |i| + str1[i,2] + end.reject do |pair| + pair.include? SPACE + end + pairs2 = (0..str2.length-2).map do |i| + str2[i,2] + end.reject do |pair| + pair.include? SPACE + end + union = pairs1.size + pairs2.size + intersection = 0 + pairs1.each do |p1| + 0.upto(pairs2.size-1) do |i| + if p1 == pairs2[i] + intersection += 1 + pairs2.slice!(i) + break + end + end + end + (2.0 * intersection) / union + end + end + end + + # extracted/adapted from the text gem version 1.0.2 + # normalization added for utf-8 strings + # lib/text/levenshtein.rb + def levenshtein_similar + @levenshtein_similar ||= begin + if utf8? 
+ unpack_rule = 'U*' + else + unpack_rule = 'C*' + end + s = str1.unpack(unpack_rule) + t = str2.unpack(unpack_rule) + n = s.length + m = t.length + + if n == 0 or m == 0 + 0.0 + else + d = (0..m).to_a + x = nil + (0...n).each do |i| + e = i+1 + (0...m).each do |j| + cost = (s[i] == t[j]) ? 0 : 1 + x = [ + d[j+1] + 1, # insertion + e + 1, # deletion + d[j] + cost # substitution + ].min + d[j] = e + e = x + end + d[m] = x + end + # normalization logic from https://github.com/flori/amatch/blob/master/ext/amatch_ext.c#L301 + # if (b_len > a_len) { + # result = rb_float_new(1.0 - ((double) v[p][b_len]) / b_len); + # } else { + # result = rb_float_new(1.0 - ((double) v[p][b_len]) / a_len); + # } + 1.0 - x.to_f / [n, m].max + end + end + end + + private + + def utf8? + return @utf8_query if defined?(@utf8_query) + @utf8_query = (defined?(::Encoding) ? str1.encoding.to_s : $KCODE).downcase.start_with?('u') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/similarity.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/similarity.rb new file mode 100644 index 0000000..1692ad6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/similarity.rb @@ -0,0 +1,39 @@ +class FuzzyMatch + class Similarity + attr_reader :record1 + attr_reader :record2 + + def initialize(record1, record2) + @record1 = record1 + @record2 = record2 + end + + def <=>(other) + by_score = best_score <=> other.best_score + if by_score == 0 + original_weight <=> other.original_weight + else + by_score + end + end + + def best_score + @best_score ||= FuzzyMatch.score_class.new(record1.clean, record2.clean) + end + + def satisfy?(needle, threshold) + best_score.dices_coefficient_similar > threshold or + ((record2.clean.length < 3 or needle.clean.length < 3) and best_score.levenshtein_similar > 0) or + (needle.words & record2.words).any? 
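
As a concrete illustration of the two scores that `Similarity#satisfy?` consumes, here is the pure-Ruby engine (the default) applied to the 'RATZ'/'RITZ' pair used in the gem's specs; the arithmetic in the comments follows the bigram and Levenshtein code above:

```ruby
require 'fuzzy_match'

# FuzzyMatch.score_class is FuzzyMatch::Score::PureRuby under the default :pure_ruby engine
score = FuzzyMatch.score_class.new('RATZ', 'RITZ')

# Bigrams (downcased): "ratz" -> ra, at, tz   "ritz" -> ri, it, tz
# shared pairs = 1 ("tz"), total pairs = 6  =>  2 * 1 / 6
score.dices_coefficient_similar # => 0.3333333333333333

# One substitution ("a" -> "i") out of max length 4  =>  1 - 1/4
score.levenshtein_similar       # => 0.75

score.inspect                   # => "(dice=0.33333,lev=0.75000)"
```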
+ end + + def inspect + %{#{record2.clean.inspect} ~ #{record1.clean.inspect} => #{best_score.inspect}} + end + + # Weight things towards short original strings + def original_weight + @original_weight ||= (1.0 / (record1.clean.length * record2.clean.length)) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/version.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/version.rb new file mode 100644 index 0000000..3882a3f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/lib/fuzzy_match/version.rb @@ -0,0 +1,3 @@ +class FuzzyMatch + VERSION = '2.0.4' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/amatch_spec.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/amatch_spec.rb new file mode 100644 index 0000000..19cd465 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/amatch_spec.rb @@ -0,0 +1,17 @@ +unless RUBY_PLATFORM == 'java' + require 'spec_helper' + require 'amatch' + + describe FuzzyMatch do + describe %{when using the :amatch string similarity engine} do + before do + $testing_amatch = true + FuzzyMatch.engine = :amatch + end + after do + $testing_amatch = false + FuzzyMatch.engine = nil + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/cache_spec.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/cache_spec.rb new file mode 100644 index 0000000..dfa4c25 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/cache_spec.rb @@ -0,0 +1,132 @@ +require 'spec_helper' + +require 'active_record' +require 'cohort_analysis' +require 'weighted_average' + +ActiveRecord::Base.establish_connection( + 'adapter' => 'mysql2', + 'database' => 'fuzzy_match_test', + 'username' => 'root', + 'password' => 'password' +) + +# require 'logger' +# ActiveRecord::Base.logger = Logger.new $stderr +# ActiveRecord::Base.logger.level = Logger::DEBUG + +ActiveSupport::Inflector.inflections do |inflect| + inflect.uncountable 'aircraft' +end + +require 'fuzzy_match/cached_result' + +::FuzzyMatch::CachedResult.setup(true) + +class Aircraft < ActiveRecord::Base + MUTEX = ::Mutex.new + self.primary_key = 'icao_code' + + cache_fuzzy_match_with :flight_segments, :primary_key => :aircraft_description, :foreign_key => :aircraft_description + + def aircraft_description + [manufacturer_name, model_name].compact.join(' ') + end + + def self.fuzzy_match + @fuzzy_match || MUTEX.synchronize do + @fuzzy_match||= FuzzyMatch.new(all, :read => ::Proc.new { |straw| straw.aircraft_description }) + end + end + + def self.create_table + connection.drop_table(:aircraft) rescue nil + connection.execute %{ +CREATE TABLE `aircraft` ( + `icao_code` varchar(255) DEFAULT NULL, + `manufacturer_name` varchar(255) DEFAULT NULL, + `model_name` varchar(255) DEFAULT NULL, + PRIMARY KEY (`icao_code`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + } + reset_column_information + end +end + +class FlightSegment < ActiveRecord::Base + self.primary_key = 'row_hash' + + cache_fuzzy_match_with :aircraft, :primary_key => :aircraft_description, :foreign_key => :aircraft_description + + def self.create_table + connection.drop_table(:flight_segments) rescue nil + connection.execute %{ +CREATE TABLE `flight_segments` ( + `row_hash` varchar(255) NOT NULL DEFAULT '', + `aircraft_description` varchar(255) DEFAULT NULL, + `passengers` int(11) DEFAULT NULL, + `seats` int(11) DEFAULT NULL, + PRIMARY KEY (`row_hash`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + } + end +end + 
+FlightSegment.create_table +Aircraft.create_table + +a = Aircraft.new +a.icao_code = 'B742' +a.manufacturer_name = 'Boeing' +a.model_name = '747-200' +a.save! + +fs = FlightSegment.new +fs.row_hash = 'madison to chicago' +fs.aircraft_description = 'BORING 747200' +fs.passengers = 10 +fs.seats = 10 +fs.save! + +fs = FlightSegment.new +fs.row_hash = 'madison to minneapolis' +fs.aircraft_description = 'bing 747' +fs.passengers = 100 +fs.seats = 5 +fs.save! + +FlightSegment.all.each do |fs| + fs.cache_aircraft! +end + +describe FuzzyMatch::CachedResult do + it %{joins aircraft to flight segments} do + aircraft = Aircraft.find('B742') + aircraft.flight_segments.count.should == 2 + end + + it %{allow simple SQL operations} do + aircraft = Aircraft.find('B742') + aircraft.flight_segments.sum(:passengers).should == 110 + end + + it %{works with weighted_average} do + aircraft = Aircraft.find('B742') + aircraft.flight_segments.weighted_average(:seats, :weighted_by => :passengers).should == 5.45455 + end + + it %{works with cohort_scope (albeit rather clumsily)} do + aircraft = Aircraft.find('B742') + cohort = FlightSegment.cohort({:aircraft_description => aircraft.flight_segments_foreign_keys}, :minimum_size => 2) + FlightSegment.connection.select_value(cohort.project('COUNT(*)').to_sql).should == 2 + # FlightSegment.cohort(:aircraft_description => aircraft.flight_segments_foreign_keys).should == [] + end + + # def test_006_you_can_get_aircraft_from_flight_segments + # fs = FlightSegment.first + # # you need to add an aircraft_description column + # lambda do + # fs.aircraft.count.should == 2 + # end.must_raise ActiveRecord::StatementInvalid + # end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/foo.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/foo.rb new file mode 100644 index 0000000..b948eda --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/foo.rb @@ -0,0 +1,9 @@ +require 'fileutils' +Dir['test*.rb'].each do |f| + n = File.basename(f, '.rb') + n.sub! 
'test_', '' + n += '_spec.rb' + puts f + puts n + FileUtils.cp f, n +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/fuzzy_match_spec.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/fuzzy_match_spec.rb new file mode 100644 index 0000000..9339c87 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/fuzzy_match_spec.rb @@ -0,0 +1,367 @@ +# -*- encoding: utf-8 -*- +require 'spec_helper' + +describe FuzzyMatch do + describe '#find' do + it %{finds the best match using string similarity} do + d = FuzzyMatch.new %w{ RATZ CATZ } + d.find('RITZ').should == 'RATZ' + end + + it %{doesn't mind crazy characters} do + d = FuzzyMatch.new %w{ RATZ CATZ } + d.find('RíTZ').should == 'RATZ' + end + + it %{not return any result if the maximum score is zero} do + FuzzyMatch.new(['a']).find('b').should be_nil + end + + it %{finds exact matches} do + d = FuzzyMatch.new [ 'X' ] + d.find('X').should == 'X' + end + end + + describe '#find_all' do + it %{return all records in sorted order} do + d = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_all('X').should == ['X', 'X22' ] + d.find_all('A').should == [] + end + end + + describe '#find_best' do + it %{returns one or more records with the best score} do + d = FuzzyMatch.new [ 'X', 'X', 'X22', 'Y', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_best('X').should == ['X', 'X' ] + d.find_best('A').should == [] + end + end + + describe '#find_all_with_score' do + it %{return records with 2 scores} do + d = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_all_with_score('X').should == [ ['X', 1, 1], ['X22', 0, 0.33333333333333337] ] + d.find_all_with_score('A').should == [] + end + end + + describe '#find_with_score' do + it %{return record with dice's and lev's scores} do + d = FuzzyMatch.new [ 'X', 'X22', 'Y', 'Y4' ], :groupings => [ /X/, /Y/ ], :must_match_grouping => true + d.find_with_score('X').should == ['X', 1, 1] + d.find_with_score('A').should be_nil + end + end + + describe '#explain' do + before do + require 'stringio' + @capture = StringIO.new + @old_stdout = $stdout + $stdout = @capture + end + after do + $stdout = @old_stdout + end + + it %{print a basic explanation to stdout} do + d = FuzzyMatch.new %w{ RATZ CATZ } + d.explain('RITZ') + @capture.rewind + @capture.read.should include('CATZ') + end + + it %{explains match failures} do + FuzzyMatch.new(['aaa']).explain('bbb') + @capture.rewind + @capture.read.should =~ %r{No winner assigned.*aaa.*bbb} + end + end + + describe "groupings replacings normalizers" do + it %{sometimes gets false results without them} do + d = FuzzyMatch.new ['BOEING 737-100/200', 'BOEING 737-900'] + d.find('BOEING 737100 number 900').should == 'BOEING 737-900' + end + + it %{can be used to improve results} do + d = FuzzyMatch.new ['BOEING 737-100/200', 'BOEING 737-900'], :groupings => [ [/boeing/i, /7(\d\d)-?(\d\d\d)?/]] + d.find('BOEING 737100 number 900').should == 'BOEING 737-100/200' + end + end + + describe "identities" do + it %{sometimes gets false results without them} do + # false positive without identity + d = FuzzyMatch.new %w{ foo bar } + d.find('bar').should == 'bar' + d.find('bare').should == 'bar' + d.find('baz').should == 'bar' + end + + it %{can be used to improve results} do + d = FuzzyMatch.new %w{ foo bar }, :identities => [ /ba(.)/ ] + d.find('bar').should == 'bar' + d.find('bare').should == 'bar' + 
d.find('baz').should be_nil + d.find('baze').should be_nil + end + + it %{is sort of like backreferences} do + one = '1 sauk ONEONEONEONEONE' + two = '2 sauk TWOTWOTWOTWO' + d = FuzzyMatch.new([one, two]) + d.find('1 sauk TWOTWOTWOTWO').should == two # wrong + d = FuzzyMatch.new([one, two], :identities => [/\A(\d+)\s+(\w+)/]) + d.find('1 sauk TWOTWOTWOTWO').should == one # correct + end + + it %{has a proc form} do + d = FuzzyMatch.new %w{ foo bar }, :identities => [ lambda { |a, b| (a.start_with?('ba') and b.start_with?('ba') ? a[2] == b[2] : nil) } ] + d.find('bar').should == 'bar' + d.find('bare').should == 'bar' + d.find('baz').should be_nil + d.find('baze').should be_nil + end + end + + describe 'groupings' do + it %{sometimes gets false results without them} do + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ] + d.find('Barack Bush').should == 'Barack Obama' # luke i am your father + d.find('George Obama').should == 'George Bush' # nooooooooooooooooooo + end + + it %{can be used to improve results} do + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ], :groupings => [ /Obama/, /Bush/ ] + d.find('Barack Bush').should == 'George Bush' + d.find('George Obama').should == 'Barack Obama' + end + + it %{stays within the group} do + d = FuzzyMatch.new [ 'AB', 'CD' ] + d.find('ABCDCD').should == 'CD' + d = FuzzyMatch.new [ 'AB', 'CD' ], :groupings => [/A/] + d.find('ABCDCD').should == 'AB' + end + + describe 'with chains' do + describe 'hotel example' do + before do + @grandh = 'Grand Hyatt' + @h = 'Hyatt' + @hgarden = 'Hyatt Garden' + @grandhotel = 'Grand Hotel' + @fz = FuzzyMatch.new([@grandh, @h, @hgarden, @grandhotel], :groupings => [ [ /hyatt/i, /garden/i, /grand/i ] ], :stop_words => [ /hotel/i ]) + end + + it %{works as expected} do + @fz.find('Grand Hyatt').should == @grandh + @fz.find('Grand Hyatt Foobar').should == @grandh + @fz.find('Hyatt Garden').should == @hgarden + @fz.find('Hyatt Garden Foobar').should == @hgarden + end + + it %{enforces some stuff} do + # nope + @fz.find('Grund Hyatt').should == @h + @fz.find('Grund Hyatt Foobar').should == @h + @fz.find('Hyatt Gurden').should == @h + @fz.find('Hyatt Gurden Foobar').should == @h + # hmm - hyatt misspelled, so totally prevented from matching hyatt + @fz.find('Grund Hyutt').should == @grandhotel + @fz.find('Grund Hyutt Foobar').should == @grandhotel + # precedence + @fz.find('Grand Hyatt Garden').should == @hgarden + @fz.find('Grand Hyatt Garden Foobar').should == @hgarden + # sanity + @fz.find('Grund Hyatt Garden').should == @hgarden + @fz.find('Grund Hyatt Garden Foobar').should == @hgarden + @fz.find('Grand Hyatt Gurden').should == @grandh + @fz.find('Grand Hyatt Gurden Foobar').should == @grandh + end + + it %{is sticky} do + @fz.find('Grand Hotel').should == @grandhotel + @fz.find('Hotel Garden').should be_nil + @fz.find('Grand Hotel Garden').should == @grandhotel + end + end + + it "helps with subgroups" do + d = FuzzyMatch.new [ 'Boeing 747', 'Boeing 747SR', 'Boeing ER6' ], :groupings => [ [/boeing/i, /(7\d{2})/] ] + d.find_all('Boeing 747').should == [ 'Boeing 747', 'Boeing 747SR' ] + + d = FuzzyMatch.new [ 'Boeing 747', 'Boeing 747SR', 'Boeing ER6' ], :groupings => [ [/boeing/i, /(7\d{2})/] ] + d.find_all('Boeing ER6').should == ["Boeing ER6"] + + d = FuzzyMatch.new [ 'Boeing 747', 'Boeing 747SR', 'Boeing ER6' ], :groupings => [ [/boeing/i, /(7|E\d{2})/i] ] + d.find_all('Boeing ER6').should == [ 'Boeing ER6' ] + d.find_all('Boeing 747').should == [ 'Boeing 747', 'Boeing 747SR' ] + end + end + end + + describe 
"the :must_match_grouping option" do + it %{optionally only attempt matches with records that fit into a grouping} do + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ], :groupings => [ /Obama/, /Bush/ ], :must_match_grouping => true + d.find('George Clinton').should be_nil + + d = FuzzyMatch.new [ 'Barack Obama', 'George Bush' ], :groupings => [ /Obama/, /Bush/ ] + d.find('George Clinton', :must_match_grouping => true).should be_nil + end + end + + describe "the :read option" do + it %{interpret a Numeric as an array index} do + ab = ['a', 'b'] + ba = ['b', 'a'] + haystack = [ab, ba] + by_first = FuzzyMatch.new haystack, :read => 0 + by_last = FuzzyMatch.new haystack, :read => 1 + by_first.find('a').should == ab + by_last.find('b').should == ab + by_first.find('b').should == ba + by_last.find('a').should == ba + end + + it %{interpret a Symbol, etc. as hash key} do + ab = { :one => 'a', :two => 'b' } + ba = { :one => 'b', :two => 'a' } + haystack = [ab, ba] + by_first = FuzzyMatch.new haystack, :read => :one + by_last = FuzzyMatch.new haystack, :read => :two + by_first.find('a').should == ab + by_last.find('b').should == ab + by_first.find('b').should == ba + by_last.find('a').should == ba + end + + MyStruct = Struct.new(:one, :two) + it %{interpret a Symbol as a method id (if the object responds to it)} do + ab = MyStruct.new('a', 'b') + ba = MyStruct.new('b', 'a') + haystack = [ab, ba] + by_first = FuzzyMatch.new haystack, :read => :one + by_last = FuzzyMatch.new haystack, :read => :two + by_first.read.should == :one + by_last.read.should == :two + by_first.find('a').should == ab + by_last.find('b').should == ab + by_first.find('b').should == ba + by_last.find('a').should == ba + end + end + + describe 'the :must_match_at_least_one_word option' do + it %{optionally require that the matching record share at least one word with the needle} do + d = FuzzyMatch.new %w{ RATZ CATZ }, :must_match_at_least_one_word => true + d.find('RITZ').should be_nil + + d = FuzzyMatch.new ["Foo's Bar"], :must_match_at_least_one_word => true + d.find("Foo's").should == "Foo's Bar" + d.find("'s").should be_nil + d.find("Foo").should be_nil + + d = FuzzyMatch.new ["Bolivia, Plurinational State of"], :must_match_at_least_one_word => true + d.find("Bolivia").should == "Bolivia, Plurinational State of" + end + + it %{use STOP WORDS} do + d = FuzzyMatch.new [ 'A HOTEL', 'B HTL' ] + d.find('A HTL', :must_match_at_least_one_word => true).should == 'B HTL' + + d = FuzzyMatch.new [ 'A HOTEL', 'B HTL' ], :must_match_at_least_one_word => true + d.find('A HTL').should == 'B HTL' + + d = FuzzyMatch.new [ 'A HOTEL', 'B HTL' ], :must_match_at_least_one_word => true, :stop_words => [ %r{HO?TE?L} ] + d.find('A HTL').should == 'A HOTEL' + end + + it %{not be fooled by substrings (but rather compare whole words to whole words)} do + d = FuzzyMatch.new [ 'PENINSULA HOTELS' ], :must_match_at_least_one_word => true + d.find('DOLCE LA HULPE BXL FI').should be_nil + end + + it %{not be case-sensitive when checking for sharing of words} do + d = FuzzyMatch.new [ 'A', 'B' ] + d.find('a', :must_match_at_least_one_word => true).should == 'A' + end + end + + describe "the :gather_last_result option" do + it %{not gather metadata about the last result by default} do + d = FuzzyMatch.new %w{ NISSAN HONDA } + d.find('MISSAM') + lambda do + d.last_result + end.should raise_error(::RuntimeError, /gather_last_result/) + end + + it %{optionally gather metadata about the last result} do + d = FuzzyMatch.new %w{ NISSAN HONDA } + d.find 
'MISSAM', :gather_last_result => true + d.last_result.score.should == 0.6 + d.last_result.winner.should == 'NISSAN' + end + end + + describe 'quirks' do + it %{should not return false negatives because of one-letter similarities} do + # dices coefficient doesn't think these two are similar at all because it looks at pairs + FuzzyMatch.score_class.new('X foo', 'X bar').dices_coefficient_similar.should == 0 + # so we must compensate for that somewhere + d = FuzzyMatch.new ['X foo', 'randomness'] + d.find('X bar').should == 'X foo' + # without making false positives + d.find('Y bar').should be_nil + end + + it %{finds possible matches even when pair distance fails} do + d = FuzzyMatch.new ['XX', '2 A'] + d.find('2A').should == '2 A' + d = FuzzyMatch.new ['XX', '2A'] + d.find('2 A').should == '2A' + end + + it %{weird blow ups} do + d = FuzzyMatch.new ['XX', '2 A'] + d.find('A').should == '2 A' + d = FuzzyMatch.new ['XX', 'A'] + d.find('2 A').should == 'A' + end + + it %{from the wild 1} do + d = FuzzyMatch.new ["Doyle Collection", "Trump Collection", "Luxury Collection", "Autograph Collection"] + d.find("Algonquin Autograph Collection").should == "Autograph Collection" + end + + end + + describe 'deprecations' do + it %{takes :must_match_blocking as :must_match_grouping} do + d = FuzzyMatch.new [], :must_match_blocking => :a + d.default_options[:must_match_grouping].should == :a + end + + it %{takes :haystack_reader as :read} do + d = FuzzyMatch.new [], :haystack_reader => :c + d.read.should == :c + end + + it %{takes :blockings as :groupings} do + d = FuzzyMatch.new [], :blockings => [ /X/, /Y/ ] + d.groupings.should == [ FuzzyMatch::Rule::Grouping.new(/X/), FuzzyMatch::Rule::Grouping.new(/Y/) ] + end + end + + it %{defaults to a pure-ruby engine, but also has amatch} do + if defined?($testing_amatch) and $testing_amatch + FuzzyMatch.engine.should == :amatch + else + FuzzyMatch.engine.should == :pure_ruby + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/grouping_spec.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/grouping_spec.rb new file mode 100644 index 0000000..76b4165 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/grouping_spec.rb @@ -0,0 +1,60 @@ +require 'spec_helper' + +describe FuzzyMatch::Rule::Grouping do + it %{matches a single string argument} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xmatch?(r('2 apples')).should == true + end + + it %{embraces case insensitivity} do + b = FuzzyMatch::Rule::Grouping.new %r{apple}i + b.xmatch?(r('2 Apples')).should == true + end + + it %{xjoins two string arguments} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xjoin?(r('apple'), r('2 apples')).should == true + end + + it %{fails to xjoin two string arguments} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xjoin?(r('orange'), r('2 apples')).should == false + end + + it %{returns nil instead of false when it has no information} do + b = FuzzyMatch::Rule::Grouping.new %r{apple} + b.xjoin?(r('orange'), r('orange')).should be_nil + end + + it %{has chains} do + h, gr, ga = FuzzyMatch::Rule::Grouping.make([/hyatt/, /grand/, /garden/]) + h.xjoin?(r('hyatt'), r('hyatt')).should == true + + h.xjoin?(r('grund hyatt'), r('grand hyatt')).should == true + gr.xjoin?(r('grund hyatt'), r('grand hyatt')).should == false + ga.xjoin?(r('grund hyatt'), r('grand hyatt')).should be_nil + + h.xjoin?(r('hyatt gurden'), r('hyatt garden')).should == true + gr.xjoin?(r('hyatt gurden'), r('hyatt garden')).should be_nil + 
ga.xjoin?(r('hyatt gurden'), r('hyatt garden')).should == false + + h.xjoin?(r('grand hyatt'), r('grand hyatt')).should == false # sacrificing itself + gr.xjoin?(r('grand hyatt'), r('grand hyatt')).should == true + ga.xjoin?(r('grand hyatt'), r('grand hyatt')).should be_nil + + h.xjoin?(r('hyatt garden'), r('hyatt garden')).should == false # sacrificing itself + gr.xjoin?(r('hyatt garden'), r('hyatt garden')).should be_nil + ga.xjoin?(r('hyatt garden'), r('hyatt garden')).should == true + + h.xjoin?(r('grand hyatt garden'), r('grand hyatt garden')).should == false # sacrificing itself + gr.xjoin?(r('grand hyatt garden'), r('grand hyatt garden')).should == true + ga.xjoin?(r('grand hyatt garden'), r('grand hyatt garden')).should == true # NOT sacrificing itself? + end + + private + + def r(str) + FuzzyMatch::Record.new str + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/identity_spec.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/identity_spec.rb new file mode 100644 index 0000000..d1e5f2f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/identity_spec.rb @@ -0,0 +1,29 @@ +require 'spec_helper' + +describe FuzzyMatch::Rule::Identity do + it %{determines whether two records COULD be identical} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)} + i.identical?(r('A1'), r('A 1foobar')).should == true + end + + it %{determines that two records MUST NOT be identical} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)} + i.identical?(r('A1'), r('A 2foobar')).should == false + end + + it %{returns nil indicating no information} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)} + i.identical?(r('B1'), r('A 2foobar')).should == nil + end + + it %{embraces case insensitivity} do + i = FuzzyMatch::Rule::Identity.new %r{(A)[ ]*(\d)}i + i.identical?(r('A1'), r('a 1foobar')).should == true + end + + private + + def r(str) + FuzzyMatch::Record.new str + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/record_spec.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/record_spec.rb new file mode 100644 index 0000000..0958ea3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/record_spec.rb @@ -0,0 +1,25 @@ +require 'spec_helper' + +describe FuzzyMatch::Record do + it %{does not treat "'s" as a word} do + assert_split ["foo's", "bar"], "Foo's Bar" + end + + it %{treats "bolivia," as just "bolivia"} do + assert_split ["bolivia", "plurinational", "state"], "Bolivia, Plurinational State" + end + + it %{does not split up hyphenated words} do + assert_split ['north-west'], "north-west" + end + + it %{splits up words as expected} do + assert_split ['the', 'quick', "fox's", 'mouth', 'is', 'always', 'full'], "the quick fox's mouth -- is always full." + end + + private + + def assert_split(ary, str) + FuzzyMatch::Record.new(str).words.should == ary + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/spec_helper.rb new file mode 100644 index 0000000..847a213 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/fuzzy_match-2.0.4/spec/spec_helper.rb @@ -0,0 +1,21 @@ +# This file was generated by the `rspec --init` command. Conventionally, all +# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`. +# Require this file using `require "spec_helper"` to ensure that it is only +# loaded once. 
+# +# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration +RSpec.configure do |config| + config.treat_symbols_as_metadata_keys_with_true_values = true + config.run_all_when_everything_filtered = true + config.filter_run :focus + + # Run specs in random order to surface order dependencies. If you find an + # order dependency and want to debug it, you can fix the order by providing + # the seed, which is printed after each run. + # --seed 1234 + config.order = 'random' +end + +require 'pry' + +require 'fuzzy_match' diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.gitignore b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.gitignore new file mode 100644 index 0000000..b473272 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.gitignore @@ -0,0 +1,10 @@ +/.bundle/ +/.yardoc +/Gemfile.lock +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ +/.idea diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.rspec b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.rspec new file mode 100644 index 0000000..8c18f1a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.rspec @@ -0,0 +1,2 @@ +--format documentation +--color diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.rubocop.yml new file mode 100644 index 0000000..8587014 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.rubocop.yml @@ -0,0 +1,134 @@ +# That looks wrong +Layout/AlignHash: + Enabled: false + +Layout/DotPosition: + Enabled: false + +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +Layout/IndentHash: + Enabled: false + +Lint/HandleExceptions: + Enabled: false + +# the let(:key) { ... } should be allowed in tests +Lint/ParenthesesAsGroupedExpression: + Exclude: + - 'spec/**/*' + +# Cop supports --auto-correct. +Lint/UnusedBlockArgument: + Enabled: false + +# Needed for $verbose +Style/GlobalVars: + Enabled: false + +Lint/UnusedMethodArgument: + Enabled: false + +# https://stackoverflow.com/a/13059657/865175 +Lint/UriEscapeUnescape: + Enabled: false + +Lint/UselessAssignment: + Exclude: + - 'spec/**/*' + +# Offense count: 20 +Metrics/AbcSize: + Max: 60 + +Metrics/BlockLength: + Enabled: false + +# Configuration parameters: CountComments. +Metrics/ClassLength: + Max: 320 + +Metrics/CyclomaticComplexity: + Max: 17 + +# Configuration parameters: AllowURI, URISchemes. +Metrics/LineLength: + Max: 370 + +# options.rb might be large, we know that +Metrics/MethodLength: + Max: 60 + Exclude: + - 'lib/*/options.rb' + +# Configuration parameters: CountKeywordArgs. +Metrics/ParameterLists: + Max: 17 + +Metrics/PerceivedComplexity: + Max: 18 + +Naming/FileName: + Enabled: false + +# and and or is okay +Style/AndOr: + Enabled: false + +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +Style/BracesAroundHashParameters: + Enabled: false + +Style/ClassCheck: + EnforcedStyle: kind_of? 
+ +Style/ClassVars: + Enabled: false + +Style/Documentation: + Enabled: false + +Style/DoubleNegation: + Enabled: false + +# Needed for $verbose +Style/GlobalVars: + Enabled: false + +Style/GuardClause: + Enabled: false + +# Having if in the same line might not always be good +Style/IfUnlessModifier: + Enabled: false + +Style/RaiseArgs: + EnforcedStyle: exploded + +# Better too much 'return' than one missing +Style/RedundantReturn: + Enabled: false + +# Not a good thing +Style/RedundantSelf: + Enabled: false + +# raise and fail are both okay +Style/SignalException: + Enabled: false + +# $? Exit +Style/SpecialGlobalVars: + Enabled: false + +# Both string notations are okay +Style/StringLiterals: + Enabled: false + +# The %w might be confusing for new users +Style/WordArray: + MinSize: 19 + +Style/ZeroLengthPredicate: + Enabled: false diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.travis.yml new file mode 100644 index 0000000..40b07ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/.travis.yml @@ -0,0 +1,9 @@ +language: ruby +rvm: + - 2.1 + - 2.2 + - 2.3 + - 2.4 + - 2.5 + +before_install: gem install bundler -v 1.11.2 diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/CHANGELOG.md new file mode 100644 index 0000000..b11624e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/CHANGELOG.md @@ -0,0 +1,26 @@ +### Master + +### 1.1.3 + +* Require IRB early in the lib's lifecycle - segiddins + +### 1.1.2 + +* URL escape the query for GH issues - revolter + +### 1.1.1 + +* Allow either all typoes, or all no typoes in the delegate call - revolter + +### 1.1.0 + +* Fixes typos in the delegate methods - revolter +* Adds support for showing how to click on a link in terminal - 0xced + +### 1.0.3 + +* Fixes for URLs with spaces - orta + +### 1.0.0 + +* Initial major release - orta + krausefx diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/Gemfile b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/Gemfile new file mode 100644 index 0000000..962bb5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in gh-issues-inspector.gemspec +gemspec diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/LICENSE b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/LICENSE new file mode 100644 index 0000000..aaa005a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Orta Therox and Felix Krause + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/README.md b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/README.md new file mode 100644 index 0000000..3d0adbd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/README.md @@ -0,0 +1,102 @@ +# The Issues Inspector + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'gh_inspector' +``` + +And then execute: + + $ bundle + +## Usage + +#### The Inspector + +To get started using The Issues Inspector, you will need to +create an inspector instance. This class is main public API for querying issues. + +#### Getting Started + +Create an instance of `GhInspector::Inspector`, you can then ask it to search +based on your raised exception, or as a direct query yourself. + +``` ruby +require 'gh_inspector' +inspector = GhInspector::Inspector.new "orta", "eigen" +# Either use an error: +inspector.search_exception an_error, ArtsyUI.new +# Or use a specific query: +inspector.search_query "Someone set us up the bomb" +``` + +By default this would output: + +``` +Looking for related issues on CocoaPods/CocoaPods... + + - undefined method `to_ary' for #Did you mean? to_query + https://github.com/CocoaPods/CocoaPods/issues/4748 [closed] [1 comment] + + - NoMethodError - undefined method `to_ary' for Pod EAIntroView + https://github.com/CocoaPods/CocoaPods/issues/4391 [closed] [15 comments] + + - Do a search on GitHub for issues relating to a crash? + https://github.com/CocoaPods/CocoaPods/issues/4391 [open] [3 comments] + +and 10 more at: +https://github.com/CocoaPods/CocoaPods/search?q=undefined+method+%60to_ary%27&type=Issues +``` +#### Presenting Your Report + +The default user interface for the inspector, its public API should be +considered the protocol for other classes wanting to provide a user interface. + +Your custom objects will be verified at runtime that they conform to the protocol. + +You can see the default implementation at +[lib/evidence.rb](/orta/gh-issues-inspector/tree/master/lib/evidence.rb). + +Both `search_query` and `search_exception` take your custom delegate as a 2nd optional parameter. + +``` ruby +require 'gh_inspector' +inspector = GhInspector::Inspector.new "orta", "eigen" +inspector.search_exception an_error, ArtsyUI.new +``` + +or + +``` ruby +require 'gh_inspector' +inspector = GhInspector::Inspector.new "fastlane", "fastlane" +inspector.search_query "Someone set us up the bomb", FastlaneUI.new +``` + +Protocol for custom objects: + + - `inspector_started_query(query, inspector)` - Called just as the investigation has begun. + - `inspector_successfully_recieved_report(report, inspector)` - Deprecated: Please use `inspector_successfully_received_report` instead. + - `inspector_successfully_received_report(report, inspector)` - Called once the inspector has received a report with more than one issue. + - `inspector_recieved_empty_report(report, inspector)` - Deprecated: Please use `inspector_received_empty_report` instead. + - `inspector_received_empty_report(report, inspector)` - Called once the report has been received, but when there are no issues found. + - `inspector_could_not_create_report(error, query, inspector)` - Called when there have been networking issues in creating the report. 
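
For example, a minimal custom delegate might look something like this (the `MyToolUI` class and the query string are placeholders; any object responding to the protocol methods above can be passed):

```ruby
require 'gh_inspector'

class MyToolUI
  def inspector_started_query(query, inspector)
    puts "Searching #{inspector.repo_owner}/#{inspector.repo_name} for: #{query}"
  end

  def inspector_successfully_received_report(report, inspector)
    report.issues.each { |issue| puts "#{issue.title} - #{issue.html_url}" }
  end

  def inspector_received_empty_report(report, inspector)
    puts "No similar issues found."
  end

  def inspector_could_not_create_report(error, query, inspector)
    puts "Couldn't reach the GitHub API (#{error}), try searching on the website instead."
  end
end

inspector = GhInspector::Inspector.new "orta", "eigen"
inspector.search_query "Someone set us up the bomb", MyToolUI.new
```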
+ +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `bundle exec rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +The usage section of this README is generated from inline documentation inside the classes, to update it run `bundle exec rake readme`. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Vision + +I don't expect this project to grow too much, there's space around improving the search query for an exception, mainly. Other than that the project is effectively done and just needs some production usage to iron out any kinks. This project is well tested, and has zero dependencies. + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/orta/gh-issues-inspector. diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/Rakefile b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/Rakefile new file mode 100644 index 0000000..a2adb88 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/Rakefile @@ -0,0 +1,62 @@ +require 'bundler' +require 'bundler/gem_tasks' +begin + Bundler.setup(:default, :development) +rescue Bundler::BundlerError => e + warn e.message + warn 'Run `bundle install` to install missing gems' + exit e.status_code +end + +require 'rspec/core/rake_task' +require 'rubocop/rake_task' + +RSpec::Core::RakeTask.new(:specs) + +task default: :spec + +task :spec do + Rake::Task['specs'].invoke + Rake::Task['rubocop'].invoke +end + +desc 'Run RuboCop on the lib/specs directory' +RuboCop::RakeTask.new(:rubocop) do |task| + task.patterns = ['lib/**/*.rb', 'spec/**/*.rb'] +end + +task :readme do + readme = File.open("README.md", 'rb', &:read) + + start_split = "## Usage" + end_split = "## Development" + + start = readme.split(start_split)[0] + rest = readme.split(start_split)[1] + finale = rest.split(end_split)[1] + + require 'yard' + files = ["lib/gh_inspector/inspector.rb", "lib/gh_inspector/sidekick.rb", "lib/gh_inspector/evidence.rb"] + docs = YARD::Registry.load(files, true) + + usage = "\n\n" + usage << "#### The Inspector\n\n" + usage << docs.at("GhInspector::Inspector").docstring + usage << "\n" + + usage << "#### Presenting Your Report \n\n" + evidence = docs.at("GhInspector::Evidence") + usage << evidence.docstring + usage << "\n" + + usage << "\nProtocol for custom objects:\n\n" + evidence.children.each do |method| + next unless method.name.to_s.start_with? "inspector" + params = method.parameters.flatten.compact + usage << " - `#{method.name}(#{params.join ', '})` - #{method.docstring}\n" + end + usage << "\n" + + new_file = start + start_split + usage + end_split + finale + File.open("README.md", 'w') { |f| f.write new_file } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/bin/console b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/bin/console new file mode 100755 index 0000000..ba7bd4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/bin/console @@ -0,0 +1,10 @@ +#!/usr/bin/env ruby + +require 'bundler/setup' +require 'gh_inspector' + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. 
You can also use a different console, if you like. + +require 'pry' +Pry.start diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/bin/setup b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/bin/setup new file mode 100644 index 0000000..dce67d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/bin/setup @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' +set -vx + +bundle install + +# Do any other automated setup that you need to do here diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/gh_inspector.gemspec b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/gh_inspector.gemspec new file mode 100644 index 0000000..4377281 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/gh_inspector.gemspec @@ -0,0 +1,27 @@ + +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'gh_inspector/version' + +Gem::Specification.new do |spec| + spec.name = 'gh_inspector' + spec.version = GhInspector::VERSION + spec.authors = ['Orta Therox', 'Felix Krause'] + spec.email = ['orta.therox@gmail.com', 'gh_inspector@krausefx.com'] + + spec.license = 'MIT' + spec.summary = 'Search through GitHub issues for your project for existing issues about a Ruby Error.' + spec.description = spec.summary + + spec.homepage = 'https://github.com/orta/gh_inspector' + + spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) } + spec.require_paths = ['lib'] + + spec.add_development_dependency 'bundler', '~> 1.11' + spec.add_development_dependency 'pry', '~> 0.6' + spec.add_development_dependency 'rake', '~> 10.0' + spec.add_development_dependency 'rspec', '~> 3.0' + spec.add_development_dependency 'rubocop', '~> 0', '> 0' + spec.add_development_dependency 'yard', '~> 0', '> 0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector.rb b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector.rb new file mode 100644 index 0000000..7e6ece7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector.rb @@ -0,0 +1,8 @@ +require 'gh_inspector/version' +require 'gh_inspector/inspector' +require 'gh_inspector/sidekick' +require 'gh_inspector/evidence' +require 'gh_inspector/exception_hound' + +module GhInspector +end diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/evidence.rb b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/evidence.rb new file mode 100644 index 0000000..0499f38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/evidence.rb @@ -0,0 +1,118 @@ +require 'gh_inspector/version' +require 'time' + +module GhInspector + NUMBER_OF_ISSUES_INLINE = 3 + + # The default user interface for the inspector, its public API should be + # considered the protocol for other classes wanting to provide a user interface. + # + # Your custom objects will be verified at runtime that they conform to the protocol. + # + # You can see the default implementation at + # [lib/evidence.rb](/orta/gh-issues-inspector/tree/master/lib/evidence.rb). + # + # Both `search_query` and `search_exception` take your custom delegate as a 2nd optional parameter. 
+ # + # ``` ruby + # require 'gh_inspector' + # inspector = GhInspector::Inspector.new "orta", "eigen" + # inspector.search_exception an_error, ArtsyUI.new + # ``` + # + # or + # + # ``` ruby + # require 'gh_inspector' + # inspector = GhInspector::Inspector.new "fastlane", "fastlane" + # inspector.search_query "Someone set us up the bomb", FastlaneUI.new + # ``` + # + + class Evidence + # Called just as the investigation has begun. + def inspector_started_query(query, inspector) + puts "Looking for related GitHub issues on #{inspector.repo_owner}/#{inspector.repo_name}..." + puts "Search query: #{query}" if inspector.verbose + puts "" + end + + # Deprecated: Please use `inspector_successfully_received_report` instead. + def inspector_successfully_recieved_report(report, inspector) + warn "[DEPRECATION] `inspector_successfully_recieved_report` is deprecated. Please use `inspector_successfully_received_report` instead." + inspector_successfully_received_report(report, inspector) + end + + # Called once the inspector has received a report with more than one issue. + def inspector_successfully_received_report(report, inspector) + report.issues[0..(NUMBER_OF_ISSUES_INLINE - 1)].each { |issue| print_issue_full(issue) } + + if report.issues.count > NUMBER_OF_ISSUES_INLINE + puts "and #{report.total_results - NUMBER_OF_ISSUES_INLINE} more at: #{report.url}" + puts "" + end + + print_open_link_hint + end + + # Deprecated: Please use `inspector_received_empty_report` instead. + def inspector_recieved_empty_report(report, inspector) + warn "[DEPRECATION] `inspector_recieved_empty_report` is deprecated. Please use `inspector_received_empty_report` instead." + inspector_received_empty_report(report, inspector) + end + + # Called once the report has been received, but when there are no issues found. + def inspector_received_empty_report(report, inspector) + puts "Found no similar issues. To create a new issue, please visit:" + puts "https://github.com/#{inspector.repo_owner}/#{inspector.repo_name}/issues/new" + print_open_link_hint(true) + end + + # Called when there have been networking issues in creating the report. + def inspector_could_not_create_report(error, query, inspector) + puts "Could not access the GitHub API, you may have better luck via the website." + puts "https://github.com/#{inspector.repo_owner}/#{inspector.repo_name}/search?q=#{query}&type=Issues&utf8=✓" + puts "Error: #{error.name}" + print_open_link_hint(true) + end + + private + + def print_issue_full(issue) + puts " - #{issue.title}" + puts " #{issue.html_url} [#{issue.state}] [#{issue.comments} comment#{issue.comments == 1 ? '' : 's'}]" + puts " #{Time.parse(issue.updated_at).to_pretty}" + puts "" + end + + def print_open_link_hint(newline = false) + puts "" if newline + puts "You can ⌘ + double-click on links to open them directly in your browser. 
🔗" if /darwin/ =~ RUBY_PLATFORM + end + end +end + +# Taken from https://stackoverflow.com/questions/195740/how-do-you-do-relative-time-in-rails + +module PrettyDate + def to_pretty + a = (Time.now - self).to_i + + case a + when 0 then 'just now' + when 1 then 'a second ago' + when 2..59 then a.to_s + ' seconds ago' + when 60..119 then 'a minute ago' # 120 = 2 minutes + when 120..3540 then (a / 60).to_i.to_s + ' minutes ago' + when 3541..7100 then 'an hour ago' # 3600 = 1 hour + when 7101..82_800 then ((a + 99) / 3600).to_i.to_s + ' hours ago' + when 82_801..172_000 then 'a day ago' # 86400 = 1 day + when 172_001..518_400 then ((a + 800) / (60 * 60 * 24)).to_i.to_s + ' days ago' + when 518_400..1_036_800 then 'a week ago' + when 1_036_801..4_147_204 then ((a + 180_000) / (60 * 60 * 24 * 7)).to_i.to_s + ' weeks ago' + else strftime("%d %b %Y") + end + end +end + +Time.send :include, PrettyDate diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/exception_hound.rb b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/exception_hound.rb new file mode 100644 index 0000000..ad89001 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/exception_hound.rb @@ -0,0 +1,45 @@ +module GhInspector + class ExceptionHound + attr_accessor :message + + def initialize(error) + self.message = find_message error + end + + def find_message(error) + error.to_s + end + + def query + undefined + simple_nil + demangle_instances + + message + end + + private + + def undefined + self.message = message.gsub "undefined local variable or method", "undefined" + end + + def simple_nil + self.message = message.gsub "nil:NilClass", "nil" + end + + def demangle_instances + self.message = regex_replace(message, /(#<.*>)/, /#<(.*):/) + end + + def regex_replace(string, find, replace) + if string.match find + full = string.match(find)[0] + simple = string.match(replace)[1] + string.gsub full, simple + else + string + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/inspector.rb b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/inspector.rb new file mode 100644 index 0000000..08e85a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/inspector.rb @@ -0,0 +1,71 @@ +# Note that the README is generated from the class comments, so it's a bit +# wider scope than your average class comment. + +module GhInspector + # To get started using The Issues Inspector, you will need to + # create an inspector instance. This class is main public API for querying issues. + # + # #### Getting Started + # + # Create an instance of `GhInspector::Inspector`, you can then ask it to search + # based on your raised exception, or as a direct query yourself. + # + # ``` ruby + # require 'gh_inspector' + # inspector = GhInspector::Inspector.new "orta", "eigen" + # # Either use an error: + # inspector.search_exception an_error, ArtsyUI.new + # # Or use a specific query: + # inspector.search_query "Someone set us up the bomb" + # ``` + # + # By default this would output: + # + # ``` + # Looking for related issues on CocoaPods/CocoaPods... + # + # - undefined method `to_ary' for #Did you mean? 
to_query + # https://github.com/CocoaPods/CocoaPods/issues/4748 [closed] [1 comment] + # + # - NoMethodError - undefined method `to_ary' for Pod EAIntroView + # https://github.com/CocoaPods/CocoaPods/issues/4391 [closed] [15 comments] + # + # - Do a search on GitHub for issues relating to a crash? + # https://github.com/CocoaPods/CocoaPods/issues/4391 [open] [3 comments] + # + # and 10 more at: + # https://github.com/CocoaPods/CocoaPods/search?q=undefined+method+%60to_ary%27&type=Issues + # ``` + # + + class Inspector + attr_accessor :repo_owner, :repo_name, :query, :sidekick, :verbose + + # Class init function with a "orta/project" style string + def self.from_slug(slug) + details = slug.split '/' + Inspector.new details.first, details.last + end + + # Init function with "orta", "project" + def initialize(repo_owner, repo_name, verbose: false) + self.repo_owner = repo_owner + self.repo_name = repo_name + self.verbose = verbose + self.sidekick = Sidekick.new(self, repo_owner, repo_name) + end + + # Will do some magic to try and pull out a reasonable search query + # for an exception, then searches with that + def search_exception(exception, delegate = nil) + query = ExceptionHound.new(exception).query + search_query(query, delegate) + end + + # Queries for an specific search string + def search_query(query, delegate = nil) + delegate ||= Evidence.new + sidekick.search(query, delegate) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/sidekick.rb b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/sidekick.rb new file mode 100644 index 0000000..5677253 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/sidekick.rb @@ -0,0 +1,138 @@ +require 'erb' +require "net/http" +require 'uri' + +module GhInspector + # The Sidekick is the one who does all the real work. + # They take the query, get the GitHub API results, etc + # then pass them back to the inspector who gets the public API credit. + + class Sidekick + attr_accessor :repo_owner, :repo_name, :inspector, :using_deprecated_method + + def initialize(inspector, repo_owner, repo_name) + self.inspector = inspector + self.repo_owner = repo_owner + self.repo_name = repo_name + self.using_deprecated_method = false + end + + # Searches for a query, with a UI delegate + def search(query, delegate) + validate_delegate(delegate) + + delegate.inspector_started_query(query, inspector) + url = url_for_request query + + begin + results = get_api_results(url) + rescue Timeout::Error, Errno::EINVAL, Errno::ECONNRESET, EOFError, Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError, Net::ProtocolError => e + delegate.inspector_could_not_create_report(e, query, inspector) + return + end + + report = parse_results query, results + + # TODO: progress callback + + if report.issues.any? 
+ if self.using_deprecated_method + delegate.inspector_successfully_recieved_report(report, inspector) + else + delegate.inspector_successfully_received_report(report, inspector) + end + else + # rubocop:disable Style/IfInsideElse + if self.using_deprecated_method + delegate.inspector_recieved_empty_report(report, inspector) + else + delegate.inspector_received_empty_report(report, inspector) + end + # rubocop:enable Style/IfInsideElse + end + + report + end + + def verbose + self.inspector.verbose + end + + private + + require 'json' + + # Generates a URL for the request + def url_for_request(query, sort_by: nil, order: nil) + url = "https://api.github.com/search/issues?q=" + url += ERB::Util.url_encode(query) + url += "+repo:#{repo_owner}/#{repo_name}" + url += "&sort=#{sort_by}" if sort_by + url += "&order=#{order}" if order + + url + end + + # Gets the search results + def get_api_results(url) + uri = URI.parse(url) + puts "URL: #{url}" if self.verbose + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + + request = Net::HTTP::Get.new(uri.request_uri) + response = http.request(request) + + JSON.parse(response.body) + end + + # Converts a GitHub search JSON into a InspectionReport + def parse_results(query, results) + report = InspectionReport.new + report.url = "https://github.com/#{repo_owner}/#{repo_name}/search?q=#{ERB::Util.url_encode(query)}&type=Issues&utf8=✓" + report.query = query + report.total_results = results['total_count'] + report.issues = results['items'].map { |item| Issue.new(item) } + report + end + + def validate_delegate(delegate) + deprecated_delegate_methods = %i[ + inspector_successfully_recieved_report + inspector_recieved_empty_report + ] + new_delegate_methods = %i[ + inspector_successfully_received_report + inspector_received_empty_report + ] + + deprecated_delegate_methods.each do |deprecated_delegate_method| + self.using_deprecated_method = true if delegate.methods.include?(deprecated_delegate_method) + end + + e = Evidence.new + protocol = e.public_methods false + protocol.each do |m| + is_deprecated_method = deprecated_delegate_methods.include?(m) + is_new_delegate_method = new_delegate_methods.include?(m) + + raise "#{delegate} does not handle #{m}" unless delegate.methods.include?(m) || is_deprecated_method || is_new_delegate_method + end + end + end + + class InspectionReport + attr_accessor :issues, :url, :query, :total_results + end + + class Issue + attr_accessor :title, :number, :html_url, :state, :body, :comments, :updated_at + + # Hash -> public attributes + def initialize(*h) + if h.length == 1 && h.first.kind_of?(Hash) + h.first.each { |k, v| send("#{k}=", v) if public_methods.include?("#{k}=".to_sym) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/version.rb b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/version.rb new file mode 100644 index 0000000..1d09eea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/gh_inspector-1.1.3/lib/gh_inspector/version.rb @@ -0,0 +1,3 @@ +module GhInspector + VERSION = '1.1.3'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/README.md b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/README.md new file mode 100644 index 0000000..0296264 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/README.md @@ -0,0 +1,85 @@ +httpclient - HTTP accessing library. 
[![Gem Version](https://badge.fury.io/rb/httpclient.svg)](http://badge.fury.io/rb/httpclient) + +Copyright (C) 2000-2015 NAKAMURA, Hiroshi . + +'httpclient' gives something like the functionality of libwww-perl (LWP) in +Ruby. 'httpclient' formerly known as 'http-access2'. + +See [HTTPClient](http://www.rubydoc.info/gems/httpclient/frames) for documentation. + + +## Features + +* methods like GET/HEAD/POST/* via HTTP/1.1. +* HTTPS(SSL), Cookies, proxy, authentication(Digest, NTLM, Basic), etc. +* asynchronous HTTP request, streaming HTTP request. +* debug mode CLI. +* by contrast with net/http in standard distribution; + * Cookies support + * MT-safe + * streaming POST (POST with File/IO) + * Digest auth + * Negotiate/NTLM auth for WWW-Authenticate (requires net/ntlm module; rubyntlm gem) + * NTLM auth for Proxy-Authenticate (requires 'win32/sspi' module; rubysspi gem) + * extensible with filter interface + * you don't have to care HTTP/1.1 persistent connection + (httpclient cares instead of you) +* Not supported now + * Cache + * Rather advanced HTTP/1.1 usage such as Range, deflate, etc. + (of course you can set it in header by yourself) + +## httpclient command + +Usage: 1) `httpclient get https://www.google.co.jp/?q=ruby` +Usage: 2) `httpclient` + +For 1) it issues a GET request to the given URI and shows the wiredump and +the parsed result. For 2) it invokes irb shell with the binding that has a +HTTPClient as 'self'. You can call HTTPClient instance methods like; + +```ruby +get "https://www.google.co.jp/", :q => :ruby +``` + +## Author + + * Name:: Hiroshi Nakamura + * E-mail:: nahi@ruby-lang.org + * Project web site:: http://github.com/nahi/httpclient + + +## License + +This program is copyrighted free software by NAKAMURA, Hiroshi. You can +redistribute it and/or modify it under the same terms of Ruby's license; +either the dual license version in 2003, or any later version. + +httpclient/session.rb is based on http-access.rb in http-access/0.0.4. Some +part of it is copyrighted by Maebashi-san who made and published +http-access/0.0.4. http-access/0.0.4 did not include license notice but when +I asked Maebashi-san he agreed that I can redistribute it under the same terms +of Ruby. Many thanks to Maebashi-san. + + +## Install + +You can install httpclient via rubygems: `gem install httpclient` + + +## Usage + +See [HTTPClient](http://www.rubydoc.info/gems/httpclient/frames) for documentation. +You can also check sample/howto.rb how to use APIs. + +## Bug report or Feature request + +Please file a ticket at the project web site. + +1. find a similar ticket from https://github.com/nahi/httpclient/issues +2. create a new ticket by clicking 'Create Issue' button. +3. you can use github features such as pull-request if you like. + +## Changes + +See [ChangeLog](https://github.com/nahi/httpclient/blob/master/CHANGELOG.md) diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/bin/httpclient b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/bin/httpclient new file mode 100755 index 0000000..57d3f3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/bin/httpclient @@ -0,0 +1,77 @@ +#!/usr/bin/env ruby + +# httpclient shell command. +# +# Usage: 1) % httpclient get https://www.google.co.jp/ q=ruby +# Usage: 2) % httpclient +# +# For 1) it issues a GET request to the given URI and shows the wiredump and +# the parsed result. For 2) it invokes irb shell with the binding that has a +# HTTPClient as 'self'. 
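As a complement to the README's pointer to the rubydoc documentation, a minimal sketch of library (rather than CLI) usage, limited to calls documented in `lib/httpclient.rb` later in this diff; the example URLs are placeholders:

```ruby
require 'httpclient'

clnt = HTTPClient.new

# GET, following redirects, returning the body as a String.
puts clnt.get_content('http://example.com/')

# GET with a query string; returns an HTTP::Message.
res = clnt.get('http://example.com/search', :query => { 'keyword' => 'ruby' })
p res.status
p res.contenttype

# POST a form body.
res = clnt.post('http://example.com/login', :body => { 'user' => 'nahi' })
puts res.body
```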
You can call HTTPClient instance methods like; +# > get "https://www.google.co.jp/", :q => :ruby +require 'httpclient' + +method = ARGV.shift +if method == 'version' + puts HTTPClient::VERSION + exit +end + +url = ARGV.shift +if method && url + client = HTTPClient.new + client.strict_response_size_check = true + if method == 'download' + print client.get_content(url) + else + client.debug_dev = STDERR + $DEBUG = true + require 'pp' + pp client.send(method, url, *ARGV) + end + exit +end + +require 'irb' +require 'irb/completion' + +class Runner + def initialize + @httpclient = HTTPClient.new + @httpclient.strict_response_size_check = true + end + + def method_missing(msg, *a, &b) + debug, $DEBUG = $DEBUG, true + begin + @httpclient.send(msg, *a, &b) + ensure + $DEBUG = debug + end + end + + def run + IRB.setup(nil) + ws = IRB::WorkSpace.new(binding) + irb = IRB::Irb.new(ws) + IRB.conf[:MAIN_CONTEXT] = irb.context + + trap("SIGINT") do + irb.signal_handle + end + + begin + catch(:IRB_EXIT) do + irb.eval_input + end + ensure + IRB.irb_at_exit + end + end + + def to_s + 'HTTPClient' + end +end + +Runner.new.run diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/bin/jsonclient b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/bin/jsonclient new file mode 100755 index 0000000..8f9af3a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/bin/jsonclient @@ -0,0 +1,85 @@ +#!/usr/bin/env ruby + +# jsonclient shell command. +# +# Usage: 1) % jsonclient post https://www.example.com/ content.json +# Usage: 2) % jsonclient +# +# For 1) it issues a GET request to the given URI and shows the wiredump and +# the parsed result. For 2) it invokes irb shell with the binding that has a +# JSONClient as 'self'. You can call JSONClient instance methods like; +# > post "https://www.example.com/resource", {'hello' => 'world'} +require 'jsonclient' + +method = ARGV.shift +url = ARGV.shift +body = [] +if ['post', 'put'].include?(method) + if ARGV.size == 1 && File.exist?(ARGV[0]) + body << File.read(ARGV[0]) + else + body << ARGF.read + end +end +if method && url + require 'pp' + client = JSONClient.new + client.debug_dev = STDERR if $DEBUG + res = client.send(method, url, *body) + STDERR.puts('RESPONSE HEADER: ') + PP.pp(res.headers, STDERR) + if res.ok? 
+ begin + puts JSON.pretty_generate(res.content) + rescue JSON::GeneratorError + puts res.content + end + exit 0 + else + STDERR.puts res.content + exit 1 + end +end + +require 'irb' +require 'irb/completion' + +class Runner + def initialize + @httpclient = JSONClient.new + end + + def method_missing(msg, *a, &b) + debug, $DEBUG = $DEBUG, true + begin + @httpclient.send(msg, *a, &b) + ensure + $DEBUG = debug + end + end + + def run + IRB.setup(nil) + ws = IRB::WorkSpace.new(binding) + irb = IRB::Irb.new(ws) + IRB.conf[:MAIN_CONTEXT] = irb.context + + trap("SIGINT") do + irb.signal_handle + end + + begin + catch(:IRB_EXIT) do + irb.eval_input + end + ensure + IRB.irb_at_exit + end + end + + def to_s + 'JSONClient' + end +end + +Runner.new.run diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/hexdump.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/hexdump.rb new file mode 100644 index 0000000..4cd006f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/hexdump.rb @@ -0,0 +1,50 @@ +# encoding: binary + +# This was written by Arai-san and published at +# http://blade.nagaokaut.ac.jp/cgi-bin/scat.rb/ruby/ruby-list/31987 + + +module HexDump + # str must be in BINARY encoding in 1.9 + def encode(str) + offset = 0 + result = [] + while raw = str.slice(offset, 16) and raw.length > 0 + # data field + data = '' + for v in raw.unpack('N* a*') + if v.kind_of? Integer + data << sprintf("%08x ", v) + else + v.each_byte {|c| data << sprintf("%02x", c) } + end + end + # text field + text = raw.tr("\000-\037\177-\377", ".") + result << sprintf("%08x %-36s %s", offset, data, text) + offset += 16 + # omit duplicate line + if /^(#{regex_quote_n(raw)})+/n =~ str[offset .. -1] + result << sprintf("%08x ...", offset) + offset += $&.length + # should print at the end + if offset == str.length + result << sprintf("%08x %-36s %s", offset-16, data, text) + end + end + end + result + end + module_function :encode + + if RUBY_VERSION >= "1.9" + # raw must be in BINARY encoding in 1.9 + def self.regex_quote_n(raw) + Regexp.quote(raw) + end + else + def self.regex_quote_n(raw) + Regexp.quote(raw, 'n') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2.rb new file mode 100644 index 0000000..4588011 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2.rb @@ -0,0 +1,55 @@ +# HTTPAccess2 - HTTP accessing library. +# Copyright (C) 2000-2007 NAKAMURA, Hiroshi . + +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + +# http-access2.rb is based on http-access.rb in http-access/0.0.4. Some +# part of it is copyrighted by Maebashi-san who made and published +# http-access/0.0.4. http-access/0.0.4 did not include license notice but +# when I asked Maebashi-san he agreed that I can redistribute it under the +# same terms of Ruby. Many thanks to Maebashi-san. 
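The `bin/jsonclient` wrapper shown a little earlier drives the gem's `JSONClient` class. As a rough sketch of using it directly from code, assuming `JSONClient` is available via `require 'jsonclient'` as the wrapper script does; the URL and payload are placeholders taken from that script's own usage comment:

```ruby
require 'jsonclient'

client = JSONClient.new

# Hash bodies are serialized as JSON, and JSON responses are parsed back
# into res.content, so no manual JSON.generate / JSON.parse is needed.
res = client.post('https://www.example.com/resource', { 'hello' => 'world' })
p res.headers
p res.content if res.ok?
```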
+ + +require 'httpclient' + + +module HTTPAccess2 + VERSION = ::HTTPClient::VERSION + RUBY_VERSION_STRING = ::HTTPClient::RUBY_VERSION_STRING + SSLEnabled = ::HTTPClient::SSLEnabled + SSPIEnabled = ::HTTPClient::SSPIEnabled + DEBUG_SSL = true + + Util = ::HTTPClient::Util + + class Client < ::HTTPClient + class RetryableResponse < StandardError + end + end + + SSLConfig = ::HTTPClient::SSLConfig + BasicAuth = ::HTTPClient::BasicAuth + DigestAuth = ::HTTPClient::DigestAuth + NegotiateAuth = ::HTTPClient::NegotiateAuth + AuthFilterBase = ::HTTPClient::AuthFilterBase + WWWAuth = ::HTTPClient::WWWAuth + ProxyAuth = ::HTTPClient::ProxyAuth + Site = ::HTTPClient::Site + Connection = ::HTTPClient::Connection + SessionManager = ::HTTPClient::SessionManager + SSLSocketWrap = ::HTTPClient::SSLSocket + DebugSocket = ::HTTPClient::DebugSocket + + class Session < ::HTTPClient::Session + class Error < StandardError + end + class InvalidState < Error + end + class BadResponse < Error + end + class KeepAliveDisconnected < Error + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2/cookie.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2/cookie.rb new file mode 100644 index 0000000..56f7884 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2/cookie.rb @@ -0,0 +1 @@ +require 'httpclient/cookie' diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2/http.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2/http.rb new file mode 100644 index 0000000..fc9b23c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/http-access2/http.rb @@ -0,0 +1 @@ +require 'httpclient/http' diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient.rb new file mode 100644 index 0000000..4f4b429 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient.rb @@ -0,0 +1,1327 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'stringio' +require 'digest/sha1' + +# Extra library +require 'httpclient/version' +require 'httpclient/util' +require 'httpclient/ssl_config' +require 'httpclient/connection' +require 'httpclient/session' +require 'httpclient/http' +require 'httpclient/auth' +require 'httpclient/cookie' + +# :main:HTTPClient +# The HTTPClient class provides several methods for accessing Web resources +# via HTTP. +# +# HTTPClient instance is designed to be MT-safe. You can call a HTTPClient +# instance from several threads without synchronization after setting up an +# instance. +# +# clnt = HTTPClient.new +# clnt.set_cookie_store('/home/nahi/cookie.dat') +# urls.each do |url| +# Thread.new(url) do |u| +# p clnt.head(u).status +# end +# end +# +# == How to use +# +# At first, how to create your client. See initialize for more detail. +# +# 1. Create simple client. +# +# clnt = HTTPClient.new +# +# 2. Accessing resources through HTTP proxy. You can use environment +# variable 'http_proxy' or 'HTTP_PROXY' instead. +# +# clnt = HTTPClient.new('http://myproxy:8080') +# +# === How to retrieve web resources +# +# See get and get_content. +# +# 1. Get content of specified URL. 
It returns HTTP::Message object and +# calling 'body' method of it returns a content String. +# +# puts clnt.get('http://dev.ctor.org/').body +# +# 2. For getting content directly, use get_content. It follows redirect +# response and returns a String of whole result. +# +# puts clnt.get_content('http://dev.ctor.org/') +# +# 3. You can pass :follow_redirect option to follow redirect response in get. +# +# puts clnt.get('http://dev.ctor.org/', :follow_redirect => true) +# +# 4. Get content as chunks of String. It yields chunks of String. +# +# clnt.get_content('http://dev.ctor.org/') do |chunk| +# puts chunk +# end +# +# === Invoking other HTTP methods +# +# See head, get, post, put, delete, options, propfind, proppatch and trace. +# It returns a HTTP::Message instance as a response. +# +# 1. Do HEAD request. +# +# res = clnt.head(uri) +# p res.header['Last-Modified'][0] +# +# 2. Do GET request with query. +# +# query = { 'keyword' => 'ruby', 'lang' => 'en' } +# res = clnt.get(uri, query) +# p res.status +# p res.contenttype +# p res.header['X-Custom'] +# puts res.body +# +# You can also use keyword argument style. +# +# res = clnt.get(uri, :query => { :keyword => 'ruby', :lang => 'en' }) +# +# === How to POST +# +# See post. +# +# 1. Do POST a form data. +# +# body = { 'keyword' => 'ruby', 'lang' => 'en' } +# res = clnt.post(uri, body) +# +# Keyword argument style. +# +# res = clnt.post(uri, :body => ...) +# +# 2. Do multipart file upload with POST. No need to set extra header by +# yourself from httpclient/2.1.4. +# +# File.open('/tmp/post_data') do |file| +# body = { 'upload' => file, 'user' => 'nahi' } +# res = clnt.post(uri, body) +# end +# +# 3. Do multipart with custom body. +# +# File.open('/tmp/post_data') do |file| +# body = [{ 'Content-Type' => 'application/atom+xml; charset=UTF-8', +# :content => '...' }, +# { 'Content-Type' => 'video/mp4', +# 'Content-Transfer-Encoding' => 'binary', +# :content => file }] +# res = clnt.post(uri, body) +# end +# +# === Accessing via SSL +# +# Ruby needs to be compiled with OpenSSL. +# +# 1. Get content of specified URL via SSL. +# Just pass an URL which starts with 'https://'. +# +# https_url = 'https://www.rsa.com' +# clnt.get(https_url) +# +# 2. Getting peer certificate from response. +# +# res = clnt.get(https_url) +# p res.peer_cert #=> returns OpenSSL::X509::Certificate +# +# 3. Configuring OpenSSL options. See HTTPClient::SSLConfig for more details. +# +# user_cert_file = 'cert.pem' +# user_key_file = 'privkey.pem' +# clnt.ssl_config.set_client_cert_file(user_cert_file, user_key_file) +# clnt.get(https_url) +# +# 4. Revocation check. On JRuby you can set following options to let +# HTTPClient to perform revocation check with CRL and OCSP: +# +# -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true +# ex. jruby -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true app.rb +# Revoked cert example: https://test-sspev.verisign.com:2443/test-SSPEV-revoked-verisign.html +# +# On other platform you can download CRL by yourself and set it via +# SSLConfig#add_crl. +# +# === Handling Cookies +# +# 1. Using volatile Cookies. Nothing to do. HTTPClient handles Cookies. +# +# clnt = HTTPClient.new +# res = clnt.get(url1) # receives Cookies. +# res = clnt.get(url2) # sends Cookies if needed. +# p res.cookies +# +# 2. Saving non volatile Cookies to a specified file. Need to set a file at +# first and invoke save method at last. 
+# +# clnt = HTTPClient.new +# clnt.set_cookie_store('/home/nahi/cookie.dat') +# clnt.get(url) +# ... +# clnt.save_cookie_store +# +# 3. Disabling Cookies. +# +# clnt = HTTPClient.new +# clnt.cookie_manager = nil +# +# === Configuring authentication credentials +# +# 1. Authentication with Web server. Supports BasicAuth, DigestAuth, and +# Negotiate/NTLM (requires ruby/ntlm module). +# +# clnt = HTTPClient.new +# domain = 'http://dev.ctor.org/http-access2/' +# user = 'user' +# password = 'user' +# clnt.set_auth(domain, user, password) +# p clnt.get('http://dev.ctor.org/http-access2/login').status +# +# 2. Authentication with Proxy server. Supports BasicAuth and NTLM +# (requires win32/sspi) +# +# clnt = HTTPClient.new(proxy) +# user = 'proxy' +# password = 'proxy' +# clnt.set_proxy_auth(user, password) +# p clnt.get(url) +# +# === Invoking HTTP methods with custom header +# +# Pass a Hash or an Array for header argument. +# +# header = { 'Accept' => 'text/html' } +# clnt.get(uri, query, header) +# +# header = [['Accept', 'image/jpeg'], ['Accept', 'image/png']] +# clnt.get_content(uri, query, header) +# +# === Invoking HTTP methods asynchronously +# +# See head_async, get_async, post_async, put_async, delete_async, +# options_async, propfind_async, proppatch_async, and trace_async. +# It immediately returns a HTTPClient::Connection instance as a returning value. +# +# connection = clnt.post_async(url, body) +# print 'posting.' +# while true +# break if connection.finished? +# print '.' +# sleep 1 +# end +# puts '.' +# res = connection.pop +# p res.status +# p res.body.read # res.body is an IO for the res of async method. +# +# === Shortcut methods +# +# You can invoke get_content, get, etc. without creating HTTPClient instance. +# +# ruby -rhttpclient -e 'puts HTTPClient.get_content(ARGV.shift)' http://dev.ctor.org/ +# ruby -rhttpclient -e 'p HTTPClient.head(ARGV.shift).header["last-modified"]' http://dev.ctor.org/ +# +class HTTPClient + RUBY_VERSION_STRING = "ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE})" + LIB_NAME = "(#{VERSION}, #{RUBY_VERSION_STRING})" + + include Util + + # Raised for indicating running environment configuration error for example + # accessing via SSL under the ruby which is not compiled with OpenSSL. + class ConfigurationError < StandardError + end + + # Raised for indicating HTTP response error. + class BadResponseError < RuntimeError + # HTTP::Message:: a response + attr_reader :res + + def initialize(msg, res = nil) # :nodoc: + super(msg) + @res = res + end + end + + # Raised for indicating a timeout error. + class TimeoutError < RuntimeError + end + + # Raised for indicating a connection timeout error. + # You can configure connection timeout via HTTPClient#connect_timeout=. + class ConnectTimeoutError < TimeoutError + end + + # Raised for indicating a request sending timeout error. + # You can configure request sending timeout via HTTPClient#send_timeout=. + class SendTimeoutError < TimeoutError + end + + # Raised for indicating a response receiving timeout error. + # You can configure response receiving timeout via + # HTTPClient#receive_timeout=. + class ReceiveTimeoutError < TimeoutError + end + + # Deprecated. 
just for backward compatibility + class Session + BadResponse = ::HTTPClient::BadResponseError + end + + class << self + %w(get_content post_content head get post put delete options propfind proppatch trace).each do |name| + eval <<-EOD + def #{name}(*arg, &block) + clnt = new + begin + clnt.#{name}(*arg, &block) + ensure + clnt.reset_all + end + end + EOD + end + + private + + def attr_proxy(symbol, assignable = false) + name = symbol.to_s + define_method(name) { + @session_manager.__send__(name) + } + if assignable + aname = name + '=' + define_method(aname) { |rhs| + @session_manager.__send__(aname, rhs) + } + end + end + end + + # HTTPClient::SSLConfig:: SSL configurator. + attr_reader :ssl_config + # HTTPClient::CookieManager:: Cookies configurator. + attr_accessor :cookie_manager + # An array of response HTTP message body String which is used for loop-back + # test. See test/* to see how to use it. If you want to do loop-back test + # of HTTP header, use test_loopback_http_response instead. + attr_reader :test_loopback_response + # An array of request filter which can trap HTTP request/response. + # See HTTPClient::WWWAuth to see how to use it. + attr_reader :request_filter + # HTTPClient::ProxyAuth:: Proxy authentication handler. + attr_reader :proxy_auth + # HTTPClient::WWWAuth:: WWW authentication handler. + attr_reader :www_auth + # How many times get_content and post_content follows HTTP redirect. + # 10 by default. + attr_accessor :follow_redirect_count + # Base url of resources. + attr_accessor :base_url + # Default request header. + attr_accessor :default_header + + # Set HTTP version as a String:: 'HTTP/1.0' or 'HTTP/1.1' + attr_proxy(:protocol_version, true) + # Connect timeout in sec. + attr_proxy(:connect_timeout, true) + # Request sending timeout in sec. + attr_proxy(:send_timeout, true) + # Response receiving timeout in sec. + attr_proxy(:receive_timeout, true) + # Reuse the same connection within this timeout in sec. from last used. + attr_proxy(:keep_alive_timeout, true) + # Size of reading block for non-chunked response. + attr_proxy(:read_block_size, true) + # Negotiation retry count for authentication. 5 by default. + attr_proxy(:protocol_retry_count, true) + # if your ruby is older than 2005-09-06, do not set socket_sync = false to + # avoid an SSL socket blocking bug in openssl/buffering.rb. + attr_proxy(:socket_sync, true) + # Enables TCP keepalive; no timing settings exist at present + attr_proxy(:tcp_keepalive, true) + # User-Agent header in HTTP request. + attr_proxy(:agent_name, true) + # From header in HTTP request. + attr_proxy(:from, true) + # An array of response HTTP String (not a HTTP message body) which is used + # for loopback test. See test/* to see how to use it. + attr_proxy(:test_loopback_http_response) + # Decompress a compressed (with gzip or deflate) content body transparently. false by default. + attr_proxy(:transparent_gzip_decompression, true) + # Raise BadResponseError if response size does not match with Content-Length header in response. false by default. + # TODO: enable by default + attr_proxy(:strict_response_size_check, true) + # Local socket address. Set HTTPClient#socket_local.host and HTTPClient#socket_local.port to specify local binding hostname and port of TCP socket. + attr_proxy(:socket_local, true) + + # Default header for PROPFIND request. 
+ PROPFIND_DEFAULT_EXTHEADER = { 'Depth' => '0' } + + # Default User-Agent header + DEFAULT_AGENT_NAME = 'HTTPClient/1.0' + + # Creates a HTTPClient instance which manages sessions, cookies, etc. + # + # HTTPClient.new takes optional arguments as a Hash. + # * :proxy - proxy url string + # * :agent_name - User-Agent String + # * :from - from header String + # * :base_url - base URL of resources + # * :default_header - header Hash all HTTP requests should have + # * :force_basic_auth - flag for sending Authorization header w/o gettin 401 first + # User-Agent and From are embedded in HTTP request Header if given. + # From header is not set without setting it explicitly. + # + # proxy = 'http://myproxy:8080' + # agent_name = 'MyAgent/0.1' + # from = 'from@example.com' + # HTTPClient.new(proxy, agent_name, from) + # + # After you set :base_url, all resources you pass to get, post and other + # methods are recognized to be prefixed with base_url. Say base_url is + # 'https://api.example.com/v1/, get('users') is the same as + # get('https://api.example.com/v1/users') internally. You can also pass + # full URL from 'http://' even after setting base_url. + # + # The expected base_url and path behavior is the following. Please take + # care of '/' in base_url and path. + # + # The last '/' is important for base_url: + # 1. http://foo/bar/baz/ + path -> http://foo/bar/baz/path + # 2. http://foo/bar/baz + path -> http://foo/bar/path + # Relative path handling: + # 3. http://foo/bar/baz/ + ../path -> http://foo/bar/path + # 4. http://foo/bar/baz + ../path -> http://foo/path + # 5. http://foo/bar/baz/ + ./path -> http://foo/bar/baz/path + # 6. http://foo/bar/baz + ./path -> http://foo/bar/path + # The leading '/' of path means absolute path: + # 7. http://foo/bar/baz/ + /path -> http://foo/path + # 8. http://foo/bar/baz + /path -> http://foo/path + # + # :default_header is for providing default headers Hash that all HTTP + # requests should have, such as custom 'Authorization' header in API. + # You can override :default_header with :header Hash parameter in HTTP + # request methods. + # + # :force_basic_auth turns on/off the BasicAuth force flag. Generally + # HTTP client must send Authorization header after it gets 401 error + # from server from security reason. But in some situation (e.g. API + # client) you might want to send Authorization from the beginning. + def initialize(*args, &block) + proxy, agent_name, from, base_url, default_header, force_basic_auth = + keyword_argument(args, :proxy, :agent_name, :from, :base_url, :default_header, :force_basic_auth) + @proxy = nil # assigned later. + @no_proxy = nil + @no_proxy_regexps = [] + @base_url = base_url + @default_header = default_header || {} + @www_auth = WWWAuth.new + @proxy_auth = ProxyAuth.new + @www_auth.basic_auth.force_auth = @proxy_auth.basic_auth.force_auth = force_basic_auth + @request_filter = [@proxy_auth, @www_auth] + @debug_dev = nil + @redirect_uri_callback = method(:default_redirect_uri_callback) + @test_loopback_response = [] + @session_manager = SessionManager.new(self) + @session_manager.agent_name = agent_name || DEFAULT_AGENT_NAME + @session_manager.from = from + @session_manager.ssl_config = @ssl_config = SSLConfig.new(self) + @cookie_manager = CookieManager.new + @follow_redirect_count = 10 + load_environment + self.proxy = proxy if proxy + keep_webmock_compat + instance_eval(&block) if block + end + + # webmock 1.6.2 depends on HTTP::Message#body.content to work. + # let's keep it work iif webmock is loaded for a while. 
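To make the constructor options and the base_url joining rules above concrete, a short sketch using only the documented keyword arguments; the API host and token are placeholders:

```ruby
require 'httpclient'

clnt = HTTPClient.new(
  :base_url       => 'https://api.example.com/v1/',   # trailing '/' kept, per rule 1
  :default_header => { 'Authorization' => 'Bearer <token>' },
  :agent_name     => 'MyAgent/0.1'
)

# 'users' is resolved against base_url, so this requests
# https://api.example.com/v1/users
res = clnt.get('users', :query => { 'page' => 1 })
p res.status

# A full URL still bypasses base_url entirely.
clnt.get('https://status.example.com/')
```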
+ def keep_webmock_compat + if respond_to?(:do_get_block_with_webmock) + ::HTTP::Message.module_eval do + def body + def (o = self.content).content + self + end + o + end + end + end + end + + # Returns debug device if exists. See debug_dev=. + def debug_dev + @debug_dev + end + + # Sets debug device. Once debug device is set, all HTTP requests and + # responses are dumped to given device. dev must respond to << for dump. + # + # Calling this method resets all existing sessions. + def debug_dev=(dev) + @debug_dev = dev + reset_all + @session_manager.debug_dev = dev + end + + # Returns URI object of HTTP proxy if exists. + def proxy + @proxy + end + + # Sets HTTP proxy used for HTTP connection. Given proxy can be an URI, + # a String or nil. You can set user/password for proxy authentication like + # HTTPClient#proxy = 'http://user:passwd@myproxy:8080' + # + # You can use environment variable 'http_proxy' or 'HTTP_PROXY' for it. + # You need to use 'cgi_http_proxy' or 'CGI_HTTP_PROXY' instead if you run + # HTTPClient from CGI environment from security reason. (HTTPClient checks + # 'REQUEST_METHOD' environment variable whether it's CGI or not) + # + # Calling this method resets all existing sessions. + def proxy=(proxy) + if proxy.nil? || proxy.to_s.empty? + @proxy = nil + @proxy_auth.reset_challenge + else + @proxy = urify(proxy) + if @proxy.scheme == nil or @proxy.scheme.downcase != 'http' or + @proxy.host == nil or @proxy.port == nil + raise ArgumentError.new("unsupported proxy #{proxy}") + end + @proxy_auth.reset_challenge + if @proxy.user || @proxy.password + @proxy_auth.set_auth(@proxy.user, @proxy.password) + end + end + reset_all + @session_manager.proxy = @proxy + @proxy + end + + # Returns NO_PROXY setting String if given. + def no_proxy + @no_proxy + end + + # Sets NO_PROXY setting String. no_proxy must be a comma separated String. + # Each entry must be 'host' or 'host:port' such as; + # HTTPClient#no_proxy = 'example.com,example.co.jp:443' + # + # 'localhost' is treated as a no_proxy site regardless of explicitly listed. + # HTTPClient checks given URI objects before accessing it. + # 'host' is tail string match. No IP-addr conversion. + # + # You can use environment variable 'no_proxy' or 'NO_PROXY' for it. + # + # Calling this method resets all existing sessions. + def no_proxy=(no_proxy) + @no_proxy = no_proxy + @no_proxy_regexps.clear + if @no_proxy + @no_proxy.scan(/([^\s:,]+)(?::(\d+))?/) do |host, port| + if host[0] == ?. + regexp = /#{Regexp.quote(host)}\z/i + else + regexp = /(\A|\.)#{Regexp.quote(host)}\z/i + end + @no_proxy_regexps << [regexp, port] + end + end + reset_all + end + + # Sets credential for Web server authentication. + # domain:: a String or an URI to specify where HTTPClient should use this + # credential. If you set uri to nil, HTTPClient uses this credential + # wherever a server requires it. + # user:: username String. + # passwd:: password String. + # + # You can set multiple credentials for each uri. + # + # clnt.set_auth('http://www.example.com/foo/', 'foo_user', 'passwd') + # clnt.set_auth('http://www.example.com/bar/', 'bar_user', 'passwd') + # + # Calling this method resets all existing sessions. + def set_auth(domain, user, passwd) + uri = to_resource_url(domain) + @www_auth.set_auth(uri, user, passwd) + reset_all + end + + # Deprecated. Use set_auth instead. 
+ def set_basic_auth(domain, user, passwd) + uri = to_resource_url(domain) + @www_auth.basic_auth.set(uri, user, passwd) + reset_all + end + + # Sets credential for Proxy authentication. + # user:: username String. + # passwd:: password String. + # + # Calling this method resets all existing sessions. + def set_proxy_auth(user, passwd) + @proxy_auth.set_auth(user, passwd) + reset_all + end + + # Turn on/off the BasicAuth force flag. Generally HTTP client must + # send Authorization header after it gets 401 error from server from + # security reason. But in some situation (e.g. API client) you might + # want to send Authorization from the beginning. + def force_basic_auth=(force_basic_auth) + @www_auth.basic_auth.force_auth = @proxy_auth.basic_auth.force_auth = force_basic_auth + end + + # Sets the filename where non-volatile Cookies be saved by calling + # save_cookie_store. + # This method tries to load and managing Cookies from the specified file. + # + # Calling this method resets all existing sessions. + def set_cookie_store(filename) + @cookie_manager.cookies_file = filename + @cookie_manager.load_cookies if filename + reset_all + end + + # Try to save Cookies to the file specified in set_cookie_store. Unexpected + # error will be raised if you don't call set_cookie_store first. + def save_cookie_store + @cookie_manager.save_cookies + end + + # Returns stored cookies. + def cookies + if @cookie_manager + @cookie_manager.cookies + end + end + + # Sets callback proc when HTTP redirect status is returned for get_content + # and post_content. default_redirect_uri_callback is used by default. + # + # If you need strict implementation which does not allow relative URI + # redirection, set strict_redirect_uri_callback instead. + # + # clnt.redirect_uri_callback = clnt.method(:strict_redirect_uri_callback) + # + def redirect_uri_callback=(redirect_uri_callback) + @redirect_uri_callback = redirect_uri_callback + end + + # Retrieves a web resource. + # + # uri:: a String or an URI object which represents an URL of web resource. + # query:: a Hash or an Array of query part of URL. + # e.g. { "a" => "b" } => 'http://host/part?a=b'. + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'http://host/part?a=b&a=c'. + # header:: a Hash or an Array of extra headers. e.g. + # { 'Accept' => 'text/html' } or + # [['Accept', 'image/jpeg'], ['Accept', 'image/png']]. + # &block:: Give a block to get chunked message-body of response like + # get_content(uri) { |chunked_body| ... }. + # Size of each chunk may not be the same. + # + # get_content follows HTTP redirect status (see HTTP::Status.redirect?) + # internally and try to retrieve content from redirected URL. See + # redirect_uri_callback= how HTTP redirection is handled. + # + # If you need to get full HTTP response including HTTP status and headers, + # use get method. get returns HTTP::Message as a response and you need to + # follow HTTP redirect by yourself if you need. + def get_content(uri, *args, &block) + query, header = keyword_argument(args, :query, :header) + success_content(follow_redirect(:get, uri, query, nil, header || {}, &block)) + end + + # Posts a content. + # + # uri:: a String or an URI object which represents an URL of web resource. + # body:: a Hash or an Array of body part. e.g. + # { "a" => "b" } => 'a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'a=b&a=c' + # When you pass a File as a value, it will be posted as a + # multipart/form-data. e.g. 
+ # { 'upload' => file } + # You can also send custom multipart by passing an array of hashes. + # Each part must have a :content attribute which can be a file, all + # other keys will become headers. + # [{ 'Content-Type' => 'text/plain', :content => "some text" }, + # { 'Content-Type' => 'video/mp4', :content => File.new('video.mp4') }] + # => + # header:: a Hash or an Array of extra headers. e.g. + # { 'Accept' => 'text/html' } + # or + # [['Accept', 'image/jpeg'], ['Accept', 'image/png']]. + # &block:: Give a block to get chunked message-body of response like + # post_content(uri) { |chunked_body| ... }. + # Size of each chunk may not be the same. + # + # post_content follows HTTP redirect status (see HTTP::Status.redirect?) + # internally and try to post the content to redirected URL. See + # redirect_uri_callback= how HTTP redirection is handled. + # Bear in mind that you should not depend on post_content because it sends + # the same POST method to the new location which is prohibited in HTTP spec. + # + # If you need to get full HTTP response including HTTP status and headers, + # use post method. + def post_content(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + query, body, header = keyword_argument(args, :query, :body, :header) + else + query = nil + body, header = keyword_argument(args, :body, :header) + end + success_content(follow_redirect(:post, uri, query, body, header || {}, &block)) + end + + # A method for redirect uri callback. How to use: + # clnt.redirect_uri_callback = clnt.method(:strict_redirect_uri_callback) + # This callback does not allow relative redirect such as + # Location: ../foo/ + # in HTTP header. (raises BadResponseError instead) + def strict_redirect_uri_callback(uri, res) + newuri = urify(res.header['location'][0]) + if https?(uri) && !https?(newuri) + raise BadResponseError.new("redirecting to non-https resource") + end + if !http?(newuri) && !https?(newuri) + raise BadResponseError.new("unexpected location: #{newuri}", res) + end + puts "redirect to: #{newuri}" if $DEBUG + newuri + end + + # A default method for redirect uri callback. This method is used by + # HTTPClient instance by default. + # This callback allows relative redirect such as + # Location: ../foo/ + # in HTTP header. + def default_redirect_uri_callback(uri, res) + newuri = urify(res.header['location'][0]) + if !http?(newuri) && !https?(newuri) + warn("#{newuri}: a relative URI in location header which is not recommended") + warn("'The field value consists of a single absolute URI' in HTTP spec") + newuri = uri + newuri + end + if https?(uri) && !https?(newuri) + raise BadResponseError.new("redirecting to non-https resource") + end + puts "redirect to: #{newuri}" if $DEBUG + newuri + end + + # Sends HEAD request to the specified URL. See request for arguments. + def head(uri, *args) + request(:head, uri, argument_to_hash(args, :query, :header, :follow_redirect)) + end + + # Sends GET request to the specified URL. See request for arguments. + def get(uri, *args, &block) + request(:get, uri, argument_to_hash(args, :query, :header, :follow_redirect), &block) + end + + # Sends PATCH request to the specified URL. See request for arguments. + def patch(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request(:patch, uri, new_args, &block) + end + + # Sends POST request to the specified URL. See request for arguments. 
+ # You should not depend on :follow_redirect => true for POST method. It + # sends the same POST method to the new location which is prohibited in HTTP spec. + def post(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header, :follow_redirect) + end + request(:post, uri, new_args, &block) + end + + # Sends PUT request to the specified URL. See request for arguments. + def put(uri, *args, &block) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request(:put, uri, new_args, &block) + end + + # Sends DELETE request to the specified URL. See request for arguments. + def delete(uri, *args, &block) + request(:delete, uri, argument_to_hash(args, :body, :header, :query), &block) + end + + # Sends OPTIONS request to the specified URL. See request for arguments. + def options(uri, *args, &block) + new_args = argument_to_hash(args, :header, :query, :body) + request(:options, uri, new_args, &block) + end + + # Sends PROPFIND request to the specified URL. See request for arguments. + def propfind(uri, *args, &block) + request(:propfind, uri, argument_to_hash(args, :header), &block) + end + + # Sends PROPPATCH request to the specified URL. See request for arguments. + def proppatch(uri, *args, &block) + request(:proppatch, uri, argument_to_hash(args, :body, :header), &block) + end + + # Sends TRACE request to the specified URL. See request for arguments. + def trace(uri, *args, &block) + request('TRACE', uri, argument_to_hash(args, :query, :header), &block) + end + + # Sends a request to the specified URL. + # + # method:: HTTP method to be sent. method.to_s.upcase is used. + # uri:: a String or an URI object which represents an URL of web resource. + # query:: a Hash or an Array of query part of URL. + # e.g. { "a" => "b" } => 'http://host/part?a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'http://host/part?a=b&a=c' + # body:: a Hash or an Array of body part. e.g. + # { "a" => "b" } + # => 'a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] + # => 'a=b&a=c'. + # When the given method is 'POST' and the given body contains a file + # as a value, it will be posted as a multipart/form-data. e.g. + # { 'upload' => file } + # You can also send custom multipart by passing an array of hashes. + # Each part must have a :content attribute which can be a file, all + # other keys will become headers. + # [{ 'Content-Type' => 'text/plain', :content => "some text" }, + # { 'Content-Type' => 'video/mp4', :content => File.new('video.mp4') }] + # => + # See HTTP::Message.file? for actual condition of 'a file'. + # header:: a Hash or an Array of extra headers. e.g. + # { 'Accept' => 'text/html' } or + # [['Accept', 'image/jpeg'], ['Accept', 'image/png']]. + # &block:: Give a block to get chunked message-body of response like + # get(uri) { |chunked_body| ... }. + # Size of each chunk may not be the same. + # + # You can also pass a String as a body. HTTPClient just sends a String as + # a HTTP request message body. + # + # When you pass an IO as a body, HTTPClient sends it as a HTTP request with + # chunked encoding (Transfer-Encoding: chunked in HTTP header) if IO does not + # respond to :size. Bear in mind that some server application does not support + # chunked request. At least cgi.rb does not support it. 
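As a rough illustration of the generic entry point described above (file paths and URLs are placeholders), streaming a response in chunks and sending an IO as the request body:

```ruby
require 'httpclient'

clnt = HTTPClient.new

# Stream a large response; the block receives body chunks (with arity 2 it
# would also receive the HTTP::Message as the first argument).
File.open('/tmp/download.bin', 'wb') do |out|
  clnt.request(:get, 'http://example.com/big-file') do |chunk|
    out.write(chunk)
  end
end

# Send an IO as the request body; since File responds to :size this goes out
# with a Content-Length header rather than chunked Transfer-Encoding.
File.open('/tmp/upload.bin', 'rb') do |io|
  res = clnt.request(:put, 'http://example.com/upload', :body => io,
                     :header => { 'Content-Type' => 'application/octet-stream' })
  p res.status
end
```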
+ def request(method, uri, *args, &block) + query, body, header, follow_redirect = keyword_argument(args, :query, :body, :header, :follow_redirect) + if method == :propfind + header ||= PROPFIND_DEFAULT_EXTHEADER + else + header ||= {} + end + uri = to_resource_url(uri) + if block + filtered_block = adapt_block(&block) + end + if follow_redirect + follow_redirect(method, uri, query, body, header, &block) + else + do_request(method, uri, query, body, header, &filtered_block) + end + end + + # Sends HEAD request in async style. See request_async for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def head_async(uri, *args) + request_async2(:head, uri, argument_to_hash(args, :query, :header)) + end + + # Sends GET request in async style. See request_async for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def get_async(uri, *args) + request_async2(:get, uri, argument_to_hash(args, :query, :header)) + end + + # Sends PATCH request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def patch_async(uri, *args) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request_async2(:patch, uri, new_args) + end + + # Sends POST request in async style. See request_async for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def post_async(uri, *args) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request_async2(:post, uri, new_args) + end + + # Sends PUT request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def put_async(uri, *args) + if hashy_argument_has_keys(args, :query, :body) + new_args = args[0] + else + new_args = argument_to_hash(args, :body, :header) + end + request_async2(:put, uri, new_args) + end + + # Sends DELETE request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def delete_async(uri, *args) + request_async2(:delete, uri, argument_to_hash(args, :body, :header, :query)) + end + + # Sends OPTIONS request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def options_async(uri, *args) + request_async2(:options, uri, argument_to_hash(args, :header, :query, :body)) + end + + # Sends PROPFIND request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def propfind_async(uri, *args) + request_async2(:propfind, uri, argument_to_hash(args, :body, :header)) + end + + # Sends PROPPATCH request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def proppatch_async(uri, *args) + request_async2(:proppatch, uri, argument_to_hash(args, :body, :header)) + end + + # Sends TRACE request in async style. See request_async2 for arguments. + # It immediately returns a HTTPClient::Connection instance as a result. + def trace_async(uri, *args) + request_async2(:trace, uri, argument_to_hash(args, :query, :header)) + end + + # Sends a request in async style. request method creates new Thread for + # HTTP connection and returns a HTTPClient::Connection instance immediately. 
+ # + # Arguments definition is the same as request. + def request_async(method, uri, query = nil, body = nil, header = {}) + uri = to_resource_url(uri) + do_request_async(method, uri, query, body, header) + end + + # new method that has same signature as 'request' + def request_async2(method, uri, *args) + query, body, header = keyword_argument(args, :query, :body, :header) + if [:post, :put].include?(method) + body ||= '' + end + if method == :propfind + header ||= PROPFIND_DEFAULT_EXTHEADER + else + header ||= {} + end + uri = to_resource_url(uri) + do_request_async(method, uri, query, body, header) + end + + # Resets internal session for the given URL. Keep-alive connection for the + # site (host-port pair) is disconnected if exists. + def reset(uri) + uri = to_resource_url(uri) + @session_manager.reset(uri) + end + + # Resets all of internal sessions. Keep-alive connections are disconnected. + def reset_all + @session_manager.reset_all + end + +private + + class RetryableResponse < StandardError # :nodoc: + attr_reader :res + + def initialize(res = nil) + @res = res + end + end + + class KeepAliveDisconnected < StandardError # :nodoc: + attr_reader :sess + attr_reader :cause + def initialize(sess = nil, cause = nil) + super("#{self.class.name}: #{cause ? cause.message : nil}") + @sess = sess + @cause = cause + end + end + + def hashy_argument_has_keys(args, *key) + # if the given arg is a single Hash and... + args.size == 1 and Hash === args[0] and + # it has any one of the key + key.all? { |e| args[0].key?(e) } + end + + def do_request(method, uri, query, body, header, &block) + res = nil + if HTTP::Message.file?(body) + pos = body.pos rescue nil + end + retry_count = @session_manager.protocol_retry_count + proxy = no_proxy?(uri) ? nil : @proxy + previous_request = previous_response = nil + while retry_count > 0 + body.pos = pos if pos + req = create_request(method, uri, query, body, header) + if previous_request + # to remember IO positions to read + req.http_body.positions = previous_request.http_body.positions + end + begin + protect_keep_alive_disconnected do + # TODO: remove Connection.new + # We want to delete Connection usage in do_get_block but Newrelic gem depends on it. + # https://github.com/newrelic/rpm/blob/master/lib/new_relic/agent/instrumentation/httpclient.rb#L34-L36 + conn = Connection.new + res = do_get_block(req, proxy, conn, &block) + # Webmock's do_get_block returns ConditionVariable + if !res.respond_to?(:previous) + res = conn.pop + end + end + res.previous = previous_response + break + rescue RetryableResponse => e + previous_request = req + previous_response = res = e.res + retry_count -= 1 + end + end + res + end + + def do_request_async(method, uri, query, body, header) + conn = Connection.new + t = Thread.new(conn) { |tconn| + begin + if HTTP::Message.file?(body) + pos = body.pos rescue nil + end + retry_count = @session_manager.protocol_retry_count + proxy = no_proxy?(uri) ? nil : @proxy + while retry_count > 0 + body.pos = pos if pos + req = create_request(method, uri, query, body, header) + begin + protect_keep_alive_disconnected do + do_get_stream(req, proxy, tconn) + end + break + rescue RetryableResponse + retry_count -= 1 + end + end + rescue Exception => e + conn.push e + end + } + conn.async_thread = t + conn + end + + def load_environment + # http_proxy + if getenv('REQUEST_METHOD') + # HTTP_PROXY conflicts with the environment variable usage in CGI where + # HTTP_* is used for HTTP header information. 
Unlike open-uri, we + # simply ignore http_proxy in CGI env and use cgi_http_proxy instead. + self.proxy = getenv('cgi_http_proxy') + else + self.proxy = getenv('http_proxy') + end + # no_proxy + self.no_proxy = getenv('no_proxy') + end + + def getenv(name) + ENV[name.downcase] || ENV[name.upcase] + end + + def adapt_block(&block) + return block if block.arity == 2 + proc { |r, str| block.call(str) } + end + + def follow_redirect(method, uri, query, body, header, &block) + uri = to_resource_url(uri) + if block + b = adapt_block(&block) + filtered_block = proc { |r, str| + b.call(r, str) if r.ok? + } + end + if HTTP::Message.file?(body) + pos = body.pos rescue nil + end + retry_number = 0 + previous = nil + request_query = query + while retry_number < @follow_redirect_count + body.pos = pos if pos + res = do_request(method, uri, request_query, body, header, &filtered_block) + res.previous = previous + if res.redirect? + if res.header['location'].empty? + raise BadResponseError.new("Missing Location header for redirect", res) + end + method = :get if res.see_other? # See RFC2616 10.3.4 + uri = urify(@redirect_uri_callback.call(uri, res)) + # To avoid duped query parameter. 'location' must include query part. + request_query = nil + previous = res + retry_number += 1 + else + return res + end + end + raise BadResponseError.new("retry count exceeded", res) + end + + def success_content(res) + if res.ok? + return res.content + else + raise BadResponseError.new("unexpected response: #{res.header.inspect}", res) + end + end + + def protect_keep_alive_disconnected + begin + yield + rescue KeepAliveDisconnected + # Force to create new connection + Thread.current[:HTTPClient_AcquireNewConnection] = true + begin + yield + ensure + Thread.current[:HTTPClient_AcquireNewConnection] = false + end + end + end + + def create_request(method, uri, query, body, header) + method = method.to_s.upcase + if header.is_a?(Hash) + header = @default_header.merge(header).to_a + else + header = @default_header.to_a + header.dup + end + boundary = nil + if body + _, content_type = header.find { |key, value| + key.to_s.downcase == 'content-type' + } + if content_type + if /\Amultipart/ =~ content_type + if content_type =~ /boundary=(.+)\z/ + boundary = $1 + else + boundary = create_boundary + content_type = "#{content_type}; boundary=#{boundary}" + header = override_header(header, 'content-type', content_type) + end + end + else + if file_in_form_data?(body) + boundary = create_boundary + content_type = "multipart/form-data; boundary=#{boundary}" + else + content_type = 'application/x-www-form-urlencoded' + end + header << ['Content-Type', content_type] + end + end + req = HTTP::Message.new_request(method, uri, query, body, boundary) + header.each do |key, value| + req.header.add(key.to_s, value) + end + if @cookie_manager + cookie_value = @cookie_manager.cookie_value(uri) + if cookie_value + req.header.add('Cookie', cookie_value) + end + end + req + end + + def create_boundary + Digest::SHA1.hexdigest(Time.now.to_s) + end + + def file_in_form_data?(body) + HTTP::Message.multiparam_query?(body) && + body.any? 
{ |k, v| HTTP::Message.file?(v) } + end + + def override_header(header, key, value) + result = [] + header.each do |k, v| + if k.to_s.downcase == key + result << [key, value] + else + result << [k, v] + end + end + result + end + + NO_PROXY_HOSTS = ['localhost'] + + def no_proxy?(uri) + if !@proxy or NO_PROXY_HOSTS.include?(uri.host) + return true + end + @no_proxy_regexps.each do |regexp, port| + if !port || uri.port == port.to_i + if regexp =~ uri.host + return true + end + end + end + false + end + + # !! CAUTION !! + # Method 'do_get*' runs under MT conditon. Be careful to change. + def do_get_block(req, proxy, conn, &block) + @request_filter.each do |filter| + filter.filter_request(req) + end + if str = @test_loopback_response.shift + dump_dummy_request_response(req.http_body.dump, str) if @debug_dev + res = HTTP::Message.new_response(str, req.header) + conn.push(res) + return res + end + content = block ? nil : '' + res = HTTP::Message.new_response(content, req.header) + @debug_dev << "= Request\n\n" if @debug_dev + sess = @session_manager.query(req, proxy) + res.peer_cert = sess.ssl_peer_cert + @debug_dev << "\n\n= Response\n\n" if @debug_dev + do_get_header(req, res, sess) + conn.push(res) + sess.get_body do |part| + set_encoding(part, res.body_encoding) + if block + block.call(res, part.dup) + else + content << part + end + end + # there could be a race condition but it's OK to cache unreusable + # connection because we do retry for that case. + @session_manager.keep(sess) unless sess.closed? + commands = @request_filter.collect { |filter| + filter.filter_response(req, res) + } + if commands.find { |command| command == :retry } + raise RetryableResponse.new(res) + end + res + end + + def do_get_stream(req, proxy, conn) + @request_filter.each do |filter| + filter.filter_request(req) + end + if str = @test_loopback_response.shift + dump_dummy_request_response(req.http_body.dump, str) if @debug_dev + conn.push(HTTP::Message.new_response(StringIO.new(str), req.header)) + return + end + piper, pipew = IO.pipe + pipew.binmode + res = HTTP::Message.new_response(piper, req.header) + @debug_dev << "= Request\n\n" if @debug_dev + sess = @session_manager.query(req, proxy) + res.peer_cert = sess.ssl_peer_cert + @debug_dev << "\n\n= Response\n\n" if @debug_dev + do_get_header(req, res, sess) + conn.push(res) + sess.get_body do |part| + set_encoding(part, res.body_encoding) + pipew.write(part) + end + pipew.close + @session_manager.keep(sess) unless sess.closed? + _ = @request_filter.collect { |filter| + filter.filter_response(req, res) + } + # ignore commands (not retryable in async mode) + res + end + + def do_get_header(req, res, sess) + res.http_version, res.status, res.reason, headers = sess.get_header + res.header.set_headers(headers) + if @cookie_manager + res.header['set-cookie'].each do |cookie| + @cookie_manager.parse(cookie, req.header.request_uri) + end + end + end + + def dump_dummy_request_response(req, res) + @debug_dev << "= Dummy Request\n\n" + @debug_dev << req + @debug_dev << "\n\n= Dummy Response\n\n" + @debug_dev << res + end + + def set_encoding(str, encoding) + str.force_encoding(encoding) if encoding + end + + def to_resource_url(uri) + u = urify(uri) + if @base_url && u.scheme.nil? && u.host.nil? 
+ urify(@base_url) + uri + else + u + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/auth.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/auth.rb new file mode 100644 index 0000000..6b7b8b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/auth.rb @@ -0,0 +1,924 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'digest/md5' +require 'httpclient/session' +require 'mutex_m' + + +class HTTPClient + + NTLMEnabled = false + SSPIEnabled = false + GSSAPIEnabled = false + + # Common abstract class for authentication filter. + # + # There are 2 authentication filters. + # WWWAuth:: Authentication filter for handling authentication negotiation + # between Web server. Parses 'WWW-Authentication' header in + # response and generates 'Authorization' header in request. + # ProxyAuth:: Authentication filter for handling authentication negotiation + # between Proxy server. Parses 'Proxy-Authentication' header in + # response and generates 'Proxy-Authorization' header in request. + class AuthFilterBase + private + + def parse_authentication_header(res, tag) + challenge = res.header[tag] + return nil unless challenge + challenge.collect { |c| parse_challenge_header(c) }.compact + end + + def parse_challenge_header(challenge) + scheme, param_str = challenge.scan(/\A(\S+)(?:\s+(.*))?\z/)[0] + return nil if scheme.nil? + return scheme, param_str + end + end + + + # Authentication filter for handling authentication negotiation between + # Web server. Parses 'WWW-Authentication' header in response and + # generates 'Authorization' header in request. + # + # Authentication filter is implemented using request filter of HTTPClient. + # It traps HTTP response header and maintains authentication state, and + # traps HTTP request header for inserting necessary authentication header. + # + # WWWAuth has sub filters (BasicAuth, DigestAuth, NegotiateAuth and + # SSPINegotiateAuth) and delegates some operations to it. + # NegotiateAuth requires 'ruby/ntlm' module (rubyntlm gem). + # SSPINegotiateAuth requires 'win32/sspi' module (rubysspi gem). + class WWWAuth < AuthFilterBase + attr_reader :basic_auth + attr_reader :digest_auth + attr_reader :negotiate_auth + attr_reader :sspi_negotiate_auth + attr_reader :oauth + + # Creates new WWWAuth. + def initialize + @basic_auth = BasicAuth.new + @digest_auth = DigestAuth.new + @negotiate_auth = NegotiateAuth.new + @ntlm_auth = NegotiateAuth.new('NTLM') + @sspi_negotiate_auth = SSPINegotiateAuth.new + @oauth = OAuth.new + # sort authenticators by priority + @authenticator = [@oauth, @negotiate_auth, @ntlm_auth, @sspi_negotiate_auth, @digest_auth, @basic_auth] + end + + # Resets challenge state. See sub filters for more details. + def reset_challenge + @authenticator.each do |auth| + auth.reset_challenge + end + end + + # Set authentication credential. See sub filters for more details. + def set_auth(uri, user, passwd) + @authenticator.each do |auth| + auth.set(uri, user, passwd) + end + reset_challenge + end + + # Filter API implementation. Traps HTTP request and insert + # 'Authorization' header if needed. + def filter_request(req) + @authenticator.each do |auth| + next unless auth.set? 
# hasn't be set, don't use it + if cred = auth.get(req) + if cred == :skip + # some authenticator (NTLM and Negotiate) does not + # need to send extra header after authorization. In such case + # it should block other authenticators to respond and :skip is + # the marker for such case. + return + end + req.header.set('Authorization', auth.scheme + " " + cred) + return + end + end + end + + # Filter API implementation. Traps HTTP response and parses + # 'WWW-Authenticate' header. + # + # This remembers the challenges for all authentication methods + # available to the client. On the subsequent retry of the request, + # filter_request will select the strongest method. + def filter_response(req, res) + command = nil + if res.status == HTTP::Status::UNAUTHORIZED + if challenge = parse_authentication_header(res, 'www-authenticate') + uri = req.header.request_uri + challenge.each do |scheme, param_str| + @authenticator.each do |auth| + next unless auth.set? # hasn't be set, don't use it + if scheme.downcase == auth.scheme.downcase + challengeable = auth.challenge(uri, param_str) + command = :retry if challengeable + end + end + end + # ignore unknown authentication scheme + end + end + command + end + end + + + # Authentication filter for handling authentication negotiation between + # Proxy server. Parses 'Proxy-Authentication' header in response and + # generates 'Proxy-Authorization' header in request. + # + # Authentication filter is implemented using request filter of HTTPClient. + # It traps HTTP response header and maintains authentication state, and + # traps HTTP request header for inserting necessary authentication header. + # + # ProxyAuth has sub filters (BasicAuth, NegotiateAuth, and SSPINegotiateAuth) + # and delegates some operations to it. + # NegotiateAuth requires 'ruby/ntlm' module. + # SSPINegotiateAuth requires 'win32/sspi' module. + class ProxyAuth < AuthFilterBase + attr_reader :basic_auth + attr_reader :digest_auth + attr_reader :negotiate_auth + attr_reader :sspi_negotiate_auth + + # Creates new ProxyAuth. + def initialize + @basic_auth = ProxyBasicAuth.new + @negotiate_auth = NegotiateAuth.new + @ntlm_auth = NegotiateAuth.new('NTLM') + @sspi_negotiate_auth = SSPINegotiateAuth.new + @digest_auth = ProxyDigestAuth.new + # sort authenticators by priority + @authenticator = [@negotiate_auth, @ntlm_auth, @sspi_negotiate_auth, @digest_auth, @basic_auth] + end + + # Resets challenge state. See sub filters for more details. + def reset_challenge + @authenticator.each do |auth| + auth.reset_challenge + end + end + + # Set authentication credential. See sub filters for more details. + def set_auth(user, passwd) + @authenticator.each do |auth| + auth.set(nil, user, passwd) + end + reset_challenge + end + + # Filter API implementation. Traps HTTP request and insert + # 'Proxy-Authorization' header if needed. + def filter_request(req) + @authenticator.each do |auth| + next unless auth.set? # hasn't be set, don't use it + if cred = auth.get(req) + if cred == :skip + # some authenticator (NTLM and Negotiate) does not + # need to send extra header after authorization. In such case + # it should block other authenticators to respond and :skip is + # the marker for such case. + return + end + req.header.set('Proxy-Authorization', auth.scheme + " " + cred) + return + end + end + end + + # Filter API implementation. Traps HTTP response and parses + # 'Proxy-Authenticate' header. 
+ def filter_response(req, res) + command = nil + if res.status == HTTP::Status::PROXY_AUTHENTICATE_REQUIRED + if challenge = parse_authentication_header(res, 'proxy-authenticate') + uri = req.header.request_uri + challenge.each do |scheme, param_str| + @authenticator.each do |auth| + next unless auth.set? # hasn't be set, don't use it + if scheme.downcase == auth.scheme.downcase + challengeable = auth.challenge(uri, param_str) + command = :retry if challengeable + end + end + end + # ignore unknown authentication scheme + end + end + command + end + end + + # Authentication filter base class. + class AuthBase + include HTTPClient::Util + + # Authentication scheme. + attr_reader :scheme + + def initialize(scheme) + @scheme = scheme + @challenge = {} + end + + # Resets challenge state. Do not send '*Authorization' header until the + # server sends '*Authentication' again. + def reset_challenge + synchronize do + @challenge.clear + end + end + end + + # Authentication filter for handling BasicAuth negotiation. + # Used in WWWAuth and ProxyAuth. + class BasicAuth < AuthBase + include Mutex_m + + # Send Authorization Header without receiving 401 + attr_accessor :force_auth + + # Creates new BasicAuth filter. + def initialize + super('Basic') + @cred = nil + @auth = {} + @force_auth = false + end + + # Set authentication credential. + # uri == nil for generic purpose (allow to use user/password for any URL). + def set(uri, user, passwd) + synchronize do + if uri.nil? + @cred = ["#{user}:#{passwd}"].pack('m').tr("\n", '') + else + uri = Util.uri_dirname(uri) + @auth[uri] = ["#{user}:#{passwd}"].pack('m').tr("\n", '') + end + end + end + + # have we marked this as set - ie that it's valid to use in this context? + def set? + @cred || @auth.any? + end + + # Response handler: returns credential. + # It sends cred only when a given uri is; + # * child page of challengeable(got *Authenticate before) uri and, + # * child page of defined credential + def get(req) + target_uri = req.header.request_uri + synchronize { + return nil if !@force_auth and !@challenge.any? { |uri, ok| + Util.uri_part_of(target_uri, uri) and ok + } + return @cred if @cred + Util.hash_find_value(@auth) { |uri, cred| + Util.uri_part_of(target_uri, uri) + } + } + end + + # Challenge handler: remember URL for response. + def challenge(uri, param_str = nil) + synchronize { + @challenge[urify(uri)] = true + true + } + end + end + + class ProxyBasicAuth < BasicAuth + def set(uri, user, passwd) + synchronize do + @cred = ["#{user}:#{passwd}"].pack('m').tr("\n", '') + end + end + + def get(req) + synchronize { + return nil if !@force_auth and !@challenge['challenged'] + @cred + } + end + + # Challenge handler: remember URL for response. + def challenge(uri, param_str = nil) + synchronize { + @challenge['challenged'] = true + true + } + end + end + + # Authentication filter for handling DigestAuth negotiation. + # Used in WWWAuth. + class DigestAuth < AuthBase + include Mutex_m + + # Creates new DigestAuth filter. + def initialize + super('Digest') + @auth = {} + @nonce_count = 0 + end + + # Set authentication credential. + # uri == nil is ignored. + def set(uri, user, passwd) + synchronize do + if uri + uri = Util.uri_dirname(uri) + @auth[uri] = [user, passwd] + end + end + end + + # have we marked this as set - ie that it's valid to use in this context? + def set? + @auth.any? + end + + # Response handler: returns credential. 
+ # It sends cred only when a given uri is; + # * child page of challengeable(got *Authenticate before) uri and, + # * child page of defined credential + def get(req) + target_uri = req.header.request_uri + synchronize { + param = Util.hash_find_value(@challenge) { |uri, v| + Util.uri_part_of(target_uri, uri) + } + return nil unless param + user, passwd = Util.hash_find_value(@auth) { |uri, auth_data| + Util.uri_part_of(target_uri, uri) + } + return nil unless user + calc_cred(req, user, passwd, param) + } + end + + # Challenge handler: remember URL and challenge token for response. + def challenge(uri, param_str) + synchronize { + @challenge[uri] = parse_challenge_param(param_str) + true + } + end + + private + + # this method is implemented by sromano and posted to + # http://tools.assembla.com/breakout/wiki/DigestForSoap + # Thanks! + # supported algorithms: MD5, MD5-sess + def calc_cred(req, user, passwd, param) + method = req.header.request_method + path = req.header.create_query_uri + a_1 = "#{user}:#{param['realm']}:#{passwd}" + a_2 = "#{method}:#{path}" + qop = param['qop'] + nonce = param['nonce'] + cnonce = nil + if qop || param['algorithm'] =~ /MD5-sess/ + cnonce = generate_cnonce() + end + a_1_md5sum = Digest::MD5.hexdigest(a_1) + if param['algorithm'] =~ /MD5-sess/ + a_1_md5sum = Digest::MD5.hexdigest("#{a_1_md5sum}:#{nonce}:#{cnonce}") + algorithm = "MD5-sess" + else + algorithm = "MD5" + end + message_digest = [] + message_digest << a_1_md5sum + message_digest << nonce + if qop + @nonce_count += 1 + message_digest << ('%08x' % @nonce_count) + message_digest << cnonce + message_digest << param['qop'] + end + message_digest << Digest::MD5.hexdigest(a_2) + header = [] + header << "username=\"#{user}\"" + header << "realm=\"#{param['realm']}\"" + header << "nonce=\"#{nonce}\"" + header << "uri=\"#{path}\"" + if cnonce + header << "cnonce=\"#{cnonce}\"" + end + if qop + header << "nc=#{'%08x' % @nonce_count}" + header << "qop=#{param['qop']}" + end + header << "response=\"#{Digest::MD5.hexdigest(message_digest.join(":"))}\"" + header << "algorithm=#{algorithm}" + header << "opaque=\"#{param['opaque']}\"" if param.key?('opaque') + header.join(", ") + end + + # cf. WEBrick::HTTPAuth::DigestAuth#generate_next_nonce(aTime) + def generate_cnonce + now = "%012d" % Time.now.to_i + pk = Digest::MD5.hexdigest([now, self.__id__, Process.pid, rand(65535)].join)[0, 32] + [now + ':' + pk].pack('m*').chop + end + + def parse_challenge_param(param_str) + param = {} + param_str.scan(/\s*([^\,]+(?:\\.[^\,]*)*)/).each do |str| + key, value = str[0].scan(/\A([^=]+)=(.*)\z/)[0] + if /\A"(.*)"\z/ =~ value + value = $1.gsub(/\\(.)/, '\1') + end + param[key] = value + end + param + end + end + + + # Authentication filter for handling DigestAuth negotiation. + # Ignores uri argument. Used in ProxyAuth. + class ProxyDigestAuth < DigestAuth + + # overrides DigestAuth#set. sets default user name and password. uri is not used. + def set(uri, user, passwd) + synchronize do + @auth = [user, passwd] + end + end + + # overrides DigestAuth#get. 
Uses default user name and password + # regardless of target uri if the proxy has required authentication + # before + def get(req) + synchronize { + param = @challenge + return nil unless param + user, passwd = @auth + return nil unless user + calc_cred(req, user, passwd, param) + } + end + + def reset_challenge + synchronize do + @challenge = nil + end + end + + def challenge(uri, param_str) + synchronize { + @challenge = parse_challenge_param(param_str) + true + } + end + end + + # Authentication filter for handling Negotiate/NTLM negotiation. + # Used in WWWAuth and ProxyAuth. + # + # NegotiateAuth depends on 'ruby/ntlm' module. + class NegotiateAuth < AuthBase + include Mutex_m + + # NTLM opt for ruby/ntlm. {:ntlmv2 => true} by default. + attr_reader :ntlm_opt + + # Creates new NegotiateAuth filter. + def initialize(scheme = "Negotiate") + super(scheme) + @auth = {} + @auth_default = nil + @ntlm_opt = { + :ntlmv2 => true + } + end + + # Set authentication credential. + # uri == nil for generic purpose (allow to use user/password for any URL). + def set(uri, user, passwd) + synchronize do + if uri + uri = Util.uri_dirname(uri) + @auth[uri] = [user, passwd] + else + @auth_default = [user, passwd] + end + end + end + + # have we marked this as set - ie that it's valid to use in this context? + def set? + @auth_default || @auth.any? + end + + # Response handler: returns credential. + # See ruby/ntlm for negotiation state transition. + def get(req) + target_uri = req.header.request_uri + synchronize { + _domain_uri, param = @challenge.find { |uri, v| + Util.uri_part_of(target_uri, uri) + } + return nil unless param + user, passwd = Util.hash_find_value(@auth) { |uri, auth_data| + Util.uri_part_of(target_uri, uri) + } + unless user + user, passwd = @auth_default + end + return nil unless user + Util.try_require('net/ntlm') || return + domain = nil + domain, user = user.split("\\") if user.index("\\") + state = param[:state] + authphrase = param[:authphrase] + case state + when :init + t1 = Net::NTLM::Message::Type1.new + t1.domain = domain if domain + t1.encode64 + when :response + t2 = Net::NTLM::Message.decode64(authphrase) + param = {:user => user, :password => passwd} + param[:domain] = domain if domain + t3 = t2.response(param, @ntlm_opt.dup) + @challenge[target_uri][:state] = :done + t3.encode64 + when :done + :skip + else + nil + end + } + end + + # Challenge handler: remember URL and challenge token for response. + def challenge(uri, param_str) + synchronize { + if param_str.nil? or @challenge[uri].nil? + c = @challenge[uri] = {} + c[:state] = :init + c[:authphrase] = "" + else + c = @challenge[uri] + c[:state] = :response + c[:authphrase] = param_str + end + true + } + end + end + + + # Authentication filter for handling Negotiate/NTLM negotiation. + # Used in ProxyAuth. + # + # SSPINegotiateAuth depends on 'win32/sspi' module. + class SSPINegotiateAuth < AuthBase + include Mutex_m + + # Creates new SSPINegotiateAuth filter. + def initialize + super('Negotiate') + end + + # Set authentication credential. + # NOT SUPPORTED: username and necessary data is retrieved by win32/sspi. + # See win32/sspi for more details. + def set(*args) + # not supported + end + + # Check always (not effective but it works) + def set? + !@challenge.empty? + end + + # Response handler: returns credential. + # See win32/sspi for negotiation state transition. 
+ def get(req) + target_uri = req.header.request_uri + synchronize { + domain_uri, param = @challenge.find { |uri, v| + Util.uri_part_of(target_uri, uri) + } + return nil unless param + Util.try_require('win32/sspi') || Util.try_require('gssapi') || return + state = param[:state] + authenticator = param[:authenticator] + authphrase = param[:authphrase] + case state + when :init + if defined?(Win32::SSPI) + authenticator = param[:authenticator] = Win32::SSPI::NegotiateAuth.new + authenticator.get_initial_token(@scheme) + else # use GSSAPI + authenticator = param[:authenticator] = GSSAPI::Simple.new(domain_uri.host, 'HTTP') + # Base64 encode the context token + [authenticator.init_context].pack('m').gsub(/\n/,'') + end + when :response + @challenge[target_uri][:state] = :done + if defined?(Win32::SSPI) + authenticator.complete_authentication(authphrase) + else # use GSSAPI + authenticator.init_context(authphrase.unpack('m').pop) + end + when :done + :skip + else + nil + end + } + end + + # Challenge handler: remember URL and challenge token for response. + def challenge(uri, param_str) + synchronize { + if param_str.nil? or @challenge[uri].nil? + c = @challenge[uri] = {} + c[:state] = :init + c[:authenticator] = nil + c[:authphrase] = "" + else + c = @challenge[uri] + c[:state] = :response + c[:authphrase] = param_str + end + true + } + end + end + + # Authentication filter for handling OAuth negotiation. + # Used in WWWAuth. + # + # CAUTION: This impl only support '#7 Accessing Protected Resources' in OAuth + # Core 1.0 spec for now. You need to obtain Access token and Access secret by + # yourself. + # + # CAUTION: This impl does NOT support OAuth Request Body Hash spec for now. + # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html + # + class OAuth < AuthBase + include Mutex_m + + class Config + include HTTPClient::Util + + attr_accessor :http_method + attr_accessor :realm + attr_accessor :consumer_key + attr_accessor :consumer_secret + attr_accessor :token + attr_accessor :secret + attr_accessor :signature_method + attr_accessor :version + attr_accessor :callback + attr_accessor :verifier + + # for OAuth Session 1.0 (draft) + attr_accessor :session_handle + + attr_reader :signature_handler + + attr_accessor :debug_timestamp + attr_accessor :debug_nonce + + def initialize(*args) + @http_method, + @realm, + @consumer_key, + @consumer_secret, + @token, + @secret, + @signature_method, + @version, + @callback, + @verifier = + keyword_argument(args, + :http_method, + :realm, + :consumer_key, + :consumer_secret, + :token, + :secret, + :signature_method, + :version, + :callback, + :verifier + ) + @http_method ||= :post + @session_handle = nil + @signature_handler = {} + end + end + + def self.escape(str) # :nodoc: + if str.respond_to?(:force_encoding) + str.dup.force_encoding('BINARY').gsub(/([^a-zA-Z0-9_.~-]+)/) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + } + else + str.gsub(/([^a-zA-Z0-9_.~-]+)/n) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + } + end + end + + def escape(str) + self.class.escape(str) + end + + # Creates new DigestAuth filter. + def initialize + super('OAuth') + @config = nil # common config + @auth = {} # configs for each site + @nonce_count = 0 + @signature_handler = { + 'HMAC-SHA1' => method(:sign_hmac_sha1) + } + end + + # Set authentication credential. + # You cannot set OAuth config via WWWAuth#set_auth. Use OAuth#config= + def set(*args) + # not supported + end + + # Check always (not effective but it works) + def set? 
+ !@challenge.empty? + end + + # Set authentication credential. + def set_config(uri, config) + synchronize do + if uri.nil? + @config = config + else + uri = Util.uri_dirname(urify(uri)) + @auth[uri] = config + end + end + end + + # Get authentication credential. + def get_config(uri = nil) + synchronize { + do_get_config(uri) + } + end + + # Response handler: returns credential. + # It sends cred only when a given uri is; + # * child page of challengeable(got *Authenticate before) uri and, + # * child page of defined credential + def get(req) + target_uri = req.header.request_uri + synchronize { + return nil unless @challenge[nil] or @challenge.find { |uri, ok| + Util.uri_part_of(target_uri, uri) and ok + } + config = do_get_config(target_uri) || @config + return nil unless config + calc_cred(req, config) + } + end + + # Challenge handler: remember URL for response. + # + # challenge() in OAuth handler always returns false to avoid connection + # retry which should not work in OAuth authentication context. This + # method just remember URL (nil means 'any') for the next connection. + # Normally OAuthClient handles this correctly but see how it uses when + # you need to use this class directly. + def challenge(uri, param_str = nil) + synchronize { + if uri.nil? + @challenge[nil] = true + else + @challenge[urify(uri)] = true + end + false + } + end + + private + + def do_get_config(uri = nil) + if uri.nil? + @config + else + uri = urify(uri) + Util.hash_find_value(@auth) { |cand_uri, cred| + Util.uri_part_of(uri, cand_uri) + } + end + end + + def calc_cred(req, config) + header = {} + header['oauth_consumer_key'] = config.consumer_key + header['oauth_signature_method'] = config.signature_method + header['oauth_timestamp'] = config.debug_timestamp || Time.now.to_i.to_s + header['oauth_nonce'] = config.debug_nonce || generate_nonce() + header['oauth_token'] = config.token if config.token + header['oauth_version'] = config.version if config.version + header['oauth_callback'] = config.callback if config.callback + header['oauth_verifier'] = config.verifier if config.verifier + header['oauth_session_handle'] = config.session_handle if config.session_handle + signature = sign(config, header, req) + header['oauth_signature'] = signature + # no need to do but we should sort for easier to test. 
+ str = header.sort_by { |k, v| k }.map { |k, v| encode_header(k, v) }.join(', ') + if config.realm + str = %Q(realm="#{config.realm}", ) + str + end + str + end + + def generate_nonce + @nonce_count += 1 + now = "%012d" % Time.now.to_i + pk = Digest::MD5.hexdigest([@nonce_count.to_s, now, self.__id__, Process.pid, rand(65535)].join)[0, 32] + [now + ':' + pk].pack('m*').chop + end + + def encode_header(k, v) + %Q(#{escape(k.to_s)}="#{escape(v.to_s)}") + end + + def encode_param(params) + params.map { |k, v| + [v].flatten.map { |vv| + %Q(#{escape(k.to_s)}=#{escape(vv.to_s)}) + } + }.flatten + end + + def sign(config, header, req) + base_string = create_base_string(config, header, req) + if handler = config.signature_handler[config.signature_method] || @signature_handler[config.signature_method.to_s] + handler.call(config, base_string) + else + raise ConfigurationError.new("Unknown OAuth signature method: #{config.signature_method}") + end + end + + def create_base_string(config, header, req) + params = encode_param(header) + query = req.header.request_query + if query and HTTP::Message.multiparam_query?(query) + params += encode_param(query) + end + # captures HTTP Message body only for 'application/x-www-form-urlencoded' + if req.header.contenttype == 'application/x-www-form-urlencoded' and req.http_body.size + params += encode_param(HTTP::Message.parse(req.http_body.content)) + end + uri = req.header.request_uri + if uri.query + params += encode_param(HTTP::Message.parse(uri.query)) + end + if uri.port == uri.default_port + request_url = "#{uri.scheme.downcase}://#{uri.host}#{uri.path}" + else + request_url = "#{uri.scheme.downcase}://#{uri.host}:#{uri.port}#{uri.path}" + end + [req.header.request_method.upcase, request_url, params.sort.join('&')].map { |e| + escape(e) + }.join('&') + end + + def sign_hmac_sha1(config, base_string) + unless SSLEnabled + raise ConfigurationError.new("openssl required for OAuth implementation") + end + key = [escape(config.consumer_secret.to_s), escape(config.secret.to_s)].join('&') + digester = OpenSSL::Digest::SHA1.new + [OpenSSL::HMAC.digest(digester, key, base_string)].pack('m*').chomp + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cacert.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cacert.pem new file mode 100644 index 0000000..742dc6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cacert.pem @@ -0,0 +1,3952 @@ +## +## Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Wed Oct 28 04:12:04 2015 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## +## Conversion done with mk-ca-bundle.pl version 1.25. 
+## SHA1: 6d7d2f0a4fae587e7431be191a081ac1257d300a +## + + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i 
+YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm 
+fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw 
+CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG 
+Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw 
+DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j 
+LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 
+j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P 
+NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL 
+DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 
+tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM 
+Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT 
+XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA 
+============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN 
CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh 
+bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ 
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 
+LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC 
+IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 
+dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD 
+P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN 
+MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT 
+BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE 
+BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy 
+dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI 
+rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg 
+Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO 
+/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A 
+dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI 
+KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH 
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 
+0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD 
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE 
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ 
+KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC 
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil 
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC 
++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ 
+Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o 
+lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f 
+Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj 
+h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm 
+aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol 
+TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly 
+bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK 
+BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b 
+WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V 
+cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD 
+VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM 
+BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu 
+ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L 
+nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH 
+sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB 
+DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N +YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ 
+4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh +zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO 
+EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy +YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn +FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 
+0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab +mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ +82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa 
+m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa +YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp 
+c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 +utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 
+dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH 
+oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority +=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== 
+-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf +vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +QuoVadis Root CA 1 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE +PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm +PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6 +Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN 
+ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l +g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV +7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX +9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f +iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg +t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI +hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3 +GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct +Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP ++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh +3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa +wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6 +O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0 +FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV +hMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +QuoVadis Root CA 2 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh +ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY +NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t +oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o +MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l +V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo +L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ +sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD +6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh +lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI +hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K +pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9 +x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz +dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X +U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw +mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD +zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN +JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr +O3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +QuoVadis Root CA 3 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv 
+b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286 +IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL +Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe +6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3 +I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U +VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7 +5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi +Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM +dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt +rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI +hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS +t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ +TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du +DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib +Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD +hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX +0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW +dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2 +PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +DigiCert Assured ID Root G2 +=========================== +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw +MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH +35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq +bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw +VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP +YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn +lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO +w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv +0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz +d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW +hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M +jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +DigiCert Assured ID Root G3 +=========================== +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD +VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb +RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs +KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF +UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy +YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy +1vUhZscv6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +DigiCert Global Root G2 +======================= +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx +MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ +kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO +3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV +BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM +UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB +o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu +5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr +F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U +WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH +QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/ +iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +DigiCert Global Root G3 +======================= +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD +VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw +MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C +AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O +YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp +Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y +3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34 +VOKa5Vt8sycX +-----END CERTIFICATE----- + +DigiCert Trusted Root G4 +======================== +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw +HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp +pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o +k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa 
+vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY +QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6 +MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm +mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7 +f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH +dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8 +oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY +ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr +yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy +7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah +ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN +5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb +/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa +5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK +G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP +82Z+ +-----END CERTIFICATE----- + +WoSign +====== +-----BEGIN CERTIFICATE----- +MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQG +EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNVBAMTIUNlcnRpZmljYXRpb24g +QXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJ +BgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA +vcqNrLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1UfcIiePyO +CbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcSccf+Hb0v1naMQFXQoOXXDX +2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2ZjC1vt7tj/id07sBMOby8w7gLJKA84X5 +KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4Mx1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR ++ScPewavVIMYe+HdVHpRaG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ez +EC8wQjchzDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDaruHqk +lWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221KmYo0SLwX3OSACCK2 +8jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvASh0JWzko/amrzgD5LkhLJuYwTKVY +yrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWvHYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0C +AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R +8bNLtwYgFP6HEtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 +LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJMuYhOZO9sxXq +T2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2eJXLOC62qx1ViC777Y7NhRCOj +y+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VNg64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC +2nz4SNAzqfkHx5Xh9T71XXG68pWpdIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes +5cVAWubXbHssw1abR80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/ +EaEQPkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGcexGATVdVh +mVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+J7x6v+Db9NpSvd4MVHAx +kUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMlOtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGi +kpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWTee5Ehr7XHuQe+w== +-----END CERTIFICATE----- + +WoSign China +============ +-----BEGIN CERTIFICATE----- +MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBGMQswCQYDVQQG 
+EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMMEkNBIOayg+mAmuagueiv +geS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYD +VQQKExFXb1NpZ24gQ0EgTGltaXRlZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k +8H/rD195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld19AXbbQs5 +uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExfv5RxadmWPgxDT74wwJ85 +dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnkUkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5 +Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+LNVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFy +b7Ao65vh4YOhn0pdr8yb+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc +76DbT52VqyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6KyX2m ++Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0GAbQOXDBGVWCvOGU6 +yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaKJ/kR8slC/k7e3x9cxKSGhxYzoacX +GKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUA +A4ICAQBqinA4WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 +yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj/feTZU7n85iY +r83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6jBAyvd0zaziGfjk9DgNyp115 +j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0A +kLppRQjbbpCBhqcqBT/mhDn4t/lXX0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97 +qA4bLJyuQHCH2u2nFoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Y +jj4Du9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10lO1Hm13ZB +ONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Leie2uPAmvylezkolwQOQv +T8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR12KvxAmLBsX5VYc8T1yaw15zLKYs4SgsO +kI26oQ== +-----END CERTIFICATE----- + +COMODO RSA Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn +dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ +FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+ +5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG +x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX +2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL +OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3 +sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C +GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5 +WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt +rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+ +nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg 
+tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW +sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp +pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA +zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq +ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52 +7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I +LaZRfyHBNVOFBkpdn627G190 +-----END CERTIFICATE----- + +USERTrust RSA Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz +0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j +Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn +RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O ++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq +/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE +Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM +lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8 +yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+ +eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW +FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ +7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ +Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM +8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi +FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi +yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c +J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw +sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx +Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +USERTrust ECC Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2 +0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez +nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV 
+HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB +HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu +9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl +OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV +MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF +JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R5 +=========================== +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6 +SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS +h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx +uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 +yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G3 +================================== +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y +olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t +x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy +EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K +Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur +mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5 +1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp +07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo +FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE +41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu +yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq +KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1 +v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA +8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b 
+8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r +mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq +1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI +JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV +tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk= +-----END CERTIFICATE----- + +Staat der Nederlanden EV Root CA +================================ +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M +MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl +cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk +SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW +O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r +0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8 +Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV +XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr +08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV +0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd +74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx +fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa +ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu +c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq +5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN +b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN +f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi +5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4 +WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK +DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy +eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg== +-----END CERTIFICATE----- + +IdenTrust Commercial Root CA 1 +============================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS +b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES +MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB +IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld +hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/ +mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi +1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C +XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl +3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy +NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV +WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg +xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix 
+uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI +hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg +ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt +ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV +YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX +feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro +kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe +2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz +Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R +cGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +IdenTrust Public Sector Root CA 1 +================================= +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv +ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV +UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS +b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy +P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6 +Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI +rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf +qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS +mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn +ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh +LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v +iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL +4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw +DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A +mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt +GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt +m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx +NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4 +Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI +ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC +ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ +3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy +bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug +b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw +HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT +DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx 
+OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP +/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz +HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU +s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y +TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx +AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6 +0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z +iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi +nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+ +vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO +e4pIb4tF9g== +-----END CERTIFICATE----- + +Entrust Root Certification Authority - EC1 +========================================== +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx +FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn +YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw +FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs +LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg +dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy +AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef +9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h +vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8 +kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +CFCA EV ROOT +============ +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE +CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB +IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw +MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD +DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV +BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD +7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN +uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW +ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7 +xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f +py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K +gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol +hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ +tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf +BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB 
+/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q +ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua +4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG +E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX +BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn +aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy +PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX +kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C +ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 +========================================================= +-----BEGIN CERTIFICATE----- +MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1Qg +RWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAw +ODA3MDFaFw0yMzA0MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0w +SwYDVQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnE +n2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBFbGVrdHJvbmlrIFNlcnRp +ZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEApCUZ4WWe60ghUEoI5RHwWrom/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537 +jVJp45wnEFPzpALFp/kRGml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1m +ep5Fimh34khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z5UNP +9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0hO8EuPbJbKoCPrZV +4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QIDAQABo0IwQDAdBgNVHQ4EFgQUVpkH +HtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAJ5FdnsXSDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPo +BP5yCccLqh0lVX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq +URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nfpeYVhDfwwvJl +lpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CFYv4HAqGEVka+lgqaE9chTLd8 +B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW+qtB4Uu2NQvAmxU= +-----END CERTIFICATE----- + +TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H6 +========================================================= +-----BEGIN CERTIFICATE----- +MIIEJjCCAw6gAwIBAgIGfaHyZeyKMA0GCSqGSIb3DQEBCwUAMIGxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMU0wSwYDVQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg2MB4XDTEzMTIxODA5 +MDQxMFoXDTIzMTIxNjA5MDQxMFowgbExCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExTTBL +BgNVBAoMRFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBCaWxpxZ9pbSBHw7x2ZW5sacSf +aSBIaXptZXRsZXJpIEEuxZ4uMUIwQAYDVQQDDDlUw5xSS1RSVVNUIEVsZWt0cm9uaWsgU2VydGlm +aWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLEgSDYwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCdsGjW6L0UlqMACprx9MfMkU1xeHe59yEmFXNRFpQJRwXiM/VomjX/3EsvMsew7eKC5W/a +2uqsxgbPJQ1BgfbBOCK9+bGlprMBvD9QFyv26WZV1DOzXPhDIHiTVRZwGTLmiddk671IUP320EED +wnS3/faAz1vFq6TWlRKb55cTMgPp1KtDWxbtMyJkKbbSk60vbNg9tvYdDjTu0n2pVQ8g9P0pu5Fb +HH3GQjhtQiht1AH7zYiXSX6484P4tZgvsycLSF5W506jM7NE1qXyGJTtHB6plVxiSvgNZ1GpryHV 
++DKdeboaX+UEVU0TRv/yz3THGmNtwx8XEsMeED5gCLMxAgMBAAGjQjBAMB0GA1UdDgQWBBTdVRcT +9qzoSCHK77Wv0QAy7Z6MtTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG +9w0BAQsFAAOCAQEAb1gNl0OqFlQ+v6nfkkU/hQu7VtMMUszIv3ZnXuaqs6fvuay0EBQNdH49ba3R +fdCaqaXKGDsCQC4qnFAUi/5XfldcEQlLNkVS9z2sFP1E34uXI9TDwe7UU5X+LEr+DXCqu4svLcsy +o4LyVN/Y8t3XSHLuSqMplsNEzm61kod2pLv0kmzOLBQJZo6NrRa1xxsJYTvjIKIDgI6tflEATseW +hvtDmHd9KMeP2Cpu54Rvl0EpABZeTeIT6lnAY2c6RPuY/ATTMHKm9ocJV612ph1jmv3XZch4gyt1 +O6VbuA1df74jrlZVlFjvH4GMKrLN5ptjnhi85WsGtAuYSyher4hYyw== +-----END CERTIFICATE----- + +Certinomis - Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAbBgNVBAMTFENlcnRpbm9taXMg +LSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMzMTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIx +EzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRD +ZXJ0aW5vbWlzIC0gUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQos +P5L2fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJflLieY6pOo +d5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQVWZUKxkd8aRi5pwP5ynap +z8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDFTKWrteoB4owuZH9kb/2jJZOLyKIOSY00 +8B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09x +RLWtwHkziOC/7aOgFLScCbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE +6OXWk6RiwsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJwx3t +FvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SGm/lg0h9tkQPTYKbV +PZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4F2iw4lNVYC2vPsKD2NkJK/DAZNuH +i5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZngWVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGj +YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I +6tNxIqSSaHh02TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/0KGRHCwPT5iV +WVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWwF6YSjNRieOpWauwK0kDDPAUw +Pk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZSg081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAX +lCOotQqSD7J6wWAsOMwaplv/8gzjqh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJ +y29SWwNyhlCVCNSNh4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9 +Iff/ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8Vbtaw5Bng +DwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwjY/M50n92Uaf0yKHxDHYi +I0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nM +cyrDflOR1m749fPH0FFNjkulW+YZFzvWgQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVr +hkIGuUE= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cacert1024.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cacert1024.pem new file mode 100644 index 0000000..9794dfb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cacert1024.pem @@ -0,0 +1,3866 @@ +## +## ca-bundle.crt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Tue Apr 22 08:29:31 2014 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). 
This file can be found in the mozilla source tree: +## http://mxr.mozilla.org/mozilla-release/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1 +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## + + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE +AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm 
+SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT +1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD 
+Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA 
+A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC 
+ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l 
+dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa 
+Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY 
+eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML 
+QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 
+AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN 
CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq 
+A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg 
+JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV 
+BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 
+juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk 
+lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 
+Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P 
+jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx +NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 
+BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD 
+VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC 
+Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD 
+KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG 
+A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d 
+eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 
+Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy 
+oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA 
+=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu 
+Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 
+Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X 
+CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD 
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf 
+mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr 
+IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix 
+GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- 
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV 
+MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. 
+====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 +MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU 
+cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy 
+vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW 
+0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q 
+aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia 
+AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD 
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE 
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ 
+KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC 
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil 
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC 
++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ 
+Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv 
+c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 
+xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT 
+OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH 
+3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT 
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 
+h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy 
+OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB 
+Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n 
+dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC 
+oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 
+Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt 
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ 
+Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU 
+QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz 
+6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP 
+MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N +YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh 
+zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO +EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy 
+YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn +FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 +0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab 
+mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ +82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa +m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp 
+ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa +YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 
+utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg 
+Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority 
+=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf 
+vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/connection.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/connection.rb new file mode 100644 index 0000000..b702c92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/connection.rb @@ -0,0 +1,88 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +class HTTPClient + + + # Represents a HTTP response to an asynchronous request. Async methods of + # HTTPClient such as get_async, post_async, etc. returns an instance of + # Connection. + # + # == How to use + # + # 1. Invoke HTTP method asynchronously and check if it's been finished + # periodically. + # + # connection = clnt.post_async(url, body) + # print 'posting.' + # while true + # break if connection.finished? + # print '.' + # sleep 1 + # end + # puts '.' + # res = connection.pop + # p res.status + # + # 2. Read the response as an IO. + # + # connection = clnt.get_async('http://dev.ctor.org/') + # io = connection.pop.content + # while str = io.read(40) + # p str + # end + class Connection + attr_accessor :async_thread + + def initialize(header_queue = [], body_queue = []) # :nodoc: + @headers = header_queue + @body = body_queue + @async_thread = nil + @queue = Queue.new + end + + # Checks if the asynchronous invocation has been finished or not. + def finished? + if !@async_thread + # Not in async mode. + true + elsif @async_thread.alive? + # Working... + false + else + # Async thread have been finished. + join + true + end + end + + # Retrieves a HTTP::Message instance of HTTP response. Do not invoke this + # method twice for now. The second invocation will be blocked. 
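The Connection RDoc above describes the poll-then-pop pattern for asynchronous requests. A minimal sketch of that pattern, assuming a reachable placeholder URL (http://example.com/); the hostname and the sleep interval are illustrative only:

    require 'httpclient'

    client = HTTPClient.new
    connection = client.get_async('http://example.com/')  # returns an HTTPClient::Connection

    # Poll the background thread, as the RDoc above suggests.
    sleep 0.1 until connection.finished?

    response = connection.pop          # HTTP::Message; a second pop would block
    puts response.status
    io = response.content              # async responses expose the body as an IO
    while chunk = io.read(40)
      print chunk
    end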
+ def pop + response_or_exception = @queue.pop + if response_or_exception.is_a? Exception + raise response_or_exception + end + response_or_exception + end + + def push(result) # :nodoc: + @queue.push(result) + end + + # Waits the completion of the asynchronous invocation. + def join + if @async_thread + @async_thread.join + end + nil + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cookie.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cookie.rb new file mode 100644 index 0000000..65fc6ca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/cookie.rb @@ -0,0 +1,220 @@ +# do not override if httpclient/webagent-cookie is loaded already +unless defined?(HTTPClient::CookieManager) +begin # for catching LoadError and load webagent-cookie instead + +require 'http-cookie' +require 'httpclient/util' + +class HTTPClient + class CookieManager + include HTTPClient::Util + + attr_reader :format, :jar + attr_accessor :cookies_file + + def initialize(cookies_file = nil, format = WebAgentSaver, jar = HTTP::CookieJar.new) + @cookies_file = cookies_file + @format = format + @jar = jar + load_cookies if @cookies_file + end + + def load_cookies + check_cookies_file + @jar.clear + @jar.load(@cookies_file, :format => @format) + end + + def save_cookies(session = false) + check_cookies_file + @jar.save(@cookies_file, :format => @format, :session => session) + end + + def cookies(uri = nil) + cookies = @jar.cookies(uri) + # TODO: return HTTP::Cookie in the future + cookies.map { |cookie| + WebAgent::Cookie.new( + :name => cookie.name, + :value => cookie.value, + :domain => cookie.domain, + :path => cookie.path, + :origin => cookie.origin, + :for_domain => cookie.for_domain, + :expires => cookie.expires, + :httponly => cookie.httponly, + :secure => cookie.secure + ) + } + end + + def cookie_value(uri) + cookies = self.cookies(uri) + unless cookies.empty? + HTTP::Cookie.cookie_value(cookies) + end + end + + def parse(value, uri) + @jar.parse(value, uri) + end + + def cookies=(cookies) + @jar.clear + cookies.each do |cookie| + add(cookie) + end + end + + def add(cookie) + @jar.add(cookie) + end + + def find(uri) + warning('CookieManager#find is deprecated and will be removed in near future. Use HTTP::Cookie.cookie_value(CookieManager#cookies) instead') + if cookie = cookies(uri) + HTTP::Cookie.cookie_value(cookie) + end + end + + private + + def check_cookies_file + unless @cookies_file + raise ArgumentError.new('Cookies file not specified') + end + end + end + + class WebAgentSaver < HTTP::CookieJar::AbstractSaver + # no option + def default_options + {} + end + + # same as HTTP::CookieJar::CookiestxtSaver + def save(io, jar) + jar.each { |cookie| + next if !@session && cookie.session? + io.print cookie_to_record(cookie) + } + end + + # same as HTTP::CookieJar::CookiestxtSaver + def load(io, jar) + io.each_line { |line| + cookie = parse_record(line) and jar.add(cookie) + } + end + + private + + def cookie_to_record(cookie) + [ + cookie.origin, + cookie.name, + cookie.value, + cookie.expires.to_i, + cookie.dot_domain, + cookie.path, + self.class.flag(cookie) + ].join("\t") + "\n" + end + + def parse_record(line) + return nil if /\A#/ =~ line + col = line.chomp.split(/\t/) + + origin = col[0] + name = col[1] + value = col[2] + value.chomp! + if col[3].empty? 
or col[3] == '0' + expires = nil + else + expires = Time.at(col[3].to_i) + return nil if expires < Time.now + end + domain = col[4] + path = col[5] + + cookie = WebAgent::Cookie.new(name, value, + :origin => origin, + :domain => domain, + :path => path, + :expires => expires + ) + self.class.set_flag(cookie, col[6].to_i) + cookie + end + + USE = 1 + SECURE = 2 + DOMAIN = 4 + PATH = 8 + HTTP_ONLY = 64 + + def self.flag(cookie) + flg = 0 + flg += USE # not used + flg += SECURE if cookie.secure? + flg += DOMAIN if cookie.for_domain? + flg += HTTP_ONLY if cookie.httponly? + flg += PATH if cookie.path # not used + flg + end + + def self.set_flag(cookie, flag) + cookie.secure = true if flag & SECURE > 0 + cookie.for_domain = true if flag & DOMAIN > 0 + cookie.httponly = true if flag & HTTP_ONLY > 0 + end + end +end + +# for backward compatibility +class WebAgent + CookieManager = ::HTTPClient::CookieManager + + class Cookie < HTTP::Cookie + include HTTPClient::Util + + def url + deprecated('url', 'origin') + self.origin + end + + def url=(url) + deprecated('url=', 'origin=') + self.origin = url + end + + def http_only? + deprecated('http_only?', 'httponly?') + self.httponly? + end + + alias original_domain domain + + def domain + warning('Cookie#domain returns dot-less domain name now. Use Cookie#dot_domain if you need "." at the beginning.') + self.original_domain + end + + def flag + deprecated('flag', 'secure, for_domain, etc.') + HTTPClient::WebAgentSaver.flag(self) + end + + private + + def deprecated(old, new) + warning("WebAgent::Cookie is deprecated and will be replaced with HTTP::Cookie in the near future. Please use Cookie##{new} instead of Cookie##{old} for the replacement.") + end + end +end + +rescue LoadError + require 'httpclient/webagent-cookie' +end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/http.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/http.rb new file mode 100644 index 0000000..330b3db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/http.rb @@ -0,0 +1,1082 @@ +# -*- encoding: utf-8 -*- + +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'time' +if defined?(Encoding::ASCII_8BIT) + require 'open-uri' # for encoding +end +require 'httpclient/util' + + +# A namespace module for HTTP Message definitions used by HTTPClient. +module HTTP + + + # Represents HTTP response status code. Defines constants for HTTP response + # and some conditional methods. + module Status + OK = 200 + CREATED = 201 + ACCEPTED = 202 + NON_AUTHORITATIVE_INFORMATION = 203 + NO_CONTENT = 204 + RESET_CONTENT = 205 + PARTIAL_CONTENT = 206 + MOVED_PERMANENTLY = 301 + FOUND = 302 + SEE_OTHER = 303 + TEMPORARY_REDIRECT = MOVED_TEMPORARILY = 307 + BAD_REQUEST = 400 + UNAUTHORIZED = 401 + PROXY_AUTHENTICATE_REQUIRED = 407 + INTERNAL = 500 + + # Status codes for successful HTTP response. + SUCCESSFUL_STATUS = [ + OK, CREATED, ACCEPTED, + NON_AUTHORITATIVE_INFORMATION, NO_CONTENT, + RESET_CONTENT, PARTIAL_CONTENT + ] + + # Status codes which is a redirect. + REDIRECT_STATUS = [ + MOVED_PERMANENTLY, FOUND, SEE_OTHER, + TEMPORARY_REDIRECT, MOVED_TEMPORARILY + ] + + # Returns true if the given status represents successful HTTP response. 
+ # See also SUCCESSFUL_STATUS. + def self.successful?(status) + SUCCESSFUL_STATUS.include?(status) + end + + # Returns true if the given status is thought to be redirect. + # See also REDIRECT_STATUS. + def self.redirect?(status) + REDIRECT_STATUS.include?(status) + end + end + + + # Represents a HTTP message. A message is for a request or a response. + # + # Request message is generated from given parameters internally so users + # don't need to care about it. Response message is the instance that + # methods of HTTPClient returns so users need to know how to extract + # HTTP response data from Message. + # + # Some attributes are only for a request or a response, not both. + # + # == How to use HTTP response message + # + # 1. Gets response message body. + # + # res = clnt.get(url) + # p res.content #=> String + # + # 2. Gets response status code. + # + # res = clnt.get(url) + # p res.status #=> 200, 501, etc. (Integer) + # + # 3. Gets response header. + # + # res = clnt.get(url) + # res.header['set-cookie'].each do |value| + # p value + # end + # assert_equal(1, res.header['last-modified'].size) + # p res.header['last-modified'].first + # + class Message + include HTTPClient::Util + + CRLF = "\r\n" + + # Represents HTTP message header. + class Headers + # HTTP version in a HTTP header. String. + attr_accessor :http_version + # Size of body. nil when size is unknown (e.g. chunked response). + attr_reader :body_size + # Request/Response is chunked or not. + attr_accessor :chunked + + # Request only. Requested method. + attr_reader :request_method + # Request only. Requested URI. + attr_accessor :request_uri + # Request only. Requested query. + attr_accessor :request_query + # Request only. Requested via proxy or not. + attr_accessor :request_absolute_uri + + # Response only. HTTP status + attr_reader :status_code + # Response only. HTTP status reason phrase. + attr_accessor :reason_phrase + + # Used for dumping response. + attr_accessor :body_type # :nodoc: + # Used for dumping response. + attr_accessor :body_charset # :nodoc: + # Used for dumping response. + attr_accessor :body_date # :nodoc: + # Used for keeping content encoding. + attr_reader :body_encoding # :nodoc: + + # HTTP response status code to reason phrase mapping definition. + STATUS_CODE_MAP = { + Status::OK => 'OK', + Status::CREATED => "Created", + Status::NON_AUTHORITATIVE_INFORMATION => "Non-Authoritative Information", + Status::NO_CONTENT => "No Content", + Status::RESET_CONTENT => "Reset Content", + Status::PARTIAL_CONTENT => "Partial Content", + Status::MOVED_PERMANENTLY => 'Moved Permanently', + Status::FOUND => 'Found', + Status::SEE_OTHER => 'See Other', + Status::TEMPORARY_REDIRECT => 'Temporary Redirect', + Status::MOVED_TEMPORARILY => 'Temporary Redirect', + Status::BAD_REQUEST => 'Bad Request', + Status::INTERNAL => 'Internal Server Error', + } + + # $KCODE to charset mapping definition. + CHARSET_MAP = { + 'NONE' => 'us-ascii', + 'EUC' => 'euc-jp', + 'SJIS' => 'shift_jis', + 'UTF8' => 'utf-8', + } + + # Creates a Message::Headers. Use init_request, init_response, or + # init_connect_request for acutual initialize. 
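The Status helpers and the "How to use HTTP response message" notes above cover the everyday read path. A small sketch, assuming a placeholder URL; the exact body and headers naturally depend on the server:

    require 'httpclient'

    client = HTTPClient.new
    res = client.get('http://example.com/')        # res is an HTTP::Message

    puts res.status                                # e.g. 200 (Integer)
    puts res.content[0, 60]                        # body is a String for synchronous calls
    res.header['set-cookie'].each { |v| puts v }   # header lookup always yields an Array

    puts HTTP::Status.successful?(res.status)      # true for the 2xx codes in SUCCESSFUL_STATUS
    puts HTTP::Status.redirect?(302)               # true, FOUND is in REDIRECT_STATUS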
+ def initialize + @http_version = '1.1' + @body_size = nil + @chunked = false + + @request_method = nil + @request_uri = nil + @request_query = nil + @request_absolute_uri = nil + + @status_code = nil + @reason_phrase = nil + + @body_type = nil + @body_charset = nil + @body_date = nil + @body_encoding = nil + + @is_request = nil + @header_item = [] + @dumped = false + end + + # Initialize this instance as a CONNECT request. + def init_connect_request(uri) + @is_request = true + @request_method = 'CONNECT' + @request_uri = uri + @request_query = nil + @http_version = '1.0' + end + + # Placeholder URI object for nil uri. + NIL_URI = HTTPClient::Util.urify('http://nil-uri-given/') + # Initialize this instance as a general request. + def init_request(method, uri, query = nil) + @is_request = true + @request_method = method + @request_uri = uri || NIL_URI + @request_query = query + @request_absolute_uri = false + self + end + + # Initialize this instance as a response. + def init_response(status_code, req = nil) + @is_request = false + self.status_code = status_code + if req + @request_method = req.request_method + @request_uri = req.request_uri + @request_query = req.request_query + end + self + end + + # Sets status code and reason phrase. + def status_code=(status_code) + @status_code = status_code + @reason_phrase = STATUS_CODE_MAP[@status_code] + end + + # Returns 'Content-Type' header value. + def content_type + self['Content-Type'][0] + end + + # Sets 'Content-Type' header value. Overrides if already exists. + def content_type=(content_type) + delete('Content-Type') + self['Content-Type'] = content_type + end + + alias contenttype content_type + alias contenttype= content_type= + + if defined?(Encoding::ASCII_8BIT) + def set_body_encoding + if type = self.content_type + OpenURI::Meta.init(o = '') + o.meta_add_field('content-type', type) + @body_encoding = o.encoding + end + end + else + def set_body_encoding + @body_encoding = nil + end + end + + # Sets byte size of message body. + # body_size == nil means that the body is_a? IO + def body_size=(body_size) + @body_size = body_size + end + + # Dumps message header part and returns a dumped String. + def dump + set_header + str = nil + if @is_request + str = request_line + else + str = response_status_line + end + str + @header_item.collect { |key, value| + "#{ key }: #{ value }#{ CRLF }" + }.join + end + + # Set Date header + def set_date_header + set('Date', Time.now.httpdate) + end + + # Adds a header. Addition order is preserved. + def add(key, value) + if value.is_a?(Array) + value.each do |v| + @header_item.push([key, v]) + end + else + @header_item.push([key, value]) + end + end + + # Sets a header. + def set(key, value) + delete(key) + add(key, value) + end + + # Returns an Array of headers for the given key. Each element is a pair + # of key and value. It returns an single element Array even if the only + # one header exists. If nil key given, it returns all headers. + def get(key = nil) + if key.nil? + all + else + key = key.upcase + @header_item.find_all { |k, v| k.upcase == key } + end + end + + # Returns an Array of all headers. + def all + @header_item + end + + # Deletes headers of the given key. + def delete(key) + key = key.upcase + @header_item.delete_if { |k, v| k.upcase == key } + end + + # Adds a header. See set. + def []=(key, value) + set(key, value) + end + + # Returns an Array of header values for the given key. 
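Header manipulation goes through the add/set/get family shown above; []= replaces, while add preserves duplicates in order. A short sketch using the request factory documented further down in this file; the header names and URL are arbitrary:

    require 'httpclient'

    uri = HTTPClient::Util.urify('http://example.com/index.html')
    msg = HTTP::Message.new_request('GET', uri)

    msg.http_header.add('X-Trace', 'a')          # addition order is preserved
    msg.http_header.add('X-Trace', 'b')
    msg.http_header['User-Agent'] = 'demo/1.0'   # []= deletes any existing value, then adds

    p msg.http_header['X-Trace']                 # => ["a", "b"]  (values only, always an Array)
    p msg.http_header.get('x-trace')             # => [["X-Trace", "a"], ["X-Trace", "b"]]  (case-insensitive lookup)
    print msg.http_header.dump                   # request line plus "Key: value" lines, CRLF-terminated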
+ def [](key) + get(key).collect { |item| item[1] } + end + + def set_headers(headers) + headers.each do |key, value| + add(key, value) + end + set_body_encoding + end + + def create_query_uri() + if @request_method == 'CONNECT' + return "#{@request_uri.host}:#{@request_uri.port}" + end + path = @request_uri.path + path = '/' if path.nil? or path.empty? + if query_str = create_query_part() + path += "?#{query_str}" + end + path + end + + def create_query_part() + query_str = nil + if @request_uri.query + query_str = @request_uri.query + end + if @request_query + if query_str + query_str += "&#{Message.create_query_part_str(@request_query)}" + else + query_str = Message.create_query_part_str(@request_query) + end + end + query_str + end + + private + + def request_line + path = create_query_uri() + if @request_absolute_uri + path = "#{ @request_uri.scheme }://#{ @request_uri.host }:#{ @request_uri.port }#{ path }" + end + "#{ @request_method } #{ path } HTTP/#{ @http_version }#{ CRLF }" + end + + def response_status_line + if defined?(Apache) + "HTTP/#{ @http_version } #{ @status_code } #{ @reason_phrase }#{ CRLF }" + else + "Status: #{ @status_code } #{ @reason_phrase }#{ CRLF }" + end + end + + def set_header + if @is_request + set_request_header + else + set_response_header + end + end + + def set_request_header + return if @dumped + @dumped = true + keep_alive = Message.keep_alive_enabled?(@http_version) + if !keep_alive and @request_method != 'CONNECT' + set('Connection', 'close') + end + if @chunked + set('Transfer-Encoding', 'chunked') + elsif @body_size and (keep_alive or @body_size != 0) + set('Content-Length', @body_size.to_s) + end + if @http_version >= '1.1' and get('Host').empty? + if @request_uri.port == @request_uri.default_port + # GFE/1.3 dislikes default port number (returns 404) + set('Host', "#{@request_uri.hostname}") + else + set('Host', "#{@request_uri.hostname}:#{@request_uri.port}") + end + end + end + + def set_response_header + return if @dumped + @dumped = true + if defined?(Apache) && self['Date'].empty? + set_date_header + end + keep_alive = Message.keep_alive_enabled?(@http_version) + if @chunked + set('Transfer-Encoding', 'chunked') + else + if keep_alive or @body_size != 0 + set('Content-Length', @body_size.to_s) + end + end + if @body_date + set('Last-Modified', @body_date.httpdate) + end + if self['Content-Type'].empty? + set('Content-Type', "#{ @body_type || 'text/html' }; charset=#{ charset_label }") + end + end + + def charset_label + # TODO: should handle response encoding for 1.9 correctly. + if RUBY_VERSION > "1.9" + CHARSET_MAP[@body_charset] || 'us-ascii' + else + CHARSET_MAP[@body_charset || $KCODE] || 'us-ascii' + end + end + end + + + # Represents HTTP message body. + class Body + # Size of body. nil when size is unknown (e.g. chunked response). + attr_reader :size + # maxbytes of IO#read for streaming request. See DEFAULT_CHUNK_SIZE. + attr_accessor :chunk_size + # Hash that keeps IO positions + attr_accessor :positions + + # Default value for chunk_size + DEFAULT_CHUNK_SIZE = 1024 * 16 + + # Creates a Message::Body. Use init_request or init_response + # for acutual initialize. + def initialize + @body = nil + @size = nil + @positions = nil + @chunk_size = nil + end + + # Initialize this instance as a request. + def init_request(body = nil, boundary = nil) + @boundary = boundary + @positions = {} + set_content(body, boundary) + @chunk_size = DEFAULT_CHUNK_SIZE + self + end + + # Initialize this instance as a response. 
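set_response_header and charset_label above decide the Content-Length, Content-Type and charset that end up in a dumped response. A sketch of the response side, assuming plain CRuby (outside Apache/mod_ruby the status line is emitted in CGI style as "Status: 200 OK"):

    require 'httpclient'

    res = HTTP::Message.new_response('<html><body>hi</body></html>')
    res.header.body_type    = 'text/html'   # defaults to text/html when unset
    res.header.body_charset = 'UTF8'        # mapped through CHARSET_MAP to "utf-8"

    print res.dump
    # Status: 200 OK
    # Content-Length: 28
    # Content-Type: text/html; charset=utf-8
    #
    # <html><body>hi</body></html>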
+ def init_response(body = nil) + @body = body + if @body.respond_to?(:bytesize) + @size = @body.bytesize + elsif @body.respond_to?(:size) + @size = @body.size + else + @size = nil + end + self + end + + # Dumps message body to given dev. + # dev needs to respond to <<. + # + # Message header must be given as the first argument for performance + # reason. (header is dumped to dev, too) + # If no dev (the second argument) given, this method returns a dumped + # String. + # + # assert: @size is not nil + def dump(header = '', dev = '') + if @body.is_a?(Parts) + dev << header + @body.parts.each do |part| + if Message.file?(part) + reset_pos(part) + dump_file(part, dev, @body.sizes[part]) + else + dev << part + end + end + elsif Message.file?(@body) + dev << header + reset_pos(@body) + dump_file(@body, dev, @size) + elsif @body + dev << header + @body + else + dev << header + end + dev + end + + # Dumps message body with chunked encoding to given dev. + # dev needs to respond to <<. + # + # Message header must be given as the first argument for performance + # reason. (header is dumped to dev, too) + # If no dev (the second argument) given, this method returns a dumped + # String. + def dump_chunked(header = '', dev = '') + dev << header + if @body.is_a?(Parts) + @body.parts.each do |part| + if Message.file?(part) + reset_pos(part) + dump_chunks(part, dev) + else + dev << dump_chunk(part) + end + end + dev << (dump_last_chunk + CRLF) + elsif @body + reset_pos(@body) + dump_chunks(@body, dev) + dev << (dump_last_chunk + CRLF) + end + dev + end + + # Returns a message body itself. + def content + @body + end + + private + + def set_content(body, boundary = nil) + if Message.file?(body) + # uses Transfer-Encoding: chunked if body does not respond to :size. + # bear in mind that server may not support it. at least ruby's CGI doesn't. + @body = body + remember_pos(@body) + @size = body.respond_to?(:size) ? body.size - body.pos : nil + elsif boundary and Message.multiparam_query?(body) + @body = build_query_multipart_str(body, boundary) + @size = @body.size + else + @body = Message.create_query_part_str(body) + @size = @body.bytesize + end + end + + def remember_pos(io) + # IO may not support it (ex. IO.pipe) + @positions[io] = io.pos if io.respond_to?(:pos) + end + + def reset_pos(io) + io.pos = @positions[io] if @positions.key?(io) + end + + def dump_file(io, dev, sz) + buf = '' + rest = sz + while rest > 0 + n = io.read([rest, @chunk_size].min, buf) + raise ArgumentError.new("Illegal size value: #size returns #{sz} but cannot read") if n.nil? + dev << buf + rest -= n.bytesize + end + end + + def dump_chunks(io, dev) + buf = '' + while !io.read(@chunk_size, buf).nil? 
+ dev << dump_chunk(buf) + end + end + + def dump_chunk(str) + dump_chunk_size(str.bytesize) + (str + CRLF) + end + + def dump_last_chunk + dump_chunk_size(0) + end + + def dump_chunk_size(size) + sprintf("%x", size) + CRLF + end + + class Parts + attr_reader :size + attr_reader :sizes + + def initialize + @body = [] + @sizes = {} + @size = 0 # total + @as_stream = false + end + + def add(part) + if Message.file?(part) + @as_stream = true + @body << part + if part.respond_to?(:lstat) + sz = part.lstat.size + add_size(part, sz) + elsif part.respond_to?(:size) + if sz = part.size + add_size(part, sz) + else + @sizes.clear + @size = nil + end + else + # use chunked upload + @sizes.clear + @size = nil + end + elsif @body[-1].is_a?(String) + @body[-1] += part.to_s + @size += part.to_s.bytesize if @size + else + @body << part.to_s + @size += part.to_s.bytesize if @size + end + end + + def parts + if @as_stream + @body + else + [@body.join] + end + end + + private + + def add_size(part, sz) + if @size + @sizes[part] = sz + @size += sz + end + end + end + + def build_query_multipart_str(query, boundary) + parts = Parts.new + query.each do |attr, value| + headers = ["--#{boundary}"] + if Message.file?(value) + remember_pos(value) + param_str = params_from_file(value).collect { |k, v| + "#{k}=\"#{v}\"" + }.join("; ") + if value.respond_to?(:mime_type) + content_type = value.mime_type + elsif value.respond_to?(:content_type) + content_type = value.content_type + else + path = value.respond_to?(:path) ? value.path : nil + content_type = Message.mime_type(path) + end + headers << %{Content-Disposition: form-data; name="#{attr}"; #{param_str}} + headers << %{Content-Type: #{content_type}} + elsif attr.is_a?(Hash) + h = attr + value = h[:content] + h.each do |h_key, h_val| + headers << %{#{h_key}: #{h_val}} if h_key != :content + end + remember_pos(value) if Message.file?(value) + else + headers << %{Content-Disposition: form-data; name="#{attr}"} + value = value.to_s + end + parts.add(headers.join(CRLF) + CRLF + CRLF) + parts.add(value) + parts.add(CRLF) + end + parts.add("--#{boundary}--" + CRLF + CRLF) # empty epilogue + parts + end + + def params_from_file(value) + params = {} + original_filename = value.respond_to?(:original_filename) ? value.original_filename : nil + path = value.respond_to?(:path) ? value.path : nil + params['filename'] = original_filename || File.basename(path || '') + # Creation time is not available from File::Stat + if value.respond_to?(:mtime) + params['modification-date'] = value.mtime.rfc822 + end + if value.respond_to?(:atime) + params['read-date'] = value.atime.rfc822 + end + params + end + end + + + class << self + private :new + + # Creates a Message instance of 'CONNECT' request. + # 'CONNECT' request does not have Body. + # uri:: an URI that need to connect. Only uri.host and uri.port are used. + def new_connect_request(uri) + m = new + m.http_header.init_connect_request(uri) + m.http_header.body_size = nil + m + end + + # Creates a Message instance of general request. + # method:: HTTP method String. + # uri:: an URI object which represents an URL of web resource. + # query:: a Hash or an Array of query part of URL. + # e.g. { "a" => "b" } => 'http://host/part?a=b' + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'http://host/part?a=b&a=c' + # body:: a Hash or an Array of body part. + # e.g. { "a" => "b" } => 'a=b'. + # Give an array to pass multiple value like + # [["a", "b"], ["a", "c"]] => 'a=b&a=c'. 
+ # boundary:: When the boundary given, it is sent as + # a multipart/form-data using this boundary String. + def new_request(method, uri, query = nil, body = nil, boundary = nil) + m = new + m.http_header.init_request(method, uri, query) + m.http_body = Body.new + m.http_body.init_request(body || '', boundary) + if body + m.http_header.body_size = m.http_body.size + m.http_header.chunked = true if m.http_body.size.nil? + else + m.http_header.body_size = nil + end + m + end + + # Creates a Message instance of response. + # body:: a String or an IO of response message body. + def new_response(body, req = nil) + m = new + m.http_header.init_response(Status::OK, req) + m.http_body = Body.new + m.http_body.init_response(body) + m.http_header.body_size = m.http_body.size || 0 + m + end + + @@mime_type_handler = nil + + # Sets MIME type handler. + # + # handler must respond to :call with a single argument :path and returns + # a MIME type String e.g. 'text/html'. + # When the handler returns nil or an empty String, + # 'application/octet-stream' is used. + # + # When you set nil to the handler, internal_mime_type is used instead. + # The handler is nil by default. + def mime_type_handler=(handler) + @@mime_type_handler = handler + end + + # Returns MIME type handler. + def mime_type_handler + @@mime_type_handler + end + + # For backward compatibility. + alias set_mime_type_func mime_type_handler= + alias get_mime_type_func mime_type_handler + + def mime_type(path) # :nodoc: + if @@mime_type_handler + res = @@mime_type_handler.call(path) + if !res || res.to_s == '' + return 'application/octet-stream' + else + return res + end + else + internal_mime_type(path) + end + end + + # Default MIME type handler. + # See mime_type_handler=. + def internal_mime_type(path) + case path + when /\.txt$/i + 'text/plain' + when /\.xml$/i + 'text/xml' + when /\.(htm|html)$/i + 'text/html' + when /\.doc$/i + 'application/msword' + when /\.png$/i + 'image/png' + when /\.gif$/i + 'image/gif' + when /\.(jpg|jpeg)$/i + 'image/jpeg' + else + 'application/octet-stream' + end + end + + # Returns true if the given HTTP version allows keep alive connection. + # version:: String + def keep_alive_enabled?(version) + version >= '1.1' + end + + # Returns true if the given query (or body) has a multiple parameter. + def multiparam_query?(query) + query.is_a?(Array) or query.is_a?(Hash) + end + + # Returns true if the given object is a File. In HTTPClient, a file is; + # * must respond to :read for retrieving String chunks. + # * must respond to :pos and :pos= to rewind for reading. + # Rewinding is only needed for following HTTP redirect. Some IO impl + # defines :pos= but raises an Exception for pos= such as StringIO + # but there's no problem as far as using it for non-following methods + # (get/post/etc.) 
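new_request and the MIME-type hooks documented above can be exercised without any network I/O. A sketch; the handler, file names and parameters are made up for illustration, and mime_type is an internal helper (marked :nodoc:) used here only to show the fallback behaviour:

    require 'httpclient'

    uri = HTTPClient::Util.urify('http://example.com/search')
    req = HTTP::Message.new_request('POST', uri, { 'q' => 'ruby' }, { 'page' => '2' })
    print req.dump     # "POST /search?q=ruby HTTP/1.1", Content-Length: 6, body "page=2"

    # Custom MIME type handler; returning nil or '' falls back to application/octet-stream.
    HTTP::Message.mime_type_handler = proc { |path| 'text/markdown' if path.to_s.end_with?('.md') }
    p HTTP::Message.mime_type('README.md')   # => "text/markdown"
    p HTTP::Message.mime_type('data.bin')    # => "application/octet-stream"
    HTTP::Message.mime_type_handler = nil    # restore the internal_mime_type default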
+ def file?(obj) + obj.respond_to?(:read) and obj.respond_to?(:pos) and + obj.respond_to?(:pos=) + end + + def create_query_part_str(query) # :nodoc: + if multiparam_query?(query) + escape_query(query) + elsif query.respond_to?(:read) + query = query.read + else + query.to_s + end + end + + def Array.try_convert(value) + return value if value.instance_of?(Array) + return nil if !value.respond_to?(:to_ary) + converted = value.to_ary + return converted if converted.instance_of?(Array) + + cname = value.class.name + raise TypeError, "can't convert %s to %s (%s#%s gives %s)" % + [cname, Array.name, cname, :to_ary, converted.class.name] + end unless Array.respond_to?(:try_convert) + + def escape_query(query) # :nodoc: + pairs = [] + query.each { |attr, value| + left = escape(attr.to_s) << '=' + if values = Array.try_convert(value) + values.each { |v| + if v.respond_to?(:read) + v = v.read + end + pairs.push(left + escape(v.to_s)) + } + else + if value.respond_to?(:read) + value = value.read + end + pairs.push(left << escape(value.to_s)) + end + } + pairs.join('&') + end + + # from CGI.escape + if defined?(Encoding::ASCII_8BIT) + def escape(str) # :nodoc: + str.dup.force_encoding(Encoding::ASCII_8BIT).gsub(/([^ a-zA-Z0-9_.-]+)/) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + }.tr(' ', '+') + end + else + def escape(str) # :nodoc: + str.gsub(/([^ a-zA-Z0-9_.-]+)/n) { + '%' + $1.unpack('H2' * $1.bytesize).join('%').upcase + }.tr(' ', '+') + end + end + + # from CGI.parse + def parse(query) + params = Hash.new([].freeze) + query.split(/[&;]/n).each do |pairs| + key, value = pairs.split('=',2).collect{|v| unescape(v) } + if params.has_key?(key) + params[key].push(value) + else + params[key] = [value] + end + end + params + end + + # from CGI.unescape + def unescape(string) + string.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n) do + [$1.delete('%')].pack('H*') + end + end + end + + + # HTTP::Message::Headers:: message header. + attr_accessor :http_header + + # HTTP::Message::Body:: message body. + attr_reader :http_body + + # OpenSSL::X509::Certificate:: response only. server certificate which is + # used for retrieving the response. + attr_accessor :peer_cert + + # The other Message object when this Message is generated instead of + # the Message because of redirection, negotiation, or format conversion. + attr_accessor :previous + + # Creates a Message. This method should be used internally. + # Use Message.new_connect_request, Message.new_request or + # Message.new_response instead. + def initialize # :nodoc: + @http_header = Headers.new + @http_body = @peer_cert = nil + @previous = nil + end + + # Dumps message (header and body) to given dev. + # dev needs to respond to <<. + def dump(dev = '') + str = @http_header.dump + CRLF + if @http_header.chunked + dev = @http_body.dump_chunked(str, dev) + elsif @http_body + dev = @http_body.dump(str, dev) + else + dev << str + end + dev + end + + # Sets a new body. header.body_size is updated with new body.size. + def http_body=(body) + @http_body = body + @http_header.body_size = @http_body.size if @http_header + end + alias body= http_body= + + # Returns HTTP version in a HTTP header. String. + def http_version + @http_header.http_version + end + + # Sets HTTP version in a HTTP header. String. + def http_version=(http_version) + @http_header.http_version = http_version + end + + VERSION_WARNING = 'Message#version (Float) is deprecated. Use Message#http_version (String) instead.' 
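parse and unescape above are CGI-compatible helpers (see the "from CGI.parse" / "from CGI.unescape" notes) and show how query strings are decoded. A tiny sketch of the round trip; escape_query is the :nodoc: counterpart used when building requests and is called here only to illustrate the encoding rules:

    require 'httpclient'

    p HTTP::Message.parse('a=b&a=c&x=1+2')                  # => {"a"=>["b", "c"], "x"=>["1 2"]}
    p HTTP::Message.unescape('1%2B2%3D3')                   # => "1+2=3"
    p HTTP::Message.escape_query('a' => 'b', 'x' => '1 2')  # => "a=b&x=1+2"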
+ def version + warning(VERSION_WARNING) + @http_header.http_version.to_f + end + + def version=(version) + warning(VERSION_WARNING) + @http_header.http_version = version + end + + # Returns HTTP status code in response. Integer. + def status + @http_header.status_code + end + + alias code status + alias status_code status + + # Sets HTTP status code of response. Integer. + # Reason phrase is updated, too. + def status=(status) + @http_header.status_code = status + end + + # Returns HTTP status reason phrase in response. String. + def reason + @http_header.reason_phrase + end + + # Sets HTTP status reason phrase of response. String. + def reason=(reason) + @http_header.reason_phrase = reason + end + + # Returns 'Content-Type' header value. + def content_type + @http_header.content_type + end + + # Sets 'Content-Type' header value. Overrides if already exists. + def content_type=(content_type) + @http_header.content_type = content_type + end + alias contenttype content_type + alias contenttype= content_type= + + # Returns content encoding + def body_encoding + @http_header.body_encoding + end + + # Returns a content of message body. A String or an IO. + def content + @http_body.content + end + + alias header http_header + alias body content + + # Returns Hash of header. key and value are both String. Each key has a + # single value so you can't extract exact value when a message has multiple + # headers like 'Set-Cookie'. Use header['Set-Cookie'] for that purpose. + # (It returns an Array always) + def headers + Hash[*http_header.all.flatten] + end + + # Extracts cookies from 'Set-Cookie' header. + # Supports 'Set-Cookie' in response header only. + # Do we need 'Cookie' support in request header? + def cookies + set_cookies = http_header['set-cookie'] + unless set_cookies.empty? + uri = http_header.request_uri + set_cookies.map { |str| + WebAgent::Cookie.parse(str, uri) + }.flatten + end + end + + # Convenience method to return boolean of whether we had a successful request + def ok? + HTTP::Status.successful?(status) + end + + def redirect? + HTTP::Status.redirect?(status) + end + + # SEE_OTHER is a redirect, but it should sent as GET + def see_other? + status == HTTP::Status::SEE_OTHER + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/include_client.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/include_client.rb new file mode 100644 index 0000000..e6aaf47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/include_client.rb @@ -0,0 +1,85 @@ +# It is useful to re-use a HTTPClient instance for multiple requests, to +# re-use HTTP 1.1 persistent connections. +# +# To do that, you sometimes want to store an HTTPClient instance in a global/ +# class variable location, so it can be accessed and re-used. +# +# This mix-in makes it easy to create class-level access to one or more +# HTTPClient instances. The HTTPClient instances are lazily initialized +# on first use (to, for instance, avoid interfering with WebMock/VCR), +# and are initialized in a thread-safe manner. Note that a +# HTTPClient, once initialized, is safe for use in multiple threads. +# +# Note that you `extend` HTTPClient::IncludeClient, not `include. 
+# +# require 'httpclient/include_client' +# class Widget +# extend HTTPClient::IncludeClient +# +# include_http_client +# # and/or, specify more stuff +# include_http_client('http://myproxy:8080', :method_name => :my_client) do |client| +# # any init you want +# client.set_cookie_store nil +# client. +# end +# end +# +# That creates two HTTPClient instances available at the class level. +# The first will be available from Widget.http_client (default method +# name for `include_http_client`), with default initialization. +# +# The second will be available at Widget.my_client, with the init arguments +# provided, further initialized by the block provided. +# +# In addition to a class-level method, for convenience instance-level methods +# are also provided. Widget.http_client is identical to Widget.new.http_client +# +# +require 'httpclient' + +class HTTPClient + module IncludeClient + + + def include_http_client(*args, &block) + # We're going to dynamically define a class + # to hold our state, namespaced, as well as possibly dynamic + # name of cover method. + method_name = (args.last.delete(:method_name) if args.last.kind_of? Hash) || :http_client + args.pop if args.last == {} # if last arg was named methods now empty, remove it. + + # By the amazingness of closures, we can create these things + # in local vars here and use em in our method, we don't even + # need iVars for state. + client_instance = nil + client_mutex = Mutex.new + client_args = args + client_block = block + + # to define a _class method_ on the specific class that's currently + # `self`, we have to use this bit of metaprogramming, sorry. + (class << self; self ; end).instance_eval do + define_method(method_name) do + # implementation copied from ruby stdlib singleton + # to create this global obj thread-safely. + return client_instance if client_instance + client_mutex.synchronize do + return client_instance if client_instance + # init HTTPClient with specified args/block + client_instance = HTTPClient.new(*client_args) + client_block.call(client_instance) if client_block + end + return client_instance + end + end + + # And for convenience, an _instance method_ on the class that just + # delegates to the class method. + define_method(method_name) do + self.class.send(method_name) + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/jruby_ssl_socket.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/jruby_ssl_socket.rb new file mode 100644 index 0000000..046b498 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/jruby_ssl_socket.rb @@ -0,0 +1,588 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. 
+ + +require 'java' +require 'httpclient/ssl_config' + + +class HTTPClient + +unless defined?(SSLSocket) + + class JavaSocketWrap + java_import 'java.net.InetSocketAddress' + java_import 'java.io.BufferedInputStream' + + BUF_SIZE = 1024 * 16 + + def self.connect(socket, site, opts = {}) + socket_addr = InetSocketAddress.new(site.host, site.port) + if opts[:connect_timeout] + socket.connect(socket_addr, opts[:connect_timeout]) + else + socket.connect(socket_addr) + end + socket.setSoTimeout(opts[:so_timeout]) if opts[:so_timeout] + socket.setKeepAlive(true) if opts[:tcp_keepalive] + socket + end + + def initialize(socket, debug_dev = nil) + @socket = socket + @debug_dev = debug_dev + @outstr = @socket.getOutputStream + @instr = BufferedInputStream.new(@socket.getInputStream) + @buf = (' ' * BUF_SIZE).to_java_bytes + @bufstr = '' + end + + def close + @socket.close + end + + def closed? + @socket.isClosed + end + + def eof? + @socket.isClosed + end + + def gets(rs) + while (size = @bufstr.index(rs)).nil? + if fill() == -1 + size = @bufstr.size + break + end + end + str = @bufstr.slice!(0, size + rs.size) + debug(str) + str + end + + def read(size, buf = nil) + while @bufstr.size < size + if fill() == -1 + break + end + end + str = @bufstr.slice!(0, size) + debug(str) + if buf + buf.replace(str) + else + str + end + end + + def readpartial(size, buf = nil) + while @bufstr.size == 0 + if fill() == -1 + raise EOFError.new('end of file reached') + end + end + str = @bufstr.slice!(0, size) + debug(str) + if buf + buf.replace(str) + else + str + end + end + + def <<(str) + rv = @outstr.write(str.to_java_bytes) + debug(str) + rv + end + + def flush + @socket.flush + end + + def sync + true + end + + def sync=(sync) + unless sync + raise "sync = false is not supported. This option was introduced for backward compatibility just in case." 
+ end + end + + private + + def fill + begin + size = @instr.read(@buf) + if size > 0 + @bufstr << String.from_java_bytes(@buf, Encoding::BINARY)[0, size] + end + size + rescue java.io.IOException => e + raise OpenSSL::SSL::SSLError.new("#{e.class}: #{e.getMessage}") + end + end + + def debug(str) + @debug_dev << str if @debug_dev && str + end + end + + class JRubySSLSocket < JavaSocketWrap + java_import 'java.io.ByteArrayInputStream' + java_import 'java.io.InputStreamReader' + java_import 'java.net.Socket' + java_import 'java.security.KeyStore' + java_import 'java.security.cert.Certificate' + java_import 'java.security.cert.CertificateFactory' + java_import 'javax.net.ssl.KeyManagerFactory' + java_import 'javax.net.ssl.SSLContext' + java_import 'javax.net.ssl.SSLSocketFactory' + java_import 'javax.net.ssl.TrustManager' + java_import 'javax.net.ssl.TrustManagerFactory' + java_import 'javax.net.ssl.X509TrustManager' + java_import 'org.jruby.ext.openssl.x509store.PEMInputOutput' + + class JavaCertificate + attr_reader :cert + + def initialize(cert) + @cert = cert + end + + def subject + @cert.getSubjectDN + end + + def to_text + @cert.toString + end + + def to_pem + '(not in PEM format)' + end + end + + class SSLStoreContext + attr_reader :current_cert, :chain, :error_depth, :error, :error_string + + def initialize(current_cert, chain, error_depth, error, error_string) + @current_cert, @chain, @error_depth, @error, @error_string = + current_cert, chain, error_depth, error, error_string + end + end + + class JSSEVerifyCallback + def initialize(verify_callback) + @verify_callback = verify_callback + end + + def call(is_ok, chain, error_depth = -1, error = -1, error_string = '(unknown)') + if @verify_callback + ruby_chain = chain.map { |cert| + JavaCertificate.new(cert) + }.reverse + # NOTE: The order depends on provider implementation + ruby_chain.each do |cert| + is_ok = @verify_callback.call( + is_ok, + SSLStoreContext.new(cert, ruby_chain, error_depth, error, error_string) + ) + end + end + is_ok + end + end + + class VerifyNoneTrustManagerFactory + class VerifyNoneTrustManager + include X509TrustManager + + def initialize(verify_callback) + @verify_callback = JSSEVerifyCallback.new(verify_callback) + end + + def checkServerTrusted(chain, authType) + @verify_callback.call(true, chain) + end + + def checkClientTrusted(chain, authType); end + def getAcceptedIssuers; end + end + + def initialize(verify_callback = nil) + @verify_callback = verify_callback + end + + def init(trustStore) + @managers = [VerifyNoneTrustManager.new(@verify_callback)].to_java(X509TrustManager) + end + + def getTrustManagers + @managers + end + end + + class SystemTrustManagerFactory + class SystemTrustManager + include X509TrustManager + + def initialize(original, verify_callback) + @original = original + @verify_callback = JSSEVerifyCallback.new(verify_callback) + end + + def checkServerTrusted(chain, authType) + is_ok = false + excn = nil + # TODO can we detect the depth from excn? 
+ error_depth = -1 + error = nil + error_message = nil + begin + @original.checkServerTrusted(chain, authType) + is_ok = true + rescue java.security.cert.CertificateException => excn + is_ok = false + error = excn.class.name + error_message = excn.getMessage + end + is_ok = @verify_callback.call(is_ok, chain, error_depth, error, error_message) + unless is_ok + excn ||= OpenSSL::SSL::SSLError.new('verifycallback failed') + raise excn + end + end + + def checkClientTrusted(chain, authType); end + def getAcceptedIssuers; end + end + + def initialize(verify_callback = nil) + @verify_callback = verify_callback + end + + def init(trust_store) + tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) + tmf.java_method(:init, [KeyStore]).call(trust_store) + @original = tmf.getTrustManagers.find { |tm| + tm.is_a?(X509TrustManager) + } + @managers = [SystemTrustManager.new(@original, @verify_callback)].to_java(X509TrustManager) + end + + def getTrustManagers + @managers + end + end + + module PEMUtils + def self.read_certificate(pem) + cert = pem.sub(/.*?-----BEGIN CERTIFICATE-----/m, '').sub(/-----END CERTIFICATE-----.*?/m, '') + der = cert.unpack('m*').first + cf = CertificateFactory.getInstance('X.509') + cf.generateCertificate(ByteArrayInputStream.new(der.to_java_bytes)) + end + + def self.read_private_key(pem, password) + if password + password = password.unpack('C*').to_java(:char) + end + PEMInputOutput.read_private_key(InputStreamReader.new(ByteArrayInputStream.new(pem.to_java_bytes)), password) + end + end + + class KeyStoreLoader + PASSWORD = 16.times.map { rand(256) }.to_java(:char) + + def initialize + @keystore = KeyStore.getInstance('JKS') + @keystore.load(nil) + end + + def add(cert_source, key_source, password) + cert_str = cert_source.respond_to?(:to_pem) ? cert_source.to_pem : File.read(cert_source.to_s) + cert = PEMUtils.read_certificate(cert_str) + @keystore.setCertificateEntry('client_cert', cert) + key_str = key_source.respond_to?(:to_pem) ? key_source.to_pem : File.read(key_source.to_s) + key_pair = PEMUtils.read_private_key(key_str, password) + @keystore.setKeyEntry('client_key', key_pair.getPrivate, PASSWORD, [cert].to_java(Certificate)) + end + + def keystore + @keystore + end + end + + class TrustStoreLoader + attr_reader :size + + def initialize + @trust_store = KeyStore.getInstance('JKS') + @trust_store.load(nil) + @size = 0 + end + + def add(cert_source) + return if cert_source == :default + if cert_source.respond_to?(:to_pem) + pem = cert_source.to_pem + load_pem(pem) + elsif File.directory?(cert_source) + warn("#{cert_source}: directory not yet supported") + return + else + pem = nil + File.read(cert_source).each_line do |line| + case line + when /-----BEGIN CERTIFICATE-----/ + pem = '' + when /-----END CERTIFICATE-----/ + load_pem(pem) + # keep parsing in case where multiple certificates in a file + else + if pem + pem << line + end + end + end + end + end + + def trust_store + if @size == 0 + nil + else + @trust_store + end + end + + private + + def load_pem(pem) + cert = PEMUtils.read_certificate(pem) + @size += 1 + @trust_store.setCertificateEntry("cert_#{@size}", cert) + end + end + + # Ported from commons-httpclient 'BrowserCompatHostnameVerifier' + class BrowserCompatHostnameVerifier + BAD_COUNTRY_2LDS = %w(ac co com ed edu go gouv gov info lg ne net or org).sort + require 'ipaddr' + + def extract_sans(cert, subject_type) + sans = cert.getSubjectAlternativeNames rescue nil + if sans.nil? 
+ return nil + end + sans.find_all { |san| + san.first.to_i == subject_type + }.map { |san| + san[1] + } + end + + def extract_cn(cert) + subject = cert.getSubjectX500Principal() + if subject + subject_dn = javax.naming.ldap.LdapName.new(subject.toString) + subject_dn.getRdns.to_a.reverse.each do |rdn| + attributes = rdn.toAttributes + cn = attributes.get('cn') + if cn + if value = cn.get + return value.to_s + end + end + end + end + end + + def ipaddr?(addr) + !(IPAddr.new(addr) rescue nil).nil? + end + + def verify(hostname, cert) + is_ipaddr = ipaddr?(hostname) + sans = extract_sans(cert, is_ipaddr ? 7 : 2) + cn = extract_cn(cert) + if sans + sans.each do |san| + return true if match_identify(hostname, san) + end + raise OpenSSL::SSL::SSLError.new("Certificate for <#{hostname}> doesn't match any of the subject alternative names: #{sans}") + elsif cn + return true if match_identify(hostname, cn) + raise OpenSSL::SSL::SSLError.new("Certificate for <#{hostname}> doesn't match common name of the certificate subject: #{cn}") + end + raise OpenSSL::SSL::SSLError.new("Certificate subject for for <#{hostname}> doesn't contain a common name and does not have alternative names") + end + + def match_identify(hostname, identity) + if hostname.nil? + return false + end + hostname = hostname.downcase + identity = identity.downcase + parts = identity.split('.') + if parts.length >= 3 && parts.first.end_with?('*') && valid_country_wildcard(parts) + create_wildcard_regexp(identity) =~ hostname + else + hostname == identity + end + end + + def create_wildcard_regexp(value) + # Escape first then search '\*' for meta-char interpolation + labels = value.split('.').map { |e| Regexp.escape(e) } + # Handle '*'s only at the left-most label, exclude A-label and U-label + labels[0].gsub!(/\\\*/, '[^.]+') if !labels[0].start_with?('xn\-\-') and labels[0].ascii_only? + /\A#{labels.join('\.')}\z/i + end + + def valid_country_wildcard(parts) + if parts.length != 3 || parts[2].length != 2 + true + else + !BAD_COUNTRY_2LDS.include?(parts[1]) + end + end + end + + def self.create_socket(session) + opts = { + :connect_timeout => session.connect_timeout * 1000, + # send_timeout is ignored in JRuby + :so_timeout => session.receive_timeout * 1000, + :tcp_keepalive => session.tcp_keepalive, + :debug_dev => session.debug_dev + } + socket = nil + begin + if session.proxy + site = session.proxy || session.dest + socket = JavaSocketWrap.connect(Socket.new, site, opts) + session.connect_ssl_proxy(JavaSocketWrap.new(socket), Util.urify(session.dest.to_s)) + end + new(socket, session.dest, session.ssl_config, opts) + rescue + socket.close if socket + raise + end + end + + DEFAULT_SSL_PROTOCOL = (java.lang.System.getProperty('java.specification.version') == '1.7') ? 
'TLSv1.2' : 'TLS' + def initialize(socket, dest, config, opts = {}) + @config = config + begin + @ssl_socket = create_ssl_socket(socket, dest, config, opts) + ssl_version = java_ssl_version(config) + @ssl_socket.setEnabledProtocols([ssl_version].to_java(java.lang.String)) if ssl_version != DEFAULT_SSL_PROTOCOL + if config.ciphers != SSLConfig::CIPHERS_DEFAULT + @ssl_socket.setEnabledCipherSuites(config.ciphers.to_java(java.lang.String)) + end + ssl_connect(dest.host) + rescue java.security.GeneralSecurityException => e + raise OpenSSL::SSL::SSLError.new(e.getMessage) + rescue java.io.IOException => e + raise OpenSSL::SSL::SSLError.new("#{e.class}: #{e.getMessage}") + end + + super(@ssl_socket, opts[:debug_dev]) + end + + def java_ssl_version(config) + if config.ssl_version == :auto + DEFAULT_SSL_PROTOCOL + else + config.ssl_version.to_s.tr('_', '.') + end + end + + def create_ssl_context(config) + unless config.cert_store_crl_items.empty? + raise NotImplementedError.new('Manual CRL configuration is not yet supported') + end + + km = nil + if config.client_cert && config.client_key + loader = KeyStoreLoader.new + loader.add(config.client_cert, config.client_key, config.client_key_pass) + kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) + kmf.init(loader.keystore, KeyStoreLoader::PASSWORD) + km = kmf.getKeyManagers + end + + trust_store = nil + verify_callback = config.verify_callback || config.method(:default_verify_callback) + if !config.verify? + tmf = VerifyNoneTrustManagerFactory.new(verify_callback) + else + tmf = SystemTrustManagerFactory.new(verify_callback) + loader = TrustStoreLoader.new + config.cert_store_items.each do |item| + loader.add(item) + end + trust_store = loader.trust_store + end + tmf.init(trust_store) + tm = tmf.getTrustManagers + + ctx = SSLContext.getInstance(java_ssl_version(config)) + ctx.init(km, tm, nil) + if config.timeout + ctx.getClientSessionContext.setSessionTimeout(config.timeout) + end + ctx + end + + def create_ssl_socket(socket, dest, config, opts) + ctx = create_ssl_context(config) + factory = ctx.getSocketFactory + if socket + ssl_socket = factory.createSocket(socket, dest.host, dest.port, true) + else + ssl_socket = factory.createSocket + JavaSocketWrap.connect(ssl_socket, dest, opts) + end + ssl_socket + end + + def peer_cert + @peer_cert + end + + private + + def ssl_connect(hostname) + @ssl_socket.startHandshake + ssl_session = @ssl_socket.getSession + @peer_cert = JavaCertificate.new(ssl_session.getPeerCertificates.first) + if $DEBUG + warn("Protocol version: #{ssl_session.getProtocol}") + warn("Cipher: #{@ssl_socket.getSession.getCipherSuite}") + end + post_connection_check(hostname) + end + + def post_connection_check(hostname) + if !@config.verify? + return + else + BrowserCompatHostnameVerifier.new.verify(hostname, @peer_cert.cert) + end + end + end + + SSLSocket = JRubySSLSocket + +end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/session.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/session.rb new file mode 100644 index 0000000..b3dedd5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/session.rb @@ -0,0 +1,960 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. 
+# +# httpclient/session.rb is based on http-access.rb in http-access/0.0.4. Some +# part of it is copyrighted by Maebashi-san who made and published +# http-access/0.0.4. http-access/0.0.4 did not include license notice but when +# I asked Maebashi-san he agreed that I can redistribute it under the same terms +# of Ruby. Many thanks to Maebashi-san. + + +require 'socket' +require 'thread' +require 'timeout' +require 'stringio' +require 'zlib' + +require 'httpclient/timeout' # TODO: remove this once we drop 1.8 support +require 'httpclient/ssl_config' +require 'httpclient/http' +if RUBY_ENGINE == 'jruby' + require 'httpclient/jruby_ssl_socket' +else + require 'httpclient/ssl_socket' +end + + +class HTTPClient + + + # Represents a Site: protocol scheme, host String and port Number. + class Site + # Protocol scheme. + attr_accessor :scheme + # Host String. + attr_accessor :host + alias hostname host + # Port number. + attr_accessor :port + + # Creates a new Site based on the given URI. + def initialize(uri = nil) + if uri + @scheme = uri.scheme || 'tcp' + @host = uri.hostname || '0.0.0.0' + @port = uri.port.to_i + else + @scheme = 'tcp' + @host = '0.0.0.0' + @port = 0 + end + end + + # Returns address String. + def addr + "#{@scheme}://#{@host}:#{@port.to_s}" + end + + # Returns true is scheme, host and port are '==' + def ==(rhs) + (@scheme == rhs.scheme) and (@host == rhs.host) and (@port == rhs.port) + end + + # Same as ==. + def eql?(rhs) + self == rhs + end + + def hash # :nodoc: + [@scheme, @host, @port].hash + end + + def to_s # :nodoc: + addr + end + + # Returns true if scheme, host and port of the given URI matches with this. + def match(uri) + (@scheme == uri.scheme) and (@host == uri.host) and (@port == uri.port.to_i) + end + + def inspect # :nodoc: + sprintf("#<%s:0x%x %s>", self.class.name, __id__, addr) + end + + EMPTY = Site.new.freeze + end + + + # Manages sessions for a HTTPClient instance. + class SessionManager + # Name of this client. Used for 'User-Agent' header in HTTP request. + attr_accessor :agent_name + # Owner of this client. Used for 'From' header in HTTP request. + attr_accessor :from + + # Requested protocol version + attr_accessor :protocol_version + # Chunk size for chunked request + attr_accessor :chunk_size + # Device for dumping log for debugging + attr_accessor :debug_dev + # Boolean value for Socket#sync + attr_accessor :socket_sync + # Boolean value to send TCP keepalive packets; no timing settings exist at present + attr_accessor :tcp_keepalive + + attr_accessor :connect_timeout + # Maximum retry count. 0 for infinite. + attr_accessor :connect_retry + attr_accessor :send_timeout + attr_accessor :receive_timeout + attr_accessor :keep_alive_timeout + attr_accessor :read_block_size + attr_accessor :protocol_retry_count + + # Raise BadResponseError if response size does not match with Content-Length header in response. 
+ attr_accessor :strict_response_size_check + + # Local address to bind local side of the socket to + attr_accessor :socket_local + + attr_accessor :ssl_config + + attr_reader :test_loopback_http_response + + attr_accessor :transparent_gzip_decompression + + def initialize(client) + @client = client + @proxy = client.proxy + + @agent_name = nil + @from = nil + + @protocol_version = nil + @debug_dev = client.debug_dev + @socket_sync = true + @tcp_keepalive = false + @chunk_size = ::HTTP::Message::Body::DEFAULT_CHUNK_SIZE + + @connect_timeout = 60 + @connect_retry = 1 + @send_timeout = 120 + @receive_timeout = 60 # For each read_block_size bytes + @keep_alive_timeout = 15 # '15' is from Apache 2 default + @read_block_size = 1024 * 16 # follows net/http change in 1.8.7 + @protocol_retry_count = 5 + + @ssl_config = nil + @test_loopback_http_response = [] + + @transparent_gzip_decompression = false + @strict_response_size_check = false + @socket_local = Site.new + + @sess_pool = {} + @sess_pool_mutex = Mutex.new + @sess_pool_last_checked = Time.now + end + + def proxy=(proxy) + if proxy.nil? + @proxy = nil + else + @proxy = Site.new(proxy) + end + end + + def query(req, via_proxy) + req.http_body.chunk_size = @chunk_size if req.http_body + sess = get_session(req, via_proxy) + begin + sess.query(req) + rescue + sess.close + raise + end + sess + end + + def reset(uri) + site = Site.new(uri) + close(site) + end + + def reset_all + close_all + end + + # assert: sess.last_used must not be nil + def keep(sess) + add_cached_session(sess) + end + + private + + # TODO: create PR for webmock's httpclient adapter to use get_session + # instead of open so that we can remove duplicated Site creation for + # each session. + def get_session(req, via_proxy = false) + uri = req.header.request_uri + if uri.scheme.nil? + raise ArgumentError.new("Request URI must have schema. Possibly add 'http://' to the request URI?") + end + site = Site.new(uri) + if cached = get_cached_session(site) + cached + else + open(uri, via_proxy) + end + end + + def open(uri, via_proxy = false) + site = Site.new(uri) + sess = Session.new(@client, site, @agent_name, @from) + sess.proxy = via_proxy ? @proxy : nil + sess.socket_sync = @socket_sync + sess.tcp_keepalive = @tcp_keepalive + sess.requested_version = @protocol_version if @protocol_version + sess.connect_timeout = @connect_timeout + sess.connect_retry = @connect_retry + sess.send_timeout = @send_timeout + sess.receive_timeout = @receive_timeout + sess.read_block_size = @read_block_size + sess.protocol_retry_count = @protocol_retry_count + sess.ssl_config = @ssl_config + sess.debug_dev = @debug_dev + sess.strict_response_size_check = @strict_response_size_check + sess.socket_local = @socket_local + sess.test_loopback_http_response = @test_loopback_http_response + sess.transparent_gzip_decompression = @transparent_gzip_decompression + sess + end + + def close_all + @sess_pool_mutex.synchronize do + @sess_pool.each do |site, pool| + pool.each do |sess| + sess.close + end + end + end + @sess_pool.clear + end + + # This method might not work as you expected... 
+ def close(dest) + if cached = get_cached_session(Site.new(dest)) + cached.close + true + else + false + end + end + + def get_cached_session(site) + if Thread.current[:HTTPClient_AcquireNewConnection] + return nil + end + @sess_pool_mutex.synchronize do + now = Time.now + if now > @sess_pool_last_checked + @keep_alive_timeout + scrub_cached_session(now) + @sess_pool_last_checked = now + end + if pool = @sess_pool[site] + pool.each_with_index do |sess, idx| + if valid_session?(sess, now) + return pool.slice!(idx) + end + end + end + end + nil + end + + def scrub_cached_session(now) + @sess_pool.each do |site, pool| + pool.replace(pool.select { |sess| + if valid_session?(sess, now) + true + else + sess.close # close & remove from the pool + false + end + }) + end + end + + def valid_session?(sess, now) + (now <= sess.last_used + @keep_alive_timeout) + end + + def add_cached_session(sess) + @sess_pool_mutex.synchronize do + (@sess_pool[sess.dest] ||= []).unshift(sess) + end + end + end + + + # Wraps up a Socket for method interception. + module SocketWrap + def initialize(socket, *args) + super(*args) + @socket = socket + end + + def close + @socket.close + end + + def closed? + @socket.closed? + end + + def eof? + @socket.eof? + end + + def gets(rs) + @socket.gets(rs) + end + + def read(size, buf = nil) + @socket.read(size, buf) + end + + def readpartial(size, buf = nil) + # StringIO doesn't support :readpartial + if @socket.respond_to?(:readpartial) + @socket.readpartial(size, buf) + else + @socket.read(size, buf) + end + end + + def <<(str) + @socket << str + end + + def flush + @socket.flush + end + + def sync + @socket.sync + end + + def sync=(sync) + @socket.sync = sync + end + end + + + # Module for intercepting Socket methods and dumps in/out to given debugging + # device. debug_dev must respond to <<. + module DebugSocket + extend SocketWrap + + def debug_dev=(debug_dev) + @debug_dev = debug_dev + end + + def close + super + debug("! CONNECTION CLOSED\n") + end + + def gets(rs) + str = super + debug(str) + str + end + + def read(size, buf = nil) + str = super + debug(str) + str + end + + def readpartial(size, buf = nil) + str = super + debug(str) + str + end + + def <<(str) + super + debug(str) + end + + private + + def debug(str) + if str && @debug_dev + if str.index("\0") + require 'hexdump' + str.force_encoding('BINARY') if str.respond_to?(:force_encoding) + @debug_dev << HexDump.encode(str).join("\n") + else + @debug_dev << str + end + end + end + end + + + # Dummy Socket for emulating loopback test. + class LoopBackSocket + include SocketWrap + + def initialize(host, port, response) + super(response.is_a?(StringIO) ? response : StringIO.new(response)) + @host = host + @port = port + end + + def <<(str) + # ignored + end + + def peer_cert + nil + end + end + + + # Manages a HTTP session with a Site. 
+ class Session + include HTTPClient::Timeout + include Util + + # Destination site + attr_reader :dest + # Proxy site + attr_accessor :proxy + # Boolean value for Socket#sync + attr_accessor :socket_sync + # Boolean value to send TCP keepalive packets; no timing settings exist at present + attr_accessor :tcp_keepalive + # Requested protocol version + attr_accessor :requested_version + # Device for dumping log for debugging + attr_accessor :debug_dev + + attr_accessor :connect_timeout + attr_accessor :connect_retry + attr_accessor :send_timeout + attr_accessor :receive_timeout + attr_accessor :read_block_size + attr_accessor :protocol_retry_count + + attr_accessor :strict_response_size_check + attr_accessor :socket_local + + attr_accessor :ssl_config + attr_reader :ssl_peer_cert + attr_accessor :test_loopback_http_response + + attr_accessor :transparent_gzip_decompression + attr_reader :last_used + + def initialize(client, dest, agent_name, from) + @client = client + @dest = dest + @proxy = nil + @socket_sync = true + @tcp_keepalive = false + @requested_version = nil + + @debug_dev = nil + + @connect_timeout = nil + @connect_retry = 1 + @send_timeout = nil + @receive_timeout = nil + @read_block_size = nil + @protocol_retry_count = 5 + + @ssl_config = nil + @ssl_peer_cert = nil + + @test_loopback_http_response = nil + @strict_response_size_check = false + @socket_local = Site::EMPTY + + @agent_name = agent_name + @from = from + @state = :INIT + + @requests = [] + + @status = nil + @reason = nil + @headers = [] + + @socket = nil + @readbuf = nil + + @transparent_gzip_decompression = false + @last_used = nil + end + + # Send a request to the server + def query(req) + connect if @state == :INIT + # Use absolute URI (not absolute path) iif via proxy AND not HTTPS. + req.header.request_absolute_uri = !@proxy.nil? && !https?(@dest) + begin + ::Timeout.timeout(@send_timeout, SendTimeoutError) do + set_header(req) + req.dump(@socket) + # flush the IO stream as IO::sync mode is false + @socket.flush unless @socket_sync + end + rescue Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EPIPE, IOError + # JRuby can raise IOError instead of ECONNRESET for now + close + raise KeepAliveDisconnected.new(self, $!) + rescue HTTPClient::TimeoutError + close + raise + rescue => e + close + if SSLEnabled and e.is_a?(OpenSSL::SSL::SSLError) + raise KeepAliveDisconnected.new(self, e) + else + raise + end + end + + @state = :META if @state == :WAIT + @next_connection = nil + @requests.push(req) + @last_used = Time.now + end + + def close + if !@socket.nil? and !@socket.closed? + # @socket.flush may block when it the socket is already closed by + # foreign host and the client runs under MT-condition. + @socket.close + end + @state = :INIT + end + + def closed? + @state == :INIT + end + + def get_header + begin + if @state != :META + raise RuntimeError.new("get_status must be called at the beginning of a session") + end + read_header + rescue + close + raise + end + [@version, @status, @reason, @headers] + end + + def eof? + if !@content_length.nil? + @content_length == 0 + else + @socket.closed? or @socket.eof? + end + end + + def get_body(&block) + begin + read_header if @state == :META + return nil if @state != :DATA + if @transparent_gzip_decompression + block = content_inflater_block(@content_encoding, block) + end + if @chunked + read_body_chunked(&block) + elsif @content_length + read_body_length(&block) + else + read_body_rest(&block) + end + rescue + close + raise + end + if eof? 
+ if @next_connection + @state = :WAIT + else + close + end + end + nil + end + + def create_socket(host, port) + socket = nil + begin + @debug_dev << "! CONNECT TO #{host}:#{port}\n" if @debug_dev + clean_host = host.delete("[]") + if @socket_local == Site::EMPTY + socket = TCPSocket.new(clean_host, port) + else + clean_local = @socket_local.host.delete("[]") + socket = TCPSocket.new(clean_host, port, clean_local, @socket_local.port) + end + socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if @tcp_keepalive + if @debug_dev + @debug_dev << "! CONNECTION ESTABLISHED\n" + socket.extend(DebugSocket) + socket.debug_dev = @debug_dev + end + rescue SystemCallError => e + raise e.class, e.message + " (#{host}:#{port})" + rescue SocketError => e + raise e.class, e.message + " (#{host}:#{port})" + end + socket + end + + def create_loopback_socket(host, port, str) + @debug_dev << "! CONNECT TO #{host}:#{port}\n" if @debug_dev + socket = LoopBackSocket.new(host, port, str) + if @debug_dev + @debug_dev << "! CONNECTION ESTABLISHED\n" + socket.extend(DebugSocket) + socket.debug_dev = @debug_dev + end + if https?(@dest) && @proxy + connect_ssl_proxy(socket, Util.urify(@dest.to_s)) + end + socket + end + + def connect_ssl_proxy(socket, uri) + req = HTTP::Message.new_connect_request(uri) + @client.request_filter.each do |filter| + filter.filter_request(req) + end + set_header(req) + req.dump(socket) + socket.flush unless @socket_sync + res = HTTP::Message.new_response('') + parse_header(socket) + res.http_version, res.status, res.reason = @version, @status, @reason + @headers.each do |key, value| + res.header.set(key.to_s, value) + end + commands = @client.request_filter.collect { |filter| + filter.filter_response(req, res) + } + if commands.find { |command| command == :retry } + raise RetryableResponse.new(res) + end + unless @status == 200 + raise BadResponseError.new("connect to ssl proxy failed with status #{@status} #{@reason}", res) + end + end + + private + + # This inflater allows deflate compression with/without zlib header + class LenientInflater + def initialize + @inflater = Zlib::Inflate.new(Zlib::MAX_WBITS) + @first = true + end + + def inflate(body) + if @first + first_inflate(body) + else + @inflater.inflate(body) + end + end + + private + + def first_inflate(body) + @first = false + begin + @inflater.inflate(body) + rescue Zlib::DataError + # fallback to deflate without zlib header + @inflater = Zlib::Inflate.new(-Zlib::MAX_WBITS) + @inflater.inflate(body) + end + end + end + + def content_inflater_block(content_encoding, block) + case content_encoding + when 'gzip', 'x-gzip' + # zlib itself has a functionality to decompress gzip stream. + # - zlib 1.2.5 Manual + # http://www.zlib.net/manual.html#Advanced + # > windowBits can also be greater than 15 for optional gzip decoding. Add 32 to + # > windowBits to enable zlib and gzip decoding with automatic header detection, + # > or add 16 to decode only the gzip format + inflate_stream = Zlib::Inflate.new(Zlib::MAX_WBITS + 32) + when 'deflate' + inflate_stream = LenientInflater.new + else + return block + end + Proc.new { |buf| + block.call(inflate_stream.inflate(buf)) + } + end + + def set_header(req) + if @requested_version + if /^(?:HTTP\/|)(\d+.\d+)$/ =~ @requested_version + req.http_version = $1 + end + end + if @agent_name && req.header.get('User-Agent').empty? + req.header.set('User-Agent', "#{@agent_name} #{LIB_NAME}") + end + if @from && req.header.get('From').empty? 
+ req.header.set('From', @from) + end + if req.header.get('Accept').empty? + req.header.set('Accept', '*/*') + end + if @transparent_gzip_decompression + req.header.set('Accept-Encoding', 'gzip,deflate') + end + if req.header.get('Date').empty? + req.header.set_date_header + end + end + + # Connect to the server + def connect + site = @proxy || @dest + retry_number = 0 + begin + ::Timeout.timeout(@connect_timeout, ConnectTimeoutError) do + if str = @test_loopback_http_response.shift + @socket = create_loopback_socket(site.host, site.port, str) + elsif https?(@dest) + @socket = SSLSocket.create_socket(self) + @ssl_peer_cert = @socket.peer_cert + else + @socket = create_socket(site.host, site.port) + end + @socket.sync = @socket_sync + end + rescue RetryableResponse + retry_number += 1 + if retry_number < @protocol_retry_count + retry + end + raise BadResponseError.new("connect to the server failed with status #{@status} #{@reason}") + rescue TimeoutError + if @connect_retry == 0 + retry + else + retry_number += 1 + retry if retry_number < @connect_retry + end + close + raise + end + @state = :WAIT + end + + # Read status block. + def read_header + @content_length = nil + @chunked = false + @content_encoding = nil + @chunk_length = 0 + parse_header(@socket) + # Header of the request has been parsed. + @state = :DATA + req = @requests.shift + if req.header.request_method == 'HEAD' or no_message_body?(@status) + @content_length = 0 + if @next_connection + @state = :WAIT + else + close + end + end + @next_connection = false if !@content_length and !@chunked + end + + StatusParseRegexp = %r(\AHTTP/(\d+\.\d+)\s+(\d\d\d)\s*([^\r\n]+)?\r?\n\z) + def parse_header(socket) + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + initial_line = nil + begin + begin + initial_line = socket.gets("\n") + if initial_line.nil? + close + raise KeepAliveDisconnected.new(self) + end + rescue Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EPIPE, IOError + # JRuby can raise IOError instead of ECONNRESET for now + close + raise KeepAliveDisconnected.new(self, $!) + end + if StatusParseRegexp !~ initial_line + @version = '0.9' + @status = nil + @reason = nil + @next_connection = false + @content_length = nil + @readbuf = initial_line + break + end + @version, @status, @reason = $1, $2.to_i, $3 + @next_connection = HTTP::Message.keep_alive_enabled?(@version) + @headers = [] + while true + line = socket.gets("\n") + unless line + raise BadResponseError.new('unexpected EOF') + end + line.chomp! + break if line.empty? + if line[0] == ?\ or line[0] == ?\t + last = @headers.last[1] + last << ' ' unless last.empty? + last << line.strip + else + key, value = line.strip.split(/\s*:\s*/, 2) + parse_content_header(key, value) + @headers << [key, value] + end + end + end while (@version == '1.1' && @status == 100) + end + end + + def no_message_body?(status) + !status.nil? 
&& # HTTP/0.9 + ((status >= 100 && status < 200) || status == 204 || status == 304) + end + + def parse_content_header(key, value) + key = key.downcase + case key + when 'content-length' + @content_length = value.to_i + when 'content-encoding' + @content_encoding = value.downcase + when 'transfer-encoding' + if value.downcase == 'chunked' + @chunked = true + @chunk_length = 0 + @content_length = nil + end + when 'connection', 'proxy-connection' + if value.downcase == 'keep-alive' + @next_connection = true + else + @next_connection = false + end + end + end + + def read_body_length(&block) + return nil if @content_length == 0 + while true + buf = empty_bin_str + maxbytes = @read_block_size + maxbytes = @content_length if maxbytes > @content_length && @content_length > 0 + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + begin + @socket.readpartial(maxbytes, buf) + rescue EOFError + close + buf = nil + if @strict_response_size_check + raise BadResponseError.new("EOF while reading rest #{@content_length} bytes") + end + end + end + if buf && buf.bytesize > 0 + @content_length -= buf.bytesize + yield buf + else + @content_length = 0 + end + return if @content_length == 0 + end + end + + RS = "\r\n" + def read_body_chunked(&block) + buf = empty_bin_str + while true + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + len = @socket.gets(RS) + if len.nil? # EOF + close + return + end + @chunk_length = len.hex + if @chunk_length == 0 + @content_length = 0 + @socket.gets(RS) + return + end + @socket.read(@chunk_length, buf) + @socket.read(2) + end + unless buf.empty? + yield buf + end + end + end + + def read_body_rest + if @readbuf and @readbuf.bytesize > 0 + yield @readbuf + @readbuf = nil + end + while true + buf = empty_bin_str + ::Timeout.timeout(@receive_timeout, ReceiveTimeoutError) do + begin + @socket.readpartial(@read_block_size, buf) + rescue EOFError + buf = nil + if @strict_response_size_check + raise BadResponseError.new("EOF while reading chunked response") + end + end + end + if buf && buf.bytesize > 0 + yield buf + else + return + end + end + end + + def empty_bin_str + str = '' + str.force_encoding('BINARY') if str.respond_to?(:force_encoding) + str + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/ssl_config.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/ssl_config.rb new file mode 100644 index 0000000..f6e7ce9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/ssl_config.rb @@ -0,0 +1,424 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +class HTTPClient + + begin + require 'openssl' + SSLEnabled = true + rescue LoadError + SSLEnabled = false + end + + # Represents SSL configuration for HTTPClient instance. + # The implementation depends on OpenSSL. + # + # == Trust Anchor Control + # + # SSLConfig loads 'httpclient/cacert.pem' as a trust anchor + # (trusted certificate(s)) with add_trust_ca in initialization time. + # This means that HTTPClient instance trusts some CA certificates by default, + # like Web browsers. 'httpclient/cacert.pem' is downloaded from curl web + # site by the author and included in released package. 
+ # + # On JRuby, HTTPClient uses Java runtime's trusted CA certificates, not + # cacert.pem by default. You can load cacert.pem by calling + # SSLConfig#load_trust_ca manually like: + # + # HTTPClient.new { self.ssl_config.load_trust_ca }.get("https://...") + # + # You may want to change trust anchor by yourself. Call clear_cert_store + # then add_trust_ca for that purpose. + class SSLConfig + include HTTPClient::Util + if SSLEnabled + include OpenSSL + + module ::OpenSSL + module X509 + class Store + attr_reader :_httpclient_cert_store_items + + # TODO: use prepend instead when we drop JRuby + 1.9.x support + wrapped = {} + + wrapped[:initialize] = instance_method(:initialize) + define_method(:initialize) do |*args| + wrapped[:initialize].bind(self).call(*args) + @_httpclient_cert_store_items = [ENV['SSL_CERT_FILE'] || :default] + end + + [:add_cert, :add_file, :add_path].each do |m| + wrapped[m] = instance_method(m) + define_method(m) do |cert| + res = wrapped[m].bind(self).call(cert) + @_httpclient_cert_store_items << cert + res + end + end + end + end + end + end + + class << self + private + def attr_config(symbol) + name = symbol.to_s + ivar_name = "@#{name}" + define_method(name) { + instance_variable_get(ivar_name) + } + define_method("#{name}=") { |rhs| + if instance_variable_get(ivar_name) != rhs + instance_variable_set(ivar_name, rhs) + change_notify + end + } + symbol + end + end + + + CIPHERS_DEFAULT = "ALL:!aNULL:!eNULL:!SSLv2" # OpenSSL >1.0.0 default + + # Which TLS protocol version (also called method) will be used. Defaults + # to :auto which means that OpenSSL decides (In my tests this resulted + # with always the highest available protocol being used). + # String name of OpenSSL's SSL version method name: TLSv1_2, TLSv1_1, TLSv1, + # SSLv2, SSLv23, SSLv3 or :auto (and nil) to allow version negotiation (default). + # See {OpenSSL::SSL::SSLContext::METHODS} for a list of available versions + # in your specific Ruby environment. + attr_config :ssl_version + # OpenSSL::X509::Certificate:: certificate for SSL client authentication. + # nil by default. (no client authentication) + attr_config :client_cert + # OpenSSL::PKey::PKey:: private key for SSL client authentication. + # nil by default. (no client authentication) + attr_config :client_key + # OpenSSL::PKey::PKey:: private key pass phrase for client_key. + # nil by default. (no pass phrase) + attr_config :client_key_pass + + # A number which represents OpenSSL's verify mode. Default value is + # OpenSSL::SSL::VERIFY_PEER | OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT. + attr_config :verify_mode + # A number of verify depth. Certification path which length is longer than + # this depth is not allowed. + # CAUTION: this is OpenSSL specific option and ignored on JRuby. + attr_config :verify_depth + # A callback handler for custom certificate verification. nil by default. + # If the handler is set, handler.call is invoked just after general + # OpenSSL's verification. handler.call is invoked with 2 arguments, + # ok and ctx; ok is a result of general OpenSSL's verification. ctx is a + # OpenSSL::X509::StoreContext. + attr_config :verify_callback + # SSL timeout in sec. nil by default. + attr_config :timeout + # A number of OpenSSL's SSL options. Default value is + # OpenSSL::SSL::OP_ALL | OpenSSL::SSL::OP_NO_SSLv2 + # CAUTION: this is OpenSSL specific option and ignored on JRuby. + # Use ssl_version to specify the TLS version you want to use. + attr_config :options + # A String of OpenSSL's cipher configuration. 
Default value is + # ALL:!ADH:!LOW:!EXP:!MD5:+SSLv2:@STRENGTH + # See ciphers(1) man in OpenSSL for more detail. + attr_config :ciphers + + # OpenSSL::X509::X509::Store used for verification. You can reset the + # store with clear_cert_store and set the new store with cert_store=. + attr_reader :cert_store # don't use if you don't know what it is. + + # For server side configuration. Ignore this. + attr_config :client_ca # :nodoc: + + # These array keeps original files/dirs that was added to @cert_store + def cert_store_items; @cert_store._httpclient_cert_store_items; end + attr_reader :cert_store_crl_items + + # Creates a SSLConfig. + def initialize(client) + return unless SSLEnabled + @client = client + @cert_store = X509::Store.new + @cert_store_crl_items = [] + @client_cert = @client_key = @client_key_pass = @client_ca = nil + @verify_mode = SSL::VERIFY_PEER | SSL::VERIFY_FAIL_IF_NO_PEER_CERT + @verify_depth = nil + @verify_callback = nil + @dest = nil + @timeout = nil + @ssl_version = :auto + # Follow ruby-ossl's definition + @options = OpenSSL::SSL::OP_ALL + @options &= ~OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS if defined?(OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS) + @options |= OpenSSL::SSL::OP_NO_COMPRESSION if defined?(OpenSSL::SSL::OP_NO_COMPRESSION) + @options |= OpenSSL::SSL::OP_NO_SSLv2 if defined?(OpenSSL::SSL::OP_NO_SSLv2) + @options |= OpenSSL::SSL::OP_NO_SSLv3 if defined?(OpenSSL::SSL::OP_NO_SSLv3) + # OpenSSL 0.9.8 default: "ALL:!ADH:!LOW:!EXP:!MD5:+SSLv2:@STRENGTH" + @ciphers = CIPHERS_DEFAULT + @cacerts_loaded = false + end + + # Sets certificate and private key for SSL client authentication. + # cert_file:: must be a filename of PEM/DER formatted file. + # key_file:: must be a filename of PEM/DER formatted file. Key must be an + # RSA key. If you want to use other PKey algorithm, + # use client_key=. + # + # Calling this method resets all existing sessions if value is changed. + def set_client_cert_file(cert_file, key_file, pass = nil) + if (@client_cert != cert_file) || (@client_key != key_file) || (@client_key_pass != pass) + @client_cert, @client_key, @client_key_pass = cert_file, key_file, pass + change_notify + end + end + + # Sets OpenSSL's default trusted CA certificates. Generally, OpenSSL is + # configured to use OS's trusted CA certificates located at + # /etc/pki/certs or /etc/ssl/certs. Unfortunately OpenSSL's Windows build + # does not work with Windows Certificate Storage. + # + # On Windows or when you build OpenSSL manually, you can set the + # CA certificates directory by SSL_CERT_DIR env variable at runtime. + # + # SSL_CERT_DIR=/etc/ssl/certs ruby -rhttpclient -e "..." + # + # Calling this method resets all existing sessions. + def set_default_paths + @cacerts_loaded = true # avoid lazy override + @cert_store = X509::Store.new + @cert_store.set_default_paths + change_notify + end + + # Drops current certificate store (OpenSSL::X509::Store) for SSL and create + # new one for the next session. + # + # Calling this method resets all existing sessions. + def clear_cert_store + @cacerts_loaded = true # avoid lazy override + @cert_store = X509::Store.new + @cert_store._httpclient_cert_store_items.clear + change_notify + end + + # Sets new certificate store (OpenSSL::X509::Store). + # don't use if you don't know what it is. + # + # Calling this method resets all existing sessions. 
+ def cert_store=(cert_store) + # This is object equality check, since OpenSSL::X509::Store doesn't overload == + if !@cacerts_loaded || (@cert_store != cert_store) + @cacerts_loaded = true # avoid lazy override + @cert_store = cert_store + change_notify + end + end + + # Sets trust anchor certificate(s) for verification. + # trust_ca_file_or_hashed_dir:: a filename of a PEM/DER formatted + # OpenSSL::X509::Certificate or + # a 'c-rehash'eddirectory name which stores + # trusted certificate files. + # + # Calling this method resets all existing sessions. + def add_trust_ca(trust_ca_file_or_hashed_dir) + unless File.exist?(trust_ca_file_or_hashed_dir) + trust_ca_file_or_hashed_dir = File.join(File.dirname(__FILE__), trust_ca_file_or_hashed_dir) + end + @cacerts_loaded = true # avoid lazy override + add_trust_ca_to_store(@cert_store, trust_ca_file_or_hashed_dir) + change_notify + end + alias set_trust_ca add_trust_ca + + def add_trust_ca_to_store(cert_store, trust_ca_file_or_hashed_dir) + if FileTest.directory?(trust_ca_file_or_hashed_dir) + cert_store.add_path(trust_ca_file_or_hashed_dir) + else + cert_store.add_file(trust_ca_file_or_hashed_dir) + end + end + + # Loads default trust anchors. + # Calling this method resets all existing sessions. + def load_trust_ca + load_cacerts(@cert_store) + change_notify + end + + # Adds CRL for verification. + # crl:: a OpenSSL::X509::CRL or a filename of a PEM/DER formatted + # OpenSSL::X509::CRL. + # + # On JRuby, instead of setting CRL by yourself you can set following + # options to let HTTPClient to perform revocation check with CRL and OCSP: + # -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true + # ex. jruby -J-Dcom.sun.security.enableCRLDP=true -J-Dcom.sun.net.ssl.checkRevocation=true app.rb + # + # Revoked cert example: https://test-sspev.verisign.com:2443/test-SSPEV-revoked-verisign.html + # + # Calling this method resets all existing sessions. + def add_crl(crl) + unless crl.is_a?(X509::CRL) + crl = X509::CRL.new(File.open(crl) { |f| f.read }) + end + @cert_store.add_crl(crl) + @cert_store_crl_items << crl + @cert_store.flags = X509::V_FLAG_CRL_CHECK | X509::V_FLAG_CRL_CHECK_ALL + change_notify + end + alias set_crl add_crl + + def verify? + @verify_mode && (@verify_mode & OpenSSL::SSL::VERIFY_PEER != 0) + end + + # interfaces for SSLSocket. + def set_context(ctx) # :nodoc: + load_trust_ca unless @cacerts_loaded + @cacerts_loaded = true + # Verification: Use Store#verify_callback instead of SSLContext#verify*? + ctx.cert_store = @cert_store + ctx.verify_mode = @verify_mode + ctx.verify_depth = @verify_depth if @verify_depth + ctx.verify_callback = @verify_callback || method(:default_verify_callback) + # SSL config + if @client_cert + ctx.cert = @client_cert.is_a?(X509::Certificate) ? @client_cert : + X509::Certificate.new(File.open(@client_cert) { |f| f.read }) + end + if @client_key + ctx.key = @client_key.is_a?(PKey::PKey) ? @client_key : + PKey::RSA.new(File.open(@client_key) { |f| f.read }, @client_key_pass) + end + ctx.client_ca = @client_ca + ctx.timeout = @timeout + ctx.options = @options + ctx.ciphers = @ciphers + ctx.ssl_version = @ssl_version unless @ssl_version == :auto + end + + # post connection check proc for ruby < 1.8.5. 
+ # this definition must match with the one in ext/openssl/lib/openssl/ssl.rb + def post_connection_check(peer_cert, hostname) # :nodoc: + check_common_name = true + cert = peer_cert + cert.extensions.each{|ext| + next if ext.oid != "subjectAltName" + ext.value.split(/,\s+/).each{|general_name| + if /\ADNS:(.*)/ =~ general_name + check_common_name = false + reg = Regexp.escape($1).gsub(/\\\*/, "[^.]+") + return true if /\A#{reg}\z/i =~ hostname + elsif /\AIP Address:(.*)/ =~ general_name + check_common_name = false + return true if $1 == hostname + end + } + } + if check_common_name + cert.subject.to_a.each{|oid, value| + if oid == "CN" + reg = Regexp.escape(value).gsub(/\\\*/, "[^.]+") + return true if /\A#{reg}\z/i =~ hostname + end + } + end + raise SSL::SSLError, "hostname was not match with the server certificate" + end + + # Default callback for verification: only dumps error. + def default_verify_callback(is_ok, ctx) + if $DEBUG + if is_ok + warn("ok: #{ctx.current_cert.subject.to_s.dump}") + else + warn("ng: #{ctx.current_cert.subject.to_s.dump} at depth #{ctx.error_depth} - #{ctx.error}: #{ctx.error_string} in #{ctx.chain.inspect}") + end + warn(ctx.current_cert.to_text) + warn(ctx.current_cert.to_pem) + end + if !is_ok + depth = ctx.error_depth + code = ctx.error + msg = ctx.error_string + warn("at depth #{depth} - #{code}: #{msg}") if $DEBUG + end + is_ok + end + + # Sample callback method: CAUTION: does not check CRL/ARL. + def sample_verify_callback(is_ok, ctx) + unless is_ok + depth = ctx.error_depth + code = ctx.error + msg = ctx.error_string + warn("at depth #{depth} - #{code}: #{msg}") if $DEBUG + return false + end + + cert = ctx.current_cert + self_signed = false + ca = false + pathlen = nil + server_auth = true + self_signed = (cert.subject.cmp(cert.issuer) == 0) + + # Check extensions whatever its criticality is. (sample) + cert.extensions.each do |ex| + case ex.oid + when 'basicConstraints' + /CA:(TRUE|FALSE), pathlen:(\d+)/ =~ ex.value + ca = ($1 == 'TRUE') + pathlen = $2.to_i + when 'keyUsage' + usage = ex.value.split(/\s*,\s*/) + ca = usage.include?('Certificate Sign') + server_auth = usage.include?('Key Encipherment') + when 'extendedKeyUsage' + usage = ex.value.split(/\s*,\s*/) + server_auth = usage.include?('Netscape Server Gated Crypto') + when 'nsCertType' + usage = ex.value.split(/\s*,\s*/) + ca = usage.include?('SSL CA') + server_auth = usage.include?('SSL Server') + end + end + + if self_signed + warn('self signing CA') if $DEBUG + return true + elsif ca + warn('middle level CA') if $DEBUG + return true + elsif server_auth + warn('for server authentication') if $DEBUG + return true + end + + return false + end + + private + + def change_notify + @client.reset_all + nil + end + + # Use 2048 bit certs trust anchor + def load_cacerts(cert_store) + file = File.join(File.dirname(__FILE__), 'cacert.pem') + add_trust_ca_to_store(cert_store, file) + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/ssl_socket.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/ssl_socket.rb new file mode 100644 index 0000000..1480142 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/ssl_socket.rb @@ -0,0 +1,150 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. 
You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'httpclient/ssl_config' + + +class HTTPClient + + # Wraps up OpenSSL::SSL::SSLSocket and offers debugging features. + class SSLSocket + def self.create_socket(session) + opts = { + :debug_dev => session.debug_dev + } + site = session.proxy || session.dest + socket = session.create_socket(site.host, site.port) + begin + if session.proxy + session.connect_ssl_proxy(socket, Util.urify(session.dest.to_s)) + end + new(socket, session.dest, session.ssl_config, opts) + rescue + socket.close + raise + end + end + + def initialize(socket, dest, config, opts = {}) + unless SSLEnabled + raise ConfigurationError.new('Ruby/OpenSSL module is required') + end + @socket = socket + @config = config + @ssl_socket = create_openssl_socket(@socket) + @debug_dev = opts[:debug_dev] + ssl_connect(dest.host) + end + + def peer_cert + @ssl_socket.peer_cert + end + + def close + @ssl_socket.close + @socket.close + end + + def closed? + @socket.closed? + end + + def eof? + @ssl_socket.eof? + end + + def gets(rs) + str = @ssl_socket.gets(rs) + debug(str) + str + end + + def read(size, buf = nil) + str = @ssl_socket.read(size, buf) + debug(str) + str + end + + def readpartial(size, buf = nil) + str = @ssl_socket.readpartial(size, buf) + debug(str) + str + end + + def <<(str) + rv = @ssl_socket.write(str) + debug(str) + rv + end + + def flush + @ssl_socket.flush + end + + def sync + @ssl_socket.sync + end + + def sync=(sync) + @ssl_socket.sync = sync + end + + private + + def ssl_connect(hostname = nil) + if hostname && @ssl_socket.respond_to?(:hostname=) + @ssl_socket.hostname = hostname + end + @ssl_socket.connect + if $DEBUG + if @ssl_socket.respond_to?(:ssl_version) + warn("Protocol version: #{@ssl_socket.ssl_version}") + end + warn("Cipher: #{@ssl_socket.cipher.inspect}") + end + post_connection_check(hostname) + end + + def post_connection_check(hostname) + verify_mode = @config.verify_mode || OpenSSL::SSL::VERIFY_NONE + if verify_mode == OpenSSL::SSL::VERIFY_NONE + return + elsif @ssl_socket.peer_cert.nil? and + check_mask(verify_mode, OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT) + raise OpenSSL::SSL::SSLError.new('no peer cert') + end + if @ssl_socket.respond_to?(:post_connection_check) and RUBY_VERSION > "1.8.4" + @ssl_socket.post_connection_check(hostname) + else + @config.post_connection_check(@ssl_socket.peer_cert, hostname) + end + end + + def check_mask(value, mask) + value & mask == mask + end + + def create_openssl_socket(socket) + ssl_socket = nil + if OpenSSL::SSL.const_defined?("SSLContext") + ctx = OpenSSL::SSL::SSLContext.new + @config.set_context(ctx) + ssl_socket = OpenSSL::SSL::SSLSocket.new(socket, ctx) + else + ssl_socket = OpenSSL::SSL::SSLSocket.new(socket) + @config.set_context(ssl_socket) + end + ssl_socket + end + + def debug(str) + @debug_dev << str if @debug_dev && str + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/timeout.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/timeout.rb new file mode 100644 index 0000000..c5bbcac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/timeout.rb @@ -0,0 +1,140 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. 
You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'timeout' +require 'thread' + + +class HTTPClient + + + # Replaces timeout.rb to avoid Thread creation and scheduling overhead. + # + # You should check another timeout replace in WEBrick. + # See lib/webrick/utils.rb in ruby/1.9. + # + # About this implementation: + # * Do not create Thread for each timeout() call. Just create 1 Thread for + # timeout scheduler. + # * Do not wakeup the scheduler thread so often. Let scheduler thread sleep + # until the nearest period. +if !defined?(JRUBY_VERSION) and RUBY_VERSION < '1.9' + class TimeoutScheduler + + # Represents timeout period. + class Period + attr_reader :thread, :time + + # Creates new Period. + def initialize(thread, time, ex) + @thread, @time, @ex = thread, time, ex + @lock = Mutex.new + end + + # Raises if thread exists and alive. + def raise(message) + @lock.synchronize do + if @thread and @thread.alive? + @thread.raise(@ex, message) + end + end + end + + # Cancel this Period. Mutex is needed to avoid too-late exception. + def cancel + @lock.synchronize do + @thread = nil + end + end + end + + # Creates new TimeoutScheduler. + def initialize + @pool = {} + @next = nil + @thread = start_timer_thread + end + + # Registers new timeout period. + def register(thread, sec, ex) + period = Period.new(thread, Time.now + sec, ex || ::Timeout::Error) + @pool[period] = true + if @next.nil? or period.time < @next + begin + @thread.wakeup + rescue ThreadError + # Thread may be dead by fork. + @thread = start_timer_thread + end + end + period + end + + # Cancels the given period. + def cancel(period) + @pool.delete(period) + period.cancel + end + + private + + def start_timer_thread + thread = Thread.new { + while true + if @pool.empty? + @next = nil + sleep + else + min, = @pool.min { |a, b| a[0].time <=> b[0].time } + @next = min.time + sec = @next - Time.now + if sec > 0 + sleep(sec) + end + end + now = Time.now + @pool.keys.each do |period| + if period.time < now + period.raise('execution expired') + cancel(period) + end + end + end + } + Thread.pass while thread.status != 'sleep' + thread + end + end + + class << self + # CAUTION: caller must aware of race condition. + def timeout_scheduler + @timeout_scheduler ||= TimeoutScheduler.new + end + end + timeout_scheduler # initialize at first time. +end + + module Timeout + if !defined?(JRUBY_VERSION) and RUBY_VERSION < '1.9' + def timeout(sec, ex = nil, &block) + return yield if sec == nil or sec.zero? + scheduler = nil + begin + scheduler = HTTPClient.timeout_scheduler + period = scheduler.register(Thread.current, sec, ex) + yield(sec) + ensure + scheduler.cancel(period) if scheduler and period + end + end + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/util.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/util.rb new file mode 100644 index 0000000..6ff3801 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/util.rb @@ -0,0 +1,222 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. 
+ + +unless ''.respond_to?(:bytesize) + class String + alias bytesize size + end +end + +if RUBY_VERSION < "1.9.3" + require 'uri' + module URI + class Generic + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + end + end +end + +# With recent JRuby 1.7 + jruby-openssl, X509CRL#extentions_to_text causes +# StringIndexOOBException when we try to dump SSL Server Certificate. +# when one of extensions has "" as value. +if defined?(JRUBY_VERSION) + require 'openssl' + require 'java' + module OpenSSL + module X509 + class Certificate + java_import 'java.security.cert.Certificate' + java_import 'java.security.cert.CertificateFactory' + java_import 'java.io.ByteArrayInputStream' + def to_text + cf = CertificateFactory.getInstance('X.509') + cf.generateCertificate(ByteArrayInputStream.new(self.to_der.to_java_bytes)).toString + end + end + end + end +end + + +class HTTPClient + + + # A module for common function. + module Util + + # URI abstraction; Addressable::URI or URI + require 'uri' + begin + require 'addressable/uri' + # Older versions doesn't have #default_port + unless Addressable::URI.instance_methods.include?(:default_port) # 1.9 only + raise LoadError + end + class AddressableURI < Addressable::URI + # Overwrites the original definition just for one line... + def authority + self.host && @authority ||= (begin + authority = "" + if self.userinfo != nil + authority << "#{self.userinfo}@" + end + authority << self.host + if self.port != self.default_port # ...HERE! Compares with default_port because self.port is not nil in this wrapper. + authority << ":#{self.port}" + end + authority + end) + end + + # HTTPClient expects urify("http://foo/").port to be not nil but 80 like URI. + def port + super || default_port + end + + # Captured from uri/generic.rb + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + end + AddressableEnabled = true + rescue LoadError + AddressableEnabled = false + end + + # Keyword argument helper. + # args:: given arguments. + # *field:: a list of arguments to be extracted. + # + # You can extract 3 arguments (a, b, c) with: + # + # include Util + # def my_method(*args) + # a, b, c = keyword_argument(args, :a, :b, :c) + # ... + # end + # my_method(1, 2, 3) + # my_method(:b => 2, :a = 1) + # + # instead of; + # + # def my_method(a, b, c) + # ... + # end + # + def keyword_argument(args, *field) + if args.size == 1 and Hash === args[0] + h = args[0] + if field.any? { |f| h.key?(f) } + return h.values_at(*field) + end + end + args + end + + # Keyword argument to hash helper. + # args:: given arguments. + # *field:: a list of arguments to be extracted. + # + # Returns hash which has defined keys. When a Hash given, returns it + # including undefined keys. When an Array given, returns a Hash which only + # includes defined keys. + def argument_to_hash(args, *field) + return nil if args.empty? + if args.size == 1 and Hash === args[0] + h = args[0] + if field.any? { |f| h.key?(f) } + return h + end + end + h = {} + field.each_with_index do |e, idx| + h[e] = args[idx] + end + h + end + + # Gets an URI instance. + def urify(uri) + if uri.nil? + nil + elsif uri.is_a?(URI) + uri + elsif AddressableEnabled + AddressableURI.parse(uri.to_s) + else + URI.parse(uri.to_s) + end + end + module_function :urify + + # Returns true if the given 2 URIs have a part_of relationship. + # * the same scheme + # * the same host String (no host resolution or IP-addr conversion) + # * the same port number + # * target URI's path starts with base URI's path. 
+ def uri_part_of(uri, part) + ((uri.scheme == part.scheme) and + (uri.host == part.host) and + (uri.port == part.port) and + uri.path.upcase.index(part.path.upcase) == 0) + end + module_function :uri_part_of + + # Returns parent directory URI of the given URI. + def uri_dirname(uri) + uri = uri.clone + uri.path = uri.path.sub(/\/[^\/]*\z/, '/') + uri + end + module_function :uri_dirname + + # Finds a value of a Hash. + def hash_find_value(hash, &block) + v = hash.find(&block) + v ? v[1] : nil + end + module_function :hash_find_value + + # Try to require a feature and returns true/false if loaded + # + # It returns 'true' for the second require in contrast of the standard + # require returns false if the feature is already loaded. + def try_require(feature) + require feature + true + rescue LoadError + false + end + module_function :try_require + + # show one warning message only once by caching message + # + # it cached all messages in memory so be careful not to show many kinds of warning message. + @@__warned = {} + def warning(message) + return if @@__warned.key?(message) + warn(message) + @@__warned[message] = true + end + + # Checks if the given URI is https. + def https?(uri) + uri.scheme && uri.scheme.downcase == 'https' + end + + def http?(uri) + uri.scheme && uri.scheme.downcase == 'http' + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/version.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/version.rb new file mode 100644 index 0000000..d1ab8e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/version.rb @@ -0,0 +1,3 @@ +class HTTPClient + VERSION = '2.8.3' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/webagent-cookie.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/webagent-cookie.rb new file mode 100644 index 0000000..74dbfef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/httpclient/webagent-cookie.rb @@ -0,0 +1,459 @@ +# cookie.rb is redistributed file which is originally included in Webagent +# version 0.6.2 by TAKAHASHI `Maki' Masayoshi. And it contains some bug fixes. +# You can download the entire package of Webagent from +# http://www.rubycolor.org/arc/. + + +# Cookie class +# +# I refered to w3m's source to make these classes. Some comments +# are quoted from it. I'm thanksful for author(s) of it. +# +# w3m homepage: http://ei5nazha.yz.yamagata-u.ac.jp/~aito/w3m/eng/ + +require 'time' +require 'monitor' +require 'httpclient/util' + +class WebAgent + + module CookieUtils + + def head_match?(str1, str2) + str1 == str2[0, str1.length] + end + + def tail_match?(str1, str2) + if str1.length > 0 + str1 == str2[-str1.length..-1].to_s + else + true + end + end + + def domain_match(host, domain) + return false if domain.nil? + domainname = domain.sub(/\.\z/, '').downcase + hostname = host.sub(/\.\z/, '').downcase + case domain + when /\d+\.\d+\.\d+\.\d+/ + return (hostname == domainname) + when '.' + return true + when /^\./ + # allows; host == rubyforge.org, domain == .rubyforge.org + return tail_match?(domainname, '.' 
+ hostname) + else + return (hostname == domainname) + end + end + end + + class Cookie + include CookieUtils + + attr_accessor :name, :value + attr_accessor :domain, :path + attr_accessor :expires ## for Netscape Cookie + attr_accessor :url + attr_writer :use, :secure, :http_only, :discard, :domain_orig, :path_orig, :override + + USE = 1 + SECURE = 2 + DOMAIN = 4 + PATH = 8 + DISCARD = 16 + OVERRIDE = 32 + OVERRIDE_OK = 32 + HTTP_ONLY = 64 + + def self.parse(str, url) + cookie = new + cookie.parse(str, url) + cookie + end + + def initialize + @name = @value = @domain = @path = nil + @expires = nil + @url = nil + @use = @secure = @http_only = @discard = @domain_orig = @path_orig = @override = nil + end + + def discard? + @discard + end + + def use? + @use + end + + def secure? + @secure + end + + def http_only? + @http_only + end + + def domain_orig? + @domain_orig + end + + def path_orig? + @path_orig + end + + def override? + @override + end + + def flag + flg = 0 + flg += USE if @use + flg += SECURE if @secure + flg += HTTP_ONLY if @http_only + flg += DOMAIN if @domain_orig + flg += PATH if @path_orig + flg += DISCARD if @discard + flg += OVERRIDE if @override + flg + end + + def set_flag(flag) + flag = flag.to_i + @use = true if flag & USE > 0 + @secure = true if flag & SECURE > 0 + @http_only = true if flag & HTTP_ONLY > 0 + @domain_orig = true if flag & DOMAIN > 0 + @path_orig = true if flag & PATH > 0 + @discard = true if flag & DISCARD > 0 + @override = true if flag & OVERRIDE > 0 + end + + def match?(url) + domainname = url.host + if (!domainname || + !domain_match(domainname, @domain) || + (@path && !head_match?(@path, url.path.empty? ? '/' : url.path)) || + (@secure && (url.scheme != 'https')) ) + return false + else + return true + end + end + + def join_quotedstr(array, sep) + ret = Array.new + old_elem = nil + array.each{|elem| + if (elem.scan(/"/).length % 2) == 0 + if old_elem + old_elem << sep << elem + else + ret << elem + old_elem = nil + end + else + if old_elem + old_elem << sep << elem + ret << old_elem + old_elem = nil + else + old_elem = elem.dup + end + end + } + ret + end + + def parse(str, url) + @url = url + # TODO: should not depend on join_quotedstr. scan with escape like CSV. + cookie_elem = str.split(/;/) + cookie_elem = join_quotedstr(cookie_elem, ';') + cookie_elem -= [""] # del empty elements, a cookie might included ";;" + first_elem = cookie_elem.shift + if first_elem !~ /([^=]*)(\=(.*))?/ + return + ## raise ArgumentError 'invalid cookie value' + end + @name = $1.strip + @value = normalize_cookie_value($3) + cookie_elem.each{|pair| + key, value = pair.split(/=/, 2) ## value may nil + key.strip! + value = normalize_cookie_value(value) + case key.downcase + when 'domain' + @domain = value + when 'expires' + @expires = nil + begin + @expires = Time.parse(value).gmtime if value + rescue ArgumentError + end + when 'path' + @path = value + when 'secure' + @secure = true ## value may nil, but must 'true'. + when 'httponly' + @http_only = true ## value may nil, but must 'true'. + else + warn("Unknown key: #{key} = #{value}") + end + } + end + + private + + def normalize_cookie_value(value) + if value + value = value.strip.sub(/\A"(.*)"\z/) { $1 } + value = nil if value.empty? + end + value + end + end + + ## + # An Array class that already includes the MonitorMixin module. 
+ # + class SynchronizedArray < Array + include MonitorMixin + end + + class CookieManager + include CookieUtils + + ### errors + class Error < StandardError; end + class ErrorOverrideOK < Error; end + class SpecialError < Error; end + + attr_reader :cookies + attr_accessor :cookies_file + attr_accessor :accept_domains, :reject_domains + + def initialize(file=nil) + @cookies = SynchronizedArray.new + @cookies_file = file + @is_saved = true + @reject_domains = Array.new + @accept_domains = Array.new + @netscape_rule = false + end + + def cookies=(cookies) + if cookies.is_a?(SynchronizedArray) + @cookies = cookies + else + @cookies = SynchronizedArray.new(cookies) + end + end + + def save_all_cookies(force = nil, save_unused = true, save_discarded = true) + @cookies.synchronize do + check_expired_cookies + if @is_saved and !force + return + end + File.open(@cookies_file, 'w') do |f| + @cookies.each do |cookie| + if (cookie.use? or save_unused) and + (!cookie.discard? or save_discarded) + f.print(cookie.url.to_s,"\t", + cookie.name,"\t", + cookie.value,"\t", + cookie.expires.to_i,"\t", + cookie.domain,"\t", + cookie.path,"\t", + cookie.flag,"\n") + end + end + end + end + @is_saved = true + end + + def save_cookies(force = nil) + save_all_cookies(force, false, false) + end + + def check_expired_cookies + @cookies.reject!{|cookie| + is_expired = (cookie.expires && (cookie.expires < Time.now.gmtime)) + if is_expired && !cookie.discard? + @is_saved = false + end + is_expired + } + end + + def parse(str, url) + cookie = WebAgent::Cookie.new + cookie.parse(str, url) + add(cookie) + end + + def find(url) + return nil if @cookies.empty? + + cookie_list = Array.new + @cookies.each{|cookie| + is_expired = (cookie.expires && (cookie.expires < Time.now.gmtime)) + if cookie.use? && !is_expired && cookie.match?(url) + if cookie_list.select{|c1| c1.name == cookie.name}.empty? + cookie_list << cookie + end + end + } + return make_cookie_str(cookie_list) + end + alias cookie_value find + + def add(given) + check_domain(given.domain, given.url.host, given.override?) + + domain = given.domain || given.url.host + path = given.path || given.url.path.sub(%r|/[^/]*\z|, '') + + cookie = nil + @cookies.synchronize do + check_expired_cookies + cookie = @cookies.find { |c| + c.domain == domain && c.path == path && c.name == given.name + } + if !cookie + cookie = WebAgent::Cookie.new + cookie.use = true + @cookies << cookie + end + end + + cookie.domain = domain + cookie.path = path + cookie.url = given.url + cookie.name = given.name + cookie.value = given.value + cookie.expires = given.expires + cookie.secure = given.secure? + cookie.http_only = given.http_only? + cookie.domain_orig = given.domain + cookie.path_orig = given.path + + if cookie.discard? || cookie.expires.nil? + cookie.discard = true + else + cookie.discard = false + @is_saved = false + end + end + + def load_cookies + return if !File.readable?(@cookies_file) + @cookies.synchronize do + @cookies.clear + File.open(@cookies_file,'r'){|f| + while line = f.gets + cookie = WebAgent::Cookie.new + @cookies << cookie + col = line.chomp.split(/\t/) + cookie.url = HTTPClient::Util.urify(col[0]) + cookie.name = col[1] + cookie.value = col[2] + if col[3].empty? or col[3] == '0' + cookie.expires = nil + else + cookie.expires = Time.at(col[3].to_i).gmtime + end + cookie.domain = col[4] + cookie.path = col[5] + cookie.set_flag(col[6]) + end + } + end + end + + # Who use it? 
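    # Usage sketch (illustrative only, not part of the vendored gem sources;
    # 'cookie.dat' and the example.org URI below are placeholders):
    #
    #   cm = WebAgent::CookieManager.new('cookie.dat')
    #   cm.load_cookies if File.readable?('cookie.dat')
    #   uri = HTTPClient::Util.urify('http://example.org/app/')
    #   cm.parse('SESSION=abc123; path=/app; expires=Fri, 01 Jan 2027 00:00:00 GMT', uri)
    #   cm.find(uri)              # => "SESSION=abc123", ready for a Cookie header
    #   cm.save_all_cookies(true) # force a write of every cookie to cookie.dat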
+ def check_cookie_accept_domain(domain) + unless domain + return false + end + @accept_domains.each{|dom| + if domain_match(domain, dom) + return true + end + } + @reject_domains.each{|dom| + if domain_match(domain, dom) + return false + end + } + return true + end + + private + + def make_cookie_str(cookie_list) + if cookie_list.empty? + return nil + end + + ret = '' + c = cookie_list.shift + ret += "#{c.name}=#{c.value}" + cookie_list.each{|cookie| + ret += "; #{cookie.name}=#{cookie.value}" + } + return ret + end + + # for conformance to http://wp.netscape.com/newsref/std/cookie_spec.html + attr_accessor :netscape_rule + SPECIAL_DOMAIN = [".com",".edu",".gov",".mil",".net",".org",".int"] + + def check_domain(domain, hostname, override) + return unless domain + + # [DRAFT 12] s. 4.2.2 (does not apply in the case that + # host name is the same as domain attribute for version 0 + # cookie) + # I think that this rule has almost the same effect as the + # tail match of [NETSCAPE]. + if domain !~ /^\./ && hostname != domain + domain = '.'+domain + end + # [NETSCAPE] rule + if @netscape_rule + n = domain.scan(/\./).length + if n < 2 + cookie_error(SpecialError.new, override) + elsif n == 2 + ## [NETSCAPE] rule + ok = SPECIAL_DOMAIN.select{|sdomain| + sdomain == domain[-(sdomain.length)..-1] + } + if ok.empty? + cookie_error(SpecialError.new, override) + end + end + end + # this implementation does not check RFC2109 4.3.2 case 2; + # the portion of host not in domain does not contain a dot. + # according to nsCookieService.cpp in Firefox 3.0.4, Firefox 3.0.4 + # and IE does not check, too. + end + + # not tested well; used only netscape_rule = true. + def cookie_error(err, override) + if !err.kind_of?(ErrorOverrideOK) || !override + raise err + end + end + end +end + +class HTTPClient + CookieManager = WebAgent::CookieManager +end unless defined?(HTTPClient::CookieManager) diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/jsonclient.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/jsonclient.rb new file mode 100755 index 0000000..fa099b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/jsonclient.rb @@ -0,0 +1,63 @@ +require 'httpclient' +require 'json' + +# JSONClient auto-converts Hash <-> JSON in request and response. +# * For POST or PUT request, convert Hash body to JSON String with 'application/json; charset=utf-8' header. 
+# * For response, convert JSON String to Hash when content-type is '(application|text)/(x-)?json' +class JSONClient < HTTPClient + CONTENT_TYPE_JSON_REGEX = /(application|text)\/(x-)?json/i + CONTENT_TYPE_JSON = 'application/json; charset=utf-8' + + attr_reader :content_type_json_request + attr_reader :content_type_json_response_regex + + def initialize(*args) + super + @content_type_json_request = CONTENT_TYPE_JSON + @content_type_json_response_regex = CONTENT_TYPE_JSON_REGEX + end + + def post(uri, *args, &block) + request(:post, uri, argument_to_hash_for_json(args), &block) + end + + def put(uri, *args, &block) + request(:put, uri, argument_to_hash_for_json(args), &block) + end + + def request(method, uri, *args, &block) + res = super + if @content_type_json_response_regex =~ res.content_type + res = wrap_json_response(res) + end + res + end + +private + + def argument_to_hash_for_json(args) + hash = argument_to_hash(args, :body, :header, :follow_redirect) + if hash[:body].is_a?(Hash) + hash[:header] = json_header(hash[:header]) + hash[:body] = JSON.generate(hash[:body]) + end + hash + end + + def json_header(header) + header ||= {} + if header.is_a?(Hash) + header['Content-Type'] = @content_type_json_request + else + header << ['Content-Type', @content_type_json_request] + end + header + end + + def wrap_json_response(original) + res = ::HTTP::Message.new_response(JSON.parse(original.content)) + res.http_header = original.http_header + res.previous = original + res + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/oauthclient.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/oauthclient.rb new file mode 100644 index 0000000..f9bb9d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/lib/oauthclient.rb @@ -0,0 +1,111 @@ +# HTTPClient - HTTP client library. +# Copyright (C) 2000-2015 NAKAMURA, Hiroshi . +# +# This program is copyrighted free software by NAKAMURA, Hiroshi. You can +# redistribute it and/or modify it under the same terms of Ruby's license; +# either the dual license version in 2003, or any later version. + + +require 'httpclient' + +module HTTP + class Message + attr_accessor :oauth_params + end +end + + +# OAuthClient provides OAuth related methods in addition to HTTPClient. +# +# See sample/ dir how to use OAuthClient. There are sample clients for Twitter, +# FriendFeed and Google Buzz. +class OAuthClient < HTTPClient + + # HTTPClient::OAuth::Config:: OAuth configurator. + attr_accessor :oauth_config + + # Creates a OAuthClient instance which provides OAuth related methods in + # addition to HTTPClient. + # + # Method signature is as same as HTTPClient. See HTTPClient.new + def initialize(*arg) + super + @oauth_config = HTTPClient::OAuth::Config.new + self.www_auth.oauth.set_config(nil, @oauth_config) + self.www_auth.oauth.challenge(nil) + self.strict_response_size_check = true + end + + # Get request token. + # uri:: URI for request token. + # callback:: callback String. This can be nil for OAuth 1.0a + # param:: Additional query parameter Hash. + # + # It returns a HTTP::Message instance as a response. When the request is made + # successfully, you can retrieve a pair of request token and secret like + # following; + # res = client.get_request_token(...) 
+ # token = res.oauth_params['oauth_token'] + # secret = res.oauth_params['oauth_token_secret'] + def get_request_token(uri, callback = nil, param = nil) + oauth_config.token = nil + oauth_config.secret = nil + oauth_config.callback = callback + oauth_config.verifier = nil + res = request(oauth_config.http_method, uri, param) + filter_response(res) + res + end + + # Get access token. + # uri:: URI for request token. + # request_token:: a request token String. See get_access_token. + # request_token_secret:: a request secret String. See get_access_token. + # verifier:: a verifier tag String. + # + # When the request succeeds and the server returns a pair of access token + # and secret, oauth_config.token and oauth_token.secret are updated with + # the access token. Then you can call OAuthClient#get, #post, #delete, etc. + # All requests are signed. + def get_access_token(uri, request_token, request_token_secret, verifier = nil) + oauth_config.token = request_token + oauth_config.secret = request_token_secret + oauth_config.callback = nil + oauth_config.verifier = verifier + res = request(oauth_config.http_method, uri) + filter_response(res) + oauth_config.verifier = nil + res + end + + # Parse response and returns a Hash. + def get_oauth_response(res) + enc = res.header['content-encoding'] + body = nil + if enc and enc[0] and enc[0].downcase == 'gzip' + body = Zlib::GzipReader.wrap(StringIO.new(res.content)) { |gz| gz.read } + else + body = res.content + end + body.split('&').inject({}) { |r, e| + key, value = e.split('=', 2) + r[unescape(key)] = unescape(value) + r + } + end + +private + + def unescape(escaped) + escaped ? ::HTTP::Message.unescape(escaped) : nil + end + + def filter_response(res) + if res.status == 200 + if res.oauth_params = get_oauth_response(res) + oauth_config.token = res.oauth_params['oauth_token'] + oauth_config.secret = res.oauth_params['oauth_token_secret'] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/async.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/async.rb new file mode 100644 index 0000000..283ddc8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/async.rb @@ -0,0 +1,8 @@ +require 'httpclient' + +c = HTTPClient.new +conn = c.get_async("http://dev.ctor.org/") +io = conn.pop.content +while str = io.read(40) + p str +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/auth.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/auth.rb new file mode 100644 index 0000000..7ec9ab8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/auth.rb @@ -0,0 +1,11 @@ +require 'httpclient' + +c = HTTPClient.new +c.debug_dev = STDOUT + +# for Proxy authentication: supports Basic, Negotiate and NTLM. +#c.set_proxy_auth("admin", "admin") + +# for WWW authentication: supports Basic, Digest and Negotiate. 
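# Note (editorial sketch, values are placeholders): the first argument of
# set_auth scopes the credentials, so they are sent only for URLs under that
# base; passing nil applies them wherever a server challenges.
#   c.set_auth(nil, "user", "user")
#   c.force_basic_auth = true  # send Basic preemptively instead of waiting for a 401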
+c.set_auth("http://dev.ctor.org/http-access2/", "user", "user") +p c.get("http://dev.ctor.org/http-access2/login") diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/cookie.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/cookie.rb new file mode 100644 index 0000000..606bfd3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/cookie.rb @@ -0,0 +1,18 @@ +#!/usr/bin/env ruby + +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +proxy = ENV['HTTP_PROXY'] +clnt = HTTPClient.new(proxy) +clnt.set_cookie_store("cookie.dat") +clnt.debug_dev = STDOUT if $DEBUG + +while urlstr = ARGV.shift + response = clnt.get(urlstr){ |data| + print data + } + p response.contenttype +end + +clnt.save_cookie_store diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/dav.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/dav.rb new file mode 100644 index 0000000..61862cf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/dav.rb @@ -0,0 +1,103 @@ +require 'uri' +require 'httpclient' + +class DAV + attr_reader :headers + + def initialize(uri = nil) + @uri = nil + @headers = {} + open(uri) if uri + proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] || nil + @client = HTTPClient.new(proxy) + end + + def open(uri) + @uri = if uri.is_a?(URI) + uri + else + URI.parse(uri) + end + end + + def set_basic_auth(user_id, passwd) + @client.set_basic_auth(@uri, user_id, passwd) + end + + # TODO: propget/propset support + + def propfind(target) + target_uri = @uri + target + res = @client.propfind(target_uri) + res.body.content + end + + def get(target, local = nil) + local ||= target + target_uri = @uri + target + if FileTest.exist?(local) + raise RuntimeError.new("File #{ local } exists.") + end + f = File.open(local, "wb") + res = @client.get(target_uri, nil, @headers) do |data| + f << data + end + f.close + STDOUT.puts("#{ res.header['content-length'][0] } bytes saved to file #{ target }.") + end + + def debug_dev=(dev) + @client.debug_dev = dev + end + + def get_content(target) + target_uri = @uri + target + @client.get_content(target_uri, nil, @headers) + end + + def put_content(target, content) + target_uri = @uri + target + res = @client.put(target_uri, content, @headers) + if res.status < 200 or res.status >= 300 + raise "HTTP PUT failed: #{res.inspect}" + end + end + + class Mock + attr_reader :headers + + def initialize(uri = nil) + @uri = nil + @headers = {} + open(uri) if uri + + @cache = {} + end + + def open(uri) + @uri = uri.is_a?(URI) ? 
uri : URI.parse(uri) + end + + def set_basic_auth(user_id, passwd) + # ignore + end + + def propfind(target) + # not found + nil + end + + def get(target, local = nil) + # ignore + end + + def get_content(target) + @cache[target] + end + + def put_content(target, content) + @cache[target] = content + nil + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/howto.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/howto.rb new file mode 100644 index 0000000..c5e24ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/howto.rb @@ -0,0 +1,49 @@ +#!/usr/bin/env ruby + +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +proxy = ENV['HTTP_PROXY'] +clnt = HTTPClient.new(proxy) +clnt.set_cookie_store("cookie.dat") +target = ARGV.shift || "http://localhost/foo.cgi" + +puts +puts '= GET content directly' +puts clnt.get_content(target) + +puts '= GET result object' +result = clnt.get(target) +puts '== Header object' +p result.header +puts "== Content-type" +p result.contenttype +puts '== Body object' +p result.body +puts '== Content' +print result.content +puts '== GET with Block' +clnt.get(target) do |str| + puts str +end + +puts +puts '= GET with query' +puts clnt.get(target, { "foo" => "bar", "baz" => "quz" }).content + +puts +puts '= GET with query 2' +puts clnt.get(target, [["foo", "bar1"], ["foo", "bar2"]]).content + +clnt.debug_dev = STDERR +puts +puts '= GET with extra header' +puts clnt.get(target, nil, { "SOAPAction" => "HelloWorld" }).content + +puts +puts '= GET with extra header 2' +puts clnt.get(target, nil, [["Accept", "text/plain"], ["Accept", "text/html"]]).content + +clnt.debug_dev = nil + +clnt.save_cookie_store diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/jsonclient.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/jsonclient.rb new file mode 100644 index 0000000..4a12f3f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/jsonclient.rb @@ -0,0 +1,67 @@ +require 'httpclient' +require 'json' + +module HTTP + class Message + # Returns JSON object of message body + alias original_content content + def content + if JSONClient::CONTENT_TYPE_JSON_REGEX =~ content_type + JSON.parse(original_content) + else + original_content + end + end + end +end + + +# JSONClient provides JSON related methods in addition to HTTPClient. 
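# Usage sketch (illustrative only; the URL and payload are placeholders):
#
#   client = JSONClient.new
#   res = client.post('http://localhost:8080/items', :body => { 'name' => 'foo' })
#   res.content           # => parsed Hash when the server replies with a JSON content-type
#   res.original_content  # => the raw JSON String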
+class JSONClient < HTTPClient + CONTENT_TYPE_JSON_REGEX = /(application|text)\/(x-)?json/i + + attr_accessor :content_type_json + + class JSONRequestHeaderFilter + attr_accessor :replace + + def initialize(client) + @client = client + @replace = false + end + + def filter_request(req) + req.header['content-type'] = @client.content_type_json if @replace + end + + def filter_response(req, res) + @replace = false + end + end + + def initialize(*args) + super + @header_filter = JSONRequestHeaderFilter.new(self) + @request_filter << @header_filter + @content_type_json = 'application/json; charset=utf-8' + end + + def post(uri, *args, &block) + @header_filter.replace = true + request(:post, uri, jsonify(argument_to_hash(args, :body, :header, :follow_redirect)), &block) + end + + def put(uri, *args, &block) + @header_filter.replace = true + request(:put, uri, jsonify(argument_to_hash(args, :body, :header)), &block) + end + +private + + def jsonify(hash) + if hash[:body] && hash[:body].is_a?(Hash) + hash[:body] = JSON.generate(hash[:body]) + end + hash + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_buzz.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_buzz.rb new file mode 100644 index 0000000..15205e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_buzz.rb @@ -0,0 +1,57 @@ +require 'oauthclient' +require 'zlib' +require 'stringio' + +# Get your own consumer token from http://code.google.com/apis/accounts/docs/RegistrationForWebAppsAuto.html +consumer_key = nil +consumer_secret = nil + +callback = 'http://localhost/' # should point somewhere else... +scope = 'https://www.googleapis.com/auth/buzz' +request_token_url = 'https://www.google.com/accounts/OAuthGetRequestToken' +access_token_url = 'https://www.google.com/accounts/OAuthGetAccessToken' + +STDOUT.sync = true + +# create OAuth client. +client = OAuthClient.new +client.oauth_config.consumer_key = consumer_key +client.oauth_config.consumer_secret = consumer_secret +client.oauth_config.signature_method = 'HMAC-SHA1' +client.oauth_config.http_method = :get # Twitter does not allow :post +client.debug_dev = STDERR if $DEBUG + +# Get request token. +res = client.get_request_token(request_token_url, callback, :scope => scope) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +token = res.oauth_params['oauth_token'] +secret = res.oauth_params['oauth_token_secret'] +raise if token.nil? or secret.nil? + +# You need to confirm authorization out of band. +puts +puts "Go here and do confirm: https://www.google.com/buzz/api/auth/OAuthAuthorizeToken?oauth_token=#{token}&domain=#{consumer_key}&scope=#{scope}" +puts "Type oauth_verifier (if given) and hit [enter] to go" +require 'cgi' +verifier = CGI.unescape(gets.chomp) +verifier = nil if verifier.empty? + +# Get access token. +res = client.get_access_token(access_token_url, token, secret, verifier) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +id = res.oauth_params['user_id'] + +puts +puts "Access token usage example" +puts "Hit [enter] to go" +gets + +# Access to a protected resource. 
+# @consumption requires Buzz API +puts client.get_content("https://www.googleapis.com/buzz/v1/activities/@me/@consumption", :alt => :json, :prettyprint => true) diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_friendfeed.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_friendfeed.rb new file mode 100644 index 0000000..02d3496 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_friendfeed.rb @@ -0,0 +1,59 @@ +require 'oauthclient' + +# Get your own consumer token from http://friendfeed.com/api/applications +consumer_key = 'EDIT HERE' +consumer_secret = 'EDIT HERE' + +request_token_url = 'https://friendfeed.com/account/oauth/request_token' +oob_authorize_url = 'https://friendfeed.com/account/oauth/authorize' +access_token_url = 'https://friendfeed.com/account/oauth/access_token' + +STDOUT.sync = true + +# create OAuth client. +client = OAuthClient.new +client.oauth_config.consumer_key = consumer_key +client.oauth_config.consumer_secret = consumer_secret +client.oauth_config.signature_method = 'HMAC-SHA1' +client.oauth_config.http_method = :get # FriendFeed does not allow :post +client.debug_dev = STDERR if $DEBUG + +# Get request token. +res = client.get_request_token(request_token_url) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +token = res.oauth_params['oauth_token'] +secret = res.oauth_params['oauth_token_secret'] +raise if token.nil? or secret.nil? + +# You need to confirm authorization out of band. +puts +puts "Go here and do confirm: #{oob_authorize_url}?oauth_token=#{token}" +puts "Hit [enter] to go" +gets + +# Get access token. +# FYI: You may need to re-construct OAuthClient instance here. +# In normal web app flow, getting access token and getting request token +# must be done in different HTTP requests. +# client = OAuthClient.new +# client.oauth_config.consumer_key = consumer_key +# client.oauth_config.consumer_secret = consumer_secret +# client.oauth_config.signature_method = 'HMAC-SHA1' +# client.oauth_config.http_method = :get # Twitter does not allow :post +res = client.get_access_token(access_token_url, token, secret) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +username = res.oauth_params['username'] + +puts +puts "Access token usage example" +puts "Hit [enter] to go" +gets + +# Access to a protected resource. (user profile) +puts client.get("http://friendfeed-api.com/v2/feedinfo/#{username}?format=json") diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_twitter.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_twitter.rb new file mode 100644 index 0000000..731645b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/oauth_twitter.rb @@ -0,0 +1,61 @@ +require 'oauthclient' + +# Get your own consumer token from http://twitter.com/apps +consumer_key = 'EDIT HERE' +consumer_secret = 'EDIT HERE' + +callback = ARGV.shift # can be nil for OAuth 1.0. (not 1.0a) +request_token_url = 'https://api.twitter.com/oauth/request_token' +oob_authorize_url = 'https://api.twitter.com/oauth/authorize' +access_token_url = 'https://api.twitter.com/oauth/access_token' + +STDOUT.sync = true + +# create OAuth client. 
+client = OAuthClient.new +client.oauth_config.consumer_key = consumer_key +client.oauth_config.consumer_secret = consumer_secret +client.oauth_config.signature_method = 'HMAC-SHA1' +client.oauth_config.http_method = :get # Twitter does not allow :post +client.debug_dev = STDERR if $DEBUG + +# Get request token. +res = client.get_request_token(request_token_url, callback) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +token = res.oauth_params['oauth_token'] +secret = res.oauth_params['oauth_token_secret'] +raise if token.nil? or secret.nil? + +# You need to confirm authorization out of band. +puts +puts "Go here and do confirm: #{oob_authorize_url}?oauth_token=#{token}" +puts "Type oauth_verifier/PIN (if given) and hit [enter] to go" +verifier = gets.chomp +verifier = nil if verifier.empty? + +# Get access token. +# FYI: You may need to re-construct OAuthClient instance here. +# In normal web app flow, getting access token and getting request token +# must be done in different HTTP requests. +# client = OAuthClient.new +# client.oauth_config.consumer_key = consumer_key +# client.oauth_config.consumer_secret = consumer_secret +# client.oauth_config.signature_method = 'HMAC-SHA1' +# client.oauth_config.http_method = :get # Twitter does not allow :post +res = client.get_access_token(access_token_url, token, secret, verifier) +p res.status +p res.oauth_params +p res.content +p client.oauth_config +id = res.oauth_params['user_id'] + +puts +puts "Access token usage example" +puts "Hit [enter] to go" +gets + +# Access to a protected resource. (DM) +puts client.get("https://api.twitter.com/1.1/direct_messages.json") diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/0cert.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/0cert.pem new file mode 100644 index 0000000..9dfce02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/0cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjzCCAnegAwIBAgIBADANBgkqhkiG9w0BAQUFADAtMQswCQYDVQQGEwJDWjEN +MAsGA1UEChMEUnVieTEPMA0GA1UEAxMGUnVieUNBMB4XDTAzMDUzMTAyNDcyOFoX +DTA1MDUzMDAyNDcyOFowLTELMAkGA1UEBhMCQ1oxDTALBgNVBAoTBFJ1YnkxDzAN +BgNVBAMTBlJ1YnlDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANfw +JSR2OxME6WBRYekhvN1M7uZwJLC3qHhRtBm583x7MS3yzF/HwFNH1oAmOmzUcDSz +Y9OmTDVIMU9b0bSYHuu8KswrRWmx/iEhgU/hODS1MQKi+uHoMTtY/dVXwTLAfw5d +UMPQ9F5EhiV57sKWrS7vIVBtpxUNgfJW61FP+Ru3Lr8uhUgXPECHck8fjFu8w2Sw +JQBQ8ePrfKBiTHpOzKmDVXXWzVYzIQN0zQfpu/FSjOJ4xWV3wmfltN4FK+UkpALW +3RKsNFx+Pmji0fr2/PeEPhzKkhWk5b+pYrIlTNkagS7u8EoeLtY1y3LSZvopbcPI +l0QFQHCMtxS7ngC6wsECAwEAAaOBuTCBtjAPBgNVHRMECDAGAQH/AgEAMC0GCWCG +SAGG+EIBDQQgFh5HZW5lcmF0ZWQgYnkgT3BlblNTTCBmb3IgUnVieS4wHQYDVR0O +BBYEFA2IpXrgDnpJ9p6bfBmtM6j0IejmMFUGA1UdIwROMEyAFA2IpXrgDnpJ9p6b +fBmtM6j0IejmoTGkLzAtMQswCQYDVQQGEwJDWjENMAsGA1UEChMEUnVieTEPMA0G +A1UEAxMGUnVieUNBggEAMA0GCSqGSIb3DQEBBQUAA4IBAQB1b4iTezqgZj6FhcAc +0AtleAc8TpUn8YOX6zSlHG6I7tKcLfnWen9eFs4Jx73Bup5wHvcrHF6U+/nAdpb5 +R7lkNbjWFWwoi5cQC36mh4ym8GWhUZUf8362yPzimbdKMBIiDoIBY8NyIBBORU2m +BlXsHr0dqGpeTmNnFhgi1FCN+F/VitplOUP9E8NzsgLS/GY9HO160HbPXoZc4yp5 +KFweqmyhbxZvkrrD6LanvB23cJP+TeHzA5mLJtIBciFq+AJe+cDTlJYjtLcsxkAy +Evojd//5gePjgSz3gbzU5Zq0/tI2T0rQYnmySoSQ1u2+i5FrMvEjh/H6tZtQ6nO0 +ROTL +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/0key.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/0key.pem new file mode 100644 index 0000000..71b7fe6 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/0key.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,BC9A85421E11A052 + +dlFN8woboqu1mLWHSe6HnBCzrJUFw1NLv4sJM8MFPcSGkPEJoqceRQ5XFFlwnhwT +WStidVQLAaCqWxPlQbvKs/eW2yEZRX3JhNHlGkuZUbAuqC7FGcIPCFtXmshmR3/K +wSxM3DuEBA31kHoyH/DGPEOcp/L9M95fU2w/1Zz895nXgECdlYfMB7KEXJ7S0TVD +NvcH6dcNS2I56ikUfRd9MA+tTtJTDsolBii3HpNkI+i8GeAAznfuyhCa+RasbMnR +3SRHSgiZpK01JkepKEumG1iyGXBE4GMlF3dK9TL4UTfK7PjbW1BRGVdmGayA5L8X +vpn9uIMbpFUD7nU/cSs6+ECYy9Fdq/d8oE7YRVpqGA2mX+Vvjzc6ZByiW8eH8uYW +d1YCySAzttQUcDFx4YlpbQN+NSE67amGY/nkXn3CnQuB2uDiUm6818p1jjhEbIj6 +SQpNgpuhHPhyawMuC98BvxEXoSTFRmXx5h+Qgo8nfCBUpGUCA8STuJG+EebIXMHH +yNdZGYMdkkuzow1G+bcHaIV4pgD0hOX6D+AzhO8/11gsJNiZHWAkxRtXres0B7JQ +BTv6mrpmRsr1TbY7K55y5QePkvroiqI4BX2//RgAcUw+aBzPCSt87YU55pPxxDCv +KnoMQJapUIlXgYBs+2GBiFQlr3dAnzduXrXkZa/TuE0853QDDnKWN3aWapW0EieH +sDxYz6kZ6c/vjDJtNjjgysKvi2ZFnJMk92fi1sNd2MrH9w1vSmjHw6+b9REH+r6K +YCcMzCUnIV5Y5jgbnrY5jWlB5Jt5PlU+QDFTBNsdctyoES3h5yQh48hcpnJOy4YT +tn9jEmIAYM7QZtGZrY5yiZRZbM5tLL7VeXA0M7N0ivjZUVP4NBUV1IFhLpeus3Yo +yXB99Sib/M8fqlmnRlyKKaRseB9a80/LJdLJC7Q1aJG9JYTTpumydppDzvwwUFV/ +yHQibPzWhnHTElyXSGeWWQ/4gKJXJFkSyrBeKt/DcDggEnpsVw7kAeo0KJLWvBq2 +0CwnWxPsseIYSLdwE0wOZTnWvHC6viCCqxaFwROa+bHsgCtRR8xwOKVg0RQRexQi +SKsCEyq4hfKH3Kd7KkI5c4iCHrNDxQiYVAHGo0kuVWPNP9PH8XrRuP7f2Aq6ObEU +cGLN+OKlzChKJQZZzdthGTjO52cZERokS7etLs5yPNM27CUZIy2gUsWwANG700ov +DYi4S9j4y2fK9qVFGuNmZ7nNQ6juHIN8ZpZObmzGz/GEsVy8zYGV7jH2bC8fhCg1 +wiTn0CHyfI0AfJ5zQMQ48z/ATI5+/pP5DuJ4kPLYU90EdIlIQ/9zQDRxVPvUalas +kskX8qdsbILaLqKzqvPa6jkhncV7SVQPhxIrs6vU2RWrsU1Qw9wHhzaITu9yb1l1 +s8uialiJWnWOwkhIAUULOoGRnBB7U6sseFh7LcYHc2sZYJzQQO3g14ANvtsu/ILT +abUPXtGHSUlI4kUjI82NEkyQz/pDD9VkhTefiFHNymIkUaZBulmR5QdcEcQn6bku +J/P3M/T7CtsCPdBUrzDI71eAYqXPjikg+8unKWk9c/p+V7wXtvsdgJf3+xht/YUQ +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/1000cert.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/1000cert.pem new file mode 100644 index 0000000..d7eeea6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/1000cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwLTELMAkGA1UEBhMCQ1ox +DTALBgNVBAoTBFJ1YnkxDzANBgNVBAMTBlJ1YnlDQTAeFw0wMzA1MzEwMzUwNDFa +Fw0wNDA1MzAwMzUwNDFaMDAxCzAJBgNVBAYTAkNaMQ0wCwYDVQQKEwRSdWJ5MRIw +EAYDVQQDEwlsb2NhbGhvc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALhs +fh4i1c+K57vFG7SUDfdxuSlbPUqaV0sbiuvWb0f7B2T7bHicaIRsDYW7PQRLLwwR +Pd+aKg3KuwhWN47faRam19Z3yWCD7Tg+BhXDqlXnz6snnr4APpAxc22kJKjzuil6 +sp+QTkl/EFKI3+ocDur1UB+kSOmTzsDmepaWUZwTAgMBAAGjgbMwgbAwCQYDVR0T +BAIwADAtBglghkgBhvhCAQ0EIBYeR2VuZXJhdGVkIGJ5IE9wZW5TU0wgZm9yIFJ1 +YnkuMB0GA1UdDgQWBBQlYESgTYdkTOYy02+/jGSqa+OpzjBVBgNVHSMETjBMgBQN +iKV64A56Sfaem3wZrTOo9CHo5qExpC8wLTELMAkGA1UEBhMCQ1oxDTALBgNVBAoT +BFJ1YnkxDzANBgNVBAMTBlJ1YnlDQYIBADANBgkqhkiG9w0BAQUFAAOCAQEAJh9v +ehhUv69oilVWGvGB6xCr8LgErnO9QdAyqJE2xBhbNaB3crjWDdQTz4UNvCQoJG/4 +Oa9Vp10vM8E0ZMVHer87WM9tPEOg09r38U/1c7gSYBkPSGQfeWtZNjQ1YOm6RDx4 +JJp9sp9v/CdBlVXaBQQd+MQFny7E+EkMHRfiv89KTfOs0wYdirLrM1C90CZUEj0i +cMcBdHzH5XcNpWB1ag4cNiUn2ljsaWlUpEg48gLe2FLJVPBio+iZnOm/C6KIwBMO +BCVxkZ6oIR87JT4xbr8SxRS9d/irhVU9MtGYwMe4MPSztefASdmEyj59ZFCLKQHV ++ltGb7/b7DetoT1spA== +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/1000key.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/1000key.pem new file mode 100644 index 0000000..34261ad --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/1000key.pem @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,5E9A9AC8F0F62A4A + +3/VDlc8E13xKZr+MXt+HVZCiuWN1gsVx1bTZE5fN5FVoJ45Bgy2zAegnqZiP1NNy +j/76Vy9Ru/G9HPh5QPbIPl+md+tOyJV2M23a+5jESk1tLuWt5lRqmTtHN000844l +uCgZPPhRV7nhYPC5f6Gqw/xDl/EZsElqJM/hl2JbqiVKfT5/1i0STU8LNkoaLWrJ +kQhc7hOR5ihmDPeD8mA99bmGD+UyyqzzLTtKvRbBObyi9dy7cQ5Q+iptQWTUuEKI ++W7b8f8/Iiin4JJZGpFuhQSx0ARjT0fuYNXddmz1L3Gu3sjzN1GvT2T3GlpiF4/7 +ERS8Q43zjoCP8nC2MTSvdNRRoPMBg2SDS2ZIq4GSiKsKjeN+GnPCycAMeZSr5yG6 +VMBJLoAJ7XIcl3se8gF6hH1AfhCzDaK/2pKLP9jH/W4g6VvUBazKEQCNbZXSTpQ4 +8EfvJBPpplFs3Zid6iwC/WjKhFBuBBfycwNJjXG9x1fMPkBM8HeiZXgrXoUXJMEP +RF05Beo0HPPEOPIxcG6EVmnpDvs8uC+xIQ6UE6DrLGK5TnR6kdz3BDpeAehujSss +wfZiOvuJQZQl+oovOH54pcwAwhicgRcNdIX47kHrXNL1vQMYTXte+ZzDGyoOXd0W +qf1CZbrjULT9nfJFWMMicTnLM/6iQx+3bTkXXvk0qP0qAoIPqtY4rwt6yHgq937A +LubDxudMWV0hqcnH8cBCPHfWdE4HELw4RcVXmQH43yHs1gwShyG9rTS+PCKoRr8u +bpssW6J0xJmilf1KprbNWJyof9i0CtSVOlUt6ttoinwqj8Me01dHqQ== +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/htdocs/index.html b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/htdocs/index.html new file mode 100644 index 0000000..6e871d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/htdocs/index.html @@ -0,0 +1,10 @@ + + + SSL test + + +
+ Verification succeeded?
+ + diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/ssl_client.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/ssl_client.rb new file mode 100644 index 0000000..0c47520 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/ssl_client.rb @@ -0,0 +1,22 @@ +#!/usr/bin/env ruby + +$:.unshift(File.join('..', '..', 'lib')) +require 'httpclient' + +url = ARGV.shift || 'https://localhost:8808/' +uri = URI.parse(url) + +#ca_file = "0cert.pem" +#crl_file = '0crl.pem' + +# create CA's cert in pem format and run 'c_rehash' in trust_certs dir. before +# using this. +ca_path = File.join(File.dirname(File.expand_path(__FILE__)), "trust_certs") + +proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] || nil +h = HTTPClient.new(proxy) +#h.ssl_config.add_trust_ca(ca_file) +#h.ssl_config.add_crl(crl_file) +h.ssl_config.add_trust_ca(ca_path) + +print h.get_content(url) diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/webrick_httpsd.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/webrick_httpsd.rb new file mode 100644 index 0000000..1994f58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/ssl/webrick_httpsd.rb @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby + +require 'webrick/https' +require 'getopts' + +getopts nil, 'r:', 'p:8808' + +dir = File::dirname(File::expand_path(__FILE__)) + +# Pass phrase of '1000key.pem' is '1000'. +data = open(File::join(dir, "1000key.pem")){|io| io.read } +pkey = OpenSSL::PKey::RSA.new(data) +data = open(File::join(dir, "1000cert.pem")){|io| io.read } +cert = OpenSSL::X509::Certificate.new(data) + +s = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Port => $OPT_p.to_i, + :Logger => nil, + :DocumentRoot => $OPT_r || File::join(dir, "/htdocs"), + :SSLEnable => true, + :SSLVerifyClient => ::OpenSSL::SSL::VERIFY_NONE, + :SSLCertificate => cert, + :SSLPrivateKey => pkey, + :SSLCertName => nil, + :SSLCACertificateFile => "all.pem" +) +trap("INT"){ s.shutdown } +s.start diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/stream.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/stream.rb new file mode 100644 index 0000000..3d1478b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/stream.rb @@ -0,0 +1,21 @@ +$:.unshift(File.join('..', 'lib')) +require "httpclient" + +c = HTTPClient.new + +piper, pipew = IO.pipe +conn = c.post_async("http://localhost:8080/stream", piper) + +Thread.new do + res = conn.pop + while str = res.content.read(10) + p str + end +end + +p "type here" +while line = STDIN.gets + pipew << line +end +pipew.close +sleep 5 diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/thread.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/thread.rb new file mode 100644 index 0000000..594cc97 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/thread.rb @@ -0,0 +1,27 @@ +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +urlstr = ARGV.shift + +proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] +h = HTTPClient.new(proxy) + +count = 20 + +res = [] +g = [] +for i in 0..count + g << Thread.new { + res[i] = h.get(urlstr) + } +end + +g.each do |th| + th.join +end + +for i in 0..(count - 1) + raise unless (res[i].content == res[i + 1].content) +end + +puts 'ok' diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/wcat.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/wcat.rb new file mode 100644 index 0000000..0781b6e --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/sample/wcat.rb @@ -0,0 +1,21 @@ +#!/usr/bin/env ruby + +# wcat for http-access2 +# Copyright (C) 2001 TAKAHASHI Masayoshi + +$:.unshift(File.join('..', 'lib')) +require 'httpclient' + +if ENV['HTTP_PROXY'] + h = HTTPClient.new(ENV['HTTP_PROXY']) +else + h = HTTPClient.new() +end + +while urlstr = ARGV.shift + response = h.get(urlstr){ |data| + print data + } + p response.contenttype + p response.peer_cert if /^https/i =~ urlstr +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/ca-chain.pem b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/ca-chain.pem new file mode 100644 index 0000000..c642a41 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/ca-chain.pem @@ -0,0 +1,44 @@ +-----BEGIN CERTIFICATE----- +MIID0DCCArigAwIBAgIBADANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDIzMloXDTM2MDEyMjAwNDIzMlowPDELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQswCQYDVQQDDAJDQTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANbv0x42BTKFEQOE+KJ2XmiSdZpR +wjzQLAkPLRnLB98tlzs4xo+y4RyY/rd5TT9UzBJTIhP8CJi5GbS1oXEerQXB3P0d +L5oSSMwGGyuIzgZe5+vZ1kgzQxMEKMMKlzA73rbMd4Jx3u5+jdbP0EDrPYfXSvLY +bS04n2aX7zrN3x5KdDrNBfwBio2/qeaaj4+9OxnwRvYP3WOvqdW0h329eMfHw0pi +JI0drIVdsEqClUV4pebT/F+CPUPkEh/weySgo9wANockkYu5ujw2GbLFcO5LXxxm +dEfcVr3r6t6zOA4bJwL0W/e6LBcrwiG/qPDFErhwtgTLYf6Er67SzLyA66UCAwEA +AaOB3DCB2TAPBgNVHRMBAf8EBTADAQH/MDEGCWCGSAGG+EIBDQQkFiJSdWJ5L09w +ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBRJ7Xd380KzBV7f +USKIQ+O/vKbhDzAOBgNVHQ8BAf8EBAMCAQYwZAYDVR0jBF0wW4AUSe13d/NCswVe +31EiiEPjv7ym4Q+hQKQ+MDwxCzAJBgNVBAYMAkpQMRIwEAYDVQQKDAlKSU4uR1Iu +SlAxDDAKBgNVBAsMA1JSUjELMAkGA1UEAwwCQ0GCAQAwDQYJKoZIhvcNAQEFBQAD +ggEBAIu/mfiez5XN5tn2jScgShPgHEFJBR0BTJBZF6xCk0jyqNx/g9HMj2ELCuK+ +r/Y7KFW5c5M3AQ+xWW0ZSc4kvzyTcV7yTVIwj2jZ9ddYMN3nupZFgBK1GB4Y05GY +MJJFRkSu6d/Ph5ypzBVw2YMT/nsOo5VwMUGLgS7YVjU+u/HNWz80J3oO17mNZllj +PvORJcnjwlroDnS58KoJ7GDgejv3ESWADvX1OHLE4cRkiQGeLoEU4pxdCxXRqX0U +PbwIkZN9mXVcrmPHq8MWi4eC/V7hnbZETMHuWhUoiNdOEfsAXr3iP4KjyyRdwc7a +d/xgcK06UVQRL/HbEYGiQL056mc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDaDCCAlCgAwIBAgIBATANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDMyN1oXDTM1MDEyMjAwNDMyN1owPzELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQ4wDAYDVQQDDAVTdWJDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ0Ou7AyRcRXnB/kVHv/6kwe +ANzgg/DyJfsAUqW90m7Lu1nqyug8gK0RBd77yU0w5HOAMHTVSdpjZK0g2sgx4Mb1 +d/213eL9TTl5MRVEChTvQr8q5DVG/8fxPPE7fMI8eOAzd98/NOAChk+80r4Sx7fC +kGVEE1bKwY1MrUsUNjOY2d6t3M4HHV3HX1V8ShuKfsHxgCmLzdI8U+5CnQedFgkm +3e+8tr8IX5RR1wA1Ifw9VadF7OdI/bGMzog/Q8XCLf+WPFjnK7Gcx6JFtzF6Gi4x +4dp1Xl45JYiVvi9zQ132wu8A1pDHhiNgQviyzbP+UjcB/tsOpzBQF8abYzgEkWEC +AwEAAaNyMHAwDwYDVR0TAQH/BAUwAwEB/zAxBglghkgBhvhCAQ0EJBYiUnVieS9P +cGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUlCjXWLsReYzH +LzsxwVnCXmKoB/owCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCJ/OyN +rT8Cq2Y+G2yA/L1EMRvvxwFBqxavqaqHl/6rwsIBFlB3zbqGA/0oec6MAVnYynq4 +c4AcHTjx3bQ/S4r2sNTZq0DH4SYbQzIobx/YW8PjQUJt8KQdKMcwwi7arHP7A/Ha +LKu8eIC2nsUBnP4NhkYSGhbmpJK+PFD0FVtD0ZIRlY/wsnaZNjWWcnWF1/FNuQ4H +ySjIblqVQkPuzebv3Ror6ZnVDukn96Mg7kP4u6zgxOeqlJGRe1M949SS9Vudjl8X +SF4aZUUB9pQGhsqQJVqaz2OlhGOp9D0q54xko/rekjAIcuDjl1mdX4F2WRrzpUmZ +uY/bPeOBYiVsOYVe +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/ca.cert 
b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/ca.cert new file mode 100644 index 0000000..bcabbee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/ca.cert @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0DCCArigAwIBAgIBADANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDIzMloXDTM2MDEyMjAwNDIzMlowPDELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQswCQYDVQQDDAJDQTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANbv0x42BTKFEQOE+KJ2XmiSdZpR +wjzQLAkPLRnLB98tlzs4xo+y4RyY/rd5TT9UzBJTIhP8CJi5GbS1oXEerQXB3P0d +L5oSSMwGGyuIzgZe5+vZ1kgzQxMEKMMKlzA73rbMd4Jx3u5+jdbP0EDrPYfXSvLY +bS04n2aX7zrN3x5KdDrNBfwBio2/qeaaj4+9OxnwRvYP3WOvqdW0h329eMfHw0pi +JI0drIVdsEqClUV4pebT/F+CPUPkEh/weySgo9wANockkYu5ujw2GbLFcO5LXxxm +dEfcVr3r6t6zOA4bJwL0W/e6LBcrwiG/qPDFErhwtgTLYf6Er67SzLyA66UCAwEA +AaOB3DCB2TAPBgNVHRMBAf8EBTADAQH/MDEGCWCGSAGG+EIBDQQkFiJSdWJ5L09w +ZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBRJ7Xd380KzBV7f +USKIQ+O/vKbhDzAOBgNVHQ8BAf8EBAMCAQYwZAYDVR0jBF0wW4AUSe13d/NCswVe +31EiiEPjv7ym4Q+hQKQ+MDwxCzAJBgNVBAYMAkpQMRIwEAYDVQQKDAlKSU4uR1Iu +SlAxDDAKBgNVBAsMA1JSUjELMAkGA1UEAwwCQ0GCAQAwDQYJKoZIhvcNAQEFBQAD +ggEBAIu/mfiez5XN5tn2jScgShPgHEFJBR0BTJBZF6xCk0jyqNx/g9HMj2ELCuK+ +r/Y7KFW5c5M3AQ+xWW0ZSc4kvzyTcV7yTVIwj2jZ9ddYMN3nupZFgBK1GB4Y05GY +MJJFRkSu6d/Ph5ypzBVw2YMT/nsOo5VwMUGLgS7YVjU+u/HNWz80J3oO17mNZllj +PvORJcnjwlroDnS58KoJ7GDgejv3ESWADvX1OHLE4cRkiQGeLoEU4pxdCxXRqX0U +PbwIkZN9mXVcrmPHq8MWi4eC/V7hnbZETMHuWhUoiNdOEfsAXr3iP4KjyyRdwc7a +d/xgcK06UVQRL/HbEYGiQL056mc= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client-pass.key b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client-pass.key new file mode 100644 index 0000000..c332d2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client-pass.key @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,DE0F454B166A4941 + +Kub+uiaDkZAmUP2P1VKKB1tPcoJ/ZSs5sLckVv156XDfH+6OilEh+E4vXuKkJnW7 +KFVM/nKrKPxLtNmKha0yx2bqZeUfUdpwq1GqTve84v/oJDTOhBXPlKlkMvzzVhdC +IeJ61BgSt4ZVWSAcorae8yvDtUCtVoc0YonuiEno5bjEOWMuOu9iwviDIO+IePdY +mgIPkEyPQOY6/Ir3ImLdqmpPfVPnNxx5fIw9VXDfTqWfY3qHnGECx17ko4PCxhkN +IwnXU8E6r6XRpHV58t7JkM88eD0crpQpZ8Ki1zVPtHq8DfQLwQI+FGt6PBmeneVl +Dne6UPIaEDpd9f5X+Q7+2jZCBOsGntNh4+E7AwnG+G4IpleUG308DWsXZZpYhfLy +12WMzDlsaQ68qgO1a7rD+nOpIgUfIl7bdB242g7gWvXyVzZOGJIg/P3Fl6ydR7Al +afAQFH2L1YH7u9zJLIonMmVRz7VNUHwlVaPE18VGBbzwFOmZHj2THUUB3cOGfsC8 +FgQz0JVZT5t7fAS53KRXhH/mWEimcrKSvZJxOBwoknQDtHS517wMhyUco63UYEQq +2nkW6BD08Qc92xu14hWuWrActTtsJ3wyGSPMYbqo5QRvlnpaEzaQlMRXdBHYbSFJ +D5Eo2nXXqNPX7YbyIHh+cda80r9OwmH/gvXThQd79pMvNHPZ2TWnrlZF7YAdVxHH +etLrAVas2AxXs2LdhwFTI6dmxMv92gYz/WwMeZaNV7SJ4JIKHxGCmajv12cnGVh9 +qCxMIFcpISr3EMwEAnF0npfQ6Xp6rKFUXuEml036vE8= +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client.cert b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client.cert new file mode 100644 index 0000000..ad13c4b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDKDCCAhCgAwIBAgIBAjANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMTAzMTQ1OFoXDTM1MDEyMzAzMTQ1OFowZTELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMRAwDgYDVQQDDAdleGFtcGxl +MSIwIAYJKoZIhvcNAQkBDBNleGFtcGxlQGV4YW1wbGUub3JnMIGfMA0GCSqGSIb3 
+DQEBAQUAA4GNADCBiQKBgQDRWssrK8Gyr+500hpLjCGR3+AHL8/hEJM5zKi/MgLW +jTkvsgOwbYwXOiNtAbR9y4/ucDq7EY+cMUMHES4uFaPTcOaAV0aZRmk8AgslN1tQ +gNS6ew7/Luq3DcVeWkX8PYgR9VG0mD1MPfJ6+IFA5d3vKpdBkBgN4l46jjO0/2Xf +ewIDAQABo4GPMIGMMAwGA1UdEwEB/wQCMAAwMQYJYIZIAYb4QgENBCQWIlJ1Ynkv +T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFOFvay0H7lr2 +xUx6waYEV2bVDYQhMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAgYI +KwYBBQUHAwQwDQYJKoZIhvcNAQEFBQADggEBABd2dYWqbDIWf5sWFvslezxJv8gI +w64KCJBuyJAiDuf+oazr3016kMzAlt97KecLZDusGNagPrq02UX7YMoQFsWJBans +cDtHrkM0al5r6/WGexNMgtYbNTYzt/IwodISGBgZ6dsOuhznwms+IBsTNDAvWeLP +lt2tOqD8kEmjwMgn0GDRuKjs4EoboA3kMULb1p9akDV9ZESU3eOtpS5/G5J5msLI +9WXbYBjcjvkLuJH9VsJhb+R58Vl0ViemvAHhPilSl1SPWVunGhv6FcIkdBEi1k9F +e8BNMmsEjFiANiIRvpdLRbiGBt0KrKTndVfsmoKCvY48oCOvnzxtahFxfs8= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client.key b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client.key new file mode 100644 index 0000000..37bc62f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/client.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQDRWssrK8Gyr+500hpLjCGR3+AHL8/hEJM5zKi/MgLWjTkvsgOw +bYwXOiNtAbR9y4/ucDq7EY+cMUMHES4uFaPTcOaAV0aZRmk8AgslN1tQgNS6ew7/ +Luq3DcVeWkX8PYgR9VG0mD1MPfJ6+IFA5d3vKpdBkBgN4l46jjO0/2XfewIDAQAB +AoGAZcz8llWErtsV3QB9gNb3S/PNADGjqBFjReva8n3jG2k4sZSibpwWTwUaTNtT +ZQgjSRKRvH1hk9XwffNAvXAQZNNkuj/16gO2oO45nyLj4dO365ujLptWnVIWDHOE +uN0GeiZO+VzcCisT0WCq4tvtLeH8svrxzA8cbXIEyOK7NiECQQDwo2zPFyKAZ/Cu +lDJ6zKT+RjfWwW7DgWzirAlTrt4ViMaW+IaDH29TmQpb4V4NuR3Xi+2Xl4oicu6S +36TW9+/FAkEA3rgfOQJuLlWSnw1RTGwvnC816a/W7iYYY7B+0U4cDbfWl7IoXT4y +M8nV/HESooviZLqBwzAYSoj3fFKYBKpGPwJAUO8GN5iWWA2dW3ooiDiv/X1sZmRk +dojfMFWgRW747tEzya8Ivq0h6kH8w+5GjeMG8Gn1nRiwsulo6Ckj7dEx6QJACyui +7UIQ8qP6GZ4aYMHgVW4Mvy7Bkeo5OO7GPYs0Xv/EdJFL8vlGnVBXOjUVoS9w6Gpu +TbLg1QQvnX2rADjmEwJANxZO2GUkaWGsEif8aGW0x5g/IdaMGG27pTWk5zqix7P3 +1UDrdo/JOXhptovhRi06EppIxAxYmbh9vd9VN8Itlw== +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/helper.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/helper.rb new file mode 100644 index 0000000..26bc4f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/helper.rb @@ -0,0 +1,131 @@ +# -*- encoding: utf-8 -*- +begin + require 'simplecov' + require 'simplecov-rcov' + SimpleCov.formatter = SimpleCov::Formatter::RcovFormatter + SimpleCov.start +rescue LoadError +end +require 'test/unit' + +require 'httpclient' +require 'webrick' +require 'webrick/httpproxy.rb' +require 'logger' +require 'stringio' +require 'cgi' +require 'webrick/httputils' + + +module Helper + Port = 17171 + ProxyPort = 17172 + + def serverport + @serverport + end + + def proxyport + @proxyport + end + + def serverurl + "http://localhost:#{serverport}/" + end + + def proxyurl + "http://localhost:#{proxyport}/" + end + + def setup + @logger = Logger.new(STDERR) + @logger.level = Logger::Severity::FATAL + @proxyio = StringIO.new + @proxylogger = Logger.new(@proxyio) + @proxylogger.level = Logger::Severity::DEBUG + @server = @proxyserver = @client = nil + @server_thread = @proxyserver_thread = nil + @serverport = Port + @proxyport = ProxyPort + end + + def teardown + teardown_client if @client + teardown_proxyserver if @proxyserver + teardown_server if @server + end + + def setup_client + @client = HTTPClient.new + end + + def escape_noproxy + backup = HTTPClient::NO_PROXY_HOSTS.dup + HTTPClient::NO_PROXY_HOSTS.clear + yield + ensure + 
HTTPClient::NO_PROXY_HOSTS.replace(backup) + end + + def setup_proxyserver + @proxyserver = WEBrick::HTTPProxyServer.new( + :BindAddress => "localhost", + :Logger => @proxylogger, + :Port => 0, + :AccessLog => [] + ) + @proxyport = @proxyserver.config[:Port] + @proxyserver_thread = start_server_thread(@proxyserver) + end + + def teardown_client + @client.reset_all + end + + def teardown_server + @server.shutdown + #@server_thread.kill + end + + def teardown_proxyserver + @proxyserver.shutdown + #@proxyserver_thread.kill + end + + def start_server_thread(server) + t = Thread.new { + Thread.current.abort_on_exception = true + server.start + } + while server.status != :Running + Thread.pass + unless t.alive? + t.join + raise + end + end + t + end + + def params(str) + HTTP::Message.parse(str).inject({}) { |r, (k, v)| r[k] = v.first; r } + end + + def silent + begin + back, $VERBOSE = $VERBOSE, nil + yield + ensure + $VERBOSE = back + end + end + + def escape_env + env = {} + env.update(ENV) + yield + ensure + ENV.clear + ENV.update(env) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/htdigest b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/htdigest new file mode 100644 index 0000000..0a125d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/htdigest @@ -0,0 +1 @@ +admin:auth:4302fe65caa32f27721949149ccd3083 diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/htpasswd b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/htpasswd new file mode 100644 index 0000000..70df50c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/htpasswd @@ -0,0 +1,2 @@ +admin:Qg266hq/YYKe2 +guest:gbPc4vPCH.h12 diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/jruby_ssl_socket/test_pemutils.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/jruby_ssl_socket/test_pemutils.rb new file mode 100644 index 0000000..560a1c6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/jruby_ssl_socket/test_pemutils.rb @@ -0,0 +1,32 @@ +require File.expand_path('helper', File.join(File.dirname(__FILE__), "..")) + + +class PEMUtilsTest < Test::Unit::TestCase + include Helper + + def setup + @raw_cert = "-----BEGIN CERTIFICATE-----\nMIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBCMRMwEQYKCZImiZPyLGQB\nGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJcnVieS1sYW5nMRAwDgYDVQQDDAdSdWJ5\nIENBMB4XDTE2MDgxMDE3MjEzNFoXDTE3MDgxMDE3MjEzNFowSzETMBEGCgmSJomT\n8ixkARkWA29yZzEZMBcGCgmSJomT8ixkARkWCXJ1YnktbGFuZzEZMBcGA1UEAwwQ\nUnVieSBjZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB\nAJCfsSXpSMpmZCVa+ZCM+QDgomnhDlvnrGDq6pasTaIspGTXgws+7r8Dt/cNe6EH\nHJpRH2cGRiO4yPcfcT9eS4X7k8OC4f33wHfACOmLu6LeoNE8ujmSk6L6WzLUI+sE\nnLZbFrXxoAo4XHsm8vEG9C+jEoXZ1p+47wrAGaDwDQTnzlMy4dT9pRQEJP2G/Rry\nUkuZn8SUWmh3/YS78iaSzsNF1cgE1ealHOrPPFDjiCGDaH/LHyUPYlbFSLZ/B7Qx\nLxi5sePLcywWq/EJrmWpgeVTDjtNijsdKv/A3qkY+fm/oD0pzt7XsfJaP9YKNyJO\nQFdxWZeiPcDF+Hwf+IwSr+kCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgeAMB0GA1Ud\nDgQWBBQNvzYzJyXemGhxbA8NMXLolDnPyjANBgkqhkiG9w0BAQsFAAOCAQEARIJV\noKejGlOTn71QutnNnu07UtTu0IHs6YqjYzzND+m4JXLN+wvYm72AFUG0b1L7dRg0\niK8XjQrlNQNVqP1Mc6tffchy20neOPOHeiO6qTdRU8P2S8D3Uwe+1qhgxjfE+cWc\nwZmWxYK4HA8c58PxWMqrkr2QqXDplG9KWLvOgrtPGiLLZcQSKhvvB63QzItHBDU6\nRayiJY3oPkK/HrIvFlySqFqzWmuyknkciOFywEHQMz/tcSFJ2QFpPj/tBz9VXohH\nZ8KscmfhZrTPBjo+ky1lz/WraWoz4LMiLnkC2ABczWLRSawu+v3Irx1NFJngt05e\npqwtqIUeg7j+JLiTaA==\n-----END CERTIFICATE-----" + end + + def test_read_certificate + assert_nothing_raised do + binary = 
HTTPClient::JRubySSLSocket::PEMUtils.read_certificate(@raw_cert) + end + end + + def test_read_certificate_works_with_random_ascii_text_outside_begin_end + raw_cert_with_ascii = "some text before begin\n" + @raw_cert + "\nsome text after end" + assert_nothing_raised do + binary = HTTPClient::JRubySSLSocket::PEMUtils.read_certificate(raw_cert_with_ascii) + end + end + + def test_read_certificate_uses_all_content_if_missing_begin_end + cert = @raw_cert.sub(/-----BEGIN CERTIFICATE-----/, '').sub(/-----END CERTIFICATE-----/, '') + assert_nothing_raised do + binary = HTTPClient::JRubySSLSocket::PEMUtils.read_certificate(cert) + end + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/runner.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/runner.rb new file mode 100644 index 0000000..a07c28e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/runner.rb @@ -0,0 +1,2 @@ +require 'test/unit' +exit Test::Unit::AutoRunner.run(true, File.dirname($0)) diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/server.cert b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/server.cert new file mode 100644 index 0000000..998ccc5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/server.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/zCCAeegAwIBAgIBATANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxDjAMBgNVBAMMBVN1YkNB +MB4XDTA0MDEzMTAzMTMxNloXDTMzMDEyMzAzMTMxNlowQzELMAkGA1UEBgwCSlAx +EjAQBgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMRIwEAYDVQQDDAlsb2Nh +bGhvc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANFJTxWqup3nV9dsJAku +p+WaXnPNIzcpAA3qMGZDJTJsfa8Du7ZxTP0XJK5mETttBrn711cJxAuP3KjqnW9S +vtZ9lY2sXJ6Zj62sN5LwG3VVe25dI28yR1EsbHjJ5Zjf9tmggMC6am52dxuHbt5/ +vHo4ngJuKE/U+eeGRivMn6gFAgMBAAGjgYUwgYIwDAYDVR0TAQH/BAIwADAxBglg +hkgBhvhCAQ0EJBYiUnVieS9PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAd +BgNVHQ4EFgQUpZIyygD9JxFYHHOTEuWOLbCKfckwCwYDVR0PBAQDAgWgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBBQUAA4IBAQBwAIj5SaBHaA5X31IP +CFCJiep96awfp7RANO0cuUj+ZpGoFn9d6FXY0g+Eg5wAkCNIzZU5NHN9xsdOpnUo +zIBbyTfQEPrge1CMWMvL6uGaoEXytq84VTitF/xBTky4KtTn6+es4/e7jrrzeUXQ +RC46gkHObmDT91RkOEGjHLyld2328jo3DIN/VTHIryDeVHDWjY5dENwpwdkhhm60 +DR9IrNBbXWEe9emtguNXeN0iu1ux0lG1Hc6pWGQxMlRKNvGh0yZB9u5EVe38tOV0 +jQaoNyL7qzcQoXD3Dmbi1p0iRmg/+HngISsz8K7k7MBNVsSclztwgCzTZOBiVtkM +rRlQ +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/server.key b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/server.key new file mode 100644 index 0000000..9ba2218 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/server.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDRSU8Vqrqd51fXbCQJLqflml5zzSM3KQAN6jBmQyUybH2vA7u2 +cUz9FySuZhE7bQa5+9dXCcQLj9yo6p1vUr7WfZWNrFyemY+trDeS8Bt1VXtuXSNv +MkdRLGx4yeWY3/bZoIDAumpudncbh27ef7x6OJ4CbihP1PnnhkYrzJ+oBQIDAQAB +AoGBAIf4CstW2ltQO7+XYGoex7Hh8s9lTSW/G2vu5Hbr1LTHy3fzAvdq8MvVR12O +rk9fa+lU9vhzPc0NMB0GIDZ9GcHuhW5hD1Wg9OSCbTOkZDoH3CAFqonjh4Qfwv5W +IPAFn9KHukdqGXkwEMdErsUaPTy9A1V/aROVEaAY+HJgq/eZAkEA/BP1QMV04WEZ +Oynzz7/lLizJGGxp2AOvEVtqMoycA/Qk+zdKP8ufE0wbmCE3Qd6GoynavsHb6aGK +gQobb8zDZwJBANSK6MrXlrZTtEaeZuyOB4mAmRzGzOUVkUyULUjEx2GDT93ujAma +qm/2d3E+wXAkNSeRpjUmlQXy/2oSqnGvYbMCQQDRM+cYyEcGPUVpWpnj0shrF/QU +9vSot/X1G775EMTyaw6+BtbyNxVgOIu2J+rqGbn3c+b85XqTXOPL0A2RLYkFAkAm +syhSDtE9X55aoWsCNZY/vi+i4rvaFoQ/WleogVQAeGVpdo7/DK9t9YWoFBIqth0L +mGSYFu9ZhvZkvQNV8eYrAkBJ+rOIaLDsmbrgkeDruH+B/9yrm4McDtQ/rgnOGYnH 
+LjLpLLOrgUxqpzLWe++EwSLwK2//dHO+SPsQJ4xsyQJy +-----END RSA PRIVATE KEY----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/sslsvr.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/sslsvr.rb new file mode 100644 index 0000000..b1f4614 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/sslsvr.rb @@ -0,0 +1,65 @@ +require 'webrick/https' +require 'logger' +require 'rbconfig' + +PORT = 17171 +DIR = File.dirname(File.expand_path(__FILE__)) + +def cert(filename) + OpenSSL::X509::Certificate.new(File.open(File.join(DIR, filename)) { |f| + f.read + }) +end + +def key(filename) + OpenSSL::PKey::RSA.new(File.open(File.join(DIR, filename)) { |f| + f.read + }) +end + +def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" +end + +logger = Logger.new(STDERR) +logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + +server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => PORT, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => File.join(DIR, 'ca.cert'), + :SSLCertificate => cert('server.cert'), + :SSLPrivateKey => key('server.key'), + :SSLVerifyClient => nil, #OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT|OpenSSL::SSL::VERIFY_PEER, + :SSLClientCA => cert('ca.cert'), + :SSLCertName => nil +) +trap(:INT) do + server.shutdown +end +[:hello].each do |sym| + server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) +end + +t = Thread.new { + Thread.current.abort_on_exception = true + server.start +} +while server.status != :Running + sleep 0.1 + unless t.alive? + t.join + raise + end +end +STDOUT.sync = true +puts $$ +t.join diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/subca.cert b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/subca.cert new file mode 100644 index 0000000..1e47185 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/subca.cert @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaDCCAlCgAwIBAgIBATANBgkqhkiG9w0BAQUFADA8MQswCQYDVQQGDAJKUDES +MBAGA1UECgwJSklOLkdSLkpQMQwwCgYDVQQLDANSUlIxCzAJBgNVBAMMAkNBMB4X +DTA0MDEzMDAwNDMyN1oXDTM1MDEyMjAwNDMyN1owPzELMAkGA1UEBgwCSlAxEjAQ +BgNVBAoMCUpJTi5HUi5KUDEMMAoGA1UECwwDUlJSMQ4wDAYDVQQDDAVTdWJDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ0Ou7AyRcRXnB/kVHv/6kwe +ANzgg/DyJfsAUqW90m7Lu1nqyug8gK0RBd77yU0w5HOAMHTVSdpjZK0g2sgx4Mb1 +d/213eL9TTl5MRVEChTvQr8q5DVG/8fxPPE7fMI8eOAzd98/NOAChk+80r4Sx7fC +kGVEE1bKwY1MrUsUNjOY2d6t3M4HHV3HX1V8ShuKfsHxgCmLzdI8U+5CnQedFgkm +3e+8tr8IX5RR1wA1Ifw9VadF7OdI/bGMzog/Q8XCLf+WPFjnK7Gcx6JFtzF6Gi4x +4dp1Xl45JYiVvi9zQ132wu8A1pDHhiNgQviyzbP+UjcB/tsOpzBQF8abYzgEkWEC +AwEAAaNyMHAwDwYDVR0TAQH/BAUwAwEB/zAxBglghkgBhvhCAQ0EJBYiUnVieS9P +cGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUlCjXWLsReYzH +LzsxwVnCXmKoB/owCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCJ/OyN +rT8Cq2Y+G2yA/L1EMRvvxwFBqxavqaqHl/6rwsIBFlB3zbqGA/0oec6MAVnYynq4 +c4AcHTjx3bQ/S4r2sNTZq0DH4SYbQzIobx/YW8PjQUJt8KQdKMcwwi7arHP7A/Ha +LKu8eIC2nsUBnP4NhkYSGhbmpJK+PFD0FVtD0ZIRlY/wsnaZNjWWcnWF1/FNuQ4H +ySjIblqVQkPuzebv3Ror6ZnVDukn96Mg7kP4u6zgxOeqlJGRe1M949SS9Vudjl8X +SF4aZUUB9pQGhsqQJVqaz2OlhGOp9D0q54xko/rekjAIcuDjl1mdX4F2WRrzpUmZ +uY/bPeOBYiVsOYVe +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_auth.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_auth.rb new file mode 100644 index 0000000..ab5cd62 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_auth.rb @@ -0,0 +1,492 @@ +require File.expand_path('helper', File.dirname(__FILE__)) +require 'digest/md5' +require 'rack' +require 'rack/lint' +require 'rack-ntlm' + +class TestAuth < Test::Unit::TestCase + include Helper + + def setup + super + setup_server + end + + def teardown + super + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + @server.mount( + '/basic_auth', + WEBrick::HTTPServlet::ProcHandler.new(method(:do_basic_auth).to_proc) + ) + @server.mount( + '/digest_auth', + WEBrick::HTTPServlet::ProcHandler.new(method(:do_digest_auth).to_proc) + ) + @server.mount( + '/digest_sess_auth', + WEBrick::HTTPServlet::ProcHandler.new(method(:do_digest_sess_auth).to_proc) + ) + # NTLM endpoint + ntlm_handler = Rack::Handler::WEBrick.new(@server, + Rack::Builder.app do + use Rack::ShowExceptions + use Rack::ContentLength + use Rack::Ntlm, {:uri_pattern => /.*/, :auth => {:username => "admin", :password => "admin"}} + run lambda { |env| [200, { 'Content-Type' => 'text/html' }, ['ntlm_auth OK']] } + end + ) + @server.mount( + '/ntlm_auth', + WEBrick::HTTPServlet::ProcHandler.new(Proc.new do |req, res| + ntlm_handler.service(req, res) + end) + ) + # Htpasswd + htpasswd = File.join(File.dirname(__FILE__), 'htpasswd') + htpasswd_userdb = WEBrick::HTTPAuth::Htpasswd.new(htpasswd) + htdigest = File.join(File.dirname(__FILE__), 'htdigest') + htdigest_userdb = WEBrick::HTTPAuth::Htdigest.new(htdigest) + @basic_auth = WEBrick::HTTPAuth::BasicAuth.new( + :Logger => @logger, + :Realm => 'auth', + :UserDB => htpasswd_userdb + ) + @digest_auth = WEBrick::HTTPAuth::DigestAuth.new( + :Logger => @logger, + :Algorithm => 'MD5', + :Realm => 'auth', + :UserDB => htdigest_userdb + ) + @digest_sess_auth = WEBrick::HTTPAuth::DigestAuth.new( + :Logger => @logger, + :Algorithm => 'MD5-sess', + :Realm => 'auth', + :UserDB => htdigest_userdb + ) + @server_thread = start_server_thread(@server) + + @proxy_digest_auth = WEBrick::HTTPAuth::ProxyDigestAuth.new( + :Logger => @proxylogger, + :Algorithm => 'MD5', + :Realm => 'auth', + :UserDB => htdigest_userdb + ) + + @proxyserver = WEBrick::HTTPProxyServer.new( + :ProxyAuthProc => @proxy_digest_auth.method(:authenticate).to_proc, + :BindAddress => "localhost", + :Logger => @proxylogger, + :Port => 0, + :AccessLog => [] + ) + @proxyport = @proxyserver.config[:Port] + @proxyserver_thread = start_server_thread(@proxyserver) + end + + def do_basic_auth(req, res) + @basic_auth.authenticate(req, res) + res['content-type'] = 'text/plain' + res.body = 'basic_auth OK' + end + + def do_digest_auth(req, res) + @digest_auth.authenticate(req, res) + res['content-type'] = 'text/plain' + res['x-query'] = req.body + res.body = 'digest_auth OK' + req.query_string.to_s + end + + def do_digest_sess_auth(req, res) + @digest_sess_auth.authenticate(req, res) + res['content-type'] = 'text/plain' + res['x-query'] = req.body + res.body = 'digest_sess_auth OK' + req.query_string.to_s + end + + # TODO: monkey patching for rack-ntlm-test-services's incompat. 
+ module ::Net + module NTLM + # ruby-ntlm 0.3.0 -> 0.4.0 + def self.decode_utf16le(*arg) + EncodeUtil.decode_utf16le(*arg) + end + # Make it work if @value == nil + class SecurityBuffer < FieldSet + remove_method(:data_size) if method_defined?(:data_size) + def data_size + @active && @value ? @value.size : 0 + end + end + end + end + def test_ntlm_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/ntlm_auth", 'admin', 'admin') + assert_equal('ntlm_auth OK', c.get_content("http://localhost:#{serverport}/ntlm_auth")) + end + + def test_basic_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + end + + def test_basic_auth_compat + c = HTTPClient.new + c.set_basic_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + end + + def test_BASIC_auth + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + # WEBrick in ruby 1.8.7 uses 'BASIC' instead of 'Basic' + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + # + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + res = c.get("http://localhost:#{serverport}/basic_auth") + assert_equal('basic_auth OK', res.content) + assert_equal(200, res.status) + assert_equal(401, res.previous.status) + assert_equal(nil, res.previous.previous) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_BASIC_auth_force + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + # WEBrick in ruby 1.8.7 uses 'BASIC' instead of 'Basic' + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + # + c.force_basic_auth = true + c.debug_dev = str = '' + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + assert_equal('Authorization: Basic YWRtaW46YWRtaW4='.upcase, str.split(/\r?\n/)[5].upcase) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_BASIC_auth_async + # async methods don't issure retry call so for successful authentication you need to set force_basic_auth flag + c = HTTPClient.new(:force_basic_auth => true) + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + # WEBrick in ruby 1.8.7 uses 'BASIC' instead of 'Basic' + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + # + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + conn = c.get_async("http://localhost:#{serverport}/basic_auth") + assert_equal('basic_auth OK', conn.pop.body.read) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_BASIC_auth_nil_uri + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + c.set_auth(nil, 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth")) + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + # To work this test consistently on CRuby you can to add 'Thread.pass' 
in + # @challenge iteration at BasicAuth#get like; + # + # return nil unless @challenge.find { |uri, ok| + # Thread.pass + # Util.uri_part_of(target_uri, uri) and ok + # } + def test_BASIC_auth_multi_thread + c = HTTPClient.new + webrick_backup = @basic_auth.instance_eval { @auth_scheme } + begin + @basic_auth.instance_eval { @auth_scheme = "BASIC" } + c.www_auth.basic_auth.instance_eval { @scheme = "BASIC" } + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + + 100.times.map { |idx| + Thread.new(idx) { |idx2| + Thread.abort_on_exception = true + Thread.pass + c.get("http://localhost:#{serverport}/basic_auth?#{idx2}") + } + }.map { |t| + t.join + } + ensure + @basic_auth.instance_eval { @auth_scheme = webrick_backup } + end + end + + def test_basic_auth_reuses_credentials + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://localhost:#{serverport}/basic_auth/")) + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content("http://localhost:#{serverport}/basic_auth/sub/dir/") + assert_match(/Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_digest_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_auth OK', c.get_content("http://localhost:#{serverport}/digest_auth")) + end + + def test_digest_auth_reuses_credentials + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_auth OK', c.get_content("http://localhost:#{serverport}/digest_auth/")) + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content("http://localhost:#{serverport}/digest_auth/sub/dir/") + assert_match(/Authorization: Digest/, str) + end + + def test_digest_auth_with_block + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + called = false + c.get_content("http://localhost:#{serverport}/digest_auth") do |str| + assert_equal('digest_auth OK', str) + called = true + end + assert(called) + # + called = false + c.get("http://localhost:#{serverport}/digest_auth") do |str| + assert_equal('digest_auth OK', str) + called = true + end + assert(called) + end + + def test_digest_auth_with_post_io + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + post_body = StringIO.new("1234567890") + assert_equal('1234567890', c.post("http://localhost:#{serverport}/digest_auth", post_body).header['x-query'][0]) + # + post_body = StringIO.new("1234567890") + post_body.read(5) + assert_equal('67890', c.post("http://localhost:#{serverport}/digest_auth", post_body).header['x-query'][0]) + end + + def test_digest_auth_with_querystring + c = HTTPClient.new + c.debug_dev = STDERR if $DEBUG + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_auth OKbar=baz', c.get_content("http://localhost:#{serverport}/digest_auth/foo?bar=baz")) + end + + def test_perfer_digest + c = HTTPClient.new + c.set_auth('http://example.com/', 'admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 401 Unauthorized\nWWW-Authenticate: Basic realm=\"foo\"\nWWW-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/^Authorization: 
Digest/, str) + end + + def test_digest_sess_auth + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + assert_equal('digest_sess_auth OK', c.get_content("http://localhost:#{serverport}/digest_sess_auth")) + end + + def test_proxy_auth + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Basic realm=\"foo\"\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_proxy_auth_force + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.force_basic_auth = true + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_proxy_auth_reuses_credentials + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Basic realm=\"foo\"\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.get_content('http://www1.example.com/') + c.debug_dev = str = '' + c.get_content('http://www2.example.com/') + assert_match(/Proxy-Authorization: Basic YWRtaW46YWRtaW4=/, str) + end + + def test_digest_proxy_auth_loop + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + md5 = Digest::MD5.new + ha1 = md5.hexdigest("admin:foo:admin") + ha2 = md5.hexdigest("GET:/") + response = md5.hexdigest("#{ha1}:nonce:#{ha2}") + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Digest/, str) + assert_match(%r"response=\"#{response}\"", str) + end + + def test_digest_proxy_auth + c=HTTPClient.new("http://localhost:#{proxyport}/") + c.set_proxy_auth('admin', 'admin') + c.set_auth("http://127.0.0.1:#{serverport}/", 'admin', 'admin') + assert_equal('basic_auth OK', c.get_content("http://127.0.0.1:#{serverport}/basic_auth")) + end + + def test_digest_proxy_invalid_auth + c=HTTPClient.new("http://localhost:#{proxyport}/") + c.set_proxy_auth('admin', 'wrong') + c.set_auth("http://127.0.0.1:#{serverport}/", 'admin', 'admin') + assert_raises(HTTPClient::BadResponseError) do + c.get_content("http://127.0.0.1:#{serverport}/basic_auth") + end + end + + def test_prefer_digest_to_basic_proxy_auth + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nProxy-Authenticate: Basic realm=\"bar\"\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + md5 = Digest::MD5.new + ha1 = md5.hexdigest("admin:foo:admin") + ha2 = md5.hexdigest("GET:/") + response = md5.hexdigest("#{ha1}:nonce:#{ha2}") + c.debug_dev = str = '' + c.get_content('http://example.com/') + assert_match(/Proxy-Authorization: Digest/, str) + assert_match(%r"response=\"#{response}\"", str) + end + + def 
test_digest_proxy_auth_reuses_credentials + c = HTTPClient.new + c.set_proxy_auth('admin', 'admin') + c.test_loopback_http_response << "HTTP/1.0 407 Unauthorized\nProxy-Authenticate: Digest realm=\"foo\", nonce=\"nonce\", stale=false\nContent-Length: 2\n\nNG" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + md5 = Digest::MD5.new + ha1 = md5.hexdigest("admin:foo:admin") + ha2 = md5.hexdigest("GET:/") + response = md5.hexdigest("#{ha1}:nonce:#{ha2}") + c.get_content('http://www1.example.com/') + c.debug_dev = str = '' + c.get_content('http://www2.example.com/') + assert_match(/Proxy-Authorization: Digest/, str) + assert_match(%r"response=\"#{response}\"", str) + end + + def test_oauth + c = HTTPClient.new + config = HTTPClient::OAuth::Config.new( + :realm => 'http://photos.example.net/', + :consumer_key => 'dpf43f3p2l4k3l03', + :consumer_secret => 'kd94hf93k423kf44', + :token => 'nnch734d00sl2jdk', + :secret => 'pfkkdhi9sl3r4s00', + :version => '1.0', + :signature_method => 'HMAC-SHA1' + ) + config.debug_timestamp = '1191242096' + config.debug_nonce = 'kllo9940pd9333jh' + c.www_auth.oauth.set_config('http://photos.example.net/', config) + c.www_auth.oauth.challenge('http://photos.example.net/') + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://photos.example.net/photos', [[:file, 'vacation.jpg'], [:size, 'original']]) + assert(str.index(%q(GET /photos?file=vacation.jpg&size=original))) + assert(str.index(%q(Authorization: OAuth realm="http://photos.example.net/", oauth_consumer_key="dpf43f3p2l4k3l03", oauth_nonce="kllo9940pd9333jh", oauth_signature="tR3%2BTy81lMeYAr%2FFid0kMTYa%2FWM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d00sl2jdk", oauth_version="1.0"))) + # + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.get_content('http://photos.example.net/photos?file=vacation.jpg&size=original') + assert(str.index(%q(GET /photos?file=vacation.jpg&size=original))) + assert(str.index(%q(Authorization: OAuth realm="http://photos.example.net/", oauth_consumer_key="dpf43f3p2l4k3l03", oauth_nonce="kllo9940pd9333jh", oauth_signature="tR3%2BTy81lMeYAr%2FFid0kMTYa%2FWM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d00sl2jdk", oauth_version="1.0"))) + # + c.test_loopback_http_response << "HTTP/1.0 200 OK\nContent-Length: 2\n\nOK" + c.debug_dev = str = '' + c.post_content('http://photos.example.net/photos', [[:file, 'vacation.jpg'], [:size, 'original']]) + assert(str.index(%q(POST /photos))) + assert(str.index(%q(Authorization: OAuth realm="http://photos.example.net/", oauth_consumer_key="dpf43f3p2l4k3l03", oauth_nonce="kllo9940pd9333jh", oauth_signature="wPkvxykrw%2BBTdCcGqKr%2B3I%2BPsiM%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1191242096", oauth_token="nnch734d00sl2jdk", oauth_version="1.0"))) + end + + def test_basic_auth_post_with_multipart + retry_times = 0 + begin + c = HTTPClient.new + c.set_auth("http://localhost:#{serverport}/", 'admin', 'admin') + File.open(__FILE__) do |f| + # read 'f' twice for authorization negotiation + assert_equal('basic_auth OK', c.post("http://localhost:#{serverport}/basic_auth", :file => f).content) + end + rescue Errno::ECONNRESET, HTTPClient::KeepAliveDisconnected + # TODO: WEBrick server returns ECONNRESET/EPIPE before 
sending Unauthorized response to client? + raise if retry_times > 5 + retry_times += 1 + sleep 1 + retry + end + end + + def test_negotiate_and_basic + c = HTTPClient.new + c.test_loopback_http_response << %Q(HTTP/1.1 401 Unauthorized\r\nWWW-Authenticate: NTLM\r\nWWW-Authenticate: Basic realm="foo"\r\nConnection: Keep-Alive\r\nContent-Length: 0\r\n\r\n) + c.test_loopback_http_response << %Q(HTTP/1.1 401 Unauthorized\r\nWWW-Authenticate: NTLM TlRMTVNTUAACAAAAAAAAACgAAAABAAAAAAAAAAAAAAA=\r\nConnection: Keep-Alive\r\nContent-Length: 0\r\n\r\n) + c.test_loopback_http_response << %Q(HTTP/1.0 200 OK\r\nConnection: Keep-Alive\r\nContent-Length: 1\r\n\r\na) + c.test_loopback_http_response << %Q(HTTP/1.0 200 OK\r\nConnection: Keep-Alive\r\nContent-Length: 1\r\n\r\nb) + c.debug_dev = str = '' + c.set_auth('http://www.example.org/', 'admin', 'admin') + # Do NTLM negotiation + c.get('http://www.example.org/foo') + # BasicAuth authenticator should not respond to it because NTLM + # negotiation has been finished. + assert_match(%r(Authorization: NTLM), str) + assert_not_match(%r(Authorization: Basic), str) + # ditto for other resource that is protected with NTLM + c.debug_dev = str = '' + c.get('http://www.example.org/foo/subdir') + assert_not_match(%r(Authorization: NTLM), str) + assert_not_match(%r(Authorization: Basic), str) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_cookie.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_cookie.rb new file mode 100644 index 0000000..57be1e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_cookie.rb @@ -0,0 +1,309 @@ +require 'test/unit' +require 'uri' +require 'tempfile' + +require 'httpclient/util' +require 'httpclient/cookie' + +class TestCookie < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @c = WebAgent::Cookie.new('hoge', 'funi') + end + + def test_s_new() + assert_instance_of(WebAgent::Cookie, @c) + end +end + +class TestCookieManager < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @cm = WebAgent::CookieManager.new() + end + + def teardown() + end + + def test_parse() + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2999 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2999, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse2() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal("excite.co.jp", cookie.domain) + assert_equal(".excite.co.jp", cookie.dot_domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + end + + def test_parse3() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT;Secure;HTTPOnly" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal("excite.co.jp", cookie.domain) + assert_equal(".excite.co.jp", cookie.dot_domain) + 
assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + assert_equal(true, cookie.secure?) + assert_equal(true, cookie.http_only?) + end + + def test_parse_double_semicolon() + str = "xmen=off,0,0,1;; path=\"/;;\"; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/;;", cookie.path) + assert_equal("excite.co.jp", cookie.domain) + assert_equal(".excite.co.jp", cookie.dot_domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + end + +# def test_make_portlist() +# assert_equal([80,8080], @cm.instance_eval{make_portlist("80,8080")}) +# assert_equal([80], @cm.instance_eval{make_portlist("80")}) +# assert_equal([80,8080,10080], @cm.instance_eval{make_portlist(" 80, 8080, 10080 \n")}) +# end + + def test_check_expired_cookies() + format = "%a, %d-%b-%Y %H:%M:%S GMT" + c1 = WebAgent::Cookie.new('hoge1', 'funi', :domain => 'http://www.example.com/', :path => '/') + c2 = WebAgent::Cookie.new('hoge2', 'funi', :domain => 'http://www.example.com/', :path => '/') + c3 = WebAgent::Cookie.new('hoge3', 'funi', :domain => 'http://www.example.com/', :path => '/') + c4 = WebAgent::Cookie.new('hoge4', 'funi', :domain => 'http://www.example.com/', :path => '/') + c1.expires = (Time.now - 100).gmtime.strftime(format) + c2.expires = (Time.now + 100).gmtime.strftime(format) + c3.expires = (Time.now - 10).gmtime.strftime(format) + c4.expires = nil + cookies = [c1,c2,c3,c4] + @cm.cookies = cookies + assert_equal(c2.name, @cm.cookies[0].name) + assert_equal(c2.expires, @cm.cookies[0].expires) + assert_equal(c4.name, @cm.cookies[1].name) + assert_equal(c4.expires, @cm.cookies[1].expires) + end + + def test_parse_expires + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=\"\"" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse_after_expiration + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2999 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2999, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + + time = Time.at(Time.now.to_i + 60).utc + expires = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT") + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=#{expires}; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + 
assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(time, cookie.expires) + assert_equal("/", cookie.path) + end + + def test_find_cookie() + str = "xmen=off,0,0,1; path=/; domain=.excite2.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite2.co.jp/")) + + str = "xmen=off,0,0,2; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite.co.jp/")) + + url = urify('http://www.excite.co.jp/hoge/funi/') + cookie_str = @cm.find(url) + assert_equal("xmen=\"off,0,0,2\"", cookie_str) + end + + def test_load_cookies() + cookiefile = Tempfile.new('test_cookie') + File.open(cookiefile.path, 'w') do |f| + f.write < same as URL + c.url = urify("http://www.inac.co.jp/hoge/hoge2/hoge3") + @cm.add(c) + # + c1, c2 = @cm.cookies + assert_equal('/hoge/hoge2/', c1.path) + assert_equal('/', c2.path) + end + + def test_keep_escaped + uri = urify('http://www.example.org') + + @cm.parse("bar=2; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=\"2\"; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=; path=/", uri) + c = @cm.cookies.first + assert_equal('', c.value) + assert_equal('bar=', @cm.find(uri)) + + @cm.parse("bar=\"\"; path=/", uri) + c = @cm.cookies.first + assert_equal('', c.value) + assert_equal('bar=', @cm.find(uri)) + end + + def test_load_cookies_escaped + uri = urify('http://example.org/') + f = Tempfile.new('test_cookie') + File.open(f.path, 'w') do |out| + out.write <= "1.9.0" +# Rubinius 1.8 mode does not support Regexp.quote(raw, 'n') I don't want put +# a pressure on supporting it because 1.9 mode works fine. diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_http-access2.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_http-access2.rb new file mode 100644 index 0000000..732c044 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_http-access2.rb @@ -0,0 +1,508 @@ +# -*- encoding: utf-8 -*- +require 'http-access2' +require File.expand_path('helper', File.dirname(__FILE__)) +require 'tempfile' + + +module HTTPAccess2 + + +class TestClient < Test::Unit::TestCase + include Helper + include HTTPClient::Util + + def setup + super + setup_server + setup_client + end + + def teardown + super + end + + def test_initialize + setup_proxyserver + escape_noproxy do + @proxyio.string = "" + @client = HTTPAccess2::Client.new(proxyurl) + assert_equal(urify(proxyurl), @client.proxy) + assert_equal(200, @client.head(serverurl).status) + assert(!@proxyio.string.empty?) + end + end + + def test_agent_name + @client = HTTPAccess2::Client.new(nil, "agent_name_foo") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^User-Agent: agent_name_foo/, lines[4]) + end + + def test_from + @client = HTTPAccess2::Client.new(nil, nil, "from_bar") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^From: from_bar/, lines[5]) + end + + def test_debug_dev + str = "" + @client.debug_dev = str + assert(str.empty?) + @client.get(serverurl) + assert(!str.empty?) 
+ end + + def _test_protocol_version_http09 + @client.protocol_version = 'HTTP/0.9' + str = "" + @client.debug_dev = str + @client.get(serverurl + 'hello') + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/0.9", lines[3]) + assert_equal("Connection: close", lines[5]) + assert_equal("= Response", lines[6]) + assert_match(/^hello/, lines[7]) + end + + def test_protocol_version_http10 + @client.protocol_version = 'HTTP/1.0' + str = "" + @client.debug_dev = str + @client.get(serverurl + 'hello') + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/1.0", lines[3]) + assert_equal("Connection: close", lines[7]) + assert_equal("= Response", lines[8]) + end + + def test_protocol_version_http11 + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: localhost:#{serverport}", lines[7]) + @client.protocol_version = 'HTTP/1.1' + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + @client.protocol_version = 'HTTP/1.0' + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.0", lines[3]) + end + + def test_proxy + setup_proxyserver + escape_noproxy do + begin + @client.proxy = "http://あ" + rescue => e + assert_match(/InvalidURIError/, e.class.to_s) + end + @client.proxy = "" + assert_nil(@client.proxy) + @client.proxy = "http://foo:1234" + assert_equal(urify("http://foo:1234"), @client.proxy) + uri = urify("http://bar:2345") + @client.proxy = uri + assert_equal(uri, @client.proxy) + # + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(@proxyio.string.empty?) + # + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(!@proxyio.string.empty?) + end + end + + def test_noproxy_for_localhost + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(@proxyio.string.empty?) + end + + def test_no_proxy + setup_proxyserver + escape_noproxy do + # proxy is not set. 
+ @client.no_proxy = 'localhost' + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:baz' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:443' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = "foobar,localhost:443:localhost:#{serverport},baz" + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + end + end + + def test_get_content + assert_equal('hello', @client.get_content(serverurl + 'hello')) + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + assert_equal('hello', @client.get_content(serverurl + 'redirect2')) + assert_raises(HTTPClient::Session::BadResponse) do + @client.get_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::Session::BadResponse) do + @client.get_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.get_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_post_content + assert_equal('hello', @client.post_content(serverurl + 'hello')) + assert_equal('hello', @client.post_content(serverurl + 'redirect1')) + assert_equal('hello', @client.post_content(serverurl + 'redirect2')) + assert_raises(HTTPClient::Session::BadResponse) do + @client.post_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::Session::BadResponse) do + @client.post_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.post_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_head + assert_equal("head", @client.head(serverurl + 'servlet').header["x-head"][0]) + param = {'1'=>'2', '3'=>'4'} + res = @client.head(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get + assert_equal("get", @client.get(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_post + assert_equal("post", @client.post(serverurl + 'servlet', '').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_put + assert_equal("put", @client.put(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_delete + assert_equal("delete", @client.delete(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 
'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_options + assert_equal("options", @client.options(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_trace + assert_equal("trace", @client.trace(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get_query + assert_equal({'1'=>'2'}, check_query_get({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_get({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_get({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_get({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_get([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_get('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_get('12+3=45&+=+')) + assert_equal({}, check_query_get('')) + end + + def test_post_body + assert_equal({'1'=>'2'}, check_query_post({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_post({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_post({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_post({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_post([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_post('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_post('12+3=45&+=+')) + assert_equal({}, check_query_post('')) + # + post_body = StringIO.new("foo=bar&foo=baz") + assert_equal( + ["bar", "baz"], + check_query_post(post_body)["foo"].to_ary.sort + ) + end + + def test_extra_headers + str = "" + @client.debug_dev = str + @client.head(serverurl, nil, {"ABC" => "DEF"}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + # + str = "" + @client.debug_dev = str + @client.get(serverurl, nil, [["ABC", "DEF"], ["ABC", "DEF"]]) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + assert_match("ABC: DEF", lines[5]) + end + + def test_timeout + assert_equal(60, @client.connect_timeout) + assert_equal(120, @client.send_timeout) + assert_equal(60, @client.receive_timeout) + end + + def test_connect_timeout + # ToDo + end + + def test_send_timeout + # ToDo + end + + def test_receive_timeout + # this test takes 2 sec + assert_equal('hello', @client.get_content(serverurl + 'sleep?sec=2')) + @client.reset_all + @client.receive_timeout = 1 + assert_equal('hello', @client.get_content(serverurl + 'sleep?sec=0')) + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.get_content(serverurl + 'sleep?sec=2') + end + @client.reset_all + @client.receive_timeout = 3 + assert_equal('hello', @client.get_content(serverurl + 'sleep?sec=2')) + end + + def test_cookies + cookiefile = Tempfile.new('test_cookies_file') + # from [ruby-talk:164079] + File.open(cookiefile.path, "wb") do |f| + f << "http://rubyforge.org//account/login.php session_ser LjEwMy45Ni40Ni0q%2A-fa0537de8cc31 2131676286 .rubyforge.org / 13\n" + end + cm = WebAgent::CookieManager::new(cookiefile.path) + cm.load_cookies + assert_equal(1, cm.cookies.size) + end + +private + + def check_query_get(query) + WEBrick::HTTPUtils.parse_query( + @client.get(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def 
check_query_post(query) + WEBrick::HTTPUtils.parse_query( + @client.post(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + [:hello, :sleep, :redirect1, :redirect2, :redirect3, :redirect_self, :relative_redirect].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server.mount('/servlet', TestServlet.new(@server)) + @server_thread = start_server_thread(@server) + end + + def escape_noproxy + backup = HTTPAccess2::Client::NO_PROXY_HOSTS.dup + HTTPAccess2::Client::NO_PROXY_HOSTS.clear + yield + ensure + HTTPAccess2::Client::NO_PROXY_HOSTS.replace(backup) + end + + def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" + end + + def do_sleep(req, res) + sec = req.query['sec'].to_i + sleep sec + res['content-type'] = 'text/html' + res.body = "hello" + end + + def do_redirect1(req, res) + res.set_redirect(WEBrick::HTTPStatus::MovedPermanently, serverurl + "hello") + end + + def do_redirect2(req, res) + res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, serverurl + "redirect3") + end + + def do_redirect3(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "hello") + end + + def do_redirect_self(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "redirect_self") + end + + def do_relative_redirect(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, "hello") + end + + class TestServlet < WEBrick::HTTPServlet::AbstractServlet + def get_instance(*arg) + self + end + + def do_HEAD(req, res) + res["x-head"] = 'head' # use this for test purpose only. + res["x-query"] = query_response(req) + end + + def do_GET(req, res) + res.body = 'get' + res["x-query"] = query_response(req) + end + + def do_POST(req, res) + res.body = 'post' + res["x-query"] = body_response(req) + end + + def do_PUT(req, res) + res.body = 'put' + end + + def do_DELETE(req, res) + res.body = 'delete' + end + + def do_OPTIONS(req, res) + # check RFC for legal response. + res.body = 'options' + end + + def do_TRACE(req, res) + # client SHOULD reflect the message received back to the client as the + # entity-body of a 200 (OK) response. 
[RFC2616] + res.body = 'trace' + res["x-query"] = query_response(req) + end + + private + + def query_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.query_string)) + end + + def body_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.body)) + end + + def query_escape(query) + escaped = [] + query.collect do |k, v| + v.to_ary.each do |ve| + escaped << CGI.escape(k) + '=' + CGI.escape(ve) + end + end + escaped.join('&') + end + end +end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_httpclient.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_httpclient.rb new file mode 100644 index 0000000..c8e5330 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_httpclient.rb @@ -0,0 +1,2145 @@ +# -*- encoding: utf-8 -*- +require File.expand_path('helper', File.dirname(__FILE__)) +require 'tempfile' + + +class TestHTTPClient < Test::Unit::TestCase + include Helper + include HTTPClient::Util + + def setup + super + setup_server + setup_client + end + + def teardown + super + end + + def test_initialize + setup_proxyserver + escape_noproxy do + @proxyio.string = "" + @client = HTTPClient.new(proxyurl) + assert_equal(urify(proxyurl), @client.proxy) + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + end + end + + def test_agent_name + @client = HTTPClient.new(nil, "agent_name_foo") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^User-Agent: agent_name_foo \(#{HTTPClient::VERSION}/, lines[4]) + end + + def test_from + @client = HTTPClient.new(nil, nil, "from_bar") + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match(/^From: from_bar/, lines[5]) + end + + def test_debug_dev + str = "" + @client.debug_dev = str + assert_equal(str.object_id, @client.debug_dev.object_id) + assert(str.empty?) + @client.get(serverurl) + assert(!str.empty?) + end + + def test_debug_dev_stream + str = "" + @client.debug_dev = str + conn = @client.get_async(serverurl) + Thread.pass while !conn.finished? + assert(!str.empty?) + end + + def test_protocol_version_http09 + @client.protocol_version = 'HTTP/0.9' + @client.debug_dev = str = '' + @client.test_loopback_http_response << "hello\nworld\n" + res = @client.get(serverurl + 'hello') + assert_equal('0.9', res.http_version) + assert_equal(nil, res.status) + assert_equal(nil, res.reason) + assert_equal("hello\nworld\n", res.content) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/0.9", lines[3]) + assert_equal("Connection: close", lines[7]) + assert_equal("= Response", lines[8]) + assert_match(/^hello$/, lines[9]) + assert_match(/^world$/, lines[10]) + end + + def test_protocol_version_http10 + assert_equal(nil, @client.protocol_version) + @client.protocol_version = 'HTTP/1.0' + assert_equal('HTTP/1.0', @client.protocol_version) + str = "" + @client.debug_dev = str + @client.get(serverurl + 'hello') + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! 
CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET /hello HTTP/1.0", lines[3]) + assert_equal("Connection: close", lines[7]) + assert_equal("= Response", lines[8]) + end + + def test_header_accept_by_default + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("Accept: */*", lines[5]) + end + + def test_header_accept + str = "" + @client.debug_dev = str + @client.get(serverurl, :header => {:Accept => 'text/html'}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("Accept: text/html", lines[4]) + end + + def test_header_symbol + str = "" + @client.debug_dev = str + @client.post(serverurl + 'servlet', :header => {:'Content-Type' => 'application/json'}, :body => 'hello') + lines = str.split(/(?:\r?\n)+/).grep(/^Content-Type/) + assert_equal(2, lines.size) # 1 for both request and response + end + + def test_host_given + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: localhost:#{serverport}", lines[7]) + # + @client.reset_all + str = "" + @client.debug_dev = str + @client.get(serverurl, nil, {'Host' => 'foo'}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: foo", lines[4]) # use given param + end + + def test_redirect_returns_not_modified + assert_nothing_raised do + ::Timeout.timeout(2) do + @client.get(serverurl + 'status', {:status => 306}, {:follow_redirect => true}) + end + end + end + + class LocationRemoveFilter + def filter_request(req); end + def filter_response(req, res); res.header.delete('Location'); end + end + + def test_redirect_without_location_should_gracefully_fail + @client.request_filter << LocationRemoveFilter.new + assert_raises(HTTPClient::BadResponseError) do + @client.get(serverurl + 'redirect1', :follow_redirect => true) + end + end + + def test_protocol_version_http11 + assert_equal(nil, @client.protocol_version) + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + assert_equal("Host: localhost:#{serverport}", lines[7]) + @client.protocol_version = 'HTTP/1.1' + assert_equal('HTTP/1.1', @client.protocol_version) + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.1", lines[3]) + @client.protocol_version = 'HTTP/1.0' + str = "" + @client.debug_dev = str + @client.get(serverurl) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_equal("! 
CONNECTION ESTABLISHED", lines[2]) + assert_equal("GET / HTTP/1.0", lines[3]) + end + + def test_proxy + setup_proxyserver + escape_noproxy do + begin + @client.proxy = "http://あ" + rescue => e + assert_match(/InvalidURIError/, e.class.to_s) + end + @client.proxy = "" + assert_nil(@client.proxy) + @client.proxy = "http://admin:admin@foo:1234" + assert_equal(urify("http://admin:admin@foo:1234"), @client.proxy) + uri = urify("http://bar:2345") + @client.proxy = uri + assert_equal(uri, @client.proxy) + # + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @proxyio.string = "" + @client.proxy = proxyurl + @client.debug_dev = str = "" + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + assert(/Host: localhost:#{serverport}/ =~ str) + end + end + + def test_host_header + @client.proxy = proxyurl + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + assert_equal(200, @client.head('http://www.example.com/foo').status) + # ensure no ':80' is added. some servers dislike that. + assert(/\r\nHost: www\.example\.com\r\n/ =~ str) + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + assert_equal(200, @client.head('http://www.example.com:12345/foo').status) + # ensure ':12345' exists. + assert(/\r\nHost: www\.example\.com:12345\r\n/ =~ str) + end + + def test_proxy_env + setup_proxyserver + escape_env do + ENV['http_proxy'] = "http://admin:admin@foo:1234" + ENV['NO_PROXY'] = "foobar" + client = HTTPClient.new + assert_equal(urify("http://admin:admin@foo:1234"), client.proxy) + assert_equal('foobar', client.no_proxy) + end + end + + def test_proxy_env_cgi + setup_proxyserver + escape_env do + ENV['REQUEST_METHOD'] = 'GET' # CGI environment emulation + ENV['http_proxy'] = "http://admin:admin@foo:1234" + ENV['no_proxy'] = "foobar" + client = HTTPClient.new + assert_equal(nil, client.proxy) + ENV['CGI_HTTP_PROXY'] = "http://admin:admin@foo:1234" + client = HTTPClient.new + assert_equal(urify("http://admin:admin@foo:1234"), client.proxy) + end + end + + def test_empty_proxy_env + setup_proxyserver + escape_env do + ENV['http_proxy'] = "" + client = HTTPClient.new + assert_equal(nil, client.proxy) + end + end + + def test_noproxy_for_localhost + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + end + + def test_no_proxy + setup_proxyserver + escape_noproxy do + # proxy is not set. 
+ assert_equal(nil, @client.no_proxy) + @client.no_proxy = 'localhost' + assert_equal('localhost', @client.no_proxy) + @proxyio.string = "" + @client.proxy = nil + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:baz' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + # + @client.no_proxy = 'foobar,localhost:443' + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ =~ @proxyio.string) + # + @client.no_proxy = "foobar,localhost:443:localhost:#{serverport},baz" + @proxyio.string = "" + @client.proxy = proxyurl + assert_equal(200, @client.head(serverurl).status) + assert(/accept/ !~ @proxyio.string) + end + end + + def test_no_proxy_with_initial_dot + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = '' + @client.proxy = proxyurl + @client.head('http://www.foo.com') + assert(/CONNECT TO localhost/ =~ str, 'via proxy') + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = '.foo.com' + @client.proxy = proxyurl + @client.head('http://www.foo.com') + assert(/CONNECT TO www.foo.com/ =~ str, 'no proxy because .foo.com matches with www.foo.com') + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = '.foo.com' + @client.proxy = proxyurl + @client.head('http://foo.com') + assert(/CONNECT TO localhost/ =~ str, 'via proxy because .foo.com does not matche with foo.com') + # + @client.debug_dev = str = "" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\n\r\n" + @client.no_proxy = 'foo.com' + @client.proxy = proxyurl + @client.head('http://foo.com') + assert(/CONNECT TO foo.com/ =~ str, 'no proxy because foo.com matches with foo.com') + end + + def test_cookie_update_while_authentication + escape_noproxy do + @client.test_loopback_http_response << < true).header.request_uri) + assert_equal(expected, @client.get(serverurl + 'redirect2', :follow_redirect => true).header.request_uri) + end + + def test_redirect_non_https + url = serverurl + 'redirect1' + https_url = urify(url) + https_url.scheme = 'https' + # + redirect_to_http = "HTTP/1.0 302 OK\nLocation: #{url}\n\n" + redirect_to_https = "HTTP/1.0 302 OK\nLocation: #{https_url}\n\n" + # + # https -> http is denied + @client.test_loopback_http_response << redirect_to_http + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(https_url) + end + # + # http -> http is OK + @client.reset_all + @client.test_loopback_http_response << redirect_to_http + assert_equal('hello', @client.get_content(url)) + # + # http -> https is OK + @client.reset_all + @client.test_loopback_http_response << redirect_to_https + assert_raises(OpenSSL::SSL::SSLError) do + # trying to normal endpoint with SSL -> SSL negotiation failure + @client.get_content(url) + end + # + # https -> https is OK + @client.reset_all + @client.test_loopback_http_response << redirect_to_https + 
assert_raises(OpenSSL::SSL::SSLError) do + # trying to normal endpoint with SSL -> SSL negotiation failure + @client.get_content(https_url) + end + # + # https -> http with strict_redirect_uri_callback + @client.redirect_uri_callback = @client.method(:strict_redirect_uri_callback) + @client.test_loopback_http_response << redirect_to_http + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(https_url) + end + end + + def test_redirect_see_other + assert_equal('hello', @client.post_content(serverurl + 'redirect_see_other')) + end + + def test_redirect_relative + @client.test_loopback_http_response << "HTTP/1.0 302 OK\nLocation: hello\n\n" + silent do + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + end + # + @client.reset_all + @client.redirect_uri_callback = @client.method(:strict_redirect_uri_callback) + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + @client.reset_all + @client.test_loopback_http_response << "HTTP/1.0 302 OK\nLocation: hello\n\n" + begin + @client.get_content(serverurl + 'redirect1') + assert(false) + rescue HTTPClient::BadResponseError => e + assert_equal(302, e.res.status) + end + end + + def test_redirect_https_relative + url = serverurl + 'redirect1' + https_url = urify(url) + https_url.scheme = 'https' + @client.test_loopback_http_response << "HTTP/1.0 302 OK\nLocation: /foo\n\n" + @client.test_loopback_http_response << "HTTP/1.0 200 OK\n\nhello" + silent do + assert_equal('hello', @client.get_content(https_url)) + end + end + + def test_no_content + assert_nothing_raised do + ::Timeout.timeout(2) do + @client.get(serverurl + 'status', :status => 101) + @client.get(serverurl + 'status', :status => 204) + @client.get(serverurl + 'status', :status => 304) + end + end + end + + def test_get_content + assert_equal('hello', @client.get_content(serverurl + 'hello')) + assert_equal('hello', @client.get_content(serverurl + 'redirect1')) + assert_equal('hello', @client.get_content(serverurl + 'redirect2')) + url = serverurl.sub(/localhost/, '127.0.0.1') + assert_equal('hello', @client.get_content(url + 'hello')) + assert_equal('hello', @client.get_content(url + 'redirect1')) + assert_equal('hello', @client.get_content(url + 'redirect2')) + @client.reset(serverurl) + @client.reset(url) + @client.reset(serverurl) + @client.reset(url) + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::BadResponseError) do + @client.get_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.get_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_get_content_with_base_url + @client = HTTPClient.new(:base_url => serverurl) + assert_equal('hello', @client.get_content('/hello')) + assert_equal('hello', @client.get_content('/redirect1')) + assert_equal('hello', @client.get_content('/redirect2')) + @client.reset('/') + assert_raises(HTTPClient::BadResponseError) do + @client.get_content('/notfound') + end + assert_raises(HTTPClient::BadResponseError) do + @client.get_content('/redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.get_content('/relative_redirect')) + assert(called) + end + + GZIP_CONTENT = 
"\x1f\x8b\x08\x00\x1a\x96\xe0\x4c\x00\x03\xcb\x48\xcd\xc9\xc9\x07\x00\x86\xa6\x10\x36\x05\x00\x00\x00" + DEFLATE_CONTENT = "\x78\x9c\xcb\x48\xcd\xc9\xc9\x07\x00\x06\x2c\x02\x15" + DEFLATE_NOHEADER_CONTENT = "x\x9C\xCBH\xCD\xC9\xC9\a\x00\x06,\x02\x15" + [GZIP_CONTENT, DEFLATE_CONTENT, DEFLATE_NOHEADER_CONTENT].each do |content| + content.force_encoding('BINARY') if content.respond_to?(:force_encoding) + end + def test_get_gzipped_content + @client.transparent_gzip_decompression = false + content = @client.get_content(serverurl + 'compressed?enc=gzip') + assert_not_equal('hello', content) + assert_equal(GZIP_CONTENT, content) + @client.transparent_gzip_decompression = true + @client.reset_all + assert_equal('hello', @client.get_content(serverurl + 'compressed?enc=gzip')) + assert_equal('hello', @client.get_content(serverurl + 'compressed?enc=deflate')) + assert_equal('hello', @client.get_content(serverurl + 'compressed?enc=deflate_noheader')) + @client.transparent_gzip_decompression = false + @client.reset_all + end + + def test_get_content_with_block + @client.get_content(serverurl + 'hello') do |str| + assert_equal('hello', str) + end + @client.get_content(serverurl + 'redirect1') do |str| + assert_equal('hello', str) + end + @client.get_content(serverurl + 'redirect2') do |str| + assert_equal('hello', str) + end + end + + def test_post_content + assert_equal('hello', @client.post_content(serverurl + 'hello')) + assert_equal('hello', @client.post_content(serverurl + 'redirect1')) + assert_equal('hello', @client.post_content(serverurl + 'redirect2')) + assert_raises(HTTPClient::BadResponseError) do + @client.post_content(serverurl + 'notfound') + end + assert_raises(HTTPClient::BadResponseError) do + @client.post_content(serverurl + 'redirect_self') + end + called = false + @client.redirect_uri_callback = lambda { |uri, res| + newuri = res.header['location'][0] + called = true + newuri + } + assert_equal('hello', @client.post_content(serverurl + 'relative_redirect')) + assert(called) + end + + def test_post_content_io + post_body = StringIO.new("1234567890") + assert_equal('post,1234567890', @client.post_content(serverurl + 'servlet', post_body)) + post_body = StringIO.new("1234567890") + assert_equal('post,1234567890', @client.post_content(serverurl + 'servlet_redirect', post_body)) + # + post_body = StringIO.new("1234567890") + post_body.read(5) + assert_equal('post,67890', @client.post_content(serverurl + 'servlet_redirect', post_body)) + end + + def test_head + assert_equal("head", @client.head(serverurl + 'servlet').header["x-head"][0]) + param = {'1'=>'2', '3'=>'4'} + res = @client.head(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_head_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.head_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get + assert_equal("get", @client.get(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_nil(res.contenttype) + # + url = serverurl.to_s + 'servlet?5=6&7=8' + res = @client.get(url, param) + assert_equal(param.merge("5"=>"6", "7"=>"8"), params(res.header["x-query"][0])) + assert_nil(res.contenttype) + end + + def test_get_with_base_url + @client = HTTPClient.new(:base_url => serverurl) + assert_equal("get", @client.get('/servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.get('/servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_nil(res.contenttype) + # + @client.base_url = serverurl[0...-1] + '/servlet' + url = '?5=6&7=8' + res = @client.get(url, param) + assert_equal(param.merge("5"=>"6", "7"=>"8"), params(res.header["x-query"][0])) + assert_nil(res.contenttype) + end + + def test_get_with_default_header + @client = HTTPClient.new(:base_url => serverurl, :default_header => {'x-header' => 'custom'}) + assert_equal('custom', @client.get('/servlet').headers['X-Header']) + @client.default_header = {'x-header' => 'custom2'} + assert_equal('custom2', @client.get('/servlet').headers['X-Header']) + # passing Hash overrides + assert_equal('custom3', @client.get('/servlet', :header => {'x-header' => 'custom3'}).headers['X-Header']) + # passing Array does not override + assert_equal('custom2, custom4', @client.get('/servlet', :header => [['x-header', 'custom4']]).headers['X-Header']) + end + + def test_head_follow_redirect + expected = urify(serverurl + 'hello') + assert_equal(expected, @client.head(serverurl + 'hello', :follow_redirect => true).header.request_uri) + assert_equal(expected, @client.head(serverurl + 'redirect1', :follow_redirect => true).header.request_uri) + assert_equal(expected, @client.head(serverurl + 'redirect2', :follow_redirect => true).header.request_uri) + end + + def test_get_follow_redirect + assert_equal('hello', @client.get(serverurl + 'hello', :follow_redirect => true).body) + assert_equal('hello', @client.get(serverurl + 'redirect1', :follow_redirect => true).body) + + res = @client.get(serverurl + 'redirect2', :follow_redirect => true) + assert_equal('hello', res.body) + assert_equal("http://localhost:#{@serverport}/hello", res.header.request_uri.to_s) + assert_equal("http://localhost:#{@serverport}/redirect3", res.previous.header.request_uri.to_s) + assert_equal("http://localhost:#{@serverport}/redirect2", res.previous.previous.header.request_uri.to_s) + assert_equal(nil, res.previous.previous.previous) + end + + def test_get_follow_redirect_with_query + assert_equal('hello?1=2&3=4', @client.get(serverurl + 'redirect1', :query => {1 => 2, 3 => 4}, :follow_redirect => true).body) + end + + def test_get_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.get_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get_async_with_base_url + param = {'1'=>'2', '3'=>'4'} + @client = HTTPClient.new(:base_url => serverurl) + + # Use preconfigured :base_url + conn = @client.get_async('servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + # full URL still works + conn = @client.get_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_get_async_for_largebody + conn = @client.get_async(serverurl + 'largebody') + res = conn.pop + assert_equal(1000*1000, res.content.read.length) + end + + if RUBY_VERSION > "1.9" + def test_post_async_with_default_internal + original_encoding = Encoding.default_internal + Encoding.default_internal = Encoding::UTF_8 + begin + post_body = StringIO.new("こんにちは") + conn = @client.post_async(serverurl + 'servlet', post_body) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal 'post,こんにちは', res.content.read + ensure + Encoding.default_internal = original_encoding + end + end + end + + def test_get_with_block + called = false + res = @client.get(serverurl + 'servlet') { |str| + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_arity_2 + called = false + res = @client.get(serverurl + 'servlet') { |blk_res, str| + assert_equal(200, blk_res.status) + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_and_redirects + called = false + res = @client.get(serverurl + 'servlet', :follow_redirect => true) { |str| + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_arity_2_and_redirects + called = false + res = @client.get(serverurl + 'servlet', :follow_redirect => true) { |blk_res, str| + assert_equal(200, blk_res.status) + assert_equal('get', str) + called = true + } + assert(called) + # res does not have a content + assert_nil(res.content) + end + + def test_get_with_block_string_recycle + @client.read_block_size = 2 + body = [] + _res = @client.get(serverurl + 'servlet') { |str| + body << str + } + assert_equal(2, body.size) + assert_equal("get", body.join) # Was "tt" by String object recycle... + end + + def test_get_with_block_chunked_string_recycle + server = TCPServer.open('localhost', 0) + server_thread = Thread.new { + Thread.abort_on_exception = true + sock = server.accept + create_keepalive_thread(1, sock) + } + url = "http://localhost:#{server.addr[1]}/" + body = [] + begin + _res = @client.get(url + 'chunked') { |str| + body << str + } + ensure + server.close + server_thread.join + end + assert_equal('abcdefghijklmnopqrstuvwxyz1234567890abcdef', body.join) + end + + def test_post + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4]) + param = {'1'=>'2', '3'=>'4'} + res = @client.post(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_post_empty + @client.debug_dev = str = '' + # nil body means 'no content' that is allowed but WEBrick cannot handle it. 
+ @client.post(serverurl + 'servlet', :body => nil) + # request does not have 'Content-Type' + assert_equal(1, str.scan(/content-type/i).size) + end + + def test_post_with_query + # this {:query => 'query'} recognized as body + res = @client.post(serverurl + 'servlet', :query => 'query') + assert_equal("post", res.content[0, 4]) + assert_equal("query=query", res.headers["X-Query"]) + assert_equal("", res.headers["X-Request-Query"]) + end + + def test_post_with_query_and_body + res = @client.post(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("post", res.content[0, 4]) + assert_equal("body=body", res.headers["X-Query"]) + assert_equal("query=query", res.headers["X-Request-Query"]) + end + + def test_post_follow_redirect + assert_equal('hello', @client.post(serverurl + 'hello', :follow_redirect => true).body) + assert_equal('hello', @client.post(serverurl + 'redirect1', :follow_redirect => true).body) + assert_equal('hello', @client.post(serverurl + 'redirect2', :follow_redirect => true).body) + end + + def test_post_with_content_type + param = [['1', '2'], ['3', '4']] + ext = {'content-type' => 'application/x-www-form-urlencoded', 'hello' => 'world'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_equal(Hash[param], params(res.header["x-query"][0])) + # + ext = [['content-type', 'multipart/form-data'], ['hello', 'world']] + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + # + ext = {'content-type' => 'multipart/form-data; boundary=hello'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + assert_equal("post,--hello\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n2\r\n--hello\r\nContent-Disposition: form-data; name=\"3\"\r\n\r\n4\r\n--hello--\r\n\r\n", res.content) + end + + def test_post_with_custom_multipart_and_boolean_params + param = [['boolean_true', true]] + ext = { 'content-type' => 'multipart/form-data' } + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="boolean_true"\r\n\r\ntrue\r\n/, res.content) + # + param = [['boolean_false', false]] + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="boolean_false"\r\n\r\nfalse\r\n/, res.content) + # + param = [['nil', nil]] + res = @client.post(serverurl + 'servlet', param, ext) + assert_match(/Content-Disposition: form-data; name="nil"\r\n\r\n\r\n/, res.content) + end + + def test_post_with_file + STDOUT.sync = true + File.open(__FILE__) do |file| + res = @client.post(serverurl + 'servlet', {1=>2, 3=>file}) + assert_match(/^Content-Disposition: form-data; name="1"\r\n/nm, res.content) + assert_match(/^Content-Disposition: form-data; name="3";/, res.content) + assert_match(/FIND_TAG_IN_THIS_FILE/, res.content) + end + end + + def test_post_with_file_without_size + STDOUT.sync = true + File.open(__FILE__) do |file| + def file.size + # 
Simulates some strange Windows behaviour + raise SystemCallError.new "Unknown Error (20047)" + end + assert_nothing_raised do + @client.post(serverurl + 'servlet', {1=>2, 3=>file}) + end + end + end + + def test_post_with_io # streaming, but not chunked + myio = StringIO.new("X" * (HTTP::Message::Body::DEFAULT_CHUNK_SIZE + 1)) + def myio.read(*args) + @called ||= 0 + @called += 1 + super + end + def myio.called + @called + end + @client.debug_dev = str = StringIO.new + res = @client.post(serverurl + 'servlet', {1=>2, 3=>myio}) + assert_match(/\r\nContent-Disposition: form-data; name="1"\r\n/m, res.content) + assert_match(/\r\n2\r\n/m, res.content) + assert_match(/\r\nContent-Disposition: form-data; name="3"; filename=""\r\n/m, res.content) + assert_match(/\r\nContent-Length:/m, str.string) + # HTTPClient reads from head to 'size'; CHUNK_SIZE bytes then 1 byte, that's all. + assert_equal(2, myio.called) + end + + def test_post_with_io_nosize # streaming + chunked post + myio = StringIO.new("4") + def myio.size + nil + end + @client.debug_dev = str = StringIO.new + res = @client.post(serverurl + 'servlet', {1=>2, 3=>myio}) + assert_match(/\r\nContent-Disposition: form-data; name="1"\r\n/m, res.content) + assert_match(/\r\n2\r\n/m, res.content) + assert_match(/\r\nContent-Disposition: form-data; name="3"; filename=""\r\n/m, res.content) + assert_match(/\r\n4\r\n/m, res.content) + assert_match(/\r\nTransfer-Encoding: chunked\r\n/m, str.string) + end + + def test_post_with_sized_io + myio = StringIO.new("45") + def myio.size + 1 + end + res = @client.post(serverurl + 'servlet', myio) + assert_equal('post,4', res.content) + end + + def test_post_with_sized_io_part + myio = StringIO.new("45") + def myio.size + 1 + end + @client.debug_dev = str = StringIO.new + _res = @client.post(serverurl + 'servlet', { :file => myio }) + assert_match(/\r\n4\r\n/, str.string, 'should send "4" not "45"') + end + + def test_post_with_unknown_sized_io_part + myio1 = StringIO.new("123") + myio2 = StringIO.new("45") + class << myio1 + undef :size + end + class << myio2 + # This does not work because other file is 'unknown sized' + def size + 1 + end + end + @client.debug_dev = str = StringIO.new + _res = @client.post(serverurl + 'servlet', { :file1 => myio1, :file2 => myio2 }) + assert_match(/\r\n45\r\n/, str.string) + end + + def test_post_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.post_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_post_with_block + called = false + res = @client.post(serverurl + 'servlet', '') { |str| + assert_equal('post,', str) + called = true + } + assert(called) + assert_nil(res.content) + # + called = false + param = [['1', '2'], ['3', '4']] + res = @client.post(serverurl + 'servlet', param) { |str| + assert_equal('post,1=2&3=4', str) + called = true + } + assert(called) + assert_equal('1=2&3=4', res.header["x-query"][0]) + assert_nil(res.content) + end + + def test_post_with_custom_multipart + ext = {'content-type' => 'multipart/form-data'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + body = [{ 'Content-Disposition' => 'form-data; name="1"', :content => "2"}, + { 'Content-Disposition' => 'form-data; name="3"', :content => "4"}] + res = @client.post(serverurl + 'servlet', body, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + # + ext = {'content-type' => 'multipart/form-data; boundary=hello'} + assert_equal("post", @client.post(serverurl + 'servlet', '').content[0, 4], ext) + res = @client.post(serverurl + 'servlet', body, ext) + assert_match(/Content-Disposition: form-data; name="1"/, res.content) + assert_match(/Content-Disposition: form-data; name="3"/, res.content) + assert_equal("post,--hello\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n2\r\n--hello\r\nContent-Disposition: form-data; name=\"3\"\r\n\r\n4\r\n--hello--\r\n\r\n", res.content) + end + + def test_post_with_custom_multipart_and_file + STDOUT.sync = true + File.open(__FILE__) do |file| + def file.original_filename + 'file.txt' + end + + ext = { 'Content-Type' => 'multipart/alternative' } + body = [{ 'Content-Type' => 'text/plain', :content => "this is only a test" }, + { 'Content-Type' => 'application/x-ruby', :content => file }] + res = @client.post(serverurl + 'servlet', body, ext) + assert_match(/^Content-Type: text\/plain\r\n/m, res.content) + assert_match(/^this is only a test\r\n/m, res.content) + assert_match(/^Content-Type: application\/x-ruby\r\n/m, res.content) + assert_match(/Content-Disposition: form-data; name="3"; filename="file.txt"/, res.content) + assert_match(/FIND_TAG_IN_THIS_FILE/, res.content) + end + end + + def test_patch + assert_equal("patch", @client.patch(serverurl + 'servlet', '').content) + param = {'1'=>'2', '3'=>'4'} + @client.debug_dev = str = '' + res = @client.patch(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_equal('Content-Type: application/x-www-form-urlencoded', str.split(/\r?\n/)[5]) + end + + def test_patch_with_query_and_body + res = @client.patch(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("patch", res.content) + assert_equal("body=body", res.headers["X-Query"]) + assert_equal("query=query", res.headers["X-Request-Query"]) + end + + def test_patch_bytesize + res = @client.patch(serverurl + 'servlet', 'txt' => 'あいうえお') + assert_equal('txt=%E3%81%82%E3%81%84%E3%81%86%E3%81%88%E3%81%8A', res.header["x-query"][0]) + assert_equal('15', res.header["x-size"][0]) + end + + def test_patch_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.patch_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_put + assert_equal("put", @client.put(serverurl + 'servlet', '').content) + param = {'1'=>'2', '3'=>'4'} + @client.debug_dev = str = '' + res = @client.put(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + assert_equal('Content-Type: application/x-www-form-urlencoded', str.split(/\r?\n/)[5]) + end + + def test_put_with_query_and_body + res = @client.put(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("put", res.content) + assert_equal("body=body", res.headers["X-Query"]) + assert_equal("query=query", res.headers["X-Request-Query"]) + end + + def test_put_bytesize + res = @client.put(serverurl + 'servlet', 'txt' => 'あいうえお') + assert_equal('txt=%E3%81%82%E3%81%84%E3%81%86%E3%81%88%E3%81%8A', res.header["x-query"][0]) + assert_equal('15', res.header["x-size"][0]) + end + + def test_put_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.put_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_delete + assert_equal("delete", @client.delete(serverurl + 'servlet').content) + end + + def test_delete_with_query + res = @client.delete(serverurl + 'servlet', :query => {:query => 'query'}) + assert_equal("delete", res.content) + assert_equal('query=query', res.headers['X-Request-Query']) + end + + def test_delete_with_query_and_body + res = @client.delete(serverurl + 'servlet', :query => {:query => 'query'}, :body => {:body => 'body'}) + assert_equal("delete", res.content) + assert_equal('query=query', res.headers['X-Request-Query']) + assert_equal('body=body', res.headers['X-Query']) + end + + # Not prohibited by spec, but normally it's ignored + def test_delete_with_body + param = {'1'=>'2', '3'=>'4'} + @client.debug_dev = str = '' + assert_equal("delete", @client.delete(serverurl + 'servlet', param).content) + assert_equal({'1' => ['2'], '3' => ['4']}, HTTP::Message.parse(str.split(/\r?\n\r?\n/)[2])) + end + + def test_delete_async + conn = @client.delete_async(serverurl + 'servlet') + Thread.pass while !conn.finished? + res = conn.pop + assert_equal('delete', res.content.read) + end + + def test_options + assert_equal('options', @client.options(serverurl + 'servlet').content) + end + + def test_options_with_header + res = @client.options(serverurl + 'servlet', {'x-header' => 'header'}) + assert_equal('header', res.headers['X-Header']) + end + + def test_options_with_body + res = @client.options(serverurl + 'servlet', :body => 'body') + assert_equal('body', res.headers['X-Body']) + end + + def test_options_with_body_and_header + res = @client.options(serverurl + 'servlet', :body => 'body', :header => {'x-header' => 'header'}) + assert_equal('header', res.headers['X-Header']) + assert_equal('body', res.headers['X-Body']) + end + + def test_options_async + conn = @client.options_async(serverurl + 'servlet') + Thread.pass while !conn.finished? + res = conn.pop + assert_equal('options', res.content.read) + end + + def test_propfind + assert_equal("propfind", @client.propfind(serverurl + 'servlet').content) + end + + def test_propfind_async + conn = @client.propfind_async(serverurl + 'servlet') + Thread.pass while !conn.finished? 
+ res = conn.pop + assert_equal('propfind', res.content.read) + end + + def test_proppatch + assert_equal("proppatch", @client.proppatch(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.proppatch(serverurl + 'servlet', param) + assert_equal('proppatch', res.content) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_proppatch_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.proppatch_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal('proppatch', res.content.read) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_trace + assert_equal("trace", @client.trace(serverurl + 'servlet').content) + param = {'1'=>'2', '3'=>'4'} + res = @client.trace(serverurl + 'servlet', param) + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_trace_async + param = {'1'=>'2', '3'=>'4'} + conn = @client.trace_async(serverurl + 'servlet', param) + Thread.pass while !conn.finished? + res = conn.pop + assert_equal(param, params(res.header["x-query"][0])) + end + + def test_chunked + assert_equal('chunked', @client.get_content(serverurl + 'chunked', { 'msg' => 'chunked' })) + assert_equal('あいうえお', @client.get_content(serverurl + 'chunked', { 'msg' => 'あいうえお' })) + end + + def test_chunked_empty + assert_equal('', @client.get_content(serverurl + 'chunked', { 'msg' => '' })) + end + + def test_get_query + assert_equal({'1'=>'2'}, check_query_get({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_get({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_get({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_get({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_get([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_get('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_get('12+3=45&+=+')) + assert_equal({}, check_query_get('')) + assert_equal({'1'=>'2'}, check_query_get({1=>StringIO.new('2')})) + assert_equal({'1'=>'2', '3'=>'4'}, check_query_get(StringIO.new('3=4&1=2'))) + + hash = check_query_get({"a"=>["A","a"], "B"=>"b"}) + assert_equal({'a'=>'A', 'B'=>'b'}, hash) + assert_equal(['A','a'], hash['a'].to_ary) + + hash = check_query_get({"a"=>WEBrick::HTTPUtils::FormData.new("A","a"), "B"=>"b"}) + assert_equal({'a'=>'A', 'B'=>'b'}, hash) + assert_equal(['A','a'], hash['a'].to_ary) + + hash = check_query_get({"a"=>[StringIO.new("A"),StringIO.new("a")], "B"=>StringIO.new("b")}) + assert_equal({'a'=>'A', 'B'=>'b'}, hash) + assert_equal(['A','a'], hash['a'].to_ary) + end + + def test_post_body + assert_equal({'1'=>'2'}, check_query_post({1=>2})) + assert_equal({'a'=>'A', 'B'=>'b'}, check_query_post({"a"=>"A", "B"=>"b"})) + assert_equal({'&'=>'&'}, check_query_post({"&"=>"&"})) + assert_equal({'= '=>' =+'}, check_query_post({"= "=>" =+"})) + assert_equal( + ['=', '&'].sort, + check_query_post([["=", "="], ["=", "&"]])['='].to_ary.sort + ) + assert_equal({'123'=>'45'}, check_query_post('123=45')) + assert_equal({'12 3'=>'45', ' '=>' '}, check_query_post('12+3=45&+=+')) + assert_equal({}, check_query_post('')) + # + post_body = StringIO.new("foo=bar&foo=baz") + assert_equal( + ["bar", "baz"], + check_query_post(post_body)["foo"].to_ary.sort + ) + end + + def test_extra_headers + str = "" + @client.debug_dev = str + @client.head(serverurl, nil, {"ABC" => "DEF"}) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + # + str = "" + 
@client.debug_dev = str + @client.get(serverurl, nil, [["ABC", "DEF"], ["ABC", "DEF"]]) + lines = str.split(/(?:\r?\n)+/) + assert_equal("= Request", lines[0]) + assert_match("ABC: DEF", lines[4]) + assert_match("ABC: DEF", lines[5]) + end + + def test_http_custom_date_header + @client.debug_dev = (str = "") + _res = @client.get(serverurl + 'hello', :header => {'Date' => 'foo'}) + lines = str.split(/(?:\r?\n)+/) + assert_equal('Date: foo', lines[4]) + end + + def test_timeout + assert_equal(60, @client.connect_timeout) + assert_equal(120, @client.send_timeout) + assert_equal(60, @client.receive_timeout) + # + @client.connect_timeout = 1 + @client.send_timeout = 2 + @client.receive_timeout = 3 + assert_equal(1, @client.connect_timeout) + assert_equal(2, @client.send_timeout) + assert_equal(3, @client.receive_timeout) + end + + def test_connect_timeout + # ToDo + end + + def test_send_timeout + # ToDo + end + + def test_receive_timeout + # this test takes 2 sec + assert_equal('hello?sec=2', @client.get_content(serverurl + 'sleep?sec=2')) + @client.receive_timeout = 1 + @client.reset_all + assert_equal('hello?sec=0', @client.get_content(serverurl + 'sleep?sec=0')) + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.get_content(serverurl + 'sleep?sec=2') + end + @client.receive_timeout = 3 + @client.reset_all + assert_equal('hello?sec=2', @client.get_content(serverurl + 'sleep?sec=2')) + end + + def test_receive_timeout_post + # this test takes 2 sec + assert_equal('hello', @client.post(serverurl + 'sleep', :sec => 2).content) + @client.receive_timeout = 1 + @client.reset_all + assert_equal('hello', @client.post(serverurl + 'sleep', :sec => 0).content) + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.post(serverurl + 'sleep', :sec => 2) + end + @client.receive_timeout = 3 + @client.reset_all + assert_equal('hello', @client.post(serverurl + 'sleep', :sec => 2).content) + end + + def test_reset + url = serverurl + 'servlet' + assert_nothing_raised do + 5.times do + @client.get(url) + @client.reset(url) + end + end + end + + def test_reset_all + assert_nothing_raised do + 5.times do + @client.get(serverurl + 'servlet') + @client.reset_all + end + end + end + + def test_cookies + cookiefile = Tempfile.new('test_cookies_file') + File.open(cookiefile.path, "wb") do |f| + f << "http://rubyforge.org/account/login.php\tsession_ser\tLjEwMy45Ni40Ni0q%2A-fa0537de8cc31\t2000000000\trubyforge.org\t/account/\t9\n" + end + @client.set_cookie_store(cookiefile.path) + # + @client.reset_all + @client.test_loopback_http_response << "HTTP/1.0 200 OK\nSet-Cookie: session_ser=bar; expires=#{Time.at(1924873200).gmtime.httpdate}\n\nOK" + @client.get_content('http://rubyforge.org/account/login.php') + @client.save_cookie_store + str = File.read(cookiefile.path) + assert_match(%r(http://rubyforge.org/account/login.php\tsession_ser\tbar\t1924873200\trubyforge.org\t/account/\t9), str) + end + + def test_eof_error_length + io = StringIO.new('') + def io.gets(*arg) + @buf ||= ["HTTP/1.0 200 OK\n", "content-length: 123\n", "\n"] + @buf.shift + end + def io.readpartial(size, buf) + @second ||= false + if !@second + @second = '1st' + buf << "abc" + buf + elsif @second == '1st' + @second = '2nd' + raise EOFError.new + else + raise Exception.new + end + end + def io.eof? 
+ true + end + @client.test_loopback_http_response << io + assert_nothing_raised do + @client.get('http://foo/bar') + end + end + + def test_eof_error_rest + io = StringIO.new('') + def io.gets(*arg) + @buf ||= ["HTTP/1.0 200 OK\n", "\n"] + @buf.shift + end + def io.readpartial(size, buf) + @second ||= false + if !@second + @second = '1st' + buf << "abc" + buf + elsif @second == '1st' + @second = '2nd' + raise EOFError.new + else + raise Exception.new + end + end + def io.eof? + true + end + @client.test_loopback_http_response << io + assert_nothing_raised do + @client.get('http://foo/bar') + end + end + + def test_urify + extend HTTPClient::Util + assert_nil(urify(nil)) + uri = 'http://foo' + assert_equal(urify(uri), urify(uri)) + assert_equal(urify(uri), urify(urify(uri))) + end + + def test_connection + c = HTTPClient::Connection.new + assert(c.finished?) + assert_nil(c.join) + end + + def test_site + site = HTTPClient::Site.new + assert_equal('tcp', site.scheme) + assert_equal('0.0.0.0', site.host) + assert_equal(0, site.port) + assert_equal('tcp://0.0.0.0:0', site.addr) + assert_equal('tcp://0.0.0.0:0', site.to_s) + assert_nothing_raised do + site.inspect + end + # + site = HTTPClient::Site.new(urify('http://localhost:12345/foo')) + assert_equal('http', site.scheme) + assert_equal('localhost', site.host) + assert_equal(12345, site.port) + assert_equal('http://localhost:12345', site.addr) + assert_equal('http://localhost:12345', site.to_s) + assert_nothing_raised do + site.inspect + end + # + site1 = HTTPClient::Site.new(urify('http://localhost:12341/')) + site2 = HTTPClient::Site.new(urify('http://localhost:12342/')) + site3 = HTTPClient::Site.new(urify('http://localhost:12342/')) + assert(!(site1 == site2)) + h = { site1 => 'site1', site2 => 'site2' } + h[site3] = 'site3' + assert_equal('site1', h[site1]) + assert_equal('site3', h[site2]) + end + + def test_http_header + res = @client.get(serverurl + 'hello') + assert_equal('text/html', res.contenttype) + assert_equal(5, res.header.get(nil).size) + # + res.header.delete('connection') + assert_equal(4, res.header.get(nil).size) + # + res.header['foo'] = 'bar' + assert_equal(['bar'], res.header['foo']) + # + assert_equal([['foo', 'bar']], res.header.get('foo')) + res.header['foo'] = ['bar', 'bar2'] + assert_equal([['foo', 'bar'], ['foo', 'bar2']], res.header.get('foo')) + end + + def test_mime_type + assert_equal('text/plain', HTTP::Message.mime_type('foo.txt')) + assert_equal('text/html', HTTP::Message.mime_type('foo.html')) + assert_equal('text/html', HTTP::Message.mime_type('foo.htm')) + assert_equal('text/xml', HTTP::Message.mime_type('foo.xml')) + assert_equal('application/msword', HTTP::Message.mime_type('foo.doc')) + assert_equal('image/png', HTTP::Message.mime_type('foo.png')) + assert_equal('image/gif', HTTP::Message.mime_type('foo.gif')) + assert_equal('image/jpeg', HTTP::Message.mime_type('foo.jpg')) + assert_equal('image/jpeg', HTTP::Message.mime_type('foo.jpeg')) + assert_equal('application/octet-stream', HTTP::Message.mime_type('foo.unknown')) + # + handler = lambda { |path| 'hello/world' } + assert_nil(HTTP::Message.mime_type_handler) + assert_nil(HTTP::Message.get_mime_type_func) + HTTP::Message.mime_type_handler = handler + assert_not_nil(HTTP::Message.mime_type_handler) + assert_not_nil(HTTP::Message.get_mime_type_func) + assert_equal('hello/world', HTTP::Message.mime_type('foo.txt')) + HTTP::Message.mime_type_handler = nil + assert_equal('text/plain', HTTP::Message.mime_type('foo.txt')) + 
HTTP::Message.set_mime_type_func(nil) + assert_equal('text/plain', HTTP::Message.mime_type('foo.txt')) + # + handler = lambda { |path| nil } + HTTP::Message.mime_type_handler = handler + assert_equal('application/octet-stream', HTTP::Message.mime_type('foo.txt')) + end + + def test_connect_request + req = HTTP::Message.new_connect_request(urify('https://foo/bar')) + assert_equal("CONNECT foo:443 HTTP/1.0\r\n\r\n", req.dump) + req = HTTP::Message.new_connect_request(urify('https://example.com/')) + assert_equal("CONNECT example.com:443 HTTP/1.0\r\n\r\n", req.dump) + end + + def test_response + res = HTTP::Message.new_response('response') + res.contenttype = 'text/plain' + res.header.body_date = Time.at(946652400) + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "Status: 200 OK", + "response" + ], + res.dump.split(/\r\n/).sort + ) + assert_equal(['8'], res.header['Content-Length']) + assert_equal('8', res.headers['Content-Length']) + res.header.set('foo', 'bar') + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "Status: 200 OK", + "foo: bar", + "response" + ], + res.dump.split(/\r\n/).sort + ) + # nil body + res = HTTP::Message.new_response(nil) + assert_equal( + [ + "Content-Length: 0", + "Content-Type: text/html; charset=us-ascii", + "Status: 200 OK" + ], + res.dump.split(/\r\n/).sort + ) + # for mod_ruby env + Object.const_set('Apache', nil) + begin + res = HTTP::Message.new_response('response') + assert(res.dump.split(/\r\n/).any? { |line| /^Date/ =~ line }) + # + res = HTTP::Message.new_response('response') + res.contenttype = 'text/plain' + res.header.body_date = Time.at(946652400) + res.header['Date'] = Time.at(946652400).httpdate + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Date: Fri, 31 Dec 1999 15:00:00 GMT", + "HTTP/1.1 200 OK", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "response" + ], + res.dump.split(/\r\n/).sort + ) + ensure + Object.instance_eval { remove_const('Apache') } + end + end + + def test_response_cookies + res = HTTP::Message.new_response('response') + res.contenttype = 'text/plain' + res.header.body_date = Time.at(946652400) + res.header.request_uri = 'http://www.example.com/' + assert_nil(res.cookies) + # + res.header['Set-Cookie'] = [ + 'CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT', + 'PART_NUMBER=ROCKET_LAUNCHER_0001; path=/' + ] + assert_equal( + [ + "", + "Content-Length: 8", + "Content-Type: text/plain", + "Last-Modified: Fri, 31 Dec 1999 15:00:00 GMT", + "Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT", + "Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/", + "Status: 200 OK", + "response" + ], + res.dump.split(/\r\n/).sort + ) + assert_equal(2, res.cookies.size) + assert_equal('CUSTOMER', res.cookies[0].name) + assert_equal('PART_NUMBER', res.cookies[1].name) + end + + def test_ok_response_success + res = HTTP::Message.new_response('response') + assert_equal(true, res.ok?) + res.status = 404 + assert_equal(false, res.ok?) + res.status = 500 + assert_equal(false, res.ok?) + res.status = 302 + assert_equal(false, res.ok?) 
+ end + + if !defined?(JRUBY_VERSION) and RUBY_VERSION < '1.9' + def test_timeout_scheduler + assert_equal('hello', @client.get_content(serverurl + 'hello')) + status = HTTPClient.timeout_scheduler.instance_eval { @thread.kill; @thread.join; @thread.status } + assert(!status) # dead + assert_equal('hello', @client.get_content(serverurl + 'hello')) + end + end + + def test_session_manager + mgr = HTTPClient::SessionManager.new(@client) + assert_nil(mgr.instance_eval { @proxy }) + assert_nil(mgr.debug_dev) + @client.debug_dev = Object.new + @client.proxy = 'http://myproxy:12345' + mgr = HTTPClient::SessionManager.new(@client) + assert_equal('http://myproxy:12345', mgr.instance_eval { @proxy }.to_s) + assert_equal(@client.debug_dev, mgr.debug_dev) + end + + def create_keepalive_disconnected_thread(idx, sock) + Thread.new { + # return "12345" for the first connection + sock.gets + sock.gets + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("12345") + # for the next connection, close while reading the request for emulating + # KeepAliveDisconnected + sock.gets + sock.close + } + end + + def test_keepalive_disconnected + client = HTTPClient.new + server = TCPServer.open('127.0.0.1', 0) + server.listen(30) # set enough backlogs + endpoint = "http://127.0.0.1:#{server.addr[1]}/" + queue = Queue.new + Thread.new(queue) { |qs| + Thread.abort_on_exception = true + # want 5 requests issued + 5.times { qs.pop } + # emulate 10 keep-alive connections + 10.times do |idx| + sock = server.accept + create_keepalive_disconnected_thread(idx, sock) + end + # return "23456" for the request which gets KeepAliveDisconnected + 5.times do + sock = server.accept + sock.gets + sock.gets + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("\r\n") + sock.write("23456") + sock.close + end + # return "34567" for the rest requests + while true + sock = server.accept + sock.gets + sock.gets + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Connection: close\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("34567") + sock.close + end + } + # try to allocate 10 keep-alive connections; it's a race so some + # threads can reuse the connection so actual number of keep-alive + # connections should be smaller than 10. + (0...10).to_a.map { + Thread.new(queue) { |qc| + Thread.abort_on_exception = true + conn = client.get_async(endpoint) + qc.push(true) + assert_equal("12345", conn.pop.content.read) + } + }.each { |th| th.join } + # send 5 requests, some of these should get KeepAliveDesconnected + # but should retry with new connection. + (0...5).to_a.map { + Thread.new { + Thread.abort_on_exception = true + assert_equal("23456", client.get(endpoint).content) + } + }.each { |th| th.join } + # rest requests won't get KeepAliveDisconnected + (0...10).to_a.map { + Thread.new { + Thread.abort_on_exception = true + assert_equal("34567", client.get(endpoint).content) + } + }.each { |th| th.join } + end + + def create_keepalive_thread(count, sock) + Thread.new { + Thread.abort_on_exception = true + count.times do + req = sock.gets + while line = sock.gets + break if line.chomp.empty? 
+ end + case req + when /chunked/ + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Transfer-Encoding: chunked\r\n") + sock.write("\r\n") + sock.write("1a\r\n") + sock.write("abcdefghijklmnopqrstuvwxyz\r\n") + sock.write("10\r\n") + sock.write("1234567890abcdef\r\n") + sock.write("0\r\n") + sock.write("\r\n") + else + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("12345") + end + end + sock.close + } + end + + def test_keepalive + server = TCPServer.open('localhost', 0) + server_thread = Thread.new { + Thread.abort_on_exception = true + sock = server.accept + create_keepalive_thread(10, sock) + } + url = "http://localhost:#{server.addr[1]}/" + begin + # content-length + 5.times do + assert_equal('12345', @client.get(url).body) + end + # chunked + 5.times do + assert_equal('abcdefghijklmnopqrstuvwxyz1234567890abcdef', @client.get(url + 'chunked').body) + end + ensure + server.close + server_thread.join + end + end + + def test_strict_response_size_check + @client.strict_response_size_check = false + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\nContent-Length: 12345\r\n\r\nhello world" + assert_equal('hello world', @client.get_content('http://dummy')) + + @client.reset_all + @client.strict_response_size_check = true + @client.test_loopback_http_response << "HTTP/1.0 200 OK\r\nContent-Length: 12345\r\n\r\nhello world" + assert_raise(HTTPClient::BadResponseError) do + @client.get_content('http://dummy') + end + end + + def test_socket_local + @client.socket_local.host = '127.0.0.1' + assert_equal('hello', @client.get_content(serverurl + 'hello')) + @client.reset_all + @client.socket_local.port = serverport + begin + @client.get_content(serverurl + 'hello') + rescue Errno::EADDRINUSE, SocketError + assert(true) + end + end + + def test_body_param_order + ary = ('b'..'d').map { |k| ['key2', k] } << ['key1', 'a'] << ['key3', 'z'] + assert_equal("key2=b&key2=c&key2=d&key1=a&key3=z", HTTP::Message.escape_query(ary)) + end + + if RUBY_VERSION > "1.9" + def test_charset + body = @client.get(serverurl + 'charset').body + assert_equal(Encoding::EUC_JP, body.encoding) + assert_equal('あいうえお'.encode(Encoding::EUC_JP), body) + end + end + + if RUBY_VERSION >= "1.9.3" + def test_continue + @client.debug_dev = str = '' + res = @client.get(serverurl + 'continue', :header => {:Expect => '100-continue'}) + assert_equal(200, res.status) + assert_equal('done!', res.body) + assert_match(/Expect: 100-continue/, str) + end + end + + def test_ipv6literaladdress_in_uri + server = TCPServer.open('::1', 0) rescue return # Skip if IPv6 is unavailable. + server_thread = Thread.new { + Thread.abort_on_exception = true + sock = server.accept + while line = sock.gets + break if line.chomp.empty? 
+ end + sock.write("HTTP/1.1 200 OK\r\n") + sock.write("Content-Length: 5\r\n") + sock.write("\r\n") + sock.write("12345") + sock.close + } + uri = "http://[::1]:#{server.addr[1]}/" + begin + assert_equal('12345', @client.get(uri).body) + ensure + server.close + server_thread.kill + server_thread.join + end + end + + def test_uri_no_schema + assert_raise(ArgumentError) do + @client.get_content("www.example.com") + end + end + + def test_tcp_keepalive + @client.tcp_keepalive = true + @client.get(serverurl) + + # expecting HTTP keepalive caches the socket + session = @client.instance_variable_get(:@session_manager).send(:get_cached_session, HTTPClient::Site.new(URI.parse(serverurl))) + socket = session.instance_variable_get(:@socket) + + assert_true(session.tcp_keepalive) + assert_equal(Socket::SO_KEEPALIVE, socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE).optname) + end + +private + + def check_query_get(query) + WEBrick::HTTPUtils.parse_query( + @client.get(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def check_query_post(query) + WEBrick::HTTPUtils.parse_query( + @client.post(serverurl + 'servlet', query).header["x-query"][0] + ) + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + [ + :hello, :sleep, :servlet_redirect, :redirect1, :redirect2, :redirect3, + :redirect_self, :relative_redirect, :redirect_see_other, :chunked, + :largebody, :status, :compressed, :charset, :continue + ].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server.mount('/servlet', TestServlet.new(@server)) + @server_thread = start_server_thread(@server) + end + + def add_query_string(req) + if req.query_string + '?' 
+ req.query_string + else + '' + end + end + + def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" + add_query_string(req) + end + + def do_sleep(req, res) + sec = req.query['sec'].to_i + sleep sec + res['content-type'] = 'text/html' + res.body = "hello" + add_query_string(req) + end + + def do_servlet_redirect(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "servlet" + add_query_string(req)) + end + + def do_redirect1(req, res) + res.set_redirect(WEBrick::HTTPStatus::MovedPermanently, serverurl + "hello" + add_query_string(req)) + end + + def do_redirect2(req, res) + res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, serverurl + "redirect3" + add_query_string(req)) + end + + def do_redirect3(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "hello" + add_query_string(req)) + end + + def do_redirect_self(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, serverurl + "redirect_self" + add_query_string(req)) + end + + def do_relative_redirect(req, res) + res.set_redirect(WEBrick::HTTPStatus::Found, "hello" + add_query_string(req)) + end + + def do_redirect_see_other(req, res) + if req.request_method == 'POST' + res.set_redirect(WEBrick::HTTPStatus::SeeOther, serverurl + "redirect_see_other" + add_query_string(req)) # self + else + res.body = 'hello' + end + end + + def do_chunked(req, res) + res.chunked = true + res['content-type'] = 'text/plain; charset=UTF-8' + piper, pipew = IO.pipe + res.body = piper + pipew << req.query['msg'] + pipew.close + end + + def do_largebody(req, res) + res['content-type'] = 'text/html' + res.body = "a" * 1000 * 1000 + end + + def do_compressed(req, res) + res['content-type'] = 'application/octet-stream' + if req.query['enc'] == 'gzip' + res['content-encoding'] = 'gzip' + res.body = GZIP_CONTENT + elsif req.query['enc'] == 'deflate' + res['content-encoding'] = 'deflate' + res.body = DEFLATE_CONTENT + elsif req.query['enc'] == 'deflate_noheader' + res['content-encoding'] = 'deflate' + res.body = DEFLATE_NOHEADER_CONTENT + end + end + + def do_charset(req, res) + if RUBY_VERSION > "1.9" + res.body = 'あいうえお'.encode("euc-jp") + res['Content-Type'] = 'text/plain; charset=euc-jp' + else + res.body = 'this endpoint is for 1.9 or later' + end + end + + def do_status(req, res) + res.status = req.query['status'].to_i + end + + def do_continue(req, res) + req.continue + res.body = 'done!' + end + + class TestServlet < WEBrick::HTTPServlet::AbstractServlet + def get_instance(*arg) + self + end + + def do_HEAD(req, res) + res["x-head"] = 'head' # use this for test purpose only. 
+ res["x-query"] = query_response(req) + end + + def do_GET(req, res) + res.body = 'get' + res['x-header'] = req['X-Header'] + res["x-query"] = query_response(req) + end + + def do_POST(req, res) + res["content-type"] = "text/plain" # iso-8859-1, not US-ASCII + res.body = 'post,' + req.body.to_s + res["x-query"] = body_response(req) + res["x-request-query"] = req.query_string + end + + def do_PATCH(req, res) + res["x-query"] = body_response(req) + param = WEBrick::HTTPUtils.parse_query(req.body) || {} + res["x-size"] = (param['txt'] || '').size + res.body = param['txt'] || 'patch' + res["x-request-query"] = req.query_string + end + + def do_PUT(req, res) + res["x-query"] = body_response(req) + param = WEBrick::HTTPUtils.parse_query(req.body) || {} + res["x-size"] = (param['txt'] || '').size + res.body = param['txt'] || 'put' + res["x-request-query"] = req.query_string + end + + def do_DELETE(req, res) + res.body = 'delete' + res["x-query"] = body_response(req) + res["x-request-query"] = req.query_string + end + + def do_OPTIONS(req, res) + res.body = 'options' + res['x-header'] = req['X-Header'] + res['x-body'] = req.body + end + + def do_PROPFIND(req, res) + res.body = 'propfind' + end + + def do_PROPPATCH(req, res) + res.body = 'proppatch' + res["x-query"] = body_response(req) + end + + def do_TRACE(req, res) + # client SHOULD reflect the message received back to the client as the + # entity-body of a 200 (OK) response. [RFC2616] + res.body = 'trace' + res["x-query"] = query_response(req) + end + + private + + def query_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.query_string)) + end + + def body_response(req) + query_escape(WEBrick::HTTPUtils.parse_query(req.body)) + end + + def query_escape(query) + escaped = [] + query.sort_by { |k, v| k }.collect do |k, v| + v.to_ary.each do |ve| + escaped << CGI.escape(k) + '=' + CGI.escape(ve) + end + end + escaped.join('&') + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_include_client.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_include_client.rb new file mode 100644 index 0000000..8cd05c4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_include_client.rb @@ -0,0 +1,52 @@ +# -*- encoding: utf-8 -*- +require File.expand_path('helper', File.dirname(__FILE__)) + +require 'httpclient/include_client' +class TestIncludeClient < Test::Unit::TestCase + class Widget + extend HTTPClient::IncludeClient + + include_http_client("http://example.com") do |client| + client.cookie_manager = nil + client.agent_name = "iMonkey 4k" + end + end + + class OtherWidget + extend HTTPClient::IncludeClient + + include_http_client + include_http_client(:method_name => :other_http_client) + end + + class UnrelatedBlankClass ; end + + def test_client_class_level_singleton + assert_equal Widget.http_client.object_id, Widget.http_client.object_id + + assert_equal Widget.http_client.object_id, Widget.new.http_client.object_id + + assert_not_equal Widget.http_client.object_id, OtherWidget.http_client.object_id + end + + def test_configured + assert_equal Widget.http_client.agent_name, "iMonkey 4k" + assert_nil Widget.http_client.cookie_manager + assert_equal Widget.http_client.proxy.to_s, "http://example.com" + end + + def test_two_includes + assert_not_equal OtherWidget.http_client.object_id, OtherWidget.other_http_client.object_id + + assert_equal OtherWidget.other_http_client.object_id, OtherWidget.new.other_http_client.object_id + end + + # meta-programming gone wrong 
sometimes accidentally + # adds the class method to _everyone_, a mistake we've made before. + def test_not_infected_class_hieararchy + assert ! Class.respond_to?(:http_client) + assert ! UnrelatedBlankClass.respond_to?(:http_client) + end + + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_jsonclient.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_jsonclient.rb new file mode 100644 index 0000000..839aa54 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_jsonclient.rb @@ -0,0 +1,80 @@ +# -*- encoding: utf-8 -*- +require File.expand_path('helper', File.dirname(__FILE__)) +require 'jsonclient' + + +class TestJSONClient < Test::Unit::TestCase + include Helper + + def setup + super + setup_server + @client = JSONClient.new + end + + def teardown + super + end + + def test_post + res = @client.post(serverurl + 'json', {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + # #previous contains the original response + assert_equal(1, JSON.parse(res.previous.content)['a']) + end + + def test_post_with_header + res = @client.post(serverurl + 'json', :header => {'X-foo' => 'bar'}, :body => {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + end + + def test_post_with_array_header + res = @client.post(serverurl + 'json', :header => [['X-foo', 'bar']], :body => {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + end + + def test_post_non_json_body + res = @client.post(serverurl + 'json', 'a=b&c=d') + assert_equal('a=b&c=d', res.content) + assert_equal('application/x-www-form-urlencoded', res.content_type) + end + + def test_put + res = @client.put(serverurl + 'json', {'a' => 1, 'b' => {'c' => 2}}) + assert_equal(2, res.content['b']['c']) + assert_equal('application/json; charset=utf-8', res.content_type) + end + + def test_get_not_affected + res = @client.get(serverurl + 'json', {'a' => 1, 'b' => {'c' => 2}}) + assert_equal('', res.content) + assert_equal('', res.content_type) + end + + class JSONServlet < WEBrick::HTTPServlet::AbstractServlet + def get_instance(*arg) + self + end + + def service(req, res) + res['content-type'] = req['content-type'] + res.body = req.body + end + end + + def setup_server + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => @logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => File.dirname(File.expand_path(__FILE__)) + ) + @serverport = @server.config[:Port] + @server.mount('/json', JSONServlet.new(@server)) + @server_thread = start_server_thread(@server) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_ssl.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_ssl.rb new file mode 100644 index 0000000..2e634d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_ssl.rb @@ -0,0 +1,559 @@ +require File.expand_path('helper', File.dirname(__FILE__)) +require 'webrick/https' + + +class TestSSL < Test::Unit::TestCase + include Helper + + DIR = File.dirname(File.expand_path(__FILE__)) + + def setup + super + @serverpid = @client = nil + @verify_callback_called = false + setup_server + setup_client + @url = "https://localhost:#{serverport}/hello" + end + + def teardown + super + end + + def path(filename) + File.expand_path(filename, DIR) + end + + 
def test_proxy_ssl + setup_proxyserver + escape_noproxy do + @client.proxy = proxyurl + @client.ssl_config.set_client_cert_file(path('client.cert'), path('client.key')) + @client.ssl_config.add_trust_ca(path('ca.cert')) + @client.ssl_config.add_trust_ca(path('subca.cert')) + @client.debug_dev = str = "" + assert_equal(200, @client.get(@url).status) + assert(/accept/ =~ @proxyio.string, 'proxy is not used') + assert(/Host: localhost:#{serverport}/ =~ str) + end + end + + def test_options + cfg = @client.ssl_config + assert_nil(cfg.client_cert) + assert_nil(cfg.client_key) + assert_nil(cfg.client_ca) + assert_equal(OpenSSL::SSL::VERIFY_PEER | OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT, cfg.verify_mode) + assert_nil(cfg.verify_callback) + assert_nil(cfg.timeout) + expected_options = OpenSSL::SSL::OP_ALL | OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3 + expected_options &= ~OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS if defined?(OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS) + expected_options |= OpenSSL::SSL::OP_NO_COMPRESSION if defined?(OpenSSL::SSL::OP_NO_COMPRESSION) + assert_equal(expected_options, cfg.options) + assert_equal("ALL:!aNULL:!eNULL:!SSLv2", cfg.ciphers) + assert_instance_of(OpenSSL::X509::Store, cfg.cert_store) + end + +unless defined?(HTTPClient::JRubySSLSocket) + # JRubySSLSocket does not support sync mode. + def test_sync + cfg = @client.ssl_config + cfg.set_client_cert_file(path('client.cert'), path('client.key')) + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + assert_equal("hello", @client.get_content(@url)) + + @client.socket_sync = false + @client.reset_all + assert_equal("hello", @client.get_content(@url)) + end +end + + def test_debug_dev + str = @client.debug_dev = '' + cfg = @client.ssl_config + cfg.client_cert = path("client.cert") + cfg.client_key = path("client.key") + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + assert_equal("hello", @client.get_content(@url)) + assert(str.scan(/^hello$/)[0]) + end + + def test_verification_without_httpclient + raw_cert = "-----BEGIN CERTIFICATE-----\nMIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBCMRMwEQYKCZImiZPyLGQB\nGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJcnVieS1sYW5nMRAwDgYDVQQDDAdSdWJ5\nIENBMB4XDTE2MDgxMDE3MjEzNFoXDTE3MDgxMDE3MjEzNFowSzETMBEGCgmSJomT\n8ixkARkWA29yZzEZMBcGCgmSJomT8ixkARkWCXJ1YnktbGFuZzEZMBcGA1UEAwwQ\nUnVieSBjZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB\nAJCfsSXpSMpmZCVa+ZCM+QDgomnhDlvnrGDq6pasTaIspGTXgws+7r8Dt/cNe6EH\nHJpRH2cGRiO4yPcfcT9eS4X7k8OC4f33wHfACOmLu6LeoNE8ujmSk6L6WzLUI+sE\nnLZbFrXxoAo4XHsm8vEG9C+jEoXZ1p+47wrAGaDwDQTnzlMy4dT9pRQEJP2G/Rry\nUkuZn8SUWmh3/YS78iaSzsNF1cgE1ealHOrPPFDjiCGDaH/LHyUPYlbFSLZ/B7Qx\nLxi5sePLcywWq/EJrmWpgeVTDjtNijsdKv/A3qkY+fm/oD0pzt7XsfJaP9YKNyJO\nQFdxWZeiPcDF+Hwf+IwSr+kCAwEAAaMxMC8wDgYDVR0PAQH/BAQDAgeAMB0GA1Ud\nDgQWBBQNvzYzJyXemGhxbA8NMXLolDnPyjANBgkqhkiG9w0BAQsFAAOCAQEARIJV\noKejGlOTn71QutnNnu07UtTu0IHs6YqjYzzND+m4JXLN+wvYm72AFUG0b1L7dRg0\niK8XjQrlNQNVqP1Mc6tffchy20neOPOHeiO6qTdRU8P2S8D3Uwe+1qhgxjfE+cWc\nwZmWxYK4HA8c58PxWMqrkr2QqXDplG9KWLvOgrtPGiLLZcQSKhvvB63QzItHBDU6\nRayiJY3oPkK/HrIvFlySqFqzWmuyknkciOFywEHQMz/tcSFJ2QFpPj/tBz9VXohH\nZ8KscmfhZrTPBjo+ky1lz/WraWoz4LMiLnkC2ABczWLRSawu+v3Irx1NFJngt05e\npqwtqIUeg7j+JLiTaA==\n-----END CERTIFICATE-----" + raw_ca_cert = "-----BEGIN 
CERTIFICATE-----\nMIIDYjCCAkqgAwIBAgIBATANBgkqhkiG9w0BAQsFADBCMRMwEQYKCZImiZPyLGQB\nGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJcnVieS1sYW5nMRAwDgYDVQQDDAdSdWJ5\nIENBMB4XDTE2MDgxMDE3MjA1NFoXDTE4MDgxMDE3MjA1NFowQjETMBEGCgmSJomT\n8ixkARkWA29yZzEZMBcGCgmSJomT8ixkARkWCXJ1YnktbGFuZzEQMA4GA1UEAwwH\nUnVieSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALKGwyM3Ejtl\npo7CqaDlS71gDZn3gm6IwWpmRMLJofSI9LCwAbjijSC2HvO0xUWoYW40FbzjnnEi\ngszsWyPwuQIx9t0bhuAyllNIfImmkaQkrikXKBKzia4jPnbc4iXPnfjuThjESFWl\ntfbN6y1B5TjKhD1KelfakUO+iMu8WlIA9NKQZYfJ/F3QSpP5Iqb3KN/jVifFbDV8\nbAl3Ln4rT2kTCKrZZcl1jmWsJv8jBw6+P7hk0/Mu0JeHAITsjbNbpHd8UXpCfbVs\nsNGZrBU4uJdZ2YTG+Y27/t25jFNQwb+TWbvig7rfdX2sjssuxa00BBxarC08tIVj\nZprM37KcNn8CAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC\nAQYwHQYDVR0OBBYEFA2/NjMnJd6YaHFsDw0xcuiUOc/KMB8GA1UdIwQYMBYEFA2/\nNjMnJd6YaHFsDw0xcuiUOc/KMA0GCSqGSIb3DQEBCwUAA4IBAQAJSOw49XqvUll0\n3vU9EAO6yUdeZSsQENIfYbRMQgapbnN1vTyrUjPZkGC5hIE1pVdoHtEoUEICxIwy\nr6BKxiSLBDLp+rvIuDdzMkXIWdUVvTZguVRyKtM2gfnpsPLpVnv+stBmAW2SMyxm\nkymhOpkjdv3He+45uorB3tdfBS9VVomDEUJdg38UE1b5eXRQ3D6gG0iCPFzKszXg\nLoAYhGxtjCJaKlbzduMK0YO6aelgW1+XnVIKcA7DJ9egk5d/dFZBPFfwumwr9hTH\nh7/fp3Fr87weI+CkfmFyJZrsEBlXJBVuvPesMVHTh3Whm5kmCdWcBJU0QmSq42ZL\n72U0PXLR\n-----END CERTIFICATE-----" + ca_cert = ::OpenSSL::X509::Certificate.new(raw_ca_cert) + cert = ::OpenSSL::X509::Certificate.new(raw_cert) + store = ::OpenSSL::X509::Store.new + store.add_cert(ca_cert) + assert(store.verify(cert)) + end + + def test_verification + cfg = @client.ssl_config + cfg.verify_callback = method(:verify_callback).to_proc + begin + @verify_callback_called = false + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.client_cert = path("client.cert") + cfg.client_key = path("client.key") + @verify_callback_called = false + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.add_trust_ca(path('ca.cert')) + @verify_callback_called = false + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.add_trust_ca(path('subca.cert')) + @verify_callback_called = false + assert_equal("hello", @client.get_content(@url)) + assert(@verify_callback_called) + # +if false + # JRubySSLSocket does not support depth. + # Also on travis environment, verify_depth seems to not work properly. + cfg.verify_depth = 1 # 2 required: root-sub + @verify_callback_called = false + begin + @client.get(@url) + assert(false, "verify_depth is not supported? 
#{OpenSSL::OPENSSL_VERSION}") + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + assert(@verify_callback_called) + end + # + cfg.verify_depth = 2 # 2 required: root-sub + @verify_callback_called = false + @client.get(@url) + assert(@verify_callback_called) + # +end + cfg.verify_depth = nil + cfg.cert_store = OpenSSL::X509::Store.new + cfg.verify_mode = OpenSSL::SSL::VERIFY_PEER + begin + @client.get_content(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + end + # + cfg.verify_mode = nil + assert_equal("hello", @client.get_content(@url)) + cfg.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_equal("hello", @client.get_content(@url)) + end + + def test_cert_store + cfg = @client.ssl_config + cfg.cert_store.add_cert(cert('ca.cert')) + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + end + # + cfg.cert_store.add_cert(cert('subca.cert')) + assert_equal("hello", @client.get_content(@url)) + cfg.clear_cert_store + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/(certificate verify failed|unable to find valid certification path to requested target)/, ssle.message) + end + end + +if defined?(HTTPClient::JRubySSLSocket) + def test_ciphers + cfg = @client.ssl_config + cfg.set_client_cert_file(path('client.cert'), path('client-pass.key'), 'pass4key') + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + cfg.timeout = 123 + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = [] + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/No appropriate protocol/, ssle.message) + end + # + cfg.ciphers = %w(TLS_RSA_WITH_AES_128_CBC_SHA) + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = HTTPClient::SSLConfig::CIPHERS_DEFAULT + assert_equal("hello", @client.get_content(@url)) + end + +else + + def test_ciphers + cfg = @client.ssl_config + cfg.set_client_cert_file(path('client.cert'), path('client-pass.key'), 'pass4key') + cfg.add_trust_ca(path('ca.cert')) + cfg.add_trust_ca(path('subca.cert')) + cfg.timeout = 123 + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = "!ALL" + begin + @client.get(@url) + assert(false) + rescue OpenSSL::SSL::SSLError => ssle + assert_match(/no cipher match/, ssle.message) + end + # + cfg.ciphers = "ALL" + assert_equal("hello", @client.get_content(@url)) + # + cfg.ciphers = "DEFAULT" + assert_equal("hello", @client.get_content(@url)) + end +end + + def test_set_default_paths + assert_raise(OpenSSL::SSL::SSLError) do + @client.get(@url) + end + escape_env do + ENV['SSL_CERT_FILE'] = File.join(DIR, 'ca-chain.pem') + @client.ssl_config.set_default_paths + @client.get(@url) + end + end + + def test_no_sslv3 + teardown_server + setup_server_with_ssl_version(:SSLv3) + assert_raise(OpenSSL::SSL::SSLError) do + @client.ssl_config.verify_mode = nil + @client.get("https://localhost:#{serverport}/hello") + end + end + + def test_allow_tlsv1 + teardown_server + setup_server_with_ssl_version(:TLSv1) + assert_nothing_raised do + @client.ssl_config.verify_mode = nil + @client.get("https://localhost:#{serverport}/hello") + end + end + 
+ def test_use_higher_TLS + omit('TODO: it does not pass with Java 7 or old openssl ') + teardown_server + setup_server_with_ssl_version('TLSv1_2') + assert_nothing_raised do + @client.ssl_config.verify_mode = nil + @client.get("https://localhost:#{serverport}/hello") + # TODO: should check JRubySSLSocket.ssl_socket.getSession.getProtocol + # but it's not thread safe. How can I return protocol version to the caller? + end + end + + VERIFY_TEST_CERT_LOCALHOST = OpenSSL::X509::Certificate.new(<<-EOS) +-----BEGIN CERTIFICATE----- +MIIB9jCCAV+gAwIBAgIJAIH8Gsm4PcNKMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0xNjA4MTgxMDI2MDVaFw00NDAxMDMxMDI2MDVaMBQx +EjAQBgNVBAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +p7D8q0lcx5EZEV5+zPnQsxrbft5xyhH/MCStbH46DRATGPNSOaLRCG5r8gTKQzpD +4swGrQFYe2ienQ+7o4aEHErsXp4O/EmDKeiXWWrMqPr23r3HOBDebuynC/sCwy7N +epnX9u1VLB03eo+suj4d86OoOF+o11t9ZP+GA29Rsf8CAwEAAaNQME4wHQYDVR0O +BBYEFIxsJuPVvd5KKFcAvHGSeKSsWiUJMB8GA1UdIwQYMBaAFIxsJuPVvd5KKFcA +vHGSeKSsWiUJMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAMJaVCrrM +SM2I06Vr4BL+jtDFhZh3HmJFEDpwEFQ5Y9hduwdUGRBGCpkuea3fE2FKwWW9gLM1 +w7rFMzYFtCEqm78dJWIU79MRy0wjO4LgtYfoikgBh6JKWuV5ed/+L3sLyLG0ZTtv +lrD7lzDtXgwvj007PxDoYRp3JwYzKRmTbH8= +-----END CERTIFICATE----- + EOS + + VERIFY_TEST_CERT_FOO_DOMAIN = OpenSSL::X509::Certificate.new(<<-EOS) +-----BEGIN CERTIFICATE----- +MIIB8jCCAVugAwIBAgIJAL/od7Whx7VTMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV +BAMMB2Zvby5jb20wHhcNMTYwODE4MTAyMzUyWhcNNDQwMTAzMTAyMzUyWjASMRAw +DgYDVQQDDAdmb28uY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnsPyr +SVzHkRkRXn7M+dCzGtt+3nHKEf8wJK1sfjoNEBMY81I5otEIbmvyBMpDOkPizAat +AVh7aJ6dD7ujhoQcSuxeng78SYMp6JdZasyo+vbevcc4EN5u7KcL+wLDLs16mdf2 +7VUsHTd6j6y6Ph3zo6g4X6jXW31k/4YDb1Gx/wIDAQABo1AwTjAdBgNVHQ4EFgQU +jGwm49W93kooVwC8cZJ4pKxaJQkwHwYDVR0jBBgwFoAUjGwm49W93kooVwC8cZJ4 +pKxaJQkwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQCVKTvfxx+yezuR +5WpVKw1E9qabKOYFB5TqdHMHreRubMJTaoZC+YzhcCwtyLlAA9+axKINAiMM8T+z +jjfOHQSa2GS2TaaVDJWmXIgsAlEbjd2BEiQF0LZYGJRG9pyq0WbTV+CyFdrghjcO +xX/t7OG7NfOG9dhv3J+5SX10S5V5Dg== +-----END CERTIFICATE----- + EOS + + VERIFY_TEST_CERT_ALT_NAME = OpenSSL::X509::Certificate.new(<<-EOS) +-----BEGIN CERTIFICATE----- +MIICDDCCAXWgAwIBAgIJAOxXY4nOwxhGMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAeFw0xNjA4MTgxMDM0NTJaFw00NDAxMDMxMDM0NTJaMBQx +EjAQBgNVBAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +p7D8q0lcx5EZEV5+zPnQsxrbft5xyhH/MCStbH46DRATGPNSOaLRCG5r8gTKQzpD +4swGrQFYe2ienQ+7o4aEHErsXp4O/EmDKeiXWWrMqPr23r3HOBDebuynC/sCwy7N +epnX9u1VLB03eo+suj4d86OoOF+o11t9ZP+GA29Rsf8CAwEAAaNmMGQwFAYDVR0R +BA0wC4IJKi5mb28uY29tMB0GA1UdDgQWBBSMbCbj1b3eSihXALxxknikrFolCTAf +BgNVHSMEGDAWgBSMbCbj1b3eSihXALxxknikrFolCTAMBgNVHRMEBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4GBADJlKNFuOnsDIhHGW72HuQw4naN6lM3eZE9JJ+UF/XIF +ghGtgqw+00Yy5wMFc1K2Wm4p5NymmDfC/P1FOe34bpxt9/IWm6mEoIWoodC3N4Cm +PtnSS1/CRWzVIPGMglTGGDcUc70tfeAWgyTxgcNQd4vTFtnN0f0RDdaXa8kfKMTw +-----END CERTIFICATE----- + EOS + + VERIFY_TEST_PKEY = OpenSSL::PKey::RSA.new(<<-EOS) +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCnsPyrSVzHkRkRXn7M+dCzGtt+3nHKEf8wJK1sfjoNEBMY81I5 +otEIbmvyBMpDOkPizAatAVh7aJ6dD7ujhoQcSuxeng78SYMp6JdZasyo+vbevcc4 +EN5u7KcL+wLDLs16mdf27VUsHTd6j6y6Ph3zo6g4X6jXW31k/4YDb1Gx/wIDAQAB +AoGAe0RHx+WKtQx8/96VmTl951qzxMPho2etTYd4kAsNwzJwx2N9qu57eBYrdWF+ +CQMYievucFhP4Y+bINtC1Eb6btz9TCUwjCfeIxfGRoFf3cxVmxlsRJJmN1kSZlu1 +yYlcMVuP4noeFIMQBRrt5pyLCx2Z9A01NCQT4Y6VoREBIeECQQDWeNhsL6xkrmdB +M9+zl+SqHdNKhgKwMdp74+UNnAV9I8GB7bGlOWhc83aqMLgS+JBDFXcmNF/KawTR 
+zcnkod5xAkEAyClFgr3lZQSnwUwoA/AOcyW0+H63taaaXS/g8n3H8ENK6kL4ldUx +IgCk2ekbQ5Y3S2WScIGXNxMOza9MlsOvbwJAPUtoPvMZB+U4KVBT/JXKijvf6QqH +tidpU8L78XnHr84KPcHa5WeUxgvmvBkUYoebYzC9TrPlNIqFZBi2PJtuYQJBAMda +E5j7eJT75fhm2RPS6xFT5MH5sw6AOA3HucrJ63AoFVzsBpl0E9NBwO4ndLgDzF6T +cx4Kc4iuunewuB8QFpECQQCfvsHCjIJ/X4kiqeBzxDq2GR/oDgQkOzY+4H9U7Lwl +e61RBaxk5OHOA0bLtvJblV6NL72ZEZhX60wAWbrOPhpT +-----END RSA PRIVATE KEY----- + EOS + + def test_post_connection_check + teardown_server + setup_server_with_server_cert(nil, VERIFY_TEST_CERT_LOCALHOST, VERIFY_TEST_PKEY) + file = Tempfile.new('cert') + File.write(file.path, VERIFY_TEST_CERT_LOCALHOST.to_pem) + @client.ssl_config.add_trust_ca(file.path) + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_PEER + + teardown_server + setup_server_with_server_cert(nil, VERIFY_TEST_CERT_FOO_DOMAIN, VERIFY_TEST_PKEY) + File.write(file.path, VERIFY_TEST_CERT_FOO_DOMAIN.to_pem) + @client.ssl_config.add_trust_ca(file.path) + assert_raises(OpenSSL::SSL::SSLError) do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_PEER + + teardown_server + setup_server_with_server_cert(nil, VERIFY_TEST_CERT_ALT_NAME, VERIFY_TEST_PKEY) + File.write(file.path, VERIFY_TEST_CERT_ALT_NAME.to_pem) + @client.ssl_config.add_trust_ca(file.path) + assert_raises(OpenSSL::SSL::SSLError) do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE + assert_nothing_raised do + @client.get("https://localhost:#{serverport}/hello") + end + @client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + + def test_x509_store_add_cert_prepend + store = OpenSSL::X509::Store.new + assert_equal(store, store.add_cert(OpenSSL::X509::Certificate.new(VERIFY_TEST_CERT_LOCALHOST))) + end + + def test_tcp_keepalive + @client.tcp_keepalive = true + @client.ssl_config.add_trust_ca(path('ca-chain.pem')) + @client.get_content(@url) + + # expecting HTTP keepalive caches the socket + session = @client.instance_variable_get(:@session_manager).send(:get_cached_session, HTTPClient::Site.new(URI.parse(@url))) + socket = session.instance_variable_get(:@socket).instance_variable_get(:@socket) + + assert_true(session.tcp_keepalive) + if RUBY_ENGINE == 'jruby' + assert_true(socket.getKeepAlive()) + else + assert_equal(Socket::SO_KEEPALIVE, socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE).optname) + end + end + + def test_timeout + url = "https://localhost:#{serverport}/" + @client.ssl_config.add_trust_ca(path('ca-chain.pem')) + assert_equal('sleep', @client.get_content(url + 'sleep?sec=2')) + @client.receive_timeout = 1 + @client.reset_all + assert_equal('sleep', @client.get_content(url + 'sleep?sec=0')) + + start = Time.now + assert_raise(HTTPClient::ReceiveTimeoutError) do + @client.get_content(url + 'sleep?sec=5') + end + if Time.now - start > 3 + # before #342 it detected timeout when IO was freed + fail 'timeout does not work' + end + + @client.receive_timeout = 3 + @client.reset_all + assert_equal('sleep', @client.get_content(url + 'sleep?sec=2')) + end + +private + + def cert(filename) + 
OpenSSL::X509::Certificate.new(File.read(File.join(DIR, filename))) + end + + def key(filename) + OpenSSL::PKey::RSA.new(File.read(File.join(DIR, filename))) + end + + def q(str) + %Q["#{str}"] + end + + def setup_server + logger = Logger.new(STDERR) + logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => File.join(DIR, 'ca.cert'), + :SSLCertificate => cert('server.cert'), + :SSLPrivateKey => key('server.key'), + :SSLVerifyClient => nil, #OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT|OpenSSL::SSL::VERIFY_PEER, + :SSLClientCA => cert('ca.cert'), + :SSLCertName => nil + ) + @serverport = @server.config[:Port] + [:hello, :sleep].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server_thread = start_server_thread(@server) + end + + def setup_server_with_ssl_version(ssl_version) + # JRubyOpenSSL does not support "TLSv1_2" as an known version, and some JCE provides TLS v1.2 as "TLSv1.2" not "TLSv1_2" + if RUBY_ENGINE == 'jruby' && ['TLSv1_1', 'TLSv1_2'].include?(ssl_version) + ssl_version = ssl_version.tr('_', '.') + end + logger = Logger.new(STDERR) + logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => File.join(DIR, 'ca.cert'), + :SSLCertificate => cert('server.cert'), + :SSLPrivateKey => key('server.key') + ) + @server.ssl_context.ssl_version = ssl_version + @serverport = @server.config[:Port] + [:hello].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server_thread = start_server_thread(@server) + end + + def setup_server_with_server_cert(ca_cert, server_cert, server_key) + logger = Logger.new(STDERR) + logger.level = Logger::Severity::FATAL # avoid logging SSLError (ERROR level) + @server = WEBrick::HTTPServer.new( + :BindAddress => "localhost", + :Logger => logger, + :Port => 0, + :AccessLog => [], + :DocumentRoot => DIR, + :SSLEnable => true, + :SSLCACertificateFile => ca_cert, + :SSLCertificate => server_cert, + :SSLPrivateKey => server_key, + :SSLVerifyClient => nil, + :SSLClientCA => nil, + :SSLCertName => nil + ) + @serverport = @server.config[:Port] + [:hello].each do |sym| + @server.mount( + "/#{sym}", + WEBrick::HTTPServlet::ProcHandler.new(method("do_#{sym}").to_proc) + ) + end + @server_thread = start_server_thread(@server) + end + + def do_hello(req, res) + res['content-type'] = 'text/html' + res.body = "hello" + end + + def do_sleep(req, res) + sec = req.query['sec'].to_i + sleep sec + res['content-type'] = 'text/html' + res.body = "sleep" + end + + def start_server_thread(server) + t = Thread.new { + Thread.current.abort_on_exception = true + server.start + } + while server.status != :Running + sleep 0.1 + unless t.alive? 
+ t.join + raise + end + end + t + end + + def verify_callback(ok, cert) + @verify_callback_called = true + p ["client", ok, cert] if $DEBUG + ok + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_webagent-cookie.rb b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_webagent-cookie.rb new file mode 100644 index 0000000..29a415b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/httpclient-2.8.3/test/test_webagent-cookie.rb @@ -0,0 +1,465 @@ +require 'test/unit' +require 'uri' +require 'tempfile' + +# This testcase is located for reference, not for running. +if false + +require 'httpclient/webagent-cookie' + +class TestCookie < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @c = WebAgent::Cookie.new() + end + + def test_s_new() + assert_instance_of(WebAgent::Cookie, @c) + end + + def test_discard? + assert_equal(false, !!(@c.discard?)) + @c.discard = true + assert_equal(true, !!(@c.discard?)) + end + + def test_match() + url = urify('http://www.rubycolor.org/hoge/funi/#919191') + + @c.domain = 'www.rubycolor.org' + assert_equal(true, @c.match?(url)) + + @c.domain = '.rubycolor.org' + assert_equal(true, @c.match?(url)) + + @c.domain = 'aaa.www.rubycolor.org' + assert_equal(false, @c.match?(url)) + + @c.domain = 'aaa.www.rubycolor.org' + assert_equal(false, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/' + assert_equal(true, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + assert_equal(true, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge/hoge' + assert_equal(false, @c.match?(url)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + @c.secure = true + assert_equal(false, @c.match?(url)) + + url2 = urify('https://www.rubycolor.org/hoge/funi/#919191') + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + @c.secure = true + assert_equal(true, @c.match?(url2)) + + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' + @c.secure = nil + assert_equal(true, @c.match?(url2)) ## not false! 
+ + url.port = 80 + @c.domain = 'www.rubycolor.org' + @c.path = '/hoge' +# @c.port = [80,8080] + assert_equal(true, @c.match?(url)) + + url_nopath = URI.parse('http://www.rubycolor.org') + @c.domain = 'www.rubycolor.org' + @c.path = '/' + assert_equal(true, @c.match?(url_nopath)) + + end + + def test_head_match?() + assert_equal(true, @c.head_match?("","")) + assert_equal(false, @c.head_match?("a","")) + assert_equal(true, @c.head_match?("","a")) + assert_equal(true, @c.head_match?("abcde","abcde")) + assert_equal(true, @c.head_match?("abcde","abcdef")) + assert_equal(false, @c.head_match?("abcdef","abcde")) + assert_equal(false, @c.head_match?("abcde","bcde")) + assert_equal(false, @c.head_match?("bcde","abcde")) + end + + def test_tail_match?() + assert_equal(true, @c.tail_match?("","")) + assert_equal(false, @c.tail_match?("a","")) + assert_equal(true, @c.tail_match?("","a")) + assert_equal(true, @c.tail_match?("abcde","abcde")) + assert_equal(false, @c.tail_match?("abcde","abcdef")) + assert_equal(false, @c.tail_match?("abcdef","abcde")) + assert_equal(false, @c.tail_match?("abcde","bcde")) + assert_equal(true, @c.tail_match?("bcde","abcde")) + end + + + def test_domain_match() + extend WebAgent::CookieUtils + assert_equal(true, !!domain_match("hoge.co.jp",".")) +# assert_equal(true, !!domain_match("locahost",".local")) + assert_equal(true, !!domain_match("192.168.10.1","192.168.10.1")) + assert_equal(false, !!domain_match("192.168.10.1","192.168.10.2")) +# assert_equal(false, !!domain_match("hoge.co.jp",".hoge.co.jp")) + # allows; host == rubyforge.org, domain == .rubyforge.org + assert_equal(true, !!domain_match("hoge.co.jp",".hoge.co.jp")) + assert_equal(true, !!domain_match("www.hoge.co.jp", "www.hoge.co.jp")) + assert_equal(false, !!domain_match("www.hoge.co.jp", "www2.hoge.co.jp")) + assert_equal(true, !!domain_match("www.hoge.co.jp", ".hoge.co.jp")) + assert_equal(true, !!domain_match("www.aa.hoge.co.jp", ".hoge.co.jp")) + assert_equal(false, !!domain_match("www.hoge.co.jp", "hoge.co.jp")) + end + + def test_join_quotedstr() + arr1 = ['hoge=funi', 'hoge2=funi2'] + assert_equal(arr1, @c.instance_eval{join_quotedstr(arr1,';')}) + arr2 = ['hoge="fu', 'ni"', 'funi=funi'] + assert_equal(['hoge="fu;ni"','funi=funi'], + @c.instance_eval{join_quotedstr(arr2,';')}) + arr3 = ['hoge="funi";hoge2="fu','ni2";hoge3="hoge"', 'funi="funi"'] + assert_equal(['hoge="funi";hoge2="fu,ni2";hoge3="hoge"', 'funi="funi"'], + @c.instance_eval{join_quotedstr(arr3,',')}) + end + +end + +class TestCookieManager < Test::Unit::TestCase + include HTTPClient::Util + + def setup() + @cm = WebAgent::CookieManager.new() + end + + def teardown() + end + + def test_parse() + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2010 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2010, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse2() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal(".excite.co.jp", cookie.domain) + assert_equal(Time.gm(2037,12,31,12,0,0), 
cookie.expires) + end + + def test_parse3() + str = "xmen=off,0,0,1; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT;Secure;HTTPOnly" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/", cookie.path) + assert_equal(".excite.co.jp", cookie.domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + assert_equal(true, cookie.secure?) + assert_equal(true, cookie.http_only?) + end + + def test_parse_double_semicolon() + str = "xmen=off,0,0,1;; path=\"/;;\"; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify('http://www.excite.co.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("xmen", cookie.name) + assert_equal("off,0,0,1", cookie.value) + assert_equal("/;;", cookie.path) + assert_equal(".excite.co.jp", cookie.domain) + assert_equal(Time.gm(2037,12,31,12,0,0), cookie.expires) + end + +# def test_make_portlist() +# assert_equal([80,8080], @cm.instance_eval{make_portlist("80,8080")}) +# assert_equal([80], @cm.instance_eval{make_portlist("80")}) +# assert_equal([80,8080,10080], @cm.instance_eval{make_portlist(" 80, 8080, 10080 \n")}) +# end + + def test_check_expired_cookies() + c1 = WebAgent::Cookie.new() + c2 = c1.dup + c3 = c1.dup + c4 = c1.dup + c1.expires = Time.now - 100 + c2.expires = Time.now + 100 + c3.expires = Time.now - 10 + c4.expires = nil + cookies = [c1,c2,c3,c4] + @cm.cookies = cookies + @cm.check_expired_cookies() + # expires == nil cookies (session cookie) exists. + assert_equal([c2,c4], @cm.cookies) + end + + def test_parse_expires + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + # + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; path=/; expires=\"\"" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(nil, cookie.expires) + assert_equal("/", cookie.path) + end + + def test_parse_after_expiration + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=Wed, 01-Dec-2010 00:00:00 GMT; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_instance_of(WebAgent::Cookie, cookie) + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(Time.gm(2010, 12, 1, 0,0,0), cookie.expires) + assert_equal("/", cookie.path) + + time = Time.at(Time.now.to_i + 60).utc + expires = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT") + str = "inkid=n92b0ADOgACIgUb9lsjHqAAAHu2a; expires=#{expires}; path=/" + @cm.parse(str, urify('http://www.test.jp')) + cookie = @cm.cookies[0] + assert_equal("inkid", cookie.name) + assert_equal("n92b0ADOgACIgUb9lsjHqAAAHu2a", cookie.value) + assert_equal(time, cookie.expires) + assert_equal("/", cookie.path) + end + + def 
test_find_cookie() + str = "xmen=off,0,0,1; path=/; domain=.excite2.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite2.co.jp/")) + + str = "xmen=off,0,0,2; path=/; domain=.excite.co.jp; expires=Wednesday, 31-Dec-2037 12:00:00 GMT" + @cm.parse(str, urify("http://www.excite.co.jp/")) + + @cm.cookies[0].use = true + @cm.cookies[1].use = true + + url = urify('http://www.excite.co.jp/hoge/funi/') + cookie_str = @cm.find(url) + assert_equal("xmen=off,0,0,2", cookie_str) + end + + def test_load_cookies() + begin + File.open("tmp_test.tmp","w") {|f| + f.write < same as URL + c.url = urify("http://www.inac.co.jp/hoge/hoge2/hoge3") + @cm.add(c) + # + c1, c2 = @cm.cookies + assert_equal('', c1.path) + assert_equal('/hoge/hoge2', c2.path) + end + + def test_check_cookie_accept_domain() + @cm.accept_domains = [".example1.co.jp", "www1.example.jp"] + @cm.reject_domains = [".example2.co.jp", "www2.example.jp"] + check1 = @cm.check_cookie_accept_domain("www.example1.co.jp") + assert_equal(true, check1) + check2 = @cm.check_cookie_accept_domain("www.example2.co.jp") + assert_equal(false, check2) + check3 = @cm.check_cookie_accept_domain("www1.example.jp") + assert_equal(true, check3) + check4 = @cm.check_cookie_accept_domain("www2.example.jp") + assert_equal(false, check4) + check5 = @cm.check_cookie_accept_domain("aa.www2.example.jp") + assert_equal(true, check5) + check6 = @cm.check_cookie_accept_domain("aa.www2.example.jp") + assert_equal(true, check6) + assert_equal(false, @cm.check_cookie_accept_domain(nil)) + end + + def test_escaped + uri = urify('http://www.example.org') + + @cm.parse("bar=2; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=2; path=/", uri) + c = @cm.cookies.first + assert_equal('2', c.value) + assert_equal('bar=2', @cm.find(uri)) + + @cm.parse("bar=; path=/", uri) + c = @cm.cookies.first + assert_equal(nil, c.value) + assert_equal('bar=', @cm.find(uri)) + + @cm.parse("bar=; path=/", uri) + c = @cm.cookies.first + assert_equal(nil, c.value) + assert_equal('bar=', @cm.find(uri)) + end + + def test_load_cookies_escaped + uri = urify('http://example.org/') + f = Tempfile.new('test_cookie') + File.open(f.path, 'w') do |out| + out.write < "Dies ist ein Test" +``` + +## Features + +* Translation and localization +* Interpolation of values to translations +* Pluralization (CLDR compatible) +* Customizable transliteration to ASCII +* Flexible defaults +* Bulk lookup +* Lambdas as translation data +* Custom key/scope separator +* Custom exception handlers +* Extensible architecture with a swappable backend + +## Pluggable Features + +* Cache +* Pluralization: lambda pluralizers stored as translation data +* Locale fallbacks, RFC4647 compliant (optionally: RFC4646 locale validation) +* [Gettext support](https://github.com/ruby-i18n/i18n/wiki/Gettext) +* Translation metadata + +## Alternative Backend + +* Chain +* ActiveRecord (optionally: ActiveRecord::Missing and ActiveRecord::StoreProcs) +* KeyValue (uses active_support/json and cannot store procs) + +For more information and lots of resources see [the 'Resources' page on the wiki](https://github.com/ruby-i18n/i18n/wiki/Resources). + +## Tests + +You can run tests both with + +* `rake test` or just `rake` +* run any test file directly, e.g. 
`ruby -Ilib:test test/api/simple_test.rb` + +You can run all tests against all Gemfiles with + +* `ruby test/run_all.rb` + +The structure of the test suite is a bit unusual as it uses modules to reuse +particular tests in different test cases. + +The reason for this is that we need to enforce the I18n API across various +combinations of extensions. E.g. the Simple backend alone needs to support +the same API as any combination of feature and/or optimization modules included +to the Simple backend. We test this by reusing the same API definition (implemented +as test methods) in test cases with different setups. + +You can find the test cases that enforce the API in test/api. And you can find +the API definition test methods in test/api/tests. + +All other test cases (e.g. as defined in test/backend, test/core_ext) etc. +follow the usual test setup and should be easy to grok. + +## More Documentation + +Additional documentation can be found here: https://github.com/ruby-i18n/i18n/wiki + +## Contributors + +* @radar +* @carlosantoniodasilva +* @josevalim +* @knapo +* @tigrish +* [and many more](https://github.com/ruby-i18n/i18n/graphs/contributors) + +## License + +MIT License. See the included MIT-LICENSE file. diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n.rb new file mode 100644 index 0000000..e197e2b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n.rb @@ -0,0 +1,429 @@ +# frozen_string_literal: true + +require 'concurrent/map' +require 'concurrent/hash' + +require 'i18n/version' +require 'i18n/utils' +require 'i18n/exceptions' +require 'i18n/interpolate/ruby' + +module I18n + autoload :Backend, 'i18n/backend' + autoload :Config, 'i18n/config' + autoload :Gettext, 'i18n/gettext' + autoload :Locale, 'i18n/locale' + autoload :Tests, 'i18n/tests' + autoload :Middleware, 'i18n/middleware' + + RESERVED_KEYS = %i[ + cascade + deep_interpolation + default + exception_handler + fallback + fallback_in_progress + fallback_original_locale + format + object + raise + resolve + scope + separator + throw + ] + EMPTY_HASH = {}.freeze + + def self.new_double_nested_cache # :nodoc: + Concurrent::Map.new { |h, k| h[k] = Concurrent::Map.new } + end + + # Marks a key as reserved. Reserved keys are used internally, + # and can't also be used for interpolation. If you are using any + # extra keys as I18n options, you should call I18n.reserve_key + # before any I18n.translate (etc) calls are made. + def self.reserve_key(key) + RESERVED_KEYS << key.to_sym + @reserved_keys_pattern = nil + end + + def self.reserved_keys_pattern # :nodoc: + @reserved_keys_pattern ||= /%\{(#{RESERVED_KEYS.join("|")})\}/ + end + + module Base + # Gets I18n configuration object. + def config + Thread.current[:i18n_config] ||= I18n::Config.new + end + + # Sets I18n configuration object. + def config=(value) + Thread.current[:i18n_config] = value + end + + # Write methods which delegates to the configuration object + %w(locale backend default_locale available_locales default_separator + exception_handler load_path enforce_available_locales).each do |method| + module_eval <<-DELEGATORS, __FILE__, __LINE__ + 1 + def #{method} + config.#{method} + end + + def #{method}=(value) + config.#{method} = (value) + end + DELEGATORS + end + + # Tells the backend to reload translations. Used in situations like the + # Rails development environment. Backends can implement whatever strategy + # is useful. + def reload! 
+ config.clear_available_locales_set + config.backend.reload! + end + + # Tells the backend to load translations now. Used in situations like the + # Rails production environment. Backends can implement whatever strategy + # is useful. + def eager_load! + config.backend.eager_load! + end + + # Translates, pluralizes and interpolates a given key using a given locale, + # scope, and default, as well as interpolation values. + # + # *LOOKUP* + # + # Translation data is organized as a nested hash using the upper-level keys + # as namespaces. E.g., ActionView ships with the translation: + # :date => {:formats => {:short => "%b %d"}}. + # + # Translations can be looked up at any level of this hash using the key argument + # and the scope option. E.g., in this example I18n.t :date + # returns the whole translations hash {:formats => {:short => "%b %d"}}. + # + # Key can be either a single key or a dot-separated key (both Strings and Symbols + # work). E.g., the short format can be looked up using both: + # I18n.t 'date.formats.short' + # I18n.t :'date.formats.short' + # + # Scope can be either a single key, a dot-separated key or an array of keys + # or dot-separated keys. Keys and scopes can be combined freely. So these + # examples will all look up the same short date format: + # I18n.t 'date.formats.short' + # I18n.t 'formats.short', :scope => 'date' + # I18n.t 'short', :scope => 'date.formats' + # I18n.t 'short', :scope => %w(date formats) + # + # *INTERPOLATION* + # + # Translations can contain interpolation variables which will be replaced by + # values passed to #translate as part of the options hash, with the keys matching + # the interpolation variable names. + # + # E.g., with a translation :foo => "foo %{bar}" the option + # value for the key +bar+ will be interpolated into the translation: + # I18n.t :foo, :bar => 'baz' # => 'foo baz' + # + # *PLURALIZATION* + # + # Translation data can contain pluralized translations. Pluralized translations + # are arrays of singular/plural versions of translations like ['Foo', 'Foos']. + # + # Note that I18n::Backend::Simple only supports an algorithm for English + # pluralization rules. Other algorithms can be supported by custom backends. + # + # This returns the singular version of a pluralized translation: + # I18n.t :foo, :count => 1 # => 'Foo' + # + # These both return the plural version of a pluralized translation: + # I18n.t :foo, :count => 0 # => 'Foos' + # I18n.t :foo, :count => 2 # => 'Foos' + # + # The :count option can be used both for pluralization and interpolation. + # E.g., with the translation + # :foo => ['%{count} foo', '%{count} foos'], count will + # be interpolated to the pluralized translation: + # I18n.t :foo, :count => 1 # => '1 foo' + # + # *DEFAULTS* + # + # This returns the translation for :foo or default if no translation was found: + # I18n.t :foo, :default => 'default' + # + # This returns the translation for :foo or the translation for :bar if no + # translation for :foo was found: + # I18n.t :foo, :default => :bar + # + # Returns the translation for :foo or the translation for :bar + # or default if no translations for :foo and :bar were found. + # I18n.t :foo, :default => [:bar, 'default'] + # + # *BULK LOOKUP* + # + # This returns an array with the translations for :foo and :bar. 
+ # I18n.t [:foo, :bar] + # + # Can be used with dot-separated nested keys: + # I18n.t [:'baz.foo', :'baz.bar'] + # + # Which is the same as using a scope option: + # I18n.t [:foo, :bar], :scope => :baz + # + # *LAMBDAS* + # + # Both translations and defaults can be given as Ruby lambdas. Lambdas will be + # called and passed the key and options. + # + # E.g. assuming the key :salutation resolves to: + # lambda { |key, options| options[:gender] == 'm' ? "Mr. #{options[:name]}" : "Mrs. #{options[:name]}" } + # + # Then I18n.t(:salutation, :gender => 'w', :name => 'Smith') will result in "Mrs. Smith". + # + # Note that the string returned by lambda will go through string interpolation too, + # so the following lambda would give the same result: + # lambda { |key, options| options[:gender] == 'm' ? "Mr. %{name}" : "Mrs. %{name}" } + # + # It is recommended to use/implement lambdas in an "idempotent" way. E.g. when + # a cache layer is put in front of I18n.translate it will generate a cache key + # from the argument values passed to #translate. Therefore your lambdas should + # always return the same translations/values per unique combination of argument + # values. + # + # *Ruby 2.7+ keyword arguments warning* + # + # This method uses keyword arguments. + # There is a breaking change in ruby that produces warning with ruby 2.7 and won't work as expected with ruby 3.0 + # The "hash" parameter must be passed as keyword argument. + # + # Good: + # I18n.t(:salutation, :gender => 'w', :name => 'Smith') + # I18n.t(:salutation, **{ :gender => 'w', :name => 'Smith' }) + # I18n.t(:salutation, **any_hash) + # + # Bad: + # I18n.t(:salutation, { :gender => 'w', :name => 'Smith' }) + # I18n.t(:salutation, any_hash) + # + def translate(key = nil, throw: false, raise: false, locale: nil, **options) # TODO deprecate :raise + locale ||= config.locale + raise Disabled.new('t') if locale == false + enforce_available_locales!(locale) + + backend = config.backend + + result = catch(:exception) do + if key.is_a?(Array) + key.map { |k| backend.translate(locale, k, options) } + else + backend.translate(locale, key, options) + end + end + + if result.is_a?(MissingTranslation) + handle_exception((throw && :throw || raise && :raise), result, locale, key, options) + else + result + end + end + alias :t :translate + + # Wrapper for translate that adds :raise => true. With + # this option, if no translation is found, it will raise I18n::MissingTranslationData + def translate!(key, **options) + translate(key, **options, raise: true) + end + alias :t! :translate! + + # Returns true if a translation exists for a given key, otherwise returns false. + def exists?(key, _locale = nil, locale: _locale, **options) + locale ||= config.locale + raise Disabled.new('exists?') if locale == false + raise I18n::ArgumentError if key.is_a?(String) && key.empty? + config.backend.exists?(locale, key, options) + end + + # Transliterates UTF-8 characters to ASCII. By default this method will + # transliterate only Latin strings to an ASCII approximation: + # + # I18n.transliterate("Ærøskøbing") + # # => "AEroskobing" + # + # I18n.transliterate("日本語") + # # => "???" + # + # It's also possible to add support for per-locale transliterations. I18n + # expects transliteration rules to be stored at + # i18n.transliterate.rule. + # + # Transliteration rules can either be a Hash or a Proc. Procs must accept a + # single string argument. Hash rules inherit the default transliteration + # rules, while Procs do not. 
+ # + # *Examples* + # + # Setting a Hash in .yml: + # + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # Setting a Hash using Ruby: + # + # store_translations(:de, i18n: { + # transliterate: { + # rule: { + # 'ü' => 'ue', + # 'ö' => 'oe' + # } + # } + # }) + # + # Setting a Proc: + # + # translit = lambda {|string| MyTransliterator.transliterate(string) } + # store_translations(:xx, :i18n => {:transliterate => {:rule => translit}) + # + # Transliterating strings: + # + # I18n.locale = :en + # I18n.transliterate("Jürgen") # => "Jurgen" + # I18n.locale = :de + # I18n.transliterate("Jürgen") # => "Juergen" + # I18n.transliterate("Jürgen", :locale => :en) # => "Jurgen" + # I18n.transliterate("Jürgen", :locale => :de) # => "Juergen" + def transliterate(key, throw: false, raise: false, locale: nil, replacement: nil, **options) + locale ||= config.locale + raise Disabled.new('transliterate') if locale == false + enforce_available_locales!(locale) + + config.backend.transliterate(locale, key, replacement) + rescue I18n::ArgumentError => exception + handle_exception((throw && :throw || raise && :raise), exception, locale, key, options) + end + + # Localizes certain objects, such as dates and numbers to local formatting. + def localize(object, locale: nil, format: nil, **options) + locale ||= config.locale + raise Disabled.new('l') if locale == false + enforce_available_locales!(locale) + + format ||= :default + config.backend.localize(locale, object, format, options) + end + alias :l :localize + + # Executes block with given I18n.locale set. + def with_locale(tmp_locale = nil) + if tmp_locale == nil + yield + else + current_locale = self.locale + self.locale = tmp_locale + begin + yield + ensure + self.locale = current_locale + end + end + end + + # Merges the given locale, key and scope into a single array of keys. + # Splits keys that contain dots into multiple keys. Makes sure all + # keys are Symbols. + def normalize_keys(locale, key, scope, separator = nil) + separator ||= I18n.default_separator + + [ + *normalize_key(locale, separator), + *normalize_key(scope, separator), + *normalize_key(key, separator) + ] + end + + # Returns true when the passed locale, which can be either a String or a + # Symbol, is in the list of available locales. Returns false otherwise. + def locale_available?(locale) + I18n.config.available_locales_set.include?(locale) + end + + # Raises an InvalidLocale exception when the passed locale is not available. + def enforce_available_locales!(locale) + if locale != false && config.enforce_available_locales + raise I18n::InvalidLocale.new(locale) if !locale_available?(locale) + end + end + + def available_locales_initialized? + config.available_locales_initialized? + end + + private + + # Any exceptions thrown in translate will be sent to the @@exception_handler + # which can be a Symbol, a Proc or any other Object unless they're forced to + # be raised or thrown (MissingTranslation). + # + # If exception_handler is a Symbol then it will simply be sent to I18n as + # a method call. A Proc will simply be called. In any other case the + # method #call will be called on the exception_handler object. + # + # Examples: + # + # I18n.exception_handler = :custom_exception_handler # this is the default + # I18n.custom_exception_handler(exception, locale, key, options) # will be called like this + # + # I18n.exception_handler = lambda { |*args| ... 
} # a lambda + # I18n.exception_handler.call(exception, locale, key, options) # will be called like this + # + # I18n.exception_handler = I18nExceptionHandler.new # an object + # I18n.exception_handler.call(exception, locale, key, options) # will be called like this + def handle_exception(handling, exception, locale, key, options) + case handling + when :raise + raise exception.respond_to?(:to_exception) ? exception.to_exception : exception + when :throw + throw :exception, exception + else + case handler = options[:exception_handler] || config.exception_handler + when Symbol + send(handler, exception, locale, key, options) + else + handler.call(exception, locale, key, options) + end + end + end + + @@normalized_key_cache = I18n.new_double_nested_cache + + def normalize_key(key, separator) + @@normalized_key_cache[separator][key] ||= + case key + when Array + key.flat_map { |k| normalize_key(k, separator) } + else + keys = key.to_s.split(separator) + keys.delete('') + keys.map! do |k| + case k + when /\A[-+]?([1-9]\d*|0)\z/ # integer + k.to_i + when 'true' + true + when 'false' + false + else + k.to_sym + end + end + keys + end + end + end + + extend Base +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend.rb new file mode 100644 index 0000000..863d618 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module I18n + module Backend + autoload :Base, 'i18n/backend/base' + autoload :Cache, 'i18n/backend/cache' + autoload :CacheFile, 'i18n/backend/cache_file' + autoload :Cascade, 'i18n/backend/cascade' + autoload :Chain, 'i18n/backend/chain' + autoload :Fallbacks, 'i18n/backend/fallbacks' + autoload :Flatten, 'i18n/backend/flatten' + autoload :Gettext, 'i18n/backend/gettext' + autoload :InterpolationCompiler, 'i18n/backend/interpolation_compiler' + autoload :KeyValue, 'i18n/backend/key_value' + autoload :LazyLoadable, 'i18n/backend/lazy_loadable' + autoload :Memoize, 'i18n/backend/memoize' + autoload :Metadata, 'i18n/backend/metadata' + autoload :Pluralization, 'i18n/backend/pluralization' + autoload :Simple, 'i18n/backend/simple' + autoload :Transliterator, 'i18n/backend/transliterator' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/base.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/base.rb new file mode 100644 index 0000000..4cbcc3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/base.rb @@ -0,0 +1,299 @@ +# frozen_string_literal: true + +require 'yaml' +require 'json' + +module I18n + module Backend + module Base + include I18n::Backend::Transliterator + + # Accepts a list of paths to translation files. Loads translations from + # plain Ruby (*.rb), YAML files (*.yml), or JSON files (*.json). See #load_rb, #load_yml, and #load_json + # for details. + def load_translations(*filenames) + filenames = I18n.load_path if filenames.empty? + filenames.flatten.each do |filename| + loaded_translations = load_file(filename) + yield filename, loaded_translations if block_given? + end + end + + # This method receives a locale, a data hash and options for storing translations. + # Should be implemented + def store_translations(locale, data, options = EMPTY_HASH) + raise NotImplementedError + end + + def translate(locale, key, options = EMPTY_HASH) + raise I18n::ArgumentError if (key.is_a?(String) || key.is_a?(Symbol)) && key.empty? 
+ raise InvalidLocale.new(locale) unless locale + return nil if key.nil? && !options.key?(:default) + + entry = lookup(locale, key, options[:scope], options) unless key.nil? + + if entry.nil? && options.key?(:default) + entry = default(locale, key, options[:default], options) + else + entry = resolve_entry(locale, key, entry, options) + end + + count = options[:count] + + if entry.nil? && (subtrees? || !count) + if (options.key?(:default) && !options[:default].nil?) || !options.key?(:default) + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + end + + entry = entry.dup if entry.is_a?(String) + entry = pluralize(locale, entry, count) if count + + if entry.nil? && !subtrees? + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + deep_interpolation = options[:deep_interpolation] + values = Utils.except(options, *RESERVED_KEYS) + if values + entry = if deep_interpolation + deep_interpolate(locale, entry, values) + else + interpolate(locale, entry, values) + end + end + entry + end + + def exists?(locale, key, options = EMPTY_HASH) + lookup(locale, key) != nil + end + + # Acts the same as +strftime+, but uses a localized version of the + # format string. Takes a key from the date/time formats translations as + # a format argument (e.g., :short in :'date.formats'). + def localize(locale, object, format = :default, options = EMPTY_HASH) + if object.nil? && options.include?(:default) + return options[:default] + end + raise ArgumentError, "Object must be a Date, DateTime or Time object. #{object.inspect} given." unless object.respond_to?(:strftime) + + if Symbol === format + key = format + type = object.respond_to?(:sec) ? 'time' : 'date' + options = options.merge(:raise => true, :object => object, :locale => locale) + format = I18n.t(:"#{type}.formats.#{key}", **options) + end + + format = translate_localization_format(locale, object, format, options) + object.strftime(format) + end + + # Returns an array of locales for which translations are available + # ignoring the reserved translation meta data key :i18n. + def available_locales + raise NotImplementedError + end + + def reload! + eager_load! if eager_loaded? + end + + def eager_load! + @eager_loaded = true + end + + protected + + def eager_loaded? + @eager_loaded ||= false + end + + # The method which actually looks up for the translation in the store. + def lookup(locale, key, scope = [], options = EMPTY_HASH) + raise NotImplementedError + end + + def subtrees? + true + end + + # Evaluates defaults. + # If given subject is an Array, it walks the array and returns the + # first translation that can be resolved. Otherwise it tries to resolve + # the translation directly. + def default(locale, object, subject, options = EMPTY_HASH) + options = options.reject { |key, value| key == :default } + case subject + when Array + subject.each do |item| + result = resolve(locale, object, item, options) + return result unless result.nil? + end and nil + else + resolve(locale, object, subject, options) + end + end + + # Resolves a translation. + # If the given subject is a Symbol, it will be translated with the + # given options. If it is a Proc then it will be evaluated. All other + # subjects will be returned directly. 
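Before the `resolve` implementation below, a small hedged sketch of how the `:default` resolution described above behaves in practice; the keys and strings are hypothetical and the translations are stored inline purely for illustration:

```ruby
require 'i18n'

I18n.backend.store_translations(:en, { fallback_msg: 'from :fallback_msg' })

I18n.t(:missing, default: 'plain string')           # => "plain string"
I18n.t(:missing, default: :fallback_msg)            # => "from :fallback_msg" (Symbol defaults are looked up again)
I18n.t(:missing, default: [:also_missing, 'last'])  # => "last" (first resolvable entry wins)
```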
+ def resolve(locale, object, subject, options = EMPTY_HASH) + return subject if options[:resolve] == false + result = catch(:exception) do + case subject + when Symbol + I18n.translate(subject, **options.merge(:locale => locale, :throw => true)) + when Proc + date_or_time = options.delete(:object) || object + resolve(locale, object, subject.call(date_or_time, **options)) + else + subject + end + end + result unless result.is_a?(MissingTranslation) + end + alias_method :resolve_entry, :resolve + + # Picks a translation from a pluralized mnemonic subkey according to English + # pluralization rules : + # - It will pick the :one subkey if count is equal to 1. + # - It will pick the :other subkey otherwise. + # - It will pick the :zero subkey in the special case where count is + # equal to 0 and there is a :zero subkey present. This behaviour is + # not standard with regards to the CLDR pluralization rules. + # Other backends can implement more flexible or complex pluralization rules. + def pluralize(locale, entry, count) + entry = entry.reject { |k, _v| k == :attributes } if entry.is_a?(Hash) + return entry unless entry.is_a?(Hash) && count && entry.values.none? { |v| v.is_a?(Hash) } + + key = pluralization_key(entry, count) + raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) + entry[key] + end + + # Interpolates values into a given subject. + # + # if the given subject is a string then: + # method interpolates "file %{file} opened by %%{user}", :file => 'test.txt', :user => 'Mr. X' + # # => "file test.txt opened by %{user}" + # + # if the given subject is an array then: + # each element of the array is recursively interpolated (until it finds a string) + # method interpolates ["yes, %{user}", ["maybe no, %{user}, "no, %{user}"]], :user => "bartuz" + # # => "["yes, bartuz",["maybe no, bartuz", "no, bartuz"]]" + def interpolate(locale, subject, values = EMPTY_HASH) + return subject if values.empty? + + case subject + when ::String then I18n.interpolate(subject, values) + when ::Array then subject.map { |element| interpolate(locale, element, values) } + else + subject + end + end + + # Deep interpolation + # + # deep_interpolate { people: { ann: "Ann is %{ann}", john: "John is %{john}" } }, + # ann: 'good', john: 'big' + # #=> { people: { ann: "Ann is good", john: "John is big" } } + def deep_interpolate(locale, data, values = EMPTY_HASH) + return data if values.empty? + + case data + when ::String + I18n.interpolate(data, values) + when ::Hash + data.each_with_object({}) do |(k, v), result| + result[k] = deep_interpolate(locale, v, values) + end + when ::Array + data.map do |v| + deep_interpolate(locale, v, values) + end + else + data + end + end + + # Loads a single translations file by delegating to #load_rb or + # #load_yml depending on the file extension and directly merges the + # data to the existing translations. Raises I18n::UnknownFileType + # for all other file extensions. + def load_file(filename) + type = File.extname(filename).tr('.', '').downcase + raise UnknownFileType.new(type, filename) unless respond_to?(:"load_#{type}", true) + data, keys_symbolized = send(:"load_#{type}", filename) + unless data.is_a?(Hash) + raise InvalidLocaleData.new(filename, 'expects it to return a hash, but does not') + end + data.each { |locale, d| store_translations(locale, d || {}, skip_symbolize_keys: keys_symbolized) } + + data + end + + # Loads a plain Ruby translations file. 
eval'ing the file must yield + # a Hash containing translation data with locales as toplevel keys. + def load_rb(filename) + translations = eval(IO.read(filename), binding, filename) + [translations, false] + end + + # Loads a YAML translations file. The data must have locales as + # toplevel keys. + def load_yml(filename) + begin + if YAML.respond_to?(:unsafe_load_file) # Psych 4.0 way + [YAML.unsafe_load_file(filename, symbolize_names: true, freeze: true), true] + else + [YAML.load_file(filename), false] + end + rescue TypeError, ScriptError, StandardError => e + raise InvalidLocaleData.new(filename, e.inspect) + end + end + alias_method :load_yaml, :load_yml + + # Loads a JSON translations file. The data must have locales as + # toplevel keys. + def load_json(filename) + begin + # Use #load_file as a proxy for a version of JSON where symbolize_names and freeze are supported. + if ::JSON.respond_to?(:load_file) + [::JSON.load_file(filename, symbolize_names: true, freeze: true), true] + else + [::JSON.parse(File.read(filename)), false] + end + rescue TypeError, StandardError => e + raise InvalidLocaleData.new(filename, e.inspect) + end + end + + def translate_localization_format(locale, object, format, options) + format.to_s.gsub(/%(|\^)[aAbBpP]/) do |match| + case match + when '%a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday] + when '%^a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday].upcase + when '%A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday] + when '%^A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday].upcase + when '%b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon] + when '%^b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon].upcase + when '%B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon] + when '%^B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon].upcase + when '%p' then I18n.t!(:"time.#{object.hour < 12 ? :am : :pm}", :locale => locale, :format => format).upcase if object.respond_to? :hour + when '%P' then I18n.t!(:"time.#{object.hour < 12 ? :am : :pm}", :locale => locale, :format => format).downcase if object.respond_to? :hour + end + end + rescue MissingTranslationData => e + e.message + end + + def pluralization_key(entry, count) + key = :zero if count == 0 && entry.has_key?(:zero) + key ||= count == 1 ? :one : :other + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cache.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cache.rb new file mode 100644 index 0000000..40c18d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cache.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +# This module allows you to easily cache all responses from the backend - thus +# speeding up the I18n aspects of your application quite a bit. 
+# +# To enable caching you can simply include the Cache module to the Simple +# backend - or whatever other backend you are using: +# +# I18n::Backend::Simple.send(:include, I18n::Backend::Cache) +# +# You will also need to set a cache store implementation that you want to use: +# +# I18n.cache_store = ActiveSupport::Cache.lookup_store(:memory_store) +# +# You can use any cache implementation you want that provides the same API as +# ActiveSupport::Cache (only the methods #fetch and #write are being used). +# +# The cache_key implementation by default assumes you pass values that return +# a valid key from #hash (see +# https://www.ruby-doc.org/core/classes/Object.html#M000337). However, you can +# configure your own digest method via which responds to #hexdigest (see +# https://ruby-doc.org/stdlib/libdoc/openssl/rdoc/OpenSSL/Digest.html): +# +# I18n.cache_key_digest = OpenSSL::Digest::SHA256.new +# +# If you use a lambda as a default value in your translation like this: +# +# I18n.t(:"date.order", :default => lambda {[:month, :day, :year]}) +# +# Then you will always have a cache miss, because each time this method +# is called the lambda will have a different hash value. If you know +# the result of the lambda is a constant as in the example above, then +# to cache this you can make the lambda a constant, like this: +# +# DEFAULT_DATE_ORDER = lambda {[:month, :day, :year]} +# ... +# I18n.t(:"date.order", :default => DEFAULT_DATE_ORDER) +# +# If the lambda may result in different values for each call then consider +# also using the Memoize backend. +# +module I18n + class << self + @@cache_store = nil + @@cache_namespace = nil + @@cache_key_digest = nil + + def cache_store + @@cache_store + end + + def cache_store=(store) + @@cache_store = store + end + + def cache_namespace + @@cache_namespace + end + + def cache_namespace=(namespace) + @@cache_namespace = namespace + end + + def cache_key_digest + @@cache_key_digest + end + + def cache_key_digest=(key_digest) + @@cache_key_digest = key_digest + end + + def perform_caching? + !cache_store.nil? + end + end + + module Backend + # TODO Should the cache be cleared if new translations are stored? + module Cache + def translate(locale, key, options = EMPTY_HASH) + I18n.perform_caching? ? fetch(cache_key(locale, key, options)) { super } : super + end + + protected + + def fetch(cache_key, &block) + result = _fetch(cache_key, &block) + throw(:exception, result) if result.is_a?(MissingTranslation) + result = result.dup if result.frozen? rescue result + result + end + + def _fetch(cache_key, &block) + result = I18n.cache_store.read(cache_key) + return result unless result.nil? + result = catch(:exception, &block) + I18n.cache_store.write(cache_key, result) unless result.is_a?(Proc) + result + end + + def cache_key(locale, key, options) + # This assumes that only simple, native Ruby values are passed to I18n.translate. + "i18n/#{I18n.cache_namespace}/#{locale}/#{digest_item(key)}/#{digest_item(options)}" + end + + private + + def digest_item(key) + I18n.cache_key_digest ? 
I18n.cache_key_digest.hexdigest(key.to_s) : key.to_s.hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cache_file.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cache_file.rb new file mode 100644 index 0000000..0c5e192 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cache_file.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require 'openssl' + +module I18n + module Backend + # Overwrites the Base load_file method to cache loaded file contents. + module CacheFile + # Optionally provide path_roots array to normalize filename paths, + # to make the cached i18n data portable across environments. + attr_accessor :path_roots + + protected + + # Track loaded translation files in the `i18n.load_file` scope, + # and skip loading the file if its contents are still up-to-date. + def load_file(filename) + initialized = !respond_to?(:initialized?) || initialized? + key = I18n::Backend::Flatten.escape_default_separator(normalized_path(filename)) + old_mtime, old_digest = initialized && lookup(:i18n, key, :load_file) + return if (mtime = File.mtime(filename).to_i) == old_mtime || + (digest = OpenSSL::Digest::SHA256.file(filename).hexdigest) == old_digest + super + store_translations(:i18n, load_file: { key => [mtime, digest] }) + end + + # Translate absolute filename to relative path for i18n key. + def normalized_path(file) + return file unless path_roots + path = path_roots.find(&file.method(:start_with?)) || + raise(InvalidLocaleData.new(file, 'outside expected path roots')) + file.sub(path, path_roots.index(path).to_s) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cascade.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cascade.rb new file mode 100644 index 0000000..782b07b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/cascade.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +# The Cascade module adds the ability to do cascading lookups to backends that +# are compatible to the Simple backend. +# +# By cascading lookups we mean that for any key that can not be found the +# Cascade module strips one segment off the scope part of the key and then +# tries to look up the key in that scope. +# +# E.g. when a lookup for the key :"foo.bar.baz" does not yield a result then +# the segment :bar will be stripped off the scope part :"foo.bar" and the new +# scope :foo will be used to look up the key :baz. If that does not succeed +# then the remaining scope segment :foo will be omitted, too, and again the +# key :baz will be looked up (now with no scope). +# +# To enable a cascading lookup one passes the :cascade option: +# +# I18n.t(:'foo.bar.baz', :cascade => true) +# +# This will return the first translation found for :"foo.bar.baz", :"foo.baz" +# or :baz in this order. +# +# The cascading lookup takes precedence over resolving any given defaults. +# I.e. defaults will kick in after the cascading lookups haven't succeeded. +# +# This behavior is useful for libraries like ActiveRecord validations where +# the library wants to give users a bunch of more or less fine-grained options +# of scopes for a particular key. +# +# Thanks to Clemens Kofler for the initial idea and implementation! 
See +# http://github.com/clemens/i18n-cascading-backend + +module I18n + module Backend + module Cascade + def lookup(locale, key, scope = [], options = EMPTY_HASH) + return super unless cascade = options[:cascade] + + cascade = { :step => 1 } unless cascade.is_a?(Hash) + step = cascade[:step] || 1 + offset = cascade[:offset] || 1 + separator = options[:separator] || I18n.default_separator + skip_root = cascade.has_key?(:skip_root) ? cascade[:skip_root] : true + + scope = I18n.normalize_keys(nil, key, scope, separator) + key = (scope.slice!(-offset, offset) || []).join(separator) + + begin + result = super + return result unless result.nil? + scope = scope.dup + end while (!scope.empty? || !skip_root) && scope.slice!(-step, step) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/chain.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/chain.rb new file mode 100644 index 0000000..525dd2d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/chain.rb @@ -0,0 +1,128 @@ +# frozen_string_literal: true + +module I18n + module Backend + # Backend that chains multiple other backends and checks each of them when + # a translation needs to be looked up. This is useful when you want to use + # standard translations with a Simple backend but store custom application + # translations in a database or other backends. + # + # To use the Chain backend instantiate it and set it to the I18n module. + # You can add chained backends through the initializer or backends + # accessor: + # + # # preserves the existing Simple backend set to I18n.backend + # I18n.backend = I18n::Backend::Chain.new(I18n::Backend::ActiveRecord.new, I18n.backend) + # + # The implementation assumes that all backends added to the Chain implement + # a lookup method with the same API as Simple backend does. + class Chain + module Implementation + include Base + + attr_accessor :backends + + def initialize(*backends) + self.backends = backends + end + + def initialized? + backends.all? do |backend| + backend.instance_eval do + return false unless initialized? + end + end + true + end + + def reload! + backends.each { |backend| backend.reload! } + end + + def eager_load! + backends.each { |backend| backend.eager_load! } + end + + def store_translations(locale, data, options = EMPTY_HASH) + backends.first.store_translations(locale, data, options) + end + + def available_locales + backends.map { |backend| backend.available_locales }.flatten.uniq + end + + def translate(locale, key, default_options = EMPTY_HASH) + namespace = nil + options = Utils.except(default_options, :default) + + backends.each do |backend| + catch(:exception) do + options = default_options if backend == backends.last + translation = backend.translate(locale, key, options) + if namespace_lookup?(translation, options) + namespace = _deep_merge(translation, namespace || {}) + elsif !translation.nil? || (options.key?(:default) && options[:default].nil?) + return translation + end + end + end + + return namespace if namespace + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + def exists?(locale, key, options = EMPTY_HASH) + backends.any? 
do |backend| + backend.exists?(locale, key, options) + end + end + + def localize(locale, object, format = :default, options = EMPTY_HASH) + backends.each do |backend| + catch(:exception) do + result = backend.localize(locale, object, format, options) and return result + end + end + throw(:exception, I18n::MissingTranslation.new(locale, format, options)) + end + + protected + def init_translations + backends.each do |backend| + backend.send(:init_translations) + end + end + + def translations + backends.reverse.each_with_object({}) do |backend, memo| + partial_translations = backend.instance_eval do + init_translations unless initialized? + translations + end + Utils.deep_merge!(memo, partial_translations) { |_, a, b| b || a } + end + end + + def namespace_lookup?(result, options) + result.is_a?(Hash) && !options.has_key?(:count) + end + + private + # This is approximately what gets used in ActiveSupport. + # However since we are not guaranteed to run in an ActiveSupport context + # it is wise to have our own copy. We underscore it + # to not pollute the namespace of the including class. + def _deep_merge(hash, other_hash) + copy = hash.dup + other_hash.each_pair do |k,v| + value_from_other = hash[k] + copy[k] = value_from_other.is_a?(Hash) && v.is_a?(Hash) ? _deep_merge(value_from_other, v) : v + end + copy + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/fallbacks.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/fallbacks.rb new file mode 100644 index 0000000..7afbfe3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/fallbacks.rb @@ -0,0 +1,115 @@ +# frozen_string_literal: true + +# I18n locale fallbacks are useful when you want your application to use +# translations from other locales when translations for the current locale are +# missing. E.g. you might want to use :en translations when translations in +# your applications main locale :de are missing. +# +# To enable locale fallbacks you can simply include the Fallbacks module to +# the Simple backend - or whatever other backend you are using: +# +# I18n::Backend::Simple.include(I18n::Backend::Fallbacks) +module I18n + @@fallbacks = nil + + class << self + # Returns the current fallbacks implementation. Defaults to +I18n::Locale::Fallbacks+. + def fallbacks + @@fallbacks ||= I18n::Locale::Fallbacks.new + Thread.current[:i18n_fallbacks] || @@fallbacks + end + + # Sets the current fallbacks implementation. Use this to set a different fallbacks implementation. + def fallbacks=(fallbacks) + @@fallbacks = fallbacks.is_a?(Array) ? I18n::Locale::Fallbacks.new(fallbacks) : fallbacks + Thread.current[:i18n_fallbacks] = @@fallbacks + end + end + + module Backend + module Fallbacks + # Overwrites the Base backend translate method so that it will try each + # locale given by I18n.fallbacks for the given locale. E.g. for the + # locale :"de-DE" it might try the locales :"de-DE", :de and :en + # (depends on the fallbacks implementation) until it finds a result with + # the given options. If it does not find any result for any of the + # locales it will then throw MissingTranslation as usual. + # + # The default option takes precedence over fallback locales only when + # it's a Symbol. When the default contains a String, Proc or Hash + # it is evaluated last after all the fallback locales have been tried. 
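+      #
+      # A minimal usage sketch (locales and translations are illustrative):
+      #
+      #   I18n::Backend::Simple.include(I18n::Backend::Fallbacks)
+      #   I18n.available_locales = [:en, :'de-DE']
+      #   I18n.fallbacks = [:en]
+      #   I18n.backend.store_translations(:en, :greeting => 'Hello')
+      #   I18n.t(:greeting, :locale => :'de-DE')  # => "Hello", found via the :en fallback
+      #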
+ def translate(locale, key, options = EMPTY_HASH) + return super unless options.fetch(:fallback, true) + return super if options[:fallback_in_progress] + default = extract_non_symbol_default!(options) if options[:default] + + fallback_options = options.merge(:fallback_in_progress => true, fallback_original_locale: locale) + I18n.fallbacks[locale].each do |fallback| + begin + catch(:exception) do + result = super(fallback, key, fallback_options) + unless result.nil? + on_fallback(locale, fallback, key, options) if locale.to_s != fallback.to_s + return result + end + end + rescue I18n::InvalidLocale + # we do nothing when the locale is invalid, as this is a fallback anyways. + end + end + + return if options.key?(:default) && options[:default].nil? + + return super(locale, nil, options.merge(:default => default)) if default + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + def resolve_entry(locale, object, subject, options = EMPTY_HASH) + return subject if options[:resolve] == false + result = catch(:exception) do + options.delete(:fallback_in_progress) if options.key?(:fallback_in_progress) + + case subject + when Symbol + I18n.translate(subject, **options.merge(:locale => options[:fallback_original_locale], :throw => true)) + when Proc + date_or_time = options.delete(:object) || object + resolve_entry(options[:fallback_original_locale], object, subject.call(date_or_time, **options)) + else + subject + end + end + result unless result.is_a?(MissingTranslation) + end + + def extract_non_symbol_default!(options) + defaults = [options[:default]].flatten + first_non_symbol_default = defaults.detect{|default| !default.is_a?(Symbol)} + if first_non_symbol_default + options[:default] = defaults[0, defaults.index(first_non_symbol_default)] + end + return first_non_symbol_default + end + + def exists?(locale, key, options = EMPTY_HASH) + return super unless options.fetch(:fallback, true) + I18n.fallbacks[locale].each do |fallback| + begin + return true if super(fallback, key) + rescue I18n::InvalidLocale + # we do nothing when the locale is invalid, as this is a fallback anyways. + end + end + + false + end + + private + + # Overwrite on_fallback to add specified logic when the fallback succeeds. + def on_fallback(_original_locale, _fallback_locale, _key, _optoins) + nil + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/flatten.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/flatten.rb new file mode 100644 index 0000000..e9bd9d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/flatten.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +module I18n + module Backend + # This module contains several helpers to assist flattening translations. + # You may want to flatten translations for: + # + # 1) speed up lookups, as in the Memoize backend; + # 2) In case you want to store translations in a data store, as in ActiveRecord backend; + # + # You can check both backends above for some examples. + # This module also keeps all links in a hash so they can be properly resolved when flattened. + module Flatten + SEPARATOR_ESCAPE_CHAR = "\001" + FLATTEN_SEPARATOR = "." + + # normalize_keys the flatten way. This method is significantly faster + # and creates way less objects than the one at I18n.normalize_keys. + # It also handles escaping the translation keys. + def self.normalize_flat_keys(locale, key, scope, separator) + keys = [scope, key] + keys.flatten! + keys.compact! 
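+
+        # e.g. normalize_flat_keys(:en, :format, :currency, ".")  # => "currency.format"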
+ + separator ||= I18n.default_separator + + if separator != FLATTEN_SEPARATOR + from_str = "#{FLATTEN_SEPARATOR}#{separator}" + to_str = "#{SEPARATOR_ESCAPE_CHAR}#{FLATTEN_SEPARATOR}" + + keys.map! { |k| k.to_s.tr from_str, to_str } + end + + keys.join(".") + end + + # Receives a string and escape the default separator. + def self.escape_default_separator(key) #:nodoc: + key.to_s.tr(FLATTEN_SEPARATOR, SEPARATOR_ESCAPE_CHAR) + end + + # Shortcut to I18n::Backend::Flatten.normalize_flat_keys + # and then resolve_links. + def normalize_flat_keys(locale, key, scope, separator) + key = I18n::Backend::Flatten.normalize_flat_keys(locale, key, scope, separator) + resolve_link(locale, key) + end + + # Store flattened links. + def links + @links ||= I18n.new_double_nested_cache + end + + # Flatten keys for nested Hashes by chaining up keys: + # + # >> { "a" => { "b" => { "c" => "d", "e" => "f" }, "g" => "h" }, "i" => "j"}.wind + # => { "a.b.c" => "d", "a.b.e" => "f", "a.g" => "h", "i" => "j" } + # + def flatten_keys(hash, escape, prev_key=nil, &block) + hash.each_pair do |key, value| + key = escape_default_separator(key) if escape + curr_key = [prev_key, key].compact.join(FLATTEN_SEPARATOR).to_sym + yield curr_key, value + flatten_keys(value, escape, curr_key, &block) if value.is_a?(Hash) + end + end + + # Receives a hash of translations (where the key is a locale and + # the value is another hash) and return a hash with all + # translations flattened. + # + # Nested hashes are included in the flattened hash just if subtree + # is true and Symbols are automatically stored as links. + def flatten_translations(locale, data, escape, subtree) + hash = {} + flatten_keys(data, escape) do |key, value| + if value.is_a?(Hash) + hash[key] = value if subtree + else + store_link(locale, key, value) if value.is_a?(Symbol) + hash[key] = value + end + end + hash + end + + protected + + def store_link(locale, key, link) + links[locale.to_sym][key.to_s] = link.to_s + end + + def resolve_link(locale, key) + key, locale = key.to_s, locale.to_sym + links = self.links[locale] + + if links.key?(key) + links[key] + elsif link = find_link(locale, key) + store_link(locale, key, key.gsub(*link)) + else + key + end + end + + def find_link(locale, key) #:nodoc: + links[locale].each_pair do |from, to| + return [from, to] if key[0, from.length] == from + end && nil + end + + def escape_default_separator(key) #:nodoc: + I18n::Backend::Flatten.escape_default_separator(key) + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/gettext.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/gettext.rb new file mode 100644 index 0000000..0769646 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/gettext.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +require 'i18n/gettext' +require 'i18n/gettext/po_parser' + +module I18n + module Backend + # Experimental support for using Gettext po files to store translations. + # + # To use this you can simply include the module to the Simple backend - or + # whatever other backend you are using. + # + # I18n::Backend::Simple.include(I18n::Backend::Gettext) + # + # Now you should be able to include your Gettext translation (*.po) files to + # the +I18n.load_path+ so they're loaded to the backend and you can use them as + # usual: + # + # I18n.load_path += Dir["path/to/locales/*.po"] + # + # Following the Gettext convention this implementation expects that your + # translation files are named by their locales. E.g. 
the file en.po would + # contain the translations for the English locale. + # + # To translate text you must use one of the translate methods provided by + # I18n::Gettext::Helpers. + # + # include I18n::Gettext::Helpers + # puts _("some string") + # + # Without it strings containing periods (".") will not be translated. + + module Gettext + class PoData < Hash + def set_comment(msgid_or_sym, comment) + # ignore + end + end + + protected + def load_po(filename) + locale = ::File.basename(filename, '.po').to_sym + data = normalize(locale, parse(filename)) + [{ locale => data }, false] + end + + def parse(filename) + GetText::PoParser.new.parse(::File.read(filename), PoData.new) + end + + def normalize(locale, data) + data.inject({}) do |result, (key, value)| + unless key.nil? || key.empty? + key = key.gsub(I18n::Gettext::CONTEXT_SEPARATOR, '|') + key, value = normalize_pluralization(locale, key, value) if key.index("\000") + + parts = key.split('|').reverse + normalized = parts.inject({}) do |_normalized, part| + { part => _normalized.empty? ? value : _normalized } + end + + Utils.deep_merge!(result, normalized) + end + result + end + end + + def normalize_pluralization(locale, key, value) + # FIXME po_parser includes \000 chars that can not be turned into Symbols + key = key.gsub("\000", I18n::Gettext::PLURAL_SEPARATOR).split(I18n::Gettext::PLURAL_SEPARATOR).first + + keys = I18n::Gettext.plural_keys(locale) + values = value.split("\000") + raise "invalid number of plurals: #{values.size}, keys: #{keys.inspect} on #{locale} locale for msgid #{key.inspect} with values #{values.inspect}" if values.size != keys.size + + result = {} + values.each_with_index { |_value, ix| result[keys[ix]] = _value } + [key, result] + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/interpolation_compiler.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/interpolation_compiler.rb new file mode 100644 index 0000000..8b52e7b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/interpolation_compiler.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +# The InterpolationCompiler module contains optimizations that can tremendously +# speed up the interpolation process on the Simple backend. +# +# It works by defining a pre-compiled method on stored translation Strings that +# already bring all the knowledge about contained interpolation variables etc. +# so that the actual recurring interpolation will be very fast. +# +# To enable pre-compiled interpolations you can simply include the +# InterpolationCompiler module to the Simple backend: +# +# I18n::Backend::Simple.include(I18n::Backend::InterpolationCompiler) +# +# Note that InterpolationCompiler does not yield meaningful results and consequently +# should not be used with Ruby 1.9 (YARV) but improves performance everywhere else +# (jRuby, Rubinius). 
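+#
+# A hedged usage sketch (translation data is illustrative): once the module is
+# included, interpolatable strings passed through store_translations gain a
+# pre-compiled #i18n_interpolate method, which #interpolate then calls:
+#
+#   I18n::Backend::Simple.include(I18n::Backend::InterpolationCompiler)
+#   I18n.backend.store_translations(:en, :greeting => 'Hi, %{name}!')
+#   I18n.t(:greeting, :name => 'Sven')  # => "Hi, Sven!"
+#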
+module I18n + module Backend + module InterpolationCompiler + module Compiler + extend self + + TOKENIZER = /(%%\{[^\}]+\}|%\{[^\}]+\})/ + INTERPOLATION_SYNTAX_PATTERN = /(%)?(%\{([^\}]+)\})/ + + def compile_if_an_interpolation(string) + if interpolated_str?(string) + string.instance_eval <<-RUBY_EVAL, __FILE__, __LINE__ + def i18n_interpolate(v = {}) + "#{compiled_interpolation_body(string)}" + end + RUBY_EVAL + end + + string + end + + def interpolated_str?(str) + str.kind_of?(::String) && str =~ INTERPOLATION_SYNTAX_PATTERN + end + + protected + # tokenize("foo %{bar} baz %%{buz}") # => ["foo ", "%{bar}", " baz ", "%%{buz}"] + def tokenize(str) + str.split(TOKENIZER) + end + + def compiled_interpolation_body(str) + tokenize(str).map do |token| + (matchdata = token.match(INTERPOLATION_SYNTAX_PATTERN)) ? handle_interpolation_token(token, matchdata) : escape_plain_str(token) + end.join + end + + def handle_interpolation_token(interpolation, matchdata) + escaped, pattern, key = matchdata.values_at(1, 2, 3) + escaped ? pattern : compile_interpolation_token(key.to_sym) + end + + def compile_interpolation_token(key) + "\#{#{interpolate_or_raise_missing(key)}}" + end + + def interpolate_or_raise_missing(key) + escaped_key = escape_key_sym(key) + RESERVED_KEYS.include?(key) ? reserved_key(escaped_key) : interpolate_key(escaped_key) + end + + def interpolate_key(key) + [direct_key(key), nil_key(key), missing_key(key)].join('||') + end + + def direct_key(key) + "((t = v[#{key}]) && t.respond_to?(:call) ? t.call : t)" + end + + def nil_key(key) + "(v.has_key?(#{key}) && '')" + end + + def missing_key(key) + "I18n.config.missing_interpolation_argument_handler.call(#{key}, v, self)" + end + + def reserved_key(key) + "raise(ReservedInterpolationKey.new(#{key}, self))" + end + + def escape_plain_str(str) + str.gsub(/"|\\|#/) {|x| "\\#{x}"} + end + + def escape_key_sym(key) + # rely on Ruby to do all the hard work :) + key.to_sym.inspect + end + end + + def interpolate(locale, string, values) + if string.respond_to?(:i18n_interpolate) + string.i18n_interpolate(values) + elsif values + super + else + string + end + end + + def store_translations(locale, data, options = EMPTY_HASH) + compile_all_strings_in(data) + super + end + + protected + def compile_all_strings_in(data) + data.each_value do |value| + Compiler.compile_if_an_interpolation(value) + compile_all_strings_in(value) if value.kind_of?(Hash) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/key_value.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/key_value.rb new file mode 100644 index 0000000..b937e25 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/key_value.rb @@ -0,0 +1,204 @@ +# frozen_string_literal: true + +require 'i18n/backend/base' + +module I18n + + begin + require 'oj' + class JSON + class << self + def encode(value) + Oj::Rails.encode(value) + end + def decode(value) + Oj.load(value) + end + end + end + rescue LoadError + require 'active_support/json' + JSON = ActiveSupport::JSON + end + + module Backend + # This is a basic backend for key value stores. It receives on + # initialization the store, which should respond to three methods: + # + # * store#[](key) - Used to get a value + # * store#[]=(key, value) - Used to set a value + # * store#keys - Used to get all keys + # + # Since these stores only supports string, all values are converted + # to JSON before being stored, allowing it to also store booleans, + # hashes and arrays. 
However, this store does not support Procs. + # + # As the ActiveRecord backend, Symbols are just supported when loading + # translations from the filesystem or through explicit store translations. + # + # Also, avoid calling I18n.available_locales since it's a somehow + # expensive operation in most stores. + # + # == Example + # + # To setup I18n to use TokyoCabinet in memory is quite straightforward: + # + # require 'rufus/tokyo/cabinet' # gem install rufus-tokyo + # I18n.backend = I18n::Backend::KeyValue.new(Rufus::Tokyo::Cabinet.new('*')) + # + # == Performance + # + # You may make this backend even faster by including the Memoize module. + # However, notice that you should properly clear the cache if you change + # values directly in the key-store. + # + # == Subtrees + # + # In most backends, you are allowed to retrieve part of a translation tree: + # + # I18n.backend.store_translations :en, :foo => { :bar => :baz } + # I18n.t "foo" #=> { :bar => :baz } + # + # This backend supports this feature by default, but it slows down the storage + # of new data considerably and makes hard to delete entries. That said, you are + # allowed to disable the storage of subtrees on initialization: + # + # I18n::Backend::KeyValue.new(@store, false) + # + # This is useful if you are using a KeyValue backend chained to a Simple backend. + class KeyValue + module Implementation + attr_accessor :store + + include Base, Flatten + + def initialize(store, subtrees=true) + @store, @subtrees = store, subtrees + end + + def initialized? + !@store.nil? + end + + def store_translations(locale, data, options = EMPTY_HASH) + escape = options.fetch(:escape, true) + flatten_translations(locale, data, escape, @subtrees).each do |key, value| + key = "#{locale}.#{key}" + + case value + when Hash + if @subtrees && (old_value = @store[key]) + old_value = JSON.decode(old_value) + value = Utils.deep_merge!(Utils.deep_symbolize_keys(old_value), value) if old_value.is_a?(Hash) + end + when Proc + raise "Key-value stores cannot handle procs" + end + + @store[key] = JSON.encode(value) unless value.is_a?(Symbol) + end + end + + def available_locales + locales = @store.keys.map { |k| k =~ /\./; $` } + locales.uniq! + locales.compact! + locales.map! { |k| k.to_sym } + locales + end + + protected + + # Queries the translations from the key-value store and converts + # them into a hash such as the one returned from loading the + # haml files + def translations + @translations = Utils.deep_symbolize_keys(@store.keys.clone.map do |main_key| + main_value = JSON.decode(@store[main_key]) + main_key.to_s.split(".").reverse.inject(main_value) do |value, key| + {key.to_sym => value} + end + end.inject{|hash, elem| Utils.deep_merge!(hash, elem)}) + end + + def init_translations + # NO OP + # This call made also inside Simple Backend and accessed by + # other plugins like I18n-js and babilu and + # to use it along with the Chain backend we need to + # provide a uniform API even for protected methods :S + end + + def subtrees? + @subtrees + end + + def lookup(locale, key, scope = [], options = EMPTY_HASH) + key = normalize_flat_keys(locale, key, scope, options[:separator]) + value = @store["#{locale}.#{key}"] + value = JSON.decode(value) if value + + if value.is_a?(Hash) + Utils.deep_symbolize_keys(value) + elsif !value.nil? + value + elsif !@subtrees + SubtreeProxy.new("#{locale}.#{key}", @store) + end + end + + def pluralize(locale, entry, count) + if subtrees? 
+ super + else + return entry unless entry.is_a?(Hash) + key = pluralization_key(entry, count) + entry[key] + end + end + end + + class SubtreeProxy + def initialize(master_key, store) + @master_key = master_key + @store = store + @subtree = nil + end + + def has_key?(key) + @subtree && @subtree.has_key?(key) || self[key] + end + + def [](key) + unless @subtree && value = @subtree[key] + value = @store["#{@master_key}.#{key}"] + if value + value = JSON.decode(value) + (@subtree ||= {})[key] = value + end + end + value + end + + def is_a?(klass) + Hash == klass || super + end + alias :kind_of? :is_a? + + def instance_of?(klass) + Hash == klass || super + end + + def nil? + @subtree.nil? + end + + def inspect + @subtree.inspect + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/lazy_loadable.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/lazy_loadable.rb new file mode 100644 index 0000000..60f21fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/lazy_loadable.rb @@ -0,0 +1,184 @@ +# frozen_string_literal: true + +module I18n + module Backend + # Backend that lazy loads translations based on the current locale. This + # implementation avoids loading all translations up front. Instead, it only + # loads the translations that belong to the current locale. This offers a + # performance incentive in local development and test environments for + # applications with many translations for many different locales. It's + # particularly useful when the application only refers to a single locales' + # translations at a time (ex. A Rails workload). The implementation + # identifies which translation files from the load path belong to the + # current locale by pattern matching against their path name. + # + # Specifically, a translation file is considered to belong to a locale if: + # a) the filename is in the I18n load path + # b) the filename ends in a supported extension (ie. .yml, .json, .po, .rb) + # c) the filename starts with the locale identifier + # d) the locale identifier and optional proceeding text is separated by an underscore, ie. "_". + # + # Examples: + # Valid files that will be selected by this backend: + # + # "files/locales/en_translation.yml" (Selected for locale "en") + # "files/locales/fr.po" (Selected for locale "fr") + # + # Invalid files that won't be selected by this backend: + # + # "files/locales/translation-file" + # "files/locales/en-translation.unsupported" + # "files/locales/french/translation.yml" + # "files/locales/fr/translation.yml" + # + # The implementation uses this assumption to defer the loading of + # translation files until the current locale actually requires them. + # + # The backend has two working modes: lazy_load and eager_load. + # + # Note: This backend should only be enabled in test environments! + # When the mode is set to false, the backend behaves exactly like the + # Simple backend, with an additional check that the paths being loaded + # abide by the format. If paths can't be matched to the format, an error is raised. 
+ # + # You can configure lazy loaded backends through the initializer or backends + # accessor: + # + # # In test environments + # + # I18n.backend = I18n::Backend::LazyLoadable.new(lazy_load: true) + # + # # In other environments, such as production and CI + # + # I18n.backend = I18n::Backend::LazyLoadable.new(lazy_load: false) # default + # + class LocaleExtractor + class << self + def locale_from_path(path) + name = File.basename(path, ".*") + locale = name.split("_").first + locale.to_sym unless locale.nil? + end + end + end + + class LazyLoadable < Simple + def initialize(lazy_load: false) + @lazy_load = lazy_load + end + + # Returns whether the current locale is initialized. + def initialized? + if lazy_load? + initialized_locales[I18n.locale] + else + super + end + end + + # Clean up translations and uninitialize all locales. + def reload! + if lazy_load? + @initialized_locales = nil + @translations = nil + else + super + end + end + + # Eager loading is not supported in the lazy context. + def eager_load! + if lazy_load? + raise UnsupportedMethod.new(__method__, self.class, "Cannot eager load translations because backend was configured with lazy_load: true.") + else + super + end + end + + # Parse the load path and extract all locales. + def available_locales + if lazy_load? + I18n.load_path.map { |path| LocaleExtractor.locale_from_path(path) } + else + super + end + end + + def lookup(locale, key, scope = [], options = EMPTY_HASH) + if lazy_load? + I18n.with_locale(locale) do + super + end + else + super + end + end + + protected + + + # Load translations from files that belong to the current locale. + def init_translations + file_errors = if lazy_load? + initialized_locales[I18n.locale] = true + load_translations_and_collect_file_errors(filenames_for_current_locale) + else + @initialized = true + load_translations_and_collect_file_errors(I18n.load_path) + end + + raise InvalidFilenames.new(file_errors) unless file_errors.empty? + end + + def initialized_locales + @initialized_locales ||= Hash.new(false) + end + + private + + def lazy_load? + @lazy_load + end + + class FilenameIncorrect < StandardError + def initialize(file, expected_locale, unexpected_locales) + super "#{file} can only load translations for \"#{expected_locale}\". Found translations for: #{unexpected_locales}." + end + end + + # Loads each file supplied and asserts that the file only loads + # translations as expected by the name. The method returns a list of + # errors corresponding to offending files. + def load_translations_and_collect_file_errors(files) + errors = [] + + load_translations(files) do |file, loaded_translations| + assert_file_named_correctly!(file, loaded_translations) + rescue FilenameIncorrect => e + errors << e + end + + errors + end + + # Select all files from I18n load path that belong to current locale. + # These files must start with the locale identifier (ie. "en", "pt-BR"), + # followed by an "_" demarcation to separate proceeding text. + def filenames_for_current_locale + I18n.load_path.flatten.select do |path| + LocaleExtractor.locale_from_path(path) == I18n.locale + end + end + + # Checks if a filename is named in correspondence to the translations it loaded. + # The locale extracted from the path must be the single locale loaded in the translations. 
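+      #
+      # For instance, a file "config/locales/en.yml" whose parsed contents have
+      # only the :en root key passes; if it also declared a :fr root key, a
+      # FilenameIncorrect error would be collected for it (the path is illustrative).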
+ def assert_file_named_correctly!(file, translations) + loaded_locales = translations.keys.map(&:to_sym) + expected_locale = LocaleExtractor.locale_from_path(file) + unexpected_locales = loaded_locales.reject { |locale| locale == expected_locale } + + raise FilenameIncorrect.new(file, expected_locale, unexpected_locales) unless unexpected_locales.empty? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/memoize.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/memoize.rb new file mode 100644 index 0000000..3293d2b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/memoize.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +# Memoize module simply memoizes the values returned by lookup using +# a flat hash and can tremendously speed up the lookup process in a backend. +# +# To enable it you can simply include the Memoize module to your backend: +# +# I18n::Backend::Simple.include(I18n::Backend::Memoize) +# +# Notice that it's the responsibility of the backend to define whenever the +# cache should be cleaned. +module I18n + module Backend + module Memoize + def available_locales + @memoized_locales ||= super + end + + def store_translations(locale, data, options = EMPTY_HASH) + reset_memoizations!(locale) + super + end + + def reload! + reset_memoizations! + super + end + + def eager_load! + memoized_lookup + available_locales + super + end + + protected + + def lookup(locale, key, scope = nil, options = EMPTY_HASH) + flat_key = I18n::Backend::Flatten.normalize_flat_keys(locale, + key, scope, options[:separator]).to_sym + flat_hash = memoized_lookup[locale.to_sym] + flat_hash.key?(flat_key) ? flat_hash[flat_key] : (flat_hash[flat_key] = super) + end + + def memoized_lookup + @memoized_lookup ||= I18n.new_double_nested_cache + end + + def reset_memoizations!(locale=nil) + @memoized_locales = nil + (locale ? memoized_lookup[locale.to_sym] : memoized_lookup).clear + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/metadata.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/metadata.rb new file mode 100644 index 0000000..51ea7a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/metadata.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +# I18n translation metadata is useful when you want to access information +# about how a translation was looked up, pluralized or interpolated in +# your application. +# +# msg = I18n.t(:message, :default => 'Hi!', :scope => :foo) +# msg.translation_metadata +# # => { :key => :message, :scope => :foo, :default => 'Hi!' } +# +# If a :count option was passed to #translate it will be set to the metadata. +# Likewise, if any interpolation variables were passed they will also be set. +# +# To enable translation metadata you can simply include the Metadata module +# into the Simple backend class - or whatever other backend you are using: +# +# I18n::Backend::Simple.include(I18n::Backend::Metadata) +# +module I18n + module Backend + module Metadata + class << self + def included(base) + Object.class_eval do + def translation_metadata + unless self.frozen? + @translation_metadata ||= {} + else + {} + end + end + + def translation_metadata=(translation_metadata) + @translation_metadata = translation_metadata unless self.frozen? 
+ end + end unless Object.method_defined?(:translation_metadata) + end + end + + def translate(locale, key, options = EMPTY_HASH) + metadata = { + :locale => locale, + :key => key, + :scope => options[:scope], + :default => options[:default], + :separator => options[:separator], + :values => options.reject { |name, _value| RESERVED_KEYS.include?(name) } + } + with_metadata(metadata) { super } + end + + def interpolate(locale, entry, values = EMPTY_HASH) + metadata = entry.translation_metadata.merge(:original => entry) + with_metadata(metadata) { super } + end + + def pluralize(locale, entry, count) + with_metadata(:count => count) { super } + end + + protected + + def with_metadata(metadata, &block) + result = yield + result.translation_metadata = result.translation_metadata.merge(metadata) if result + result + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/pluralization.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/pluralization.rb new file mode 100644 index 0000000..b602657 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/pluralization.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +# I18n Pluralization are useful when you want your application to +# customize pluralization rules. +# +# To enable locale specific pluralizations you can simply include the +# Pluralization module to the Simple backend - or whatever other backend you +# are using. +# +# I18n::Backend::Simple.include(I18n::Backend::Pluralization) +# +# You also need to make sure to provide pluralization algorithms to the +# backend, i.e. include them to your I18n.load_path accordingly. +module I18n + module Backend + module Pluralization + # Overwrites the Base backend translate method so that it will check the + # translation meta data space (:i18n) for a locale specific pluralization + # rule and use it to pluralize the given entry. I.e. the library expects + # pluralization rules to be stored at I18n.t(:'i18n.plural.rule') + # + # Pluralization rules are expected to respond to #call(count) and + # return a pluralization key. Valid keys depend on the translation data + # hash (entry) but it is generally recommended to follow CLDR's style, + # i.e., return one of the keys :zero, :one, :few, :many, :other. + # + # The :zero key is always picked directly when count equals 0 AND the + # translation data has the key :zero. This way translators are free to + # either pick a special :zero translation even for languages where the + # pluralizer does not return a :zero key. + def pluralize(locale, entry, count) + return entry unless entry.is_a?(Hash) && count + + pluralizer = pluralizer(locale) + if pluralizer.respond_to?(:call) + key = count == 0 && entry.has_key?(:zero) ? 
:zero : pluralizer.call(count) + raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) + entry[key] + else + super + end + end + + protected + + def pluralizers + @pluralizers ||= {} + end + + def pluralizer(locale) + pluralizers[locale] ||= I18n.t(:'i18n.plural.rule', :locale => locale, :resolve => false) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/simple.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/simple.rb new file mode 100644 index 0000000..0c49de8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/simple.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require 'i18n/backend/base' + +module I18n + module Backend + # A simple backend that reads translations from YAML files and stores them in + # an in-memory hash. Relies on the Base backend. + # + # The implementation is provided by a Implementation module allowing to easily + # extend Simple backend's behavior by including modules. E.g.: + # + # module I18n::Backend::Pluralization + # def pluralize(*args) + # # extended pluralization logic + # super + # end + # end + # + # I18n::Backend::Simple.include(I18n::Backend::Pluralization) + class Simple + module Implementation + include Base + + def initialized? + @initialized ||= false + end + + # Stores translations for the given locale in memory. + # This uses a deep merge for the translations hash, so existing + # translations will be overwritten by new ones only at the deepest + # level of the hash. + def store_translations(locale, data, options = EMPTY_HASH) + if I18n.enforce_available_locales && + I18n.available_locales_initialized? && + !I18n.locale_available?(locale) + return data + end + locale = locale.to_sym + translations[locale] ||= Concurrent::Hash.new + data = Utils.deep_symbolize_keys(data) unless options.fetch(:skip_symbolize_keys, false) + Utils.deep_merge!(translations[locale], data) + end + + # Get available locales from the translations hash + def available_locales + init_translations unless initialized? + translations.inject([]) do |locales, (locale, data)| + locales << locale unless data.size <= 1 && (data.empty? || data.has_key?(:i18n)) + locales + end + end + + # Clean up translations hash and set initialized to false on reload! + def reload! + @initialized = false + @translations = nil + super + end + + def eager_load! + init_translations unless initialized? + super + end + + def translations(do_init: false) + # To avoid returning empty translations, + # call `init_translations` + init_translations if do_init && !initialized? + + @translations ||= Concurrent::Hash.new { |h, k| h[k] = Concurrent::Hash.new } + end + + protected + + def init_translations + load_translations + @initialized = true + end + + # Looks up a translation from the translations hash. Returns nil if + # either key is nil, or locale, scope or key do not exist as a key in the + # nested translations hash. Splits keys or scopes containing dots + # into multiple keys, i.e. currency.format is regarded the same as + # %w(currency format). + def lookup(locale, key, scope = [], options = EMPTY_HASH) + init_translations unless initialized? 
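+          # e.g. lookup(:en, :'currency.format') walks translations[:en][:currency][:format]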
+ keys = I18n.normalize_keys(locale, key, scope, options[:separator]) + + keys.inject(translations) do |result, _key| + return nil unless result.is_a?(Hash) + unless result.has_key?(_key) + _key = _key.to_s.to_sym + return nil unless result.has_key?(_key) + end + result = result[_key] + result = resolve_entry(locale, _key, result, options.merge(:scope => nil)) if result.is_a?(Symbol) + result + end + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/transliterator.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/transliterator.rb new file mode 100644 index 0000000..bb704ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/backend/transliterator.rb @@ -0,0 +1,108 @@ +# encoding: utf-8 +# frozen_string_literal: true + +module I18n + module Backend + module Transliterator + DEFAULT_REPLACEMENT_CHAR = "?" + + # Given a locale and a UTF-8 string, return the locale's ASCII + # approximation for the string. + def transliterate(locale, string, replacement = nil) + @transliterators ||= {} + @transliterators[locale] ||= Transliterator.get I18n.t(:'i18n.transliterate.rule', + :locale => locale, :resolve => false, :default => {}) + @transliterators[locale].transliterate(string, replacement) + end + + # Get a transliterator instance. + def self.get(rule = nil) + if !rule || rule.kind_of?(Hash) + HashTransliterator.new(rule) + elsif rule.kind_of? Proc + ProcTransliterator.new(rule) + else + raise I18n::ArgumentError, "Transliteration rule must be a proc or a hash." + end + end + + # A transliterator which accepts a Proc as its transliteration rule. + class ProcTransliterator + def initialize(rule) + @rule = rule + end + + def transliterate(string, replacement = nil) + @rule.call(string) + end + end + + # A transliterator which accepts a Hash of characters as its translation + # rule. 
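+      #
+      # A hedged usage sketch (the custom rule entry is illustrative):
+      #
+      #   transliterator = I18n::Backend::Transliterator.get("ü" => "ue")
+      #   transliterator.transliterate("über")    # => "ueber"
+      #   transliterator.transliterate("mañana")  # => "manana" (built-in approximations)
+      #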
+ class HashTransliterator + DEFAULT_APPROXIMATIONS = { + "À"=>"A", "Á"=>"A", "Â"=>"A", "Ã"=>"A", "Ä"=>"A", "Å"=>"A", "Æ"=>"AE", + "Ç"=>"C", "È"=>"E", "É"=>"E", "Ê"=>"E", "Ë"=>"E", "Ì"=>"I", "Í"=>"I", + "Î"=>"I", "Ï"=>"I", "Ð"=>"D", "Ñ"=>"N", "Ò"=>"O", "Ó"=>"O", "Ô"=>"O", + "Õ"=>"O", "Ö"=>"O", "×"=>"x", "Ø"=>"O", "Ù"=>"U", "Ú"=>"U", "Û"=>"U", + "Ü"=>"U", "Ý"=>"Y", "Þ"=>"Th", "ß"=>"ss", "à"=>"a", "á"=>"a", "â"=>"a", + "ã"=>"a", "ä"=>"a", "å"=>"a", "æ"=>"ae", "ç"=>"c", "è"=>"e", "é"=>"e", + "ê"=>"e", "ë"=>"e", "ì"=>"i", "í"=>"i", "î"=>"i", "ï"=>"i", "ð"=>"d", + "ñ"=>"n", "ò"=>"o", "ó"=>"o", "ô"=>"o", "õ"=>"o", "ö"=>"o", "ø"=>"o", + "ù"=>"u", "ú"=>"u", "û"=>"u", "ü"=>"u", "ý"=>"y", "þ"=>"th", "ÿ"=>"y", + "Ā"=>"A", "ā"=>"a", "Ă"=>"A", "ă"=>"a", "Ą"=>"A", "ą"=>"a", "Ć"=>"C", + "ć"=>"c", "Ĉ"=>"C", "ĉ"=>"c", "Ċ"=>"C", "ċ"=>"c", "Č"=>"C", "č"=>"c", + "Ď"=>"D", "ď"=>"d", "Đ"=>"D", "đ"=>"d", "Ē"=>"E", "ē"=>"e", "Ĕ"=>"E", + "ĕ"=>"e", "Ė"=>"E", "ė"=>"e", "Ę"=>"E", "ę"=>"e", "Ě"=>"E", "ě"=>"e", + "Ĝ"=>"G", "ĝ"=>"g", "Ğ"=>"G", "ğ"=>"g", "Ġ"=>"G", "ġ"=>"g", "Ģ"=>"G", + "ģ"=>"g", "Ĥ"=>"H", "ĥ"=>"h", "Ħ"=>"H", "ħ"=>"h", "Ĩ"=>"I", "ĩ"=>"i", + "Ī"=>"I", "ī"=>"i", "Ĭ"=>"I", "ĭ"=>"i", "Į"=>"I", "į"=>"i", "İ"=>"I", + "ı"=>"i", "IJ"=>"IJ", "ij"=>"ij", "Ĵ"=>"J", "ĵ"=>"j", "Ķ"=>"K", "ķ"=>"k", + "ĸ"=>"k", "Ĺ"=>"L", "ĺ"=>"l", "Ļ"=>"L", "ļ"=>"l", "Ľ"=>"L", "ľ"=>"l", + "Ŀ"=>"L", "ŀ"=>"l", "Ł"=>"L", "ł"=>"l", "Ń"=>"N", "ń"=>"n", "Ņ"=>"N", + "ņ"=>"n", "Ň"=>"N", "ň"=>"n", "ʼn"=>"'n", "Ŋ"=>"NG", "ŋ"=>"ng", + "Ō"=>"O", "ō"=>"o", "Ŏ"=>"O", "ŏ"=>"o", "Ő"=>"O", "ő"=>"o", "Œ"=>"OE", + "œ"=>"oe", "Ŕ"=>"R", "ŕ"=>"r", "Ŗ"=>"R", "ŗ"=>"r", "Ř"=>"R", "ř"=>"r", + "Ś"=>"S", "ś"=>"s", "Ŝ"=>"S", "ŝ"=>"s", "Ş"=>"S", "ş"=>"s", "Š"=>"S", + "š"=>"s", "Ţ"=>"T", "ţ"=>"t", "Ť"=>"T", "ť"=>"t", "Ŧ"=>"T", "ŧ"=>"t", + "Ũ"=>"U", "ũ"=>"u", "Ū"=>"U", "ū"=>"u", "Ŭ"=>"U", "ŭ"=>"u", "Ů"=>"U", + "ů"=>"u", "Ű"=>"U", "ű"=>"u", "Ų"=>"U", "ų"=>"u", "Ŵ"=>"W", "ŵ"=>"w", + "Ŷ"=>"Y", "ŷ"=>"y", "Ÿ"=>"Y", "Ź"=>"Z", "ź"=>"z", "Ż"=>"Z", "ż"=>"z", + "Ž"=>"Z", "ž"=>"z" + }.freeze + + def initialize(rule = nil) + @rule = rule + add_default_approximations + add rule if rule + end + + def transliterate(string, replacement = nil) + replacement ||= DEFAULT_REPLACEMENT_CHAR + string.gsub(/[^\x00-\x7f]/u) do |char| + approximations[char] || replacement + end + end + + private + + def approximations + @approximations ||= {} + end + + def add_default_approximations + DEFAULT_APPROXIMATIONS.each do |key, value| + approximations[key] = value + end + end + + # Add transliteration rules to the approximations hash. + def add(hash) + hash.each do |key, value| + approximations[key.to_s] = value.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/config.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/config.rb new file mode 100644 index 0000000..ea3dd1e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/config.rb @@ -0,0 +1,165 @@ +# frozen_string_literal: true + +require 'set' + +module I18n + class Config + # The only configuration value that is not global and scoped to thread is :locale. + # It defaults to the default_locale. + def locale + defined?(@locale) && @locale != nil ? @locale : default_locale + end + + # Sets the current locale pseudo-globally, i.e. in the Thread.current hash. + def locale=(locale) + I18n.enforce_available_locales!(locale) + @locale = locale && locale.to_sym + end + + # Returns the current backend. Defaults to +Backend::Simple+. 
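+    #
+    # A hedged sketch of swapping the backend, e.g. putting a fresh Simple
+    # backend in front of the current one via the Chain backend (the choice of
+    # chained backends is illustrative):
+    #
+    #   I18n.backend = I18n::Backend::Chain.new(I18n::Backend::Simple.new, I18n.backend)
+    #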
+ def backend + @@backend ||= Backend::Simple.new + end + + # Sets the current backend. Used to set a custom backend. + def backend=(backend) + @@backend = backend + end + + # Returns the current default locale. Defaults to :'en' + def default_locale + @@default_locale ||= :en + end + + # Sets the current default locale. Used to set a custom default locale. + def default_locale=(locale) + I18n.enforce_available_locales!(locale) + @@default_locale = locale && locale.to_sym + end + + # Returns an array of locales for which translations are available. + # Unless you explicitely set these through I18n.available_locales= + # the call will be delegated to the backend. + def available_locales + @@available_locales ||= nil + @@available_locales || backend.available_locales + end + + # Caches the available locales list as both strings and symbols in a Set, so + # that we can have faster lookups to do the available locales enforce check. + def available_locales_set #:nodoc: + @@available_locales_set ||= available_locales.inject(Set.new) do |set, locale| + set << locale.to_s << locale.to_sym + end + end + + # Sets the available locales. + def available_locales=(locales) + @@available_locales = Array(locales).map { |locale| locale.to_sym } + @@available_locales = nil if @@available_locales.empty? + @@available_locales_set = nil + end + + # Returns true if the available_locales have been initialized + def available_locales_initialized? + ( !!defined?(@@available_locales) && !!@@available_locales ) + end + + # Clears the available locales set so it can be recomputed again after I18n + # gets reloaded. + def clear_available_locales_set #:nodoc: + @@available_locales_set = nil + end + + # Returns the current default scope separator. Defaults to '.' + def default_separator + @@default_separator ||= '.' + end + + # Sets the current default scope separator. + def default_separator=(separator) + @@default_separator = separator + end + + # Returns the current exception handler. Defaults to an instance of + # I18n::ExceptionHandler. + def exception_handler + @@exception_handler ||= ExceptionHandler.new + end + + # Sets the exception handler. + def exception_handler=(exception_handler) + @@exception_handler = exception_handler + end + + # Returns the current handler for situations when interpolation argument + # is missing. MissingInterpolationArgument will be raised by default. + def missing_interpolation_argument_handler + @@missing_interpolation_argument_handler ||= lambda do |missing_key, provided_hash, string| + raise MissingInterpolationArgument.new(missing_key, provided_hash, string) + end + end + + # Sets the missing interpolation argument handler. It can be any + # object that responds to #call. The arguments that will be passed to #call + # are the same as for MissingInterpolationArgument initializer. Use +Proc.new+ + # if you don't care about arity. + # + # == Example: + # You can supress raising an exception and return string instead: + # + # I18n.config.missing_interpolation_argument_handler = Proc.new do |key| + # "#{key} is missing" + # end + def missing_interpolation_argument_handler=(exception_handler) + @@missing_interpolation_argument_handler = exception_handler + end + + # Allow clients to register paths providing translation data sources. The + # backend defines acceptable sources. + # + # E.g. the provided SimpleBackend accepts a list of paths to translation + # files which are either named *.rb and contain plain Ruby Hashes or are + # named *.yml and contain YAML data. 
So for the SimpleBackend clients may + # register translation files like this: + # I18n.load_path << 'path/to/locale/en.yml' + def load_path + @@load_path ||= [] + end + + # Sets the load path instance. Custom implementations are expected to + # behave like a Ruby Array. + def load_path=(load_path) + @@load_path = load_path + @@available_locales_set = nil + backend.reload! + end + + # Whether or not to verify if locales are in the list of available locales. + # Defaults to true. + @@enforce_available_locales = true + def enforce_available_locales + @@enforce_available_locales + end + + def enforce_available_locales=(enforce_available_locales) + @@enforce_available_locales = enforce_available_locales + end + + # Returns the current interpolation patterns. Defaults to + # I18n::DEFAULT_INTERPOLATION_PATTERNS. + def interpolation_patterns + @@interpolation_patterns ||= I18n::DEFAULT_INTERPOLATION_PATTERNS.dup + end + + # Sets the current interpolation patterns. Used to set a interpolation + # patterns. + # + # E.g. using {{}} as a placeholder like "{{hello}}, world!": + # + # I18n.config.interpolation_patterns << /\{\{(\w+)\}\}/ + def interpolation_patterns=(interpolation_patterns) + @@interpolation_patterns = interpolation_patterns + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/exceptions.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/exceptions.rb new file mode 100644 index 0000000..f66e207 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/exceptions.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +require 'cgi' + +module I18n + class ExceptionHandler + def call(exception, _locale, _key, _options) + if exception.is_a?(MissingTranslation) + exception.message + else + raise exception + end + end + end + + class ArgumentError < ::ArgumentError; end + + class Disabled < ArgumentError + def initialize(method) + super(<<~MESSAGE) + I18n.#{method} is currently disabled, likely because your application is still in its loading phase. + + This method is meant to display text in the user locale, so calling it before the user locale has + been set is likely to display text from the wrong locale to some users. + + If you have a legitimate reason to access i18n data outside of the user flow, you can do so by passing + the desired locale explictly with the `locale` argument, e.g. 
`I18n.#{method}(..., locale: :en)` + MESSAGE + end + end + + class InvalidLocale < ArgumentError + attr_reader :locale + def initialize(locale) + @locale = locale + super "#{locale.inspect} is not a valid locale" + end + end + + class InvalidLocaleData < ArgumentError + attr_reader :filename + def initialize(filename, exception_message) + @filename, @exception_message = filename, exception_message + super "can not load translations from #{filename}: #{exception_message}" + end + end + + class MissingTranslation < ArgumentError + module Base + PERMITTED_KEYS = [:scope].freeze + + attr_reader :locale, :key, :options + + def initialize(locale, key, options = EMPTY_HASH) + @key, @locale, @options = key, locale, options.slice(*PERMITTED_KEYS) + options.each { |k, v| self.options[k] = v.inspect if v.is_a?(Proc) } + end + + def keys + @keys ||= I18n.normalize_keys(locale, key, options[:scope]).tap do |keys| + keys << 'no key' if keys.size < 2 + end + end + + def message + "translation missing: #{keys.join('.')}" + end + alias :to_s :message + + def to_exception + MissingTranslationData.new(locale, key, options) + end + end + + include Base + end + + class MissingTranslationData < ArgumentError + include MissingTranslation::Base + end + + class InvalidPluralizationData < ArgumentError + attr_reader :entry, :count, :key + def initialize(entry, count, key) + @entry, @count, @key = entry, count, key + super "translation data #{entry.inspect} can not be used with :count => #{count}. key '#{key}' is missing." + end + end + + class MissingInterpolationArgument < ArgumentError + attr_reader :key, :values, :string + def initialize(key, values, string) + @key, @values, @string = key, values, string + super "missing interpolation argument #{key.inspect} in #{string.inspect} (#{values.inspect} given)" + end + end + + class ReservedInterpolationKey < ArgumentError + attr_reader :key, :string + def initialize(key, string) + @key, @string = key, string + super "reserved key #{key.inspect} used in #{string.inspect}" + end + end + + class UnknownFileType < ArgumentError + attr_reader :type, :filename + def initialize(type, filename) + @type, @filename = type, filename + super "can not load translations from #{filename}, the file type #{type} is not known" + end + end + + class UnsupportedMethod < ArgumentError + attr_reader :method, :backend_klass, :msg + def initialize(method, backend_klass, msg) + @method = method + @backend_klass = backend_klass + @msg = msg + super "#{backend_klass} does not support the ##{method} method. #{msg}" + end + end + + class InvalidFilenames < ArgumentError + NUMBER_OF_ERRORS_SHOWN = 20 + def initialize(file_errors) + super <<~MSG + Found #{file_errors.count} error(s). + The first #{[file_errors.count, NUMBER_OF_ERRORS_SHOWN].min} error(s): + #{file_errors.map(&:message).first(NUMBER_OF_ERRORS_SHOWN).join("\n")} + + To use the LazyLoadable backend: + 1. Filenames must start with the locale. + 2. An underscore must separate the locale with any optional text that follows. + 3. The file must only contain translation data for the single locale. 
+ + Example: + "/config/locales/fr.yml" which contains: + ```yml + fr: + dog: + chien + ``` + MSG + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext.rb new file mode 100644 index 0000000..858daff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module I18n + module Gettext + PLURAL_SEPARATOR = "\001" + CONTEXT_SEPARATOR = "\004" + + autoload :Helpers, 'i18n/gettext/helpers' + + @@plural_keys = { :en => [:one, :other] } + + class << self + # returns an array of plural keys for the given locale or the whole hash + # of locale mappings to plural keys so that we can convert from gettext's + # integer-index based style + # TODO move this information to the pluralization module + def plural_keys(*args) + args.empty? ? @@plural_keys : @@plural_keys[args.first] || @@plural_keys[:en] + end + + def extract_scope(msgid, separator) + scope = msgid.to_s.split(separator) + msgid = scope.pop + [scope, msgid] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext/helpers.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext/helpers.rb new file mode 100644 index 0000000..d077619 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext/helpers.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require 'i18n/gettext' + +module I18n + module Gettext + # Implements classical Gettext style accessors. To use this include the + # module to the global namespace or wherever you want to use it. + # + # include I18n::Gettext::Helpers + module Helpers + # Makes dynamic translation messages readable for the gettext parser. + # _(fruit) cannot be understood by the gettext parser. To help the parser find all your translations, + # you can add fruit = N_("Apple") which does not translate, but tells the parser: "Apple" needs translation. + # * msgid: the message id. + # * Returns: msgid. 
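+      #
+      # A hedged usage sketch of the helpers (assumes .po data has been added to
+      # I18n.load_path and loaded):
+      #
+      #   include I18n::Gettext::Helpers
+      #   _("Apple")                          # => translation of "Apple", or "Apple" itself
+      #   ngettext("an apple", "apples", 2)   # => the plural form for a count of 2
+      #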
+ def N_(msgsid) + msgsid + end + + def gettext(msgid, options = EMPTY_HASH) + I18n.t(msgid, **{:default => msgid, :separator => '|'}.merge(options)) + end + alias _ gettext + + def sgettext(msgid, separator = '|') + scope, msgid = I18n::Gettext.extract_scope(msgid, separator) + I18n.t(msgid, :scope => scope, :default => msgid, :separator => separator) + end + alias s_ sgettext + + def pgettext(msgctxt, msgid) + separator = I18n::Gettext::CONTEXT_SEPARATOR + sgettext([msgctxt, msgid].join(separator), separator) + end + alias p_ pgettext + + def ngettext(msgid, msgid_plural, n = 1) + nsgettext(msgid, msgid_plural, n) + end + alias n_ ngettext + + # Method signatures: + # nsgettext('Fruits|apple', 'apples', 2) + # nsgettext(['Fruits|apple', 'apples'], 2) + def nsgettext(msgid, msgid_plural, n = 1, separator = '|') + if msgid.is_a?(Array) + msgid, msgid_plural, n, separator = msgid[0], msgid[1], msgid_plural, n + separator = '|' unless separator.is_a?(::String) + end + + scope, msgid = I18n::Gettext.extract_scope(msgid, separator) + default = { :one => msgid, :other => msgid_plural } + I18n.t(msgid, :default => default, :count => n, :scope => scope, :separator => separator) + end + alias ns_ nsgettext + + # Method signatures: + # npgettext('Fruits', 'apple', 'apples', 2) + # npgettext('Fruits', ['apple', 'apples'], 2) + def npgettext(msgctxt, msgid, msgid_plural, n = 1) + separator = I18n::Gettext::CONTEXT_SEPARATOR + + if msgid.is_a?(Array) + msgid_plural, msgid, n = msgid[1], [msgctxt, msgid[0]].join(separator), msgid_plural + else + msgid = [msgctxt, msgid].join(separator) + end + + nsgettext(msgid, msgid_plural, n, separator) + end + alias np_ npgettext + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext/po_parser.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext/po_parser.rb new file mode 100644 index 0000000..a07fdc5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/gettext/po_parser.rb @@ -0,0 +1,329 @@ +=begin + poparser.rb - Generate a .mo + + Copyright (C) 2003-2009 Masao Mutoh + + You may redistribute it and/or modify it under the same + license terms as Ruby. +=end + +#MODIFIED +# removed include GetText etc +# added stub translation method _(x) +require 'racc/parser' + +module GetText + + class PoParser < Racc::Parser + + def _(x) + x + end + +module_eval <<'..end src/poparser.ry modeval..id7a99570e05', 'src/poparser.ry', 108 + def unescape(orig) + ret = orig.gsub(/\\n/, "\n") + ret.gsub!(/\\t/, "\t") + ret.gsub!(/\\r/, "\r") + ret.gsub!(/\\"/, "\"") + ret + end + + def parse(str, data, ignore_fuzzy = true) + @comments = [] + @data = data + @fuzzy = false + @msgctxt = "" + $ignore_fuzzy = ignore_fuzzy + + str.strip! + @q = [] + until str.empty? 
do + case str + when /\A\s+/ + str = $' + when /\Amsgctxt/ + @q.push [:MSGCTXT, $&] + str = $' + when /\Amsgid_plural/ + @q.push [:MSGID_PLURAL, $&] + str = $' + when /\Amsgid/ + @q.push [:MSGID, $&] + str = $' + when /\Amsgstr/ + @q.push [:MSGSTR, $&] + str = $' + when /\A\[(\d+)\]/ + @q.push [:PLURAL_NUM, $1] + str = $' + when /\A\#~(.*)/ + $stderr.print _("Warning: obsolete msgid exists.\n") + $stderr.print " #{$&}\n" + @q.push [:COMMENT, $&] + str = $' + when /\A\#(.*)/ + @q.push [:COMMENT, $&] + str = $' + when /\A\"(.*)\"/ + @q.push [:STRING, $1] + str = $' + else + #c = str[0,1] + #@q.push [:STRING, c] + str = str[1..-1] + end + end + @q.push [false, '$end'] + if $DEBUG + @q.each do |a,b| + puts "[#{a}, #{b}]" + end + end + @yydebug = true if $DEBUG + do_parse + + if @comments.size > 0 + @data.set_comment(:last, @comments.join("\n")) + end + @data + end + + def next_token + @q.shift + end + + def on_message(msgid, msgstr) + if msgstr.size > 0 + @data[msgid] = msgstr + @data.set_comment(msgid, @comments.join("\n")) + end + @comments.clear + @msgctxt = "" + end + + def on_comment(comment) + @fuzzy = true if (/fuzzy/ =~ comment) + @comments << comment + end + + +..end src/poparser.ry modeval..id7a99570e05 + +##### racc 1.4.5 generates ### + +racc_reduce_table = [ + 0, 0, :racc_error, + 0, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 12, :_reduce_5, + 1, 13, :_reduce_none, + 1, 13, :_reduce_none, + 4, 15, :_reduce_8, + 5, 16, :_reduce_9, + 2, 17, :_reduce_10, + 1, 17, :_reduce_none, + 3, 18, :_reduce_12, + 1, 11, :_reduce_13, + 2, 14, :_reduce_14, + 1, 14, :_reduce_15 ] + +racc_reduce_n = 16 + +racc_shift_n = 26 + +racc_action_table = [ + 3, 13, 5, 7, 9, 15, 16, 17, 20, 17, + 13, 17, 13, 13, 11, 17, 23, 20, 13, 17 ] + +racc_action_check = [ + 1, 16, 1, 1, 1, 12, 12, 12, 18, 18, + 7, 14, 15, 9, 3, 19, 20, 21, 23, 25 ] + +racc_action_pointer = [ + nil, 0, nil, 14, nil, nil, nil, 3, nil, 6, + nil, nil, 0, nil, 4, 5, -6, nil, 2, 8, + 8, 11, nil, 11, nil, 12 ] + +racc_action_default = [ + -1, -16, -2, -16, -3, -13, -4, -16, -6, -16, + -7, 26, -16, -15, -5, -16, -16, -14, -16, -8, + -16, -9, -11, -16, -10, -12 ] + +racc_goto_table = [ + 12, 22, 14, 4, 24, 6, 2, 8, 18, 19, + 10, 21, 1, nil, nil, nil, 25 ] + +racc_goto_check = [ + 5, 9, 5, 3, 9, 4, 2, 6, 5, 5, + 7, 8, 1, nil, nil, nil, 5 ] + +racc_goto_pointer = [ + nil, 12, 5, 2, 4, -7, 6, 9, -7, -17 ] + +racc_goto_default = [ + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil ] + +racc_token_table = { + false => 0, + Object.new => 1, + :COMMENT => 2, + :MSGID => 3, + :MSGCTXT => 4, + :MSGID_PLURAL => 5, + :MSGSTR => 6, + :STRING => 7, + :PLURAL_NUM => 8 } + +racc_use_result_var = true + +racc_nt_base = 9 + +Racc_arg = [ + racc_action_table, + racc_action_check, + racc_action_default, + racc_action_pointer, + racc_goto_table, + racc_goto_check, + racc_goto_default, + racc_goto_pointer, + racc_nt_base, + racc_reduce_table, + racc_token_table, + racc_shift_n, + racc_reduce_n, + racc_use_result_var ] + +Racc_token_to_s_table = [ +'$end', +'error', +'COMMENT', +'MSGID', +'MSGCTXT', +'MSGID_PLURAL', +'MSGSTR', +'STRING', +'PLURAL_NUM', +'$start', +'msgfmt', +'comment', +'msgctxt', +'message', +'string_list', +'single_message', +'plural_message', +'msgstr_plural', +'msgstr_plural_line'] + +Racc_debug_parser = true + +##### racc system variables end ##### + + # reduce 0 omitted + + # reduce 1 omitted + + # reduce 2 omitted + + # reduce 3 omitted + + # reduce 4 omitted + +module_eval <<'.,.,', 
'src/poparser.ry', 25 + def _reduce_5( val, _values, result ) + @msgctxt = unescape(val[1]) + "\004" + result + end +.,., + + # reduce 6 omitted + + # reduce 7 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 48 + def _reduce_8( val, _values, result ) + if @fuzzy and $ignore_fuzzy + if val[1] != "" + $stderr.print _("Warning: fuzzy message was ignored.\n") + $stderr.print " msgid '#{val[1]}'\n" + else + on_message('', unescape(val[3])) + end + @fuzzy = false + else + on_message(@msgctxt + unescape(val[1]), unescape(val[3])) + end + result = "" + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 65 + def _reduce_9( val, _values, result ) + if @fuzzy and $ignore_fuzzy + if val[1] != "" + $stderr.print _("Warning: fuzzy message was ignored.\n") + $stderr.print "msgid = '#{val[1]}\n" + else + on_message('', unescape(val[3])) + end + @fuzzy = false + else + on_message(@msgctxt + unescape(val[1]) + "\000" + unescape(val[3]), unescape(val[4])) + end + result = "" + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 76 + def _reduce_10( val, _values, result ) + if val[0].size > 0 + result = val[0] + "\000" + val[1] + else + result = "" + end + result + end +.,., + + # reduce 11 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 84 + def _reduce_12( val, _values, result ) + result = val[2] + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 91 + def _reduce_13( val, _values, result ) + on_comment(val[0]) + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 99 + def _reduce_14( val, _values, result ) + result = val.delete_if{|item| item == ""}.join + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 103 + def _reduce_15( val, _values, result ) + result = val[0] + result + end +.,., + + def _reduce_none( val, _values, result ) + result + end + + end # class PoParser + +end # module GetText diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/interpolate/ruby.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/interpolate/ruby.rb new file mode 100644 index 0000000..dab8f0e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/interpolate/ruby.rb @@ -0,0 +1,39 @@ +# heavily based on Masao Mutoh's gettext String interpolation extension +# http://github.com/mutoh/gettext/blob/f6566738b981fe0952548c421042ad1e0cdfb31e/lib/gettext/core_ext/string.rb + +module I18n + DEFAULT_INTERPOLATION_PATTERNS = [ + /%%/, + /%\{([\w|]+)\}/, # matches placeholders like "%{foo} or %{foo|word}" + /%<(\w+)>([^\d]*?\d*\.?\d*[bBdiouxXeEfgGcps])/ # matches placeholders like "%.d" + ].freeze + INTERPOLATION_PATTERN = Regexp.union(DEFAULT_INTERPOLATION_PATTERNS) + deprecate_constant :INTERPOLATION_PATTERN + + class << self + # Return String or raises MissingInterpolationArgument exception. + # Missing argument's logic is handled by I18n.config.missing_interpolation_argument_handler. + def interpolate(string, values) + raise ReservedInterpolationKey.new($1.to_sym, string) if string =~ I18n.reserved_keys_pattern + raise ArgumentError.new('Interpolation values must be a Hash.') unless values.kind_of?(Hash) + interpolate_hash(string, values) + end + + def interpolate_hash(string, values) + string.gsub(Regexp.union(config.interpolation_patterns)) do |match| + if match == '%%' + '%' + else + key = ($1 || $2 || match.tr("%{}", "")).to_sym + value = if values.key?(key) + values[key] + else + config.missing_interpolation_argument_handler.call(key, values, string) + end + value = value.call(values) if value.respond_to?(:call) + $3 ? 
sprintf("%#{$3}", value) : value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale.rb new file mode 100644 index 0000000..c4078e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +module I18n + module Locale + autoload :Fallbacks, 'i18n/locale/fallbacks' + autoload :Tag, 'i18n/locale/tag' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/fallbacks.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/fallbacks.rb new file mode 100644 index 0000000..e30acc4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/fallbacks.rb @@ -0,0 +1,97 @@ +# Locale Fallbacks +# +# Extends the I18n module to hold a fallbacks instance which is set to an +# instance of I18n::Locale::Fallbacks by default but can be swapped with a +# different implementation. +# +# Locale fallbacks will compute a number of fallback locales for a given locale. +# For example: +# +#
+# I18n.fallbacks[:"es-MX"] # => [:"es-MX", :es, :en] 
+# +# Locale fallbacks always fall back to +# +# * all parent locales of a given locale (e.g. :es for :"es-MX") first, +# * the current default locales and all of their parents second +# +# The default locales are set to [] by default but can be set to something else. +# +# One can additionally add any number of additional fallback locales manually. +# These will be added before the default locales to the fallback chain. For +# example: +# +# # using a custom locale as default fallback locale +# +# I18n.fallbacks = I18n::Locale::Fallbacks.new(:"en-GB", :"de-AT" => :de, :"de-CH" => :de) +# I18n.fallbacks[:"de-AT"] # => [:"de-AT", :de, :"en-GB", :en] +# I18n.fallbacks[:"de-CH"] # => [:"de-CH", :de, :"en-GB", :en] +# +# # mapping fallbacks to an existing instance +# +# # people speaking Catalan also speak Spanish as spoken in Spain +# fallbacks = I18n.fallbacks +# fallbacks.map(:ca => :"es-ES") +# fallbacks[:ca] # => [:ca, :"es-ES", :es, :"en-US", :en] +# +# # people speaking Arabian as spoken in Palestine also speak Hebrew as spoken in Israel +# fallbacks.map(:"ar-PS" => :"he-IL") +# fallbacks[:"ar-PS"] # => [:"ar-PS", :ar, :"he-IL", :he, :"en-US", :en] +# fallbacks[:"ar-EG"] # => [:"ar-EG", :ar, :"en-US", :en] +# +# # people speaking Sami as spoken in Finland also speak Swedish and Finnish as spoken in Finland +# fallbacks.map(:sms => [:"se-FI", :"fi-FI"]) +# fallbacks[:sms] # => [:sms, :"se-FI", :se, :"fi-FI", :fi, :"en-US", :en] + +module I18n + module Locale + class Fallbacks < Hash + def initialize(*mappings) + @map = {} + map(mappings.pop) if mappings.last.is_a?(Hash) + self.defaults = mappings.empty? ? [] : mappings + end + + def defaults=(defaults) + @defaults = defaults.flat_map { |default| compute(default, false) } + end + attr_reader :defaults + + def [](locale) + raise InvalidLocale.new(locale) if locale.nil? + raise Disabled.new('fallback#[]') if locale == false + locale = locale.to_sym + super || store(locale, compute(locale)) + end + + def map(*args, &block) + if args.count == 1 && !block_given? + mappings = args.first + mappings.each do |from, to| + from, to = from.to_sym, Array(to) + to.each do |_to| + @map[from] ||= [] + @map[from] << _to.to_sym + end + end + else + @map.map(*args, &block) + end + end + + protected + + def compute(tags, include_defaults = true, exclude = []) + result = Array(tags).flat_map do |tag| + tags = I18n::Locale::Tag.tag(tag).self_and_parents.map! { |t| t.to_sym } - exclude + tags.each { |_tag| tags += compute(@map[_tag], false, exclude + tags) if @map[_tag] } + tags + end + result.push(*defaults) if include_defaults + result.uniq! + result.compact! + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag.rb new file mode 100644 index 0000000..a640b44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag.rb @@ -0,0 +1,28 @@ +# encoding: utf-8 + +module I18n + module Locale + module Tag + autoload :Parents, 'i18n/locale/tag/parents' + autoload :Rfc4646, 'i18n/locale/tag/rfc4646' + autoload :Simple, 'i18n/locale/tag/simple' + + class << self + # Returns the current locale tag implementation. Defaults to +I18n::Locale::Tag::Simple+. + def implementation + @@implementation ||= Simple + end + + # Sets the current locale tag implementation. Use this to set a different locale tag implementation. + def implementation=(implementation) + @@implementation = implementation + end + + # Factory method for locale tags. 
Delegates to the current locale tag implementation. + def tag(tag) + implementation.tag(tag) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/parents.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/parents.rb new file mode 100644 index 0000000..6283e66 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/parents.rb @@ -0,0 +1,24 @@ +module I18n + module Locale + module Tag + module Parents + def parent + @parent ||= + begin + segs = to_a + segs.compact! + segs.length > 1 ? self.class.tag(*segs[0..(segs.length - 2)].join('-')) : nil + end + end + + def self_and_parents + @self_and_parents ||= [self].concat parents + end + + def parents + @parents ||= parent ? [parent].concat(parent.parents) : [] + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/rfc4646.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/rfc4646.rb new file mode 100644 index 0000000..4ce4c75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/rfc4646.rb @@ -0,0 +1,74 @@ +# RFC 4646/47 compliant Locale tag implementation that parses locale tags to +# subtags such as language, script, region, variant etc. +# +# For more information see by http://en.wikipedia.org/wiki/IETF_language_tag +# +# Rfc4646::Parser does not implement grandfathered tags. + +module I18n + module Locale + module Tag + RFC4646_SUBTAGS = [ :language, :script, :region, :variant, :extension, :privateuse, :grandfathered ] + RFC4646_FORMATS = { :language => :downcase, :script => :capitalize, :region => :upcase, :variant => :downcase } + + class Rfc4646 < Struct.new(*RFC4646_SUBTAGS) + class << self + # Parses the given tag and returns a Tag instance if it is valid. + # Returns false if the given tag is not valid according to RFC 4646. + def tag(tag) + matches = parser.match(tag) + new(*matches) if matches + end + + def parser + @@parser ||= Rfc4646::Parser + end + + def parser=(parser) + @@parser = parser + end + end + + include Parents + + RFC4646_FORMATS.each do |name, format| + define_method(name) { self[name].send(format) unless self[name].nil? } + end + + def to_sym + to_s.to_sym + end + + def to_s + @tag ||= to_a.compact.join("-") + end + + def to_a + members.collect { |attr| self.send(attr) } + end + + module Parser + PATTERN = %r{\A(?: + ([a-z]{2,3}(?:(?:-[a-z]{3}){0,3})?|[a-z]{4}|[a-z]{5,8}) # language + (?:-([a-z]{4}))? # script + (?:-([a-z]{2}|\d{3}))? # region + (?:-([0-9a-z]{5,8}|\d[0-9a-z]{3}))* # variant + (?:-([0-9a-wyz](?:-[0-9a-z]{2,8})+))* # extension + (?:-(x(?:-[0-9a-z]{1,8})+))?| # privateuse subtag + (x(?:-[0-9a-z]{1,8})+)| # privateuse tag + /* ([a-z]{1,3}(?:-[0-9a-z]{2,8}){1,2}) */ # grandfathered + )\z}xi + + class << self + def match(tag) + c = PATTERN.match(tag.to_s).captures + c[0..4] << (c[5].nil? ? c[6] : c[5]) << c[7] # TODO c[7] is grandfathered, throw a NotImplemented exception here? + rescue + false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/simple.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/simple.rb new file mode 100644 index 0000000..6d9ab56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/locale/tag/simple.rb @@ -0,0 +1,39 @@ +# Simple Locale tag implementation that computes subtags by simply splitting +# the locale tag at '-' occurences. 
+module I18n + module Locale + module Tag + class Simple + class << self + def tag(tag) + new(tag) + end + end + + include Parents + + attr_reader :tag + + def initialize(*tag) + @tag = tag.join('-').to_sym + end + + def subtags + @subtags = tag.to_s.split('-').map!(&:to_s) + end + + def to_sym + tag + end + + def to_s + tag.to_s + end + + def to_a + subtags + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/middleware.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/middleware.rb new file mode 100644 index 0000000..59b377e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/middleware.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module I18n + class Middleware + + def initialize(app) + @app = app + end + + def call(env) + @app.call(env) + ensure + Thread.current[:i18n_config] = I18n::Config.new + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests.rb new file mode 100644 index 0000000..30d0ed5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module I18n + module Tests + autoload :Basics, 'i18n/tests/basics' + autoload :Defaults, 'i18n/tests/defaults' + autoload :Interpolation, 'i18n/tests/interpolation' + autoload :Link, 'i18n/tests/link' + autoload :Localization, 'i18n/tests/localization' + autoload :Lookup, 'i18n/tests/lookup' + autoload :Pluralization, 'i18n/tests/pluralization' + autoload :Procs, 'i18n/tests/procs' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/basics.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/basics.rb new file mode 100644 index 0000000..be82430 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/basics.rb @@ -0,0 +1,58 @@ +module I18n + module Tests + module Basics + def teardown + I18n.available_locales = nil + end + + test "available_locales returns the available_locales produced by the backend, by default" do + I18n.backend.store_translations('de', :foo => 'bar') + I18n.backend.store_translations('en', :foo => 'foo') + + assert_equal I18n.available_locales, I18n.backend.available_locales + end + + test "available_locales can be set to something else independently from the actual locale data" do + I18n.backend.store_translations('de', :foo => 'bar') + I18n.backend.store_translations('en', :foo => 'foo') + + I18n.available_locales = :foo + assert_equal [:foo], I18n.available_locales + + I18n.available_locales = [:foo, 'bar'] + assert_equal [:foo, :bar], I18n.available_locales + + I18n.available_locales = nil + assert_equal I18n.available_locales, I18n.backend.available_locales + end + + test "available_locales memoizes when set explicitely" do + I18n.backend.expects(:available_locales).never + I18n.available_locales = [:foo] + I18n.backend.store_translations('de', :bar => 'baz') + I18n.reload! + assert_equal [:foo], I18n.available_locales + end + + test "available_locales delegates to the backend when not set explicitely" do + original_available_locales_value = I18n.backend.available_locales + I18n.backend.expects(:available_locales).returns(original_available_locales_value).twice + assert_equal I18n.backend.available_locales, I18n.available_locales + end + + test "exists? 
is implemented by the backend" do + I18n.backend.store_translations(:foo, :bar => 'baz') + assert I18n.exists?(:bar, :foo) + end + + test "storing a nil value as a translation removes it from the available locale data" do + I18n.backend.store_translations(:en, :to_be_deleted => 'bar') + assert_equal 'bar', I18n.t(:to_be_deleted, :default => 'baz') + + I18n.cache_store.clear if I18n.respond_to?(:cache_store) && I18n.cache_store + I18n.backend.store_translations(:en, :to_be_deleted => nil) + assert_equal 'baz', I18n.t(:to_be_deleted, :default => 'baz') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/defaults.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/defaults.rb new file mode 100644 index 0000000..31fdb46 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/defaults.rb @@ -0,0 +1,52 @@ +# encoding: utf-8 + +module I18n + module Tests + module Defaults + def setup + super + I18n.backend.store_translations(:en, :foo => { :bar => 'bar', :baz => 'baz' }) + end + + test "defaults: given nil as a key it returns the given default" do + assert_equal 'default', I18n.t(nil, :default => 'default') + end + + test "defaults: given a symbol as a default it translates the symbol" do + assert_equal 'bar', I18n.t(nil, :default => :'foo.bar') + end + + test "defaults: given a symbol as a default and a scope it stays inside the scope when looking up the symbol" do + assert_equal 'bar', I18n.t(:missing, :default => :bar, :scope => :foo) + end + + test "defaults: given an array as a default it returns the first match" do + assert_equal 'bar', I18n.t(:does_not_exist, :default => [:does_not_exist_2, :'foo.bar']) + end + + test "defaults: given an array as a default with false it returns false" do + assert_equal false, I18n.t(:does_not_exist, :default => [false]) + end + + test "defaults: given false it returns false" do + assert_equal false, I18n.t(:does_not_exist, :default => false) + end + + test "defaults: given nil it returns nil" do + assert_nil I18n.t(:does_not_exist, :default => nil) + end + + test "defaults: given an array of missing keys it raises a MissingTranslationData exception" do + assert_raises I18n::MissingTranslationData do + I18n.t(:does_not_exist, :default => [:does_not_exist_2, :does_not_exist_3], :raise => true) + end + end + + test "defaults: using a custom scope separator" do + # data must have been stored using the custom separator when using the ActiveRecord backend + I18n.backend.store_translations(:en, { :foo => { :bar => 'bar' } }, { :separator => '|' }) + assert_equal 'bar', I18n.t(nil, :default => :'foo|bar', :separator => '|') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/interpolation.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/interpolation.rb new file mode 100644 index 0000000..6bfe2e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/interpolation.rb @@ -0,0 +1,163 @@ +# encoding: utf-8 + +module I18n + module Tests + module Interpolation + # If no interpolation parameter is not given, I18n should not alter the string. + # This behavior is due to three reasons: + # + # * Checking interpolation keys in all strings hits performance, badly; + # + # * This allows us to retrieve untouched values through I18n. For example + # I could have a middleware that returns I18n lookup results in JSON + # to be processed through Javascript. 
Leaving the keys untouched allows + # the interpolation to happen at the javascript level; + # + # * Security concerns: if I allow users to translate a web site, they can + # insert %{} in messages causing the I18n lookup to fail in every request. + # + test "interpolation: given no values it does not alter the string" do + assert_equal 'Hi %{name}!', interpolate(:default => 'Hi %{name}!') + end + + test "interpolation: given values it interpolates them into the string" do + assert_equal 'Hi David!', interpolate(:default => 'Hi %{name}!', :name => 'David') + end + + test "interpolation: works with a pipe" do + assert_equal 'Hi david!', interpolate(:default => 'Hi %{name|lowercase}!', :'name|lowercase' => 'david') + end + + test "interpolation: given a nil value it still interpolates it into the string" do + assert_equal 'Hi !', interpolate(:default => 'Hi %{name}!', :name => nil) + end + + test "interpolation: given a lambda as a value it calls it if the string contains the key" do + assert_equal 'Hi David!', interpolate(:default => 'Hi %{name}!', :name => lambda { |*args| 'David' }) + end + + test "interpolation: given a lambda as a value it does not call it if the string does not contain the key" do + assert_nothing_raised { interpolate(:default => 'Hi!', :name => lambda { |*args| raise 'fail' }) } + end + + test "interpolation: given values but missing a key it raises I18n::MissingInterpolationArgument" do + assert_raises(I18n::MissingInterpolationArgument) do + interpolate(:default => '%{foo}', :bar => 'bar') + end + end + + test "interpolation: it does not raise I18n::MissingInterpolationArgument for escaped variables" do + assert_nothing_raised do + assert_equal 'Barr %{foo}', interpolate(:default => '%{bar} %%{foo}', :bar => 'Barr') + end + end + + test "interpolation: it does not change the original, stored translation string" do + I18n.backend.store_translations(:en, :interpolate => 'Hi %{name}!') + assert_equal 'Hi David!', interpolate(:interpolate, :name => 'David') + assert_equal 'Hi Yehuda!', interpolate(:interpolate, :name => 'Yehuda') + end + + test "interpolation: given an array interpolates each element" do + I18n.backend.store_translations(:en, :array_interpolate => ['Hi', 'Mr. %{name}', 'or sir %{name}']) + assert_equal ['Hi', 'Mr. 
Bartuz', 'or sir Bartuz'], interpolate(:array_interpolate, :name => 'Bartuz') + end + + test "interpolation: given the translation is in utf-8 it still works" do + assert_equal 'Häi David!', interpolate(:default => 'Häi %{name}!', :name => 'David') + end + + test "interpolation: given the value is in utf-8 it still works" do + assert_equal 'Hi ゆきひろ!', interpolate(:default => 'Hi %{name}!', :name => 'ゆきひろ') + end + + test "interpolation: given the translation and the value are in utf-8 it still works" do + assert_equal 'こんにちは、ゆきひろさん!', interpolate(:default => 'こんにちは、%{name}さん!', :name => 'ゆきひろ') + end + + if Object.const_defined?(:Encoding) + test "interpolation: given a euc-jp translation and a utf-8 value it raises Encoding::CompatibilityError" do + assert_raises(Encoding::CompatibilityError) do + interpolate(:default => euc_jp('こんにちは、%{name}さん!'), :name => 'ゆきひろ') + end + end + + test "interpolation: given a utf-8 translation and a euc-jp value it raises Encoding::CompatibilityError" do + assert_raises(Encoding::CompatibilityError) do + interpolate(:default => 'こんにちは、%{name}さん!', :name => euc_jp('ゆきひろ')) + end + end + + test "interpolation: ASCII strings in the backend should be encoded to UTF8 if interpolation options are in UTF8" do + I18n.backend.store_translations 'en', 'encoding' => ('%{who} let me go'.force_encoding("ASCII")) + result = I18n.t 'encoding', :who => "måmmå miå" + assert_equal Encoding::UTF_8, result.encoding + end + + test "interpolation: UTF8 strings in the backend are still returned as UTF8 with ASCII interpolation" do + I18n.backend.store_translations 'en', 'encoding' => 'måmmå miå %{what}' + result = I18n.t 'encoding', :what => 'let me go'.force_encoding("ASCII") + assert_equal Encoding::UTF_8, result.encoding + end + + test "interpolation: UTF8 strings in the backend are still returned as UTF8 even with numbers interpolation" do + I18n.backend.store_translations 'en', 'encoding' => '%{count} times: måmmå miå' + result = I18n.t 'encoding', :count => 3 + assert_equal Encoding::UTF_8, result.encoding + end + end + + test "interpolation: given a translations containing a reserved key it raises I18n::ReservedInterpolationKey" do + assert_raises(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{exception_handler}') } + assert_raises(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{default}') } + assert_raises(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{separator}') } + assert_raises(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{scope}') } + end + + test "interpolation: deep interpolation for default string" do + assert_equal 'Hi %{name}!', interpolate(:default => 'Hi %{name}!', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for interpolated string" do + assert_equal 'Hi Ann!', interpolate(:default => 'Hi %{name}!', :name => 'Ann', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for Hash" do + people = { :people => { :ann => 'Ann is %{ann}', :john => 'John is %{john}' } } + interpolated_people = { :people => { :ann => 'Ann is good', :john => 'John is big' } } + assert_equal interpolated_people, interpolate(:default => people, :ann => 'good', :john => 'big', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for Array" do + people = { :people => ['Ann is %{ann}', 'John is %{john}'] } + interpolated_people = { :people => ['Ann is good', 'John is big'] } + assert_equal 
interpolated_people, interpolate(:default => people, :ann => 'good', :john => 'big', :deep_interpolation => true) + end + + protected + + def capture(stream) + begin + stream = stream.to_s + eval "$#{stream} = StringIO.new" + yield + result = eval("$#{stream}").string + ensure + eval("$#{stream} = #{stream.upcase}") + end + + result + end + + def euc_jp(string) + string.encode!(Encoding::EUC_JP) + end + + def interpolate(*args) + options = args.last.is_a?(Hash) ? args.pop : {} + key = args.pop + I18n.backend.translate('en', key, options) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/link.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/link.rb new file mode 100644 index 0000000..d2f20e8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/link.rb @@ -0,0 +1,66 @@ +# encoding: utf-8 + +module I18n + module Tests + module Link + test "linked lookup: if a key resolves to a symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :link => :linked, + :linked => 'linked' + } + assert_equal 'linked', I18n.backend.translate('en', :link) + end + + test "linked lookup: if a key resolves to a dot-separated symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :link => :"foo.linked", + :foo => { :linked => 'linked' } + } + assert_equal('linked', I18n.backend.translate('en', :link)) + end + + test "linked lookup: if a dot-separated key resolves to a symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :foo => { :link => :linked }, + :linked => 'linked' + } + assert_equal('linked', I18n.backend.translate('en', :'foo.link')) + end + + test "linked lookup: if a dot-separated key resolves to a dot-separated symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :foo => { :link => :"bar.linked" }, + :bar => { :linked => 'linked' } + } + assert_equal('linked', I18n.backend.translate('en', :'foo.link')) + end + + test "linked lookup: links always refer to the absolute key" do + I18n.backend.store_translations 'en', { + :foo => { :link => :linked, :linked => 'linked in foo' }, + :linked => 'linked absolutely' + } + assert_equal 'linked absolutely', I18n.backend.translate('en', :link, :scope => :foo) + end + + test "linked lookup: a link can resolve to a namespace in the middle of a dot-separated key" do + I18n.backend.store_translations 'en', { + :activemodel => { :errors => { :messages => { :blank => "can't be blank" } } }, + :activerecord => { :errors => { :messages => :"activemodel.errors.messages" } } + } + assert_equal "can't be blank", I18n.t(:"activerecord.errors.messages.blank") + assert_equal "can't be blank", I18n.t(:"activerecord.errors.messages.blank") + end + + test "linked lookup: a link can resolve with option :count" do + I18n.backend.store_translations 'en', { + :counter => :counted, + :counted => { :foo => { :one => "one", :other => "other" }, :bar => "bar" } + } + assert_equal "one", I18n.t(:'counter.foo', count: 1) + assert_equal "other", I18n.t(:'counter.foo', count: 2) + assert_equal "bar", I18n.t(:'counter.bar', count: 3) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization.rb new file mode 100644 index 0000000..53b1502 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization.rb @@ -0,0 +1,19 @@ +module I18n + module Tests + module Localization + autoload 
:Date, 'i18n/tests/localization/date' + autoload :DateTime, 'i18n/tests/localization/date_time' + autoload :Time, 'i18n/tests/localization/time' + autoload :Procs, 'i18n/tests/localization/procs' + + def self.included(base) + base.class_eval do + include I18n::Tests::Localization::Date + include I18n::Tests::Localization::DateTime + include I18n::Tests::Localization::Procs + include I18n::Tests::Localization::Time + end + end + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/date.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/date.rb new file mode 100644 index 0000000..2a44371 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/date.rb @@ -0,0 +1,117 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Date + def setup + super + setup_date_translations + @date = ::Date.new(2008, 3, 1) + end + + test "localize Date: given the short format it uses it" do + assert_equal '01. Mär', I18n.l(@date, :format => :short, :locale => :de) + end + + test "localize Date: given the long format it uses it" do + assert_equal '01. März 2008', I18n.l(@date, :format => :long, :locale => :de) + end + + test "localize Date: given the default format it uses it" do + assert_equal '01.03.2008', I18n.l(@date, :format => :default, :locale => :de) + end + + test "localize Date: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@date, :format => '%A', :locale => :de) + end + + test "localize Date: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@date, :format => '%^A', :locale => :de) + end + + test "localize Date: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@date, :format => '%a', :locale => :de) + end + + test "localize Date: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@date, :format => '%^a', :locale => :de) + end + + test "localize Date: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@date, :format => '%B', :locale => :de) + end + + test "localize Date: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@date, :format => '%^B', :locale => :de) + end + + test "localize Date: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@date, :format => '%b', :locale => :de) + end + + test "localize Date: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@date, :format => '%^b', :locale => :de) + end + + test "localize Date: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::Date.new(2008, 2, 1), :format => "%-d. 
%^B %Y", :locale => :de) + end + + test "localize Date: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@date, :format => '%b', :locale => :fr) + end + + test "localize Date: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@date, :format => '%x') } + end + + test "localize Date: does not modify the options hash" do + options = { :format => '%b', :locale => :de } + assert_equal 'Mär', I18n.l(@date, **options) + assert_equal({ :format => '%b', :locale => :de }, options) + assert_nothing_raised { I18n.l(@date, **options.freeze) } + end + + test "localize Date: given nil with default value it returns default" do + assert_equal 'default', I18n.l(nil, :default => 'default') + end + + test "localize Date: given nil it raises I18n::ArgumentError" do + assert_raises(I18n::ArgumentError) { I18n.l(nil) } + end + + test "localize Date: given a plain Object it raises I18n::ArgumentError" do + assert_raises(I18n::ArgumentError) { I18n.l(Object.new) } + end + + test "localize Date: given a format is missing it raises I18n::MissingTranslationData" do + assert_raises(I18n::MissingTranslationData) { I18n.l(@date, :format => :missing) } + end + + test "localize Date: it does not alter the format string" do + assert_equal '01. Februar 2009', I18n.l(::Date.parse('2009-02-01'), :format => :long, :locale => :de) + assert_equal '01. Oktober 2009', I18n.l(::Date.parse('2009-10-01'), :format => :long, :locale => :de) + end + + protected + + def setup_date_translations + I18n.backend.store_translations :de, { + :date => { + :formats => { + :default => "%d.%m.%Y", + :short => "%d. %b", + :long => "%d. %B %Y", + }, + :day_names => %w(Sonntag Montag Dienstag Mittwoch Donnerstag Freitag Samstag), + :abbr_day_names => %w(So Mo Di Mi Do Fr Sa), + :month_names => %w(Januar Februar März April Mai Juni Juli August September Oktober November Dezember).unshift(nil), + :abbr_month_names => %w(Jan Feb Mär Apr Mai Jun Jul Aug Sep Okt Nov Dez).unshift(nil) + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/date_time.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/date_time.rb new file mode 100644 index 0000000..b09b888 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/date_time.rb @@ -0,0 +1,103 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module DateTime + def setup + super + setup_datetime_translations + @datetime = ::DateTime.new(2008, 3, 1, 6) + @other_datetime = ::DateTime.new(2008, 3, 1, 18) + end + + test "localize DateTime: given the short format it uses it" do + assert_equal '01. Mär 06:00', I18n.l(@datetime, :format => :short, :locale => :de) + end + + test "localize DateTime: given the long format it uses it" do + assert_equal '01. März 2008 06:00', I18n.l(@datetime, :format => :long, :locale => :de) + end + + test "localize DateTime: given the default format it uses it" do + assert_equal 'Sa, 01. 
Mär 2008 06:00:00 +0000', I18n.l(@datetime, :format => :default, :locale => :de) + end + + test "localize DateTime: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@datetime, :format => '%A', :locale => :de) + end + + test "localize DateTime: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@datetime, :format => '%^A', :locale => :de) + end + + test "localize DateTime: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@datetime, :format => '%a', :locale => :de) + end + + test "localize DateTime: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@datetime, :format => '%^a', :locale => :de) + end + + test "localize DateTime: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@datetime, :format => '%B', :locale => :de) + end + + test "localize DateTime: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@datetime, :format => '%^B', :locale => :de) + end + + test "localize DateTime: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@datetime, :format => '%b', :locale => :de) + end + + test "localize DateTime: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@datetime, :format => '%^b', :locale => :de) + end + + test "localize DateTime: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::DateTime.new(2008, 2, 1, 6), :format => "%-d. %^B %Y", :locale => :de) + end + + test "localize DateTime: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@datetime, :format => '%b', :locale => :fr) + end + + test "localize DateTime: given a meridian indicator format it returns the correct meridian indicator" do + assert_equal 'AM', I18n.l(@datetime, :format => '%p', :locale => :de) + assert_equal 'PM', I18n.l(@other_datetime, :format => '%p', :locale => :de) + end + + test "localize DateTime: given a meridian indicator format it returns the correct meridian indicator in downcase" do + assert_equal 'am', I18n.l(@datetime, :format => '%P', :locale => :de) + assert_equal 'pm', I18n.l(@other_datetime, :format => '%P', :locale => :de) + end + + test "localize DateTime: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@datetime, :format => '%x') } + end + + test "localize DateTime: given a format is missing it raises I18n::MissingTranslationData" do + assert_raises(I18n::MissingTranslationData) { I18n.l(@datetime, :format => :missing) } + end + + protected + + def setup_datetime_translations + # time translations might have been set up in Tests::Api::Localization::Time + I18n.backend.store_translations :de, { + :time => { + :formats => { + :default => "%a, %d. %b %Y %H:%M:%S %z", + :short => "%d. %b %H:%M", + :long => "%d. 
%B %Y %H:%M" + }, + :am => 'am', + :pm => 'pm' + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/procs.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/procs.rb new file mode 100644 index 0000000..7db45d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/procs.rb @@ -0,0 +1,118 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Procs + test "localize: using day names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/Суббота/, I18n.l(time, :format => "%A, %d %B", :locale => :ru)) + assert_match(/суббота/, I18n.l(time, :format => "%d %B (%A)", :locale => :ru)) + end + + test "localize: using month names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/марта/, I18n.l(time, :format => "%d %B %Y", :locale => :ru)) + assert_match(/Март /, I18n.l(time, :format => "%B %Y", :locale => :ru)) + end + + test "localize: using abbreviated day names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/марта/, I18n.l(time, :format => "%d %b %Y", :locale => :ru)) + assert_match(/март /, I18n.l(time, :format => "%b %Y", :locale => :ru)) + end + + test "localize Date: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + date = ::Date.new(2008, 3, 1) + assert_equal '[Sat, 01 Mar 2008, {}]', I18n.l(date, :format => :proc, :locale => :ru) + end + + test "localize Date: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + date = ::Date.new(2008, 3, 1) + assert_equal '[Sat, 01 Mar 2008, {:foo=>"foo"}]', I18n.l(date, :format => :proc, :foo => 'foo', :locale => :ru) + end + + test "localize DateTime: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + datetime = ::DateTime.new(2008, 3, 1, 6) + assert_equal '[Sat, 01 Mar 2008 06:00:00 +00:00, {}]', I18n.l(datetime, :format => :proc, :locale => :ru) + end + + test "localize DateTime: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + datetime = ::DateTime.new(2008, 3, 1, 6) + assert_equal '[Sat, 01 Mar 2008 06:00:00 +00:00, {:foo=>"foo"}]', I18n.l(datetime, :format => :proc, :foo => 'foo', :locale => :ru) + end + + test "localize Time: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_equal I18n::Tests::Localization::Procs.inspect_args([time], {}), I18n.l(time, :format => :proc, :locale => :ru) + end + + test "localize Time: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + options = { :foo => 'foo' } + assert_equal I18n::Tests::Localization::Procs.inspect_args([time], options), I18n.l(time, **options.merge(:format => :proc, :locale => :ru)) + end + + protected + + def self.inspect_args(args, kwargs) + args << kwargs + args = args.map do |arg| + case arg + when ::Time, ::DateTime + arg.strftime('%a, %d %b %Y %H:%M:%S %Z').sub('+0000', '+00:00') + when ::Date + arg.strftime('%a, %d %b %Y') + when Hash + arg.delete(:fallback_in_progress) + 
arg.delete(:fallback_original_locale) + arg.inspect + else + arg.inspect + end + end + "[#{args.join(', ')}]" + end + + def setup_time_proc_translations + I18n.backend.store_translations :ru, { + :time => { + :formats => { + :proc => lambda { |*args, **kwargs| I18n::Tests::Localization::Procs.inspect_args(args, kwargs) } + } + }, + :date => { + :formats => { + :proc => lambda { |*args, **kwargs| I18n::Tests::Localization::Procs.inspect_args(args, kwargs) } + }, + :'day_names' => lambda { |key, options| + (options[:format] =~ /^%A/) ? + %w(Воскресенье Понедельник Вторник Среда Четверг Пятница Суббота) : + %w(воскресенье понедельник вторник среда четверг пятница суббота) + }, + :'month_names' => lambda { |key, options| + (options[:format] =~ /(%d|%e)(\s*)?(%B)/) ? + %w(января февраля марта апреля мая июня июля августа сентября октября ноября декабря).unshift(nil) : + %w(Январь Февраль Март Апрель Май Июнь Июль Август Сентябрь Октябрь Ноябрь Декабрь).unshift(nil) + }, + :'abbr_month_names' => lambda { |key, options| + (options[:format] =~ /(%d|%e)(\s*)(%b)/) ? + %w(янв. февр. марта апр. мая июня июля авг. сент. окт. нояб. дек.).unshift(nil) : + %w(янв. февр. март апр. май июнь июль авг. сент. окт. нояб. дек.).unshift(nil) + }, + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/time.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/time.rb new file mode 100644 index 0000000..7afe176 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/localization/time.rb @@ -0,0 +1,103 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Time + def setup + super + setup_time_translations + @time = ::Time.utc(2008, 3, 1, 6, 0) + @other_time = ::Time.utc(2008, 3, 1, 18, 0) + end + + test "localize Time: given the short format it uses it" do + assert_equal '01. Mär 06:00', I18n.l(@time, :format => :short, :locale => :de) + end + + test "localize Time: given the long format it uses it" do + assert_equal '01. März 2008 06:00', I18n.l(@time, :format => :long, :locale => :de) + end + + # TODO Seems to break on Windows because ENV['TZ'] is ignored. What's a better way to do this? + # def test_localize_given_the_default_format_it_uses_it + # assert_equal 'Sa, 01. 
Mar 2008 06:00:00 +0000', I18n.l(@time, :format => :default, :locale => :de) + # end + + test "localize Time: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@time, :format => '%A', :locale => :de) + end + + test "localize Time: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@time, :format => '%^A', :locale => :de) + end + + test "localize Time: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@time, :format => '%a', :locale => :de) + end + + test "localize Time: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@time, :format => '%^a', :locale => :de) + end + + test "localize Time: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@time, :format => '%B', :locale => :de) + end + + test "localize Time: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@time, :format => '%^B', :locale => :de) + end + + test "localize Time: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@time, :format => '%b', :locale => :de) + end + + test "localize Time: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@time, :format => '%^b', :locale => :de) + end + + test "localize Time: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::Time.utc(2008, 2, 1, 6, 0), :format => "%-d. %^B %Y", :locale => :de) + end + + test "localize Time: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@time, :format => '%b', :locale => :fr) + end + + test "localize Time: given a meridian indicator format it returns the correct meridian indicator" do + assert_equal 'AM', I18n.l(@time, :format => '%p', :locale => :de) + assert_equal 'PM', I18n.l(@other_time, :format => '%p', :locale => :de) + end + + test "localize Time: given a meridian indicator format it returns the correct meridian indicator in upcase" do + assert_equal 'am', I18n.l(@time, :format => '%P', :locale => :de) + assert_equal 'pm', I18n.l(@other_time, :format => '%P', :locale => :de) + end + + test "localize Time: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@time, :format => '%x') } + end + + test "localize Time: given a format is missing it raises I18n::MissingTranslationData" do + assert_raises(I18n::MissingTranslationData) { I18n.l(@time, :format => :missing) } + end + + protected + + def setup_time_translations + I18n.backend.store_translations :de, { + :time => { + :formats => { + :default => "%a, %d. %b %Y %H:%M:%S %z", + :short => "%d. %b %H:%M", + :long => "%d. 
%B %Y %H:%M", + }, + :am => 'am', + :pm => 'pm' + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/lookup.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/lookup.rb new file mode 100644 index 0000000..3bd46b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/lookup.rb @@ -0,0 +1,81 @@ +# encoding: utf-8 + +module I18n + module Tests + module Lookup + def setup + super + I18n.backend.store_translations(:en, :foo => { :bar => 'bar', :baz => 'baz' }, :falsy => false, :truthy => true, + :string => "a", :array => %w(a b c), :hash => { "a" => "b" }) + end + + test "lookup: it returns a string" do + assert_equal("a", I18n.t(:string)) + end + + test "lookup: it returns hash" do + assert_equal({ :a => "b" }, I18n.t(:hash)) + end + + test "lookup: it returns an array" do + assert_equal(%w(a b c), I18n.t(:array)) + end + + test "lookup: it returns a native true" do + assert I18n.t(:truthy) === true + end + + test "lookup: it returns a native false" do + assert I18n.t(:falsy) === false + end + + test "lookup: given a missing key, no default and no raise option it returns an error message" do + assert_equal "translation missing: en.missing", I18n.t(:missing) + end + + test "lookup: given a missing key, no default and the raise option it raises MissingTranslationData" do + assert_raises(I18n::MissingTranslationData) { I18n.t(:missing, :raise => true) } + end + + test "lookup: does not raise an exception if no translation data is present for the given locale" do + assert_nothing_raised { I18n.t(:foo, :locale => :xx) } + end + + test "lookup: does not modify the options hash" do + options = {} + assert_equal "a", I18n.t(:string, **options) + assert_equal({}, options) + assert_nothing_raised { I18n.t(:string, **options.freeze) } + end + + test "lookup: given an array of keys it translates all of them" do + assert_equal %w(bar baz), I18n.t([:bar, :baz], :scope => [:foo]) + end + + test "lookup: using a custom scope separator" do + # data must have been stored using the custom separator when using the ActiveRecord backend + I18n.backend.store_translations(:en, { :foo => { :bar => 'bar' } }, { :separator => '|' }) + assert_equal 'bar', I18n.t('foo|bar', :separator => '|') + end + + # In fact it probably *should* fail but Rails currently relies on using the default locale instead. + # So we'll stick to this for now until we get it fixed in Rails. + test "lookup: given nil as a locale it does not raise but use the default locale" do + # assert_raises(I18n::InvalidLocale) { I18n.t(:bar, :locale => nil) } + assert_nothing_raised { I18n.t(:bar, :locale => nil) } + end + + test "lookup: a resulting String is not frozen" do + assert !I18n.t(:string).frozen? + end + + test "lookup: a resulting Array is not frozen" do + assert !I18n.t(:array).frozen? + end + + test "lookup: a resulting Hash is not frozen" do + assert !I18n.t(:hash).frozen? 
+ end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/pluralization.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/pluralization.rb new file mode 100644 index 0000000..19e73f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/pluralization.rb @@ -0,0 +1,35 @@ +# encoding: utf-8 + +module I18n + module Tests + module Pluralization + test "pluralization: given 0 it returns the :zero translation if it is defined" do + assert_equal 'zero', I18n.t(:default => { :zero => 'zero' }, :count => 0) + end + + test "pluralization: given 0 it returns the :other translation if :zero is not defined" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 0) + end + + test "pluralization: given 1 it returns the singular translation" do + assert_equal 'bar', I18n.t(:default => { :one => 'bar' }, :count => 1) + end + + test "pluralization: given 2 it returns the :other translation" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 2) + end + + test "pluralization: given 3 it returns the :other translation" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 3) + end + + test "pluralization: given nil it returns the whole entry" do + assert_equal({ :one => 'bar' }, I18n.t(:default => { :one => 'bar' }, :count => nil)) + end + + test "pluralization: given incomplete pluralization data it raises I18n::InvalidPluralizationData" do + assert_raises(I18n::InvalidPluralizationData) { I18n.t(:default => { :one => 'bar' }, :count => 2) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/procs.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/procs.rb new file mode 100644 index 0000000..6abd861 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/tests/procs.rb @@ -0,0 +1,66 @@ +# encoding: utf-8 + +module I18n + module Tests + module Procs + test "lookup: given a translation is a proc it calls the proc with the key and interpolation values" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(:a_lambda, :foo => 'foo') + end + + test "lookup: given a translation is a proc it passes the interpolation values as keyword arguments" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |key, foo:, **| I18n::Tests::Procs.filter_args(key, foo: foo) }) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(:a_lambda, :foo => 'foo') + end + + test "defaults: given a default is a Proc it calls it with the key and interpolation values" do + proc = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + assert_equal '[nil, {:foo=>"foo"}]', I18n.t(nil, :default => proc, :foo => 'foo') + end + + test "defaults: given a default is a key that resolves to a Proc it calls it with the key and interpolation values" do + the_lambda = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + I18n.backend.store_translations(:en, :a_lambda => the_lambda) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(nil, :default => :a_lambda, :foo => 'foo') + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(nil, :default => [nil, :a_lambda], :foo => 'foo') + end + + test "interpolation: given an interpolation value is a lambda it calls it with key and values before interpolating it" do + proc = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + assert_match %r(\[\{:foo=>#\}\]), I18n.t(nil, :default 
=> '%{foo}', :foo => proc) + end + + test "interpolation: given a key resolves to a Proc that returns a string then interpolation still works" do + proc = lambda { |*args| "%{foo}: " + I18n::Tests::Procs.filter_args(*args) } + assert_equal 'foo: [nil, {:foo=>"foo"}]', I18n.t(nil, :default => proc, :foo => 'foo') + end + + test "pluralization: given a key resolves to a Proc that returns valid data then pluralization still works" do + proc = lambda { |*args| { :zero => 'zero', :one => 'one', :other => 'other' } } + assert_equal 'zero', I18n.t(:default => proc, :count => 0) + assert_equal 'one', I18n.t(:default => proc, :count => 1) + assert_equal 'other', I18n.t(:default => proc, :count => 2) + end + + test "lookup: given the option :resolve => false was passed it does not resolve proc translations" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }) + assert_equal Proc, I18n.t(:a_lambda, :resolve => false).class + end + + test "lookup: given the option :resolve => false was passed it does not resolve proc default" do + assert_equal Proc, I18n.t(nil, :default => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }, :resolve => false).class + end + + + def self.filter_args(*args) + args.map do |arg| + if arg.is_a?(Hash) + arg.delete(:fallback_in_progress) + arg.delete(:fallback_original_locale) + end + arg + end.inspect + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/utils.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/utils.rb new file mode 100644 index 0000000..8841561 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/utils.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module I18n + module Utils + class << self + if Hash.method_defined?(:except) + def except(hash, *keys) + hash.except(*keys) + end + else + def except(hash, *keys) + hash = hash.dup + keys.each { |k| hash.delete(k) } + hash + end + end + + def deep_merge(hash, other_hash, &block) + deep_merge!(hash.dup, other_hash, &block) + end + + def deep_merge!(hash, other_hash, &block) + hash.merge!(other_hash) do |key, this_val, other_val| + if this_val.is_a?(Hash) && other_val.is_a?(Hash) + deep_merge(this_val, other_val, &block) + elsif block_given? + yield key, this_val, other_val + else + other_val + end + end + end + + def deep_symbolize_keys(hash) + hash.each_with_object({}) do |(key, value), result| + result[key.respond_to?(:to_sym) ? 
key.to_sym : key] = deep_symbolize_keys_in_object(value) + result + end + end + + private + + def deep_symbolize_keys_in_object(value) + case value + when Hash + deep_symbolize_keys(value) + when Array + value.map { |e| deep_symbolize_keys_in_object(e) } + else + value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/version.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/version.rb new file mode 100644 index 0000000..bc754ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.10.0/lib/i18n/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module I18n + VERSION = "1.10.0" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/MIT-LICENSE b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/MIT-LICENSE new file mode 100644 index 0000000..ed8e9ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2008 The Ruby I18n team + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/README.md b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/README.md new file mode 100644 index 0000000..98132cb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/README.md @@ -0,0 +1,122 @@ +# Ruby I18n + +[![Build Status](https://github.com/ruby-i18n/i18n/workflows/Ruby/badge.svg)](https://github.com/ruby-i18n/i18n/actions?query=workflow%3ARuby) + +Ruby internationalization and localization (i18n) solution. + +Currently maintained by @radar. + +## Usage + +### Rails + +You will most commonly use this library within a Rails app. + +[See the Rails Guide](https://guides.rubyonrails.org/i18n.html) for an example of its usage. + +### Ruby (without Rails) + +If you want to use this library without Rails, you can simply add `i18n` to your `Gemfile`: + +```ruby +gem 'i18n' +``` + +Then configure I18n with some translations, and a default locale: + +```ruby +I18n.load_path << Dir[File.expand_path("config/locales") + "/*.yml"] +I18n.default_locale = :en # (note that `en` is already the default!) 
+``` + +A simple translation file in your project might live at `config/locales/en.yml` and look like: + +```yml +en: + test: "This is a test" +``` + +You can then access this translation by doing: + +```ruby +I18n.t(:test) +``` + +You can switch locales in your project by setting `I18n.locale` to a different value: + +```ruby +I18n.locale = :de +I18n.t(:test) # => "Dies ist ein Test" +``` + +## Features + +* Translation and localization +* Interpolation of values to translations +* Pluralization (CLDR compatible) +* Customizable transliteration to ASCII +* Flexible defaults +* Bulk lookup +* Lambdas as translation data +* Custom key/scope separator +* Custom exception handlers +* Extensible architecture with a swappable backend + +## Pluggable Features + +* Cache +* Pluralization: lambda pluralizers stored as translation data +* Locale fallbacks, RFC4647 compliant (optionally: RFC4646 locale validation) +* [Gettext support](https://github.com/ruby-i18n/i18n/wiki/Gettext) +* Translation metadata + +## Alternative Backend + +* Chain +* ActiveRecord (optionally: ActiveRecord::Missing and ActiveRecord::StoreProcs) +* KeyValue (uses active_support/json and cannot store procs) + +For more information and lots of resources see [the 'Resources' page on the wiki](https://github.com/ruby-i18n/i18n/wiki/Resources). + +## Tests + +You can run tests both with + +* `rake test` or just `rake` +* run any test file directly, e.g. `ruby -Ilib:test test/api/simple_test.rb` + +You can run all tests against all Gemfiles with + +* `ruby test/run_all.rb` + +The structure of the test suite is a bit unusual as it uses modules to reuse +particular tests in different test cases. + +The reason for this is that we need to enforce the I18n API across various +combinations of extensions. E.g. the Simple backend alone needs to support +the same API as any combination of feature and/or optimization modules included +to the Simple backend. We test this by reusing the same API definition (implemented +as test methods) in test cases with different setups. + +You can find the test cases that enforce the API in test/api. And you can find +the API definition test methods in test/api/tests. + +All other test cases (e.g. as defined in test/backend, test/core_ext) etc. +follow the usual test setup and should be easy to grok. + +## More Documentation + +Additional documentation can be found here: https://github.com/ruby-i18n/i18n/wiki + +## Contributors + +* @radar +* @carlosantoniodasilva +* @josevalim +* @knapo +* @tigrish +* [and many more](https://github.com/ruby-i18n/i18n/graphs/contributors) + +## License + +MIT License. See the included MIT-LICENSE file. 
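The standalone usage described in the README above can be exercised end-to-end with a short script; a minimal sketch, assuming only the `i18n` gem is installed (the `:greeting` key and its values are illustrative, not shipped with the gem):

```ruby
require "i18n"

# Store translations directly on the backend instead of loading YAML files.
I18n.backend.store_translations(:en, { :greeting => "Hello, %{name}!" })
I18n.backend.store_translations(:de, { :greeting => "Hallo, %{name}!" })

I18n.available_locales = [:en, :de]
I18n.default_locale    = :en

I18n.t(:greeting, :name => "World")                # => "Hello, World!"
I18n.t(:greeting, :name => "Welt", :locale => :de) # => "Hallo, Welt!"
```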
diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n.rb new file mode 100644 index 0000000..ba29a1e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n.rb @@ -0,0 +1,414 @@ +# frozen_string_literal: true + +require 'concurrent/map' + +require 'i18n/version' +require 'i18n/exceptions' +require 'i18n/interpolate/ruby' + +module I18n + autoload :Backend, 'i18n/backend' + autoload :Config, 'i18n/config' + autoload :Gettext, 'i18n/gettext' + autoload :Locale, 'i18n/locale' + autoload :Tests, 'i18n/tests' + autoload :Middleware, 'i18n/middleware' + + RESERVED_KEYS = %i[ + cascade + deep_interpolation + default + exception_handler + fallback + fallback_in_progress + format + object + raise + resolve + scope + separator + throw + ].freeze + RESERVED_KEYS_PATTERN = /%\{(#{RESERVED_KEYS.join("|")})\}/ + EMPTY_HASH = {}.freeze + + def self.new_double_nested_cache # :nodoc: + Concurrent::Map.new { |h,k| h[k] = Concurrent::Map.new } + end + + module Base + # Gets I18n configuration object. + def config + Thread.current[:i18n_config] ||= I18n::Config.new + end + + # Sets I18n configuration object. + def config=(value) + Thread.current[:i18n_config] = value + end + + # Write methods which delegates to the configuration object + %w(locale backend default_locale available_locales default_separator + exception_handler load_path enforce_available_locales).each do |method| + module_eval <<-DELEGATORS, __FILE__, __LINE__ + 1 + def #{method} + config.#{method} + end + + def #{method}=(value) + config.#{method} = (value) + end + DELEGATORS + end + + # Tells the backend to reload translations. Used in situations like the + # Rails development environment. Backends can implement whatever strategy + # is useful. + def reload! + config.clear_available_locales_set + config.backend.reload! + end + + # Tells the backend to load translations now. Used in situations like the + # Rails production environment. Backends can implement whatever strategy + # is useful. + def eager_load! + config.backend.eager_load! + end + + # Translates, pluralizes and interpolates a given key using a given locale, + # scope, and default, as well as interpolation values. + # + # *LOOKUP* + # + # Translation data is organized as a nested hash using the upper-level keys + # as namespaces. E.g., ActionView ships with the translation: + # :date => {:formats => {:short => "%b %d"}}. + # + # Translations can be looked up at any level of this hash using the key argument + # and the scope option. E.g., in this example I18n.t :date + # returns the whole translations hash {:formats => {:short => "%b %d"}}. + # + # Key can be either a single key or a dot-separated key (both Strings and Symbols + # work). E.g., the short format can be looked up using both: + # I18n.t 'date.formats.short' + # I18n.t :'date.formats.short' + # + # Scope can be either a single key, a dot-separated key or an array of keys + # or dot-separated keys. Keys and scopes can be combined freely. So these + # examples will all look up the same short date format: + # I18n.t 'date.formats.short' + # I18n.t 'formats.short', :scope => 'date' + # I18n.t 'short', :scope => 'date.formats' + # I18n.t 'short', :scope => %w(date formats) + # + # *INTERPOLATION* + # + # Translations can contain interpolation variables which will be replaced by + # values passed to #translate as part of the options hash, with the keys matching + # the interpolation variable names. 
+ # + # E.g., with a translation :foo => "foo %{bar}" the option + # value for the key +bar+ will be interpolated into the translation: + # I18n.t :foo, :bar => 'baz' # => 'foo baz' + # + # *PLURALIZATION* + # + # Translation data can contain pluralized translations. Pluralized translations + # are arrays of singular/plural versions of translations like ['Foo', 'Foos']. + # + # Note that I18n::Backend::Simple only supports an algorithm for English + # pluralization rules. Other algorithms can be supported by custom backends. + # + # This returns the singular version of a pluralized translation: + # I18n.t :foo, :count => 1 # => 'Foo' + # + # These both return the plural version of a pluralized translation: + # I18n.t :foo, :count => 0 # => 'Foos' + # I18n.t :foo, :count => 2 # => 'Foos' + # + # The :count option can be used both for pluralization and interpolation. + # E.g., with the translation + # :foo => ['%{count} foo', '%{count} foos'], count will + # be interpolated to the pluralized translation: + # I18n.t :foo, :count => 1 # => '1 foo' + # + # *DEFAULTS* + # + # This returns the translation for :foo or default if no translation was found: + # I18n.t :foo, :default => 'default' + # + # This returns the translation for :foo or the translation for :bar if no + # translation for :foo was found: + # I18n.t :foo, :default => :bar + # + # Returns the translation for :foo or the translation for :bar + # or default if no translations for :foo and :bar were found. + # I18n.t :foo, :default => [:bar, 'default'] + # + # *BULK LOOKUP* + # + # This returns an array with the translations for :foo and :bar. + # I18n.t [:foo, :bar] + # + # Can be used with dot-separated nested keys: + # I18n.t [:'baz.foo', :'baz.bar'] + # + # Which is the same as using a scope option: + # I18n.t [:foo, :bar], :scope => :baz + # + # *LAMBDAS* + # + # Both translations and defaults can be given as Ruby lambdas. Lambdas will be + # called and passed the key and options. + # + # E.g. assuming the key :salutation resolves to: + # lambda { |key, options| options[:gender] == 'm' ? "Mr. #{options[:name]}" : "Mrs. #{options[:name]}" } + # + # Then I18n.t(:salutation, :gender => 'w', :name => 'Smith') will result in "Mrs. Smith". + # + # Note that the string returned by lambda will go through string interpolation too, + # so the following lambda would give the same result: + # lambda { |key, options| options[:gender] == 'm' ? "Mr. %{name}" : "Mrs. %{name}" } + # + # It is recommended to use/implement lambdas in an "idempotent" way. E.g. when + # a cache layer is put in front of I18n.translate it will generate a cache key + # from the argument values passed to #translate. Therefore your lambdas should + # always return the same translations/values per unique combination of argument + # values. + # + # *Ruby 2.7+ keyword arguments warning* + # + # This method uses keyword arguments. + # There is a breaking change in ruby that produces warning with ruby 2.7 and won't work as expected with ruby 3.0 + # The "hash" parameter must be passed as keyword argument. 
+ # + # Good: + # I18n.t(:salutation, :gender => 'w', :name => 'Smith') + # I18n.t(:salutation, **{ :gender => 'w', :name => 'Smith' }) + # I18n.t(:salutation, **any_hash) + # + # Bad: + # I18n.t(:salutation, { :gender => 'w', :name => 'Smith' }) + # I18n.t(:salutation, any_hash) + # + def translate(key = nil, *, throw: false, raise: false, locale: nil, **options) # TODO deprecate :raise + locale ||= config.locale + raise Disabled.new('t') if locale == false + enforce_available_locales!(locale) + + backend = config.backend + + result = catch(:exception) do + if key.is_a?(Array) + key.map { |k| backend.translate(locale, k, options) } + else + backend.translate(locale, key, options) + end + end + + if result.is_a?(MissingTranslation) + handle_exception((throw && :throw || raise && :raise), result, locale, key, options) + else + result + end + end + alias :t :translate + + # Wrapper for translate that adds :raise => true. With + # this option, if no translation is found, it will raise I18n::MissingTranslationData + def translate!(key, options = EMPTY_HASH) + translate(key, **options.merge(:raise => true)) + end + alias :t! :translate! + + # Returns true if a translation exists for a given key, otherwise returns false. + def exists?(key, _locale = nil, locale: _locale, **options) + locale ||= config.locale + raise Disabled.new('exists?') if locale == false + raise I18n::ArgumentError if key.is_a?(String) && key.empty? + config.backend.exists?(locale, key, options) + end + + # Transliterates UTF-8 characters to ASCII. By default this method will + # transliterate only Latin strings to an ASCII approximation: + # + # I18n.transliterate("Ærøskøbing") + # # => "AEroskobing" + # + # I18n.transliterate("日本語") + # # => "???" + # + # It's also possible to add support for per-locale transliterations. I18n + # expects transliteration rules to be stored at + # i18n.transliterate.rule. + # + # Transliteration rules can either be a Hash or a Proc. Procs must accept a + # single string argument. Hash rules inherit the default transliteration + # rules, while Procs do not. + # + # *Examples* + # + # Setting a Hash in .yml: + # + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # Setting a Hash using Ruby: + # + # store_translations(:de, :i18n => { + # :transliterate => { + # :rule => { + # "ü" => "ue", + # "ö" => "oe" + # } + # } + # ) + # + # Setting a Proc: + # + # translit = lambda {|string| MyTransliterator.transliterate(string) } + # store_translations(:xx, :i18n => {:transliterate => {:rule => translit}) + # + # Transliterating strings: + # + # I18n.locale = :en + # I18n.transliterate("Jürgen") # => "Jurgen" + # I18n.locale = :de + # I18n.transliterate("Jürgen") # => "Juergen" + # I18n.transliterate("Jürgen", :locale => :en) # => "Jurgen" + # I18n.transliterate("Jürgen", :locale => :de) # => "Juergen" + def transliterate(key, *, throw: false, raise: false, locale: nil, replacement: nil, **options) + locale ||= config.locale + raise Disabled.new('transliterate') if locale == false + enforce_available_locales!(locale) + + config.backend.transliterate(locale, key, replacement) + rescue I18n::ArgumentError => exception + handle_exception((throw && :throw || raise && :raise), exception, locale, key, options) + end + + # Localizes certain objects, such as dates and numbers to local formatting. 
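+ # + # For example, assuming date/time format translations have been loaded (e.g. from the rails-i18n gem; the i18n gem itself ships no formats): + # I18n.l Time.now # uses the :default time format + # I18n.l Date.today, :format => :short # uses the :short date format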
+ def localize(object, locale: nil, format: nil, **options) + locale ||= config.locale + raise Disabled.new('l') if locale == false + enforce_available_locales!(locale) + + format ||= :default + config.backend.localize(locale, object, format, options) + end + alias :l :localize + + # Executes block with given I18n.locale set. + def with_locale(tmp_locale = nil) + if tmp_locale == nil + yield + else + current_locale = self.locale + self.locale = tmp_locale + begin + yield + ensure + self.locale = current_locale + end + end + end + + # Merges the given locale, key and scope into a single array of keys. + # Splits keys that contain dots into multiple keys. Makes sure all + # keys are Symbols. + def normalize_keys(locale, key, scope, separator = nil) + separator ||= I18n.default_separator + + keys = [] + keys.concat normalize_key(locale, separator) + keys.concat normalize_key(scope, separator) + keys.concat normalize_key(key, separator) + keys + end + + # Returns true when the passed locale, which can be either a String or a + # Symbol, is in the list of available locales. Returns false otherwise. + def locale_available?(locale) + I18n.config.available_locales_set.include?(locale) + end + + # Raises an InvalidLocale exception when the passed locale is not available. + def enforce_available_locales!(locale) + if locale != false && config.enforce_available_locales + raise I18n::InvalidLocale.new(locale) if !locale_available?(locale) + end + end + + def available_locales_initialized? + config.available_locales_initialized? + end + + private + + # Any exceptions thrown in translate will be sent to the @@exception_handler + # which can be a Symbol, a Proc or any other Object unless they're forced to + # be raised or thrown (MissingTranslation). + # + # If exception_handler is a Symbol then it will simply be sent to I18n as + # a method call. A Proc will simply be called. In any other case the + # method #call will be called on the exception_handler object. + # + # Examples: + # + # I18n.exception_handler = :custom_exception_handler # this is the default + # I18n.custom_exception_handler(exception, locale, key, options) # will be called like this + # + # I18n.exception_handler = lambda { |*args| ... } # a lambda + # I18n.exception_handler.call(exception, locale, key, options) # will be called like this + # + # I18n.exception_handler = I18nExceptionHandler.new # an object + # I18n.exception_handler.call(exception, locale, key, options) # will be called like this + def handle_exception(handling, exception, locale, key, options) + case handling + when :raise + raise exception.respond_to?(:to_exception) ? exception.to_exception : exception + when :throw + throw :exception, exception + else + case handler = options[:exception_handler] || config.exception_handler + when Symbol + send(handler, exception, locale, key, options) + else + handler.call(exception, locale, key, options) + end + end + end + + @@normalized_key_cache = I18n.new_double_nested_cache + + def normalize_key(key, separator) + @@normalized_key_cache[separator][key] ||= + case key + when Array + key.flat_map { |k| normalize_key(k, separator) } + else + keys = key.to_s.split(separator) + keys.delete('') + keys.map! 
do |k| + case k + when /\A[-+]?[1-9]\d*\z/ # integer + k.to_i + when 'true' + true + when 'false' + false + else + k.to_sym + end + end + keys + end + end + end + + extend Base +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend.rb new file mode 100644 index 0000000..453ed71 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module I18n + module Backend + autoload :Base, 'i18n/backend/base' + autoload :InterpolationCompiler, 'i18n/backend/interpolation_compiler' + autoload :Cache, 'i18n/backend/cache' + autoload :CacheFile, 'i18n/backend/cache_file' + autoload :Cascade, 'i18n/backend/cascade' + autoload :Chain, 'i18n/backend/chain' + autoload :Fallbacks, 'i18n/backend/fallbacks' + autoload :Flatten, 'i18n/backend/flatten' + autoload :Gettext, 'i18n/backend/gettext' + autoload :KeyValue, 'i18n/backend/key_value' + autoload :Memoize, 'i18n/backend/memoize' + autoload :Metadata, 'i18n/backend/metadata' + autoload :Pluralization, 'i18n/backend/pluralization' + autoload :Simple, 'i18n/backend/simple' + autoload :Transliterator, 'i18n/backend/transliterator' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/base.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/base.rb new file mode 100644 index 0000000..b04a259 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/base.rb @@ -0,0 +1,285 @@ +# frozen_string_literal: true + +require 'yaml' +require 'json' +require 'i18n/core_ext/hash' + +module I18n + module Backend + module Base + using I18n::HashRefinements + include I18n::Backend::Transliterator + + # Accepts a list of paths to translation files. Loads translations from + # plain Ruby (*.rb), YAML files (*.yml), or JSON files (*.json). See #load_rb, #load_yml, and #load_json + # for details. + def load_translations(*filenames) + filenames = I18n.load_path if filenames.empty? + filenames.flatten.each { |filename| load_file(filename) } + end + + # This method receives a locale, a data hash and options for storing translations. + # Should be implemented + def store_translations(locale, data, options = EMPTY_HASH) + raise NotImplementedError + end + + def translate(locale, key, options = EMPTY_HASH) + raise I18n::ArgumentError if (key.is_a?(String) || key.is_a?(Symbol)) && key.empty? + raise InvalidLocale.new(locale) unless locale + return nil if key.nil? && !options.key?(:default) + + entry = lookup(locale, key, options[:scope], options) unless key.nil? + + if entry.nil? && options.key?(:default) + entry = default(locale, key, options[:default], options) + else + entry = resolve(locale, key, entry, options) + end + + count = options[:count] + + if entry.nil? && (subtrees? || !count) + if (options.key?(:default) && !options[:default].nil?) || !options.key?(:default) + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + end + + entry = entry.dup if entry.is_a?(String) + entry = pluralize(locale, entry, count) if count + + if entry.nil? && !subtrees? 
+ throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + deep_interpolation = options[:deep_interpolation] + values = options.except(*RESERVED_KEYS) + if values + entry = if deep_interpolation + deep_interpolate(locale, entry, values) + else + interpolate(locale, entry, values) + end + end + entry + end + + def exists?(locale, key, options = EMPTY_HASH) + lookup(locale, key) != nil + end + + # Acts the same as +strftime+, but uses a localized version of the + # format string. Takes a key from the date/time formats translations as + # a format argument (e.g., :short in :'date.formats'). + def localize(locale, object, format = :default, options = EMPTY_HASH) + if object.nil? && options.include?(:default) + return options[:default] + end + raise ArgumentError, "Object must be a Date, DateTime or Time object. #{object.inspect} given." unless object.respond_to?(:strftime) + + if Symbol === format + key = format + type = object.respond_to?(:sec) ? 'time' : 'date' + options = options.merge(:raise => true, :object => object, :locale => locale) + format = I18n.t(:"#{type}.formats.#{key}", **options) + end + + format = translate_localization_format(locale, object, format, options) + object.strftime(format) + end + + # Returns an array of locales for which translations are available + # ignoring the reserved translation meta data key :i18n. + def available_locales + raise NotImplementedError + end + + def reload! + eager_load! if eager_loaded? + end + + def eager_load! + @eager_loaded = true + end + + protected + + def eager_loaded? + @eager_loaded ||= false + end + + # The method which actually looks up for the translation in the store. + def lookup(locale, key, scope = [], options = EMPTY_HASH) + raise NotImplementedError + end + + def subtrees? + true + end + + # Evaluates defaults. + # If given subject is an Array, it walks the array and returns the + # first translation that can be resolved. Otherwise it tries to resolve + # the translation directly. + def default(locale, object, subject, options = EMPTY_HASH) + options = options.reject { |key, value| key == :default } + case subject + when Array + subject.each do |item| + result = resolve(locale, object, item, options) + return result unless result.nil? + end and nil + else + resolve(locale, object, subject, options) + end + end + + # Resolves a translation. + # If the given subject is a Symbol, it will be translated with the + # given options. If it is a Proc then it will be evaluated. All other + # subjects will be returned directly. + def resolve(locale, object, subject, options = EMPTY_HASH) + return subject if options[:resolve] == false + result = catch(:exception) do + case subject + when Symbol + I18n.translate(subject, **options.merge(:locale => locale, :throw => true)) + when Proc + date_or_time = options.delete(:object) || object + resolve(locale, object, subject.call(date_or_time, **options)) + else + subject + end + end + result unless result.is_a?(MissingTranslation) + end + + # Picks a translation from a pluralized mnemonic subkey according to English + # pluralization rules : + # - It will pick the :one subkey if count is equal to 1. + # - It will pick the :other subkey otherwise. + # - It will pick the :zero subkey in the special case where count is + # equal to 0 and there is a :zero subkey present. This behaviour is + # not standard with regards to the CLDR pluralization rules. + # Other backends can implement more flexible or complex pluralization rules. 
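+ # + # For example, given the illustrative entry + # { :zero => 'none', :one => 'one message', :other => '%{count} messages' } + # a count of 0 picks :zero, a count of 1 picks :one and any other count picks :other.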
+ def pluralize(locale, entry, count) + entry = entry.reject { |k, _v| k == :attributes } if entry.is_a?(Hash) + return entry unless entry.is_a?(Hash) && count && entry.values.none? { |v| v.is_a?(Hash) } + + key = pluralization_key(entry, count) + raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) + entry[key] + end + + # Interpolates values into a given subject. + # + # if the given subject is a string then: + # method interpolates "file %{file} opened by %%{user}", :file => 'test.txt', :user => 'Mr. X' + # # => "file test.txt opened by %{user}" + # + # if the given subject is an array then: + # each element of the array is recursively interpolated (until it finds a string) + # method interpolates ["yes, %{user}", ["maybe no, %{user}, "no, %{user}"]], :user => "bartuz" + # # => "["yes, bartuz",["maybe no, bartuz", "no, bartuz"]]" + def interpolate(locale, subject, values = EMPTY_HASH) + return subject if values.empty? + + case subject + when ::String then I18n.interpolate(subject, values) + when ::Array then subject.map { |element| interpolate(locale, element, values) } + else + subject + end + end + + # Deep interpolation + # + # deep_interpolate { people: { ann: "Ann is %{ann}", john: "John is %{john}" } }, + # ann: 'good', john: 'big' + # #=> { people: { ann: "Ann is good", john: "John is big" } } + def deep_interpolate(locale, data, values = EMPTY_HASH) + return data if values.empty? + + case data + when ::String + I18n.interpolate(data, values) + when ::Hash + data.each_with_object({}) do |(k, v), result| + result[k] = deep_interpolate(locale, v, values) + end + when ::Array + data.map do |v| + deep_interpolate(locale, v, values) + end + else + data + end + end + + # Loads a single translations file by delegating to #load_rb or + # #load_yml depending on the file extension and directly merges the + # data to the existing translations. Raises I18n::UnknownFileType + # for all other file extensions. + def load_file(filename) + type = File.extname(filename).tr('.', '').downcase + raise UnknownFileType.new(type, filename) unless respond_to?(:"load_#{type}", true) + data = send(:"load_#{type}", filename) + unless data.is_a?(Hash) + raise InvalidLocaleData.new(filename, 'expects it to return a hash, but does not') + end + data.each { |locale, d| store_translations(locale, d || {}) } + end + + # Loads a plain Ruby translations file. eval'ing the file must yield + # a Hash containing translation data with locales as toplevel keys. + def load_rb(filename) + eval(IO.read(filename), binding, filename) + end + + # Loads a YAML translations file. The data must have locales as + # toplevel keys. + def load_yml(filename) + begin + YAML.load_file(filename) + rescue TypeError, ScriptError, StandardError => e + raise InvalidLocaleData.new(filename, e.inspect) + end + end + alias_method :load_yaml, :load_yml + + # Loads a JSON translations file. The data must have locales as + # toplevel keys. 
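+ # + # For example (illustrative file contents): + # { "en": { "hello": "Hello" } }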
+ def load_json(filename) + begin + ::JSON.parse(File.read(filename)) + rescue TypeError, StandardError => e + raise InvalidLocaleData.new(filename, e.inspect) + end + end + + def translate_localization_format(locale, object, format, options) + format.to_s.gsub(/%(|\^)[aAbBpP]/) do |match| + case match + when '%a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday] + when '%^a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday].upcase + when '%A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday] + when '%^A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday].upcase + when '%b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon] + when '%^b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon].upcase + when '%B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon] + when '%^B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon].upcase + when '%p' then I18n.t!(:"time.#{object.hour < 12 ? :am : :pm}", :locale => locale, :format => format).upcase if object.respond_to? :hour + when '%P' then I18n.t!(:"time.#{object.hour < 12 ? :am : :pm}", :locale => locale, :format => format).downcase if object.respond_to? :hour + end + end + rescue MissingTranslationData => e + e.message + end + + def pluralization_key(entry, count) + key = :zero if count == 0 && entry.has_key?(:zero) + key ||= count == 1 ? :one : :other + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cache.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cache.rb new file mode 100644 index 0000000..41b58fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cache.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +# This module allows you to easily cache all responses from the backend - thus +# speeding up the I18n aspects of your application quite a bit. +# +# To enable caching you can simply include the Cache module to the Simple +# backend - or whatever other backend you are using: +# +# I18n::Backend::Simple.send(:include, I18n::Backend::Cache) +# +# You will also need to set a cache store implementation that you want to use: +# +# I18n.cache_store = ActiveSupport::Cache.lookup_store(:memory_store) +# +# You can use any cache implementation you want that provides the same API as +# ActiveSupport::Cache (only the methods #fetch and #write are being used). +# +# The cache_key implementation by default assumes you pass values that return +# a valid key from #hash (see +# http://www.ruby-doc.org/core/classes/Object.html#M000337). However, you can +# configure your own digest method via which responds to #hexdigest (see +# http://ruby-doc.org/stdlib/libdoc/digest/rdoc/index.html): +# +# I18n.cache_key_digest = Digest::MD5.new +# +# If you use a lambda as a default value in your translation like this: +# +# I18n.t(:"date.order", :default => lambda {[:month, :day, :year]}) +# +# Then you will always have a cache miss, because each time this method +# is called the lambda will have a different hash value. If you know +# the result of the lambda is a constant as in the example above, then +# to cache this you can make the lambda a constant, like this: +# +# DEFAULT_DATE_ORDER = lambda {[:month, :day, :year]} +# ... 
+# I18n.t(:"date.order", :default => DEFAULT_DATE_ORDER) +# +# If the lambda may result in different values for each call then consider +# also using the Memoize backend. +# +module I18n + class << self + @@cache_store = nil + @@cache_namespace = nil + @@cache_key_digest = nil + + def cache_store + @@cache_store + end + + def cache_store=(store) + @@cache_store = store + end + + def cache_namespace + @@cache_namespace + end + + def cache_namespace=(namespace) + @@cache_namespace = namespace + end + + def cache_key_digest + @@cache_key_digest + end + + def cache_key_digest=(key_digest) + @@cache_key_digest = key_digest + end + + def perform_caching? + !cache_store.nil? + end + end + + module Backend + # TODO Should the cache be cleared if new translations are stored? + module Cache + def translate(locale, key, options = EMPTY_HASH) + I18n.perform_caching? ? fetch(cache_key(locale, key, options)) { super } : super + end + + protected + + def fetch(cache_key, &block) + result = _fetch(cache_key, &block) + throw(:exception, result) if result.is_a?(MissingTranslation) + result = result.dup if result.frozen? rescue result + result + end + + def _fetch(cache_key, &block) + result = I18n.cache_store.read(cache_key) + return result unless result.nil? + result = catch(:exception, &block) + I18n.cache_store.write(cache_key, result) unless result.is_a?(Proc) + result + end + + def cache_key(locale, key, options) + # This assumes that only simple, native Ruby values are passed to I18n.translate. + "i18n/#{I18n.cache_namespace}/#{locale}/#{digest_item(key)}/#{digest_item(options)}" + end + + private + + def digest_item(key) + I18n.cache_key_digest ? I18n.cache_key_digest.hexdigest(key.to_s) : key.to_s.hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cache_file.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cache_file.rb new file mode 100644 index 0000000..4dafb3e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cache_file.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require 'digest/sha2' + +module I18n + module Backend + # Overwrites the Base load_file method to cache loaded file contents. + module CacheFile + # Optionally provide path_roots array to normalize filename paths, + # to make the cached i18n data portable across environments. + attr_accessor :path_roots + + protected + + # Track loaded translation files in the `i18n.load_file` scope, + # and skip loading the file if its contents are still up-to-date. + def load_file(filename) + initialized = !respond_to?(:initialized?) || initialized? + key = I18n::Backend::Flatten.escape_default_separator(normalized_path(filename)) + old_mtime, old_digest = initialized && lookup(:i18n, key, :load_file) + return if (mtime = File.mtime(filename).to_i) == old_mtime || + (digest = Digest::SHA2.file(filename).hexdigest) == old_digest + super + store_translations(:i18n, load_file: { key => [mtime, digest] }) + end + + # Translate absolute filename to relative path for i18n key. 
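+ # + # For example, with path_roots = ['/app'] (illustrative), the file + # '/app/config/locales/en.yml' is stored under the key '0/config/locales/en.yml'.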
+ def normalized_path(file) + return file unless path_roots + path = path_roots.find(&file.method(:start_with?)) || + raise(InvalidLocaleData.new(file, 'outside expected path roots')) + file.sub(path, path_roots.index(path).to_s) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cascade.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cascade.rb new file mode 100644 index 0000000..782b07b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/cascade.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +# The Cascade module adds the ability to do cascading lookups to backends that +# are compatible to the Simple backend. +# +# By cascading lookups we mean that for any key that can not be found the +# Cascade module strips one segment off the scope part of the key and then +# tries to look up the key in that scope. +# +# E.g. when a lookup for the key :"foo.bar.baz" does not yield a result then +# the segment :bar will be stripped off the scope part :"foo.bar" and the new +# scope :foo will be used to look up the key :baz. If that does not succeed +# then the remaining scope segment :foo will be omitted, too, and again the +# key :baz will be looked up (now with no scope). +# +# To enable a cascading lookup one passes the :cascade option: +# +# I18n.t(:'foo.bar.baz', :cascade => true) +# +# This will return the first translation found for :"foo.bar.baz", :"foo.baz" +# or :baz in this order. +# +# The cascading lookup takes precedence over resolving any given defaults. +# I.e. defaults will kick in after the cascading lookups haven't succeeded. +# +# This behavior is useful for libraries like ActiveRecord validations where +# the library wants to give users a bunch of more or less fine-grained options +# of scopes for a particular key. +# +# Thanks to Clemens Kofler for the initial idea and implementation! See +# http://github.com/clemens/i18n-cascading-backend + +module I18n + module Backend + module Cascade + def lookup(locale, key, scope = [], options = EMPTY_HASH) + return super unless cascade = options[:cascade] + + cascade = { :step => 1 } unless cascade.is_a?(Hash) + step = cascade[:step] || 1 + offset = cascade[:offset] || 1 + separator = options[:separator] || I18n.default_separator + skip_root = cascade.has_key?(:skip_root) ? cascade[:skip_root] : true + + scope = I18n.normalize_keys(nil, key, scope, separator) + key = (scope.slice!(-offset, offset) || []).join(separator) + + begin + result = super + return result unless result.nil? + scope = scope.dup + end while (!scope.empty? || !skip_root) && scope.slice!(-step, step) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/chain.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/chain.rb new file mode 100644 index 0000000..9ab8575 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/chain.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +module I18n + module Backend + # Backend that chains multiple other backends and checks each of them when + # a translation needs to be looked up. This is useful when you want to use + # standard translations with a Simple backend but store custom application + # translations in a database or other backends. + # + # To use the Chain backend instantiate it and set it to the I18n module. 
+ # You can add chained backends through the initializer or backends + # accessor: + # + # # preserves the existing Simple backend set to I18n.backend + # I18n.backend = I18n::Backend::Chain.new(I18n::Backend::ActiveRecord.new, I18n.backend) + # + # The implementation assumes that all backends added to the Chain implement + # a lookup method with the same API as Simple backend does. + class Chain + using I18n::HashRefinements + + module Implementation + include Base + + attr_accessor :backends + + def initialize(*backends) + self.backends = backends + end + + def initialized? + backends.all? do |backend| + backend.instance_eval do + return false unless initialized? + end + end + true + end + + def reload! + backends.each { |backend| backend.reload! } + end + + def eager_load! + backends.each { |backend| backend.eager_load! } + end + + def store_translations(locale, data, options = EMPTY_HASH) + backends.first.store_translations(locale, data, options) + end + + def available_locales + backends.map { |backend| backend.available_locales }.flatten.uniq + end + + def translate(locale, key, default_options = EMPTY_HASH) + namespace = nil + options = default_options.except(:default) + + backends.each do |backend| + catch(:exception) do + options = default_options if backend == backends.last + translation = backend.translate(locale, key, options) + if namespace_lookup?(translation, options) + namespace = _deep_merge(translation, namespace || {}) + elsif !translation.nil? || (options.key?(:default) && options[:default].nil?) + return translation + end + end + end + + return namespace if namespace + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + def exists?(locale, key, options = EMPTY_HASH) + backends.any? do |backend| + backend.exists?(locale, key, options) + end + end + + def localize(locale, object, format = :default, options = EMPTY_HASH) + backends.each do |backend| + catch(:exception) do + result = backend.localize(locale, object, format, options) and return result + end + end + throw(:exception, I18n::MissingTranslation.new(locale, format, options)) + end + + protected + def init_translations + backends.each do |backend| + backend.send(:init_translations) + end + end + + def translations + backends.reverse.each_with_object({}) do |backend, memo| + partial_translations = backend.instance_eval do + init_translations unless initialized? + translations + end + memo.deep_merge!(partial_translations) { |_, a, b| b || a } + end + end + + def namespace_lookup?(result, options) + result.is_a?(Hash) && !options.has_key?(:count) + end + + private + # This is approximately what gets used in ActiveSupport. + # However since we are not guaranteed to run in an ActiveSupport context + # it is wise to have our own copy. We underscore it + # to not pollute the namespace of the including class. + def _deep_merge(hash, other_hash) + copy = hash.dup + other_hash.each_pair do |k,v| + value_from_other = hash[k] + copy[k] = value_from_other.is_a?(Hash) && v.is_a?(Hash) ? 
_deep_merge(value_from_other, v) : v + end + copy + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/fallbacks.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/fallbacks.rb new file mode 100644 index 0000000..ce8635b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/fallbacks.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +# I18n locale fallbacks are useful when you want your application to use +# translations from other locales when translations for the current locale are +# missing. E.g. you might want to use :en translations when translations in +# your applications main locale :de are missing. +# +# To enable locale fallbacks you can simply include the Fallbacks module to +# the Simple backend - or whatever other backend you are using: +# +# I18n::Backend::Simple.include(I18n::Backend::Fallbacks) +module I18n + @@fallbacks = nil + + class << self + # Returns the current fallbacks implementation. Defaults to +I18n::Locale::Fallbacks+. + def fallbacks + @@fallbacks ||= I18n::Locale::Fallbacks.new + end + + # Sets the current fallbacks implementation. Use this to set a different fallbacks implementation. + def fallbacks=(fallbacks) + @@fallbacks = fallbacks.is_a?(Array) ? I18n::Locale::Fallbacks.new(fallbacks) : fallbacks + end + end + + module Backend + module Fallbacks + # Overwrites the Base backend translate method so that it will try each + # locale given by I18n.fallbacks for the given locale. E.g. for the + # locale :"de-DE" it might try the locales :"de-DE", :de and :en + # (depends on the fallbacks implementation) until it finds a result with + # the given options. If it does not find any result for any of the + # locales it will then throw MissingTranslation as usual. + # + # The default option takes precedence over fallback locales only when + # it's a Symbol. When the default contains a String, Proc or Hash + # it is evaluated last after all the fallback locales have been tried. + def translate(locale, key, options = EMPTY_HASH) + return super unless options.fetch(:fallback, true) + return super if options[:fallback_in_progress] + default = extract_non_symbol_default!(options) if options[:default] + + fallback_options = options.merge(:fallback_in_progress => true) + I18n.fallbacks[locale].each do |fallback| + begin + catch(:exception) do + result = super(fallback, key, fallback_options) + unless result.nil? + on_fallback(locale, fallback, key, options) if locale != fallback + return result + end + end + rescue I18n::InvalidLocale + # we do nothing when the locale is invalid, as this is a fallback anyways. + end + end + + return if options.key?(:default) && options[:default].nil? + + return super(locale, nil, options.merge(:default => default)) if default + throw(:exception, I18n::MissingTranslation.new(locale, key, options)) + end + + def extract_non_symbol_default!(options) + defaults = [options[:default]].flatten + first_non_symbol_default = defaults.detect{|default| !default.is_a?(Symbol)} + if first_non_symbol_default + options[:default] = defaults[0, defaults.index(first_non_symbol_default)] + end + return first_non_symbol_default + end + + def exists?(locale, key, options = EMPTY_HASH) + return super unless options.fetch(:fallback, true) + I18n.fallbacks[locale].each do |fallback| + begin + return true if super(fallback, key) + rescue I18n::InvalidLocale + # we do nothing when the locale is invalid, as this is a fallback anyways. 
+ end + end + + false + end + + private + + # Overwrite on_fallback to add specified logic when the fallback succeeds. + def on_fallback(_original_locale, _fallback_locale, _key, _options) + nil + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/flatten.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/flatten.rb new file mode 100644 index 0000000..e9bd9d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/flatten.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +module I18n + module Backend + # This module contains several helpers to assist flattening translations. + # You may want to flatten translations for: + # + # 1) speed up lookups, as in the Memoize backend; + # 2) In case you want to store translations in a data store, as in ActiveRecord backend; + # + # You can check both backends above for some examples. + # This module also keeps all links in a hash so they can be properly resolved when flattened. + module Flatten + SEPARATOR_ESCAPE_CHAR = "\001" + FLATTEN_SEPARATOR = "." + + # normalize_keys the flatten way. This method is significantly faster + # and creates way less objects than the one at I18n.normalize_keys. + # It also handles escaping the translation keys. + def self.normalize_flat_keys(locale, key, scope, separator) + keys = [scope, key] + keys.flatten! + keys.compact! + + separator ||= I18n.default_separator + + if separator != FLATTEN_SEPARATOR + from_str = "#{FLATTEN_SEPARATOR}#{separator}" + to_str = "#{SEPARATOR_ESCAPE_CHAR}#{FLATTEN_SEPARATOR}" + + keys.map! { |k| k.to_s.tr from_str, to_str } + end + + keys.join(".") + end + + # Receives a string and escapes the default separator. + def self.escape_default_separator(key) #:nodoc: + key.to_s.tr(FLATTEN_SEPARATOR, SEPARATOR_ESCAPE_CHAR) + end + + # Shortcut to I18n::Backend::Flatten.normalize_flat_keys + # and then resolve_links. + def normalize_flat_keys(locale, key, scope, separator) + key = I18n::Backend::Flatten.normalize_flat_keys(locale, key, scope, separator) + resolve_link(locale, key) + end + + # Store flattened links. + def links + @links ||= I18n.new_double_nested_cache + end + + # Flatten keys for nested Hashes by chaining up keys: + # + # >> { "a" => { "b" => { "c" => "d", "e" => "f" }, "g" => "h" }, "i" => "j"}.wind + # => { "a.b.c" => "d", "a.b.e" => "f", "a.g" => "h", "i" => "j" } + # + def flatten_keys(hash, escape, prev_key=nil, &block) + hash.each_pair do |key, value| + key = escape_default_separator(key) if escape + curr_key = [prev_key, key].compact.join(FLATTEN_SEPARATOR).to_sym + yield curr_key, value + flatten_keys(value, escape, curr_key, &block) if value.is_a?(Hash) + end + end + + # Receives a hash of translations (where the key is a locale and + # the value is another hash) and returns a hash with all + # translations flattened. + # + # Nested hashes are included in the flattened hash only if subtree + # is true and Symbols are automatically stored as links.
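+ # + # For example (illustrative): + # flatten_translations(:en, { :a => { :b => 'c' } }, false, false) + # # => { :"a.b" => 'c' }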
+ def flatten_translations(locale, data, escape, subtree) + hash = {} + flatten_keys(data, escape) do |key, value| + if value.is_a?(Hash) + hash[key] = value if subtree + else + store_link(locale, key, value) if value.is_a?(Symbol) + hash[key] = value + end + end + hash + end + + protected + + def store_link(locale, key, link) + links[locale.to_sym][key.to_s] = link.to_s + end + + def resolve_link(locale, key) + key, locale = key.to_s, locale.to_sym + links = self.links[locale] + + if links.key?(key) + links[key] + elsif link = find_link(locale, key) + store_link(locale, key, key.gsub(*link)) + else + key + end + end + + def find_link(locale, key) #:nodoc: + links[locale].each_pair do |from, to| + return [from, to] if key[0, from.length] == from + end && nil + end + + def escape_default_separator(key) #:nodoc: + I18n::Backend::Flatten.escape_default_separator(key) + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/gettext.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/gettext.rb new file mode 100644 index 0000000..72d20f0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/gettext.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require 'i18n/gettext' +require 'i18n/gettext/po_parser' + +module I18n + module Backend + # Experimental support for using Gettext po files to store translations. + # + # To use this you can simply include the module to the Simple backend - or + # whatever other backend you are using. + # + # I18n::Backend::Simple.include(I18n::Backend::Gettext) + # + # Now you should be able to include your Gettext translation (*.po) files to + # the +I18n.load_path+ so they're loaded to the backend and you can use them as + # usual: + # + # I18n.load_path += Dir["path/to/locales/*.po"] + # + # Following the Gettext convention this implementation expects that your + # translation files are named by their locales. E.g. the file en.po would + # contain the translations for the English locale. + # + # To translate text you must use one of the translate methods provided by + # I18n::Gettext::Helpers. + # + # include I18n::Gettext::Helpers + # puts _("some string") + # + # Without it strings containing periods (".") will not be translated. + + module Gettext + using I18n::HashRefinements + + class PoData < Hash + def set_comment(msgid_or_sym, comment) + # ignore + end + end + + protected + def load_po(filename) + locale = ::File.basename(filename, '.po').to_sym + data = normalize(locale, parse(filename)) + { locale => data } + end + + def parse(filename) + GetText::PoParser.new.parse(::File.read(filename), PoData.new) + end + + def normalize(locale, data) + data.inject({}) do |result, (key, value)| + unless key.nil? || key.empty? + key = key.gsub(I18n::Gettext::CONTEXT_SEPARATOR, '|') + key, value = normalize_pluralization(locale, key, value) if key.index("\000") + + parts = key.split('|').reverse + normalized = parts.inject({}) do |_normalized, part| + { part => _normalized.empty? ? 
value : _normalized } + end + + result.deep_merge!(normalized) + end + result + end + end + + def normalize_pluralization(locale, key, value) + # FIXME po_parser includes \000 chars that can not be turned into Symbols + key = key.gsub("\000", I18n::Gettext::PLURAL_SEPARATOR).split(I18n::Gettext::PLURAL_SEPARATOR).first + + keys = I18n::Gettext.plural_keys(locale) + values = value.split("\000") + raise "invalid number of plurals: #{values.size}, keys: #{keys.inspect} on #{locale} locale for msgid #{key.inspect} with values #{values.inspect}" if values.size != keys.size + + result = {} + values.each_with_index { |_value, ix| result[keys[ix]] = _value } + [key, result] + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/interpolation_compiler.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/interpolation_compiler.rb new file mode 100644 index 0000000..8b52e7b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/interpolation_compiler.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +# The InterpolationCompiler module contains optimizations that can tremendously +# speed up the interpolation process on the Simple backend. +# +# It works by defining a pre-compiled method on stored translation Strings that +# already bring all the knowledge about contained interpolation variables etc. +# so that the actual recurring interpolation will be very fast. +# +# To enable pre-compiled interpolations you can simply include the +# InterpolationCompiler module to the Simple backend: +# +# I18n::Backend::Simple.include(I18n::Backend::InterpolationCompiler) +# +# Note that InterpolationCompiler does not yield meaningful results and consequently +# should not be used with Ruby 1.9 (YARV) but improves performance everywhere else +# (jRuby, Rubinius). +module I18n + module Backend + module InterpolationCompiler + module Compiler + extend self + + TOKENIZER = /(%%\{[^\}]+\}|%\{[^\}]+\})/ + INTERPOLATION_SYNTAX_PATTERN = /(%)?(%\{([^\}]+)\})/ + + def compile_if_an_interpolation(string) + if interpolated_str?(string) + string.instance_eval <<-RUBY_EVAL, __FILE__, __LINE__ + def i18n_interpolate(v = {}) + "#{compiled_interpolation_body(string)}" + end + RUBY_EVAL + end + + string + end + + def interpolated_str?(str) + str.kind_of?(::String) && str =~ INTERPOLATION_SYNTAX_PATTERN + end + + protected + # tokenize("foo %{bar} baz %%{buz}") # => ["foo ", "%{bar}", " baz ", "%%{buz}"] + def tokenize(str) + str.split(TOKENIZER) + end + + def compiled_interpolation_body(str) + tokenize(str).map do |token| + (matchdata = token.match(INTERPOLATION_SYNTAX_PATTERN)) ? handle_interpolation_token(token, matchdata) : escape_plain_str(token) + end.join + end + + def handle_interpolation_token(interpolation, matchdata) + escaped, pattern, key = matchdata.values_at(1, 2, 3) + escaped ? pattern : compile_interpolation_token(key.to_sym) + end + + def compile_interpolation_token(key) + "\#{#{interpolate_or_raise_missing(key)}}" + end + + def interpolate_or_raise_missing(key) + escaped_key = escape_key_sym(key) + RESERVED_KEYS.include?(key) ? reserved_key(escaped_key) : interpolate_key(escaped_key) + end + + def interpolate_key(key) + [direct_key(key), nil_key(key), missing_key(key)].join('||') + end + + def direct_key(key) + "((t = v[#{key}]) && t.respond_to?(:call) ? 
t.call : t)" + end + + def nil_key(key) + "(v.has_key?(#{key}) && '')" + end + + def missing_key(key) + "I18n.config.missing_interpolation_argument_handler.call(#{key}, v, self)" + end + + def reserved_key(key) + "raise(ReservedInterpolationKey.new(#{key}, self))" + end + + def escape_plain_str(str) + str.gsub(/"|\\|#/) {|x| "\\#{x}"} + end + + def escape_key_sym(key) + # rely on Ruby to do all the hard work :) + key.to_sym.inspect + end + end + + def interpolate(locale, string, values) + if string.respond_to?(:i18n_interpolate) + string.i18n_interpolate(values) + elsif values + super + else + string + end + end + + def store_translations(locale, data, options = EMPTY_HASH) + compile_all_strings_in(data) + super + end + + protected + def compile_all_strings_in(data) + data.each_value do |value| + Compiler.compile_if_an_interpolation(value) + compile_all_strings_in(value) if value.kind_of?(Hash) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/key_value.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/key_value.rb new file mode 100644 index 0000000..e391672 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/key_value.rb @@ -0,0 +1,206 @@ +# frozen_string_literal: true + +require 'i18n/backend/base' + +module I18n + + begin + require 'oj' + class JSON + class << self + def encode(value) + Oj::Rails.encode(value) + end + def decode(value) + Oj.load(value) + end + end + end + rescue LoadError + require 'active_support/json' + JSON = ActiveSupport::JSON + end + + module Backend + # This is a basic backend for key value stores. It receives on + # initialization the store, which should respond to three methods: + # + # * store#[](key) - Used to get a value + # * store#[]=(key, value) - Used to set a value + # * store#keys - Used to get all keys + # + # Since these stores only supports string, all values are converted + # to JSON before being stored, allowing it to also store booleans, + # hashes and arrays. However, this store does not support Procs. + # + # As the ActiveRecord backend, Symbols are just supported when loading + # translations from the filesystem or through explicit store translations. + # + # Also, avoid calling I18n.available_locales since it's a somehow + # expensive operation in most stores. + # + # == Example + # + # To setup I18n to use TokyoCabinet in memory is quite straightforward: + # + # require 'rufus/tokyo/cabinet' # gem install rufus-tokyo + # I18n.backend = I18n::Backend::KeyValue.new(Rufus::Tokyo::Cabinet.new('*')) + # + # == Performance + # + # You may make this backend even faster by including the Memoize module. + # However, notice that you should properly clear the cache if you change + # values directly in the key-store. + # + # == Subtrees + # + # In most backends, you are allowed to retrieve part of a translation tree: + # + # I18n.backend.store_translations :en, :foo => { :bar => :baz } + # I18n.t "foo" #=> { :bar => :baz } + # + # This backend supports this feature by default, but it slows down the storage + # of new data considerably and makes hard to delete entries. That said, you are + # allowed to disable the storage of subtrees on initialization: + # + # I18n::Backend::KeyValue.new(@store, false) + # + # This is useful if you are using a KeyValue backend chained to a Simple backend. 
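+ # + # As a quick illustrative setup, a plain Hash also satisfies the store contract (#[], #[]= and #keys), assuming a JSON encoder (oj or active_support/json) is available: + # + # I18n.backend = I18n::Backend::KeyValue.new({}) + # I18n.backend.store_translations(:en, :foo => 'Foo') + # I18n.t :foo # => 'Foo'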
+ class KeyValue + using I18n::HashRefinements + + module Implementation + attr_accessor :store + + include Base, Flatten + + def initialize(store, subtrees=true) + @store, @subtrees = store, subtrees + end + + def initialized? + !@store.nil? + end + + def store_translations(locale, data, options = EMPTY_HASH) + escape = options.fetch(:escape, true) + flatten_translations(locale, data, escape, @subtrees).each do |key, value| + key = "#{locale}.#{key}" + + case value + when Hash + if @subtrees && (old_value = @store[key]) + old_value = JSON.decode(old_value) + value = old_value.deep_symbolize_keys.deep_merge!(value) if old_value.is_a?(Hash) + end + when Proc + raise "Key-value stores cannot handle procs" + end + + @store[key] = JSON.encode(value) unless value.is_a?(Symbol) + end + end + + def available_locales + locales = @store.keys.map { |k| k =~ /\./; $` } + locales.uniq! + locales.compact! + locales.map! { |k| k.to_sym } + locales + end + + protected + + # Queries the translations from the key-value store and converts + # them into a hash such as the one returned from loading the + # haml files + def translations + @translations = @store.keys.clone.map do |main_key| + main_value = JSON.decode(@store[main_key]) + main_key.to_s.split(".").reverse.inject(main_value) do |value, key| + {key.to_sym => value} + end + end.inject{|hash, elem| hash.deep_merge!(elem)}.deep_symbolize_keys + end + + def init_translations + # NO OP + # This call made also inside Simple Backend and accessed by + # other plugins like I18n-js and babilu and + # to use it along with the Chain backend we need to + # provide a uniform API even for protected methods :S + end + + def subtrees? + @subtrees + end + + def lookup(locale, key, scope = [], options = EMPTY_HASH) + key = normalize_flat_keys(locale, key, scope, options[:separator]) + value = @store["#{locale}.#{key}"] + value = JSON.decode(value) if value + + if value.is_a?(Hash) + value.deep_symbolize_keys + elsif !value.nil? + value + elsif !@subtrees + SubtreeProxy.new("#{locale}.#{key}", @store) + end + end + + def pluralize(locale, entry, count) + if subtrees? + super + else + return entry unless entry.is_a?(Hash) + key = pluralization_key(entry, count) + entry[key] + end + end + end + + class SubtreeProxy + def initialize(master_key, store) + @master_key = master_key + @store = store + @subtree = nil + end + + def has_key?(key) + @subtree && @subtree.has_key?(key) || self[key] + end + + def [](key) + unless @subtree && value = @subtree[key] + value = @store["#{@master_key}.#{key}"] + if value + value = JSON.decode(value) + (@subtree ||= {})[key] = value + end + end + value + end + + def is_a?(klass) + Hash == klass || super + end + alias :kind_of? :is_a? + + def instance_of?(klass) + Hash == klass || super + end + + def nil? + @subtree.nil? + end + + def inspect + @subtree.inspect + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/memoize.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/memoize.rb new file mode 100644 index 0000000..3293d2b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/memoize.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +# Memoize module simply memoizes the values returned by lookup using +# a flat hash and can tremendously speed up the lookup process in a backend. 
+# +# To enable it you can simply include the Memoize module to your backend: +# +# I18n::Backend::Simple.include(I18n::Backend::Memoize) +# +# Notice that it's the responsibility of the backend to define whenever the +# cache should be cleaned. +module I18n + module Backend + module Memoize + def available_locales + @memoized_locales ||= super + end + + def store_translations(locale, data, options = EMPTY_HASH) + reset_memoizations!(locale) + super + end + + def reload! + reset_memoizations! + super + end + + def eager_load! + memoized_lookup + available_locales + super + end + + protected + + def lookup(locale, key, scope = nil, options = EMPTY_HASH) + flat_key = I18n::Backend::Flatten.normalize_flat_keys(locale, + key, scope, options[:separator]).to_sym + flat_hash = memoized_lookup[locale.to_sym] + flat_hash.key?(flat_key) ? flat_hash[flat_key] : (flat_hash[flat_key] = super) + end + + def memoized_lookup + @memoized_lookup ||= I18n.new_double_nested_cache + end + + def reset_memoizations!(locale=nil) + @memoized_locales = nil + (locale ? memoized_lookup[locale.to_sym] : memoized_lookup).clear + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/metadata.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/metadata.rb new file mode 100644 index 0000000..51ea7a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/metadata.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +# I18n translation metadata is useful when you want to access information +# about how a translation was looked up, pluralized or interpolated in +# your application. +# +# msg = I18n.t(:message, :default => 'Hi!', :scope => :foo) +# msg.translation_metadata +# # => { :key => :message, :scope => :foo, :default => 'Hi!' } +# +# If a :count option was passed to #translate it will be set to the metadata. +# Likewise, if any interpolation variables were passed they will also be set. +# +# To enable translation metadata you can simply include the Metadata module +# into the Simple backend class - or whatever other backend you are using: +# +# I18n::Backend::Simple.include(I18n::Backend::Metadata) +# +module I18n + module Backend + module Metadata + class << self + def included(base) + Object.class_eval do + def translation_metadata + unless self.frozen? + @translation_metadata ||= {} + else + {} + end + end + + def translation_metadata=(translation_metadata) + @translation_metadata = translation_metadata unless self.frozen? 
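# Usage sketch (illustrative; not part of the vendored i18n sources).
# The Memoize and Metadata modules above are both enabled by including them
# into the backend class: Memoize caches lookups in a flat hash, Metadata
# attaches lookup information to the returned string.
require 'i18n'

I18n::Backend::Simple.include(I18n::Backend::Memoize)
I18n::Backend::Simple.include(I18n::Backend::Metadata)
I18n.backend.store_translations(:en, greeting: 'Hi, %{name}!')

msg = I18n.t(:greeting, name: 'David')  # first lookup fills the memoized cache
p msg                                   # => "Hi, David!"
p msg.translation_metadata[:key]        # => :greeting
p msg.translation_metadata[:values]     # => {:name=>"David"}
I18n.reload!                            # clears the memoized values again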
+ end + end unless Object.method_defined?(:translation_metadata) + end + end + + def translate(locale, key, options = EMPTY_HASH) + metadata = { + :locale => locale, + :key => key, + :scope => options[:scope], + :default => options[:default], + :separator => options[:separator], + :values => options.reject { |name, _value| RESERVED_KEYS.include?(name) } + } + with_metadata(metadata) { super } + end + + def interpolate(locale, entry, values = EMPTY_HASH) + metadata = entry.translation_metadata.merge(:original => entry) + with_metadata(metadata) { super } + end + + def pluralize(locale, entry, count) + with_metadata(:count => count) { super } + end + + protected + + def with_metadata(metadata, &block) + result = yield + result.translation_metadata = result.translation_metadata.merge(metadata) if result + result + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/pluralization.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/pluralization.rb new file mode 100644 index 0000000..b602657 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/pluralization.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +# I18n Pluralization are useful when you want your application to +# customize pluralization rules. +# +# To enable locale specific pluralizations you can simply include the +# Pluralization module to the Simple backend - or whatever other backend you +# are using. +# +# I18n::Backend::Simple.include(I18n::Backend::Pluralization) +# +# You also need to make sure to provide pluralization algorithms to the +# backend, i.e. include them to your I18n.load_path accordingly. +module I18n + module Backend + module Pluralization + # Overwrites the Base backend translate method so that it will check the + # translation meta data space (:i18n) for a locale specific pluralization + # rule and use it to pluralize the given entry. I.e. the library expects + # pluralization rules to be stored at I18n.t(:'i18n.plural.rule') + # + # Pluralization rules are expected to respond to #call(count) and + # return a pluralization key. Valid keys depend on the translation data + # hash (entry) but it is generally recommended to follow CLDR's style, + # i.e., return one of the keys :zero, :one, :few, :many, :other. + # + # The :zero key is always picked directly when count equals 0 AND the + # translation data has the key :zero. This way translators are free to + # either pick a special :zero translation even for languages where the + # pluralizer does not return a :zero key. + def pluralize(locale, entry, count) + return entry unless entry.is_a?(Hash) && count + + pluralizer = pluralizer(locale) + if pluralizer.respond_to?(:call) + key = count == 0 && entry.has_key?(:zero) ? 
:zero : pluralizer.call(count) + raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) + entry[key] + else + super + end + end + + protected + + def pluralizers + @pluralizers ||= {} + end + + def pluralizer(locale) + pluralizers[locale] ||= I18n.t(:'i18n.plural.rule', :locale => locale, :resolve => false) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/simple.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/simple.rb new file mode 100644 index 0000000..ed74146 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/simple.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require 'i18n/backend/base' + +module I18n + module Backend + # A simple backend that reads translations from YAML files and stores them in + # an in-memory hash. Relies on the Base backend. + # + # The implementation is provided by a Implementation module allowing to easily + # extend Simple backend's behavior by including modules. E.g.: + # + # module I18n::Backend::Pluralization + # def pluralize(*args) + # # extended pluralization logic + # super + # end + # end + # + # I18n::Backend::Simple.include(I18n::Backend::Pluralization) + class Simple + using I18n::HashRefinements + + module Implementation + include Base + + def initialized? + @initialized ||= false + end + + # Stores translations for the given locale in memory. + # This uses a deep merge for the translations hash, so existing + # translations will be overwritten by new ones only at the deepest + # level of the hash. + def store_translations(locale, data, options = EMPTY_HASH) + if I18n.enforce_available_locales && + I18n.available_locales_initialized? && + !I18n.available_locales.include?(locale.to_sym) && + !I18n.available_locales.include?(locale.to_s) + return data + end + locale = locale.to_sym + translations[locale] ||= {} + data = data.deep_symbolize_keys + translations[locale].deep_merge!(data) + end + + # Get available locales from the translations hash + def available_locales + init_translations unless initialized? + translations.inject([]) do |locales, (locale, data)| + locales << locale unless data.size <= 1 && (data.empty? || data.has_key?(:i18n)) + locales + end + end + + # Clean up translations hash and set initialized to false on reload! + def reload! + @initialized = false + @translations = nil + super + end + + def eager_load! + init_translations unless initialized? + super + end + + def translations(do_init: false) + # To avoid returning empty translations, + # call `init_translations` + init_translations if do_init && !initialized? + + @translations ||= {} + end + + protected + + def init_translations + load_translations + @initialized = true + end + + # Looks up a translation from the translations hash. Returns nil if + # either key is nil, or locale, scope or key do not exist as a key in the + # nested translations hash. Splits keys or scopes containing dots + # into multiple keys, i.e. currency.format is regarded the same as + # %w(currency format). + def lookup(locale, key, scope = [], options = EMPTY_HASH) + init_translations unless initialized? 
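# Usage sketch (illustrative; not part of the vendored i18n sources).
# A locale-specific pluralization rule stored under i18n.plural.rule, as the
# Pluralization comment above expects. The :xx locale and its rule are made up.
require 'i18n'

I18n::Backend::Simple.include(I18n::Backend::Pluralization)

rule = lambda { |n| n == 1 ? :one : :other }  # trivial CLDR-style rule
I18n.backend.store_translations(:xx,
  i18n:   { plural: { rule: rule } },
  apples: { zero: 'no apples', one: 'one apple', other: 'many apples' })

puts I18n.t(:apples, count: 0, locale: :xx)  # => "no apples" (:zero wins when count is 0)
puts I18n.t(:apples, count: 5, locale: :xx)  # => "many apples"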
+ keys = I18n.normalize_keys(locale, key, scope, options[:separator]) + + keys.inject(translations) do |result, _key| + return nil unless result.is_a?(Hash) + unless result.has_key?(_key) + _key = _key.to_s.to_sym + return nil unless result.has_key?(_key) + end + result = result[_key] + result = resolve(locale, _key, result, options.merge(:scope => nil)) if result.is_a?(Symbol) + result + end + end + end + + include Implementation + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/transliterator.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/transliterator.rb new file mode 100644 index 0000000..bb704ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/backend/transliterator.rb @@ -0,0 +1,108 @@ +# encoding: utf-8 +# frozen_string_literal: true + +module I18n + module Backend + module Transliterator + DEFAULT_REPLACEMENT_CHAR = "?" + + # Given a locale and a UTF-8 string, return the locale's ASCII + # approximation for the string. + def transliterate(locale, string, replacement = nil) + @transliterators ||= {} + @transliterators[locale] ||= Transliterator.get I18n.t(:'i18n.transliterate.rule', + :locale => locale, :resolve => false, :default => {}) + @transliterators[locale].transliterate(string, replacement) + end + + # Get a transliterator instance. + def self.get(rule = nil) + if !rule || rule.kind_of?(Hash) + HashTransliterator.new(rule) + elsif rule.kind_of? Proc + ProcTransliterator.new(rule) + else + raise I18n::ArgumentError, "Transliteration rule must be a proc or a hash." + end + end + + # A transliterator which accepts a Proc as its transliteration rule. + class ProcTransliterator + def initialize(rule) + @rule = rule + end + + def transliterate(string, replacement = nil) + @rule.call(string) + end + end + + # A transliterator which accepts a Hash of characters as its translation + # rule. 
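# Usage sketch (illustrative; not part of the vendored i18n sources).
# The deep-merge behaviour of Simple#store_translations described above:
# a second call only overwrites the deepest keys it actually provides.
require 'i18n'

I18n.backend.store_translations(:en, foo: { bar: 'bar', baz: 'baz' })
I18n.backend.store_translations(:en, foo: { baz: 'BAZ!' })

puts I18n.t('foo.bar')  # => "bar"   (still present)
puts I18n.t('foo.baz')  # => "BAZ!"  (replaced)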
+ class HashTransliterator + DEFAULT_APPROXIMATIONS = { + "À"=>"A", "Á"=>"A", "Â"=>"A", "Ã"=>"A", "Ä"=>"A", "Å"=>"A", "Æ"=>"AE", + "Ç"=>"C", "È"=>"E", "É"=>"E", "Ê"=>"E", "Ë"=>"E", "Ì"=>"I", "Í"=>"I", + "Î"=>"I", "Ï"=>"I", "Ð"=>"D", "Ñ"=>"N", "Ò"=>"O", "Ó"=>"O", "Ô"=>"O", + "Õ"=>"O", "Ö"=>"O", "×"=>"x", "Ø"=>"O", "Ù"=>"U", "Ú"=>"U", "Û"=>"U", + "Ü"=>"U", "Ý"=>"Y", "Þ"=>"Th", "ß"=>"ss", "à"=>"a", "á"=>"a", "â"=>"a", + "ã"=>"a", "ä"=>"a", "å"=>"a", "æ"=>"ae", "ç"=>"c", "è"=>"e", "é"=>"e", + "ê"=>"e", "ë"=>"e", "ì"=>"i", "í"=>"i", "î"=>"i", "ï"=>"i", "ð"=>"d", + "ñ"=>"n", "ò"=>"o", "ó"=>"o", "ô"=>"o", "õ"=>"o", "ö"=>"o", "ø"=>"o", + "ù"=>"u", "ú"=>"u", "û"=>"u", "ü"=>"u", "ý"=>"y", "þ"=>"th", "ÿ"=>"y", + "Ā"=>"A", "ā"=>"a", "Ă"=>"A", "ă"=>"a", "Ą"=>"A", "ą"=>"a", "Ć"=>"C", + "ć"=>"c", "Ĉ"=>"C", "ĉ"=>"c", "Ċ"=>"C", "ċ"=>"c", "Č"=>"C", "č"=>"c", + "Ď"=>"D", "ď"=>"d", "Đ"=>"D", "đ"=>"d", "Ē"=>"E", "ē"=>"e", "Ĕ"=>"E", + "ĕ"=>"e", "Ė"=>"E", "ė"=>"e", "Ę"=>"E", "ę"=>"e", "Ě"=>"E", "ě"=>"e", + "Ĝ"=>"G", "ĝ"=>"g", "Ğ"=>"G", "ğ"=>"g", "Ġ"=>"G", "ġ"=>"g", "Ģ"=>"G", + "ģ"=>"g", "Ĥ"=>"H", "ĥ"=>"h", "Ħ"=>"H", "ħ"=>"h", "Ĩ"=>"I", "ĩ"=>"i", + "Ī"=>"I", "ī"=>"i", "Ĭ"=>"I", "ĭ"=>"i", "Į"=>"I", "į"=>"i", "İ"=>"I", + "ı"=>"i", "IJ"=>"IJ", "ij"=>"ij", "Ĵ"=>"J", "ĵ"=>"j", "Ķ"=>"K", "ķ"=>"k", + "ĸ"=>"k", "Ĺ"=>"L", "ĺ"=>"l", "Ļ"=>"L", "ļ"=>"l", "Ľ"=>"L", "ľ"=>"l", + "Ŀ"=>"L", "ŀ"=>"l", "Ł"=>"L", "ł"=>"l", "Ń"=>"N", "ń"=>"n", "Ņ"=>"N", + "ņ"=>"n", "Ň"=>"N", "ň"=>"n", "ʼn"=>"'n", "Ŋ"=>"NG", "ŋ"=>"ng", + "Ō"=>"O", "ō"=>"o", "Ŏ"=>"O", "ŏ"=>"o", "Ő"=>"O", "ő"=>"o", "Œ"=>"OE", + "œ"=>"oe", "Ŕ"=>"R", "ŕ"=>"r", "Ŗ"=>"R", "ŗ"=>"r", "Ř"=>"R", "ř"=>"r", + "Ś"=>"S", "ś"=>"s", "Ŝ"=>"S", "ŝ"=>"s", "Ş"=>"S", "ş"=>"s", "Š"=>"S", + "š"=>"s", "Ţ"=>"T", "ţ"=>"t", "Ť"=>"T", "ť"=>"t", "Ŧ"=>"T", "ŧ"=>"t", + "Ũ"=>"U", "ũ"=>"u", "Ū"=>"U", "ū"=>"u", "Ŭ"=>"U", "ŭ"=>"u", "Ů"=>"U", + "ů"=>"u", "Ű"=>"U", "ű"=>"u", "Ų"=>"U", "ų"=>"u", "Ŵ"=>"W", "ŵ"=>"w", + "Ŷ"=>"Y", "ŷ"=>"y", "Ÿ"=>"Y", "Ź"=>"Z", "ź"=>"z", "Ż"=>"Z", "ż"=>"z", + "Ž"=>"Z", "ž"=>"z" + }.freeze + + def initialize(rule = nil) + @rule = rule + add_default_approximations + add rule if rule + end + + def transliterate(string, replacement = nil) + replacement ||= DEFAULT_REPLACEMENT_CHAR + string.gsub(/[^\x00-\x7f]/u) do |char| + approximations[char] || replacement + end + end + + private + + def approximations + @approximations ||= {} + end + + def add_default_approximations + DEFAULT_APPROXIMATIONS.each do |key, value| + approximations[key] = value + end + end + + # Add transliteration rules to the approximations hash. + def add(hash) + hash.each do |key, value| + approximations[key.to_s] = value.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/config.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/config.rb new file mode 100644 index 0000000..ea3dd1e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/config.rb @@ -0,0 +1,165 @@ +# frozen_string_literal: true + +require 'set' + +module I18n + class Config + # The only configuration value that is not global and scoped to thread is :locale. + # It defaults to the default_locale. + def locale + defined?(@locale) && @locale != nil ? @locale : default_locale + end + + # Sets the current locale pseudo-globally, i.e. in the Thread.current hash. + def locale=(locale) + I18n.enforce_available_locales!(locale) + @locale = locale && locale.to_sym + end + + # Returns the current backend. Defaults to +Backend::Simple+. 
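# Usage sketch (illustrative; not part of the vendored i18n sources).
# ASCII approximation with the Transliterator module above; characters without
# an approximation fall back to the default replacement "?".
require 'i18n'

I18n::Backend::Simple.include(I18n::Backend::Transliterator)
I18n.available_locales = [:en]

puts I18n.transliterate('Ærøskøbing')  # => "AEroskobing"
puts I18n.transliterate('中文')         # => "??"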
+ def backend + @@backend ||= Backend::Simple.new + end + + # Sets the current backend. Used to set a custom backend. + def backend=(backend) + @@backend = backend + end + + # Returns the current default locale. Defaults to :'en' + def default_locale + @@default_locale ||= :en + end + + # Sets the current default locale. Used to set a custom default locale. + def default_locale=(locale) + I18n.enforce_available_locales!(locale) + @@default_locale = locale && locale.to_sym + end + + # Returns an array of locales for which translations are available. + # Unless you explicitely set these through I18n.available_locales= + # the call will be delegated to the backend. + def available_locales + @@available_locales ||= nil + @@available_locales || backend.available_locales + end + + # Caches the available locales list as both strings and symbols in a Set, so + # that we can have faster lookups to do the available locales enforce check. + def available_locales_set #:nodoc: + @@available_locales_set ||= available_locales.inject(Set.new) do |set, locale| + set << locale.to_s << locale.to_sym + end + end + + # Sets the available locales. + def available_locales=(locales) + @@available_locales = Array(locales).map { |locale| locale.to_sym } + @@available_locales = nil if @@available_locales.empty? + @@available_locales_set = nil + end + + # Returns true if the available_locales have been initialized + def available_locales_initialized? + ( !!defined?(@@available_locales) && !!@@available_locales ) + end + + # Clears the available locales set so it can be recomputed again after I18n + # gets reloaded. + def clear_available_locales_set #:nodoc: + @@available_locales_set = nil + end + + # Returns the current default scope separator. Defaults to '.' + def default_separator + @@default_separator ||= '.' + end + + # Sets the current default scope separator. + def default_separator=(separator) + @@default_separator = separator + end + + # Returns the current exception handler. Defaults to an instance of + # I18n::ExceptionHandler. + def exception_handler + @@exception_handler ||= ExceptionHandler.new + end + + # Sets the exception handler. + def exception_handler=(exception_handler) + @@exception_handler = exception_handler + end + + # Returns the current handler for situations when interpolation argument + # is missing. MissingInterpolationArgument will be raised by default. + def missing_interpolation_argument_handler + @@missing_interpolation_argument_handler ||= lambda do |missing_key, provided_hash, string| + raise MissingInterpolationArgument.new(missing_key, provided_hash, string) + end + end + + # Sets the missing interpolation argument handler. It can be any + # object that responds to #call. The arguments that will be passed to #call + # are the same as for MissingInterpolationArgument initializer. Use +Proc.new+ + # if you don't care about arity. + # + # == Example: + # You can supress raising an exception and return string instead: + # + # I18n.config.missing_interpolation_argument_handler = Proc.new do |key| + # "#{key} is missing" + # end + def missing_interpolation_argument_handler=(exception_handler) + @@missing_interpolation_argument_handler = exception_handler + end + + # Allow clients to register paths providing translation data sources. The + # backend defines acceptable sources. + # + # E.g. the provided SimpleBackend accepts a list of paths to translation + # files which are either named *.rb and contain plain Ruby Hashes or are + # named *.yml and contain YAML data. 
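# Usage sketch (illustrative; not part of the vendored i18n sources).
# A lenient missing_interpolation_argument_handler, instead of the default
# behaviour of raising MissingInterpolationArgument.
require 'i18n'

I18n.config.missing_interpolation_argument_handler = proc do |key, _values, _string|
  "[missing %{#{key}}]"
end

puts I18n.interpolate('Hi %{name}!', {})  # => "Hi [missing %{name}]!"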
So for the SimpleBackend clients may + # register translation files like this: + # I18n.load_path << 'path/to/locale/en.yml' + def load_path + @@load_path ||= [] + end + + # Sets the load path instance. Custom implementations are expected to + # behave like a Ruby Array. + def load_path=(load_path) + @@load_path = load_path + @@available_locales_set = nil + backend.reload! + end + + # Whether or not to verify if locales are in the list of available locales. + # Defaults to true. + @@enforce_available_locales = true + def enforce_available_locales + @@enforce_available_locales + end + + def enforce_available_locales=(enforce_available_locales) + @@enforce_available_locales = enforce_available_locales + end + + # Returns the current interpolation patterns. Defaults to + # I18n::DEFAULT_INTERPOLATION_PATTERNS. + def interpolation_patterns + @@interpolation_patterns ||= I18n::DEFAULT_INTERPOLATION_PATTERNS.dup + end + + # Sets the current interpolation patterns. Used to set a interpolation + # patterns. + # + # E.g. using {{}} as a placeholder like "{{hello}}, world!": + # + # I18n.config.interpolation_patterns << /\{\{(\w+)\}\}/ + def interpolation_patterns=(interpolation_patterns) + @@interpolation_patterns = interpolation_patterns + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/core_ext/hash.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/core_ext/hash.rb new file mode 100644 index 0000000..775d490 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/core_ext/hash.rb @@ -0,0 +1,59 @@ +module I18n + module HashRefinements + refine Hash do + using I18n::HashRefinements + def except(*keys) + dup.except!(*keys) + end + + def except!(*keys) + keys.each { |key| delete(key) } + self + end + + def deep_symbolize_keys + each_with_object({}) do |(key, value), result| + result[symbolize_key(key)] = deep_symbolize_keys_in_object(value) + result + end + end + + # deep_merge from activesupport 5 + # Copyright (c) 2005-2019 David Heinemeier Hansson + def deep_merge(other_hash, &block) + dup.deep_merge!(other_hash, &block) + end + + # deep_merge! from activesupport 5 + # Copyright (c) 2005-2019 David Heinemeier Hansson + def deep_merge!(other_hash, &block) + merge!(other_hash) do |key, this_val, other_val| + if this_val.is_a?(Hash) && other_val.is_a?(Hash) + this_val.deep_merge(other_val, &block) + elsif block_given? + block.call(key, this_val, other_val) + else + other_val + end + end + end + + def symbolize_key(key) + key.respond_to?(:to_sym) ? key.to_sym : key + end + + private + + def deep_symbolize_keys_in_object(value) + case value + when Hash + value.deep_symbolize_keys + when Array + value.map { |e| deep_symbolize_keys_in_object(e) } + else + value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/exceptions.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/exceptions.rb new file mode 100644 index 0000000..74b3d34 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/exceptions.rb @@ -0,0 +1,111 @@ +# frozen_string_literal: true + +require 'cgi' + +module I18n + class ExceptionHandler + def call(exception, _locale, _key, _options) + if exception.is_a?(MissingTranslation) + exception.message + else + raise exception + end + end + end + + class ArgumentError < ::ArgumentError; end + + class Disabled < ArgumentError + def initialize(method) + super(<<~MESSAGE) + I18n.#{method} is currently disabled, likely because your application is still in its loading phase. 
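# Configuration sketch (illustrative; not part of the vendored i18n sources).
# Typical use of the Config accessors above plus a custom exception handler;
# the locale file glob is a hypothetical path.
require 'i18n'

I18n.load_path += Dir['config/locales/*.yml']  # hypothetical location
I18n.available_locales = [:en, :de]
I18n.default_locale    = :de

# Accept {{name}} in addition to the default %{name} placeholders.
I18n.config.interpolation_patterns << /\{\{(\w+)\}\}/

# Raise on missing translations instead of returning "translation missing: ...".
I18n.config.exception_handler = lambda do |exception, _locale, _key, _options|
  raise exception.to_exception if exception.is_a?(I18n::MissingTranslation)
  raise exception
end

puts I18n.interpolate('{{who}} let me go', who: 'mamma mia')  # => "mamma mia let me go"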
+ + This method is meant to display text in the user locale, so calling it before the user locale has + been set is likely to display text from the wrong locale to some users. + + If you have a legitimate reason to access i18n data outside of the user flow, you can do so by passing + the desired locale explictly with the `locale` argument, e.g. `I18n.#{method}(..., locale: :en)` + MESSAGE + end + end + + class InvalidLocale < ArgumentError + attr_reader :locale + def initialize(locale) + @locale = locale + super "#{locale.inspect} is not a valid locale" + end + end + + class InvalidLocaleData < ArgumentError + attr_reader :filename + def initialize(filename, exception_message) + @filename, @exception_message = filename, exception_message + super "can not load translations from #{filename}: #{exception_message}" + end + end + + class MissingTranslation < ArgumentError + module Base + attr_reader :locale, :key, :options + + def initialize(locale, key, options = EMPTY_HASH) + @key, @locale, @options = key, locale, options.dup + options.each { |k, v| self.options[k] = v.inspect if v.is_a?(Proc) } + end + + def keys + @keys ||= I18n.normalize_keys(locale, key, options[:scope]).tap do |keys| + keys << 'no key' if keys.size < 2 + end + end + + def message + "translation missing: #{keys.join('.')}" + end + alias :to_s :message + + def to_exception + MissingTranslationData.new(locale, key, options) + end + end + + include Base + end + + class MissingTranslationData < ArgumentError + include MissingTranslation::Base + end + + class InvalidPluralizationData < ArgumentError + attr_reader :entry, :count, :key + def initialize(entry, count, key) + @entry, @count, @key = entry, count, key + super "translation data #{entry.inspect} can not be used with :count => #{count}. key '#{key}' is missing." + end + end + + class MissingInterpolationArgument < ArgumentError + attr_reader :key, :values, :string + def initialize(key, values, string) + @key, @values, @string = key, values, string + super "missing interpolation argument #{key.inspect} in #{string.inspect} (#{values.inspect} given)" + end + end + + class ReservedInterpolationKey < ArgumentError + attr_reader :key, :string + def initialize(key, string) + @key, @string = key, string + super "reserved key #{key.inspect} used in #{string.inspect}" + end + end + + class UnknownFileType < ArgumentError + attr_reader :type, :filename + def initialize(type, filename) + @type, @filename = type, filename + super "can not load translations from #{filename}, the file type #{type} is not known" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext.rb new file mode 100644 index 0000000..858daff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module I18n + module Gettext + PLURAL_SEPARATOR = "\001" + CONTEXT_SEPARATOR = "\004" + + autoload :Helpers, 'i18n/gettext/helpers' + + @@plural_keys = { :en => [:one, :other] } + + class << self + # returns an array of plural keys for the given locale or the whole hash + # of locale mappings to plural keys so that we can convert from gettext's + # integer-index based style + # TODO move this information to the pluralization module + def plural_keys(*args) + args.empty? ? 
@@plural_keys : @@plural_keys[args.first] || @@plural_keys[:en] + end + + def extract_scope(msgid, separator) + scope = msgid.to_s.split(separator) + msgid = scope.pop + [scope, msgid] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext/helpers.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext/helpers.rb new file mode 100644 index 0000000..d077619 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext/helpers.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require 'i18n/gettext' + +module I18n + module Gettext + # Implements classical Gettext style accessors. To use this include the + # module to the global namespace or wherever you want to use it. + # + # include I18n::Gettext::Helpers + module Helpers + # Makes dynamic translation messages readable for the gettext parser. + # _(fruit) cannot be understood by the gettext parser. To help the parser find all your translations, + # you can add fruit = N_("Apple") which does not translate, but tells the parser: "Apple" needs translation. + # * msgid: the message id. + # * Returns: msgid. + def N_(msgsid) + msgsid + end + + def gettext(msgid, options = EMPTY_HASH) + I18n.t(msgid, **{:default => msgid, :separator => '|'}.merge(options)) + end + alias _ gettext + + def sgettext(msgid, separator = '|') + scope, msgid = I18n::Gettext.extract_scope(msgid, separator) + I18n.t(msgid, :scope => scope, :default => msgid, :separator => separator) + end + alias s_ sgettext + + def pgettext(msgctxt, msgid) + separator = I18n::Gettext::CONTEXT_SEPARATOR + sgettext([msgctxt, msgid].join(separator), separator) + end + alias p_ pgettext + + def ngettext(msgid, msgid_plural, n = 1) + nsgettext(msgid, msgid_plural, n) + end + alias n_ ngettext + + # Method signatures: + # nsgettext('Fruits|apple', 'apples', 2) + # nsgettext(['Fruits|apple', 'apples'], 2) + def nsgettext(msgid, msgid_plural, n = 1, separator = '|') + if msgid.is_a?(Array) + msgid, msgid_plural, n, separator = msgid[0], msgid[1], msgid_plural, n + separator = '|' unless separator.is_a?(::String) + end + + scope, msgid = I18n::Gettext.extract_scope(msgid, separator) + default = { :one => msgid, :other => msgid_plural } + I18n.t(msgid, :default => default, :count => n, :scope => scope, :separator => separator) + end + alias ns_ nsgettext + + # Method signatures: + # npgettext('Fruits', 'apple', 'apples', 2) + # npgettext('Fruits', ['apple', 'apples'], 2) + def npgettext(msgctxt, msgid, msgid_plural, n = 1) + separator = I18n::Gettext::CONTEXT_SEPARATOR + + if msgid.is_a?(Array) + msgid_plural, msgid, n = msgid[1], [msgctxt, msgid[0]].join(separator), msgid_plural + else + msgid = [msgctxt, msgid].join(separator) + end + + nsgettext(msgid, msgid_plural, n, separator) + end + alias np_ npgettext + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext/po_parser.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext/po_parser.rb new file mode 100644 index 0000000..a07fdc5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/gettext/po_parser.rb @@ -0,0 +1,329 @@ +=begin + poparser.rb - Generate a .mo + + Copyright (C) 2003-2009 Masao Mutoh + + You may redistribute it and/or modify it under the same + license terms as Ruby. 
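# Usage sketch (illustrative; not part of the vendored i18n sources).
# Gettext-style accessors via the Helpers module above: untranslated msgids
# fall back to themselves, and the n_ family pluralizes on the count argument.
require 'i18n'
require 'i18n/gettext/helpers'

include I18n::Gettext::Helpers
I18n.available_locales = [:en]

puts _('Apple')                             # => "Apple" (msgid used as the default)
puts n_('one apple', '%{count} apples', 3)  # => "3 apples"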
+=end + +#MODIFIED +# removed include GetText etc +# added stub translation method _(x) +require 'racc/parser' + +module GetText + + class PoParser < Racc::Parser + + def _(x) + x + end + +module_eval <<'..end src/poparser.ry modeval..id7a99570e05', 'src/poparser.ry', 108 + def unescape(orig) + ret = orig.gsub(/\\n/, "\n") + ret.gsub!(/\\t/, "\t") + ret.gsub!(/\\r/, "\r") + ret.gsub!(/\\"/, "\"") + ret + end + + def parse(str, data, ignore_fuzzy = true) + @comments = [] + @data = data + @fuzzy = false + @msgctxt = "" + $ignore_fuzzy = ignore_fuzzy + + str.strip! + @q = [] + until str.empty? do + case str + when /\A\s+/ + str = $' + when /\Amsgctxt/ + @q.push [:MSGCTXT, $&] + str = $' + when /\Amsgid_plural/ + @q.push [:MSGID_PLURAL, $&] + str = $' + when /\Amsgid/ + @q.push [:MSGID, $&] + str = $' + when /\Amsgstr/ + @q.push [:MSGSTR, $&] + str = $' + when /\A\[(\d+)\]/ + @q.push [:PLURAL_NUM, $1] + str = $' + when /\A\#~(.*)/ + $stderr.print _("Warning: obsolete msgid exists.\n") + $stderr.print " #{$&}\n" + @q.push [:COMMENT, $&] + str = $' + when /\A\#(.*)/ + @q.push [:COMMENT, $&] + str = $' + when /\A\"(.*)\"/ + @q.push [:STRING, $1] + str = $' + else + #c = str[0,1] + #@q.push [:STRING, c] + str = str[1..-1] + end + end + @q.push [false, '$end'] + if $DEBUG + @q.each do |a,b| + puts "[#{a}, #{b}]" + end + end + @yydebug = true if $DEBUG + do_parse + + if @comments.size > 0 + @data.set_comment(:last, @comments.join("\n")) + end + @data + end + + def next_token + @q.shift + end + + def on_message(msgid, msgstr) + if msgstr.size > 0 + @data[msgid] = msgstr + @data.set_comment(msgid, @comments.join("\n")) + end + @comments.clear + @msgctxt = "" + end + + def on_comment(comment) + @fuzzy = true if (/fuzzy/ =~ comment) + @comments << comment + end + + +..end src/poparser.ry modeval..id7a99570e05 + +##### racc 1.4.5 generates ### + +racc_reduce_table = [ + 0, 0, :racc_error, + 0, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 10, :_reduce_none, + 2, 12, :_reduce_5, + 1, 13, :_reduce_none, + 1, 13, :_reduce_none, + 4, 15, :_reduce_8, + 5, 16, :_reduce_9, + 2, 17, :_reduce_10, + 1, 17, :_reduce_none, + 3, 18, :_reduce_12, + 1, 11, :_reduce_13, + 2, 14, :_reduce_14, + 1, 14, :_reduce_15 ] + +racc_reduce_n = 16 + +racc_shift_n = 26 + +racc_action_table = [ + 3, 13, 5, 7, 9, 15, 16, 17, 20, 17, + 13, 17, 13, 13, 11, 17, 23, 20, 13, 17 ] + +racc_action_check = [ + 1, 16, 1, 1, 1, 12, 12, 12, 18, 18, + 7, 14, 15, 9, 3, 19, 20, 21, 23, 25 ] + +racc_action_pointer = [ + nil, 0, nil, 14, nil, nil, nil, 3, nil, 6, + nil, nil, 0, nil, 4, 5, -6, nil, 2, 8, + 8, 11, nil, 11, nil, 12 ] + +racc_action_default = [ + -1, -16, -2, -16, -3, -13, -4, -16, -6, -16, + -7, 26, -16, -15, -5, -16, -16, -14, -16, -8, + -16, -9, -11, -16, -10, -12 ] + +racc_goto_table = [ + 12, 22, 14, 4, 24, 6, 2, 8, 18, 19, + 10, 21, 1, nil, nil, nil, 25 ] + +racc_goto_check = [ + 5, 9, 5, 3, 9, 4, 2, 6, 5, 5, + 7, 8, 1, nil, nil, nil, 5 ] + +racc_goto_pointer = [ + nil, 12, 5, 2, 4, -7, 6, 9, -7, -17 ] + +racc_goto_default = [ + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil ] + +racc_token_table = { + false => 0, + Object.new => 1, + :COMMENT => 2, + :MSGID => 3, + :MSGCTXT => 4, + :MSGID_PLURAL => 5, + :MSGSTR => 6, + :STRING => 7, + :PLURAL_NUM => 8 } + +racc_use_result_var = true + +racc_nt_base = 9 + +Racc_arg = [ + racc_action_table, + racc_action_check, + racc_action_default, + racc_action_pointer, + racc_goto_table, + racc_goto_check, + racc_goto_default, + racc_goto_pointer, + racc_nt_base, + 
racc_reduce_table, + racc_token_table, + racc_shift_n, + racc_reduce_n, + racc_use_result_var ] + +Racc_token_to_s_table = [ +'$end', +'error', +'COMMENT', +'MSGID', +'MSGCTXT', +'MSGID_PLURAL', +'MSGSTR', +'STRING', +'PLURAL_NUM', +'$start', +'msgfmt', +'comment', +'msgctxt', +'message', +'string_list', +'single_message', +'plural_message', +'msgstr_plural', +'msgstr_plural_line'] + +Racc_debug_parser = true + +##### racc system variables end ##### + + # reduce 0 omitted + + # reduce 1 omitted + + # reduce 2 omitted + + # reduce 3 omitted + + # reduce 4 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 25 + def _reduce_5( val, _values, result ) + @msgctxt = unescape(val[1]) + "\004" + result + end +.,., + + # reduce 6 omitted + + # reduce 7 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 48 + def _reduce_8( val, _values, result ) + if @fuzzy and $ignore_fuzzy + if val[1] != "" + $stderr.print _("Warning: fuzzy message was ignored.\n") + $stderr.print " msgid '#{val[1]}'\n" + else + on_message('', unescape(val[3])) + end + @fuzzy = false + else + on_message(@msgctxt + unescape(val[1]), unescape(val[3])) + end + result = "" + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 65 + def _reduce_9( val, _values, result ) + if @fuzzy and $ignore_fuzzy + if val[1] != "" + $stderr.print _("Warning: fuzzy message was ignored.\n") + $stderr.print "msgid = '#{val[1]}\n" + else + on_message('', unescape(val[3])) + end + @fuzzy = false + else + on_message(@msgctxt + unescape(val[1]) + "\000" + unescape(val[3]), unescape(val[4])) + end + result = "" + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 76 + def _reduce_10( val, _values, result ) + if val[0].size > 0 + result = val[0] + "\000" + val[1] + else + result = "" + end + result + end +.,., + + # reduce 11 omitted + +module_eval <<'.,.,', 'src/poparser.ry', 84 + def _reduce_12( val, _values, result ) + result = val[2] + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 91 + def _reduce_13( val, _values, result ) + on_comment(val[0]) + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 99 + def _reduce_14( val, _values, result ) + result = val.delete_if{|item| item == ""}.join + result + end +.,., + +module_eval <<'.,.,', 'src/poparser.ry', 103 + def _reduce_15( val, _values, result ) + result = val[0] + result + end +.,., + + def _reduce_none( val, _values, result ) + result + end + + end # class PoParser + +end # module GetText diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/interpolate/ruby.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/interpolate/ruby.rb new file mode 100644 index 0000000..13171d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/interpolate/ruby.rb @@ -0,0 +1,39 @@ +# heavily based on Masao Mutoh's gettext String interpolation extension +# http://github.com/mutoh/gettext/blob/f6566738b981fe0952548c421042ad1e0cdfb31e/lib/gettext/core_ext/string.rb + +module I18n + DEFAULT_INTERPOLATION_PATTERNS = [ + /%%/, + /%\{([\w|]+)\}/, # matches placeholders like "%{foo} or %{foo|word}" + /%<(\w+)>(.*?\d*\.?\d*[bBdiouxXeEfgGcps])/ # matches placeholders like "%.d" + ].freeze + INTERPOLATION_PATTERN = Regexp.union(DEFAULT_INTERPOLATION_PATTERNS) + deprecate_constant :INTERPOLATION_PATTERN + + class << self + # Return String or raises MissingInterpolationArgument exception. + # Missing argument's logic is handled by I18n.config.missing_interpolation_argument_handler. 
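# Usage sketch (illustrative; not part of the vendored i18n sources).
# The default interpolation patterns above at work: %{...} placeholders,
# sprintf-style %<...> placeholders, and %% as a literal percent sign.
require 'i18n'

puts I18n.interpolate('Hi %{name}!', name: 'David')      # => "Hi David!"
puts I18n.interpolate('%<score>.2f points', score: 7.5)  # => "7.50 points"
puts I18n.interpolate('100%% done', {})                  # => "100% done"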
+ def interpolate(string, values) + raise ReservedInterpolationKey.new($1.to_sym, string) if string =~ RESERVED_KEYS_PATTERN + raise ArgumentError.new('Interpolation values must be a Hash.') unless values.kind_of?(Hash) + interpolate_hash(string, values) + end + + def interpolate_hash(string, values) + string.gsub(Regexp.union(config.interpolation_patterns)) do |match| + if match == '%%' + '%' + else + key = ($1 || $2 || match.tr("%{}", "")).to_sym + value = if values.key?(key) + values[key] + else + config.missing_interpolation_argument_handler.call(key, values, string) + end + value = value.call(values) if value.respond_to?(:call) + $3 ? sprintf("%#{$3}", value) : value + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale.rb new file mode 100644 index 0000000..c4078e6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +module I18n + module Locale + autoload :Fallbacks, 'i18n/locale/fallbacks' + autoload :Tag, 'i18n/locale/tag' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/fallbacks.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/fallbacks.rb new file mode 100644 index 0000000..94909aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/fallbacks.rb @@ -0,0 +1,99 @@ +# Locale Fallbacks +# +# Extends the I18n module to hold a fallbacks instance which is set to an +# instance of I18n::Locale::Fallbacks by default but can be swapped with a +# different implementation. +# +# Locale fallbacks will compute a number of fallback locales for a given locale. +# For example: +# +#
+# I18n.fallbacks[:"es-MX"] # => [:"es-MX", :es, :en] 
+# +# Locale fallbacks always fall back to +# +# * all parent locales of a given locale (e.g. :es for :"es-MX") first, +# * the current default locales and all of their parents second +# +# The default locales are set to [I18n.default_locale] by default but can be +# set to something else. +# +# One can additionally add any number of additional fallback locales manually. +# These will be added before the default locales to the fallback chain. For +# example: +# +# # using the default locale as default fallback locale +# +# I18n.default_locale = :"en-US" +# I18n.fallbacks = I18n::Locale::Fallbacks.new(:"de-AT" => :"de-DE") +# I18n.fallbacks[:"de-AT"] # => [:"de-AT", :de, :"de-DE"] +# +# # using a custom locale as default fallback locale +# +# I18n.fallbacks = I18n::Locale::Fallbacks.new(:"en-GB", :"de-AT" => :de, :"de-CH" => :de) +# I18n.fallbacks[:"de-AT"] # => [:"de-AT", :de, :"en-GB", :en] +# I18n.fallbacks[:"de-CH"] # => [:"de-CH", :de, :"en-GB", :en] +# +# # mapping fallbacks to an existing instance +# +# # people speaking Catalan also speak Spanish as spoken in Spain +# fallbacks = I18n.fallbacks +# fallbacks.map(:ca => :"es-ES") +# fallbacks[:ca] # => [:ca, :"es-ES", :es, :"en-US", :en] +# +# # people speaking Arabian as spoken in Palestine also speak Hebrew as spoken in Israel +# fallbacks.map(:"ar-PS" => :"he-IL") +# fallbacks[:"ar-PS"] # => [:"ar-PS", :ar, :"he-IL", :he, :"en-US", :en] +# fallbacks[:"ar-EG"] # => [:"ar-EG", :ar, :"en-US", :en] +# +# # people speaking Sami as spoken in Finnland also speak Swedish and Finnish as spoken in Finnland +# fallbacks.map(:sms => [:"se-FI", :"fi-FI"]) +# fallbacks[:sms] # => [:sms, :"se-FI", :se, :"fi-FI", :fi, :"en-US", :en] + +module I18n + module Locale + class Fallbacks < Hash + def initialize(*mappings) + @map = {} + map(mappings.pop) if mappings.last.is_a?(Hash) + self.defaults = mappings.empty? ? [] : mappings + end + + def defaults=(defaults) + @defaults = defaults.flat_map { |default| compute(default, false) } + end + attr_reader :defaults + + def [](locale) + raise InvalidLocale.new(locale) if locale.nil? + raise Disabled.new('fallback#[]') if locale == false + locale = locale.to_sym + super || store(locale, compute(locale)) + end + + def map(mappings) + mappings.each do |from, to| + from, to = from.to_sym, Array(to) + to.each do |_to| + @map[from] ||= [] + @map[from] << _to.to_sym + end + end + end + + protected + + def compute(tags, include_defaults = true, exclude = []) + result = Array(tags).flat_map do |tag| + tags = I18n::Locale::Tag.tag(tag).self_and_parents.map! { |t| t.to_sym } - exclude + tags.each { |_tag| tags += compute(@map[_tag], false, exclude + tags) if @map[_tag] } + tags + end + result.push(*defaults) if include_defaults + result.uniq! + result.compact! + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag.rb new file mode 100644 index 0000000..a640b44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag.rb @@ -0,0 +1,28 @@ +# encoding: utf-8 + +module I18n + module Locale + module Tag + autoload :Parents, 'i18n/locale/tag/parents' + autoload :Rfc4646, 'i18n/locale/tag/rfc4646' + autoload :Simple, 'i18n/locale/tag/simple' + + class << self + # Returns the current locale tag implementation. Defaults to +I18n::Locale::Tag::Simple+. + def implementation + @@implementation ||= Simple + end + + # Sets the current locale tag implementation. 
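# Usage sketch (illustrative; not part of the vendored i18n sources).
# Computing fallback chains with I18n::Locale::Fallbacks, as documented above
# (relies on the constants being reachable through i18n's autoloads).
require 'i18n'

fallbacks = I18n::Locale::Fallbacks.new(:en, :"de-AT" => :"de-DE")
p fallbacks[:"de-AT"]  # => [:"de-AT", :de, :"de-DE", :en]
p fallbacks[:"es-MX"]  # => [:"es-MX", :es, :en]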
Use this to set a different locale tag implementation. + def implementation=(implementation) + @@implementation = implementation + end + + # Factory method for locale tags. Delegates to the current locale tag implementation. + def tag(tag) + implementation.tag(tag) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/parents.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/parents.rb new file mode 100644 index 0000000..6283e66 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/parents.rb @@ -0,0 +1,24 @@ +module I18n + module Locale + module Tag + module Parents + def parent + @parent ||= + begin + segs = to_a + segs.compact! + segs.length > 1 ? self.class.tag(*segs[0..(segs.length - 2)].join('-')) : nil + end + end + + def self_and_parents + @self_and_parents ||= [self].concat parents + end + + def parents + @parents ||= parent ? [parent].concat(parent.parents) : [] + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/rfc4646.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/rfc4646.rb new file mode 100644 index 0000000..4ce4c75 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/rfc4646.rb @@ -0,0 +1,74 @@ +# RFC 4646/47 compliant Locale tag implementation that parses locale tags to +# subtags such as language, script, region, variant etc. +# +# For more information see by http://en.wikipedia.org/wiki/IETF_language_tag +# +# Rfc4646::Parser does not implement grandfathered tags. + +module I18n + module Locale + module Tag + RFC4646_SUBTAGS = [ :language, :script, :region, :variant, :extension, :privateuse, :grandfathered ] + RFC4646_FORMATS = { :language => :downcase, :script => :capitalize, :region => :upcase, :variant => :downcase } + + class Rfc4646 < Struct.new(*RFC4646_SUBTAGS) + class << self + # Parses the given tag and returns a Tag instance if it is valid. + # Returns false if the given tag is not valid according to RFC 4646. + def tag(tag) + matches = parser.match(tag) + new(*matches) if matches + end + + def parser + @@parser ||= Rfc4646::Parser + end + + def parser=(parser) + @@parser = parser + end + end + + include Parents + + RFC4646_FORMATS.each do |name, format| + define_method(name) { self[name].send(format) unless self[name].nil? } + end + + def to_sym + to_s.to_sym + end + + def to_s + @tag ||= to_a.compact.join("-") + end + + def to_a + members.collect { |attr| self.send(attr) } + end + + module Parser + PATTERN = %r{\A(?: + ([a-z]{2,3}(?:(?:-[a-z]{3}){0,3})?|[a-z]{4}|[a-z]{5,8}) # language + (?:-([a-z]{4}))? # script + (?:-([a-z]{2}|\d{3}))? # region + (?:-([0-9a-z]{5,8}|\d[0-9a-z]{3}))* # variant + (?:-([0-9a-wyz](?:-[0-9a-z]{2,8})+))* # extension + (?:-(x(?:-[0-9a-z]{1,8})+))?| # privateuse subtag + (x(?:-[0-9a-z]{1,8})+)| # privateuse tag + /* ([a-z]{1,3}(?:-[0-9a-z]{2,8}){1,2}) */ # grandfathered + )\z}xi + + class << self + def match(tag) + c = PATTERN.match(tag.to_s).captures + c[0..4] << (c[5].nil? ? c[6] : c[5]) << c[7] # TODO c[7] is grandfathered, throw a NotImplemented exception here? 
+ rescue + false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/simple.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/simple.rb new file mode 100644 index 0000000..6d9ab56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/locale/tag/simple.rb @@ -0,0 +1,39 @@ +# Simple Locale tag implementation that computes subtags by simply splitting +# the locale tag at '-' occurences. +module I18n + module Locale + module Tag + class Simple + class << self + def tag(tag) + new(tag) + end + end + + include Parents + + attr_reader :tag + + def initialize(*tag) + @tag = tag.join('-').to_sym + end + + def subtags + @subtags = tag.to_s.split('-').map!(&:to_s) + end + + def to_sym + tag + end + + def to_s + tag.to_s + end + + def to_a + subtags + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/middleware.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/middleware.rb new file mode 100644 index 0000000..59b377e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/middleware.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module I18n + class Middleware + + def initialize(app) + @app = app + end + + def call(env) + @app.call(env) + ensure + Thread.current[:i18n_config] = I18n::Config.new + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests.rb new file mode 100644 index 0000000..30d0ed5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module I18n + module Tests + autoload :Basics, 'i18n/tests/basics' + autoload :Defaults, 'i18n/tests/defaults' + autoload :Interpolation, 'i18n/tests/interpolation' + autoload :Link, 'i18n/tests/link' + autoload :Localization, 'i18n/tests/localization' + autoload :Lookup, 'i18n/tests/lookup' + autoload :Pluralization, 'i18n/tests/pluralization' + autoload :Procs, 'i18n/tests/procs' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/basics.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/basics.rb new file mode 100644 index 0000000..951d7ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/basics.rb @@ -0,0 +1,60 @@ +module I18n + module Tests + module Basics + def teardown + I18n.available_locales = nil + end + + test "available_locales returns the locales stored to the backend by default" do + I18n.backend.store_translations('de', :foo => 'bar') + I18n.backend.store_translations('en', :foo => 'foo') + + assert I18n.available_locales.include?(:de) + assert I18n.available_locales.include?(:en) + end + + test "available_locales can be set to something else independently from the actual locale data" do + I18n.backend.store_translations('de', :foo => 'bar') + I18n.backend.store_translations('en', :foo => 'foo') + + I18n.available_locales = :foo + assert_equal [:foo], I18n.available_locales + + I18n.available_locales = [:foo, 'bar'] + assert_equal [:foo, :bar], I18n.available_locales + + I18n.available_locales = nil + assert I18n.available_locales.include?(:de) + assert I18n.available_locales.include?(:en) + end + + test "available_locales memoizes when set explicitely" do + I18n.backend.expects(:available_locales).never + I18n.available_locales = [:foo] + I18n.backend.store_translations('de', :bar => 'baz') + I18n.reload! 
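# Usage sketch (illustrative; not part of the vendored i18n sources).
# Parsing a locale tag into subtags with the RFC 4646 implementation above.
require 'i18n'

tag = I18n::Locale::Tag::Rfc4646.tag('de-Latn-DE')
p tag.language               # => "de"
p tag.script                 # => "Latn"
p tag.region                 # => "DE"
p tag.parents.map(&:to_sym)  # => [:"de-Latn", :de]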
+ assert_equal [:foo], I18n.available_locales + end + + test "available_locales delegates to the backend when not set explicitely" do + original_available_locales_value = I18n.backend.available_locales + I18n.backend.expects(:available_locales).returns(original_available_locales_value).twice + assert_equal I18n.backend.available_locales, I18n.available_locales + end + + test "exists? is implemented by the backend" do + I18n.backend.store_translations(:foo, :bar => 'baz') + assert I18n.exists?(:bar, :foo) + end + + test "storing a nil value as a translation removes it from the available locale data" do + I18n.backend.store_translations(:en, :to_be_deleted => 'bar') + assert_equal 'bar', I18n.t(:to_be_deleted, :default => 'baz') + + I18n.cache_store.clear if I18n.respond_to?(:cache_store) && I18n.cache_store + I18n.backend.store_translations(:en, :to_be_deleted => nil) + assert_equal 'baz', I18n.t(:to_be_deleted, :default => 'baz') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/defaults.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/defaults.rb new file mode 100644 index 0000000..b46af61 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/defaults.rb @@ -0,0 +1,52 @@ +# encoding: utf-8 + +module I18n + module Tests + module Defaults + def setup + super + I18n.backend.store_translations(:en, :foo => { :bar => 'bar', :baz => 'baz' }) + end + + test "defaults: given nil as a key it returns the given default" do + assert_equal 'default', I18n.t(nil, :default => 'default') + end + + test "defaults: given a symbol as a default it translates the symbol" do + assert_equal 'bar', I18n.t(nil, :default => :'foo.bar') + end + + test "defaults: given a symbol as a default and a scope it stays inside the scope when looking up the symbol" do + assert_equal 'bar', I18n.t(:missing, :default => :bar, :scope => :foo) + end + + test "defaults: given an array as a default it returns the first match" do + assert_equal 'bar', I18n.t(:does_not_exist, :default => [:does_not_exist_2, :'foo.bar']) + end + + test "defaults: given an array as a default with false it returns false" do + assert_equal false, I18n.t(:does_not_exist, :default => [false]) + end + + test "defaults: given false it returns false" do + assert_equal false, I18n.t(:does_not_exist, :default => false) + end + + test "defaults: given nil it returns nil" do + assert_nil I18n.t(:does_not_exist, :default => nil) + end + + test "defaults: given an array of missing keys it raises a MissingTranslationData exception" do + assert_raise I18n::MissingTranslationData do + I18n.t(:does_not_exist, :default => [:does_not_exist_2, :does_not_exist_3], :raise => true) + end + end + + test "defaults: using a custom scope separator" do + # data must have been stored using the custom separator when using the ActiveRecord backend + I18n.backend.store_translations(:en, { :foo => { :bar => 'bar' } }, { :separator => '|' }) + assert_equal 'bar', I18n.t(nil, :default => :'foo|bar', :separator => '|') + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/interpolation.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/interpolation.rb new file mode 100644 index 0000000..fd78804 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/interpolation.rb @@ -0,0 +1,163 @@ +# encoding: utf-8 + +module I18n + module Tests + module Interpolation + # If no interpolation parameter is not given, I18n should not alter the string. 
+ # This behavior is due to three reasons: + # + # * Checking interpolation keys in all strings hits performance, badly; + # + # * This allows us to retrieve untouched values through I18n. For example + # I could have a middleware that returns I18n lookup results in JSON + # to be processed through Javascript. Leaving the keys untouched allows + # the interpolation to happen at the javascript level; + # + # * Security concerns: if I allow users to translate a web site, they can + # insert %{} in messages causing the I18n lookup to fail in every request. + # + test "interpolation: given no values it does not alter the string" do + assert_equal 'Hi %{name}!', interpolate(:default => 'Hi %{name}!') + end + + test "interpolation: given values it interpolates them into the string" do + assert_equal 'Hi David!', interpolate(:default => 'Hi %{name}!', :name => 'David') + end + + test "interpolation: works with a pipe" do + assert_equal 'Hi david!', interpolate(:default => 'Hi %{name|lowercase}!', :'name|lowercase' => 'david') + end + + test "interpolation: given a nil value it still interpolates it into the string" do + assert_equal 'Hi !', interpolate(:default => 'Hi %{name}!', :name => nil) + end + + test "interpolation: given a lambda as a value it calls it if the string contains the key" do + assert_equal 'Hi David!', interpolate(:default => 'Hi %{name}!', :name => lambda { |*args| 'David' }) + end + + test "interpolation: given a lambda as a value it does not call it if the string does not contain the key" do + assert_nothing_raised { interpolate(:default => 'Hi!', :name => lambda { |*args| raise 'fail' }) } + end + + test "interpolation: given values but missing a key it raises I18n::MissingInterpolationArgument" do + assert_raise(I18n::MissingInterpolationArgument) do + interpolate(:default => '%{foo}', :bar => 'bar') + end + end + + test "interpolation: it does not raise I18n::MissingInterpolationArgument for escaped variables" do + assert_nothing_raised do + assert_equal 'Barr %{foo}', interpolate(:default => '%{bar} %%{foo}', :bar => 'Barr') + end + end + + test "interpolation: it does not change the original, stored translation string" do + I18n.backend.store_translations(:en, :interpolate => 'Hi %{name}!') + assert_equal 'Hi David!', interpolate(:interpolate, :name => 'David') + assert_equal 'Hi Yehuda!', interpolate(:interpolate, :name => 'Yehuda') + end + + test "interpolation: given an array interpolates each element" do + I18n.backend.store_translations(:en, :array_interpolate => ['Hi', 'Mr. %{name}', 'or sir %{name}']) + assert_equal ['Hi', 'Mr. 
Bartuz', 'or sir Bartuz'], interpolate(:array_interpolate, :name => 'Bartuz') + end + + test "interpolation: given the translation is in utf-8 it still works" do + assert_equal 'Häi David!', interpolate(:default => 'Häi %{name}!', :name => 'David') + end + + test "interpolation: given the value is in utf-8 it still works" do + assert_equal 'Hi ゆきひろ!', interpolate(:default => 'Hi %{name}!', :name => 'ゆきひろ') + end + + test "interpolation: given the translation and the value are in utf-8 it still works" do + assert_equal 'こんにちは、ゆきひろさん!', interpolate(:default => 'こんにちは、%{name}さん!', :name => 'ゆきひろ') + end + + if Object.const_defined?(:Encoding) + test "interpolation: given a euc-jp translation and a utf-8 value it raises Encoding::CompatibilityError" do + assert_raise(Encoding::CompatibilityError) do + interpolate(:default => euc_jp('こんにちは、%{name}さん!'), :name => 'ゆきひろ') + end + end + + test "interpolation: given a utf-8 translation and a euc-jp value it raises Encoding::CompatibilityError" do + assert_raise(Encoding::CompatibilityError) do + interpolate(:default => 'こんにちは、%{name}さん!', :name => euc_jp('ゆきひろ')) + end + end + + test "interpolation: ASCII strings in the backend should be encoded to UTF8 if interpolation options are in UTF8" do + I18n.backend.store_translations 'en', 'encoding' => ('%{who} let me go'.force_encoding("ASCII")) + result = I18n.t 'encoding', :who => "måmmå miå" + assert_equal Encoding::UTF_8, result.encoding + end + + test "interpolation: UTF8 strings in the backend are still returned as UTF8 with ASCII interpolation" do + I18n.backend.store_translations 'en', 'encoding' => 'måmmå miå %{what}' + result = I18n.t 'encoding', :what => 'let me go'.force_encoding("ASCII") + assert_equal Encoding::UTF_8, result.encoding + end + + test "interpolation: UTF8 strings in the backend are still returned as UTF8 even with numbers interpolation" do + I18n.backend.store_translations 'en', 'encoding' => '%{count} times: måmmå miå' + result = I18n.t 'encoding', :count => 3 + assert_equal Encoding::UTF_8, result.encoding + end + end + + test "interpolation: given a translations containing a reserved key it raises I18n::ReservedInterpolationKey" do + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{exception_handler}') } + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{default}') } + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{separator}') } + assert_raise(I18n::ReservedInterpolationKey) { interpolate(:foo => :bar, :default => '%{scope}') } + end + + test "interpolation: deep interpolation for default string" do + assert_equal 'Hi %{name}!', interpolate(:default => 'Hi %{name}!', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for interpolated string" do + assert_equal 'Hi Ann!', interpolate(:default => 'Hi %{name}!', :name => 'Ann', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for Hash" do + people = { :people => { :ann => 'Ann is %{ann}', :john => 'John is %{john}' } } + interpolated_people = { :people => { :ann => 'Ann is good', :john => 'John is big' } } + assert_equal interpolated_people, interpolate(:default => people, :ann => 'good', :john => 'big', :deep_interpolation => true) + end + + test "interpolation: deep interpolation for Array" do + people = { :people => ['Ann is %{ann}', 'John is %{john}'] } + interpolated_people = { :people => ['Ann is good', 'John is big'] } + assert_equal 
interpolated_people, interpolate(:default => people, :ann => 'good', :john => 'big', :deep_interpolation => true) + end + + protected + + def capture(stream) + begin + stream = stream.to_s + eval "$#{stream} = StringIO.new" + yield + result = eval("$#{stream}").string + ensure + eval("$#{stream} = #{stream.upcase}") + end + + result + end + + def euc_jp(string) + string.encode!(Encoding::EUC_JP) + end + + def interpolate(*args) + options = args.last.is_a?(Hash) ? args.pop : {} + key = args.pop + I18n.backend.translate('en', key, options) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/link.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/link.rb new file mode 100644 index 0000000..d2f20e8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/link.rb @@ -0,0 +1,66 @@ +# encoding: utf-8 + +module I18n + module Tests + module Link + test "linked lookup: if a key resolves to a symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :link => :linked, + :linked => 'linked' + } + assert_equal 'linked', I18n.backend.translate('en', :link) + end + + test "linked lookup: if a key resolves to a dot-separated symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :link => :"foo.linked", + :foo => { :linked => 'linked' } + } + assert_equal('linked', I18n.backend.translate('en', :link)) + end + + test "linked lookup: if a dot-separated key resolves to a symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :foo => { :link => :linked }, + :linked => 'linked' + } + assert_equal('linked', I18n.backend.translate('en', :'foo.link')) + end + + test "linked lookup: if a dot-separated key resolves to a dot-separated symbol it looks up the symbol" do + I18n.backend.store_translations 'en', { + :foo => { :link => :"bar.linked" }, + :bar => { :linked => 'linked' } + } + assert_equal('linked', I18n.backend.translate('en', :'foo.link')) + end + + test "linked lookup: links always refer to the absolute key" do + I18n.backend.store_translations 'en', { + :foo => { :link => :linked, :linked => 'linked in foo' }, + :linked => 'linked absolutely' + } + assert_equal 'linked absolutely', I18n.backend.translate('en', :link, :scope => :foo) + end + + test "linked lookup: a link can resolve to a namespace in the middle of a dot-separated key" do + I18n.backend.store_translations 'en', { + :activemodel => { :errors => { :messages => { :blank => "can't be blank" } } }, + :activerecord => { :errors => { :messages => :"activemodel.errors.messages" } } + } + assert_equal "can't be blank", I18n.t(:"activerecord.errors.messages.blank") + assert_equal "can't be blank", I18n.t(:"activerecord.errors.messages.blank") + end + + test "linked lookup: a link can resolve with option :count" do + I18n.backend.store_translations 'en', { + :counter => :counted, + :counted => { :foo => { :one => "one", :other => "other" }, :bar => "bar" } + } + assert_equal "one", I18n.t(:'counter.foo', count: 1) + assert_equal "other", I18n.t(:'counter.foo', count: 2) + assert_equal "bar", I18n.t(:'counter.bar', count: 3) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization.rb new file mode 100644 index 0000000..53b1502 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization.rb @@ -0,0 +1,19 @@ +module I18n + module Tests + module Localization + autoload :Date, 
'i18n/tests/localization/date' + autoload :DateTime, 'i18n/tests/localization/date_time' + autoload :Time, 'i18n/tests/localization/time' + autoload :Procs, 'i18n/tests/localization/procs' + + def self.included(base) + base.class_eval do + include I18n::Tests::Localization::Date + include I18n::Tests::Localization::DateTime + include I18n::Tests::Localization::Procs + include I18n::Tests::Localization::Time + end + end + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date.rb new file mode 100644 index 0000000..34fd7a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date.rb @@ -0,0 +1,117 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Date + def setup + super + setup_date_translations + @date = ::Date.new(2008, 3, 1) + end + + test "localize Date: given the short format it uses it" do + assert_equal '01. Mär', I18n.l(@date, :format => :short, :locale => :de) + end + + test "localize Date: given the long format it uses it" do + assert_equal '01. März 2008', I18n.l(@date, :format => :long, :locale => :de) + end + + test "localize Date: given the default format it uses it" do + assert_equal '01.03.2008', I18n.l(@date, :format => :default, :locale => :de) + end + + test "localize Date: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@date, :format => '%A', :locale => :de) + end + + test "localize Date: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@date, :format => '%^A', :locale => :de) + end + + test "localize Date: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@date, :format => '%a', :locale => :de) + end + + test "localize Date: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@date, :format => '%^a', :locale => :de) + end + + test "localize Date: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@date, :format => '%B', :locale => :de) + end + + test "localize Date: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@date, :format => '%^B', :locale => :de) + end + + test "localize Date: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@date, :format => '%b', :locale => :de) + end + + test "localize Date: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@date, :format => '%^b', :locale => :de) + end + + test "localize Date: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::Date.new(2008, 2, 1), :format => "%-d. 
%^B %Y", :locale => :de) + end + + test "localize Date: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@date, :format => '%b', :locale => :fr) + end + + test "localize Date: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@date, :format => '%x') } + end + + test "localize Date: does not modify the options hash" do + options = { :format => '%b', :locale => :de } + assert_equal 'Mär', I18n.l(@date, **options) + assert_equal({ :format => '%b', :locale => :de }, options) + assert_nothing_raised { I18n.l(@date, **options.freeze) } + end + + test "localize Date: given nil with default value it returns default" do + assert_equal 'default', I18n.l(nil, :default => 'default') + end + + test "localize Date: given nil it raises I18n::ArgumentError" do + assert_raise(I18n::ArgumentError) { I18n.l(nil) } + end + + test "localize Date: given a plain Object it raises I18n::ArgumentError" do + assert_raise(I18n::ArgumentError) { I18n.l(Object.new) } + end + + test "localize Date: given a format is missing it raises I18n::MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.l(@date, :format => :missing) } + end + + test "localize Date: it does not alter the format string" do + assert_equal '01. Februar 2009', I18n.l(::Date.parse('2009-02-01'), :format => :long, :locale => :de) + assert_equal '01. Oktober 2009', I18n.l(::Date.parse('2009-10-01'), :format => :long, :locale => :de) + end + + protected + + def setup_date_translations + I18n.backend.store_translations :de, { + :date => { + :formats => { + :default => "%d.%m.%Y", + :short => "%d. %b", + :long => "%d. %B %Y", + }, + :day_names => %w(Sonntag Montag Dienstag Mittwoch Donnerstag Freitag Samstag), + :abbr_day_names => %w(So Mo Di Mi Do Fr Sa), + :month_names => %w(Januar Februar März April Mai Juni Juli August September Oktober November Dezember).unshift(nil), + :abbr_month_names => %w(Jan Feb Mär Apr Mai Jun Jul Aug Sep Okt Nov Dez).unshift(nil) + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date_time.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date_time.rb new file mode 100644 index 0000000..a4ed9ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/date_time.rb @@ -0,0 +1,103 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module DateTime + def setup + super + setup_datetime_translations + @datetime = ::DateTime.new(2008, 3, 1, 6) + @other_datetime = ::DateTime.new(2008, 3, 1, 18) + end + + test "localize DateTime: given the short format it uses it" do + assert_equal '01. Mär 06:00', I18n.l(@datetime, :format => :short, :locale => :de) + end + + test "localize DateTime: given the long format it uses it" do + assert_equal '01. März 2008 06:00', I18n.l(@datetime, :format => :long, :locale => :de) + end + + test "localize DateTime: given the default format it uses it" do + assert_equal 'Sa, 01. 
Mär 2008 06:00:00 +0000', I18n.l(@datetime, :format => :default, :locale => :de) + end + + test "localize DateTime: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@datetime, :format => '%A', :locale => :de) + end + + test "localize DateTime: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@datetime, :format => '%^A', :locale => :de) + end + + test "localize DateTime: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@datetime, :format => '%a', :locale => :de) + end + + test "localize DateTime: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@datetime, :format => '%^a', :locale => :de) + end + + test "localize DateTime: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@datetime, :format => '%B', :locale => :de) + end + + test "localize DateTime: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@datetime, :format => '%^B', :locale => :de) + end + + test "localize DateTime: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@datetime, :format => '%b', :locale => :de) + end + + test "localize DateTime: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@datetime, :format => '%^b', :locale => :de) + end + + test "localize DateTime: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::DateTime.new(2008, 2, 1, 6), :format => "%-d. %^B %Y", :locale => :de) + end + + test "localize DateTime: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@datetime, :format => '%b', :locale => :fr) + end + + test "localize DateTime: given a meridian indicator format it returns the correct meridian indicator" do + assert_equal 'AM', I18n.l(@datetime, :format => '%p', :locale => :de) + assert_equal 'PM', I18n.l(@other_datetime, :format => '%p', :locale => :de) + end + + test "localize DateTime: given a meridian indicator format it returns the correct meridian indicator in downcase" do + assert_equal 'am', I18n.l(@datetime, :format => '%P', :locale => :de) + assert_equal 'pm', I18n.l(@other_datetime, :format => '%P', :locale => :de) + end + + test "localize DateTime: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@datetime, :format => '%x') } + end + + test "localize DateTime: given a format is missing it raises I18n::MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.l(@datetime, :format => :missing) } + end + + protected + + def setup_datetime_translations + # time translations might have been set up in Tests::Api::Localization::Time + I18n.backend.store_translations :de, { + :time => { + :formats => { + :default => "%a, %d. %b %Y %H:%M:%S %z", + :short => "%d. %b %H:%M", + :long => "%d. 
%B %Y %H:%M" + }, + :am => 'am', + :pm => 'pm' + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/procs.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/procs.rb new file mode 100644 index 0000000..5f3e9bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/procs.rb @@ -0,0 +1,117 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Procs + test "localize: using day names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/Суббота/, I18n.l(time, :format => "%A, %d %B", :locale => :ru)) + assert_match(/суббота/, I18n.l(time, :format => "%d %B (%A)", :locale => :ru)) + end + + test "localize: using month names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/марта/, I18n.l(time, :format => "%d %B %Y", :locale => :ru)) + assert_match(/Март /, I18n.l(time, :format => "%B %Y", :locale => :ru)) + end + + test "localize: using abbreviated day names from lambdas" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_match(/марта/, I18n.l(time, :format => "%d %b %Y", :locale => :ru)) + assert_match(/март /, I18n.l(time, :format => "%b %Y", :locale => :ru)) + end + + test "localize Date: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + date = ::Date.new(2008, 3, 1) + assert_equal '[Sat, 01 Mar 2008, {}]', I18n.l(date, :format => :proc, :locale => :ru) + end + + test "localize Date: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + date = ::Date.new(2008, 3, 1) + assert_equal '[Sat, 01 Mar 2008, {:foo=>"foo"}]', I18n.l(date, :format => :proc, :foo => 'foo', :locale => :ru) + end + + test "localize DateTime: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + datetime = ::DateTime.new(2008, 3, 1, 6) + assert_equal '[Sat, 01 Mar 2008 06:00:00 +00:00, {}]', I18n.l(datetime, :format => :proc, :locale => :ru) + end + + test "localize DateTime: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + datetime = ::DateTime.new(2008, 3, 1, 6) + assert_equal '[Sat, 01 Mar 2008 06:00:00 +00:00, {:foo=>"foo"}]', I18n.l(datetime, :format => :proc, :foo => 'foo', :locale => :ru) + end + + test "localize Time: given a format that resolves to a Proc it calls the Proc with the object" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + assert_equal I18n::Tests::Localization::Procs.inspect_args([time], {}), I18n.l(time, :format => :proc, :locale => :ru) + end + + test "localize Time: given a format that resolves to a Proc it calls the Proc with the object and extra options" do + setup_time_proc_translations + time = ::Time.utc(2008, 3, 1, 6, 0) + options = { :foo => 'foo' } + assert_equal I18n::Tests::Localization::Procs.inspect_args([time], options), I18n.l(time, **options.merge(:format => :proc, :locale => :ru)) + end + + protected + + def self.inspect_args(args, kwargs) + args << kwargs + args = args.map do |arg| + case arg + when ::Time, ::DateTime + arg.strftime('%a, %d %b %Y %H:%M:%S %Z').sub('+0000', '+00:00') + when ::Date + arg.strftime('%a, %d %b %Y') + when Hash + arg.delete(:fallback_in_progress) + arg.inspect + else + arg.inspect + end 
+ end + "[#{args.join(', ')}]" + end + + def setup_time_proc_translations + I18n.backend.store_translations :ru, { + :time => { + :formats => { + :proc => lambda { |*args, **kwargs| I18n::Tests::Localization::Procs.inspect_args(args, kwargs) } + } + }, + :date => { + :formats => { + :proc => lambda { |*args, **kwargs| I18n::Tests::Localization::Procs.inspect_args(args, kwargs) } + }, + :'day_names' => lambda { |key, options| + (options[:format] =~ /^%A/) ? + %w(Воскресенье Понедельник Вторник Среда Четверг Пятница Суббота) : + %w(воскресенье понедельник вторник среда четверг пятница суббота) + }, + :'month_names' => lambda { |key, options| + (options[:format] =~ /(%d|%e)(\s*)?(%B)/) ? + %w(января февраля марта апреля мая июня июля августа сентября октября ноября декабря).unshift(nil) : + %w(Январь Февраль Март Апрель Май Июнь Июль Август Сентябрь Октябрь Ноябрь Декабрь).unshift(nil) + }, + :'abbr_month_names' => lambda { |key, options| + (options[:format] =~ /(%d|%e)(\s*)(%b)/) ? + %w(янв. февр. марта апр. мая июня июля авг. сент. окт. нояб. дек.).unshift(nil) : + %w(янв. февр. март апр. май июнь июль авг. сент. окт. нояб. дек.).unshift(nil) + }, + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/time.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/time.rb new file mode 100644 index 0000000..6928791 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/localization/time.rb @@ -0,0 +1,103 @@ +# encoding: utf-8 + +module I18n + module Tests + module Localization + module Time + def setup + super + setup_time_translations + @time = ::Time.utc(2008, 3, 1, 6, 0) + @other_time = ::Time.utc(2008, 3, 1, 18, 0) + end + + test "localize Time: given the short format it uses it" do + assert_equal '01. Mär 06:00', I18n.l(@time, :format => :short, :locale => :de) + end + + test "localize Time: given the long format it uses it" do + assert_equal '01. März 2008 06:00', I18n.l(@time, :format => :long, :locale => :de) + end + + # TODO Seems to break on Windows because ENV['TZ'] is ignored. What's a better way to do this? + # def test_localize_given_the_default_format_it_uses_it + # assert_equal 'Sa, 01. 
Mar 2008 06:00:00 +0000', I18n.l(@time, :format => :default, :locale => :de) + # end + + test "localize Time: given a day name format it returns the correct day name" do + assert_equal 'Samstag', I18n.l(@time, :format => '%A', :locale => :de) + end + + test "localize Time: given a uppercased day name format it returns the correct day name in upcase" do + assert_equal 'samstag'.upcase, I18n.l(@time, :format => '%^A', :locale => :de) + end + + test "localize Time: given an abbreviated day name format it returns the correct abbreviated day name" do + assert_equal 'Sa', I18n.l(@time, :format => '%a', :locale => :de) + end + + test "localize Time: given an abbreviated and uppercased day name format it returns the correct abbreviated day name in upcase" do + assert_equal 'sa'.upcase, I18n.l(@time, :format => '%^a', :locale => :de) + end + + test "localize Time: given a month name format it returns the correct month name" do + assert_equal 'März', I18n.l(@time, :format => '%B', :locale => :de) + end + + test "localize Time: given a uppercased month name format it returns the correct month name in upcase" do + assert_equal 'märz'.upcase, I18n.l(@time, :format => '%^B', :locale => :de) + end + + test "localize Time: given an abbreviated month name format it returns the correct abbreviated month name" do + assert_equal 'Mär', I18n.l(@time, :format => '%b', :locale => :de) + end + + test "localize Time: given an abbreviated and uppercased month name format it returns the correct abbreviated month name in upcase" do + assert_equal 'mär'.upcase, I18n.l(@time, :format => '%^b', :locale => :de) + end + + test "localize Time: given a date format with the month name upcased it returns the correct value" do + assert_equal '1. FEBRUAR 2008', I18n.l(::Time.utc(2008, 2, 1, 6, 0), :format => "%-d. %^B %Y", :locale => :de) + end + + test "localize Time: given missing translations it returns the correct error message" do + assert_equal 'translation missing: fr.date.abbr_month_names', I18n.l(@time, :format => '%b', :locale => :fr) + end + + test "localize Time: given a meridian indicator format it returns the correct meridian indicator" do + assert_equal 'AM', I18n.l(@time, :format => '%p', :locale => :de) + assert_equal 'PM', I18n.l(@other_time, :format => '%p', :locale => :de) + end + + test "localize Time: given a meridian indicator format it returns the correct meridian indicator in upcase" do + assert_equal 'am', I18n.l(@time, :format => '%P', :locale => :de) + assert_equal 'pm', I18n.l(@other_time, :format => '%P', :locale => :de) + end + + test "localize Time: given an unknown format it does not fail" do + assert_nothing_raised { I18n.l(@time, :format => '%x') } + end + + test "localize Time: given a format is missing it raises I18n::MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.l(@time, :format => :missing) } + end + + protected + + def setup_time_translations + I18n.backend.store_translations :de, { + :time => { + :formats => { + :default => "%a, %d. %b %Y %H:%M:%S %z", + :short => "%d. %b %H:%M", + :long => "%d. 
%B %Y %H:%M", + }, + :am => 'am', + :pm => 'pm' + } + } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/lookup.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/lookup.rb new file mode 100644 index 0000000..61e16cd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/lookup.rb @@ -0,0 +1,81 @@ +# encoding: utf-8 + +module I18n + module Tests + module Lookup + def setup + super + I18n.backend.store_translations(:en, :foo => { :bar => 'bar', :baz => 'baz' }, :falsy => false, :truthy => true, + :string => "a", :array => %w(a b c), :hash => { "a" => "b" }) + end + + test "lookup: it returns a string" do + assert_equal("a", I18n.t(:string)) + end + + test "lookup: it returns hash" do + assert_equal({ :a => "b" }, I18n.t(:hash)) + end + + test "lookup: it returns an array" do + assert_equal(%w(a b c), I18n.t(:array)) + end + + test "lookup: it returns a native true" do + assert I18n.t(:truthy) === true + end + + test "lookup: it returns a native false" do + assert I18n.t(:falsy) === false + end + + test "lookup: given a missing key, no default and no raise option it returns an error message" do + assert_equal "translation missing: en.missing", I18n.t(:missing) + end + + test "lookup: given a missing key, no default and the raise option it raises MissingTranslationData" do + assert_raise(I18n::MissingTranslationData) { I18n.t(:missing, :raise => true) } + end + + test "lookup: does not raise an exception if no translation data is present for the given locale" do + assert_nothing_raised { I18n.t(:foo, :locale => :xx) } + end + + test "lookup: does not modify the options hash" do + options = {} + assert_equal "a", I18n.t(:string, **options) + assert_equal({}, options) + assert_nothing_raised { I18n.t(:string, **options.freeze) } + end + + test "lookup: given an array of keys it translates all of them" do + assert_equal %w(bar baz), I18n.t([:bar, :baz], :scope => [:foo]) + end + + test "lookup: using a custom scope separator" do + # data must have been stored using the custom separator when using the ActiveRecord backend + I18n.backend.store_translations(:en, { :foo => { :bar => 'bar' } }, { :separator => '|' }) + assert_equal 'bar', I18n.t('foo|bar', :separator => '|') + end + + # In fact it probably *should* fail but Rails currently relies on using the default locale instead. + # So we'll stick to this for now until we get it fixed in Rails. + test "lookup: given nil as a locale it does not raise but use the default locale" do + # assert_raise(I18n::InvalidLocale) { I18n.t(:bar, :locale => nil) } + assert_nothing_raised { I18n.t(:bar, :locale => nil) } + end + + test "lookup: a resulting String is not frozen" do + assert !I18n.t(:string).frozen? + end + + test "lookup: a resulting Array is not frozen" do + assert !I18n.t(:array).frozen? + end + + test "lookup: a resulting Hash is not frozen" do + assert !I18n.t(:hash).frozen? 
+ end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/pluralization.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/pluralization.rb new file mode 100644 index 0000000..d3319dc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/pluralization.rb @@ -0,0 +1,35 @@ +# encoding: utf-8 + +module I18n + module Tests + module Pluralization + test "pluralization: given 0 it returns the :zero translation if it is defined" do + assert_equal 'zero', I18n.t(:default => { :zero => 'zero' }, :count => 0) + end + + test "pluralization: given 0 it returns the :other translation if :zero is not defined" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 0) + end + + test "pluralization: given 1 it returns the singular translation" do + assert_equal 'bar', I18n.t(:default => { :one => 'bar' }, :count => 1) + end + + test "pluralization: given 2 it returns the :other translation" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 2) + end + + test "pluralization: given 3 it returns the :other translation" do + assert_equal 'bars', I18n.t(:default => { :other => 'bars' }, :count => 3) + end + + test "pluralization: given nil it returns the whole entry" do + assert_equal({ :one => 'bar' }, I18n.t(:default => { :one => 'bar' }, :count => nil)) + end + + test "pluralization: given incomplete pluralization data it raises I18n::InvalidPluralizationData" do + assert_raise(I18n::InvalidPluralizationData) { I18n.t(:default => { :one => 'bar' }, :count => 2) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/procs.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/procs.rb new file mode 100644 index 0000000..dad76de --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/tests/procs.rb @@ -0,0 +1,60 @@ +# encoding: utf-8 + +module I18n + module Tests + module Procs + test "lookup: given a translation is a proc it calls the proc with the key and interpolation values" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(:a_lambda, :foo => 'foo') + end + + test "lookup: given a translation is a proc it passes the interpolation values as keyword arguments" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |key, foo:, **| I18n::Tests::Procs.filter_args(key, foo: foo) }) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(:a_lambda, :foo => 'foo') + end + + test "defaults: given a default is a Proc it calls it with the key and interpolation values" do + proc = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + assert_equal '[nil, {:foo=>"foo"}]', I18n.t(nil, :default => proc, :foo => 'foo') + end + + test "defaults: given a default is a key that resolves to a Proc it calls it with the key and interpolation values" do + the_lambda = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + I18n.backend.store_translations(:en, :a_lambda => the_lambda) + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(nil, :default => :a_lambda, :foo => 'foo') + assert_equal '[:a_lambda, {:foo=>"foo"}]', I18n.t(nil, :default => [nil, :a_lambda], :foo => 'foo') + end + + test "interpolation: given an interpolation value is a lambda it calls it with key and values before interpolating it" do + proc = lambda { |*args| I18n::Tests::Procs.filter_args(*args) } + assert_match %r(\[\{:foo=>#\}\]), I18n.t(nil, :default => 
'%{foo}', :foo => proc) + end + + test "interpolation: given a key resolves to a Proc that returns a string then interpolation still works" do + proc = lambda { |*args| "%{foo}: " + I18n::Tests::Procs.filter_args(*args) } + assert_equal 'foo: [nil, {:foo=>"foo"}]', I18n.t(nil, :default => proc, :foo => 'foo') + end + + test "pluralization: given a key resolves to a Proc that returns valid data then pluralization still works" do + proc = lambda { |*args| { :zero => 'zero', :one => 'one', :other => 'other' } } + assert_equal 'zero', I18n.t(:default => proc, :count => 0) + assert_equal 'one', I18n.t(:default => proc, :count => 1) + assert_equal 'other', I18n.t(:default => proc, :count => 2) + end + + test "lookup: given the option :resolve => false was passed it does not resolve proc translations" do + I18n.backend.store_translations(:en, :a_lambda => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }) + assert_equal Proc, I18n.t(:a_lambda, :resolve => false).class + end + + test "lookup: given the option :resolve => false was passed it does not resolve proc default" do + assert_equal Proc, I18n.t(nil, :default => lambda { |*args| I18n::Tests::Procs.filter_args(*args) }, :resolve => false).class + end + + + def self.filter_args(*args) + args.map {|arg| arg.delete(:fallback_in_progress) if arg.is_a?(Hash) ; arg }.inspect + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/version.rb b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/version.rb new file mode 100644 index 0000000..3781c93 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/i18n-1.8.5/lib/i18n/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module I18n + VERSION = "1.8.5" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/.gitignore b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/.gitignore new file mode 100644 index 0000000..e8b1a52 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/.gitignore @@ -0,0 +1,18 @@ +.*.sw[pon] +*.bundle +coverage +tags +pkg +.nfs.* +.idea +java/Json.iml +Gemfile.lock +.rvmrc +*.rbc +.rbx +.AppleDouble +.DS_Store +*/**/Makefile +*/**/*.o +.byebug_history +*.log diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/.travis.yml new file mode 100644 index 0000000..920a4ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/.travis.yml @@ -0,0 +1,26 @@ +# Passes arguments to bundle install (http://gembundler.com/man/bundle-install.1.html) +#bundler_args: --binstubs +language: ruby + +# Specify which ruby versions you wish to run your tests on, each version will be used +rvm: + - 2.0.0 + - 2.1 + - 2.2 + - 2.3 + - 2.4 + - 2.5 + - 2.6 + - 2.7.0-preview3 + - ruby-head + - jruby + - jruby-9.2.7.0 + - truffleruby +matrix: + allow_failures: + - rvm: ruby-head + - rvm: jruby + - rvm: jruby-9.2.7.0 + - rvm: truffleruby +script: "bundle exec rake" +sudo: false diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/CHANGES.md b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/CHANGES.md new file mode 100644 index 0000000..0d39f21 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/CHANGES.md @@ -0,0 +1,424 @@ +# Changes + +## 2020-06-30 (2.3.1) + +* Spelling and grammar fixes for comments. Pull request #191 by Josh + Kline. +* Enhance generic JSON and #generate docs. Pull request #347 by Victor + Shepelev. +* Add :nodoc: for GeneratorMethods. Pull request #349 by Victor Shepelev. +* Baseline changes to help (JRuby) development. Pull request #371 by Karol + Bucek. 
+* Add metadata for rubygems.org. Pull request #379 by Alexandre ZANNI. +* Remove invalid JSON.generate description from JSON module rdoc. Pull + request #384 by Jeremy Evans. +* Test with TruffleRuby in CI. Pull request #402 by Benoit Daloze. +* Rdoc enhancements. Pull request #413 by Burdette Lamar. +* Fixtures/ are not being tested... Pull request #416 by Marc-André + Lafortune. +* Use frozen string for hash key. Pull request #420 by Marc-André + Lafortune. +* Added :call-seq: to RDoc for some methods. Pull request #422 by Burdette + Lamar. +* Small typo fix. Pull request #423 by Marc-André Lafortune. + +## 2019-12-11 (2.3.0) + * Fix default of `create_additions` to always be `false` for `JSON(user_input)` + and `JSON.parse(user_input, nil)`. + Note that `JSON.load` remains with default `true` and is meant for internal + serialization of trusted data. [CVE-2020-10663] + * Fix passing args all #to_json in json/add/*. + * Fix encoding issues + * Fix issues of keyword vs positional parameter + * Fix JSON::Parser against bigdecimal updates + * Bug fixes to JRuby port + +## 2019-02-21 (2.2.0) + * Adds support for 2.6 BigDecimal and ruby standard library Set datetype. + +## 2017-04-18 (2.1.0) + * Allow passing of `decimal_class` option to specify a class as which to parse + JSON float numbers. +## 2017-03-23 (2.0.4) + * Raise exception for incomplete unicode surrogates/character escape + sequences. This problem was reported by Daniel Gollahon (dgollahon). + * Fix arbitrary heap exposure problem. This problem was reported by Ahmad + Sherif (ahmadsherif). + +## 2017-01-12 (2.0.3) + * Set `required_ruby_version` to 1.9 + * Some small fixes + +## 2016-07-26 (2.0.2) + * Specify `required_ruby_version` for json\_pure. + * Fix issue #295 failure when parsing frozen strings. + +## 2016-07-01 (2.0.1) + * Fix problem when requiring json\_pure and Parser constant was defined top + level. + * Add `RB_GC_GUARD` to avoid possible GC problem via Pete Johns. + * Store `current_nesting` on stack by Aaron Patterson. + +## 2015-09-11 (2.0.0) + * Now complies to newest JSON RFC 7159. + * Implements compatibiliy to ruby 2.4 integer unification. + * Drops support for old rubies whose life has ended, that is rubies < 2.0. + Also see https://www.ruby-lang.org/en/news/2014/07/01/eol-for-1-8-7-and-1-9-2/ + * There were still some mentions of dual GPL licensing in the source, but JSON + has just the Ruby license that itself includes an explicit dual-licensing + clause that allows covered software to be distributed under the terms of + the Simplified BSD License instead for all ruby versions >= 1.9.3. This is + however a GPL compatible license according to the Free Software Foundation. + I changed these mentions to be consistent with the Ruby license setting in + the gemspec files which were already correct now. + +## 2015-06-01 (1.8.3) + * Fix potential memory leak, thx to nobu. + +## 2015-01-08 (1.8.2) + * Some performance improvements by Vipul A M . + * Fix by Jason R. Clark to avoid mutation of + `JSON.dump_default_options`. + * More tests by Michael Mac-Vicar and fixing + `space_before` accessor in generator. + * Performance on Jruby improved by Ben Browning . + * Some fixes to be compatible with the new Ruby 2.2 by Zachary Scott + and SHIBATA Hiroshi . + +## 2013-05-13 (1.8.1) + * Remove Rubinius exception since transcoding should be working now. + +## 2013-05-13 (1.8.0) + * Fix https://github.com/flori/json/issues/162 reported by Marc-Andre + Lafortune . Thanks! 
+ * Applied patches by Yui NARUSE to suppress warning with + -Wchar-subscripts and better validate UTF-8 strings. + * Applied patch by ginriki@github to remove unnecessary if. + * Add load/dump interface to `JSON::GenericObject` to make + serialize :some_attribute, `JSON::GenericObject` + work in Rails active models for convenient `SomeModel#some_attribute.foo.bar` + access to serialised JSON data. + +## 2013-02-04 (1.7.7) + * Security fix for JSON create_additions default value and + `JSON::GenericObject`. It should not be possible to create additions unless + explicitely requested by setting the create_additions argument to true or + using the JSON.load/dump interface. If `JSON::GenericObject` is supposed to + be automatically deserialised, this has to be explicitely enabled by + setting + JSON::GenericObject.json_creatable = true + as well. + * Remove useless assert in fbuffer implementation. + * Apply patch attached to https://github.com/flori/json/issues#issue/155 + provided by John Shahid , Thx! + * Add license information to rubygems spec data, reported by Jordi Massaguer Pla . + * Improve documentation, thx to Zachary Scott . + +## 2012-11-29 (1.7.6) + * Add `GeneratorState#merge` alias for JRuby, fix state accessor methods. Thx to + jvshahid@github. + * Increase hash likeness of state objects. + +## 2012-08-17 (1.7.5) + * Fix compilation of extension on older rubies. + +## 2012-07-26 (1.7.4) + * Fix compilation problem on AIX, see https://github.com/flori/json/issues/142 + +## 2012-05-12 (1.7.3) + * Work around Rubinius encoding issues using iconv for conversion instead. + +## 2012-05-11 (1.7.2) + * Fix some encoding issues, that cause problems for the pure and the + extension variant in jruby 1.9 mode. + +## 2012-04-28 (1.7.1) + * Some small fixes for building + +## 2012-04-28 (1.7.0) + * Add `JSON::GenericObject` for method access to objects transmitted via JSON. + +## 2012-04-27 (1.6.7) + * Fix possible crash when trying to parse nil value. + +## 2012-02-11 (1.6.6) + * Propagate src encoding to values made from it (fixes 1.9 mode converting + everything to ascii-8bit; harmless for 1.8 mode too) (Thomas E. Enebo + ), should fix + https://github.com/flori/json/issues#issue/119. + * Fix https://github.com/flori/json/issues#issue/124 Thx to Jason Hutchens. + * Fix https://github.com/flori/json/issues#issue/117 + +## 2012-01-15 (1.6.5) + * Vit Ondruch reported a bug that shows up when using + optimisation under GCC 4.7. Thx to him, Bohuslav Kabrda + and Yui NARUSE for debugging and + developing a patch fix. + +## 2011-12-24 (1.6.4) + * Patches that improve speed on JRuby contributed by Charles Oliver Nutter + . + * Support `object_class`/`array_class` with duck typed hash/array. + +## 2011-12-01 (1.6.3) + * Let `JSON.load('')` return nil as well to make mysql text columns (default to + `''`) work better for serialization. + +## 2011-11-21 (1.6.2) + * Add support for OpenStruct and BigDecimal. + * Fix bug when parsing nil in `quirks_mode`. + * Make JSON.dump and JSON.load methods better cooperate with Rails' serialize + method. Just use: + serialize :value, JSON + * Fix bug with time serialization concerning nanoseconds. Thanks for the + patch go to Josh Partlow (jpartlow@github). + * Improve parsing speed for JSON numbers (integers and floats) in a similar way to + what Evan Phoenix suggested in: + https://github.com/flori/json/pull/103 + +## 2011-09-18 (1.6.1) + * Using -target 1.5 to force Java bits to compile with 1.5. 
+ +## 2011-09-12 (1.6.0) + * Extract utilities (prettifier and GUI-editor) in its own gem json-utils. + * Split json/add/core into different files for classes to be serialised. + +## 2011-08-31 (1.5.4) + * Fix memory leak when used from multiple JRuby. (Patch by + jfirebaugh@github). + * Apply patch by Eric Wong that fixes garbage collection problem + reported in https://github.com/flori/json/issues/46. + * Add :quirks_mode option to parser and generator. + * Add support for Rational and Complex number additions via json/add/complex + and json/add/rational requires. + +## 2011-06-20 (1.5.3) + * Alias State#configure method as State#merge to increase duck type synonymy with Hash. + * Add `as_json` methods in json/add/core, so rails can create its json objects + the new way. + +## 2011-05-11 (1.5.2) + * Apply documentation patch by Cory Monty . + * Add gemspecs for json and json\_pure. + * Fix bug in jruby pretty printing. + * Fix bug in `object_class` and `array_class` when inheriting from Hash or + Array. + +## 2011-01-24 (1.5.1) + * Made rake-compiler build a fat binary gem. This should fix issue + https://github.com/flori/json/issues#issue/54. + +## 2011-01-22 (1.5.0) + * Included Java source codes for the Jruby extension made by Daniel Luz + . + * Output full exception message of `deep_const_get` to aid debugging. + * Fixed an issue with ruby 1.9 `Module#const_defined?` method, that was + reported by Riley Goodside. + +## 2010-08-09 (1.4.6) + * Fixed oversight reported in http://github.com/flori/json/issues/closed#issue/23, + always create a new object from the state prototype. + * Made pure and ext api more similar again. + +## 2010-08-07 (1.4.5) + * Manage data structure nesting depth in state object during generation. This + should reduce problems with `to_json` method definіtions that only have one + argument. + * Some fixes in the state objects and additional tests. +## 2010-08-06 (1.4.4) + * Fixes build problem for rubinius under OS X, http://github.com/flori/json/issues/closed#issue/25 + * Fixes crashes described in http://github.com/flori/json/issues/closed#issue/21 and + http://github.com/flori/json/issues/closed#issue/23 +## 2010-05-05 (1.4.3) + * Fixed some test assertions, from Ruby r27587 and r27590, patch by nobu. + * Fixed issue http://github.com/flori/json/issues/#issue/20 reported by + electronicwhisper@github. Thx! + +## 2010-04-26 (1.4.2) + * Applied patch from naruse Yui NARUSE to make building with + Microsoft Visual C possible again. + * Applied patch from devrandom in order to allow building of + json_pure if extensiontask is not present. + * Thanks to Dustin Schneider , who reported a memory + leak, which is fixed in this release. + * Applied 993f261ccb8f911d2ae57e9db48ec7acd0187283 patch from josh@github. + +## 2010-04-25 (1.4.1) + * Fix for a bug reported by Dan DeLeo , caused by T_FIXNUM + being different on 32bit/64bit architectures. + +## 2010-04-23 (1.4.0) + * Major speed improvements and building with simplified + directory/file-structure. + * Extension should at least be comapatible with MRI, YARV and Rubinius. + +## 2010-04-07 (1.2.4) + * Triger const_missing callback to make Rails' dynamic class loading work. + +## 2010-03-11 (1.2.3) + * Added a `State#[]` method which returns an attribute's value in order to + increase duck type compatibility to Hash. + +## 2010-02-27 (1.2.2) + * Made some changes to make the building of the parser/generator compatible + to Rubinius. 
+ +## 2009-11-25 (1.2.1) + * Added `:symbolize_names` option to Parser, which returns symbols instead of + strings in object names/keys. + +## 2009-10-01 (1.2.0) + * `fast_generate` now raises an exeception for nan and infinite floats. + * On Ruby 1.8 json supports parsing of UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, + and UTF-32LE JSON documents now. Under Ruby 1.9 the M17n conversion + functions are used to convert from all supported encodings. ASCII-8BIT + encoded strings are handled like all strings under Ruby 1.8 were. + * Better documentation + +## 2009-08-23 (1.1.9) + * Added forgotten main doc file `extra_rdoc_files`. + +## 2009-08-23 (1.1.8) + * Applied a patch by OZAWA Sakuro to make json/pure + work in environments that don't provide iconv. + * Applied patch by okkez_ in order to fix Ruby Bug #1768: + http://redmine.ruby-lang.org/issues/show/1768. + * Finally got around to avoid the rather paranoid escaping of ?/ characters + in the generator's output. The parsers aren't affected by this change. + Thanks to Rich Apodaca for the suggestion. + +## 2009-06-29 (1.1.7) + * Security Fix for JSON::Pure::Parser. A specially designed string could + cause catastrophic backtracking in one of the parser's regular expressions + in earlier 1.1.x versions. JSON::Ext::Parser isn't affected by this issue. + Thanks to Bartosz Blimke for reporting this + problem. + * This release also uses a less strict ruby version requirement for the + creation of the mswin32 native gem. + +## 2009-05-10 (1.1.6) + * No changes. І tested native linux gems in the last release and they don't + play well with different ruby versions other than the one the gem was built + with. This release is just to bump the version number in order to skip the + native gem on rubyforge. + +## 2009-05-10 (1.1.5) + * Started to build gems with rake-compiler gem. + * Applied patch object/array class patch from Brian Candler + and fixes. + +## 2009-04-01 (1.1.4) + * Fixed a bug in the creation of serialized generic rails objects reported by + Friedrich Graeter . + * Deleted tests/runner.rb, we're using testrb instead. + * Editor supports Infinity in numbers now. + * Made some changes in order to get the library to compile/run under Ruby + 1.9. + * Improved speed of the code path for the fast_generate method in the pure + variant. + +## 2008-07-10 (1.1.3) + * Wesley Beary reported a bug in json/add/core's DateTime + handling: If the nominator and denominator of the offset were divisible by + each other Ruby's Rational#to_s returns them as an integer not a fraction + with '/'. This caused a ZeroDivisionError during parsing. + * Use Date#start and DateTime#start instead of sg method, while + remaining backwards compatible. + * Supports ragel >= 6.0 now. + * Corrected some tests. + * Some minor changes. + +## 2007-11-27 (1.1.2) + * Remember default dir (last used directory) in editor. + * JSON::Editor.edit method added, the editor can now receive json texts from + the clipboard via C-v. + * Load json texts from an URL pasted via middle button press. + * Added :create_additions option to Parser. This makes it possible to disable + the creation of additions by force, in order to treat json texts as data + while having additions loaded. + * Jacob Maine reported, that JSON(:foo) outputs a JSON + object if the rails addition is enabled, which is wrong. It now outputs a + JSON string "foo" instead, like suggested by Jacob Maine. + * Discovered a bug in the Ruby Bugs Tracker on rubyforge, that was reported + by John Evans lgastako@gmail.com. 
He could produce a crash in the JSON + generator by returning something other than a String instance from a + to_json method. I now guard against this by doing a rather crude type + check, which raises an exception instead of crashing. + +## 2007-07-06 (1.1.1) + * Yui NARUSE sent some patches to fix tests for Ruby + 1.9. I applied them and adapted some of them a bit to run both on 1.8 and + 1.9. + * Introduced a `JSON.parse!` method without depth checking for people who + like danger. + * Made generate and `pretty_generate` methods configurable by an options hash. + * Added :allow_nan option to parser and generator in order to handle NaN, + Infinity, and -Infinity correctly - if requested. Floats, which aren't numbers, + aren't valid JSON according to RFC4627, so by default an exception will be + raised if any of these symbols are encountered. Thanks to Andrea Censi + for his hint about this. + * Fixed some more tests for Ruby 1.9. + * Implemented dump/load interface of Marshal as suggested in ruby-core:11405 + by murphy . + * Implemented the `max_nesting` feature for generate methods, too. + * Added some implementations for ruby core's custom objects for + serialisation/deserialisation purposes. + +## 2007-05-21 (1.1.0) + * Implemented max_nesting feature for parser to avoid stack overflows for + data from untrusted sources. If you trust the source, you can disable it + with the option max_nesting => false. + * Piers Cawley reported a bug, that not every + character can be escaped by `\` as required by RFC4627. There's a + contradiction between David Crockford's JSON checker test vectors (in + tests/fixtures) and RFC4627, though. I decided to stick to the RFC, because + the JSON checker seems to be a bit older than the RFC. + * Extended license to Ruby License, which includes the GPL. + * Added keyboard shortcuts, and 'Open location' menu item to edit_json.rb. + +## 2007-05-09 (1.0.4) + * Applied a patch from Yui NARUSE to make JSON compile + under Ruby 1.9. Thank you very much for mailing it to me! + * Made binary variants of JSON fail early, instead of falling back to the + pure version. This should avoid overshadowing of eventual problems while + loading of the binary. + +## 2007-03-24 (1.0.3) + * Improved performance of pure variant a bit. + * The ext variant of this release supports the mswin32 platform. Ugh! + +## 2007-03-24 (1.0.2) + * Ext Parser didn't parse 0e0 correctly into 0.0: Fixed! + +## 2007-03-24 (1.0.1) + * Forgot some object files in the build dir. I really like that - not! + +## 2007-03-24 (1.0.0) + * Added C implementations for the JSON generator and a ragel based JSON + parser in C. + * Much more tests, especially fixtures from json.org. + * Further improved conformance to RFC4627. + +## 2007-02-09 (0.4.3) + * Conform more to RFC4627 for JSON: This means JSON strings + now always must contain exactly one object `"{ ... }"` or array `"[ ... ]"` in + order to be parsed without raising an exception. The definition of what + constitutes a whitespace is narrower in JSON than in Ruby ([ \t\r\n]), and + there are differences in floats and integers (no octals or hexadecimals) as + well. + * Added aliases generate and `pretty_generate` of unparse and `pretty_unparse`. + * Fixed a test case. + * Catch an `Iconv::InvalidEncoding` exception, that seems to occur on some Sun + boxes with SunOS 5.8, if iconv doesn't support utf16 conversions. This was + reported by Andrew R Jackson , thanks a bunch! 
+ +## 2006-08-25 (0.4.2) + * Fixed a bug in handling solidi (/-characters), that was reported by + Kevin Gilpin . + +## 2006-02-06 (0.4.1) + * Fixed a bug related to escaping with backslashes. Thanks for the report go + to Florian Munz . + +## 2005-09-23 (0.4.0) + * Initial Rubyforge Version diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/Gemfile b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/Gemfile new file mode 100644 index 0000000..8fe591e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/Gemfile @@ -0,0 +1,14 @@ +# vim: set ft=ruby: + +source 'https://rubygems.org' + +case ENV['JSON'] +when 'ext', nil + if ENV['RUBY_ENGINE'] == 'jruby' + gemspec :name => 'json-java' + else + gemspec :name => 'json' + end +when 'pure' + gemspec :name => 'json_pure' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/README-json-jruby.md b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/README-json-jruby.md new file mode 100644 index 0000000..1336837 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/README-json-jruby.md @@ -0,0 +1,33 @@ +JSON-JRuby +========== + +JSON-JRuby is a port of Florian Frank's native +[`json` library](http://json.rubyforge.org/) to JRuby. +It aims to be a perfect drop-in replacement for `json_pure`. + + +Development version +=================== + +The latest version is available from the +[Git repository](http://github.com/mernen/json-jruby/tree): + + git clone git://github.com/mernen/json-jruby.git + + +Compiling +========= + +You'll need JRuby version 1.2 or greater to build JSON-JRuby. +Its path must be set on the `jruby.dir` property of +`nbproject/project.properties` (defaults to `../jruby`). + +Additionally, you'll need [Ant](http://ant.apache.org/), and +[Ragel](http://www.cs.queensu.ca/~thurston/ragel/) 6.4 or greater. + +Then, from the folder where the sources are located, type: + + ant clean jar + +to clean any leftovers from previous builds and generate the `.jar` files. +To generate a RubyGem, specify the `gem` action rather than `jar`. diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/README.md b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/README.md new file mode 100644 index 0000000..682db2f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/README.md @@ -0,0 +1,425 @@ +# JSON implementation for Ruby + +[![Travis Widget](http://travis-ci.org/flori/json.svg?branch=master)](https://travis-ci.org/flori/json) + +## Description + +This is a implementation of the JSON specification according to RFC 7159 +http://www.ietf.org/rfc/rfc7159.txt . Starting from version 1.0.0 on there +will be two variants available: + +* A pure ruby variant, that relies on the iconv and the stringscan + extensions, which are both part of the ruby standard library. +* The quite a bit faster native extension variant, which is in parts + implemented in C or Java and comes with its own unicode conversion + functions and a parser generated by the ragel state machine compiler + http://www.complang.org/ragel/ . + +Both variants of the JSON generator generate UTF-8 character sequences by +default. If an :ascii\_only option with a true value is given, they escape all +non-ASCII and control characters with \uXXXX escape sequences, and support +UTF-16 surrogate pairs in order to be able to generate the whole range of +unicode code points. + +All strings, that are to be encoded as JSON strings, should be UTF-8 byte +sequences on the Ruby side. 
To encode raw binary strings, that aren't UTF-8 +encoded, please use the to\_json\_raw\_object method of String (which produces +an object, that contains a byte array) and decode the result on the receiving +endpoint. + +## Installation + +It's recommended to use the extension variant of JSON, because it's faster than +the pure ruby variant. If you cannot build it on your system, you can settle +for the latter. + +Just type into the command line as root: + +``` +# rake install +``` + +The above command will build the extensions and install them on your system. + +``` +# rake install_pure +``` + +or + +``` +# ruby install.rb +``` + +will just install the pure ruby implementation of JSON. + +If you use Rubygems you can type + +``` +# gem install json +``` + +instead, to install the newest JSON version. + +There is also a pure ruby json only variant of the gem, that can be installed +with: + +``` +# gem install json_pure +``` + +## Compiling the extensions yourself + +If you want to create the `parser.c` file from its `parser.rl` file or draw nice +graphviz images of the state machines, you need ragel from: +http://www.complang.org/ragel/ + +## Usage + +To use JSON you can + +```ruby +require 'json' +``` + +to load the installed variant (either the extension `'json'` or the pure +variant `'json_pure'`). If you have installed the extension variant, you can +pick either the extension variant or the pure variant by typing + +```ruby +require 'json/ext' +``` + +or + +```ruby +require 'json/pure' +``` + +Now you can parse a JSON document into a ruby data structure by calling + +```ruby +JSON.parse(document) +``` + +If you want to generate a JSON document from a ruby data structure call +```ruby +JSON.generate(data) +``` + +You can also use the `pretty_generate` method (which formats the output more +verbosely and nicely) or `fast_generate` (which doesn't do any of the security +checks generate performs, e. g. nesting deepness checks). + +There are also the JSON and JSON[] methods which use parse on a String or +generate a JSON document from an array or hash: + +```ruby +document = JSON 'test' => 23 # => "{\"test\":23}" +document = JSON['test' => 23] # => "{\"test\":23}" +``` + +and + +```ruby +data = JSON '{"test":23}' # => {"test"=>23} +data = JSON['{"test":23}'] # => {"test"=>23} +``` + +You can choose to load a set of common additions to ruby core's objects if +you + +```ruby +require 'json/add/core' +``` + +After requiring this you can, e. g., serialise/deserialise Ruby ranges: + +```ruby +JSON JSON(1..10) # => 1..10 +``` + +To find out how to add JSON support to other or your own classes, read the +section "More Examples" below. + +To get the best compatibility to rails' JSON implementation, you can + +```ruby +require 'json/add/rails' +``` + +Both of the additions attempt to require `'json'` (like above) first, if it has +not been required yet. + +## Serializing exceptions + +The JSON module doesn't extend `Exception` by default. If you convert an `Exception` +object to JSON, it will by default only include the exception message. 
+ +To include the full details, you must either load the `json/add/core` mentioned +above, or specifically load the exception addition: + +```ruby +require 'json/add/exception' +``` + +## More Examples + +To create a JSON document from a ruby data structure, you can call +`JSON.generate` like that: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,\"4..10\"]" +``` + +To get back a ruby data structure from a JSON document, you have to call +JSON.parse on it: + +```ruby +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, "4..10"] +``` + +Note, that the range from the original data structure is a simple +string now. The reason for this is, that JSON doesn't support ranges +or arbitrary classes. In this case the json library falls back to call +`Object#to_json`, which is the same as `#to_s.to_json`. + +It's possible to add JSON support serialization to arbitrary classes by +simply implementing a more specialized version of the `#to_json method`, that +should return a JSON object (a hash converted to JSON with `#to_json`) like +this (don't forget the `*a` for all the arguments): + +```ruby +class Range + def to_json(*a) + { + 'json_class' => self.class.name, # = 'Range' + 'data' => [ first, last, exclude_end? ] + }.to_json(*a) + end +end +``` + +The hash key `json_class` is the class, that will be asked to deserialise the +JSON representation later. In this case it's `Range`, but any namespace of +the form `A::B` or `::A::B` will do. All other keys are arbitrary and can be +used to store the necessary data to configure the object to be deserialised. + +If the key `json_class` is found in a JSON object, the JSON parser checks +if the given class responds to the `json_create` class method. If so, it is +called with the JSON object converted to a Ruby hash. So a range can +be deserialised by implementing `Range.json_create` like this: + +```ruby +class Range + def self.json_create(o) + new(*o['data']) + end +end +``` + +Now it possible to serialise/deserialise ranges as well: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json, :create_additions => true +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +``` + +`JSON.generate` always creates the shortest possible string representation of a +ruby data structure in one line. This is good for data storage or network +protocols, but not so good for humans to read. Fortunately there's also +`JSON.pretty_generate` (or `JSON.pretty_generate`) that creates a more readable +output: + +```ruby + puts JSON.pretty_generate([1, 2, {"a"=>3.141}, false, true, nil, 4..10]) + [ + 1, + 2, + { + "a": 3.141 + }, + false, + true, + null, + { + "json_class": "Range", + "data": [ + 4, + 10, + false + ] + } + ] +``` + +There are also the methods `Kernel#j` for generate, and `Kernel#jj` for +`pretty_generate` output to the console, that work analogous to Core Ruby's `p` and +the `pp` library's `pp` methods. + +The script `tools/server.rb` contains a small example if you want to test, how +receiving a JSON object from a webrick server in your browser with the +javasript prototype library http://www.prototypejs.org works. 
+ +## Speed Comparisons + +I have created some benchmark results (see the benchmarks/data-p4-3Ghz +subdir of the package) for the JSON-parser to estimate the speed up in the C +extension: + +``` + Comparing times (call_time_mean): + 1 ParserBenchmarkExt#parser 900 repeats: + 553.922304770 ( real) -> 21.500x + 0.001805307 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 224.513358139 ( real) -> 8.714x + 0.004454078 + 3 ParserBenchmarkPure#parser 1000 repeats: + 26.755020642 ( real) -> 1.038x + 0.037376163 + 4 ParserBenchmarkRails#parser 1000 repeats: + 25.763381731 ( real) -> 1.000x + 0.038814780 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1 is `JSON::Ext::Parser`, 2 is `YAML.load` with YAML +compatbile JSON document, 3 is is `JSON::Pure::Parser`, and 4 is +`ActiveSupport::JSON.decode`. The ActiveSupport JSON-decoder converts the +input first to YAML and then uses the YAML-parser, the conversion seems to +slow it down so much that it is only as fast as the `JSON::Pure::Parser`! + +If you look at the benchmark data you can see that this is mostly caused by +the frequent high outliers - the median of the Rails-parser runs is still +overall smaller than the median of the `JSON::Pure::Parser` runs: + +``` + Comparing times (call_time_median): + 1 ParserBenchmarkExt#parser 900 repeats: + 800.592479481 ( real) -> 26.936x + 0.001249075 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 271.002390644 ( real) -> 9.118x + 0.003690004 + 3 ParserBenchmarkRails#parser 1000 repeats: + 30.227910865 ( real) -> 1.017x + 0.033082008 + 4 ParserBenchmarkPure#parser 1000 repeats: + 29.722384421 ( real) -> 1.000x + 0.033644676 + calls/sec ( time) -> speed covers + secs/call +``` + +I have benchmarked the `JSON-Generator` as well. This generated a few more +values, because there are different modes that also influence the achieved +speed: + +``` + Comparing times (call_time_mean): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 547.354332608 ( real) -> 15.090x + 0.001826970 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 443.968212317 ( real) -> 12.240x + 0.002252414 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 375.104545883 ( real) -> 10.341x + 0.002665923 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 49.978706968 ( real) -> 1.378x + 0.020008521 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 38.531868759 ( real) -> 1.062x + 0.025952543 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 36.927649925 ( real) -> 1.018x 7 (>=3859) + 0.027079979 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 36.272134441 ( real) -> 1.000x 6 (>=3859) + 0.027569373 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1-3 are `JSON::Ext::Generator` methods. 4, 6, and 7 are +`JSON::Pure::Generator` methods and 5 is the Rails JSON generator. It is now a +bit faster than the `generator_safe` and `generator_pretty` methods of the pure +variant but slower than the others. + +To achieve the fastest JSON document output, you can use the `fast_generate` +method. Beware, that this will disable the checking for circular Ruby data +structures, which may cause JSON to go into an infinite loop. 
+ +Here are the median comparisons for completeness' sake: + +``` + Comparing times (call_time_median): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 708.258020939 ( real) -> 16.547x + 0.001411915 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 569.105020353 ( real) -> 13.296x + 0.001757145 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 482.825371244 ( real) -> 11.280x + 0.002071142 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 62.717626652 ( real) -> 1.465x + 0.015944481 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 43.965681162 ( real) -> 1.027x + 0.022745013 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 43.929073409 ( real) -> 1.026x 7 (>=3859) + 0.022763968 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 42.802514491 ( real) -> 1.000x 6 (>=3859) + 0.023363113 + calls/sec ( time) -> speed covers + secs/call +``` + +## Development + +### Release + +Update the json.gemspec and json-java.gemspec. + +``` +rbenv shell 2.6.5 +rake build +gem push pkg/json-2.3.0.gem + +rbenv shell jruby-9.2.9.0 +rake build +gem push pkg/json-2.3.0-java.gem +``` + +## Author + +Florian Frank + +## License + +Ruby License, see https://www.ruby-lang.org/en/about/license.txt. + +## Download + +The latest version of this library can be downloaded at + +* https://rubygems.org/gems/json + +Online Documentation should be located at + +* https://www.rubydoc.info/gems/json diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/Rakefile b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/Rakefile new file mode 100644 index 0000000..5b42292 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/Rakefile @@ -0,0 +1,334 @@ +begin + require 'rubygems/package_task' +rescue LoadError +end + +require 'rbconfig' +include\ + begin + RbConfig + rescue NameError + Config + end + +require 'rake/clean' +CLOBBER.include 'doc', 'Gemfile.lock' +CLEAN.include FileList['diagrams/*.*'], 'doc', 'coverage', 'tmp', + FileList["ext/**/{Makefile,mkmf.log}"], 'build', 'dist', FileList['**/*.rbc'], + FileList["{ext,lib}/**/*.{so,bundle,#{CONFIG['DLEXT']},o,obj,pdb,lib,manifest,exp,def,jar,class,dSYM}"], + FileList['java/src/**/*.class'] + +require 'rake/testtask' +class UndocumentedTestTask < Rake::TestTask + def desc(*) end +end + +which = lambda { |c| + w = `which #{c}` + break w.chomp unless w.empty? 
+} + +MAKE = ENV['MAKE'] || %w[gmake make].find(&which) +BUNDLE = ENV['BUNDLE'] || %w[bundle].find(&which) +PKG_NAME = 'json' +PKG_TITLE = 'JSON Implementation for Ruby' +PKG_VERSION = File.read('VERSION').chomp +PKG_FILES = FileList[`git ls-files`.split(/\n/)] + +EXT_ROOT_DIR = 'ext/json/ext' +EXT_PARSER_DIR = "#{EXT_ROOT_DIR}/parser" +EXT_PARSER_DL = "#{EXT_PARSER_DIR}/parser.#{CONFIG['DLEXT']}" +RAGEL_PATH = "#{EXT_PARSER_DIR}/parser.rl" +EXT_PARSER_SRC = "#{EXT_PARSER_DIR}/parser.c" +EXT_GENERATOR_DIR = "#{EXT_ROOT_DIR}/generator" +EXT_GENERATOR_DL = "#{EXT_GENERATOR_DIR}/generator.#{CONFIG['DLEXT']}" +EXT_GENERATOR_SRC = "#{EXT_GENERATOR_DIR}/generator.c" + +JAVA_DIR = "java/src/json/ext" +JAVA_RAGEL_PATH = "#{JAVA_DIR}/Parser.rl" +JAVA_PARSER_SRC = "#{JAVA_DIR}/Parser.java" +JAVA_SOURCES = FileList["#{JAVA_DIR}/*.java"] +JAVA_CLASSES = [] +JRUBY_PARSER_JAR = File.expand_path("lib/json/ext/parser.jar") +JRUBY_GENERATOR_JAR = File.expand_path("lib/json/ext/generator.jar") + +RAGEL_CODEGEN = %w[rlcodegen rlgen-cd ragel].find(&which) +RAGEL_DOTGEN = %w[rlgen-dot rlgen-cd ragel].find(&which) + +desc "Installing library (pure)" +task :install_pure => :version do + ruby 'install.rb' +end + +task :install_ext_really do + sitearchdir = CONFIG["sitearchdir"] + cd 'ext' do + for file in Dir["json/ext/*.#{CONFIG['DLEXT']}"] + d = File.join(sitearchdir, file) + mkdir_p File.dirname(d) + install(file, d) + end + warn " *** Installed EXT ruby library." + end +end + +desc "Installing library (extension)" +task :install_ext => [ :compile, :install_pure, :install_ext_really ] + +desc "Installing library (extension)" +task :install => :install_ext + +desc m = "Writing version information for #{PKG_VERSION}" +task :version do + puts m + File.open(File.join('lib', 'json', 'version.rb'), 'w') do |v| + v.puts < [ :set_env_pure, :check_env, :do_test_pure ] +task(:set_env_pure) { ENV['JSON'] = 'pure' } + +UndocumentedTestTask.new do |t| + t.name = 'do_test_pure' + t.libs << 'lib' << 'tests' + t.test_files = FileList['tests/*_test.rb'] + t.verbose = true + t.options = '-v' +end + +desc "Testing library (pure ruby and extension)" +task :test => [ :test_pure, :test_ext ] + +namespace :gems do + desc 'Install all development gems' + task :install do + sh "#{BUNDLE}" + end +end + +if defined?(RUBY_ENGINE) and RUBY_ENGINE == 'jruby' + ENV['JAVA_HOME'] ||= [ + '/usr/local/java/jdk', + '/usr/lib/jvm/java-6-openjdk', + '/Library/Java/Home', + ].find { |c| File.directory?(c) } + if ENV['JAVA_HOME'] + warn " *** JAVA_HOME is set to #{ENV['JAVA_HOME'].inspect}" + ENV['PATH'] = ENV['PATH'].split(/:/).unshift(java_path = "#{ENV['JAVA_HOME']}/bin") * ':' + warn " *** java binaries are assumed to be in #{java_path.inspect}" + else + warn " *** JAVA_HOME was not set or could not be guessed!" 
+ exit 1 + end + + file JAVA_PARSER_SRC => JAVA_RAGEL_PATH do + cd JAVA_DIR do + if RAGEL_CODEGEN == 'ragel' + sh "ragel Parser.rl -J -o Parser.java" + else + sh "ragel -x Parser.rl | #{RAGEL_CODEGEN} -J" + end + end + end + + desc "Generate parser for java with ragel" + task :ragel => JAVA_PARSER_SRC + + desc "Delete the ragel generated Java source" + task :ragel_clean do + rm_rf JAVA_PARSER_SRC + end + + JRUBY_JAR = File.join(CONFIG["libdir"], "jruby.jar") + if File.exist?(JRUBY_JAR) + JAVA_SOURCES.each do |src| + classpath = (Dir['java/lib/*.jar'] << 'java/src' << JRUBY_JAR) * ':' + obj = src.sub(/\.java\Z/, '.class') + file obj => src do + sh 'javac', '-classpath', classpath, '-source', '1.6', '-target', '1.6', src + end + JAVA_CLASSES << obj + end + else + warn "WARNING: Cannot find jruby in path => Cannot build jruby extension!" + end + + desc "Compiling jruby extension" + task :compile => JAVA_CLASSES + + desc "Package the jruby gem" + task :jruby_gem => :create_jar do + sh 'gem build json-java.gemspec' + mkdir_p 'pkg' + mv "json-#{PKG_VERSION}-java.gem", 'pkg' + end + + desc "Testing library (jruby)" + task :test_ext => [ :set_env_ext, :create_jar, :check_env, :do_test_ext ] + task(:set_env_ext) { ENV['JSON'] = 'ext' } + + UndocumentedTestTask.new do |t| + t.name = 'do_test_ext' + t.libs << 'lib' << 'tests' + t.test_files = FileList['tests/*_test.rb'] + t.verbose = true + t.options = '-v' + end + + file JRUBY_PARSER_JAR => :compile do + cd 'java/src' do + parser_classes = FileList[ + "json/ext/ByteListTranscoder*.class", + "json/ext/OptionsReader*.class", + "json/ext/Parser*.class", + "json/ext/RuntimeInfo*.class", + "json/ext/StringDecoder*.class", + "json/ext/Utils*.class" + ] + sh 'jar', 'cf', File.basename(JRUBY_PARSER_JAR), *parser_classes + mv File.basename(JRUBY_PARSER_JAR), File.dirname(JRUBY_PARSER_JAR) + end + end + + desc "Create parser jar" + task :create_parser_jar => JRUBY_PARSER_JAR + + file JRUBY_GENERATOR_JAR => :compile do + cd 'java/src' do + generator_classes = FileList[ + "json/ext/ByteListTranscoder*.class", + "json/ext/OptionsReader*.class", + "json/ext/Generator*.class", + "json/ext/RuntimeInfo*.class", + "json/ext/StringEncoder*.class", + "json/ext/Utils*.class" + ] + sh 'jar', 'cf', File.basename(JRUBY_GENERATOR_JAR), *generator_classes + mv File.basename(JRUBY_GENERATOR_JAR), File.dirname(JRUBY_GENERATOR_JAR) + end + end + + desc "Create generator jar" + task :create_generator_jar => JRUBY_GENERATOR_JAR + + desc "Create parser and generator jars" + task :create_jar => [ :create_parser_jar, :create_generator_jar ] + + desc "Build all gems and archives for a new release of the jruby extension." 
+ task :build => [ :clean, :version, :jruby_gem ] + + task :release => :build +else + desc "Compiling extension" + task :compile => [ EXT_PARSER_DL, EXT_GENERATOR_DL ] + + file EXT_PARSER_DL => EXT_PARSER_SRC do + cd EXT_PARSER_DIR do + ruby 'extconf.rb' + sh MAKE + end + cp "#{EXT_PARSER_DIR}/parser.#{CONFIG['DLEXT']}", EXT_ROOT_DIR + end + + file EXT_GENERATOR_DL => EXT_GENERATOR_SRC do + cd EXT_GENERATOR_DIR do + ruby 'extconf.rb' + sh MAKE + end + cp "#{EXT_GENERATOR_DIR}/generator.#{CONFIG['DLEXT']}", EXT_ROOT_DIR + end + + desc "Testing library (extension)" + task :test_ext => [ :check_env, :compile, :do_test_ext ] + + UndocumentedTestTask.new do |t| + t.name = 'do_test_ext' + t.libs << 'ext' << 'lib' << 'tests' + t.test_files = FileList['tests/*_test.rb'] + t.verbose = true + t.options = '-v' + end + + desc "Generate parser with ragel" + task :ragel => EXT_PARSER_SRC + + desc "Delete the ragel generated C source" + task :ragel_clean do + rm_rf EXT_PARSER_SRC + end + + desc "Update the tags file" + task :tags do + system 'ctags', *Dir['**/*.{rb,c,h,java}'] + end + + file EXT_PARSER_SRC => RAGEL_PATH do + cd EXT_PARSER_DIR do + if RAGEL_CODEGEN == 'ragel' + sh "ragel parser.rl -G2 -o parser.c" + else + sh "ragel -x parser.rl | #{RAGEL_CODEGEN} -G2" + end + src = File.read("parser.c").gsub(/[ \t]+$/, '') + src.gsub!(/^static const int (JSON_.*=.*);$/, 'enum {\1};') + src.gsub!(/0 <= \(\*p\) && \(\*p\) <= 31/, "0 <= (signed char)(*p) && (*p) <= 31") + src[0, 0] = "/* This file is automatically generated from parser.rl by using ragel */" + File.open("parser.c", "w") {|f| f.print src} + end + end + + desc "Generate diagrams of ragel parser (ps)" + task :ragel_dot_ps do + root = 'diagrams' + specs = [] + File.new(RAGEL_PATH).grep(/^\s*machine\s*(\S+);\s*$/) { specs << $1 } + for s in specs + if RAGEL_DOTGEN == 'ragel' + sh "ragel #{RAGEL_PATH} -S#{s} -p -V | dot -Tps -o#{root}/#{s}.ps" + else + sh "ragel -x #{RAGEL_PATH} -S#{s} | #{RAGEL_DOTGEN} -p|dot -Tps -o#{root}/#{s}.ps" + end + end + end + + desc "Generate diagrams of ragel parser (png)" + task :ragel_dot_png do + root = 'diagrams' + specs = [] + File.new(RAGEL_PATH).grep(/^\s*machine\s*(\S+);\s*$/) { specs << $1 } + for s in specs + if RAGEL_DOTGEN == 'ragel' + sh "ragel #{RAGEL_PATH} -S#{s} -p -V | dot -Tpng -o#{root}/#{s}.png" + else + sh "ragel -x #{RAGEL_PATH} -S#{s} | #{RAGEL_DOTGEN} -p|dot -Tpng -o#{root}/#{s}.png" + end + end + end + + desc "Generate diagrams of ragel parser" + task :ragel_dot => [ :ragel_dot_png, :ragel_dot_ps ] + + desc "Build all gems and archives for a new release of json and json_pure." 
+ task :build => [ :clean, :gemspec, :package ] + + task :release => :build +end + +desc "Compile in the the source directory" +task :default => [ :clean, :test ] diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/VERSION b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/VERSION new file mode 100644 index 0000000..2bf1c1c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/VERSION @@ -0,0 +1 @@ +2.3.1 diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/diagrams/.keep b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/diagrams/.keep new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer.h b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer.h new file mode 100644 index 0000000..dc8f406 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/fbuffer/fbuffer.h @@ -0,0 +1,187 @@ + +#ifndef _FBUFFER_H_ +#define _FBUFFER_H_ + +#include "ruby.h" + +#ifndef RHASH_SIZE +#define RHASH_SIZE(hsh) (RHASH(hsh)->tbl->num_entries) +#endif + +#ifndef RFLOAT_VALUE +#define RFLOAT_VALUE(val) (RFLOAT(val)->value) +#endif + +#ifndef RARRAY_LEN +#define RARRAY_LEN(ARRAY) RARRAY(ARRAY)->len +#endif +#ifndef RSTRING_PTR +#define RSTRING_PTR(string) RSTRING(string)->ptr +#endif +#ifndef RSTRING_LEN +#define RSTRING_LEN(string) RSTRING(string)->len +#endif + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +#ifdef HAVE_RUBY_ENCODING_H +#include "ruby/encoding.h" +#define FORCE_UTF8(obj) rb_enc_associate((obj), rb_utf8_encoding()) +#else +#define FORCE_UTF8(obj) +#endif + +/* We don't need to guard objects for rbx, so let's do nothing at all. 
*/ +#ifndef RB_GC_GUARD +#define RB_GC_GUARD(object) +#endif + +typedef struct FBufferStruct { + unsigned long initial_length; + char *ptr; + unsigned long len; + unsigned long capa; +} FBuffer; + +#define FBUFFER_INITIAL_LENGTH_DEFAULT 1024 + +#define FBUFFER_PTR(fb) (fb->ptr) +#define FBUFFER_LEN(fb) (fb->len) +#define FBUFFER_CAPA(fb) (fb->capa) +#define FBUFFER_PAIR(fb) FBUFFER_PTR(fb), FBUFFER_LEN(fb) + +static FBuffer *fbuffer_alloc(unsigned long initial_length); +static void fbuffer_free(FBuffer *fb); +static void fbuffer_clear(FBuffer *fb); +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len); +#ifdef JSON_GENERATOR +static void fbuffer_append_long(FBuffer *fb, long number); +#endif +static void fbuffer_append_char(FBuffer *fb, char newchr); +#ifdef JSON_GENERATOR +static FBuffer *fbuffer_dup(FBuffer *fb); +static VALUE fbuffer_to_s(FBuffer *fb); +#endif + +static FBuffer *fbuffer_alloc(unsigned long initial_length) +{ + FBuffer *fb; + if (initial_length <= 0) initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + fb = ALLOC(FBuffer); + memset((void *) fb, 0, sizeof(FBuffer)); + fb->initial_length = initial_length; + return fb; +} + +static void fbuffer_free(FBuffer *fb) +{ + if (fb->ptr) ruby_xfree(fb->ptr); + ruby_xfree(fb); +} + +static void fbuffer_clear(FBuffer *fb) +{ + fb->len = 0; +} + +static void fbuffer_inc_capa(FBuffer *fb, unsigned long requested) +{ + unsigned long required; + + if (!fb->ptr) { + fb->ptr = ALLOC_N(char, fb->initial_length); + fb->capa = fb->initial_length; + } + + for (required = fb->capa; requested > required - fb->len; required <<= 1); + + if (required > fb->capa) { + REALLOC_N(fb->ptr, char, required); + fb->capa = required; + } +} + +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len) +{ + if (len > 0) { + fbuffer_inc_capa(fb, len); + MEMCPY(fb->ptr + fb->len, newstr, char, len); + fb->len += len; + } +} + +#ifdef JSON_GENERATOR +static void fbuffer_append_str(FBuffer *fb, VALUE str) +{ + const char *newstr = StringValuePtr(str); + unsigned long len = RSTRING_LEN(str); + + RB_GC_GUARD(str); + + fbuffer_append(fb, newstr, len); +} +#endif + +static void fbuffer_append_char(FBuffer *fb, char newchr) +{ + fbuffer_inc_capa(fb, 1); + *(fb->ptr + fb->len) = newchr; + fb->len++; +} + +#ifdef JSON_GENERATOR +static void freverse(char *start, char *end) +{ + char c; + + while (end > start) { + c = *end, *end-- = *start, *start++ = c; + } +} + +static long fltoa(long number, char *buf) +{ + static char digits[] = "0123456789"; + long sign = number; + char* tmp = buf; + + if (sign < 0) number = -number; + do *tmp++ = digits[number % 10]; while (number /= 10); + if (sign < 0) *tmp++ = '-'; + freverse(buf, tmp - 1); + return tmp - buf; +} + +static void fbuffer_append_long(FBuffer *fb, long number) +{ + char buf[20]; + unsigned long len = fltoa(number, buf); + fbuffer_append(fb, buf, len); +} + +static FBuffer *fbuffer_dup(FBuffer *fb) +{ + unsigned long len = fb->len; + FBuffer *result; + + result = fbuffer_alloc(len); + fbuffer_append(result, FBUFFER_PAIR(fb)); + return result; +} + +static VALUE fbuffer_to_s(FBuffer *fb) +{ + VALUE result = rb_str_new(FBUFFER_PTR(fb), FBUFFER_LEN(fb)); + fbuffer_free(fb); + FORCE_UTF8(result); + return result; +} +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/.sitearchdir.-.json.-.ext.time b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/.sitearchdir.-.json.-.ext.time new file mode 100644 index 0000000..e69de29 
diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/depend b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/depend new file mode 100644 index 0000000..1a042a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/depend @@ -0,0 +1 @@ +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/extconf.rb new file mode 100644 index 0000000..8627c5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/extconf.rb @@ -0,0 +1,4 @@ +require 'mkmf' + +$defs << "-DJSON_GENERATOR" +create_makefile 'json/ext/generator' diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/generator.c b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/generator.c new file mode 100644 index 0000000..749efaf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/generator.c @@ -0,0 +1,1569 @@ +#include "../fbuffer/fbuffer.h" +#include "generator.h" + +#ifdef HAVE_RUBY_ENCODING_H +static VALUE CEncoding_UTF_8; +static ID i_encoding, i_encode; +#endif + +static VALUE mJSON, mExt, mGenerator, cState, mGeneratorMethods, mObject, + mHash, mArray, +#ifdef RUBY_INTEGER_UNIFICATION + mInteger, +#else + mFixnum, mBignum, +#endif + mFloat, mString, mString_Extend, + mTrueClass, mFalseClass, mNilClass, eGeneratorError, + eNestingError, + i_SAFE_STATE_PROTOTYPE; + +static ID i_to_s, i_to_json, i_new, i_indent, i_space, i_space_before, + i_object_nl, i_array_nl, i_max_nesting, i_allow_nan, i_ascii_only, + i_pack, i_unpack, i_create_id, i_extend, i_key_p, + i_aref, i_send, i_respond_to_p, i_match, i_keys, i_depth, + i_buffer_initial_length, i_dup; + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is + * left as-is for anyone who may want to do such conversion, which was + * allowed in earlier algorithms. 
+ */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* + * Magic values subtracted from a buffer value during UTF8 conversion. + * This table contains as many values as there might be trailing bytes + * in a UTF-8 sequence. + */ +static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * If not calling this from ConvertUTF8to*, then the length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns 0. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length) +{ + UTF8 a; + const UTF8 *srcptr = source+length; + switch (length) { + default: return 0; + /* Everything else falls through when "1"... */ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 2: if ((a = (*--srcptr)) > 0xBF) return 0; + + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return 0; break; + case 0xED: if (a > 0x9F) return 0; break; + case 0xF0: if (a < 0x90) return 0; break; + case 0xF4: if (a > 0x8F) return 0; break; + default: if (a < 0x80) return 0; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return 0; + } + if (*source > 0xF4) return 0; + return 1; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf. */ +static void unicode_escape(char *buf, UTF16 character) +{ + const char *digits = "0123456789abcdef"; + + buf[2] = digits[character >> 12]; + buf[3] = digits[(character >> 8) & 0xf]; + buf[4] = digits[(character >> 4) & 0xf]; + buf[5] = digits[character & 0xf]; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf, then + * the buffer buf is appended to the FBuffer buffer. */ +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 + character) +{ + unicode_escape(buf, character); + fbuffer_append(buffer, buf, 6); +} + +/* Converts string to a JSON string in FBuffer buffer, where all but the ASCII + * and control characters are JSON escaped. 
*/ +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string) +{ + const UTF8 *source = (UTF8 *) RSTRING_PTR(string); + const UTF8 *sourceEnd = source + RSTRING_LEN(string); + char buf[6] = { '\\', 'u' }; + + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8(source, extraBytesToRead+1)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* normal case */ + if (ch >= 0x20 && ch <= 0x7f) { + switch (ch) { + case '\\': + fbuffer_append(buffer, "\\\\", 2); + break; + case '"': + fbuffer_append(buffer, "\\\"", 2); + break; + default: + fbuffer_append_char(buffer, (char)ch); + break; + } + } else { + switch (ch) { + case '\n': + fbuffer_append(buffer, "\\n", 2); + break; + case '\r': + fbuffer_append(buffer, "\\r", 2); + break; + case '\t': + fbuffer_append(buffer, "\\t", 2); + break; + case '\f': + fbuffer_append(buffer, "\\f", 2); + break; + case '\b': + fbuffer_append(buffer, "\\b", 2); + break; + default: + unicode_escape_to_buffer(buffer, buf, (UTF16) ch); + break; + } + } + } + } else if (ch > UNI_MAX_UTF16) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the start */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. */ + ch -= halfBase; + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START)); + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch & halfMask) + UNI_SUR_LOW_START)); + } + } + RB_GC_GUARD(string); +} + +/* Converts string to a JSON string in FBuffer buffer, where only the + * characters required by the JSON standard are JSON escaped. The remaining + * characters (should be UTF8) are just passed through and appended to the + * result. 
*/ +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string) +{ + const char *ptr = RSTRING_PTR(string), *p; + unsigned long len = RSTRING_LEN(string), start = 0, end = 0; + const char *escape = NULL; + int escape_len; + unsigned char c; + char buf[6] = { '\\', 'u' }; + int ascii_only = rb_enc_str_asciionly_p(string); + + for (start = 0, end = 0; end < len;) { + p = ptr + end; + c = (unsigned char) *p; + if (c < 0x20) { + switch (c) { + case '\n': + escape = "\\n"; + escape_len = 2; + break; + case '\r': + escape = "\\r"; + escape_len = 2; + break; + case '\t': + escape = "\\t"; + escape_len = 2; + break; + case '\f': + escape = "\\f"; + escape_len = 2; + break; + case '\b': + escape = "\\b"; + escape_len = 2; + break; + default: + unicode_escape(buf, (UTF16) *p); + escape = buf; + escape_len = 6; + break; + } + } else { + switch (c) { + case '\\': + escape = "\\\\"; + escape_len = 2; + break; + case '"': + escape = "\\\""; + escape_len = 2; + break; + default: + { + unsigned short clen = 1; + if (!ascii_only) { + clen += trailingBytesForUTF8[c]; + if (end + clen > len) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8((UTF8 *) p, clen)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + } + end += clen; + } + continue; + break; + } + } + fbuffer_append(buffer, ptr + start, end - start); + fbuffer_append(buffer, escape, escape_len); + start = ++end; + escape = NULL; + } + fbuffer_append(buffer, ptr + start, end - start); +} + +static char *fstrndup(const char *ptr, unsigned long len) { + char *result; + if (len <= 0) return NULL; + result = ALLOC_N(char, len); + memcpy(result, ptr, len); + return result; +} + +/* + * Document-module: JSON::Ext::Generator + * + * This is the JSON generator implemented as a C extension. It can be + * configured to be used by setting + * + * JSON.generator = JSON::Ext::Generator + * + * with the method generator= in JSON. 
+ * + */ + +/* Explanation of the following: that's the only way to not pollute + * standard library's docs with GeneratorMethods:: which + * are uninformative and take a large place in a list of classes + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Array + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Bignum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::FalseClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Fixnum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Float + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Hash + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Integer + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::NilClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Object + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String::Extend + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::TrueClass + * :nodoc: + */ + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON object, that is generated from + * this Hash instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(object); +} + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON array, that is generated from + * this Array instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self) { + GENERATE_JSON(array); +} + +#ifdef RUBY_INTEGER_UNIFICATION +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(integer); +} + +#else +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(fixnum); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(bignum); +} +#endif + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Float number. + */ +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(float); +} + +/* + * call-seq: String.included(modul) + * + * Extends _modul_ with the String::Extend module. + */ +static VALUE mString_included_s(VALUE self, VALUE modul) { + VALUE result = rb_funcall(modul, i_extend, 1, mString_Extend); + return result; +} + +/* + * call-seq: to_json(*) + * + * This string should be encoded with UTF-8 A call to this method + * returns a JSON string encoded with UTF16 big endian characters as + * \u????. 
+ */ +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(string); +} + +/* + * call-seq: to_json_raw_object() + * + * This method creates a raw object hash, that can be nested into + * other data structures and will be generated as a raw string. This + * method should be used, if you want to convert raw strings to JSON + * instead of UTF-8 strings, e. g. binary data. + */ +static VALUE mString_to_json_raw_object(VALUE self) +{ + VALUE ary; + VALUE result = rb_hash_new(); + rb_hash_aset(result, rb_funcall(mJSON, i_create_id, 0), rb_class_name(rb_obj_class(self))); + ary = rb_funcall(self, i_unpack, 1, rb_str_new2("C*")); + rb_hash_aset(result, rb_str_new2("raw"), ary); + return result; +} + +/* + * call-seq: to_json_raw(*args) + * + * This method creates a JSON text from the result of a call to + * to_json_raw_object of this String. + */ +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self) +{ + VALUE obj = mString_to_json_raw_object(self); + Check_Type(obj, T_HASH); + return mHash_to_json(argc, argv, obj); +} + +/* + * call-seq: json_create(o) + * + * Raw Strings are JSON Objects (the raw bytes are stored in an array for the + * key "raw"). The Ruby String can be created by this module method. + */ +static VALUE mString_Extend_json_create(VALUE self, VALUE o) +{ + VALUE ary; + Check_Type(o, T_HASH); + ary = rb_hash_aref(o, rb_str_new2("raw")); + return rb_funcall(ary, i_pack, 1, rb_str_new2("C*")); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for true: 'true'. + */ +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(true); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for false: 'false'. + */ +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(false); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for nil: 'null'. + */ +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(null); +} + +/* + * call-seq: to_json(*) + * + * Converts this object to a string (calling #to_s), converts + * it to a JSON string, and returns the result. This is a fallback, if no + * special method #to_json was defined for some object. 
+ */ +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self) +{ + VALUE state; + VALUE string = rb_funcall(self, i_to_s, 0); + rb_scan_args(argc, argv, "01", &state); + Check_Type(string, T_STRING); + state = cState_from_state_s(cState, state); + return cState_partial_generate(state, string); +} + +static void State_free(void *ptr) +{ + JSON_Generator_State *state = ptr; + if (state->indent) ruby_xfree(state->indent); + if (state->space) ruby_xfree(state->space); + if (state->space_before) ruby_xfree(state->space_before); + if (state->object_nl) ruby_xfree(state->object_nl); + if (state->array_nl) ruby_xfree(state->array_nl); + if (state->array_delim) fbuffer_free(state->array_delim); + if (state->object_delim) fbuffer_free(state->object_delim); + if (state->object_delim2) fbuffer_free(state->object_delim2); + ruby_xfree(state); +} + +static size_t State_memsize(const void *ptr) +{ + const JSON_Generator_State *state = ptr; + size_t size = sizeof(*state); + if (state->indent) size += state->indent_len + 1; + if (state->space) size += state->space_len + 1; + if (state->space_before) size += state->space_before_len + 1; + if (state->object_nl) size += state->object_nl_len + 1; + if (state->array_nl) size += state->array_nl_len + 1; + if (state->array_delim) size += FBUFFER_CAPA(state->array_delim); + if (state->object_delim) size += FBUFFER_CAPA(state->object_delim); + if (state->object_delim2) size += FBUFFER_CAPA(state->object_delim2); + return size; +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Generator_State_type = { + "JSON/Generator/State", + {NULL, State_free, State_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cState_s_allocate(VALUE klass) +{ + JSON_Generator_State *state; + return TypedData_Make_Struct(klass, JSON_Generator_State, + &JSON_Generator_State_type, state); +} + +/* + * call-seq: configure(opts) + * + * Configure this State instance with the Hash _opts_, and return + * itself. 
+ */ +static VALUE cState_configure(VALUE self, VALUE opts) +{ + VALUE tmp; + GET_STATE(self); + tmp = rb_check_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(tmp)) tmp = rb_convert_type(opts, T_HASH, "Hash", "to_h"); + opts = tmp; + tmp = rb_hash_aref(opts, ID2SYM(i_indent)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->indent = fstrndup(RSTRING_PTR(tmp), len + 1); + state->indent_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space_before)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space_before = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_before_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_array_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->array_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->array_nl_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_object_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->object_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->object_nl_len = len; + } + tmp = ID2SYM(i_max_nesting); + state->max_nesting = 100; + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + state->max_nesting = FIX2LONG(max_nesting); + } else { + state->max_nesting = 0; + } + } + tmp = ID2SYM(i_depth); + state->depth = 0; + if (option_given_p(opts, tmp)) { + VALUE depth = rb_hash_aref(opts, tmp); + if (RTEST(depth)) { + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + } else { + state->depth = 0; + } + } + tmp = ID2SYM(i_buffer_initial_length); + if (option_given_p(opts, tmp)) { + VALUE buffer_initial_length = rb_hash_aref(opts, tmp); + if (RTEST(buffer_initial_length)) { + long initial_length; + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) state->buffer_initial_length = initial_length; + } + } + tmp = rb_hash_aref(opts, ID2SYM(i_allow_nan)); + state->allow_nan = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_ascii_only)); + state->ascii_only = RTEST(tmp); + return self; +} + +static void set_state_ivars(VALUE hash, VALUE state) +{ + VALUE ivars = rb_obj_instance_variables(state); + int i = 0; + for (i = 0; i < RARRAY_LEN(ivars); i++) { + VALUE key = rb_funcall(rb_ary_entry(ivars, i), i_to_s, 0); + long key_len = RSTRING_LEN(key); + VALUE value = rb_iv_get(state, StringValueCStr(key)); + rb_hash_aset(hash, rb_str_intern(rb_str_substr(key, 1, key_len - 1)), value); + } +} + +/* + * call-seq: to_h + * + * Returns the configuration instance variables as a hash, that can be + * passed to the configure method. 
+ */ +static VALUE cState_to_h(VALUE self) +{ + VALUE result = rb_hash_new(); + GET_STATE(self); + set_state_ivars(result, self); + rb_hash_aset(result, ID2SYM(i_indent), rb_str_new(state->indent, state->indent_len)); + rb_hash_aset(result, ID2SYM(i_space), rb_str_new(state->space, state->space_len)); + rb_hash_aset(result, ID2SYM(i_space_before), rb_str_new(state->space_before, state->space_before_len)); + rb_hash_aset(result, ID2SYM(i_object_nl), rb_str_new(state->object_nl, state->object_nl_len)); + rb_hash_aset(result, ID2SYM(i_array_nl), rb_str_new(state->array_nl, state->array_nl_len)); + rb_hash_aset(result, ID2SYM(i_allow_nan), state->allow_nan ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_ascii_only), state->ascii_only ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_max_nesting), LONG2FIX(state->max_nesting)); + rb_hash_aset(result, ID2SYM(i_depth), LONG2FIX(state->depth)); + rb_hash_aset(result, ID2SYM(i_buffer_initial_length), LONG2FIX(state->buffer_initial_length)); + return result; +} + +/* +* call-seq: [](name) +* +* Returns the value returned by method +name+. +*/ +static VALUE cState_aref(VALUE self, VALUE name) +{ + name = rb_funcall(name, i_to_s, 0); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name))) { + return rb_funcall(self, i_send, 1, name); + } else { + return rb_attr_get(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name))); + } +} + +/* +* call-seq: []=(name, value) +* +* Sets the attribute name to value. +*/ +static VALUE cState_aset(VALUE self, VALUE name, VALUE value) +{ + VALUE name_writer; + + name = rb_funcall(name, i_to_s, 0); + name_writer = rb_str_cat2(rb_str_dup(name), "="); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name_writer))) { + return rb_funcall(self, i_send, 2, name_writer, value); + } else { + rb_ivar_set(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name)), value); + } + return Qnil; +} + +struct hash_foreach_arg { + FBuffer *buffer; + JSON_Generator_State *state; + VALUE Vstate; + int iter; +}; + +static int +json_object_i(VALUE key, VALUE val, VALUE _arg) +{ + struct hash_foreach_arg *arg = (struct hash_foreach_arg *)_arg; + FBuffer *buffer = arg->buffer; + JSON_Generator_State *state = arg->state; + VALUE Vstate = arg->Vstate; + + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + char *delim = FBUFFER_PTR(state->object_delim); + long delim_len = FBUFFER_LEN(state->object_delim); + char *delim2 = FBUFFER_PTR(state->object_delim2); + long delim2_len = FBUFFER_LEN(state->object_delim2); + long depth = state->depth; + int j; + VALUE klass, key_to_s; + + if (arg->iter > 0) fbuffer_append(buffer, delim, delim_len); + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + } + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + + klass = CLASS_OF(key); + if (klass == rb_cString) { + key_to_s = key; + } else if (klass == rb_cSymbol) { + key_to_s = rb_id2str(SYM2ID(key)); + } else { + key_to_s = rb_funcall(key, i_to_s, 0); + } + Check_Type(key_to_s, T_STRING); + generate_json(buffer, Vstate, state, key_to_s); + fbuffer_append(buffer, delim2, delim2_len); + generate_json(buffer, Vstate, state, val); + + arg->iter++; + return ST_CONTINUE; +} + +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = 
state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + long depth = ++state->depth; + int j; + struct hash_foreach_arg arg; + + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '{'); + + arg.buffer = buffer; + arg.state = state; + arg.Vstate = Vstate; + arg.iter = 0; + rb_hash_foreach(obj, json_object_i, (VALUE)&arg); + + depth = --state->depth; + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, '}'); +} + +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *array_nl = state->array_nl; + long array_nl_len = state->array_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + char *delim = FBUFFER_PTR(state->array_delim); + long delim_len = FBUFFER_LEN(state->array_delim); + long depth = ++state->depth; + int i, j; + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '['); + if (array_nl) fbuffer_append(buffer, array_nl, array_nl_len); + for(i = 0; i < RARRAY_LEN(obj); i++) { + if (i > 0) fbuffer_append(buffer, delim, delim_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + generate_json(buffer, Vstate, state, rb_ary_entry(obj, i)); + } + state->depth = --depth; + if (array_nl) { + fbuffer_append(buffer, array_nl, array_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, ']'); +} + +#ifdef HAVE_RUBY_ENCODING_H +static int enc_utf8_compatible_p(rb_encoding *enc) +{ + if (enc == rb_usascii_encoding()) return 1; + if (enc == rb_utf8_encoding()) return 1; + return 0; +} +#endif + +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_char(buffer, '"'); +#ifdef HAVE_RUBY_ENCODING_H + if (!enc_utf8_compatible_p(rb_enc_get(obj))) { + obj = rb_str_encode(obj, CEncoding_UTF_8, 0, Qnil); + } +#endif + if (state->ascii_only) { + convert_UTF8_to_JSON_ASCII(buffer, obj); + } else { + convert_UTF8_to_JSON(buffer, obj); + } + fbuffer_append_char(buffer, '"'); +} + +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "null", 4); +} + +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "false", 5); +} + +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "true", 4); +} + +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_long(buffer, FIX2LONG(obj)); +} + +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp = rb_funcall(obj, i_to_s, 0); + fbuffer_append_str(buffer, tmp); +} + +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + if (FIXNUM_P(obj)) + 
generate_json_fixnum(buffer, Vstate, state, obj); + else + generate_json_bignum(buffer, Vstate, state, obj); +} +#endif +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + double value = RFLOAT_VALUE(obj); + char allow_nan = state->allow_nan; + VALUE tmp = rb_funcall(obj, i_to_s, 0); + if (!allow_nan) { + if (isinf(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } else if (isnan(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } + } + fbuffer_append_str(buffer, tmp); +} + +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp; + VALUE klass = CLASS_OF(obj); + if (klass == rb_cHash) { + generate_json_object(buffer, Vstate, state, obj); + } else if (klass == rb_cArray) { + generate_json_array(buffer, Vstate, state, obj); + } else if (klass == rb_cString) { + generate_json_string(buffer, Vstate, state, obj); + } else if (obj == Qnil) { + generate_json_null(buffer, Vstate, state, obj); + } else if (obj == Qfalse) { + generate_json_false(buffer, Vstate, state, obj); + } else if (obj == Qtrue) { + generate_json_true(buffer, Vstate, state, obj); + } else if (FIXNUM_P(obj)) { + generate_json_fixnum(buffer, Vstate, state, obj); + } else if (RB_TYPE_P(obj, T_BIGNUM)) { + generate_json_bignum(buffer, Vstate, state, obj); + } else if (klass == rb_cFloat) { + generate_json_float(buffer, Vstate, state, obj); + } else if (rb_respond_to(obj, i_to_json)) { + tmp = rb_funcall(obj, i_to_json, 1, Vstate); + Check_Type(tmp, T_STRING); + fbuffer_append_str(buffer, tmp); + } else { + tmp = rb_funcall(obj, i_to_s, 0); + Check_Type(tmp, T_STRING); + generate_json_string(buffer, Vstate, state, tmp); + } +} + +static FBuffer *cState_prepare_buffer(VALUE self) +{ + FBuffer *buffer; + GET_STATE(self); + buffer = fbuffer_alloc(state->buffer_initial_length); + + if (state->object_delim) { + fbuffer_clear(state->object_delim); + } else { + state->object_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->object_delim, ','); + if (state->object_delim2) { + fbuffer_clear(state->object_delim2); + } else { + state->object_delim2 = fbuffer_alloc(16); + } + if (state->space_before) fbuffer_append(state->object_delim2, state->space_before, state->space_before_len); + fbuffer_append_char(state->object_delim2, ':'); + if (state->space) fbuffer_append(state->object_delim2, state->space, state->space_len); + + if (state->array_delim) { + fbuffer_clear(state->array_delim); + } else { + state->array_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->array_delim, ','); + if (state->array_nl) fbuffer_append(state->array_delim, state->array_nl, state->array_nl_len); + return buffer; +} + +static VALUE cState_partial_generate(VALUE self, VALUE obj) +{ + FBuffer *buffer = cState_prepare_buffer(self); + GET_STATE(self); + generate_json(buffer, self, state, obj); + return fbuffer_to_s(buffer); +} + +/* + * call-seq: generate(obj) + * + * Generates a valid JSON document from object +obj+ and returns the + * result. If no valid JSON document can be created this method raises a + * GeneratorError exception. 
+ */ +static VALUE cState_generate(VALUE self, VALUE obj) +{ + VALUE result = cState_partial_generate(self, obj); + GET_STATE(self); + (void)state; + return result; +} + +/* + * call-seq: new(opts = {}) + * + * Instantiates a new State object, configured by _opts_. + * + * _opts_ can have the following keys: + * + * * *indent*: a string used to indent levels (default: ''), + * * *space*: a string that is put after, a : or , delimiter (default: ''), + * * *space_before*: a string that is put before a : pair delimiter (default: ''), + * * *object_nl*: a string that is put at the end of a JSON object (default: ''), + * * *array_nl*: a string that is put at the end of a JSON array (default: ''), + * * *allow_nan*: true if NaN, Infinity, and -Infinity should be + * generated, otherwise an exception is thrown, if these values are + * encountered. This options defaults to false. + * * *ascii_only*: true if only ASCII characters should be generated. This + * option defaults to false. + * * *buffer_initial_length*: sets the initial length of the generator's + * internal buffer. + */ +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE opts; + GET_STATE(self); + state->max_nesting = 100; + state->buffer_initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + rb_scan_args(argc, argv, "01", &opts); + if (!NIL_P(opts)) cState_configure(self, opts); + return self; +} + +/* + * call-seq: initialize_copy(orig) + * + * Initializes this object from orig if it can be duplicated/cloned and returns + * it. +*/ +static VALUE cState_init_copy(VALUE obj, VALUE orig) +{ + JSON_Generator_State *objState, *origState; + + if (obj == orig) return obj; + GET_STATE_TO(obj, objState); + GET_STATE_TO(orig, origState); + if (!objState) rb_raise(rb_eArgError, "unallocated JSON::State"); + + MEMCPY(objState, origState, JSON_Generator_State, 1); + objState->indent = fstrndup(origState->indent, origState->indent_len); + objState->space = fstrndup(origState->space, origState->space_len); + objState->space_before = fstrndup(origState->space_before, origState->space_before_len); + objState->object_nl = fstrndup(origState->object_nl, origState->object_nl_len); + objState->array_nl = fstrndup(origState->array_nl, origState->array_nl_len); + if (origState->array_delim) objState->array_delim = fbuffer_dup(origState->array_delim); + if (origState->object_delim) objState->object_delim = fbuffer_dup(origState->object_delim); + if (origState->object_delim2) objState->object_delim2 = fbuffer_dup(origState->object_delim2); + return obj; +} + +/* + * call-seq: from_state(opts) + * + * Creates a State object from _opts_, which ought to be Hash to create a + * new State instance configured by _opts_, something else to create an + * unconfigured instance. If _opts_ is a State object, it is just returned. + */ +static VALUE cState_from_state_s(VALUE self, VALUE opts) +{ + if (rb_obj_is_kind_of(opts, self)) { + return opts; + } else if (rb_obj_is_kind_of(opts, rb_cHash)) { + return rb_funcall(self, i_new, 1, opts); + } else { + VALUE prototype = rb_const_get(mJSON, i_SAFE_STATE_PROTOTYPE); + return rb_funcall(prototype, i_dup, 0); + } +} + +/* + * call-seq: indent() + * + * Returns the string that is used to indent levels in the JSON text. + */ +static VALUE cState_indent(VALUE self) +{ + GET_STATE(self); + return state->indent ? rb_str_new(state->indent, state->indent_len) : rb_str_new2(""); +} + +/* + * call-seq: indent=(indent) + * + * Sets the string that is used to indent levels in the JSON text. 
+ */ +static VALUE cState_indent_set(VALUE self, VALUE indent) +{ + unsigned long len; + GET_STATE(self); + Check_Type(indent, T_STRING); + len = RSTRING_LEN(indent); + if (len == 0) { + if (state->indent) { + ruby_xfree(state->indent); + state->indent = NULL; + state->indent_len = 0; + } + } else { + if (state->indent) ruby_xfree(state->indent); + state->indent = fstrndup(RSTRING_PTR(indent), len); + state->indent_len = len; + } + return Qnil; +} + +/* + * call-seq: space() + * + * Returns the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space(VALUE self) +{ + GET_STATE(self); + return state->space ? rb_str_new(state->space, state->space_len) : rb_str_new2(""); +} + +/* + * call-seq: space=(space) + * + * Sets _space_ to the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space_set(VALUE self, VALUE space) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space, T_STRING); + len = RSTRING_LEN(space); + if (len == 0) { + if (state->space) { + ruby_xfree(state->space); + state->space = NULL; + state->space_len = 0; + } + } else { + if (state->space) ruby_xfree(state->space); + state->space = fstrndup(RSTRING_PTR(space), len); + state->space_len = len; + } + return Qnil; +} + +/* + * call-seq: space_before() + * + * Returns the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before(VALUE self) +{ + GET_STATE(self); + return state->space_before ? rb_str_new(state->space_before, state->space_before_len) : rb_str_new2(""); +} + +/* + * call-seq: space_before=(space_before) + * + * Sets the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before_set(VALUE self, VALUE space_before) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space_before, T_STRING); + len = RSTRING_LEN(space_before); + if (len == 0) { + if (state->space_before) { + ruby_xfree(state->space_before); + state->space_before = NULL; + state->space_before_len = 0; + } + } else { + if (state->space_before) ruby_xfree(state->space_before); + state->space_before = fstrndup(RSTRING_PTR(space_before), len); + state->space_before_len = len; + } + return Qnil; +} + +/* + * call-seq: object_nl() + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl(VALUE self) +{ + GET_STATE(self); + return state->object_nl ? rb_str_new(state->object_nl, state->object_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: object_nl=(object_nl) + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(object_nl, T_STRING); + len = RSTRING_LEN(object_nl); + if (len == 0) { + if (state->object_nl) { + ruby_xfree(state->object_nl); + state->object_nl = NULL; + } + } else { + if (state->object_nl) ruby_xfree(state->object_nl); + state->object_nl = fstrndup(RSTRING_PTR(object_nl), len); + state->object_nl_len = len; + } + return Qnil; +} + +/* + * call-seq: array_nl() + * + * This string is put at the end of a line that holds a JSON array. + */ +static VALUE cState_array_nl(VALUE self) +{ + GET_STATE(self); + return state->array_nl ? rb_str_new(state->array_nl, state->array_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: array_nl=(array_nl) + * + * This string is put at the end of a line that holds a JSON array. 
+ */ +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(array_nl, T_STRING); + len = RSTRING_LEN(array_nl); + if (len == 0) { + if (state->array_nl) { + ruby_xfree(state->array_nl); + state->array_nl = NULL; + } + } else { + if (state->array_nl) ruby_xfree(state->array_nl); + state->array_nl = fstrndup(RSTRING_PTR(array_nl), len); + state->array_nl_len = len; + } + return Qnil; +} + + +/* +* call-seq: check_circular? +* +* Returns true, if circular data structures should be checked, +* otherwise returns false. +*/ +static VALUE cState_check_circular_p(VALUE self) +{ + GET_STATE(self); + return state->max_nesting ? Qtrue : Qfalse; +} + +/* + * call-seq: max_nesting + * + * This integer returns the maximum level of data structure nesting in + * the generated JSON, max_nesting = 0 if no maximum is checked. + */ +static VALUE cState_max_nesting(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->max_nesting); +} + +/* + * call-seq: max_nesting=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_max_nesting_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + return state->max_nesting = FIX2LONG(depth); +} + +/* + * call-seq: allow_nan? + * + * Returns true, if NaN, Infinity, and -Infinity should be generated, otherwise + * returns false. + */ +static VALUE cState_allow_nan_p(VALUE self) +{ + GET_STATE(self); + return state->allow_nan ? Qtrue : Qfalse; +} + +/* + * call-seq: ascii_only? + * + * Returns true, if only ASCII characters should be generated. Otherwise + * returns false. + */ +static VALUE cState_ascii_only_p(VALUE self) +{ + GET_STATE(self); + return state->ascii_only ? Qtrue : Qfalse; +} + +/* + * call-seq: depth + * + * This integer returns the current depth of data structure nesting. + */ +static VALUE cState_depth(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->depth); +} + +/* + * call-seq: depth=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_depth_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + return Qnil; +} + +/* + * call-seq: buffer_initial_length + * + * This integer returns the current initial length of the buffer. + */ +static VALUE cState_buffer_initial_length(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->buffer_initial_length); +} + +/* + * call-seq: buffer_initial_length=(length) + * + * This sets the initial length of the buffer to +length+, if +length+ > 0, + * otherwise its value isn't changed. 
+ */ +static VALUE cState_buffer_initial_length_set(VALUE self, VALUE buffer_initial_length) +{ + long initial_length; + GET_STATE(self); + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) { + state->buffer_initial_length = initial_length; + } + return Qnil; +} + +/* + * + */ +void Init_generator(void) +{ +#undef rb_intern + rb_require("json/common"); + + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + mGenerator = rb_define_module_under(mExt, "Generator"); + + eGeneratorError = rb_path2class("JSON::GeneratorError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eGeneratorError); + rb_gc_register_mark_object(eNestingError); + + cState = rb_define_class_under(mGenerator, "State", rb_cObject); + rb_define_alloc_func(cState, cState_s_allocate); + rb_define_singleton_method(cState, "from_state", cState_from_state_s, 1); + rb_define_method(cState, "initialize", cState_initialize, -1); + rb_define_method(cState, "initialize_copy", cState_init_copy, 1); + rb_define_method(cState, "indent", cState_indent, 0); + rb_define_method(cState, "indent=", cState_indent_set, 1); + rb_define_method(cState, "space", cState_space, 0); + rb_define_method(cState, "space=", cState_space_set, 1); + rb_define_method(cState, "space_before", cState_space_before, 0); + rb_define_method(cState, "space_before=", cState_space_before_set, 1); + rb_define_method(cState, "object_nl", cState_object_nl, 0); + rb_define_method(cState, "object_nl=", cState_object_nl_set, 1); + rb_define_method(cState, "array_nl", cState_array_nl, 0); + rb_define_method(cState, "array_nl=", cState_array_nl_set, 1); + rb_define_method(cState, "max_nesting", cState_max_nesting, 0); + rb_define_method(cState, "max_nesting=", cState_max_nesting_set, 1); + rb_define_method(cState, "check_circular?", cState_check_circular_p, 0); + rb_define_method(cState, "allow_nan?", cState_allow_nan_p, 0); + rb_define_method(cState, "ascii_only?", cState_ascii_only_p, 0); + rb_define_method(cState, "depth", cState_depth, 0); + rb_define_method(cState, "depth=", cState_depth_set, 1); + rb_define_method(cState, "buffer_initial_length", cState_buffer_initial_length, 0); + rb_define_method(cState, "buffer_initial_length=", cState_buffer_initial_length_set, 1); + rb_define_method(cState, "configure", cState_configure, 1); + rb_define_alias(cState, "merge", "configure"); + rb_define_method(cState, "to_h", cState_to_h, 0); + rb_define_alias(cState, "to_hash", "to_h"); + rb_define_method(cState, "[]", cState_aref, 1); + rb_define_method(cState, "[]=", cState_aset, 2); + rb_define_method(cState, "generate", cState_generate, 1); + + mGeneratorMethods = rb_define_module_under(mGenerator, "GeneratorMethods"); + mObject = rb_define_module_under(mGeneratorMethods, "Object"); + rb_define_method(mObject, "to_json", mObject_to_json, -1); + mHash = rb_define_module_under(mGeneratorMethods, "Hash"); + rb_define_method(mHash, "to_json", mHash_to_json, -1); + mArray = rb_define_module_under(mGeneratorMethods, "Array"); + rb_define_method(mArray, "to_json", mArray_to_json, -1); +#ifdef RUBY_INTEGER_UNIFICATION + mInteger = rb_define_module_under(mGeneratorMethods, "Integer"); + rb_define_method(mInteger, "to_json", mInteger_to_json, -1); +#else + mFixnum = rb_define_module_under(mGeneratorMethods, "Fixnum"); + rb_define_method(mFixnum, "to_json", mFixnum_to_json, -1); + mBignum = rb_define_module_under(mGeneratorMethods, "Bignum"); + 
rb_define_method(mBignum, "to_json", mBignum_to_json, -1); +#endif + mFloat = rb_define_module_under(mGeneratorMethods, "Float"); + rb_define_method(mFloat, "to_json", mFloat_to_json, -1); + mString = rb_define_module_under(mGeneratorMethods, "String"); + rb_define_singleton_method(mString, "included", mString_included_s, 1); + rb_define_method(mString, "to_json", mString_to_json, -1); + rb_define_method(mString, "to_json_raw", mString_to_json_raw, -1); + rb_define_method(mString, "to_json_raw_object", mString_to_json_raw_object, 0); + mString_Extend = rb_define_module_under(mString, "Extend"); + rb_define_method(mString_Extend, "json_create", mString_Extend_json_create, 1); + mTrueClass = rb_define_module_under(mGeneratorMethods, "TrueClass"); + rb_define_method(mTrueClass, "to_json", mTrueClass_to_json, -1); + mFalseClass = rb_define_module_under(mGeneratorMethods, "FalseClass"); + rb_define_method(mFalseClass, "to_json", mFalseClass_to_json, -1); + mNilClass = rb_define_module_under(mGeneratorMethods, "NilClass"); + rb_define_method(mNilClass, "to_json", mNilClass_to_json, -1); + + i_to_s = rb_intern("to_s"); + i_to_json = rb_intern("to_json"); + i_new = rb_intern("new"); + i_indent = rb_intern("indent"); + i_space = rb_intern("space"); + i_space_before = rb_intern("space_before"); + i_object_nl = rb_intern("object_nl"); + i_array_nl = rb_intern("array_nl"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_ascii_only = rb_intern("ascii_only"); + i_depth = rb_intern("depth"); + i_buffer_initial_length = rb_intern("buffer_initial_length"); + i_pack = rb_intern("pack"); + i_unpack = rb_intern("unpack"); + i_create_id = rb_intern("create_id"); + i_extend = rb_intern("extend"); + i_key_p = rb_intern("key?"); + i_aref = rb_intern("[]"); + i_send = rb_intern("__send__"); + i_respond_to_p = rb_intern("respond_to?"); + i_match = rb_intern("match"); + i_keys = rb_intern("keys"); + i_dup = rb_intern("dup"); +#ifdef HAVE_RUBY_ENCODING_H + CEncoding_UTF_8 = rb_funcall(rb_path2class("Encoding"), rb_intern("find"), 1, rb_str_new2("utf-8")); + i_encoding = rb_intern("encoding"); + i_encode = rb_intern("encode"); +#endif + i_SAFE_STATE_PROTOTYPE = rb_intern("SAFE_STATE_PROTOTYPE"); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/generator.h b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/generator.h new file mode 100644 index 0000000..c367a62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/generator/generator.h @@ -0,0 +1,171 @@ +#ifndef _GENERATOR_H_ +#define _GENERATOR_H_ + +#include +#include + +#include "ruby.h" + +#ifdef HAVE_RUBY_RE_H +#include "ruby/re.h" +#else +#include "re.h" +#endif + +#ifndef rb_intern_str +#define rb_intern_str(string) SYM2ID(rb_str_intern(string)) +#endif + +#ifndef rb_obj_instance_variables +#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0) +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode definitions */ + +#define UNI_STRICT_CONVERSION 1 + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_MAX_BMP (UTF32)0x0000FFFF +#define UNI_MAX_UTF16 (UTF32)0x0010FFFF +#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF +#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF + +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define 
UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +static const int halfShift = 10; /* used for shifting by 10 bits */ + +static const UTF32 halfBase = 0x0010000UL; +static const UTF32 halfMask = 0x3FFUL; + +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length); +static void unicode_escape(char *buf, UTF16 character); +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character); +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string); +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string); +static char *fstrndup(const char *ptr, unsigned long len); + +/* ruby api and some helpers */ + +typedef struct JSON_Generator_StateStruct { + char *indent; + long indent_len; + char *space; + long space_len; + char *space_before; + long space_before_len; + char *object_nl; + long object_nl_len; + char *array_nl; + long array_nl_len; + FBuffer *array_delim; + FBuffer *object_delim; + FBuffer *object_delim2; + long max_nesting; + char allow_nan; + char ascii_only; + long depth; + long buffer_initial_length; +} JSON_Generator_State; + +#define GET_STATE_TO(self, state) \ + TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state) + +#define GET_STATE(self) \ + JSON_Generator_State *state; \ + GET_STATE_TO(self, state) + +#define GENERATE_JSON(type) \ + FBuffer *buffer; \ + VALUE Vstate; \ + JSON_Generator_State *state; \ + \ + rb_scan_args(argc, argv, "01", &Vstate); \ + Vstate = cState_from_state_s(cState, Vstate); \ + TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \ + buffer = cState_prepare_buffer(Vstate); \ + generate_json_##type(buffer, Vstate, state, self); \ + return fbuffer_to_s(buffer) + +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self); +#ifdef RUBY_INTEGER_UNIFICATION +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self); +#else +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self); +#endif +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_included_s(VALUE self, VALUE modul); +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_to_json_raw_object(VALUE self); +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self); +static VALUE mString_Extend_json_create(VALUE self, VALUE o); +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self); +static void State_free(void *state); +static VALUE cState_s_allocate(VALUE klass); +static VALUE cState_configure(VALUE self, VALUE opts); +static VALUE cState_to_h(VALUE self); +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void 
generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#endif +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static VALUE cState_partial_generate(VALUE self, VALUE obj); +static VALUE cState_generate(VALUE self, VALUE obj); +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cState_from_state_s(VALUE self, VALUE opts); +static VALUE cState_indent(VALUE self); +static VALUE cState_indent_set(VALUE self, VALUE indent); +static VALUE cState_space(VALUE self); +static VALUE cState_space_set(VALUE self, VALUE space); +static VALUE cState_space_before(VALUE self); +static VALUE cState_space_before_set(VALUE self, VALUE space_before); +static VALUE cState_object_nl(VALUE self); +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl); +static VALUE cState_array_nl(VALUE self); +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl); +static VALUE cState_max_nesting(VALUE self); +static VALUE cState_max_nesting_set(VALUE self, VALUE depth); +static VALUE cState_allow_nan_p(VALUE self); +static VALUE cState_ascii_only_p(VALUE self); +static VALUE cState_depth(VALUE self); +static VALUE cState_depth_set(VALUE self, VALUE depth); +static FBuffer *cState_prepare_buffer(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Generator_State_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json) +#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/.sitearchdir.-.json.-.ext.time b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/.sitearchdir.-.json.-.ext.time new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/depend b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/depend new file mode 100644 index 0000000..498ffa9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/depend @@ -0,0 +1 @@ +parser.o: parser.c parser.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/extconf.rb new file mode 100644 index 0000000..f7360d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/extconf.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: false +require 'mkmf' + +have_func("rb_enc_raise", "ruby.h") + +create_makefile 'json/ext/parser' diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.c b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.c new file 
mode 100644 index 0000000..6f0e4ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.c @@ -0,0 +1,2137 @@ +/* This file is automatically generated from parser.rl by using ragel */ +#line 1 "parser.rl" +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) +{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; +static VALUE cBigDecimal = Qundef; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_BigDecimal; + + 
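For reference, digit_values and unescape_unicode above decode the four hex digits of a \uXXXX escape into a UTF-32 code point (yielding U+FFFD for malformed input), and convert_UTF32_to_UTF8 re-encodes that code point as one to four UTF-8 bytes. A minimal Ruby sketch of the same tiering, for illustration only (method names are hypothetical, and the C helper's '?' fallback for code points above U+1FFFFF is omitted):

    def unescape_unicode(hex4)            # e.g. "20ac" taken from "\u20ac"
      Integer(hex4, 16)                   # => 0x20AC
    end

    def convert_utf32_to_utf8(ch)         # same size tiers as the C helper
      if ch <= 0x7F
        [ch].pack("C")
      elsif ch <= 0x07FF
        [0xC0 | (ch >> 6), 0x80 | (ch & 0x3F)].pack("C*")
      elsif ch <= 0xFFFF
        [0xE0 | (ch >> 12), 0x80 | ((ch >> 6) & 0x3F), 0x80 | (ch & 0x3F)].pack("C*")
      else
        [0xF0 | (ch >> 18), 0x80 | ((ch >> 12) & 0x3F),
         0x80 | ((ch >> 6) & 0x3F), 0x80 | (ch & 0x3F)].pack("C*")
      end
    end

    convert_utf32_to_utf8(unescape_unicode("20ac"))   # => bytes E2 82 AC, i.e. "€" in UTF-8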
+#line 126 "parser.rl" + + + +#line 108 "parser.c" +enum {JSON_object_start = 1}; +enum {JSON_object_first_final = 27}; +enum {JSON_object_error = 0}; + +enum {JSON_object_en_main = 1}; + + +#line 168 "parser.rl" + + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + +#line 132 "parser.c" + { + cs = JSON_object_start; + } + +#line 183 "parser.rl" + +#line 139 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 123 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 13: goto st2; + case 32: goto st2; + case 34: goto tr2; + case 47: goto st23; + case 125: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st2; + goto st0; +tr2: +#line 150 "parser.rl" + { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, p, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { p--; {p++; cs = 3; goto _out;} } else {p = (( np))-1;} + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 180 "parser.c" + switch( (*p) ) { + case 13: goto st3; + case 32: goto st3; + case 47: goto st4; + case 58: goto st8; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st3; + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st5; + case 47: goto st7; + } + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 42 ) + goto st6; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st6; + case 47: goto st3; + } + goto st5; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 10 ) + goto st3; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 13: goto st8; + case 32: goto st8; + case 34: goto tr11; + case 45: goto tr11; + case 47: goto st19; + case 73: goto tr11; + case 78: goto tr11; + case 91: goto tr11; + case 102: goto tr11; + case 110: goto tr11; + case 116: goto tr11; + case 123: goto tr11; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr11; + } else if ( (*p) >= 9 ) + goto st8; + goto st0; +tr11: +#line 134 "parser.rl" + { + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + p--; {p++; cs = 9; goto _out;} + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + {p = (( np))-1;} + } + } + goto st9; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: +#line 268 "parser.c" + switch( (*p) ) { + case 13: goto st9; + case 32: goto st9; + case 44: goto st10; + case 47: goto st15; + case 125: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st9; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + switch( (*p) ) { + case 13: goto st10; + case 32: goto st10; + case 34: goto tr2; + case 47: goto st11; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st10; + goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + switch( (*p) ) { + case 42: goto st12; + case 47: goto st14; + } + goto st0; +st12: + if ( 
++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 42 ) + goto st13; + goto st12; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + switch( (*p) ) { + case 42: goto st13; + case 47: goto st10; + } + goto st12; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 10 ) + goto st10; + goto st14; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 42: goto st16; + case 47: goto st18; + } + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 42 ) + goto st17; + goto st16; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: + switch( (*p) ) { + case 42: goto st17; + case 47: goto st9; + } + goto st16; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: + if ( (*p) == 10 ) + goto st9; + goto st18; +tr4: +#line 158 "parser.rl" + { p--; {p++; cs = 27; goto _out;} } + goto st27; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: +#line 364 "parser.c" + goto st0; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: + switch( (*p) ) { + case 42: goto st20; + case 47: goto st22; + } + goto st0; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: + if ( (*p) == 42 ) + goto st21; + goto st20; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: + switch( (*p) ) { + case 42: goto st21; + case 47: goto st8; + } + goto st20; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: + if ( (*p) == 10 ) + goto st8; + goto st22; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: + switch( (*p) ) { + case 42: goto st24; + case 47: goto st26; + } + goto st0; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: + if ( (*p) == 42 ) + goto st25; + goto st24; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: + switch( (*p) ) { + case 42: goto st25; + case 47: goto st2; + } + goto st24; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: + if ( (*p) == 10 ) + goto st2; + goto st26; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 184 "parser.rl" + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + + +#line 487 
"parser.c" +enum {JSON_value_start = 1}; +enum {JSON_value_first_final = 29}; +enum {JSON_value_error = 0}; + +enum {JSON_value_en_main = 1}; + + +#line 284 "parser.rl" + + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + +#line 503 "parser.c" + { + cs = JSON_value_start; + } + +#line 291 "parser.rl" + +#line 510 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +st1: + if ( ++p == pe ) + goto _test_eof1; +case 1: + switch( (*p) ) { + case 13: goto st1; + case 32: goto st1; + case 34: goto tr2; + case 45: goto tr3; + case 47: goto st6; + case 73: goto st10; + case 78: goto st17; + case 91: goto tr7; + case 102: goto st19; + case 110: goto st23; + case 116: goto st26; + case 123: goto tr11; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr3; + } else if ( (*p) >= 9 ) + goto st1; + goto st0; +st0: +cs = 0; + goto _out; +tr2: +#line 236 "parser.rl" + { + char *np = JSON_parse_string(json, p, pe, result); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr3: +#line 241 "parser.rl" + { + char *np; + if(pe > p + 8 && !strncmp(MinusInfinity, p, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + {p = (( p + 10))-1;} + p--; {p++; cs = 29; goto _out;} + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + np = JSON_parse_integer(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + p--; {p++; cs = 29; goto _out;} + } + goto st29; +tr7: +#line 259 "parser.rl" + { + char *np; + np = JSON_parse_array(json, p, pe, result, current_nesting + 1); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr11: +#line 265 "parser.rl" + { + char *np; + np = JSON_parse_object(json, p, pe, result, current_nesting + 1); + if (np == NULL) { p--; {p++; cs = 29; goto _out;} } else {p = (( np))-1;} + } + goto st29; +tr25: +#line 229 "parser.rl" + { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + goto st29; +tr27: +#line 222 "parser.rl" + { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + goto st29; +tr31: +#line 216 "parser.rl" + { + *result = Qfalse; + } + goto st29; +tr34: +#line 213 "parser.rl" + { + *result = Qnil; + } + goto st29; +tr37: +#line 219 "parser.rl" + { + *result = Qtrue; + } + goto st29; +st29: + if ( ++p == pe ) + goto _test_eof29; +case 29: +#line 271 "parser.rl" + { p--; {p++; cs = 29; goto _out;} } +#line 630 "parser.c" + switch( (*p) ) { + case 13: goto st29; + case 32: goto st29; + case 47: goto st2; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st29; + goto st0; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 42: goto st3; + case 47: goto st5; + } + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 42 ) + goto st4; + goto st3; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st4; + case 47: goto st29; + } + goto st3; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 10 ) + goto st29; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st9; + } + goto 
st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 42 ) + goto st8; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 42: goto st8; + case 47: goto st1; + } + goto st7; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 10 ) + goto st1; + goto st9; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( (*p) == 110 ) + goto st11; + goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + if ( (*p) == 102 ) + goto st12; + goto st0; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 105 ) + goto st13; + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + if ( (*p) == 110 ) + goto st14; + goto st0; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 105 ) + goto st15; + goto st0; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + if ( (*p) == 116 ) + goto st16; + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 121 ) + goto tr25; + goto st0; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: + if ( (*p) == 97 ) + goto st18; + goto st0; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: + if ( (*p) == 78 ) + goto tr27; + goto st0; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: + if ( (*p) == 97 ) + goto st20; + goto st0; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: + if ( (*p) == 108 ) + goto st21; + goto st0; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: + if ( (*p) == 115 ) + goto st22; + goto st0; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: + if ( (*p) == 101 ) + goto tr31; + goto st0; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: + if ( (*p) == 117 ) + goto st24; + goto st0; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: + if ( (*p) == 108 ) + goto st25; + goto st0; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: + if ( (*p) == 108 ) + goto tr34; + goto st0; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: + if ( (*p) == 114 ) + goto st27; + goto st0; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: + if ( (*p) == 117 ) + goto st28; + goto st0; +st28: + if ( ++p == pe ) + goto _test_eof28; +case 28: + if ( (*p) == 101 ) + goto tr37; + goto st0; + } + _test_eof1: cs = 1; goto _test_eof; + _test_eof29: cs = 29; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof28: cs = 28; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 292 "parser.rl" + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + 
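JSON_parse_value above dispatches on the first significant byte: '"' begins a string, '-' or a digit a number, '[' an array, '{' an object, and the literals null/true/false map to nil/true/false, while NaN, Infinity and -Infinity are only accepted when allow_nan is set; arrays and objects also raise NestingError once current_nesting exceeds max_nesting. A hedged usage sketch of that behaviour, assuming this vendored extension is the active parser (JSON.parser == JSON::Ext::Parser):

    require 'json'

    JSON.parse('[NaN]')                    # raises JSON::ParserError, NaN is not RFC-valid JSON
    JSON.parse('[NaN]', allow_nan: true)   # => [NaN]  (Float::NAN)
    JSON.parse('[[1]]', max_nesting: 1)    # raises JSON::NestingError ("nesting of 2 is too deep")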
+ +#line 881 "parser.c" +enum {JSON_integer_start = 1}; +enum {JSON_integer_first_final = 3}; +enum {JSON_integer_error = 0}; + +enum {JSON_integer_en_main = 1}; + + +#line 308 "parser.rl" + + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + +#line 897 "parser.c" + { + cs = JSON_integer_start; + } + +#line 315 "parser.rl" + json->memo = p; + +#line 905 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 45: goto st2; + case 48: goto st3; + } + if ( 49 <= (*p) && (*p) <= 57 ) + goto st5; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + if ( (*p) == 48 ) + goto st3; + if ( 49 <= (*p) && (*p) <= 57 ) + goto st5; + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st0; + goto tr4; +tr4: +#line 305 "parser.rl" + { p--; {p++; cs = 4; goto _out;} } + goto st4; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: +#line 946 "parser.c" + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st5; + goto tr4; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 317 "parser.rl" + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + + +#line 980 "parser.c" +enum {JSON_float_start = 1}; +enum {JSON_float_first_final = 8}; +enum {JSON_float_error = 0}; + +enum {JSON_float_en_main = 1}; + + +#line 342 "parser.rl" + + +static int is_bigdecimal_class(VALUE obj) +{ + if (cBigDecimal == Qundef) { + if (rb_const_defined(rb_cObject, i_BigDecimal)) { + cBigDecimal = rb_const_get_at(rb_cObject, i_BigDecimal); + } + else { + return 0; + } + } + return obj == cBigDecimal; +} + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + +#line 1009 "parser.c" + { + cs = JSON_float_start; + } + +#line 362 "parser.rl" + json->memo = p; + +#line 1017 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 45: goto st2; + case 48: goto st3; + } + if ( 49 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + if ( (*p) == 48 ) + goto st3; + if ( 49 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + switch( (*p) ) { + case 46: goto st4; + case 69: goto st5; + case 101: goto st5; + } + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st8; + goto st0; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 69: goto st5; + case 101: goto st5; + } + if ( (*p) > 46 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st8; + } else if ( (*p) >= 45 ) + goto st0; + goto tr9; +tr9: +#line 336 "parser.rl" + { p--; {p++; cs = 9; goto _out;} } + goto st9; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: +#line 1082 "parser.c" + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + switch( (*p) ) { + case 43: goto st6; + case 45: goto st6; + } + if ( 48 <= (*p) && (*p) <= 57 ) + goto 
st10; + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + switch( (*p) ) { + case 69: goto st0; + case 101: goto st0; + } + if ( (*p) > 46 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st10; + } else if ( (*p) >= 45 ) + goto st0; + goto tr9; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + switch( (*p) ) { + case 46: goto st4; + case 69: goto st5; + case 101: goto st5; + } + if ( 48 <= (*p) && (*p) <= 57 ) + goto st7; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 364 "parser.rl" + + if (cs >= JSON_float_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + if (NIL_P(json->decimal_class)) { + *result = rb_float_new(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } else { + VALUE text; + text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + if (is_bigdecimal_class(json->decimal_class)) { + *result = rb_funcall(Qnil, i_BigDecimal, 1, text); + } else { + *result = rb_funcall(json->decimal_class, i_new, 1, text); + } + } + return p + 1; + } else { + return NULL; + } +} + + + +#line 1169 "parser.c" +enum {JSON_array_start = 1}; +enum {JSON_array_first_final = 17}; +enum {JSON_array_error = 0}; + +enum {JSON_array_en_main = 1}; + + +#line 417 "parser.rl" + + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + +#line 1191 "parser.c" + { + cs = JSON_array_start; + } + +#line 430 "parser.rl" + +#line 1198 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 91 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 13: goto st2; + case 32: goto st2; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st13; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 93: goto tr4; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st2; + goto st0; +tr2: +#line 394 "parser.rl" + { + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + p--; {p++; cs = 3; goto _out;} + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + {p = (( np))-1;} + } + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 1257 "parser.c" + switch( (*p) ) { + case 13: goto st3; + case 32: goto st3; + case 44: goto st4; + case 47: goto st9; + case 93: goto tr4; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st3; + goto st0; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 13: goto st4; + case 32: goto st4; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st5; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st4; + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + switch( (*p) ) { + case 42: goto st6; + case 47: goto st8; + } + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( (*p) == 42 ) + goto st7; + goto st6; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st4; + } + goto st6; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + if ( (*p) == 10 ) + goto st4; + goto st8; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + switch( (*p) ) { + case 42: goto st10; + case 47: goto st12; + } + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( (*p) == 42 ) + goto st11; + goto st10; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + switch( (*p) ) { + case 42: goto st11; + case 47: goto st3; + } + goto st10; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( (*p) == 10 ) + goto st3; + goto st12; +tr4: +#line 409 "parser.rl" + { p--; {p++; cs = 17; goto _out;} } + goto st17; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: +#line 1364 "parser.c" + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + switch( (*p) ) { + case 42: goto st14; + case 47: goto st16; + } + goto st0; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: + if ( (*p) == 42 ) + goto st15; + goto st14; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 42: goto st15; + case 47: goto st2; + } + goto st14; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 10 ) + goto st2; + goto st16; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + 
_test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 431 "parser.rl" + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd) +{ + char *p = string, *pe = string, *unescape; + int unescape_len; + char buf[4]; + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) rb_str_buf_cat(result, p, pe - p); + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + rb_str_buf_cat(result, unescape, unescape_len); + p = ++pe; + } else { + pe++; + } + } + rb_str_buf_cat(result, p, pe - p); + return result; +} + + +#line 1509 "parser.c" +enum {JSON_string_start = 1}; +enum {JSON_string_first_final = 8}; +enum {JSON_string_error = 0}; + +enum {JSON_string_en_main = 1}; + + +#line 538 "parser.rl" + + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + *result = rb_str_buf_new(0); + +#line 1539 "parser.c" + { + cs = JSON_string_start; + } + +#line 559 "parser.rl" + json->memo = p; + +#line 1547 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + if ( (*p) == 34 ) + goto st2; + goto st0; +st0: +cs = 0; + goto _out; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 34: goto tr2; + case 92: goto st3; + } + if ( 0 <= (signed char)(*p) && (*p) <= 31 ) + goto st0; + goto st2; +tr2: +#line 524 "parser.rl" + { + *result = json_string_unescape(*result, json->memo + 1, p); + if (NIL_P(*result)) { + p--; + 
{p++; cs = 8; goto _out;} + } else { + FORCE_UTF8(*result); + {p = (( p + 1))-1;} + } + } +#line 535 "parser.rl" + { p--; {p++; cs = 8; goto _out;} } + goto st8; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: +#line 1590 "parser.c" + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 117 ) + goto st4; + if ( 0 <= (signed char)(*p) && (*p) <= 31 ) + goto st0; + goto st2; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st5; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st5; + } else + goto st5; + goto st0; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st6; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st6; + } else + goto st6; + goto st0; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st7; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st7; + } else + goto st7; + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto st2; + } else if ( (*p) > 70 ) { + if ( 97 <= (*p) && (*p) <= 102 ) + goto st2; + } else + goto st2; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 561 "parser.rl" + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (json->symbolize_names && json->parsing_name) { + *result = rb_str_intern(*result); + } else if (RB_TYPE_P(*result, T_STRING)) { + rb_str_resize(*result, RSTRING_LEN(*result)); + } + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. 
+ * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. + * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + + +#line 1853 "parser.c" +enum {JSON_start = 1}; +enum {JSON_first_final = 10}; +enum {JSON_error = 0}; + +enum {JSON_en_main = 1}; + + +#line 761 "parser.rl" + + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. + */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + +#line 1878 "parser.c" + { + cs = JSON_start; + } + +#line 777 "parser.rl" + p = json->source; + pe = p + json->len; + +#line 1887 "parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +st1: + if ( ++p == pe ) + goto _test_eof1; +case 1: + switch( (*p) ) { + case 13: goto st1; + case 32: goto st1; + case 34: goto tr2; + case 45: goto tr2; + case 47: goto st6; + case 73: goto tr2; + case 78: goto tr2; + case 91: goto tr2; + case 102: goto tr2; + case 110: goto tr2; + case 116: goto tr2; + case 123: goto tr2; + } + if ( (*p) > 10 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr2; + } else if ( (*p) >= 9 ) + goto st1; + goto st0; +st0: +cs = 0; + goto _out; +tr2: +#line 753 "parser.rl" + { + char *np = JSON_parse_value(json, p, pe, &result, 0); + if (np == NULL) { p--; {p++; cs = 10; goto _out;} } else {p = (( np))-1;} + } + goto st10; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: +#line 1931 "parser.c" + switch( (*p) ) { + case 13: goto st10; + case 32: goto st10; + case 47: goto st2; + } + if ( 9 <= (*p) && (*p) <= 10 ) + goto st10; + goto st0; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: + switch( (*p) ) { + case 42: goto st3; + case 47: goto st5; + } + goto st0; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: + if ( (*p) == 42 ) + goto st4; + goto st3; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: + switch( (*p) ) { + case 42: goto st4; + case 47: goto st10; + } + goto st3; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: + if ( (*p) == 10 ) + goto st10; + goto st5; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: + switch( (*p) ) { + case 42: goto st7; + case 47: goto st9; + } + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 42 ) + goto st8; + goto st7; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + switch( (*p) ) { + case 42: goto st8; + case 47: goto st1; + } + goto st7; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 10 ) + goto st1; + goto st9; + } + _test_eof1: cs = 1; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 780 "parser.rl" + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", 
__LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. + */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_BigDecimal = rb_intern("BigDecimal"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.h b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.h new file mode 100644 index 0000000..e6cf779 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.h @@ -0,0 +1,91 @@ +#ifndef _PARSER_H_ +#define _PARSER_H_ + +#include "ruby.h" + +#ifndef HAVE_RUBY_RE_H 
+#include "re.h" +#endif + +#ifdef HAVE_RUBY_ST_H +#include "ruby/st.h" +#else +#include "st.h" +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode */ + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +typedef struct JSON_ParserStruct { + VALUE Vsource; + char *source; + long len; + char *memo; + VALUE create_id; + int max_nesting; + int allow_nan; + int parsing_name; + int symbolize_names; + VALUE object_class; + VALUE array_class; + VALUE decimal_class; + int create_additions; + VALUE match_string; + FBuffer *fbuffer; +} JSON_Parser; + +#define GET_PARSER \ + GET_PARSER_INIT; \ + if (!json->Vsource) rb_raise(rb_eTypeError, "uninitialized instance") +#define GET_PARSER_INIT \ + JSON_Parser *json; \ + TypedData_Get_Struct(self, JSON_Parser, &JSON_Parser_type, json) + +#define MinusInfinity "-Infinity" +#define EVIL 0x666 + +static UTF32 unescape_unicode(const unsigned char *p); +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch); +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd); +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result); +static VALUE convert_encoding(VALUE source); +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cParser_parse(VALUE self); +static void JSON_mark(void *json); +static void JSON_free(void *json); +static VALUE cJSON_parser_s_allocate(VALUE klass); +static VALUE cParser_source(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Parser_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, JSON_free, json) +#define TypedData_Get_Struct(self, JSON_Parser, ignore, json) Data_Get_Struct(self, JSON_Parser, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.rl b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.rl new file mode 100644 index 0000000..00a35bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/ext/parser/parser.rl @@ -0,0 +1,897 @@ +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) 
+{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; +static VALUE cBigDecimal = Qundef; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_BigDecimal; + +%%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft\"\-\[\{NI] | digit; + begin_object = '{'; + 
end_object = '}'; + begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; +}%% + +%%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + fexec np; + } + } + + action parse_name { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, fpc, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + + pair = ignore* begin_name >parse_name ignore* name_separator ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object + (pair (next_pair)*)? ignore* + end_object + ) @exit; +}%% + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + %% write init; + %% write exec; + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + *result = Qnil; + } + action parse_false { + *result = Qfalse; + } + action parse_true { + *result = Qtrue; + } + action parse_nan { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + action parse_infinity { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + action parse_string { + char *np = JSON_parse_string(json, fpc, pe, result); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_number { + char *np; + if(pe > fpc + 8 && !strncmp(MinusInfinity, fpc, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + fexec p + 10; + fhold; fbreak; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, fpc, pe, result); + if (np != NULL) fexec np; + np = JSON_parse_integer(json, fpc, pe, result); + if (np != NULL) fexec np; + fhold; fbreak; + } + + action parse_array { + char *np; + np = JSON_parse_array(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_object { + char *np; + np = JSON_parse_object(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + +main 
:= ignore* ( + Vnull @parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) ignore* %*exit; +}%% + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + %% write init; + %% write exec; + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + +%%{ + machine JSON_integer; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ('0' | [1-9][0-9]*) (^[0-9]? @exit); +}%% + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + +%%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ( + (('0' | [1-9][0-9]*) '.' [0-9]+ ([Ee] [+\-]?[0-9]+)?) + | (('0' | [1-9][0-9]*) ([Ee] [+\-]?[0-9]+)) + ) (^[0-9Ee.\-]? @exit ); +}%% + +static int is_bigdecimal_class(VALUE obj) +{ + if (cBigDecimal == Qundef) { + if (rb_const_defined(rb_cObject, i_BigDecimal)) { + cBigDecimal = rb_const_get_at(rb_cObject, i_BigDecimal); + } + else { + return 0; + } + } + return obj == cBigDecimal; +} + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_float_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + if (NIL_P(json->decimal_class)) { + *result = rb_float_new(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } else { + VALUE text; + text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + if (is_bigdecimal_class(json->decimal_class)) { + *result = rb_funcall(Qnil, i_BigDecimal, 1, text); + } else { + *result = rb_funcall(json->decimal_class, i_new, 1, text); + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + fexec np; + } + } + + action exit { fhold; fbreak; } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array ignore* + ((begin_value >parse_value ignore*) + (ignore* next_element ignore*)*)? + end_array @exit; +}%% + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + %% write init; + %% write exec; + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static VALUE json_string_unescape(VALUE result, char *string, char *stringEnd) +{ + char *p = string, *pe = string, *unescape; + int unescape_len; + char buf[4]; + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) rb_str_buf_cat(result, p, pe - p); + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + rb_str_buf_cat(result, unescape, unescape_len); + p = ++pe; + } else { + pe++; + } + } + rb_str_buf_cat(result, p, pe - p); + return result; +} + +%%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + *result = json_string_unescape(*result, json->memo + 1, p); + if (NIL_P(*result)) { + fhold; + fbreak; + } else { + FORCE_UTF8(*result); + fexec p + 1; + } + } + + action exit { fhold; fbreak; } + + main := '"' ((^([\"\\] | 0..0x1f) | '\\'[\"\\/bfnrt] | '\\u'[0-9a-fA-F]{4} | '\\'^([\"\\/bfnrtu]|0..0x1f))* %parse_string) '"' @exit; +}%% + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + *result = rb_str_buf_new(0); + %% write init; + json->memo = p; + %% write exec; + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (json->symbolize_names && json->parsing_name) { + *result = rb_str_intern(*result); + } else if (RB_TYPE_P(*result, T_STRING)) { + rb_str_resize(*result, RSTRING_LEN(*result)); + } + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. 
It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. + * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. + * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 
1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + +%%{ + machine JSON; + + write data; + + include JSON_common; + + action parse_value { + char *np = JSON_parse_value(json, fpc, pe, &result, 0); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + main := ignore* ( + begin_value >parse_value + ) ignore*; +}%% + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. 
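+ *
+ * Illustrative usage sketch (assumes the C-extension variant of the json gem
+ * is installed and loaded via require 'json'; the input string and the option
+ * shown are made-up examples):
+ *
+ *   parser = JSON::Ext::Parser.new('{"a": [1, null, true]}', :symbolize_names => true)
+ *   parser.parse   # => {:a=>[1, nil, true]}
+ *   parser.source  # => "{\"a\": [1, null, true]}"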
+ */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + %% write init; + p = json->source; + pe = p + json->len; + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. + */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_BigDecimal = rb_intern("BigDecimal"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git 
a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/extconf.rb new file mode 100644 index 0000000..7595d58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/ext/json/extconf.rb @@ -0,0 +1,2 @@ +require 'mkmf' +create_makefile('json') diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/install.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/install.rb new file mode 100755 index 0000000..9d3ae12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/install.rb @@ -0,0 +1,23 @@ +#!/usr/bin/env ruby + +require 'fileutils' +include FileUtils::Verbose +require 'rbconfig' +include\ + begin + RbConfig + rescue NameError + Config + end + +sitelibdir = CONFIG["sitelibdir"] +cd 'lib' do + install('json.rb', sitelibdir) + mkdir_p File.join(sitelibdir, 'json') + for file in Dir['json/**/*}'] + d = File.join(sitelibdir, file) + mkdir_p File.dirname(d) + install(file, d) + end +end +warn " *** Installed PURE ruby library." diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder.java new file mode 100644 index 0000000..6f6ab66 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/ByteListTranscoder.java @@ -0,0 +1,166 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * A class specialized in transcoding a certain String format into another, + * using UTF-8 ByteLists as both input and output. + */ +abstract class ByteListTranscoder { + protected final ThreadContext context; + + protected ByteList src; + protected int srcEnd; + /** Position where the last read character started */ + protected int charStart; + /** Position of the next character to read */ + protected int pos; + + private ByteList out; + /** + * When a character that can be copied straight into the output is found, + * its index is stored on this variable, and copying is delayed until + * the sequence of characters that can be copied ends. + * + *
The variable stores -1 when not in a plain sequence. + */ + private int quoteStart = -1; + + protected ByteListTranscoder(ThreadContext context) { + this.context = context; + } + + protected void init(ByteList src, ByteList out) { + this.init(src, 0, src.length(), out); + } + + protected void init(ByteList src, int start, int end, ByteList out) { + this.src = src; + this.pos = start; + this.charStart = start; + this.srcEnd = end; + this.out = out; + } + + /** + * Returns whether there are any characters left to be read. + */ + protected boolean hasNext() { + return pos < srcEnd; + } + + /** + * Returns the next character in the buffer. + */ + private char next() { + return src.charAt(pos++); + } + + /** + * Reads an UTF-8 character from the input and returns its code point, + * while advancing the input position. + * + *
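+ * A worked example (illustrative): for the two-byte sequence 0xC3 0xA9, which
+ * is UTF-8 for U+00E9, the head byte 0xC3 selects the 0b110xxxxx branch, so
+ * the decoded code point is
+ *
+ *   ((0xC3 & 0x1f) << 6) | (0xA9 & 0x3f) == (0x03 << 6) | 0x29 == 0xE9
+ *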
Raises an {@link #invalidUtf8()} exception if an invalid byte + * is found. + */ + protected int readUtf8Char() { + charStart = pos; + char head = next(); + if (head <= 0x7f) { // 0b0xxxxxxx (ASCII) + return head; + } + if (head <= 0xbf) { // 0b10xxxxxx + throw invalidUtf8(); // tail byte with no head + } + if (head <= 0xdf) { // 0b110xxxxx + ensureMin(1); + int cp = ((head & 0x1f) << 6) + | nextPart(); + if (cp < 0x0080) throw invalidUtf8(); + return cp; + } + if (head <= 0xef) { // 0b1110xxxx + ensureMin(2); + int cp = ((head & 0x0f) << 12) + | (nextPart() << 6) + | nextPart(); + if (cp < 0x0800) throw invalidUtf8(); + return cp; + } + if (head <= 0xf7) { // 0b11110xxx + ensureMin(3); + int cp = ((head & 0x07) << 18) + | (nextPart() << 12) + | (nextPart() << 6) + | nextPart(); + if (!Character.isValidCodePoint(cp)) throw invalidUtf8(); + return cp; + } + // 0b11111xxx? + throw invalidUtf8(); + } + + /** + * Throws a GeneratorError if the input list doesn't have at least this + * many bytes left. + */ + protected void ensureMin(int n) { + if (pos + n > srcEnd) throw incompleteUtf8(); + } + + /** + * Reads the next byte of a multi-byte UTF-8 character and returns its + * contents (lower 6 bits). + * + *
Throws a GeneratorError if the byte is not a valid tail. + */ + private int nextPart() { + char c = next(); + // tail bytes must be 0b10xxxxxx + if ((c & 0xc0) != 0x80) throw invalidUtf8(); + return c & 0x3f; + } + + + protected void quoteStart() { + if (quoteStart == -1) quoteStart = charStart; + } + + /** + * When in a sequence of characters that can be copied directly, + * interrupts the sequence and copies it to the output buffer. + * + * @param endPos The offset until which the direct character quoting should + * occur. You may pass {@link #pos} to quote until the most + * recently read character, or {@link #charStart} to quote + * until the character before it. + */ + protected void quoteStop(int endPos) { + if (quoteStart != -1) { + out.append(src, quoteStart, endPos - quoteStart); + quoteStart = -1; + } + } + + protected void append(int b) { + out.append(b); + } + + protected void append(byte[] origin, int start, int length) { + out.append(origin, start, length); + } + + + protected abstract RaiseException invalidUtf8(); + + protected RaiseException incompleteUtf8() { + return invalidUtf8(); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Generator.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Generator.java new file mode 100644 index 0000000..24bf049 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Generator.java @@ -0,0 +1,466 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyBasicObject; +import org.jruby.RubyBignum; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.runtime.ClassIndex; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +public final class Generator { + private Generator() { + throw new RuntimeException(); + } + + /** + * Encodes the given object as a JSON string, using the given handler. + */ + static RubyString + generateJson(ThreadContext context, T object, + Handler handler, IRubyObject[] args) { + Session session = new Session(context, args.length > 0 ? args[0] + : null); + return session.infect(handler.generateNew(session, object)); + } + + /** + * Encodes the given object as a JSON string, detecting the appropriate handler + * for the given object. + */ + static RubyString + generateJson(ThreadContext context, T object, IRubyObject[] args) { + Handler handler = getHandlerFor(context.getRuntime(), object); + return generateJson(context, object, handler, args); + } + + /** + * Encodes the given object as a JSON string, using the appropriate + * handler if one is found or calling #to_json if not. + */ + public static RubyString + generateJson(ThreadContext context, T object, + GeneratorState config) { + Session session = new Session(context, config); + Handler handler = getHandlerFor(context.getRuntime(), object); + return handler.generateNew(session, object); + } + + // NOTE: drop this once Ruby 1.9.3 support is gone! 
+ private static final int FIXNUM = 1; + private static final int BIGNUM = 2; + private static final int ARRAY = 3; + private static final int STRING = 4; + private static final int NIL = 5; + private static final int TRUE = 6; + private static final int FALSE = 7; + private static final int HASH = 10; + private static final int FLOAT = 11; + // hard-coded due JRuby 1.7 compatibility + // https://github.com/jruby/jruby/blob/1.7.27/core/src/main/java/org/jruby/runtime/ClassIndex.java + + /** + * Returns the best serialization handler for the given object. + */ + // Java's generics can't handle this satisfactorily, so I'll just leave + // the best I could get and ignore the warnings + @SuppressWarnings("unchecked") + private static + Handler getHandlerFor(Ruby runtime, T object) { + switch (((RubyBasicObject) object).getNativeTypeIndex()) { + // can not use getNativeClassIndex due 1.7 compatibility + case NIL : return (Handler) NIL_HANDLER; + case TRUE : return (Handler) TRUE_HANDLER; + case FALSE : return (Handler) FALSE_HANDLER; + case FLOAT : return (Handler) FLOAT_HANDLER; + case FIXNUM : return (Handler) FIXNUM_HANDLER; + case BIGNUM : return (Handler) BIGNUM_HANDLER; + case STRING : + if (((RubyBasicObject) object).getMetaClass() != runtime.getString()) break; + return (Handler) STRING_HANDLER; + case ARRAY : + if (((RubyBasicObject) object).getMetaClass() != runtime.getArray()) break; + return (Handler) ARRAY_HANDLER; + case HASH : + if (((RubyBasicObject) object).getMetaClass() != runtime.getHash()) break; + return (Handler) HASH_HANDLER; + } + return GENERIC_HANDLER; + } + + + /* Generator context */ + + /** + * A class that concentrates all the information that is shared by + * generators working on a single session. + * + *
A session is defined as the process of serializing a single root + * object; any handler directly called by container handlers (arrays and + * hashes/objects) shares this object with its caller. + * + *
Note that anything called indirectly (via {@link GENERIC_HANDLER}) + * won't be part of the session. + */ + static class Session { + private final ThreadContext context; + private GeneratorState state; + private IRubyObject possibleState; + private RuntimeInfo info; + private StringEncoder stringEncoder; + + private boolean tainted = false; + private boolean untrusted = false; + + Session(ThreadContext context, GeneratorState state) { + this.context = context; + this.state = state; + } + + Session(ThreadContext context, IRubyObject possibleState) { + this.context = context; + this.possibleState = possibleState == null || possibleState.isNil() + ? null : possibleState; + } + + public ThreadContext getContext() { + return context; + } + + public Ruby getRuntime() { + return context.getRuntime(); + } + + public GeneratorState getState() { + if (state == null) { + state = GeneratorState.fromState(context, getInfo(), possibleState); + } + return state; + } + + public RuntimeInfo getInfo() { + if (info == null) info = RuntimeInfo.forRuntime(getRuntime()); + return info; + } + + public StringEncoder getStringEncoder() { + if (stringEncoder == null) { + stringEncoder = new StringEncoder(context, getState().asciiOnly()); + } + return stringEncoder; + } + + public void infectBy(IRubyObject object) { + if (object.isTaint()) tainted = true; + if (object.isUntrusted()) untrusted = true; + } + + public T infect(T object) { + if (tainted) object.setTaint(true); + if (untrusted) object.setUntrusted(true); + return object; + } + } + + + /* Handler base classes */ + + private static abstract class Handler { + /** + * Returns an estimative of how much space the serialization of the + * given object will take. Used for allocating enough buffer space + * before invoking other methods. + */ + int guessSize(Session session, T object) { + return 4; + } + + RubyString generateNew(Session session, T object) { + RubyString result; + ByteList buffer = new ByteList(guessSize(session, object)); + generate(session, object, buffer); + result = RubyString.newString(session.getRuntime(), buffer); + ThreadContext context = session.getContext(); + RuntimeInfo info = session.getInfo(); + result.force_encoding(context, info.utf8.get()); + return result; + } + + abstract void generate(Session session, T object, ByteList buffer); + } + + /** + * A handler that returns a fixed keyword regardless of the passed object. 
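+ *
+ * For instance (an illustrative Ruby-level consequence, assuming this
+ * extension has been loaded via require 'json'): NIL_HANDLER always emits
+ * the keyword null, so
+ *
+ *   nil.to_json   # => "null"
+ *   true.to_json  # => "true"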
+ */ + private static class KeywordHandler + extends Handler { + private final ByteList keyword; + + private KeywordHandler(String keyword) { + this.keyword = new ByteList(ByteList.plain(keyword), false); + } + + @Override + int guessSize(Session session, T object) { + return keyword.length(); + } + + @Override + RubyString generateNew(Session session, T object) { + return RubyString.newStringShared(session.getRuntime(), keyword); + } + + @Override + void generate(Session session, T object, ByteList buffer) { + buffer.append(keyword); + } + } + + + /* Handlers */ + + static final Handler BIGNUM_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyBignum object, ByteList buffer) { + // JRUBY-4751: RubyBignum.to_s() returns generic object + // representation (fixed in 1.5, but we maintain backwards + // compatibility; call to_s(IRubyObject[]) then + buffer.append(((RubyString)object.to_s(IRubyObject.NULL_ARRAY)).getByteList()); + } + }; + + static final Handler FIXNUM_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyFixnum object, ByteList buffer) { + buffer.append(object.to_s().getByteList()); + } + }; + + static final Handler FLOAT_HANDLER = + new Handler() { + @Override + void generate(Session session, RubyFloat object, ByteList buffer) { + double value = RubyFloat.num2dbl(object); + + if (Double.isInfinite(value) || Double.isNaN(value)) { + if (!session.getState().allowNaN()) { + throw Utils.newException(session.getContext(), + Utils.M_GENERATOR_ERROR, + object + " not allowed in JSON"); + } + } + buffer.append(((RubyString)object.to_s()).getByteList()); + } + }; + + static final Handler ARRAY_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyArray object) { + GeneratorState state = session.getState(); + int depth = state.getDepth(); + int perItem = + 4 // prealloc + + (depth + 1) * state.getIndent().length() // indent + + 1 + state.getArrayNl().length(); // ',' arrayNl + return 2 + object.size() * perItem; + } + + @Override + void generate(Session session, RubyArray object, ByteList buffer) { + ThreadContext context = session.getContext(); + Ruby runtime = context.getRuntime(); + GeneratorState state = session.getState(); + int depth = state.increaseDepth(); + + ByteList indentUnit = state.getIndent(); + byte[] shift = Utils.repeat(indentUnit, depth); + + ByteList arrayNl = state.getArrayNl(); + byte[] delim = new byte[1 + arrayNl.length()]; + delim[0] = ','; + System.arraycopy(arrayNl.unsafeBytes(), arrayNl.begin(), delim, 1, + arrayNl.length()); + + session.infectBy(object); + + buffer.append((byte)'['); + buffer.append(arrayNl); + boolean firstItem = true; + for (int i = 0, t = object.getLength(); i < t; i++) { + IRubyObject element = object.eltInternal(i); + session.infectBy(element); + if (firstItem) { + firstItem = false; + } else { + buffer.append(delim); + } + buffer.append(shift); + Handler handler = getHandlerFor(runtime, element); + handler.generate(session, element, buffer); + } + + state.decreaseDepth(); + if (arrayNl.length() != 0) { + buffer.append(arrayNl); + buffer.append(shift, 0, state.getDepth() * indentUnit.length()); + } + + buffer.append((byte)']'); + } + }; + + static final Handler HASH_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyHash object) { + GeneratorState state = session.getState(); + int perItem = + 12 // key, colon, comma + + (state.getDepth() + 1) * state.getIndent().length() + + state.getSpaceBefore().length() + + state.getSpace().length(); + 
return 2 + object.size() * perItem; + } + + @Override + void generate(final Session session, RubyHash object, + final ByteList buffer) { + ThreadContext context = session.getContext(); + final Ruby runtime = context.getRuntime(); + final GeneratorState state = session.getState(); + final int depth = state.increaseDepth(); + + final ByteList objectNl = state.getObjectNl(); + final byte[] indent = Utils.repeat(state.getIndent(), depth); + final ByteList spaceBefore = state.getSpaceBefore(); + final ByteList space = state.getSpace(); + + buffer.append((byte)'{'); + buffer.append(objectNl); + object.visitAll(new RubyHash.Visitor() { + private boolean firstPair = true; + + @Override + public void visit(IRubyObject key, IRubyObject value) { + if (firstPair) { + firstPair = false; + } else { + buffer.append((byte)','); + buffer.append(objectNl); + } + if (objectNl.length() != 0) buffer.append(indent); + + STRING_HANDLER.generate(session, key.asString(), buffer); + session.infectBy(key); + + buffer.append(spaceBefore); + buffer.append((byte)':'); + buffer.append(space); + + Handler valueHandler = getHandlerFor(runtime, value); + valueHandler.generate(session, value, buffer); + session.infectBy(value); + } + }); + state.decreaseDepth(); + if (objectNl.length() != 0) { + buffer.append(objectNl); + buffer.append(Utils.repeat(state.getIndent(), state.getDepth())); + } + buffer.append((byte)'}'); + } + }; + + static final Handler STRING_HANDLER = + new Handler() { + @Override + int guessSize(Session session, RubyString object) { + // for most applications, most strings will be just a set of + // printable ASCII characters without any escaping, so let's + // just allocate enough space for that + the quotes + return 2 + object.getByteList().length(); + } + + @Override + void generate(Session session, RubyString object, ByteList buffer) { + RuntimeInfo info = session.getInfo(); + RubyString src; + + if (object.encoding(session.getContext()) != info.utf8.get()) { + src = (RubyString)object.encode(session.getContext(), + info.utf8.get()); + } else { + src = object; + } + + session.getStringEncoder().encode(src.getByteList(), buffer); + } + }; + + static final Handler TRUE_HANDLER = + new KeywordHandler("true"); + static final Handler FALSE_HANDLER = + new KeywordHandler("false"); + static final Handler NIL_HANDLER = + new KeywordHandler("null"); + + /** + * The default handler (Object#to_json): coerces the object + * to string using #to_s, and serializes that string. + */ + static final Handler OBJECT_HANDLER = + new Handler() { + @Override + RubyString generateNew(Session session, IRubyObject object) { + RubyString str = object.asString(); + return STRING_HANDLER.generateNew(session, str); + } + + @Override + void generate(Session session, IRubyObject object, ByteList buffer) { + RubyString str = object.asString(); + STRING_HANDLER.generate(session, str, buffer); + } + }; + + /** + * A handler that simply calls #to_json(state) on the + * given object. 
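+ *
+ * An illustrative Ruby-level sketch (the Point class below is made up for
+ * illustration): a custom object whose class overrides to_json is serialized
+ * through this handler by calling that method with the current State:
+ *
+ *   class Point
+ *     def initialize(x, y); @x, @y = x, y; end
+ *     def to_json(*args); [@x, @y].to_json(*args); end
+ *   end
+ *   JSON.generate(Point.new(1, 2))  # => "[1,2]"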
+ */ + static final Handler GENERIC_HANDLER = + new Handler() { + @Override + RubyString generateNew(Session session, IRubyObject object) { + if (object.respondsTo("to_json")) { + IRubyObject result = object.callMethod(session.getContext(), "to_json", + new IRubyObject[] {session.getState()}); + if (result instanceof RubyString) return (RubyString)result; + throw session.getRuntime().newTypeError("to_json must return a String"); + } else { + return OBJECT_HANDLER.generateNew(session, object); + } + } + + @Override + void generate(Session session, IRubyObject object, ByteList buffer) { + RubyString result = generateNew(session, object); + buffer.append(result.getByteList()); + } + }; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods.java new file mode 100644 index 0000000..bde7a18 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorMethods.java @@ -0,0 +1,231 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.lang.ref.WeakReference; +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyBoolean; +import org.jruby.RubyFixnum; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * A class that populates the + * Json::Ext::Generator::GeneratorMethods module. + * + * @author mernen + */ +class GeneratorMethods { + /** + * Populates the given module with all modules and their methods + * @param info + * @param generatorMethodsModule The module to populate + * (normally JSON::Generator::GeneratorMethods) + */ + static void populate(RuntimeInfo info, RubyModule module) { + defineMethods(module, "Array", RbArray.class); + defineMethods(module, "FalseClass", RbFalse.class); + defineMethods(module, "Float", RbFloat.class); + defineMethods(module, "Hash", RbHash.class); + defineMethods(module, "Integer", RbInteger.class); + defineMethods(module, "NilClass", RbNil.class); + defineMethods(module, "Object", RbObject.class); + defineMethods(module, "String", RbString.class); + defineMethods(module, "TrueClass", RbTrue.class); + + info.stringExtendModule = new WeakReference(module.defineModuleUnder("String") + .defineModuleUnder("Extend")); + info.stringExtendModule.get().defineAnnotatedMethods(StringExtend.class); + } + + /** + * Convenience method for defining methods on a submodule. 
+ * @param parentModule + * @param submoduleName + * @param klass + */ + private static void defineMethods(RubyModule parentModule, + String submoduleName, Class klass) { + RubyModule submodule = parentModule.defineModuleUnder(submoduleName); + submodule.defineAnnotatedMethods(klass); + } + + + public static class RbHash { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyHash)vSelf, + Generator.HASH_HANDLER, args); + } + } + + public static class RbArray { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyArray)vSelf, + Generator.ARRAY_HANDLER, args); + } + } + + public static class RbInteger { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, vSelf, args); + } + } + + public static class RbFloat { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyFloat)vSelf, + Generator.FLOAT_HANDLER, args); + } + } + + public static class RbString { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyString)vSelf, + Generator.STRING_HANDLER, args); + } + + /** + * {@link RubyString String}#to_json_raw(*) + * + *
This method creates a JSON text from the result of a call to + * {@link #to_json_raw_object} of this String. + */ + @JRubyMethod(rest=true) + public static IRubyObject to_json_raw(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + RubyHash obj = toJsonRawObject(context, Utils.ensureString(vSelf)); + return Generator.generateJson(context, obj, + Generator.HASH_HANDLER, args); + } + + /** + * {@link RubyString String}#to_json_raw_object(*) + * + *
This method creates a raw object Hash, that can be nested into + * other data structures and will be unparsed as a raw string. This + * method should be used if you want to convert raw strings to JSON + * instead of UTF-8 strings, e.g. binary data. + */ + @JRubyMethod(rest=true) + public static IRubyObject to_json_raw_object(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return toJsonRawObject(context, Utils.ensureString(vSelf)); + } + + private static RubyHash toJsonRawObject(ThreadContext context, + RubyString self) { + Ruby runtime = context.getRuntime(); + RubyHash result = RubyHash.newHash(runtime); + + IRubyObject createId = RuntimeInfo.forRuntime(runtime) + .jsonModule.get().callMethod(context, "create_id"); + result.op_aset(context, createId, self.getMetaClass().to_s()); + + ByteList bl = self.getByteList(); + byte[] uBytes = bl.unsafeBytes(); + RubyArray array = runtime.newArray(bl.length()); + for (int i = bl.begin(), t = bl.begin() + bl.length(); i < t; i++) { + array.store(i, runtime.newFixnum(uBytes[i] & 0xff)); + } + + result.op_aset(context, runtime.newString("raw"), array); + return result; + } + + @JRubyMethod(required=1, module=true) + public static IRubyObject included(ThreadContext context, + IRubyObject vSelf, IRubyObject module) { + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + return module.callMethod(context, "extend", info.stringExtendModule.get()); + } + } + + public static class StringExtend { + /** + * {@link RubyString String}#json_create(o) + * + *
Raw Strings are JSON Objects (the raw bytes are stored in an + * array for the key "raw"). The Ruby String can be created by this + * module method. + */ + @JRubyMethod(required=1) + public static IRubyObject json_create(ThreadContext context, + IRubyObject vSelf, IRubyObject vHash) { + Ruby runtime = context.getRuntime(); + RubyHash o = vHash.convertToHash(); + IRubyObject rawData = o.fastARef(runtime.newString("raw")); + if (rawData == null) { + throw runtime.newArgumentError("\"raw\" value not defined " + + "for encoded String"); + } + RubyArray ary = Utils.ensureArray(rawData); + byte[] bytes = new byte[ary.getLength()]; + for (int i = 0, t = ary.getLength(); i < t; i++) { + IRubyObject element = ary.eltInternal(i); + if (element instanceof RubyFixnum) { + bytes[i] = (byte)RubyNumeric.fix2long(element); + } else { + throw runtime.newTypeError(element, runtime.getFixnum()); + } + } + return runtime.newString(new ByteList(bytes, false)); + } + } + + public static class RbTrue { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyBoolean)vSelf, + Generator.TRUE_HANDLER, args); + } + } + + public static class RbFalse { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, (RubyBoolean)vSelf, + Generator.FALSE_HANDLER, args); + } + } + + public static class RbNil { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject vSelf, IRubyObject[] args) { + return Generator.generateJson(context, vSelf, + Generator.NIL_HANDLER, args); + } + } + + public static class RbObject { + @JRubyMethod(rest=true) + public static IRubyObject to_json(ThreadContext context, + IRubyObject self, IRubyObject[] args) { + return RbString.to_json(context, self.asString(), args); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorService.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorService.java new file mode 100644 index 0000000..e665ad1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorService.java @@ -0,0 +1,42 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.io.IOException; +import java.lang.ref.WeakReference; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.runtime.load.BasicLibraryService; + +/** + * The service invoked by JRuby's {@link org.jruby.runtime.load.LoadService LoadService}. + * Defines the JSON::Ext::Generator module. 
+ * @author mernen + */ +public class GeneratorService implements BasicLibraryService { + public boolean basicLoad(Ruby runtime) throws IOException { + runtime.getLoadService().require("json/common"); + RuntimeInfo info = RuntimeInfo.initRuntime(runtime); + + info.jsonModule = new WeakReference(runtime.defineModule("JSON")); + RubyModule jsonExtModule = info.jsonModule.get().defineModuleUnder("Ext"); + RubyModule generatorModule = jsonExtModule.defineModuleUnder("Generator"); + + RubyClass stateClass = + generatorModule.defineClassUnder("State", runtime.getObject(), + GeneratorState.ALLOCATOR); + stateClass.defineAnnotatedMethods(GeneratorState.class); + info.generatorStateClass = new WeakReference(stateClass); + + RubyModule generatorMethods = + generatorModule.defineModuleUnder("GeneratorMethods"); + GeneratorMethods.populate(info, generatorMethods); + + return true; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorState.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorState.java new file mode 100644 index 0000000..44a9311 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/GeneratorState.java @@ -0,0 +1,490 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * The JSON::Ext::Generator::State class. + * + *
This class is used to create State instances, that are use to hold data + * while generating a JSON text from a a Ruby data structure. + * + * @author mernen + */ +public class GeneratorState extends RubyObject { + /** + * The indenting unit string. Will be repeated several times for larger + * indenting levels. + */ + private ByteList indent = ByteList.EMPTY_BYTELIST; + /** + * The spacing to be added after a semicolon on a JSON object. + * @see #spaceBefore + */ + private ByteList space = ByteList.EMPTY_BYTELIST; + /** + * The spacing to be added before a semicolon on a JSON object. + * @see #space + */ + private ByteList spaceBefore = ByteList.EMPTY_BYTELIST; + /** + * Any suffix to be added after the comma for each element on a JSON object. + * It is assumed to be a newline, if set. + */ + private ByteList objectNl = ByteList.EMPTY_BYTELIST; + /** + * Any suffix to be added after the comma for each element on a JSON Array. + * It is assumed to be a newline, if set. + */ + private ByteList arrayNl = ByteList.EMPTY_BYTELIST; + + /** + * The maximum level of nesting of structures allowed. + * 0 means disabled. + */ + private int maxNesting = DEFAULT_MAX_NESTING; + static final int DEFAULT_MAX_NESTING = 100; + /** + * Whether special float values (NaN, Infinity, + * -Infinity) are accepted. + * If set to false, an exception will be thrown upon + * encountering one. + */ + private boolean allowNaN = DEFAULT_ALLOW_NAN; + static final boolean DEFAULT_ALLOW_NAN = false; + /** + * If set to true all JSON documents generated do not contain + * any other characters than ASCII characters. + */ + private boolean asciiOnly = DEFAULT_ASCII_ONLY; + static final boolean DEFAULT_ASCII_ONLY = false; + /** + * If set to true all JSON values generated might not be + * RFC-conform JSON documents. + */ + private boolean quirksMode = DEFAULT_QUIRKS_MODE; + static final boolean DEFAULT_QUIRKS_MODE = false; + /** + * The initial buffer length of this state. (This isn't really used on all + * non-C implementations.) + */ + private int bufferInitialLength = DEFAULT_BUFFER_INITIAL_LENGTH; + static final int DEFAULT_BUFFER_INITIAL_LENGTH = 1024; + + /** + * The current depth (inside a #to_json call) + */ + private int depth = 0; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new GeneratorState(runtime, klazz); + } + }; + + public GeneratorState(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + /** + * State.from_state(opts) + * + *

Creates a State object from opts, which ought to be + * {@link RubyHash Hash} to create a new State instance + * configured by opts, something else to create an + * unconfigured instance. If opts is a State + * object, it is just returned. + * @param clazzParam The receiver of the method call + * ({@link RubyClass} State) + * @param opts The object to use as a base for the new State + * @param block The block passed to the method + * @return A GeneratorState as determined above + */ + @JRubyMethod(meta=true) + public static IRubyObject from_state(ThreadContext context, + IRubyObject klass, IRubyObject opts) { + return fromState(context, opts); + } + + static GeneratorState fromState(ThreadContext context, IRubyObject opts) { + return fromState(context, RuntimeInfo.forRuntime(context.getRuntime()), opts); + } + + static GeneratorState fromState(ThreadContext context, RuntimeInfo info, + IRubyObject opts) { + RubyClass klass = info.generatorStateClass.get(); + if (opts != null) { + // if the given parameter is a Generator::State, return itself + if (klass.isInstance(opts)) return (GeneratorState)opts; + + // if the given parameter is a Hash, pass it to the instantiator + if (context.getRuntime().getHash().isInstance(opts)) { + return (GeneratorState)klass.newInstance(context, + new IRubyObject[] {opts}, Block.NULL_BLOCK); + } + } + + // for other values, return the safe prototype + return (GeneratorState)info.getSafeStatePrototype(context).dup(); + } + + /** + * State#initialize(opts = {}) + * + * Instantiates a new State object, configured by opts. + * + * opts can have the following keys: + * + *

+ *
+ * :indent
+ *     a {@link RubyString String} used to indent levels (default: "")
+ * :space
+ *     a String that is put after a ':' or ',' delimiter (default: "")
+ * :space_before
+ *     a String that is put before a ":" pair delimiter (default: "")
+ * :object_nl
+ *     a String that is put at the end of a JSON object (default: "")
+ * :array_nl
+ *     a String that is put at the end of a JSON array (default: "")
+ * :allow_nan
true if NaN, Infinity, and + * -Infinity should be generated, otherwise an exception is + * thrown if these values are encountered. + * This options defaults to false. + */ + @JRubyMethod(optional=1, visibility=Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + configure(context, args.length > 0 ? args[0] : null); + return this; + } + + @JRubyMethod + public IRubyObject initialize_copy(ThreadContext context, IRubyObject vOrig) { + Ruby runtime = context.getRuntime(); + if (!(vOrig instanceof GeneratorState)) { + throw runtime.newTypeError(vOrig, getType()); + } + GeneratorState orig = (GeneratorState)vOrig; + this.indent = orig.indent; + this.space = orig.space; + this.spaceBefore = orig.spaceBefore; + this.objectNl = orig.objectNl; + this.arrayNl = orig.arrayNl; + this.maxNesting = orig.maxNesting; + this.allowNaN = orig.allowNaN; + this.asciiOnly = orig.asciiOnly; + this.quirksMode = orig.quirksMode; + this.bufferInitialLength = orig.bufferInitialLength; + this.depth = orig.depth; + return this; + } + + /** + * Generates a valid JSON document from object obj and returns + * the result. If no valid JSON document can be created this method raises + * a GeneratorError exception. + */ + @JRubyMethod + public IRubyObject generate(ThreadContext context, IRubyObject obj) { + RubyString result = Generator.generateJson(context, obj, this); + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + result.force_encoding(context, info.utf8.get()); + return result; + } + + private static boolean matchClosingBrace(ByteList bl, int pos, int len, + int brace) { + for (int endPos = len - 1; endPos > pos; endPos--) { + int b = bl.get(endPos); + if (Character.isWhitespace(b)) continue; + return b == brace; + } + return false; + } + + @JRubyMethod(name="[]", required=1) + public IRubyObject op_aref(ThreadContext context, IRubyObject vName) { + String name = vName.asJavaString(); + if (getMetaClass().isMethodBound(name, true)) { + return send(context, vName, Block.NULL_BLOCK); + } else { + IRubyObject value = getInstanceVariables().getInstanceVariable("@" + name); + return value == null ? 
context.nil : value; + } + } + + @JRubyMethod(name="[]=", required=2) + public IRubyObject op_aset(ThreadContext context, IRubyObject vName, IRubyObject value) { + String name = vName.asJavaString(); + String nameWriter = name + "="; + if (getMetaClass().isMethodBound(nameWriter, true)) { + return send(context, context.getRuntime().newString(nameWriter), value, Block.NULL_BLOCK); + } else { + getInstanceVariables().setInstanceVariable("@" + name, value); + } + return context.getRuntime().getNil(); + } + + public ByteList getIndent() { + return indent; + } + + @JRubyMethod(name="indent") + public RubyString indent_get(ThreadContext context) { + return context.getRuntime().newString(indent); + } + + @JRubyMethod(name="indent=") + public IRubyObject indent_set(ThreadContext context, IRubyObject indent) { + this.indent = prepareByteList(context, indent); + return indent; + } + + public ByteList getSpace() { + return space; + } + + @JRubyMethod(name="space") + public RubyString space_get(ThreadContext context) { + return context.getRuntime().newString(space); + } + + @JRubyMethod(name="space=") + public IRubyObject space_set(ThreadContext context, IRubyObject space) { + this.space = prepareByteList(context, space); + return space; + } + + public ByteList getSpaceBefore() { + return spaceBefore; + } + + @JRubyMethod(name="space_before") + public RubyString space_before_get(ThreadContext context) { + return context.getRuntime().newString(spaceBefore); + } + + @JRubyMethod(name="space_before=") + public IRubyObject space_before_set(ThreadContext context, + IRubyObject spaceBefore) { + this.spaceBefore = prepareByteList(context, spaceBefore); + return spaceBefore; + } + + public ByteList getObjectNl() { + return objectNl; + } + + @JRubyMethod(name="object_nl") + public RubyString object_nl_get(ThreadContext context) { + return context.getRuntime().newString(objectNl); + } + + @JRubyMethod(name="object_nl=") + public IRubyObject object_nl_set(ThreadContext context, + IRubyObject objectNl) { + this.objectNl = prepareByteList(context, objectNl); + return objectNl; + } + + public ByteList getArrayNl() { + return arrayNl; + } + + @JRubyMethod(name="array_nl") + public RubyString array_nl_get(ThreadContext context) { + return context.getRuntime().newString(arrayNl); + } + + @JRubyMethod(name="array_nl=") + public IRubyObject array_nl_set(ThreadContext context, + IRubyObject arrayNl) { + this.arrayNl = prepareByteList(context, arrayNl); + return arrayNl; + } + + @JRubyMethod(name="check_circular?") + public RubyBoolean check_circular_p(ThreadContext context) { + return context.getRuntime().newBoolean(maxNesting != 0); + } + + /** + * Returns the maximum level of nesting configured for this state. 
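+ * A value of 0 disables the limit, in which case check_circular? reports
+ * false; for instance (illustrative):
+ *
+ *   state = JSON::Ext::Generator::State.new(:max_nesting => 0)
+ *   state.check_circular?   # => false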
+ */ + public int getMaxNesting() { + return maxNesting; + } + + @JRubyMethod(name="max_nesting") + public RubyInteger max_nesting_get(ThreadContext context) { + return context.getRuntime().newFixnum(maxNesting); + } + + @JRubyMethod(name="max_nesting=") + public IRubyObject max_nesting_set(IRubyObject max_nesting) { + maxNesting = RubyNumeric.fix2int(max_nesting); + return max_nesting; + } + + public boolean allowNaN() { + return allowNaN; + } + + @JRubyMethod(name="allow_nan?") + public RubyBoolean allow_nan_p(ThreadContext context) { + return context.getRuntime().newBoolean(allowNaN); + } + + public boolean asciiOnly() { + return asciiOnly; + } + + @JRubyMethod(name="ascii_only?") + public RubyBoolean ascii_only_p(ThreadContext context) { + return context.getRuntime().newBoolean(asciiOnly); + } + + @JRubyMethod(name="buffer_initial_length") + public RubyInteger buffer_initial_length_get(ThreadContext context) { + return context.getRuntime().newFixnum(bufferInitialLength); + } + + @JRubyMethod(name="buffer_initial_length=") + public IRubyObject buffer_initial_length_set(IRubyObject buffer_initial_length) { + int newLength = RubyNumeric.fix2int(buffer_initial_length); + if (newLength > 0) bufferInitialLength = newLength; + return buffer_initial_length; + } + + public int getDepth() { + return depth; + } + + @JRubyMethod(name="depth") + public RubyInteger depth_get(ThreadContext context) { + return context.getRuntime().newFixnum(depth); + } + + @JRubyMethod(name="depth=") + public IRubyObject depth_set(IRubyObject vDepth) { + depth = RubyNumeric.fix2int(vDepth); + return vDepth; + } + + private ByteList prepareByteList(ThreadContext context, IRubyObject value) { + RubyString str = value.convertToString(); + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + if (str.encoding(context) != info.utf8.get()) { + str = (RubyString)str.encode(context, info.utf8.get()); + } + return str.getByteList().dup(); + } + + /** + * State#configure(opts) + * + *

Configures this State instance with the {@link RubyHash Hash} + * opts, and returns itself. + * @param vOpts The options hash + * @return The receiver + */ + @JRubyMethod(alias = "merge") + public IRubyObject configure(ThreadContext context, IRubyObject vOpts) { + OptionsReader opts = new OptionsReader(context, vOpts); + + ByteList indent = opts.getString("indent"); + if (indent != null) this.indent = indent; + + ByteList space = opts.getString("space"); + if (space != null) this.space = space; + + ByteList spaceBefore = opts.getString("space_before"); + if (spaceBefore != null) this.spaceBefore = spaceBefore; + + ByteList arrayNl = opts.getString("array_nl"); + if (arrayNl != null) this.arrayNl = arrayNl; + + ByteList objectNl = opts.getString("object_nl"); + if (objectNl != null) this.objectNl = objectNl; + + maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + allowNaN = opts.getBool("allow_nan", DEFAULT_ALLOW_NAN); + asciiOnly = opts.getBool("ascii_only", DEFAULT_ASCII_ONLY); + bufferInitialLength = opts.getInt("buffer_initial_length", DEFAULT_BUFFER_INITIAL_LENGTH); + + depth = opts.getInt("depth", 0); + + return this; + } + + /** + * State#to_h() + * + *
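+ * For example (illustrative), the returned hash can be used to clone a
+ * configuration:
+ *
+ *   state = JSON::Ext::Generator::State.new(:indent => "\t")
+ *   copy  = JSON::Ext::Generator::State.new(state.to_h)
+ *   copy.indent   # => "\t"
+ *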

Returns the configuration instance variables as a hash, that can be + * passed to the configure method. + * @return the hash + */ + @JRubyMethod(alias = "to_hash") + public RubyHash to_h(ThreadContext context) { + Ruby runtime = context.getRuntime(); + RubyHash result = RubyHash.newHash(runtime); + + result.op_aset(context, runtime.newSymbol("indent"), indent_get(context)); + result.op_aset(context, runtime.newSymbol("space"), space_get(context)); + result.op_aset(context, runtime.newSymbol("space_before"), space_before_get(context)); + result.op_aset(context, runtime.newSymbol("object_nl"), object_nl_get(context)); + result.op_aset(context, runtime.newSymbol("array_nl"), array_nl_get(context)); + result.op_aset(context, runtime.newSymbol("allow_nan"), allow_nan_p(context)); + result.op_aset(context, runtime.newSymbol("ascii_only"), ascii_only_p(context)); + result.op_aset(context, runtime.newSymbol("max_nesting"), max_nesting_get(context)); + result.op_aset(context, runtime.newSymbol("depth"), depth_get(context)); + result.op_aset(context, runtime.newSymbol("buffer_initial_length"), buffer_initial_length_get(context)); + for (String name: getInstanceVariableNameList()) { + result.op_aset(context, runtime.newSymbol(name.substring(1)), getInstanceVariables().getInstanceVariable(name)); + } + return result; + } + + public int increaseDepth() { + depth++; + checkMaxNesting(); + return depth; + } + + public int decreaseDepth() { + return --depth; + } + + /** + * Checks if the current depth is allowed as per this state's options. + * @param context + * @param depth The corrent depth + */ + private void checkMaxNesting() { + if (maxNesting != 0 && depth > maxNesting) { + depth--; + throw Utils.newException(getRuntime().getCurrentContext(), + Utils.M_NESTING_ERROR, "nesting of " + depth + " is too deep"); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/OptionsReader.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/OptionsReader.java new file mode 100644 index 0000000..70426d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/OptionsReader.java @@ -0,0 +1,113 @@ +/* + * This code is copyrighted work by Daniel Luz . 
+ * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyNumeric; +import org.jruby.RubyString; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +final class OptionsReader { + private final ThreadContext context; + private final Ruby runtime; + private final RubyHash opts; + private RuntimeInfo info; + + OptionsReader(ThreadContext context, IRubyObject vOpts) { + this.context = context; + this.runtime = context.getRuntime(); + if (vOpts == null || vOpts.isNil()) { + opts = null; + } else if (vOpts.respondsTo("to_hash")) { + opts = vOpts.convertToHash(); + } else if (vOpts.respondsTo("to_h")) { + opts = vOpts.callMethod(context, "to_h").convertToHash(); + } else { + opts = vOpts.convertToHash(); /* Should just raise the correct TypeError */ + } + } + + private RuntimeInfo getRuntimeInfo() { + if (info != null) return info; + info = RuntimeInfo.forRuntime(runtime); + return info; + } + + /** + * Efficiently looks up items with a {@link RubySymbol Symbol} key + * @param key The Symbol name to look up for + * @return The item in the {@link RubyHash Hash}, or null + * if not found + */ + IRubyObject get(String key) { + return opts == null ? null : opts.fastARef(runtime.newSymbol(key)); + } + + boolean getBool(String key, boolean defaultValue) { + IRubyObject value = get(key); + return value == null ? defaultValue : value.isTrue(); + } + + int getInt(String key, int defaultValue) { + IRubyObject value = get(key); + if (value == null) return defaultValue; + if (!value.isTrue()) return 0; + return RubyNumeric.fix2int(value); + } + + /** + * Reads the setting from the options hash. If no entry is set for this + * key or if it evaluates to false, returns null; attempts to + * coerce the value to {@link RubyString String} otherwise. + * @param key The Symbol name to look up for + * @return null if the key is not in the Hash or if + * its value evaluates to false + * @throws RaiseException TypeError if the value does not + * evaluate to false and can't be + * converted to string + */ + ByteList getString(String key) { + RubyString str = getString(key, null); + return str == null ? null : str.getByteList().dup(); + } + + RubyString getString(String key, RubyString defaultValue) { + IRubyObject value = get(key); + if (value == null || !value.isTrue()) return defaultValue; + + RubyString str = value.convertToString(); + RuntimeInfo info = getRuntimeInfo(); + if (str.encoding(context) != info.utf8.get()) { + str = (RubyString)str.encode(context, info.utf8.get()); + } + return str; + } + + /** + * Reads the setting from the options hash. If it is nil or + * undefined, returns the default value given. + * If not, ensures it is a RubyClass instance and shares the same + * allocator as the default value (i.e. for the basic types which have + * their specific allocators, this ensures the passed value is + * a subclass of them). 
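+ * At the Ruby level this backs options such as :object_class; a sketch of the
+ * effect (illustrative):
+ *
+ *   require 'ostruct'
+ *   JSON.parse('{"a": 1}', :object_class => OpenStruct)   # => #<OpenStruct a=1>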
+ */ + RubyClass getClass(String key, RubyClass defaultValue) { + IRubyObject value = get(key); + + if (value == null || value.isNil()) return defaultValue; + return (RubyClass)value; + } + + public RubyHash getHash(String key) { + IRubyObject value = get(key); + if (value == null || value.isNil()) return new RubyHash(runtime); + return (RubyHash) value; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Parser.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Parser.java new file mode 100644 index 0000000..647afca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Parser.java @@ -0,0 +1,2362 @@ + +// line 1 "Parser.rl" +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.JumpException; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; +import org.jruby.util.ConvertBytes; +import static org.jruby.util.ConvertDouble.DoubleConverter; + +/** + * The JSON::Ext::Parser class. + * + *

This is the JSON parser implemented as a Java class. To use it as the + * standard parser, set + *

JSON.parser = JSON::Ext::Parser
+ * This is performed for you when you include "json/ext". + * + *
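+ * A rough sketch (assuming a JRuby runtime, where json/ext provides this
+ * class; the input is illustrative):
+ *
+ *   require 'json/ext'
+ *   JSON.parser                    # => JSON::Ext::Parser
+ *   JSON.parse('{"a": [1, 2.5]}')  # => {"a"=>[1, 2.5]}
+ *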

This class does not perform the actual parsing, just acts as an interface + * to Ruby code. When the {@link #parse()} method is invoked, a + * Parser.ParserSession object is instantiated, which handles the process. + * + * @author mernen + */ +public class Parser extends RubyObject { + private final RuntimeInfo info; + private RubyString vSource; + private RubyString createId; + private boolean createAdditions; + private int maxNesting; + private boolean allowNaN; + private boolean symbolizeNames; + private RubyClass objectClass; + private RubyClass arrayClass; + private RubyClass decimalClass; + private RubyHash match_string; + + private static final int DEFAULT_MAX_NESTING = 100; + + private static final ByteList JSON_MINUS_INFINITY = new ByteList(ByteList.plain("-Infinity")); + // constant names in the JSON module containing those values + private static final String CONST_NAN = "NaN"; + private static final String CONST_INFINITY = "Infinity"; + private static final String CONST_MINUS_INFINITY = "MinusInfinity"; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Parser(runtime, klazz); + } + }; + + /** + * Multiple-value return for internal parser methods. + * + *

All the parseStuff methods return instances of + * ParserResult when successful, or null when + * there's a problem with the input data. + */ + static final class ParserResult { + /** + * The result of the successful parsing. Should never be + * null. + */ + IRubyObject result; + /** + * The point where the parser returned. + */ + int p; + + void update(IRubyObject result, int p) { + this.result = result; + this.p = p; + } + } + + public Parser(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + info = RuntimeInfo.forRuntime(runtime); + } + + /** + * Parser.new(source, opts = {}) + * + *

Creates a new JSON::Ext::Parser instance for the string + * source. + * It will be configured by the opts Hash. + * opts can have the following keys: + * + *

+ *
+ * :max_nesting
+ *     The maximum depth of nesting allowed in the parsed data structures.
+ *     Disable depth checking with :max_nesting => false|nil|0; it defaults
+ *     to 100.
+ * :allow_nan
+ *     If set to true, allow NaN, Infinity and -Infinity in defiance of
+ *     RFC 4627 to be parsed by the Parser. This option defaults to false.
+ * :symbolize_names
+ *     If set to true, returns symbols for the names (keys) in a JSON object.
+ *     Otherwise strings are returned, which is also the default.
+ * :create_additions
+ *     If set to false, the Parser doesn't create additions even if a matching
+ *     class and create_id was found. This option defaults to false.
+ * :object_class
+ *     Defaults to Hash.
+ * :array_class
+ *     Defaults to Array.
+ * :decimal_class
+ *     Specifies which class to use instead of the default (Float) when
+ *     parsing decimal numbers. This class must accept a single string
+ *     argument in its constructor.
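+ *
+ * For instance (illustrative; JRuby with json/ext loaded):
+ *
+ *   parser = JSON::Ext::Parser.new('{"a": [1, 2.5, null]}',
+ *                                  :symbolize_names => true)
+ *   parser.parse   # => {:a=>[1, 2.5, nil]}
+ *
+ *   JSON::Ext::Parser.new('[[[1]]]', :max_nesting => 2).parse
+ *   # raises JSON::NestingError ("nesting of 3 is too deep")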
+ */ + @JRubyMethod(name = "new", required = 1, optional = 1, meta = true) + public static IRubyObject newInstance(IRubyObject clazz, IRubyObject[] args, Block block) { + Parser parser = (Parser)((RubyClass)clazz).allocate(); + + parser.callInit(args, block); + + return parser; + } + + @JRubyMethod(required = 1, optional = 1, visibility = Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.getRuntime(); + if (this.vSource != null) { + throw runtime.newTypeError("already initialized instance"); + } + + OptionsReader opts = new OptionsReader(context, args.length > 1 ? args[1] : null); + this.maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + this.allowNaN = opts.getBool("allow_nan", false); + this.symbolizeNames = opts.getBool("symbolize_names", false); + this.createId = opts.getString("create_id", getCreateId(context)); + this.createAdditions = opts.getBool("create_additions", false); + this.objectClass = opts.getClass("object_class", runtime.getHash()); + this.arrayClass = opts.getClass("array_class", runtime.getArray()); + this.decimalClass = opts.getClass("decimal_class", null); + this.match_string = opts.getHash("match_string"); + + if(symbolizeNames && createAdditions) { + throw runtime.newArgumentError( + "options :symbolize_names and :create_additions cannot be " + + " used in conjunction" + ); + } + this.vSource = args[0].convertToString(); + this.vSource = convertEncoding(context, vSource); + + return this; + } + + /** + * Checks the given string's encoding. If a non-UTF-8 encoding is detected, + * a converted copy is returned. + * Returns the source string if no conversion is needed. + */ + private RubyString convertEncoding(ThreadContext context, RubyString source) { + RubyEncoding encoding = (RubyEncoding)source.encoding(context); + if (encoding == info.ascii8bit.get()) { + if (source.isFrozen()) { + source = (RubyString) source.dup(); + } + source.force_encoding(context, info.utf8.get()); + } else { + source = (RubyString) source.encode(context, info.utf8.get()); + } + return source; + } + + /** + * Checks the first four bytes of the given ByteList to infer its encoding, + * using the principle demonstrated on section 3 of RFC 4627 (JSON). + */ + private static String sniffByteList(ByteList bl) { + if (bl.length() < 4) return null; + if (bl.get(0) == 0 && bl.get(2) == 0) { + return bl.get(1) == 0 ? "utf-32be" : "utf-16be"; + } + if (bl.get(1) == 0 && bl.get(3) == 0) { + return bl.get(2) == 0 ? "utf-32le" : "utf-16le"; + } + return null; + } + + /** + * Assumes the given (binary) RubyString to be in the given encoding, then + * converts it to UTF-8. + */ + private RubyString reinterpretEncoding(ThreadContext context, + RubyString str, String sniffedEncoding) { + RubyEncoding actualEncoding = info.getEncoding(context, sniffedEncoding); + RubyEncoding targetEncoding = info.utf8.get(); + RubyString dup = (RubyString)str.dup(); + dup.force_encoding(context, actualEncoding); + return (RubyString)dup.encode_bang(context, targetEncoding); + } + + /** + * Parser#parse() + * + *

Parses the current JSON text source and returns the + * complete data structure as a result. + */ + @JRubyMethod + public IRubyObject parse(ThreadContext context) { + return new ParserSession(this, context, info).parse(); + } + + /** + * Parser#source() + * + *
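+ * For example (illustrative):
+ *
+ *   parser = JSON::Ext::Parser.new('[1, 2]')
+ *   parser.source   # => "[1, 2]" (a fresh copy on each call)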

Returns a copy of the current source string, that was + * used to construct this Parser. + */ + @JRubyMethod(name = "source") + public IRubyObject source_get() { + return checkAndGetSource().dup(); + } + + public RubyString checkAndGetSource() { + if (vSource != null) { + return vSource; + } else { + throw getRuntime().newTypeError("uninitialized instance"); + } + } + + /** + * Queries JSON.create_id. Returns null if it is + * set to nil or false, and a String if not. + */ + private RubyString getCreateId(ThreadContext context) { + IRubyObject v = info.jsonModule.get().callMethod(context, "create_id"); + return v.isTrue() ? v.convertToString() : null; + } + + /** + * A string parsing session. + * + *

Once a ParserSession is instantiated, the source string should not + * change until the parsing is complete. The ParserSession object assumes + * the source {@link RubyString} is still associated to its original + * {@link ByteList}, which in turn must still be bound to the same + * byte[] value (and on the same offset). + */ + // Ragel uses lots of fall-through + @SuppressWarnings("fallthrough") + private static class ParserSession { + private final Parser parser; + private final ThreadContext context; + private final RuntimeInfo info; + private final ByteList byteList; + private final ByteList view; + private final byte[] data; + private final StringDecoder decoder; + private int currentNesting = 0; + private final DoubleConverter dc; + + // initialization value for all state variables. + // no idea about the origins of this value, ask Flori ;) + private static final int EVIL = 0x666; + + private ParserSession(Parser parser, ThreadContext context, RuntimeInfo info) { + this.parser = parser; + this.context = context; + this.info = info; + this.byteList = parser.checkAndGetSource().getByteList(); + this.data = byteList.unsafeBytes(); + this.view = new ByteList(data, false); + this.decoder = new StringDecoder(context); + this.dc = new DoubleConverter(); + } + + private RaiseException unexpectedToken(int absStart, int absEnd) { + RubyString msg = getRuntime().newString("unexpected token at '") + .cat(data, absStart, absEnd - absStart) + .cat((byte)'\''); + return newException(Utils.M_PARSER_ERROR, msg); + } + + private Ruby getRuntime() { + return context.getRuntime(); + } + + +// line 339 "Parser.rl" + + + +// line 321 "Parser.java" +private static byte[] init__JSON_value_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1, 1, 2, 1, 3, 1, 4, 1, + 5, 1, 6, 1, 7, 1, 8, 1, 9 + }; +} + +private static final byte _JSON_value_actions[] = init__JSON_value_actions_0(); + + +private static byte[] init__JSON_value_key_offsets_0() +{ + return new byte [] { + 0, 0, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 + }; +} + +private static final byte _JSON_value_key_offsets[] = init__JSON_value_key_offsets_0(); + + +private static char[] init__JSON_value_trans_keys_0() +{ + return new char [] { + 34, 45, 73, 78, 91, 102, 110, 116, 123, 48, 57, 110, + 102, 105, 110, 105, 116, 121, 97, 78, 97, 108, 115, 101, + 117, 108, 108, 114, 117, 101, 0 + }; +} + +private static final char _JSON_value_trans_keys[] = init__JSON_value_trans_keys_0(); + + +private static byte[] init__JSON_value_single_lengths_0() +{ + return new byte [] { + 0, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 + }; +} + +private static final byte _JSON_value_single_lengths[] = init__JSON_value_single_lengths_0(); + + +private static byte[] init__JSON_value_range_lengths_0() +{ + return new byte [] { + 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_value_range_lengths[] = init__JSON_value_range_lengths_0(); + + +private static byte[] init__JSON_value_index_offsets_0() +{ + return new byte [] { + 0, 0, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, + 31, 33, 35, 37, 39, 41, 43, 45, 47, 49 + }; +} + +private static final byte _JSON_value_index_offsets[] = init__JSON_value_index_offsets_0(); + + +private static byte[] init__JSON_value_trans_targs_0() +{ + return new byte [] { + 21, 21, 2, 9, 21, 11, 15, 18, 21, 21, 0, 3, + 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 21, + 0, 10, 0, 21, 0, 12, 0, 13, 0, 14, 0, 21, + 0, 16, 0, 17, 0, 21, 0, 19, 0, 20, 
0, 21, + 0, 0, 0 + }; +} + +private static final byte _JSON_value_trans_targs[] = init__JSON_value_trans_targs_0(); + + +private static byte[] init__JSON_value_trans_actions_0() +{ + return new byte [] { + 13, 11, 0, 0, 15, 0, 0, 0, 17, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, + 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 5, + 0, 0, 0 + }; +} + +private static final byte _JSON_value_trans_actions[] = init__JSON_value_trans_actions_0(); + + +private static byte[] init__JSON_value_from_state_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 19 + }; +} + +private static final byte _JSON_value_from_state_actions[] = init__JSON_value_from_state_actions_0(); + + +static final int JSON_value_start = 1; +static final int JSON_value_first_final = 21; +static final int JSON_value_error = 0; + +static final int JSON_value_en_main = 1; + + +// line 445 "Parser.rl" + + + void parseValue(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + +// line 443 "Parser.java" + { + cs = JSON_value_start; + } + +// line 452 "Parser.rl" + +// line 450 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _acts = _JSON_value_from_state_actions[cs]; + _nacts = (int) _JSON_value_actions[_acts++]; + while ( _nacts-- > 0 ) { + switch ( _JSON_value_actions[_acts++] ) { + case 9: +// line 430 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 482 "Parser.java" + } + } + + _match: do { + _keys = _JSON_value_key_offsets[cs]; + _trans = _JSON_value_index_offsets[cs]; + _klen = _JSON_value_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_value_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_value_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_value_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_value_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_value_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + cs = _JSON_value_trans_targs[_trans]; + + if ( _JSON_value_trans_actions[_trans] != 0 ) { + _acts = _JSON_value_trans_actions[_trans]; + _nacts = (int) _JSON_value_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_value_actions[_acts++] ) + { + case 0: +// line 347 "Parser.rl" + { + result = getRuntime().getNil(); + } + break; + case 1: +// line 350 "Parser.rl" + { + result = getRuntime().getFalse(); + } + break; + case 2: +// line 353 "Parser.rl" + { + result = getRuntime().getTrue(); + } + break; + case 3: +// line 356 "Parser.rl" + { + if (parser.allowNaN) { + result = getConstant(CONST_NAN); + } else { + throw unexpectedToken(p - 2, pe); + } + } + break; + case 4: +// line 363 
"Parser.rl" + { + if (parser.allowNaN) { + result = getConstant(CONST_INFINITY); + } else { + throw unexpectedToken(p - 7, pe); + } + } + break; + case 5: +// line 370 "Parser.rl" + { + if (pe > p + 8 && + absSubSequence(p, p + 9).equals(JSON_MINUS_INFINITY)) { + + if (parser.allowNaN) { + result = getConstant(CONST_MINUS_INFINITY); + {p = (( p + 10))-1;} + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + throw unexpectedToken(p, pe); + } + } + parseFloat(res, p, pe); + if (res.result != null) { + result = res.result; + {p = (( res.p))-1;} + } + parseInteger(res, p, pe); + if (res.result != null) { + result = res.result; + {p = (( res.p))-1;} + } + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; + case 6: +// line 396 "Parser.rl" + { + parseString(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; + case 7: +// line 406 "Parser.rl" + { + currentNesting++; + parseArray(res, p, pe); + currentNesting--; + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; + case 8: +// line 418 "Parser.rl" + { + currentNesting++; + parseObject(res, p, pe); + currentNesting--; + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; +// line 654 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 453 "Parser.rl" + + if (cs >= JSON_value_first_final && result != null) { + res.update(result, p); + } else { + res.update(null, p); + } + } + + +// line 684 "Parser.java" +private static byte[] init__JSON_integer_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_integer_actions[] = init__JSON_integer_actions_0(); + + +private static byte[] init__JSON_integer_key_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 9, 9 + }; +} + +private static final byte _JSON_integer_key_offsets[] = init__JSON_integer_key_offsets_0(); + + +private static char[] init__JSON_integer_trans_keys_0() +{ + return new char [] { + 45, 48, 49, 57, 48, 49, 57, 48, 57, 48, 57, 0 + }; +} + +private static final char _JSON_integer_trans_keys[] = init__JSON_integer_trans_keys_0(); + + +private static byte[] init__JSON_integer_single_lengths_0() +{ + return new byte [] { + 0, 2, 1, 0, 0, 0 + }; +} + +private static final byte _JSON_integer_single_lengths[] = init__JSON_integer_single_lengths_0(); + + +private static byte[] init__JSON_integer_range_lengths_0() +{ + return new byte [] { + 0, 1, 1, 1, 0, 1 + }; +} + +private static final byte _JSON_integer_range_lengths[] = init__JSON_integer_range_lengths_0(); + + +private static byte[] init__JSON_integer_index_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 9, 10 + }; +} + +private static final byte _JSON_integer_index_offsets[] = init__JSON_integer_index_offsets_0(); + + +private static byte[] init__JSON_integer_indicies_0() +{ + return new byte [] { + 0, 2, 3, 1, 2, 3, 1, 1, 4, 1, 3, 4, + 0 + }; +} + +private static final byte _JSON_integer_indicies[] = init__JSON_integer_indicies_0(); + + +private static byte[] init__JSON_integer_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 5, 4 + }; +} + +private static final 
byte _JSON_integer_trans_targs[] = init__JSON_integer_trans_targs_0(); + + +private static byte[] init__JSON_integer_trans_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_integer_trans_actions[] = init__JSON_integer_trans_actions_0(); + + +static final int JSON_integer_start = 1; +static final int JSON_integer_first_final = 3; +static final int JSON_integer_error = 0; + +static final int JSON_integer_en_main = 1; + + +// line 472 "Parser.rl" + + + void parseInteger(ParserResult res, int p, int pe) { + int new_p = parseIntegerInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + RubyInteger number = createInteger(p, new_p); + res.update(number, new_p + 1); + return; + } + + int parseIntegerInternal(int p, int pe) { + int cs = EVIL; + + +// line 801 "Parser.java" + { + cs = JSON_integer_start; + } + +// line 489 "Parser.rl" + int memo = p; + +// line 809 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_integer_key_offsets[cs]; + _trans = _JSON_integer_index_offsets[cs]; + _klen = _JSON_integer_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_integer_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_integer_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_integer_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_integer_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_integer_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_integer_indicies[_trans]; + cs = _JSON_integer_trans_targs[_trans]; + + if ( _JSON_integer_trans_actions[_trans] != 0 ) { + _acts = _JSON_integer_trans_actions[_trans]; + _nacts = (int) _JSON_integer_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_integer_actions[_acts++] ) + { + case 0: +// line 466 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 896 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 491 "Parser.rl" + + if (cs < JSON_integer_first_final) { + return -1; + } + + return p; + } + + RubyInteger createInteger(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return bytesToInum(runtime, num); + } + + RubyInteger bytesToInum(Ruby runtime, ByteList num) { + return runtime.is1_9() ? 
+ ConvertBytes.byteListToInum19(runtime, num, 10, true) : + ConvertBytes.byteListToInum(runtime, num, 10, true); + } + + +// line 938 "Parser.java" +private static byte[] init__JSON_float_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_float_actions[] = init__JSON_float_actions_0(); + + +private static byte[] init__JSON_float_key_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 10, 12, 16, 18, 23, 29, 29 + }; +} + +private static final byte _JSON_float_key_offsets[] = init__JSON_float_key_offsets_0(); + + +private static char[] init__JSON_float_trans_keys_0() +{ + return new char [] { + 45, 48, 49, 57, 48, 49, 57, 46, 69, 101, 48, 57, + 43, 45, 48, 57, 48, 57, 46, 69, 101, 48, 57, 69, + 101, 45, 46, 48, 57, 69, 101, 45, 46, 48, 57, 0 + }; +} + +private static final char _JSON_float_trans_keys[] = init__JSON_float_trans_keys_0(); + + +private static byte[] init__JSON_float_single_lengths_0() +{ + return new byte [] { + 0, 2, 1, 3, 0, 2, 0, 3, 2, 0, 2 + }; +} + +private static final byte _JSON_float_single_lengths[] = init__JSON_float_single_lengths_0(); + + +private static byte[] init__JSON_float_range_lengths_0() +{ + return new byte [] { + 0, 1, 1, 0, 1, 1, 1, 1, 2, 0, 2 + }; +} + +private static final byte _JSON_float_range_lengths[] = init__JSON_float_range_lengths_0(); + + +private static byte[] init__JSON_float_index_offsets_0() +{ + return new byte [] { + 0, 0, 4, 7, 11, 13, 17, 19, 24, 29, 30 + }; +} + +private static final byte _JSON_float_index_offsets[] = init__JSON_float_index_offsets_0(); + + +private static byte[] init__JSON_float_indicies_0() +{ + return new byte [] { + 0, 2, 3, 1, 2, 3, 1, 4, 5, 5, 1, 6, + 1, 7, 7, 8, 1, 8, 1, 4, 5, 5, 3, 1, + 5, 5, 1, 6, 9, 1, 1, 1, 1, 8, 9, 0 + }; +} + +private static final byte _JSON_float_indicies[] = init__JSON_float_indicies_0(); + + +private static byte[] init__JSON_float_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 7, 4, 5, 8, 6, 10, 9 + }; +} + +private static final byte _JSON_float_trans_targs[] = init__JSON_float_trans_targs_0(); + + +private static byte[] init__JSON_float_trans_actions_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_float_trans_actions[] = init__JSON_float_trans_actions_0(); + + +static final int JSON_float_start = 1; +static final int JSON_float_first_final = 8; +static final int JSON_float_error = 0; + +static final int JSON_float_en_main = 1; + + +// line 526 "Parser.rl" + + + void parseFloat(ParserResult res, int p, int pe) { + int new_p = parseFloatInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + IRubyObject number = parser.decimalClass == null ? 
+ createFloat(p, new_p) : createCustomDecimal(p, new_p); + + res.update(number, new_p + 1); + return; + } + + int parseFloatInternal(int p, int pe) { + int cs = EVIL; + + +// line 1060 "Parser.java" + { + cs = JSON_float_start; + } + +// line 545 "Parser.rl" + int memo = p; + +// line 1068 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_float_key_offsets[cs]; + _trans = _JSON_float_index_offsets[cs]; + _klen = _JSON_float_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_float_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_float_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_float_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_float_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_float_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_float_indicies[_trans]; + cs = _JSON_float_trans_targs[_trans]; + + if ( _JSON_float_trans_actions[_trans] != 0 ) { + _acts = _JSON_float_trans_actions[_trans]; + _nacts = (int) _JSON_float_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_float_actions[_acts++] ) + { + case 0: +// line 517 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1155 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 547 "Parser.rl" + + if (cs < JSON_float_first_final) { + return -1; + } + + return p; + } + + RubyFloat createFloat(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return RubyFloat.newFloat(runtime, dc.parse(num, true, runtime.is1_9())); + } + + IRubyObject createCustomDecimal(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + IRubyObject numString = runtime.newString(num.toString()); + return parser.decimalClass.callMethod(context, "new", numString); + } + + +// line 1198 "Parser.java" +private static byte[] init__JSON_string_actions_0() +{ + return new byte [] { + 0, 2, 0, 1 + }; +} + +private static final byte _JSON_string_actions[] = init__JSON_string_actions_0(); + + +private static byte[] init__JSON_string_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 5, 8, 14, 20, 26, 32 + }; +} + +private static final byte _JSON_string_key_offsets[] = init__JSON_string_key_offsets_0(); + + +private static char[] init__JSON_string_trans_keys_0() +{ + return new char [] { + 34, 34, 92, 0, 31, 117, 0, 31, 48, 57, 65, 70, + 97, 102, 48, 57, 65, 70, 97, 102, 48, 57, 65, 70, + 97, 102, 48, 57, 65, 70, 97, 102, 0 + }; +} + +private static final char 
_JSON_string_trans_keys[] = init__JSON_string_trans_keys_0(); + + +private static byte[] init__JSON_string_single_lengths_0() +{ + return new byte [] { + 0, 1, 2, 1, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_string_single_lengths[] = init__JSON_string_single_lengths_0(); + + +private static byte[] init__JSON_string_range_lengths_0() +{ + return new byte [] { + 0, 0, 1, 1, 3, 3, 3, 3, 0 + }; +} + +private static final byte _JSON_string_range_lengths[] = init__JSON_string_range_lengths_0(); + + +private static byte[] init__JSON_string_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 6, 9, 13, 17, 21, 25 + }; +} + +private static final byte _JSON_string_index_offsets[] = init__JSON_string_index_offsets_0(); + + +private static byte[] init__JSON_string_indicies_0() +{ + return new byte [] { + 0, 1, 2, 3, 1, 0, 4, 1, 0, 5, 5, 5, + 1, 6, 6, 6, 1, 7, 7, 7, 1, 0, 0, 0, + 1, 1, 0 + }; +} + +private static final byte _JSON_string_indicies[] = init__JSON_string_indicies_0(); + + +private static byte[] init__JSON_string_trans_targs_0() +{ + return new byte [] { + 2, 0, 8, 3, 4, 5, 6, 7 + }; +} + +private static final byte _JSON_string_trans_targs[] = init__JSON_string_trans_targs_0(); + + +private static byte[] init__JSON_string_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_string_trans_actions[] = init__JSON_string_trans_actions_0(); + + +static final int JSON_string_start = 1; +static final int JSON_string_first_final = 8; +static final int JSON_string_error = 0; + +static final int JSON_string_en_main = 1; + + +// line 599 "Parser.rl" + + + void parseString(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + +// line 1308 "Parser.java" + { + cs = JSON_string_start; + } + +// line 606 "Parser.rl" + int memo = p; + +// line 1316 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_string_key_offsets[cs]; + _trans = _JSON_string_index_offsets[cs]; + _klen = _JSON_string_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_string_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_string_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_string_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_string_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_string_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_string_indicies[_trans]; + cs = _JSON_string_trans_targs[_trans]; + + if ( _JSON_string_trans_actions[_trans] != 0 ) { + _acts = _JSON_string_trans_actions[_trans]; + _nacts = (int) _JSON_string_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_string_actions[_acts++] ) + { + case 0: 
+// line 574 "Parser.rl" + { + int offset = byteList.begin(); + ByteList decoded = decoder.decode(byteList, memo + 1 - offset, + p - offset); + result = getRuntime().newString(decoded); + if (result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + {p = (( p + 1))-1;} + } + } + break; + case 1: +// line 587 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1418 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 608 "Parser.rl" + + if (parser.createAdditions) { + RubyHash matchString = parser.match_string; + if (matchString != null) { + final IRubyObject[] memoArray = { result, null }; + try { + matchString.visitAll(new RubyHash.Visitor() { + @Override + public void visit(IRubyObject pattern, IRubyObject klass) { + if (pattern.callMethod(context, "===", memoArray[0]).isTrue()) { + memoArray[1] = klass; + throw JumpException.SPECIAL_JUMP; + } + } + }); + } catch (JumpException e) { } + if (memoArray[1] != null) { + RubyClass klass = (RubyClass) memoArray[1]; + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + result = klass.callMethod(context, "json_create", result); + } + } + } + } + + if (cs >= JSON_string_first_final && result != null) { + if (result instanceof RubyString) { + ((RubyString)result).force_encoding(context, info.utf8.get()); + } + res.update(result, p + 1); + } else { + res.update(null, p + 1); + } + } + + +// line 1476 "Parser.java" +private static byte[] init__JSON_array_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1 + }; +} + +private static final byte _JSON_array_actions[] = init__JSON_array_actions_0(); + + +private static byte[] init__JSON_array_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 18, 25, 41, 43, 44, 46, 47, 49, 50, + 52, 53, 55, 56, 58, 59 + }; +} + +private static final byte _JSON_array_key_offsets[] = init__JSON_array_key_offsets_0(); + + +private static char[] init__JSON_array_trans_keys_0() +{ + return new char [] { + 91, 13, 32, 34, 45, 47, 73, 78, 91, 93, 102, 110, + 116, 123, 9, 10, 48, 57, 13, 32, 44, 47, 93, 9, + 10, 13, 32, 34, 45, 47, 73, 78, 91, 102, 110, 116, + 123, 9, 10, 48, 57, 42, 47, 42, 42, 47, 10, 42, + 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, 10, 0 + }; +} + +private static final char _JSON_array_trans_keys[] = init__JSON_array_trans_keys_0(); + + +private static byte[] init__JSON_array_single_lengths_0() +{ + return new byte [] { + 0, 1, 13, 5, 12, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 0 + }; +} + +private static final byte _JSON_array_single_lengths[] = init__JSON_array_single_lengths_0(); + + +private static byte[] init__JSON_array_range_lengths_0() +{ + return new byte [] { + 0, 0, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_array_range_lengths[] = init__JSON_array_range_lengths_0(); + + +private static byte[] init__JSON_array_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 18, 25, 40, 43, 45, 48, 50, 53, 55, + 58, 60, 63, 65, 68, 70 + }; +} + +private static final byte _JSON_array_index_offsets[] = init__JSON_array_index_offsets_0(); + + +private static byte[] init__JSON_array_indicies_0() +{ + return new byte [] { + 0, 1, 0, 0, 2, 2, 3, 2, 2, 2, 4, 2, + 2, 2, 2, 0, 2, 1, 5, 5, 6, 7, 4, 5, + 1, 6, 6, 2, 2, 8, 2, 2, 2, 2, 2, 2, + 2, 6, 2, 1, 9, 10, 1, 11, 9, 11, 6, 9, + 6, 10, 
12, 13, 1, 14, 12, 14, 5, 12, 5, 13, + 15, 16, 1, 17, 15, 17, 0, 15, 0, 16, 1, 0 + }; +} + +private static final byte _JSON_array_indicies[] = init__JSON_array_indicies_0(); + + +private static byte[] init__JSON_array_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 13, 17, 3, 4, 9, 5, 6, 8, 7, + 10, 12, 11, 14, 16, 15 + }; +} + +private static final byte _JSON_array_trans_targs[] = init__JSON_array_trans_targs_0(); + + +private static byte[] init__JSON_array_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_array_trans_actions[] = init__JSON_array_trans_actions_0(); + + +static final int JSON_array_start = 1; +static final int JSON_array_first_final = 17; +static final int JSON_array_error = 0; + +static final int JSON_array_en_main = 1; + + +// line 681 "Parser.rl" + + + void parseArray(ParserResult res, int p, int pe) { + int cs = EVIL; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + IRubyObject result; + if (parser.arrayClass == getRuntime().getArray()) { + result = RubyArray.newArray(getRuntime()); + } else { + result = parser.arrayClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + +// line 1609 "Parser.java" + { + cs = JSON_array_start; + } + +// line 700 "Parser.rl" + +// line 1616 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_array_key_offsets[cs]; + _trans = _JSON_array_index_offsets[cs]; + _klen = _JSON_array_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_array_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_array_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_array_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_array_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_array_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_array_indicies[_trans]; + cs = _JSON_array_trans_targs[_trans]; + + if ( _JSON_array_trans_actions[_trans] != 0 ) { + _acts = _JSON_array_trans_actions[_trans]; + _nacts = (int) _JSON_array_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_array_actions[_acts++] ) + { + case 0: +// line 650 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + if (parser.arrayClass == getRuntime().getArray()) { + ((RubyArray)result).append(res.result); + } else { + result.callMethod(context, "<<", res.result); + } + {p = (( res.p))-1;} + } + } + break; + case 1: +// line 665 "Parser.rl" + { + p--; + { p += 1; 
_goto_targ = 5; if (true) continue _goto;} + } + break; +// line 1720 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 701 "Parser.rl" + + if (cs >= JSON_array_first_final) { + res.update(result, p + 1); + } else { + throw unexpectedToken(p, pe); + } + } + + +// line 1750 "Parser.java" +private static byte[] init__JSON_object_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 1, 1, 2 + }; +} + +private static final byte _JSON_object_actions[] = init__JSON_object_actions_0(); + + +private static byte[] init__JSON_object_key_offsets_0() +{ + return new byte [] { + 0, 0, 1, 8, 14, 16, 17, 19, 20, 36, 43, 49, + 51, 52, 54, 55, 57, 58, 60, 61, 63, 64, 66, 67, + 69, 70, 72, 73 + }; +} + +private static final byte _JSON_object_key_offsets[] = init__JSON_object_key_offsets_0(); + + +private static char[] init__JSON_object_trans_keys_0() +{ + return new char [] { + 123, 13, 32, 34, 47, 125, 9, 10, 13, 32, 47, 58, + 9, 10, 42, 47, 42, 42, 47, 10, 13, 32, 34, 45, + 47, 73, 78, 91, 102, 110, 116, 123, 9, 10, 48, 57, + 13, 32, 44, 47, 125, 9, 10, 13, 32, 34, 47, 9, + 10, 42, 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, + 10, 42, 47, 42, 42, 47, 10, 42, 47, 42, 42, 47, + 10, 0 + }; +} + +private static final char _JSON_object_trans_keys[] = init__JSON_object_trans_keys_0(); + + +private static byte[] init__JSON_object_single_lengths_0() +{ + return new byte [] { + 0, 1, 5, 4, 2, 1, 2, 1, 12, 5, 4, 2, + 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 0 + }; +} + +private static final byte _JSON_object_single_lengths[] = init__JSON_object_single_lengths_0(); + + +private static byte[] init__JSON_object_range_lengths_0() +{ + return new byte [] { + 0, 0, 1, 1, 0, 0, 0, 0, 2, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }; +} + +private static final byte _JSON_object_range_lengths[] = init__JSON_object_range_lengths_0(); + + +private static byte[] init__JSON_object_index_offsets_0() +{ + return new byte [] { + 0, 0, 2, 9, 15, 18, 20, 23, 25, 40, 47, 53, + 56, 58, 61, 63, 66, 68, 71, 73, 76, 78, 81, 83, + 86, 88, 91, 93 + }; +} + +private static final byte _JSON_object_index_offsets[] = init__JSON_object_index_offsets_0(); + + +private static byte[] init__JSON_object_indicies_0() +{ + return new byte [] { + 0, 1, 0, 0, 2, 3, 4, 0, 1, 5, 5, 6, + 7, 5, 1, 8, 9, 1, 10, 8, 10, 5, 8, 5, + 9, 7, 7, 11, 11, 12, 11, 11, 11, 11, 11, 11, + 11, 7, 11, 1, 13, 13, 14, 15, 4, 13, 1, 14, + 14, 2, 16, 14, 1, 17, 18, 1, 19, 17, 19, 14, + 17, 14, 18, 20, 21, 1, 22, 20, 22, 13, 20, 13, + 21, 23, 24, 1, 25, 23, 25, 7, 23, 7, 24, 26, + 27, 1, 28, 26, 28, 0, 26, 0, 27, 1, 0 + }; +} + +private static final byte _JSON_object_indicies[] = init__JSON_object_indicies_0(); + + +private static byte[] init__JSON_object_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 23, 27, 3, 4, 8, 5, 7, 6, 9, + 19, 9, 10, 15, 11, 12, 14, 13, 16, 18, 17, 20, + 22, 21, 24, 26, 25 + }; +} + +private static final byte _JSON_object_trans_targs[] = init__JSON_object_trans_targs_0(); + + +private static byte[] init__JSON_object_trans_actions_0() +{ + return new byte [] { + 0, 0, 3, 0, 5, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_object_trans_actions[] = init__JSON_object_trans_actions_0(); + + +static final int JSON_object_start = 1; +static final int JSON_object_first_final = 27; +static final int 
JSON_object_error = 0; + +static final int JSON_object_en_main = 1; + + +// line 760 "Parser.rl" + + + void parseObject(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject lastName = null; + boolean objectDefault = true; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + // this is guaranteed to be a RubyHash due to the earlier + // allocator test at OptionsReader#getClass + IRubyObject result; + if (parser.objectClass == getRuntime().getHash()) { + result = RubyHash.newHash(getRuntime()); + } else { + objectDefault = false; + result = parser.objectClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + +// line 1898 "Parser.java" + { + cs = JSON_object_start; + } + +// line 784 "Parser.rl" + +// line 1905 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_object_key_offsets[cs]; + _trans = _JSON_object_index_offsets[cs]; + _klen = _JSON_object_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_object_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_object_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_object_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_object_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_object_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_object_indicies[_trans]; + cs = _JSON_object_trans_targs[_trans]; + + if ( _JSON_object_trans_actions[_trans] != 0 ) { + _acts = _JSON_object_trans_actions[_trans]; + _nacts = (int) _JSON_object_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_object_actions[_acts++] ) + { + case 0: +// line 715 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + if (parser.objectClass == getRuntime().getHash()) { + ((RubyHash)result).op_aset(context, lastName, res.result); + } else { + result.callMethod(context, "[]=", new IRubyObject[] { lastName, res.result }); + } + {p = (( res.p))-1;} + } + } + break; + case 1: +// line 730 "Parser.rl" + { + parseString(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + RubyString name = (RubyString)res.result; + if (parser.symbolizeNames) { + lastName = context.getRuntime().is1_9() + ? 
name.intern19() + : name.intern(); + } else { + lastName = name; + } + {p = (( res.p))-1;} + } + } + break; + case 2: +// line 748 "Parser.rl" + { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 2029 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 785 "Parser.rl" + + if (cs < JSON_object_first_final) { + res.update(null, p + 1); + return; + } + + IRubyObject returnedResult = result; + + // attempt to de-serialize object + if (parser.createAdditions) { + IRubyObject vKlassName; + if (objectDefault) { + vKlassName = ((RubyHash)result).op_aref(context, parser.createId); + } else { + vKlassName = result.callMethod(context, "[]", parser.createId); + } + + if (!vKlassName.isNil()) { + // might throw ArgumentError, we let it propagate + IRubyObject klass = parser.info.jsonModule.get(). + callMethod(context, "deep_const_get", vKlassName); + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + + returnedResult = klass.callMethod(context, "json_create", result); + } + } + } + res.update(returnedResult, p + 1); + } + + +// line 2082 "Parser.java" +private static byte[] init__JSON_actions_0() +{ + return new byte [] { + 0, 1, 0 + }; +} + +private static final byte _JSON_actions[] = init__JSON_actions_0(); + + +private static byte[] init__JSON_key_offsets_0() +{ + return new byte [] { + 0, 0, 16, 18, 19, 21, 22, 24, 25, 27, 28 + }; +} + +private static final byte _JSON_key_offsets[] = init__JSON_key_offsets_0(); + + +private static char[] init__JSON_trans_keys_0() +{ + return new char [] { + 13, 32, 34, 45, 47, 73, 78, 91, 102, 110, 116, 123, + 9, 10, 48, 57, 42, 47, 42, 42, 47, 10, 42, 47, + 42, 42, 47, 10, 13, 32, 47, 9, 10, 0 + }; +} + +private static final char _JSON_trans_keys[] = init__JSON_trans_keys_0(); + + +private static byte[] init__JSON_single_lengths_0() +{ + return new byte [] { + 0, 12, 2, 1, 2, 1, 2, 1, 2, 1, 3 + }; +} + +private static final byte _JSON_single_lengths[] = init__JSON_single_lengths_0(); + + +private static byte[] init__JSON_range_lengths_0() +{ + return new byte [] { + 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1 + }; +} + +private static final byte _JSON_range_lengths[] = init__JSON_range_lengths_0(); + + +private static byte[] init__JSON_index_offsets_0() +{ + return new byte [] { + 0, 0, 15, 18, 20, 23, 25, 28, 30, 33, 35 + }; +} + +private static final byte _JSON_index_offsets[] = init__JSON_index_offsets_0(); + + +private static byte[] init__JSON_indicies_0() +{ + return new byte [] { + 0, 0, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, + 0, 2, 1, 4, 5, 1, 6, 4, 6, 7, 4, 7, + 5, 8, 9, 1, 10, 8, 10, 0, 8, 0, 9, 7, + 7, 11, 7, 1, 0 + }; +} + +private static final byte _JSON_indicies[] = init__JSON_indicies_0(); + + +private static byte[] init__JSON_trans_targs_0() +{ + return new byte [] { + 1, 0, 10, 6, 3, 5, 4, 10, 7, 9, 8, 2 + }; +} + +private static final byte _JSON_trans_targs[] = init__JSON_trans_targs_0(); + + +private static byte[] init__JSON_trans_actions_0() +{ + return new byte [] { + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; +} + +private static final byte _JSON_trans_actions[] = init__JSON_trans_actions_0(); + + +static final int JSON_start = 1; +static final int JSON_first_final = 10; +static final int JSON_error = 0; + +static final int JSON_en_main = 1; + + +// line 836 "Parser.rl" + + + public IRubyObject parseImplemetation() { + int cs = 
EVIL; + int p, pe; + IRubyObject result = null; + ParserResult res = new ParserResult(); + + +// line 2195 "Parser.java" + { + cs = JSON_start; + } + +// line 845 "Parser.rl" + p = byteList.begin(); + pe = p + byteList.length(); + +// line 2204 "Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _JSON_key_offsets[cs]; + _trans = _JSON_index_offsets[cs]; + _klen = _JSON_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _JSON_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _JSON_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _JSON_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _JSON_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _JSON_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _JSON_indicies[_trans]; + cs = _JSON_trans_targs[_trans]; + + if ( _JSON_trans_actions[_trans] != 0 ) { + _acts = _JSON_trans_actions[_trans]; + _nacts = (int) _JSON_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _JSON_actions[_acts++] ) + { + case 0: +// line 822 "Parser.rl" + { + parseValue(res, p, pe); + if (res.result == null) { + p--; + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } else { + result = res.result; + {p = (( res.p))-1;} + } + } + break; +// line 2297 "Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 848 "Parser.rl" + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + throw unexpectedToken(p, pe); + } + } + + public IRubyObject parse() { + return parseImplemetation(); + } + + /** + * Updates the "view" bytelist with the new offsets and returns it. + * @param start + * @param end + */ + private ByteList absSubSequence(int absStart, int absEnd) { + view.setBegin(absStart); + view.setRealSize(absEnd - absStart); + return view; + } + + /** + * Retrieves a constant directly descended from the JSON module. 
+ * @param name The constant name + */ + private IRubyObject getConstant(String name) { + return parser.info.jsonModule.get().getConstant(name); + } + + private RaiseException newException(String className, String message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, RubyString message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, + String messageBegin, ByteList messageEnd) { + return newException(className, + getRuntime().newString(messageBegin).cat(messageEnd)); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Parser.rl b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Parser.rl new file mode 100644 index 0000000..c0c72ad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Parser.rl @@ -0,0 +1,893 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyFloat; +import org.jruby.RubyHash; +import org.jruby.RubyInteger; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.JumpException; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; +import org.jruby.util.ConvertBytes; +import static org.jruby.util.ConvertDouble.DoubleConverter; + +/** + * The JSON::Ext::Parser class. + * + *

This is the JSON parser implemented as a Java class. To use it as the + * standard parser, set + *   JSON.parser = JSON::Ext::Parser + * This is performed for you when you include "json/ext". + * + *
This class does not perform the actual parsing, just acts as an interface + * to Ruby code. When the {@link #parse()} method is invoked, a + * Parser.ParserSession object is instantiated, which handles the process. + * + * @author mernen + */ +public class Parser extends RubyObject { + private final RuntimeInfo info; + private RubyString vSource; + private RubyString createId; + private boolean createAdditions; + private int maxNesting; + private boolean allowNaN; + private boolean symbolizeNames; + private RubyClass objectClass; + private RubyClass arrayClass; + private RubyClass decimalClass; + private RubyHash match_string; + + private static final int DEFAULT_MAX_NESTING = 100; + + private static final ByteList JSON_MINUS_INFINITY = new ByteList(ByteList.plain("-Infinity")); + // constant names in the JSON module containing those values + private static final String CONST_NAN = "NaN"; + private static final String CONST_INFINITY = "Infinity"; + private static final String CONST_MINUS_INFINITY = "MinusInfinity"; + + static final ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Parser(runtime, klazz); + } + }; + + /** + * Multiple-value return for internal parser methods. + * + *

All the parseStuff methods return instances of + * ParserResult when successful, or null when + * there's a problem with the input data. + */ + static final class ParserResult { + /** + * The result of the successful parsing. Should never be + * null. + */ + IRubyObject result; + /** + * The point where the parser returned. + */ + int p; + + void update(IRubyObject result, int p) { + this.result = result; + this.p = p; + } + } + + public Parser(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + info = RuntimeInfo.forRuntime(runtime); + } + + /** + * Parser.new(source, opts = {}) + * + *

Creates a new JSON::Ext::Parser instance for the string + * source. + * It will be configured by the opts Hash. + * opts can have the following keys: + * + *

+ *
:max_nesting + *
The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, + * it defaults to 100. + * + *
:allow_nan + *
If set to true, allow NaN, + * Infinity and -Infinity in defiance of RFC 4627 + * to be parsed by the Parser. This option defaults to false. + * + *
:symbolize_names + *
If set to true, returns symbols for the names (keys) in + * a JSON object. Otherwise strings are returned, which is also the default. + * + *
:create_additions + *
If set to false, the Parser doesn't create additions + * even if a matching class and create_id was found. This option + * defaults to true. + * + *
:object_class + *
Defaults to Hash. + * + *
:array_class + *
Defaults to Array. + * + *
:decimal_class + *
Specifies which class to use instead of the default (Float) when + * parsing decimal numbers. This class must accept a single string argument + * in its constructor. + *
+ */ + @JRubyMethod(name = "new", required = 1, optional = 1, meta = true) + public static IRubyObject newInstance(IRubyObject clazz, IRubyObject[] args, Block block) { + Parser parser = (Parser)((RubyClass)clazz).allocate(); + + parser.callInit(args, block); + + return parser; + } + + @JRubyMethod(required = 1, optional = 1, visibility = Visibility.PRIVATE) + public IRubyObject initialize(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.getRuntime(); + if (this.vSource != null) { + throw runtime.newTypeError("already initialized instance"); + } + + OptionsReader opts = new OptionsReader(context, args.length > 1 ? args[1] : null); + this.maxNesting = opts.getInt("max_nesting", DEFAULT_MAX_NESTING); + this.allowNaN = opts.getBool("allow_nan", false); + this.symbolizeNames = opts.getBool("symbolize_names", false); + this.createId = opts.getString("create_id", getCreateId(context)); + this.createAdditions = opts.getBool("create_additions", false); + this.objectClass = opts.getClass("object_class", runtime.getHash()); + this.arrayClass = opts.getClass("array_class", runtime.getArray()); + this.decimalClass = opts.getClass("decimal_class", null); + this.match_string = opts.getHash("match_string"); + + if(symbolizeNames && createAdditions) { + throw runtime.newArgumentError( + "options :symbolize_names and :create_additions cannot be " + + " used in conjunction" + ); + } + this.vSource = args[0].convertToString(); + this.vSource = convertEncoding(context, vSource); + + return this; + } + + /** + * Checks the given string's encoding. If a non-UTF-8 encoding is detected, + * a converted copy is returned. + * Returns the source string if no conversion is needed. + */ + private RubyString convertEncoding(ThreadContext context, RubyString source) { + RubyEncoding encoding = (RubyEncoding)source.encoding(context); + if (encoding == info.ascii8bit.get()) { + if (source.isFrozen()) { + source = (RubyString) source.dup(); + } + source.force_encoding(context, info.utf8.get()); + } else { + source = (RubyString) source.encode(context, info.utf8.get()); + } + return source; + } + + /** + * Checks the first four bytes of the given ByteList to infer its encoding, + * using the principle demonstrated on section 3 of RFC 4627 (JSON). + */ + private static String sniffByteList(ByteList bl) { + if (bl.length() < 4) return null; + if (bl.get(0) == 0 && bl.get(2) == 0) { + return bl.get(1) == 0 ? "utf-32be" : "utf-16be"; + } + if (bl.get(1) == 0 && bl.get(3) == 0) { + return bl.get(2) == 0 ? "utf-32le" : "utf-16le"; + } + return null; + } + + /** + * Assumes the given (binary) RubyString to be in the given encoding, then + * converts it to UTF-8. + */ + private RubyString reinterpretEncoding(ThreadContext context, + RubyString str, String sniffedEncoding) { + RubyEncoding actualEncoding = info.getEncoding(context, sniffedEncoding); + RubyEncoding targetEncoding = info.utf8.get(); + RubyString dup = (RubyString)str.dup(); + dup.force_encoding(context, actualEncoding); + return (RubyString)dup.encode_bang(context, targetEncoding); + } + + /** + * Parser#parse() + * + *

Parses the current JSON text source and returns the + * complete data structure as a result. + */ + @JRubyMethod + public IRubyObject parse(ThreadContext context) { + return new ParserSession(this, context, info).parse(); + } + + /** + * Parser#source() + * + *

Returns a copy of the current source string, that was + * used to construct this Parser. + */ + @JRubyMethod(name = "source") + public IRubyObject source_get() { + return checkAndGetSource().dup(); + } + + public RubyString checkAndGetSource() { + if (vSource != null) { + return vSource; + } else { + throw getRuntime().newTypeError("uninitialized instance"); + } + } + + /** + * Queries JSON.create_id. Returns null if it is + * set to nil or false, and a String if not. + */ + private RubyString getCreateId(ThreadContext context) { + IRubyObject v = info.jsonModule.get().callMethod(context, "create_id"); + return v.isTrue() ? v.convertToString() : null; + } + + /** + * A string parsing session. + * + *

Once a ParserSession is instantiated, the source string should not + * change until the parsing is complete. The ParserSession object assumes + * the source {@link RubyString} is still associated to its original + * {@link ByteList}, which in turn must still be bound to the same + * byte[] value (and on the same offset). + */ + // Ragel uses lots of fall-through + @SuppressWarnings("fallthrough") + private static class ParserSession { + private final Parser parser; + private final ThreadContext context; + private final RuntimeInfo info; + private final ByteList byteList; + private final ByteList view; + private final byte[] data; + private final StringDecoder decoder; + private int currentNesting = 0; + private final DoubleConverter dc; + + // initialization value for all state variables. + // no idea about the origins of this value, ask Flori ;) + private static final int EVIL = 0x666; + + private ParserSession(Parser parser, ThreadContext context, RuntimeInfo info) { + this.parser = parser; + this.context = context; + this.info = info; + this.byteList = parser.checkAndGetSource().getByteList(); + this.data = byteList.unsafeBytes(); + this.view = new ByteList(data, false); + this.decoder = new StringDecoder(context); + this.dc = new DoubleConverter(); + } + + private RaiseException unexpectedToken(int absStart, int absEnd) { + RubyString msg = getRuntime().newString("unexpected token at '") + .cat(data, absStart, absEnd - absStart) + .cat((byte)'\''); + return newException(Utils.M_PARSER_ERROR, msg); + } + + private Ruby getRuntime() { + return context.getRuntime(); + } + + %%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft"\-[{NI] | digit; + begin_object = '{'; + end_object = '}'; + begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; + }%% + + %%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + result = getRuntime().getNil(); + } + action parse_false { + result = getRuntime().getFalse(); + } + action parse_true { + result = getRuntime().getTrue(); + } + action parse_nan { + if (parser.allowNaN) { + result = getConstant(CONST_NAN); + } else { + throw unexpectedToken(p - 2, pe); + } + } + action parse_infinity { + if (parser.allowNaN) { + result = getConstant(CONST_INFINITY); + } else { + throw unexpectedToken(p - 7, pe); + } + } + action parse_number { + if (pe > fpc + 8 && + absSubSequence(fpc, fpc + 9).equals(JSON_MINUS_INFINITY)) { + + if (parser.allowNaN) { + result = getConstant(CONST_MINUS_INFINITY); + fexec p + 10; + fhold; + fbreak; + } else { + throw unexpectedToken(p, pe); + } + } + parseFloat(res, fpc, pe); + if (res.result != null) { + result = res.result; + fexec res.p; + } + parseInteger(res, fpc, pe); + if (res.result != null) { + result = res.result; + fexec res.p; + } + fhold; + fbreak; + } + action parse_string { + parseString(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + action parse_array { + currentNesting++; + parseArray(res, fpc, pe); + currentNesting--; + if (res.result == null) { + fhold; + fbreak; + } else { + result 
= res.result; + fexec res.p; + } + } + action parse_object { + currentNesting++; + parseObject(res, fpc, pe); + currentNesting--; + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + action exit { + fhold; + fbreak; + } + + main := ( Vnull @parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) %*exit; + }%% + + void parseValue(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + %% write init; + %% write exec; + + if (cs >= JSON_value_first_final && result != null) { + res.update(result, p); + } else { + res.update(null, p); + } + } + + %%{ + machine JSON_integer; + + write data; + + action exit { + fhold; + fbreak; + } + + main := '-'? ( '0' | [1-9][0-9]* ) ( ^[0-9]? @exit ); + }%% + + void parseInteger(ParserResult res, int p, int pe) { + int new_p = parseIntegerInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + RubyInteger number = createInteger(p, new_p); + res.update(number, new_p + 1); + return; + } + + int parseIntegerInternal(int p, int pe) { + int cs = EVIL; + + %% write init; + int memo = p; + %% write exec; + + if (cs < JSON_integer_first_final) { + return -1; + } + + return p; + } + + RubyInteger createInteger(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return bytesToInum(runtime, num); + } + + RubyInteger bytesToInum(Ruby runtime, ByteList num) { + return runtime.is1_9() ? + ConvertBytes.byteListToInum19(runtime, num, 10, true) : + ConvertBytes.byteListToInum(runtime, num, 10, true); + } + + %%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { + fhold; + fbreak; + } + + main := '-'? + ( ( ( '0' | [1-9][0-9]* ) '.' [0-9]+ ( [Ee] [+\-]?[0-9]+ )? ) + | ( ( '0' | [1-9][0-9]* ) ( [Ee] [+\-]? [0-9]+ ) ) ) + ( ^[0-9Ee.\-]? @exit ); + }%% + + void parseFloat(ParserResult res, int p, int pe) { + int new_p = parseFloatInternal(p, pe); + if (new_p == -1) { + res.update(null, p); + return; + } + IRubyObject number = parser.decimalClass == null ? 
+ createFloat(p, new_p) : createCustomDecimal(p, new_p); + + res.update(number, new_p + 1); + return; + } + + int parseFloatInternal(int p, int pe) { + int cs = EVIL; + + %% write init; + int memo = p; + %% write exec; + + if (cs < JSON_float_first_final) { + return -1; + } + + return p; + } + + RubyFloat createFloat(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + return RubyFloat.newFloat(runtime, dc.parse(num, true, runtime.is1_9())); + } + + IRubyObject createCustomDecimal(int p, int new_p) { + Ruby runtime = getRuntime(); + ByteList num = absSubSequence(p, new_p); + IRubyObject numString = runtime.newString(num.toString()); + return parser.decimalClass.callMethod(context, "new", numString); + } + + %%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + int offset = byteList.begin(); + ByteList decoded = decoder.decode(byteList, memo + 1 - offset, + p - offset); + result = getRuntime().newString(decoded); + if (result == null) { + fhold; + fbreak; + } else { + fexec p + 1; + } + } + + action exit { + fhold; + fbreak; + } + + main := '"' + ( ( ^(["\\]|0..0x1f) + | '\\'["\\/bfnrt] + | '\\u'[0-9a-fA-F]{4} + | '\\'^(["\\/bfnrtu]|0..0x1f) + )* %parse_string + ) '"' @exit; + }%% + + void parseString(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject result = null; + + %% write init; + int memo = p; + %% write exec; + + if (parser.createAdditions) { + RubyHash matchString = parser.match_string; + if (matchString != null) { + final IRubyObject[] memoArray = { result, null }; + try { + matchString.visitAll(new RubyHash.Visitor() { + @Override + public void visit(IRubyObject pattern, IRubyObject klass) { + if (pattern.callMethod(context, "===", memoArray[0]).isTrue()) { + memoArray[1] = klass; + throw JumpException.SPECIAL_JUMP; + } + } + }); + } catch (JumpException e) { } + if (memoArray[1] != null) { + RubyClass klass = (RubyClass) memoArray[1]; + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + result = klass.callMethod(context, "json_create", result); + } + } + } + } + + if (cs >= JSON_string_first_final && result != null) { + if (result instanceof RubyString) { + ((RubyString)result).force_encoding(context, info.utf8.get()); + } + res.update(result, p + 1); + } else { + res.update(null, p + 1); + } + } + + %%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + if (parser.arrayClass == getRuntime().getArray()) { + ((RubyArray)result).append(res.result); + } else { + result.callMethod(context, "<<", res.result); + } + fexec res.p; + } + } + + action exit { + fhold; + fbreak; + } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array + ignore* + ( ( begin_value >parse_value + ignore* ) + ( ignore* + next_element + ignore* )* )? 
+ ignore* + end_array @exit; + }%% + + void parseArray(ParserResult res, int p, int pe) { + int cs = EVIL; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + IRubyObject result; + if (parser.arrayClass == getRuntime().getArray()) { + result = RubyArray.newArray(getRuntime()); + } else { + result = parser.arrayClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + %% write init; + %% write exec; + + if (cs >= JSON_array_first_final) { + res.update(result, p + 1); + } else { + throw unexpectedToken(p, pe); + } + } + + %%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + if (parser.objectClass == getRuntime().getHash()) { + ((RubyHash)result).op_aset(context, lastName, res.result); + } else { + result.callMethod(context, "[]=", new IRubyObject[] { lastName, res.result }); + } + fexec res.p; + } + } + + action parse_name { + parseString(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + RubyString name = (RubyString)res.result; + if (parser.symbolizeNames) { + lastName = context.getRuntime().is1_9() + ? name.intern19() + : name.intern(); + } else { + lastName = name; + } + fexec res.p; + } + } + + action exit { + fhold; + fbreak; + } + + pair = ignore* begin_name >parse_name ignore* name_separator + ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object (pair (next_pair)*)? ignore* end_object + ) @exit; + }%% + + void parseObject(ParserResult res, int p, int pe) { + int cs = EVIL; + IRubyObject lastName = null; + boolean objectDefault = true; + + if (parser.maxNesting > 0 && currentNesting > parser.maxNesting) { + throw newException(Utils.M_NESTING_ERROR, + "nesting of " + currentNesting + " is too deep"); + } + + // this is guaranteed to be a RubyHash due to the earlier + // allocator test at OptionsReader#getClass + IRubyObject result; + if (parser.objectClass == getRuntime().getHash()) { + result = RubyHash.newHash(getRuntime()); + } else { + objectDefault = false; + result = parser.objectClass.newInstance(context, + IRubyObject.NULL_ARRAY, Block.NULL_BLOCK); + } + + %% write init; + %% write exec; + + if (cs < JSON_object_first_final) { + res.update(null, p + 1); + return; + } + + IRubyObject returnedResult = result; + + // attempt to de-serialize object + if (parser.createAdditions) { + IRubyObject vKlassName; + if (objectDefault) { + vKlassName = ((RubyHash)result).op_aref(context, parser.createId); + } else { + vKlassName = result.callMethod(context, "[]", parser.createId); + } + + if (!vKlassName.isNil()) { + // might throw ArgumentError, we let it propagate + IRubyObject klass = parser.info.jsonModule.get(). 
+ callMethod(context, "deep_const_get", vKlassName); + if (klass.respondsTo("json_creatable?") && + klass.callMethod(context, "json_creatable?").isTrue()) { + + returnedResult = klass.callMethod(context, "json_create", result); + } + } + } + res.update(returnedResult, p + 1); + } + + %%{ + machine JSON; + include JSON_common; + + write data; + + action parse_value { + parseValue(res, fpc, pe); + if (res.result == null) { + fhold; + fbreak; + } else { + result = res.result; + fexec res.p; + } + } + + main := ignore* + ( begin_value >parse_value) + ignore*; + }%% + + public IRubyObject parseImplemetation() { + int cs = EVIL; + int p, pe; + IRubyObject result = null; + ParserResult res = new ParserResult(); + + %% write init; + p = byteList.begin(); + pe = p + byteList.length(); + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + throw unexpectedToken(p, pe); + } + } + + public IRubyObject parse() { + return parseImplemetation(); + } + + /** + * Updates the "view" bytelist with the new offsets and returns it. + * @param start + * @param end + */ + private ByteList absSubSequence(int absStart, int absEnd) { + view.setBegin(absStart); + view.setRealSize(absEnd - absStart); + return view; + } + + /** + * Retrieves a constant directly descended from the JSON module. + * @param name The constant name + */ + private IRubyObject getConstant(String name) { + return parser.info.jsonModule.get().getConstant(name); + } + + private RaiseException newException(String className, String message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, RubyString message) { + return Utils.newException(context, className, message); + } + + private RaiseException newException(String className, + String messageBegin, ByteList messageEnd) { + return newException(className, + getRuntime().newString(messageBegin).cat(messageEnd)); + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/ParserService.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/ParserService.java new file mode 100644 index 0000000..b6015f9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/ParserService.java @@ -0,0 +1,34 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.io.IOException; +import java.lang.ref.WeakReference; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.runtime.load.BasicLibraryService; + +/** + * The service invoked by JRuby's {@link org.jruby.runtime.load.LoadService LoadService}. + * Defines the JSON::Ext::Parser class. 
+ * @author mernen + */ +public class ParserService implements BasicLibraryService { + public boolean basicLoad(Ruby runtime) throws IOException { + runtime.getLoadService().require("json/common"); + RuntimeInfo info = RuntimeInfo.initRuntime(runtime); + + info.jsonModule = new WeakReference(runtime.defineModule("JSON")); + RubyModule jsonExtModule = info.jsonModule.get().defineModuleUnder("Ext"); + RubyClass parserClass = + jsonExtModule.defineClassUnder("Parser", runtime.getObject(), + Parser.ALLOCATOR); + parserClass.defineAnnotatedMethods(Parser.class); + return true; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo.java new file mode 100644 index 0000000..2323bd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/RuntimeInfo.java @@ -0,0 +1,116 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import java.lang.ref.WeakReference; +import java.util.HashMap; +import java.util.Map; +import java.util.WeakHashMap; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyEncoding; +import org.jruby.RubyModule; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + + +final class RuntimeInfo { + // since the vast majority of cases runs just one runtime, + // we optimize for that + private static WeakReference runtime1 = new WeakReference(null); + private static RuntimeInfo info1; + // store remaining runtimes here (does not include runtime1) + private static Map runtimes; + + // these fields are filled by the service loaders + // Use WeakReferences so that RuntimeInfo doesn't indirectly hold a hard reference to + // the Ruby runtime object, which would cause memory leaks in the runtimes map above. 
+ /** JSON */ + WeakReference jsonModule; + /** JSON::Ext::Generator::GeneratorMethods::String::Extend */ + WeakReference stringExtendModule; + /** JSON::Ext::Generator::State */ + WeakReference generatorStateClass; + /** JSON::SAFE_STATE_PROTOTYPE */ + WeakReference safeStatePrototype; + + final WeakReference utf8; + final WeakReference ascii8bit; + // other encodings + private final Map> encodings; + + private RuntimeInfo(Ruby runtime) { + RubyClass encodingClass = runtime.getEncoding(); + if (encodingClass == null) { // 1.8 mode + utf8 = ascii8bit = null; + encodings = null; + } else { + ThreadContext context = runtime.getCurrentContext(); + + utf8 = new WeakReference((RubyEncoding)RubyEncoding.find(context, + encodingClass, runtime.newString("utf-8"))); + ascii8bit = new WeakReference((RubyEncoding)RubyEncoding.find(context, + encodingClass, runtime.newString("ascii-8bit"))); + encodings = new HashMap>(); + } + } + + static RuntimeInfo initRuntime(Ruby runtime) { + synchronized (RuntimeInfo.class) { + if (runtime1.get() == runtime) { + return info1; + } else if (runtime1.get() == null) { + runtime1 = new WeakReference(runtime); + info1 = new RuntimeInfo(runtime); + return info1; + } else { + if (runtimes == null) { + runtimes = new WeakHashMap(1); + } + RuntimeInfo cache = runtimes.get(runtime); + if (cache == null) { + cache = new RuntimeInfo(runtime); + runtimes.put(runtime, cache); + } + return cache; + } + } + } + + public static RuntimeInfo forRuntime(Ruby runtime) { + synchronized (RuntimeInfo.class) { + if (runtime1.get() == runtime) return info1; + RuntimeInfo cache = null; + if (runtimes != null) cache = runtimes.get(runtime); + assert cache != null : "Runtime given has not initialized JSON::Ext"; + return cache; + } + } + + public RubyEncoding getEncoding(ThreadContext context, String name) { + synchronized (encodings) { + WeakReference encoding = encodings.get(name); + if (encoding == null) { + Ruby runtime = context.getRuntime(); + encoding = new WeakReference((RubyEncoding)RubyEncoding.find(context, + runtime.getEncoding(), runtime.newString(name))); + encodings.put(name, encoding); + } + return encoding.get(); + } + } + + public GeneratorState getSafeStatePrototype(ThreadContext context) { + if (safeStatePrototype == null) { + IRubyObject value = jsonModule.get().getConstant("SAFE_STATE_PROTOTYPE"); + if (!(value instanceof GeneratorState)) { + throw context.getRuntime().newTypeError(value, generatorStateClass.get()); + } + safeStatePrototype = new WeakReference((GeneratorState)value); + } + return safeStatePrototype.get(); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/StringDecoder.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/StringDecoder.java new file mode 100644 index 0000000..76cf183 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/StringDecoder.java @@ -0,0 +1,166 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * A decoder that reads a JSON-encoded string from the given sources and + * returns its decoded form on a new ByteList. Escaped Unicode characters + * are encoded as UTF-8. 
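The decoder's byte-level behaviour is easy to spot-check from Ruby once the extension is loaded; a small sketch (the two escape sequences are arbitrary samples, not taken from the sources):

    require 'json'

    JSON.parse('"\u00e9"').bytes        # => [195, 169]            ("é" re-encoded as UTF-8)
    JSON.parse('"\ud83d\ude00"').bytes  # => [240, 159, 152, 128]  (U+1F600, combined from a surrogate pair)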
+ */ +final class StringDecoder extends ByteListTranscoder { + /** + * Stores the offset of the high surrogate when reading a surrogate pair, + * or -1 when not. + */ + private int surrogatePairStart = -1; + + // Array used for writing multi-byte characters into the buffer at once + private final byte[] aux = new byte[4]; + + StringDecoder(ThreadContext context) { + super(context); + } + + ByteList decode(ByteList src, int start, int end) { + ByteList out = new ByteList(end - start); + out.setEncoding(src.getEncoding()); + init(src, start, end, out); + while (hasNext()) { + handleChar(readUtf8Char()); + } + quoteStop(pos); + return out; + } + + private void handleChar(int c) { + if (c == '\\') { + quoteStop(charStart); + handleEscapeSequence(); + } else { + quoteStart(); + } + } + + private void handleEscapeSequence() { + ensureMin(1); + switch (readUtf8Char()) { + case 'b': + append('\b'); + break; + case 'f': + append('\f'); + break; + case 'n': + append('\n'); + break; + case 'r': + append('\r'); + break; + case 't': + append('\t'); + break; + case 'u': + ensureMin(4); + int cp = readHex(); + if (Character.isHighSurrogate((char)cp)) { + handleLowSurrogate((char)cp); + } else if (Character.isLowSurrogate((char)cp)) { + // low surrogate with no high surrogate + throw invalidUtf8(); + } else { + writeUtf8Char(cp); + } + break; + default: // '\\', '"', '/'... + quoteStart(); + } + } + + private void handleLowSurrogate(char highSurrogate) { + surrogatePairStart = charStart; + ensureMin(1); + int lowSurrogate = readUtf8Char(); + + if (lowSurrogate == '\\') { + ensureMin(5); + if (readUtf8Char() != 'u') throw invalidUtf8(); + lowSurrogate = readHex(); + } + + if (Character.isLowSurrogate((char)lowSurrogate)) { + writeUtf8Char(Character.toCodePoint(highSurrogate, + (char)lowSurrogate)); + surrogatePairStart = -1; + } else { + throw invalidUtf8(); + } + } + + private void writeUtf8Char(int codePoint) { + if (codePoint < 0x80) { + append(codePoint); + } else if (codePoint < 0x800) { + aux[0] = (byte)(0xc0 | (codePoint >>> 6)); + aux[1] = tailByte(codePoint & 0x3f); + append(aux, 0, 2); + } else if (codePoint < 0x10000) { + aux[0] = (byte)(0xe0 | (codePoint >>> 12)); + aux[1] = tailByte(codePoint >>> 6); + aux[2] = tailByte(codePoint); + append(aux, 0, 3); + } else { + aux[0] = (byte)(0xf0 | codePoint >>> 18); + aux[1] = tailByte(codePoint >>> 12); + aux[2] = tailByte(codePoint >>> 6); + aux[3] = tailByte(codePoint); + append(aux, 0, 4); + } + } + + private byte tailByte(int value) { + return (byte)(0x80 | (value & 0x3f)); + } + + /** + * Reads a 4-digit unsigned hexadecimal number from the source. + */ + private int readHex() { + int numberStart = pos; + int result = 0; + int length = 4; + for (int i = 0; i < length; i++) { + int digit = readUtf8Char(); + int digitValue; + if (digit >= '0' && digit <= '9') { + digitValue = digit - '0'; + } else if (digit >= 'a' && digit <= 'f') { + digitValue = 10 + digit - 'a'; + } else if (digit >= 'A' && digit <= 'F') { + digitValue = 10 + digit - 'A'; + } else { + throw new NumberFormatException("Invalid base 16 number " + + src.subSequence(numberStart, numberStart + length)); + } + result = result * 16 + digitValue; + } + return result; + } + + @Override + protected RaiseException invalidUtf8() { + ByteList message = new ByteList( + ByteList.plain("partial character in source, " + + "but hit end near ")); + int start = surrogatePairStart != -1 ? 
surrogatePairStart : charStart; + message.append(src, start, srcEnd - start); + return Utils.newException(context, Utils.M_PARSER_ERROR, + context.getRuntime().newString(message)); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/StringEncoder.java b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/StringEncoder.java new file mode 100644 index 0000000..9d40dd3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/StringEncoder.java @@ -0,0 +1,111 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.ThreadContext; +import org.jruby.util.ByteList; + +/** + * An encoder that reads from the given source and outputs its representation + * to another ByteList. The source string is fully checked for UTF-8 validity, + * and throws a GeneratorError if any problem is found. + */ +final class StringEncoder extends ByteListTranscoder { + private final boolean asciiOnly; + + // Escaped characters will reuse this array, to avoid new allocations + // or appending them byte-by-byte + private final byte[] aux = + new byte[] {/* First unicode character */ + '\\', 'u', 0, 0, 0, 0, + /* Second unicode character (for surrogate pairs) */ + '\\', 'u', 0, 0, 0, 0, + /* "\X" characters */ + '\\', 0}; + // offsets on the array above + private static final int ESCAPE_UNI1_OFFSET = 0; + private static final int ESCAPE_UNI2_OFFSET = ESCAPE_UNI1_OFFSET + 6; + private static final int ESCAPE_CHAR_OFFSET = ESCAPE_UNI2_OFFSET + 6; + /** Array used for code point decomposition in surrogates */ + private final char[] utf16 = new char[2]; + + private static final byte[] HEX = + new byte[] {'0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + StringEncoder(ThreadContext context, boolean asciiOnly) { + super(context); + this.asciiOnly = asciiOnly; + } + + void encode(ByteList src, ByteList out) { + init(src, out); + append('"'); + while (hasNext()) { + handleChar(readUtf8Char()); + } + quoteStop(pos); + append('"'); + } + + private void handleChar(int c) { + switch (c) { + case '"': + case '\\': + escapeChar((char)c); + break; + case '\n': + escapeChar('n'); + break; + case '\r': + escapeChar('r'); + break; + case '\t': + escapeChar('t'); + break; + case '\f': + escapeChar('f'); + break; + case '\b': + escapeChar('b'); + break; + default: + if (c >= 0x20 && c <= 0x7f || + (c >= 0x80 && !asciiOnly)) { + quoteStart(); + } else { + quoteStop(charStart); + escapeUtf8Char(c); + } + } + } + + private void escapeChar(char c) { + quoteStop(charStart); + aux[ESCAPE_CHAR_OFFSET + 1] = (byte)c; + append(aux, ESCAPE_CHAR_OFFSET, 2); + } + + private void escapeUtf8Char(int codePoint) { + int numChars = Character.toChars(codePoint, utf16, 0); + escapeCodeUnit(utf16[0], ESCAPE_UNI1_OFFSET + 2); + if (numChars > 1) escapeCodeUnit(utf16[1], ESCAPE_UNI2_OFFSET + 2); + append(aux, ESCAPE_UNI1_OFFSET, 6 * numChars); + } + + private void escapeCodeUnit(char c, int auxOffset) { + for (int i = 0; i < 4; i++) { + aux[auxOffset + i] = HEX[(c >>> (12 - 4 * i)) & 0xf]; + } + } + + @Override + protected RaiseException invalidUtf8() { + return Utils.newException(context, Utils.M_GENERATOR_ERROR, + "source sequence is illegal/malformed utf-8"); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Utils.java 
b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Utils.java new file mode 100644 index 0000000..ed6f832 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/java/src/json/ext/Utils.java @@ -0,0 +1,88 @@ +/* + * This code is copyrighted work by Daniel Luz . + * + * Distributed under the Ruby license: https://www.ruby-lang.org/en/about/license.txt + */ +package json.ext; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyException; +import org.jruby.RubyHash; +import org.jruby.RubyString; +import org.jruby.exceptions.RaiseException; +import org.jruby.runtime.Block; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +/** + * Library of miscellaneous utility functions + */ +final class Utils { + public static final String M_GENERATOR_ERROR = "GeneratorError"; + public static final String M_NESTING_ERROR = "NestingError"; + public static final String M_PARSER_ERROR = "ParserError"; + + private Utils() { + throw new RuntimeException(); + } + + /** + * Safe {@link RubyArray} type-checking. + * Returns the given object if it is an Array, + * or throws an exception if not. + * @param object The object to test + * @return The given object if it is an Array + * @throws RaiseException TypeError if the object is not + * of the expected type + */ + static RubyArray ensureArray(IRubyObject object) throws RaiseException { + if (object instanceof RubyArray) return (RubyArray)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getArray()); + } + + static RubyHash ensureHash(IRubyObject object) throws RaiseException { + if (object instanceof RubyHash) return (RubyHash)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getHash()); + } + + static RubyString ensureString(IRubyObject object) throws RaiseException { + if (object instanceof RubyString) return (RubyString)object; + Ruby runtime = object.getRuntime(); + throw runtime.newTypeError(object, runtime.getString()); + } + + static RaiseException newException(ThreadContext context, + String className, String message) { + return newException(context, className, + context.getRuntime().newString(message)); + } + + static RaiseException newException(ThreadContext context, + String className, RubyString message) { + RuntimeInfo info = RuntimeInfo.forRuntime(context.getRuntime()); + RubyClass klazz = info.jsonModule.get().getClass(className); + RubyException excptn = + (RubyException)klazz.newInstance(context, + new IRubyObject[] {message}, Block.NULL_BLOCK); + return new RaiseException(excptn); + } + + static byte[] repeat(ByteList a, int n) { + return repeat(a.unsafeBytes(), a.begin(), a.length(), n); + } + + static byte[] repeat(byte[] a, int begin, int length, int n) { + if (length == 0) return ByteList.NULL_ARRAY; + int resultLen = length * n; + byte[] result = new byte[resultLen]; + for (int pos = 0; pos < resultLen; pos += length) { + System.arraycopy(a, begin, result, pos, length); + } + return result; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json-java.gemspec b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json-java.gemspec new file mode 100755 index 0000000..1bcd639 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json-java.gemspec @@ -0,0 +1,37 @@ +#!/usr/bin/env jruby +require "rubygems" + +spec = Gem::Specification.new do |s| + s.name = "json" + s.version = File.read("VERSION").chomp + s.summary = 
"JSON implementation for JRuby" + s.description = "A JSON implementation as a JRuby extension." + s.author = "Daniel Luz" + s.email = "dev+ruby@mernen.com" + s.homepage = "http://flori.github.com/json" + s.platform = 'java' + s.licenses = ["Ruby"] + + s.files = Dir["{docs,lib,tests}/**/*"] + + if s.respond_to? :specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + s.add_development_dependency(%q, [">= 0"]) + s.add_development_dependency(%q, [">= 2.0", "< 4.0"]) + else + s.add_dependency(%q, [">= 0"]) + s.add_dependency(%q, [">= 2.0", "< 4.0"]) + end + else + s.add_dependency(%q, [">= 0"]) + s.add_dependency(%q, [">= 2.0", "< 4.0"]) + end +end + +if $0 == __FILE__ + Gem::Builder.new(spec).build +else + spec +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json.gemspec b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json.gemspec new file mode 100644 index 0000000..0c72e82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json.gemspec @@ -0,0 +1,139 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json" + s.version = File.read('VERSION').chomp + + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib"] + s.authors = ["Florian Frank"] + s.description = "This is a JSON implementation as a Ruby extension in C." + s.email = "flori@ping.de" + s.extensions = ["ext/json/ext/generator/extconf.rb", "ext/json/ext/parser/extconf.rb", "ext/json/extconf.rb"] + s.extra_rdoc_files = ["README.md"] + s.files = [ + ".gitignore", + ".travis.yml", + "CHANGES.md", + "Gemfile", + "README-json-jruby.md", + "README.md", + "Rakefile", + "VERSION", + "diagrams/.keep", + "ext/json/ext/fbuffer/fbuffer.h", + "ext/json/ext/generator/depend", + "ext/json/ext/generator/extconf.rb", + "ext/json/ext/generator/generator.c", + "ext/json/ext/generator/generator.h", + "ext/json/ext/parser/depend", + "ext/json/ext/parser/extconf.rb", + "ext/json/ext/parser/parser.c", + "ext/json/ext/parser/parser.h", + "ext/json/ext/parser/parser.rl", + "ext/json/extconf.rb", + "install.rb", + "java/src/json/ext/ByteListTranscoder.java", + "java/src/json/ext/Generator.java", + "java/src/json/ext/GeneratorMethods.java", + "java/src/json/ext/GeneratorService.java", + "java/src/json/ext/GeneratorState.java", + "java/src/json/ext/OptionsReader.java", + "java/src/json/ext/Parser.java", + "java/src/json/ext/Parser.rl", + "java/src/json/ext/ParserService.java", + "java/src/json/ext/RuntimeInfo.java", + "java/src/json/ext/StringDecoder.java", + "java/src/json/ext/StringEncoder.java", + "java/src/json/ext/Utils.java", + "json-java.gemspec", + "json.gemspec", + "json_pure.gemspec", + "lib/json.rb", + "lib/json/add/bigdecimal.rb", + "lib/json/add/complex.rb", + "lib/json/add/core.rb", + "lib/json/add/date.rb", + "lib/json/add/date_time.rb", + "lib/json/add/exception.rb", + "lib/json/add/ostruct.rb", + "lib/json/add/range.rb", + "lib/json/add/rational.rb", + "lib/json/add/regexp.rb", + "lib/json/add/set.rb", + "lib/json/add/struct.rb", + "lib/json/add/symbol.rb", + "lib/json/add/time.rb", + "lib/json/common.rb", + "lib/json/ext.rb", + "lib/json/ext/.keep", + "lib/json/generic_object.rb", + "lib/json/pure.rb", + "lib/json/pure/generator.rb", + "lib/json/pure/parser.rb", + "lib/json/version.rb", + "references/rfc7159.txt", + "tests/fixtures/fail10.json", + "tests/fixtures/fail11.json", + "tests/fixtures/fail12.json", + "tests/fixtures/fail13.json", + 
"tests/fixtures/fail14.json", + "tests/fixtures/fail18.json", + "tests/fixtures/fail19.json", + "tests/fixtures/fail2.json", + "tests/fixtures/fail20.json", + "tests/fixtures/fail21.json", + "tests/fixtures/fail22.json", + "tests/fixtures/fail23.json", + "tests/fixtures/fail24.json", + "tests/fixtures/fail25.json", + "tests/fixtures/fail27.json", + "tests/fixtures/fail28.json", + "tests/fixtures/fail3.json", + "tests/fixtures/fail4.json", + "tests/fixtures/fail5.json", + "tests/fixtures/fail6.json", + "tests/fixtures/fail7.json", + "tests/fixtures/fail8.json", + "tests/fixtures/fail9.json", + "tests/fixtures/obsolete_fail1.json", + "tests/fixtures/pass1.json", + "tests/fixtures/pass15.json", + "tests/fixtures/pass16.json", + "tests/fixtures/pass17.json", + "tests/fixtures/pass2.json", + "tests/fixtures/pass26.json", + "tests/fixtures/pass3.json", + "tests/json_addition_test.rb", + "tests/json_common_interface_test.rb", + "tests/json_encoding_test.rb", + "tests/json_ext_parser_test.rb", + "tests/json_fixtures_test.rb", + "tests/json_generator_test.rb", + "tests/json_generic_object_test.rb", + "tests/json_parser_test.rb", + "tests/json_string_matching_test.rb", + "tests/test_helper.rb", + "tests/test_helper.rb", + "tools/diff.sh", + "tools/fuzz.rb", + "tools/server.rb", + ] + s.homepage = "http://flori.github.com/json" + s.metadata = { + 'bug_tracker_uri' => 'https://github.com/flori/json/issues', + 'changelog_uri' => 'https://github.com/flori/json/blob/master/CHANGES.md', + 'documentation_uri' => 'http://flori.github.io/json/doc/index.html', + 'homepage_uri' => 'http://flori.github.io/json/', + 'source_code_uri' => 'https://github.com/flori/json', + 'wiki_uri' => 'https://github.com/flori/json/wiki' + } + s.licenses = ["Ruby"] + s.rdoc_options = ["--title", "JSON implemention for Ruby", "--main", "README.md"] + s.required_ruby_version = Gem::Requirement.new(">= 2.0") + s.summary = "JSON Implementation for Ruby" + s.test_files = ["tests/test_helper.rb"] + + s.add_development_dependency("rake", [">= 0"]) + s.add_development_dependency("test-unit", [">= 2.0", "< 4.0"]) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json_pure.gemspec b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json_pure.gemspec new file mode 100644 index 0000000..007acd3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/json_pure.gemspec @@ -0,0 +1,33 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json_pure".freeze + s.version = File.read("VERSION").chomp + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Florian Frank".freeze] + s.description = "This is a JSON implementation in pure Ruby.".freeze + s.email = "flori@ping.de".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["./tests/test_helper.rb".freeze, ".gitignore".freeze, ".travis.yml".freeze, "CHANGES.md".freeze, "Gemfile".freeze, "LICENSE".freeze, "README-json-jruby.md".freeze, "README.md".freeze, "Rakefile".freeze, "VERSION".freeze, "diagrams/.keep".freeze, "ext/json/ext/fbuffer/fbuffer.h".freeze, "ext/json/ext/generator/depend".freeze, "ext/json/ext/generator/extconf.rb".freeze, "ext/json/ext/generator/generator.c".freeze, "ext/json/ext/generator/generator.h".freeze, "ext/json/ext/parser/depend".freeze, "ext/json/ext/parser/extconf.rb".freeze, "ext/json/ext/parser/parser.c".freeze, "ext/json/ext/parser/parser.h".freeze, "ext/json/ext/parser/parser.rl".freeze, "ext/json/extconf.rb".freeze, "install.rb".freeze, "java/src/json/ext/ByteListTranscoder.java".freeze, "java/src/json/ext/Generator.java".freeze, "java/src/json/ext/GeneratorMethods.java".freeze, "java/src/json/ext/GeneratorService.java".freeze, "java/src/json/ext/GeneratorState.java".freeze, "java/src/json/ext/OptionsReader.java".freeze, "java/src/json/ext/Parser.java".freeze, "java/src/json/ext/Parser.rl".freeze, "java/src/json/ext/ParserService.java".freeze, "java/src/json/ext/RuntimeInfo.java".freeze, "java/src/json/ext/StringDecoder.java".freeze, "java/src/json/ext/StringEncoder.java".freeze, "java/src/json/ext/Utils.java".freeze, "json-java.gemspec".freeze, "json.gemspec".freeze, "json_pure.gemspec".freeze, "lib/json.rb".freeze, "lib/json/add/bigdecimal.rb".freeze, "lib/json/add/complex.rb".freeze, "lib/json/add/core.rb".freeze, "lib/json/add/date.rb".freeze, "lib/json/add/date_time.rb".freeze, "lib/json/add/exception.rb".freeze, "lib/json/add/ostruct.rb".freeze, "lib/json/add/range.rb".freeze, "lib/json/add/rational.rb".freeze, "lib/json/add/regexp.rb".freeze, "lib/json/add/set.rb".freeze, "lib/json/add/struct.rb".freeze, "lib/json/add/symbol.rb".freeze, "lib/json/add/time.rb".freeze, "lib/json/common.rb".freeze, "lib/json/ext.rb".freeze, "lib/json/ext/.keep".freeze, "lib/json/generic_object.rb".freeze, "lib/json/pure.rb".freeze, "lib/json/pure/generator.rb".freeze, "lib/json/pure/parser.rb".freeze, "lib/json/version.rb".freeze, "references/rfc7159.txt".freeze, "tests/fixtures/fail10.json".freeze, "tests/fixtures/fail11.json".freeze, "tests/fixtures/fail12.json".freeze, "tests/fixtures/fail13.json".freeze, "tests/fixtures/fail14.json".freeze, "tests/fixtures/fail18.json".freeze, "tests/fixtures/fail19.json".freeze, "tests/fixtures/fail2.json".freeze, "tests/fixtures/fail20.json".freeze, "tests/fixtures/fail21.json".freeze, "tests/fixtures/fail22.json".freeze, "tests/fixtures/fail23.json".freeze, "tests/fixtures/fail24.json".freeze, "tests/fixtures/fail25.json".freeze, "tests/fixtures/fail27.json".freeze, "tests/fixtures/fail28.json".freeze, "tests/fixtures/fail3.json".freeze, "tests/fixtures/fail4.json".freeze, "tests/fixtures/fail5.json".freeze, "tests/fixtures/fail6.json".freeze, "tests/fixtures/fail7.json".freeze, "tests/fixtures/fail8.json".freeze, "tests/fixtures/fail9.json".freeze, "tests/fixtures/obsolete_fail1.json".freeze, "tests/fixtures/pass1.json".freeze, "tests/fixtures/pass15.json".freeze, "tests/fixtures/pass16.json".freeze, "tests/fixtures/pass17.json".freeze, "tests/fixtures/pass2.json".freeze, "tests/fixtures/pass26.json".freeze, 
"tests/fixtures/pass3.json".freeze, "tests/json_addition_test.rb".freeze, "tests/json_common_interface_test.rb".freeze, "tests/json_encoding_test.rb".freeze, "tests/json_ext_parser_test.rb".freeze, "tests/json_fixtures_test.rb".freeze, "tests/json_generator_test.rb".freeze, "tests/json_generic_object_test.rb".freeze, "tests/json_parser_test.rb".freeze, "tests/json_string_matching_test.rb".freeze, "tests/test_helper.rb".freeze, "tools/diff.sh".freeze, "tools/fuzz.rb".freeze, "tools/server.rb".freeze] + s.homepage = "http://flori.github.com/json".freeze + s.licenses = ["Ruby".freeze] + s.rdoc_options = ["--title".freeze, "JSON implemention for ruby".freeze, "--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.1.2".freeze + s.summary = "JSON Implementation for Ruby".freeze + s.test_files = ["./tests/test_helper.rb".freeze] + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q.freeze, [">= 0"]) + s.add_development_dependency(%q.freeze, [">= 2.0", "< 4.0"]) + else + s.add_dependency(%q.freeze, [">= 0"]) + s.add_dependency(%q.freeze, [">= 2.0", "< 4.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json.rb new file mode 100644 index 0000000..3eb59f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json.rb @@ -0,0 +1,412 @@ +#frozen_string_literal: false +require 'json/common' + +## +# = JavaScript \Object Notation (\JSON) +# +# \JSON is a lightweight data-interchange format. +# +# A \JSON value is one of the following: +# - Double-quoted text: "foo". +# - Number: +1+, +1.0+, +2.0e2+. +# - Boolean: +true+, +false+. +# - Null: +null+. +# - \Array: an ordered list of values, enclosed by square brackets: +# ["foo", 1, 1.0, 2.0e2, true, false, null] +# +# - \Object: a collection of name/value pairs, enclosed by curly braces; +# each name is double-quoted text; +# the values may be any \JSON values: +# {"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null} +# +# A \JSON array or object may contain nested arrays, objects, and scalars +# to any depth: +# {"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]} +# [{"foo": 0, "bar": 1}, ["baz", 2]] +# +# == Using \Module \JSON +# +# To make module \JSON available in your code, begin with: +# require 'json' +# +# All examples here assume that this has been done. +# +# === Parsing \JSON +# +# You can parse a \String containing \JSON data using +# either of two methods: +# - JSON.parse(source, opts) +# - JSON.parse!(source, opts) +# +# where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# The difference between the two methods +# is that JSON.parse! omits some checks +# and may not be safe for some +source+ data; +# use it only for data from trusted sources. +# Use the safer method JSON.parse for less trusted sources. 
+# +# ==== Parsing \JSON Arrays +# +# When +source+ is a \JSON array, JSON.parse by default returns a Ruby \Array: +# json = '["foo", 1, 1.0, 2.0e2, true, false, null]' +# ruby = JSON.parse(json) +# ruby # => ["foo", 1, 1.0, 200.0, true, false, nil] +# ruby.class # => Array +# +# The \JSON array may contain nested arrays, objects, and scalars +# to any depth: +# json = '[{"foo": 0, "bar": 1}, ["baz", 2]]' +# JSON.parse(json) # => [{"foo"=>0, "bar"=>1}, ["baz", 2]] +# +# ==== Parsing \JSON \Objects +# +# When the source is a \JSON object, JSON.parse by default returns a Ruby \Hash: +# json = '{"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null}' +# ruby = JSON.parse(json) +# ruby # => {"a"=>"foo", "b"=>1, "c"=>1.0, "d"=>200.0, "e"=>true, "f"=>false, "g"=>nil} +# ruby.class # => Hash +# +# The \JSON object may contain nested arrays, objects, and scalars +# to any depth: +# json = '{"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]}' +# JSON.parse(json) # => {"foo"=>{"bar"=>1, "baz"=>2}, "bat"=>[0, 1, 2]} +# +# ==== Parsing \JSON Scalars +# +# When the source is a \JSON scalar (not an array or object), +# JSON.parse returns a Ruby scalar. +# +# \String: +# ruby = JSON.parse('"foo"') +# ruby # => 'foo' +# ruby.class # => String +# \Integer: +# ruby = JSON.parse('1') +# ruby # => 1 +# ruby.class # => Integer +# \Float: +# ruby = JSON.parse('1.0') +# ruby # => 1.0 +# ruby.class # => Float +# ruby = JSON.parse('2.0e2') +# ruby # => 200 +# ruby.class # => Float +# Boolean: +# ruby = JSON.parse('true') +# ruby # => true +# ruby.class # => TrueClass +# ruby = JSON.parse('false') +# ruby # => false +# ruby.class # => FalseClass +# Null: +# ruby = JSON.parse('null') +# ruby # => nil +# ruby.class # => NilClass +# +# === Generating \JSON +# +# To generate a Ruby \String containing \JSON data, +# use method JSON.generate(source, opts), where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# ==== Generating \JSON from Arrays +# +# When the source is a Ruby \Array, JSON.generate returns +# a \String containing a \JSON array: +# ruby = [0, 's', :foo] +# json = JSON.generate(ruby) +# json # => '[0,"s","foo"]' +# +# The Ruby \Array array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = [0, [1, 2], {foo: 3, bar: 4}] +# json = JSON.generate(ruby) +# json # => '[0,[1,2],{"foo":3,"bar":4}]' +# +# ==== Generating \JSON from Hashes +# +# When the source is a Ruby \Hash, JSON.generate returns +# a \String containing a \JSON object: +# ruby = {foo: 0, bar: 's', baz: :bat} +# json = JSON.generate(ruby) +# json # => '{"foo":0,"bar":"s","baz":"bat"}' +# +# The Ruby \Hash array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} +# json = JSON.generate(ruby) +# json # => '{"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"}' +# +# ==== Generating \JSON from Other Objects +# +# When the source is neither an \Array nor a \Hash, +# the generated \JSON data depends on the class of the source. 
+# +# When the source is a Ruby \Integer or \Float, JSON.generate returns +# a \String containing a \JSON number: +# JSON.generate(42) # => '42' +# JSON.generate(0.42) # => '0.42' +# +# When the source is a Ruby \String, JSON.generate returns +# a \String containing a \JSON string (with double-quotes): +# JSON.generate('A string') # => '"A string"' +# +# When the source is +true+, +false+ or +nil+, JSON.generate returns +# a \String containing the corresponding \JSON token: +# JSON.generate(true) # => 'true' +# JSON.generate(false) # => 'false' +# JSON.generate(nil) # => 'null' +# +# When the source is none of the above, JSON.generate returns +# a \String containing a \JSON string representation of the source: +# JSON.generate(:foo) # => '"foo"' +# JSON.generate(Complex(0, 0)) # => '"0+0i"' +# JSON.generate(Dir.new('.')) # => '"#
<Dir:0x0000...>
"' +# +# == \JSON Additions +# +# When you "round trip" a non-\String object from Ruby to \JSON and back, +# you have a new \String, instead of the object you began with: +# ruby0 = Range.new(0, 2) +# json = JSON.generate(ruby0) +# json # => '0..2"' +# ruby1 = JSON.parse(json) +# ruby1 # => '0..2' +# ruby1.class # => String +# +# You can use \JSON _additions_ to preserve the original object. +# The addition is an extension of a ruby class, so that: +# - \JSON.generate stores more information in the \JSON string. +# - \JSON.parse, called with option +create_additions+, +# uses that information to create a proper Ruby object. +# +# This example shows a \Range being generated into \JSON +# and parsed back into Ruby, both without and with +# the addition for \Range: +# ruby = Range.new(0, 2) +# # This passage does not use the addition for Range. +# json0 = JSON.generate(ruby) +# ruby0 = JSON.parse(json0) +# # This passage uses the addition for Range. +# require 'json/add/range' +# json1 = JSON.generate(ruby) +# ruby1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <require 'json/add/bigdecimal' +# - Complex: require 'json/add/complex' +# - Date: require 'json/add/date' +# - DateTime: require 'json/add/date_time' +# - Exception: require 'json/add/exception' +# - OpenStruct: require 'json/add/ostruct' +# - Range: require 'json/add/range' +# - Rational: require 'json/add/rational' +# - Regexp: require 'json/add/regexp' +# - Set: require 'json/add/set' +# - Struct: require 'json/add/struct' +# - Symbol: require 'json/add/symbol' +# - Time: require 'json/add/time' +# +# To reduce punctuation clutter, the examples below +# show the generated \JSON via +puts+, rather than the usual +inspect+, +# +# \BigDecimal: +# require 'json/add/bigdecimal' +# ruby0 = BigDecimal(0) # 0.0 +# json = JSON.generate(ruby0) # {"json_class":"BigDecimal","b":"27:0.0"} +# ruby1 = JSON.parse(json, create_additions: true) # 0.0 +# ruby1.class # => BigDecimal +# +# \Complex: +# require 'json/add/complex' +# ruby0 = Complex(1+0i) # 1+0i +# json = JSON.generate(ruby0) # {"json_class":"Complex","r":1,"i":0} +# ruby1 = JSON.parse(json, create_additions: true) # 1+0i +# ruby1.class # Complex +# +# \Date: +# require 'json/add/date' +# ruby0 = Date.today # 2020-05-02 +# json = JSON.generate(ruby0) # {"json_class":"Date","y":2020,"m":5,"d":2,"sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 +# ruby1.class # Date +# +# \DateTime: +# require 'json/add/date_time' +# ruby0 = DateTime.now # 2020-05-02T10:38:13-05:00 +# json = JSON.generate(ruby0) # {"json_class":"DateTime","y":2020,"m":5,"d":2,"H":10,"M":38,"S":13,"of":"-5/24","sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02T10:38:13-05:00 +# ruby1.class # DateTime +# +# \Exception (and its subclasses including \RuntimeError): +# require 'json/add/exception' +# ruby0 = Exception.new('A message') # A message +# json = JSON.generate(ruby0) # {"json_class":"Exception","m":"A message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # A message +# ruby1.class # Exception +# ruby0 = RuntimeError.new('Another message') # Another message +# json = JSON.generate(ruby0) # {"json_class":"RuntimeError","m":"Another message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # Another message +# ruby1.class # RuntimeError +# +# \OpenStruct: +# require 'json/add/ostruct' +# ruby0 = OpenStruct.new(name: 'Matz', language: 'Ruby') # # +# json = JSON.generate(ruby0) # 
{"json_class":"OpenStruct","t":{"name":"Matz","language":"Ruby"}} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # OpenStruct +# +# \Range: +# require 'json/add/range' +# ruby0 = Range.new(0, 2) # 0..2 +# json = JSON.generate(ruby0) # {"json_class":"Range","a":[0,2,false]} +# ruby1 = JSON.parse(json, create_additions: true) # 0..2 +# ruby1.class # Range +# +# \Rational: +# require 'json/add/rational' +# ruby0 = Rational(1, 3) # 1/3 +# json = JSON.generate(ruby0) # {"json_class":"Rational","n":1,"d":3} +# ruby1 = JSON.parse(json, create_additions: true) # 1/3 +# ruby1.class # Rational +# +# \Regexp: +# require 'json/add/regexp' +# ruby0 = Regexp.new('foo') # (?-mix:foo) +# json = JSON.generate(ruby0) # {"json_class":"Regexp","o":0,"s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # (?-mix:foo) +# ruby1.class # Regexp +# +# \Set: +# require 'json/add/set' +# ruby0 = Set.new([0, 1, 2]) # # +# json = JSON.generate(ruby0) # {"json_class":"Set","a":[0,1,2]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Set +# +# \Struct: +# require 'json/add/struct' +# Customer = Struct.new(:name, :address) # Customer +# ruby0 = Customer.new("Dave", "123 Main") # # +# json = JSON.generate(ruby0) # {"json_class":"Customer","v":["Dave","123 Main"]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Customer + # +# \Symbol: +# require 'json/add/symbol' +# ruby0 = :foo # foo +# json = JSON.generate(ruby0) # {"json_class":"Symbol","s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # foo +# ruby1.class # Symbol +# +# \Time: +# require 'json/add/time' +# ruby0 = Time.now # 2020-05-02 11:28:26 -0500 +# json = JSON.generate(ruby0) # {"json_class":"Time","s":1588436906,"n":840560000} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 11:28:26 -0500 +# ruby1.class # Time +# +# +# === Custom \JSON Additions +# +# In addition to the \JSON additions provided, +# you can craft \JSON additions of your own, +# either for Ruby built-in classes or for user-defined classes. +# +# Here's a user-defined class +Foo+: +# class Foo +# attr_accessor :bar, :baz +# def initialize(bar, baz) +# self.bar = bar +# self.baz = baz +# end +# end +# +# Here's the \JSON addition for it: +# # Extend class Foo with JSON addition. +# class Foo +# # Serialize Foo object with its class name and arguments +# def to_json(*args) +# { +# JSON.create_id => self.class.name, +# 'a' => [ bar, baz ] +# }.to_json(*args) +# end +# # Deserialize JSON string by constructing new Foo object with arguments. +# def self.json_create(object) +# new(*object['a']) +# end +# end +# +# Demonstration: +# require 'json' +# # This Foo object has no custom addition. +# foo0 = Foo.new(0, 1) +# json0 = JSON.generate(foo0) +# obj0 = JSON.parse(json0) +# # Lood the custom addition. +# require_relative 'foo_addition' +# # This foo has the custom addition. +# foo1 = Foo.new(0, 1) +# json1 = JSON.generate(foo1) +# obj1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. 
+# display = <" (String) +# With custom addition: {"json_class":"Foo","a":[0,1]} (String) +# Parsed JSON: +# Without custom addition: "#" (String) +# With custom addition: # (Foo) +# +module JSON + require 'json/version' + + begin + require 'json/ext' + rescue LoadError + require 'json/pure' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/bigdecimal.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/bigdecimal.rb new file mode 100644 index 0000000..c8b4f56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/bigdecimal.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::BigDecimal) or require 'bigdecimal' + +class BigDecimal + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + BigDecimal._load object['b'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'b' => _dump, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/complex.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/complex.rb new file mode 100644 index 0000000..4d977e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/complex.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Complex) or require 'complex' + +class Complex + + # Deserializes JSON string by converting Real value r, imaginary + # value i, to a Complex object. + def self.json_create(object) + Complex(object['r'], object['i']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'r' => real, + 'i' => imag, + } + end + + # Stores class name (Complex) along with real value r and imaginary value i as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/core.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/core.rb new file mode 100644 index 0000000..bfb017c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/core.rb @@ -0,0 +1,12 @@ +#frozen_string_literal: false +# This file requires the implementations of ruby core's custom objects for +# serialisation/deserialisation. + +require 'json/add/date' +require 'json/add/date_time' +require 'json/add/exception' +require 'json/add/range' +require 'json/add/regexp' +require 'json/add/struct' +require 'json/add/symbol' +require 'json/add/time' diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/date.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/date.rb new file mode 100644 index 0000000..2552356 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/date.rb @@ -0,0 +1,34 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class Date + + # Deserializes JSON string by converting Julian year y, month + # m, day d and Day of Calendar Reform sg to Date. 
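+  #
+  # A round-trip sketch (field names as produced by as_json below; output
+  # shown is indicative):
+  #   require 'json/add/date'
+  #   json = JSON.generate(Date.new(2020, 5, 2))
+  #   # => '{"json_class":"Date","y":2020,"m":5,"d":2,"sg":2299161.0}'
+  #   JSON.parse(json, create_additions: true)  # => #<Date: 2020-05-02 ...>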
+ def self.json_create(object) + civil(*object.values_at('y', 'm', 'd', 'sg')) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'sg' => start, + } + end + + # Stores class name (Date) with Julian year y, month m, day + # d and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/date_time.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/date_time.rb new file mode 100644 index 0000000..38b0e86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/date_time.rb @@ -0,0 +1,50 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class DateTime + + # Deserializes JSON string by converting year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg to DateTime. + def self.json_create(object) + args = object.values_at('y', 'm', 'd', 'H', 'M', 'S') + of_a, of_b = object['of'].split('/') + if of_b and of_b != '0' + args << Rational(of_a.to_i, of_b.to_i) + else + args << of_a + end + args << object['sg'] + civil(*args) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'H' => hour, + 'M' => min, + 'S' => sec, + 'of' => offset.to_s, + 'sg' => start, + } + end + + # Stores class name (DateTime) with Julian year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end + + diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/exception.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/exception.rb new file mode 100644 index 0000000..a107e5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/exception.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Exception + + # Deserializes JSON string by constructing new Exception object with message + # m and backtrace b serialized with to_json + def self.json_create(object) + result = new(object['m']) + result.set_backtrace object['b'] + result + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'm' => message, + 'b' => backtrace, + } + end + + # Stores class name (Exception) with message m and backtrace array + # b as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/ostruct.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/ostruct.rb new file mode 100644 index 0000000..686cf00 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/ostruct.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'ostruct' + +class OpenStruct + + # Deserializes JSON string by constructing new Struct object with values + # t serialized by to_json. 
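+  #
+  # A round-trip sketch (hash shape as produced by as_json below; output
+  # shown is indicative):
+  #   require 'json/add/ostruct'
+  #   json = JSON.generate(OpenStruct.new(name: 'Matz'))
+  #   # => '{"json_class":"OpenStruct","t":{"name":"Matz"}}'
+  #   JSON.parse(json, create_additions: true).name  # => "Matz"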
+ def self.json_create(object) + new(object['t'] || object[:t]) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 't' => table, + } + end + + # Stores class name (OpenStruct) with this struct's values t as a + # JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/range.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/range.rb new file mode 100644 index 0000000..93529fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/range.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Range + + # Deserializes JSON string by constructing new Range object with arguments + # a serialized by to_json. + def self.json_create(object) + new(*object['a']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => [ first, last, exclude_end? ] + } + end + + # Stores class name (Range) with JSON array of arguments a which + # include first (integer), last (integer), and + # exclude_end? (boolean) as JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/rational.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/rational.rb new file mode 100644 index 0000000..6be4034 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/rational.rb @@ -0,0 +1,28 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Rational) or require 'rational' + +class Rational + # Deserializes JSON string by converting numerator value n, + # denominator value d, to a Rational object. + def self.json_create(object) + Rational(object['n'], object['d']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'n' => numerator, + 'd' => denominator, + } + end + + # Stores class name (Rational) along with numerator value n and denominator value d as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/regexp.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/regexp.rb new file mode 100644 index 0000000..39d69fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/regexp.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Regexp + + # Deserializes JSON string by constructing new Regexp object with source + # s (Regexp or String) and options o serialized by + # to_json + def self.json_create(object) + new(object['s'], object['o']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'o' => options, + 's' => source, + } + end + + # Stores class name (Regexp) with options o and source s + # (Regexp or String) as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/set.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/set.rb new file mode 100644 index 0000000..71e2a0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/set.rb @@ -0,0 +1,29 @@ +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Set) or require 'set' + +class Set + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + new object['a'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => to_a, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/struct.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/struct.rb new file mode 100644 index 0000000..e8395ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/struct.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Struct + + # Deserializes JSON string by constructing new Struct object with values + # v serialized by to_json. + def self.json_create(object) + new(*object['v']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 'v' => values, + } + end + + # Stores class name (Struct) with Struct values v as a JSON string. + # Only named structs are supported. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/symbol.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/symbol.rb new file mode 100644 index 0000000..74b13a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/symbol.rb @@ -0,0 +1,25 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Symbol + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 's' => to_s, + } + end + + # Stores class name (Symbol) with String representation of Symbol as a JSON string. 
+ def to_json(*a) + as_json.to_json(*a) + end + + # Deserializes JSON string by converting the string value stored in the object to a Symbol + def self.json_create(o) + o['s'].to_sym + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/time.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/time.rb new file mode 100644 index 0000000..b73acc4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/add/time.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Time + + # Deserializes JSON string by converting time since epoch to Time + def self.json_create(object) + if usec = object.delete('u') # used to be tv_usec -> tv_nsec + object['n'] = usec * 1000 + end + if method_defined?(:tv_nsec) + at(object['s'], Rational(object['n'], 1000)) + else + at(object['s'], object['n'] / 1000) + end + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + nanoseconds = [ tv_usec * 1000 ] + respond_to?(:tv_nsec) and nanoseconds << tv_nsec + nanoseconds = nanoseconds.max + { + JSON.create_id => self.class.name, + 's' => tv_sec, + 'n' => nanoseconds, + } + end + + # Stores class name (Time) with number of seconds since epoch and number of + # microseconds for Time as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/common.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/common.rb new file mode 100644 index 0000000..991d760 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/common.rb @@ -0,0 +1,691 @@ +#frozen_string_literal: false +require 'json/version' +require 'json/generic_object' + +module JSON + class << self + # If +object+ is a + # {String-convertible object}[doc/implicit_conversion_rdoc.html#label-String-Convertible+Objects] + # (implementing +to_str+), calls JSON.parse with +object+ and +opts+: + # json = '[0, 1, null]' + # JSON[json]# => [0, 1, nil] + # + # Otherwise, calls JSON.generate with +object+ and +opts+: + # ruby = [0, 1, nil] + # JSON[ruby] # => '[0,1,null]' + def [](object, opts = {}) + if object.respond_to? :to_str + JSON.parse(object.to_str, opts) + else + JSON.generate(object, opts) + end + end + + # Returns the JSON parser class that is used by JSON. This is either + # JSON::Ext::Parser or JSON::Pure::Parser: + # JSON.parser # => JSON::Ext::Parser + attr_reader :parser + + # Set the JSON parser class _parser_ to be used by JSON. + def parser=(parser) # :nodoc: + @parser = parser + remove_const :Parser if const_defined?(:Parser, false) + const_set :Parser, parser + end + + # Return the constant located at _path_. The format of _path_ has to be + # either ::A::B::C or A::B::C. In any case, A has to be located at the top + # level (absolute namespace path?). If there doesn't exist a constant at + # the given path, an ArgumentError is raised. + def deep_const_get(path) # :nodoc: + path.to_s.split(/::/).inject(Object) do |p, c| + case + when c.empty? then p + when p.const_defined?(c, true) then p.const_get(c) + else + begin + p.const_missing(c) + rescue NameError => e + raise ArgumentError, "can't get const #{path}: #{e}" + end + end + end + end + + # Set the module _generator_ to be used by JSON. 
+ def generator=(generator) # :nodoc: + old, $VERBOSE = $VERBOSE, nil + @generator = generator + generator_methods = generator::GeneratorMethods + for const in generator_methods.constants + klass = deep_const_get(const) + modul = generator_methods.const_get(const) + klass.class_eval do + instance_methods(false).each do |m| + m.to_s == 'to_json' and remove_method m + end + include modul + end + end + self.state = generator::State + const_set :State, self.state + const_set :SAFE_STATE_PROTOTYPE, State.new + const_set :FAST_STATE_PROTOTYPE, State.new( + :indent => '', + :space => '', + :object_nl => "", + :array_nl => "", + :max_nesting => false + ) + const_set :PRETTY_STATE_PROTOTYPE, State.new( + :indent => ' ', + :space => ' ', + :object_nl => "\n", + :array_nl => "\n" + ) + ensure + $VERBOSE = old + end + + # Returns the JSON generator module that is used by JSON. This is + # either JSON::Ext::Generator or JSON::Pure::Generator: + # JSON.generator # => JSON::Ext::Generator + attr_reader :generator + + # Sets or Returns the JSON generator state class that is used by JSON. This is + # either JSON::Ext::Generator::State or JSON::Pure::Generator::State: + # JSON.state # => JSON::Ext::Generator::State + attr_accessor :state + + # Sets or returns create identifier, which is used to decide if the _json_create_ + # hook of a class should be called; initial value is +json_class+: + # JSON.create_id # => 'json_class' + attr_accessor :create_id + end + self.create_id = 'json_class' + + NaN = 0.0/0 + + Infinity = 1.0/0 + + MinusInfinity = -Infinity + + # The base exception for JSON errors. + class JSONError < StandardError + def self.wrap(exception) + obj = new("Wrapped(#{exception.class}): #{exception.message.inspect}") + obj.set_backtrace exception.backtrace + obj + end + end + + # This exception is raised if a parser error occurs. + class ParserError < JSONError; end + + # This exception is raised if the nesting of parsed data structures is too + # deep. + class NestingError < ParserError; end + + # :stopdoc: + class CircularDatastructure < NestingError; end + # :startdoc: + + # This exception is raised if a generator or unparser error occurs. + class GeneratorError < JSONError; end + # For backwards compatibility + UnparserError = GeneratorError # :nodoc: + + # This exception is raised if the required unicode support is missing on the + # system. Usually this means that the iconv library is not installed. + class MissingUnicodeSupport < JSONError; end + + module_function + + # :call-seq: + # JSON.parse(source, opts) -> object + # + # Argument +source+ contains the \String to be parsed. It must be a + # {String-convertible object}[doc/implicit_conversion_rdoc.html#label-String-Convertible+Objects] + # (implementing +to_str+), and must contain valid \JSON data. + # + # Argument +opts+, if given, contains options for the parsing, and must be a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects] + # (implementing +to_hash+). + # + # Returns the Ruby objects created by parsing the given +source+. 
+ # + # --- + # + # When +source+ is a \JSON array, returns a Ruby \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby # => ["foo", 1.0, true, false, nil] + # ruby.class # => Array + # + # When +source+ is a \JSON object, returns a Ruby \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # ruby.class # => Hash + # + # For examples of parsing for all \JSON data types, see + # {Parsing \JSON}[#module-JSON-label-Parsing+JSON]. + # + # ====== Input Options + # + # Option +max_nesting+ (\Integer) specifies the maximum nesting depth allowed; + # defaults to +100+; specify +false+ to disable depth checking. + # + # With the default, +false+: + # source = '[0, [1, [2, [3]]]]' + # ruby = JSON.parse(source) + # ruby # => [0, [1, [2, [3]]]] + # Too deep: + # # Raises JSON::NestingError (nesting of 2 is too deep): + # JSON.parse(source, {max_nesting: 1}) + # Bad value: + # # Raises TypeError (wrong argument type Symbol (expected Fixnum)): + # JSON.parse(source, {max_nesting: :foo}) + # + # --- + # + # Option +allow_nan+ (boolean) specifies whether to allow + # NaN, Infinity, and MinusInfinity in +source+; + # defaults to +false+. + # + # With the default, +false+: + # # Raises JSON::ParserError (225: unexpected token at '[NaN]'): + # JSON.parse('[NaN]') + # # Raises JSON::ParserError (232: unexpected token at '[Infinity]'): + # JSON.parse('[Infinity]') + # # Raises JSON::ParserError (248: unexpected token at '[-Infinity]'): + # JSON.parse('[-Infinity]') + # Allow: + # source = '[NaN, Infinity, -Infinity]' + # ruby = JSON.parse(source, {allow_nan: true}) + # ruby # => [NaN, Infinity, -Infinity] + # + # ====== Output Options + # + # Option +symbolize_names+ (boolean) specifies whether returned \Hash keys + # should be Symbols; + # defaults to +false+ (use Strings). + # + # With the default, +false+: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # Use Symbols: + # ruby = JSON.parse(source, {symbolize_names: true}) + # ruby # => {:a=>"foo", :b=>1.0, :c=>true, :d=>false, :e=>nil} + # + # --- + # + # Option +object_class+ (\Class) specifies the Ruby class to be used + # for each \JSON object; + # defaults to \Hash. + # + # With the default, \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby.class # => Hash + # Use class \OpenStruct: + # ruby = JSON.parse(source, {object_class: OpenStruct}) + # ruby # => # + # + # --- + # + # Option +array_class+ (\Class) specifies the Ruby class to be used + # for each \JSON array; + # defaults to \Array. + # + # With the default, \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby.class # => Array + # Use class \Set: + # ruby = JSON.parse(source, {array_class: Set}) + # ruby # => # + # + # --- + # + # Option +create_additions+ (boolean) specifies whether to use \JSON additions in parsing. + # See {\JSON Additions}[#module-JSON-label-JSON+Additions]. 
+ # + # ====== Exceptions + # + # Raises an exception if +source+ is not valid JSON: + # + # # Raises JSON::ParserError (783: unexpected token at ''): + # JSON.parse('') + # + def parse(source, opts = {}) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.parse!(source, opts) -> object + # + # Calls + # parse(source, opts) + # with +source+ and possibly modified +opts+. + # + # Differences from JSON.parse: + # - Option +max_nesting+, if not provided, defaults to +false+, + # which disables checking for nesting depth. + # - Option +allow_nan+, if not provided, defaults to +true+. + def parse!(source, opts = {}) + opts = { + :max_nesting => false, + :allow_nan => true + }.merge(opts) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.generate(obj, opts = nil) -> new_string + # + # Argument +obj+ is the Ruby object to be converted to \JSON. + # + # Argument +opts+, if given, contains options for the generation, and must be a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects] + # (implementing +to_hash+). + # + # Returns a \String containing the generated \JSON data. + # + # See also JSON.fast_generate, JSON.pretty_generate. + # + # --- + # + # When +obj+ is an + # {Array-convertible object}[doc/implicit_conversion_rdoc.html#label-Array-Convertible+Objects] + # (implementing +to_ary+), returns a \String containing a \JSON array: + # obj = ["foo", 1.0, true, false, nil] + # json = JSON.generate(obj) + # json # => '["foo",1.0,true,false,null]' + # + # When +obj+ is a + # {Hash-convertible object}[doc/implicit_conversion_rdoc.html#label-Hash-Convertible+Objects], + # return a \String containing a \JSON object: + # obj = {foo: 0, bar: 's', baz: :bat} + # json = JSON.generate(obj) + # json # => '{"foo":0,"bar":"s","baz":"bat"}' + # + # For examples of generating from other Ruby objects, see + # {Generating \JSON from Other Objects}[#module-JSON-label-Generating+JSON+from+Other+Objects]. + # + # ====== Input Options + # + # Option +allow_nan+ (boolean) specifies whether + # +NaN+, +Infinity+, and -Infinity may be generated; + # defaults to +false+. + # + # With the default, +false+: + # # Raises JSON::GeneratorError (920: NaN not allowed in JSON): + # JSON.generate(JSON::NaN) + # # Raises JSON::GeneratorError (917: Infinity not allowed in JSON): + # JSON.generate(JSON::Infinity) + # # Raises JSON::GeneratorError (917: -Infinity not allowed in JSON): + # JSON.generate(JSON::MinusInfinity) + # + # Allow: + # ruby = [Float::NaN, Float::Infinity, Float::MinusInfinity] + # JSON.generate(ruby, allow_nan: true) # => '[NaN,Infinity,-Infinity]' + # + # --- + # + # Option +max_nesting+ (\Integer) specifies the maximum nesting depth + # in +obj+; defaults to +100+. + # + # With the default, +100+: + # obj = [[[[[[0]]]]]] + # JSON.generate(obj) # => '[[[[[[0]]]]]]' + # + # Too deep: + # # Raises JSON::NestingError (nesting of 2 is too deep): + # JSON.generate(obj, max_nesting: 2) + # + # ====== Output Options + # + # The default formatting options generate the most compact + # \JSON data, all on one line and with no whitespace. + # + # You can use these formatting options to generate + # \JSON data in a more open format, using whitespace. + # See also JSON.pretty_generate. + # + # - Option +array_nl+ (\String) specifies a string (usually a newline) + # to be inserted after each \JSON array; defaults to the empty \String, ''. 
+ # - Option +object_nl+ (\String) specifies a string (usually a newline) + # to be inserted after each \JSON object; defaults to the empty \String, ''. + # - Option +indent+ (\String) specifies the string (usually spaces) to be + # used for indentation; defaults to the empty \String, ''; + # defaults to the empty \String, ''; + # has no effect unless options +array_nl+ or +object_nl+ specify newlines. + # - Option +space+ (\String) specifies a string (usually a space) to be + # inserted after the colon in each \JSON object's pair; + # defaults to the empty \String, ''. + # - Option +space_before+ (\String) specifies a string (usually a space) to be + # inserted before the colon in each \JSON object's pair; + # defaults to the empty \String, ''. + # + # In this example, +obj+ is used first to generate the shortest + # \JSON data (no whitespace), then again with all formatting options + # specified: + # + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.generate(obj) + # puts 'Compact:', json + # opts = { + # array_nl: "\n", + # object_nl: "\n", + # indent+: ' ', + # space_before: ' ', + # space: ' ' + # } + # puts 'Open:', JSON.generate(obj, opts) + # + # Output: + # Compact: + # {"foo":["bar","baz"],"bat":{"bam":0,"bad":1}} + # Open: + # { + # "foo" : [ + # "bar", + # "baz" + # ], + # "bat" : { + # "bam" : 0, + # "bad" : 1 + # } + # } + # + # --- + # + # Raises an exception if any formatting option is not a \String. + # + # ====== Exceptions + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises JSON::NestingError (nesting of 100 is too deep): + # JSON.generate(a) + # + def generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = SAFE_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state = state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and + # later delete them. + alias unparse generate + module_function :unparse + # :startdoc: + + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # By default, generates \JSON data without checking + # for circular references in +obj+ (option +max_nesting+ set to +false+, disabled). + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises SystemStackError (stack level too deep): + # JSON.fast_generate(a) + def fast_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = FAST_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias fast_unparse fast_generate + module_function :fast_unparse + # :startdoc: + + # :call-seq: + # JSON.pretty_generate(obj, opts = nil) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. 
+ # + # Default options are: + # { + # indent: ' ', # Two spaces + # space: ' ', # One space + # array_nl: "\n", # Newline + # object_nl: "\n" # Newline + # } + # + # Example: + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.pretty_generate(obj) + # puts json + # Output: + # { + # "foo": [ + # "bar", + # "baz" + # ], + # "bat": { + # "bam": 0, + # "bad": 1 + # } + # } + # + def pretty_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = PRETTY_STATE_PROTOTYPE.dup + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias pretty_unparse pretty_generate + module_function :pretty_unparse + # :startdoc: + + class << self + # Sets or returns default options for the JSON.load method. + # Initially: + # opts = JSON.load_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :allow_blank=>true, :create_additions=>true} + attr_accessor :load_default_options + end + self.load_default_options = { + :max_nesting => false, + :allow_nan => true, + :allow_blank => true, + :create_additions => true, + } + + # Load a ruby data structure from a JSON _source_ and return it. A source can + # either be a string-like object, an IO-like object, or an object responding + # to the read method. If _proc_ was given, it will be called with any nested + # Ruby object as an argument recursively in depth first order. To modify the + # default options pass in the optional _options_ argument as well. + # + # BEWARE: This method is meant to serialise data from trusted user input, + # like from your own database server or clients under your control, it could + # be dangerous to allow untrusted users to pass JSON sources into it. The + # default options for the parser can be changed via the load_default_options + # method. + # + # This method is part of the implementation of the load/dump interface of + # Marshal and YAML. + def load(source, proc = nil, options = {}) + opts = load_default_options.merge options + if source.respond_to? :to_str + source = source.to_str + elsif source.respond_to? :to_io + source = source.to_io.read + elsif source.respond_to?(:read) + source = source.read + end + if opts[:allow_blank] && (source.nil? || source.empty?) + source = 'null' + end + result = parse(source, opts) + recurse_proc(result, &proc) if proc + result + end + + # Recursively calls passed _Proc_ if the parsed data structure is an _Array_ or _Hash_ + def recurse_proc(result, &proc) + case result + when Array + result.each { |x| recurse_proc x, &proc } + proc.call result + when Hash + result.each { |x, y| recurse_proc x, &proc; recurse_proc y, &proc } + proc.call result + else + proc.call result + end + end + + alias restore load + module_function :restore + + class << self + # Sets or returns the default options for the JSON.dump method. + # Initially: + # opts = JSON.dump_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true} + attr_accessor :dump_default_options + end + self.dump_default_options = { + :max_nesting => false, + :allow_nan => true, + } + + # Dumps _obj_ as a JSON string, i.e. calls generate on the object and returns + # the result. 
+ # + # If anIO (an IO-like object or an object that responds to the write method) + # was given, the resulting JSON is written to it. + # + # If the number of nested arrays or objects exceeds _limit_, an ArgumentError + # exception is raised. This argument is similar (but not exactly the + # same!) to the _limit_ argument in Marshal.dump. + # + # The default options for the generator can be changed via the + # dump_default_options method. + # + # This method is part of the implementation of the load/dump interface of + # Marshal and YAML. + def dump(obj, anIO = nil, limit = nil) + if anIO and limit.nil? + anIO = anIO.to_io if anIO.respond_to?(:to_io) + unless anIO.respond_to?(:write) + limit = anIO + anIO = nil + end + end + opts = JSON.dump_default_options + opts = opts.merge(:max_nesting => limit) if limit + result = generate(obj, opts) + if anIO + anIO.write result + anIO + else + result + end + rescue JSON::NestingError + raise ArgumentError, "exceed depth limit" + end + + # Encodes string using String.encode. + def self.iconv(to, from, string) + string.encode(to, from) + end +end + +module ::Kernel + private + + # Outputs _objs_ to STDOUT as JSON strings in the shortest form, that is in + # one line. + def j(*objs) + objs.each do |obj| + puts JSON::generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # Outputs _objs_ to STDOUT as JSON strings in a pretty format, with + # indentation and over many lines. + def jj(*objs) + objs.each do |obj| + puts JSON::pretty_generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # If _object_ is string-like, parse the string and return the parsed result as + # a Ruby data structure. Otherwise, generate a JSON text from the Ruby data + # structure object and return it. + # + # The _opts_ argument is passed through to generate/parse respectively. See + # generate and parse for their documentation. + def JSON(object, *args) + if object.respond_to? :to_str + JSON.parse(object.to_str, args.first) + else + JSON.generate(object, args.first) + end + end +end + +# Extends any Class to include _json_creatable?_ method. +class ::Class + # Returns true if this class can be used to create an instance + # from a serialised JSON string. The class has to implement a class + # method _json_create_ that expects a hash as first parameter. The hash + # should include the required data. + def json_creatable? + respond_to?(:json_create) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/ext.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/ext.rb new file mode 100644 index 0000000..7264a85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/ext.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality as C extensions. + module Ext + require 'json/ext/parser' + require 'json/ext/generator' + $DEBUG and warn "Using Ext extension for JSON." 
+ JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/ext/.keep b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/ext/.keep new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/generic_object.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/generic_object.rb new file mode 100644 index 0000000..108309d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/generic_object.rb @@ -0,0 +1,71 @@ +#frozen_string_literal: false +require 'ostruct' + +module JSON + class GenericObject < OpenStruct + class << self + alias [] new + + def json_creatable? + @json_creatable + end + + attr_writer :json_creatable + + def json_create(data) + data = data.dup + data.delete JSON.create_id + self[data] + end + + def from_hash(object) + case + when object.respond_to?(:to_hash) + result = new + object.to_hash.each do |key, value| + result[key] = from_hash(value) + end + result + when object.respond_to?(:to_ary) + object.to_ary.map { |a| from_hash(a) } + else + object + end + end + + def load(source, proc = nil, opts = {}) + result = ::JSON.load(source, proc, opts.merge(:object_class => self)) + result.nil? ? new : result + end + + def dump(obj, *args) + ::JSON.dump(obj, *args) + end + end + self.json_creatable = false + + def to_hash + table + end + + def [](name) + __send__(name) + end unless method_defined?(:[]) + + def []=(name, value) + __send__("#{name}=", value) + end unless method_defined?(:[]=) + + def |(other) + self.class[other.to_hash.merge(to_hash)] + end + + def as_json(*) + { JSON.create_id => self.class.name }.merge to_hash + end + + def to_json(*a) + as_json.to_json(*a) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure.rb new file mode 100644 index 0000000..53178b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality in pure ruby. + module Pure + require 'json/pure/parser' + require 'json/pure/generator' + $DEBUG and warn "Using Pure library for JSON." 
+ JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure/generator.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure/generator.rb new file mode 100644 index 0000000..471af63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure/generator.rb @@ -0,0 +1,459 @@ +#frozen_string_literal: false +module JSON + MAP = { + "\x0" => '\u0000', + "\x1" => '\u0001', + "\x2" => '\u0002', + "\x3" => '\u0003', + "\x4" => '\u0004', + "\x5" => '\u0005', + "\x6" => '\u0006', + "\x7" => '\u0007', + "\b" => '\b', + "\t" => '\t', + "\n" => '\n', + "\xb" => '\u000b', + "\f" => '\f', + "\r" => '\r', + "\xe" => '\u000e', + "\xf" => '\u000f', + "\x10" => '\u0010', + "\x11" => '\u0011', + "\x12" => '\u0012', + "\x13" => '\u0013', + "\x14" => '\u0014', + "\x15" => '\u0015', + "\x16" => '\u0016', + "\x17" => '\u0017', + "\x18" => '\u0018', + "\x19" => '\u0019', + "\x1a" => '\u001a', + "\x1b" => '\u001b', + "\x1c" => '\u001c', + "\x1d" => '\u001d', + "\x1e" => '\u001e', + "\x1f" => '\u001f', + '"' => '\"', + '\\' => '\\\\', + } # :nodoc: + + # Convert a UTF8 encoded Ruby string _string_ to a JSON string, encoded with + # UTF16 big endian characters as \u????, and return it. + def utf8_to_json(string) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + string.gsub!(/["\\\x0-\x1f]/) { MAP[$&] } + string.force_encoding(::Encoding::UTF_8) + string + end + + def utf8_to_json_ascii(string) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + string.gsub!(/["\\\x0-\x1f]/n) { MAP[$&] } + string.gsub!(/( + (?: + [\xc2-\xdf][\x80-\xbf] | + [\xe0-\xef][\x80-\xbf]{2} | + [\xf0-\xf4][\x80-\xbf]{3} + )+ | + [\x80-\xc1\xf5-\xff] # invalid + )/nx) { |c| + c.size == 1 and raise GeneratorError, "invalid utf8 byte: '#{c}'" + s = JSON.iconv('utf-16be', 'utf-8', c).unpack('H*')[0] + s.force_encoding(::Encoding::ASCII_8BIT) + s.gsub!(/.{4}/n, '\\\\u\&') + s.force_encoding(::Encoding::UTF_8) + } + string.force_encoding(::Encoding::UTF_8) + string + rescue => e + raise GeneratorError.wrap(e) + end + + def valid_utf8?(string) + encoding = string.encoding + (encoding == Encoding::UTF_8 || encoding == Encoding::ASCII) && + string.valid_encoding? + end + module_function :utf8_to_json, :utf8_to_json_ascii, :valid_utf8? + + module Pure + module Generator + # This class is used to create State instances, that are use to hold data + # while generating a JSON text from a Ruby data structure. + class State + # Creates a State object from _opts_, which ought to be Hash to create + # a new State instance configured by _opts_, something else to create + # an unconfigured instance. If _opts_ is a State object, it is just + # returned. + def self.from_state(opts) + case + when self === opts + opts + when opts.respond_to?(:to_hash) + new(opts.to_hash) + when opts.respond_to?(:to_h) + new(opts.to_h) + else + SAFE_STATE_PROTOTYPE.dup + end + end + + # Instantiates a new State object, configured by _opts_. 
+ # + # _opts_ can have the following keys: + # + # * *indent*: a string used to indent levels (default: ''), + # * *space*: a string that is put after, a : or , delimiter (default: ''), + # * *space_before*: a string that is put before a : pair delimiter (default: ''), + # * *object_nl*: a string that is put at the end of a JSON object (default: ''), + # * *array_nl*: a string that is put at the end of a JSON array (default: ''), + # * *check_circular*: is deprecated now, use the :max_nesting option instead, + # * *max_nesting*: sets the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum should be checked. + # * *allow_nan*: true if NaN, Infinity, and -Infinity should be + # generated, otherwise an exception is thrown, if these values are + # encountered. This options defaults to false. + def initialize(opts = {}) + @indent = '' + @space = '' + @space_before = '' + @object_nl = '' + @array_nl = '' + @allow_nan = false + @ascii_only = false + @buffer_initial_length = 1024 + configure opts + end + + # This string is used to indent levels in the JSON text. + attr_accessor :indent + + # This string is used to insert a space between the tokens in a JSON + # string. + attr_accessor :space + + # This string is used to insert a space before the ':' in JSON objects. + attr_accessor :space_before + + # This string is put at the end of a line that holds a JSON object (or + # Hash). + attr_accessor :object_nl + + # This string is put at the end of a line that holds a JSON array. + attr_accessor :array_nl + + # This integer returns the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum is checked. + attr_accessor :max_nesting + + # :stopdoc: + attr_reader :buffer_initial_length + + def buffer_initial_length=(length) + if length > 0 + @buffer_initial_length = length + end + end + # :startdoc: + + # This integer returns the current depth data structure nesting in the + # generated JSON. + attr_accessor :depth + + def check_max_nesting # :nodoc: + return if @max_nesting.zero? + current_nesting = depth + 1 + current_nesting > @max_nesting and + raise NestingError, "nesting of #{current_nesting} is too deep" + end + + # Returns true, if circular data structures are checked, + # otherwise returns false. + def check_circular? + !@max_nesting.zero? + end + + # Returns true if NaN, Infinity, and -Infinity should be considered as + # valid JSON and output. + def allow_nan? + @allow_nan + end + + # Returns true, if only ASCII characters should be generated. Otherwise + # returns false. + def ascii_only? + @ascii_only + end + + # Configure this State instance with the Hash _opts_, and return + # itself. 
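+        #
+        # A configuration sketch, assuming the pure generator is active
+        # (e.g. after require 'json/pure'); the keys are those listed above:
+        #   state = JSON::Pure::Generator::State.new
+        #   state.configure(indent: '  ', object_nl: "\n", array_nl: "\n")
+        #   state.generate('a' => [1, 2])   # => pretty-printed JSON text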
+ def configure(opts) + if opts.respond_to?(:to_hash) + opts = opts.to_hash + elsif opts.respond_to?(:to_h) + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + for key, value in opts + instance_variable_set "@#{key}", value + end + @indent = opts[:indent] if opts.key?(:indent) + @space = opts[:space] if opts.key?(:space) + @space_before = opts[:space_before] if opts.key?(:space_before) + @object_nl = opts[:object_nl] if opts.key?(:object_nl) + @array_nl = opts[:array_nl] if opts.key?(:array_nl) + @allow_nan = !!opts[:allow_nan] if opts.key?(:allow_nan) + @ascii_only = opts[:ascii_only] if opts.key?(:ascii_only) + @depth = opts[:depth] || 0 + @buffer_initial_length ||= opts[:buffer_initial_length] + + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + self + end + alias merge configure + + # Returns the configuration instance variables as a hash, that can be + # passed to the configure method. + def to_h + result = {} + for iv in instance_variables + iv = iv.to_s[1..-1] + result[iv.to_sym] = self[iv] + end + result + end + + alias to_hash to_h + + # Generates a valid JSON document from object +obj+ and + # returns the result. If no valid JSON document can be + # created this method raises a + # GeneratorError exception. + def generate(obj) + result = obj.to_json(self) + JSON.valid_utf8?(result) or raise GeneratorError, + "source sequence #{result.inspect} is illegal/malformed utf-8" + result + end + + # Return the value returned by method +name+. + def [](name) + if respond_to?(name) + __send__(name) + else + instance_variable_get("@#{name}") if + instance_variables.include?("@#{name}".to_sym) # avoid warning + end + end + + def []=(name, value) + if respond_to?(name_writer = "#{name}=") + __send__ name_writer, value + else + instance_variable_set "@#{name}", value + end + end + end + + module GeneratorMethods + module Object + # Converts this object to a string (calling #to_s), converts + # it to a JSON string, and returns the result. This is a fallback, if no + # special method #to_json was defined for some object. + def to_json(*) to_s.to_json end + end + + module Hash + # Returns a JSON string containing a JSON object, that is unparsed from + # this Hash instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + # _depth_ is used to find out nesting depth, to indent accordingly. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_shift(state) + state.object_nl.empty? or return '' + state.indent * state.depth + end + + def json_transform(state) + delim = ',' + delim << state.object_nl + result = '{' + result << state.object_nl + depth = state.depth += 1 + first = true + indent = !state.object_nl.empty? + each { |key,value| + result << delim unless first + result << state.indent * depth if indent + result << key.to_s.to_json(state) + result << state.space_before + result << ':' + result << state.space + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.object_nl + result << state.indent * depth if indent + result << '}' + result + end + end + + module Array + # Returns a JSON string containing a JSON array, that is unparsed from + # this Array instance. 
+ # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_transform(state) + delim = ',' + delim << state.array_nl + result = '[' + result << state.array_nl + depth = state.depth += 1 + first = true + indent = !state.array_nl.empty? + each { |value| + result << delim unless first + result << state.indent * depth if indent + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.array_nl + result << state.indent * depth if indent + result << ']' + end + end + + module Integer + # Returns a JSON string representation for this Integer number. + def to_json(*) to_s end + end + + module Float + # Returns a JSON string representation for this Float number. + def to_json(state = nil, *) + state = State.from_state(state) + case + when infinite? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + when nan? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + else + to_s + end + end + end + + module String + # This string should be encoded with UTF-8 A call to this method + # returns a JSON string encoded with UTF16 big endian characters as + # \u????. + def to_json(state = nil, *args) + state = State.from_state(state) + if encoding == ::Encoding::UTF_8 + string = self + else + string = encode(::Encoding::UTF_8) + end + if state.ascii_only? + '"' << JSON.utf8_to_json_ascii(string) << '"' + else + '"' << JSON.utf8_to_json(string) << '"' + end + end + + # Module that holds the extending methods if, the String module is + # included. + module Extend + # Raw Strings are JSON Objects (the raw bytes are stored in an + # array for the key "raw"). The Ruby String can be created by this + # module method. + def json_create(o) + o['raw'].pack('C*') + end + end + + # Extends _modul_ with the String::Extend module. + def self.included(modul) + modul.extend Extend + end + + # This method creates a raw object hash, that can be nested into + # other data structures and will be unparsed as a raw string. This + # method should be used, if you want to convert raw strings to JSON + # instead of UTF-8 strings, e. g. binary data. + def to_json_raw_object + { + JSON.create_id => self.class.name, + 'raw' => self.unpack('C*'), + } + end + + # This method creates a JSON text from the result of + # a call to to_json_raw_object of this String. + def to_json_raw(*args) + to_json_raw_object.to_json(*args) + end + end + + module TrueClass + # Returns a JSON string for true: 'true'. + def to_json(*) 'true' end + end + + module FalseClass + # Returns a JSON string for false: 'false'. + def to_json(*) 'false' end + end + + module NilClass + # Returns a JSON string for nil: 'null'. + def to_json(*) 'null' end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure/parser.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure/parser.rb new file mode 100644 index 0000000..5db244c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/pure/parser.rb @@ -0,0 +1,319 @@ +#frozen_string_literal: false +require 'strscan' + +module JSON + module Pure + # This class implements the JSON parser that is used to parse a JSON string + # into a Ruby data structure. 
+ class Parser < StringScanner + STRING = /" ((?:[^\x0-\x1f"\\] | + # escaped special characters: + \\["\\\/bfnrt] | + \\u[0-9a-fA-F]{4} | + # match all but escaped special characters: + \\[\x20-\x21\x23-\x2e\x30-\x5b\x5d-\x61\x63-\x65\x67-\x6d\x6f-\x71\x73\x75-\xff])*) + "/nx + INTEGER = /(-?0|-?[1-9]\d*)/ + FLOAT = /(-? + (?:0|[1-9]\d*) + (?: + \.\d+(?i:e[+-]?\d+) | + \.\d+ | + (?i:e[+-]?\d+) + ) + )/x + NAN = /NaN/ + INFINITY = /Infinity/ + MINUS_INFINITY = /-Infinity/ + OBJECT_OPEN = /\{/ + OBJECT_CLOSE = /\}/ + ARRAY_OPEN = /\[/ + ARRAY_CLOSE = /\]/ + PAIR_DELIMITER = /:/ + COLLECTION_DELIMITER = /,/ + TRUE = /true/ + FALSE = /false/ + NULL = /null/ + IGNORE = %r( + (?: + //[^\n\r]*[\n\r]| # line comments + /\* # c-style comments + (?: + [^*/]| # normal chars + /[^*]| # slashes that do not start a nested comment + \*[^/]| # asterisks that do not end this comment + /(?=\*/) # single slash before this comment's end + )* + \*/ # the End of this comment + |[ \t\r\n]+ # whitespaces: space, horicontal tab, lf, cr + )+ + )mx + + UNPARSED = Object.new.freeze + + # Creates a new JSON::Pure::Parser instance for the string _source_. + # + # It will be configured by the _opts_ hash. _opts_ can have the following + # keys: + # * *max_nesting*: The maximum depth of nesting allowed in the parsed data + # structures. Disable depth checking with :max_nesting => false|nil|0, + # it defaults to 100. + # * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + # defiance of RFC 7159 to be parsed by the Parser. This option defaults + # to false. + # * *symbolize_names*: If set to true, returns symbols for the names + # (keys) in a JSON object. Otherwise strings are returned, which is + # also the default. It's not possible to use this option in + # conjunction with the *create_additions* option. + # * *create_additions*: If set to true, the Parser creates + # additions when a matching class and create_id are found. This + # option defaults to false. + # * *object_class*: Defaults to Hash + # * *array_class*: Defaults to Array + # * *decimal_class*: Specifies which class to use instead of the default + # (Float) when parsing decimal numbers. This class must accept a single + # string argument in its constructor. + def initialize(source, opts = {}) + opts ||= {} + source = convert_encoding source + super source + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + @allow_nan = !!opts[:allow_nan] + @symbolize_names = !!opts[:symbolize_names] + if opts.key?(:create_additions) + @create_additions = !!opts[:create_additions] + else + @create_additions = false + end + @symbolize_names && @create_additions and raise ArgumentError, + 'options :symbolize_names and :create_additions cannot be used '\ + 'in conjunction' + @create_id = @create_additions ? JSON.create_id : nil + @object_class = opts[:object_class] || Hash + @array_class = opts[:array_class] || Array + @decimal_class = opts[:decimal_class] + @match_string = opts[:match_string] + end + + alias source string + + def reset + super + @current_nesting = 0 + end + + # Parses the current JSON string _source_ and returns the + # complete data structure as a result. + def parse + reset + obj = nil + while !eos? && skip(IGNORE) do end + if eos? + raise ParserError, "source is not valid JSON!" + else + obj = parse_value + UNPARSED.equal?(obj) and raise ParserError, + "source is not valid JSON!" + end + while !eos? 
&& skip(IGNORE) do end + eos? or raise ParserError, "source is not valid JSON!" + obj + end + + private + + def convert_encoding(source) + if source.respond_to?(:to_str) + source = source.to_str + else + raise TypeError, + "#{source.inspect} is not like a string" + end + if source.encoding != ::Encoding::ASCII_8BIT + source = source.encode(::Encoding::UTF_8) + source.force_encoding(::Encoding::ASCII_8BIT) + end + source + end + + # Unescape characters in strings. + UNESCAPE_MAP = Hash.new { |h, k| h[k] = k.chr } + UNESCAPE_MAP.update({ + ?" => '"', + ?\\ => '\\', + ?/ => '/', + ?b => "\b", + ?f => "\f", + ?n => "\n", + ?r => "\r", + ?t => "\t", + ?u => nil, + }) + + EMPTY_8BIT_STRING = '' + if ::String.method_defined?(:encode) + EMPTY_8BIT_STRING.force_encoding Encoding::ASCII_8BIT + end + + def parse_string + if scan(STRING) + return '' if self[1].empty? + string = self[1].gsub(%r((?:\\[\\bfnrt"/]|(?:\\u(?:[A-Fa-f\d]{4}))+|\\[\x20-\xff]))n) do |c| + if u = UNESCAPE_MAP[$&[1]] + u + else # \uXXXX + bytes = EMPTY_8BIT_STRING.dup + i = 0 + while c[6 * i] == ?\\ && c[6 * i + 1] == ?u + bytes << c[6 * i + 2, 2].to_i(16) << c[6 * i + 4, 2].to_i(16) + i += 1 + end + JSON.iconv('utf-8', 'utf-16be', bytes) + end + end + if string.respond_to?(:force_encoding) + string.force_encoding(::Encoding::UTF_8) + end + if @create_additions and @match_string + for (regexp, klass) in @match_string + klass.json_creatable? or next + string =~ regexp and return klass.json_create(string) + end + end + string + else + UNPARSED + end + rescue => e + raise ParserError, "Caught #{e.class} at '#{peek(20)}': #{e}" + end + + def parse_value + case + when scan(FLOAT) + if @decimal_class then + if @decimal_class == BigDecimal then + BigDecimal(self[1]) + else + @decimal_class.new(self[1]) || Float(self[1]) + end + else + Float(self[1]) + end + when scan(INTEGER) + Integer(self[1]) + when scan(TRUE) + true + when scan(FALSE) + false + when scan(NULL) + nil + when !UNPARSED.equal?(string = parse_string) + string + when scan(ARRAY_OPEN) + @current_nesting += 1 + ary = parse_array + @current_nesting -= 1 + ary + when scan(OBJECT_OPEN) + @current_nesting += 1 + obj = parse_object + @current_nesting -= 1 + obj + when @allow_nan && scan(NAN) + NaN + when @allow_nan && scan(INFINITY) + Infinity + when @allow_nan && scan(MINUS_INFINITY) + MinusInfinity + else + UNPARSED + end + end + + def parse_array + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @array_class.new + delim = false + until eos? + case + when !UNPARSED.equal?(value = parse_value) + delim = false + result << value + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(ARRAY_CLOSE) + ; + else + raise ParserError, "expected ',' or ']' in array at '#{peek(20)}'!" + end + when scan(ARRAY_CLOSE) + if delim + raise ParserError, "expected next element in array at '#{peek(20)}'!" + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in array at '#{peek(20)}'!" + end + end + result + end + + def parse_object + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @object_class.new + delim = false + until eos? + case + when !UNPARSED.equal?(string = parse_string) + skip(IGNORE) + unless scan(PAIR_DELIMITER) + raise ParserError, "expected ':' in object at '#{peek(20)}'!" 
+ end + skip(IGNORE) + unless UNPARSED.equal?(value = parse_value) + result[@symbolize_names ? string.to_sym : string] = value + delim = false + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(OBJECT_CLOSE) + ; + else + raise ParserError, "expected ',' or '}' in object at '#{peek(20)}'!" + end + else + raise ParserError, "expected value in object at '#{peek(20)}'!" + end + when scan(OBJECT_CLOSE) + if delim + raise ParserError, "expected next name, value pair in object at '#{peek(20)}'!" + end + if @create_additions and klassname = result[@create_id] + klass = JSON.deep_const_get klassname + break unless klass and klass.json_creatable? + result = klass.json_create(result) + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in object at '#{peek(20)}'!" + end + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/version.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/version.rb new file mode 100644 index 0000000..e4dd3e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/lib/json/version.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: false +module JSON + # JSON version + VERSION = '2.3.1' + VERSION_ARRAY = VERSION.split(/\./).map { |x| x.to_i } # :nodoc: + VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc: + VERSION_MINOR = VERSION_ARRAY[1] # :nodoc: + VERSION_BUILD = VERSION_ARRAY[2] # :nodoc: +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/references/rfc7159.txt b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/references/rfc7159.txt new file mode 100644 index 0000000..64fcada --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/references/rfc7159.txt @@ -0,0 +1,899 @@ + + + + + + +Internet Engineering Task Force (IETF) T. Bray, Ed. +Request for Comments: 7159 Google, Inc. +Obsoletes: 4627, 7158 March 2014 +Category: Standards Track +ISSN: 2070-1721 + + + The JavaScript Object Notation (JSON) Data Interchange Format + +Abstract + + JavaScript Object Notation (JSON) is a lightweight, text-based, + language-independent data interchange format. It was derived from + the ECMAScript Programming Language Standard. JSON defines a small + set of formatting rules for the portable representation of structured + data. + + This document removes inconsistencies with other specifications of + JSON, repairs specification errors, and offers experience-based + interoperability guidance. + +Status of This Memo + + This is an Internet Standards Track document. + + This document is a product of the Internet Engineering Task Force + (IETF). It represents the consensus of the IETF community. It has + received public review and has been approved for publication by the + Internet Engineering Steering Group (IESG). Further information on + Internet Standards is available in Section 2 of RFC 5741. + + Information about the current status of this document, any errata, + and how to provide feedback on it may be obtained at + http://www.rfc-editor.org/info/rfc7159. + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 1] + +RFC 7159 JSON March 2014 + + +Copyright Notice + + Copyright (c) 2014 IETF Trust and the persons identified as the + document authors. All rights reserved. + + This document is subject to BCP 78 and the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info) in effect on the date of + publication of this document. 
Please review these documents + carefully, as they describe your rights and restrictions with respect + to this document. Code Components extracted from this document must + include Simplified BSD License text as described in Section 4.e of + the Trust Legal Provisions and are provided without warranty as + described in the Simplified BSD License. + + This document may contain material from IETF Documents or IETF + Contributions published or made publicly available before November + 10, 2008. The person(s) controlling the copyright in some of this + material may not have granted the IETF Trust the right to allow + modifications of such material outside the IETF Standards Process. + Without obtaining an adequate license from the person(s) controlling + the copyright in such materials, this document may not be modified + outside the IETF Standards Process, and derivative works of it may + not be created outside the IETF Standards Process, except to format + it for publication as an RFC or to translate it into languages other + than English. + + + + + + + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 2] + +RFC 7159 JSON March 2014 + + +Table of Contents + + 1. Introduction ....................................................3 + 1.1. Conventions Used in This Document ..........................4 + 1.2. Specifications of JSON .....................................4 + 1.3. Introduction to This Revision ..............................4 + 2. JSON Grammar ....................................................4 + 3. Values ..........................................................5 + 4. Objects .........................................................6 + 5. Arrays ..........................................................6 + 6. Numbers .........................................................6 + 7. Strings .........................................................8 + 8. String and Character Issues .....................................9 + 8.1. Character Encoding .........................................9 + 8.2. Unicode Characters .........................................9 + 8.3. String Comparison ..........................................9 + 9. Parsers ........................................................10 + 10. Generators ....................................................10 + 11. IANA Considerations ...........................................10 + 12. Security Considerations .......................................11 + 13. Examples ......................................................12 + 14. Contributors ..................................................13 + 15. References ....................................................13 + 15.1. Normative References .....................................13 + 15.2. Informative References ...................................13 + Appendix A. Changes from RFC 4627 .................................15 + +1. Introduction + + JavaScript Object Notation (JSON) is a text format for the + serialization of structured data. It is derived from the object + literals of JavaScript, as defined in the ECMAScript Programming + Language Standard, Third Edition [ECMA-262]. + + JSON can represent four primitive types (strings, numbers, booleans, + and null) and two structured types (objects and arrays). + + A string is a sequence of zero or more Unicode characters [UNICODE]. + Note that this citation references the latest version of Unicode + rather than a specific release. 
It is not expected that future + changes in the UNICODE specification will impact the syntax of JSON. + + An object is an unordered collection of zero or more name/value + pairs, where a name is a string and a value is a string, number, + boolean, null, object, or array. + + An array is an ordered sequence of zero or more values. + + + + +Bray Standards Track [Page 3] + +RFC 7159 JSON March 2014 + + + The terms "object" and "array" come from the conventions of + JavaScript. + + JSON's design goals were for it to be minimal, portable, textual, and + a subset of JavaScript. + +1.1. Conventions Used in This Document + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", + "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this + document are to be interpreted as described in [RFC2119]. + + The grammatical rules in this document are to be interpreted as + described in [RFC5234]. + +1.2. Specifications of JSON + + This document updates [RFC4627], which describes JSON and registers + the media type "application/json". + + A description of JSON in ECMAScript terms appears in Version 5.1 of + the ECMAScript specification [ECMA-262], Section 15.12. JSON is also + described in [ECMA-404]. + + All of the specifications of JSON syntax agree on the syntactic + elements of the language. + +1.3. Introduction to This Revision + + In the years since the publication of RFC 4627, JSON has found very + wide use. This experience has revealed certain patterns, which, + while allowed by its specifications, have caused interoperability + problems. + + Also, a small number of errata have been reported (see RFC Errata IDs + 607 [Err607] and 3607 [Err3607]). + + This document's goal is to apply the errata, remove inconsistencies + with other specifications of JSON, and highlight practices that can + lead to interoperability problems. + +2. JSON Grammar + + A JSON text is a sequence of tokens. The set of tokens includes six + structural characters, strings, numbers, and three literal names. + + A JSON text is a serialized value. Note that certain previous + specifications of JSON constrained a JSON text to be an object or an + + + +Bray Standards Track [Page 4] + +RFC 7159 JSON March 2014 + + + array. Implementations that generate only objects or arrays where a + JSON text is called for will be interoperable in the sense that all + implementations will accept these as conforming JSON texts. + + JSON-text = ws value ws + + These are the six structural characters: + + begin-array = ws %x5B ws ; [ left square bracket + + begin-object = ws %x7B ws ; { left curly bracket + + end-array = ws %x5D ws ; ] right square bracket + + end-object = ws %x7D ws ; } right curly bracket + + name-separator = ws %x3A ws ; : colon + + value-separator = ws %x2C ws ; , comma + + Insignificant whitespace is allowed before or after any of the six + structural characters. + + ws = *( + %x20 / ; Space + %x09 / ; Horizontal tab + %x0A / ; Line feed or New line + %x0D ) ; Carriage return + +3. Values + + A JSON value MUST be an object, array, number, or string, or one of + the following three literal names: + + false null true + + The literal names MUST be lowercase. No other literal names are + allowed. + + value = false / null / true / object / array / number / string + + false = %x66.61.6c.73.65 ; false + + null = %x6e.75.6c.6c ; null + + true = %x74.72.75.65 ; true + + + + + +Bray Standards Track [Page 5] + +RFC 7159 JSON March 2014 + + +4. 
Objects + + An object structure is represented as a pair of curly brackets + surrounding zero or more name/value pairs (or members). A name is a + string. A single colon comes after each name, separating the name + from the value. A single comma separates a value from a following + name. The names within an object SHOULD be unique. + + object = begin-object [ member *( value-separator member ) ] + end-object + + member = string name-separator value + + An object whose names are all unique is interoperable in the sense + that all software implementations receiving that object will agree on + the name-value mappings. When the names within an object are not + unique, the behavior of software that receives such an object is + unpredictable. Many implementations report the last name/value pair + only. Other implementations report an error or fail to parse the + object, and some implementations report all of the name/value pairs, + including duplicates. + + JSON parsing libraries have been observed to differ as to whether or + not they make the ordering of object members visible to calling + software. Implementations whose behavior does not depend on member + ordering will be interoperable in the sense that they will not be + affected by these differences. + +5. Arrays + + An array structure is represented as square brackets surrounding zero + or more values (or elements). Elements are separated by commas. + + array = begin-array [ value *( value-separator value ) ] end-array + + There is no requirement that the values in an array be of the same + type. + +6. Numbers + + The representation of numbers is similar to that used in most + programming languages. A number is represented in base 10 using + decimal digits. It contains an integer component that may be + prefixed with an optional minus sign, which may be followed by a + fraction part and/or an exponent part. Leading zeros are not + allowed. + + A fraction part is a decimal point followed by one or more digits. + + + +Bray Standards Track [Page 6] + +RFC 7159 JSON March 2014 + + + An exponent part begins with the letter E in upper or lower case, + which may be followed by a plus or minus sign. The E and optional + sign are followed by one or more digits. + + Numeric values that cannot be represented in the grammar below (such + as Infinity and NaN) are not permitted. + + number = [ minus ] int [ frac ] [ exp ] + + decimal-point = %x2E ; . + + digit1-9 = %x31-39 ; 1-9 + + e = %x65 / %x45 ; e E + + exp = e [ minus / plus ] 1*DIGIT + + frac = decimal-point 1*DIGIT + + int = zero / ( digit1-9 *DIGIT ) + + minus = %x2D ; - + + plus = %x2B ; + + + zero = %x30 ; 0 + + This specification allows implementations to set limits on the range + and precision of numbers accepted. Since software that implements + IEEE 754-2008 binary64 (double precision) numbers [IEEE754] is + generally available and widely used, good interoperability can be + achieved by implementations that expect no more precision or range + than these provide, in the sense that implementations will + approximate JSON numbers within the expected precision. A JSON + number such as 1E400 or 3.141592653589793238462643383279 may indicate + potential interoperability problems, since it suggests that the + software that created it expects receiving software to have greater + capabilities for numeric magnitude and precision than is widely + available. 
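
   The precision guidance above maps directly onto the gem being vendored in this
   patch. The following is a minimal Ruby sketch, not part of the patched files;
   the output comments assume IEEE 754 binary64 rounding and bigdecimal's default
   formatting. It shows the default Float mapping and the parser's :decimal_class
   option documented in the vendored pure parser earlier in this patch.

      require 'json'
      require 'bigdecimal'

      # Default behaviour: JSON numbers with a fraction or exponent become
      # Float (IEEE 754 binary64), so digits beyond roughly 15-17 significant
      # figures are rounded away.
      JSON.parse('[3.141592653589793238462643383279]').first
      # => 3.141592653589793

      # Integers become Ruby Integers and round-trip exactly; only values in
      # [-(2**53)+1, (2**53)-1] are portable to binary64-only consumers.
      JSON.parse('[9007199254740991]').first
      # => 9007199254740991

      # The parser's :decimal_class option keeps the full decimal value by
      # constructing BigDecimal instead of Float.
      JSON.parse('[3.141592653589793238462643383279]', decimal_class: BigDecimal).first
      # => 0.3141592653589793238462643383279e1
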
+ + Note that when such software is used, numbers that are integers and + are in the range [-(2**53)+1, (2**53)-1] are interoperable in the + sense that implementations will agree exactly on their numeric + values. + + + + + + + +Bray Standards Track [Page 7] + +RFC 7159 JSON March 2014 + + +7. Strings + + The representation of strings is similar to conventions used in the C + family of programming languages. A string begins and ends with + quotation marks. All Unicode characters may be placed within the + quotation marks, except for the characters that must be escaped: + quotation mark, reverse solidus, and the control characters (U+0000 + through U+001F). + + Any character may be escaped. If the character is in the Basic + Multilingual Plane (U+0000 through U+FFFF), then it may be + represented as a six-character sequence: a reverse solidus, followed + by the lowercase letter u, followed by four hexadecimal digits that + encode the character's code point. The hexadecimal letters A though + F can be upper or lower case. So, for example, a string containing + only a single reverse solidus character may be represented as + "\u005C". + + Alternatively, there are two-character sequence escape + representations of some popular characters. So, for example, a + string containing only a single reverse solidus character may be + represented more compactly as "\\". + + To escape an extended character that is not in the Basic Multilingual + Plane, the character is represented as a 12-character sequence, + encoding the UTF-16 surrogate pair. So, for example, a string + containing only the G clef character (U+1D11E) may be represented as + "\uD834\uDD1E". + + string = quotation-mark *char quotation-mark + + char = unescaped / + escape ( + %x22 / ; " quotation mark U+0022 + %x5C / ; \ reverse solidus U+005C + %x2F / ; / solidus U+002F + %x62 / ; b backspace U+0008 + %x66 / ; f form feed U+000C + %x6E / ; n line feed U+000A + %x72 / ; r carriage return U+000D + %x74 / ; t tab U+0009 + %x75 4HEXDIG ) ; uXXXX U+XXXX + + escape = %x5C ; \ + + quotation-mark = %x22 ; " + + unescaped = %x20-21 / %x23-5B / %x5D-10FFFF + + + +Bray Standards Track [Page 8] + +RFC 7159 JSON March 2014 + + +8. String and Character Issues + +8.1. Character Encoding + + JSON text SHALL be encoded in UTF-8, UTF-16, or UTF-32. The default + encoding is UTF-8, and JSON texts that are encoded in UTF-8 are + interoperable in the sense that they will be read successfully by the + maximum number of implementations; there are many implementations + that cannot successfully read texts in other encodings (such as + UTF-16 and UTF-32). + + Implementations MUST NOT add a byte order mark to the beginning of a + JSON text. In the interests of interoperability, implementations + that parse JSON texts MAY ignore the presence of a byte order mark + rather than treating it as an error. + +8.2. Unicode Characters + + When all the strings represented in a JSON text are composed entirely + of Unicode characters [UNICODE] (however escaped), then that JSON + text is interoperable in the sense that all software implementations + that parse it will agree on the contents of names and of string + values in objects and arrays. + + However, the ABNF in this specification allows member names and + string values to contain bit sequences that cannot encode Unicode + characters; for example, "\uDEAD" (a single unpaired UTF-16 + surrogate). 
Instances of this have been observed, for example, when + a library truncates a UTF-16 string without checking whether the + truncation split a surrogate pair. The behavior of software that + receives JSON texts containing such values is unpredictable; for + example, implementations might return different values for the length + of a string value or even suffer fatal runtime exceptions. + +8.3. String Comparison + + Software implementations are typically required to test names of + object members for equality. Implementations that transform the + textual representation into sequences of Unicode code units and then + perform the comparison numerically, code unit by code unit, are + interoperable in the sense that implementations will agree in all + cases on equality or inequality of two strings. For example, + implementations that compare strings with escaped characters + unconverted may incorrectly find that "a\\b" and "a\u005Cb" are not + equal. + + + + + + +Bray Standards Track [Page 9] + +RFC 7159 JSON March 2014 + + +9. Parsers + + A JSON parser transforms a JSON text into another representation. A + JSON parser MUST accept all texts that conform to the JSON grammar. + A JSON parser MAY accept non-JSON forms or extensions. + + An implementation may set limits on the size of texts that it + accepts. An implementation may set limits on the maximum depth of + nesting. An implementation may set limits on the range and precision + of numbers. An implementation may set limits on the length and + character contents of strings. + +10. Generators + + A JSON generator produces JSON text. The resulting text MUST + strictly conform to the JSON grammar. + +11. IANA Considerations + + The MIME media type for JSON text is application/json. + + Type name: application + + Subtype name: json + + Required parameters: n/a + + Optional parameters: n/a + + Encoding considerations: binary + + Security considerations: See [RFC7159], Section 12. + + Interoperability considerations: Described in [RFC7159] + + Published specification: [RFC7159] + + Applications that use this media type: + JSON has been used to exchange data between applications written + in all of these programming languages: ActionScript, C, C#, + Clojure, ColdFusion, Common Lisp, E, Erlang, Go, Java, JavaScript, + Lua, Objective CAML, Perl, PHP, Python, Rebol, Ruby, Scala, and + Scheme. + + + + + + + + +Bray Standards Track [Page 10] + +RFC 7159 JSON March 2014 + + + Additional information: + Magic number(s): n/a + File extension(s): .json + Macintosh file type code(s): TEXT + + Person & email address to contact for further information: + IESG + + + Intended usage: COMMON + + Restrictions on usage: none + + Author: + Douglas Crockford + + + Change controller: + IESG + + + Note: No "charset" parameter is defined for this registration. + Adding one really has no effect on compliant recipients. + +12. Security Considerations + + Generally, there are security issues with scripting languages. JSON + is a subset of JavaScript but excludes assignment and invocation. + + Since JSON's syntax is borrowed from JavaScript, it is possible to + use that language's "eval()" function to parse JSON texts. This + generally constitutes an unacceptable security risk, since the text + could contain executable code along with data declarations. The same + consideration applies to the use of eval()-like functions in any + other programming language in which JSON texts conform to that + language's syntax. 
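
   The same advice carries over to Ruby consumers of this gem. Below is a minimal,
   illustrative sketch (not part of the patched files) that parses, rather than
   evaluates, a deliberately invalid input reused from the fixtures added later in
   this patch (tests/fixtures/fail12.json).

      require 'json'

      # Untrusted input must go through a real JSON parser, never through an
      # eval-style shortcut.
      untrusted = '{"Illegal invocation": alert()}'

      begin
        JSON.parse(untrusted)
      rescue JSON::ParserError => e
        puts "rejected: #{e.class}"   # executable-looking input is refused, not run
      end
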
+ + + + + + + + + + + + + + + +Bray Standards Track [Page 11] + +RFC 7159 JSON March 2014 + + +13. Examples + + This is a JSON object: + + { + "Image": { + "Width": 800, + "Height": 600, + "Title": "View from 15th Floor", + "Thumbnail": { + "Url": "http://www.example.com/image/481989943", + "Height": 125, + "Width": 100 + }, + "Animated" : false, + "IDs": [116, 943, 234, 38793] + } + } + + Its Image member is an object whose Thumbnail member is an object and + whose IDs member is an array of numbers. + + This is a JSON array containing two objects: + + [ + { + "precision": "zip", + "Latitude": 37.7668, + "Longitude": -122.3959, + "Address": "", + "City": "SAN FRANCISCO", + "State": "CA", + "Zip": "94107", + "Country": "US" + }, + { + "precision": "zip", + "Latitude": 37.371991, + "Longitude": -122.026020, + "Address": "", + "City": "SUNNYVALE", + "State": "CA", + "Zip": "94085", + "Country": "US" + } + ] + + + + + +Bray Standards Track [Page 12] + +RFC 7159 JSON March 2014 + + + Here are three small JSON texts containing only values: + + "Hello world!" + + 42 + + true + +14. Contributors + + RFC 4627 was written by Douglas Crockford. This document was + constructed by making a relatively small number of changes to that + document; thus, the vast majority of the text here is his. + +15. References + +15.1. Normative References + + [IEEE754] IEEE, "IEEE Standard for Floating-Point Arithmetic", IEEE + Standard 754, August 2008, + . + + [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate + Requirement Levels", BCP 14, RFC 2119, March 1997. + + [RFC5234] Crocker, D. and P. Overell, "Augmented BNF for Syntax + Specifications: ABNF", STD 68, RFC 5234, January 2008. + + [UNICODE] The Unicode Consortium, "The Unicode Standard", + . + +15.2. Informative References + + [ECMA-262] Ecma International, "ECMAScript Language Specification + Edition 5.1", Standard ECMA-262, June 2011, + . + + [ECMA-404] Ecma International, "The JSON Data Interchange Format", + Standard ECMA-404, October 2013, + . + + [Err3607] RFC Errata, Errata ID 3607, RFC 3607, + . + + + + + + +Bray Standards Track [Page 13] + +RFC 7159 JSON March 2014 + + + [Err607] RFC Errata, Errata ID 607, RFC 607, + . + + [RFC4627] Crockford, D., "The application/json Media Type for + JavaScript Object Notation (JSON)", RFC 4627, July 2006. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 14] + +RFC 7159 JSON March 2014 + + +Appendix A. Changes from RFC 4627 + + This section lists changes between this document and the text in RFC + 4627. + + o Changed the title and abstract of the document. + + o Changed the reference to [UNICODE] to be not version specific. + + o Added a "Specifications of JSON" section. + + o Added an "Introduction to This Revision" section. + + o Changed the definition of "JSON text" so that it can be any JSON + value, removing the constraint that it be an object or array. + + o Added language about duplicate object member names, member + ordering, and interoperability. + + o Clarified the absence of a requirement that values in an array be + of the same JSON type. + + o Applied erratum #607 from RFC 4627 to correctly align the artwork + for the definition of "object". + + o Changed "as sequences of digits" to "in the grammar below" in the + "Numbers" section, and made base-10-ness explicit. + + o Added language about number interoperability as a function of + IEEE754, and added an IEEE754 reference. 
+ + o Added language about interoperability and Unicode characters and + about string comparisons. To do this, turned the old "Encoding" + section into a "String and Character Issues" section, with three + subsections: "Character Encoding", "Unicode Characters", and + "String Comparison". + + o Changed guidance in the "Parsers" section to point out that + implementations may set limits on the range "and precision" of + numbers. + + o Updated and tidied the "IANA Considerations" section. + + o Made a real "Security Considerations" section and lifted the text + out of the previous "IANA Considerations" section. + + + + + + +Bray Standards Track [Page 15] + +RFC 7159 JSON March 2014 + + + o Applied erratum #3607 from RFC 4627 by removing the security + consideration that begins "A JSON text can be safely passed" and + the JavaScript code that went with that consideration. + + o Added a note to the "Security Considerations" section pointing out + the risks of using the "eval()" function in JavaScript or any + other language in which JSON texts conform to that language's + syntax. + + o Added a note to the "IANA Considerations" clarifying the absence + of a "charset" parameter for the application/json media type. + + o Changed "100" to 100 and added a boolean field, both in the first + example. + + o Added examples of JSON texts with simple values, neither objects + nor arrays. + + o Added a "Contributors" section crediting Douglas Crockford. + + o Added a reference to RFC 4627. + + o Moved the ECMAScript reference from Normative to Informative and + updated it to reference ECMAScript 5.1, and added a reference to + ECMA 404. + +Author's Address + + Tim Bray (editor) + Google, Inc. + + EMail: tbray@textuality.com + + + + + + + + + + + + + + + + + + + +Bray Standards Track [Page 16] + diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail10.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail10.json new file mode 100644 index 0000000..5d8c004 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail10.json @@ -0,0 +1 @@ +{"Extra value after close": true} "misplaced quoted value" \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail11.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail11.json new file mode 100644 index 0000000..76eb95b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail11.json @@ -0,0 +1 @@ +{"Illegal expression": 1 + 2} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail12.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail12.json new file mode 100644 index 0000000..77580a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail12.json @@ -0,0 +1 @@ +{"Illegal invocation": alert()} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail13.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail13.json new file mode 100644 index 0000000..379406b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail13.json @@ -0,0 +1 @@ +{"Numbers cannot have leading zeroes": 013} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail14.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail14.json new file mode 100644 index 0000000..0ed366b --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail14.json @@ -0,0 +1 @@ +{"Numbers cannot be hex": 0x14} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail18.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail18.json new file mode 100644 index 0000000..ebc11eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail18.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail19.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail19.json new file mode 100644 index 0000000..3b9c46f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail19.json @@ -0,0 +1 @@ +{"Missing colon" null} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail2.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail2.json new file mode 100644 index 0000000..6b7c11e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail2.json @@ -0,0 +1 @@ +["Unclosed array" \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail20.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail20.json new file mode 100644 index 0000000..27c1af3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail20.json @@ -0,0 +1 @@ +{"Double colon":: null} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail21.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail21.json new file mode 100644 index 0000000..6247457 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail21.json @@ -0,0 +1 @@ +{"Comma instead of colon", null} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail22.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail22.json new file mode 100644 index 0000000..a775258 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail22.json @@ -0,0 +1 @@ +["Colon instead of comma": false] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail23.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail23.json new file mode 100644 index 0000000..494add1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail23.json @@ -0,0 +1 @@ +["Bad value", truth] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail24.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail24.json new file mode 100644 index 0000000..caff239 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail24.json @@ -0,0 +1 @@ +['single quote'] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail25.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail25.json new file mode 100644 index 0000000..2dfbd25 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail25.json @@ -0,0 +1 @@ +["tab character in string "] diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail27.json 
b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail27.json new file mode 100644 index 0000000..6b01a2c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail27.json @@ -0,0 +1,2 @@ +["line +break"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail28.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail28.json new file mode 100644 index 0000000..621a010 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail28.json @@ -0,0 +1,2 @@ +["line\ +break"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail3.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail3.json new file mode 100644 index 0000000..168c81e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail3.json @@ -0,0 +1 @@ +{unquoted_key: "keys must be quoted"} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail4.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail4.json new file mode 100644 index 0000000..9de168b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail4.json @@ -0,0 +1 @@ +["extra comma",] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail5.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail5.json new file mode 100644 index 0000000..ddf3ce3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail5.json @@ -0,0 +1 @@ +["double extra comma",,] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail6.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail6.json new file mode 100644 index 0000000..ed91580 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail6.json @@ -0,0 +1 @@ +[ , "<-- missing value"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail7.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail7.json new file mode 100644 index 0000000..8a96af3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail7.json @@ -0,0 +1 @@ +["Comma after the close"], \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail8.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail8.json new file mode 100644 index 0000000..b28479c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail8.json @@ -0,0 +1 @@ +["Extra close"]] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail9.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail9.json new file mode 100644 index 0000000..5815574 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/fail9.json @@ -0,0 +1 @@ +{"Extra comma": true,} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1.json new file mode 100644 index 0000000..98b77de --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/obsolete_fail1.json @@ -0,0 +1 @@ +"A JSON payload should be an object or array, not a string." 
diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass1.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass1.json new file mode 100644 index 0000000..7828fcc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass1.json @@ -0,0 +1,56 @@ +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E666, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + "slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ], + "compact": [1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" +: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066 + + +,"rosebud"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass15.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass15.json new file mode 100644 index 0000000..fc8376b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass15.json @@ -0,0 +1 @@ +["Illegal backslash escape: \x15"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass16.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass16.json new file mode 100644 index 0000000..c43ae3c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass16.json @@ -0,0 +1 @@ +["Illegal backslash escape: \'"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass17.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass17.json new file mode 100644 index 0000000..62b9214 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass17.json @@ -0,0 +1 @@ +["Illegal backslash escape: \017"] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass2.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass2.json new file mode 100644 index 0000000..d3c63c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass2.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass26.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass26.json new file mode 100644 index 0000000..845d26a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass26.json @@ -0,0 +1 @@ +["tab\ character\ in\ string\ "] \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass3.json b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass3.json new file mode 100644 index 0000000..4528d51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/fixtures/pass3.json @@ -0,0 
+1,6 @@ +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_addition_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_addition_test.rb new file mode 100644 index 0000000..61625f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_addition_test.rb @@ -0,0 +1,203 @@ +#frozen_string_literal: false +require 'test_helper' +require 'json/add/core' +require 'json/add/complex' +require 'json/add/rational' +require 'json/add/bigdecimal' +require 'json/add/ostruct' +require 'json/add/set' +require 'date' + +class JSONAdditionTest < Test::Unit::TestCase + include JSON + + class A + def initialize(a) + @a = a + end + + attr_reader :a + + def ==(other) + a == other.a + end + + def self.json_create(object) + new(*object['args']) + end + + def to_json(*args) + { + 'json_class' => self.class.name, + 'args' => [ @a ], + }.to_json(*args) + end + end + + class A2 < A + def to_json(*args) + { + 'json_class' => self.class.name, + 'args' => [ @a ], + }.to_json(*args) + end + end + + class B + def self.json_creatable? + false + end + + def to_json(*args) + { + 'json_class' => self.class.name, + }.to_json(*args) + end + end + + class C + def self.json_creatable? + false + end + + def to_json(*args) + { + 'json_class' => 'JSONAdditionTest::Nix', + }.to_json(*args) + end + end + + def test_extended_json + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_again = parse(json, :create_additions => true) + assert_kind_of a.class, a_again + assert_equal a, a_again + end + + def test_extended_json_default + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_hash = parse(json) + assert_kind_of Hash, a_hash + end + + def test_extended_json_disabled + a = A.new(666) + assert A.json_creatable? + json = generate(a) + a_again = parse(json, :create_additions => true) + assert_kind_of a.class, a_again + assert_equal a, a_again + a_hash = parse(json, :create_additions => false) + assert_kind_of Hash, a_hash + assert_equal( + {"args"=>[666], "json_class"=>"JSONAdditionTest::A"}.sort_by { |k,| k }, + a_hash.sort_by { |k,| k } + ) + end + + def test_extended_json_fail1 + b = B.new + assert !B.json_creatable? + json = generate(b) + assert_equal({ "json_class"=>"JSONAdditionTest::B" }, parse(json)) + end + + def test_extended_json_fail2 + c = C.new + assert !C.json_creatable? + json = generate(c) + assert_raise(ArgumentError, NameError) { parse(json, :create_additions => true) } + end + + def test_raw_strings + raw = '' + raw.respond_to?(:encode!) 
and raw.encode!(Encoding::ASCII_8BIT) + raw_array = [] + for i in 0..255 + raw << i + raw_array << i + end + json = raw.to_json_raw + json_raw_object = raw.to_json_raw_object + hash = { 'json_class' => 'String', 'raw'=> raw_array } + assert_equal hash, json_raw_object + assert_match(/\A\{.*\}\z/, json) + assert_match(/"json_class":"String"/, json) + assert_match(/"raw":\[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255\]/, json) + raw_again = parse(json, :create_additions => true) + assert_equal raw, raw_again + end + + MyJsonStruct = Struct.new 'MyJsonStruct', :foo, :bar + + def test_core + t = Time.now + assert_equal t, JSON(JSON(t), :create_additions => true) + d = Date.today + assert_equal d, JSON(JSON(d), :create_additions => true) + d = DateTime.civil(2007, 6, 14, 14, 57, 10, Rational(1, 12), 2299161) + assert_equal d, JSON(JSON(d), :create_additions => true) + assert_equal 1..10, JSON(JSON(1..10), :create_additions => true) + assert_equal 1...10, JSON(JSON(1...10), :create_additions => true) + assert_equal "a".."c", JSON(JSON("a".."c"), :create_additions => true) + assert_equal "a"..."c", JSON(JSON("a"..."c"), :create_additions => true) + s = MyJsonStruct.new 4711, 'foot' + assert_equal s, JSON(JSON(s), :create_additions => true) + struct = Struct.new :foo, :bar + s = struct.new 4711, 'foot' + assert_raise(JSONError) { JSON(s) } + begin + raise TypeError, "test me" + rescue TypeError => e + e_json = JSON.generate e + e_again = JSON e_json, :create_additions => true + assert_kind_of TypeError, e_again + assert_equal e.message, e_again.message + assert_equal e.backtrace, e_again.backtrace + end + assert_equal(/foo/, JSON(JSON(/foo/), :create_additions => true)) + assert_equal(/foo/i, JSON(JSON(/foo/i), :create_additions => true)) + end + + def test_utc_datetime + now = Time.now + d = DateTime.parse(now.to_s, :create_additions => true) # usual case + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.parse(now.utc.to_s) # of = 0 + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.civil(2008, 6, 17, 11, 48, 32, Rational(1,24)) + assert_equal d, parse(d.to_json, :create_additions => true) + d = DateTime.civil(2008, 6, 17, 11, 48, 32, Rational(12,24)) + assert_equal d, parse(d.to_json, :create_additions => true) + end + + def test_rational_complex + assert_equal Rational(2, 9), parse(JSON(Rational(2, 9)), :create_additions => true) + assert_equal Complex(2, 9), parse(JSON(Complex(2, 9)), :create_additions => true) + end + + def test_bigdecimal + assert_equal BigDecimal('3.141', 23), JSON(JSON(BigDecimal('3.141', 23)), :create_additions => true) + assert_equal BigDecimal('3.141', 666), 
JSON(JSON(BigDecimal('3.141', 666)), :create_additions => true) + end + + def test_ostruct + o = OpenStruct.new + # XXX this won't work; o.foo = { :bar => true } + o.foo = { 'bar' => true } + assert_equal o, parse(JSON(o), :create_additions => true) + end + + def test_set + s = Set.new([:a, :b, :c, :a]) + assert_equal s, JSON.parse(JSON(s), :create_additions => true) + ss = SortedSet.new([:d, :b, :a, :c]) + ss_again = JSON.parse(JSON(ss), :create_additions => true) + assert_kind_of ss.class, ss_again + assert_equal ss, ss_again + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_common_interface_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_common_interface_test.rb new file mode 100644 index 0000000..53f335e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_common_interface_test.rb @@ -0,0 +1,126 @@ +#frozen_string_literal: false +require 'test_helper' +require 'stringio' +require 'tempfile' + +class JSONCommonInterfaceTest < Test::Unit::TestCase + include JSON + + def setup + @hash = { + 'a' => 2, + 'b' => 3.141, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "\"\0\037", + 'h' => 1000.0, + 'i' => 0.001 + } + @json = '{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},'\ + '"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}' + end + + def test_index + assert_equal @json, JSON[@hash] + assert_equal @hash, JSON[@json] + end + + def test_parser + assert_match(/::Parser\z/, JSON.parser.name) + end + + def test_generator + assert_match(/::Generator\z/, JSON.generator.name) + end + + def test_state + assert_match(/::Generator::State\z/, JSON.state.name) + end + + def test_create_id + assert_equal 'json_class', JSON.create_id + JSON.create_id = 'foo_bar' + assert_equal 'foo_bar', JSON.create_id + ensure + JSON.create_id = 'json_class' + end + + def test_deep_const_get + assert_raise(ArgumentError) { JSON.deep_const_get('Nix::Da') } + assert_equal File::SEPARATOR, JSON.deep_const_get('File::SEPARATOR') + end + + def test_parse + assert_equal [ 1, 2, 3, ], JSON.parse('[ 1, 2, 3 ]') + end + + def test_parse_bang + assert_equal [ 1, Infinity, 3, ], JSON.parse!('[ 1, Infinity, 3 ]') + end + + def test_generate + assert_equal '[1,2,3]', JSON.generate([ 1, 2, 3 ]) + end + + def test_fast_generate + assert_equal '[1,2,3]', JSON.generate([ 1, 2, 3 ]) + end + + def test_pretty_generate + assert_equal "[\n 1,\n 2,\n 3\n]", JSON.pretty_generate([ 1, 2, 3 ]) + end + + def test_load + assert_equal @hash, JSON.load(@json) + tempfile = Tempfile.open('@json') + tempfile.write @json + tempfile.rewind + assert_equal @hash, JSON.load(tempfile) + stringio = StringIO.new(@json) + stringio.rewind + assert_equal @hash, JSON.load(stringio) + assert_equal nil, JSON.load(nil) + assert_equal nil, JSON.load('') + ensure + tempfile.close! + end + + def test_load_with_options + json = '{ "foo": NaN }' + assert JSON.load(json, nil, :allow_nan => true)['foo'].nan? 
+ end + + def test_load_null + assert_equal nil, JSON.load(nil, nil, :allow_blank => true) + assert_raise(TypeError) { JSON.load(nil, nil, :allow_blank => false) } + assert_raise(JSON::ParserError) { JSON.load('', nil, :allow_blank => false) } + end + + def test_dump + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + assert_equal too_deep, dump(eval(too_deep)) + assert_kind_of String, Marshal.dump(eval(too_deep)) + assert_raise(ArgumentError) { dump(eval(too_deep), 100) } + assert_raise(ArgumentError) { Marshal.dump(eval(too_deep), 100) } + assert_equal too_deep, dump(eval(too_deep), 101) + assert_kind_of String, Marshal.dump(eval(too_deep), 101) + output = StringIO.new + dump(eval(too_deep), output) + assert_equal too_deep, output.string + output = StringIO.new + dump(eval(too_deep), output, 101) + assert_equal too_deep, output.string + end + + def test_dump_should_modify_defaults + max_nesting = JSON.dump_default_options[:max_nesting] + dump([], StringIO.new, 10) + assert_equal max_nesting, JSON.dump_default_options[:max_nesting] + end + + def test_JSON + assert_equal @json, JSON(@hash) + assert_equal @hash, JSON(@json) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_encoding_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_encoding_test.rb new file mode 100644 index 0000000..cc7b715 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_encoding_test.rb @@ -0,0 +1,107 @@ +# encoding: utf-8 +#frozen_string_literal: false +require 'test_helper' + +class JSONEncodingTest < Test::Unit::TestCase + include JSON + + def setup + @utf_8 = '"© ≠ €!"' + @ascii_8bit = @utf_8.dup.force_encoding('ascii-8bit') + @parsed = "© ≠ €!" + @generated = '"\u00a9 \u2260 \u20ac!"' + if String.method_defined?(:encode) + @utf_16_data = @parsed.encode('utf-16be', 'utf-8') + @utf_16be = @utf_8.encode('utf-16be', 'utf-8') + @utf_16le = @utf_8.encode('utf-16le', 'utf-8') + @utf_32be = @utf_8.encode('utf-32be', 'utf-8') + @utf_32le = @utf_8.encode('utf-32le', 'utf-8') + else + require 'iconv' + @utf_16_data, = Iconv.iconv('utf-16be', 'utf-8', @parsed) + @utf_16be, = Iconv.iconv('utf-16be', 'utf-8', @utf_8) + @utf_16le, = Iconv.iconv('utf-16le', 'utf-8', @utf_8) + @utf_32be, = Iconv.iconv('utf-32be', 'utf-8', @utf_8) + @utf_32le, = Iconv.iconv('utf-32le', 'utf-8', @utf_8) + end + end + + def test_parse + assert_equal @parsed, JSON.parse(@ascii_8bit) + assert_equal @parsed, JSON.parse(@utf_8) + assert_equal @parsed, JSON.parse(@utf_16be) + assert_equal @parsed, JSON.parse(@utf_16le) + assert_equal @parsed, JSON.parse(@utf_32be) + assert_equal @parsed, JSON.parse(@utf_32le) + end + + def test_generate + assert_equal @generated, JSON.generate(@parsed, :ascii_only => true) + assert_equal @generated, JSON.generate(@utf_16_data, :ascii_only => true) + end + + def test_unicode + assert_equal '""', ''.to_json + assert_equal '"\\b"', "\b".to_json + assert_equal '"\u0001"', 0x1.chr.to_json + assert_equal '"\u001f"', 0x1f.chr.to_json + assert_equal '" "', ' '.to_json + assert_equal "\"#{0x7f.chr}\"", 0x7f.chr.to_json + utf8 = [ "© ≠ €! \01" ] + json = '["© ≠ €! \u0001"]' + assert_equal json, utf8.to_json(:ascii_only => false) + assert_equal utf8, parse(json) + json = '["\u00a9 \u2260 \u20ac! 
\u0001"]' + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + utf8 = ["\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212"] + json = "[\"\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212\"]" + assert_equal utf8, parse(json) + assert_equal json, utf8.to_json(:ascii_only => false) + utf8 = ["\343\201\202\343\201\204\343\201\206\343\201\210\343\201\212"] + assert_equal utf8, parse(json) + json = "[\"\\u3042\\u3044\\u3046\\u3048\\u304a\"]" + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + utf8 = ['საქართველო'] + json = '["საქართველო"]' + assert_equal json, utf8.to_json(:ascii_only => false) + json = "[\"\\u10e1\\u10d0\\u10e5\\u10d0\\u10e0\\u10d7\\u10d5\\u10d4\\u10da\\u10dd\"]" + assert_equal json, utf8.to_json(:ascii_only => true) + assert_equal utf8, parse(json) + assert_equal '["Ã"]', generate(["Ã"], :ascii_only => false) + assert_equal '["\\u00c3"]', generate(["Ã"], :ascii_only => true) + assert_equal ["€"], parse('["\u20ac"]') + utf8 = ["\xf0\xa0\x80\x81"] + json = "[\"\xf0\xa0\x80\x81\"]" + assert_equal json, generate(utf8, :ascii_only => false) + assert_equal utf8, parse(json) + json = '["\ud840\udc01"]' + assert_equal json, generate(utf8, :ascii_only => true) + assert_equal utf8, parse(json) + assert_raise(JSON::ParserError) { parse('"\u"') } + assert_raise(JSON::ParserError) { parse('"\ud800"') } + end + + def test_chars + (0..0x7f).each do |i| + json = '["\u%04x"]' % i + if RUBY_VERSION >= "1.9." + i = i.chr + end + assert_equal i, parse(json).first[0] + if i == ?\b + generated = generate(["" << i]) + assert '["\b"]' == generated || '["\10"]' == generated + elsif [?\n, ?\r, ?\t, ?\f].include?(i) + assert_equal '[' << ('' << i).dump << ']', generate(["" << i]) + elsif i.chr < 0x20.chr + assert_equal json, generate(["" << i]) + end + end + assert_raise(JSON::GeneratorError) do + generate(["\x80"], :ascii_only => true) + end + assert_equal "\302\200", parse('["\u0080"]').first + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_ext_parser_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_ext_parser_test.rb new file mode 100644 index 0000000..c5a030e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_ext_parser_test.rb @@ -0,0 +1,15 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONExtParserTest < Test::Unit::TestCase + if defined?(JSON::Ext::Parser) + def test_allocate + parser = JSON::Ext::Parser.new("{}") + assert_raise(TypeError, '[ruby-core:35079]') do + parser.__send__(:initialize, "{}") + end + parser = JSON::Ext::Parser.allocate + assert_raise(TypeError, '[ruby-core:35079]') { parser.source } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_fixtures_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_fixtures_test.rb new file mode 100644 index 0000000..b2a1ec8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_fixtures_test.rb @@ -0,0 +1,37 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONFixturesTest < Test::Unit::TestCase + def setup + fixtures = File.join(File.dirname(__FILE__), 'fixtures/{fail,pass}*.json') + passed, failed = Dir[fixtures].partition { |f| f['pass'] } + @passed = passed.inject([]) { |a, f| a << [ f, File.read(f) ] }.sort + @failed = failed.inject([]) { |a, f| a << [ f, File.read(f) ] }.sort + end + + def test_passing + for name, source in @passed + begin + assert JSON.parse(source), + "Did not pass for 
fixture '#{name}': #{source.inspect}" + rescue => e + warn "\nCaught #{e.class}(#{e}) for fixture '#{name}': #{source.inspect}\n#{e.backtrace * "\n"}" + raise e + end + end + end + + def test_failing + for name, source in @failed + assert_raise(JSON::ParserError, JSON::NestingError, + "Did not fail for fixture '#{name}': #{source.inspect}") do + JSON.parse(source) + end + end + end + + def test_sanity + assert(@passed.size > 5) + assert(@failed.size > 20) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_generator_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_generator_test.rb new file mode 100644 index 0000000..ee19fa5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_generator_test.rb @@ -0,0 +1,421 @@ +#!/usr/bin/env ruby +# encoding: utf-8 +# frozen_string_literal: false + +require 'test_helper' + +class JSONGeneratorTest < Test::Unit::TestCase + include JSON + + def setup + @hash = { + 'a' => 2, + 'b' => 3.141, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "\"\0\037", + 'h' => 1000.0, + 'i' => 0.001 + } + @json2 = '{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},' + + '"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}' + @json3 = <<'EOT'.chomp +{ + "a": 2, + "b": 3.141, + "c": "c", + "d": [ + 1, + "b", + 3.14 + ], + "e": { + "foo": "bar" + }, + "g": "\"\u0000\u001f", + "h": 1000.0, + "i": 0.001 +} +EOT + end + + def silence + v = $VERBOSE + $VERBOSE = nil + yield + ensure + $VERBOSE = v + end + + def test_remove_const_segv + return if RUBY_ENGINE == 'jruby' + stress = GC.stress + const = JSON::SAFE_STATE_PROTOTYPE.dup + + bignum_too_long_to_embed_as_string = 1234567890123456789012345 + expect = bignum_too_long_to_embed_as_string.to_s + GC.stress = true + + 10.times do |i| + tmp = bignum_too_long_to_embed_as_string.to_json + raise "'\#{expect}' is expected, but '\#{tmp}'" unless tmp == expect + end + + silence do + JSON.const_set :SAFE_STATE_PROTOTYPE, nil + end + + 10.times do |i| + assert_raise TypeError do + bignum_too_long_to_embed_as_string.to_json + end + end + ensure + GC.stress = stress + silence do + JSON.const_set :SAFE_STATE_PROTOTYPE, const + end + end if JSON.const_defined?("Ext") + + def test_generate + json = generate(@hash) + assert_equal(parse(@json2), parse(json)) + json = JSON[@hash] + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = generate({1=>2}) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', generate(666) + end + + def test_generate_pretty + json = pretty_generate(@hash) + # hashes aren't (insertion) ordered on every ruby implementation + # assert_equal(@json3, json) + assert_equal(parse(@json3), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = pretty_generate({1=>2}) + assert_equal(<<'EOT'.chomp, json) +{ + "1": 2 +} +EOT + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', pretty_generate(666) + end + + def test_generate_custom + state = State.new(:space_before => " ", :space => " ", :indent => "", :object_nl => "\n", :array_nl => "") + json = generate({1=>{2=>3,4=>[5,6]}}, state) + assert_equal(<<'EOT'.chomp, json) +{ +"1" : { +"2" : 3, +"4" : [5,6] +} +} +EOT + end + + def test_fast_generate + json = fast_generate(@hash) + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = 
fast_generate({1=>2}) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', fast_generate(666) + end + + def test_own_state + state = State.new + json = generate(@hash, state) + assert_equal(parse(@json2), parse(json)) + parsed_json = parse(json) + assert_equal(@hash, parsed_json) + json = generate({1=>2}, state) + assert_equal('{"1":2}', json) + parsed_json = parse(json) + assert_equal({"1"=>2}, parsed_json) + assert_equal '666', generate(666, state) + end + + def test_states + json = generate({1=>2}, nil) + assert_equal('{"1":2}', json) + s = JSON.state.new + assert s.check_circular? + assert s[:check_circular?] + h = { 1=>2 } + h[3] = h + assert_raise(JSON::NestingError) { generate(h) } + assert_raise(JSON::NestingError) { generate(h, s) } + s = JSON.state.new + a = [ 1, 2 ] + a << a + assert_raise(JSON::NestingError) { generate(a, s) } + assert s.check_circular? + assert s[:check_circular?] + end + + def test_pretty_state + state = PRETTY_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "\n", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => " ", + :max_nesting => 100, + :object_nl => "\n", + :space => " ", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_safe_state + state = SAFE_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => "", + :max_nesting => 100, + :object_nl => "", + :space => "", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_fast_state + state = FAST_STATE_PROTOTYPE.dup + assert_equal({ + :allow_nan => false, + :array_nl => "", + :ascii_only => false, + :buffer_initial_length => 1024, + :depth => 0, + :indent => "", + :max_nesting => 0, + :object_nl => "", + :space => "", + :space_before => "", + }.sort_by { |n,| n.to_s }, state.to_h.sort_by { |n,| n.to_s }) + end + + def test_allow_nan + assert_raise(GeneratorError) { generate([JSON::NaN]) } + assert_equal '[NaN]', generate([JSON::NaN], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::NaN]) } + assert_raise(GeneratorError) { pretty_generate([JSON::NaN]) } + assert_equal "[\n NaN\n]", pretty_generate([JSON::NaN], :allow_nan => true) + assert_raise(GeneratorError) { generate([JSON::Infinity]) } + assert_equal '[Infinity]', generate([JSON::Infinity], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::Infinity]) } + assert_raise(GeneratorError) { pretty_generate([JSON::Infinity]) } + assert_equal "[\n Infinity\n]", pretty_generate([JSON::Infinity], :allow_nan => true) + assert_raise(GeneratorError) { generate([JSON::MinusInfinity]) } + assert_equal '[-Infinity]', generate([JSON::MinusInfinity], :allow_nan => true) + assert_raise(GeneratorError) { fast_generate([JSON::MinusInfinity]) } + assert_raise(GeneratorError) { pretty_generate([JSON::MinusInfinity]) } + assert_equal "[\n -Infinity\n]", pretty_generate([JSON::MinusInfinity], :allow_nan => true) + end + + def test_depth + ary = []; ary << ary + assert_equal 0, JSON::SAFE_STATE_PROTOTYPE.depth + assert_raise(JSON::NestingError) { generate(ary) } + assert_equal 0, JSON::SAFE_STATE_PROTOTYPE.depth + assert_equal 0, JSON::PRETTY_STATE_PROTOTYPE.depth + assert_raise(JSON::NestingError) { JSON.pretty_generate(ary) } + assert_equal 0, JSON::PRETTY_STATE_PROTOTYPE.depth + s = 
JSON.state.new + assert_equal 0, s.depth + assert_raise(JSON::NestingError) { ary.to_json(s) } + assert_equal 100, s.depth + end + + def test_buffer_initial_length + s = JSON.state.new + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = 0 + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = -1 + assert_equal 1024, s.buffer_initial_length + s.buffer_initial_length = 128 + assert_equal 128, s.buffer_initial_length + end + + def test_gc + if respond_to?(:assert_in_out_err) + assert_in_out_err(%w[-rjson --disable-gems], <<-EOS, [], []) + bignum_too_long_to_embed_as_string = 1234567890123456789012345 + expect = bignum_too_long_to_embed_as_string.to_s + GC.stress = true + + 10.times do |i| + tmp = bignum_too_long_to_embed_as_string.to_json + raise "'\#{expect}' is expected, but '\#{tmp}'" unless tmp == expect + end + EOS + end + end if GC.respond_to?(:stress=) + + def test_configure_using_configure_and_merge + numbered_state = { + :indent => "1", + :space => '2', + :space_before => '3', + :object_nl => '4', + :array_nl => '5' + } + state1 = JSON.state.new + state1.merge(numbered_state) + assert_equal '1', state1.indent + assert_equal '2', state1.space + assert_equal '3', state1.space_before + assert_equal '4', state1.object_nl + assert_equal '5', state1.array_nl + state2 = JSON.state.new + state2.configure(numbered_state) + assert_equal '1', state2.indent + assert_equal '2', state2.space + assert_equal '3', state2.space_before + assert_equal '4', state2.object_nl + assert_equal '5', state2.array_nl + end + + def test_configure_hash_conversion + state = JSON.state.new + state.configure(:indent => '1') + assert_equal '1', state.indent + state = JSON.state.new + foo = 'foo' + assert_raise(TypeError) do + state.configure(foo) + end + def foo.to_h + { :indent => '2' } + end + state.configure(foo) + assert_equal '2', state.indent + end + + if defined?(JSON::Ext::Generator) + def test_broken_bignum # [ruby-core:38867] + pid = fork do + x = 1 << 64 + x.class.class_eval do + def to_s + end + end + begin + JSON::Ext::Generator::State.new.generate(x) + exit 1 + rescue TypeError + exit 0 + end + end + _, status = Process.waitpid2(pid) + assert status.success? 
+ rescue NotImplementedError + # forking to avoid modifying core class of a parent process and + # introducing race conditions of tests are run in parallel + end + end + + def test_hash_likeness_set_symbol + state = JSON.state.new + assert_equal nil, state[:foo] + assert_equal nil.class, state[:foo].class + assert_equal nil, state['foo'] + state[:foo] = :bar + assert_equal :bar, state[:foo] + assert_equal :bar, state['foo'] + state_hash = state.to_hash + assert_kind_of Hash, state_hash + assert_equal :bar, state_hash[:foo] + end + + def test_hash_likeness_set_string + state = JSON.state.new + assert_equal nil, state[:foo] + assert_equal nil, state['foo'] + state['foo'] = :bar + assert_equal :bar, state[:foo] + assert_equal :bar, state['foo'] + state_hash = state.to_hash + assert_kind_of Hash, state_hash + assert_equal :bar, state_hash[:foo] + end + + def test_json_generate + assert_raise JSON::GeneratorError do + assert_equal true, generate(["\xea"]) + end + end + + def test_nesting + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + too_deep_ary = eval too_deep + assert_raise(JSON::NestingError) { generate too_deep_ary } + assert_raise(JSON::NestingError) { generate too_deep_ary, :max_nesting => 100 } + ok = generate too_deep_ary, :max_nesting => 101 + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => nil + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => false + assert_equal too_deep, ok + ok = generate too_deep_ary, :max_nesting => 0 + assert_equal too_deep, ok + end + + def test_backslash + data = [ '\\.(?i:gif|jpe?g|png)$' ] + json = '["\\\\.(?i:gif|jpe?g|png)$"]' + assert_equal json, generate(data) + # + data = [ '\\"' ] + json = '["\\\\\""]' + assert_equal json, generate(data) + # + data = [ '/' ] + json = '["/"]' + assert_equal json, generate(data) + # + data = ['"'] + json = '["\""]' + assert_equal json, generate(data) + # + data = ["'"] + json = '["\\\'"]' + assert_equal '["\'"]', generate(data) + end + + def test_string_subclass + s = Class.new(String) do + def to_s; self; end + undef to_json + end + assert_nothing_raised(SystemStackError) do + assert_equal '["foo"]', JSON.generate([s.new('foo')]) + end + end + + if defined?(Encoding) + def test_nonutf8_encoding + assert_equal("\"5\u{b0}\"", "5\xb0".force_encoding("iso-8859-1").to_json) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_generic_object_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_generic_object_test.rb new file mode 100644 index 0000000..82742dc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_generic_object_test.rb @@ -0,0 +1,82 @@ +#frozen_string_literal: false +require 'test_helper' + +class JSONGenericObjectTest < Test::Unit::TestCase + include JSON + + def setup + @go = GenericObject[ :a => 1, :b => 2 ] + end + + def test_attributes + assert_equal 1, @go.a + assert_equal 1, @go[:a] + assert_equal 2, @go.b + assert_equal 2, @go[:b] + assert_nil @go.c + assert_nil @go[:c] + end + + def test_generate_json + switch_json_creatable do + assert_equal @go, JSON(JSON(@go), :create_additions => true) + end + end + + def test_parse_json + assert_kind_of Hash, + JSON( + '{ "json_class": "JSON::GenericObject", "a": 1, "b": 2 }', + :create_additions => true + ) + switch_json_creatable do + assert_equal @go, l = + JSON( + '{ 
"json_class": "JSON::GenericObject", "a": 1, "b": 2 }', + :create_additions => true + ) + assert_equal 1, l.a + assert_equal @go, + l = JSON('{ "a": 1, "b": 2 }', :object_class => GenericObject) + assert_equal 1, l.a + assert_equal GenericObject[:a => GenericObject[:b => 2]], + l = JSON('{ "a": { "b": 2 } }', :object_class => GenericObject) + assert_equal 2, l.a.b + end + end + + def test_from_hash + result = GenericObject.from_hash( + :foo => { :bar => { :baz => true }, :quux => [ { :foobar => true } ] }) + assert_kind_of GenericObject, result.foo + assert_kind_of GenericObject, result.foo.bar + assert_equal true, result.foo.bar.baz + assert_kind_of GenericObject, result.foo.quux.first + assert_equal true, result.foo.quux.first.foobar + assert_equal true, GenericObject.from_hash(true) + end + + def test_json_generic_object_load + empty = JSON::GenericObject.load(nil) + assert_kind_of JSON::GenericObject, empty + simple_json = '{"json_class":"JSON::GenericObject","hello":"world"}' + simple = JSON::GenericObject.load(simple_json) + assert_kind_of JSON::GenericObject, simple + assert_equal "world", simple.hello + converting = JSON::GenericObject.load('{ "hello": "world" }') + assert_kind_of JSON::GenericObject, converting + assert_equal "world", converting.hello + + json = JSON::GenericObject.dump(JSON::GenericObject[:hello => 'world']) + assert_equal JSON(json), JSON('{"json_class":"JSON::GenericObject","hello":"world"}') + end + + private + + def switch_json_creatable + JSON::GenericObject.json_creatable = true + yield + ensure + JSON::GenericObject.json_creatable = false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_parser_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_parser_test.rb new file mode 100644 index 0000000..9946dd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_parser_test.rb @@ -0,0 +1,472 @@ +# encoding: utf-8 +# frozen_string_literal: false +require 'test_helper' +require 'stringio' +require 'tempfile' +require 'ostruct' +require 'bigdecimal' + +class JSONParserTest < Test::Unit::TestCase + include JSON + + def test_construction + parser = JSON::Parser.new('test') + assert_equal 'test', parser.source + end + + def test_argument_encoding + source = "{}".encode("UTF-16") + JSON::Parser.new(source) + assert_equal Encoding::UTF_16, source.encoding + end if defined?(Encoding::UTF_16) + + def test_error_message_encoding + bug10705 = '[ruby-core:67386] [Bug #10705]' + json = ".\"\xE2\x88\x9A\"".force_encoding(Encoding::UTF_8) + e = assert_raise(JSON::ParserError) { + JSON::Ext::Parser.new(json).parse + } + assert_equal(Encoding::UTF_8, e.message.encoding, bug10705) + assert_include(e.message, json, bug10705) + end if defined?(Encoding::UTF_8) and defined?(JSON::Ext::Parser) + + def test_parsing + parser = JSON::Parser.new('"test"') + assert_equal 'test', parser.parse + end + + def test_parser_reset + parser = Parser.new('{"a":"b"}') + assert_equal({ 'a' => 'b' }, parser.parse) + assert_equal({ 'a' => 'b' }, parser.parse) + end + + def test_parse_values + assert_equal(nil, parse('null')) + assert_equal(false, parse('false')) + assert_equal(true, parse('true')) + assert_equal(-23, parse('-23')) + assert_equal(23, parse('23')) + assert_in_delta(0.23, parse('0.23'), 1e-2) + assert_in_delta(0.0, parse('0e0'), 1e-2) + assert_equal("", parse('""')) + assert_equal("foobar", parse('"foobar"')) + end + + def test_parse_simple_arrays + assert_equal([], parse('[]')) + assert_equal([], parse(' [ ] ')) + assert_equal([ nil ], 
parse('[null]')) + assert_equal([ false ], parse('[false]')) + assert_equal([ true ], parse('[true]')) + assert_equal([ -23 ], parse('[-23]')) + assert_equal([ 23 ], parse('[23]')) + assert_equal_float([ 0.23 ], parse('[0.23]')) + assert_equal_float([ 0.0 ], parse('[0e0]')) + assert_equal([""], parse('[""]')) + assert_equal(["foobar"], parse('["foobar"]')) + assert_equal([{}], parse('[{}]')) + end + + def test_parse_simple_objects + assert_equal({}, parse('{}')) + assert_equal({}, parse(' { } ')) + assert_equal({ "a" => nil }, parse('{ "a" : null}')) + assert_equal({ "a" => nil }, parse('{"a":null}')) + assert_equal({ "a" => false }, parse('{ "a" : false } ')) + assert_equal({ "a" => false }, parse('{"a":false}')) + assert_raise(JSON::ParserError) { parse('{false}') } + assert_equal({ "a" => true }, parse('{"a":true}')) + assert_equal({ "a" => true }, parse(' { "a" : true } ')) + assert_equal({ "a" => -23 }, parse(' { "a" : -23 } ')) + assert_equal({ "a" => -23 }, parse(' { "a" : -23 } ')) + assert_equal({ "a" => 23 }, parse('{"a":23 } ')) + assert_equal({ "a" => 23 }, parse(' { "a" : 23 } ')) + assert_equal({ "a" => 0.23 }, parse(' { "a" : 0.23 } ')) + assert_equal({ "a" => 0.23 }, parse(' { "a" : 0.23 } ')) + end + + def test_parse_numbers + assert_raise(JSON::ParserError) { parse('+23.2') } + assert_raise(JSON::ParserError) { parse('+23') } + assert_raise(JSON::ParserError) { parse('.23') } + assert_raise(JSON::ParserError) { parse('023') } + assert_equal(23, parse('23')) + assert_equal(-23, parse('-23')) + assert_equal_float(3.141, parse('3.141')) + assert_equal_float(-3.141, parse('-3.141')) + assert_equal_float(3.141, parse('3141e-3')) + assert_equal_float(3.141, parse('3141.1e-3')) + assert_equal_float(3.141, parse('3141E-3')) + assert_equal_float(3.141, parse('3141.0E-3')) + assert_equal_float(-3.141, parse('-3141.0e-3')) + assert_equal_float(-3.141, parse('-3141e-3')) + assert_raise(ParserError) { parse('NaN') } + assert parse('NaN', :allow_nan => true).nan? 
+ assert_raise(ParserError) { parse('Infinity') } + assert_equal(1.0/0, parse('Infinity', :allow_nan => true)) + assert_raise(ParserError) { parse('-Infinity') } + assert_equal(-1.0/0, parse('-Infinity', :allow_nan => true)) + end + + def test_parse_bigdecimals + assert_equal(BigDecimal, JSON.parse('{"foo": 9.01234567890123456789}', decimal_class: BigDecimal)["foo"].class) + assert_equal(BigDecimal("0.901234567890123456789E1"),JSON.parse('{"foo": 9.01234567890123456789}', decimal_class: BigDecimal)["foo"] ) + end + + if Array.method_defined?(:permutation) + def test_parse_more_complex_arrays + a = [ nil, false, true, "foßbar", [ "n€st€d", true ], { "nested" => true, "n€ßt€ð2" => {} }] + a.permutation.each do |perm| + json = pretty_generate(perm) + assert_equal perm, parse(json) + end + end + + def test_parse_complex_objects + a = [ nil, false, true, "foßbar", [ "n€st€d", true ], { "nested" => true, "n€ßt€ð2" => {} }] + a.permutation.each do |perm| + s = "a" + orig_obj = perm.inject({}) { |h, x| h[s.dup] = x; s = s.succ; h } + json = pretty_generate(orig_obj) + assert_equal orig_obj, parse(json) + end + end + end + + def test_parse_arrays + assert_equal([1,2,3], parse('[1,2,3]')) + assert_equal([1.2,2,3], parse('[1.2,2,3]')) + assert_equal([[],[[],[]]], parse('[[],[[],[]]]')) + assert_equal([], parse('[]')) + assert_equal([], parse(' [ ] ')) + assert_equal([1], parse('[1]')) + assert_equal([1], parse(' [ 1 ] ')) + ary = [[1], ["foo"], [3.14], [4711.0], [2.718], [nil], + [[1, -2, 3]], [false], [true]] + assert_equal(ary, + parse('[[1],["foo"],[3.14],[47.11e+2],[2718.0E-3],[null],[[1,-2,3]],[false],[true]]')) + assert_equal(ary, parse(%Q{ [ [1] , ["foo"] , [3.14] \t , [47.11e+2]\s + , [2718.0E-3 ],\r[ null] , [[1, -2, 3 ]], [false ],[ true]\n ] })) + end + + def test_parse_json_primitive_values + assert_raise(JSON::ParserError) { parse('') } + assert_raise(TypeError) { parse(nil) } + assert_raise(JSON::ParserError) { parse(' /* foo */ ') } + assert_equal nil, parse('null') + assert_equal false, parse('false') + assert_equal true, parse('true') + assert_equal 23, parse('23') + assert_equal 1, parse('1') + assert_equal_float 3.141, parse('3.141'), 1E-3 + assert_equal 2 ** 64, parse('18446744073709551616') + assert_equal 'foo', parse('"foo"') + assert parse('NaN', :allow_nan => true).nan? + assert parse('Infinity', :allow_nan => true).infinite? + assert parse('-Infinity', :allow_nan => true).infinite? 
+ assert_raise(JSON::ParserError) { parse('[ 1, ]') } + end + + def test_parse_some_strings + assert_equal([""], parse('[""]')) + assert_equal(["\\"], parse('["\\\\"]')) + assert_equal(['"'], parse('["\""]')) + assert_equal(['\\"\\'], parse('["\\\\\\"\\\\"]')) + assert_equal( + ["\"\b\n\r\t\0\037"], + parse('["\"\b\n\r\t\u0000\u001f"]') + ) + end + + def test_parse_big_integers + json1 = JSON(orig = (1 << 31) - 1) + assert_equal orig, parse(json1) + json2 = JSON(orig = 1 << 31) + assert_equal orig, parse(json2) + json3 = JSON(orig = (1 << 62) - 1) + assert_equal orig, parse(json3) + json4 = JSON(orig = 1 << 62) + assert_equal orig, parse(json4) + json5 = JSON(orig = 1 << 64) + assert_equal orig, parse(json5) + end + + def test_some_wrong_inputs + assert_raise(ParserError) { parse('[] bla') } + assert_raise(ParserError) { parse('[] 1') } + assert_raise(ParserError) { parse('[] []') } + assert_raise(ParserError) { parse('[] {}') } + assert_raise(ParserError) { parse('{} []') } + assert_raise(ParserError) { parse('{} {}') } + assert_raise(ParserError) { parse('[NULL]') } + assert_raise(ParserError) { parse('[FALSE]') } + assert_raise(ParserError) { parse('[TRUE]') } + assert_raise(ParserError) { parse('[07] ') } + assert_raise(ParserError) { parse('[0a]') } + assert_raise(ParserError) { parse('[1.]') } + assert_raise(ParserError) { parse(' ') } + end + + def test_symbolize_names + assert_equal({ "foo" => "bar", "baz" => "quux" }, + parse('{"foo":"bar", "baz":"quux"}')) + assert_equal({ :foo => "bar", :baz => "quux" }, + parse('{"foo":"bar", "baz":"quux"}', :symbolize_names => true)) + assert_raise(ArgumentError) do + parse('{}', :symbolize_names => true, :create_additions => true) + end + end + + def test_parse_comments + json = < "value1", "key2" => "value2", "key3" => "value3" }, + parse(json)) + json = < "value1" }, parse(json)) + end + + def test_nesting + too_deep = '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]' + too_deep_ary = eval too_deep + assert_raise(JSON::NestingError) { parse too_deep } + assert_raise(JSON::NestingError) { parse too_deep, :max_nesting => 100 } + ok = parse too_deep, :max_nesting => 101 + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => nil + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => false + assert_equal too_deep_ary, ok + ok = parse too_deep, :max_nesting => 0 + assert_equal too_deep_ary, ok + end + + def test_backslash + data = [ '\\.(?i:gif|jpe?g|png)$' ] + json = '["\\\\.(?i:gif|jpe?g|png)$"]' + assert_equal data, parse(json) + # + data = [ '\\"' ] + json = '["\\\\\""]' + assert_equal data, parse(json) + # + json = '["/"]' + data = [ '/' ] + assert_equal data, parse(json) + # + json = '["\""]' + data = ['"'] + assert_equal data, parse(json) + # + json = '["\\\'"]' + data = ["'"] + assert_equal data, parse(json) + end + + class SubArray < Array + def <<(v) + @shifted = true + super + end + + def shifted? + @shifted + end + end + + class SubArray2 < Array + def to_json(*a) + { + JSON.create_id => self.class.name, + 'ary' => to_a, + }.to_json(*a) + end + + def self.json_create(o) + o.delete JSON.create_id + o['ary'] + end + end + + class SubArrayWrapper + def initialize + @data = [] + end + + attr_reader :data + + def [](index) + @data[index] + end + + def <<(value) + @data << value + @shifted = true + end + + def shifted? 
+ @shifted + end + end + + def test_parse_array_custom_array_derived_class + res = parse('[1,2]', :array_class => SubArray) + assert_equal([1,2], res) + assert_equal(SubArray, res.class) + assert res.shifted? + end + + def test_parse_array_custom_non_array_derived_class + res = parse('[1,2]', :array_class => SubArrayWrapper) + assert_equal([1,2], res.data) + assert_equal(SubArrayWrapper, res.class) + assert res.shifted? + end + + def test_parse_object + assert_equal({}, parse('{}')) + assert_equal({}, parse(' { } ')) + assert_equal({'foo'=>'bar'}, parse('{"foo":"bar"}')) + assert_equal({'foo'=>'bar'}, parse(' { "foo" : "bar" } ')) + end + + class SubHash < Hash + def []=(k, v) + @item_set = true + super + end + + def item_set? + @item_set + end + end + + class SubHash2 < Hash + def to_json(*a) + { + JSON.create_id => self.class.name, + }.merge(self).to_json(*a) + end + + def self.json_create(o) + o.delete JSON.create_id + self[o] + end + end + + class SubOpenStruct < OpenStruct + def [](k) + __send__(k) + end + + def []=(k, v) + @item_set = true + __send__("#{k}=", v) + end + + def item_set? + @item_set + end + end + + def test_parse_object_custom_hash_derived_class + res = parse('{"foo":"bar"}', :object_class => SubHash) + assert_equal({"foo" => "bar"}, res) + assert_equal(SubHash, res.class) + assert res.item_set? + end + + def test_parse_object_custom_non_hash_derived_class + res = parse('{"foo":"bar"}', :object_class => SubOpenStruct) + assert_equal "bar", res.foo + assert_equal(SubOpenStruct, res.class) + assert res.item_set? + end + + def test_parse_generic_object + res = parse( + '{"foo":"bar", "baz":{}}', + :object_class => JSON::GenericObject + ) + assert_equal(JSON::GenericObject, res.class) + assert_equal "bar", res.foo + assert_equal "bar", res["foo"] + assert_equal "bar", res[:foo] + assert_equal "bar", res.to_hash[:foo] + assert_equal(JSON::GenericObject, res.baz.class) + end + + def test_generate_core_subclasses_with_new_to_json + obj = SubHash2["foo" => SubHash2["bar" => true]] + obj_json = JSON(obj) + obj_again = parse(obj_json, :create_additions => true) + assert_kind_of SubHash2, obj_again + assert_kind_of SubHash2, obj_again['foo'] + assert obj_again['foo']['bar'] + assert_equal obj, obj_again + assert_equal ["foo"], + JSON(JSON(SubArray2["foo"]), :create_additions => true) + end + + def test_generate_core_subclasses_with_default_to_json + assert_equal '{"foo":"bar"}', JSON(SubHash["foo" => "bar"]) + assert_equal '["foo"]', JSON(SubArray["foo"]) + end + + def test_generate_of_core_subclasses + obj = SubHash["foo" => SubHash["bar" => true]] + obj_json = JSON(obj) + obj_again = JSON(obj_json) + assert_kind_of Hash, obj_again + assert_kind_of Hash, obj_again['foo'] + assert obj_again['foo']['bar'] + assert_equal obj, obj_again + end + + def test_parsing_frozen_ascii8bit_string + assert_equal( + { 'foo' => 'bar' }, + JSON('{ "foo": "bar" }'.force_encoding(Encoding::ASCII_8BIT).freeze) + ) + end + + private + + def assert_equal_float(expected, actual, delta = 1e-2) + Array === expected and expected = expected.first + Array === actual and actual = actual.first + assert_in_delta(expected, actual, delta) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_string_matching_test.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_string_matching_test.rb new file mode 100644 index 0000000..5d55dc3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/json_string_matching_test.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +require 
'test_helper' +require 'time' + +class JSONStringMatchingTest < Test::Unit::TestCase + include JSON + + class TestTime < ::Time + def self.json_create(string) + Time.parse(string) + end + + def to_json(*) + %{"#{strftime('%FT%T%z')}"} + end + + def ==(other) + to_i == other.to_i + end + end + + def test_match_date + t = TestTime.new + t_json = [ t ].to_json + time_regexp = /\A\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4}\z/ + assert_equal [ t ], + parse( + t_json, + :create_additions => true, + :match_string => { time_regexp => TestTime } + ) + assert_equal [ t.strftime('%FT%T%z') ], + parse( + t_json, + :match_string => { time_regexp => TestTime } + ) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/test_helper.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/test_helper.rb new file mode 100644 index 0000000..c5ec0fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tests/test_helper.rb @@ -0,0 +1,17 @@ +case ENV['JSON'] +when 'pure' + $:.unshift 'lib' + require 'json/pure' +when 'ext' + $:.unshift 'ext', 'lib' + require 'json/ext' +else + $:.unshift 'ext', 'lib' + require 'json' +end + +require 'test/unit' +begin + require 'byebug' +rescue LoadError +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/diff.sh b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/diff.sh new file mode 100755 index 0000000..89385b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/diff.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +files=`find ext -name '*.[ch]' -o -name parser.rl` + +for f in $files +do + b=`basename $f` + g=`find ../ruby/ext/json -name $b` + d=`diff -u $f $g` + test -z "$d" && continue + echo "$d" + read -p "Edit diff of $b? " a + case $a in + [yY]*) + vimdiff $f $g + ;; + esac +done diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/fuzz.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/fuzz.rb new file mode 100755 index 0000000..8261930 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/fuzz.rb @@ -0,0 +1,131 @@ +require 'json' + +class Fuzzer + def initialize(n, freqs = {}) + sum = freqs.inject(0.0) { |s, x| s + x.last } + freqs.each_key { |x| freqs[x] /= sum } + s = 0.0 + freqs.each_key do |x| + freqs[x] = s .. (s + t = freqs[x]) + s += t + end + @freqs = freqs + @n = n + @alpha = (0..0xff).to_a + end + + def random_string + s = '' + 30.times { s << @alpha[rand(@alpha.size)] } + s + end + + def pick + r = rand + found = @freqs.find { |k, f| f.include? 
rand } + found && found.first + end + + def make_pick + k = pick + case + when k == Hash, k == Array + k.new + when k == true, k == false, k == nil + k + when k == String + random_string + when k == Fixnum + rand(2 ** 30) - 2 ** 29 + when k == Bignum + rand(2 ** 70) - 2 ** 69 + end + end + + def fuzz(current = nil) + if @n > 0 + case current + when nil + @n -= 1 + current = fuzz [ Hash, Array ][rand(2)].new + when Array + while @n > 0 + @n -= 1 + current << case p = make_pick + when Array, Hash + fuzz(p) + else + p + end + end + when Hash + while @n > 0 + @n -= 1 + current[random_string] = case p = make_pick + when Array, Hash + fuzz(p) + else + p + end + end + end + end + current + end +end + +class MyState < JSON.state + WS = " \r\t\n" + + def initialize + super( + :indent => make_spaces, + :space => make_spaces, + :space_before => make_spaces, + :object_nl => make_spaces, + :array_nl => make_spaces, + :max_nesting => false + ) + end + + def make_spaces + s = '' + rand(1).times { s << WS[rand(WS.size)] } + s + end +end + +n = (ARGV.shift || 500).to_i +loop do + fuzzer = Fuzzer.new(n, + Hash => 25, + Array => 25, + String => 10, + Fixnum => 10, + Bignum => 10, + nil => 5, + true => 5, + false => 5 + ) + o1 = fuzzer.fuzz + json = JSON.generate o1, MyState.new + if $DEBUG + puts "-" * 80 + puts json, json.size + else + puts json.size + end + begin + o2 = JSON.parse(json, :max_nesting => false) + rescue JSON::ParserError => e + puts "Caught #{e.class}: #{e.message}\n#{e.backtrace * "\n"}" + puts "o1 = #{o1.inspect}", "json = #{json}", "json_str = #{json.inspect}" + puts "locals = #{local_variables.inspect}" + exit + end + if o1 != o2 + puts "mismatch", "o1 = #{o1.inspect}", "o2 = #{o2.inspect}", + "json = #{json}", "json_str = #{json.inspect}" + puts "locals = #{local_variables.inspect}" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/server.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/server.rb new file mode 100755 index 0000000..184a01c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.3.1/tools/server.rb @@ -0,0 +1,62 @@ +#!/usr/bin/env ruby +# encoding: utf-8 + +require 'webrick' +include WEBrick +$:.unshift 'ext' +$:.unshift 'lib' +require 'json' + +class JSONServlet < HTTPServlet::AbstractServlet + @@count = 1 + + def do_GET(req, res) + obj = { + "TIME" => Time.now.strftime("%FT%T"), + "foo" => "Bär", + "bar" => "© ≠ €!", + 'a' => 2, + 'b' => 3.141, + 'COUNT' => @@count += 1, + 'c' => 'c', + 'd' => [ 1, "b", 3.14 ], + 'e' => { 'foo' => 'bar' }, + 'g' => "松本行弘", + 'h' => 1000.0, + 'i' => 0.001, + 'j' => "\xf0\xa0\x80\x81", + } + res.body = JSON.generate obj + res['Content-Type'] = "application/json" + end +end + +def create_server(err, dir, port) + dir = File.expand_path(dir) + err.puts "Surf to:", "http://#{Socket.gethostname}:#{port}" + + s = HTTPServer.new( + :Port => port, + :DocumentRoot => dir, + :Logger => WEBrick::Log.new(err), + :AccessLog => [ + [ err, WEBrick::AccessLog::COMMON_LOG_FORMAT ], + [ err, WEBrick::AccessLog::REFERER_LOG_FORMAT ], + [ err, WEBrick::AccessLog::AGENT_LOG_FORMAT ] + ] + ) + s.mount("/json", JSONServlet) + s +end + +default_dir = File.expand_path(File.join(File.dirname(__FILE__), '..', 'data')) +dir = ARGV.shift || default_dir +port = (ARGV.shift || 6666).to_i +s = create_server(STDERR, dir, 6666) +t = Thread.new { s.start } +trap(:INT) do + s.shutdown + t.join + exit +end +sleep diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/CHANGES.md b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/CHANGES.md new file mode 
100644 index 0000000..ba66514 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/CHANGES.md @@ -0,0 +1,457 @@ +# Changes + +### 2021-10-24 (2.6.1) + +* Restore version.rb with 2.6.1 + +### 2021-10-14 (2.6.0) + +* Use `rb_enc_interned_str` if available to reduce allocations in `freeze: true` mode. #451. +* Bump required_ruby_version to 2.3. +* Fix compatibility with `GC.compact`. +* Fix some compilation warnings. #469 + +## 2020-12-22 (2.5.1) + +* Restore the compatibility for constants of JSON class. + +## 2020-12-22 (2.5.0) + +* Ready to Ractor-safe at Ruby 3.0. + +## 2020-12-17 (2.4.1) + +* Restore version.rb with 2.4.1 + +## 2020-12-15 (2.4.0) + +* Implement a freeze: parser option #447 +* Fix an issue with generate_pretty and empty objects in the Ruby and Java implementations #449 +* Fix JSON.load_file doc #448 +* Fix pure parser with unclosed arrays / objects #425 +* bundle the LICENSE file in the gem #444 +* Add an option to escape forward slash character #405 +* RDoc for JSON #439 #446 #442 #434 #433 #430 + +## 2020-06-30 (2.3.1) + +* Spelling and grammar fixes for comments. Pull request #191 by Josh + Kline. +* Enhance generic JSON and #generate docs. Pull request #347 by Victor + Shepelev. +* Add :nodoc: for GeneratorMethods. Pull request #349 by Victor Shepelev. +* Baseline changes to help (JRuby) development. Pull request #371 by Karol + Bucek. +* Add metadata for rubygems.org. Pull request #379 by Alexandre ZANNI. +* Remove invalid JSON.generate description from JSON module rdoc. Pull + request #384 by Jeremy Evans. +* Test with TruffleRuby in CI. Pull request #402 by Benoit Daloze. +* Rdoc enhancements. Pull request #413 by Burdette Lamar. +* Fixtures/ are not being tested... Pull request #416 by Marc-André + Lafortune. +* Use frozen string for hash key. Pull request #420 by Marc-André + Lafortune. +* Added :call-seq: to RDoc for some methods. Pull request #422 by Burdette + Lamar. +* Small typo fix. Pull request #423 by Marc-André Lafortune. + +## 2019-12-11 (2.3.0) + * Fix default of `create_additions` to always be `false` for `JSON(user_input)` + and `JSON.parse(user_input, nil)`. + Note that `JSON.load` remains with default `true` and is meant for internal + serialization of trusted data. [CVE-2020-10663] + * Fix passing args all #to_json in json/add/*. + * Fix encoding issues + * Fix issues of keyword vs positional parameter + * Fix JSON::Parser against bigdecimal updates + * Bug fixes to JRuby port + +## 2019-02-21 (2.2.0) + * Adds support for 2.6 BigDecimal and ruby standard library Set datetype. + +## 2017-04-18 (2.1.0) + * Allow passing of `decimal_class` option to specify a class as which to parse + JSON float numbers. +## 2017-03-23 (2.0.4) + * Raise exception for incomplete unicode surrogates/character escape + sequences. This problem was reported by Daniel Gollahon (dgollahon). + * Fix arbitrary heap exposure problem. This problem was reported by Ahmad + Sherif (ahmadsherif). + +## 2017-01-12 (2.0.3) + * Set `required_ruby_version` to 1.9 + * Some small fixes + +## 2016-07-26 (2.0.2) + * Specify `required_ruby_version` for json\_pure. + * Fix issue #295 failure when parsing frozen strings. + +## 2016-07-01 (2.0.1) + * Fix problem when requiring json\_pure and Parser constant was defined top + level. + * Add `RB_GC_GUARD` to avoid possible GC problem via Pete Johns. + * Store `current_nesting` on stack by Aaron Patterson. + +## 2015-09-11 (2.0.0) + * Now complies to newest JSON RFC 7159. + * Implements compatibility to ruby 2.4 integer unification. 
+ * Drops support for old rubies whose life has ended, that is rubies < 2.0. + Also see https://www.ruby-lang.org/en/news/2014/07/01/eol-for-1-8-7-and-1-9-2/ + * There were still some mentions of dual GPL licensing in the source, but JSON + has just the Ruby license that itself includes an explicit dual-licensing + clause that allows covered software to be distributed under the terms of + the Simplified BSD License instead for all ruby versions >= 1.9.3. This is + however a GPL compatible license according to the Free Software Foundation. + I changed these mentions to be consistent with the Ruby license setting in + the gemspec files which were already correct now. + +## 2015-06-01 (1.8.3) + * Fix potential memory leak, thx to nobu. + +## 2015-01-08 (1.8.2) + * Some performance improvements by Vipul A M . + * Fix by Jason R. Clark to avoid mutation of + `JSON.dump_default_options`. + * More tests by Michael Mac-Vicar and fixing + `space_before` accessor in generator. + * Performance on Jruby improved by Ben Browning . + * Some fixes to be compatible with the new Ruby 2.2 by Zachary Scott + and SHIBATA Hiroshi . + +## 2013-05-13 (1.8.1) + * Remove Rubinius exception since transcoding should be working now. + +## 2013-05-13 (1.8.0) + * Fix https://github.com/flori/json/issues/162 reported by Marc-Andre + Lafortune . Thanks! + * Applied patches by Yui NARUSE to suppress warning with + -Wchar-subscripts and better validate UTF-8 strings. + * Applied patch by ginriki@github to remove unnecessary if. + * Add load/dump interface to `JSON::GenericObject` to make + serialize :some_attribute, `JSON::GenericObject` + work in Rails active models for convenient `SomeModel#some_attribute.foo.bar` + access to serialised JSON data. + +## 2013-02-04 (1.7.7) + * Security fix for JSON create_additions default value and + `JSON::GenericObject`. It should not be possible to create additions unless + explicitly requested by setting the create_additions argument to true or + using the JSON.load/dump interface. If `JSON::GenericObject` is supposed to + be automatically deserialised, this has to be explicitly enabled by + setting + JSON::GenericObject.json_creatable = true + as well. + * Remove useless assert in fbuffer implementation. + * Apply patch attached to https://github.com/flori/json/issues#issue/155 + provided by John Shahid , Thx! + * Add license information to rubygems spec data, reported by Jordi Massaguer Pla . + * Improve documentation, thx to Zachary Scott . + +## 2012-11-29 (1.7.6) + * Add `GeneratorState#merge` alias for JRuby, fix state accessor methods. Thx to + jvshahid@github. + * Increase hash likeness of state objects. + +## 2012-08-17 (1.7.5) + * Fix compilation of extension on older rubies. + +## 2012-07-26 (1.7.4) + * Fix compilation problem on AIX, see https://github.com/flori/json/issues/142 + +## 2012-05-12 (1.7.3) + * Work around Rubinius encoding issues using iconv for conversion instead. + +## 2012-05-11 (1.7.2) + * Fix some encoding issues, that cause problems for the pure and the + extension variant in jruby 1.9 mode. + +## 2012-04-28 (1.7.1) + * Some small fixes for building + +## 2012-04-28 (1.7.0) + * Add `JSON::GenericObject` for method access to objects transmitted via JSON. + +## 2012-04-27 (1.6.7) + * Fix possible crash when trying to parse nil value. + +## 2012-02-11 (1.6.6) + * Propagate src encoding to values made from it (fixes 1.9 mode converting + everything to ascii-8bit; harmless for 1.8 mode too) (Thomas E. 
Enebo + ), should fix + https://github.com/flori/json/issues#issue/119. + * Fix https://github.com/flori/json/issues#issue/124 Thx to Jason Hutchens. + * Fix https://github.com/flori/json/issues#issue/117 + +## 2012-01-15 (1.6.5) + * Vit Ondruch reported a bug that shows up when using + optimisation under GCC 4.7. Thx to him, Bohuslav Kabrda + and Yui NARUSE for debugging and + developing a patch fix. + +## 2011-12-24 (1.6.4) + * Patches that improve speed on JRuby contributed by Charles Oliver Nutter + . + * Support `object_class`/`array_class` with duck typed hash/array. + +## 2011-12-01 (1.6.3) + * Let `JSON.load('')` return nil as well to make mysql text columns (default to + `''`) work better for serialization. + +## 2011-11-21 (1.6.2) + * Add support for OpenStruct and BigDecimal. + * Fix bug when parsing nil in `quirks_mode`. + * Make JSON.dump and JSON.load methods better cooperate with Rails' serialize + method. Just use: + serialize :value, JSON + * Fix bug with time serialization concerning nanoseconds. Thanks for the + patch go to Josh Partlow (jpartlow@github). + * Improve parsing speed for JSON numbers (integers and floats) in a similar way to + what Evan Phoenix suggested in: + https://github.com/flori/json/pull/103 + +## 2011-09-18 (1.6.1) + * Using -target 1.5 to force Java bits to compile with 1.5. + +## 2011-09-12 (1.6.0) + * Extract utilities (prettifier and GUI-editor) in its own gem json-utils. + * Split json/add/core into different files for classes to be serialised. + +## 2011-08-31 (1.5.4) + * Fix memory leak when used from multiple JRuby. (Patch by + jfirebaugh@github). + * Apply patch by Eric Wong that fixes garbage collection problem + reported in https://github.com/flori/json/issues/46. + * Add :quirks_mode option to parser and generator. + * Add support for Rational and Complex number additions via json/add/complex + and json/add/rational requires. + +## 2011-06-20 (1.5.3) + * Alias State#configure method as State#merge to increase duck type synonymy with Hash. + * Add `as_json` methods in json/add/core, so rails can create its json objects + the new way. + +## 2011-05-11 (1.5.2) + * Apply documentation patch by Cory Monty . + * Add gemspecs for json and json\_pure. + * Fix bug in jruby pretty printing. + * Fix bug in `object_class` and `array_class` when inheriting from Hash or + Array. + +## 2011-01-24 (1.5.1) + * Made rake-compiler build a fat binary gem. This should fix issue + https://github.com/flori/json/issues#issue/54. + +## 2011-01-22 (1.5.0) + * Included Java source codes for the Jruby extension made by Daniel Luz + . + * Output full exception message of `deep_const_get` to aid debugging. + * Fixed an issue with ruby 1.9 `Module#const_defined?` method, that was + reported by Riley Goodside. + +## 2010-08-09 (1.4.6) + * Fixed oversight reported in http://github.com/flori/json/issues/closed#issue/23, + always create a new object from the state prototype. + * Made pure and ext api more similar again. + +## 2010-08-07 (1.4.5) + * Manage data structure nesting depth in state object during generation. This + should reduce problems with `to_json` method definіtions that only have one + argument. + * Some fixes in the state objects and additional tests. 
+## 2010-08-06 (1.4.4) + * Fixes build problem for rubinius under OS X, http://github.com/flori/json/issues/closed#issue/25 + * Fixes crashes described in http://github.com/flori/json/issues/closed#issue/21 and + http://github.com/flori/json/issues/closed#issue/23 +## 2010-05-05 (1.4.3) + * Fixed some test assertions, from Ruby r27587 and r27590, patch by nobu. + * Fixed issue http://github.com/flori/json/issues/#issue/20 reported by + electronicwhisper@github. Thx! + +## 2010-04-26 (1.4.2) + * Applied patch from naruse Yui NARUSE to make building with + Microsoft Visual C possible again. + * Applied patch from devrandom in order to allow building of + json_pure if extensiontask is not present. + * Thanks to Dustin Schneider , who reported a memory + leak, which is fixed in this release. + * Applied 993f261ccb8f911d2ae57e9db48ec7acd0187283 patch from josh@github. + +## 2010-04-25 (1.4.1) + * Fix for a bug reported by Dan DeLeo , caused by T_FIXNUM + being different on 32bit/64bit architectures. + +## 2010-04-23 (1.4.0) + * Major speed improvements and building with simplified + directory/file-structure. + * Extension should at least be compatible with MRI, YARV and Rubinius. + +## 2010-04-07 (1.2.4) + * Triger const_missing callback to make Rails' dynamic class loading work. + +## 2010-03-11 (1.2.3) + * Added a `State#[]` method which returns an attribute's value in order to + increase duck type compatibility to Hash. + +## 2010-02-27 (1.2.2) + * Made some changes to make the building of the parser/generator compatible + to Rubinius. + +## 2009-11-25 (1.2.1) + * Added `:symbolize_names` option to Parser, which returns symbols instead of + strings in object names/keys. + +## 2009-10-01 (1.2.0) + * `fast_generate` now raises an exception for nan and infinite floats. + * On Ruby 1.8 json supports parsing of UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, + and UTF-32LE JSON documents now. Under Ruby 1.9 the M17n conversion + functions are used to convert from all supported encodings. ASCII-8BIT + encoded strings are handled like all strings under Ruby 1.8 were. + * Better documentation + +## 2009-08-23 (1.1.9) + * Added forgotten main doc file `extra_rdoc_files`. + +## 2009-08-23 (1.1.8) + * Applied a patch by OZAWA Sakuro to make json/pure + work in environments that don't provide iconv. + * Applied patch by okkez_ in order to fix Ruby Bug #1768: + http://redmine.ruby-lang.org/issues/show/1768. + * Finally got around to avoid the rather paranoid escaping of ?/ characters + in the generator's output. The parsers aren't affected by this change. + Thanks to Rich Apodaca for the suggestion. + +## 2009-06-29 (1.1.7) + * Security Fix for JSON::Pure::Parser. A specially designed string could + cause catastrophic backtracking in one of the parser's regular expressions + in earlier 1.1.x versions. JSON::Ext::Parser isn't affected by this issue. + Thanks to Bartosz Blimke for reporting this + problem. + * This release also uses a less strict ruby version requirement for the + creation of the mswin32 native gem. + +## 2009-05-10 (1.1.6) + * No changes. І tested native linux gems in the last release and they don't + play well with different ruby versions other than the one the gem was built + with. This release is just to bump the version number in order to skip the + native gem on rubyforge. + +## 2009-05-10 (1.1.5) + * Started to build gems with rake-compiler gem. + * Applied patch object/array class patch from Brian Candler + and fixes. 
+ +## 2009-04-01 (1.1.4) + * Fixed a bug in the creation of serialized generic rails objects reported by + Friedrich Graeter . + * Deleted tests/runner.rb, we're using testrb instead. + * Editor supports Infinity in numbers now. + * Made some changes in order to get the library to compile/run under Ruby + 1.9. + * Improved speed of the code path for the fast_generate method in the pure + variant. + +## 2008-07-10 (1.1.3) + * Wesley Beary reported a bug in json/add/core's DateTime + handling: If the nominator and denominator of the offset were divisible by + each other Ruby's Rational#to_s returns them as an integer not a fraction + with '/'. This caused a ZeroDivisionError during parsing. + * Use Date#start and DateTime#start instead of sg method, while + remaining backwards compatible. + * Supports ragel >= 6.0 now. + * Corrected some tests. + * Some minor changes. + +## 2007-11-27 (1.1.2) + * Remember default dir (last used directory) in editor. + * JSON::Editor.edit method added, the editor can now receive json texts from + the clipboard via C-v. + * Load json texts from an URL pasted via middle button press. + * Added :create_additions option to Parser. This makes it possible to disable + the creation of additions by force, in order to treat json texts as data + while having additions loaded. + * Jacob Maine reported, that JSON(:foo) outputs a JSON + object if the rails addition is enabled, which is wrong. It now outputs a + JSON string "foo" instead, like suggested by Jacob Maine. + * Discovered a bug in the Ruby Bugs Tracker on rubyforge, that was reported + by John Evans lgastako@gmail.com. He could produce a crash in the JSON + generator by returning something other than a String instance from a + to_json method. I now guard against this by doing a rather crude type + check, which raises an exception instead of crashing. + +## 2007-07-06 (1.1.1) + * Yui NARUSE sent some patches to fix tests for Ruby + 1.9. I applied them and adapted some of them a bit to run both on 1.8 and + 1.9. + * Introduced a `JSON.parse!` method without depth checking for people who + like danger. + * Made generate and `pretty_generate` methods configurable by an options hash. + * Added :allow_nan option to parser and generator in order to handle NaN, + Infinity, and -Infinity correctly - if requested. Floats, which aren't numbers, + aren't valid JSON according to RFC4627, so by default an exception will be + raised if any of these symbols are encountered. Thanks to Andrea Censi + for his hint about this. + * Fixed some more tests for Ruby 1.9. + * Implemented dump/load interface of Marshal as suggested in ruby-core:11405 + by murphy . + * Implemented the `max_nesting` feature for generate methods, too. + * Added some implementations for ruby core's custom objects for + serialisation/deserialisation purposes. + +## 2007-05-21 (1.1.0) + * Implemented max_nesting feature for parser to avoid stack overflows for + data from untrusted sources. If you trust the source, you can disable it + with the option max_nesting => false. + * Piers Cawley reported a bug, that not every + character can be escaped by `\` as required by RFC4627. There's a + contradiction between David Crockford's JSON checker test vectors (in + tests/fixtures) and RFC4627, though. I decided to stick to the RFC, because + the JSON checker seems to be a bit older than the RFC. + * Extended license to Ruby License, which includes the GPL. + * Added keyboard shortcuts, and 'Open location' menu item to edit_json.rb. 
+ +## 2007-05-09 (1.0.4) + * Applied a patch from Yui NARUSE to make JSON compile + under Ruby 1.9. Thank you very much for mailing it to me! + * Made binary variants of JSON fail early, instead of falling back to the + pure version. This should avoid overshadowing of eventual problems while + loading of the binary. + +## 2007-03-24 (1.0.3) + * Improved performance of pure variant a bit. + * The ext variant of this release supports the mswin32 platform. Ugh! + +## 2007-03-24 (1.0.2) + * Ext Parser didn't parse 0e0 correctly into 0.0: Fixed! + +## 2007-03-24 (1.0.1) + * Forgot some object files in the build dir. I really like that - not! + +## 2007-03-24 (1.0.0) + * Added C implementations for the JSON generator and a ragel based JSON + parser in C. + * Much more tests, especially fixtures from json.org. + * Further improved conformance to RFC4627. + +## 2007-02-09 (0.4.3) + * Conform more to RFC4627 for JSON: This means JSON strings + now always must contain exactly one object `"{ ... }"` or array `"[ ... ]"` in + order to be parsed without raising an exception. The definition of what + constitutes a whitespace is narrower in JSON than in Ruby ([ \t\r\n]), and + there are differences in floats and integers (no octals or hexadecimals) as + well. + * Added aliases generate and `pretty_generate` of unparse and `pretty_unparse`. + * Fixed a test case. + * Catch an `Iconv::InvalidEncoding` exception, that seems to occur on some Sun + boxes with SunOS 5.8, if iconv doesn't support utf16 conversions. This was + reported by Andrew R Jackson , thanks a bunch! + +## 2006-08-25 (0.4.2) + * Fixed a bug in handling solidi (/-characters), that was reported by + Kevin Gilpin . + +## 2006-02-06 (0.4.1) + * Fixed a bug related to escaping with backslashes. Thanks for the report go + to Florian Munz . + +## 2005-09-23 (0.4.0) + * Initial Rubyforge Version diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/LICENSE b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/LICENSE new file mode 100644 index 0000000..426810a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/LICENSE @@ -0,0 +1,56 @@ +Ruby is copyrighted free software by Yukihiro Matsumoto . +You can redistribute it and/or modify it under either the terms of the +2-clause BSDL (see the file BSDL), or the conditions below: + + 1. You may make and give away verbatim copies of the source form of the + software without restriction, provided that you duplicate all of the + original copyright notices and associated disclaimers. + + 2. You may modify your copy of the software in any way, provided that + you do at least ONE of the following: + + a) place your modifications in the Public Domain or otherwise + make them Freely Available, such as by posting said + modifications to Usenet or an equivalent medium, or by allowing + the author to include your modifications in the software. + + b) use the modified software only within your corporation or + organization. + + c) give non-standard binaries non-standard names, with + instructions on where to get the original software distribution. + + d) make other distribution arrangements with the author. + + 3. You may distribute the software in object code or binary form, + provided that you do at least ONE of the following: + + a) distribute the binaries and library files of the software, + together with instructions (in the manual page or equivalent) + on where to get the original distribution. + + b) accompany the distribution with the machine-readable source of + the software. 
+ + c) give non-standard binaries non-standard names, with + instructions on where to get the original software distribution. + + d) make other distribution arrangements with the author. + + 4. You may modify and include the part of the software into any other + software (possibly commercial). But some files in the distribution + are not written by the author, so that they are not under these terms. + + For the list of those files and their copying conditions, see the + file LEGAL. + + 5. The scripts and library files supplied as input to or produced as + output from the software do not automatically fall under the + copyright of the software, but belong to whomever generated them, + and may be sold commercially, and may be aggregated with this + software. + + 6. THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/README.md b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/README.md new file mode 100644 index 0000000..288ccdf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/README.md @@ -0,0 +1,425 @@ +# JSON implementation for Ruby + +[![CI](https://github.com/flori/json/actions/workflows/ci.yml/badge.svg)](https://github.com/flori/json/actions/workflows/ci.yml) + +## Description + +This is a implementation of the JSON specification according to RFC 7159 +http://www.ietf.org/rfc/rfc7159.txt . Starting from version 1.0.0 on there +will be two variants available: + +* A pure ruby variant, that relies on the iconv and the stringscan + extensions, which are both part of the ruby standard library. +* The quite a bit faster native extension variant, which is in parts + implemented in C or Java and comes with its own unicode conversion + functions and a parser generated by the ragel state machine compiler + http://www.complang.org/ragel/ . + +Both variants of the JSON generator generate UTF-8 character sequences by +default. If an :ascii\_only option with a true value is given, they escape all +non-ASCII and control characters with \uXXXX escape sequences, and support +UTF-16 surrogate pairs in order to be able to generate the whole range of +unicode code points. + +All strings, that are to be encoded as JSON strings, should be UTF-8 byte +sequences on the Ruby side. To encode raw binary strings, that aren't UTF-8 +encoded, please use the to\_json\_raw\_object method of String (which produces +an object, that contains a byte array) and decode the result on the receiving +endpoint. + +## Installation + +It's recommended to use the extension variant of JSON, because it's faster than +the pure ruby variant. If you cannot build it on your system, you can settle +for the latter. + +Just type into the command line as root: + +``` +# rake install +``` + +The above command will build the extensions and install them on your system. + +``` +# rake install_pure +``` + +or + +``` +# ruby install.rb +``` + +will just install the pure ruby implementation of JSON. + +If you use Rubygems you can type + +``` +# gem install json +``` + +instead, to install the newest JSON version. 
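
For Bundler-managed projects such as the one this bundle is vendored into, the same dependency is more commonly declared in a Gemfile than installed by hand. A minimal, hypothetical sketch (not part of the vendored README), pinning to the 2.6 series vendored here:

```ruby
# Gemfile (hypothetical entry) -- pin the json gem to the vendored 2.6.x series.
source "https://rubygems.org"

gem "json", "~> 2.6"
```

Running `bundle install` afterwards resolves the constraint and records the exact version in Gemfile.lock.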
+ +There is also a pure ruby json only variant of the gem, that can be installed +with: + +``` +# gem install json_pure +``` + +## Compiling the extensions yourself + +If you want to create the `parser.c` file from its `parser.rl` file or draw nice +graphviz images of the state machines, you need ragel from: +http://www.complang.org/ragel/ + +## Usage + +To use JSON you can + +```ruby +require 'json' +``` + +to load the installed variant (either the extension `'json'` or the pure +variant `'json_pure'`). If you have installed the extension variant, you can +pick either the extension variant or the pure variant by typing + +```ruby +require 'json/ext' +``` + +or + +```ruby +require 'json/pure' +``` + +Now you can parse a JSON document into a ruby data structure by calling + +```ruby +JSON.parse(document) +``` + +If you want to generate a JSON document from a ruby data structure call +```ruby +JSON.generate(data) +``` + +You can also use the `pretty_generate` method (which formats the output more +verbosely and nicely) or `fast_generate` (which doesn't do any of the security +checks generate performs, e. g. nesting deepness checks). + +There are also the JSON and JSON[] methods which use parse on a String or +generate a JSON document from an array or hash: + +```ruby +document = JSON 'test' => 23 # => "{\"test\":23}" +document = JSON['test' => 23] # => "{\"test\":23}" +``` + +and + +```ruby +data = JSON '{"test":23}' # => {"test"=>23} +data = JSON['{"test":23}'] # => {"test"=>23} +``` + +You can choose to load a set of common additions to ruby core's objects if +you + +```ruby +require 'json/add/core' +``` + +After requiring this you can, e. g., serialise/deserialise Ruby ranges: + +```ruby +JSON JSON(1..10) # => 1..10 +``` + +To find out how to add JSON support to other or your own classes, read the +section "More Examples" below. + +To get the best compatibility to rails' JSON implementation, you can + +```ruby +require 'json/add/rails' +``` + +Both of the additions attempt to require `'json'` (like above) first, if it has +not been required yet. + +## Serializing exceptions + +The JSON module doesn't extend `Exception` by default. If you convert an `Exception` +object to JSON, it will by default only include the exception message. + +To include the full details, you must either load the `json/add/core` mentioned +above, or specifically load the exception addition: + +```ruby +require 'json/add/exception' +``` + +## More Examples + +To create a JSON document from a ruby data structure, you can call +`JSON.generate` like that: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,\"4..10\"]" +``` + +To get back a ruby data structure from a JSON document, you have to call +JSON.parse on it: + +```ruby +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, "4..10"] +``` + +Note, that the range from the original data structure is a simple +string now. The reason for this is, that JSON doesn't support ranges +or arbitrary classes. In this case the json library falls back to call +`Object#to_json`, which is the same as `#to_s.to_json`. 
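
As a minimal illustration of that fallback (assuming only `require 'json'`, with none of the `json/add/*` extensions loaded; this snippet is not part of the vendored README), a Range serialises as the JSON string of its `to_s` and comes back as a plain String:

```ruby
require 'json'

# Object#to_json is the generic fallback: it JSON-encodes the receiver's #to_s.
(4..10).to_json                      # => "\"4..10\""
JSON.generate([4..10])               # => "[\"4..10\"]"

# Round-tripping therefore yields a String, not a Range.
JSON.parse(JSON.generate([4..10]))   # => ["4..10"]
```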
+ +It's possible to add JSON support serialization to arbitrary classes by +simply implementing a more specialized version of the `#to_json method`, that +should return a JSON object (a hash converted to JSON with `#to_json`) like +this (don't forget the `*a` for all the arguments): + +```ruby +class Range + def to_json(*a) + { + 'json_class' => self.class.name, # = 'Range' + 'data' => [ first, last, exclude_end? ] + }.to_json(*a) + end +end +``` + +The hash key `json_class` is the class, that will be asked to deserialise the +JSON representation later. In this case it's `Range`, but any namespace of +the form `A::B` or `::A::B` will do. All other keys are arbitrary and can be +used to store the necessary data to configure the object to be deserialised. + +If the key `json_class` is found in a JSON object, the JSON parser checks +if the given class responds to the `json_create` class method. If so, it is +called with the JSON object converted to a Ruby hash. So a range can +be deserialised by implementing `Range.json_create` like this: + +```ruby +class Range + def self.json_create(o) + new(*o['data']) + end +end +``` + +Now it possible to serialise/deserialise ranges as well: + +```ruby +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +json = JSON.generate [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +# => "[1,2,{\"a\":3.141},false,true,null,{\"json_class\":\"Range\",\"data\":[4,10,false]}]" +JSON.parse json, :create_additions => true +# => [1, 2, {"a"=>3.141}, false, true, nil, 4..10] +``` + +`JSON.generate` always creates the shortest possible string representation of a +ruby data structure in one line. This is good for data storage or network +protocols, but not so good for humans to read. Fortunately there's also +`JSON.pretty_generate` (or `JSON.pretty_generate`) that creates a more readable +output: + +```ruby + puts JSON.pretty_generate([1, 2, {"a"=>3.141}, false, true, nil, 4..10]) + [ + 1, + 2, + { + "a": 3.141 + }, + false, + true, + null, + { + "json_class": "Range", + "data": [ + 4, + 10, + false + ] + } + ] +``` + +There are also the methods `Kernel#j` for generate, and `Kernel#jj` for +`pretty_generate` output to the console, that work analogous to Core Ruby's `p` and +the `pp` library's `pp` methods. + +The script `tools/server.rb` contains a small example if you want to test, how +receiving a JSON object from a webrick server in your browser with the +JavaScript prototype library http://www.prototypejs.org works. + +## Speed Comparisons + +I have created some benchmark results (see the benchmarks/data-p4-3Ghz +subdir of the package) for the JSON-parser to estimate the speed up in the C +extension: + +``` + Comparing times (call_time_mean): + 1 ParserBenchmarkExt#parser 900 repeats: + 553.922304770 ( real) -> 21.500x + 0.001805307 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 224.513358139 ( real) -> 8.714x + 0.004454078 + 3 ParserBenchmarkPure#parser 1000 repeats: + 26.755020642 ( real) -> 1.038x + 0.037376163 + 4 ParserBenchmarkRails#parser 1000 repeats: + 25.763381731 ( real) -> 1.000x + 0.038814780 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1 is `JSON::Ext::Parser`, 2 is `YAML.load` with YAML +compatible JSON document, 3 is is `JSON::Pure::Parser`, and 4 is +`ActiveSupport::JSON.decode`. 
The ActiveSupport JSON-decoder converts the +input first to YAML and then uses the YAML-parser, the conversion seems to +slow it down so much that it is only as fast as the `JSON::Pure::Parser`! + +If you look at the benchmark data you can see that this is mostly caused by +the frequent high outliers - the median of the Rails-parser runs is still +overall smaller than the median of the `JSON::Pure::Parser` runs: + +``` + Comparing times (call_time_median): + 1 ParserBenchmarkExt#parser 900 repeats: + 800.592479481 ( real) -> 26.936x + 0.001249075 + 2 ParserBenchmarkYAML#parser 1000 repeats: + 271.002390644 ( real) -> 9.118x + 0.003690004 + 3 ParserBenchmarkRails#parser 1000 repeats: + 30.227910865 ( real) -> 1.017x + 0.033082008 + 4 ParserBenchmarkPure#parser 1000 repeats: + 29.722384421 ( real) -> 1.000x + 0.033644676 + calls/sec ( time) -> speed covers + secs/call +``` + +I have benchmarked the `JSON-Generator` as well. This generated a few more +values, because there are different modes that also influence the achieved +speed: + +``` + Comparing times (call_time_mean): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 547.354332608 ( real) -> 15.090x + 0.001826970 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 443.968212317 ( real) -> 12.240x + 0.002252414 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 375.104545883 ( real) -> 10.341x + 0.002665923 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 49.978706968 ( real) -> 1.378x + 0.020008521 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 38.531868759 ( real) -> 1.062x + 0.025952543 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 36.927649925 ( real) -> 1.018x 7 (>=3859) + 0.027079979 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 36.272134441 ( real) -> 1.000x 6 (>=3859) + 0.027569373 + calls/sec ( time) -> speed covers + secs/call +``` + +In the table above 1-3 are `JSON::Ext::Generator` methods. 4, 6, and 7 are +`JSON::Pure::Generator` methods and 5 is the Rails JSON generator. It is now a +bit faster than the `generator_safe` and `generator_pretty` methods of the pure +variant but slower than the others. + +To achieve the fastest JSON document output, you can use the `fast_generate` +method. Beware, that this will disable the checking for circular Ruby data +structures, which may cause JSON to go into an infinite loop. + +Here are the median comparisons for completeness' sake: + +``` + Comparing times (call_time_median): + 1 GeneratorBenchmarkExt#generator_fast 1000 repeats: + 708.258020939 ( real) -> 16.547x + 0.001411915 + 2 GeneratorBenchmarkExt#generator_safe 1000 repeats: + 569.105020353 ( real) -> 13.296x + 0.001757145 + 3 GeneratorBenchmarkExt#generator_pretty 900 repeats: + 482.825371244 ( real) -> 11.280x + 0.002071142 + 4 GeneratorBenchmarkPure#generator_fast 1000 repeats: + 62.717626652 ( real) -> 1.465x + 0.015944481 + 5 GeneratorBenchmarkRails#generator 1000 repeats: + 43.965681162 ( real) -> 1.027x + 0.022745013 + 6 GeneratorBenchmarkPure#generator_safe 1000 repeats: + 43.929073409 ( real) -> 1.026x 7 (>=3859) + 0.022763968 + 7 GeneratorBenchmarkPure#generator_pretty 1000 repeats: + 42.802514491 ( real) -> 1.000x 6 (>=3859) + 0.023363113 + calls/sec ( time) -> speed covers + secs/call +``` + +## Development + +### Release + +Update the json.gemspec and json-java.gemspec. 
+ +``` +rbenv shell 2.6.5 +rake build +gem push pkg/json-2.3.0.gem + +rbenv shell jruby-9.2.9.0 +rake build +gem push pkg/json-2.3.0-java.gem +``` + +## Author + +Florian Frank + +## License + +Ruby License, see https://www.ruby-lang.org/en/about/license.txt. + +## Download + +The latest version of this library can be downloaded at + +* https://rubygems.org/gems/json + +Online Documentation should be located at + +* https://www.rubydoc.info/gems/json diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/VERSION b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/VERSION new file mode 100644 index 0000000..6a6a3d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/VERSION @@ -0,0 +1 @@ +2.6.1 diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/Makefile b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/Makefile new file mode 100644 index 0000000..0cb4403 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/Makefile @@ -0,0 +1,202 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 +hdrdir = $(topdir) +arch_hdrdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/Users/arthur/.rvm/rubies/ruby-3.0.0 +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20220301-14832-83u4r7 +sitelibdir = $(DESTDIR)./.gem.20220301-14832-83u4r7 +sitedir = $(rubylibprefix)/site_ruby +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(datarootdir)/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(datarootdir)/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(SDKROOT)/usr/include +includedir = $(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(prefix)/etc +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = gcc -fdeclspec +CXX = g++ -fdeclspec +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = 
+optflags = -O3 +debugflags = -ggdb3 +warnflags = -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wmissing-noreturn -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens +cppflags = +CCDLFLAGS = -fno-common +CFLAGS = $(CCDLFLAGS) -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) +DEFS = +CPPFLAGS = -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG) +ldflags = -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib +dldflags = -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -Wl,-undefined,dynamic_lookup -Wl,-multiply_defined,suppress +ARCH_FLAG = -m64 +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = ar +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.3.0 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = arm64-darwin21 +sitearch = $(arch) +ruby_version = 3.0.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = /opt/homebrew/opt/coreutils/bin/gmkdir -p +INSTALL = /opt/homebrew/opt/coreutils/bin/ginstall -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) +ORIG_SRCS = +SRCS = $(ORIG_SRCS) +OBJS = +HDRS = +LOCAL_HDRS = +TARGET = +TARGET_NAME = +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . 
+BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = +CLEANOBJS = *.o *.bak + +all: Makefile +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: Makefile +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/fbuffer/fbuffer.h b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/fbuffer/fbuffer.h new file mode 100644 index 0000000..dc8f406 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/fbuffer/fbuffer.h @@ -0,0 +1,187 @@ + +#ifndef _FBUFFER_H_ +#define _FBUFFER_H_ + +#include "ruby.h" + +#ifndef RHASH_SIZE +#define RHASH_SIZE(hsh) (RHASH(hsh)->tbl->num_entries) +#endif + +#ifndef RFLOAT_VALUE +#define RFLOAT_VALUE(val) (RFLOAT(val)->value) +#endif + +#ifndef RARRAY_LEN +#define RARRAY_LEN(ARRAY) RARRAY(ARRAY)->len +#endif +#ifndef RSTRING_PTR +#define RSTRING_PTR(string) RSTRING(string)->ptr +#endif +#ifndef RSTRING_LEN +#define RSTRING_LEN(string) RSTRING(string)->len +#endif + +#ifdef PRIsVALUE +# define RB_OBJ_CLASSNAME(obj) rb_obj_class(obj) +# define RB_OBJ_STRING(obj) (obj) +#else +# define PRIsVALUE "s" +# define RB_OBJ_CLASSNAME(obj) rb_obj_classname(obj) +# define RB_OBJ_STRING(obj) StringValueCStr(obj) +#endif + +#ifdef HAVE_RUBY_ENCODING_H +#include "ruby/encoding.h" +#define FORCE_UTF8(obj) rb_enc_associate((obj), rb_utf8_encoding()) +#else +#define FORCE_UTF8(obj) +#endif + +/* We don't need to guard objects for rbx, so let's do nothing at all. 
*/ +#ifndef RB_GC_GUARD +#define RB_GC_GUARD(object) +#endif + +typedef struct FBufferStruct { + unsigned long initial_length; + char *ptr; + unsigned long len; + unsigned long capa; +} FBuffer; + +#define FBUFFER_INITIAL_LENGTH_DEFAULT 1024 + +#define FBUFFER_PTR(fb) (fb->ptr) +#define FBUFFER_LEN(fb) (fb->len) +#define FBUFFER_CAPA(fb) (fb->capa) +#define FBUFFER_PAIR(fb) FBUFFER_PTR(fb), FBUFFER_LEN(fb) + +static FBuffer *fbuffer_alloc(unsigned long initial_length); +static void fbuffer_free(FBuffer *fb); +static void fbuffer_clear(FBuffer *fb); +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len); +#ifdef JSON_GENERATOR +static void fbuffer_append_long(FBuffer *fb, long number); +#endif +static void fbuffer_append_char(FBuffer *fb, char newchr); +#ifdef JSON_GENERATOR +static FBuffer *fbuffer_dup(FBuffer *fb); +static VALUE fbuffer_to_s(FBuffer *fb); +#endif + +static FBuffer *fbuffer_alloc(unsigned long initial_length) +{ + FBuffer *fb; + if (initial_length <= 0) initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + fb = ALLOC(FBuffer); + memset((void *) fb, 0, sizeof(FBuffer)); + fb->initial_length = initial_length; + return fb; +} + +static void fbuffer_free(FBuffer *fb) +{ + if (fb->ptr) ruby_xfree(fb->ptr); + ruby_xfree(fb); +} + +static void fbuffer_clear(FBuffer *fb) +{ + fb->len = 0; +} + +static void fbuffer_inc_capa(FBuffer *fb, unsigned long requested) +{ + unsigned long required; + + if (!fb->ptr) { + fb->ptr = ALLOC_N(char, fb->initial_length); + fb->capa = fb->initial_length; + } + + for (required = fb->capa; requested > required - fb->len; required <<= 1); + + if (required > fb->capa) { + REALLOC_N(fb->ptr, char, required); + fb->capa = required; + } +} + +static void fbuffer_append(FBuffer *fb, const char *newstr, unsigned long len) +{ + if (len > 0) { + fbuffer_inc_capa(fb, len); + MEMCPY(fb->ptr + fb->len, newstr, char, len); + fb->len += len; + } +} + +#ifdef JSON_GENERATOR +static void fbuffer_append_str(FBuffer *fb, VALUE str) +{ + const char *newstr = StringValuePtr(str); + unsigned long len = RSTRING_LEN(str); + + RB_GC_GUARD(str); + + fbuffer_append(fb, newstr, len); +} +#endif + +static void fbuffer_append_char(FBuffer *fb, char newchr) +{ + fbuffer_inc_capa(fb, 1); + *(fb->ptr + fb->len) = newchr; + fb->len++; +} + +#ifdef JSON_GENERATOR +static void freverse(char *start, char *end) +{ + char c; + + while (end > start) { + c = *end, *end-- = *start, *start++ = c; + } +} + +static long fltoa(long number, char *buf) +{ + static char digits[] = "0123456789"; + long sign = number; + char* tmp = buf; + + if (sign < 0) number = -number; + do *tmp++ = digits[number % 10]; while (number /= 10); + if (sign < 0) *tmp++ = '-'; + freverse(buf, tmp - 1); + return tmp - buf; +} + +static void fbuffer_append_long(FBuffer *fb, long number) +{ + char buf[20]; + unsigned long len = fltoa(number, buf); + fbuffer_append(fb, buf, len); +} + +static FBuffer *fbuffer_dup(FBuffer *fb) +{ + unsigned long len = fb->len; + FBuffer *result; + + result = fbuffer_alloc(len); + fbuffer_append(result, FBUFFER_PAIR(fb)); + return result; +} + +static VALUE fbuffer_to_s(FBuffer *fb) +{ + VALUE result = rb_str_new(FBUFFER_PTR(fb), FBUFFER_LEN(fb)); + fbuffer_free(fb); + FORCE_UTF8(result); + return result; +} +#endif +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/.sitearchdir.-.json.-.ext.time b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/.sitearchdir.-.json.-.ext.time new file mode 100644 index 0000000..e69de29 
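
The entries above vendor the sources of the compiled `JSON::Ext` variant. A small, hedged sketch (not taken from the vendored files) of how one might confirm at runtime that the native extension, rather than the pure-Ruby fallback, ended up loaded:

```ruby
require 'json'

# JSON.parser and JSON.generator report which implementation is wired in;
# with a successful native build these resolve to the JSON::Ext classes.
puts JSON.parser      # e.g. JSON::Ext::Parser
puts JSON.generator   # e.g. JSON::Ext::Generator

# Requiring 'json/pure' instead would switch both to the pure-Ruby variants.
```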
diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/Makefile b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/Makefile new file mode 100644 index 0000000..052566c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/Makefile @@ -0,0 +1,268 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 +hdrdir = $(topdir) +arch_hdrdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/Users/arthur/.rvm/rubies/ruby-3.0.0 +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20220301-14832-8um9zr +sitelibdir = $(DESTDIR)./.gem.20220301-14832-8um9zr +sitedir = $(rubylibprefix)/site_ruby +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(datarootdir)/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(datarootdir)/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(SDKROOT)/usr/include +includedir = $(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(prefix)/etc +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = gcc -fdeclspec +CXX = g++ -fdeclspec +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = +optflags = -O3 +debugflags = -ggdb3 +warnflags = -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wmissing-noreturn -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens +cppflags = +CCDLFLAGS = -fno-common +CFLAGS = $(CCDLFLAGS) -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include 
-I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) +DEFS = +CPPFLAGS = -DJSON_GENERATOR -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG) +ldflags = -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib +dldflags = -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -Wl,-undefined,dynamic_lookup -Wl,-multiply_defined,suppress +ARCH_FLAG = -m64 +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = ar +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.3.0 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = arm64-darwin21 +sitearch = $(arch) +ruby_version = 3.0.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = /opt/homebrew/opt/coreutils/bin/gmkdir -p +INSTALL = /opt/homebrew/opt/coreutils/bin/ginstall -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = /json/ext +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) +ORIG_SRCS = generator.c +SRCS = $(ORIG_SRCS) +OBJS = generator.o +HDRS = $(srcdir)/generator.h +LOCAL_HDRS = +TARGET = generator +TARGET_NAME = generator +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . 
+BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object json/ext/$(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +### +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/depend 
b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/depend new file mode 100644 index 0000000..1a042a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/depend @@ -0,0 +1 @@ +generator.o: generator.c generator.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/extconf.rb new file mode 100644 index 0000000..8627c5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/extconf.rb @@ -0,0 +1,4 @@ +require 'mkmf' + +$defs << "-DJSON_GENERATOR" +create_makefile 'json/ext/generator' diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.bundle b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.bundle new file mode 100755 index 0000000..0a5c4b7 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.c b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.c new file mode 100644 index 0000000..e3a8347 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.c @@ -0,0 +1,1608 @@ +#include "../fbuffer/fbuffer.h" +#include "generator.h" + +static VALUE mJSON, mExt, mGenerator, cState, mGeneratorMethods, mObject, + mHash, mArray, +#ifdef RUBY_INTEGER_UNIFICATION + mInteger, +#else + mFixnum, mBignum, +#endif + mFloat, mString, mString_Extend, + mTrueClass, mFalseClass, mNilClass, eGeneratorError, + eNestingError; + +static ID i_to_s, i_to_json, i_new, i_indent, i_space, i_space_before, + i_object_nl, i_array_nl, i_max_nesting, i_allow_nan, i_ascii_only, + i_pack, i_unpack, i_create_id, i_extend, i_key_p, + i_aref, i_send, i_respond_to_p, i_match, i_keys, i_depth, + i_buffer_initial_length, i_dup, i_escape_slash; + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is + * left as-is for anyone who may want to do such conversion, which was + * allowed in earlier algorithms. 
+ */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* + * Magic values subtracted from a buffer value during UTF8 conversion. + * This table contains as many values as there might be trailing bytes + * in a UTF-8 sequence. + */ +static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * If not calling this from ConvertUTF8to*, then the length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns 0. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length) +{ + UTF8 a; + const UTF8 *srcptr = source+length; + switch (length) { + default: return 0; + /* Everything else falls through when "1"... */ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; + case 2: if ((a = (*--srcptr)) > 0xBF) return 0; + + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return 0; break; + case 0xED: if (a > 0x9F) return 0; break; + case 0xF0: if (a < 0x90) return 0; break; + case 0xF4: if (a > 0x8F) return 0; break; + default: if (a < 0x80) return 0; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return 0; + } + if (*source > 0xF4) return 0; + return 1; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf. */ +static void unicode_escape(char *buf, UTF16 character) +{ + const char *digits = "0123456789abcdef"; + + buf[2] = digits[character >> 12]; + buf[3] = digits[(character >> 8) & 0xf]; + buf[4] = digits[(character >> 4) & 0xf]; + buf[5] = digits[character & 0xf]; +} + +/* Escapes the UTF16 character and stores the result in the buffer buf, then + * the buffer buf is appended to the FBuffer buffer. */ +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 + character) +{ + unicode_escape(buf, character); + fbuffer_append(buffer, buf, 6); +} + +/* Converts string to a JSON string in FBuffer buffer, where all but the ASCII + * and control characters are JSON escaped. 
*/ +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string, char escape_slash) +{ + const UTF8 *source = (UTF8 *) RSTRING_PTR(string); + const UTF8 *sourceEnd = source + RSTRING_LEN(string); + char buf[6] = { '\\', 'u' }; + + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8(source, extraBytesToRead+1)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* normal case */ + if (ch >= 0x20 && ch <= 0x7f) { + switch (ch) { + case '\\': + fbuffer_append(buffer, "\\\\", 2); + break; + case '"': + fbuffer_append(buffer, "\\\"", 2); + break; + case '/': + if(escape_slash) { + fbuffer_append(buffer, "\\/", 2); + break; + } + default: + fbuffer_append_char(buffer, (char)ch); + break; + } + } else { + switch (ch) { + case '\n': + fbuffer_append(buffer, "\\n", 2); + break; + case '\r': + fbuffer_append(buffer, "\\r", 2); + break; + case '\t': + fbuffer_append(buffer, "\\t", 2); + break; + case '\f': + fbuffer_append(buffer, "\\f", 2); + break; + case '\b': + fbuffer_append(buffer, "\\b", 2); + break; + default: + unicode_escape_to_buffer(buffer, buf, (UTF16) ch); + break; + } + } + } + } else if (ch > UNI_MAX_UTF16) { +#if UNI_STRICT_CONVERSION + source -= (extraBytesToRead+1); /* return to the start */ + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf8"); +#else + unicode_escape_to_buffer(buffer, buf, UNI_REPLACEMENT_CHAR); +#endif + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. */ + ch -= halfBase; + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START)); + unicode_escape_to_buffer(buffer, buf, (UTF16)((ch & halfMask) + UNI_SUR_LOW_START)); + } + } + RB_GC_GUARD(string); +} + +/* Converts string to a JSON string in FBuffer buffer, where only the + * characters required by the JSON standard are JSON escaped. The remaining + * characters (should be UTF8) are just passed through and appended to the + * result. 
*/ +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string, char escape_slash) +{ + const char *ptr = RSTRING_PTR(string), *p; + unsigned long len = RSTRING_LEN(string), start = 0, end = 0; + const char *escape = NULL; + int escape_len; + unsigned char c; + char buf[6] = { '\\', 'u' }; + int ascii_only = rb_enc_str_asciionly_p(string); + + for (start = 0, end = 0; end < len;) { + p = ptr + end; + c = (unsigned char) *p; + if (c < 0x20) { + switch (c) { + case '\n': + escape = "\\n"; + escape_len = 2; + break; + case '\r': + escape = "\\r"; + escape_len = 2; + break; + case '\t': + escape = "\\t"; + escape_len = 2; + break; + case '\f': + escape = "\\f"; + escape_len = 2; + break; + case '\b': + escape = "\\b"; + escape_len = 2; + break; + default: + unicode_escape(buf, (UTF16) *p); + escape = buf; + escape_len = 6; + break; + } + } else { + switch (c) { + case '\\': + escape = "\\\\"; + escape_len = 2; + break; + case '"': + escape = "\\\""; + escape_len = 2; + break; + case '/': + if(escape_slash) { + escape = "\\/"; + escape_len = 2; + break; + } + default: + { + unsigned short clen = 1; + if (!ascii_only) { + clen += trailingBytesForUTF8[c]; + if (end + clen > len) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "partial character in source, but hit end"); + } + if (!isLegalUTF8((UTF8 *) p, clen)) { + rb_raise(rb_path2class("JSON::GeneratorError"), + "source sequence is illegal/malformed utf-8"); + } + } + end += clen; + } + continue; + break; + } + } + fbuffer_append(buffer, ptr + start, end - start); + fbuffer_append(buffer, escape, escape_len); + start = ++end; + escape = NULL; + } + fbuffer_append(buffer, ptr + start, end - start); +} + +static char *fstrndup(const char *ptr, unsigned long len) { + char *result; + if (len <= 0) return NULL; + result = ALLOC_N(char, len); + memcpy(result, ptr, len); + return result; +} + +/* + * Document-module: JSON::Ext::Generator + * + * This is the JSON generator implemented as a C extension. It can be + * configured to be used by setting + * + * JSON.generator = JSON::Ext::Generator + * + * with the method generator= in JSON. 
+ * + */ + +/* Explanation of the following: that's the only way to not pollute + * standard library's docs with GeneratorMethods:: which + * are uninformative and take a large place in a list of classes + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Array + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Bignum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::FalseClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Fixnum + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Float + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Hash + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Integer + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::NilClass + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::Object + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::String::Extend + * :nodoc: + */ + +/* + * Document-module: JSON::Ext::Generator::GeneratorMethods::TrueClass + * :nodoc: + */ + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON object, that is generated from + * this Hash instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(object); +} + +/* + * call-seq: to_json(state = nil) + * + * Returns a JSON string containing a JSON array, that is generated from + * this Array instance. + * _state_ is a JSON::State object, that can also be used to configure the + * produced JSON string output further. + */ +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self) { + GENERATE_JSON(array); +} + +#ifdef RUBY_INTEGER_UNIFICATION +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(integer); +} + +#else +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(fixnum); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Integer number. + */ +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(bignum); +} +#endif + +/* + * call-seq: to_json(*) + * + * Returns a JSON string representation for this Float number. + */ +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(float); +} + +/* + * call-seq: String.included(modul) + * + * Extends _modul_ with the String::Extend module. + */ +static VALUE mString_included_s(VALUE self, VALUE modul) { + VALUE result = rb_funcall(modul, i_extend, 1, mString_Extend); + return result; +} + +/* + * call-seq: to_json(*) + * + * This string should be encoded with UTF-8 A call to this method + * returns a JSON string encoded with UTF16 big endian characters as + * \u????. 
+ */ +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(string); +} + +/* + * call-seq: to_json_raw_object() + * + * This method creates a raw object hash, that can be nested into + * other data structures and will be generated as a raw string. This + * method should be used, if you want to convert raw strings to JSON + * instead of UTF-8 strings, e. g. binary data. + */ +static VALUE mString_to_json_raw_object(VALUE self) +{ + VALUE ary; + VALUE result = rb_hash_new(); + rb_hash_aset(result, rb_funcall(mJSON, i_create_id, 0), rb_class_name(rb_obj_class(self))); + ary = rb_funcall(self, i_unpack, 1, rb_str_new2("C*")); + rb_hash_aset(result, rb_str_new2("raw"), ary); + return result; +} + +/* + * call-seq: to_json_raw(*args) + * + * This method creates a JSON text from the result of a call to + * to_json_raw_object of this String. + */ +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self) +{ + VALUE obj = mString_to_json_raw_object(self); + Check_Type(obj, T_HASH); + return mHash_to_json(argc, argv, obj); +} + +/* + * call-seq: json_create(o) + * + * Raw Strings are JSON Objects (the raw bytes are stored in an array for the + * key "raw"). The Ruby String can be created by this module method. + */ +static VALUE mString_Extend_json_create(VALUE self, VALUE o) +{ + VALUE ary; + Check_Type(o, T_HASH); + ary = rb_hash_aref(o, rb_str_new2("raw")); + return rb_funcall(ary, i_pack, 1, rb_str_new2("C*")); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for true: 'true'. + */ +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(true); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for false: 'false'. + */ +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(false); +} + +/* + * call-seq: to_json(*) + * + * Returns a JSON string for nil: 'null'. + */ +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self) +{ + GENERATE_JSON(null); +} + +/* + * call-seq: to_json(*) + * + * Converts this object to a string (calling #to_s), converts + * it to a JSON string, and returns the result. This is a fallback, if no + * special method #to_json was defined for some object. 
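+ *
+ * A small sketch of the fallback, using a hypothetical Point class that
+ * defines no #to_json of its own:
+ *
+ *   class Point; def to_s; "1,2"; end; end
+ *   Point.new.to_json  # => "\"1,2\""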
+ */ +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self) +{ + VALUE state; + VALUE string = rb_funcall(self, i_to_s, 0); + rb_scan_args(argc, argv, "01", &state); + Check_Type(string, T_STRING); + state = cState_from_state_s(cState, state); + return cState_partial_generate(state, string); +} + +static void State_free(void *ptr) +{ + JSON_Generator_State *state = ptr; + if (state->indent) ruby_xfree(state->indent); + if (state->space) ruby_xfree(state->space); + if (state->space_before) ruby_xfree(state->space_before); + if (state->object_nl) ruby_xfree(state->object_nl); + if (state->array_nl) ruby_xfree(state->array_nl); + if (state->array_delim) fbuffer_free(state->array_delim); + if (state->object_delim) fbuffer_free(state->object_delim); + if (state->object_delim2) fbuffer_free(state->object_delim2); + ruby_xfree(state); +} + +static size_t State_memsize(const void *ptr) +{ + const JSON_Generator_State *state = ptr; + size_t size = sizeof(*state); + if (state->indent) size += state->indent_len + 1; + if (state->space) size += state->space_len + 1; + if (state->space_before) size += state->space_before_len + 1; + if (state->object_nl) size += state->object_nl_len + 1; + if (state->array_nl) size += state->array_nl_len + 1; + if (state->array_delim) size += FBUFFER_CAPA(state->array_delim); + if (state->object_delim) size += FBUFFER_CAPA(state->object_delim); + if (state->object_delim2) size += FBUFFER_CAPA(state->object_delim2); + return size; +} + +#ifndef HAVE_RB_EXT_RACTOR_SAFE +# undef RUBY_TYPED_FROZEN_SHAREABLE +# define RUBY_TYPED_FROZEN_SHAREABLE 0 +#endif + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Generator_State_type = { + "JSON/Generator/State", + {NULL, State_free, State_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_FROZEN_SHAREABLE, +#endif +}; +#endif + +static VALUE cState_s_allocate(VALUE klass) +{ + JSON_Generator_State *state; + return TypedData_Make_Struct(klass, JSON_Generator_State, + &JSON_Generator_State_type, state); +} + +/* + * call-seq: configure(opts) + * + * Configure this State instance with the Hash _opts_, and return + * itself. 
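+ *
+ * For example (a sketch using the documented option keys):
+ *
+ *   state = JSON::State.new
+ *   state.configure(indent: '  ', space: ' ', object_nl: "\n")
+ *   state.generate('a' => 1)  # => "{\n  \"a\": 1\n}"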
+ */ +static VALUE cState_configure(VALUE self, VALUE opts) +{ + VALUE tmp; + GET_STATE(self); + tmp = rb_check_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(tmp)) tmp = rb_convert_type(opts, T_HASH, "Hash", "to_h"); + opts = tmp; + tmp = rb_hash_aref(opts, ID2SYM(i_indent)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->indent = fstrndup(RSTRING_PTR(tmp), len + 1); + state->indent_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_space_before)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->space_before = fstrndup(RSTRING_PTR(tmp), len + 1); + state->space_before_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_array_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->array_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->array_nl_len = len; + } + tmp = rb_hash_aref(opts, ID2SYM(i_object_nl)); + if (RTEST(tmp)) { + unsigned long len; + Check_Type(tmp, T_STRING); + len = RSTRING_LEN(tmp); + state->object_nl = fstrndup(RSTRING_PTR(tmp), len + 1); + state->object_nl_len = len; + } + tmp = ID2SYM(i_max_nesting); + state->max_nesting = 100; + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + state->max_nesting = FIX2LONG(max_nesting); + } else { + state->max_nesting = 0; + } + } + tmp = ID2SYM(i_depth); + state->depth = 0; + if (option_given_p(opts, tmp)) { + VALUE depth = rb_hash_aref(opts, tmp); + if (RTEST(depth)) { + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + } else { + state->depth = 0; + } + } + tmp = ID2SYM(i_buffer_initial_length); + if (option_given_p(opts, tmp)) { + VALUE buffer_initial_length = rb_hash_aref(opts, tmp); + if (RTEST(buffer_initial_length)) { + long initial_length; + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) state->buffer_initial_length = initial_length; + } + } + tmp = rb_hash_aref(opts, ID2SYM(i_allow_nan)); + state->allow_nan = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_ascii_only)); + state->ascii_only = RTEST(tmp); + tmp = rb_hash_aref(opts, ID2SYM(i_escape_slash)); + state->escape_slash = RTEST(tmp); + return self; +} + +static void set_state_ivars(VALUE hash, VALUE state) +{ + VALUE ivars = rb_obj_instance_variables(state); + int i = 0; + for (i = 0; i < RARRAY_LEN(ivars); i++) { + VALUE key = rb_funcall(rb_ary_entry(ivars, i), i_to_s, 0); + long key_len = RSTRING_LEN(key); + VALUE value = rb_iv_get(state, StringValueCStr(key)); + rb_hash_aset(hash, rb_str_intern(rb_str_substr(key, 1, key_len - 1)), value); + } +} + +/* + * call-seq: to_h + * + * Returns the configuration instance variables as a hash, that can be + * passed to the configure method. 
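+ *
+ * A quick sketch (keys are the symbols registered below):
+ *
+ *   h = JSON::State.new(indent: '  ').to_h
+ *   h[:indent]       # => "  "
+ *   h[:max_nesting]  # => 100
+ *   h[:allow_nan]    # => false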
+ */ +static VALUE cState_to_h(VALUE self) +{ + VALUE result = rb_hash_new(); + GET_STATE(self); + set_state_ivars(result, self); + rb_hash_aset(result, ID2SYM(i_indent), rb_str_new(state->indent, state->indent_len)); + rb_hash_aset(result, ID2SYM(i_space), rb_str_new(state->space, state->space_len)); + rb_hash_aset(result, ID2SYM(i_space_before), rb_str_new(state->space_before, state->space_before_len)); + rb_hash_aset(result, ID2SYM(i_object_nl), rb_str_new(state->object_nl, state->object_nl_len)); + rb_hash_aset(result, ID2SYM(i_array_nl), rb_str_new(state->array_nl, state->array_nl_len)); + rb_hash_aset(result, ID2SYM(i_allow_nan), state->allow_nan ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_ascii_only), state->ascii_only ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_max_nesting), LONG2FIX(state->max_nesting)); + rb_hash_aset(result, ID2SYM(i_escape_slash), state->escape_slash ? Qtrue : Qfalse); + rb_hash_aset(result, ID2SYM(i_depth), LONG2FIX(state->depth)); + rb_hash_aset(result, ID2SYM(i_buffer_initial_length), LONG2FIX(state->buffer_initial_length)); + return result; +} + +/* +* call-seq: [](name) +* +* Returns the value returned by method +name+. +*/ +static VALUE cState_aref(VALUE self, VALUE name) +{ + name = rb_funcall(name, i_to_s, 0); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name))) { + return rb_funcall(self, i_send, 1, name); + } else { + return rb_attr_get(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name))); + } +} + +/* +* call-seq: []=(name, value) +* +* Sets the attribute name to value. +*/ +static VALUE cState_aset(VALUE self, VALUE name, VALUE value) +{ + VALUE name_writer; + + name = rb_funcall(name, i_to_s, 0); + name_writer = rb_str_cat2(rb_str_dup(name), "="); + if (RTEST(rb_funcall(self, i_respond_to_p, 1, name_writer))) { + return rb_funcall(self, i_send, 2, name_writer, value); + } else { + rb_ivar_set(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name)), value); + } + return Qnil; +} + +struct hash_foreach_arg { + FBuffer *buffer; + JSON_Generator_State *state; + VALUE Vstate; + int iter; +}; + +static int +json_object_i(VALUE key, VALUE val, VALUE _arg) +{ + struct hash_foreach_arg *arg = (struct hash_foreach_arg *)_arg; + FBuffer *buffer = arg->buffer; + JSON_Generator_State *state = arg->state; + VALUE Vstate = arg->Vstate; + + char *object_nl = state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + char *delim = FBUFFER_PTR(state->object_delim); + long delim_len = FBUFFER_LEN(state->object_delim); + char *delim2 = FBUFFER_PTR(state->object_delim2); + long delim2_len = FBUFFER_LEN(state->object_delim2); + long depth = state->depth; + int j; + VALUE klass, key_to_s; + + if (arg->iter > 0) fbuffer_append(buffer, delim, delim_len); + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + } + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + + klass = CLASS_OF(key); + if (klass == rb_cString) { + key_to_s = key; + } else if (klass == rb_cSymbol) { + key_to_s = rb_id2str(SYM2ID(key)); + } else { + key_to_s = rb_funcall(key, i_to_s, 0); + } + Check_Type(key_to_s, T_STRING); + generate_json(buffer, Vstate, state, key_to_s); + fbuffer_append(buffer, delim2, delim2_len); + generate_json(buffer, Vstate, state, val); + + arg->iter++; + return ST_CONTINUE; +} + +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *object_nl = 
state->object_nl; + long object_nl_len = state->object_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + long depth = ++state->depth; + int j; + struct hash_foreach_arg arg; + + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '{'); + + arg.buffer = buffer; + arg.state = state; + arg.Vstate = Vstate; + arg.iter = 0; + rb_hash_foreach(obj, json_object_i, (VALUE)&arg); + + depth = --state->depth; + if (object_nl) { + fbuffer_append(buffer, object_nl, object_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, '}'); +} + +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + char *array_nl = state->array_nl; + long array_nl_len = state->array_nl_len; + char *indent = state->indent; + long indent_len = state->indent_len; + long max_nesting = state->max_nesting; + char *delim = FBUFFER_PTR(state->array_delim); + long delim_len = FBUFFER_LEN(state->array_delim); + long depth = ++state->depth; + int i, j; + if (max_nesting != 0 && depth > max_nesting) { + fbuffer_free(buffer); + rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); + } + fbuffer_append_char(buffer, '['); + if (array_nl) fbuffer_append(buffer, array_nl, array_nl_len); + for(i = 0; i < RARRAY_LEN(obj); i++) { + if (i > 0) fbuffer_append(buffer, delim, delim_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + generate_json(buffer, Vstate, state, rb_ary_entry(obj, i)); + } + state->depth = --depth; + if (array_nl) { + fbuffer_append(buffer, array_nl, array_nl_len); + if (indent) { + for (j = 0; j < depth; j++) { + fbuffer_append(buffer, indent, indent_len); + } + } + } + fbuffer_append_char(buffer, ']'); +} + +#ifdef HAVE_RUBY_ENCODING_H +static int enc_utf8_compatible_p(rb_encoding *enc) +{ + if (enc == rb_usascii_encoding()) return 1; + if (enc == rb_utf8_encoding()) return 1; + return 0; +} +#endif + +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_char(buffer, '"'); +#ifdef HAVE_RUBY_ENCODING_H + if (!enc_utf8_compatible_p(rb_enc_get(obj))) { + obj = rb_str_export_to_enc(obj, rb_utf8_encoding()); + } +#endif + if (state->ascii_only) { + convert_UTF8_to_JSON_ASCII(buffer, obj, state->escape_slash); + } else { + convert_UTF8_to_JSON(buffer, obj, state->escape_slash); + } + fbuffer_append_char(buffer, '"'); +} + +static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "null", 4); +} + +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "false", 5); +} + +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append(buffer, "true", 4); +} + +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + fbuffer_append_long(buffer, FIX2LONG(obj)); +} + +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp = rb_funcall(obj, i_to_s, 0); + fbuffer_append_str(buffer, tmp); +} + +#ifdef RUBY_INTEGER_UNIFICATION +static void 
generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + if (FIXNUM_P(obj)) + generate_json_fixnum(buffer, Vstate, state, obj); + else + generate_json_bignum(buffer, Vstate, state, obj); +} +#endif +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + double value = RFLOAT_VALUE(obj); + char allow_nan = state->allow_nan; + VALUE tmp = rb_funcall(obj, i_to_s, 0); + if (!allow_nan) { + if (isinf(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } else if (isnan(value)) { + fbuffer_free(buffer); + rb_raise(eGeneratorError, "%u: %"PRIsVALUE" not allowed in JSON", __LINE__, RB_OBJ_STRING(tmp)); + } + } + fbuffer_append_str(buffer, tmp); +} + +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) +{ + VALUE tmp; + VALUE klass = CLASS_OF(obj); + if (klass == rb_cHash) { + generate_json_object(buffer, Vstate, state, obj); + } else if (klass == rb_cArray) { + generate_json_array(buffer, Vstate, state, obj); + } else if (klass == rb_cString) { + generate_json_string(buffer, Vstate, state, obj); + } else if (obj == Qnil) { + generate_json_null(buffer, Vstate, state, obj); + } else if (obj == Qfalse) { + generate_json_false(buffer, Vstate, state, obj); + } else if (obj == Qtrue) { + generate_json_true(buffer, Vstate, state, obj); + } else if (FIXNUM_P(obj)) { + generate_json_fixnum(buffer, Vstate, state, obj); + } else if (RB_TYPE_P(obj, T_BIGNUM)) { + generate_json_bignum(buffer, Vstate, state, obj); + } else if (klass == rb_cFloat) { + generate_json_float(buffer, Vstate, state, obj); + } else if (rb_respond_to(obj, i_to_json)) { + tmp = rb_funcall(obj, i_to_json, 1, Vstate); + Check_Type(tmp, T_STRING); + fbuffer_append_str(buffer, tmp); + } else { + tmp = rb_funcall(obj, i_to_s, 0); + Check_Type(tmp, T_STRING); + generate_json_string(buffer, Vstate, state, tmp); + } +} + +static FBuffer *cState_prepare_buffer(VALUE self) +{ + FBuffer *buffer; + GET_STATE(self); + buffer = fbuffer_alloc(state->buffer_initial_length); + + if (state->object_delim) { + fbuffer_clear(state->object_delim); + } else { + state->object_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->object_delim, ','); + if (state->object_delim2) { + fbuffer_clear(state->object_delim2); + } else { + state->object_delim2 = fbuffer_alloc(16); + } + if (state->space_before) fbuffer_append(state->object_delim2, state->space_before, state->space_before_len); + fbuffer_append_char(state->object_delim2, ':'); + if (state->space) fbuffer_append(state->object_delim2, state->space, state->space_len); + + if (state->array_delim) { + fbuffer_clear(state->array_delim); + } else { + state->array_delim = fbuffer_alloc(16); + } + fbuffer_append_char(state->array_delim, ','); + if (state->array_nl) fbuffer_append(state->array_delim, state->array_nl, state->array_nl_len); + return buffer; +} + +static VALUE cState_partial_generate(VALUE self, VALUE obj) +{ + FBuffer *buffer = cState_prepare_buffer(self); + GET_STATE(self); + generate_json(buffer, self, state, obj); + return fbuffer_to_s(buffer); +} + +/* + * call-seq: generate(obj) + * + * Generates a valid JSON document from object +obj+ and returns the + * result. If no valid JSON document can be created this method raises a + * GeneratorError exception. 
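+ *
+ * A short sketch (with default options NaN and Infinity are rejected):
+ *
+ *   state = JSON::State.new
+ *   state.generate([1, 2.5, nil, true])                    # => "[1,2.5,null,true]"
+ *   state.generate(Float::NAN)                             # raises JSON::GeneratorError
+ *   JSON::State.new(allow_nan: true).generate(Float::NAN)  # => "NaN"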
+ */ +static VALUE cState_generate(VALUE self, VALUE obj) +{ + VALUE result = cState_partial_generate(self, obj); + GET_STATE(self); + (void)state; + return result; +} + +/* + * call-seq: new(opts = {}) + * + * Instantiates a new State object, configured by _opts_. + * + * _opts_ can have the following keys: + * + * * *indent*: a string used to indent levels (default: ''), + * * *space*: a string that is put after, a : or , delimiter (default: ''), + * * *space_before*: a string that is put before a : pair delimiter (default: ''), + * * *object_nl*: a string that is put at the end of a JSON object (default: ''), + * * *array_nl*: a string that is put at the end of a JSON array (default: ''), + * * *allow_nan*: true if NaN, Infinity, and -Infinity should be + * generated, otherwise an exception is thrown, if these values are + * encountered. This options defaults to false. + * * *ascii_only*: true if only ASCII characters should be generated. This + * option defaults to false. + * * *buffer_initial_length*: sets the initial length of the generator's + * internal buffer. + */ +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE opts; + GET_STATE(self); + state->max_nesting = 100; + state->buffer_initial_length = FBUFFER_INITIAL_LENGTH_DEFAULT; + rb_scan_args(argc, argv, "01", &opts); + if (!NIL_P(opts)) cState_configure(self, opts); + return self; +} + +/* + * call-seq: initialize_copy(orig) + * + * Initializes this object from orig if it can be duplicated/cloned and returns + * it. +*/ +static VALUE cState_init_copy(VALUE obj, VALUE orig) +{ + JSON_Generator_State *objState, *origState; + + if (obj == orig) return obj; + GET_STATE_TO(obj, objState); + GET_STATE_TO(orig, origState); + if (!objState) rb_raise(rb_eArgError, "unallocated JSON::State"); + + MEMCPY(objState, origState, JSON_Generator_State, 1); + objState->indent = fstrndup(origState->indent, origState->indent_len); + objState->space = fstrndup(origState->space, origState->space_len); + objState->space_before = fstrndup(origState->space_before, origState->space_before_len); + objState->object_nl = fstrndup(origState->object_nl, origState->object_nl_len); + objState->array_nl = fstrndup(origState->array_nl, origState->array_nl_len); + if (origState->array_delim) objState->array_delim = fbuffer_dup(origState->array_delim); + if (origState->object_delim) objState->object_delim = fbuffer_dup(origState->object_delim); + if (origState->object_delim2) objState->object_delim2 = fbuffer_dup(origState->object_delim2); + return obj; +} + +/* + * call-seq: from_state(opts) + * + * Creates a State object from _opts_, which ought to be Hash to create a + * new State instance configured by _opts_, something else to create an + * unconfigured instance. If _opts_ is a State object, it is just returned. + */ +static VALUE cState_from_state_s(VALUE self, VALUE opts) +{ + if (rb_obj_is_kind_of(opts, self)) { + return opts; + } else if (rb_obj_is_kind_of(opts, rb_cHash)) { + return rb_funcall(self, i_new, 1, opts); + } else { + return rb_class_new_instance(0, NULL, cState); + } +} + +/* + * call-seq: indent() + * + * Returns the string that is used to indent levels in the JSON text. + */ +static VALUE cState_indent(VALUE self) +{ + GET_STATE(self); + return state->indent ? rb_str_new(state->indent, state->indent_len) : rb_str_new2(""); +} + +/* + * call-seq: indent=(indent) + * + * Sets the string that is used to indent levels in the JSON text. 
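+ *
+ * For instance (a sketch; assigning an empty string clears the indent):
+ *
+ *   state = JSON::State.new(object_nl: "\n")
+ *   state.indent = '    '
+ *   state.generate('a' => 1)  # => "{\n    \"a\":1\n}"
+ *   state.indent = ''
+ *   state.indent              # => ""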
+ */ +static VALUE cState_indent_set(VALUE self, VALUE indent) +{ + unsigned long len; + GET_STATE(self); + Check_Type(indent, T_STRING); + len = RSTRING_LEN(indent); + if (len == 0) { + if (state->indent) { + ruby_xfree(state->indent); + state->indent = NULL; + state->indent_len = 0; + } + } else { + if (state->indent) ruby_xfree(state->indent); + state->indent = fstrndup(RSTRING_PTR(indent), len); + state->indent_len = len; + } + return Qnil; +} + +/* + * call-seq: space() + * + * Returns the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space(VALUE self) +{ + GET_STATE(self); + return state->space ? rb_str_new(state->space, state->space_len) : rb_str_new2(""); +} + +/* + * call-seq: space=(space) + * + * Sets _space_ to the string that is used to insert a space between the tokens in a JSON + * string. + */ +static VALUE cState_space_set(VALUE self, VALUE space) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space, T_STRING); + len = RSTRING_LEN(space); + if (len == 0) { + if (state->space) { + ruby_xfree(state->space); + state->space = NULL; + state->space_len = 0; + } + } else { + if (state->space) ruby_xfree(state->space); + state->space = fstrndup(RSTRING_PTR(space), len); + state->space_len = len; + } + return Qnil; +} + +/* + * call-seq: space_before() + * + * Returns the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before(VALUE self) +{ + GET_STATE(self); + return state->space_before ? rb_str_new(state->space_before, state->space_before_len) : rb_str_new2(""); +} + +/* + * call-seq: space_before=(space_before) + * + * Sets the string that is used to insert a space before the ':' in JSON objects. + */ +static VALUE cState_space_before_set(VALUE self, VALUE space_before) +{ + unsigned long len; + GET_STATE(self); + Check_Type(space_before, T_STRING); + len = RSTRING_LEN(space_before); + if (len == 0) { + if (state->space_before) { + ruby_xfree(state->space_before); + state->space_before = NULL; + state->space_before_len = 0; + } + } else { + if (state->space_before) ruby_xfree(state->space_before); + state->space_before = fstrndup(RSTRING_PTR(space_before), len); + state->space_before_len = len; + } + return Qnil; +} + +/* + * call-seq: object_nl() + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl(VALUE self) +{ + GET_STATE(self); + return state->object_nl ? rb_str_new(state->object_nl, state->object_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: object_nl=(object_nl) + * + * This string is put at the end of a line that holds a JSON object (or + * Hash). + */ +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(object_nl, T_STRING); + len = RSTRING_LEN(object_nl); + if (len == 0) { + if (state->object_nl) { + ruby_xfree(state->object_nl); + state->object_nl = NULL; + } + } else { + if (state->object_nl) ruby_xfree(state->object_nl); + state->object_nl = fstrndup(RSTRING_PTR(object_nl), len); + state->object_nl_len = len; + } + return Qnil; +} + +/* + * call-seq: array_nl() + * + * This string is put at the end of a line that holds a JSON array. + */ +static VALUE cState_array_nl(VALUE self) +{ + GET_STATE(self); + return state->array_nl ? rb_str_new(state->array_nl, state->array_nl_len) : rb_str_new2(""); +} + +/* + * call-seq: array_nl=(array_nl) + * + * This string is put at the end of a line that holds a JSON array. 
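+ *
+ * Sketch of the effect with array_nl alone (no indent configured):
+ *
+ *   state = JSON::State.new(array_nl: "\n")
+ *   state.generate([1, 2])  # => "[\n1,\n2\n]"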
+ */ +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl) +{ + unsigned long len; + GET_STATE(self); + Check_Type(array_nl, T_STRING); + len = RSTRING_LEN(array_nl); + if (len == 0) { + if (state->array_nl) { + ruby_xfree(state->array_nl); + state->array_nl = NULL; + } + } else { + if (state->array_nl) ruby_xfree(state->array_nl); + state->array_nl = fstrndup(RSTRING_PTR(array_nl), len); + state->array_nl_len = len; + } + return Qnil; +} + + +/* +* call-seq: check_circular? +* +* Returns true, if circular data structures should be checked, +* otherwise returns false. +*/ +static VALUE cState_check_circular_p(VALUE self) +{ + GET_STATE(self); + return state->max_nesting ? Qtrue : Qfalse; +} + +/* + * call-seq: max_nesting + * + * This integer returns the maximum level of data structure nesting in + * the generated JSON, max_nesting = 0 if no maximum is checked. + */ +static VALUE cState_max_nesting(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->max_nesting); +} + +/* + * call-seq: max_nesting=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_max_nesting_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + return state->max_nesting = FIX2LONG(depth); +} + +/* + * call-seq: escape_slash + * + * If this boolean is true, the forward slashes will be escaped in + * the json output. + */ +static VALUE cState_escape_slash(VALUE self) +{ + GET_STATE(self); + return state->escape_slash ? Qtrue : Qfalse; +} + +/* + * call-seq: escape_slash=(depth) + * + * This sets whether or not the forward slashes will be escaped in + * the json output. + */ +static VALUE cState_escape_slash_set(VALUE self, VALUE enable) +{ + GET_STATE(self); + state->escape_slash = RTEST(enable); + return Qnil; +} + +/* + * call-seq: allow_nan? + * + * Returns true, if NaN, Infinity, and -Infinity should be generated, otherwise + * returns false. + */ +static VALUE cState_allow_nan_p(VALUE self) +{ + GET_STATE(self); + return state->allow_nan ? Qtrue : Qfalse; +} + +/* + * call-seq: ascii_only? + * + * Returns true, if only ASCII characters should be generated. Otherwise + * returns false. + */ +static VALUE cState_ascii_only_p(VALUE self) +{ + GET_STATE(self); + return state->ascii_only ? Qtrue : Qfalse; +} + +/* + * call-seq: depth + * + * This integer returns the current depth of data structure nesting. + */ +static VALUE cState_depth(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->depth); +} + +/* + * call-seq: depth=(depth) + * + * This sets the maximum level of data structure nesting in the generated JSON + * to the integer depth, max_nesting = 0 if no maximum should be checked. + */ +static VALUE cState_depth_set(VALUE self, VALUE depth) +{ + GET_STATE(self); + Check_Type(depth, T_FIXNUM); + state->depth = FIX2LONG(depth); + return Qnil; +} + +/* + * call-seq: buffer_initial_length + * + * This integer returns the current initial length of the buffer. + */ +static VALUE cState_buffer_initial_length(VALUE self) +{ + GET_STATE(self); + return LONG2FIX(state->buffer_initial_length); +} + +/* + * call-seq: buffer_initial_length=(length) + * + * This sets the initial length of the buffer to +length+, if +length+ > 0, + * otherwise its value isn't changed. 
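+ *
+ * For example (values <= 0 are ignored, as the setter below shows):
+ *
+ *   state = JSON::State.new
+ *   state.buffer_initial_length = 4096
+ *   state.buffer_initial_length       # => 4096
+ *   state.buffer_initial_length = -1  # out of range, value unchanged
+ *   state.buffer_initial_length       # => 4096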
+ */ +static VALUE cState_buffer_initial_length_set(VALUE self, VALUE buffer_initial_length) +{ + long initial_length; + GET_STATE(self); + Check_Type(buffer_initial_length, T_FIXNUM); + initial_length = FIX2LONG(buffer_initial_length); + if (initial_length > 0) { + state->buffer_initial_length = initial_length; + } + return Qnil; +} + +/* + * + */ +void Init_generator(void) +{ +#ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); +#endif + +#undef rb_intern + rb_require("json/common"); + + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + mGenerator = rb_define_module_under(mExt, "Generator"); + + eGeneratorError = rb_path2class("JSON::GeneratorError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eGeneratorError); + rb_gc_register_mark_object(eNestingError); + + cState = rb_define_class_under(mGenerator, "State", rb_cObject); + rb_define_alloc_func(cState, cState_s_allocate); + rb_define_singleton_method(cState, "from_state", cState_from_state_s, 1); + rb_define_method(cState, "initialize", cState_initialize, -1); + rb_define_method(cState, "initialize_copy", cState_init_copy, 1); + rb_define_method(cState, "indent", cState_indent, 0); + rb_define_method(cState, "indent=", cState_indent_set, 1); + rb_define_method(cState, "space", cState_space, 0); + rb_define_method(cState, "space=", cState_space_set, 1); + rb_define_method(cState, "space_before", cState_space_before, 0); + rb_define_method(cState, "space_before=", cState_space_before_set, 1); + rb_define_method(cState, "object_nl", cState_object_nl, 0); + rb_define_method(cState, "object_nl=", cState_object_nl_set, 1); + rb_define_method(cState, "array_nl", cState_array_nl, 0); + rb_define_method(cState, "array_nl=", cState_array_nl_set, 1); + rb_define_method(cState, "max_nesting", cState_max_nesting, 0); + rb_define_method(cState, "max_nesting=", cState_max_nesting_set, 1); + rb_define_method(cState, "escape_slash", cState_escape_slash, 0); + rb_define_method(cState, "escape_slash?", cState_escape_slash, 0); + rb_define_method(cState, "escape_slash=", cState_escape_slash_set, 1); + rb_define_method(cState, "check_circular?", cState_check_circular_p, 0); + rb_define_method(cState, "allow_nan?", cState_allow_nan_p, 0); + rb_define_method(cState, "ascii_only?", cState_ascii_only_p, 0); + rb_define_method(cState, "depth", cState_depth, 0); + rb_define_method(cState, "depth=", cState_depth_set, 1); + rb_define_method(cState, "buffer_initial_length", cState_buffer_initial_length, 0); + rb_define_method(cState, "buffer_initial_length=", cState_buffer_initial_length_set, 1); + rb_define_method(cState, "configure", cState_configure, 1); + rb_define_alias(cState, "merge", "configure"); + rb_define_method(cState, "to_h", cState_to_h, 0); + rb_define_alias(cState, "to_hash", "to_h"); + rb_define_method(cState, "[]", cState_aref, 1); + rb_define_method(cState, "[]=", cState_aset, 2); + rb_define_method(cState, "generate", cState_generate, 1); + + mGeneratorMethods = rb_define_module_under(mGenerator, "GeneratorMethods"); + mObject = rb_define_module_under(mGeneratorMethods, "Object"); + rb_define_method(mObject, "to_json", mObject_to_json, -1); + mHash = rb_define_module_under(mGeneratorMethods, "Hash"); + rb_define_method(mHash, "to_json", mHash_to_json, -1); + mArray = rb_define_module_under(mGeneratorMethods, "Array"); + rb_define_method(mArray, "to_json", mArray_to_json, -1); +#ifdef RUBY_INTEGER_UNIFICATION + mInteger = rb_define_module_under(mGeneratorMethods, 
"Integer"); + rb_define_method(mInteger, "to_json", mInteger_to_json, -1); +#else + mFixnum = rb_define_module_under(mGeneratorMethods, "Fixnum"); + rb_define_method(mFixnum, "to_json", mFixnum_to_json, -1); + mBignum = rb_define_module_under(mGeneratorMethods, "Bignum"); + rb_define_method(mBignum, "to_json", mBignum_to_json, -1); +#endif + mFloat = rb_define_module_under(mGeneratorMethods, "Float"); + rb_define_method(mFloat, "to_json", mFloat_to_json, -1); + mString = rb_define_module_under(mGeneratorMethods, "String"); + rb_define_singleton_method(mString, "included", mString_included_s, 1); + rb_define_method(mString, "to_json", mString_to_json, -1); + rb_define_method(mString, "to_json_raw", mString_to_json_raw, -1); + rb_define_method(mString, "to_json_raw_object", mString_to_json_raw_object, 0); + mString_Extend = rb_define_module_under(mString, "Extend"); + rb_define_method(mString_Extend, "json_create", mString_Extend_json_create, 1); + mTrueClass = rb_define_module_under(mGeneratorMethods, "TrueClass"); + rb_define_method(mTrueClass, "to_json", mTrueClass_to_json, -1); + mFalseClass = rb_define_module_under(mGeneratorMethods, "FalseClass"); + rb_define_method(mFalseClass, "to_json", mFalseClass_to_json, -1); + mNilClass = rb_define_module_under(mGeneratorMethods, "NilClass"); + rb_define_method(mNilClass, "to_json", mNilClass_to_json, -1); + + i_to_s = rb_intern("to_s"); + i_to_json = rb_intern("to_json"); + i_new = rb_intern("new"); + i_indent = rb_intern("indent"); + i_space = rb_intern("space"); + i_space_before = rb_intern("space_before"); + i_object_nl = rb_intern("object_nl"); + i_array_nl = rb_intern("array_nl"); + i_max_nesting = rb_intern("max_nesting"); + i_escape_slash = rb_intern("escape_slash"); + i_allow_nan = rb_intern("allow_nan"); + i_ascii_only = rb_intern("ascii_only"); + i_depth = rb_intern("depth"); + i_buffer_initial_length = rb_intern("buffer_initial_length"); + i_pack = rb_intern("pack"); + i_unpack = rb_intern("unpack"); + i_create_id = rb_intern("create_id"); + i_extend = rb_intern("extend"); + i_key_p = rb_intern("key?"); + i_aref = rb_intern("[]"); + i_send = rb_intern("__send__"); + i_respond_to_p = rb_intern("respond_to?"); + i_match = rb_intern("match"); + i_keys = rb_intern("keys"); + i_dup = rb_intern("dup"); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.h b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.h new file mode 100644 index 0000000..3ebd622 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.h @@ -0,0 +1,174 @@ +#ifndef _GENERATOR_H_ +#define _GENERATOR_H_ + +#include +#include + +#include "ruby.h" + +#ifdef HAVE_RUBY_RE_H +#include "ruby/re.h" +#else +#include "re.h" +#endif + +#ifndef rb_intern_str +#define rb_intern_str(string) SYM2ID(rb_str_intern(string)) +#endif + +#ifndef rb_obj_instance_variables +#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0) +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode definitions */ + +#define UNI_STRICT_CONVERSION 1 + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_MAX_BMP (UTF32)0x0000FFFF +#define UNI_MAX_UTF16 (UTF32)0x0010FFFF +#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF +#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF + 
+#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +static const int halfShift = 10; /* used for shifting by 10 bits */ + +static const UTF32 halfBase = 0x0010000UL; +static const UTF32 halfMask = 0x3FFUL; + +static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length); +static void unicode_escape(char *buf, UTF16 character); +static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character); +static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string, char escape_slash); +static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string, char escape_slash); +static char *fstrndup(const char *ptr, unsigned long len); + +/* ruby api and some helpers */ + +typedef struct JSON_Generator_StateStruct { + char *indent; + long indent_len; + char *space; + long space_len; + char *space_before; + long space_before_len; + char *object_nl; + long object_nl_len; + char *array_nl; + long array_nl_len; + FBuffer *array_delim; + FBuffer *object_delim; + FBuffer *object_delim2; + long max_nesting; + char allow_nan; + char ascii_only; + char escape_slash; + long depth; + long buffer_initial_length; +} JSON_Generator_State; + +#define GET_STATE_TO(self, state) \ + TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state) + +#define GET_STATE(self) \ + JSON_Generator_State *state; \ + GET_STATE_TO(self, state) + +#define GENERATE_JSON(type) \ + FBuffer *buffer; \ + VALUE Vstate; \ + JSON_Generator_State *state; \ + \ + rb_scan_args(argc, argv, "01", &Vstate); \ + Vstate = cState_from_state_s(cState, Vstate); \ + TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \ + buffer = cState_prepare_buffer(Vstate); \ + generate_json_##type(buffer, Vstate, state, self); \ + return fbuffer_to_s(buffer) + +static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self); +#ifdef RUBY_INTEGER_UNIFICATION +static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self); +#else +static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self); +#endif +static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_included_s(VALUE self, VALUE modul); +static VALUE mString_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mString_to_json_raw_object(VALUE self); +static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self); +static VALUE mString_Extend_json_create(VALUE self, VALUE o); +static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self); +static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self); +static void State_free(void *state); +static VALUE cState_s_allocate(VALUE klass); +static VALUE cState_configure(VALUE self, VALUE opts); +static VALUE cState_to_h(VALUE self); +static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void 
generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#ifdef RUBY_INTEGER_UNIFICATION +static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +#endif +static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj); +static VALUE cState_partial_generate(VALUE self, VALUE obj); +static VALUE cState_generate(VALUE self, VALUE obj); +static VALUE cState_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cState_from_state_s(VALUE self, VALUE opts); +static VALUE cState_indent(VALUE self); +static VALUE cState_indent_set(VALUE self, VALUE indent); +static VALUE cState_space(VALUE self); +static VALUE cState_space_set(VALUE self, VALUE space); +static VALUE cState_space_before(VALUE self); +static VALUE cState_space_before_set(VALUE self, VALUE space_before); +static VALUE cState_object_nl(VALUE self); +static VALUE cState_object_nl_set(VALUE self, VALUE object_nl); +static VALUE cState_array_nl(VALUE self); +static VALUE cState_array_nl_set(VALUE self, VALUE array_nl); +static VALUE cState_max_nesting(VALUE self); +static VALUE cState_max_nesting_set(VALUE self, VALUE depth); +static VALUE cState_allow_nan_p(VALUE self); +static VALUE cState_ascii_only_p(VALUE self); +static VALUE cState_depth(VALUE self); +static VALUE cState_depth_set(VALUE self, VALUE depth); +static VALUE cState_escape_slash(VALUE self); +static VALUE cState_escape_slash_set(VALUE self, VALUE depth); +static FBuffer *cState_prepare_buffer(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Generator_State_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json) +#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.o b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.o new file mode 100644 index 0000000..edbbc0a Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/generator/generator.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/.sitearchdir.-.json.-.ext.time b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/.sitearchdir.-.json.-.ext.time new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/Makefile b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/Makefile new file mode 100644 index 0000000..eb1245f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/Makefile @@ -0,0 +1,268 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. 
+V = 0 +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . +topdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0 +hdrdir = $(topdir) +arch_hdrdir = /Users/arthur/.rvm/rubies/ruby-3.0.0/include/ruby-3.0.0/arm64-darwin21 +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/Users/arthur/.rvm/rubies/ruby-3.0.0 +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20220301-14832-nfemna +sitelibdir = $(DESTDIR)./.gem.20220301-14832-nfemna +sitedir = $(rubylibprefix)/site_ruby +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(datarootdir)/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(datarootdir)/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(SDKROOT)/usr/include +includedir = $(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(prefix)/etc +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = gcc -fdeclspec +CXX = g++ -fdeclspec +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework Security -framework Foundation $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = +optflags = -O3 +debugflags = -ggdb3 +warnflags = -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wmissing-noreturn -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens +cppflags = +CCDLFLAGS = -fno-common +CFLAGS = $(CCDLFLAGS) -O3 -I/opt/homebrew/opt/libyaml/include -I/opt/homebrew/opt/libksba/include -I/opt/homebrew/opt/readline/include -I/opt/homebrew/opt/zlib/include -I/opt/homebrew/opt/openssl@1.1/include -fno-common -pipe -DSTR_UMINUS_DEDUPE=1 -DSTR_UMINUS_DEDUPE_FROZEN=1 $(ARCH_FLAG) +INCFLAGS = -I. 
-I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) +DEFS = +CPPFLAGS = -DHAVE_RB_ENC_RAISE -DHAVE_RB_ENC_INTERNED_STR -I/usr/local/opt/libffi/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG) +ldflags = -L. -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -fstack-protector-strong -L/usr/local/lib +dldflags = -L/opt/homebrew/opt/libyaml/lib -L/opt/homebrew/opt/libksba/lib -L/opt/homebrew/opt/readline/lib -L/opt/homebrew/opt/zlib/lib -L/opt/homebrew/opt/openssl@1.1/lib -Wl,-undefined,dynamic_lookup -Wl,-multiply_defined,suppress +ARCH_FLAG = -m64 +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -dynamic -bundle +LDSHAREDXX = $(CXX) -dynamic -bundle +AR = ar +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby.3.0 +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = arm64-darwin21 +sitearch = $(arch) +ruby_version = 3.0.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h + +RM = rm -f +RM_RF = $(RUBY) -run -e rm -- -rf +RMDIRS = rmdir -p +MAKEDIRS = /opt/homebrew/opt/coreutils/bin/gmkdir -p +INSTALL = /opt/homebrew/opt/coreutils/bin/ginstall -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = /json/ext +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) +ORIG_SRCS = parser.c +SRCS = $(ORIG_SRCS) +OBJS = parser.o +HDRS = $(srcdir)/parser.h +LOCAL_HDRS = +TARGET = parser +TARGET_NAME = parser +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).bundle +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . 
+BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(rubyhdrdir)/ruby$(target_prefix) +ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.-.json.-.ext.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object json/ext/$(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + $(Q) $(POSTLINK) + + + +### +parser.o: parser.c parser.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/depend b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/depend 
new file mode 100644 index 0000000..498ffa9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/depend @@ -0,0 +1 @@ +parser.o: parser.c parser.h $(srcdir)/../fbuffer/fbuffer.h diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/extconf.rb new file mode 100644 index 0000000..feb586e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/extconf.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: false +require 'mkmf' + +have_func("rb_enc_raise", "ruby.h") +have_func("rb_enc_interned_str", "ruby.h") + +# checking if String#-@ (str_uminus) dedupes... ' +begin + a = -(%w(t e s t).join) + b = -(%w(t e s t).join) + if a.equal?(b) + $CFLAGS << ' -DSTR_UMINUS_DEDUPE=1 ' + else + $CFLAGS << ' -DSTR_UMINUS_DEDUPE=0 ' + end +rescue NoMethodError + $CFLAGS << ' -DSTR_UMINUS_DEDUPE=0 ' +end + +# checking if String#-@ (str_uminus) directly interns frozen strings... ' +begin + s = rand.to_s.freeze + if (-s).equal?(s) && (-s.dup).equal?(s) + $CFLAGS << ' -DSTR_UMINUS_DEDUPE_FROZEN=1 ' + else + $CFLAGS << ' -DSTR_UMINUS_DEDUPE_FROZEN=0 ' + end +rescue NoMethodError + $CFLAGS << ' -DSTR_UMINUS_DEDUPE_FROZEN=0 ' +end + +create_makefile 'json/ext/parser' diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.bundle b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.bundle new file mode 100755 index 0000000..1137b12 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.c b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.c new file mode 100644 index 0000000..b1dc881 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.c @@ -0,0 +1,3338 @@ +/* This file is automatically generated from parser.rl by using ragel */ +#line 1 "parser.rl" +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) 
+{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, +i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, +i_object_class, i_array_class, i_decimal_class, i_key_p, +i_deep_const_get, i_match, i_match_string, i_aset, i_aref, +i_leftshift, i_new, i_try_convert, i_freeze, i_uminus; + + +#line 125 "parser.rl" + + + +enum {JSON_object_start = 1}; +enum {JSON_object_first_final = 27}; +enum {JSON_object_error = 0}; + +enum {JSON_object_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_object_nfa_push_actions)[] = { + 0, 0 +}; + +static 
const char MAYBE_UNUSED(_JSON_object_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 167 "parser.rl" + + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + + { + cs = (int)JSON_object_start; + } + + #line 182 "parser.rl" + + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 10: + goto st_case_10; + case 11: + goto st_case_11; + case 12: + goto st_case_12; + case 13: + goto st_case_13; + case 14: + goto st_case_14; + case 15: + goto st_case_15; + case 16: + goto st_case_16; + case 17: + goto st_case_17; + case 18: + goto st_case_18; + case 27: + goto st_case_27; + case 19: + goto st_case_19; + case 20: + goto st_case_20; + case 21: + goto st_case_21; + case 22: + goto st_case_22; + case 23: + goto st_case_23; + case 24: + goto st_case_24; + case 25: + goto st_case_25; + case 26: + goto st_case_26; + } + goto st_out; + st_case_1: + if ( ( (*( p))) == 123 ) { + goto st2; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 13: { + goto st2; + } + case 32: { + goto st2; + } + case 34: { + goto ctr2; + } + case 47: { + goto st23; + } + case 125: { + goto ctr4; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st2; + } + { + goto st0; + } + ctr2: + { + #line 149 "parser.rl" + + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, p, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 3; goto _out;} } else {p = (( np))-1;} + + } + + goto st3; + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + switch( ( (*( p))) ) { + case 13: { + goto st3; + } + case 32: { + goto st3; + } + case 47: { + goto st4; + } + case 58: { + goto st8; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st3; + } + { + goto st0; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 42: { + goto st5; + } + case 47: { + goto st7; + } + } + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) == 42 ) { + goto st6; + } + { + goto st5; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + switch( ( (*( p))) ) { + case 42: { + goto st6; + } + case 47: { + goto st3; + } + } + { + goto st5; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) == 10 ) { + goto st3; + } + { + goto st7; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 13: { + goto st8; + } + case 32: { + goto st8; + } + case 34: { + goto ctr11; + } + case 45: { + goto ctr11; + } + case 47: { + goto st19; + } + case 73: { + goto ctr11; + } + case 78: { + goto ctr11; + } + case 91: { + goto ctr11; + } + case 102: { + goto ctr11; + } + case 110: { + goto ctr11; + } + case 116: { + goto ctr11; + } + case 123: { + 
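+/* [editor's note] State 8 is entered right after a pair's ':' separator; any
+ * byte that can begin a JSON value ('"', '-', a digit, '[', '{', 't', 'f',
+ * 'n', 'I', 'N') jumps to ctr11, the embedded parse_value action that
+ * recursively calls JSON_parse_value and stores the result under the pending
+ * key (last_name). */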
goto ctr11; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr11; + } + } else if ( ( (*( p))) >= 9 ) { + goto st8; + } + { + goto st0; + } + ctr11: + { + #line 133 "parser.rl" + + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + {p = p - 1; } {p+= 1; cs = 9; goto _out;} + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + {p = (( np))-1;} + + } + } + + goto st9; + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + switch( ( (*( p))) ) { + case 13: { + goto st9; + } + case 32: { + goto st9; + } + case 44: { + goto st10; + } + case 47: { + goto st15; + } + case 125: { + goto ctr4; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st9; + } + { + goto st0; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + switch( ( (*( p))) ) { + case 13: { + goto st10; + } + case 32: { + goto st10; + } + case 34: { + goto ctr2; + } + case 47: { + goto st11; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st10; + } + { + goto st0; + } + st11: + p+= 1; + if ( p == pe ) + goto _test_eof11; + st_case_11: + switch( ( (*( p))) ) { + case 42: { + goto st12; + } + case 47: { + goto st14; + } + } + { + goto st0; + } + st12: + p+= 1; + if ( p == pe ) + goto _test_eof12; + st_case_12: + if ( ( (*( p))) == 42 ) { + goto st13; + } + { + goto st12; + } + st13: + p+= 1; + if ( p == pe ) + goto _test_eof13; + st_case_13: + switch( ( (*( p))) ) { + case 42: { + goto st13; + } + case 47: { + goto st10; + } + } + { + goto st12; + } + st14: + p+= 1; + if ( p == pe ) + goto _test_eof14; + st_case_14: + if ( ( (*( p))) == 10 ) { + goto st10; + } + { + goto st14; + } + st15: + p+= 1; + if ( p == pe ) + goto _test_eof15; + st_case_15: + switch( ( (*( p))) ) { + case 42: { + goto st16; + } + case 47: { + goto st18; + } + } + { + goto st0; + } + st16: + p+= 1; + if ( p == pe ) + goto _test_eof16; + st_case_16: + if ( ( (*( p))) == 42 ) { + goto st17; + } + { + goto st16; + } + st17: + p+= 1; + if ( p == pe ) + goto _test_eof17; + st_case_17: + switch( ( (*( p))) ) { + case 42: { + goto st17; + } + case 47: { + goto st9; + } + } + { + goto st16; + } + st18: + p+= 1; + if ( p == pe ) + goto _test_eof18; + st_case_18: + if ( ( (*( p))) == 10 ) { + goto st9; + } + { + goto st18; + } + ctr4: + { + #line 157 "parser.rl" + {p = p - 1; } {p+= 1; cs = 27; goto _out;} } + + goto st27; + st27: + p+= 1; + if ( p == pe ) + goto _test_eof27; + st_case_27: + { + goto st0; + } + st19: + p+= 1; + if ( p == pe ) + goto _test_eof19; + st_case_19: + switch( ( (*( p))) ) { + case 42: { + goto st20; + } + case 47: { + goto st22; + } + } + { + goto st0; + } + st20: + p+= 1; + if ( p == pe ) + goto _test_eof20; + st_case_20: + if ( ( (*( p))) == 42 ) { + goto st21; + } + { + goto st20; + } + st21: + p+= 1; + if ( p == pe ) + goto _test_eof21; + st_case_21: + switch( ( (*( p))) ) { + case 42: { + goto st21; + } + case 47: { + goto st8; + } + } + { + goto st20; + } + st22: + p+= 1; + if ( p == pe ) + goto _test_eof22; + st_case_22: + if ( ( (*( p))) == 10 ) { + goto st8; + } + { + goto st22; + } + st23: + p+= 1; + if ( p == pe ) + goto _test_eof23; + st_case_23: + switch( ( (*( p))) ) { + case 42: { + goto st24; + } + case 47: { + goto st26; + } + } + { + goto st0; + } + st24: + p+= 1; + if ( p == pe ) + goto _test_eof24; + st_case_24: + if ( ( (*( p))) == 42 ) { + goto st25; + } + 
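+/* [editor's note] States 23-26 here, like the clusters 4-7 and 11-22, exist
+ * only to skip JavaScript-style comments: after a '/' they consume either a
+ * block comment up to its closing delimiter or a line comment up to the next
+ * newline, then return to the whitespace state they came from. The parser
+ * tolerates such comments wherever plain whitespace may appear. */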
{ + goto st24; + } + st25: + p+= 1; + if ( p == pe ) + goto _test_eof25; + st_case_25: + switch( ( (*( p))) ) { + case 42: { + goto st25; + } + case 47: { + goto st2; + } + } + { + goto st24; + } + st26: + p+= 1; + if ( p == pe ) + goto _test_eof26; + st_case_26: + if ( ( (*( p))) == 10 ) { + goto st2; + } + { + goto st26; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 183 "parser.rl" + + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + + +enum {JSON_value_start = 1}; +enum {JSON_value_first_final = 29}; +enum {JSON_value_error = 0}; + +enum {JSON_value_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_value_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 283 "parser.rl" + + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + + { + cs = (int)JSON_value_start; + } + + #line 290 "parser.rl" + + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 29: + goto st_case_29; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 10: + goto st_case_10; + case 11: + goto st_case_11; + case 12: + goto st_case_12; + case 13: + goto st_case_13; + case 14: + goto st_case_14; + case 15: + goto st_case_15; + case 16: + goto st_case_16; + case 17: + goto st_case_17; + case 18: + goto st_case_18; + case 19: + goto st_case_19; + case 20: + goto st_case_20; + case 21: + goto st_case_21; + case 22: + goto st_case_22; + case 23: + goto st_case_23; + case 24: + goto st_case_24; + case 25: + 
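+/* [editor's note] This switch re-enters the generated machine at the saved
+ * Ragel state. In JSON_parse_value, states 10-28 match the bare tokens
+ * "Infinity", "NaN", "false", "null" and "true" one byte at a time, while the
+ * ctr2, ctr3, ctr7 and ctr11 actions below hand off to the string, number,
+ * array and object sub-parsers. */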
goto st_case_25; + case 26: + goto st_case_26; + case 27: + goto st_case_27; + case 28: + goto st_case_28; + } + goto st_out; + st1: + p+= 1; + if ( p == pe ) + goto _test_eof1; + st_case_1: + switch( ( (*( p))) ) { + case 13: { + goto st1; + } + case 32: { + goto st1; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr3; + } + case 47: { + goto st6; + } + case 73: { + goto st10; + } + case 78: { + goto st17; + } + case 91: { + goto ctr7; + } + case 102: { + goto st19; + } + case 110: { + goto st23; + } + case 116: { + goto st26; + } + case 123: { + goto ctr11; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr3; + } + } else if ( ( (*( p))) >= 9 ) { + goto st1; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + ctr2: + { + #line 235 "parser.rl" + + char *np = JSON_parse_string(json, p, pe, result); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 29; goto _out;} } else {p = (( np))-1;} + + } + + goto st29; + ctr3: + { + #line 240 "parser.rl" + + char *np; + if(pe > p + 8 && !strncmp(MinusInfinity, p, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + {p = (( p + 10))-1;} + + {p = p - 1; } {p+= 1; cs = 29; goto _out;} + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + + np = JSON_parse_integer(json, p, pe, result); + if (np != NULL) {p = (( np))-1;} + + {p = p - 1; } {p+= 1; cs = 29; goto _out;} + } + + goto st29; + ctr7: + { + #line 258 "parser.rl" + + char *np; + np = JSON_parse_array(json, p, pe, result, current_nesting + 1); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 29; goto _out;} } else {p = (( np))-1;} + + } + + goto st29; + ctr11: + { + #line 264 "parser.rl" + + char *np; + np = JSON_parse_object(json, p, pe, result, current_nesting + 1); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 29; goto _out;} } else {p = (( np))-1;} + + } + + goto st29; + ctr25: + { + #line 228 "parser.rl" + + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + + goto st29; + ctr27: + { + #line 221 "parser.rl" + + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + + goto st29; + ctr31: + { + #line 215 "parser.rl" + + *result = Qfalse; + } + + goto st29; + ctr34: + { + #line 212 "parser.rl" + + *result = Qnil; + } + + goto st29; + ctr37: + { + #line 218 "parser.rl" + + *result = Qtrue; + } + + goto st29; + st29: + p+= 1; + if ( p == pe ) + goto _test_eof29; + st_case_29: + { + #line 270 "parser.rl" + {p = p - 1; } {p+= 1; cs = 29; goto _out;} } + switch( ( (*( p))) ) { + case 13: { + goto st29; + } + case 32: { + goto st29; + } + case 47: { + goto st2; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st29; + } + { + goto st0; + } + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 42: { + goto st3; + } + case 47: { + goto st5; + } + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( ( (*( p))) == 42 ) { + goto st4; + } + { + goto st3; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 42: { + goto st4; + } + case 47: { + goto st29; + } + } + { + goto st3; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( 
p))) == 10 ) { + goto st29; + } + { + goto st5; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + switch( ( (*( p))) ) { + case 42: { + goto st7; + } + case 47: { + goto st9; + } + } + { + goto st0; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) == 42 ) { + goto st8; + } + { + goto st7; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 42: { + goto st8; + } + case 47: { + goto st1; + } + } + { + goto st7; + } + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + if ( ( (*( p))) == 10 ) { + goto st1; + } + { + goto st9; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + if ( ( (*( p))) == 110 ) { + goto st11; + } + { + goto st0; + } + st11: + p+= 1; + if ( p == pe ) + goto _test_eof11; + st_case_11: + if ( ( (*( p))) == 102 ) { + goto st12; + } + { + goto st0; + } + st12: + p+= 1; + if ( p == pe ) + goto _test_eof12; + st_case_12: + if ( ( (*( p))) == 105 ) { + goto st13; + } + { + goto st0; + } + st13: + p+= 1; + if ( p == pe ) + goto _test_eof13; + st_case_13: + if ( ( (*( p))) == 110 ) { + goto st14; + } + { + goto st0; + } + st14: + p+= 1; + if ( p == pe ) + goto _test_eof14; + st_case_14: + if ( ( (*( p))) == 105 ) { + goto st15; + } + { + goto st0; + } + st15: + p+= 1; + if ( p == pe ) + goto _test_eof15; + st_case_15: + if ( ( (*( p))) == 116 ) { + goto st16; + } + { + goto st0; + } + st16: + p+= 1; + if ( p == pe ) + goto _test_eof16; + st_case_16: + if ( ( (*( p))) == 121 ) { + goto ctr25; + } + { + goto st0; + } + st17: + p+= 1; + if ( p == pe ) + goto _test_eof17; + st_case_17: + if ( ( (*( p))) == 97 ) { + goto st18; + } + { + goto st0; + } + st18: + p+= 1; + if ( p == pe ) + goto _test_eof18; + st_case_18: + if ( ( (*( p))) == 78 ) { + goto ctr27; + } + { + goto st0; + } + st19: + p+= 1; + if ( p == pe ) + goto _test_eof19; + st_case_19: + if ( ( (*( p))) == 97 ) { + goto st20; + } + { + goto st0; + } + st20: + p+= 1; + if ( p == pe ) + goto _test_eof20; + st_case_20: + if ( ( (*( p))) == 108 ) { + goto st21; + } + { + goto st0; + } + st21: + p+= 1; + if ( p == pe ) + goto _test_eof21; + st_case_21: + if ( ( (*( p))) == 115 ) { + goto st22; + } + { + goto st0; + } + st22: + p+= 1; + if ( p == pe ) + goto _test_eof22; + st_case_22: + if ( ( (*( p))) == 101 ) { + goto ctr31; + } + { + goto st0; + } + st23: + p+= 1; + if ( p == pe ) + goto _test_eof23; + st_case_23: + if ( ( (*( p))) == 117 ) { + goto st24; + } + { + goto st0; + } + st24: + p+= 1; + if ( p == pe ) + goto _test_eof24; + st_case_24: + if ( ( (*( p))) == 108 ) { + goto st25; + } + { + goto st0; + } + st25: + p+= 1; + if ( p == pe ) + goto _test_eof25; + st_case_25: + if ( ( (*( p))) == 108 ) { + goto ctr34; + } + { + goto st0; + } + st26: + p+= 1; + if ( p == pe ) + goto _test_eof26; + st_case_26: + if ( ( (*( p))) == 114 ) { + goto st27; + } + { + goto st0; + } + st27: + p+= 1; + if ( p == pe ) + goto _test_eof27; + st_case_27: + if ( ( (*( p))) == 117 ) { + goto st28; + } + { + goto st0; + } + st28: + p+= 1; + if ( p == pe ) + goto _test_eof28; + st_case_28: + if ( ( (*( p))) == 101 ) { + goto ctr37; + } + { + goto st0; + } + st_out: + _test_eof1: cs = 1; goto _test_eof; + _test_eof29: cs = 29; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto 
_test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof28: cs = 28; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 291 "parser.rl" + + + if (json->freeze) { + OBJ_FREEZE(*result); + } + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + + +enum {JSON_integer_start = 1}; +enum {JSON_integer_first_final = 3}; +enum {JSON_integer_error = 0}; + +enum {JSON_integer_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_integer_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 311 "parser.rl" + + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + + { + cs = (int)JSON_integer_start; + } + + #line 318 "parser.rl" + + json->memo = p; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + } + goto st_out; + st_case_1: + switch( ( (*( p))) ) { + case 45: { + goto st2; + } + case 48: { + goto st3; + } + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + if ( ( (*( p))) == 48 ) { + goto st3; + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st0; + } + { + goto ctr4; + } + ctr4: + { + #line 308 "parser.rl" + {p = p - 1; } {p+= 1; cs = 4; goto _out;} } + + goto st4; + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + { + goto ctr4; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 320 "parser.rl" + + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + + +enum {JSON_float_start = 1}; +enum {JSON_float_first_final = 8}; +enum {JSON_float_error = 0}; + +enum {JSON_float_en_main = 1}; + +static const char 
MAYBE_UNUSED(_JSON_float_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_float_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 345 "parser.rl" + + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + + { + cs = (int)JSON_float_start; + } + + #line 352 "parser.rl" + + json->memo = p; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 10: + goto st_case_10; + case 7: + goto st_case_7; + } + goto st_out; + st_case_1: + switch( ( (*( p))) ) { + case 45: { + goto st2; + } + case 48: { + goto st3; + } + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + if ( ( (*( p))) == 48 ) { + goto st3; + } + if ( 49 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + switch( ( (*( p))) ) { + case 46: { + goto st4; + } + case 69: { + goto st5; + } + case 101: { + goto st5; + } + } + { + goto st0; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st8; + } + { + goto st0; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 69: { + goto st5; + } + case 101: { + goto st5; + } + } + if ( ( (*( p))) > 46 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st8; + } + } else if ( ( (*( p))) >= 45 ) { + goto st0; + } + { + goto ctr9; + } + ctr9: + { + #line 339 "parser.rl" + {p = p - 1; } {p+= 1; cs = 9; goto _out;} } + + goto st9; + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + switch( ( (*( p))) ) { + case 43: { + goto st6; + } + case 45: { + goto st6; + } + } + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st10; + } + { + goto st0; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st10; + } + { + goto st0; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + switch( ( (*( p))) ) { + case 69: { + goto st0; + } + case 101: { + goto st0; + } + } + if ( ( (*( p))) > 46 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st10; + } + } else if ( ( (*( p))) >= 45 ) { + goto st0; + } + { + goto ctr9; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + switch( ( (*( p))) ) { + case 46: { + goto st4; + } + case 69: { + goto st5; + } + case 101: { + goto st5; + } + } + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + { + goto st0; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: 
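+/* [editor's note] Once the float machine accepts, the code below copies the
+ * matched slice into json->fbuffer and converts it: by default with
+ * rb_cstr_to_dbl (yielding a Float), or through a configured decimal_class
+ * via try_convert or new, or, for a class such as BigDecimal, by calling the
+ * like-named conversion method (e.g. Kernel#BigDecimal) on the text. */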
{} + } + + #line 354 "parser.rl" + + + if (cs >= JSON_float_first_final) { + VALUE mod = Qnil; + ID method_id = 0; + if (rb_respond_to(json->decimal_class, i_try_convert)) { + mod = json->decimal_class; + method_id = i_try_convert; + } else if (rb_respond_to(json->decimal_class, i_new)) { + mod = json->decimal_class; + method_id = i_new; + } else if (RB_TYPE_P(json->decimal_class, T_CLASS)) { + VALUE name = rb_class_name(json->decimal_class); + const char *name_cstr = RSTRING_PTR(name); + const char *last_colon = strrchr(name_cstr, ':'); + if (last_colon) { + const char *mod_path_end = last_colon - 1; + VALUE mod_path = rb_str_substr(name, 0, mod_path_end - name_cstr); + mod = rb_path_to_class(mod_path); + + const char *method_name_beg = last_colon + 1; + long before_len = method_name_beg - name_cstr; + long len = RSTRING_LEN(name) - before_len; + VALUE method_name = rb_str_substr(name, before_len, len); + method_id = SYM2ID(rb_str_intern(method_name)); + } else { + mod = rb_mKernel; + method_id = SYM2ID(rb_str_intern(name)); + } + } + + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + + if (method_id) { + VALUE text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + *result = rb_funcallv(mod, method_id, 1, &text); + } else { + *result = DBL2NUM(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } + + return p + 1; + } else { + return NULL; + } +} + + + +enum {JSON_array_start = 1}; +enum {JSON_array_first_final = 17}; +enum {JSON_array_error = 0}; + +enum {JSON_array_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_array_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 432 "parser.rl" + + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? 
rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + + { + cs = (int)JSON_array_start; + } + + #line 445 "parser.rl" + + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + case 10: + goto st_case_10; + case 11: + goto st_case_11; + case 12: + goto st_case_12; + case 17: + goto st_case_17; + case 13: + goto st_case_13; + case 14: + goto st_case_14; + case 15: + goto st_case_15; + case 16: + goto st_case_16; + } + goto st_out; + st_case_1: + if ( ( (*( p))) == 91 ) { + goto st2; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 13: { + goto st2; + } + case 32: { + goto st2; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr2; + } + case 47: { + goto st13; + } + case 73: { + goto ctr2; + } + case 78: { + goto ctr2; + } + case 91: { + goto ctr2; + } + case 93: { + goto ctr4; + } + case 102: { + goto ctr2; + } + case 110: { + goto ctr2; + } + case 116: { + goto ctr2; + } + case 123: { + goto ctr2; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr2; + } + } else if ( ( (*( p))) >= 9 ) { + goto st2; + } + { + goto st0; + } + ctr2: + { + #line 409 "parser.rl" + + VALUE v = Qnil; + char *np = JSON_parse_value(json, p, pe, &v, current_nesting); + if (np == NULL) { + {p = p - 1; } {p+= 1; cs = 3; goto _out;} + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + {p = (( np))-1;} + + } + } + + goto st3; + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + switch( ( (*( p))) ) { + case 13: { + goto st3; + } + case 32: { + goto st3; + } + case 44: { + goto st4; + } + case 47: { + goto st9; + } + case 93: { + goto ctr4; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st3; + } + { + goto st0; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 13: { + goto st4; + } + case 32: { + goto st4; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr2; + } + case 47: { + goto st5; + } + case 73: { + goto ctr2; + } + case 78: { + goto ctr2; + } + case 91: { + goto ctr2; + } + case 102: { + goto ctr2; + } + case 110: { + goto ctr2; + } + case 116: { + goto ctr2; + } + case 123: { + goto ctr2; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr2; + } + } else if ( ( (*( p))) >= 9 ) { + goto st4; + } + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + switch( ( (*( p))) ) { + case 42: { + goto st6; + } + case 47: { + goto st8; + } + } + { + goto st0; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + if ( ( (*( p))) == 42 ) { + goto st7; + } + { + goto st6; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + switch( ( (*( p))) ) { + case 42: { + goto st7; + } + case 47: { + goto st4; + } + } + { + goto st6; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + if ( ( (*( p))) == 10 ) { + goto st4; + } + { + goto st8; + } + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + switch( ( (*( p))) ) { + case 42: { + goto st10; + } + case 47: { + goto st12; + } + } + { + 
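+/* [editor's note] The array machine mirrors the object machine: the ctr2
+ * action recursively parses each element with JSON_parse_value and appends it
+ * with rb_ary_push (or '<<' on a custom array_class), ctr4 fires on ']' to
+ * leave the machine, and the remaining states skip whitespace, commas and
+ * comments between elements. */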
goto st0; + } + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + if ( ( (*( p))) == 42 ) { + goto st11; + } + { + goto st10; + } + st11: + p+= 1; + if ( p == pe ) + goto _test_eof11; + st_case_11: + switch( ( (*( p))) ) { + case 42: { + goto st11; + } + case 47: { + goto st3; + } + } + { + goto st10; + } + st12: + p+= 1; + if ( p == pe ) + goto _test_eof12; + st_case_12: + if ( ( (*( p))) == 10 ) { + goto st3; + } + { + goto st12; + } + ctr4: + { + #line 424 "parser.rl" + {p = p - 1; } {p+= 1; cs = 17; goto _out;} } + + goto st17; + st17: + p+= 1; + if ( p == pe ) + goto _test_eof17; + st_case_17: + { + goto st0; + } + st13: + p+= 1; + if ( p == pe ) + goto _test_eof13; + st_case_13: + switch( ( (*( p))) ) { + case 42: { + goto st14; + } + case 47: { + goto st16; + } + } + { + goto st0; + } + st14: + p+= 1; + if ( p == pe ) + goto _test_eof14; + st_case_14: + if ( ( (*( p))) == 42 ) { + goto st15; + } + { + goto st14; + } + st15: + p+= 1; + if ( p == pe ) + goto _test_eof15; + st_case_15: + switch( ( (*( p))) ) { + case 42: { + goto st15; + } + case 47: { + goto st2; + } + } + { + goto st14; + } + st16: + p+= 1; + if ( p == pe ) + goto _test_eof16; + st_case_16: + if ( ( (*( p))) == 10 ) { + goto st2; + } + { + goto st16; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 446 "parser.rl" + + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static const size_t MAX_STACK_BUFFER_SIZE = 128; +static VALUE json_string_unescape(char *string, char *stringEnd, int intern, int symbolize) +{ + VALUE result = Qnil; + size_t bufferSize = stringEnd - string; + char *p = string, *pe = string, *unescape, *bufferStart, *buffer; + int unescape_len; + char buf[4]; + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + bufferStart = buffer = ALLOC_N(char, bufferSize); + } else { + bufferStart = buffer = ALLOCA_N(char, bufferSize); + } + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + if 
(bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + MEMCPY(buffer, unescape, char, unescape_len); + buffer += unescape_len; + p = ++pe; + } else { + pe++; + } + } + + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + + # ifdef HAVE_RB_ENC_INTERNED_STR + if (intern) { + result = rb_enc_interned_str(bufferStart, (long)(buffer - bufferStart), rb_utf8_encoding()); + } else { + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + } + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + # else + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + + if (intern) { + # if STR_UMINUS_DEDUPE_FROZEN + // Starting from MRI 2.8 it is preferable to freeze the string + // before deduplication so that it can be interned directly + // otherwise it would be duplicated first which is wasteful. + result = rb_funcall(rb_str_freeze(result), i_uminus, 0); + # elif STR_UMINUS_DEDUPE + // MRI 2.5 and older do not deduplicate strings that are already + // frozen. + result = rb_funcall(result, i_uminus, 0); + # else + result = rb_str_freeze(result); + # endif + } + # endif + + if (symbolize) { + result = rb_str_intern(result); + } + + return result; +} + + +enum {JSON_string_start = 1}; +enum {JSON_string_first_final = 8}; +enum {JSON_string_error = 0}; + +enum {JSON_string_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_string_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 612 "parser.rl" + + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + + { + cs = (int)JSON_string_start; + } + + #line 632 "parser.rl" + + json->memo = p; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 2: + goto st_case_2; + case 8: + goto st_case_8; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + } + goto st_out; + st_case_1: + if ( ( (*( p))) == 34 ) { + goto st2; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 34: { + goto ctr2; + } + case 92: { + goto st3; + } + } + if ( 0 <= (signed char)(*(p)) && (*(p)) <= 31 ) { + goto st0; + } + { + goto st2; + } + ctr2: + { + #line 599 "parser.rl" + + *result = 
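+/* [editor's note] json->memo still points at the opening quote, so the slice
+ * from memo + 1 up to p is the raw string body. json_string_unescape resolves
+ * the backslash escapes (including surrogate pairs), interns the result when
+ * it is an object key or when freeze was requested, and returns a Symbol for
+ * keys when symbolize_names is set. */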
json_string_unescape(json->memo + 1, p, json->parsing_name || json-> freeze, json->parsing_name && json->symbolize_names); + if (NIL_P(*result)) { + {p = p - 1; } + {p+= 1; cs = 8; goto _out;} + } else { + {p = (( p + 1))-1;} + + } + } + { + #line 609 "parser.rl" + {p = p - 1; } {p+= 1; cs = 8; goto _out;} } + + goto st8; + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( ( (*( p))) == 117 ) { + goto st4; + } + if ( 0 <= (signed char)(*(p)) && (*(p)) <= 31 ) { + goto st0; + } + { + goto st2; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st5; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st5; + } + } else { + goto st5; + } + { + goto st0; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st6; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st6; + } + } else { + goto st6; + } + { + goto st0; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st7; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st7; + } + } else { + goto st7; + } + { + goto st0; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) < 65 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto st2; + } + } else if ( ( (*( p))) > 70 ) { + if ( 97 <= ( (*( p))) && ( (*( p))) <= 102 ) { + goto st2; + } + } else { + goto st2; + } + { + goto st0; + } + st_out: + _test_eof2: cs = 2; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 634 "parser.rl" + + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* +* Document-class: JSON::Ext::Parser +* +* This is the JSON parser implemented as a C extension. It can be configured +* to be used by setting +* +* JSON.parser = JSON::Ext::Parser +* +* with the method parser= in JSON. +* +*/ + +static VALUE convert_encoding(VALUE source) +{ + #ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } + #endif + return source; +} + +/* +* call-seq: new(source, opts => {}) +* +* Creates a new JSON::Ext::Parser instance for the string _source_. +* +* Creates a new JSON::Ext::Parser instance for the string _source_. +* +* It will be configured by the _opts_ hash. 
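+* A minimal illustrative call (the document and option shown are examples
+* only):
+*
+*   parser = JSON::Ext::Parser.new('{"a": 1}', :symbolize_names => true)
+*   parser.parse # => {:a=>1}
+*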
_opts_ can have the following +* keys: +* +* _opts_ can have the following keys: +* * *max_nesting*: The maximum depth of nesting allowed in the parsed data +* structures. Disable depth checking with :max_nesting => false|nil|0, it +* defaults to 100. +* * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in +* defiance of RFC 4627 to be parsed by the Parser. This option defaults to +* false. +* * *symbolize_names*: If set to true, returns symbols for the names +* (keys) in a JSON object. Otherwise strings are returned, which is +* also the default. It's not possible to use this option in +* conjunction with the *create_additions* option. +* * *create_additions*: If set to false, the Parser doesn't create +* additions even if a matching class and create_id was found. This option +* defaults to false. +* * *object_class*: Defaults to Hash +* * *array_class*: Defaults to Array +*/ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } + #ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); + #else + rb_scan_args(argc, argv, "11", &source, &opts); + #endif + if (!NIL_P(opts)) { + #ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { + #endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_freeze); + if (option_given_p(opts, tmp)) { + json->freeze = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->freeze = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
match_string : Qnil; + } else { + json->match_string = Qnil; + } + #ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } + #endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + + +enum {JSON_start = 1}; +enum {JSON_first_final = 10}; +enum {JSON_error = 0}; + +enum {JSON_en_main = 1}; + +static const char MAYBE_UNUSED(_JSON_nfa_targs)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_nfa_offsets)[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_nfa_push_actions)[] = { + 0, 0 +}; + +static const char MAYBE_UNUSED(_JSON_nfa_pop_trans)[] = { + 0, 0 +}; + + +#line 835 "parser.rl" + + +/* +* call-seq: parse() +* +* Parses the current JSON text _source_ and returns the complete data +* structure as a result. +*/ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + + { + cs = (int)JSON_start; + } + + #line 851 "parser.rl" + + p = json->source; + pe = p + json->len; + + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { + case 1: + goto st_case_1; + case 0: + goto st_case_0; + case 10: + goto st_case_10; + case 2: + goto st_case_2; + case 3: + goto st_case_3; + case 4: + goto st_case_4; + case 5: + goto st_case_5; + case 6: + goto st_case_6; + case 7: + goto st_case_7; + case 8: + goto st_case_8; + case 9: + goto st_case_9; + } + goto st_out; + st1: + p+= 1; + if ( p == pe ) + goto _test_eof1; + st_case_1: + switch( ( (*( p))) ) { + case 13: { + goto st1; + } + case 32: { + goto st1; + } + case 34: { + goto ctr2; + } + case 45: { + goto ctr2; + } + case 47: { + goto st6; + } + case 73: { + goto ctr2; + } + case 78: { + goto ctr2; + } + case 91: { + goto ctr2; + } + case 102: { + goto ctr2; + } + case 110: { + goto ctr2; + } + case 116: { + goto ctr2; + } + case 123: { + goto ctr2; + } + } + if ( ( (*( p))) > 10 ) { + if ( 48 <= ( (*( p))) && ( (*( p))) <= 57 ) { + goto ctr2; + } + } else if ( ( (*( p))) >= 9 ) { + goto st1; + } + { + goto st0; + } + st_case_0: + st0: + cs = 0; + goto _out; + ctr2: + { + #line 827 "parser.rl" + + char *np = JSON_parse_value(json, p, pe, &result, 0); + if (np == NULL) { {p = p - 1; } {p+= 1; cs = 10; goto _out;} } else {p = (( np))-1;} + + } + + goto st10; + st10: + p+= 1; + if ( p == pe ) + goto _test_eof10; + st_case_10: + switch( ( (*( p))) ) { + case 13: { + goto st10; + } + case 32: { + goto st10; + } + case 47: { + goto st2; + } + } + if ( 9 <= ( (*( p))) && ( (*( p))) <= 10 ) { + goto st10; + } + { + goto st0; + } + st2: + p+= 1; + if ( p == pe ) + goto _test_eof2; + st_case_2: + switch( ( (*( p))) ) { + case 42: { + goto st3; + } + case 47: { + goto st5; + } + } + { + goto st0; + } + st3: + p+= 1; + if ( p == pe ) + goto _test_eof3; + st_case_3: + if ( ( (*( p))) == 42 ) { + goto st4; + } + { + goto st3; + } + st4: + p+= 1; + if ( p == pe ) + goto _test_eof4; + st_case_4: + switch( ( (*( p))) ) { + case 42: { + goto st4; + } + case 47: { + goto st10; + } + } + { + goto st3; + } + st5: + p+= 1; + if ( p == pe ) + goto _test_eof5; + st_case_5: + if ( ( (*( p))) == 10 ) { + goto st10; + } + { + goto st5; + } + st6: + p+= 1; + if ( p == pe ) + goto _test_eof6; + st_case_6: + 
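+/* [editor's note] The top-level machine accepts exactly one JSON value
+ * surrounded only by whitespace and comments; the ctr2 action above delegates
+ * to JSON_parse_value with a nesting depth of 0. Parsing succeeds only when a
+ * final state is reached and the whole source has been consumed (p == pe in
+ * the check below); otherwise a JSON::ParserError is raised. */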
switch( ( (*( p))) ) { + case 42: { + goto st7; + } + case 47: { + goto st9; + } + } + { + goto st0; + } + st7: + p+= 1; + if ( p == pe ) + goto _test_eof7; + st_case_7: + if ( ( (*( p))) == 42 ) { + goto st8; + } + { + goto st7; + } + st8: + p+= 1; + if ( p == pe ) + goto _test_eof8; + st_case_8: + switch( ( (*( p))) ) { + case 42: { + goto st8; + } + case 47: { + goto st1; + } + } + { + goto st7; + } + st9: + p+= 1; + if ( p == pe ) + goto _test_eof9; + st_case_9: + if ( ( (*( p))) == 10 ) { + goto st1; + } + { + goto st9; + } + st_out: + _test_eof1: cs = 1; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + + _test_eof: {} + _out: {} + } + + #line 854 "parser.rl" + + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, + #ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, + #endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* +* call-seq: source() +* +* Returns a copy of the current _source_ string, that was used to construct +* this Parser. 
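+*
+* For example (illustrative):
+*
+*   JSON::Ext::Parser.new('[1, 2]').source # => "[1, 2]"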
+*/ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ + #ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); + #endif + + #undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_try_convert = rb_intern("try_convert"); + i_freeze = rb_intern("freeze"); + i_uminus = rb_intern("-@"); +} + +/* +* Local variables: +* mode: c +* c-file-style: ruby +* indent-tabs-mode: nil +* End: +*/ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.h b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.h new file mode 100644 index 0000000..92ed3fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.h @@ -0,0 +1,96 @@ +#ifndef _PARSER_H_ +#define _PARSER_H_ + +#include "ruby.h" + +#ifndef HAVE_RUBY_RE_H +#include "re.h" +#endif + +#ifdef HAVE_RUBY_ST_H +#include "ruby/st.h" +#else +#include "st.h" +#endif + +#ifndef MAYBE_UNUSED +# define MAYBE_UNUSED(x) x +#endif + +#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key)) + +/* unicode */ + +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ + +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF + +typedef struct JSON_ParserStruct { + VALUE Vsource; + char *source; + long len; + char *memo; + VALUE create_id; + int max_nesting; + int allow_nan; + int parsing_name; + int symbolize_names; + int freeze; + VALUE object_class; + VALUE array_class; + VALUE decimal_class; + int create_additions; + VALUE match_string; + FBuffer *fbuffer; +} JSON_Parser; + +#define GET_PARSER \ + GET_PARSER_INIT; \ + if 
(!json->Vsource) rb_raise(rb_eTypeError, "uninitialized instance") +#define GET_PARSER_INIT \ + JSON_Parser *json; \ + TypedData_Get_Struct(self, JSON_Parser, &JSON_Parser_type, json) + +#define MinusInfinity "-Infinity" +#define EVIL 0x666 + +static UTF32 unescape_unicode(const unsigned char *p); +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch); +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result); +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting); +static VALUE json_string_unescape(char *string, char *stringEnd, int intern, int symbolize); +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result); +static VALUE convert_encoding(VALUE source); +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self); +static VALUE cParser_parse(VALUE self); +static void JSON_mark(void *json); +static void JSON_free(void *json); +static VALUE cJSON_parser_s_allocate(VALUE klass); +static VALUE cParser_source(VALUE self); +#ifndef ZALLOC +#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type))) +static inline void *ruby_zalloc(size_t n) +{ + void *p = ruby_xmalloc(n); + memset(p, 0, n); + return p; +} +#endif +#ifdef TypedData_Make_Struct +static const rb_data_type_t JSON_Parser_type; +#define NEW_TYPEDDATA_WRAPPER 1 +#else +#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, JSON_free, json) +#define TypedData_Get_Struct(self, JSON_Parser, ignore, json) Data_Get_Struct(self, JSON_Parser, json) +#endif + +#endif diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.o b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.o new file mode 100644 index 0000000..d9feb63 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.o differ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.rl b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.rl new file mode 100644 index 0000000..f7be1a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/ext/parser/parser.rl @@ -0,0 +1,977 @@ +#include "../fbuffer/fbuffer.h" +#include "parser.h" + +#if defined HAVE_RUBY_ENCODING_H +# define EXC_ENCODING rb_utf8_encoding(), +# ifndef HAVE_RB_ENC_RAISE +static void +enc_raise(rb_encoding *enc, VALUE exc, const char *fmt, ...) 
+{ + va_list args; + VALUE mesg; + + va_start(args, fmt); + mesg = rb_enc_vsprintf(enc, fmt, args); + va_end(args); + + rb_exc_raise(rb_exc_new3(exc, mesg)); +} +# define rb_enc_raise enc_raise +# endif +#else +# define EXC_ENCODING /* nothing */ +# define rb_enc_raise rb_raise +#endif + +/* unicode */ + +static const signed char digit_values[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, + -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 +}; + +static UTF32 unescape_unicode(const unsigned char *p) +{ + signed char b; + UTF32 result = 0; + b = digit_values[p[0]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[1]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[2]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + b = digit_values[p[3]]; + if (b < 0) return UNI_REPLACEMENT_CHAR; + result = (result << 4) | (unsigned char)b; + return result; +} + +static int convert_UTF32_to_UTF8(char *buf, UTF32 ch) +{ + int len = 1; + if (ch <= 0x7F) { + buf[0] = (char) ch; + } else if (ch <= 0x07FF) { + buf[0] = (char) ((ch >> 6) | 0xC0); + buf[1] = (char) ((ch & 0x3F) | 0x80); + len++; + } else if (ch <= 0xFFFF) { + buf[0] = (char) ((ch >> 12) | 0xE0); + buf[1] = (char) (((ch >> 6) & 0x3F) | 0x80); + buf[2] = (char) ((ch & 0x3F) | 0x80); + len += 2; + } else if (ch <= 0x1fffff) { + buf[0] =(char) ((ch >> 18) | 0xF0); + buf[1] =(char) (((ch >> 12) & 0x3F) | 0x80); + buf[2] =(char) (((ch >> 6) & 0x3F) | 0x80); + buf[3] =(char) ((ch & 0x3F) | 0x80); + len += 3; + } else { + buf[0] = '?'; + } + return len; +} + +static VALUE mJSON, mExt, cParser, eParserError, eNestingError; +static VALUE CNaN, CInfinity, CMinusInfinity; + +static ID i_json_creatable_p, i_json_create, i_create_id, i_create_additions, + i_chr, i_max_nesting, i_allow_nan, i_symbolize_names, + i_object_class, i_array_class, i_decimal_class, i_key_p, + i_deep_const_get, i_match, i_match_string, i_aset, i_aref, + i_leftshift, i_new, i_try_convert, i_freeze, i_uminus; + +%%{ + machine JSON_common; + + cr = '\n'; + cr_neg = [^\n]; + ws = [ \t\r\n]; + c_comment = '/*' ( any* - (any* '*/' any* ) ) '*/'; + cpp_comment = '//' cr_neg* cr; + comment = c_comment | cpp_comment; + ignore = ws | comment; + name_separator = ':'; + value_separator = ','; + Vnull = 'null'; + Vfalse = 'false'; + Vtrue = 'true'; + VNaN = 'NaN'; + VInfinity = 'Infinity'; + VMinusInfinity = '-Infinity'; + begin_value = [nft\"\-\[\{NI] | digit; + begin_object = '{'; + end_object = '}'; + 
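+  # [editor's note] JSON_common collects the token classes shared by the
+  # object, value, array, string and top-level machines that include it:
+  # whitespace, C- and C++-style comments, the literal keywords, and the
+  # begin_value byte class that can start any JSON value. parser.c earlier in
+  # this diff is the Ragel output generated from this grammar.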
begin_array = '['; + end_array = ']'; + begin_string = '"'; + begin_name = begin_string; + begin_number = digit | '-'; +}%% + +%%{ + machine JSON_object; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->object_class)) { + OBJ_FREEZE(last_name); + rb_hash_aset(*result, last_name, v); + } else { + rb_funcall(*result, i_aset, 2, last_name, v); + } + fexec np; + } + } + + action parse_name { + char *np; + json->parsing_name = 1; + np = JSON_parse_string(json, fpc, pe, &last_name); + json->parsing_name = 0; + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + + pair = ignore* begin_name >parse_name ignore* name_separator ignore* begin_value >parse_value; + next_pair = ignore* value_separator pair; + + main := ( + begin_object + (pair (next_pair)*)? ignore* + end_object + ) @exit; +}%% + +static char *JSON_parse_object(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE last_name = Qnil; + VALUE object_class = json->object_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + + *result = NIL_P(object_class) ? rb_hash_new() : rb_class_new_instance(0, 0, object_class); + + %% write init; + %% write exec; + + if (cs >= JSON_object_first_final) { + if (json->create_additions) { + VALUE klassname; + if (NIL_P(json->object_class)) { + klassname = rb_hash_aref(*result, json->create_id); + } else { + klassname = rb_funcall(*result, i_aref, 1, json->create_id); + } + if (!NIL_P(klassname)) { + VALUE klass = rb_funcall(mJSON, i_deep_const_get, 1, klassname); + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0))) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + } + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_value; + include JSON_common; + + write data; + + action parse_null { + *result = Qnil; + } + action parse_false { + *result = Qfalse; + } + action parse_true { + *result = Qtrue; + } + action parse_nan { + if (json->allow_nan) { + *result = CNaN; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 2); + } + } + action parse_infinity { + if (json->allow_nan) { + *result = CInfinity; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p - 8); + } + } + action parse_string { + char *np = JSON_parse_string(json, fpc, pe, result); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_number { + char *np; + if(pe > fpc + 8 && !strncmp(MinusInfinity, fpc, 9)) { + if (json->allow_nan) { + *result = CMinusInfinity; + fexec p + 10; + fhold; fbreak; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + } + } + np = JSON_parse_float(json, fpc, pe, result); + if (np != NULL) fexec np; + np = JSON_parse_integer(json, fpc, pe, result); + if (np != NULL) fexec np; + fhold; fbreak; + } + + action parse_array { + char *np; + np = JSON_parse_array(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action parse_object { + char *np; + np = JSON_parse_object(json, fpc, pe, result, current_nesting + 1); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + action exit { fhold; fbreak; } + +main := ignore* ( + Vnull 
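JSON_parse_object above builds a plain Hash unless an object_class was configured, in which case each pair is stored with []= on a fresh instance of that class, and nested objects reuse the same class. A quick Ruby-level illustration of the option, sketched with OpenStruct (any class responding to []= should behave the same way):

    require 'json'
    require 'ostruct'

    obj = JSON.parse('{"a": "foo", "nested": {"b": 1}}', object_class: OpenStruct)
    obj.a         # => "foo"
    obj.nested.b  # => 1   (nested JSON objects use the same class)
    obj.class     # => OpenStruct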
@parse_null | + Vfalse @parse_false | + Vtrue @parse_true | + VNaN @parse_nan | + VInfinity @parse_infinity | + begin_number >parse_number | + begin_string >parse_string | + begin_array >parse_array | + begin_object >parse_object + ) ignore* %*exit; +}%% + +static char *JSON_parse_value(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + + %% write init; + %% write exec; + + if (json->freeze) { + OBJ_FREEZE(*result); + } + + if (cs >= JSON_value_first_final) { + return p; + } else { + return NULL; + } +} + +%%{ + machine JSON_integer; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ('0' | [1-9][0-9]*) (^[0-9]? @exit); +}%% + +static char *JSON_parse_integer(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_integer_first_final) { + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + *result = rb_cstr2inum(FBUFFER_PTR(json->fbuffer), 10); + return p + 1; + } else { + return NULL; + } +} + +%%{ + machine JSON_float; + include JSON_common; + + write data; + + action exit { fhold; fbreak; } + + main := '-'? ( + (('0' | [1-9][0-9]*) '.' [0-9]+ ([Ee] [+\-]?[0-9]+)?) + | (('0' | [1-9][0-9]*) ([Ee] [+\-]?[0-9]+)) + ) (^[0-9Ee.\-]? @exit ); +}%% + +static char *JSON_parse_float(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + + %% write init; + json->memo = p; + %% write exec; + + if (cs >= JSON_float_first_final) { + VALUE mod = Qnil; + ID method_id = 0; + if (rb_respond_to(json->decimal_class, i_try_convert)) { + mod = json->decimal_class; + method_id = i_try_convert; + } else if (rb_respond_to(json->decimal_class, i_new)) { + mod = json->decimal_class; + method_id = i_new; + } else if (RB_TYPE_P(json->decimal_class, T_CLASS)) { + VALUE name = rb_class_name(json->decimal_class); + const char *name_cstr = RSTRING_PTR(name); + const char *last_colon = strrchr(name_cstr, ':'); + if (last_colon) { + const char *mod_path_end = last_colon - 1; + VALUE mod_path = rb_str_substr(name, 0, mod_path_end - name_cstr); + mod = rb_path_to_class(mod_path); + + const char *method_name_beg = last_colon + 1; + long before_len = method_name_beg - name_cstr; + long len = RSTRING_LEN(name) - before_len; + VALUE method_name = rb_str_substr(name, before_len, len); + method_id = SYM2ID(rb_str_intern(method_name)); + } else { + mod = rb_mKernel; + method_id = SYM2ID(rb_str_intern(name)); + } + } + + long len = p - json->memo; + fbuffer_clear(json->fbuffer); + fbuffer_append(json->fbuffer, json->memo, len); + fbuffer_append_char(json->fbuffer, '\0'); + + if (method_id) { + VALUE text = rb_str_new2(FBUFFER_PTR(json->fbuffer)); + *result = rb_funcallv(mod, method_id, 1, &text); + } else { + *result = DBL2NUM(rb_cstr_to_dbl(FBUFFER_PTR(json->fbuffer), 1)); + } + + return p + 1; + } else { + return NULL; + } +} + + +%%{ + machine JSON_array; + include JSON_common; + + write data; + + action parse_value { + VALUE v = Qnil; + char *np = JSON_parse_value(json, fpc, pe, &v, current_nesting); + if (np == NULL) { + fhold; fbreak; + } else { + if (NIL_P(json->array_class)) { + rb_ary_push(*result, v); + } else { + rb_funcall(*result, i_leftshift, 1, v); + } + fexec np; + } + } + + action exit { fhold; fbreak; } + + next_element = value_separator ignore* begin_value >parse_value; + + main := begin_array ignore* + ((begin_value >parse_value ignore*) + 
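JSON_parse_float above only falls back to Float when no decimal_class is configured; otherwise it resolves a constructor (try_convert, new, or a Kernel-style conversion method derived from the class name) and hands it the raw literal text. A sketch with the commonly used BigDecimal (the exact inspect output depends on the bigdecimal version):

    require 'json'
    require 'bigdecimal'

    JSON.parse('{"price": 19.99}', decimal_class: BigDecimal)
    # => {"price"=>0.1999e2}   (a BigDecimal built from the literal text)

    JSON.parse('{"price": 19.99}')['price'].class
    # => Float                 (the default behaviour)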
(ignore* next_element ignore*)*)? + end_array @exit; +}%% + +static char *JSON_parse_array(JSON_Parser *json, char *p, char *pe, VALUE *result, int current_nesting) +{ + int cs = EVIL; + VALUE array_class = json->array_class; + + if (json->max_nesting && current_nesting > json->max_nesting) { + rb_raise(eNestingError, "nesting of %d is too deep", current_nesting); + } + *result = NIL_P(array_class) ? rb_ary_new() : rb_class_new_instance(0, 0, array_class); + + %% write init; + %% write exec; + + if(cs >= JSON_array_first_final) { + return p + 1; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return NULL; + } +} + +static const size_t MAX_STACK_BUFFER_SIZE = 128; +static VALUE json_string_unescape(char *string, char *stringEnd, int intern, int symbolize) +{ + VALUE result = Qnil; + size_t bufferSize = stringEnd - string; + char *p = string, *pe = string, *unescape, *bufferStart, *buffer; + int unescape_len; + char buf[4]; + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + bufferStart = buffer = ALLOC_N(char, bufferSize); + } else { + bufferStart = buffer = ALLOCA_N(char, bufferSize); + } + + while (pe < stringEnd) { + if (*pe == '\\') { + unescape = (char *) "?"; + unescape_len = 1; + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + switch (*++pe) { + case 'n': + unescape = (char *) "\n"; + break; + case 'r': + unescape = (char *) "\r"; + break; + case 't': + unescape = (char *) "\t"; + break; + case '"': + unescape = (char *) "\""; + break; + case '\\': + unescape = (char *) "\\"; + break; + case 'b': + unescape = (char *) "\b"; + break; + case 'f': + unescape = (char *) "\f"; + break; + case 'u': + if (pe > stringEnd - 4) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete unicode character escape sequence at '%s'", __LINE__, p + ); + } else { + UTF32 ch = unescape_unicode((unsigned char *) ++pe); + pe += 3; + if (UNI_SUR_HIGH_START == (ch & 0xFC00)) { + pe++; + if (pe > stringEnd - 6) { + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + rb_enc_raise( + EXC_ENCODING eParserError, + "%u: incomplete surrogate pair at '%s'", __LINE__, p + ); + } + if (pe[0] == '\\' && pe[1] == 'u') { + UTF32 sur = unescape_unicode((unsigned char *) pe + 2); + ch = (((ch & 0x3F) << 10) | ((((ch >> 6) & 0xF) + 1) << 16) + | (sur & 0x3FF)); + pe += 5; + } else { + unescape = (char *) "?"; + break; + } + } + unescape_len = convert_UTF32_to_UTF8(buf, ch); + unescape = buf; + } + break; + default: + p = pe; + continue; + } + MEMCPY(buffer, unescape, char, unescape_len); + buffer += unescape_len; + p = ++pe; + } else { + pe++; + } + } + + if (pe > p) { + MEMCPY(buffer, p, char, pe - p); + buffer += pe - p; + } + +# ifdef HAVE_RB_ENC_INTERNED_STR + if (intern) { + result = rb_enc_interned_str(bufferStart, (long)(buffer - bufferStart), rb_utf8_encoding()); + } else { + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + } + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } +# else + result = rb_utf8_str_new(bufferStart, (long)(buffer - bufferStart)); + + if (bufferSize > MAX_STACK_BUFFER_SIZE) { + free(bufferStart); + } + + if (intern) { + # if STR_UMINUS_DEDUPE_FROZEN + // Starting from MRI 2.8 it is preferable to freeze the string + // before deduplication so that it can be interned directly + // otherwise it would be duplicated first which is wasteful. 
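The escape handling in json_string_unescape above turns \uXXXX sequences into UTF-8 and folds a high/low surrogate pair into a single code point. Observable from Ruby with a small, non-exhaustive check:

    require 'json'

    JSON.parse('["\u20AC", "\ud83d\ude00"]')
    # => ["€", "😀"]   (the second string is decoded from a UTF-16 surrogate pair)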
+ result = rb_funcall(rb_str_freeze(result), i_uminus, 0); + # elif STR_UMINUS_DEDUPE + // MRI 2.5 and older do not deduplicate strings that are already + // frozen. + result = rb_funcall(result, i_uminus, 0); + # else + result = rb_str_freeze(result); + # endif + } +# endif + + if (symbolize) { + result = rb_str_intern(result); + } + + return result; +} + +%%{ + machine JSON_string; + include JSON_common; + + write data; + + action parse_string { + *result = json_string_unescape(json->memo + 1, p, json->parsing_name || json-> freeze, json->parsing_name && json->symbolize_names); + if (NIL_P(*result)) { + fhold; + fbreak; + } else { + fexec p + 1; + } + } + + action exit { fhold; fbreak; } + + main := '"' ((^([\"\\] | 0..0x1f) | '\\'[\"\\/bfnrt] | '\\u'[0-9a-fA-F]{4} | '\\'^([\"\\/bfnrtu]|0..0x1f))* %parse_string) '"' @exit; +}%% + +static int +match_i(VALUE regexp, VALUE klass, VALUE memo) +{ + if (regexp == Qundef) return ST_STOP; + if (RTEST(rb_funcall(klass, i_json_creatable_p, 0)) && + RTEST(rb_funcall(regexp, i_match, 1, rb_ary_entry(memo, 0)))) { + rb_ary_push(memo, klass); + return ST_STOP; + } + return ST_CONTINUE; +} + +static char *JSON_parse_string(JSON_Parser *json, char *p, char *pe, VALUE *result) +{ + int cs = EVIL; + VALUE match_string; + + %% write init; + json->memo = p; + %% write exec; + + if (json->create_additions && RTEST(match_string = json->match_string)) { + VALUE klass; + VALUE memo = rb_ary_new2(2); + rb_ary_push(memo, *result); + rb_hash_foreach(match_string, match_i, memo); + klass = rb_ary_entry(memo, 1); + if (RTEST(klass)) { + *result = rb_funcall(klass, i_json_create, 1, *result); + } + } + + if (cs >= JSON_string_first_final) { + return p + 1; + } else { + return NULL; + } +} + +/* + * Document-class: JSON::Ext::Parser + * + * This is the JSON parser implemented as a C extension. It can be configured + * to be used by setting + * + * JSON.parser = JSON::Ext::Parser + * + * with the method parser= in JSON. + * + */ + +static VALUE convert_encoding(VALUE source) +{ +#ifdef HAVE_RUBY_ENCODING_H + rb_encoding *enc = rb_enc_get(source); + if (enc == rb_ascii8bit_encoding()) { + if (OBJ_FROZEN(source)) { + source = rb_str_dup(source); + } + FORCE_UTF8(source); + } else { + source = rb_str_conv_enc(source, rb_enc_get(source), rb_utf8_encoding()); + } +#endif + return source; +} + +/* + * call-seq: new(source, opts => {}) + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * Creates a new JSON::Ext::Parser instance for the string _source_. + * + * It will be configured by the _opts_ hash. _opts_ can have the following + * keys: + * + * _opts_ can have the following keys: + * * *max_nesting*: The maximum depth of nesting allowed in the parsed data + * structures. Disable depth checking with :max_nesting => false|nil|0, it + * defaults to 100. + * * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + * defiance of RFC 4627 to be parsed by the Parser. This option defaults to + * false. + * * *symbolize_names*: If set to true, returns symbols for the names + * (keys) in a JSON object. Otherwise strings are returned, which is + * also the default. It's not possible to use this option in + * conjunction with the *create_additions* option. + * * *create_additions*: If set to false, the Parser doesn't create + * additions even if a matching class and create_id was found. This option + * defaults to false. 
+ * * *object_class*: Defaults to Hash + * * *array_class*: Defaults to Array + */ +static VALUE cParser_initialize(int argc, VALUE *argv, VALUE self) +{ + VALUE source, opts; + GET_PARSER_INIT; + + if (json->Vsource) { + rb_raise(rb_eTypeError, "already initialized instance"); + } +#ifdef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + rb_scan_args(argc, argv, "1:", &source, &opts); +#else + rb_scan_args(argc, argv, "11", &source, &opts); +#endif + if (!NIL_P(opts)) { +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + opts = rb_convert_type(opts, T_HASH, "Hash", "to_hash"); + if (NIL_P(opts)) { + rb_raise(rb_eArgError, "opts needs to be like a hash"); + } else { +#endif + VALUE tmp = ID2SYM(i_max_nesting); + if (option_given_p(opts, tmp)) { + VALUE max_nesting = rb_hash_aref(opts, tmp); + if (RTEST(max_nesting)) { + Check_Type(max_nesting, T_FIXNUM); + json->max_nesting = FIX2INT(max_nesting); + } else { + json->max_nesting = 0; + } + } else { + json->max_nesting = 100; + } + tmp = ID2SYM(i_allow_nan); + if (option_given_p(opts, tmp)) { + json->allow_nan = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->allow_nan = 0; + } + tmp = ID2SYM(i_symbolize_names); + if (option_given_p(opts, tmp)) { + json->symbolize_names = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->symbolize_names = 0; + } + tmp = ID2SYM(i_freeze); + if (option_given_p(opts, tmp)) { + json->freeze = RTEST(rb_hash_aref(opts, tmp)) ? 1 : 0; + } else { + json->freeze = 0; + } + tmp = ID2SYM(i_create_additions); + if (option_given_p(opts, tmp)) { + json->create_additions = RTEST(rb_hash_aref(opts, tmp)); + } else { + json->create_additions = 0; + } + if (json->symbolize_names && json->create_additions) { + rb_raise(rb_eArgError, + "options :symbolize_names and :create_additions cannot be " + " used in conjunction"); + } + tmp = ID2SYM(i_create_id); + if (option_given_p(opts, tmp)) { + json->create_id = rb_hash_aref(opts, tmp); + } else { + json->create_id = rb_funcall(mJSON, i_create_id, 0); + } + tmp = ID2SYM(i_object_class); + if (option_given_p(opts, tmp)) { + json->object_class = rb_hash_aref(opts, tmp); + } else { + json->object_class = Qnil; + } + tmp = ID2SYM(i_array_class); + if (option_given_p(opts, tmp)) { + json->array_class = rb_hash_aref(opts, tmp); + } else { + json->array_class = Qnil; + } + tmp = ID2SYM(i_decimal_class); + if (option_given_p(opts, tmp)) { + json->decimal_class = rb_hash_aref(opts, tmp); + } else { + json->decimal_class = Qnil; + } + tmp = ID2SYM(i_match_string); + if (option_given_p(opts, tmp)) { + VALUE match_string = rb_hash_aref(opts, tmp); + json->match_string = RTEST(match_string) ? 
match_string : Qnil; + } else { + json->match_string = Qnil; + } +#ifndef HAVE_RB_SCAN_ARGS_OPTIONAL_HASH + } +#endif + } else { + json->max_nesting = 100; + json->allow_nan = 0; + json->create_additions = 0; + json->create_id = rb_funcall(mJSON, i_create_id, 0); + json->object_class = Qnil; + json->array_class = Qnil; + json->decimal_class = Qnil; + } + source = convert_encoding(StringValue(source)); + StringValue(source); + json->len = RSTRING_LEN(source); + json->source = RSTRING_PTR(source);; + json->Vsource = source; + return self; +} + +%%{ + machine JSON; + + write data; + + include JSON_common; + + action parse_value { + char *np = JSON_parse_value(json, fpc, pe, &result, 0); + if (np == NULL) { fhold; fbreak; } else fexec np; + } + + main := ignore* ( + begin_value >parse_value + ) ignore*; +}%% + +/* + * call-seq: parse() + * + * Parses the current JSON text _source_ and returns the complete data + * structure as a result. + */ +static VALUE cParser_parse(VALUE self) +{ + char *p, *pe; + int cs = EVIL; + VALUE result = Qnil; + GET_PARSER; + + %% write init; + p = json->source; + pe = p + json->len; + %% write exec; + + if (cs >= JSON_first_final && p == pe) { + return result; + } else { + rb_enc_raise(EXC_ENCODING eParserError, "%u: unexpected token at '%s'", __LINE__, p); + return Qnil; + } +} + +static void JSON_mark(void *ptr) +{ + JSON_Parser *json = ptr; + rb_gc_mark_maybe(json->Vsource); + rb_gc_mark_maybe(json->create_id); + rb_gc_mark_maybe(json->object_class); + rb_gc_mark_maybe(json->array_class); + rb_gc_mark_maybe(json->decimal_class); + rb_gc_mark_maybe(json->match_string); +} + +static void JSON_free(void *ptr) +{ + JSON_Parser *json = ptr; + fbuffer_free(json->fbuffer); + ruby_xfree(json); +} + +static size_t JSON_memsize(const void *ptr) +{ + const JSON_Parser *json = ptr; + return sizeof(*json) + FBUFFER_CAPA(json->fbuffer); +} + +#ifdef NEW_TYPEDDATA_WRAPPER +static const rb_data_type_t JSON_Parser_type = { + "JSON/Parser", + {JSON_mark, JSON_free, JSON_memsize,}, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + 0, 0, + RUBY_TYPED_FREE_IMMEDIATELY, +#endif +}; +#endif + +static VALUE cJSON_parser_s_allocate(VALUE klass) +{ + JSON_Parser *json; + VALUE obj = TypedData_Make_Struct(klass, JSON_Parser, &JSON_Parser_type, json); + json->fbuffer = fbuffer_alloc(0); + return obj; +} + +/* + * call-seq: source() + * + * Returns a copy of the current _source_ string, that was used to construct + * this Parser. 
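cParser_initialize and cParser_parse back the Ruby-visible JSON::Ext::Parser API documented above; when the opts hash is omitted, the defaults shown in the C code apply (max_nesting 100, everything else off). A short usage sketch:

    require 'json'

    parser = JSON::Ext::Parser.new('{"a": 1, "b": [2, 3]}', symbolize_names: true)
    parser.parse   # => {:a=>1, :b=>[2, 3]}
    parser.source  # => "{\"a\": 1, \"b\": [2, 3]}"

    # symbolize_names and create_additions are mutually exclusive:
    JSON::Ext::Parser.new('{}', symbolize_names: true, create_additions: true)
    # raises ArgumentError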
+ */ +static VALUE cParser_source(VALUE self) +{ + GET_PARSER; + return rb_str_dup(json->Vsource); +} + +void Init_parser(void) +{ +#ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); +#endif + +#undef rb_intern + rb_require("json/common"); + mJSON = rb_define_module("JSON"); + mExt = rb_define_module_under(mJSON, "Ext"); + cParser = rb_define_class_under(mExt, "Parser", rb_cObject); + eParserError = rb_path2class("JSON::ParserError"); + eNestingError = rb_path2class("JSON::NestingError"); + rb_gc_register_mark_object(eParserError); + rb_gc_register_mark_object(eNestingError); + rb_define_alloc_func(cParser, cJSON_parser_s_allocate); + rb_define_method(cParser, "initialize", cParser_initialize, -1); + rb_define_method(cParser, "parse", cParser_parse, 0); + rb_define_method(cParser, "source", cParser_source, 0); + + CNaN = rb_const_get(mJSON, rb_intern("NaN")); + rb_gc_register_mark_object(CNaN); + + CInfinity = rb_const_get(mJSON, rb_intern("Infinity")); + rb_gc_register_mark_object(CInfinity); + + CMinusInfinity = rb_const_get(mJSON, rb_intern("MinusInfinity")); + rb_gc_register_mark_object(CMinusInfinity); + + i_json_creatable_p = rb_intern("json_creatable?"); + i_json_create = rb_intern("json_create"); + i_create_id = rb_intern("create_id"); + i_create_additions = rb_intern("create_additions"); + i_chr = rb_intern("chr"); + i_max_nesting = rb_intern("max_nesting"); + i_allow_nan = rb_intern("allow_nan"); + i_symbolize_names = rb_intern("symbolize_names"); + i_object_class = rb_intern("object_class"); + i_array_class = rb_intern("array_class"); + i_decimal_class = rb_intern("decimal_class"); + i_match = rb_intern("match"); + i_match_string = rb_intern("match_string"); + i_key_p = rb_intern("key?"); + i_deep_const_get = rb_intern("deep_const_get"); + i_aset = rb_intern("[]="); + i_aref = rb_intern("[]"); + i_leftshift = rb_intern("<<"); + i_new = rb_intern("new"); + i_try_convert = rb_intern("try_convert"); + i_freeze = rb_intern("freeze"); + i_uminus = rb_intern("-@"); +} + +/* + * Local variables: + * mode: c + * c-file-style: ruby + * indent-tabs-mode: nil + * End: + */ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/extconf.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/extconf.rb new file mode 100644 index 0000000..8a99b6a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/ext/json/extconf.rb @@ -0,0 +1,3 @@ +require 'mkmf' + +create_makefile('json') diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/json.gemspec b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/json.gemspec new file mode 100644 index 0000000..948e92c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/json.gemspec @@ -0,0 +1,67 @@ +# -*- encoding: utf-8 -*- + +Gem::Specification.new do |s| + s.name = "json" + s.version = File.read(File.expand_path('../VERSION', __FILE__)).chomp + + s.summary = "JSON Implementation for Ruby" + s.description = "This is a JSON implementation as a Ruby extension in C." 
+ s.licenses = ["Ruby"] + s.authors = ["Florian Frank"] + s.email = "flori@ping.de" + + s.extensions = ["ext/json/ext/generator/extconf.rb", "ext/json/ext/parser/extconf.rb", "ext/json/extconf.rb"] + s.extra_rdoc_files = ["README.md"] + s.rdoc_options = ["--title", "JSON implementation for Ruby", "--main", "README.md"] + s.files = [ + "CHANGES.md", + "LICENSE", + "README.md", + "VERSION", + "ext/json/ext/fbuffer/fbuffer.h", + "ext/json/ext/generator/depend", + "ext/json/ext/generator/extconf.rb", + "ext/json/ext/generator/generator.c", + "ext/json/ext/generator/generator.h", + "ext/json/ext/parser/depend", + "ext/json/ext/parser/extconf.rb", + "ext/json/ext/parser/parser.c", + "ext/json/ext/parser/parser.h", + "ext/json/ext/parser/parser.rl", + "ext/json/extconf.rb", + "json.gemspec", + "lib/json.rb", + "lib/json/add/bigdecimal.rb", + "lib/json/add/complex.rb", + "lib/json/add/core.rb", + "lib/json/add/date.rb", + "lib/json/add/date_time.rb", + "lib/json/add/exception.rb", + "lib/json/add/ostruct.rb", + "lib/json/add/range.rb", + "lib/json/add/rational.rb", + "lib/json/add/regexp.rb", + "lib/json/add/set.rb", + "lib/json/add/struct.rb", + "lib/json/add/symbol.rb", + "lib/json/add/time.rb", + "lib/json/common.rb", + "lib/json/ext.rb", + "lib/json/generic_object.rb", + "lib/json/pure.rb", + "lib/json/pure/generator.rb", + "lib/json/pure/parser.rb", + "lib/json/version.rb", + ] + s.homepage = "http://flori.github.com/json" + s.metadata = { + 'bug_tracker_uri' => 'https://github.com/flori/json/issues', + 'changelog_uri' => 'https://github.com/flori/json/blob/master/CHANGES.md', + 'documentation_uri' => 'http://flori.github.io/json/doc/index.html', + 'homepage_uri' => 'http://flori.github.io/json/', + 'source_code_uri' => 'https://github.com/flori/json', + 'wiki_uri' => 'https://github.com/flori/json/wiki' + } + + s.required_ruby_version = Gem::Requirement.new(">= 2.3") +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json.rb new file mode 100644 index 0000000..1e64bfc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json.rb @@ -0,0 +1,583 @@ +#frozen_string_literal: false +require 'json/common' + +## +# = JavaScript \Object Notation (\JSON) +# +# \JSON is a lightweight data-interchange format. +# +# A \JSON value is one of the following: +# - Double-quoted text: "foo". +# - Number: +1+, +1.0+, +2.0e2+. +# - Boolean: +true+, +false+. +# - Null: +null+. +# - \Array: an ordered list of values, enclosed by square brackets: +# ["foo", 1, 1.0, 2.0e2, true, false, null] +# +# - \Object: a collection of name/value pairs, enclosed by curly braces; +# each name is double-quoted text; +# the values may be any \JSON values: +# {"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null} +# +# A \JSON array or object may contain nested arrays, objects, and scalars +# to any depth: +# {"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]} +# [{"foo": 0, "bar": 1}, ["baz", 2]] +# +# == Using \Module \JSON +# +# To make module \JSON available in your code, begin with: +# require 'json' +# +# All examples here assume that this has been done. +# +# === Parsing \JSON +# +# You can parse a \String containing \JSON data using +# either of two methods: +# - JSON.parse(source, opts) +# - JSON.parse!(source, opts) +# +# where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. 
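Tying the value types listed above back to Ruby: after require 'json', each JSON type maps to the obvious Ruby counterpart in both directions. A minimal round trip:

    require 'json'

    json = '{"a": "foo", "b": [1, 2.0e2, true, null]}'
    ruby = JSON.parse(json)   # => {"a"=>"foo", "b"=>[1, 200.0, true, nil]}
    JSON.generate(ruby)       # => '{"a":"foo","b":[1,200.0,true,null]}'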
+# +# The difference between the two methods +# is that JSON.parse! omits some checks +# and may not be safe for some +source+ data; +# use it only for data from trusted sources. +# Use the safer method JSON.parse for less trusted sources. +# +# ==== Parsing \JSON Arrays +# +# When +source+ is a \JSON array, JSON.parse by default returns a Ruby \Array: +# json = '["foo", 1, 1.0, 2.0e2, true, false, null]' +# ruby = JSON.parse(json) +# ruby # => ["foo", 1, 1.0, 200.0, true, false, nil] +# ruby.class # => Array +# +# The \JSON array may contain nested arrays, objects, and scalars +# to any depth: +# json = '[{"foo": 0, "bar": 1}, ["baz", 2]]' +# JSON.parse(json) # => [{"foo"=>0, "bar"=>1}, ["baz", 2]] +# +# ==== Parsing \JSON \Objects +# +# When the source is a \JSON object, JSON.parse by default returns a Ruby \Hash: +# json = '{"a": "foo", "b": 1, "c": 1.0, "d": 2.0e2, "e": true, "f": false, "g": null}' +# ruby = JSON.parse(json) +# ruby # => {"a"=>"foo", "b"=>1, "c"=>1.0, "d"=>200.0, "e"=>true, "f"=>false, "g"=>nil} +# ruby.class # => Hash +# +# The \JSON object may contain nested arrays, objects, and scalars +# to any depth: +# json = '{"foo": {"bar": 1, "baz": 2}, "bat": [0, 1, 2]}' +# JSON.parse(json) # => {"foo"=>{"bar"=>1, "baz"=>2}, "bat"=>[0, 1, 2]} +# +# ==== Parsing \JSON Scalars +# +# When the source is a \JSON scalar (not an array or object), +# JSON.parse returns a Ruby scalar. +# +# \String: +# ruby = JSON.parse('"foo"') +# ruby # => 'foo' +# ruby.class # => String +# \Integer: +# ruby = JSON.parse('1') +# ruby # => 1 +# ruby.class # => Integer +# \Float: +# ruby = JSON.parse('1.0') +# ruby # => 1.0 +# ruby.class # => Float +# ruby = JSON.parse('2.0e2') +# ruby # => 200 +# ruby.class # => Float +# Boolean: +# ruby = JSON.parse('true') +# ruby # => true +# ruby.class # => TrueClass +# ruby = JSON.parse('false') +# ruby # => false +# ruby.class # => FalseClass +# Null: +# ruby = JSON.parse('null') +# ruby # => nil +# ruby.class # => NilClass +# +# ==== Parsing Options +# +# ====== Input Options +# +# Option +max_nesting+ (\Integer) specifies the maximum nesting depth allowed; +# defaults to +100+; specify +false+ to disable depth checking. +# +# With the default, +false+: +# source = '[0, [1, [2, [3]]]]' +# ruby = JSON.parse(source) +# ruby # => [0, [1, [2, [3]]]] +# Too deep: +# # Raises JSON::NestingError (nesting of 2 is too deep): +# JSON.parse(source, {max_nesting: 1}) +# Bad value: +# # Raises TypeError (wrong argument type Symbol (expected Fixnum)): +# JSON.parse(source, {max_nesting: :foo}) +# +# --- +# +# Option +allow_nan+ (boolean) specifies whether to allow +# NaN, Infinity, and MinusInfinity in +source+; +# defaults to +false+. +# +# With the default, +false+: +# # Raises JSON::ParserError (225: unexpected token at '[NaN]'): +# JSON.parse('[NaN]') +# # Raises JSON::ParserError (232: unexpected token at '[Infinity]'): +# JSON.parse('[Infinity]') +# # Raises JSON::ParserError (248: unexpected token at '[-Infinity]'): +# JSON.parse('[-Infinity]') +# Allow: +# source = '[NaN, Infinity, -Infinity]' +# ruby = JSON.parse(source, {allow_nan: true}) +# ruby # => [NaN, Infinity, -Infinity] +# +# ====== Output Options +# +# Option +symbolize_names+ (boolean) specifies whether returned \Hash keys +# should be Symbols; +# defaults to +false+ (use Strings). 
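To make the parse / parse! distinction and the max_nesting option above concrete: parse! relaxes the defaults, notably the nesting limit, so it should only ever see trusted input. A minimal comparison, assuming the default limit of 100 levels:

    require 'json'

    deep = '[' * 150 + ']' * 150
    JSON.parse(deep)   # raises JSON::NestingError (deeper than the default 100 levels)
    JSON.parse!(deep)  # => deeply nested arrays; parse! disables the depth check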
+# +# With the default, +false+: +# source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' +# ruby = JSON.parse(source) +# ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} +# Use Symbols: +# ruby = JSON.parse(source, {symbolize_names: true}) +# ruby # => {:a=>"foo", :b=>1.0, :c=>true, :d=>false, :e=>nil} +# +# --- +# +# Option +object_class+ (\Class) specifies the Ruby class to be used +# for each \JSON object; +# defaults to \Hash. +# +# With the default, \Hash: +# source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' +# ruby = JSON.parse(source) +# ruby.class # => Hash +# Use class \OpenStruct: +# ruby = JSON.parse(source, {object_class: OpenStruct}) +# ruby # => # +# +# --- +# +# Option +array_class+ (\Class) specifies the Ruby class to be used +# for each \JSON array; +# defaults to \Array. +# +# With the default, \Array: +# source = '["foo", 1.0, true, false, null]' +# ruby = JSON.parse(source) +# ruby.class # => Array +# Use class \Set: +# ruby = JSON.parse(source, {array_class: Set}) +# ruby # => # +# +# --- +# +# Option +create_additions+ (boolean) specifies whether to use \JSON additions in parsing. +# See {\JSON Additions}[#module-JSON-label-JSON+Additions]. +# +# === Generating \JSON +# +# To generate a Ruby \String containing \JSON data, +# use method JSON.generate(source, opts), where +# - +source+ is a Ruby object. +# - +opts+ is a \Hash object containing options +# that control both input allowed and output formatting. +# +# ==== Generating \JSON from Arrays +# +# When the source is a Ruby \Array, JSON.generate returns +# a \String containing a \JSON array: +# ruby = [0, 's', :foo] +# json = JSON.generate(ruby) +# json # => '[0,"s","foo"]' +# +# The Ruby \Array array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = [0, [1, 2], {foo: 3, bar: 4}] +# json = JSON.generate(ruby) +# json # => '[0,[1,2],{"foo":3,"bar":4}]' +# +# ==== Generating \JSON from Hashes +# +# When the source is a Ruby \Hash, JSON.generate returns +# a \String containing a \JSON object: +# ruby = {foo: 0, bar: 's', baz: :bat} +# json = JSON.generate(ruby) +# json # => '{"foo":0,"bar":"s","baz":"bat"}' +# +# The Ruby \Hash array may contain nested arrays, hashes, and scalars +# to any depth: +# ruby = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} +# json = JSON.generate(ruby) +# json # => '{"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"}' +# +# ==== Generating \JSON from Other Objects +# +# When the source is neither an \Array nor a \Hash, +# the generated \JSON data depends on the class of the source. 
+# +# When the source is a Ruby \Integer or \Float, JSON.generate returns +# a \String containing a \JSON number: +# JSON.generate(42) # => '42' +# JSON.generate(0.42) # => '0.42' +# +# When the source is a Ruby \String, JSON.generate returns +# a \String containing a \JSON string (with double-quotes): +# JSON.generate('A string') # => '"A string"' +# +# When the source is +true+, +false+ or +nil+, JSON.generate returns +# a \String containing the corresponding \JSON token: +# JSON.generate(true) # => 'true' +# JSON.generate(false) # => 'false' +# JSON.generate(nil) # => 'null' +# +# When the source is none of the above, JSON.generate returns +# a \String containing a \JSON string representation of the source: +# JSON.generate(:foo) # => '"foo"' +# JSON.generate(Complex(0, 0)) # => '"0+0i"' +# JSON.generate(Dir.new('.')) # => '"#"' +# +# ==== Generating Options +# +# ====== Input Options +# +# Option +allow_nan+ (boolean) specifies whether +# +NaN+, +Infinity+, and -Infinity may be generated; +# defaults to +false+. +# +# With the default, +false+: +# # Raises JSON::GeneratorError (920: NaN not allowed in JSON): +# JSON.generate(JSON::NaN) +# # Raises JSON::GeneratorError (917: Infinity not allowed in JSON): +# JSON.generate(JSON::Infinity) +# # Raises JSON::GeneratorError (917: -Infinity not allowed in JSON): +# JSON.generate(JSON::MinusInfinity) +# +# Allow: +# ruby = [Float::NaN, Float::Infinity, Float::MinusInfinity] +# JSON.generate(ruby, allow_nan: true) # => '[NaN,Infinity,-Infinity]' +# +# --- +# +# Option +max_nesting+ (\Integer) specifies the maximum nesting depth +# in +obj+; defaults to +100+. +# +# With the default, +100+: +# obj = [[[[[[0]]]]]] +# JSON.generate(obj) # => '[[[[[[0]]]]]]' +# +# Too deep: +# # Raises JSON::NestingError (nesting of 2 is too deep): +# JSON.generate(obj, max_nesting: 2) +# +# ====== Output Options +# +# The default formatting options generate the most compact +# \JSON data, all on one line and with no whitespace. +# +# You can use these formatting options to generate +# \JSON data in a more open format, using whitespace. +# See also JSON.pretty_generate. +# +# - Option +array_nl+ (\String) specifies a string (usually a newline) +# to be inserted after each \JSON array; defaults to the empty \String, ''. +# - Option +object_nl+ (\String) specifies a string (usually a newline) +# to be inserted after each \JSON object; defaults to the empty \String, ''. +# - Option +indent+ (\String) specifies the string (usually spaces) to be +# used for indentation; defaults to the empty \String, ''; +# defaults to the empty \String, ''; +# has no effect unless options +array_nl+ or +object_nl+ specify newlines. +# - Option +space+ (\String) specifies a string (usually a space) to be +# inserted after the colon in each \JSON object's pair; +# defaults to the empty \String, ''. +# - Option +space_before+ (\String) specifies a string (usually a space) to be +# inserted before the colon in each \JSON object's pair; +# defaults to the empty \String, ''. 
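The formatting options listed above are what JSON.pretty_generate bundles for you; when the goal is simply human-readable output it is usually easier to reach for it directly:

    require 'json'

    puts JSON.pretty_generate({foo: [1, 2], bar: {baz: 3}})
    # {
    #   "foo": [
    #     1,
    #     2
    #   ],
    #   "bar": {
    #     "baz": 3
    #   }
    # }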
+# +# In this example, +obj+ is used first to generate the shortest +# \JSON data (no whitespace), then again with all formatting options +# specified: +# +# obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} +# json = JSON.generate(obj) +# puts 'Compact:', json +# opts = { +# array_nl: "\n", +# object_nl: "\n", +# indent: ' ', +# space_before: ' ', +# space: ' ' +# } +# puts 'Open:', JSON.generate(obj, opts) +# +# Output: +# Compact: +# {"foo":["bar","baz"],"bat":{"bam":0,"bad":1}} +# Open: +# { +# "foo" : [ +# "bar", +# "baz" +# ], +# "bat" : { +# "bam" : 0, +# "bad" : 1 +# } +# } +# +# == \JSON Additions +# +# When you "round trip" a non-\String object from Ruby to \JSON and back, +# you have a new \String, instead of the object you began with: +# ruby0 = Range.new(0, 2) +# json = JSON.generate(ruby0) +# json # => '0..2"' +# ruby1 = JSON.parse(json) +# ruby1 # => '0..2' +# ruby1.class # => String +# +# You can use \JSON _additions_ to preserve the original object. +# The addition is an extension of a ruby class, so that: +# - \JSON.generate stores more information in the \JSON string. +# - \JSON.parse, called with option +create_additions+, +# uses that information to create a proper Ruby object. +# +# This example shows a \Range being generated into \JSON +# and parsed back into Ruby, both without and with +# the addition for \Range: +# ruby = Range.new(0, 2) +# # This passage does not use the addition for Range. +# json0 = JSON.generate(ruby) +# ruby0 = JSON.parse(json0) +# # This passage uses the addition for Range. +# require 'json/add/range' +# json1 = JSON.generate(ruby) +# ruby1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <require 'json/add/bigdecimal' +# - Complex: require 'json/add/complex' +# - Date: require 'json/add/date' +# - DateTime: require 'json/add/date_time' +# - Exception: require 'json/add/exception' +# - OpenStruct: require 'json/add/ostruct' +# - Range: require 'json/add/range' +# - Rational: require 'json/add/rational' +# - Regexp: require 'json/add/regexp' +# - Set: require 'json/add/set' +# - Struct: require 'json/add/struct' +# - Symbol: require 'json/add/symbol' +# - Time: require 'json/add/time' +# +# To reduce punctuation clutter, the examples below +# show the generated \JSON via +puts+, rather than the usual +inspect+, +# +# \BigDecimal: +# require 'json/add/bigdecimal' +# ruby0 = BigDecimal(0) # 0.0 +# json = JSON.generate(ruby0) # {"json_class":"BigDecimal","b":"27:0.0"} +# ruby1 = JSON.parse(json, create_additions: true) # 0.0 +# ruby1.class # => BigDecimal +# +# \Complex: +# require 'json/add/complex' +# ruby0 = Complex(1+0i) # 1+0i +# json = JSON.generate(ruby0) # {"json_class":"Complex","r":1,"i":0} +# ruby1 = JSON.parse(json, create_additions: true) # 1+0i +# ruby1.class # Complex +# +# \Date: +# require 'json/add/date' +# ruby0 = Date.today # 2020-05-02 +# json = JSON.generate(ruby0) # {"json_class":"Date","y":2020,"m":5,"d":2,"sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 +# ruby1.class # Date +# +# \DateTime: +# require 'json/add/date_time' +# ruby0 = DateTime.now # 2020-05-02T10:38:13-05:00 +# json = JSON.generate(ruby0) # {"json_class":"DateTime","y":2020,"m":5,"d":2,"H":10,"M":38,"S":13,"of":"-5/24","sg":2299161.0} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02T10:38:13-05:00 +# ruby1.class # DateTime +# +# \Exception (and its subclasses including \RuntimeError): +# require 'json/add/exception' +# ruby0 = Exception.new('A message') # A message +# json = 
JSON.generate(ruby0) # {"json_class":"Exception","m":"A message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # A message +# ruby1.class # Exception +# ruby0 = RuntimeError.new('Another message') # Another message +# json = JSON.generate(ruby0) # {"json_class":"RuntimeError","m":"Another message","b":null} +# ruby1 = JSON.parse(json, create_additions: true) # Another message +# ruby1.class # RuntimeError +# +# \OpenStruct: +# require 'json/add/ostruct' +# ruby0 = OpenStruct.new(name: 'Matz', language: 'Ruby') # # +# json = JSON.generate(ruby0) # {"json_class":"OpenStruct","t":{"name":"Matz","language":"Ruby"}} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # OpenStruct +# +# \Range: +# require 'json/add/range' +# ruby0 = Range.new(0, 2) # 0..2 +# json = JSON.generate(ruby0) # {"json_class":"Range","a":[0,2,false]} +# ruby1 = JSON.parse(json, create_additions: true) # 0..2 +# ruby1.class # Range +# +# \Rational: +# require 'json/add/rational' +# ruby0 = Rational(1, 3) # 1/3 +# json = JSON.generate(ruby0) # {"json_class":"Rational","n":1,"d":3} +# ruby1 = JSON.parse(json, create_additions: true) # 1/3 +# ruby1.class # Rational +# +# \Regexp: +# require 'json/add/regexp' +# ruby0 = Regexp.new('foo') # (?-mix:foo) +# json = JSON.generate(ruby0) # {"json_class":"Regexp","o":0,"s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # (?-mix:foo) +# ruby1.class # Regexp +# +# \Set: +# require 'json/add/set' +# ruby0 = Set.new([0, 1, 2]) # # +# json = JSON.generate(ruby0) # {"json_class":"Set","a":[0,1,2]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Set +# +# \Struct: +# require 'json/add/struct' +# Customer = Struct.new(:name, :address) # Customer +# ruby0 = Customer.new("Dave", "123 Main") # # +# json = JSON.generate(ruby0) # {"json_class":"Customer","v":["Dave","123 Main"]} +# ruby1 = JSON.parse(json, create_additions: true) # # +# ruby1.class # Customer +# +# \Symbol: +# require 'json/add/symbol' +# ruby0 = :foo # foo +# json = JSON.generate(ruby0) # {"json_class":"Symbol","s":"foo"} +# ruby1 = JSON.parse(json, create_additions: true) # foo +# ruby1.class # Symbol +# +# \Time: +# require 'json/add/time' +# ruby0 = Time.now # 2020-05-02 11:28:26 -0500 +# json = JSON.generate(ruby0) # {"json_class":"Time","s":1588436906,"n":840560000} +# ruby1 = JSON.parse(json, create_additions: true) # 2020-05-02 11:28:26 -0500 +# ruby1.class # Time +# +# +# === Custom \JSON Additions +# +# In addition to the \JSON additions provided, +# you can craft \JSON additions of your own, +# either for Ruby built-in classes or for user-defined classes. +# +# Here's a user-defined class +Foo+: +# class Foo +# attr_accessor :bar, :baz +# def initialize(bar, baz) +# self.bar = bar +# self.baz = baz +# end +# end +# +# Here's the \JSON addition for it: +# # Extend class Foo with JSON addition. +# class Foo +# # Serialize Foo object with its class name and arguments +# def to_json(*args) +# { +# JSON.create_id => self.class.name, +# 'a' => [ bar, baz ] +# }.to_json(*args) +# end +# # Deserialize JSON string by constructing new Foo object with arguments. +# def self.json_create(object) +# new(*object['a']) +# end +# end +# +# Demonstration: +# require 'json' +# # This Foo object has no custom addition. +# foo0 = Foo.new(0, 1) +# json0 = JSON.generate(foo0) +# obj0 = JSON.parse(json0) +# # Lood the custom addition. +# require_relative 'foo_addition' +# # This foo has the custom addition. 
+# foo1 = Foo.new(0, 1) +# json1 = JSON.generate(foo1) +# obj1 = JSON.parse(json1, create_additions: true) +# # Make a nice display. +# display = <" (String) +# With custom addition: {"json_class":"Foo","a":[0,1]} (String) +# Parsed JSON: +# Without custom addition: "#" (String) +# With custom addition: # (Foo) +# +module JSON + require 'json/version' + + begin + require 'json/ext' + rescue LoadError + require 'json/pure' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/bigdecimal.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/bigdecimal.rb new file mode 100644 index 0000000..c8b4f56 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/bigdecimal.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::BigDecimal) or require 'bigdecimal' + +class BigDecimal + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + BigDecimal._load object['b'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'b' => _dump, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/complex.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/complex.rb new file mode 100644 index 0000000..e63e29f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/complex.rb @@ -0,0 +1,28 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Complex + + # Deserializes JSON string by converting Real value r, imaginary + # value i, to a Complex object. + def self.json_create(object) + Complex(object['r'], object['i']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'r' => real, + 'i' => imag, + } + end + + # Stores class name (Complex) along with real value r and imaginary value i as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/core.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/core.rb new file mode 100644 index 0000000..bfb017c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/core.rb @@ -0,0 +1,12 @@ +#frozen_string_literal: false +# This file requires the implementations of ruby core's custom objects for +# serialisation/deserialisation. + +require 'json/add/date' +require 'json/add/date_time' +require 'json/add/exception' +require 'json/add/range' +require 'json/add/regexp' +require 'json/add/struct' +require 'json/add/symbol' +require 'json/add/time' diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/date.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/date.rb new file mode 100644 index 0000000..2552356 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/date.rb @@ -0,0 +1,34 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class Date + + # Deserializes JSON string by converting Julian year y, month + # m, day d and Day of Calendar Reform sg to Date. 
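The custom-addition recipe above boils down to two hooks: an instance to_json that embeds JSON.create_id, and a class-level json_create that rebuilds the object. A compact, self-contained restatement of the Foo example, runnable as-is:

    require 'json'

    class Foo
      attr_accessor :bar, :baz

      def initialize(bar, baz)
        self.bar, self.baz = bar, baz
      end

      # Serialize with the create_id so the parser can find Foo again.
      def to_json(*args)
        { JSON.create_id => self.class.name, 'a' => [bar, baz] }.to_json(*args)
      end

      # Rebuild a Foo from the parsed Hash.
      def self.json_create(object)
        new(*object['a'])
      end
    end

    json = JSON.generate(Foo.new(0, 1))           # => '{"json_class":"Foo","a":[0,1]}'
    JSON.parse(json, create_additions: true).bar  # => 0, and the result is a Foo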
+ def self.json_create(object) + civil(*object.values_at('y', 'm', 'd', 'sg')) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'sg' => start, + } + end + + # Stores class name (Date) with Julian year y, month m, day + # d and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/date_time.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/date_time.rb new file mode 100644 index 0000000..38b0e86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/date_time.rb @@ -0,0 +1,50 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'date' + +class DateTime + + # Deserializes JSON string by converting year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg to DateTime. + def self.json_create(object) + args = object.values_at('y', 'm', 'd', 'H', 'M', 'S') + of_a, of_b = object['of'].split('/') + if of_b and of_b != '0' + args << Rational(of_a.to_i, of_b.to_i) + else + args << of_a + end + args << object['sg'] + civil(*args) + end + + alias start sg unless method_defined?(:start) + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'y' => year, + 'm' => month, + 'd' => day, + 'H' => hour, + 'M' => min, + 'S' => sec, + 'of' => offset.to_s, + 'sg' => start, + } + end + + # Stores class name (DateTime) with Julian year y, month m, + # day d, hour H, minute M, second S, + # offset of and Day of Calendar Reform sg as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end + + diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/exception.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/exception.rb new file mode 100644 index 0000000..a107e5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/exception.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Exception + + # Deserializes JSON string by constructing new Exception object with message + # m and backtrace b serialized with to_json + def self.json_create(object) + result = new(object['m']) + result.set_backtrace object['b'] + result + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'm' => message, + 'b' => backtrace, + } + end + + # Stores class name (Exception) with message m and backtrace array + # b as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/ostruct.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/ostruct.rb new file mode 100644 index 0000000..686cf00 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/ostruct.rb @@ -0,0 +1,31 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +require 'ostruct' + +class OpenStruct + + # Deserializes JSON string by constructing new Struct object with values + # t serialized by to_json. 
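The Date addition just shown serializes the civil year, month, and day plus the Day of Calendar Reform; with it loaded, a Date survives a round trip instead of coming back as a String:

    require 'json/add/date'

    json = Date.new(2022, 2, 22).to_json
    # => '{"json_class":"Date","y":2022,"m":2,"d":22,"sg":2299161.0}'

    JSON.parse(json, create_additions: true)  # => a Date again, not a String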
+ def self.json_create(object) + new(object['t'] || object[:t]) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 't' => table, + } + end + + # Stores class name (OpenStruct) with this struct's values t as a + # JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/range.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/range.rb new file mode 100644 index 0000000..93529fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/range.rb @@ -0,0 +1,29 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Range + + # Deserializes JSON string by constructing new Range object with arguments + # a serialized by to_json. + def self.json_create(object) + new(*object['a']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => [ first, last, exclude_end? ] + } + end + + # Stores class name (Range) with JSON array of arguments a which + # include first (integer), last (integer), and + # exclude_end? (boolean) as JSON string. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/rational.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/rational.rb new file mode 100644 index 0000000..f776226 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/rational.rb @@ -0,0 +1,27 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Rational + # Deserializes JSON string by converting numerator value n, + # denominator value d, to a Rational object. + def self.json_create(object) + Rational(object['n'], object['d']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 'n' => numerator, + 'd' => denominator, + } + end + + # Stores class name (Rational) along with numerator value n and denominator value d as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/regexp.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/regexp.rb new file mode 100644 index 0000000..39d69fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/regexp.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Regexp + + # Deserializes JSON string by constructing new Regexp object with source + # s (Regexp or String) and options o serialized by + # to_json + def self.json_create(object) + new(object['s'], object['o']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. 
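The Range addition above follows the same pattern: first, last, and exclude_end? go into 'a', and json_create splats them back into Range.new:

    require 'json/add/range'

    json = (0...5).to_json
    # => '{"json_class":"Range","a":[0,5,true]}'

    JSON.parse(json, create_additions: true)  # => 0...5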
+ def as_json(*) + { + JSON.create_id => self.class.name, + 'o' => options, + 's' => source, + } + end + + # Stores class name (Regexp) with options o and source s + # (Regexp or String) as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/set.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/set.rb new file mode 100644 index 0000000..71e2a0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/set.rb @@ -0,0 +1,29 @@ +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end +defined?(::Set) or require 'set' + +class Set + # Import a JSON Marshalled object. + # + # method used for JSON marshalling support. + def self.json_create(object) + new object['a'] + end + + # Marshal the object to JSON. + # + # method used for JSON marshalling support. + def as_json(*) + { + JSON.create_id => self.class.name, + 'a' => to_a, + } + end + + # return the JSON value + def to_json(*args) + as_json.to_json(*args) + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/struct.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/struct.rb new file mode 100644 index 0000000..e8395ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/struct.rb @@ -0,0 +1,30 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Struct + + # Deserializes JSON string by constructing new Struct object with values + # v serialized by to_json. + def self.json_create(object) + new(*object['v']) + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + klass = self.class.name + klass.to_s.empty? and raise JSON::JSONError, "Only named structs are supported!" + { + JSON.create_id => klass, + 'v' => values, + } + end + + # Stores class name (Struct) with Struct values v as a JSON string. + # Only named structs are supported. + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/symbol.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/symbol.rb new file mode 100644 index 0000000..74b13a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/symbol.rb @@ -0,0 +1,25 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Symbol + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + { + JSON.create_id => self.class.name, + 's' => to_s, + } + end + + # Stores class name (Symbol) with String representation of Symbol as a JSON string. 
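And for the Symbol addition being defined here, which otherwise degrades to a plain String on the way back:

    require 'json/add/symbol'

    json = JSON.generate(:foo)                # => '{"json_class":"Symbol","s":"foo"}'
    JSON.parse(json, create_additions: true)  # => :foo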
+ def to_json(*a) + as_json.to_json(*a) + end + + # Deserializes JSON string by converting the string value stored in the object to a Symbol + def self.json_create(o) + o['s'].to_sym + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/time.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/time.rb new file mode 100644 index 0000000..b73acc4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/add/time.rb @@ -0,0 +1,38 @@ +#frozen_string_literal: false +unless defined?(::JSON::JSON_LOADED) and ::JSON::JSON_LOADED + require 'json' +end + +class Time + + # Deserializes JSON string by converting time since epoch to Time + def self.json_create(object) + if usec = object.delete('u') # used to be tv_usec -> tv_nsec + object['n'] = usec * 1000 + end + if method_defined?(:tv_nsec) + at(object['s'], Rational(object['n'], 1000)) + else + at(object['s'], object['n'] / 1000) + end + end + + # Returns a hash, that will be turned into a JSON object and represent this + # object. + def as_json(*) + nanoseconds = [ tv_usec * 1000 ] + respond_to?(:tv_nsec) and nanoseconds << tv_nsec + nanoseconds = nanoseconds.max + { + JSON.create_id => self.class.name, + 's' => tv_sec, + 'n' => nanoseconds, + } + end + + # Stores class name (Time) with number of seconds since epoch and number of + # microseconds for Time as JSON string + def to_json(*args) + as_json.to_json(*args) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/common.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/common.rb new file mode 100644 index 0000000..ea46896 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/common.rb @@ -0,0 +1,703 @@ +#frozen_string_literal: false +require 'json/version' +require 'json/generic_object' + +module JSON + class << self + # :call-seq: + # JSON[object] -> new_array or new_string + # + # If +object+ is a \String, + # calls JSON.parse with +object+ and +opts+ (see method #parse): + # json = '[0, 1, null]' + # JSON[json]# => [0, 1, nil] + # + # Otherwise, calls JSON.generate with +object+ and +opts+ (see method #generate): + # ruby = [0, 1, nil] + # JSON[ruby] # => '[0,1,null]' + def [](object, opts = {}) + if object.respond_to? :to_str + JSON.parse(object.to_str, opts) + else + JSON.generate(object, opts) + end + end + + # Returns the JSON parser class that is used by JSON. This is either + # JSON::Ext::Parser or JSON::Pure::Parser: + # JSON.parser # => JSON::Ext::Parser + attr_reader :parser + + # Set the JSON parser class _parser_ to be used by JSON. + def parser=(parser) # :nodoc: + @parser = parser + remove_const :Parser if const_defined?(:Parser, false) + const_set :Parser, parser + end + + # Return the constant located at _path_. The format of _path_ has to be + # either ::A::B::C or A::B::C. In any case, A has to be located at the top + # level (absolute namespace path?). If there doesn't exist a constant at + # the given path, an ArgumentError is raised. + def deep_const_get(path) # :nodoc: + path.to_s.split(/::/).inject(Object) do |p, c| + case + when c.empty? then p + when p.const_defined?(c, true) then p.const_get(c) + else + begin + p.const_missing(c) + rescue NameError => e + raise ArgumentError, "can't get const #{path}: #{e}" + end + end + end + end + + # Set the module _generator_ to be used by JSON. 
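JSON.[] defined above is the parse-or-generate shortcut: anything responding to to_str is parsed, everything else is generated:

    require 'json'

    JSON['[0, 1, null]']   # => [0, 1, nil]    (String in, Ruby out)
    JSON[[0, 1, nil]]      # => "[0,1,null]"   (Ruby in, JSON String out)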
+ def generator=(generator) # :nodoc: + old, $VERBOSE = $VERBOSE, nil + @generator = generator + generator_methods = generator::GeneratorMethods + for const in generator_methods.constants + klass = deep_const_get(const) + modul = generator_methods.const_get(const) + klass.class_eval do + instance_methods(false).each do |m| + m.to_s == 'to_json' and remove_method m + end + include modul + end + end + self.state = generator::State + const_set :State, self.state + const_set :SAFE_STATE_PROTOTYPE, State.new # for JRuby + const_set :FAST_STATE_PROTOTYPE, create_fast_state + const_set :PRETTY_STATE_PROTOTYPE, create_pretty_state + ensure + $VERBOSE = old + end + + def create_fast_state + State.new( + :indent => '', + :space => '', + :object_nl => "", + :array_nl => "", + :max_nesting => false + ) + end + + def create_pretty_state + State.new( + :indent => ' ', + :space => ' ', + :object_nl => "\n", + :array_nl => "\n" + ) + end + + # Returns the JSON generator module that is used by JSON. This is + # either JSON::Ext::Generator or JSON::Pure::Generator: + # JSON.generator # => JSON::Ext::Generator + attr_reader :generator + + # Sets or Returns the JSON generator state class that is used by JSON. This is + # either JSON::Ext::Generator::State or JSON::Pure::Generator::State: + # JSON.state # => JSON::Ext::Generator::State + attr_accessor :state + end + + DEFAULT_CREATE_ID = 'json_class'.freeze + private_constant :DEFAULT_CREATE_ID + + CREATE_ID_TLS_KEY = "JSON.create_id".freeze + private_constant :CREATE_ID_TLS_KEY + + # Sets create identifier, which is used to decide if the _json_create_ + # hook of a class should be called; initial value is +json_class+: + # JSON.create_id # => 'json_class' + def self.create_id=(new_value) + Thread.current[CREATE_ID_TLS_KEY] = new_value.dup.freeze + end + + # Returns the current create identifier. + # See also JSON.create_id=. + def self.create_id + Thread.current[CREATE_ID_TLS_KEY] || DEFAULT_CREATE_ID + end + + NaN = 0.0/0 + + Infinity = 1.0/0 + + MinusInfinity = -Infinity + + # The base exception for JSON errors. + class JSONError < StandardError + def self.wrap(exception) + obj = new("Wrapped(#{exception.class}): #{exception.message.inspect}") + obj.set_backtrace exception.backtrace + obj + end + end + + # This exception is raised if a parser error occurs. + class ParserError < JSONError; end + + # This exception is raised if the nesting of parsed data structures is too + # deep. + class NestingError < ParserError; end + + # :stopdoc: + class CircularDatastructure < NestingError; end + # :startdoc: + + # This exception is raised if a generator or unparser error occurs. + class GeneratorError < JSONError; end + # For backwards compatibility + UnparserError = GeneratorError # :nodoc: + + # This exception is raised if the required unicode support is missing on the + # system. Usually this means that the iconv library is not installed. + class MissingUnicodeSupport < JSONError; end + + module_function + + # :call-seq: + # JSON.parse(source, opts) -> object + # + # Returns the Ruby objects created by parsing the given +source+. + # + # Argument +source+ contains the \String to be parsed. + # + # Argument +opts+, if given, contains a \Hash of options for the parsing. + # See {Parsing Options}[#module-JSON-label-Parsing+Options]. 
+ # + # --- + # + # When +source+ is a \JSON array, returns a Ruby \Array: + # source = '["foo", 1.0, true, false, null]' + # ruby = JSON.parse(source) + # ruby # => ["foo", 1.0, true, false, nil] + # ruby.class # => Array + # + # When +source+ is a \JSON object, returns a Ruby \Hash: + # source = '{"a": "foo", "b": 1.0, "c": true, "d": false, "e": null}' + # ruby = JSON.parse(source) + # ruby # => {"a"=>"foo", "b"=>1.0, "c"=>true, "d"=>false, "e"=>nil} + # ruby.class # => Hash + # + # For examples of parsing for all \JSON data types, see + # {Parsing \JSON}[#module-JSON-label-Parsing+JSON]. + # + # Parses nested JSON objects: + # source = <<-EOT + # { + # "name": "Dave", + # "age" :40, + # "hats": [ + # "Cattleman's", + # "Panama", + # "Tophat" + # ] + # } + # EOT + # ruby = JSON.parse(source) + # ruby # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # --- + # + # Raises an exception if +source+ is not valid JSON: + # # Raises JSON::ParserError (783: unexpected token at ''): + # JSON.parse('') + # + def parse(source, opts = {}) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.parse!(source, opts) -> object + # + # Calls + # parse(source, opts) + # with +source+ and possibly modified +opts+. + # + # Differences from JSON.parse: + # - Option +max_nesting+, if not provided, defaults to +false+, + # which disables checking for nesting depth. + # - Option +allow_nan+, if not provided, defaults to +true+. + def parse!(source, opts = {}) + opts = { + :max_nesting => false, + :allow_nan => true + }.merge(opts) + Parser.new(source, **(opts||{})).parse + end + + # :call-seq: + # JSON.load_file(path, opts={}) -> object + # + # Calls: + # parse(File.read(path), opts) + # + # See method #parse. + def load_file(filespec, opts = {}) + parse(File.read(filespec), opts) + end + + # :call-seq: + # JSON.load_file!(path, opts = {}) + # + # Calls: + # JSON.parse!(File.read(path, opts)) + # + # See method #parse! + def load_file!(filespec, opts = {}) + parse!(File.read(filespec), opts) + end + + # :call-seq: + # JSON.generate(obj, opts = nil) -> new_string + # + # Returns a \String containing the generated \JSON data. + # + # See also JSON.fast_generate, JSON.pretty_generate. + # + # Argument +obj+ is the Ruby object to be converted to \JSON. + # + # Argument +opts+, if given, contains a \Hash of options for the generation. + # See {Generating Options}[#module-JSON-label-Generating+Options]. + # + # --- + # + # When +obj+ is an \Array, returns a \String containing a \JSON array: + # obj = ["foo", 1.0, true, false, nil] + # json = JSON.generate(obj) + # json # => '["foo",1.0,true,false,null]' + # + # When +obj+ is a \Hash, returns a \String containing a \JSON object: + # obj = {foo: 0, bar: 's', baz: :bat} + # json = JSON.generate(obj) + # json # => '{"foo":0,"bar":"s","baz":"bat"}' + # + # For examples of generating from other Ruby objects, see + # {Generating \JSON from Other Objects}[#module-JSON-label-Generating+JSON+from+Other+Objects]. + # + # --- + # + # Raises an exception if any formatting option is not a \String. + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises JSON::NestingError (nesting of 100 is too deep): + # JSON.generate(a) + # + def generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = State.new + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? 
:to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state = state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and + # later delete them. + alias unparse generate + module_function :unparse + # :startdoc: + + # :call-seq: + # JSON.fast_generate(obj, opts) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # By default, generates \JSON data without checking + # for circular references in +obj+ (option +max_nesting+ set to +false+, disabled). + # + # Raises an exception if +obj+ contains circular references: + # a = []; b = []; a.push(b); b.push(a) + # # Raises SystemStackError (stack level too deep): + # JSON.fast_generate(a) + def fast_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = JSON.create_fast_state + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias fast_unparse fast_generate + module_function :fast_unparse + # :startdoc: + + # :call-seq: + # JSON.pretty_generate(obj, opts = nil) -> new_string + # + # Arguments +obj+ and +opts+ here are the same as + # arguments +obj+ and +opts+ in JSON.generate. + # + # Default options are: + # { + # indent: ' ', # Two spaces + # space: ' ', # One space + # array_nl: "\n", # Newline + # object_nl: "\n" # Newline + # } + # + # Example: + # obj = {foo: [:bar, :baz], bat: {bam: 0, bad: 1}} + # json = JSON.pretty_generate(obj) + # puts json + # Output: + # { + # "foo": [ + # "bar", + # "baz" + # ], + # "bat": { + # "bam": 0, + # "bad": 1 + # } + # } + # + def pretty_generate(obj, opts = nil) + if State === opts + state, opts = opts, nil + else + state = JSON.create_pretty_state + end + if opts + if opts.respond_to? :to_hash + opts = opts.to_hash + elsif opts.respond_to? :to_h + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + state.configure(opts) + end + state.generate(obj) + end + + # :stopdoc: + # I want to deprecate these later, so I'll first be silent about them, and later delete them. + alias pretty_unparse pretty_generate + module_function :pretty_unparse + # :startdoc: + + class << self + # Sets or returns default options for the JSON.load method. + # Initially: + # opts = JSON.load_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :allow_blank=>true, :create_additions=>true} + attr_accessor :load_default_options + end + self.load_default_options = { + :max_nesting => false, + :allow_nan => true, + :allow_blank => true, + :create_additions => true, + } + + # :call-seq: + # JSON.load(source, proc = nil, options = {}) -> object + # + # Returns the Ruby objects created by parsing the given +source+. + # + # - Argument +source+ must be, or be convertible to, a \String: + # - If +source+ responds to instance method +to_str+, + # source.to_str becomes the source. + # - If +source+ responds to instance method +to_io+, + # source.to_io.read becomes the source. + # - If +source+ responds to instance method +read+, + # source.read becomes the source. 
+ # - If both of the following are true, source becomes the \String 'null': + # - Option +allow_blank+ specifies a truthy value. + # - The source, as defined above, is +nil+ or the empty \String ''. + # - Otherwise, +source+ remains the source. + # - Argument +proc+, if given, must be a \Proc that accepts one argument. + # It will be called recursively with each result (depth-first order). + # See details below. + # BEWARE: This method is meant to serialise data from trusted user input, + # like from your own database server or clients under your control, it could + # be dangerous to allow untrusted users to pass JSON sources into it. + # - Argument +opts+, if given, contains a \Hash of options for the parsing. + # See {Parsing Options}[#module-JSON-label-Parsing+Options]. + # The default options can be changed via method JSON.load_default_options=. + # + # --- + # + # When no +proc+ is given, modifies +source+ as above and returns the result of + # parse(source, opts); see #parse. + # + # Source for following examples: + # source = <<-EOT + # { + # "name": "Dave", + # "age" :40, + # "hats": [ + # "Cattleman's", + # "Panama", + # "Tophat" + # ] + # } + # EOT + # + # Load a \String: + # ruby = JSON.load(source) + # ruby # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # Load an \IO object: + # require 'stringio' + # object = JSON.load(StringIO.new(source)) + # object # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # Load a \File object: + # path = 't.json' + # File.write(path, source) + # File.open(path) do |file| + # JSON.load(file) + # end # => {"name"=>"Dave", "age"=>40, "hats"=>["Cattleman's", "Panama", "Tophat"]} + # + # --- + # + # When +proc+ is given: + # - Modifies +source+ as above. + # - Gets the +result+ from calling parse(source, opts). + # - Recursively calls proc(result). + # - Returns the final result. + # + # Example: + # require 'json' + # + # # Some classes for the example. + # class Base + # def initialize(attributes) + # @attributes = attributes + # end + # end + # class User < Base; end + # class Account < Base; end + # class Admin < Base; end + # # The JSON source. + # json = <<-EOF + # { + # "users": [ + # {"type": "User", "username": "jane", "email": "jane@example.com"}, + # {"type": "User", "username": "john", "email": "john@example.com"} + # ], + # "accounts": [ + # {"account": {"type": "Account", "paid": true, "account_id": "1234"}}, + # {"account": {"type": "Account", "paid": false, "account_id": "1235"}} + # ], + # "admins": {"type": "Admin", "password": "0wn3d"} + # } + # EOF + # # Deserializer method. + # def deserialize_obj(obj, safe_types = %w(User Account Admin)) + # type = obj.is_a?(Hash) && obj["type"] + # safe_types.include?(type) ? Object.const_get(type).new(obj) : obj + # end + # # Call to JSON.load + # ruby = JSON.load(json, proc {|obj| + # case obj + # when Hash + # obj.each {|k, v| obj[k] = deserialize_obj v } + # when Array + # obj.map! {|v| deserialize_obj v } + # end + # }) + # pp ruby + # Output: + # {"users"=> + # [#"User", "username"=>"jane", "email"=>"jane@example.com"}>, + # #"User", "username"=>"john", "email"=>"john@example.com"}>], + # "accounts"=> + # [{"account"=> + # #"Account", "paid"=>true, "account_id"=>"1234"}>}, + # {"account"=> + # #"Account", "paid"=>false, "account_id"=>"1235"}>}], + # "admins"=> + # #"Admin", "password"=>"0wn3d"}>} + # + def load(source, proc = nil, options = {}) + opts = load_default_options.merge options + if source.respond_to? 
:to_str + source = source.to_str + elsif source.respond_to? :to_io + source = source.to_io.read + elsif source.respond_to?(:read) + source = source.read + end + if opts[:allow_blank] && (source.nil? || source.empty?) + source = 'null' + end + result = parse(source, opts) + recurse_proc(result, &proc) if proc + result + end + + # Recursively calls passed _Proc_ if the parsed data structure is an _Array_ or _Hash_ + def recurse_proc(result, &proc) # :nodoc: + case result + when Array + result.each { |x| recurse_proc x, &proc } + proc.call result + when Hash + result.each { |x, y| recurse_proc x, &proc; recurse_proc y, &proc } + proc.call result + else + proc.call result + end + end + + alias restore load + module_function :restore + + class << self + # Sets or returns the default options for the JSON.dump method. + # Initially: + # opts = JSON.dump_default_options + # opts # => {:max_nesting=>false, :allow_nan=>true, :escape_slash=>false} + attr_accessor :dump_default_options + end + self.dump_default_options = { + :max_nesting => false, + :allow_nan => true, + :escape_slash => false, + } + + # :call-seq: + # JSON.dump(obj, io = nil, limit = nil) + # + # Dumps +obj+ as a \JSON string, i.e. calls generate on the object and returns the result. + # + # The default options can be changed via method JSON.dump_default_options. + # + # - Argument +io+, if given, should respond to method +write+; + # the \JSON \String is written to +io+, and +io+ is returned. + # If +io+ is not given, the \JSON \String is returned. + # - Argument +limit+, if given, is passed to JSON.generate as option +max_nesting+. + # + # --- + # + # When argument +io+ is not given, returns the \JSON \String generated from +obj+: + # obj = {foo: [0, 1], bar: {baz: 2, bat: 3}, bam: :bad} + # json = JSON.dump(obj) + # json # => "{\"foo\":[0,1],\"bar\":{\"baz\":2,\"bat\":3},\"bam\":\"bad\"}" + # + # When argument +io+ is given, writes the \JSON \String to +io+ and returns +io+: + # path = 't.json' + # File.open(path, 'w') do |file| + # JSON.dump(obj, file) + # end # => # + # puts File.read(path) + # Output: + # {"foo":[0,1],"bar":{"baz":2,"bat":3},"bam":"bad"} + def dump(obj, anIO = nil, limit = nil) + if anIO and limit.nil? + anIO = anIO.to_io if anIO.respond_to?(:to_io) + unless anIO.respond_to?(:write) + limit = anIO + anIO = nil + end + end + opts = JSON.dump_default_options + opts = opts.merge(:max_nesting => limit) if limit + result = generate(obj, opts) + if anIO + anIO.write result + anIO + else + result + end + rescue JSON::NestingError + raise ArgumentError, "exceed depth limit" + end + + # Encodes string using String.encode. + def self.iconv(to, from, string) + string.encode(to, from) + end +end + +module ::Kernel + private + + # Outputs _objs_ to STDOUT as JSON strings in the shortest form, that is in + # one line. + def j(*objs) + objs.each do |obj| + puts JSON::generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # Outputs _objs_ to STDOUT as JSON strings in a pretty format, with + # indentation and over many lines. + def jj(*objs) + objs.each do |obj| + puts JSON::pretty_generate(obj, :allow_nan => true, :max_nesting => false) + end + nil + end + + # If _object_ is string-like, parse the string and return the parsed result as + # a Ruby data structure. Otherwise, generate a JSON text from the Ruby data + # structure object and return it. + # + # The _opts_ argument is passed through to generate/parse respectively. See + # generate and parse for their documentation. 
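A short sketch of JSON.dump and the Kernel-level JSON() helper documented above (the t.json path is purely illustrative):

  require 'json'

  obj = { "foo" => [0, 1], "bar" => { "baz" => 2 } }
  JSON.dump(obj)  # => '{"foo":[0,1],"bar":{"baz":2}}'

  # With an IO argument the JSON text is written to it and the IO is returned.
  File.open("t.json", "w") { |file| JSON.dump(obj, file) }

  # Kernel#JSON parses string-like input and generates JSON from anything else.
  JSON('{"a": 1}')    # => {"a"=>1}
  JSON({ "a" => 1 })  # => '{"a":1}'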
+ def JSON(object, *args) + if object.respond_to? :to_str + JSON.parse(object.to_str, args.first) + else + JSON.generate(object, args.first) + end + end +end + +# Extends any Class to include _json_creatable?_ method. +class ::Class + # Returns true if this class can be used to create an instance + # from a serialised JSON string. The class has to implement a class + # method _json_create_ that expects a hash as first parameter. The hash + # should include the required data. + def json_creatable? + respond_to?(:json_create) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext.rb new file mode 100644 index 0000000..7264a85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality as C extensions. + module Ext + require 'json/ext/parser' + require 'json/ext/generator' + $DEBUG and warn "Using Ext extension for JSON." + JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext/generator.bundle b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext/generator.bundle new file mode 100755 index 0000000..0a5c4b7 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext/generator.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext/parser.bundle b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext/parser.bundle new file mode 100755 index 0000000..1137b12 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/ext/parser.bundle differ diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/generic_object.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/generic_object.rb new file mode 100644 index 0000000..108309d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/generic_object.rb @@ -0,0 +1,71 @@ +#frozen_string_literal: false +require 'ostruct' + +module JSON + class GenericObject < OpenStruct + class << self + alias [] new + + def json_creatable? + @json_creatable + end + + attr_writer :json_creatable + + def json_create(data) + data = data.dup + data.delete JSON.create_id + self[data] + end + + def from_hash(object) + case + when object.respond_to?(:to_hash) + result = new + object.to_hash.each do |key, value| + result[key] = from_hash(value) + end + result + when object.respond_to?(:to_ary) + object.to_ary.map { |a| from_hash(a) } + else + object + end + end + + def load(source, proc = nil, opts = {}) + result = ::JSON.load(source, proc, opts.merge(:object_class => self)) + result.nil? ? 
new : result + end + + def dump(obj, *args) + ::JSON.dump(obj, *args) + end + end + self.json_creatable = false + + def to_hash + table + end + + def [](name) + __send__(name) + end unless method_defined?(:[]) + + def []=(name, value) + __send__("#{name}=", value) + end unless method_defined?(:[]=) + + def |(other) + self.class[other.to_hash.merge(to_hash)] + end + + def as_json(*) + { JSON.create_id => self.class.name }.merge to_hash + end + + def to_json(*a) + as_json.to_json(*a) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure.rb new file mode 100644 index 0000000..53178b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure.rb @@ -0,0 +1,15 @@ +require 'json/common' + +module JSON + # This module holds all the modules/classes that implement JSON's + # functionality in pure ruby. + module Pure + require 'json/pure/parser' + require 'json/pure/generator' + $DEBUG and warn "Using Pure library for JSON." + JSON.parser = Parser + JSON.generator = Generator + end + + JSON_LOADED = true unless defined?(::JSON::JSON_LOADED) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure/generator.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure/generator.rb new file mode 100644 index 0000000..2257ee3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure/generator.rb @@ -0,0 +1,479 @@ +#frozen_string_literal: false +module JSON + MAP = { + "\x0" => '\u0000', + "\x1" => '\u0001', + "\x2" => '\u0002', + "\x3" => '\u0003', + "\x4" => '\u0004', + "\x5" => '\u0005', + "\x6" => '\u0006', + "\x7" => '\u0007', + "\b" => '\b', + "\t" => '\t', + "\n" => '\n', + "\xb" => '\u000b', + "\f" => '\f', + "\r" => '\r', + "\xe" => '\u000e', + "\xf" => '\u000f', + "\x10" => '\u0010', + "\x11" => '\u0011', + "\x12" => '\u0012', + "\x13" => '\u0013', + "\x14" => '\u0014', + "\x15" => '\u0015', + "\x16" => '\u0016', + "\x17" => '\u0017', + "\x18" => '\u0018', + "\x19" => '\u0019', + "\x1a" => '\u001a', + "\x1b" => '\u001b', + "\x1c" => '\u001c', + "\x1d" => '\u001d', + "\x1e" => '\u001e', + "\x1f" => '\u001f', + '"' => '\"', + '\\' => '\\\\', + } # :nodoc: + + ESCAPE_SLASH_MAP = MAP.merge( + '/' => '\\/', + ) + + # Convert a UTF8 encoded Ruby string _string_ to a JSON string, encoded with + # UTF16 big endian characters as \u????, and return it. + def utf8_to_json(string, escape_slash = false) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + map = escape_slash ? ESCAPE_SLASH_MAP : MAP + string.gsub!(/[\/"\\\x0-\x1f]/) { map[$&] || $& } + string.force_encoding(::Encoding::UTF_8) + string + end + + def utf8_to_json_ascii(string, escape_slash = false) # :nodoc: + string = string.dup + string.force_encoding(::Encoding::ASCII_8BIT) + map = escape_slash ? 
ESCAPE_SLASH_MAP : MAP + string.gsub!(/[\/"\\\x0-\x1f]/n) { map[$&] || $& } + string.gsub!(/( + (?: + [\xc2-\xdf][\x80-\xbf] | + [\xe0-\xef][\x80-\xbf]{2} | + [\xf0-\xf4][\x80-\xbf]{3} + )+ | + [\x80-\xc1\xf5-\xff] # invalid + )/nx) { |c| + c.size == 1 and raise GeneratorError, "invalid utf8 byte: '#{c}'" + s = JSON.iconv('utf-16be', 'utf-8', c).unpack('H*')[0] + s.force_encoding(::Encoding::ASCII_8BIT) + s.gsub!(/.{4}/n, '\\\\u\&') + s.force_encoding(::Encoding::UTF_8) + } + string.force_encoding(::Encoding::UTF_8) + string + rescue => e + raise GeneratorError.wrap(e) + end + + def valid_utf8?(string) + encoding = string.encoding + (encoding == Encoding::UTF_8 || encoding == Encoding::ASCII) && + string.valid_encoding? + end + module_function :utf8_to_json, :utf8_to_json_ascii, :valid_utf8? + + module Pure + module Generator + # This class is used to create State instances, that are use to hold data + # while generating a JSON text from a Ruby data structure. + class State + # Creates a State object from _opts_, which ought to be Hash to create + # a new State instance configured by _opts_, something else to create + # an unconfigured instance. If _opts_ is a State object, it is just + # returned. + def self.from_state(opts) + case + when self === opts + opts + when opts.respond_to?(:to_hash) + new(opts.to_hash) + when opts.respond_to?(:to_h) + new(opts.to_h) + else + SAFE_STATE_PROTOTYPE.dup + end + end + + # Instantiates a new State object, configured by _opts_. + # + # _opts_ can have the following keys: + # + # * *indent*: a string used to indent levels (default: ''), + # * *space*: a string that is put after, a : or , delimiter (default: ''), + # * *space_before*: a string that is put before a : pair delimiter (default: ''), + # * *object_nl*: a string that is put at the end of a JSON object (default: ''), + # * *array_nl*: a string that is put at the end of a JSON array (default: ''), + # * *escape_slash*: true if forward slash (/) should be escaped (default: false) + # * *check_circular*: is deprecated now, use the :max_nesting option instead, + # * *max_nesting*: sets the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum should be checked. + # * *allow_nan*: true if NaN, Infinity, and -Infinity should be + # generated, otherwise an exception is thrown, if these values are + # encountered. This options defaults to false. + def initialize(opts = {}) + @indent = '' + @space = '' + @space_before = '' + @object_nl = '' + @array_nl = '' + @allow_nan = false + @ascii_only = false + @escape_slash = false + @buffer_initial_length = 1024 + configure opts + end + + # This string is used to indent levels in the JSON text. + attr_accessor :indent + + # This string is used to insert a space between the tokens in a JSON + # string. + attr_accessor :space + + # This string is used to insert a space before the ':' in JSON objects. + attr_accessor :space_before + + # This string is put at the end of a line that holds a JSON object (or + # Hash). + attr_accessor :object_nl + + # This string is put at the end of a line that holds a JSON array. + attr_accessor :array_nl + + # This integer returns the maximum level of data structure nesting in + # the generated JSON, max_nesting = 0 if no maximum is checked. + attr_accessor :max_nesting + + # If this attribute is set to true, forward slashes will be escaped in + # all json strings. 
+ attr_accessor :escape_slash + + # :stopdoc: + attr_reader :buffer_initial_length + + def buffer_initial_length=(length) + if length > 0 + @buffer_initial_length = length + end + end + # :startdoc: + + # This integer returns the current depth data structure nesting in the + # generated JSON. + attr_accessor :depth + + def check_max_nesting # :nodoc: + return if @max_nesting.zero? + current_nesting = depth + 1 + current_nesting > @max_nesting and + raise NestingError, "nesting of #{current_nesting} is too deep" + end + + # Returns true, if circular data structures are checked, + # otherwise returns false. + def check_circular? + !@max_nesting.zero? + end + + # Returns true if NaN, Infinity, and -Infinity should be considered as + # valid JSON and output. + def allow_nan? + @allow_nan + end + + # Returns true, if only ASCII characters should be generated. Otherwise + # returns false. + def ascii_only? + @ascii_only + end + + # Returns true, if forward slashes are escaped. Otherwise returns false. + def escape_slash? + @escape_slash + end + + # Configure this State instance with the Hash _opts_, and return + # itself. + def configure(opts) + if opts.respond_to?(:to_hash) + opts = opts.to_hash + elsif opts.respond_to?(:to_h) + opts = opts.to_h + else + raise TypeError, "can't convert #{opts.class} into Hash" + end + for key, value in opts + instance_variable_set "@#{key}", value + end + @indent = opts[:indent] if opts.key?(:indent) + @space = opts[:space] if opts.key?(:space) + @space_before = opts[:space_before] if opts.key?(:space_before) + @object_nl = opts[:object_nl] if opts.key?(:object_nl) + @array_nl = opts[:array_nl] if opts.key?(:array_nl) + @allow_nan = !!opts[:allow_nan] if opts.key?(:allow_nan) + @ascii_only = opts[:ascii_only] if opts.key?(:ascii_only) + @depth = opts[:depth] || 0 + @buffer_initial_length ||= opts[:buffer_initial_length] + @escape_slash = !!opts[:escape_slash] if opts.key?(:escape_slash) + + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + self + end + alias merge configure + + # Returns the configuration instance variables as a hash, that can be + # passed to the configure method. + def to_h + result = {} + for iv in instance_variables + iv = iv.to_s[1..-1] + result[iv.to_sym] = self[iv] + end + result + end + + alias to_hash to_h + + # Generates a valid JSON document from object +obj+ and + # returns the result. If no valid JSON document can be + # created this method raises a + # GeneratorError exception. + def generate(obj) + result = obj.to_json(self) + JSON.valid_utf8?(result) or raise GeneratorError, + "source sequence #{result.inspect} is illegal/malformed utf-8" + result + end + + # Return the value returned by method +name+. + def [](name) + if respond_to?(name) + __send__(name) + else + instance_variable_get("@#{name}") if + instance_variables.include?("@#{name}".to_sym) # avoid warning + end + end + + def []=(name, value) + if respond_to?(name_writer = "#{name}=") + __send__ name_writer, value + else + instance_variable_set "@#{name}", value + end + end + end + + module GeneratorMethods + module Object + # Converts this object to a string (calling #to_s), converts + # it to a JSON string, and returns the result. This is a fallback, if no + # special method #to_json was defined for some object. 
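For orientation, a sketch of configuring a generator State with the options described above (the indent and newline values mirror the pretty defaults shown earlier; JSON::State resolves to the active generator's State class):

  require 'json'

  state = JSON::State.new(:indent => "  ", :space => " ", :object_nl => "\n", :array_nl => "\n")
  puts state.generate({ "pods" => ["Zephyr"] })
  # {
  #   "pods": [
  #     "Zephyr"
  #   ]
  # }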
+ def to_json(*) to_s.to_json end + end + + module Hash + # Returns a JSON string containing a JSON object, that is unparsed from + # this Hash instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + # _depth_ is used to find out nesting depth, to indent accordingly. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_shift(state) + state.object_nl.empty? or return '' + state.indent * state.depth + end + + def json_transform(state) + delim = ',' + delim << state.object_nl + result = '{' + result << state.object_nl + depth = state.depth += 1 + first = true + indent = !state.object_nl.empty? + each { |key,value| + result << delim unless first + result << state.indent * depth if indent + result << key.to_s.to_json(state) + result << state.space_before + result << ':' + result << state.space + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + unless first + result << state.object_nl + result << state.indent * depth if indent + end + result << '}' + result + end + end + + module Array + # Returns a JSON string containing a JSON array, that is unparsed from + # this Array instance. + # _state_ is a JSON::State object, that can also be used to configure the + # produced JSON string output further. + def to_json(state = nil, *) + state = State.from_state(state) + state.check_max_nesting + json_transform(state) + end + + private + + def json_transform(state) + delim = ',' + delim << state.array_nl + result = '[' + result << state.array_nl + depth = state.depth += 1 + first = true + indent = !state.array_nl.empty? + each { |value| + result << delim unless first + result << state.indent * depth if indent + if value.respond_to?(:to_json) + result << value.to_json(state) + else + result << %{"#{String(value)}"} + end + first = false + } + depth = state.depth -= 1 + result << state.array_nl + result << state.indent * depth if indent + result << ']' + end + end + + module Integer + # Returns a JSON string representation for this Integer number. + def to_json(*) to_s end + end + + module Float + # Returns a JSON string representation for this Float number. + def to_json(state = nil, *) + state = State.from_state(state) + case + when infinite? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + when nan? + if state.allow_nan? + to_s + else + raise GeneratorError, "#{self} not allowed in JSON" + end + else + to_s + end + end + end + + module String + # This string should be encoded with UTF-8 A call to this method + # returns a JSON string encoded with UTF16 big endian characters as + # \u????. + def to_json(state = nil, *args) + state = State.from_state(state) + if encoding == ::Encoding::UTF_8 + string = self + else + string = encode(::Encoding::UTF_8) + end + if state.ascii_only? + '"' << JSON.utf8_to_json_ascii(string, state.escape_slash) << '"' + else + '"' << JSON.utf8_to_json(string, state.escape_slash) << '"' + end + end + + # Module that holds the extending methods if, the String module is + # included. + module Extend + # Raw Strings are JSON Objects (the raw bytes are stored in an + # array for the key "raw"). The Ruby String can be created by this + # module method. + def json_create(o) + o['raw'].pack('C*') + end + end + + # Extends _modul_ with the String::Extend module. 
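A small sketch of the raw-string helpers above, intended for byte strings that are not valid UTF-8 (assuming the default create_id is in effect):

  require 'json'

  raw  = "\xFF\x00".b
  json = raw.to_json_raw  # => '{"json_class":"String","raw":[255,0]}'
  JSON.parse(json, create_additions: true) == raw  # => true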
+ def self.included(modul) + modul.extend Extend + end + + # This method creates a raw object hash, that can be nested into + # other data structures and will be unparsed as a raw string. This + # method should be used, if you want to convert raw strings to JSON + # instead of UTF-8 strings, e. g. binary data. + def to_json_raw_object + { + JSON.create_id => self.class.name, + 'raw' => self.unpack('C*'), + } + end + + # This method creates a JSON text from the result of + # a call to to_json_raw_object of this String. + def to_json_raw(*args) + to_json_raw_object.to_json(*args) + end + end + + module TrueClass + # Returns a JSON string for true: 'true'. + def to_json(*) 'true' end + end + + module FalseClass + # Returns a JSON string for false: 'false'. + def to_json(*) 'false' end + end + + module NilClass + # Returns a JSON string for nil: 'null'. + def to_json(*) 'null' end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure/parser.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure/parser.rb new file mode 100644 index 0000000..e1d701b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/pure/parser.rb @@ -0,0 +1,337 @@ +#frozen_string_literal: false +require 'strscan' + +module JSON + module Pure + # This class implements the JSON parser that is used to parse a JSON string + # into a Ruby data structure. + class Parser < StringScanner + STRING = /" ((?:[^\x0-\x1f"\\] | + # escaped special characters: + \\["\\\/bfnrt] | + \\u[0-9a-fA-F]{4} | + # match all but escaped special characters: + \\[\x20-\x21\x23-\x2e\x30-\x5b\x5d-\x61\x63-\x65\x67-\x6d\x6f-\x71\x73\x75-\xff])*) + "/nx + INTEGER = /(-?0|-?[1-9]\d*)/ + FLOAT = /(-? + (?:0|[1-9]\d*) + (?: + \.\d+(?i:e[+-]?\d+) | + \.\d+ | + (?i:e[+-]?\d+) + ) + )/x + NAN = /NaN/ + INFINITY = /Infinity/ + MINUS_INFINITY = /-Infinity/ + OBJECT_OPEN = /\{/ + OBJECT_CLOSE = /\}/ + ARRAY_OPEN = /\[/ + ARRAY_CLOSE = /\]/ + PAIR_DELIMITER = /:/ + COLLECTION_DELIMITER = /,/ + TRUE = /true/ + FALSE = /false/ + NULL = /null/ + IGNORE = %r( + (?: + //[^\n\r]*[\n\r]| # line comments + /\* # c-style comments + (?: + [^*/]| # normal chars + /[^*]| # slashes that do not start a nested comment + \*[^/]| # asterisks that do not end this comment + /(?=\*/) # single slash before this comment's end + )* + \*/ # the End of this comment + |[ \t\r\n]+ # whitespaces: space, horizontal tab, lf, cr + )+ + )mx + + UNPARSED = Object.new.freeze + + # Creates a new JSON::Pure::Parser instance for the string _source_. + # + # It will be configured by the _opts_ hash. _opts_ can have the following + # keys: + # * *max_nesting*: The maximum depth of nesting allowed in the parsed data + # structures. Disable depth checking with :max_nesting => false|nil|0, + # it defaults to 100. + # * *allow_nan*: If set to true, allow NaN, Infinity and -Infinity in + # defiance of RFC 7159 to be parsed by the Parser. This option defaults + # to false. + # * *freeze*: If set to true, all parsed objects will be frozen. Parsed + # string will be deduplicated if possible. + # * *symbolize_names*: If set to true, returns symbols for the names + # (keys) in a JSON object. Otherwise strings are returned, which is + # also the default. It's not possible to use this option in + # conjunction with the *create_additions* option. + # * *create_additions*: If set to true, the Parser creates + # additions when a matching class and create_id are found. This + # option defaults to false. 
+ # * *object_class*: Defaults to Hash + # * *array_class*: Defaults to Array + # * *decimal_class*: Specifies which class to use instead of the default + # (Float) when parsing decimal numbers. This class must accept a single + # string argument in its constructor. + def initialize(source, opts = {}) + opts ||= {} + source = convert_encoding source + super source + if !opts.key?(:max_nesting) # defaults to 100 + @max_nesting = 100 + elsif opts[:max_nesting] + @max_nesting = opts[:max_nesting] + else + @max_nesting = 0 + end + @allow_nan = !!opts[:allow_nan] + @symbolize_names = !!opts[:symbolize_names] + @freeze = !!opts[:freeze] + if opts.key?(:create_additions) + @create_additions = !!opts[:create_additions] + else + @create_additions = false + end + @symbolize_names && @create_additions and raise ArgumentError, + 'options :symbolize_names and :create_additions cannot be used '\ + 'in conjunction' + @create_id = @create_additions ? JSON.create_id : nil + @object_class = opts[:object_class] || Hash + @array_class = opts[:array_class] || Array + @decimal_class = opts[:decimal_class] + @match_string = opts[:match_string] + end + + alias source string + + def reset + super + @current_nesting = 0 + end + + # Parses the current JSON string _source_ and returns the + # complete data structure as a result. + def parse + reset + obj = nil + while !eos? && skip(IGNORE) do end + if eos? + raise ParserError, "source is not valid JSON!" + else + obj = parse_value + UNPARSED.equal?(obj) and raise ParserError, + "source is not valid JSON!" + obj.freeze if @freeze + end + while !eos? && skip(IGNORE) do end + eos? or raise ParserError, "source is not valid JSON!" + obj + end + + private + + def convert_encoding(source) + if source.respond_to?(:to_str) + source = source.to_str + else + raise TypeError, + "#{source.inspect} is not like a string" + end + if source.encoding != ::Encoding::ASCII_8BIT + source = source.encode(::Encoding::UTF_8) + source.force_encoding(::Encoding::ASCII_8BIT) + end + source + end + + # Unescape characters in strings. + UNESCAPE_MAP = Hash.new { |h, k| h[k] = k.chr } + UNESCAPE_MAP.update({ + ?" => '"', + ?\\ => '\\', + ?/ => '/', + ?b => "\b", + ?f => "\f", + ?n => "\n", + ?r => "\r", + ?t => "\t", + ?u => nil, + }) + + EMPTY_8BIT_STRING = '' + if ::String.method_defined?(:encode) + EMPTY_8BIT_STRING.force_encoding Encoding::ASCII_8BIT + end + + STR_UMINUS = ''.respond_to?(:-@) + def parse_string + if scan(STRING) + return '' if self[1].empty? + string = self[1].gsub(%r((?:\\[\\bfnrt"/]|(?:\\u(?:[A-Fa-f\d]{4}))+|\\[\x20-\xff]))n) do |c| + if u = UNESCAPE_MAP[$&[1]] + u + else # \uXXXX + bytes = EMPTY_8BIT_STRING.dup + i = 0 + while c[6 * i] == ?\\ && c[6 * i + 1] == ?u + bytes << c[6 * i + 2, 2].to_i(16) << c[6 * i + 4, 2].to_i(16) + i += 1 + end + JSON.iconv('utf-8', 'utf-16be', bytes) + end + end + if string.respond_to?(:force_encoding) + string.force_encoding(::Encoding::UTF_8) + end + + if @freeze + if STR_UMINUS + string = -string + else + string.freeze + end + end + + if @create_additions and @match_string + for (regexp, klass) in @match_string + klass.json_creatable? 
or next + string =~ regexp and return klass.json_create(string) + end + end + string + else + UNPARSED + end + rescue => e + raise ParserError, "Caught #{e.class} at '#{peek(20)}': #{e}" + end + + def parse_value + case + when scan(FLOAT) + if @decimal_class then + if @decimal_class == BigDecimal then + BigDecimal(self[1]) + else + @decimal_class.new(self[1]) || Float(self[1]) + end + else + Float(self[1]) + end + when scan(INTEGER) + Integer(self[1]) + when scan(TRUE) + true + when scan(FALSE) + false + when scan(NULL) + nil + when !UNPARSED.equal?(string = parse_string) + string + when scan(ARRAY_OPEN) + @current_nesting += 1 + ary = parse_array + @current_nesting -= 1 + ary + when scan(OBJECT_OPEN) + @current_nesting += 1 + obj = parse_object + @current_nesting -= 1 + obj + when @allow_nan && scan(NAN) + NaN + when @allow_nan && scan(INFINITY) + Infinity + when @allow_nan && scan(MINUS_INFINITY) + MinusInfinity + else + UNPARSED + end + end + + def parse_array + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @array_class.new + delim = false + loop do + case + when eos? + raise ParserError, "unexpected end of string while parsing array" + when !UNPARSED.equal?(value = parse_value) + delim = false + result << value + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(ARRAY_CLOSE) + ; + else + raise ParserError, "expected ',' or ']' in array at '#{peek(20)}'!" + end + when scan(ARRAY_CLOSE) + if delim + raise ParserError, "expected next element in array at '#{peek(20)}'!" + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in array at '#{peek(20)}'!" + end + end + result + end + + def parse_object + raise NestingError, "nesting of #@current_nesting is too deep" if + @max_nesting.nonzero? && @current_nesting > @max_nesting + result = @object_class.new + delim = false + loop do + case + when eos? + raise ParserError, "unexpected end of string while parsing object" + when !UNPARSED.equal?(string = parse_string) + skip(IGNORE) + unless scan(PAIR_DELIMITER) + raise ParserError, "expected ':' in object at '#{peek(20)}'!" + end + skip(IGNORE) + unless UNPARSED.equal?(value = parse_value) + result[@symbolize_names ? string.to_sym : string] = value + delim = false + skip(IGNORE) + if scan(COLLECTION_DELIMITER) + delim = true + elsif match?(OBJECT_CLOSE) + ; + else + raise ParserError, "expected ',' or '}' in object at '#{peek(20)}'!" + end + else + raise ParserError, "expected value in object at '#{peek(20)}'!" + end + when scan(OBJECT_CLOSE) + if delim + raise ParserError, "expected next name, value pair in object at '#{peek(20)}'!" + end + if @create_additions and klassname = result[@create_id] + klass = JSON.deep_const_get klassname + break unless klass and klass.json_creatable? + result = klass.json_create(result) + end + break + when skip(IGNORE) + ; + else + raise ParserError, "unexpected token in object at '#{peek(20)}'!" 
+ end + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/version.rb b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/version.rb new file mode 100644 index 0000000..35e8dd3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/json-2.6.1/lib/json/version.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: false +module JSON + # JSON version + VERSION = '2.6.1' + VERSION_ARRAY = VERSION.split(/\./).map { |x| x.to_i } # :nodoc: + VERSION_MAJOR = VERSION_ARRAY[0] # :nodoc: + VERSION_MINOR = VERSION_ARRAY[1] # :nodoc: + VERSION_BUILD = VERSION_ARRAY[2] # :nodoc: +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/.autotest b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/.autotest new file mode 100644 index 0000000..b6fbce5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/.autotest @@ -0,0 +1,34 @@ +# -*- ruby -*- + +require 'autotest/restart' +require 'autotest/rcov' if ENV['RCOV'] + +Autotest.add_hook :initialize do |at| + at.testlib = 'minitest/autorun' + + bench_tests = %w(TestMinitestBenchmark) + mock_tests = %w(TestMinitestMock TestMinitestStub) + spec_tests = %w(TestMinitestReporter TestMetaStatic TestMeta + TestSpecInTestCase) + unit_tests = %w(TestMinitestGuard TestMinitestRunnable + TestMinitestRunner TestMinitestTest TestMinitestUnit + TestMinitestUnitInherited TestMinitestUnitOrder + TestMinitestUnitRecording TestMinitestUnitTestCase) + + { + bench_tests => "test/minitest/test_minitest_benchmark.rb", + mock_tests => "test/minitest/test_minitest_mock.rb", + spec_tests => "test/minitest/test_minitest_reporter.rb", + unit_tests => "test/minitest/test_minitest_unit.rb", + }.each do |klasses, file| + klasses.each do |klass| + at.extra_class_map[klass] = file + end + end + + at.add_exception 'coverage.info' + at.add_exception 'coverage' +end + +# require 'autotest/rcov' +# Autotest::RCov.command = 'rcov_info' diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/History.rdoc b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/History.rdoc new file mode 100644 index 0000000..0b00862 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/History.rdoc @@ -0,0 +1,1397 @@ +=== 5.14.2 / 2020-08-31 + +* 1 bug fix: + + * Bumped ruby version to include 3.0 (trunk). + +=== 5.14.1 / 2020-05-15 + +* 3 minor enhancements: + + * Minitest.filter_backtrace returns original backtrace if filter comes back empty. + * Minitest::BacktraceFilter now returns entire backtrace if $MT_DEBUG set in env. + * Return true on a successful refute. (jusleg) + +* 1 bug fix: + + * Fixed expectation doco to not use global expectations. + +=== 5.14.0 / 2020-01-11 + +* 2 minor enhancements: + + * Block-assertions (eg assert_output) now error if raised inside the block. (casperisfine) + * Changed assert_raises to only catch Assertion since that covers Skip and friends. + +* 3 bug fixes: + + * Added example for value wrapper with block to Expectations module. (stomar) + * Fixed use of must/wont_be_within_delta on Expectation instance. (stomar) + * Renamed UnexpectedError#exception to #error to avoid problems with reraising. (casperisfine) + +=== 5.13.0 / 2019-10-29 + +* 9 minor enhancements: + + * Added Minitest::Guard#osx? + * Added examples to documentation for assert_raises. (lxxxvi) + * Added expectations #path_must_exist and #path_wont_exist. Not thrilled with the names. + * Added fail_after(year, month, day, msg) to allow time-bombing after a deadline. + * Added skip_until(year, month, day, msg) to allow deferring until a deadline. 
+ * Deprecated Minitest::Guard#maglev? + * Deprecated Minitest::Guard#rubinius? + * Finally added assert_path_exists and refute_path_exists. (deivid-rodriguez) + * Refactored and pulled Assertions#things_to_diff out of #diff. (BurdetteLamar) + +* 3 bug fixes: + + * Fix autorun bug that affects fork exit status in tests. (dylanahsmith/jhawthorn) + * Improved documentation for _/value/expect, especially for blocks. (svoop) + * Support new Proc#to_s format. (ko1) + +=== 5.12.2 / 2019-09-28 + +* 1 bug fix: + + * After chatting w/ @y-yagi and others, decided to lower support to include ruby 2.2. + +=== 5.12.1 / 2019-09-28 + +* 1 minor enhancement: + + * Added documentation for Reporter classes. (sshaw) + +* 3 bug fixes: + + * Avoid using 'match?' to support older ruby versions. (y-yagi) + * Fixed broken link to reference on goodness-of-fit testing. (havenwood) + * Update requirements in readme and Rakefile/hoe spec. + +=== 5.12.0 / 2019-09-22 + +* 8 minor enhancements: + + * Added a descriptive error if assert_output or assert_raises called without a block. (okuramasafumi) + * Changed mu_pp_for_diff to make having both \n and \\n easier to debug. + * Deprecated $N for specifying number of parallel test runners. Use MT_CPU. + * Deprecated use of global expectations. To be removed from MT6. + * Extended Assertions#mu_pp to encoding validity output for strings to improve diffs. + * Extended Assertions#mu_pp to output encoding and validity if invalid to improve diffs. + * Extended Assertions#mu_pp_for_diff to make escaped newlines more obvious in diffs. + * Fail gracefully when expectation used outside of `it`. + +* 3 bug fixes: + + * Check `option[:filter]` klass before match. Fixes 2.6 warning. (y-yagi) + * Fixed Assertions#diff from recalculating if set to nil + * Fixed spec section of readme to not use deprecated global expectations. (CheezItMan) + +=== 5.11.3 / 2018-01-26 + +* 1 bug fix: + + * Pushed #error? up to Reportable module. (composerinteralia) + +=== 5.11.2 / 2018-01-25 + +* 1 minor enhancement: + + * Reversed Test < Result. Back to < Runnable and using Reportable for shared code. + +* 2 bug fixes: + + * Fixed Result#location for instances of Test. (alexisbernard) + * Fixed deprecation message for Runnable#marshal_dump. (y-yagi) + +=== 5.11.1 / 2018-01-02 + +* 1 bug fix: + + * Fixed Result (a superclass of Test) overriding Runnable's name accessors. (y-yagi, MSP-Greg) + +=== 5.11.0 / 2018-01-01 + +* 2 major enhancements: + + * Added Minitest::Result and Minitest::Result.from(runnable). + * Changed Minitest::Test to subclass Result and refactored methods up. + +* 7 minor enhancements: + + * Added --no-plugins and MT_NO_PLUGINS to bypass MT plugin autoloading. Helps with bad actors installed globally. + * Added bench_performance_{logarithmic,power} for spec-style benchmarks. (rickhull) + * Added deprecation warning for Runnable#marshal_dump. + * Minitest.run_one_method now checks for instance of Result, not exact same class. + * Minitest::Test.run returns a Result version of self, not self. + * ProgressReporter#prerecord now explicitly prints klass.name. Allows for fakers. + +* 4 bug fixes: + + * Object.stub no longer calls the passed block if stubbed with a callable. + * Object.stub now passes blocks down to the callable result. + * Pushed Minitest::Test#time & #time_it up to Runnable. + * Test nil equality directly in assert_equal. Fixes #679. (voxik) + +=== 5.11.0b1 / 2017-12-20 + +* 2 major enhancements: + + * Added Minitest::Result and Minitest::Result.from(runnable). 
+ * Changed Minitest::Test to subclass Result and refactored methods up. + +* 6 minor enhancements: + + * Added --no-plugins and MT_NO_PLUGINS to bypass MT plugin autoloading. Helps with bad actors installed globally. + * Added bench_performance_{logarithmic,power} for spec-style benchmarks. (rickhull) + * Minitest.run_one_method now checks for instance of Result, not exact same class. + * Minitest::Test.run returns a Result version of self, not self. + * ProgressReporter#prerecord now explicitly prints klass.name. Allows for fakers. + * Removed Runnable.marshal_dump/load. + +* 4 bug fixes: + + * Object.stub no longer calls the passed block if stubbed with a callable. + * Object.stub now passes blocks down to the callable result. + * Pushed Minitest::Test#time & #time_it up to Runnable. + * Test nil equality directly in assert_equal. Fixes #679. (voxik) + +=== 5.10.3 / 2017-07-21 + +* 1 minor enhancement: + + * Extended documentation for Mock#expect for multiple calls to mock object. (insti) + +* 2 bug fixes: + + * Finished off missing doco. + * Fixed verbose output on parallelize_me! classes. (chanks) + +=== 5.10.2 / 2017-05-09 + +* 1 minor enhancement: + + * Added suggestion in minitest/hell to install minitest/proveit. + +* 7 bug fixes: + + * Expand MT6 to Minitest 6. (xaviershay) + * Fixed location of assert_send deprecation. (rab) + * Fixed location of nil assert_equal deprecation to work with expectations. (jeremyevans) + * Fixed minitest/hell to use parallelize_me! (azul) + * Made deprecation use warn so -W0 will silence it. + * Workaround for rdoc nodoc generation bug that totally f'd up minitest doco. (Paxa) + * Write aggregated_results directly to the IO object to avoid mixed encoding errors. (tenderlove) + +=== 5.10.1 / 2016-12-01 + +* 1 bug fix: + + * Added a hack/kludge to deal with missing #prerecord on reporters that aren't properly subclassing AbstractReporter (I'm looking at you minitest-reporters) + +=== 5.10.0 / 2016-11-30 + +* 1 major enhancement: + + * Deprecated ruby 1.8, 1.9, possibly 2.0, assert_send, & old MiniTest namespace. + +* 3 minor enhancements: + + * Warn if assert_equal expects a nil. This will fail in minitest 6+. (tenderlove) + * Added AbstractReporter#prerecord and extended ProgressReporter and CompositeReporter to use it. + * Minor optimization: remove runnables with no runnable methods before run. + +* 3 bug fixes: + + * Fix assert_throw rescuing any NameError and ArgumentError. (waldyr) + * Clean up (most of the) last remaining vestiges of minitest/unit. + * 2.4: removed deprecation warnings when referring to Fixnum. + +=== 5.9.1 / 2016-09-25 + +* 2 bug fixes: + + * Re-release to refresh gem certificate signing. ugh. + * Fixed hoe/minitest to not augment load path if we're actually testing minitest. + +=== 5.9.0 / 2016-05-16 + +* 8 minor enhancements: + + * Added Minitest.info_signal accessors to customize signal for test run info. (nate) + * Added assert_mock to make it more clear that you're testing w/ them. + * Added negative filter by test name. (utilum) + * Added warning to README that 1.8 and 1.9 support will be dropped in minitest 6. + * Automatically activate minitest/hell if $MT_HELL is defined. + * Improved default error messages for assert and refute. (bhenderson) + * minitest/hell now tries to require minitest/proveit + * mu_pp for strings prints out non-standard encodings to improve assert_equal diffs. + +* 1 bug fix: + + * Removed Interrupt from PASSTHROUGH_EXCEPTIONS (already handled). 
(waldyr) + +=== 5.8.5 / 2016-09-25 + +* 2 bug fixes: + + * Re-release to refresh gem certificate signing. ugh. + * Fixed hoe/minitest to not augment load path if we're actually testing minitest. + +=== 5.8.4 / 2016-01-21 + +* 1 bug fix: + + * Allow Minitest::Assertion to pass through assert_raises so inner failures are dealt with first. + +=== 5.8.3 / 2015-11-17 + +* 1 minor enhancement: + + * Added extra note about mocks and threads to readme. (zamith) + +* 1 bug fix: + + * Fixed bug in Mock#verify. (pithub/zamith) + +=== 5.8.2 / 2015-10-26 + +* 1 bug fix: + + * Fixed using parallelize_me! and capture_io (or any locking io). (arlt/tenderlove) + +=== 5.8.1 / 2015-09-23 + +* 1 minor enhancement: + + * Refactor assert_raises to be cleaner and to pass SystemExit and SignalException. (bhenderson) + +=== 5.8.0 / 2015-08-06 + +* 2 minor enhancements: + + * Add optional delegation mechanism to extend object with a mock. (zamith) + * Return early if there are no filtered methods. (jeremyevans) + +* 1 bug fix: + + * Don't extend io with pride if io is not a tty. (toy) + +=== 5.7.0 / 2015-05-27 + +* 1 major enhancement: + + * assert_raises now matches subclasses of the expected exception types. (jeremyevans) + +* 3 minor enhancements: + + * Added :block type for minitest/spec's #infect_an_assertion. (jeremyevans) + * Inline verification error messages in minitest/mock for GC performance. (zamith) + * assert_raises defaults to RuntimeError if not specified. (jeremyevans) + +* 4 bug fixes: + + * Added 'class' to minitest/mock's overridden_methods list. (zamith) + * Added file/line to infect_an_assertion's class_eval call. (jeremyevans) + * Cleared UnexpectedError's mesg w/ generic string. + * Fixed non-proc-oriented expectations when used on proc target. (jeremyevans) + +=== 5.6.1 / 2015-04-27 + +* 2 bug fixes: + + * Added Minitest.clock_time and switched all Time.now to it. (tenderlove) + * Moved Minitest::Expectations#_ into Minitest::Spec::DSL. + +=== 5.6.0 / 2015-04-13 + +* 4 major enhancements: + + * Added Minitest::Expectation value monad. + * Added Minitest::Expectations#_ that returns an Expectation. Aliased to value. + * All expectations are added to Minitest::Expectation. + * At some point, the methods on Object will be deprecated and then removed. + +* 4 minor enhancements: + + * Added a note about bundle exec pitfall in ruby 2.2+. (searls) + * Lazily start the parallel executor. (tenderlove) + * Make mocks more debugger-friendly (edward) + * Print out the current test run on interrupt. (riffraff) + +* 3 bug fixes: + + * Fix failing test under Windows. (kimhmadsen) + * Record mocked calls before they happen so mocks can raise exceptions easier (tho I'm not a fan). (corecode) + * Tried to clarify mocks vs stubs terminology better. (kkirsche) + +=== 5.5.1 / 2015-01-09 + +* 1 bug fix: + + * Fixed doco problems. (zzak) + +=== 5.5.0 / 2014-12-12 // mri 2.2.0 (as a real gem) + +* 1 minor enhancement: + + * Allow seed to be given via ENV for rake test loader sadness: eg rake SEED=42. + +=== 5.4.3 / 2014-11-11 + +* 2 bug fixes: + + * Clarified requirements for ruby are now 1.8.7 or better. + * Force encode error output in case mal-encoded exception is raised. (jasonrclark) + +=== 5.4.2 / 2014-09-26 + +* 2 minor enhancements: + + * Extract teardown method list. + * Thanks to minitest-gcstats got a 5-10% speedup via reduced GC! 
+ +=== 5.4.1 / 2014-08-28 + +* 1 bug fix: + + * Fixed specs hidden by nesting/ordering bug (blowmage/apotonick) + +=== 5.4.0 / 2014-07-07 + +* 2 minor enhancements: + + * Kernel#describe extended to splat additional_desc. + * Spec#spec_type extended to take a splat of additional items, passed to matcher procs. + +* 1 bug fix: + + * minitest/spec should require minitest/test, not minitest/unit. (doudou) + +=== 5.3.5 / 2014-06-17 + +* 1 minor enhancement: + + * Spit and polish (mostly spit). + +=== 5.3.4 / 2014-05-15 + +* 1 minor enhancement: + + * Test classes are randomized before running. (judofyr) + +=== 5.3.3 / 2014-04-14 + +* 1 bug fix: + + * Fixed using expectations w/ DSL in Test class w/o describe. (blowmage+others) + +=== 5.3.2 / 2014-04-02 + +* 1 bug fix: + + * Fixed doco on Assertions.assertions. (xaviershay) + +=== 5.3.1 / 2014-03-14 + +* 1 minor enhancement: + + * Modified verbage on bad 'let' names to be more helpful. (Archytaus) + +* 1 bug fix: + + * Fixed 2 cases still using MiniTest. (mikesea) + +=== 5.3.0 / 2014-02-25 + +* 1 minor enhancement: + + * Mocked methods can take a block to verify state. Seattle.rb 12 bday present from ernie! Thanks!! + +=== 5.2.3 / 2014-02-10 + +* 1 bug fix: + + * Fixed Spec#let check to allow overriding of other lets. (mvz) + +=== 5.2.2 / 2014-01-22 + +* 1 minor enhancement: + + * Spec#let raises ArgumentError if you override _any_ instance method (except subject). (rynr) + +* 1 bug fix: + + * Fixed up benchmark spec doco and added a test to demonstrate. (bhenderson) + +=== 5.2.1 / 2014-01-07 + +* 1 bug fix: + + * Properly deal with horrible mix of runtime load errors + other at_exit handlers. (dougo/chqr) + +=== 5.2.0 / 2013-12-13 + +* 1 minor enhancement: + + * Change expectations to allow calling most on procs (but not calling the proc). (bhenderson+others) + +=== 5.1.0 / 2013-12-05 + +* 1 minor enhancement: + + * Use a Queue for scheduling parallel tests. (tenderlove) + +* 1 bug fix: + + * Fixed misspelling in doco. (amatsuda) + +=== 5.0.8 / 2013-09-20 + +* 1 bug fix: + + * Fixed siginfo handler by rearranging reporters and fixing to_s. (tenderlove) + +=== 5.0.7 / 2013-09-05 + +* 2 minor enhancements: + + * Added clarification about the use of thread local variables in expectations. (jemc) + * Added extra message about skipped tests, if any. Disable globally with $MT_NO_SKIP_MSG. + +* 2 bug fixes: + + * Only require minitest, not minitest/autorun in pride_plugin. (judofyr) + * Require rubygems in load_plugins in case you're not using minitest/autorun. + +=== 5.0.6 / 2013-06-28 + +* 3 minor enhancements: + + * Allow stub to pass args to blocks. (swindsor) + * Improved warning message about minitest/autorun to address 1.9's minitest/autorun. + * Made minitest/test require minitest as needed. For lib writers. (erikh) + +* 1 bug fix: + + * Fixed missing require in minitest/test. (erikh) + +=== 4.7.5 / 2013-06-21 // mri 2.1.1 + +* 2 bug fixes: + + * Fix Spec#describe_stack to be thread local. + * Fix multithreaded test failures by defining Time local to mock test namespace + +=== 5.0.5 / 2013-06-20 + +* 6 bug fixes: + + * DOH! Fixed the rest of the new casing on Minitest. (splattael) + * Fixed typo on minitest/mock rdoc. (mrgilman/guiceolin) + * Make Spec::DSL.describe_stack thread local to avoid failing on my own tests. + * Make a fake Time.now local to the tests so they won't interfere with real reporter timings. + * Make everything mockable by wrapping all 'special' methods in a smarter wrapper. 
(bestie) + * Raise ArgumentError if let name starts with 'test'. (johnmaxwell) + +=== 5.0.4 / 2013-06-07 + +* 5 minor enhancements: + + * Added AbstractReporter, defining required Reporter API to quack properly. + * Added doco for writing reporters. + * Refactored Reporter into ProgressReporter and SummaryReporter. (idea: phiggins, code:me+scotch) + * Refactored SummaryReporter pushing up to StatisticsReporter. (phiggins) + * Removed Reporter#run_and_report... cleaner, but doesn't "fit" in the API. + +=== 5.0.3 / 2013-05-29 + +* 4 minor enhancements: + + * Added Runnable.with_info_handler and Runnable.on_signal. + * Moved io.sync restore to Reporter#run_and_report. + * Refactored inner loop of Reporter#report to #to_s. Callable for status updates. + * Restored MT4's mid-run report (^t). (tenderlove). + +=== 5.0.2 / 2013-05-20 + +* 3 bug fixes: + + * Gem.find_files is smarter than I remember... cause I wrote it that way. *sigh* I'm getting old. + * Pride wasn't doing puts through its #io. (tmiller/tenderlove) + * Replaced Runnable#dup and Test#dup with marshal_dump/load. Too many problems cropping up on untested rails code. (tenderlove/rubys) + +=== 5.0.1 / 2013-05-14 + +* 2 bug fixes: + + * Documented Assertions' need for @assertions to be defined by the includer. + * Only load one plugin version per name. Tries for latest. + +=== 5.0.0 / 2013-05-10 + +Oh god... here we go... + +Minitest 5: + +* 4 deaths in the family: + + * MiniTest.runner is dead. No more manager objects. + * MiniTest::Unit#record is dead. Use a Reporter instance instead. + * MiniTest::Unit._run_* is dead. Runnable things are responsible for their own runs. + * MiniTest::Unit.output is dead. No more centralized IO. + +* 12 major (oft incompatible) changes: + + * Renamed MiniTest to Minitest. Your pinkies will thank me. (aliased to MiniTest) + * Removed MiniTest::Unit entirely. No more manager objects. + * Added Minitest::Runnable. Everything minitest can run subclasses this. + * Renamed MiniTest::Unit::TestCase to Minitest::Test (subclassing Runnable). + * Added Minitest::Benchmark. + * Your benchmarks need to move to their own subclass. + * Benchmarks using the spec DSL have to have "Bench" somewhere in their describe. + * MiniTest::Unit.after_tests moved to Minitest.after_tests + * MiniTest::Unit.autorun is now Minitest.autorun. Just require minitest/autorun pls. + * Removed ParallelEach#grep since it isn't used anywhere. + * Renamed Runnable#__name__ to Runnable#name (but uses @NAME internally). + * Runnable#run needs to return self. Allows for swapping of results as needed. + +* 8 minor moves: + + * Moved Assertions module to minitest/assertions.rb + * Moved Expectations module to minitest/expectations.rb + * Moved Test to minitest/test.rb + * Moved everything else in minitest/unit.rb to minitest.rb + * minitest/unit.rb is now just a small (user-test only) compatibility layer. + * Moved most of minitest/pride into minitest/pride_plugin. + * minitest/pride now just activates pride. + * Moved ParallelEach under Minitest. + +* 9 additions: + + * Added a plugin system that can extend command-line options. + * Added Minitest.extensions. + * Added Minitest.reporter (only available during startup). + * Added Minitest.run(args). This is the very top of any Minitest run. + * Added Minitest::Reporter. Everything minitest can report goes through here. + * Minitest.reporter is a composite so you can add your own. + * Added Minitest::CompositeReporter. Much easier to extend with your own reporters. 
+ * Added UnexpectedError, an Assertion subclass, to wrap up errors. + * Minitest::Test#run is now freakin' beautiful. 47 -> 17 loc + +* 11 other: + + * Removed Object.infect_with_assertions (it was already dead code). + * Runnables are responsible for knowing their result_code (eg "." or "F"). + * Minitest.autorun now returns boolean, not exit code. + * Added FAQ entry for extending via modules. (phiggins) + * Implement Runnable#dup to cleanse state back to test results. Helps with serialization. pair:tenderlove + * Moved ParallelEach under Minitest. + * Runnable#run needs to return self. Allows for swapping of results as needed. + * Minitest.init_plugins passes down options. + * Minitest.load_plugins only loads once. + * Fixed minitest/pride to work with rake test loader again. (tmiller) + * Added count/size to ParallelEach to fix use w/in stdlib's test/unit. :( (btaitelb) + +* 5 voodoo: + + * Removed mutex from minitest.rb (phiggins) + * Removed mutex from test.rb (phiggins) + * Removed Minitest::Reporter.synchronize (phiggins) + * Removed Minitest::Test.synchronize (phiggins) + * Upon loading minitest/parallel_each, record, capture_io and capture_subprocess_io are doped with synchronization code. (phiggins) + +=== 4.7.4 / 2013-05-01 + +This is probably the last release of the 4.x series. It will be merged +to ruby and will be put into maintenance mode there. + +I'm not set in stone on this, but at this point further development of +minitest (5+) will be gem-only. It is just too hard to work w/in +ruby-core w/ test-unit compatibility holding minitest development +back. + +* 2 minor enhancements: + + * Added count/size to ParallelEach to fix use w/in stdlib's test/unit. :( (btaitelb) + * Allow disabling of info_signal handler in runner. (erikh) + +=== 4.7.3 / 2013-04-20 + +* 1 bug fix: + + * Reverted stubbing of module methods change. Stub the user, not the impl. (ab9/tyabe) + +=== 4.7.2 / 2013-04-18 + +* 2 bug fixes: + + * Fixed inconsistency in refute_in_delta/epsilon. I double negatived my logic. (nettsundere) + * Fixed stubbing of module methods (eg Kernel#sleep). (steveklabnik) + +=== 4.7.1 / 2013-04-09 + +* 1 minor enhancement: + + * Added FAQ section to README + +* 1 bug fix: + + * Fixed bug where guard runs tests bypassing minitest/autorun and an ivar isn't set right. (darrencauthon) + +=== 4.7.0 / 2013-03-18 + +* 1 major enhancement: + + * Refactored MiniTest::Spec into MiniTest::Spec::DSL. + +* 1 bug fix: + + * Removed $DEBUG handler that detected when test/unit and minitest were both loaded. (tenderlove) + +=== 4.6.2 / 2013-02-27 + +* 1 minor enhancement: + + * Change error output to match Class#method, making it easier to use -n filter. + +=== 4.6.1 / 2013-02-14 + +* 1 bug fix: + + * Fixed an option processing bug caused by test/unit's irresponsibly convoluted code. (floehopper) + +=== 4.6.0 / 2013-02-07 + +* 3 major enhancements: + + * Removed ::reset_setup_teardown_hooks + * Removed the long deprecated assert_block + * Removed the long deprecated lifecycle hooks: add_(setup|teardown)_hook + +* 1 minor enhancement: + + * Allow filtering tests by suite name as well as test name. (lazyatom) + +* 2 bug fixes: + + * Made hex handling (eg object_ids) in mu_pp_for_diff more specific. (maxim) + * nodoc top-level module. (zzak) + +=== 4.5.0 / 2013-01-22 + +* 1 major enhancement: + + * Rearranged minitest/unit.rb so NO parallelization code is loaded/used until you opt-in. + +* 4 minor enhancements: + + * Added TestCase#skipped? for teardown guards + * Added maglev? 
guard + * Document that record can be sent twice if teardown fails or errors (randycoulman) + * Errors in teardown are now recorded. (randycoulman) + +* 3 bug fixes: + + * Added hacks and skips to get clean test runs on maglev + * Modified float tests for maglev float output differences. Not sure this is right. Not sure I care. + * Test for existance of diff.exe instead of assuming they have devkit. (blowmage/Cumbayah) + +=== 4.4.0 / 2013-01-07 + +* 3 minor enhancements: + + * Added fit_logarithic and assert_performance_logarithmic. (ktheory) + * Merge processed options so others can mess with defaults. (tenderlove) + * TestCase#message can now take another proc to defer custom message cost. (ordinaryzelig/bhenderson) + +* 1 bug fix: + + * TestCase#passed? now true if test is skipped. (qanhd) + +=== 4.3.3 / 2012-12-06 + +* 1 bug fix: + + * Updated information about stubbing. (daviddavis) + +=== 4.3.2 / 2012-11-27 // mri 2.0.0 + +* 1 minor enhancement: + + * Improved assert_equals error message to point you at #== of member objects. (kcurtin) + +=== 4.3.1 / 2012-11-23 + +* 1 bug fix: + + * Moved test_children to serial testcase to prevent random failures. + +=== 4.3.0 / 2012-11-17 + +* 4 minor enhancements: + + * Allow #autorun to run even if loaded with other test libs that call exit. (sunaku) + * Do not include Expectations in Object if $MT_NO_EXPECTATIONS is set (experimental?) + * Gave some much needed love to assert_raises. + * Mock#expect can take a block to custom-validate args. (gmoothart) + +=== 4.2.0 / 2012-11-02 + +* 4 major enhancements: + + * Added minitest/hell - run all your tests through the ringer! + * Added support for :parallel test_order to run test cases in parallel. + * Removed last_error and refactored runner code to be threadsafe. + * _run_suites now runs suites in parallel if they opt-in. + +* 4 minor enhancements: + + * Added TestCase#synchronize + * Added TestCase.make_my_diffs_pretty! + * Added TestCase.parallelize_me! + * Lock on capture_io for thread safety (tenderlove) + +=== 4.1.0 / 2012-10-05 + +* 2 minor enhancements: + + * Added skip example to readme. (dissolved) + * Extracted backtrace filter to object. (tenderlove) + +* 1 bug fix: + + * OMG I'm so dumb. Fixed access to deprecated hook class methods. I hate ruby modules. (route) + +=== 4.0.0 / 2012-09-28 + +* 1 major enhancement: + + * The names of a privately-used undocumented constants are Super Important™. + +* 1 minor enhancement: + + * Support stubbing methods that would be handled via method_missing. (jhsu) + +* 3 bug fixes: + + * Add include_private param to MiniTest::Mock#respond_to? (rf-) + * Fixed use of minitest/pride with --help. (zw963) + * Made 'No visible difference.' message more clear. (ckrailo) + +=== 3.5.0 / 2012-09-21 + +* 1 minor enhancement: + + * Added #capture_subprocess_io. (route) + +=== 3.4.0 / 2012-09-05 + +* 2 minor enhancements: + + * assert_output can now take regexps for expected values. (suggested by stomar) + * Clarified that ruby 1.9/2.0's phony gems cause serious confusion for rubygems. + +=== 3.3.0 / 2012-07-26 + +* 1 major enhancement: + + * Deprecated add_(setup|teardown)_hook in favor of (before|after)_(setup|teardown) [2013-01-01] + +* 4 minor enhancements: + + * Refactored deprecated hook system into a module. + * Refactored lifecycle hooks into a module. + * Removed after_setup/before_teardown + run_X_hooks from Spec. + * Spec#before/after now do a simple define_method and call super. DUR. + +* 2 bug fixes: + + * Fixed #passed? 
when used against a test that called flunk. (floehopper) + * Fixed rdoc bug preventing doco for some expectations. (stomar). + +=== 3.2.0 / 2012-06-26 + +* 1 minor enhancement: + + * Stubs now yield self. (peterhellberg) + +* 1 bug fix: + + * Fixed verbose test that only fails when run in verbose mode. mmmm irony. + +=== 3.1.0 / 2012-06-13 + +* 2 minor enhancements: + + * Removed LONG deprecated Unit.out accessor + * Removed generated method name munging from minitest/spec. (ordinaryzelig/tenderlove) + +=== 3.0.1 / 2012-05-24 + +* 1 bug fix: + + * I'm a dumbass and refactored into Mock#call. Renamed to #__call so you can mock #call. (mschuerig) + +=== 3.0.0 / 2012-05-08 + +* 3 major enhancements: + + * Added Object#stub (in minitest/mock.rb). + * Mock#expect mocks are used in the order they're given. + * Mock#verify now strictly compares against expect calls. + +* 3 minor enhancements: + + * Added caller to deprecation message. + * Mock error messages are much prettier. + * Removed String check for RHS of assert/refute_match. This lets #to_str work properly. + +* 1 bug fix: + + * Support drive letter on Windows. Patch provided from MRI by Usaku NAKAMURA. (ayumin) + +=== 2.12.1 / 2012-04-10 + +* 1 minor enhancement: + + * Added ruby releases to History.txt to make it easier to see what you're missing + +* 1 bug fix: + + * Rolled my own deprecate msg to allow MT to work with rubygems < 1.7 + +=== 2.12.0 / 2012-04-03 + +* 4 minor enhancements: + + * ::it returns test method name (wojtekmach) + * Added #record method to runner so runner subclasses can cleanly gather data. + * Added Minitest alias for MiniTest because even I forget. + * Deprecated assert_block!! Yay!!! + +* 1 bug fix: + + * Fixed warning in i_suck_and_my_tests_are_order_dependent! (phiggins) + +=== 2.11.4 / 2012-03-20 + +* 2 minor enhancements: + + * Updated known extensions + * You got your unicode in my tests! You got your tests in my unicode! (fl00r) + +* 1 bug fix: + + * Fixed MiniTest::Mock example in the readme. (conradwt) + +=== 2.11.3 / 2012-02-29 + +* 2 bug fixes: + + * Clarified that assert_raises returns the exception for further testing + * Fixed assert_in_epsilon when both args are negative. (tamc) + +=== 2.11.2 / 2012-02-14 + +* 1 minor enhancement: + + * Display failures/errors on SIGINFO. (tenderlove) + +* 1 bug fix: + + * Fixed MiniTest::Unit.after_tests for Ruby 1.9.3. (ysbaddaden) + +=== 2.11.1 / 2012-02-01 + +* 3 bug fixes: + + * Improved description for --name argument. (drd) + * Ensure Mock#expect's expected args is an Array. (mperham) + * Ensure Mock#verify verifies multiple expects of the same method. (chastell) + +=== 2.11.0 / 2012-01-25 + +* 2 minor enhancements: + + * Added before / after hooks for setup and teardown. (tenderlove) + * Pushed run_setup_hooks down to Spec. (tenderlove) + +=== 2.10.1 / 2012-01-17 + +* 1 bug fix: + + * Fixed stupid 1.9 path handling grumble grumble. (graaff) + +=== 2.10.0 / 2011-12-20 + +* 3 minor enhancements: + + * Added specs for must/wont be_empty/respond_to/be_kind_of and others. + * Added tests for assert/refute predicate. + * Split minitest/excludes.rb out to its own gem. + +* 1 bug fix: + + * Fixed must_be_empty and wont_be_empty argument handling. (mrsimo) + +=== 2.9.1 / 2011-12-13 + +* 4 minor enhancements: + + * Added a ton of tests on spec error message output. + * Cleaned up consistency of msg handling on unary expectations. + * Improved error messages on assert/refute_in_delta. + * infect_an_assertion no longer checks arity and better handles args. 
+ +* 1 bug fix: + + * Fixed error message on specs when 2+ args and custom message provided. (chastell) + +=== 2.9.0 / 2011-12-07 + +* 4 minor enhancements: + + * Added TestCase.exclude and load_excludes for programmatic filtering of tests. + * Added guard methods so you can cleanly skip based on platform/impl + * Holy crap! 100% doco! `rdoc -C` ftw + * Switch assert_output to test stderr before stdout to possibly improve debugging + +=== 2.8.1 / 2011-11-17 + +* 1 bug fix: + + * Ugh. 1.9's test/unit violates my internals. Added const_missing. + +=== 2.8.0 / 2011-11-08 + +* 2 minor enhancements: + + * Add a method so that code can be run around a particular test case (tenderlove) + * Turn off backtrace filtering if we're running inside a ruby checkout. (drbrain) + +* 2 bug fixes: + + * Fixed 2 typos and 2 doc glitches. (splattael) + * Remove unused block arguments to avoid creating Proc objects. (k-tsj) + +=== 2.7.0 / 2011-10-25 + +* 2 minor enhancements: + + * Include failed values in the expected arg output in MockExpectationError. (nono) + * Make minitest/pride work with other 256 color capable terms. (sunaku) + +* 2 bug fixes: + + * Clarified the documentation of minitest/benchmark (eregon) + * Fixed using expectations in regular unit tests. (sunaku) + +=== 2.6.2 / 2011-10-19 + +* 1 minor enhancement: + + * Added link to vim bundle. (sunaku) + +* 2 bug fixes: + + * Force gem activation in hoe minitest plugin + * Support RUBY_VERSION='2.0.0' (nagachika) + +=== 2.6.1 / 2011-09-27 + +* 2 bug fixes: + + * Alias Spec.name from Spec.to_s so it works when @name is nil (nathany) + * Fixed assert and refute_operator where second object has a bad == method. + +=== 2.6.0 / 2011-09-13 + +* 2 minor enhancements: + + * Added specify alias for it and made desc optional. + * Spec#must_be and #wont_be can be used with predicates (metaskills) + +* 1 bug fix: + + * Fixed Mock.respond_to?(var) to work with strings. (holli) + +=== 2.5.1 / 2011-08-27 // ruby 1.9.3: p0, p125, p34579 + +* 2 minor enhancements: + + * Added gem activation for minitest in minitest/autoload to help out 1.9 users + * Extended Spec.register_spec_type to allow for procs instead of just regexps. + +=== 2.5.0 / 2011-08-18 + +* 4 minor enhancements: + + * Added 2 more arguments against rspec: let & subject in 9 loc! (emmanuel/luis) + * Added TestCase.i_suck_and_my_tests_are_order_dependent! + * Extended describe to take an optional method name (2 line change!). (emmanuel) + * Refactored and extended minitest/pride to do full 256 color support. (lolcat) + +* 1 bug fix: + + * Doc fixes. (chastell) + +=== 2.4.0 / 2011-08-09 + +* 4 minor enhancements: + + * Added simple examples for all expectations. + * Improved Mock error output when args mismatch. + * Moved all expectations from Object to MiniTest::Expectations. + * infect_with_assertions has been removed due to excessive clever + +* 4 bug fixes: + + * Fix Assertions#mu_pp to deal with immutable encoded strings. (ferrous26) + * Fix minitest/pride for MacRuby (ferrous26) + * Made error output less fancy so it is more readable + * Mock shouldn't undef === and inspect. (dgraham) + +=== 2.3.1 / 2011-06-22 + +* 1 bug fix: + + * Fixed minitest hoe plugin to be a spermy dep and not depend on itself. + +=== 2.3.0 / 2011-06-15 + +* 5 minor enhancements: + + * Add setup and teardown hooks to MiniTest::TestCase. (phiggins) + * Added nicer error messages for MiniTest::Mock. (phiggins) + * Allow for less specific expected arguments in Mock. (bhenderson/phiggins) + * Made MiniTest::Mock a blank slate. 
(phiggins) + * Refactored minitest/spec to use the hooks instead of define_inheritable_method. (phiggins) + +* 2 bug fixes: + + * Fixed TestCase's inherited hook. (dchelimsky/phiggins/jamis, the 'good' neighbor) + * MiniTest::Assertions#refute_empty should use mu_pp in the default message. (whatthejeff) + +=== 2.2.2 / 2011-06-01 + +* 2 bug fixes: + + * Got rid of the trailing period in message for assert_equal. (tenderlove) + * Windows needs more flushing. (Akio Tajima) + +=== 2.2.1 / 2011-05-31 + +* 1 bug fix: + + * My _ONE_ non-rubygems-using minitest user goes to Seattle.rb! + +=== 2.2.0 / 2011-05-29 + +* 6 minor enhancements: + + * assert_equal (and must_equal) now tries to diff output where it makes sense. + * Added Assertions#diff(exp, act) to be used by assert_equal. + * Added Assertions#mu_pp_for_diff + * Added Assertions.diff and diff= + * Moved minitest hoe-plugin from hoe-seattlerb. (erikh) + * Skipped tests only output details in verbose mode. (tenderlove+zenspider=xoxo) + +=== 2.1.0 / 2011-04-11 + +* 5 minor enhancements: + + * Added MiniTest::Spec.register_spec_type(matcher, klass) and spec_type(desc) + * Added ability for specs to share code via subclassing of Spec. (metaskills) + * Clarified (or tried to) bench_performance_linear's use of threshold. + * MiniTest::Unit.runner=(runner) provides an easy way of creating custom test runners for specialized needs. (justinweiss) + * Reverse order of inheritance in teardowns of specs. (deepfryed) + +* 3 bug fixes: + + * FINALLY fixed problems of inheriting specs in describe/it/describe scenario. (MGPalmer) + * Fixed a new warning in 1.9.3. + * Fixed assert_block's message handling. (nobu) + +=== 2.0.2 / 2010-12-24 + +* 1 minor enhancement: + + * Completed doco on minitest/benchmark for specs. + +* 1 bug fix: + + * Benchmarks in specs that didn't call bench_range would die. (zzak). + +=== 2.0.1 / 2010-12-15 + +* 4 minor enhancements: + + * Do not filter backtrace if $DEBUG + * Exit autorun via nested at_exit handler, in case other libs call exit + * Make options accesor lazy. + * Split printing of test name and its time. (nurse) + +* 1 bug fix: + + * Fix bug when ^T is hit before runner start + +=== 2.0.0 / 2010-11-11 + +* 3 major enhancements: + + * Added minitest/benchmark! Assert your performance! YAY! + * Refactored runner to allow for more extensibility. See minitest/benchmark. + * This makes the runner backwards incompatible in some ways! + +* 9 minor enhancements: + + * Added MiniTest::Unit.after_tests { ... } + * Improved output by adding test rates and a more sortable verbose format + * Improved readme based on feedback from others + * Added io method to TestCase. If used, it'll supplant '.EF' output. + * Refactored IO in MiniTest::Unit. + * Refactored _run_anything to _run_suite to make it easier to wrap (ngauthier) + * Spec class names are now the unmunged descriptions (btakita) + * YAY for not having redundant rdoc/readmes! + * Help output is now generated from the flags you passed straight up. + +* 4 bug fixes: + + * Fixed scoping issue on minitest/mock (srbaker/prosperity) + * Fixed some of the assertion default messages + * Fixes autorun when on windows with ruby install on different drive (larsch) + * Fixed rdoc output bug in spec.rb + +=== 1.7.2 / 2010-09-23 + +* 3 bug fixes: + + * Fixed doco for expectations and Spec. + * Fixed test_capture_io on 1.9.3+ (sora_h) + * assert_raises now lets MiniTest::Skip through. 
(shyouhei) + +=== 1.7.1 / 2010-09-01 + +* 1 bug fix: + + * 1.9.2 fixes for spec tests + +=== 1.7.0 / 2010-07-15 + +* 5 minor enhancements: + + * Added assert_output (mapped to must_output). + * Added assert_silent (mapped to must_be_silent). + * Added examples to readme (Mike Dalessio) + * Added options output at the top of the run, for fatal run debugging (tenderlove) + * Spec's describe method returns created class + +=== 1.6.0 / 2010-03-27 // ruby 1.9.2-p290 + +* 10 minor enhancements: + + * Added --seed argument so you can reproduce a random order for debugging. + * Added documentation for assertions + * Added more rdoc and tons of :nodoc: + * Added output to give you all the options you need to reproduce that run. + * Added proper argument parsing to minitest. + * Added unique serial # to spec names so order can be preserved (needs tests). (phrogz) + * Empty 'it' fails with default msg. (phrogz) + * Remove previous method on expect to remove 1.9 warnings + * Spec#it is now order-proof wrt subclasses/nested describes. + * assert_same error message now reports in decimal, eg: oid=123. (mattkent) + +* 2 bug fixes: + + * Fixed message on refute_same to be consistent with assert_same. + * Fixed method randomization to be stable for testing. + +=== 1.5.0 / 2010-01-06 + +* 4 minor enhancements: + + * Added ability to specify what assertions should have their args flipped. + * Don't flip arguments on *include and *respond_to assertions. + * Refactored Module.infect_an_assertion from Module.infect_with_assertions. + * before/after :all now bitches and acts like :each + +* 3 bug fixes: + + * Nested describes now map to nested test classes to avoid namespace collision. + * Using undef_method instead of remove_method to clean out inherited specs. + * assert_raises was ignoring passed in message. + +=== 1.4.2 / 2009-06-25 + +* 1 bug fix: + + * Fixed info handler for systems that don't have siginfo. + +=== 1.4.1 / 2009-06-23 + +* 1 major enhancement: + + * Handle ^C and other fatal exceptions by failing + +* 1 minor enhancement: + + * Added something to catch mixed use of test/unit and minitest if $DEBUG + +* 1 bug fix: + + * Added SIGINFO handler for finding slow tests without verbose + +=== 1.4.0 / 2009-06-18 + +* 5 minor enhancement: + + * Added clarification doco. + * Added specs and mocks to autorun. + * Changed spec test class creation to be non-destructive. + * Updated rakefile for new hoe capabilities. + * describes are nestable (via subclass). before/after/def inherits, specs don't. + +* 3 bug fixes: + + * Fixed location on must/wont. + * Switched to __name__ to avoid common ivar name. + * Fixed indentation in test file (1.9). + +=== 1.3.1 / 2009-01-20 // ruby 1.9.1-p431 + +* 1 minor enhancement: + + * Added miniunit/autorun.rb as replacement for test/unit.rb's autorun. + +* 16 bug fixes: + + * 1.9 test fixes. + * Bug fixes from nobu and akira for really odd scenarios. They run ruby funny. + * Fixed (assert|refute)_match's argument order. + * Fixed LocalJumpError in autorun if exception thrown before at_exit. + * Fixed assert_in_delta (should be >=, not >). + * Fixed assert_raises to match Modules. + * Fixed capture_io to not dup IOs. + * Fixed indentation of capture_io for ruby 1.9 warning. + * Fixed location to deal better with custom assertions and load paths. (Yuki) + * Fixed order of (must|wont)_include in MiniTest::Spec. + * Fixed skip's backtrace. + * Got arg order wrong in *_match in tests, message wrong as a result. + * Made describe private. 
For some reason I thought that an attribute of Kernel. + * Removed disable_autorun method, added autorun.rb instead. + * assert_match escapes if passed string for pattern. + * instance_of? is different from ===, use instance_of. + +=== 1.3.0 / 2008-10-09 + +* 2 major enhancements: + + * renamed to minitest and pulled out test/unit compatibility. + * mini/test.rb is now minitest/unit.rb, everything else maps directly. + +* 12 minor enhancements: + + * assert_match now checks that act can call =~ and converts exp to a + regexp only if needed. + * Added assert_send... seems useless to me tho. + * message now forces to string... ruby-core likes to pass classes and arrays :( + * Added -v handling and switched to @verbose from $DEBUG. + * Verbose output now includes test class name and adds a sortable running time! + * Switched message generation into procs for message deferment. + * Added skip and renamed fail to flunk. + * Improved output failure messages for assert_instance_of, assert_kind_of + * Improved output for assert_respond_to, assert_same. + * at_exit now exits false instead of errors+failures. + * Made the tests happier and more readable imhfo. + * Switched index(s) == 0 to rindex(s, 0) on nobu's suggestion. Faster. + +* 5 bug fixes: + + * 1.9: Added encoding normalization in mu_pp. + * 1.9: Fixed backtrace filtering (BTs are expanded now) + * Added back exception_details to assert_raises. DOH. + * Fixed shadowed variable in mock.rb + * Fixed stupid muscle memory message bug in assert_send. + +=== 1.2.1 / 2008-06-10 + +* 7 minor enhancements: + + * Added deprecations everywhere in test/unit. + * Added test_order to TestCase. :random on mini, :sorted on test/unit (for now). + * Big cleanup in test/unit for rails. Thanks Jeremy Kemper! + * Minor readability cleanup. + * Pushed setup/run/teardown down to testcase allowing specialized testcases. + * Removed pp. Tests run 2x faster. :/ + * Renamed deprecation methods and moved to test/unit/deprecate.rb. + +=== 1.2.0 / 2008-06-09 + +* 2 major enhancements: + + * Added Mini::Spec. + * Added Mini::Mock. Thanks Steven Baker!! + +* 23 minor enhancements: + + * Added bin/use_miniunit to make it easy to test out miniunit. + * Added -n filtering, thanks to Phil Hagelberg! + * Added args argument to #run, takes ARGV from at_exit. + * Added test name output if $DEBUG. + * Added a refute (was deny) for every assert. + * Added capture_io and a bunch of nice assertions from zentest. + * Added deprecation mechanism for assert_no/not methods to test/unit/assertions. + * Added pp output when available. + * Added tests for all assertions. Pretty much maxed out coverage. + * Added tests to verify consistency and good naming. + * Aliased and deprecated all ugly assertions. + * Cleaned out test/unit. Moved autorun there. + * Code cleanup to make extensions easier. Thanks Chad! + * Got spec args reversed in all but a couple assertions. Much more readable. + * Improved error messages across the board. Adds your message to the default. + * Moved into Mini namespace, renamed to Mini::Test and Mini::Spec. + * Pulled the assertions into their own module... + * Removed as much code as I could while still maintaining full functionality. + * Moved filter_backtrace into MiniTest. + * Removed MiniTest::Unit::run. Unnecessary. + * Removed location_of_failure. Unnecessary. + * Rewrote test/unit's filter_backtrace. Flog from 37.0 to 18.1 + * Removed assert_send. Google says it is never used. + * Renamed MiniTest::Unit.autotest to #run. + * Renamed deny to refute. 
+ * Rewrote some ugly/confusing default assertion messages. + * assert_in_delta now defaults to 0.001 precision. Makes specs prettier. + +* 9 bug fixes: + + * Fixed assert_raises to raise outside of the inner-begin/rescue. + * Fixed for ruby 1.9 and rubinius. + * No longer exits 0 if exception in code PRE-test run causes early exit. + * Removed implementors method list from mini/test.rb - too stale. + * assert_nothing_raised takes a class as an arg. wtf? STUPID + * ".EF" output is now unbuffered. + * Bunch of changes to get working with rails... UGH. + * Added stupid hacks to deal with rails not requiring their dependencies. + * Now bitch loudly if someone defines one of my classes instead of requiring. + * Fixed infect method to work better on 1.9. + * Fixed all shadowed variable warnings in 1.9. + +=== 1.1.0 / 2007-11-08 + +* 4 major enhancements: + + * Finished writing all missing assertions. + * Output matches original test/unit. + * Documented every method needed by language implementor. + * Fully switched over to self-testing setup. + +* 2 minor enhancements: + + * Added deny (assert ! test), our favorite extension to test/unit. + * Added .autotest and fairly complete unit tests. (thanks Chad for help here) + +=== 1.0.0 / 2006-10-30 + +* 1 major enhancement + + * Birthday! diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/Manifest.txt b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/Manifest.txt new file mode 100644 index 0000000..8e096ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/Manifest.txt @@ -0,0 +1,27 @@ +.autotest +History.rdoc +Manifest.txt +README.rdoc +Rakefile +design_rationale.rb +lib/hoe/minitest.rb +lib/minitest.rb +lib/minitest/assertions.rb +lib/minitest/autorun.rb +lib/minitest/benchmark.rb +lib/minitest/expectations.rb +lib/minitest/hell.rb +lib/minitest/mock.rb +lib/minitest/parallel.rb +lib/minitest/pride.rb +lib/minitest/pride_plugin.rb +lib/minitest/spec.rb +lib/minitest/test.rb +lib/minitest/unit.rb +test/minitest/metametameta.rb +test/minitest/test_minitest_assertions.rb +test/minitest/test_minitest_benchmark.rb +test/minitest/test_minitest_mock.rb +test/minitest/test_minitest_reporter.rb +test/minitest/test_minitest_spec.rb +test/minitest/test_minitest_test.rb diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/README.rdoc b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/README.rdoc new file mode 100644 index 0000000..202a08a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/README.rdoc @@ -0,0 +1,764 @@ += minitest/{test,spec,mock,benchmark} + +home :: https://github.com/seattlerb/minitest +bugs :: https://github.com/seattlerb/minitest/issues +rdoc :: http://docs.seattlerb.org/minitest +vim :: https://github.com/sunaku/vim-ruby-minitest +emacs:: https://github.com/arthurnn/minitest-emacs + +== DESCRIPTION: + +minitest provides a complete suite of testing facilities supporting +TDD, BDD, mocking, and benchmarking. + + "I had a class with Jim Weirich on testing last week and we were + allowed to choose our testing frameworks. Kirk Haines and I were + paired up and we cracked open the code for a few test + frameworks... + + I MUST say that minitest is *very* readable / understandable + compared to the 'other two' options we looked at. Nicely done and + thank you for helping us keep our mental sanity." + + -- Wayne E. Seguin + +minitest/test is a small and incredibly fast unit testing framework. +It provides a rich set of assertions to make your tests clean and +readable. 
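+
+For example, a few of those assertions in use (an illustrative sketch; the
+class name and values below are made up for this example):
+
+  require "minitest/autorun"
+
+  class TestAssertionSampler < Minitest::Test
+    def test_a_few_assertions
+      assert_equal 3, [1, 2, 3].size        # equality
+      assert_includes [1, 2, 3], 2          # collection membership
+      assert_in_delta 3.14, Math::PI, 0.01  # float comparison with tolerance
+      refute_nil "hello"[0]                 # refute_* asserts the negative
+    end
+  end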
+ +minitest/spec is a functionally complete spec engine. It hooks onto +minitest/test and seamlessly bridges test assertions over to spec +expectations. + +minitest/benchmark is an awesome way to assert the performance of your +algorithms in a repeatable manner. Now you can assert that your newb +co-worker doesn't replace your linear algorithm with an exponential +one! + +minitest/mock by Steven Baker, is a beautifully tiny mock (and stub) +object framework. + +minitest/pride shows pride in testing and adds coloring to your test +output. I guess it is an example of how to write IO pipes too. :P + +minitest/test is meant to have a clean implementation for language +implementors that need a minimal set of methods to bootstrap a working +test suite. For example, there is no magic involved for test-case +discovery. + + "Again, I can't praise enough the idea of a testing/specing + framework that I can actually read in full in one sitting!" + + -- Piotr Szotkowski + +Comparing to rspec: + + rspec is a testing DSL. minitest is ruby. + + -- Adam Hawkins, "Bow Before MiniTest" + +minitest doesn't reinvent anything that ruby already provides, like: +classes, modules, inheritance, methods. This means you only have to +learn ruby to use minitest and all of your regular OO practices like +extract-method refactorings still apply. + +== FEATURES/PROBLEMS: + +* minitest/autorun - the easy and explicit way to run all your tests. +* minitest/test - a very fast, simple, and clean test system. +* minitest/spec - a very fast, simple, and clean spec system. +* minitest/mock - a simple and clean mock/stub system. +* minitest/benchmark - an awesome way to assert your algorithm's performance. +* minitest/pride - show your pride in testing! +* Incredibly small and fast runner, but no bells and whistles. +* Written by squishy human beings. Software can never be perfect. We will all eventually die. + +== RATIONALE: + +See design_rationale.rb to see how specs and tests work in minitest. + +== SYNOPSIS: + +Given that you'd like to test the following class: + + class Meme + def i_can_has_cheezburger? + "OHAI!" + end + + def will_it_blend? + "YES!" + end + end + +=== Unit tests + +Define your tests as methods beginning with +test_+. + + require "minitest/autorun" + + class TestMeme < Minitest::Test + def setup + @meme = Meme.new + end + + def test_that_kitty_can_eat + assert_equal "OHAI!", @meme.i_can_has_cheezburger? + end + + def test_that_it_will_not_blend + refute_match /^no/i, @meme.will_it_blend? + end + + def test_that_will_be_skipped + skip "test this later" + end + end + +=== Specs + + require "minitest/autorun" + + describe Meme do + before do + @meme = Meme.new + end + + describe "when asked about cheeseburgers" do + it "must respond positively" do + _(@meme.i_can_has_cheezburger?).must_equal "OHAI!" + end + end + + describe "when asked about blending possibilities" do + it "won't say no" do + _(@meme.will_it_blend?).wont_match /^no/i + end + end + end + +For matchers support check out: + +* https://github.com/wojtekmach/minitest-matchers +* https://github.com/rmm5t/minitest-matchers_vaccine + +=== Benchmarks + +Add benchmarks to your tests. + + # optionally run benchmarks, good for CI-only work! 
+ require "minitest/benchmark" if ENV["BENCH"] + + class TestMeme < Minitest::Benchmark + # Override self.bench_range or default range is [1, 10, 100, 1_000, 10_000] + def bench_my_algorithm + assert_performance_linear 0.9999 do |n| # n is a range value + @obj.my_algorithm(n) + end + end + end + +Or add them to your specs. If you make benchmarks optional, you'll +need to wrap your benchmarks in a conditional since the methods won't +be defined. In minitest 5, the describe name needs to match +/Bench(mark)?$/. + + describe "Meme Benchmark" do + if ENV["BENCH"] then + bench_performance_linear "my_algorithm", 0.9999 do |n| + 100.times do + @obj.my_algorithm(n) + end + end + end + end + +outputs something like: + + # Running benchmarks: + + TestBlah 100 1000 10000 + bench_my_algorithm 0.006167 0.079279 0.786993 + bench_other_algorithm 0.061679 0.792797 7.869932 + +Output is tab-delimited to make it easy to paste into a spreadsheet. + +=== Mocks + +Mocks and stubs defined using terminology by Fowler & Meszaros at +http://www.martinfowler.com/bliki/TestDouble.html: + +"Mocks are pre-programmed with expectations which form a specification +of the calls they are expected to receive. They can throw an exception +if they receive a call they don't expect and are checked during +verification to ensure they got all the calls they were expecting." + + class MemeAsker + def initialize(meme) + @meme = meme + end + + def ask(question) + method = question.tr(" ", "_") + "?" + @meme.__send__(method) + end + end + + require "minitest/autorun" + + describe MemeAsker, :ask do + describe "when passed an unpunctuated question" do + it "should invoke the appropriate predicate method on the meme" do + @meme = Minitest::Mock.new + @meme_asker = MemeAsker.new @meme + @meme.expect :will_it_blend?, :return_value + + @meme_asker.ask "will it blend" + + @meme.verify + end + end + end + +==== Multi-threading and Mocks + +Minitest mocks do not support multi-threading. If it works, fine, if it doesn't +you can use regular ruby patterns and facilities like local variables. Here's +an example of asserting that code inside a thread is run: + + def test_called_inside_thread + called = false + pr = Proc.new { called = true } + thread = Thread.new(&pr) + thread.join + assert called, "proc not called" + end + +=== Stubs + +Mocks and stubs are defined using terminology by Fowler & Meszaros at +http://www.martinfowler.com/bliki/TestDouble.html: + +"Stubs provide canned answers to calls made during the test". + +Minitest's stub method overrides a single method for the duration of +the block. + + def test_stale_eh + obj_under_test = Something.new + + refute obj_under_test.stale? + + Time.stub :now, Time.at(0) do # stub goes away once the block is done + assert obj_under_test.stale? + end + end + +A note on stubbing: In order to stub a method, the method must +actually exist prior to stubbing. Use a singleton method to create a +new non-existing method: + + def obj_under_test.fake_method + ... + end + +=== Running Your Tests + +Ideally, you'll use a rake task to run your tests, either piecemeal or +all at once. Both rake and rails ship with rake tasks for running your +tests. BUT! You don't have to: + + % ruby -Ilib:test test/minitest/test_minitest_test.rb + Run options: --seed 37685 + + # Running: + + ...................................................................... (etc) + + Finished in 0.107130s, 1446.8403 runs/s, 2959.0217 assertions/s. 
+ + 155 runs, 317 assertions, 0 failures, 0 errors, 0 skips + +There are runtime options available, both from minitest itself, and also +provided via plugins. To see them, simply run with +--help+: + + % ruby -Ilib:test test/minitest/test_minitest_test.rb --help + minitest options: + -h, --help Display this help. + -s, --seed SEED Sets random seed. Also via env. Eg: SEED=n rake + -v, --verbose Verbose. Show progress processing files. + -n, --name PATTERN Filter run on /regexp/ or string. + -e, --exclude PATTERN Exclude /regexp/ or string from run. + + Known extensions: pride, autotest + -p, --pride Pride. Show your testing pride! + -a, --autotest Connect to autotest server. + +You can set up a rake task to run all your tests by adding this to your Rakefile: + + require "rake/testtask" + + Rake::TestTask.new(:test) do |t| + t.libs << "test" + t.libs << "lib" + t.test_files = FileList["test/**/test_*.rb"] + end + + task :default => :test + +== Writing Extensions + +To define a plugin, add a file named minitest/XXX_plugin.rb to your +project/gem. That file must be discoverable via ruby's LOAD_PATH (via +rubygems or otherwise). Minitest will find and require that file using +Gem.find_files. It will then try to call +plugin_XXX_init+ during +startup. The option processor will also try to call +plugin_XXX_options+ +passing the OptionParser instance and the current options hash. This +lets you register your own command-line options. Here's a totally +bogus example: + + # minitest/bogus_plugin.rb: + + module Minitest + def self.plugin_bogus_options(opts, options) + opts.on "--myci", "Report results to my CI" do + options[:myci] = true + options[:myci_addr] = get_myci_addr + options[:myci_port] = get_myci_port + end + end + + def self.plugin_bogus_init(options) + self.reporter << MyCI.new(options) if options[:myci] + end + end + +=== Adding custom reporters + +Minitest uses composite reporter to output test results using multiple +reporter instances. You can add new reporters to the composite during +the init_plugins phase. As we saw in +plugin_bogus_init+ above, you +simply add your reporter instance to the composite via <<. + ++AbstractReporter+ defines the API for reporters. You may subclass it +and override any method you want to achieve your desired behavior. + +start :: Called when the run has started. +record :: Called for each result, passed or otherwise. +report :: Called at the end of the run. +passed? :: Called to see if you detected any problems. + +Using our example above, here is how we might implement MyCI: + + # minitest/bogus_plugin.rb + + module Minitest + class MyCI < AbstractReporter + attr_accessor :results, :addr, :port + + def initialize options + self.results = [] + self.addr = options[:myci_addr] + self.port = options[:myci_port] + end + + def record result + self.results << result + end + + def report + CI.connect(addr, port).send_results self.results + end + end + + # code from above... + end + +== FAQ + +=== How to test SimpleDelegates? + +The following implementation and test: + + class Worker < SimpleDelegator + def work + end + end + + describe Worker do + before do + @worker = Worker.new(Object.new) + end + + it "must respond to work" do + _(@worker).must_respond_to :work + end + end + +outputs a failure: + + 1) Failure: + Worker#test_0001_must respond to work [bug11.rb:16]: + Expected # (Object) to respond to #work. + +Worker is a SimpleDelegate which in 1.9+ is a subclass of BasicObject. 
+Expectations are put on Object (one level down) so the Worker +(SimpleDelegate) hits +method_missing+ and delegates down to the ++Object.new+ instance. That object doesn't respond to work so the test +fails. + +You can bypass SimpleDelegate#method_missing by extending the worker +with Minitest::Expectations. You can either do that in your setup at +the instance level, like: + + before do + @worker = Worker.new(Object.new) + @worker.extend Minitest::Expectations + end + +or you can extend the Worker class (within the test file!), like: + + class Worker + include ::Minitest::Expectations + end + +=== How to share code across test classes? + +Use a module. That's exactly what they're for: + + module UsefulStuff + def useful_method + # ... + end + end + + describe Blah do + include UsefulStuff + + def test_whatever + # useful_method available here + end + end + +Remember, +describe+ simply creates test classes. It's just ruby at +the end of the day and all your normal Good Ruby Rules (tm) apply. If +you want to extend your test using setup/teardown via a module, just +make sure you ALWAYS call super. before/after automatically call super +for you, so make sure you don't do it twice. + +=== How to run code before a group of tests? + +Use a constant with begin...end like this: + + describe Blah do + SETUP = begin + # ... this runs once when describe Blah starts + end + # ... + end + +This can be useful for expensive initializations or sharing state. +Remember, this is just ruby code, so you need to make sure this +technique and sharing state doesn't interfere with your tests. + +=== Why am I seeing uninitialized constant MiniTest::Test (NameError)? + +Are you running the test with Bundler (e.g. via bundle exec )? If so, +in order to require minitest, you must first add the gem 'minitest' +to your Gemfile and run +bundle+. Once it's installed, you should be +able to require minitest and run your tests. + +== Prominent Projects using Minitest: + +* arel +* journey +* mime-types +* nokogiri +* rails (active_support et al) +* rake +* rdoc +* ...and of course, everything from seattle.rb... + +== Developing Minitest: + +Minitest requires {Hoe}[https://rubygems.org/gems/hoe]. + +=== Minitest's own tests require UTF-8 external encoding. + +This is a common problem in Windows, where the default external Encoding is +often CP850, but can affect any platform. +Minitest can run test suites using any Encoding, but to run Minitest's +own tests you must have a default external Encoding of UTF-8. + +If your encoding is wrong, you'll see errors like: + + --- expected + +++ actual + @@ -1,2 +1,3 @@ + # encoding: UTF-8 + -"Expected /\\w+/ to not match \"blah blah blah\"." + +"Expected /\\w+/ to not match # encoding: UTF-8 + +\"blah blah blah\"." + +To check your current encoding, run: + + ruby -e 'puts Encoding.default_external' + +If your output is something other than UTF-8, you can set the RUBYOPTS +env variable to a value of '-Eutf-8'. Something like: + + RUBYOPT='-Eutf-8' ruby -e 'puts Encoding.default_external' + +Check your OS/shell documentation for the precise syntax (the above +will not work on a basic Windows CMD prompt, look for the SET command). +Once you've got it successfully outputing UTF-8, use the same setting +when running rake in Minitest. + +=== Minitest's own tests require GNU (or similar) diff. + +This is also a problem primarily affecting Windows developers. PowerShell +has a command called diff, but it is not suitable for use with Minitest. 
+ +If you see failures like either of these, you are probably missing diff tool: + + 4) Failure: + TestMinitestUnitTestCase#test_assert_equal_different_long [D:/ruby/seattlerb/minitest/test/minitest/test_minitest_test.rb:936]: + Expected: "--- expected\n+++ actual\n@@ -1 +1 @@\n-\"hahahahahahahahahahahahahahahahahahahaha\"\n+\"blahblahblahblahblahblahblahblahblahblah\"\n" + Actual: "Expected: \"hahahahahahahahahahahahahahahahahahahaha\"\n Actual: \"blahblahblahblahblahblahblahblahblahblah\"" + + + 5) Failure: + TestMinitestUnitTestCase#test_assert_equal_different_collection_hash_hex_invisible [D:/ruby/seattlerb/minitest/test/minitest/test_minitest_test.rb:845]: + Expected: "No visible difference in the Hash#inspect output.\nYou should look at the implementation of #== on Hash or its members.\n + {1=>#}" + Actual: "Expected: {1=>#}\n Actual: {1=>#}" + + +If you use Cygwin or MSYS2 or similar there are packages that include a +GNU diff for Windows. If you don't, you can download GNU diffutils from +http://gnuwin32.sourceforge.net/packages/diffutils.htm +(make sure to add it to your PATH). + +You can make sure it's installed and path is configured properly with: + + diff.exe -v + +There are multiple lines of output, the first should be something like: + + diff (GNU diffutils) 2.8.1 + +If you are using PowerShell make sure you run diff.exe, not just diff, +which will invoke the PowerShell built in function. + +== Known Extensions: + +capybara_minitest_spec :: Bridge between Capybara RSpec matchers and + Minitest::Spec expectations (e.g. + page.must_have_content("Title")). +color_pound_spec_reporter :: Test names print Ruby Object types in color with + your Minitest Spec style tests. +minispec-metadata :: Metadata for describe/it blocks & CLI tag filter. + E.g. it "requires JS driver", js: true do & + ruby test.rb --tag js runs tests tagged :js. +minispec-rails :: Minimal support to use Spec style in Rails 5+. +mini-apivore :: for swagger based automated API testing. +minitest-around :: Around block for minitest. An alternative to + setup/teardown dance. +minitest-assert_errors :: Adds Minitest assertions to test for errors raised + or not raised by Minitest itself. +minitest-autotest :: autotest is a continuous testing facility meant to + be used during development. +minitest-bacon :: minitest-bacon extends minitest with bacon-like + functionality. +minitest-bang :: Adds support for RSpec-style let! to immediately + invoke let statements before each test. +minitest-bisect :: Helps you isolate and debug random test failures. +minitest-blink1_reporter :: Display test results with a Blink1. +minitest-capistrano :: Assertions and expectations for testing + Capistrano recipes. +minitest-capybara :: Capybara matchers support for minitest unit and + spec. +minitest-chef-handler :: Run Minitest suites as Chef report handlers +minitest-ci :: CI reporter plugin for Minitest. +minitest-context :: Defines contexts for code reuse in Minitest + specs that share common expectations. +minitest-debugger :: Wraps assert so failed assertions drop into + the ruby debugger. +minitest-display :: Patches Minitest to allow for an easily + configurable output. +minitest-documentation :: Minimal documentation format inspired by rspec's. +minitest-doc_reporter :: Detailed output inspired by rspec's documentation + format. +minitest-emoji :: Print out emoji for your test passes, fails, and + skips. +minitest-english :: Semantically symmetric aliases for assertions and + expectations. 
+minitest-excludes :: Clean API for excluding certain tests you + don't want to run under certain conditions. +minitest-fail-fast :: Reimplements RSpec's "fail fast" feature +minitest-filecontent :: Support unit tests with expectation results in files. + Differing results will be stored again in files. +minitest-filesystem :: Adds assertion and expectation to help testing + filesystem contents. +minitest-firemock :: Makes your Minitest mocks more resilient. +minitest-focus :: Focus on one test at a time. +minitest-gcstats :: A minitest plugin that adds a report of the top + tests by number of objects allocated. +minitest-global_expectations:: Support minitest expectation methods for all objects +minitest-great_expectations :: Generally useful additions to minitest's + assertions and expectations. +minitest-growl :: Test notifier for minitest via growl. +minitest-happy :: GLOBALLY ACTIVATE MINITEST PRIDE! RAWR! +minitest-have_tag :: Adds Minitest assertions to test for the existence of + HTML tags, including contents, within a provided string. +minitest-hooks :: Around and before_all/after_all/around_all hooks +minitest-hyper :: Pretty, single-page HTML reports for your Minitest runs +minitest-implicit-subject :: Implicit declaration of the test subject. +minitest-instrument :: Instrument ActiveSupport::Notifications when + test method is executed. +minitest-instrument-db :: Store information about speed of test execution + provided by minitest-instrument in database. +minitest-junit :: JUnit-style XML reporter for minitest. +minitest-keyword :: Use Minitest assertions with keyword arguments. +minitest-libnotify :: Test notifier for minitest via libnotify. +minitest-line :: Run test at line number. +minitest-logger :: Define assert_log and enable minitest to test log messages. + Supports Logger and Log4r::Logger. +minitest-macruby :: Provides extensions to minitest for macruby UI + testing. +minitest-matchers :: Adds support for RSpec-style matchers to + minitest. +minitest-matchers_vaccine :: Adds assertions that adhere to the matcher spec, + but without any expectation infections. +minitest-metadata :: Annotate tests with metadata (key-value). +minitest-mock_expectations :: Provides method call assertions for minitest. +minitest-mongoid :: Mongoid assertion matchers for Minitest. +minitest-must_not :: Provides must_not as an alias for wont in + Minitest. +minitest-optional_retry :: Automatically retry failed test to help with flakiness. +minitest-osx :: Reporter for the Mac OS X notification center. +minitest-parallel_fork :: Fork-based parallelization +minitest-parallel-db :: Run tests in parallel with a single database. +minitest-power_assert :: PowerAssert for Minitest. +minitest-predicates :: Adds support for .predicate? methods. +minitest-profile :: List the 10 slowest tests in your suite. +minitest-rails :: Minitest integration for Rails 3.x. +minitest-rails-capybara :: Capybara integration for Minitest::Rails. +minitest-reporters :: Create customizable Minitest output formats. +minitest-rg :: Colored red/green output for Minitest. +minitest-rspec_mocks :: Use RSpec Mocks with Minitest. +minitest-server :: minitest-server provides a client/server setup + with your minitest process, allowing your test + run to send its results directly to a handler. +minitest-sequel :: Minitest assertions to speed-up development and + testing of Ruby Sequel database setups. 
+minitest-shared_description :: Support for shared specs and shared spec + subclasses +minitest-should_syntax :: RSpec-style x.should == y assertions for + Minitest. +minitest-shouldify :: Adding all manner of shoulds to Minitest (bad + idea) +minitest-snail :: Print a list of tests that take too long +minitest-spec-context :: Provides rspec-ish context method to + Minitest::Spec. +minitest-spec-expect :: Expect syntax for Minitest::Spec (e.g. + expect(sequences).to_include :celery_man). +minitest-spec-magic :: Minitest::Spec extensions for Rails and beyond. +minitest-spec-rails :: Drop in Minitest::Spec superclass for + ActiveSupport::TestCase. +minitest-sprint :: Runs (Get it? It's fast!) your tests and makes + it easier to rerun individual failures. +minitest-stately :: Find leaking state between tests +minitest-stub_any_instance :: Stub any instance of a method on the given class + for the duration of a block. +minitest-stub-const :: Stub constants for the duration of a block. +minitest-tags :: Add tags for minitest. +minitest-unordered :: Adds a new assertion to minitest for checking the + contents of a collection, ignoring element order. +minitest-vcr :: Automatic cassette managment with Minitest::Spec + and VCR. +minitest_log :: Adds structured logging, data explication, and verdicts. +minitest_owrapper :: Get tests results as a TestResult object. +minitest_should :: Shoulda style syntax for minitest test::unit. +minitest_tu_shim :: Bridges between test/unit and minitest. +mongoid-minitest :: Minitest matchers for Mongoid. +mutant-minitest :: Minitest integration for mutant. +pry-rescue :: A pry plugin w/ minitest support. See + pry-rescue/minitest.rb. +rspec2minitest :: Easily translate any RSpec matchers to Minitest + assertions and expectations. + +== Unknown Extensions: + +Authors... Please send me a pull request with a description of your minitest extension. + +* assay-minitest +* detroit-minitest +* em-minitest-spec +* flexmock-minitest +* guard-minitest +* guard-minitest-decisiv +* minitest-activemodel +* minitest-ar-assertions +* minitest-capybara-unit +* minitest-colorer +* minitest-deluxe +* minitest-extra-assertions +* minitest-rails-shoulda +* minitest-spec +* minitest-spec-should +* minitest-sugar +* spork-minitest + +== Minitest related goods + +* minitest/pride fabric: http://www.spoonflower.com/fabric/3928730-again-by-katie_allen + +== REQUIREMENTS: + +* Ruby 2.3+. No magic is involved. I hope. + +== INSTALL: + + sudo gem install minitest + +On 1.9, you already have it. To get newer candy you can still install +the gem, and then requiring "minitest/autorun" should automatically +pull it in. If not, you'll need to do it yourself: + + gem "minitest" # ensures you"re using the gem, and not the built-in MT + require "minitest/autorun" + + # ... usual testing stuffs ... + +DO NOTE: There is a serious problem with the way that ruby 1.9/2.0 +packages their own gems. They install a gem specification file, but +don't install the gem contents in the gem path. This messes up +Gem.find_files and many other things (gem which, gem contents, etc). + +Just install minitest as a gem for real and you'll be happier. 
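+
+Putting the install notes above together, a complete standalone test file
+using the gem-activation pattern might look like the following sketch (the
+file name and the tested values are illustrative only):
+
+  # example_test.rb -- run with: ruby example_test.rb
+  gem "minitest"              # activate the installed gem, not the built-in copy
+  require "minitest/autorun"  # registers the at_exit test runner
+
+  class TestArithmetic < Minitest::Test
+    def test_addition
+      assert_equal 4, 2 + 2
+    end
+  end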
+ +== LICENSE: + +(The MIT License) + +Copyright (c) Ryan Davis, seattle.rb + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/Rakefile b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/Rakefile new file mode 100644 index 0000000..0237b44 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/Rakefile @@ -0,0 +1,74 @@ +# -*- ruby -*- + +require "rubygems" +require "hoe" + +Hoe.plugin :seattlerb +Hoe.plugin :rdoc + +Hoe.spec "minitest" do + developer "Ryan Davis", "ryand-ruby@zenspider.com" + + license "MIT" + + require_ruby_version [">= 2.2", "< 3.1"] +end + +desc "Find missing expectations" +task :specs do + $:.unshift "lib" + require "minitest/test" + require "minitest/spec" + + pos_prefix, neg_prefix = "must", "wont" + skip_re = /^(must|wont)$|wont_(throw)|must_(block|not?_|nothing|send|raise$)/x + dont_flip_re = /(must|wont)_(include|respond_to)/ + + map = { + /(must_throw)s/ => '\1', + /(?!not)_same/ => "_be_same_as", + /_in_/ => "_be_within_", + /_operator/ => "_be", + /_includes/ => "_include", + /(must|wont)_(.*_of|nil|silent|empty)/ => '\1_be_\2', + /must_raises/ => "must_raise", + /(must|wont)_predicate/ => '\1_be', + /(must|wont)_path_exists/ => 'path_\1_exist', + } + + expectations = Minitest::Expectations.public_instance_methods.map(&:to_s) + assertions = Minitest::Assertions.public_instance_methods.map(&:to_s) + + assertions.sort.each do |assertion| + expectation = case assertion + when /^assert/ then + assertion.sub(/^assert/, pos_prefix.to_s) + when /^refute/ then + assertion.sub(/^refute/, neg_prefix.to_s) + end + + next unless expectation + next if expectation =~ skip_re + + regexp, replacement = map.find { |re, _| expectation =~ re } + expectation.sub! regexp, replacement if replacement + + next if expectations.include? 
expectation + + args = [assertion, expectation].map(&:to_sym).map(&:inspect) + args << :reverse if expectation =~ dont_flip_re + + puts + puts "##" + puts "# :method: #{expectation}" + puts "# See Minitest::Assertions##{assertion}" + puts + puts "infect_an_assertion #{args.join ", "}" + end +end + +task :bugs do + sh "for f in bug*.rb ; do echo $f; echo; #{Gem.ruby} -Ilib $f && rm $f ; done" +end + +# vim: syntax=Ruby diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/design_rationale.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/design_rationale.rb new file mode 100644 index 0000000..a3fcc37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/design_rationale.rb @@ -0,0 +1,52 @@ +# Specs: # Equivalent Unit Tests: +############################################################################### +describe Thingy do # class TestThingy < Minitest::Test + before do # def setup + do_some_setup # super + end # do_some_setup + # end + it "should do the first thing" do # + 1.must_equal 1 # def test_first_thing + end # assert_equal 1, 1 + # end + describe SubThingy do # end + before do # + do_more_setup # class TestSubThingy < TestThingy + end # def setup + # super + it "should do the second thing" do # do_more_setup + 2.must_equal 2 # end + end # + end # def test_second_thing +end # assert_equal 2, 2 + # end + # end +############################################################################### +# runs 2 specs # runs 3 tests +############################################################################### +# The specs generate: + +class ThingySpec < Minitest::Spec + def setup + super + do_some_setup + end + + def test_should_do_the_first_thing + assert_equal 1, 1 + end +end + +class SubThingySpec < ThingySpec + def setup + super + do_more_setup + end + + # because only setup/teardown is inherited, not specs + remove_method :test_should_do_the_first_thing + + def test_should_do_the_second_thing + assert_equal 2, 2 + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/hoe/minitest.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/hoe/minitest.rb new file mode 100644 index 0000000..6c045a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/hoe/minitest.rb @@ -0,0 +1,32 @@ +# :stopdoc: + +class Hoe +end + +module Hoe::Minitest + def minitest? + self.name == "minitest" + end + + def initialize_minitest + unless minitest? then + dir = "../../minitest/dev/lib" + Hoe.add_include_dirs dir if File.directory? dir + end + + gem "minitest" + require "minitest" + version = Minitest::VERSION.split(/\./).first(2).join(".") + + dependency "minitest", "~> #{version}", :development unless + minitest? or ENV["MT_NO_ISOLATE"] + end + + def define_minitest_tasks + self.testlib = :minitest + + # make sure we use the gemmed minitest on 1.9 + self.test_prelude = 'gem "minitest"' unless + minitest? or ENV["MT_NO_ISOLATE"] + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest.rb new file mode 100644 index 0000000..e796de5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest.rb @@ -0,0 +1,1056 @@ +require "optparse" +require "thread" +require "mutex_m" +require "minitest/parallel" +require "stringio" + +## +# :include: README.rdoc + +module Minitest + VERSION = "5.14.2" # :nodoc: + ENCS = "".respond_to? 
:encoding # :nodoc: + + @@installed_at_exit ||= false + @@after_run = [] + @extensions = [] + + mc = (class << self; self; end) + + ## + # Parallel test executor + + mc.send :attr_accessor, :parallel_executor + + warn "DEPRECATED: use MT_CPU instead of N for parallel test runs" if ENV["N"] + n_threads = (ENV["MT_CPU"] || ENV["N"] || 2).to_i + self.parallel_executor = Parallel::Executor.new n_threads + + ## + # Filter object for backtraces. + + mc.send :attr_accessor, :backtrace_filter + + ## + # Reporter object to be used for all runs. + # + # NOTE: This accessor is only available during setup, not during runs. + + mc.send :attr_accessor, :reporter + + ## + # Names of known extension plugins. + + mc.send :attr_accessor, :extensions + + ## + # The signal to use for dumping information to STDERR. Defaults to "INFO". + + mc.send :attr_accessor, :info_signal + self.info_signal = "INFO" + + ## + # Registers Minitest to run at process exit + + def self.autorun + at_exit { + next if $! and not ($!.kind_of? SystemExit and $!.success?) + + exit_code = nil + + pid = Process.pid + at_exit { + next if Process.pid != pid + @@after_run.reverse_each(&:call) + exit exit_code || false + } + + exit_code = Minitest.run ARGV + } unless @@installed_at_exit + @@installed_at_exit = true + end + + ## + # A simple hook allowing you to run a block of code after everything + # is done running. Eg: + # + # Minitest.after_run { p $debugging_info } + + def self.after_run &block + @@after_run << block + end + + def self.init_plugins options # :nodoc: + self.extensions.each do |name| + msg = "plugin_#{name}_init" + send msg, options if self.respond_to? msg + end + end + + def self.load_plugins # :nodoc: + return unless self.extensions.empty? + + seen = {} + + require "rubygems" unless defined? Gem + + Gem.find_files("minitest/*_plugin.rb").each do |plugin_path| + name = File.basename plugin_path, "_plugin.rb" + + next if seen[name] + seen[name] = true + + require plugin_path + self.extensions << name + end + end + + ## + # This is the top-level run method. Everything starts from here. It + # tells each Runnable sub-class to run, and each of those are + # responsible for doing whatever they do. + # + # The overall structure of a run looks like this: + # + # Minitest.autorun + # Minitest.run(args) + # Minitest.__run(reporter, options) + # Runnable.runnables.each + # runnable.run(reporter, options) + # self.runnable_methods.each + # self.run_one_method(self, runnable_method, reporter) + # Minitest.run_one_method(klass, runnable_method) + # klass.new(runnable_method).run + + def self.run args = [] + self.load_plugins unless args.delete("--no-plugins") || ENV["MT_NO_PLUGINS"] + + options = process_args args + + reporter = CompositeReporter.new + reporter << SummaryReporter.new(options[:io], options) + reporter << ProgressReporter.new(options[:io], options) + + self.reporter = reporter # this makes it available to plugins + self.init_plugins options + self.reporter = nil # runnables shouldn't depend on the reporter, ever + + self.parallel_executor.start if parallel_executor.respond_to?(:start) + reporter.start + begin + __run reporter, options + rescue Interrupt + warn "Interrupted. Exiting..." + end + self.parallel_executor.shutdown + reporter.report + + reporter.passed? + end + + ## + # Internal run method. Responsible for telling all Runnable + # sub-classes to run. + + def self.__run reporter, options + suites = Runnable.runnables.reject { |s| s.runnable_methods.empty? 
}.shuffle + parallel, serial = suites.partition { |s| s.test_order == :parallel } + + # If we run the parallel tests before the serial tests, the parallel tests + # could run in parallel with the serial tests. This would be bad because + # the serial tests won't lock around Reporter#record. Run the serial tests + # first, so that after they complete, the parallel tests will lock when + # recording results. + serial.map { |suite| suite.run reporter, options } + + parallel.map { |suite| suite.run reporter, options } + end + + def self.process_args args = [] # :nodoc: + options = { + :io => $stdout, + } + orig_args = args.dup + + OptionParser.new do |opts| + opts.banner = "minitest options:" + opts.version = Minitest::VERSION + + opts.on "-h", "--help", "Display this help." do + puts opts + exit + end + + opts.on "--no-plugins", "Bypass minitest plugin auto-loading (or set $MT_NO_PLUGINS)." + + desc = "Sets random seed. Also via env. Eg: SEED=n rake" + opts.on "-s", "--seed SEED", Integer, desc do |m| + options[:seed] = m.to_i + end + + opts.on "-v", "--verbose", "Verbose. Show progress processing files." do + options[:verbose] = true + end + + opts.on "-n", "--name PATTERN", "Filter run on /regexp/ or string." do |a| + options[:filter] = a + end + + opts.on "-e", "--exclude PATTERN", "Exclude /regexp/ or string from run." do |a| + options[:exclude] = a + end + + unless extensions.empty? + opts.separator "" + opts.separator "Known extensions: #{extensions.join(", ")}" + + extensions.each do |meth| + msg = "plugin_#{meth}_options" + send msg, opts, options if self.respond_to?(msg) + end + end + + begin + opts.parse! args + rescue OptionParser::InvalidOption => e + puts + puts e + puts + puts opts + exit 1 + end + + orig_args -= args + end + + unless options[:seed] then + srand + options[:seed] = (ENV["SEED"] || srand).to_i % 0xFFFF + orig_args << "--seed" << options[:seed].to_s + end + + srand options[:seed] + + options[:args] = orig_args.map { |s| + s =~ /[\s|&<>$()]/ ? s.inspect : s + }.join " " + + options + end + + def self.filter_backtrace bt # :nodoc: + result = backtrace_filter.filter bt + result = bt.dup if result.empty? + result + end + + ## + # Represents anything "runnable", like Test, Spec, Benchmark, or + # whatever you can dream up. + # + # Subclasses of this are automatically registered and available in + # Runnable.runnables. + + class Runnable + ## + # Number of assertions executed in this run. + + attr_accessor :assertions + + ## + # An assertion raised during the run, if any. + + attr_accessor :failures + + ## + # The time it took to run. + + attr_accessor :time + + def time_it # :nodoc: + t0 = Minitest.clock_time + + yield + ensure + self.time = Minitest.clock_time - t0 + end + + ## + # Name of the run. + + def name + @NAME + end + + ## + # Set the name of the run. + + def name= o + @NAME = o + end + + ## + # Returns all instance methods matching the pattern +re+. + + def self.methods_matching re + public_instance_methods(true).grep(re).map(&:to_s) + end + + def self.reset # :nodoc: + @@runnables = [] + end + + reset + + ## + # Responsible for running all runnable methods in a given class, + # each in its own instance. Each instance is passed to the + # reporter to record. 
+ + def self.run reporter, options = {} + filter = options[:filter] || "/./" + filter = Regexp.new $1 if filter.is_a?(String) && filter =~ %r%/(.*)/% + + filtered_methods = self.runnable_methods.find_all { |m| + filter === m || filter === "#{self}##{m}" + } + + exclude = options[:exclude] + exclude = Regexp.new $1 if exclude =~ %r%/(.*)/% + + filtered_methods.delete_if { |m| + exclude === m || exclude === "#{self}##{m}" + } + + return if filtered_methods.empty? + + with_info_handler reporter do + filtered_methods.each do |method_name| + run_one_method self, method_name, reporter + end + end + end + + ## + # Runs a single method and has the reporter record the result. + # This was considered internal API but is factored out of run so + # that subclasses can specialize the running of an individual + # test. See Minitest::ParallelTest::ClassMethods for an example. + + def self.run_one_method klass, method_name, reporter + reporter.prerecord klass, method_name + reporter.record Minitest.run_one_method(klass, method_name) + end + + def self.with_info_handler reporter, &block # :nodoc: + handler = lambda do + unless reporter.passed? then + warn "Current results:" + warn "" + warn reporter.reporters.first + warn "" + end + end + + on_signal ::Minitest.info_signal, handler, &block + end + + SIGNALS = Signal.list # :nodoc: + + def self.on_signal name, action # :nodoc: + supported = SIGNALS[name] + + old_trap = trap name do + old_trap.call if old_trap.respond_to? :call + action.call + end if supported + + yield + ensure + trap name, old_trap if supported + end + + ## + # Each subclass of Runnable is responsible for overriding this + # method to return all runnable methods. See #methods_matching. + + def self.runnable_methods + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns all subclasses of Runnable. + + def self.runnables + @@runnables + end + + @@marshal_dump_warned = false + + def marshal_dump # :nodoc: + unless @@marshal_dump_warned then + warn ["Minitest::Runnable#marshal_dump is deprecated.", + "You might be violating internals. From", caller.first].join " " + @@marshal_dump_warned = true + end + + [self.name, self.failures, self.assertions, self.time] + end + + def marshal_load ary # :nodoc: + self.name, self.failures, self.assertions, self.time = ary + end + + def failure # :nodoc: + self.failures.first + end + + def initialize name # :nodoc: + self.name = name + self.failures = [] + self.assertions = 0 + end + + ## + # Runs a single method. Needs to return self. + + def run + raise NotImplementedError, "subclass responsibility" + end + + ## + # Did this run pass? + # + # Note: skipped runs are not considered passing, but they don't + # cause the process to exit non-zero. + + def passed? + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns a single character string to print based on the result + # of the run. One of ".", "F", + # "E" or "S". + + def result_code + raise NotImplementedError, "subclass responsibility" + end + + ## + # Was this run skipped? See #passed? for more information. + + def skipped? + raise NotImplementedError, "subclass responsibility" + end + end + + ## + # Shared code for anything that can get passed to a Reporter. See + # Minitest::Test & Minitest::Result. + + module Reportable + ## + # Did this run pass? + # + # Note: skipped runs are not considered passing, but they don't + # cause the process to exit non-zero. + + def passed? + not self.failure + end + + ## + # The location identifier of this test. 
Depends on a method + # existing called class_name. + + def location + loc = " [#{self.failure.location}]" unless passed? or error? + "#{self.class_name}##{self.name}#{loc}" + end + + def class_name # :nodoc: + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns ".", "F", or "E" based on the result of the run. + + def result_code + self.failure and self.failure.result_code or "." + end + + ## + # Was this run skipped? + + def skipped? + self.failure and Skip === self.failure + end + + ## + # Did this run error? + + def error? + self.failures.any? { |f| UnexpectedError === f } + end + end + + ## + # This represents a test result in a clean way that can be + # marshalled over a wire. Tests can do anything they want to the + # test instance and can create conditions that cause Marshal.dump to + # blow up. By using Result.from(a_test) you can be reasonably sure + # that the test result can be marshalled. + + class Result < Runnable + include Minitest::Reportable + + undef_method :marshal_dump + undef_method :marshal_load + + ## + # The class name of the test result. + + attr_accessor :klass + + ## + # The location of the test method. + + attr_accessor :source_location + + ## + # Create a new test result from a Runnable instance. + + def self.from runnable + o = runnable + + r = self.new o.name + r.klass = o.class.name + r.assertions = o.assertions + r.failures = o.failures.dup + r.time = o.time + + r.source_location = o.method(o.name).source_location rescue ["unknown", -1] + + r + end + + def class_name # :nodoc: + self.klass # for Minitest::Reportable + end + + def to_s # :nodoc: + return location if passed? and not skipped? + + failures.map { |failure| + "#{failure.result_label}:\n#{self.location}:\n#{failure.message}\n" + }.join "\n" + end + end + + ## + # Defines the API for Reporters. Subclass this and override whatever + # you want. Go nuts. + + class AbstractReporter + include Mutex_m + + ## + # Starts reporting on the run. + + def start + end + + ## + # About to start running a test. This allows a reporter to show + # that it is starting or that we are in the middle of a test run. + + def prerecord klass, name + end + + ## + # Output and record the result of the test. Call + # {result#result_code}[rdoc-ref:Runnable#result_code] to get the + # result character string. Stores the result of the run if the run + # did not pass. + + def record result + end + + ## + # Outputs the summary of the run. + + def report + end + + ## + # Did this run pass? + + def passed? + true + end + end + + class Reporter < AbstractReporter # :nodoc: + ## + # The IO used to report. + + attr_accessor :io + + ## + # Command-line options for this run. + + attr_accessor :options + + def initialize io = $stdout, options = {} # :nodoc: + super() + self.io = io + self.options = options + end + end + + ## + # A very simple reporter that prints the "dots" during the run. + # + # This is added to the top-level CompositeReporter at the start of + # the run. If you want to change the output of minitest via a + # plugin, pull this out of the composite and replace it with your + # own. + + class ProgressReporter < Reporter + def prerecord klass, name #:nodoc: + if options[:verbose] then + io.print "%s#%s = " % [klass.name, name] + io.flush + end + end + + def record result # :nodoc: + io.print "%.2f s = " % [result.time] if options[:verbose] + io.print result.result_code + io.puts if options[:verbose] + end + end + + ## + # A reporter that gathers statistics about a test run. 
Does not do + # any IO because meant to be used as a parent class for a reporter + # that does. + # + # If you want to create an entirely different type of output (eg, + # CI, HTML, etc), this is the place to start. + # + # Example: + # + # class JenkinsCIReporter < StatisticsReporter + # def report + # super # Needed to calculate some statistics + # + # print " 30 characters. + # 3. or: Strings are equal to each other (but maybe different encodings?). + # 4. and: we found a diff executable. + + def things_to_diff exp, act + expect = mu_pp_for_diff exp + butwas = mu_pp_for_diff act + + e1, e2 = expect.include?("\n"), expect.include?("\\n") + b1, b2 = butwas.include?("\n"), butwas.include?("\\n") + + need_to_diff = + (e1 ^ e2 || + b1 ^ b2 || + expect.size > 30 || + butwas.size > 30 || + expect == butwas) && + Minitest::Assertions.diff + + need_to_diff && [expect, butwas] + end + + ## + # This returns a human-readable version of +obj+. By default + # #inspect is called. You can override this to use #pretty_inspect + # if you want. + # + # See Minitest::Test.make_my_diffs_pretty! + + def mu_pp obj + s = obj.inspect + + if defined? Encoding then + s = s.encode Encoding.default_external + + if String === obj && (obj.encoding != Encoding.default_external || + !obj.valid_encoding?) then + enc = "# encoding: #{obj.encoding}" + val = "# valid: #{obj.valid_encoding?}" + s = "#{enc}\n#{val}\n#{s}" + end + end + + s + end + + ## + # This returns a diff-able more human-readable version of +obj+. + # This differs from the regular mu_pp because it expands escaped + # newlines and makes hex-values (like object_ids) generic. This + # uses mu_pp to do the first pass and then cleans it up. + + def mu_pp_for_diff obj + str = mu_pp obj + + # both '\n' & '\\n' (_after_ mu_pp (aka inspect)) + single = !!str.match(/(?exp == act printing the difference between + # the two, if possible. + # + # If there is no visible difference but the assertion fails, you + # should suspect that your #== is buggy, or your inspect output is + # missing crucial details. For nicer structural diffing, set + # Minitest::Test.make_my_diffs_pretty! + # + # For floats use assert_in_delta. + # + # See also: Minitest::Assertions.diff + + def assert_equal exp, act, msg = nil + msg = message(msg, E) { diff exp, act } + result = assert exp == act, msg + + if nil == exp then + if Minitest::VERSION =~ /^6/ then + refute_nil exp, "Use assert_nil if expecting nil." + else + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + + warn "DEPRECATED: Use assert_nil if expecting nil from #{where}. This will fail in Minitest 6." + end + end + + result + end + + ## + # For comparing Floats. Fails unless +exp+ and +act+ are within +delta+ + # of each other. + # + # assert_in_delta Math::PI, (22.0 / 7.0), 0.01 + + def assert_in_delta exp, act, delta = 0.001, msg = nil + n = (exp - act).abs + msg = message(msg) { + "Expected |#{exp} - #{act}| (#{n}) to be <= #{delta}" + } + assert delta >= n, msg + end + + ## + # For comparing Floats. Fails unless +exp+ and +act+ have a relative + # error less than +epsilon+. + + def assert_in_epsilon exp, act, epsilon = 0.001, msg = nil + assert_in_delta exp, act, [exp.abs, act.abs].min * epsilon, msg + end + + ## + # Fails unless +collection+ includes +obj+. + + def assert_includes collection, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(collection)} to include #{mu_pp(obj)}" + } + assert_respond_to collection, :include? 
+ assert collection.include?(obj), msg + end + + ## + # Fails unless +obj+ is an instance of +cls+. + + def assert_instance_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to be an instance of #{cls}, not #{obj.class}" + } + + assert obj.instance_of?(cls), msg + end + + ## + # Fails unless +obj+ is a kind of +cls+. + + def assert_kind_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to be a kind of #{cls}, not #{obj.class}" } + + assert obj.kind_of?(cls), msg + end + + ## + # Fails unless +matcher+ =~ +obj+. + + def assert_match matcher, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp matcher} to match #{mu_pp obj}" } + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + assert matcher =~ obj, msg + end + + ## + # Fails unless +obj+ is nil + + def assert_nil obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to be nil" } + assert obj.nil?, msg + end + + ## + # For testing with binary operators. Eg: + # + # assert_operator 5, :<=, 4 + + def assert_operator o1, op, o2 = UNDEFINED, msg = nil + return assert_predicate o1, op, msg if UNDEFINED == o2 + msg = message(msg) { "Expected #{mu_pp(o1)} to be #{op} #{mu_pp(o2)}" } + assert o1.__send__(op, o2), msg + end + + ## + # Fails if stdout or stderr do not output the expected results. + # Pass in nil if you don't care about that streams output. Pass in + # "" if you require it to be silent. Pass in a regexp if you want + # to pattern match. + # + # assert_output(/hey/) { method_with_output } + # + # NOTE: this uses #capture_io, not #capture_subprocess_io. + # + # See also: #assert_silent + + def assert_output stdout = nil, stderr = nil + flunk "assert_output requires a block to capture output." unless + block_given? + + out, err = capture_io do + yield + end + + err_msg = Regexp === stderr ? :assert_match : :assert_equal if stderr + out_msg = Regexp === stdout ? :assert_match : :assert_equal if stdout + + y = send err_msg, stderr, err, "In stderr" if err_msg + x = send out_msg, stdout, out, "In stdout" if out_msg + + (!stdout || x) && (!stderr || y) + rescue Assertion + raise + rescue => e + raise UnexpectedError, e + end + + ## + # Fails unless +path+ exists. + + def assert_path_exists path, msg = nil + msg = message(msg) { "Expected path '#{path}' to exist" } + assert File.exist?(path), msg + end + + ## + # For testing with predicates. Eg: + # + # assert_predicate str, :empty? + # + # This is really meant for specs and is front-ended by assert_operator: + # + # str.must_be :empty? + + def assert_predicate o1, op, msg = nil + msg = message(msg) { "Expected #{mu_pp(o1)} to be #{op}" } + assert o1.__send__(op), msg + end + + ## + # Fails unless the block raises one of +exp+. Returns the + # exception matched so you can check the message, attributes, etc. + # + # +exp+ takes an optional message on the end to help explain + # failures and defaults to StandardError if no exception class is + # passed. Eg: + # + # assert_raises(CustomError) { method_with_custom_error } + # + # With custom error message: + # + # assert_raises(CustomError, 'This should have raised CustomError') { method_with_custom_error } + # + # Using the returned object: + # + # error = assert_raises(CustomError) do + # raise CustomError, 'This is really bad' + # end + # + # assert_equal 'This is really bad', error.message + + def assert_raises *exp + flunk "assert_raises requires a block to capture errors." unless + block_given? 
+ + msg = "#{exp.pop}.\n" if String === exp.last + exp << StandardError if exp.empty? + + begin + yield + rescue *exp => e + pass # count assertion + return e + rescue Minitest::Assertion # incl Skip & UnexpectedError + # don't count assertion + raise + rescue SignalException, SystemExit + raise + rescue Exception => e + flunk proc { + exception_details(e, "#{msg}#{mu_pp(exp)} exception expected, not") + } + end + + exp = exp.first if exp.size == 1 + + flunk "#{msg}#{mu_pp(exp)} expected but nothing was raised." + end + + ## + # Fails unless +obj+ responds to +meth+. + + def assert_respond_to obj, meth, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} (#{obj.class}) to respond to ##{meth}" + } + assert obj.respond_to?(meth), msg + end + + ## + # Fails unless +exp+ and +act+ are #equal? + + def assert_same exp, act, msg = nil + msg = message(msg) { + data = [mu_pp(act), act.object_id, mu_pp(exp), exp.object_id] + "Expected %s (oid=%d) to be the same as %s (oid=%d)" % data + } + assert exp.equal?(act), msg + end + + ## + # +send_ary+ is a receiver, message and arguments. + # + # Fails unless the call returns a true value + + def assert_send send_ary, m = nil + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + warn "DEPRECATED: assert_send. From #{where}" + + recv, msg, *args = send_ary + m = message(m) { + "Expected #{mu_pp(recv)}.#{msg}(*#{mu_pp(args)}) to return true" } + assert recv.__send__(msg, *args), m + end + + ## + # Fails if the block outputs anything to stderr or stdout. + # + # See also: #assert_output + + def assert_silent + assert_output "", "" do + yield + end + end + + ## + # Fails unless the block throws +sym+ + + def assert_throws sym, msg = nil + default = "Expected #{mu_pp(sym)} to have been thrown" + caught = true + catch(sym) do + begin + yield + rescue ThreadError => e # wtf?!? 1.8 + threads == suck + default += ", not \:#{e.message[/uncaught throw \`(\w+?)\'/, 1]}" + rescue ArgumentError => e # 1.9 exception + raise e unless e.message.include?("uncaught throw") + default += ", not #{e.message.split(/ /).last}" + rescue NameError => e # 1.8 exception + raise e unless e.name == sym + default += ", not #{e.name.inspect}" + end + caught = false + end + + assert caught, message(msg) { default } + rescue Assertion + raise + rescue => e + raise UnexpectedError, e + end + + ## + # Captures $stdout and $stderr into strings: + # + # out, err = capture_io do + # puts "Some info" + # warn "You did a bad thing" + # end + # + # assert_match %r%info%, out + # assert_match %r%bad%, err + # + # NOTE: For efficiency, this method uses StringIO and does not + # capture IO for subprocesses. Use #capture_subprocess_io for + # that. + + def capture_io + _synchronize do + begin + captured_stdout, captured_stderr = StringIO.new, StringIO.new + + orig_stdout, orig_stderr = $stdout, $stderr + $stdout, $stderr = captured_stdout, captured_stderr + + yield + + return captured_stdout.string, captured_stderr.string + ensure + $stdout = orig_stdout + $stderr = orig_stderr + end + end + end + + ## + # Captures $stdout and $stderr into strings, using Tempfile to + # ensure that subprocess IO is captured as well. + # + # out, err = capture_subprocess_io do + # system "echo Some info" + # system "echo You did a bad thing 1>&2" + # end + # + # assert_match %r%info%, out + # assert_match %r%bad%, err + # + # NOTE: This method is approximately 10x slower than #capture_io so + # only use it when you need to test the output of a subprocess. 
+ + def capture_subprocess_io + _synchronize do + begin + require "tempfile" + + captured_stdout, captured_stderr = Tempfile.new("out"), Tempfile.new("err") + + orig_stdout, orig_stderr = $stdout.dup, $stderr.dup + $stdout.reopen captured_stdout + $stderr.reopen captured_stderr + + yield + + $stdout.rewind + $stderr.rewind + + return captured_stdout.read, captured_stderr.read + ensure + captured_stdout.unlink + captured_stderr.unlink + $stdout.reopen orig_stdout + $stderr.reopen orig_stderr + + orig_stdout.close + orig_stderr.close + captured_stdout.close + captured_stderr.close + end + end + end + + ## + # Returns details for exception +e+ + + def exception_details e, msg + [ + "#{msg}", + "Class: <#{e.class}>", + "Message: <#{e.message.inspect}>", + "---Backtrace---", + "#{Minitest.filter_backtrace(e.backtrace).join("\n")}", + "---------------", + ].join "\n" + end + + ## + # Fails after a given date (in the local time zone). This allows + # you to put time-bombs in your tests if you need to keep + # something around until a later date lest you forget about it. + + def fail_after y,m,d,msg + flunk msg if Time.now > Time.local(y, m, d) + end + + ## + # Fails with +msg+. + + def flunk msg = nil + msg ||= "Epic Fail!" + assert false, msg + end + + ## + # Returns a proc that will output +msg+ along with the default message. + + def message msg = nil, ending = nil, &default + proc { + msg = msg.call.chomp(".") if Proc === msg + custom_message = "#{msg}.\n" unless msg.nil? or msg.to_s.empty? + "#{custom_message}#{default.call}#{ending || "."}" + } + end + + ## + # used for counting assertions + + def pass _msg = nil + assert true + end + + ## + # Fails if +test+ is truthy. + + def refute test, msg = nil + msg ||= message { "Expected #{mu_pp(test)} to not be truthy" } + assert !test, msg + end + + ## + # Fails if +obj+ is empty. + + def refute_empty obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be empty" } + assert_respond_to obj, :empty? + refute obj.empty?, msg + end + + ## + # Fails if exp == act. + # + # For floats use refute_in_delta. + + def refute_equal exp, act, msg = nil + msg = message(msg) { + "Expected #{mu_pp(act)} to not be equal to #{mu_pp(exp)}" + } + refute exp == act, msg + end + + ## + # For comparing Floats. Fails if +exp+ is within +delta+ of +act+. + # + # refute_in_delta Math::PI, (22.0 / 7.0) + + def refute_in_delta exp, act, delta = 0.001, msg = nil + n = (exp - act).abs + msg = message(msg) { + "Expected |#{exp} - #{act}| (#{n}) to not be <= #{delta}" + } + refute delta >= n, msg + end + + ## + # For comparing Floats. Fails if +exp+ and +act+ have a relative error + # less than +epsilon+. + + def refute_in_epsilon a, b, epsilon = 0.001, msg = nil + refute_in_delta a, b, a * epsilon, msg + end + + ## + # Fails if +collection+ includes +obj+. + + def refute_includes collection, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(collection)} to not include #{mu_pp(obj)}" + } + assert_respond_to collection, :include? + refute collection.include?(obj), msg + end + + ## + # Fails if +obj+ is an instance of +cls+. + + def refute_instance_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to not be an instance of #{cls}" + } + refute obj.instance_of?(cls), msg + end + + ## + # Fails if +obj+ is a kind of +cls+. + + def refute_kind_of cls, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be a kind of #{cls}" } + refute obj.kind_of?(cls), msg + end + + ## + # Fails if +matcher+ =~ +obj+. 
+ + def refute_match matcher, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp matcher} to not match #{mu_pp obj}" } + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + refute matcher =~ obj, msg + end + + ## + # Fails if +obj+ is nil. + + def refute_nil obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be nil" } + refute obj.nil?, msg + end + + ## + # Fails if +o1+ is not +op+ +o2+. Eg: + # + # refute_operator 1, :>, 2 #=> pass + # refute_operator 1, :<, 2 #=> fail + + def refute_operator o1, op, o2 = UNDEFINED, msg = nil + return refute_predicate o1, op, msg if UNDEFINED == o2 + msg = message(msg) { "Expected #{mu_pp(o1)} to not be #{op} #{mu_pp(o2)}" } + refute o1.__send__(op, o2), msg + end + + ## + # Fails if +path+ exists. + + def refute_path_exists path, msg = nil + msg = message(msg) { "Expected path '#{path}' to not exist" } + refute File.exist?(path), msg + end + + ## + # For testing with predicates. + # + # refute_predicate str, :empty? + # + # This is really meant for specs and is front-ended by refute_operator: + # + # str.wont_be :empty? + + def refute_predicate o1, op, msg = nil + msg = message(msg) { "Expected #{mu_pp(o1)} to not be #{op}" } + refute o1.__send__(op), msg + end + + ## + # Fails if +obj+ responds to the message +meth+. + + def refute_respond_to obj, meth, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not respond to #{meth}" } + + refute obj.respond_to?(meth), msg + end + + ## + # Fails if +exp+ is the same (by object identity) as +act+. + + def refute_same exp, act, msg = nil + msg = message(msg) { + data = [mu_pp(act), act.object_id, mu_pp(exp), exp.object_id] + "Expected %s (oid=%d) to not be the same as %s (oid=%d)" % data + } + refute exp.equal?(act), msg + end + + ## + # Skips the current run. If run in verbose-mode, the skipped run + # gets listed at the end of the run but doesn't cause a failure + # exit code. + + def skip msg = nil, bt = caller + msg ||= "Skipped, no message given" + @skip = true + raise Minitest::Skip, msg, bt + end + + ## + # Skips the current run until a given date (in the local time + # zone). This allows you to put some fixes on hold until a later + # date, but still holds you accountable and prevents you from + # forgetting it. + + def skip_until y,m,d,msg + skip msg if Time.now < Time.local(y, m, d) + where = caller.first.split(/:/, 3).first(2).join ":" + warn "Stale skip_until %p at %s" % [msg, where] + end + + ## + # Was this testcase skipped? Meant for #teardown. + + def skipped? 
+ defined?(@skip) and @skip + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/autorun.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/autorun.rb new file mode 100644 index 0000000..9409b04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/autorun.rb @@ -0,0 +1,13 @@ +begin + require "rubygems" + gem "minitest" +rescue Gem::LoadError + # do nothing +end + +require "minitest" +require "minitest/spec" +require "minitest/mock" +require "minitest/hell" if ENV["MT_HELL"] + +Minitest.autorun diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/benchmark.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/benchmark.rb new file mode 100644 index 0000000..f2b2465 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/benchmark.rb @@ -0,0 +1,455 @@ +require "minitest/test" +require "minitest/spec" + +module Minitest + ## + # Subclass Benchmark to create your own benchmark runs. Methods + # starting with "bench_" get executed on a per-class. + # + # See Minitest::Assertions + + class Benchmark < Test + def self.io # :nodoc: + @io + end + + def io # :nodoc: + self.class.io + end + + def self.run reporter, options = {} # :nodoc: + @io = reporter.io + super + end + + def self.runnable_methods # :nodoc: + methods_matching(/^bench_/) + end + + ## + # Returns a set of ranges stepped exponentially from +min+ to + # +max+ by powers of +base+. Eg: + # + # bench_exp(2, 16, 2) # => [2, 4, 8, 16] + + def self.bench_exp min, max, base = 10 + min = (Math.log10(min) / Math.log10(base)).to_i + max = (Math.log10(max) / Math.log10(base)).to_i + + (min..max).map { |m| base ** m }.to_a + end + + ## + # Returns a set of ranges stepped linearly from +min+ to +max+ by + # +step+. Eg: + # + # bench_linear(20, 40, 10) # => [20, 30, 40] + + def self.bench_linear min, max, step = 10 + (min..max).step(step).to_a + rescue LocalJumpError # 1.8.6 + r = []; (min..max).step(step) { |n| r << n }; r + end + + ## + # Specifies the ranges used for benchmarking for that class. + # Defaults to exponential growth from 1 to 10k by powers of 10. + # Override if you need different ranges for your benchmarks. + # + # See also: ::bench_exp and ::bench_linear. + + def self.bench_range + bench_exp 1, 10_000 + end + + ## + # Runs the given +work+, gathering the times of each run. Range + # and times are then passed to a given +validation+ proc. Outputs + # the benchmark name and times in tab-separated format, making it + # easy to paste into a spreadsheet for graphing or further + # analysis. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # validation = proc { |x, y| ... } + # assert_performance validation do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance validation, &work + range = self.class.bench_range + + io.print "#{self.name}" + + times = [] + + range.each do |x| + GC.start + t0 = Minitest.clock_time + instance_exec(x, &work) + t = Minitest.clock_time - t0 + + io.print "\t%9.6f" % t + times << t + end + io.puts + + validation[range, times] + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a constant rate (eg, linear slope == 0) within a given + # +threshold+. Note: because we're testing for a slope of 0, R^2 + # is not a good determining factor for the fit, so the threshold + # is applied against the slope itself. As such, you probably want + # to tighten it from the default. 
+ # + # See https://www.graphpad.com/guides/prism/8/curve-fitting/reg_intepretingnonlinr2.htm + # for more details. + # + # Fit is calculated by #fit_linear. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_constant 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_constant threshold = 0.99, &work + validation = proc do |range, times| + a, b, rr = fit_linear range, times + assert_in_delta 0, b, 1 - threshold + [a, b, rr] + end + + assert_performance validation, &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a exponential curve within a given error +threshold+. + # + # Fit is calculated by #fit_exponential. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_exponential 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_exponential threshold = 0.99, &work + assert_performance validation_for_fit(:exponential, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a logarithmic curve within a given error +threshold+. + # + # Fit is calculated by #fit_logarithmic. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_logarithmic 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_logarithmic threshold = 0.99, &work + assert_performance validation_for_fit(:logarithmic, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a straight line within a given error +threshold+. + # + # Fit is calculated by #fit_linear. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_linear 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_linear threshold = 0.99, &work + assert_performance validation_for_fit(:linear, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered curve + # fit to match a power curve within a given error +threshold+. + # + # Fit is calculated by #fit_power. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_power 0.9999 do |x| + # @obj.algorithm + # end + # end + + def assert_performance_power threshold = 0.99, &work + assert_performance validation_for_fit(:power, threshold), &work + end + + ## + # Takes an array of x/y pairs and calculates the general R^2 value. + # + # See: http://en.wikipedia.org/wiki/Coefficient_of_determination + + def fit_error xys + y_bar = sigma(xys) { |_, y| y } / xys.size.to_f + ss_tot = sigma(xys) { |_, y| (y - y_bar) ** 2 } + ss_err = sigma(xys) { |x, y| (yield(x) - y) ** 2 } + + 1 - (ss_err / ss_tot) + end + + ## + # To fit a functional form: y = ae^(bx). + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFittingExponential.html + + def fit_exponential xs, ys + n = xs.size + xys = xs.zip(ys) + sxlny = sigma(xys) { |x, y| x * Math.log(y) } + slny = sigma(xys) { |_, y| Math.log(y) } + sx2 = sigma(xys) { |x, _| x * x } + sx = sigma xs + + c = n * sx2 - sx ** 2 + a = (slny * sx2 - sx * sxlny) / c + b = ( n * sxlny - sx * slny ) / c + + return Math.exp(a), b, fit_error(xys) { |x| Math.exp(a + b * x) } + end + + ## + # To fit a functional form: y = a + b*ln(x). + # + # Takes x and y values and returns [a, b, r^2]. 
+ # + # See: http://mathworld.wolfram.com/LeastSquaresFittingLogarithmic.html + + def fit_logarithmic xs, ys + n = xs.size + xys = xs.zip(ys) + slnx2 = sigma(xys) { |x, _| Math.log(x) ** 2 } + slnx = sigma(xys) { |x, _| Math.log(x) } + sylnx = sigma(xys) { |x, y| y * Math.log(x) } + sy = sigma(xys) { |_, y| y } + + c = n * slnx2 - slnx ** 2 + b = ( n * sylnx - sy * slnx ) / c + a = (sy - b * slnx) / n + + return a, b, fit_error(xys) { |x| a + b * Math.log(x) } + end + + ## + # Fits the functional form: a + bx. + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFitting.html + + def fit_linear xs, ys + n = xs.size + xys = xs.zip(ys) + sx = sigma xs + sy = sigma ys + sx2 = sigma(xs) { |x| x ** 2 } + sxy = sigma(xys) { |x, y| x * y } + + c = n * sx2 - sx**2 + a = (sy * sx2 - sx * sxy) / c + b = ( n * sxy - sx * sy ) / c + + return a, b, fit_error(xys) { |x| a + b * x } + end + + ## + # To fit a functional form: y = ax^b. + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html + + def fit_power xs, ys + n = xs.size + xys = xs.zip(ys) + slnxlny = sigma(xys) { |x, y| Math.log(x) * Math.log(y) } + slnx = sigma(xs) { |x | Math.log(x) } + slny = sigma(ys) { | y| Math.log(y) } + slnx2 = sigma(xs) { |x | Math.log(x) ** 2 } + + b = (n * slnxlny - slnx * slny) / (n * slnx2 - slnx ** 2) + a = (slny - b * slnx) / n + + return Math.exp(a), b, fit_error(xys) { |x| (Math.exp(a) * (x ** b)) } + end + + ## + # Enumerates over +enum+ mapping +block+ if given, returning the + # sum of the result. Eg: + # + # sigma([1, 2, 3]) # => 1 + 2 + 3 => 6 + # sigma([1, 2, 3]) { |n| n ** 2 } # => 1 + 4 + 9 => 14 + + def sigma enum, &block + enum = enum.map(&block) if block + enum.inject { |sum, n| sum + n } + end + + ## + # Returns a proc that calls the specified fit method and asserts + # that the error is within a tolerable threshold. + + def validation_for_fit msg, threshold + proc do |range, times| + a, b, rr = send "fit_#{msg}", range, times + assert_operator rr, :>=, threshold + [a, b, rr] + end + end + end +end + +module Minitest + ## + # The spec version of Minitest::Benchmark. + + class BenchSpec < Benchmark + extend Minitest::Spec::DSL + + ## + # This is used to define a new benchmark method. You usually don't + # use this directly and is intended for those needing to write new + # performance curve fits (eg: you need a specific polynomial fit). + # + # See ::bench_performance_linear for an example of how to use this. + + def self.bench name, &block + define_method "bench_#{name.gsub(/\W+/, "_")}", &block + end + + ## + # Specifies the ranges used for benchmarking for that class. + # + # bench_range do + # bench_exp(2, 16, 2) + # end + # + # See Minitest::Benchmark#bench_range for more details. + + def self.bench_range &block + return super unless block + + meta = (class << self; self; end) + meta.send :define_method, "bench_range", &block + end + + ## + # Create a benchmark that verifies that the performance is linear. + # + # describe "my class Bench" do + # bench_performance_linear "fast_algorithm", 0.9999 do |n| + # @obj.fast_algorithm(n) + # end + # end + + def self.bench_performance_linear name, threshold = 0.99, &work + bench name do + assert_performance_linear threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is constant. + # + # describe "my class Bench" do + # bench_performance_constant "zoom_algorithm!" 
do |n| + # @obj.zoom_algorithm!(n) + # end + # end + + def self.bench_performance_constant name, threshold = 0.99, &work + bench name do + assert_performance_constant threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is exponential. + # + # describe "my class Bench" do + # bench_performance_exponential "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_exponential name, threshold = 0.99, &work + bench name do + assert_performance_exponential threshold, &work + end + end + + + ## + # Create a benchmark that verifies that the performance is logarithmic. + # + # describe "my class Bench" do + # bench_performance_logarithmic "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_logarithmic name, threshold = 0.99, &work + bench name do + assert_performance_logarithmic threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is power. + # + # describe "my class Bench" do + # bench_performance_power "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_power name, threshold = 0.99, &work + bench name do + assert_performance_power threshold, &work + end + end + end + + Minitest::Spec.register_spec_type(/Bench(mark)?$/, Minitest::BenchSpec) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/expectations.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/expectations.rb new file mode 100644 index 0000000..fb4ec4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/expectations.rb @@ -0,0 +1,303 @@ +## +# It's where you hide your "assertions". +# +# Please note, because of the way that expectations are implemented, +# all expectations (eg must_equal) are dependent upon a thread local +# variable +:current_spec+. If your specs rely on mixing threads into +# the specs themselves, you're better off using assertions or the new +# _(value) wrapper. For example: +# +# it "should still work in threads" do +# my_threaded_thingy do +# (1+1).must_equal 2 # bad +# assert_equal 2, 1+1 # good +# _(1 + 1).must_equal 2 # good +# value(1 + 1).must_equal 2 # good, also #expect +# _ { 1 + "1" }.must_raise TypeError # good +# end +# end + +module Minitest::Expectations + + ## + # See Minitest::Assertions#assert_empty. 
+ # + # _(collection).must_be_empty + # + # :method: must_be_empty + + infect_an_assertion :assert_empty, :must_be_empty, :unary + + ## + # See Minitest::Assertions#assert_equal + # + # _(a).must_equal b + # + # :method: must_equal + + infect_an_assertion :assert_equal, :must_equal + + ## + # See Minitest::Assertions#assert_in_delta + # + # _(n).must_be_close_to m [, delta] + # + # :method: must_be_close_to + + infect_an_assertion :assert_in_delta, :must_be_close_to + + infect_an_assertion :assert_in_delta, :must_be_within_delta # :nodoc: + + ## + # See Minitest::Assertions#assert_in_epsilon + # + # _(n).must_be_within_epsilon m [, epsilon] + # + # :method: must_be_within_epsilon + + infect_an_assertion :assert_in_epsilon, :must_be_within_epsilon + + ## + # See Minitest::Assertions#assert_includes + # + # _(collection).must_include obj + # + # :method: must_include + + infect_an_assertion :assert_includes, :must_include, :reverse + + ## + # See Minitest::Assertions#assert_instance_of + # + # _(obj).must_be_instance_of klass + # + # :method: must_be_instance_of + + infect_an_assertion :assert_instance_of, :must_be_instance_of + + ## + # See Minitest::Assertions#assert_kind_of + # + # _(obj).must_be_kind_of mod + # + # :method: must_be_kind_of + + infect_an_assertion :assert_kind_of, :must_be_kind_of + + ## + # See Minitest::Assertions#assert_match + # + # _(a).must_match b + # + # :method: must_match + + infect_an_assertion :assert_match, :must_match + + ## + # See Minitest::Assertions#assert_nil + # + # _(obj).must_be_nil + # + # :method: must_be_nil + + infect_an_assertion :assert_nil, :must_be_nil, :unary + + ## + # See Minitest::Assertions#assert_operator + # + # _(n).must_be :<=, 42 + # + # This can also do predicates: + # + # _(str).must_be :empty? + # + # :method: must_be + + infect_an_assertion :assert_operator, :must_be, :reverse + + ## + # See Minitest::Assertions#assert_output + # + # _ { ... }.must_output out_or_nil [, err] + # + # :method: must_output + + infect_an_assertion :assert_output, :must_output, :block + + ## + # See Minitest::Assertions#assert_raises + # + # _ { ... }.must_raise exception + # + # :method: must_raise + + infect_an_assertion :assert_raises, :must_raise, :block + + ## + # See Minitest::Assertions#assert_respond_to + # + # _(obj).must_respond_to msg + # + # :method: must_respond_to + + infect_an_assertion :assert_respond_to, :must_respond_to, :reverse + + ## + # See Minitest::Assertions#assert_same + # + # _(a).must_be_same_as b + # + # :method: must_be_same_as + + infect_an_assertion :assert_same, :must_be_same_as + + ## + # See Minitest::Assertions#assert_silent + # + # _ { ... }.must_be_silent + # + # :method: must_be_silent + + infect_an_assertion :assert_silent, :must_be_silent, :block + + ## + # See Minitest::Assertions#assert_throws + # + # _ { ... 
}.must_throw sym + # + # :method: must_throw + + infect_an_assertion :assert_throws, :must_throw, :block + + ## + # See Minitest::Assertions#assert_path_exists + # + # _(some_path).path_must_exist + # + # :method: path_must_exist + + infect_an_assertion :assert_path_exists, :path_must_exist, :unary + + ## + # See Minitest::Assertions#refute_path_exists + # + # _(some_path).path_wont_exist + # + # :method: path_wont_exist + + infect_an_assertion :refute_path_exists, :path_wont_exist, :unary + + ## + # See Minitest::Assertions#refute_empty + # + # _(collection).wont_be_empty + # + # :method: wont_be_empty + + infect_an_assertion :refute_empty, :wont_be_empty, :unary + + ## + # See Minitest::Assertions#refute_equal + # + # _(a).wont_equal b + # + # :method: wont_equal + + infect_an_assertion :refute_equal, :wont_equal + + ## + # See Minitest::Assertions#refute_in_delta + # + # _(n).wont_be_close_to m [, delta] + # + # :method: wont_be_close_to + + infect_an_assertion :refute_in_delta, :wont_be_close_to + + infect_an_assertion :refute_in_delta, :wont_be_within_delta # :nodoc: + + ## + # See Minitest::Assertions#refute_in_epsilon + # + # _(n).wont_be_within_epsilon m [, epsilon] + # + # :method: wont_be_within_epsilon + + infect_an_assertion :refute_in_epsilon, :wont_be_within_epsilon + + ## + # See Minitest::Assertions#refute_includes + # + # _(collection).wont_include obj + # + # :method: wont_include + + infect_an_assertion :refute_includes, :wont_include, :reverse + + ## + # See Minitest::Assertions#refute_instance_of + # + # _(obj).wont_be_instance_of klass + # + # :method: wont_be_instance_of + + infect_an_assertion :refute_instance_of, :wont_be_instance_of + + ## + # See Minitest::Assertions#refute_kind_of + # + # _(obj).wont_be_kind_of mod + # + # :method: wont_be_kind_of + + infect_an_assertion :refute_kind_of, :wont_be_kind_of + + ## + # See Minitest::Assertions#refute_match + # + # _(a).wont_match b + # + # :method: wont_match + + infect_an_assertion :refute_match, :wont_match + + ## + # See Minitest::Assertions#refute_nil + # + # _(obj).wont_be_nil + # + # :method: wont_be_nil + + infect_an_assertion :refute_nil, :wont_be_nil, :unary + + ## + # See Minitest::Assertions#refute_operator + # + # _(n).wont_be :<=, 42 + # + # This can also do predicates: + # + # str.wont_be :empty? + # + # :method: wont_be + + infect_an_assertion :refute_operator, :wont_be, :reverse + + ## + # See Minitest::Assertions#refute_respond_to + # + # _(obj).wont_respond_to msg + # + # :method: wont_respond_to + + infect_an_assertion :refute_respond_to, :wont_respond_to, :reverse + + ## + # See Minitest::Assertions#refute_same + # + # _(a).wont_be_same_as b + # + # :method: wont_be_same_as + + infect_an_assertion :refute_same, :wont_be_same_as +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/hell.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/hell.rb new file mode 100644 index 0000000..73c88ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/hell.rb @@ -0,0 +1,11 @@ +require "minitest/parallel" + +class Minitest::Test + parallelize_me! 
+end + +begin + require "minitest/proveit" +rescue LoadError + warn "NOTE: `gem install minitest-proveit` for even more hellish tests" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/mock.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/mock.rb new file mode 100644 index 0000000..39cfc24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/mock.rb @@ -0,0 +1,240 @@ +class MockExpectationError < StandardError; end # :nodoc: + +module Minitest # :nodoc: + + ## + # A simple and clean mock object framework. + # + # All mock objects are an instance of Mock + + class Mock + alias :__respond_to? :respond_to? + + overridden_methods = %w[ + === + class + inspect + instance_eval + instance_variables + object_id + public_send + respond_to_missing? + send + to_s + ] + + instance_methods.each do |m| + undef_method m unless overridden_methods.include?(m.to_s) || m =~ /^__/ + end + + overridden_methods.map(&:to_sym).each do |method_id| + define_method method_id do |*args, &b| + if @expected_calls.key? method_id then + method_missing(method_id, *args, &b) + else + super(*args, &b) + end + end + end + + def initialize delegator = nil # :nodoc: + @delegator = delegator + @expected_calls = Hash.new { |calls, name| calls[name] = [] } + @actual_calls = Hash.new { |calls, name| calls[name] = [] } + end + + ## + # Expect that method +name+ is called, optionally with +args+ or a + # +blk+, and returns +retval+. + # + # @mock.expect(:meaning_of_life, 42) + # @mock.meaning_of_life # => 42 + # + # @mock.expect(:do_something_with, true, [some_obj, true]) + # @mock.do_something_with(some_obj, true) # => true + # + # @mock.expect(:do_something_else, true) do |a1, a2| + # a1 == "buggs" && a2 == :bunny + # end + # + # +args+ is compared to the expected args using case equality (ie, the + # '===' operator), allowing for less specific expectations. + # + # @mock.expect(:uses_any_string, true, [String]) + # @mock.uses_any_string("foo") # => true + # @mock.verify # => true + # + # @mock.expect(:uses_one_string, true, ["foo"]) + # @mock.uses_one_string("bar") # => raises MockExpectationError + # + # If a method will be called multiple times, specify a new expect for each one. + # They will be used in the order you define them. + # + # @mock.expect(:ordinal_increment, 'first') + # @mock.expect(:ordinal_increment, 'second') + # + # @mock.ordinal_increment # => 'first' + # @mock.ordinal_increment # => 'second' + # @mock.ordinal_increment # => raises MockExpectationError "No more expects available for :ordinal_increment" + # + + def expect name, retval, args = [], &blk + name = name.to_sym + + if block_given? + raise ArgumentError, "args ignored when block given" unless args.empty? + @expected_calls[name] << { :retval => retval, :block => blk } + else + raise ArgumentError, "args must be an array" unless Array === args + @expected_calls[name] << { :retval => retval, :args => args } + end + self + end + + def __call name, data # :nodoc: + case data + when Hash then + "#{name}(#{data[:args].inspect[1..-2]}) => #{data[:retval].inspect}" + else + data.map { |d| __call name, d }.join ", " + end + end + + ## + # Verify that all methods were called as expected. Raises + # +MockExpectationError+ if the mock object was not called as + # expected. 
+ + def verify + @expected_calls.each do |name, expected| + actual = @actual_calls.fetch(name, nil) + raise MockExpectationError, "expected #{__call name, expected[0]}" unless actual + raise MockExpectationError, "expected #{__call name, expected[actual.size]}, got [#{__call name, actual}]" if + actual.size < expected.size + end + true + end + + def method_missing sym, *args, &block # :nodoc: + unless @expected_calls.key?(sym) then + if @delegator && @delegator.respond_to?(sym) + return @delegator.public_send(sym, *args, &block) + else + raise NoMethodError, "unmocked method %p, expected one of %p" % + [sym, @expected_calls.keys.sort_by(&:to_s)] + end + end + + index = @actual_calls[sym].length + expected_call = @expected_calls[sym][index] + + unless expected_call then + raise MockExpectationError, "No more expects available for %p: %p" % + [sym, args] + end + + expected_args, retval, val_block = + expected_call.values_at(:args, :retval, :block) + + if val_block then + # keep "verify" happy + @actual_calls[sym] << expected_call + + raise MockExpectationError, "mocked method %p failed block w/ %p" % + [sym, args] unless val_block.call(*args, &block) + + return retval + end + + if expected_args.size != args.size then + raise ArgumentError, "mocked method %p expects %d arguments, got %d" % + [sym, expected_args.size, args.size] + end + + zipped_args = expected_args.zip(args) + fully_matched = zipped_args.all? { |mod, a| + mod === a or mod == a + } + + unless fully_matched then + raise MockExpectationError, "mocked method %p called with unexpected arguments %p" % + [sym, args] + end + + @actual_calls[sym] << { + :retval => retval, + :args => zipped_args.map! { |mod, a| mod === a ? mod : a }, + } + + retval + end + + def respond_to? sym, include_private = false # :nodoc: + return true if @expected_calls.key? sym.to_sym + return true if @delegator && @delegator.respond_to?(sym, include_private) + __respond_to?(sym, include_private) + end + end +end + +module Minitest::Assertions + ## + # Assert that the mock verifies correctly. + + def assert_mock mock + assert mock.verify + end +end + +## +# Object extensions for Minitest::Mock. + +class Object + + ## + # Add a temporary stubbed method replacing +name+ for the duration + # of the +block+. If +val_or_callable+ responds to #call, then it + # returns the result of calling it, otherwise returns the value + # as-is. If stubbed method yields a block, +block_args+ will be + # passed along. Cleans up the stub at the end of the +block+. The + # method +name+ must exist before stubbing. + # + # def test_stale_eh + # obj_under_test = Something.new + # refute obj_under_test.stale? + # + # Time.stub :now, Time.at(0) do + # assert obj_under_test.stale? + # end + # end + # + + def stub name, val_or_callable, *block_args + new_name = "__minitest_stub__#{name}" + + metaclass = class << self; self; end + + if respond_to? name and not methods.map(&:to_s).include? name.to_s then + metaclass.send :define_method, name do |*args| + super(*args) + end + end + + metaclass.send :alias_method, new_name, name + + metaclass.send :define_method, name do |*args, &blk| + if val_or_callable.respond_to? 
:call then + val_or_callable.call(*args, &blk) + else + blk.call(*block_args) if blk + val_or_callable + end + end + + yield self + ensure + metaclass.send :undef_method, name + metaclass.send :alias_method, name, new_name + metaclass.send :undef_method, new_name + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/parallel.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/parallel.rb new file mode 100644 index 0000000..40ac709 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/parallel.rb @@ -0,0 +1,70 @@ +module Minitest + module Parallel #:nodoc: + + ## + # The engine used to run multiple tests in parallel. + + class Executor + + ## + # The size of the pool of workers. + + attr_reader :size + + ## + # Create a parallel test executor of with +size+ workers. + + def initialize size + @size = size + @queue = Queue.new + @pool = nil + end + + ## + # Start the executor + + def start + @pool = size.times.map { + Thread.new(@queue) do |queue| + Thread.current.abort_on_exception = true + while (job = queue.pop) + klass, method, reporter = job + reporter.synchronize { reporter.prerecord klass, method } + result = Minitest.run_one_method klass, method + reporter.synchronize { reporter.record result } + end + end + } + end + + ## + # Add a job to the queue + + def << work; @queue << work; end + + ## + # Shuts down the pool of workers by signalling them to quit and + # waiting for them all to finish what they're currently working + # on. + + def shutdown + size.times { @queue << nil } + @pool.each(&:join) + end + end + + module Test # :nodoc: + def _synchronize; Minitest::Test.io_lock.synchronize { yield }; end # :nodoc: + + module ClassMethods # :nodoc: + def run_one_method klass, method_name, reporter + Minitest.parallel_executor << [klass, method_name, reporter] + end + + def test_order + :parallel + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/pride.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/pride.rb new file mode 100644 index 0000000..f3b8e47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/pride.rb @@ -0,0 +1,4 @@ +require "minitest" + +Minitest.load_plugins +Minitest::PrideIO.pride! diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/pride_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/pride_plugin.rb new file mode 100644 index 0000000..aeef2b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/pride_plugin.rb @@ -0,0 +1,142 @@ +require "minitest" + +module Minitest + def self.plugin_pride_options opts, _options # :nodoc: + opts.on "-p", "--pride", "Pride. Show your testing pride!" do + PrideIO.pride! + end + end + + def self.plugin_pride_init options # :nodoc: + if PrideIO.pride? then + klass = ENV["TERM"] =~ /^xterm|-256color$/ ? PrideLOL : PrideIO + io = klass.new options[:io] + + self.reporter.reporters.grep(Minitest::Reporter).each do |rep| + rep.io = io if rep.io.tty? + end + end + end + + ## + # Show your testing pride! + + class PrideIO + ## + # Activate the pride plugin. Called from both -p option and minitest/pride + + def self.pride! + @pride = true + end + + ## + # Are we showing our testing pride? + + def self.pride? + @pride ||= false + end + + # Start an escape sequence + ESC = "\e[" + + # End the escape sequence + NND = "#{ESC}0m" + + # The IO we're going to pipe through. 
+ attr_reader :io + + def initialize io # :nodoc: + @io = io + # stolen from /System/Library/Perl/5.10.0/Term/ANSIColor.pm + # also reference http://en.wikipedia.org/wiki/ANSI_escape_code + @colors ||= (31..36).to_a + @size = @colors.size + @index = 0 + end + + ## + # Wrap print to colorize the output. + + def print o + case o + when "." then + io.print pride o + when "E", "F" then + io.print "#{ESC}41m#{ESC}37m#{o}#{NND}" + when "S" then + io.print pride o + else + io.print o + end + end + + def puts *o # :nodoc: + o.map! { |s| + s.to_s.sub(/Finished/) { + @index = 0 + "Fabulous run".split(//).map { |c| + pride(c) + }.join + } + } + + io.puts(*o) + end + + ## + # Color a string. + + def pride string + string = "*" if string == "." + c = @colors[@index % @size] + @index += 1 + "#{ESC}#{c}m#{string}#{NND}" + end + + def method_missing msg, *args # :nodoc: + io.send(msg, *args) + end + end + + ## + # If you thought the PrideIO was colorful... + # + # (Inspired by lolcat, but with clean math) + + class PrideLOL < PrideIO + PI_3 = Math::PI / 3 # :nodoc: + + def initialize io # :nodoc: + # walk red, green, and blue around a circle separated by equal thirds. + # + # To visualize, type this into wolfram-alpha: + # + # plot (3*sin(x)+3), (3*sin(x+2*pi/3)+3), (3*sin(x+4*pi/3)+3) + + # 6 has wide pretty gradients. 3 == lolcat, about half the width + @colors = (0...(6 * 7)).map { |n| + n *= 1.0 / 6 + r = (3 * Math.sin(n ) + 3).to_i + g = (3 * Math.sin(n + 2 * PI_3) + 3).to_i + b = (3 * Math.sin(n + 4 * PI_3) + 3).to_i + + # Then we take rgb and encode them in a single number using base 6. + # For some mysterious reason, we add 16... to clear the bottom 4 bits? + # Yes... they're ugly. + + 36 * r + 6 * g + b + 16 + } + + super + end + + ## + # Make the string even more colorful. Damnit. + + def pride string + c = @colors[@index % @size] + @index += 1 + "#{ESC}38;5;#{c}m#{string}#{NND}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/spec.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/spec.rb new file mode 100644 index 0000000..12f1975 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/spec.rb @@ -0,0 +1,342 @@ +require "minitest/test" + +class Module # :nodoc: + def infect_an_assertion meth, new_name, dont_flip = false # :nodoc: + block = dont_flip == :block + dont_flip = false if block + + # warn "%-22p -> %p %p" % [meth, new_name, dont_flip] + self.class_eval <<-EOM, __FILE__, __LINE__ + 1 + def #{new_name} *args + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + warn "DEPRECATED: global use of #{new_name} from #\{where}. Use _(obj).#{new_name} instead. This will fail in Minitest 6." + Minitest::Expectation.new(self, Minitest::Spec.current).#{new_name}(*args) + end + EOM + + Minitest::Expectation.class_eval <<-EOM, __FILE__, __LINE__ + 1 + def #{new_name} *args + raise "Calling ##{new_name} outside of test." unless ctx + case + when #{!!dont_flip} then + ctx.#{meth}(target, *args) + when #{block} && Proc === target then + ctx.#{meth}(*args, &target) + else + ctx.#{meth}(args.first, target, *args[1..-1]) + end + end + EOM + end +end + +Minitest::Expectation = Struct.new :target, :ctx # :nodoc: + +## +# Kernel extensions for minitest + +module Kernel + ## + # Describe a series of expectations for a given target +desc+. + # + # Defines a test class subclassing from either Minitest::Spec or + # from the surrounding describe's class. 
The surrounding class may + # subclass Minitest::Spec manually in order to easily share code: + # + # class MySpec < Minitest::Spec + # # ... shared code ... + # end + # + # class TestStuff < MySpec + # it "does stuff" do + # # shared code available here + # end + # describe "inner stuff" do + # it "still does stuff" do + # # ...and here + # end + # end + # end + # + # For more information on getting started with writing specs, see: + # + # http://www.rubyinside.com/a-minitestspec-tutorial-elegant-spec-style-testing-that-comes-with-ruby-5354.html + # + # For some suggestions on how to improve your specs, try: + # + # http://betterspecs.org + # + # but do note that several items there are debatable or specific to + # rspec. + # + # For more information about expectations, see Minitest::Expectations. + + def describe desc, *additional_desc, &block # :doc: + stack = Minitest::Spec.describe_stack + name = [stack.last, desc, *additional_desc].compact.join("::") + sclas = stack.last || if Class === self && kind_of?(Minitest::Spec::DSL) then + self + else + Minitest::Spec.spec_type desc, *additional_desc + end + + cls = sclas.create name, desc + + stack.push cls + cls.class_eval(&block) + stack.pop + cls + end + private :describe +end + +## +# Minitest::Spec -- The faster, better, less-magical spec framework! +# +# For a list of expectations, see Minitest::Expectations. + +class Minitest::Spec < Minitest::Test + + def self.current # :nodoc: + Thread.current[:current_spec] + end + + def initialize name # :nodoc: + super + Thread.current[:current_spec] = self + end + + ## + # Oh look! A Minitest::Spec::DSL module! Eat your heart out DHH. + + module DSL + ## + # Contains pairs of matchers and Spec classes to be used to + # calculate the superclass of a top-level describe. This allows for + # automatically customizable spec types. + # + # See: register_spec_type and spec_type + + TYPES = [[//, Minitest::Spec]] + + ## + # Register a new type of spec that matches the spec's description. + # This method can take either a Regexp and a spec class or a spec + # class and a block that takes the description and returns true if + # it matches. + # + # Eg: + # + # register_spec_type(/Controller$/, Minitest::Spec::Rails) + # + # or: + # + # register_spec_type(Minitest::Spec::RailsModel) do |desc| + # desc.superclass == ActiveRecord::Base + # end + + def register_spec_type *args, &block + if block then + matcher, klass = block, args.first + else + matcher, klass = *args + end + TYPES.unshift [matcher, klass] + end + + ## + # Figure out the spec class to use based on a spec's description. Eg: + # + # spec_type("BlahController") # => Minitest::Spec::Rails + + def spec_type desc, *additional + TYPES.find { |matcher, _klass| + if matcher.respond_to? :call then + matcher.call desc, *additional + else + matcher === desc.to_s + end + }.last + end + + def describe_stack # :nodoc: + Thread.current[:describe_stack] ||= [] + end + + def children # :nodoc: + @children ||= [] + end + + def nuke_test_methods! # :nodoc: + self.public_instance_methods.grep(/^test_/).each do |name| + self.send :undef_method, name + end + end + + ## + # Define a 'before' action. Inherits the way normal methods should. + # + # NOTE: +type+ is ignored and is only there to make porting easier. + # + # Equivalent to Minitest::Test#setup. + + def before _type = nil, &block + define_method :setup do + super() + self.instance_eval(&block) + end + end + + ## + # Define an 'after' action. Inherits the way normal methods should. 
+ # + # NOTE: +type+ is ignored and is only there to make porting easier. + # + # Equivalent to Minitest::Test#teardown. + + def after _type = nil, &block + define_method :teardown do + self.instance_eval(&block) + super() + end + end + + ## + # Define an expectation with name +desc+. Name gets morphed to a + # proper test method name. For some freakish reason, people who + # write specs don't like class inheritance, so this goes way out of + # its way to make sure that expectations aren't inherited. + # + # This is also aliased to #specify and doesn't require a +desc+ arg. + # + # Hint: If you _do_ want inheritance, use minitest/test. You can mix + # and match between assertions and expectations as much as you want. + + def it desc = "anonymous", &block + block ||= proc { skip "(no tests defined)" } + + @specs ||= 0 + @specs += 1 + + name = "test_%04d_%s" % [ @specs, desc ] + + undef_klasses = self.children.reject { |c| c.public_method_defined? name } + + define_method name, &block + + undef_klasses.each do |undef_klass| + undef_klass.send :undef_method, name + end + + name + end + + ## + # Essentially, define an accessor for +name+ with +block+. + # + # Why use let instead of def? I honestly don't know. + + def let name, &block + name = name.to_s + pre, post = "let '#{name}' cannot ", ". Please use another name." + methods = Minitest::Spec.instance_methods.map(&:to_s) - %w[subject] + raise ArgumentError, "#{pre}begin with 'test'#{post}" if + name =~ /\Atest/ + raise ArgumentError, "#{pre}override a method in Minitest::Spec#{post}" if + methods.include? name + + define_method name do + @_memoized ||= {} + @_memoized.fetch(name) { |k| @_memoized[k] = instance_eval(&block) } + end + end + + ## + # Another lazy man's accessor generator. Made even more lazy by + # setting the name for you to +subject+. + + def subject &block + let :subject, &block + end + + def create name, desc # :nodoc: + cls = Class.new(self) do + @name = name + @desc = desc + + nuke_test_methods! + end + + children << cls + + cls + end + + def name # :nodoc: + defined?(@name) ? @name : super + end + + def to_s # :nodoc: + name # Can't alias due to 1.8.7, not sure why + end + + attr_reader :desc # :nodoc: + alias :specify :it + + ## + # Rdoc... why are you so dumb? + + module InstanceMethods + ## + # Takes a value or a block and returns a value monad that has + # all of Expectations methods available to it. + # + # _(1 + 1).must_equal 2 + # + # And for blocks: + # + # _ { 1 + "1" }.must_raise TypeError + # + # This method of expectation-based testing is preferable to + # straight-expectation methods (on Object) because it stores its + # test context, bypassing our hacky use of thread-local variables. + # + # NOTE: At some point, the methods on Object will be deprecated + # and then removed. 
+ # + # It is also aliased to #value and #expect for your aesthetic + # pleasure: + # + # _(1 + 1).must_equal 2 + # value(1 + 1).must_equal 2 + # expect(1 + 1).must_equal 2 + + def _ value = nil, &block + Minitest::Expectation.new block || value, self + end + + alias value _ + alias expect _ + + def before_setup # :nodoc: + super + Thread.current[:current_spec] = self + end + end + + def self.extended obj # :nodoc: + obj.send :include, InstanceMethods + end + end + + extend DSL + + TYPES = DSL::TYPES # :nodoc: +end + +require "minitest/expectations" + +class Object # :nodoc: + include Minitest::Expectations unless ENV["MT_NO_EXPECTATIONS"] +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/test.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/test.rb new file mode 100644 index 0000000..0993bc4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/test.rb @@ -0,0 +1,220 @@ +require "minitest" unless defined? Minitest::Runnable + +module Minitest + ## + # Subclass Test to create your own tests. Typically you'll want a + # Test subclass per implementation class. + # + # See Minitest::Assertions + + class Test < Runnable + require "minitest/assertions" + include Minitest::Assertions + include Minitest::Reportable + + def class_name # :nodoc: + self.class.name # for Minitest::Reportable + end + + PASSTHROUGH_EXCEPTIONS = [NoMemoryError, SignalException, SystemExit] # :nodoc: + + # :stopdoc: + class << self; attr_accessor :io_lock; end + self.io_lock = Mutex.new + # :startdoc: + + ## + # Call this at the top of your tests when you absolutely + # positively need to have ordered tests. In doing so, you're + # admitting that you suck and your tests are weak. + + def self.i_suck_and_my_tests_are_order_dependent! + class << self + undef_method :test_order if method_defined? :test_order + define_method :test_order do :alpha end + end + end + + ## + # Make diffs for this Test use #pretty_inspect so that diff + # in assert_equal can have more details. NOTE: this is much slower + # than the regular inspect but much more usable for complex + # objects. + + def self.make_my_diffs_pretty! + require "pp" + + define_method :mu_pp, &:pretty_inspect + end + + ## + # Call this at the top of your tests when you want to run your + # tests in parallel. In doing so, you're admitting that you rule + # and your tests are awesome. + + def self.parallelize_me! + include Minitest::Parallel::Test + extend Minitest::Parallel::Test::ClassMethods + end + + ## + # Returns all instance methods starting with "test_". Based on + # #test_order, the methods are either sorted, randomized + # (default), or run in parallel. + + def self.runnable_methods + methods = methods_matching(/^test_/) + + case self.test_order + when :random, :parallel then + max = methods.size + methods.sort.sort_by { rand max } + when :alpha, :sorted then + methods.sort + else + raise "Unknown test_order: #{self.test_order.inspect}" + end + end + + ## + # Defines the order to run tests (:random by default). Override + # this or use a convenience method to change it for your tests. + + def self.test_order + :random + end + + TEARDOWN_METHODS = %w[ before_teardown teardown after_teardown ] # :nodoc: + + ## + # Runs a single test with setup/teardown hooks. 
+ + def run + with_info_handler do + time_it do + capture_exceptions do + before_setup; setup; after_setup + + self.send self.name + end + + TEARDOWN_METHODS.each do |hook| + capture_exceptions do + self.send hook + end + end + end + end + + Result.from self # per contract + end + + ## + # Provides before/after hooks for setup and teardown. These are + # meant for library writers, NOT for regular test authors. See + # #before_setup for an example. + + module LifecycleHooks + + ## + # Runs before every test, before setup. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # As a simplistic example: + # + # module MyMinitestPlugin + # def before_setup + # super + # # ... stuff to do before setup is run + # end + # + # def after_setup + # # ... stuff to do after setup is run + # super + # end + # + # def before_teardown + # super + # # ... stuff to do before teardown is run + # end + # + # def after_teardown + # # ... stuff to do after teardown is run + # super + # end + # end + # + # class MiniTest::Test + # include MyMinitestPlugin + # end + + def before_setup; end + + ## + # Runs before every test. Use this to set up before each test + # run. + + def setup; end + + ## + # Runs before every test, after setup. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def after_setup; end + + ## + # Runs after every test, before teardown. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def before_teardown; end + + ## + # Runs after every test. Use this to clean up after each test + # run. + + def teardown; end + + ## + # Runs after every test, after teardown. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def after_teardown; end + end # LifecycleHooks + + def capture_exceptions # :nodoc: + yield + rescue *PASSTHROUGH_EXCEPTIONS + raise + rescue Assertion => e + self.failures << e + rescue Exception => e + self.failures << UnexpectedError.new(e) + end + + def with_info_handler &block # :nodoc: + t0 = Minitest.clock_time + + handler = lambda do + warn "\nCurrent: %s#%s %.2fs" % [self.class, self.name, Minitest.clock_time - t0] + end + + self.class.on_signal ::Minitest.info_signal, handler, &block + end + + include LifecycleHooks + include Guard + extend Guard + end # Test +end + +require "minitest/unit" unless defined?(MiniTest) # compatibility layer only diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/unit.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/unit.rb new file mode 100644 index 0000000..9a4eadf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/lib/minitest/unit.rb @@ -0,0 +1,45 @@ +# :stopdoc: + +unless defined?(Minitest) then + # all of this crap is just to avoid circular requires and is only + # needed if a user requires "minitest/unit" directly instead of + # "minitest/autorun", so we also warn + + from = caller.reject { |s| s =~ /rubygems/ }.join("\n ") + warn "Warning: you should require 'minitest/autorun' instead." 
+ warn %(Warning: or add 'gem "minitest"' before 'require "minitest/autorun"') + warn "From:\n #{from}" + + module Minitest; end + MiniTest = Minitest # prevents minitest.rb from requiring back to us + require "minitest" +end + +MiniTest = Minitest unless defined?(MiniTest) + +module Minitest + class Unit + VERSION = Minitest::VERSION + class TestCase < Minitest::Test + def self.inherited klass # :nodoc: + from = caller.first + warn "MiniTest::Unit::TestCase is now Minitest::Test. From #{from}" + super + end + end + + def self.autorun # :nodoc: + from = caller.first + warn "MiniTest::Unit.autorun is now Minitest.autorun. From #{from}" + Minitest.autorun + end + + def self.after_tests &b # :nodoc: + from = caller.first + warn "MiniTest::Unit.after_tests is now Minitest.after_run. From #{from}" + Minitest.after_run(&b) + end + end +end + +# :startdoc: diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/metametameta.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/metametameta.rb new file mode 100644 index 0000000..6e75b9d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/metametameta.rb @@ -0,0 +1,136 @@ +require "tempfile" +require "stringio" +require "minitest/autorun" + +class Minitest::Test + def clean s + s.gsub(/^ {6}/, "") + end + + def with_empty_backtrace_filter + original = Minitest.backtrace_filter + + obj = Minitest::BacktraceFilter.new + def obj.filter _bt + [] + end + + Minitest::Test.io_lock.synchronize do # try not to trounce in parallel + begin + Minitest.backtrace_filter = obj + yield + ensure + Minitest.backtrace_filter = original + end + end + end +end + + +class FakeNamedTest < Minitest::Test + @@count = 0 + + def self.name + @fake_name ||= begin + @@count += 1 + "FakeNamedTest%02d" % @@count + end + end +end + +module MyModule; end +class AnError < StandardError; include MyModule; end + +class MetaMetaMetaTestCase < Minitest::Test + attr_accessor :reporter, :output, :tu + + def with_stderr err + old = $stderr + $stderr = err + yield + ensure + $stderr = old + end + + def run_tu_with_fresh_reporter flags = %w[--seed 42] + options = Minitest.process_args flags + + @output = StringIO.new("".encode('UTF-8')) + + self.reporter = Minitest::CompositeReporter.new + reporter << Minitest::SummaryReporter.new(@output, options) + reporter << Minitest::ProgressReporter.new(@output, options) + + with_stderr @output do + reporter.start + + yield(reporter) if block_given? + + @tus ||= [@tu] + @tus.each do |tu| + Minitest::Runnable.runnables.delete tu + + tu.run reporter, options + end + + reporter.report + end + end + + def first_reporter + reporter.reporters.first + end + + def assert_report expected, flags = %w[--seed 42], &block + header = clean <<-EOM + Run options: #{flags.map { |s| s =~ /\|/ ? s.inspect : s }.join " "} + + # Running: + + EOM + + run_tu_with_fresh_reporter flags, &block + + output = normalize_output @output.string.dup + + assert_equal header + expected, output + end + + def normalize_output output + output.sub!(/Finished in .*/, "Finished in 0.00") + output.sub!(/Loaded suite .*/, "Loaded suite blah") + + output.gsub!(/FakeNamedTest\d+/, "FakeNamedTestXX") + output.gsub!(/ = \d+.\d\d s = /, " = 0.00 s = ") + output.gsub!(/0x[A-Fa-f0-9]+/, "0xXXX") + output.gsub!(/ +$/, "") + + if windows? 
then + output.gsub!(/\[(?:[A-Za-z]:)?[^\]:]+:\d+\]/, "[FILE:LINE]") + output.gsub!(/^(\s+)(?:[A-Za-z]:)?[^:]+:\d+:in/, '\1FILE:LINE:in') + else + output.gsub!(/\[[^\]:]+:\d+\]/, "[FILE:LINE]") + output.gsub!(/^(\s+)[^:]+:\d+:in/, '\1FILE:LINE:in') + end + + output.gsub!(/( at )[^:]+:\d+/, '\1[FILE:LINE]') + + output + end + + def restore_env + old_value = ENV["MT_NO_SKIP_MSG"] + ENV.delete "MT_NO_SKIP_MSG" + + yield + ensure + ENV["MT_NO_SKIP_MSG"] = old_value + end + + def setup + super + srand 42 + Minitest::Test.reset + @tu = nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_assertions.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_assertions.rb new file mode 100644 index 0000000..5bc456f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_assertions.rb @@ -0,0 +1,1575 @@ +# encoding: UTF-8 + +require "minitest/autorun" + +if defined? Encoding then + e = Encoding.default_external + if e != Encoding::UTF_8 then + warn "" + warn "" + warn "NOTE: External encoding #{e} is not UTF-8. Tests WILL fail." + warn " Run tests with `RUBYOPT=-Eutf-8 rake` to avoid errors." + warn "" + warn "" + end +end + +SomeError = Class.new Exception + +unless defined? MyModule then + module MyModule; end + class AnError < StandardError; include MyModule; end +end + +class TestMinitestAssertions < Minitest::Test + # do not call parallelize_me! - teardown accesses @tc._assertions + # which is not threadsafe. Nearly every method in here is an + # assertion test so it isn't worth splitting it out further. + + RUBY18 = !defined? Encoding + + class DummyTest + include Minitest::Assertions + # include Minitest::Reportable # TODO: why do I really need this? + + attr_accessor :assertions, :failure + + def initialize + self.assertions = 0 + self.failure = nil + end + end + + def setup + super + + Minitest::Test.reset + + @tc = DummyTest.new + @zomg = "zomg ponies!" # TODO: const + @assertion_count = 1 + end + + def teardown + assert_equal(@assertion_count, @tc.assertions, + "expected #{@assertion_count} assertions to be fired during the test, not #{@tc.assertions}") + end + + def assert_deprecated name + dep = /DEPRECATED: #{name}. From #{__FILE__}:\d+(?::.*)?/ + dep = "" if $-w.nil? + + assert_output nil, dep do + yield + end + end + + def assert_triggered expected, klass = Minitest::Assertion + e = assert_raises klass do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, '\1') + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + msg.gsub!(/(\d\.\d{6})\d+/, '\1xxx') # normalize: ruby version, impl, platform + + assert_msg = Regexp === expected ? :assert_match : :assert_equal + self.send assert_msg, expected, msg + end + + def assert_unexpected expected + expected = Regexp.new expected if String === expected + + assert_triggered expected, Minitest::UnexpectedError do + yield + end + end + + def clean s + s.gsub(/^ {6,10}/, "") + end + + def non_verbose + orig_verbose = $VERBOSE + $VERBOSE = false + + yield + ensure + $VERBOSE = orig_verbose + end + + def test_assert + @assertion_count = 2 + + @tc.assert_equal true, @tc.assert(true), "returns true on success" + end + + def test_assert__triggered + assert_triggered "Expected false to be truthy." 
do + @tc.assert false + end + end + + def test_assert__triggered_message + assert_triggered @zomg do + @tc.assert false, @zomg + end + end + + def test_assert__triggered_lambda + assert_triggered "whoops" do + @tc.assert false, lambda { "whoops" } + end + end + + def test_assert_empty + @assertion_count = 2 + + @tc.assert_empty [] + end + + def test_assert_empty_triggered + @assertion_count = 2 + + assert_triggered "Expected [1] to be empty." do + @tc.assert_empty [1] + end + end + + def test_assert_equal + @tc.assert_equal 1, 1 + end + + def test_assert_equal_different_collection_array_hex_invisible + object1 = Object.new + object2 = Object.new + msg = "No visible difference in the Array#inspect output. + You should look at the implementation of #== on Array or its members. + [#]".gsub(/^ +/, "") + assert_triggered msg do + @tc.assert_equal [object1], [object2] + end + end + + def test_assert_equal_different_collection_hash_hex_invisible + h1, h2 = {}, {} + h1[1] = Object.new + h2[1] = Object.new + msg = "No visible difference in the Hash#inspect output. + You should look at the implementation of #== on Hash or its members. + {1=>#}".gsub(/^ +/, "") + + assert_triggered msg do + @tc.assert_equal h1, h2 + end + end + + def test_assert_equal_different_diff_deactivated + without_diff do + assert_triggered util_msg("haha" * 10, "blah" * 10) do + o1 = "haha" * 10 + o2 = "blah" * 10 + + @tc.assert_equal o1, o2 + end + end + end + + def test_assert_equal_different_message + assert_triggered "whoops.\nExpected: 1\n Actual: 2" do + @tc.assert_equal 1, 2, message { "whoops" } + end + end + + def test_assert_equal_different_lambda + assert_triggered "whoops.\nExpected: 1\n Actual: 2" do + @tc.assert_equal 1, 2, lambda { "whoops" } + end + end + + def test_assert_equal_different_hex + c = Class.new do + def initialize s; @name = s; end + end + + o1 = c.new "a" + o2 = c.new "b" + msg = clean <<-EOS + --- expected + +++ actual + @@ -1 +1 @@ + -#<#:0xXXXXXX @name=\"a\"> + +#<#:0xXXXXXX @name=\"b\"> + EOS + + assert_triggered msg do + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_hex_invisible + o1 = Object.new + o2 = Object.new + + msg = "No visible difference in the Object#inspect output. + You should look at the implementation of #== on Object or its members. + #".gsub(/^ +/, "") + + assert_triggered msg do + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long + msg = "--- expected + +++ actual + @@ -1 +1 @@ + -\"hahahahahahahahahahahahahahahahahahahaha\" + +\"blahblahblahblahblahblahblahblahblahblah\" + ".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "haha" * 10 + o2 = "blah" * 10 + + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long_invisible + msg = "No visible difference in the String#inspect output. + You should look at the implementation of #== on String or its members. + \"blahblahblahblahblahblahblahblahblahblah\"".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "blah" * 10 + o2 = "blah" * 10 + def o1.== _ + false + end + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long_msg + msg = "message. 
+ --- expected + +++ actual + @@ -1 +1 @@ + -\"hahahahahahahahahahahahahahahahahahahaha\" + +\"blahblahblahblahblahblahblahblahblahblah\" + ".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "haha" * 10 + o2 = "blah" * 10 + @tc.assert_equal o1, o2, "message" + end + end + + def test_assert_equal_different_short + assert_triggered util_msg(1, 2) do + @tc.assert_equal 1, 2 + end + end + + def test_assert_equal_different_short_msg + assert_triggered util_msg(1, 2, "message") do + @tc.assert_equal 1, 2, "message" + end + end + + def test_assert_equal_different_short_multiline + msg = "--- expected\n+++ actual\n@@ -1,2 +1,2 @@\n \"a\n-b\"\n+c\"\n" + assert_triggered msg do + @tc.assert_equal "a\nb", "a\nc" + end + end + + def test_assert_equal_does_not_allow_lhs_nil + if Minitest::VERSION =~ /^6/ then + warn "Time to strip the MT5 test" + + @assertion_count += 1 + assert_triggered(/Use assert_nil if expecting nil/) do + @tc.assert_equal nil, nil + end + else + err_re = /Use assert_nil if expecting nil from .*test_minitest_\w+.rb/ + err_re = "" if $-w.nil? + + assert_output "", err_re do + @tc.assert_equal nil, nil + end + end + end + + def test_assert_equal_does_not_allow_lhs_nil_triggered + assert_triggered "Expected: nil\n Actual: false" do + @tc.assert_equal nil, false + end + end + + def test_assert_equal_string_bug791 + exp = <<-'EOF'.gsub(/^ {10}/, "") # note single quotes + --- expected + +++ actual + @@ -1,2 +1 @@ + -"\\n + -" + +"\\\" + EOF + + exp = "Expected: \"\\\\n\"\n Actual: \"\\\\\"" + assert_triggered exp do + @tc.assert_equal "\\n", "\\" + end + end + + def test_assert_equal_string_both_escaped_unescaped_newlines + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,2 +1 @@ + -\"A\\n + -B\" + +\"A\\n\\\\nB\" + EOM + + assert_triggered msg do + exp = "A\\nB" + act = "A\n\\nB" + + @tc.assert_equal exp, act + end + end + + def test_assert_equal_string_encodings + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,3 +1,3 @@ + -# encoding: UTF-8 + -# valid: false + +# encoding: ASCII-8BIT + +# valid: true + "bad-utf8-\\xF1.txt" + EOM + + assert_triggered msg do + x = "bad-utf8-\xF1.txt" + y = x.dup.force_encoding "binary" # TODO: switch to .b when 1.9 dropped + @tc.assert_equal x, y + end + end unless RUBY18 + + def test_assert_equal_string_encodings_both_different + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,3 +1,3 @@ + -# encoding: US-ASCII + -# valid: false + +# encoding: ASCII-8BIT + +# valid: true + "bad-utf8-\\xF1.txt" + EOM + + assert_triggered msg do + x = "bad-utf8-\xF1.txt".force_encoding "ASCII" + y = x.dup.force_encoding "binary" # TODO: switch to .b when 1.9 dropped + @tc.assert_equal x, y + end + end unless RUBY18 + + def test_assert_equal_unescape_newlines + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1,2 +1,2 @@ + -"hello + +"hello\n + world" + EOM + + assert_triggered msg do + exp = "hello\nworld" + act = 'hello\nworld' # notice single quotes + + @tc.assert_equal exp, act + end + end + + def test_assert_in_delta + @tc.assert_in_delta 0.0, 1.0 / 1000, 0.1 + end + + def test_assert_in_delta_triggered + x = "1.0e-06" + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= #{x}." 
do + @tc.assert_in_delta 0.0, 1.0 / 1000, 0.000001 + end + end + + def test_assert_in_epsilon + @assertion_count = 10 + + @tc.assert_in_epsilon 10_000, 9991 + @tc.assert_in_epsilon 9991, 10_000 + @tc.assert_in_epsilon 1.0, 1.001 + @tc.assert_in_epsilon 1.001, 1.0 + + @tc.assert_in_epsilon 10_000, 9999.1, 0.0001 + @tc.assert_in_epsilon 9999.1, 10_000, 0.0001 + @tc.assert_in_epsilon 1.0, 1.0001, 0.0001 + @tc.assert_in_epsilon 1.0001, 1.0, 0.0001 + + @tc.assert_in_epsilon(-1, -1) + @tc.assert_in_epsilon(-10_000, -9991) + end + + def test_assert_in_epsilon_triggered + assert_triggered "Expected |10000 - 9990| (10) to be <= 9.99." do + @tc.assert_in_epsilon 10_000, 9990 + end + end + + def test_assert_in_epsilon_triggered_negative_case + x = (RUBY18 and not maglev?) ? "0.1" : "0.100000xxx" + y = "0.1" + assert_triggered "Expected |-1.1 - -1| (#{x}) to be <= #{y}." do + @tc.assert_in_epsilon(-1.1, -1, 0.1) + end + end + + def test_assert_includes + @assertion_count = 2 + + @tc.assert_includes [true], true + end + + def test_assert_includes_triggered + @assertion_count = 3 + + e = @tc.assert_raises Minitest::Assertion do + @tc.assert_includes [true], false + end + + expected = "Expected [true] to include false." + assert_equal expected, e.message + end + + def test_assert_instance_of + @tc.assert_instance_of String, "blah" + end + + def test_assert_instance_of_triggered + assert_triggered 'Expected "blah" to be an instance of Array, not String.' do + @tc.assert_instance_of Array, "blah" + end + end + + def test_assert_kind_of + @tc.assert_kind_of String, "blah" + end + + def test_assert_kind_of_triggered + assert_triggered 'Expected "blah" to be a kind of Array, not String.' do + @tc.assert_kind_of Array, "blah" + end + end + + def test_assert_match + @assertion_count = 2 + @tc.assert_match(/\w+/, "blah blah blah") + end + + def test_assert_match_matchee_to_str + @assertion_count = 2 + + obj = Object.new + def obj.to_str; "blah" end + + @tc.assert_match "blah", obj + end + + def test_assert_match_matcher_object + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; true end + + @tc.assert_match pattern, 5 + end + + def test_assert_match_object_triggered + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; false end + def pattern.inspect; "[Object]" end + + assert_triggered "Expected [Object] to match 5." do + @tc.assert_match pattern, 5 + end + end + + def test_assert_match_triggered + @assertion_count = 2 + assert_triggered 'Expected /\d+/ to match "blah blah blah".' do + @tc.assert_match(/\d+/, "blah blah blah") + end + end + + def test_assert_nil + @tc.assert_nil nil + end + + def test_assert_nil_triggered + assert_triggered "Expected 42 to be nil." do + @tc.assert_nil 42 + end + end + + def test_assert_operator + @tc.assert_operator 2, :>, 1 + end + + def test_assert_operator_bad_object + bad = Object.new + def bad.== _; true end + + @tc.assert_operator bad, :equal?, bad + end + + def test_assert_operator_triggered + assert_triggered "Expected 2 to be < 1." 
do + @tc.assert_operator 2, :<, 1 + end + end + + def test_assert_output_both + @assertion_count = 2 + + @tc.assert_output "yay", "blah" do + print "yay" + $stderr.print "blah" + end + end + + def test_assert_output_both_regexps + @assertion_count = 4 + + @tc.assert_output(/y.y/, /bl.h/) do + print "yay" + $stderr.print "blah" + end + end + + def test_assert_output_err + @tc.assert_output nil, "blah" do + $stderr.print "blah" + end + end + + def test_assert_output_neither + @assertion_count = 0 + + @tc.assert_output do + # do nothing + end + end + + def test_assert_output_out + @tc.assert_output "blah" do + print "blah" + end + end + + def test_assert_output_triggered_both + assert_triggered util_msg("blah", "blah blah", "In stderr") do + @tc.assert_output "yay", "blah" do + print "boo" + $stderr.print "blah blah" + end + end + end + + def test_assert_output_triggered_err + assert_triggered util_msg("blah", "blah blah", "In stderr") do + @tc.assert_output nil, "blah" do + $stderr.print "blah blah" + end + end + end + + def test_assert_output_triggered_out + assert_triggered util_msg("blah", "blah blah", "In stdout") do + @tc.assert_output "blah" do + print "blah blah" + end + end + end + + def test_assert_output_no_block + assert_triggered "assert_output requires a block to capture output." do + @tc.assert_output "blah" + end + end + + def test_assert_output_nested_assert_uncaught + @assertion_count = 1 + + assert_triggered "Epic Fail!" do + @tc.assert_output "blah\n" do + puts "blah" + @tc.flunk + end + end + end + + def test_assert_output_nested_raise + @assertion_count = 2 + + @tc.assert_output "blah\n" do + @tc.assert_raises RuntimeError do + puts "blah" + raise "boom!" + end + end + end + + def test_assert_output_nested_raise_bad + @assertion_count = 0 + + assert_unexpected "boom!" do + @tc.assert_raises do # 2) bypassed via UnexpectedError + @tc.assert_output "blah\n" do # 1) captures and raises UnexpectedError + puts "not_blah" + raise "boom!" + end + end + end + end + + def test_assert_output_nested_raise_mismatch + # this test is redundant, but illustrative + @assertion_count = 0 + + assert_unexpected "boom!" do + @tc.assert_raises RuntimeError do # 2) bypassed via UnexpectedError + @tc.assert_output "blah\n" do # 1) captures and raises UnexpectedError + puts "not_blah" + raise ArgumentError, "boom!" + end + end + end + end + + def test_assert_output_nested_throw_caught + @assertion_count = 2 + + @tc.assert_output "blah\n" do + @tc.assert_throws :boom! do + puts "blah" + throw :boom! + end + end + end + + def test_assert_output_nested_throw_caught_bad + @assertion_count = 1 # want 0; can't prevent throw from escaping :( + + @tc.assert_throws :boom! do # 2) captured via catch + @tc.assert_output "blah\n" do # 1) bypassed via throw + puts "not_blah" + throw :boom! + end + end + end + + def test_assert_output_nested_throw_mismatch + @assertion_count = 0 + + assert_unexpected "uncaught throw :boom!" do + @tc.assert_throws :not_boom! do # 2) captured via assert_throws+rescue + @tc.assert_output "blah\n" do # 1) bypassed via throw + puts "not_blah" + throw :boom! + end + end + end + end + + def test_assert_output_uncaught_raise + @assertion_count = 0 + + assert_unexpected "RuntimeError: boom!" do + @tc.assert_output "blah\n" do + puts "not_blah" + raise "boom!" + end + end + end + + def test_assert_output_uncaught_throw + @assertion_count = 0 + + assert_unexpected "uncaught throw :boom!" do + @tc.assert_output "blah\n" do + puts "not_blah" + throw :boom! 
+ end + end + end + def test_assert_predicate + @tc.assert_predicate "", :empty? + end + + def test_assert_predicate_triggered + assert_triggered 'Expected "blah" to be empty?.' do + @tc.assert_predicate "blah", :empty? + end + end + + def test_assert_raises + @tc.assert_raises RuntimeError do + raise "blah" + end + end + + def test_assert_raises_default + @tc.assert_raises do + raise StandardError, "blah" + end + end + + def test_assert_raises_default_triggered + e = assert_raises Minitest::Assertion do + @tc.assert_raises do + raise SomeError, "blah" + end + end + + expected = clean <<-EOM.chomp + [StandardError] exception expected, not + Class: + Message: <\"blah\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_default_triggered\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected, actual + end + + def test_assert_raises_exit + @tc.assert_raises SystemExit do + exit 1 + end + end + + def test_assert_raises_module + @tc.assert_raises MyModule do + raise AnError + end + end + + def test_assert_raises_signals + @tc.assert_raises SignalException do + raise SignalException, :INT + end + end + + def test_assert_raises_throw_nested_bad + @assertion_count = 0 + + assert_unexpected "RuntimeError: boom!" do + @tc.assert_raises do + @tc.assert_throws :blah do + raise "boom!" + throw :not_blah + end + end + end + end + + ## + # *sigh* This is quite an odd scenario, but it is from real (albeit + # ugly) test code in ruby-core: + + # http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=29259 + + def test_assert_raises_skip + @assertion_count = 0 + + assert_triggered "skipped", Minitest::Skip do + @tc.assert_raises ArgumentError do + begin + raise "blah" + rescue + skip "skipped" + end + end + end + end + + def test_assert_raises_subclass + @tc.assert_raises StandardError do + raise AnError + end + end + + def test_assert_raises_subclass_triggered + e = assert_raises Minitest::Assertion do + @tc.assert_raises SomeError do + raise AnError, "some message" + end + end + + expected = clean <<-EOM + [SomeError] exception expected, not + Class: + Message: <\"some message\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_subclass_triggered\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected.chomp, actual + end + + def test_assert_raises_triggered_different + e = assert_raises Minitest::Assertion do + @tc.assert_raises RuntimeError do + raise SyntaxError, "icky" + end + end + + expected = clean <<-EOM.chomp + [RuntimeError] exception expected, not + Class: + Message: <\"icky\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_triggered_different\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected, actual + end + + def test_assert_raises_triggered_different_msg + e = assert_raises Minitest::Assertion do + @tc.assert_raises RuntimeError, "XXX" do + raise SyntaxError, "icky" + end + end + + expected = clean <<-EOM + XXX. 
+ [RuntimeError] exception expected, not + Class: + Message: <\"icky\"> + ---Backtrace--- + FILE:LINE:in \`test_assert_raises_triggered_different_msg\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(/block \(\d+ levels\) in /, "") if RUBY_VERSION >= "1.9.0" + + assert_equal expected.chomp, actual + end + + def test_assert_raises_triggered_none + e = assert_raises Minitest::Assertion do + @tc.assert_raises Minitest::Assertion do + # do nothing + end + end + + expected = "Minitest::Assertion expected but nothing was raised." + + assert_equal expected, e.message + end + + def test_assert_raises_triggered_none_msg + e = assert_raises Minitest::Assertion do + @tc.assert_raises Minitest::Assertion, "XXX" do + # do nothing + end + end + + expected = "XXX.\nMinitest::Assertion expected but nothing was raised." + + assert_equal expected, e.message + end + + def test_assert_raises_without_block + assert_triggered "assert_raises requires a block to capture errors." do + @tc.assert_raises StandardError + end + end + + def test_assert_respond_to + @tc.assert_respond_to "blah", :empty? + end + + def test_assert_respond_to_triggered + assert_triggered 'Expected "blah" (String) to respond to #rawr!.' do + @tc.assert_respond_to "blah", :rawr! + end + end + + def test_assert_same + @assertion_count = 3 + + o = "blah" + @tc.assert_same 1, 1 + @tc.assert_same :blah, :blah + @tc.assert_same o, o + end + + def test_assert_same_triggered + @assertion_count = 2 + + assert_triggered "Expected 2 (oid=N) to be the same as 1 (oid=N)." do + @tc.assert_same 1, 2 + end + + s1 = "blah" + s2 = "blah" + + assert_triggered 'Expected "blah" (oid=N) to be the same as "blah" (oid=N).' do + @tc.assert_same s1, s2 + end + end + + def test_assert_send + assert_deprecated :assert_send do + @tc.assert_send [1, :<, 2] + end + end + + def test_assert_send_bad + assert_deprecated :assert_send do + assert_triggered "Expected 1.>(*[2]) to return true." do + @tc.assert_send [1, :>, 2] + end + end + end + + def test_assert_silent + @assertion_count = 2 + + @tc.assert_silent do + # do nothing + end + end + + def test_assert_silent_triggered_err + assert_triggered util_msg("", "blah blah", "In stderr") do + @tc.assert_silent do + $stderr.print "blah blah" + end + end + end + + def test_assert_silent_triggered_out + @assertion_count = 2 + + assert_triggered util_msg("", "blah blah", "In stdout") do + @tc.assert_silent do + print "blah blah" + end + end + end + + def test_assert_throws + @tc.assert_throws :blah do + throw :blah + end + end + + def test_assert_throws_argument_exception + @assertion_count = 0 + + assert_unexpected "ArgumentError" do + @tc.assert_throws :blah do + raise ArgumentError + end + end + end + + def test_assert_throws_different + assert_triggered "Expected :blah to have been thrown, not :not_blah." do + @tc.assert_throws :blah do + throw :not_blah + end + end + end + + def test_assert_throws_name_error + @assertion_count = 0 + + assert_unexpected "NameError" do + @tc.assert_throws :blah do + raise NameError + end + end + end + + def test_assert_throws_unthrown + assert_triggered "Expected :blah to have been thrown." do + @tc.assert_throws :blah do + # do nothing + end + end + end + + def test_assert_path_exists + @tc.assert_path_exists __FILE__ + end + + def test_assert_path_exists_triggered + assert_triggered "Expected path 'blah' to exist." 
do + @tc.assert_path_exists "blah" + end + end + + def test_capture_io + @assertion_count = 0 + + non_verbose do + out, err = capture_io do + puts "hi" + $stderr.puts "bye!" + end + + assert_equal "hi\n", out + assert_equal "bye!\n", err + end + end + + def test_capture_subprocess_io + @assertion_count = 0 + + non_verbose do + out, err = capture_subprocess_io do + system("echo hi") + system("echo bye! 1>&2") + end + + assert_equal "hi\n", out + assert_equal "bye!", err.strip + end + end + + def test_class_asserts_match_refutes + @assertion_count = 0 + + methods = Minitest::Assertions.public_instance_methods + methods.map!(&:to_s) if Symbol === methods.first + + # These don't have corresponding refutes _on purpose_. They're + # useless and will never be added, so don't bother. + ignores = %w[assert_output assert_raises assert_send + assert_silent assert_throws assert_mock] + + # These are test/unit methods. I'm not actually sure why they're still here + ignores += %w[assert_no_match assert_not_equal assert_not_nil + assert_not_same assert_nothing_raised + assert_nothing_thrown assert_raise] + + asserts = methods.grep(/^assert/).sort - ignores + refutes = methods.grep(/^refute/).sort - ignores + + assert_empty refutes.map { |n| n.sub(/^refute/, "assert") } - asserts + assert_empty asserts.map { |n| n.sub(/^assert/, "refute") } - refutes + end + + def test_delta_consistency + @assertion_count = 2 + + @tc.assert_in_delta 0, 1, 1 + + assert_triggered "Expected |0 - 1| (1) to not be <= 1." do + @tc.refute_in_delta 0, 1, 1 + end + end + + def test_epsilon_consistency + @assertion_count = 2 + + @tc.assert_in_epsilon 1.0, 1.001 + + msg = "Expected |1.0 - 1.001| (0.000999xxx) to not be <= 0.001." + assert_triggered msg do + @tc.refute_in_epsilon 1.0, 1.001 + end + end + + def assert_fail_after t + @tc.fail_after t.year, t.month, t.day, "remove the deprecations" + end + + def test_fail_after + d0 = Time.now + d1 = d0 + 86_400 # I am an idiot + + assert_silent do + assert_fail_after d1 + end + + assert_triggered "remove the deprecations" do + assert_fail_after d0 + end + end + + def test_flunk + assert_triggered "Epic Fail!" do + @tc.flunk + end + end + + def test_flunk_message + assert_triggered @zomg do + @tc.flunk @zomg + end + end + + def test_pass + @tc.pass + end + + def test_refute + @assertion_count = 2 + + @tc.assert_equal true, @tc.refute(false), "returns true on success" + end + + def test_refute_empty + @assertion_count = 2 + + @tc.refute_empty [1] + end + + def test_refute_empty_triggered + @assertion_count = 2 + + assert_triggered "Expected [] to not be empty." do + @tc.refute_empty [] + end + end + + def test_refute_equal + @tc.refute_equal "blah", "yay" + end + + def test_refute_equal_triggered + assert_triggered 'Expected "blah" to not be equal to "blah".' do + @tc.refute_equal "blah", "blah" + end + end + + def test_refute_in_delta + @tc.refute_in_delta 0.0, 1.0 / 1000, 0.000001 + end + + def test_refute_in_delta_triggered + x = "0.1" + assert_triggered "Expected |0.0 - 0.001| (0.001) to not be <= #{x}." do + @tc.refute_in_delta 0.0, 1.0 / 1000, 0.1 + end + end + + def test_refute_in_epsilon + @tc.refute_in_epsilon 10_000, 9990-1 + end + + def test_refute_in_epsilon_triggered + assert_triggered "Expected |10000 - 9990| (10) to not be <= 10.0." 
do + @tc.refute_in_epsilon 10_000, 9990 + flunk + end + end + + def test_refute_includes + @assertion_count = 2 + + @tc.refute_includes [true], false + end + + def test_refute_includes_triggered + @assertion_count = 3 + + e = @tc.assert_raises Minitest::Assertion do + @tc.refute_includes [true], true + end + + expected = "Expected [true] to not include true." + assert_equal expected, e.message + end + + def test_refute_instance_of + @tc.refute_instance_of Array, "blah" + end + + def test_refute_instance_of_triggered + assert_triggered 'Expected "blah" to not be an instance of String.' do + @tc.refute_instance_of String, "blah" + end + end + + def test_refute_kind_of + @tc.refute_kind_of Array, "blah" + end + + def test_refute_kind_of_triggered + assert_triggered 'Expected "blah" to not be a kind of String.' do + @tc.refute_kind_of String, "blah" + end + end + + def test_refute_match + @assertion_count = 2 + @tc.refute_match(/\d+/, "blah blah blah") + end + + def test_refute_match_matcher_object + @assertion_count = 2 + pattern = Object.new + def pattern.=~ _; false end + @tc.refute_match pattern, 5 + end + + def test_refute_match_object_triggered + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; true end + def pattern.inspect; "[Object]" end + + assert_triggered "Expected [Object] to not match 5." do + @tc.refute_match pattern, 5 + end + end + + def test_refute_match_triggered + @assertion_count = 2 + assert_triggered 'Expected /\w+/ to not match "blah blah blah".' do + @tc.refute_match(/\w+/, "blah blah blah") + end + end + + def test_refute_nil + @tc.refute_nil 42 + end + + def test_refute_nil_triggered + assert_triggered "Expected nil to not be nil." do + @tc.refute_nil nil + end + end + + def test_refute_operator + @tc.refute_operator 2, :<, 1 + end + + def test_refute_operator_bad_object + bad = Object.new + def bad.== _; true end + + @tc.refute_operator true, :equal?, bad + end + + def test_refute_operator_triggered + assert_triggered "Expected 2 to not be > 1." do + @tc.refute_operator 2, :>, 1 + end + end + + def test_refute_predicate + @tc.refute_predicate "42", :empty? + end + + def test_refute_predicate_triggered + assert_triggered 'Expected "" to not be empty?.' do + @tc.refute_predicate "", :empty? + end + end + + def test_refute_respond_to + @tc.refute_respond_to "blah", :rawr! + end + + def test_refute_respond_to_triggered + assert_triggered 'Expected "blah" to not respond to empty?.' do + @tc.refute_respond_to "blah", :empty? + end + end + + def test_refute_same + @tc.refute_same 1, 2 + end + + def test_refute_same_triggered + assert_triggered "Expected 1 (oid=N) to not be the same as 1 (oid=N)." do + @tc.refute_same 1, 1 + end + end + + def test_refute_path_exists + @tc.refute_path_exists "blah" + end + + def test_refute_path_exists_triggered + assert_triggered "Expected path '#{__FILE__}' to not exist." do + @tc.refute_path_exists __FILE__ + end + end + + def test_skip + @assertion_count = 0 + + assert_triggered "haha!", Minitest::Skip do + @tc.skip "haha!" 
+ end + end + + def assert_skip_until t, msg + @tc.skip_until t.year, t.month, t.day, msg + end + + def test_skip_until + @assertion_count = 0 + + d0 = Time.now + d1 = d0 + 86_400 # I am an idiot + + assert_output "", /Stale skip_until \"not yet\" at .*?:\d+$/ do + assert_skip_until d0, "not yet" + end + + assert_triggered "not ready yet", Minitest::Skip do + assert_skip_until d1, "not ready yet" + end + end + + def util_msg exp, act, msg = nil + s = "Expected: #{exp.inspect}\n Actual: #{act.inspect}" + s = "#{msg}.\n#{s}" if msg + s + end + + def without_diff + old_diff = Minitest::Assertions.diff + Minitest::Assertions.diff = nil + + yield + ensure + Minitest::Assertions.diff = old_diff + end +end + +class TestMinitestAssertionHelpers < Minitest::Test + def assert_mu_pp exp, input, raw = false + act = mu_pp input + + if String === input && !raw then + assert_equal "\"#{exp}\"", act + else + assert_equal exp, act + end + end + + def assert_mu_pp_for_diff exp, input, raw = false + act = mu_pp_for_diff input + + if String === input && !raw then + assert_equal "\"#{exp}\"", act + else + assert_equal exp, act + end + end + + def test_diff_equal + msg = "No visible difference in the String#inspect output. + You should look at the implementation of #== on String or its members. + \"blahblahblahblahblahblahblahblahblahblah\"".gsub(/^ +/, "") + + o1 = "blah" * 10 + o2 = "blah" * 10 + def o1.== _ + false + end + + assert_equal msg, diff(o1, o2) + end + + def test_diff_str_mixed + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1 +1 @@ + -"A\\n\nB" + +"A\n\\nB" + EOM + + exp = "A\\n\nB" + act = "A\n\\nB" + + assert_equal msg, diff(exp, act) + end + + def test_diff_str_multiline + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1,2 +1,2 @@ + "A + -B" + +C" + EOM + + exp = "A\nB" + act = "A\nC" + + assert_equal msg, diff(exp, act) + end + + def test_diff_str_simple + msg = <<-'EOM'.gsub(/^ {10}/, "").chomp # NOTE single quotes on heredoc + Expected: "A" + Actual: "B" + EOM + + exp = "A" + act = "B" + + assert_equal msg, diff(exp, act) + end + + def test_message + assert_equal "blah2.", message { "blah2" }.call + assert_equal "blah2.", message("") { "blah2" }.call + assert_equal "blah1.\nblah2.", message(:blah1) { "blah2" }.call + assert_equal "blah1.\nblah2.", message("blah1") { "blah2" }.call + + message = proc { "blah1" } + assert_equal "blah1.\nblah2.", message(message) { "blah2" }.call + + message = message { "blah1" } + assert_equal "blah1.\nblah2.", message(message) { "blah2" }.call + end + + def test_message_deferred + var = nil + + msg = message { var = "blah" } + + assert_nil var + + msg.call + + assert_equal "blah", var + end + + def test_mu_pp + assert_mu_pp 42.inspect, 42 + assert_mu_pp %w[a b c].inspect, %w[a b c] + assert_mu_pp "A B", "A B" + assert_mu_pp "A\\nB", "A\nB" + assert_mu_pp "A\\\\nB", 'A\nB' # notice single quotes + end + + def test_mu_pp_for_diff + assert_mu_pp_for_diff "#", Object.new + assert_mu_pp_for_diff "A B", "A B" + assert_mu_pp_for_diff [1, 2, 3].inspect, [1, 2, 3] + assert_mu_pp_for_diff "A\nB", "A\nB" + end + + def test_mu_pp_for_diff_str_bad_encoding + str = "\666".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_bad_encoding_both + str = "\666A\\n\nB".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6A\\\\n\\nB\"" 
+ + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_encoding + str = "A\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\nB\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_encoding_both + str = "A\\n\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\\\\n\\nB\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_nerd + assert_mu_pp_for_diff "A\\nB\\\\nC", "A\nB\\nC" + assert_mu_pp_for_diff "\\nB\\\\nC", "\nB\\nC" + assert_mu_pp_for_diff "\\nB\\\\n", "\nB\\n" + assert_mu_pp_for_diff "\\n\\\\n", "\n\\n" + assert_mu_pp_for_diff "\\\\n\\n", "\\n\n" + assert_mu_pp_for_diff "\\\\nB\\n", "\\nB\n" + assert_mu_pp_for_diff "\\\\nB\\nC", "\\nB\nC" + assert_mu_pp_for_diff "A\\\\n\\nB", "A\\n\nB" + assert_mu_pp_for_diff "A\\n\\\\nB", "A\n\\nB" + assert_mu_pp_for_diff "\\\\n\\n", "\\n\n" + assert_mu_pp_for_diff "\\n\\\\n", "\n\\n" + end + + def test_mu_pp_for_diff_str_normal + assert_mu_pp_for_diff "", "" + assert_mu_pp_for_diff "A\\n\n", "A\\n" + assert_mu_pp_for_diff "A\\n\nB", "A\\nB" + assert_mu_pp_for_diff "A\n", "A\n" + assert_mu_pp_for_diff "A\nB", "A\nB" + assert_mu_pp_for_diff "\\n\n", "\\n" + assert_mu_pp_for_diff "\n", "\n" + assert_mu_pp_for_diff "\\n\nA", "\\nA" + assert_mu_pp_for_diff "\nA", "\nA" + end + + def test_mu_pp_str_bad_encoding + str = "\666".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6\"" + + assert_mu_pp exp, str, :raw + end + + def test_mu_pp_str_encoding + str = "A\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\\nB\"" + + assert_mu_pp exp, str, :raw + end + + def test_mu_pp_str_immutable + printer = Class.new { extend Minitest::Assertions } + str = "test".freeze + assert_equal '"test"', printer.mu_pp(str) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_benchmark.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_benchmark.rb new file mode 100644 index 0000000..88abf77 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_benchmark.rb @@ -0,0 +1,137 @@ +require "minitest/autorun" +require "minitest/benchmark" + +## +# Used to verify data: +# http://www.wolframalpha.com/examples/RegressionAnalysis.html + +class TestMinitestBenchmark < Minitest::Test + def test_cls_bench_exp + assert_equal [2, 4, 8, 16, 32], Minitest::Benchmark.bench_exp(2, 32, 2) + end + + def test_cls_bench_linear + assert_equal [2, 4, 6, 8, 10], Minitest::Benchmark.bench_linear(2, 10, 2) + end + + def test_cls_runnable_methods + assert_equal [], Minitest::Benchmark.runnable_methods + + c = Class.new(Minitest::Benchmark) do + def bench_blah + end + end + + assert_equal ["bench_blah"], c.runnable_methods + end + + def test_cls_bench_range + assert_equal [1, 10, 100, 1_000, 10_000], Minitest::Benchmark.bench_range + end + + def test_fit_exponential_clean + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = x.map { |n| 1.1 * Math.exp(2.1 * n) } + + assert_fit :exponential, x, y, 1.0, 1.1, 2.1 + end + + def test_fit_exponential_noisy + x = [1.0, 1.9, 2.6, 3.4, 5.0] + y = [12, 10, 8.2, 6.9, 5.9] + + # verified with Numbers and R + assert_fit :exponential, x, y, 0.95, 13.81148, -0.1820 + end + + def test_fit_logarithmic_clean + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = x.map { |n| 1.1 + 2.1 * Math.log(n) } + + assert_fit :logarithmic, x, y, 1.0, 1.1, 2.1 + end + + def test_fit_logarithmic_noisy + x = [1.0, 2.0, 3.0, 4.0, 5.0] + # Generated with + # y = x.map { |n| 
jitter = 0.999 + 0.002 * rand; (Math.log(n) ) * jitter } + y = [0.0, 0.6935, 1.0995, 1.3873, 1.6097] + + assert_fit :logarithmic, x, y, 0.95, 0, 1 + end + + def test_fit_constant_clean + x = (1..5).to_a + y = [5.0, 5.0, 5.0, 5.0, 5.0] + + assert_fit :linear, x, y, nil, 5.0, 0 + end + + def test_fit_constant_noisy + x = (1..5).to_a + y = [1.0, 1.2, 1.0, 0.8, 1.0] + + # verified in numbers and R + assert_fit :linear, x, y, nil, 1.12, -0.04 + end + + def test_fit_linear_clean + # y = m * x + b where m = 2.2, b = 3.1 + x = (1..5).to_a + y = x.map { |n| 2.2 * n + 3.1 } + + assert_fit :linear, x, y, 1.0, 3.1, 2.2 + end + + def test_fit_linear_noisy + x = [ 60, 61, 62, 63, 65] + y = [3.1, 3.6, 3.8, 4.0, 4.1] + + # verified in numbers and R + assert_fit :linear, x, y, 0.8315, -7.9635, 0.1878 + end + + def test_fit_power_clean + # y = A x ** B, where B = b and A = e ** a + # if, A = 1, B = 2, then + + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = [1.0, 4.0, 9.0, 16.0, 25.0] + + assert_fit :power, x, y, 1.0, 1.0, 2.0 + end + + def test_fit_power_noisy + # from www.engr.uidaho.edu/thompson/courses/ME330/lecture/least_squares.html + x = [10, 12, 15, 17, 20, 22, 25, 27, 30, 32, 35] + y = [95, 105, 125, 141, 173, 200, 253, 298, 385, 459, 602] + + # verified in numbers + assert_fit :power, x, y, 0.90, 2.6217, 1.4556 + + # income to % of households below income amount + # http://library.wolfram.com/infocenter/Conferences/6461/PowerLaws.nb + x = [15_000, 25_000, 35_000, 50_000, 75_000, 100_000] + y = [0.154, 0.283, 0.402, 0.55, 0.733, 0.843] + + # verified in numbers + assert_fit :power, x, y, 0.96, 3.119e-5, 0.8959 + end + + def assert_fit msg, x, y, fit, exp_a, exp_b + bench = Minitest::Benchmark.new :blah + + a, b, rr = bench.send "fit_#{msg}", x, y + + assert_operator rr, :>=, fit if fit + assert_in_delta exp_a, a + assert_in_delta exp_b, b + end +end + +describe "my class Bench" do + klass = self + it "should provide bench methods" do + klass.must_respond_to :bench + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_mock.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_mock.rb new file mode 100644 index 0000000..7128d5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_mock.rb @@ -0,0 +1,872 @@ +require "minitest/autorun" + +class TestMinitestMock < Minitest::Test + parallelize_me! 
+ + def setup + @mock = Minitest::Mock.new.expect(:foo, nil) + @mock.expect(:meaning_of_life, 42) + end + + def test_create_stub_method + assert_nil @mock.foo + end + + def test_allow_return_value_specification + assert_equal 42, @mock.meaning_of_life + end + + def test_blow_up_if_not_called + @mock.foo + + util_verify_bad "expected meaning_of_life() => 42" + end + + def test_not_blow_up_if_everything_called + @mock.foo + @mock.meaning_of_life + + assert_mock @mock + end + + def test_allow_expectations_to_be_added_after_creation + @mock.expect(:bar, true) + assert @mock.bar + end + + def test_not_verify_if_new_expected_method_is_not_called + @mock.foo + @mock.meaning_of_life + @mock.expect(:bar, true) + + util_verify_bad "expected bar() => true" + end + + def test_blow_up_on_wrong_number_of_arguments + @mock.foo + @mock.meaning_of_life + @mock.expect(:sum, 3, [1, 2]) + + e = assert_raises ArgumentError do + @mock.sum + end + + assert_equal "mocked method :sum expects 2 arguments, got 0", e.message + end + + def test_return_mock_does_not_raise + retval = Minitest::Mock.new + mock = Minitest::Mock.new + mock.expect(:foo, retval) + mock.foo + + assert_mock mock + end + + def test_mock_args_does_not_raise + arg = Minitest::Mock.new + mock = Minitest::Mock.new + mock.expect(:foo, nil, [arg]) + mock.foo(arg) + + assert_mock mock + end + + def test_set_expectation_on_special_methods + mock = Minitest::Mock.new + + mock.expect :object_id, "received object_id" + assert_equal "received object_id", mock.object_id + + mock.expect :respond_to_missing?, "received respond_to_missing?" + assert_equal "received respond_to_missing?", mock.respond_to_missing? + + mock.expect :===, "received ===" + assert_equal "received ===", mock.=== + + mock.expect :inspect, "received inspect" + assert_equal "received inspect", mock.inspect + + mock.expect :to_s, "received to_s" + assert_equal "received to_s", mock.to_s + + mock.expect :public_send, "received public_send" + assert_equal "received public_send", mock.public_send + + mock.expect :send, "received send" + assert_equal "received send", mock.send + + assert_mock mock + end + + def test_expectations_can_be_satisfied_via_send + @mock.send :foo + @mock.send :meaning_of_life + + assert_mock @mock + end + + def test_expectations_can_be_satisfied_via_public_send + skip "Doesn't run on 1.8" if RUBY_VERSION < "1.9" + + @mock.public_send :foo + @mock.public_send :meaning_of_life + + assert_mock @mock + end + + def test_blow_up_on_wrong_arguments + @mock.foo + @mock.meaning_of_life + @mock.expect(:sum, 3, [1, 2]) + + e = assert_raises MockExpectationError do + @mock.sum(2, 4) + end + + exp = "mocked method :sum called with unexpected arguments [2, 4]" + assert_equal exp, e.message + end + + def test_expect_with_non_array_args + e = assert_raises ArgumentError do + @mock.expect :blah, 3, false + end + + assert_equal "args must be an array", e.message + end + + def test_respond_appropriately + assert @mock.respond_to?(:foo) + assert @mock.respond_to?(:foo, true) + assert @mock.respond_to?("foo") + assert !@mock.respond_to?(:bar) + end + + def test_no_method_error_on_unexpected_methods + e = assert_raises NoMethodError do + @mock.bar + end + + expected = "unmocked method :bar, expected one of [:foo, :meaning_of_life]" + + assert_equal expected, e.message + end + + def test_assign_per_mock_return_values + a = Minitest::Mock.new + b = Minitest::Mock.new + + a.expect(:foo, :a) + b.expect(:foo, :b) + + assert_equal :a, a.foo + assert_equal :b, b.foo + end + + def 
test_do_not_create_stub_method_on_new_mocks + a = Minitest::Mock.new + a.expect(:foo, :a) + + assert !Minitest::Mock.new.respond_to?(:foo) + end + + def test_mock_is_a_blank_slate + @mock.expect :kind_of?, true, [String] + @mock.expect :==, true, [1] + + assert @mock.kind_of?(String), "didn't mock :kind_of\?" + assert @mock == 1, "didn't mock :==" + end + + def test_verify_allows_called_args_to_be_loosely_specified + mock = Minitest::Mock.new + mock.expect :loose_expectation, true, [Integer] + mock.loose_expectation 1 + + assert_mock mock + end + + def test_verify_raises_with_strict_args + mock = Minitest::Mock.new + mock.expect :strict_expectation, true, [2] + + e = assert_raises MockExpectationError do + mock.strict_expectation 1 + end + + exp = "mocked method :strict_expectation called with unexpected arguments [1]" + assert_equal exp, e.message + end + + def test_method_missing_empty + mock = Minitest::Mock.new + + mock.expect :a, nil + + mock.a + + e = assert_raises MockExpectationError do + mock.a + end + + assert_equal "No more expects available for :a: []", e.message + end + + def test_same_method_expects_are_verified_when_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:baz] + + mock.foo :bar + mock.foo :baz + + assert_mock mock + end + + def test_same_method_expects_blow_up_when_not_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:baz] + + mock.foo :bar + + e = assert_raises(MockExpectationError) { mock.verify } + + exp = "expected foo(:baz) => nil, got [foo(:bar) => nil]" + + assert_equal exp, e.message + end + + def test_same_method_expects_with_same_args_blow_up_when_not_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:bar] + + mock.foo :bar + + e = assert_raises(MockExpectationError) { mock.verify } + + exp = "expected foo(:bar) => nil, got [foo(:bar) => nil]" + + assert_equal exp, e.message + end + + def test_verify_passes_when_mock_block_returns_true + mock = Minitest::Mock.new + mock.expect :foo, nil do + true + end + + mock.foo + + assert_mock mock + end + + def test_mock_block_is_passed_function_params + arg1, arg2, arg3 = :bar, [1, 2, 3], { :a => "a" } + mock = Minitest::Mock.new + mock.expect :foo, nil do |a1, a2, a3| + a1 == arg1 && a2 == arg2 && a3 == arg3 + end + + mock.foo arg1, arg2, arg3 + + assert_mock mock + end + + def test_mock_block_is_passed_function_block + mock = Minitest::Mock.new + block = proc { "bar" } + mock.expect :foo, nil do |arg, &blk| + arg == "foo" && + blk == block + end + mock.foo "foo", &block + assert_mock mock + end + + def test_verify_fails_when_mock_block_returns_false + mock = Minitest::Mock.new + mock.expect :foo, nil do + false + end + + e = assert_raises(MockExpectationError) { mock.foo } + exp = "mocked method :foo failed block w/ []" + + assert_equal exp, e.message + end + + def test_mock_block_throws_if_args_passed + mock = Minitest::Mock.new + + e = assert_raises(ArgumentError) do + mock.expect :foo, nil, [:a, :b, :c] do + true + end + end + + exp = "args ignored when block given" + + assert_equal exp, e.message + end + + def test_mock_returns_retval_when_called_with_block + mock = Minitest::Mock.new + mock.expect(:foo, 32) do + true + end + + rs = mock.foo + + assert_equal rs, 32 + end + + def util_verify_bad exp + e = assert_raises MockExpectationError do + @mock.verify + end + + assert_equal exp, e.message + end + + def test_mock_called_via_send + mock = Minitest::Mock.new + 
mock.expect(:foo, true) + + mock.send :foo + assert_mock mock + end + + def test_mock_called_via___send__ + mock = Minitest::Mock.new + mock.expect(:foo, true) + + mock.__send__ :foo + assert_mock mock + end + + def test_mock_called_via_send_with_args + mock = Minitest::Mock.new + mock.expect(:foo, true, [1, 2, 3]) + + mock.send(:foo, 1, 2, 3) + assert_mock mock + end + +end + +require "minitest/metametameta" + +class TestMinitestStub < Minitest::Test + # Do not parallelize since we're calling stub on class methods + + def setup + super + Minitest::Test.reset + + @tc = Minitest::Test.new "fake tc" + @assertion_count = 1 + end + + def teardown + super + assert_equal @assertion_count, @tc.assertions if self.passed? + end + + class Time + def self.now + 24 + end + end + + def assert_stub val_or_callable + @assertion_count += 1 + + t = Time.now.to_i + + Time.stub :now, val_or_callable do + @tc.assert_equal 42, Time.now + end + + @tc.assert_operator Time.now.to_i, :>=, t + end + + def test_stub_private_module_method + @assertion_count += 1 + + t0 = Time.now + + self.stub :sleep, nil do + @tc.assert_nil sleep(10) + end + + @tc.assert_operator Time.now - t0, :<=, 1 + end + + def test_stub_private_module_method_indirect + @assertion_count += 1 + + fail_clapper = Class.new do + def fail_clap + raise + :clap + end + end.new + + fail_clapper.stub :raise, nil do |safe_clapper| + @tc.assert_equal :clap, safe_clapper.fail_clap # either form works + @tc.assert_equal :clap, fail_clapper.fail_clap # yay closures + end + end + + def test_stub_public_module_method + Math.stub :log10, :stubbed do + @tc.assert_equal :stubbed, Math.log10(1000) + end + end + + def test_stub_value + assert_stub 42 + end + + def test_stub_block + assert_stub lambda { 42 } + end + + def test_stub_block_args + @assertion_count += 1 + + t = Time.now.to_i + + Time.stub :now, lambda { |n| n * 2 } do + @tc.assert_equal 42, Time.now(21) + end + + @tc.assert_operator Time.now.to_i, :>=, t + end + + def test_stub_callable + obj = Object.new + + def obj.call + 42 + end + + assert_stub obj + end + + def test_stub_yield_self + obj = "foo" + + val = obj.stub :to_s, "bar" do |s| + s.to_s + end + + @tc.assert_equal "bar", val + end + + def test_dynamic_method + @assertion_count = 2 + + dynamic = Class.new do + def self.respond_to? meth + meth == :found + end + + def self.method_missing meth, *args, &block + if meth == :found + false + else + super + end + end + end + + val = dynamic.stub(:found, true) do |s| + s.found + end + + @tc.assert_equal true, val + @tc.assert_equal false, dynamic.found + end + + def test_stub_NameError + e = @tc.assert_raises NameError do + Time.stub :nope_nope_nope, 42 do + # do nothing + end + end + + exp = /undefined method `nope_nope_nope' for( class)? 
`#{self.class}::Time'/ + assert_match exp, e.message + end + + def test_mock_with_yield + mock = Minitest::Mock.new + mock.expect(:write, true) do + true + end + rs = nil + + File.stub :open, true, mock do + File.open "foo.txt", "r" do |f| + rs = f.write + end + end + @tc.assert_equal true, rs + end + + alias test_stub_value__old test_stub_value # TODO: remove/rename + + ## Permutation Sets: + + # [:value, :lambda] + # [:*, :block, :block_call] + # [:**, :block_args] + # + # Where: + # + # :value = a normal value + # :lambda = callable or lambda + # :* = no block + # :block = normal block + # :block_call = :lambda invokes the block (N/A for :value) + # :** = no args + # :args = args passed to stub + + ## Permutations + + # [:call, :*, :**] =>5 callable+block FIX: CALL BOTH (bug) + # [:call, :*, :**] =>6 callable + + # [:lambda, :*, :**] => lambda result + + # [:lambda, :*, :args] => lambda result NO ARGS + + # [:lambda, :block, :**] =>5 lambda result FIX: CALL BOTH (bug) + # [:lambda, :block, :**] =>6 lambda result + + # [:lambda, :block, :args] =>5 lambda result FIX: CALL BOTH (bug) + # [:lambda, :block, :args] =>6 lambda result + # [:lambda, :block, :args] =>7 raise ArgumentError + + # [:lambda, :block_call, :**] =>5 lambda FIX: BUG!-not passed block to lambda + # [:lambda, :block_call, :**] =>6 lambda+block result + + # [:lambda, :block_call, :args] =>5 lambda FIX: BUG!-not passed block to lambda + # [:lambda, :block_call, :args] =>6 lambda+block result + + # [:value, :*, :**] => value + + # [:value, :*, :args] => value, ignore args + + # [:value, :block, :**] =>5 value, call block + # [:value, :block, :**] =>6 value + + # [:value, :block, :args] =>5 value, call block w/ args + # [:value, :block, :args] =>6 value, call block w/ args, deprecated + # [:value, :block, :args] =>7 raise ArgumentError + + # [:value, :block_call, :**] => N/A + + # [:value, :block_call, :args] => N/A + + class Bar + def call + puts "hi" + end + end + + class Foo + def self.blocking + yield + end + end + + class Thingy + def self.identity arg + arg + end + end + + def test_stub_callable_block_5 # from tenderlove + @assertion_count += 1 + Foo.stub5 :blocking, Bar.new do + @tc.assert_output "hi\n", "" do + Foo.blocking do + @tc.flunk "shouldn't ever hit this" + end + end + end + end + + def test_stub_callable_block_6 # from tenderlove + skip_stub6 + + @assertion_count += 1 + Foo.stub6 :blocking, Bar.new do + @tc.assert_output "hi\n", "" do + Foo.blocking do + @tc.flunk "shouldn't ever hit this" + end + end + end + end + + def test_stub_lambda + Thread.stub :new, lambda { 21+21 } do + @tc.assert_equal 42, Thread.new + end + end + + def test_stub_lambda_args + Thread.stub :new, lambda { 21+21 }, :wtf do + @tc.assert_equal 42, Thread.new + end + end + + def test_stub_lambda_block_5 + Thread.stub5 :new, lambda { 21+21 } do + result = Thread.new do + @tc.flunk "shouldn't ever hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_6 + skip_stub6 + + Thread.stub6 :new, lambda { 21+21 } do + result = Thread.new do + @tc.flunk "shouldn't ever hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_5 + @assertion_count += 1 + Thingy.stub5 :identity, lambda { |y| @tc.assert_equal :nope, y; 21+21 }, :WTF? 
do + result = Thingy.identity :nope do |x| + @tc.flunk "shouldn't reach this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_6 + skip_stub6 + + @assertion_count += 1 + Thingy.stub6 :identity, lambda { |y| @tc.assert_equal :nope, y; 21+21 }, :WTF? do + result = Thingy.identity :nope do |x| + @tc.flunk "shouldn't reach this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_6_2 + skip_stub6 + + @tc.assert_raises ArgumentError do + Thingy.stub6_2 :identity, lambda { |y| :__not_run__ }, :WTF? do + # doesn't matter + end + end + end + + def test_stub_lambda_block_call_5 + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub5 :open, lambda { |p, m, &blk| blk and blk.call io } do + File.open "foo.txt", "r" do |f| + rs = f && f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_6 + skip_stub6 + + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub6 :open, lambda { |p, m, &blk| blk.call io } do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_5 + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub5(:open, lambda { |p, m, &blk| blk and blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_6 + skip_stub6 + + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub6(:open, lambda { |p, m, &blk| blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_6_2 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + @tc.assert_raises ArgumentError do + File.stub6_2(:open, lambda { |p, m, &blk| blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + end + @tc.assert_nil rs + @tc.assert_equal "", io.string + end + + def test_stub_value + Thread.stub :new, 42 do + result = Thread.new + @tc.assert_equal 42, result + end + end + + def test_stub_value_args + Thread.stub :new, 42, :WTF? 
do + result = Thread.new + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_5 + @assertion_count += 1 + Thread.stub5 :new, 42 do + result = Thread.new do + @tc.assert true + end + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_6 + skip_stub6 + + Thread.stub6 :new, 42 do + result = Thread.new do + @tc.flunk "shouldn't hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_args_5 + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + File.stub5 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + @tc.assert_equal :value, result + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_value_block_args_5__break_if_not_passed + e = @tc.assert_raises NoMethodError do + File.stub5 :open, :return_value do # intentionally bad setup w/ no args + File.open "foo.txt", "r" do |f| + f.write "woot" + end + end + end + exp = "undefined method `write' for nil:NilClass" + assert_equal exp, e.message + end + + def test_stub_value_block_args_6 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + assert_deprecated do + File.stub6 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + @tc.assert_equal :value, result + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_value_block_args_6_2 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + @tc.assert_raises ArgumentError do + File.stub6_2 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + @tc.flunk "shouldn't hit this" + end + @tc.assert_equal :value, result + end + end + @tc.assert_nil rs + @tc.assert_equal "", io.string + end + + def assert_deprecated re = /deprecated/ + assert_output "", re do + yield + end + end + + def skip_stub6 + skip "not yet" unless STUB6 + end +end + +STUB6 = ENV["STUB6"] + +if STUB6 then + require "minitest/mock6" if STUB6 +else + class Object + alias stub5 stub + alias stub6 stub + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_reporter.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_reporter.rb new file mode 100644 index 0000000..6619d03 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_reporter.rb @@ -0,0 +1,299 @@ +require "minitest/autorun" +require "minitest/metametameta" + +class Runnable + def woot + assert true + end +end + +class TestMinitestReporter < MetaMetaMetaTestCase + + attr_accessor :r, :io + + def new_composite_reporter + reporter = Minitest::CompositeReporter.new + reporter << Minitest::SummaryReporter.new(self.io) + reporter << Minitest::ProgressReporter.new(self.io) + + def reporter.first + reporters.first + end + + def reporter.results + first.results + end + + def reporter.count + first.count + end + + def reporter.assertions + first.assertions + end + + reporter + end + + def setup + self.io = StringIO.new("") + self.r = new_composite_reporter + end + + def error_test + unless defined? @et then + @et = Minitest::Test.new(:woot) + @et.failures << Minitest::UnexpectedError.new(begin + raise "no" + rescue => e + e + end) + @et = Minitest::Result.from @et + end + @et + end + + def fail_test + unless defined? 
@ft then + @ft = Minitest::Test.new(:woot) + @ft.failures << begin + raise Minitest::Assertion, "boo" + rescue Minitest::Assertion => e + e + end + @ft = Minitest::Result.from @ft + end + @ft + end + + def passing_test + @pt ||= Minitest::Result.from Minitest::Test.new(:woot) + end + + def skip_test + unless defined? @st then + @st = Minitest::Test.new(:woot) + @st.failures << begin + raise Minitest::Skip + rescue Minitest::Assertion => e + e + end + @st = Minitest::Result.from @st + end + @st + end + + def test_to_s + r.record passing_test + r.record fail_test + assert_match "woot", r.first.to_s + end + + def test_passed_eh_empty + assert_predicate r, :passed? + end + + def test_passed_eh_failure + r.results << fail_test + + refute_predicate r, :passed? + end + + SKIP_MSG = "\n\nYou have skipped tests. Run with --verbose for details." + + def test_passed_eh_error + r.start + + r.results << error_test + + refute_predicate r, :passed? + + r.report + + refute_match SKIP_MSG, io.string + end + + def test_passed_eh_skipped + r.start + r.results << skip_test + assert r.passed? + + restore_env do + r.report + end + + assert_match SKIP_MSG, io.string + end + + def test_passed_eh_skipped_verbose + r.first.options[:verbose] = true + + r.start + r.results << skip_test + assert r.passed? + r.report + + refute_match SKIP_MSG, io.string + end + + def test_start + r.start + + exp = "Run options: \n\n# Running:\n\n" + + assert_equal exp, io.string + end + + def test_record_pass + r.record passing_test + + assert_equal ".", io.string + assert_empty r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_fail + fail_test = self.fail_test + r.record fail_test + + assert_equal "F", io.string + assert_equal [fail_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_error + error_test = self.error_test + r.record error_test + + assert_equal "E", io.string + assert_equal [error_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_skip + skip_test = self.skip_test + r.record skip_test + + assert_equal "S", io.string + assert_equal [skip_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_report_empty + r.start + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + + + Finished in 0.00 + + 0 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_passing + r.start + r.record passing_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + . 
+ + Finished in 0.00 + + 1 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_failure + r.start + r.record fail_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + F + + Finished in 0.00 + + 1) Failure: + Minitest::Test#woot [FILE:LINE]: + boo + + 1 runs, 0 assertions, 1 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_error + r.start + r.record error_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + E + + Finished in 0.00 + + 1) Error: + Minitest::Test#woot: + RuntimeError: no + FILE:LINE:in `error_test' + FILE:LINE:in `test_report_error' + + 1 runs, 0 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_skipped + r.start + r.record skip_test + + restore_env do + r.report + end + + exp = clean <<-EOM + Run options: + + # Running: + + S + + Finished in 0.00 + + 1 runs, 0 assertions, 0 failures, 0 errors, 1 skips + + You have skipped tests. Run with --verbose for details. + EOM + + assert_equal exp, normalize_output(io.string) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_spec.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_spec.rb new file mode 100644 index 0000000..fe4710d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_spec.rb @@ -0,0 +1,1061 @@ +# encoding: utf-8 +require "minitest/metametameta" +require "stringio" + +class MiniSpecA < Minitest::Spec; end +class MiniSpecB < Minitest::Test; extend Minitest::Spec::DSL; end +class MiniSpecC < MiniSpecB; end +class NamedExampleA < MiniSpecA; end +class NamedExampleB < MiniSpecB; end +class NamedExampleC < MiniSpecC; end +class ExampleA; end +class ExampleB < ExampleA; end + +describe Minitest::Spec do + # helps to deal with 2.4 deprecation of Fixnum for Integer + Int = 1.class + + # do not parallelize this suite... it just can"t handle it. + + def assert_triggered expected = "blah", klass = Minitest::Assertion + @assertion_count += 1 + + e = assert_raises(klass) do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, '\1') + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + msg.gsub!(/(\d\.\d{6})\d+/, '\1xxx') # normalize: ruby version, impl, platform + msg.gsub!(/:0x[Xa-fA-F0-9]{4,}[ @].+?>/, ":0xXXXXXX@PATH>") + + if expected + @assertion_count += 1 + case expected + when String then + assert_equal expected, msg + when Regexp then + @assertion_count += 1 + assert_match expected, msg + else + flunk "Unknown: #{expected.inspect}" + end + end + end + + def assert_success spec + assert_equal true, spec + end + + before do + @assertion_count = 4 + end + + after do + _(self.assertions).must_equal @assertion_count if passed? and not skipped? + end + + it "needs to be able to catch a Minitest::Assertion exception" do + @assertion_count = 1 + + assert_triggered "Expected 1 to not be equal to 1." do + _(1).wont_equal 1 + end + end + + it "needs to check for file existence" do + @assertion_count = 3 + + assert_success _(__FILE__).path_must_exist + + assert_triggered "Expected path 'blah' to exist." do + _("blah").path_must_exist + end + end + + it "needs to check for file non-existence" do + @assertion_count = 3 + + assert_success _("blah").path_wont_exist + + assert_triggered "Expected path '#{__FILE__}' to not exist." 
do + _(__FILE__).path_wont_exist + end + end + + it "needs to be sensible about must_include order" do + @assertion_count += 3 # must_include is 2 assertions + + assert_success _([1, 2, 3]).must_include(2) + + assert_triggered "Expected [1, 2, 3] to include 5." do + _([1, 2, 3]).must_include 5 + end + + assert_triggered "msg.\nExpected [1, 2, 3] to include 5." do + _([1, 2, 3]).must_include 5, "msg" + end + end + + it "needs to be sensible about wont_include order" do + @assertion_count += 3 # wont_include is 2 assertions + + assert_success _([1, 2, 3]).wont_include(5) + + assert_triggered "Expected [1, 2, 3] to not include 2." do + _([1, 2, 3]).wont_include 2 + end + + assert_triggered "msg.\nExpected [1, 2, 3] to not include 2." do + _([1, 2, 3]).wont_include 2, "msg" + end + end + + it "needs to catch an expected exception" do + @assertion_count = 2 + + expect { raise "blah" }.must_raise RuntimeError + expect { raise Minitest::Assertion }.must_raise Minitest::Assertion + end + + it "needs to catch an unexpected exception" do + @assertion_count -= 2 # no positive + + msg = <<-EOM.gsub(/^ {6}/, "").chomp + [RuntimeError] exception expected, not + Class: + Message: <"woot"> + ---Backtrace--- + EOM + + assert_triggered msg do + expect { raise StandardError, "woot" }.must_raise RuntimeError + end + + assert_triggered "msg.\n#{msg}" do + expect { raise StandardError, "woot" }.must_raise RuntimeError, "msg" + end + end + + it "needs to ensure silence" do + @assertion_count -= 1 # no msg + @assertion_count += 2 # assert_output is 2 assertions + + assert_success expect {}.must_be_silent + + assert_triggered "In stdout.\nExpected: \"\"\n Actual: \"xxx\"" do + expect { print "xxx" }.must_be_silent + end + end + + it "needs to have all methods named well" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count = 2 + + methods = Minitest::Expectations.public_instance_methods.grep(/must|wont/) + methods.map!(&:to_s) if Symbol === methods.first + + musts, wonts = methods.sort.partition { |m| m =~ /must/ } + + expected_musts = %w[must_be + must_be_close_to + must_be_empty + must_be_instance_of + must_be_kind_of + must_be_nil + must_be_same_as + must_be_silent + must_be_within_delta + must_be_within_epsilon + must_equal + must_include + must_match + must_output + must_raise + must_respond_to + must_throw + path_must_exist] + + bad = %w[not raise throw send output be_silent] + + expected_wonts = expected_musts.map { |m| m.sub(/must/, "wont") }.sort + expected_wonts.reject! { |m| m =~ /wont_#{Regexp.union(*bad)}/ } + + _(musts).must_equal expected_musts + _(wonts).must_equal expected_wonts + end + + it "needs to raise if an expected exception is not raised" do + @assertion_count -= 2 # no positive test + + assert_triggered "RuntimeError expected but nothing was raised." do + expect { 42 }.must_raise RuntimeError + end + + assert_triggered "msg.\nRuntimeError expected but nothing was raised." do + expect { 42 }.must_raise RuntimeError, "msg" + end + end + + it "needs to verify binary messages" do + assert_success _(42).wont_be(:<, 24) + + assert_triggered "Expected 24 to not be < 42." do + _(24).wont_be :<, 42 + end + + assert_triggered "msg.\nExpected 24 to not be < 42." do + _(24).wont_be :<, 42, "msg" + end + end + + it "needs to verify emptyness" do + @assertion_count += 3 # empty is 2 assertions + + assert_success _([]).must_be_empty + + assert_triggered "Expected [42] to be empty." do + _([42]).must_be_empty + end + + assert_triggered "msg.\nExpected [42] to be empty." 
do + _([42]).must_be_empty "msg" + end + end + + it "needs to verify equality" do + @assertion_count += 1 + + assert_success _(6 * 7).must_equal(42) + + assert_triggered "Expected: 42\n Actual: 54" do + _(6 * 9).must_equal 42 + end + + assert_triggered "msg.\nExpected: 42\n Actual: 54" do + _(6 * 9).must_equal 42, "msg" + end + + assert_triggered(/^-42\n\+#\n/) do + _(proc { 42 }).must_equal 42 # proc isn't called, so expectation fails + end + end + + it "needs to warn on equality with nil" do + @assertion_count += 1 # extra test + + out, err = capture_io do + assert_success _(nil).must_equal(nil) + end + + exp = "DEPRECATED: Use assert_nil if expecting nil from #{__FILE__}:#{__LINE__-3}. " \ + "This will fail in Minitest 6.\n" + exp = "" if $-w.nil? + + assert_empty out + assert_equal exp, err + end + + it "needs to verify floats outside a delta" do + @assertion_count += 1 # extra test + + assert_success _(24).wont_be_close_to(42) + + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= 0.001." do + _(6 * 7.0).wont_be_close_to 42 + end + + x = "1.0e-05" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_close_to 42, 0.00001 + end + + assert_triggered "msg.\nExpected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_close_to 42, 0.00001, "msg" + end + end + + it "needs to verify floats outside an epsilon" do + @assertion_count += 1 # extra test + + assert_success _(24).wont_be_within_epsilon(42) + + x = "0.042" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42 + end + + x = "0.00042" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42, 0.00001 + end + + assert_triggered "msg.\nExpected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42, 0.00001, "msg" + end + end + + it "needs to verify floats within a delta" do + @assertion_count += 1 # extra test + + assert_success _(6.0 * 7).must_be_close_to(42.0) + + assert_triggered "Expected |0.0 - 0.01| (0.01) to be <= 0.001." do + _(1.0 / 100).must_be_close_to 0.0 + end + + x = "1.0e-06" + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= #{x}." do + _(1.0 / 1000).must_be_close_to 0.0, 0.000001 + end + + assert_triggered "msg.\nExpected |0.0 - 0.001| (0.001) to be <= #{x}." do + _(1.0 / 1000).must_be_close_to 0.0, 0.000001, "msg" + end + end + + it "needs to verify floats within an epsilon" do + @assertion_count += 1 # extra test + + assert_success _(6.0 * 7).must_be_within_epsilon(42.0) + + assert_triggered "Expected |0.0 - 0.01| (0.01) to be <= 0.0." do + _(1.0 / 100).must_be_within_epsilon 0.0 + end + + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= 0.0." do + _(1.0 / 1000).must_be_within_epsilon 0.0, 0.000001 + end + + assert_triggered "msg.\nExpected |0.0 - 0.001| (0.001) to be <= 0.0." do + _(1.0 / 1000).must_be_within_epsilon 0.0, 0.000001, "msg" + end + end + + it "needs to verify identity" do + assert_success _(1).must_be_same_as(1) + + assert_triggered "Expected 1 (oid=N) to be the same as 2 (oid=N)." do + _(1).must_be_same_as 2 + end + + assert_triggered "msg.\nExpected 1 (oid=N) to be the same as 2 (oid=N)." do + _(1).must_be_same_as 2, "msg" + end + end + + it "needs to verify inequality" do + @assertion_count += 2 + assert_success _(42).wont_equal(6 * 9) + assert_success _(proc {}).wont_equal(42) + + assert_triggered "Expected 1 to not be equal to 1." 
do + _(1).wont_equal 1 + end + + assert_triggered "msg.\nExpected 1 to not be equal to 1." do + _(1).wont_equal 1, "msg" + end + end + + it "needs to verify instances of a class" do + assert_success _(42).wont_be_instance_of(String) + + assert_triggered "Expected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int + end + + assert_triggered "msg.\nExpected 42 to not be an instance of #{Int.name}." do + _(42).wont_be_instance_of Int, "msg" + end + end + + it "needs to verify kinds of a class" do + @assertion_count += 2 + + assert_success _(42).wont_be_kind_of(String) + assert_success _(proc {}).wont_be_kind_of(String) + + assert_triggered "Expected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int + end + + assert_triggered "msg.\nExpected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int, "msg" + end + end + + it "needs to verify kinds of objects" do + @assertion_count += 3 # extra test + + assert_success _(6 * 7).must_be_kind_of(Int) + assert_success _(6 * 7).must_be_kind_of(Numeric) + + assert_triggered "Expected 42 to be a kind of String, not #{Int.name}." do + _(6 * 7).must_be_kind_of String + end + + assert_triggered "msg.\nExpected 42 to be a kind of String, not #{Int.name}." do + _(6 * 7).must_be_kind_of String, "msg" + end + + exp = "Expected # to be a kind of String, not Proc." + assert_triggered exp do + _(proc {}).must_be_kind_of String + end + end + + it "needs to verify mismatch" do + @assertion_count += 3 # match is 2 + + assert_success _("blah").wont_match(/\d+/) + + assert_triggered "Expected /\\w+/ to not match \"blah\"." do + _("blah").wont_match(/\w+/) + end + + assert_triggered "msg.\nExpected /\\w+/ to not match \"blah\"." do + _("blah").wont_match(/\w+/, "msg") + end + end + + it "needs to verify nil" do + assert_success _(nil).must_be_nil + + assert_triggered "Expected 42 to be nil." do + _(42).must_be_nil + end + + assert_triggered "msg.\nExpected 42 to be nil." do + _(42).must_be_nil "msg" + end + end + + it "needs to verify non-emptyness" do + @assertion_count += 3 # empty is 2 assertions + + assert_success _(["some item"]).wont_be_empty + + assert_triggered "Expected [] to not be empty." do + _([]).wont_be_empty + end + + assert_triggered "msg.\nExpected [] to not be empty." do + _([]).wont_be_empty "msg" + end + end + + it "needs to verify non-identity" do + assert_success _(1).wont_be_same_as(2) + + assert_triggered "Expected 1 (oid=N) to not be the same as 1 (oid=N)." do + _(1).wont_be_same_as 1 + end + + assert_triggered "msg.\nExpected 1 (oid=N) to not be the same as 1 (oid=N)." do + _(1).wont_be_same_as 1, "msg" + end + end + + it "needs to verify non-nil" do + assert_success _(42).wont_be_nil + + assert_triggered "Expected nil to not be nil." do + _(nil).wont_be_nil + end + + assert_triggered "msg.\nExpected nil to not be nil." do + _(nil).wont_be_nil "msg" + end + end + + it "needs to verify objects not responding to a message" do + assert_success _("").wont_respond_to(:woot!) + + assert_triggered "Expected \"\" to not respond to to_s." do + _("").wont_respond_to :to_s + end + + assert_triggered "msg.\nExpected \"\" to not respond to to_s." 
do + _("").wont_respond_to :to_s, "msg" + end + end + + it "needs to verify output in stderr" do + @assertion_count -= 1 # no msg + + assert_success expect { $stderr.print "blah" }.must_output(nil, "blah") + + assert_triggered "In stderr.\nExpected: \"blah\"\n Actual: \"xxx\"" do + expect { $stderr.print "xxx" }.must_output(nil, "blah") + end + end + + it "needs to verify output in stdout" do + @assertion_count -= 1 # no msg + + assert_success expect { print "blah" }.must_output("blah") + + assert_triggered "In stdout.\nExpected: \"blah\"\n Actual: \"xxx\"" do + expect { print "xxx" }.must_output("blah") + end + end + + it "needs to verify regexp matches" do + @assertion_count += 3 # must_match is 2 assertions + + assert_success _("blah").must_match(/\w+/) + + assert_triggered "Expected /\\d+/ to match \"blah\"." do + _("blah").must_match(/\d+/) + end + + assert_triggered "msg.\nExpected /\\d+/ to match \"blah\"." do + _("blah").must_match(/\d+/, "msg") + end + end + + describe "expect" do + before do + @assertion_count -= 3 + end + + it "can use expect" do + _(1 + 1).must_equal 2 + end + + it "can use expect with a lambda" do + _ { raise "blah" }.must_raise RuntimeError + end + + it "can use expect in a thread" do + Thread.new { _(1 + 1).must_equal 2 }.join + end + + it "can NOT use must_equal in a thread. It must use expect in a thread" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + assert_raises RuntimeError do + capture_io do + Thread.new { (1 + 1).must_equal 2 }.join + end + end + end + + it "fails gracefully when expectation used outside of `it`" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 1 + + e = assert_raises RuntimeError do + capture_io do + Thread.new { # forces ctx to be nil + describe("woot") do + (1 + 1).must_equal 2 + end + }.join + end + end + + assert_equal "Calling #must_equal outside of test.", e.message + end + + it "deprecates expectation used without _" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 3 + + exp = /DEPRECATED: global use of must_equal from/ + + assert_output "", exp do + (1 + 1).must_equal 2 + end + end + + # https://github.com/seattlerb/minitest/issues/837 + # https://github.com/rails/rails/pull/39304 + it "deprecates expectation used without _ with empty backtrace_filter" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 3 + + exp = /DEPRECATED: global use of must_equal from/ + + with_empty_backtrace_filter do + assert_output "", exp do + (1 + 1).must_equal 2 + end + end + end + end + + it "needs to verify throw" do + @assertion_count += 2 # 2 extra tests + + assert_success expect { throw :blah }.must_throw(:blah) + + assert_triggered "Expected :blah to have been thrown." do + expect {}.must_throw :blah + end + + assert_triggered "Expected :blah to have been thrown, not :xxx." do + expect { throw :xxx }.must_throw :blah + end + + assert_triggered "msg.\nExpected :blah to have been thrown." do + expect {}.must_throw :blah, "msg" + end + + assert_triggered "msg.\nExpected :blah to have been thrown, not :xxx." do + expect { throw :xxx }.must_throw :blah, "msg" + end + end + + it "needs to verify types of objects" do + assert_success _(6 * 7).must_be_instance_of(Int) + + exp = "Expected 42 to be an instance of String, not #{Int.name}." 
+ + assert_triggered exp do + _(6 * 7).must_be_instance_of String + end + + assert_triggered "msg.\n#{exp}" do + _(6 * 7).must_be_instance_of String, "msg" + end + end + + it "needs to verify using any (negative) predicate" do + @assertion_count -= 1 # doesn"t take a message + + assert_success _("blah").wont_be(:empty?) + + assert_triggered "Expected \"\" to not be empty?." do + _("").wont_be :empty? + end + end + + it "needs to verify using any binary operator" do + @assertion_count -= 1 # no msg + + assert_success _(41).must_be(:<, 42) + + assert_triggered "Expected 42 to be < 41." do + _(42).must_be(:<, 41) + end + end + + it "needs to verify using any predicate" do + @assertion_count -= 1 # no msg + + assert_success _("").must_be(:empty?) + + assert_triggered "Expected \"blah\" to be empty?." do + _("blah").must_be :empty? + end + end + + it "needs to verify using respond_to" do + assert_success _(42).must_respond_to(:+) + + assert_triggered "Expected 42 (#{Int.name}) to respond to #clear." do + _(42).must_respond_to :clear + end + + assert_triggered "msg.\nExpected 42 (#{Int.name}) to respond to #clear." do + _(42).must_respond_to :clear, "msg" + end + end +end + +describe Minitest::Spec, :let do + i_suck_and_my_tests_are_order_dependent! + + def _count + $let_count ||= 0 + end + + let :count do + $let_count += 1 + $let_count + end + + it "is evaluated once per example" do + _(_count).must_equal 0 + + _(count).must_equal 1 + _(count).must_equal 1 + + _(_count).must_equal 1 + end + + it "is REALLY evaluated once per example" do + _(_count).must_equal 1 + + _(count).must_equal 2 + _(count).must_equal 2 + + _(_count).must_equal 2 + end + + it 'raises an error if the name begins with "test"' do + expect { self.class.let(:test_value) { true } }.must_raise ArgumentError + end + + it "raises an error if the name shadows a normal instance method" do + expect { self.class.let(:message) { true } }.must_raise ArgumentError + end + + it "doesn't raise an error if it is just another let" do + v = proc do + describe :outer do + let(:bar) + describe :inner do + let(:bar) + end + end + :good + end.call + _(v).must_equal :good + end + + it "procs come after dont_flip" do + p = proc {} + assert_respond_to p, :call + _(p).must_respond_to :call + end +end + +describe Minitest::Spec, :subject do + attr_reader :subject_evaluation_count + + subject do + @subject_evaluation_count ||= 0 + @subject_evaluation_count += 1 + @subject_evaluation_count + end + + it "is evaluated once per example" do + _(subject).must_equal 1 + _(subject).must_equal 1 + _(subject_evaluation_count).must_equal 1 + end +end + +class TestMetaStatic < Minitest::Test + def test_children + Minitest::Spec.children.clear # prevents parallel run + + y = z = nil + x = describe "top-level thingy" do + y = describe "first thingy" do end + + it "top-level-it" do end + + z = describe "second thingy" do end + end + + assert_equal [x], Minitest::Spec.children + assert_equal [y, z], x.children + assert_equal [], y.children + assert_equal [], z.children + end + + def test_it_wont_remove_existing_child_test_methods + Minitest::Spec.children.clear # prevents parallel run + + inner = nil + outer = describe "outer" do + inner = describe "inner" do + it do + assert true + end + end + it do + assert true + end + end + + assert_equal 1, outer.public_instance_methods.grep(/^test_/).count + assert_equal 1, inner.public_instance_methods.grep(/^test_/).count + end + + def test_it_wont_add_test_methods_to_children + Minitest::Spec.children.clear # prevents 
parallel run + + inner = nil + outer = describe "outer" do + inner = describe "inner" do end + it do + assert true + end + end + + assert_equal 1, outer.public_instance_methods.grep(/^test_/).count + assert_equal 0, inner.public_instance_methods.grep(/^test_/).count + end +end + +class TestMeta < MetaMetaMetaTestCase + # do not call parallelize_me! here because specs use register_spec_type globally + + def util_structure + y = z = nil + before_list = [] + after_list = [] + x = describe "top-level thingy" do + before { before_list << 1 } + after { after_list << 1 } + + it "top-level-it" do end + + y = describe "inner thingy" do + before { before_list << 2 } + after { after_list << 2 } + it "inner-it" do end + + z = describe "very inner thingy" do + before { before_list << 3 } + after { after_list << 3 } + it "inner-it" do end + + it { } # ignore me + specify { } # anonymous it + end + end + end + + return x, y, z, before_list, after_list + end + + def test_register_spec_type + original_types = Minitest::Spec::TYPES.dup + + assert_includes Minitest::Spec::TYPES, [//, Minitest::Spec] + + Minitest::Spec.register_spec_type(/woot/, TestMeta) + + p = lambda do |_| true end + Minitest::Spec.register_spec_type TestMeta, &p + + keys = Minitest::Spec::TYPES.map(&:first) + + assert_includes keys, /woot/ + assert_includes keys, p + ensure + Minitest::Spec::TYPES.replace original_types + end + + def test_spec_type + original_types = Minitest::Spec::TYPES.dup + + Minitest::Spec.register_spec_type(/A$/, MiniSpecA) + Minitest::Spec.register_spec_type MiniSpecB do |desc| + desc.superclass == ExampleA + end + Minitest::Spec.register_spec_type MiniSpecC do |_desc, *addl| + addl.include? :woot + end + + assert_equal MiniSpecA, Minitest::Spec.spec_type(ExampleA) + assert_equal MiniSpecB, Minitest::Spec.spec_type(ExampleB) + assert_equal MiniSpecC, Minitest::Spec.spec_type(ExampleB, :woot) + ensure + Minitest::Spec::TYPES.replace original_types + end + + def test_bug_dsl_expectations + spec_class = Class.new MiniSpecB do + it "should work" do + _(0).must_equal 0 + end + end + + test_name = spec_class.instance_methods.sort.grep(/test/).first + + spec = spec_class.new test_name + + result = spec.run + + assert spec.passed? + assert result.passed? 
+ assert_equal 1, result.assertions + end + + def test_name + spec_a = describe ExampleA do; end + spec_b = describe ExampleB, :random_method do; end + spec_c = describe ExampleB, :random_method, :addl_context do; end + + assert_equal "ExampleA", spec_a.name + assert_equal "ExampleB::random_method", spec_b.name + assert_equal "ExampleB::random_method::addl_context", spec_c.name + end + + def test_name2 + assert_equal "NamedExampleA", NamedExampleA.name + assert_equal "NamedExampleB", NamedExampleB.name + assert_equal "NamedExampleC", NamedExampleC.name + + spec_a = describe ExampleA do; end + spec_b = describe ExampleB, :random_method do; end + + assert_equal "ExampleA", spec_a.name + assert_equal "ExampleB::random_method", spec_b.name + end + + def test_structure + x, y, z, * = util_structure + + assert_equal "top-level thingy", x.to_s + assert_equal "top-level thingy::inner thingy", y.to_s + assert_equal "top-level thingy::inner thingy::very inner thingy", z.to_s + + assert_equal "top-level thingy", x.desc + assert_equal "inner thingy", y.desc + assert_equal "very inner thingy", z.desc + + top_methods = %w[setup teardown test_0001_top-level-it] + inner_methods1 = %w[setup teardown test_0001_inner-it] + inner_methods2 = inner_methods1 + + %w[test_0002_anonymous test_0003_anonymous] + + assert_equal top_methods, x.instance_methods(false).sort.map(&:to_s) + assert_equal inner_methods1, y.instance_methods(false).sort.map(&:to_s) + assert_equal inner_methods2, z.instance_methods(false).sort.map(&:to_s) + end + + def test_structure_postfix_it + z = nil + y = describe "outer" do + # NOT here, below the inner-describe! + # it "inner-it" do end + + z = describe "inner" do + it "inner-it" do end + end + + # defined AFTER inner describe means we'll try to wipe out the inner-it + it "inner-it" do end + end + + assert_equal %w[test_0001_inner-it], y.instance_methods(false).map(&:to_s) + assert_equal %w[test_0001_inner-it], z.instance_methods(false).map(&:to_s) + end + + def test_setup_teardown_behavior + _, _, z, before_list, after_list = util_structure + + @tu = z + + run_tu_with_fresh_reporter + + size = z.runnable_methods.size + assert_equal [1, 2, 3] * size, before_list + assert_equal [3, 2, 1] * size, after_list + end + + def test_describe_first_structure + x1 = x2 = y = z = nil + x = describe "top-level thingy" do + y = describe "first thingy" do end + + x1 = it "top level it" do end + x2 = it "не латинские &いった α, β, γ, δ, ε hello!!! world" do end + + z = describe "second thingy" do end + end + + test_methods = ["test_0001_top level it", + "test_0002_не латинские &いった α, β, γ, δ, ε hello!!! 
world", + ].sort + + assert_equal test_methods, [x1, x2] + assert_equal test_methods, x.instance_methods.grep(/^test/).map(&:to_s).sort + assert_equal [], y.instance_methods.grep(/^test/) + assert_equal [], z.instance_methods.grep(/^test/) + end + + def test_structure_subclasses + z = nil + x = Class.new Minitest::Spec do + def xyz; end + end + y = Class.new x do + z = describe("inner") { } + end + + assert_respond_to x.new(nil), "xyz" + assert_respond_to y.new(nil), "xyz" + assert_respond_to z.new(nil), "xyz" + end +end + +class TestSpecInTestCase < MetaMetaMetaTestCase + def setup + super + + Thread.current[:current_spec] = self + @tc = self + @assertion_count = 2 + end + + def assert_triggered expected, klass = Minitest::Assertion + @assertion_count += 1 + + e = assert_raises klass do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, "\1") + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + + assert_equal expected, msg + end + + def teardown + msg = "expected #{@assertion_count} assertions, not #{@tc.assertions}" + assert_equal @assertion_count, @tc.assertions, msg + end + + def test_expectation + @tc.assert_equal true, _(1).must_equal(1) + end + + def test_expectation_triggered + assert_triggered "Expected: 2\n Actual: 1" do + _(1).must_equal 2 + end + end + + include Minitest::Spec::DSL::InstanceMethods + + def test_expectation_with_a_message + assert_triggered "woot.\nExpected: 2\n Actual: 1" do + _(1).must_equal 2, "woot" + end + end +end + +class ValueMonadTest < Minitest::Test + attr_accessor :struct + + def setup + @struct = { :_ => "a", :value => "b", :expect => "c" } + def @struct.method_missing k # think openstruct + self[k] + end + end + + def test_value_monad_method + assert_equal "a", struct._ + end + + def test_value_monad_value_alias + assert_equal "b", struct.value + end + + def test_value_monad_expect_alias + assert_equal "c", struct.expect + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_test.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_test.rb new file mode 100644 index 0000000..89ba812 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.14.2/test/minitest/test_minitest_test.rb @@ -0,0 +1,1084 @@ +# encoding: UTF-8 + +require "pathname" +require "minitest/metametameta" + +if defined? Encoding then + e = Encoding.default_external + if e != Encoding::UTF_8 then + warn "" + warn "" + warn "NOTE: External encoding #{e} is not UTF-8. Tests WILL fail." + warn " Run tests with `RUBYOPT=-Eutf-8 rake` to avoid errors." + warn "" + warn "" + end +end + +class Minitest::Runnable + def whatever # faked for testing + assert true + end +end + +class TestMinitestUnit < MetaMetaMetaTestCase + parallelize_me! + + pwd = Pathname.new File.expand_path Dir.pwd + basedir = Pathname.new(File.expand_path "lib/minitest") + "mini" + basedir = basedir.relative_path_from(pwd).to_s + MINITEST_BASE_DIR = basedir[/\A\./] ? basedir : "./#{basedir}" + BT_MIDDLE = ["#{MINITEST_BASE_DIR}/test.rb:161:in `each'", + "#{MINITEST_BASE_DIR}/test.rb:158:in `each'", + "#{MINITEST_BASE_DIR}/test.rb:139:in `run'", + "#{MINITEST_BASE_DIR}/test.rb:106:in `run'"] + + def test_filter_backtrace + # this is a semi-lame mix of relative paths. 
+ # I cheated by making the autotest parts not have ./ + bt = (["lib/autotest.rb:571:in `add_exception'", + "test/test_autotest.rb:62:in `test_add_exception'", + "#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/test.rb:29", + "test/test_autotest.rb:422"]) + bt = util_expand_bt bt + + ex = ["lib/autotest.rb:571:in `add_exception'", + "test/test_autotest.rb:62:in `test_add_exception'"] + ex = util_expand_bt ex + + fu = Minitest.filter_backtrace(bt) + + assert_equal ex, fu + end + + def test_filter_backtrace_all_unit + bt = (["#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/test.rb:29"]) + ex = bt.clone + fu = Minitest.filter_backtrace(bt) + assert_equal ex, fu + end + + def test_filter_backtrace_unit_starts + bt = (["#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/mini/test.rb:29", + "-e:1"]) + + bt = util_expand_bt bt + + ex = ["-e:1"] + fu = Minitest.filter_backtrace bt + assert_equal ex, fu + end + + def test_filter_backtrace__empty + with_empty_backtrace_filter do + bt = %w[first second third] + fu = Minitest.filter_backtrace bt.dup + assert_equal bt, fu + end + end + + def test_infectious_binary_encoding + @tu = Class.new FakeNamedTest do + def test_this_is_not_ascii_assertion + assert_equal "ЁЁЁ", "ёёё" + end + + def test_this_is_non_ascii_failure_message + fail 'ЁЁЁ'.force_encoding('ASCII-8BIT') + end + end + + expected = clean <<-EOM + EF + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_this_is_non_ascii_failure_message: + RuntimeError: ЁЁЁ + FILE:LINE:in `test_this_is_non_ascii_failure_message' + + 2) Failure: + FakeNamedTestXX#test_this_is_not_ascii_assertion [FILE:LINE]: + Expected: \"ЁЁЁ\" + Actual: \"ёёё\" + + 2 runs, 1 assertions, 1 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_passed_eh_teardown_good + test_class = Class.new FakeNamedTest do + def teardown; assert true; end + def test_omg; assert true; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + assert_predicate test, :passed? + refute_predicate test, :skipped? + end + + def test_passed_eh_teardown_skipped + test_class = Class.new FakeNamedTest do + def teardown; assert true; end + def test_omg; skip "bork"; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + refute_predicate test, :passed? + assert_predicate test, :skipped? + end + + def test_passed_eh_teardown_flunked + test_class = Class.new FakeNamedTest do + def teardown; flunk; end + def test_omg; assert true; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + refute_predicate test, :passed? + refute_predicate test, :skipped? + end + + def util_expand_bt bt + if RUBY_VERSION >= "1.9.0" then + bt.map { |f| (f =~ /^\./) ? File.expand_path(f) : f } + else + bt + end + end +end + +class TestMinitestUnitInherited < MetaMetaMetaTestCase + def with_overridden_include + Class.class_eval do + def inherited_with_hacks _klass + throw :inherited_hook + end + + alias inherited_without_hacks inherited + alias inherited inherited_with_hacks + alias IGNORE_ME! inherited # 1.8 bug. 
god I love venture bros + end + + yield + ensure + Class.class_eval do + alias inherited inherited_without_hacks + + undef_method :inherited_with_hacks + undef_method :inherited_without_hacks + end + + refute_respond_to Class, :inherited_with_hacks + refute_respond_to Class, :inherited_without_hacks + end + + def test_inherited_hook_plays_nice_with_others + with_overridden_include do + assert_throws :inherited_hook do + Class.new FakeNamedTest + end + end + end +end + +class TestMinitestRunner < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. + + def test_class_runnables + @assertion_count = 0 + + tc = Class.new(Minitest::Test) + + assert_equal 1, Minitest::Test.runnables.size + assert_equal [tc], Minitest::Test.runnables + end + + def test_run_test + @tu = + Class.new FakeNamedTest do + attr_reader :foo + + def run + @foo = "hi mom!" + r = super + @foo = "okay" + + r + end + + def test_something + assert_equal "hi mom!", foo + end + end + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_error + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_error + raise "unhandled exception" + end + end + + expected = clean <<-EOM + E. + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_error: + RuntimeError: unhandled exception + FILE:LINE:in \`test_error\' + + 2 runs, 1 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_error_teardown + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def teardown + raise "unhandled exception" + end + end + + expected = clean <<-EOM + E + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_something: + RuntimeError: unhandled exception + FILE:LINE:in \`teardown\' + + 1 runs, 1 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_failing + setup_basic_tu + + expected = clean <<-EOM + F. + + Finished in 0.00 + + 1) Failure: + FakeNamedTestXX#test_failure [FILE:LINE]: + Expected false to be truthy. + + 2 runs, 2 assertions, 1 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def setup_basic_tu + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_failure + assert false + end + end + end + + def test_run_failing_filtered + setup_basic_tu + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected, %w[--name /some|thing/ --seed 42] + end + + def assert_filtering filter, name, expected, a = false + args = %W[--#{filter} #{name} --seed 42] + + alpha = Class.new FakeNamedTest do + define_method :test_something do + assert a + end + end + Object.const_set(:Alpha, alpha) + + beta = Class.new FakeNamedTest do + define_method :test_something do + assert true + end + end + Object.const_set(:Beta, beta) + + @tus = [alpha, beta] + + assert_report expected, args + ensure + Object.send :remove_const, :Alpha + Object.send :remove_const, :Beta + end + + def test_run_filtered_including_suite_name + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "/Beta#test_something/", expected + end + + def test_run_filtered_including_suite_name_string + expected = clean <<-EOM + . 
+ + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "Beta#test_something", expected + end + + def test_run_filtered_string_method_only + expected = clean <<-EOM + .. + + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "test_something", expected, :pass + end + + def test_run_failing_excluded + setup_basic_tu + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected, %w[--exclude /failure/ --seed 42] + end + + def test_run_filtered_excluding_suite_name + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "/Alpha#test_something/", expected + end + + def test_run_filtered_excluding_suite_name_string + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "Alpha#test_something", expected + end + + def test_run_filtered_excluding_string_method_only + expected = clean <<-EOM + + + Finished in 0.00 + + 0 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "test_something", expected, :pass + end + + def test_run_passing + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + end + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_skip + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_skip + skip "not yet" + end + end + + expected = clean <<-EOM + S. + + Finished in 0.00 + + 2 runs, 1 assertions, 0 failures, 0 errors, 1 skips + + You have skipped tests. Run with --verbose for details. + EOM + + restore_env do + assert_report expected + end + end + + def test_run_skip_verbose + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_skip + skip "not yet" + end + end + + expected = clean <<-EOM + FakeNamedTestXX#test_skip = 0.00 s = S + FakeNamedTestXX#test_something = 0.00 s = . + + Finished in 0.00 + + 1) Skipped: + FakeNamedTestXX#test_skip [FILE:LINE]: + not yet + + 2 runs, 1 assertions, 0 failures, 0 errors, 1 skips + EOM + + assert_report expected, %w[--seed 42 --verbose] + end + + def test_run_with_other_runner + @tu = + Class.new FakeNamedTest do + def self.run reporter, options = {} + @reporter = reporter + before_my_suite + super + end + + def self.name; "wacky!" end + + def self.before_my_suite + @reporter.io.puts "Running #{self.name} tests" + @@foo = 1 + end + + def test_something + assert_equal 1, @@foo + end + + def test_something_else + assert_equal 1, @@foo + end + end + + expected = clean <<-EOM + Running wacky! tests + .. 
+ + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + require "monitor" + + class Latch + def initialize count = 1 + @count = count + @lock = Monitor.new + @cv = @lock.new_cond + end + + def release + @lock.synchronize do + @count -= 1 if @count > 0 + @cv.broadcast if @count == 0 + end + end + + def await + @lock.synchronize { @cv.wait_while { @count > 0 } } + end + end + + def test_run_parallel + test_count = 2 + test_latch = Latch.new test_count + wait_latch = Latch.new test_count + main_latch = Latch.new + + thread = Thread.new { + Thread.current.abort_on_exception = true + + # This latch waits until both test latches have been released. Both + # latches can't be released unless done in separate threads because + # `main_latch` keeps the test method from finishing. + test_latch.await + main_latch.release + } + + @tu = + Class.new FakeNamedTest do + parallelize_me! + + test_count.times do |i| + define_method :"test_wait_on_main_thread_#{i}" do + test_latch.release + + # This latch blocks until the "main thread" releases it. The main + # thread can't release this latch until both test latches have + # been released. This forces the latches to be released in separate + # threads. + main_latch.await + assert true + end + end + end + + expected = clean <<-EOM + .. + + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report(expected) do |reporter| + reporter.extend(Module.new { + define_method("record") do |result| + super(result) + wait_latch.release + end + + define_method("report") do + wait_latch.await + super() + end + }) + end + assert thread.join + end +end + +class TestMinitestUnitOrder < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. 
+ + def test_before_setup + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :setup do + super() + call_order << :setup + end + + define_method :before_setup do + call_order << :before_setup + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:before_setup, :setup] + assert_equal expected, call_order + end + + def test_after_teardown + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :teardown do + super() + call_order << :teardown + end + + define_method :after_teardown do + call_order << :after_teardown + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:teardown, :after_teardown] + assert_equal expected, call_order + end + + def test_all_teardowns_are_guaranteed_to_run + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :after_teardown do + super() + call_order << :after_teardown + raise + end + + define_method :teardown do + super() + call_order << :teardown + raise + end + + define_method :before_teardown do + super() + call_order << :before_teardown + raise + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:before_teardown, :teardown, :after_teardown] + assert_equal expected, call_order + end + + def test_setup_and_teardown_survive_inheritance + call_order = [] + + @tu = Class.new FakeNamedTest do + define_method :setup do + call_order << :setup_method + end + + define_method :teardown do + call_order << :teardown_method + end + + define_method :test_something do + call_order << :test + end + end + + run_tu_with_fresh_reporter + + @tu = Class.new @tu + run_tu_with_fresh_reporter + + # Once for the parent class, once for the child + expected = [:setup_method, :test, :teardown_method] * 2 + + assert_equal expected, call_order + end +end + +class TestMinitestRunnable < Minitest::Test + def setup_marshal klass + tc = klass.new "whatever" + tc.assertions = 42 + tc.failures << "a failure" + + yield tc if block_given? + + def tc.setup + @blah = "blah" + end + tc.setup + + @tc = Minitest::Result.from tc + end + + def assert_marshal expected_ivars + new_tc = Marshal.load Marshal.dump @tc + + ivars = new_tc.instance_variables.map(&:to_s).sort + assert_equal expected_ivars, ivars + assert_equal "whatever", new_tc.name + assert_equal 42, new_tc.assertions + assert_equal ["a failure"], new_tc.failures + + yield new_tc if block_given? + end + + def test_marshal + setup_marshal Minitest::Runnable + + assert_marshal %w[@NAME @assertions @failures @klass @source_location @time] + end + + def test_spec_marshal + klass = describe("whatever") { it("passes") { assert true } } + rm = klass.runnable_methods.first + + # Run the test + @tc = klass.new(rm).run + + assert_kind_of Minitest::Result, @tc + + # Pass it over the wire + over_the_wire = Marshal.load Marshal.dump @tc + + assert_equal @tc.time, over_the_wire.time + assert_equal @tc.name, over_the_wire.name + assert_equal @tc.assertions, over_the_wire.assertions + assert_equal @tc.failures, over_the_wire.failures + assert_equal @tc.klass, over_the_wire.klass + end +end + +class TestMinitestTest < TestMinitestRunnable + def test_dup + setup_marshal Minitest::Test do |tc| + tc.time = 3.14 + end + + assert_marshal %w[@NAME @assertions @failures @klass @source_location @time] do |new_tc| + assert_in_epsilon 3.14, new_tc.time + end + end +end + +class TestMinitestUnitTestCase < Minitest::Test + # do not call parallelize_me! 
- teardown accesses @tc._assertions + # which is not threadsafe. Nearly every method in here is an + # assertion test so it isn't worth splitting it out further. + + RUBY18 = !defined? Encoding + + def setup + super + + Minitest::Test.reset + + @tc = Minitest::Test.new "fake tc" + @zomg = "zomg ponies!" + @assertion_count = 1 + end + + def teardown + assert_equal(@assertion_count, @tc.assertions, + "expected #{@assertion_count} assertions to be fired during the test, not #{@tc.assertions}") if @tc.passed? + end + + def non_verbose + orig_verbose = $VERBOSE + $VERBOSE = false + + yield + ensure + $VERBOSE = orig_verbose + end + + def test_runnable_methods_random + @assertion_count = 0 + + sample_test_case = Class.new FakeNamedTest do + def self.test_order; :random; end + def test_test1; assert "does not matter" end + def test_test2; assert "does not matter" end + def test_test3; assert "does not matter" end + end + + srand 42 + expected = %w[test_test2 test_test1 test_test3] + assert_equal expected, sample_test_case.runnable_methods + end + + def test_runnable_methods_sorted + @assertion_count = 0 + + sample_test_case = Class.new FakeNamedTest do + def self.test_order; :sorted end + def test_test3; assert "does not matter" end + def test_test2; assert "does not matter" end + def test_test1; assert "does not matter" end + end + + expected = %w[test_test1 test_test2 test_test3] + assert_equal expected, sample_test_case.runnable_methods + end + + def test_i_suck_and_my_tests_are_order_dependent_bang_sets_test_order_alpha + @assertion_count = 0 + + shitty_test_case = Class.new FakeNamedTest + + shitty_test_case.i_suck_and_my_tests_are_order_dependent! + + assert_equal :alpha, shitty_test_case.test_order + end + + def test_i_suck_and_my_tests_are_order_dependent_bang_does_not_warn + @assertion_count = 0 + + shitty_test_case = Class.new FakeNamedTest + + def shitty_test_case.test_order; :lol end + + assert_silent do + shitty_test_case.i_suck_and_my_tests_are_order_dependent! + end + end + + def test_autorun_does_not_affect_fork_success_status + @assertion_count = 0 + skip "windows doesn't have skip" unless Process.respond_to?(:fork) + Process.waitpid(fork {}) + assert_equal true, $?.success? + end + + def test_autorun_does_not_affect_fork_exit_status + @assertion_count = 0 + skip "windows doesn't have skip" unless Process.respond_to?(:fork) + Process.waitpid(fork { exit 42 }) + assert_equal 42, $?.exitstatus + end +end + +class TestMinitestGuard < Minitest::Test + parallelize_me! + + def test_mri_eh + assert self.class.mri? "ruby blah" + assert self.mri? "ruby blah" + end + + def test_jruby_eh + assert self.class.jruby? "java" + assert self.jruby? "java" + end + + def test_rubinius_eh + assert_output "", /DEPRECATED/ do + assert self.class.rubinius? "rbx" + end + assert_output "", /DEPRECATED/ do + assert self.rubinius? "rbx" + end + end + + def test_maglev_eh + assert_output "", /DEPRECATED/ do + assert self.class.maglev? "maglev" + end + assert_output "", /DEPRECATED/ do + assert self.maglev? "maglev" + end + end + + def test_osx_eh + assert self.class.osx? "darwin" + assert self.osx? "darwin" + end + + def test_windows_eh + assert self.class.windows? "mswin" + assert self.windows? "mswin" + end +end + +class TestMinitestUnitRecording < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. 
+ + def assert_run_record *expected, &block + @tu = Class.new FakeNamedTest, &block + + run_tu_with_fresh_reporter + + recorded = first_reporter.results.map(&:failures).flatten.map { |f| f.error.class } + + assert_equal expected, recorded + end + + def test_run_with_bogus_reporter + # https://github.com/seattlerb/minitest/issues/659 + # TODO: remove test for minitest 6 + @tu = Class.new FakeNamedTest do + def test_method + assert true + end + end + + bogus_reporter = Class.new do # doesn't subclass AbstractReporter + def start; @success = false; end + # def prerecord klass, name; end # doesn't define full API + def record result; @success = true; end + def report; end + def passed?; end + def results; end + def success?; @success; end + end.new + + self.reporter = Minitest::CompositeReporter.new + reporter << bogus_reporter + + Minitest::Runnable.runnables.delete @tu + + @tu.run reporter, {} + + assert_predicate bogus_reporter, :success? + end + + def test_record_passing + assert_run_record do + def test_method + assert true + end + end + end + + def test_record_failing + assert_run_record Minitest::Assertion do + def test_method + assert false + end + end + end + + def test_record_error + assert_run_record RuntimeError do + def test_method + raise "unhandled exception" + end + end + end + + def test_record_error_teardown + assert_run_record RuntimeError do + def test_method + assert true + end + + def teardown + raise "unhandled exception" + end + end + end + + def test_record_error_in_test_and_teardown + assert_run_record AnError, RuntimeError do + def test_method + raise AnError + end + + def teardown + raise "unhandled exception" + end + end + end + + def test_to_s_error_in_test_and_teardown + @tu = Class.new FakeNamedTest do + def test_method + raise AnError + end + + def teardown + raise "unhandled exception" + end + end + + run_tu_with_fresh_reporter + + exp = clean " + Error: + FakeNamedTestXX#test_method: + AnError: AnError + FILE:LINE:in `test_method' + + Error: + FakeNamedTestXX#test_method: + RuntimeError: unhandled exception + FILE:LINE:in `teardown' + " + + assert_equal exp.strip, normalize_output(first_reporter.results.first.to_s).strip + end + + def test_record_skip + assert_run_record Minitest::Skip do + def test_method + skip "not yet" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/.autotest b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/.autotest new file mode 100644 index 0000000..b6fbce5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/.autotest @@ -0,0 +1,34 @@ +# -*- ruby -*- + +require 'autotest/restart' +require 'autotest/rcov' if ENV['RCOV'] + +Autotest.add_hook :initialize do |at| + at.testlib = 'minitest/autorun' + + bench_tests = %w(TestMinitestBenchmark) + mock_tests = %w(TestMinitestMock TestMinitestStub) + spec_tests = %w(TestMinitestReporter TestMetaStatic TestMeta + TestSpecInTestCase) + unit_tests = %w(TestMinitestGuard TestMinitestRunnable + TestMinitestRunner TestMinitestTest TestMinitestUnit + TestMinitestUnitInherited TestMinitestUnitOrder + TestMinitestUnitRecording TestMinitestUnitTestCase) + + { + bench_tests => "test/minitest/test_minitest_benchmark.rb", + mock_tests => "test/minitest/test_minitest_mock.rb", + spec_tests => "test/minitest/test_minitest_reporter.rb", + unit_tests => "test/minitest/test_minitest_unit.rb", + }.each do |klasses, file| + klasses.each do |klass| + at.extra_class_map[klass] = file + end + end + + at.add_exception 'coverage.info' + at.add_exception 'coverage' +end + +# 
require 'autotest/rcov' +# Autotest::RCov.command = 'rcov_info' diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/History.rdoc b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/History.rdoc new file mode 100644 index 0000000..252a651 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/History.rdoc @@ -0,0 +1,1430 @@ +=== 5.15.0 / 2021-12-14 + +* 1 major enhancement: + + * assert_throws returns the value returned, if any. (volmer) + +* 3 minor enhancements: + + * Added -S option to skip reporting of certain types of output + * Enable Ruby deprecation warnings by default. (casperisfine) + * Use Etc.nprocessors by default in order to maximize cpu usage. (tonytonyjan) + +* 6 bug fixes: + + * Close then unlink tempfiles on Windows. (nobu) + * Fixed #skip_until for windows paths. (MSP-Greg) + * Fixed a bunch of tests for jruby and windows. (MSP-Greg) + * Fixed marshalling of specs if they error. (tenderlove, jeremyevans, et al) + * Updated deprecation message for block expectations. (blowmage) + * Use Kernel.warn directly in expectations in case CUT defines their own warn. (firien) + +=== 5.14.4 / 2021-02-23 + +* 1 bug fix: + + * Fixed deprecation warning using stub with methods using keyword arguments. (Nakilon) + +=== 5.14.3 / 2021-01-05 + +* 1 bug fix: + + * Bumped require_ruby_version to < 4 (trunk = 3.1). + +=== 5.14.2 / 2020-08-31 + +* 1 bug fix: + + * Bumped ruby version to include 3.0 (trunk). + +=== 5.14.1 / 2020-05-15 + +* 3 minor enhancements: + + * Minitest.filter_backtrace returns original backtrace if filter comes back empty. + * Minitest::BacktraceFilter now returns entire backtrace if $MT_DEBUG set in env. + * Return true on a successful refute. (jusleg) + +* 1 bug fix: + + * Fixed expectation doco to not use global expectations. + +=== 5.14.0 / 2020-01-11 + +* 2 minor enhancements: + + * Block-assertions (eg assert_output) now error if raised inside the block. (casperisfine) + * Changed assert_raises to only catch Assertion since that covers Skip and friends. + +* 3 bug fixes: + + * Added example for value wrapper with block to Expectations module. (stomar) + * Fixed use of must/wont_be_within_delta on Expectation instance. (stomar) + * Renamed UnexpectedError#exception to #error to avoid problems with reraising. (casperisfine) + +=== 5.13.0 / 2019-10-29 + +* 9 minor enhancements: + + * Added Minitest::Guard#osx? + * Added examples to documentation for assert_raises. (lxxxvi) + * Added expectations #path_must_exist and #path_wont_exist. Not thrilled with the names. + * Added fail_after(year, month, day, msg) to allow time-bombing after a deadline. + * Added skip_until(year, month, day, msg) to allow deferring until a deadline. + * Deprecated Minitest::Guard#maglev? + * Deprecated Minitest::Guard#rubinius? + * Finally added assert_path_exists and refute_path_exists. (deivid-rodriguez) + * Refactored and pulled Assertions#things_to_diff out of #diff. (BurdetteLamar) + +* 3 bug fixes: + + * Fix autorun bug that affects fork exit status in tests. (dylanahsmith/jhawthorn) + * Improved documentation for _/value/expect, especially for blocks. (svoop) + * Support new Proc#to_s format. (ko1) + +=== 5.12.2 / 2019-09-28 + +* 1 bug fix: + + * After chatting w/ @y-yagi and others, decided to lower support to include ruby 2.2. + +=== 5.12.1 / 2019-09-28 + +* 1 minor enhancement: + + * Added documentation for Reporter classes. (sshaw) + +* 3 bug fixes: + + * Avoid using 'match?' to support older ruby versions. 
(y-yagi) + * Fixed broken link to reference on goodness-of-fit testing. (havenwood) + * Update requirements in readme and Rakefile/hoe spec. + +=== 5.12.0 / 2019-09-22 + +* 8 minor enhancements: + + * Added a descriptive error if assert_output or assert_raises called without a block. (okuramasafumi) + * Changed mu_pp_for_diff to make having both \n and \\n easier to debug. + * Deprecated $N for specifying number of parallel test runners. Use MT_CPU. + * Deprecated use of global expectations. To be removed from MT6. + * Extended Assertions#mu_pp to encoding validity output for strings to improve diffs. + * Extended Assertions#mu_pp to output encoding and validity if invalid to improve diffs. + * Extended Assertions#mu_pp_for_diff to make escaped newlines more obvious in diffs. + * Fail gracefully when expectation used outside of `it`. + +* 3 bug fixes: + + * Check `option[:filter]` klass before match. Fixes 2.6 warning. (y-yagi) + * Fixed Assertions#diff from recalculating if set to nil + * Fixed spec section of readme to not use deprecated global expectations. (CheezItMan) + +=== 5.11.3 / 2018-01-26 + +* 1 bug fix: + + * Pushed #error? up to Reportable module. (composerinteralia) + +=== 5.11.2 / 2018-01-25 + +* 1 minor enhancement: + + * Reversed Test < Result. Back to < Runnable and using Reportable for shared code. + +* 2 bug fixes: + + * Fixed Result#location for instances of Test. (alexisbernard) + * Fixed deprecation message for Runnable#marshal_dump. (y-yagi) + +=== 5.11.1 / 2018-01-02 + +* 1 bug fix: + + * Fixed Result (a superclass of Test) overriding Runnable's name accessors. (y-yagi, MSP-Greg) + +=== 5.11.0 / 2018-01-01 + +* 2 major enhancements: + + * Added Minitest::Result and Minitest::Result.from(runnable). + * Changed Minitest::Test to subclass Result and refactored methods up. + +* 7 minor enhancements: + + * Added --no-plugins and MT_NO_PLUGINS to bypass MT plugin autoloading. Helps with bad actors installed globally. + * Added bench_performance_{logarithmic,power} for spec-style benchmarks. (rickhull) + * Added deprecation warning for Runnable#marshal_dump. + * Minitest.run_one_method now checks for instance of Result, not exact same class. + * Minitest::Test.run returns a Result version of self, not self. + * ProgressReporter#prerecord now explicitly prints klass.name. Allows for fakers. + +* 4 bug fixes: + + * Object.stub no longer calls the passed block if stubbed with a callable. + * Object.stub now passes blocks down to the callable result. + * Pushed Minitest::Test#time & #time_it up to Runnable. + * Test nil equality directly in assert_equal. Fixes #679. (voxik) + +=== 5.11.0b1 / 2017-12-20 + +* 2 major enhancements: + + * Added Minitest::Result and Minitest::Result.from(runnable). + * Changed Minitest::Test to subclass Result and refactored methods up. + +* 6 minor enhancements: + + * Added --no-plugins and MT_NO_PLUGINS to bypass MT plugin autoloading. Helps with bad actors installed globally. + * Added bench_performance_{logarithmic,power} for spec-style benchmarks. (rickhull) + * Minitest.run_one_method now checks for instance of Result, not exact same class. + * Minitest::Test.run returns a Result version of self, not self. + * ProgressReporter#prerecord now explicitly prints klass.name. Allows for fakers. + * Removed Runnable.marshal_dump/load. + +* 4 bug fixes: + + * Object.stub no longer calls the passed block if stubbed with a callable. + * Object.stub now passes blocks down to the callable result. + * Pushed Minitest::Test#time & #time_it up to Runnable. 
+ * Test nil equality directly in assert_equal. Fixes #679. (voxik) + +=== 5.10.3 / 2017-07-21 + +* 1 minor enhancement: + + * Extended documentation for Mock#expect for multiple calls to mock object. (insti) + +* 2 bug fixes: + + * Finished off missing doco. + * Fixed verbose output on parallelize_me! classes. (chanks) + +=== 5.10.2 / 2017-05-09 + +* 1 minor enhancement: + + * Added suggestion in minitest/hell to install minitest/proveit. + +* 7 bug fixes: + + * Expand MT6 to Minitest 6. (xaviershay) + * Fixed location of assert_send deprecation. (rab) + * Fixed location of nil assert_equal deprecation to work with expectations. (jeremyevans) + * Fixed minitest/hell to use parallelize_me! (azul) + * Made deprecation use warn so -W0 will silence it. + * Workaround for rdoc nodoc generation bug that totally f'd up minitest doco. (Paxa) + * Write aggregated_results directly to the IO object to avoid mixed encoding errors. (tenderlove) + +=== 5.10.1 / 2016-12-01 + +* 1 bug fix: + + * Added a hack/kludge to deal with missing #prerecord on reporters that aren't properly subclassing AbstractReporter (I'm looking at you minitest-reporters) + +=== 5.10.0 / 2016-11-30 + +* 1 major enhancement: + + * Deprecated ruby 1.8, 1.9, possibly 2.0, assert_send, & old MiniTest namespace. + +* 3 minor enhancements: + + * Warn if assert_equal expects a nil. This will fail in minitest 6+. (tenderlove) + * Added AbstractReporter#prerecord and extended ProgressReporter and CompositeReporter to use it. + * Minor optimization: remove runnables with no runnable methods before run. + +* 3 bug fixes: + + * Fix assert_throw rescuing any NameError and ArgumentError. (waldyr) + * Clean up (most of the) last remaining vestiges of minitest/unit. + * 2.4: removed deprecation warnings when referring to Fixnum. + +=== 5.9.1 / 2016-09-25 + +* 2 bug fixes: + + * Re-release to refresh gem certificate signing. ugh. + * Fixed hoe/minitest to not augment load path if we're actually testing minitest. + +=== 5.9.0 / 2016-05-16 + +* 8 minor enhancements: + + * Added Minitest.info_signal accessors to customize signal for test run info. (nate) + * Added assert_mock to make it more clear that you're testing w/ them. + * Added negative filter by test name. (utilum) + * Added warning to README that 1.8 and 1.9 support will be dropped in minitest 6. + * Automatically activate minitest/hell if $MT_HELL is defined. + * Improved default error messages for assert and refute. (bhenderson) + * minitest/hell now tries to require minitest/proveit + * mu_pp for strings prints out non-standard encodings to improve assert_equal diffs. + +* 1 bug fix: + + * Removed Interrupt from PASSTHROUGH_EXCEPTIONS (already handled). (waldyr) + +=== 5.8.5 / 2016-09-25 + +* 2 bug fixes: + + * Re-release to refresh gem certificate signing. ugh. + * Fixed hoe/minitest to not augment load path if we're actually testing minitest. + +=== 5.8.4 / 2016-01-21 + +* 1 bug fix: + + * Allow Minitest::Assertion to pass through assert_raises so inner failures are dealt with first. + +=== 5.8.3 / 2015-11-17 + +* 1 minor enhancement: + + * Added extra note about mocks and threads to readme. (zamith) + +* 1 bug fix: + + * Fixed bug in Mock#verify. (pithub/zamith) + +=== 5.8.2 / 2015-10-26 + +* 1 bug fix: + + * Fixed using parallelize_me! and capture_io (or any locking io). (arlt/tenderlove) + +=== 5.8.1 / 2015-09-23 + +* 1 minor enhancement: + + * Refactor assert_raises to be cleaner and to pass SystemExit and SignalException. 
(bhenderson) + +=== 5.8.0 / 2015-08-06 + +* 2 minor enhancements: + + * Add optional delegation mechanism to extend object with a mock. (zamith) + * Return early if there are no filtered methods. (jeremyevans) + +* 1 bug fix: + + * Don't extend io with pride if io is not a tty. (toy) + +=== 5.7.0 / 2015-05-27 + +* 1 major enhancement: + + * assert_raises now matches subclasses of the expected exception types. (jeremyevans) + +* 3 minor enhancements: + + * Added :block type for minitest/spec's #infect_an_assertion. (jeremyevans) + * Inline verification error messages in minitest/mock for GC performance. (zamith) + * assert_raises defaults to RuntimeError if not specified. (jeremyevans) + +* 4 bug fixes: + + * Added 'class' to minitest/mock's overridden_methods list. (zamith) + * Added file/line to infect_an_assertion's class_eval call. (jeremyevans) + * Cleared UnexpectedError's mesg w/ generic string. + * Fixed non-proc-oriented expectations when used on proc target. (jeremyevans) + +=== 5.6.1 / 2015-04-27 + +* 2 bug fixes: + + * Added Minitest.clock_time and switched all Time.now to it. (tenderlove) + * Moved Minitest::Expectations#_ into Minitest::Spec::DSL. + +=== 5.6.0 / 2015-04-13 + +* 4 major enhancements: + + * Added Minitest::Expectation value monad. + * Added Minitest::Expectations#_ that returns an Expectation. Aliased to value. + * All expectations are added to Minitest::Expectation. + * At some point, the methods on Object will be deprecated and then removed. + +* 4 minor enhancements: + + * Added a note about bundle exec pitfall in ruby 2.2+. (searls) + * Lazily start the parallel executor. (tenderlove) + * Make mocks more debugger-friendly (edward) + * Print out the current test run on interrupt. (riffraff) + +* 3 bug fixes: + + * Fix failing test under Windows. (kimhmadsen) + * Record mocked calls before they happen so mocks can raise exceptions easier (tho I'm not a fan). (corecode) + * Tried to clarify mocks vs stubs terminology better. (kkirsche) + +=== 5.5.1 / 2015-01-09 + +* 1 bug fix: + + * Fixed doco problems. (zzak) + +=== 5.5.0 / 2014-12-12 // mri 2.2.0 (as a real gem) + +* 1 minor enhancement: + + * Allow seed to be given via ENV for rake test loader sadness: eg rake SEED=42. + +=== 5.4.3 / 2014-11-11 + +* 2 bug fixes: + + * Clarified requirements for ruby are now 1.8.7 or better. + * Force encode error output in case mal-encoded exception is raised. (jasonrclark) + +=== 5.4.2 / 2014-09-26 + +* 2 minor enhancements: + + * Extract teardown method list. + * Thanks to minitest-gcstats got a 5-10% speedup via reduced GC! + +=== 5.4.1 / 2014-08-28 + +* 1 bug fix: + + * Fixed specs hidden by nesting/ordering bug (blowmage/apotonick) + +=== 5.4.0 / 2014-07-07 + +* 2 minor enhancements: + + * Kernel#describe extended to splat additional_desc. + * Spec#spec_type extended to take a splat of additional items, passed to matcher procs. + +* 1 bug fix: + + * minitest/spec should require minitest/test, not minitest/unit. (doudou) + +=== 5.3.5 / 2014-06-17 + +* 1 minor enhancement: + + * Spit and polish (mostly spit). + +=== 5.3.4 / 2014-05-15 + +* 1 minor enhancement: + + * Test classes are randomized before running. (judofyr) + +=== 5.3.3 / 2014-04-14 + +* 1 bug fix: + + * Fixed using expectations w/ DSL in Test class w/o describe. (blowmage+others) + +=== 5.3.2 / 2014-04-02 + +* 1 bug fix: + + * Fixed doco on Assertions.assertions. (xaviershay) + +=== 5.3.1 / 2014-03-14 + +* 1 minor enhancement: + + * Modified verbage on bad 'let' names to be more helpful. 
(Archytaus) + +* 1 bug fix: + + * Fixed 2 cases still using MiniTest. (mikesea) + +=== 5.3.0 / 2014-02-25 + +* 1 minor enhancement: + + * Mocked methods can take a block to verify state. Seattle.rb 12 bday present from ernie! Thanks!! + +=== 5.2.3 / 2014-02-10 + +* 1 bug fix: + + * Fixed Spec#let check to allow overriding of other lets. (mvz) + +=== 5.2.2 / 2014-01-22 + +* 1 minor enhancement: + + * Spec#let raises ArgumentError if you override _any_ instance method (except subject). (rynr) + +* 1 bug fix: + + * Fixed up benchmark spec doco and added a test to demonstrate. (bhenderson) + +=== 5.2.1 / 2014-01-07 + +* 1 bug fix: + + * Properly deal with horrible mix of runtime load errors + other at_exit handlers. (dougo/chqr) + +=== 5.2.0 / 2013-12-13 + +* 1 minor enhancement: + + * Change expectations to allow calling most on procs (but not calling the proc). (bhenderson+others) + +=== 5.1.0 / 2013-12-05 + +* 1 minor enhancement: + + * Use a Queue for scheduling parallel tests. (tenderlove) + +* 1 bug fix: + + * Fixed misspelling in doco. (amatsuda) + +=== 5.0.8 / 2013-09-20 + +* 1 bug fix: + + * Fixed siginfo handler by rearranging reporters and fixing to_s. (tenderlove) + +=== 5.0.7 / 2013-09-05 + +* 2 minor enhancements: + + * Added clarification about the use of thread local variables in expectations. (jemc) + * Added extra message about skipped tests, if any. Disable globally with $MT_NO_SKIP_MSG. + +* 2 bug fixes: + + * Only require minitest, not minitest/autorun in pride_plugin. (judofyr) + * Require rubygems in load_plugins in case you're not using minitest/autorun. + +=== 5.0.6 / 2013-06-28 + +* 3 minor enhancements: + + * Allow stub to pass args to blocks. (swindsor) + * Improved warning message about minitest/autorun to address 1.9's minitest/autorun. + * Made minitest/test require minitest as needed. For lib writers. (erikh) + +* 1 bug fix: + + * Fixed missing require in minitest/test. (erikh) + +=== 4.7.5 / 2013-06-21 // mri 2.1.1 + +* 2 bug fixes: + + * Fix Spec#describe_stack to be thread local. + * Fix multithreaded test failures by defining Time local to mock test namespace + +=== 5.0.5 / 2013-06-20 + +* 6 bug fixes: + + * DOH! Fixed the rest of the new casing on Minitest. (splattael) + * Fixed typo on minitest/mock rdoc. (mrgilman/guiceolin) + * Make Spec::DSL.describe_stack thread local to avoid failing on my own tests. + * Make a fake Time.now local to the tests so they won't interfere with real reporter timings. + * Make everything mockable by wrapping all 'special' methods in a smarter wrapper. (bestie) + * Raise ArgumentError if let name starts with 'test'. (johnmaxwell) + +=== 5.0.4 / 2013-06-07 + +* 5 minor enhancements: + + * Added AbstractReporter, defining required Reporter API to quack properly. + * Added doco for writing reporters. + * Refactored Reporter into ProgressReporter and SummaryReporter. (idea: phiggins, code:me+scotch) + * Refactored SummaryReporter pushing up to StatisticsReporter. (phiggins) + * Removed Reporter#run_and_report... cleaner, but doesn't "fit" in the API. + +=== 5.0.3 / 2013-05-29 + +* 4 minor enhancements: + + * Added Runnable.with_info_handler and Runnable.on_signal. + * Moved io.sync restore to Reporter#run_and_report. + * Refactored inner loop of Reporter#report to #to_s. Callable for status updates. + * Restored MT4's mid-run report (^t). (tenderlove). + +=== 5.0.2 / 2013-05-20 + +* 3 bug fixes: + + * Gem.find_files is smarter than I remember... cause I wrote it that way. *sigh* I'm getting old. 
+ * Pride wasn't doing puts through its #io. (tmiller/tenderlove) + * Replaced Runnable#dup and Test#dup with marshal_dump/load. Too many problems cropping up on untested rails code. (tenderlove/rubys) + +=== 5.0.1 / 2013-05-14 + +* 2 bug fixes: + + * Documented Assertions' need for @assertions to be defined by the includer. + * Only load one plugin version per name. Tries for latest. + +=== 5.0.0 / 2013-05-10 + +Oh god... here we go... + +Minitest 5: + +* 4 deaths in the family: + + * MiniTest.runner is dead. No more manager objects. + * MiniTest::Unit#record is dead. Use a Reporter instance instead. + * MiniTest::Unit._run_* is dead. Runnable things are responsible for their own runs. + * MiniTest::Unit.output is dead. No more centralized IO. + +* 12 major (oft incompatible) changes: + + * Renamed MiniTest to Minitest. Your pinkies will thank me. (aliased to MiniTest) + * Removed MiniTest::Unit entirely. No more manager objects. + * Added Minitest::Runnable. Everything minitest can run subclasses this. + * Renamed MiniTest::Unit::TestCase to Minitest::Test (subclassing Runnable). + * Added Minitest::Benchmark. + * Your benchmarks need to move to their own subclass. + * Benchmarks using the spec DSL have to have "Bench" somewhere in their describe. + * MiniTest::Unit.after_tests moved to Minitest.after_run + * MiniTest::Unit.autorun is now Minitest.autorun. Just require minitest/autorun pls. + * Removed ParallelEach#grep since it isn't used anywhere. + * Renamed Runnable#__name__ to Runnable#name (but uses @NAME internally). + * Runnable#run needs to return self. Allows for swapping of results as needed. + +* 8 minor moves: + + * Moved Assertions module to minitest/assertions.rb + * Moved Expectations module to minitest/expectations.rb + * Moved Test to minitest/test.rb + * Moved everything else in minitest/unit.rb to minitest.rb + * minitest/unit.rb is now just a small (user-test only) compatibility layer. + * Moved most of minitest/pride into minitest/pride_plugin. + * minitest/pride now just activates pride. + * Moved ParallelEach under Minitest. + +* 9 additions: + + * Added a plugin system that can extend command-line options. + * Added Minitest.extensions. + * Added Minitest.reporter (only available during startup). + * Added Minitest.run(args). This is the very top of any Minitest run. + * Added Minitest::Reporter. Everything minitest can report goes through here. + * Minitest.reporter is a composite so you can add your own. + * Added Minitest::CompositeReporter. Much easier to extend with your own reporters. + * Added UnexpectedError, an Assertion subclass, to wrap up errors. + * Minitest::Test#run is now freakin' beautiful. 47 -> 17 loc + +* 11 other: + + * Removed Object.infect_with_assertions (it was already dead code). + * Runnables are responsible for knowing their result_code (eg "." or "F"). + * Minitest.autorun now returns boolean, not exit code. + * Added FAQ entry for extending via modules. (phiggins) + * Implement Runnable#dup to cleanse state back to test results. Helps with serialization. pair:tenderlove + * Moved ParallelEach under Minitest. + * Runnable#run needs to return self. Allows for swapping of results as needed. + * Minitest.init_plugins passes down options. + * Minitest.load_plugins only loads once. + * Fixed minitest/pride to work with rake test loader again. (tmiller) + * Added count/size to ParallelEach to fix use w/in stdlib's test/unit. 
:( (btaitelb) + +* 5 voodoo: + + * Removed mutex from minitest.rb (phiggins) + * Removed mutex from test.rb (phiggins) + * Removed Minitest::Reporter.synchronize (phiggins) + * Removed Minitest::Test.synchronize (phiggins) + * Upon loading minitest/parallel_each, record, capture_io and capture_subprocess_io are doped with synchronization code. (phiggins) + +=== 4.7.4 / 2013-05-01 + +This is probably the last release of the 4.x series. It will be merged +to ruby and will be put into maintenance mode there. + +I'm not set in stone on this, but at this point further development of +minitest (5+) will be gem-only. It is just too hard to work w/in +ruby-core w/ test-unit compatibility holding minitest development +back. + +* 2 minor enhancements: + + * Added count/size to ParallelEach to fix use w/in stdlib's test/unit. :( (btaitelb) + * Allow disabling of info_signal handler in runner. (erikh) + +=== 4.7.3 / 2013-04-20 + +* 1 bug fix: + + * Reverted stubbing of module methods change. Stub the user, not the impl. (ab9/tyabe) + +=== 4.7.2 / 2013-04-18 + +* 2 bug fixes: + + * Fixed inconsistency in refute_in_delta/epsilon. I double negatived my logic. (nettsundere) + * Fixed stubbing of module methods (eg Kernel#sleep). (steveklabnik) + +=== 4.7.1 / 2013-04-09 + +* 1 minor enhancement: + + * Added FAQ section to README + +* 1 bug fix: + + * Fixed bug where guard runs tests bypassing minitest/autorun and an ivar isn't set right. (darrencauthon) + +=== 4.7.0 / 2013-03-18 + +* 1 major enhancement: + + * Refactored MiniTest::Spec into MiniTest::Spec::DSL. + +* 1 bug fix: + + * Removed $DEBUG handler that detected when test/unit and minitest were both loaded. (tenderlove) + +=== 4.6.2 / 2013-02-27 + +* 1 minor enhancement: + + * Change error output to match Class#method, making it easier to use -n filter. + +=== 4.6.1 / 2013-02-14 + +* 1 bug fix: + + * Fixed an option processing bug caused by test/unit's irresponsibly convoluted code. (floehopper) + +=== 4.6.0 / 2013-02-07 + +* 3 major enhancements: + + * Removed ::reset_setup_teardown_hooks + * Removed the long deprecated assert_block + * Removed the long deprecated lifecycle hooks: add_(setup|teardown)_hook + +* 1 minor enhancement: + + * Allow filtering tests by suite name as well as test name. (lazyatom) + +* 2 bug fixes: + + * Made hex handling (eg object_ids) in mu_pp_for_diff more specific. (maxim) + * nodoc top-level module. (zzak) + +=== 4.5.0 / 2013-01-22 + +* 1 major enhancement: + + * Rearranged minitest/unit.rb so NO parallelization code is loaded/used until you opt-in. + +* 4 minor enhancements: + + * Added TestCase#skipped? for teardown guards + * Added maglev? guard + * Document that record can be sent twice if teardown fails or errors (randycoulman) + * Errors in teardown are now recorded. (randycoulman) + +* 3 bug fixes: + + * Added hacks and skips to get clean test runs on maglev + * Modified float tests for maglev float output differences. Not sure this is right. Not sure I care. + * Test for existance of diff.exe instead of assuming they have devkit. (blowmage/Cumbayah) + +=== 4.4.0 / 2013-01-07 + +* 3 minor enhancements: + + * Added fit_logarithic and assert_performance_logarithmic. (ktheory) + * Merge processed options so others can mess with defaults. (tenderlove) + * TestCase#message can now take another proc to defer custom message cost. (ordinaryzelig/bhenderson) + +* 1 bug fix: + + * TestCase#passed? now true if test is skipped. (qanhd) + +=== 4.3.3 / 2012-12-06 + +* 1 bug fix: + + * Updated information about stubbing. 
(daviddavis) + +=== 4.3.2 / 2012-11-27 // mri 2.0.0 + +* 1 minor enhancement: + + * Improved assert_equals error message to point you at #== of member objects. (kcurtin) + +=== 4.3.1 / 2012-11-23 + +* 1 bug fix: + + * Moved test_children to serial testcase to prevent random failures. + +=== 4.3.0 / 2012-11-17 + +* 4 minor enhancements: + + * Allow #autorun to run even if loaded with other test libs that call exit. (sunaku) + * Do not include Expectations in Object if $MT_NO_EXPECTATIONS is set (experimental?) + * Gave some much needed love to assert_raises. + * Mock#expect can take a block to custom-validate args. (gmoothart) + +=== 4.2.0 / 2012-11-02 + +* 4 major enhancements: + + * Added minitest/hell - run all your tests through the ringer! + * Added support for :parallel test_order to run test cases in parallel. + * Removed last_error and refactored runner code to be threadsafe. + * _run_suites now runs suites in parallel if they opt-in. + +* 4 minor enhancements: + + * Added TestCase#synchronize + * Added TestCase.make_my_diffs_pretty! + * Added TestCase.parallelize_me! + * Lock on capture_io for thread safety (tenderlove) + +=== 4.1.0 / 2012-10-05 + +* 2 minor enhancements: + + * Added skip example to readme. (dissolved) + * Extracted backtrace filter to object. (tenderlove) + +* 1 bug fix: + + * OMG I'm so dumb. Fixed access to deprecated hook class methods. I hate ruby modules. (route) + +=== 4.0.0 / 2012-09-28 + +* 1 major enhancement: + + * The names of a privately-used undocumented constants are Super Important™. + +* 1 minor enhancement: + + * Support stubbing methods that would be handled via method_missing. (jhsu) + +* 3 bug fixes: + + * Add include_private param to MiniTest::Mock#respond_to? (rf-) + * Fixed use of minitest/pride with --help. (zw963) + * Made 'No visible difference.' message more clear. (ckrailo) + +=== 3.5.0 / 2012-09-21 + +* 1 minor enhancement: + + * Added #capture_subprocess_io. (route) + +=== 3.4.0 / 2012-09-05 + +* 2 minor enhancements: + + * assert_output can now take regexps for expected values. (suggested by stomar) + * Clarified that ruby 1.9/2.0's phony gems cause serious confusion for rubygems. + +=== 3.3.0 / 2012-07-26 + +* 1 major enhancement: + + * Deprecated add_(setup|teardown)_hook in favor of (before|after)_(setup|teardown) [2013-01-01] + +* 4 minor enhancements: + + * Refactored deprecated hook system into a module. + * Refactored lifecycle hooks into a module. + * Removed after_setup/before_teardown + run_X_hooks from Spec. + * Spec#before/after now do a simple define_method and call super. DUR. + +* 2 bug fixes: + + * Fixed #passed? when used against a test that called flunk. (floehopper) + * Fixed rdoc bug preventing doco for some expectations. (stomar). + +=== 3.2.0 / 2012-06-26 + +* 1 minor enhancement: + + * Stubs now yield self. (peterhellberg) + +* 1 bug fix: + + * Fixed verbose test that only fails when run in verbose mode. mmmm irony. + +=== 3.1.0 / 2012-06-13 + +* 2 minor enhancements: + + * Removed LONG deprecated Unit.out accessor + * Removed generated method name munging from minitest/spec. (ordinaryzelig/tenderlove) + +=== 3.0.1 / 2012-05-24 + +* 1 bug fix: + + * I'm a dumbass and refactored into Mock#call. Renamed to #__call so you can mock #call. (mschuerig) + +=== 3.0.0 / 2012-05-08 + +* 3 major enhancements: + + * Added Object#stub (in minitest/mock.rb). + * Mock#expect mocks are used in the order they're given. + * Mock#verify now strictly compares against expect calls. 
+ +* 3 minor enhancements: + + * Added caller to deprecation message. + * Mock error messages are much prettier. + * Removed String check for RHS of assert/refute_match. This lets #to_str work properly. + +* 1 bug fix: + + * Support drive letter on Windows. Patch provided from MRI by Usaku NAKAMURA. (ayumin) + +=== 2.12.1 / 2012-04-10 + +* 1 minor enhancement: + + * Added ruby releases to History.txt to make it easier to see what you're missing + +* 1 bug fix: + + * Rolled my own deprecate msg to allow MT to work with rubygems < 1.7 + +=== 2.12.0 / 2012-04-03 + +* 4 minor enhancements: + + * ::it returns test method name (wojtekmach) + * Added #record method to runner so runner subclasses can cleanly gather data. + * Added Minitest alias for MiniTest because even I forget. + * Deprecated assert_block!! Yay!!! + +* 1 bug fix: + + * Fixed warning in i_suck_and_my_tests_are_order_dependent! (phiggins) + +=== 2.11.4 / 2012-03-20 + +* 2 minor enhancements: + + * Updated known extensions + * You got your unicode in my tests! You got your tests in my unicode! (fl00r) + +* 1 bug fix: + + * Fixed MiniTest::Mock example in the readme. (conradwt) + +=== 2.11.3 / 2012-02-29 + +* 2 bug fixes: + + * Clarified that assert_raises returns the exception for further testing + * Fixed assert_in_epsilon when both args are negative. (tamc) + +=== 2.11.2 / 2012-02-14 + +* 1 minor enhancement: + + * Display failures/errors on SIGINFO. (tenderlove) + +* 1 bug fix: + + * Fixed MiniTest::Unit.after_tests for Ruby 1.9.3. (ysbaddaden) + +=== 2.11.1 / 2012-02-01 + +* 3 bug fixes: + + * Improved description for --name argument. (drd) + * Ensure Mock#expect's expected args is an Array. (mperham) + * Ensure Mock#verify verifies multiple expects of the same method. (chastell) + +=== 2.11.0 / 2012-01-25 + +* 2 minor enhancements: + + * Added before / after hooks for setup and teardown. (tenderlove) + * Pushed run_setup_hooks down to Spec. (tenderlove) + +=== 2.10.1 / 2012-01-17 + +* 1 bug fix: + + * Fixed stupid 1.9 path handling grumble grumble. (graaff) + +=== 2.10.0 / 2011-12-20 + +* 3 minor enhancements: + + * Added specs for must/wont be_empty/respond_to/be_kind_of and others. + * Added tests for assert/refute predicate. + * Split minitest/excludes.rb out to its own gem. + +* 1 bug fix: + + * Fixed must_be_empty and wont_be_empty argument handling. (mrsimo) + +=== 2.9.1 / 2011-12-13 + +* 4 minor enhancements: + + * Added a ton of tests on spec error message output. + * Cleaned up consistency of msg handling on unary expectations. + * Improved error messages on assert/refute_in_delta. + * infect_an_assertion no longer checks arity and better handles args. + +* 1 bug fix: + + * Fixed error message on specs when 2+ args and custom message provided. (chastell) + +=== 2.9.0 / 2011-12-07 + +* 4 minor enhancements: + + * Added TestCase.exclude and load_excludes for programmatic filtering of tests. + * Added guard methods so you can cleanly skip based on platform/impl + * Holy crap! 100% doco! `rdoc -C` ftw + * Switch assert_output to test stderr before stdout to possibly improve debugging + +=== 2.8.1 / 2011-11-17 + +* 1 bug fix: + + * Ugh. 1.9's test/unit violates my internals. Added const_missing. + +=== 2.8.0 / 2011-11-08 + +* 2 minor enhancements: + + * Add a method so that code can be run around a particular test case (tenderlove) + * Turn off backtrace filtering if we're running inside a ruby checkout. (drbrain) + +* 2 bug fixes: + + * Fixed 2 typos and 2 doc glitches. 
(splattael) + * Remove unused block arguments to avoid creating Proc objects. (k-tsj) + +=== 2.7.0 / 2011-10-25 + +* 2 minor enhancements: + + * Include failed values in the expected arg output in MockExpectationError. (nono) + * Make minitest/pride work with other 256 color capable terms. (sunaku) + +* 2 bug fixes: + + * Clarified the documentation of minitest/benchmark (eregon) + * Fixed using expectations in regular unit tests. (sunaku) + +=== 2.6.2 / 2011-10-19 + +* 1 minor enhancement: + + * Added link to vim bundle. (sunaku) + +* 2 bug fixes: + + * Force gem activation in hoe minitest plugin + * Support RUBY_VERSION='2.0.0' (nagachika) + +=== 2.6.1 / 2011-09-27 + +* 2 bug fixes: + + * Alias Spec.name from Spec.to_s so it works when @name is nil (nathany) + * Fixed assert and refute_operator where second object has a bad == method. + +=== 2.6.0 / 2011-09-13 + +* 2 minor enhancements: + + * Added specify alias for it and made desc optional. + * Spec#must_be and #wont_be can be used with predicates (metaskills) + +* 1 bug fix: + + * Fixed Mock.respond_to?(var) to work with strings. (holli) + +=== 2.5.1 / 2011-08-27 // ruby 1.9.3: p0, p125, p34579 + +* 2 minor enhancements: + + * Added gem activation for minitest in minitest/autoload to help out 1.9 users + * Extended Spec.register_spec_type to allow for procs instead of just regexps. + +=== 2.5.0 / 2011-08-18 + +* 4 minor enhancements: + + * Added 2 more arguments against rspec: let & subject in 9 loc! (emmanuel/luis) + * Added TestCase.i_suck_and_my_tests_are_order_dependent! + * Extended describe to take an optional method name (2 line change!). (emmanuel) + * Refactored and extended minitest/pride to do full 256 color support. (lolcat) + +* 1 bug fix: + + * Doc fixes. (chastell) + +=== 2.4.0 / 2011-08-09 + +* 4 minor enhancements: + + * Added simple examples for all expectations. + * Improved Mock error output when args mismatch. + * Moved all expectations from Object to MiniTest::Expectations. + * infect_with_assertions has been removed due to excessive clever + +* 4 bug fixes: + + * Fix Assertions#mu_pp to deal with immutable encoded strings. (ferrous26) + * Fix minitest/pride for MacRuby (ferrous26) + * Made error output less fancy so it is more readable + * Mock shouldn't undef === and inspect. (dgraham) + +=== 2.3.1 / 2011-06-22 + +* 1 bug fix: + + * Fixed minitest hoe plugin to be a spermy dep and not depend on itself. + +=== 2.3.0 / 2011-06-15 + +* 5 minor enhancements: + + * Add setup and teardown hooks to MiniTest::TestCase. (phiggins) + * Added nicer error messages for MiniTest::Mock. (phiggins) + * Allow for less specific expected arguments in Mock. (bhenderson/phiggins) + * Made MiniTest::Mock a blank slate. (phiggins) + * Refactored minitest/spec to use the hooks instead of define_inheritable_method. (phiggins) + +* 2 bug fixes: + + * Fixed TestCase's inherited hook. (dchelimsky/phiggins/jamis, the 'good' neighbor) + * MiniTest::Assertions#refute_empty should use mu_pp in the default message. (whatthejeff) + +=== 2.2.2 / 2011-06-01 + +* 2 bug fixes: + + * Got rid of the trailing period in message for assert_equal. (tenderlove) + * Windows needs more flushing. (Akio Tajima) + +=== 2.2.1 / 2011-05-31 + +* 1 bug fix: + + * My _ONE_ non-rubygems-using minitest user goes to Seattle.rb! + +=== 2.2.0 / 2011-05-29 + +* 6 minor enhancements: + + * assert_equal (and must_equal) now tries to diff output where it makes sense. + * Added Assertions#diff(exp, act) to be used by assert_equal. 
+ * Added Assertions#mu_pp_for_diff + * Added Assertions.diff and diff= + * Moved minitest hoe-plugin from hoe-seattlerb. (erikh) + * Skipped tests only output details in verbose mode. (tenderlove+zenspider=xoxo) + +=== 2.1.0 / 2011-04-11 + +* 5 minor enhancements: + + * Added MiniTest::Spec.register_spec_type(matcher, klass) and spec_type(desc) + * Added ability for specs to share code via subclassing of Spec. (metaskills) + * Clarified (or tried to) bench_performance_linear's use of threshold. + * MiniTest::Unit.runner=(runner) provides an easy way of creating custom test runners for specialized needs. (justinweiss) + * Reverse order of inheritance in teardowns of specs. (deepfryed) + +* 3 bug fixes: + + * FINALLY fixed problems of inheriting specs in describe/it/describe scenario. (MGPalmer) + * Fixed a new warning in 1.9.3. + * Fixed assert_block's message handling. (nobu) + +=== 2.0.2 / 2010-12-24 + +* 1 minor enhancement: + + * Completed doco on minitest/benchmark for specs. + +* 1 bug fix: + + * Benchmarks in specs that didn't call bench_range would die. (zzak). + +=== 2.0.1 / 2010-12-15 + +* 4 minor enhancements: + + * Do not filter backtrace if $DEBUG + * Exit autorun via nested at_exit handler, in case other libs call exit + * Make options accesor lazy. + * Split printing of test name and its time. (nurse) + +* 1 bug fix: + + * Fix bug when ^T is hit before runner start + +=== 2.0.0 / 2010-11-11 + +* 3 major enhancements: + + * Added minitest/benchmark! Assert your performance! YAY! + * Refactored runner to allow for more extensibility. See minitest/benchmark. + * This makes the runner backwards incompatible in some ways! + +* 9 minor enhancements: + + * Added MiniTest::Unit.after_tests { ... } + * Improved output by adding test rates and a more sortable verbose format + * Improved readme based on feedback from others + * Added io method to TestCase. If used, it'll supplant '.EF' output. + * Refactored IO in MiniTest::Unit. + * Refactored _run_anything to _run_suite to make it easier to wrap (ngauthier) + * Spec class names are now the unmunged descriptions (btakita) + * YAY for not having redundant rdoc/readmes! + * Help output is now generated from the flags you passed straight up. + +* 4 bug fixes: + + * Fixed scoping issue on minitest/mock (srbaker/prosperity) + * Fixed some of the assertion default messages + * Fixes autorun when on windows with ruby install on different drive (larsch) + * Fixed rdoc output bug in spec.rb + +=== 1.7.2 / 2010-09-23 + +* 3 bug fixes: + + * Fixed doco for expectations and Spec. + * Fixed test_capture_io on 1.9.3+ (sora_h) + * assert_raises now lets MiniTest::Skip through. (shyouhei) + +=== 1.7.1 / 2010-09-01 + +* 1 bug fix: + + * 1.9.2 fixes for spec tests + +=== 1.7.0 / 2010-07-15 + +* 5 minor enhancements: + + * Added assert_output (mapped to must_output). + * Added assert_silent (mapped to must_be_silent). + * Added examples to readme (Mike Dalessio) + * Added options output at the top of the run, for fatal run debugging (tenderlove) + * Spec's describe method returns created class + +=== 1.6.0 / 2010-03-27 // ruby 1.9.2-p290 + +* 10 minor enhancements: + + * Added --seed argument so you can reproduce a random order for debugging. + * Added documentation for assertions + * Added more rdoc and tons of :nodoc: + * Added output to give you all the options you need to reproduce that run. + * Added proper argument parsing to minitest. + * Added unique serial # to spec names so order can be preserved (needs tests). 
(phrogz) + * Empty 'it' fails with default msg. (phrogz) + * Remove previous method on expect to remove 1.9 warnings + * Spec#it is now order-proof wrt subclasses/nested describes. + * assert_same error message now reports in decimal, eg: oid=123. (mattkent) + +* 2 bug fixes: + + * Fixed message on refute_same to be consistent with assert_same. + * Fixed method randomization to be stable for testing. + +=== 1.5.0 / 2010-01-06 + +* 4 minor enhancements: + + * Added ability to specify what assertions should have their args flipped. + * Don't flip arguments on *include and *respond_to assertions. + * Refactored Module.infect_an_assertion from Module.infect_with_assertions. + * before/after :all now bitches and acts like :each + +* 3 bug fixes: + + * Nested describes now map to nested test classes to avoid namespace collision. + * Using undef_method instead of remove_method to clean out inherited specs. + * assert_raises was ignoring passed in message. + +=== 1.4.2 / 2009-06-25 + +* 1 bug fix: + + * Fixed info handler for systems that don't have siginfo. + +=== 1.4.1 / 2009-06-23 + +* 1 major enhancement: + + * Handle ^C and other fatal exceptions by failing + +* 1 minor enhancement: + + * Added something to catch mixed use of test/unit and minitest if $DEBUG + +* 1 bug fix: + + * Added SIGINFO handler for finding slow tests without verbose + +=== 1.4.0 / 2009-06-18 + +* 5 minor enhancement: + + * Added clarification doco. + * Added specs and mocks to autorun. + * Changed spec test class creation to be non-destructive. + * Updated rakefile for new hoe capabilities. + * describes are nestable (via subclass). before/after/def inherits, specs don't. + +* 3 bug fixes: + + * Fixed location on must/wont. + * Switched to __name__ to avoid common ivar name. + * Fixed indentation in test file (1.9). + +=== 1.3.1 / 2009-01-20 // ruby 1.9.1-p431 + +* 1 minor enhancement: + + * Added miniunit/autorun.rb as replacement for test/unit.rb's autorun. + +* 16 bug fixes: + + * 1.9 test fixes. + * Bug fixes from nobu and akira for really odd scenarios. They run ruby funny. + * Fixed (assert|refute)_match's argument order. + * Fixed LocalJumpError in autorun if exception thrown before at_exit. + * Fixed assert_in_delta (should be >=, not >). + * Fixed assert_raises to match Modules. + * Fixed capture_io to not dup IOs. + * Fixed indentation of capture_io for ruby 1.9 warning. + * Fixed location to deal better with custom assertions and load paths. (Yuki) + * Fixed order of (must|wont)_include in MiniTest::Spec. + * Fixed skip's backtrace. + * Got arg order wrong in *_match in tests, message wrong as a result. + * Made describe private. For some reason I thought that an attribute of Kernel. + * Removed disable_autorun method, added autorun.rb instead. + * assert_match escapes if passed string for pattern. + * instance_of? is different from ===, use instance_of. + +=== 1.3.0 / 2008-10-09 + +* 2 major enhancements: + + * renamed to minitest and pulled out test/unit compatibility. + * mini/test.rb is now minitest/unit.rb, everything else maps directly. + +* 12 minor enhancements: + + * assert_match now checks that act can call =~ and converts exp to a + regexp only if needed. + * Added assert_send... seems useless to me tho. + * message now forces to string... ruby-core likes to pass classes and arrays :( + * Added -v handling and switched to @verbose from $DEBUG. + * Verbose output now includes test class name and adds a sortable running time! + * Switched message generation into procs for message deferment. 
+ * Added skip and renamed fail to flunk. + * Improved output failure messages for assert_instance_of, assert_kind_of + * Improved output for assert_respond_to, assert_same. + * at_exit now exits false instead of errors+failures. + * Made the tests happier and more readable imhfo. + * Switched index(s) == 0 to rindex(s, 0) on nobu's suggestion. Faster. + +* 5 bug fixes: + + * 1.9: Added encoding normalization in mu_pp. + * 1.9: Fixed backtrace filtering (BTs are expanded now) + * Added back exception_details to assert_raises. DOH. + * Fixed shadowed variable in mock.rb + * Fixed stupid muscle memory message bug in assert_send. + +=== 1.2.1 / 2008-06-10 + +* 7 minor enhancements: + + * Added deprecations everywhere in test/unit. + * Added test_order to TestCase. :random on mini, :sorted on test/unit (for now). + * Big cleanup in test/unit for rails. Thanks Jeremy Kemper! + * Minor readability cleanup. + * Pushed setup/run/teardown down to testcase allowing specialized testcases. + * Removed pp. Tests run 2x faster. :/ + * Renamed deprecation methods and moved to test/unit/deprecate.rb. + +=== 1.2.0 / 2008-06-09 + +* 2 major enhancements: + + * Added Mini::Spec. + * Added Mini::Mock. Thanks Steven Baker!! + +* 23 minor enhancements: + + * Added bin/use_miniunit to make it easy to test out miniunit. + * Added -n filtering, thanks to Phil Hagelberg! + * Added args argument to #run, takes ARGV from at_exit. + * Added test name output if $DEBUG. + * Added a refute (was deny) for every assert. + * Added capture_io and a bunch of nice assertions from zentest. + * Added deprecation mechanism for assert_no/not methods to test/unit/assertions. + * Added pp output when available. + * Added tests for all assertions. Pretty much maxed out coverage. + * Added tests to verify consistency and good naming. + * Aliased and deprecated all ugly assertions. + * Cleaned out test/unit. Moved autorun there. + * Code cleanup to make extensions easier. Thanks Chad! + * Got spec args reversed in all but a couple assertions. Much more readable. + * Improved error messages across the board. Adds your message to the default. + * Moved into Mini namespace, renamed to Mini::Test and Mini::Spec. + * Pulled the assertions into their own module... + * Removed as much code as I could while still maintaining full functionality. + * Moved filter_backtrace into MiniTest. + * Removed MiniTest::Unit::run. Unnecessary. + * Removed location_of_failure. Unnecessary. + * Rewrote test/unit's filter_backtrace. Flog from 37.0 to 18.1 + * Removed assert_send. Google says it is never used. + * Renamed MiniTest::Unit.autotest to #run. + * Renamed deny to refute. + * Rewrote some ugly/confusing default assertion messages. + * assert_in_delta now defaults to 0.001 precision. Makes specs prettier. + +* 9 bug fixes: + + * Fixed assert_raises to raise outside of the inner-begin/rescue. + * Fixed for ruby 1.9 and rubinius. + * No longer exits 0 if exception in code PRE-test run causes early exit. + * Removed implementors method list from mini/test.rb - too stale. + * assert_nothing_raised takes a class as an arg. wtf? STUPID + * ".EF" output is now unbuffered. + * Bunch of changes to get working with rails... UGH. + * Added stupid hacks to deal with rails not requiring their dependencies. + * Now bitch loudly if someone defines one of my classes instead of requiring. + * Fixed infect method to work better on 1.9. + * Fixed all shadowed variable warnings in 1.9. 
+ +=== 1.1.0 / 2007-11-08 + +* 4 major enhancements: + + * Finished writing all missing assertions. + * Output matches original test/unit. + * Documented every method needed by language implementor. + * Fully switched over to self-testing setup. + +* 2 minor enhancements: + + * Added deny (assert ! test), our favorite extension to test/unit. + * Added .autotest and fairly complete unit tests. (thanks Chad for help here) + +=== 1.0.0 / 2006-10-30 + +* 1 major enhancement + + * Birthday! diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/Manifest.txt b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/Manifest.txt new file mode 100644 index 0000000..8e096ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/Manifest.txt @@ -0,0 +1,27 @@ +.autotest +History.rdoc +Manifest.txt +README.rdoc +Rakefile +design_rationale.rb +lib/hoe/minitest.rb +lib/minitest.rb +lib/minitest/assertions.rb +lib/minitest/autorun.rb +lib/minitest/benchmark.rb +lib/minitest/expectations.rb +lib/minitest/hell.rb +lib/minitest/mock.rb +lib/minitest/parallel.rb +lib/minitest/pride.rb +lib/minitest/pride_plugin.rb +lib/minitest/spec.rb +lib/minitest/test.rb +lib/minitest/unit.rb +test/minitest/metametameta.rb +test/minitest/test_minitest_assertions.rb +test/minitest/test_minitest_benchmark.rb +test/minitest/test_minitest_mock.rb +test/minitest/test_minitest_reporter.rb +test/minitest/test_minitest_spec.rb +test/minitest/test_minitest_test.rb diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/README.rdoc b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/README.rdoc new file mode 100644 index 0000000..f8cc32c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/README.rdoc @@ -0,0 +1,803 @@ += minitest/{test,spec,mock,benchmark} + +home :: https://github.com/seattlerb/minitest +bugs :: https://github.com/seattlerb/minitest/issues +rdoc :: http://docs.seattlerb.org/minitest +vim :: https://github.com/sunaku/vim-ruby-minitest +emacs:: https://github.com/arthurnn/minitest-emacs + +== DESCRIPTION: + +minitest provides a complete suite of testing facilities supporting +TDD, BDD, mocking, and benchmarking. + + "I had a class with Jim Weirich on testing last week and we were + allowed to choose our testing frameworks. Kirk Haines and I were + paired up and we cracked open the code for a few test + frameworks... + + I MUST say that minitest is *very* readable / understandable + compared to the 'other two' options we looked at. Nicely done and + thank you for helping us keep our mental sanity." + + -- Wayne E. Seguin + +minitest/test is a small and incredibly fast unit testing framework. +It provides a rich set of assertions to make your tests clean and +readable. + +minitest/spec is a functionally complete spec engine. It hooks onto +minitest/test and seamlessly bridges test assertions over to spec +expectations. + +minitest/benchmark is an awesome way to assert the performance of your +algorithms in a repeatable manner. Now you can assert that your newb +co-worker doesn't replace your linear algorithm with an exponential +one! + +minitest/mock by Steven Baker, is a beautifully tiny mock (and stub) +object framework. + +minitest/pride shows pride in testing and adds coloring to your test +output. I guess it is an example of how to write IO pipes too. :P + +minitest/test is meant to have a clean implementation for language +implementors that need a minimal set of methods to bootstrap a working +test suite. For example, there is no magic involved for test-case +discovery. 
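+
+To make that concrete, the sketch below (illustrative only; the class and
+method names are made up and this is not minitest's literal implementation)
+shows what discovery amounts to: plain method reflection over a test class,
+with no registration step or naming DSL involved.
+
+  require "minitest/autorun"
+
+  class TestExample < Minitest::Test
+    def test_truth
+      assert true
+    end
+
+    def helper_method # no test_ prefix, so never picked up as a test
+    end
+  end
+
+  # Discovery boils down to grepping instance methods for the test_ prefix:
+  TestExample.public_instance_methods(true).grep(/^test_/) # => [:test_truth]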
+ + "Again, I can't praise enough the idea of a testing/specing + framework that I can actually read in full in one sitting!" + + -- Piotr Szotkowski + +Comparing to rspec: + + rspec is a testing DSL. minitest is ruby. + + -- Adam Hawkins, "Bow Before MiniTest" + +minitest doesn't reinvent anything that ruby already provides, like: +classes, modules, inheritance, methods. This means you only have to +learn ruby to use minitest and all of your regular OO practices like +extract-method refactorings still apply. + +== FEATURES/PROBLEMS: + +* minitest/autorun - the easy and explicit way to run all your tests. +* minitest/test - a very fast, simple, and clean test system. +* minitest/spec - a very fast, simple, and clean spec system. +* minitest/mock - a simple and clean mock/stub system. +* minitest/benchmark - an awesome way to assert your algorithm's performance. +* minitest/pride - show your pride in testing! +* Incredibly small and fast runner, but no bells and whistles. +* Written by squishy human beings. Software can never be perfect. We will all eventually die. + +== RATIONALE: + +See design_rationale.rb to see how specs and tests work in minitest. + +== SYNOPSIS: + +Given that you'd like to test the following class: + + class Meme + def i_can_has_cheezburger? + "OHAI!" + end + + def will_it_blend? + "YES!" + end + end + +=== Unit tests + +Define your tests as methods beginning with +test_+. + + require "minitest/autorun" + + class TestMeme < Minitest::Test + def setup + @meme = Meme.new + end + + def test_that_kitty_can_eat + assert_equal "OHAI!", @meme.i_can_has_cheezburger? + end + + def test_that_it_will_not_blend + refute_match /^no/i, @meme.will_it_blend? + end + + def test_that_will_be_skipped + skip "test this later" + end + end + +=== Specs + + require "minitest/autorun" + + describe Meme do + before do + @meme = Meme.new + end + + describe "when asked about cheeseburgers" do + it "must respond positively" do + _(@meme.i_can_has_cheezburger?).must_equal "OHAI!" + end + end + + describe "when asked about blending possibilities" do + it "won't say no" do + _(@meme.will_it_blend?).wont_match /^no/i + end + end + end + +For matchers support check out: + +* https://github.com/wojtekmach/minitest-matchers +* https://github.com/rmm5t/minitest-matchers_vaccine + +=== Benchmarks + +Add benchmarks to your tests. + + # optionally run benchmarks, good for CI-only work! + require "minitest/benchmark" if ENV["BENCH"] + + class TestMeme < Minitest::Benchmark + # Override self.bench_range or default range is [1, 10, 100, 1_000, 10_000] + def bench_my_algorithm + assert_performance_linear 0.9999 do |n| # n is a range value + @obj.my_algorithm(n) + end + end + end + +Or add them to your specs. If you make benchmarks optional, you'll +need to wrap your benchmarks in a conditional since the methods won't +be defined. In minitest 5, the describe name needs to match +/Bench(mark)?$/. + + describe "Meme Benchmark" do + if ENV["BENCH"] then + bench_performance_linear "my_algorithm", 0.9999 do |n| + 100.times do + @obj.my_algorithm(n) + end + end + end + end + +outputs something like: + + # Running benchmarks: + + TestBlah 100 1000 10000 + bench_my_algorithm 0.006167 0.079279 0.786993 + bench_other_algorithm 0.061679 0.792797 7.869932 + +Output is tab-delimited to make it easy to paste into a spreadsheet. 
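+
+The first example above mentions overriding +self.bench_range+. As a rough
+sketch (the class, method, and workload below are illustrative assumptions,
+not taken from the docs above), a custom range can be built with the
++bench_exp+ helper; the 0.99 threshold is, roughly, how closely the measured
+times must fit a straight line for the assertion to pass.
+
+  require "minitest/autorun"
+  require "minitest/benchmark"
+
+  class BenchSummer < Minitest::Benchmark
+    # Benchmark n = 1, 10, 100, ..., 100_000 instead of the default range.
+    def self.bench_range
+      bench_exp 1, 100_000
+    end
+
+    def bench_sum
+      assert_performance_linear 0.99 do |n|
+        Array.new(n) { rand }.sum # a linear-time stand-in for your algorithm
+      end
+    end
+  end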
+ +=== Mocks + +Mocks and stubs defined using terminology by Fowler & Meszaros at +http://www.martinfowler.com/bliki/TestDouble.html: + +"Mocks are pre-programmed with expectations which form a specification +of the calls they are expected to receive. They can throw an exception +if they receive a call they don't expect and are checked during +verification to ensure they got all the calls they were expecting." + + class MemeAsker + def initialize(meme) + @meme = meme + end + + def ask(question) + method = question.tr(" ", "_") + "?" + @meme.__send__(method) + end + end + + require "minitest/autorun" + + describe MemeAsker, :ask do + describe "when passed an unpunctuated question" do + it "should invoke the appropriate predicate method on the meme" do + @meme = Minitest::Mock.new + @meme_asker = MemeAsker.new @meme + @meme.expect :will_it_blend?, :return_value + + @meme_asker.ask "will it blend" + + @meme.verify + end + end + end + +==== Multi-threading and Mocks + +Minitest mocks do not support multi-threading. If it works, fine, if it doesn't +you can use regular ruby patterns and facilities like local variables. Here's +an example of asserting that code inside a thread is run: + + def test_called_inside_thread + called = false + pr = Proc.new { called = true } + thread = Thread.new(&pr) + thread.join + assert called, "proc not called" + end + +=== Stubs + +Mocks and stubs are defined using terminology by Fowler & Meszaros at +http://www.martinfowler.com/bliki/TestDouble.html: + +"Stubs provide canned answers to calls made during the test". + +Minitest's stub method overrides a single method for the duration of +the block. + + def test_stale_eh + obj_under_test = Something.new + + refute obj_under_test.stale? + + Time.stub :now, Time.at(0) do # stub goes away once the block is done + assert obj_under_test.stale? + end + end + +A note on stubbing: In order to stub a method, the method must +actually exist prior to stubbing. Use a singleton method to create a +new non-existing method: + + def obj_under_test.fake_method + ... + end + +=== Running Your Tests + +Ideally, you'll use a rake task to run your tests, either piecemeal or +all at once. Both rake and rails ship with rake tasks for running your +tests. BUT! You don't have to: + + % ruby -Ilib:test test/minitest/test_minitest_test.rb + Run options: --seed 37685 + + # Running: + + ...................................................................... (etc) + + Finished in 0.107130s, 1446.8403 runs/s, 2959.0217 assertions/s. + + 155 runs, 317 assertions, 0 failures, 0 errors, 0 skips + +There are runtime options available, both from minitest itself, and also +provided via plugins. To see them, simply run with +--help+: + + % ruby -Ilib:test test/minitest/test_minitest_test.rb --help + minitest options: + -h, --help Display this help. + -s, --seed SEED Sets random seed. Also via env. Eg: SEED=n rake + -v, --verbose Verbose. Show progress processing files. + -n, --name PATTERN Filter run on /regexp/ or string. + -e, --exclude PATTERN Exclude /regexp/ or string from run. + + Known extensions: pride, autotest + -p, --pride Pride. Show your testing pride! + -a, --autotest Connect to autotest server. 
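+
+For example, those flags combine to rerun a single test deterministically
+(the file name, seed value, and pattern here are illustrative); -n accepts
+either a /regexp/ or a plain test name string:
+
+  % ruby -Ilib:test test/minitest/test_meme.rb --seed 37685 -n /cheezburger/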
+ +You can set up a rake task to run all your tests by adding this to your Rakefile: + + require "rake/testtask" + + Rake::TestTask.new(:test) do |t| + t.libs << "test" + t.libs << "lib" + t.test_files = FileList["test/**/test_*.rb"] + end + + task :default => :test + +== Writing Extensions + +To define a plugin, add a file named minitest/XXX_plugin.rb to your +project/gem. That file must be discoverable via ruby's LOAD_PATH (via +rubygems or otherwise). Minitest will find and require that file using +Gem.find_files. It will then try to call +plugin_XXX_init+ during +startup. The option processor will also try to call +plugin_XXX_options+ +passing the OptionParser instance and the current options hash. This +lets you register your own command-line options. Here's a totally +bogus example: + + # minitest/bogus_plugin.rb: + + module Minitest + def self.plugin_bogus_options(opts, options) + opts.on "--myci", "Report results to my CI" do + options[:myci] = true + options[:myci_addr] = get_myci_addr + options[:myci_port] = get_myci_port + end + end + + def self.plugin_bogus_init(options) + self.reporter << MyCI.new(options) if options[:myci] + end + end + +=== Adding custom reporters + +Minitest uses composite reporter to output test results using multiple +reporter instances. You can add new reporters to the composite during +the init_plugins phase. As we saw in +plugin_bogus_init+ above, you +simply add your reporter instance to the composite via <<. + ++AbstractReporter+ defines the API for reporters. You may subclass it +and override any method you want to achieve your desired behavior. + +start :: Called when the run has started. +record :: Called for each result, passed or otherwise. +report :: Called at the end of the run. +passed? :: Called to see if you detected any problems. + +Using our example above, here is how we might implement MyCI: + + # minitest/bogus_plugin.rb + + module Minitest + class MyCI < AbstractReporter + attr_accessor :results, :addr, :port + + def initialize options + self.results = [] + self.addr = options[:myci_addr] + self.port = options[:myci_port] + end + + def record result + self.results << result + end + + def report + CI.connect(addr, port).send_results self.results + end + end + + # code from above... + end + +== FAQ + +=== What versions are compatible with what? Or what versions are supported? + +Minitest is a dependency of rails, which until fairly recently had an +overzealous backwards compatibility policy. As such, I'm stuck +supporting versions of ruby that are long past EOL. Once rails 5.2 is +dropped (hopefully April 2021), I get to drop a bunch of versions of +ruby that I have to currently test against. + +(As of 2021-01-31) + +Current versions of rails: (https://endoflife.date/rails) + + | rails | min ruby | rec ruby | minitest | status | + |-------+----------+----------+----------+----------| + | 7.0 | >= 2.7 | 3.0 | >= 5.1 | Future | + | 6.1 | >= 2.5 | 3.0 | >= 5.1 | Current | + | 6.0 | >= 2.5 | 2.6 | >= 5.1 | Security | + | 5.2 | >= 2.2.2 | 2.5 | ~> 5.1 | Security | EOL @railsconf 2021? 
+ +Current versions of ruby: (https://endoflife.date/ruby) + + | ruby | Status | EOL Date | + |------+---------+------------| + | 3.0 | Current | 2024-03-31 | + | 2.7 | Maint | 2023-03-31 | + | 2.6 | Maint* | 2022-03-31 | + | 2.5 | EOL | 2021-03-31 | + | 2.4 | EOL | 2020-03-31 | + | 2.3 | EOL | 2019-03-31 | + | 2.2 | EOL | 2018-03-31 | + +See also: + +* https://www.fastruby.io/blog/ruby/rails/versions/compatibility-table.html +* https://jamesjeffersconsulting.com/ruby-rails-version-matrix/ + +=== How to test SimpleDelegates? + +The following implementation and test: + + class Worker < SimpleDelegator + def work + end + end + + describe Worker do + before do + @worker = Worker.new(Object.new) + end + + it "must respond to work" do + _(@worker).must_respond_to :work + end + end + +outputs a failure: + + 1) Failure: + Worker#test_0001_must respond to work [bug11.rb:16]: + Expected # (Object) to respond to #work. + +Worker is a SimpleDelegate which in 1.9+ is a subclass of BasicObject. +Expectations are put on Object (one level down) so the Worker +(SimpleDelegate) hits +method_missing+ and delegates down to the ++Object.new+ instance. That object doesn't respond to work so the test +fails. + +You can bypass SimpleDelegate#method_missing by extending the worker +with Minitest::Expectations. You can either do that in your setup at +the instance level, like: + + before do + @worker = Worker.new(Object.new) + @worker.extend Minitest::Expectations + end + +or you can extend the Worker class (within the test file!), like: + + class Worker + include ::Minitest::Expectations + end + +=== How to share code across test classes? + +Use a module. That's exactly what they're for: + + module UsefulStuff + def useful_method + # ... + end + end + + describe Blah do + include UsefulStuff + + def test_whatever + # useful_method available here + end + end + +Remember, +describe+ simply creates test classes. It's just ruby at +the end of the day and all your normal Good Ruby Rules (tm) apply. If +you want to extend your test using setup/teardown via a module, just +make sure you ALWAYS call super. before/after automatically call super +for you, so make sure you don't do it twice. + +=== How to run code before a group of tests? + +Use a constant with begin...end like this: + + describe Blah do + SETUP = begin + # ... this runs once when describe Blah starts + end + # ... + end + +This can be useful for expensive initializations or sharing state. +Remember, this is just ruby code, so you need to make sure this +technique and sharing state doesn't interfere with your tests. + +=== Why am I seeing uninitialized constant MiniTest::Test (NameError)? + +Are you running the test with Bundler (e.g. via bundle exec )? If so, +in order to require minitest, you must first add the gem 'minitest' +to your Gemfile and run +bundle+. Once it's installed, you should be +able to require minitest and run your tests. + +== Prominent Projects using Minitest: + +* arel +* journey +* mime-types +* nokogiri +* rails (active_support et al) +* rake +* rdoc +* ...and of course, everything from seattle.rb... + +== Developing Minitest: + +Minitest requires {Hoe}[https://rubygems.org/gems/hoe]. + +=== Minitest's own tests require UTF-8 external encoding. + +This is a common problem in Windows, where the default external Encoding is +often CP850, but can affect any platform. +Minitest can run test suites using any Encoding, but to run Minitest's +own tests you must have a default external Encoding of UTF-8. 
+ +If your encoding is wrong, you'll see errors like: + + --- expected + +++ actual + @@ -1,2 +1,3 @@ + # encoding: UTF-8 + -"Expected /\\w+/ to not match \"blah blah blah\"." + +"Expected /\\w+/ to not match # encoding: UTF-8 + +\"blah blah blah\"." + +To check your current encoding, run: + + ruby -e 'puts Encoding.default_external' + +If your output is something other than UTF-8, you can set the RUBYOPTS +env variable to a value of '-Eutf-8'. Something like: + + RUBYOPT='-Eutf-8' ruby -e 'puts Encoding.default_external' + +Check your OS/shell documentation for the precise syntax (the above +will not work on a basic Windows CMD prompt, look for the SET command). +Once you've got it successfully outputing UTF-8, use the same setting +when running rake in Minitest. + +=== Minitest's own tests require GNU (or similar) diff. + +This is also a problem primarily affecting Windows developers. PowerShell +has a command called diff, but it is not suitable for use with Minitest. + +If you see failures like either of these, you are probably missing diff tool: + + 4) Failure: + TestMinitestUnitTestCase#test_assert_equal_different_long [D:/ruby/seattlerb/minitest/test/minitest/test_minitest_test.rb:936]: + Expected: "--- expected\n+++ actual\n@@ -1 +1 @@\n-\"hahahahahahahahahahahahahahahahahahahaha\"\n+\"blahblahblahblahblahblahblahblahblahblah\"\n" + Actual: "Expected: \"hahahahahahahahahahahahahahahahahahahaha\"\n Actual: \"blahblahblahblahblahblahblahblahblahblah\"" + + + 5) Failure: + TestMinitestUnitTestCase#test_assert_equal_different_collection_hash_hex_invisible [D:/ruby/seattlerb/minitest/test/minitest/test_minitest_test.rb:845]: + Expected: "No visible difference in the Hash#inspect output.\nYou should look at the implementation of #== on Hash or its members.\n + {1=>#}" + Actual: "Expected: {1=>#}\n Actual: {1=>#}" + + +If you use Cygwin or MSYS2 or similar there are packages that include a +GNU diff for Windows. If you don't, you can download GNU diffutils from +http://gnuwin32.sourceforge.net/packages/diffutils.htm +(make sure to add it to your PATH). + +You can make sure it's installed and path is configured properly with: + + diff.exe -v + +There are multiple lines of output, the first should be something like: + + diff (GNU diffutils) 2.8.1 + +If you are using PowerShell make sure you run diff.exe, not just diff, +which will invoke the PowerShell built in function. + +== Known Extensions: + +capybara_minitest_spec :: Bridge between Capybara RSpec matchers and + Minitest::Spec expectations (e.g. + page.must_have_content("Title")). +color_pound_spec_reporter :: Test names print Ruby Object types in color with + your Minitest Spec style tests. +minispec-metadata :: Metadata for describe/it blocks & CLI tag filter. + E.g. it "requires JS driver", js: true do & + ruby test.rb --tag js runs tests tagged :js. +minispec-rails :: Minimal support to use Spec style in Rails 5+. +mini-apivore :: for swagger based automated API testing. +minitest-around :: Around block for minitest. An alternative to + setup/teardown dance. +minitest-assert_errors :: Adds Minitest assertions to test for errors raised + or not raised by Minitest itself. +minitest-autotest :: autotest is a continuous testing facility meant to + be used during development. +minitest-bacon :: minitest-bacon extends minitest with bacon-like + functionality. +minitest-bang :: Adds support for RSpec-style let! to immediately + invoke let statements before each test. +minitest-bisect :: Helps you isolate and debug random test failures. 
+minitest-blink1_reporter :: Display test results with a Blink1. +minitest-capistrano :: Assertions and expectations for testing + Capistrano recipes. +minitest-capybara :: Capybara matchers support for minitest unit and + spec. +minitest-chef-handler :: Run Minitest suites as Chef report handlers +minitest-ci :: CI reporter plugin for Minitest. +minitest-context :: Defines contexts for code reuse in Minitest + specs that share common expectations. +minitest-debugger :: Wraps assert so failed assertions drop into + the ruby debugger. +minitest-display :: Patches Minitest to allow for an easily + configurable output. +minitest-documentation :: Minimal documentation format inspired by rspec's. +minitest-doc_reporter :: Detailed output inspired by rspec's documentation + format. +minitest-emoji :: Print out emoji for your test passes, fails, and + skips. +minitest-english :: Semantically symmetric aliases for assertions and + expectations. +minitest-excludes :: Clean API for excluding certain tests you + don't want to run under certain conditions. +minitest-fail-fast :: Reimplements RSpec's "fail fast" feature +minitest-filecontent :: Support unit tests with expectation results in files. + Differing results will be stored again in files. +minitest-filesystem :: Adds assertion and expectation to help testing + filesystem contents. +minitest-firemock :: Makes your Minitest mocks more resilient. +minitest-focus :: Focus on one test at a time. +minitest-gcstats :: A minitest plugin that adds a report of the top + tests by number of objects allocated. +minitest-global_expectations:: Support minitest expectation methods for all objects +minitest-great_expectations :: Generally useful additions to minitest's + assertions and expectations. +minitest-growl :: Test notifier for minitest via growl. +minitest-happy :: GLOBALLY ACTIVATE MINITEST PRIDE! RAWR! +minitest-have_tag :: Adds Minitest assertions to test for the existence of + HTML tags, including contents, within a provided string. +minitest-heat :: Reporting that builds a heat map of failure locations +minitest-hooks :: Around and before_all/after_all/around_all hooks +minitest-hyper :: Pretty, single-page HTML reports for your Minitest runs +minitest-implicit-subject :: Implicit declaration of the test subject. +minitest-instrument :: Instrument ActiveSupport::Notifications when + test method is executed. +minitest-instrument-db :: Store information about speed of test execution + provided by minitest-instrument in database. +minitest-junit :: JUnit-style XML reporter for minitest. +minitest-keyword :: Use Minitest assertions with keyword arguments. +minitest-libnotify :: Test notifier for minitest via libnotify. +minitest-line :: Run test at line number. +minitest-logger :: Define assert_log and enable minitest to test log messages. + Supports Logger and Log4r::Logger. +minitest-macruby :: Provides extensions to minitest for macruby UI + testing. +minitest-matchers :: Adds support for RSpec-style matchers to + minitest. +minitest-matchers_vaccine :: Adds assertions that adhere to the matcher spec, + but without any expectation infections. +minitest-metadata :: Annotate tests with metadata (key-value). +minitest-mock_expectations :: Provides method call assertions for minitest. +minitest-mongoid :: Mongoid assertion matchers for Minitest. +minitest-must_not :: Provides must_not as an alias for wont in + Minitest. +minitest-optional_retry :: Automatically retry failed test to help with flakiness. 
+minitest-osx :: Reporter for the Mac OS X notification center. +minitest-parallel_fork :: Fork-based parallelization +minitest-parallel-db :: Run tests in parallel with a single database. +minitest-power_assert :: PowerAssert for Minitest. +minitest-predicates :: Adds support for .predicate? methods. +minitest-profile :: List the 10 slowest tests in your suite. +minitest-rails :: Minitest integration for Rails 3.x. +minitest-rails-capybara :: Capybara integration for Minitest::Rails. +minitest-reporters :: Create customizable Minitest output formats. +minitest-rg :: Colored red/green output for Minitest. +minitest-rspec_mocks :: Use RSpec Mocks with Minitest. +minitest-server :: minitest-server provides a client/server setup + with your minitest process, allowing your test + run to send its results directly to a handler. +minitest-sequel :: Minitest assertions to speed-up development and + testing of Ruby Sequel database setups. +minitest-shared_description :: Support for shared specs and shared spec + subclasses +minitest-should_syntax :: RSpec-style x.should == y assertions for + Minitest. +minitest-shouldify :: Adding all manner of shoulds to Minitest (bad + idea) +minitest-snail :: Print a list of tests that take too long +minitest-spec-context :: Provides rspec-ish context method to + Minitest::Spec. +minitest-spec-expect :: Expect syntax for Minitest::Spec (e.g. + expect(sequences).to_include :celery_man). +minitest-spec-magic :: Minitest::Spec extensions for Rails and beyond. +minitest-spec-rails :: Drop in Minitest::Spec superclass for + ActiveSupport::TestCase. +minitest-sprint :: Runs (Get it? It's fast!) your tests and makes + it easier to rerun individual failures. +minitest-stately :: Find leaking state between tests +minitest-stub_any_instance :: Stub any instance of a method on the given class + for the duration of a block. +minitest-stub-const :: Stub constants for the duration of a block. +minitest-tags :: Add tags for minitest. +minitest-unordered :: Adds a new assertion to minitest for checking the + contents of a collection, ignoring element order. +minitest-vcr :: Automatic cassette managment with Minitest::Spec + and VCR. +minitest_log :: Adds structured logging, data explication, and verdicts. +minitest_owrapper :: Get tests results as a TestResult object. +minitest_should :: Shoulda style syntax for minitest test::unit. +minitest_tu_shim :: Bridges between test/unit and minitest. +mongoid-minitest :: Minitest matchers for Mongoid. +mutant-minitest :: Minitest integration for mutant. +pry-rescue :: A pry plugin w/ minitest support. See + pry-rescue/minitest.rb. +rematch :: Declutter your test files from large hardcoded data + and update them automatically when your code changes. +rspec2minitest :: Easily translate any RSpec matchers to Minitest + assertions and expectations. + +== Unknown Extensions: + +Authors... Please send me a pull request with a description of your minitest extension. + +* assay-minitest +* detroit-minitest +* em-minitest-spec +* flexmock-minitest +* guard-minitest +* guard-minitest-decisiv +* minitest-activemodel +* minitest-ar-assertions +* minitest-capybara-unit +* minitest-colorer +* minitest-deluxe +* minitest-extra-assertions +* minitest-rails-shoulda +* minitest-spec +* minitest-spec-should +* minitest-sugar +* spork-minitest + +== Minitest related goods + +* minitest/pride fabric: http://www.spoonflower.com/fabric/3928730-again-by-katie_allen + +== REQUIREMENTS: + +* Ruby 2.3+. No magic is involved. I hope. 
+ +== INSTALL: + + sudo gem install minitest + +On 1.9, you already have it. To get newer candy you can still install +the gem, and then requiring "minitest/autorun" should automatically +pull it in. If not, you'll need to do it yourself: + + gem "minitest" # ensures you"re using the gem, and not the built-in MT + require "minitest/autorun" + + # ... usual testing stuffs ... + +DO NOTE: There is a serious problem with the way that ruby 1.9/2.0 +packages their own gems. They install a gem specification file, but +don't install the gem contents in the gem path. This messes up +Gem.find_files and many other things (gem which, gem contents, etc). + +Just install minitest as a gem for real and you'll be happier. + +== LICENSE: + +(The MIT License) + +Copyright (c) Ryan Davis, seattle.rb + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/Rakefile new file mode 100644 index 0000000..a09f9a6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/Rakefile @@ -0,0 +1,74 @@ +# -*- ruby -*- + +require "rubygems" +require "hoe" + +Hoe.plugin :seattlerb +Hoe.plugin :rdoc + +Hoe.spec "minitest" do + developer "Ryan Davis", "ryand-ruby@zenspider.com" + + license "MIT" + + require_ruby_version [">= 2.2", "< 4.0"] +end + +desc "Find missing expectations" +task :specs do + $:.unshift "lib" + require "minitest/test" + require "minitest/spec" + + pos_prefix, neg_prefix = "must", "wont" + skip_re = /^(must|wont)$|wont_(throw)|must_(block|not?_|nothing|send|raise$)/x + dont_flip_re = /(must|wont)_(include|respond_to)/ + + map = { + /(must_throw)s/ => '\1', + /(?!not)_same/ => "_be_same_as", + /_in_/ => "_be_within_", + /_operator/ => "_be", + /_includes/ => "_include", + /(must|wont)_(.*_of|nil|silent|empty)/ => '\1_be_\2', + /must_raises/ => "must_raise", + /(must|wont)_predicate/ => '\1_be', + /(must|wont)_path_exists/ => 'path_\1_exist', + } + + expectations = Minitest::Expectations.public_instance_methods.map(&:to_s) + assertions = Minitest::Assertions.public_instance_methods.map(&:to_s) + + assertions.sort.each do |assertion| + expectation = case assertion + when /^assert/ then + assertion.sub(/^assert/, pos_prefix.to_s) + when /^refute/ then + assertion.sub(/^refute/, neg_prefix.to_s) + end + + next unless expectation + next if expectation =~ skip_re + + regexp, replacement = map.find { |re, _| expectation =~ re } + expectation.sub! regexp, replacement if replacement + + next if expectations.include? 
expectation + + args = [assertion, expectation].map(&:to_sym).map(&:inspect) + args << :reverse if expectation =~ dont_flip_re + + puts + puts "##" + puts "# :method: #{expectation}" + puts "# See Minitest::Assertions##{assertion}" + puts + puts "infect_an_assertion #{args.join ", "}" + end +end + +task :bugs do + sh "for f in bug*.rb ; do echo $f; echo; #{Gem.ruby} -Ilib $f && rm $f ; done" +end + +# vim: syntax=Ruby diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/design_rationale.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/design_rationale.rb new file mode 100644 index 0000000..a3fcc37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/design_rationale.rb @@ -0,0 +1,52 @@ +# Specs: # Equivalent Unit Tests: +############################################################################### +describe Thingy do # class TestThingy < Minitest::Test + before do # def setup + do_some_setup # super + end # do_some_setup + # end + it "should do the first thing" do # + 1.must_equal 1 # def test_first_thing + end # assert_equal 1, 1 + # end + describe SubThingy do # end + before do # + do_more_setup # class TestSubThingy < TestThingy + end # def setup + # super + it "should do the second thing" do # do_more_setup + 2.must_equal 2 # end + end # + end # def test_second_thing +end # assert_equal 2, 2 + # end + # end +############################################################################### +# runs 2 specs # runs 3 tests +############################################################################### +# The specs generate: + +class ThingySpec < Minitest::Spec + def setup + super + do_some_setup + end + + def test_should_do_the_first_thing + assert_equal 1, 1 + end +end + +class SubThingySpec < ThingySpec + def setup + super + do_more_setup + end + + # because only setup/teardown is inherited, not specs + remove_method :test_should_do_the_first_thing + + def test_should_do_the_second_thing + assert_equal 2, 2 + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/hoe/minitest.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/hoe/minitest.rb new file mode 100644 index 0000000..eadbf0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/hoe/minitest.rb @@ -0,0 +1,28 @@ +# :stopdoc: + +class Hoe +end + +module Hoe::Minitest + def minitest? + self.name == "minitest" + end + + def initialize_minitest + unless minitest? then + dir = "../../minitest/dev/lib" + Hoe.add_include_dirs dir if File.directory? dir + end + + gem "minitest" + require "minitest" + version = Minitest::VERSION.split(/\./).first(2).join(".") + + dependency "minitest", "~> #{version}", :development unless + minitest? or ENV["MT_NO_ISOLATE"] + end + + def define_minitest_tasks + self.testlib = :minitest + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest.rb new file mode 100644 index 0000000..468b3ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest.rb @@ -0,0 +1,1069 @@ +require "optparse" +require "thread" +require "mutex_m" +require "minitest/parallel" +require "stringio" +require "etc" + +## +# :include: README.rdoc + +module Minitest + VERSION = "5.15.0" # :nodoc: + ENCS = "".respond_to? 
:encoding # :nodoc: + + @@installed_at_exit ||= false + @@after_run = [] + @extensions = [] + + mc = (class << self; self; end) + + ## + # Parallel test executor + + mc.send :attr_accessor, :parallel_executor + + warn "DEPRECATED: use MT_CPU instead of N for parallel test runs" if ENV["N"] + n_threads = (ENV["MT_CPU"] || ENV["N"] || Etc.nprocessors).to_i + self.parallel_executor = Parallel::Executor.new n_threads + + ## + # Filter object for backtraces. + + mc.send :attr_accessor, :backtrace_filter + + ## + # Reporter object to be used for all runs. + # + # NOTE: This accessor is only available during setup, not during runs. + + mc.send :attr_accessor, :reporter + + ## + # Names of known extension plugins. + + mc.send :attr_accessor, :extensions + + ## + # The signal to use for dumping information to STDERR. Defaults to "INFO". + + mc.send :attr_accessor, :info_signal + self.info_signal = "INFO" + + ## + # Registers Minitest to run at process exit + + def self.autorun + if Object.const_defined?(:Warning) && Warning.respond_to?(:[]=) + Warning[:deprecated] = true + end + + at_exit { + next if $! and not ($!.kind_of? SystemExit and $!.success?) + + exit_code = nil + + pid = Process.pid + at_exit { + next if Process.pid != pid + @@after_run.reverse_each(&:call) + exit exit_code || false + } + + exit_code = Minitest.run ARGV + } unless @@installed_at_exit + @@installed_at_exit = true + end + + ## + # A simple hook allowing you to run a block of code after everything + # is done running. Eg: + # + # Minitest.after_run { p $debugging_info } + + def self.after_run &block + @@after_run << block + end + + def self.init_plugins options # :nodoc: + self.extensions.each do |name| + msg = "plugin_#{name}_init" + send msg, options if self.respond_to? msg + end + end + + def self.load_plugins # :nodoc: + return unless self.extensions.empty? + + seen = {} + + require "rubygems" unless defined? Gem + + Gem.find_files("minitest/*_plugin.rb").each do |plugin_path| + name = File.basename plugin_path, "_plugin.rb" + + next if seen[name] + seen[name] = true + + require plugin_path + self.extensions << name + end + end + + ## + # This is the top-level run method. Everything starts from here. It + # tells each Runnable sub-class to run, and each of those are + # responsible for doing whatever they do. + # + # The overall structure of a run looks like this: + # + # Minitest.autorun + # Minitest.run(args) + # Minitest.__run(reporter, options) + # Runnable.runnables.each + # runnable.run(reporter, options) + # self.runnable_methods.each + # self.run_one_method(self, runnable_method, reporter) + # Minitest.run_one_method(klass, runnable_method) + # klass.new(runnable_method).run + + def self.run args = [] + self.load_plugins unless args.delete("--no-plugins") || ENV["MT_NO_PLUGINS"] + + options = process_args args + + reporter = CompositeReporter.new + reporter << SummaryReporter.new(options[:io], options) + reporter << ProgressReporter.new(options[:io], options) + + self.reporter = reporter # this makes it available to plugins + self.init_plugins options + self.reporter = nil # runnables shouldn't depend on the reporter, ever + + self.parallel_executor.start if parallel_executor.respond_to?(:start) + reporter.start + begin + __run reporter, options + rescue Interrupt + warn "Interrupted. Exiting..." + end + self.parallel_executor.shutdown + reporter.report + + reporter.passed? + end + + ## + # Internal run method. Responsible for telling all Runnable + # sub-classes to run. 
+ + def self.__run reporter, options + suites = Runnable.runnables.reject { |s| s.runnable_methods.empty? }.shuffle + parallel, serial = suites.partition { |s| s.test_order == :parallel } + + # If we run the parallel tests before the serial tests, the parallel tests + # could run in parallel with the serial tests. This would be bad because + # the serial tests won't lock around Reporter#record. Run the serial tests + # first, so that after they complete, the parallel tests will lock when + # recording results. + serial.map { |suite| suite.run reporter, options } + + parallel.map { |suite| suite.run reporter, options } + end + + def self.process_args args = [] # :nodoc: + options = { + :io => $stdout, + } + orig_args = args.dup + + OptionParser.new do |opts| + opts.banner = "minitest options:" + opts.version = Minitest::VERSION + + opts.on "-h", "--help", "Display this help." do + puts opts + exit + end + + opts.on "--no-plugins", "Bypass minitest plugin auto-loading (or set $MT_NO_PLUGINS)." + + desc = "Sets random seed. Also via env. Eg: SEED=n rake" + opts.on "-s", "--seed SEED", Integer, desc do |m| + options[:seed] = m.to_i + end + + opts.on "-v", "--verbose", "Verbose. Show progress processing files." do + options[:verbose] = true + end + + opts.on "-n", "--name PATTERN", "Filter run on /regexp/ or string." do |a| + options[:filter] = a + end + + opts.on "-e", "--exclude PATTERN", "Exclude /regexp/ or string from run." do |a| + options[:exclude] = a + end + + opts.on "-S", "--skip CODES", String, "Skip reporting of certain types of results (eg E)." do |s| + options[:skip] = s.chars.to_a + end + + unless extensions.empty? + opts.separator "" + opts.separator "Known extensions: #{extensions.join(", ")}" + + extensions.each do |meth| + msg = "plugin_#{meth}_options" + send msg, opts, options if self.respond_to?(msg) + end + end + + begin + opts.parse! args + rescue OptionParser::InvalidOption => e + puts + puts e + puts + puts opts + exit 1 + end + + orig_args -= args + end + + unless options[:seed] then + srand + options[:seed] = (ENV["SEED"] || srand).to_i % 0xFFFF + orig_args << "--seed" << options[:seed].to_s + end + + srand options[:seed] + + options[:args] = orig_args.map { |s| + s =~ /[\s|&<>$()]/ ? s.inspect : s + }.join " " + + options + end + + def self.filter_backtrace bt # :nodoc: + result = backtrace_filter.filter bt + result = bt.dup if result.empty? + result + end + + ## + # Represents anything "runnable", like Test, Spec, Benchmark, or + # whatever you can dream up. + # + # Subclasses of this are automatically registered and available in + # Runnable.runnables. + + class Runnable + ## + # Number of assertions executed in this run. + + attr_accessor :assertions + + ## + # An assertion raised during the run, if any. + + attr_accessor :failures + + ## + # The time it took to run. + + attr_accessor :time + + def time_it # :nodoc: + t0 = Minitest.clock_time + + yield + ensure + self.time = Minitest.clock_time - t0 + end + + ## + # Name of the run. + + def name + @NAME + end + + ## + # Set the name of the run. + + def name= o + @NAME = o + end + + ## + # Returns all instance methods matching the pattern +re+. + + def self.methods_matching re + public_instance_methods(true).grep(re).map(&:to_s) + end + + def self.reset # :nodoc: + @@runnables = [] + end + + reset + + ## + # Responsible for running all runnable methods in a given class, + # each in its own instance. Each instance is passed to the + # reporter to record. 
+ + def self.run reporter, options = {} + filter = options[:filter] || "/./" + filter = Regexp.new $1 if filter.is_a?(String) && filter =~ %r%/(.*)/% + + filtered_methods = self.runnable_methods.find_all { |m| + filter === m || filter === "#{self}##{m}" + } + + exclude = options[:exclude] + exclude = Regexp.new $1 if exclude =~ %r%/(.*)/% + + filtered_methods.delete_if { |m| + exclude === m || exclude === "#{self}##{m}" + } + + return if filtered_methods.empty? + + with_info_handler reporter do + filtered_methods.each do |method_name| + run_one_method self, method_name, reporter + end + end + end + + ## + # Runs a single method and has the reporter record the result. + # This was considered internal API but is factored out of run so + # that subclasses can specialize the running of an individual + # test. See Minitest::ParallelTest::ClassMethods for an example. + + def self.run_one_method klass, method_name, reporter + reporter.prerecord klass, method_name + reporter.record Minitest.run_one_method(klass, method_name) + end + + def self.with_info_handler reporter, &block # :nodoc: + handler = lambda do + unless reporter.passed? then + warn "Current results:" + warn "" + warn reporter.reporters.first + warn "" + end + end + + on_signal ::Minitest.info_signal, handler, &block + end + + SIGNALS = Signal.list # :nodoc: + + def self.on_signal name, action # :nodoc: + supported = SIGNALS[name] + + old_trap = trap name do + old_trap.call if old_trap.respond_to? :call + action.call + end if supported + + yield + ensure + trap name, old_trap if supported + end + + ## + # Each subclass of Runnable is responsible for overriding this + # method to return all runnable methods. See #methods_matching. + + def self.runnable_methods + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns all subclasses of Runnable. + + def self.runnables + @@runnables + end + + @@marshal_dump_warned = false + + def marshal_dump # :nodoc: + unless @@marshal_dump_warned then + warn ["Minitest::Runnable#marshal_dump is deprecated.", + "You might be violating internals. From", caller.first].join " " + @@marshal_dump_warned = true + end + + [self.name, self.failures, self.assertions, self.time] + end + + def marshal_load ary # :nodoc: + self.name, self.failures, self.assertions, self.time = ary + end + + def failure # :nodoc: + self.failures.first + end + + def initialize name # :nodoc: + self.name = name + self.failures = [] + self.assertions = 0 + end + + ## + # Runs a single method. Needs to return self. + + def run + raise NotImplementedError, "subclass responsibility" + end + + ## + # Did this run pass? + # + # Note: skipped runs are not considered passing, but they don't + # cause the process to exit non-zero. + + def passed? + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns a single character string to print based on the result + # of the run. One of ".", "F", + # "E" or "S". + + def result_code + raise NotImplementedError, "subclass responsibility" + end + + ## + # Was this run skipped? See #passed? for more information. + + def skipped? + raise NotImplementedError, "subclass responsibility" + end + end + + ## + # Shared code for anything that can get passed to a Reporter. See + # Minitest::Test & Minitest::Result. + + module Reportable + ## + # Did this run pass? + # + # Note: skipped runs are not considered passing, but they don't + # cause the process to exit non-zero. + + def passed? + not self.failure + end + + ## + # The location identifier of this test. 
Depends on a method + # existing called class_name. + + def location + loc = " [#{self.failure.location}]" unless passed? or error? + "#{self.class_name}##{self.name}#{loc}" + end + + def class_name # :nodoc: + raise NotImplementedError, "subclass responsibility" + end + + ## + # Returns ".", "F", or "E" based on the result of the run. + + def result_code + self.failure and self.failure.result_code or "." + end + + ## + # Was this run skipped? + + def skipped? + self.failure and Skip === self.failure + end + + ## + # Did this run error? + + def error? + self.failures.any? { |f| UnexpectedError === f } + end + end + + ## + # This represents a test result in a clean way that can be + # marshalled over a wire. Tests can do anything they want to the + # test instance and can create conditions that cause Marshal.dump to + # blow up. By using Result.from(a_test) you can be reasonably sure + # that the test result can be marshalled. + + class Result < Runnable + include Minitest::Reportable + + undef_method :marshal_dump + undef_method :marshal_load + + ## + # The class name of the test result. + + attr_accessor :klass + + ## + # The location of the test method. + + attr_accessor :source_location + + ## + # Create a new test result from a Runnable instance. + + def self.from runnable + o = runnable + + r = self.new o.name + r.klass = o.class.name + r.assertions = o.assertions + r.failures = o.failures.dup + r.time = o.time + + r.source_location = o.method(o.name).source_location rescue ["unknown", -1] + + r + end + + def class_name # :nodoc: + self.klass # for Minitest::Reportable + end + + def to_s # :nodoc: + return location if passed? and not skipped? + + failures.map { |failure| + "#{failure.result_label}:\n#{self.location}:\n#{failure.message}\n" + }.join "\n" + end + end + + ## + # Defines the API for Reporters. Subclass this and override whatever + # you want. Go nuts. + + class AbstractReporter + include Mutex_m + + ## + # Starts reporting on the run. + + def start + end + + ## + # About to start running a test. This allows a reporter to show + # that it is starting or that we are in the middle of a test run. + + def prerecord klass, name + end + + ## + # Output and record the result of the test. Call + # {result#result_code}[rdoc-ref:Runnable#result_code] to get the + # result character string. Stores the result of the run if the run + # did not pass. + + def record result + end + + ## + # Outputs the summary of the run. + + def report + end + + ## + # Did this run pass? + + def passed? + true + end + end + + class Reporter < AbstractReporter # :nodoc: + ## + # The IO used to report. + + attr_accessor :io + + ## + # Command-line options for this run. + + attr_accessor :options + + def initialize io = $stdout, options = {} # :nodoc: + super() + self.io = io + self.options = options + end + end + + ## + # A very simple reporter that prints the "dots" during the run. + # + # This is added to the top-level CompositeReporter at the start of + # the run. If you want to change the output of minitest via a + # plugin, pull this out of the composite and replace it with your + # own. + + class ProgressReporter < Reporter + def prerecord klass, name #:nodoc: + if options[:verbose] then + io.print "%s#%s = " % [klass.name, name] + io.flush + end + end + + def record result # :nodoc: + io.print "%.2f s = " % [result.time] if options[:verbose] + io.print result.result_code + io.puts if options[:verbose] + end + end + + ## + # A reporter that gathers statistics about a test run. 
Does not do + # any IO because meant to be used as a parent class for a reporter + # that does. + # + # If you want to create an entirely different type of output (eg, + # CI, HTML, etc), this is the place to start. + # + # Example: + # + # class JenkinsCIReporter < StatisticsReporter + # def report + # super # Needed to calculate some statistics + # + # print " 30 characters. + # 3. or: Strings are equal to each other (but maybe different encodings?). + # 4. and: we found a diff executable. + + def things_to_diff exp, act + expect = mu_pp_for_diff exp + butwas = mu_pp_for_diff act + + e1, e2 = expect.include?("\n"), expect.include?("\\n") + b1, b2 = butwas.include?("\n"), butwas.include?("\\n") + + need_to_diff = + (e1 ^ e2 || + b1 ^ b2 || + expect.size > 30 || + butwas.size > 30 || + expect == butwas) && + Minitest::Assertions.diff + + need_to_diff && [expect, butwas] + end + + ## + # This returns a human-readable version of +obj+. By default + # #inspect is called. You can override this to use #pretty_inspect + # if you want. + # + # See Minitest::Test.make_my_diffs_pretty! + + def mu_pp obj + s = obj.inspect + + if defined? Encoding then + s = s.encode Encoding.default_external + + if String === obj && (obj.encoding != Encoding.default_external || + !obj.valid_encoding?) then + enc = "# encoding: #{obj.encoding}" + val = "# valid: #{obj.valid_encoding?}" + s = "#{enc}\n#{val}\n#{s}" + end + end + + s + end + + ## + # This returns a diff-able more human-readable version of +obj+. + # This differs from the regular mu_pp because it expands escaped + # newlines and makes hex-values (like object_ids) generic. This + # uses mu_pp to do the first pass and then cleans it up. + + def mu_pp_for_diff obj + str = mu_pp obj + + # both '\n' & '\\n' (_after_ mu_pp (aka inspect)) + single = !!str.match(/(?exp == act printing the difference between + # the two, if possible. + # + # If there is no visible difference but the assertion fails, you + # should suspect that your #== is buggy, or your inspect output is + # missing crucial details. For nicer structural diffing, set + # Minitest::Test.make_my_diffs_pretty! + # + # For floats use assert_in_delta. + # + # See also: Minitest::Assertions.diff + + def assert_equal exp, act, msg = nil + msg = message(msg, E) { diff exp, act } + result = assert exp == act, msg + + if nil == exp then + if Minitest::VERSION =~ /^6/ then + refute_nil exp, "Use assert_nil if expecting nil." + else + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + + warn "DEPRECATED: Use assert_nil if expecting nil from #{where}. This will fail in Minitest 6." + end + end + + result + end + + ## + # For comparing Floats. Fails unless +exp+ and +act+ are within +delta+ + # of each other. + # + # assert_in_delta Math::PI, (22.0 / 7.0), 0.01 + + def assert_in_delta exp, act, delta = 0.001, msg = nil + n = (exp - act).abs + msg = message(msg) { + "Expected |#{exp} - #{act}| (#{n}) to be <= #{delta}" + } + assert delta >= n, msg + end + + ## + # For comparing Floats. Fails unless +exp+ and +act+ have a relative + # error less than +epsilon+. + + def assert_in_epsilon exp, act, epsilon = 0.001, msg = nil + assert_in_delta exp, act, [exp.abs, act.abs].min * epsilon, msg + end + + ## + # Fails unless +collection+ includes +obj+. + + def assert_includes collection, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(collection)} to include #{mu_pp(obj)}" + } + assert_respond_to collection, :include? 
+ assert collection.include?(obj), msg + end + + ## + # Fails unless +obj+ is an instance of +cls+. + + def assert_instance_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to be an instance of #{cls}, not #{obj.class}" + } + + assert obj.instance_of?(cls), msg + end + + ## + # Fails unless +obj+ is a kind of +cls+. + + def assert_kind_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to be a kind of #{cls}, not #{obj.class}" } + + assert obj.kind_of?(cls), msg + end + + ## + # Fails unless +matcher+ =~ +obj+. + + def assert_match matcher, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp matcher} to match #{mu_pp obj}" } + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + assert matcher =~ obj, msg + end + + ## + # Fails unless +obj+ is nil + + def assert_nil obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to be nil" } + assert obj.nil?, msg + end + + ## + # For testing with binary operators. Eg: + # + # assert_operator 5, :<=, 4 + + def assert_operator o1, op, o2 = UNDEFINED, msg = nil + return assert_predicate o1, op, msg if UNDEFINED == o2 + msg = message(msg) { "Expected #{mu_pp(o1)} to be #{op} #{mu_pp(o2)}" } + assert o1.__send__(op, o2), msg + end + + ## + # Fails if stdout or stderr do not output the expected results. + # Pass in nil if you don't care about that streams output. Pass in + # "" if you require it to be silent. Pass in a regexp if you want + # to pattern match. + # + # assert_output(/hey/) { method_with_output } + # + # NOTE: this uses #capture_io, not #capture_subprocess_io. + # + # See also: #assert_silent + + def assert_output stdout = nil, stderr = nil + flunk "assert_output requires a block to capture output." unless + block_given? + + out, err = capture_io do + yield + end + + err_msg = Regexp === stderr ? :assert_match : :assert_equal if stderr + out_msg = Regexp === stdout ? :assert_match : :assert_equal if stdout + + y = send err_msg, stderr, err, "In stderr" if err_msg + x = send out_msg, stdout, out, "In stdout" if out_msg + + (!stdout || x) && (!stderr || y) + rescue Assertion + raise + rescue => e + raise UnexpectedError, e + end + + ## + # Fails unless +path+ exists. + + def assert_path_exists path, msg = nil + msg = message(msg) { "Expected path '#{path}' to exist" } + assert File.exist?(path), msg + end + + ## + # For testing with predicates. Eg: + # + # assert_predicate str, :empty? + # + # This is really meant for specs and is front-ended by assert_operator: + # + # str.must_be :empty? + + def assert_predicate o1, op, msg = nil + msg = message(msg) { "Expected #{mu_pp(o1)} to be #{op}" } + assert o1.__send__(op), msg + end + + ## + # Fails unless the block raises one of +exp+. Returns the + # exception matched so you can check the message, attributes, etc. + # + # +exp+ takes an optional message on the end to help explain + # failures and defaults to StandardError if no exception class is + # passed. Eg: + # + # assert_raises(CustomError) { method_with_custom_error } + # + # With custom error message: + # + # assert_raises(CustomError, 'This should have raised CustomError') { method_with_custom_error } + # + # Using the returned object: + # + # error = assert_raises(CustomError) do + # raise CustomError, 'This is really bad' + # end + # + # assert_equal 'This is really bad', error.message + + def assert_raises *exp + flunk "assert_raises requires a block to capture errors." unless + block_given? 
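+ # A trailing String in +exp+ is popped off and used as a custom failure message; if no exception classes remain, StandardError is assumed.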
+ + msg = "#{exp.pop}.\n" if String === exp.last + exp << StandardError if exp.empty? + + begin + yield + rescue *exp => e + pass # count assertion + return e + rescue Minitest::Assertion # incl Skip & UnexpectedError + # don't count assertion + raise + rescue SignalException, SystemExit + raise + rescue Exception => e + flunk proc { + exception_details(e, "#{msg}#{mu_pp(exp)} exception expected, not") + } + end + + exp = exp.first if exp.size == 1 + + flunk "#{msg}#{mu_pp(exp)} expected but nothing was raised." + end + + ## + # Fails unless +obj+ responds to +meth+. + + def assert_respond_to obj, meth, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} (#{obj.class}) to respond to ##{meth}" + } + assert obj.respond_to?(meth), msg + end + + ## + # Fails unless +exp+ and +act+ are #equal? + + def assert_same exp, act, msg = nil + msg = message(msg) { + data = [mu_pp(act), act.object_id, mu_pp(exp), exp.object_id] + "Expected %s (oid=%d) to be the same as %s (oid=%d)" % data + } + assert exp.equal?(act), msg + end + + ## + # +send_ary+ is a receiver, message and arguments. + # + # Fails unless the call returns a true value + + def assert_send send_ary, m = nil + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + warn "DEPRECATED: assert_send. From #{where}" + + recv, msg, *args = send_ary + m = message(m) { + "Expected #{mu_pp(recv)}.#{msg}(*#{mu_pp(args)}) to return true" } + assert recv.__send__(msg, *args), m + end + + ## + # Fails if the block outputs anything to stderr or stdout. + # + # See also: #assert_output + + def assert_silent + assert_output "", "" do + yield + end + end + + ## + # Fails unless the block throws +sym+ + + def assert_throws sym, msg = nil + default = "Expected #{mu_pp(sym)} to have been thrown" + caught = true + value = catch(sym) do + begin + yield + rescue ThreadError => e # wtf?!? 1.8 + threads == suck + default += ", not \:#{e.message[/uncaught throw \`(\w+?)\'/, 1]}" + rescue ArgumentError => e # 1.9 exception + raise e unless e.message.include?("uncaught throw") + default += ", not #{e.message.split(/ /).last}" + rescue NameError => e # 1.8 exception + raise e unless e.name == sym + default += ", not #{e.name.inspect}" + end + caught = false + end + + assert caught, message(msg) { default } + value + rescue Assertion + raise + rescue => e + raise UnexpectedError, e + end + + ## + # Captures $stdout and $stderr into strings: + # + # out, err = capture_io do + # puts "Some info" + # warn "You did a bad thing" + # end + # + # assert_match %r%info%, out + # assert_match %r%bad%, err + # + # NOTE: For efficiency, this method uses StringIO and does not + # capture IO for subprocesses. Use #capture_subprocess_io for + # that. + + def capture_io + _synchronize do + begin + captured_stdout, captured_stderr = StringIO.new, StringIO.new + + orig_stdout, orig_stderr = $stdout, $stderr + $stdout, $stderr = captured_stdout, captured_stderr + + yield + + return captured_stdout.string, captured_stderr.string + ensure + $stdout = orig_stdout + $stderr = orig_stderr + end + end + end + + ## + # Captures $stdout and $stderr into strings, using Tempfile to + # ensure that subprocess IO is captured as well. 
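+ # Unlike #capture_io, the real $stdout/$stderr descriptors are reopened onto tempfiles, so output written by spawned child processes is captured as well.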
+ # + # out, err = capture_subprocess_io do + # system "echo Some info" + # system "echo You did a bad thing 1>&2" + # end + # + # assert_match %r%info%, out + # assert_match %r%bad%, err + # + # NOTE: This method is approximately 10x slower than #capture_io so + # only use it when you need to test the output of a subprocess. + + def capture_subprocess_io + _synchronize do + begin + require "tempfile" + + captured_stdout, captured_stderr = Tempfile.new("out"), Tempfile.new("err") + + orig_stdout, orig_stderr = $stdout.dup, $stderr.dup + $stdout.reopen captured_stdout + $stderr.reopen captured_stderr + + yield + + $stdout.rewind + $stderr.rewind + + return captured_stdout.read, captured_stderr.read + ensure + $stdout.reopen orig_stdout + $stderr.reopen orig_stderr + + orig_stdout.close + orig_stderr.close + captured_stdout.close! + captured_stderr.close! + end + end + end + + ## + # Returns details for exception +e+ + + def exception_details e, msg + [ + "#{msg}", + "Class: <#{e.class}>", + "Message: <#{e.message.inspect}>", + "---Backtrace---", + "#{Minitest.filter_backtrace(e.backtrace).join("\n")}", + "---------------", + ].join "\n" + end + + ## + # Fails after a given date (in the local time zone). This allows + # you to put time-bombs in your tests if you need to keep + # something around until a later date lest you forget about it. + + def fail_after y,m,d,msg + flunk msg if Time.now > Time.local(y, m, d) + end + + ## + # Fails with +msg+. + + def flunk msg = nil + msg ||= "Epic Fail!" + assert false, msg + end + + ## + # Returns a proc that will output +msg+ along with the default message. + + def message msg = nil, ending = nil, &default + proc { + msg = msg.call.chomp(".") if Proc === msg + custom_message = "#{msg}.\n" unless msg.nil? or msg.to_s.empty? + "#{custom_message}#{default.call}#{ending || "."}" + } + end + + ## + # used for counting assertions + + def pass _msg = nil + assert true + end + + ## + # Fails if +test+ is truthy. + + def refute test, msg = nil + msg ||= message { "Expected #{mu_pp(test)} to not be truthy" } + assert !test, msg + end + + ## + # Fails if +obj+ is empty. + + def refute_empty obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be empty" } + assert_respond_to obj, :empty? + refute obj.empty?, msg + end + + ## + # Fails if exp == act. + # + # For floats use refute_in_delta. + + def refute_equal exp, act, msg = nil + msg = message(msg) { + "Expected #{mu_pp(act)} to not be equal to #{mu_pp(exp)}" + } + refute exp == act, msg + end + + ## + # For comparing Floats. Fails if +exp+ is within +delta+ of +act+. + # + # refute_in_delta Math::PI, (22.0 / 7.0) + + def refute_in_delta exp, act, delta = 0.001, msg = nil + n = (exp - act).abs + msg = message(msg) { + "Expected |#{exp} - #{act}| (#{n}) to not be <= #{delta}" + } + refute delta >= n, msg + end + + ## + # For comparing Floats. Fails if +exp+ and +act+ have a relative error + # less than +epsilon+. + + def refute_in_epsilon a, b, epsilon = 0.001, msg = nil + refute_in_delta a, b, a * epsilon, msg + end + + ## + # Fails if +collection+ includes +obj+. + + def refute_includes collection, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(collection)} to not include #{mu_pp(obj)}" + } + assert_respond_to collection, :include? + refute collection.include?(obj), msg + end + + ## + # Fails if +obj+ is an instance of +cls+. 
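+ #
+ #   refute_instance_of String, []   # illustrative: passes, since an Array is not a String instance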
+ + def refute_instance_of cls, obj, msg = nil + msg = message(msg) { + "Expected #{mu_pp(obj)} to not be an instance of #{cls}" + } + refute obj.instance_of?(cls), msg + end + + ## + # Fails if +obj+ is a kind of +cls+. + + def refute_kind_of cls, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be a kind of #{cls}" } + refute obj.kind_of?(cls), msg + end + + ## + # Fails if +matcher+ =~ +obj+. + + def refute_match matcher, obj, msg = nil + msg = message(msg) { "Expected #{mu_pp matcher} to not match #{mu_pp obj}" } + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + refute matcher =~ obj, msg + end + + ## + # Fails if +obj+ is nil. + + def refute_nil obj, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not be nil" } + refute obj.nil?, msg + end + + ## + # Fails if +o1+ is not +op+ +o2+. Eg: + # + # refute_operator 1, :>, 2 #=> pass + # refute_operator 1, :<, 2 #=> fail + + def refute_operator o1, op, o2 = UNDEFINED, msg = nil + return refute_predicate o1, op, msg if UNDEFINED == o2 + msg = message(msg) { "Expected #{mu_pp(o1)} to not be #{op} #{mu_pp(o2)}" } + refute o1.__send__(op, o2), msg + end + + ## + # Fails if +path+ exists. + + def refute_path_exists path, msg = nil + msg = message(msg) { "Expected path '#{path}' to not exist" } + refute File.exist?(path), msg + end + + ## + # For testing with predicates. + # + # refute_predicate str, :empty? + # + # This is really meant for specs and is front-ended by refute_operator: + # + # str.wont_be :empty? + + def refute_predicate o1, op, msg = nil + msg = message(msg) { "Expected #{mu_pp(o1)} to not be #{op}" } + refute o1.__send__(op), msg + end + + ## + # Fails if +obj+ responds to the message +meth+. + + def refute_respond_to obj, meth, msg = nil + msg = message(msg) { "Expected #{mu_pp(obj)} to not respond to #{meth}" } + + refute obj.respond_to?(meth), msg + end + + ## + # Fails if +exp+ is the same (by object identity) as +act+. + + def refute_same exp, act, msg = nil + msg = message(msg) { + data = [mu_pp(act), act.object_id, mu_pp(exp), exp.object_id] + "Expected %s (oid=%d) to not be the same as %s (oid=%d)" % data + } + refute exp.equal?(act), msg + end + + ## + # Skips the current run. If run in verbose-mode, the skipped run + # gets listed at the end of the run but doesn't cause a failure + # exit code. + + def skip msg = nil, bt = caller + msg ||= "Skipped, no message given" + @skip = true + raise Minitest::Skip, msg, bt + end + + ## + # Skips the current run until a given date (in the local time + # zone). This allows you to put some fixes on hold until a later + # date, but still holds you accountable and prevents you from + # forgetting it. + + def skip_until y,m,d,msg + skip msg if Time.now < Time.local(y, m, d) + where = caller.first.rpartition(':in').reject(&:empty?).first + warn "Stale skip_until %p at %s" % [msg, where] + end + + ## + # Was this testcase skipped? Meant for #teardown. + + def skipped? 
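+ # @skip is only ever set by #skip, so this is true exactly when the test called skip during this run.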
+ defined?(@skip) and @skip + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/autorun.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/autorun.rb new file mode 100644 index 0000000..9409b04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/autorun.rb @@ -0,0 +1,13 @@ +begin + require "rubygems" + gem "minitest" +rescue Gem::LoadError + # do nothing +end + +require "minitest" +require "minitest/spec" +require "minitest/mock" +require "minitest/hell" if ENV["MT_HELL"] + +Minitest.autorun diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/benchmark.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/benchmark.rb new file mode 100644 index 0000000..f2b2465 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/benchmark.rb @@ -0,0 +1,455 @@ +require "minitest/test" +require "minitest/spec" + +module Minitest + ## + # Subclass Benchmark to create your own benchmark runs. Methods + # starting with "bench_" get executed on a per-class. + # + # See Minitest::Assertions + + class Benchmark < Test + def self.io # :nodoc: + @io + end + + def io # :nodoc: + self.class.io + end + + def self.run reporter, options = {} # :nodoc: + @io = reporter.io + super + end + + def self.runnable_methods # :nodoc: + methods_matching(/^bench_/) + end + + ## + # Returns a set of ranges stepped exponentially from +min+ to + # +max+ by powers of +base+. Eg: + # + # bench_exp(2, 16, 2) # => [2, 4, 8, 16] + + def self.bench_exp min, max, base = 10 + min = (Math.log10(min) / Math.log10(base)).to_i + max = (Math.log10(max) / Math.log10(base)).to_i + + (min..max).map { |m| base ** m }.to_a + end + + ## + # Returns a set of ranges stepped linearly from +min+ to +max+ by + # +step+. Eg: + # + # bench_linear(20, 40, 10) # => [20, 30, 40] + + def self.bench_linear min, max, step = 10 + (min..max).step(step).to_a + rescue LocalJumpError # 1.8.6 + r = []; (min..max).step(step) { |n| r << n }; r + end + + ## + # Specifies the ranges used for benchmarking for that class. + # Defaults to exponential growth from 1 to 10k by powers of 10. + # Override if you need different ranges for your benchmarks. + # + # See also: ::bench_exp and ::bench_linear. + + def self.bench_range + bench_exp 1, 10_000 + end + + ## + # Runs the given +work+, gathering the times of each run. Range + # and times are then passed to a given +validation+ proc. Outputs + # the benchmark name and times in tab-separated format, making it + # easy to paste into a spreadsheet for graphing or further + # analysis. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # validation = proc { |x, y| ... } + # assert_performance validation do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance validation, &work + range = self.class.bench_range + + io.print "#{self.name}" + + times = [] + + range.each do |x| + GC.start + t0 = Minitest.clock_time + instance_exec(x, &work) + t = Minitest.clock_time - t0 + + io.print "\t%9.6f" % t + times << t + end + io.puts + + validation[range, times] + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a constant rate (eg, linear slope == 0) within a given + # +threshold+. Note: because we're testing for a slope of 0, R^2 + # is not a good determining factor for the fit, so the threshold + # is applied against the slope itself. As such, you probably want + # to tighten it from the default. 
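+ # (With the default +threshold+ of 0.99, the fitted slope must lie within 1 - 0.99 = 0.01 of zero.)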
+ # + # See https://www.graphpad.com/guides/prism/8/curve-fitting/reg_intepretingnonlinr2.htm + # for more details. + # + # Fit is calculated by #fit_linear. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_constant 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_constant threshold = 0.99, &work + validation = proc do |range, times| + a, b, rr = fit_linear range, times + assert_in_delta 0, b, 1 - threshold + [a, b, rr] + end + + assert_performance validation, &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a exponential curve within a given error +threshold+. + # + # Fit is calculated by #fit_exponential. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_exponential 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_exponential threshold = 0.99, &work + assert_performance validation_for_fit(:exponential, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a logarithmic curve within a given error +threshold+. + # + # Fit is calculated by #fit_logarithmic. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_logarithmic 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_logarithmic threshold = 0.99, &work + assert_performance validation_for_fit(:logarithmic, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered fit to + # match a straight line within a given error +threshold+. + # + # Fit is calculated by #fit_linear. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_linear 0.9999 do |n| + # @obj.algorithm(n) + # end + # end + + def assert_performance_linear threshold = 0.99, &work + assert_performance validation_for_fit(:linear, threshold), &work + end + + ## + # Runs the given +work+ and asserts that the times gathered curve + # fit to match a power curve within a given error +threshold+. + # + # Fit is calculated by #fit_power. + # + # Ranges are specified by ::bench_range. + # + # Eg: + # + # def bench_algorithm + # assert_performance_power 0.9999 do |x| + # @obj.algorithm + # end + # end + + def assert_performance_power threshold = 0.99, &work + assert_performance validation_for_fit(:power, threshold), &work + end + + ## + # Takes an array of x/y pairs and calculates the general R^2 value. + # + # See: http://en.wikipedia.org/wiki/Coefficient_of_determination + + def fit_error xys + y_bar = sigma(xys) { |_, y| y } / xys.size.to_f + ss_tot = sigma(xys) { |_, y| (y - y_bar) ** 2 } + ss_err = sigma(xys) { |x, y| (yield(x) - y) ** 2 } + + 1 - (ss_err / ss_tot) + end + + ## + # To fit a functional form: y = ae^(bx). + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFittingExponential.html + + def fit_exponential xs, ys + n = xs.size + xys = xs.zip(ys) + sxlny = sigma(xys) { |x, y| x * Math.log(y) } + slny = sigma(xys) { |_, y| Math.log(y) } + sx2 = sigma(xys) { |x, _| x * x } + sx = sigma xs + + c = n * sx2 - sx ** 2 + a = (slny * sx2 - sx * sxlny) / c + b = ( n * sxlny - sx * slny ) / c + + return Math.exp(a), b, fit_error(xys) { |x| Math.exp(a + b * x) } + end + + ## + # To fit a functional form: y = a + b*ln(x). + # + # Takes x and y values and returns [a, b, r^2]. 
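+ # (+a+ is the constant term and +b+ the coefficient of ln(x).)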
+ # + # See: http://mathworld.wolfram.com/LeastSquaresFittingLogarithmic.html + + def fit_logarithmic xs, ys + n = xs.size + xys = xs.zip(ys) + slnx2 = sigma(xys) { |x, _| Math.log(x) ** 2 } + slnx = sigma(xys) { |x, _| Math.log(x) } + sylnx = sigma(xys) { |x, y| y * Math.log(x) } + sy = sigma(xys) { |_, y| y } + + c = n * slnx2 - slnx ** 2 + b = ( n * sylnx - sy * slnx ) / c + a = (sy - b * slnx) / n + + return a, b, fit_error(xys) { |x| a + b * Math.log(x) } + end + + ## + # Fits the functional form: a + bx. + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFitting.html + + def fit_linear xs, ys + n = xs.size + xys = xs.zip(ys) + sx = sigma xs + sy = sigma ys + sx2 = sigma(xs) { |x| x ** 2 } + sxy = sigma(xys) { |x, y| x * y } + + c = n * sx2 - sx**2 + a = (sy * sx2 - sx * sxy) / c + b = ( n * sxy - sx * sy ) / c + + return a, b, fit_error(xys) { |x| a + b * x } + end + + ## + # To fit a functional form: y = ax^b. + # + # Takes x and y values and returns [a, b, r^2]. + # + # See: http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html + + def fit_power xs, ys + n = xs.size + xys = xs.zip(ys) + slnxlny = sigma(xys) { |x, y| Math.log(x) * Math.log(y) } + slnx = sigma(xs) { |x | Math.log(x) } + slny = sigma(ys) { | y| Math.log(y) } + slnx2 = sigma(xs) { |x | Math.log(x) ** 2 } + + b = (n * slnxlny - slnx * slny) / (n * slnx2 - slnx ** 2) + a = (slny - b * slnx) / n + + return Math.exp(a), b, fit_error(xys) { |x| (Math.exp(a) * (x ** b)) } + end + + ## + # Enumerates over +enum+ mapping +block+ if given, returning the + # sum of the result. Eg: + # + # sigma([1, 2, 3]) # => 1 + 2 + 3 => 6 + # sigma([1, 2, 3]) { |n| n ** 2 } # => 1 + 4 + 9 => 14 + + def sigma enum, &block + enum = enum.map(&block) if block + enum.inject { |sum, n| sum + n } + end + + ## + # Returns a proc that calls the specified fit method and asserts + # that the error is within a tolerable threshold. + + def validation_for_fit msg, threshold + proc do |range, times| + a, b, rr = send "fit_#{msg}", range, times + assert_operator rr, :>=, threshold + [a, b, rr] + end + end + end +end + +module Minitest + ## + # The spec version of Minitest::Benchmark. + + class BenchSpec < Benchmark + extend Minitest::Spec::DSL + + ## + # This is used to define a new benchmark method. You usually don't + # use this directly and is intended for those needing to write new + # performance curve fits (eg: you need a specific polynomial fit). + # + # See ::bench_performance_linear for an example of how to use this. + + def self.bench name, &block + define_method "bench_#{name.gsub(/\W+/, "_")}", &block + end + + ## + # Specifies the ranges used for benchmarking for that class. + # + # bench_range do + # bench_exp(2, 16, 2) + # end + # + # See Minitest::Benchmark#bench_range for more details. + + def self.bench_range &block + return super unless block + + meta = (class << self; self; end) + meta.send :define_method, "bench_range", &block + end + + ## + # Create a benchmark that verifies that the performance is linear. + # + # describe "my class Bench" do + # bench_performance_linear "fast_algorithm", 0.9999 do |n| + # @obj.fast_algorithm(n) + # end + # end + + def self.bench_performance_linear name, threshold = 0.99, &work + bench name do + assert_performance_linear threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is constant. + # + # describe "my class Bench" do + # bench_performance_constant "zoom_algorithm!" 
do |n| + # @obj.zoom_algorithm!(n) + # end + # end + + def self.bench_performance_constant name, threshold = 0.99, &work + bench name do + assert_performance_constant threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is exponential. + # + # describe "my class Bench" do + # bench_performance_exponential "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_exponential name, threshold = 0.99, &work + bench name do + assert_performance_exponential threshold, &work + end + end + + + ## + # Create a benchmark that verifies that the performance is logarithmic. + # + # describe "my class Bench" do + # bench_performance_logarithmic "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_logarithmic name, threshold = 0.99, &work + bench name do + assert_performance_logarithmic threshold, &work + end + end + + ## + # Create a benchmark that verifies that the performance is power. + # + # describe "my class Bench" do + # bench_performance_power "algorithm" do |n| + # @obj.algorithm(n) + # end + # end + + def self.bench_performance_power name, threshold = 0.99, &work + bench name do + assert_performance_power threshold, &work + end + end + end + + Minitest::Spec.register_spec_type(/Bench(mark)?$/, Minitest::BenchSpec) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/expectations.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/expectations.rb new file mode 100644 index 0000000..fb4ec4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/expectations.rb @@ -0,0 +1,303 @@ +## +# It's where you hide your "assertions". +# +# Please note, because of the way that expectations are implemented, +# all expectations (eg must_equal) are dependent upon a thread local +# variable +:current_spec+. If your specs rely on mixing threads into +# the specs themselves, you're better off using assertions or the new +# _(value) wrapper. For example: +# +# it "should still work in threads" do +# my_threaded_thingy do +# (1+1).must_equal 2 # bad +# assert_equal 2, 1+1 # good +# _(1 + 1).must_equal 2 # good +# value(1 + 1).must_equal 2 # good, also #expect +# _ { 1 + "1" }.must_raise TypeError # good +# end +# end + +module Minitest::Expectations + + ## + # See Minitest::Assertions#assert_empty. 
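+ #
+ #   _([]).must_be_empty    # illustrative: passes
+ #   _([1]).must_be_empty   # illustrative: fails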
+ # + # _(collection).must_be_empty + # + # :method: must_be_empty + + infect_an_assertion :assert_empty, :must_be_empty, :unary + + ## + # See Minitest::Assertions#assert_equal + # + # _(a).must_equal b + # + # :method: must_equal + + infect_an_assertion :assert_equal, :must_equal + + ## + # See Minitest::Assertions#assert_in_delta + # + # _(n).must_be_close_to m [, delta] + # + # :method: must_be_close_to + + infect_an_assertion :assert_in_delta, :must_be_close_to + + infect_an_assertion :assert_in_delta, :must_be_within_delta # :nodoc: + + ## + # See Minitest::Assertions#assert_in_epsilon + # + # _(n).must_be_within_epsilon m [, epsilon] + # + # :method: must_be_within_epsilon + + infect_an_assertion :assert_in_epsilon, :must_be_within_epsilon + + ## + # See Minitest::Assertions#assert_includes + # + # _(collection).must_include obj + # + # :method: must_include + + infect_an_assertion :assert_includes, :must_include, :reverse + + ## + # See Minitest::Assertions#assert_instance_of + # + # _(obj).must_be_instance_of klass + # + # :method: must_be_instance_of + + infect_an_assertion :assert_instance_of, :must_be_instance_of + + ## + # See Minitest::Assertions#assert_kind_of + # + # _(obj).must_be_kind_of mod + # + # :method: must_be_kind_of + + infect_an_assertion :assert_kind_of, :must_be_kind_of + + ## + # See Minitest::Assertions#assert_match + # + # _(a).must_match b + # + # :method: must_match + + infect_an_assertion :assert_match, :must_match + + ## + # See Minitest::Assertions#assert_nil + # + # _(obj).must_be_nil + # + # :method: must_be_nil + + infect_an_assertion :assert_nil, :must_be_nil, :unary + + ## + # See Minitest::Assertions#assert_operator + # + # _(n).must_be :<=, 42 + # + # This can also do predicates: + # + # _(str).must_be :empty? + # + # :method: must_be + + infect_an_assertion :assert_operator, :must_be, :reverse + + ## + # See Minitest::Assertions#assert_output + # + # _ { ... }.must_output out_or_nil [, err] + # + # :method: must_output + + infect_an_assertion :assert_output, :must_output, :block + + ## + # See Minitest::Assertions#assert_raises + # + # _ { ... }.must_raise exception + # + # :method: must_raise + + infect_an_assertion :assert_raises, :must_raise, :block + + ## + # See Minitest::Assertions#assert_respond_to + # + # _(obj).must_respond_to msg + # + # :method: must_respond_to + + infect_an_assertion :assert_respond_to, :must_respond_to, :reverse + + ## + # See Minitest::Assertions#assert_same + # + # _(a).must_be_same_as b + # + # :method: must_be_same_as + + infect_an_assertion :assert_same, :must_be_same_as + + ## + # See Minitest::Assertions#assert_silent + # + # _ { ... }.must_be_silent + # + # :method: must_be_silent + + infect_an_assertion :assert_silent, :must_be_silent, :block + + ## + # See Minitest::Assertions#assert_throws + # + # _ { ... 
}.must_throw sym + # + # :method: must_throw + + infect_an_assertion :assert_throws, :must_throw, :block + + ## + # See Minitest::Assertions#assert_path_exists + # + # _(some_path).path_must_exist + # + # :method: path_must_exist + + infect_an_assertion :assert_path_exists, :path_must_exist, :unary + + ## + # See Minitest::Assertions#refute_path_exists + # + # _(some_path).path_wont_exist + # + # :method: path_wont_exist + + infect_an_assertion :refute_path_exists, :path_wont_exist, :unary + + ## + # See Minitest::Assertions#refute_empty + # + # _(collection).wont_be_empty + # + # :method: wont_be_empty + + infect_an_assertion :refute_empty, :wont_be_empty, :unary + + ## + # See Minitest::Assertions#refute_equal + # + # _(a).wont_equal b + # + # :method: wont_equal + + infect_an_assertion :refute_equal, :wont_equal + + ## + # See Minitest::Assertions#refute_in_delta + # + # _(n).wont_be_close_to m [, delta] + # + # :method: wont_be_close_to + + infect_an_assertion :refute_in_delta, :wont_be_close_to + + infect_an_assertion :refute_in_delta, :wont_be_within_delta # :nodoc: + + ## + # See Minitest::Assertions#refute_in_epsilon + # + # _(n).wont_be_within_epsilon m [, epsilon] + # + # :method: wont_be_within_epsilon + + infect_an_assertion :refute_in_epsilon, :wont_be_within_epsilon + + ## + # See Minitest::Assertions#refute_includes + # + # _(collection).wont_include obj + # + # :method: wont_include + + infect_an_assertion :refute_includes, :wont_include, :reverse + + ## + # See Minitest::Assertions#refute_instance_of + # + # _(obj).wont_be_instance_of klass + # + # :method: wont_be_instance_of + + infect_an_assertion :refute_instance_of, :wont_be_instance_of + + ## + # See Minitest::Assertions#refute_kind_of + # + # _(obj).wont_be_kind_of mod + # + # :method: wont_be_kind_of + + infect_an_assertion :refute_kind_of, :wont_be_kind_of + + ## + # See Minitest::Assertions#refute_match + # + # _(a).wont_match b + # + # :method: wont_match + + infect_an_assertion :refute_match, :wont_match + + ## + # See Minitest::Assertions#refute_nil + # + # _(obj).wont_be_nil + # + # :method: wont_be_nil + + infect_an_assertion :refute_nil, :wont_be_nil, :unary + + ## + # See Minitest::Assertions#refute_operator + # + # _(n).wont_be :<=, 42 + # + # This can also do predicates: + # + # str.wont_be :empty? + # + # :method: wont_be + + infect_an_assertion :refute_operator, :wont_be, :reverse + + ## + # See Minitest::Assertions#refute_respond_to + # + # _(obj).wont_respond_to msg + # + # :method: wont_respond_to + + infect_an_assertion :refute_respond_to, :wont_respond_to, :reverse + + ## + # See Minitest::Assertions#refute_same + # + # _(a).wont_be_same_as b + # + # :method: wont_be_same_as + + infect_an_assertion :refute_same, :wont_be_same_as +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/hell.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/hell.rb new file mode 100644 index 0000000..73c88ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/hell.rb @@ -0,0 +1,11 @@ +require "minitest/parallel" + +class Minitest::Test + parallelize_me! 
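+ # parallelize_me! makes every test method in every Minitest::Test subclass run on the parallel executor.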
+end + +begin + require "minitest/proveit" +rescue LoadError + warn "NOTE: `gem install minitest-proveit` for even more hellish tests" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/mock.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/mock.rb new file mode 100644 index 0000000..ddfc788 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/mock.rb @@ -0,0 +1,244 @@ +class MockExpectationError < StandardError; end # :nodoc: + +module Minitest # :nodoc: + + ## + # A simple and clean mock object framework. + # + # All mock objects are an instance of Mock + + class Mock + alias :__respond_to? :respond_to? + + overridden_methods = %w[ + === + class + inspect + instance_eval + instance_variables + object_id + public_send + respond_to_missing? + send + to_s + ] + + instance_methods.each do |m| + undef_method m unless overridden_methods.include?(m.to_s) || m =~ /^__/ + end + + overridden_methods.map(&:to_sym).each do |method_id| + define_method method_id do |*args, &b| + if @expected_calls.key? method_id then + method_missing(method_id, *args, &b) + else + super(*args, &b) + end + end + end + + def initialize delegator = nil # :nodoc: + @delegator = delegator + @expected_calls = Hash.new { |calls, name| calls[name] = [] } + @actual_calls = Hash.new { |calls, name| calls[name] = [] } + end + + ## + # Expect that method +name+ is called, optionally with +args+ or a + # +blk+, and returns +retval+. + # + # @mock.expect(:meaning_of_life, 42) + # @mock.meaning_of_life # => 42 + # + # @mock.expect(:do_something_with, true, [some_obj, true]) + # @mock.do_something_with(some_obj, true) # => true + # + # @mock.expect(:do_something_else, true) do |a1, a2| + # a1 == "buggs" && a2 == :bunny + # end + # + # +args+ is compared to the expected args using case equality (ie, the + # '===' operator), allowing for less specific expectations. + # + # @mock.expect(:uses_any_string, true, [String]) + # @mock.uses_any_string("foo") # => true + # @mock.verify # => true + # + # @mock.expect(:uses_one_string, true, ["foo"]) + # @mock.uses_one_string("bar") # => raises MockExpectationError + # + # If a method will be called multiple times, specify a new expect for each one. + # They will be used in the order you define them. + # + # @mock.expect(:ordinal_increment, 'first') + # @mock.expect(:ordinal_increment, 'second') + # + # @mock.ordinal_increment # => 'first' + # @mock.ordinal_increment # => 'second' + # @mock.ordinal_increment # => raises MockExpectationError "No more expects available for :ordinal_increment" + # + + def expect name, retval, args = [], &blk + name = name.to_sym + + if block_given? + raise ArgumentError, "args ignored when block given" unless args.empty? + @expected_calls[name] << { :retval => retval, :block => blk } + else + raise ArgumentError, "args must be an array" unless Array === args + @expected_calls[name] << { :retval => retval, :args => args } + end + self + end + + def __call name, data # :nodoc: + case data + when Hash then + "#{name}(#{data[:args].inspect[1..-2]}) => #{data[:retval].inspect}" + else + data.map { |d| __call name, d }.join ", " + end + end + + ## + # Verify that all methods were called as expected. Raises + # +MockExpectationError+ if the mock object was not called as + # expected. 
+ + def verify + @expected_calls.each do |name, expected| + actual = @actual_calls.fetch(name, nil) + raise MockExpectationError, "expected #{__call name, expected[0]}" unless actual + raise MockExpectationError, "expected #{__call name, expected[actual.size]}, got [#{__call name, actual}]" if + actual.size < expected.size + end + true + end + + def method_missing sym, *args, &block # :nodoc: + unless @expected_calls.key?(sym) then + if @delegator && @delegator.respond_to?(sym) + return @delegator.public_send(sym, *args, &block) + else + raise NoMethodError, "unmocked method %p, expected one of %p" % + [sym, @expected_calls.keys.sort_by(&:to_s)] + end + end + + index = @actual_calls[sym].length + expected_call = @expected_calls[sym][index] + + unless expected_call then + raise MockExpectationError, "No more expects available for %p: %p" % + [sym, args] + end + + expected_args, retval, val_block = + expected_call.values_at(:args, :retval, :block) + + if val_block then + # keep "verify" happy + @actual_calls[sym] << expected_call + + raise MockExpectationError, "mocked method %p failed block w/ %p" % + [sym, args] unless val_block.call(*args, &block) + + return retval + end + + if expected_args.size != args.size then + raise ArgumentError, "mocked method %p expects %d arguments, got %d" % + [sym, expected_args.size, args.size] + end + + zipped_args = expected_args.zip(args) + fully_matched = zipped_args.all? { |mod, a| + mod === a or mod == a + } + + unless fully_matched then + raise MockExpectationError, "mocked method %p called with unexpected arguments %p" % + [sym, args] + end + + @actual_calls[sym] << { + :retval => retval, + :args => zipped_args.map! { |mod, a| mod === a ? mod : a }, + } + + retval + end + + def respond_to? sym, include_private = false # :nodoc: + return true if @expected_calls.key? sym.to_sym + return true if @delegator && @delegator.respond_to?(sym, include_private) + __respond_to?(sym, include_private) + end + end +end + +module Minitest::Assertions + ## + # Assert that the mock verifies correctly. + + def assert_mock mock + assert mock.verify + end +end + +## +# Object extensions for Minitest::Mock. + +class Object + + ## + # Add a temporary stubbed method replacing +name+ for the duration + # of the +block+. If +val_or_callable+ responds to #call, then it + # returns the result of calling it, otherwise returns the value + # as-is. If stubbed method yields a block, +block_args+ will be + # passed along. Cleans up the stub at the end of the +block+. The + # method +name+ must exist before stubbing. + # + # def test_stale_eh + # obj_under_test = Something.new + # refute obj_under_test.stale? + # + # Time.stub :now, Time.at(0) do + # assert obj_under_test.stale? + # end + # end + #-- + # NOTE: keyword args in callables are NOT checked for correctness + # against the existing method. Too many edge cases to be worth it. + + def stub name, val_or_callable, *block_args + new_name = "__minitest_stub__#{name}" + + metaclass = class << self; self; end + + if respond_to? name and not methods.map(&:to_s).include? name.to_s then + metaclass.send :define_method, name do |*args| + super(*args) + end + end + + metaclass.send :alias_method, new_name, name + + metaclass.send :define_method, name do |*args, &blk| + if val_or_callable.respond_to? 
:call then + val_or_callable.call(*args, &blk) + else + blk.call(*block_args) if blk + val_or_callable + end + end + + metaclass.send(:ruby2_keywords, name) if metaclass.respond_to?(:ruby2_keywords, true) + + yield self + ensure + metaclass.send :undef_method, name + metaclass.send :alias_method, name, new_name + metaclass.send :undef_method, new_name + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/parallel.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/parallel.rb new file mode 100644 index 0000000..40ac709 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/parallel.rb @@ -0,0 +1,70 @@ +module Minitest + module Parallel #:nodoc: + + ## + # The engine used to run multiple tests in parallel. + + class Executor + + ## + # The size of the pool of workers. + + attr_reader :size + + ## + # Create a parallel test executor of with +size+ workers. + + def initialize size + @size = size + @queue = Queue.new + @pool = nil + end + + ## + # Start the executor + + def start + @pool = size.times.map { + Thread.new(@queue) do |queue| + Thread.current.abort_on_exception = true + while (job = queue.pop) + klass, method, reporter = job + reporter.synchronize { reporter.prerecord klass, method } + result = Minitest.run_one_method klass, method + reporter.synchronize { reporter.record result } + end + end + } + end + + ## + # Add a job to the queue + + def << work; @queue << work; end + + ## + # Shuts down the pool of workers by signalling them to quit and + # waiting for them all to finish what they're currently working + # on. + + def shutdown + size.times { @queue << nil } + @pool.each(&:join) + end + end + + module Test # :nodoc: + def _synchronize; Minitest::Test.io_lock.synchronize { yield }; end # :nodoc: + + module ClassMethods # :nodoc: + def run_one_method klass, method_name, reporter + Minitest.parallel_executor << [klass, method_name, reporter] + end + + def test_order + :parallel + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/pride.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/pride.rb new file mode 100644 index 0000000..f3b8e47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/pride.rb @@ -0,0 +1,4 @@ +require "minitest" + +Minitest.load_plugins +Minitest::PrideIO.pride! diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/pride_plugin.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/pride_plugin.rb new file mode 100644 index 0000000..aeef2b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/pride_plugin.rb @@ -0,0 +1,142 @@ +require "minitest" + +module Minitest + def self.plugin_pride_options opts, _options # :nodoc: + opts.on "-p", "--pride", "Pride. Show your testing pride!" do + PrideIO.pride! + end + end + + def self.plugin_pride_init options # :nodoc: + if PrideIO.pride? then + klass = ENV["TERM"] =~ /^xterm|-256color$/ ? PrideLOL : PrideIO + io = klass.new options[:io] + + self.reporter.reporters.grep(Minitest::Reporter).each do |rep| + rep.io = io if rep.io.tty? + end + end + end + + ## + # Show your testing pride! + + class PrideIO + ## + # Activate the pride plugin. Called from both -p option and minitest/pride + + def self.pride! + @pride = true + end + + ## + # Are we showing our testing pride? + + def self.pride? 
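+ # false until ::pride! flips it (via the -p/--pride flag or requiring minitest/pride)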
+ @pride ||= false + end + + # Start an escape sequence + ESC = "\e[" + + # End the escape sequence + NND = "#{ESC}0m" + + # The IO we're going to pipe through. + attr_reader :io + + def initialize io # :nodoc: + @io = io + # stolen from /System/Library/Perl/5.10.0/Term/ANSIColor.pm + # also reference http://en.wikipedia.org/wiki/ANSI_escape_code + @colors ||= (31..36).to_a + @size = @colors.size + @index = 0 + end + + ## + # Wrap print to colorize the output. + + def print o + case o + when "." then + io.print pride o + when "E", "F" then + io.print "#{ESC}41m#{ESC}37m#{o}#{NND}" + when "S" then + io.print pride o + else + io.print o + end + end + + def puts *o # :nodoc: + o.map! { |s| + s.to_s.sub(/Finished/) { + @index = 0 + "Fabulous run".split(//).map { |c| + pride(c) + }.join + } + } + + io.puts(*o) + end + + ## + # Color a string. + + def pride string + string = "*" if string == "." + c = @colors[@index % @size] + @index += 1 + "#{ESC}#{c}m#{string}#{NND}" + end + + def method_missing msg, *args # :nodoc: + io.send(msg, *args) + end + end + + ## + # If you thought the PrideIO was colorful... + # + # (Inspired by lolcat, but with clean math) + + class PrideLOL < PrideIO + PI_3 = Math::PI / 3 # :nodoc: + + def initialize io # :nodoc: + # walk red, green, and blue around a circle separated by equal thirds. + # + # To visualize, type this into wolfram-alpha: + # + # plot (3*sin(x)+3), (3*sin(x+2*pi/3)+3), (3*sin(x+4*pi/3)+3) + + # 6 has wide pretty gradients. 3 == lolcat, about half the width + @colors = (0...(6 * 7)).map { |n| + n *= 1.0 / 6 + r = (3 * Math.sin(n ) + 3).to_i + g = (3 * Math.sin(n + 2 * PI_3) + 3).to_i + b = (3 * Math.sin(n + 4 * PI_3) + 3).to_i + + # Then we take rgb and encode them in a single number using base 6. + # For some mysterious reason, we add 16... to clear the bottom 4 bits? + # Yes... they're ugly. + + 36 * r + 6 * g + b + 16 + } + + super + end + + ## + # Make the string even more colorful. Damnit. + + def pride string + c = @colors[@index % @size] + @index += 1 + "#{ESC}38;5;#{c}m#{string}#{NND}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/spec.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/spec.rb new file mode 100644 index 0000000..41deb55 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/spec.rb @@ -0,0 +1,343 @@ +require "minitest/test" + +class Module # :nodoc: + def infect_an_assertion meth, new_name, dont_flip = false # :nodoc: + block = dont_flip == :block + dont_flip = false if block + target_obj = block ? '_{obj.method}' : '_(obj)' + + # warn "%-22p -> %p %p" % [meth, new_name, dont_flip] + self.class_eval <<-EOM, __FILE__, __LINE__ + 1 + def #{new_name} *args + where = Minitest.filter_backtrace(caller).first + where = where.split(/:in /, 2).first # clean up noise + Kernel.warn "DEPRECATED: global use of #{new_name} from #\{where}. Use #{target_obj}.#{new_name} instead. This will fail in Minitest 6." + Minitest::Expectation.new(self, Minitest::Spec.current).#{new_name}(*args) + end + EOM + + Minitest::Expectation.class_eval <<-EOM, __FILE__, __LINE__ + 1 + def #{new_name} *args + raise "Calling ##{new_name} outside of test." 
unless ctx + case + when #{!!dont_flip} then + ctx.#{meth}(target, *args) + when #{block} && Proc === target then + ctx.#{meth}(*args, &target) + else + ctx.#{meth}(args.first, target, *args[1..-1]) + end + end + EOM + end +end + +Minitest::Expectation = Struct.new :target, :ctx # :nodoc: + +## +# Kernel extensions for minitest + +module Kernel + ## + # Describe a series of expectations for a given target +desc+. + # + # Defines a test class subclassing from either Minitest::Spec or + # from the surrounding describe's class. The surrounding class may + # subclass Minitest::Spec manually in order to easily share code: + # + # class MySpec < Minitest::Spec + # # ... shared code ... + # end + # + # class TestStuff < MySpec + # it "does stuff" do + # # shared code available here + # end + # describe "inner stuff" do + # it "still does stuff" do + # # ...and here + # end + # end + # end + # + # For more information on getting started with writing specs, see: + # + # http://www.rubyinside.com/a-minitestspec-tutorial-elegant-spec-style-testing-that-comes-with-ruby-5354.html + # + # For some suggestions on how to improve your specs, try: + # + # http://betterspecs.org + # + # but do note that several items there are debatable or specific to + # rspec. + # + # For more information about expectations, see Minitest::Expectations. + + def describe desc, *additional_desc, &block # :doc: + stack = Minitest::Spec.describe_stack + name = [stack.last, desc, *additional_desc].compact.join("::") + sclas = stack.last || if Class === self && kind_of?(Minitest::Spec::DSL) then + self + else + Minitest::Spec.spec_type desc, *additional_desc + end + + cls = sclas.create name, desc + + stack.push cls + cls.class_eval(&block) + stack.pop + cls + end + private :describe +end + +## +# Minitest::Spec -- The faster, better, less-magical spec framework! +# +# For a list of expectations, see Minitest::Expectations. + +class Minitest::Spec < Minitest::Test + + def self.current # :nodoc: + Thread.current[:current_spec] + end + + def initialize name # :nodoc: + super + Thread.current[:current_spec] = self + end + + ## + # Oh look! A Minitest::Spec::DSL module! Eat your heart out DHH. + + module DSL + ## + # Contains pairs of matchers and Spec classes to be used to + # calculate the superclass of a top-level describe. This allows for + # automatically customizable spec types. + # + # See: register_spec_type and spec_type + + TYPES = [[//, Minitest::Spec]] + + ## + # Register a new type of spec that matches the spec's description. + # This method can take either a Regexp and a spec class or a spec + # class and a block that takes the description and returns true if + # it matches. + # + # Eg: + # + # register_spec_type(/Controller$/, Minitest::Spec::Rails) + # + # or: + # + # register_spec_type(Minitest::Spec::RailsModel) do |desc| + # desc.superclass == ActiveRecord::Base + # end + + def register_spec_type *args, &block + if block then + matcher, klass = block, args.first + else + matcher, klass = *args + end + TYPES.unshift [matcher, klass] + end + + ## + # Figure out the spec class to use based on a spec's description. Eg: + # + # spec_type("BlahController") # => Minitest::Spec::Rails + + def spec_type desc, *additional + TYPES.find { |matcher, _klass| + if matcher.respond_to? 
:call then + matcher.call desc, *additional + else + matcher === desc.to_s + end + }.last + end + + def describe_stack # :nodoc: + Thread.current[:describe_stack] ||= [] + end + + def children # :nodoc: + @children ||= [] + end + + def nuke_test_methods! # :nodoc: + self.public_instance_methods.grep(/^test_/).each do |name| + self.send :undef_method, name + end + end + + ## + # Define a 'before' action. Inherits the way normal methods should. + # + # NOTE: +type+ is ignored and is only there to make porting easier. + # + # Equivalent to Minitest::Test#setup. + + def before _type = nil, &block + define_method :setup do + super() + self.instance_eval(&block) + end + end + + ## + # Define an 'after' action. Inherits the way normal methods should. + # + # NOTE: +type+ is ignored and is only there to make porting easier. + # + # Equivalent to Minitest::Test#teardown. + + def after _type = nil, &block + define_method :teardown do + self.instance_eval(&block) + super() + end + end + + ## + # Define an expectation with name +desc+. Name gets morphed to a + # proper test method name. For some freakish reason, people who + # write specs don't like class inheritance, so this goes way out of + # its way to make sure that expectations aren't inherited. + # + # This is also aliased to #specify and doesn't require a +desc+ arg. + # + # Hint: If you _do_ want inheritance, use minitest/test. You can mix + # and match between assertions and expectations as much as you want. + + def it desc = "anonymous", &block + block ||= proc { skip "(no tests defined)" } + + @specs ||= 0 + @specs += 1 + + name = "test_%04d_%s" % [ @specs, desc ] + + undef_klasses = self.children.reject { |c| c.public_method_defined? name } + + define_method name, &block + + undef_klasses.each do |undef_klass| + undef_klass.send :undef_method, name + end + + name + end + + ## + # Essentially, define an accessor for +name+ with +block+. + # + # Why use let instead of def? I honestly don't know. + + def let name, &block + name = name.to_s + pre, post = "let '#{name}' cannot ", ". Please use another name." + methods = Minitest::Spec.instance_methods.map(&:to_s) - %w[subject] + raise ArgumentError, "#{pre}begin with 'test'#{post}" if + name =~ /\Atest/ + raise ArgumentError, "#{pre}override a method in Minitest::Spec#{post}" if + methods.include? name + + define_method name do + @_memoized ||= {} + @_memoized.fetch(name) { |k| @_memoized[k] = instance_eval(&block) } + end + end + + ## + # Another lazy man's accessor generator. Made even more lazy by + # setting the name for you to +subject+. + + def subject &block + let :subject, &block + end + + def create name, desc # :nodoc: + cls = Class.new(self) do + @name = name + @desc = desc + + nuke_test_methods! + end + + children << cls + + cls + end + + def name # :nodoc: + defined?(@name) ? @name : super + end + + def to_s # :nodoc: + name # Can't alias due to 1.8.7, not sure why + end + + attr_reader :desc # :nodoc: + alias :specify :it + + ## + # Rdoc... why are you so dumb? + + module InstanceMethods + ## + # Takes a value or a block and returns a value monad that has + # all of Expectations methods available to it. + # + # _(1 + 1).must_equal 2 + # + # And for blocks: + # + # _ { 1 + "1" }.must_raise TypeError + # + # This method of expectation-based testing is preferable to + # straight-expectation methods (on Object) because it stores its + # test context, bypassing our hacky use of thread-local variables. 
+ # + # NOTE: At some point, the methods on Object will be deprecated + # and then removed. + # + # It is also aliased to #value and #expect for your aesthetic + # pleasure: + # + # _(1 + 1).must_equal 2 + # value(1 + 1).must_equal 2 + # expect(1 + 1).must_equal 2 + + def _ value = nil, &block + Minitest::Expectation.new block || value, self + end + + alias value _ + alias expect _ + + def before_setup # :nodoc: + super + Thread.current[:current_spec] = self + end + end + + def self.extended obj # :nodoc: + obj.send :include, InstanceMethods + end + end + + extend DSL + + TYPES = DSL::TYPES # :nodoc: +end + +require "minitest/expectations" + +class Object # :nodoc: + include Minitest::Expectations unless ENV["MT_NO_EXPECTATIONS"] +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/test.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/test.rb new file mode 100644 index 0000000..8061230 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/test.rb @@ -0,0 +1,230 @@ +require "minitest" unless defined? Minitest::Runnable + +module Minitest + ## + # Subclass Test to create your own tests. Typically you'll want a + # Test subclass per implementation class. + # + # See Minitest::Assertions + + class Test < Runnable + require "minitest/assertions" + include Minitest::Assertions + include Minitest::Reportable + + def class_name # :nodoc: + self.class.name # for Minitest::Reportable + end + + PASSTHROUGH_EXCEPTIONS = [NoMemoryError, SignalException, SystemExit] # :nodoc: + + # :stopdoc: + class << self; attr_accessor :io_lock; end + self.io_lock = Mutex.new + # :startdoc: + + ## + # Call this at the top of your tests when you absolutely + # positively need to have ordered tests. In doing so, you're + # admitting that you suck and your tests are weak. + + def self.i_suck_and_my_tests_are_order_dependent! + class << self + undef_method :test_order if method_defined? :test_order + define_method :test_order do :alpha end + end + end + + ## + # Make diffs for this Test use #pretty_inspect so that diff + # in assert_equal can have more details. NOTE: this is much slower + # than the regular inspect but much more usable for complex + # objects. + + def self.make_my_diffs_pretty! + require "pp" + + define_method :mu_pp, &:pretty_inspect + end + + ## + # Call this at the top of your tests when you want to run your + # tests in parallel. In doing so, you're admitting that you rule + # and your tests are awesome. + + def self.parallelize_me! + include Minitest::Parallel::Test + extend Minitest::Parallel::Test::ClassMethods + end + + ## + # Returns all instance methods starting with "test_". Based on + # #test_order, the methods are either sorted, randomized + # (default), or run in parallel. + + def self.runnable_methods + methods = methods_matching(/^test_/) + + case self.test_order + when :random, :parallel then + max = methods.size + methods.sort.sort_by { rand max } + when :alpha, :sorted then + methods.sort + else + raise "Unknown test_order: #{self.test_order.inspect}" + end + end + + ## + # Defines the order to run tests (:random by default). Override + # this or use a convenience method to change it for your tests. + + def self.test_order + :random + end + + TEARDOWN_METHODS = %w[ before_teardown teardown after_teardown ] # :nodoc: + + ## + # Runs a single test with setup/teardown hooks. 
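+ # The sequence is before_setup / setup / after_setup, then the test method itself, then each of before_teardown / teardown / after_teardown; exceptions raised in any phase (other than the PASSTHROUGH_EXCEPTIONS) are recorded as failures rather than aborting the run.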
+ + def run + with_info_handler do + time_it do + capture_exceptions do + before_setup; setup; after_setup + + self.send self.name + end + + TEARDOWN_METHODS.each do |hook| + capture_exceptions do + self.send hook + end + end + end + end + + Result.from self # per contract + end + + ## + # Provides before/after hooks for setup and teardown. These are + # meant for library writers, NOT for regular test authors. See + # #before_setup for an example. + + module LifecycleHooks + + ## + # Runs before every test, before setup. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # As a simplistic example: + # + # module MyMinitestPlugin + # def before_setup + # super + # # ... stuff to do before setup is run + # end + # + # def after_setup + # # ... stuff to do after setup is run + # super + # end + # + # def before_teardown + # super + # # ... stuff to do before teardown is run + # end + # + # def after_teardown + # # ... stuff to do after teardown is run + # super + # end + # end + # + # class MiniTest::Test + # include MyMinitestPlugin + # end + + def before_setup; end + + ## + # Runs before every test. Use this to set up before each test + # run. + + def setup; end + + ## + # Runs before every test, after setup. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def after_setup; end + + ## + # Runs after every test, before teardown. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def before_teardown; end + + ## + # Runs after every test. Use this to clean up after each test + # run. + + def teardown; end + + ## + # Runs after every test, after teardown. This hook is meant for + # libraries to extend minitest. It is not meant to be used by + # test developers. + # + # See #before_setup for an example. + + def after_teardown; end + end # LifecycleHooks + + def capture_exceptions # :nodoc: + yield + rescue *PASSTHROUGH_EXCEPTIONS + raise + rescue Assertion => e + self.failures << e + rescue Exception => e + self.failures << UnexpectedError.new(sanitize_exception e) + end + + def sanitize_exception e # :nodoc: + Marshal.dump e + e + rescue TypeError + bt = e.backtrace + e = RuntimeError.new "Wrapped undumpable exception for: #{e.class}: #{e.message}" + e.set_backtrace bt + e + end + + def with_info_handler &block # :nodoc: + t0 = Minitest.clock_time + + handler = lambda do + warn "\nCurrent: %s#%s %.2fs" % [self.class, self.name, Minitest.clock_time - t0] + end + + self.class.on_signal ::Minitest.info_signal, handler, &block + end + + include LifecycleHooks + include Guard + extend Guard + end # Test +end + +require "minitest/unit" unless defined?(MiniTest) # compatibility layer only diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/unit.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/unit.rb new file mode 100644 index 0000000..9a4eadf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/lib/minitest/unit.rb @@ -0,0 +1,45 @@ +# :stopdoc: + +unless defined?(Minitest) then + # all of this crap is just to avoid circular requires and is only + # needed if a user requires "minitest/unit" directly instead of + # "minitest/autorun", so we also warn + + from = caller.reject { |s| s =~ /rubygems/ }.join("\n ") + warn "Warning: you should require 'minitest/autorun' instead." 
+ warn %(Warning: or add 'gem "minitest"' before 'require "minitest/autorun"') + warn "From:\n #{from}" + + module Minitest; end + MiniTest = Minitest # prevents minitest.rb from requiring back to us + require "minitest" +end + +MiniTest = Minitest unless defined?(MiniTest) + +module Minitest + class Unit + VERSION = Minitest::VERSION + class TestCase < Minitest::Test + def self.inherited klass # :nodoc: + from = caller.first + warn "MiniTest::Unit::TestCase is now Minitest::Test. From #{from}" + super + end + end + + def self.autorun # :nodoc: + from = caller.first + warn "MiniTest::Unit.autorun is now Minitest.autorun. From #{from}" + Minitest.autorun + end + + def self.after_tests &b # :nodoc: + from = caller.first + warn "MiniTest::Unit.after_tests is now Minitest.after_run. From #{from}" + Minitest.after_run(&b) + end + end +end + +# :startdoc: diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/metametameta.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/metametameta.rb new file mode 100644 index 0000000..6e75b9d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/metametameta.rb @@ -0,0 +1,136 @@ +require "tempfile" +require "stringio" +require "minitest/autorun" + +class Minitest::Test + def clean s + s.gsub(/^ {6}/, "") + end + + def with_empty_backtrace_filter + original = Minitest.backtrace_filter + + obj = Minitest::BacktraceFilter.new + def obj.filter _bt + [] + end + + Minitest::Test.io_lock.synchronize do # try not to trounce in parallel + begin + Minitest.backtrace_filter = obj + yield + ensure + Minitest.backtrace_filter = original + end + end + end +end + + +class FakeNamedTest < Minitest::Test + @@count = 0 + + def self.name + @fake_name ||= begin + @@count += 1 + "FakeNamedTest%02d" % @@count + end + end +end + +module MyModule; end +class AnError < StandardError; include MyModule; end + +class MetaMetaMetaTestCase < Minitest::Test + attr_accessor :reporter, :output, :tu + + def with_stderr err + old = $stderr + $stderr = err + yield + ensure + $stderr = old + end + + def run_tu_with_fresh_reporter flags = %w[--seed 42] + options = Minitest.process_args flags + + @output = StringIO.new("".encode('UTF-8')) + + self.reporter = Minitest::CompositeReporter.new + reporter << Minitest::SummaryReporter.new(@output, options) + reporter << Minitest::ProgressReporter.new(@output, options) + + with_stderr @output do + reporter.start + + yield(reporter) if block_given? + + @tus ||= [@tu] + @tus.each do |tu| + Minitest::Runnable.runnables.delete tu + + tu.run reporter, options + end + + reporter.report + end + end + + def first_reporter + reporter.reporters.first + end + + def assert_report expected, flags = %w[--seed 42], &block + header = clean <<-EOM + Run options: #{flags.map { |s| s =~ /\|/ ? s.inspect : s }.join " "} + + # Running: + + EOM + + run_tu_with_fresh_reporter flags, &block + + output = normalize_output @output.string.dup + + assert_equal header + expected, output + end + + def normalize_output output + output.sub!(/Finished in .*/, "Finished in 0.00") + output.sub!(/Loaded suite .*/, "Loaded suite blah") + + output.gsub!(/FakeNamedTest\d+/, "FakeNamedTestXX") + output.gsub!(/ = \d+.\d\d s = /, " = 0.00 s = ") + output.gsub!(/0x[A-Fa-f0-9]+/, "0xXXX") + output.gsub!(/ +$/, "") + + if windows? 
then + output.gsub!(/\[(?:[A-Za-z]:)?[^\]:]+:\d+\]/, "[FILE:LINE]") + output.gsub!(/^(\s+)(?:[A-Za-z]:)?[^:]+:\d+:in/, '\1FILE:LINE:in') + else + output.gsub!(/\[[^\]:]+:\d+\]/, "[FILE:LINE]") + output.gsub!(/^(\s+)[^:]+:\d+:in/, '\1FILE:LINE:in') + end + + output.gsub!(/( at )[^:]+:\d+/, '\1[FILE:LINE]') + + output + end + + def restore_env + old_value = ENV["MT_NO_SKIP_MSG"] + ENV.delete "MT_NO_SKIP_MSG" + + yield + ensure + ENV["MT_NO_SKIP_MSG"] = old_value + end + + def setup + super + srand 42 + Minitest::Test.reset + @tu = nil + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_assertions.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_assertions.rb new file mode 100644 index 0000000..283dff5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_assertions.rb @@ -0,0 +1,1588 @@ +# encoding: UTF-8 + +require "minitest/autorun" + +if defined? Encoding then + e = Encoding.default_external + if e != Encoding::UTF_8 then + warn "" + warn "" + warn "NOTE: External encoding #{e} is not UTF-8. Tests WILL fail." + warn " Run tests with `RUBYOPT=-Eutf-8 rake` to avoid errors." + warn "" + warn "" + end +end + +SomeError = Class.new Exception + +unless defined? MyModule then + module MyModule; end + class AnError < StandardError; include MyModule; end +end + +class TestMinitestAssertions < Minitest::Test + # do not call parallelize_me! - teardown accesses @tc._assertions + # which is not threadsafe. Nearly every method in here is an + # assertion test so it isn't worth splitting it out further. + + RUBY18 = !defined? Encoding + + # not included in JRuby + RE_LEVELS = /\(\d+ levels\) / + + class DummyTest + include Minitest::Assertions + # include Minitest::Reportable # TODO: why do I really need this? + + attr_accessor :assertions, :failure + + def initialize + self.assertions = 0 + self.failure = nil + end + end + + def setup + super + + Minitest::Test.reset + + @tc = DummyTest.new + @zomg = "zomg ponies!" # TODO: const + @assertion_count = 1 + end + + def teardown + assert_equal(@assertion_count, @tc.assertions, + "expected #{@assertion_count} assertions to be fired during the test, not #{@tc.assertions}") + end + + def assert_deprecated name + dep = /DEPRECATED: #{name}. From #{__FILE__}:\d+(?::.*)?/ + dep = "" if $-w.nil? + + assert_output nil, dep do + yield + end + end + + def assert_triggered expected, klass = Minitest::Assertion + e = assert_raises klass do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, '\1') + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + msg.gsub!(/(\d\.\d{6})\d+/, '\1xxx') # normalize: ruby version, impl, platform + + assert_msg = Regexp === expected ? :assert_match : :assert_equal + self.send assert_msg, expected, msg + end + + def assert_unexpected expected + expected = Regexp.new expected if String === expected + + assert_triggered expected, Minitest::UnexpectedError do + yield + end + end + + def clean s + s.gsub(/^ {6,10}/, "") + end + + def non_verbose + orig_verbose = $VERBOSE + $VERBOSE = false + + yield + ensure + $VERBOSE = orig_verbose + end + + def test_assert + @assertion_count = 2 + + @tc.assert_equal true, @tc.assert(true), "returns true on success" + end + + def test_assert__triggered + assert_triggered "Expected false to be truthy." 
do + @tc.assert false + end + end + + def test_assert__triggered_message + assert_triggered @zomg do + @tc.assert false, @zomg + end + end + + def test_assert__triggered_lambda + assert_triggered "whoops" do + @tc.assert false, lambda { "whoops" } + end + end + + def test_assert_empty + @assertion_count = 2 + + @tc.assert_empty [] + end + + def test_assert_empty_triggered + @assertion_count = 2 + + assert_triggered "Expected [1] to be empty." do + @tc.assert_empty [1] + end + end + + def test_assert_equal + @tc.assert_equal 1, 1 + end + + def test_assert_equal_different_collection_array_hex_invisible + object1 = Object.new + object2 = Object.new + msg = "No visible difference in the Array#inspect output. + You should look at the implementation of #== on Array or its members. + [#]".gsub(/^ +/, "") + assert_triggered msg do + @tc.assert_equal [object1], [object2] + end + end + + def test_assert_equal_different_collection_hash_hex_invisible + h1, h2 = {}, {} + h1[1] = Object.new + h2[1] = Object.new + msg = "No visible difference in the Hash#inspect output. + You should look at the implementation of #== on Hash or its members. + {1=>#}".gsub(/^ +/, "") + + assert_triggered msg do + @tc.assert_equal h1, h2 + end + end + + def test_assert_equal_different_diff_deactivated + without_diff do + assert_triggered util_msg("haha" * 10, "blah" * 10) do + o1 = "haha" * 10 + o2 = "blah" * 10 + + @tc.assert_equal o1, o2 + end + end + end + + def test_assert_equal_different_message + assert_triggered "whoops.\nExpected: 1\n Actual: 2" do + @tc.assert_equal 1, 2, message { "whoops" } + end + end + + def test_assert_equal_different_lambda + assert_triggered "whoops.\nExpected: 1\n Actual: 2" do + @tc.assert_equal 1, 2, lambda { "whoops" } + end + end + + def test_assert_equal_different_hex + c = Class.new do + def initialize s; @name = s; end + end + + o1 = c.new "a" + o2 = c.new "b" + msg = clean <<-EOS + --- expected + +++ actual + @@ -1 +1 @@ + -#<#:0xXXXXXX @name=\"a\"> + +#<#:0xXXXXXX @name=\"b\"> + EOS + + assert_triggered msg do + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_hex_invisible + o1 = Object.new + o2 = Object.new + + msg = "No visible difference in the Object#inspect output. + You should look at the implementation of #== on Object or its members. + #".gsub(/^ +/, "") + + assert_triggered msg do + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long + msg = "--- expected + +++ actual + @@ -1 +1 @@ + -\"hahahahahahahahahahahahahahahahahahahaha\" + +\"blahblahblahblahblahblahblahblahblahblah\" + ".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "haha" * 10 + o2 = "blah" * 10 + + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long_invisible + msg = "No visible difference in the String#inspect output. + You should look at the implementation of #== on String or its members. + \"blahblahblahblahblahblahblahblahblahblah\"".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "blah" * 10 + o2 = "blah" * 10 + def o1.== _ + false + end + @tc.assert_equal o1, o2 + end + end + + def test_assert_equal_different_long_msg + msg = "message. 
+ --- expected + +++ actual + @@ -1 +1 @@ + -\"hahahahahahahahahahahahahahahahahahahaha\" + +\"blahblahblahblahblahblahblahblahblahblah\" + ".gsub(/^ +/, "") + + assert_triggered msg do + o1 = "haha" * 10 + o2 = "blah" * 10 + @tc.assert_equal o1, o2, "message" + end + end + + def test_assert_equal_different_short + assert_triggered util_msg(1, 2) do + @tc.assert_equal 1, 2 + end + end + + def test_assert_equal_different_short_msg + assert_triggered util_msg(1, 2, "message") do + @tc.assert_equal 1, 2, "message" + end + end + + def test_assert_equal_different_short_multiline + msg = "--- expected\n+++ actual\n@@ -1,2 +1,2 @@\n \"a\n-b\"\n+c\"\n" + assert_triggered msg do + @tc.assert_equal "a\nb", "a\nc" + end + end + + def test_assert_equal_does_not_allow_lhs_nil + if Minitest::VERSION =~ /^6/ then + warn "Time to strip the MT5 test" + + @assertion_count += 1 + assert_triggered(/Use assert_nil if expecting nil/) do + @tc.assert_equal nil, nil + end + else + err_re = /Use assert_nil if expecting nil from .*test_minitest_\w+.rb/ + err_re = "" if $-w.nil? + + assert_output "", err_re do + @tc.assert_equal nil, nil + end + end + end + + def test_assert_equal_does_not_allow_lhs_nil_triggered + assert_triggered "Expected: nil\n Actual: false" do + @tc.assert_equal nil, false + end + end + + def test_assert_equal_string_bug791 + exp = <<-'EOF'.gsub(/^ {10}/, "") # note single quotes + --- expected + +++ actual + @@ -1,2 +1 @@ + -"\\n + -" + +"\\\" + EOF + + exp = "Expected: \"\\\\n\"\n Actual: \"\\\\\"" + assert_triggered exp do + @tc.assert_equal "\\n", "\\" + end + end + + def test_assert_equal_string_both_escaped_unescaped_newlines + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,2 +1 @@ + -\"A\\n + -B\" + +\"A\\n\\\\nB\" + EOM + + assert_triggered msg do + exp = "A\\nB" + act = "A\n\\nB" + + @tc.assert_equal exp, act + end + end + + def test_assert_equal_string_encodings + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,3 +1,3 @@ + -# encoding: UTF-8 + -# valid: false + +# encoding: ASCII-8BIT + +# valid: true + "bad-utf8-\\xF1.txt" + EOM + + assert_triggered msg do + x = "bad-utf8-\xF1.txt" + y = x.dup.force_encoding "binary" # TODO: switch to .b when 1.9 dropped + @tc.assert_equal x, y + end + end unless RUBY18 + + def test_assert_equal_string_encodings_both_different + msg = <<-EOM.gsub(/^ {10}/, "") + --- expected + +++ actual + @@ -1,3 +1,3 @@ + -# encoding: US-ASCII + -# valid: false + +# encoding: ASCII-8BIT + +# valid: true + "bad-utf8-\\xF1.txt" + EOM + + assert_triggered msg do + x = "bad-utf8-\xF1.txt".force_encoding "ASCII" + y = x.dup.force_encoding "binary" # TODO: switch to .b when 1.9 dropped + @tc.assert_equal x, y + end + end unless RUBY18 + + def test_assert_equal_unescape_newlines + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1,2 +1,2 @@ + -"hello + +"hello\n + world" + EOM + + assert_triggered msg do + exp = "hello\nworld" + act = 'hello\nworld' # notice single quotes + + @tc.assert_equal exp, act + end + end + + def test_assert_in_delta + @tc.assert_in_delta 0.0, 1.0 / 1000, 0.1 + end + + def test_assert_in_delta_triggered + x = "1.0e-06" + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= #{x}." 
do + @tc.assert_in_delta 0.0, 1.0 / 1000, 0.000001 + end + end + + def test_assert_in_epsilon + @assertion_count = 10 + + @tc.assert_in_epsilon 10_000, 9991 + @tc.assert_in_epsilon 9991, 10_000 + @tc.assert_in_epsilon 1.0, 1.001 + @tc.assert_in_epsilon 1.001, 1.0 + + @tc.assert_in_epsilon 10_000, 9999.1, 0.0001 + @tc.assert_in_epsilon 9999.1, 10_000, 0.0001 + @tc.assert_in_epsilon 1.0, 1.0001, 0.0001 + @tc.assert_in_epsilon 1.0001, 1.0, 0.0001 + + @tc.assert_in_epsilon(-1, -1) + @tc.assert_in_epsilon(-10_000, -9991) + end + + def test_assert_in_epsilon_triggered + assert_triggered "Expected |10000 - 9990| (10) to be <= 9.99." do + @tc.assert_in_epsilon 10_000, 9990 + end + end + + def test_assert_in_epsilon_triggered_negative_case + x = (RUBY18 and not maglev?) ? "0.1" : "0.100000xxx" + y = "0.1" + assert_triggered "Expected |-1.1 - -1| (#{x}) to be <= #{y}." do + @tc.assert_in_epsilon(-1.1, -1, 0.1) + end + end + + def test_assert_includes + @assertion_count = 2 + + @tc.assert_includes [true], true + end + + def test_assert_includes_triggered + @assertion_count = 3 + + e = @tc.assert_raises Minitest::Assertion do + @tc.assert_includes [true], false + end + + expected = "Expected [true] to include false." + assert_equal expected, e.message + end + + def test_assert_instance_of + @tc.assert_instance_of String, "blah" + end + + def test_assert_instance_of_triggered + assert_triggered 'Expected "blah" to be an instance of Array, not String.' do + @tc.assert_instance_of Array, "blah" + end + end + + def test_assert_kind_of + @tc.assert_kind_of String, "blah" + end + + def test_assert_kind_of_triggered + assert_triggered 'Expected "blah" to be a kind of Array, not String.' do + @tc.assert_kind_of Array, "blah" + end + end + + def test_assert_match + @assertion_count = 2 + @tc.assert_match(/\w+/, "blah blah blah") + end + + def test_assert_match_matchee_to_str + @assertion_count = 2 + + obj = Object.new + def obj.to_str; "blah" end + + @tc.assert_match "blah", obj + end + + def test_assert_match_matcher_object + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; true end + + @tc.assert_match pattern, 5 + end + + def test_assert_match_object_triggered + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; false end + def pattern.inspect; "[Object]" end + + assert_triggered "Expected [Object] to match 5." do + @tc.assert_match pattern, 5 + end + end + + def test_assert_match_triggered + @assertion_count = 2 + assert_triggered 'Expected /\d+/ to match "blah blah blah".' do + @tc.assert_match(/\d+/, "blah blah blah") + end + end + + def test_assert_nil + @tc.assert_nil nil + end + + def test_assert_nil_triggered + assert_triggered "Expected 42 to be nil." do + @tc.assert_nil 42 + end + end + + def test_assert_operator + @tc.assert_operator 2, :>, 1 + end + + def test_assert_operator_bad_object + bad = Object.new + def bad.== _; true end + + @tc.assert_operator bad, :equal?, bad + end + + def test_assert_operator_triggered + assert_triggered "Expected 2 to be < 1." 
do + @tc.assert_operator 2, :<, 1 + end + end + + def test_assert_output_both + @assertion_count = 2 + + @tc.assert_output "yay", "blah" do + print "yay" + $stderr.print "blah" + end + end + + def test_assert_output_both_regexps + @assertion_count = 4 + + @tc.assert_output(/y.y/, /bl.h/) do + print "yay" + $stderr.print "blah" + end + end + + def test_assert_output_err + @tc.assert_output nil, "blah" do + $stderr.print "blah" + end + end + + def test_assert_output_neither + @assertion_count = 0 + + @tc.assert_output do + # do nothing + end + end + + def test_assert_output_out + @tc.assert_output "blah" do + print "blah" + end + end + + def test_assert_output_triggered_both + assert_triggered util_msg("blah", "blah blah", "In stderr") do + @tc.assert_output "yay", "blah" do + print "boo" + $stderr.print "blah blah" + end + end + end + + def test_assert_output_triggered_err + assert_triggered util_msg("blah", "blah blah", "In stderr") do + @tc.assert_output nil, "blah" do + $stderr.print "blah blah" + end + end + end + + def test_assert_output_triggered_out + assert_triggered util_msg("blah", "blah blah", "In stdout") do + @tc.assert_output "blah" do + print "blah blah" + end + end + end + + def test_assert_output_no_block + assert_triggered "assert_output requires a block to capture output." do + @tc.assert_output "blah" + end + end + + def test_assert_output_nested_assert_uncaught + @assertion_count = 1 + + assert_triggered "Epic Fail!" do + @tc.assert_output "blah\n" do + puts "blah" + @tc.flunk + end + end + end + + def test_assert_output_nested_raise + @assertion_count = 2 + + @tc.assert_output "blah\n" do + @tc.assert_raises RuntimeError do + puts "blah" + raise "boom!" + end + end + end + + def test_assert_output_nested_raise_bad + @assertion_count = 0 + + assert_unexpected "boom!" do + @tc.assert_raises do # 2) bypassed via UnexpectedError + @tc.assert_output "blah\n" do # 1) captures and raises UnexpectedError + puts "not_blah" + raise "boom!" + end + end + end + end + + def test_assert_output_nested_raise_mismatch + # this test is redundant, but illustrative + @assertion_count = 0 + + assert_unexpected "boom!" do + @tc.assert_raises RuntimeError do # 2) bypassed via UnexpectedError + @tc.assert_output "blah\n" do # 1) captures and raises UnexpectedError + puts "not_blah" + raise ArgumentError, "boom!" + end + end + end + end + + def test_assert_output_nested_throw_caught + @assertion_count = 2 + + @tc.assert_output "blah\n" do + @tc.assert_throws :boom! do + puts "blah" + throw :boom! + end + end + end + + def test_assert_output_nested_throw_caught_bad + @assertion_count = 1 # want 0; can't prevent throw from escaping :( + + @tc.assert_throws :boom! do # 2) captured via catch + @tc.assert_output "blah\n" do # 1) bypassed via throw + puts "not_blah" + throw :boom! + end + end + end + + def test_assert_output_nested_throw_mismatch + @assertion_count = 0 + + assert_unexpected "uncaught throw :boom!" do + @tc.assert_throws :not_boom! do # 2) captured via assert_throws+rescue + @tc.assert_output "blah\n" do # 1) bypassed via throw + puts "not_blah" + throw :boom! + end + end + end + end + + def test_assert_output_uncaught_raise + @assertion_count = 0 + + assert_unexpected "RuntimeError: boom!" do + @tc.assert_output "blah\n" do + puts "not_blah" + raise "boom!" + end + end + end + + def test_assert_output_uncaught_throw + @assertion_count = 0 + + assert_unexpected "uncaught throw :boom!" do + @tc.assert_output "blah\n" do + puts "not_blah" + throw :boom! 
+ end + end + end + def test_assert_predicate + @tc.assert_predicate "", :empty? + end + + def test_assert_predicate_triggered + assert_triggered 'Expected "blah" to be empty?.' do + @tc.assert_predicate "blah", :empty? + end + end + + def test_assert_raises + @tc.assert_raises RuntimeError do + raise "blah" + end + end + + def test_assert_raises_default + @tc.assert_raises do + raise StandardError, "blah" + end + end + + def test_assert_raises_default_triggered + e = assert_raises Minitest::Assertion do + @tc.assert_raises do + raise SomeError, "blah" + end + end + + expected = clean <<-EOM.chomp + [StandardError] exception expected, not + Class: + Message: <\"blah\"> + ---Backtrace--- + FILE:LINE:in \`block in test_assert_raises_default_triggered\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(RE_LEVELS, "") unless jruby? + + assert_equal expected, actual + end + + def test_assert_raises_exit + @tc.assert_raises SystemExit do + exit 1 + end + end + + def test_assert_raises_module + @tc.assert_raises MyModule do + raise AnError + end + end + + def test_assert_raises_signals + @tc.assert_raises SignalException do + raise SignalException, :INT + end + end + + def test_assert_raises_throw_nested_bad + @assertion_count = 0 + + assert_unexpected "RuntimeError: boom!" do + @tc.assert_raises do + @tc.assert_throws :blah do + raise "boom!" + throw :not_blah + end + end + end + end + + ## + # *sigh* This is quite an odd scenario, but it is from real (albeit + # ugly) test code in ruby-core: + + # http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=29259 + + def test_assert_raises_skip + @assertion_count = 0 + + assert_triggered "skipped", Minitest::Skip do + @tc.assert_raises ArgumentError do + begin + raise "blah" + rescue + skip "skipped" + end + end + end + end + + def test_assert_raises_subclass + @tc.assert_raises StandardError do + raise AnError + end + end + + def test_assert_raises_subclass_triggered + e = assert_raises Minitest::Assertion do + @tc.assert_raises SomeError do + raise AnError, "some message" + end + end + + expected = clean <<-EOM + [SomeError] exception expected, not + Class: + Message: <\"some message\"> + ---Backtrace--- + FILE:LINE:in \`block in test_assert_raises_subclass_triggered\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(RE_LEVELS, "") unless jruby? + + assert_equal expected.chomp, actual + end + + def test_assert_raises_triggered_different + e = assert_raises Minitest::Assertion do + @tc.assert_raises RuntimeError do + raise SyntaxError, "icky" + end + end + + expected = clean <<-EOM.chomp + [RuntimeError] exception expected, not + Class: + Message: <\"icky\"> + ---Backtrace--- + FILE:LINE:in \`block in test_assert_raises_triggered_different\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(RE_LEVELS, "") unless jruby? + + assert_equal expected, actual + end + + def test_assert_raises_triggered_different_msg + e = assert_raises Minitest::Assertion do + @tc.assert_raises RuntimeError, "XXX" do + raise SyntaxError, "icky" + end + end + + expected = clean <<-EOM + XXX. + [RuntimeError] exception expected, not + Class: + Message: <\"icky\"> + ---Backtrace--- + FILE:LINE:in \`block in test_assert_raises_triggered_different_msg\' + --------------- + EOM + + actual = e.message.gsub(/^.+:\d+/, "FILE:LINE") + actual.gsub!(RE_LEVELS, "") unless jruby? 
+ + assert_equal expected.chomp, actual + end + + def test_assert_raises_triggered_none + e = assert_raises Minitest::Assertion do + @tc.assert_raises Minitest::Assertion do + # do nothing + end + end + + expected = "Minitest::Assertion expected but nothing was raised." + + assert_equal expected, e.message + end + + def test_assert_raises_triggered_none_msg + e = assert_raises Minitest::Assertion do + @tc.assert_raises Minitest::Assertion, "XXX" do + # do nothing + end + end + + expected = "XXX.\nMinitest::Assertion expected but nothing was raised." + + assert_equal expected, e.message + end + + def test_assert_raises_without_block + assert_triggered "assert_raises requires a block to capture errors." do + @tc.assert_raises StandardError + end + end + + def test_assert_respond_to + @tc.assert_respond_to "blah", :empty? + end + + def test_assert_respond_to_triggered + assert_triggered 'Expected "blah" (String) to respond to #rawr!.' do + @tc.assert_respond_to "blah", :rawr! + end + end + + def test_assert_same + @assertion_count = 3 + + o = "blah" + @tc.assert_same 1, 1 + @tc.assert_same :blah, :blah + @tc.assert_same o, o + end + + def test_assert_same_triggered + @assertion_count = 2 + + assert_triggered "Expected 2 (oid=N) to be the same as 1 (oid=N)." do + @tc.assert_same 1, 2 + end + + s1 = "blah" + s2 = "blah" + + assert_triggered 'Expected "blah" (oid=N) to be the same as "blah" (oid=N).' do + @tc.assert_same s1, s2 + end + end + + def test_assert_send + assert_deprecated :assert_send do + @tc.assert_send [1, :<, 2] + end + end + + def test_assert_send_bad + assert_deprecated :assert_send do + assert_triggered "Expected 1.>(*[2]) to return true." do + @tc.assert_send [1, :>, 2] + end + end + end + + def test_assert_silent + @assertion_count = 2 + + @tc.assert_silent do + # do nothing + end + end + + def test_assert_silent_triggered_err + assert_triggered util_msg("", "blah blah", "In stderr") do + @tc.assert_silent do + $stderr.print "blah blah" + end + end + end + + def test_assert_silent_triggered_out + @assertion_count = 2 + + assert_triggered util_msg("", "blah blah", "In stdout") do + @tc.assert_silent do + print "blah blah" + end + end + end + + def test_assert_throws + v = @tc.assert_throws :blah do + throw :blah + end + + assert_nil v + end + + def test_assert_throws_value + v = @tc.assert_throws :blah do + throw :blah, 42 + end + + assert_equal 42, v + end + + def test_assert_throws_argument_exception + @assertion_count = 0 + + assert_unexpected "ArgumentError" do + @tc.assert_throws :blah do + raise ArgumentError + end + end + end + + def test_assert_throws_different + assert_triggered "Expected :blah to have been thrown, not :not_blah." do + @tc.assert_throws :blah do + throw :not_blah + end + end + end + + def test_assert_throws_name_error + @assertion_count = 0 + + assert_unexpected "NameError" do + @tc.assert_throws :blah do + raise NameError + end + end + end + + def test_assert_throws_unthrown + assert_triggered "Expected :blah to have been thrown." do + @tc.assert_throws :blah do + # do nothing + end + end + end + + def test_assert_path_exists + @tc.assert_path_exists __FILE__ + end + + def test_assert_path_exists_triggered + assert_triggered "Expected path 'blah' to exist." do + @tc.assert_path_exists "blah" + end + end + + def test_capture_io + @assertion_count = 0 + + non_verbose do + out, err = capture_io do + puts "hi" + $stderr.puts "bye!" 
+ end + + assert_equal "hi\n", out + assert_equal "bye!\n", err + end + end + + def test_capture_subprocess_io + @assertion_count = 0 + + non_verbose do + out, err = capture_subprocess_io do + system("echo hi") + system("echo bye! 1>&2") + end + + assert_equal "hi\n", out + assert_equal "bye!", err.strip + end + end + + def test_class_asserts_match_refutes + @assertion_count = 0 + + methods = Minitest::Assertions.public_instance_methods + methods.map!(&:to_s) if Symbol === methods.first + + # These don't have corresponding refutes _on purpose_. They're + # useless and will never be added, so don't bother. + ignores = %w[assert_output assert_raises assert_send + assert_silent assert_throws assert_mock] + + # These are test/unit methods. I'm not actually sure why they're still here + ignores += %w[assert_no_match assert_not_equal assert_not_nil + assert_not_same assert_nothing_raised + assert_nothing_thrown assert_raise] + + asserts = methods.grep(/^assert/).sort - ignores + refutes = methods.grep(/^refute/).sort - ignores + + assert_empty refutes.map { |n| n.sub(/^refute/, "assert") } - asserts + assert_empty asserts.map { |n| n.sub(/^assert/, "refute") } - refutes + end + + def test_delta_consistency + @assertion_count = 2 + + @tc.assert_in_delta 0, 1, 1 + + assert_triggered "Expected |0 - 1| (1) to not be <= 1." do + @tc.refute_in_delta 0, 1, 1 + end + end + + def test_epsilon_consistency + @assertion_count = 2 + + @tc.assert_in_epsilon 1.0, 1.001 + + msg = "Expected |1.0 - 1.001| (0.000999xxx) to not be <= 0.001." + assert_triggered msg do + @tc.refute_in_epsilon 1.0, 1.001 + end + end + + def assert_fail_after t + @tc.fail_after t.year, t.month, t.day, "remove the deprecations" + end + + def test_fail_after + d0 = Time.now + d1 = d0 + 86_400 # I am an idiot + + assert_silent do + assert_fail_after d1 + end + + assert_triggered "remove the deprecations" do + assert_fail_after d0 + end + end + + def test_flunk + assert_triggered "Epic Fail!" do + @tc.flunk + end + end + + def test_flunk_message + assert_triggered @zomg do + @tc.flunk @zomg + end + end + + def test_pass + @tc.pass + end + + def test_refute + @assertion_count = 2 + + @tc.assert_equal true, @tc.refute(false), "returns true on success" + end + + def test_refute_empty + @assertion_count = 2 + + @tc.refute_empty [1] + end + + def test_refute_empty_triggered + @assertion_count = 2 + + assert_triggered "Expected [] to not be empty." do + @tc.refute_empty [] + end + end + + def test_refute_equal + @tc.refute_equal "blah", "yay" + end + + def test_refute_equal_triggered + assert_triggered 'Expected "blah" to not be equal to "blah".' do + @tc.refute_equal "blah", "blah" + end + end + + def test_refute_in_delta + @tc.refute_in_delta 0.0, 1.0 / 1000, 0.000001 + end + + def test_refute_in_delta_triggered + x = "0.1" + assert_triggered "Expected |0.0 - 0.001| (0.001) to not be <= #{x}." do + @tc.refute_in_delta 0.0, 1.0 / 1000, 0.1 + end + end + + def test_refute_in_epsilon + @tc.refute_in_epsilon 10_000, 9990-1 + end + + def test_refute_in_epsilon_triggered + assert_triggered "Expected |10000 - 9990| (10) to not be <= 10.0." do + @tc.refute_in_epsilon 10_000, 9990 + flunk + end + end + + def test_refute_includes + @assertion_count = 2 + + @tc.refute_includes [true], false + end + + def test_refute_includes_triggered + @assertion_count = 3 + + e = @tc.assert_raises Minitest::Assertion do + @tc.refute_includes [true], true + end + + expected = "Expected [true] to not include true." 
+ assert_equal expected, e.message + end + + def test_refute_instance_of + @tc.refute_instance_of Array, "blah" + end + + def test_refute_instance_of_triggered + assert_triggered 'Expected "blah" to not be an instance of String.' do + @tc.refute_instance_of String, "blah" + end + end + + def test_refute_kind_of + @tc.refute_kind_of Array, "blah" + end + + def test_refute_kind_of_triggered + assert_triggered 'Expected "blah" to not be a kind of String.' do + @tc.refute_kind_of String, "blah" + end + end + + def test_refute_match + @assertion_count = 2 + @tc.refute_match(/\d+/, "blah blah blah") + end + + def test_refute_match_matcher_object + @assertion_count = 2 + pattern = Object.new + def pattern.=~ _; false end + @tc.refute_match pattern, 5 + end + + def test_refute_match_object_triggered + @assertion_count = 2 + + pattern = Object.new + def pattern.=~ _; true end + def pattern.inspect; "[Object]" end + + assert_triggered "Expected [Object] to not match 5." do + @tc.refute_match pattern, 5 + end + end + + def test_refute_match_triggered + @assertion_count = 2 + assert_triggered 'Expected /\w+/ to not match "blah blah blah".' do + @tc.refute_match(/\w+/, "blah blah blah") + end + end + + def test_refute_nil + @tc.refute_nil 42 + end + + def test_refute_nil_triggered + assert_triggered "Expected nil to not be nil." do + @tc.refute_nil nil + end + end + + def test_refute_operator + @tc.refute_operator 2, :<, 1 + end + + def test_refute_operator_bad_object + bad = Object.new + def bad.== _; true end + + @tc.refute_operator true, :equal?, bad + end + + def test_refute_operator_triggered + assert_triggered "Expected 2 to not be > 1." do + @tc.refute_operator 2, :>, 1 + end + end + + def test_refute_predicate + @tc.refute_predicate "42", :empty? + end + + def test_refute_predicate_triggered + assert_triggered 'Expected "" to not be empty?.' do + @tc.refute_predicate "", :empty? + end + end + + def test_refute_respond_to + @tc.refute_respond_to "blah", :rawr! + end + + def test_refute_respond_to_triggered + assert_triggered 'Expected "blah" to not respond to empty?.' do + @tc.refute_respond_to "blah", :empty? + end + end + + def test_refute_same + @tc.refute_same 1, 2 + end + + def test_refute_same_triggered + assert_triggered "Expected 1 (oid=N) to not be the same as 1 (oid=N)." do + @tc.refute_same 1, 1 + end + end + + def test_refute_path_exists + @tc.refute_path_exists "blah" + end + + def test_refute_path_exists_triggered + assert_triggered "Expected path '#{__FILE__}' to not exist." do + @tc.refute_path_exists __FILE__ + end + end + + def test_skip + @assertion_count = 0 + + assert_triggered "haha!", Minitest::Skip do + @tc.skip "haha!" 
+ end + end + + def assert_skip_until t, msg + @tc.skip_until t.year, t.month, t.day, msg + end + + def test_skip_until + @assertion_count = 0 + + d0 = Time.now + d1 = d0 + 86_400 # I am an idiot + + assert_output "", /Stale skip_until \"not yet\" at .*?:\d+$/ do + assert_skip_until d0, "not yet" + end + + assert_triggered "not ready yet", Minitest::Skip do + assert_skip_until d1, "not ready yet" + end + end + + def util_msg exp, act, msg = nil + s = "Expected: #{exp.inspect}\n Actual: #{act.inspect}" + s = "#{msg}.\n#{s}" if msg + s + end + + def without_diff + old_diff = Minitest::Assertions.diff + Minitest::Assertions.diff = nil + + yield + ensure + Minitest::Assertions.diff = old_diff + end +end + +class TestMinitestAssertionHelpers < Minitest::Test + def assert_mu_pp exp, input, raw = false + act = mu_pp input + + if String === input && !raw then + assert_equal "\"#{exp}\"", act + else + assert_equal exp, act + end + end + + def assert_mu_pp_for_diff exp, input, raw = false + act = mu_pp_for_diff input + + if String === input && !raw then + assert_equal "\"#{exp}\"", act + else + assert_equal exp, act + end + end + + def test_diff_equal + msg = "No visible difference in the String#inspect output. + You should look at the implementation of #== on String or its members. + \"blahblahblahblahblahblahblahblahblahblah\"".gsub(/^ +/, "") + + o1 = "blah" * 10 + o2 = "blah" * 10 + def o1.== _ + false + end + + assert_equal msg, diff(o1, o2) + end + + def test_diff_str_mixed + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1 +1 @@ + -"A\\n\nB" + +"A\n\\nB" + EOM + + exp = "A\\n\nB" + act = "A\n\\nB" + + assert_equal msg, diff(exp, act) + end + + def test_diff_str_multiline + msg = <<-'EOM'.gsub(/^ {10}/, "") # NOTE single quotes on heredoc + --- expected + +++ actual + @@ -1,2 +1,2 @@ + "A + -B" + +C" + EOM + + exp = "A\nB" + act = "A\nC" + + assert_equal msg, diff(exp, act) + end + + def test_diff_str_simple + msg = <<-'EOM'.gsub(/^ {10}/, "").chomp # NOTE single quotes on heredoc + Expected: "A" + Actual: "B" + EOM + + exp = "A" + act = "B" + + assert_equal msg, diff(exp, act) + end + + def test_message + assert_equal "blah2.", message { "blah2" }.call + assert_equal "blah2.", message("") { "blah2" }.call + assert_equal "blah1.\nblah2.", message(:blah1) { "blah2" }.call + assert_equal "blah1.\nblah2.", message("blah1") { "blah2" }.call + + message = proc { "blah1" } + assert_equal "blah1.\nblah2.", message(message) { "blah2" }.call + + message = message { "blah1" } + assert_equal "blah1.\nblah2.", message(message) { "blah2" }.call + end + + def test_message_deferred + var = nil + + msg = message { var = "blah" } + + assert_nil var + + msg.call + + assert_equal "blah", var + end + + def test_mu_pp + assert_mu_pp 42.inspect, 42 + assert_mu_pp %w[a b c].inspect, %w[a b c] + assert_mu_pp "A B", "A B" + assert_mu_pp "A\\nB", "A\nB" + assert_mu_pp "A\\\\nB", 'A\nB' # notice single quotes + end + + def test_mu_pp_for_diff + assert_mu_pp_for_diff "#", Object.new + assert_mu_pp_for_diff "A B", "A B" + assert_mu_pp_for_diff [1, 2, 3].inspect, [1, 2, 3] + assert_mu_pp_for_diff "A\nB", "A\nB" + end + + def test_mu_pp_for_diff_str_bad_encoding + str = "\666".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_bad_encoding_both + str = "\666A\\n\nB".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6A\\\\n\\nB\"" 
+ + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_encoding + str = "A\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\nB\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_encoding_both + str = "A\\n\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\\\\n\\nB\"" + + assert_mu_pp_for_diff exp, str, :raw + end + + def test_mu_pp_for_diff_str_nerd + assert_mu_pp_for_diff "A\\nB\\\\nC", "A\nB\\nC" + assert_mu_pp_for_diff "\\nB\\\\nC", "\nB\\nC" + assert_mu_pp_for_diff "\\nB\\\\n", "\nB\\n" + assert_mu_pp_for_diff "\\n\\\\n", "\n\\n" + assert_mu_pp_for_diff "\\\\n\\n", "\\n\n" + assert_mu_pp_for_diff "\\\\nB\\n", "\\nB\n" + assert_mu_pp_for_diff "\\\\nB\\nC", "\\nB\nC" + assert_mu_pp_for_diff "A\\\\n\\nB", "A\\n\nB" + assert_mu_pp_for_diff "A\\n\\\\nB", "A\n\\nB" + assert_mu_pp_for_diff "\\\\n\\n", "\\n\n" + assert_mu_pp_for_diff "\\n\\\\n", "\n\\n" + end + + def test_mu_pp_for_diff_str_normal + assert_mu_pp_for_diff "", "" + assert_mu_pp_for_diff "A\\n\n", "A\\n" + assert_mu_pp_for_diff "A\\n\nB", "A\\nB" + assert_mu_pp_for_diff "A\n", "A\n" + assert_mu_pp_for_diff "A\nB", "A\nB" + assert_mu_pp_for_diff "\\n\n", "\\n" + assert_mu_pp_for_diff "\n", "\n" + assert_mu_pp_for_diff "\\n\nA", "\\nA" + assert_mu_pp_for_diff "\nA", "\nA" + end + + def test_mu_pp_str_bad_encoding + str = "\666".force_encoding Encoding::UTF_8 + exp = "# encoding: UTF-8\n# valid: false\n\"\\xB6\"" + + assert_mu_pp exp, str, :raw + end + + def test_mu_pp_str_encoding + str = "A\nB".b + exp = "# encoding: ASCII-8BIT\n# valid: true\n\"A\\nB\"" + + assert_mu_pp exp, str, :raw + end + + def test_mu_pp_str_immutable + printer = Class.new { extend Minitest::Assertions } + str = "test".freeze + assert_equal '"test"', printer.mu_pp(str) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_benchmark.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_benchmark.rb new file mode 100644 index 0000000..88abf77 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_benchmark.rb @@ -0,0 +1,137 @@ +require "minitest/autorun" +require "minitest/benchmark" + +## +# Used to verify data: +# http://www.wolframalpha.com/examples/RegressionAnalysis.html + +class TestMinitestBenchmark < Minitest::Test + def test_cls_bench_exp + assert_equal [2, 4, 8, 16, 32], Minitest::Benchmark.bench_exp(2, 32, 2) + end + + def test_cls_bench_linear + assert_equal [2, 4, 6, 8, 10], Minitest::Benchmark.bench_linear(2, 10, 2) + end + + def test_cls_runnable_methods + assert_equal [], Minitest::Benchmark.runnable_methods + + c = Class.new(Minitest::Benchmark) do + def bench_blah + end + end + + assert_equal ["bench_blah"], c.runnable_methods + end + + def test_cls_bench_range + assert_equal [1, 10, 100, 1_000, 10_000], Minitest::Benchmark.bench_range + end + + def test_fit_exponential_clean + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = x.map { |n| 1.1 * Math.exp(2.1 * n) } + + assert_fit :exponential, x, y, 1.0, 1.1, 2.1 + end + + def test_fit_exponential_noisy + x = [1.0, 1.9, 2.6, 3.4, 5.0] + y = [12, 10, 8.2, 6.9, 5.9] + + # verified with Numbers and R + assert_fit :exponential, x, y, 0.95, 13.81148, -0.1820 + end + + def test_fit_logarithmic_clean + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = x.map { |n| 1.1 + 2.1 * Math.log(n) } + + assert_fit :logarithmic, x, y, 1.0, 1.1, 2.1 + end + + def test_fit_logarithmic_noisy + x = [1.0, 2.0, 3.0, 4.0, 5.0] + # Generated with + # y = x.map { |n| 
jitter = 0.999 + 0.002 * rand; (Math.log(n) ) * jitter } + y = [0.0, 0.6935, 1.0995, 1.3873, 1.6097] + + assert_fit :logarithmic, x, y, 0.95, 0, 1 + end + + def test_fit_constant_clean + x = (1..5).to_a + y = [5.0, 5.0, 5.0, 5.0, 5.0] + + assert_fit :linear, x, y, nil, 5.0, 0 + end + + def test_fit_constant_noisy + x = (1..5).to_a + y = [1.0, 1.2, 1.0, 0.8, 1.0] + + # verified in numbers and R + assert_fit :linear, x, y, nil, 1.12, -0.04 + end + + def test_fit_linear_clean + # y = m * x + b where m = 2.2, b = 3.1 + x = (1..5).to_a + y = x.map { |n| 2.2 * n + 3.1 } + + assert_fit :linear, x, y, 1.0, 3.1, 2.2 + end + + def test_fit_linear_noisy + x = [ 60, 61, 62, 63, 65] + y = [3.1, 3.6, 3.8, 4.0, 4.1] + + # verified in numbers and R + assert_fit :linear, x, y, 0.8315, -7.9635, 0.1878 + end + + def test_fit_power_clean + # y = A x ** B, where B = b and A = e ** a + # if, A = 1, B = 2, then + + x = [1.0, 2.0, 3.0, 4.0, 5.0] + y = [1.0, 4.0, 9.0, 16.0, 25.0] + + assert_fit :power, x, y, 1.0, 1.0, 2.0 + end + + def test_fit_power_noisy + # from www.engr.uidaho.edu/thompson/courses/ME330/lecture/least_squares.html + x = [10, 12, 15, 17, 20, 22, 25, 27, 30, 32, 35] + y = [95, 105, 125, 141, 173, 200, 253, 298, 385, 459, 602] + + # verified in numbers + assert_fit :power, x, y, 0.90, 2.6217, 1.4556 + + # income to % of households below income amount + # http://library.wolfram.com/infocenter/Conferences/6461/PowerLaws.nb + x = [15_000, 25_000, 35_000, 50_000, 75_000, 100_000] + y = [0.154, 0.283, 0.402, 0.55, 0.733, 0.843] + + # verified in numbers + assert_fit :power, x, y, 0.96, 3.119e-5, 0.8959 + end + + def assert_fit msg, x, y, fit, exp_a, exp_b + bench = Minitest::Benchmark.new :blah + + a, b, rr = bench.send "fit_#{msg}", x, y + + assert_operator rr, :>=, fit if fit + assert_in_delta exp_a, a + assert_in_delta exp_b, b + end +end + +describe "my class Bench" do + klass = self + it "should provide bench methods" do + klass.must_respond_to :bench + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_mock.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_mock.rb new file mode 100644 index 0000000..561b1a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_mock.rb @@ -0,0 +1,885 @@ +require "minitest/autorun" + +class TestMinitestMock < Minitest::Test + parallelize_me! 
+ + def setup + @mock = Minitest::Mock.new.expect(:foo, nil) + @mock.expect(:meaning_of_life, 42) + end + + def test_create_stub_method + assert_nil @mock.foo + end + + def test_allow_return_value_specification + assert_equal 42, @mock.meaning_of_life + end + + def test_blow_up_if_not_called + @mock.foo + + util_verify_bad "expected meaning_of_life() => 42" + end + + def test_not_blow_up_if_everything_called + @mock.foo + @mock.meaning_of_life + + assert_mock @mock + end + + def test_allow_expectations_to_be_added_after_creation + @mock.expect(:bar, true) + assert @mock.bar + end + + def test_not_verify_if_new_expected_method_is_not_called + @mock.foo + @mock.meaning_of_life + @mock.expect(:bar, true) + + util_verify_bad "expected bar() => true" + end + + def test_blow_up_on_wrong_number_of_arguments + @mock.foo + @mock.meaning_of_life + @mock.expect(:sum, 3, [1, 2]) + + e = assert_raises ArgumentError do + @mock.sum + end + + assert_equal "mocked method :sum expects 2 arguments, got 0", e.message + end + + def test_return_mock_does_not_raise + retval = Minitest::Mock.new + mock = Minitest::Mock.new + mock.expect(:foo, retval) + mock.foo + + assert_mock mock + end + + def test_mock_args_does_not_raise + arg = Minitest::Mock.new + mock = Minitest::Mock.new + mock.expect(:foo, nil, [arg]) + mock.foo(arg) + + assert_mock mock + end + + def test_set_expectation_on_special_methods + mock = Minitest::Mock.new + + mock.expect :object_id, "received object_id" + assert_equal "received object_id", mock.object_id + + mock.expect :respond_to_missing?, "received respond_to_missing?" + assert_equal "received respond_to_missing?", mock.respond_to_missing? + + mock.expect :===, "received ===" + assert_equal "received ===", mock.=== + + mock.expect :inspect, "received inspect" + assert_equal "received inspect", mock.inspect + + mock.expect :to_s, "received to_s" + assert_equal "received to_s", mock.to_s + + mock.expect :public_send, "received public_send" + assert_equal "received public_send", mock.public_send + + mock.expect :send, "received send" + assert_equal "received send", mock.send + + assert_mock mock + end + + def test_expectations_can_be_satisfied_via_send + @mock.send :foo + @mock.send :meaning_of_life + + assert_mock @mock + end + + def test_expectations_can_be_satisfied_via_public_send + skip "Doesn't run on 1.8" if RUBY_VERSION < "1.9" + + @mock.public_send :foo + @mock.public_send :meaning_of_life + + assert_mock @mock + end + + def test_blow_up_on_wrong_arguments + @mock.foo + @mock.meaning_of_life + @mock.expect(:sum, 3, [1, 2]) + + e = assert_raises MockExpectationError do + @mock.sum(2, 4) + end + + exp = "mocked method :sum called with unexpected arguments [2, 4]" + assert_equal exp, e.message + end + + def test_expect_with_non_array_args + e = assert_raises ArgumentError do + @mock.expect :blah, 3, false + end + + assert_match "args must be an array", e.message + end + + def test_respond_appropriately + assert @mock.respond_to?(:foo) + assert @mock.respond_to?(:foo, true) + assert @mock.respond_to?("foo") + assert !@mock.respond_to?(:bar) + end + + def test_no_method_error_on_unexpected_methods + e = assert_raises NoMethodError do + @mock.bar + end + + expected = "unmocked method :bar, expected one of [:foo, :meaning_of_life]" + + assert_match expected, e.message + end + + def test_assign_per_mock_return_values + a = Minitest::Mock.new + b = Minitest::Mock.new + + a.expect(:foo, :a) + b.expect(:foo, :b) + + assert_equal :a, a.foo + assert_equal :b, b.foo + end + + def 
test_do_not_create_stub_method_on_new_mocks + a = Minitest::Mock.new + a.expect(:foo, :a) + + assert !Minitest::Mock.new.respond_to?(:foo) + end + + def test_mock_is_a_blank_slate + @mock.expect :kind_of?, true, [String] + @mock.expect :==, true, [1] + + assert @mock.kind_of?(String), "didn't mock :kind_of\?" + assert @mock == 1, "didn't mock :==" + end + + def test_verify_allows_called_args_to_be_loosely_specified + mock = Minitest::Mock.new + mock.expect :loose_expectation, true, [Integer] + mock.loose_expectation 1 + + assert_mock mock + end + + def test_verify_raises_with_strict_args + mock = Minitest::Mock.new + mock.expect :strict_expectation, true, [2] + + e = assert_raises MockExpectationError do + mock.strict_expectation 1 + end + + exp = "mocked method :strict_expectation called with unexpected arguments [1]" + assert_equal exp, e.message + end + + def test_method_missing_empty + mock = Minitest::Mock.new + + mock.expect :a, nil + + mock.a + + e = assert_raises MockExpectationError do + mock.a + end + + assert_equal "No more expects available for :a: []", e.message + end + + def test_same_method_expects_are_verified_when_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:baz] + + mock.foo :bar + mock.foo :baz + + assert_mock mock + end + + def test_same_method_expects_blow_up_when_not_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:baz] + + mock.foo :bar + + e = assert_raises(MockExpectationError) { mock.verify } + + exp = "expected foo(:baz) => nil, got [foo(:bar) => nil]" + + assert_equal exp, e.message + end + + def test_same_method_expects_with_same_args_blow_up_when_not_all_called + mock = Minitest::Mock.new + mock.expect :foo, nil, [:bar] + mock.expect :foo, nil, [:bar] + + mock.foo :bar + + e = assert_raises(MockExpectationError) { mock.verify } + + exp = "expected foo(:bar) => nil, got [foo(:bar) => nil]" + + assert_equal exp, e.message + end + + def test_verify_passes_when_mock_block_returns_true + mock = Minitest::Mock.new + mock.expect :foo, nil do + true + end + + mock.foo + + assert_mock mock + end + + def test_mock_block_is_passed_function_params + arg1, arg2, arg3 = :bar, [1, 2, 3], { :a => "a" } + mock = Minitest::Mock.new + mock.expect :foo, nil do |a1, a2, a3| + a1 == arg1 && a2 == arg2 && a3 == arg3 + end + + mock.foo arg1, arg2, arg3 + + assert_mock mock + end + + def test_mock_block_is_passed_function_block + mock = Minitest::Mock.new + block = proc { "bar" } + mock.expect :foo, nil do |arg, &blk| + arg == "foo" && + blk == block + end + mock.foo "foo", &block + assert_mock mock + end + + def test_verify_fails_when_mock_block_returns_false + mock = Minitest::Mock.new + mock.expect :foo, nil do + false + end + + e = assert_raises(MockExpectationError) { mock.foo } + exp = "mocked method :foo failed block w/ []" + + assert_equal exp, e.message + end + + def test_mock_block_throws_if_args_passed + mock = Minitest::Mock.new + + e = assert_raises(ArgumentError) do + mock.expect :foo, nil, [:a, :b, :c] do + true + end + end + + exp = "args ignored when block given" + + assert_match exp, e.message + end + + def test_mock_returns_retval_when_called_with_block + mock = Minitest::Mock.new + mock.expect(:foo, 32) do + true + end + + rs = mock.foo + + assert_equal rs, 32 + end + + def util_verify_bad exp + e = assert_raises MockExpectationError do + @mock.verify + end + + assert_equal exp, e.message + end + + def test_mock_called_via_send + mock = Minitest::Mock.new + 
mock.expect(:foo, true) + + mock.send :foo + assert_mock mock + end + + def test_mock_called_via___send__ + mock = Minitest::Mock.new + mock.expect(:foo, true) + + mock.__send__ :foo + assert_mock mock + end + + def test_mock_called_via_send_with_args + mock = Minitest::Mock.new + mock.expect(:foo, true, [1, 2, 3]) + + mock.send(:foo, 1, 2, 3) + assert_mock mock + end + +end + +require "minitest/metametameta" + +class TestMinitestStub < Minitest::Test + # Do not parallelize since we're calling stub on class methods + + def setup + super + Minitest::Test.reset + + @tc = Minitest::Test.new "fake tc" + @assertion_count = 1 + end + + def teardown + super + assert_equal @assertion_count, @tc.assertions if self.passed? + end + + class Time + def self.now + 24 + end + end + + def assert_stub val_or_callable + @assertion_count += 1 + + t = Time.now.to_i + + Time.stub :now, val_or_callable do + @tc.assert_equal 42, Time.now + end + + @tc.assert_operator Time.now.to_i, :>=, t + end + + def test_stub_private_module_method + @assertion_count += 1 + + t0 = Time.now + + self.stub :sleep, nil do + @tc.assert_nil sleep(10) + end + + @tc.assert_operator Time.now - t0, :<=, 1 + end + + def test_stub_private_module_method_indirect + @assertion_count += 1 + + fail_clapper = Class.new do + def fail_clap + raise + :clap + end + end.new + + fail_clapper.stub :raise, nil do |safe_clapper| + @tc.assert_equal :clap, safe_clapper.fail_clap # either form works + @tc.assert_equal :clap, fail_clapper.fail_clap # yay closures + end + end + + def test_stub_public_module_method + Math.stub :log10, :stubbed do + @tc.assert_equal :stubbed, Math.log10(1000) + end + end + + def test_stub_value + assert_stub 42 + end + + def test_stub_block + assert_stub lambda { 42 } + end + + def test_stub_block_args + @assertion_count += 1 + + t = Time.now.to_i + + Time.stub :now, lambda { |n| n * 2 } do + @tc.assert_equal 42, Time.now(21) + end + + @tc.assert_operator Time.now.to_i, :>=, t + end + + def test_stub_callable + obj = Object.new + + def obj.call + 42 + end + + assert_stub obj + end + + def test_stub_yield_self + obj = "foo" + + val = obj.stub :to_s, "bar" do |s| + s.to_s + end + + @tc.assert_equal "bar", val + end + + def test_dynamic_method + @assertion_count = 2 + + dynamic = Class.new do + def self.respond_to? meth + meth == :found + end + + def self.method_missing meth, *args, &block + if meth == :found + false + else + super + end + end + end + + val = dynamic.stub(:found, true) do |s| + s.found + end + + @tc.assert_equal true, val + @tc.assert_equal false, dynamic.found + end + + def test_stub_NameError + e = @tc.assert_raises NameError do + Time.stub :nope_nope_nope, 42 do + # do nothing + end + end + + exp = jruby? ? /Undefined method nope_nope_nope for '#{self.class}::Time'/ : + /undefined method `nope_nope_nope' for( class)? 
`#{self.class}::Time'/ + assert_match exp, e.message + end + + def test_mock_with_yield + mock = Minitest::Mock.new + mock.expect(:write, true) do + true + end + rs = nil + + File.stub :open, true, mock do + File.open "foo.txt", "r" do |f| + rs = f.write + end + end + @tc.assert_equal true, rs + end + + alias test_stub_value__old test_stub_value # TODO: remove/rename + + ## Permutation Sets: + + # [:value, :lambda] + # [:*, :block, :block_call] + # [:**, :block_args] + # + # Where: + # + # :value = a normal value + # :lambda = callable or lambda + # :* = no block + # :block = normal block + # :block_call = :lambda invokes the block (N/A for :value) + # :** = no args + # :args = args passed to stub + + ## Permutations + + # [:call, :*, :**] =>5 callable+block FIX: CALL BOTH (bug) + # [:call, :*, :**] =>6 callable + + # [:lambda, :*, :**] => lambda result + + # [:lambda, :*, :args] => lambda result NO ARGS + + # [:lambda, :block, :**] =>5 lambda result FIX: CALL BOTH (bug) + # [:lambda, :block, :**] =>6 lambda result + + # [:lambda, :block, :args] =>5 lambda result FIX: CALL BOTH (bug) + # [:lambda, :block, :args] =>6 lambda result + # [:lambda, :block, :args] =>7 raise ArgumentError + + # [:lambda, :block_call, :**] =>5 lambda FIX: BUG!-not passed block to lambda + # [:lambda, :block_call, :**] =>6 lambda+block result + + # [:lambda, :block_call, :args] =>5 lambda FIX: BUG!-not passed block to lambda + # [:lambda, :block_call, :args] =>6 lambda+block result + + # [:value, :*, :**] => value + + # [:value, :*, :args] => value, ignore args + + # [:value, :block, :**] =>5 value, call block + # [:value, :block, :**] =>6 value + + # [:value, :block, :args] =>5 value, call block w/ args + # [:value, :block, :args] =>6 value, call block w/ args, deprecated + # [:value, :block, :args] =>7 raise ArgumentError + + # [:value, :block_call, :**] => N/A + + # [:value, :block_call, :args] => N/A + + class Bar + def call + puts "hi" + end + end + + class Foo + def self.blocking + yield + end + end + + class Thingy + def self.identity arg + arg + end + end + + class Keywords + def self.args req, kw1:, kw2:24 + [req, kw1, kw2] + end + end + + def test_stub_callable_keyword_args + Keywords.stub :args, ->(*args, **kws) { [args, kws] } do + @tc.assert_equal [["woot"], { kw1: 42 }], Keywords.args("woot", kw1: 42) + end + end + + def test_stub_callable_block_5 # from tenderlove + @assertion_count += 1 + Foo.stub5 :blocking, Bar.new do + @tc.assert_output "hi\n", "" do + Foo.blocking do + @tc.flunk "shouldn't ever hit this" + end + end + end + end + + def test_stub_callable_block_6 # from tenderlove + skip_stub6 + + @assertion_count += 1 + Foo.stub6 :blocking, Bar.new do + @tc.assert_output "hi\n", "" do + Foo.blocking do + @tc.flunk "shouldn't ever hit this" + end + end + end + end + + def test_stub_lambda + Thread.stub :new, lambda { 21+21 } do + @tc.assert_equal 42, Thread.new + end + end + + def test_stub_lambda_args + Thread.stub :new, lambda { 21+21 }, :wtf do + @tc.assert_equal 42, Thread.new + end + end + + def test_stub_lambda_block_5 + Thread.stub5 :new, lambda { 21+21 } do + result = Thread.new do + @tc.flunk "shouldn't ever hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_6 + skip_stub6 + + Thread.stub6 :new, lambda { 21+21 } do + result = Thread.new do + @tc.flunk "shouldn't ever hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_5 + @assertion_count += 1 + Thingy.stub5 :identity, lambda { |y| @tc.assert_equal :nope, y; 
21+21 }, :WTF? do + result = Thingy.identity :nope do |x| + @tc.flunk "shouldn't reach this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_6 + skip_stub6 + + @assertion_count += 1 + Thingy.stub6 :identity, lambda { |y| @tc.assert_equal :nope, y; 21+21 }, :WTF? do + result = Thingy.identity :nope do |x| + @tc.flunk "shouldn't reach this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_lambda_block_args_6_2 + skip_stub6 + + @tc.assert_raises ArgumentError do + Thingy.stub6_2 :identity, lambda { |y| :__not_run__ }, :WTF? do + # doesn't matter + end + end + end + + def test_stub_lambda_block_call_5 + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub5 :open, lambda { |p, m, &blk| blk and blk.call io } do + File.open "foo.txt", "r" do |f| + rs = f && f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_6 + skip_stub6 + + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub6 :open, lambda { |p, m, &blk| blk.call io } do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_5 + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub5(:open, lambda { |p, m, &blk| blk and blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_6 + skip_stub6 + + @assertion_count += 1 + rs = nil + io = StringIO.new "", "w" + File.stub6(:open, lambda { |p, m, &blk| blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_lambda_block_call_args_6_2 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + @tc.assert_raises ArgumentError do + File.stub6_2(:open, lambda { |p, m, &blk| blk.call io }, :WTF?) do + File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + end + end + @tc.assert_nil rs + @tc.assert_equal "", io.string + end + + def test_stub_value + Thread.stub :new, 42 do + result = Thread.new + @tc.assert_equal 42, result + end + end + + def test_stub_value_args + Thread.stub :new, 42, :WTF? 
do + result = Thread.new + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_5 + @assertion_count += 1 + Thread.stub5 :new, 42 do + result = Thread.new do + @tc.assert true + end + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_6 + skip_stub6 + + Thread.stub6 :new, 42 do + result = Thread.new do + @tc.flunk "shouldn't hit this" + end + @tc.assert_equal 42, result + end + end + + def test_stub_value_block_args_5 + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + File.stub5 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + @tc.assert_equal :value, result + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_value_block_args_5__break_if_not_passed + e = @tc.assert_raises NoMethodError do + File.stub5 :open, :return_value do # intentionally bad setup w/ no args + File.open "foo.txt", "r" do |f| + f.write "woot" + end + end + end + exp = "undefined method `write' for nil:NilClass" + assert_match exp, e.message + end + + def test_stub_value_block_args_6 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + assert_deprecated do + File.stub6 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + rs = f.write("woot") + end + @tc.assert_equal :value, result + end + end + @tc.assert_equal 4, rs + @tc.assert_equal "woot", io.string + end + + def test_stub_value_block_args_6_2 + skip_stub6 + + @assertion_count += 2 + rs = nil + io = StringIO.new "", "w" + @tc.assert_raises ArgumentError do + File.stub6_2 :open, :value, io do + result = File.open "foo.txt", "r" do |f| + @tc.flunk "shouldn't hit this" + end + @tc.assert_equal :value, result + end + end + @tc.assert_nil rs + @tc.assert_equal "", io.string + end + + def assert_deprecated re = /deprecated/ + assert_output "", re do + yield + end + end + + def skip_stub6 + skip "not yet" unless STUB6 + end +end + +STUB6 = ENV["STUB6"] + +if STUB6 then + require "minitest/mock6" if STUB6 +else + class Object + alias stub5 stub + alias stub6 stub + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_reporter.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_reporter.rb new file mode 100644 index 0000000..53d9588 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_reporter.rb @@ -0,0 +1,311 @@ +require "minitest/autorun" +require "minitest/metametameta" +require "forwardable" + +class Runnable + def woot + assert true + end +end + +class TestMinitestReporter < MetaMetaMetaTestCase + + attr_accessor :r, :io + + def new_composite_reporter + # Ruby bug in older versions of 2.2 & 2.3 on all platforms + # Latest Windows builds were 2.2.6 and 2.3.3. Latest Ruby releases were + # 2.2.10 and 2.3.8. + skip if windows? && RUBY_VERSION < '2.4' + reporter = Minitest::CompositeReporter.new + reporter << Minitest::SummaryReporter.new(self.io) + reporter << Minitest::ProgressReporter.new(self.io) + + # eg reporter.results -> reporters.first.results + reporter.extend Forwardable + reporter.delegate :first => :reporters + reporter.delegate %i[results count assertions options to_s] => :first + + reporter + end + + def setup + self.io = StringIO.new("") + self.r = new_composite_reporter + end + + def error_test + unless defined? 
@et then + @et = Minitest::Test.new(:woot) + @et.failures << Minitest::UnexpectedError.new(begin + raise "no" + rescue => e + e + end) + @et = Minitest::Result.from @et + end + @et + end + + def fail_test + unless defined? @ft then + @ft = Minitest::Test.new(:woot) + @ft.failures << begin + raise Minitest::Assertion, "boo" + rescue Minitest::Assertion => e + e + end + @ft = Minitest::Result.from @ft + end + @ft + end + + def passing_test + @pt ||= Minitest::Result.from Minitest::Test.new(:woot) + end + + def skip_test + unless defined? @st then + @st = Minitest::Test.new(:woot) + @st.failures << begin + raise Minitest::Skip + rescue Minitest::Assertion => e + e + end + @st = Minitest::Result.from @st + end + @st + end + + def test_to_s + r.record passing_test + r.record fail_test + assert_match "woot", r.to_s + end + + def test_options_skip_F + r.options[:skip] = "F" + + r.record passing_test + r.record fail_test + + refute_match "woot", r.to_s + end + + def test_options_skip_E + r.options[:skip] = "E" + + r.record passing_test + r.record error_test + + refute_match "RuntimeError: no", r.to_s + end + + def test_passed_eh_empty + assert_predicate r, :passed? + end + + def test_passed_eh_failure + r.results << fail_test + + refute_predicate r, :passed? + end + + SKIP_MSG = "\n\nYou have skipped tests. Run with --verbose for details." + + def test_passed_eh_error + r.start + + r.results << error_test + + refute_predicate r, :passed? + + r.report + + refute_match SKIP_MSG, io.string + end + + def test_passed_eh_skipped + r.start + r.results << skip_test + assert r.passed? + + restore_env do + r.report + end + + assert_match SKIP_MSG, io.string + end + + def test_passed_eh_skipped_verbose + r.options[:verbose] = true + + r.start + r.results << skip_test + assert r.passed? + r.report + + refute_match SKIP_MSG, io.string + end + + def test_start + r.start + + exp = "Run options: \n\n# Running:\n\n" + + assert_equal exp, io.string + end + + def test_record_pass + r.record passing_test + + assert_equal ".", io.string + assert_empty r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_fail + fail_test = self.fail_test + r.record fail_test + + assert_equal "F", io.string + assert_equal [fail_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_error + error_test = self.error_test + r.record error_test + + assert_equal "E", io.string + assert_equal [error_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_record_skip + skip_test = self.skip_test + r.record skip_test + + assert_equal "S", io.string + assert_equal [skip_test], r.results + assert_equal 1, r.count + assert_equal 0, r.assertions + end + + def test_report_empty + r.start + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + + + Finished in 0.00 + + 0 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_passing + r.start + r.record passing_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + . 
+ + Finished in 0.00 + + 1 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_failure + r.start + r.record fail_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + F + + Finished in 0.00 + + 1) Failure: + Minitest::Test#woot [FILE:LINE]: + boo + + 1 runs, 0 assertions, 1 failures, 0 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_error + r.start + r.record error_test + r.report + + exp = clean <<-EOM + Run options: + + # Running: + + E + + Finished in 0.00 + + 1) Error: + Minitest::Test#woot: + RuntimeError: no + FILE:LINE:in `error_test' + FILE:LINE:in `test_report_error' + + 1 runs, 0 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_equal exp, normalize_output(io.string) + end + + def test_report_skipped + r.start + r.record skip_test + + restore_env do + r.report + end + + exp = clean <<-EOM + Run options: + + # Running: + + S + + Finished in 0.00 + + 1 runs, 0 assertions, 0 failures, 0 errors, 1 skips + + You have skipped tests. Run with --verbose for details. + EOM + + assert_equal exp, normalize_output(io.string) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_spec.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_spec.rb new file mode 100644 index 0000000..201427d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_spec.rb @@ -0,0 +1,1062 @@ +# encoding: utf-8 +require "minitest/metametameta" +require "stringio" + +class MiniSpecA < Minitest::Spec; end +class MiniSpecB < Minitest::Test; extend Minitest::Spec::DSL; end +class MiniSpecC < MiniSpecB; end +class NamedExampleA < MiniSpecA; end +class NamedExampleB < MiniSpecB; end +class NamedExampleC < MiniSpecC; end +class ExampleA; end +class ExampleB < ExampleA; end + +describe Minitest::Spec do + # helps to deal with 2.4 deprecation of Fixnum for Integer + Int = 1.class + + # do not parallelize this suite... it just can"t handle it. + + def assert_triggered expected = "blah", klass = Minitest::Assertion + @assertion_count += 1 + + e = assert_raises(klass) do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, '\1') + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + msg.gsub!(/(\d\.\d{6})\d+/, '\1xxx') # normalize: ruby version, impl, platform + msg.gsub!(/:0x[Xa-fA-F0-9]{4,}[ @].+?>/, ":0xXXXXXX@PATH>") + + if expected + @assertion_count += 1 + case expected + when String then + assert_equal expected, msg + when Regexp then + @assertion_count += 1 + assert_match expected, msg + else + flunk "Unknown: #{expected.inspect}" + end + end + end + + def assert_success spec + assert_equal true, spec + end + + before do + @assertion_count = 4 + end + + after do + _(self.assertions).must_equal @assertion_count if passed? and not skipped? + end + + it "needs to be able to catch a Minitest::Assertion exception" do + @assertion_count = 1 + + assert_triggered "Expected 1 to not be equal to 1." do + _(1).wont_equal 1 + end + end + + it "needs to check for file existence" do + @assertion_count = 3 + + assert_success _(__FILE__).path_must_exist + + assert_triggered "Expected path 'blah' to exist." do + _("blah").path_must_exist + end + end + + it "needs to check for file non-existence" do + @assertion_count = 3 + + assert_success _("blah").path_wont_exist + + assert_triggered "Expected path '#{__FILE__}' to not exist." 
do + _(__FILE__).path_wont_exist + end + end + + it "needs to be sensible about must_include order" do + @assertion_count += 3 # must_include is 2 assertions + + assert_success _([1, 2, 3]).must_include(2) + + assert_triggered "Expected [1, 2, 3] to include 5." do + _([1, 2, 3]).must_include 5 + end + + assert_triggered "msg.\nExpected [1, 2, 3] to include 5." do + _([1, 2, 3]).must_include 5, "msg" + end + end + + it "needs to be sensible about wont_include order" do + @assertion_count += 3 # wont_include is 2 assertions + + assert_success _([1, 2, 3]).wont_include(5) + + assert_triggered "Expected [1, 2, 3] to not include 2." do + _([1, 2, 3]).wont_include 2 + end + + assert_triggered "msg.\nExpected [1, 2, 3] to not include 2." do + _([1, 2, 3]).wont_include 2, "msg" + end + end + + it "needs to catch an expected exception" do + @assertion_count = 2 + + expect { raise "blah" }.must_raise RuntimeError + expect { raise Minitest::Assertion }.must_raise Minitest::Assertion + end + + it "needs to catch an unexpected exception" do + @assertion_count -= 2 # no positive + + msg = <<-EOM.gsub(/^ {6}/, "").chomp + [RuntimeError] exception expected, not + Class: + Message: <"woot"> + ---Backtrace--- + EOM + + assert_triggered msg do + expect { raise StandardError, "woot" }.must_raise RuntimeError + end + + assert_triggered "msg.\n#{msg}" do + expect { raise StandardError, "woot" }.must_raise RuntimeError, "msg" + end + end + + it "needs to ensure silence" do + @assertion_count -= 1 # no msg + @assertion_count += 2 # assert_output is 2 assertions + + assert_success expect {}.must_be_silent + + assert_triggered "In stdout.\nExpected: \"\"\n Actual: \"xxx\"" do + expect { print "xxx" }.must_be_silent + end + end + + it "needs to have all methods named well" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count = 2 + + methods = Minitest::Expectations.public_instance_methods.grep(/must|wont/) + methods.map!(&:to_s) if Symbol === methods.first + + musts, wonts = methods.sort.partition { |m| m =~ /must/ } + + expected_musts = %w[must_be + must_be_close_to + must_be_empty + must_be_instance_of + must_be_kind_of + must_be_nil + must_be_same_as + must_be_silent + must_be_within_delta + must_be_within_epsilon + must_equal + must_include + must_match + must_output + must_raise + must_respond_to + must_throw + path_must_exist] + + bad = %w[not raise throw send output be_silent] + + expected_wonts = expected_musts.map { |m| m.sub(/must/, "wont") }.sort + expected_wonts.reject! { |m| m =~ /wont_#{Regexp.union(*bad)}/ } + + _(musts).must_equal expected_musts + _(wonts).must_equal expected_wonts + end + + it "needs to raise if an expected exception is not raised" do + @assertion_count -= 2 # no positive test + + assert_triggered "RuntimeError expected but nothing was raised." do + expect { 42 }.must_raise RuntimeError + end + + assert_triggered "msg.\nRuntimeError expected but nothing was raised." do + expect { 42 }.must_raise RuntimeError, "msg" + end + end + + it "needs to verify binary messages" do + assert_success _(42).wont_be(:<, 24) + + assert_triggered "Expected 24 to not be < 42." do + _(24).wont_be :<, 42 + end + + assert_triggered "msg.\nExpected 24 to not be < 42." do + _(24).wont_be :<, 42, "msg" + end + end + + it "needs to verify emptyness" do + @assertion_count += 3 # empty is 2 assertions + + assert_success _([]).must_be_empty + + assert_triggered "Expected [42] to be empty." do + _([42]).must_be_empty + end + + assert_triggered "msg.\nExpected [42] to be empty." 
do + _([42]).must_be_empty "msg" + end + end + + it "needs to verify equality" do + @assertion_count += 1 + + assert_success _(6 * 7).must_equal(42) + + assert_triggered "Expected: 42\n Actual: 54" do + _(6 * 9).must_equal 42 + end + + assert_triggered "msg.\nExpected: 42\n Actual: 54" do + _(6 * 9).must_equal 42, "msg" + end + + assert_triggered(/^-42\n\+#\n/) do + _(proc { 42 }).must_equal 42 # proc isn't called, so expectation fails + end + end + + it "needs to warn on equality with nil" do + @assertion_count += 1 # extra test + + out, err = capture_io do + assert_success _(nil).must_equal(nil) + end + + exp = "DEPRECATED: Use assert_nil if expecting nil from #{__FILE__}:#{__LINE__-3}. " \ + "This will fail in Minitest 6.\n" + exp = "" if $-w.nil? + + assert_empty out + assert_equal exp, err + end + + it "needs to verify floats outside a delta" do + @assertion_count += 1 # extra test + + assert_success _(24).wont_be_close_to(42) + + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= 0.001." do + _(6 * 7.0).wont_be_close_to 42 + end + + x = "1.0e-05" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_close_to 42, 0.00001 + end + + assert_triggered "msg.\nExpected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_close_to 42, 0.00001, "msg" + end + end + + it "needs to verify floats outside an epsilon" do + @assertion_count += 1 # extra test + + assert_success _(24).wont_be_within_epsilon(42) + + x = "0.042" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42 + end + + x = "0.00042" + assert_triggered "Expected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42, 0.00001 + end + + assert_triggered "msg.\nExpected |42 - 42.0| (0.0) to not be <= #{x}." do + _(6 * 7.0).wont_be_within_epsilon 42, 0.00001, "msg" + end + end + + it "needs to verify floats within a delta" do + @assertion_count += 1 # extra test + + assert_success _(6.0 * 7).must_be_close_to(42.0) + + assert_triggered "Expected |0.0 - 0.01| (0.01) to be <= 0.001." do + _(1.0 / 100).must_be_close_to 0.0 + end + + x = "1.0e-06" + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= #{x}." do + _(1.0 / 1000).must_be_close_to 0.0, 0.000001 + end + + assert_triggered "msg.\nExpected |0.0 - 0.001| (0.001) to be <= #{x}." do + _(1.0 / 1000).must_be_close_to 0.0, 0.000001, "msg" + end + end + + it "needs to verify floats within an epsilon" do + @assertion_count += 1 # extra test + + assert_success _(6.0 * 7).must_be_within_epsilon(42.0) + + assert_triggered "Expected |0.0 - 0.01| (0.01) to be <= 0.0." do + _(1.0 / 100).must_be_within_epsilon 0.0 + end + + assert_triggered "Expected |0.0 - 0.001| (0.001) to be <= 0.0." do + _(1.0 / 1000).must_be_within_epsilon 0.0, 0.000001 + end + + assert_triggered "msg.\nExpected |0.0 - 0.001| (0.001) to be <= 0.0." do + _(1.0 / 1000).must_be_within_epsilon 0.0, 0.000001, "msg" + end + end + + it "needs to verify identity" do + assert_success _(1).must_be_same_as(1) + + assert_triggered "Expected 1 (oid=N) to be the same as 2 (oid=N)." do + _(1).must_be_same_as 2 + end + + assert_triggered "msg.\nExpected 1 (oid=N) to be the same as 2 (oid=N)." do + _(1).must_be_same_as 2, "msg" + end + end + + it "needs to verify inequality" do + @assertion_count += 2 + assert_success _(42).wont_equal(6 * 9) + assert_success _(proc {}).wont_equal(42) + + assert_triggered "Expected 1 to not be equal to 1." 
do + _(1).wont_equal 1 + end + + assert_triggered "msg.\nExpected 1 to not be equal to 1." do + _(1).wont_equal 1, "msg" + end + end + + it "needs to verify instances of a class" do + assert_success _(42).wont_be_instance_of(String) + + assert_triggered "Expected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int + end + + assert_triggered "msg.\nExpected 42 to not be an instance of #{Int.name}." do + _(42).wont_be_instance_of Int, "msg" + end + end + + it "needs to verify kinds of a class" do + @assertion_count += 2 + + assert_success _(42).wont_be_kind_of(String) + assert_success _(proc {}).wont_be_kind_of(String) + + assert_triggered "Expected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int + end + + assert_triggered "msg.\nExpected 42 to not be a kind of #{Int.name}." do + _(42).wont_be_kind_of Int, "msg" + end + end + + it "needs to verify kinds of objects" do + @assertion_count += 3 # extra test + + assert_success _(6 * 7).must_be_kind_of(Int) + assert_success _(6 * 7).must_be_kind_of(Numeric) + + assert_triggered "Expected 42 to be a kind of String, not #{Int.name}." do + _(6 * 7).must_be_kind_of String + end + + assert_triggered "msg.\nExpected 42 to be a kind of String, not #{Int.name}." do + _(6 * 7).must_be_kind_of String, "msg" + end + + exp = "Expected # to be a kind of String, not Proc." + assert_triggered exp do + _(proc {}).must_be_kind_of String + end + end + + it "needs to verify mismatch" do + @assertion_count += 3 # match is 2 + + assert_success _("blah").wont_match(/\d+/) + + assert_triggered "Expected /\\w+/ to not match \"blah\"." do + _("blah").wont_match(/\w+/) + end + + assert_triggered "msg.\nExpected /\\w+/ to not match \"blah\"." do + _("blah").wont_match(/\w+/, "msg") + end + end + + it "needs to verify nil" do + assert_success _(nil).must_be_nil + + assert_triggered "Expected 42 to be nil." do + _(42).must_be_nil + end + + assert_triggered "msg.\nExpected 42 to be nil." do + _(42).must_be_nil "msg" + end + end + + it "needs to verify non-emptyness" do + @assertion_count += 3 # empty is 2 assertions + + assert_success _(["some item"]).wont_be_empty + + assert_triggered "Expected [] to not be empty." do + _([]).wont_be_empty + end + + assert_triggered "msg.\nExpected [] to not be empty." do + _([]).wont_be_empty "msg" + end + end + + it "needs to verify non-identity" do + assert_success _(1).wont_be_same_as(2) + + assert_triggered "Expected 1 (oid=N) to not be the same as 1 (oid=N)." do + _(1).wont_be_same_as 1 + end + + assert_triggered "msg.\nExpected 1 (oid=N) to not be the same as 1 (oid=N)." do + _(1).wont_be_same_as 1, "msg" + end + end + + it "needs to verify non-nil" do + assert_success _(42).wont_be_nil + + assert_triggered "Expected nil to not be nil." do + _(nil).wont_be_nil + end + + assert_triggered "msg.\nExpected nil to not be nil." do + _(nil).wont_be_nil "msg" + end + end + + it "needs to verify objects not responding to a message" do + assert_success _("").wont_respond_to(:woot!) + + assert_triggered "Expected \"\" to not respond to to_s." do + _("").wont_respond_to :to_s + end + + assert_triggered "msg.\nExpected \"\" to not respond to to_s." 
do + _("").wont_respond_to :to_s, "msg" + end + end + + it "needs to verify output in stderr" do + @assertion_count -= 1 # no msg + + assert_success expect { $stderr.print "blah" }.must_output(nil, "blah") + + assert_triggered "In stderr.\nExpected: \"blah\"\n Actual: \"xxx\"" do + expect { $stderr.print "xxx" }.must_output(nil, "blah") + end + end + + it "needs to verify output in stdout" do + @assertion_count -= 1 # no msg + + assert_success expect { print "blah" }.must_output("blah") + + assert_triggered "In stdout.\nExpected: \"blah\"\n Actual: \"xxx\"" do + expect { print "xxx" }.must_output("blah") + end + end + + it "needs to verify regexp matches" do + @assertion_count += 3 # must_match is 2 assertions + + assert_success _("blah").must_match(/\w+/) + + assert_triggered "Expected /\\d+/ to match \"blah\"." do + _("blah").must_match(/\d+/) + end + + assert_triggered "msg.\nExpected /\\d+/ to match \"blah\"." do + _("blah").must_match(/\d+/, "msg") + end + end + + describe "expect" do + before do + @assertion_count -= 3 + end + + it "can use expect" do + _(1 + 1).must_equal 2 + end + + it "can use expect with a lambda" do + _ { raise "blah" }.must_raise RuntimeError + end + + it "can use expect in a thread" do + Thread.new { _(1 + 1).must_equal 2 }.join + end + + it "can NOT use must_equal in a thread. It must use expect in a thread" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + assert_raises RuntimeError do + capture_io do + Thread.new { (1 + 1).must_equal 2 }.join + end + end + end + + it "fails gracefully when expectation used outside of `it`" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 1 + + e = assert_raises RuntimeError do + capture_io do + Thread.new { # forces ctx to be nil + describe("woot") do + (1 + 1).must_equal 2 + end + }.join + end + end + + assert_equal "Calling #must_equal outside of test.", e.message + end + + it "deprecates expectation used without _" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 3 + + exp = /DEPRECATED: global use of must_equal from/ + + assert_output "", exp do + (1 + 1).must_equal 2 + end + end + + # https://github.com/seattlerb/minitest/issues/837 + # https://github.com/rails/rails/pull/39304 + it "deprecates expectation used without _ with empty backtrace_filter" do + skip "N/A" if ENV["MT_NO_EXPECTATIONS"] + + @assertion_count += 3 + + exp = /DEPRECATED: global use of must_equal from/ + + with_empty_backtrace_filter do + assert_output "", exp do + (1 + 1).must_equal 2 + end + end + end + end + + it "needs to verify throw" do + @assertion_count += 4 # 2 extra tests + + assert_nil expect { throw :blah }.must_throw(:blah) + assert_equal 42, expect { throw :blah, 42 }.must_throw(:blah) + + assert_triggered "Expected :blah to have been thrown." do + expect {}.must_throw :blah + end + + assert_triggered "Expected :blah to have been thrown, not :xxx." do + expect { throw :xxx }.must_throw :blah + end + + assert_triggered "msg.\nExpected :blah to have been thrown." do + expect {}.must_throw :blah, "msg" + end + + assert_triggered "msg.\nExpected :blah to have been thrown, not :xxx." do + expect { throw :xxx }.must_throw :blah, "msg" + end + end + + it "needs to verify types of objects" do + assert_success _(6 * 7).must_be_instance_of(Int) + + exp = "Expected 42 to be an instance of String, not #{Int.name}." 
+ + assert_triggered exp do + _(6 * 7).must_be_instance_of String + end + + assert_triggered "msg.\n#{exp}" do + _(6 * 7).must_be_instance_of String, "msg" + end + end + + it "needs to verify using any (negative) predicate" do + @assertion_count -= 1 # doesn"t take a message + + assert_success _("blah").wont_be(:empty?) + + assert_triggered "Expected \"\" to not be empty?." do + _("").wont_be :empty? + end + end + + it "needs to verify using any binary operator" do + @assertion_count -= 1 # no msg + + assert_success _(41).must_be(:<, 42) + + assert_triggered "Expected 42 to be < 41." do + _(42).must_be(:<, 41) + end + end + + it "needs to verify using any predicate" do + @assertion_count -= 1 # no msg + + assert_success _("").must_be(:empty?) + + assert_triggered "Expected \"blah\" to be empty?." do + _("blah").must_be :empty? + end + end + + it "needs to verify using respond_to" do + assert_success _(42).must_respond_to(:+) + + assert_triggered "Expected 42 (#{Int.name}) to respond to #clear." do + _(42).must_respond_to :clear + end + + assert_triggered "msg.\nExpected 42 (#{Int.name}) to respond to #clear." do + _(42).must_respond_to :clear, "msg" + end + end +end + +describe Minitest::Spec, :let do + i_suck_and_my_tests_are_order_dependent! + + def _count + $let_count ||= 0 + end + + let :count do + $let_count += 1 + $let_count + end + + it "is evaluated once per example" do + _(_count).must_equal 0 + + _(count).must_equal 1 + _(count).must_equal 1 + + _(_count).must_equal 1 + end + + it "is REALLY evaluated once per example" do + _(_count).must_equal 1 + + _(count).must_equal 2 + _(count).must_equal 2 + + _(_count).must_equal 2 + end + + it 'raises an error if the name begins with "test"' do + expect { self.class.let(:test_value) { true } }.must_raise ArgumentError + end + + it "raises an error if the name shadows a normal instance method" do + expect { self.class.let(:message) { true } }.must_raise ArgumentError + end + + it "doesn't raise an error if it is just another let" do + v = proc do + describe :outer do + let(:bar) + describe :inner do + let(:bar) + end + end + :good + end.call + _(v).must_equal :good + end + + it "procs come after dont_flip" do + p = proc {} + assert_respond_to p, :call + _(p).must_respond_to :call + end +end + +describe Minitest::Spec, :subject do + attr_reader :subject_evaluation_count + + subject do + @subject_evaluation_count ||= 0 + @subject_evaluation_count += 1 + @subject_evaluation_count + end + + it "is evaluated once per example" do + _(subject).must_equal 1 + _(subject).must_equal 1 + _(subject_evaluation_count).must_equal 1 + end +end + +class TestMetaStatic < Minitest::Test + def test_children + Minitest::Spec.children.clear # prevents parallel run + + y = z = nil + x = describe "top-level thingy" do + y = describe "first thingy" do end + + it "top-level-it" do end + + z = describe "second thingy" do end + end + + assert_equal [x], Minitest::Spec.children + assert_equal [y, z], x.children + assert_equal [], y.children + assert_equal [], z.children + end + + def test_it_wont_remove_existing_child_test_methods + Minitest::Spec.children.clear # prevents parallel run + + inner = nil + outer = describe "outer" do + inner = describe "inner" do + it do + assert true + end + end + it do + assert true + end + end + + assert_equal 1, outer.public_instance_methods.grep(/^test_/).count + assert_equal 1, inner.public_instance_methods.grep(/^test_/).count + end + + def test_it_wont_add_test_methods_to_children + Minitest::Spec.children.clear # prevents 
parallel run + + inner = nil + outer = describe "outer" do + inner = describe "inner" do end + it do + assert true + end + end + + assert_equal 1, outer.public_instance_methods.grep(/^test_/).count + assert_equal 0, inner.public_instance_methods.grep(/^test_/).count + end +end + +class TestMeta < MetaMetaMetaTestCase + # do not call parallelize_me! here because specs use register_spec_type globally + + def util_structure + y = z = nil + before_list = [] + after_list = [] + x = describe "top-level thingy" do + before { before_list << 1 } + after { after_list << 1 } + + it "top-level-it" do end + + y = describe "inner thingy" do + before { before_list << 2 } + after { after_list << 2 } + it "inner-it" do end + + z = describe "very inner thingy" do + before { before_list << 3 } + after { after_list << 3 } + it "inner-it" do end + + it { } # ignore me + specify { } # anonymous it + end + end + end + + return x, y, z, before_list, after_list + end + + def test_register_spec_type + original_types = Minitest::Spec::TYPES.dup + + assert_includes Minitest::Spec::TYPES, [//, Minitest::Spec] + + Minitest::Spec.register_spec_type(/woot/, TestMeta) + + p = lambda do |_| true end + Minitest::Spec.register_spec_type TestMeta, &p + + keys = Minitest::Spec::TYPES.map(&:first) + + assert_includes keys, /woot/ + assert_includes keys, p + ensure + Minitest::Spec::TYPES.replace original_types + end + + def test_spec_type + original_types = Minitest::Spec::TYPES.dup + + Minitest::Spec.register_spec_type(/A$/, MiniSpecA) + Minitest::Spec.register_spec_type MiniSpecB do |desc| + desc.superclass == ExampleA + end + Minitest::Spec.register_spec_type MiniSpecC do |_desc, *addl| + addl.include? :woot + end + + assert_equal MiniSpecA, Minitest::Spec.spec_type(ExampleA) + assert_equal MiniSpecB, Minitest::Spec.spec_type(ExampleB) + assert_equal MiniSpecC, Minitest::Spec.spec_type(ExampleB, :woot) + ensure + Minitest::Spec::TYPES.replace original_types + end + + def test_bug_dsl_expectations + spec_class = Class.new MiniSpecB do + it "should work" do + _(0).must_equal 0 + end + end + + test_name = spec_class.instance_methods.sort.grep(/test/).first + + spec = spec_class.new test_name + + result = spec.run + + assert spec.passed? + assert result.passed? 
+ assert_equal 1, result.assertions + end + + def test_name + spec_a = describe ExampleA do; end + spec_b = describe ExampleB, :random_method do; end + spec_c = describe ExampleB, :random_method, :addl_context do; end + + assert_equal "ExampleA", spec_a.name + assert_equal "ExampleB::random_method", spec_b.name + assert_equal "ExampleB::random_method::addl_context", spec_c.name + end + + def test_name2 + assert_equal "NamedExampleA", NamedExampleA.name + assert_equal "NamedExampleB", NamedExampleB.name + assert_equal "NamedExampleC", NamedExampleC.name + + spec_a = describe ExampleA do; end + spec_b = describe ExampleB, :random_method do; end + + assert_equal "ExampleA", spec_a.name + assert_equal "ExampleB::random_method", spec_b.name + end + + def test_structure + x, y, z, * = util_structure + + assert_equal "top-level thingy", x.to_s + assert_equal "top-level thingy::inner thingy", y.to_s + assert_equal "top-level thingy::inner thingy::very inner thingy", z.to_s + + assert_equal "top-level thingy", x.desc + assert_equal "inner thingy", y.desc + assert_equal "very inner thingy", z.desc + + top_methods = %w[setup teardown test_0001_top-level-it] + inner_methods1 = %w[setup teardown test_0001_inner-it] + inner_methods2 = inner_methods1 + + %w[test_0002_anonymous test_0003_anonymous] + + assert_equal top_methods, x.instance_methods(false).sort.map(&:to_s) + assert_equal inner_methods1, y.instance_methods(false).sort.map(&:to_s) + assert_equal inner_methods2, z.instance_methods(false).sort.map(&:to_s) + end + + def test_structure_postfix_it + z = nil + y = describe "outer" do + # NOT here, below the inner-describe! + # it "inner-it" do end + + z = describe "inner" do + it "inner-it" do end + end + + # defined AFTER inner describe means we'll try to wipe out the inner-it + it "inner-it" do end + end + + assert_equal %w[test_0001_inner-it], y.instance_methods(false).map(&:to_s) + assert_equal %w[test_0001_inner-it], z.instance_methods(false).map(&:to_s) + end + + def test_setup_teardown_behavior + _, _, z, before_list, after_list = util_structure + + @tu = z + + run_tu_with_fresh_reporter + + size = z.runnable_methods.size + assert_equal [1, 2, 3] * size, before_list + assert_equal [3, 2, 1] * size, after_list + end + + def test_describe_first_structure + x1 = x2 = y = z = nil + x = describe "top-level thingy" do + y = describe "first thingy" do end + + x1 = it "top level it" do end + x2 = it "не латинские &いった α, β, γ, δ, ε hello!!! world" do end + + z = describe "second thingy" do end + end + + test_methods = ["test_0001_top level it", + "test_0002_не латинские &いった α, β, γ, δ, ε hello!!! 
world", + ].sort + + assert_equal test_methods, [x1, x2] + assert_equal test_methods, x.instance_methods.grep(/^test/).map(&:to_s).sort + assert_equal [], y.instance_methods.grep(/^test/) + assert_equal [], z.instance_methods.grep(/^test/) + end + + def test_structure_subclasses + z = nil + x = Class.new Minitest::Spec do + def xyz; end + end + y = Class.new x do + z = describe("inner") { } + end + + assert_respond_to x.new(nil), "xyz" + assert_respond_to y.new(nil), "xyz" + assert_respond_to z.new(nil), "xyz" + end +end + +class TestSpecInTestCase < MetaMetaMetaTestCase + def setup + super + + Thread.current[:current_spec] = self + @tc = self + @assertion_count = 2 + end + + def assert_triggered expected, klass = Minitest::Assertion + @assertion_count += 1 + + e = assert_raises klass do + yield + end + + msg = e.message.sub(/(---Backtrace---).*/m, "\1") + msg.gsub!(/\(oid=[-0-9]+\)/, "(oid=N)") + + assert_equal expected, msg + end + + def teardown + msg = "expected #{@assertion_count} assertions, not #{@tc.assertions}" + assert_equal @assertion_count, @tc.assertions, msg + end + + def test_expectation + @tc.assert_equal true, _(1).must_equal(1) + end + + def test_expectation_triggered + assert_triggered "Expected: 2\n Actual: 1" do + _(1).must_equal 2 + end + end + + include Minitest::Spec::DSL::InstanceMethods + + def test_expectation_with_a_message + assert_triggered "woot.\nExpected: 2\n Actual: 1" do + _(1).must_equal 2, "woot" + end + end +end + +class ValueMonadTest < Minitest::Test + attr_accessor :struct + + def setup + @struct = { :_ => "a", :value => "b", :expect => "c" } + def @struct.method_missing k # think openstruct + self[k] + end + end + + def test_value_monad_method + assert_equal "a", struct._ + end + + def test_value_monad_value_alias + assert_equal "b", struct.value + end + + def test_value_monad_expect_alias + assert_equal "c", struct.expect + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_test.rb b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_test.rb new file mode 100644 index 0000000..7fbbf1f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/minitest-5.15.0/test/minitest/test_minitest_test.rb @@ -0,0 +1,1109 @@ +# encoding: UTF-8 + +require "pathname" +require "minitest/metametameta" + +if defined? Encoding then + e = Encoding.default_external + if e != Encoding::UTF_8 then + warn "" + warn "" + warn "NOTE: External encoding #{e} is not UTF-8. Tests WILL fail." + warn " Run tests with `RUBYOPT=-Eutf-8 rake` to avoid errors." + warn "" + warn "" + end +end + +class Minitest::Runnable + def whatever # faked for testing + assert true + end +end + +class TestMinitestUnit < MetaMetaMetaTestCase + parallelize_me! + + pwd = Pathname.new File.expand_path Dir.pwd + basedir = Pathname.new(File.expand_path "lib/minitest") + "mini" + basedir = basedir.relative_path_from(pwd).to_s + MINITEST_BASE_DIR = basedir[/\A\./] ? basedir : "./#{basedir}" + BT_MIDDLE = ["#{MINITEST_BASE_DIR}/test.rb:161:in `each'", + "#{MINITEST_BASE_DIR}/test.rb:158:in `each'", + "#{MINITEST_BASE_DIR}/test.rb:139:in `run'", + "#{MINITEST_BASE_DIR}/test.rb:106:in `run'"] + + def test_filter_backtrace + # this is a semi-lame mix of relative paths. 
+ # I cheated by making the autotest parts not have ./ + bt = (["lib/autotest.rb:571:in `add_exception'", + "test/test_autotest.rb:62:in `test_add_exception'", + "#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/test.rb:29", + "test/test_autotest.rb:422"]) + bt = util_expand_bt bt + + ex = ["lib/autotest.rb:571:in `add_exception'", + "test/test_autotest.rb:62:in `test_add_exception'"] + ex = util_expand_bt ex + + fu = Minitest.filter_backtrace(bt) + + assert_equal ex, fu + end + + def test_filter_backtrace_all_unit + bt = (["#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/test.rb:29"]) + ex = bt.clone + fu = Minitest.filter_backtrace(bt) + assert_equal ex, fu + end + + def test_filter_backtrace_unit_starts + bt = (["#{MINITEST_BASE_DIR}/test.rb:165:in `__send__'"] + + BT_MIDDLE + + ["#{MINITEST_BASE_DIR}/mini/test.rb:29", + "-e:1"]) + + bt = util_expand_bt bt + + ex = ["-e:1"] + fu = Minitest.filter_backtrace bt + assert_equal ex, fu + end + + def test_filter_backtrace__empty + with_empty_backtrace_filter do + bt = %w[first second third] + fu = Minitest.filter_backtrace bt.dup + assert_equal bt, fu + end + end + + def test_infectious_binary_encoding + @tu = Class.new FakeNamedTest do + def test_this_is_not_ascii_assertion + assert_equal "ЁЁЁ", "ёёё" + end + + def test_this_is_non_ascii_failure_message + fail 'ЁЁЁ'.force_encoding('ASCII-8BIT') + end + end + + expected = clean <<-EOM + EF + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_this_is_non_ascii_failure_message: + RuntimeError: ЁЁЁ + FILE:LINE:in `test_this_is_non_ascii_failure_message' + + 2) Failure: + FakeNamedTestXX#test_this_is_not_ascii_assertion [FILE:LINE]: + Expected: \"ЁЁЁ\" + Actual: \"ёёё\" + + 2 runs, 1 assertions, 1 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_passed_eh_teardown_good + test_class = Class.new FakeNamedTest do + def teardown; assert true; end + def test_omg; assert true; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + assert_predicate test, :passed? + refute_predicate test, :skipped? + end + + def test_passed_eh_teardown_skipped + test_class = Class.new FakeNamedTest do + def teardown; assert true; end + def test_omg; skip "bork"; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + refute_predicate test, :passed? + assert_predicate test, :skipped? + end + + def test_passed_eh_teardown_flunked + test_class = Class.new FakeNamedTest do + def teardown; flunk; end + def test_omg; assert true; end + end + + test = test_class.new :test_omg + test.run + + refute_predicate test, :error? + refute_predicate test, :passed? + refute_predicate test, :skipped? + end + + def util_expand_bt bt + if RUBY_VERSION >= "1.9.0" then + bt.map { |f| (f =~ /^\./) ? File.expand_path(f) : f } + else + bt + end + end +end + +class TestMinitestUnitInherited < MetaMetaMetaTestCase + def with_overridden_include + Class.class_eval do + def inherited_with_hacks _klass + throw :inherited_hook + end + + alias inherited_without_hacks inherited + alias inherited inherited_with_hacks + alias IGNORE_ME! inherited # 1.8 bug. 
god I love venture bros + end + + yield + ensure + Class.class_eval do + alias inherited inherited_without_hacks + + undef_method :inherited_with_hacks + undef_method :inherited_without_hacks + end + + refute_respond_to Class, :inherited_with_hacks + refute_respond_to Class, :inherited_without_hacks + end + + def test_inherited_hook_plays_nice_with_others + with_overridden_include do + assert_throws :inherited_hook do + Class.new FakeNamedTest + end + end + end +end + +class TestMinitestRunner < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. + + def test_class_runnables + @assertion_count = 0 + + tc = Class.new(Minitest::Test) + + assert_equal 1, Minitest::Test.runnables.size + assert_equal [tc], Minitest::Test.runnables + end + + def test_run_test + @tu = + Class.new FakeNamedTest do + attr_reader :foo + + def run + @foo = "hi mom!" + r = super + @foo = "okay" + + r + end + + def test_something + assert_equal "hi mom!", foo + end + end + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_error + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_error + raise "unhandled exception" + end + end + + expected = clean <<-EOM + E. + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_error: + RuntimeError: unhandled exception + FILE:LINE:in \`test_error\' + + 2 runs, 1 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_error_teardown + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def teardown + raise "unhandled exception" + end + end + + expected = clean <<-EOM + E + + Finished in 0.00 + + 1) Error: + FakeNamedTestXX#test_something: + RuntimeError: unhandled exception + FILE:LINE:in \`teardown\' + + 1 runs, 1 assertions, 0 failures, 1 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_failing + setup_basic_tu + + expected = clean <<-EOM + F. + + Finished in 0.00 + + 1) Failure: + FakeNamedTestXX#test_failure [FILE:LINE]: + Expected false to be truthy. + + 2 runs, 2 assertions, 1 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def setup_basic_tu + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_failure + assert false + end + end + end + + def test_run_failing_filtered + setup_basic_tu + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected, %w[--name /some|thing/ --seed 42] + end + + def assert_filtering filter, name, expected, a = false + args = %W[--#{filter} #{name} --seed 42] + + alpha = Class.new FakeNamedTest do + define_method :test_something do + assert a + end + end + Object.const_set(:Alpha, alpha) + + beta = Class.new FakeNamedTest do + define_method :test_something do + assert true + end + end + Object.const_set(:Beta, beta) + + @tus = [alpha, beta] + + assert_report expected, args + ensure + Object.send :remove_const, :Alpha + Object.send :remove_const, :Beta + end + + def test_run_filtered_including_suite_name + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "/Beta#test_something/", expected + end + + def test_run_filtered_including_suite_name_string + expected = clean <<-EOM + . 
+ + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "Beta#test_something", expected + end + + def test_run_filtered_string_method_only + expected = clean <<-EOM + .. + + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "name", "test_something", expected, :pass + end + + def test_run_failing_excluded + setup_basic_tu + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected, %w[--exclude /failure/ --seed 42] + end + + def test_run_filtered_excluding_suite_name + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "/Alpha#test_something/", expected + end + + def test_run_filtered_excluding_suite_name_string + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "Alpha#test_something", expected + end + + def test_run_filtered_excluding_string_method_only + expected = clean <<-EOM + + + Finished in 0.00 + + 0 runs, 0 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_filtering "exclude", "test_something", expected, :pass + end + + def test_run_passing + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + end + + expected = clean <<-EOM + . + + Finished in 0.00 + + 1 runs, 1 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + def test_run_skip + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_skip + skip "not yet" + end + end + + expected = clean <<-EOM + S. + + Finished in 0.00 + + 2 runs, 1 assertions, 0 failures, 0 errors, 1 skips + + You have skipped tests. Run with --verbose for details. + EOM + + restore_env do + assert_report expected + end + end + + def test_run_skip_verbose + @tu = + Class.new FakeNamedTest do + def test_something + assert true + end + + def test_skip + skip "not yet" + end + end + + expected = clean <<-EOM + FakeNamedTestXX#test_skip = 0.00 s = S + FakeNamedTestXX#test_something = 0.00 s = . + + Finished in 0.00 + + 1) Skipped: + FakeNamedTestXX#test_skip [FILE:LINE]: + not yet + + 2 runs, 1 assertions, 0 failures, 0 errors, 1 skips + EOM + + assert_report expected, %w[--seed 42 --verbose] + end + + def test_run_with_other_runner + @tu = + Class.new FakeNamedTest do + def self.run reporter, options = {} + @reporter = reporter + before_my_suite + super + end + + def self.name; "wacky!" end + + def self.before_my_suite + @reporter.io.puts "Running #{self.name} tests" + @@foo = 1 + end + + def test_something + assert_equal 1, @@foo + end + + def test_something_else + assert_equal 1, @@foo + end + end + + expected = clean <<-EOM + Running wacky! tests + .. 
+ + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report expected + end + + require "monitor" + + class Latch + def initialize count = 1 + @count = count + @lock = Monitor.new + @cv = @lock.new_cond + end + + def release + @lock.synchronize do + @count -= 1 if @count > 0 + @cv.broadcast if @count == 0 + end + end + + def await + @lock.synchronize { @cv.wait_while { @count > 0 } } + end + end + + def test_run_parallel + test_count = 2 + test_latch = Latch.new test_count + wait_latch = Latch.new test_count + main_latch = Latch.new + + thread = Thread.new { + Thread.current.abort_on_exception = true + + # This latch waits until both test latches have been released. Both + # latches can't be released unless done in separate threads because + # `main_latch` keeps the test method from finishing. + test_latch.await + main_latch.release + } + + @tu = + Class.new FakeNamedTest do + parallelize_me! + + test_count.times do |i| + define_method :"test_wait_on_main_thread_#{i}" do + test_latch.release + + # This latch blocks until the "main thread" releases it. The main + # thread can't release this latch until both test latches have + # been released. This forces the latches to be released in separate + # threads. + main_latch.await + assert true + end + end + end + + expected = clean <<-EOM + .. + + Finished in 0.00 + + 2 runs, 2 assertions, 0 failures, 0 errors, 0 skips + EOM + + assert_report(expected) do |reporter| + reporter.extend(Module.new { + define_method("record") do |result| + super(result) + wait_latch.release + end + + define_method("report") do + wait_latch.await + super() + end + }) + end + assert thread.join + end +end + +class TestMinitestUnitOrder < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. 
+ + def test_before_setup + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :setup do + super() + call_order << :setup + end + + define_method :before_setup do + call_order << :before_setup + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:before_setup, :setup] + assert_equal expected, call_order + end + + def test_after_teardown + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :teardown do + super() + call_order << :teardown + end + + define_method :after_teardown do + call_order << :after_teardown + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:teardown, :after_teardown] + assert_equal expected, call_order + end + + def test_all_teardowns_are_guaranteed_to_run + call_order = [] + @tu = + Class.new FakeNamedTest do + define_method :after_teardown do + super() + call_order << :after_teardown + raise + end + + define_method :teardown do + super() + call_order << :teardown + raise + end + + define_method :before_teardown do + super() + call_order << :before_teardown + raise + end + + def test_omg; assert true; end + end + + run_tu_with_fresh_reporter + + expected = [:before_teardown, :teardown, :after_teardown] + assert_equal expected, call_order + end + + def test_setup_and_teardown_survive_inheritance + call_order = [] + + @tu = Class.new FakeNamedTest do + define_method :setup do + call_order << :setup_method + end + + define_method :teardown do + call_order << :teardown_method + end + + define_method :test_something do + call_order << :test + end + end + + run_tu_with_fresh_reporter + + @tu = Class.new @tu + run_tu_with_fresh_reporter + + # Once for the parent class, once for the child + expected = [:setup_method, :test, :teardown_method] * 2 + + assert_equal expected, call_order + end +end + +class TestMinitestRunnable < Minitest::Test + def setup_marshal klass + tc = klass.new "whatever" + tc.assertions = 42 + tc.failures << "a failure" + + yield tc if block_given? + + def tc.setup + @blah = "blah" + end + tc.setup + + @tc = Minitest::Result.from tc + end + + def assert_marshal expected_ivars + new_tc = Marshal.load Marshal.dump @tc + + ivars = new_tc.instance_variables.map(&:to_s).sort + assert_equal expected_ivars, ivars + assert_equal "whatever", new_tc.name + assert_equal 42, new_tc.assertions + assert_equal ["a failure"], new_tc.failures + + yield new_tc if block_given? 
+ end + + def test_marshal + setup_marshal Minitest::Runnable + + assert_marshal %w[@NAME @assertions @failures @klass @source_location @time] + end + + def test_spec_marshal + klass = describe("whatever") { it("passes") { assert true } } + rm = klass.runnable_methods.first + + # Run the test + @tc = klass.new(rm).run + + assert_kind_of Minitest::Result, @tc + + # Pass it over the wire + over_the_wire = Marshal.load Marshal.dump @tc + + assert_equal @tc.time, over_the_wire.time + assert_equal @tc.name, over_the_wire.name + assert_equal @tc.assertions, over_the_wire.assertions + assert_equal @tc.failures, over_the_wire.failures + assert_equal @tc.klass, over_the_wire.klass + end + + def test_spec_marshal_with_exception + klass = describe("whatever") { it("passes") { raise Class.new(StandardError)} } + rm = klass.runnable_methods.first + + # Run the test + @tc = klass.new(rm).run + + assert_kind_of Minitest::Result, @tc + + # Pass it over the wire + over_the_wire = Marshal.load Marshal.dump @tc + + assert_equal @tc.time, over_the_wire.time + assert_equal @tc.name, over_the_wire.name + assert_equal @tc.assertions, over_the_wire.assertions + assert_equal @tc.failures, over_the_wire.failures + assert_equal @tc.klass, over_the_wire.klass + end +end + +class TestMinitestTest < TestMinitestRunnable + def test_dup + setup_marshal Minitest::Test do |tc| + tc.time = 3.14 + end + + assert_marshal %w[@NAME @assertions @failures @klass @source_location @time] do |new_tc| + assert_in_epsilon 3.14, new_tc.time + end + end +end + +class TestMinitestUnitTestCase < Minitest::Test + # do not call parallelize_me! - teardown accesses @tc._assertions + # which is not threadsafe. Nearly every method in here is an + # assertion test so it isn't worth splitting it out further. + + RUBY18 = !defined? Encoding + + def setup + super + + Minitest::Test.reset + + @tc = Minitest::Test.new "fake tc" + @zomg = "zomg ponies!" + @assertion_count = 1 + end + + def teardown + assert_equal(@assertion_count, @tc.assertions, + "expected #{@assertion_count} assertions to be fired during the test, not #{@tc.assertions}") if @tc.passed? + end + + def non_verbose + orig_verbose = $VERBOSE + $VERBOSE = false + + yield + ensure + $VERBOSE = orig_verbose + end + + def sample_test_case(rand) + srand rand + Class.new FakeNamedTest do + 100.times do |i| + define_method("test_#{i}") { assert true } + end + end.runnable_methods + end + + # srand varies with OS + def test_runnable_methods_random + @assertion_count = 0 + + random_tests_1 = sample_test_case 42 + random_tests_2 = sample_test_case 42 + random_tests_3 = sample_test_case 1_000 + + assert_equal random_tests_1, random_tests_2 + refute_equal random_tests_1, random_tests_3 + end + + def test_runnable_methods_sorted + @assertion_count = 0 + + sample_test_case = Class.new FakeNamedTest do + def self.test_order; :sorted end + def test_test3; assert "does not matter" end + def test_test2; assert "does not matter" end + def test_test1; assert "does not matter" end + end + + expected = %w[test_test1 test_test2 test_test3] + assert_equal expected, sample_test_case.runnable_methods + end + + def test_i_suck_and_my_tests_are_order_dependent_bang_sets_test_order_alpha + @assertion_count = 0 + + shitty_test_case = Class.new FakeNamedTest + + shitty_test_case.i_suck_and_my_tests_are_order_dependent! 
+ + assert_equal :alpha, shitty_test_case.test_order + end + + def test_i_suck_and_my_tests_are_order_dependent_bang_does_not_warn + @assertion_count = 0 + + shitty_test_case = Class.new FakeNamedTest + + def shitty_test_case.test_order; :lol end + + assert_silent do + shitty_test_case.i_suck_and_my_tests_are_order_dependent! + end + end + + def test_autorun_does_not_affect_fork_success_status + @assertion_count = 0 + skip "windows doesn't have skip" unless Process.respond_to?(:fork) + Process.waitpid(fork {}) + assert_equal true, $?.success? + end + + def test_autorun_does_not_affect_fork_exit_status + @assertion_count = 0 + skip "windows doesn't have skip" unless Process.respond_to?(:fork) + Process.waitpid(fork { exit 42 }) + assert_equal 42, $?.exitstatus + end +end + +class TestMinitestGuard < Minitest::Test + parallelize_me! + + def test_mri_eh + assert self.class.mri? "ruby blah" + assert self.mri? "ruby blah" + end + + def test_jruby_eh + assert self.class.jruby? "java" + assert self.jruby? "java" + end + + def test_rubinius_eh + assert_output "", /DEPRECATED/ do + assert self.class.rubinius? "rbx" + end + assert_output "", /DEPRECATED/ do + assert self.rubinius? "rbx" + end + end + + def test_maglev_eh + assert_output "", /DEPRECATED/ do + assert self.class.maglev? "maglev" + end + assert_output "", /DEPRECATED/ do + assert self.maglev? "maglev" + end + end + + def test_osx_eh + assert self.class.osx? "darwin" + assert self.osx? "darwin" + end + + def test_windows_eh + assert self.class.windows? "mswin" + assert self.windows? "mswin" + end +end + +class TestMinitestUnitRecording < MetaMetaMetaTestCase + # do not parallelize this suite... it just can't handle it. + + def assert_run_record *expected, &block + @tu = Class.new FakeNamedTest, &block + + run_tu_with_fresh_reporter + + recorded = first_reporter.results.map(&:failures).flatten.map { |f| f.error.class } + + assert_equal expected, recorded + end + + def test_run_with_bogus_reporter + # https://github.com/seattlerb/minitest/issues/659 + # TODO: remove test for minitest 6 + @tu = Class.new FakeNamedTest do + def test_method + assert true + end + end + + bogus_reporter = Class.new do # doesn't subclass AbstractReporter + def start; @success = false; end + # def prerecord klass, name; end # doesn't define full API + def record result; @success = true; end + def report; end + def passed?; end + def results; end + def success?; @success; end + end.new + + self.reporter = Minitest::CompositeReporter.new + reporter << bogus_reporter + + Minitest::Runnable.runnables.delete @tu + + @tu.run reporter, {} + + assert_predicate bogus_reporter, :success? 
+ end + + def test_record_passing + assert_run_record do + def test_method + assert true + end + end + end + + def test_record_failing + assert_run_record Minitest::Assertion do + def test_method + assert false + end + end + end + + def test_record_error + assert_run_record RuntimeError do + def test_method + raise "unhandled exception" + end + end + end + + def test_record_error_teardown + assert_run_record RuntimeError do + def test_method + assert true + end + + def teardown + raise "unhandled exception" + end + end + end + + def test_record_error_in_test_and_teardown + assert_run_record AnError, RuntimeError do + def test_method + raise AnError + end + + def teardown + raise "unhandled exception" + end + end + end + + def test_to_s_error_in_test_and_teardown + @tu = Class.new FakeNamedTest do + def test_method + raise AnError + end + + def teardown + raise "unhandled exception" + end + end + + run_tu_with_fresh_reporter + + exp = clean " + Error: + FakeNamedTestXX#test_method: + AnError: AnError + FILE:LINE:in `test_method' + + Error: + FakeNamedTestXX#test_method: + RuntimeError: unhandled exception + FILE:LINE:in `teardown' + " + + assert_equal exp.strip, normalize_output(first_reporter.results.first.to_s).strip + end + + def test_record_skip + assert_run_record Minitest::Skip do + def test_method + skip "not yet" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/ARCHITECTURE.md b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/ARCHITECTURE.md new file mode 100644 index 0000000..b7c869c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/ARCHITECTURE.md @@ -0,0 +1,102 @@ +# Molinillo Architecture + +At the highest level, Molinillo is a dependency resolution algorithm. +You hand the `Resolver` a list of dependencies and a 'locking' `DependencyGraph`, and you get a resulting dependency graph out of that. +In order to guarantee that the list of dependencies is properly resolved, however, an algorithm is required that is smarter than just walking the list of dependencies and activating each, and its own dependencies, in turn. + +## Backtracking + +At the heart of Molinillo is a [backtracking](http://en.wikipedia.org/wiki/Backtracking) algorithm with [forward checking](http://en.wikipedia.org/wiki/Look-ahead_(backtracking)). +Essentially, the resolution process keeps track of two types of states (dependency and possibility) in a stack. +If that stack is ever exhausted, resolution was impossible. +New states are pushed onto the stack for every dependency, and every time a dependency is successfully 'activated' a new state is pushed onto the stack that represents that activation. +This stack-based approach is used because backtracking (also known as *unwinding*) becomes as simple as popping a state off that stack. + +### Walkthrough + +1. The client initializes a `Resolver` with a `SpecificationProvider` and `UI` +2. The client calls `resolve` with an array of user-requested dependencies and an optional 'locking' `DependencyGraph` +3. The `Resolver` creates a new `Resolution` with those four user-specified parameters and calls `resolve` on it +4. The `Resolution` creates an `initial_state`, which takes the user-requested dependencies and puts them into a `DependencyState` + - In the process of creating the state, the `SpecificationProvider` is asked to sort the dependencies and return all the `possibilities` for the `initial_requirement` (taking into account whether the dependency is `locked`). 
These possibilities are then grouped into `PossibilitySet`s, with each set representing a group of versions for the dependency which share the same sub-dependency requirements and are contiguous
+   - A `DependencyGraph` is created that has all of these requirements point to `root_vertices`
+5. The resolution process now enters its main loop, which continues as long as there is a current `state` to process, and the current state has requirements left to process
+6. `UI#indicate_progress` is called to allow the client to report progress
+7. If the current state is a `DependencyState`, we have it pop off a `PossibilityState` that encapsulates a `PossibilitySet` for that dependency
+8. Process the topmost state on the stack
+9. If there is a non-empty `PossibilitySet` for the state, `attempt_to_activate` it (jump to #11)
+10. If there is no non-empty `PossibilitySet` for the state, `create_conflict` if the state is a `PossibilityState`, and then `unwind_for_conflict`
+    - `create_conflict` builds a `Conflict` object, with details of all of the requirements for the given dependency, and adds it to a hash of conflicts stored on the `state`, indexed by the name of the dependency
+    - `unwind_for_conflict` loops through all the conflicts on the `state`, looking for a state it can rewind to that might avoid that conflict. If no such state exists, it raises a VersionConflict error. Otherwise, it takes the most recent state with a chance to avoid the current conflicts and rewinds to it (go to #6)
+11. Check if there is an existing vertex in the `activated` dependency graph for the dependency this state's `requirement` relates to
+12. If there is no existing vertex in the `activated` dependency graph for the dependency this state's `requirement` relates to, `activate_new_spec`. This creates a new vertex in the `activated` dependency graph, with its payload set to the possibility's `PossibilitySet`. It also pushes a new `DependencyState`, with the now-activated `PossibilitySet`'s own dependencies. Go to #6
+13. If there is an existing, `activated` vertex for the dependency, `attempt_to_filter_existing_spec`
+    - This filters the contents of the existing vertex's `PossibilitySet` by the current state's `requirement`
+    - If any possibilities remain within the `PossibilitySet`, it updates the activated vertex's payload with the new, filtered state and pushes a new `DependencyState`
+    - If no possibilities remain within the `PossibilitySet` after filtering, or if the current state's `PossibilitySet` had a different set of sub-dependency requirements to the existing vertex's `PossibilitySet`, `create_conflict` and `unwind_for_conflict`, back to the last `DependencyState` that has a chance to not generate a conflict. Go to #6
+15. Terminate with the topmost state's dependency graph when there are no more requirements left
+16. For each vertex with a payload of allowable versions for this resolution (i.e., a `PossibilitySet`), pick a single specific version.
+
+### Optimal unwinding
+
+For our backtracking algorithm to be efficient as well as correct, we need to
+unwind efficiently after a conflict is encountered. Unwind too far and we'll
+miss valid resolutions - once we unwind past a DependencyState we can never
+get there again. Unwind too little and resolution will be extremely slow - we'll
+repeatedly hit the same conflict, processing many unnecessary iterations before
+getting to a branch that avoids it.
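+
+Before looking at how Molinillo chooses the optimal unwind target, the following
+deliberately tiny Ruby sketch illustrates the basic stack-of-states flow from the
+walkthrough above. It is not Molinillo's implementation: there are no
+`PossibilitySet`s, no `Conflict` objects and no optimal unwinding, and every name
+in it (`INDEX`, `Choice`, `resolve`, `unwind`) is illustrative only. Unwinding here
+is simply popping choice points off the stack until one still has an untried
+version.
+
+```ruby
+# frozen_string_literal: true
+
+# Toy index: package name => { version => [[dependency name, allowed versions], ...] }.
+# 'parser' 2 needs 'json' 1, so activating 'json' 2 first forces a backtrack.
+INDEX = {
+  'app'    => { 1 => [['json', [1, 2]], ['parser', [2]]] },
+  'json'   => { 2 => [], 1 => [] },
+  'parser' => { 2 => [['json', [1]]], 1 => [] },
+}.freeze
+
+# Each stack entry is a choice point: the requirements still outstanding, the
+# versions activated so far, and the candidate versions not yet tried for the
+# requirement currently being processed.
+Choice = Struct.new(:requirements, :activated, :candidates)
+
+# Backtracking is just popping states until one still has an untried candidate.
+def unwind(stack)
+  stack.pop while !stack.empty? && (stack.last.candidates.nil? || stack.last.candidates.empty?)
+end
+
+def resolve(requirements)
+  stack = [Choice.new(requirements, {}, nil)]
+
+  until stack.empty?
+    state = stack.last
+    return state.activated if state.requirements.empty? # every requirement satisfied
+
+    name, allowed = state.requirements.first
+    rest = state.requirements.drop(1)
+
+    if (active = state.activated[name])
+      # Already activated: either the existing version also satisfies this
+      # requirement, or we have a conflict and must backtrack.
+      if allowed.include?(active)
+        stack.push(Choice.new(rest, state.activated, nil))
+      else
+        unwind(stack)
+      end
+      next
+    end
+
+    # Enumerate the matching versions (newest first) the first time this
+    # choice point processes the requirement.
+    state.candidates ||= (INDEX.fetch(name, {}).keys & allowed).sort.reverse
+
+    if (version = state.candidates.shift)
+      stack.push(Choice.new(rest + INDEX[name][version],
+                            state.activated.merge(name => version), nil))
+    else
+      unwind(stack) # nothing left to try for this requirement
+    end
+  end
+
+  raise 'version conflict: no resolution exists'
+end
+
+p resolve([['app', [1]]])
+# => {"app"=>1, "json"=>1, "parser"=>2}   ('json' 2 is tried first, then unwound)
+```
+
+Molinillo's real loop records much more in each state (grouped possibilities,
+conflicts, unused unwinds) precisely so that it can pick a better target than this
+naive "pop until something is untried" rule; that is what the rest of this section
+describes.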
+
+To unwind the optimal amount, we consider the current conflict, along with all
+the previous unwinds that have determined our current state.
+
+1. First, consider the current conflict as follows:
+   - Find the earliest (lowest index) set of requirements which combine to cause
+     the conflict. Any non-binding requirements can be ignored, as removing them
+     would not resolve the current conflict
+   - For each binding requirement, find all the alternative possibilities that
+     would relax the requirement:
+     - the requirement's DependencyState might have alternative possibilities
+       that would satisfy all the other requirements
+     - the parent of the requirement might have alternative possibilities that
+       would prevent the requirement existing
+     - the parent of the parent of the requirement might have alternative
+       possibilities that would prevent the parent, and thus the requirement,
+       from existing
+     - etc., etc.
+   - Group all of the above possibilities into an array, and pick the one with
+     the highest index (i.e., the smallest rewind) as our candidate rewind
+2. Next, consider any previous unwinds that were not executed (because a
+different, smaller unwind was chosen instead):
+   - Ignore any previously unused unwinds that would now unwind further than the
+     highest index found in (1), if any
+   - For the remaining unused unwinds, check whether the unwind has a chance of
+     preventing us from encountering the current conflict. For this to be the case,
+     the unwind must have been rejected in favour of an unwind to one of the states
+     in the current conflict's requirement tree
+   - If any such unwinds exist, use the one with the highest index (smallest
+     unwind) instead of the one found in (1)
+3a. If no possible unwind was found in (1) and (2), raise a VersionConflict
+error as resolution is not possible.
+3b. Filter the state that we're unwinding to, in order to remove any
+possibilities we know will result in a conflict. Consider all possible unwinds
+to the chosen state (there may be several, amassed from previous unused
+unwinds for different conflicts) when doing this filtering - only
+possibilities that will certainly result in *all* of those conflicts can be
+filtered out as having no chance of resolution
+4. Update the list of unused unwinds:
+   - Add all possible unwinds for the current conflict
+   - Update the `requirements_unwound_to_instead` attribute on any considered
+     unwind that was only rejected because it had a lower index than the chosen one
+   - Remove all unwinds to a state greater than or equal to the chosen unwind
+5. Go to #6 in the main loop
+
+## Specification Provider
+
+The `SpecificationProvider` module forms the basis for the key integration point for a client library with Molinillo.
+Its methods convert the client's domain-specific model objects into concepts the resolver understands:
+
+- Nested dependencies
+- Names
+- Requirement satisfaction
+- Finding specifications (known internally as `possibilities`)
+- Sorting dependencies (for the sake of reasonable resolver performance)
diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/CHANGELOG.md
new file mode 100644
index 0000000..b66c905
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/CHANGELOG.md
@@ -0,0 +1,438 @@
+# Molinillo Changelog
+
+## 0.6.6 (2018-08-07)
+
+##### Enhancements
+
+* Improve performance of `Vertex#path_to?`.
+ [Samuel Giddins](https://github.com/segiddins) + +* Allow customization of string used to say that a version conflict has occurred + for a particular name by passing in the `:incompatible_version_message_for_conflict` + key when constructing a version conflict message with trees. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.6.5 (2018-03-22) + +##### Enhancements + +* Improve performance of recursive vertex methods. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.6.4 (2017-10-29) + +##### Enhancements + +* Reduce memory usage during resolution by making the `Vertex#requirements` + array unique. + [Grey Baker](https://github.com/greysteil) + [Jan Krutisch](https://github.com/halfbyte) + +##### Bug Fixes + +* None. + + +## 0.6.3 (2017-09-06) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Handle the case where an unwind occurs to a requirement that directly caused + the current conflict but could also have been unwound to directly from + previous conflicts. In this case, filtering must not remove any possibilities + that could have avoided the previous conflicts (even if they would not avoid + the current one). + [Grey Baker](https://github.com/greysteil) + + +## 0.6.2 (2017-08-25) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Insist each PossibilitySet contains contiguous versions. Fixes a regression + where an older dependency version with identical sub-dependencies to the + latest version may be preferred over the second-latest version. + [Grey Baker](https://github.com/greysteil) + + +## 0.6.1 (2017-08-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Allow the set of dependencies for a given possibility to change over time, + fixing a regression in 0.6.0. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.0 (2017-07-27) + +##### Breaking + +* Objects returned by `dependencies_for` and passed to `resolve` must properly implement + both `==` and `eql?`, such that they return `true` when they exhibit the same behavior in + `requirement_satisfied_by?`. + +##### Enhancements + +* Speed up dependency resolution by considering multiple possible versions of a + dependency at once, grouped by sub-dependencies. Groups are then filtered as + additional requirements are introduced. If a group's sub-dependencies cause + conflicts the entire group can be discarded, which reduces the number of + possibilities that have to be tested to find a resolution. + [Grey Baker](https://github.com/greysteil) + [Samuel Giddins](https://github.com/segiddins) + [#69](https://github.com/CocoaPods/Molinillo/pull/69) + +* Check for locked requirements when generating a new state's possibilities + array, and reduce possibilities set accordingly. Reduces scope for erroneous + VersionConflict errors. + [Grey Baker](https://github.com/greysteil) + [#67](https://github.com/CocoaPods/Molinillo/pull/67) + +* Add `VersionConflict#message_with_trees` for consumers who prefer a more verbose + conflict message that includes full requirement trees for all conflicts. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Improve unwinding by considering previous conflicts for the same dependency + when deciding which state to unwind to. Previously, prior conflicts were + stored in a hash indexed by their name, with only the most recent conflict + stored for each dependency. With this fix, Molinillo can resolve anything + that's thrown at it. 
🎉 + [Grey Baker](https://github.com/greysteil) + [#73](https://github.com/CocoaPods/Molinillo/pull/73) + +* Only raise CircularDependency errors if they prevent resolution. + [Ian Young](https://github.com/iangreenleaf) + [Grey Baker](https://github.com/greysteil) + [#78](https://github.com/CocoaPods/Molinillo/pull/78) + +* Consider additional (binding) requirements that caused a conflict when + determining which state to unwind to. Previously, in some cases Molinillo + would erroneously throw a VersionConflict error if multiple requirements + combined to cause a conflict. + [Grey Baker](https://github.com/greysteil) + [#72](https://github.com/CocoaPods/Molinillo/pull/72) + +* Consider previous conflicts when determining the state to unwind to. If a + previous conflict, for a different dependency, is the reason we ended up with + the current conflict, then unwinding to a state that would not have caused + that conflict could prevent the current one, too. + [Grey Baker](https://github.com/greysteil) + [#72](https://github.com/CocoaPods/Molinillo/pull/72) + + +## 0.5.7 (2017-03-03) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Keep a stack of parents per requirement, so unwinding past a swap point that + updated the parent of the requirement works. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5425](https://github.com/bundler/bundler/issues/5425) + + +## 0.5.6 (2017-02-08) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Only reset the parent of a requirement after swapping when its original parent + was the same vertex being swapped. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5359](https://github.com/bundler/bundler/issues/5359) + [bundler#5362](https://github.com/bundler/bundler/issues/5362) + + +## 0.5.5 (2017-01-07) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Only remove requirements from the to-be-resolved list if there are no + activated vertices depending upon them after swapping. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5294](https://github.com/bundler/bundler/issues/5294) + + +## 0.5.4 (2016-11-14) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix unwinding when both sides of a conflict have a common parent + requirement. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5154](https://github.com/bundler/bundler/issues/5154) + + +## 0.5.3 (2016-10-28) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed a regression in v0.5.2 that could cause resolution to fail after + swapping, because stale dependencies would still be in the requirements + list. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/Molinillo/issues/48) + +* Rename `Action.name` to `Action.action_name` to avoid overriding + `Module.name`. + [Samuel Giddins](https://github.com/segiddins) + [#50](https://github.com/CocoaPods/Molinillo/issues/50) + + +## 0.5.2 (2016-10-24) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed a bug where `Resolution#parent_of` would return the incorrect parent for + a dependency after swapping had occurred, resulting in resolution failing. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5059](https://github.com/bundler/bundler/issues/5059) + + +## 0.5.1 (2016-09-12) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed a bug where `Resolution#parent_of` would return the incorrect parent for + a dependency, resulting in resolution failing. 
+ [Samuel Giddins](https://github.com/segiddins) + [bundler#4961](https://github.com/bundler/bundler/issues/4961) + + +## 0.5.0 (2016-06-14) + +##### Enhancements + +* Add an operation log to `DependencyGraph` to eliminate the need for graph + copies during dependency resolution, resulting in a 3-100x speedup and + reduction in allocations. + [Samuel Giddins](https://github.com/segiddins) + [bundler#4376](https://github.com/bundler/bundler/issues/4376) + +* Remove all metaprogramming to reduce array allocation overhead and improve + discoverability. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.4.5 (2016-04-30) + +##### Enhancements + +* For performance, don't needlessly dup objects in + `Resolution#push_state_for_requirements`. + [Joe Rafaniello](https://github.com/jrafanie) + +##### Bug Fixes + +* Recursively prune requirements when removing an orphan after swapping. + [Daniel DeLeo](https://github.com/danielsdeleo) + [berkshelf/solve#57](https://github.com/berkshelf/solve/issues/57) + + +## 0.4.4 (2016-02-28) + +##### Bug Fixes + +* Fix mutating a frozen string in `NoSuchDependencyError#message`. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.4.3 (2016-02-18) + +##### Enhancements + +* Add frozen string literal comments to all ruby files. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Prune the dependency list when removing an orphan after swapping. + [Samuel Giddins](https://github.com/segiddins) + [bundler/bundler#4276](https://github.com/bundler/bundler/issues/4276) + + +## 0.4.2 (2016-01-30) + +##### Bug Fixes + +* Detaching a vertex correctly removes it from the list of successors of its + predecessors. + [Samuel Giddins](https://github.com/segiddins) + +* Vertices orphaned after swapping dependencies are properly cleaned up from the + graph of activated specs. + [Samuel Giddins](https://github.com/segiddins) + [bundler/bundler#4198](https://github.com/bundler/bundler/issues/4198) + + +## 0.4.1 (2015-12-30) + +##### Enhancements + +* Ensure every API is 100% documented. + [Samuel Giddins](https://github.com/segiddins) + [#22](https://github.com/CocoaPods/Molinillo/issues/22) + + +## 0.4.0 (2015-07-27) + +##### API Breaking Changes + +* The `DependencyGraph` no longer treats root vertices specially, nor does it + maintain a direct reference to `edges`. Additionally, `Vertex` no longer + has a reference to its parent graph. + +##### Enhancements + +* Resolution has been sped up by 25x in some pathological cases, and in general + recursive operations on a `DependencyGraph` or `Vertex` are now `O(n)`. + [Samuel Giddins](https://github.com/segiddins) + [Bundler#3803](https://github.com/bundler/bundler/issues/3803) + +* Re-sorting of dependencies is skipped when the unresolved dependency list has + not changed, speeding up resolution of fully locked graphs. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.3.1 (2015-07-24) + +##### Enhancements + +* Add `Conflict#activated_by_name` to allow even richer version conflict + messages. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Ensure `Conflict#requirement_trees` is exhaustive. + [Samuel Giddins](https://github.com/segiddins) + [Bundler#3860](https://github.com/bundler/bundler/issues/3860) + + +## 0.3.0 (2015-06-29) + +##### Enhancements + +* Add the ability to optionally skip dependencies that have no possibilities. 
+ [Samuel Giddins](https://github.com/segiddins) + + +## 0.2.3 (2015-03-28) + +##### Bug Fixes + +* Silence a silly MRI warning about declaring private attributes. + [Piotr Szotkowski](https://github.com/chastell) + [Bundler#3516](https://github.com/bundler/bundler/issues/3516) + [Bundler#3525](https://github.com/bundler/bundler/issues/3525) + + +## 0.2.2 (2015-03-27) + +##### Bug Fixes + +* Use an ivar in `DependencyGraph#initialize_copy` to silence an MRI warning. + [Samuel Giddins](https://github.com/segiddins) + [Bundler#3516](https://github.com/bundler/bundler/issues/3516) + + +## 0.2.1 (2015-02-21) + +* Allow resolving some pathological cases where the backjumping algorithm would + skip over a valid possibility. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.2.0 (2014-12-25) + +* Institute stricter forward checking by backjumping to the source of a + conflict, even if that source comes from the existing spec. This further + improves performance in highly conflicting situations when sorting heuristics + prove misleading. + [Samuel Giddins](https://github.com/segiddins) + [Smit Shah](https://github.com/Who828) + +* Add support for topologically sorting a dependency graph's vertices. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.1.2 (2014-11-19) + +##### Enhancements + +* Improve performance in highly conflicting situations by backtracking more than + one state at a time. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Ensure that recursive invocations of `detach_vertex_named` don't lead to + messaging `nil`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#2805](https://github.com/CocoaPods/CocoaPods/issues/2805) + +## 0.1.1 (2014-11-06) + +* Ensure that an unwanted exception is not raised when an error occurs before + the initial state has been pushed upon the stack. + [Samuel Giddins](https://github.com/segiddins) + +## 0.1.0 (2014-10-26) + +* Initial release. + [Samuel Giddins](https://github.com/segiddins) diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/LICENSE b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/LICENSE new file mode 100644 index 0000000..01feffa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/LICENSE @@ -0,0 +1,9 @@ +This project is licensed under the MIT license. + +Copyright (c) 2014 Samuel E. Giddins segiddins@segiddins.me + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/README.md b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/README.md new file mode 100644 index 0000000..500caf3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/README.md @@ -0,0 +1,45 @@ +# Molinillo + +[![Build Status](https://img.shields.io/travis/CocoaPods/Molinillo/master.svg?style=flat)](https://travis-ci.org/CocoaPods/Molinillo) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/Molinillo.svg?style=flat)](https://codeclimate.com/github/CocoaPods/Molinillo) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/Molinillo.svg?style=flat)](https://codeclimate.com/github/CocoaPods/Molinillo) + +A generic dependency-resolution implementation. + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'molinillo', :git => 'https://github.com/CocoaPods/Molinillo' +``` + +And then execute: + +```bash +$ bundle install +``` + +Or install it yourself as: + +```bash +$ gem install molinillo +``` + +## Usage + +See the [ARCHITECTURE](ARCHITECTURE.md) file for an overview and look at the test suite for example usage. Better documentation and examples are +forthcoming. + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create a pull request + +## The Name + +[Molinillo](http://en.wikipedia.org/wiki/Molinillo_(whisk)) is a special whisk used in Mexico in the preparation of beverages such as hot chocolate. +Much like a dependency resolver, a molinillo helps take a list of ingredients and turn it into a delicious concoction! diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo.rb new file mode 100644 index 0000000..09d1641 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require 'molinillo/compatibility' +require 'molinillo/gem_metadata' +require 'molinillo/errors' +require 'molinillo/resolver' +require 'molinillo/modules/ui' +require 'molinillo/modules/specification_provider' + +# Molinillo is a generic dependency resolution algorithm. +module Molinillo +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/compatibility.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/compatibility.rb new file mode 100644 index 0000000..32d5d01 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/compatibility.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module Molinillo + # Hacks needed for old Ruby versions. 
+ module Compatibility + module_function + + if [].respond_to?(:flat_map) + # Flat map + # @param [Enumerable] enum an enumerable object + # @block the block to flat-map with + # @return The enum, flat-mapped + def flat_map(enum, &blk) + enum.flat_map(&blk) + end + else + # Flat map + # @param [Enumerable] enum an enumerable object + # @block the block to flat-map with + # @return The enum, flat-mapped + def flat_map(enum, &blk) + enum.map(&blk).flatten(1) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/delegates/resolution_state.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/delegates/resolution_state.rb new file mode 100644 index 0000000..b083072 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/delegates/resolution_state.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module Molinillo + # @!visibility private + module Delegates + # Delegates all {Molinillo::ResolutionState} methods to a `#state` property. + module ResolutionState + # (see Molinillo::ResolutionState#name) + def name + current_state = state || Molinillo::ResolutionState.empty + current_state.name + end + + # (see Molinillo::ResolutionState#requirements) + def requirements + current_state = state || Molinillo::ResolutionState.empty + current_state.requirements + end + + # (see Molinillo::ResolutionState#activated) + def activated + current_state = state || Molinillo::ResolutionState.empty + current_state.activated + end + + # (see Molinillo::ResolutionState#requirement) + def requirement + current_state = state || Molinillo::ResolutionState.empty + current_state.requirement + end + + # (see Molinillo::ResolutionState#possibilities) + def possibilities + current_state = state || Molinillo::ResolutionState.empty + current_state.possibilities + end + + # (see Molinillo::ResolutionState#depth) + def depth + current_state = state || Molinillo::ResolutionState.empty + current_state.depth + end + + # (see Molinillo::ResolutionState#conflicts) + def conflicts + current_state = state || Molinillo::ResolutionState.empty + current_state.conflicts + end + + # (see Molinillo::ResolutionState#unused_unwind_options) + def unused_unwind_options + current_state = state || Molinillo::ResolutionState.empty + current_state.unused_unwind_options + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/delegates/specification_provider.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/delegates/specification_provider.rb new file mode 100644 index 0000000..95b581f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/delegates/specification_provider.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +module Molinillo + module Delegates + # Delegates all {Molinillo::SpecificationProvider} methods to a + # `#specification_provider` property. + module SpecificationProvider + # (see Molinillo::SpecificationProvider#search_for) + def search_for(dependency) + with_no_such_dependency_error_handling do + specification_provider.search_for(dependency) + end + end + + # (see Molinillo::SpecificationProvider#dependencies_for) + def dependencies_for(specification) + with_no_such_dependency_error_handling do + specification_provider.dependencies_for(specification) + end + end + + # (see Molinillo::SpecificationProvider#requirement_satisfied_by?) 
+ def requirement_satisfied_by?(requirement, activated, spec) + with_no_such_dependency_error_handling do + specification_provider.requirement_satisfied_by?(requirement, activated, spec) + end + end + + # (see Molinillo::SpecificationProvider#name_for) + def name_for(dependency) + with_no_such_dependency_error_handling do + specification_provider.name_for(dependency) + end + end + + # (see Molinillo::SpecificationProvider#name_for_explicit_dependency_source) + def name_for_explicit_dependency_source + with_no_such_dependency_error_handling do + specification_provider.name_for_explicit_dependency_source + end + end + + # (see Molinillo::SpecificationProvider#name_for_locking_dependency_source) + def name_for_locking_dependency_source + with_no_such_dependency_error_handling do + specification_provider.name_for_locking_dependency_source + end + end + + # (see Molinillo::SpecificationProvider#sort_dependencies) + def sort_dependencies(dependencies, activated, conflicts) + with_no_such_dependency_error_handling do + specification_provider.sort_dependencies(dependencies, activated, conflicts) + end + end + + # (see Molinillo::SpecificationProvider#allow_missing?) + def allow_missing?(dependency) + with_no_such_dependency_error_handling do + specification_provider.allow_missing?(dependency) + end + end + + private + + # Ensures any raised {NoSuchDependencyError} has its + # {NoSuchDependencyError#required_by} set. + # @yield + def with_no_such_dependency_error_handling + yield + rescue NoSuchDependencyError => error + if state + vertex = activated.vertex_named(name_for(error.dependency)) + error.required_by += vertex.incoming_edges.map { |e| e.origin.name } + error.required_by << name_for_explicit_dependency_source unless vertex.explicit_requirements.empty? + end + raise + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph.rb new file mode 100644 index 0000000..746d80e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph.rb @@ -0,0 +1,223 @@ +# frozen_string_literal: true + +require 'set' +require 'tsort' + +require 'molinillo/dependency_graph/log' +require 'molinillo/dependency_graph/vertex' + +module Molinillo + # A directed acyclic graph that is tuned to hold named dependencies + class DependencyGraph + include Enumerable + + # Enumerates through the vertices of the graph. + # @return [Array] The graph's vertices. + def each + return vertices.values.each unless block_given? + vertices.values.each { |v| yield v } + end + + include TSort + + # @!visibility private + alias tsort_each_node each + + # @!visibility private + def tsort_each_child(vertex, &block) + vertex.successors.each(&block) + end + + # Topologically sorts the given vertices. + # @param [Enumerable] vertices the vertices to be sorted, which must + # all belong to the same graph. + # @return [Array] The sorted vertices. 
+ def self.tsort(vertices) + TSort.tsort( + lambda { |b| vertices.each(&b) }, + lambda { |v, &b| (v.successors & vertices).each(&b) } + ) + end + + # A directed edge of a {DependencyGraph} + # @attr [Vertex] origin The origin of the directed edge + # @attr [Vertex] destination The destination of the directed edge + # @attr [Object] requirement The requirement the directed edge represents + Edge = Struct.new(:origin, :destination, :requirement) + + # @return [{String => Vertex}] the vertices of the dependency graph, keyed + # by {Vertex#name} + attr_reader :vertices + + # @return [Log] the op log for this graph + attr_reader :log + + # Initializes an empty dependency graph + def initialize + @vertices = {} + @log = Log.new + end + + # Tags the current state of the dependency as the given tag + # @param [Object] tag an opaque tag for the current state of the graph + # @return [Void] + def tag(tag) + log.tag(self, tag) + end + + # Rewinds the graph to the state tagged as `tag` + # @param [Object] tag the tag to rewind to + # @return [Void] + def rewind_to(tag) + log.rewind_to(self, tag) + end + + # Initializes a copy of a {DependencyGraph}, ensuring that all {#vertices} + # are properly copied. + # @param [DependencyGraph] other the graph to copy. + def initialize_copy(other) + super + @vertices = {} + @log = other.log.dup + traverse = lambda do |new_v, old_v| + return if new_v.outgoing_edges.size == old_v.outgoing_edges.size + old_v.outgoing_edges.each do |edge| + destination = add_vertex(edge.destination.name, edge.destination.payload) + add_edge_no_circular(new_v, destination, edge.requirement) + traverse.call(destination, edge.destination) + end + end + other.vertices.each do |name, vertex| + new_vertex = add_vertex(name, vertex.payload, vertex.root?) + new_vertex.explicit_requirements.replace(vertex.explicit_requirements) + traverse.call(new_vertex, vertex) + end + end + + # @return [String] a string suitable for debugging + def inspect + "#{self.class}:#{vertices.values.inspect}" + end + + # @param [Hash] options options for dot output. + # @return [String] Returns a dot format representation of the graph + def to_dot(options = {}) + edge_label = options.delete(:edge_label) + raise ArgumentError, "Unknown options: #{options.keys}" unless options.empty? + + dot_vertices = [] + dot_edges = [] + vertices.each do |n, v| + dot_vertices << " #{n} [label=\"{#{n}|#{v.payload}}\"]" + v.outgoing_edges.each do |e| + label = edge_label ? edge_label.call(e) : e.requirement + dot_edges << " #{e.origin.name} -> #{e.destination.name} [label=#{label.to_s.dump}]" + end + end + + dot_vertices.uniq! + dot_vertices.sort! + dot_edges.uniq! + dot_edges.sort! 
+ + dot = dot_vertices.unshift('digraph G {').push('') + dot_edges.push('}') + dot.join("\n") + end + + # @return [Boolean] whether the two dependency graphs are equal, determined + # by a recursive traversal of each {#root_vertices} and its + # {Vertex#successors} + def ==(other) + return false unless other + return true if equal?(other) + vertices.each do |name, vertex| + other_vertex = other.vertex_named(name) + return false unless other_vertex + return false unless vertex.payload == other_vertex.payload + return false unless other_vertex.successors.to_set == vertex.successors.to_set + end + end + + # @param [String] name + # @param [Object] payload + # @param [Array] parent_names + # @param [Object] requirement the requirement that is requiring the child + # @return [void] + def add_child_vertex(name, payload, parent_names, requirement) + root = !parent_names.delete(nil) { true } + vertex = add_vertex(name, payload, root) + vertex.explicit_requirements << requirement if root + parent_names.each do |parent_name| + parent_vertex = vertex_named(parent_name) + add_edge(parent_vertex, vertex, requirement) + end + vertex + end + + # Adds a vertex with the given name, or updates the existing one. + # @param [String] name + # @param [Object] payload + # @return [Vertex] the vertex that was added to `self` + def add_vertex(name, payload, root = false) + log.add_vertex(self, name, payload, root) + end + + # Detaches the {#vertex_named} `name` {Vertex} from the graph, recursively + # removing any non-root vertices that were orphaned in the process + # @param [String] name + # @return [Array] the vertices which have been detached + def detach_vertex_named(name) + log.detach_vertex_named(self, name) + end + + # @param [String] name + # @return [Vertex,nil] the vertex with the given name + def vertex_named(name) + vertices[name] + end + + # @param [String] name + # @return [Vertex,nil] the root vertex with the given name + def root_vertex_named(name) + vertex = vertex_named(name) + vertex if vertex && vertex.root? + end + + # Adds a new {Edge} to the dependency graph + # @param [Vertex] origin + # @param [Vertex] destination + # @param [Object] requirement the requirement that this edge represents + # @return [Edge] the added edge + def add_edge(origin, destination, requirement) + if destination.path_to?(origin) + raise CircularDependencyError.new([origin, destination]) + end + add_edge_no_circular(origin, destination, requirement) + end + + # Deletes an {Edge} from the dependency graph + # @param [Edge] edge + # @return [Void] + def delete_edge(edge) + log.delete_edge(self, edge.origin.name, edge.destination.name, edge.requirement) + end + + # Sets the payload of the vertex with the given name + # @param [String] name the name of the vertex + # @param [Object] payload the payload + # @return [Void] + def set_payload(name, payload) + log.set_payload(self, name, payload) + end + + private + + # Adds a new {Edge} to the dependency graph without checking for + # circularity. 
+ # @param (see #add_edge) + # @return (see #add_edge) + def add_edge_no_circular(origin, destination, requirement) + log.add_edge_no_circular(self, origin.name, destination.name, requirement) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/action.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/action.rb new file mode 100644 index 0000000..2fe384e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/action.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module Molinillo + class DependencyGraph + # An action that modifies a {DependencyGraph} that is reversible. + # @abstract + class Action + # rubocop:disable Lint/UnusedMethodArgument + + # @return [Symbol] The name of the action. + def self.action_name + raise 'Abstract' + end + + # Performs the action on the given graph. + # @param [DependencyGraph] graph the graph to perform the action on. + # @return [Void] + def up(graph) + raise 'Abstract' + end + + # Reverses the action on the given graph. + # @param [DependencyGraph] graph the graph to reverse the action on. + # @return [Void] + def down(graph) + raise 'Abstract' + end + + # @return [Action,Nil] The previous action + attr_accessor :previous + + # @return [Action,Nil] The next action + attr_accessor :next + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/add_edge_no_circular.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/add_edge_no_circular.rb new file mode 100644 index 0000000..0738b0a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/add_edge_no_circular.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/action' +module Molinillo + class DependencyGraph + # @!visibility private + # (see DependencyGraph#add_edge_no_circular) + class AddEdgeNoCircular < Action + # @!group Action + + # (see Action.action_name) + def self.action_name + :add_vertex + end + + # (see Action#up) + def up(graph) + edge = make_edge(graph) + edge.origin.outgoing_edges << edge + edge.destination.incoming_edges << edge + edge + end + + # (see Action#down) + def down(graph) + edge = make_edge(graph) + delete_first(edge.origin.outgoing_edges, edge) + delete_first(edge.destination.incoming_edges, edge) + end + + # @!group AddEdgeNoCircular + + # @return [String] the name of the origin of the edge + attr_reader :origin + + # @return [String] the name of the destination of the edge + attr_reader :destination + + # @return [Object] the requirement that the edge represents + attr_reader :requirement + + # @param [DependencyGraph] graph the graph to find vertices from + # @return [Edge] The edge this action adds + def make_edge(graph) + Edge.new(graph.vertex_named(origin), graph.vertex_named(destination), requirement) + end + + # Initialize an action to add an edge to a dependency graph + # @param [String] origin the name of the origin of the edge + # @param [String] destination the name of the destination of the edge + # @param [Object] requirement the requirement that the edge represents + def initialize(origin, destination, requirement) + @origin = origin + @destination = destination + @requirement = requirement + end + + private + + def delete_first(array, item) + return unless index = array.index(item) + array.delete_at(index) + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/add_vertex.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/add_vertex.rb new file mode 100644 index 0000000..63a551e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/add_vertex.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/action' +module Molinillo + class DependencyGraph + # @!visibility private + # (see DependencyGraph#add_vertex) + class AddVertex < Action # :nodoc: + # @!group Action + + # (see Action.action_name) + def self.action_name + :add_vertex + end + + # (see Action#up) + def up(graph) + if existing = graph.vertices[name] + @existing_payload = existing.payload + @existing_root = existing.root + end + vertex = existing || Vertex.new(name, payload) + graph.vertices[vertex.name] = vertex + vertex.payload ||= payload + vertex.root ||= root + vertex + end + + # (see Action#down) + def down(graph) + if defined?(@existing_payload) + vertex = graph.vertices[name] + vertex.payload = @existing_payload + vertex.root = @existing_root + else + graph.vertices.delete(name) + end + end + + # @!group AddVertex + + # @return [String] the name of the vertex + attr_reader :name + + # @return [Object] the payload for the vertex + attr_reader :payload + + # @return [Boolean] whether the vertex is root or not + attr_reader :root + + # Initialize an action to add a vertex to a dependency graph + # @param [String] name the name of the vertex + # @param [Object] payload the payload for the vertex + # @param [Boolean] root whether the vertex is root or not + def initialize(name, payload, root) + @name = name + @payload = payload + @root = root + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/delete_edge.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/delete_edge.rb new file mode 100644 index 0000000..9a8d222 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/delete_edge.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/action' +module Molinillo + class DependencyGraph + # @!visibility private + # (see DependencyGraph#delete_edge) + class DeleteEdge < Action + # @!group Action + + # (see Action.action_name) + def self.action_name + :delete_edge + end + + # (see Action#up) + def up(graph) + edge = make_edge(graph) + edge.origin.outgoing_edges.delete(edge) + edge.destination.incoming_edges.delete(edge) + end + + # (see Action#down) + def down(graph) + edge = make_edge(graph) + edge.origin.outgoing_edges << edge + edge.destination.incoming_edges << edge + edge + end + + # @!group DeleteEdge + + # @return [String] the name of the origin of the edge + attr_reader :origin_name + + # @return [String] the name of the destination of the edge + attr_reader :destination_name + + # @return [Object] the requirement that the edge represents + attr_reader :requirement + + # @param [DependencyGraph] graph the graph to find vertices from + # @return [Edge] The edge this action adds + def make_edge(graph) + Edge.new( + graph.vertex_named(origin_name), + graph.vertex_named(destination_name), + requirement + ) + end + + # Initialize an action to add an edge to a dependency graph + # @param [String] origin_name the name of the origin of the edge + # @param [String] destination_name the name of the destination of the edge + # @param [Object] 
requirement the requirement that the edge represents + def initialize(origin_name, destination_name, requirement) + @origin_name = origin_name + @destination_name = destination_name + @requirement = requirement + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/detach_vertex_named.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/detach_vertex_named.rb new file mode 100644 index 0000000..a7e64fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/detach_vertex_named.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/action' +module Molinillo + class DependencyGraph + # @!visibility private + # @see DependencyGraph#detach_vertex_named + class DetachVertexNamed < Action + # @!group Action + + # (see Action#name) + def self.action_name + :add_vertex + end + + # (see Action#up) + def up(graph) + return [] unless @vertex = graph.vertices.delete(name) + + removed_vertices = [@vertex] + @vertex.outgoing_edges.each do |e| + v = e.destination + v.incoming_edges.delete(e) + if !v.root? && v.incoming_edges.empty? + removed_vertices.concat graph.detach_vertex_named(v.name) + end + end + + @vertex.incoming_edges.each do |e| + v = e.origin + v.outgoing_edges.delete(e) + end + + removed_vertices + end + + # (see Action#down) + def down(graph) + return unless @vertex + graph.vertices[@vertex.name] = @vertex + @vertex.outgoing_edges.each do |e| + e.destination.incoming_edges << e + end + @vertex.incoming_edges.each do |e| + e.origin.outgoing_edges << e + end + end + + # @!group DetachVertexNamed + + # @return [String] the name of the vertex to detach + attr_reader :name + + # Initialize an action to detach a vertex from a dependency graph + # @param [String] name the name of the vertex to detach + def initialize(name) + @name = name + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/log.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/log.rb new file mode 100644 index 0000000..6a71033 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/log.rb @@ -0,0 +1,126 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/add_edge_no_circular' +require 'molinillo/dependency_graph/add_vertex' +require 'molinillo/dependency_graph/delete_edge' +require 'molinillo/dependency_graph/detach_vertex_named' +require 'molinillo/dependency_graph/set_payload' +require 'molinillo/dependency_graph/tag' + +module Molinillo + class DependencyGraph + # A log for dependency graph actions + class Log + # Initializes an empty log + def initialize + @current_action = @first_action = nil + end + + # @!macro [new] action + # {include:DependencyGraph#$0} + # @param [Graph] graph the graph to perform the action on + # @param (see DependencyGraph#$0) + # @return (see DependencyGraph#$0) + + # @macro action + def tag(graph, tag) + push_action(graph, Tag.new(tag)) + end + + # @macro action + def add_vertex(graph, name, payload, root) + push_action(graph, AddVertex.new(name, payload, root)) + end + + # @macro action + def detach_vertex_named(graph, name) + push_action(graph, DetachVertexNamed.new(name)) + end + + # @macro action + def add_edge_no_circular(graph, origin, destination, requirement) + push_action(graph, AddEdgeNoCircular.new(origin, destination, requirement)) + end + + # 
{include:DependencyGraph#delete_edge} + # @param [Graph] graph the graph to perform the action on + # @param [String] origin_name + # @param [String] destination_name + # @param [Object] requirement + # @return (see DependencyGraph#delete_edge) + def delete_edge(graph, origin_name, destination_name, requirement) + push_action(graph, DeleteEdge.new(origin_name, destination_name, requirement)) + end + + # @macro action + def set_payload(graph, name, payload) + push_action(graph, SetPayload.new(name, payload)) + end + + # Pops the most recent action from the log and undoes the action + # @param [DependencyGraph] graph + # @return [Action] the action that was popped off the log + def pop!(graph) + return unless action = @current_action + unless @current_action = action.previous + @first_action = nil + end + action.down(graph) + action + end + + extend Enumerable + + # @!visibility private + # Enumerates each action in the log + # @yield [Action] + def each + return enum_for unless block_given? + action = @first_action + loop do + break unless action + yield action + action = action.next + end + self + end + + # @!visibility private + # Enumerates each action in the log in reverse order + # @yield [Action] + def reverse_each + return enum_for(:reverse_each) unless block_given? + action = @current_action + loop do + break unless action + yield action + action = action.previous + end + self + end + + # @macro action + def rewind_to(graph, tag) + loop do + action = pop!(graph) + raise "No tag #{tag.inspect} found" unless action + break if action.class.action_name == :tag && action.tag == tag + end + end + + private + + # Adds the given action to the log, running the action + # @param [DependencyGraph] graph + # @param [Action] action + # @return The value returned by `action.up` + def push_action(graph, action) + action.previous = @current_action + @current_action.next = action if @current_action + @current_action = action + @first_action ||= action + action.up(graph) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/set_payload.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/set_payload.rb new file mode 100644 index 0000000..ff039e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/set_payload.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/action' +module Molinillo + class DependencyGraph + # @!visibility private + # @see DependencyGraph#set_payload + class SetPayload < Action # :nodoc: + # @!group Action + + # (see Action.action_name) + def self.action_name + :set_payload + end + + # (see Action#up) + def up(graph) + vertex = graph.vertex_named(name) + @old_payload = vertex.payload + vertex.payload = payload + end + + # (see Action#down) + def down(graph) + graph.vertex_named(name).payload = @old_payload + end + + # @!group SetPayload + + # @return [String] the name of the vertex + attr_reader :name + + # @return [Object] the payload for the vertex + attr_reader :payload + + # Initialize an action to add set the payload for a vertex in a dependency + # graph + # @param [String] name the name of the vertex + # @param [Object] payload the payload for the vertex + def initialize(name, payload) + @name = name + @payload = payload + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/tag.rb 
b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/tag.rb new file mode 100644 index 0000000..b789513 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/tag.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph/action' +module Molinillo + class DependencyGraph + # @!visibility private + # @see DependencyGraph#tag + class Tag < Action + # @!group Action + + # (see Action.action_name) + def self.action_name + :tag + end + + # (see Action#up) + def up(_graph) + end + + # (see Action#down) + def down(_graph) + end + + # @!group Tag + + # @return [Object] An opaque tag + attr_reader :tag + + # Initialize an action to tag a state of a dependency graph + # @param [Object] tag an opaque tag + def initialize(tag) + @tag = tag + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/vertex.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/vertex.rb new file mode 100644 index 0000000..d396eab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/dependency_graph/vertex.rb @@ -0,0 +1,158 @@ +# frozen_string_literal: true + +module Molinillo + class DependencyGraph + # A vertex in a {DependencyGraph} that encapsulates a {#name} and a + # {#payload} + class Vertex + # @return [String] the name of the vertex + attr_accessor :name + + # @return [Object] the payload the vertex holds + attr_accessor :payload + + # @return [Array] the explicit requirements that required + # this vertex + attr_reader :explicit_requirements + + # @return [Boolean] whether the vertex is considered a root vertex + attr_accessor :root + alias root? root + + # Initializes a vertex with the given name and payload. + # @param [String] name see {#name} + # @param [Object] payload see {#payload} + def initialize(name, payload) + @name = name.frozen? ? 
name : name.dup.freeze + @payload = payload + @explicit_requirements = [] + @outgoing_edges = [] + @incoming_edges = [] + end + + # @return [Array] all of the requirements that required + # this vertex + def requirements + (incoming_edges.map(&:requirement) + explicit_requirements).uniq + end + + # @return [Array] the edges of {#graph} that have `self` as their + # {Edge#origin} + attr_accessor :outgoing_edges + + # @return [Array] the edges of {#graph} that have `self` as their + # {Edge#destination} + attr_accessor :incoming_edges + + # @return [Array] the vertices of {#graph} that have an edge with + # `self` as their {Edge#destination} + def predecessors + incoming_edges.map(&:origin) + end + + # @return [Set] the vertices of {#graph} where `self` is a + # {#descendent?} + def recursive_predecessors + _recursive_predecessors + end + + # @param [Set] vertices the set to add the predecessors to + # @return [Set] the vertices of {#graph} where `self` is a + # {#descendent?} + def _recursive_predecessors(vertices = Set.new) + incoming_edges.each do |edge| + vertex = edge.origin + next unless vertices.add?(vertex) + vertex._recursive_predecessors(vertices) + end + + vertices + end + protected :_recursive_predecessors + + # @return [Array] the vertices of {#graph} that have an edge with + # `self` as their {Edge#origin} + def successors + outgoing_edges.map(&:destination) + end + + # @return [Set] the vertices of {#graph} where `self` is an + # {#ancestor?} + def recursive_successors + _recursive_successors + end + + # @param [Set] vertices the set to add the successors to + # @return [Set] the vertices of {#graph} where `self` is an + # {#ancestor?} + def _recursive_successors(vertices = Set.new) + outgoing_edges.each do |edge| + vertex = edge.destination + next unless vertices.add?(vertex) + vertex._recursive_successors(vertices) + end + + vertices + end + protected :_recursive_successors + + # @return [String] a string suitable for debugging + def inspect + "#{self.class}:#{name}(#{payload.inspect})" + end + + # @return [Boolean] whether the two vertices are equal, determined + # by a recursive traversal of each {Vertex#successors} + def ==(other) + return true if equal?(other) + shallow_eql?(other) && + successors.to_set == other.successors.to_set + end + + # @param [Vertex] other the other vertex to compare to + # @return [Boolean] whether the two vertices are equal, determined + # solely by {#name} and {#payload} equality + def shallow_eql?(other) + return true if equal?(other) + other && + name == other.name && + payload == other.payload + end + + alias eql? == + + # @return [Fixnum] a hash for the vertex based upon its {#name} + def hash + name.hash + end + + # Is there a path from `self` to `other` following edges in the + # dependency graph? + # @return true iff there is a path following edges within this {#graph} + def path_to?(other) + _path_to?(other) + end + + alias descendent? path_to? + + # @param [Vertex] other the vertex to check if there's a path to + # @param [Set] visited the vertices of {#graph} that have been visited + # @return [Boolean] whether there is a path to `other` from `self` + def _path_to?(other, visited = Set.new) + return false unless visited.add?(self) + return true if equal?(other) + successors.any? { |v| v._path_to?(other, visited) } + end + protected :_path_to? + + # Is there a path from `other` to `self` following edges in the + # dependency graph? 
+ # @return true iff there is a path following edges within this {#graph} + def ancestor?(other) + other.path_to?(self) + end + + alias is_reachable_from? ancestor? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/errors.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/errors.rb new file mode 100644 index 0000000..27d4dcf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/errors.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true + +module Molinillo + # An error that occurred during the resolution process + class ResolverError < StandardError; end + + # An error caused by searching for a dependency that is completely unknown, + # i.e. has no versions available whatsoever. + class NoSuchDependencyError < ResolverError + # @return [Object] the dependency that could not be found + attr_accessor :dependency + + # @return [Array] the specifications that depended upon {#dependency} + attr_accessor :required_by + + # Initializes a new error with the given missing dependency. + # @param [Object] dependency @see {#dependency} + # @param [Array] required_by @see {#required_by} + def initialize(dependency, required_by = []) + @dependency = dependency + @required_by = required_by.uniq + super() + end + + # The error message for the missing dependency, including the specifications + # that had this dependency. + def message + sources = required_by.map { |r| "`#{r}`" }.join(' and ') + message = "Unable to find a specification for `#{dependency}`" + message += " depended upon by #{sources}" unless sources.empty? + message + end + end + + # An error caused by attempting to fulfil a dependency that was circular + # + # @note This exception will be thrown iff a {Vertex} is added to a + # {DependencyGraph} that has a {DependencyGraph::Vertex#path_to?} an + # existing {DependencyGraph::Vertex} + class CircularDependencyError < ResolverError + # [Set] the dependencies responsible for causing the error + attr_reader :dependencies + + # Initializes a new error with the given circular vertices. + # @param [Array] vertices the vertices in the dependency + # that caused the error + def initialize(vertices) + super "There is a circular dependency between #{vertices.map(&:name).join(' and ')}" + @dependencies = vertices.map { |vertex| vertex.payload.possibilities.last }.to_set + end + end + + # An error caused by conflicts in version + class VersionConflict < ResolverError + # @return [{String => Resolution::Conflict}] the conflicts that caused + # resolution to fail + attr_reader :conflicts + + # @return [SpecificationProvider] the specification provider used during + # resolution + attr_reader :specification_provider + + # Initializes a new error with the given version conflicts. 
+ # @param [{String => Resolution::Conflict}] conflicts see {#conflicts} + # @param [SpecificationProvider] specification_provider see {#specification_provider} + def initialize(conflicts, specification_provider) + pairs = [] + Compatibility.flat_map(conflicts.values.flatten, &:requirements).each do |conflicting| + conflicting.each do |source, conflict_requirements| + conflict_requirements.each do |c| + pairs << [c, source] + end + end + end + + super "Unable to satisfy the following requirements:\n\n" \ + "#{pairs.map { |r, d| "- `#{r}` required by `#{d}`" }.join("\n")}" + + @conflicts = conflicts + @specification_provider = specification_provider + end + + require 'molinillo/delegates/specification_provider' + include Delegates::SpecificationProvider + + # @return [String] An error message that includes requirement trees, + # which is much more detailed & customizable than the default message + # @param [Hash] opts the options to create a message with. + # @option opts [String] :solver_name The user-facing name of the solver + # @option opts [String] :possibility_type The generic name of a possibility + # @option opts [Proc] :reduce_trees A proc that reduced the list of requirement trees + # @option opts [Proc] :printable_requirement A proc that pretty-prints requirements + # @option opts [Proc] :additional_message_for_conflict A proc that appends additional + # messages for each conflict + # @option opts [Proc] :version_for_spec A proc that returns the version number for a + # possibility + def message_with_trees(opts = {}) + solver_name = opts.delete(:solver_name) { self.class.name.split('::').first } + possibility_type = opts.delete(:possibility_type) { 'possibility named' } + reduce_trees = opts.delete(:reduce_trees) { proc { |trees| trees.uniq.sort_by(&:to_s) } } + printable_requirement = opts.delete(:printable_requirement) { proc { |req| req.to_s } } + additional_message_for_conflict = opts.delete(:additional_message_for_conflict) { proc {} } + version_for_spec = opts.delete(:version_for_spec) { proc(&:to_s) } + incompatible_version_message_for_conflict = opts.delete(:incompatible_version_message_for_conflict) do + proc do |name, _conflict| + %(#{solver_name} could not find compatible versions for #{possibility_type} "#{name}":) + end + end + + conflicts.sort.reduce(''.dup) do |o, (name, conflict)| + o << "\n" << incompatible_version_message_for_conflict.call(name, conflict) << "\n" + if conflict.locked_requirement + o << %( In snapshot (#{name_for_locking_dependency_source}):\n) + o << %( #{printable_requirement.call(conflict.locked_requirement)}\n) + o << %(\n) + end + o << %( In #{name_for_explicit_dependency_source}:\n) + trees = reduce_trees.call(conflict.requirement_trees) + + o << trees.map do |tree| + t = ''.dup + depth = 2 + tree.each do |req| + t << ' ' * depth << req.to_s + unless tree.last == req + if spec = conflict.activated_by_name[name_for(req)] + t << %( was resolved to #{version_for_spec.call(spec)}, which) + end + t << %( depends on) + end + t << %(\n) + depth += 1 + end + t + end.join("\n") + + additional_message_for_conflict.call(o, name, conflict) + + o + end.strip + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/gem_metadata.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/gem_metadata.rb new file mode 100644 index 0000000..4cab281 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/gem_metadata.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +module Molinillo + # The 
version of Molinillo. + VERSION = '0.6.6'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/modules/specification_provider.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/modules/specification_provider.rb new file mode 100644 index 0000000..036009f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/modules/specification_provider.rb @@ -0,0 +1,101 @@ +# frozen_string_literal: true + +module Molinillo + # Provides information about specifcations and dependencies to the resolver, + # allowing the {Resolver} class to remain generic while still providing power + # and flexibility. + # + # This module contains the methods that users of Molinillo must to implement, + # using knowledge of their own model classes. + module SpecificationProvider + # Search for the specifications that match the given dependency. + # The specifications in the returned array will be considered in reverse + # order, so the latest version ought to be last. + # @note This method should be 'pure', i.e. the return value should depend + # only on the `dependency` parameter. + # + # @param [Object] dependency + # @return [Array] the specifications that satisfy the given + # `dependency`. + def search_for(dependency) + [] + end + + # Returns the dependencies of `specification`. + # @note This method should be 'pure', i.e. the return value should depend + # only on the `specification` parameter. + # + # @param [Object] specification + # @return [Array] the dependencies that are required by the given + # `specification`. + def dependencies_for(specification) + [] + end + + # Determines whether the given `requirement` is satisfied by the given + # `spec`, in the context of the current `activated` dependency graph. + # + # @param [Object] requirement + # @param [DependencyGraph] activated the current dependency graph in the + # resolution process. + # @param [Object] spec + # @return [Boolean] whether `requirement` is satisfied by `spec` in the + # context of the current `activated` dependency graph. + def requirement_satisfied_by?(requirement, activated, spec) + true + end + + # Returns the name for the given `dependency`. + # @note This method should be 'pure', i.e. the return value should depend + # only on the `dependency` parameter. + # + # @param [Object] dependency + # @return [String] the name for the given `dependency`. + def name_for(dependency) + dependency.to_s + end + + # @return [String] the name of the source of explicit dependencies, i.e. + # those passed to {Resolver#resolve} directly. + def name_for_explicit_dependency_source + 'user-specified dependency' + end + + # @return [String] the name of the source of 'locked' dependencies, i.e. + # those passed to {Resolver#resolve} directly as the `base` + def name_for_locking_dependency_source + 'Lockfile' + end + + # Sort dependencies so that the ones that are easiest to resolve are first. + # Easiest to resolve is (usually) defined by: + # 1) Is this dependency already activated? + # 2) How relaxed are the requirements? + # 3) Are there any conflicts for this dependency? + # 4) How many possibilities are there to satisfy this dependency? + # + # @param [Array] dependencies + # @param [DependencyGraph] activated the current dependency graph in the + # resolution process. + # @param [{String => Array}] conflicts + # @return [Array] a sorted copy of `dependencies`. 
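Since the module above is the main integration point for a client, here is a minimal, self-contained sketch of what an implementation might look like. The Spec struct, the in-memory INDEX, and the choice of Gem::Dependency as the requirement object are assumptions made only for this example; a real client plugs in its own model classes.

require 'molinillo'
require 'rubygems'

Spec = Struct.new(:name, :version, :dependencies) do
  def to_s
    "#{name}-#{version}"
  end
end

# Tiny in-memory index standing in for a real source of specifications.
INDEX = {
  'json' => [Spec.new('json', Gem::Version.new('2.3.1'), []),
             Spec.new('json', Gem::Version.new('2.6.1'), [])],
  'app'  => [Spec.new('app', Gem::Version.new('1.0.0'),
                      [Gem::Dependency.new('json', '>= 2.0')])],
}.freeze

class ToyProvider
  include Molinillo::SpecificationProvider

  # Return candidates oldest-first: the resolver prefers the *last* element.
  def search_for(dependency)
    (INDEX[dependency.name] || []).sort_by(&:version)
  end

  def dependencies_for(specification)
    specification.dependencies
  end

  def requirement_satisfied_by?(requirement, _activated, spec)
    requirement.requirement.satisfied_by?(spec.version)
  end

  def name_for(dependency)
    dependency.name
  end
end

The defaults for sort_dependencies, allow_missing? and the two name_for_* methods are inherited from the module unchanged.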
+ def sort_dependencies(dependencies, activated, conflicts) + dependencies.sort_by do |dependency| + name = name_for(dependency) + [ + activated.vertex_named(name).payload ? 0 : 1, + conflicts[name] ? 0 : 1, + ] + end + end + + # Returns whether this dependency, which has no possible matching + # specifications, can safely be ignored. + # + # @param [Object] dependency + # @return [Boolean] whether this dependency can safely be skipped. + def allow_missing?(dependency) + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/modules/ui.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/modules/ui.rb new file mode 100644 index 0000000..6175c5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/modules/ui.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module Molinillo + # Conveys information about the resolution process to a user. + module UI + # The {IO} object that should be used to print output. `STDOUT`, by default. + # + # @return [IO] + def output + STDOUT + end + + # Called roughly every {#progress_rate}, this method should convey progress + # to the user. + # + # @return [void] + def indicate_progress + output.print '.' unless debug? + end + + # How often progress should be conveyed to the user via + # {#indicate_progress}, in seconds. A third of a second, by default. + # + # @return [Float] + def progress_rate + 0.33 + end + + # Called before resolution begins. + # + # @return [void] + def before_resolution + output.print 'Resolving dependencies...' + end + + # Called after resolution ends (either successfully or with an error). + # By default, prints a newline. + # + # @return [void] + def after_resolution + output.puts + end + + # Conveys debug information to the user. + # + # @param [Integer] depth the current depth of the resolution process. + # @return [void] + def debug(depth = 0) + if debug? + debug_info = yield + debug_info = debug_info.inspect unless debug_info.is_a?(String) + debug_info = debug_info.split("\n").map { |s| ":#{depth.to_s.rjust 4}: #{s}" } + output.puts debug_info + end + end + + # Whether or not debug messages should be printed. + # By default, whether or not the `MOLINILLO_DEBUG` environment variable is + # set. + # + # @return [Boolean] + def debug? + return @debug_mode if defined?(@debug_mode) + @debug_mode = ENV['MOLINILLO_DEBUG'] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/resolution.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/resolution.rb new file mode 100644 index 0000000..4854bb2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/resolution.rb @@ -0,0 +1,837 @@ +# frozen_string_literal: true + +module Molinillo + class Resolver + # A specific resolution from a given {Resolver} + class Resolution + # A conflict that the resolution process encountered + # @attr [Object] requirement the requirement that immediately led to the conflict + # @attr [{String,Nil=>[Object]}] requirements the requirements that caused the conflict + # @attr [Object, nil] existing the existing spec that was in conflict with + # the {#possibility} + # @attr [Object] possibility_set the set of specs that was unable to be + # activated due to a conflict. + # @attr [Object] locked_requirement the relevant locking requirement. + # @attr [Array>] requirement_trees the different requirement + # trees that led to every requirement for the conflicting name. 
+ # @attr [{String=>Object}] activated_by_name the already-activated specs. + # @attr [Object] underlying_error an error that has occurred during resolution, and + # will be raised at the end of it if no resolution is found. + Conflict = Struct.new( + :requirement, + :requirements, + :existing, + :possibility_set, + :locked_requirement, + :requirement_trees, + :activated_by_name, + :underlying_error + ) + + class Conflict + # @return [Object] a spec that was unable to be activated due to a conflict + def possibility + possibility_set && possibility_set.latest_version + end + end + + # A collection of possibility states that share the same dependencies + # @attr [Array] dependencies the dependencies for this set of possibilities + # @attr [Array] possibilities the possibilities + PossibilitySet = Struct.new(:dependencies, :possibilities) + + class PossibilitySet + # String representation of the possibility set, for debugging + def to_s + "[#{possibilities.join(', ')}]" + end + + # @return [Object] most up-to-date dependency in the possibility set + def latest_version + possibilities.last + end + end + + # Details of the state to unwind to when a conflict occurs, and the cause of the unwind + # @attr [Integer] state_index the index of the state to unwind to + # @attr [Object] state_requirement the requirement of the state we're unwinding to + # @attr [Array] requirement_tree for the requirement we're relaxing + # @attr [Array] conflicting_requirements the requirements that combined to cause the conflict + # @attr [Array] requirement_trees for the conflict + # @attr [Array] requirements_unwound_to_instead array of unwind requirements that were chosen over this unwind + UnwindDetails = Struct.new( + :state_index, + :state_requirement, + :requirement_tree, + :conflicting_requirements, + :requirement_trees, + :requirements_unwound_to_instead + ) + + class UnwindDetails + include Comparable + + # We compare UnwindDetails when choosing which state to unwind to. If + # two options have the same state_index we prefer the one most + # removed from a requirement that caused the conflict. Both options + # would unwind to the same state, but a `grandparent` option will + # filter out fewer of its possibilities after doing so - where a state + # is both a `parent` and a `grandparent` to requirements that have + # caused a conflict this is the correct behaviour. + # @param [UnwindDetail] other UnwindDetail to be compared + # @return [Integer] integer specifying ordering + def <=>(other) + if state_index > other.state_index + 1 + elsif state_index == other.state_index + reversed_requirement_tree_index <=> other.reversed_requirement_tree_index + else + -1 + end + end + + # @return [Integer] index of state requirement in reversed requirement tree + # (the conflicting requirement itself will be at position 0) + def reversed_requirement_tree_index + @reversed_requirement_tree_index ||= + if state_requirement + requirement_tree.reverse.index(state_requirement) + else + 999_999 + end + end + + # @return [Boolean] where the requirement of the state we're unwinding + # to directly caused the conflict. Note: in this case, it is + # impossible for the state we're unwinding to to be a parent of + # any of the other conflicting requirements (or we would have + # circularity) + def unwinding_to_primary_requirement? + requirement_tree.last == state_requirement + end + + # @return [Array] array of sub-dependencies to avoid when choosing a + # new possibility for the state we've unwound to. 
Only relevant for + # non-primary unwinds + def sub_dependencies_to_avoid + @requirements_to_avoid ||= + requirement_trees.map do |tree| + index = tree.index(state_requirement) + tree[index + 1] if index + end.compact + end + + # @return [Array] array of all the requirements that led to the need for + # this unwind + def all_requirements + @all_requirements ||= requirement_trees.flatten(1) + end + end + + # @return [SpecificationProvider] the provider that knows about + # dependencies, requirements, specifications, versions, etc. + attr_reader :specification_provider + + # @return [UI] the UI that knows how to communicate feedback about the + # resolution process back to the user + attr_reader :resolver_ui + + # @return [DependencyGraph] the base dependency graph to which + # dependencies should be 'locked' + attr_reader :base + + # @return [Array] the dependencies that were explicitly required + attr_reader :original_requested + + # Initializes a new resolution. + # @param [SpecificationProvider] specification_provider + # see {#specification_provider} + # @param [UI] resolver_ui see {#resolver_ui} + # @param [Array] requested see {#original_requested} + # @param [DependencyGraph] base see {#base} + def initialize(specification_provider, resolver_ui, requested, base) + @specification_provider = specification_provider + @resolver_ui = resolver_ui + @original_requested = requested + @base = base + @states = [] + @iteration_counter = 0 + @parents_of = Hash.new { |h, k| h[k] = [] } + end + + # Resolves the {#original_requested} dependencies into a full dependency + # graph + # @raise [ResolverError] if successful resolution is impossible + # @return [DependencyGraph] the dependency graph of successfully resolved + # dependencies + def resolve + start_resolution + + while state + break if !state.requirement && state.requirements.empty? + indicate_progress + if state.respond_to?(:pop_possibility_state) # DependencyState + debug(depth) { "Creating possibility state for #{requirement} (#{possibilities.count} remaining)" } + state.pop_possibility_state.tap do |s| + if s + states.push(s) + activated.tag(s) + end + end + end + process_topmost_state + end + + resolve_activated_specs + ensure + end_resolution + end + + # @return [Integer] the number of resolver iterations in between calls to + # {#resolver_ui}'s {UI#indicate_progress} method + attr_accessor :iteration_rate + private :iteration_rate + + # @return [Time] the time at which resolution began + attr_accessor :started_at + private :started_at + + # @return [Array] the stack of states for the resolution + attr_accessor :states + private :states + + private + + # Sets up the resolution process + # @return [void] + def start_resolution + @started_at = Time.now + + handle_missing_or_push_dependency_state(initial_state) + + debug { "Starting resolution (#{@started_at})\nUser-requested dependencies: #{original_requested}" } + resolver_ui.before_resolution + end + + def resolve_activated_specs + activated.vertices.each do |_, vertex| + next unless vertex.payload + + latest_version = vertex.payload.possibilities.reverse_each.find do |possibility| + vertex.requirements.all? 
{ |req| requirement_satisfied_by?(req, activated, possibility) } + end + + activated.set_payload(vertex.name, latest_version) + end + activated.freeze + end + + # Ends the resolution process + # @return [void] + def end_resolution + resolver_ui.after_resolution + debug do + "Finished resolution (#{@iteration_counter} steps) " \ + "(Took #{(ended_at = Time.now) - @started_at} seconds) (#{ended_at})" + end + debug { 'Unactivated: ' + Hash[activated.vertices.reject { |_n, v| v.payload }].keys.join(', ') } if state + debug { 'Activated: ' + Hash[activated.vertices.select { |_n, v| v.payload }].keys.join(', ') } if state + end + + require 'molinillo/state' + require 'molinillo/modules/specification_provider' + + require 'molinillo/delegates/resolution_state' + require 'molinillo/delegates/specification_provider' + + include Molinillo::Delegates::ResolutionState + include Molinillo::Delegates::SpecificationProvider + + # Processes the topmost available {RequirementState} on the stack + # @return [void] + def process_topmost_state + if possibility + attempt_to_activate + else + create_conflict + unwind_for_conflict + end + rescue CircularDependencyError => underlying_error + create_conflict(underlying_error) + unwind_for_conflict + end + + # @return [Object] the current possibility that the resolution is trying + # to activate + def possibility + possibilities.last + end + + # @return [RequirementState] the current state the resolution is + # operating upon + def state + states.last + end + + # Creates the initial state for the resolution, based upon the + # {#requested} dependencies + # @return [DependencyState] the initial state for the resolution + def initial_state + graph = DependencyGraph.new.tap do |dg| + original_requested.each do |requested| + vertex = dg.add_vertex(name_for(requested), nil, true) + vertex.explicit_requirements << requested + end + dg.tag(:initial_state) + end + + requirements = sort_dependencies(original_requested, graph, {}) + initial_requirement = requirements.shift + DependencyState.new( + initial_requirement && name_for(initial_requirement), + requirements, + graph, + initial_requirement, + possibilities_for_requirement(initial_requirement, graph), + 0, + {}, + [] + ) + end + + # Unwinds the states stack because a conflict has been encountered + # @return [void] + def unwind_for_conflict + details_for_unwind = build_details_for_unwind + unwind_options = unused_unwind_options + debug(depth) { "Unwinding for conflict: #{requirement} to #{details_for_unwind.state_index / 2}" } + conflicts.tap do |c| + sliced_states = states.slice!((details_for_unwind.state_index + 1)..-1) + raise_error_unless_state(c) + activated.rewind_to(sliced_states.first || :initial_state) if sliced_states + state.conflicts = c + state.unused_unwind_options = unwind_options + filter_possibilities_after_unwind(details_for_unwind) + index = states.size - 1 + @parents_of.each { |_, a| a.reject! { |i| i >= index } } + state.unused_unwind_options.reject! 
{ |uw| uw.state_index >= index } + end + end + + # Raises a VersionConflict error, or any underlying error, if there is no + # current state + # @return [void] + def raise_error_unless_state(conflicts) + return if state + + error = conflicts.values.map(&:underlying_error).compact.first + raise error || VersionConflict.new(conflicts, specification_provider) + end + + # @return [UnwindDetails] Details of the nearest index to which we could unwind + def build_details_for_unwind + # Get the possible unwinds for the current conflict + current_conflict = conflicts[name] + binding_requirements = binding_requirements_for_conflict(current_conflict) + unwind_details = unwind_options_for_requirements(binding_requirements) + + last_detail_for_current_unwind = unwind_details.sort.last + current_detail = last_detail_for_current_unwind + + # Look for past conflicts that could be unwound to affect the + # requirement tree for the current conflict + relevant_unused_unwinds = unused_unwind_options.select do |alternative| + intersecting_requirements = + last_detail_for_current_unwind.all_requirements & + alternative.requirements_unwound_to_instead + next if intersecting_requirements.empty? + # Find the highest index unwind whilst looping through + current_detail = alternative if alternative > current_detail + alternative + end + + # Add the current unwind options to the `unused_unwind_options` array. + # The "used" option will be filtered out during `unwind_for_conflict`. + state.unused_unwind_options += unwind_details.reject { |detail| detail.state_index == -1 } + + # Update the requirements_unwound_to_instead on any relevant unused unwinds + relevant_unused_unwinds.each { |d| d.requirements_unwound_to_instead << current_detail.state_requirement } + unwind_details.each { |d| d.requirements_unwound_to_instead << current_detail.state_requirement } + + current_detail + end + + # @param [Array] array of requirements that combine to create a conflict + # @return [Array] array of UnwindDetails that have a chance + # of resolving the passed requirements + def unwind_options_for_requirements(binding_requirements) + unwind_details = [] + + trees = [] + binding_requirements.reverse_each do |r| + partial_tree = [r] + trees << partial_tree + unwind_details << UnwindDetails.new(-1, nil, partial_tree, binding_requirements, trees, []) + + # If this requirement has alternative possibilities, check if any would + # satisfy the other requirements that created this conflict + requirement_state = find_state_for(r) + if conflict_fixing_possibilities?(requirement_state, binding_requirements) + unwind_details << UnwindDetails.new( + states.index(requirement_state), + r, + partial_tree, + binding_requirements, + trees, + [] + ) + end + + # Next, look at the parent of this requirement, and check if the requirement + # could have been avoided if an alternative PossibilitySet had been chosen + parent_r = parent_of(r) + next if parent_r.nil? + partial_tree.unshift(parent_r) + requirement_state = find_state_for(parent_r) + if requirement_state.possibilities.any? { |set| !set.dependencies.include?(r) } + unwind_details << UnwindDetails.new( + states.index(requirement_state), + parent_r, + partial_tree, + binding_requirements, + trees, + [] + ) + end + + # Finally, look at the grandparent and up of this requirement, looking + # for any possibilities that wouldn't create their parent requirement + grandparent_r = parent_of(parent_r) + until grandparent_r.nil? 
+ partial_tree.unshift(grandparent_r) + requirement_state = find_state_for(grandparent_r) + if requirement_state.possibilities.any? { |set| !set.dependencies.include?(parent_r) } + unwind_details << UnwindDetails.new( + states.index(requirement_state), + grandparent_r, + partial_tree, + binding_requirements, + trees, + [] + ) + end + parent_r = grandparent_r + grandparent_r = parent_of(parent_r) + end + end + + unwind_details + end + + # @param [DependencyState] state + # @param [Array] array of requirements + # @return [Boolean] whether or not the given state has any possibilities + # that could satisfy the given requirements + def conflict_fixing_possibilities?(state, binding_requirements) + return false unless state + + state.possibilities.any? do |possibility_set| + possibility_set.possibilities.any? do |poss| + possibility_satisfies_requirements?(poss, binding_requirements) + end + end + end + + # Filter's a state's possibilities to remove any that would not fix the + # conflict we've just rewound from + # @param [UnwindDetails] details of the conflict just unwound from + # @return [void] + def filter_possibilities_after_unwind(unwind_details) + return unless state && !state.possibilities.empty? + + if unwind_details.unwinding_to_primary_requirement? + filter_possibilities_for_primary_unwind(unwind_details) + else + filter_possibilities_for_parent_unwind(unwind_details) + end + end + + # Filter's a state's possibilities to remove any that would not satisfy + # the requirements in the conflict we've just rewound from + # @param [UnwindDetails] details of the conflict just unwound from + # @return [void] + def filter_possibilities_for_primary_unwind(unwind_details) + unwinds_to_state = unused_unwind_options.select { |uw| uw.state_index == unwind_details.state_index } + unwinds_to_state << unwind_details + unwind_requirement_sets = unwinds_to_state.map(&:conflicting_requirements) + + state.possibilities.reject! do |possibility_set| + possibility_set.possibilities.none? do |poss| + unwind_requirement_sets.any? do |requirements| + possibility_satisfies_requirements?(poss, requirements) + end + end + end + end + + # @param [Object] possibility a single possibility + # @param [Array] requirements an array of requirements + # @return [Boolean] whether the possibility satisfies all of the + # given requirements + def possibility_satisfies_requirements?(possibility, requirements) + name = name_for(possibility) + + activated.tag(:swap) + activated.set_payload(name, possibility) if activated.vertex_named(name) + satisfied = requirements.all? { |r| requirement_satisfied_by?(r, activated, possibility) } + activated.rewind_to(:swap) + + satisfied + end + + # Filter's a state's possibilities to remove any that would (eventually) + # create a requirement in the conflict we've just rewound from + # @param [UnwindDetails] details of the conflict just unwound from + # @return [void] + def filter_possibilities_for_parent_unwind(unwind_details) + unwinds_to_state = unused_unwind_options.select { |uw| uw.state_index == unwind_details.state_index } + unwinds_to_state << unwind_details + + primary_unwinds = unwinds_to_state.select(&:unwinding_to_primary_requirement?).uniq + parent_unwinds = unwinds_to_state.uniq - primary_unwinds + + allowed_possibility_sets = Compatibility.flat_map(primary_unwinds) do |unwind| + states[unwind.state_index].possibilities.select do |possibility_set| + possibility_set.possibilities.any? 
do |poss| + possibility_satisfies_requirements?(poss, unwind.conflicting_requirements) + end + end + end + + requirements_to_avoid = Compatibility.flat_map(parent_unwinds, &:sub_dependencies_to_avoid) + + state.possibilities.reject! do |possibility_set| + !allowed_possibility_sets.include?(possibility_set) && + (requirements_to_avoid - possibility_set.dependencies).empty? + end + end + + # @param [Conflict] conflict + # @return [Array] minimal array of requirements that would cause the passed + # conflict to occur. + def binding_requirements_for_conflict(conflict) + return [conflict.requirement] if conflict.possibility.nil? + + possible_binding_requirements = conflict.requirements.values.flatten(1).uniq + + # When there’s a `CircularDependency` error the conflicting requirement + # (the one causing the circular) won’t be `conflict.requirement` + # (which won’t be for the right state, because we won’t have created it, + # because it’s circular). + # We need to make sure we have that requirement in the conflict’s list, + # otherwise we won’t be able to unwind properly, so we just return all + # the requirements for the conflict. + return possible_binding_requirements if conflict.underlying_error + + possibilities = search_for(conflict.requirement) + + # If all the requirements together don't filter out all possibilities, + # then the only two requirements we need to consider are the initial one + # (where the dependency's version was first chosen) and the last + if binding_requirement_in_set?(nil, possible_binding_requirements, possibilities) + return [conflict.requirement, requirement_for_existing_name(name_for(conflict.requirement))].compact + end + + # Loop through the possible binding requirements, removing each one + # that doesn't bind. Use a `reverse_each` as we want the earliest set of + # binding requirements, and don't use `reject!` as we wish to refine the + # array *on each iteration*. + binding_requirements = possible_binding_requirements.dup + possible_binding_requirements.reverse_each do |req| + next if req == conflict.requirement + unless binding_requirement_in_set?(req, binding_requirements, possibilities) + binding_requirements -= [req] + end + end + + binding_requirements + end + + # @param [Object] requirement we wish to check + # @param [Array] array of requirements + # @param [Array] array of possibilities the requirements will be used to filter + # @return [Boolean] whether or not the given requirement is required to filter + # out all elements of the array of possibilities. + def binding_requirement_in_set?(requirement, possible_binding_requirements, possibilities) + possibilities.any? do |poss| + possibility_satisfies_requirements?(poss, possible_binding_requirements - [requirement]) + end + end + + # @return [Object] the requirement that led to `requirement` being added + # to the list of requirements. + def parent_of(requirement) + return unless requirement + return unless index = @parents_of[requirement].last + return unless parent_state = @states[index] + parent_state.requirement + end + + # @return [Object] the requirement that led to a version of a possibility + # with the given name being activated. + def requirement_for_existing_name(name) + return nil unless vertex = activated.vertex_named(name) + return nil unless vertex.payload + states.find { |s| s.name == name }.requirement + end + + # @return [ResolutionState] the state whose `requirement` is the given + # `requirement`. 
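When none of the unwinds computed here can avoid the accumulated conflicts, raise_error_unless_state surfaces a VersionConflict. Purely as an illustration of how a caller might present it, using the message_with_trees options documented in errors.rb earlier in this diff; the resolver object and the solver/possibility names are assumptions:

begin
  resolver.resolve(requested)   # a Molinillo::Resolver built with your own provider and UI
rescue Molinillo::VersionConflict => e
  warn e.message_with_trees(solver_name: 'MyTool', possibility_type: 'gem')
end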
+ def find_state_for(requirement) + return nil unless requirement + states.find { |i| requirement == i.requirement } + end + + # @return [Conflict] a {Conflict} that reflects the failure to activate + # the {#possibility} in conjunction with the current {#state} + def create_conflict(underlying_error = nil) + vertex = activated.vertex_named(name) + locked_requirement = locked_requirement_named(name) + + requirements = {} + unless vertex.explicit_requirements.empty? + requirements[name_for_explicit_dependency_source] = vertex.explicit_requirements + end + requirements[name_for_locking_dependency_source] = [locked_requirement] if locked_requirement + vertex.incoming_edges.each do |edge| + (requirements[edge.origin.payload.latest_version] ||= []).unshift(edge.requirement) + end + + activated_by_name = {} + activated.each { |v| activated_by_name[v.name] = v.payload.latest_version if v.payload } + conflicts[name] = Conflict.new( + requirement, + requirements, + vertex.payload && vertex.payload.latest_version, + possibility, + locked_requirement, + requirement_trees, + activated_by_name, + underlying_error + ) + end + + # @return [Array>] The different requirement + # trees that led to every requirement for the current spec. + def requirement_trees + vertex = activated.vertex_named(name) + vertex.requirements.map { |r| requirement_tree_for(r) } + end + + # @return [Array] the list of requirements that led to + # `requirement` being required. + def requirement_tree_for(requirement) + tree = [] + while requirement + tree.unshift(requirement) + requirement = parent_of(requirement) + end + tree + end + + # Indicates progress roughly once every second + # @return [void] + def indicate_progress + @iteration_counter += 1 + @progress_rate ||= resolver_ui.progress_rate + if iteration_rate.nil? + if Time.now - started_at >= @progress_rate + self.iteration_rate = @iteration_counter + end + end + + if iteration_rate && (@iteration_counter % iteration_rate) == 0 + resolver_ui.indicate_progress + end + end + + # Calls the {#resolver_ui}'s {UI#debug} method + # @param [Integer] depth the depth of the {#states} stack + # @param [Proc] block a block that yields a {#to_s} + # @return [void] + def debug(depth = 0, &block) + resolver_ui.debug(depth, &block) + end + + # Attempts to activate the current {#possibility} + # @return [void] + def attempt_to_activate + debug(depth) { 'Attempting to activate ' + possibility.to_s } + existing_vertex = activated.vertex_named(name) + if existing_vertex.payload + debug(depth) { "Found existing spec (#{existing_vertex.payload})" } + attempt_to_filter_existing_spec(existing_vertex) + else + latest = possibility.latest_version + # use reject!(!satisfied) for 1.8.7 compatibility + possibility.possibilities.reject! do |possibility| + !requirement_satisfied_by?(requirement, activated, possibility) + end + if possibility.latest_version.nil? + # ensure there's a possibility for better error messages + possibility.possibilities << latest if latest + create_conflict + unwind_for_conflict + else + activate_new_spec + end + end + end + + # Attempts to update the existing vertex's `PossibilitySet` with a filtered version + # @return [void] + def attempt_to_filter_existing_spec(vertex) + filtered_set = filtered_possibility_set(vertex) + if !filtered_set.possibilities.empty? 
+ activated.set_payload(name, filtered_set) + new_requirements = requirements.dup + push_state_for_requirements(new_requirements, false) + else + create_conflict + debug(depth) { "Unsatisfied by existing spec (#{vertex.payload})" } + unwind_for_conflict + end + end + + # Generates a filtered version of the existing vertex's `PossibilitySet` using the + # current state's `requirement` + # @param [Object] existing vertex + # @return [PossibilitySet] filtered possibility set + def filtered_possibility_set(vertex) + PossibilitySet.new(vertex.payload.dependencies, vertex.payload.possibilities & possibility.possibilities) + end + + # @param [String] requirement_name the spec name to search for + # @return [Object] the locked spec named `requirement_name`, if one + # is found on {#base} + def locked_requirement_named(requirement_name) + vertex = base.vertex_named(requirement_name) + vertex && vertex.payload + end + + # Add the current {#possibility} to the dependency graph of the current + # {#state} + # @return [void] + def activate_new_spec + conflicts.delete(name) + debug(depth) { "Activated #{name} at #{possibility}" } + activated.set_payload(name, possibility) + require_nested_dependencies_for(possibility) + end + + # Requires the dependencies that the recently activated spec has + # @param [Object] activated_possibility the PossibilitySet that has just been + # activated + # @return [void] + def require_nested_dependencies_for(possibility_set) + nested_dependencies = dependencies_for(possibility_set.latest_version) + debug(depth) { "Requiring nested dependencies (#{nested_dependencies.join(', ')})" } + nested_dependencies.each do |d| + activated.add_child_vertex(name_for(d), nil, [name_for(possibility_set.latest_version)], d) + parent_index = states.size - 1 + parents = @parents_of[d] + parents << parent_index if parents.empty? + end + + push_state_for_requirements(requirements + nested_dependencies, !nested_dependencies.empty?) + end + + # Pushes a new {DependencyState} that encapsulates both existing and new + # requirements + # @param [Array] new_requirements + # @return [void] + def push_state_for_requirements(new_requirements, requires_sort = true, new_activated = activated) + new_requirements = sort_dependencies(new_requirements.uniq, new_activated, conflicts) if requires_sort + new_requirement = nil + loop do + new_requirement = new_requirements.shift + break if new_requirement.nil? || states.none? { |s| s.requirement == new_requirement } + end + new_name = new_requirement ? name_for(new_requirement) : ''.freeze + possibilities = possibilities_for_requirement(new_requirement) + handle_missing_or_push_dependency_state DependencyState.new( + new_name, new_requirements, new_activated, + new_requirement, possibilities, depth, conflicts.dup, unused_unwind_options.dup + ) + end + + # Checks a proposed requirement with any existing locked requirement + # before generating an array of possibilities for it. 
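To make the locking behaviour described here concrete: a "locked" spec is simply whatever payload the client stores on the corresponding vertex of the base graph, and it only needs to be something the client's own requirement_satisfied_by? understands. A sketch reusing the Gem::Dependency-based toy provider from earlier; the pinned version is arbitrary:

locked = Molinillo::DependencyGraph.new
# The payload acts as the locked requirement for `json`; pin it to 2.3.1.
locked.add_vertex('json', Gem::Dependency.new('json', '= 2.3.1'), true)

# Passed as the `base` argument to resolve, possibilities_for_requirement will
# only offer possibilities that satisfy this payload.
# resolver.resolve([Gem::Dependency.new('app', '>= 0')], locked)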
+ # @param [Object] the proposed requirement + # @return [Array] possibilities + def possibilities_for_requirement(requirement, activated = self.activated) + return [] unless requirement + if locked_requirement_named(name_for(requirement)) + return locked_requirement_possibility_set(requirement, activated) + end + + group_possibilities(search_for(requirement)) + end + + # @param [Object] the proposed requirement + # @return [Array] possibility set containing only the locked requirement, if any + def locked_requirement_possibility_set(requirement, activated = self.activated) + all_possibilities = search_for(requirement) + locked_requirement = locked_requirement_named(name_for(requirement)) + + # Longwinded way to build a possibilities array with either the locked + # requirement or nothing in it. Required, since the API for + # locked_requirement isn't guaranteed. + locked_possibilities = all_possibilities.select do |possibility| + requirement_satisfied_by?(locked_requirement, activated, possibility) + end + + group_possibilities(locked_possibilities) + end + + # Build an array of PossibilitySets, with each element representing a group of + # dependency versions that all have the same sub-dependency version constraints + # and are contiguous. + # @param [Array] an array of possibilities + # @return [Array] an array of possibility sets + def group_possibilities(possibilities) + possibility_sets = [] + current_possibility_set = nil + + possibilities.reverse_each do |possibility| + dependencies = dependencies_for(possibility) + if current_possibility_set && current_possibility_set.dependencies == dependencies + current_possibility_set.possibilities.unshift(possibility) + else + possibility_sets.unshift(PossibilitySet.new(dependencies, [possibility])) + current_possibility_set = possibility_sets.first + end + end + + possibility_sets + end + + # Pushes a new {DependencyState}. + # If the {#specification_provider} says to + # {SpecificationProvider#allow_missing?} that particular requirement, and + # there are no possibilities for that requirement, then `state` is not + # pushed, and the vertex in {#activated} is removed, and we continue + # resolving the remaining requirements. + # @param [DependencyState] state + # @return [void] + def handle_missing_or_push_dependency_state(state) + if state.requirement && state.possibilities.empty? && allow_missing?(state.requirement) + state.activated.detach_vertex_named(state.name) + push_state_for_requirements(state.requirements.dup, false, state.activated) + else + states.push(state).tap { activated.tag(state) } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/resolver.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/resolver.rb new file mode 100644 index 0000000..a2e09c4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/resolver.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require 'molinillo/dependency_graph' + +module Molinillo + # This class encapsulates a dependency resolver. 
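Tying the pieces together, an end-to-end call through the Resolver class might look like the following sketch. It reuses the ToyProvider and INDEX from the SpecificationProvider example above; QuietUI and the expected output are illustrative assumptions, not part of the vendored source.

require 'molinillo'

class QuietUI
  include Molinillo::UI
  # Keep the default progress output but never print debug information.
  def debug?
    false
  end
end

resolver = Molinillo::Resolver.new(ToyProvider.new, QuietUI.new)
graph    = resolver.resolve([Gem::Dependency.new('app', '>= 0')])

graph.vertices.each_value do |vertex|
  puts "#{vertex.name} -> #{vertex.payload}" if vertex.payload
end
#=> app -> app-1.0.0
#=> json -> json-2.6.1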
+ # The resolver is responsible for determining which set of dependencies to + # activate, with feedback from the {#specification_provider} + # + # + class Resolver + require 'molinillo/resolution' + + # @return [SpecificationProvider] the specification provider used + # in the resolution process + attr_reader :specification_provider + + # @return [UI] the UI module used to communicate back to the user + # during the resolution process + attr_reader :resolver_ui + + # Initializes a new resolver. + # @param [SpecificationProvider] specification_provider + # see {#specification_provider} + # @param [UI] resolver_ui + # see {#resolver_ui} + def initialize(specification_provider, resolver_ui) + @specification_provider = specification_provider + @resolver_ui = resolver_ui + end + + # Resolves the requested dependencies into a {DependencyGraph}, + # locking to the base dependency graph (if specified) + # @param [Array] requested an array of 'requested' dependencies that the + # {#specification_provider} can understand + # @param [DependencyGraph,nil] base the base dependency graph to which + # dependencies should be 'locked' + def resolve(requested, base = DependencyGraph.new) + Resolution.new(specification_provider, + resolver_ui, + requested, + base). + resolve + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/state.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/state.rb new file mode 100644 index 0000000..d75759b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.6.6/lib/molinillo/state.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Molinillo + # A state that a {Resolution} can be in + # @attr [String] name the name of the current requirement + # @attr [Array] requirements currently unsatisfied requirements + # @attr [DependencyGraph] activated the graph of activated dependencies + # @attr [Object] requirement the current requirement + # @attr [Object] possibilities the possibilities to satisfy the current requirement + # @attr [Integer] depth the depth of the resolution + # @attr [Hash] conflicts unresolved conflicts, indexed by dependency name + # @attr [Array] unused_unwind_options unwinds for previous conflicts that weren't explored + ResolutionState = Struct.new( + :name, + :requirements, + :activated, + :requirement, + :possibilities, + :depth, + :conflicts, + :unused_unwind_options + ) + + class ResolutionState + # Returns an empty resolution state + # @return [ResolutionState] an empty state + def self.empty + new(nil, [], DependencyGraph.new, nil, nil, 0, {}, []) + end + end + + # A state that encapsulates a set of {#requirements} with an {Array} of + # possibilities + class DependencyState < ResolutionState + # Removes a possibility from `self` + # @return [PossibilityState] a state with a single possibility, + # the possibility that was removed from `self` + def pop_possibility_state + PossibilityState.new( + name, + requirements.dup, + activated, + requirement, + [possibilities.pop], + depth + 1, + conflicts.dup, + unused_unwind_options.dup + ).tap do |state| + state.activated.tag(state) + end + end + end + + # A state that encapsulates a single possibility to fulfill the given + # {#requirement} + class PossibilityState < ResolutionState + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/ARCHITECTURE.md b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/ARCHITECTURE.md new file mode 100644 index 0000000..528fa83 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/ARCHITECTURE.md @@ -0,0 +1,102 @@ +# Molinillo Architecture + +At the highest level, Molinillo is a dependency resolution algorithm. +You hand the `Resolver` a list of dependencies and a 'locking' `DependencyGraph`, and you get a resulting dependency graph out of that. +In order to guarantee that the list of dependencies is properly resolved, however, an algorithm is required that is smarter than just walking the list of dependencies and activating each, and its own dependencies, in turn. + +## Backtracking + +At the heart of Molinillo is a [backtracking](http://en.wikipedia.org/wiki/Backtracking) algorithm with [forward checking](http://en.wikipedia.org/wiki/Look-ahead_(backtracking)). +Essentially, the resolution process keeps track of two types of states (dependency and possibility) in a stack. +If that stack is ever exhausted, resolution was impossible. +New states are pushed onto the stack for every dependency, and every time a dependency is successfully 'activated' a new state is pushed onto the stack that represents that activation. +This stack-based approach is used because backtracking (also known as *unwinding*) becomes as simple as popping a state off that stack. + +### Walkthrough + +1. The client initializes a `Resolver` with a `SpecificationProvider` and `UI` +2. The client calls `resolve` with an array of user-requested dependencies and an optional 'locking' `DependencyGraph` +3. The `Resolver` creates a new `Resolution` with those four user-specified parameters and calls `resolve` on it +4. The `Resolution` creates an `initial_state`, which takes the user-requested dependencies and puts them into a `DependencyState` + - In the process of creating the state, the `SpecificationProvider` is asked to sort the dependencies and return all the `possibilities` for the `initial_requirement` (taking into account whether the dependency is `locked`). These possibilities are then grouped into `PossibilitySet`s, with each set representing a group of versions for the dependency which share the same sub-dependency requirements and are contiguous + - A `DependencyGraph` is created that has all of these requirements point to `root_vertices` +5. The resolution process now enters its main loop, which continues as long as there is a current `state` to process, and the current state has requirements left to process +6. `UI#indicate_progress` is called to allow the client to report progress +7. If the current state is a `DependencyState`, we have it pop off a `PossibilityState` that encapsulates a `PossibilitySet` for that dependency +8. Process the topmost state on the stack +9. If there is a non-empty `PossibilitySet` for the state, `attempt_to_activate` it (jump to #11) +10. If there is no non-empty `PossibilitySet` for the state, `create_conflict` if the state is a `PossibilityState`, and then `unwind_for_conflict` + - `create_conflict` builds a `Conflict` object, with details of all of the requirements for the given dependency, and adds it to a hash of conflicts stored on the `state`, indexed by the name of the dependency + - `unwind_for_conflict` loops through all the conflicts on the `state`, looking for a state it can rewind to that might avoid that conflict. If no such state exists, it raises a VersionConflict error. Otherwise, it takes the most recent state with a chance to avoid the current conflicts and rewinds to it (go to #6) +11. 
Check if there is an existing vertex in the `activated` dependency graph for the dependency this state's `requirement` relates to +12. If there is no existing vertex in the `activated` dependency graph for the dependency this state's `requirement` relates to, `activate_new_spec`. This creates a new vertex in the `activated` dependency graph, with it's payload set to the possibility's `PossibilitySet`. It also pushes a new `DependencyState`, with the now-activated `PossibilitySet`'s own dependencies. Go to #6 +13. If there is an existing, `activated` vertex for the dependency, `attempt_to_filter_existing_spec` + - This filters the contents of the existing vertex's `PossibilitySet` by the current state's `requirement` + - If any possibilities remain within the `PossibilitySet`, it updates the activated vertex's payload with the new, filtered state and pushes a new `DependencyState` + - If no possibilities remain within the `PossibilitySet` after filtering, or if the current state's `PossibilitySet` had a different set of sub-dependency requirements to the existing vertex's `PossibilitySet`, `create_conflict` and `unwind_for_conflict`, back to the last `DependencyState` that has a chance to not generate a conflict. Go to #6 +15. Terminate with the topmost state's dependency graph when there are no more requirements left +16. For each vertex with a payload of allowable versions for this resolution (i.e., a `PossibilitySet`), pick a single specific version. + +### Optimal unwinding + +For our backtracking algorithm to be efficient as well as correct, we need to +unwind efficiently after a conflict is encountered. Unwind too far and we'll +miss valid resolutions - once we unwind passed a DependencyState we can never +get there again. Unwind too little and resolution will be extremely slow - we'll +repeatedly hit the same conflict, processing many unnecessary iterations before +getting to a branch that avoids it. + +To unwind the optimal amount, we consider the current conflict, along with all +the previous unwinds that have determined our current state. + +1. First, consider the current conflict as follows: + - Find the earliest (lowest index) set of requirements which combine to cause + the conflict. Any non-binding requirements can be ignored, as removing them + would not resolve the current conflict + - For each binding requirement, find all the alternative possibilities that + would relax the requirement: + - the requirement's DependencyState might have alternative possibilities + that would satisfy all the other requirements + - the parent of the requirement might have alternative possibilities that + would prevent the requirement existing + - the parent of the parent of the requirement might have alternative + possibilities that would prevent the parent, and thus the requirement, + from existing + - etc., etc. + - Group all of the above possibilities into an array, and pick the one with + the highest index (i.e., the smallest rewind) as our candidate rewind +2. Next, consider any previous unwinds that were not executed (because a +different, smaller unwind was chosen instead): + - Ignore any previously unused unwinds that would now unwind further than the + highest index found in (1), if any + - For the remaining unused unwinds, check whether the unwind has a chance of + preventing us encountering the current conflict. 
For this to be the case, the + unwind must have been rejected in favour of an unwind to one of the states in + the current conflict's requirement tree + - If any such unwinds exist, use the one with the highest index (smallest + unwind) instead of the one found in (1) +3a. If no possible unwind was found in (1) and (2), raise a VersionConflict +error as resolution is not possible. +3b. Filter the state that we're unwinding to, in order to remove any +possibilities we know will result in a conflict. Consider all possible unwinds +to the chosen state (there may be several, amassed from previous unused +unwinds for different conflicts) when doing this filtering - only +possibilities that will certainly result in *all* of those conflicts can be +filtered out as having no chance of resolution +4. Update the list of unused unwinds: + - Add all possible unwinds for the current conflict + - Update the `requirements_unwound_to_instead` attribute on any considered + unwind that was only rejected because it had a lower index than the chosen one + - Remove all unwinds to a state greater than or equal to the chosen unwind +5. Go to #6 in the main loop + +## Specification Provider + +The `SpecificationProvider` module forms the basis for the key integration point for a client library with Molinillo. +Its methods convert the client's domain-specific model objects into concepts the resolver understands: + +- Nested dependencies +- Names +- Requirement satisfaction +- Finding specifications (known internally as `possibilities`) +- Sorting dependencies (for the sake of reasonable resolver performance) diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/CHANGELOG.md new file mode 100644 index 0000000..285c69a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/CHANGELOG.md @@ -0,0 +1,480 @@ +# Molinillo Changelog + +## 0.8.0 (2021-08-09) + +##### Breaking + +* Support for Ruby 2.0, 2.1 and 2.2 has been dropped, the minimum supported + Ruby version is now 2.3. + [David Rodríguez](https://github.com/deivid-rodriguez) + +##### Enhancements + +* Use `Array#-` in unwind logic, since it performs better than `Array#&`, so it + speeds up resolution. + [Lukas Oberhuber](https://github.com/lukaso) + +* Allow specification provider to customize how dependencies are compared when + grouping specifications with the same dependencies. + [David Rodríguez](https://github.com/deivid-rodriguez) + +##### Bug Fixes + +* None. + + +## 0.7.0 (2020-10-21) + +##### Breaking + +* Support for Ruby 1.8.7 and 1.9.3 has been dropped, the minimum supported + Ruby version is now 2.0. + [Samuel Giddins](https://github.com/segiddins) + +##### Enhancements + +* Circular dependency errors include the full (shortest) path between the + circularly-dependent vertices. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.6.6 (2018-08-07) + +##### Enhancements + +* Improve performance of `Vertex#path_to?`. + [Samuel Giddins](https://github.com/segiddins) + +* Allow customization of string used to say that a version conflict has occurred + for a particular name by passing in the `:incompatible_version_message_for_conflict` + key when constructing a version conflict message with trees. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.6.5 (2018-03-22) + +##### Enhancements + +* Improve performance of recursive vertex methods. 
+ [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.6.4 (2017-10-29) + +##### Enhancements + +* Reduce memory usage during resolution by making the `Vertex#requirements` + array unique. + [Grey Baker](https://github.com/greysteil) + [Jan Krutisch](https://github.com/halfbyte) + +##### Bug Fixes + +* None. + + +## 0.6.3 (2017-09-06) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Handle the case where an unwind occurs to a requirement that directly caused + the current conflict but could also have been unwound to directly from + previous conflicts. In this case, filtering must not remove any possibilities + that could have avoided the previous conflicts (even if they would not avoid + the current one). + [Grey Baker](https://github.com/greysteil) + + +## 0.6.2 (2017-08-25) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Insist each PossibilitySet contains contiguous versions. Fixes a regression + where an older dependency version with identical sub-dependencies to the + latest version may be preferred over the second-latest version. + [Grey Baker](https://github.com/greysteil) + + +## 0.6.1 (2017-08-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Allow the set of dependencies for a given possibility to change over time, + fixing a regression in 0.6.0. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.0 (2017-07-27) + +##### Breaking + +* Objects returned by `dependencies_for` and passed to `resolve` must properly implement + both `==` and `eql?`, such that they return `true` when they exhibit the same behavior in + `requirement_satisfied_by?`. + +##### Enhancements + +* Speed up dependency resolution by considering multiple possible versions of a + dependency at once, grouped by sub-dependencies. Groups are then filtered as + additional requirements are introduced. If a group's sub-dependencies cause + conflicts the entire group can be discarded, which reduces the number of + possibilities that have to be tested to find a resolution. + [Grey Baker](https://github.com/greysteil) + [Samuel Giddins](https://github.com/segiddins) + [#69](https://github.com/CocoaPods/Molinillo/pull/69) + +* Check for locked requirements when generating a new state's possibilities + array, and reduce possibilities set accordingly. Reduces scope for erroneous + VersionConflict errors. + [Grey Baker](https://github.com/greysteil) + [#67](https://github.com/CocoaPods/Molinillo/pull/67) + +* Add `VersionConflict#message_with_trees` for consumers who prefer a more verbose + conflict message that includes full requirement trees for all conflicts. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Improve unwinding by considering previous conflicts for the same dependency + when deciding which state to unwind to. Previously, prior conflicts were + stored in a hash indexed by their name, with only the most recent conflict + stored for each dependency. With this fix, Molinillo can resolve anything + that's thrown at it. 🎉 + [Grey Baker](https://github.com/greysteil) + [#73](https://github.com/CocoaPods/Molinillo/pull/73) + +* Only raise CircularDependency errors if they prevent resolution. + [Ian Young](https://github.com/iangreenleaf) + [Grey Baker](https://github.com/greysteil) + [#78](https://github.com/CocoaPods/Molinillo/pull/78) + +* Consider additional (binding) requirements that caused a conflict when + determining which state to unwind to. 
Previously, in some cases Molinillo + would erroneously throw a VersionConflict error if multiple requirements + combined to cause a conflict. + [Grey Baker](https://github.com/greysteil) + [#72](https://github.com/CocoaPods/Molinillo/pull/72) + +* Consider previous conflicts when determining the state to unwind to. If a + previous conflict, for a different dependency, is the reason we ended up with + the current conflict, then unwinding to a state that would not have caused + that conflict could prevent the current one, too. + [Grey Baker](https://github.com/greysteil) + [#72](https://github.com/CocoaPods/Molinillo/pull/72) + + +## 0.5.7 (2017-03-03) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Keep a stack of parents per requirement, so unwinding past a swap point that + updated the parent of the requirement works. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5425](https://github.com/bundler/bundler/issues/5425) + + +## 0.5.6 (2017-02-08) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Only reset the parent of a requirement after swapping when its original parent + was the same vertex being swapped. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5359](https://github.com/bundler/bundler/issues/5359) + [bundler#5362](https://github.com/bundler/bundler/issues/5362) + + +## 0.5.5 (2017-01-07) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Only remove requirements from the to-be-resolved list if there are no + activated vertices depending upon them after swapping. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5294](https://github.com/bundler/bundler/issues/5294) + + +## 0.5.4 (2016-11-14) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix unwinding when both sides of a conflict have a common parent + requirement. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5154](https://github.com/bundler/bundler/issues/5154) + + +## 0.5.3 (2016-10-28) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed a regression in v0.5.2 that could cause resolution to fail after + swapping, because stale dependencies would still be in the requirements + list. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/Molinillo/issues/48) + +* Rename `Action.name` to `Action.action_name` to avoid overriding + `Module.name`. + [Samuel Giddins](https://github.com/segiddins) + [#50](https://github.com/CocoaPods/Molinillo/issues/50) + + +## 0.5.2 (2016-10-24) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed a bug where `Resolution#parent_of` would return the incorrect parent for + a dependency after swapping had occurred, resulting in resolution failing. + [Samuel Giddins](https://github.com/segiddins) + [bundler#5059](https://github.com/bundler/bundler/issues/5059) + + +## 0.5.1 (2016-09-12) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fixed a bug where `Resolution#parent_of` would return the incorrect parent for + a dependency, resulting in resolution failing. + [Samuel Giddins](https://github.com/segiddins) + [bundler#4961](https://github.com/bundler/bundler/issues/4961) + + +## 0.5.0 (2016-06-14) + +##### Enhancements + +* Add an operation log to `DependencyGraph` to eliminate the need for graph + copies during dependency resolution, resulting in a 3-100x speedup and + reduction in allocations. 
+ [Samuel Giddins](https://github.com/segiddins) + [bundler#4376](https://github.com/bundler/bundler/issues/4376) + +* Remove all metaprogramming to reduce array allocation overhead and improve + discoverability. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.4.5 (2016-04-30) + +##### Enhancements + +* For performance, don't needlessly dup objects in + `Resolution#push_state_for_requirements`. + [Joe Rafaniello](https://github.com/jrafanie) + +##### Bug Fixes + +* Recursively prune requirements when removing an orphan after swapping. + [Daniel DeLeo](https://github.com/danielsdeleo) + [berkshelf/solve#57](https://github.com/berkshelf/solve/issues/57) + + +## 0.4.4 (2016-02-28) + +##### Bug Fixes + +* Fix mutating a frozen string in `NoSuchDependencyError#message`. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.4.3 (2016-02-18) + +##### Enhancements + +* Add frozen string literal comments to all ruby files. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Prune the dependency list when removing an orphan after swapping. + [Samuel Giddins](https://github.com/segiddins) + [bundler/bundler#4276](https://github.com/bundler/bundler/issues/4276) + + +## 0.4.2 (2016-01-30) + +##### Bug Fixes + +* Detaching a vertex correctly removes it from the list of successors of its + predecessors. + [Samuel Giddins](https://github.com/segiddins) + +* Vertices orphaned after swapping dependencies are properly cleaned up from the + graph of activated specs. + [Samuel Giddins](https://github.com/segiddins) + [bundler/bundler#4198](https://github.com/bundler/bundler/issues/4198) + + +## 0.4.1 (2015-12-30) + +##### Enhancements + +* Ensure every API is 100% documented. + [Samuel Giddins](https://github.com/segiddins) + [#22](https://github.com/CocoaPods/Molinillo/issues/22) + + +## 0.4.0 (2015-07-27) + +##### API Breaking Changes + +* The `DependencyGraph` no longer treats root vertices specially, nor does it + maintain a direct reference to `edges`. Additionally, `Vertex` no longer + has a reference to its parent graph. + +##### Enhancements + +* Resolution has been sped up by 25x in some pathological cases, and in general + recursive operations on a `DependencyGraph` or `Vertex` are now `O(n)`. + [Samuel Giddins](https://github.com/segiddins) + [Bundler#3803](https://github.com/bundler/bundler/issues/3803) + +* Re-sorting of dependencies is skipped when the unresolved dependency list has + not changed, speeding up resolution of fully locked graphs. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.3.1 (2015-07-24) + +##### Enhancements + +* Add `Conflict#activated_by_name` to allow even richer version conflict + messages. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Ensure `Conflict#requirement_trees` is exhaustive. + [Samuel Giddins](https://github.com/segiddins) + [Bundler#3860](https://github.com/bundler/bundler/issues/3860) + + +## 0.3.0 (2015-06-29) + +##### Enhancements + +* Add the ability to optionally skip dependencies that have no possibilities. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.2.3 (2015-03-28) + +##### Bug Fixes + +* Silence a silly MRI warning about declaring private attributes. 
+ [Piotr Szotkowski](https://github.com/chastell) + [Bundler#3516](https://github.com/bundler/bundler/issues/3516) + [Bundler#3525](https://github.com/bundler/bundler/issues/3525) + + +## 0.2.2 (2015-03-27) + +##### Bug Fixes + +* Use an ivar in `DependencyGraph#initialize_copy` to silence an MRI warning. + [Samuel Giddins](https://github.com/segiddins) + [Bundler#3516](https://github.com/bundler/bundler/issues/3516) + + +## 0.2.1 (2015-02-21) + +* Allow resolving some pathological cases where the backjumping algorithm would + skip over a valid possibility. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.2.0 (2014-12-25) + +* Institute stricter forward checking by backjumping to the source of a + conflict, even if that source comes from the existing spec. This further + improves performance in highly conflicting situations when sorting heuristics + prove misleading. + [Samuel Giddins](https://github.com/segiddins) + [Smit Shah](https://github.com/Who828) + +* Add support for topologically sorting a dependency graph's vertices. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.1.2 (2014-11-19) + +##### Enhancements + +* Improve performance in highly conflicting situations by backtracking more than + one state at a time. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* Ensure that recursive invocations of `detach_vertex_named` don't lead to + messaging `nil`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#2805](https://github.com/CocoaPods/CocoaPods/issues/2805) + +## 0.1.1 (2014-11-06) + +* Ensure that an unwanted exception is not raised when an error occurs before + the initial state has been pushed upon the stack. + [Samuel Giddins](https://github.com/segiddins) + +## 0.1.0 (2014-10-26) + +* Initial release. + [Samuel Giddins](https://github.com/segiddins) diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/LICENSE new file mode 100644 index 0000000..01feffa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/LICENSE @@ -0,0 +1,9 @@ +This project is licensed under the MIT license. + +Copyright (c) 2014 Samuel E. Giddins segiddins@segiddins.me + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
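The `SpecificationProvider` integration point described in the ARCHITECTURE excerpt above is easiest to see with a small, self-contained client. The sketch below is illustrative only: the `Spec` and `Dep` structs and the in-memory index are assumptions made for this example and are not part of Molinillo. It overrides just the handful of methods a provider usually needs before handing itself to `Molinillo::Resolver`; everything not overridden (dependency sorting, conflict messages, progress output) falls back to the defaults in `modules/specification_provider.rb` and `modules/ui.rb` vendored later in this diff.

```ruby
require 'rubygems'  # Gem::Version / Gem::Requirement, used only by this example
require 'molinillo'

# Hypothetical model objects for the sketch; Molinillo imposes no particular
# classes, only the SpecificationProvider protocol.
Spec = Struct.new(:name, :version, :dependencies)
Dep  = Struct.new(:name, :requirement)

# A toy provider backed by an in-memory list of specs.
class InMemoryProvider
  include Molinillo::SpecificationProvider
  include Molinillo::UI # reuse the default progress/debug output

  def initialize(specs)
    @specs = specs
  end

  # The name the resolver should use for a dependency.
  def name_for(dependency)
    dependency.name
  end

  # All specs matching a dependency by name, latest version last, since the
  # resolver considers the possibilities array from the end.
  def search_for(dependency)
    @specs.select { |s| s.name == dependency.name }.sort_by(&:version)
  end

  # Nested dependencies of an activated specification.
  def dependencies_for(specification)
    specification.dependencies
  end

  # Requirement satisfaction, delegated to Gem::Requirement in this example.
  def requirement_satisfied_by?(requirement, _activated, spec)
    requirement.requirement.satisfied_by?(spec.version)
  end
end

specs = [
  Spec.new('a', Gem::Version.new('1.0.0'), [Dep.new('b', Gem::Requirement.new('>= 1.0'))]),
  Spec.new('b', Gem::Version.new('1.0.0'), []),
  Spec.new('b', Gem::Version.new('2.0.0'), []),
]

provider = InMemoryProvider.new(specs)
resolver = Molinillo::Resolver.new(provider, provider)
graph    = resolver.resolve([Dep.new('a', Gem::Requirement.new('>= 0'))])

graph.vertices.each do |name, vertex|
  puts "#{name} #{vertex.payload.version}" # => "a 1.0.0", then "b 2.0.0"
end
```

Resolving `a >= 0` against this toy index activates `a 1.0.0` and `b 2.0.0`, the newest `b` satisfying `>= 1.0`; the returned object is the activated `DependencyGraph`, with each vertex's payload set to the chosen specification.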
diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/README.md b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/README.md new file mode 100644 index 0000000..b4f3dad --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/README.md @@ -0,0 +1,44 @@ +# Molinillo + +[![Build Status](https://github.com/CocoaPods/Molinillo/workflows/test/badge.svg)](https://github.com/CocoaPods/Molinillo/actions?query=branch%3Amaster) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/Molinillo.svg?style=flat)](https://codeclimate.com/github/CocoaPods/Molinillo) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/Molinillo.svg?style=flat)](https://codeclimate.com/github/CocoaPods/Molinillo) + +A generic dependency-resolution implementation. + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'molinillo', :git => 'https://github.com/CocoaPods/Molinillo' +``` + +And then execute: + +```bash +$ bundle install +``` + +Or install it yourself as: + +```bash +$ gem install molinillo +``` + +## Usage + +See the [ARCHITECTURE](ARCHITECTURE.md) file for an overview and look at the test suite for example usage. + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create a pull request + +## The Name + +[Molinillo](http://en.wikipedia.org/wiki/Molinillo_(whisk)) is a special whisk used in Mexico in the preparation of beverages such as hot chocolate. +Much like a dependency resolver, a molinillo helps take a list of ingredients and turn it into a delicious concoction! diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo.rb new file mode 100644 index 0000000..9d38683 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require_relative 'molinillo/gem_metadata' +require_relative 'molinillo/errors' +require_relative 'molinillo/resolver' +require_relative 'molinillo/modules/ui' +require_relative 'molinillo/modules/specification_provider' + +# Molinillo is a generic dependency resolution algorithm. +module Molinillo +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/delegates/resolution_state.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/delegates/resolution_state.rb new file mode 100644 index 0000000..b083072 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/delegates/resolution_state.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module Molinillo + # @!visibility private + module Delegates + # Delegates all {Molinillo::ResolutionState} methods to a `#state` property. 
+ module ResolutionState + # (see Molinillo::ResolutionState#name) + def name + current_state = state || Molinillo::ResolutionState.empty + current_state.name + end + + # (see Molinillo::ResolutionState#requirements) + def requirements + current_state = state || Molinillo::ResolutionState.empty + current_state.requirements + end + + # (see Molinillo::ResolutionState#activated) + def activated + current_state = state || Molinillo::ResolutionState.empty + current_state.activated + end + + # (see Molinillo::ResolutionState#requirement) + def requirement + current_state = state || Molinillo::ResolutionState.empty + current_state.requirement + end + + # (see Molinillo::ResolutionState#possibilities) + def possibilities + current_state = state || Molinillo::ResolutionState.empty + current_state.possibilities + end + + # (see Molinillo::ResolutionState#depth) + def depth + current_state = state || Molinillo::ResolutionState.empty + current_state.depth + end + + # (see Molinillo::ResolutionState#conflicts) + def conflicts + current_state = state || Molinillo::ResolutionState.empty + current_state.conflicts + end + + # (see Molinillo::ResolutionState#unused_unwind_options) + def unused_unwind_options + current_state = state || Molinillo::ResolutionState.empty + current_state.unused_unwind_options + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/delegates/specification_provider.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/delegates/specification_provider.rb new file mode 100644 index 0000000..20157aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/delegates/specification_provider.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +module Molinillo + module Delegates + # Delegates all {Molinillo::SpecificationProvider} methods to a + # `#specification_provider` property. + module SpecificationProvider + # (see Molinillo::SpecificationProvider#search_for) + def search_for(dependency) + with_no_such_dependency_error_handling do + specification_provider.search_for(dependency) + end + end + + # (see Molinillo::SpecificationProvider#dependencies_for) + def dependencies_for(specification) + with_no_such_dependency_error_handling do + specification_provider.dependencies_for(specification) + end + end + + # (see Molinillo::SpecificationProvider#requirement_satisfied_by?) + def requirement_satisfied_by?(requirement, activated, spec) + with_no_such_dependency_error_handling do + specification_provider.requirement_satisfied_by?(requirement, activated, spec) + end + end + + # (see Molinillo::SpecificationProvider#dependencies_equal?) 
+ def dependencies_equal?(dependencies, other_dependencies) + with_no_such_dependency_error_handling do + specification_provider.dependencies_equal?(dependencies, other_dependencies) + end + end + + # (see Molinillo::SpecificationProvider#name_for) + def name_for(dependency) + with_no_such_dependency_error_handling do + specification_provider.name_for(dependency) + end + end + + # (see Molinillo::SpecificationProvider#name_for_explicit_dependency_source) + def name_for_explicit_dependency_source + with_no_such_dependency_error_handling do + specification_provider.name_for_explicit_dependency_source + end + end + + # (see Molinillo::SpecificationProvider#name_for_locking_dependency_source) + def name_for_locking_dependency_source + with_no_such_dependency_error_handling do + specification_provider.name_for_locking_dependency_source + end + end + + # (see Molinillo::SpecificationProvider#sort_dependencies) + def sort_dependencies(dependencies, activated, conflicts) + with_no_such_dependency_error_handling do + specification_provider.sort_dependencies(dependencies, activated, conflicts) + end + end + + # (see Molinillo::SpecificationProvider#allow_missing?) + def allow_missing?(dependency) + with_no_such_dependency_error_handling do + specification_provider.allow_missing?(dependency) + end + end + + private + + # Ensures any raised {NoSuchDependencyError} has its + # {NoSuchDependencyError#required_by} set. + # @yield + def with_no_such_dependency_error_handling + yield + rescue NoSuchDependencyError => error + if state + vertex = activated.vertex_named(name_for(error.dependency)) + error.required_by += vertex.incoming_edges.map { |e| e.origin.name } + error.required_by << name_for_explicit_dependency_source unless vertex.explicit_requirements.empty? + end + raise + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph.rb new file mode 100644 index 0000000..a80ecd6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph.rb @@ -0,0 +1,255 @@ +# frozen_string_literal: true + +require 'tsort' + +require_relative 'dependency_graph/log' +require_relative 'dependency_graph/vertex' + +module Molinillo + # A directed acyclic graph that is tuned to hold named dependencies + class DependencyGraph + include Enumerable + + # Enumerates through the vertices of the graph. + # @return [Array] The graph's vertices. + def each + return vertices.values.each unless block_given? + vertices.values.each { |v| yield v } + end + + include TSort + + # @!visibility private + alias tsort_each_node each + + # @!visibility private + def tsort_each_child(vertex, &block) + vertex.successors.each(&block) + end + + # Topologically sorts the given vertices. + # @param [Enumerable] vertices the vertices to be sorted, which must + # all belong to the same graph. + # @return [Array] The sorted vertices. 
+ def self.tsort(vertices) + TSort.tsort( + lambda { |b| vertices.each(&b) }, + lambda { |v, &b| (v.successors & vertices).each(&b) } + ) + end + + # A directed edge of a {DependencyGraph} + # @attr [Vertex] origin The origin of the directed edge + # @attr [Vertex] destination The destination of the directed edge + # @attr [Object] requirement The requirement the directed edge represents + Edge = Struct.new(:origin, :destination, :requirement) + + # @return [{String => Vertex}] the vertices of the dependency graph, keyed + # by {Vertex#name} + attr_reader :vertices + + # @return [Log] the op log for this graph + attr_reader :log + + # Initializes an empty dependency graph + def initialize + @vertices = {} + @log = Log.new + end + + # Tags the current state of the dependency as the given tag + # @param [Object] tag an opaque tag for the current state of the graph + # @return [Void] + def tag(tag) + log.tag(self, tag) + end + + # Rewinds the graph to the state tagged as `tag` + # @param [Object] tag the tag to rewind to + # @return [Void] + def rewind_to(tag) + log.rewind_to(self, tag) + end + + # Initializes a copy of a {DependencyGraph}, ensuring that all {#vertices} + # are properly copied. + # @param [DependencyGraph] other the graph to copy. + def initialize_copy(other) + super + @vertices = {} + @log = other.log.dup + traverse = lambda do |new_v, old_v| + return if new_v.outgoing_edges.size == old_v.outgoing_edges.size + old_v.outgoing_edges.each do |edge| + destination = add_vertex(edge.destination.name, edge.destination.payload) + add_edge_no_circular(new_v, destination, edge.requirement) + traverse.call(destination, edge.destination) + end + end + other.vertices.each do |name, vertex| + new_vertex = add_vertex(name, vertex.payload, vertex.root?) + new_vertex.explicit_requirements.replace(vertex.explicit_requirements) + traverse.call(new_vertex, vertex) + end + end + + # @return [String] a string suitable for debugging + def inspect + "#{self.class}:#{vertices.values.inspect}" + end + + # @param [Hash] options options for dot output. + # @return [String] Returns a dot format representation of the graph + def to_dot(options = {}) + edge_label = options.delete(:edge_label) + raise ArgumentError, "Unknown options: #{options.keys}" unless options.empty? + + dot_vertices = [] + dot_edges = [] + vertices.each do |n, v| + dot_vertices << " #{n} [label=\"{#{n}|#{v.payload}}\"]" + v.outgoing_edges.each do |e| + label = edge_label ? edge_label.call(e) : e.requirement + dot_edges << " #{e.origin.name} -> #{e.destination.name} [label=#{label.to_s.dump}]" + end + end + + dot_vertices.uniq! + dot_vertices.sort! + dot_edges.uniq! + dot_edges.sort! 
+ + dot = dot_vertices.unshift('digraph G {').push('') + dot_edges.push('}') + dot.join("\n") + end + + # @param [DependencyGraph] other + # @return [Boolean] whether the two dependency graphs are equal, determined + # by a recursive traversal of each {#root_vertices} and its + # {Vertex#successors} + def ==(other) + return false unless other + return true if equal?(other) + vertices.each do |name, vertex| + other_vertex = other.vertex_named(name) + return false unless other_vertex + return false unless vertex.payload == other_vertex.payload + return false unless other_vertex.successors.to_set == vertex.successors.to_set + end + end + + # @param [String] name + # @param [Object] payload + # @param [Array] parent_names + # @param [Object] requirement the requirement that is requiring the child + # @return [void] + def add_child_vertex(name, payload, parent_names, requirement) + root = !parent_names.delete(nil) { true } + vertex = add_vertex(name, payload, root) + vertex.explicit_requirements << requirement if root + parent_names.each do |parent_name| + parent_vertex = vertex_named(parent_name) + add_edge(parent_vertex, vertex, requirement) + end + vertex + end + + # Adds a vertex with the given name, or updates the existing one. + # @param [String] name + # @param [Object] payload + # @return [Vertex] the vertex that was added to `self` + def add_vertex(name, payload, root = false) + log.add_vertex(self, name, payload, root) + end + + # Detaches the {#vertex_named} `name` {Vertex} from the graph, recursively + # removing any non-root vertices that were orphaned in the process + # @param [String] name + # @return [Array] the vertices which have been detached + def detach_vertex_named(name) + log.detach_vertex_named(self, name) + end + + # @param [String] name + # @return [Vertex,nil] the vertex with the given name + def vertex_named(name) + vertices[name] + end + + # @param [String] name + # @return [Vertex,nil] the root vertex with the given name + def root_vertex_named(name) + vertex = vertex_named(name) + vertex if vertex && vertex.root? + end + + # Adds a new {Edge} to the dependency graph + # @param [Vertex] origin + # @param [Vertex] destination + # @param [Object] requirement the requirement that this edge represents + # @return [Edge] the added edge + def add_edge(origin, destination, requirement) + if destination.path_to?(origin) + raise CircularDependencyError.new(path(destination, origin)) + end + add_edge_no_circular(origin, destination, requirement) + end + + # Deletes an {Edge} from the dependency graph + # @param [Edge] edge + # @return [Void] + def delete_edge(edge) + log.delete_edge(self, edge.origin.name, edge.destination.name, edge.requirement) + end + + # Sets the payload of the vertex with the given name + # @param [String] name the name of the vertex + # @param [Object] payload the payload + # @return [Void] + def set_payload(name, payload) + log.set_payload(self, name, payload) + end + + private + + # Adds a new {Edge} to the dependency graph without checking for + # circularity. 
+ # @param (see #add_edge) + # @return (see #add_edge) + def add_edge_no_circular(origin, destination, requirement) + log.add_edge_no_circular(self, origin.name, destination.name, requirement) + end + + # Returns the path between two vertices + # @raise [ArgumentError] if there is no path between the vertices + # @param [Vertex] from + # @param [Vertex] to + # @return [Array] the shortest path from `from` to `to` + def path(from, to) + distances = Hash.new(vertices.size + 1) + distances[from.name] = 0 + predecessors = {} + each do |vertex| + vertex.successors.each do |successor| + if distances[successor.name] > distances[vertex.name] + 1 + distances[successor.name] = distances[vertex.name] + 1 + predecessors[successor] = vertex + end + end + end + + path = [to] + while before = predecessors[to] + path << before + to = before + break if to == from + end + + unless path.last.equal?(from) + raise ArgumentError, "There is no path from #{from.name} to #{to.name}" + end + + path.reverse + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/action.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/action.rb new file mode 100644 index 0000000..2fe384e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/action.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module Molinillo + class DependencyGraph + # An action that modifies a {DependencyGraph} that is reversible. + # @abstract + class Action + # rubocop:disable Lint/UnusedMethodArgument + + # @return [Symbol] The name of the action. + def self.action_name + raise 'Abstract' + end + + # Performs the action on the given graph. + # @param [DependencyGraph] graph the graph to perform the action on. + # @return [Void] + def up(graph) + raise 'Abstract' + end + + # Reverses the action on the given graph. + # @param [DependencyGraph] graph the graph to reverse the action on. 
+ # @return [Void] + def down(graph) + raise 'Abstract' + end + + # @return [Action,Nil] The previous action + attr_accessor :previous + + # @return [Action,Nil] The next action + attr_accessor :next + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/add_edge_no_circular.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/add_edge_no_circular.rb new file mode 100644 index 0000000..6857f68 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/add_edge_no_circular.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require_relative 'action' +module Molinillo + class DependencyGraph + # @!visibility private + # (see DependencyGraph#add_edge_no_circular) + class AddEdgeNoCircular < Action + # @!group Action + + # (see Action.action_name) + def self.action_name + :add_vertex + end + + # (see Action#up) + def up(graph) + edge = make_edge(graph) + edge.origin.outgoing_edges << edge + edge.destination.incoming_edges << edge + edge + end + + # (see Action#down) + def down(graph) + edge = make_edge(graph) + delete_first(edge.origin.outgoing_edges, edge) + delete_first(edge.destination.incoming_edges, edge) + end + + # @!group AddEdgeNoCircular + + # @return [String] the name of the origin of the edge + attr_reader :origin + + # @return [String] the name of the destination of the edge + attr_reader :destination + + # @return [Object] the requirement that the edge represents + attr_reader :requirement + + # @param [DependencyGraph] graph the graph to find vertices from + # @return [Edge] The edge this action adds + def make_edge(graph) + Edge.new(graph.vertex_named(origin), graph.vertex_named(destination), requirement) + end + + # Initialize an action to add an edge to a dependency graph + # @param [String] origin the name of the origin of the edge + # @param [String] destination the name of the destination of the edge + # @param [Object] requirement the requirement that the edge represents + def initialize(origin, destination, requirement) + @origin = origin + @destination = destination + @requirement = requirement + end + + private + + def delete_first(array, item) + return unless index = array.index(item) + array.delete_at(index) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/add_vertex.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/add_vertex.rb new file mode 100644 index 0000000..aad3b96 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/add_vertex.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +require_relative 'action' +module Molinillo + class DependencyGraph + # @!visibility private + # (see DependencyGraph#add_vertex) + class AddVertex < Action # :nodoc: + # @!group Action + + # (see Action.action_name) + def self.action_name + :add_vertex + end + + # (see Action#up) + def up(graph) + if existing = graph.vertices[name] + @existing_payload = existing.payload + @existing_root = existing.root + end + vertex = existing || Vertex.new(name, payload) + graph.vertices[vertex.name] = vertex + vertex.payload ||= payload + vertex.root ||= root + vertex + end + + # (see Action#down) + def down(graph) + if defined?(@existing_payload) + vertex = graph.vertices[name] + vertex.payload = @existing_payload + vertex.root = @existing_root + else + graph.vertices.delete(name) + end + end + + # @!group AddVertex + + # @return [String] 
the name of the vertex + attr_reader :name + + # @return [Object] the payload for the vertex + attr_reader :payload + + # @return [Boolean] whether the vertex is root or not + attr_reader :root + + # Initialize an action to add a vertex to a dependency graph + # @param [String] name the name of the vertex + # @param [Object] payload the payload for the vertex + # @param [Boolean] root whether the vertex is root or not + def initialize(name, payload, root) + @name = name + @payload = payload + @root = root + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/delete_edge.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/delete_edge.rb new file mode 100644 index 0000000..b46196e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/delete_edge.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +require_relative 'action' +module Molinillo + class DependencyGraph + # @!visibility private + # (see DependencyGraph#delete_edge) + class DeleteEdge < Action + # @!group Action + + # (see Action.action_name) + def self.action_name + :delete_edge + end + + # (see Action#up) + def up(graph) + edge = make_edge(graph) + edge.origin.outgoing_edges.delete(edge) + edge.destination.incoming_edges.delete(edge) + end + + # (see Action#down) + def down(graph) + edge = make_edge(graph) + edge.origin.outgoing_edges << edge + edge.destination.incoming_edges << edge + edge + end + + # @!group DeleteEdge + + # @return [String] the name of the origin of the edge + attr_reader :origin_name + + # @return [String] the name of the destination of the edge + attr_reader :destination_name + + # @return [Object] the requirement that the edge represents + attr_reader :requirement + + # @param [DependencyGraph] graph the graph to find vertices from + # @return [Edge] The edge this action adds + def make_edge(graph) + Edge.new( + graph.vertex_named(origin_name), + graph.vertex_named(destination_name), + requirement + ) + end + + # Initialize an action to add an edge to a dependency graph + # @param [String] origin_name the name of the origin of the edge + # @param [String] destination_name the name of the destination of the edge + # @param [Object] requirement the requirement that the edge represents + def initialize(origin_name, destination_name, requirement) + @origin_name = origin_name + @destination_name = destination_name + @requirement = requirement + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/detach_vertex_named.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/detach_vertex_named.rb new file mode 100644 index 0000000..b6521df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/detach_vertex_named.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +require_relative 'action' +module Molinillo + class DependencyGraph + # @!visibility private + # @see DependencyGraph#detach_vertex_named + class DetachVertexNamed < Action + # @!group Action + + # (see Action#name) + def self.action_name + :add_vertex + end + + # (see Action#up) + def up(graph) + return [] unless @vertex = graph.vertices.delete(name) + + removed_vertices = [@vertex] + @vertex.outgoing_edges.each do |e| + v = e.destination + v.incoming_edges.delete(e) + if !v.root? && v.incoming_edges.empty? 
+ removed_vertices.concat graph.detach_vertex_named(v.name) + end + end + + @vertex.incoming_edges.each do |e| + v = e.origin + v.outgoing_edges.delete(e) + end + + removed_vertices + end + + # (see Action#down) + def down(graph) + return unless @vertex + graph.vertices[@vertex.name] = @vertex + @vertex.outgoing_edges.each do |e| + e.destination.incoming_edges << e + end + @vertex.incoming_edges.each do |e| + e.origin.outgoing_edges << e + end + end + + # @!group DetachVertexNamed + + # @return [String] the name of the vertex to detach + attr_reader :name + + # Initialize an action to detach a vertex from a dependency graph + # @param [String] name the name of the vertex to detach + def initialize(name) + @name = name + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/log.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/log.rb new file mode 100644 index 0000000..b5445c9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/log.rb @@ -0,0 +1,126 @@ +# frozen_string_literal: true + +require_relative 'add_edge_no_circular' +require_relative 'add_vertex' +require_relative 'delete_edge' +require_relative 'detach_vertex_named' +require_relative 'set_payload' +require_relative 'tag' + +module Molinillo + class DependencyGraph + # A log for dependency graph actions + class Log + # Initializes an empty log + def initialize + @current_action = @first_action = nil + end + + # @!macro [new] action + # {include:DependencyGraph#$0} + # @param [Graph] graph the graph to perform the action on + # @param (see DependencyGraph#$0) + # @return (see DependencyGraph#$0) + + # @macro action + def tag(graph, tag) + push_action(graph, Tag.new(tag)) + end + + # @macro action + def add_vertex(graph, name, payload, root) + push_action(graph, AddVertex.new(name, payload, root)) + end + + # @macro action + def detach_vertex_named(graph, name) + push_action(graph, DetachVertexNamed.new(name)) + end + + # @macro action + def add_edge_no_circular(graph, origin, destination, requirement) + push_action(graph, AddEdgeNoCircular.new(origin, destination, requirement)) + end + + # {include:DependencyGraph#delete_edge} + # @param [Graph] graph the graph to perform the action on + # @param [String] origin_name + # @param [String] destination_name + # @param [Object] requirement + # @return (see DependencyGraph#delete_edge) + def delete_edge(graph, origin_name, destination_name, requirement) + push_action(graph, DeleteEdge.new(origin_name, destination_name, requirement)) + end + + # @macro action + def set_payload(graph, name, payload) + push_action(graph, SetPayload.new(name, payload)) + end + + # Pops the most recent action from the log and undoes the action + # @param [DependencyGraph] graph + # @return [Action] the action that was popped off the log + def pop!(graph) + return unless action = @current_action + unless @current_action = action.previous + @first_action = nil + end + action.down(graph) + action + end + + extend Enumerable + + # @!visibility private + # Enumerates each action in the log + # @yield [Action] + def each + return enum_for unless block_given? + action = @first_action + loop do + break unless action + yield action + action = action.next + end + self + end + + # @!visibility private + # Enumerates each action in the log in reverse order + # @yield [Action] + def reverse_each + return enum_for(:reverse_each) unless block_given? 
+ action = @current_action + loop do + break unless action + yield action + action = action.previous + end + self + end + + # @macro action + def rewind_to(graph, tag) + loop do + action = pop!(graph) + raise "No tag #{tag.inspect} found" unless action + break if action.class.action_name == :tag && action.tag == tag + end + end + + private + + # Adds the given action to the log, running the action + # @param [DependencyGraph] graph + # @param [Action] action + # @return The value returned by `action.up` + def push_action(graph, action) + action.previous = @current_action + @current_action.next = action if @current_action + @current_action = action + @first_action ||= action + action.up(graph) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/set_payload.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/set_payload.rb new file mode 100644 index 0000000..14f7cf3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/set_payload.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require_relative 'action' +module Molinillo + class DependencyGraph + # @!visibility private + # @see DependencyGraph#set_payload + class SetPayload < Action # :nodoc: + # @!group Action + + # (see Action.action_name) + def self.action_name + :set_payload + end + + # (see Action#up) + def up(graph) + vertex = graph.vertex_named(name) + @old_payload = vertex.payload + vertex.payload = payload + end + + # (see Action#down) + def down(graph) + graph.vertex_named(name).payload = @old_payload + end + + # @!group SetPayload + + # @return [String] the name of the vertex + attr_reader :name + + # @return [Object] the payload for the vertex + attr_reader :payload + + # Initialize an action to add set the payload for a vertex in a dependency + # graph + # @param [String] name the name of the vertex + # @param [Object] payload the payload for the vertex + def initialize(name, payload) + @name = name + @payload = payload + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/tag.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/tag.rb new file mode 100644 index 0000000..d361eb8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/tag.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require_relative 'action' +module Molinillo + class DependencyGraph + # @!visibility private + # @see DependencyGraph#tag + class Tag < Action + # @!group Action + + # (see Action.action_name) + def self.action_name + :tag + end + + # (see Action#up) + def up(graph) + end + + # (see Action#down) + def down(graph) + end + + # @!group Tag + + # @return [Object] An opaque tag + attr_reader :tag + + # Initialize an action to tag a state of a dependency graph + # @param [Object] tag an opaque tag + def initialize(tag) + @tag = tag + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/vertex.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/vertex.rb new file mode 100644 index 0000000..5eafc90 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/dependency_graph/vertex.rb @@ -0,0 +1,164 @@ +# frozen_string_literal: true + +module Molinillo + class DependencyGraph + # A vertex in a {DependencyGraph} that encapsulates a {#name} and a + # {#payload} + class Vertex + # 
@return [String] the name of the vertex + attr_accessor :name + + # @return [Object] the payload the vertex holds + attr_accessor :payload + + # @return [Array] the explicit requirements that required + # this vertex + attr_reader :explicit_requirements + + # @return [Boolean] whether the vertex is considered a root vertex + attr_accessor :root + alias root? root + + # Initializes a vertex with the given name and payload. + # @param [String] name see {#name} + # @param [Object] payload see {#payload} + def initialize(name, payload) + @name = name.frozen? ? name : name.dup.freeze + @payload = payload + @explicit_requirements = [] + @outgoing_edges = [] + @incoming_edges = [] + end + + # @return [Array] all of the requirements that required + # this vertex + def requirements + (incoming_edges.map(&:requirement) + explicit_requirements).uniq + end + + # @return [Array] the edges of {#graph} that have `self` as their + # {Edge#origin} + attr_accessor :outgoing_edges + + # @return [Array] the edges of {#graph} that have `self` as their + # {Edge#destination} + attr_accessor :incoming_edges + + # @return [Array] the vertices of {#graph} that have an edge with + # `self` as their {Edge#destination} + def predecessors + incoming_edges.map(&:origin) + end + + # @return [Set] the vertices of {#graph} where `self` is a + # {#descendent?} + def recursive_predecessors + _recursive_predecessors + end + + # @param [Set] vertices the set to add the predecessors to + # @return [Set] the vertices of {#graph} where `self` is a + # {#descendent?} + def _recursive_predecessors(vertices = new_vertex_set) + incoming_edges.each do |edge| + vertex = edge.origin + next unless vertices.add?(vertex) + vertex._recursive_predecessors(vertices) + end + + vertices + end + protected :_recursive_predecessors + + # @return [Array] the vertices of {#graph} that have an edge with + # `self` as their {Edge#origin} + def successors + outgoing_edges.map(&:destination) + end + + # @return [Set] the vertices of {#graph} where `self` is an + # {#ancestor?} + def recursive_successors + _recursive_successors + end + + # @param [Set] vertices the set to add the successors to + # @return [Set] the vertices of {#graph} where `self` is an + # {#ancestor?} + def _recursive_successors(vertices = new_vertex_set) + outgoing_edges.each do |edge| + vertex = edge.destination + next unless vertices.add?(vertex) + vertex._recursive_successors(vertices) + end + + vertices + end + protected :_recursive_successors + + # @return [String] a string suitable for debugging + def inspect + "#{self.class}:#{name}(#{payload.inspect})" + end + + # @return [Boolean] whether the two vertices are equal, determined + # by a recursive traversal of each {Vertex#successors} + def ==(other) + return true if equal?(other) + shallow_eql?(other) && + successors.to_set == other.successors.to_set + end + + # @param [Vertex] other the other vertex to compare to + # @return [Boolean] whether the two vertices are equal, determined + # solely by {#name} and {#payload} equality + def shallow_eql?(other) + return true if equal?(other) + other && + name == other.name && + payload == other.payload + end + + alias eql? == + + # @return [Fixnum] a hash for the vertex based upon its {#name} + def hash + name.hash + end + + # Is there a path from `self` to `other` following edges in the + # dependency graph? + # @return whether there is a path following edges within this {#graph} + def path_to?(other) + _path_to?(other) + end + + alias descendent? path_to? 
+ + # @param [Vertex] other the vertex to check if there's a path to + # @param [Set] visited the vertices of {#graph} that have been visited + # @return [Boolean] whether there is a path to `other` from `self` + def _path_to?(other, visited = new_vertex_set) + return false unless visited.add?(self) + return true if equal?(other) + successors.any? { |v| v._path_to?(other, visited) } + end + protected :_path_to? + + # Is there a path from `other` to `self` following edges in the + # dependency graph? + # @return whether there is a path following edges within this {#graph} + def ancestor?(other) + other.path_to?(self) + end + + alias is_reachable_from? ancestor? + + def new_vertex_set + require 'set' + Set.new + end + private :new_vertex_set + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/errors.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/errors.rb new file mode 100644 index 0000000..c240ecd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/errors.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true + +module Molinillo + # An error that occurred during the resolution process + class ResolverError < StandardError; end + + # An error caused by searching for a dependency that is completely unknown, + # i.e. has no versions available whatsoever. + class NoSuchDependencyError < ResolverError + # @return [Object] the dependency that could not be found + attr_accessor :dependency + + # @return [Array] the specifications that depended upon {#dependency} + attr_accessor :required_by + + # Initializes a new error with the given missing dependency. + # @param [Object] dependency @see {#dependency} + # @param [Array] required_by @see {#required_by} + def initialize(dependency, required_by = []) + @dependency = dependency + @required_by = required_by.uniq + super() + end + + # The error message for the missing dependency, including the specifications + # that had this dependency. + def message + sources = required_by.map { |r| "`#{r}`" }.join(' and ') + message = "Unable to find a specification for `#{dependency}`" + message += " depended upon by #{sources}" unless sources.empty? + message + end + end + + # An error caused by attempting to fulfil a dependency that was circular + # + # @note This exception will be thrown if and only if a {Vertex} is added to a + # {DependencyGraph} that has a {DependencyGraph::Vertex#path_to?} an + # existing {DependencyGraph::Vertex} + class CircularDependencyError < ResolverError + # [Set] the dependencies responsible for causing the error + attr_reader :dependencies + + # Initializes a new error with the given circular vertices. + # @param [Array] vertices the vertices in the dependency + # that caused the error + def initialize(vertices) + super "There is a circular dependency between #{vertices.map(&:name).join(' and ')}" + @dependencies = vertices.map { |vertex| vertex.payload.possibilities.last }.to_set + end + end + + # An error caused by conflicts in version + class VersionConflict < ResolverError + # @return [{String => Resolution::Conflict}] the conflicts that caused + # resolution to fail + attr_reader :conflicts + + # @return [SpecificationProvider] the specification provider used during + # resolution + attr_reader :specification_provider + + # Initializes a new error with the given version conflicts. 
+ # @param [{String => Resolution::Conflict}] conflicts see {#conflicts} + # @param [SpecificationProvider] specification_provider see {#specification_provider} + def initialize(conflicts, specification_provider) + pairs = [] + conflicts.values.flat_map(&:requirements).each do |conflicting| + conflicting.each do |source, conflict_requirements| + conflict_requirements.each do |c| + pairs << [c, source] + end + end + end + + super "Unable to satisfy the following requirements:\n\n" \ + "#{pairs.map { |r, d| "- `#{r}` required by `#{d}`" }.join("\n")}" + + @conflicts = conflicts + @specification_provider = specification_provider + end + + require_relative 'delegates/specification_provider' + include Delegates::SpecificationProvider + + # @return [String] An error message that includes requirement trees, + # which is much more detailed & customizable than the default message + # @param [Hash] opts the options to create a message with. + # @option opts [String] :solver_name The user-facing name of the solver + # @option opts [String] :possibility_type The generic name of a possibility + # @option opts [Proc] :reduce_trees A proc that reduced the list of requirement trees + # @option opts [Proc] :printable_requirement A proc that pretty-prints requirements + # @option opts [Proc] :additional_message_for_conflict A proc that appends additional + # messages for each conflict + # @option opts [Proc] :version_for_spec A proc that returns the version number for a + # possibility + def message_with_trees(opts = {}) + solver_name = opts.delete(:solver_name) { self.class.name.split('::').first } + possibility_type = opts.delete(:possibility_type) { 'possibility named' } + reduce_trees = opts.delete(:reduce_trees) { proc { |trees| trees.uniq.sort_by(&:to_s) } } + printable_requirement = opts.delete(:printable_requirement) { proc { |req| req.to_s } } + additional_message_for_conflict = opts.delete(:additional_message_for_conflict) { proc {} } + version_for_spec = opts.delete(:version_for_spec) { proc(&:to_s) } + incompatible_version_message_for_conflict = opts.delete(:incompatible_version_message_for_conflict) do + proc do |name, _conflict| + %(#{solver_name} could not find compatible versions for #{possibility_type} "#{name}":) + end + end + + conflicts.sort.reduce(''.dup) do |o, (name, conflict)| + o << "\n" << incompatible_version_message_for_conflict.call(name, conflict) << "\n" + if conflict.locked_requirement + o << %( In snapshot (#{name_for_locking_dependency_source}):\n) + o << %( #{printable_requirement.call(conflict.locked_requirement)}\n) + o << %(\n) + end + o << %( In #{name_for_explicit_dependency_source}:\n) + trees = reduce_trees.call(conflict.requirement_trees) + + o << trees.map do |tree| + t = ''.dup + depth = 2 + tree.each do |req| + t << ' ' * depth << printable_requirement.call(req) + unless tree.last == req + if spec = conflict.activated_by_name[name_for(req)] + t << %( was resolved to #{version_for_spec.call(spec)}, which) + end + t << %( depends on) + end + t << %(\n) + depth += 1 + end + t + end.join("\n") + + additional_message_for_conflict.call(o, name, conflict) + + o + end.strip + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/gem_metadata.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/gem_metadata.rb new file mode 100644 index 0000000..a22afd2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/gem_metadata.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +module Molinillo + # The 
version of Molinillo. + VERSION = '0.8.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/modules/specification_provider.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/modules/specification_provider.rb new file mode 100644 index 0000000..8012104 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/modules/specification_provider.rb @@ -0,0 +1,112 @@ +# frozen_string_literal: true + +module Molinillo + # Provides information about specifcations and dependencies to the resolver, + # allowing the {Resolver} class to remain generic while still providing power + # and flexibility. + # + # This module contains the methods that users of Molinillo must to implement, + # using knowledge of their own model classes. + module SpecificationProvider + # Search for the specifications that match the given dependency. + # The specifications in the returned array will be considered in reverse + # order, so the latest version ought to be last. + # @note This method should be 'pure', i.e. the return value should depend + # only on the `dependency` parameter. + # + # @param [Object] dependency + # @return [Array] the specifications that satisfy the given + # `dependency`. + def search_for(dependency) + [] + end + + # Returns the dependencies of `specification`. + # @note This method should be 'pure', i.e. the return value should depend + # only on the `specification` parameter. + # + # @param [Object] specification + # @return [Array] the dependencies that are required by the given + # `specification`. + def dependencies_for(specification) + [] + end + + # Determines whether the given `requirement` is satisfied by the given + # `spec`, in the context of the current `activated` dependency graph. + # + # @param [Object] requirement + # @param [DependencyGraph] activated the current dependency graph in the + # resolution process. + # @param [Object] spec + # @return [Boolean] whether `requirement` is satisfied by `spec` in the + # context of the current `activated` dependency graph. + def requirement_satisfied_by?(requirement, activated, spec) + true + end + + # Determines whether two arrays of dependencies are equal, and thus can be + # grouped. + # + # @param [Array] dependencies + # @param [Array] other_dependencies + # @return [Boolean] whether `dependencies` and `other_dependencies` should + # be considered equal. + def dependencies_equal?(dependencies, other_dependencies) + dependencies == other_dependencies + end + + # Returns the name for the given `dependency`. + # @note This method should be 'pure', i.e. the return value should depend + # only on the `dependency` parameter. + # + # @param [Object] dependency + # @return [String] the name for the given `dependency`. + def name_for(dependency) + dependency.to_s + end + + # @return [String] the name of the source of explicit dependencies, i.e. + # those passed to {Resolver#resolve} directly. + def name_for_explicit_dependency_source + 'user-specified dependency' + end + + # @return [String] the name of the source of 'locked' dependencies, i.e. + # those passed to {Resolver#resolve} directly as the `base` + def name_for_locking_dependency_source + 'Lockfile' + end + + # Sort dependencies so that the ones that are easiest to resolve are first. + # Easiest to resolve is (usually) defined by: + # 1) Is this dependency already activated? + # 2) How relaxed are the requirements? + # 3) Are there any conflicts for this dependency? 
+ # 4) How many possibilities are there to satisfy this dependency? + # + # @param [Array] dependencies + # @param [DependencyGraph] activated the current dependency graph in the + # resolution process. + # @param [{String => Array}] conflicts + # @return [Array] a sorted copy of `dependencies`. + def sort_dependencies(dependencies, activated, conflicts) + dependencies.sort_by do |dependency| + name = name_for(dependency) + [ + activated.vertex_named(name).payload ? 0 : 1, + conflicts[name] ? 0 : 1, + ] + end + end + + # Returns whether this dependency, which has no possible matching + # specifications, can safely be ignored. + # + # @param [Object] dependency + # @return [Boolean] whether this dependency can safely be skipped. + def allow_missing?(dependency) + false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/modules/ui.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/modules/ui.rb new file mode 100644 index 0000000..6175c5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/modules/ui.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module Molinillo + # Conveys information about the resolution process to a user. + module UI + # The {IO} object that should be used to print output. `STDOUT`, by default. + # + # @return [IO] + def output + STDOUT + end + + # Called roughly every {#progress_rate}, this method should convey progress + # to the user. + # + # @return [void] + def indicate_progress + output.print '.' unless debug? + end + + # How often progress should be conveyed to the user via + # {#indicate_progress}, in seconds. A third of a second, by default. + # + # @return [Float] + def progress_rate + 0.33 + end + + # Called before resolution begins. + # + # @return [void] + def before_resolution + output.print 'Resolving dependencies...' + end + + # Called after resolution ends (either successfully or with an error). + # By default, prints a newline. + # + # @return [void] + def after_resolution + output.puts + end + + # Conveys debug information to the user. + # + # @param [Integer] depth the current depth of the resolution process. + # @return [void] + def debug(depth = 0) + if debug? + debug_info = yield + debug_info = debug_info.inspect unless debug_info.is_a?(String) + debug_info = debug_info.split("\n").map { |s| ":#{depth.to_s.rjust 4}: #{s}" } + output.puts debug_info + end + end + + # Whether or not debug messages should be printed. + # By default, whether or not the `MOLINILLO_DEBUG` environment variable is + # set. + # + # @return [Boolean] + def debug? 
+ return @debug_mode if defined?(@debug_mode) + @debug_mode = ENV['MOLINILLO_DEBUG'] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/resolution.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/resolution.rb new file mode 100644 index 0000000..455476c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/resolution.rb @@ -0,0 +1,839 @@ +# frozen_string_literal: true + +module Molinillo + class Resolver + # A specific resolution from a given {Resolver} + class Resolution + # A conflict that the resolution process encountered + # @attr [Object] requirement the requirement that immediately led to the conflict + # @attr [{String,Nil=>[Object]}] requirements the requirements that caused the conflict + # @attr [Object, nil] existing the existing spec that was in conflict with + # the {#possibility} + # @attr [Object] possibility_set the set of specs that was unable to be + # activated due to a conflict. + # @attr [Object] locked_requirement the relevant locking requirement. + # @attr [Array>] requirement_trees the different requirement + # trees that led to every requirement for the conflicting name. + # @attr [{String=>Object}] activated_by_name the already-activated specs. + # @attr [Object] underlying_error an error that has occurred during resolution, and + # will be raised at the end of it if no resolution is found. + Conflict = Struct.new( + :requirement, + :requirements, + :existing, + :possibility_set, + :locked_requirement, + :requirement_trees, + :activated_by_name, + :underlying_error + ) + + class Conflict + # @return [Object] a spec that was unable to be activated due to a conflict + def possibility + possibility_set && possibility_set.latest_version + end + end + + # A collection of possibility states that share the same dependencies + # @attr [Array] dependencies the dependencies for this set of possibilities + # @attr [Array] possibilities the possibilities + PossibilitySet = Struct.new(:dependencies, :possibilities) + + class PossibilitySet + # String representation of the possibility set, for debugging + def to_s + "[#{possibilities.join(', ')}]" + end + + # @return [Object] most up-to-date dependency in the possibility set + def latest_version + possibilities.last + end + end + + # Details of the state to unwind to when a conflict occurs, and the cause of the unwind + # @attr [Integer] state_index the index of the state to unwind to + # @attr [Object] state_requirement the requirement of the state we're unwinding to + # @attr [Array] requirement_tree for the requirement we're relaxing + # @attr [Array] conflicting_requirements the requirements that combined to cause the conflict + # @attr [Array] requirement_trees for the conflict + # @attr [Array] requirements_unwound_to_instead array of unwind requirements that were chosen over this unwind + UnwindDetails = Struct.new( + :state_index, + :state_requirement, + :requirement_tree, + :conflicting_requirements, + :requirement_trees, + :requirements_unwound_to_instead + ) + + class UnwindDetails + include Comparable + + # We compare UnwindDetails when choosing which state to unwind to. If + # two options have the same state_index we prefer the one most + # removed from a requirement that caused the conflict. 
Both options + # would unwind to the same state, but a `grandparent` option will + # filter out fewer of its possibilities after doing so - where a state + # is both a `parent` and a `grandparent` to requirements that have + # caused a conflict this is the correct behaviour. + # @param [UnwindDetail] other UnwindDetail to be compared + # @return [Integer] integer specifying ordering + def <=>(other) + if state_index > other.state_index + 1 + elsif state_index == other.state_index + reversed_requirement_tree_index <=> other.reversed_requirement_tree_index + else + -1 + end + end + + # @return [Integer] index of state requirement in reversed requirement tree + # (the conflicting requirement itself will be at position 0) + def reversed_requirement_tree_index + @reversed_requirement_tree_index ||= + if state_requirement + requirement_tree.reverse.index(state_requirement) + else + 999_999 + end + end + + # @return [Boolean] where the requirement of the state we're unwinding + # to directly caused the conflict. Note: in this case, it is + # impossible for the state we're unwinding to to be a parent of + # any of the other conflicting requirements (or we would have + # circularity) + def unwinding_to_primary_requirement? + requirement_tree.last == state_requirement + end + + # @return [Array] array of sub-dependencies to avoid when choosing a + # new possibility for the state we've unwound to. Only relevant for + # non-primary unwinds + def sub_dependencies_to_avoid + @requirements_to_avoid ||= + requirement_trees.map do |tree| + index = tree.index(state_requirement) + tree[index + 1] if index + end.compact + end + + # @return [Array] array of all the requirements that led to the need for + # this unwind + def all_requirements + @all_requirements ||= requirement_trees.flatten(1) + end + end + + # @return [SpecificationProvider] the provider that knows about + # dependencies, requirements, specifications, versions, etc. + attr_reader :specification_provider + + # @return [UI] the UI that knows how to communicate feedback about the + # resolution process back to the user + attr_reader :resolver_ui + + # @return [DependencyGraph] the base dependency graph to which + # dependencies should be 'locked' + attr_reader :base + + # @return [Array] the dependencies that were explicitly required + attr_reader :original_requested + + # Initializes a new resolution. + # @param [SpecificationProvider] specification_provider + # see {#specification_provider} + # @param [UI] resolver_ui see {#resolver_ui} + # @param [Array] requested see {#original_requested} + # @param [DependencyGraph] base see {#base} + def initialize(specification_provider, resolver_ui, requested, base) + @specification_provider = specification_provider + @resolver_ui = resolver_ui + @original_requested = requested + @base = base + @states = [] + @iteration_counter = 0 + @parents_of = Hash.new { |h, k| h[k] = [] } + end + + # Resolves the {#original_requested} dependencies into a full dependency + # graph + # @raise [ResolverError] if successful resolution is impossible + # @return [DependencyGraph] the dependency graph of successfully resolved + # dependencies + def resolve + start_resolution + + while state + break if !state.requirement && state.requirements.empty? 
+ indicate_progress + if state.respond_to?(:pop_possibility_state) # DependencyState + debug(depth) { "Creating possibility state for #{requirement} (#{possibilities.count} remaining)" } + state.pop_possibility_state.tap do |s| + if s + states.push(s) + activated.tag(s) + end + end + end + process_topmost_state + end + + resolve_activated_specs + ensure + end_resolution + end + + # @return [Integer] the number of resolver iterations in between calls to + # {#resolver_ui}'s {UI#indicate_progress} method + attr_accessor :iteration_rate + private :iteration_rate + + # @return [Time] the time at which resolution began + attr_accessor :started_at + private :started_at + + # @return [Array] the stack of states for the resolution + attr_accessor :states + private :states + + private + + # Sets up the resolution process + # @return [void] + def start_resolution + @started_at = Time.now + + push_initial_state + + debug { "Starting resolution (#{@started_at})\nUser-requested dependencies: #{original_requested}" } + resolver_ui.before_resolution + end + + def resolve_activated_specs + activated.vertices.each do |_, vertex| + next unless vertex.payload + + latest_version = vertex.payload.possibilities.reverse_each.find do |possibility| + vertex.requirements.all? { |req| requirement_satisfied_by?(req, activated, possibility) } + end + + activated.set_payload(vertex.name, latest_version) + end + activated.freeze + end + + # Ends the resolution process + # @return [void] + def end_resolution + resolver_ui.after_resolution + debug do + "Finished resolution (#{@iteration_counter} steps) " \ + "(Took #{(ended_at = Time.now) - @started_at} seconds) (#{ended_at})" + end + debug { 'Unactivated: ' + Hash[activated.vertices.reject { |_n, v| v.payload }].keys.join(', ') } if state + debug { 'Activated: ' + Hash[activated.vertices.select { |_n, v| v.payload }].keys.join(', ') } if state + end + + require_relative 'state' + require_relative 'modules/specification_provider' + + require_relative 'delegates/resolution_state' + require_relative 'delegates/specification_provider' + + include Molinillo::Delegates::ResolutionState + include Molinillo::Delegates::SpecificationProvider + + # Processes the topmost available {RequirementState} on the stack + # @return [void] + def process_topmost_state + if possibility + attempt_to_activate + else + create_conflict + unwind_for_conflict + end + rescue CircularDependencyError => underlying_error + create_conflict(underlying_error) + unwind_for_conflict + end + + # @return [Object] the current possibility that the resolution is trying + # to activate + def possibility + possibilities.last + end + + # @return [RequirementState] the current state the resolution is + # operating upon + def state + states.last + end + + # Creates and pushes the initial state for the resolution, based upon the + # {#requested} dependencies + # @return [void] + def push_initial_state + graph = DependencyGraph.new.tap do |dg| + original_requested.each do |requested| + vertex = dg.add_vertex(name_for(requested), nil, true) + vertex.explicit_requirements << requested + end + dg.tag(:initial_state) + end + + push_state_for_requirements(original_requested, true, graph) + end + + # Unwinds the states stack because a conflict has been encountered + # @return [void] + def unwind_for_conflict + details_for_unwind = build_details_for_unwind + unwind_options = unused_unwind_options + debug(depth) { "Unwinding for conflict: #{requirement} to #{details_for_unwind.state_index / 2}" } + conflicts.tap do |c| + 
sliced_states = states.slice!((details_for_unwind.state_index + 1)..-1) + raise_error_unless_state(c) + activated.rewind_to(sliced_states.first || :initial_state) if sliced_states + state.conflicts = c + state.unused_unwind_options = unwind_options + filter_possibilities_after_unwind(details_for_unwind) + index = states.size - 1 + @parents_of.each { |_, a| a.reject! { |i| i >= index } } + state.unused_unwind_options.reject! { |uw| uw.state_index >= index } + end + end + + # Raises a VersionConflict error, or any underlying error, if there is no + # current state + # @return [void] + def raise_error_unless_state(conflicts) + return if state + + error = conflicts.values.map(&:underlying_error).compact.first + raise error || VersionConflict.new(conflicts, specification_provider) + end + + # @return [UnwindDetails] Details of the nearest index to which we could unwind + def build_details_for_unwind + # Get the possible unwinds for the current conflict + current_conflict = conflicts[name] + binding_requirements = binding_requirements_for_conflict(current_conflict) + unwind_details = unwind_options_for_requirements(binding_requirements) + + last_detail_for_current_unwind = unwind_details.sort.last + current_detail = last_detail_for_current_unwind + + # Look for past conflicts that could be unwound to affect the + # requirement tree for the current conflict + all_reqs = last_detail_for_current_unwind.all_requirements + all_reqs_size = all_reqs.size + relevant_unused_unwinds = unused_unwind_options.select do |alternative| + diff_reqs = all_reqs - alternative.requirements_unwound_to_instead + next if diff_reqs.size == all_reqs_size + # Find the highest index unwind whilst looping through + current_detail = alternative if alternative > current_detail + alternative + end + + # Add the current unwind options to the `unused_unwind_options` array. + # The "used" option will be filtered out during `unwind_for_conflict`. + state.unused_unwind_options += unwind_details.reject { |detail| detail.state_index == -1 } + + # Update the requirements_unwound_to_instead on any relevant unused unwinds + relevant_unused_unwinds.each do |d| + (d.requirements_unwound_to_instead << current_detail.state_requirement).uniq! + end + unwind_details.each do |d| + (d.requirements_unwound_to_instead << current_detail.state_requirement).uniq! + end + + current_detail + end + + # @param [Array] binding_requirements array of requirements that combine to create a conflict + # @return [Array] array of UnwindDetails that have a chance + # of resolving the passed requirements + def unwind_options_for_requirements(binding_requirements) + unwind_details = [] + + trees = [] + binding_requirements.reverse_each do |r| + partial_tree = [r] + trees << partial_tree + unwind_details << UnwindDetails.new(-1, nil, partial_tree, binding_requirements, trees, []) + + # If this requirement has alternative possibilities, check if any would + # satisfy the other requirements that created this conflict + requirement_state = find_state_for(r) + if conflict_fixing_possibilities?(requirement_state, binding_requirements) + unwind_details << UnwindDetails.new( + states.index(requirement_state), + r, + partial_tree, + binding_requirements, + trees, + [] + ) + end + + # Next, look at the parent of this requirement, and check if the requirement + # could have been avoided if an alternative PossibilitySet had been chosen + parent_r = parent_of(r) + next if parent_r.nil? 
+ partial_tree.unshift(parent_r) + requirement_state = find_state_for(parent_r) + if requirement_state.possibilities.any? { |set| !set.dependencies.include?(r) } + unwind_details << UnwindDetails.new( + states.index(requirement_state), + parent_r, + partial_tree, + binding_requirements, + trees, + [] + ) + end + + # Finally, look at the grandparent and up of this requirement, looking + # for any possibilities that wouldn't create their parent requirement + grandparent_r = parent_of(parent_r) + until grandparent_r.nil? + partial_tree.unshift(grandparent_r) + requirement_state = find_state_for(grandparent_r) + if requirement_state.possibilities.any? { |set| !set.dependencies.include?(parent_r) } + unwind_details << UnwindDetails.new( + states.index(requirement_state), + grandparent_r, + partial_tree, + binding_requirements, + trees, + [] + ) + end + parent_r = grandparent_r + grandparent_r = parent_of(parent_r) + end + end + + unwind_details + end + + # @param [DependencyState] state + # @param [Array] binding_requirements array of requirements + # @return [Boolean] whether or not the given state has any possibilities + # that could satisfy the given requirements + def conflict_fixing_possibilities?(state, binding_requirements) + return false unless state + + state.possibilities.any? do |possibility_set| + possibility_set.possibilities.any? do |poss| + possibility_satisfies_requirements?(poss, binding_requirements) + end + end + end + + # Filter's a state's possibilities to remove any that would not fix the + # conflict we've just rewound from + # @param [UnwindDetails] unwind_details details of the conflict just + # unwound from + # @return [void] + def filter_possibilities_after_unwind(unwind_details) + return unless state && !state.possibilities.empty? + + if unwind_details.unwinding_to_primary_requirement? + filter_possibilities_for_primary_unwind(unwind_details) + else + filter_possibilities_for_parent_unwind(unwind_details) + end + end + + # Filter's a state's possibilities to remove any that would not satisfy + # the requirements in the conflict we've just rewound from + # @param [UnwindDetails] unwind_details details of the conflict just unwound from + # @return [void] + def filter_possibilities_for_primary_unwind(unwind_details) + unwinds_to_state = unused_unwind_options.select { |uw| uw.state_index == unwind_details.state_index } + unwinds_to_state << unwind_details + unwind_requirement_sets = unwinds_to_state.map(&:conflicting_requirements) + + state.possibilities.reject! do |possibility_set| + possibility_set.possibilities.none? do |poss| + unwind_requirement_sets.any? do |requirements| + possibility_satisfies_requirements?(poss, requirements) + end + end + end + end + + # @param [Object] possibility a single possibility + # @param [Array] requirements an array of requirements + # @return [Boolean] whether the possibility satisfies all of the + # given requirements + def possibility_satisfies_requirements?(possibility, requirements) + name = name_for(possibility) + + activated.tag(:swap) + activated.set_payload(name, possibility) if activated.vertex_named(name) + satisfied = requirements.all? 
{ |r| requirement_satisfied_by?(r, activated, possibility) } + activated.rewind_to(:swap) + + satisfied + end + + # Filter's a state's possibilities to remove any that would (eventually) + # create a requirement in the conflict we've just rewound from + # @param [UnwindDetails] unwind_details details of the conflict just unwound from + # @return [void] + def filter_possibilities_for_parent_unwind(unwind_details) + unwinds_to_state = unused_unwind_options.select { |uw| uw.state_index == unwind_details.state_index } + unwinds_to_state << unwind_details + + primary_unwinds = unwinds_to_state.select(&:unwinding_to_primary_requirement?).uniq + parent_unwinds = unwinds_to_state.uniq - primary_unwinds + + allowed_possibility_sets = primary_unwinds.flat_map do |unwind| + states[unwind.state_index].possibilities.select do |possibility_set| + possibility_set.possibilities.any? do |poss| + possibility_satisfies_requirements?(poss, unwind.conflicting_requirements) + end + end + end + + requirements_to_avoid = parent_unwinds.flat_map(&:sub_dependencies_to_avoid) + + state.possibilities.reject! do |possibility_set| + !allowed_possibility_sets.include?(possibility_set) && + (requirements_to_avoid - possibility_set.dependencies).empty? + end + end + + # @param [Conflict] conflict + # @return [Array] minimal array of requirements that would cause the passed + # conflict to occur. + def binding_requirements_for_conflict(conflict) + return [conflict.requirement] if conflict.possibility.nil? + + possible_binding_requirements = conflict.requirements.values.flatten(1).uniq + + # When there's a `CircularDependency` error the conflicting requirement + # (the one causing the circular) won't be `conflict.requirement` + # (which won't be for the right state, because we won't have created it, + # because it's circular). + # We need to make sure we have that requirement in the conflict's list, + # otherwise we won't be able to unwind properly, so we just return all + # the requirements for the conflict. + return possible_binding_requirements if conflict.underlying_error + + possibilities = search_for(conflict.requirement) + + # If all the requirements together don't filter out all possibilities, + # then the only two requirements we need to consider are the initial one + # (where the dependency's version was first chosen) and the last + if binding_requirement_in_set?(nil, possible_binding_requirements, possibilities) + return [conflict.requirement, requirement_for_existing_name(name_for(conflict.requirement))].compact + end + + # Loop through the possible binding requirements, removing each one + # that doesn't bind. Use a `reverse_each` as we want the earliest set of + # binding requirements, and don't use `reject!` as we wish to refine the + # array *on each iteration*. + binding_requirements = possible_binding_requirements.dup + possible_binding_requirements.reverse_each do |req| + next if req == conflict.requirement + unless binding_requirement_in_set?(req, binding_requirements, possibilities) + binding_requirements -= [req] + end + end + + binding_requirements + end + + # @param [Object] requirement we wish to check + # @param [Array] possible_binding_requirements array of requirements + # @param [Array] possibilities array of possibilities the requirements will be used to filter + # @return [Boolean] whether or not the given requirement is required to filter + # out all elements of the array of possibilities. 
+ def binding_requirement_in_set?(requirement, possible_binding_requirements, possibilities) + possibilities.any? do |poss| + possibility_satisfies_requirements?(poss, possible_binding_requirements - [requirement]) + end + end + + # @param [Object] requirement + # @return [Object] the requirement that led to `requirement` being added + # to the list of requirements. + def parent_of(requirement) + return unless requirement + return unless index = @parents_of[requirement].last + return unless parent_state = @states[index] + parent_state.requirement + end + + # @param [String] name + # @return [Object] the requirement that led to a version of a possibility + # with the given name being activated. + def requirement_for_existing_name(name) + return nil unless vertex = activated.vertex_named(name) + return nil unless vertex.payload + states.find { |s| s.name == name }.requirement + end + + # @param [Object] requirement + # @return [ResolutionState] the state whose `requirement` is the given + # `requirement`. + def find_state_for(requirement) + return nil unless requirement + states.find { |i| requirement == i.requirement } + end + + # @param [Object] underlying_error + # @return [Conflict] a {Conflict} that reflects the failure to activate + # the {#possibility} in conjunction with the current {#state} + def create_conflict(underlying_error = nil) + vertex = activated.vertex_named(name) + locked_requirement = locked_requirement_named(name) + + requirements = {} + unless vertex.explicit_requirements.empty? + requirements[name_for_explicit_dependency_source] = vertex.explicit_requirements + end + requirements[name_for_locking_dependency_source] = [locked_requirement] if locked_requirement + vertex.incoming_edges.each do |edge| + (requirements[edge.origin.payload.latest_version] ||= []).unshift(edge.requirement) + end + + activated_by_name = {} + activated.each { |v| activated_by_name[v.name] = v.payload.latest_version if v.payload } + conflicts[name] = Conflict.new( + requirement, + requirements, + vertex.payload && vertex.payload.latest_version, + possibility, + locked_requirement, + requirement_trees, + activated_by_name, + underlying_error + ) + end + + # @return [Array>] The different requirement + # trees that led to every requirement for the current spec. + def requirement_trees + vertex = activated.vertex_named(name) + vertex.requirements.map { |r| requirement_tree_for(r) } + end + + # @param [Object] requirement + # @return [Array] the list of requirements that led to + # `requirement` being required. + def requirement_tree_for(requirement) + tree = [] + while requirement + tree.unshift(requirement) + requirement = parent_of(requirement) + end + tree + end + + # Indicates progress roughly once every second + # @return [void] + def indicate_progress + @iteration_counter += 1 + @progress_rate ||= resolver_ui.progress_rate + if iteration_rate.nil? 
+ if Time.now - started_at >= @progress_rate + self.iteration_rate = @iteration_counter + end + end + + if iteration_rate && (@iteration_counter % iteration_rate) == 0 + resolver_ui.indicate_progress + end + end + + # Calls the {#resolver_ui}'s {UI#debug} method + # @param [Integer] depth the depth of the {#states} stack + # @param [Proc] block a block that yields a {#to_s} + # @return [void] + def debug(depth = 0, &block) + resolver_ui.debug(depth, &block) + end + + # Attempts to activate the current {#possibility} + # @return [void] + def attempt_to_activate + debug(depth) { 'Attempting to activate ' + possibility.to_s } + existing_vertex = activated.vertex_named(name) + if existing_vertex.payload + debug(depth) { "Found existing spec (#{existing_vertex.payload})" } + attempt_to_filter_existing_spec(existing_vertex) + else + latest = possibility.latest_version + possibility.possibilities.select! do |possibility| + requirement_satisfied_by?(requirement, activated, possibility) + end + if possibility.latest_version.nil? + # ensure there's a possibility for better error messages + possibility.possibilities << latest if latest + create_conflict + unwind_for_conflict + else + activate_new_spec + end + end + end + + # Attempts to update the existing vertex's `PossibilitySet` with a filtered version + # @return [void] + def attempt_to_filter_existing_spec(vertex) + filtered_set = filtered_possibility_set(vertex) + if !filtered_set.possibilities.empty? + activated.set_payload(name, filtered_set) + new_requirements = requirements.dup + push_state_for_requirements(new_requirements, false) + else + create_conflict + debug(depth) { "Unsatisfied by existing spec (#{vertex.payload})" } + unwind_for_conflict + end + end + + # Generates a filtered version of the existing vertex's `PossibilitySet` using the + # current state's `requirement` + # @param [Object] vertex existing vertex + # @return [PossibilitySet] filtered possibility set + def filtered_possibility_set(vertex) + PossibilitySet.new(vertex.payload.dependencies, vertex.payload.possibilities & possibility.possibilities) + end + + # @param [String] requirement_name the spec name to search for + # @return [Object] the locked spec named `requirement_name`, if one + # is found on {#base} + def locked_requirement_named(requirement_name) + vertex = base.vertex_named(requirement_name) + vertex && vertex.payload + end + + # Add the current {#possibility} to the dependency graph of the current + # {#state} + # @return [void] + def activate_new_spec + conflicts.delete(name) + debug(depth) { "Activated #{name} at #{possibility}" } + activated.set_payload(name, possibility) + require_nested_dependencies_for(possibility) + end + + # Requires the dependencies that the recently activated spec has + # @param [Object] possibility_set the PossibilitySet that has just been + # activated + # @return [void] + def require_nested_dependencies_for(possibility_set) + nested_dependencies = dependencies_for(possibility_set.latest_version) + debug(depth) { "Requiring nested dependencies (#{nested_dependencies.join(', ')})" } + nested_dependencies.each do |d| + activated.add_child_vertex(name_for(d), nil, [name_for(possibility_set.latest_version)], d) + parent_index = states.size - 1 + parents = @parents_of[d] + parents << parent_index if parents.empty? + end + + push_state_for_requirements(requirements + nested_dependencies, !nested_dependencies.empty?) 
+ end + + # Pushes a new {DependencyState} that encapsulates both existing and new + # requirements + # @param [Array] new_requirements + # @param [Boolean] requires_sort + # @param [Object] new_activated + # @return [void] + def push_state_for_requirements(new_requirements, requires_sort = true, new_activated = activated) + new_requirements = sort_dependencies(new_requirements.uniq, new_activated, conflicts) if requires_sort + new_requirement = nil + loop do + new_requirement = new_requirements.shift + break if new_requirement.nil? || states.none? { |s| s.requirement == new_requirement } + end + new_name = new_requirement ? name_for(new_requirement) : ''.freeze + possibilities = possibilities_for_requirement(new_requirement) + handle_missing_or_push_dependency_state DependencyState.new( + new_name, new_requirements, new_activated, + new_requirement, possibilities, depth, conflicts.dup, unused_unwind_options.dup + ) + end + + # Checks a proposed requirement with any existing locked requirement + # before generating an array of possibilities for it. + # @param [Object] requirement the proposed requirement + # @param [Object] activated + # @return [Array] possibilities + def possibilities_for_requirement(requirement, activated = self.activated) + return [] unless requirement + if locked_requirement_named(name_for(requirement)) + return locked_requirement_possibility_set(requirement, activated) + end + + group_possibilities(search_for(requirement)) + end + + # @param [Object] requirement the proposed requirement + # @param [Object] activated + # @return [Array] possibility set containing only the locked requirement, if any + def locked_requirement_possibility_set(requirement, activated = self.activated) + all_possibilities = search_for(requirement) + locked_requirement = locked_requirement_named(name_for(requirement)) + + # Longwinded way to build a possibilities array with either the locked + # requirement or nothing in it. Required, since the API for + # locked_requirement isn't guaranteed. + locked_possibilities = all_possibilities.select do |possibility| + requirement_satisfied_by?(locked_requirement, activated, possibility) + end + + group_possibilities(locked_possibilities) + end + + # Build an array of PossibilitySets, with each element representing a group of + # dependency versions that all have the same sub-dependency version constraints + # and are contiguous. + # @param [Array] possibilities an array of possibilities + # @return [Array] an array of possibility sets + def group_possibilities(possibilities) + possibility_sets = [] + current_possibility_set = nil + + possibilities.reverse_each do |possibility| + dependencies = dependencies_for(possibility) + if current_possibility_set && dependencies_equal?(current_possibility_set.dependencies, dependencies) + current_possibility_set.possibilities.unshift(possibility) + else + possibility_sets.unshift(PossibilitySet.new(dependencies, [possibility])) + current_possibility_set = possibility_sets.first + end + end + + possibility_sets + end + + # Pushes a new {DependencyState}. + # If the {#specification_provider} says to + # {SpecificationProvider#allow_missing?} that particular requirement, and + # there are no possibilities for that requirement, then `state` is not + # pushed, and the vertex in {#activated} is removed, and we continue + # resolving the remaining requirements. + # @param [DependencyState] state + # @return [void] + def handle_missing_or_push_dependency_state(state) + if state.requirement && state.possibilities.empty? 
&& allow_missing?(state.requirement) + state.activated.detach_vertex_named(state.name) + push_state_for_requirements(state.requirements.dup, false, state.activated) + else + states.push(state).tap { activated.tag(state) } + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/resolver.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/resolver.rb new file mode 100644 index 0000000..a2ce6d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/resolver.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require_relative 'dependency_graph' + +module Molinillo + # This class encapsulates a dependency resolver. + # The resolver is responsible for determining which set of dependencies to + # activate, with feedback from the {#specification_provider} + # + # + class Resolver + require_relative 'resolution' + + # @return [SpecificationProvider] the specification provider used + # in the resolution process + attr_reader :specification_provider + + # @return [UI] the UI module used to communicate back to the user + # during the resolution process + attr_reader :resolver_ui + + # Initializes a new resolver. + # @param [SpecificationProvider] specification_provider + # see {#specification_provider} + # @param [UI] resolver_ui + # see {#resolver_ui} + def initialize(specification_provider, resolver_ui) + @specification_provider = specification_provider + @resolver_ui = resolver_ui + end + + # Resolves the requested dependencies into a {DependencyGraph}, + # locking to the base dependency graph (if specified) + # @param [Array] requested an array of 'requested' dependencies that the + # {#specification_provider} can understand + # @param [DependencyGraph,nil] base the base dependency graph to which + # dependencies should be 'locked' + def resolve(requested, base = DependencyGraph.new) + Resolution.new(specification_provider, + resolver_ui, + requested, + base). 
+ resolve + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/state.rb b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/state.rb new file mode 100644 index 0000000..d75759b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/molinillo-0.8.0/lib/molinillo/state.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Molinillo + # A state that a {Resolution} can be in + # @attr [String] name the name of the current requirement + # @attr [Array] requirements currently unsatisfied requirements + # @attr [DependencyGraph] activated the graph of activated dependencies + # @attr [Object] requirement the current requirement + # @attr [Object] possibilities the possibilities to satisfy the current requirement + # @attr [Integer] depth the depth of the resolution + # @attr [Hash] conflicts unresolved conflicts, indexed by dependency name + # @attr [Array] unused_unwind_options unwinds for previous conflicts that weren't explored + ResolutionState = Struct.new( + :name, + :requirements, + :activated, + :requirement, + :possibilities, + :depth, + :conflicts, + :unused_unwind_options + ) + + class ResolutionState + # Returns an empty resolution state + # @return [ResolutionState] an empty state + def self.empty + new(nil, [], DependencyGraph.new, nil, nil, 0, {}, []) + end + end + + # A state that encapsulates a set of {#requirements} with an {Array} of + # possibilities + class DependencyState < ResolutionState + # Removes a possibility from `self` + # @return [PossibilityState] a state with a single possibility, + # the possibility that was removed from `self` + def pop_possibility_state + PossibilityState.new( + name, + requirements.dup, + activated, + requirement, + [possibilities.pop], + depth + 1, + conflicts.dup, + unused_unwind_options.dup + ).tap do |state| + state.activated.tag(state) + end + end + end + + # A state that encapsulates a single possibility to fulfill the given + # {#requirement} + class PossibilityState < ResolutionState + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.gitignore new file mode 100644 index 0000000..9106b2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.gitignore @@ -0,0 +1,8 @@ +/.bundle/ +/.yardoc +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rspec b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rspec new file mode 100644 index 0000000..8c18f1a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rspec @@ -0,0 +1,2 @@ +--format documentation +--color diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rubocop.yml new file mode 100644 index 0000000..c97ad8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rubocop.yml @@ -0,0 +1,14 @@ +inherit_from: + - .rubocop_todo.yml + +AllCops: + TargetRubyVersion: 2.1 + Exclude: + - 'lib/nanaimo/unicode/**/*.rb' + - 'vendor/**/*' + +Lint/AssignmentInCondition: + Enabled: false + +Metrics: + Enabled: false diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rubocop_todo.yml b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rubocop_todo.yml new file mode 100644 index 0000000..53bfac5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.rubocop_todo.yml @@ -0,0 +1,104 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2018-02-13 09:45:35 -0800 using RuboCop version 0.52.1. 
+# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 22 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: auto_detection, squiggly, active_support, powerpack, unindent +Layout/IndentHeredoc: + Exclude: + - 'Rakefile' + - 'lib/nanaimo/writer/xml.rb' + - 'spec/nanaimo/reader_spec.rb' + - 'spec/nanaimo/writer/xml_spec.rb' + +# Offense count: 2 +# Configuration parameters: Blacklist. +# Blacklist: END, (?-mix:EO[A-Z]{1}) +Naming/HeredocDelimiterNaming: + Exclude: + - 'lib/nanaimo/writer/xml.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +Performance/RegexpMatch: + Exclude: + - 'lib/nanaimo/unicode.rb' + - 'lib/nanaimo/writer.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle. +# SupportedStyles: compact, expanded +Style/EmptyMethod: + Exclude: + - 'lib/nanaimo/writer/xml.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/Encoding: + Exclude: + - 'nanaimo.gemspec' + +# Offense count: 2 +# Configuration parameters: . +# SupportedStyles: annotated, template, unannotated +Style/FormatStringToken: + EnforcedStyle: unannotated + +# Offense count: 2 +Style/IdenticalConditionalBranches: + Exclude: + - 'lib/nanaimo/unicode.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +Style/IfUnlessModifier: + Exclude: + - 'lib/nanaimo/reader.rb' + - 'lib/nanaimo/unicode.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: AutoCorrect, EnforcedStyle. +# SupportedStyles: predicate, comparison +Style/NumericPredicate: + Exclude: + - 'spec/**/*' + - 'lib/nanaimo/writer.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: PreferredDelimiters. +Style/PercentLiteralDelimiters: + Exclude: + - 'lib/nanaimo/reader.rb' + - 'lib/nanaimo/unicode.rb' + - 'spec/nanaimo/reader_spec.rb' + - 'spec/nanaimo/unicode_spec.rb' + - 'spec/nanaimo/writer/xml_spec.rb' + - 'spec/nanaimo/writer_spec.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/RedundantFreeze: + Exclude: + - 'lib/nanaimo/reader.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: MinSize. +# SupportedStyles: percent, brackets +Style/SymbolArray: + EnforcedStyle: brackets + +# Offense count: 56 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. 
+# URISchemes: http, https +Metrics/LineLength: + Max: 331 diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.travis.yml new file mode 100644 index 0000000..46a1f04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/.travis.yml @@ -0,0 +1,18 @@ +dist: trusty +bundler_args: --without debugging documentation +branches: + only: + - master + +language: ruby +cache: bundler +rvm: + # The latest ruby version + - 2.7.0 + - 2.6.2 + - 2.5.0 + - 2.4.2 + # OS X 10.9.5-10.10.0 (2.0.0-p481) + - 2.0.0-p481 +before_install: + - gem install bundler -v "~> 1.17" diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/CHANGELOG.md new file mode 100644 index 0000000..80c38ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/CHANGELOG.md @@ -0,0 +1,168 @@ +# Nanaimo Changelog + +## 0.3.0 (2020-07-17) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 0.2.6 (2018-07-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix parse errors crashing when attempting to show context when the error + occurs on the first character in the plist. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.2.5 (2018-04-04) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix parsing arrays that contain a comment after a trailing comma. + [Samuel Giddins](https://github.com/segiddins) + [#26](https://github.com/CocoaPods/Nanaimo/issues/26) + + +## 0.2.4 (2018-03-22) + +##### Enhancements + +* Enable frozen string literals to improve performance. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.2.3 (2016-11-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Quote the empty string `nil` is implicitly written as in non-strict mode. + [Samuel Giddins](https://github.com/segiddins) + [Xcodeproj#453](https://github.com/CocoaPods/Xcodeproj/issues/453) + + +## 0.2.2 (2016-11-04) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix extraneously escaping single quotes when writing quoted strings. + [Samuel Giddins](https://github.com/segiddins) + +* Properly align the `^` in parse error messages when the line with the syntax + error contains tabs. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.2.1 (2016-11-03) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix reading all supported characters in unquoted strings. + [Samuel Giddins](https://github.com/segiddins) + [#13](https://github.com/CocoaPods/Nanaimo/issues/13) + + +## 0.2.0 (2016-11-02) + +##### Enhancements + +* Add context to parse errors. + [Samuel Giddins](https://github.com/segiddins) + [#5](https://github.com/CocoaPods/Nanaimo/issues/5) + +* Allow disabling 'strict' mode when writing plists, allowing unknown object + types to be serialized as their string representations. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* None. + + +## 0.1.4 (2016-11-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Allow reading unquoted strings that contain `-`. + [Samuel Giddins](https://github.com/segiddins) + [Xcodeproj#438](https://github.com/CocoaPods/Xcodeproj/issues/438) + + +## 0.1.3 (2016-11-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix unquoting a sequence of backslashes. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#6108](https://github.com/CocoaPods/CocoaPods/issues/6108) + + +## 0.1.2 (2016-10-29) + +##### Enhancements + +* None. 
+ +##### Bug Fixes + +* Add support for unquoted strings that contain a `$`. + [Danielle Tomlinson](https://github.com/dantoml) + [CocoaPods#6101](https://github.com/CocoaPods/CocoaPods/issues/6101) + + +## 0.1.1 (2016-10-28) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Ensure all required classes are required before being used. + [Samuel Giddins](https://github.com/segiddins) + [Xcodeproj#435](https://github.com/CocoaPods/Xcodeproj/issues/435) + + +## 0.1.0 (2016-10-21) + +##### Enhancements + +* Initial implementation. + [Samuel Giddins](https://github.com/segiddins) diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/CODE_OF_CONDUCT.md b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..ced55a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/CODE_OF_CONDUCT.md @@ -0,0 +1,49 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer at dan@tomlinson.io. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. 
+ +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Gemfile new file mode 100644 index 0000000..669620b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Gemfile @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +# Specify your gem's dependencies in nanaimo.gemspec +gemspec + +group :development do + gem 'rake', '~> 12.0' + gem 'rspec' + gem 'rubocop', install_if: RUBY_VERSION >= '2.1' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Gemfile.lock b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Gemfile.lock new file mode 100644 index 0000000..f1f00f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Gemfile.lock @@ -0,0 +1,51 @@ +PATH + remote: . + specs: + nanaimo (0.3.0) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.4.0) + diff-lcs (1.3) + parallel (1.12.1) + parser (2.5.0.2) + ast (~> 2.4.0) + powerpack (0.1.1) + rainbow (3.0.0) + rake (12.3.3) + rspec (3.9.0) + rspec-core (~> 3.9.0) + rspec-expectations (~> 3.9.0) + rspec-mocks (~> 3.9.0) + rspec-core (3.9.1) + rspec-support (~> 3.9.1) + rspec-expectations (3.9.1) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-mocks (3.9.1) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-support (3.9.2) + rubocop (0.52.1) + parallel (~> 1.10) + parser (>= 2.4.0.2, < 3.0) + powerpack (~> 0.1) + rainbow (>= 2.2.2, < 4.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-progressbar (1.9.0) + unicode-display_width (1.3.0) + +PLATFORMS + ruby + +DEPENDENCIES + bundler (~> 1.12) + nanaimo! + rake (~> 12.0) + rspec + rubocop + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/LICENSE.txt new file mode 100644 index 0000000..1617264 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Danielle Tomlinson, Samuel Giddins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/README.md b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/README.md new file mode 100644 index 0000000..f7787ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/README.md @@ -0,0 +1,55 @@ +# Nanaimo + +Nanaimo is a simple library that implements ASCII Plist serialization and +deserialization, entirely with native Ruby code (and zero dependencies). It +also comes with out-of-the-box support for serializing Xcode projects (complete +with annotations) and XML plists. + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'nanaimo' +``` + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install nanaimo + +## Usage + +```ruby +require 'nanaimo' + +# parse a native ruby object from an ascii plist file +project_hash = Nanaimo::Reader + .from_file("App.xcodeproj/project.pbxproj") + .parse! + .as_ruby + +# change that object +project_hash['...'] = '...' + +# re-serialize it +ascii_plist_string = Nanaimo::Writer.new(project_hash).write +``` + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/CocoaPods/nanaimo. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct. + + +## License + +The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT). 
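As a companion to the vendored README above, here is a minimal sketch of parsing an ASCII plist from an in-memory string with the `Nanaimo::Reader` and `Nanaimo::Plist` APIs that appear later in this diff. It is not part of the gem itself; the plist literal and the expected output shown in comments are illustrative assumptions.

```ruby
require 'nanaimo'

# Hypothetical ASCII (OpenStep-style) plist literal, used only for this example.
source = '{ name = Zephyr; "swift_version" = "5.3"; platforms = (ios, tvos); }'

# Reader.plist_type sniffs the format of the contents; only :ascii plists are
# parseable here, while :xml and :binary raise
# Nanaimo::Reader::UnsupportedPlistFormatError from parse!.
Nanaimo::Reader.plist_type(source) # => :ascii

plist = Nanaimo::Reader.new(source).parse! # => Nanaimo::Plist
plist.as_ruby
# => {"name"=>"Zephyr", "swift_version"=>"5.3", "platforms"=>["ios", "tvos"]}
```

`parse!` returns a `Nanaimo::Plist` whose nodes retain their annotations, which is what allows the README's `project.pbxproj` round trip to preserve Xcode's comments when re-serializing with `Nanaimo::Writer`.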
diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Rakefile new file mode 100644 index 0000000..c3735c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/Rakefile @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require 'bundler/gem_tasks' +require 'rspec/core/rake_task' + +RSpec::Core::RakeTask.new(:spec) + +task default: [:spec] + +if RUBY_VERSION >= '2.1' + require 'rubocop/rake_task' + RuboCop::RakeTask.new + task default: :rubocop +end + +task :generate_nextstep_mappings do + require 'net/http' + url = 'http://ftp.unicode.org/Public/MAPPINGS/VENDORS/NEXT/NEXTSTEP.TXT' + mappings = Net::HTTP.get(URI(url)) + .lines + .grep(/^[^#$]/) + .map { |l| l.split("\t", 3) } + .reduce('') do |f, (ns, uc, cm)| + f << " #{ns} => #{uc}, #{cm}" + end + map = <<-RUBY +# frozen-string-literal: true +module Nanaimo + module Unicode + # Taken from #{url} + NEXT_STEP_MAPPING = { +#{mappings} }.freeze + end +end + RUBY + File.open('lib/nanaimo/unicode/next_step_mapping.rb', 'w') { |f| f << map } +end + +task :generate_quote_maps do + quote_map = { + "\a" => '\\a', + "\b" => '\\b', + "\f" => '\\f', + "\r" => '\\r', + "\t" => '\\t', + "\v" => '\\v', + "\n" => '\\n', + %(') => "\\'", + %(") => '\\"', + '\\' => '\\\\' + } + + unquote_map = quote_map.each_with_object("\n" => "\n") do |(value, escaped), map| + map[escaped[1..-1]] = value + map + end + quote_map.delete("'") + + 0.upto(31) { |i| quote_map[[i].pack('U')] ||= format('\\U%04x', i) } + quote_regexp = Regexp.union(quote_map.keys) + + dump_hash = proc do |hash, indent = 4| + hash.reduce("{\n") { |dumped, (k, v)| dumped << "#{' ' * (indent + 2)}#{k.dump} => #{v.dump},\n" } << ' ' * indent << '}.freeze' + end + + map = <<-RUBY +# frozen-string-literal: true +module Nanaimo + module Unicode + QUOTE_MAP = #{dump_hash[quote_map]} + + UNQUOTE_MAP = #{dump_hash[unquote_map]} + + QUOTE_REGEXP = #{quote_regexp.inspect} + end +end + RUBY + + File.open('lib/nanaimo/unicode/quote_maps.rb', 'w') { |f| f << map } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/bin/console b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/bin/console new file mode 100755 index 0000000..e66267b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/bin/console @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require 'bundler/setup' +require 'nanaimo' + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require 'irb' +IRB.start diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/bin/setup b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/bin/setup new file mode 100755 index 0000000..dce67d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/bin/setup @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' +set -vx + +bundle install + +# Do any other automated setup that you need to do here diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo.rb new file mode 100644 index 0000000..03f61a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require 'nanaimo/version' + +# A native Ruby implementation of ASCII plist parsing and serialization. 
+# +module Nanaimo + class Error < StandardError; end + + DEBUG = !ENV['NANAIMO_DEBUG'].nil? + private_constant :DEBUG + def self.debug + return unless DEBUG + warn yield + end + + require 'nanaimo/object' + require 'nanaimo/plist' + require 'nanaimo/reader' + require 'nanaimo/unicode' + require 'nanaimo/writer' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/object.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/object.rb new file mode 100644 index 0000000..72a299e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/object.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +module Nanaimo + # An object that belongs to a plist. + # + class Object + # @return The underlying native Ruby value + # + attr_accessor :value + + # @return [String] The annotation comment + # + attr_accessor :annotation + + def initialize(value, annotation) + self.value = value + self.annotation = annotation + + raise 'Item cannot be initialize with a nil value' if value.nil? + end + + def ==(other) + return unless other + if other.is_a?(self.class) + other.value == value && annotation == other.annotation + elsif other.is_a?(value.class) + other == value + end + end + alias eql? == + + def hash + value.hash + end + + def <=>(other) + other_value = if other.is_a?(Object) + other.value + elsif other.is_a?(value.class) + other + end + return unless other_value + + value <=> other_value + end + + def to_s + format('<%s %s>', self.class, value) + end + + # @return A native Ruby object representation + # + def as_ruby + raise 'unimplemented' + end + end + + # A string object in a Plist. + # + class String < Object + def as_ruby + value + end + end + + # A string object surrounded by quotes in a Plist. + # + class QuotedString < Object + def as_ruby + value + end + end + + # A data object in a Plist, represented by a binary-encoded string. + # + class Data < Object + def initialize(value, annotation) + value &&= value.dup.force_encoding(Encoding::BINARY) + super(value, annotation) + end + + def as_ruby + value + end + end + + # An array object in a Plist. + # + class Array < Object + def as_ruby + value.map(&:as_ruby) + end + end + + # A dictionary object in a Plist. + # + class Dictionary < Object + def as_ruby + Hash[value.map { |k, v| [k.as_ruby, v.as_ruby] }] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/plist.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/plist.rb new file mode 100644 index 0000000..fe465c4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/plist.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module Nanaimo + # A Plist. + # + class Plist + # @return [Nanaimo::Object] The root level object in the plist. + # + attr_accessor :root_object + + # @return [String] The encoding of the plist. + # + attr_accessor :file_type + + def initialize(root_object = nil, file_type = nil) + @root_object = root_object + @file_type = file_type + end + + def ==(other) + return unless other.is_a?(Nanaimo::Plist) + file_type == other.file_type && root_object == other.root_object + end + + def hash + root_object.hash + end + + # @return A native Ruby object representation of the plist. 
+ # + def as_ruby + root_object.as_ruby + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/reader.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/reader.rb new file mode 100644 index 0000000..bfd01ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/reader.rb @@ -0,0 +1,287 @@ +# frozen-string-literal: true + +module Nanaimo + # Transforms plist strings into Plist objects. + # + class Reader + autoload :StringScanner, 'strscan' + + # Raised when attempting to read a plist with an unsupported file format. + # + class UnsupportedPlistFormatError < Error + # @return [Symbol] The unsupported format. + # + attr_reader :format + + def initialize(format) + @format = format + end + + def to_s + "#{format} plists are currently unsupported" + end + end + + # Raised when parsing fails. + # + class ParseError < Error + # @return [[Integer, Integer]] The (line, column) offset into the plist + # where the error occurred + # + attr_accessor :location + + # @return [String] The contents of the plist. + # + attr_accessor :plist_string + + def to_s + "[!] #{super}#{context}" + end + + def context(n = 2) + line_number, column = location + line_number -= 1 + lines = plist_string.split(NEWLINE) + + s = line_number.succ.to_s.size + indent = "#{' ' * s}# " + indicator = "#{line_number.succ}> " + + m = ::String.new("\n") + + m << "#{indent}-------------------------------------------\n" + m << lines[[line_number - n, 0].max...line_number].map do |l| + "#{indent}#{l}\n" + end.join + + line = lines[line_number].to_s + m << "#{indicator}#{line}\n" + + m << ' ' * indent.size + m << line[0, column.pred].gsub(/[^\t]/, ' ') + m << "^\n" + + m << Array(lines[line_number.succ..[lines.count.pred, line_number + n].min]).map do |l| + l.strip.empty? ? '' : "#{indent}#{l}\n" + end.join + m << "#{indent}-------------------------------------------\n" + end + end + + # @param plist_contents [String] + # + # @return [Symbol] The file format of the plist in the given string. + # + def self.plist_type(plist_contents) + case plist_contents + when /\Abplist/ + :binary + when /\A<\?xml/ + :xml + else + :ascii + end + end + + # @param file_path [String] + # + # @return [Plist] A parsed plist from the given file + # + def self.from_file(file_path) + new(File.read(file_path)) + end + + # @param contents [String] The plist to be parsed + # + def initialize(contents) + @scanner = StringScanner.new(contents) + end + + # Parses the contents of the plist + # + # @return [Plist] The parsed Plist object. + # + def parse! + plist_format = ensure_ascii_plist! + read_string_encoding + root_object = parse_object + + eat_whitespace! + raise_parser_error ParseError, 'Found additional characters after parsing the root plist object' unless @scanner.eos? + + Nanaimo::Plist.new(root_object, plist_format) + end + + private + + def ensure_ascii_plist! + self.class.plist_type(@scanner.string).tap do |plist_format| + raise UnsupportedPlistFormatError, plist_format unless plist_format == :ascii + end + end + + def read_string_encoding + # TODO + end + + def parse_object(already_parsed_comment: false) + _comment = skip_to_non_space_matching_annotations unless already_parsed_comment + start_pos = @scanner.pos + raise_parser_error ParseError, 'Unexpected end of string while parsing' if @scanner.eos? 
+ if @scanner.skip(/\{/) + parse_dictionary + elsif @scanner.skip(/\(/) + parse_array + elsif @scanner.skip(//) + raise_parser_error ParseError, "Data missing closing '>'" + end + data.chomp!('>') + data.delete!(' ') + unless data.size.even? + @scanner.unscan + raise_parser_error ParseError, 'Data has an uneven number of hex digits' + end + data = [data].pack('H*') + Nanaimo::Data.new(data, nil) + end + + def current_character + @scanner.peek(1) + end + + def read_singleline_comment + unless comment = @scanner.scan_until(NEWLINE) + raise_parser_error ParseError, 'Failed to terminate single line comment' + end + comment + end + + def eat_whitespace! + @scanner.skip(MANY_WHITESPACES) + end + + NEWLINE_CHARACTERS = %W(\x0A \x0D \u2028 \u2029).freeze + NEWLINE = Regexp.union(*NEWLINE_CHARACTERS) + + WHITESPACE_CHARACTERS = NEWLINE_CHARACTERS + %W(\x09 \x0B \x0C \x20) + WHITESPACE = Regexp.union(*WHITESPACE_CHARACTERS) + + MANY_WHITESPACES = /#{WHITESPACE}+/ + + def read_multiline_comment + unless annotation = @scanner.scan(%r{(?:.+?)(?=\*/)}m) + raise_parser_error ParseError, 'Failed to terminate multiline comment' + end + @scanner.skip(%r{\*/}) + + annotation + end + + def skip_to_non_space_matching_annotations + annotation = ''.freeze + until @scanner.eos? + eat_whitespace! + + # Comment Detection + if @scanner.skip(%r{//}) + annotation = read_singleline_comment + next + elsif @scanner.skip(%r{/\*}) + annotation = read_multiline_comment + next + end + + break + end + annotation + end + + def location_in(scanner) + pos = scanner.charpos + line = scanner.string[0..scanner.charpos].scan(NEWLINE).size + 1 + column = pos - (scanner.string.rindex(NEWLINE, pos - 1) || -1) + column = [1, column].max + [line, column] + end + + def raise_parser_error(klass, message) + exception = klass.new(message).tap do |error| + error.location = location_in(@scanner) + error.plist_string = @scanner.string + end + raise(exception) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode.rb new file mode 100644 index 0000000..b10865f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode.rb @@ -0,0 +1,89 @@ +# frozen-string-literal: true + +require 'nanaimo/unicode/next_step_mapping' +require 'nanaimo/unicode/quote_maps' +module Nanaimo + # @!visibility private + # + module Unicode + class UnsupportedEscapeSequenceError < Error; end + class InvalidEscapeSequenceError < Error; end + + module_function + + def quotify_string(string) + string.gsub(QUOTE_REGEXP, QUOTE_MAP) + end + + ESCAPE_PREFIXES = %W( + 0 1 2 3 4 5 6 7 a b f n r t v \n U \\ + ).freeze + + OCTAL_DIGITS = (0..7).map(&:to_s).freeze + + # Credit to Samantha Marshall + # Taken from https://github.com/samdmarshall/pbPlist/blob/346c29f91f913d35d0e24f6722ec19edb24e5707/pbPlist/StrParse.py#L197 + # Licensed under https://raw.githubusercontent.com/samdmarshall/pbPlist/blob/346c29f91f913d35d0e24f6722ec19edb24e5707/LICENSE + # + # Originally from: http://www.opensource.apple.com/source/CF/CF-744.19/CFOldStylePList.c See `getSlashedChar()` + def unquotify_string(string) + formatted_string = ::String.new + extracted_string = string + string_length = string.size + index = 0 + while index < string_length + if escape_index = extracted_string.index('\\', index) + formatted_string << extracted_string[index...escape_index] unless index == escape_index + index = escape_index + 1 + next_char = extracted_string[index] + if 
ESCAPE_PREFIXES.include?(next_char) + index += 1 + if unquoted = UNQUOTE_MAP[next_char] + formatted_string << unquoted + elsif next_char == 'U' + length = 4 + unicode_numbers = extracted_string[index, length] + unless unicode_numbers =~ /\A\h{4}\z/ + raise InvalidEscapeSequenceError, "Unicode '\\U' escape sequence terminated without 4 following hex characters" + end + index += length + formatted_string << [unicode_numbers.to_i(16)].pack('U') + elsif OCTAL_DIGITS.include?(next_char) # https://twitter.com/Catfish_Man/status/658014170055507968 + octal_string = extracted_string[index - 1, 3] + if octal_string =~ /\A[0-7]{3}\z/ + index += 2 + code_point = octal_string.to_i(8) + unless code_point <= 0x80 || converted = NEXT_STEP_MAPPING[code_point] + raise InvalidEscapeSequenceError, "Invalid octal escape sequence #{octal_string}" + end + formatted_string << [converted].pack('U') + else + formatted_string << next_char + end + else + raise UnsupportedEscapeSequenceError, "Failed to handle #{next_char} which is in the list of possible escapes" + end + else + index += 1 + formatted_string << next_char + end + else + formatted_string << extracted_string[index..-1] + index = string_length + end + end + formatted_string + end + + XML_STRING_ESCAPES = { + '&' => '&amp;', + '<' => '&lt;', + '>' => '&gt;' + }.freeze + XML_STRING_ESCAPE_REGEXP = Regexp.union(XML_STRING_ESCAPES.keys) + + def xml_escape_string(string) + string.to_s.gsub(XML_STRING_ESCAPE_REGEXP, XML_STRING_ESCAPES) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode/next_step_mapping.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode/next_step_mapping.rb new file mode 100644 index 0000000..a286a92 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode/next_step_mapping.rb @@ -0,0 +1,136 @@ +# frozen-string-literal: true +module Nanaimo + module Unicode + # Taken from http://ftp.unicode.org/Public/MAPPINGS/VENDORS/NEXT/NEXTSTEP.TXT + NEXT_STEP_MAPPING = { + 0x80 => 0x00a0, # NO-BREAK SPACE + 0x81 => 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE + 0x82 => 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x83 => 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x84 => 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE + 0x85 => 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x86 => 0x00c5, # LATIN CAPITAL LETTER A WITH RING + 0x87 => 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x88 => 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE + 0x89 => 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x8a => 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX + 0x8b => 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x8c => 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE + 0x8d => 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x8e => 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x8f => 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS + 0x90 => 0x00d0, # LATIN CAPITAL LETTER ETH + 0x91 => 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x92 => 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE + 0x93 => 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x94 => 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x95 => 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE + 0x96 => 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x97 => 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE + 0x98 => 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x99 => 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX + 0x9a => 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x9b => 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x9c
=> 0x00de, # LATIN CAPITAL LETTER THORN + 0x9d => 0x00b5, # MICRO SIGN + 0x9e => 0x00d7, # MULTIPLICATION SIGN + 0x9f => 0x00f7, # DIVISION SIGN + 0xa0 => 0x00a9, # COPYRIGHT SIGN + 0xa1 => 0x00a1, # INVERTED EXCLAMATION MARK + 0xa2 => 0x00a2, # CENT SIGN + 0xa3 => 0x00a3, # POUND SIGN + 0xa4 => 0x2044, # FRACTION SLASH + 0xa5 => 0x00a5, # YEN SIGN + 0xa6 => 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0xa7 => 0x00a7, # SECTION SIGN + 0xa8 => 0x00a4, # CURRENCY SIGN + 0xa9 => 0x2019, # RIGHT SINGLE QUOTATION MARK + 0xaa => 0x201c, # LEFT DOUBLE QUOTATION MARK + 0xab => 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0xac => 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 0xad => 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 0xae => 0xfb01, # LATIN SMALL LIGATURE FI + 0xaf => 0xfb02, # LATIN SMALL LIGATURE FL + 0xb0 => 0x00ae, # REGISTERED SIGN + 0xb1 => 0x2013, # EN DASH + 0xb2 => 0x2020, # DAGGER + 0xb3 => 0x2021, # DOUBLE DAGGER + 0xb4 => 0x00b7, # MIDDLE DOT + 0xb5 => 0x00a6, # BROKEN BAR + 0xb6 => 0x00b6, # PILCROW SIGN + 0xb7 => 0x2022, # BULLET + 0xb8 => 0x201a, # SINGLE LOW-9 QUOTATION MARK + 0xb9 => 0x201e, # DOUBLE LOW-9 QUOTATION MARK + 0xba => 0x201d, # RIGHT DOUBLE QUOTATION MARK + 0xbb => 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0xbc => 0x2026, # HORIZONTAL ELLIPSIS + 0xbd => 0x2030, # PER MILLE SIGN + 0xbe => 0x00ac, # NOT SIGN + 0xbf => 0x00bf, # INVERTED QUESTION MARK + 0xc0 => 0x00b9, # SUPERSCRIPT ONE + 0xc1 => 0x02cb, # MODIFIER LETTER GRAVE ACCENT + 0xc2 => 0x00b4, # ACUTE ACCENT + 0xc3 => 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT + 0xc4 => 0x02dc, # SMALL TILDE + 0xc5 => 0x00af, # MACRON + 0xc6 => 0x02d8, # BREVE + 0xc7 => 0x02d9, # DOT ABOVE + 0xc8 => 0x00a8, # DIAERESIS + 0xc9 => 0x00b2, # SUPERSCRIPT TWO + 0xca => 0x02da, # RING ABOVE + 0xcb => 0x00b8, # CEDILLA + 0xcc => 0x00b3, # SUPERSCRIPT THREE + 0xcd => 0x02dd, # DOUBLE ACUTE ACCENT + 0xce => 0x02db, # OGONEK + 0xcf => 0x02c7, # CARON + 0xd0 => 0x2014, # EM DASH + 0xd1 => 0x00b1, # PLUS-MINUS SIGN + 0xd2 => 0x00bc, # VULGAR FRACTION ONE QUARTER + 0xd3 => 0x00bd, # VULGAR FRACTION ONE HALF + 0xd4 => 0x00be, # VULGAR FRACTION THREE QUARTERS + 0xd5 => 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0xd6 => 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0xd7 => 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0xd8 => 0x00e3, # LATIN SMALL LETTER A WITH TILDE + 0xd9 => 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0xda => 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0xdb => 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0xdc => 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0xdd => 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0xde => 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0xdf => 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0xe0 => 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0xe1 => 0x00c6, # LATIN CAPITAL LETTER AE + 0xe2 => 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0xe3 => 0x00aa, # FEMININE ORDINAL INDICATOR + 0xe4 => 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0xe5 => 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0xe6 => 0x00f0, # LATIN SMALL LETTER ETH + 0xe7 => 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0xe8 => 0x0141, # LATIN CAPITAL LETTER L WITH STROKE + 0xe9 => 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE + 0xea => 0x0152, # LATIN CAPITAL LIGATURE OE + 0xeb => 0x00ba, # MASCULINE ORDINAL INDICATOR + 0xec => 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0xed => 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0xee => 0x00f4, # LATIN SMALL LETTER O WITH 
CIRCUMFLEX + 0xef => 0x00f5, # LATIN SMALL LETTER O WITH TILDE + 0xf0 => 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0xf1 => 0x00e6, # LATIN SMALL LETTER AE + 0xf2 => 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0xf3 => 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0xf4 => 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0xf5 => 0x0131, # LATIN SMALL LETTER DOTLESS I + 0xf6 => 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0xf7 => 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE + 0xf8 => 0x0142, # LATIN SMALL LETTER L WITH STROKE + 0xf9 => 0x00f8, # LATIN SMALL LETTER O WITH STROKE + 0xfa => 0x0153, # LATIN SMALL LIGATURE OE + 0xfb => 0x00df, # LATIN SMALL LETTER SHARP S + 0xfc => 0x00fe, # LATIN SMALL LETTER THORN + 0xfd => 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0xfe => 0xfffd, # .notdef, REPLACEMENT CHARACTER + 0xff => 0xfffd, # .notdef, REPLACEMENT CHARACTER + }.freeze + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode/quote_maps.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode/quote_maps.rb new file mode 100644 index 0000000..b051232 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/unicode/quote_maps.rb @@ -0,0 +1,57 @@ +# frozen-string-literal: true +module Nanaimo + module Unicode + QUOTE_MAP = { + "\a" => "\\a", + "\b" => "\\b", + "\f" => "\\f", + "\r" => "\\r", + "\t" => "\\t", + "\v" => "\\v", + "\n" => "\\n", + "\"" => "\\\"", + "\\" => "\\\\", + "\x00" => "\\U0000", + "\x01" => "\\U0001", + "\x02" => "\\U0002", + "\x03" => "\\U0003", + "\x04" => "\\U0004", + "\x05" => "\\U0005", + "\x06" => "\\U0006", + "\x0E" => "\\U000e", + "\x0F" => "\\U000f", + "\x10" => "\\U0010", + "\x11" => "\\U0011", + "\x12" => "\\U0012", + "\x13" => "\\U0013", + "\x14" => "\\U0014", + "\x15" => "\\U0015", + "\x16" => "\\U0016", + "\x17" => "\\U0017", + "\x18" => "\\U0018", + "\x19" => "\\U0019", + "\x1A" => "\\U001a", + "\e" => "\\U001b", + "\x1C" => "\\U001c", + "\x1D" => "\\U001d", + "\x1E" => "\\U001e", + "\x1F" => "\\U001f", + }.freeze + + UNQUOTE_MAP = { + "\n" => "\n", + "a" => "\a", + "b" => "\b", + "f" => "\f", + "r" => "\r", + "t" => "\t", + "v" => "\v", + "n" => "\n", + "'" => "'", + "\"" => "\"", + "\\" => "\\", + }.freeze + + QUOTE_REGEXP = /\x07|\x08|\f|\r|\t|\v|\n|"|\\|\x00|\x01|\x02|\x03|\x04|\x05|\x06|\x0E|\x0F|\x10|\x11|\x12|\x13|\x14|\x15|\x16|\x17|\x18|\x19|\x1A|\x1B|\x1C|\x1D|\x1E|\x1F/ + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/version.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/version.rb new file mode 100644 index 0000000..bcc6086 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Nanaimo + VERSION = '0.3.0'.freeze +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer.rb new file mode 100644 index 0000000..71067e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer.rb @@ -0,0 +1,206 @@ +# frozen_string_literal: true + +module Nanaimo + # Transforms native ruby objects or Plist objects into their ASCII Plist + # string representation. + # + class Writer + autoload :PBXProjWriter, 'nanaimo/writer/pbxproj' + autoload :XMLWriter, 'nanaimo/writer/xml' + + # Raised when attempting to write a plist containing an object of an + # unsupported type. 
+ # + class UnsupportedPlistTypeError < Error + def initialize(plist_format, object) + @plist_format = plist_format + @object = object + end + + def to_s + "Unable to write #{@object.inspect}. " \ + "`#{@object.class}` is an invalid object type to serialize in a #{@plist_format} plist." + end + end + + # The magic comment that denotes a UTF8-encoded plist. + # + UTF8 = "// !$*UTF8*$!\n".freeze + + # @param plist [Plist,String,Hash,Array] The plist object to write + # @param pretty [Boolean] Whether to serialize annotations and add + # spaces and newlines to make output more legible + # @param output [#<<] The output stream to write the plist to + # + def initialize(plist, pretty: true, output: ::String.new, strict: true) + @plist = plist + @pretty = pretty + @output = output + @indent = 0 + @newlines = true + @strict = strict + end + + # Writes the plist to the given output. + # + def write + write_utf8 + write_object(@plist.root_object) + write_newline + end + + attr_reader :indent, :pretty, :output, :newlines, :strict + private :indent, :pretty, :output, :newlines, :strict + + private + + def plist_format + :ascii + end + + def write_utf8 + output << UTF8 + end + + def write_newline + output << if newlines + "\n" + else + ' ' + end + end + + def write_object(object) + case object + when Array, ::Array + write_array(object) + when Dictionary, ::Hash + write_dictionary(object) + when QUOTED_STRING_REGEXP, QuotedString + write_quoted_string(object) + when String, ::String, Symbol + write_string(object) + when Data + write_data(object) + else + raise UnsupportedPlistTypeError.new(plist_format, object) if strict + write_string_quoted_if_necessary(object) + end + write_annotation(object) if pretty + output + end + + QUOTED_STRING_REGEXP = %r{\A\z|[^\w\./]|\A___} + private_constant :QUOTED_STRING_REGEXP + + def write_string_quoted_if_necessary(object) + string = object.to_s + string =~ QUOTED_STRING_REGEXP ? write_quoted_string(string) : write_string(string) + end + + def write_string(object) + output << value_for(object).to_s + end + + def write_quoted_string(object) + output << '"' << Unicode.quotify_string(value_for(object)) << '"' + end + + def write_data(object) + output << '<' + value_for(object).unpack('H*').first.chars.each_with_index do |c, i| + output << "\n" if i > 0 && (i % 16).zero? + output << ' ' if i > 0 && (i % 4).zero? + output << c + end + output << '>' + end + + def write_array(object) + write_array_start + value = value_for(object) + value.each do |v| + write_array_element(v) + end + write_array_end + end + + def write_array_start + output << '(' + write_newline if newlines + push_indent! + end + + def write_array_end + pop_indent! + write_indent + output << ')' + end + + def write_array_element(object) + write_indent + write_object(object) + output << ',' + write_newline + end + + def write_dictionary(object) + write_dictionary_start + value = value_for(object) + value.each do |key, val| + write_dictionary_key_value_pair(key, val) + end + write_dictionary_end + end + + def write_dictionary_start + output << '{' + write_newline if newlines + push_indent! + end + + def write_dictionary_end + pop_indent! + write_indent + output << '}' + end + + def write_dictionary_key_value_pair(key, value) + write_indent + write_object(key) + output << ' = ' + write_object(value) + output << ';' + write_newline + end + + def write_annotation(object) + return output unless object.is_a?(Nanaimo::Object) + annotation = object.annotation + return output unless annotation && !annotation.empty? 
+ output << " /*#{annotation}*/" + end + + def value_for(object) + if object.is_a?(Nanaimo::Object) + object.value + else + object + end + end + + def push_indent! + @indent += 1 + end + + def pop_indent! + @indent -= 1 + @indent = 0 if @indent < 0 + end + + def write_indent + output << "\t" * indent if newlines + output + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer/pbxproj.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer/pbxproj.rb new file mode 100644 index 0000000..6f91963 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer/pbxproj.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +module Nanaimo + class Writer + # Transforms native ruby objects or Plist objects into their ASCII Plist + # string representation, formatted as Xcode writes Xcode projects. + # + class PBXProjWriter < Writer + ISA = String.new('isa', '') + private_constant :ISA + + def initialize(plist, **args) + super(plist, **args) + @objects_section = false + end + + private + + def write_dictionary(object) + n = newlines + @newlines = false if flat_dictionary?(object) + return super(sort_dictionary(object)) unless @objects_section + @objects_section = false + write_dictionary_start + value = value_for(object) + objects_by_isa = value.group_by { |_k, v| isa_for(v) } + objects_by_isa.each do |isa, kvs| + write_newline + output << "/* Begin #{isa} section */" + write_newline + sort_dictionary(kvs, key_can_be_isa: false).each do |k, v| + write_dictionary_key_value_pair(k, v) + end + output << "/* End #{isa} section */" + write_newline + end + write_dictionary_end + ensure + @newlines = n + end + + def write_dictionary_key_value_pair(k, v) + # since the objects section is always at the top-level, + # we can avoid checking if we're starting the 'objects' + # section if we're further "indented" (aka deeper) in the project + @objects_section = true if indent == 1 && value_for(k) == 'objects' + + super + end + + def sort_dictionary(dictionary, key_can_be_isa: true) + hash = value_for(dictionary) + hash.sort_by do |k, _v| + k = value_for(k) + if key_can_be_isa + k == 'isa' ? '' : k + else + k + end + end + end + + def isa_for(dictionary) + dictionary = value_for(dictionary) + return unless dictionary.is_a?(Hash) + if isa = dictionary['isa'] + value_for(isa) + elsif isa = dictionary[ISA] + value_for(isa) + end + end + + def flat_dictionary?(dictionary) + case isa_for(dictionary) + when 'PBXBuildFile', 'PBXFileReference' + true + else + false + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer/xml.rb b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer/xml.rb new file mode 100644 index 0000000..4f86e5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/lib/nanaimo/writer/xml.rb @@ -0,0 +1,139 @@ +# frozen_string_literal: true + +module Nanaimo + class Writer + # Transforms native ruby objects or Plist objects into their XML Plist + # string representation. 
+ # + class XMLWriter < Writer + autoload :Base64, 'base64' + autoload :Date, 'date' + autoload :DateTime, 'date' + + def write + write_xml_header + write_object(@plist.root_object) + write_newline + write_xml_footer + end + + private + + def plist_format + :xml + end + + def write_object(object) + case object + when Float, Integer + write_number(object) + when Time, Date, DateTime + write_date(object) + when true, false + write_boolean(object) + else + super + end + end + + def write_xml_header + output << <<-EOS +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> + EOS + end + + def write_xml_footer + output << <<-EOS +</plist> + EOS + end + + def write_annotation(_) + end + + def write_number(object) + type = object.integer? ? 'integer' : 'real' + output << "<#{type}>#{object}</#{type}>" + end + + def write_boolean(object) + output << "<#{object}/>" + end + + def write_date(object) + output << '<date>' << object.iso8601 << '</date>' + end + + def write_string(object) + output << '<string>' << Unicode.xml_escape_string(value_for(object)) << '</string>' + end + + def write_quoted_string(object) + write_string(object) + end + + def write_data(object) + output << '<data>' + data = Base64.encode64(value_for(object)).delete("\n") + data = data.scan(/.{1,76}/).join("\n") if pretty + output << data << '</data>' + end + + def write_array(object) + return output << '<array/>' if value_for(object).empty? + super + end + + def write_array_start + output << '<array>' + write_newline if newlines + push_indent! + end + + def write_array_end + pop_indent! + write_indent + output << '</array>' + end + + def write_array_element(object) + write_indent + write_object(object) + write_newline + end + + def write_dictionary(object) + object = value_for(object) + return output << '<dict/>' if object.empty? + keys = object.keys.sort_by(&:to_s) + object = keys.each_with_object({}) do |key, hash| + hash[key.to_s] = object[key] + end + super(object) + end + + def write_dictionary_start + output << '<dict>' + write_newline if newlines + push_indent! + end + + def write_dictionary_end + pop_indent! + write_indent + output << '</dict>' + end + + def write_dictionary_key_value_pair(key, value) + write_indent + output << '<key>' << Unicode.xml_escape_string(value_for(key)) << '</key>' + write_newline + write_indent + write_object(value) + write_newline + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/nanaimo.gemspec b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/nanaimo.gemspec new file mode 100644 index 0000000..061d44c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nanaimo-0.3.0/nanaimo.gemspec @@ -0,0 +1,26 @@ +# coding: utf-8 +# frozen_string_literal: true + +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'nanaimo/version' + +Gem::Specification.new do |spec| + spec.name = 'nanaimo' + spec.version = Nanaimo::VERSION + spec.authors = ['Danielle Tomlinson', 'Samuel Giddins'] + spec.email = ['dan@tomlinson.io', 'segiddins@segiddins.me'] + + spec.summary = 'A library for (de)serialization of ASCII Plists.'
+ spec.homepage = 'https://github.com/CocoaPods/Nanaimo' + spec.license = 'MIT' + + spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) } + spec.bindir = 'exe' + spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } + spec.require_paths = ['lib'] + + spec.add_development_dependency 'bundler', '~> 1.12' + spec.add_development_dependency 'rake', '~> 12.3' + spec.add_development_dependency 'rspec', '~> 3.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/LICENSE new file mode 100644 index 0000000..97c1a79 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2015 Manfred Stienstra, Fingertips + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/README.md b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/README.md new file mode 100644 index 0000000..af0a26a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/README.md @@ -0,0 +1,80 @@ +# Nap + +Nap is an extremely simple REST client for Ruby. It was built to quickly +fire off HTTP requests without having to research net/http internals. + +## Example + +```ruby +gem 'nap' +require 'rest' +require 'json' + +response = REST.get('http://twitter.com/statuses/friends_timeline.json', {}, + {:username => '_evan', :password => 'buttonscat'} +) +if response.ok? + timeline = JSON.parse(response.body) + puts(timeline.map do |item| + "#{item['user']['name']}\n\n#{item['text']}" + end.join("\n\n--\n\n")) +elsif response.forbidden? + puts "Are you sure you're `_evan' and your password is the name of your cat?" +else + puts "Something went wrong (#{response.status_code})" + puts response.body +end +``` + +## Advanced request configuration + +If you need more control over the Net::HTTP request you can pass a block to all of the request methods. +```ruby +response = REST.get('http://google.com') do |http_request| + http_request.open_timeout = 15 + http_request.set_debug_output(STDERR) +end +``` + +## Proxy support + +To enable the proxy settings in Nap, you can either use the HTTP\_PROXY or http\_proxy enviroment variable. + + $ env HTTP_PROXY=http://rob:secret@192.167.1.254:665 ruby app.rb + +## Exceptions + +Nap defines one top-level and three main error types which allow you to catch a whole range of exceptions thrown by underlying protocol implementations. 
+ +* *REST::Error*: Any type of error +* *REST::Error::Timeout*: Read timeouts of various sorts +* *REST::Error::Connection*: Connection errors caused by dropped sockets +* *REST::Error::Protocol*: Request failed because of a problem when handling the HTTP request or response + +In the most basic case you can rescue from the top-level type to warn about fetching problems. + +```ruby +begin + REST.get('http://example.com/pigeons/12') +rescue REST::Error + puts "[!] Failed to fetch Pigeon number 12." +end +``` + +## Contributions + +Nap couldn't be the shining beacon in the eternal darkness without help from: + +* Eloy Durán +* Joshua Sierles +* Thijs van der Vossen + +For all other great human beings, please visit the GitHub contributors page. + +## Changes from 1.0.0 to 1.1.0 + +* REST::Request now allows all HTTP verbs to send a body entity. + +## Changes from 0.8.0 to 1.0.0 + +* Removed REST::DisconnectedError, please use REST::Error::Connection instead. diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest.rb b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest.rb new file mode 100644 index 0000000..5f4e36f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest.rb @@ -0,0 +1,106 @@ +require 'uri' + +# REST is basically a convenience wrapper around Net::HTTP. It defines a simple and consistant API +# for doing REST-style HTTP calls. +# +# In addition it provides wrappers for the many error classes that can be raised while making +# requests. See REST::Error for a complete discussion of options. +module REST + # Library version + VERSION = '1.1.0' + + # Performs a GET on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.get('http://example.com/pigeons/12', + # {'Accept' => 'text/plain'}, + # {:username => 'admin', :password => 'secret'} + # ) + # if response.ok? + # puts response.body + # else + # puts "Couldn't fetch your pigeon (#{response.status_code})" + # end + def self.get(uri, headers={}, options={}, &configure_block) + REST::Request.perform(:get, URI.parse(uri), nil, headers, options, &configure_block) + end + + # Performs a HEAD on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.head('http://example.com/pigeons/12') + # if response.ok? + # puts "Your pigeon exists!" + # elsif response.found? + # puts "Someone moved your pigeon!" + # else + # puts "Couldn't fetch your pigeon (#{response.status_code})" + # end + def self.head(uri, headers={}, options={}, &configure_block) + REST::Request.perform(:head, URI.parse(uri), nil, headers, options, &configure_block) + end + + # Performs a DELETE on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.delete('http://example.com/pigeons/12') + # if response.ok? + # puts "Your pigeon died ): )" + # elsif response.found? + # puts "Someone moved your pigeon!" + # else + # puts "Couldn't delete your pigeon (#{response.status_code})" + # end + def self.delete(uri, headers={}, options={}, &configure_block) + REST::Request.perform(:delete, URI.parse(uri), nil, headers, options, &configure_block) + end + + # Performs a PATCH on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.patch('http://example.com/pigeons/12', + # {'Name' => 'Homer'}.to_xml, + # {'Accept' => 'application/xml, */*', 'Content-Type' => 'application/xml'} + # ) + # if response.ok? + # puts "Your pigeon was renamed to 'Homer'!" 
+ # else + # puts "Couldn't rename your pigeon (#{response.status_code})" + # puts XML.parse(response.body).reason + # end + def self.patch(uri, body, headers={}, options={}, &configure_block) + REST::Request.perform(:patch, URI.parse(uri), body, headers, options, &configure_block) + end + + # Performs a PUT on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.put('http://example.com/pigeons/12', + # {'Name' => 'Homer'}.to_xml, + # {'Accept' => 'application/xml, */*', 'Content-Type' => 'application/xml'} + # ) + # if response.ok? + # puts "Your pigeon 'Bowser' was replaced by 'Homer'!" + # else + # puts "Couldn't replace your pigeon (#{response.status_code})" + # puts XML.parse(response.body).reason + # end + def self.put(uri, body, headers={}, options={}, &configure_block) + REST::Request.perform(:put, URI.parse(uri), body, headers, options, &configure_block) + end + + # Performs a POST on a resource. See REST::Request.new for a complete discussion of options. + # + # response = REST.post('http://example.com/pigeons', + # {'Name' => 'Bowser'}.to_xml, + # {'Accept' => 'application/xml, */*', 'Content-Type' => 'application/xml'} + # ) + # if response.created? + # puts "Created a new pigeon called 'Bowser'" + # else + # puts "Couldn't create your pigeon (#{response.status_code})" + # puts XML.parse(response.body).reason + # end + def self.post(uri, body, headers={}, options={}, &configure_block) + REST::Request.perform(:post, URI.parse(uri), body, headers, options, &configure_block) + end +end + +require File.expand_path('../rest/error', __FILE__) +require File.expand_path('../rest/request', __FILE__) +require File.expand_path('../rest/response', __FILE__) diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/error.rb b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/error.rb new file mode 100644 index 0000000..3eedce3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/error.rb @@ -0,0 +1,92 @@ +require 'net/http' + +module REST + # This constant can be used to rescue any of the known `Timeout`, `Connection`, and `Protocol` + # error classes. + # + # For instance, to rescue _any_ type of error that could be raise while making a request: + # + # begin + # REST.get('http://example.com/pigeons/12') + # rescue REST::Error => e + # p e # => Timeout::Error + # end + # + # If you want to rescue only `Timeout` related error classes, however, you can limit the scope: + # + # begin + # REST.get('http://example.com/pigeons/12') + # rescue REST::Error::Timeout => e + # p e # => Timeout::Error + # end + module Error + # This constant can be used to rescue only the known `Timeout` error classes. + module Timeout + def self.class_names + %w( + Errno::ETIMEDOUT + Timeout::Error + Net::OpenTimeout + Net::ReadTimeout + ) + end + end + + # This constant can be used to rescue only the known `Connection` error classes. + module Connection + def self.class_names + %w( + EOFError + Errno::ECONNABORTED + Errno::ECONNREFUSED + Errno::ECONNRESET + Errno::EHOSTDOWN + Errno::EHOSTUNREACH + Errno::EINVAL + Errno::ENETUNREACH + SocketError + OpenSSL::SSL::SSLError + ) + end + end + + # This constant can be used to rescue only the known `Protocol` error classes. 
+ module Protocol + def self.class_names + %w( + Net::HTTPBadResponse + Net::HTTPHeaderSyntaxError + Net::ProtocolError + Zlib::GzipFile::Error + ) + end + end + + private + + [Timeout, Connection, Protocol].each do |mod| + mod.send(:include, Error) + + # Collect all the error classes that exist at runtime. + def mod.classes + class_names.map do |name| + begin + # MRI < 2 does not support full constant paths for `Object.const_get`. + name.split('::').inject(Object) do |current, const| + current.const_get(const) + end + rescue NameError + nil + end + end.compact + end + + # Include the `mod` into the classes. + def mod.extend_classes! + classes.each { |klass| klass.send(:include, self) } + end + + mod.extend_classes! + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/request.rb b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/request.rb new file mode 100644 index 0000000..d9fb086 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/request.rb @@ -0,0 +1,196 @@ +require 'uri' +require 'net/http' + +module REST + # Request holds a HTTP request + class Request + attr_accessor :verb, :url, :body, :headers, :options, :request + + # * verb: The verb to use in the request, either :get, :head, :patch, :put, or :post + # * url: The URL to send the request to, must be a URI instance + # * body: The body to use in the request + # * headers: A hash of headers to add to the request + # * options: A hash of additional options + # * username: Username to use for basic authentication + # * password: Password to use for basic authentication + # * tls_verify/verify_ssl: Verify the server certificate against known CA's + # * tls_ca_file: Use a specific file for CA certificates instead of the built-in one + # this only works when :tls_verify is also set. + # * tls_key_and_certificate_file: The client key and certificate file to use for this + # request + # * tls_certificate: The client certficate to use for this request + # * tls_key: The client private key to use for this request + # * configure_block: An optional block that yields the underlying Net::HTTP + # request object allowing for more fine-grained configuration + # + # == Examples + # + # request = REST::Request.new(:get, URI.parse('http://example.com/pigeons/1')) + # + # request = REST::Request.new(:head, URI.parse('http://example.com/pigeons/1')) + # + # request = REST::Request.new(:post, + # URI.parse('http://example.com/pigeons'), + # {'name' => 'Homr'}.to_json, + # {'Accept' => 'application/json, */*', 'Content-Type' => 'application/json; charset=utf-8'} + # ) + # + # # Pass a block to configure the underlying +Net::HTTP+ request. 
+ # request = REST::Request.new(:get, URI.parse('http://example.com/pigeons/largest')) do |http_request| + # http_request.open_timeout = 15 # seconds + # end + # + # == Authentication example + # + # request = REST::Request.new(:put, + # URI.parse('http://example.com/pigeons/1'), + # {'name' => 'Homer'}.to_json, + # {'Accept' => 'application/json, */*', 'Content-Type' => 'application/json; charset=utf-8'}, + # {:username => 'Admin', :password => 'secret'} + # ) + # + # == TLS / SSL examples + # + # # Use a client key and certificate + # request = REST::Request.new(:get, URI.parse('https://example.com/pigeons/1'), nil, {}, { + # :tls_key_and_certificate_file => '/home/alice/keys/example.pem' + # }) + # + # # Use a client certificate and key from a specific location + # key_and_certificate = File.read('/home/alice/keys/example.pem') + # request = REST::Request.new(:get, URI.parse('https://example.com/pigeons/1'), nil, {}, { + # :tls_key => OpenSSL::PKey::RSA.new(key_and_certificate), + # :tls_certificate => OpenSSL::X509::Certificate.new(key_and_certificate) + # }) + # + # # Verify the server certificate against a specific certificate + # request = REST::Request.new(:get, URI.parse('https://example.com/pigeons/1'), nil, {}, { + # :tls_verify => true, + # :tls_ca_file => '/home/alice/keys/example.pem' + # }) + def initialize(verb, url, body=nil, headers={}, options={}, &configure_block) + @verb = verb + @url = url + @body = body + @headers = headers + @options = options + @configure_block = configure_block + end + + # Returns the path (including the query) for the request + def path + [url.path.empty? ? '/' : url.path, url.query].compact.join('?') + end + + def proxy_env + { + 'http' => ENV['HTTP_PROXY'] || ENV['http_proxy'], + 'https' => ENV['HTTPS_PROXY'] || ENV['https_proxy'] + } + end + + def proxy_settings + proxy_env[url.scheme] ? URI.parse(proxy_env[url.scheme]) : nil + end + + def http_proxy + if settings = proxy_settings + Net::HTTP.Proxy(settings.host, settings.port, settings.user, settings.password) + end + end + + # Configures and returns a new Net::HTTP request object + def http_request + if http_proxy + http_request = http_proxy.new(url.host, url.port) + else + http_request = Net::HTTP.new(url.host, url.port) + end + + # enable SSL/TLS + if url.scheme == 'https' + require 'net/https' + require 'openssl' + Error::Connection.extend_classes! 
+ + http_request.use_ssl = true + + if options[:tls_verify] or options[:verify_ssl] + if http_request.respond_to?(:enable_post_connection_check=) + http_request.enable_post_connection_check = true + end + # from http://curl.haxx.se/ca/cacert.pem + http_request.ca_file = options[:tls_ca_file] || File.expand_path('../../../support/cacert.pem', __FILE__) + http_request.verify_mode = OpenSSL::SSL::VERIFY_PEER + else + http_request.verify_mode = OpenSSL::SSL::VERIFY_NONE + end + + if options[:tls_key_and_certificate_file] + key_and_certificate = File.read(options[:tls_key_and_certificate_file]) + options[:tls_key] = OpenSSL::PKey::RSA.new(key_and_certificate) + options[:tls_certificate] = OpenSSL::X509::Certificate.new(key_and_certificate) + end + + if options[:tls_key] and options[:tls_certificate] + http_request.key = options[:tls_key] + http_request.cert = options[:tls_certificate] + elsif options[:tls_key] || options[:tls_certificate] + raise ArgumentError, "Please specify both the certificate and private key (:tls_key and :tls_certificate)" + end + end + + if @configure_block + @configure_block.call(http_request) + end + + http_request + end + + def request_for_verb + case verb + when :get + Net::HTTP::Get.new(path, headers) + when :head + Net::HTTP::Head.new(path, headers) + when :delete + Net::HTTP::Delete.new(path, headers) + when :patch + if defined?(Net::HTTP::Patch) + Net::HTTP::Patch.new(path, headers) + else + raise ArgumentError, "This version of the Ruby standard library doesn't support PATCH" + end + when :put + Net::HTTP::Put.new(path, headers) + when :post + Net::HTTP::Post.new(path, headers) + else + raise ArgumentError, "Unknown HTTP verb `#{verb}'" + end + end + + # Performs the actual request and returns a REST::Response object with the response + def perform + self.request = request_for_verb + request.body = body + + if options[:username] and options[:password] + request.basic_auth(options[:username], options[:password]) + end + + http_request = http_request() + response = http_request.start { |http| http.request(request) } + REST::Response.new(response.code, response.instance_variable_get('@header'), response.body) + end + + # Shortcut for REST::Request.new(*args).perform. + # + # See new for options. + def self.perform(*args, &configure_block) + request = new(*args, &configure_block) + request.perform + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/response.rb b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/response.rb new file mode 100644 index 0000000..8ac1e2f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/lib/rest/response.rb @@ -0,0 +1,46 @@ +module REST + # Response holds a HTTP response + class Response + # These codes are used to define convenience boolean accessors on the response object. + # + # Examples + # + # REST::Response.new(200).ok? #=> true + # REST::Response.new(201).ok? #=> falses + # REST::Response.new(403).forbidden? #=> true + CODES = [ + [200, :ok], + [201, :created], + [301, :moved_permanently], + [302, :found], + [400, :bad_request], + [401, :unauthorized], + [403, :forbidden], + [422, :unprocessable_entity], + [404, :not_found], + [500, :internal_server_error] + ] + + attr_accessor :body, :headers, :status_code + + # * status_code: The status code of the response (ie. 
200 or '404') + # * headers: The headers of the response + # * body: The body of the response + def initialize(status_code, headers={}, body='') + @status_code = status_code.to_i + @headers = headers + @body = body + end + + CODES.each do |code, name| + define_method "#{name}?" do + status_code == code + end + end + + # Returns _true_ when the status code is in the 2XX range. Returns false otherwise. + def success? + (status_code.to_s =~ /2../) ? true : false + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/support/cacert.pem b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/support/cacert.pem new file mode 100644 index 0000000..1b24dc6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/nap-1.1.0/support/cacert.pem @@ -0,0 +1,3988 @@ +## +## Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Wed Apr 22 03:12:04 2015 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## +## Conversion done with mk-ca-bundle.pl version 1.25. +## SHA1: ed3c0bbfb7912bcc00cd2033b0cb85c98d10559c +## + + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP 
+c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 
+xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE 
+ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy 
+2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y 
+azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN 
CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu 
+KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL 
+F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C 
+12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 
+eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK 
++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX 
+5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl 
+IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp 
+cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j 
+ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz 
+8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk 
+LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 
+lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL 
+ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ 
+V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI 
+DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g 
+QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu 
+MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs 
+oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH 
+ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN 
++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- 
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG 
+Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv 
+c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU 
+cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy +vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- 
+MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX 
+12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- 
+MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy 
+Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 
+BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg 
+Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- 
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC +VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) FÅ‘tanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx 
+MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil +e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw 
+MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF 
+AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg 
+aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + 
+Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG 
+NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg 
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD 
++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy 
+xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R 
+p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- 
+MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA 
+YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY 
+T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 
+======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem 
+OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 
+fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff 
+awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx 
+MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N 
+YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh +zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp 
+b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO +EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy +YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn 
+FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 +0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab +mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ 
+82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa +m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa 
+YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 +utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH 
+UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn 
+kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority +=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD 
+DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf +vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV 
+hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +QuoVadis Root CA 1 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE +PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm +PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6 +Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN +ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l +g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV +7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX +9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f +iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg +t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI +hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3 +GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct +Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP ++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh +3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa +wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6 +O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0 +FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV +hMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +QuoVadis Root CA 2 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh +ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY +NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t 
+oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o +MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l +V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo +L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ +sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD +6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh +lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI +hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K +pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9 +x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz +dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X +U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw +mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD +zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN +JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr +O3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +QuoVadis Root CA 3 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286 +IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL +Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe +6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3 +I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U +VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7 +5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi +Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM +dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt +rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI +hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS +t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ +TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du +DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib +Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD +hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX +0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW +dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2 +PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +DigiCert Assured ID Root G2 +=========================== +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG 
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw +MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH +35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq +bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw +VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP +YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn +lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO +w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv +0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz +d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW +hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M +jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +DigiCert Assured ID Root G3 +=========================== +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD +VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb +RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs +KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF +UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy +YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy +1vUhZscv6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +DigiCert Global Root G2 +======================= +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx +MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ +kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO +3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV +BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM +UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB +o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu +5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr +F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U +WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH +QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/ +iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +DigiCert Global Root G3 +======================= 
+-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD +VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw +MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C +AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O +YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp +Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y +3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34 +VOKa5Vt8sycX +-----END CERTIFICATE----- + +DigiCert Trusted Root G4 +======================== +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw +HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp +pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o +k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa +vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY +QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6 +MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm +mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7 +f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH +dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8 +oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY +ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr +yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy +7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah +ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN +5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb +/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa +5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK +G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP +82Z+ +-----END CERTIFICATE----- + +WoSign +====== +-----BEGIN CERTIFICATE----- +MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQG +EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNVBAMTIUNlcnRpZmljYXRpb24g +QXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJ +BgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA +vcqNrLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1UfcIiePyO +CbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcSccf+Hb0v1naMQFXQoOXXDX 
+2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2ZjC1vt7tj/id07sBMOby8w7gLJKA84X5 +KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4Mx1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR ++ScPewavVIMYe+HdVHpRaG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ez +EC8wQjchzDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDaruHqk +lWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221KmYo0SLwX3OSACCK2 +8jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvASh0JWzko/amrzgD5LkhLJuYwTKVY +yrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWvHYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0C +AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R +8bNLtwYgFP6HEtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 +LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJMuYhOZO9sxXq +T2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2eJXLOC62qx1ViC777Y7NhRCOj +y+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VNg64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC +2nz4SNAzqfkHx5Xh9T71XXG68pWpdIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes +5cVAWubXbHssw1abR80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/ +EaEQPkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGcexGATVdVh +mVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+J7x6v+Db9NpSvd4MVHAx +kUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMlOtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGi +kpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWTee5Ehr7XHuQe+w== +-----END CERTIFICATE----- + +WoSign China +============ +-----BEGIN CERTIFICATE----- +MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBGMQswCQYDVQQG +EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMMEkNBIOayg+mAmuagueiv +geS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYD +VQQKExFXb1NpZ24gQ0EgTGltaXRlZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k +8H/rD195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld19AXbbQs5 +uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExfv5RxadmWPgxDT74wwJ85 +dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnkUkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5 +Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+LNVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFy +b7Ao65vh4YOhn0pdr8yb+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc +76DbT52VqyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6KyX2m ++Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0GAbQOXDBGVWCvOGU6 +yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaKJ/kR8slC/k7e3x9cxKSGhxYzoacX +GKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUA +A4ICAQBqinA4WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 +yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj/feTZU7n85iY +r83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6jBAyvd0zaziGfjk9DgNyp115 +j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0A +kLppRQjbbpCBhqcqBT/mhDn4t/lXX0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97 +qA4bLJyuQHCH2u2nFoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Y +jj4Du9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10lO1Hm13ZB +ONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Leie2uPAmvylezkolwQOQv +T8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR12KvxAmLBsX5VYc8T1yaw15zLKYs4SgsO +kI26oQ== +-----END CERTIFICATE----- + +COMODO RSA Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE 
+BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn +dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ +FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+ +5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG +x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX +2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL +OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3 +sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C +GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5 +WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt +rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+ +nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg +tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW +sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp +pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA +zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq +ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52 +7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I +LaZRfyHBNVOFBkpdn627G190 +-----END CERTIFICATE----- + +USERTrust RSA Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz +0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j +Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn +RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O ++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq +/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE +Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM +lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8 +yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+ +eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW 
+FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ +7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ +Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM +8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi +FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi +yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c +J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw +sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx +Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +USERTrust ECC Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2 +0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez +nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV +HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB +HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu +9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl +OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV +MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF +JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R5 +=========================== +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6 +SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS +h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx +uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 +yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G3 +================================== +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE 
+CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y +olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t +x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy +EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K +Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur +mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5 +1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp +07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo +FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE +41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu +yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq +KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1 +v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA +8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b +8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r +mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq +1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI +JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV +tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk= +-----END CERTIFICATE----- + +Staat der Nederlanden EV Root CA +================================ +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M +MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl +cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk +SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW +O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r +0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8 +Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV +XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr +08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV +0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd +74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx +fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa +ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu +c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq +5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN +b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN 
+f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi +5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4 +WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK +DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy +eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg== +-----END CERTIFICATE----- + +IdenTrust Commercial Root CA 1 +============================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS +b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES +MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB +IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld +hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/ +mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi +1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C +XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl +3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy +NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV +WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg +xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix +uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI +hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg +ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt +ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV +YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX +feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro +kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe +2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz +Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R +cGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +IdenTrust Public Sector Root CA 1 +================================= +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv +ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV +UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS +b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy +P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6 +Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI +rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf +qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS +mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn +ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh +LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v +iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL 
+4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw +DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A +mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt +GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt +m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx +NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4 +Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI +ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC +ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ +3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy +bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug +b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw +HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT +DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx +OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP +/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz +HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU +s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y +TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx +AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6 +0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z +iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi +nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+ +vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO +e4pIb4tF9g== +-----END CERTIFICATE----- + +Entrust Root Certification Authority - EC1 +========================================== +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx +FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn +YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw +FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs +LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg +dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy +AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef +9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE 
+FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h +vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8 +kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +CFCA EV ROOT +============ +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE +CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB +IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw +MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD +DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV +BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD +7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN +uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW +ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7 +xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f +py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K +gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol +hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ +tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf +BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q +ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua +4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG +E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX +BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn +aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy +PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX +kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C +ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/LICENSE.md b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/LICENSE.md new file mode 100644 index 0000000..a834fbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2011-2014 [CONTRIBUTORS.md](https://github.com/geemus/netrc/blob/master/CONTRIBUTORS.md) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/Readme.md b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/Readme.md new file mode 100644 index 0000000..9193474 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/Readme.md @@ -0,0 +1,53 @@ +# Netrc + +This library reads and writes +[`.netrc` files](http://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html). + +## API + +Read a netrc file: + + n = Netrc.read("sample.netrc") + +If the file doesn't exist, Netrc.read will return an empty object. If +the filename ends in ".gpg", it will be decrypted using +[GPG](http://www.gnupg.org/). + +Read the user's default netrc file. + +**On Unix:** `$NETRC/.netrc` or `$HOME/.netrc` (whichever is set first). + +**On Windows:** `%NETRC%\_netrc`, `%HOME%\_netrc`, `%HOMEDRIVE%%HOMEPATH%\_netrc`, or `%USERPROFILE%\_netrc` (whichever is set first). + + n = Netrc.read + +Configure netrc to allow permissive files (with permissions other than 0600): + + Netrc.configure do |config| + config[:allow_permissive_netrc_file] = true + end + +Look up a username and password: + + user, pass = n["example.com"] + +Write a username and password: + + n["example.com"] = user, newpass + n.save + +If you make an entry that wasn't there before, it will be appended +to the end of the file. Sometimes people want to include a comment +explaining that the entry was added automatically. You can do it +like this: + + n.new_item_prefix = "# This entry was added automatically\n" + n["example.com"] = user, newpass + n.save + +Have fun! 
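Putting the calls above together, here is a minimal end-to-end sketch. It only uses the API documented in this Readme (`Netrc.read`, entry lookup, assignment, `new_item_prefix`, `save`); the host name, credentials, and file path are illustrative, and the file is assumed to be absent or already owned by you with 0600 permissions:

    require "netrc"

    # Returns an empty object if sample.netrc does not exist yet.
    n = Netrc.read("sample.netrc")

    # Look up an entry; the result responds to #login and #password (or is nil).
    entry = n["api.example.com"]
    puts entry.login if entry

    # Add or update an entry, then write the file back (created with 0600 permissions).
    n.new_item_prefix = "# This entry was added automatically\n"
    n["api.example.com"] = "user", "s3cret"
    n.save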
+ +## Running Tests + + $ bundle install + $ bundle exec ruby -e 'Dir.glob "./test/**/test_*.rb", &method(:require)' diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/changelog.txt b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/changelog.txt new file mode 100644 index 0000000..60beb4e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/changelog.txt @@ -0,0 +1,93 @@ +0.11.0 10/29/15 +=============== + +Respect NETRC environment variable +Fix for JRuby PernGen Space + +0.10.3 02/24/15 +=============== + +error when Dir.home is not readable + +0.10.2 12/17/14 +=============== + +set file permissions in /data to be world readable after test runs + +0.10.1 12/14/14 +=============== + +fix bug for `Dir.home` when can't find home + +0.10.0 12/10/14 +=============== + +use `Dir.home` for finding home on Ruby 1.9+ + +0.9.0 12/01/14 +============== + +use HOME or HOMEPATH/HOMEDRIVE to find home on windows + +0.8.0 10/16/14 +============== + +re-revert entry changes with minor bump + +0.7.9 10/16/14 +============== + +revert entry changes for a backwards-compatible version + +0.7.8 10/15/14 +============== + +add entry class to facilitate usage +switch gem source to rubygems.org +use guard, when available via guardfile +add default/read-only behavior +add allow_permissive_netrc_file option +fix an undefined variable path +fix Errno::EACCES error +silence const warnings in test + +0.7.7 08/15/12 +============== + +add newline between entries if one is missing + +0.7.6 08/15/12 +============== + +more unified newline handling +make entries with login/password parsable + + +0.7.5 06/25/12 +============== + +* improved operating system detection + +0.7.4 06/04/12 +============== + +* add support for encrypted files pgp netrc files + +0.7.3 06/04/12 +============== + +* also skip permissions check on cygwin + +0.7.2 05/23/12 +============= + +* use length instead of count on Array, provides compatibility with 1.8.6 + + +0.7.1 03/13/12 +============== + +* add Gemfile to simplify development +* add MIT license +* fix test require path +* fix unused variable assignment (caused warnings) in tests diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/default_only.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/default_only.netrc new file mode 100644 index 0000000..8df77a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/default_only.netrc @@ -0,0 +1,4 @@ +# this is my netrc with only a default +default + login ld # this is my default username + password pd diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/login.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/login.netrc new file mode 100644 index 0000000..f0ec3b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/login.netrc @@ -0,0 +1,3 @@ +# this is my login netrc +machine m + login l # this is my username diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/newlineless.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/newlineless.netrc new file mode 100644 index 0000000..5f3b1ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/newlineless.netrc @@ -0,0 +1,4 @@ +# this is my netrc +machine m + login l # this is my username + password p \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/password.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/password.netrc new file mode 100644 index 0000000..ce68670 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/password.netrc @@ -0,0 +1,3 
@@ +# this is my password netrc +machine m + password p # this is my password diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/permissive.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/permissive.netrc new file mode 100644 index 0000000..b92cad3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/permissive.netrc @@ -0,0 +1,4 @@ +# this is my netrc +machine m + login l # this is my username + password p diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample.netrc new file mode 100644 index 0000000..b92cad3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample.netrc @@ -0,0 +1,4 @@ +# this is my netrc +machine m + login l # this is my username + password p diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_multi.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_multi.netrc new file mode 100644 index 0000000..1936d4b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_multi.netrc @@ -0,0 +1,8 @@ +# this is my netrc with multiple machines +machine m + login lm # this is my m-username + password pm + +machine n + login ln # this is my n-username + password pn diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_multi_with_default.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_multi_with_default.netrc new file mode 100644 index 0000000..4e7dfe6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_multi_with_default.netrc @@ -0,0 +1,12 @@ +# this is my netrc with multiple machines and a default +machine m + login lm # this is my m-username + password pm + +machine n + login ln # this is my n-username + password pn + +default + login ld # this is my default username + password pd diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_with_default.netrc b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_with_default.netrc new file mode 100644 index 0000000..76597f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/data/sample_with_default.netrc @@ -0,0 +1,8 @@ +# this is my netrc with default +machine m + login l # this is my username + password p + +default + login default_login # this is my default username + password default_password diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/lib/netrc.rb b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/lib/netrc.rb new file mode 100644 index 0000000..83e4c5e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/lib/netrc.rb @@ -0,0 +1,251 @@ +require 'rbconfig' + +class Netrc + VERSION = "0.11.0" + + # see http://stackoverflow.com/questions/4871309/what-is-the-correct-way-to-detect-if-ruby-is-running-on-windows + WINDOWS = RbConfig::CONFIG["host_os"] =~ /mswin|mingw|cygwin/ + CYGWIN = RbConfig::CONFIG["host_os"] =~ /cygwin/ + + def self.default_path + File.join(ENV['NETRC'] || home_path, netrc_filename) + end + + def self.home_path + home = Dir.respond_to?(:home) ? Dir.home : ENV['HOME'] + + if WINDOWS && !CYGWIN + home ||= File.join(ENV['HOMEDRIVE'], ENV['HOMEPATH']) if ENV['HOMEDRIVE'] && ENV['HOMEPATH'] + home ||= ENV['USERPROFILE'] + # XXX: old stuff; most likely unnecessary + home = home.tr("\\", "/") unless home.nil? + end + + (home && File.readable?(home)) ? home : Dir.pwd + rescue ArgumentError + return Dir.pwd + end + + def self.netrc_filename + WINDOWS && !CYGWIN ? 
"_netrc" : ".netrc" + end + + def self.config + @config ||= {} + end + + def self.configure + yield(self.config) if block_given? + self.config + end + + def self.check_permissions(path) + perm = File.stat(path).mode & 0777 + if perm != 0600 && !(WINDOWS) && !(Netrc.config[:allow_permissive_netrc_file]) + raise Error, "Permission bits for '#{path}' should be 0600, but are "+perm.to_s(8) + end + end + + # Reads path and parses it as a .netrc file. If path doesn't + # exist, returns an empty object. Decrypt paths ending in .gpg. + def self.read(path=default_path) + check_permissions(path) + data = if path =~ /\.gpg$/ + decrypted = `gpg --batch --quiet --decrypt #{path}` + if $?.success? + decrypted + else + raise Error.new("Decrypting #{path} failed.") unless $?.success? + end + else + File.read(path) + end + new(path, parse(lex(data.lines.to_a))) + rescue Errno::ENOENT + new(path, parse(lex([]))) + end + + class TokenArray < Array + def take + if length < 1 + raise Error, "unexpected EOF" + end + shift + end + + def readto + l = [] + while length > 0 && ! yield(self[0]) + l << shift + end + return l.join + end + end + + def self.lex(lines) + tokens = TokenArray.new + for line in lines + content, comment = line.split(/(\s*#.*)/m) + content.each_char do |char| + case char + when /\s/ + if tokens.last && tokens.last[-1..-1] =~ /\s/ + tokens.last << char + else + tokens << char + end + else + if tokens.last && tokens.last[-1..-1] =~ /\S/ + tokens.last << char + else + tokens << char + end + end + end + if comment + tokens << comment + end + end + tokens + end + + def self.skip?(s) + s =~ /^\s/ + end + + + + # Returns two values, a header and a list of items. + # Each item is a tuple, containing some or all of: + # - machine keyword (including trailing whitespace+comments) + # - machine name + # - login keyword (including surrounding whitespace+comments) + # - login + # - password keyword (including surrounding whitespace+comments) + # - password + # - trailing chars + # This lets us change individual fields, then write out the file + # with all its original formatting. + def self.parse(ts) + cur, item = [], [] + + unless ts.is_a?(TokenArray) + ts = TokenArray.new(ts) + end + + pre = ts.readto{|t| t == "machine" || t == "default"} + + while ts.length > 0 + if ts[0] == 'default' + cur << ts.take.to_sym + cur << '' + else + cur << ts.take + ts.readto{|t| ! skip?(t)} + cur << ts.take + end + + if ts.include?('login') + cur << ts.readto{|t| t == "login"} + ts.take + ts.readto{|t| ! skip?(t)} + cur << ts.take + end + + if ts.include?('password') + cur << ts.readto{|t| t == "password"} + ts.take + ts.readto{|t| ! 
skip?(t)} + cur << ts.take + end + + cur << ts.readto{|t| t == "machine" || t == "default"} + + item << cur + cur = [] + end + + [pre, item] + end + + def initialize(path, data) + @new_item_prefix = '' + @path = path + @pre, @data = data + + if @data && @data.last && :default == @data.last[0] + @default = @data.pop + else + @default = nil + end + end + + attr_accessor :new_item_prefix + + def [](k) + if item = @data.detect {|datum| datum[1] == k} + Entry.new(item[3], item[5]) + elsif @default + Entry.new(@default[3], @default[5]) + end + end + + def []=(k, info) + if item = @data.detect {|datum| datum[1] == k} + item[3], item[5] = info + else + @data << new_item(k, info[0], info[1]) + end + end + + def length + @data.length + end + + def delete(key) + datum = nil + for value in @data + if value[1] == key + datum = value + break + end + end + @data.delete(datum) + end + + def each(&block) + @data.each(&block) + end + + def new_item(m, l, p) + [new_item_prefix+"machine ", m, "\n login ", l, "\n password ", p, "\n"] + end + + def save + if @path =~ /\.gpg$/ + e = IO.popen("gpg -a --batch --default-recipient-self -e", "r+") do |gpg| + gpg.puts(unparse) + gpg.close_write + gpg.read + end + raise Error.new("Encrypting #{@path} failed.") unless $?.success? + File.open(@path, 'w', 0600) {|file| file.print(e)} + else + File.open(@path, 'w', 0600) {|file| file.print(unparse)} + end + end + + def unparse + @pre + @data.map do |datum| + datum = datum.join + unless datum[-1..-1] == "\n" + datum << "\n" + else + datum + end + end.join + end + + Entry = Struct.new(:login, :password) do + alias to_ary to_a + end + +end + +class Netrc::Error < ::StandardError +end diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_lex.rb b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_lex.rb new file mode 100644 index 0000000..e63ff1d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_lex.rb @@ -0,0 +1,58 @@ +$VERBOSE = true +require 'minitest/autorun' + +require File.expand_path("#{File.dirname(__FILE__)}/../lib/netrc") + +class TestLex < Minitest::Test + def test_lex_empty + t = Netrc.lex([]) + assert_equal([], t) + end + + def test_lex_comment + t = Netrc.lex(["# foo\n"]) + assert_equal(["# foo\n"], t) + end + + def test_lex_comment_after_space + t = Netrc.lex([" # foo\n"]) + assert_equal([" # foo\n"], t) + end + + def test_lex_comment_after_word + t = Netrc.lex(["x # foo\n"]) + assert_equal(["x", " # foo\n"], t) + end + + def test_lex_comment_with_hash + t = Netrc.lex(["x # foo # bar\n"]) + assert_equal(["x", " # foo # bar\n"], t) + end + + def test_lex_word + t = Netrc.lex(["x"]) + assert_equal(["x"], t) + end + + def test_lex_two_lines + t = Netrc.lex(["x\ny\n"]) + assert_equal(["x", "\n", "y", "\n"], t) + end + + def test_lex_word_and_comment + t = Netrc.lex(["x\n", "# foo\n"]) + assert_equal(["x", "\n", "# foo\n"], t) + end + + def test_lex_six_words + t = Netrc.lex(["machine m login l password p\n"]) + e = ["machine", " ", "m", " ", "login", " ", "l", " ", "password", " ", "p", "\n"] + assert_equal(e, t) + end + + def test_lex_complex + t = Netrc.lex(["machine sub.domain.com login email@domain.com password pass\n"]) + e = ["machine", " ", "sub.domain.com", " ", "login", " ", "email@domain.com", " ", "password", " ", "pass", "\n"] + assert_equal(e, t) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_netrc.rb b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_netrc.rb new file mode 100644 index 0000000..73c5c25 --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_netrc.rb @@ -0,0 +1,273 @@ +$VERBOSE = true +require 'minitest/autorun' +require 'fileutils' + +require File.expand_path("#{File.dirname(__FILE__)}/../lib/netrc") +require "rbconfig" + +class TestNetrc < Minitest::Test + + def setup + Dir.glob('data/*.netrc').each{|f| File.chmod(0600, f)} + File.chmod(0644, "data/permissive.netrc") + end + + def teardown + Dir.glob('data/*.netrc').each{|f| File.chmod(0644, f)} + end + + def test_parse_empty + pre, items = Netrc.parse(Netrc.lex([])) + assert_equal("", pre) + assert_equal([], items) + end + + def test_parse_file + pre, items = Netrc.parse(Netrc.lex(IO.readlines("data/sample.netrc"))) + assert_equal("# this is my netrc\n", pre) + exp = [["machine ", + "m", + "\n login ", + "l", + " # this is my username\n password ", + "p", + "\n"]] + assert_equal(exp, items) + end + + def test_login_file + pre, items = Netrc.parse(Netrc.lex(IO.readlines("data/login.netrc"))) + assert_equal("# this is my login netrc\n", pre) + exp = [["machine ", + "m", + "\n login ", + "l", + " # this is my username\n"]] + assert_equal(exp, items) + end + + def test_password_file + pre, items = Netrc.parse(Netrc.lex(IO.readlines("data/password.netrc"))) + assert_equal("# this is my password netrc\n", pre) + exp = [["machine ", + "m", + "\n password ", + "p", + " # this is my password\n"]] + assert_equal(exp, items) + end + + def test_missing_file + n = Netrc.read("data/nonexistent.netrc") + assert_equal(0, n.length) + end + + def test_permission_error + original_windows = Netrc::WINDOWS + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, false) + Netrc.read("data/permissive.netrc") + assert false, "Should raise an error if permissions are wrong on a non-windows system." + rescue Netrc::Error + assert true, "" + ensure + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, original_windows) + end + + def test_allow_permissive_netrc_file_option + Netrc.configure do |config| + config[:allow_permissive_netrc_file] = true + end + original_windows = Netrc::WINDOWS + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, false) + Netrc.read("data/permissive.netrc") + assert true, "" + rescue Netrc::Error + assert false, "Should not raise an error if allow_permissive_netrc_file option is set to true" + ensure + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, original_windows) + Netrc.configure do |config| + config[:allow_permissive_netrc_file] = false + end + end + + def test_permission_error_windows + original_windows = Netrc::WINDOWS + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, true) + Netrc.read("data/permissive.netrc") + rescue Netrc::Error + assert false, "Should not raise an error if permissions are wrong on a non-windows system." 
+ ensure + Netrc.send(:remove_const, :WINDOWS) + Netrc.const_set(:WINDOWS, original_windows) + end + + def test_round_trip + n = Netrc.read("data/sample.netrc") + assert_equal(IO.read("data/sample.netrc"), n.unparse) + end + + def test_set + n = Netrc.read("data/sample.netrc") + n["m"] = "a", "b" + exp = "# this is my netrc\n"+ + "machine m\n"+ + " login a # this is my username\n"+ + " password b\n" + assert_equal(exp, n.unparse) + end + + def test_set_get + n = Netrc.read("data/sample.netrc") + n["m"] = "a", "b" + assert_equal(["a", "b"], n["m"].to_a) + end + + def test_add + n = Netrc.read("data/sample.netrc") + n.new_item_prefix = "# added\n" + n["x"] = "a", "b" + exp = "# this is my netrc\n"+ + "machine m\n"+ + " login l # this is my username\n"+ + " password p\n"+ + "# added\n"+ + "machine x\n"+ + " login a\n"+ + " password b\n" + assert_equal(exp, n.unparse) + end + + def test_add_newlineless + n = Netrc.read("data/newlineless.netrc") + n.new_item_prefix = "# added\n" + n["x"] = "a", "b" + exp = "# this is my netrc\n"+ + "machine m\n"+ + " login l # this is my username\n"+ + " password p\n"+ + "# added\n"+ + "machine x\n"+ + " login a\n"+ + " password b\n" + assert_equal(exp, n.unparse) + end + + def test_add_get + n = Netrc.read("data/sample.netrc") + n.new_item_prefix = "# added\n" + n["x"] = "a", "b" + assert_equal(["a", "b"], n["x"].to_a) + end + + def test_get_missing + n = Netrc.read("data/sample.netrc") + assert_equal(nil, n["x"]) + end + + def test_save + n = Netrc.read("data/sample.netrc") + n.save + assert_equal(File.read("data/sample.netrc"), n.unparse) + end + + def test_save_create + FileUtils.rm_f("/tmp/created.netrc") + n = Netrc.read("/tmp/created.netrc") + n.save + unless Netrc::WINDOWS + assert_equal(0600, File.stat("/tmp/created.netrc").mode & 0777) + end + end + + def test_encrypted_roundtrip + if `gpg --list-keys 2> /dev/null` != "" + FileUtils.rm_f("/tmp/test.netrc.gpg") + n = Netrc.read("/tmp/test.netrc.gpg") + n["m"] = "a", "b" + n.save + assert_equal(0600, File.stat("/tmp/test.netrc.gpg").mode & 0777) + netrc = Netrc.read("/tmp/test.netrc.gpg")["m"] + assert_equal("a", netrc.login) + assert_equal("b", netrc.password) + end + end + + def test_missing_environment + nil_home = nil + ENV["HOME"], nil_home = nil_home, ENV["HOME"] + assert_equal File.join(Dir.pwd, '.netrc'), Netrc.default_path + ensure + ENV["HOME"], nil_home = nil_home, ENV["HOME"] + end + + def test_netrc_environment_variable + ENV["NETRC"] = File.join(Dir.pwd, 'data') + assert_equal File.join(Dir.pwd, 'data', '.netrc'), Netrc.default_path + ensure + ENV.delete("NETRC") + end + + def test_read_entry + entry = Netrc.read("data/sample.netrc")['m'] + assert_equal 'l', entry.login + assert_equal 'p', entry.password + + # hash-style + assert_equal 'l', entry[:login] + assert_equal 'p', entry[:password] + end + + def test_write_entry + n = Netrc.read("data/sample.netrc") + entry = n['m'] + entry.login = 'new_login' + entry.password = 'new_password' + n['m'] = entry + assert_equal(['new_login', 'new_password'], n['m'].to_a) + end + + def test_entry_splat + e = Netrc::Entry.new("user", "pass") + user, pass = *e + assert_equal("user", user) + assert_equal("pass", pass) + end + + def test_entry_implicit_splat + e = Netrc::Entry.new("user", "pass") + user, pass = e + assert_equal("user", user) + assert_equal("pass", pass) + end + + def test_with_default + netrc = Netrc.read('data/sample_with_default.netrc') + assert_equal(['l', 'p'], netrc['m'].to_a) + assert_equal(['default_login', 'default_password'], 
netrc['unknown'].to_a) + end + + def test_multi_without_default + netrc = Netrc.read('data/sample_multi.netrc') + assert_equal(['lm', 'pm'], netrc['m'].to_a) + assert_equal(['ln', 'pn'], netrc['n'].to_a) + assert_equal([], netrc['other'].to_a) + end + + def test_multi_with_default + netrc = Netrc.read('data/sample_multi_with_default.netrc') + assert_equal(['lm', 'pm'], netrc['m'].to_a) + assert_equal(['ln', 'pn'], netrc['n'].to_a) + assert_equal(['ld', 'pd'], netrc['other'].to_a) + end + + def test_default_only + netrc = Netrc.read('data/default_only.netrc') + assert_equal(['ld', 'pd'], netrc['m'].to_a) + assert_equal(['ld', 'pd'], netrc['other'].to_a) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_parse.rb b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_parse.rb new file mode 100644 index 0000000..9e61c69 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/netrc-0.11.0/test/test_parse.rb @@ -0,0 +1,34 @@ +$VERBOSE = true +require 'minitest/autorun' + +require File.expand_path("#{File.dirname(__FILE__)}/../lib/netrc") + +class TestParse < Minitest::Test + def test_parse_empty + pre, items = Netrc.parse([]) + assert_equal("", pre) + assert_equal([], items) + end + + def test_parse_comment + pre, items = Netrc.parse(["# foo\n"]) + assert_equal("# foo\n", pre) + assert_equal([], items) + end + + def test_parse_item + t = ["machine", " ", "m", " ", "login", " ", "l", " ", "password", " ", "p", "\n"] + pre, items = Netrc.parse(t) + assert_equal("", pre) + e = [["machine ", "m", " login ", "l", " password ", "p", "\n"]] + assert_equal(e, items) + end + + def test_parse_two_items + t = ["machine", " ", "m", " ", "login", " ", "l", " ", "password", " ", "p", "\n"] * 2 + pre, items = Netrc.parse(t) + assert_equal("", pre) + e = [["machine ", "m", " login ", "l", " password ", "p", "\n"]] * 2 + assert_equal(e, items) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.github/FUNDING.yml b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.github/FUNDING.yml new file mode 100644 index 0000000..085e1ac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: "rubygems/public_suffix" +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with a single custom sponsorship URL diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.github/workflows/tests.yml b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.github/workflows/tests.yml new file mode 100644 index 0000000..2de9058 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.github/workflows/tests.yml @@ -0,0 +1,36 @@ +name: Tests + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + + build: + strategy: + matrix: + ruby-version: + # - "2.3" + - "2.4" + - "2.5" + - "2.6" + - "2.7" + platform: [ubuntu-latest] + + runs-on: ${{ matrix.platform }} + steps: + + - uses: actions/checkout@v2 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ 
matrix.ruby-version }} + + - name: Install dependencies + run: bundle install + + - name: Run tests + run: bundle exec rake diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.gitignore b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.gitignore new file mode 100644 index 0000000..8506292 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.gitignore @@ -0,0 +1,8 @@ +# Bundler +/.bundle +/Gemfile.lock +/pkg/* + +# YARD +/.yardoc +/yardoc/ diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.rubocop.yml b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.rubocop.yml new file mode 100644 index 0000000..af922f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.rubocop.yml @@ -0,0 +1,36 @@ +inherit_from: + - .rubocop_opinionated.yml + +AllCops: + Exclude: + # Exclude .gemspec files because they are generally auto-generated + - '*.gemspec' + # Exclude vendored folders + - 'tmp/**/*' + - 'vendor/**/*' + # Exclude artifacts + - 'pkg/**/*' + # Other + - 'test/benchmarks/**/*' + - 'test/profilers/**/*' + +# I often use @_variable to avoid clashing. +Naming/MemoizedInstanceVariableName: + Enabled: false + +Style/ClassAndModuleChildren: + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# Dear Rubocop, I don't want to use String#strip_heredoc +Layout/HeredocIndentation: + Enabled: false + +Style/WordArray: + Enabled: false + MinSize: 3 + +Style/SymbolArray: + Enabled: false + MinSize: 3 diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.rubocop_opinionated.yml b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.rubocop_opinionated.yml new file mode 100644 index 0000000..401e684 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.rubocop_opinionated.yml @@ -0,0 +1,157 @@ +AllCops: + Exclude: + # Exclude .gemspec files because they are generally auto-generated + - '*.gemspec' + # Exclude vendored folders + - 'tmp/**/*' + - 'vendor/**/*' + NewCops: enable + +# [codesmell] +Layout/LineLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + Max: 100 + +# [codesmell] +Metrics/AbcSize: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/BlockLength: + Enabled: false + +# [codesmell] +Metrics/CyclomaticComplexity: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/ClassLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/MethodLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + Max: 10 + +# [codesmell] +Metrics/ModuleLength: + Enabled: false + Exclude: + - 'spec/**/*_spec.rb' + - 'test/**/*_test.rb' + +# [codesmell] +Metrics/ParameterLists: + Enabled: false + Max: 5 + +# [codesmell] +Metrics/PerceivedComplexity: + Enabled: false + +# Do not use "and" or "or" in conditionals, but for readability we can use it +# to chain executions. Just beware of operator order. +Style/AndOr: + EnforcedStyle: conditionals + +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + +# Double empty lines are useful to separate conceptually different methods +# in the same class or module. +Layout/EmptyLines: + Enabled: false + +# In most cases, a space is nice. Sometimes, it's not. +# Just be consistent with the rest of the surrounding code. +Layout/EmptyLinesAroundClassBody: + Enabled: false + +# In most cases, a space is nice. Sometimes, it's not. 
+# Just be consistent with the rest of the surrounding code. +Layout/EmptyLinesAroundModuleBody: + Enabled: false + +# This is quite buggy, as it doesn't recognize double lines. +# Double empty lines are useful to separate conceptually different methods +# in the same class or module. +Layout/EmptyLineBetweenDefs: + Enabled: false + +# I personally don't care about the format style. +# In most cases I like to use %, but not at the point I want to enforce it +# as a convention in the entire code. +Style/FormatString: + Enabled: false + +# Annotated tokens (like %s) are a good thing, but in most cases we don't need them. +# %s is a simpler and straightforward version that works in almost all cases. So don't complain. +Style/FormatStringToken: + Enabled: false + +# unless is not always cool. +Style/NegatedIf: + Enabled: false + +# For years, %w() has been the de-facto standard. A lot of libraries are using (). +# Switching to [] would be a nightmare. +Style/PercentLiteralDelimiters: + Enabled: false + +# There are cases were the inline rescue is ok. We can either downgrade the severity, +# or rely on the developer judgement on a case-by-case basis. +Style/RescueModifier: + Enabled: false + +Style/SymbolArray: + EnforcedStyle: brackets + +# Sorry, but using trailing spaces helps readability. +# +# %w( foo bar ) +# +# looks better to me than +# +# %w( foo bar ) +# +Layout/SpaceInsidePercentLiteralDelimiters: + Enabled: false + +# Hate It or Love It, I prefer double quotes as this is more consistent +# with several other programming languages and the output of puts and inspect. +Style/StringLiterals: + EnforcedStyle: double_quotes + +# It's nice to be consistent. The trailing comma also allows easy reordering, +# and doesn't cause a diff in Git when you add a line to the bottom. +Style/TrailingCommaInArrayLiteral: + EnforcedStyleForMultiline: consistent_comma +Style/TrailingCommaInHashLiteral: + EnforcedStyleForMultiline: consistent_comma + +Style/TrivialAccessors: + # IgnoreClassMethods because I want to be able to define class-level accessors + # that sets an instance variable on the metaclass, such as: + # + # def self.default=(value) + # @default = value + # end + # + IgnoreClassMethods: true diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.travis.yml new file mode 100644 index 0000000..30f78e8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.travis.yml @@ -0,0 +1,23 @@ +language: ruby + +rvm: + # - 2.3 + - 2.4 + - 2.5 + - 2.6 + - 2.7 + - ruby-head + +env: + - COVERAGE=1 + +cache: + - bundler + +matrix: + allow_failures: + - rvm: ruby-head + +before_install: + - gem update --system + - gem install bundler diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.yardopts b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.yardopts new file mode 100644 index 0000000..0a782de --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/.yardopts @@ -0,0 +1 @@ +--title 'Ruby Public Suffix API Documentation' diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/2.0-Upgrade.md b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/2.0-Upgrade.md new file mode 100644 index 0000000..1a10bfb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/2.0-Upgrade.md @@ -0,0 +1,52 @@ +# Welcome to PublicSuffix 2.0! + +PublicSuffix 2.0 contains a rewritten internal representation and comparison logic, that drastically increases the lookup performance. 
The new version also changes several internal and external API. + +This document documents the most relevant changes to help you upgrading from PublicSuffix 1.0 to 2.0. + +## What's New + +- The library is now 100% compliants with the official PublicSuffix tests. The major breaking change you may experience, is that if a domain passed as input doesn't match any rule, the rule `*` is assumed. You can override this behavior by passing a custom default rule with the `default_rule` option. The old behavior can be restored by passing `default_rule: nil`. +- `PublicSuffix.domain` is a new method that parses the input and returns the domain (combination of second level domain + suffix). This is a convenient helper to parse a domain name, for example when you need to determine the cookie or SSL scope. +- Added the ability to disable the use of private domains either at runtime, in addition to the ability to not load the private domains section when reading the list (`private_domains: false`). This feature also superseded the `private_domains` class-level attribute, that is no longer available. + +## Upgrade + +When upgrading, here's the most relevant changes to keep an eye on: + +- Several futile utility helpers were removed, such as `Domain#rule`, `Domain#is_a_domain?`, `Domain#is_a_subdomain?`, `Domain#valid?`. You can easily obtain the same result by having a custom method that reconstructs the logic, and/or calling `PublicSuffix.{domain|parse}(domain.to_s)`. +- `PublicSuffix::List.private_domains` is no longer available. Instead, you now have two ways to enable/disable the private domains: + + 1. At runtime, by using the `ignore_private` option + + ```ruby + PublicSuffix.domain("something.blogspot.com", ignore_private: true) + ``` + + 1. Loading a filtered list: + + ```ruby + # Disable support for private TLDs + PublicSuffix::List.default = PublicSuffix::List.parse(File.read(PublicSuffix::List::DEFAULT_LIST_PATH), private_domains: false) + # => "blogspot.com" + PublicSuffix.domain("something.blogspot.com") + # => "blogspot.com" + ``` +- Now that the library is 100% compliant with the official PublicSuffix algorithm, if a domain passed as input doesn't match any rule, the wildcard rule `*` is assumed. This means that unlisted TLDs will be considered valid by default, when they would have been invalid in 1.x. However, you can override this behavior to emulate the 1.x behavior if needed: + + ```ruby + # 1.x: + + PublicSuffix.valid?("google.commm") + # => false + + # 2.x: + + PublicSuffix.valid?("google.commm") + # => true + + # Overriding 2.x behavior if needed: + + PublicSuffix.valid?("google.commm", default_rule: nil) + # => false + ```` diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/CHANGELOG.md new file mode 100644 index 0000000..4861d62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/CHANGELOG.md @@ -0,0 +1,406 @@ +# Changelog + +This project uses [Semantic Versioning 2.0.0](https://semver.org/). + + +## 4.0.6 + +### Changed + +- Updated definitions. + + +## 4.0.5 + +### Changed + +- Updated definitions. + + +## 4.0.4 + +### Changed + +- Updated definitions. + + +## 4.0.3 + +### Fixed + +- Fixed 2.7 deprecations and warnings (GH-167). [Thanks @BrianHawley] + + +## 4.0.2 + +### Changed + +- Updated definitions. + + +## 4.0.1 + +### Changed + +- Updated definitions. + + +## 4.0.0 + +### Changed + +- Minimum Ruby version is 2.3 + + +## Release 3.1.1 + +- CHANGED: Updated definitions. 
+- CHANGED: Rolled back support for Ruby 2.3 (GH-161, GH-162) + +IMPORTANT: 3.x is the latest version compatible with Ruby 2.1 and Ruby 2.2. + + +## Release 3.1.0 + +- CHANGED: Updated definitions. +- CHANGED: Minimum Ruby version is 2.3 +- CHANGED: Upgraded to Bundler 2.x + + +## Release 3.0.3 + +- CHANGED: Updated definitions. + + +## Release 3.0.2 + +- CHANGED: Updated definitions. + + +## Release 3.0.1 + +- CHANGED: Updated definitions. +- CHANGED: Improve performance and avoid allocation (GH-146). [Thanks @robholland] + + +## Release 3.0.0 + +This new version includes a major redesign of the library internals, with the goal to drastically +improve the lookup time while reducing storage space. + +For this reason, several public methods that are no longer applicable have been deprecated +and/or removed. You can find more information at GH-133. + +- CHANGED: Updated definitions. +- CHANGED: Dropped support for Ruby < 2.1 +- CHANGED: `PublicSuffix::List#rules` is now protected. You should not rely on it as the internal rule representation is subject to change to optimize performances. +- CHANGED: Removed `PublicSuffix::List.clear`, it was an unnecessary accessor method. Use `PublicSuffix::List.default = nil` if you **really** need to reset the default list. You shouldn't. +- CHANGED: `PublicSuffix::List#select` is now private. You should not use it, instead use `PublicSuffix::List#find`. +- CHANGED: `PublicSuffix::List` no longer implements Enumerable. Instead, use `#each` to loop over, or get an Enumerator. +- CHANGED: Redesigned internal list storage and lookup algorithm to achieve O(1) lookup time (see GH-133). + + +## Release 2.0.5 + +- CHANGED: Updated definitions. +- CHANGED: Initialization performance improvements (GH-128). [Thanks @casperisfine] + + +## Release 2.0.4 + +- FIXED: Fix a bug that caused the GEM to be published with the wrong version number in the gemspec (GH-121). + +- CHANGED: Updated definitions. + + +## Release 2.0.3 + +- CHANGED: Updated definitions. + + +## Release 2.0.2 + +- CHANGED: Updated definitions. + + +## Release 2.0.1 + +- FIXED: Fix bug that prevented .valid? to reset the default rule + + +## Release 2.0.0 + +- NEW: Added PublicSuffix.domain # => sld.tld +- NEW: Added the ability to disable the use of private domains either at runtime, in addition to the ability to not load the private domains section when reading the list (`private_domains: false`). This feature also superseded the `private_domains` class-level attribute, that is no longer available. + +- CHANGED: Considerable performance improvements (GH-92) +- CHANGED: Updated definitions. +- CHANGED: Removed deprecated PublicSuffix::InvalidDomain exception +- CHANGED: If the suffix is now listed, then the prevaling rule is "*" as defined by the PSL algorithm (GH-91) +- CHANGED: Input validation is performed only if you call `PublicSuffix.parse` or `PublicSuffix.list` +- CHANGED: Input with leading dot is invalid per PSL acceptance tests +- CHANGED: Removed `private_domains` class-level attribute. It is replaced by the `private_domains: false` option in the list parse method. +- CHANGED: The default list now assumes you use UTF-8 for reading the input (GH-94), + +- REMOVED: Removed futile utility helpers such as `Domain#rule`, `Domain#is_a_domain?`, `Domain#is_a_subdomain?`, `Domain#valid?`. You can easily obtain the same result by having a custom method that reconstructs the logic, and/or calling `PublicSuffix.{domain|parse}(domain.to_s)`. 
+ + +## Release 1.5.3 + +- FIXED: Don't duplicate rule indices when creating index (GH-77). [Thanks @ags] + +- CHANGED: Updated definitions. + + +## Release 1.5.2 + +- CHANGED: Updated definitions. + + +## Release 1.5.1 + +- FIXED: Ignore case for parsing and validating (GH-62) + +- CHANGED: Updated definitions. + + +## Release 1.5.0 + +- CHANGED: Dropped support for Ruby < 2.0 + +- CHANGED: Updated definitions. + + +## Release 1.4.6 + +- CHANGED: Updated definitions. + + +## Release 1.4.5 + +- CHANGED: Updated definitions. + + +## Release 1.4.4 + +- CHANGED: Updated definitions. + + +## Release 1.4.3 + +- CHANGED: Updated definitions. + + +## Release 1.4.2 + +- CHANGED: Updated definitions. + + +## Release 1.4.1 + +- CHANGED: Updated definitions. + + +## Release 1.4.0 + +- CHANGED: Moved the definitions in the lib folder. + +- CHANGED: Updated definitions. + + +## Release 1.3.3 + +- CHANGED: Updated definitions. + + +## Release 1.3.2 + +- CHANGED: Updated definitions. + + +## Release 1.3.1 + +- CHANGED: Updated definitions. + + +## Release 1.3.0 + +- NEW: Ability to skip Private Domains (GH-28). [Thanks @rb2k] + +- CHANGED: Updated definitions. + + +## Release 1.2.1 + +- CHANGED: Updated definitions. + + +## Release 1.2.0 + +- NEW: Allow a custom List on `PublicSuffix.parse` (GH-26). [Thanks @itspriddle] + +- FIXED: PublicSuffix.parse and PublicSuffix.valid? crashes when input is nil (GH-20). + +- CHANGED: Updated definitions. + + +## Release 1.1.3 + +- CHANGED: Updated definitions. + + +## Release 1.1.2 + +- CHANGED: Updated definitions. + + +## Release 1.1.1 + +- CHANGED: Updated definitions. + + +## Release 1.1.0 + +- FIXED: #valid? and #parse consider URIs as valid domains (GH-15) + +- CHANGED: Updated definitions. + +- CHANGED: Removed deprecatd PublicSuffixService::RuleList. + + +## Release 1.0.0 + +- CHANGED: Updated definitions. + + +## Release 1.0.0.rc1 + +The library is now known as PublicSuffix. + + +## Release 0.9.1 + +- CHANGED: Renamed PublicSuffixService::RuleList to PublicSuffixService::List. + +- CHANGED: Renamed PublicSuffixService::List#list to PublicSuffixService::List#rules. + +- CHANGED: Renamed PublicSuffixService to PublicSuffix. + +- CHANGED: Updated definitions. + + +## Release 0.9.0 + +- CHANGED: Minimum Ruby version increased to Ruby 1.8.7. + +- CHANGED: rake/gempackagetask is deprecated. Use rubygems/package_task instead. + + +## Release 0.8.4 + +- FIXED: Reverted bugfix for issue #12 for Ruby 1.8.6. + This is the latest version compatible with Ruby 1.8.6. + + +## Release 0.8.3 + +- FIXED: Fixed ArgumentError: invalid byte sequence in US-ASCII with Ruby 1.9.2 (#12). + +- CHANGED: Updated definitions (#11). + +- CHANGED: Renamed definitions.txt to definitions.dat. + + +## Release 0.8.2 + +- NEW: Added support for rubygems-test. + +- CHANGED: Integrated Bundler. + +- CHANGED: Updated definitions. + + +## Release 0.8.1 + +- FIXED: The files in the release 0.8.0 have wrong permission 600 and can't be loaded (#10). + + +## Release 0.8.0 + +- CHANGED: Update public suffix list to d1a5599b49fa 2010-10-25 15:10 +0100 (#9) + +- NEW: Add support for Fully Qualified Domain Names (#7) + + +## Release 0.7.0 + +- CHANGED: Using YARD to document the code instead of RDoc. + +- FIXED: RuleList cache is not recreated when a new rule is appended to the list (#6) + +- FIXED: PublicSuffixService.valid? 
should return false if the domain is not defined or not allowed (#4, #5) + + +## Release 0.6.0 + +- NEW: PublicSuffixService.parse raises DomainNotAllowed when trying to parse a domain name + which exists, but is not allowed by the current definition list (#3) + + PublicSuffixService.parse("nic.do") + # => PublicSuffixService::DomainNotAllowed + +- CHANGED: Renamed PublicSuffixService::InvalidDomain to PublicSuffixService::DomainInvalid + + +## Release 0.5.2 + +- CHANGED: Update public suffix list to 248ea690d671 2010-09-16 18:02 +0100 + + +## Release 0.5.1 + +- CHANGED: Update public suffix list to 14dc66dd53c1 2010-09-15 17:09 +0100 + + +## Release 0.5.0 + +- CHANGED: Improve documentation for Domain#domain and Domain#subdomain (#1). + +- CHANGED: Performance improvements (#2). + + +## Release 0.4.0 + +- CHANGED: Rename library from DomainName to PublicSuffixService to reduce the probability of name conflicts. + + +## Release 0.3.1 + +- Deprecated DomainName library. + + +## Release 0.3.0 + +- CHANGED: DomainName#domain and DomainName#subdomain are no longer alias of Domain#sld and Domain#tld. + +- CHANGED: Removed DomainName#labels and decoupled Rule from DomainName. + +- CHANGED: DomainName#valid? no longer instantiates new DomainName objects. This means less overhead. + +- CHANGED: Refactoring the entire DomainName API. Removed the internal on-the-fly parsing. Added a bunch of new methods to check and validate the DomainName. + + +## Release 0.2.0 + +- NEW: DomainName#valid? + +- NEW: DomainName#parse and DomainName#parse! + +- NEW: DomainName#valid_domain? and DomainName#valid_subdomain? + +- CHANGED: Make sure RuleList lookup is only performed once. + + +## Release 0.1.0 + +- Initial version diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/Gemfile b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/Gemfile new file mode 100644 index 0000000..6ae6816 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/Gemfile @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +source "https://rubygems.org" + +gemspec + +gem "rake" + +gem "codecov", require: false +gem "memory_profiler", require: false +gem "minitest" +gem "minitest-reporters" +gem "mocha" +gem "rubocop", "~>0.90", require: false +gem "yard" diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/LICENSE.txt b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/LICENSE.txt new file mode 100644 index 0000000..3bbd112 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2009-2020 Simone Carletti + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/README.md b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/README.md new file mode 100644 index 0000000..12ec7a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/README.md @@ -0,0 +1,207 @@ +# Public Suffix for Ruby + +PublicSuffix is a Ruby domain name parser based on the [Public Suffix List](https://publicsuffix.org/). + +[![Build Status](https://travis-ci.com/weppos/publicsuffix-ruby.svg?branch=master)](https://travis-ci.com/weppos/publicsuffix-ruby) +[![Tidelift dependencies](https://tidelift.com/badges/package/rubygems/public_suffix)](https://tidelift.com/subscription/pkg/rubygems-public-suffix?utm_source=rubygems-public-suffix&utm_medium=referral&utm_campaign=enterprise) + + +## Links + +- [Homepage](https://simonecarletti.com/code/publicsuffix-ruby) +- [Repository](https://github.com/weppos/publicsuffix-ruby) +- [API Documentation](https://rubydoc.info/gems/public_suffix) +- [Introducing the Public Suffix List library for Ruby](https://simonecarletti.com/blog/2010/06/public-suffix-list-library-for-ruby/) + + +## Requirements + +PublicSuffix requires **Ruby >= 2.3**. For an older versions of Ruby use a previous release. + + +## Installation + +You can install the gem manually: + +```shell +gem install public_suffix +``` + +Or use Bundler and define it as a dependency in your `Gemfile`: + +```ruby +gem 'public_suffix' +``` + +If you are upgrading to 2.0, see [2.0-Upgrade.md](2.0-Upgrade.md). + +## Usage + +Extract the domain out from a name: + +```ruby +PublicSuffix.domain("google.com") +# => "google.com" +PublicSuffix.domain("www.google.com") +# => "google.com" +PublicSuffix.domain("www.google.co.uk") +# => "google.co.uk" +``` + +Parse a domain without subdomains: + +```ruby +domain = PublicSuffix.parse("google.com") +# => # +domain.tld +# => "com" +domain.sld +# => "google" +domain.trd +# => nil +domain.domain +# => "google.com" +domain.subdomain +# => nil +``` + +Parse a domain with subdomains: + +```ruby +domain = PublicSuffix.parse("www.google.com") +# => # +domain.tld +# => "com" +domain.sld +# => "google" +domain.trd +# => "www" +domain.domain +# => "google.com" +domain.subdomain +# => "www.google.com" +``` + +Simple validation example: + +```ruby +PublicSuffix.valid?("google.com") +# => true + +PublicSuffix.valid?("www.google.com") +# => true + +# Explicitly forbidden, it is listed as a private domain +PublicSuffix.valid?("blogspot.com") +# => false + +# Unknown/not-listed TLD domains are valid by default +PublicSuffix.valid?("example.tldnotlisted") +# => true +``` + +Strict validation (without applying the default * rule): + +```ruby +PublicSuffix.valid?("example.tldnotlisted", default_rule: nil) +# => false +``` + + +## Fully Qualified Domain Names + +This library automatically recognizes Fully Qualified Domain Names. A FQDN is a domain name that end with a trailing dot. + +```ruby +# Parse a standard domain name +PublicSuffix.domain("www.google.com") +# => "google.com" + +# Parse a fully qualified domain name +PublicSuffix.domain("www.google.com.") +# => "google.com" +``` + +## Private domains + +This library has support for switching off support for private (non-ICANN). 
+ +```ruby +# Extract a domain including private domains (by default) +PublicSuffix.domain("something.blogspot.com") +# => "something.blogspot.com" + +# Extract a domain excluding private domains +PublicSuffix.domain("something.blogspot.com", ignore_private: true) +# => "blogspot.com" + +# It also works for #parse and #valid? +PublicSuffix.parse("something.blogspot.com", ignore_private: true) +PublicSuffix.valid?("something.blogspot.com", ignore_private: true) +``` + +If you don't care about private domains at all, it's more efficient to exclude them when the list is parsed: + +```ruby +# Disable support for private TLDs +PublicSuffix::List.default = PublicSuffix::List.parse(File.read(PublicSuffix::List::DEFAULT_LIST_PATH), private_domains: false) +# => "blogspot.com" +PublicSuffix.domain("something.blogspot.com") +# => "blogspot.com" +``` + + +## What is the Public Suffix List? + +The [Public Suffix List](https://publicsuffix.org) is a cross-vendor initiative to provide an accurate list of domain name suffixes. + +The Public Suffix List is an initiative of the Mozilla Project, but is maintained as a community resource. It is available for use in any software, but was originally created to meet the needs of browser manufacturers. + +A "public suffix" is one under which Internet users can directly register names. Some examples of public suffixes are ".com", ".co.uk" and "pvt.k12.wy.us". The Public Suffix List is a list of all known public suffixes. + + +## Why the Public Suffix List is better than any available Regular Expression parser? + +Previously, browsers used an algorithm which basically only denied setting wide-ranging cookies for top-level domains with no dots (e.g. com or org). However, this did not work for top-level domains where only third-level registrations are allowed (e.g. co.uk). In these cases, websites could set a cookie for co.uk which will be passed onto every website registered under co.uk. + +Clearly, this was a security risk as it allowed websites other than the one setting the cookie to read it, and therefore potentially extract sensitive information. + +Since there is no algorithmic method of finding the highest level at which a domain may be registered for a particular top-level domain (the policies differ with each registry), the only method is to create a list of all top-level domains and the level at which domains can be registered. This is the aim of the effective TLD list. + +As well as being used to prevent cookies from being set where they shouldn't be, the list can also potentially be used for other applications where the registry controlled and privately controlled parts of a domain name need to be known, for example when grouping by top-level domains. + +Source: https://wiki.mozilla.org/Public_Suffix_List + +Not convinced yet? Check out [this real world example](https://stackoverflow.com/q/288810/123527). + + +## Does PublicSuffix make requests to Public Suffix List website? + +No. PublicSuffix comes with a bundled list. It does not make any HTTP requests to parse or validate a domain. + + +## Support + +Library documentation is auto-generated from the [README](https://github.com/weppos/publicsuffix-ruby/blob/master/README.md) and the source code, and it's available at https://rubydoc.info/gems/public_suffix. + +- The PublicSuffix bug tracker is here: https://github.com/weppos/publicsuffix-ruby/issues +- The PublicSuffix code repository is here: https://github.com/weppos/publicsuffix-ruby. Contributions are welcome! 
Please include tests and/or feature coverage for every patch, and create a topic branch for every separate change you make. + +[Consider subscribing to Tidelift which provides Enterprise support for this project](https://tidelift.com/subscription/pkg/rubygems-public-suffix?utm_source=rubygems-public-suffix&utm_medium=referral&utm_campaign=readme) as part of the Tidelift Subscription. Tidelift subscriptions also help the maintainers by funding the project, which in turn allows us to ship releases, bugfixes, and security updates more often. + + +## Security and Vulnerability Reporting + +For full information and a description of our security policy, please visit [`SECURITY.md`](SECURITY.md). + + +## Changelog + +See the [CHANGELOG.md](CHANGELOG.md) file for details. + + +## License + +Copyright (c) 2009-2020 Simone Carletti. This is Free Software distributed under the MIT license. + +The [Public Suffix List source](https://publicsuffix.org/list/) is subject to the terms of the Mozilla Public License, v. 2.0. diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/Rakefile b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/Rakefile new file mode 100644 index 0000000..502270a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/Rakefile @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "bundler/gem_tasks" + +# By default, run tests and linter. +task default: [:test, :rubocop] + + +require "rake/testtask" + +Rake::TestTask.new do |t| + t.libs = %w( lib test ) + t.pattern = "test/**/*_test.rb" + t.verbose = !ENV["VERBOSE"].nil? + t.warning = !ENV["WARNING"].nil? +end + +require "rubocop/rake_task" + +RuboCop::RakeTask.new + + +require "yard/rake/yardoc_task" + +YARD::Rake::YardocTask.new(:yardoc) do |y| + y.options = ["--output-dir", "yardoc"] +end + +CLOBBER.include "yardoc" + + +task :benchmarks do + Dir["benchmarks/bm_*.rb"].each do |file| + sh "ruby #{file}" + end +end +task default: [:benchmarks] if ENV["BENCHMARKS"] == "1" + + +desc "Downloads the Public Suffix List file from the repository and stores it locally." +task :"update-list" do + require "net/http" + + DEFINITION_URL = "https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat" + + File.open("data/list.txt", "w+") do |f| + response = Net::HTTP.get_response(URI.parse(DEFINITION_URL)) + response.body + f.write(response.body) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/SECURITY.md b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/SECURITY.md new file mode 100644 index 0000000..f329128 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/SECURITY.md @@ -0,0 +1,104 @@ +# Security Policy + +## Supported Versions + +Security updates are provided only for the current minor version. + +If you are using a previous minor version, we recommend upgrading to the current minor version. +This project uses [semantic versioning](https://semver.org/), therefore you can upgrade to a more recent minor version without incurring breaking changes. + +Exceptionally, we may support previous minor versions upon request if there are significant reasons preventing you from immediately switching to the latest minor version. + +Older major versions are no longer supported. + + +## Reporting a Vulnerability + +To make a report, please email weppos@weppos.net. + +Please consider encrypting your report with GPG using the key [0x420da82a989398df](https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x420da82a989398df).
+ +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsBNBE/QiI0BCACtBbjJnJIzaLb4NfjaljzT/+dvodst+wyDRE8Vwc6ujwboZjr2 +0QwXScNzObPazyvkSZVh3g6PveneeSD0dSw2XDqKbbtLMg/Ss12yqXJfjavH/zjk +6Xq+nnbSnxBPzwFAAEaEFIc6H6BygJ7zHPP5WEY5QIMqifEAX//aBqHi4GXHJiHE +237Zqufdry23jBYjY7wGXAa11VsU9Iwqh6LPB9/hc1KtzjAuvvm5ufeT/iVjxGQX +te1OZZk6n8xSVYeLsn97PfgYs0yauhexwD9dG7FbRCB379JxPRn5akr391qXcVOG +ZA3yBXUSPOL6D1+TS1S0su5zbw2AEp4+z3SpABEBAAHNIlNpbW9uZSBDYXJsZXR0 +aSA8d2VwcG9zQGdtYWlsLmNvbT7CwHcEEwEKACEFAlXH0UQCGy8FCwkIBwMFFQoJ +CAsFFgIDAQACHgECF4AACgkQQg2oKpiTmN9BOQf/UHd+bmww71MkbS38KkowDu+0 +1VH35aL8sFcAMUSEA4I5oPWZoBtYYPGpALLxtrSNW+SCnmmeCQVfVmLedUVHwDZo +TS4qiynpqnz+Cnq4KRC8VMIyaFoiT5Vg6MLtau8hJtqZn1Wv68g0nXuprsCuf9vs +z7DDZ36z8em6OJQJQ/FQ4BGogzyYHa90cJnIM6BeLiRUUpFTl1tHLlw4JFUNi8sx +6VQ1/nhcr3OyskAix5TytRnJ8uIn22m25GGdTF2WQPNfkWJQVT4ZDbCxT20acRp0 +l3x1DAk3Eel8gOKzgOboB3bkI5/l1XZvNL0YWGZeyfp8I7ZqpXg/m4qLDkYU2cLA +egQTAQoAJAIbLwULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAUCVf6KvAIZAQAKCRBC +DagqmJOY34ABB/9WbNAh0l07UN1ePfVm6Brg2Yt8k6Q2lIRUG2xAeQj/+Kx/4lAL +oY6F0jJ44kIDZqZdNA0QIqYzZRBV4iW+cQrsBUUyM+chiA3RuOsDG18sfvkyPvRy +ecOVubHCN+nK2GKy1oHQkCpVFIeetr0ugB/j+xNDKJ3Oa5dGBKF29ZH5Pxg7cqwH +cdkhBGMpPbBYq5pJtYGggqypELzFTG292StbtV837Eze+clWRTKtMBOHke/oKBCr +YYic2fmipGC9XUiqvMEMAKYq5WWWXIlcSVSnBDdxq41tXjKK4XMVgoOboZCcNFvh +0NxuDQATk1YruRZOS4SpBPXykEA1pK/zm3WmzSNTaW1vbmUgQ2FybGV0dGkgPHdl +cHBvc0B3ZXBwb3MubmV0PsLAeQQTAQIAIwUCT9CIjQIbLwcLCQgHAwIBBhUIAgkK +CwQWAgMBAh4BAheAAAoJEEINqCqYk5jfGWcH/Ax3EhAckGeCqNYE5BTx94bKB1LL +vUjeUoImMtGGFxQu2jNOAjtpuyjihm9uHBZ+dxaxHHrhE11f+0sDcwvW8qtKEzOs +GESr01VqTaVFS2JOEHhLphXseaLXJe32Osz0kHCZmrz1fCwv3b8QuWBifn8oVzcV +vrE7lGC6pGwaiUvMsvA++RUquTlNVlh8uRrqcQCU8Ne9lSoDWHlUJes5s4FoCh3R +oVBcKPsx3m/P9+GlEgTDqYP+WU3sfSfJYERH0r0NAYP96m2e7UQrqdgvMTVVDkPB +UB9efZzgkL7u9IAqmLU2klSGdEZnJ8t1AsjEyHXMztC7ICUhRFCeXHdTNhHCwHwE +EwEKACYCGy8HCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAUCVcfRaQIZAQAKCRBC +DagqmJOY31y1B/41I/SsWwDqJP/Y3LzzatGmIv/gy+LkJBBTr/NV0NYzKV2XJ1BG +ese2ZE4tKKdG4HDwF+IwFLBHcPZRv358IwwTRPnzeO23mxpTYAnRCdg/pcaYIJ9r +OxIOP+R52YbgGrNKcezVA+7TY9za072P7Bk85jTM2FNfqevaf/YQ4GRcGLQ3JI8N +tBUdvrOEETDpR0QFTr22Wv1C7UfPDsSf7ZUM7zJ38CmDji8JSlr6y75/LYSY50BB +8EHb03QxyePe98A3WzvOoqamiCIe9bRzH5IqRAtJYDX8cK4PZmp43bQhrjdjawCc +AU/OY9iz+zCw00+b6CNiRb59N+OwpNJh5iNNwsB5BBMBCgAjAhsvBwsJCAcDAgEG +FQgCCQoLBBYCAwECHgECF4AFAlX+iq0ACgkQQg2oKpiTmN/z2gf/VbcQHgTlXFYa +Sq/dE7S54uGFrdzHOV3IJyl+ByMwVoKn6zdpksRoyt7jPV3RonrUO7jEcrt7VKCU +2KC7/MZMDoUsn9BXXTtUk+uTCNh8qllR0Fo/FvWM9RJKmcDMKwAJwcKIgbfUBJGx +1N6pP2DUc+YCnEerRbnQ1DWJUM7BaOEN6bvPxuGblPst1l6S5VktFj3gZGYItHrs +pit5pesILP8K6B6VCNP2WXXYvYQo7yyYcG8WBWXin8/SdNwU68lUbfhhQVIKv6LU +h0wvgG97NsBPrFbij0K6O63FufnNr9WLMZhAzi0h6gNK2HKAyw9AZNKpPccwg+mX +Huc/4CPRlM0uU2ltb25lIENhcmxldHRpIDxzaW1vbmUuY2FybGV0dGlAZG5zaW1w +bGUuY29tPsLAdwQTAQoAIQUCVh4ipAIbLwULCQgHAwUVCgkICwUWAgMBAAIeAQIX +gAAKCRBCDagqmJOY329iCACpOY5SV7hwOZ8VqmRfxRoHQFQe9Owr+hD3eL0AKZaJ +V918dCPrrxbAmwwMAC8pS8J4CmrrTR27kxcUgVwcfyydFPrgST5pg+H7UTrBR045 +4Npw1+m99I2Pyyl3oaym4lKJFbp2c2DGODEzTg8kKfjk0cb8bd+MJrXqFyod1z5r +0pfexwaLVt1Hz+ZsmFIPO1ISHYBPV8OkpL8Kgb8WtY6REntgNjfcmtHNi0VWQ7+N +vgeYqdhscX8c9ROe26BiiiGXphRlAsCU/VLHOJkzoW3f9QLy4z01Xj/7OaD0JkHS +HrES1ye3ZDxnjnTRdh4U8ntJ+L+xnePcFQA2t0eCbPwIzSZTaW1vbmUgQ2FybGV0 +dGkgPHNpbW9uZUBjYXJsZXR0aS5uYW1lPsLAdwQTAQoAIQUCVf7gmwIbLwULCQgH +AwUVCgkICwUWAgMBAAIeAQIXgAAKCRBCDagqmJOY37L+B/45pWT3wgm43+kzHVOT +j63m4zmRb53TGZToRSxz3acyuVSuqU9Tv010F0ZV9ccb0NDeN+88s9tEisuoO0Rz +5vhC8AtwRUyR3ADE9pBtvvxT+4R9y8yYNTCIX45VPG9ZPp9+7i+XCdKtz30KIV7r +smktd2FrK16r/KUN8+03iZSgzQ9lsTmXK5L7zH/f3Tqhbfvybr4+M71KGnSoP+iP 
+vwfsoBb5rhijQLOykTb+VzdDpHQbupwxwm/3S4nsA4U6tonIywlJgBDSjgDjQj0i +Ez+Db2Wt59y6LoksRQogvJqm0nuxFUWMZc47zdhsRnqmxUYTNpKaJPWc6pfxsQPK +ZvTjzsBNBE/QiI0BCACsaNbG6kyKJBWL5jPhebsijk8PCfSHte1jNCA5l/NvaImZ +6ORq9f8S9MWlYxmzyUkVJaWrv+9p5zmjwcaegjerj6ggjPDEXlZG41Z4YE1/R8pf +wkSvrkLziBxZDB1aYplg8kgXkaIf2yi2FrMPSi04sjvQbBSCcIJeh6+vGK8tIJTn +e0tQbEvRorTwBAPAFlpx/bdk1wZYu11vFKbckhKWou7f8XSdn9ng9cY5uK+xBlFU +2ORgL1ygeIoY9uRvNZG2ncvCvxUPgOqbo31R8KPyvV4rNNvGBOfxQER9LbieBF2I +5I1gpyboGWKcXu1eV7tOpjtW6LHt+6NHhE6L1Lw1ABEBAAHCwX4EGAECAAkFAk/Q +iI0CGy4BKQkQQg2oKpiTmN/AXSAEGQECAAYFAk/QiI0ACgkQcBROh493BN9hdwf9 +GjiF1GcQN+3TZkXdr2WY0AlbcA/wBp6+ShnqcoU5XLuA0RY3+rWGuaSc2buLke6Y +2MhMAYcgmPdG+WTBoW5dWQGXBZ1IHYVR8HLGaF+Vate1MofE1BNHXhnilIMMfH4G +Tcr3Z3/FaSk9OdHlyiE/Jo7++8PQ+auHVyjtqry+/ysAnyr+lnCn+K4E0PQ1fYpP +fiawKtfSqk9h6HjjMyx9Adrz+ljXh+NyVqYZUfRytjgO+v+dAQmMczT1EawLTdX+ +trx1tHR549pEey7in5QKsje3GLH4zq4mCdWBlivQxmmmlvR07DysLADMbcpjKK2g +utfzygZHCU9hWGR3wbWZ7lXjB/0ZzutNaNYzSCkiC8PIWH1bG+TJO9pslHwP+aBJ +NGAmcwyOH9Bub2CSXikQFZNUmVRwtl7mN4bVAHI8zbMd6xdlX22yDgQei54dPXDw +UYsvGE4zmrD97he1EYcIOKMFHzlJNcWK+uR7lEq6mv7SFGnBr8qTYZRi1bySRgwd +UORuDV12GKTen9WectKtepW0fgYSz+udbDKQyyRef+7xGtCErWRL7f1qr8xm60da ++gSwyD/WkPTY8SP2mdq4u+6m4dWS26kKoENwuL7jUktl/C/EG7NmUKURbXG8lmeu +q59MIs/Fb3SgaO+zN2FZTYp6dyRJHbeEz55JdOu6F+6ihZYH +=j6Xr +-----END PGP PUBLIC KEY BLOCK----- +``` + + +## Tracking Security Updates + +Information about security vulnerabilities are published in the [Security Advisories](https://github.com/weppos/publicsuffix-ruby/security/advisories) page. diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/bin/console b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/bin/console new file mode 100755 index 0000000..c638bb4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/bin/console @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require "bundler/setup" +require "public_suffix" + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require "irb" +IRB.start diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/codecov.yml b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/codecov.yml new file mode 100644 index 0000000..b695937 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/codecov.yml @@ -0,0 +1,12 @@ +# https://docs.codecov.io/docs/coverage-configuration +coverage: + precision: 1 + round: down + status: + project: + default: false + patch: + default: false + +# https://docs.codecov.io/docs/pull-request-comments#section-requiring-changes +comment: off diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/data/list.txt b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/data/list.txt new file mode 100644 index 0000000..9ca00ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/data/list.txt @@ -0,0 +1,13380 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Please pull this list from, and only from https://publicsuffix.org/list/public_suffix_list.dat, +// rather than any other VCS sites. Pulling from any other URL is not guaranteed to be supported. + +// Instructions on pulling and using this list can be found at https://publicsuffix.org/list/. 
+ +// ===BEGIN ICANN DOMAINS=== + +// ac : https://en.wikipedia.org/wiki/.ac +ac +com.ac +edu.ac +gov.ac +net.ac +mil.ac +org.ac + +// ad : https://en.wikipedia.org/wiki/.ad +ad +nom.ad + +// ae : https://en.wikipedia.org/wiki/.ae +// see also: "Domain Name Eligibility Policy" at http://www.aeda.ae/eng/aepolicy.php +ae +co.ae +net.ae +org.ae +sch.ae +ac.ae +gov.ae +mil.ae + +// aero : see https://www.information.aero/index.php?id=66 +aero +accident-investigation.aero +accident-prevention.aero +aerobatic.aero +aeroclub.aero +aerodrome.aero +agents.aero +aircraft.aero +airline.aero +airport.aero +air-surveillance.aero +airtraffic.aero +air-traffic-control.aero +ambulance.aero +amusement.aero +association.aero +author.aero +ballooning.aero +broker.aero +caa.aero +cargo.aero +catering.aero +certification.aero +championship.aero +charter.aero +civilaviation.aero +club.aero +conference.aero +consultant.aero +consulting.aero +control.aero +council.aero +crew.aero +design.aero +dgca.aero +educator.aero +emergency.aero +engine.aero +engineer.aero +entertainment.aero +equipment.aero +exchange.aero +express.aero +federation.aero +flight.aero +fuel.aero +gliding.aero +government.aero +groundhandling.aero +group.aero +hanggliding.aero +homebuilt.aero +insurance.aero +journal.aero +journalist.aero +leasing.aero +logistics.aero +magazine.aero +maintenance.aero +media.aero +microlight.aero +modelling.aero +navigation.aero +parachuting.aero +paragliding.aero +passenger-association.aero +pilot.aero +press.aero +production.aero +recreation.aero +repbody.aero +res.aero +research.aero +rotorcraft.aero +safety.aero +scientist.aero +services.aero +show.aero +skydiving.aero +software.aero +student.aero +trader.aero +trading.aero +trainer.aero +union.aero +workinggroup.aero +works.aero + +// af : http://www.nic.af/help.jsp +af +gov.af +com.af +org.af +net.af +edu.af + +// ag : http://www.nic.ag/prices.htm +ag +com.ag +org.ag +net.ag +co.ag +nom.ag + +// ai : http://nic.com.ai/ +ai +off.ai +com.ai +net.ai +org.ai + +// al : http://www.ert.gov.al/ert_alb/faq_det.html?Id=31 +al +com.al +edu.al +gov.al +mil.al +net.al +org.al + +// am : https://www.amnic.net/policy/en/Policy_EN.pdf +am +co.am +com.am +commune.am +net.am +org.am + +// ao : https://en.wikipedia.org/wiki/.ao +// http://www.dns.ao/REGISTR.DOC +ao +ed.ao +gv.ao +og.ao +co.ao +pb.ao +it.ao + +// aq : https://en.wikipedia.org/wiki/.aq +aq + +// ar : https://nic.ar/nic-argentina/normativa-vigente +ar +com.ar +edu.ar +gob.ar +gov.ar +int.ar +mil.ar +musica.ar +net.ar +org.ar +tur.ar + +// arpa : https://en.wikipedia.org/wiki/.arpa +// Confirmed by registry 2008-06-18 +arpa +e164.arpa +in-addr.arpa +ip6.arpa +iris.arpa +uri.arpa +urn.arpa + +// as : https://en.wikipedia.org/wiki/.as +as +gov.as + +// asia : https://en.wikipedia.org/wiki/.asia +asia + +// at : https://en.wikipedia.org/wiki/.at +// Confirmed by registry 2008-06-17 +at +ac.at +co.at +gv.at +or.at +sth.ac.at + +// au : https://en.wikipedia.org/wiki/.au +// http://www.auda.org.au/ +au +// 2LDs +com.au +net.au +org.au +edu.au +gov.au +asn.au +id.au +// Historic 2LDs (closed to new registration, but sites still exist) +info.au +conf.au +oz.au +// CGDNs - http://www.cgdn.org.au/ +act.au +nsw.au +nt.au +qld.au +sa.au +tas.au +vic.au +wa.au +// 3LDs +act.edu.au +catholic.edu.au +// eq.edu.au - Removed at the request of the Queensland Department of Education +nsw.edu.au +nt.edu.au +qld.edu.au +sa.edu.au +tas.edu.au +vic.edu.au +wa.edu.au +// act.gov.au Bug 984824 - Removed at request of Greg Tankard +// 
nsw.gov.au Bug 547985 - Removed at request of +// nt.gov.au Bug 940478 - Removed at request of Greg Connors +qld.gov.au +sa.gov.au +tas.gov.au +vic.gov.au +wa.gov.au +// 4LDs +// education.tas.edu.au - Removed at the request of the Department of Education Tasmania +schools.nsw.edu.au + +// aw : https://en.wikipedia.org/wiki/.aw +aw +com.aw + +// ax : https://en.wikipedia.org/wiki/.ax +ax + +// az : https://en.wikipedia.org/wiki/.az +az +com.az +net.az +int.az +gov.az +org.az +edu.az +info.az +pp.az +mil.az +name.az +pro.az +biz.az + +// ba : http://nic.ba/users_data/files/pravilnik_o_registraciji.pdf +ba +com.ba +edu.ba +gov.ba +mil.ba +net.ba +org.ba + +// bb : https://en.wikipedia.org/wiki/.bb +bb +biz.bb +co.bb +com.bb +edu.bb +gov.bb +info.bb +net.bb +org.bb +store.bb +tv.bb + +// bd : https://en.wikipedia.org/wiki/.bd +*.bd + +// be : https://en.wikipedia.org/wiki/.be +// Confirmed by registry 2008-06-08 +be +ac.be + +// bf : https://en.wikipedia.org/wiki/.bf +bf +gov.bf + +// bg : https://en.wikipedia.org/wiki/.bg +// https://www.register.bg/user/static/rules/en/index.html +bg +a.bg +b.bg +c.bg +d.bg +e.bg +f.bg +g.bg +h.bg +i.bg +j.bg +k.bg +l.bg +m.bg +n.bg +o.bg +p.bg +q.bg +r.bg +s.bg +t.bg +u.bg +v.bg +w.bg +x.bg +y.bg +z.bg +0.bg +1.bg +2.bg +3.bg +4.bg +5.bg +6.bg +7.bg +8.bg +9.bg + +// bh : https://en.wikipedia.org/wiki/.bh +bh +com.bh +edu.bh +net.bh +org.bh +gov.bh + +// bi : https://en.wikipedia.org/wiki/.bi +// http://whois.nic.bi/ +bi +co.bi +com.bi +edu.bi +or.bi +org.bi + +// biz : https://en.wikipedia.org/wiki/.biz +biz + +// bj : https://en.wikipedia.org/wiki/.bj +bj +asso.bj +barreau.bj +gouv.bj + +// bm : http://www.bermudanic.bm/dnr-text.txt +bm +com.bm +edu.bm +gov.bm +net.bm +org.bm + +// bn : http://www.bnnic.bn/faqs +bn +com.bn +edu.bn +gov.bn +net.bn +org.bn + +// bo : https://nic.bo/delegacion2015.php#h-1.10 +bo +com.bo +edu.bo +gob.bo +int.bo +org.bo +net.bo +mil.bo +tv.bo +web.bo +// Social Domains +academia.bo +agro.bo +arte.bo +blog.bo +bolivia.bo +ciencia.bo +cooperativa.bo +democracia.bo +deporte.bo +ecologia.bo +economia.bo +empresa.bo +indigena.bo +industria.bo +info.bo +medicina.bo +movimiento.bo +musica.bo +natural.bo +nombre.bo +noticias.bo +patria.bo +politica.bo +profesional.bo +plurinacional.bo +pueblo.bo +revista.bo +salud.bo +tecnologia.bo +tksat.bo +transporte.bo +wiki.bo + +// br : http://registro.br/dominio/categoria.html +// Submitted by registry +br +9guacu.br +abc.br +adm.br +adv.br +agr.br +aju.br +am.br +anani.br +aparecida.br +app.br +arq.br +art.br +ato.br +b.br +barueri.br +belem.br +bhz.br +bib.br +bio.br +blog.br +bmd.br +boavista.br +bsb.br +campinagrande.br +campinas.br +caxias.br +cim.br +cng.br +cnt.br +com.br +contagem.br +coop.br +coz.br +cri.br +cuiaba.br +curitiba.br +def.br +des.br +det.br +dev.br +ecn.br +eco.br +edu.br +emp.br +enf.br +eng.br +esp.br +etc.br +eti.br +far.br +feira.br +flog.br +floripa.br +fm.br +fnd.br +fortal.br +fot.br +foz.br +fst.br +g12.br +geo.br +ggf.br +goiania.br +gov.br +// gov.br 26 states + df https://en.wikipedia.org/wiki/States_of_Brazil +ac.gov.br +al.gov.br +am.gov.br +ap.gov.br +ba.gov.br +ce.gov.br +df.gov.br +es.gov.br +go.gov.br +ma.gov.br +mg.gov.br +ms.gov.br +mt.gov.br +pa.gov.br +pb.gov.br +pe.gov.br +pi.gov.br +pr.gov.br +rj.gov.br +rn.gov.br +ro.gov.br +rr.gov.br +rs.gov.br +sc.gov.br +se.gov.br +sp.gov.br +to.gov.br +gru.br +imb.br +ind.br +inf.br +jab.br +jampa.br +jdf.br +joinville.br +jor.br +jus.br +leg.br +lel.br +log.br +londrina.br +macapa.br +maceio.br +manaus.br 
+maringa.br +mat.br +med.br +mil.br +morena.br +mp.br +mus.br +natal.br +net.br +niteroi.br +*.nom.br +not.br +ntr.br +odo.br +ong.br +org.br +osasco.br +palmas.br +poa.br +ppg.br +pro.br +psc.br +psi.br +pvh.br +qsl.br +radio.br +rec.br +recife.br +rep.br +ribeirao.br +rio.br +riobranco.br +riopreto.br +salvador.br +sampa.br +santamaria.br +santoandre.br +saobernardo.br +saogonca.br +seg.br +sjc.br +slg.br +slz.br +sorocaba.br +srv.br +taxi.br +tc.br +tec.br +teo.br +the.br +tmp.br +trd.br +tur.br +tv.br +udi.br +vet.br +vix.br +vlog.br +wiki.br +zlg.br + +// bs : http://www.nic.bs/rules.html +bs +com.bs +net.bs +org.bs +edu.bs +gov.bs + +// bt : https://en.wikipedia.org/wiki/.bt +bt +com.bt +edu.bt +gov.bt +net.bt +org.bt + +// bv : No registrations at this time. +// Submitted by registry +bv + +// bw : https://en.wikipedia.org/wiki/.bw +// http://www.gobin.info/domainname/bw.doc +// list of other 2nd level tlds ? +bw +co.bw +org.bw + +// by : https://en.wikipedia.org/wiki/.by +// http://tld.by/rules_2006_en.html +// list of other 2nd level tlds ? +by +gov.by +mil.by +// Official information does not indicate that com.by is a reserved +// second-level domain, but it's being used as one (see www.google.com.by and +// www.yahoo.com.by, for example), so we list it here for safety's sake. +com.by + +// http://hoster.by/ +of.by + +// bz : https://en.wikipedia.org/wiki/.bz +// http://www.belizenic.bz/ +bz +com.bz +net.bz +org.bz +edu.bz +gov.bz + +// ca : https://en.wikipedia.org/wiki/.ca +ca +// ca geographical names +ab.ca +bc.ca +mb.ca +nb.ca +nf.ca +nl.ca +ns.ca +nt.ca +nu.ca +on.ca +pe.ca +qc.ca +sk.ca +yk.ca +// gc.ca: https://en.wikipedia.org/wiki/.gc.ca +// see also: http://registry.gc.ca/en/SubdomainFAQ +gc.ca + +// cat : https://en.wikipedia.org/wiki/.cat +cat + +// cc : https://en.wikipedia.org/wiki/.cc +cc + +// cd : https://en.wikipedia.org/wiki/.cd +// see also: https://www.nic.cd/domain/insertDomain_2.jsp?act=1 +cd +gov.cd + +// cf : https://en.wikipedia.org/wiki/.cf +cf + +// cg : https://en.wikipedia.org/wiki/.cg +cg + +// ch : https://en.wikipedia.org/wiki/.ch +ch + +// ci : https://en.wikipedia.org/wiki/.ci +// http://www.nic.ci/index.php?page=charte +ci +org.ci +or.ci +com.ci +co.ci +edu.ci +ed.ci +ac.ci +net.ci +go.ci +asso.ci +aéroport.ci +int.ci +presse.ci +md.ci +gouv.ci + +// ck : https://en.wikipedia.org/wiki/.ck +*.ck +!www.ck + +// cl : https://www.nic.cl +// Confirmed by .CL registry +cl +aprendemas.cl +co.cl +gob.cl +gov.cl +mil.cl + +// cm : https://en.wikipedia.org/wiki/.cm plus bug 981927 +cm +co.cm +com.cm +gov.cm +net.cm + +// cn : https://en.wikipedia.org/wiki/.cn +// Submitted by registry +cn +ac.cn +com.cn +edu.cn +gov.cn +net.cn +org.cn +mil.cn +公司.cn +网络.cn +網絡.cn +// cn geographic names +ah.cn +bj.cn +cq.cn +fj.cn +gd.cn +gs.cn +gz.cn +gx.cn +ha.cn +hb.cn +he.cn +hi.cn +hl.cn +hn.cn +jl.cn +js.cn +jx.cn +ln.cn +nm.cn +nx.cn +qh.cn +sc.cn +sd.cn +sh.cn +sn.cn +sx.cn +tj.cn +xj.cn +xz.cn +yn.cn +zj.cn +hk.cn +mo.cn +tw.cn + +// co : https://en.wikipedia.org/wiki/.co +// Submitted by registry +co +arts.co +com.co +edu.co +firm.co +gov.co +info.co +int.co +mil.co +net.co +nom.co +org.co +rec.co +web.co + +// com : https://en.wikipedia.org/wiki/.com +com + +// coop : https://en.wikipedia.org/wiki/.coop +coop + +// cr : http://www.nic.cr/niccr_publico/showRegistroDominiosScreen.do +cr +ac.cr +co.cr +ed.cr +fi.cr +go.cr +or.cr +sa.cr + +// cu : https://en.wikipedia.org/wiki/.cu +cu +com.cu +edu.cu +org.cu +net.cu +gov.cu +inf.cu + +// cv : 
https://en.wikipedia.org/wiki/.cv +cv + +// cw : http://www.una.cw/cw_registry/ +// Confirmed by registry 2013-03-26 +cw +com.cw +edu.cw +net.cw +org.cw + +// cx : https://en.wikipedia.org/wiki/.cx +// list of other 2nd level tlds ? +cx +gov.cx + +// cy : http://www.nic.cy/ +// Submitted by registry Panayiotou Fotia +cy +ac.cy +biz.cy +com.cy +ekloges.cy +gov.cy +ltd.cy +name.cy +net.cy +org.cy +parliament.cy +press.cy +pro.cy +tm.cy + +// cz : https://en.wikipedia.org/wiki/.cz +cz + +// de : https://en.wikipedia.org/wiki/.de +// Confirmed by registry (with technical +// reservations) 2008-07-01 +de + +// dj : https://en.wikipedia.org/wiki/.dj +dj + +// dk : https://en.wikipedia.org/wiki/.dk +// Confirmed by registry 2008-06-17 +dk + +// dm : https://en.wikipedia.org/wiki/.dm +dm +com.dm +net.dm +org.dm +edu.dm +gov.dm + +// do : https://en.wikipedia.org/wiki/.do +do +art.do +com.do +edu.do +gob.do +gov.do +mil.do +net.do +org.do +sld.do +web.do + +// dz : https://en.wikipedia.org/wiki/.dz +dz +com.dz +org.dz +net.dz +gov.dz +edu.dz +asso.dz +pol.dz +art.dz + +// ec : http://www.nic.ec/reg/paso1.asp +// Submitted by registry +ec +com.ec +info.ec +net.ec +fin.ec +k12.ec +med.ec +pro.ec +org.ec +edu.ec +gov.ec +gob.ec +mil.ec + +// edu : https://en.wikipedia.org/wiki/.edu +edu + +// ee : http://www.eenet.ee/EENet/dom_reeglid.html#lisa_B +ee +edu.ee +gov.ee +riik.ee +lib.ee +med.ee +com.ee +pri.ee +aip.ee +org.ee +fie.ee + +// eg : https://en.wikipedia.org/wiki/.eg +eg +com.eg +edu.eg +eun.eg +gov.eg +mil.eg +name.eg +net.eg +org.eg +sci.eg + +// er : https://en.wikipedia.org/wiki/.er +*.er + +// es : https://www.nic.es/site_ingles/ingles/dominios/index.html +es +com.es +nom.es +org.es +gob.es +edu.es + +// et : https://en.wikipedia.org/wiki/.et +et +com.et +gov.et +org.et +edu.et +biz.et +name.et +info.et +net.et + +// eu : https://en.wikipedia.org/wiki/.eu +eu + +// fi : https://en.wikipedia.org/wiki/.fi +fi +// aland.fi : https://en.wikipedia.org/wiki/.ax +// This domain is being phased out in favor of .ax. As there are still many +// domains under aland.fi, we still keep it on the list until aland.fi is +// completely removed. 
+// TODO: Check for updates (expected to be phased out around Q1/2009) +aland.fi + +// fj : http://domains.fj/ +// Submitted by registry 2020-02-11 +fj +ac.fj +biz.fj +com.fj +gov.fj +info.fj +mil.fj +name.fj +net.fj +org.fj +pro.fj + +// fk : https://en.wikipedia.org/wiki/.fk +*.fk + +// fm : https://en.wikipedia.org/wiki/.fm +com.fm +edu.fm +net.fm +org.fm +fm + +// fo : https://en.wikipedia.org/wiki/.fo +fo + +// fr : http://www.afnic.fr/ +// domaines descriptifs : https://www.afnic.fr/medias/documents/Cadre_legal/Afnic_Naming_Policy_12122016_VEN.pdf +fr +asso.fr +com.fr +gouv.fr +nom.fr +prd.fr +tm.fr +// domaines sectoriels : https://www.afnic.fr/en/products-and-services/the-fr-tld/sector-based-fr-domains-4.html +aeroport.fr +avocat.fr +avoues.fr +cci.fr +chambagri.fr +chirurgiens-dentistes.fr +experts-comptables.fr +geometre-expert.fr +greta.fr +huissier-justice.fr +medecin.fr +notaires.fr +pharmacien.fr +port.fr +veterinaire.fr + +// ga : https://en.wikipedia.org/wiki/.ga +ga + +// gb : This registry is effectively dormant +// Submitted by registry +gb + +// gd : https://en.wikipedia.org/wiki/.gd +edu.gd +gov.gd +gd + +// ge : http://www.nic.net.ge/policy_en.pdf +ge +com.ge +edu.ge +gov.ge +org.ge +mil.ge +net.ge +pvt.ge + +// gf : https://en.wikipedia.org/wiki/.gf +gf + +// gg : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +gg +co.gg +net.gg +org.gg + +// gh : https://en.wikipedia.org/wiki/.gh +// see also: http://www.nic.gh/reg_now.php +// Although domains directly at second level are not possible at the moment, +// they have been possible for some time and may come back. +gh +com.gh +edu.gh +gov.gh +org.gh +mil.gh + +// gi : http://www.nic.gi/rules.html +gi +com.gi +ltd.gi +gov.gi +mod.gi +edu.gi +org.gi + +// gl : https://en.wikipedia.org/wiki/.gl +// http://nic.gl +gl +co.gl +com.gl +edu.gl +net.gl +org.gl + +// gm : http://www.nic.gm/htmlpages%5Cgm-policy.htm +gm + +// gn : http://psg.com/dns/gn/gn.txt +// Submitted by registry +gn +ac.gn +com.gn +edu.gn +gov.gn +org.gn +net.gn + +// gov : https://en.wikipedia.org/wiki/.gov +gov + +// gp : http://www.nic.gp/index.php?lang=en +gp +com.gp +net.gp +mobi.gp +edu.gp +org.gp +asso.gp + +// gq : https://en.wikipedia.org/wiki/.gq +gq + +// gr : https://grweb.ics.forth.gr/english/1617-B-2005.html +// Submitted by registry +gr +com.gr +edu.gr +net.gr +org.gr +gov.gr + +// gs : https://en.wikipedia.org/wiki/.gs +gs + +// gt : http://www.gt/politicas_de_registro.html +gt +com.gt +edu.gt +gob.gt +ind.gt +mil.gt +net.gt +org.gt + +// gu : http://gadao.gov.gu/register.html +// University of Guam : https://www.uog.edu +// Submitted by uognoc@triton.uog.edu +gu +com.gu +edu.gu +gov.gu +guam.gu +info.gu +net.gu +org.gu +web.gu + +// gw : https://en.wikipedia.org/wiki/.gw +gw + +// gy : https://en.wikipedia.org/wiki/.gy +// http://registry.gy/ +gy +co.gy +com.gy +edu.gy +gov.gy +net.gy +org.gy + +// hk : https://www.hkirc.hk +// Submitted by registry +hk +com.hk +edu.hk +gov.hk +idv.hk +net.hk +org.hk +公司.hk +教育.hk +敎育.hk +政府.hk +個人.hk +个人.hk +箇人.hk +網络.hk +网络.hk +组織.hk +網絡.hk +网絡.hk +组织.hk +組織.hk +組织.hk + +// hm : https://en.wikipedia.org/wiki/.hm +hm + +// hn : http://www.nic.hn/politicas/ps02,,05.html +hn +com.hn +edu.hn +org.hn +net.hn +mil.hn +gob.hn + +// hr : http://www.dns.hr/documents/pdf/HRTLD-regulations.pdf +hr +iz.hr +from.hr +name.hr +com.hr + +// ht : http://www.nic.ht/info/charte.cfm +ht +com.ht +shop.ht +firm.ht +info.ht +adult.ht +net.ht +pro.ht +org.ht +med.ht +art.ht +coop.ht +pol.ht 
+asso.ht +edu.ht +rel.ht +gouv.ht +perso.ht + +// hu : http://www.domain.hu/domain/English/sld.html +// Confirmed by registry 2008-06-12 +hu +co.hu +info.hu +org.hu +priv.hu +sport.hu +tm.hu +2000.hu +agrar.hu +bolt.hu +casino.hu +city.hu +erotica.hu +erotika.hu +film.hu +forum.hu +games.hu +hotel.hu +ingatlan.hu +jogasz.hu +konyvelo.hu +lakas.hu +media.hu +news.hu +reklam.hu +sex.hu +shop.hu +suli.hu +szex.hu +tozsde.hu +utazas.hu +video.hu + +// id : https://pandi.id/en/domain/registration-requirements/ +id +ac.id +biz.id +co.id +desa.id +go.id +mil.id +my.id +net.id +or.id +ponpes.id +sch.id +web.id + +// ie : https://en.wikipedia.org/wiki/.ie +ie +gov.ie + +// il : http://www.isoc.org.il/domains/ +il +ac.il +co.il +gov.il +idf.il +k12.il +muni.il +net.il +org.il + +// im : https://www.nic.im/ +// Submitted by registry +im +ac.im +co.im +com.im +ltd.co.im +net.im +org.im +plc.co.im +tt.im +tv.im + +// in : https://en.wikipedia.org/wiki/.in +// see also: https://registry.in/Policies +// Please note, that nic.in is not an official eTLD, but used by most +// government institutions. +in +co.in +firm.in +net.in +org.in +gen.in +ind.in +nic.in +ac.in +edu.in +res.in +gov.in +mil.in + +// info : https://en.wikipedia.org/wiki/.info +info + +// int : https://en.wikipedia.org/wiki/.int +// Confirmed by registry 2008-06-18 +int +eu.int + +// io : http://www.nic.io/rules.html +// list of other 2nd level tlds ? +io +com.io + +// iq : http://www.cmc.iq/english/iq/iqregister1.htm +iq +gov.iq +edu.iq +mil.iq +com.iq +org.iq +net.iq + +// ir : http://www.nic.ir/Terms_and_Conditions_ir,_Appendix_1_Domain_Rules +// Also see http://www.nic.ir/Internationalized_Domain_Names +// Two .ir entries added at request of , 2010-04-16 +ir +ac.ir +co.ir +gov.ir +id.ir +net.ir +org.ir +sch.ir +// xn--mgba3a4f16a.ir (.ir, Persian YEH) +ایران.ir +// xn--mgba3a4fra.ir (.ir, Arabic YEH) +ايران.ir + +// is : http://www.isnic.is/domain/rules.php +// Confirmed by registry 2008-12-06 +is +net.is +com.is +edu.is +gov.is +org.is +int.is + +// it : https://en.wikipedia.org/wiki/.it +it +gov.it +edu.it +// Reserved geo-names (regions and provinces): +// https://www.nic.it/sites/default/files/archivio/docs/Regulation_assignation_v7.1.pdf +// Regions +abr.it +abruzzo.it +aosta-valley.it +aostavalley.it +bas.it +basilicata.it +cal.it +calabria.it +cam.it +campania.it +emilia-romagna.it +emiliaromagna.it +emr.it +friuli-v-giulia.it +friuli-ve-giulia.it +friuli-vegiulia.it +friuli-venezia-giulia.it +friuli-veneziagiulia.it +friuli-vgiulia.it +friuliv-giulia.it +friulive-giulia.it +friulivegiulia.it +friulivenezia-giulia.it +friuliveneziagiulia.it +friulivgiulia.it +fvg.it +laz.it +lazio.it +lig.it +liguria.it +lom.it +lombardia.it +lombardy.it +lucania.it +mar.it +marche.it +mol.it +molise.it +piedmont.it +piemonte.it +pmn.it +pug.it +puglia.it +sar.it +sardegna.it +sardinia.it +sic.it +sicilia.it +sicily.it +taa.it +tos.it +toscana.it +trentin-sud-tirol.it +trentin-süd-tirol.it +trentin-sudtirol.it +trentin-südtirol.it +trentin-sued-tirol.it +trentin-suedtirol.it +trentino-a-adige.it +trentino-aadige.it +trentino-alto-adige.it +trentino-altoadige.it +trentino-s-tirol.it +trentino-stirol.it +trentino-sud-tirol.it +trentino-süd-tirol.it +trentino-sudtirol.it +trentino-südtirol.it +trentino-sued-tirol.it +trentino-suedtirol.it +trentino.it +trentinoa-adige.it +trentinoaadige.it +trentinoalto-adige.it +trentinoaltoadige.it +trentinos-tirol.it +trentinostirol.it +trentinosud-tirol.it +trentinosüd-tirol.it +trentinosudtirol.it 
+trentinosüdtirol.it +trentinosued-tirol.it +trentinosuedtirol.it +trentinsud-tirol.it +trentinsüd-tirol.it +trentinsudtirol.it +trentinsüdtirol.it +trentinsued-tirol.it +trentinsuedtirol.it +tuscany.it +umb.it +umbria.it +val-d-aosta.it +val-daosta.it +vald-aosta.it +valdaosta.it +valle-aosta.it +valle-d-aosta.it +valle-daosta.it +valleaosta.it +valled-aosta.it +valledaosta.it +vallee-aoste.it +vallée-aoste.it +vallee-d-aoste.it +vallée-d-aoste.it +valleeaoste.it +valléeaoste.it +valleedaoste.it +valléedaoste.it +vao.it +vda.it +ven.it +veneto.it +// Provinces +ag.it +agrigento.it +al.it +alessandria.it +alto-adige.it +altoadige.it +an.it +ancona.it +andria-barletta-trani.it +andria-trani-barletta.it +andriabarlettatrani.it +andriatranibarletta.it +ao.it +aosta.it +aoste.it +ap.it +aq.it +aquila.it +ar.it +arezzo.it +ascoli-piceno.it +ascolipiceno.it +asti.it +at.it +av.it +avellino.it +ba.it +balsan-sudtirol.it +balsan-südtirol.it +balsan-suedtirol.it +balsan.it +bari.it +barletta-trani-andria.it +barlettatraniandria.it +belluno.it +benevento.it +bergamo.it +bg.it +bi.it +biella.it +bl.it +bn.it +bo.it +bologna.it +bolzano-altoadige.it +bolzano.it +bozen-sudtirol.it +bozen-südtirol.it +bozen-suedtirol.it +bozen.it +br.it +brescia.it +brindisi.it +bs.it +bt.it +bulsan-sudtirol.it +bulsan-südtirol.it +bulsan-suedtirol.it +bulsan.it +bz.it +ca.it +cagliari.it +caltanissetta.it +campidano-medio.it +campidanomedio.it +campobasso.it +carbonia-iglesias.it +carboniaiglesias.it +carrara-massa.it +carraramassa.it +caserta.it +catania.it +catanzaro.it +cb.it +ce.it +cesena-forli.it +cesena-forlì.it +cesenaforli.it +cesenaforlì.it +ch.it +chieti.it +ci.it +cl.it +cn.it +co.it +como.it +cosenza.it +cr.it +cremona.it +crotone.it +cs.it +ct.it +cuneo.it +cz.it +dell-ogliastra.it +dellogliastra.it +en.it +enna.it +fc.it +fe.it +fermo.it +ferrara.it +fg.it +fi.it +firenze.it +florence.it +fm.it +foggia.it +forli-cesena.it +forlì-cesena.it +forlicesena.it +forlìcesena.it +fr.it +frosinone.it +ge.it +genoa.it +genova.it +go.it +gorizia.it +gr.it +grosseto.it +iglesias-carbonia.it +iglesiascarbonia.it +im.it +imperia.it +is.it +isernia.it +kr.it +la-spezia.it +laquila.it +laspezia.it +latina.it +lc.it +le.it +lecce.it +lecco.it +li.it +livorno.it +lo.it +lodi.it +lt.it +lu.it +lucca.it +macerata.it +mantova.it +massa-carrara.it +massacarrara.it +matera.it +mb.it +mc.it +me.it +medio-campidano.it +mediocampidano.it +messina.it +mi.it +milan.it +milano.it +mn.it +mo.it +modena.it +monza-brianza.it +monza-e-della-brianza.it +monza.it +monzabrianza.it +monzaebrianza.it +monzaedellabrianza.it +ms.it +mt.it +na.it +naples.it +napoli.it +no.it +novara.it +nu.it +nuoro.it +og.it +ogliastra.it +olbia-tempio.it +olbiatempio.it +or.it +oristano.it +ot.it +pa.it +padova.it +padua.it +palermo.it +parma.it +pavia.it +pc.it +pd.it +pe.it +perugia.it +pesaro-urbino.it +pesarourbino.it +pescara.it +pg.it +pi.it +piacenza.it +pisa.it +pistoia.it +pn.it +po.it +pordenone.it +potenza.it +pr.it +prato.it +pt.it +pu.it +pv.it +pz.it +ra.it +ragusa.it +ravenna.it +rc.it +re.it +reggio-calabria.it +reggio-emilia.it +reggiocalabria.it +reggioemilia.it +rg.it +ri.it +rieti.it +rimini.it +rm.it +rn.it +ro.it +roma.it +rome.it +rovigo.it +sa.it +salerno.it +sassari.it +savona.it +si.it +siena.it +siracusa.it +so.it +sondrio.it +sp.it +sr.it +ss.it +suedtirol.it +südtirol.it +sv.it +ta.it +taranto.it +te.it +tempio-olbia.it +tempioolbia.it +teramo.it +terni.it +tn.it +to.it +torino.it +tp.it +tr.it +trani-andria-barletta.it 
+trani-barletta-andria.it +traniandriabarletta.it +tranibarlettaandria.it +trapani.it +trento.it +treviso.it +trieste.it +ts.it +turin.it +tv.it +ud.it +udine.it +urbino-pesaro.it +urbinopesaro.it +va.it +varese.it +vb.it +vc.it +ve.it +venezia.it +venice.it +verbania.it +vercelli.it +verona.it +vi.it +vibo-valentia.it +vibovalentia.it +vicenza.it +viterbo.it +vr.it +vs.it +vt.it +vv.it + +// je : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +je +co.je +net.je +org.je + +// jm : http://www.com.jm/register.html +*.jm + +// jo : http://www.dns.jo/Registration_policy.aspx +jo +com.jo +org.jo +net.jo +edu.jo +sch.jo +gov.jo +mil.jo +name.jo + +// jobs : https://en.wikipedia.org/wiki/.jobs +jobs + +// jp : https://en.wikipedia.org/wiki/.jp +// http://jprs.co.jp/en/jpdomain.html +// Submitted by registry +jp +// jp organizational type names +ac.jp +ad.jp +co.jp +ed.jp +go.jp +gr.jp +lg.jp +ne.jp +or.jp +// jp prefecture type names +aichi.jp +akita.jp +aomori.jp +chiba.jp +ehime.jp +fukui.jp +fukuoka.jp +fukushima.jp +gifu.jp +gunma.jp +hiroshima.jp +hokkaido.jp +hyogo.jp +ibaraki.jp +ishikawa.jp +iwate.jp +kagawa.jp +kagoshima.jp +kanagawa.jp +kochi.jp +kumamoto.jp +kyoto.jp +mie.jp +miyagi.jp +miyazaki.jp +nagano.jp +nagasaki.jp +nara.jp +niigata.jp +oita.jp +okayama.jp +okinawa.jp +osaka.jp +saga.jp +saitama.jp +shiga.jp +shimane.jp +shizuoka.jp +tochigi.jp +tokushima.jp +tokyo.jp +tottori.jp +toyama.jp +wakayama.jp +yamagata.jp +yamaguchi.jp +yamanashi.jp +栃木.jp +愛知.jp +愛媛.jp +兵庫.jp +熊本.jp +茨城.jp +北海道.jp +千葉.jp +和歌山.jp +長崎.jp +長野.jp +新潟.jp +青森.jp +静岡.jp +東京.jp +石川.jp +埼玉.jp +三重.jp +京都.jp +佐賀.jp +大分.jp +大阪.jp +奈良.jp +宮城.jp +宮崎.jp +富山.jp +山口.jp +山形.jp +山梨.jp +岩手.jp +岐阜.jp +岡山.jp +島根.jp +広島.jp +徳島.jp +沖縄.jp +滋賀.jp +神奈川.jp +福井.jp +福岡.jp +福島.jp +秋田.jp +群馬.jp +香川.jp +高知.jp +鳥取.jp +鹿児島.jp +// jp geographic type names +// http://jprs.jp/doc/rule/saisoku-1.html +*.kawasaki.jp +*.kitakyushu.jp +*.kobe.jp +*.nagoya.jp +*.sapporo.jp +*.sendai.jp +*.yokohama.jp +!city.kawasaki.jp +!city.kitakyushu.jp +!city.kobe.jp +!city.nagoya.jp +!city.sapporo.jp +!city.sendai.jp +!city.yokohama.jp +// 4th level registration +aisai.aichi.jp +ama.aichi.jp +anjo.aichi.jp +asuke.aichi.jp +chiryu.aichi.jp +chita.aichi.jp +fuso.aichi.jp +gamagori.aichi.jp +handa.aichi.jp +hazu.aichi.jp +hekinan.aichi.jp +higashiura.aichi.jp +ichinomiya.aichi.jp +inazawa.aichi.jp +inuyama.aichi.jp +isshiki.aichi.jp +iwakura.aichi.jp +kanie.aichi.jp +kariya.aichi.jp +kasugai.aichi.jp +kira.aichi.jp +kiyosu.aichi.jp +komaki.aichi.jp +konan.aichi.jp +kota.aichi.jp +mihama.aichi.jp +miyoshi.aichi.jp +nishio.aichi.jp +nisshin.aichi.jp +obu.aichi.jp +oguchi.aichi.jp +oharu.aichi.jp +okazaki.aichi.jp +owariasahi.aichi.jp +seto.aichi.jp +shikatsu.aichi.jp +shinshiro.aichi.jp +shitara.aichi.jp +tahara.aichi.jp +takahama.aichi.jp +tobishima.aichi.jp +toei.aichi.jp +togo.aichi.jp +tokai.aichi.jp +tokoname.aichi.jp +toyoake.aichi.jp +toyohashi.aichi.jp +toyokawa.aichi.jp +toyone.aichi.jp +toyota.aichi.jp +tsushima.aichi.jp +yatomi.aichi.jp +akita.akita.jp +daisen.akita.jp +fujisato.akita.jp +gojome.akita.jp +hachirogata.akita.jp +happou.akita.jp +higashinaruse.akita.jp +honjo.akita.jp +honjyo.akita.jp +ikawa.akita.jp +kamikoani.akita.jp +kamioka.akita.jp +katagami.akita.jp +kazuno.akita.jp +kitaakita.akita.jp +kosaka.akita.jp +kyowa.akita.jp +misato.akita.jp +mitane.akita.jp +moriyoshi.akita.jp +nikaho.akita.jp +noshiro.akita.jp +odate.akita.jp +oga.akita.jp +ogata.akita.jp +semboku.akita.jp +yokote.akita.jp 
+yurihonjo.akita.jp +aomori.aomori.jp +gonohe.aomori.jp +hachinohe.aomori.jp +hashikami.aomori.jp +hiranai.aomori.jp +hirosaki.aomori.jp +itayanagi.aomori.jp +kuroishi.aomori.jp +misawa.aomori.jp +mutsu.aomori.jp +nakadomari.aomori.jp +noheji.aomori.jp +oirase.aomori.jp +owani.aomori.jp +rokunohe.aomori.jp +sannohe.aomori.jp +shichinohe.aomori.jp +shingo.aomori.jp +takko.aomori.jp +towada.aomori.jp +tsugaru.aomori.jp +tsuruta.aomori.jp +abiko.chiba.jp +asahi.chiba.jp +chonan.chiba.jp +chosei.chiba.jp +choshi.chiba.jp +chuo.chiba.jp +funabashi.chiba.jp +futtsu.chiba.jp +hanamigawa.chiba.jp +ichihara.chiba.jp +ichikawa.chiba.jp +ichinomiya.chiba.jp +inzai.chiba.jp +isumi.chiba.jp +kamagaya.chiba.jp +kamogawa.chiba.jp +kashiwa.chiba.jp +katori.chiba.jp +katsuura.chiba.jp +kimitsu.chiba.jp +kisarazu.chiba.jp +kozaki.chiba.jp +kujukuri.chiba.jp +kyonan.chiba.jp +matsudo.chiba.jp +midori.chiba.jp +mihama.chiba.jp +minamiboso.chiba.jp +mobara.chiba.jp +mutsuzawa.chiba.jp +nagara.chiba.jp +nagareyama.chiba.jp +narashino.chiba.jp +narita.chiba.jp +noda.chiba.jp +oamishirasato.chiba.jp +omigawa.chiba.jp +onjuku.chiba.jp +otaki.chiba.jp +sakae.chiba.jp +sakura.chiba.jp +shimofusa.chiba.jp +shirako.chiba.jp +shiroi.chiba.jp +shisui.chiba.jp +sodegaura.chiba.jp +sosa.chiba.jp +tako.chiba.jp +tateyama.chiba.jp +togane.chiba.jp +tohnosho.chiba.jp +tomisato.chiba.jp +urayasu.chiba.jp +yachimata.chiba.jp +yachiyo.chiba.jp +yokaichiba.chiba.jp +yokoshibahikari.chiba.jp +yotsukaido.chiba.jp +ainan.ehime.jp +honai.ehime.jp +ikata.ehime.jp +imabari.ehime.jp +iyo.ehime.jp +kamijima.ehime.jp +kihoku.ehime.jp +kumakogen.ehime.jp +masaki.ehime.jp +matsuno.ehime.jp +matsuyama.ehime.jp +namikata.ehime.jp +niihama.ehime.jp +ozu.ehime.jp +saijo.ehime.jp +seiyo.ehime.jp +shikokuchuo.ehime.jp +tobe.ehime.jp +toon.ehime.jp +uchiko.ehime.jp +uwajima.ehime.jp +yawatahama.ehime.jp +echizen.fukui.jp +eiheiji.fukui.jp +fukui.fukui.jp +ikeda.fukui.jp +katsuyama.fukui.jp +mihama.fukui.jp +minamiechizen.fukui.jp +obama.fukui.jp +ohi.fukui.jp +ono.fukui.jp +sabae.fukui.jp +sakai.fukui.jp +takahama.fukui.jp +tsuruga.fukui.jp +wakasa.fukui.jp +ashiya.fukuoka.jp +buzen.fukuoka.jp +chikugo.fukuoka.jp +chikuho.fukuoka.jp +chikujo.fukuoka.jp +chikushino.fukuoka.jp +chikuzen.fukuoka.jp +chuo.fukuoka.jp +dazaifu.fukuoka.jp +fukuchi.fukuoka.jp +hakata.fukuoka.jp +higashi.fukuoka.jp +hirokawa.fukuoka.jp +hisayama.fukuoka.jp +iizuka.fukuoka.jp +inatsuki.fukuoka.jp +kaho.fukuoka.jp +kasuga.fukuoka.jp +kasuya.fukuoka.jp +kawara.fukuoka.jp +keisen.fukuoka.jp +koga.fukuoka.jp +kurate.fukuoka.jp +kurogi.fukuoka.jp +kurume.fukuoka.jp +minami.fukuoka.jp +miyako.fukuoka.jp +miyama.fukuoka.jp +miyawaka.fukuoka.jp +mizumaki.fukuoka.jp +munakata.fukuoka.jp +nakagawa.fukuoka.jp +nakama.fukuoka.jp +nishi.fukuoka.jp +nogata.fukuoka.jp +ogori.fukuoka.jp +okagaki.fukuoka.jp +okawa.fukuoka.jp +oki.fukuoka.jp +omuta.fukuoka.jp +onga.fukuoka.jp +onojo.fukuoka.jp +oto.fukuoka.jp +saigawa.fukuoka.jp +sasaguri.fukuoka.jp +shingu.fukuoka.jp +shinyoshitomi.fukuoka.jp +shonai.fukuoka.jp +soeda.fukuoka.jp +sue.fukuoka.jp +tachiarai.fukuoka.jp +tagawa.fukuoka.jp +takata.fukuoka.jp +toho.fukuoka.jp +toyotsu.fukuoka.jp +tsuiki.fukuoka.jp +ukiha.fukuoka.jp +umi.fukuoka.jp +usui.fukuoka.jp +yamada.fukuoka.jp +yame.fukuoka.jp +yanagawa.fukuoka.jp +yukuhashi.fukuoka.jp +aizubange.fukushima.jp +aizumisato.fukushima.jp +aizuwakamatsu.fukushima.jp +asakawa.fukushima.jp +bandai.fukushima.jp +date.fukushima.jp +fukushima.fukushima.jp +furudono.fukushima.jp +futaba.fukushima.jp 
+hanawa.fukushima.jp +higashi.fukushima.jp +hirata.fukushima.jp +hirono.fukushima.jp +iitate.fukushima.jp +inawashiro.fukushima.jp +ishikawa.fukushima.jp +iwaki.fukushima.jp +izumizaki.fukushima.jp +kagamiishi.fukushima.jp +kaneyama.fukushima.jp +kawamata.fukushima.jp +kitakata.fukushima.jp +kitashiobara.fukushima.jp +koori.fukushima.jp +koriyama.fukushima.jp +kunimi.fukushima.jp +miharu.fukushima.jp +mishima.fukushima.jp +namie.fukushima.jp +nango.fukushima.jp +nishiaizu.fukushima.jp +nishigo.fukushima.jp +okuma.fukushima.jp +omotego.fukushima.jp +ono.fukushima.jp +otama.fukushima.jp +samegawa.fukushima.jp +shimogo.fukushima.jp +shirakawa.fukushima.jp +showa.fukushima.jp +soma.fukushima.jp +sukagawa.fukushima.jp +taishin.fukushima.jp +tamakawa.fukushima.jp +tanagura.fukushima.jp +tenei.fukushima.jp +yabuki.fukushima.jp +yamato.fukushima.jp +yamatsuri.fukushima.jp +yanaizu.fukushima.jp +yugawa.fukushima.jp +anpachi.gifu.jp +ena.gifu.jp +gifu.gifu.jp +ginan.gifu.jp +godo.gifu.jp +gujo.gifu.jp +hashima.gifu.jp +hichiso.gifu.jp +hida.gifu.jp +higashishirakawa.gifu.jp +ibigawa.gifu.jp +ikeda.gifu.jp +kakamigahara.gifu.jp +kani.gifu.jp +kasahara.gifu.jp +kasamatsu.gifu.jp +kawaue.gifu.jp +kitagata.gifu.jp +mino.gifu.jp +minokamo.gifu.jp +mitake.gifu.jp +mizunami.gifu.jp +motosu.gifu.jp +nakatsugawa.gifu.jp +ogaki.gifu.jp +sakahogi.gifu.jp +seki.gifu.jp +sekigahara.gifu.jp +shirakawa.gifu.jp +tajimi.gifu.jp +takayama.gifu.jp +tarui.gifu.jp +toki.gifu.jp +tomika.gifu.jp +wanouchi.gifu.jp +yamagata.gifu.jp +yaotsu.gifu.jp +yoro.gifu.jp +annaka.gunma.jp +chiyoda.gunma.jp +fujioka.gunma.jp +higashiagatsuma.gunma.jp +isesaki.gunma.jp +itakura.gunma.jp +kanna.gunma.jp +kanra.gunma.jp +katashina.gunma.jp +kawaba.gunma.jp +kiryu.gunma.jp +kusatsu.gunma.jp +maebashi.gunma.jp +meiwa.gunma.jp +midori.gunma.jp +minakami.gunma.jp +naganohara.gunma.jp +nakanojo.gunma.jp +nanmoku.gunma.jp +numata.gunma.jp +oizumi.gunma.jp +ora.gunma.jp +ota.gunma.jp +shibukawa.gunma.jp +shimonita.gunma.jp +shinto.gunma.jp +showa.gunma.jp +takasaki.gunma.jp +takayama.gunma.jp +tamamura.gunma.jp +tatebayashi.gunma.jp +tomioka.gunma.jp +tsukiyono.gunma.jp +tsumagoi.gunma.jp +ueno.gunma.jp +yoshioka.gunma.jp +asaminami.hiroshima.jp +daiwa.hiroshima.jp +etajima.hiroshima.jp +fuchu.hiroshima.jp +fukuyama.hiroshima.jp +hatsukaichi.hiroshima.jp +higashihiroshima.hiroshima.jp +hongo.hiroshima.jp +jinsekikogen.hiroshima.jp +kaita.hiroshima.jp +kui.hiroshima.jp +kumano.hiroshima.jp +kure.hiroshima.jp +mihara.hiroshima.jp +miyoshi.hiroshima.jp +naka.hiroshima.jp +onomichi.hiroshima.jp +osakikamijima.hiroshima.jp +otake.hiroshima.jp +saka.hiroshima.jp +sera.hiroshima.jp +seranishi.hiroshima.jp +shinichi.hiroshima.jp +shobara.hiroshima.jp +takehara.hiroshima.jp +abashiri.hokkaido.jp +abira.hokkaido.jp +aibetsu.hokkaido.jp +akabira.hokkaido.jp +akkeshi.hokkaido.jp +asahikawa.hokkaido.jp +ashibetsu.hokkaido.jp +ashoro.hokkaido.jp +assabu.hokkaido.jp +atsuma.hokkaido.jp +bibai.hokkaido.jp +biei.hokkaido.jp +bifuka.hokkaido.jp +bihoro.hokkaido.jp +biratori.hokkaido.jp +chippubetsu.hokkaido.jp +chitose.hokkaido.jp +date.hokkaido.jp +ebetsu.hokkaido.jp +embetsu.hokkaido.jp +eniwa.hokkaido.jp +erimo.hokkaido.jp +esan.hokkaido.jp +esashi.hokkaido.jp +fukagawa.hokkaido.jp +fukushima.hokkaido.jp +furano.hokkaido.jp +furubira.hokkaido.jp +haboro.hokkaido.jp +hakodate.hokkaido.jp +hamatonbetsu.hokkaido.jp +hidaka.hokkaido.jp +higashikagura.hokkaido.jp +higashikawa.hokkaido.jp +hiroo.hokkaido.jp +hokuryu.hokkaido.jp +hokuto.hokkaido.jp 
+honbetsu.hokkaido.jp +horokanai.hokkaido.jp +horonobe.hokkaido.jp +ikeda.hokkaido.jp +imakane.hokkaido.jp +ishikari.hokkaido.jp +iwamizawa.hokkaido.jp +iwanai.hokkaido.jp +kamifurano.hokkaido.jp +kamikawa.hokkaido.jp +kamishihoro.hokkaido.jp +kamisunagawa.hokkaido.jp +kamoenai.hokkaido.jp +kayabe.hokkaido.jp +kembuchi.hokkaido.jp +kikonai.hokkaido.jp +kimobetsu.hokkaido.jp +kitahiroshima.hokkaido.jp +kitami.hokkaido.jp +kiyosato.hokkaido.jp +koshimizu.hokkaido.jp +kunneppu.hokkaido.jp +kuriyama.hokkaido.jp +kuromatsunai.hokkaido.jp +kushiro.hokkaido.jp +kutchan.hokkaido.jp +kyowa.hokkaido.jp +mashike.hokkaido.jp +matsumae.hokkaido.jp +mikasa.hokkaido.jp +minamifurano.hokkaido.jp +mombetsu.hokkaido.jp +moseushi.hokkaido.jp +mukawa.hokkaido.jp +muroran.hokkaido.jp +naie.hokkaido.jp +nakagawa.hokkaido.jp +nakasatsunai.hokkaido.jp +nakatombetsu.hokkaido.jp +nanae.hokkaido.jp +nanporo.hokkaido.jp +nayoro.hokkaido.jp +nemuro.hokkaido.jp +niikappu.hokkaido.jp +niki.hokkaido.jp +nishiokoppe.hokkaido.jp +noboribetsu.hokkaido.jp +numata.hokkaido.jp +obihiro.hokkaido.jp +obira.hokkaido.jp +oketo.hokkaido.jp +okoppe.hokkaido.jp +otaru.hokkaido.jp +otobe.hokkaido.jp +otofuke.hokkaido.jp +otoineppu.hokkaido.jp +oumu.hokkaido.jp +ozora.hokkaido.jp +pippu.hokkaido.jp +rankoshi.hokkaido.jp +rebun.hokkaido.jp +rikubetsu.hokkaido.jp +rishiri.hokkaido.jp +rishirifuji.hokkaido.jp +saroma.hokkaido.jp +sarufutsu.hokkaido.jp +shakotan.hokkaido.jp +shari.hokkaido.jp +shibecha.hokkaido.jp +shibetsu.hokkaido.jp +shikabe.hokkaido.jp +shikaoi.hokkaido.jp +shimamaki.hokkaido.jp +shimizu.hokkaido.jp +shimokawa.hokkaido.jp +shinshinotsu.hokkaido.jp +shintoku.hokkaido.jp +shiranuka.hokkaido.jp +shiraoi.hokkaido.jp +shiriuchi.hokkaido.jp +sobetsu.hokkaido.jp +sunagawa.hokkaido.jp +taiki.hokkaido.jp +takasu.hokkaido.jp +takikawa.hokkaido.jp +takinoue.hokkaido.jp +teshikaga.hokkaido.jp +tobetsu.hokkaido.jp +tohma.hokkaido.jp +tomakomai.hokkaido.jp +tomari.hokkaido.jp +toya.hokkaido.jp +toyako.hokkaido.jp +toyotomi.hokkaido.jp +toyoura.hokkaido.jp +tsubetsu.hokkaido.jp +tsukigata.hokkaido.jp +urakawa.hokkaido.jp +urausu.hokkaido.jp +uryu.hokkaido.jp +utashinai.hokkaido.jp +wakkanai.hokkaido.jp +wassamu.hokkaido.jp +yakumo.hokkaido.jp +yoichi.hokkaido.jp +aioi.hyogo.jp +akashi.hyogo.jp +ako.hyogo.jp +amagasaki.hyogo.jp +aogaki.hyogo.jp +asago.hyogo.jp +ashiya.hyogo.jp +awaji.hyogo.jp +fukusaki.hyogo.jp +goshiki.hyogo.jp +harima.hyogo.jp +himeji.hyogo.jp +ichikawa.hyogo.jp +inagawa.hyogo.jp +itami.hyogo.jp +kakogawa.hyogo.jp +kamigori.hyogo.jp +kamikawa.hyogo.jp +kasai.hyogo.jp +kasuga.hyogo.jp +kawanishi.hyogo.jp +miki.hyogo.jp +minamiawaji.hyogo.jp +nishinomiya.hyogo.jp +nishiwaki.hyogo.jp +ono.hyogo.jp +sanda.hyogo.jp +sannan.hyogo.jp +sasayama.hyogo.jp +sayo.hyogo.jp +shingu.hyogo.jp +shinonsen.hyogo.jp +shiso.hyogo.jp +sumoto.hyogo.jp +taishi.hyogo.jp +taka.hyogo.jp +takarazuka.hyogo.jp +takasago.hyogo.jp +takino.hyogo.jp +tamba.hyogo.jp +tatsuno.hyogo.jp +toyooka.hyogo.jp +yabu.hyogo.jp +yashiro.hyogo.jp +yoka.hyogo.jp +yokawa.hyogo.jp +ami.ibaraki.jp +asahi.ibaraki.jp +bando.ibaraki.jp +chikusei.ibaraki.jp +daigo.ibaraki.jp +fujishiro.ibaraki.jp +hitachi.ibaraki.jp +hitachinaka.ibaraki.jp +hitachiomiya.ibaraki.jp +hitachiota.ibaraki.jp +ibaraki.ibaraki.jp +ina.ibaraki.jp +inashiki.ibaraki.jp +itako.ibaraki.jp +iwama.ibaraki.jp +joso.ibaraki.jp +kamisu.ibaraki.jp +kasama.ibaraki.jp +kashima.ibaraki.jp +kasumigaura.ibaraki.jp +koga.ibaraki.jp +miho.ibaraki.jp +mito.ibaraki.jp +moriya.ibaraki.jp +naka.ibaraki.jp 
+namegata.ibaraki.jp +oarai.ibaraki.jp +ogawa.ibaraki.jp +omitama.ibaraki.jp +ryugasaki.ibaraki.jp +sakai.ibaraki.jp +sakuragawa.ibaraki.jp +shimodate.ibaraki.jp +shimotsuma.ibaraki.jp +shirosato.ibaraki.jp +sowa.ibaraki.jp +suifu.ibaraki.jp +takahagi.ibaraki.jp +tamatsukuri.ibaraki.jp +tokai.ibaraki.jp +tomobe.ibaraki.jp +tone.ibaraki.jp +toride.ibaraki.jp +tsuchiura.ibaraki.jp +tsukuba.ibaraki.jp +uchihara.ibaraki.jp +ushiku.ibaraki.jp +yachiyo.ibaraki.jp +yamagata.ibaraki.jp +yawara.ibaraki.jp +yuki.ibaraki.jp +anamizu.ishikawa.jp +hakui.ishikawa.jp +hakusan.ishikawa.jp +kaga.ishikawa.jp +kahoku.ishikawa.jp +kanazawa.ishikawa.jp +kawakita.ishikawa.jp +komatsu.ishikawa.jp +nakanoto.ishikawa.jp +nanao.ishikawa.jp +nomi.ishikawa.jp +nonoichi.ishikawa.jp +noto.ishikawa.jp +shika.ishikawa.jp +suzu.ishikawa.jp +tsubata.ishikawa.jp +tsurugi.ishikawa.jp +uchinada.ishikawa.jp +wajima.ishikawa.jp +fudai.iwate.jp +fujisawa.iwate.jp +hanamaki.iwate.jp +hiraizumi.iwate.jp +hirono.iwate.jp +ichinohe.iwate.jp +ichinoseki.iwate.jp +iwaizumi.iwate.jp +iwate.iwate.jp +joboji.iwate.jp +kamaishi.iwate.jp +kanegasaki.iwate.jp +karumai.iwate.jp +kawai.iwate.jp +kitakami.iwate.jp +kuji.iwate.jp +kunohe.iwate.jp +kuzumaki.iwate.jp +miyako.iwate.jp +mizusawa.iwate.jp +morioka.iwate.jp +ninohe.iwate.jp +noda.iwate.jp +ofunato.iwate.jp +oshu.iwate.jp +otsuchi.iwate.jp +rikuzentakata.iwate.jp +shiwa.iwate.jp +shizukuishi.iwate.jp +sumita.iwate.jp +tanohata.iwate.jp +tono.iwate.jp +yahaba.iwate.jp +yamada.iwate.jp +ayagawa.kagawa.jp +higashikagawa.kagawa.jp +kanonji.kagawa.jp +kotohira.kagawa.jp +manno.kagawa.jp +marugame.kagawa.jp +mitoyo.kagawa.jp +naoshima.kagawa.jp +sanuki.kagawa.jp +tadotsu.kagawa.jp +takamatsu.kagawa.jp +tonosho.kagawa.jp +uchinomi.kagawa.jp +utazu.kagawa.jp +zentsuji.kagawa.jp +akune.kagoshima.jp +amami.kagoshima.jp +hioki.kagoshima.jp +isa.kagoshima.jp +isen.kagoshima.jp +izumi.kagoshima.jp +kagoshima.kagoshima.jp +kanoya.kagoshima.jp +kawanabe.kagoshima.jp +kinko.kagoshima.jp +kouyama.kagoshima.jp +makurazaki.kagoshima.jp +matsumoto.kagoshima.jp +minamitane.kagoshima.jp +nakatane.kagoshima.jp +nishinoomote.kagoshima.jp +satsumasendai.kagoshima.jp +soo.kagoshima.jp +tarumizu.kagoshima.jp +yusui.kagoshima.jp +aikawa.kanagawa.jp +atsugi.kanagawa.jp +ayase.kanagawa.jp +chigasaki.kanagawa.jp +ebina.kanagawa.jp +fujisawa.kanagawa.jp +hadano.kanagawa.jp +hakone.kanagawa.jp +hiratsuka.kanagawa.jp +isehara.kanagawa.jp +kaisei.kanagawa.jp +kamakura.kanagawa.jp +kiyokawa.kanagawa.jp +matsuda.kanagawa.jp +minamiashigara.kanagawa.jp +miura.kanagawa.jp +nakai.kanagawa.jp +ninomiya.kanagawa.jp +odawara.kanagawa.jp +oi.kanagawa.jp +oiso.kanagawa.jp +sagamihara.kanagawa.jp +samukawa.kanagawa.jp +tsukui.kanagawa.jp +yamakita.kanagawa.jp +yamato.kanagawa.jp +yokosuka.kanagawa.jp +yugawara.kanagawa.jp +zama.kanagawa.jp +zushi.kanagawa.jp +aki.kochi.jp +geisei.kochi.jp +hidaka.kochi.jp +higashitsuno.kochi.jp +ino.kochi.jp +kagami.kochi.jp +kami.kochi.jp +kitagawa.kochi.jp +kochi.kochi.jp +mihara.kochi.jp +motoyama.kochi.jp +muroto.kochi.jp +nahari.kochi.jp +nakamura.kochi.jp +nankoku.kochi.jp +nishitosa.kochi.jp +niyodogawa.kochi.jp +ochi.kochi.jp +okawa.kochi.jp +otoyo.kochi.jp +otsuki.kochi.jp +sakawa.kochi.jp +sukumo.kochi.jp +susaki.kochi.jp +tosa.kochi.jp +tosashimizu.kochi.jp +toyo.kochi.jp +tsuno.kochi.jp +umaji.kochi.jp +yasuda.kochi.jp +yusuhara.kochi.jp +amakusa.kumamoto.jp +arao.kumamoto.jp +aso.kumamoto.jp +choyo.kumamoto.jp +gyokuto.kumamoto.jp +kamiamakusa.kumamoto.jp +kikuchi.kumamoto.jp 
+kumamoto.kumamoto.jp +mashiki.kumamoto.jp +mifune.kumamoto.jp +minamata.kumamoto.jp +minamioguni.kumamoto.jp +nagasu.kumamoto.jp +nishihara.kumamoto.jp +oguni.kumamoto.jp +ozu.kumamoto.jp +sumoto.kumamoto.jp +takamori.kumamoto.jp +uki.kumamoto.jp +uto.kumamoto.jp +yamaga.kumamoto.jp +yamato.kumamoto.jp +yatsushiro.kumamoto.jp +ayabe.kyoto.jp +fukuchiyama.kyoto.jp +higashiyama.kyoto.jp +ide.kyoto.jp +ine.kyoto.jp +joyo.kyoto.jp +kameoka.kyoto.jp +kamo.kyoto.jp +kita.kyoto.jp +kizu.kyoto.jp +kumiyama.kyoto.jp +kyotamba.kyoto.jp +kyotanabe.kyoto.jp +kyotango.kyoto.jp +maizuru.kyoto.jp +minami.kyoto.jp +minamiyamashiro.kyoto.jp +miyazu.kyoto.jp +muko.kyoto.jp +nagaokakyo.kyoto.jp +nakagyo.kyoto.jp +nantan.kyoto.jp +oyamazaki.kyoto.jp +sakyo.kyoto.jp +seika.kyoto.jp +tanabe.kyoto.jp +uji.kyoto.jp +ujitawara.kyoto.jp +wazuka.kyoto.jp +yamashina.kyoto.jp +yawata.kyoto.jp +asahi.mie.jp +inabe.mie.jp +ise.mie.jp +kameyama.mie.jp +kawagoe.mie.jp +kiho.mie.jp +kisosaki.mie.jp +kiwa.mie.jp +komono.mie.jp +kumano.mie.jp +kuwana.mie.jp +matsusaka.mie.jp +meiwa.mie.jp +mihama.mie.jp +minamiise.mie.jp +misugi.mie.jp +miyama.mie.jp +nabari.mie.jp +shima.mie.jp +suzuka.mie.jp +tado.mie.jp +taiki.mie.jp +taki.mie.jp +tamaki.mie.jp +toba.mie.jp +tsu.mie.jp +udono.mie.jp +ureshino.mie.jp +watarai.mie.jp +yokkaichi.mie.jp +furukawa.miyagi.jp +higashimatsushima.miyagi.jp +ishinomaki.miyagi.jp +iwanuma.miyagi.jp +kakuda.miyagi.jp +kami.miyagi.jp +kawasaki.miyagi.jp +marumori.miyagi.jp +matsushima.miyagi.jp +minamisanriku.miyagi.jp +misato.miyagi.jp +murata.miyagi.jp +natori.miyagi.jp +ogawara.miyagi.jp +ohira.miyagi.jp +onagawa.miyagi.jp +osaki.miyagi.jp +rifu.miyagi.jp +semine.miyagi.jp +shibata.miyagi.jp +shichikashuku.miyagi.jp +shikama.miyagi.jp +shiogama.miyagi.jp +shiroishi.miyagi.jp +tagajo.miyagi.jp +taiwa.miyagi.jp +tome.miyagi.jp +tomiya.miyagi.jp +wakuya.miyagi.jp +watari.miyagi.jp +yamamoto.miyagi.jp +zao.miyagi.jp +aya.miyazaki.jp +ebino.miyazaki.jp +gokase.miyazaki.jp +hyuga.miyazaki.jp +kadogawa.miyazaki.jp +kawaminami.miyazaki.jp +kijo.miyazaki.jp +kitagawa.miyazaki.jp +kitakata.miyazaki.jp +kitaura.miyazaki.jp +kobayashi.miyazaki.jp +kunitomi.miyazaki.jp +kushima.miyazaki.jp +mimata.miyazaki.jp +miyakonojo.miyazaki.jp +miyazaki.miyazaki.jp +morotsuka.miyazaki.jp +nichinan.miyazaki.jp +nishimera.miyazaki.jp +nobeoka.miyazaki.jp +saito.miyazaki.jp +shiiba.miyazaki.jp +shintomi.miyazaki.jp +takaharu.miyazaki.jp +takanabe.miyazaki.jp +takazaki.miyazaki.jp +tsuno.miyazaki.jp +achi.nagano.jp +agematsu.nagano.jp +anan.nagano.jp +aoki.nagano.jp +asahi.nagano.jp +azumino.nagano.jp +chikuhoku.nagano.jp +chikuma.nagano.jp +chino.nagano.jp +fujimi.nagano.jp +hakuba.nagano.jp +hara.nagano.jp +hiraya.nagano.jp +iida.nagano.jp +iijima.nagano.jp +iiyama.nagano.jp +iizuna.nagano.jp +ikeda.nagano.jp +ikusaka.nagano.jp +ina.nagano.jp +karuizawa.nagano.jp +kawakami.nagano.jp +kiso.nagano.jp +kisofukushima.nagano.jp +kitaaiki.nagano.jp +komagane.nagano.jp +komoro.nagano.jp +matsukawa.nagano.jp +matsumoto.nagano.jp +miasa.nagano.jp +minamiaiki.nagano.jp +minamimaki.nagano.jp +minamiminowa.nagano.jp +minowa.nagano.jp +miyada.nagano.jp +miyota.nagano.jp +mochizuki.nagano.jp +nagano.nagano.jp +nagawa.nagano.jp +nagiso.nagano.jp +nakagawa.nagano.jp +nakano.nagano.jp +nozawaonsen.nagano.jp +obuse.nagano.jp +ogawa.nagano.jp +okaya.nagano.jp +omachi.nagano.jp +omi.nagano.jp +ookuwa.nagano.jp +ooshika.nagano.jp +otaki.nagano.jp +otari.nagano.jp +sakae.nagano.jp +sakaki.nagano.jp +saku.nagano.jp +sakuho.nagano.jp 
+shimosuwa.nagano.jp +shinanomachi.nagano.jp +shiojiri.nagano.jp +suwa.nagano.jp +suzaka.nagano.jp +takagi.nagano.jp +takamori.nagano.jp +takayama.nagano.jp +tateshina.nagano.jp +tatsuno.nagano.jp +togakushi.nagano.jp +togura.nagano.jp +tomi.nagano.jp +ueda.nagano.jp +wada.nagano.jp +yamagata.nagano.jp +yamanouchi.nagano.jp +yasaka.nagano.jp +yasuoka.nagano.jp +chijiwa.nagasaki.jp +futsu.nagasaki.jp +goto.nagasaki.jp +hasami.nagasaki.jp +hirado.nagasaki.jp +iki.nagasaki.jp +isahaya.nagasaki.jp +kawatana.nagasaki.jp +kuchinotsu.nagasaki.jp +matsuura.nagasaki.jp +nagasaki.nagasaki.jp +obama.nagasaki.jp +omura.nagasaki.jp +oseto.nagasaki.jp +saikai.nagasaki.jp +sasebo.nagasaki.jp +seihi.nagasaki.jp +shimabara.nagasaki.jp +shinkamigoto.nagasaki.jp +togitsu.nagasaki.jp +tsushima.nagasaki.jp +unzen.nagasaki.jp +ando.nara.jp +gose.nara.jp +heguri.nara.jp +higashiyoshino.nara.jp +ikaruga.nara.jp +ikoma.nara.jp +kamikitayama.nara.jp +kanmaki.nara.jp +kashiba.nara.jp +kashihara.nara.jp +katsuragi.nara.jp +kawai.nara.jp +kawakami.nara.jp +kawanishi.nara.jp +koryo.nara.jp +kurotaki.nara.jp +mitsue.nara.jp +miyake.nara.jp +nara.nara.jp +nosegawa.nara.jp +oji.nara.jp +ouda.nara.jp +oyodo.nara.jp +sakurai.nara.jp +sango.nara.jp +shimoichi.nara.jp +shimokitayama.nara.jp +shinjo.nara.jp +soni.nara.jp +takatori.nara.jp +tawaramoto.nara.jp +tenkawa.nara.jp +tenri.nara.jp +uda.nara.jp +yamatokoriyama.nara.jp +yamatotakada.nara.jp +yamazoe.nara.jp +yoshino.nara.jp +aga.niigata.jp +agano.niigata.jp +gosen.niigata.jp +itoigawa.niigata.jp +izumozaki.niigata.jp +joetsu.niigata.jp +kamo.niigata.jp +kariwa.niigata.jp +kashiwazaki.niigata.jp +minamiuonuma.niigata.jp +mitsuke.niigata.jp +muika.niigata.jp +murakami.niigata.jp +myoko.niigata.jp +nagaoka.niigata.jp +niigata.niigata.jp +ojiya.niigata.jp +omi.niigata.jp +sado.niigata.jp +sanjo.niigata.jp +seiro.niigata.jp +seirou.niigata.jp +sekikawa.niigata.jp +shibata.niigata.jp +tagami.niigata.jp +tainai.niigata.jp +tochio.niigata.jp +tokamachi.niigata.jp +tsubame.niigata.jp +tsunan.niigata.jp +uonuma.niigata.jp +yahiko.niigata.jp +yoita.niigata.jp +yuzawa.niigata.jp +beppu.oita.jp +bungoono.oita.jp +bungotakada.oita.jp +hasama.oita.jp +hiji.oita.jp +himeshima.oita.jp +hita.oita.jp +kamitsue.oita.jp +kokonoe.oita.jp +kuju.oita.jp +kunisaki.oita.jp +kusu.oita.jp +oita.oita.jp +saiki.oita.jp +taketa.oita.jp +tsukumi.oita.jp +usa.oita.jp +usuki.oita.jp +yufu.oita.jp +akaiwa.okayama.jp +asakuchi.okayama.jp +bizen.okayama.jp +hayashima.okayama.jp +ibara.okayama.jp +kagamino.okayama.jp +kasaoka.okayama.jp +kibichuo.okayama.jp +kumenan.okayama.jp +kurashiki.okayama.jp +maniwa.okayama.jp +misaki.okayama.jp +nagi.okayama.jp +niimi.okayama.jp +nishiawakura.okayama.jp +okayama.okayama.jp +satosho.okayama.jp +setouchi.okayama.jp +shinjo.okayama.jp +shoo.okayama.jp +soja.okayama.jp +takahashi.okayama.jp +tamano.okayama.jp +tsuyama.okayama.jp +wake.okayama.jp +yakage.okayama.jp +aguni.okinawa.jp +ginowan.okinawa.jp +ginoza.okinawa.jp +gushikami.okinawa.jp +haebaru.okinawa.jp +higashi.okinawa.jp +hirara.okinawa.jp +iheya.okinawa.jp +ishigaki.okinawa.jp +ishikawa.okinawa.jp +itoman.okinawa.jp +izena.okinawa.jp +kadena.okinawa.jp +kin.okinawa.jp +kitadaito.okinawa.jp +kitanakagusuku.okinawa.jp +kumejima.okinawa.jp +kunigami.okinawa.jp +minamidaito.okinawa.jp +motobu.okinawa.jp +nago.okinawa.jp +naha.okinawa.jp +nakagusuku.okinawa.jp +nakijin.okinawa.jp +nanjo.okinawa.jp +nishihara.okinawa.jp +ogimi.okinawa.jp +okinawa.okinawa.jp +onna.okinawa.jp +shimoji.okinawa.jp +taketomi.okinawa.jp 
+tarama.okinawa.jp +tokashiki.okinawa.jp +tomigusuku.okinawa.jp +tonaki.okinawa.jp +urasoe.okinawa.jp +uruma.okinawa.jp +yaese.okinawa.jp +yomitan.okinawa.jp +yonabaru.okinawa.jp +yonaguni.okinawa.jp +zamami.okinawa.jp +abeno.osaka.jp +chihayaakasaka.osaka.jp +chuo.osaka.jp +daito.osaka.jp +fujiidera.osaka.jp +habikino.osaka.jp +hannan.osaka.jp +higashiosaka.osaka.jp +higashisumiyoshi.osaka.jp +higashiyodogawa.osaka.jp +hirakata.osaka.jp +ibaraki.osaka.jp +ikeda.osaka.jp +izumi.osaka.jp +izumiotsu.osaka.jp +izumisano.osaka.jp +kadoma.osaka.jp +kaizuka.osaka.jp +kanan.osaka.jp +kashiwara.osaka.jp +katano.osaka.jp +kawachinagano.osaka.jp +kishiwada.osaka.jp +kita.osaka.jp +kumatori.osaka.jp +matsubara.osaka.jp +minato.osaka.jp +minoh.osaka.jp +misaki.osaka.jp +moriguchi.osaka.jp +neyagawa.osaka.jp +nishi.osaka.jp +nose.osaka.jp +osakasayama.osaka.jp +sakai.osaka.jp +sayama.osaka.jp +sennan.osaka.jp +settsu.osaka.jp +shijonawate.osaka.jp +shimamoto.osaka.jp +suita.osaka.jp +tadaoka.osaka.jp +taishi.osaka.jp +tajiri.osaka.jp +takaishi.osaka.jp +takatsuki.osaka.jp +tondabayashi.osaka.jp +toyonaka.osaka.jp +toyono.osaka.jp +yao.osaka.jp +ariake.saga.jp +arita.saga.jp +fukudomi.saga.jp +genkai.saga.jp +hamatama.saga.jp +hizen.saga.jp +imari.saga.jp +kamimine.saga.jp +kanzaki.saga.jp +karatsu.saga.jp +kashima.saga.jp +kitagata.saga.jp +kitahata.saga.jp +kiyama.saga.jp +kouhoku.saga.jp +kyuragi.saga.jp +nishiarita.saga.jp +ogi.saga.jp +omachi.saga.jp +ouchi.saga.jp +saga.saga.jp +shiroishi.saga.jp +taku.saga.jp +tara.saga.jp +tosu.saga.jp +yoshinogari.saga.jp +arakawa.saitama.jp +asaka.saitama.jp +chichibu.saitama.jp +fujimi.saitama.jp +fujimino.saitama.jp +fukaya.saitama.jp +hanno.saitama.jp +hanyu.saitama.jp +hasuda.saitama.jp +hatogaya.saitama.jp +hatoyama.saitama.jp +hidaka.saitama.jp +higashichichibu.saitama.jp +higashimatsuyama.saitama.jp +honjo.saitama.jp +ina.saitama.jp +iruma.saitama.jp +iwatsuki.saitama.jp +kamiizumi.saitama.jp +kamikawa.saitama.jp +kamisato.saitama.jp +kasukabe.saitama.jp +kawagoe.saitama.jp +kawaguchi.saitama.jp +kawajima.saitama.jp +kazo.saitama.jp +kitamoto.saitama.jp +koshigaya.saitama.jp +kounosu.saitama.jp +kuki.saitama.jp +kumagaya.saitama.jp +matsubushi.saitama.jp +minano.saitama.jp +misato.saitama.jp +miyashiro.saitama.jp +miyoshi.saitama.jp +moroyama.saitama.jp +nagatoro.saitama.jp +namegawa.saitama.jp +niiza.saitama.jp +ogano.saitama.jp +ogawa.saitama.jp +ogose.saitama.jp +okegawa.saitama.jp +omiya.saitama.jp +otaki.saitama.jp +ranzan.saitama.jp +ryokami.saitama.jp +saitama.saitama.jp +sakado.saitama.jp +satte.saitama.jp +sayama.saitama.jp +shiki.saitama.jp +shiraoka.saitama.jp +soka.saitama.jp +sugito.saitama.jp +toda.saitama.jp +tokigawa.saitama.jp +tokorozawa.saitama.jp +tsurugashima.saitama.jp +urawa.saitama.jp +warabi.saitama.jp +yashio.saitama.jp +yokoze.saitama.jp +yono.saitama.jp +yorii.saitama.jp +yoshida.saitama.jp +yoshikawa.saitama.jp +yoshimi.saitama.jp +aisho.shiga.jp +gamo.shiga.jp +higashiomi.shiga.jp +hikone.shiga.jp +koka.shiga.jp +konan.shiga.jp +kosei.shiga.jp +koto.shiga.jp +kusatsu.shiga.jp +maibara.shiga.jp +moriyama.shiga.jp +nagahama.shiga.jp +nishiazai.shiga.jp +notogawa.shiga.jp +omihachiman.shiga.jp +otsu.shiga.jp +ritto.shiga.jp +ryuoh.shiga.jp +takashima.shiga.jp +takatsuki.shiga.jp +torahime.shiga.jp +toyosato.shiga.jp +yasu.shiga.jp +akagi.shimane.jp +ama.shimane.jp +gotsu.shimane.jp +hamada.shimane.jp +higashiizumo.shimane.jp +hikawa.shimane.jp +hikimi.shimane.jp +izumo.shimane.jp +kakinoki.shimane.jp +masuda.shimane.jp 
+matsue.shimane.jp +misato.shimane.jp +nishinoshima.shimane.jp +ohda.shimane.jp +okinoshima.shimane.jp +okuizumo.shimane.jp +shimane.shimane.jp +tamayu.shimane.jp +tsuwano.shimane.jp +unnan.shimane.jp +yakumo.shimane.jp +yasugi.shimane.jp +yatsuka.shimane.jp +arai.shizuoka.jp +atami.shizuoka.jp +fuji.shizuoka.jp +fujieda.shizuoka.jp +fujikawa.shizuoka.jp +fujinomiya.shizuoka.jp +fukuroi.shizuoka.jp +gotemba.shizuoka.jp +haibara.shizuoka.jp +hamamatsu.shizuoka.jp +higashiizu.shizuoka.jp +ito.shizuoka.jp +iwata.shizuoka.jp +izu.shizuoka.jp +izunokuni.shizuoka.jp +kakegawa.shizuoka.jp +kannami.shizuoka.jp +kawanehon.shizuoka.jp +kawazu.shizuoka.jp +kikugawa.shizuoka.jp +kosai.shizuoka.jp +makinohara.shizuoka.jp +matsuzaki.shizuoka.jp +minamiizu.shizuoka.jp +mishima.shizuoka.jp +morimachi.shizuoka.jp +nishiizu.shizuoka.jp +numazu.shizuoka.jp +omaezaki.shizuoka.jp +shimada.shizuoka.jp +shimizu.shizuoka.jp +shimoda.shizuoka.jp +shizuoka.shizuoka.jp +susono.shizuoka.jp +yaizu.shizuoka.jp +yoshida.shizuoka.jp +ashikaga.tochigi.jp +bato.tochigi.jp +haga.tochigi.jp +ichikai.tochigi.jp +iwafune.tochigi.jp +kaminokawa.tochigi.jp +kanuma.tochigi.jp +karasuyama.tochigi.jp +kuroiso.tochigi.jp +mashiko.tochigi.jp +mibu.tochigi.jp +moka.tochigi.jp +motegi.tochigi.jp +nasu.tochigi.jp +nasushiobara.tochigi.jp +nikko.tochigi.jp +nishikata.tochigi.jp +nogi.tochigi.jp +ohira.tochigi.jp +ohtawara.tochigi.jp +oyama.tochigi.jp +sakura.tochigi.jp +sano.tochigi.jp +shimotsuke.tochigi.jp +shioya.tochigi.jp +takanezawa.tochigi.jp +tochigi.tochigi.jp +tsuga.tochigi.jp +ujiie.tochigi.jp +utsunomiya.tochigi.jp +yaita.tochigi.jp +aizumi.tokushima.jp +anan.tokushima.jp +ichiba.tokushima.jp +itano.tokushima.jp +kainan.tokushima.jp +komatsushima.tokushima.jp +matsushige.tokushima.jp +mima.tokushima.jp +minami.tokushima.jp +miyoshi.tokushima.jp +mugi.tokushima.jp +nakagawa.tokushima.jp +naruto.tokushima.jp +sanagochi.tokushima.jp +shishikui.tokushima.jp +tokushima.tokushima.jp +wajiki.tokushima.jp +adachi.tokyo.jp +akiruno.tokyo.jp +akishima.tokyo.jp +aogashima.tokyo.jp +arakawa.tokyo.jp +bunkyo.tokyo.jp +chiyoda.tokyo.jp +chofu.tokyo.jp +chuo.tokyo.jp +edogawa.tokyo.jp +fuchu.tokyo.jp +fussa.tokyo.jp +hachijo.tokyo.jp +hachioji.tokyo.jp +hamura.tokyo.jp +higashikurume.tokyo.jp +higashimurayama.tokyo.jp +higashiyamato.tokyo.jp +hino.tokyo.jp +hinode.tokyo.jp +hinohara.tokyo.jp +inagi.tokyo.jp +itabashi.tokyo.jp +katsushika.tokyo.jp +kita.tokyo.jp +kiyose.tokyo.jp +kodaira.tokyo.jp +koganei.tokyo.jp +kokubunji.tokyo.jp +komae.tokyo.jp +koto.tokyo.jp +kouzushima.tokyo.jp +kunitachi.tokyo.jp +machida.tokyo.jp +meguro.tokyo.jp +minato.tokyo.jp +mitaka.tokyo.jp +mizuho.tokyo.jp +musashimurayama.tokyo.jp +musashino.tokyo.jp +nakano.tokyo.jp +nerima.tokyo.jp +ogasawara.tokyo.jp +okutama.tokyo.jp +ome.tokyo.jp +oshima.tokyo.jp +ota.tokyo.jp +setagaya.tokyo.jp +shibuya.tokyo.jp +shinagawa.tokyo.jp +shinjuku.tokyo.jp +suginami.tokyo.jp +sumida.tokyo.jp +tachikawa.tokyo.jp +taito.tokyo.jp +tama.tokyo.jp +toshima.tokyo.jp +chizu.tottori.jp +hino.tottori.jp +kawahara.tottori.jp +koge.tottori.jp +kotoura.tottori.jp +misasa.tottori.jp +nanbu.tottori.jp +nichinan.tottori.jp +sakaiminato.tottori.jp +tottori.tottori.jp +wakasa.tottori.jp +yazu.tottori.jp +yonago.tottori.jp +asahi.toyama.jp +fuchu.toyama.jp +fukumitsu.toyama.jp +funahashi.toyama.jp +himi.toyama.jp +imizu.toyama.jp +inami.toyama.jp +johana.toyama.jp +kamiichi.toyama.jp +kurobe.toyama.jp +nakaniikawa.toyama.jp +namerikawa.toyama.jp +nanto.toyama.jp +nyuzen.toyama.jp 
+oyabe.toyama.jp +taira.toyama.jp +takaoka.toyama.jp +tateyama.toyama.jp +toga.toyama.jp +tonami.toyama.jp +toyama.toyama.jp +unazuki.toyama.jp +uozu.toyama.jp +yamada.toyama.jp +arida.wakayama.jp +aridagawa.wakayama.jp +gobo.wakayama.jp +hashimoto.wakayama.jp +hidaka.wakayama.jp +hirogawa.wakayama.jp +inami.wakayama.jp +iwade.wakayama.jp +kainan.wakayama.jp +kamitonda.wakayama.jp +katsuragi.wakayama.jp +kimino.wakayama.jp +kinokawa.wakayama.jp +kitayama.wakayama.jp +koya.wakayama.jp +koza.wakayama.jp +kozagawa.wakayama.jp +kudoyama.wakayama.jp +kushimoto.wakayama.jp +mihama.wakayama.jp +misato.wakayama.jp +nachikatsuura.wakayama.jp +shingu.wakayama.jp +shirahama.wakayama.jp +taiji.wakayama.jp +tanabe.wakayama.jp +wakayama.wakayama.jp +yuasa.wakayama.jp +yura.wakayama.jp +asahi.yamagata.jp +funagata.yamagata.jp +higashine.yamagata.jp +iide.yamagata.jp +kahoku.yamagata.jp +kaminoyama.yamagata.jp +kaneyama.yamagata.jp +kawanishi.yamagata.jp +mamurogawa.yamagata.jp +mikawa.yamagata.jp +murayama.yamagata.jp +nagai.yamagata.jp +nakayama.yamagata.jp +nanyo.yamagata.jp +nishikawa.yamagata.jp +obanazawa.yamagata.jp +oe.yamagata.jp +oguni.yamagata.jp +ohkura.yamagata.jp +oishida.yamagata.jp +sagae.yamagata.jp +sakata.yamagata.jp +sakegawa.yamagata.jp +shinjo.yamagata.jp +shirataka.yamagata.jp +shonai.yamagata.jp +takahata.yamagata.jp +tendo.yamagata.jp +tozawa.yamagata.jp +tsuruoka.yamagata.jp +yamagata.yamagata.jp +yamanobe.yamagata.jp +yonezawa.yamagata.jp +yuza.yamagata.jp +abu.yamaguchi.jp +hagi.yamaguchi.jp +hikari.yamaguchi.jp +hofu.yamaguchi.jp +iwakuni.yamaguchi.jp +kudamatsu.yamaguchi.jp +mitou.yamaguchi.jp +nagato.yamaguchi.jp +oshima.yamaguchi.jp +shimonoseki.yamaguchi.jp +shunan.yamaguchi.jp +tabuse.yamaguchi.jp +tokuyama.yamaguchi.jp +toyota.yamaguchi.jp +ube.yamaguchi.jp +yuu.yamaguchi.jp +chuo.yamanashi.jp +doshi.yamanashi.jp +fuefuki.yamanashi.jp +fujikawa.yamanashi.jp +fujikawaguchiko.yamanashi.jp +fujiyoshida.yamanashi.jp +hayakawa.yamanashi.jp +hokuto.yamanashi.jp +ichikawamisato.yamanashi.jp +kai.yamanashi.jp +kofu.yamanashi.jp +koshu.yamanashi.jp +kosuge.yamanashi.jp +minami-alps.yamanashi.jp +minobu.yamanashi.jp +nakamichi.yamanashi.jp +nanbu.yamanashi.jp +narusawa.yamanashi.jp +nirasaki.yamanashi.jp +nishikatsura.yamanashi.jp +oshino.yamanashi.jp +otsuki.yamanashi.jp +showa.yamanashi.jp +tabayama.yamanashi.jp +tsuru.yamanashi.jp +uenohara.yamanashi.jp +yamanakako.yamanashi.jp +yamanashi.yamanashi.jp + +// ke : http://www.kenic.or.ke/index.php/en/ke-domains/ke-domains +ke +ac.ke +co.ke +go.ke +info.ke +me.ke +mobi.ke +ne.ke +or.ke +sc.ke + +// kg : http://www.domain.kg/dmn_n.html +kg +org.kg +net.kg +com.kg +edu.kg +gov.kg +mil.kg + +// kh : http://www.mptc.gov.kh/dns_registration.htm +*.kh + +// ki : http://www.ki/dns/index.html +ki +edu.ki +biz.ki +net.ki +org.ki +gov.ki +info.ki +com.ki + +// km : https://en.wikipedia.org/wiki/.km +// http://www.domaine.km/documents/charte.doc +km +org.km +nom.km +gov.km +prd.km +tm.km +edu.km +mil.km +ass.km +com.km +// These are only mentioned as proposed suggestions at domaine.km, but +// https://en.wikipedia.org/wiki/.km says they're available for registration: +coop.km +asso.km +presse.km +medecin.km +notaires.km +pharmaciens.km +veterinaire.km +gouv.km + +// kn : https://en.wikipedia.org/wiki/.kn +// http://www.dot.kn/domainRules.html +kn +net.kn +org.kn +edu.kn +gov.kn + +// kp : http://www.kcce.kp/en_index.php +kp +com.kp +edu.kp +gov.kp +org.kp +rep.kp +tra.kp + +// kr : https://en.wikipedia.org/wiki/.kr +// see also: 
http://domain.nida.or.kr/eng/registration.jsp +kr +ac.kr +co.kr +es.kr +go.kr +hs.kr +kg.kr +mil.kr +ms.kr +ne.kr +or.kr +pe.kr +re.kr +sc.kr +// kr geographical names +busan.kr +chungbuk.kr +chungnam.kr +daegu.kr +daejeon.kr +gangwon.kr +gwangju.kr +gyeongbuk.kr +gyeonggi.kr +gyeongnam.kr +incheon.kr +jeju.kr +jeonbuk.kr +jeonnam.kr +seoul.kr +ulsan.kr + +// kw : https://www.nic.kw/policies/ +// Confirmed by registry +kw +com.kw +edu.kw +emb.kw +gov.kw +ind.kw +net.kw +org.kw + +// ky : http://www.icta.ky/da_ky_reg_dom.php +// Confirmed by registry 2008-06-17 +ky +edu.ky +gov.ky +com.ky +org.ky +net.ky + +// kz : https://en.wikipedia.org/wiki/.kz +// see also: http://www.nic.kz/rules/index.jsp +kz +org.kz +edu.kz +net.kz +gov.kz +mil.kz +com.kz + +// la : https://en.wikipedia.org/wiki/.la +// Submitted by registry +la +int.la +net.la +info.la +edu.la +gov.la +per.la +com.la +org.la + +// lb : https://en.wikipedia.org/wiki/.lb +// Submitted by registry +lb +com.lb +edu.lb +gov.lb +net.lb +org.lb + +// lc : https://en.wikipedia.org/wiki/.lc +// see also: http://www.nic.lc/rules.htm +lc +com.lc +net.lc +co.lc +org.lc +edu.lc +gov.lc + +// li : https://en.wikipedia.org/wiki/.li +li + +// lk : https://www.nic.lk/index.php/domain-registration/lk-domain-naming-structure +lk +gov.lk +sch.lk +net.lk +int.lk +com.lk +org.lk +edu.lk +ngo.lk +soc.lk +web.lk +ltd.lk +assn.lk +grp.lk +hotel.lk +ac.lk + +// lr : http://psg.com/dns/lr/lr.txt +// Submitted by registry +lr +com.lr +edu.lr +gov.lr +org.lr +net.lr + +// ls : http://www.nic.ls/ +// Confirmed by registry +ls +ac.ls +biz.ls +co.ls +edu.ls +gov.ls +info.ls +net.ls +org.ls +sc.ls + +// lt : https://en.wikipedia.org/wiki/.lt +lt +// gov.lt : http://www.gov.lt/index_en.php +gov.lt + +// lu : http://www.dns.lu/en/ +lu + +// lv : http://www.nic.lv/DNS/En/generic.php +lv +com.lv +edu.lv +gov.lv +org.lv +mil.lv +id.lv +net.lv +asn.lv +conf.lv + +// ly : http://www.nic.ly/regulations.php +ly +com.ly +net.ly +gov.ly +plc.ly +edu.ly +sch.ly +med.ly +org.ly +id.ly + +// ma : https://en.wikipedia.org/wiki/.ma +// http://www.anrt.ma/fr/admin/download/upload/file_fr782.pdf +ma +co.ma +net.ma +gov.ma +org.ma +ac.ma +press.ma + +// mc : http://www.nic.mc/ +mc +tm.mc +asso.mc + +// md : https://en.wikipedia.org/wiki/.md +md + +// me : https://en.wikipedia.org/wiki/.me +me +co.me +net.me +org.me +edu.me +ac.me +gov.me +its.me +priv.me + +// mg : http://nic.mg/nicmg/?page_id=39 +mg +org.mg +nom.mg +gov.mg +prd.mg +tm.mg +edu.mg +mil.mg +com.mg +co.mg + +// mh : https://en.wikipedia.org/wiki/.mh +mh + +// mil : https://en.wikipedia.org/wiki/.mil +mil + +// mk : https://en.wikipedia.org/wiki/.mk +// see also: http://dns.marnet.net.mk/postapka.php +mk +com.mk +org.mk +net.mk +edu.mk +gov.mk +inf.mk +name.mk + +// ml : http://www.gobin.info/domainname/ml-template.doc +// see also: https://en.wikipedia.org/wiki/.ml +ml +com.ml +edu.ml +gouv.ml +gov.ml +net.ml +org.ml +presse.ml + +// mm : https://en.wikipedia.org/wiki/.mm +*.mm + +// mn : https://en.wikipedia.org/wiki/.mn +mn +gov.mn +edu.mn +org.mn + +// mo : http://www.monic.net.mo/ +mo +com.mo +net.mo +org.mo +edu.mo +gov.mo + +// mobi : https://en.wikipedia.org/wiki/.mobi +mobi + +// mp : http://www.dot.mp/ +// Confirmed by registry 2008-06-17 +mp + +// mq : https://en.wikipedia.org/wiki/.mq +mq + +// mr : https://en.wikipedia.org/wiki/.mr +mr +gov.mr + +// ms : http://www.nic.ms/pdf/MS_Domain_Name_Rules.pdf +ms +com.ms +edu.ms +gov.ms +net.ms +org.ms + +// mt : https://www.nic.org.mt/go/policy +// Submitted by 
registry +mt +com.mt +edu.mt +net.mt +org.mt + +// mu : https://en.wikipedia.org/wiki/.mu +mu +com.mu +net.mu +org.mu +gov.mu +ac.mu +co.mu +or.mu + +// museum : http://about.museum/naming/ +// http://index.museum/ +museum +academy.museum +agriculture.museum +air.museum +airguard.museum +alabama.museum +alaska.museum +amber.museum +ambulance.museum +american.museum +americana.museum +americanantiques.museum +americanart.museum +amsterdam.museum +and.museum +annefrank.museum +anthro.museum +anthropology.museum +antiques.museum +aquarium.museum +arboretum.museum +archaeological.museum +archaeology.museum +architecture.museum +art.museum +artanddesign.museum +artcenter.museum +artdeco.museum +arteducation.museum +artgallery.museum +arts.museum +artsandcrafts.museum +asmatart.museum +assassination.museum +assisi.museum +association.museum +astronomy.museum +atlanta.museum +austin.museum +australia.museum +automotive.museum +aviation.museum +axis.museum +badajoz.museum +baghdad.museum +bahn.museum +bale.museum +baltimore.museum +barcelona.museum +baseball.museum +basel.museum +baths.museum +bauern.museum +beauxarts.museum +beeldengeluid.museum +bellevue.museum +bergbau.museum +berkeley.museum +berlin.museum +bern.museum +bible.museum +bilbao.museum +bill.museum +birdart.museum +birthplace.museum +bonn.museum +boston.museum +botanical.museum +botanicalgarden.museum +botanicgarden.museum +botany.museum +brandywinevalley.museum +brasil.museum +bristol.museum +british.museum +britishcolumbia.museum +broadcast.museum +brunel.museum +brussel.museum +brussels.museum +bruxelles.museum +building.museum +burghof.museum +bus.museum +bushey.museum +cadaques.museum +california.museum +cambridge.museum +can.museum +canada.museum +capebreton.museum +carrier.museum +cartoonart.museum +casadelamoneda.museum +castle.museum +castres.museum +celtic.museum +center.museum +chattanooga.museum +cheltenham.museum +chesapeakebay.museum +chicago.museum +children.museum +childrens.museum +childrensgarden.museum +chiropractic.museum +chocolate.museum +christiansburg.museum +cincinnati.museum +cinema.museum +circus.museum +civilisation.museum +civilization.museum +civilwar.museum +clinton.museum +clock.museum +coal.museum +coastaldefence.museum +cody.museum +coldwar.museum +collection.museum +colonialwilliamsburg.museum +coloradoplateau.museum +columbia.museum +columbus.museum +communication.museum +communications.museum +community.museum +computer.museum +computerhistory.museum +comunicações.museum +contemporary.museum +contemporaryart.museum +convent.museum +copenhagen.museum +corporation.museum +correios-e-telecomunicações.museum +corvette.museum +costume.museum +countryestate.museum +county.museum +crafts.museum +cranbrook.museum +creation.museum +cultural.museum +culturalcenter.museum +culture.museum +cyber.museum +cymru.museum +dali.museum +dallas.museum +database.museum +ddr.museum +decorativearts.museum +delaware.museum +delmenhorst.museum +denmark.museum +depot.museum +design.museum +detroit.museum +dinosaur.museum +discovery.museum +dolls.museum +donostia.museum +durham.museum +eastafrica.museum +eastcoast.museum +education.museum +educational.museum +egyptian.museum +eisenbahn.museum +elburg.museum +elvendrell.museum +embroidery.museum +encyclopedic.museum +england.museum +entomology.museum +environment.museum +environmentalconservation.museum +epilepsy.museum +essex.museum +estate.museum +ethnology.museum +exeter.museum +exhibition.museum +family.museum +farm.museum +farmequipment.museum +farmers.museum 
+farmstead.museum +field.museum +figueres.museum +filatelia.museum +film.museum +fineart.museum +finearts.museum +finland.museum +flanders.museum +florida.museum +force.museum +fortmissoula.museum +fortworth.museum +foundation.museum +francaise.museum +frankfurt.museum +franziskaner.museum +freemasonry.museum +freiburg.museum +fribourg.museum +frog.museum +fundacio.museum +furniture.museum +gallery.museum +garden.museum +gateway.museum +geelvinck.museum +gemological.museum +geology.museum +georgia.museum +giessen.museum +glas.museum +glass.museum +gorge.museum +grandrapids.museum +graz.museum +guernsey.museum +halloffame.museum +hamburg.museum +handson.museum +harvestcelebration.museum +hawaii.museum +health.museum +heimatunduhren.museum +hellas.museum +helsinki.museum +hembygdsforbund.museum +heritage.museum +histoire.museum +historical.museum +historicalsociety.museum +historichouses.museum +historisch.museum +historisches.museum +history.museum +historyofscience.museum +horology.museum +house.museum +humanities.museum +illustration.museum +imageandsound.museum +indian.museum +indiana.museum +indianapolis.museum +indianmarket.museum +intelligence.museum +interactive.museum +iraq.museum +iron.museum +isleofman.museum +jamison.museum +jefferson.museum +jerusalem.museum +jewelry.museum +jewish.museum +jewishart.museum +jfk.museum +journalism.museum +judaica.museum +judygarland.museum +juedisches.museum +juif.museum +karate.museum +karikatur.museum +kids.museum +koebenhavn.museum +koeln.museum +kunst.museum +kunstsammlung.museum +kunstunddesign.museum +labor.museum +labour.museum +lajolla.museum +lancashire.museum +landes.museum +lans.museum +läns.museum +larsson.museum +lewismiller.museum +lincoln.museum +linz.museum +living.museum +livinghistory.museum +localhistory.museum +london.museum +losangeles.museum +louvre.museum +loyalist.museum +lucerne.museum +luxembourg.museum +luzern.museum +mad.museum +madrid.museum +mallorca.museum +manchester.museum +mansion.museum +mansions.museum +manx.museum +marburg.museum +maritime.museum +maritimo.museum +maryland.museum +marylhurst.museum +media.museum +medical.museum +medizinhistorisches.museum +meeres.museum +memorial.museum +mesaverde.museum +michigan.museum +midatlantic.museum +military.museum +mill.museum +miners.museum +mining.museum +minnesota.museum +missile.museum +missoula.museum +modern.museum +moma.museum +money.museum +monmouth.museum +monticello.museum +montreal.museum +moscow.museum +motorcycle.museum +muenchen.museum +muenster.museum +mulhouse.museum +muncie.museum +museet.museum +museumcenter.museum +museumvereniging.museum +music.museum +national.museum +nationalfirearms.museum +nationalheritage.museum +nativeamerican.museum +naturalhistory.museum +naturalhistorymuseum.museum +naturalsciences.museum +nature.museum +naturhistorisches.museum +natuurwetenschappen.museum +naumburg.museum +naval.museum +nebraska.museum +neues.museum +newhampshire.museum +newjersey.museum +newmexico.museum +newport.museum +newspaper.museum +newyork.museum +niepce.museum +norfolk.museum +north.museum +nrw.museum +nyc.museum +nyny.museum +oceanographic.museum +oceanographique.museum +omaha.museum +online.museum +ontario.museum +openair.museum +oregon.museum +oregontrail.museum +otago.museum +oxford.museum +pacific.museum +paderborn.museum +palace.museum +paleo.museum +palmsprings.museum +panama.museum +paris.museum +pasadena.museum +pharmacy.museum +philadelphia.museum +philadelphiaarea.museum +philately.museum +phoenix.museum +photography.museum 
+pilots.museum +pittsburgh.museum +planetarium.museum +plantation.museum +plants.museum +plaza.museum +portal.museum +portland.museum +portlligat.museum +posts-and-telecommunications.museum +preservation.museum +presidio.museum +press.museum +project.museum +public.museum +pubol.museum +quebec.museum +railroad.museum +railway.museum +research.museum +resistance.museum +riodejaneiro.museum +rochester.museum +rockart.museum +roma.museum +russia.museum +saintlouis.museum +salem.museum +salvadordali.museum +salzburg.museum +sandiego.museum +sanfrancisco.museum +santabarbara.museum +santacruz.museum +santafe.museum +saskatchewan.museum +satx.museum +savannahga.museum +schlesisches.museum +schoenbrunn.museum +schokoladen.museum +school.museum +schweiz.museum +science.museum +scienceandhistory.museum +scienceandindustry.museum +sciencecenter.museum +sciencecenters.museum +science-fiction.museum +sciencehistory.museum +sciences.museum +sciencesnaturelles.museum +scotland.museum +seaport.museum +settlement.museum +settlers.museum +shell.museum +sherbrooke.museum +sibenik.museum +silk.museum +ski.museum +skole.museum +society.museum +sologne.museum +soundandvision.museum +southcarolina.museum +southwest.museum +space.museum +spy.museum +square.museum +stadt.museum +stalbans.museum +starnberg.museum +state.museum +stateofdelaware.museum +station.museum +steam.museum +steiermark.museum +stjohn.museum +stockholm.museum +stpetersburg.museum +stuttgart.museum +suisse.museum +surgeonshall.museum +surrey.museum +svizzera.museum +sweden.museum +sydney.museum +tank.museum +tcm.museum +technology.museum +telekommunikation.museum +television.museum +texas.museum +textile.museum +theater.museum +time.museum +timekeeping.museum +topology.museum +torino.museum +touch.museum +town.museum +transport.museum +tree.museum +trolley.museum +trust.museum +trustee.museum +uhren.museum +ulm.museum +undersea.museum +university.museum +usa.museum +usantiques.museum +usarts.museum +uscountryestate.museum +usculture.museum +usdecorativearts.museum +usgarden.museum +ushistory.museum +ushuaia.museum +uslivinghistory.museum +utah.museum +uvic.museum +valley.museum +vantaa.museum +versailles.museum +viking.museum +village.museum +virginia.museum +virtual.museum +virtuel.museum +vlaanderen.museum +volkenkunde.museum +wales.museum +wallonie.museum +war.museum +washingtondc.museum +watchandclock.museum +watch-and-clock.museum +western.museum +westfalen.museum +whaling.museum +wildlife.museum +williamsburg.museum +windmill.museum +workshop.museum +york.museum +yorkshire.museum +yosemite.museum +youth.museum +zoological.museum +zoology.museum +ירושלים.museum +иком.museum + +// mv : https://en.wikipedia.org/wiki/.mv +// "mv" included because, contra Wikipedia, google.mv exists. 
+mv +aero.mv +biz.mv +com.mv +coop.mv +edu.mv +gov.mv +info.mv +int.mv +mil.mv +museum.mv +name.mv +net.mv +org.mv +pro.mv + +// mw : http://www.registrar.mw/ +mw +ac.mw +biz.mw +co.mw +com.mw +coop.mw +edu.mw +gov.mw +int.mw +museum.mw +net.mw +org.mw + +// mx : http://www.nic.mx/ +// Submitted by registry +mx +com.mx +org.mx +gob.mx +edu.mx +net.mx + +// my : http://www.mynic.net.my/ +my +com.my +net.my +org.my +gov.my +edu.my +mil.my +name.my + +// mz : http://www.uem.mz/ +// Submitted by registry +mz +ac.mz +adv.mz +co.mz +edu.mz +gov.mz +mil.mz +net.mz +org.mz + +// na : http://www.na-nic.com.na/ +// http://www.info.na/domain/ +na +info.na +pro.na +name.na +school.na +or.na +dr.na +us.na +mx.na +ca.na +in.na +cc.na +tv.na +ws.na +mobi.na +co.na +com.na +org.na + +// name : has 2nd-level tlds, but there's no list of them +name + +// nc : http://www.cctld.nc/ +nc +asso.nc +nom.nc + +// ne : https://en.wikipedia.org/wiki/.ne +ne + +// net : https://en.wikipedia.org/wiki/.net +net + +// nf : https://en.wikipedia.org/wiki/.nf +nf +com.nf +net.nf +per.nf +rec.nf +web.nf +arts.nf +firm.nf +info.nf +other.nf +store.nf + +// ng : http://www.nira.org.ng/index.php/join-us/register-ng-domain/189-nira-slds +ng +com.ng +edu.ng +gov.ng +i.ng +mil.ng +mobi.ng +name.ng +net.ng +org.ng +sch.ng + +// ni : http://www.nic.ni/ +ni +ac.ni +biz.ni +co.ni +com.ni +edu.ni +gob.ni +in.ni +info.ni +int.ni +mil.ni +net.ni +nom.ni +org.ni +web.ni + +// nl : https://en.wikipedia.org/wiki/.nl +// https://www.sidn.nl/ +// ccTLD for the Netherlands +nl + +// no : http://www.norid.no/regelverk/index.en.html +// The Norwegian registry has declined to notify us of updates. The web pages +// referenced below are the official source of the data. There is also an +// announce mailing list: +// https://postlister.uninett.no/sympa/info/norid-diskusjon +no +// Norid generic domains : http://www.norid.no/regelverk/vedlegg-c.en.html +fhs.no +vgs.no +fylkesbibl.no +folkebibl.no +museum.no +idrett.no +priv.no +// Non-Norid generic domains : http://www.norid.no/regelverk/vedlegg-d.en.html +mil.no +stat.no +dep.no +kommune.no +herad.no +// no geographical names : http://www.norid.no/regelverk/vedlegg-b.en.html +// counties +aa.no +ah.no +bu.no +fm.no +hl.no +hm.no +jan-mayen.no +mr.no +nl.no +nt.no +of.no +ol.no +oslo.no +rl.no +sf.no +st.no +svalbard.no +tm.no +tr.no +va.no +vf.no +// primary and lower secondary schools per county +gs.aa.no +gs.ah.no +gs.bu.no +gs.fm.no +gs.hl.no +gs.hm.no +gs.jan-mayen.no +gs.mr.no +gs.nl.no +gs.nt.no +gs.of.no +gs.ol.no +gs.oslo.no +gs.rl.no +gs.sf.no +gs.st.no +gs.svalbard.no +gs.tm.no +gs.tr.no +gs.va.no +gs.vf.no +// cities +akrehamn.no +åkrehamn.no +algard.no +ålgård.no +arna.no +brumunddal.no +bryne.no +bronnoysund.no +brønnøysund.no +drobak.no +drøbak.no +egersund.no +fetsund.no +floro.no +florø.no +fredrikstad.no +hokksund.no +honefoss.no +hønefoss.no +jessheim.no +jorpeland.no +jørpeland.no +kirkenes.no +kopervik.no +krokstadelva.no +langevag.no +langevåg.no +leirvik.no +mjondalen.no +mjøndalen.no +mo-i-rana.no +mosjoen.no +mosjøen.no +nesoddtangen.no +orkanger.no +osoyro.no +osøyro.no +raholt.no +råholt.no +sandnessjoen.no +sandnessjøen.no +skedsmokorset.no +slattum.no +spjelkavik.no +stathelle.no +stavern.no +stjordalshalsen.no +stjørdalshalsen.no +tananger.no +tranby.no +vossevangen.no +// communities +afjord.no +åfjord.no +agdenes.no +al.no +ål.no +alesund.no +ålesund.no +alstahaug.no +alta.no +áltá.no +alaheadju.no +álaheadju.no +alvdal.no +amli.no +åmli.no +amot.no +åmot.no 
+andebu.no +andoy.no +andøy.no +andasuolo.no +ardal.no +årdal.no +aremark.no +arendal.no +ås.no +aseral.no +åseral.no +asker.no +askim.no +askvoll.no +askoy.no +askøy.no +asnes.no +åsnes.no +audnedaln.no +aukra.no +aure.no +aurland.no +aurskog-holand.no +aurskog-høland.no +austevoll.no +austrheim.no +averoy.no +averøy.no +balestrand.no +ballangen.no +balat.no +bálát.no +balsfjord.no +bahccavuotna.no +báhccavuotna.no +bamble.no +bardu.no +beardu.no +beiarn.no +bajddar.no +bájddar.no +baidar.no +báidár.no +berg.no +bergen.no +berlevag.no +berlevåg.no +bearalvahki.no +bearalváhki.no +bindal.no +birkenes.no +bjarkoy.no +bjarkøy.no +bjerkreim.no +bjugn.no +bodo.no +bodø.no +badaddja.no +bådåddjå.no +budejju.no +bokn.no +bremanger.no +bronnoy.no +brønnøy.no +bygland.no +bykle.no +barum.no +bærum.no +bo.telemark.no +bø.telemark.no +bo.nordland.no +bø.nordland.no +bievat.no +bievát.no +bomlo.no +bømlo.no +batsfjord.no +båtsfjord.no +bahcavuotna.no +báhcavuotna.no +dovre.no +drammen.no +drangedal.no +dyroy.no +dyrøy.no +donna.no +dønna.no +eid.no +eidfjord.no +eidsberg.no +eidskog.no +eidsvoll.no +eigersund.no +elverum.no +enebakk.no +engerdal.no +etne.no +etnedal.no +evenes.no +evenassi.no +evenášši.no +evje-og-hornnes.no +farsund.no +fauske.no +fuossko.no +fuoisku.no +fedje.no +fet.no +finnoy.no +finnøy.no +fitjar.no +fjaler.no +fjell.no +flakstad.no +flatanger.no +flekkefjord.no +flesberg.no +flora.no +fla.no +flå.no +folldal.no +forsand.no +fosnes.no +frei.no +frogn.no +froland.no +frosta.no +frana.no +fræna.no +froya.no +frøya.no +fusa.no +fyresdal.no +forde.no +førde.no +gamvik.no +gangaviika.no +gáŋgaviika.no +gaular.no +gausdal.no +gildeskal.no +gildeskål.no +giske.no +gjemnes.no +gjerdrum.no +gjerstad.no +gjesdal.no +gjovik.no +gjøvik.no +gloppen.no +gol.no +gran.no +grane.no +granvin.no +gratangen.no +grimstad.no +grong.no +kraanghke.no +kråanghke.no +grue.no +gulen.no +hadsel.no +halden.no +halsa.no +hamar.no +hamaroy.no +habmer.no +hábmer.no +hapmir.no +hápmir.no +hammerfest.no +hammarfeasta.no +hámmárfeasta.no +haram.no +hareid.no +harstad.no +hasvik.no +aknoluokta.no +ákŋoluokta.no +hattfjelldal.no +aarborte.no +haugesund.no +hemne.no +hemnes.no +hemsedal.no +heroy.more-og-romsdal.no +herøy.møre-og-romsdal.no +heroy.nordland.no +herøy.nordland.no +hitra.no +hjartdal.no +hjelmeland.no +hobol.no +hobøl.no +hof.no +hol.no +hole.no +holmestrand.no +holtalen.no +holtålen.no +hornindal.no +horten.no +hurdal.no +hurum.no +hvaler.no +hyllestad.no +hagebostad.no +hægebostad.no +hoyanger.no +høyanger.no +hoylandet.no +høylandet.no +ha.no +hå.no +ibestad.no +inderoy.no +inderøy.no +iveland.no +jevnaker.no +jondal.no +jolster.no +jølster.no +karasjok.no +karasjohka.no +kárášjohka.no +karlsoy.no +galsa.no +gálsá.no +karmoy.no +karmøy.no +kautokeino.no +guovdageaidnu.no +klepp.no +klabu.no +klæbu.no +kongsberg.no +kongsvinger.no +kragero.no +kragerø.no +kristiansand.no +kristiansund.no +krodsherad.no +krødsherad.no +kvalsund.no +rahkkeravju.no +ráhkkerávju.no +kvam.no +kvinesdal.no +kvinnherad.no +kviteseid.no +kvitsoy.no +kvitsøy.no +kvafjord.no +kvæfjord.no +giehtavuoatna.no +kvanangen.no +kvænangen.no +navuotna.no +návuotna.no +kafjord.no +kåfjord.no +gaivuotna.no +gáivuotna.no +larvik.no +lavangen.no +lavagis.no +loabat.no +loabát.no +lebesby.no +davvesiida.no +leikanger.no +leirfjord.no +leka.no +leksvik.no +lenvik.no +leangaviika.no +leaŋgaviika.no +lesja.no +levanger.no +lier.no +lierne.no +lillehammer.no +lillesand.no +lindesnes.no +lindas.no +lindås.no +lom.no +loppa.no +lahppi.no 
+láhppi.no +lund.no +lunner.no +luroy.no +lurøy.no +luster.no +lyngdal.no +lyngen.no +ivgu.no +lardal.no +lerdal.no +lærdal.no +lodingen.no +lødingen.no +lorenskog.no +lørenskog.no +loten.no +løten.no +malvik.no +masoy.no +måsøy.no +muosat.no +muosát.no +mandal.no +marker.no +marnardal.no +masfjorden.no +meland.no +meldal.no +melhus.no +meloy.no +meløy.no +meraker.no +meråker.no +moareke.no +moåreke.no +midsund.no +midtre-gauldal.no +modalen.no +modum.no +molde.no +moskenes.no +moss.no +mosvik.no +malselv.no +målselv.no +malatvuopmi.no +málatvuopmi.no +namdalseid.no +aejrie.no +namsos.no +namsskogan.no +naamesjevuemie.no +nååmesjevuemie.no +laakesvuemie.no +nannestad.no +narvik.no +narviika.no +naustdal.no +nedre-eiker.no +nes.akershus.no +nes.buskerud.no +nesna.no +nesodden.no +nesseby.no +unjarga.no +unjárga.no +nesset.no +nissedal.no +nittedal.no +nord-aurdal.no +nord-fron.no +nord-odal.no +norddal.no +nordkapp.no +davvenjarga.no +davvenjárga.no +nordre-land.no +nordreisa.no +raisa.no +ráisa.no +nore-og-uvdal.no +notodden.no +naroy.no +nærøy.no +notteroy.no +nøtterøy.no +odda.no +oksnes.no +øksnes.no +oppdal.no +oppegard.no +oppegård.no +orkdal.no +orland.no +ørland.no +orskog.no +ørskog.no +orsta.no +ørsta.no +os.hedmark.no +os.hordaland.no +osen.no +osteroy.no +osterøy.no +ostre-toten.no +østre-toten.no +overhalla.no +ovre-eiker.no +øvre-eiker.no +oyer.no +øyer.no +oygarden.no +øygarden.no +oystre-slidre.no +øystre-slidre.no +porsanger.no +porsangu.no +porsáŋgu.no +porsgrunn.no +radoy.no +radøy.no +rakkestad.no +rana.no +ruovat.no +randaberg.no +rauma.no +rendalen.no +rennebu.no +rennesoy.no +rennesøy.no +rindal.no +ringebu.no +ringerike.no +ringsaker.no +rissa.no +risor.no +risør.no +roan.no +rollag.no +rygge.no +ralingen.no +rælingen.no +rodoy.no +rødøy.no +romskog.no +rømskog.no +roros.no +røros.no +rost.no +røst.no +royken.no +røyken.no +royrvik.no +røyrvik.no +rade.no +råde.no +salangen.no +siellak.no +saltdal.no +salat.no +sálát.no +sálat.no +samnanger.no +sande.more-og-romsdal.no +sande.møre-og-romsdal.no +sande.vestfold.no +sandefjord.no +sandnes.no +sandoy.no +sandøy.no +sarpsborg.no +sauda.no +sauherad.no +sel.no +selbu.no +selje.no +seljord.no +sigdal.no +siljan.no +sirdal.no +skaun.no +skedsmo.no +ski.no +skien.no +skiptvet.no +skjervoy.no +skjervøy.no +skierva.no +skiervá.no +skjak.no +skjåk.no +skodje.no +skanland.no +skånland.no +skanit.no +skánit.no +smola.no +smøla.no +snillfjord.no +snasa.no +snåsa.no +snoasa.no +snaase.no +snåase.no +sogndal.no +sokndal.no +sola.no +solund.no +songdalen.no +sortland.no +spydeberg.no +stange.no +stavanger.no +steigen.no +steinkjer.no +stjordal.no +stjørdal.no +stokke.no +stor-elvdal.no +stord.no +stordal.no +storfjord.no +omasvuotna.no +strand.no +stranda.no +stryn.no +sula.no +suldal.no +sund.no +sunndal.no +surnadal.no +sveio.no +svelvik.no +sykkylven.no +sogne.no +søgne.no +somna.no +sømna.no +sondre-land.no +søndre-land.no +sor-aurdal.no +sør-aurdal.no +sor-fron.no +sør-fron.no +sor-odal.no +sør-odal.no +sor-varanger.no +sør-varanger.no +matta-varjjat.no +mátta-várjjat.no +sorfold.no +sørfold.no +sorreisa.no +sørreisa.no +sorum.no +sørum.no +tana.no +deatnu.no +time.no +tingvoll.no +tinn.no +tjeldsund.no +dielddanuorri.no +tjome.no +tjøme.no +tokke.no +tolga.no +torsken.no +tranoy.no +tranøy.no +tromso.no +tromsø.no +tromsa.no +romsa.no +trondheim.no +troandin.no +trysil.no +trana.no +træna.no +trogstad.no +trøgstad.no +tvedestrand.no +tydal.no +tynset.no +tysfjord.no +divtasvuodna.no +divttasvuotna.no +tysnes.no +tysvar.no 
+tysvær.no +tonsberg.no +tønsberg.no +ullensaker.no +ullensvang.no +ulvik.no +utsira.no +vadso.no +vadsø.no +cahcesuolo.no +čáhcesuolo.no +vaksdal.no +valle.no +vang.no +vanylven.no +vardo.no +vardø.no +varggat.no +várggát.no +vefsn.no +vaapste.no +vega.no +vegarshei.no +vegårshei.no +vennesla.no +verdal.no +verran.no +vestby.no +vestnes.no +vestre-slidre.no +vestre-toten.no +vestvagoy.no +vestvågøy.no +vevelstad.no +vik.no +vikna.no +vindafjord.no +volda.no +voss.no +varoy.no +værøy.no +vagan.no +vågan.no +voagat.no +vagsoy.no +vågsøy.no +vaga.no +vågå.no +valer.ostfold.no +våler.østfold.no +valer.hedmark.no +våler.hedmark.no + +// np : http://www.mos.com.np/register.html +*.np + +// nr : http://cenpac.net.nr/dns/index.html +// Submitted by registry +nr +biz.nr +info.nr +gov.nr +edu.nr +org.nr +net.nr +com.nr + +// nu : https://en.wikipedia.org/wiki/.nu +nu + +// nz : https://en.wikipedia.org/wiki/.nz +// Submitted by registry +nz +ac.nz +co.nz +cri.nz +geek.nz +gen.nz +govt.nz +health.nz +iwi.nz +kiwi.nz +maori.nz +mil.nz +māori.nz +net.nz +org.nz +parliament.nz +school.nz + +// om : https://en.wikipedia.org/wiki/.om +om +co.om +com.om +edu.om +gov.om +med.om +museum.om +net.om +org.om +pro.om + +// onion : https://tools.ietf.org/html/rfc7686 +onion + +// org : https://en.wikipedia.org/wiki/.org +org + +// pa : http://www.nic.pa/ +// Some additional second level "domains" resolve directly as hostnames, such as +// pannet.pa, so we add a rule for "pa". +pa +ac.pa +gob.pa +com.pa +org.pa +sld.pa +edu.pa +net.pa +ing.pa +abo.pa +med.pa +nom.pa + +// pe : https://www.nic.pe/InformeFinalComision.pdf +pe +edu.pe +gob.pe +nom.pe +mil.pe +org.pe +com.pe +net.pe + +// pf : http://www.gobin.info/domainname/formulaire-pf.pdf +pf +com.pf +org.pf +edu.pf + +// pg : https://en.wikipedia.org/wiki/.pg +*.pg + +// ph : http://www.domains.ph/FAQ2.asp +// Submitted by registry +ph +com.ph +net.ph +org.ph +gov.ph +edu.ph +ngo.ph +mil.ph +i.ph + +// pk : http://pk5.pknic.net.pk/pk5/msgNamepk.PK +pk +com.pk +net.pk +edu.pk +org.pk +fam.pk +biz.pk +web.pk +gov.pk +gob.pk +gok.pk +gon.pk +gop.pk +gos.pk +info.pk + +// pl http://www.dns.pl/english/index.html +// Submitted by registry +pl +com.pl +net.pl +org.pl +// pl functional domains (http://www.dns.pl/english/index.html) +aid.pl +agro.pl +atm.pl +auto.pl +biz.pl +edu.pl +gmina.pl +gsm.pl +info.pl +mail.pl +miasta.pl +media.pl +mil.pl +nieruchomosci.pl +nom.pl +pc.pl +powiat.pl +priv.pl +realestate.pl +rel.pl +sex.pl +shop.pl +sklep.pl +sos.pl +szkola.pl +targi.pl +tm.pl +tourism.pl +travel.pl +turystyka.pl +// Government domains +gov.pl +ap.gov.pl +ic.gov.pl +is.gov.pl +us.gov.pl +kmpsp.gov.pl +kppsp.gov.pl +kwpsp.gov.pl +psp.gov.pl +wskr.gov.pl +kwp.gov.pl +mw.gov.pl +ug.gov.pl +um.gov.pl +umig.gov.pl +ugim.gov.pl +upow.gov.pl +uw.gov.pl +starostwo.gov.pl +pa.gov.pl +po.gov.pl +psse.gov.pl +pup.gov.pl +rzgw.gov.pl +sa.gov.pl +so.gov.pl +sr.gov.pl +wsa.gov.pl +sko.gov.pl +uzs.gov.pl +wiih.gov.pl +winb.gov.pl +pinb.gov.pl +wios.gov.pl +witd.gov.pl +wzmiuw.gov.pl +piw.gov.pl +wiw.gov.pl +griw.gov.pl +wif.gov.pl +oum.gov.pl +sdn.gov.pl +zp.gov.pl +uppo.gov.pl +mup.gov.pl +wuoz.gov.pl +konsulat.gov.pl +oirm.gov.pl +// pl regional domains (http://www.dns.pl/english/index.html) +augustow.pl +babia-gora.pl +bedzin.pl +beskidy.pl +bialowieza.pl +bialystok.pl +bielawa.pl +bieszczady.pl +boleslawiec.pl +bydgoszcz.pl +bytom.pl +cieszyn.pl +czeladz.pl +czest.pl +dlugoleka.pl +elblag.pl +elk.pl +glogow.pl +gniezno.pl +gorlice.pl +grajewo.pl +ilawa.pl +jaworzno.pl 
+jelenia-gora.pl +jgora.pl +kalisz.pl +kazimierz-dolny.pl +karpacz.pl +kartuzy.pl +kaszuby.pl +katowice.pl +kepno.pl +ketrzyn.pl +klodzko.pl +kobierzyce.pl +kolobrzeg.pl +konin.pl +konskowola.pl +kutno.pl +lapy.pl +lebork.pl +legnica.pl +lezajsk.pl +limanowa.pl +lomza.pl +lowicz.pl +lubin.pl +lukow.pl +malbork.pl +malopolska.pl +mazowsze.pl +mazury.pl +mielec.pl +mielno.pl +mragowo.pl +naklo.pl +nowaruda.pl +nysa.pl +olawa.pl +olecko.pl +olkusz.pl +olsztyn.pl +opoczno.pl +opole.pl +ostroda.pl +ostroleka.pl +ostrowiec.pl +ostrowwlkp.pl +pila.pl +pisz.pl +podhale.pl +podlasie.pl +polkowice.pl +pomorze.pl +pomorskie.pl +prochowice.pl +pruszkow.pl +przeworsk.pl +pulawy.pl +radom.pl +rawa-maz.pl +rybnik.pl +rzeszow.pl +sanok.pl +sejny.pl +slask.pl +slupsk.pl +sosnowiec.pl +stalowa-wola.pl +skoczow.pl +starachowice.pl +stargard.pl +suwalki.pl +swidnica.pl +swiebodzin.pl +swinoujscie.pl +szczecin.pl +szczytno.pl +tarnobrzeg.pl +tgory.pl +turek.pl +tychy.pl +ustka.pl +walbrzych.pl +warmia.pl +warszawa.pl +waw.pl +wegrow.pl +wielun.pl +wlocl.pl +wloclawek.pl +wodzislaw.pl +wolomin.pl +wroclaw.pl +zachpomor.pl +zagan.pl +zarow.pl +zgora.pl +zgorzelec.pl + +// pm : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +pm + +// pn : http://www.government.pn/PnRegistry/policies.htm +pn +gov.pn +co.pn +org.pn +edu.pn +net.pn + +// post : https://en.wikipedia.org/wiki/.post +post + +// pr : http://www.nic.pr/index.asp?f=1 +pr +com.pr +net.pr +org.pr +gov.pr +edu.pr +isla.pr +pro.pr +biz.pr +info.pr +name.pr +// these aren't mentioned on nic.pr, but on https://en.wikipedia.org/wiki/.pr +est.pr +prof.pr +ac.pr + +// pro : http://registry.pro/get-pro +pro +aaa.pro +aca.pro +acct.pro +avocat.pro +bar.pro +cpa.pro +eng.pro +jur.pro +law.pro +med.pro +recht.pro + +// ps : https://en.wikipedia.org/wiki/.ps +// http://www.nic.ps/registration/policy.html#reg +ps +edu.ps +gov.ps +sec.ps +plo.ps +com.ps +org.ps +net.ps + +// pt : http://online.dns.pt/dns/start_dns +pt +net.pt +gov.pt +org.pt +edu.pt +int.pt +publ.pt +com.pt +nome.pt + +// pw : https://en.wikipedia.org/wiki/.pw +pw +co.pw +ne.pw +or.pw +ed.pw +go.pw +belau.pw + +// py : http://www.nic.py/pautas.html#seccion_9 +// Submitted by registry +py +com.py +coop.py +edu.py +gov.py +mil.py +net.py +org.py + +// qa : http://domains.qa/en/ +qa +com.qa +edu.qa +gov.qa +mil.qa +name.qa +net.qa +org.qa +sch.qa + +// re : http://www.afnic.re/obtenir/chartes/nommage-re/annexe-descriptifs +re +asso.re +com.re +nom.re + +// ro : http://www.rotld.ro/ +ro +arts.ro +com.ro +firm.ro +info.ro +nom.ro +nt.ro +org.ro +rec.ro +store.ro +tm.ro +www.ro + +// rs : https://www.rnids.rs/en/domains/national-domains +rs +ac.rs +co.rs +edu.rs +gov.rs +in.rs +org.rs + +// ru : https://cctld.ru/files/pdf/docs/en/rules_ru-rf.pdf +// Submitted by George Georgievsky +ru + +// rw : https://www.ricta.org.rw/sites/default/files/resources/registry_registrar_contract_0.pdf +rw +ac.rw +co.rw +coop.rw +gov.rw +mil.rw +net.rw +org.rw + +// sa : http://www.nic.net.sa/ +sa +com.sa +net.sa +org.sa +gov.sa +med.sa +pub.sa +edu.sa +sch.sa + +// sb : http://www.sbnic.net.sb/ +// Submitted by registry +sb +com.sb +edu.sb +gov.sb +net.sb +org.sb + +// sc : http://www.nic.sc/ +sc +com.sc +gov.sc +net.sc +org.sc +edu.sc + +// sd : http://www.isoc.sd/sudanic.isoc.sd/billing_pricing.htm +// Submitted by registry +sd +com.sd +net.sd +org.sd +edu.sd +med.sd +tv.sd +gov.sd +info.sd + +// se : https://en.wikipedia.org/wiki/.se +// Submitted by registry +se +a.se +ac.se +b.se +bd.se +brand.se +c.se 
+d.se +e.se +f.se +fh.se +fhsk.se +fhv.se +g.se +h.se +i.se +k.se +komforb.se +kommunalforbund.se +komvux.se +l.se +lanbib.se +m.se +n.se +naturbruksgymn.se +o.se +org.se +p.se +parti.se +pp.se +press.se +r.se +s.se +t.se +tm.se +u.se +w.se +x.se +y.se +z.se + +// sg : http://www.nic.net.sg/page/registration-policies-procedures-and-guidelines +sg +com.sg +net.sg +org.sg +gov.sg +edu.sg +per.sg + +// sh : http://www.nic.sh/registrar.html +sh +com.sh +net.sh +gov.sh +org.sh +mil.sh + +// si : https://en.wikipedia.org/wiki/.si +si + +// sj : No registrations at this time. +// Submitted by registry +sj + +// sk : https://en.wikipedia.org/wiki/.sk +// list of 2nd level domains ? +sk + +// sl : http://www.nic.sl +// Submitted by registry +sl +com.sl +net.sl +edu.sl +gov.sl +org.sl + +// sm : https://en.wikipedia.org/wiki/.sm +sm + +// sn : https://en.wikipedia.org/wiki/.sn +sn +art.sn +com.sn +edu.sn +gouv.sn +org.sn +perso.sn +univ.sn + +// so : http://sonic.so/policies/ +so +com.so +edu.so +gov.so +me.so +net.so +org.so + +// sr : https://en.wikipedia.org/wiki/.sr +sr + +// ss : https://registry.nic.ss/ +// Submitted by registry +ss +biz.ss +com.ss +edu.ss +gov.ss +net.ss +org.ss + +// st : http://www.nic.st/html/policyrules/ +st +co.st +com.st +consulado.st +edu.st +embaixada.st +gov.st +mil.st +net.st +org.st +principe.st +saotome.st +store.st + +// su : https://en.wikipedia.org/wiki/.su +su + +// sv : http://www.svnet.org.sv/niveldos.pdf +sv +com.sv +edu.sv +gob.sv +org.sv +red.sv + +// sx : https://en.wikipedia.org/wiki/.sx +// Submitted by registry +sx +gov.sx + +// sy : https://en.wikipedia.org/wiki/.sy +// see also: http://www.gobin.info/domainname/sy.doc +sy +edu.sy +gov.sy +net.sy +mil.sy +com.sy +org.sy + +// sz : https://en.wikipedia.org/wiki/.sz +// http://www.sispa.org.sz/ +sz +co.sz +ac.sz +org.sz + +// tc : https://en.wikipedia.org/wiki/.tc +tc + +// td : https://en.wikipedia.org/wiki/.td +td + +// tel: https://en.wikipedia.org/wiki/.tel +// http://www.telnic.org/ +tel + +// tf : https://en.wikipedia.org/wiki/.tf +tf + +// tg : https://en.wikipedia.org/wiki/.tg +// http://www.nic.tg/ +tg + +// th : https://en.wikipedia.org/wiki/.th +// Submitted by registry +th +ac.th +co.th +go.th +in.th +mi.th +net.th +or.th + +// tj : http://www.nic.tj/policy.html +tj +ac.tj +biz.tj +co.tj +com.tj +edu.tj +go.tj +gov.tj +int.tj +mil.tj +name.tj +net.tj +nic.tj +org.tj +test.tj +web.tj + +// tk : https://en.wikipedia.org/wiki/.tk +tk + +// tl : https://en.wikipedia.org/wiki/.tl +tl +gov.tl + +// tm : http://www.nic.tm/local.html +tm +com.tm +co.tm +org.tm +net.tm +nom.tm +gov.tm +mil.tm +edu.tm + +// tn : https://en.wikipedia.org/wiki/.tn +// http://whois.ati.tn/ +tn +com.tn +ens.tn +fin.tn +gov.tn +ind.tn +intl.tn +nat.tn +net.tn +org.tn +info.tn +perso.tn +tourism.tn +edunet.tn +rnrt.tn +rns.tn +rnu.tn +mincom.tn +agrinet.tn +defense.tn +turen.tn + +// to : https://en.wikipedia.org/wiki/.to +// Submitted by registry +to +com.to +gov.to +net.to +org.to +edu.to +mil.to + +// tr : https://nic.tr/ +// https://nic.tr/forms/eng/policies.pdf +// https://nic.tr/index.php?USRACTN=PRICELST +tr +av.tr +bbs.tr +bel.tr +biz.tr +com.tr +dr.tr +edu.tr +gen.tr +gov.tr +info.tr +mil.tr +k12.tr +kep.tr +name.tr +net.tr +org.tr +pol.tr +tel.tr +tsk.tr +tv.tr +web.tr +// Used by Northern Cyprus +nc.tr +// Used by government agencies of Northern Cyprus +gov.nc.tr + +// tt : http://www.nic.tt/ +tt +co.tt +com.tt +org.tt +net.tt +biz.tt +info.tt +pro.tt +int.tt +coop.tt +jobs.tt +mobi.tt +travel.tt +museum.tt 
+aero.tt +name.tt +gov.tt +edu.tt + +// tv : https://en.wikipedia.org/wiki/.tv +// Not listing any 2LDs as reserved since none seem to exist in practice, +// Wikipedia notwithstanding. +tv + +// tw : https://en.wikipedia.org/wiki/.tw +tw +edu.tw +gov.tw +mil.tw +com.tw +net.tw +org.tw +idv.tw +game.tw +ebiz.tw +club.tw +網路.tw +組織.tw +商業.tw + +// tz : http://www.tznic.or.tz/index.php/domains +// Submitted by registry +tz +ac.tz +co.tz +go.tz +hotel.tz +info.tz +me.tz +mil.tz +mobi.tz +ne.tz +or.tz +sc.tz +tv.tz + +// ua : https://hostmaster.ua/policy/?ua +// Submitted by registry +ua +// ua 2LD +com.ua +edu.ua +gov.ua +in.ua +net.ua +org.ua +// ua geographic names +// https://hostmaster.ua/2ld/ +cherkassy.ua +cherkasy.ua +chernigov.ua +chernihiv.ua +chernivtsi.ua +chernovtsy.ua +ck.ua +cn.ua +cr.ua +crimea.ua +cv.ua +dn.ua +dnepropetrovsk.ua +dnipropetrovsk.ua +donetsk.ua +dp.ua +if.ua +ivano-frankivsk.ua +kh.ua +kharkiv.ua +kharkov.ua +kherson.ua +khmelnitskiy.ua +khmelnytskyi.ua +kiev.ua +kirovograd.ua +km.ua +kr.ua +krym.ua +ks.ua +kv.ua +kyiv.ua +lg.ua +lt.ua +lugansk.ua +lutsk.ua +lv.ua +lviv.ua +mk.ua +mykolaiv.ua +nikolaev.ua +od.ua +odesa.ua +odessa.ua +pl.ua +poltava.ua +rivne.ua +rovno.ua +rv.ua +sb.ua +sebastopol.ua +sevastopol.ua +sm.ua +sumy.ua +te.ua +ternopil.ua +uz.ua +uzhgorod.ua +vinnica.ua +vinnytsia.ua +vn.ua +volyn.ua +yalta.ua +zaporizhzhe.ua +zaporizhzhia.ua +zhitomir.ua +zhytomyr.ua +zp.ua +zt.ua + +// ug : https://www.registry.co.ug/ +ug +co.ug +or.ug +ac.ug +sc.ug +go.ug +ne.ug +com.ug +org.ug + +// uk : https://en.wikipedia.org/wiki/.uk +// Submitted by registry +uk +ac.uk +co.uk +gov.uk +ltd.uk +me.uk +net.uk +nhs.uk +org.uk +plc.uk +police.uk +*.sch.uk + +// us : https://en.wikipedia.org/wiki/.us +us +dni.us +fed.us +isa.us +kids.us +nsn.us +// us geographic names +ak.us +al.us +ar.us +as.us +az.us +ca.us +co.us +ct.us +dc.us +de.us +fl.us +ga.us +gu.us +hi.us +ia.us +id.us +il.us +in.us +ks.us +ky.us +la.us +ma.us +md.us +me.us +mi.us +mn.us +mo.us +ms.us +mt.us +nc.us +nd.us +ne.us +nh.us +nj.us +nm.us +nv.us +ny.us +oh.us +ok.us +or.us +pa.us +pr.us +ri.us +sc.us +sd.us +tn.us +tx.us +ut.us +vi.us +vt.us +va.us +wa.us +wi.us +wv.us +wy.us +// The registrar notes several more specific domains available in each state, +// such as state.*.us, dst.*.us, etc., but resolution of these is somewhat +// haphazard; in some states these domains resolve as addresses, while in others +// only subdomains are available, or even nothing at all. We include the +// most common ones where it's clear that different sites are different +// entities. 
+k12.ak.us +k12.al.us +k12.ar.us +k12.as.us +k12.az.us +k12.ca.us +k12.co.us +k12.ct.us +k12.dc.us +k12.de.us +k12.fl.us +k12.ga.us +k12.gu.us +// k12.hi.us Bug 614565 - Hawaii has a state-wide DOE login +k12.ia.us +k12.id.us +k12.il.us +k12.in.us +k12.ks.us +k12.ky.us +k12.la.us +k12.ma.us +k12.md.us +k12.me.us +k12.mi.us +k12.mn.us +k12.mo.us +k12.ms.us +k12.mt.us +k12.nc.us +// k12.nd.us Bug 1028347 - Removed at request of Travis Rosso +k12.ne.us +k12.nh.us +k12.nj.us +k12.nm.us +k12.nv.us +k12.ny.us +k12.oh.us +k12.ok.us +k12.or.us +k12.pa.us +k12.pr.us +// k12.ri.us Removed at request of Kim Cournoyer +k12.sc.us +// k12.sd.us Bug 934131 - Removed at request of James Booze +k12.tn.us +k12.tx.us +k12.ut.us +k12.vi.us +k12.vt.us +k12.va.us +k12.wa.us +k12.wi.us +// k12.wv.us Bug 947705 - Removed at request of Verne Britton +k12.wy.us +cc.ak.us +cc.al.us +cc.ar.us +cc.as.us +cc.az.us +cc.ca.us +cc.co.us +cc.ct.us +cc.dc.us +cc.de.us +cc.fl.us +cc.ga.us +cc.gu.us +cc.hi.us +cc.ia.us +cc.id.us +cc.il.us +cc.in.us +cc.ks.us +cc.ky.us +cc.la.us +cc.ma.us +cc.md.us +cc.me.us +cc.mi.us +cc.mn.us +cc.mo.us +cc.ms.us +cc.mt.us +cc.nc.us +cc.nd.us +cc.ne.us +cc.nh.us +cc.nj.us +cc.nm.us +cc.nv.us +cc.ny.us +cc.oh.us +cc.ok.us +cc.or.us +cc.pa.us +cc.pr.us +cc.ri.us +cc.sc.us +cc.sd.us +cc.tn.us +cc.tx.us +cc.ut.us +cc.vi.us +cc.vt.us +cc.va.us +cc.wa.us +cc.wi.us +cc.wv.us +cc.wy.us +lib.ak.us +lib.al.us +lib.ar.us +lib.as.us +lib.az.us +lib.ca.us +lib.co.us +lib.ct.us +lib.dc.us +// lib.de.us Issue #243 - Moved to Private section at request of Ed Moore +lib.fl.us +lib.ga.us +lib.gu.us +lib.hi.us +lib.ia.us +lib.id.us +lib.il.us +lib.in.us +lib.ks.us +lib.ky.us +lib.la.us +lib.ma.us +lib.md.us +lib.me.us +lib.mi.us +lib.mn.us +lib.mo.us +lib.ms.us +lib.mt.us +lib.nc.us +lib.nd.us +lib.ne.us +lib.nh.us +lib.nj.us +lib.nm.us +lib.nv.us +lib.ny.us +lib.oh.us +lib.ok.us +lib.or.us +lib.pa.us +lib.pr.us +lib.ri.us +lib.sc.us +lib.sd.us +lib.tn.us +lib.tx.us +lib.ut.us +lib.vi.us +lib.vt.us +lib.va.us +lib.wa.us +lib.wi.us +// lib.wv.us Bug 941670 - Removed at request of Larry W Arnold +lib.wy.us +// k12.ma.us contains school districts in Massachusetts. The 4LDs are +// managed independently except for private (PVT), charter (CHTR) and +// parochial (PAROCH) schools. Those are delegated directly to the +// 5LD operators. +pvt.k12.ma.us +chtr.k12.ma.us +paroch.k12.ma.us +// Merit Network, Inc. maintains the registry for =~ /(k12|cc|lib).mi.us/ and the following +// see also: http://domreg.merit.edu +// see also: whois -h whois.domreg.merit.edu help +ann-arbor.mi.us +cog.mi.us +dst.mi.us +eaton.mi.us +gen.mi.us +mus.mi.us +tec.mi.us +washtenaw.mi.us + +// uy : http://www.nic.org.uy/ +uy +com.uy +edu.uy +gub.uy +mil.uy +net.uy +org.uy + +// uz : http://www.reg.uz/ +uz +co.uz +com.uz +net.uz +org.uz + +// va : https://en.wikipedia.org/wiki/.va +va + +// vc : https://en.wikipedia.org/wiki/.vc +// Submitted by registry +vc +com.vc +net.vc +org.vc +gov.vc +mil.vc +edu.vc + +// ve : https://registro.nic.ve/ +// Submitted by registry +ve +arts.ve +co.ve +com.ve +e12.ve +edu.ve +firm.ve +gob.ve +gov.ve +info.ve +int.ve +mil.ve +net.ve +org.ve +rec.ve +store.ve +tec.ve +web.ve + +// vg : https://en.wikipedia.org/wiki/.vg +vg + +// vi : http://www.nic.vi/newdomainform.htm +// http://www.nic.vi/Domain_Rules/body_domain_rules.html indicates some other +// TLDs are "reserved", such as edu.vi and gov.vi, but doesn't actually say they +// are available for registration (which they do not seem to be). 
+vi +co.vi +com.vi +k12.vi +net.vi +org.vi + +// vn : https://www.dot.vn/vnnic/vnnic/domainregistration.jsp +vn +com.vn +net.vn +org.vn +edu.vn +gov.vn +int.vn +ac.vn +biz.vn +info.vn +name.vn +pro.vn +health.vn + +// vu : https://en.wikipedia.org/wiki/.vu +// http://www.vunic.vu/ +vu +com.vu +edu.vu +net.vu +org.vu + +// wf : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +wf + +// ws : https://en.wikipedia.org/wiki/.ws +// http://samoanic.ws/index.dhtml +ws +com.ws +net.ws +org.ws +gov.ws +edu.ws + +// yt : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +yt + +// IDN ccTLDs +// When submitting patches, please maintain a sort by ISO 3166 ccTLD, then +// U-label, and follow this format: +// // A-Label ("", [, variant info]) : +// // [sponsoring org] +// U-Label + +// xn--mgbaam7a8h ("Emerat", Arabic) : AE +// http://nic.ae/english/arabicdomain/rules.jsp +امارات + +// xn--y9a3aq ("hye", Armenian) : AM +// ISOC AM (operated by .am Registry) +հայ + +// xn--54b7fta0cc ("Bangla", Bangla) : BD +বাংলা + +// xn--90ae ("bg", Bulgarian) : BG +бг + +// xn--90ais ("bel", Belarusian/Russian Cyrillic) : BY +// Operated by .by registry +бел + +// xn--fiqs8s ("Zhongguo/China", Chinese, Simplified) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中国 + +// xn--fiqz9s ("Zhongguo/China", Chinese, Traditional) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中國 + +// xn--lgbbat1ad8j ("Algeria/Al Jazair", Arabic) : DZ +الجزائر + +// xn--wgbh1c ("Egypt/Masr", Arabic) : EG +// http://www.dotmasr.eg/ +مصر + +// xn--e1a4c ("eu", Cyrillic) : EU +// https://eurid.eu +ею + +// xn--qxa6a ("eu", Greek) : EU +// https://eurid.eu +ευ + +// xn--mgbah1a3hjkrd ("Mauritania", Arabic) : MR +موريتانيا + +// xn--node ("ge", Georgian Mkhedruli) : GE +გე + +// xn--qxam ("el", Greek) : GR +// Hellenic Ministry of Infrastructure, Transport, and Networks +ελ + +// xn--j6w193g ("Hong Kong", Chinese) : HK +// https://www.hkirc.hk +// Submitted by registry +// https://www.hkirc.hk/content.jsp?id=30#!/34 +香港 +公司.香港 +教育.香港 +政府.香港 +個人.香港 +網絡.香港 +組織.香港 + +// xn--2scrj9c ("Bharat", Kannada) : IN +// India +ಭಾರತ + +// xn--3hcrj9c ("Bharat", Oriya) : IN +// India +ଭାରତ + +// xn--45br5cyl ("Bharatam", Assamese) : IN +// India +ভাৰত + +// xn--h2breg3eve ("Bharatam", Sanskrit) : IN +// India +भारतम् + +// xn--h2brj9c8c ("Bharot", Santali) : IN +// India +भारोत + +// xn--mgbgu82a ("Bharat", Sindhi) : IN +// India +ڀارت + +// xn--rvc1e0am3e ("Bharatam", Malayalam) : IN +// India +ഭാരതം + +// xn--h2brj9c ("Bharat", Devanagari) : IN +// India +भारत + +// xn--mgbbh1a ("Bharat", Kashmiri) : IN +// India +بارت + +// xn--mgbbh1a71e ("Bharat", Arabic) : IN +// India +بھارت + +// xn--fpcrj9c3d ("Bharat", Telugu) : IN +// India +భారత్ + +// xn--gecrj9c ("Bharat", Gujarati) : IN +// India +ભારત + +// xn--s9brj9c ("Bharat", Gurmukhi) : IN +// India +ਭਾਰਤ + +// xn--45brj9c ("Bharat", Bengali) : IN +// India +ভারত + +// xn--xkc2dl3a5ee0h ("India", Tamil) : IN +// India +இந்தியா + +// xn--mgba3a4f16a ("Iran", Persian) : IR +ایران + +// xn--mgba3a4fra ("Iran", Arabic) : IR +ايران + +// xn--mgbtx2b ("Iraq", Arabic) : IQ +// Communications and Media Commission +عراق + +// xn--mgbayh7gpa ("al-Ordon", Arabic) : JO +// National Information Technology Center (NITC) +// Royal Scientific Society, Al-Jubeiha +الاردن + +// xn--3e0b707e ("Republic of Korea", Hangul) : KR +한국 + +// xn--80ao21a ("Kaz", Kazakh) : KZ +қаз + +// xn--fzc2c9e2c ("Lanka", Sinhalese-Sinhala) : LK +// https://nic.lk +ලංකා + +// 
xn--xkc2al3hye2a ("Ilangai", Tamil) : LK +// https://nic.lk +இலங்கை + +// xn--mgbc0a9azcg ("Morocco/al-Maghrib", Arabic) : MA +المغرب + +// xn--d1alf ("mkd", Macedonian) : MK +// MARnet +мкд + +// xn--l1acc ("mon", Mongolian) : MN +мон + +// xn--mix891f ("Macao", Chinese, Traditional) : MO +// MONIC / HNET Asia (Registry Operator for .mo) +澳門 + +// xn--mix082f ("Macao", Chinese, Simplified) : MO +澳门 + +// xn--mgbx4cd0ab ("Malaysia", Malay) : MY +مليسيا + +// xn--mgb9awbf ("Oman", Arabic) : OM +عمان + +// xn--mgbai9azgqp6j ("Pakistan", Urdu/Arabic) : PK +پاکستان + +// xn--mgbai9a5eva00b ("Pakistan", Urdu/Arabic, variant) : PK +پاكستان + +// xn--ygbi2ammx ("Falasteen", Arabic) : PS +// The Palestinian National Internet Naming Authority (PNINA) +// http://www.pnina.ps +فلسطين + +// xn--90a3ac ("srb", Cyrillic) : RS +// https://www.rnids.rs/en/domains/national-domains +срб +пр.срб +орг.срб +обр.срб +од.срб +упр.срб +ак.срб + +// xn--p1ai ("rf", Russian-Cyrillic) : RU +// https://cctld.ru/files/pdf/docs/en/rules_ru-rf.pdf +// Submitted by George Georgievsky +рф + +// xn--wgbl6a ("Qatar", Arabic) : QA +// http://www.ict.gov.qa/ +قطر + +// xn--mgberp4a5d4ar ("AlSaudiah", Arabic) : SA +// http://www.nic.net.sa/ +السعودية + +// xn--mgberp4a5d4a87g ("AlSaudiah", Arabic, variant) : SA +السعودیة + +// xn--mgbqly7c0a67fbc ("AlSaudiah", Arabic, variant) : SA +السعودیۃ + +// xn--mgbqly7cvafr ("AlSaudiah", Arabic, variant) : SA +السعوديه + +// xn--mgbpl2fh ("sudan", Arabic) : SD +// Operated by .sd registry +سودان + +// xn--yfro4i67o Singapore ("Singapore", Chinese) : SG +新加坡 + +// xn--clchc0ea0b2g2a9gcd ("Singapore", Tamil) : SG +சிங்கப்பூர் + +// xn--ogbpf8fl ("Syria", Arabic) : SY +سورية + +// xn--mgbtf8fl ("Syria", Arabic, variant) : SY +سوريا + +// xn--o3cw4h ("Thai", Thai) : TH +// http://www.thnic.co.th +ไทย +ศึกษา.ไทย +ธุรกิจ.ไทย +รัฐบาล.ไทย +ทหาร.ไทย +เน็ต.ไทย +องค์กร.ไทย + +// xn--pgbs0dh ("Tunisia", Arabic) : TN +// http://nic.tn +تونس + +// xn--kpry57d ("Taiwan", Chinese, Traditional) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台灣 + +// xn--kprw13d ("Taiwan", Chinese, Simplified) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台湾 + +// xn--nnx388a ("Taiwan", Chinese, variant) : TW +臺灣 + +// xn--j1amh ("ukr", Cyrillic) : UA +укр + +// xn--mgb2ddes ("AlYemen", Arabic) : YE +اليمن + +// xxx : http://icmregistry.com +xxx + +// ye : http://www.y.net.ye/services/domain_name.htm +*.ye + +// za : https://www.zadna.org.za/content/page/domain-information/ +ac.za +agric.za +alt.za +co.za +edu.za +gov.za +grondar.za +law.za +mil.za +net.za +ngo.za +nic.za +nis.za +nom.za +org.za +school.za +tm.za +web.za + +// zm : https://zicta.zm/ +// Submitted by registry +zm +ac.zm +biz.zm +co.zm +com.zm +edu.zm +gov.zm +info.zm +mil.zm +net.zm +org.zm +sch.zm + +// zw : https://www.potraz.gov.zw/ +// Confirmed by registry 2017-01-25 +zw +ac.zw +co.zw +gov.zw +mil.zw +org.zw + + +// newGTLDs + +// List of new gTLDs imported from https://www.icann.org/resources/registries/gtlds/v2/gtlds.json on 2020-08-07T17:16:50Z +// This list is auto-generated, don't edit it manually. +// aaa : 2015-02-26 American Automobile Association, Inc. +aaa + +// aarp : 2015-05-21 AARP +aarp + +// abarth : 2015-07-30 Fiat Chrysler Automobiles N.V. +abarth + +// abb : 2014-10-24 ABB Ltd +abb + +// abbott : 2014-07-24 Abbott Laboratories, Inc. +abbott + +// abbvie : 2015-07-30 AbbVie Inc. +abbvie + +// abc : 2015-07-30 Disney Enterprises, Inc. +abc + +// able : 2015-06-25 Able Inc. 
+able + +// abogado : 2014-04-24 Minds + Machines Group Limited +abogado + +// abudhabi : 2015-07-30 Abu Dhabi Systems and Information Centre +abudhabi + +// academy : 2013-11-07 Binky Moon, LLC +academy + +// accenture : 2014-08-15 Accenture plc +accenture + +// accountant : 2014-11-20 dot Accountant Limited +accountant + +// accountants : 2014-03-20 Binky Moon, LLC +accountants + +// aco : 2015-01-08 ACO Severin Ahlmann GmbH & Co. KG +aco + +// actor : 2013-12-12 Dog Beach, LLC +actor + +// adac : 2015-07-16 Allgemeiner Deutscher Automobil-Club e.V. (ADAC) +adac + +// ads : 2014-12-04 Charleston Road Registry Inc. +ads + +// adult : 2014-10-16 ICM Registry AD LLC +adult + +// aeg : 2015-03-19 Aktiebolaget Electrolux +aeg + +// aetna : 2015-05-21 Aetna Life Insurance Company +aetna + +// afamilycompany : 2015-07-23 Johnson Shareholdings, Inc. +afamilycompany + +// afl : 2014-10-02 Australian Football League +afl + +// africa : 2014-03-24 ZA Central Registry NPC trading as Registry.Africa +africa + +// agakhan : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +agakhan + +// agency : 2013-11-14 Binky Moon, LLC +agency + +// aig : 2014-12-18 American International Group, Inc. +aig + +// airbus : 2015-07-30 Airbus S.A.S. +airbus + +// airforce : 2014-03-06 Dog Beach, LLC +airforce + +// airtel : 2014-10-24 Bharti Airtel Limited +airtel + +// akdn : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +akdn + +// alfaromeo : 2015-07-31 Fiat Chrysler Automobiles N.V. +alfaromeo + +// alibaba : 2015-01-15 Alibaba Group Holding Limited +alibaba + +// alipay : 2015-01-15 Alibaba Group Holding Limited +alipay + +// allfinanz : 2014-07-03 Allfinanz Deutsche Vermögensberatung Aktiengesellschaft +allfinanz + +// allstate : 2015-07-31 Allstate Fire and Casualty Insurance Company +allstate + +// ally : 2015-06-18 Ally Financial Inc. +ally + +// alsace : 2014-07-02 Region Grand Est +alsace + +// alstom : 2015-07-30 ALSTOM +alstom + +// amazon : 2019-12-19 Amazon Registry Services, Inc. +amazon + +// americanexpress : 2015-07-31 American Express Travel Related Services Company, Inc. +americanexpress + +// americanfamily : 2015-07-23 AmFam, Inc. +americanfamily + +// amex : 2015-07-31 American Express Travel Related Services Company, Inc. +amex + +// amfam : 2015-07-23 AmFam, Inc. +amfam + +// amica : 2015-05-28 Amica Mutual Insurance Company +amica + +// amsterdam : 2014-07-24 Gemeente Amsterdam +amsterdam + +// analytics : 2014-12-18 Campus IP LLC +analytics + +// android : 2014-08-07 Charleston Road Registry Inc. +android + +// anquan : 2015-01-08 Beijing Qihu Keji Co., Ltd. +anquan + +// anz : 2015-07-31 Australia and New Zealand Banking Group Limited +anz + +// aol : 2015-09-17 Oath Inc. +aol + +// apartments : 2014-12-11 Binky Moon, LLC +apartments + +// app : 2015-05-14 Charleston Road Registry Inc. +app + +// apple : 2015-05-14 Apple Inc. +apple + +// aquarelle : 2014-07-24 Aquarelle.com +aquarelle + +// arab : 2015-11-12 League of Arab States +arab + +// aramco : 2014-11-20 Aramco Services Company +aramco + +// archi : 2014-02-06 Afilias Limited +archi + +// army : 2014-03-06 Dog Beach, LLC +army + +// art : 2016-03-24 UK Creative Ideas Limited +art + +// arte : 2014-12-11 Association Relative à la Télévision Européenne G.E.I.E. +arte + +// asda : 2015-07-31 Wal-Mart Stores, Inc. +asda + +// associates : 2014-03-06 Binky Moon, LLC +associates + +// athleta : 2015-07-30 The Gap, Inc. 
+athleta + +// attorney : 2014-03-20 Dog Beach, LLC +attorney + +// auction : 2014-03-20 Dog Beach, LLC +auction + +// audi : 2015-05-21 AUDI Aktiengesellschaft +audi + +// audible : 2015-06-25 Amazon Registry Services, Inc. +audible + +// audio : 2014-03-20 UNR Corp. +audio + +// auspost : 2015-08-13 Australian Postal Corporation +auspost + +// author : 2014-12-18 Amazon Registry Services, Inc. +author + +// auto : 2014-11-13 XYZ.COM LLC +auto + +// autos : 2014-01-09 DERAutos, LLC +autos + +// avianca : 2015-01-08 Avianca Holdings S.A. +avianca + +// aws : 2015-06-25 Amazon Registry Services, Inc. +aws + +// axa : 2013-12-19 AXA SA +axa + +// azure : 2014-12-18 Microsoft Corporation +azure + +// baby : 2015-04-09 XYZ.COM LLC +baby + +// baidu : 2015-01-08 Baidu, Inc. +baidu + +// banamex : 2015-07-30 Citigroup Inc. +banamex + +// bananarepublic : 2015-07-31 The Gap, Inc. +bananarepublic + +// band : 2014-06-12 Dog Beach, LLC +band + +// bank : 2014-09-25 fTLD Registry Services LLC +bank + +// bar : 2013-12-12 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +bar + +// barcelona : 2014-07-24 Municipi de Barcelona +barcelona + +// barclaycard : 2014-11-20 Barclays Bank PLC +barclaycard + +// barclays : 2014-11-20 Barclays Bank PLC +barclays + +// barefoot : 2015-06-11 Gallo Vineyards, Inc. +barefoot + +// bargains : 2013-11-14 Binky Moon, LLC +bargains + +// baseball : 2015-10-29 MLB Advanced Media DH, LLC +baseball + +// basketball : 2015-08-20 Fédération Internationale de Basketball (FIBA) +basketball + +// bauhaus : 2014-04-17 Werkhaus GmbH +bauhaus + +// bayern : 2014-01-23 Bayern Connect GmbH +bayern + +// bbc : 2014-12-18 British Broadcasting Corporation +bbc + +// bbt : 2015-07-23 BB&T Corporation +bbt + +// bbva : 2014-10-02 BANCO BILBAO VIZCAYA ARGENTARIA, S.A. +bbva + +// bcg : 2015-04-02 The Boston Consulting Group, Inc. +bcg + +// bcn : 2014-07-24 Municipi de Barcelona +bcn + +// beats : 2015-05-14 Beats Electronics, LLC +beats + +// beauty : 2015-12-03 XYZ.COM LLC +beauty + +// beer : 2014-01-09 Minds + Machines Group Limited +beer + +// bentley : 2014-12-18 Bentley Motors Limited +bentley + +// berlin : 2013-10-31 dotBERLIN GmbH & Co. KG +berlin + +// best : 2013-12-19 BestTLD Pty Ltd +best + +// bestbuy : 2015-07-31 BBY Solutions, Inc. +bestbuy + +// bet : 2015-05-07 Afilias Limited +bet + +// bharti : 2014-01-09 Bharti Enterprises (Holding) Private Limited +bharti + +// bible : 2014-06-19 American Bible Society +bible + +// bid : 2013-12-19 dot Bid Limited +bid + +// bike : 2013-08-27 Binky Moon, LLC +bike + +// bing : 2014-12-18 Microsoft Corporation +bing + +// bingo : 2014-12-04 Binky Moon, LLC +bingo + +// bio : 2014-03-06 Afilias Limited +bio + +// black : 2014-01-16 Afilias Limited +black + +// blackfriday : 2014-01-16 UNR Corp. 
+blackfriday + +// blockbuster : 2015-07-30 Dish DBS Corporation +blockbuster + +// blog : 2015-05-14 Knock Knock WHOIS There, LLC +blog + +// bloomberg : 2014-07-17 Bloomberg IP Holdings LLC +bloomberg + +// blue : 2013-11-07 Afilias Limited +blue + +// bms : 2014-10-30 Bristol-Myers Squibb Company +bms + +// bmw : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +bmw + +// bnpparibas : 2014-05-29 BNP Paribas +bnpparibas + +// boats : 2014-12-04 DERBoats, LLC +boats + +// boehringer : 2015-07-09 Boehringer Ingelheim International GmbH +boehringer + +// bofa : 2015-07-31 Bank of America Corporation +bofa + +// bom : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +bom + +// bond : 2014-06-05 ShortDot SA +bond + +// boo : 2014-01-30 Charleston Road Registry Inc. +boo + +// book : 2015-08-27 Amazon Registry Services, Inc. +book + +// booking : 2015-07-16 Booking.com B.V. +booking + +// bosch : 2015-06-18 Robert Bosch GMBH +bosch + +// bostik : 2015-05-28 Bostik SA +bostik + +// boston : 2015-12-10 Boston TLD Management, LLC +boston + +// bot : 2014-12-18 Amazon Registry Services, Inc. +bot + +// boutique : 2013-11-14 Binky Moon, LLC +boutique + +// box : 2015-11-12 .BOX INC. +box + +// bradesco : 2014-12-18 Banco Bradesco S.A. +bradesco + +// bridgestone : 2014-12-18 Bridgestone Corporation +bridgestone + +// broadway : 2014-12-22 Celebrate Broadway, Inc. +broadway + +// broker : 2014-12-11 Dotbroker Registry Limited +broker + +// brother : 2015-01-29 Brother Industries, Ltd. +brother + +// brussels : 2014-02-06 DNS.be vzw +brussels + +// budapest : 2013-11-21 Minds + Machines Group Limited +budapest + +// bugatti : 2015-07-23 Bugatti International SA +bugatti + +// build : 2013-11-07 Plan Bee LLC +build + +// builders : 2013-11-07 Binky Moon, LLC +builders + +// business : 2013-11-07 Binky Moon, LLC +business + +// buy : 2014-12-18 Amazon Registry Services, Inc. +buy + +// buzz : 2013-10-02 DOTSTRATEGY CO. +buzz + +// bzh : 2014-02-27 Association www.bzh +bzh + +// cab : 2013-10-24 Binky Moon, LLC +cab + +// cafe : 2015-02-11 Binky Moon, LLC +cafe + +// cal : 2014-07-24 Charleston Road Registry Inc. +cal + +// call : 2014-12-18 Amazon Registry Services, Inc. +call + +// calvinklein : 2015-07-30 PVH gTLD Holdings LLC +calvinklein + +// cam : 2016-04-21 AC Webconnecting Holding B.V. +cam + +// camera : 2013-08-27 Binky Moon, LLC +camera + +// camp : 2013-11-07 Binky Moon, LLC +camp + +// cancerresearch : 2014-05-15 Australian Cancer Research Foundation +cancerresearch + +// canon : 2014-09-12 Canon Inc. +canon + +// capetown : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +capetown + +// capital : 2014-03-06 Binky Moon, LLC +capital + +// capitalone : 2015-08-06 Capital One Financial Corporation +capitalone + +// car : 2015-01-22 XYZ.COM LLC +car + +// caravan : 2013-12-12 Caravan International, Inc. +caravan + +// cards : 2013-12-05 Binky Moon, LLC +cards + +// care : 2014-03-06 Binky Moon, LLC +care + +// career : 2013-10-09 dotCareer LLC +career + +// careers : 2013-10-02 Binky Moon, LLC +careers + +// cars : 2014-11-13 XYZ.COM LLC +cars + +// casa : 2013-11-21 Minds + Machines Group Limited +casa + +// case : 2015-09-03 CNH Industrial N.V. +case + +// caseih : 2015-09-03 CNH Industrial N.V. 
+caseih + +// cash : 2014-03-06 Binky Moon, LLC +cash + +// casino : 2014-12-18 Binky Moon, LLC +casino + +// catering : 2013-12-05 Binky Moon, LLC +catering + +// catholic : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +catholic + +// cba : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +cba + +// cbn : 2014-08-22 The Christian Broadcasting Network, Inc. +cbn + +// cbre : 2015-07-02 CBRE, Inc. +cbre + +// cbs : 2015-08-06 CBS Domains Inc. +cbs + +// ceb : 2015-04-09 The Corporate Executive Board Company +ceb + +// center : 2013-11-07 Binky Moon, LLC +center + +// ceo : 2013-11-07 CEOTLD Pty Ltd +ceo + +// cern : 2014-06-05 European Organization for Nuclear Research ("CERN") +cern + +// cfa : 2014-08-28 CFA Institute +cfa + +// cfd : 2014-12-11 DotCFD Registry Limited +cfd + +// chanel : 2015-04-09 Chanel International B.V. +chanel + +// channel : 2014-05-08 Charleston Road Registry Inc. +channel + +// charity : 2018-04-11 Binky Moon, LLC +charity + +// chase : 2015-04-30 JPMorgan Chase Bank, National Association +chase + +// chat : 2014-12-04 Binky Moon, LLC +chat + +// cheap : 2013-11-14 Binky Moon, LLC +cheap + +// chintai : 2015-06-11 CHINTAI Corporation +chintai + +// christmas : 2013-11-21 UNR Corp. +christmas + +// chrome : 2014-07-24 Charleston Road Registry Inc. +chrome + +// church : 2014-02-06 Binky Moon, LLC +church + +// cipriani : 2015-02-19 Hotel Cipriani Srl +cipriani + +// circle : 2014-12-18 Amazon Registry Services, Inc. +circle + +// cisco : 2014-12-22 Cisco Technology, Inc. +cisco + +// citadel : 2015-07-23 Citadel Domain LLC +citadel + +// citi : 2015-07-30 Citigroup Inc. +citi + +// citic : 2014-01-09 CITIC Group Corporation +citic + +// city : 2014-05-29 Binky Moon, LLC +city + +// cityeats : 2014-12-11 Lifestyle Domain Holdings, Inc. +cityeats + +// claims : 2014-03-20 Binky Moon, LLC +claims + +// cleaning : 2013-12-05 Binky Moon, LLC +cleaning + +// click : 2014-06-05 UNR Corp. +click + +// clinic : 2014-03-20 Binky Moon, LLC +clinic + +// clinique : 2015-10-01 The Estée Lauder Companies Inc. +clinique + +// clothing : 2013-08-27 Binky Moon, LLC +clothing + +// cloud : 2015-04-16 Aruba PEC S.p.A. +cloud + +// club : 2013-11-08 .CLUB DOMAINS, LLC +club + +// clubmed : 2015-06-25 Club Méditerranée S.A. +clubmed + +// coach : 2014-10-09 Binky Moon, LLC +coach + +// codes : 2013-10-31 Binky Moon, LLC +codes + +// coffee : 2013-10-17 Binky Moon, LLC +coffee + +// college : 2014-01-16 XYZ.COM LLC +college + +// cologne : 2014-02-05 dotKoeln GmbH +cologne + +// comcast : 2015-07-23 Comcast IP Holdings I, LLC +comcast + +// commbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +commbank + +// community : 2013-12-05 Binky Moon, LLC +community + +// company : 2013-11-07 Binky Moon, LLC +company + +// compare : 2015-10-08 Registry Services, LLC +compare + +// computer : 2013-10-24 Binky Moon, LLC +computer + +// comsec : 2015-01-08 VeriSign, Inc. +comsec + +// condos : 2013-12-05 Binky Moon, LLC +condos + +// construction : 2013-09-16 Binky Moon, LLC +construction + +// consulting : 2013-12-05 Dog Beach, LLC +consulting + +// contact : 2015-01-08 Dog Beach, LLC +contact + +// contractors : 2013-09-10 Binky Moon, LLC +contractors + +// cooking : 2013-11-21 Minds + Machines Group Limited +cooking + +// cookingchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. 
+cookingchannel + +// cool : 2013-11-14 Binky Moon, LLC +cool + +// corsica : 2014-09-25 Collectivité de Corse +corsica + +// country : 2013-12-19 DotCountry LLC +country + +// coupon : 2015-02-26 Amazon Registry Services, Inc. +coupon + +// coupons : 2015-03-26 Binky Moon, LLC +coupons + +// courses : 2014-12-04 OPEN UNIVERSITIES AUSTRALIA PTY LTD +courses + +// cpa : 2019-06-10 American Institute of Certified Public Accountants +cpa + +// credit : 2014-03-20 Binky Moon, LLC +credit + +// creditcard : 2014-03-20 Binky Moon, LLC +creditcard + +// creditunion : 2015-01-22 CUNA Performance Resources, LLC +creditunion + +// cricket : 2014-10-09 dot Cricket Limited +cricket + +// crown : 2014-10-24 Crown Equipment Corporation +crown + +// crs : 2014-04-03 Federated Co-operatives Limited +crs + +// cruise : 2015-12-10 Viking River Cruises (Bermuda) Ltd. +cruise + +// cruises : 2013-12-05 Binky Moon, LLC +cruises + +// csc : 2014-09-25 Alliance-One Services, Inc. +csc + +// cuisinella : 2014-04-03 SCHMIDT GROUPE S.A.S. +cuisinella + +// cymru : 2014-05-08 Nominet UK +cymru + +// cyou : 2015-01-22 ShortDot SA +cyou + +// dabur : 2014-02-06 Dabur India Limited +dabur + +// dad : 2014-01-23 Charleston Road Registry Inc. +dad + +// dance : 2013-10-24 Dog Beach, LLC +dance + +// data : 2016-06-02 Dish DBS Corporation +data + +// date : 2014-11-20 dot Date Limited +date + +// dating : 2013-12-05 Binky Moon, LLC +dating + +// datsun : 2014-03-27 NISSAN MOTOR CO., LTD. +datsun + +// day : 2014-01-30 Charleston Road Registry Inc. +day + +// dclk : 2014-11-20 Charleston Road Registry Inc. +dclk + +// dds : 2015-05-07 Minds + Machines Group Limited +dds + +// deal : 2015-06-25 Amazon Registry Services, Inc. +deal + +// dealer : 2014-12-22 Intercap Registry Inc. +dealer + +// deals : 2014-05-22 Binky Moon, LLC +deals + +// degree : 2014-03-06 Dog Beach, LLC +degree + +// delivery : 2014-09-11 Binky Moon, LLC +delivery + +// dell : 2014-10-24 Dell Inc. +dell + +// deloitte : 2015-07-31 Deloitte Touche Tohmatsu +deloitte + +// delta : 2015-02-19 Delta Air Lines, Inc. +delta + +// democrat : 2013-10-24 Dog Beach, LLC +democrat + +// dental : 2014-03-20 Binky Moon, LLC +dental + +// dentist : 2014-03-20 Dog Beach, LLC +dentist + +// desi : 2013-11-14 Desi Networks LLC +desi + +// design : 2014-11-07 Top Level Design, LLC +design + +// dev : 2014-10-16 Charleston Road Registry Inc. +dev + +// dhl : 2015-07-23 Deutsche Post AG +dhl + +// diamonds : 2013-09-22 Binky Moon, LLC +diamonds + +// diet : 2014-06-26 UNR Corp. +diet + +// digital : 2014-03-06 Binky Moon, LLC +digital + +// direct : 2014-04-10 Binky Moon, LLC +direct + +// directory : 2013-09-20 Binky Moon, LLC +directory + +// discount : 2014-03-06 Binky Moon, LLC +discount + +// discover : 2015-07-23 Discover Financial Services +discover + +// dish : 2015-07-30 Dish DBS Corporation +dish + +// diy : 2015-11-05 Lifestyle Domain Holdings, Inc. +diy + +// dnp : 2013-12-13 Dai Nippon Printing Co., Ltd. +dnp + +// docs : 2014-10-16 Charleston Road Registry Inc. +docs + +// doctor : 2016-06-02 Binky Moon, LLC +doctor + +// dog : 2014-12-04 Binky Moon, LLC +dog + +// domains : 2013-10-17 Binky Moon, LLC +domains + +// dot : 2015-05-21 Dish DBS Corporation +dot + +// download : 2014-11-20 dot Support Limited +download + +// drive : 2015-03-05 Charleston Road Registry Inc. +drive + +// dtv : 2015-06-04 Dish DBS Corporation +dtv + +// dubai : 2015-01-01 Dubai Smart Government Department +dubai + +// duck : 2015-07-23 Johnson Shareholdings, Inc. 
+duck + +// dunlop : 2015-07-02 The Goodyear Tire & Rubber Company +dunlop + +// dupont : 2015-06-25 E. I. du Pont de Nemours and Company +dupont + +// durban : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +durban + +// dvag : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +dvag + +// dvr : 2016-05-26 DISH Technologies L.L.C. +dvr + +// earth : 2014-12-04 Interlink Co., Ltd. +earth + +// eat : 2014-01-23 Charleston Road Registry Inc. +eat + +// eco : 2016-07-08 Big Room Inc. +eco + +// edeka : 2014-12-18 EDEKA Verband kaufmännischer Genossenschaften e.V. +edeka + +// education : 2013-11-07 Binky Moon, LLC +education + +// email : 2013-10-31 Binky Moon, LLC +email + +// emerck : 2014-04-03 Merck KGaA +emerck + +// energy : 2014-09-11 Binky Moon, LLC +energy + +// engineer : 2014-03-06 Dog Beach, LLC +engineer + +// engineering : 2014-03-06 Binky Moon, LLC +engineering + +// enterprises : 2013-09-20 Binky Moon, LLC +enterprises + +// epson : 2014-12-04 Seiko Epson Corporation +epson + +// equipment : 2013-08-27 Binky Moon, LLC +equipment + +// ericsson : 2015-07-09 Telefonaktiebolaget L M Ericsson +ericsson + +// erni : 2014-04-03 ERNI Group Holding AG +erni + +// esq : 2014-05-08 Charleston Road Registry Inc. +esq + +// estate : 2013-08-27 Binky Moon, LLC +estate + +// etisalat : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +etisalat + +// eurovision : 2014-04-24 European Broadcasting Union (EBU) +eurovision + +// eus : 2013-12-12 Puntueus Fundazioa +eus + +// events : 2013-12-05 Binky Moon, LLC +events + +// exchange : 2014-03-06 Binky Moon, LLC +exchange + +// expert : 2013-11-21 Binky Moon, LLC +expert + +// exposed : 2013-12-05 Binky Moon, LLC +exposed + +// express : 2015-02-11 Binky Moon, LLC +express + +// extraspace : 2015-05-14 Extra Space Storage LLC +extraspace + +// fage : 2014-12-18 Fage International S.A. +fage + +// fail : 2014-03-06 Binky Moon, LLC +fail + +// fairwinds : 2014-11-13 FairWinds Partners, LLC +fairwinds + +// faith : 2014-11-20 dot Faith Limited +faith + +// family : 2015-04-02 Dog Beach, LLC +family + +// fan : 2014-03-06 Dog Beach, LLC +fan + +// fans : 2014-11-07 ZDNS International Limited +fans + +// farm : 2013-11-07 Binky Moon, LLC +farm + +// farmers : 2015-07-09 Farmers Insurance Exchange +farmers + +// fashion : 2014-07-03 Minds + Machines Group Limited +fashion + +// fast : 2014-12-18 Amazon Registry Services, Inc. +fast + +// fedex : 2015-08-06 Federal Express Corporation +fedex + +// feedback : 2013-12-19 Top Level Spectrum, Inc. +feedback + +// ferrari : 2015-07-31 Fiat Chrysler Automobiles N.V. +ferrari + +// ferrero : 2014-12-18 Ferrero Trading Lux S.A. +ferrero + +// fiat : 2015-07-31 Fiat Chrysler Automobiles N.V. +fiat + +// fidelity : 2015-07-30 Fidelity Brokerage Services LLC +fidelity + +// fido : 2015-08-06 Rogers Communications Canada Inc. +fido + +// film : 2015-01-08 Motion Picture Domain Registry Pty Ltd +film + +// final : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +final + +// finance : 2014-03-20 Binky Moon, LLC +finance + +// financial : 2014-03-06 Binky Moon, LLC +financial + +// fire : 2015-06-25 Amazon Registry Services, Inc. 
+fire + +// firestone : 2014-12-18 Bridgestone Licensing Services, Inc +firestone + +// firmdale : 2014-03-27 Firmdale Holdings Limited +firmdale + +// fish : 2013-12-12 Binky Moon, LLC +fish + +// fishing : 2013-11-21 Minds + Machines Group Limited +fishing + +// fit : 2014-11-07 Minds + Machines Group Limited +fit + +// fitness : 2014-03-06 Binky Moon, LLC +fitness + +// flickr : 2015-04-02 Flickr, Inc. +flickr + +// flights : 2013-12-05 Binky Moon, LLC +flights + +// flir : 2015-07-23 FLIR Systems, Inc. +flir + +// florist : 2013-11-07 Binky Moon, LLC +florist + +// flowers : 2014-10-09 UNR Corp. +flowers + +// fly : 2014-05-08 Charleston Road Registry Inc. +fly + +// foo : 2014-01-23 Charleston Road Registry Inc. +foo + +// food : 2016-04-21 Lifestyle Domain Holdings, Inc. +food + +// foodnetwork : 2015-07-02 Lifestyle Domain Holdings, Inc. +foodnetwork + +// football : 2014-12-18 Binky Moon, LLC +football + +// ford : 2014-11-13 Ford Motor Company +ford + +// forex : 2014-12-11 Dotforex Registry Limited +forex + +// forsale : 2014-05-22 Dog Beach, LLC +forsale + +// forum : 2015-04-02 Fegistry, LLC +forum + +// foundation : 2013-12-05 Binky Moon, LLC +foundation + +// fox : 2015-09-11 FOX Registry, LLC +fox + +// free : 2015-12-10 Amazon Registry Services, Inc. +free + +// fresenius : 2015-07-30 Fresenius Immobilien-Verwaltungs-GmbH +fresenius + +// frl : 2014-05-15 FRLregistry B.V. +frl + +// frogans : 2013-12-19 OP3FT +frogans + +// frontdoor : 2015-07-02 Lifestyle Domain Holdings, Inc. +frontdoor + +// frontier : 2015-02-05 Frontier Communications Corporation +frontier + +// ftr : 2015-07-16 Frontier Communications Corporation +ftr + +// fujitsu : 2015-07-30 Fujitsu Limited +fujitsu + +// fujixerox : 2015-07-23 Xerox DNHC LLC +fujixerox + +// fun : 2016-01-14 DotSpace Inc. +fun + +// fund : 2014-03-20 Binky Moon, LLC +fund + +// furniture : 2014-03-20 Binky Moon, LLC +furniture + +// futbol : 2013-09-20 Dog Beach, LLC +futbol + +// fyi : 2015-04-02 Binky Moon, LLC +fyi + +// gal : 2013-11-07 Asociación puntoGAL +gal + +// gallery : 2013-09-13 Binky Moon, LLC +gallery + +// gallo : 2015-06-11 Gallo Vineyards, Inc. +gallo + +// gallup : 2015-02-19 Gallup, Inc. +gallup + +// game : 2015-05-28 UNR Corp. +game + +// games : 2015-05-28 Dog Beach, LLC +games + +// gap : 2015-07-31 The Gap, Inc. +gap + +// garden : 2014-06-26 Minds + Machines Group Limited +garden + +// gay : 2019-05-23 Top Level Design, LLC +gay + +// gbiz : 2014-07-17 Charleston Road Registry Inc. +gbiz + +// gdn : 2014-07-31 Joint Stock Company "Navigation-information systems" +gdn + +// gea : 2014-12-04 GEA Group Aktiengesellschaft +gea + +// gent : 2014-01-23 COMBELL NV +gent + +// genting : 2015-03-12 Resorts World Inc Pte. Ltd. +genting + +// george : 2015-07-31 Wal-Mart Stores, Inc. +george + +// ggee : 2014-01-09 GMO Internet, Inc. +ggee + +// gift : 2013-10-17 DotGift, LLC +gift + +// gifts : 2014-07-03 Binky Moon, LLC +gifts + +// gives : 2014-03-06 Dog Beach, LLC +gives + +// giving : 2014-11-13 Giving Limited +giving + +// glade : 2015-07-23 Johnson Shareholdings, Inc. +glade + +// glass : 2013-11-07 Binky Moon, LLC +glass + +// gle : 2014-07-24 Charleston Road Registry Inc. +gle + +// global : 2014-04-17 Dot Global Domain Registry Limited +global + +// globo : 2013-12-19 Globo Comunicação e Participações S.A +globo + +// gmail : 2014-05-01 Charleston Road Registry Inc. +gmail + +// gmbh : 2016-01-29 Binky Moon, LLC +gmbh + +// gmo : 2014-01-09 GMO Internet, Inc. 
+gmo + +// gmx : 2014-04-24 1&1 Mail & Media GmbH +gmx + +// godaddy : 2015-07-23 Go Daddy East, LLC +godaddy + +// gold : 2015-01-22 Binky Moon, LLC +gold + +// goldpoint : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +goldpoint + +// golf : 2014-12-18 Binky Moon, LLC +golf + +// goo : 2014-12-18 NTT Resonant Inc. +goo + +// goodyear : 2015-07-02 The Goodyear Tire & Rubber Company +goodyear + +// goog : 2014-11-20 Charleston Road Registry Inc. +goog + +// google : 2014-07-24 Charleston Road Registry Inc. +google + +// gop : 2014-01-16 Republican State Leadership Committee, Inc. +gop + +// got : 2014-12-18 Amazon Registry Services, Inc. +got + +// grainger : 2015-05-07 Grainger Registry Services, LLC +grainger + +// graphics : 2013-09-13 Binky Moon, LLC +graphics + +// gratis : 2014-03-20 Binky Moon, LLC +gratis + +// green : 2014-05-08 Afilias Limited +green + +// gripe : 2014-03-06 Binky Moon, LLC +gripe + +// grocery : 2016-06-16 Wal-Mart Stores, Inc. +grocery + +// group : 2014-08-15 Binky Moon, LLC +group + +// guardian : 2015-07-30 The Guardian Life Insurance Company of America +guardian + +// gucci : 2014-11-13 Guccio Gucci S.p.a. +gucci + +// guge : 2014-08-28 Charleston Road Registry Inc. +guge + +// guide : 2013-09-13 Binky Moon, LLC +guide + +// guitars : 2013-11-14 UNR Corp. +guitars + +// guru : 2013-08-27 Binky Moon, LLC +guru + +// hair : 2015-12-03 XYZ.COM LLC +hair + +// hamburg : 2014-02-20 Hamburg Top-Level-Domain GmbH +hamburg + +// hangout : 2014-11-13 Charleston Road Registry Inc. +hangout + +// haus : 2013-12-05 Dog Beach, LLC +haus + +// hbo : 2015-07-30 HBO Registry Services, Inc. +hbo + +// hdfc : 2015-07-30 HOUSING DEVELOPMENT FINANCE CORPORATION LIMITED +hdfc + +// hdfcbank : 2015-02-12 HDFC Bank Limited +hdfcbank + +// health : 2015-02-11 DotHealth, LLC +health + +// healthcare : 2014-06-12 Binky Moon, LLC +healthcare + +// help : 2014-06-26 UNR Corp. +help + +// helsinki : 2015-02-05 City of Helsinki +helsinki + +// here : 2014-02-06 Charleston Road Registry Inc. +here + +// hermes : 2014-07-10 HERMES INTERNATIONAL +hermes + +// hgtv : 2015-07-02 Lifestyle Domain Holdings, Inc. +hgtv + +// hiphop : 2014-03-06 UNR Corp. +hiphop + +// hisamitsu : 2015-07-16 Hisamitsu Pharmaceutical Co.,Inc. +hisamitsu + +// hitachi : 2014-10-31 Hitachi, Ltd. +hitachi + +// hiv : 2014-03-13 UNR Corp. +hiv + +// hkt : 2015-05-14 PCCW-HKT DataCom Services Limited +hkt + +// hockey : 2015-03-19 Binky Moon, LLC +hockey + +// holdings : 2013-08-27 Binky Moon, LLC +holdings + +// holiday : 2013-11-07 Binky Moon, LLC +holiday + +// homedepot : 2015-04-02 Home Depot Product Authority, LLC +homedepot + +// homegoods : 2015-07-16 The TJX Companies, Inc. +homegoods + +// homes : 2014-01-09 DERHomes, LLC +homes + +// homesense : 2015-07-16 The TJX Companies, Inc. +homesense + +// honda : 2014-12-18 Honda Motor Co., Ltd. +honda + +// horse : 2013-11-21 Minds + Machines Group Limited +horse + +// hospital : 2016-10-20 Binky Moon, LLC +hospital + +// host : 2014-04-17 DotHost Inc. +host + +// hosting : 2014-05-29 UNR Corp. +hosting + +// hot : 2015-08-27 Amazon Registry Services, Inc. +hot + +// hoteles : 2015-03-05 Travel Reservations SRL +hoteles + +// hotels : 2016-04-07 Booking.com B.V. +hotels + +// hotmail : 2014-12-18 Microsoft Corporation +hotmail + +// house : 2013-11-07 Binky Moon, LLC +house + +// how : 2014-01-23 Charleston Road Registry Inc. 
+how + +// hsbc : 2014-10-24 HSBC Global Services (UK) Limited +hsbc + +// hughes : 2015-07-30 Hughes Satellite Systems Corporation +hughes + +// hyatt : 2015-07-30 Hyatt GTLD, L.L.C. +hyatt + +// hyundai : 2015-07-09 Hyundai Motor Company +hyundai + +// ibm : 2014-07-31 International Business Machines Corporation +ibm + +// icbc : 2015-02-19 Industrial and Commercial Bank of China Limited +icbc + +// ice : 2014-10-30 IntercontinentalExchange, Inc. +ice + +// icu : 2015-01-08 ShortDot SA +icu + +// ieee : 2015-07-23 IEEE Global LLC +ieee + +// ifm : 2014-01-30 ifm electronic gmbh +ifm + +// ikano : 2015-07-09 Ikano S.A. +ikano + +// imamat : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +imamat + +// imdb : 2015-06-25 Amazon Registry Services, Inc. +imdb + +// immo : 2014-07-10 Binky Moon, LLC +immo + +// immobilien : 2013-11-07 Dog Beach, LLC +immobilien + +// inc : 2018-03-10 Intercap Registry Inc. +inc + +// industries : 2013-12-05 Binky Moon, LLC +industries + +// infiniti : 2014-03-27 NISSAN MOTOR CO., LTD. +infiniti + +// ing : 2014-01-23 Charleston Road Registry Inc. +ing + +// ink : 2013-12-05 Top Level Design, LLC +ink + +// institute : 2013-11-07 Binky Moon, LLC +institute + +// insurance : 2015-02-19 fTLD Registry Services LLC +insurance + +// insure : 2014-03-20 Binky Moon, LLC +insure + +// intel : 2015-08-06 Intel Corporation +intel + +// international : 2013-11-07 Binky Moon, LLC +international + +// intuit : 2015-07-30 Intuit Administrative Services, Inc. +intuit + +// investments : 2014-03-20 Binky Moon, LLC +investments + +// ipiranga : 2014-08-28 Ipiranga Produtos de Petroleo S.A. +ipiranga + +// irish : 2014-08-07 Binky Moon, LLC +irish + +// ismaili : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +ismaili + +// ist : 2014-08-28 Istanbul Metropolitan Municipality +ist + +// istanbul : 2014-08-28 Istanbul Metropolitan Municipality +istanbul + +// itau : 2014-10-02 Itau Unibanco Holding S.A. +itau + +// itv : 2015-07-09 ITV Services Limited +itv + +// iveco : 2015-09-03 CNH Industrial N.V. +iveco + +// jaguar : 2014-11-13 Jaguar Land Rover Ltd +jaguar + +// java : 2014-06-19 Oracle Corporation +java + +// jcb : 2014-11-20 JCB Co., Ltd. +jcb + +// jcp : 2015-04-23 JCP Media, Inc. +jcp + +// jeep : 2015-07-30 FCA US LLC. +jeep + +// jetzt : 2014-01-09 Binky Moon, LLC +jetzt + +// jewelry : 2015-03-05 Binky Moon, LLC +jewelry + +// jio : 2015-04-02 Reliance Industries Limited +jio + +// jll : 2015-04-02 Jones Lang LaSalle Incorporated +jll + +// jmp : 2015-03-26 Matrix IP LLC +jmp + +// jnj : 2015-06-18 Johnson & Johnson Services, Inc. +jnj + +// joburg : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +joburg + +// jot : 2014-12-18 Amazon Registry Services, Inc. +jot + +// joy : 2014-12-18 Amazon Registry Services, Inc. +joy + +// jpmorgan : 2015-04-30 JPMorgan Chase Bank, National Association +jpmorgan + +// jprs : 2014-09-18 Japan Registry Services Co., Ltd. +jprs + +// juegos : 2014-03-20 UNR Corp. +juegos + +// juniper : 2015-07-30 JUNIPER NETWORKS, INC. +juniper + +// kaufen : 2013-11-07 Dog Beach, LLC +kaufen + +// kddi : 2014-09-12 KDDI CORPORATION +kddi + +// kerryhotels : 2015-04-30 Kerry Trading Co. Limited +kerryhotels + +// kerrylogistics : 2015-04-09 Kerry Trading Co. Limited +kerrylogistics + +// kerryproperties : 2015-04-09 Kerry Trading Co. 
Limited +kerryproperties + +// kfh : 2014-12-04 Kuwait Finance House +kfh + +// kia : 2015-07-09 KIA MOTORS CORPORATION +kia + +// kim : 2013-09-23 Afilias Limited +kim + +// kinder : 2014-11-07 Ferrero Trading Lux S.A. +kinder + +// kindle : 2015-06-25 Amazon Registry Services, Inc. +kindle + +// kitchen : 2013-09-20 Binky Moon, LLC +kitchen + +// kiwi : 2013-09-20 DOT KIWI LIMITED +kiwi + +// koeln : 2014-01-09 dotKoeln GmbH +koeln + +// komatsu : 2015-01-08 Komatsu Ltd. +komatsu + +// kosher : 2015-08-20 Kosher Marketing Assets LLC +kosher + +// kpmg : 2015-04-23 KPMG International Cooperative (KPMG International Genossenschaft) +kpmg + +// kpn : 2015-01-08 Koninklijke KPN N.V. +kpn + +// krd : 2013-12-05 KRG Department of Information Technology +krd + +// kred : 2013-12-19 KredTLD Pty Ltd +kred + +// kuokgroup : 2015-04-09 Kerry Trading Co. Limited +kuokgroup + +// kyoto : 2014-11-07 Academic Institution: Kyoto Jyoho Gakuen +kyoto + +// lacaixa : 2014-01-09 Fundación Bancaria Caixa d’Estalvis i Pensions de Barcelona, “la Caixa” +lacaixa + +// lamborghini : 2015-06-04 Automobili Lamborghini S.p.A. +lamborghini + +// lamer : 2015-10-01 The Estée Lauder Companies Inc. +lamer + +// lancaster : 2015-02-12 LANCASTER +lancaster + +// lancia : 2015-07-31 Fiat Chrysler Automobiles N.V. +lancia + +// land : 2013-09-10 Binky Moon, LLC +land + +// landrover : 2014-11-13 Jaguar Land Rover Ltd +landrover + +// lanxess : 2015-07-30 LANXESS Corporation +lanxess + +// lasalle : 2015-04-02 Jones Lang LaSalle Incorporated +lasalle + +// lat : 2014-10-16 ECOM-LAC Federaciòn de Latinoamèrica y el Caribe para Internet y el Comercio Electrònico +lat + +// latino : 2015-07-30 Dish DBS Corporation +latino + +// latrobe : 2014-06-16 La Trobe University +latrobe + +// law : 2015-01-22 LW TLD Limited +law + +// lawyer : 2014-03-20 Dog Beach, LLC +lawyer + +// lds : 2014-03-20 IRI Domain Management, LLC +lds + +// lease : 2014-03-06 Binky Moon, LLC +lease + +// leclerc : 2014-08-07 A.C.D. LEC Association des Centres Distributeurs Edouard Leclerc +leclerc + +// lefrak : 2015-07-16 LeFrak Organization, Inc. +lefrak + +// legal : 2014-10-16 Binky Moon, LLC +legal + +// lego : 2015-07-16 LEGO Juris A/S +lego + +// lexus : 2015-04-23 TOYOTA MOTOR CORPORATION +lexus + +// lgbt : 2014-05-08 Afilias Limited +lgbt + +// lidl : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +lidl + +// life : 2014-02-06 Binky Moon, LLC +life + +// lifeinsurance : 2015-01-15 American Council of Life Insurers +lifeinsurance + +// lifestyle : 2014-12-11 Lifestyle Domain Holdings, Inc. +lifestyle + +// lighting : 2013-08-27 Binky Moon, LLC +lighting + +// like : 2014-12-18 Amazon Registry Services, Inc. +like + +// lilly : 2015-07-31 Eli Lilly and Company +lilly + +// limited : 2014-03-06 Binky Moon, LLC +limited + +// limo : 2013-10-17 Binky Moon, LLC +limo + +// lincoln : 2014-11-13 Ford Motor Company +lincoln + +// linde : 2014-12-04 Linde Aktiengesellschaft +linde + +// link : 2013-11-14 UNR Corp. +link + +// lipsy : 2015-06-25 Lipsy Ltd +lipsy + +// live : 2014-12-04 Dog Beach, LLC +live + +// living : 2015-07-30 Lifestyle Domain Holdings, Inc. +living + +// lixil : 2015-03-19 LIXIL Group Corporation +lixil + +// llc : 2017-12-14 Afilias Limited +llc + +// llp : 2019-08-26 UNR Corp. +llp + +// loan : 2014-11-20 dot Loan Limited +loan + +// loans : 2014-03-20 Binky Moon, LLC +loans + +// locker : 2015-06-04 Dish DBS Corporation +locker + +// locus : 2015-06-25 Locus Analytics LLC +locus + +// loft : 2015-07-30 Annco, Inc. 
+loft + +// lol : 2015-01-30 UNR Corp. +lol + +// london : 2013-11-14 Dot London Domains Limited +london + +// lotte : 2014-11-07 Lotte Holdings Co., Ltd. +lotte + +// lotto : 2014-04-10 Afilias Limited +lotto + +// love : 2014-12-22 Merchant Law Group LLP +love + +// lpl : 2015-07-30 LPL Holdings, Inc. +lpl + +// lplfinancial : 2015-07-30 LPL Holdings, Inc. +lplfinancial + +// ltd : 2014-09-25 Binky Moon, LLC +ltd + +// ltda : 2014-04-17 InterNetX, Corp +ltda + +// lundbeck : 2015-08-06 H. Lundbeck A/S +lundbeck + +// lupin : 2014-11-07 LUPIN LIMITED +lupin + +// luxe : 2014-01-09 Minds + Machines Group Limited +luxe + +// luxury : 2013-10-17 Luxury Partners, LLC +luxury + +// macys : 2015-07-31 Macys, Inc. +macys + +// madrid : 2014-05-01 Comunidad de Madrid +madrid + +// maif : 2014-10-02 Mutuelle Assurance Instituteur France (MAIF) +maif + +// maison : 2013-12-05 Binky Moon, LLC +maison + +// makeup : 2015-01-15 XYZ.COM LLC +makeup + +// man : 2014-12-04 MAN SE +man + +// management : 2013-11-07 Binky Moon, LLC +management + +// mango : 2013-10-24 PUNTO FA S.L. +mango + +// map : 2016-06-09 Charleston Road Registry Inc. +map + +// market : 2014-03-06 Dog Beach, LLC +market + +// marketing : 2013-11-07 Binky Moon, LLC +marketing + +// markets : 2014-12-11 Dotmarkets Registry Limited +markets + +// marriott : 2014-10-09 Marriott Worldwide Corporation +marriott + +// marshalls : 2015-07-16 The TJX Companies, Inc. +marshalls + +// maserati : 2015-07-31 Fiat Chrysler Automobiles N.V. +maserati + +// mattel : 2015-08-06 Mattel Sites, Inc. +mattel + +// mba : 2015-04-02 Binky Moon, LLC +mba + +// mckinsey : 2015-07-31 McKinsey Holdings, Inc. +mckinsey + +// med : 2015-08-06 Medistry LLC +med + +// media : 2014-03-06 Binky Moon, LLC +media + +// meet : 2014-01-16 Charleston Road Registry Inc. +meet + +// melbourne : 2014-05-29 The Crown in right of the State of Victoria, represented by its Department of State Development, Business and Innovation +melbourne + +// meme : 2014-01-30 Charleston Road Registry Inc. +meme + +// memorial : 2014-10-16 Dog Beach, LLC +memorial + +// men : 2015-02-26 Exclusive Registry Limited +men + +// menu : 2013-09-11 Dot Menu Registry, LLC +menu + +// merckmsd : 2016-07-14 MSD Registry Holdings, Inc. +merckmsd + +// metlife : 2015-05-07 MetLife Services and Solutions, LLC +metlife + +// miami : 2013-12-19 Minds + Machines Group Limited +miami + +// microsoft : 2014-12-18 Microsoft Corporation +microsoft + +// mini : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +mini + +// mint : 2015-07-30 Intuit Administrative Services, Inc. +mint + +// mit : 2015-07-02 Massachusetts Institute of Technology +mit + +// mitsubishi : 2015-07-23 Mitsubishi Corporation +mitsubishi + +// mlb : 2015-05-21 MLB Advanced Media DH, LLC +mlb + +// mls : 2015-04-23 The Canadian Real Estate Association +mls + +// mma : 2014-11-07 MMA IARD +mma + +// mobile : 2016-06-02 Dish DBS Corporation +mobile + +// moda : 2013-11-07 Dog Beach, LLC +moda + +// moe : 2013-11-13 Interlink Co., Ltd. +moe + +// moi : 2014-12-18 Amazon Registry Services, Inc. +moi + +// mom : 2015-04-16 UNR Corp. 
+mom + +// monash : 2013-09-30 Monash University +monash + +// money : 2014-10-16 Binky Moon, LLC +money + +// monster : 2015-09-11 XYZ.COM LLC +monster + +// mormon : 2013-12-05 IRI Domain Management, LLC +mormon + +// mortgage : 2014-03-20 Dog Beach, LLC +mortgage + +// moscow : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +moscow + +// moto : 2015-06-04 Motorola Trademark Holdings, LLC +moto + +// motorcycles : 2014-01-09 DERMotorcycles, LLC +motorcycles + +// mov : 2014-01-30 Charleston Road Registry Inc. +mov + +// movie : 2015-02-05 Binky Moon, LLC +movie + +// msd : 2015-07-23 MSD Registry Holdings, Inc. +msd + +// mtn : 2014-12-04 MTN Dubai Limited +mtn + +// mtr : 2015-03-12 MTR Corporation Limited +mtr + +// mutual : 2015-04-02 Northwestern Mutual MU TLD Registry, LLC +mutual + +// nab : 2015-08-20 National Australia Bank Limited +nab + +// nagoya : 2013-10-24 GMO Registry, Inc. +nagoya + +// nationwide : 2015-07-23 Nationwide Mutual Insurance Company +nationwide + +// natura : 2015-03-12 NATURA COSMÉTICOS S.A. +natura + +// navy : 2014-03-06 Dog Beach, LLC +navy + +// nba : 2015-07-31 NBA REGISTRY, LLC +nba + +// nec : 2015-01-08 NEC Corporation +nec + +// netbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +netbank + +// netflix : 2015-06-18 Netflix, Inc. +netflix + +// network : 2013-11-14 Binky Moon, LLC +network + +// neustar : 2013-12-05 NeuStar, Inc. +neustar + +// new : 2014-01-30 Charleston Road Registry Inc. +new + +// newholland : 2015-09-03 CNH Industrial N.V. +newholland + +// news : 2014-12-18 Dog Beach, LLC +news + +// next : 2015-06-18 Next plc +next + +// nextdirect : 2015-06-18 Next plc +nextdirect + +// nexus : 2014-07-24 Charleston Road Registry Inc. +nexus + +// nfl : 2015-07-23 NFL Reg Ops LLC +nfl + +// ngo : 2014-03-06 Public Interest Registry +ngo + +// nhk : 2014-02-13 Japan Broadcasting Corporation (NHK) +nhk + +// nico : 2014-12-04 DWANGO Co., Ltd. +nico + +// nike : 2015-07-23 NIKE, Inc. +nike + +// nikon : 2015-05-21 NIKON CORPORATION +nikon + +// ninja : 2013-11-07 Dog Beach, LLC +ninja + +// nissan : 2014-03-27 NISSAN MOTOR CO., LTD. +nissan + +// nissay : 2015-10-29 Nippon Life Insurance Company +nissay + +// nokia : 2015-01-08 Nokia Corporation +nokia + +// northwesternmutual : 2015-06-18 Northwestern Mutual Registry, LLC +northwesternmutual + +// norton : 2014-12-04 Symantec Corporation +norton + +// now : 2015-06-25 Amazon Registry Services, Inc. +now + +// nowruz : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +nowruz + +// nowtv : 2015-05-14 Starbucks (HK) Limited +nowtv + +// nra : 2014-05-22 NRA Holdings Company, INC. +nra + +// nrw : 2013-11-21 Minds + Machines GmbH +nrw + +// ntt : 2014-10-31 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +ntt + +// nyc : 2014-01-23 The City of New York by and through the New York City Department of Information Technology & Telecommunications +nyc + +// obi : 2014-09-25 OBI Group Holding SE & Co. KGaA +obi + +// observer : 2015-04-30 Top Level Spectrum, Inc. +observer + +// off : 2015-07-23 Johnson Shareholdings, Inc. +off + +// office : 2015-03-12 Microsoft Corporation +office + +// okinawa : 2013-12-05 BRregistry, Inc. +okinawa + +// olayan : 2015-05-14 Crescent Holding GmbH +olayan + +// olayangroup : 2015-05-14 Crescent Holding GmbH +olayangroup + +// oldnavy : 2015-07-31 The Gap, Inc. 
+oldnavy + +// ollo : 2015-06-04 Dish DBS Corporation +ollo + +// omega : 2015-01-08 The Swatch Group Ltd +omega + +// one : 2014-11-07 One.com A/S +one + +// ong : 2014-03-06 Public Interest Registry +ong + +// onl : 2013-09-16 I-Registry Ltd. +onl + +// online : 2015-01-15 DotOnline Inc. +online + +// onyourside : 2015-07-23 Nationwide Mutual Insurance Company +onyourside + +// ooo : 2014-01-09 INFIBEAM AVENUES LIMITED +ooo + +// open : 2015-07-31 American Express Travel Related Services Company, Inc. +open + +// oracle : 2014-06-19 Oracle Corporation +oracle + +// orange : 2015-03-12 Orange Brand Services Limited +orange + +// organic : 2014-03-27 Afilias Limited +organic + +// origins : 2015-10-01 The Estée Lauder Companies Inc. +origins + +// osaka : 2014-09-04 Osaka Registry Co., Ltd. +osaka + +// otsuka : 2013-10-11 Otsuka Holdings Co., Ltd. +otsuka + +// ott : 2015-06-04 Dish DBS Corporation +ott + +// ovh : 2014-01-16 MédiaBC +ovh + +// page : 2014-12-04 Charleston Road Registry Inc. +page + +// panasonic : 2015-07-30 Panasonic Corporation +panasonic + +// paris : 2014-01-30 City of Paris +paris + +// pars : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +pars + +// partners : 2013-12-05 Binky Moon, LLC +partners + +// parts : 2013-12-05 Binky Moon, LLC +parts + +// party : 2014-09-11 Blue Sky Registry Limited +party + +// passagens : 2015-03-05 Travel Reservations SRL +passagens + +// pay : 2015-08-27 Amazon Registry Services, Inc. +pay + +// pccw : 2015-05-14 PCCW Enterprises Limited +pccw + +// pet : 2015-05-07 Afilias Limited +pet + +// pfizer : 2015-09-11 Pfizer Inc. +pfizer + +// pharmacy : 2014-06-19 National Association of Boards of Pharmacy +pharmacy + +// phd : 2016-07-28 Charleston Road Registry Inc. +phd + +// philips : 2014-11-07 Koninklijke Philips N.V. +philips + +// phone : 2016-06-02 Dish DBS Corporation +phone + +// photo : 2013-11-14 UNR Corp. +photo + +// photography : 2013-09-20 Binky Moon, LLC +photography + +// photos : 2013-10-17 Binky Moon, LLC +photos + +// physio : 2014-05-01 PhysBiz Pty Ltd +physio + +// pics : 2013-11-14 UNR Corp. +pics + +// pictet : 2014-06-26 Pictet Europe S.A. +pictet + +// pictures : 2014-03-06 Binky Moon, LLC +pictures + +// pid : 2015-01-08 Top Level Spectrum, Inc. +pid + +// pin : 2014-12-18 Amazon Registry Services, Inc. +pin + +// ping : 2015-06-11 Ping Registry Provider, Inc. +ping + +// pink : 2013-10-01 Afilias Limited +pink + +// pioneer : 2015-07-16 Pioneer Corporation +pioneer + +// pizza : 2014-06-26 Binky Moon, LLC +pizza + +// place : 2014-04-24 Binky Moon, LLC +place + +// play : 2015-03-05 Charleston Road Registry Inc. +play + +// playstation : 2015-07-02 Sony Interactive Entertainment Inc. +playstation + +// plumbing : 2013-09-10 Binky Moon, LLC +plumbing + +// plus : 2015-02-05 Binky Moon, LLC +plus + +// pnc : 2015-07-02 PNC Domain Co., LLC +pnc + +// pohl : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +pohl + +// poker : 2014-07-03 Afilias Limited +poker + +// politie : 2015-08-20 Politie Nederland +politie + +// porn : 2014-10-16 ICM Registry PN LLC +porn + +// pramerica : 2015-07-30 Prudential Financial, Inc. +pramerica + +// praxi : 2013-12-05 Praxi S.p.A. +praxi + +// press : 2014-04-03 DotPress Inc. +press + +// prime : 2015-06-25 Amazon Registry Services, Inc. +prime + +// prod : 2014-01-23 Charleston Road Registry Inc. +prod + +// productions : 2013-12-05 Binky Moon, LLC +productions + +// prof : 2014-07-24 Charleston Road Registry Inc. 
+prof + +// progressive : 2015-07-23 Progressive Casualty Insurance Company +progressive + +// promo : 2014-12-18 Afilias Limited +promo + +// properties : 2013-12-05 Binky Moon, LLC +properties + +// property : 2014-05-22 UNR Corp. +property + +// protection : 2015-04-23 XYZ.COM LLC +protection + +// pru : 2015-07-30 Prudential Financial, Inc. +pru + +// prudential : 2015-07-30 Prudential Financial, Inc. +prudential + +// pub : 2013-12-12 Dog Beach, LLC +pub + +// pwc : 2015-10-29 PricewaterhouseCoopers LLP +pwc + +// qpon : 2013-11-14 dotCOOL, Inc. +qpon + +// quebec : 2013-12-19 PointQuébec Inc +quebec + +// quest : 2015-03-26 XYZ.COM LLC +quest + +// qvc : 2015-07-30 QVC, Inc. +qvc + +// racing : 2014-12-04 Premier Registry Limited +racing + +// radio : 2016-07-21 European Broadcasting Union (EBU) +radio + +// raid : 2015-07-23 Johnson Shareholdings, Inc. +raid + +// read : 2014-12-18 Amazon Registry Services, Inc. +read + +// realestate : 2015-09-11 dotRealEstate LLC +realestate + +// realtor : 2014-05-29 Real Estate Domains LLC +realtor + +// realty : 2015-03-19 Fegistry, LLC +realty + +// recipes : 2013-10-17 Binky Moon, LLC +recipes + +// red : 2013-11-07 Afilias Limited +red + +// redstone : 2014-10-31 Redstone Haute Couture Co., Ltd. +redstone + +// redumbrella : 2015-03-26 Travelers TLD, LLC +redumbrella + +// rehab : 2014-03-06 Dog Beach, LLC +rehab + +// reise : 2014-03-13 Binky Moon, LLC +reise + +// reisen : 2014-03-06 Binky Moon, LLC +reisen + +// reit : 2014-09-04 National Association of Real Estate Investment Trusts, Inc. +reit + +// reliance : 2015-04-02 Reliance Industries Limited +reliance + +// ren : 2013-12-12 ZDNS International Limited +ren + +// rent : 2014-12-04 XYZ.COM LLC +rent + +// rentals : 2013-12-05 Binky Moon, LLC +rentals + +// repair : 2013-11-07 Binky Moon, LLC +repair + +// report : 2013-12-05 Binky Moon, LLC +report + +// republican : 2014-03-20 Dog Beach, LLC +republican + +// rest : 2013-12-19 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +rest + +// restaurant : 2014-07-03 Binky Moon, LLC +restaurant + +// review : 2014-11-20 dot Review Limited +review + +// reviews : 2013-09-13 Dog Beach, LLC +reviews + +// rexroth : 2015-06-18 Robert Bosch GMBH +rexroth + +// rich : 2013-11-21 I-Registry Ltd. +rich + +// richardli : 2015-05-14 Pacific Century Asset Management (HK) Limited +richardli + +// ricoh : 2014-11-20 Ricoh Company, Ltd. +ricoh + +// ril : 2015-04-02 Reliance Industries Limited +ril + +// rio : 2014-02-27 Empresa Municipal de Informática SA - IPLANRIO +rio + +// rip : 2014-07-10 Dog Beach, LLC +rip + +// rmit : 2015-11-19 Royal Melbourne Institute of Technology +rmit + +// rocher : 2014-12-18 Ferrero Trading Lux S.A. +rocher + +// rocks : 2013-11-14 Dog Beach, LLC +rocks + +// rodeo : 2013-12-19 Minds + Machines Group Limited +rodeo + +// rogers : 2015-08-06 Rogers Communications Canada Inc. +rogers + +// room : 2014-12-18 Amazon Registry Services, Inc. +room + +// rsvp : 2014-05-08 Charleston Road Registry Inc. +rsvp + +// rugby : 2016-12-15 World Rugby Strategic Developments Limited +rugby + +// ruhr : 2013-10-02 regiodot GmbH & Co. KG +ruhr + +// run : 2015-03-19 Binky Moon, LLC +run + +// rwe : 2015-04-02 RWE AG +rwe + +// ryukyu : 2014-01-09 BRregistry, Inc. +ryukyu + +// saarland : 2013-12-12 dotSaarland GmbH +saarland + +// safe : 2014-12-18 Amazon Registry Services, Inc. +safe + +// safety : 2015-01-08 Safety Registry Services, LLC. +safety + +// sakura : 2014-12-18 SAKURA Internet Inc. 
+sakura + +// sale : 2014-10-16 Dog Beach, LLC +sale + +// salon : 2014-12-11 Binky Moon, LLC +salon + +// samsclub : 2015-07-31 Wal-Mart Stores, Inc. +samsclub + +// samsung : 2014-04-03 SAMSUNG SDS CO., LTD +samsung + +// sandvik : 2014-11-13 Sandvik AB +sandvik + +// sandvikcoromant : 2014-11-07 Sandvik AB +sandvikcoromant + +// sanofi : 2014-10-09 Sanofi +sanofi + +// sap : 2014-03-27 SAP AG +sap + +// sarl : 2014-07-03 Binky Moon, LLC +sarl + +// sas : 2015-04-02 Research IP LLC +sas + +// save : 2015-06-25 Amazon Registry Services, Inc. +save + +// saxo : 2014-10-31 Saxo Bank A/S +saxo + +// sbi : 2015-03-12 STATE BANK OF INDIA +sbi + +// sbs : 2014-11-07 SPECIAL BROADCASTING SERVICE CORPORATION +sbs + +// sca : 2014-03-13 SVENSKA CELLULOSA AKTIEBOLAGET SCA (publ) +sca + +// scb : 2014-02-20 The Siam Commercial Bank Public Company Limited ("SCB") +scb + +// schaeffler : 2015-08-06 Schaeffler Technologies AG & Co. KG +schaeffler + +// schmidt : 2014-04-03 SCHMIDT GROUPE S.A.S. +schmidt + +// scholarships : 2014-04-24 Scholarships.com, LLC +scholarships + +// school : 2014-12-18 Binky Moon, LLC +school + +// schule : 2014-03-06 Binky Moon, LLC +schule + +// schwarz : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +schwarz + +// science : 2014-09-11 dot Science Limited +science + +// scjohnson : 2015-07-23 Johnson Shareholdings, Inc. +scjohnson + +// scot : 2014-01-23 Dot Scot Registry Limited +scot + +// search : 2016-06-09 Charleston Road Registry Inc. +search + +// seat : 2014-05-22 SEAT, S.A. (Sociedad Unipersonal) +seat + +// secure : 2015-08-27 Amazon Registry Services, Inc. +secure + +// security : 2015-05-14 XYZ.COM LLC +security + +// seek : 2014-12-04 Seek Limited +seek + +// select : 2015-10-08 Registry Services, LLC +select + +// sener : 2014-10-24 Sener Ingeniería y Sistemas, S.A. +sener + +// services : 2014-02-27 Binky Moon, LLC +services + +// ses : 2015-07-23 SES +ses + +// seven : 2015-08-06 Seven West Media Ltd +seven + +// sew : 2014-07-17 SEW-EURODRIVE GmbH & Co KG +sew + +// sex : 2014-11-13 ICM Registry SX LLC +sex + +// sexy : 2013-09-11 UNR Corp. +sexy + +// sfr : 2015-08-13 Societe Francaise du Radiotelephone - SFR +sfr + +// shangrila : 2015-09-03 Shangri‐La International Hotel Management Limited +shangrila + +// sharp : 2014-05-01 Sharp Corporation +sharp + +// shaw : 2015-04-23 Shaw Cablesystems G.P. +shaw + +// shell : 2015-07-30 Shell Information Technology International Inc +shell + +// shia : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +shia + +// shiksha : 2013-11-14 Afilias Limited +shiksha + +// shoes : 2013-10-02 Binky Moon, LLC +shoes + +// shop : 2016-04-08 GMO Registry, Inc. +shop + +// shopping : 2016-03-31 Binky Moon, LLC +shopping + +// shouji : 2015-01-08 Beijing Qihu Keji Co., Ltd. +shouji + +// show : 2015-03-05 Binky Moon, LLC +show + +// showtime : 2015-08-06 CBS Domains Inc. +showtime + +// shriram : 2014-01-23 Shriram Capital Ltd. +shriram + +// silk : 2015-06-25 Amazon Registry Services, Inc. +silk + +// sina : 2015-03-12 Sina Corporation +sina + +// singles : 2013-08-27 Binky Moon, LLC +singles + +// site : 2015-01-15 DotSite Inc. +site + +// ski : 2015-04-09 Afilias Limited +ski + +// skin : 2015-01-15 XYZ.COM LLC +skin + +// sky : 2014-06-19 Sky International AG +sky + +// skype : 2014-12-18 Microsoft Corporation +skype + +// sling : 2015-07-30 DISH Technologies L.L.C. +sling + +// smart : 2015-07-09 Smart Communications, Inc. (SMART) +smart + +// smile : 2014-12-18 Amazon Registry Services, Inc. 
+smile + +// sncf : 2015-02-19 Société Nationale des Chemins de fer Francais S N C F +sncf + +// soccer : 2015-03-26 Binky Moon, LLC +soccer + +// social : 2013-11-07 Dog Beach, LLC +social + +// softbank : 2015-07-02 SoftBank Group Corp. +softbank + +// software : 2014-03-20 Dog Beach, LLC +software + +// sohu : 2013-12-19 Sohu.com Limited +sohu + +// solar : 2013-11-07 Binky Moon, LLC +solar + +// solutions : 2013-11-07 Binky Moon, LLC +solutions + +// song : 2015-02-26 Amazon Registry Services, Inc. +song + +// sony : 2015-01-08 Sony Corporation +sony + +// soy : 2014-01-23 Charleston Road Registry Inc. +soy + +// spa : 2019-09-19 Asia Spa and Wellness Promotion Council Limited +spa + +// space : 2014-04-03 DotSpace Inc. +space + +// sport : 2017-11-16 Global Association of International Sports Federations (GAISF) +sport + +// spot : 2015-02-26 Amazon Registry Services, Inc. +spot + +// spreadbetting : 2014-12-11 Dotspreadbetting Registry Limited +spreadbetting + +// srl : 2015-05-07 InterNetX, Corp +srl + +// stada : 2014-11-13 STADA Arzneimittel AG +stada + +// staples : 2015-07-30 Staples, Inc. +staples + +// star : 2015-01-08 Star India Private Limited +star + +// statebank : 2015-03-12 STATE BANK OF INDIA +statebank + +// statefarm : 2015-07-30 State Farm Mutual Automobile Insurance Company +statefarm + +// stc : 2014-10-09 Saudi Telecom Company +stc + +// stcgroup : 2014-10-09 Saudi Telecom Company +stcgroup + +// stockholm : 2014-12-18 Stockholms kommun +stockholm + +// storage : 2014-12-22 XYZ.COM LLC +storage + +// store : 2015-04-09 DotStore Inc. +store + +// stream : 2016-01-08 dot Stream Limited +stream + +// studio : 2015-02-11 Dog Beach, LLC +studio + +// study : 2014-12-11 OPEN UNIVERSITIES AUSTRALIA PTY LTD +study + +// style : 2014-12-04 Binky Moon, LLC +style + +// sucks : 2014-12-22 Vox Populi Registry Ltd. +sucks + +// supplies : 2013-12-19 Binky Moon, LLC +supplies + +// supply : 2013-12-19 Binky Moon, LLC +supply + +// support : 2013-10-24 Binky Moon, LLC +support + +// surf : 2014-01-09 Minds + Machines Group Limited +surf + +// surgery : 2014-03-20 Binky Moon, LLC +surgery + +// suzuki : 2014-02-20 SUZUKI MOTOR CORPORATION +suzuki + +// swatch : 2015-01-08 The Swatch Group Ltd +swatch + +// swiftcover : 2015-07-23 Swiftcover Insurance Services Limited +swiftcover + +// swiss : 2014-10-16 Swiss Confederation +swiss + +// sydney : 2014-09-18 State of New South Wales, Department of Premier and Cabinet +sydney + +// systems : 2013-11-07 Binky Moon, LLC +systems + +// tab : 2014-12-04 Tabcorp Holdings Limited +tab + +// taipei : 2014-07-10 Taipei City Government +taipei + +// talk : 2015-04-09 Amazon Registry Services, Inc. +talk + +// taobao : 2015-01-15 Alibaba Group Holding Limited +taobao + +// target : 2015-07-31 Target Domain Holdings, LLC +target + +// tatamotors : 2015-03-12 Tata Motors Ltd +tatamotors + +// tatar : 2014-04-24 Limited Liability Company "Coordination Center of Regional Domain of Tatarstan Republic" +tatar + +// tattoo : 2013-08-30 UNR Corp. +tattoo + +// tax : 2014-03-20 Binky Moon, LLC +tax + +// taxi : 2015-03-19 Binky Moon, LLC +taxi + +// tci : 2014-09-12 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +tci + +// tdk : 2015-06-11 TDK Corporation +tdk + +// team : 2015-03-05 Binky Moon, LLC +team + +// tech : 2015-01-30 Personals TLD Inc. 
+tech + +// technology : 2013-09-13 Binky Moon, LLC +technology + +// temasek : 2014-08-07 Temasek Holdings (Private) Limited +temasek + +// tennis : 2014-12-04 Binky Moon, LLC +tennis + +// teva : 2015-07-02 Teva Pharmaceutical Industries Limited +teva + +// thd : 2015-04-02 Home Depot Product Authority, LLC +thd + +// theater : 2015-03-19 Binky Moon, LLC +theater + +// theatre : 2015-05-07 XYZ.COM LLC +theatre + +// tiaa : 2015-07-23 Teachers Insurance and Annuity Association of America +tiaa + +// tickets : 2015-02-05 Accent Media Limited +tickets + +// tienda : 2013-11-14 Binky Moon, LLC +tienda + +// tiffany : 2015-01-30 Tiffany and Company +tiffany + +// tips : 2013-09-20 Binky Moon, LLC +tips + +// tires : 2014-11-07 Binky Moon, LLC +tires + +// tirol : 2014-04-24 punkt Tirol GmbH +tirol + +// tjmaxx : 2015-07-16 The TJX Companies, Inc. +tjmaxx + +// tjx : 2015-07-16 The TJX Companies, Inc. +tjx + +// tkmaxx : 2015-07-16 The TJX Companies, Inc. +tkmaxx + +// tmall : 2015-01-15 Alibaba Group Holding Limited +tmall + +// today : 2013-09-20 Binky Moon, LLC +today + +// tokyo : 2013-11-13 GMO Registry, Inc. +tokyo + +// tools : 2013-11-21 Binky Moon, LLC +tools + +// top : 2014-03-20 .TOP Registry +top + +// toray : 2014-12-18 Toray Industries, Inc. +toray + +// toshiba : 2014-04-10 TOSHIBA Corporation +toshiba + +// total : 2015-08-06 Total SA +total + +// tours : 2015-01-22 Binky Moon, LLC +tours + +// town : 2014-03-06 Binky Moon, LLC +town + +// toyota : 2015-04-23 TOYOTA MOTOR CORPORATION +toyota + +// toys : 2014-03-06 Binky Moon, LLC +toys + +// trade : 2014-01-23 Elite Registry Limited +trade + +// trading : 2014-12-11 Dottrading Registry Limited +trading + +// training : 2013-11-07 Binky Moon, LLC +training + +// travel : 2015-10-09 Dog Beach, LLC +travel + +// travelchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. +travelchannel + +// travelers : 2015-03-26 Travelers TLD, LLC +travelers + +// travelersinsurance : 2015-03-26 Travelers TLD, LLC +travelersinsurance + +// trust : 2014-10-16 NCC Group Domain Services, Inc. +trust + +// trv : 2015-03-26 Travelers TLD, LLC +trv + +// tube : 2015-06-11 Latin American Telecom LLC +tube + +// tui : 2014-07-03 TUI AG +tui + +// tunes : 2015-02-26 Amazon Registry Services, Inc. +tunes + +// tushu : 2014-12-18 Amazon Registry Services, Inc. +tushu + +// tvs : 2015-02-19 T V SUNDRAM IYENGAR & SONS LIMITED +tvs + +// ubank : 2015-08-20 National Australia Bank Limited +ubank + +// ubs : 2014-12-11 UBS AG +ubs + +// unicom : 2015-10-15 China United Network Communications Corporation Limited +unicom + +// university : 2014-03-06 Binky Moon, LLC +university + +// uno : 2013-09-11 DotSite Inc. +uno + +// uol : 2014-05-01 UBN INTERNET LTDA. +uol + +// ups : 2015-06-25 UPS Market Driver, Inc. +ups + +// vacations : 2013-12-05 Binky Moon, LLC +vacations + +// vana : 2014-12-11 Lifestyle Domain Holdings, Inc. +vana + +// vanguard : 2015-09-03 The Vanguard Group, Inc. +vanguard + +// vegas : 2014-01-16 Dot Vegas, Inc. +vegas + +// ventures : 2013-08-27 Binky Moon, LLC +ventures + +// verisign : 2015-08-13 VeriSign, Inc. +verisign + +// versicherung : 2014-03-20 tldbox GmbH +versicherung + +// vet : 2014-03-06 Dog Beach, LLC +vet + +// viajes : 2013-10-17 Binky Moon, LLC +viajes + +// video : 2014-10-16 Dog Beach, LLC +video + +// vig : 2015-05-14 VIENNA INSURANCE GROUP AG Wiener Versicherung Gruppe +vig + +// viking : 2015-04-02 Viking River Cruises (Bermuda) Ltd. 
+viking + +// villas : 2013-12-05 Binky Moon, LLC +villas + +// vin : 2015-06-18 Binky Moon, LLC +vin + +// vip : 2015-01-22 Minds + Machines Group Limited +vip + +// virgin : 2014-09-25 Virgin Enterprises Limited +virgin + +// visa : 2015-07-30 Visa Worldwide Pte. Limited +visa + +// vision : 2013-12-05 Binky Moon, LLC +vision + +// viva : 2014-11-07 Saudi Telecom Company +viva + +// vivo : 2015-07-31 Telefonica Brasil S.A. +vivo + +// vlaanderen : 2014-02-06 DNS.be vzw +vlaanderen + +// vodka : 2013-12-19 Minds + Machines Group Limited +vodka + +// volkswagen : 2015-05-14 Volkswagen Group of America Inc. +volkswagen + +// volvo : 2015-11-12 Volvo Holding Sverige Aktiebolag +volvo + +// vote : 2013-11-21 Monolith Registry LLC +vote + +// voting : 2013-11-13 Valuetainment Corp. +voting + +// voto : 2013-11-21 Monolith Registry LLC +voto + +// voyage : 2013-08-27 Binky Moon, LLC +voyage + +// vuelos : 2015-03-05 Travel Reservations SRL +vuelos + +// wales : 2014-05-08 Nominet UK +wales + +// walmart : 2015-07-31 Wal-Mart Stores, Inc. +walmart + +// walter : 2014-11-13 Sandvik AB +walter + +// wang : 2013-10-24 Zodiac Wang Limited +wang + +// wanggou : 2014-12-18 Amazon Registry Services, Inc. +wanggou + +// watch : 2013-11-14 Binky Moon, LLC +watch + +// watches : 2014-12-22 Richemont DNS Inc. +watches + +// weather : 2015-01-08 International Business Machines Corporation +weather + +// weatherchannel : 2015-03-12 International Business Machines Corporation +weatherchannel + +// webcam : 2014-01-23 dot Webcam Limited +webcam + +// weber : 2015-06-04 Saint-Gobain Weber SA +weber + +// website : 2014-04-03 DotWebsite Inc. +website + +// wed : 2013-10-01 Atgron, Inc. +wed + +// wedding : 2014-04-24 Minds + Machines Group Limited +wedding + +// weibo : 2015-03-05 Sina Corporation +weibo + +// weir : 2015-01-29 Weir Group IP Limited +weir + +// whoswho : 2014-02-20 Who's Who Registry +whoswho + +// wien : 2013-10-28 punkt.wien GmbH +wien + +// wiki : 2013-11-07 Top Level Design, LLC +wiki + +// williamhill : 2014-03-13 William Hill Organization Limited +williamhill + +// win : 2014-11-20 First Registry Limited +win + +// windows : 2014-12-18 Microsoft Corporation +windows + +// wine : 2015-06-18 Binky Moon, LLC +wine + +// winners : 2015-07-16 The TJX Companies, Inc. +winners + +// wme : 2014-02-13 William Morris Endeavor Entertainment, LLC +wme + +// wolterskluwer : 2015-08-06 Wolters Kluwer N.V. +wolterskluwer + +// woodside : 2015-07-09 Woodside Petroleum Limited +woodside + +// work : 2013-12-19 Minds + Machines Group Limited +work + +// works : 2013-11-14 Binky Moon, LLC +works + +// world : 2014-06-12 Binky Moon, LLC +world + +// wow : 2015-10-08 Amazon Registry Services, Inc. +wow + +// wtc : 2013-12-19 World Trade Centers Association, Inc. +wtc + +// wtf : 2014-03-06 Binky Moon, LLC +wtf + +// xbox : 2014-12-18 Microsoft Corporation +xbox + +// xerox : 2014-10-24 Xerox DNHC LLC +xerox + +// xfinity : 2015-07-09 Comcast IP Holdings I, LLC +xfinity + +// xihuan : 2015-01-08 Beijing Qihu Keji Co., Ltd. +xihuan + +// xin : 2014-12-11 Elegant Leader Limited +xin + +// xn--11b4c3d : 2015-01-15 VeriSign Sarl +कॉम + +// xn--1ck2e1b : 2015-02-26 Amazon Registry Services, Inc. +セール + +// xn--1qqw23a : 2014-01-09 Guangzhou YU Wei Information Technology Co., Ltd. 
+佛山 + +// xn--30rr7y : 2014-06-12 Excellent First Limited +慈善 + +// xn--3bst00m : 2013-09-13 Eagle Horizon Limited +集团 + +// xn--3ds443g : 2013-09-08 TLD REGISTRY LIMITED OY +在线 + +// xn--3oq18vl8pn36a : 2015-07-02 Volkswagen (China) Investment Co., Ltd. +大众汽车 + +// xn--3pxu8k : 2015-01-15 VeriSign Sarl +点看 + +// xn--42c2d9a : 2015-01-15 VeriSign Sarl +คอม + +// xn--45q11c : 2013-11-21 Zodiac Gemini Ltd +八卦 + +// xn--4gbrim : 2013-10-04 Fans TLD Limited +موقع + +// xn--55qw42g : 2013-11-08 China Organizational Name Administration Center +公益 + +// xn--55qx5d : 2013-11-14 China Internet Network Information Center (CNNIC) +公司 + +// xn--5su34j936bgsg : 2015-09-03 Shangri‐La International Hotel Management Limited +香格里拉 + +// xn--5tzm5g : 2014-12-22 Global Website TLD Asia Limited +网站 + +// xn--6frz82g : 2013-09-23 Afilias Limited +移动 + +// xn--6qq986b3xl : 2013-09-13 Tycoon Treasure Limited +我爱你 + +// xn--80adxhks : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +москва + +// xn--80aqecdr1a : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +католик + +// xn--80asehdb : 2013-07-14 CORE Association +онлайн + +// xn--80aswg : 2013-07-14 CORE Association +сайт + +// xn--8y0a063a : 2015-03-26 China United Network Communications Corporation Limited +联通 + +// xn--9dbq2a : 2015-01-15 VeriSign Sarl +קום + +// xn--9et52u : 2014-06-12 RISE VICTORY LIMITED +时尚 + +// xn--9krt00a : 2015-03-12 Sina Corporation +微博 + +// xn--b4w605ferd : 2014-08-07 Temasek Holdings (Private) Limited +淡马锡 + +// xn--bck1b9a5dre4c : 2015-02-26 Amazon Registry Services, Inc. +ファッション + +// xn--c1avg : 2013-11-14 Public Interest Registry +орг + +// xn--c2br7g : 2015-01-15 VeriSign Sarl +नेट + +// xn--cck2b3b : 2015-02-26 Amazon Registry Services, Inc. +ストア + +// xn--cckwcxetd : 2019-12-19 Amazon Registry Services, Inc. +アマゾン + +// xn--cg4bki : 2013-09-27 SAMSUNG SDS CO., LTD +삼성 + +// xn--czr694b : 2014-01-16 Internet DotTrademark Organisation Limited +商标 + +// xn--czrs0t : 2013-12-19 Binky Moon, LLC +商店 + +// xn--czru2d : 2013-11-21 Zodiac Aquarius Limited +商城 + +// xn--d1acj3b : 2013-11-20 The Foundation for Network Initiatives “The Smart Internet” +дети + +// xn--eckvdtc9d : 2014-12-18 Amazon Registry Services, Inc. +ポイント + +// xn--efvy88h : 2014-08-22 Guangzhou YU Wei Information Technology Co., Ltd. +新闻 + +// xn--fct429k : 2015-04-09 Amazon Registry Services, Inc. +家電 + +// xn--fhbei : 2015-01-15 VeriSign Sarl +كوم + +// xn--fiq228c5hs : 2013-09-08 TLD REGISTRY LIMITED OY +中文网 + +// xn--fiq64b : 2013-10-14 CITIC Group Corporation +中信 + +// xn--fjq720a : 2014-05-22 Binky Moon, LLC +娱乐 + +// xn--flw351e : 2014-07-31 Charleston Road Registry Inc. +谷歌 + +// xn--fzys8d69uvgm : 2015-05-14 PCCW Enterprises Limited +電訊盈科 + +// xn--g2xx48c : 2015-01-30 Minds + Machines Group Limited +购物 + +// xn--gckr3f0f : 2015-02-26 Amazon Registry Services, Inc. +クラウド + +// xn--gk3at1e : 2015-10-08 Amazon Registry Services, Inc. +通販 + +// xn--hxt814e : 2014-05-15 Zodiac Taurus Limited +网店 + +// xn--i1b6b1a6a2e : 2013-11-14 Public Interest Registry +संगठन + +// xn--imr513n : 2014-12-11 Internet DotTrademark Organisation Limited +餐厅 + +// xn--io0a7i : 2013-11-14 China Internet Network Information Center (CNNIC) +网络 + +// xn--j1aef : 2015-01-15 VeriSign Sarl +ком + +// xn--jlq480n2rg : 2019-12-19 Amazon Registry Services, Inc. 
+亚马逊 + +// xn--jlq61u9w7b : 2015-01-08 Nokia Corporation +诺基亚 + +// xn--jvr189m : 2015-02-26 Amazon Registry Services, Inc. +食品 + +// xn--kcrx77d1x4a : 2014-11-07 Koninklijke Philips N.V. +飞利浦 + +// xn--kput3i : 2014-02-13 Beijing RITT-Net Technology Development Co., Ltd +手机 + +// xn--mgba3a3ejt : 2014-11-20 Aramco Services Company +ارامكو + +// xn--mgba7c0bbn0a : 2015-05-14 Crescent Holding GmbH +العليان + +// xn--mgbaakc7dvf : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +اتصالات + +// xn--mgbab2bd : 2013-10-31 CORE Association +بازار + +// xn--mgbca7dzdo : 2015-07-30 Abu Dhabi Systems and Information Centre +ابوظبي + +// xn--mgbi4ecexp : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +كاثوليك + +// xn--mgbt3dhd : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +همراه + +// xn--mk1bu44c : 2015-01-15 VeriSign Sarl +닷컴 + +// xn--mxtq1m : 2014-03-06 Net-Chinese Co., Ltd. +政府 + +// xn--ngbc5azd : 2013-07-13 International Domain Registry Pty. Ltd. +شبكة + +// xn--ngbe9e0a : 2014-12-04 Kuwait Finance House +بيتك + +// xn--ngbrx : 2015-11-12 League of Arab States +عرب + +// xn--nqv7f : 2013-11-14 Public Interest Registry +机构 + +// xn--nqv7fs00ema : 2013-11-14 Public Interest Registry +组织机构 + +// xn--nyqy26a : 2014-11-07 Stable Tone Limited +健康 + +// xn--otu796d : 2017-08-06 Jiang Yu Liang Cai Technology Company Limited +招聘 + +// xn--p1acf : 2013-12-12 Rusnames Limited +рус + +// xn--pssy2u : 2015-01-15 VeriSign Sarl +大拿 + +// xn--q9jyb4c : 2013-09-17 Charleston Road Registry Inc. +みんな + +// xn--qcka1pmc : 2014-07-31 Charleston Road Registry Inc. +グーグル + +// xn--rhqv96g : 2013-09-11 Stable Tone Limited +世界 + +// xn--rovu88b : 2015-02-26 Amazon Registry Services, Inc. +書籍 + +// xn--ses554g : 2014-01-16 KNET Co., Ltd. +网址 + +// xn--t60b56a : 2015-01-15 VeriSign Sarl +닷넷 + +// xn--tckwe : 2015-01-15 VeriSign Sarl +コム + +// xn--tiq49xqyj : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +天主教 + +// xn--unup4y : 2013-07-14 Binky Moon, LLC +游戏 + +// xn--vermgensberater-ctb : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +vermögensberater + +// xn--vermgensberatung-pwb : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +vermögensberatung + +// xn--vhquv : 2013-08-27 Binky Moon, LLC +企业 + +// xn--vuq861b : 2014-10-16 Beijing Tele-info Network Technology Co., Ltd. +信息 + +// xn--w4r85el8fhu5dnra : 2015-04-30 Kerry Trading Co. Limited +嘉里大酒店 + +// xn--w4rs40l : 2015-07-30 Kerry Trading Co. Limited +嘉里 + +// xn--xhq521b : 2013-11-14 Guangzhou YU Wei Information Technology Co., Ltd. +广东 + +// xn--zfr164b : 2013-11-08 China Organizational Name Administration Center +政务 + +// xyz : 2013-12-05 XYZ.COM LLC +xyz + +// yachts : 2014-01-09 DERYachts, LLC +yachts + +// yahoo : 2015-04-02 Yahoo! Domain Services Inc. +yahoo + +// yamaxun : 2014-12-18 Amazon Registry Services, Inc. +yamaxun + +// yandex : 2014-04-10 Yandex Europe B.V. +yandex + +// yodobashi : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +yodobashi + +// yoga : 2014-05-29 Minds + Machines Group Limited +yoga + +// yokohama : 2013-12-12 GMO Registry, Inc. +yokohama + +// you : 2015-04-09 Amazon Registry Services, Inc. +you + +// youtube : 2014-05-01 Charleston Road Registry Inc. +youtube + +// yun : 2015-01-08 Beijing Qihu Keji Co., Ltd. +yun + +// zappos : 2015-06-25 Amazon Registry Services, Inc. 
+zappos + +// zara : 2014-11-07 Industria de Diseño Textil, S.A. (INDITEX, S.A.) +zara + +// zero : 2014-12-18 Amazon Registry Services, Inc. +zero + +// zip : 2014-05-08 Charleston Road Registry Inc. +zip + +// zone : 2013-11-14 Binky Moon, LLC +zone + +// zuerich : 2014-11-07 Kanton Zürich (Canton of Zurich) +zuerich + + +// ===END ICANN DOMAINS=== +// ===BEGIN PRIVATE DOMAINS=== +// (Note: these are in alphabetical order by company name) + +// 1GB LLC : https://www.1gb.ua/ +// Submitted by 1GB LLC +cc.ua +inf.ua +ltd.ua + +// 611coin : https://611project.org/ +611.to + +// Adobe : https://www.adobe.com/ +// Submitted by Ian Boston +adobeaemcloud.com +adobeaemcloud.net +*.dev.adobeaemcloud.com + +// Agnat sp. z o.o. : https://domena.pl +// Submitted by Przemyslaw Plewa +beep.pl + +// alboto.ca : http://alboto.ca +// Submitted by Anton Avramov +barsy.ca + +// Alces Software Ltd : http://alces-software.com +// Submitted by Mark J. Titorenko +*.compute.estate +*.alces.network + +// all-inkl.com : https://all-inkl.com +// Submitted by Werner Kaltofen +kasserver.com + +// Algorithmia, Inc. : algorithmia.com +// Submitted by Eli Perelman +*.algorithmia.com +!teams.algorithmia.com +!test.algorithmia.com + +// Altervista: https://www.altervista.org +// Submitted by Carlo Cannas +altervista.org + +// alwaysdata : https://www.alwaysdata.com +// Submitted by Cyril +alwaysdata.net + +// Amazon CloudFront : https://aws.amazon.com/cloudfront/ +// Submitted by Donavan Miller +cloudfront.net + +// Amazon Elastic Compute Cloud : https://aws.amazon.com/ec2/ +// Submitted by Luke Wells +*.compute.amazonaws.com +*.compute-1.amazonaws.com +*.compute.amazonaws.com.cn +us-east-1.amazonaws.com + +// Amazon Elastic Beanstalk : https://aws.amazon.com/elasticbeanstalk/ +// Submitted by Luke Wells +cn-north-1.eb.amazonaws.com.cn +cn-northwest-1.eb.amazonaws.com.cn +elasticbeanstalk.com +ap-northeast-1.elasticbeanstalk.com +ap-northeast-2.elasticbeanstalk.com +ap-northeast-3.elasticbeanstalk.com +ap-south-1.elasticbeanstalk.com +ap-southeast-1.elasticbeanstalk.com +ap-southeast-2.elasticbeanstalk.com +ca-central-1.elasticbeanstalk.com +eu-central-1.elasticbeanstalk.com +eu-west-1.elasticbeanstalk.com +eu-west-2.elasticbeanstalk.com +eu-west-3.elasticbeanstalk.com +sa-east-1.elasticbeanstalk.com +us-east-1.elasticbeanstalk.com +us-east-2.elasticbeanstalk.com +us-gov-west-1.elasticbeanstalk.com +us-west-1.elasticbeanstalk.com +us-west-2.elasticbeanstalk.com + +// Amazon Elastic Load Balancing : https://aws.amazon.com/elasticloadbalancing/ +// Submitted by Luke Wells +*.elb.amazonaws.com +*.elb.amazonaws.com.cn + +// Amazon S3 : https://aws.amazon.com/s3/ +// Submitted by Luke Wells +s3.amazonaws.com +s3-ap-northeast-1.amazonaws.com +s3-ap-northeast-2.amazonaws.com +s3-ap-south-1.amazonaws.com +s3-ap-southeast-1.amazonaws.com +s3-ap-southeast-2.amazonaws.com +s3-ca-central-1.amazonaws.com +s3-eu-central-1.amazonaws.com +s3-eu-west-1.amazonaws.com +s3-eu-west-2.amazonaws.com +s3-eu-west-3.amazonaws.com +s3-external-1.amazonaws.com +s3-fips-us-gov-west-1.amazonaws.com +s3-sa-east-1.amazonaws.com +s3-us-gov-west-1.amazonaws.com +s3-us-east-2.amazonaws.com +s3-us-west-1.amazonaws.com +s3-us-west-2.amazonaws.com +s3.ap-northeast-2.amazonaws.com +s3.ap-south-1.amazonaws.com +s3.cn-north-1.amazonaws.com.cn +s3.ca-central-1.amazonaws.com +s3.eu-central-1.amazonaws.com +s3.eu-west-2.amazonaws.com +s3.eu-west-3.amazonaws.com +s3.us-east-2.amazonaws.com +s3.dualstack.ap-northeast-1.amazonaws.com 
+s3.dualstack.ap-northeast-2.amazonaws.com +s3.dualstack.ap-south-1.amazonaws.com +s3.dualstack.ap-southeast-1.amazonaws.com +s3.dualstack.ap-southeast-2.amazonaws.com +s3.dualstack.ca-central-1.amazonaws.com +s3.dualstack.eu-central-1.amazonaws.com +s3.dualstack.eu-west-1.amazonaws.com +s3.dualstack.eu-west-2.amazonaws.com +s3.dualstack.eu-west-3.amazonaws.com +s3.dualstack.sa-east-1.amazonaws.com +s3.dualstack.us-east-1.amazonaws.com +s3.dualstack.us-east-2.amazonaws.com +s3-website-us-east-1.amazonaws.com +s3-website-us-west-1.amazonaws.com +s3-website-us-west-2.amazonaws.com +s3-website-ap-northeast-1.amazonaws.com +s3-website-ap-southeast-1.amazonaws.com +s3-website-ap-southeast-2.amazonaws.com +s3-website-eu-west-1.amazonaws.com +s3-website-sa-east-1.amazonaws.com +s3-website.ap-northeast-2.amazonaws.com +s3-website.ap-south-1.amazonaws.com +s3-website.ca-central-1.amazonaws.com +s3-website.eu-central-1.amazonaws.com +s3-website.eu-west-2.amazonaws.com +s3-website.eu-west-3.amazonaws.com +s3-website.us-east-2.amazonaws.com + +// Amsterdam Wireless: https://www.amsterdamwireless.nl/ +// Submitted by Imre Jonk +amsw.nl + +// Amune : https://amune.org/ +// Submitted by Team Amune +t3l3p0rt.net +tele.amune.org + +// Apigee : https://apigee.com/ +// Submitted by Apigee Security Team +apigee.io + +// Aptible : https://www.aptible.com/ +// Submitted by Thomas Orozco +on-aptible.com + +// ASEINet : https://www.aseinet.com/ +// Submitted by Asei SEKIGUCHI +user.aseinet.ne.jp +gv.vc +d.gv.vc + +// Asociación Amigos de la Informática "Euskalamiga" : http://encounter.eus/ +// Submitted by Hector Martin +user.party.eus + +// Association potager.org : https://potager.org/ +// Submitted by Lunar +pimienta.org +poivron.org +potager.org +sweetpepper.org + +// ASUSTOR Inc. : http://www.asustor.com +// Submitted by Vincent Tseng +myasustor.com + +// AVM : https://avm.de +// Submitted by Andreas Weise +myfritz.net + +// AW AdvisorWebsites.com Software Inc : https://advisorwebsites.com +// Submitted by James Kennedy +*.awdev.ca +*.advisor.ws + +// b-data GmbH : https://www.b-data.io +// Submitted by Olivier Benz +b-data.io + +// backplane : https://www.backplane.io +// Submitted by Anthony Voutas +backplaneapp.io + +// Balena : https://www.balena.io +// Submitted by Petros Angelatos +balena-devices.com + +// Banzai Cloud +// Submitted by Janos Matyas +*.banzai.cloud +app.banzaicloud.io +*.backyards.banzaicloud.io + + +// BetaInABox +// Submitted by Adrian +betainabox.com + +// BinaryLane : http://www.binarylane.com +// Submitted by Nathan O'Sullivan +bnr.la + +// Blackbaud, Inc. 
: https://www.blackbaud.com +// Submitted by Paul Crowder +blackbaudcdn.net + +// Boomla : https://boomla.com +// Submitted by Tibor Halter +boomla.net + +// Boxfuse : https://boxfuse.com +// Submitted by Axel Fontaine +boxfuse.io + +// bplaced : https://www.bplaced.net/ +// Submitted by Miroslav Bozic +square7.ch +bplaced.com +bplaced.de +square7.de +bplaced.net +square7.net + +// BrowserSafetyMark +// Submitted by Dave Tharp +browsersafetymark.io + +// Bytemark Hosting : https://www.bytemark.co.uk +// Submitted by Paul Cammish +uk0.bigv.io +dh.bytemark.co.uk +vm.bytemark.co.uk + +// callidomus : https://www.callidomus.com/ +// Submitted by Marcus Popp +mycd.eu + +// Carrd : https://carrd.co +// Submitted by AJ +carrd.co +crd.co +uwu.ai + +// CentralNic : http://www.centralnic.com/names/domains +// Submitted by registry +ae.org +br.com +cn.com +com.de +com.se +de.com +eu.com +gb.net +hu.net +jp.net +jpn.com +mex.com +ru.com +sa.com +se.net +uk.com +uk.net +us.com +za.bz +za.com + +// No longer operated by CentralNic, these entries should be adopted and/or removed by current operators +// Submitted by Gavin Brown +ar.com +gb.com +hu.com +kr.com +no.com +qc.com +uy.com + +// Africa.com Web Solutions Ltd : https://registry.africa.com +// Submitted by Gavin Brown +africa.com + +// iDOT Services Limited : http://www.domain.gr.com +// Submitted by Gavin Brown +gr.com + +// Radix FZC : http://domains.in.net +// Submitted by Gavin Brown +in.net +web.in + +// US REGISTRY LLC : http://us.org +// Submitted by Gavin Brown +us.org + +// co.com Registry, LLC : https://registry.co.com +// Submitted by Gavin Brown +co.com + +// Roar Domains LLC : https://roar.basketball/ +// Submitted by Gavin Brown +aus.basketball +nz.basketball + +// BRS Media : https://brsmedia.com/ +// Submitted by Gavin Brown +radio.am +radio.fm + +// Globe Hosting SRL : https://www.globehosting.com/ +// Submitted by Gavin Brown +co.ro +shop.ro + +// c.la : http://www.c.la/ +c.la + +// certmgr.org : https://certmgr.org +// Submitted by B. Blechschmidt +certmgr.org + +// Citrix : https://citrix.com +// Submitted by Alex Stoddard +xenapponazure.com + +// Civilized Discourse Construction Kit, Inc. : https://www.discourse.org/ +// Submitted by Rishabh Nambiar & Michael Brown +discourse.group +discourse.team + +// ClearVox : http://www.clearvox.nl/ +// Submitted by Leon Rowland +virtueeldomein.nl + +// Clever Cloud : https://www.clever-cloud.com/ +// Submitted by Quentin Adam +cleverapps.io + +// Clerk : https://www.clerk.dev +// Submitted by Colin Sidoti +*.lcl.dev +*.stg.dev + +// Clic2000 : https://clic2000.fr +// Submitted by Mathilde Blanchemanche +clic2000.net + +// Cloud66 : https://www.cloud66.com/ +// Submitted by Khash Sajadi +c66.me +cloud66.ws +cloud66.zone + +// CloudAccess.net : https://www.cloudaccess.net/ +// Submitted by Pawel Panek +jdevcloud.com +wpdevcloud.com +cloudaccess.host +freesite.host +cloudaccess.net + +// cloudControl : https://www.cloudcontrol.com/ +// Submitted by Tobias Wilken +cloudcontrolled.com +cloudcontrolapp.com + +// Cloudera, Inc. : https://www.cloudera.com/ +// Submitted by Philip Langdale +cloudera.site + +// Cloudflare, Inc. : https://www.cloudflare.com/ +// Submitted by Jake Riesterer +trycloudflare.com +workers.dev + +// Clovyr : https://clovyr.io +// Submitted by Patrick Nielsen +wnext.app + +// co.ca : http://registry.co.ca/ +co.ca + +// Co & Co : https://co-co.nl/ +// Submitted by Govert Versluis +*.otap.co + +// i-registry s.r.o. 
: http://www.i-registry.cz/ +// Submitted by Martin Semrad +co.cz + +// CDN77.com : http://www.cdn77.com +// Submitted by Jan Krpes +c.cdn77.org +cdn77-ssl.net +r.cdn77.net +rsc.cdn77.org +ssl.origin.cdn77-secure.org + +// Cloud DNS Ltd : http://www.cloudns.net +// Submitted by Aleksander Hristov +cloudns.asia +cloudns.biz +cloudns.club +cloudns.cc +cloudns.eu +cloudns.in +cloudns.info +cloudns.org +cloudns.pro +cloudns.pw +cloudns.us + +// Cloudeity Inc : https://cloudeity.com +// Submitted by Stefan Dimitrov +cloudeity.net + +// CNPY : https://cnpy.gdn +// Submitted by Angelo Gladding +cnpy.gdn + +// CoDNS B.V. +co.nl +co.no + +// Combell.com : https://www.combell.com +// Submitted by Thomas Wouters +webhosting.be +hosting-cluster.nl + +// Coordination Center for TLD RU and XN--P1AI : https://cctld.ru/en/domains/domens_ru/reserved/ +// Submitted by George Georgievsky +ac.ru +edu.ru +gov.ru +int.ru +mil.ru +test.ru + +// COSIMO GmbH : http://www.cosimo.de +// Submitted by Rene Marticke +dyn.cosidns.de +dynamisches-dns.de +dnsupdater.de +internet-dns.de +l-o-g-i-n.de +dynamic-dns.info +feste-ip.net +knx-server.net +static-access.net + +// Craynic, s.r.o. : http://www.craynic.com/ +// Submitted by Ales Krajnik +realm.cz + +// Cryptonomic : https://cryptonomic.net/ +// Submitted by Andrew Cady +*.cryptonomic.net + +// Cupcake : https://cupcake.io/ +// Submitted by Jonathan Rudenberg +cupcake.is + +// Curv UG : https://curv-labs.de/ +// Submitted by Marvin Wiesner +curv.dev + +// Customer OCI - Oracle Dyn https://cloud.oracle.com/home https://dyn.com/dns/ +// Submitted by Gregory Drake +// Note: This is intended to also include customer-oci.com due to wildcards implicitly including the current label +*.customer-oci.com +*.oci.customer-oci.com +*.ocp.customer-oci.com +*.ocs.customer-oci.com + +// cyon GmbH : https://www.cyon.ch/ +// Submitted by Dominic Luechinger +cyon.link +cyon.site + +// Danger Science Group: https://dangerscience.com/ +// Submitted by Skylar MacDonald +fnwk.site +folionetwork.site +platform0.app + +// Daplie, Inc : https://daplie.com +// Submitted by AJ ONeal +daplie.me +localhost.daplie.me + +// Datto, Inc. : https://www.datto.com/ +// Submitted by Philipp Heckel +dattolocal.com +dattorelay.com +dattoweb.com +mydatto.com +dattolocal.net +mydatto.net + +// Dansk.net : http://www.dansk.net/ +// Submitted by Anani Voule +biz.dk +co.dk +firm.dk +reg.dk +store.dk + +// dappnode.io : https://dappnode.io/ +// Submitted by Abel Boldu / DAppNode Team +dyndns.dappnode.io + +// dapps.earth : https://dapps.earth/ +// Submitted by Daniil Burdakov +*.dapps.earth +*.bzz.dapps.earth + +// Dark, Inc. : https://darklang.com +// Submitted by Paul Biggar +builtwithdark.com + +// Datawire, Inc : https://www.datawire.io +// Submitted by Richard Li +edgestack.me + +// Debian : https://www.debian.org/ +// Submitted by Peter Palfrader / Debian Sysadmin Team +debian.net + +// deSEC : https://desec.io/ +// Submitted by Peter Thomassen +dedyn.io + +// DNS Africa Ltd https://dns.business +// Submitted by Calvin Browne +jozi.biz + +// DNShome : https://www.dnshome.de/ +// Submitted by Norbert Auler +dnshome.de + +// DotArai : https://www.dotarai.com/ +// Submitted by Atsadawat Netcharadsang +online.th +shop.th + +// DrayTek Corp. : https://www.draytek.com/ +// Submitted by Paul Fang +drayddns.com + +// DreamHost : http://www.dreamhost.com/ +// Submitted by Andrew Farmer +dreamhosters.com + +// Drobo : http://www.drobo.com/ +// Submitted by Ricardo Padilha +mydrobo.com + +// Drud Holdings, LLC. 
: https://www.drud.com/ +// Submitted by Kevin Bridges +drud.io +drud.us + +// DuckDNS : http://www.duckdns.org/ +// Submitted by Richard Harper +duckdns.org + +// bitbridge.net : Submitted by Craig Welch, abeliidev@gmail.com +bitbridge.net + +// dy.fi : http://dy.fi/ +// Submitted by Heikki Hannikainen +dy.fi +tunk.org + +// DynDNS.com : http://www.dyndns.com/services/dns/dyndns/ +dyndns-at-home.com +dyndns-at-work.com +dyndns-blog.com +dyndns-free.com +dyndns-home.com +dyndns-ip.com +dyndns-mail.com +dyndns-office.com +dyndns-pics.com +dyndns-remote.com +dyndns-server.com +dyndns-web.com +dyndns-wiki.com +dyndns-work.com +dyndns.biz +dyndns.info +dyndns.org +dyndns.tv +at-band-camp.net +ath.cx +barrel-of-knowledge.info +barrell-of-knowledge.info +better-than.tv +blogdns.com +blogdns.net +blogdns.org +blogsite.org +boldlygoingnowhere.org +broke-it.net +buyshouses.net +cechire.com +dnsalias.com +dnsalias.net +dnsalias.org +dnsdojo.com +dnsdojo.net +dnsdojo.org +does-it.net +doesntexist.com +doesntexist.org +dontexist.com +dontexist.net +dontexist.org +doomdns.com +doomdns.org +dvrdns.org +dyn-o-saur.com +dynalias.com +dynalias.net +dynalias.org +dynathome.net +dyndns.ws +endofinternet.net +endofinternet.org +endoftheinternet.org +est-a-la-maison.com +est-a-la-masion.com +est-le-patron.com +est-mon-blogueur.com +for-better.biz +for-more.biz +for-our.info +for-some.biz +for-the.biz +forgot.her.name +forgot.his.name +from-ak.com +from-al.com +from-ar.com +from-az.net +from-ca.com +from-co.net +from-ct.com +from-dc.com +from-de.com +from-fl.com +from-ga.com +from-hi.com +from-ia.com +from-id.com +from-il.com +from-in.com +from-ks.com +from-ky.com +from-la.net +from-ma.com +from-md.com +from-me.org +from-mi.com +from-mn.com +from-mo.com +from-ms.com +from-mt.com +from-nc.com +from-nd.com +from-ne.com +from-nh.com +from-nj.com +from-nm.com +from-nv.com +from-ny.net +from-oh.com +from-ok.com +from-or.com +from-pa.com +from-pr.com +from-ri.com +from-sc.com +from-sd.com +from-tn.com +from-tx.com +from-ut.com +from-va.com +from-vt.com +from-wa.com +from-wi.com +from-wv.com +from-wy.com +ftpaccess.cc +fuettertdasnetz.de +game-host.org +game-server.cc +getmyip.com +gets-it.net +go.dyndns.org +gotdns.com +gotdns.org +groks-the.info +groks-this.info +ham-radio-op.net +here-for-more.info +hobby-site.com +hobby-site.org +home.dyndns.org +homedns.org +homeftp.net +homeftp.org +homeip.net +homelinux.com +homelinux.net +homelinux.org +homeunix.com +homeunix.net +homeunix.org +iamallama.com +in-the-band.net +is-a-anarchist.com +is-a-blogger.com +is-a-bookkeeper.com +is-a-bruinsfan.org +is-a-bulls-fan.com +is-a-candidate.org +is-a-caterer.com +is-a-celticsfan.org +is-a-chef.com +is-a-chef.net +is-a-chef.org +is-a-conservative.com +is-a-cpa.com +is-a-cubicle-slave.com +is-a-democrat.com +is-a-designer.com +is-a-doctor.com +is-a-financialadvisor.com +is-a-geek.com +is-a-geek.net +is-a-geek.org +is-a-green.com +is-a-guru.com +is-a-hard-worker.com +is-a-hunter.com +is-a-knight.org +is-a-landscaper.com +is-a-lawyer.com +is-a-liberal.com +is-a-libertarian.com +is-a-linux-user.org +is-a-llama.com +is-a-musician.com +is-a-nascarfan.com +is-a-nurse.com +is-a-painter.com +is-a-patsfan.org +is-a-personaltrainer.com +is-a-photographer.com +is-a-player.com +is-a-republican.com +is-a-rockstar.com +is-a-socialist.com +is-a-soxfan.org +is-a-student.com +is-a-teacher.com +is-a-techie.com +is-a-therapist.com +is-an-accountant.com +is-an-actor.com +is-an-actress.com +is-an-anarchist.com +is-an-artist.com +is-an-engineer.com 
+is-an-entertainer.com +is-by.us +is-certified.com +is-found.org +is-gone.com +is-into-anime.com +is-into-cars.com +is-into-cartoons.com +is-into-games.com +is-leet.com +is-lost.org +is-not-certified.com +is-saved.org +is-slick.com +is-uberleet.com +is-very-bad.org +is-very-evil.org +is-very-good.org +is-very-nice.org +is-very-sweet.org +is-with-theband.com +isa-geek.com +isa-geek.net +isa-geek.org +isa-hockeynut.com +issmarterthanyou.com +isteingeek.de +istmein.de +kicks-ass.net +kicks-ass.org +knowsitall.info +land-4-sale.us +lebtimnetz.de +leitungsen.de +likes-pie.com +likescandy.com +merseine.nu +mine.nu +misconfused.org +mypets.ws +myphotos.cc +neat-url.com +office-on-the.net +on-the-web.tv +podzone.net +podzone.org +readmyblog.org +saves-the-whales.com +scrapper-site.net +scrapping.cc +selfip.biz +selfip.com +selfip.info +selfip.net +selfip.org +sells-for-less.com +sells-for-u.com +sells-it.net +sellsyourhome.org +servebbs.com +servebbs.net +servebbs.org +serveftp.net +serveftp.org +servegame.org +shacknet.nu +simple-url.com +space-to-rent.com +stuff-4-sale.org +stuff-4-sale.us +teaches-yoga.com +thruhere.net +traeumtgerade.de +webhop.biz +webhop.info +webhop.net +webhop.org +worse-than.tv +writesthisblog.com + +// ddnss.de : https://www.ddnss.de/ +// Submitted by Robert Niedziela +ddnss.de +dyn.ddnss.de +dyndns.ddnss.de +dyndns1.de +dyn-ip24.de +home-webserver.de +dyn.home-webserver.de +myhome-server.de +ddnss.org + +// Definima : http://www.definima.com/ +// Submitted by Maxence Bitterli +definima.net +definima.io + +// dnstrace.pro : https://dnstrace.pro/ +// Submitted by Chris Partridge +bci.dnstrace.pro + +// Dynu.com : https://www.dynu.com/ +// Submitted by Sue Ye +ddnsfree.com +ddnsgeek.com +giize.com +gleeze.com +kozow.com +loseyourip.com +ooguy.com +theworkpc.com +casacam.net +dynu.net +accesscam.org +camdvr.org +freeddns.org +mywire.org +webredirect.org +myddns.rocks +blogsite.xyz + +// dynv6 : https://dynv6.com +// Submitted by Dominik Menke +dynv6.net + +// E4YOU spol. s.r.o. : https://e4you.cz/ +// Submitted by Vladimir Dudr +e4.cz + +// En root‽ : https://en-root.org +// Submitted by Emmanuel Raviart +en-root.fr + +// Enalean SAS: https://www.enalean.com +// Submitted by Thomas Cottier +mytuleap.com + +// ECG Robotics, Inc: https://ecgrobotics.org +// Submitted by +onred.one +staging.onred.one + +// Enonic : http://enonic.com/ +// Submitted by Erik Kaareng-Sunde +enonic.io +customer.enonic.io + +// EU.org https://eu.org/ +// Submitted by Pierre Beyssac +eu.org +al.eu.org +asso.eu.org +at.eu.org +au.eu.org +be.eu.org +bg.eu.org +ca.eu.org +cd.eu.org +ch.eu.org +cn.eu.org +cy.eu.org +cz.eu.org +de.eu.org +dk.eu.org +edu.eu.org +ee.eu.org +es.eu.org +fi.eu.org +fr.eu.org +gr.eu.org +hr.eu.org +hu.eu.org +ie.eu.org +il.eu.org +in.eu.org +int.eu.org +is.eu.org +it.eu.org +jp.eu.org +kr.eu.org +lt.eu.org +lu.eu.org +lv.eu.org +mc.eu.org +me.eu.org +mk.eu.org +mt.eu.org +my.eu.org +net.eu.org +ng.eu.org +nl.eu.org +no.eu.org +nz.eu.org +paris.eu.org +pl.eu.org +pt.eu.org +q-a.eu.org +ro.eu.org +ru.eu.org +se.eu.org +si.eu.org +sk.eu.org +tr.eu.org +uk.eu.org +us.eu.org + +// Evennode : http://www.evennode.com/ +// Submitted by Michal Kralik +eu-1.evennode.com +eu-2.evennode.com +eu-3.evennode.com +eu-4.evennode.com +us-1.evennode.com +us-2.evennode.com +us-3.evennode.com +us-4.evennode.com + +// eDirect Corp. : https://hosting.url.com.tw/ +// Submitted by C.S. chang +twmail.cc +twmail.net +twmail.org +mymailer.com.tw +url.tw + +// Fabrica Technologies, Inc. 
: https://www.fabrica.dev/ +// Submitted by Eric Jiang +onfabrica.com + +// Facebook, Inc. +// Submitted by Peter Ruibal +apps.fbsbx.com + +// FAITID : https://faitid.org/ +// Submitted by Maxim Alzoba +// https://www.flexireg.net/stat_info +ru.net +adygeya.ru +bashkiria.ru +bir.ru +cbg.ru +com.ru +dagestan.ru +grozny.ru +kalmykia.ru +kustanai.ru +marine.ru +mordovia.ru +msk.ru +mytis.ru +nalchik.ru +nov.ru +pyatigorsk.ru +spb.ru +vladikavkaz.ru +vladimir.ru +abkhazia.su +adygeya.su +aktyubinsk.su +arkhangelsk.su +armenia.su +ashgabad.su +azerbaijan.su +balashov.su +bashkiria.su +bryansk.su +bukhara.su +chimkent.su +dagestan.su +east-kazakhstan.su +exnet.su +georgia.su +grozny.su +ivanovo.su +jambyl.su +kalmykia.su +kaluga.su +karacol.su +karaganda.su +karelia.su +khakassia.su +krasnodar.su +kurgan.su +kustanai.su +lenug.su +mangyshlak.su +mordovia.su +msk.su +murmansk.su +nalchik.su +navoi.su +north-kazakhstan.su +nov.su +obninsk.su +penza.su +pokrovsk.su +sochi.su +spb.su +tashkent.su +termez.su +togliatti.su +troitsk.su +tselinograd.su +tula.su +tuva.su +vladikavkaz.su +vladimir.su +vologda.su + +// Fancy Bits, LLC : http://getchannels.com +// Submitted by Aman Gupta +channelsdvr.net +u.channelsdvr.net + +// Fastly Inc. : http://www.fastly.com/ +// Submitted by Fastly Security +fastly-terrarium.com +fastlylb.net +map.fastlylb.net +freetls.fastly.net +map.fastly.net +a.prod.fastly.net +global.prod.fastly.net +a.ssl.fastly.net +b.ssl.fastly.net +global.ssl.fastly.net + +// FASTVPS EESTI OU : https://fastvps.ru/ +// Submitted by Likhachev Vasiliy +fastvps-server.com +fastvps.host +myfast.host +fastvps.site +myfast.space + +// Featherhead : https://featherhead.xyz/ +// Submitted by Simon Menke +fhapp.xyz + +// Fedora : https://fedoraproject.org/ +// submitted by Patrick Uiterwijk +fedorainfracloud.org +fedorapeople.org +cloud.fedoraproject.org +app.os.fedoraproject.org +app.os.stg.fedoraproject.org + +// FearWorks Media Ltd. : https://fearworksmedia.co.uk +// submitted by Keith Fairley +conn.uk +copro.uk +couk.me +ukco.me + +// Fermax : https://fermax.com/ +// submitted by Koen Van Isterdael +mydobiss.com + +// Filegear Inc. : https://www.filegear.com +// Submitted by Jason Zhu +filegear.me +filegear-au.me +filegear-de.me +filegear-gb.me +filegear-ie.me +filegear-jp.me +filegear-sg.me + +// Firebase, Inc. +// Submitted by Chris Raynor +firebaseapp.com + +// fly.io: https://fly.io +// Submitted by Kurt Mackey +fly.dev +edgeapp.net +shw.io + +// Flynn : https://flynn.io +// Submitted by Jonathan Rudenberg +flynnhosting.net + +// Frederik Braun https://frederik-braun.com +// Submitted by Frederik Braun +0e.vc + +// Freebox : http://www.freebox.fr +// Submitted by Romain Fliedel +freebox-os.com +freeboxos.com +fbx-os.fr +fbxos.fr +freebox-os.fr +freeboxos.fr + +// freedesktop.org : https://www.freedesktop.org +// Submitted by Daniel Stone +freedesktop.org + +// FunkFeuer - Verein zur Förderung freier Netze : https://www.funkfeuer.at +// Submitted by Daniel A. Maierhofer +wien.funkfeuer.at + +// Futureweb OG : http://www.futureweb.at +// Submitted by Andreas Schnederle-Wagner +*.futurecms.at +*.ex.futurecms.at +*.in.futurecms.at +futurehosting.at +futuremailing.at +*.ex.ortsinfo.at +*.kunden.ortsinfo.at +*.statics.cloud + +// GDS : https://www.gov.uk/service-manual/operations/operating-servicegovuk-subdomains +// Submitted by David Illsley +service.gov.uk + +// Gehirn Inc. : https://www.gehirn.co.jp/ +// Submitted by Kohei YOSHIDA +gehirn.ne.jp +usercontent.jp + +// Gentlent, Inc. 
: https://www.gentlent.com +// Submitted by Tom Klein +gentapps.com +gentlentapis.com +lab.ms + +// GitHub, Inc. +// Submitted by Patrick Toomey +github.io +githubusercontent.com + +// GitLab, Inc. +// Submitted by Alex Hanselka +gitlab.io + +// Gitplac.si - https://gitplac.si +// Submitted by Aljaž Starc +gitpage.si + +// Glitch, Inc : https://glitch.com +// Submitted by Mads Hartmann +glitch.me + +// GMO Pepabo, Inc. : https://pepabo.com/ +// Submitted by dojineko +lolipop.io + +// GOV.UK Platform as a Service : https://www.cloud.service.gov.uk/ +// Submitted by Tom Whitwell +cloudapps.digital +london.cloudapps.digital + +// GOV.UK Pay : https://www.payments.service.gov.uk/ +// Submitted by Richard Baker +pymnt.uk + +// UKHomeOffice : https://www.gov.uk/government/organisations/home-office +// Submitted by Jon Shanks +homeoffice.gov.uk + +// GlobeHosting, Inc. +// Submitted by Zoltan Egresi +ro.im + +// GoIP DNS Services : http://www.goip.de +// Submitted by Christian Poulter +goip.de + +// Google, Inc. +// Submitted by Eduardo Vela +run.app +a.run.app +web.app +*.0emm.com +appspot.com +*.r.appspot.com +blogspot.ae +blogspot.al +blogspot.am +blogspot.ba +blogspot.be +blogspot.bg +blogspot.bj +blogspot.ca +blogspot.cf +blogspot.ch +blogspot.cl +blogspot.co.at +blogspot.co.id +blogspot.co.il +blogspot.co.ke +blogspot.co.nz +blogspot.co.uk +blogspot.co.za +blogspot.com +blogspot.com.ar +blogspot.com.au +blogspot.com.br +blogspot.com.by +blogspot.com.co +blogspot.com.cy +blogspot.com.ee +blogspot.com.eg +blogspot.com.es +blogspot.com.mt +blogspot.com.ng +blogspot.com.tr +blogspot.com.uy +blogspot.cv +blogspot.cz +blogspot.de +blogspot.dk +blogspot.fi +blogspot.fr +blogspot.gr +blogspot.hk +blogspot.hr +blogspot.hu +blogspot.ie +blogspot.in +blogspot.is +blogspot.it +blogspot.jp +blogspot.kr +blogspot.li +blogspot.lt +blogspot.lu +blogspot.md +blogspot.mk +blogspot.mr +blogspot.mx +blogspot.my +blogspot.nl +blogspot.no +blogspot.pe +blogspot.pt +blogspot.qa +blogspot.re +blogspot.ro +blogspot.rs +blogspot.ru +blogspot.se +blogspot.sg +blogspot.si +blogspot.sk +blogspot.sn +blogspot.td +blogspot.tw +blogspot.ug +blogspot.vn +cloudfunctions.net +cloud.goog +codespot.com +googleapis.com +googlecode.com +pagespeedmobilizer.com +publishproxy.com +withgoogle.com +withyoutube.com + +// Aaron Marais' Gitlab pages: https://lab.aaronleem.co.za +// Submitted by Aaron Marais +graphox.us + +// Group 53, LLC : https://www.group53.com +// Submitted by Tyler Todd +awsmppl.com + +// Hakaran group: http://hakaran.cz +// Submited by Arseniy Sokolov +fin.ci +free.hr +caa.li +ua.rs +conf.se + +// Handshake : https://handshake.org +// Submitted by Mike Damm +hs.zone +hs.run + +// Hashbang : https://hashbang.sh +hashbang.sh + +// Hasura : https://hasura.io +// Submitted by Shahidh K Muhammed +hasura.app +hasura-app.io + +// Hepforge : https://www.hepforge.org +// Submitted by David Grellscheid +hepforge.org + +// Heroku : https://www.heroku.com/ +// Submitted by Tom Maher +herokuapp.com +herokussl.com + +// Hibernating Rhinos +// Submitted by Oren Eini +myravendb.com +ravendb.community +ravendb.me +development.run +ravendb.run + +// HOSTBIP REGISTRY : https://www.hostbip.com/ +// Submitted by Atanunu Igbunuroghene +bpl.biz +orx.biz +ng.city +biz.gl +ng.ink +col.ng +firm.ng +gen.ng +ltd.ng +ngo.ng +ng.school +sch.so + +// HostyHosting (hostyhosting.com) +hostyhosting.io + +// Häkkinen.fi +// Submitted by Eero Häkkinen +häkkinen.fi + +// Ici la Lune : http://www.icilalune.com/ +// Submitted by Simon Morvan 
+*.moonscale.io +moonscale.net + +// iki.fi +// Submitted by Hannu Aronsson +iki.fi + +// Individual Network Berlin e.V. : https://www.in-berlin.de/ +// Submitted by Christian Seitz +dyn-berlin.de +in-berlin.de +in-brb.de +in-butter.de +in-dsl.de +in-dsl.net +in-dsl.org +in-vpn.de +in-vpn.net +in-vpn.org + +// info.at : http://www.info.at/ +biz.at +info.at + +// info.cx : http://info.cx +// Submitted by Jacob Slater +info.cx + +// Interlegis : http://www.interlegis.leg.br +// Submitted by Gabriel Ferreira +ac.leg.br +al.leg.br +am.leg.br +ap.leg.br +ba.leg.br +ce.leg.br +df.leg.br +es.leg.br +go.leg.br +ma.leg.br +mg.leg.br +ms.leg.br +mt.leg.br +pa.leg.br +pb.leg.br +pe.leg.br +pi.leg.br +pr.leg.br +rj.leg.br +rn.leg.br +ro.leg.br +rr.leg.br +rs.leg.br +sc.leg.br +se.leg.br +sp.leg.br +to.leg.br + +// intermetrics GmbH : https://pixolino.com/ +// Submitted by Wolfgang Schwarz +pixolino.com + +// Internet-Pro, LLP: https://netangels.ru/ +// Submited by Vasiliy Sheredeko +na4u.ru + +// IPiFony Systems, Inc. : https://www.ipifony.com/ +// Submitted by Matthew Hardeman +ipifony.net + +// IServ GmbH : https://iserv.eu +// Submitted by Kim-Alexander Brodowski +mein-iserv.de +schulserver.de +test-iserv.de +iserv.dev + +// I-O DATA DEVICE, INC. : http://www.iodata.com/ +// Submitted by Yuji Minagawa +iobb.net + +//Jelastic, Inc. : https://jelastic.com/ +// Submited by Ihor Kolodyuk +appengine.flow.ch +vip.jelastic.cloud +jele.cloud +jele.club +dopaas.com +hidora.com +jcloud.ik-server.com +demo.jelastic.com +paas.massivegrid.com +j.scaleforce.com.cy +jelastic.dogado.eu +fi.cloudplatform.fi +paas.datacenter.fi +jele.host +mircloud.host +jele.io +cloudjiffy.net +jls-sto1.elastx.net +jelastic.saveincloud.net +jelastic.regruhosting.ru +jele.site +jelastic.team +j.layershift.co.uk + +// Jino : https://www.jino.ru +// Submitted by Sergey Ulyashin +myjino.ru +*.hosting.myjino.ru +*.landing.myjino.ru +*.spectrum.myjino.ru +*.vps.myjino.ru + +// Joyent : https://www.joyent.com/ +// Submitted by Brian Bennett +*.triton.zone +*.cns.joyent.com + +// JS.ORG : http://dns.js.org +// Submitted by Stefan Keim +js.org + +// KaasHosting : http://www.kaashosting.nl/ +// Submitted by Wouter Bakker +kaas.gg +khplay.nl + +// Keyweb AG : https://www.keyweb.de +// Submitted by Martin Dannehl +keymachine.de + +// KingHost : https://king.host +// Submitted by Felipe Keller Braz +kinghost.net +uni5.net + +// KnightPoint Systems, LLC : http://www.knightpoint.com/ +// Submitted by Roy Keene +knightpoint.systems + +// KUROKU LTD : https://kuroku.ltd/ +// Submitted by DisposaBoy +oya.to + +// .KRD : http://nic.krd/data/krd/Registration%20Policy.pdf +co.krd +edu.krd + +// LCube - Professional hosting e.K. : https://www.lcube-webhosting.de +// Submitted by Lars Laehn +git-repos.de +lcube-server.de +svn-repos.de + +// Leadpages : https://www.leadpages.net +// Submitted by Greg Dallavalle +leadpages.co +lpages.co +lpusercontent.com + +// Lelux.fi : https://lelux.fi/ +// Submitted by Lelux Admin +lelux.site + +// Lifetime Hosting : https://Lifetime.Hosting/ +// Submitted by Mike Fillator +co.business +co.education +co.events +co.financial +co.network +co.place +co.technology + +// Lightmaker Property Manager, Inc. 
: https://app.lmpm.com/ +// Submitted by Greg Holland +app.lmpm.com + +// Linki Tools UG : https://linki.tools +// Submitted by Paulo Matos +linkitools.space + +// linkyard ldt: https://www.linkyard.ch/ +// Submitted by Mario Siegenthaler +linkyard.cloud +linkyard-cloud.ch + +// Linode : https://linode.com +// Submitted by +members.linode.com +*.nodebalancer.linode.com +*.linodeobjects.com + +// LiquidNet Ltd : http://www.liquidnetlimited.com/ +// Submitted by Victor Velchev +we.bs + +// Log'in Line : https://www.loginline.com/ +// Submitted by Rémi Mach +loginline.app +loginline.dev +loginline.io +loginline.services +loginline.site + +// LubMAN UMCS Sp. z o.o : https://lubman.pl/ +// Submitted by Ireneusz Maliszewski +krasnik.pl +leczna.pl +lubartow.pl +lublin.pl +poniatowa.pl +swidnik.pl + +// Lug.org.uk : https://lug.org.uk +// Submitted by Jon Spriggs +uklugs.org +glug.org.uk +lug.org.uk +lugs.org.uk + +// Lukanet Ltd : https://lukanet.com +// Submitted by Anton Avramov +barsy.bg +barsy.co.uk +barsyonline.co.uk +barsycenter.com +barsyonline.com +barsy.club +barsy.de +barsy.eu +barsy.in +barsy.info +barsy.io +barsy.me +barsy.menu +barsy.mobi +barsy.net +barsy.online +barsy.org +barsy.pro +barsy.pub +barsy.shop +barsy.site +barsy.support +barsy.uk + +// Magento Commerce +// Submitted by Damien Tournoud +*.magentosite.cloud + +// May First - People Link : https://mayfirst.org/ +// Submitted by Jamie McClelland +mayfirst.info +mayfirst.org + +// Mail.Ru Group : https://hb.cldmail.ru +// Submitted by Ilya Zaretskiy +hb.cldmail.ru + +// mcpe.me : https://mcpe.me +// Submitted by Noa Heyl +mcpe.me + +// McHost : https://mchost.ru +// Submitted by Evgeniy Subbotin +mcdir.ru +vps.mcdir.ru + +// Memset hosting : https://www.memset.com +// Submitted by Tom Whitwell +miniserver.com +memset.net + +// MetaCentrum, CESNET z.s.p.o. : https://www.metacentrum.cz/en/ +// Submitted by Zdeněk Šustr +*.cloud.metacentrum.cz +custom.metacentrum.cz + +// MetaCentrum, CESNET z.s.p.o. 
: https://www.metacentrum.cz/en/ +// Submitted by Radim Janča +flt.cloud.muni.cz +usr.cloud.muni.cz + +// Meteor Development Group : https://www.meteor.com/hosting +// Submitted by Pierre Carrier +meteorapp.com +eu.meteorapp.com + +// Michau Enterprises Limited : http://www.co.pl/ +co.pl + +// Microsoft Corporation : http://microsoft.com +// Submitted by Mostafa Elzeiny +*.azurecontainer.io +azurewebsites.net +azure-mobile.net +cloudapp.net + +// minion.systems : http://minion.systems +// Submitted by Robert Böttinger +csx.cc + +// MobileEducation, LLC : https://joinforte.com +// Submitted by Grayson Martin +forte.id + +// Mozilla Corporation : https://mozilla.com +// Submitted by Ben Francis +mozilla-iot.org + +// Mozilla Foundation : https://mozilla.org/ +// Submitted by glob +bmoattachments.org + +// MSK-IX : https://www.msk-ix.ru/ +// Submitted by Khannanov Roman +net.ru +org.ru +pp.ru + +// Mythic Beasts : https://www.mythic-beasts.com +// Submitted by Paul Cammish +hostedpi.com +customer.mythic-beasts.com +lynx.mythic-beasts.com +ocelot.mythic-beasts.com +onza.mythic-beasts.com +sphinx.mythic-beasts.com +vs.mythic-beasts.com +x.mythic-beasts.com +yali.mythic-beasts.com +cust.retrosnub.co.uk + +// Nabu Casa : https://www.nabucasa.com +// Submitted by Paulus Schoutsen +ui.nabu.casa + +// Names.of.London : https://names.of.london/ +// Submitted by James Stevens or +pony.club +of.fashion +on.fashion +of.football +in.london +of.london +for.men +and.mom +for.mom +for.one +for.sale +of.work +to.work + +// NCTU.ME : https://nctu.me/ +// Submitted by Tocknicsu +nctu.me + +// Netlify : https://www.netlify.com +// Submitted by Jessica Parsons +netlify.app + +// Neustar Inc. +// Submitted by Trung Tran +4u.com + +// ngrok : https://ngrok.com/ +// Submitted by Alan Shreve +ngrok.io + +// Nimbus Hosting Ltd. : https://www.nimbushosting.co.uk/ +// Submitted by Nicholas Ford +nh-serv.co.uk + +// NFSN, Inc. 
: https://www.NearlyFreeSpeech.NET/ +// Submitted by Jeff Wheelhouse +nfshost.com + +// Now-DNS : https://now-dns.com +// Submitted by Steve Russell +dnsking.ch +mypi.co +n4t.co +001www.com +ddnslive.com +myiphost.com +forumz.info +16-b.it +32-b.it +64-b.it +soundcast.me +tcp4.me +dnsup.net +hicam.net +now-dns.net +ownip.net +vpndns.net +dynserv.org +now-dns.org +x443.pw +now-dns.top +ntdll.top +freeddns.us +crafting.xyz +zapto.xyz + +// nsupdate.info : https://www.nsupdate.info/ +// Submitted by Thomas Waldmann +nsupdate.info +nerdpol.ovh + +// No-IP.com : https://noip.com/ +// Submitted by Deven Reza +blogsyte.com +brasilia.me +cable-modem.org +ciscofreak.com +collegefan.org +couchpotatofries.org +damnserver.com +ddns.me +ditchyourip.com +dnsfor.me +dnsiskinky.com +dvrcam.info +dynns.com +eating-organic.net +fantasyleague.cc +geekgalaxy.com +golffan.us +health-carereform.com +homesecuritymac.com +homesecuritypc.com +hopto.me +ilovecollege.info +loginto.me +mlbfan.org +mmafan.biz +myactivedirectory.com +mydissent.net +myeffect.net +mymediapc.net +mypsx.net +mysecuritycamera.com +mysecuritycamera.net +mysecuritycamera.org +net-freaks.com +nflfan.org +nhlfan.net +no-ip.ca +no-ip.co.uk +no-ip.net +noip.us +onthewifi.com +pgafan.net +point2this.com +pointto.us +privatizehealthinsurance.net +quicksytes.com +read-books.org +securitytactics.com +serveexchange.com +servehumour.com +servep2p.com +servesarcasm.com +stufftoread.com +ufcfan.org +unusualperson.com +workisboring.com +3utilities.com +bounceme.net +ddns.net +ddnsking.com +gotdns.ch +hopto.org +myftp.biz +myftp.org +myvnc.com +no-ip.biz +no-ip.info +no-ip.org +noip.me +redirectme.net +servebeer.com +serveblog.net +servecounterstrike.com +serveftp.com +servegame.com +servehalflife.com +servehttp.com +serveirc.com +serveminecraft.net +servemp3.com +servepics.com +servequake.com +sytes.net +webhop.me +zapto.org + +// NodeArt : https://nodeart.io +// Submitted by Konstantin Nosov +stage.nodeart.io + +// Nodum B.V. : https://nodum.io/ +// Submitted by Wietse Wind +nodum.co +nodum.io + +// Nucleos Inc. : https://nucleos.com +// Submitted by Piotr Zduniak +pcloud.host + +// NYC.mn : http://www.information.nyc.mn +// Submitted by Matthew Brown +nyc.mn + +// NymNom : https://nymnom.com/ +// Submitted by NymNom +nom.ae +nom.af +nom.ai +nom.al +nym.by +nom.bz +nym.bz +nom.cl +nym.ec +nom.gd +nom.ge +nom.gl +nym.gr +nom.gt +nym.gy +nym.hk +nom.hn +nym.ie +nom.im +nom.ke +nym.kz +nym.la +nym.lc +nom.li +nym.li +nym.lt +nym.lu +nom.lv +nym.me +nom.mk +nym.mn +nym.mx +nom.nu +nym.nz +nym.pe +nym.pt +nom.pw +nom.qa +nym.ro +nom.rs +nom.si +nym.sk +nom.st +nym.su +nym.sx +nom.tj +nym.tw +nom.ug +nom.uy +nom.vc +nom.vg + +// Observable, Inc. : https://observablehq.com +// Submitted by Mike Bostock +static.observableusercontent.com + +// Octopodal Solutions, LLC. : https://ulterius.io/ +// Submitted by Andrew Sampson +cya.gg + +// OMG.LOL : +// Submitted by Adam Newbold +omg.lol + +// Omnibond Systems, LLC. : https://www.omnibond.com +// Submitted by Cole Estep +cloudycluster.net + +// OmniWe Limited: https://omniwe.com +// Submitted by Vicary Archangel +omniwe.site + +// One Fold Media : http://www.onefoldmedia.com/ +// Submitted by Eddie Jones +nid.io + +// Open Social : https://www.getopensocial.com/ +// Submitted by Alexander Varwijk +opensocial.site + +// OpenCraft GmbH : http://opencraft.com/ +// Submitted by Sven Marnach +opencraft.hosting + +// Opera Software, A.S.A. 
+// Submitted by Yngve Pettersen +operaunite.com + +// Oursky Limited : https://skygear.io/ +// Submited by Skygear Developer +skygearapp.com + +// OutSystems +// Submitted by Duarte Santos +outsystemscloud.com + +// OwnProvider GmbH: http://www.ownprovider.com +// Submitted by Jan Moennich +ownprovider.com +own.pm + +// OwO : https://whats-th.is/ +// Submitted by Dean Sheather +*.owo.codes + +// OX : http://www.ox.rs +// Submitted by Adam Grand +ox.rs + +// oy.lc +// Submitted by Charly Coste +oy.lc + +// Pagefog : https://pagefog.com/ +// Submitted by Derek Myers +pgfog.com + +// Pagefront : https://www.pagefronthq.com/ +// Submitted by Jason Kriss +pagefrontapp.com + +// PageXL : https://pagexl.com +// Submitted by Yann Guichard +pagexl.com + +// pcarrier.ca Software Inc: https://pcarrier.ca/ +// Submitted by Pierre Carrier +bar0.net +bar1.net +bar2.net +rdv.to + +// .pl domains (grandfathered) +art.pl +gliwice.pl +krakow.pl +poznan.pl +wroc.pl +zakopane.pl + +// Pantheon Systems, Inc. : https://pantheon.io/ +// Submitted by Gary Dylina +pantheonsite.io +gotpantheon.com + +// Peplink | Pepwave : http://peplink.com/ +// Submitted by Steve Leung +mypep.link + +// Perspecta : https://perspecta.com/ +// Submitted by Kenneth Van Alstyne +perspecta.cloud + +// Planet-Work : https://www.planet-work.com/ +// Submitted by Frédéric VANNIÈRE +on-web.fr + +// Platform.sh : https://platform.sh +// Submitted by Nikola Kotur +bc.platform.sh +ent.platform.sh +eu.platform.sh +us.platform.sh +*.platformsh.site + +// Platter: https://platter.dev +// Submitted by Patrick Flor +platter-app.com +platter-app.dev +platterp.us + +// Plesk : https://www.plesk.com/ +// Submitted by Anton Akhtyamov +pdns.page +plesk.page +pleskns.com + +// Port53 : https://port53.io/ +// Submitted by Maximilian Schieder +dyn53.io + +// Positive Codes Technology Company : http://co.bn/faq.html +// Submitted by Zulfais +co.bn + +// prgmr.com : https://prgmr.com/ +// Submitted by Sarah Newman +xen.prgmr.com + +// priv.at : http://www.nic.priv.at/ +// Submitted by registry +priv.at + +// privacytools.io : https://www.privacytools.io/ +// Submitted by Jonah Aragon +prvcy.page + +// Protocol Labs : https://protocol.ai/ +// Submitted by Michael Burns +*.dweb.link + +// Protonet GmbH : http://protonet.io +// Submitted by Martin Meier +protonet.io + +// Publication Presse Communication SARL : https://ppcom.fr +// Submitted by Yaacov Akiba Slama +chirurgiens-dentistes-en-france.fr +byen.site + +// pubtls.org: https://www.pubtls.org +// Submitted by Kor Nielsen +pubtls.org + +// Qualifio : https://qualifio.com/ +// Submitted by Xavier De Cock +qualifioapp.com + +// QuickBackend: https://www.quickbackend.com +// Submitted by Dani Biro +qbuser.com + +// Redstar Consultants : https://www.redstarconsultants.com/ +// Submitted by Jons Slemmer +instantcloud.cn + +// Russian Academy of Sciences +// Submitted by Tech Support +ras.ru + +// QA2 +// Submitted by Daniel Dent (https://www.danieldent.com/) +qa2.com + +// QCX +// Submitted by Cassandra Beelen +qcx.io +*.sys.qcx.io + +// QNAP System Inc : https://www.qnap.com +// Submitted by Nick Chang +dev-myqnapcloud.com +alpha-myqnapcloud.com +myqnapcloud.com + +// Quip : https://quip.com +// Submitted by Patrick Linehan +*.quipelements.com + +// Qutheory LLC : http://qutheory.io +// Submitted by Jonas Schwartz +vapor.cloud +vaporcloud.io + +// Rackmaze LLC : https://www.rackmaze.com +// Submitted by Kirill Pertsev +rackmaze.com +rackmaze.net + +// Rakuten Games, Inc : https://dev.viberplay.io +// 
Submitted by Joshua Zhang +g.vbrplsbx.io + +// Rancher Labs, Inc : https://rancher.com +// Submitted by Vincent Fiduccia +*.on-k3s.io +*.on-rancher.cloud +*.on-rio.io + +// Read The Docs, Inc : https://www.readthedocs.org +// Submitted by David Fischer +readthedocs.io + +// Red Hat, Inc. OpenShift : https://openshift.redhat.com/ +// Submitted by Tim Kramer +rhcloud.com + +// Render : https://render.com +// Submitted by Anurag Goel +app.render.com +onrender.com + +// Repl.it : https://repl.it +// Submitted by Mason Clayton +repl.co +repl.run + +// Resin.io : https://resin.io +// Submitted by Tim Perry +resindevice.io +devices.resinstaging.io + +// RethinkDB : https://www.rethinkdb.com/ +// Submitted by Chris Kastorff +hzc.io + +// Revitalised Limited : http://www.revitalised.co.uk +// Submitted by Jack Price +wellbeingzone.eu +ptplus.fit +wellbeingzone.co.uk + +// Rochester Institute of Technology : http://www.rit.edu/ +// Submitted by Jennifer Herting +git-pages.rit.edu + +// Sandstorm Development Group, Inc. : https://sandcats.io/ +// Submitted by Asheesh Laroia +sandcats.io + +// SBE network solutions GmbH : https://www.sbe.de/ +// Submitted by Norman Meilick +logoip.de +logoip.com + +// schokokeks.org GbR : https://schokokeks.org/ +// Submitted by Hanno Böck +schokokeks.net + +// Scottish Government: https://www.gov.scot +// Submitted by Martin Ellis +gov.scot + +// Scry Security : http://www.scrysec.com +// Submitted by Shante Adam +scrysec.com + +// Securepoint GmbH : https://www.securepoint.de +// Submitted by Erik Anders +firewall-gateway.com +firewall-gateway.de +my-gateway.de +my-router.de +spdns.de +spdns.eu +firewall-gateway.net +my-firewall.org +myfirewall.org +spdns.org + +// Seidat : https://www.seidat.com +// Submitted by Artem Kondratev +seidat.net + +// Senseering GmbH : https://www.senseering.de +// Submitted by Felix Mönckemeyer +senseering.net + +// Service Online LLC : http://drs.ua/ +// Submitted by Serhii Bulakh +biz.ua +co.ua +pp.ua + +// ShiftEdit : https://shiftedit.net/ +// Submitted by Adam Jimenez +shiftedit.io + +// Shopblocks : http://www.shopblocks.com/ +// Submitted by Alex Bowers +myshopblocks.com + +// Shopit : https://www.shopitcommerce.com/ +// Submitted by Craig McMahon +shopitsite.com + +// shopware AG : https://shopware.com +// Submitted by Jens Küper +shopware.store + +// Siemens Mobility GmbH +// Submitted by Oliver Graebner +mo-siemens.io + +// SinaAppEngine : http://sae.sina.com.cn/ +// Submitted by SinaAppEngine +1kapp.com +appchizi.com +applinzi.com +sinaapp.com +vipsinaapp.com + +// Siteleaf : https://www.siteleaf.com/ +// Submitted by Skylar Challand +siteleaf.net + +// Skyhat : http://www.skyhat.io +// Submitted by Shante Adam +bounty-full.com +alpha.bounty-full.com +beta.bounty-full.com + +// Small Technology Foundation : https://small-tech.org +// Submitted by Aral Balkan +small-web.org + +// Stackhero : https://www.stackhero.io +// Submitted by Adrien Gillon +stackhero-network.com + +// staticland : https://static.land +// Submitted by Seth Vincent +static.land +dev.static.land +sites.static.land + +// Sony Interactive Entertainment LLC : https://sie.com/ +// Submitted by David Coles +playstation-cloud.com + +// SourceLair PC : https://www.sourcelair.com +// Submitted by Antonis Kalipetis +apps.lair.io +*.stolos.io + +// SpaceKit : https://www.spacekit.io/ +// Submitted by Reza Akhavan +spacekit.io + +// SpeedPartner GmbH: https://www.speedpartner.de/ +// Submitted by Stefan Neufeind +customer.speedpartner.de + +// Standard Library : 
https://stdlib.com +// Submitted by Jacob Lee +api.stdlib.com + +// Storj Labs Inc. : https://storj.io/ +// Submitted by Philip Hutchins +storj.farm + +// Studenten Net Twente : http://www.snt.utwente.nl/ +// Submitted by Silke Hofstra +utwente.io + +// Student-Run Computing Facility : https://www.srcf.net/ +// Submitted by Edwin Balani +soc.srcf.net +user.srcf.net + +// Sub 6 Limited: http://www.sub6.com +// Submitted by Dan Miller +temp-dns.com + +// Swisscom Application Cloud: https://developer.swisscom.com +// Submitted by Matthias.Winzeler +applicationcloud.io +scapp.io + +// Symfony, SAS : https://symfony.com/ +// Submitted by Fabien Potencier +*.s5y.io +*.sensiosite.cloud + +// Syncloud : https://syncloud.org +// Submitted by Boris Rybalkin +syncloud.it + +// Synology, Inc. : https://www.synology.com/ +// Submitted by Rony Weng +diskstation.me +dscloud.biz +dscloud.me +dscloud.mobi +dsmynas.com +dsmynas.net +dsmynas.org +familyds.com +familyds.net +familyds.org +i234.me +myds.me +synology.me +vpnplus.to +direct.quickconnect.to + +// TAIFUN Software AG : http://taifun-software.de +// Submitted by Bjoern Henke +taifun-dns.de + +// TASK geographical domains (www.task.gda.pl/uslugi/dns) +gda.pl +gdansk.pl +gdynia.pl +med.pl +sopot.pl + +// Teckids e.V. : https://www.teckids.org +// Submitted by Dominik George +edugit.org + +// Telebit : https://telebit.cloud +// Submitted by AJ ONeal +telebit.app +telebit.io +*.telebit.xyz + +// The Gwiddle Foundation : https://gwiddlefoundation.org.uk +// Submitted by Joshua Bayfield +gwiddle.co.uk + +// Thingdust AG : https://thingdust.com/ +// Submitted by Adrian Imboden +thingdustdata.com +cust.dev.thingdust.io +cust.disrec.thingdust.io +cust.prod.thingdust.io +cust.testing.thingdust.io +*.firenet.ch +*.svc.firenet.ch + +// Tlon.io : https://tlon.io +// Submitted by Mark Staarink +arvo.network +azimuth.network + +// TownNews.com : http://www.townnews.com +// Submitted by Dustin Ward +bloxcms.com +townnews-staging.com + +// TrafficPlex GmbH : https://www.trafficplex.de/ +// Submitted by Phillipp Röll +12hp.at +2ix.at +4lima.at +lima-city.at +12hp.ch +2ix.ch +4lima.ch +lima-city.ch +trafficplex.cloud +de.cool +12hp.de +2ix.de +4lima.de +lima-city.de +1337.pictures +clan.rip +lima-city.rocks +webspace.rocks +lima.zone + +// TransIP : https://www.transip.nl +// Submitted by Rory Breuk +*.transurl.be +*.transurl.eu +*.transurl.nl + +// TuxFamily : http://tuxfamily.org +// Submitted by TuxFamily administrators +tuxfamily.org + +// TwoDNS : https://www.twodns.de/ +// Submitted by TwoDNS-Support +dd-dns.de +diskstation.eu +diskstation.org +dray-dns.de +draydns.de +dyn-vpn.de +dynvpn.de +mein-vigor.de +my-vigor.de +my-wan.de +syno-ds.de +synology-diskstation.de +synology-ds.de + +// Uberspace : https://uberspace.de +// Submitted by Moritz Werner +uber.space +*.uberspace.de + +// UDR Limited : http://www.udr.hk.com +// Submitted by registry +hk.com +hk.org +ltd.hk +inc.hk + +// United Gameserver GmbH : https://united-gameserver.de +// Submitted by Stefan Schwarz +virtualuser.de +virtual-user.de + +// urown.net : https://urown.net +// Submitted by Hostmaster +urown.cloud +dnsupdate.info + +// .US +// Submitted by Ed Moore +lib.de.us + +// VeryPositive SIA : http://very.lv +// Submitted by Danko Aleksejevs +2038.io + +// Vercel, Inc : https://vercel.com/ +// Submitted by Connor Davis +vercel.app +vercel.dev +now.sh + +// Viprinet Europe GmbH : http://www.viprinet.com +// Submitted by Simon Kissel +router.management + +// Virtual-Info : 
https://www.virtual-info.info/ +// Submitted by Adnan RIHAN +v-info.info + +// Voorloper.com: https://voorloper.com +// Submitted by Nathan van Bakel +voorloper.cloud + +// Voxel.sh DNS : https://voxel.sh/dns/ +// Submitted by Mia Rehlinger +neko.am +nyaa.am +be.ax +cat.ax +es.ax +eu.ax +gg.ax +mc.ax +us.ax +xy.ax +nl.ci +xx.gl +app.gp +blog.gt +de.gt +to.gt +be.gy +cc.hn +blog.kg +io.kg +jp.kg +tv.kg +uk.kg +us.kg +de.ls +at.md +de.md +jp.md +to.md +uwu.nu +indie.porn +vxl.sh +ch.tc +me.tc +we.tc +nyan.to +at.vg +blog.vu +dev.vu +me.vu + +// V.UA Domain Administrator : https://domain.v.ua/ +// Submitted by Serhii Rostilo +v.ua + +// Waffle Computer Inc., Ltd. : https://docs.waffleinfo.com +// Submitted by Masayuki Note +wafflecell.com + +// WebHare bv: https://www.webhare.com/ +// Submitted by Arnold Hendriks +*.webhare.dev + +// WeDeploy by Liferay, Inc. : https://www.wedeploy.com +// Submitted by Henrique Vicente +wedeploy.io +wedeploy.me +wedeploy.sh + +// Western Digital Technologies, Inc : https://www.wdc.com +// Submitted by Jung Jin +remotewd.com + +// WIARD Enterprises : https://wiardweb.com +// Submitted by Kidd Hustle +pages.wiardweb.com + +// Wikimedia Labs : https://wikitech.wikimedia.org +// Submitted by Arturo Borrero Gonzalez +wmflabs.org +toolforge.org +wmcloud.org + +// WISP : https://wisp.gg +// Submitted by Stepan Fedotov +panel.gg +daemon.panel.gg + +// WoltLab GmbH : https://www.woltlab.com +// Submitted by Tim Düsterhus +myforum.community +community-pro.de +diskussionsbereich.de +community-pro.net +meinforum.net + +// www.com.vc : http://www.com.vc +// Submitted by Li Hui +cn.vu + +// XenonCloud GbR: https://xenoncloud.net +// Submitted by Julian Uphoff +half.host + +// XnBay Technology : http://www.xnbay.com/ +// Submitted by XnBay Developer +xnbay.com +u2.xnbay.com +u2-local.xnbay.com + +// XS4ALL Internet bv : https://www.xs4all.nl/ +// Submitted by Daniel Mostertman +cistron.nl +demon.nl +xs4all.space + +// Yandex.Cloud LLC: https://cloud.yandex.com +// Submitted by Alexander Lodin +yandexcloud.net +storage.yandexcloud.net +website.yandexcloud.net + +// YesCourse Pty Ltd : https://yescourse.com +// Submitted by Atul Bhouraskar +official.academy + +// Yola : https://www.yola.com/ +// Submitted by Stefano Rivera +yolasite.com + +// Yombo : https://yombo.net +// Submitted by Mitch Schwenk +ybo.faith +yombo.me +homelink.one +ybo.party +ybo.review +ybo.science +ybo.trade + +// Yunohost : https://yunohost.org +// Submitted by Valentin Grimaud +nohost.me +noho.st + +// ZaNiC : http://www.za.net/ +// Submitted by registry +za.net +za.org + +// Zine EOOD : https://zine.bg/ +// Submitted by Martin Angelov +bss.design + +// Zitcom A/S : https://www.zitcom.dk +// Submitted by Emil Stahl +basicserver.io +virtualserver.io +enterprisecloud.nu + +// Mintere : https://mintere.com/ +// Submitted by Ben Aubin +mintere.site + +// WP Engine : https://wpengine.com/ +// Submitted by Michael Smith +wpenginepowered.com + +// Impertrix Solutions : +// Submitted by Zhixiang Zhao +impertrixcdn.com +impertrix.com +// ===END PRIVATE DOMAINS=== diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix.rb new file mode 100644 index 0000000..01f880e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix.rb @@ -0,0 +1,179 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. 
+# +# Copyright (c) 2009-2020 Simone Carletti + +require_relative "public_suffix/domain" +require_relative "public_suffix/version" +require_relative "public_suffix/errors" +require_relative "public_suffix/rule" +require_relative "public_suffix/list" + +# PublicSuffix is a Ruby domain name parser based on the Public Suffix List. +# +# The [Public Suffix List](https://publicsuffix.org) is a cross-vendor initiative +# to provide an accurate list of domain name suffixes. +# +# The Public Suffix List is an initiative of the Mozilla Project, +# but is maintained as a community resource. It is available for use in any software, +# but was originally created to meet the needs of browser manufacturers. +module PublicSuffix + + DOT = "." + BANG = "!" + STAR = "*" + + # Parses +name+ and returns the {PublicSuffix::Domain} instance. + # + # @example Parse a valid domain + # PublicSuffix.parse("google.com") + # # => # + # + # @example Parse a valid subdomain + # PublicSuffix.parse("www.google.com") + # # => # + # + # @example Parse a fully qualified domain + # PublicSuffix.parse("google.com.") + # # => # + # + # @example Parse a fully qualified domain (subdomain) + # PublicSuffix.parse("www.google.com.") + # # => # + # + # @example Parse an invalid (unlisted) domain + # PublicSuffix.parse("x.yz") + # # => # + # + # @example Parse an invalid (unlisted) domain with strict checking (without applying the default * rule) + # PublicSuffix.parse("x.yz", default_rule: nil) + # # => PublicSuffix::DomainInvalid: `x.yz` is not a valid domain + # + # @example Parse an URL (not supported, only domains) + # PublicSuffix.parse("http://www.google.com") + # # => PublicSuffix::DomainInvalid: http://www.google.com is not expected to contain a scheme + # + # + # @param [String, #to_s] name The domain name or fully qualified domain name to parse. + # @param [PublicSuffix::List] list The rule list to search, defaults to the default {PublicSuffix::List} + # @param [Boolean] ignore_private + # @return [PublicSuffix::Domain] + # + # @raise [PublicSuffix::DomainInvalid] + # If domain is not a valid domain. + # @raise [PublicSuffix::DomainNotAllowed] + # If a rule for +domain+ is found, but the rule doesn't allow +domain+. + def self.parse(name, list: List.default, default_rule: list.default_rule, ignore_private: false) + what = normalize(name) + raise what if what.is_a?(DomainInvalid) + + rule = list.find(what, default: default_rule, ignore_private: ignore_private) + + # rubocop:disable Style/IfUnlessModifier + if rule.nil? + raise DomainInvalid, "`#{what}` is not a valid domain" + end + if rule.decompose(what).last.nil? + raise DomainNotAllowed, "`#{what}` is not allowed according to Registry policy" + end + + # rubocop:enable Style/IfUnlessModifier + + decompose(what, rule) + end + + # Checks whether +domain+ is assigned and allowed, without actually parsing it. + # + # This method doesn't care whether domain is a domain or subdomain. + # The validation is performed using the default {PublicSuffix::List}. 
+ # + # @example Validate a valid domain + # PublicSuffix.valid?("example.com") + # # => true + # + # @example Validate a valid subdomain + # PublicSuffix.valid?("www.example.com") + # # => true + # + # @example Validate a not-listed domain + # PublicSuffix.valid?("example.tldnotlisted") + # # => true + # + # @example Validate a not-listed domain with strict checking (without applying the default * rule) + # PublicSuffix.valid?("example.tldnotlisted") + # # => true + # PublicSuffix.valid?("example.tldnotlisted", default_rule: nil) + # # => false + # + # @example Validate a fully qualified domain + # PublicSuffix.valid?("google.com.") + # # => true + # PublicSuffix.valid?("www.google.com.") + # # => true + # + # @example Check an URL (which is not a valid domain) + # PublicSuffix.valid?("http://www.example.com") + # # => false + # + # + # @param [String, #to_s] name The domain name or fully qualified domain name to validate. + # @param [Boolean] ignore_private + # @return [Boolean] + def self.valid?(name, list: List.default, default_rule: list.default_rule, ignore_private: false) + what = normalize(name) + return false if what.is_a?(DomainInvalid) + + rule = list.find(what, default: default_rule, ignore_private: ignore_private) + + !rule.nil? && !rule.decompose(what).last.nil? + end + + # Attempt to parse the name and returns the domain, if valid. + # + # This method doesn't raise. Instead, it returns nil if the domain is not valid for whatever reason. + # + # @param [String, #to_s] name The domain name or fully qualified domain name to parse. + # @param [PublicSuffix::List] list The rule list to search, defaults to the default {PublicSuffix::List} + # @param [Boolean] ignore_private + # @return [String] + def self.domain(name, **options) + parse(name, **options).domain + rescue PublicSuffix::Error + nil + end + + + # private + + def self.decompose(name, rule) + left, right = rule.decompose(name) + + parts = left.split(DOT) + # If we have 0 parts left, there is just a tld and no domain or subdomain + # If we have 1 part left, there is just a tld, domain and not subdomain + # If we have 2 parts left, the last part is the domain, the other parts (combined) are the subdomain + tld = right + sld = parts.empty? ? nil : parts.pop + trd = parts.empty? ? nil : parts.join(DOT) + + Domain.new(tld, sld, trd) + end + + # Pretend we know how to deal with user input. + def self.normalize(name) + name = name.to_s.dup + name.strip! + name.chomp!(DOT) + name.downcase! + + return DomainInvalid.new("Name is blank") if name.empty? + return DomainInvalid.new("Name starts with a dot") if name.start_with?(DOT) + return DomainInvalid.new("%s is not expected to contain a scheme" % name) if name.include?("://") + + name + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/domain.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/domain.rb new file mode 100644 index 0000000..3d65eac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/domain.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + # Domain represents a domain name, composed by a TLD, SLD and TRD. + class Domain + + # Splits a string into the labels, that is the dot-separated parts. + # + # The input is not validated, but it is assumed to be a valid domain name. 
+ # + # @example + # + # name_to_labels('example.com') + # # => ['example', 'com'] + # + # name_to_labels('example.co.uk') + # # => ['example', 'co', 'uk'] + # + # @param name [String, #to_s] The domain name to split. + # @return [Array] + def self.name_to_labels(name) + name.to_s.split(DOT) + end + + + attr_reader :tld, :sld, :trd + + # Creates and returns a new {PublicSuffix::Domain} instance. + # + # @overload initialize(tld) + # Initializes with a +tld+. + # @param [String] tld The TLD (extension) + # @overload initialize(tld, sld) + # Initializes with a +tld+ and +sld+. + # @param [String] tld The TLD (extension) + # @param [String] sld The TRD (domain) + # @overload initialize(tld, sld, trd) + # Initializes with a +tld+, +sld+ and +trd+. + # @param [String] tld The TLD (extension) + # @param [String] sld The SLD (domain) + # @param [String] trd The TRD (subdomain) + # + # @yield [self] Yields on self. + # @yieldparam [PublicSuffix::Domain] self The newly creates instance + # + # @example Initialize with a TLD + # PublicSuffix::Domain.new("com") + # # => # + # + # @example Initialize with a TLD and SLD + # PublicSuffix::Domain.new("com", "example") + # # => # + # + # @example Initialize with a TLD, SLD and TRD + # PublicSuffix::Domain.new("com", "example", "wwww") + # # => # + # + def initialize(*args) + @tld, @sld, @trd = args + yield(self) if block_given? + end + + # Returns a string representation of this object. + # + # @return [String] + def to_s + name + end + + # Returns an array containing the domain parts. + # + # @return [Array] + # + # @example + # + # PublicSuffix::Domain.new("google.com").to_a + # # => [nil, "google", "com"] + # + # PublicSuffix::Domain.new("www.google.com").to_a + # # => [nil, "google", "com"] + # + def to_a + [@trd, @sld, @tld] + end + + # Returns the full domain name. + # + # @return [String] + # + # @example Gets the domain name of a domain + # PublicSuffix::Domain.new("com", "google").name + # # => "google.com" + # + # @example Gets the domain name of a subdomain + # PublicSuffix::Domain.new("com", "google", "www").name + # # => "www.google.com" + # + def name + [@trd, @sld, @tld].compact.join(DOT) + end + + # Returns a domain-like representation of this object + # if the object is a {#domain?}, nil otherwise. + # + # PublicSuffix::Domain.new("com").domain + # # => nil + # + # PublicSuffix::Domain.new("com", "google").domain + # # => "google.com" + # + # PublicSuffix::Domain.new("com", "google", "www").domain + # # => "www.google.com" + # + # This method doesn't validate the input. It handles the domain + # as a valid domain name and simply applies the necessary transformations. + # + # This method returns a FQD, not just the domain part. + # To get the domain part, use #sld (aka second level domain). + # + # PublicSuffix::Domain.new("com", "google", "www").domain + # # => "google.com" + # + # PublicSuffix::Domain.new("com", "google", "www").sld + # # => "google" + # + # @see #domain? + # @see #subdomain + # + # @return [String] + def domain + [@sld, @tld].join(DOT) if domain? + end + + # Returns a subdomain-like representation of this object + # if the object is a {#subdomain?}, nil otherwise. + # + # PublicSuffix::Domain.new("com").subdomain + # # => nil + # + # PublicSuffix::Domain.new("com", "google").subdomain + # # => nil + # + # PublicSuffix::Domain.new("com", "google", "www").subdomain + # # => "www.google.com" + # + # This method doesn't validate the input. 
It handles the domain + # as a valid domain name and simply applies the necessary transformations. + # + # This method returns a FQD, not just the subdomain part. + # To get the subdomain part, use #trd (aka third level domain). + # + # PublicSuffix::Domain.new("com", "google", "www").subdomain + # # => "www.google.com" + # + # PublicSuffix::Domain.new("com", "google", "www").trd + # # => "www" + # + # @see #subdomain? + # @see #domain + # + # @return [String] + def subdomain + [@trd, @sld, @tld].join(DOT) if subdomain? + end + + # Checks whether self looks like a domain. + # + # This method doesn't actually validate the domain. + # It only checks whether the instance contains + # a value for the {#tld} and {#sld} attributes. + # + # @example + # + # PublicSuffix::Domain.new("com").domain? + # # => false + # + # PublicSuffix::Domain.new("com", "google").domain? + # # => true + # + # PublicSuffix::Domain.new("com", "google", "www").domain? + # # => true + # + # # This is an invalid domain, but returns true + # # because this method doesn't validate the content. + # PublicSuffix::Domain.new("com", nil).domain? + # # => true + # + # @see #subdomain? + # + # @return [Boolean] + def domain? + !(@tld.nil? || @sld.nil?) + end + + # Checks whether self looks like a subdomain. + # + # This method doesn't actually validate the subdomain. + # It only checks whether the instance contains + # a value for the {#tld}, {#sld} and {#trd} attributes. + # If you also want to validate the domain, + # use {#valid_subdomain?} instead. + # + # @example + # + # PublicSuffix::Domain.new("com").subdomain? + # # => false + # + # PublicSuffix::Domain.new("com", "google").subdomain? + # # => false + # + # PublicSuffix::Domain.new("com", "google", "www").subdomain? + # # => true + # + # # This is an invalid domain, but returns true + # # because this method doesn't validate the content. + # PublicSuffix::Domain.new("com", "example", nil).subdomain? + # # => true + # + # @see #domain? + # + # @return [Boolean] + def subdomain? + !(@tld.nil? || @sld.nil? || @trd.nil?) + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/errors.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/errors.rb new file mode 100644 index 0000000..2d5798d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/errors.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + class Error < StandardError + end + + # Raised when trying to parse an invalid name. + # A name is considered invalid when no rule is found in the definition list. + # + # @example + # + # PublicSuffix.parse("nic.test") + # # => PublicSuffix::DomainInvalid + # + # PublicSuffix.parse("http://www.nic.it") + # # => PublicSuffix::DomainInvalid + # + class DomainInvalid < Error + end + + # Raised when trying to parse a name that matches a suffix. 
+ # + # @example + # + # PublicSuffix.parse("nic.do") + # # => PublicSuffix::DomainNotAllowed + # + # PublicSuffix.parse("www.nic.do") + # # => PublicSuffix::Domain + # + class DomainNotAllowed < DomainInvalid + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/list.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/list.rb new file mode 100644 index 0000000..321b59a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/list.rb @@ -0,0 +1,247 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + # A {PublicSuffix::List} is a collection of one + # or more {PublicSuffix::Rule}. + # + # Given a {PublicSuffix::List}, + # you can add or remove {PublicSuffix::Rule}, + # iterate all items in the list or search for the first rule + # which matches a specific domain name. + # + # # Create a new list + # list = PublicSuffix::List.new + # + # # Push two rules to the list + # list << PublicSuffix::Rule.factory("it") + # list << PublicSuffix::Rule.factory("com") + # + # # Get the size of the list + # list.size + # # => 2 + # + # # Search for the rule matching given domain + # list.find("example.com") + # # => # + # list.find("example.org") + # # => nil + # + # You can create as many {PublicSuffix::List} you want. + # The {PublicSuffix::List.default} rule list is used + # to tokenize and validate a domain. + # + class List + + DEFAULT_LIST_PATH = File.expand_path("../../data/list.txt", __dir__) + + # Gets the default rule list. + # + # Initializes a new {PublicSuffix::List} parsing the content + # of {PublicSuffix::List.default_list_content}, if required. + # + # @return [PublicSuffix::List] + def self.default(**options) + @default ||= parse(File.read(DEFAULT_LIST_PATH), **options) + end + + # Sets the default rule list to +value+. + # + # @param value [PublicSuffix::List] the new list + # @return [PublicSuffix::List] + def self.default=(value) + @default = value + end + + # Parse given +input+ treating the content as Public Suffix List. + # + # See http://publicsuffix.org/format/ for more details about input format. + # + # @param string [#each_line] the list to parse + # @param private_domains [Boolean] whether to ignore the private domains section + # @return [PublicSuffix::List] + def self.parse(input, private_domains: true) + comment_token = "//" + private_token = "===BEGIN PRIVATE DOMAINS===" + section = nil # 1 == ICANN, 2 == PRIVATE + + new do |list| + input.each_line do |line| + line.strip! + case # rubocop:disable Style/EmptyCaseCondition + + # skip blank lines + when line.empty? + next + + # include private domains or stop scanner + when line.include?(private_token) + break if !private_domains + + section = 2 + + # skip comments + when line.start_with?(comment_token) + next + + else + list.add(Rule.factory(line, private: section == 2)) + + end + end + end + end + + + # Initializes an empty {PublicSuffix::List}. + # + # @yield [self] Yields on self. + # @yieldparam [PublicSuffix::List] self The newly created instance. + def initialize + @rules = {} + yield(self) if block_given? + end + + + # Checks whether two lists are equal. + # + # List one is equal to two, if two is an instance of + # {PublicSuffix::List} and each +PublicSuffix::Rule::*+ + # in list one is available in list two, in the same order. 
+ # + # @param other [PublicSuffix::List] the List to compare + # @return [Boolean] + def ==(other) + return false unless other.is_a?(List) + + equal?(other) || @rules == other.rules + end + alias eql? == + + # Iterates each rule in the list. + def each(&block) + Enumerator.new do |y| + @rules.each do |key, node| + y << entry_to_rule(node, key) + end + end.each(&block) + end + + + # Adds the given object to the list and optionally refreshes the rule index. + # + # @param rule [PublicSuffix::Rule::*] the rule to add to the list + # @return [self] + def add(rule) + @rules[rule.value] = rule_to_entry(rule) + self + end + alias << add + + # Gets the number of rules in the list. + # + # @return [Integer] + def size + @rules.size + end + + # Checks whether the list is empty. + # + # @return [Boolean] + def empty? + @rules.empty? + end + + # Removes all rules. + # + # @return [self] + def clear + @rules.clear + self + end + + # Finds and returns the rule corresponding to the longest public suffix for the hostname. + # + # @param name [#to_s] the hostname + # @param default [PublicSuffix::Rule::*] the default rule to return in case no rule matches + # @return [PublicSuffix::Rule::*] + def find(name, default: default_rule, **options) + rule = select(name, **options).inject do |l, r| + return r if r.class == Rule::Exception + + l.length > r.length ? l : r + end + rule || default + end + + # Selects all the rules matching given hostame. + # + # If `ignore_private` is set to true, the algorithm will skip the rules that are flagged as + # private domain. Note that the rules will still be part of the loop. + # If you frequently need to access lists ignoring the private domains, + # you should create a list that doesn't include these domains setting the + # `private_domains: false` option when calling {.parse}. + # + # Note that this method is currently private, as you should not rely on it. Instead, + # the public interface is {#find}. The current internal algorithm allows to return all + # matching rules, but different data structures may not be able to do it, and instead would + # return only the match. For this reason, you should rely on {#find}. + # + # @param name [#to_s] the hostname + # @param ignore_private [Boolean] + # @return [Array] + def select(name, ignore_private: false) + name = name.to_s + + parts = name.split(DOT).reverse! + index = 0 + query = parts[index] + rules = [] + + loop do + match = @rules[query] + rules << entry_to_rule(match, query) if !match.nil? && (ignore_private == false || match.private == false) + + index += 1 + break if index >= parts.size + + query = parts[index] + DOT + query + end + + rules + end + private :select + + # Gets the default rule. 
+ # + # @see PublicSuffix::Rule.default_rule + # + # @return [PublicSuffix::Rule::*] + def default_rule + PublicSuffix::Rule.default + end + + + protected + + attr_reader :rules + + + private + + def entry_to_rule(entry, value) + entry.type.new(value: value, length: entry.length, private: entry.private) + end + + def rule_to_entry(rule) + Rule::Entry.new(rule.class, rule.length, rule.private) + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/rule.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/rule.rb new file mode 100644 index 0000000..d4c32ca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/rule.rb @@ -0,0 +1,350 @@ +# frozen_string_literal: true + +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + + # A Rule is a special object which holds a single definition + # of the Public Suffix List. + # + # There are 3 types of rules, each one represented by a specific + # subclass within the +PublicSuffix::Rule+ namespace. + # + # To create a new Rule, use the {PublicSuffix::Rule#factory} method. + # + # PublicSuffix::Rule.factory("ar") + # # => # + # + module Rule + + # @api internal + Entry = Struct.new(:type, :length, :private) # rubocop:disable Lint/StructNewOverride + + # = Abstract rule class + # + # This represent the base class for a Rule definition + # in the {Public Suffix List}[https://publicsuffix.org]. + # + # This is intended to be an Abstract class + # and you shouldn't create a direct instance. The only purpose + # of this class is to expose a common interface + # for all the available subclasses. + # + # * {PublicSuffix::Rule::Normal} + # * {PublicSuffix::Rule::Exception} + # * {PublicSuffix::Rule::Wildcard} + # + # ## Properties + # + # A rule is composed by 4 properties: + # + # value - A normalized version of the rule name. + # The normalization process depends on rule tpe. + # + # Here's an example + # + # PublicSuffix::Rule.factory("*.google.com") + # # + # + # ## Rule Creation + # + # The best way to create a new rule is passing the rule name + # to the PublicSuffix::Rule.factory method. + # + # PublicSuffix::Rule.factory("com") + # # => PublicSuffix::Rule::Normal + # + # PublicSuffix::Rule.factory("*.com") + # # => PublicSuffix::Rule::Wildcard + # + # This method will detect the rule type and create an instance + # from the proper rule class. + # + # ## Rule Usage + # + # A rule describes the composition of a domain name and explains how to tokenize + # the name into tld, sld and trd. + # + # To use a rule, you first need to be sure the name you want to tokenize + # can be handled by the current rule. + # You can use the #match? method. + # + # rule = PublicSuffix::Rule.factory("com") + # + # rule.match?("google.com") + # # => true + # + # rule.match?("google.com") + # # => false + # + # Rule order is significant. A name can match more than one rule. + # See the {Public Suffix Documentation}[http://publicsuffix.org/format/] + # to learn more about rule priority. + # + # When you have the right rule, you can use it to tokenize the domain name. 
+ # + # rule = PublicSuffix::Rule.factory("com") + # + # rule.decompose("google.com") + # # => ["google", "com"] + # + # rule.decompose("www.google.com") + # # => ["www.google", "com"] + # + # @abstract + # + class Base + + # @return [String] the rule definition + attr_reader :value + + # @return [String] the length of the rule + attr_reader :length + + # @return [Boolean] true if the rule is a private domain + attr_reader :private + + + # Initializes a new rule from the content. + # + # @param content [String] the content of the rule + # @param private [Boolean] + def self.build(content, private: false) + new(value: content, private: private) + end + + # Initializes a new rule. + # + # @param value [String] + # @param private [Boolean] + def initialize(value:, length: nil, private: false) + @value = value.to_s + @length = length || @value.count(DOT) + 1 + @private = private + end + + # Checks whether this rule is equal to other. + # + # @param [PublicSuffix::Rule::*] other The rule to compare + # @return [Boolean] + # Returns true if this rule and other are instances of the same class + # and has the same value, false otherwise. + def ==(other) + equal?(other) || (self.class == other.class && value == other.value) + end + alias eql? == + + # Checks if this rule matches +name+. + # + # A domain name is said to match a rule if and only if + # all of the following conditions are met: + # + # - When the domain and rule are split into corresponding labels, + # that the domain contains as many or more labels than the rule. + # - Beginning with the right-most labels of both the domain and the rule, + # and continuing for all labels in the rule, one finds that for every pair, + # either they are identical, or that the label from the rule is "*". + # + # @see https://publicsuffix.org/list/ + # + # @example + # PublicSuffix::Rule.factory("com").match?("example.com") + # # => true + # PublicSuffix::Rule.factory("com").match?("example.net") + # # => false + # + # @param name [String] the domain name to check + # @return [Boolean] + def match?(name) + # Note: it works because of the assumption there are no + # rules like foo.*.com. If the assumption is incorrect, + # we need to properly walk the input and skip parts according + # to wildcard component. + diff = name.chomp(value) + diff.empty? || diff.end_with?(DOT) + end + + # @abstract + def parts + raise NotImplementedError + end + + # @abstract + # @param [String, #to_s] name The domain name to decompose + # @return [Array] + def decompose(*) + raise NotImplementedError + end + + end + + # Normal represents a standard rule (e.g. com). + class Normal < Base + + # Gets the original rule definition. + # + # @return [String] The rule definition. + def rule + value + end + + # Decomposes the domain name according to rule properties. + # + # @param [String, #to_s] name The domain name to decompose + # @return [Array] The array with [trd + sld, tld]. + def decompose(domain) + suffix = parts.join('\.') + matches = domain.to_s.match(/^(.*)\.(#{suffix})$/) + matches ? matches[1..2] : [nil, nil] + end + + # dot-split rule value and returns all rule parts + # in the order they appear in the value. + # + # @return [Array] + def parts + @value.split(DOT) + end + + end + + # Wildcard represents a wildcard rule (e.g. *.co.uk). + class Wildcard < Base + + # Initializes a new rule from the content. 
+ # + # @param content [String] the content of the rule + # @param private [Boolean] + def self.build(content, private: false) + new(value: content.to_s[2..-1], private: private) + end + + # Initializes a new rule. + # + # @param value [String] + # @param private [Boolean] + def initialize(value:, length: nil, private: false) + super(value: value, length: length, private: private) + length or @length += 1 # * counts as 1 + end + + # Gets the original rule definition. + # + # @return [String] The rule definition. + def rule + value == "" ? STAR : STAR + DOT + value + end + + # Decomposes the domain name according to rule properties. + # + # @param [String, #to_s] name The domain name to decompose + # @return [Array] The array with [trd + sld, tld]. + def decompose(domain) + suffix = ([".*?"] + parts).join('\.') + matches = domain.to_s.match(/^(.*)\.(#{suffix})$/) + matches ? matches[1..2] : [nil, nil] + end + + # dot-split rule value and returns all rule parts + # in the order they appear in the value. + # + # @return [Array] + def parts + @value.split(DOT) + end + + end + + # Exception represents an exception rule (e.g. !parliament.uk). + class Exception < Base + + # Initializes a new rule from the content. + # + # @param content [String] the content of the rule + # @param private [Boolean] + def self.build(content, private: false) + new(value: content.to_s[1..-1], private: private) + end + + # Gets the original rule definition. + # + # @return [String] The rule definition. + def rule + BANG + value + end + + # Decomposes the domain name according to rule properties. + # + # @param [String, #to_s] name The domain name to decompose + # @return [Array] The array with [trd + sld, tld]. + def decompose(domain) + suffix = parts.join('\.') + matches = domain.to_s.match(/^(.*)\.(#{suffix})$/) + matches ? matches[1..2] : [nil, nil] + end + + # dot-split rule value and returns all rule parts + # in the order they appear in the value. + # The leftmost label is not considered a label. + # + # See http://publicsuffix.org/format/: + # If the prevailing rule is a exception rule, + # modify it by removing the leftmost label. + # + # @return [Array] + def parts + @value.split(DOT)[1..-1] + end + + end + + + # Takes the +name+ of the rule, detects the specific rule class + # and creates a new instance of that class. + # The +name+ becomes the rule +value+. + # + # @example Creates a Normal rule + # PublicSuffix::Rule.factory("ar") + # # => # + # + # @example Creates a Wildcard rule + # PublicSuffix::Rule.factory("*.ar") + # # => # + # + # @example Creates an Exception rule + # PublicSuffix::Rule.factory("!congresodelalengua3.ar") + # # => # + # + # @param [String] content The rule content. + # @return [PublicSuffix::Rule::*] A rule instance. + def self.factory(content, private: false) + case content.to_s[0, 1] + when STAR + Wildcard + when BANG + Exception + else + Normal + end.build(content, private: private) + end + + # The default rule to use if no rule match. + # + # The default rule is "*". From https://publicsuffix.org/list/: + # + # > If no rules match, the prevailing rule is "*". + # + # @return [PublicSuffix::Rule::Wildcard] The default rule. 
+ def self.default + factory(STAR) + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/version.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/version.rb new file mode 100644 index 0000000..c9f93f1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/lib/public_suffix/version.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +# +# = Public Suffix +# +# Domain name parser based on the Public Suffix List. +# +# Copyright (c) 2009-2020 Simone Carletti + +module PublicSuffix + # The current library version. + VERSION = "4.0.6" +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/public_suffix.gemspec b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/public_suffix.gemspec new file mode 100644 index 0000000..ae552fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/public_suffix.gemspec @@ -0,0 +1,29 @@ +# -*- encoding: utf-8 -*- +$LOAD_PATH.push File.expand_path("../lib", __FILE__) +require "public_suffix/version" + +Gem::Specification.new do |s| + s.name = "public_suffix" + s.version = PublicSuffix::VERSION + s.authors = ["Simone Carletti"] + s.email = ["weppos@weppos.net"] + s.homepage = "https://simonecarletti.com/code/publicsuffix-ruby" + s.summary = "Domain name parser based on the Public Suffix List." + s.description = "PublicSuffix can parse and decompose a domain name into top level domain, domain and subdomains." + s.licenses = ["MIT"] + + s.metadata = { + "bug_tracker_uri" => "https://github.com/weppos/publicsuffix-ruby/issues", + "changelog_uri" => "https://github.com/weppos/publicsuffix-ruby/blob/master/CHANGELOG.md", + "documentation_uri" => "https://rubydoc.info/gems/#{s.name}/#{s.version}", + "homepage_uri" => s.homepage, + "source_code_uri" => "https://github.com/weppos/publicsuffix-ruby/tree/v#{s.version}", + } + + s.required_ruby_version = ">= 2.3" + + s.require_paths = ["lib"] + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") + s.extra_rdoc_files = %w( LICENSE.txt ) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/.empty b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/.empty new file mode 100644 index 0000000..e322015 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/.empty @@ -0,0 +1,2 @@ +# This is an empty file I use to force a non-empty commit when I only need to store notes +.. 
\ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/acceptance_test.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/acceptance_test.rb new file mode 100644 index 0000000..371bfe1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/acceptance_test.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +require "test_helper" + +class AcceptanceTest < Minitest::Test + + VALID_CASES = [ + ["example.com", "example.com", [nil, "example", "com"]], + ["foo.example.com", "example.com", ["foo", "example", "com"]], + + ["verybritish.co.uk", "verybritish.co.uk", [nil, "verybritish", "co.uk"]], + ["foo.verybritish.co.uk", "verybritish.co.uk", ["foo", "verybritish", "co.uk"]], + + ["parliament.uk", "parliament.uk", [nil, "parliament", "uk"]], + ["foo.parliament.uk", "parliament.uk", ["foo", "parliament", "uk"]], + ].freeze + + def test_valid + VALID_CASES.each do |input, domain, results| + parsed = PublicSuffix.parse(input) + trd, sld, tld = results + assert_equal tld, parsed.tld, "Invalid tld for `#{name}`" + assert_equal sld, parsed.sld, "Invalid sld for `#{name}`" + if trd.nil? + assert_nil parsed.trd, "Invalid trd for `#{name}`" + else + assert_equal trd, parsed.trd, "Invalid trd for `#{name}`" + end + + assert_equal domain, PublicSuffix.domain(input) + assert PublicSuffix.valid?(input) + end + end + + + INVALID_CASES = [ + ["nic.bd", PublicSuffix::DomainNotAllowed], + [nil, PublicSuffix::DomainInvalid], + ["", PublicSuffix::DomainInvalid], + [" ", PublicSuffix::DomainInvalid], + ].freeze + + def test_invalid + INVALID_CASES.each do |(name, error)| + assert_raises(error) { PublicSuffix.parse(name) } + assert !PublicSuffix.valid?(name) + end + end + + + REJECTED_CASES = [ + ["www. .com", true], + ["foo.co..uk", true], + ["goo,gle.com", true], + ["-google.com", true], + ["google-.com", true], + + # This case was covered in GH-15. + # I decided to cover this case because it's not easily reproducible with URI.parse + # and can lead to several false positives. + ["http://google.com", false], + ].freeze + + def test_rejected + REJECTED_CASES.each do |name, expected| + assert_equal expected, PublicSuffix.valid?(name), + "Expected %s to be %s" % [name.inspect, expected.inspect] + assert !valid_domain?(name), + "#{name} expected to be invalid" + end + end + + + CASE_CASES = [ + ["Www.google.com", %w( www google com )], + ["www.Google.com", %w( www google com )], + ["www.google.Com", %w( www google com )], + ].freeze + + def test_ignore_case + CASE_CASES.each do |name, results| + domain = PublicSuffix.parse(name) + trd, sld, tld = results + assert_equal tld, domain.tld, "Invalid tld for `#{name}'" + assert_equal sld, domain.sld, "Invalid sld for `#{name}'" + assert_equal trd, domain.trd, "Invalid trd for `#{name}'" + assert PublicSuffix.valid?(name) + end + end + + + INCLUDE_PRIVATE_CASES = [ + ["blogspot.com", true, "blogspot.com"], + ["blogspot.com", false, nil], + ["subdomain.blogspot.com", true, "blogspot.com"], + ["subdomain.blogspot.com", false, "subdomain.blogspot.com"], + ].freeze + + # rubocop:disable Style/CombinableLoops + def test_ignore_private + # test domain and parse + INCLUDE_PRIVATE_CASES.each do |given, ignore_private, expected| + if expected.nil? + assert_nil PublicSuffix.domain(given, ignore_private: ignore_private) + else + assert_equal expected, PublicSuffix.domain(given, ignore_private: ignore_private) + end + end + # test valid? 
+ INCLUDE_PRIVATE_CASES.each do |given, ignore_private, expected| + assert_equal !expected.nil?, PublicSuffix.valid?(given, ignore_private: ignore_private) + end + end + # rubocop:enable Style/CombinableLoops + + + def valid_uri?(name) + uri = URI.parse(name) + !uri.host.nil? + rescue StandardError + false + end + + def valid_domain?(name) + uri = URI.parse(name) + !uri.host.nil? && uri.scheme.nil? + rescue StandardError + false + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find.rb new file mode 100644 index 0000000..0074f1d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find.rb @@ -0,0 +1,66 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +NAME_SHORT = "example.de" +NAME_MEDIUM = "www.subdomain.example.de" +NAME_LONG = "one.two.three.four.five.example.de" +NAME_WILD = "one.two.three.four.five.example.bd" +NAME_EXCP = "one.two.three.four.five.www.ck" + +IAAA = "www.example.ac" +IZZZ = "www.example.zone" + +PAAA = "one.two.three.four.five.example.beep.pl" +PZZZ = "one.two.three.four.five.example.now.sh" + +JP = "www.yokoshibahikari.chiba.jp" +IT = "www.example.it" +COM = "www.example.com" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.find("example.com") + +Benchmark.bmbm(25) do |x| + x.report("NAME_SHORT") do + TIMES.times { PublicSuffixList.find(NAME_SHORT) != nil } + end + x.report("NAME_MEDIUM") do + TIMES.times { PublicSuffixList.find(NAME_MEDIUM) != nil } + end + x.report("NAME_LONG") do + TIMES.times { PublicSuffixList.find(NAME_LONG) != nil } + end + x.report("NAME_WILD") do + TIMES.times { PublicSuffixList.find(NAME_WILD) != nil } + end + x.report("NAME_EXCP") do + TIMES.times { PublicSuffixList.find(NAME_EXCP) != nil } + end + + x.report("IAAA") do + TIMES.times { PublicSuffixList.find(IAAA) != nil } + end + x.report("IZZZ") do + TIMES.times { PublicSuffixList.find(IZZZ) != nil } + end + + x.report("PAAA") do + TIMES.times { PublicSuffixList.find(PAAA) != nil } + end + x.report("PZZZ") do + TIMES.times { PublicSuffixList.find(PZZZ) != nil } + end + + x.report("JP") do + TIMES.times { PublicSuffixList.find(JP) != nil } + end + x.report("IT") do + TIMES.times { PublicSuffixList.find(IT) != nil } + end + x.report("COM") do + TIMES.times { PublicSuffixList.find(COM) != nil } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find_all.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find_all.rb new file mode 100644 index 0000000..0bcfd42 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_find_all.rb @@ -0,0 +1,102 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +NAME_SHORT = "example.de" +NAME_MEDIUM = "www.subdomain.example.de" +NAME_LONG = "one.two.three.four.five.example.de" +NAME_WILD = "one.two.three.four.five.example.bd" +NAME_EXCP = "one.two.three.four.five.www.ck" + +IAAA = "www.example.ac" +IZZZ = "www.example.zone" + +PAAA = "one.two.three.four.five.example.beep.pl" +PZZZ = "one.two.three.four.five.example.now.sh" + +JP = "www.yokoshibahikari.chiba.jp" +IT = "www.example.it" +COM = "www.example.com" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.find("example.com") + +Benchmark.bmbm(25) do |x| + x.report("NAME_SHORT") 
do + TIMES.times { PublicSuffixList.find(NAME_SHORT) != nil } + end + x.report("NAME_SHORT (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_SHORT, ignore_private: true) != nil } + end + x.report("NAME_MEDIUM") do + TIMES.times { PublicSuffixList.find(NAME_MEDIUM) != nil } + end + x.report("NAME_MEDIUM (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_MEDIUM, ignore_private: true) != nil } + end + x.report("NAME_LONG") do + TIMES.times { PublicSuffixList.find(NAME_LONG) != nil } + end + x.report("NAME_LONG (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_LONG, ignore_private: true) != nil } + end + x.report("NAME_WILD") do + TIMES.times { PublicSuffixList.find(NAME_WILD) != nil } + end + x.report("NAME_WILD (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_WILD, ignore_private: true) != nil } + end + x.report("NAME_EXCP") do + TIMES.times { PublicSuffixList.find(NAME_EXCP) != nil } + end + x.report("NAME_EXCP (noprivate)") do + TIMES.times { PublicSuffixList.find(NAME_EXCP, ignore_private: true) != nil } + end + + x.report("IAAA") do + TIMES.times { PublicSuffixList.find(IAAA) != nil } + end + x.report("IAAA (noprivate)") do + TIMES.times { PublicSuffixList.find(IAAA, ignore_private: true) != nil } + end + x.report("IZZZ") do + TIMES.times { PublicSuffixList.find(IZZZ) != nil } + end + x.report("IZZZ (noprivate)") do + TIMES.times { PublicSuffixList.find(IZZZ, ignore_private: true) != nil } + end + + x.report("PAAA") do + TIMES.times { PublicSuffixList.find(PAAA) != nil } + end + x.report("PAAA (noprivate)") do + TIMES.times { PublicSuffixList.find(PAAA, ignore_private: true) != nil } + end + x.report("PZZZ") do + TIMES.times { PublicSuffixList.find(PZZZ) != nil } + end + x.report("PZZZ (noprivate)") do + TIMES.times { PublicSuffixList.find(PZZZ, ignore_private: true) != nil } + end + + x.report("JP") do + TIMES.times { PublicSuffixList.find(JP) != nil } + end + x.report("JP (noprivate)") do + TIMES.times { PublicSuffixList.find(JP, ignore_private: true) != nil } + end + x.report("IT") do + TIMES.times { PublicSuffixList.find(IT) != nil } + end + x.report("IT (noprivate)") do + TIMES.times { PublicSuffixList.find(IT, ignore_private: true) != nil } + end + x.report("COM") do + TIMES.times { PublicSuffixList.find(COM) != nil } + end + x.report("COM (noprivate)") do + TIMES.times { PublicSuffixList.find(COM, ignore_private: true) != nil } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_names.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_names.rb new file mode 100644 index 0000000..36b2bce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_names.rb @@ -0,0 +1,91 @@ +require 'benchmark/ips' + +STRING = "www.subdomain.example.com" +ARRAY = %w( + com + example.com + subdomain.example.com + www.subdomain.example.com +) + +def tokenizer1(string) + parts = string.split(".").reverse! + index = 0 + query = parts[index] + names = [] + + loop do + names << query + + index += 1 + break if index >= parts.size + query = parts[index] + "." + query + end + names +end + +def tokenizer2(string) + parts = string.split(".") + index = parts.size - 1 + query = parts[index] + names = [] + + loop do + names << query + + index -= 1 + break if index < 0 + query = parts[index] + "." 
+ query + end + names +end + +def tokenizer3(string) + isx = string.size + idx = string.size - 1 + names = [] + + loop do + isx = string.rindex(".", isx - 1) || -1 + names << string[isx + 1, idx - isx] + + break if isx <= 0 + end + names +end + +def tokenizer4(string) + isx = string.size + idx = string.size - 1 + names = [] + + loop do + isx = string.rindex(".", isx - 1) || -1 + names << string[(isx+1)..idx] + + break if isx <= 0 + end + names +end + +(x = tokenizer1(STRING)) == ARRAY or fail("tokenizer1 failed: #{x.inspect}") +(x = tokenizer2(STRING)) == ARRAY or fail("tokenizer2 failed: #{x.inspect}") +(x = tokenizer3(STRING)) == ARRAY or fail("tokenizer3 failed: #{x.inspect}") +(x = tokenizer4(STRING)) == ARRAY or fail("tokenizer4 failed: #{x.inspect}") + +Benchmark.ips do |x| + x.report("tokenizer1") do + tokenizer1(STRING).is_a?(Array) + end + x.report("tokenizer2") do + tokenizer2(STRING).is_a?(Array) + end + x.report("tokenizer3") do + tokenizer3(STRING).is_a?(Array) + end + x.report("tokenizer4") do + tokenizer4(STRING).is_a?(Array) + end + + x.compare! +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select.rb new file mode 100644 index 0000000..66d908b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select.rb @@ -0,0 +1,26 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +JP = "www.yokoshibahikari.chiba.jp" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +class PublicSuffix::List + public :select +end +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.select("example.jp") +PublicSuffixList.find("example.jp") + +Benchmark.bmbm(25) do |x| + x.report("JP select") do + TIMES.times { PublicSuffixList.select(JP) } + end + x.report("JP find") do + TIMES.times { PublicSuffixList.find(JP) } + end + # x.report("JP (noprivate)") do + # TIMES.times { PublicSuffixList.find(JP, ignore_private: true) != nil } + # end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select_incremental.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select_incremental.rb new file mode 100644 index 0000000..f002c82 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_select_incremental.rb @@ -0,0 +1,25 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +JP = "www.yokoshibahikari.chiba.jp" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +class PublicSuffix::List + public :select +end +PublicSuffixList = PublicSuffix::List.default +PublicSuffixList.select("example.jp") + +Benchmark.bmbm(25) do |x| + x.report("select jp") do + TIMES.times { PublicSuffixList.select("jp") } + end + x.report("select example.jp") do + TIMES.times { PublicSuffixList.select("example.jp") } + end + x.report("select www.example.jp") do + TIMES.times { PublicSuffixList.select("www.example.jp") } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_valid.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_valid.rb new file mode 100644 index 0000000..a484451 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/benchmarks/bm_valid.rb @@ -0,0 +1,101 @@ +require 'benchmark' +require_relative "../../lib/public_suffix" + +NAME_SHORT = "example.de" +NAME_MEDIUM = "www.subdomain.example.de" +NAME_LONG = "one.two.three.four.five.example.de" 
+NAME_WILD = "one.two.three.four.five.example.bd" +NAME_EXCP = "one.two.three.four.five.www.ck" + +IAAA = "www.example.ac" +IZZZ = "www.example.zone" + +PAAA = "one.two.three.four.five.example.beep.pl" +PZZZ = "one.two.three.four.five.example.now.sh" + +JP = "www.yokoshibahikari.chiba.jp" +IT = "www.example.it" +COM = "www.example.com" + +TIMES = (ARGV.first || 50_000).to_i + +# Initialize +PublicSuffix.valid?("example.com") + +Benchmark.bmbm(25) do |x| + x.report("NAME_SHORT") do + TIMES.times { PublicSuffix.valid?(NAME_SHORT) == true } + end + x.report("NAME_SHORT (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_SHORT, ignore_private: true) == true } + end + x.report("NAME_MEDIUM") do + TIMES.times { PublicSuffix.valid?(NAME_MEDIUM) == true } + end + x.report("NAME_MEDIUM (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_MEDIUM, ignore_private: true) == true } + end + x.report("NAME_LONG") do + TIMES.times { PublicSuffix.valid?(NAME_LONG) == true } + end + x.report("NAME_LONG (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_LONG, ignore_private: true) == true } + end + x.report("NAME_WILD") do + TIMES.times { PublicSuffix.valid?(NAME_WILD) == true } + end + x.report("NAME_WILD (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_WILD, ignore_private: true) == true } + end + x.report("NAME_EXCP") do + TIMES.times { PublicSuffix.valid?(NAME_EXCP) == true } + end + x.report("NAME_EXCP (noprivate)") do + TIMES.times { PublicSuffix.valid?(NAME_EXCP, ignore_private: true) == true } + end + + x.report("IAAA") do + TIMES.times { PublicSuffix.valid?(IAAA) == true } + end + x.report("IAAA (noprivate)") do + TIMES.times { PublicSuffix.valid?(IAAA, ignore_private: true) == true } + end + x.report("IZZZ") do + TIMES.times { PublicSuffix.valid?(IZZZ) == true } + end + x.report("IZZZ (noprivate)") do + TIMES.times { PublicSuffix.valid?(IZZZ, ignore_private: true) == true } + end + + x.report("PAAA") do + TIMES.times { PublicSuffix.valid?(PAAA) == true } + end + x.report("PAAA (noprivate)") do + TIMES.times { PublicSuffix.valid?(PAAA, ignore_private: true) == true } + end + x.report("PZZZ") do + TIMES.times { PublicSuffix.valid?(PZZZ) == true } + end + x.report("PZZZ (noprivate)") do + TIMES.times { PublicSuffix.valid?(PZZZ, ignore_private: true) == true } + end + + x.report("JP") do + TIMES.times { PublicSuffix.valid?(JP) == true } + end + x.report("JP (noprivate)") do + TIMES.times { PublicSuffix.valid?(JP, ignore_private: true) == true } + end + x.report("IT") do + TIMES.times { PublicSuffix.valid?(IT) == true } + end + x.report("IT (noprivate)") do + TIMES.times { PublicSuffix.valid?(IT, ignore_private: true) == true } + end + x.report("COM") do + TIMES.times { PublicSuffix.valid?(COM) == true } + end + x.report("COM (noprivate)") do + TIMES.times { PublicSuffix.valid?(COM, ignore_private: true) == true } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/domain_profiler.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/domain_profiler.rb new file mode 100644 index 0000000..1ed1050 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/domain_profiler.rb @@ -0,0 +1,12 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +PublicSuffix::List.default + +report = MemoryProfiler.report do + PublicSuffix.domain("www.example.com") +end + +report.pretty_print diff --git 
a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/find_profiler.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/find_profiler.rb new file mode 100644 index 0000000..53d28eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/find_profiler.rb @@ -0,0 +1,12 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +PublicSuffix::List.default + +report = MemoryProfiler.report do + PublicSuffix::List.default.find("www.example.com") +end + +report.pretty_print diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/find_profiler_jp.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/find_profiler_jp.rb new file mode 100644 index 0000000..65c13fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/find_profiler_jp.rb @@ -0,0 +1,12 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +PublicSuffix::List.default + +report = MemoryProfiler.report do + PublicSuffix::List.default.find("a.b.ide.kyoto.jp") +end + +report.pretty_print diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/initialization_profiler.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/initialization_profiler.rb new file mode 100644 index 0000000..008b1e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/initialization_profiler.rb @@ -0,0 +1,11 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require "memory_profiler" +require "public_suffix" + +report = MemoryProfiler.report do + PublicSuffix::List.default +end + +report.pretty_print +# report.pretty_print(to_file: 'profiler-%s-%d.txt' % [ARGV[0], Time.now.to_i]) diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/list_profsize.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/list_profsize.rb new file mode 100644 index 0000000..0b98b4b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/list_profsize.rb @@ -0,0 +1,11 @@ +$LOAD_PATH.unshift File.expand_path("../../lib", __dir__) + +require_relative "object_binsize" +require "public_suffix" + +list = PublicSuffix::List.default +puts "#{list.size} rules:" + +prof = ObjectBinsize.new +prof.report(PublicSuffix::List.default, label: "PublicSuffix::List size") +prof.report(PublicSuffix::List.default.instance_variable_get(:@rules), label: "Size of rules") diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/object_binsize.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/object_binsize.rb new file mode 100644 index 0000000..dc60bdb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/profilers/object_binsize.rb @@ -0,0 +1,57 @@ +require 'tempfile' + +# A very simple memory profiles that checks the full size of a variable +# by serializing into a binary file. +# +# Yes, I know this is very rough, but there are cases where ObjectSpace.memsize_of +# doesn't cooperate, and this is one of the possible workarounds. +# +# For certain cases, it works (TM). 
+class ObjectBinsize + + def measure(var, label: nil) + dump(var, label: label) + end + + def report(var, label: nil, padding: 10) + file = measure(var, label: label) + + size = format_integer(file.size) + name = label || File.basename(file.path) + printf("%#{padding}s %s\n", size, name) + end + + private + + def dump(var, **args) + file = Tempfile.new(args[:label].to_s) + file.write(Marshal.dump(var)) + file + ensure + file.close + end + + def format_integer(int) + int.to_s.reverse.gsub(/...(?=.)/, '\&,').reverse + end + +end + +if __FILE__ == $0 + prof = ObjectBinsize.new + + prof.report(nil, label: "nil") + prof.report(false, label: "false") + prof.report(true, label: "true") + prof.report(0, label: "integer") + prof.report("", label: "empty string") + prof.report({}, label: "empty hash") + prof.report({}, label: "empty array") + + prof.report({ foo: "1" }, label: "hash 1 item (symbol)") + prof.report({ foo: "1", bar: 2 }, label: "hash 2 items (symbol)") + prof.report({ "foo" => "1" }, label: "hash 1 item (string)") + prof.report({ "foo" => "1", "bar" => 2 }, label: "hash 2 items (string)") + + prof.report("big string" * 200, label: "big string * 200") +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/psl_test.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/psl_test.rb new file mode 100644 index 0000000..fae398f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/psl_test.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +require "test_helper" +require "public_suffix" + +# This test runs against the current PSL file and ensures +# the definitions satisfies the test suite. +class PslTest < Minitest::Test + + ROOT = File.expand_path("..", __dir__) + + # rubocop:disable Security/Eval + def self.tests + File.readlines(File.join(ROOT, "test/tests.txt")).map do |line| + line = line.strip + next if line.empty? + next if line.start_with?("//") + + input, output = line.split(", ") + + # handle the case of eval("null"), it must be eval("nil") + input = "nil" if input == "null" + output = "nil" if output == "null" + + input = eval(input) + output = eval(output) + [input, output] + end + end + # rubocop:enable Security/Eval + + + def test_valid + # Parse the PSL and run the tests + data = File.read(PublicSuffix::List::DEFAULT_LIST_PATH) + PublicSuffix::List.default = PublicSuffix::List.parse(data) + + failures = [] + self.class.tests.each do |input, output| + # Punycode domains are not supported ATM + next if input =~ /xn--/ + + domain = PublicSuffix.domain(input) rescue nil + failures << [input, output, domain] if output != domain + end + + message = "The following #{failures.size} tests fail:\n" + failures.each { |i, o, d| message += "Expected %s to be %s, got %s\n" % [i.inspect, o.inspect, d.inspect] } + assert_equal 0, failures.size, message + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/test_helper.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/test_helper.rb new file mode 100644 index 0000000..4f8a447 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/test_helper.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +if ENV["COVERAGE"] + require "simplecov" + SimpleCov.start + + require "codecov" + SimpleCov.formatter = SimpleCov::Formatter::Codecov +end + +require "minitest/autorun" +require "minitest/reporters" +require "mocha/minitest" + +Minitest::Reporters.use! 
Minitest::Reporters::DefaultReporter.new(color: true) + +$LOAD_PATH.unshift File.expand_path("../lib", __dir__) +require "public_suffix" diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/tests.txt b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/tests.txt new file mode 100644 index 0000000..b11150a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/tests.txt @@ -0,0 +1,98 @@ +// Any copyright is dedicated to the Public Domain. +// http://creativecommons.org/publicdomain/zero/1.0/ + +// null input +null, null +// Mixed case +'COM', null +'example.COM', 'example.com' +'WwW.example.COM', 'example.com' +// Leading dot +'.com', null +'.example', null +'.example.com', null +'.example.example', null +// Unlisted TLD +'example', null +'example.example', 'example.example' +'b.example.example', 'example.example' +'a.b.example.example', 'example.example' +// Listed, but non-Internet, TLD +//'local', null +//'example.local', null +//'b.example.local', null +//'a.b.example.local', null +// TLD with only 1 rule +'biz', null +'domain.biz', 'domain.biz' +'b.domain.biz', 'domain.biz' +'a.b.domain.biz', 'domain.biz' +// TLD with some 2-level rules +'com', null +'example.com', 'example.com' +'b.example.com', 'example.com' +'a.b.example.com', 'example.com' +'uk.com', null +'example.uk.com', 'example.uk.com' +'b.example.uk.com', 'example.uk.com' +'a.b.example.uk.com', 'example.uk.com' +'test.ac', 'test.ac' +// TLD with only 1 (wildcard) rule +'mm', null +'c.mm', null +'b.c.mm', 'b.c.mm' +'a.b.c.mm', 'b.c.mm' +// More complex TLD +'jp', null +'test.jp', 'test.jp' +'www.test.jp', 'test.jp' +'ac.jp', null +'test.ac.jp', 'test.ac.jp' +'www.test.ac.jp', 'test.ac.jp' +'kyoto.jp', null +'test.kyoto.jp', 'test.kyoto.jp' +'ide.kyoto.jp', null +'b.ide.kyoto.jp', 'b.ide.kyoto.jp' +'a.b.ide.kyoto.jp', 'b.ide.kyoto.jp' +'c.kobe.jp', null +'b.c.kobe.jp', 'b.c.kobe.jp' +'a.b.c.kobe.jp', 'b.c.kobe.jp' +'city.kobe.jp', 'city.kobe.jp' +'www.city.kobe.jp', 'city.kobe.jp' +// TLD with a wildcard rule and exceptions +'ck', null +'test.ck', null +'b.test.ck', 'b.test.ck' +'a.b.test.ck', 'b.test.ck' +'www.ck', 'www.ck' +'www.www.ck', 'www.ck' +// US K12 +'us', null +'test.us', 'test.us' +'www.test.us', 'test.us' +'ak.us', null +'test.ak.us', 'test.ak.us' +'www.test.ak.us', 'test.ak.us' +'k12.ak.us', null +'test.k12.ak.us', 'test.k12.ak.us' +'www.test.k12.ak.us', 'test.k12.ak.us' +// IDN labels +'食狮.com.cn', '食狮.com.cn' +'食狮.公司.cn', '食狮.公司.cn' +'www.食狮.公司.cn', '食狮.公司.cn' +'shishi.公司.cn', 'shishi.公司.cn' +'公司.cn', null +'食狮.中国', '食狮.中国' +'www.食狮.中国', '食狮.中国' +'shishi.中国', 'shishi.中国' +'中国', null +// Same as above, but punycoded +'xn--85x722f.com.cn', 'xn--85x722f.com.cn' +'xn--85x722f.xn--55qx5d.cn', 'xn--85x722f.xn--55qx5d.cn' +'www.xn--85x722f.xn--55qx5d.cn', 'xn--85x722f.xn--55qx5d.cn' +'shishi.xn--55qx5d.cn', 'shishi.xn--55qx5d.cn' +'xn--55qx5d.cn', null +'xn--85x722f.xn--fiqs8s', 'xn--85x722f.xn--fiqs8s' +'www.xn--85x722f.xn--fiqs8s', 'xn--85x722f.xn--fiqs8s' +'shishi.xn--fiqs8s', 'shishi.xn--fiqs8s' +'xn--fiqs8s', null diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/domain_test.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/domain_test.rb new file mode 100644 index 0000000..968462d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/domain_test.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "test_helper" + +class PublicSuffix::DomainTest < Minitest::Test + + def setup + @klass = 
PublicSuffix::Domain + end + + # Tokenizes given input into labels. + def test_self_name_to_labels + assert_equal %w( someone spaces live com ), + PublicSuffix::Domain.name_to_labels("someone.spaces.live.com") + assert_equal %w( leontina23samiko wiki zoho com ), + PublicSuffix::Domain.name_to_labels("leontina23samiko.wiki.zoho.com") + end + + # Converts input into String. + def test_self_name_to_labels_converts_input_to_string + assert_equal %w( someone spaces live com ), + PublicSuffix::Domain.name_to_labels(:"someone.spaces.live.com") + end + + + def test_initialize_with_tld + domain = @klass.new("com") + assert_equal "com", domain.tld + assert_nil domain.sld + assert_nil domain.trd + end + + def test_initialize_with_tld_and_sld + domain = @klass.new("com", "google") + assert_equal "com", domain.tld + assert_equal "google", domain.sld + assert_nil domain.trd + end + + def test_initialize_with_tld_and_sld_and_trd + domain = @klass.new("com", "google", "www") + assert_equal "com", domain.tld + assert_equal "google", domain.sld + assert_equal "www", domain.trd + end + + + def test_to_s + assert_equal "com", @klass.new("com").to_s + assert_equal "google.com", @klass.new("com", "google").to_s + assert_equal "www.google.com", @klass.new("com", "google", "www").to_s + end + + def test_to_a + assert_equal [nil, nil, "com"], @klass.new("com").to_a + assert_equal [nil, "google", "com"], @klass.new("com", "google").to_a + assert_equal ["www", "google", "com"], @klass.new("com", "google", "www").to_a + end + + + def test_tld + assert_equal "com", @klass.new("com", "google", "www").tld + end + + def test_sld + assert_equal "google", @klass.new("com", "google", "www").sld + end + + def test_trd + assert_equal "www", @klass.new("com", "google", "www").trd + end + + + def test_name + assert_equal "com", @klass.new("com").name + assert_equal "google.com", @klass.new("com", "google").name + assert_equal "www.google.com", @klass.new("com", "google", "www").name + end + + def test_domain + assert_nil @klass.new("com").domain + assert_nil @klass.new("tldnotlisted").domain + assert_equal "google.com", @klass.new("com", "google").domain + assert_equal "google.tldnotlisted", @klass.new("tldnotlisted", "google").domain + assert_equal "google.com", @klass.new("com", "google", "www").domain + assert_equal "google.tldnotlisted", @klass.new("tldnotlisted", "google", "www").domain + end + + def test_subdomain + assert_nil @klass.new("com").subdomain + assert_nil @klass.new("tldnotlisted").subdomain + assert_nil @klass.new("com", "google").subdomain + assert_nil @klass.new("tldnotlisted", "google").subdomain + assert_equal "www.google.com", @klass.new("com", "google", "www").subdomain + assert_equal "www.google.tldnotlisted", @klass.new("tldnotlisted", "google", "www").subdomain + end + + + def test_domain_question + assert !@klass.new("com").domain? + assert @klass.new("com", "example").domain? + assert @klass.new("com", "example", "www").domain? + assert @klass.new("tldnotlisted", "example").domain? 
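    # Illustrative aside, not one of this suite's assertions: assuming the
    # gem's bundled default list, PublicSuffix.parse builds the same Domain
    # structure exercised above.
    parsed = PublicSuffix.parse("www.google.com")
    parsed.tld       # => "com"
    parsed.sld       # => "google"
    parsed.trd       # => "www"
    parsed.domain    # => "google.com"
    parsed.subdomain # => "www.google.com"
    parsed.domain?   # => true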
+ end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/errors_test.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/errors_test.rb new file mode 100644 index 0000000..75099ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/errors_test.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require "test_helper" + +class ErrorsTest < Minitest::Test + + # Inherits from StandardError + def test_error_inheritance + assert_kind_of StandardError, + PublicSuffix::Error.new + end + + # Inherits from PublicSuffix::Error + def test_domain_invalid_inheritance + assert_kind_of PublicSuffix::Error, + PublicSuffix::DomainInvalid.new + end + + # Inherits from PublicSuffix::DomainInvalid + def test_domain_not_allowed_inheritance + assert_kind_of PublicSuffix::DomainInvalid, + PublicSuffix::DomainNotAllowed.new + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/list_test.rb b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/list_test.rb new file mode 100644 index 0000000..9852935 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/public_suffix-4.0.6/test/unit/list_test.rb @@ -0,0 +1,241 @@ +# frozen_string_literal: true + +require "test_helper" + +class PublicSuffix::ListTest < Minitest::Test + + def setup + @list = PublicSuffix::List.new + end + + def teardown + PublicSuffix::List.default = nil + end + + + def test_initialize + assert_instance_of PublicSuffix::List, @list + assert_equal 0, @list.size + end + + + def test_equality_with_self + list = PublicSuffix::List.new + assert_equal list, list + end + + def test_equality_with_internals + rule = PublicSuffix::Rule.factory("com") + assert_equal PublicSuffix::List.new.add(rule), PublicSuffix::List.new.add(rule) + end + + def test_each_without_block + list = PublicSuffix::List.parse(< + Text, text, text + +EOF +doc = Document.new string +``` + +So parsing a string is just as easy as parsing a file. + +## Development + +After checking out the repo, run `rake test` to run the tests. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/ruby/rexml. + +## License + +The gem is available as open source under the terms of the [BSD-2-Clause](LICENSE.txt). diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/context.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/context.rdoc new file mode 100644 index 0000000..7ef01f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/context.rdoc @@ -0,0 +1,143 @@ +== Element Context + +Notes: +- All code on this page presupposes that the following has been executed: + + require 'rexml/document' + +- For convenience, examples on this page use +REXML::Document.new+, not +REXML::Element.new+. + This is completely valid, because REXML::Document is a subclass of REXML::Element. + +The context for an element is a hash of processing directives +that influence the way \XML is read, stored, and written. +The context entries are: + +- +:respect_whitespace+: controls treatment of whitespace. +- +:compress_whitespace+: determines whether whitespace is compressed. 
+- +:ignore_whitespace_nodes+: determines whether whitespace-only nodes are to be ignored. +- +:raw+: controls treatment of special characters and entities. + +The default context for a new element is {}. +You can set the context at element-creation time: + + d = REXML::Document.new('', {compress_whitespace: :all, raw: :all}) + d.context # => {:compress_whitespace=>:all, :raw=>:all} + +You can reset the entire context by assigning a new hash: + + d.context = {ignore_whitespace_nodes: :all} + d.context # => {:ignore_whitespace_nodes=>:all} + +Or you can create or modify an individual entry: + + d.context[:raw] = :all + d.context # => {:ignore_whitespace_nodes=>:all, :raw=>:all} + +=== +:respect_whitespace+ + +Affects: +REXML::Element.new+, +REXML::Element.text=+. + +By default, all parsed whitespace is respected (that is, stored whitespace not compressed): + + xml_string = 'a b c d e f' + d = REXML::Document.new(xml_string) + d.to_s # => "a b c d e f" + +Use +:respect_whitespace+ with an array of element names +to specify the elements that _are_ to have their whitespace respected; +other elements' whitespace, and whitespace between elements, will be compressed. + +In this example: +foo+ and +baz+ will have their whitespace respected; ++bar+ and the space between elements will have their whitespace compressed: + + d = REXML::Document.new(xml_string, {respect_whitespace: ['foo', 'baz']}) + d.to_s # => "a b c d e f" + bar = d.root[2] # => ... + bar.text = 'X Y' + d.to_s # => "a b X Y e f" + +=== +:compress_whitespace+ + +Affects: +REXML::Element.new+, +REXML::Element.text=+. + +Use compress_whitespace: :all +to compress whitespace both within and between elements: + + xml_string = 'a b c d e f' + d = REXML::Document.new(xml_string, {compress_whitespace: :all}) + d.to_s # => "a b c d e f" + +Use +:compress_whitespace+ with an array of element names +to compress whitespace in those elements, +but not in other elements nor between elements. + +In this example, +foo+ and +baz+ will have their whitespace compressed; ++bar+ and the space between elements will not: + + d = REXML::Document.new(xml_string, {compress_whitespace: ['foo', 'baz']}) + d.to_s # => "a b c d e f" + foo = d.root[0] # => ... + foo.text= 'X Y' + d.to_s # => "X Y c d e f" + +=== +:ignore_whitespace_nodes+ + +Affects: +REXML::Element.new+. + +Use ignore_whitespace_nodes: :all to omit all whitespace-only elements. + +In this example, +bar+ has a text node, while nodes +foo+ and +baz+ do not: + + xml_string = ' BAR ' + d = REXML::Document.new(xml_string, {ignore_whitespace_nodes: :all}) + d.to_s # => " FOO BAZ " + root = d.root # => ... + foo = root[0] # => + bar = root[1] # => ... + baz = root[2] # => + foo.first.class # => NilClass + bar.first.class # => REXML::Text + baz.first.class # => NilClass + +Use +:ignore_whitespace_nodes+ with an array of element names +to specify the elements that are to have whitespace nodes ignored. + +In this example, +bar+ and +baz+ have text nodes, while node +foo+ does not. + + xml_string = ' BAR ' + d = REXML::Document.new(xml_string, {ignore_whitespace_nodes: ['foo']}) + d.to_s # => " BAR " + root = d.root # => ... + foo = root[0] # => + bar = root[1] # => ... + baz = root[2] # => ... + foo.first.class # => NilClass + bar.first.class # => REXML::Text + baz.first.class # => REXML::Text + +=== +:raw+ + +Affects: +Element.text=+, +Element.add_text+, +Text.to_s+. 
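As with the other entries, +:raw+ can be given at element-creation time or added
to an existing context; a compact, runnable recap (the XML here is chosen only
for illustration):

  require 'rexml/document'

  d = REXML::Document.new('<a>text</a>', {compress_whitespace: :all})
  d.context                # => {:compress_whitespace=>:all}
  d.context[:raw] = :all
  d.context                # => {:compress_whitespace=>:all, :raw=>:all}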
+ +Parsing of +a+ elements is not affected by +raw+: + + xml_string = '0 < 11 > 0' + d = REXML::Document.new(xml_string, {:raw => ['a']}) + d.root.to_s # => "0 < 11 > 0" + a, b = *d.root.elements + a.to_s # => "0 < 1" + b.to_s # => "1 > 0" + +But Element#text= is affected: + + a.text = '0 < 1' + b.text = '1 > 0' + a.to_s # => "0 < 1" + b.to_s # => "1 &gt; 0" + +As is Element.add_text: + + a.add_text(' so 1 > 0') + b.add_text(' so 0 < 1') + a.to_s # => "0 < 1 so 1 > 0" + b.to_s # => "1 &gt; 0 so 0 &lt; 1" diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/child.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/child.rdoc new file mode 100644 index 0000000..8953638 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/child.rdoc @@ -0,0 +1,87 @@ +== Class Child + +Class Child includes module Node; +see {Tasks for Node}[node_rdoc.html]. + +:include: ../tocs/child_toc.rdoc + +=== Relationships + +==== Task: Set the Parent + +Use method {Child#parent=}[../../../../REXML/Parent.html#method-i-parent-3D] +to set the parent: + + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + e1.parent # => nil + e1.parent = e0 + e1.parent # => + +==== Task: Insert Previous Sibling + +Use method {Child#previous_sibling=}[../../../../REXML/Parent.html#method-i-previous_sibling-3D] +to insert a previous sibling: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.to_a # => [, ] + c = d.root[1] # => + b = REXML::Element.new('b') + c.previous_sibling = b + d.root.to_a # => [, , ] + +==== Task: Insert Next Sibling + +Use method {Child#next_sibling=}[../../../../REXML/Parent.html#method-i-next-sibling-3D] +to insert a previous sibling: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.to_a # => [, ] + a = d.root[0] # => + b = REXML::Element.new('b') + a.next_sibling = b + d.root.to_a # => [, , ] + +=== Removal or Replacement + +==== Task: Remove Child from Parent + +Use method {Child#remove}[../../../../REXML/Parent.html#method-i-remove] +to remove a child from its parent; returns the removed child: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.to_a # => [, , ] + b = d.root[1] # => + b.remove # => + d.root.to_a # => [, ] + +==== Task: Replace Child + +Use method {Child#replace_with}[../../../../REXML/Parent.html#method-i-replace] +to replace a child; +returns the replaced child: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.to_a # => [, , ] + b = d.root[1] # => + d = REXML::Element.new('d') + b.replace_with(d) # => + d.root.to_a # => [, , ] + +=== Document + +==== Task: Get the Document + +Use method {Child#document}[../../../../REXML/Parent.html#method-i-document] +to get the document for the child: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.to_a # => [, , ] + b = d.root[1] # => + b.document == d # => true + REXML::Child.new.document # => nil diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/document.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/document.rdoc new file mode 100644 index 0000000..96d0335 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/document.rdoc @@ -0,0 +1,276 @@ +== Class Document + +Class Document has methods from its superclasses and included modules; +see: + +- {Tasks for Element}[element_rdoc.html]. +- {Tasks for Parent}[parent_rdoc.html]. +- {Tasks for Child}[child_rdoc.html]. +- {Tasks for Node}[node_rdoc.html]. 
+- {Module Enumerable}[https://docs.ruby-lang.org/en/master/Enumerable.html]. + +:include: ../tocs/document_toc.rdoc + +=== New Document + +==== Task: Create an Empty Document + +Use method {Document::new}[../../../../REXML/Document.html#method-c-new] +to create an empty document. + + d = REXML::Document.new + +==== Task: Parse a \String into a New Document + +Use method {Document::new}[../../../../REXML/Document.html#method-c-new] +to parse an XML string into a new document: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + d.root # => ... + +==== Task: Parse an \IO Stream into a New Document + +Use method {Document::new}[../../../../REXML/Document.html#method-c-new] +to parse an XML \IO stream into a new document: + + xml_string = 'textmore' + File.write('t.xml', xml_string) + d = File.open('t.xml', 'r') do |file| + REXML::Document.new(file) + end + d.root # => ... + +==== Task: Create a Document from an Existing Document + +Use method {Document::new}[../../../../REXML/Document.html#method-c-new] +to create a document from an existing document. +The context and attributes are copied to the new document, +but not the children: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + d.children # => [ ... ] + d.context = {raw: :all, compress_whitespace: :all} + d.add_attributes({'bar' => 0, 'baz' => 1}) + d1 = REXML::Document.new(d) + d1.context # => {:raw=>:all, :compress_whitespace=>:all} + d1.attributes # => {"bar"=>bar='0', "baz"=>baz='1'} + d1.children # => [] + +==== Task: Clone a Document + +Use method {Document#clone}[../../../../REXML/Document.html#method-i-clone] +to clone a document. +The context and attributes are copied to the new document, +but not the children: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + d.children # => [ ... 
] + d.context = {raw: :all, compress_whitespace: :all} + d.add_attributes({'bar' => 0, 'baz' => 1}) + d1 = d.clone # => < bar='0' baz='1'/> + d1.context # => {:raw=>:all, :compress_whitespace=>:all} + d1.attributes # => {"bar"=>bar='0', "baz"=>baz='1'} + d1.children # => [] + +=== Document Type + +==== Task: Get the Document Type + +Use method {Document#doctype}[../../../../REXML/Document.html#method-i-doctype] +to get the document type: + + d = REXML::Document.new('') + d.doctype.class # => REXML::DocType + d = REXML::Document.new('') + d.doctype.class # => nil + +==== Task: Set the Document Type + +Use method {document#add}[../../../../REXML/Document.html#method-i-add] +to add or replace the document type: + + d = REXML::Document.new('') + d.doctype.class # => nil + d.add(REXML::DocType.new('foo')) + d.doctype.class # => REXML::DocType + +=== XML Declaration + +==== Task: Get the XML Declaration + +Use method {document#xml_decl}[../../../../REXML/Document.html#method-i-xml_decl] +to get the XML declaration: + + d = REXML::Document.new('') + d.xml_decl.class # => REXML::XMLDecl + d.xml_decl # => + d = REXML::Document.new('') + d.xml_decl.class # => REXML::XMLDecl + d.xml_decl # => + +==== Task: Set the XML Declaration + +Use method {document#add}[../../../../REXML/Document.html#method-i-add] +to replace the XML declaration: + + d = REXML::Document.new('') + d.add(REXML::XMLDecl.new) + +=== Children + +==== Task: Add an Element Child + +Use method +{document#add_element}[../../../../REXML/Document.html#method-i-add_element] +to add an element to the document: + + d = REXML::Document.new('') + d.add_element(REXML::Element.new('root')) + d.children # => [] + +==== Task: Add a Non-Element Child + +Use method +{document#add}[../../../../REXML/Document.html#method-i-add] +to add a non-element to the document: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + d.add(REXML::Text.new('foo')) + d.children # => [ ... 
, "foo"] + +=== Writing + +==== Task: Write to $stdout + +Use method +{document#write}[../../../../REXML/Document.html#method-i-write] +to write the document to $stdout: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + d.write + +Output: + + textmore + +==== Task: Write to IO Stream + +Use method +{document#write}[../../../../REXML/Document.html#method-i-write] +to write the document to $stdout: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + File.open('t.xml', 'w') do |file| + d.write(file) + end + p File.read('t.xml') + +Output: + + "textmore" + +==== Task: Write with No Indentation + +Use method +{document#write}[../../../../REXML/Document.html#method-i-write] +to write the document with no indentation: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.write({indent: 0}) + +Output: + + + + + + + + + +==== Task: Write with Specified Indentation + +Use method +{document#write}[../../../../REXML/Document.html#method-i-write] +to write the document with a specified indentation: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.write({indent: 2}) + +Output: + + + + + + + + + +=== Querying + +==== Task: Get the Document + +Use method +{document#document}[../../../../REXML/Document.html#method-i-document] +to get the document (+self+); overrides Element#document: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.document == d # => true + +==== Task: Get the Encoding + +Use method +{document#document}[../../../../REXML/Document.html#method-i-document] +to get the document (+self+); overrides Element#document: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.encoding # => "UTF-8" + +==== Task: Get the Node Type + +Use method +{document#node_type}[../../../../REXML/Document.html#method-i-node_type] +to get the node type (+:document+); overrides Element#node_type: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.node_type # => :document + +==== Task: Get the Root Element + +Use method +{document#root}[../../../../REXML/Document.html#method-i-root] +to get the root element: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root # => ... + +==== Task: Determine Whether Stand-Alone + +Use method +{document#stand_alone?}[../../../../REXML/Document.html#method-i-stand_alone-3F] +to get the stand-alone value: + + d = REXML::Document.new('') + d.stand_alone? # => "yes" + +==== Task: Get the Version + +Use method +{document#version}[../../../../REXML/Document.html#method-i-version] +to get the version: + + d = REXML::Document.new('') + d.version # => "2.0" diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/element.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/element.rdoc new file mode 100644 index 0000000..f229275 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/element.rdoc @@ -0,0 +1,602 @@ +== Class Element + +Class Element has methods from its superclasses and included modules; +see: + +- {Tasks for Parent}[parent_rdoc.html]. +- {Tasks for Child}[child_rdoc.html]. +- {Tasks for Node}[node_rdoc.html]. +- {Module Enumerable}[https://docs.ruby-lang.org/en/master/Enumerable.html]. 
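The tasks below treat each operation separately; as a compact preview, here is a
self-contained example (names and values chosen only for illustration) that
combines element creation, attributes, children, and text:

  require 'rexml/document'

  e = REXML::Element.new('book')
  e.add_attribute('isbn', '12345')
  e.add_element('title').text = 'Learning XML'
  e.add_element('author', {'name' => 'Erik T. Ray'})
  e.to_s # => "<book isbn='12345'><title>Learning XML</title><author name='Erik T. Ray'/></book>"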
+ +:include: ../tocs/element_toc.rdoc + +=== New Element + +==== Task: Create a Default Element + +Use method +{Element::new}[../../../../REXML/Element.html#method-c-new] +with no arguments to create a default element: + + e = REXML::Element.new + e.name # => "UNDEFINED" + e.parent # => nil + e.context # => nil + +==== Task: Create a Named Element + +Use method +{Element::new}[../../../../REXML/Element.html#method-c-new] +with a string name argument +to create a named element: + + e = REXML::Element.new('foo') + e.name # => "foo" + e.parent # => nil + e.context # => nil + +==== Task: Create an Element with Name and Parent + +Use method +{Element::new}[../../../../REXML/Element.html#method-c-new] +with name and parent arguments +to create an element with name and parent: + + p = REXML::Parent.new + e = REXML::Element.new('foo', p) + e.name # => "foo" + e.parent # => #]> + e.context # => nil + +==== Task: Create an Element with Name, Parent, and Context + +Use method +{Element::new}[../../../../REXML/Element.html#method-c-new] +with name, parent, and context arguments +to create an element with name, parent, and context: + + p = REXML::Parent.new + e = REXML::Element.new('foo', p, {compress_whitespace: :all}) + e.name # => "foo" + e.parent # => #]> + e.context # => {:compress_whitespace=>:all} + +==== Task: Create a Shallow Clone + +Use method +{Element#clone}[../../../../REXML/Element.html#method-i-clone] +to create a shallow clone of an element, +copying only the name, attributes, and context: + + e0 = REXML::Element.new('foo', nil, {compress_whitespace: :all}) + e0.add_attribute(REXML::Attribute.new('bar', 'baz')) + e0.context = {compress_whitespace: :all} + e1 = e0.clone # => + e1.name # => "foo" + e1.context # => {:compress_whitespace=>:all} + +=== Attributes + +==== Task: Create and Add an Attribute + +Use method +{Element#add_attribute}[../../../../REXML/Element.html#method-i-add_attribute] +to create and add an attribute: + + e = REXML::Element.new + e.add_attribute('attr', 'value') # => "value" + e['attr'] # => "value" + e.add_attribute('attr', 'VALUE') # => "VALUE" + e['attr'] # => "VALUE" + +==== Task: Add an Existing Attribute + +Use method +{Element#add_attribute}[../../../../REXML/Element.html#method-i-add_attribute] +to add an existing attribute: + + e = REXML::Element.new + a = REXML::Attribute.new('attr', 'value') + e.add_attribute(a) + e['attr'] # => "value" + a = REXML::Attribute.new('attr', 'VALUE') + e.add_attribute(a) + e['attr'] # => "VALUE" + +==== Task: Add Multiple Attributes from a Hash + +Use method +{Element#add_attributes}[../../../../REXML/Element.html#method-i-add_attributes] +to add multiple attributes from a hash: + + e = REXML::Element.new + h = {'foo' => 0, 'bar' => 1} + e.add_attributes(h) + e['foo'] # => "0" + e['bar'] # => "1" + +==== Task: Add Multiple Attributes from an Array + +Use method +{Element#add_attributes}[../../../../REXML/Element.html#method-i-add_attributes] +to add multiple attributes from an array: + + e = REXML::Element.new + a = [['foo', 0], ['bar', 1]] + e.add_attributes(a) + e['foo'] # => "0" + e['bar'] # => "1" + +==== Task: Retrieve the Value for an Attribute Name + +Use method +{Element#[]}[../../../../REXML/Element.html#method-i-5B-5D] +to retrieve the value for an attribute name: + + e = REXML::Element.new + e.add_attribute('attr', 'value') # => "value" + e['attr'] # => "value" + +==== Task: Retrieve the Attribute Value for a Name and Namespace + +Use method +{Element#attribute}[../../../../REXML/Element.html#method-i-attribute] 
+to retrieve the value for an attribute name: + + xml_string = "" + d = REXML::Document.new(xml_string) + e = d.root + e.attribute("x") # => x='x' + e.attribute("x", "a") # => a:x='a:x' + +==== Task: Delete an Attribute + +Use method +{Element#delete_attribute}[../../../../REXML/Element.html#method-i-delete_attribute] +to remove an attribute: + + e = REXML::Element.new('foo') + e.add_attribute('bar', 'baz') + e.delete_attribute('bar') + e.delete_attribute('bar') + e['bar'] # => nil + +==== Task: Determine Whether the Element Has Attributes + +Use method +{Element#has_attributes?}[../../../../REXML/Element.html#method-i-has_attributes-3F] +to determine whether the element has attributes: + + e = REXML::Element.new('foo') + e.has_attributes? # => false + e.add_attribute('bar', 'baz') + e.has_attributes? # => true + +=== Children + +Element Children + +==== Task: Create and Add an Element + +Use method +{Element#add_element}[../../../../REXML/Element.html#method-i-add_element] +to create a new element and add it to this element: + + e0 = REXML::Element.new('foo') + e0.add_element('bar') + e0.children # => [] + +==== Task: Add an Existing Element + +Use method +{Element#add_element}[../../../../REXML/Element.html#method-i-add_element] +to add an element to this element: + + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + e0.add_element(e1) + e0.children # => [] + +==== Task: Create and Add an Element with Attributes + +Use method +{Element#add_element}[../../../../REXML/Element.html#method-i-add_element] +to create a new element with attributes, and add it to this element: + + e0 = REXML::Element.new('foo') + e0.add_element('bar', {'name' => 'value'}) + e0.children # => [] + +==== Task: Add an Existing Element with Added Attributes + +Use method +{Element#add_element}[../../../../REXML/Element.html#method-i-add_element] +to add an element to this element: + + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + e0.add_element(e1, {'name' => 'value'}) + e0.children # => [] + +==== Task: Delete a Specified Element + +Use method +{Element#delete_element}[../../../../REXML/Element.html#method-i-delete_element] +to remove a specified element from this element: + + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + e0.add_element(e1) + e0.children # => [] + e0.delete_element(e1) + e0.children # => [] + +==== Task: Delete an Element by Index + +Use method +{Element#delete_element}[../../../../REXML/Element.html#method-i-delete_element] +to remove an element from this element by index: + + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + e0.add_element(e1) + e0.children # => [] + e0.delete_element(1) + e0.children # => [] + +==== Task: Delete an Element by XPath + +Use method +{Element#delete_element}[../../../../REXML/Element.html#method-i-delete_element] +to remove an element from this element by XPath: + + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + e0.add_element(e1) + e0.children # => [] + e0.delete_element('//bar/') + e0.children # => [] + +==== Task: Determine Whether Element Children + +Use method +{Element#has_elements?}[../../../../REXML/Element.html#method-i-has_elements-3F] +to determine whether the element has element children: + + e0 = REXML::Element.new('foo') + e0.has_elements? # => false + e0.add_element(REXML::Element.new('bar')) + e0.has_elements? 
# => true + +==== Task: Get Element Descendants by XPath + +Use method +{Element#get_elements}[../../../../REXML/Element.html#method-i-get_elements] +to fetch all element descendant children by XPath: + + xml_string = <<-EOT + + + + + + EOT + d = REXML::Document.new(xml_string) + d.root.get_elements('//a') # => [ ... , ] + +==== Task: Get Next Element Sibling + +Use method +{Element#next_element}[../../../../REXML/Element.html#method-i-next_element] +to retrieve the next element sibling: + + d = REXML::Document.new 'text' + d.root.elements['b'].next_element #-> + d.root.elements['c'].next_element #-> nil + +==== Task: Get Previous Element Sibling + +Use method +{Element#previous_element}[../../../../REXML/Element.html#method-i-previous_element] +to retrieve the previous element sibling: + + d = REXML::Document.new 'text' + d.root.elements['c'].previous_element #-> + d.root.elements['b'].previous_element #-> nil + +Text Children + +==== Task: Add a Text Node + +Use method +{Element#add_text}[../../../../REXML/Element.html#method-i-add_text] +to add a text node to the element: + + d = REXML::Document.new('foobar') + e = d.root + e.add_text(REXML::Text.new('baz')) + e.to_a # => ["foo", , "bar", "baz"] + e.add_text(REXML::Text.new('baz')) + e.to_a # => ["foo", , "bar", "baz", "baz"] + +==== Task: Replace the First Text Node + +Use method +{Element#text=}[../../../../REXML/Element.html#method-i-text-3D] +to replace the first text node in the element: + + d = REXML::Document.new('textmore') + e = d.root + e.to_a # => [, "text", , "more", ] + e.text = 'oops' + e.to_a # => [, "oops", , "more", ] + +==== Task: Remove the First Text Node + +Use method +{Element#text=}[../../../../REXML/Element.html#method-i-text-3D] +to remove the first text node in the element: + + d = REXML::Document.new('textmore') + e = d.root + e.to_a # => [, "text", , "more", ] + e.text = nil + e.to_a # => [, , "more", ] + +==== Task: Retrieve the First Text Node + +Use method +{Element#get_text}[../../../../REXML/Element.html#method-i-get_text] +to retrieve the first text node in the element: + + d = REXML::Document.new('textmore') + e = d.root + e.to_a # => [, "text", , "more", ] + e.get_text # => "text" + +==== Task: Retrieve a Specific Text Node + +Use method +{Element#get_text}[../../../../REXML/Element.html#method-i-get_text] +to retrieve the first text node in a specified element: + + d = REXML::Document.new "some text this is bold! more text" + e = d.root + e.get_text('//root') # => "some text " + e.get_text('//b') # => "this is bold!" + +==== Task: Determine Whether the Element has Text Nodes + +Use method +{Element#has_text?}[../../../../REXML/Element.html#method-i-has_text-3F] +to determine whethe the element has text: + + e = REXML::Element.new('foo') + e.has_text? # => false + e.add_text('bar') + e.has_text? 
# => true + +Other Children + +==== Task: Get the Child at a Given Index + +Use method +{Element#[]}[../../../../REXML/Element.html#method-i-5B-5D] +to retrieve the child at a given index: + + d = REXML::Document.new '>textmore' + e = d.root + e[0] # => + e[1] # => "text" + e[2] # => + +==== Task: Get All CDATA Children + +Use method +{Element#cdatas}[../../../../REXML/Element.html#method-i-cdatas] +to retrieve all CDATA children: + + xml_string = <<-EOT + + + + + EOT + d = REXML::Document.new(xml_string) + d.root.cdatas # => ["foo", "bar"] + +==== Task: Get All Comment Children + +Use method +{Element#comments}[../../../../REXML/Element.html#method-i-comments] +to retrieve all comment children: + + xml_string = <<-EOT + + + + + EOT + d = REXML::Document.new(xml_string) + d.root.comments.map {|comment| comment.to_s } # => ["foo", "bar"] + +==== Task: Get All Processing Instruction Children + +Use method +{Element#instructions}[../../../../REXML/Element.html#method-i-instructions] +to retrieve all processing instruction children: + + xml_string = <<-EOT + + + + + EOT + d = REXML::Document.new(xml_string) + instructions = d.root.instructions.map {|instruction| instruction.to_s } + instructions # => ["", ""] + +==== Task: Get All Text Children + +Use method +{Element#texts}[../../../../REXML/Element.html#method-i-texts] +to retrieve all text children: + + xml_string = 'textmore' + d = REXML::Document.new(xml_string) + d.root.texts # => ["text", "more"] + +=== Namespaces + +==== Task: Add a Namespace + +Use method +{Element#add_namespace}[../../../../REXML/Element.html#method-i-add_namespace] +to add a namespace to the element: + + e = REXML::Element.new('foo') + e.add_namespace('bar') + e.namespaces # => {"xmlns"=>"bar"} + +==== Task: Delete the Default Namespace + +Use method +{Element#delete_namespace}[../../../../REXML/Element.html#method-i-delete_namespace] +to remove the default namespace from the element: + + d = REXML::Document.new "" + d.to_s # => "" + d.root.delete_namespace # => + d.to_s # => "" + +==== Task: Delete a Specific Namespace + +Use method +{Element#delete_namespace}[../../../../REXML/Element.html#method-i-delete_namespace] +to remove a specific namespace from the element: + + d = REXML::Document.new "" + d.to_s # => "" + d.root.delete_namespace # => + d.to_s # => "" + d.root.delete_namespace('foo') + d.to_s # => "" + +==== Task: Get a Namespace URI + +Use method +{Element#namespace}[../../../../REXML/Element.html#method-i-namespace] +to retrieve a speficic namespace URI for the element: + + xml_string = <<-EOT + + + + + + + EOT + d = REXML::Document.new(xml_string) + b = d.elements['//b'] + b.namespace # => "1" + b.namespace('y') # => "2" + +==== Task: Retrieve Namespaces + +Use method +{Element#namespaces}[../../../../REXML/Element.html#method-i-namespaces] +to retrieve all namespaces for the element: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.attributes.namespaces # => {"xmlns"=>"foo", "x"=>"bar", "y"=>"twee"} + +==== Task: Retrieve Namespace Prefixes + +Use method +{Element#prefixes}[../../../../REXML/Element.html#method-i-prefixes] +to retrieve all prefixes (namespace names) for the element: + + xml_string = <<-EOT + + + + + + + EOT + d = REXML::Document.new(xml_string, {compress_whitespace: :all}) + d.elements['//a'].prefixes # => ["x", "y"] + d.elements['//b'].prefixes # => ["x", "y"] + d.elements['//c'].prefixes # => ["x", "y", "z"] + +=== Iteration + +==== Task: Iterate Over Elements + +Use method 
+{Element#each_element}[../../../../REXML/Element.html#method-i-each_element] +to iterate over element children: + + d = REXML::Document.new 'bbd' + d.root.each_element {|e| p e } + +Output: + + ... + ... + ... + + +==== Task: Iterate Over Elements Having a Specified Attribute + +Use method +{Element#each_element_with_attribute}[../../../../REXML/Element.html#method-i-each_element_with_attribute] +to iterate over element children that have a specified attribute: + + d = REXML::Document.new '' + a = d.root + a.each_element_with_attribute('id') {|e| p e } + +Output: + + + + + +==== Task: Iterate Over Elements Having a Specified Attribute and Value + +Use method +{Element#each_element_with_attribute}[../../../../REXML/Element.html#method-i-each_element_with_attribute] +to iterate over element children that have a specified attribute and value: + + d = REXML::Document.new '' + a = d.root + a.each_element_with_attribute('id', '1') {|e| p e } + +Output: + + + + +==== Task: Iterate Over Elements Having Specified Text + +Use method +{Element#each_element_with_text}[../../../../REXML/Element.html#method-i-each_element_with_text] +to iterate over element children that have specified text: + + +=== Context + +#whitespace +#ignore_whitespace_nodes +#raw + +=== Other Getters + +#document +#root +#root_node +#node_type +#xpath +#inspect diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/node.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/node.rdoc new file mode 100644 index 0000000..d5d2e12 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/node.rdoc @@ -0,0 +1,97 @@ +== Module Node + +:include: ../tocs/node_toc.rdoc + +=== Siblings + +==== Task: Find Previous Sibling + +Use method +{Node.previous_sibling_node}[../../../../REXML/Node.html#method-i-previous_sibling] +to retrieve the previous sibling: + + d = REXML::Document.new('') + b = d.root[1] # => + b.previous_sibling_node # => + +==== Task: Find Next Sibling + +Use method +{Node.next_sibling_node}[../../../../REXML/Node.html#method-i-next_sibling] +to retrieve the next sibling: + + d = REXML::Document.new('') + b = d.root[1] # => + b.next_sibling_node # => + +=== Position + +==== Task: Find Own Index Among Siblings + +Use method +{Node.index_in_parent}[../../../../REXML/Node.html#method-i-index_in_parent] +to retrieve the 1-based index of this node among its siblings: + + d = REXML::Document.new('') + b = d.root[1] # => + b.index_in_parent # => 2 + +=== Recursive Traversal + +==== Task: Traverse Each Recursively + +Use method +{Node.each_recursive}[../../../../REXML/Node.html#method-i-each_recursive] +to traverse a tree of nodes recursively: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.each_recursive {|node| p node } + +Output: + + ... + ... + + ... + + +=== Recursive Search + +==== Task: Traverse Each Recursively + +Use method +{Node.find_first_recursive}[../../../../REXML/Node.html#method-i-find_first_recursive] +to search a tree of nodes recursively: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.find_first_recursive {|node| node.name == 'c' } # => + +=== Representation + +==== Task: Represent a String + +Use method {Node.to_s}[../../../../REXML/Node.html#method-i-to_s] +to represent the node as a string: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.to_s # => "" + +=== Parent? 
+ +==== Task: Determine Whether the Node is a Parent + +Use method {Node.parent?}[../../../../REXML/Node.html#method-i-parent-3F] +to determine whether the node is a parent; +class Text derives from Node: + + d = REXML::Document.new('textmore') + t = d.root[1] # => "text" + t.parent? # => false + +Class Parent also derives from Node, but overrides this method: + + p = REXML::Parent.new + p.parent? # => true diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/parent.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/parent.rdoc new file mode 100644 index 0000000..54f1dbe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/rdoc/parent.rdoc @@ -0,0 +1,267 @@ +== Class Parent + +Class Parent has methods from its superclasses and included modules; +see: + +- {Tasks for Child}[child_rdoc.html]. +- {Tasks for Node}[node_rdoc.html]. +- {Module Enumerable}[https://docs.ruby-lang.org/en/master/Enumerable.html]. + +:include: ../tocs/parent_toc.rdoc + +=== Queries + +==== Task: Get the Count of Children + +Use method {Parent#size}[../../../../REXML/Parent.html#method-i-size] +(or its alias +length+) to get the count of the parent's children: + + p = REXML::Parent.new + p.size # => 0 + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.size # => 3 + +==== Task: Get the Child at a Given Index + +Use method {Parent#[]}[../../../../REXML/Parent.html#method-i-5B-5D] +to get the child at a given index: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root[1] # => + d.root[-1] # => + d.root[50] # => nil + +==== Task: Get the Index of a Given Child + +Use method {Parent#index}[../../../../REXML/Parent.html#method-i-index] +to get the index (0-based offset) of a child: + + d = REXML::Document.new('') + root = d.root + e0 = REXML::Element.new('foo') + e1 = REXML::Element.new('bar') + root.add(e0) # => + root.add(e1) # => + root.add(e0) # => + root.add(e1) # => + root.index(e0) # => 0 + root.index(e1) # => 1 + +==== Task: Get the Children + +Use method {Parent#children}[../../../../REXML/Parent.html#method-i-children] +(or its alias +to_a+) to get the parent's children: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + +==== Task: Determine Whether the Node is a Parent + +Use method {Parent#parent?}[../../../../REXML/Parent.html#method-i-parent-3F] +to determine whether the node is a parent; +class Text derives from Node: + + d = REXML::Document.new('textmore') + t = d.root[1] # => "text" + t.parent? # => false + +Class Parent also derives from Node, but overrides this method: + + p = REXML::Parent.new + p.parent? 
# => true + +=== Additions + +==== Task: Add a Child at the Beginning + +Use method {Parent#unshift}[../../../../REXML/Parent.html#method-i-unshift] +to add a child as at the beginning of the children: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + d.root.unshift REXML::Element.new('d') + d.root.children # => [, , , ] + +==== Task: Add a Child at the End + +Use method {Parent#<<}[../../../../REXML/Parent.html#method-i-3C-3C] +(or an alias +push+ or +add+) to add a child as at the end of the children: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + d.root << REXML::Element.new('d') + d.root.children # => [, , , ] + +==== Task: Replace a Child with Another Child + +Use method {Parent#replace}[../../../../REXML/Parent.html#method-i-replace] + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + b = d.root[1] # => + d.replace_child(b, REXML::Element.new('d')) + d.root.children # => [, ] + +==== Task: Replace Multiple Children with Another Child + +Use method {Parent#[]=}[../../../../REXML/Parent.html#method-i-parent-5B-5D-3D] +to replace multiple consecutive children with another child: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , , ] + d.root[1, 2] = REXML::Element.new('x') + d.root.children # => [, , ] + d.root[1, 5] = REXML::Element.new('x') + d.root.children # => [, ] # BUG? + +==== Task: Insert Child Before a Given Child + +Use method {Parent#insert_before}[../../../../REXML/Parent.html#method-i-insert_before] +to insert a child immediately before a given child: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + b = d.root[1] # => + x = REXML::Element.new('x') + d.root.insert_before(b, x) + d.root.children # => [, , , ] + +==== Task: Insert Child After a Given Child + +Use method {Parent#insert_after}[../../../../REXML/Parent.html#method-i-insert_after] +to insert a child immediately after a given child: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + b = d.root[1] # => + x = REXML::Element.new('x') + d.root.insert_after(b, x) + d.root.children # => [, , , ] + +=== Deletions + +==== Task: Remove a Given Child + +Use method {Parent#delete}[../../../../REXML/Parent.html#method-i-delete] +to remove all occurrences of a given child: + + d = REXML::Document.new('') + a = REXML::Element.new('a') + b = REXML::Element.new('b') + d.root.add(a) + d.root.add(b) + d.root.add(a) + d.root.add(b) + d.root.children # => [, , , ] + d.root.delete(b) + d.root.children # => [, ] + +==== Task: Remove the Child at a Specified Offset + +Use method {Parent#delete_at}[../../../../REXML/Parent.html#method-i-delete_at] +to remove the child at a specified offset: + + d = REXML::Document.new('') + a = REXML::Element.new('a') + b = REXML::Element.new('b') + d.root.add(a) + d.root.add(b) + d.root.add(a) + d.root.add(b) + d.root.children # => [, , , ] + d.root.delete_at(2) + d.root.children # => [, , ] + +==== Task: Remove Children That Meet Specified Criteria + +Use method {Parent#delete_if}[../../../../REXML/Parent.html#method-i-delete_if] +to remove children that meet criteria specified in the given block: + + d = REXML::Document.new('') + d.root.add(REXML::Element.new('x')) + d.root.add(REXML::Element.new('xx')) + d.root.add(REXML::Element.new('xxx')) + d.root.add(REXML::Element.new('xxxx')) + d.root.children # => [, , , ] + d.root.delete_if {|child| child.name.size.odd? 
} + d.root.children # => [, ] + +=== Iterations + +==== Task: Iterate Over Children + +Use method {Parent#each_child}[../../../../REXML/Parent.html#method-i-each_child] +(or its alias +each+) to iterate over all children: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + d.root.each_child {|child| p child } + +Output: + + + + + +==== Task: Iterate Over Child Indexes + +Use method {Parent#each_index}[../../../../REXML/Parent.html#method-i-each_index] +to iterate over all child indexes: + + xml_string = '' + d = REXML::Document.new(xml_string) + d.root.children # => [, , ] + d.root.each_index {|child| p child } + +Output: + + 0 + 1 + 2 + +=== Clones + +==== Task: Clone Deeply + +Use method {Parent#deep_clone}[../../../../REXML/Parent.html#method-i-deep_clone] +to clone deeply; that is, to clone every nested node that is a Parent object: + + xml_string = <<-EOT + + + + Everyday Italian + Giada De Laurentiis + 2005 + 30.00 + + + Harry Potter + J K. Rowling + 2005 + 29.99 + + + Learning XML + Erik T. Ray + 2003 + 39.95 + + + EOT + d = REXML::Document.new(xml_string) + root = d.root + shallow = root.clone + deep = root.deep_clone + shallow.to_s.size # => 12 + deep.to_s.size # => 590 diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/child_toc.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/child_toc.rdoc new file mode 100644 index 0000000..a2083a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/child_toc.rdoc @@ -0,0 +1,12 @@ +Tasks on this page: + +- {Relationships}[#label-Relationships] + - {Task: Set the Parent}[#label-Task-3A+Set+the+Parent] + - {Task: Insert Previous Sibling}[#label-Task-3A+Insert+Previous+Sibling] + - {Task: Insert Next Sibling}[#label-Task-3A+Insert+Next+Sibling] +- {Removal or Replacement}[#label-Removal+or+Replacement] + - {Task: Remove Child from Parent}[#label-Task-3A+Remove+Child+from+Parent] + - {Task: Replace Child}[#label-Task-3A+Replace+Child] +- {Document}[#label-Document] + - {Task: Get the Document}[#label-Task-3A+Get+the+Document] + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/document_toc.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/document_toc.rdoc new file mode 100644 index 0000000..5db055f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/document_toc.rdoc @@ -0,0 +1,30 @@ +Tasks on this page: + +- {New Document}[#label-New+Document] + - {Task: Create an Empty Document}[#label-Task-3A+Create+an+Empty+Document] + - {Task: Parse a String into a New Document}[#label-Task-3A+Parse+a+String+into+a+New+Document] + - {Task: Parse an IO Stream into a New Document}[#label-Task-3A+Parse+an+IO+Stream+into+a+New+Document] + - {Task: Create a Document from an Existing Document}[#label-Task-3A+Create+a+Document+from+an+Existing+Document] + - {Task: Clone a Document}[#label-Task-3A+Clone+a+Document] +- {Document Type}[#label-Document+Type] + - {Task: Get the Document Type}[#label-Task-3A+Get+the+Document+Type] + - {Task: Set the Document Type}[#label-Task-3A+Set+the+Document+Type] +- {XML Declaration}[#label-XML+Declaration] + - {Task: Get the XML Declaration}[#label-Task-3A+Get+the+XML+Declaration] + - {Task: Set the XML Declaration}[#label-Task-3A+Set+the+XML+Declaration] +- {Children}[#label-Children] + - {Task: Add an Element Child}[#label-Task-3A+Add+an+Element+Child] + - {Task: Add a Non-Element Child}[#label-Task-3A+Add+a+Non-Element+Child] +- 
{Writing}[#label-Writing] + - {Task: Write to $stdout}[#label-Task-3A+Write+to+-24stdout] + - {Task: Write to IO Stream}[#label-Task-3A+Write+to+IO+Stream] + - {Task: Write with No Indentation}[#label-Task-3A+Write+with+No+Indentation] + - {Task: Write with Specified Indentation}[#label-Task-3A+Write+with+Specified+Indentation] +- {Querying}[#label-Querying] + - {Task: Get the Document}[#label-Task-3A+Get+the+Document] + - {Task: Get the Encoding}[#label-Task-3A+Get+the+Encoding] + - {Task: Get the Node Type}[#label-Task-3A+Get+the+Node+Type] + - {Task: Get the Root Element}[#label-Task-3A+Get+the+Root+Element] + - {Task: Determine Whether Stand-Alone}[#label-Task-3A+Determine+Whether+Stand-Alone] + - {Task: Get the Version}[#label-Task-3A+Get+the+Version] + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/element_toc.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/element_toc.rdoc new file mode 100644 index 0000000..60a504a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/element_toc.rdoc @@ -0,0 +1,55 @@ +Tasks on this page: + +- {New Element}[#label-New+Element] + - {Task: Create a Default Element}[#label-Task-3A+Create+a+Default+Element] + - {Task: Create a Named Element}[#label-Task-3A+Create+a+Named+Element] + - {Task: Create an Element with Name and Parent}[#label-Task-3A+Create+an+Element+with+Name+and+Parent] + - {Task: Create an Element with Name, Parent, and Context}[#label-Task-3A+Create+an+Element+with+Name-2C+Parent-2C+and+Context] + - {Task: Create a Shallow Clone}[#label-Task-3A+Create+a+Shallow+Clone] +- {Attributes}[#label-Attributes] + - {Task: Create and Add an Attribute}[#label-Task-3A+Create+and+Add+an+Attribute] + - {Task: Add an Existing Attribute}[#label-Task-3A+Add+an+Existing+Attribute] + - {Task: Add Multiple Attributes from a Hash}[#label-Task-3A+Add+Multiple+Attributes+from+a+Hash] + - {Task: Add Multiple Attributes from an Array}[#label-Task-3A+Add+Multiple+Attributes+from+an+Array] + - {Task: Retrieve the Value for an Attribute Name}[#label-Task-3A+Retrieve+the+Value+for+an+Attribute+Name] + - {Task: Retrieve the Attribute Value for a Name and Namespace}[#label-Task-3A+Retrieve+the+Attribute+Value+for+a+Name+and+Namespace] + - {Task: Delete an Attribute}[#label-Task-3A+Delete+an+Attribute] + - {Task: Determine Whether the Element Has Attributes}[#label-Task-3A+Determine+Whether+the+Element+Has+Attributes] +- {Children}[#label-Children] + - {Task: Create and Add an Element}[#label-Task-3A+Create+and+Add+an+Element] + - {Task: Add an Existing Element}[#label-Task-3A+Add+an+Existing+Element] + - {Task: Create and Add an Element with Attributes}[#label-Task-3A+Create+and+Add+an+Element+with+Attributes] + - {Task: Add an Existing Element with Added Attributes}[#label-Task-3A+Add+an+Existing+Element+with+Added+Attributes] + - {Task: Delete a Specified Element}[#label-Task-3A+Delete+a+Specified+Element] + - {Task: Delete an Element by Index}[#label-Task-3A+Delete+an+Element+by+Index] + - {Task: Delete an Element by XPath}[#label-Task-3A+Delete+an+Element+by+XPath] + - {Task: Determine Whether Element Children}[#label-Task-3A+Determine+Whether+Element+Children] + - {Task: Get Element Descendants by XPath}[#label-Task-3A+Get+Element+Descendants+by+XPath] + - {Task: Get Next Element Sibling}[#label-Task-3A+Get+Next+Element+Sibling] + - {Task: Get Previous Element Sibling}[#label-Task-3A+Get+Previous+Element+Sibling] + - {Task: Add a Text Node}[#label-Task-3A+Add+a+Text+Node] + - {Task: 
Replace the First Text Node}[#label-Task-3A+Replace+the+First+Text+Node] + - {Task: Remove the First Text Node}[#label-Task-3A+Remove+the+First+Text+Node] + - {Task: Retrieve the First Text Node}[#label-Task-3A+Retrieve+the+First+Text+Node] + - {Task: Retrieve a Specific Text Node}[#label-Task-3A+Retrieve+a+Specific+Text+Node] + - {Task: Determine Whether the Element has Text Nodes}[#label-Task-3A+Determine+Whether+the+Element+has+Text+Nodes] + - {Task: Get the Child at a Given Index}[#label-Task-3A+Get+the+Child+at+a+Given+Index] + - {Task: Get All CDATA Children}[#label-Task-3A+Get+All+CDATA+Children] + - {Task: Get All Comment Children}[#label-Task-3A+Get+All+Comment+Children] + - {Task: Get All Processing Instruction Children}[#label-Task-3A+Get+All+Processing+Instruction+Children] + - {Task: Get All Text Children}[#label-Task-3A+Get+All+Text+Children] +- {Namespaces}[#label-Namespaces] + - {Task: Add a Namespace}[#label-Task-3A+Add+a+Namespace] + - {Task: Delete the Default Namespace}[#label-Task-3A+Delete+the+Default+Namespace] + - {Task: Delete a Specific Namespace}[#label-Task-3A+Delete+a+Specific+Namespace] + - {Task: Get a Namespace URI}[#label-Task-3A+Get+a+Namespace+URI] + - {Task: Retrieve Namespaces}[#label-Task-3A+Retrieve+Namespaces] + - {Task: Retrieve Namespace Prefixes}[#label-Task-3A+Retrieve+Namespace+Prefixes] +- {Iteration}[#label-Iteration] + - {Task: Iterate Over Elements}[#label-Task-3A+Iterate+Over+Elements] + - {Task: Iterate Over Elements Having a Specified Attribute}[#label-Task-3A+Iterate+Over+Elements+Having+a+Specified+Attribute] + - {Task: Iterate Over Elements Having a Specified Attribute and Value}[#label-Task-3A+Iterate+Over+Elements+Having+a+Specified+Attribute+and+Value] + - {Task: Iterate Over Elements Having Specified Text}[#label-Task-3A+Iterate+Over+Elements+Having+Specified+Text] +- {Context}[#label-Context] +- {Other Getters}[#label-Other+Getters] + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/master_toc.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/master_toc.rdoc new file mode 100644 index 0000000..0214f6b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/master_toc.rdoc @@ -0,0 +1,135 @@ +== Tasks + +=== {Child}[../../tasks/rdoc/child_rdoc.html] +- {Relationships}[../../tasks/rdoc/child_rdoc.html#label-Relationships] + - {Task: Set the Parent}[../../tasks/rdoc/child_rdoc.html#label-Task-3A+Set+the+Parent] + - {Task: Insert Previous Sibling}[../../tasks/rdoc/child_rdoc.html#label-Task-3A+Insert+Previous+Sibling] + - {Task: Insert Next Sibling}[../../tasks/rdoc/child_rdoc.html#label-Task-3A+Insert+Next+Sibling] +- {Removal or Replacement}[../../tasks/rdoc/child_rdoc.html#label-Removal+or+Replacement] + - {Task: Remove Child from Parent}[../../tasks/rdoc/child_rdoc.html#label-Task-3A+Remove+Child+from+Parent] + - {Task: Replace Child}[../../tasks/rdoc/child_rdoc.html#label-Task-3A+Replace+Child] +- {Document}[../../tasks/rdoc/child_rdoc.html#label-Document] + - {Task: Get the Document}[../../tasks/rdoc/child_rdoc.html#label-Task-3A+Get+the+Document] + +=== {Document}[../../tasks/rdoc/document_rdoc.html] +- {New Document}[../../tasks/rdoc/document_rdoc.html#label-New+Document] + - {Task: Create an Empty Document}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Create+an+Empty+Document] + - {Task: Parse a String into a New Document}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Parse+a+String+into+a+New+Document] + - {Task: Parse an IO Stream into a New 
Document}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Parse+an+IO+Stream+into+a+New+Document] + - {Task: Create a Document from an Existing Document}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Create+a+Document+from+an+Existing+Document] + - {Task: Clone a Document}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Clone+a+Document] +- {Document Type}[../../tasks/rdoc/document_rdoc.html#label-Document+Type] + - {Task: Get the Document Type}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+Document+Type] + - {Task: Set the Document Type}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Set+the+Document+Type] +- {XML Declaration}[../../tasks/rdoc/document_rdoc.html#label-XML+Declaration] + - {Task: Get the XML Declaration}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+XML+Declaration] + - {Task: Set the XML Declaration}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Set+the+XML+Declaration] +- {Children}[../../tasks/rdoc/document_rdoc.html#label-Children] + - {Task: Add an Element Child}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Add+an+Element+Child] + - {Task: Add a Non-Element Child}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Add+a+Non-Element+Child] +- {Writing}[../../tasks/rdoc/document_rdoc.html#label-Writing] + - {Task: Write to $stdout}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Write+to+-24stdout] + - {Task: Write to IO Stream}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Write+to+IO+Stream] + - {Task: Write with No Indentation}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Write+with+No+Indentation] + - {Task: Write with Specified Indentation}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Write+with+Specified+Indentation] +- {Querying}[../../tasks/rdoc/document_rdoc.html#label-Querying] + - {Task: Get the Document}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+Document] + - {Task: Get the Encoding}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+Encoding] + - {Task: Get the Node Type}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+Node+Type] + - {Task: Get the Root Element}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+Root+Element] + - {Task: Determine Whether Stand-Alone}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Determine+Whether+Stand-Alone] + - {Task: Get the Version}[../../tasks/rdoc/document_rdoc.html#label-Task-3A+Get+the+Version] + +=== {Element}[../../tasks/rdoc/element_rdoc.html] +- {New Element}[../../tasks/rdoc/element_rdoc.html#label-New+Element] + - {Task: Create a Default Element}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+a+Default+Element] + - {Task: Create a Named Element}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+a+Named+Element] + - {Task: Create an Element with Name and Parent}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+an+Element+with+Name+and+Parent] + - {Task: Create an Element with Name, Parent, and Context}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+an+Element+with+Name-2C+Parent-2C+and+Context] + - {Task: Create a Shallow Clone}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+a+Shallow+Clone] +- {Attributes}[../../tasks/rdoc/element_rdoc.html#label-Attributes] + - {Task: Create and Add an Attribute}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+and+Add+an+Attribute] + - {Task: Add an Existing Attribute}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+an+Existing+Attribute] + - {Task: Add Multiple Attributes from a 
Hash}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+Multiple+Attributes+from+a+Hash] + - {Task: Add Multiple Attributes from an Array}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+Multiple+Attributes+from+an+Array] + - {Task: Retrieve the Value for an Attribute Name}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Retrieve+the+Value+for+an+Attribute+Name] + - {Task: Retrieve the Attribute Value for a Name and Namespace}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Retrieve+the+Attribute+Value+for+a+Name+and+Namespace] + - {Task: Delete an Attribute}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Delete+an+Attribute] + - {Task: Determine Whether the Element Has Attributes}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Determine+Whether+the+Element+Has+Attributes] +- {Children}[../../tasks/rdoc/element_rdoc.html#label-Children] + - {Task: Create and Add an Element}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+and+Add+an+Element] + - {Task: Add an Existing Element}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+an+Existing+Element] + - {Task: Create and Add an Element with Attributes}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Create+and+Add+an+Element+with+Attributes] + - {Task: Add an Existing Element with Added Attributes}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+an+Existing+Element+with+Added+Attributes] + - {Task: Delete a Specified Element}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Delete+a+Specified+Element] + - {Task: Delete an Element by Index}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Delete+an+Element+by+Index] + - {Task: Delete an Element by XPath}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Delete+an+Element+by+XPath] + - {Task: Determine Whether Element Children}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Determine+Whether+Element+Children] + - {Task: Get Element Descendants by XPath}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+Element+Descendants+by+XPath] + - {Task: Get Next Element Sibling}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+Next+Element+Sibling] + - {Task: Get Previous Element Sibling}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+Previous+Element+Sibling] + - {Task: Add a Text Node}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+a+Text+Node] + - {Task: Replace the First Text Node}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Replace+the+First+Text+Node] + - {Task: Remove the First Text Node}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Remove+the+First+Text+Node] + - {Task: Retrieve the First Text Node}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Retrieve+the+First+Text+Node] + - {Task: Retrieve a Specific Text Node}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Retrieve+a+Specific+Text+Node] + - {Task: Determine Whether the Element has Text Nodes}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Determine+Whether+the+Element+has+Text+Nodes] + - {Task: Get the Child at a Given Index}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+the+Child+at+a+Given+Index] + - {Task: Get All CDATA Children}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+All+CDATA+Children] + - {Task: Get All Comment Children}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+All+Comment+Children] + - {Task: Get All Processing Instruction Children}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+All+Processing+Instruction+Children] + - {Task: Get All Text Children}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+All+Text+Children] +- 
{Namespaces}[../../tasks/rdoc/element_rdoc.html#label-Namespaces] + - {Task: Add a Namespace}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Add+a+Namespace] + - {Task: Delete the Default Namespace}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Delete+the+Default+Namespace] + - {Task: Delete a Specific Namespace}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Delete+a+Specific+Namespace] + - {Task: Get a Namespace URI}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Get+a+Namespace+URI] + - {Task: Retrieve Namespaces}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Retrieve+Namespaces] + - {Task: Retrieve Namespace Prefixes}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Retrieve+Namespace+Prefixes] +- {Iteration}[../../tasks/rdoc/element_rdoc.html#label-Iteration] + - {Task: Iterate Over Elements}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Iterate+Over+Elements] + - {Task: Iterate Over Elements Having a Specified Attribute}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Iterate+Over+Elements+Having+a+Specified+Attribute] + - {Task: Iterate Over Elements Having a Specified Attribute and Value}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Iterate+Over+Elements+Having+a+Specified+Attribute+and+Value] + - {Task: Iterate Over Elements Having Specified Text}[../../tasks/rdoc/element_rdoc.html#label-Task-3A+Iterate+Over+Elements+Having+Specified+Text] +- {Context}[../../tasks/rdoc/element_rdoc.html#label-Context] +- {Other Getters}[../../tasks/rdoc/element_rdoc.html#label-Other+Getters] + +=== {Node}[../../tasks/rdoc/node_rdoc.html] +- {Siblings}[../../tasks/rdoc/node_rdoc.html#label-Siblings] + - {Task: Find Previous Sibling}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Find+Previous+Sibling] + - {Task: Find Next Sibling}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Find+Next+Sibling] +- {Position}[../../tasks/rdoc/node_rdoc.html#label-Position] + - {Task: Find Own Index Among Siblings}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Find+Own+Index+Among+Siblings] +- {Recursive Traversal}[../../tasks/rdoc/node_rdoc.html#label-Recursive+Traversal] + - {Task: Traverse Each Recursively}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Traverse+Each+Recursively] +- {Recursive Search}[../../tasks/rdoc/node_rdoc.html#label-Recursive+Search] + - {Task: Traverse Each Recursively}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Traverse+Each+Recursively] +- {Representation}[../../tasks/rdoc/node_rdoc.html#label-Representation] + - {Task: Represent a String}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Represent+a+String] +- {Parent?}[../../tasks/rdoc/node_rdoc.html#label-Parent-3F] + - {Task: Determine Whether the Node is a Parent}[../../tasks/rdoc/node_rdoc.html#label-Task-3A+Determine+Whether+the+Node+is+a+Parent] + +=== {Parent}[../../tasks/rdoc/parent_rdoc.html] +- {Queries}[../../tasks/rdoc/parent_rdoc.html#label-Queries] + - {Task: Get the Count of Children}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Get+the+Count+of+Children] + - {Task: Get the Child at a Given Index}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Get+the+Child+at+a+Given+Index] + - {Task: Get the Index of a Given Child}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Get+the+Index+of+a+Given+Child] + - {Task: Get the Children}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Get+the+Children] + - {Task: Determine Whether the Node is a Parent}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Determine+Whether+the+Node+is+a+Parent] +- {Additions}[../../tasks/rdoc/parent_rdoc.html#label-Additions] + - {Task: 
Add a Child at the Beginning}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Add+a+Child+at+the+Beginning] + - {Task: Add a Child at the End}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Add+a+Child+at+the+End] + - {Task: Replace a Child with Another Child}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Replace+a+Child+with+Another+Child] + - {Task: Replace Multiple Children with Another Child}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Replace+Multiple+Children+with+Another+Child] + - {Task: Insert Child Before a Given Child}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Insert+Child+Before+a+Given+Child] + - {Task: Insert Child After a Given Child}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Insert+Child+After+a+Given+Child] +- {Deletions}[../../tasks/rdoc/parent_rdoc.html#label-Deletions] + - {Task: Remove a Given Child}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Remove+a+Given+Child] + - {Task: Remove the Child at a Specified Offset}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Remove+the+Child+at+a+Specified+Offset] + - {Task: Remove Children That Meet Specified Criteria}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Remove+Children+That+Meet+Specified+Criteria] +- {Iterations}[../../tasks/rdoc/parent_rdoc.html#label-Iterations] + - {Task: Iterate Over Children}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Iterate+Over+Children] + - {Task: Iterate Over Child Indexes}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Iterate+Over+Child+Indexes] +- {Clones}[../../tasks/rdoc/parent_rdoc.html#label-Clones] + - {Task: Clone Deeply}[../../tasks/rdoc/parent_rdoc.html#label-Task-3A+Clone+Deeply] + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/node_toc.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/node_toc.rdoc new file mode 100644 index 0000000..d9114fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/node_toc.rdoc @@ -0,0 +1,16 @@ +Tasks on this page: + +- {Siblings}[#label-Siblings] + - {Task: Find Previous Sibling}[#label-Task-3A+Find+Previous+Sibling] + - {Task: Find Next Sibling}[#label-Task-3A+Find+Next+Sibling] +- {Position}[#label-Position] + - {Task: Find Own Index Among Siblings}[#label-Task-3A+Find+Own+Index+Among+Siblings] +- {Recursive Traversal}[#label-Recursive+Traversal] + - {Task: Traverse Each Recursively}[#label-Task-3A+Traverse+Each+Recursively] +- {Recursive Search}[#label-Recursive+Search] + - {Task: Traverse Each Recursively}[#label-Task-3A+Traverse+Each+Recursively] +- {Representation}[#label-Representation] + - {Task: Represent a String}[#label-Task-3A+Represent+a+String] +- {Parent?}[#label-Parent-3F] + - {Task: Determine Whether the Node is a Parent}[#label-Task-3A+Determine+Whether+the+Node+is+a+Parent] + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/parent_toc.rdoc b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/parent_toc.rdoc new file mode 100644 index 0000000..68fc0b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/doc/rexml/tasks/tocs/parent_toc.rdoc @@ -0,0 +1,25 @@ +Tasks on this page: + +- {Queries}[#label-Queries] + - {Task: Get the Count of Children}[#label-Task-3A+Get+the+Count+of+Children] + - {Task: Get the Child at a Given Index}[#label-Task-3A+Get+the+Child+at+a+Given+Index] + - {Task: Get the Index of a Given Child}[#label-Task-3A+Get+the+Index+of+a+Given+Child] + - {Task: Get the Children}[#label-Task-3A+Get+the+Children] + - {Task: Determine Whether the Node is a 
Parent}[#label-Task-3A+Determine+Whether+the+Node+is+a+Parent] +- {Additions}[#label-Additions] + - {Task: Add a Child at the Beginning}[#label-Task-3A+Add+a+Child+at+the+Beginning] + - {Task: Add a Child at the End}[#label-Task-3A+Add+a+Child+at+the+End] + - {Task: Replace a Child with Another Child}[#label-Task-3A+Replace+a+Child+with+Another+Child] + - {Task: Replace Multiple Children with Another Child}[#label-Task-3A+Replace+Multiple+Children+with+Another+Child] + - {Task: Insert Child Before a Given Child}[#label-Task-3A+Insert+Child+Before+a+Given+Child] + - {Task: Insert Child After a Given Child}[#label-Task-3A+Insert+Child+After+a+Given+Child] +- {Deletions}[#label-Deletions] + - {Task: Remove a Given Child}[#label-Task-3A+Remove+a+Given+Child] + - {Task: Remove the Child at a Specified Offset}[#label-Task-3A+Remove+the+Child+at+a+Specified+Offset] + - {Task: Remove Children That Meet Specified Criteria}[#label-Task-3A+Remove+Children+That+Meet+Specified+Criteria] +- {Iterations}[#label-Iterations] + - {Task: Iterate Over Children}[#label-Task-3A+Iterate+Over+Children] + - {Task: Iterate Over Child Indexes}[#label-Task-3A+Iterate+Over+Child+Indexes] +- {Clones}[#label-Clones] + - {Task: Clone Deeply}[#label-Task-3A+Clone+Deeply] + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml.rb new file mode 100644 index 0000000..eee246e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require_relative "rexml/document" diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/attlistdecl.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/attlistdecl.rb new file mode 100644 index 0000000..44a91d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/attlistdecl.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: false +#vim:ts=2 sw=2 noexpandtab: +require_relative 'child' +require_relative 'source' + +module REXML + # This class needs: + # * Documentation + # * Work! Not all types of attlists are intelligently parsed, so we just + # spew back out what we get in. This works, but it would be better if + # we formatted the output ourselves. + # + # AttlistDecls provide *just* enough support to allow namespace + # declarations. If you need some sort of generalized support, or have an + # interesting idea about how to map the hideous, terrible design of DTD + # AttlistDecls onto an intuitive Ruby interface, let me know. I'm desperate + # for anything to make DTDs more palateable. + class AttlistDecl < Child + include Enumerable + + # What is this? Got me. + attr_reader :element_name + + # Create an AttlistDecl, pulling the information from a Source. Notice + # that this isn't very convenient; to create an AttlistDecl, you basically + # have to format it yourself, and then have the initializer parse it. + # Sorry, but for the foreseeable future, DTD support in REXML is pretty + # weak on convenience. Have I mentioned how much I hate DTDs? + def initialize(source) + super() + if (source.kind_of? Array) + @element_name, @pairs, @contents = *source + end + end + + # Access the attlist attribute/value pairs. + # value = attlist_decl[ attribute_name ] + def [](key) + @pairs[key] + end + + # Whether an attlist declaration includes the given attribute definition + # if attlist_decl.include? "xmlns:foobar" + def include?(key) + @pairs.keys.include? 
key + end + + # Iterate over the key/value pairs: + # attlist_decl.each { |attribute_name, attribute_value| ... } + def each(&block) + @pairs.each(&block) + end + + # Write out exactly what we got in. + def write out, indent=-1 + out << @contents + end + + def node_type + :attlistdecl + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/attribute.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/attribute.rb new file mode 100644 index 0000000..8933a01 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/attribute.rb @@ -0,0 +1,205 @@ +# frozen_string_literal: false +require_relative "namespace" +require_relative 'text' + +module REXML + # Defines an Element Attribute; IE, a attribute=value pair, as in: + # . Attributes can be in their own + # namespaces. General users of REXML will not interact with the + # Attribute class much. + class Attribute + include Node + include Namespace + + # The element to which this attribute belongs + attr_reader :element + # The normalized value of this attribute. That is, the attribute with + # entities intact. + attr_writer :normalized + PATTERN = /\s*(#{NAME_STR})\s*=\s*(["'])(.*?)\2/um + + NEEDS_A_SECOND_CHECK = /(<|&((#{Entity::NAME});|(#0*((?:\d+)|(?:x[a-fA-F0-9]+)));)?)/um + + # Constructor. + # FIXME: The parser doesn't catch illegal characters in attributes + # + # first:: + # Either: an Attribute, which this new attribute will become a + # clone of; or a String, which is the name of this attribute + # second:: + # If +first+ is an Attribute, then this may be an Element, or nil. + # If nil, then the Element parent of this attribute is the parent + # of the +first+ Attribute. If the first argument is a String, + # then this must also be a String, and is the content of the attribute. + # If this is the content, it must be fully normalized (contain no + # illegal characters). + # parent:: + # Ignored unless +first+ is a String; otherwise, may be the Element + # parent of this attribute, or nil. + # + # + # Attribute.new( attribute_to_clone ) + # Attribute.new( attribute_to_clone, parent_element ) + # Attribute.new( "attr", "attr_value" ) + # Attribute.new( "attr", "attr_value", parent_element ) + def initialize( first, second=nil, parent=nil ) + @normalized = @unnormalized = @element = nil + if first.kind_of? Attribute + self.name = first.expanded_name + @unnormalized = first.value + if second.kind_of? Element + @element = second + else + @element = first.element + end + elsif first.kind_of? String + @element = parent + self.name = first + @normalized = second.to_s + else + raise "illegal argument #{first.class.name} to Attribute constructor" + end + end + + # Returns the namespace of the attribute. + # + # e = Element.new( "elns:myelement" ) + # e.add_attribute( "nsa:a", "aval" ) + # e.add_attribute( "b", "bval" ) + # e.attributes.get_attribute( "a" ).prefix # -> "nsa" + # e.attributes.get_attribute( "b" ).prefix # -> "" + # a = Attribute.new( "x", "y" ) + # a.prefix # -> "" + def prefix + super + end + + # Returns the namespace URL, if defined, or nil otherwise + # + # e = Element.new("el") + # e.add_namespace("ns", "http://url") + # e.add_attribute("ns:a", "b") + # e.add_attribute("nsx:a", "c") + # e.attribute("ns:a").namespace # => "http://url" + # e.attribute("nsx:a").namespace # => nil + # + # This method always returns "" for no namespace attribute. Because + # the default namespace doesn't apply to attribute names. 
+ # + # From https://www.w3.org/TR/xml-names/#uniqAttrs + # + # > the default namespace does not apply to attribute names + # + # e = REXML::Element.new("el") + # e.add_namespace("", "http://example.com/") + # e.namespace # => "http://example.com/" + # e.add_attribute("a", "b") + # e.attribute("a").namespace # => "" + def namespace arg=nil + arg = prefix if arg.nil? + if arg == "" + "" + else + @element.namespace(arg) + end + end + + # Returns true if other is an Attribute and has the same name and value, + # false otherwise. + def ==( other ) + other.kind_of?(Attribute) and other.name==name and other.value==value + end + + # Creates (and returns) a hash from both the name and value + def hash + name.hash + value.hash + end + + # Returns this attribute out as XML source, expanding the name + # + # a = Attribute.new( "x", "y" ) + # a.to_string # -> "x='y'" + # b = Attribute.new( "ns:x", "y" ) + # b.to_string # -> "ns:x='y'" + def to_string + if @element and @element.context and @element.context[:attribute_quote] == :quote + %Q^#@expanded_name="#{to_s().gsub(/"/, '"')}"^ + else + "#@expanded_name='#{to_s().gsub(/'/, ''')}'" + end + end + + def doctype + if @element + doc = @element.document + doc.doctype if doc + end + end + + # Returns the attribute value, with entities replaced + def to_s + return @normalized if @normalized + + @normalized = Text::normalize( @unnormalized, doctype ) + @unnormalized = nil + @normalized + end + + # Returns the UNNORMALIZED value of this attribute. That is, entities + # have been expanded to their values + def value + return @unnormalized if @unnormalized + @unnormalized = Text::unnormalize( @normalized, doctype ) + @normalized = nil + @unnormalized + end + + # Returns a copy of this attribute + def clone + Attribute.new self + end + + # Sets the element of which this object is an attribute. Normally, this + # is not directly called. + # + # Returns this attribute + def element=( element ) + @element = element + + if @normalized + Text.check( @normalized, NEEDS_A_SECOND_CHECK, doctype ) + end + + self + end + + # Removes this Attribute from the tree, and returns true if successful + # + # This method is usually not called directly. + def remove + @element.attributes.delete self.name unless @element.nil? + end + + # Writes this attribute (EG, puts 'key="value"' to the output) + def write( output, indent=-1 ) + output << to_string + end + + def node_type + :attribute + end + + def inspect + rv = "" + write( rv ) + rv + end + + def xpath + path = @element.xpath + path += "/@#{self.expanded_name}" + return path + end + end +end +#vim:ts=2 sw=2 noexpandtab: diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/cdata.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/cdata.rb new file mode 100644 index 0000000..997f5a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/cdata.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: false +require_relative "text" + +module REXML + class CData < Text + START = '' + ILLEGAL = /(\]\]>)/ + + # Constructor. 
CData is data between + # + # _Examples_ + # CData.new( source ) + # CData.new( "Here is some CDATA" ) + # CData.new( "Some unprocessed data", respect_whitespace_TF, parent_element ) + def initialize( first, whitespace=true, parent=nil ) + super( first, whitespace, parent, false, true, ILLEGAL ) + end + + # Make a copy of this object + # + # _Examples_ + # c = CData.new( "Some text" ) + # d = c.clone + # d.to_s # -> "Some text" + def clone + CData.new self + end + + # Returns the content of this CData object + # + # _Examples_ + # c = CData.new( "Some text" ) + # c.to_s # -> "Some text" + def to_s + @string + end + + def value + @string + end + + # == DEPRECATED + # See the rexml/formatters package + # + # Generates XML output of this object + # + # output:: + # Where to write the string. Defaults to $stdout + # indent:: + # The amount to indent this node by + # transitive:: + # Ignored + # ie_hack:: + # Ignored + # + # _Examples_ + # c = CData.new( " Some text " ) + # c.write( $stdout ) #-> + def write( output=$stdout, indent=-1, transitive=false, ie_hack=false ) + Kernel.warn( "#{self.class.name}.write is deprecated", uplevel: 1) + indent( output, indent ) + output << START + output << @string + output << STOP + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/child.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/child.rb new file mode 100644 index 0000000..cc6e9a4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/child.rb @@ -0,0 +1,97 @@ +# frozen_string_literal: false +require_relative "node" + +module REXML + ## + # A Child object is something contained by a parent, and this class + # contains methods to support that. Most user code will not use this + # class directly. + class Child + include Node + attr_reader :parent # The Parent of this object + + # Constructor. Any inheritors of this class should call super to make + # sure this method is called. + # parent:: + # if supplied, the parent of this child will be set to the + # supplied value, and self will be added to the parent + def initialize( parent = nil ) + @parent = nil + # Declare @parent, but don't define it. The next line sets the + # parent. + parent.add( self ) if parent + end + + # Replaces this object with another object. Basically, calls + # Parent.replace_child + # + # Returns:: self + def replace_with( child ) + @parent.replace_child( self, child ) + self + end + + # Removes this child from the parent. + # + # Returns:: self + def remove + unless @parent.nil? + @parent.delete self + end + self + end + + # Sets the parent of this child to the supplied argument. + # + # other:: + # Must be a Parent object. If this object is the same object as the + # existing parent of this child, no action is taken. Otherwise, this + # child is removed from the current parent (if one exists), and is added + # to the new parent. + # Returns:: The parent added + def parent=( other ) + return @parent if @parent == other + @parent.delete self if defined? @parent and @parent + @parent = other + end + + alias :next_sibling :next_sibling_node + alias :previous_sibling :previous_sibling_node + + # Sets the next sibling of this child. This can be used to insert a child + # after some other child. + # a = Element.new("a") + # b = a.add_element("b") + # c = Element.new("c") + # b.next_sibling = c + # # => + def next_sibling=( other ) + parent.insert_after self, other + end + + # Sets the previous sibling of this child. This can be used to insert a + # child before some other child. 
+ # a = Element.new("a") + # b = a.add_element("b") + # c = Element.new("c") + # b.previous_sibling = c + # # => + def previous_sibling=(other) + parent.insert_before self, other + end + + # Returns:: the document this child belongs to, or nil if this child + # belongs to no document + def document + return parent.document unless parent.nil? + nil + end + + # This doesn't yet handle encodings + def bytes + document.encoding + + to_s + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/comment.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/comment.rb new file mode 100644 index 0000000..52c58b4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/comment.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: false +require_relative "child" + +module REXML + ## + # Represents an XML comment; that is, text between \ + class Comment < Child + include Comparable + START = "" + + # The content text + + attr_accessor :string + + ## + # Constructor. The first argument can be one of three types: + # @param first If String, the contents of this comment are set to the + # argument. If Comment, the argument is duplicated. If + # Source, the argument is scanned for a comment. + # @param second If the first argument is a Source, this argument + # should be nil, not supplied, or a Parent to be set as the parent + # of this object + def initialize( first, second = nil ) + super(second) + if first.kind_of? String + @string = first + elsif first.kind_of? Comment + @string = first.string + end + end + + def clone + Comment.new self + end + + # == DEPRECATED + # See REXML::Formatters + # + # output:: + # Where to write the string + # indent:: + # An integer. If -1, no indenting will be used; otherwise, the + # indentation will be this number of spaces, and children will be + # indented an additional amount. + # transitive:: + # Ignored by this class. The contents of comments are never modified. + # ie_hack:: + # Needed for conformity to the child API, but not used by this class. + def write( output, indent=-1, transitive=false, ie_hack=false ) + Kernel.warn("Comment.write is deprecated. See REXML::Formatters", uplevel: 1) + indent( output, indent ) + output << START + output << @string + output << STOP + end + + alias :to_s :string + + ## + # Compares this Comment to another; the contents of the comment are used + # in the comparison. + def <=>(other) + other.to_s <=> @string + end + + ## + # Compares this Comment to another; the contents of the comment are used + # in the comparison. + def ==( other ) + other.kind_of? 
Comment and + (other <=> self) == 0 + end + + def node_type + :comment + end + end +end +#vim:ts=2 sw=2 noexpandtab: diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/doctype.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/doctype.rb new file mode 100644 index 0000000..f359048 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/doctype.rb @@ -0,0 +1,311 @@ +# frozen_string_literal: false +require_relative "parent" +require_relative "parseexception" +require_relative "namespace" +require_relative 'entity' +require_relative 'attlistdecl' +require_relative 'xmltokens' + +module REXML + class ReferenceWriter + def initialize(id_type, + public_id_literal, + system_literal, + context=nil) + @id_type = id_type + @public_id_literal = public_id_literal + @system_literal = system_literal + if context and context[:prologue_quote] == :apostrophe + @default_quote = "'" + else + @default_quote = "\"" + end + end + + def write(output) + output << " #{@id_type}" + if @public_id_literal + if @public_id_literal.include?("'") + quote = "\"" + else + quote = @default_quote + end + output << " #{quote}#{@public_id_literal}#{quote}" + end + if @system_literal + if @system_literal.include?("'") + quote = "\"" + elsif @system_literal.include?("\"") + quote = "'" + else + quote = @default_quote + end + output << " #{quote}#{@system_literal}#{quote}" + end + end + end + + # Represents an XML DOCTYPE declaration; that is, the contents of . DOCTYPES can be used to declare the DTD of a document, as well as + # being used to declare entities used in the document. + class DocType < Parent + include XMLTokens + START = "" + SYSTEM = "SYSTEM" + PUBLIC = "PUBLIC" + DEFAULT_ENTITIES = { + 'gt'=>EntityConst::GT, + 'lt'=>EntityConst::LT, + 'quot'=>EntityConst::QUOT, + "apos"=>EntityConst::APOS + } + + # name is the name of the doctype + # external_id is the referenced DTD, if given + attr_reader :name, :external_id, :entities, :namespaces + + # Constructor + # + # dt = DocType.new( 'foo', '-//I/Hate/External/IDs' ) + # # + # dt = DocType.new( doctype_to_clone ) + # # Incomplete. Shallow clone of doctype + # + # +Note+ that the constructor: + # + # Doctype.new( Source.new( "" ) ) + # + # is _deprecated_. Do not use it. It will probably disappear. + def initialize( first, parent=nil ) + @entities = DEFAULT_ENTITIES + @long_name = @uri = nil + if first.kind_of? String + super() + @name = first + @external_id = parent + elsif first.kind_of? DocType + super( parent ) + @name = first.name + @external_id = first.external_id + @long_name = first.instance_variable_get(:@long_name) + @uri = first.instance_variable_get(:@uri) + elsif first.kind_of? Array + super( parent ) + @name = first[0] + @external_id = first[1] + @long_name = first[2] + @uri = first[3] + elsif first.kind_of? Source + super( parent ) + parser = Parsers::BaseParser.new( first ) + event = parser.pull + if event[0] == :start_doctype + @name, @external_id, @long_name, @uri, = event[1..-1] + end + else + super() + end + end + + def node_type + :doctype + end + + def attributes_of element + rv = [] + each do |child| + child.each do |key,val| + rv << Attribute.new(key,val) + end if child.kind_of? AttlistDecl and child.element_name == element + end + rv + end + + def attribute_of element, attribute + att_decl = find do |child| + child.kind_of? AttlistDecl and + child.element_name == element and + child.include? 
attribute + end + return nil unless att_decl + att_decl[attribute] + end + + def clone + DocType.new self + end + + # output:: + # Where to write the string + # indent:: + # An integer. If -1, no indentation will be used; otherwise, the + # indentation will be this number of spaces, and children will be + # indented an additional amount. + # transitive:: + # Ignored + # ie_hack:: + # Ignored + def write( output, indent=0, transitive=false, ie_hack=false ) + f = REXML::Formatters::Default.new + indent( output, indent ) + output << START + output << ' ' + output << @name + if @external_id + reference_writer = ReferenceWriter.new(@external_id, + @long_name, + @uri, + context) + reference_writer.write(output) + end + unless @children.empty? + output << ' [' + @children.each { |child| + output << "\n" + f.write( child, output ) + } + output << "\n]" + end + output << STOP + end + + def context + if @parent + @parent.context + else + nil + end + end + + def entity( name ) + @entities[name].unnormalized if @entities[name] + end + + def add child + super(child) + @entities = DEFAULT_ENTITIES.clone if @entities == DEFAULT_ENTITIES + @entities[ child.name ] = child if child.kind_of? Entity + end + + # This method retrieves the public identifier identifying the document's + # DTD. + # + # Method contributed by Henrik Martensson + def public + case @external_id + when "SYSTEM" + nil + when "PUBLIC" + @long_name + end + end + + # This method retrieves the system identifier identifying the document's DTD + # + # Method contributed by Henrik Martensson + def system + case @external_id + when "SYSTEM" + @long_name + when "PUBLIC" + @uri.kind_of?(String) ? @uri : nil + end + end + + # This method returns a list of notations that have been declared in the + # _internal_ DTD subset. Notations in the external DTD subset are not + # listed. + # + # Method contributed by Henrik Martensson + def notations + children().select {|node| node.kind_of?(REXML::NotationDecl)} + end + + # Retrieves a named notation. Only notations declared in the internal + # DTD subset can be retrieved. + # + # Method contributed by Henrik Martensson + def notation(name) + notations.find { |notation_decl| + notation_decl.name == name + } + end + end + + # We don't really handle any of these since we're not a validating + # parser, so we can be pretty dumb about them. All we need to be able + # to do is spew them back out on a write() + + # This is an abstract class. You never use this directly; it serves as a + # parent class for the specific declarations. + class Declaration < Child + def initialize src + super() + @string = src + end + + def to_s + @string+'>' + end + + # == DEPRECATED + # See REXML::Formatters + # + def write( output, indent ) + output << to_s + end + end + + public + class ElementDecl < Declaration + def initialize( src ) + super + end + end + + class ExternalEntity < Child + def initialize( src ) + super() + @entity = src + end + def to_s + @entity + end + def write( output, indent ) + output << @entity + end + end + + class NotationDecl < Child + attr_accessor :public, :system + def initialize name, middle, pub, sys + super(nil) + @name = name + @middle = middle + @public = pub + @system = sys + end + + def to_s + context = nil + context = parent.context if parent + notation = "" + notation + end + + def write( output, indent=-1 ) + output << to_s + end + + # This method retrieves the name of the notation. 
+ # + # Method contributed by Henrik Martensson + def name + @name + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/document.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/document.rb new file mode 100644 index 0000000..2edeb98 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/document.rb @@ -0,0 +1,451 @@ +# frozen_string_literal: false +require_relative "security" +require_relative "element" +require_relative "xmldecl" +require_relative "source" +require_relative "comment" +require_relative "doctype" +require_relative "instruction" +require_relative "rexml" +require_relative "parseexception" +require_relative "output" +require_relative "parsers/baseparser" +require_relative "parsers/streamparser" +require_relative "parsers/treeparser" + +module REXML + # Represents an XML document. + # + # A document may have: + # + # - A single child that may be accessed via method #root. + # - An XML declaration. + # - A document type. + # - Processing instructions. + # + # == In a Hurry? + # + # If you're somewhat familiar with XML + # and have a particular task in mind, + # you may want to see the + # {tasks pages}[../doc/rexml/tasks/tocs/master_toc_rdoc.html], + # and in particular, the + # {tasks page for documents}[../doc/rexml/tasks/tocs/document_toc_rdoc.html]. + # + class Document < Element + # A convenient default XML declaration. Use: + # + # mydoc << XMLDecl.default + # + DECLARATION = XMLDecl.default + + # :call-seq: + # new(string = nil, context = {}) -> new_document + # new(io_stream = nil, context = {}) -> new_document + # new(document = nil, context = {}) -> new_document + # + # Returns a new \REXML::Document object. + # + # When no arguments are given, + # returns an empty document: + # + # d = REXML::Document.new + # d.to_s # => "" + # + # When argument +string+ is given, it must be a string + # containing a valid XML document: + # + # xml_string = 'FooBar' + # d = REXML::Document.new(xml_string) + # d.to_s # => "FooBar" + # + # When argument +io_stream+ is given, it must be an \IO object + # that is opened for reading, and when read must return a valid XML document: + # + # File.write('t.xml', xml_string) + # d = File.open('t.xml', 'r') do |io| + # REXML::Document.new(io) + # end + # d.to_s # => "FooBar" + # + # When argument +document+ is given, it must be an existing + # document object, whose context and attributes (but not chidren) + # are cloned into the new document: + # + # d = REXML::Document.new(xml_string) + # d.children # => [ ... ] + # d.context = {raw: :all, compress_whitespace: :all} + # d.add_attributes({'bar' => 0, 'baz' => 1}) + # d1 = REXML::Document.new(d) + # d1.children # => [] + # d1.context # => {:raw=>:all, :compress_whitespace=>:all} + # d1.attributes # => {"bar"=>bar='0', "baz"=>baz='1'} + # + # When argument +context+ is given, it must be a hash + # containing context entries for the document; + # see {Element Context}[../doc/rexml/context_rdoc.html]: + # + # context = {raw: :all, compress_whitespace: :all} + # d = REXML::Document.new(xml_string, context) + # d.context # => {:raw=>:all, :compress_whitespace=>:all} + # + def initialize( source = nil, context = {} ) + @entity_expansion_count = 0 + super() + @context = context + return if source.nil? + if source.kind_of? Document + @context = source.context + super source + else + build( source ) + end + end + + # :call-seq: + # node_type -> :document + # + # Returns the symbol +:document+. 
+ # + def node_type + :document + end + + # :call-seq: + # clone -> new_document + # + # Returns the new document resulting from executing + # Document.new(self). See Document.new. + # + def clone + Document.new self + end + + # :call-seq: + # expanded_name -> empty_string + # + # Returns an empty string. + # + def expanded_name + '' + #d = doc_type + #d ? d.name : "UNDEFINED" + end + alias :name :expanded_name + + # :call-seq: + # add(xml_decl) -> self + # add(doc_type) -> self + # add(object) -> self + # + # Adds an object to the document; returns +self+. + # + # When argument +xml_decl+ is given, + # it must be an REXML::XMLDecl object, + # which becomes the XML declaration for the document, + # replacing the previous XML declaration if any: + # + # d = REXML::Document.new + # d.xml_decl.to_s # => "" + # d.add(REXML::XMLDecl.new('2.0')) + # d.xml_decl.to_s # => "" + # + # When argument +doc_type+ is given, + # it must be an REXML::DocType object, + # which becomes the document type for the document, + # replacing the previous document type, if any: + # + # d = REXML::Document.new + # d.doctype.to_s # => "" + # d.add(REXML::DocType.new('foo')) + # d.doctype.to_s # => "" + # + # When argument +object+ (not an REXML::XMLDecl or REXML::DocType object) + # is given it is added as the last child: + # + # d = REXML::Document.new + # d.add(REXML::Element.new('foo')) + # d.to_s # => "" + # + def add( child ) + if child.kind_of? XMLDecl + if @children[0].kind_of? XMLDecl + @children[0] = child + else + @children.unshift child + end + child.parent = self + elsif child.kind_of? DocType + # Find first Element or DocType node and insert the decl right + # before it. If there is no such node, just insert the child at the + # end. If there is a child and it is an DocType, then replace it. + insert_before_index = @children.find_index { |x| + x.kind_of?(Element) || x.kind_of?(DocType) + } + if insert_before_index # Not null = not end of list + if @children[ insert_before_index ].kind_of? DocType + @children[ insert_before_index ] = child + else + @children[ insert_before_index-1, 0 ] = child + end + else # Insert at end of list + @children << child + end + child.parent = self + else + rv = super + raise "attempted adding second root element to document" if @elements.size > 1 + rv + end + end + alias :<< :add + + # :call-seq: + # add_element(name_or_element = nil, attributes = nil) -> new_element + # + # Adds an element to the document by calling REXML::Element.add_element: + # + # REXML::Element.add_element(name_or_element, attributes) + def add_element(arg=nil, arg2=nil) + rv = super + raise "attempted adding second root element to document" if @elements.size > 1 + rv + end + + # :call-seq: + # root -> root_element or nil + # + # Returns the root element of the document, if it exists, otherwise +nil+: + # + # d = REXML::Document.new('') + # d.root # => + # d = REXML::Document.new('') + # d.root # => nil + # + def root + elements[1] + #self + #@children.find { |item| item.kind_of? Element } + end + + # :call-seq: + # doctype -> doc_type or nil + # + # Returns the DocType object for the document, if it exists, otherwise +nil+: + # + # d = REXML::Document.new('') + # d.doctype.class # => REXML::DocType + # d = REXML::Document.new('') + # d.doctype.class # => nil + # + def doctype + @children.find { |item| item.kind_of? 
DocType } + end + + # :call-seq: + # xml_decl -> xml_decl + # + # Returns the XMLDecl object for the document, if it exists, + # otherwise the default XMLDecl object: + # + # d = REXML::Document.new('') + # d.xml_decl.class # => REXML::XMLDecl + # d.xml_decl.to_s # => "" + # d = REXML::Document.new('') + # d.xml_decl.class # => REXML::XMLDecl + # d.xml_decl.to_s # => "" + # + def xml_decl + rv = @children[0] + return rv if rv.kind_of? XMLDecl + @children.unshift(XMLDecl.default)[0] + end + + # :call-seq: + # version -> version_string + # + # Returns the XMLDecl version of this document as a string, + # if it has been set, otherwise the default version: + # + # d = REXML::Document.new('') + # d.version # => "2.0" + # d = REXML::Document.new('') + # d.version # => "1.0" + # + def version + xml_decl().version + end + + # :call-seq: + # encoding -> encoding_string + # + # Returns the XMLDecl encoding of the document, + # if it has been set, otherwise the default encoding: + # + # d = REXML::Document.new('') + # d.encoding # => "UTF-16" + # d = REXML::Document.new('') + # d.encoding # => "UTF-8" + # + def encoding + xml_decl().encoding + end + + # :call-seq: + # stand_alone? + # + # Returns the XMLDecl standalone value of the document as a string, + # if it has been set, otherwise the default standalone value: + # + # d = REXML::Document.new('') + # d.stand_alone? # => "yes" + # d = REXML::Document.new('') + # d.stand_alone? # => nil + # + def stand_alone? + xml_decl().stand_alone? + end + + # :call-seq: + # doc.write(output=$stdout, indent=-1, transtive=false, ie_hack=false, encoding=nil) + # doc.write(options={:output => $stdout, :indent => -1, :transtive => false, :ie_hack => false, :encoding => nil}) + # + # Write the XML tree out, optionally with indent. This writes out the + # entire XML document, including XML declarations, doctype declarations, + # and processing instructions (if any are given). + # + # A controversial point is whether Document should always write the XML + # declaration () whether or not one is given by the + # user (or source document). REXML does not write one if one was not + # specified, because it adds unnecessary bandwidth to applications such + # as XML-RPC. + # + # Accept Nth argument style and options Hash style as argument. + # The recommended style is options Hash style for one or more + # arguments case. + # + # _Examples_ + # Document.new("").write + # + # output = "" + # Document.new("").write(output) + # + # output = "" + # Document.new("").write(:output => output, :indent => 2) + # + # See also the classes in the rexml/formatters package for the proper way + # to change the default formatting of XML output. + # + # _Examples_ + # + # output = "" + # tr = Transitive.new + # tr.write(Document.new(""), output) + # + # output:: + # output an object which supports '<< string'; this is where the + # document will be written. + # indent:: + # An integer. If -1, no indenting will be used; otherwise, the + # indentation will be twice this number of spaces, and children will be + # indented an additional amount. For a value of 3, every item will be + # indented 3 more levels, or 6 more spaces (2 * 3). Defaults to -1 + # transitive:: + # If transitive is true and indent is >= 0, then the output will be + # pretty-printed in such a way that the added whitespace does not affect + # the absolute *value* of the document -- that is, it leaves the value + # and number of Text nodes in the document unchanged. 
+ # ie_hack:: + # This hack inserts a space before the /> on empty tags to address + # a limitation of Internet Explorer. Defaults to false + # encoding:: + # Encoding name as String. Change output encoding to specified encoding + # instead of encoding in XML declaration. + # Defaults to nil. It means encoding in XML declaration is used. + def write(*arguments) + if arguments.size == 1 and arguments[0].class == Hash + options = arguments[0] + + output = options[:output] + indent = options[:indent] + transitive = options[:transitive] + ie_hack = options[:ie_hack] + encoding = options[:encoding] + else + output, indent, transitive, ie_hack, encoding, = *arguments + end + + output ||= $stdout + indent ||= -1 + transitive = false if transitive.nil? + ie_hack = false if ie_hack.nil? + encoding ||= xml_decl.encoding + + if encoding != 'UTF-8' && !output.kind_of?(Output) + output = Output.new( output, encoding ) + end + formatter = if indent > -1 + if transitive + require_relative "formatters/transitive" + REXML::Formatters::Transitive.new( indent, ie_hack ) + else + REXML::Formatters::Pretty.new( indent, ie_hack ) + end + else + REXML::Formatters::Default.new( ie_hack ) + end + formatter.write( self, output ) + end + + + def Document::parse_stream( source, listener ) + Parsers::StreamParser.new( source, listener ).parse + end + + # Set the entity expansion limit. By default the limit is set to 10000. + # + # Deprecated. Use REXML::Security.entity_expansion_limit= instead. + def Document::entity_expansion_limit=( val ) + Security.entity_expansion_limit = val + end + + # Get the entity expansion limit. By default the limit is set to 10000. + # + # Deprecated. Use REXML::Security.entity_expansion_limit= instead. + def Document::entity_expansion_limit + return Security.entity_expansion_limit + end + + # Set the entity expansion limit. By default the limit is set to 10240. + # + # Deprecated. Use REXML::Security.entity_expansion_text_limit= instead. + def Document::entity_expansion_text_limit=( val ) + Security.entity_expansion_text_limit = val + end + + # Get the entity expansion limit. By default the limit is set to 10240. + # + # Deprecated. Use REXML::Security.entity_expansion_text_limit instead. + def Document::entity_expansion_text_limit + return Security.entity_expansion_text_limit + end + + attr_reader :entity_expansion_count + + def record_entity_expansion + @entity_expansion_count += 1 + if @entity_expansion_count > Security.entity_expansion_limit + raise "number of entity expansions exceeded, processing aborted." 
+ end + end + + def document + self + end + + private + def build( source ) + Parsers::TreeParser.new( source, self ).parse + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/attlistdecl.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/attlistdecl.rb new file mode 100644 index 0000000..1326cb2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/attlistdecl.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: false +require_relative "../child" +module REXML + module DTD + class AttlistDecl < Child + START = ")/um + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/dtd.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/dtd.rb new file mode 100644 index 0000000..8b0f2d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/dtd.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: false +require_relative "elementdecl" +require_relative "entitydecl" +require_relative "../comment" +require_relative "notationdecl" +require_relative "attlistdecl" +require_relative "../parent" + +module REXML + module DTD + class Parser + def Parser.parse( input ) + case input + when String + parse_helper input + when File + parse_helper input.read + end + end + + # Takes a String and parses it out + def Parser.parse_helper( input ) + contents = Parent.new + while input.size > 0 + case input + when ElementDecl.PATTERN_RE + match = $& + contents << ElementDecl.new( match ) + when AttlistDecl.PATTERN_RE + matchdata = $~ + contents << AttlistDecl.new( matchdata ) + when EntityDecl.PATTERN_RE + matchdata = $~ + contents << EntityDecl.new( matchdata ) + when Comment.PATTERN_RE + matchdata = $~ + contents << Comment.new( matchdata ) + when NotationDecl.PATTERN_RE + matchdata = $~ + contents << NotationDecl.new( matchdata ) + end + end + contents + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/elementdecl.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/elementdecl.rb new file mode 100644 index 0000000..20ed023 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/elementdecl.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: false +require_relative "../child" +module REXML + module DTD + class ElementDecl < Child + START = "/um + PATTERN_RE = /^\s*#{START}\s+((?:[:\w][-\.\w]*:)?[-!\*\.\w]*)(.*?)>/ + #\s*((((["']).*?\5)|[^\/'">]*)*?)(\/)?>/um, true) + + def initialize match + @name = match[1] + @rest = match[2] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/entitydecl.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/entitydecl.rb new file mode 100644 index 0000000..312df65 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/entitydecl.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: false +require_relative "../child" +module REXML + module DTD + class EntityDecl < Child + START = "/um + SYSTEM = /^\s*#{START}\s+(?:%\s+)?(\w+)\s+SYSTEM\s+((["']).*?\3)(?:\s+NDATA\s+\w+)?\s*>/um + PLAIN = /^\s*#{START}\s+(\w+)\s+((["']).*?\3)\s*>/um + PERCENT = /^\s*#{START}\s+%\s+(\w+)\s+((["']).*?\3)\s*>/um + # + # + def initialize src + super() + md = nil + if src.match( PUBLIC ) + md = src.match( PUBLIC, true ) + @middle = "PUBLIC" + @content = "#{md[2]} #{md[4]}" + elsif src.match( SYSTEM ) + md = src.match( SYSTEM, true ) + @middle = "SYSTEM" + @content = md[2] + elsif src.match( PLAIN ) + md = src.match( PLAIN, true ) + @middle = "" + @content = md[2] + elsif 
src.match( PERCENT ) + md = src.match( PERCENT, true ) + @middle = "" + @content = md[2] + end + raise ParseException.new("failed Entity match", src) if md.nil? + @name = md[1] + end + + def to_s + rv = " 0 + rv << @content + rv + end + + def write( output, indent ) + indent( output, indent ) + output << to_s + end + + def EntityDecl.parse_source source, listener + md = source.match( PATTERN_RE, true ) + thing = md[0].squeeze(" \t\n\r") + listener.send inspect.downcase, thing + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/notationdecl.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/notationdecl.rb new file mode 100644 index 0000000..04a9b08 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/dtd/notationdecl.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: false +require_relative "../child" +module REXML + module DTD + class NotationDecl < Child + START = "/um + SYSTEM = /^\s*#{START}\s+(\w[\w-]*)\s+(SYSTEM)\s+((["']).*?\4)\s*>/um + def initialize src + super() + if src.match( PUBLIC ) + md = src.match( PUBLIC, true ) + elsif src.match( SYSTEM ) + md = src.match( SYSTEM, true ) + else + raise ParseException.new( "error parsing notation: no matching pattern", src ) + end + @name = md[1] + @middle = md[2] + @rest = md[3] + end + + def to_s + "" + end + + def write( output, indent ) + indent( output, indent ) + output << to_s + end + + def NotationDecl.parse_source source, listener + md = source.match( PATTERN_RE, true ) + thing = md[0].squeeze(" \t\n\r") + listener.send inspect.downcase, thing + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/element.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/element.rb new file mode 100644 index 0000000..4c21dbd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/element.rb @@ -0,0 +1,2599 @@ +# frozen_string_literal: false +require_relative "parent" +require_relative "namespace" +require_relative "attribute" +require_relative "cdata" +require_relative "xpath" +require_relative "parseexception" + +module REXML + # An implementation note about namespaces: + # As we parse, when we find namespaces we put them in a hash and assign + # them a unique ID. We then convert the namespace prefix for the node + # to the unique ID. This makes namespace lookup much faster for the + # cost of extra memory use. We save the namespace prefix for the + # context node and convert it back when we write it. + @@namespaces = {} + + # An \REXML::Element object represents an XML element. + # + # An element: + # + # - Has a name (string). + # - May have a parent (another element). + # - Has zero or more children + # (other elements, text, CDATA, processing instructions, and comments). + # - Has zero or more siblings + # (other elements, text, CDATA, processing instructions, and comments). + # - Has zero or more named attributes. + # + # == In a Hurry? + # + # If you're somewhat familiar with XML + # and have a particular task in mind, + # you may want to see the + # {tasks pages}[../doc/rexml/tasks/tocs/master_toc_rdoc.html], + # and in particular, the + # {tasks page for elements}[../doc/rexml/tasks/tocs/element_toc_rdoc.html]. + # + # === Name + # + # An element has a name, which is initially set when the element is created: + # + # e = REXML::Element.new('foo') + # e.name # => "foo" + # + # The name may be changed: + # + # e.name = 'bar' + # e.name # => "bar" + # + # + # === \Parent + # + # An element may have a parent. 
+ # + # Its parent may be assigned explicitly when the element is created: + # + # e0 = REXML::Element.new('foo') + # e1 = REXML::Element.new('bar', e0) + # e1.parent # => ... + # + # Note: the representation of an element always shows the element's name. + # If the element has children, the representation indicates that + # by including an ellipsis (...). + # + # The parent may be assigned explicitly at any time: + # + # e2 = REXML::Element.new('baz') + # e1.parent = e2 + # e1.parent # => + # + # When an element is added as a child, its parent is set automatically: + # + # e1.add_element(e0) + # e0.parent # => ... + # + # For an element that has no parent, method +parent+ returns +nil+. + # + # === Children + # + # An element has zero or more children. + # The children are an ordered collection + # of all objects whose parent is the element itself. + # + # The children may include any combination of elements, text, comments, + # processing instructions, and CDATA. + # (This example keeps things clean by controlling whitespace + # via a +context+ setting.) + # + # xml_string = <<-EOT + # + # + # text 0 + # + # + # + # + # text 1 + # + # + # + # + # EOT + # context = {ignore_whitespace_nodes: :all, compress_whitespace: :all} + # d = REXML::Document.new(xml_string, context) + # root = d.root + # root.children.size # => 10 + # root.each {|child| p "#{child.class}: #{child}" } + # + # Output: + # + # "REXML::Element: " + # "REXML::Text: \n text 0\n " + # "REXML::Comment: comment 0" + # "REXML::Instruction: " + # "REXML::CData: cdata 0" + # "REXML::Element: " + # "REXML::Text: \n text 1\n " + # "REXML::Comment: comment 1" + # "REXML::Instruction: " + # "REXML::CData: cdata 1" + # + # A child may be added using inherited methods + # Parent#insert_before or Parent#insert_after: + # + # xml_string = '' + # d = REXML::Document.new(xml_string) + # root = d.root + # c = d.root[1] # => + # root.insert_before(c, REXML::Element.new('b')) + # root.to_a # => [, , , ] + # + # A child may be replaced using Parent#replace_child: + # + # root.replace_child(c, REXML::Element.new('x')) + # root.to_a # => [, , , ] + # + # A child may be removed using Parent#delete: + # + # x = root[2] # => + # root.delete(x) + # root.to_a # => [, , ] + # + # === Siblings + # + # An element has zero or more siblings, + # which are the other children of the element's parent. + # + # In the example above, element +ele_1+ is between a CDATA sibling + # and a text sibling: + # + # ele_1 = root[5] # => + # ele_1.previous_sibling # => "cdata 0" + # ele_1.next_sibling # => "\n text 1\n " + # + # === \Attributes + # + # An element has zero or more named attributes. + # + # A new element has no attributes: + # + # e = REXML::Element.new('foo') + # e.attributes # => {} + # + # Attributes may be added: + # + # e.add_attribute('bar', 'baz') + # e.add_attribute('bat', 'bam') + # e.attributes.size # => 2 + # e['bar'] # => "baz" + # e['bat'] # => "bam" + # + # An existing attribute may be modified: + # + # e.add_attribute('bar', 'bad') + # e.attributes.size # => 2 + # e['bar'] # => "bad" + # + # An existing attribute may be deleted: + # + # e.delete_attribute('bar') + # e.attributes.size # => 1 + # e['bar'] # => nil + # + # == What's Here + # + # To begin with, what's elsewhere? 
+ # + # \Class \REXML::Element inherits from its ancestor classes: + # + # - REXML::Child + # - REXML::Parent + # + # \REXML::Element itself and its ancestors also include modules: + # + # - {Enumerable}[https://docs.ruby-lang.org/en/master/Enumerable.html] + # - REXML::Namespace + # - REXML::Node + # - REXML::XMLTokens + # + # === Methods for Creating an \Element + # + # ::new:: Returns a new empty element. + # #clone:: Returns a clone of another element. + # + # === Methods for Attributes + # + # {[attribute_name]}[#method-i-5B-5D]:: Returns an attribute value. + # #add_attribute:: Adds a new attribute. + # #add_attributes:: Adds multiple new attributes. + # #attribute:: Returns the attribute value for a given name and optional namespace. + # #delete_attribute:: Removes an attribute. + # + # === Methods for Children + # + # {[index]}[#method-i-5B-5D]:: Returns the child at the given offset. + # #add_element:: Adds an element as the last child. + # #delete_element:: Deletes a child element. + # #each_element:: Calls the given block with each child element. + # #each_element_with_attribute:: Calls the given block with each child element + # that meets given criteria, + # which can include the attribute name. + # #each_element_with_text:: Calls the given block with each child element + # that meets given criteria, + # which can include text. + # #get_elements:: Returns an array of element children that match a given xpath. + # + # === Methods for \Text Children + # + # #add_text:: Adds a text node to the element. + # #get_text:: Returns a text node that meets specified criteria. + # #text:: Returns the text string from the first node that meets specified criteria. + # #texts:: Returns an array of the text children of the element. + # #text=:: Adds, removes, or replaces the first text child of the element + # + # === Methods for Other Children + # + # #cdatas:: Returns an array of the cdata children of the element. + # #comments:: Returns an array of the comment children of the element. + # #instructions:: Returns an array of the instruction children of the element. + # + # === Methods for Namespaces + # + # #add_namespace:: Adds a namespace to the element. + # #delete_namespace:: Removes a namespace from the element. + # #namespace:: Returns the string namespace URI for the element. + # #namespaces:: Returns a hash of all defined namespaces in the element. + # #prefixes:: Returns an array of the string prefixes (names) + # of all defined namespaces in the element + # + # === Methods for Querying + # + # #document:: Returns the document, if any, that the element belongs to. + # #root:: Returns the most distant element (not document) ancestor of the element. + # #root_node:: Returns the most distant ancestor of the element. + # #xpath:: Returns the string xpath to the element + # relative to the most distant parent + # #has_attributes?:: Returns whether the element has attributes. + # #has_elements?:: Returns whether the element has elements. + # #has_text?:: Returns whether the element has text. + # #next_element:: Returns the next sibling that is an element. + # #previous_element:: Returns the previous sibling that is an element. + # #raw:: Returns whether raw mode is set for the element. + # #whitespace:: Returns whether whitespace is respected for the element. + # #ignore_whitespace_nodes:: Returns whether whitespace nodes + # are to be ignored for the element. + # #node_type:: Returns symbol :element. 
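+ #
+ # A minimal usage sketch of the querying methods listed above (the XML
+ # snippet here is only illustrative, not taken from the original examples):
+ #
+ #   require 'rexml/document'
+ #   d = REXML::Document.new('<a><b>text</b><c/></a>')
+ #   b = d.root.elements['b']
+ #   b.has_text?         # => true
+ #   b.has_elements?     # => false
+ #   b.next_element.name # => "c"
+ #   b.root.name         # => "a"
+ #   b.xpath             # => "/a/b"
+ #   b.node_type         # => :element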
+ # + # === One More Method + # + # #inspect:: Returns a string representation of the element. + # + # === Accessors + # + # #elements:: Returns the REXML::Elements object for the element. + # #attributes:: Returns the REXML::Attributes object for the element. + # #context:: Returns or sets the context hash for the element. + # + class Element < Parent + include Namespace + + UNDEFINED = "UNDEFINED"; # The default name + + # Mechanisms for accessing attributes and child elements of this + # element. + attr_reader :attributes, :elements + # The context holds information about the processing environment, such as + # whitespace handling. + attr_accessor :context + + # :call-seq: + # Element.new(name = 'UNDEFINED', parent = nil, context = nil) -> new_element + # Element.new(element, parent = nil, context = nil) -> new_element + # + # Returns a new \REXML::Element object. + # + # When no arguments are given, + # returns an element with name 'UNDEFINED': + # + # e = REXML::Element.new # => + # e.class # => REXML::Element + # e.name # => "UNDEFINED" + # + # When only argument +name+ is given, + # returns an element of the given name: + # + # REXML::Element.new('foo') # => + # + # When only argument +element+ is given, it must be an \REXML::Element object; + # returns a shallow copy of the given element: + # + # e0 = REXML::Element.new('foo') + # e1 = REXML::Element.new(e0) # => + # + # When argument +parent+ is also given, it must be an REXML::Parent object: + # + # e = REXML::Element.new('foo', REXML::Parent.new) + # e.parent # => #]> + # + # When argument +context+ is also given, it must be a hash + # representing the context for the element; + # see {Element Context}[../doc/rexml/context_rdoc.html]: + # + # e = REXML::Element.new('foo', nil, {raw: :all}) + # e.context # => {:raw=>:all} + # + def initialize( arg = UNDEFINED, parent=nil, context=nil ) + super(parent) + + @elements = Elements.new(self) + @attributes = Attributes.new(self) + @context = context + + if arg.kind_of? String + self.name = arg + elsif arg.kind_of? Element + self.name = arg.expanded_name + arg.attributes.each_attribute{ |attribute| + @attributes << Attribute.new( attribute ) + } + @context = arg.context + end + end + + # :call-seq: + # inspect -> string + # + # Returns a string representation of the element. + # + # For an element with no attributes and no children, shows the element name: + # + # REXML::Element.new.inspect # => "" + # + # Shows attributes, if any: + # + # e = REXML::Element.new('foo') + # e.add_attributes({'bar' => 0, 'baz' => 1}) + # e.inspect # => "" + # + # Shows an ellipsis (...), if there are child elements: + # + # e.add_element(REXML::Element.new('bar')) + # e.add_element(REXML::Element.new('baz')) + # e.inspect # => " ... " + # + def inspect + rv = "<#@expanded_name" + + @attributes.each_attribute do |attr| + rv << " " + attr.write( rv, 0 ) + end + + if children.size > 0 + rv << "> ... " + else + rv << "/>" + end + end + + # :call-seq: + # clone -> new_element + # + # Returns a shallow copy of the element, containing the name and attributes, + # but not the parent or children: + # + # e = REXML::Element.new('foo') + # e.add_attributes({'bar' => 0, 'baz' => 1}) + # e.clone # => + # + def clone + self.class.new self + end + + # :call-seq: + # root_node -> document or element + # + # Returns the most distant ancestor of +self+. + # + # When the element is part of a document, + # returns the root node of the document. 
+ # Note that the root node is different from the document element; + # in this example +a+ is document element and the root node is its parent: + # + # d = REXML::Document.new('') + # top_element = d.first # => ... + # child = top_element.first # => ... + # d.root_node == d # => true + # top_element.root_node == d # => true + # child.root_node == d # => true + # + # When the element is not part of a document, but does have ancestor elements, + # returns the most distant ancestor element: + # + # e0 = REXML::Element.new('foo') + # e1 = REXML::Element.new('bar') + # e1.parent = e0 + # e2 = REXML::Element.new('baz') + # e2.parent = e1 + # e2.root_node == e0 # => true + # + # When the element has no ancestor elements, + # returns +self+: + # + # e = REXML::Element.new('foo') + # e.root_node == e # => true + # + # Related: #root, #document. + # + def root_node + parent.nil? ? self : parent.root_node + end + + # :call-seq: + # root -> element + # + # Returns the most distant _element_ (not document) ancestor of the element: + # + # d = REXML::Document.new('') + # top_element = d.first + # child = top_element.first + # top_element.root == top_element # => true + # child.root == top_element # => true + # + # For a document, returns the topmost element: + # + # d.root == top_element # => true + # + # Related: #root_node, #document. + # + def root + return elements[1] if self.kind_of? Document + return self if parent.kind_of? Document or parent.nil? + return parent.root + end + + # :call-seq: + # document -> document or nil + # + # If the element is part of a document, returns that document: + # + # d = REXML::Document.new('') + # top_element = d.first + # child = top_element.first + # top_element.document == d # => true + # child.document == d # => true + # + # If the element is not part of a document, returns +nil+: + # + # REXML::Element.new.document # => nil + # + # For a document, returns +self+: + # + # d.document == d # => true + # + # Related: #root, #root_node. + # + def document + rt = root + rt.parent if rt + end + + # :call-seq: + # whitespace + # + # Returns +true+ if whitespace is respected for this element, + # +false+ otherwise. + # + # See {Element Context}[../doc/rexml/context_rdoc.html]. + # + # The evaluation is tested against the element's +expanded_name+, + # and so is namespace-sensitive. + def whitespace + @whitespace = nil + if @context + if @context[:respect_whitespace] + @whitespace = (@context[:respect_whitespace] == :all or + @context[:respect_whitespace].include? expanded_name) + end + @whitespace = false if (@context[:compress_whitespace] and + (@context[:compress_whitespace] == :all or + @context[:compress_whitespace].include? expanded_name) + ) + end + @whitespace = true unless @whitespace == false + @whitespace + end + + # :call-seq: + # ignore_whitespace_nodes + # + # Returns +true+ if whitespace nodes are ignored for the element. + # + # See {Element Context}[../doc/rexml/context_rdoc.html]. + # + def ignore_whitespace_nodes + @ignore_whitespace_nodes = false + if @context + if @context[:ignore_whitespace_nodes] + @ignore_whitespace_nodes = + (@context[:ignore_whitespace_nodes] == :all or + @context[:ignore_whitespace_nodes].include? expanded_name) + end + end + end + + # :call-seq: + # raw + # + # Returns +true+ if raw mode is set for the element. + # + # See {Element Context}[../doc/rexml/context_rdoc.html]. + # + # The evaluation is tested against +expanded_name+, and so is namespace + # sensitive. 
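+ #
+ # A minimal sketch, assuming a context hash of {raw: :all} (the element
+ # names below are only illustrative):
+ #
+ #   e = REXML::Element.new('foo', nil, {raw: :all})
+ #   e.raw # => true
+ #   REXML::Element.new('bar').raw # => nil (no context, so falsy)
+ #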
+ def raw + @raw = (@context and @context[:raw] and + (@context[:raw] == :all or + @context[:raw].include? expanded_name)) + @raw + end + + #once :whitespace, :raw, :ignore_whitespace_nodes + + ################################################# + # Namespaces # + ################################################# + + # :call-seq: + # prefixes -> array_of_namespace_prefixes + # + # Returns an array of the string prefixes (names) of all defined namespaces + # in the element and its ancestors: + # + # xml_string = <<-EOT + # + # + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string, {compress_whitespace: :all}) + # d.elements['//a'].prefixes # => ["x", "y"] + # d.elements['//b'].prefixes # => ["x", "y"] + # d.elements['//c'].prefixes # => ["x", "y", "z"] + # + def prefixes + prefixes = [] + prefixes = parent.prefixes if parent + prefixes |= attributes.prefixes + return prefixes + end + + # :call-seq: + # namespaces -> array_of_namespace_names + # + # Returns a hash of all defined namespaces + # in the element and its ancestors: + # + # xml_string = <<-EOT + # + # + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string) + # d.elements['//a'].namespaces # => {"x"=>"1", "y"=>"2"} + # d.elements['//b'].namespaces # => {"x"=>"1", "y"=>"2"} + # d.elements['//c'].namespaces # => {"x"=>"1", "y"=>"2", "z"=>"3"} + # + def namespaces + namespaces = {} + namespaces = parent.namespaces if parent + namespaces = namespaces.merge( attributes.namespaces ) + return namespaces + end + + # :call-seq: + # namespace(prefix = nil) -> string_uri or nil + # + # Returns the string namespace URI for the element, + # possibly deriving from one of its ancestors. + # + # xml_string = <<-EOT + # + # + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string) + # b = d.elements['//b'] + # b.namespace # => "1" + # b.namespace('y') # => "2" + # b.namespace('nosuch') # => nil + # + def namespace(prefix=nil) + if prefix.nil? + prefix = prefix() + end + if prefix == '' + prefix = "xmlns" + else + prefix = "xmlns:#{prefix}" unless prefix[0,5] == 'xmlns' + end + ns = attributes[ prefix ] + ns = parent.namespace(prefix) if ns.nil? and parent + ns = '' if ns.nil? and prefix == 'xmlns' + return ns + end + + # :call-seq: + # add_namespace(prefix, uri = nil) -> self + # + # Adds a namespace to the element; returns +self+. + # + # With the single argument +prefix+, + # adds a namespace using the given +prefix+ and the namespace URI: + # + # e = REXML::Element.new('foo') + # e.add_namespace('bar') + # e.namespaces # => {"xmlns"=>"bar"} + # + # With both arguments +prefix+ and +uri+ given, + # adds a namespace using both arguments: + # + # e.add_namespace('baz', 'bat') + # e.namespaces # => {"xmlns"=>"bar", "baz"=>"bat"} + # + def add_namespace( prefix, uri=nil ) + unless uri + @attributes["xmlns"] = prefix + else + prefix = "xmlns:#{prefix}" unless prefix =~ /^xmlns:/ + @attributes[ prefix ] = uri + end + self + end + + # :call-seq: + # delete_namespace(namespace = 'xmlns') -> self + # + # Removes a namespace from the element. 
+ # + # With no argument, removes the default namespace: + # + # d = REXML::Document.new "" + # d.to_s # => "" + # d.root.delete_namespace # => + # d.to_s # => "" + # + # With argument +namespace+, removes the specified namespace: + # + # d.root.delete_namespace('foo') + # d.to_s # => "" + # + # Does nothing if no such namespace is found: + # + # d.root.delete_namespace('nosuch') + # d.to_s # => "" + # + def delete_namespace namespace="xmlns" + namespace = "xmlns:#{namespace}" unless namespace == 'xmlns' + attribute = attributes.get_attribute(namespace) + attribute.remove unless attribute.nil? + self + end + + ################################################# + # Elements # + ################################################# + + # :call-seq: + # add_element(name, attributes = nil) -> new_element + # add_element(element, attributes = nil) -> element + # + # Adds a child element, optionally setting attributes + # on the added element; returns the added element. + # + # With string argument +name+, creates a new element with that name + # and adds the new element as a child: + # + # e0 = REXML::Element.new('foo') + # e0.add_element('bar') + # e0[0] # => + # + # + # With argument +name+ and hash argument +attributes+, + # sets attributes on the new element: + # + # e0.add_element('baz', {'bat' => '0', 'bam' => '1'}) + # e0[1] # => + # + # With element argument +element+, adds that element as a child: + # + # e0 = REXML::Element.new('foo') + # e1 = REXML::Element.new('bar') + # e0.add_element(e1) + # e0[0] # => + # + # With argument +element+ and hash argument +attributes+, + # sets attributes on the added element: + # + # e0.add_element(e1, {'bat' => '0', 'bam' => '1'}) + # e0[1] # => + # + def add_element element, attrs=nil + raise "First argument must be either an element name, or an Element object" if element.nil? + el = @elements.add(element) + attrs.each do |key, value| + el.attributes[key]=value + end if attrs.kind_of? Hash + el + end + + # :call-seq: + # delete_element(index) -> removed_element or nil + # delete_element(element) -> removed_element or nil + # delete_element(xpath) -> removed_element or nil + # + # Deletes a child element. + # + # When 1-based integer argument +index+ is given, + # removes and returns the child element at that offset if it exists; + # indexing does not include text nodes; + # returns +nil+ if the element does not exist: + # + # d = REXML::Document.new 'text' + # a = d.root # => ... + # a.delete_element(1) # => + # a.delete_element(1) # => + # a.delete_element(1) # => nil + # + # When element argument +element+ is given, + # removes and returns that child element if it exists, + # otherwise returns +nil+: + # + # d = REXML::Document.new 'text' + # a = d.root # => ... + # c = a[2] # => + # a.delete_element(c) # => + # a.delete_element(c) # => nil + # + # When xpath argument +xpath+ is given, + # removes and returns the element at xpath if it exists, + # otherwise returns +nil+: + # + # d = REXML::Document.new 'text' + # a = d.root # => ... + # a.delete_element('//c') # => + # a.delete_element('//c') # => nil + # + def delete_element element + @elements.delete element + end + + # :call-seq: + # has_elements? + # + # Returns +true+ if the element has one or more element children, + # +false+ otherwise: + # + # d = REXML::Document.new 'text' + # a = d.root # => ... + # a.has_elements? # => true + # b = a[0] # => + # b.has_elements? # => false + # + def has_elements? + !@elements.empty? 
+ end + + # :call-seq: + # each_element_with_attribute(attr_name, value = nil, max = 0, xpath = nil) {|e| ... } + # + # Calls the given block with each child element that meets given criteria. + # + # When only string argument +attr_name+ is given, + # calls the block with each child element that has that attribute: + # + # d = REXML::Document.new '' + # a = d.root + # a.each_element_with_attribute('id') {|e| p e } + # + # Output: + # + # + # + # + # + # With argument +attr_name+ and string argument +value+ given, + # calls the block with each child element that has that attribute + # with that value: + # + # a.each_element_with_attribute('id', '1') {|e| p e } + # + # Output: + # + # + # + # + # With arguments +attr_name+, +value+, and integer argument +max+ given, + # calls the block with at most +max+ child elements: + # + # a.each_element_with_attribute('id', '1', 1) {|e| p e } + # + # Output: + # + # + # + # With all arguments given, including +xpath+, + # calls the block with only those child elements + # that meet the first three criteria, + # and also match the given +xpath+: + # + # a.each_element_with_attribute('id', '1', 2, '//d') {|e| p e } + # + # Output: + # + # + # + def each_element_with_attribute( key, value=nil, max=0, name=nil, &block ) # :yields: Element + each_with_something( proc {|child| + if value.nil? + child.attributes[key] != nil + else + child.attributes[key]==value + end + }, max, name, &block ) + end + + # :call-seq: + # each_element_with_text(text = nil, max = 0, xpath = nil) {|e| ... } + # + # Calls the given block with each child element that meets given criteria. + # + # With no arguments, calls the block with each child element that has text: + # + # d = REXML::Document.new 'bbd' + # a = d.root + # a.each_element_with_text {|e| p e } + # + # Output: + # + # ... + # ... + # ... + # + # With the single string argument +text+, + # calls the block with each element that has exactly that text: + # + # a.each_element_with_text('b') {|e| p e } + # + # Output: + # + # ... + # ... + # + # With argument +text+ and integer argument +max+, + # calls the block with at most +max+ elements: + # + # a.each_element_with_text('b', 1) {|e| p e } + # + # Output: + # + # ... + # + # With all arguments given, including +xpath+, + # calls the block with only those child elements + # that meet the first two criteria, + # and also match the given +xpath+: + # + # a.each_element_with_text('b', 2, '//c') {|e| p e } + # + # Output: + # + # ... + # + def each_element_with_text( text=nil, max=0, name=nil, &block ) # :yields: Element + each_with_something( proc {|child| + if text.nil? + child.has_text? + else + child.text == text + end + }, max, name, &block ) + end + + # :call-seq: + # each_element {|e| ... } + # + # Calls the given block with each child element: + # + # d = REXML::Document.new 'bbd' + # a = d.root + # a.each_element {|e| p e } + # + # Output: + # + # ... + # ... + # ... + # + # + def each_element( xpath=nil, &block ) # :yields: Element + @elements.each( xpath, &block ) + end + + # :call-seq: + # get_elements(xpath) + # + # Returns an array of the elements that match the given +xpath+: + # + # xml_string = <<-EOT + # + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string) + # d.root.get_elements('//a') # => [ ... 
, ] + # + def get_elements( xpath ) + @elements.to_a( xpath ) + end + + # :call-seq: + # next_element + # + # Returns the next sibling that is an element if it exists, + # +niL+ otherwise: + # + # d = REXML::Document.new 'text' + # d.root.elements['b'].next_element #-> + # d.root.elements['c'].next_element #-> nil + # + def next_element + element = next_sibling + element = element.next_sibling until element.nil? or element.kind_of? Element + return element + end + + # :call-seq: + # previous_element + # + # Returns the previous sibling that is an element if it exists, + # +niL+ otherwise: + # + # d = REXML::Document.new 'text' + # d.root.elements['c'].previous_element #-> + # d.root.elements['b'].previous_element #-> nil + # + def previous_element + element = previous_sibling + element = element.previous_sibling until element.nil? or element.kind_of? Element + return element + end + + + ################################################# + # Text # + ################################################# + + # :call-seq: + # has_text? -> true or false + # + # Returns +true if the element has one or more text noded, + # +false+ otherwise: + # + # d = REXML::Document.new 'text' + # a = d.root + # a.has_text? # => true + # b = a[0] + # b.has_text? # => false + # + def has_text? + not text().nil? + end + + # :call-seq: + # text(xpath = nil) -> text_string or nil + # + # Returns the text string from the first text node child + # in a specified element, if it exists, # +nil+ otherwise. + # + # With no argument, returns the text from the first text node in +self+: + # + # d = REXML::Document.new "
<p>some text <b>this is bold!</b> more text</p>
" + # d.root.text.class # => String + # d.root.text # => "some text " + # + # With argument +xpath+, returns text from the the first text node + # in the element that matches +xpath+: + # + # d.root.text(1) # => "this is bold!" + # + # Note that an element may have multiple text nodes, + # possibly separated by other non-text children, as above. + # Even so, the returned value is the string text from the first such node. + # + # Note also that the text note is retrieved by method get_text, + # and so is always normalized text. + # + def text( path = nil ) + rv = get_text(path) + return rv.value unless rv.nil? + nil + end + + # :call-seq: + # get_text(xpath = nil) -> text_node or nil + # + # Returns the first text node child in a specified element, if it exists, + # +nil+ otherwise. + # + # With no argument, returns the first text node from +self+: + # + # d = REXML::Document.new "
<p>some text <b>this is bold!</b> more text</p>
" + # d.root.get_text.class # => REXML::Text + # d.root.get_text # => "some text " + # + # With argument +xpath+, returns the first text node from the element + # that matches +xpath+: + # + # d.root.get_text(1) # => "this is bold!" + # + def get_text path = nil + rv = nil + if path + element = @elements[ path ] + rv = element.get_text unless element.nil? + else + rv = @children.find { |node| node.kind_of? Text } + end + return rv + end + + # :call-seq: + # text = string -> string + # text = nil -> nil + # + # Adds, replaces, or removes the first text node child in the element. + # + # With string argument +string+, + # creates a new \REXML::Text node containing that string, + # honoring the current settings for whitespace and row, + # then places the node as the first text child in the element; + # returns +string+. + # + # If the element has no text child, the text node is added: + # + # d = REXML::Document.new '' + # d.root.text = 'foo' #-> 'foo' + # + # If the element has a text child, it is replaced: + # + # d.root.text = 'bar' #-> 'bar' + # + # With argument +nil+, removes the first text child: + # + # d.root.text = nil #-> '' + # + def text=( text ) + if text.kind_of? String + text = Text.new( text, whitespace(), nil, raw() ) + elsif !text.nil? and !text.kind_of? Text + text = Text.new( text.to_s, whitespace(), nil, raw() ) + end + old_text = get_text + if text.nil? + old_text.remove unless old_text.nil? + else + if old_text.nil? + self << text + else + old_text.replace_with( text ) + end + end + return self + end + + # :call-seq: + # add_text(string) -> nil + # add_text(text_node) -> self + # + # Adds text to the element. + # + # When string argument +string+ is given, returns +nil+. + # + # If the element has no child text node, + # creates a \REXML::Text object using the string, + # honoring the current settings for whitespace and raw, + # then adds that node to the element: + # + # d = REXML::Document.new('') + # a = d.root + # a.add_text('foo') + # a.to_a # => [, "foo"] + # + # If the element has child text nodes, + # appends the string to the _last_ text node: + # + # d = REXML::Document.new('foobar') + # a = d.root + # a.add_text('baz') + # a.to_a # => ["foo", , "barbaz"] + # a.add_text('baz') + # a.to_a # => ["foo", , "barbazbaz"] + # + # When text node argument +text_node+ is given, + # appends the node as the last text node in the element; + # returns +self+: + # + # d = REXML::Document.new('foobar') + # a = d.root + # a.add_text(REXML::Text.new('baz')) + # a.to_a # => ["foo", , "bar", "baz"] + # a.add_text(REXML::Text.new('baz')) + # a.to_a # => ["foo", , "bar", "baz", "baz"] + # + def add_text( text ) + if text.kind_of? String + if @children[-1].kind_of? Text + @children[-1] << text + return + end + text = Text.new( text, whitespace(), nil, raw() ) + end + self << text unless text.nil? + return self + end + + # :call-seq: + # node_type -> :element + # + # Returns symbol :element: + # + # d = REXML::Document.new('') + # a = d.root # => + # a.node_type # => :element + # + def node_type + :element + end + + # :call-seq: + # xpath -> string_xpath + # + # Returns the string xpath to the element + # relative to the most distant parent: + # + # d = REXML::Document.new('') + # a = d.root # => ... + # b = a[0] # => ... 
+ # c = b[0] # => + # d.xpath # => "" + # a.xpath # => "/a" + # b.xpath # => "/a/b" + # c.xpath # => "/a/b/c" + # + # If there is no parent, returns the expanded name of the element: + # + # e = REXML::Element.new('foo') + # e.xpath # => "foo" + # + def xpath + path_elements = [] + cur = self + path_elements << __to_xpath_helper( self ) + while cur.parent + cur = cur.parent + path_elements << __to_xpath_helper( cur ) + end + return path_elements.reverse.join( "/" ) + end + + ################################################# + # Attributes # + ################################################# + + # :call-seq: + # [index] -> object + # [attr_name] -> attr_value + # [attr_sym] -> attr_value + # + # With integer argument +index+ given, + # returns the child at offset +index+, or +nil+ if none: + # + # d = REXML::Document.new '>textmore
' + # root = d.root + # (0..root.size).each do |index| + # node = root[index] + # p "#{index}: #{node} (#{node.class})" + # end + # + # Output: + # + # "0: (REXML::Element)" + # "1: text (REXML::Text)" + # "2: (REXML::Element)" + # "3: more (REXML::Text)" + # "4: (REXML::Element)" + # "5: (NilClass)" + # + # With string argument +attr_name+ given, + # returns the string value for the given attribute name if it exists, + # otherwise +nil+: + # + # d = REXML::Document.new('') + # root = d.root + # root['attr'] # => "value" + # root['nosuch'] # => nil + # + # With symbol argument +attr_sym+ given, + # returns [attr_sym.to_s]: + # + # root[:attr] # => "value" + # root[:nosuch] # => nil + # + def [](name_or_index) + case name_or_index + when String + attributes[name_or_index] + when Symbol + attributes[name_or_index.to_s] + else + super + end + end + + + # :call-seq: + # attribute(name, namespace = nil) + # + # Returns the string value for the given attribute name. + # + # With only argument +name+ given, + # returns the value of the named attribute if it exists, otherwise +nil+: + # + # xml_string = <<-EOT + # + # + # + # + #
+ # EOT + # d = REXML::Document.new(xml_string) + # root = d.root + # a = root[1] # => + # a.attribute('attr') # => attr='value' + # a.attribute('nope') # => nil + # + # With arguments +name+ and +namespace+ given, + # returns the value of the named attribute if it exists, otherwise +nil+: + # + # xml_string = "" + # document = REXML::Document.new(xml_string) + # document.root.attribute("x") # => x='x' + # document.root.attribute("x", "a") # => a:x='a:x' + # + def attribute( name, namespace=nil ) + prefix = nil + if namespaces.respond_to? :key + prefix = namespaces.key(namespace) if namespace + else + prefix = namespaces.index(namespace) if namespace + end + prefix = nil if prefix == 'xmlns' + + ret_val = + attributes.get_attribute( "#{prefix ? prefix + ':' : ''}#{name}" ) + + return ret_val unless ret_val.nil? + return nil if prefix.nil? + + # now check that prefix'es namespace is not the same as the + # default namespace + return nil unless ( namespaces[ prefix ] == namespaces[ 'xmlns' ] ) + + attributes.get_attribute( name ) + + end + + # :call-seq: + # has_attributes? -> true or false + # + # Returns +true+ if the element has attributes, +false+ otherwise: + # + # d = REXML::Document.new('') + # a, b = *d.root + # a.has_attributes? # => true + # b.has_attributes? # => false + # + def has_attributes? + return !@attributes.empty? + end + + # :call-seq: + # add_attribute(name, value) -> value + # add_attribute(attribute) -> attribute + # + # Adds an attribute to this element, overwriting any existing attribute + # by the same name. + # + # With string argument +name+ and object +value+ are given, + # adds the attribute created with that name and value: + # + # e = REXML::Element.new + # e.add_attribute('attr', 'value') # => "value" + # e['attr'] # => "value" + # e.add_attribute('attr', 'VALUE') # => "VALUE" + # e['attr'] # => "VALUE" + # + # With only attribute object +attribute+ given, + # adds the given attribute: + # + # a = REXML::Attribute.new('attr', 'value') + # e.add_attribute(a) # => attr='value' + # e['attr'] # => "value" + # a = REXML::Attribute.new('attr', 'VALUE') + # e.add_attribute(a) # => attr='VALUE' + # e['attr'] # => "VALUE" + # + def add_attribute( key, value=nil ) + if key.kind_of? Attribute + @attributes << key + else + @attributes[key] = value + end + end + + # :call-seq: + # add_attributes(hash) -> hash + # add_attributes(array) + # + # Adds zero or more attributes to the element; + # returns the argument. + # + # If hash argument +hash+ is given, + # each key must be a string; + # adds each attribute created with the key/value pair: + # + # e = REXML::Element.new + # h = {'foo' => 'bar', 'baz' => 'bat'} + # e.add_attributes(h) + # + # If argument +array+ is given, + # each array member must be a 2-element array [name, value]; + # each name must be a string: + # + # e = REXML::Element.new + # a = [['foo' => 'bar'], ['baz' => 'bat']] + # e.add_attributes(a) + # + def add_attributes hash + if hash.kind_of? Hash + hash.each_pair {|key, value| @attributes[key] = value } + elsif hash.kind_of? 
Array + hash.each { |value| @attributes[ value[0] ] = value[1] } + end + end + + # :call-seq: + # delete_attribute(name) -> removed_attribute or nil + # + # Removes a named attribute if it exists; + # returns the removed attribute if found, otherwise +nil+: + # + # e = REXML::Element.new('foo') + # e.add_attribute('bar', 'baz') + # e.delete_attribute('bar') # => + # e.delete_attribute('bar') # => nil + # + def delete_attribute(key) + attr = @attributes.get_attribute(key) + attr.remove unless attr.nil? + end + + ################################################# + # Other Utilities # + ################################################# + + # :call-seq: + # cdatas -> array_of_cdata_children + # + # Returns a frozen array of the REXML::CData children of the element: + # + # xml_string = <<-EOT + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string) + # cds = d.root.cdatas # => ["foo", "bar"] + # cds.frozen? # => true + # cds.map {|cd| cd.class } # => [REXML::CData, REXML::CData] + # + def cdatas + find_all { |child| child.kind_of? CData }.freeze + end + + # :call-seq: + # comments -> array_of_comment_children + # + # Returns a frozen array of the REXML::Comment children of the element: + # + # xml_string = <<-EOT + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string) + # cs = d.root.comments + # cs.frozen? # => true + # cs.map {|c| c.class } # => [REXML::Comment, REXML::Comment] + # cs.map {|c| c.to_s } # => ["foo", "bar"] + # + def comments + find_all { |child| child.kind_of? Comment }.freeze + end + + # :call-seq: + # instructions -> array_of_instruction_children + # + # Returns a frozen array of the REXML::Instruction children of the element: + # + # xml_string = <<-EOT + # + # + # + # + # EOT + # d = REXML::Document.new(xml_string) + # is = d.root.instructions + # is.frozen? # => true + # is.map {|i| i.class } # => [REXML::Instruction, REXML::Instruction] + # is.map {|i| i.to_s } # => ["", ""] + # + def instructions + find_all { |child| child.kind_of? Instruction }.freeze + end + + # :call-seq: + # texts -> array_of_text_children + # + # Returns a frozen array of the REXML::Text children of the element: + # + # xml_string = 'textmore' + # d = REXML::Document.new(xml_string) + # ts = d.root.texts + # ts.frozen? # => true + # ts.map {|t| t.class } # => [REXML::Text, REXML::Text] + # ts.map {|t| t.to_s } # => ["text", "more"] + # + def texts + find_all { |child| child.kind_of? Text }.freeze + end + + # == DEPRECATED + # See REXML::Formatters + # + # Writes out this element, and recursively, all children. + # output:: + # output an object which supports '<< string'; this is where the + # document will be written. + # indent:: + # An integer. If -1, no indenting will be used; otherwise, the + # indentation will be this number of spaces, and children will be + # indented an additional amount. Defaults to -1 + # transitive:: + # If transitive is true and indent is >= 0, then the output will be + # pretty-printed in such a way that the added whitespace does not affect + # the parse tree of the document + # ie_hack:: + # This hack inserts a space before the /> on empty tags to address + # a limitation of Internet Explorer. Defaults to false + # + # out = '' + # doc.write( out ) #-> doc is written to the string 'out' + # doc.write( $stdout ) #-> doc written to the console + def write(output=$stdout, indent=-1, transitive=false, ie_hack=false) + Kernel.warn("#{self.class.name}.write is deprecated. 
See REXML::Formatters", uplevel: 1) + formatter = if indent > -1 + if transitive + require_relative "formatters/transitive" + REXML::Formatters::Transitive.new( indent, ie_hack ) + else + REXML::Formatters::Pretty.new( indent, ie_hack ) + end + else + REXML::Formatters::Default.new( ie_hack ) + end + formatter.write( self, output ) + end + + + private + def __to_xpath_helper node + rv = node.expanded_name.clone + if node.parent + results = node.parent.find_all {|n| + n.kind_of?(REXML::Element) and n.expanded_name == node.expanded_name + } + if results.length > 1 + idx = results.index( node ) + rv << "[#{idx+1}]" + end + end + rv + end + + # A private helper method + def each_with_something( test, max=0, name=nil ) + num = 0 + @elements.each( name ){ |child| + yield child if test.call(child) and num += 1 + return if max>0 and num == max + } + end + end + + ######################################################################## + # ELEMENTS # + ######################################################################## + + # A class which provides filtering of children for Elements, and + # XPath search support. You are expected to only encounter this class as + # the element.elements object. Therefore, you are + # _not_ expected to instantiate this yourself. + # + # xml_string = <<-EOT + # + # + # + # Everyday Italian + # Giada De Laurentiis + # 2005 + # 30.00 + # + # + # Harry Potter + # J K. Rowling + # 2005 + # 29.99 + # + # + # XQuery Kick Start + # James McGovern + # Per Bothner + # Kurt Cagle + # James Linn + # Vaidyanathan Nagarajan + # 2003 + # 49.99 + # + # + # Learning XML + # Erik T. Ray + # 2003 + # 39.95 + # + # + # EOT + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements # => # ... > + # + class Elements + include Enumerable + # :call-seq: + # new(parent) -> new_elements_object + # + # Returns a new \Elements object with the given +parent+. + # Does _not_ assign parent.elements = self: + # + # d = REXML::Document.new(xml_string) + # eles = REXML::Elements.new(d.root) + # eles # => # ... > + # eles == d.root.elements # => false + # + def initialize parent + @element = parent + end + + # :call-seq: + # parent + # + # Returns the parent element cited in creating the \Elements object. + # This element is also the default starting point for searching + # in the \Elements object. + # + # d = REXML::Document.new(xml_string) + # elements = REXML::Elements.new(d.root) + # elements.parent == d.root # => true + # + def parent + @element + end + + # :call-seq: + # elements[index] -> element or nil + # elements[xpath] -> element or nil + # elements[n, name] -> element or nil + # + # Returns the first \Element object selected by the arguments, + # if any found, or +nil+ if none found. + # + # Notes: + # - The +index+ is 1-based, not 0-based, so that: + # - The first element has index 1 + # - The _nth_ element has index +n+. + # - The selection ignores non-\Element nodes. + # + # When the single argument +index+ is given, + # returns the element given by the index, if any; otherwise, +nil+: + # + # d = REXML::Document.new(xml_string) + # eles = d.root.elements + # eles # => # ... > + # eles[1] # => ... + # eles.size # => 4 + # eles[4] # => ... + # eles[5] # => nil + # + # The node at this index is not an \Element, and so is not returned: + # + # eles = d.root.first.first # => ... 
</> + # eles.to_a # => ["Everyday Italian"] + # eles[1] # => nil + # + # When the single argument +xpath+ is given, + # returns the first element found via that +xpath+, if any; otherwise, +nil+: + # + # eles = d.root.elements # => #<REXML::Elements @element=<bookstore> ... </>> + # eles['/bookstore'] # => <bookstore> ... </> + # eles['//book'] # => <book category='cooking'> ... </> + # eles['//book [@category="children"]'] # => <book category='children'> ... </> + # eles['/nosuch'] # => nil + # eles['//nosuch'] # => nil + # eles['//book [@category="nosuch"]'] # => nil + # eles['.'] # => <bookstore> ... </> + # eles['..'].class # => REXML::Document + # + # With arguments +n+ and +name+ given, + # returns the _nth_ found element that has the given +name+, + # or +nil+ if there is no such _nth_ element: + # + # eles = d.root.elements # => #<REXML::Elements @element=<bookstore> ... </>> + # eles[1, 'book'] # => <book category='cooking'> ... </> + # eles[4, 'book'] # => <book category='web' cover='paperback'> ... </> + # eles[5, 'book'] # => nil + # + def []( index, name=nil) + if index.kind_of? Integer + raise "index (#{index}) must be >= 1" if index < 1 + name = literalize(name) if name + num = 0 + @element.find { |child| + child.kind_of? Element and + (name.nil? ? true : child.has_name?( name )) and + (num += 1) == index + } + else + return XPath::first( @element, index ) + #{ |element| + # return element if element.kind_of? Element + #} + #return nil + end + end + + # :call-seq: + # elements[] = index, replacement_element -> replacement_element or nil + # + # Replaces or adds an element. + # + # When <tt>eles[index]</tt> exists, replaces it with +replacement_element+ + # and returns +replacement_element+: + # + # d = REXML::Document.new(xml_string) + # eles = d.root.elements # => #<REXML::Elements @element=<bookstore> ... </>> + # eles[1] # => <book category='cooking'> ... </> + # eles[1] = REXML::Element.new('foo') + # eles[1] # => <foo/> + # + # Does nothing (or raises an exception) + # if +replacement_element+ is not an \Element: + # eles[2] # => <book category='web' cover='paperback'> ... </> + # eles[2] = REXML::Text.new('bar') + # eles[2] # => <book category='web' cover='paperback'> ... </> + # + # When <tt>eles[index]</tt> does not exist, + # adds +replacement_element+ to the element and returns + # + # d = REXML::Document.new(xml_string) + # eles = d.root.elements # => #<REXML::Elements @element=<bookstore> ... </>> + # eles.size # => 4 + # eles[50] = REXML::Element.new('foo') # => <foo/> + # eles.size # => 5 + # eles[5] # => <foo/> + # + # Does nothing (or raises an exception) + # if +replacement_element+ is not an \Element: + # + # eles[50] = REXML::Text.new('bar') # => "bar" + # eles.size # => 5 + # + def []=( index, element ) + previous = self[index] + if previous.nil? + @element.add element + else + previous.replace_with element + end + return previous + end + + # :call-seq: + # empty? -> true or false + # + # Returns +true+ if there are no children, +false+ otherwise. + # + # d = REXML::Document.new('') + # d.elements.empty? # => true + # d = REXML::Document.new(xml_string) + # d.elements.empty? # => false + # + def empty? + @element.find{ |child| child.kind_of? Element}.nil? 
+ end + + # :call-seq: + # index(element) + # + # Returns the 1-based index of the given +element+, if found; + # otherwise, returns -1: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # ele_1, ele_2, ele_3, ele_4 = *elements + # elements.index(ele_4) # => 4 + # elements.delete(ele_3) + # elements.index(ele_4) # => 3 + # elements.index(ele_3) # => -1 + # + def index element + rv = 0 + found = @element.find do |child| + child.kind_of? Element and + (rv += 1) and + child == element + end + return rv if found == element + return -1 + end + + # :call-seq: + # delete(index) -> removed_element or nil + # delete(element) -> removed_element or nil + # delete(xpath) -> removed_element or nil + # + # Removes an element; returns the removed element, or +nil+ if none removed. + # + # With integer argument +index+ given, + # removes the child element at that offset: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.size # => 4 + # elements[2] # => <book category='children'> ... </> + # elements.delete(2) # => <book category='children'> ... </> + # elements.size # => 3 + # elements[2] # => <book category='web'> ... </> + # elements.delete(50) # => nil + # + # With element argument +element+ given, + # removes that child element: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # ele_1, ele_2, ele_3, ele_4 = *elements + # elements.size # => 4 + # elements[2] # => <book category='children'> ... </> + # elements.delete(ele_2) # => <book category='children'> ... </> + # elements.size # => 3 + # elements[2] # => <book category='web'> ... </> + # elements.delete(ele_2) # => nil + # + # With string argument +xpath+ given, + # removes the first element found via that xpath: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.delete('//book') # => <book category='cooking'> ... </> + # elements.delete('//book [@category="children"]') # => <book category='children'> ... </> + # elements.delete('//nosuch') # => nil + # + def delete element + if element.kind_of? Element + @element.delete element + else + el = self[element] + el.remove if el + end + end + + # :call-seq: + # delete_all(xpath) + # + # Removes all elements found via the given +xpath+; + # returns the array of removed elements, if any, else +nil+. + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.size # => 4 + # deleted_elements = elements.delete_all('//book [@category="web"]') + # deleted_elements.size # => 2 + # elements.size # => 2 + # deleted_elements = elements.delete_all('//book') + # deleted_elements.size # => 2 + # elements.size # => 0 + # elements.delete_all('//book') # => [] + # + def delete_all( xpath ) + rv = [] + XPath::each( @element, xpath) {|element| + rv << element if element.kind_of? Element + } + rv.each do |element| + @element.delete element + element.remove + end + return rv + end + + # :call-seq: + # add -> new_element + # add(name) -> new_element + # add(element) -> element + # + # Adds an element; returns the element added. + # + # With no argument, creates and adds a new element. + # The new element has: + # + # - No name. + # - \Parent from the \Elements object. + # - Context from the that parent. + # + # Example: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # parent = elements.parent # => <bookstore> ... 
</> + # parent.context = {raw: :all} + # elements.size # => 4 + # new_element = elements.add # => </> + # elements.size # => 5 + # new_element.name # => nil + # new_element.parent # => <bookstore> ... </> + # new_element.context # => {:raw=>:all} + # + # With string argument +name+, creates and adds a new element. + # The new element has: + # + # - Name +name+. + # - \Parent from the \Elements object. + # - Context from the that parent. + # + # Example: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # parent = elements.parent # => <bookstore> ... </> + # parent.context = {raw: :all} + # elements.size # => 4 + # new_element = elements.add('foo') # => <foo/> + # elements.size # => 5 + # new_element.name # => "foo" + # new_element.parent # => <bookstore> ... </> + # new_element.context # => {:raw=>:all} + # + # With argument +element+, + # creates and adds a clone of the given +element+. + # The new element has name, parent, and context from the given +element+. + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.size # => 4 + # e0 = REXML::Element.new('foo') + # e1 = REXML::Element.new('bar', e0, {raw: :all}) + # element = elements.add(e1) # => <bar/> + # elements.size # => 5 + # element.name # => "bar" + # element.parent # => <bookstore> ... </> + # element.context # => {:raw=>:all} + # + def add element=nil + if element.nil? + Element.new("", self, @element.context) + elsif not element.kind_of?(Element) + Element.new(element, self, @element.context) + else + @element << element + element.context = @element.context + element + end + end + + alias :<< :add + + # :call-seq: + # each(xpath = nil) {|element| ... } -> self + # + # Iterates over the elements. + # + # With no argument, calls the block with each element: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.each {|element| p element } + # + # Output: + # + # <book category='cooking'> ... </> + # <book category='children'> ... </> + # <book category='web'> ... </> + # <book category='web' cover='paperback'> ... </> + # + # With argument +xpath+, calls the block with each element + # that matches the given +xpath+: + # + # elements.each('//book [@category="web"]') {|element| p element } + # + # Output: + # + # <book category='web'> ... </> + # <book category='web' cover='paperback'> ... </> + # + def each( xpath=nil ) + XPath::each( @element, xpath ) {|e| yield e if e.kind_of? Element } + end + + # :call-seq: + # collect(xpath = nil) {|element| ... } -> array + # + # Iterates over the elements; returns the array of block return values. + # + # With no argument, iterates over all elements: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.collect {|element| element.size } # => [9, 9, 17, 9] + # + # With argument +xpath+, iterates over elements that match + # the given +xpath+: + # + # xpath = '//book [@category="web"]' + # elements.collect(xpath) {|element| element.size } # => [17, 9] + # + def collect( xpath=nil ) + collection = [] + XPath::each( @element, xpath ) {|e| + collection << yield(e) if e.kind_of?(Element) + } + collection + end + + # :call-seq: + # inject(xpath = nil, initial = nil) -> object + # + # Calls the block with elements; returns the last block return value. + # + # With no argument, iterates over the elements, calling the block + # <tt>elements.size - 1</tt> times. + # + # - The first call passes the first and second elements. 
+ # - The second call passes the first block return value and the third element. + # - The third call passes the second block return value and the fourth element. + # - And so on. + # + # In this example, the block returns the passed element, + # which is then the object argument to the next call: + # + # d = REXML::Document.new(xml_string) + # elements = d.root.elements + # elements.inject do |object, element| + # p [elements.index(object), elements.index(element)] + # element + # end + # + # Output: + # + # [1, 2] + # [2, 3] + # [3, 4] + # + # With the single argument +xpath+, calls the block only with + # elements matching that xpath: + # + # elements.inject('//book [@category="web"]') do |object, element| + # p [elements.index(object), elements.index(element)] + # element + # end + # + # Output: + # + # [3, 4] + # + # With argument +xpath+ given as +nil+ + # and argument +initial+ also given, + # calls the block once for each element. + # + # - The first call passes the +initial+ and the first element. + # - The second call passes the first block return value and the second element. + # - The third call passes the second block return value and the third element. + # - And so on. + # + # In this example, the first object index is <tt>-1</tt> + # + # elements.inject(nil, 'Initial') do |object, element| + # p [elements.index(object), elements.index(element)] + # element + # end + # + # Output: + # + # [-1, 1] + # [1, 2] + # [2, 3] + # [3, 4] + # + # In this form the passed object can be used as an accumulator: + # + # elements.inject(nil, 0) do |total, element| + # total += element.size + # end # => 44 + # + # With both arguments +xpath+ and +initial+ are given, + # calls the block only with elements matching that xpath: + # + # elements.inject('//book [@category="web"]', 0) do |total, element| + # total += element.size + # end # => 26 + # + def inject( xpath=nil, initial=nil ) + first = true + XPath::each( @element, xpath ) {|e| + if (e.kind_of? Element) + if (first and initial == nil) + initial = e + first = false + else + initial = yield( initial, e ) if e.kind_of? Element + end + end + } + initial + end + + # :call-seq: + # size -> integer + # + # Returns the count of \Element children: + # + # d = REXML::Document.new '<a>sean<b/>elliott<b/>russell<b/></a>' + # d.root.elements.size # => 3 # Three elements. + # d.root.size # => 6 # Three elements plus three text nodes.. + # + def size + count = 0 + @element.each {|child| count+=1 if child.kind_of? Element } + count + end + + # :call-seq: + # to_a(xpath = nil) -> array_of_elements + # + # Returns an array of element children (not including non-element children). + # + # With no argument, returns an array of all element children: + # + # d = REXML::Document.new '<a>sean<b/>elliott<c/></a>' + # elements = d.root.elements + # elements.to_a # => [<b/>, <c/>] # Omits non-element children. + # children = d.root.children + # children # => ["sean", <b/>, "elliott", <c/>] # Includes non-element children. + # + # With argument +xpath+, returns an array of element children + # that match the xpath: + # + # elements.to_a('//c') # => [<c/>] + # + def to_a( xpath=nil ) + rv = XPath.match( @element, xpath ) + return rv.find_all{|e| e.kind_of? Element} if xpath + rv + end + + private + # Private helper class. Removes quotes from quoted strings + def literalize name + name = name[1..-2] if name[0] == ?' or name[0] == ?" 
#' + name + end + end + + ######################################################################## + # ATTRIBUTES # + ######################################################################## + + # A class that defines the set of Attributes of an Element and provides + # operations for accessing elements in that set. + class Attributes < Hash + + # :call-seq: + # new(element) + # + # Creates and returns a new \REXML::Attributes object. + # The element given by argument +element+ is stored, + # but its own attributes are not modified: + # + # ele = REXML::Element.new('foo') + # attrs = REXML::Attributes.new(ele) + # attrs.object_id == ele.attributes.object_id # => false + # + # Other instance methods in class \REXML::Attributes may refer to: + # + # - +element.document+. + # - +element.prefix+. + # - +element.expanded_name+. + # + def initialize element + @element = element + end + + # :call-seq: + # [name] -> attribute_value or nil + # + # Returns the value for the attribute given by +name+, + # if it exists; otherwise +nil+. + # The value returned is the unnormalized attribute value, + # with entities expanded: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # ele.attributes['att'] # => "<" + # ele.attributes['bar:att'] # => "2" + # ele.attributes['nosuch'] # => nil + # + # Related: get_attribute (returns an \Attribute object). + # + def [](name) + attr = get_attribute(name) + return attr.value unless attr.nil? + return nil + end + + # :call-seq: + # to_a -> array_of_attribute_objects + # + # Returns an array of \REXML::Attribute objects representing + # the attributes: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes.to_a # => [foo:att='1', bar:att='2', att='<'] + # attrs.first.class # => REXML::Attribute + # + def to_a + enum_for(:each_attribute).to_a + end + + # :call-seq: + # length + # + # Returns the count of attributes: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # ele.attributes.length # => 3 + # + def length + c = 0 + each_attribute { c+=1 } + c + end + alias :size :length + + # :call-seq: + # each_attribute {|attr| ... } + # + # Calls the given block with each \REXML::Attribute object: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # ele.attributes.each_attribute do |attr| + # p [attr.class, attr] + # end + # + # Output: + # + # [REXML::Attribute, foo:att='1'] + # [REXML::Attribute, bar:att='2'] + # [REXML::Attribute, att='<'] + # + def each_attribute # :yields: attribute + return to_enum(__method__) unless block_given? + each_value do |val| + if val.kind_of? Attribute + yield val + else + val.each_value { |atr| yield atr } + end + end + end + + # :call-seq: + # each {|expanded_name, value| ... 
} + # + # Calls the given block with each expanded-name/value pair: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # ele.attributes.each do |expanded_name, value| + # p [expanded_name, value] + # end + # + # Output: + # + # ["foo:att", "1"] + # ["bar:att", "2"] + # ["att", "<"] + # + def each + return to_enum(__method__) unless block_given? + each_attribute do |attr| + yield [attr.expanded_name, attr.value] + end + end + + # :call-seq: + # get_attribute(name) -> attribute_object or nil + # + # Returns the \REXML::Attribute object for the given +name+: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes + # attrs.get_attribute('foo:att') # => foo:att='1' + # attrs.get_attribute('foo:att').class # => REXML::Attribute + # attrs.get_attribute('bar:att') # => bar:att='2' + # attrs.get_attribute('att') # => att='<' + # attrs.get_attribute('nosuch') # => nil + # + def get_attribute( name ) + attr = fetch( name, nil ) + if attr.nil? + return nil if name.nil? + # Look for prefix + name =~ Namespace::NAMESPLIT + prefix, n = $1, $2 + if prefix + attr = fetch( n, nil ) + # check prefix + if attr == nil + elsif attr.kind_of? Attribute + return attr if prefix == attr.prefix + else + attr = attr[ prefix ] + return attr + end + end + element_document = @element.document + if element_document and element_document.doctype + expn = @element.expanded_name + expn = element_document.doctype.name if expn.size == 0 + attr_val = element_document.doctype.attribute_of(expn, name) + return Attribute.new( name, attr_val ) if attr_val + end + return nil + end + if attr.kind_of? Hash + attr = attr[ @element.prefix ] + end + return attr + end + + # :call-seq: + # [name] = value -> value + # + # When +value+ is non-+nil+, + # assigns that to the attribute for the given +name+, + # overwriting the previous value if it exists: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes + # attrs['foo:att'] = '2' # => "2" + # attrs['baz:att'] = '3' # => "3" + # + # When +value+ is +nil+, deletes the attribute if it exists: + # + # attrs['baz:att'] = nil + # attrs.include?('baz:att') # => false + # + def []=( name, value ) + if value.nil? # Delete the named attribute + attr = get_attribute(name) + delete attr + return + end + + unless value.kind_of? Attribute + if @element.document and @element.document.doctype + value = Text::normalize( value, @element.document.doctype ) + else + value = Text::normalize( value, nil ) + end + value = Attribute.new(name, value) + end + value.element = @element + old_attr = fetch(value.name, nil) + if old_attr.nil? + store(value.name, value) + elsif old_attr.kind_of? 
Hash + old_attr[value.prefix] = value + elsif old_attr.prefix != value.prefix + # Check for conflicting namespaces + if value.prefix != "xmlns" and old_attr.prefix != "xmlns" + old_namespace = old_attr.namespace + new_namespace = value.namespace + if old_namespace == new_namespace + raise ParseException.new( + "Namespace conflict in adding attribute \"#{value.name}\": "+ + "Prefix \"#{old_attr.prefix}\" = \"#{old_namespace}\" and "+ + "prefix \"#{value.prefix}\" = \"#{new_namespace}\"") + end + end + store value.name, {old_attr.prefix => old_attr, + value.prefix => value} + else + store value.name, value + end + return @element + end + + # :call-seq: + # prefixes -> array_of_prefix_strings + # + # Returns an array of prefix strings in the attributes. + # The array does not include the default + # namespace declaration, if one exists. + # + # xml_string = '<a xmlns="foo" xmlns:x="bar" xmlns:y="twee" z="glorp"/>' + # d = REXML::Document.new(xml_string) + # d.root.attributes.prefixes # => ["x", "y"] + # + def prefixes + ns = [] + each_attribute do |attribute| + ns << attribute.name if attribute.prefix == 'xmlns' + end + if @element.document and @element.document.doctype + expn = @element.expanded_name + expn = @element.document.doctype.name if expn.size == 0 + @element.document.doctype.attributes_of(expn).each { + |attribute| + ns << attribute.name if attribute.prefix == 'xmlns' + } + end + ns + end + + # :call-seq: + # namespaces + # + # Returns a hash of name/value pairs for the namespaces: + # + # xml_string = '<a xmlns="foo" xmlns:x="bar" xmlns:y="twee" z="glorp"/>' + # d = REXML::Document.new(xml_string) + # d.root.attributes.namespaces # => {"xmlns"=>"foo", "x"=>"bar", "y"=>"twee"} + # + def namespaces + namespaces = {} + each_attribute do |attribute| + namespaces[attribute.name] = attribute.value if attribute.prefix == 'xmlns' or attribute.name == 'xmlns' + end + if @element.document and @element.document.doctype + expn = @element.expanded_name + expn = @element.document.doctype.name if expn.size == 0 + @element.document.doctype.attributes_of(expn).each { + |attribute| + namespaces[attribute.name] = attribute.value if attribute.prefix == 'xmlns' or attribute.name == 'xmlns' + } + end + namespaces + end + + # :call-seq: + # delete(name) -> element + # delete(attribute) -> element + # + # Removes a specified attribute if it exists; + # returns the attributes' element. + # + # When string argument +name+ is given, + # removes the attribute of that name if it exists: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes + # attrs.delete('foo:att') # => <ele bar:att='2' att='<'/> + # attrs.delete('foo:att') # => <ele bar:att='2' att='<'/> + # + # When attribute argument +attribute+ is given, + # removes that attribute if it exists: + # + # attr = REXML::Attribute.new('bar:att', '2') + # attrs.delete(attr) # => <ele att='<'/> # => <ele att='<'/> + # attrs.delete(attr) # => <ele att='<'/> # => <ele/> + # + def delete( attribute ) + name = nil + prefix = nil + if attribute.kind_of? Attribute + name = attribute.name + prefix = attribute.prefix + else + attribute =~ Namespace::NAMESPLIT + prefix, name = $1, $2 + prefix = '' unless prefix + end + old = fetch(name, nil) + if old.kind_of? 
Hash # the supplied attribute is one of many + old.delete(prefix) + if old.size == 1 + repl = nil + old.each_value{|v| repl = v} + store name, repl + end + elsif old.nil? + return @element + else # the supplied attribute is a top-level one + super(name) + end + @element + end + + # :call-seq: + # add(attribute) -> attribute + # + # Adds attribute +attribute+, replacing the previous + # attribute of the same name if it exists; + # returns +attribute+: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes + # attrs # => {"att"=>{"foo"=>foo:att='1', "bar"=>bar:att='2', ""=>att='<'}} + # attrs.add(REXML::Attribute.new('foo:att', '2')) # => foo:att='2' + # attrs.add(REXML::Attribute.new('baz', '3')) # => baz='3' + # attrs.include?('baz') # => true + # + def add( attribute ) + self[attribute.name] = attribute + end + + alias :<< :add + + # :call-seq: + # delete_all(name) -> array_of_removed_attributes + # + # Removes all attributes matching the given +name+; + # returns an array of the removed attributes: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes + # attrs.delete_all('att') # => [att='<'] + # + def delete_all( name ) + rv = [] + each_attribute { |attribute| + rv << attribute if attribute.expanded_name == name + } + rv.each{ |attr| attr.remove } + return rv + end + + # :call-seq: + # get_attribute_ns(namespace, name) + # + # Returns the \REXML::Attribute object among the attributes + # that matches the given +namespace+ and +name+: + # + # xml_string = <<-EOT + # <root xmlns:foo="http://foo" xmlns:bar="http://bar"> + # <ele foo:att='1' bar:att='2' att='<'/> + # </root> + # EOT + # d = REXML::Document.new(xml_string) + # ele = d.root.elements['//ele'] # => <a foo:att='1' bar:att='2' att='<'/> + # attrs = ele.attributes + # attrs.get_attribute_ns('http://foo', 'att') # => foo:att='1' + # attrs.get_attribute_ns('http://foo', 'nosuch') # => nil + # + def get_attribute_ns(namespace, name) + result = nil + each_attribute() { |attribute| + if name == attribute.name && + namespace == attribute.namespace() && + ( !namespace.empty? || !attribute.fully_expanded_name.index(':') ) + # foo will match xmlns:foo, but only if foo isn't also an attribute + result = attribute if !result or !namespace.empty? 
or + !attribute.fully_expanded_name.index(':') + end + } + result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/encoding.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/encoding.rb new file mode 100644 index 0000000..da2d70d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/encoding.rb @@ -0,0 +1,51 @@ +# coding: US-ASCII +# frozen_string_literal: false +module REXML + module Encoding + # ID ---> Encoding name + attr_reader :encoding + def encoding=(encoding) + encoding = encoding.name if encoding.is_a?(Encoding) + if encoding.is_a?(String) + original_encoding = encoding + encoding = find_encoding(encoding) + unless encoding + raise ArgumentError, "Bad encoding name #{original_encoding}" + end + end + return false if defined?(@encoding) and encoding == @encoding + if encoding + @encoding = encoding.upcase + else + @encoding = 'UTF-8' + end + true + end + + def encode(string) + string.encode(@encoding) + end + + def decode(string) + string.encode(::Encoding::UTF_8, @encoding) + end + + private + def find_encoding(name) + case name + when /\Ashift-jis\z/i + return "SHIFT_JIS" + when /\ACP-(\d+)\z/ + name = "CP#{$1}" + when /\AUTF-8\z/i + return name + end + begin + ::Encoding::Converter.search_convpath(name, 'UTF-8') + rescue ::Encoding::ConverterNotFoundError + return nil + end + name + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/entity.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/entity.rb new file mode 100644 index 0000000..89a9e84 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/entity.rb @@ -0,0 +1,171 @@ +# frozen_string_literal: false +require_relative 'child' +require_relative 'source' +require_relative 'xmltokens' + +module REXML + class Entity < Child + include XMLTokens + PUBIDCHAR = "\x20\x0D\x0Aa-zA-Z0-9\\-()+,./:=?;!*@$_%#" + SYSTEMLITERAL = %Q{((?:"[^"]*")|(?:'[^']*'))} + PUBIDLITERAL = %Q{("[#{PUBIDCHAR}']*"|'[#{PUBIDCHAR}]*')} + EXTERNALID = "(?:(?:(SYSTEM)\\s+#{SYSTEMLITERAL})|(?:(PUBLIC)\\s+#{PUBIDLITERAL}\\s+#{SYSTEMLITERAL}))" + NDATADECL = "\\s+NDATA\\s+#{NAME}" + PEREFERENCE = "%#{NAME};" + ENTITYVALUE = %Q{((?:"(?:[^%&"]|#{PEREFERENCE}|#{REFERENCE})*")|(?:'([^%&']|#{PEREFERENCE}|#{REFERENCE})*'))} + PEDEF = "(?:#{ENTITYVALUE}|#{EXTERNALID})" + ENTITYDEF = "(?:#{ENTITYVALUE}|(?:#{EXTERNALID}(#{NDATADECL})?))" + PEDECL = "<!ENTITY\\s+(%)\\s+#{NAME}\\s+#{PEDEF}\\s*>" + GEDECL = "<!ENTITY\\s+#{NAME}\\s+#{ENTITYDEF}\\s*>" + ENTITYDECL = /\s*(?:#{GEDECL})|(?:#{PEDECL})/um + + attr_reader :name, :external, :ref, :ndata, :pubid + + # Create a new entity. Simple entities can be constructed by passing a + # name, value to the constructor; this creates a generic, plain entity + # reference. For anything more complicated, you have to pass a Source to + # the constructor with the entity definition, or use the accessor methods. + # +WARNING+: There is no validation of entity state except when the entity + # is read from a stream. If you start poking around with the accessors, + # you can easily create a non-conformant Entity. + # + # e = Entity.new( 'amp', '&' ) + def initialize stream, value=nil, parent=nil, reference=false + super(parent) + @ndata = @pubid = @value = @external = nil + if stream.kind_of? 
Array + @name = stream[1] + if stream[-1] == '%' + @reference = true + stream.pop + else + @reference = false + end + if stream[2] =~ /SYSTEM|PUBLIC/ + @external = stream[2] + if @external == 'SYSTEM' + @ref = stream[3] + @ndata = stream[4] if stream.size == 5 + else + @pubid = stream[3] + @ref = stream[4] + end + else + @value = stream[2] + end + else + @reference = reference + @external = nil + @name = stream + @value = value + end + end + + # Evaluates whether the given string matches an entity definition, + # returning true if so, and false otherwise. + def Entity::matches? string + (ENTITYDECL =~ string) == 0 + end + + # Evaluates to the unnormalized value of this entity; that is, replacing + # all entities -- both %ent; and &ent; entities. This differs from + # +value()+ in that +value+ only replaces %ent; entities. + def unnormalized + document.record_entity_expansion unless document.nil? + v = value() + return nil if v.nil? + @unnormalized = Text::unnormalize(v, parent) + @unnormalized + end + + #once :unnormalized + + # Returns the value of this entity unprocessed -- raw. This is the + # normalized value; that is, with all %ent; and &ent; entities intact + def normalized + @value + end + + # Write out a fully formed, correct entity definition (assuming the Entity + # object itself is valid.) + # + # out:: + # An object implementing <TT><<</TT> to which the entity will be + # output + # indent:: + # *DEPRECATED* and ignored + def write out, indent=-1 + out << '<!ENTITY ' + out << '% ' if @reference + out << @name + out << ' ' + if @external + out << @external << ' ' + if @pubid + q = @pubid.include?('"')?"'":'"' + out << q << @pubid << q << ' ' + end + q = @ref.include?('"')?"'":'"' + out << q << @ref << q + out << ' NDATA ' << @ndata if @ndata + else + q = @value.include?('"')?"'":'"' + out << q << @value << q + end + out << '>' + end + + # Returns this entity as a string. See write(). + def to_s + rv = '' + write rv + rv + end + + PEREFERENCE_RE = /#{PEREFERENCE}/um + # Returns the value of this entity. At the moment, only internal entities + # are processed. If the value contains internal references (IE, + # %blah;), those are replaced with their values. IE, if the doctype + # contains: + # <!ENTITY % foo "bar"> + # <!ENTITY yada "nanoo %foo; nanoo> + # then: + # doctype.entity('yada').value #-> "nanoo bar nanoo" + def value + if @value + matches = @value.scan(PEREFERENCE_RE) + rv = @value.clone + if @parent + sum = 0 + matches.each do |entity_reference| + entity_value = @parent.entity( entity_reference[0] ) + if sum + entity_value.bytesize > Security.entity_expansion_text_limit + raise "entity expansion has grown too large" + else + sum += entity_value.bytesize + end + rv.gsub!( /%#{entity_reference.join};/um, entity_value ) + end + end + return rv + end + nil + end + end + + # This is a set of entity constants -- the ones defined in the XML + # specification. These are +gt+, +lt+, +amp+, +quot+ and +apos+. 
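+ #
+ # For example, the predefined constants can be inspected directly
+ # (a small sketch):
+ #
+ #   REXML::EntityConst::AMP.name  # => "amp"
+ #   REXML::EntityConst::AMP.value # => "&"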
+ # CAUTION: these entities does not have parent and document + module EntityConst + # +>+ + GT = Entity.new( 'gt', '>' ) + # +<+ + LT = Entity.new( 'lt', '<' ) + # +&+ + AMP = Entity.new( 'amp', '&' ) + # +"+ + QUOT = Entity.new( 'quot', '"' ) + # +'+ + APOS = Entity.new( 'apos', "'" ) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/default.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/default.rb new file mode 100644 index 0000000..811b2ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/default.rb @@ -0,0 +1,116 @@ +# frozen_string_literal: false + +module REXML + module Formatters + class Default + # Prints out the XML document with no formatting -- except if ie_hack is + # set. + # + # ie_hack:: + # If set to true, then inserts whitespace before the close of an empty + # tag, so that IE's bad XML parser doesn't choke. + def initialize( ie_hack=false ) + @ie_hack = ie_hack + end + + # Writes the node to some output. + # + # node:: + # The node to write + # output:: + # A class implementing <TT><<</TT>. Pass in an Output object to + # change the output encoding. + def write( node, output ) + case node + + when Document + if node.xml_decl.encoding != 'UTF-8' && !output.kind_of?(Output) + output = Output.new( output, node.xml_decl.encoding ) + end + write_document( node, output ) + + when Element + write_element( node, output ) + + when Declaration, ElementDecl, NotationDecl, ExternalEntity, Entity, + Attribute, AttlistDecl + node.write( output,-1 ) + + when Instruction + write_instruction( node, output ) + + when DocType, XMLDecl + node.write( output ) + + when Comment + write_comment( node, output ) + + when CData + write_cdata( node, output ) + + when Text + write_text( node, output ) + + else + raise Exception.new("XML FORMATTING ERROR") + + end + end + + protected + def write_document( node, output ) + node.children.each { |child| write( child, output ) } + end + + def write_element( node, output ) + output << "<#{node.expanded_name}" + + node.attributes.to_a.map { |a| + Hash === a ? a.values : a + }.flatten.sort_by {|attr| attr.name}.each do |attr| + output << " " + attr.write( output ) + end unless node.attributes.empty? + + if node.children.empty? + output << " " if @ie_hack + output << "/" + else + output << ">" + node.children.each { |child| + write( child, output ) + } + output << "</#{node.expanded_name}" + end + output << ">" + end + + def write_text( node, output ) + output << node.to_s() + end + + def write_comment( node, output ) + output << Comment::START + output << node.to_s + output << Comment::STOP + end + + def write_cdata( node, output ) + output << CData::START + output << node.to_s + output << CData::STOP + end + + def write_instruction( node, output ) + output << Instruction::START + output << node.target + content = node.content + if content + output << ' ' + output << content + end + output << Instruction::STOP + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/pretty.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/pretty.rb new file mode 100644 index 0000000..562ef94 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/pretty.rb @@ -0,0 +1,142 @@ +# frozen_string_literal: false +require_relative 'default' + +module REXML + module Formatters + # Pretty-prints an XML document. This destroys whitespace in text nodes + # and will insert carriage returns and indentations. 
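+ #
+ # A minimal usage sketch (assumes +doc+ is an already parsed
+ # REXML::Document):
+ #
+ #   formatter = REXML::Formatters::Pretty.new(2)
+ #   formatter.compact = true  # keep short all-text elements on one line
+ #   formatter.write(doc, $stdout)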
+ # + # TODO: Add an option to print attributes on new lines + class Pretty < Default + + # If compact is set to true, then the formatter will attempt to use as + # little space as possible + attr_accessor :compact + # The width of a page. Used for formatting text + attr_accessor :width + + # Create a new pretty printer. + # + # output:: + # An object implementing '<<(String)', to which the output will be written. + # indentation:: + # An integer greater than 0. The indentation of each level will be + # this number of spaces. If this is < 1, the behavior of this object + # is undefined. Defaults to 2. + # ie_hack:: + # If true, the printer will insert whitespace before closing empty + # tags, thereby allowing Internet Explorer's XML parser to + # function. Defaults to false. + def initialize( indentation=2, ie_hack=false ) + @indentation = indentation + @level = 0 + @ie_hack = ie_hack + @width = 80 + @compact = false + end + + protected + def write_element(node, output) + output << ' '*@level + output << "<#{node.expanded_name}" + + node.attributes.each_attribute do |attr| + output << " " + attr.write( output ) + end unless node.attributes.empty? + + if node.children.empty? + if @ie_hack + output << " " + end + output << "/" + else + output << ">" + # If compact and all children are text, and if the formatted output + # is less than the specified width, then try to print everything on + # one line + skip = false + if compact + if node.children.inject(true) {|s,c| s & c.kind_of?(Text)} + string = "" + old_level = @level + @level = 0 + node.children.each { |child| write( child, string ) } + @level = old_level + if string.length < @width + output << string + skip = true + end + end + end + unless skip + output << "\n" + @level += @indentation + node.children.each { |child| + next if child.kind_of?(Text) and child.to_s.strip.length == 0 + write( child, output ) + output << "\n" + } + @level -= @indentation + output << ' '*@level + end + output << "</#{node.expanded_name}" + end + output << ">" + end + + def write_text( node, output ) + s = node.to_s() + s.gsub!(/\s/,' ') + s.squeeze!(" ") + s = wrap(s, @width - @level) + s = indent_text(s, @level, " ", true) + output << (' '*@level + s) + end + + def write_comment( node, output) + output << ' ' * @level + super + end + + def write_cdata( node, output) + output << ' ' * @level + super + end + + def write_document( node, output ) + # Ok, this is a bit odd. All XML documents have an XML declaration, + # but it may not write itself if the user didn't specifically add it, + # either through the API or in the input document. If it doesn't write + # itself, then we don't need a carriage return... which makes this + # logic more complex. 
+ node.children.each { |child| + next if child == node.children[-1] and child.instance_of?(Text) + unless child == node.children[0] or child.instance_of?(Text) or + (child == node.children[1] and !node.children[0].writethis) + output << "\n" + end + write( child, output ) + } + end + + private + def indent_text(string, level=1, style="\t", indentfirstline=true) + return string if level < 0 + string.gsub(/\n/, "\n#{style*level}") + end + + def wrap(string, width) + parts = [] + while string.length > width and place = string.rindex(' ', width) + parts << string[0...place] + string = string[place+1..-1] + end + parts << string + parts.join("\n") + end + + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/transitive.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/transitive.rb new file mode 100644 index 0000000..5ff51e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/formatters/transitive.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: false +require_relative 'pretty' + +module REXML + module Formatters + # The Transitive formatter writes an XML document that parses to an + # identical document as the source document. This means that no extra + # whitespace nodes are inserted, and whitespace within text nodes is + # preserved. Within these constraints, the document is pretty-printed, + # with whitespace inserted into the metadata to introduce formatting. + # + # Note that this is only useful if the original XML is not already + # formatted. Since this formatter does not alter whitespace nodes, the + # results of formatting already formatted XML will be odd. + class Transitive < Default + def initialize( indentation=2, ie_hack=false ) + @indentation = indentation + @level = 0 + @ie_hack = ie_hack + end + + protected + def write_element( node, output ) + output << "<#{node.expanded_name}" + + node.attributes.each_attribute do |attr| + output << " " + attr.write( output ) + end unless node.attributes.empty? + + output << "\n" + output << ' '*@level + if node.children.empty? + output << " " if @ie_hack + output << "/" + else + output << ">" + # If compact and all children are text, and if the formatted output + # is less than the specified width, then try to print everything on + # one line + @level += @indentation + node.children.each { |child| + write( child, output ) + } + @level -= @indentation + output << "</#{node.expanded_name}" + output << "\n" + output << ' '*@level + end + output << ">" + end + + def write_text( node, output ) + output << node.to_s() + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/functions.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/functions.rb new file mode 100644 index 0000000..77926bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/functions.rb @@ -0,0 +1,447 @@ +# frozen_string_literal: false +module REXML + # If you add a method, keep in mind two things: + # (1) the first argument will always be a list of nodes from which to + # filter. In the case of context methods (such as position), the function + # should return an array with a value for each child in the array. + # (2) all method calls from XML will have "-" replaced with "_". 
+ # Therefore, in XML, "local-name()" is identical (and actually becomes) + # "local_name()" + module Functions + @@available_functions = {} + @@context = nil + @@namespace_context = {} + @@variables = {} + + INTERNAL_METHODS = [ + :namespace_context, + :namespace_context=, + :variables, + :variables=, + :context=, + :get_namespace, + :send, + ] + class << self + def singleton_method_added(name) + unless INTERNAL_METHODS.include?(name) + @@available_functions[name] = true + end + end + end + + def Functions::namespace_context=(x) ; @@namespace_context=x ; end + def Functions::variables=(x) ; @@variables=x ; end + def Functions::namespace_context ; @@namespace_context ; end + def Functions::variables ; @@variables ; end + + def Functions::context=(value); @@context = value; end + + def Functions::text( ) + if @@context[:node].node_type == :element + return @@context[:node].find_all{|n| n.node_type == :text}.collect{|n| n.value} + elsif @@context[:node].node_type == :text + return @@context[:node].value + else + return false + end + end + + # Returns the last node of the given list of nodes. + def Functions::last( ) + @@context[:size] + end + + def Functions::position( ) + @@context[:index] + end + + # Returns the size of the given list of nodes. + def Functions::count( node_set ) + node_set.size + end + + # Since REXML is non-validating, this method is not implemented as it + # requires a DTD + def Functions::id( object ) + end + + def Functions::local_name(node_set=nil) + get_namespace(node_set) do |node| + return node.local_name + end + "" + end + + def Functions::namespace_uri( node_set=nil ) + get_namespace( node_set ) {|node| node.namespace} + end + + def Functions::name( node_set=nil ) + get_namespace( node_set ) do |node| + node.expanded_name + end + end + + # Helper method. + def Functions::get_namespace( node_set = nil ) + if node_set == nil + yield @@context[:node] if @@context[:node].respond_to?(:namespace) + else + if node_set.respond_to? :each + result = [] + node_set.each do |node| + result << yield(node) if node.respond_to?(:namespace) + end + result + elsif node_set.respond_to? :namespace + yield node_set + end + end + end + + # A node-set is converted to a string by returning the string-value of the + # node in the node-set that is first in document order. If the node-set is + # empty, an empty string is returned. + # + # A number is converted to a string as follows + # + # NaN is converted to the string NaN + # + # positive zero is converted to the string 0 + # + # negative zero is converted to the string 0 + # + # positive infinity is converted to the string Infinity + # + # negative infinity is converted to the string -Infinity + # + # if the number is an integer, the number is represented in decimal form + # as a Number with no decimal point and no leading zeros, preceded by a + # minus sign (-) if the number is negative + # + # otherwise, the number is represented in decimal form as a Number + # including a decimal point with at least one digit before the decimal + # point and at least one digit after the decimal point, preceded by a + # minus sign (-) if the number is negative; there must be no leading zeros + # before the decimal point apart possibly from the one required digit + # immediately before the decimal point; beyond the one required digit + # after the decimal point there must be as many, but only as many, more + # digits as are needed to uniquely distinguish the number from all other + # IEEE 754 numeric values. 
+ # + # The boolean false value is converted to the string false. The boolean + # true value is converted to the string true. + # + # An object of a type other than the four basic types is converted to a + # string in a way that is dependent on that type. + def Functions::string( object=@@context[:node] ) + if object.respond_to?(:node_type) + case object.node_type + when :attribute + object.value + when :element + string_value(object) + when :document + string_value(object.root) + when :processing_instruction + object.content + else + object.to_s + end + else + case object + when Array + string(object[0]) + when Float + if object.nan? + "NaN" + else + integer = object.to_i + if object == integer + "%d" % integer + else + object.to_s + end + end + else + object.to_s + end + end + end + + # A node-set is converted to a string by + # returning the concatenation of the string-value + # of each of the children of the node in the + # node-set that is first in document order. + # If the node-set is empty, an empty string is returned. + def Functions::string_value( o ) + rv = "" + o.children.each { |e| + if e.node_type == :text + rv << e.to_s + elsif e.node_type == :element + rv << string_value( e ) + end + } + rv + end + + def Functions::concat( *objects ) + concatenated = "" + objects.each do |object| + concatenated << string(object) + end + concatenated + end + + # Fixed by Mike Stok + def Functions::starts_with( string, test ) + string(string).index(string(test)) == 0 + end + + # Fixed by Mike Stok + def Functions::contains( string, test ) + string(string).include?(string(test)) + end + + # Kouhei fixed this + def Functions::substring_before( string, test ) + ruby_string = string(string) + ruby_index = ruby_string.index(string(test)) + if ruby_index.nil? + "" + else + ruby_string[ 0...ruby_index ] + end + end + + # Kouhei fixed this too + def Functions::substring_after( string, test ) + ruby_string = string(string) + return $1 if ruby_string =~ /#{test}(.*)/ + "" + end + + # Take equal portions of Mike Stok and Sean Russell; mix + # vigorously, and pour into a tall, chilled glass. Serves 10,000. + def Functions::substring( string, start, length=nil ) + ruby_string = string(string) + ruby_length = if length.nil? + ruby_string.length.to_f + else + number(length) + end + ruby_start = number(start) + + # Handle the special cases + return '' if ( + ruby_length.nan? or + ruby_start.nan? or + ruby_start.infinite? + ) + + infinite_length = ruby_length.infinite? == 1 + ruby_length = ruby_string.length if infinite_length + + # Now, get the bounds. The XPath bounds are 1..length; the ruby bounds + # are 0..length. Therefore, we have to offset the bounds by one. + ruby_start = round(ruby_start) - 1 + ruby_length = round(ruby_length) + + if ruby_start < 0 + ruby_length += ruby_start unless infinite_length + ruby_start = 0 + end + return '' if ruby_length <= 0 + ruby_string[ruby_start,ruby_length] + end + + # UNTESTED + def Functions::string_length( string ) + string(string).length + end + + # UNTESTED + def Functions::normalize_space( string=nil ) + string = string(@@context[:node]) if string.nil? + if string.kind_of? Array + string.collect{|x| string.to_s.strip.gsub(/\s+/um, ' ') if string} + else + string.to_s.strip.gsub(/\s+/um, ' ') + end + end + + # This is entirely Mike Stok's beast + def Functions::translate( string, tr1, tr2 ) + from = string(tr1) + to = string(tr2) + + # the map is our translation table. 
+ # + # if a character occurs more than once in the + # from string then we ignore the second & + # subsequent mappings + # + # if a character maps to nil then we delete it + # in the output. This happens if the from + # string is longer than the to string + # + # there's nothing about - or ^ being special in + # http://www.w3.org/TR/xpath#function-translate + # so we don't build ranges or negated classes + + map = Hash.new + 0.upto(from.length - 1) { |pos| + from_char = from[pos] + unless map.has_key? from_char + map[from_char] = + if pos < to.length + to[pos] + else + nil + end + end + } + + if ''.respond_to? :chars + string(string).chars.collect { |c| + if map.has_key? c then map[c] else c end + }.compact.join + else + string(string).unpack('U*').collect { |c| + if map.has_key? c then map[c] else c end + }.compact.pack('U*') + end + end + + def Functions::boolean(object=@@context[:node]) + case object + when true, false + object + when Float + return false if object.zero? + return false if object.nan? + true + when Numeric + not object.zero? + when String + not object.empty? + when Array + not object.empty? + else + object ? true : false + end + end + + # UNTESTED + def Functions::not( object ) + not boolean( object ) + end + + # UNTESTED + def Functions::true( ) + true + end + + # UNTESTED + def Functions::false( ) + false + end + + # UNTESTED + def Functions::lang( language ) + lang = false + node = @@context[:node] + attr = nil + until node.nil? + if node.node_type == :element + attr = node.attributes["xml:lang"] + unless attr.nil? + lang = compare_language(string(language), attr) + break + else + end + end + node = node.parent + end + lang + end + + def Functions::compare_language lang1, lang2 + lang2.downcase.index(lang1.downcase) == 0 + end + + # a string that consists of optional whitespace followed by an optional + # minus sign followed by a Number followed by whitespace is converted to + # the IEEE 754 number that is nearest (according to the IEEE 754 + # round-to-nearest rule) to the mathematical value represented by the + # string; any other string is converted to NaN + # + # boolean true is converted to 1; boolean false is converted to 0 + # + # a node-set is first converted to a string as if by a call to the string + # function and then converted in the same way as a string argument + # + # an object of a type other than the four basic types is converted to a + # number in a way that is dependent on that type + def Functions::number(object=@@context[:node]) + case object + when true + Float(1) + when false + Float(0) + when Array + number(string(object)) + when Numeric + object.to_f + else + str = string(object) + case str.strip + when /\A\s*(-?(?:\d+(?:\.\d*)?|\.\d+))\s*\z/ + $1.to_f + else + Float::NAN + end + end + end + + def Functions::sum( nodes ) + nodes = [nodes] unless nodes.kind_of? Array + nodes.inject(0) { |r,n| r + number(string(n)) } + end + + def Functions::floor( number ) + number(number).floor + end + + def Functions::ceiling( number ) + number(number).ceil + end + + def Functions::round( number ) + number = number(number) + begin + neg = number.negative? + number = number.abs.round + neg ? -number : number + rescue FloatDomainError + number + end + end + + def Functions::processing_instruction( node ) + node.node_type == :processing_instruction + end + + def Functions::send(name, *args) + if @@available_functions[name.to_sym] + super + else + # TODO: Maybe, this is not XPath spec behavior. + # This behavior must be reconsidered. 
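+ # An unknown function name falls back to being evaluated as a
+ # relative XPath expression against the current context node.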
+ XPath.match(@@context[:node], name.to_s) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/instruction.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/instruction.rb new file mode 100644 index 0000000..318741f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/instruction.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: false + +require_relative "child" +require_relative "source" + +module REXML + # Represents an XML Instruction; IE, <? ... ?> + # TODO: Add parent arg (3rd arg) to constructor + class Instruction < Child + START = "<?" + STOP = "?>" + + # target is the "name" of the Instruction; IE, the "tag" in <?tag ...?> + # content is everything else. + attr_accessor :target, :content + + # Constructs a new Instruction + # @param target can be one of a number of things. If String, then + # the target of this instruction is set to this. If an Instruction, + # then the Instruction is shallowly cloned (target and content are + # copied). + # @param content Must be either a String, or a Parent. Can only + # be a Parent if the target argument is a Source. Otherwise, this + # String is set as the content of this instruction. + def initialize(target, content=nil) + case target + when String + super() + @target = target + @content = content + when Instruction + super(content) + @target = target.target + @content = target.content + else + message = + "processing instruction target must be String or REXML::Instruction: " + message << "<#{target.inspect}>" + raise ArgumentError, message + end + @content.strip! if @content + end + + def clone + Instruction.new self + end + + # == DEPRECATED + # See the rexml/formatters package + # + def write writer, indent=-1, transitive=false, ie_hack=false + Kernel.warn( "#{self.class.name}.write is deprecated", uplevel: 1) + indent(writer, indent) + writer << START + writer << @target + if @content + writer << ' ' + writer << @content + end + writer << STOP + end + + # @return true if other is an Instruction, and the content and target + # of the other matches the target and content of this object. + def ==( other ) + other.kind_of? Instruction and + other.target == @target and + other.content == @content + end + + def node_type + :processing_instruction + end + + def inspect + "<?p-i #{target} ...?>" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/light/node.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/light/node.rb new file mode 100644 index 0000000..3dab885 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/light/node.rb @@ -0,0 +1,188 @@ +# frozen_string_literal: false +require_relative '../xmltokens' + +module REXML + module Light + # Represents a tagged XML element. Elements are characterized by + # having children, attributes, and names, and can themselves be + # children. + class Node + NAMESPLIT = /^(?:(#{XMLTokens::NCNAME_STR}):)?(#{XMLTokens::NCNAME_STR})/u + PARENTS = [ :element, :document, :doctype ] + # Create a new element. + def initialize node=nil + @node = node + if node.kind_of? String + node = [ :text, node ] + elsif node.nil? + node = [ :document, nil, nil ] + elsif node[0] == :start_element + node[0] = :element + elsif node[0] == :start_doctype + node[0] = :doctype + elsif node[0] == :start_document + node[0] = :document + end + end + + def size + if PARENTS.include? 
@node[0] + @node[-1].size + else + 0 + end + end + + def each + size.times { |x| yield( at(x+4) ) } + end + + def name + at(2) + end + + def name=( name_str, ns=nil ) + pfx = '' + pfx = "#{prefix(ns)}:" if ns + _old_put(2, "#{pfx}#{name_str}") + end + + def parent=( node ) + _old_put(1,node) + end + + def local_name + namesplit + @name + end + + def local_name=( name_str ) + _old_put( 1, "#@prefix:#{name_str}" ) + end + + def prefix( namespace=nil ) + prefix_of( self, namespace ) + end + + def namespace( prefix=prefix() ) + namespace_of( self, prefix ) + end + + def namespace=( namespace ) + @prefix = prefix( namespace ) + pfx = '' + pfx = "#@prefix:" if @prefix.size > 0 + _old_put(1, "#{pfx}#@name") + end + + def []( reference, ns=nil ) + if reference.kind_of? String + pfx = '' + pfx = "#{prefix(ns)}:" if ns + at(3)["#{pfx}#{reference}"] + elsif reference.kind_of? Range + _old_get( Range.new(4+reference.begin, reference.end, reference.exclude_end?) ) + else + _old_get( 4+reference ) + end + end + + def =~( path ) + XPath.match( self, path ) + end + + # Doesn't handle namespaces yet + def []=( reference, ns, value=nil ) + if reference.kind_of? String + value = ns unless value + at( 3 )[reference] = value + elsif reference.kind_of? Range + _old_put( Range.new(3+reference.begin, reference.end, reference.exclude_end?), ns ) + else + if value + _old_put( 4+reference, ns, value ) + else + _old_put( 4+reference, ns ) + end + end + end + + # Append a child to this element, optionally under a provided namespace. + # The namespace argument is ignored if the element argument is an Element + # object. Otherwise, the element argument is a string, the namespace (if + # provided) is the namespace the element is created in. + def << element + if node_type() == :text + at(-1) << element + else + newnode = Node.new( element ) + newnode.parent = self + self.push( newnode ) + end + at(-1) + end + + def node_type + _old_get(0) + end + + def text=( foo ) + replace = at(4).kind_of?(String)? 1 : 0 + self._old_put(4,replace, normalizefoo) + end + + def root + context = self + context = context.at(1) while context.at(1) + end + + def has_name?( name, namespace = '' ) + at(3) == name and namespace() == namespace + end + + def children + self + end + + def parent + at(1) + end + + def to_s + + end + + private + + def namesplit + return if @name.defined? + at(2) =~ NAMESPLIT + @prefix = '' || $1 + @name = $2 + end + + def namespace_of( node, prefix=nil ) + if not prefix + name = at(2) + name =~ NAMESPLIT + prefix = $1 + end + to_find = 'xmlns' + to_find = "xmlns:#{prefix}" if not prefix.nil? + ns = at(3)[ to_find ] + ns ? ns : namespace_of( @node[0], prefix ) + end + + def prefix_of( node, namespace=nil ) + if not namespace + name = node.name + name =~ NAMESPLIT + $1 + else + ns = at(3).find { |k,v| v == namespace } + ns ? ns : prefix_of( node.parent, namespace ) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/namespace.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/namespace.rb new file mode 100644 index 0000000..924edf9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/namespace.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: false + +require_relative 'xmltokens' + +module REXML + # Adds named attributes to an object. 
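+ #
+ # For example, REXML::Element mixes this module in, so an element's
+ # name parts can be read directly (a small sketch):
+ #
+ #   e = REXML::Element.new("x:foo")
+ #   e.prefix              # => "x"
+ #   e.local_name          # => "foo"
+ #   e.fully_expanded_name # => "x:foo"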
+ module Namespace + # The name of the object, valid if set + attr_reader :name, :expanded_name + # The expanded name of the object, valid if name is set + attr_accessor :prefix + include XMLTokens + NAMESPLIT = /^(?:(#{NCNAME_STR}):)?(#{NCNAME_STR})/u + + # Sets the name and the expanded name + def name=( name ) + @expanded_name = name + case name + when NAMESPLIT + if $1 + @prefix = $1 + else + @prefix = "" + @namespace = "" + end + @name = $2 + when "" + @prefix = nil + @namespace = nil + @name = nil + else + message = "name must be \#{PREFIX}:\#{LOCAL_NAME} or \#{LOCAL_NAME}: " + message += "<#{name.inspect}>" + raise ArgumentError, message + end + end + + # Compares names optionally WITH namespaces + def has_name?( other, ns=nil ) + if ns + return (namespace() == ns and name() == other) + elsif other.include? ":" + return fully_expanded_name == other + else + return name == other + end + end + + alias :local_name :name + + # Fully expand the name, even if the prefix wasn't specified in the + # source file. + def fully_expanded_name + ns = prefix + return "#{ns}:#@name" if ns.size > 0 + return @name + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/node.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/node.rb new file mode 100644 index 0000000..081caba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/node.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: false +require_relative "parseexception" +require_relative "formatters/pretty" +require_relative "formatters/default" + +module REXML + # Represents a node in the tree. Nodes are never encountered except as + # superclasses of other objects. Nodes have siblings. + module Node + # @return the next sibling (nil if unset) + def next_sibling_node + return nil if @parent.nil? + @parent[ @parent.index(self) + 1 ] + end + + # @return the previous sibling (nil if unset) + def previous_sibling_node + return nil if @parent.nil? + ind = @parent.index(self) + return nil if ind == 0 + @parent[ ind - 1 ] + end + + # indent:: + # *DEPRECATED* This parameter is now ignored. See the formatters in the + # REXML::Formatters package for changing the output style. + def to_s indent=nil + unless indent.nil? + Kernel.warn( "#{self.class.name}.to_s(indent) parameter is deprecated", uplevel: 1) + f = REXML::Formatters::Pretty.new( indent ) + f.write( self, rv = "" ) + else + f = REXML::Formatters::Default.new + f.write( self, rv = "" ) + end + return rv + end + + def indent to, ind + if @parent and @parent.context and not @parent.context[:indentstyle].nil? then + indentstyle = @parent.context[:indentstyle] + else + indentstyle = ' ' + end + to << indentstyle*ind unless ind<1 + end + + def parent? + false; + end + + + # Visit all subnodes of +self+ recursively + def each_recursive(&block) # :yields: node + self.elements.each {|node| + block.call(node) + node.each_recursive(&block) + } + end + + # Find (and return) first subnode (recursively) for which the block + # evaluates to true. Returns +nil+ if none was found. + def find_first_recursive(&block) # :yields: node + each_recursive {|node| + return node if block.call(node) + } + return nil + end + + # Returns the position that +self+ holds in its parent's array, indexed + # from 1. 
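+ #
+ # A small sketch (assumes +doc+ is a parsed REXML::Document whose root
+ # has at least one child):
+ #
+ #   doc.root.children.first.index_in_parent # => 1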
+ def index_in_parent + parent.index(self)+1 + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/output.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/output.rb new file mode 100644 index 0000000..88a5fb3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/output.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: false +require_relative 'encoding' + +module REXML + class Output + include Encoding + + attr_reader :encoding + + def initialize real_IO, encd="iso-8859-1" + @output = real_IO + self.encoding = encd + + @to_utf = encoding != 'UTF-8' + + if encoding == "UTF-16" + @output << "\ufeff".encode("UTF-16BE") + self.encoding = "UTF-16BE" + end + end + + def <<( content ) + @output << (@to_utf ? self.encode(content) : content) + end + + def to_s + "Output[#{encoding}]" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parent.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parent.rb new file mode 100644 index 0000000..6a53b37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parent.rb @@ -0,0 +1,166 @@ +# frozen_string_literal: false +require_relative "child" + +module REXML + # A parent has children, and has methods for accessing them. The Parent + # class is never encountered except as the superclass for some other + # object. + class Parent < Child + include Enumerable + + # Constructor + # @param parent if supplied, will be set as the parent of this object + def initialize parent=nil + super(parent) + @children = [] + end + + def add( object ) + object.parent = self + @children << object + object + end + + alias :push :add + alias :<< :push + + def unshift( object ) + object.parent = self + @children.unshift object + end + + def delete( object ) + found = false + @children.delete_if {|c| c.equal?(object) and found = true } + object.parent = nil if found + found ? object : nil + end + + def each(&block) + @children.each(&block) + end + + def delete_if( &block ) + @children.delete_if(&block) + end + + def delete_at( index ) + @children.delete_at index + end + + def each_index( &block ) + @children.each_index(&block) + end + + # Fetches a child at a given index + # @param index the Integer index of the child to fetch + def []( index ) + @children[index] + end + + alias :each_child :each + + + + # Set an index entry. See Array.[]= + # @param index the index of the element to set + # @param opt either the object to set, or an Integer length + # @param child if opt is an Integer, this is the child to set + # @return the parent (self) + def []=( *args ) + args[-1].parent = self + @children[*args[0..-2]] = args[-1] + end + + # Inserts an child before another child + # @param child1 this is either an xpath or an Element. If an Element, + # child2 will be inserted before child1 in the child list of the parent. + # If an xpath, child2 will be inserted before the first child to match + # the xpath. + # @param child2 the child to insert + # @return the parent (self) + def insert_before( child1, child2 ) + if child1.kind_of? String + child1 = XPath.first( self, child1 ) + child1.parent.insert_before child1, child2 + else + ind = index(child1) + child2.parent.delete(child2) if child2.parent + @children[ind,0] = child2 + child2.parent = self + end + self + end + + # Inserts an child after another child + # @param child1 this is either an xpath or an Element. If an Element, + # child2 will be inserted after child1 in the child list of the parent. 
+ # If an xpath, child2 will be inserted after the first child to match + # the xpath. + # @param child2 the child to insert + # @return the parent (self) + def insert_after( child1, child2 ) + if child1.kind_of? String + child1 = XPath.first( self, child1 ) + child1.parent.insert_after child1, child2 + else + ind = index(child1)+1 + child2.parent.delete(child2) if child2.parent + @children[ind,0] = child2 + child2.parent = self + end + self + end + + def to_a + @children.dup + end + + # Fetches the index of a given child + # @param child the child to get the index of + # @return the index of the child, or nil if the object is not a child + # of this parent. + def index( child ) + count = -1 + @children.find { |i| count += 1 ; i.hash == child.hash } + count + end + + # @return the number of children of this parent + def size + @children.size + end + + alias :length :size + + # Replaces one child with another, making sure the nodelist is correct + # @param to_replace the child to replace (must be a Child) + # @param replacement the child to insert into the nodelist (must be a + # Child) + def replace_child( to_replace, replacement ) + @children.map! {|c| c.equal?( to_replace ) ? replacement : c } + to_replace.parent = nil + replacement.parent = self + end + + # Deeply clones this object. This creates a complete duplicate of this + # Parent, including all descendants. + def deep_clone + cl = clone() + each do |child| + if child.kind_of? Parent + cl << child.deep_clone + else + cl << child.clone + end + end + cl + end + + alias :children :to_a + + def parent? + true + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parseexception.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parseexception.rb new file mode 100644 index 0000000..7b16cd1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parseexception.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: false +module REXML + class ParseException < RuntimeError + attr_accessor :source, :parser, :continued_exception + + def initialize( message, source=nil, parser=nil, exception=nil ) + super(message) + @source = source + @parser = parser + @continued_exception = exception + end + + def to_s + # Quote the original exception, if there was one + if @continued_exception + err = @continued_exception.inspect + err << "\n" + err << @continued_exception.backtrace.join("\n") + err << "\n...\n" + else + err = "" + end + + # Get the stack trace and error message + err << super + + # Add contextual information + if @source + err << "\nLine: #{line}\n" + err << "Position: #{position}\n" + err << "Last 80 unconsumed characters:\n" + err << @source.buffer[0..80].force_encoding("ASCII-8BIT").gsub(/\n/, ' ') + end + + err + end + + def position + @source.current_line[0] if @source and defined? @source.current_line and + @source.current_line + end + + def line + @source.current_line[2] if @source and defined? 
@source.current_line and + @source.current_line + end + + def context + @source.current_line + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/baseparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/baseparser.rb new file mode 100644 index 0000000..305b120 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/baseparser.rb @@ -0,0 +1,694 @@ +# frozen_string_literal: false +require_relative '../parseexception' +require_relative '../undefinednamespaceexception' +require_relative '../source' +require 'set' +require "strscan" + +module REXML + module Parsers + # = Using the Pull Parser + # <em>This API is experimental, and subject to change.</em> + # parser = PullParser.new( "<a>text<b att='val'/>txet</a>" ) + # while parser.has_next? + # res = parser.next + # puts res[1]['att'] if res.start_tag? and res[0] == 'b' + # end + # See the PullEvent class for information on the content of the results. + # The data is identical to the arguments passed for the various events to + # the StreamListener API. + # + # Notice that: + # parser = PullParser.new( "<a>BAD DOCUMENT" ) + # while parser.has_next? + # res = parser.next + # raise res[1] if res.error? + # end + # + # Nat Price gave me some good ideas for the API. + class BaseParser + LETTER = '[:alpha:]' + DIGIT = '[:digit:]' + + COMBININGCHAR = '' # TODO + EXTENDER = '' # TODO + + NCNAME_STR= "[#{LETTER}_][-[:alnum:]._#{COMBININGCHAR}#{EXTENDER}]*" + QNAME_STR= "(?:(#{NCNAME_STR}):)?(#{NCNAME_STR})" + QNAME = /(#{QNAME_STR})/ + + # Just for backward compatibility. For example, kramdown uses this. + # It's not used in REXML. + UNAME_STR= "(?:#{NCNAME_STR}:)?#{NCNAME_STR}" + + NAMECHAR = '[\-\w\.:]' + NAME = "([\\w:]#{NAMECHAR}*)" + NMTOKEN = "(?:#{NAMECHAR})+" + NMTOKENS = "#{NMTOKEN}(\\s+#{NMTOKEN})*" + REFERENCE = "&(?:#{NAME};|#\\d+;|#x[0-9a-fA-F]+;)" + REFERENCE_RE = /#{REFERENCE}/ + + DOCTYPE_START = /\A\s*<!DOCTYPE\s/um + DOCTYPE_END = /\A\s*\]\s*>/um + ATTRIBUTE_PATTERN = /\s*(#{QNAME_STR})\s*=\s*(["'])(.*?)\4/um + COMMENT_START = /\A<!--/u + COMMENT_PATTERN = /<!--(.*?)-->/um + CDATA_START = /\A<!\[CDATA\[/u + CDATA_END = /\A\s*\]\s*>/um + CDATA_PATTERN = /<!\[CDATA\[(.*?)\]\]>/um + XMLDECL_START = /\A<\?xml\s/u; + XMLDECL_PATTERN = /<\?xml\s+(.*?)\?>/um + INSTRUCTION_START = /\A<\?/u + INSTRUCTION_PATTERN = /<\?#{NAME}(\s+.*?)?\?>/um + TAG_MATCH = /\A<((?>#{QNAME_STR}))/um + CLOSE_MATCH = /\A\s*<\/(#{QNAME_STR})\s*>/um + + VERSION = /\bversion\s*=\s*["'](.*?)['"]/um + ENCODING = /\bencoding\s*=\s*["'](.*?)['"]/um + STANDALONE = /\bstandalone\s*=\s*["'](.*?)['"]/um + + ENTITY_START = /\A\s*<!ENTITY/ + ELEMENTDECL_START = /\A\s*<!ELEMENT/um + ELEMENTDECL_PATTERN = /\A\s*(<!ELEMENT.*?)>/um + SYSTEMENTITY = /\A\s*(%.*?;)\s*$/um + ENUMERATION = "\\(\\s*#{NMTOKEN}(?:\\s*\\|\\s*#{NMTOKEN})*\\s*\\)" + NOTATIONTYPE = "NOTATION\\s+\\(\\s*#{NAME}(?:\\s*\\|\\s*#{NAME})*\\s*\\)" + ENUMERATEDTYPE = "(?:(?:#{NOTATIONTYPE})|(?:#{ENUMERATION}))" + ATTTYPE = "(CDATA|ID|IDREF|IDREFS|ENTITY|ENTITIES|NMTOKEN|NMTOKENS|#{ENUMERATEDTYPE})" + ATTVALUE = "(?:\"((?:[^<&\"]|#{REFERENCE})*)\")|(?:'((?:[^<&']|#{REFERENCE})*)')" + DEFAULTDECL = "(#REQUIRED|#IMPLIED|(?:(#FIXED\\s+)?#{ATTVALUE}))" + ATTDEF = "\\s+#{NAME}\\s+#{ATTTYPE}\\s+#{DEFAULTDECL}" + ATTDEF_RE = /#{ATTDEF}/ + ATTLISTDECL_START = /\A\s*<!ATTLIST/um + ATTLISTDECL_PATTERN = /\A\s*<!ATTLIST\s+#{NAME}(?:#{ATTDEF})*\s*>/um + + TEXT_PATTERN = /\A([^<]*)/um + + # Entity constants + PUBIDCHAR = 
"\x20\x0D\x0Aa-zA-Z0-9\\-()+,./:=?;!*@$_%#" + SYSTEMLITERAL = %Q{((?:"[^"]*")|(?:'[^']*'))} + PUBIDLITERAL = %Q{("[#{PUBIDCHAR}']*"|'[#{PUBIDCHAR}]*')} + EXTERNALID = "(?:(?:(SYSTEM)\\s+#{SYSTEMLITERAL})|(?:(PUBLIC)\\s+#{PUBIDLITERAL}\\s+#{SYSTEMLITERAL}))" + NDATADECL = "\\s+NDATA\\s+#{NAME}" + PEREFERENCE = "%#{NAME};" + ENTITYVALUE = %Q{((?:"(?:[^%&"]|#{PEREFERENCE}|#{REFERENCE})*")|(?:'([^%&']|#{PEREFERENCE}|#{REFERENCE})*'))} + PEDEF = "(?:#{ENTITYVALUE}|#{EXTERNALID})" + ENTITYDEF = "(?:#{ENTITYVALUE}|(?:#{EXTERNALID}(#{NDATADECL})?))" + PEDECL = "<!ENTITY\\s+(%)\\s+#{NAME}\\s+#{PEDEF}\\s*>" + GEDECL = "<!ENTITY\\s+#{NAME}\\s+#{ENTITYDEF}\\s*>" + ENTITYDECL = /\s*(?:#{GEDECL})|(?:#{PEDECL})/um + + NOTATIONDECL_START = /\A\s*<!NOTATION/um + EXTERNAL_ID_PUBLIC = /\A\s*PUBLIC\s+#{PUBIDLITERAL}\s+#{SYSTEMLITERAL}\s*/um + EXTERNAL_ID_SYSTEM = /\A\s*SYSTEM\s+#{SYSTEMLITERAL}\s*/um + PUBLIC_ID = /\A\s*PUBLIC\s+#{PUBIDLITERAL}\s*/um + + EREFERENCE = /&(?!#{NAME};)/ + + DEFAULT_ENTITIES = { + 'gt' => [/>/, '>', '>', />/], + 'lt' => [/</, '<', '<', /</], + 'quot' => [/"/, '"', '"', /"/], + "apos" => [/'/, "'", "'", /'/] + } + + def initialize( source ) + self.stream = source + @listeners = [] + end + + def add_listener( listener ) + @listeners << listener + end + + attr_reader :source + + def stream=( source ) + @source = SourceFactory.create_from( source ) + @closed = nil + @document_status = nil + @tags = [] + @stack = [] + @entities = [] + @nsstack = [] + end + + def position + if @source.respond_to? :position + @source.position + else + # FIXME + 0 + end + end + + # Returns true if there are no more events + def empty? + return (@source.empty? and @stack.empty?) + end + + # Returns true if there are more events. Synonymous with !empty? + def has_next? + return !(@source.empty? and @stack.empty?) + end + + # Push an event back on the head of the stream. This method + # has (theoretically) infinite depth. + def unshift token + @stack.unshift(token) + end + + # Peek at the +depth+ event in the stack. The first element on the stack + # is at depth 0. If +depth+ is -1, will parse to the end of the input + # stream and return the last event, which is always :end_document. + # Be aware that this causes the stream to be parsed up to the +depth+ + # event, so you can effectively pre-parse the entire document (pull the + # entire thing into memory) using this method. + def peek depth=0 + raise %Q[Illegal argument "#{depth}"] if depth < -1 + temp = [] + if depth == -1 + temp.push(pull()) until empty? + else + while @stack.size+temp.size < depth+1 + temp.push(pull()) + end + end + @stack += temp if temp.size > 0 + @stack[depth] + end + + # Returns the next event. This is a +PullEvent+ object. + def pull + pull_event.tap do |event| + @listeners.each do |listener| + listener.receive event + end + end + end + + def pull_event + if @closed + x, @closed = @closed, nil + return [ :end_element, x ] + end + return [ :end_document ] if empty? + return @stack.shift if @stack.size > 0 + #STDERR.puts @source.encoding + #STDERR.puts "BUFFER = #{@source.buffer.inspect}" + if @document_status == nil + word = @source.match( /\A((?:\s+)|(?:<[^>]*>))/um ) + word = word[1] unless word.nil? + #STDERR.puts "WORD = #{word.inspect}" + case word + when COMMENT_START + return [ :comment, @source.match( COMMENT_PATTERN, true )[1] ] + when XMLDECL_START + #STDERR.puts "XMLDECL" + results = @source.match( XMLDECL_PATTERN, true )[1] + version = VERSION.match( results ) + version = version[1] unless version.nil? 
+ encoding = ENCODING.match(results) + encoding = encoding[1] unless encoding.nil? + if need_source_encoding_update?(encoding) + @source.encoding = encoding + end + if encoding.nil? and /\AUTF-16(?:BE|LE)\z/i =~ @source.encoding + encoding = "UTF-16" + end + standalone = STANDALONE.match(results) + standalone = standalone[1] unless standalone.nil? + return [ :xmldecl, version, encoding, standalone ] + when INSTRUCTION_START + return process_instruction + when DOCTYPE_START + base_error_message = "Malformed DOCTYPE" + @source.match(DOCTYPE_START, true) + @nsstack.unshift(curr_ns=Set.new) + name = parse_name(base_error_message) + if @source.match(/\A\s*\[/um, true) + id = [nil, nil, nil] + @document_status = :in_doctype + elsif @source.match(/\A\s*>/um, true) + id = [nil, nil, nil] + @document_status = :after_doctype + else + id = parse_id(base_error_message, + accept_external_id: true, + accept_public_id: false) + if id[0] == "SYSTEM" + # For backward compatibility + id[1], id[2] = id[2], nil + end + if @source.match(/\A\s*\[/um, true) + @document_status = :in_doctype + elsif @source.match(/\A\s*>/um, true) + @document_status = :after_doctype + else + message = "#{base_error_message}: garbage after external ID" + raise REXML::ParseException.new(message, @source) + end + end + args = [:start_doctype, name, *id] + if @document_status == :after_doctype + @source.match(/\A\s*/um, true) + @stack << [ :end_doctype ] + end + return args + when /\A\s+/ + else + @document_status = :after_doctype + if @source.encoding == "UTF-8" + @source.buffer.force_encoding(::Encoding::UTF_8) + end + end + end + if @document_status == :in_doctype + md = @source.match(/\A\s*(.*?>)/um) + case md[1] + when SYSTEMENTITY + match = @source.match( SYSTEMENTITY, true )[1] + return [ :externalentity, match ] + + when ELEMENTDECL_START + return [ :elementdecl, @source.match( ELEMENTDECL_PATTERN, true )[1] ] + + when ENTITY_START + match = @source.match( ENTITYDECL, true ).to_a.compact + match[0] = :entitydecl + ref = false + if match[1] == '%' + ref = true + match.delete_at 1 + end + # Now we have to sort out what kind of entity reference this is + if match[2] == 'SYSTEM' + # External reference + match[3] = match[3][1..-2] # PUBID + match.delete_at(4) if match.size > 4 # Chop out NDATA decl + # match is [ :entity, name, SYSTEM, pubid(, ndata)? ] + elsif match[2] == 'PUBLIC' + # External reference + match[3] = match[3][1..-2] # PUBID + match[4] = match[4][1..-2] # HREF + match.delete_at(5) if match.size > 5 # Chop out NDATA decl + # match is [ :entity, name, PUBLIC, pubid, href(, ndata)? ] + else + match[2] = match[2][1..-2] + match.pop if match.size == 4 + # match is [ :entity, name, value ] + end + match << '%' if ref + return match + when ATTLISTDECL_START + md = @source.match( ATTLISTDECL_PATTERN, true ) + raise REXML::ParseException.new( "Bad ATTLIST declaration!", @source ) if md.nil? + element = md[1] + contents = md[0] + + pairs = {} + values = md[0].scan( ATTDEF_RE ) + values.each do |attdef| + unless attdef[3] == "#IMPLIED" + attdef.compact! 
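+              # Record the declared default for this attribute; "#FIXED" defaults keep the value that follows the keyword,
+              # and xmlns: attribute defaults register their prefix on the current namespace stack.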
+ val = attdef[3] + val = attdef[4] if val == "#FIXED " + pairs[attdef[0]] = val + if attdef[0] =~ /^xmlns:(.*)/ + @nsstack[0] << $1 + end + end + end + return [ :attlistdecl, element, pairs, contents ] + when NOTATIONDECL_START + base_error_message = "Malformed notation declaration" + unless @source.match(/\A\s*<!NOTATION\s+/um, true) + if @source.match(/\A\s*<!NOTATION\s*>/um) + message = "#{base_error_message}: name is missing" + else + message = "#{base_error_message}: invalid declaration name" + end + raise REXML::ParseException.new(message, @source) + end + name = parse_name(base_error_message) + id = parse_id(base_error_message, + accept_external_id: true, + accept_public_id: true) + unless @source.match(/\A\s*>/um, true) + message = "#{base_error_message}: garbage before end >" + raise REXML::ParseException.new(message, @source) + end + return [:notationdecl, name, *id] + when DOCTYPE_END + @document_status = :after_doctype + @source.match( DOCTYPE_END, true ) + return [ :end_doctype ] + end + end + if @document_status == :after_doctype + @source.match(/\A\s*/um, true) + end + begin + @source.read if @source.buffer.size<2 + if @source.buffer[0] == ?< + if @source.buffer[1] == ?/ + @nsstack.shift + last_tag = @tags.pop + md = @source.match( CLOSE_MATCH, true ) + if md and !last_tag + message = "Unexpected top-level end tag (got '#{md[1]}')" + raise REXML::ParseException.new(message, @source) + end + if md.nil? or last_tag != md[1] + message = "Missing end tag for '#{last_tag}'" + message << " (got '#{md[1]}')" if md + raise REXML::ParseException.new(message, @source) + end + return [ :end_element, last_tag ] + elsif @source.buffer[1] == ?! + md = @source.match(/\A(\s*[^>]*>)/um) + #STDERR.puts "SOURCE BUFFER = #{source.buffer}, #{source.buffer.size}" + raise REXML::ParseException.new("Malformed node", @source) unless md + if md[0][2] == ?- + md = @source.match( COMMENT_PATTERN, true ) + + case md[1] + when /--/, /-\z/ + raise REXML::ParseException.new("Malformed comment", @source) + end + + return [ :comment, md[1] ] if md + else + md = @source.match( CDATA_PATTERN, true ) + return [ :cdata, md[1] ] if md + end + raise REXML::ParseException.new( "Declarations can only occur "+ + "in the doctype declaration.", @source) + elsif @source.buffer[1] == ?? + return process_instruction + else + # Get the next tag + md = @source.match(TAG_MATCH, true) + unless md + raise REXML::ParseException.new("malformed XML: missing tag start", @source) + end + @document_status = :in_element + prefixes = Set.new + prefixes << md[2] if md[2] + @nsstack.unshift(curr_ns=Set.new) + attributes, closed = parse_attributes(prefixes, curr_ns) + # Verify that all of the prefixes have been defined + for prefix in prefixes + unless @nsstack.find{|k| k.member?(prefix)} + raise UndefinedNamespaceException.new(prefix,@source,self) + end + end + + if closed + @closed = md[1] + @nsstack.shift + else + @tags.push( md[1] ) + end + return [ :start_element, md[1], attributes ] + end + else + md = @source.match( TEXT_PATTERN, true ) + if md[0].length == 0 + @source.match( /(\s+)/, true ) + end + #STDERR.puts "GOT #{md[1].inspect}" unless md[0].length == 0 + #return [ :text, "" ] if md[0].length == 0 + # unnormalized = Text::unnormalize( md[1], self ) + # return PullEvent.new( :text, md[1], unnormalized ) + return [ :text, md[1] ] + end + rescue REXML::UndefinedNamespaceException + raise + rescue REXML::ParseException + raise + rescue => error + raise REXML::ParseException.new( "Exception parsing", + @source, self, (error ? 
error : $!) ) + end + return [ :dummy ] + end + private :pull_event + + def entity( reference, entities ) + value = nil + value = entities[ reference ] if entities + if not value + value = DEFAULT_ENTITIES[ reference ] + value = value[2] if value + end + unnormalize( value, entities ) if value + end + + # Escapes all possible entities + def normalize( input, entities=nil, entity_filter=nil ) + copy = input.clone + # Doing it like this rather than in a loop improves the speed + copy.gsub!( EREFERENCE, '&' ) + entities.each do |key, value| + copy.gsub!( value, "&#{key};" ) unless entity_filter and + entity_filter.include?(entity) + end if entities + copy.gsub!( EREFERENCE, '&' ) + DEFAULT_ENTITIES.each do |key, value| + copy.gsub!( value[3], value[1] ) + end + copy + end + + # Unescapes all possible entities + def unnormalize( string, entities=nil, filter=nil ) + rv = string.clone + rv.gsub!( /\r\n?/, "\n" ) + matches = rv.scan( REFERENCE_RE ) + return rv if matches.size == 0 + rv.gsub!( /�*((?:\d+)|(?:x[a-fA-F0-9]+));/ ) { + m=$1 + m = "0#{m}" if m[0] == ?x + [Integer(m)].pack('U*') + } + matches.collect!{|x|x[0]}.compact! + if matches.size > 0 + matches.each do |entity_reference| + unless filter and filter.include?(entity_reference) + entity_value = entity( entity_reference, entities ) + if entity_value + re = /&#{entity_reference};/ + rv.gsub!( re, entity_value ) + else + er = DEFAULT_ENTITIES[entity_reference] + rv.gsub!( er[0], er[2] ) if er + end + end + end + rv.gsub!( /&/, '&' ) + end + rv + end + + private + def need_source_encoding_update?(xml_declaration_encoding) + return false if xml_declaration_encoding.nil? + return false if /\AUTF-16\z/i =~ xml_declaration_encoding + true + end + + def parse_name(base_error_message) + md = @source.match(/\A\s*#{NAME}/um, true) + unless md + if @source.match(/\A\s*\S/um) + message = "#{base_error_message}: invalid name" + else + message = "#{base_error_message}: name is missing" + end + raise REXML::ParseException.new(message, @source) + end + md[1] + end + + def parse_id(base_error_message, + accept_external_id:, + accept_public_id:) + if accept_external_id and (md = @source.match(EXTERNAL_ID_PUBLIC, true)) + pubid = system = nil + pubid_literal = md[1] + pubid = pubid_literal[1..-2] if pubid_literal # Remove quote + system_literal = md[2] + system = system_literal[1..-2] if system_literal # Remove quote + ["PUBLIC", pubid, system] + elsif accept_public_id and (md = @source.match(PUBLIC_ID, true)) + pubid = system = nil + pubid_literal = md[1] + pubid = pubid_literal[1..-2] if pubid_literal # Remove quote + ["PUBLIC", pubid, nil] + elsif accept_external_id and (md = @source.match(EXTERNAL_ID_SYSTEM, true)) + system = nil + system_literal = md[1] + system = system_literal[1..-2] if system_literal # Remove quote + ["SYSTEM", nil, system] + else + details = parse_id_invalid_details(accept_external_id: accept_external_id, + accept_public_id: accept_public_id) + message = "#{base_error_message}: #{details}" + raise REXML::ParseException.new(message, @source) + end + end + + def parse_id_invalid_details(accept_external_id:, + accept_public_id:) + public = /\A\s*PUBLIC/um + system = /\A\s*SYSTEM/um + if (accept_external_id or accept_public_id) and @source.match(/#{public}/um) + if @source.match(/#{public}(?:\s+[^'"]|\s*[\[>])/um) + return "public ID literal is missing" + end + unless @source.match(/#{public}\s+#{PUBIDLITERAL}/um) + return "invalid public ID literal" + end + if accept_public_id + if 
@source.match(/#{public}\s+#{PUBIDLITERAL}\s+[^'"]/um) + return "system ID literal is missing" + end + unless @source.match(/#{public}\s+#{PUBIDLITERAL}\s+#{SYSTEMLITERAL}/um) + return "invalid system literal" + end + "garbage after system literal" + else + "garbage after public ID literal" + end + elsif accept_external_id and @source.match(/#{system}/um) + if @source.match(/#{system}(?:\s+[^'"]|\s*[\[>])/um) + return "system literal is missing" + end + unless @source.match(/#{system}\s+#{SYSTEMLITERAL}/um) + return "invalid system literal" + end + "garbage after system literal" + else + unless @source.match(/\A\s*(?:PUBLIC|SYSTEM)\s/um) + return "invalid ID type" + end + "ID type is missing" + end + end + + def process_instruction + match_data = @source.match(INSTRUCTION_PATTERN, true) + unless match_data + message = "Invalid processing instruction node" + raise REXML::ParseException.new(message, @source) + end + [:processing_instruction, match_data[1], match_data[2]] + end + + def parse_attributes(prefixes, curr_ns) + attributes = {} + closed = false + match_data = @source.match(/^(.*?)(\/)?>/um, true) + if match_data.nil? + message = "Start tag isn't ended" + raise REXML::ParseException.new(message, @source) + end + + raw_attributes = match_data[1] + closed = !match_data[2].nil? + return attributes, closed if raw_attributes.nil? + return attributes, closed if raw_attributes.empty? + + scanner = StringScanner.new(raw_attributes) + until scanner.eos? + if scanner.scan(/\s+/) + break if scanner.eos? + end + + pos = scanner.pos + loop do + break if scanner.scan(ATTRIBUTE_PATTERN) + unless scanner.scan(QNAME) + message = "Invalid attribute name: <#{scanner.rest}>" + raise REXML::ParseException.new(message, @source) + end + name = scanner[0] + unless scanner.scan(/\s*=\s*/um) + message = "Missing attribute equal: <#{name}>" + raise REXML::ParseException.new(message, @source) + end + quote = scanner.scan(/['"]/) + unless quote + message = "Missing attribute value start quote: <#{name}>" + raise REXML::ParseException.new(message, @source) + end + unless scanner.scan(/.*#{Regexp.escape(quote)}/um) + match_data = @source.match(/^(.*?)(\/)?>/um, true) + if match_data + scanner << "/" if closed + scanner << ">" + scanner << match_data[1] + scanner.pos = pos + closed = !match_data[2].nil? 
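+                # The '>' consumed earlier was inside this attribute value: feed the extra input back into the scanner and retry from this attribute.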
+ next + end + message = + "Missing attribute value end quote: <#{name}>: <#{quote}>" + raise REXML::ParseException.new(message, @source) + end + end + name = scanner[1] + prefix = scanner[2] + local_part = scanner[3] + # quote = scanner[4] + value = scanner[5] + if prefix == "xmlns" + if local_part == "xml" + if value != "http://www.w3.org/XML/1998/namespace" + msg = "The 'xml' prefix must not be bound to any other namespace "+ + "(http://www.w3.org/TR/REC-xml-names/#ns-decl)" + raise REXML::ParseException.new( msg, @source, self ) + end + elsif local_part == "xmlns" + msg = "The 'xmlns' prefix must not be declared "+ + "(http://www.w3.org/TR/REC-xml-names/#ns-decl)" + raise REXML::ParseException.new( msg, @source, self) + end + curr_ns << local_part + elsif prefix + prefixes << prefix unless prefix == "xml" + end + + if attributes.has_key?(name) + msg = "Duplicate attribute #{name.inspect}" + raise REXML::ParseException.new(msg, @source, self) + end + + attributes[name] = value + end + return attributes, closed + end + end + end +end + +=begin + case event[0] + when :start_element + when :text + when :end_element + when :processing_instruction + when :cdata + when :comment + when :xmldecl + when :start_doctype + when :end_doctype + when :externalentity + when :elementdecl + when :entity + when :attlistdecl + when :notationdecl + when :end_doctype + end +=end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/lightparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/lightparser.rb new file mode 100644 index 0000000..bdc0827 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/lightparser.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: false +require_relative 'streamparser' +require_relative 'baseparser' +require_relative '../light/node' + +module REXML + module Parsers + class LightParser + def initialize stream + @stream = stream + @parser = REXML::Parsers::BaseParser.new( stream ) + end + + def add_listener( listener ) + @parser.add_listener( listener ) + end + + def rewind + @stream.rewind + @parser.stream = @stream + end + + def parse + root = context = [ :document ] + while true + event = @parser.pull + case event[0] + when :end_document + break + when :start_element, :start_doctype + new_node = event + context << new_node + new_node[1,0] = [context] + context = new_node + when :end_element, :end_doctype + context = context[1] + else + new_node = event + context << new_node + new_node[1,0] = [context] + end + end + root + end + end + + # An element is an array. 
The array contains: + # 0 The parent element + # 1 The tag name + # 2 A hash of attributes + # 3..-1 The child elements + # An element is an array of size > 3 + # Text is a String + # PIs are [ :processing_instruction, target, data ] + # Comments are [ :comment, data ] + # DocTypes are DocType structs + # The root is an array with XMLDecls, Text, DocType, Array, Text + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/pullparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/pullparser.rb new file mode 100644 index 0000000..f8b232a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/pullparser.rb @@ -0,0 +1,197 @@ +# frozen_string_literal: false +require 'forwardable' + +require_relative '../parseexception' +require_relative 'baseparser' +require_relative '../xmltokens' + +module REXML + module Parsers + # = Using the Pull Parser + # <em>This API is experimental, and subject to change.</em> + # parser = PullParser.new( "<a>text<b att='val'/>txet</a>" ) + # while parser.has_next? + # res = parser.next + # puts res[1]['att'] if res.start_tag? and res[0] == 'b' + # end + # See the PullEvent class for information on the content of the results. + # The data is identical to the arguments passed for the various events to + # the StreamListener API. + # + # Notice that: + # parser = PullParser.new( "<a>BAD DOCUMENT" ) + # while parser.has_next? + # res = parser.next + # raise res[1] if res.error? + # end + # + # Nat Price gave me some good ideas for the API. + class PullParser + include XMLTokens + extend Forwardable + + def_delegators( :@parser, :has_next? ) + def_delegators( :@parser, :entity ) + def_delegators( :@parser, :empty? ) + def_delegators( :@parser, :source ) + + def initialize stream + @entities = {} + @listeners = nil + @parser = BaseParser.new( stream ) + @my_stack = [] + end + + def add_listener( listener ) + @listeners = [] unless @listeners + @listeners << listener + end + + def each + while has_next? + yield self.pull + end + end + + def peek depth=0 + if @my_stack.length <= depth + (depth - @my_stack.length + 1).times { + e = PullEvent.new(@parser.pull) + @my_stack.push(e) + } + end + @my_stack[depth] + end + + def pull + return @my_stack.shift if @my_stack.length > 0 + + event = @parser.pull + case event[0] + when :entitydecl + @entities[ event[1] ] = + event[2] unless event[2] =~ /PUBLIC|SYSTEM/ + when :text + unnormalized = @parser.unnormalize( event[1], @entities ) + event << unnormalized + end + PullEvent.new( event ) + end + + def unshift token + @my_stack.unshift token + end + end + + # A parsing event. The contents of the event are accessed as an +Array?, + # and the type is given either by the ...? methods, or by accessing the + # +type+ accessor. The contents of this object vary from event to event, + # but are identical to the arguments passed to +StreamListener+s for each + # event. + class PullEvent + # The type of this event. Will be one of :tag_start, :tag_end, :text, + # :processing_instruction, :comment, :doctype, :attlistdecl, :entitydecl, + # :notationdecl, :entity, :cdata, :xmldecl, or :error. + def initialize(arg) + @contents = arg + end + + def []( start, endd=nil) + if start.kind_of? Range + @contents.slice( start.begin+1 .. start.end ) + elsif start.kind_of? Numeric + if endd.nil? 
+ @contents.slice( start+1 ) + else + @contents.slice( start+1, endd ) + end + else + raise "Illegal argument #{start.inspect} (#{start.class})" + end + end + + def event_type + @contents[0] + end + + # Content: [ String tag_name, Hash attributes ] + def start_element? + @contents[0] == :start_element + end + + # Content: [ String tag_name ] + def end_element? + @contents[0] == :end_element + end + + # Content: [ String raw_text, String unnormalized_text ] + def text? + @contents[0] == :text + end + + # Content: [ String text ] + def instruction? + @contents[0] == :processing_instruction + end + + # Content: [ String text ] + def comment? + @contents[0] == :comment + end + + # Content: [ String name, String pub_sys, String long_name, String uri ] + def doctype? + @contents[0] == :start_doctype + end + + # Content: [ String text ] + def attlistdecl? + @contents[0] == :attlistdecl + end + + # Content: [ String text ] + def elementdecl? + @contents[0] == :elementdecl + end + + # Due to the wonders of DTDs, an entity declaration can be just about + # anything. There's no way to normalize it; you'll have to interpret the + # content yourself. However, the following is true: + # + # * If the entity declaration is an internal entity: + # [ String name, String value ] + # Content: [ String text ] + def entitydecl? + @contents[0] == :entitydecl + end + + # Content: [ String text ] + def notationdecl? + @contents[0] == :notationdecl + end + + # Content: [ String text ] + def entity? + @contents[0] == :entity + end + + # Content: [ String text ] + def cdata? + @contents[0] == :cdata + end + + # Content: [ String version, String encoding, String standalone ] + def xmldecl? + @contents[0] == :xmldecl + end + + def error? + @contents[0] == :error + end + + def inspect + @contents[0].to_s + ": " + @contents[1..-1].inspect + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/sax2parser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/sax2parser.rb new file mode 100644 index 0000000..6a24ce2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/sax2parser.rb @@ -0,0 +1,273 @@ +# frozen_string_literal: false +require_relative 'baseparser' +require_relative '../parseexception' +require_relative '../namespace' +require_relative '../text' + +module REXML + module Parsers + # SAX2Parser + class SAX2Parser + def initialize source + @parser = BaseParser.new(source) + @listeners = [] + @procs = [] + @namespace_stack = [] + @has_listeners = false + @tag_stack = [] + @entities = {} + end + + def source + @parser.source + end + + def add_listener( listener ) + @parser.add_listener( listener ) + end + + # Listen arguments: + # + # Symbol, Array, Block + # Listen to Symbol events on Array elements + # Symbol, Block + # Listen to Symbol events + # Array, Listener + # Listen to all events on Array elements + # Array, Block + # Listen to :start_element events on Array elements + # Listener + # Listen to All events + # + # Symbol can be one of: :start_element, :end_element, + # :start_prefix_mapping, :end_prefix_mapping, :characters, + # :processing_instruction, :doctype, :attlistdecl, :elementdecl, + # :entitydecl, :notationdecl, :cdata, :xmldecl, :comment + # + # There is an additional symbol that can be listened for: :progress. + # This will be called for every event generated, passing in the current + # stream position. 
+ # + # Array contains regular expressions or strings which will be matched + # against fully qualified element names. + # + # Listener must implement the methods in SAX2Listener + # + # Block will be passed the same arguments as a SAX2Listener method would + # be, where the method name is the same as the matched Symbol. + # See the SAX2Listener for more information. + def listen( *args, &blok ) + if args[0].kind_of? Symbol + if args.size == 2 + args[1].each { |match| @procs << [args[0], match, blok] } + else + add( [args[0], nil, blok] ) + end + elsif args[0].kind_of? Array + if args.size == 2 + args[0].each { |match| add( [nil, match, args[1]] ) } + else + args[0].each { |match| add( [ :start_element, match, blok ] ) } + end + else + add([nil, nil, args[0]]) + end + end + + def deafen( listener=nil, &blok ) + if listener + @listeners.delete_if {|item| item[-1] == listener } + @has_listeners = false if @listeners.size == 0 + else + @procs.delete_if {|item| item[-1] == blok } + end + end + + def parse + @procs.each { |sym,match,block| block.call if sym == :start_document } + @listeners.each { |sym,match,block| + block.start_document if sym == :start_document or sym.nil? + } + context = [] + while true + event = @parser.pull + case event[0] + when :end_document + handle( :end_document ) + break + when :start_doctype + handle( :doctype, *event[1..-1]) + when :end_doctype + context = context[1] + when :start_element + @tag_stack.push(event[1]) + # find the observers for namespaces + procs = get_procs( :start_prefix_mapping, event[1] ) + listeners = get_listeners( :start_prefix_mapping, event[1] ) + if procs or listeners + # break out the namespace declarations + # The attributes live in event[2] + event[2].each {|n, v| event[2][n] = @parser.normalize(v)} + nsdecl = event[2].find_all { |n, value| n =~ /^xmlns(:|$)/ } + nsdecl.collect! 
{ |n, value| [ n[6..-1], value ] } + @namespace_stack.push({}) + nsdecl.each do |n,v| + @namespace_stack[-1][n] = v + # notify observers of namespaces + procs.each { |ob| ob.call( n, v ) } if procs + listeners.each { |ob| ob.start_prefix_mapping(n, v) } if listeners + end + end + event[1] =~ Namespace::NAMESPLIT + prefix = $1 + local = $2 + uri = get_namespace(prefix) + # find the observers for start_element + procs = get_procs( :start_element, event[1] ) + listeners = get_listeners( :start_element, event[1] ) + # notify observers + procs.each { |ob| ob.call( uri, local, event[1], event[2] ) } if procs + listeners.each { |ob| + ob.start_element( uri, local, event[1], event[2] ) + } if listeners + when :end_element + @tag_stack.pop + event[1] =~ Namespace::NAMESPLIT + prefix = $1 + local = $2 + uri = get_namespace(prefix) + # find the observers for start_element + procs = get_procs( :end_element, event[1] ) + listeners = get_listeners( :end_element, event[1] ) + # notify observers + procs.each { |ob| ob.call( uri, local, event[1] ) } if procs + listeners.each { |ob| + ob.end_element( uri, local, event[1] ) + } if listeners + + namespace_mapping = @namespace_stack.pop + # find the observers for namespaces + procs = get_procs( :end_prefix_mapping, event[1] ) + listeners = get_listeners( :end_prefix_mapping, event[1] ) + if procs or listeners + namespace_mapping.each do |ns_prefix, ns_uri| + # notify observers of namespaces + procs.each { |ob| ob.call( ns_prefix ) } if procs + listeners.each { |ob| ob.end_prefix_mapping(ns_prefix) } if listeners + end + end + when :text + #normalized = @parser.normalize( event[1] ) + #handle( :characters, normalized ) + copy = event[1].clone + + esub = proc { |match| + if @entities.has_key?($1) + @entities[$1].gsub(Text::REFERENCE, &esub) + else + match + end + } + + copy.gsub!( Text::REFERENCE, &esub ) + copy.gsub!( Text::NUMERICENTITY ) {|m| + m=$1 + m = "0#{m}" if m[0] == ?x + [Integer(m)].pack('U*') + } + handle( :characters, copy ) + when :entitydecl + handle_entitydecl( event ) + when :processing_instruction, :comment, :attlistdecl, + :elementdecl, :cdata, :notationdecl, :xmldecl + handle( *event ) + end + handle( :progress, @parser.position ) + end + end + + private + def handle( symbol, *arguments ) + tag = @tag_stack[-1] + procs = get_procs( symbol, tag ) + listeners = get_listeners( symbol, tag ) + # notify observers + procs.each { |ob| ob.call( *arguments ) } if procs + listeners.each { |l| + l.send( symbol.to_s, *arguments ) + } if listeners + end + + def handle_entitydecl( event ) + @entities[ event[1] ] = event[2] if event.size == 3 + parameter_reference_p = false + case event[2] + when "SYSTEM" + if event.size == 5 + if event.last == "%" + parameter_reference_p = true + else + event[4, 0] = "NDATA" + end + end + when "PUBLIC" + if event.size == 6 + if event.last == "%" + parameter_reference_p = true + else + event[5, 0] = "NDATA" + end + end + else + parameter_reference_p = (event.size == 4) + end + event[1, 0] = event.pop if parameter_reference_p + handle( event[0], event[1..-1] ) + end + + # The following methods are duplicates, but it is faster than using + # a helper + def get_procs( symbol, name ) + return nil if @procs.size == 0 + @procs.find_all do |sym, match, block| + ( + (sym.nil? or symbol == sym) and + ((name.nil? and match.nil?) or match.nil? or ( + (name == match) or + (match.kind_of? 
Regexp and name =~ match) + ) + ) + ) + end.collect{|x| x[-1]} + end + def get_listeners( symbol, name ) + return nil if @listeners.size == 0 + @listeners.find_all do |sym, match, block| + ( + (sym.nil? or symbol == sym) and + ((name.nil? and match.nil?) or match.nil? or ( + (name == match) or + (match.kind_of? Regexp and name =~ match) + ) + ) + ) + end.collect{|x| x[-1]} + end + + def add( pair ) + if pair[-1].respond_to? :call + @procs << pair unless @procs.include? pair + else + @listeners << pair unless @listeners.include? pair + @has_listeners = true + end + end + + def get_namespace( prefix ) + uris = (@namespace_stack.find_all { |ns| not ns[prefix].nil? }) || + (@namespace_stack.find { |ns| not ns[nil].nil? }) + uris[-1][prefix] unless uris.nil? or 0 == uris.size + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/streamparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/streamparser.rb new file mode 100644 index 0000000..9e0eb0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/streamparser.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: false +require_relative "baseparser" + +module REXML + module Parsers + class StreamParser + def initialize source, listener + @listener = listener + @parser = BaseParser.new( source ) + @tag_stack = [] + end + + def add_listener( listener ) + @parser.add_listener( listener ) + end + + def parse + # entity string + while true + event = @parser.pull + case event[0] + when :end_document + unless @tag_stack.empty? + tag_path = "/" + @tag_stack.join("/") + raise ParseException.new("Missing end tag for '#{tag_path}'", + @parser.source) + end + return + when :start_element + @tag_stack << event[1] + attrs = event[2].each do |n, v| + event[2][n] = @parser.unnormalize( v ) + end + @listener.tag_start( event[1], attrs ) + when :end_element + @listener.tag_end( event[1] ) + @tag_stack.pop + when :text + normalized = @parser.unnormalize( event[1] ) + @listener.text( normalized ) + when :processing_instruction + @listener.instruction( *event[1,2] ) + when :start_doctype + @listener.doctype( *event[1..-1] ) + when :end_doctype + # FIXME: remove this condition for milestone:3.2 + @listener.doctype_end if @listener.respond_to? 
:doctype_end + when :comment, :attlistdecl, :cdata, :xmldecl, :elementdecl + @listener.send( event[0].to_s, *event[1..-1] ) + when :entitydecl, :notationdecl + @listener.send( event[0].to_s, event[1..-1] ) + when :externalentity + entity_reference = event[1] + content = entity_reference.gsub(/\A%|;\z/, "") + @listener.entity(content) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/treeparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/treeparser.rb new file mode 100644 index 0000000..bf9a425 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/treeparser.rb @@ -0,0 +1,101 @@ +# frozen_string_literal: false +require_relative '../validation/validationexception' +require_relative '../undefinednamespaceexception' + +module REXML + module Parsers + class TreeParser + def initialize( source, build_context = Document.new ) + @build_context = build_context + @parser = Parsers::BaseParser.new( source ) + end + + def add_listener( listener ) + @parser.add_listener( listener ) + end + + def parse + tag_stack = [] + in_doctype = false + entities = nil + begin + while true + event = @parser.pull + #STDERR.puts "TREEPARSER GOT #{event.inspect}" + case event[0] + when :end_document + unless tag_stack.empty? + raise ParseException.new("No close tag for #{@build_context.xpath}", + @parser.source, @parser) + end + return + when :start_element + tag_stack.push(event[1]) + el = @build_context = @build_context.add_element( event[1] ) + event[2].each do |key, value| + el.attributes[key]=Attribute.new(key,value,self) + end + when :end_element + tag_stack.pop + @build_context = @build_context.parent + when :text + if not in_doctype + if @build_context[-1].instance_of? Text + @build_context[-1] << event[1] + else + @build_context.add( + Text.new(event[1], @build_context.whitespace, nil, true) + ) unless ( + @build_context.ignore_whitespace_nodes and + event[1].strip.size==0 + ) + end + end + when :comment + c = Comment.new( event[1] ) + @build_context.add( c ) + when :cdata + c = CData.new( event[1] ) + @build_context.add( c ) + when :processing_instruction + @build_context.add( Instruction.new( event[1], event[2] ) ) + when :end_doctype + in_doctype = false + entities.each { |k,v| entities[k] = @build_context.entities[k].value } + @build_context = @build_context.parent + when :start_doctype + doctype = DocType.new( event[1..-1], @build_context ) + @build_context = doctype + entities = {} + in_doctype = true + when :attlistdecl + n = AttlistDecl.new( event[1..-1] ) + @build_context.add( n ) + when :externalentity + n = ExternalEntity.new( event[1] ) + @build_context.add( n ) + when :elementdecl + n = ElementDecl.new( event[1] ) + @build_context.add(n) + when :entitydecl + entities[ event[1] ] = event[2] unless event[2] =~ /PUBLIC|SYSTEM/ + @build_context.add(Entity.new(event)) + when :notationdecl + n = NotationDecl.new( *event[1..-1] ) + @build_context.add( n ) + when :xmldecl + x = XMLDecl.new( event[1], event[2], event[3] ) + @build_context.add( x ) + end + end + rescue REXML::Validation::ValidationException + raise + rescue REXML::ParseException + raise + rescue + raise ParseException.new( $!.message, @parser.source, @parser, $! 
) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/ultralightparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/ultralightparser.rb new file mode 100644 index 0000000..e0029f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/ultralightparser.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: false +require_relative 'streamparser' +require_relative 'baseparser' + +module REXML + module Parsers + class UltraLightParser + def initialize stream + @stream = stream + @parser = REXML::Parsers::BaseParser.new( stream ) + end + + def add_listener( listener ) + @parser.add_listener( listener ) + end + + def rewind + @stream.rewind + @parser.stream = @stream + end + + def parse + root = context = [] + while true + event = @parser.pull + case event[0] + when :end_document + break + when :end_doctype + context = context[1] + when :start_element, :start_doctype + context << event + event[1,0] = [context] + context = event + when :end_element + context = context[1] + else + context << event + end + end + root + end + end + + # An element is an array. The array contains: + # 0 The parent element + # 1 The tag name + # 2 A hash of attributes + # 3..-1 The child elements + # An element is an array of size > 3 + # Text is a String + # PIs are [ :processing_instruction, target, data ] + # Comments are [ :comment, data ] + # DocTypes are DocType structs + # The root is an array with XMLDecls, Text, DocType, Array, Text + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/xpathparser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/xpathparser.rb new file mode 100644 index 0000000..d92678f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/parsers/xpathparser.rb @@ -0,0 +1,689 @@ +# frozen_string_literal: false +require_relative '../namespace' +require_relative '../xmltokens' + +module REXML + module Parsers + # You don't want to use this class. Really. Use XPath, which is a wrapper + # for this class. Believe me. You don't want to poke around in here. + # There is strange, dark magic at work in this code. Beware. Go back! Go + # back while you still can! + class XPathParser + include XMLTokens + LITERAL = /^'([^']*)'|^"([^"]*)"/u + + def namespaces=( namespaces ) + Functions::namespace_context = namespaces + @namespaces = namespaces + end + + def parse path + path = path.dup + path.gsub!(/([\(\[])\s+/, '\1') # Strip ignorable spaces + path.gsub!( /\s+([\]\)])/, '\1') + parsed = [] + rest = OrExpr(path, parsed) + if rest + unless rest.strip.empty? + raise ParseException.new("Garbage component exists at the end: " + + "<#{rest}>: <#{path}>") + end + end + parsed + end + + def predicate path + parsed = [] + Predicate( "[#{path}]", parsed ) + parsed + end + + def abbreviate( path ) + path = path.kind_of?(String) ? parse( path ) : path + string = "" + document = false + while path.size > 0 + op = path.shift + case op + when :node + when :attribute + string << "/" if string.size > 0 + string << "@" + when :child + string << "/" if string.size > 0 + when :descendant_or_self + string << "/" + when :self + string << "." + when :parent + string << ".." 
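+          # Remaining ops (wildcards, node-type tests, explicit axes, qualified names, predicates, functions, literals) are emitted in their abbreviated XPath form below.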
+ when :any + string << "*" + when :text + string << "text()" + when :following, :following_sibling, + :ancestor, :ancestor_or_self, :descendant, + :namespace, :preceding, :preceding_sibling + string << "/" unless string.size == 0 + string << op.to_s.tr("_", "-") + string << "::" + when :qname + prefix = path.shift + name = path.shift + string << prefix+":" if prefix.size > 0 + string << name + when :predicate + string << '[' + string << predicate_to_string( path.shift ) {|x| abbreviate( x ) } + string << ']' + when :document + document = true + when :function + string << path.shift + string << "( " + string << predicate_to_string( path.shift[0] ) {|x| abbreviate( x )} + string << " )" + when :literal + string << %Q{ "#{path.shift}" } + else + string << "/" unless string.size == 0 + string << "UNKNOWN(" + string << op.inspect + string << ")" + end + end + string = "/"+string if document + return string + end + + def expand( path ) + path = path.kind_of?(String) ? parse( path ) : path + string = "" + document = false + while path.size > 0 + op = path.shift + case op + when :node + string << "node()" + when :attribute, :child, :following, :following_sibling, + :ancestor, :ancestor_or_self, :descendant, :descendant_or_self, + :namespace, :preceding, :preceding_sibling, :self, :parent + string << "/" unless string.size == 0 + string << op.to_s.tr("_", "-") + string << "::" + when :any + string << "*" + when :qname + prefix = path.shift + name = path.shift + string << prefix+":" if prefix.size > 0 + string << name + when :predicate + string << '[' + string << predicate_to_string( path.shift ) { |x| expand(x) } + string << ']' + when :document + document = true + else + string << "/" unless string.size == 0 + string << "UNKNOWN(" + string << op.inspect + string << ")" + end + end + string = "/"+string if document + return string + end + + def predicate_to_string( path, &block ) + string = "" + case path[0] + when :and, :or, :mult, :plus, :minus, :neq, :eq, :lt, :gt, :lteq, :gteq, :div, :mod, :union + op = path.shift + case op + when :eq + op = "=" + when :lt + op = "<" + when :gt + op = ">" + when :lteq + op = "<=" + when :gteq + op = ">=" + when :neq + op = "!=" + when :union + op = "|" + end + left = predicate_to_string( path.shift, &block ) + right = predicate_to_string( path.shift, &block ) + string << " " + string << left + string << " " + string << op.to_s + string << " " + string << right + string << " " + when :function + path.shift + name = path.shift + string << name + string << "( " + string << predicate_to_string( path.shift, &block ) + string << " )" + when :literal + path.shift + string << " " + string << path.shift.inspect + string << " " + else + string << " " + string << yield( path ) + string << " " + end + return string.squeeze(" ") + end + + private + #LocationPath + # | RelativeLocationPath + # | '/' RelativeLocationPath? + # | '//' RelativeLocationPath + def LocationPath path, parsed + path = path.lstrip + if path[0] == ?/ + parsed << :document + if path[1] == ?/ + parsed << :descendant_or_self + parsed << :node + path = path[2..-1] + else + path = path[1..-1] + end + end + return RelativeLocationPath( path, parsed ) if path.size > 0 + end + + #RelativeLocationPath + # | Step + # | (AXIS_NAME '::' | '@' | '') AxisSpecifier + # NodeTest + # Predicate + # | '.' | '..' 
AbbreviatedStep + # | RelativeLocationPath '/' Step + # | RelativeLocationPath '//' Step + AXIS = /^(ancestor|ancestor-or-self|attribute|child|descendant|descendant-or-self|following|following-sibling|namespace|parent|preceding|preceding-sibling|self)::/ + def RelativeLocationPath path, parsed + loop do + original_path = path + path = path.lstrip + + return original_path if path.empty? + + # (axis or @ or <child::>) nodetest predicate > + # OR > / Step + # (. or ..) > + if path[0] == ?. + if path[1] == ?. + parsed << :parent + parsed << :node + path = path[2..-1] + else + parsed << :self + parsed << :node + path = path[1..-1] + end + else + path_before_axis_specifier = path + parsed_not_abberviated = [] + if path[0] == ?@ + parsed_not_abberviated << :attribute + path = path[1..-1] + # Goto Nodetest + elsif path =~ AXIS + parsed_not_abberviated << $1.tr('-','_').intern + path = $' + # Goto Nodetest + else + parsed_not_abberviated << :child + end + + path_before_node_test = path + path = NodeTest(path, parsed_not_abberviated) + if path == path_before_node_test + return path_before_axis_specifier + end + path = Predicate(path, parsed_not_abberviated) + + parsed.concat(parsed_not_abberviated) + end + + original_path = path + path = path.lstrip + return original_path if path.empty? + + return original_path if path[0] != ?/ + + if path[1] == ?/ + parsed << :descendant_or_self + parsed << :node + path = path[2..-1] + else + path = path[1..-1] + end + end + end + + # Returns a 1-1 map of the nodeset + # The contents of the resulting array are either: + # true/false, if a positive match + # String, if a name match + #NodeTest + # | ('*' | NCNAME ':' '*' | QNAME) NameTest + # | '*' ':' NCNAME NameTest since XPath 2.0 + # | NODE_TYPE '(' ')' NodeType + # | PI '(' LITERAL ')' PI + # | '[' expr ']' Predicate + PREFIX_WILDCARD = /^\*:(#{NCNAME_STR})/u + LOCAL_NAME_WILDCARD = /^(#{NCNAME_STR}):\*/u + QNAME = Namespace::NAMESPLIT + NODE_TYPE = /^(comment|text|node)\(\s*\)/m + PI = /^processing-instruction\(/ + def NodeTest path, parsed + original_path = path + path = path.lstrip + case path + when PREFIX_WILDCARD + prefix = nil + name = $1 + path = $' + parsed << :qname + parsed << prefix + parsed << name + when /^\*/ + path = $' + parsed << :any + when NODE_TYPE + type = $1 + path = $' + parsed << type.tr('-', '_').intern + when PI + path = $' + literal = nil + if path =~ /^\s*\)/ + path = $' + else + path =~ LITERAL + literal = $1 + path = $' + raise ParseException.new("Missing ')' after processing instruction") if path[0] != ?) 
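+            # Drop the closing ')' of the processing-instruction(...) node test.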
+ path = path[1..-1] + end + parsed << :processing_instruction + parsed << (literal || '') + when LOCAL_NAME_WILDCARD + prefix = $1 + path = $' + parsed << :namespace + parsed << prefix + when QNAME + prefix = $1 + name = $2 + path = $' + prefix = "" unless prefix + parsed << :qname + parsed << prefix + parsed << name + else + path = original_path + end + return path + end + + # Filters the supplied nodeset on the predicate(s) + def Predicate path, parsed + original_path = path + path = path.lstrip + return original_path unless path[0] == ?[ + predicates = [] + while path[0] == ?[ + path, expr = get_group(path) + predicates << expr[1..-2] if expr + end + predicates.each{ |pred| + preds = [] + parsed << :predicate + parsed << preds + OrExpr(pred, preds) + } + path + end + + # The following return arrays of true/false, a 1-1 mapping of the + # supplied nodeset, except for axe(), which returns a filtered + # nodeset + + #| OrExpr S 'or' S AndExpr + #| AndExpr + def OrExpr path, parsed + n = [] + rest = AndExpr( path, n ) + if rest != path + while rest =~ /^\s*( or )/ + n = [ :or, n, [] ] + rest = AndExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace(n) + elsif n.size > 0 + parsed << n + end + rest + end + + #| AndExpr S 'and' S EqualityExpr + #| EqualityExpr + def AndExpr path, parsed + n = [] + rest = EqualityExpr( path, n ) + if rest != path + while rest =~ /^\s*( and )/ + n = [ :and, n, [] ] + rest = EqualityExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace(n) + elsif n.size > 0 + parsed << n + end + rest + end + + #| EqualityExpr ('=' | '!=') RelationalExpr + #| RelationalExpr + def EqualityExpr path, parsed + n = [] + rest = RelationalExpr( path, n ) + if rest != path + while rest =~ /^\s*(!?=)\s*/ + if $1[0] == ?! 
+ n = [ :neq, n, [] ] + else + n = [ :eq, n, [] ] + end + rest = RelationalExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace(n) + elsif n.size > 0 + parsed << n + end + rest + end + + #| RelationalExpr ('<' | '>' | '<=' | '>=') AdditiveExpr + #| AdditiveExpr + def RelationalExpr path, parsed + n = [] + rest = AdditiveExpr( path, n ) + if rest != path + while rest =~ /^\s*([<>]=?)\s*/ + if $1[0] == ?< + sym = "lt" + else + sym = "gt" + end + sym << "eq" if $1[-1] == ?= + n = [ sym.intern, n, [] ] + rest = AdditiveExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace(n) + elsif n.size > 0 + parsed << n + end + rest + end + + #| AdditiveExpr ('+' | '-') MultiplicativeExpr + #| MultiplicativeExpr + def AdditiveExpr path, parsed + n = [] + rest = MultiplicativeExpr( path, n ) + if rest != path + while rest =~ /^\s*(\+|-)\s*/ + if $1[0] == ?+ + n = [ :plus, n, [] ] + else + n = [ :minus, n, [] ] + end + rest = MultiplicativeExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace(n) + elsif n.size > 0 + parsed << n + end + rest + end + + #| MultiplicativeExpr ('*' | S ('div' | 'mod') S) UnaryExpr + #| UnaryExpr + def MultiplicativeExpr path, parsed + n = [] + rest = UnaryExpr( path, n ) + if rest != path + while rest =~ /^\s*(\*| div | mod )\s*/ + if $1[0] == ?* + n = [ :mult, n, [] ] + elsif $1.include?( "div" ) + n = [ :div, n, [] ] + else + n = [ :mod, n, [] ] + end + rest = UnaryExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace(n) + elsif n.size > 0 + parsed << n + end + rest + end + + #| '-' UnaryExpr + #| UnionExpr + def UnaryExpr path, parsed + path =~ /^(\-*)/ + path = $' + if $1 and (($1.size % 2) != 0) + mult = -1 + else + mult = 1 + end + parsed << :neg if mult < 0 + + n = [] + path = UnionExpr( path, n ) + parsed.concat( n ) + path + end + + #| UnionExpr '|' PathExpr + #| PathExpr + def UnionExpr path, parsed + n = [] + rest = PathExpr( path, n ) + if rest != path + while rest =~ /^\s*(\|)\s*/ + n = [ :union, n, [] ] + rest = PathExpr( $', n[-1] ) + end + end + if parsed.size == 0 and n.size != 0 + parsed.replace( n ) + elsif n.size > 0 + parsed << n + end + rest + end + + #| LocationPath + #| FilterExpr ('/' | '//') RelativeLocationPath + def PathExpr path, parsed + path = path.lstrip + n = [] + rest = FilterExpr( path, n ) + if rest != path + if rest and rest[0] == ?/ + rest = RelativeLocationPath(rest, n) + parsed.concat(n) + return rest + end + end + rest = LocationPath(rest, n) if rest =~ /\A[\/\.\@\[\w*]/ + parsed.concat(n) + return rest + end + + #| FilterExpr Predicate + #| PrimaryExpr + def FilterExpr path, parsed + n = [] + path_before_primary_expr = path + path = PrimaryExpr(path, n) + return path_before_primary_expr if path == path_before_primary_expr + path = Predicate(path, n) + parsed.concat(n) + path + end + + #| VARIABLE_REFERENCE + #| '(' expr ')' + #| LITERAL + #| NUMBER + #| FunctionCall + VARIABLE_REFERENCE = /^\$(#{NAME_STR})/u + NUMBER = /^(\d*\.?\d+)/ + NT = /^comment|text|processing-instruction|node$/ + def PrimaryExpr path, parsed + case path + when VARIABLE_REFERENCE + varname = $1 + path = $' + parsed << :variable + parsed << varname + #arry << @variables[ varname ] + when /^(\w[-\w]*)(?:\()/ + fname = $1 + tmp = $' + return path if fname =~ NT + path = tmp + parsed << :function + parsed << fname + path = FunctionCall(path, parsed) + when NUMBER + varname = $1.nil? ? 
$2 : $1 + path = $' + parsed << :literal + parsed << (varname.include?('.') ? varname.to_f : varname.to_i) + when LITERAL + varname = $1.nil? ? $2 : $1 + path = $' + parsed << :literal + parsed << varname + when /^\(/ #/ + path, contents = get_group(path) + contents = contents[1..-2] + n = [] + OrExpr( contents, n ) + parsed.concat(n) + end + path + end + + #| FUNCTION_NAME '(' ( expr ( ',' expr )* )? ')' + def FunctionCall rest, parsed + path, arguments = parse_args(rest) + argset = [] + for argument in arguments + args = [] + OrExpr( argument, args ) + argset << args + end + parsed << argset + path + end + + # get_group( '[foo]bar' ) -> ['bar', '[foo]'] + def get_group string + ind = 0 + depth = 0 + st = string[0,1] + en = (st == "(" ? ")" : "]") + begin + case string[ind,1] + when st + depth += 1 + when en + depth -= 1 + end + ind += 1 + end while depth > 0 and ind < string.length + return nil unless depth==0 + [string[ind..-1], string[0..ind-1]] + end + + def parse_args( string ) + arguments = [] + ind = 0 + inquot = false + inapos = false + depth = 1 + begin + case string[ind] + when ?" + inquot = !inquot unless inapos + when ?' + inapos = !inapos unless inquot + else + unless inquot or inapos + case string[ind] + when ?( + depth += 1 + if depth == 1 + string = string[1..-1] + ind -= 1 + end + when ?) + depth -= 1 + if depth == 0 + s = string[0,ind].strip + arguments << s unless s == "" + string = string[ind+1..-1] + end + when ?, + if depth == 1 + s = string[0,ind].strip + arguments << s unless s == "" + string = string[ind+1..-1] + ind = -1 + end + end + end + end + ind += 1 + end while depth > 0 and ind < string.length + return nil unless depth==0 + [string,arguments] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/quickpath.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/quickpath.rb new file mode 100644 index 0000000..a0466b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/quickpath.rb @@ -0,0 +1,266 @@ +# frozen_string_literal: false +require_relative 'functions' +require_relative 'xmltokens' + +module REXML + class QuickPath + include Functions + include XMLTokens + + # A base Hash object to be used when initializing a + # default empty namespaces set. + EMPTY_HASH = {} + + def QuickPath::first element, path, namespaces=EMPTY_HASH + match(element, path, namespaces)[0] + end + + def QuickPath::each element, path, namespaces=EMPTY_HASH, &block + path = "*" unless path + match(element, path, namespaces).each( &block ) + end + + def QuickPath::match element, path, namespaces=EMPTY_HASH + raise "nil is not a valid xpath" unless path + results = nil + Functions::namespace_context = namespaces + case path + when /^\/([^\/]|$)/u + # match on root + path = path[1..-1] + return [element.root.parent] if path == '' + results = filter([element.root], path) + when /^[-\w]*::/u + results = filter([element], path) + when /^\*/u + results = filter(element.to_a, path) + when /^[\[!\w:]/u + # match on child + children = element.to_a + results = filter(children, path) + else + results = filter([element], path) + end + return results + end + + # Given an array of nodes it filters the array based on the path. The + # result is that when this method returns, the array will contain elements + # which match the path + def QuickPath::filter elements, path + return elements if path.nil? 
or path == '' or elements.size == 0 + case path + when /^\/\//u # Descendant + return axe( elements, "descendant-or-self", $' ) + when /^\/?\b(\w[-\w]*)\b::/u # Axe + return axe( elements, $1, $' ) + when /^\/(?=\b([:!\w][-\.\w]*:)?[-!\*\.\w]*\b([^:(]|$)|\*)/u # Child + rest = $' + results = [] + elements.each do |element| + results |= filter( element.to_a, rest ) + end + return results + when /^\/?(\w[-\w]*)\(/u # / Function + return function( elements, $1, $' ) + when Namespace::NAMESPLIT # Element name + name = $2 + ns = $1 + rest = $' + elements.delete_if do |element| + !(element.kind_of? Element and + (element.expanded_name == name or + (element.name == name and + element.namespace == Functions.namespace_context[ns]))) + end + return filter( elements, rest ) + when /^\/\[/u + matches = [] + elements.each do |element| + matches |= predicate( element.to_a, path[1..-1] ) if element.kind_of? Element + end + return matches + when /^\[/u # Predicate + return predicate( elements, path ) + when /^\/?\.\.\./u # Ancestor + return axe( elements, "ancestor", $' ) + when /^\/?\.\./u # Parent + return filter( elements.collect{|e|e.parent}, $' ) + when /^\/?\./u # Self + return filter( elements, $' ) + when /^\*/u # Any + results = [] + elements.each do |element| + results |= filter( [element], $' ) if element.kind_of? Element + #if element.kind_of? Element + # children = element.to_a + # children.delete_if { |child| !child.kind_of?(Element) } + # results |= filter( children, $' ) + #end + end + return results + end + return [] + end + + def QuickPath::axe( elements, axe_name, rest ) + matches = [] + matches = filter( elements.dup, rest ) if axe_name =~ /-or-self$/u + case axe_name + when /^descendant/u + elements.each do |element| + matches |= filter( element.to_a, "descendant-or-self::#{rest}" ) if element.kind_of? Element + end + when /^ancestor/u + elements.each do |element| + while element.parent + matches << element.parent + element = element.parent + end + end + matches = filter( matches, rest ) + when "self" + matches = filter( elements, rest ) + when "child" + elements.each do |element| + matches |= filter( element.to_a, rest ) if element.kind_of? Element + end + when "attribute" + elements.each do |element| + matches << element.attributes[ rest ] if element.kind_of? Element + end + when "parent" + matches = filter(elements.collect{|element| element.parent}.uniq, rest) + when "following-sibling" + matches = filter(elements.collect{|element| element.next_sibling}.uniq, + rest) + when "previous-sibling" + matches = filter(elements.collect{|element| + element.previous_sibling}.uniq, rest ) + end + return matches.uniq + end + + OPERAND_ = '((?=(?:(?!and|or).)*[^\s<>=])[^\s<>=]+)' + # A predicate filters a node-set with respect to an axis to produce a + # new node-set. For each node in the node-set to be filtered, the + # PredicateExpr is evaluated with that node as the context node, with + # the number of nodes in the node-set as the context size, and with the + # proximity position of the node in the node-set with respect to the + # axis as the context position; if PredicateExpr evaluates to true for + # that node, the node is included in the new node-set; otherwise, it is + # not included. + # + # A PredicateExpr is evaluated by evaluating the Expr and converting + # the result to a boolean. 
If the result is a number, the result will + # be converted to true if the number is equal to the context position + # and will be converted to false otherwise; if the result is not a + # number, then the result will be converted as if by a call to the + # boolean function. Thus a location path para[3] is equivalent to + # para[position()=3]. + def QuickPath::predicate( elements, path ) + ind = 1 + bcount = 1 + while bcount > 0 + bcount += 1 if path[ind] == ?[ + bcount -= 1 if path[ind] == ?] + ind += 1 + end + ind -= 1 + predicate = path[1..ind-1] + rest = path[ind+1..-1] + + # have to change 'a [=<>] b [=<>] c' into 'a [=<>] b and b [=<>] c' + # + predicate.gsub!( + /#{OPERAND_}\s*([<>=])\s*#{OPERAND_}\s*([<>=])\s*#{OPERAND_}/u, + '\1 \2 \3 and \3 \4 \5' ) + # Let's do some Ruby trickery to avoid some work: + predicate.gsub!( /&/u, "&&" ) + predicate.gsub!( /=/u, "==" ) + predicate.gsub!( /@(\w[-\w.]*)/u, 'attribute("\1")' ) + predicate.gsub!( /\bmod\b/u, "%" ) + predicate.gsub!( /\b(\w[-\w.]*\()/u ) { + fname = $1 + fname.gsub( /-/u, "_" ) + } + + Functions.pair = [ 0, elements.size ] + results = [] + elements.each do |element| + Functions.pair[0] += 1 + Functions.node = element + res = eval( predicate ) + case res + when true + results << element + when Integer + results << element if Functions.pair[0] == res + when String + results << element + end + end + return filter( results, rest ) + end + + def QuickPath::attribute( name ) + return Functions.node.attributes[name] if Functions.node.kind_of? Element + end + + def QuickPath::name() + return Functions.node.name if Functions.node.kind_of? Element + end + + def QuickPath::method_missing( id, *args ) + begin + Functions.send( id.id2name, *args ) + rescue Exception + raise "METHOD: #{id.id2name}(#{args.join ', '})\n#{$!.message}" + end + end + + def QuickPath::function( elements, fname, rest ) + args = parse_args( elements, rest ) + Functions.pair = [0, elements.size] + results = [] + elements.each do |element| + Functions.pair[0] += 1 + Functions.node = element + res = Functions.send( fname, *args ) + case res + when true + results << element + when Integer + results << element if Functions.pair[0] == res + end + end + return results + end + + def QuickPath::parse_args( element, string ) + # /.*?(?:\)|,)/ + arguments = [] + buffer = "" + while string and string != "" + c = string[0] + string.sub!(/^./u, "") + case c + when ?, + # if depth = 1, then we start a new argument + arguments << evaluate( buffer ) + #arguments << evaluate( string[0..count] ) + when ?( + # start a new method call + function( element, buffer, string ) + buffer = "" + when ?) + # close the method call and return arguments + return arguments + else + buffer << c + end + end + "" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/rexml.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/rexml.rb new file mode 100644 index 0000000..8a01f0e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/rexml.rb @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# frozen_string_literal: false +# +# \Module \REXML provides classes and methods for parsing, +# editing, and generating XML. +# +# == Implementation +# +# \REXML: +# - Is pure Ruby. +# - Provides tree, stream, SAX2, pull, and lightweight APIs. +# - Conforms to {XML version 1.0}[https://www.w3.org/TR/REC-xml/]. +# - Fully implements {XPath version 1.0}[http://www.w3c.org/tr/xpath]. +# - Is {non-validating}[https://www.w3.org/TR/xml/]. 
+# - Passes 100% of the non-validating {Oasis tests}[http://www.oasis-open.org/committees/xml-conformance/xml-test-suite.shtml]. +# +# == In a Hurry? +# +# If you're somewhat familiar with XML +# and have a particular task in mind, +# you may want to see {the tasks pages}[doc/rexml/tasks/tocs/master_toc_rdoc.html]. +# +# == API +# +# Among the most important classes for using \REXML are: +# - REXML::Document. +# - REXML::Element. +# +module REXML + COPYRIGHT = "Copyright © 2001-2008 Sean Russell <ser@germane-software.com>" + DATE = "2008/019" + VERSION = "3.2.5" + REVISION = "" + + Copyright = COPYRIGHT + Version = VERSION +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/sax2listener.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/sax2listener.rb new file mode 100644 index 0000000..5afdc80 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/sax2listener.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: false +module REXML + # A template for stream parser listeners. + # Note that the declarations (attlistdecl, elementdecl, etc) are trivially + # processed; REXML doesn't yet handle doctype entity declarations, so you + # have to parse them out yourself. + # === Missing methods from SAX2 + # ignorable_whitespace + # === Methods extending SAX2 + # +WARNING+ + # These methods are certainly going to change, until DTDs are fully + # supported. Be aware of this. + # start_document + # end_document + # doctype + # elementdecl + # attlistdecl + # entitydecl + # notationdecl + # cdata + # xmldecl + # comment + module SAX2Listener + def start_document + end + def end_document + end + def start_prefix_mapping prefix, uri + end + def end_prefix_mapping prefix + end + def start_element uri, localname, qname, attributes + end + def end_element uri, localname, qname + end + def characters text + end + def processing_instruction target, data + end + # Handles a doctype declaration. Any attributes of the doctype which are + # not supplied will be nil. # EG, <!DOCTYPE me PUBLIC "foo" "bar"> + # @p name the name of the doctype; EG, "me" + # @p pub_sys "PUBLIC", "SYSTEM", or nil. EG, "PUBLIC" + # @p long_name the supplied long name, or nil. EG, "foo" + # @p uri the uri of the doctype, or nil. EG, "bar" + def doctype name, pub_sys, long_name, uri + end + # If a doctype includes an ATTLIST declaration, it will cause this + # method to be called. The content is the declaration itself, unparsed. + # EG, <!ATTLIST el attr CDATA #REQUIRED> will come to this method as "el + # attr CDATA #REQUIRED". This is the same for all of the .*decl + # methods. + def attlistdecl(element, pairs, contents) + end + # <!ELEMENT ...> + def elementdecl content + end + # <!ENTITY ...> + # The argument passed to this method is an array of the entity + # declaration. 
It can be in a number of formats, but in general it + # returns (example, result): + # <!ENTITY % YN '"Yes"'> + # ["%", "YN", "\"Yes\""] + # <!ENTITY % YN 'Yes'> + # ["%", "YN", "Yes"] + # <!ENTITY WhatHeSaid "He said %YN;"> + # ["WhatHeSaid", "He said %YN;"] + # <!ENTITY open-hatch SYSTEM "http://www.textuality.com/boilerplate/OpenHatch.xml"> + # ["open-hatch", "SYSTEM", "http://www.textuality.com/boilerplate/OpenHatch.xml"] + # <!ENTITY open-hatch PUBLIC "-//Textuality//TEXT Standard open-hatch boilerplate//EN" "http://www.textuality.com/boilerplate/OpenHatch.xml"> + # ["open-hatch", "PUBLIC", "-//Textuality//TEXT Standard open-hatch boilerplate//EN", "http://www.textuality.com/boilerplate/OpenHatch.xml"] + # <!ENTITY hatch-pic SYSTEM "../grafix/OpenHatch.gif" NDATA gif> + # ["hatch-pic", "SYSTEM", "../grafix/OpenHatch.gif", "NDATA", "gif"] + def entitydecl declaration + end + # <!NOTATION ...> + def notationdecl name, public_or_system, public_id, system_id + end + # Called when <![CDATA[ ... ]]> is encountered in a document. + # @p content "..." + def cdata content + end + # Called when an XML PI is encountered in the document. + # EG: <?xml version="1.0" encoding="utf"?> + # @p version the version attribute value. EG, "1.0" + # @p encoding the encoding attribute value, or nil. EG, "utf" + # @p standalone the standalone attribute value, or nil. EG, nil + # @p spaced the declaration is followed by a line break + def xmldecl version, encoding, standalone + end + # Called when a comment is encountered. + # @p comment The content of the comment + def comment comment + end + def progress position + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/security.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/security.rb new file mode 100644 index 0000000..99b7460 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/security.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: false +module REXML + module Security + @@entity_expansion_limit = 10_000 + + # Set the entity expansion limit. By default the limit is set to 10000. + def self.entity_expansion_limit=( val ) + @@entity_expansion_limit = val + end + + # Get the entity expansion limit. By default the limit is set to 10000. + def self.entity_expansion_limit + return @@entity_expansion_limit + end + + @@entity_expansion_text_limit = 10_240 + + # Set the entity expansion limit. By default the limit is set to 10240. + def self.entity_expansion_text_limit=( val ) + @@entity_expansion_text_limit = val + end + + # Get the entity expansion limit. By default the limit is set to 10240. + def self.entity_expansion_text_limit + return @@entity_expansion_text_limit + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/source.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/source.rb new file mode 100644 index 0000000..90b370b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/source.rb @@ -0,0 +1,298 @@ +# coding: US-ASCII +# frozen_string_literal: false +require_relative 'encoding' + +module REXML + # Generates Source-s. USE THIS CLASS. + class SourceFactory + # Generates a Source object + # @param arg Either a String, or an IO + # @return a Source, or nil if a bad argument was given + def SourceFactory::create_from(arg) + if arg.respond_to? :read and + arg.respond_to? :readline and + arg.respond_to? :nil? and + arg.respond_to? :eof? + IOSource.new(arg) + elsif arg.respond_to? 
:to_str + require 'stringio' + IOSource.new(StringIO.new(arg)) + elsif arg.kind_of? Source + arg + else + raise "#{arg.class} is not a valid input stream. It must walk \n"+ + "like either a String, an IO, or a Source." + end + end + end + + # A Source can be searched for patterns, and wraps buffers and other + # objects and provides consumption of text + class Source + include Encoding + # The current buffer (what we're going to read next) + attr_reader :buffer + # The line number of the last consumed text + attr_reader :line + attr_reader :encoding + + # Constructor + # @param arg must be a String, and should be a valid XML document + # @param encoding if non-null, sets the encoding of the source to this + # value, overriding all encoding detection + def initialize(arg, encoding=nil) + @orig = @buffer = arg + if encoding + self.encoding = encoding + else + detect_encoding + end + @line = 0 + end + + + # Inherited from Encoding + # Overridden to support optimized en/decoding + def encoding=(enc) + return unless super + encoding_updated + end + + # Scans the source for a given pattern. Note, that this is not your + # usual scan() method. For one thing, the pattern argument has some + # requirements; for another, the source can be consumed. You can easily + # confuse this method. Originally, the patterns were easier + # to construct and this method more robust, because this method + # generated search regexps on the fly; however, this was + # computationally expensive and slowed down the entire REXML package + # considerably, since this is by far the most commonly called method. + # @param pattern must be a Regexp, and must be in the form of + # /^\s*(#{your pattern, with no groups})(.*)/. The first group + # will be returned; the second group is used if the consume flag is + # set. + # @param consume if true, the pattern returned will be consumed, leaving + # everything after it in the Source. + # @return the pattern, if found, or nil if the Source is empty or the + # pattern is not found. + def scan(pattern, cons=false) + return nil if @buffer.nil? + rv = @buffer.scan(pattern) + @buffer = $' if cons and rv.size>0 + rv + end + + def read + end + + def consume( pattern ) + @buffer = $' if pattern.match( @buffer ) + end + + def match_to( char, pattern ) + return pattern.match(@buffer) + end + + def match_to_consume( char, pattern ) + md = pattern.match(@buffer) + @buffer = $' + return md + end + + def match(pattern, cons=false) + md = pattern.match(@buffer) + @buffer = $' if cons and md + return md + end + + # @return true if the Source is exhausted + def empty? + @buffer == "" + end + + def position + @orig.index( @buffer ) + end + + # @return the current line in the source + def current_line + lines = @orig.split + res = lines.grep @buffer[0..30] + res = res[-1] if res.kind_of? 
Array + lines.index( res ) if res + end + + private + def detect_encoding + buffer_encoding = @buffer.encoding + detected_encoding = "UTF-8" + begin + @buffer.force_encoding("ASCII-8BIT") + if @buffer[0, 2] == "\xfe\xff" + @buffer[0, 2] = "" + detected_encoding = "UTF-16BE" + elsif @buffer[0, 2] == "\xff\xfe" + @buffer[0, 2] = "" + detected_encoding = "UTF-16LE" + elsif @buffer[0, 3] == "\xef\xbb\xbf" + @buffer[0, 3] = "" + detected_encoding = "UTF-8" + end + ensure + @buffer.force_encoding(buffer_encoding) + end + self.encoding = detected_encoding + end + + def encoding_updated + if @encoding != 'UTF-8' + @buffer = decode(@buffer) + @to_utf = true + else + @to_utf = false + @buffer.force_encoding ::Encoding::UTF_8 + end + end + end + + # A Source that wraps an IO. See the Source class for method + # documentation + class IOSource < Source + #attr_reader :block_size + + # block_size has been deprecated + def initialize(arg, block_size=500, encoding=nil) + @er_source = @source = arg + @to_utf = false + @pending_buffer = nil + + if encoding + super("", encoding) + else + super(@source.read(3) || "") + end + + if !@to_utf and + @buffer.respond_to?(:force_encoding) and + @source.respond_to?(:external_encoding) and + @source.external_encoding != ::Encoding::UTF_8 + @force_utf8 = true + else + @force_utf8 = false + end + end + + def scan(pattern, cons=false) + rv = super + # You'll notice that this next section is very similar to the same + # section in match(), but just a liiittle different. This is + # because it is a touch faster to do it this way with scan() + # than the way match() does it; enough faster to warrant duplicating + # some code + if rv.size == 0 + until @buffer =~ pattern or @source.nil? + begin + @buffer << readline + rescue Iconv::IllegalSequence + raise + rescue + @source = nil + end + end + rv = super + end + rv.taint if RUBY_VERSION < '2.7' + rv + end + + def read + begin + @buffer << readline + rescue Exception, NameError + @source = nil + end + end + + def consume( pattern ) + match( pattern, true ) + end + + def match( pattern, cons=false ) + rv = pattern.match(@buffer) + @buffer = $' if cons and rv + while !rv and @source + begin + @buffer << readline + rv = pattern.match(@buffer) + @buffer = $' if cons and rv + rescue + @source = nil + end + end + rv.taint if RUBY_VERSION < '2.7' + rv + end + + def empty? + super and ( @source.nil? || @source.eof? ) + end + + def position + @er_source.pos rescue 0 + end + + # @return the current line in the source + def current_line + begin + pos = @er_source.pos # The byte position in the source + lineno = @er_source.lineno # The XML < position in the source + @er_source.rewind + line = 0 # The \r\n position in the source + begin + while @er_source.pos < pos + @er_source.readline + line += 1 + end + rescue + end + @er_source.seek(pos) + rescue IOError + pos = -1 + line = -1 + end + [pos, lineno, line] + end + + private + def readline + str = @source.readline(@line_break) + if @pending_buffer + if str.nil? + str = @pending_buffer + else + str = @pending_buffer + str + end + @pending_buffer = nil + end + return nil if str.nil? 
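+          # From here the line is normalized to UTF-8: decode() is used when the
+          # declared encoding differs from UTF-8; otherwise the string is only
+          # re-tagged as UTF-8 when the underlying IO reports a different
+          # external encoding.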
+ + if @to_utf + decode(str) + else + str.force_encoding(::Encoding::UTF_8) if @force_utf8 + str + end + end + + def encoding_updated + case @encoding + when "UTF-16BE", "UTF-16LE" + @source.binmode + @source.set_encoding(@encoding, @encoding) + end + @line_break = encode(">") + @pending_buffer, @buffer = @buffer, "" + @pending_buffer.force_encoding(@encoding) + super + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/streamlistener.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/streamlistener.rb new file mode 100644 index 0000000..30c8945 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/streamlistener.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: false +module REXML + # A template for stream parser listeners. + # Note that the declarations (attlistdecl, elementdecl, etc) are trivially + # processed; REXML doesn't yet handle doctype entity declarations, so you + # have to parse them out yourself. + module StreamListener + # Called when a tag is encountered. + # @p name the tag name + # @p attrs an array of arrays of attribute/value pairs, suitable for + # use with assoc or rassoc. IE, <tag attr1="value1" attr2="value2"> + # will result in + # tag_start( "tag", # [["attr1","value1"],["attr2","value2"]]) + def tag_start name, attrs + end + # Called when the end tag is reached. In the case of <tag/>, tag_end + # will be called immediately after tag_start + # @p the name of the tag + def tag_end name + end + # Called when text is encountered in the document + # @p text the text content. + def text text + end + # Called when an instruction is encountered. EG: <?xsl sheet='foo'?> + # @p name the instruction name; in the example, "xsl" + # @p instruction the rest of the instruction. In the example, + # "sheet='foo'" + def instruction name, instruction + end + # Called when a comment is encountered. + # @p comment The content of the comment + def comment comment + end + # Handles a doctype declaration. Any attributes of the doctype which are + # not supplied will be nil. # EG, <!DOCTYPE me PUBLIC "foo" "bar"> + # @p name the name of the doctype; EG, "me" + # @p pub_sys "PUBLIC", "SYSTEM", or nil. EG, "PUBLIC" + # @p long_name the supplied long name, or nil. EG, "foo" + # @p uri the uri of the doctype, or nil. EG, "bar" + def doctype name, pub_sys, long_name, uri + end + # Called when the doctype is done + def doctype_end + end + # If a doctype includes an ATTLIST declaration, it will cause this + # method to be called. The content is the declaration itself, unparsed. + # EG, <!ATTLIST el attr CDATA #REQUIRED> will come to this method as "el + # attr CDATA #REQUIRED". This is the same for all of the .*decl + # methods. + def attlistdecl element_name, attributes, raw_content + end + # <!ELEMENT ...> + def elementdecl content + end + # <!ENTITY ...> + # The argument passed to this method is an array of the entity + # declaration. 
It can be in a number of formats, but in general it + # returns (example, result): + # <!ENTITY % YN '"Yes"'> + # ["YN", "\"Yes\"", "%"] + # <!ENTITY % YN 'Yes'> + # ["YN", "Yes", "%"] + # <!ENTITY WhatHeSaid "He said %YN;"> + # ["WhatHeSaid", "He said %YN;"] + # <!ENTITY open-hatch SYSTEM "http://www.textuality.com/boilerplate/OpenHatch.xml"> + # ["open-hatch", "SYSTEM", "http://www.textuality.com/boilerplate/OpenHatch.xml"] + # <!ENTITY open-hatch PUBLIC "-//Textuality//TEXT Standard open-hatch boilerplate//EN" "http://www.textuality.com/boilerplate/OpenHatch.xml"> + # ["open-hatch", "PUBLIC", "-//Textuality//TEXT Standard open-hatch boilerplate//EN", "http://www.textuality.com/boilerplate/OpenHatch.xml"] + # <!ENTITY hatch-pic SYSTEM "../grafix/OpenHatch.gif" NDATA gif> + # ["hatch-pic", "SYSTEM", "../grafix/OpenHatch.gif", "gif"] + def entitydecl content + end + # <!NOTATION ...> + def notationdecl content + end + # Called when %foo; is encountered in a doctype declaration. + # @p content "foo" + def entity content + end + # Called when <![CDATA[ ... ]]> is encountered in a document. + # @p content "..." + def cdata content + end + # Called when an XML PI is encountered in the document. + # EG: <?xml version="1.0" encoding="utf"?> + # @p version the version attribute value. EG, "1.0" + # @p encoding the encoding attribute value, or nil. EG, "utf" + # @p standalone the standalone attribute value, or nil. EG, nil + def xmldecl version, encoding, standalone + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/text.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/text.rb new file mode 100644 index 0000000..050b09c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/text.rb @@ -0,0 +1,424 @@ +# frozen_string_literal: false +require_relative 'security' +require_relative 'entity' +require_relative 'doctype' +require_relative 'child' +require_relative 'doctype' +require_relative 'parseexception' + +module REXML + # Represents text nodes in an XML document + class Text < Child + include Comparable + # The order in which the substitutions occur + SPECIALS = [ /&(?!#?[\w-]+;)/u, /</u, />/u, /"/u, /'/u, /\r/u ] + SUBSTITUTES = ['&', '<', '>', '"', ''', ' '] + # Characters which are substituted in written strings + SLAICEPS = [ '<', '>', '"', "'", '&' ] + SETUTITSBUS = [ /</u, />/u, /"/u, /'/u, /&/u ] + + # If +raw+ is true, then REXML leaves the value alone + attr_accessor :raw + + NEEDS_A_SECOND_CHECK = /(<|&((#{Entity::NAME});|(#0*((?:\d+)|(?:x[a-fA-F0-9]+)));)?)/um + NUMERICENTITY = /�*((?:\d+)|(?:x[a-fA-F0-9]+));/ + VALID_CHAR = [ + 0x9, 0xA, 0xD, + (0x20..0xD7FF), + (0xE000..0xFFFD), + (0x10000..0x10FFFF) + ] + + if String.method_defined? 
:encode + VALID_XML_CHARS = Regexp.new('^['+ + VALID_CHAR.map { |item| + case item + when Integer + [item].pack('U').force_encoding('utf-8') + when Range + [item.first, '-'.ord, item.last].pack('UUU').force_encoding('utf-8') + end + }.join + + ']*$') + else + VALID_XML_CHARS = /^( + [\x09\x0A\x0D\x20-\x7E] # ASCII + | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE][\x80-\xBF]{2} # straight 3-byte + | \xEF[\x80-\xBE]{2} # + | \xEF\xBF[\x80-\xBD] # excluding U+fffe and U+ffff + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )*$/nx; + end + + # Constructor + # +arg+ if a String, the content is set to the String. If a Text, + # the object is shallowly cloned. + # + # +respect_whitespace+ (boolean, false) if true, whitespace is + # respected + # + # +parent+ (nil) if this is a Parent object, the parent + # will be set to this. + # + # +raw+ (nil) This argument can be given three values. + # If true, then the value of used to construct this object is expected to + # contain no unescaped XML markup, and REXML will not change the text. If + # this value is false, the string may contain any characters, and REXML will + # escape any and all defined entities whose values are contained in the + # text. If this value is nil (the default), then the raw value of the + # parent will be used as the raw value for this node. If there is no raw + # value for the parent, and no value is supplied, the default is false. + # Use this field if you have entities defined for some text, and you don't + # want REXML to escape that text in output. + # Text.new( "<&", false, nil, false ) #-> "<&" + # Text.new( "<&", false, nil, false ) #-> "&lt;&amp;" + # Text.new( "<&", false, nil, true ) #-> Parse exception + # Text.new( "<&", false, nil, true ) #-> "<&" + # # Assume that the entity "s" is defined to be "sean" + # # and that the entity "r" is defined to be "russell" + # Text.new( "sean russell" ) #-> "&s; &r;" + # Text.new( "sean russell", false, nil, true ) #-> "sean russell" + # + # +entity_filter+ (nil) This can be an array of entities to match in the + # supplied text. This argument is only useful if +raw+ is set to false. + # Text.new( "sean russell", false, nil, false, ["s"] ) #-> "&s; russell" + # Text.new( "sean russell", false, nil, true, ["s"] ) #-> "sean russell" + # In the last example, the +entity_filter+ argument is ignored. + # + # +illegal+ INTERNAL USE ONLY + def initialize(arg, respect_whitespace=false, parent=nil, raw=nil, + entity_filter=nil, illegal=NEEDS_A_SECOND_CHECK ) + + @raw = false + @parent = nil + @entity_filter = nil + + if parent + super( parent ) + @raw = parent.raw + end + + if arg.kind_of? String + @string = arg.dup + elsif arg.kind_of? Text + @string = arg.instance_variable_get(:@string).dup + @raw = arg.raw + @entity_filter = arg.instance_variable_get(:@entity_filter) + else + raise "Illegal argument of type #{arg.type} for Text constructor (#{arg})" + end + + @string.squeeze!(" \n\t") unless respect_whitespace + @string.gsub!(/\r\n?/, "\n") + @raw = raw unless raw.nil? 
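+      # An explicit +raw+ argument overrides the raw mode inherited from the
+      # parent above.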
+ @entity_filter = entity_filter if entity_filter + clear_cache + + Text.check(@string, illegal, doctype) if @raw + end + + def parent= parent + super(parent) + Text.check(@string, NEEDS_A_SECOND_CHECK, doctype) if @raw and @parent + end + + # check for illegal characters + def Text.check string, pattern, doctype + + # illegal anywhere + if string !~ VALID_XML_CHARS + if String.method_defined? :encode + string.chars.each do |c| + case c.ord + when *VALID_CHAR + else + raise "Illegal character #{c.inspect} in raw string #{string.inspect}" + end + end + else + string.scan(/[\x00-\x7F]|[\x80-\xBF][\xC0-\xF0]*|[\xC0-\xF0]/n) do |c| + case c.unpack('U') + when *VALID_CHAR + else + raise "Illegal character #{c.inspect} in raw string #{string.inspect}" + end + end + end + end + + # context sensitive + string.scan(pattern) do + if $1[-1] != ?; + raise "Illegal character #{$1.inspect} in raw string #{string.inspect}" + elsif $1[0] == ?& + if $5 and $5[0] == ?# + case ($5[1] == ?x ? $5[2..-1].to_i(16) : $5[1..-1].to_i) + when *VALID_CHAR + else + raise "Illegal character #{$1.inspect} in raw string #{string.inspect}" + end + # FIXME: below can't work but this needs API change. + # elsif @parent and $3 and !SUBSTITUTES.include?($1) + # if !doctype or !doctype.entities.has_key?($3) + # raise "Undeclared entity '#{$1}' in raw string \"#{string}\"" + # end + end + end + end + end + + def node_type + :text + end + + def empty? + @string.size==0 + end + + + def clone + return Text.new(self, true) + end + + + # Appends text to this text node. The text is appended in the +raw+ mode + # of this text node. + # + # +returns+ the text itself to enable method chain like + # 'text << "XXX" << "YYY"'. + def <<( to_append ) + @string << to_append.gsub( /\r\n?/, "\n" ) + clear_cache + self + end + + + # +other+ a String or a Text + # +returns+ the result of (to_s <=> arg.to_s) + def <=>( other ) + to_s() <=> other.to_s + end + + def doctype + if @parent + doc = @parent.document + doc.doctype if doc + end + end + + REFERENCE = /#{Entity::REFERENCE}/ + # Returns the string value of this text node. This string is always + # escaped, meaning that it is a valid XML text node string, and all + # entities that can be escaped, have been inserted. This method respects + # the entity filter set in the constructor. + # + # # Assume that the entity "s" is defined to be "sean", and that the + # # entity "r" is defined to be "russell" + # t = Text.new( "< & sean russell", false, nil, false, ['s'] ) + # t.to_s #-> "< & &s; russell" + # t = Text.new( "< & &s; russell", false, nil, false ) + # t.to_s #-> "< & &s; russell" + # u = Text.new( "sean russell", false, nil, true ) + # u.to_s #-> "sean russell" + def to_s + return @string if @raw + @normalized ||= Text::normalize( @string, doctype, @entity_filter ) + end + + def inspect + @string.inspect + end + + # Returns the string value of this text. This is the text without + # entities, as it might be used programmatically, or printed to the + # console. This ignores the 'raw' attribute setting, and any + # entity_filter. 
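+    #   Usage sketch (illustrative, not part of the upstream file; only the
+    #   default XML entities are involved):
+    #     t = Text.new( "1 < 2 & 3" )
+    #     t.to_s    #-> "1 &lt; 2 &amp; 3"
+    #     t.value   #-> "1 < 2 & 3"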
+ # + # # Assume that the entity "s" is defined to be "sean", and that the + # # entity "r" is defined to be "russell" + # t = Text.new( "< & sean russell", false, nil, false, ['s'] ) + # t.value #-> "< & sean russell" + # t = Text.new( "< & &s; russell", false, nil, false ) + # t.value #-> "< & sean russell" + # u = Text.new( "sean russell", false, nil, true ) + # u.value #-> "sean russell" + def value + @unnormalized ||= Text::unnormalize( @string, doctype ) + end + + # Sets the contents of this text node. This expects the text to be + # unnormalized. It returns self. + # + # e = Element.new( "a" ) + # e.add_text( "foo" ) # <a>foo</a> + # e[0].value = "bar" # <a>bar</a> + # e[0].value = "<a>" # <a><a></a> + def value=( val ) + @string = val.gsub( /\r\n?/, "\n" ) + clear_cache + @raw = false + end + + def wrap(string, width, addnewline=false) + # Recursively wrap string at width. + return string if string.length <= width + place = string.rindex(' ', width) # Position in string with last ' ' before cutoff + if addnewline then + return "\n" + string[0,place] + "\n" + wrap(string[place+1..-1], width) + else + return string[0,place] + "\n" + wrap(string[place+1..-1], width) + end + end + + def indent_text(string, level=1, style="\t", indentfirstline=true) + return string if level < 0 + new_string = '' + string.each_line { |line| + indent_string = style * level + new_line = (indent_string + line).sub(/[\s]+$/,'') + new_string << new_line + } + new_string.strip! unless indentfirstline + return new_string + end + + # == DEPRECATED + # See REXML::Formatters + # + def write( writer, indent=-1, transitive=false, ie_hack=false ) + Kernel.warn("#{self.class.name}.write is deprecated. See REXML::Formatters", uplevel: 1) + formatter = if indent > -1 + REXML::Formatters::Pretty.new( indent ) + else + REXML::Formatters::Default.new + end + formatter.write( self, writer ) + end + + # FIXME + # This probably won't work properly + def xpath + path = @parent.xpath + path += "/text()" + return path + end + + # Writes out text, substituting special characters beforehand. + # +out+ A String, IO, or any other object supporting <<( String ) + # +input+ the text to substitute and the write out + # + # z=utf8.unpack("U*") + # ascOut="" + # z.each{|r| + # if r < 0x100 + # ascOut.concat(r.chr) + # else + # ascOut.concat(sprintf("&#x%x;", r)) + # end + # } + # puts ascOut + def write_with_substitution out, input + copy = input.clone + # Doing it like this rather than in a loop improves the speed + copy.gsub!( SPECIALS[0], SUBSTITUTES[0] ) + copy.gsub!( SPECIALS[1], SUBSTITUTES[1] ) + copy.gsub!( SPECIALS[2], SUBSTITUTES[2] ) + copy.gsub!( SPECIALS[3], SUBSTITUTES[3] ) + copy.gsub!( SPECIALS[4], SUBSTITUTES[4] ) + copy.gsub!( SPECIALS[5], SUBSTITUTES[5] ) + out << copy + end + + private + def clear_cache + @normalized = nil + @unnormalized = nil + end + + # Reads text, substituting entities + def Text::read_with_substitution( input, illegal=nil ) + copy = input.clone + + if copy =~ illegal + raise ParseException.new( "malformed text: Illegal character #$& in \"#{copy}\"" ) + end if illegal + + copy.gsub!( /\r\n?/, "\n" ) + if copy.include? 
?& + copy.gsub!( SETUTITSBUS[0], SLAICEPS[0] ) + copy.gsub!( SETUTITSBUS[1], SLAICEPS[1] ) + copy.gsub!( SETUTITSBUS[2], SLAICEPS[2] ) + copy.gsub!( SETUTITSBUS[3], SLAICEPS[3] ) + copy.gsub!( SETUTITSBUS[4], SLAICEPS[4] ) + copy.gsub!( /�*((?:\d+)|(?:x[a-f0-9]+));/ ) { + m=$1 + #m='0' if m=='' + m = "0#{m}" if m[0] == ?x + [Integer(m)].pack('U*') + } + end + copy + end + + EREFERENCE = /&(?!#{Entity::NAME};)/ + # Escapes all possible entities + def Text::normalize( input, doctype=nil, entity_filter=nil ) + copy = input.to_s + # Doing it like this rather than in a loop improves the speed + #copy = copy.gsub( EREFERENCE, '&' ) + copy = copy.gsub( "&", "&" ) + if doctype + # Replace all ampersands that aren't part of an entity + doctype.entities.each_value do |entity| + copy = copy.gsub( entity.value, + "&#{entity.name};" ) if entity.value and + not( entity_filter and entity_filter.include?(entity.name) ) + end + else + # Replace all ampersands that aren't part of an entity + DocType::DEFAULT_ENTITIES.each_value do |entity| + copy = copy.gsub(entity.value, "&#{entity.name};" ) + end + end + copy + end + + # Unescapes all possible entities + def Text::unnormalize( string, doctype=nil, filter=nil, illegal=nil ) + sum = 0 + string.gsub( /\r\n?/, "\n" ).gsub( REFERENCE ) { + s = Text.expand($&, doctype, filter) + if sum + s.bytesize > Security.entity_expansion_text_limit + raise "entity expansion has grown too large" + else + sum += s.bytesize + end + s + } + end + + def Text.expand(ref, doctype, filter) + if ref[1] == ?# + if ref[2] == ?x + [ref[3...-1].to_i(16)].pack('U*') + else + [ref[2...-1].to_i].pack('U*') + end + elsif ref == '&' + '&' + elsif filter and filter.include?( ref[1...-1] ) + ref + elsif doctype + doctype.entity( ref[1...-1] ) or ref + else + entity_value = DocType::DEFAULT_ENTITIES[ ref[1...-1] ] + entity_value ? 
entity_value.value : ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/undefinednamespaceexception.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/undefinednamespaceexception.rb new file mode 100644 index 0000000..492a098 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/undefinednamespaceexception.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: false +require_relative 'parseexception' +module REXML + class UndefinedNamespaceException < ParseException + def initialize( prefix, source, parser ) + super( "Undefined prefix #{prefix} found" ) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/relaxng.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/relaxng.rb new file mode 100644 index 0000000..f29a2c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/relaxng.rb @@ -0,0 +1,539 @@ +# frozen_string_literal: false +require_relative "validation" +require_relative "../parsers/baseparser" + +module REXML + module Validation + # Implemented: + # * empty + # * element + # * attribute + # * text + # * optional + # * choice + # * oneOrMore + # * zeroOrMore + # * group + # * value + # * interleave + # * mixed + # * ref + # * grammar + # * start + # * define + # + # Not implemented: + # * data + # * param + # * include + # * externalRef + # * notAllowed + # * anyName + # * nsName + # * except + # * name + class RelaxNG + include Validator + + INFINITY = 1.0 / 0.0 + EMPTY = Event.new( nil ) + TEXT = [:start_element, "text"] + attr_accessor :current + attr_accessor :count + attr_reader :references + + # FIXME: Namespaces + def initialize source + parser = REXML::Parsers::BaseParser.new( source ) + + @count = 0 + @references = {} + @root = @current = Sequence.new(self) + @root.previous = true + states = [ @current ] + begin + event = parser.pull + case event[0] + when :start_element + case event[1] + when "empty" + when "element", "attribute", "text", "value" + states[-1] << event + when "optional" + states << Optional.new( self ) + states[-2] << states[-1] + when "choice" + states << Choice.new( self ) + states[-2] << states[-1] + when "oneOrMore" + states << OneOrMore.new( self ) + states[-2] << states[-1] + when "zeroOrMore" + states << ZeroOrMore.new( self ) + states[-2] << states[-1] + when "group" + states << Sequence.new( self ) + states[-2] << states[-1] + when "interleave" + states << Interleave.new( self ) + states[-2] << states[-1] + when "mixed" + states << Interleave.new( self ) + states[-2] << states[-1] + states[-1] << TEXT + when "define" + states << [ event[2]["name"] ] + when "ref" + states[-1] << Ref.new( event[2]["name"] ) + when "anyName" + states << AnyName.new( self ) + states[-2] << states[-1] + when "nsName" + when "except" + when "name" + when "data" + when "param" + when "include" + when "grammar" + when "start" + when "externalRef" + when "notAllowed" + end + when :end_element + case event[1] + when "element", "attribute" + states[-1] << event + when "zeroOrMore", "oneOrMore", "choice", "optional", + "interleave", "group", "mixed" + states.pop + when "define" + ref = states.pop + @references[ ref.shift ] = ref + #when "empty" + end + when :end_document + states[-1] << event + when :text + states[-1] << event + end + end while event[0] != :end_document + end + + def receive event + validate( event ) + end + end + + class State + def initialize( context ) + @previous = [] + @events = [] + @current = 0 + @count = 
context.count += 1 + @references = context.references + @value = false + end + + def reset + return if @current == 0 + @current = 0 + @events.each {|s| s.reset if s.kind_of? State } + end + + def previous=( previous ) + @previous << previous + end + + def next( event ) + #print "In next with #{event.inspect}. " + #p @previous + return @previous.pop.next( event ) if @events[@current].nil? + expand_ref_in( @events, @current ) if @events[@current].class == Ref + if ( @events[@current].kind_of? State ) + @current += 1 + @events[@current-1].previous = self + return @events[@current-1].next( event ) + end + if ( @events[@current].matches?(event) ) + @current += 1 + if @events[@current].nil? + return @previous.pop + elsif @events[@current].kind_of? State + @current += 1 + @events[@current-1].previous = self + return @events[@current-1] + else + return self + end + else + return nil + end + end + + def to_s + # Abbreviated: + self.class.name =~ /(?:::)(\w)\w+$/ + # Full: + #self.class.name =~ /(?:::)(\w+)$/ + "#$1.#@count" + end + + def inspect + "< #{to_s} #{@events.collect{|e| + pre = e == @events[@current] ? '#' : '' + pre + e.inspect unless self == e + }.join(', ')} >" + end + + def expected + return [@events[@current]] + end + + def <<( event ) + add_event_to_arry( @events, event ) + end + + + protected + def expand_ref_in( arry, ind ) + new_events = [] + @references[ arry[ind].to_s ].each{ |evt| + add_event_to_arry(new_events,evt) + } + arry[ind,1] = new_events + end + + def add_event_to_arry( arry, evt ) + evt = generate_event( evt ) + if evt.kind_of? String + arry[-1].event_arg = evt if arry[-1].kind_of? Event and @value + @value = false + else + arry << evt + end + end + + def generate_event( event ) + return event if event.kind_of? State or event.class == Ref + evt = nil + arg = nil + case event[0] + when :start_element + case event[1] + when "element" + evt = :start_element + arg = event[2]["name"] + when "attribute" + evt = :start_attribute + arg = event[2]["name"] + when "text" + evt = :text + when "value" + evt = :text + @value = true + end + when :text + return event[1] + when :end_document + return Event.new( event[0] ) + else # then :end_element + case event[1] + when "element" + evt = :end_element + when "attribute" + evt = :end_attribute + end + end + return Event.new( evt, arg ) + end + end + + + class Sequence < State + def matches?(event) + @events[@current].matches?( event ) + end + end + + + class Optional < State + def next( event ) + if @current == 0 + rv = super + return rv if rv + @prior = @previous.pop + return @prior.next( event ) + end + super + end + + def matches?(event) + @events[@current].matches?(event) || + (@current == 0 and @previous[-1].matches?(event)) + end + + def expected + return [ @prior.expected, @events[0] ].flatten if @current == 0 + return [@events[@current]] + end + end + + + class ZeroOrMore < Optional + def next( event ) + expand_ref_in( @events, @current ) if @events[@current].class == Ref + if ( @events[@current].matches?(event) ) + @current += 1 + if @events[@current].nil? + @current = 0 + return self + elsif @events[@current].kind_of? 
State + @current += 1 + @events[@current-1].previous = self + return @events[@current-1] + else + return self + end + else + @prior = @previous.pop + return @prior.next( event ) if @current == 0 + return nil + end + end + + def expected + return [ @prior.expected, @events[0] ].flatten if @current == 0 + return [@events[@current]] + end + end + + + class OneOrMore < State + def initialize context + super + @ord = 0 + end + + def reset + super + @ord = 0 + end + + def next( event ) + expand_ref_in( @events, @current ) if @events[@current].class == Ref + if ( @events[@current].matches?(event) ) + @current += 1 + @ord += 1 + if @events[@current].nil? + @current = 0 + return self + elsif @events[@current].kind_of? State + @current += 1 + @events[@current-1].previous = self + return @events[@current-1] + else + return self + end + else + return @previous.pop.next( event ) if @current == 0 and @ord > 0 + return nil + end + end + + def matches?( event ) + @events[@current].matches?(event) || + (@current == 0 and @ord > 0 and @previous[-1].matches?(event)) + end + + def expected + if @current == 0 and @ord > 0 + return [@previous[-1].expected, @events[0]].flatten + else + return [@events[@current]] + end + end + end + + + class Choice < State + def initialize context + super + @choices = [] + end + + def reset + super + @events = [] + @choices.each { |c| c.each { |s| s.reset if s.kind_of? State } } + end + + def <<( event ) + add_event_to_arry( @choices, event ) + end + + def next( event ) + # Make the choice if we haven't + if @events.size == 0 + c = 0 ; max = @choices.size + while c < max + if @choices[c][0].class == Ref + expand_ref_in( @choices[c], 0 ) + @choices += @choices[c] + @choices.delete( @choices[c] ) + max -= 1 + else + c += 1 + end + end + @events = @choices.find { |evt| evt[0].matches? event } + # Remove the references + # Find the events + end + unless @events + @events = [] + return nil + end + super + end + + def matches?( event ) + return @events[@current].matches?( event ) if @events.size > 0 + !@choices.find{|evt| evt[0].matches?(event)}.nil? + end + + def expected + return [@events[@current]] if @events.size > 0 + return @choices.collect do |x| + if x[0].kind_of? State + x[0].expected + else + x[0] + end + end.flatten + end + + def inspect + "< #{to_s} #{@choices.collect{|e| e.collect{|f|f.to_s}.join(', ')}.join(' or ')} >" + end + + protected + def add_event_to_arry( arry, evt ) + if evt.kind_of? State or evt.class == Ref + arry << [evt] + elsif evt[0] == :text + if arry[-1] and + arry[-1][-1].kind_of?( Event ) and + arry[-1][-1].event_type == :text and @value + + arry[-1][-1].event_arg = evt[1] + @value = false + end + else + arry << [] if evt[0] == :start_element + arry[-1] << generate_event( evt ) + end + end + end + + + class Interleave < Choice + def initialize context + super + @choice = 0 + end + + def reset + @choice = 0 + end + + def next_current( event ) + # Expand references + c = 0 ; max = @choices.size + while c < max + if @choices[c][0].class == Ref + expand_ref_in( @choices[c], 0 ) + @choices += @choices[c] + @choices.delete( @choices[c] ) + max -= 1 + else + c += 1 + end + end + @events = @choices[@choice..-1].find { |evt| evt[0].matches? 
event } + @current = 0 + if @events + # reorder the choices + old = @choices[@choice] + idx = @choices.index( @events ) + @choices[@choice] = @events + @choices[idx] = old + @choice += 1 + end + + @events = [] unless @events + end + + + def next( event ) + # Find the next series + next_current(event) unless @events[@current] + return nil unless @events[@current] + + expand_ref_in( @events, @current ) if @events[@current].class == Ref + if ( @events[@current].kind_of? State ) + @current += 1 + @events[@current-1].previous = self + return @events[@current-1].next( event ) + end + return @previous.pop.next( event ) if @events[@current].nil? + if ( @events[@current].matches?(event) ) + @current += 1 + if @events[@current].nil? + return self unless @choices[@choice].nil? + return @previous.pop + elsif @events[@current].kind_of? State + @current += 1 + @events[@current-1].previous = self + return @events[@current-1] + else + return self + end + else + return nil + end + end + + def matches?( event ) + return @events[@current].matches?( event ) if @events[@current] + !@choices[@choice..-1].find{|evt| evt[0].matches?(event)}.nil? + end + + def expected + return [@events[@current]] if @events[@current] + return @choices[@choice..-1].collect do |x| + if x[0].kind_of? State + x[0].expected + else + x[0] + end + end.flatten + end + + def inspect + "< #{to_s} #{@choices.collect{|e| e.collect{|f|f.to_s}.join(', ')}.join(' and ')} >" + end + end + + class Ref + def initialize value + @value = value + end + def to_s + @value + end + def inspect + "{#{to_s}}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/validation.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/validation.rb new file mode 100644 index 0000000..0ad6ada --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/validation.rb @@ -0,0 +1,144 @@ +# frozen_string_literal: false +require_relative 'validationexception' + +module REXML + module Validation + module Validator + NILEVENT = [ nil ] + def reset + @current = @root + @root.reset + @root.previous = true + @attr_stack = [] + self + end + def dump + puts @root.inspect + end + def validate( event ) + @attr_stack = [] unless defined? @attr_stack + match = @current.next(event) + raise ValidationException.new( "Validation error. Expected: "+ + @current.expected.join( " or " )+" from #{@current.inspect} "+ + " but got #{Event.new( event[0], event[1] ).inspect}" ) unless match + @current = match + + # Check for attributes + case event[0] + when :start_element + @attr_stack << event[2] + begin + sattr = [:start_attribute, nil] + eattr = [:end_attribute] + text = [:text, nil] + k, = event[2].find { |key,value| + sattr[1] = key + m = @current.next( sattr ) + if m + # If the state has text children... + if m.matches?( eattr ) + @current = m + else + text[1] = value + m = m.next( text ) + text[1] = nil + return false unless m + @current = m if m + end + m = @current.next( eattr ) + if m + @current = m + true + else + false + end + else + false + end + } + event[2].delete(k) if k + end while k + when :end_element + attrs = @attr_stack.pop + raise ValidationException.new( "Validation error. Illegal "+ + " attributes: #{attrs.inspect}") if attrs.length > 0 + end + end + end + + class Event + def initialize(event_type, event_arg=nil ) + @event_type = event_type + @event_arg = event_arg + end + + attr_reader :event_type + attr_accessor :event_arg + + def done? + @done + end + + def single? 
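+        # An event is "single" when it does not open a nested scope, i.e. it is
+        # neither a start_element nor a start_attribute event.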
+ return (@event_type != :start_element and @event_type != :start_attribute) + end + + def matches?( event ) + return false unless event[0] == @event_type + case event[0] + when nil + return true + when :start_element + return true if event[1] == @event_arg + when :end_element + return true + when :start_attribute + return true if event[1] == @event_arg + when :end_attribute + return true + when :end_document + return true + when :text + return (@event_arg.nil? or @event_arg == event[1]) +=begin + when :processing_instruction + false + when :xmldecl + false + when :start_doctype + false + when :end_doctype + false + when :externalentity + false + when :elementdecl + false + when :entity + false + when :attlistdecl + false + when :notationdecl + false + when :end_doctype + false +=end + else + false + end + end + + def ==( other ) + return false unless other.kind_of? Event + @event_type == other.event_type and @event_arg == other.event_arg + end + + def to_s + inspect + end + + def inspect + "#{@event_type.inspect}( #@event_arg )" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/validationexception.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/validationexception.rb new file mode 100644 index 0000000..78cd63f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/validation/validationexception.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: false +module REXML + module Validation + class ValidationException < RuntimeError + def initialize msg + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xmldecl.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xmldecl.rb new file mode 100644 index 0000000..d19407c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xmldecl.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: false + +require_relative 'encoding' +require_relative 'source' + +module REXML + # NEEDS DOCUMENTATION + class XMLDecl < Child + include Encoding + + DEFAULT_VERSION = "1.0" + DEFAULT_ENCODING = "UTF-8" + DEFAULT_STANDALONE = "no" + START = "<?xml" + STOP = "?>" + + attr_accessor :version, :standalone + attr_reader :writeencoding, :writethis + + def initialize(version=DEFAULT_VERSION, encoding=nil, standalone=nil) + @writethis = true + @writeencoding = !encoding.nil? + if version.kind_of? XMLDecl + super() + @version = version.version + self.encoding = version.encoding + @writeencoding = version.writeencoding + @standalone = version.standalone + @writethis = version.writethis + else + super() + @version = version + self.encoding = encoding + @standalone = standalone + end + @version = DEFAULT_VERSION if @version.nil? + end + + def clone + XMLDecl.new(self) + end + + # indent:: + # Ignored. There must be no whitespace before an XML declaration + # transitive:: + # Ignored + # ie_hack:: + # Ignored + def write(writer, indent=-1, transitive=false, ie_hack=false) + return nil unless @writethis or writer.kind_of? Output + writer << START + writer << " #{content encoding}" + writer << STOP + end + + def ==( other ) + other.kind_of?(XMLDecl) and + other.version == @version and + other.encoding == self.encoding and + other.standalone == @standalone + end + + def xmldecl version, encoding, standalone + @version = version + self.encoding = encoding + @standalone = standalone + end + + def node_type + :xmldecl + end + + alias :stand_alone? :standalone + alias :old_enc= :encoding= + + def encoding=( enc ) + if enc.nil? 
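+        # nil means no encoding was supplied: fall back to UTF-8 and skip
+        # writing an encoding attribute in the declaration.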
+ self.old_enc = "UTF-8" + @writeencoding = false + else + self.old_enc = enc + @writeencoding = true + end + self.dowrite + end + + # Only use this if you do not want the XML declaration to be written; + # this object is ignored by the XML writer. Otherwise, instantiate your + # own XMLDecl and add it to the document. + # + # Note that XML 1.1 documents *must* include an XML declaration + def XMLDecl.default + rv = XMLDecl.new( "1.0" ) + rv.nowrite + rv + end + + def nowrite + @writethis = false + end + + def dowrite + @writethis = true + end + + def inspect + "#{START} ... #{STOP}" + end + + private + def content(enc) + context = nil + context = parent.context if parent + if context and context[:prologue_quote] == :quote + quote = "\"" + else + quote = "'" + end + + rv = "version=#{quote}#{@version}#{quote}" + if @writeencoding or enc !~ /\Autf-8\z/i + rv << " encoding=#{quote}#{enc}#{quote}" + end + if @standalone + rv << " standalone=#{quote}#{@standalone}#{quote}" + end + rv + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xmltokens.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xmltokens.rb new file mode 100644 index 0000000..392b47b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xmltokens.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: false +module REXML + # Defines a number of tokens used for parsing XML. Not for general + # consumption. + module XMLTokens + # From http://www.w3.org/TR/REC-xml/#sec-common-syn + # + # [4] NameStartChar ::= + # ":" | + # [A-Z] | + # "_" | + # [a-z] | + # [#xC0-#xD6] | + # [#xD8-#xF6] | + # [#xF8-#x2FF] | + # [#x370-#x37D] | + # [#x37F-#x1FFF] | + # [#x200C-#x200D] | + # [#x2070-#x218F] | + # [#x2C00-#x2FEF] | + # [#x3001-#xD7FF] | + # [#xF900-#xFDCF] | + # [#xFDF0-#xFFFD] | + # [#x10000-#xEFFFF] + name_start_chars = [ + ":", + "A-Z", + "_", + "a-z", + "\\u00C0-\\u00D6", + "\\u00D8-\\u00F6", + "\\u00F8-\\u02FF", + "\\u0370-\\u037D", + "\\u037F-\\u1FFF", + "\\u200C-\\u200D", + "\\u2070-\\u218F", + "\\u2C00-\\u2FEF", + "\\u3001-\\uD7FF", + "\\uF900-\\uFDCF", + "\\uFDF0-\\uFFFD", + "\\u{10000}-\\u{EFFFF}", + ] + # From http://www.w3.org/TR/REC-xml/#sec-common-syn + # + # [4a] NameChar ::= + # NameStartChar | + # "-" | + # "." | + # [0-9] | + # #xB7 | + # [#x0300-#x036F] | + # [#x203F-#x2040] + name_chars = name_start_chars + [ + "\\-", + "\\.", + "0-9", + "\\u00B7", + "\\u0300-\\u036F", + "\\u203F-\\u2040", + ] + NAME_START_CHAR = "[#{name_start_chars.join('')}]" + NAME_CHAR = "[#{name_chars.join('')}]" + NAMECHAR = NAME_CHAR # deprecated. Use NAME_CHAR instead. 
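+    # Usage sketch (illustrative, not part of the upstream file): the fragments
+    # above are meant to be interpolated into regular expressions, e.g.
+    #   name_re = /\A#{REXML::XMLTokens::NAME_START_CHAR}#{REXML::XMLTokens::NAME_CHAR}*\z/
+    #   name_re.match?("ns:tag")   #=> true  (":" is a valid name start character)
+    #   name_re.match?("1tag")     #=> false (digits may not start a name)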
+ + # From http://www.w3.org/TR/xml-names11/#NT-NCName + # + # [6] NCNameStartChar ::= NameStartChar - ':' + ncname_start_chars = name_start_chars - [":"] + # From http://www.w3.org/TR/xml-names11/#NT-NCName + # + # [5] NCNameChar ::= NameChar - ':' + ncname_chars = name_chars - [":"] + NCNAME_STR = "[#{ncname_start_chars.join('')}][#{ncname_chars.join('')}]*" + NAME_STR = "(?:#{NCNAME_STR}:)?#{NCNAME_STR}" + + NAME = "(#{NAME_START_CHAR}#{NAME_CHAR}*)" + NMTOKEN = "(?:#{NAME_CHAR})+" + NMTOKENS = "#{NMTOKEN}(\\s+#{NMTOKEN})*" + REFERENCE = "(?:&#{NAME};|&#\\d+;|&#x[0-9a-fA-F]+;)" + + #REFERENCE = "(?:#{ENTITYREF}|#{CHARREF})" + #ENTITYREF = "&#{NAME};" + #CHARREF = "&#\\d+;|&#x[0-9a-fA-F]+;" + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xpath.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xpath.rb new file mode 100644 index 0000000..a0921bd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xpath.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: false +require_relative 'functions' +require_relative 'xpath_parser' + +module REXML + # Wrapper class. Use this class to access the XPath functions. + class XPath + include Functions + # A base Hash object, supposing to be used when initializing a + # default empty namespaces set, but is currently unused. + # TODO: either set the namespaces=EMPTY_HASH, or deprecate this. + EMPTY_HASH = {} + + # Finds and returns the first node that matches the supplied xpath. + # element:: + # The context element + # path:: + # The xpath to search for. If not supplied or nil, returns the first + # node matching '*'. + # namespaces:: + # If supplied, a Hash which defines a namespace mapping. + # variables:: + # If supplied, a Hash which maps $variables in the query + # to values. This can be used to avoid XPath injection attacks + # or to automatically handle escaping string values. + # + # XPath.first( node ) + # XPath.first( doc, "//b"} ) + # XPath.first( node, "a/x:b", { "x"=>"http://doofus" } ) + # XPath.first( node, '/book/publisher/text()=$publisher', {}, {"publisher"=>"O'Reilly"}) + def XPath::first(element, path=nil, namespaces=nil, variables={}, options={}) + raise "The namespaces argument, if supplied, must be a hash object." unless namespaces.nil? or namespaces.kind_of?(Hash) + raise "The variables argument, if supplied, must be a hash object." unless variables.kind_of?(Hash) + parser = XPathParser.new(**options) + parser.namespaces = namespaces + parser.variables = variables + path = "*" unless path + element = [element] unless element.kind_of? Array + parser.parse(path, element).flatten[0] + end + + # Iterates over nodes that match the given path, calling the supplied + # block with the match. + # element:: + # The context element + # path:: + # The xpath to search for. If not supplied or nil, defaults to '*' + # namespaces:: + # If supplied, a Hash which defines a namespace mapping + # variables:: + # If supplied, a Hash which maps $variables in the query + # to values. This can be used to avoid XPath injection attacks + # or to automatically handle escaping string values. + # + # XPath.each( node ) { |el| ... } + # XPath.each( node, '/*[@attr='v']' ) { |el| ... } + # XPath.each( node, 'ancestor::x' ) { |el| ... } + # XPath.each( node, '/book/publisher/text()=$publisher', {}, {"publisher"=>"O'Reilly"}) \ + # {|el| ... } + def XPath::each(element, path=nil, namespaces=nil, variables={}, options={}, &block) + raise "The namespaces argument, if supplied, must be a hash object." 
unless namespaces.nil? or namespaces.kind_of?(Hash) + raise "The variables argument, if supplied, must be a hash object." unless variables.kind_of?(Hash) + parser = XPathParser.new(**options) + parser.namespaces = namespaces + parser.variables = variables + path = "*" unless path + element = [element] unless element.kind_of? Array + parser.parse(path, element).each( &block ) + end + + # Returns an array of nodes matching a given XPath. + def XPath::match(element, path=nil, namespaces=nil, variables={}, options={}) + parser = XPathParser.new(**options) + parser.namespaces = namespaces + parser.variables = variables + path = "*" unless path + element = [element] unless element.kind_of? Array + parser.parse(path,element) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xpath_parser.rb b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xpath_parser.rb new file mode 100644 index 0000000..d8b88e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/rexml-3.2.5/lib/rexml/xpath_parser.rb @@ -0,0 +1,974 @@ +# frozen_string_literal: false + +require "pp" + +require_relative 'namespace' +require_relative 'xmltokens' +require_relative 'attribute' +require_relative 'parsers/xpathparser' + +module REXML + module DClonable + refine Object do + # provides a unified +clone+ operation, for REXML::XPathParser + # to use across multiple Object types + def dclone + clone + end + end + refine Symbol do + # provides a unified +clone+ operation, for REXML::XPathParser + # to use across multiple Object types + def dclone ; self ; end + end + refine Integer do + # provides a unified +clone+ operation, for REXML::XPathParser + # to use across multiple Object types + def dclone ; self ; end + end + refine Float do + # provides a unified +clone+ operation, for REXML::XPathParser + # to use across multiple Object types + def dclone ; self ; end + end + refine Array do + # provides a unified +clone+ operation, for REXML::XPathParser + # to use across multiple Object+ types + def dclone + klone = self.clone + klone.clear + self.each{|v| klone << v.dclone} + klone + end + end + end +end + +using REXML::DClonable + +module REXML + # You don't want to use this class. Really. Use XPath, which is a wrapper + # for this class. Believe me. You don't want to poke around in here. + # There is strange, dark magic at work in this code. Beware. Go back! Go + # back while you still can! + class XPathParser + include XMLTokens + LITERAL = /^'([^']*)'|^"([^"]*)"/u + + DEBUG = (ENV["REXML_XPATH_PARSER_DEBUG"] == "true") + + def initialize(strict: false) + @debug = DEBUG + @parser = REXML::Parsers::XPathParser.new + @namespaces = nil + @variables = {} + @nest = 0 + @strict = strict + end + + def namespaces=( namespaces={} ) + Functions::namespace_context = namespaces + @namespaces = namespaces + end + + def variables=( vars={} ) + Functions::variables = vars + @variables = vars + end + + def parse path, nodeset + path_stack = @parser.parse( path ) + match( path_stack, nodeset ) + end + + def get_first path, nodeset + path_stack = @parser.parse( path ) + first( path_stack, nodeset ) + end + + def predicate path, nodeset + path_stack = @parser.parse( path ) + match( path_stack, nodeset ) + end + + def []=( variable_name, value ) + @variables[ variable_name ] = value + end + + + # Performs a depth-first (document order) XPath search, and returns the + # first match. This is the fastest, lightest way to return a single result. + # + # FIXME: This method is incomplete! 
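+    # Usage sketch (illustrative, not part of the upstream file): callers
+    # normally go through the REXML::XPath wrapper rather than this class, e.g.
+    #   doc = REXML::Document.new("<a><b/><c/></a>")
+    #   REXML::XPath.first(doc, "/a/c")        #=> the <c/> element
+    #   REXML::XPath.match(doc, "/a/*").size   #=> 2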
+ def first( path_stack, node ) + return nil if path.size == 0 + + case path[0] + when :document + # do nothing + return first( path[1..-1], node ) + when :child + for c in node.children + r = first( path[1..-1], c ) + return r if r + end + when :qname + name = path[2] + if node.name == name + return node if path.size == 3 + return first( path[3..-1], node ) + else + return nil + end + when :descendant_or_self + r = first( path[1..-1], node ) + return r if r + for c in node.children + r = first( path, c ) + return r if r + end + when :node + return first( path[1..-1], node ) + when :any + return first( path[1..-1], node ) + end + return nil + end + + + def match(path_stack, nodeset) + nodeset = nodeset.collect.with_index do |node, i| + position = i + 1 + XPathNode.new(node, position: position) + end + result = expr(path_stack, nodeset) + case result + when Array # nodeset + unnode(result) + else + [result] + end + end + + private + def strict? + @strict + end + + # Returns a String namespace for a node, given a prefix + # The rules are: + # + # 1. Use the supplied namespace mapping first. + # 2. If no mapping was supplied, use the context node to look up the namespace + def get_namespace( node, prefix ) + if @namespaces + return @namespaces[prefix] || '' + else + return node.namespace( prefix ) if node.node_type == :element + return '' + end + end + + + # Expr takes a stack of path elements and a set of nodes (either a Parent + # or an Array and returns an Array of matching nodes + def expr( path_stack, nodeset, context=nil ) + enter(:expr, path_stack, nodeset) if @debug + return nodeset if path_stack.length == 0 || nodeset.length == 0 + while path_stack.length > 0 + trace(:while, path_stack, nodeset) if @debug + if nodeset.length == 0 + path_stack.clear + return [] + end + op = path_stack.shift + case op + when :document + first_raw_node = nodeset.first.raw_node + nodeset = [XPathNode.new(first_raw_node.root_node, position: 1)] + when :self + nodeset = step(path_stack) do + [nodeset] + end + when :child + nodeset = step(path_stack) do + child(nodeset) + end + when :literal + trace(:literal, path_stack, nodeset) if @debug + return path_stack.shift + when :attribute + nodeset = step(path_stack, any_type: :attribute) do + nodesets = [] + nodeset.each do |node| + raw_node = node.raw_node + next unless raw_node.node_type == :element + attributes = raw_node.attributes + next if attributes.empty? 
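+              # Each attribute of the element becomes its own candidate node,
+              # numbered from 1 in attribute order.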
+ nodesets << attributes.each_attribute.collect.with_index do |attribute, i| + XPathNode.new(attribute, position: i + 1) + end + end + nodesets + end + when :namespace + pre_defined_namespaces = { + "xml" => "http://www.w3.org/XML/1998/namespace", + } + nodeset = step(path_stack, any_type: :namespace) do + nodesets = [] + nodeset.each do |node| + raw_node = node.raw_node + case raw_node.node_type + when :element + if @namespaces + nodesets << pre_defined_namespaces.merge(@namespaces) + else + nodesets << pre_defined_namespaces.merge(raw_node.namespaces) + end + when :attribute + if @namespaces + nodesets << pre_defined_namespaces.merge(@namespaces) + else + nodesets << pre_defined_namespaces.merge(raw_node.element.namespaces) + end + end + end + nodesets + end + when :parent + nodeset = step(path_stack) do + nodesets = [] + nodeset.each do |node| + raw_node = node.raw_node + if raw_node.node_type == :attribute + parent = raw_node.element + else + parent = raw_node.parent + end + nodesets << [XPathNode.new(parent, position: 1)] if parent + end + nodesets + end + when :ancestor + nodeset = step(path_stack) do + nodesets = [] + # new_nodes = {} + nodeset.each do |node| + raw_node = node.raw_node + new_nodeset = [] + while raw_node.parent + raw_node = raw_node.parent + # next if new_nodes.key?(node) + new_nodeset << XPathNode.new(raw_node, + position: new_nodeset.size + 1) + # new_nodes[node] = true + end + nodesets << new_nodeset unless new_nodeset.empty? + end + nodesets + end + when :ancestor_or_self + nodeset = step(path_stack) do + nodesets = [] + # new_nodes = {} + nodeset.each do |node| + raw_node = node.raw_node + next unless raw_node.node_type == :element + new_nodeset = [XPathNode.new(raw_node, position: 1)] + # new_nodes[node] = true + while raw_node.parent + raw_node = raw_node.parent + # next if new_nodes.key?(node) + new_nodeset << XPathNode.new(raw_node, + position: new_nodeset.size + 1) + # new_nodes[node] = true + end + nodesets << new_nodeset unless new_nodeset.empty? + end + nodesets + end + when :descendant_or_self + nodeset = step(path_stack) do + descendant(nodeset, true) + end + when :descendant + nodeset = step(path_stack) do + descendant(nodeset, false) + end + when :following_sibling + nodeset = step(path_stack) do + nodesets = [] + nodeset.each do |node| + raw_node = node.raw_node + next unless raw_node.respond_to?(:parent) + next if raw_node.parent.nil? + all_siblings = raw_node.parent.children + current_index = all_siblings.index(raw_node) + following_siblings = all_siblings[(current_index + 1)..-1] + next if following_siblings.empty? + nodesets << following_siblings.collect.with_index do |sibling, i| + XPathNode.new(sibling, position: i + 1) + end + end + nodesets + end + when :preceding_sibling + nodeset = step(path_stack, order: :reverse) do + nodesets = [] + nodeset.each do |node| + raw_node = node.raw_node + next unless raw_node.respond_to?(:parent) + next if raw_node.parent.nil? + all_siblings = raw_node.parent.children + current_index = all_siblings.index(raw_node) + preceding_siblings = all_siblings[0, current_index].reverse + next if preceding_siblings.empty? 
+ nodesets << preceding_siblings.collect.with_index do |sibling, i| + XPathNode.new(sibling, position: i + 1) + end + end + nodesets + end + when :preceding + nodeset = step(path_stack, order: :reverse) do + unnode(nodeset) do |node| + preceding(node) + end + end + when :following + nodeset = step(path_stack) do + unnode(nodeset) do |node| + following(node) + end + end + when :variable + var_name = path_stack.shift + return [@variables[var_name]] + + when :eq, :neq, :lt, :lteq, :gt, :gteq + left = expr( path_stack.shift, nodeset.dup, context ) + right = expr( path_stack.shift, nodeset.dup, context ) + res = equality_relational_compare( left, op, right ) + trace(op, left, right, res) if @debug + return res + + when :or + left = expr(path_stack.shift, nodeset.dup, context) + return true if Functions.boolean(left) + right = expr(path_stack.shift, nodeset.dup, context) + return Functions.boolean(right) + + when :and + left = expr(path_stack.shift, nodeset.dup, context) + return false unless Functions.boolean(left) + right = expr(path_stack.shift, nodeset.dup, context) + return Functions.boolean(right) + + when :div, :mod, :mult, :plus, :minus + left = expr(path_stack.shift, nodeset, context) + right = expr(path_stack.shift, nodeset, context) + left = unnode(left) if left.is_a?(Array) + right = unnode(right) if right.is_a?(Array) + left = Functions::number(left) + right = Functions::number(right) + case op + when :div + return left / right + when :mod + return left % right + when :mult + return left * right + when :plus + return left + right + when :minus + return left - right + else + raise "[BUG] Unexpected operator: <#{op.inspect}>" + end + when :union + left = expr( path_stack.shift, nodeset, context ) + right = expr( path_stack.shift, nodeset, context ) + left = unnode(left) if left.is_a?(Array) + right = unnode(right) if right.is_a?(Array) + return (left | right) + when :neg + res = expr( path_stack, nodeset, context ) + res = unnode(res) if res.is_a?(Array) + return -Functions.number(res) + when :not + when :function + func_name = path_stack.shift.tr('-','_') + arguments = path_stack.shift + + if nodeset.size != 1 + message = "[BUG] Node set size must be 1 for function call: " + message += "<#{func_name}>: <#{nodeset.inspect}>: " + message += "<#{arguments.inspect}>" + raise message + end + + node = nodeset.first + if context + target_context = context + else + target_context = {:size => nodeset.size} + if node.is_a?(XPathNode) + target_context[:node] = node.raw_node + target_context[:index] = node.position + else + target_context[:node] = node + target_context[:index] = 1 + end + end + args = arguments.dclone.collect do |arg| + result = expr(arg, nodeset, target_context) + result = unnode(result) if result.is_a?(Array) + result + end + Functions.context = target_context + return Functions.send(func_name, *args) + + else + raise "[BUG] Unexpected path: <#{op.inspect}>: <#{path_stack.inspect}>" + end + end # while + return nodeset + ensure + leave(:expr, path_stack, nodeset) if @debug + end + + def step(path_stack, any_type: :element, order: :forward) + nodesets = yield + begin + enter(:step, path_stack, nodesets) if @debug + nodesets = node_test(path_stack, nodesets, any_type: any_type) + while path_stack[0] == :predicate + path_stack.shift # :predicate + predicate_expression = path_stack.shift.dclone + nodesets = evaluate_predicate(predicate_expression, nodesets) + end + if nodesets.size == 1 + ordered_nodeset = nodesets[0] + else + raw_nodes = [] + nodesets.each do |nodeset| + 
nodeset.each do |node| + if node.respond_to?(:raw_node) + raw_nodes << node.raw_node + else + raw_nodes << node + end + end + end + ordered_nodeset = sort(raw_nodes, order) + end + new_nodeset = [] + ordered_nodeset.each do |node| + # TODO: Remove duplicated + new_nodeset << XPathNode.new(node, position: new_nodeset.size + 1) + end + new_nodeset + ensure + leave(:step, path_stack, new_nodeset) if @debug + end + end + + def node_test(path_stack, nodesets, any_type: :element) + enter(:node_test, path_stack, nodesets) if @debug + operator = path_stack.shift + case operator + when :qname + prefix = path_stack.shift + name = path_stack.shift + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + raw_node = node.raw_node + case raw_node.node_type + when :element + if prefix.nil? + raw_node.name == name + elsif prefix.empty? + if strict? + raw_node.name == name and raw_node.namespace == "" + else + # FIXME: This DOUBLES the time XPath searches take + ns = get_namespace(raw_node, prefix) + raw_node.name == name and raw_node.namespace == ns + end + else + # FIXME: This DOUBLES the time XPath searches take + ns = get_namespace(raw_node, prefix) + raw_node.name == name and raw_node.namespace == ns + end + when :attribute + if prefix.nil? + raw_node.name == name + elsif prefix.empty? + raw_node.name == name and raw_node.namespace == "" + else + # FIXME: This DOUBLES the time XPath searches take + ns = get_namespace(raw_node.element, prefix) + raw_node.name == name and raw_node.namespace == ns + end + else + false + end + end + end + when :namespace + prefix = path_stack.shift + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + raw_node = node.raw_node + case raw_node.node_type + when :element + namespaces = @namespaces || raw_node.namespaces + raw_node.namespace == namespaces[prefix] + when :attribute + namespaces = @namespaces || raw_node.element.namespaces + raw_node.namespace == namespaces[prefix] + else + false + end + end + end + when :any + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + raw_node = node.raw_node + raw_node.node_type == any_type + end + end + when :comment + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + raw_node = node.raw_node + raw_node.node_type == :comment + end + end + when :text + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + raw_node = node.raw_node + raw_node.node_type == :text + end + end + when :processing_instruction + target = path_stack.shift + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + raw_node = node.raw_node + (raw_node.node_type == :processing_instruction) and + (target.empty? 
or (raw_node.target == target)) + end + end + when :node + new_nodesets = nodesets.collect do |nodeset| + filter_nodeset(nodeset) do |node| + true + end + end + else + message = "[BUG] Unexpected node test: " + + "<#{operator.inspect}>: <#{path_stack.inspect}>" + raise message + end + new_nodesets + ensure + leave(:node_test, path_stack, new_nodesets) if @debug + end + + def filter_nodeset(nodeset) + new_nodeset = [] + nodeset.each do |node| + next unless yield(node) + new_nodeset << XPathNode.new(node, position: new_nodeset.size + 1) + end + new_nodeset + end + + def evaluate_predicate(expression, nodesets) + enter(:predicate, expression, nodesets) if @debug + new_nodesets = nodesets.collect do |nodeset| + new_nodeset = [] + subcontext = { :size => nodeset.size } + nodeset.each_with_index do |node, index| + if node.is_a?(XPathNode) + subcontext[:node] = node.raw_node + subcontext[:index] = node.position + else + subcontext[:node] = node + subcontext[:index] = index + 1 + end + result = expr(expression.dclone, [node], subcontext) + trace(:predicate_evaluate, expression, node, subcontext, result) if @debug + result = result[0] if result.kind_of? Array and result.length == 1 + if result.kind_of? Numeric + if result == node.position + new_nodeset << XPathNode.new(node, position: new_nodeset.size + 1) + end + elsif result.instance_of? Array + if result.size > 0 and result.inject(false) {|k,s| s or k} + if result.size > 0 + new_nodeset << XPathNode.new(node, position: new_nodeset.size + 1) + end + end + else + if result + new_nodeset << XPathNode.new(node, position: new_nodeset.size + 1) + end + end + end + new_nodeset + end + new_nodesets + ensure + leave(:predicate, new_nodesets) if @debug + end + + def trace(*args) + indent = " " * @nest + PP.pp(args, "").each_line do |line| + puts("#{indent}#{line}") + end + end + + def enter(tag, *args) + trace(:enter, tag, *args) + @nest += 1 + end + + def leave(tag, *args) + @nest -= 1 + trace(:leave, tag, *args) + end + + # Reorders an array of nodes so that they are in document order + # It tries to do this efficiently. + # + # FIXME: I need to get rid of this, but the issue is that most of the XPath + # interpreter functions as a filter, which means that we lose context going + # in and out of function calls. If I knew what the index of the nodes was, + # I wouldn't have to do this. Maybe add a document IDX for each node? + # Problems with mutable documents. Or, rewrite everything. + def sort(array_of_nodes, order) + new_arry = [] + array_of_nodes.each { |node| + node_idx = [] + np = node.node_type == :attribute ? node.element : node + while np.parent and np.parent.node_type == :element + node_idx << np.parent.index( np ) + np = np.parent + end + new_arry << [ node_idx.reverse, node ] + } + ordered = new_arry.sort_by do |index, node| + if order == :forward + index + else + -index + end + end + ordered.collect do |_index, node| + node + end + end + + def descendant(nodeset, include_self) + nodesets = [] + nodeset.each do |node| + new_nodeset = [] + new_nodes = {} + descendant_recursive(node.raw_node, new_nodeset, new_nodes, include_self) + nodesets << new_nodeset unless new_nodeset.empty? 
+ end + nodesets + end + + def descendant_recursive(raw_node, new_nodeset, new_nodes, include_self) + if include_self + return if new_nodes.key?(raw_node) + new_nodeset << XPathNode.new(raw_node, position: new_nodeset.size + 1) + new_nodes[raw_node] = true + end + + node_type = raw_node.node_type + if node_type == :element or node_type == :document + raw_node.children.each do |child| + descendant_recursive(child, new_nodeset, new_nodes, true) + end + end + end + + # Builds a nodeset of all of the preceding nodes of the supplied node, + # in reverse document order + # preceding:: includes every element in the document that precedes this node, + # except for ancestors + def preceding(node) + ancestors = [] + parent = node.parent + while parent + ancestors << parent + parent = parent.parent + end + + precedings = [] + preceding_node = preceding_node_of(node) + while preceding_node + if ancestors.include?(preceding_node) + ancestors.delete(preceding_node) + else + precedings << XPathNode.new(preceding_node, + position: precedings.size + 1) + end + preceding_node = preceding_node_of(preceding_node) + end + precedings + end + + def preceding_node_of( node ) + psn = node.previous_sibling_node + if psn.nil? + if node.parent.nil? or node.parent.class == Document + return nil + end + return node.parent + #psn = preceding_node_of( node.parent ) + end + while psn and psn.kind_of? Element and psn.children.size > 0 + psn = psn.children[-1] + end + psn + end + + def following(node) + followings = [] + following_node = next_sibling_node(node) + while following_node + followings << XPathNode.new(following_node, + position: followings.size + 1) + following_node = following_node_of(following_node) + end + followings + end + + def following_node_of( node ) + if node.kind_of? Element and node.children.size > 0 + return node.children[0] + end + return next_sibling_node(node) + end + + def next_sibling_node(node) + psn = node.next_sibling_node + while psn.nil? + if node.parent.nil? or node.parent.class == Document + return nil + end + node = node.parent + psn = node.next_sibling_node + end + return psn + end + + def child(nodeset) + nodesets = [] + nodeset.each do |node| + raw_node = node.raw_node + node_type = raw_node.node_type + # trace(:child, node_type, node) + case node_type + when :element + nodesets << raw_node.children.collect.with_index do |child_node, i| + XPathNode.new(child_node, position: i + 1) + end + when :document + new_nodeset = [] + raw_node.children.each do |child| + case child + when XMLDecl, Text + # Ignore + else + new_nodeset << XPathNode.new(child, position: new_nodeset.size + 1) + end + end + nodesets << new_nodeset unless new_nodeset.empty? + end + end + nodesets + end + + def norm b + case b + when true, false + return b + when 'true', 'false' + return Functions::boolean( b ) + when /^\d+(\.\d+)?$/, Numeric + return Functions::number( b ) + else + return Functions::string( b ) + end + end + + def equality_relational_compare(set1, op, set2) + set1 = unnode(set1) if set1.is_a?(Array) + set2 = unnode(set2) if set2.is_a?(Array) + + if set1.kind_of? Array and set2.kind_of? Array + # If both objects to be compared are node-sets, then the + # comparison will be true if and only if there is a node in the + # first node-set and a node in the second node-set such that the + # result of performing the comparison on the string-values of + # the two nodes is true. + set1.product(set2).any? 
do |node1, node2| + node_string1 = Functions.string(node1) + node_string2 = Functions.string(node2) + compare(node_string1, op, node_string2) + end + elsif set1.kind_of? Array or set2.kind_of? Array + # If one is nodeset and other is number, compare number to each item + # in nodeset s.t. number op number(string(item)) + # If one is nodeset and other is string, compare string to each item + # in nodeset s.t. string op string(item) + # If one is nodeset and other is boolean, compare boolean to each item + # in nodeset s.t. boolean op boolean(item) + if set1.kind_of? Array + a = set1 + b = set2 + else + a = set2 + b = set1 + end + + case b + when true, false + each_unnode(a).any? do |unnoded| + compare(Functions.boolean(unnoded), op, b) + end + when Numeric + each_unnode(a).any? do |unnoded| + compare(Functions.number(unnoded), op, b) + end + when /\A\d+(\.\d+)?\z/ + b = Functions.number(b) + each_unnode(a).any? do |unnoded| + compare(Functions.number(unnoded), op, b) + end + else + b = Functions::string(b) + each_unnode(a).any? do |unnoded| + compare(Functions::string(unnoded), op, b) + end + end + else + # If neither is nodeset, + # If op is = or != + # If either boolean, convert to boolean + # If either number, convert to number + # Else, convert to string + # Else + # Convert both to numbers and compare + compare(set1, op, set2) + end + end + + def value_type(value) + case value + when true, false + :boolean + when Numeric + :number + when String + :string + else + raise "[BUG] Unexpected value type: <#{value.inspect}>" + end + end + + def normalize_compare_values(a, operator, b) + a_type = value_type(a) + b_type = value_type(b) + case operator + when :eq, :neq + if a_type == :boolean or b_type == :boolean + a = Functions.boolean(a) unless a_type == :boolean + b = Functions.boolean(b) unless b_type == :boolean + elsif a_type == :number or b_type == :number + a = Functions.number(a) unless a_type == :number + b = Functions.number(b) unless b_type == :number + else + a = Functions.string(a) unless a_type == :string + b = Functions.string(b) unless b_type == :string + end + when :lt, :lteq, :gt, :gteq + a = Functions.number(a) unless a_type == :number + b = Functions.number(b) unless b_type == :number + else + message = "[BUG] Unexpected compare operator: " + + "<#{operator.inspect}>: <#{a.inspect}>: <#{b.inspect}>" + raise message + end + [a, b] + end + + def compare(a, operator, b) + a, b = normalize_compare_values(a, operator, b) + case operator + when :eq + a == b + when :neq + a != b + when :lt + a < b + when :lteq + a <= b + when :gt + a > b + when :gteq + a >= b + else + message = "[BUG] Unexpected compare operator: " + + "<#{operator.inspect}>: <#{a.inspect}>: <#{b.inspect}>" + raise message + end + end + + def each_unnode(nodeset) + return to_enum(__method__, nodeset) unless block_given? + nodeset.each do |node| + if node.is_a?(XPathNode) + unnoded = node.raw_node + else + unnoded = node + end + yield(unnoded) + end + end + + def unnode(nodeset) + each_unnode(nodeset).collect do |unnoded| + unnoded = yield(unnoded) if block_given? 
+ unnoded + end + end + end + + # @private + class XPathNode + attr_reader :raw_node, :context + def initialize(node, context=nil) + if node.is_a?(XPathNode) + @raw_node = node.raw_node + else + @raw_node = node + end + @context = context || {} + end + + def position + @context[:position] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/.yardopts b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/.yardopts new file mode 100644 index 0000000..035b890 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/.yardopts @@ -0,0 +1 @@ +--no-private --markup-provider=redcarpet --markup=markdown - README.md LICENSE diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/LICENSE new file mode 100644 index 0000000..89dd7d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015, 2016, 2017, 2018 William Woodruff <william @ yossarian.net> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/README.md b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/README.md new file mode 100644 index 0000000..a22065a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/README.md @@ -0,0 +1,62 @@ +ruby-macho +================ + +[![Gem Version](https://badge.fury.io/rb/ruby-macho.svg)](http://badge.fury.io/rb/ruby-macho) +[![Build Status](https://travis-ci.org/Homebrew/ruby-macho.svg?branch=master)](https://travis-ci.org/Homebrew/ruby-macho) +[![Coverage Status](https://codecov.io/gh/Homebrew/ruby-macho/branch/master/graph/badge.svg)](https://codecov.io/gh/Homebrew/ruby-macho) + +A Ruby library for examining and modifying Mach-O files. + +### What is a Mach-O file? + +The [Mach-O file format](https://en.wikipedia.org/wiki/Mach-O) is used by macOS +and iOS (among others) as a general purpose binary format for object files, +executables, dynamic libraries, and so forth. + +### Documentation + +Full documentation is available on [RubyDoc](http://www.rubydoc.info/gems/ruby-macho/). 
+ +A quick example of what ruby-macho can do: + +```ruby +require 'macho' + +file = MachO::MachOFile.new("/path/to/my/binary") + +# get the file's type (object, dynamic lib, executable, etc) +file.filetype # => :execute + +# get all load commands in the file and print their offsets: +file.load_commands.each do |lc| + puts "#{lc.type}: offset #{lc.offset}, size: #{lc.cmdsize}" +end + +# access a specific load command +lc_vers = file[:LC_VERSION_MIN_MACOSX].first +puts lc_vers.version_string # => "10.10.0" +``` + +### What works? + +* Reading data from x86/x86_64/PPC Mach-O files +* Changing the IDs of Mach-O and Fat dylibs +* Changing install names in Mach-O and Fat files +* Adding, deleting, and modifying rpaths. + +### What needs to be done? + +* Documentation. +* Unit and performance testing. + +Attribution: + +* Constants were taken from Apple, Inc's +[`loader.h` in `cctools/include/mach-o`](https://www.opensource.apple.com/source/cctools/cctools-870/include/mach-o/loader.h). +(Apple Public Source License 2.0). + +### License + +`ruby-macho` is licensed under the MIT License. + +For the exact terms, see the [license](LICENSE) file. diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho.rb new file mode 100644 index 0000000..517c9c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho.rb @@ -0,0 +1,40 @@ +require_relative "macho/structure" +require_relative "macho/view" +require_relative "macho/headers" +require_relative "macho/load_commands" +require_relative "macho/sections" +require_relative "macho/macho_file" +require_relative "macho/fat_file" +require_relative "macho/exceptions" +require_relative "macho/utils" +require_relative "macho/tools" + +# The primary namespace for ruby-macho. +module MachO + # release version + VERSION = "1.4.0".freeze + + # Opens the given filename as a MachOFile or FatFile, depending on its magic. + # @param filename [String] the file being opened + # @return [MachOFile] if the file is a Mach-O + # @return [FatFile] if the file is a Fat file + # @raise [ArgumentError] if the given file does not exist + # @raise [TruncatedFileError] if the file is too small to have a valid header + # @raise [MagicError] if the file's magic is not valid Mach-O magic + def self.open(filename) + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + raise TruncatedFileError unless File.stat(filename).size >= 4 + + magic = File.open(filename, "rb") { |f| f.read(4) }.unpack("N").first + + if Utils.fat_magic?(magic) + file = FatFile.new(filename) + elsif Utils.magic?(magic) + file = MachOFile.new(filename) + else + raise MagicError, magic + end + + file + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/exceptions.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/exceptions.rb new file mode 100644 index 0000000..a07781c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/exceptions.rb @@ -0,0 +1,207 @@ +module MachO + # A generic Mach-O error in execution. + class MachOError < RuntimeError + end + + # Raised when a Mach-O file modification fails. + class ModificationError < MachOError + end + + # Raised when a Mach-O file modification fails but can be recovered when + # operating on multiple Mach-O slices of a fat binary in non-strict mode. 
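As an illustration of the magic-based dispatch in `MachO.open` above, a minimal sketch; the binary path is a placeholder:

```ruby
require "macho"

# Placeholder path; point it at any thin or fat Mach-O on disk.
file = MachO.open("/path/to/some/binary")

case file
when MachO::FatFile
  # Fat binary: one Mach-O slice per architecture.
  puts "fat binary (#{file.magic_string}) with #{file.machos.size} slice(s)"
when MachO::MachOFile
  puts "thin Mach-O, filetype #{file.filetype}"
end
```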
+ class RecoverableModificationError < ModificationError + # @return [Integer, nil] The index of the Mach-O slice of a fat binary for + # which modification failed or `nil` if not a fat binary. This is used to + # make the error message more useful. + attr_accessor :macho_slice + + # @return [String] The exception message. + def to_s + s = super.to_s + s = "While modifying Mach-O slice #{@macho_slice}: #{s}" if @macho_slice + s + end + end + + # Raised when a file is not a Mach-O. + class NotAMachOError < MachOError + # @param error [String] the error in question + def initialize(error) + super error + end + end + + # Raised when a file is too short to be a valid Mach-O file. + class TruncatedFileError < NotAMachOError + def initialize + super "File is too short to be a valid Mach-O" + end + end + + # Raised when a file's magic bytes are not valid Mach-O magic. + class MagicError < NotAMachOError + # @param num [Integer] the unknown number + def initialize(num) + super "Unrecognized Mach-O magic: 0x#{"%02x" % num}" + end + end + + # Raised when a file is a Java classfile instead of a fat Mach-O. + class JavaClassFileError < NotAMachOError + def initialize + super "File is a Java class file" + end + end + + # Raised when a fat binary is loaded with MachOFile. + class FatBinaryError < MachOError + def initialize + super "Fat binaries must be loaded with MachO::FatFile" + end + end + + # Raised when a Mach-O is loaded with FatFile. + class MachOBinaryError < MachOError + def initialize + super "Normal binaries must be loaded with MachO::MachOFile" + end + end + + # Raised when the CPU type is unknown. + class CPUTypeError < MachOError + # @param cputype [Integer] the unknown CPU type + def initialize(cputype) + super "Unrecognized CPU type: 0x#{"%08x" % cputype}" + end + end + + # Raised when the CPU type/sub-type pair is unknown. + class CPUSubtypeError < MachOError + # @param cputype [Integer] the CPU type of the unknown pair + # @param cpusubtype [Integer] the CPU sub-type of the unknown pair + def initialize(cputype, cpusubtype) + super "Unrecognized CPU sub-type: 0x#{"%08x" % cpusubtype}" \ + " (for CPU type: 0x#{"%08x" % cputype})" + end + end + + # Raised when a mach-o file's filetype field is unknown. + class FiletypeError < MachOError + # @param num [Integer] the unknown number + def initialize(num) + super "Unrecognized Mach-O filetype code: 0x#{"%02x" % num}" + end + end + + # Raised when an unknown load command is encountered. + class LoadCommandError < MachOError + # @param num [Integer] the unknown number + def initialize(num) + super "Unrecognized Mach-O load command: 0x#{"%02x" % num}" + end + end + + # Raised when a load command can't be created manually. + class LoadCommandNotCreatableError < MachOError + # @param cmd_sym [Symbol] the uncreatable load command's symbol + def initialize(cmd_sym) + super "Load commands of type #{cmd_sym} cannot be created manually" + end + end + + # Raised when the number of arguments used to create a load command manually + # is wrong. + class LoadCommandCreationArityError < MachOError + # @param cmd_sym [Symbol] the load command's symbol + # @param expected_arity [Integer] the number of arguments expected + # @param actual_arity [Integer] the number of arguments received + def initialize(cmd_sym, expected_arity, actual_arity) + super "Expected #{expected_arity} arguments for #{cmd_sym} creation," \ + " got #{actual_arity}" + end + end + + # Raised when a load command can't be serialized. 
+ class LoadCommandNotSerializableError < MachOError + # @param cmd_sym [Symbol] the load command's symbol + def initialize(cmd_sym) + super "Load commands of type #{cmd_sym} cannot be serialized" + end + end + + # Raised when a load command string is malformed in some way. + class LCStrMalformedError < MachOError + # @param lc [MachO::LoadCommand] the load command containing the string + def initialize(lc) + super "Load command #{lc.type} at offset #{lc.view.offset} contains a" \ + " malformed string" + end + end + + # Raised when a change at an offset is not valid. + class OffsetInsertionError < ModificationError + # @param offset [Integer] the invalid offset + def initialize(offset) + super "Insertion at offset #{offset} is not valid" + end + end + + # Raised when load commands are too large to fit in the current file. + class HeaderPadError < ModificationError + # @param filename [String] the filename + def initialize(filename) + super "Updated load commands do not fit in the header of " \ + "#{filename}. #{filename} needs to be relinked, possibly with " \ + "-headerpad or -headerpad_max_install_names" + end + end + + # Raised when attempting to change a dylib name that doesn't exist. + class DylibUnknownError < RecoverableModificationError + # @param dylib [String] the unknown shared library name + def initialize(dylib) + super "No such dylib name: #{dylib}" + end + end + + # Raised when a dylib is missing an ID + class DylibIdMissingError < RecoverableModificationError + def initialize + super "Dylib is missing a dylib ID" + end + end + + # Raised when attempting to change an rpath that doesn't exist. + class RpathUnknownError < RecoverableModificationError + # @param path [String] the unknown runtime path + def initialize(path) + super "No such runtime path: #{path}" + end + end + + # Raised when attempting to add an rpath that already exists. + class RpathExistsError < RecoverableModificationError + # @param path [String] the extant path + def initialize(path) + super "#{path} already exists" + end + end + + # Raised whenever unfinished code is called. + class UnimplementedError < MachOError + # @param thing [String] the thing that is unimplemented + def initialize(thing) + super "Unimplemented: #{thing}" + end + end + + # Raised when attempting to create a {FatFile} from one or more {MachOFile}s + # whose offsets will not fit within the resulting 32-bit {Headers::FatArch#offset} fields. + class FatArchOffsetOverflowError < MachOError + # @param offset [Integer] the offending offset + def initialize(offset) + super "Offset #{offset} exceeds the 32-bit width of a fat_arch offset." \ + " Consider merging with `fat64: true`" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/fat_file.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/fat_file.rb new file mode 100644 index 0000000..b90df90 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/fat_file.rb @@ -0,0 +1,419 @@ +require "forwardable" + +module MachO + # Represents a "Fat" file, which contains a header, a listing of available + # architectures, and one or more Mach-O binaries. 
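The `FatFile` class introduced here can also be built programmatically from thin Mach-Os via `new_from_machos`, defined just below. A minimal sketch with placeholder paths:

```ruby
require "macho"

# Placeholder paths for two single-architecture builds of the same dylib.
x86_64 = MachO::MachOFile.new("build/x86_64/libfoo.dylib")
arm64  = MachO::MachOFile.new("build/arm64/libfoo.dylib")

# Combines the slices into one fat binary; pass fat64: true if the
# resulting offsets would overflow the 32-bit fat_arch fields.
fat = MachO::FatFile.new_from_machos(x86_64, arm64)
fat.write("build/universal/libfoo.dylib")
```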
+ # @see https://en.wikipedia.org/wiki/Mach-O#Multi-architecture_binaries + # @see MachOFile + class FatFile + extend Forwardable + + # @return [String] the filename loaded from, or nil if loaded from a binary string + attr_accessor :filename + + # @return [Headers::FatHeader] the file's header + attr_reader :header + + # @return [Array<Headers::FatArch>, Array<Headers::FatArch64] an array of fat architectures + attr_reader :fat_archs + + # @return [Array<MachOFile>] an array of Mach-O binaries + attr_reader :machos + + # Creates a new FatFile from the given (single-arch) Mach-Os + # @param machos [Array<MachOFile>] the machos to combine + # @param fat64 [Boolean] whether to use {Headers::FatArch64}s to represent each slice + # @return [FatFile] a new FatFile containing the give machos + # @raise [ArgumentError] if less than one Mach-O is given + # @raise [FatArchOffsetOverflowError] if the Mach-Os are too big to be represented + # in a 32-bit {Headers::FatArch} and `fat64` is `false`. + def self.new_from_machos(*machos, fat64: false) + raise ArgumentError, "expected at least one Mach-O" if machos.empty? + + fa_klass, magic = if fat64 + [Headers::FatArch64, Headers::FAT_MAGIC_64] + else + [Headers::FatArch, Headers::FAT_MAGIC] + end + + # put the smaller alignments further forwards in fat macho, so that we do less padding + machos = machos.sort_by(&:segment_alignment) + + bin = if String.instance_methods.include? :+@ + +"" + else + "" + end + + bin << Headers::FatHeader.new(magic, machos.size).serialize + offset = Headers::FatHeader.bytesize + (machos.size * fa_klass.bytesize) + + macho_pads = {} + + machos.each do |macho| + macho_offset = Utils.round(offset, 2**macho.segment_alignment) + + if !fat64 && macho_offset > (2**32 - 1) + raise FatArchOffsetOverflowError, macho_offset + end + + macho_pads[macho] = Utils.padding_for(offset, 2**macho.segment_alignment) + + bin << fa_klass.new(macho.header.cputype, macho.header.cpusubtype, + macho_offset, macho.serialize.bytesize, + macho.segment_alignment).serialize + + offset += (macho.serialize.bytesize + macho_pads[macho]) + end + + machos.each do |macho| + bin << Utils.nullpad(macho_pads[macho]) + bin << macho.serialize + end + + new_from_bin(bin) + end + + # Creates a new FatFile instance from a binary string. + # @param bin [String] a binary string containing raw Mach-O data + # @return [FatFile] a new FatFile + def self.new_from_bin(bin) + instance = allocate + instance.initialize_from_bin(bin) + + instance + end + + # Creates a new FatFile from the given filename. + # @param filename [String] the fat file to load from + # @raise [ArgumentError] if the given file does not exist + def initialize(filename) + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + + @filename = filename + @raw_data = File.open(@filename, "rb", &:read) + populate_fields + end + + # Initializes a new FatFile instance from a binary string. + # @see new_from_bin + # @api private + def initialize_from_bin(bin) + @filename = nil + @raw_data = bin + populate_fields + end + + # The file's raw fat data. + # @return [String] the raw fat data + def serialize + @raw_data + end + + # @!method object? + # @return (see MachO::MachOFile#object?) + # @!method executable? + # @return (see MachO::MachOFile#executable?) + # @!method fvmlib? + # @return (see MachO::MachOFile#fvmlib?) + # @!method core? + # @return (see MachO::MachOFile#core?) + # @!method preload? + # @return (see MachO::MachOFile#preload?) + # @!method dylib? 
+ # @return (see MachO::MachOFile#dylib?) + # @!method dylinker? + # @return (see MachO::MachOFile#dylinker?) + # @!method bundle? + # @return (see MachO::MachOFile#bundle?) + # @!method dsym? + # @return (see MachO::MachOFile#dsym?) + # @!method kext? + # @return (see MachO::MachOFile#kext?) + # @!method filetype + # @return (see MachO::MachOFile#filetype) + # @!method dylib_id + # @return (see MachO::MachOFile#dylib_id) + def_delegators :canonical_macho, :object?, :executable?, :fvmlib?, + :core?, :preload?, :dylib?, :dylinker?, :bundle?, + :dsym?, :kext?, :filetype, :dylib_id + + # @!method magic + # @return (see MachO::Headers::FatHeader#magic) + def_delegators :header, :magic + + # @return [String] a string representation of the file's magic number + def magic_string + Headers::MH_MAGICS[magic] + end + + # Populate the instance's fields with the raw Fat Mach-O data. + # @return [void] + # @note This method is public, but should (almost) never need to be called. + def populate_fields + @header = populate_fat_header + @fat_archs = populate_fat_archs + @machos = populate_machos + end + + # All load commands responsible for loading dylibs in the file's Mach-O's. + # @return [Array<LoadCommands::DylibCommand>] an array of DylibCommands + def dylib_load_commands + machos.map(&:dylib_load_commands).flatten + end + + # Changes the file's dylib ID to `new_id`. If the file is not a dylib, + # does nothing. + # @example + # file.change_dylib_id('libFoo.dylib') + # @param new_id [String] the new dylib ID + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @raise [ArgumentError] if `new_id` is not a String + # @see MachOFile#linked_dylibs + def change_dylib_id(new_id, options = {}) + raise ArgumentError, "argument must be a String" unless new_id.is_a?(String) + return unless machos.all?(&:dylib?) + + each_macho(options) do |macho| + macho.change_dylib_id(new_id, options) + end + + repopulate_raw_machos + end + + alias dylib_id= change_dylib_id + + # All shared libraries linked to the file's Mach-Os. + # @return [Array<String>] an array of all shared libraries + # @see MachOFile#linked_dylibs + def linked_dylibs + # Individual architectures in a fat binary can link to different subsets + # of libraries, but at this point we want to have the full picture, i.e. + # the union of all libraries used by all architectures. + machos.map(&:linked_dylibs).flatten.uniq + end + + # Changes all dependent shared library install names from `old_name` to + # `new_name`. In a fat file, this changes install names in all internal + # Mach-Os. + # @example + # file.change_install_name('/usr/lib/libFoo.dylib', '/usr/lib/libBar.dylib') + # @param old_name [String] the shared library name being changed + # @param new_name [String] the new name + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @see MachOFile#change_install_name + def change_install_name(old_name, new_name, options = {}) + each_macho(options) do |macho| + macho.change_install_name(old_name, new_name, options) + end + + repopulate_raw_machos + end + + alias change_dylib change_install_name + + # All runtime paths associated with the file's Mach-Os. + # @return [Array<String>] an array of all runtime paths + # @see MachOFile#rpaths + def rpaths + # Can individual architectures have different runtime paths? 
+ machos.map(&:rpaths).flatten.uniq + end + + # Change the runtime path `old_path` to `new_path` in the file's Mach-Os. + # @param old_path [String] the old runtime path + # @param new_path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @see MachOFile#change_rpath + def change_rpath(old_path, new_path, options = {}) + each_macho(options) do |macho| + macho.change_rpath(old_path, new_path, options) + end + + repopulate_raw_machos + end + + # Add the given runtime path to the file's Mach-Os. + # @param path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @see MachOFile#add_rpath + def add_rpath(path, options = {}) + each_macho(options) do |macho| + macho.add_rpath(path, options) + end + + repopulate_raw_machos + end + + # Delete the given runtime path from the file's Mach-Os. + # @param path [String] the runtime path to delete + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return void + # @see MachOFile#delete_rpath + def delete_rpath(path, options = {}) + each_macho(options) do |macho| + macho.delete_rpath(path, options) + end + + repopulate_raw_machos + end + + # Extract a Mach-O with the given CPU type from the file. + # @example + # file.extract(:i386) # => MachO::MachOFile + # @param cputype [Symbol] the CPU type of the Mach-O being extracted + # @return [MachOFile, nil] the extracted Mach-O or nil if no Mach-O has the given CPU type + def extract(cputype) + machos.select { |macho| macho.cputype == cputype }.first + end + + # Write all (fat) data to the given filename. + # @param filename [String] the file to write to + # @return [void] + def write(filename) + File.open(filename, "wb") { |f| f.write(@raw_data) } + end + + # Write all (fat) data to the file used to initialize the instance. + # @return [void] + # @raise [MachOError] if the instance was initialized without a file + # @note Overwrites all data in the file! + def write! + raise MachOError, "no initial file to write to" if filename.nil? + + File.open(@filename, "wb") { |f| f.write(@raw_data) } + end + + # @return [Hash] a hash representation of this {FatFile} + def to_h + { + "header" => header.to_h, + "fat_archs" => fat_archs.map(&:to_h), + "machos" => machos.map(&:to_h), + } + end + + private + + # Obtain the fat header from raw file data. + # @return [Headers::FatHeader] the fat header + # @raise [TruncatedFileError] if the file is too small to have a + # valid header + # @raise [MagicError] if the magic is not valid Mach-O magic + # @raise [MachOBinaryError] if the magic is for a non-fat Mach-O file + # @raise [JavaClassFileError] if the file is a Java classfile + # @api private + def populate_fat_header + # the smallest fat Mach-O header is 8 bytes + raise TruncatedFileError if @raw_data.size < 8 + + fh = Headers::FatHeader.new_from_bin(:big, @raw_data[0, Headers::FatHeader.bytesize]) + + raise MagicError, fh.magic unless Utils.magic?(fh.magic) + raise MachOBinaryError unless Utils.fat_magic?(fh.magic) + + # Rationale: Java classfiles have the same magic as big-endian fat + # Mach-Os. 
Classfiles encode their version at the same offset as + # `nfat_arch` and the lowest version number is 43, so we error out + # if a file claims to have over 30 internal architectures. It's + # technically possible for a fat Mach-O to have over 30 architectures, + # but this is extremely unlikely and in practice distinguishes the two + # formats. + raise JavaClassFileError if fh.nfat_arch > 30 + + fh + end + + # Obtain an array of fat architectures from raw file data. + # @return [Array<Headers::FatArch>] an array of fat architectures + # @api private + def populate_fat_archs + archs = [] + + fa_klass = Utils.fat_magic32?(header.magic) ? Headers::FatArch : Headers::FatArch64 + fa_off = Headers::FatHeader.bytesize + fa_len = fa_klass.bytesize + + header.nfat_arch.times do |i| + archs << fa_klass.new_from_bin(:big, @raw_data[fa_off + (fa_len * i), fa_len]) + end + + archs + end + + # Obtain an array of Mach-O blobs from raw file data. + # @return [Array<MachOFile>] an array of Mach-Os + # @api private + def populate_machos + machos = [] + + fat_archs.each do |arch| + machos << MachOFile.new_from_bin(@raw_data[arch.offset, arch.size]) + end + + machos + end + + # Repopulate the raw Mach-O data with each internal Mach-O object. + # @return [void] + # @api private + def repopulate_raw_machos + machos.each_with_index do |macho, i| + arch = fat_archs[i] + + @raw_data[arch.offset, arch.size] = macho.serialize + end + end + + # Yield each Mach-O object in the file, rescuing and accumulating errors. + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if at least one Mach-O raises an exception. If false, + # only raises an exception if *all* Mach-Os raise exceptions. + # @raise [RecoverableModificationError] under the conditions of + # the `:strict` option above. + # @api private + def each_macho(options = {}) + strict = options.fetch(:strict, true) + errors = [] + + machos.each_with_index do |macho, index| + begin + yield macho + rescue RecoverableModificationError => error + error.macho_slice = index + + # Strict mode: Immediately re-raise. Otherwise: Retain, check later. + raise error if strict + + errors << error + end + end + + # Non-strict mode: Raise first error if *all* Mach-O slices failed. + raise errors.first if errors.size == machos.size + end + + # Return a single-arch Mach-O that represents this fat Mach-O for purposes + # of delegation. + # @return [MachOFile] the Mach-O file + # @api private + def canonical_macho + machos.first + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/headers.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/headers.rb new file mode 100644 index 0000000..0a940e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/headers.rb @@ -0,0 +1,784 @@ +module MachO + # Classes and constants for parsing the headers of Mach-O binaries. + module Headers + # big-endian fat magic + # @api private + FAT_MAGIC = 0xcafebabe + + # little-endian fat magic + # @note This is defined for completeness, but should never appear in ruby-macho code, + # since fat headers are always big-endian. + # @api private + FAT_CIGAM = 0xbebafeca + + # 64-bit big-endian fat magic + FAT_MAGIC_64 = 0xcafebabf + + # 64-bit little-endian fat magic + # @note This is defined for completeness, but should never appear in ruby-macho code, + # since fat headers are always big-endian. 
+ FAT_CIGAM_64 = 0xbfbafeca + + # 32-bit big-endian magic + # @api private + MH_MAGIC = 0xfeedface + + # 32-bit little-endian magic + # @api private + MH_CIGAM = 0xcefaedfe + + # 64-bit big-endian magic + # @api private + MH_MAGIC_64 = 0xfeedfacf + + # 64-bit little-endian magic + # @api private + MH_CIGAM_64 = 0xcffaedfe + + # association of magic numbers to string representations + # @api private + MH_MAGICS = { + FAT_MAGIC => "FAT_MAGIC", + FAT_MAGIC_64 => "FAT_MAGIC_64", + MH_MAGIC => "MH_MAGIC", + MH_CIGAM => "MH_CIGAM", + MH_MAGIC_64 => "MH_MAGIC_64", + MH_CIGAM_64 => "MH_CIGAM_64", + }.freeze + + # mask for CPUs with 64-bit architectures (when running a 64-bit ABI?) + # @api private + CPU_ARCH_ABI64 = 0x01000000 + + # mask for CPUs with 64-bit architectures (when running a 32-bit ABI?) + # @see https://github.com/Homebrew/ruby-macho/issues/113 + # @api private + CPU_ARCH_ABI32 = 0x02000000 + + # any CPU (unused?) + # @api private + CPU_TYPE_ANY = -1 + + # m68k compatible CPUs + # @api private + CPU_TYPE_MC680X0 = 0x06 + + # i386 and later compatible CPUs + # @api private + CPU_TYPE_I386 = 0x07 + + # x86_64 (AMD64) compatible CPUs + # @api private + CPU_TYPE_X86_64 = (CPU_TYPE_I386 | CPU_ARCH_ABI64) + + # 32-bit ARM compatible CPUs + # @api private + CPU_TYPE_ARM = 0x0c + + # m88k compatible CPUs + # @api private + CPU_TYPE_MC88000 = 0xd + + # 64-bit ARM compatible CPUs + # @api private + CPU_TYPE_ARM64 = (CPU_TYPE_ARM | CPU_ARCH_ABI64) + + # 64-bit ARM compatible CPUs (running in 32-bit mode?) + # @see https://github.com/Homebrew/ruby-macho/issues/113 + CPU_TYPE_ARM64_32 = (CPU_TYPE_ARM | CPU_ARCH_ABI32) + + # PowerPC compatible CPUs + # @api private + CPU_TYPE_POWERPC = 0x12 + + # PowerPC64 compatible CPUs + # @api private + CPU_TYPE_POWERPC64 = (CPU_TYPE_POWERPC | CPU_ARCH_ABI64) + + # association of cpu types to symbol representations + # @api private + CPU_TYPES = { + CPU_TYPE_ANY => :any, + CPU_TYPE_I386 => :i386, + CPU_TYPE_X86_64 => :x86_64, + CPU_TYPE_ARM => :arm, + CPU_TYPE_ARM64 => :arm64, + CPU_TYPE_ARM64_32 => :arm64_32, + CPU_TYPE_POWERPC => :ppc, + CPU_TYPE_POWERPC64 => :ppc64, + }.freeze + + # mask for CPU subtype capabilities + # @api private + CPU_SUBTYPE_MASK = 0xff000000 + + # 64-bit libraries (undocumented!) + # @see http://llvm.org/docs/doxygen/html/Support_2MachO_8h_source.html + # @api private + CPU_SUBTYPE_LIB64 = 0x80000000 + + # the lowest common sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_I386 = 3 + + # the i486 sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_486 = 4 + + # the i486SX sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_486SX = 132 + + # the i586 (P5, Pentium) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_586 = 5 + + # @see CPU_SUBTYPE_586 + # @api private + CPU_SUBTYPE_PENT = CPU_SUBTYPE_586 + + # the Pentium Pro (P6) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTPRO = 22 + + # the Pentium II (P6, M3?) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTII_M3 = 54 + + # the Pentium II (P6, M5?) 
sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTII_M5 = 86 + + # the Pentium 4 (Netburst) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTIUM_4 = 10 + + # the lowest common sub-type for `CPU_TYPE_MC680X0` + # @api private + CPU_SUBTYPE_MC680X0_ALL = 1 + + # @see CPU_SUBTYPE_MC680X0_ALL + # @api private + CPU_SUBTYPE_MC68030 = CPU_SUBTYPE_MC680X0_ALL + + # the 040 subtype for `CPU_TYPE_MC680X0` + # @api private + CPU_SUBTYPE_MC68040 = 2 + + # the 030 subtype for `CPU_TYPE_MC680X0` + # @api private + CPU_SUBTYPE_MC68030_ONLY = 3 + + # the lowest common sub-type for `CPU_TYPE_X86_64` + # @api private + CPU_SUBTYPE_X86_64_ALL = CPU_SUBTYPE_I386 + + # the Haskell sub-type for `CPU_TYPE_X86_64` + # @api private + CPU_SUBTYPE_X86_64_H = 8 + + # the lowest common sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_ALL = 0 + + # the v4t sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V4T = 5 + + # the v6 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V6 = 6 + + # the v5 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V5TEJ = 7 + + # the xscale (v5 family) sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_XSCALE = 8 + + # the v7 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7 = 9 + + # the v7f (Cortex A9) sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7F = 10 + + # the v7s ("Swift") sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7S = 11 + + # the v7k ("Kirkwood40") sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7K = 12 + + # the v6m sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V6M = 14 + + # the v7m sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7M = 15 + + # the v7em sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7EM = 16 + + # the v8 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V8 = 13 + + # the lowest common sub-type for `CPU_TYPE_ARM64` + # @api private + CPU_SUBTYPE_ARM64_ALL = 0 + + # the v8 sub-type for `CPU_TYPE_ARM64` + # @api private + CPU_SUBTYPE_ARM64_V8 = 1 + + # the v8 sub-type for `CPU_TYPE_ARM64_32` + # @api private + CPU_SUBTYPE_ARM64_32_V8 = 1 + + # the e (A12) sub-type for `CPU_TYPE_ARM64` + # @api private + CPU_SUBTYPE_ARM64E = 2 + + # the lowest common sub-type for `CPU_TYPE_MC88000` + # @api private + CPU_SUBTYPE_MC88000_ALL = 0 + + # @see CPU_SUBTYPE_MC88000_ALL + # @api private + CPU_SUBTYPE_MMAX_JPC = CPU_SUBTYPE_MC88000_ALL + + # the 100 sub-type for `CPU_TYPE_MC88000` + # @api private + CPU_SUBTYPE_MC88100 = 1 + + # the 110 sub-type for `CPU_TYPE_MC88000` + # @api private + CPU_SUBTYPE_MC88110 = 2 + + # the lowest common sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_ALL = 0 + + # the 601 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_601 = 1 + + # the 602 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_602 = 2 + + # the 603 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_603 = 3 + + # the 603e (G2) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_603E = 4 + + # the 603ev sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_603EV = 5 + + # the 604 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_604 = 6 + + # the 604e sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_604E = 7 + + # the 620 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_620 
= 8 + + # the 750 (G3) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_750 = 9 + + # the 7400 (G4) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_7400 = 10 + + # the 7450 (G4 "Voyager") sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_7450 = 11 + + # the 970 (G5) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_970 = 100 + + # any CPU sub-type for CPU type `CPU_TYPE_POWERPC64` + # @api private + CPU_SUBTYPE_POWERPC64_ALL = CPU_SUBTYPE_POWERPC_ALL + + # association of CPU types/subtype pairs to symbol representations in + # (very) roughly descending order of commonness + # @see https://opensource.apple.com/source/cctools/cctools-877.8/libstuff/arch.c + # @api private + CPU_SUBTYPES = { + CPU_TYPE_I386 => { + CPU_SUBTYPE_I386 => :i386, + CPU_SUBTYPE_486 => :i486, + CPU_SUBTYPE_486SX => :i486SX, + CPU_SUBTYPE_586 => :i586, # also "pentium" in arch(3) + CPU_SUBTYPE_PENTPRO => :i686, # also "pentpro" in arch(3) + CPU_SUBTYPE_PENTII_M3 => :pentIIm3, + CPU_SUBTYPE_PENTII_M5 => :pentIIm5, + CPU_SUBTYPE_PENTIUM_4 => :pentium4, + }.freeze, + CPU_TYPE_X86_64 => { + CPU_SUBTYPE_X86_64_ALL => :x86_64, + CPU_SUBTYPE_X86_64_H => :x86_64h, + }.freeze, + CPU_TYPE_ARM => { + CPU_SUBTYPE_ARM_ALL => :arm, + CPU_SUBTYPE_ARM_V4T => :armv4t, + CPU_SUBTYPE_ARM_V6 => :armv6, + CPU_SUBTYPE_ARM_V5TEJ => :armv5, + CPU_SUBTYPE_ARM_XSCALE => :xscale, + CPU_SUBTYPE_ARM_V7 => :armv7, + CPU_SUBTYPE_ARM_V7F => :armv7f, + CPU_SUBTYPE_ARM_V7S => :armv7s, + CPU_SUBTYPE_ARM_V7K => :armv7k, + CPU_SUBTYPE_ARM_V6M => :armv6m, + CPU_SUBTYPE_ARM_V7M => :armv7m, + CPU_SUBTYPE_ARM_V7EM => :armv7em, + CPU_SUBTYPE_ARM_V8 => :armv8, + }.freeze, + CPU_TYPE_ARM64 => { + CPU_SUBTYPE_ARM64_ALL => :arm64, + CPU_SUBTYPE_ARM64_V8 => :arm64v8, + CPU_SUBTYPE_ARM64E => :arm64e, + }.freeze, + CPU_TYPE_ARM64_32 => { + CPU_SUBTYPE_ARM64_32_V8 => :arm64_32v8, + }.freeze, + CPU_TYPE_POWERPC => { + CPU_SUBTYPE_POWERPC_ALL => :ppc, + CPU_SUBTYPE_POWERPC_601 => :ppc601, + CPU_SUBTYPE_POWERPC_603 => :ppc603, + CPU_SUBTYPE_POWERPC_603E => :ppc603e, + CPU_SUBTYPE_POWERPC_603EV => :ppc603ev, + CPU_SUBTYPE_POWERPC_604 => :ppc604, + CPU_SUBTYPE_POWERPC_604E => :ppc604e, + CPU_SUBTYPE_POWERPC_750 => :ppc750, + CPU_SUBTYPE_POWERPC_7400 => :ppc7400, + CPU_SUBTYPE_POWERPC_7450 => :ppc7450, + CPU_SUBTYPE_POWERPC_970 => :ppc970, + }.freeze, + CPU_TYPE_POWERPC64 => { + CPU_SUBTYPE_POWERPC64_ALL => :ppc64, + # apparently the only exception to the naming scheme + CPU_SUBTYPE_POWERPC_970 => :ppc970_64, + }.freeze, + CPU_TYPE_MC680X0 => { + CPU_SUBTYPE_MC680X0_ALL => :m68k, + CPU_SUBTYPE_MC68030 => :mc68030, + CPU_SUBTYPE_MC68040 => :mc68040, + }, + CPU_TYPE_MC88000 => { + CPU_SUBTYPE_MC88000_ALL => :m88k, + }, + }.freeze + + # relocatable object file + # @api private + MH_OBJECT = 0x1 + + # demand paged executable file + # @api private + MH_EXECUTE = 0x2 + + # fixed VM shared library file + # @api private + MH_FVMLIB = 0x3 + + # core dump file + # @api private + MH_CORE = 0x4 + + # preloaded executable file + # @api private + MH_PRELOAD = 0x5 + + # dynamically bound shared library + # @api private + MH_DYLIB = 0x6 + + # dynamic link editor + # @api private + MH_DYLINKER = 0x7 + + # dynamically bound bundle file + # @api private + MH_BUNDLE = 0x8 + + # shared library stub for static linking only, no section contents + # @api private + MH_DYLIB_STUB = 0x9 + + # companion file with only debug sections + # @api private + MH_DSYM = 0xa + + # x86_64 kexts + # @api private + 
MH_KEXT_BUNDLE = 0xb + + # association of filetypes to Symbol representations + # @api private + MH_FILETYPES = { + MH_OBJECT => :object, + MH_EXECUTE => :execute, + MH_FVMLIB => :fvmlib, + MH_CORE => :core, + MH_PRELOAD => :preload, + MH_DYLIB => :dylib, + MH_DYLINKER => :dylinker, + MH_BUNDLE => :bundle, + MH_DYLIB_STUB => :dylib_stub, + MH_DSYM => :dsym, + MH_KEXT_BUNDLE => :kext_bundle, + }.freeze + + # association of mach header flag symbols to values + # @api private + MH_FLAGS = { + :MH_NOUNDEFS => 0x1, + :MH_INCRLINK => 0x2, + :MH_DYLDLINK => 0x4, + :MH_BINDATLOAD => 0x8, + :MH_PREBOUND => 0x10, + :MH_SPLIT_SEGS => 0x20, + :MH_LAZY_INIT => 0x40, + :MH_TWOLEVEL => 0x80, + :MH_FORCE_FLAT => 0x100, + :MH_NOMULTIDEFS => 0x200, + :MH_NOPREFIXBINDING => 0x400, + :MH_PREBINDABLE => 0x800, + :MH_ALLMODSBOUND => 0x1000, + :MH_SUBSECTIONS_VIA_SYMBOLS => 0x2000, + :MH_CANONICAL => 0x4000, + :MH_WEAK_DEFINES => 0x8000, + :MH_BINDS_TO_WEAK => 0x10000, + :MH_ALLOW_STACK_EXECUTION => 0x20000, + :MH_ROOT_SAFE => 0x40000, + :MH_SETUID_SAFE => 0x80000, + :MH_NO_REEXPORTED_DYLIBS => 0x100000, + :MH_PIE => 0x200000, + :MH_DEAD_STRIPPABLE_DYLIB => 0x400000, + :MH_HAS_TLV_DESCRIPTORS => 0x800000, + :MH_NO_HEAP_EXECUTION => 0x1000000, + :MH_APP_EXTENSION_SAFE => 0x02000000, + }.freeze + + # Fat binary header structure + # @see MachO::FatArch + class FatHeader < MachOStructure + # @return [Integer] the magic number of the header (and file) + attr_reader :magic + + # @return [Integer] the number of fat architecture structures following the header + attr_reader :nfat_arch + + # always big-endian + # @see MachOStructure::FORMAT + # @api private + FORMAT = "N2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + + # @api private + def initialize(magic, nfat_arch) + @magic = magic + @nfat_arch = nfat_arch + end + + # @return [String] the serialized fields of the fat header + def serialize + [magic, nfat_arch].pack(FORMAT) + end + + # @return [Hash] a hash representation of this {FatHeader} + def to_h + { + "magic" => magic, + "magic_sym" => MH_MAGICS[magic], + "nfat_arch" => nfat_arch, + }.merge super + end + end + + # 32-bit fat binary header architecture structure. A 32-bit fat Mach-O has one or more of + # these, indicating one or more internal Mach-O blobs. + # @note "32-bit" indicates the fact that this structure stores 32-bit offsets, not that the + # Mach-Os that it points to necessarily *are* 32-bit. + # @see MachO::Headers::FatHeader + class FatArch < MachOStructure + # @return [Integer] the CPU type of the Mach-O + attr_reader :cputype + + # @return [Integer] the CPU subtype of the Mach-O + attr_reader :cpusubtype + + # @return [Integer] the file offset to the beginning of the Mach-O data + attr_reader :offset + + # @return [Integer] the size, in bytes, of the Mach-O data + attr_reader :size + + # @return [Integer] the alignment, as a power of 2 + attr_reader :align + + # @note Always big endian. 
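These tables (marked `@api private`, shown here purely for illustration) are what turn raw header integers into the symbols reported by the public API. For example, using the constants listed above:

```ruby
require "macho"

include MachO::Headers

CPU_TYPES[CPU_TYPE_ARM64]                        # => :arm64
CPU_SUBTYPES[CPU_TYPE_ARM64][CPU_SUBTYPE_ARM64E] # => :arm64e
MH_FILETYPES[MH_EXECUTE]                         # => :execute
MH_FLAGS[:MH_PIE]                                # => 0x200000
```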
+ # @see MachOStructure::FORMAT + # @api private + FORMAT = "L>5".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + # @api private + def initialize(cputype, cpusubtype, offset, size, align) + @cputype = cputype + @cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK + @offset = offset + @size = size + @align = align + end + + # @return [String] the serialized fields of the fat arch + def serialize + [cputype, cpusubtype, offset, size, align].pack(FORMAT) + end + + # @return [Hash] a hash representation of this {FatArch} + def to_h + { + "cputype" => cputype, + "cputype_sym" => CPU_TYPES[cputype], + "cpusubtype" => cpusubtype, + "cpusubtype_sym" => CPU_SUBTYPES[cputype][cpusubtype], + "offset" => offset, + "size" => size, + "align" => align, + }.merge super + end + end + + # 64-bit fat binary header architecture structure. A 64-bit fat Mach-O has one or more of + # these, indicating one or more internal Mach-O blobs. + # @note "64-bit" indicates the fact that this structure stores 64-bit offsets, not that the + # Mach-Os that it points to necessarily *are* 64-bit. + # @see MachO::Headers::FatHeader + class FatArch64 < FatArch + # @return [void] + attr_reader :reserved + + # @note Always big endian. + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L>2Q>2L>2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 32 + + # @api private + def initialize(cputype, cpusubtype, offset, size, align, reserved = 0) + super(cputype, cpusubtype, offset, size, align) + @reserved = reserved + end + + # @return [String] the serialized fields of the fat arch + def serialize + [cputype, cpusubtype, offset, size, align, reserved].pack(FORMAT) + end + + # @return [Hash] a hash representation of this {FatArch64} + def to_h + { + "reserved" => reserved, + }.merge super + end + end + + # 32-bit Mach-O file header structure + class MachHeader < MachOStructure + # @return [Integer] the magic number + attr_reader :magic + + # @return [Integer] the CPU type of the Mach-O + attr_reader :cputype + + # @return [Integer] the CPU subtype of the Mach-O + attr_reader :cpusubtype + + # @return [Integer] the file type of the Mach-O + attr_reader :filetype + + # @return [Integer] the number of load commands in the Mach-O + attr_reader :ncmds + + # @return [Integer] the size of all load commands, in bytes, in the Mach-O + attr_reader :sizeofcmds + + # @return [Integer] the header flags associated with the Mach-O + attr_reader :flags + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=7".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 28 + + # @api private + def initialize(magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, + flags) + @magic = magic + @cputype = cputype + # For now we're not interested in additional capability bits also to be + # found in the `cpusubtype` field. We only care about the CPU sub-type. + @cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK + @filetype = filetype + @ncmds = ncmds + @sizeofcmds = sizeofcmds + @flags = flags + end + + # @example + # puts "this mach-o has position-independent execution" if header.flag?(:MH_PIE) + # @param flag [Symbol] a mach header flag symbol + # @return [Boolean] true if `flag` is present in the header's flag section + def flag?(flag) + flag = MH_FLAGS[flag] + + return false if flag.nil? + + flags & flag == flag + end + + # @return [Boolean] whether or not the file is of type `MH_OBJECT` + def object? 
+ filetype == Headers::MH_OBJECT + end + + # @return [Boolean] whether or not the file is of type `MH_EXECUTE` + def executable? + filetype == Headers::MH_EXECUTE + end + + # @return [Boolean] whether or not the file is of type `MH_FVMLIB` + def fvmlib? + filetype == Headers::MH_FVMLIB + end + + # @return [Boolean] whether or not the file is of type `MH_CORE` + def core? + filetype == Headers::MH_CORE + end + + # @return [Boolean] whether or not the file is of type `MH_PRELOAD` + def preload? + filetype == Headers::MH_PRELOAD + end + + # @return [Boolean] whether or not the file is of type `MH_DYLIB` + def dylib? + filetype == Headers::MH_DYLIB + end + + # @return [Boolean] whether or not the file is of type `MH_DYLINKER` + def dylinker? + filetype == Headers::MH_DYLINKER + end + + # @return [Boolean] whether or not the file is of type `MH_BUNDLE` + def bundle? + filetype == Headers::MH_BUNDLE + end + + # @return [Boolean] whether or not the file is of type `MH_DSYM` + def dsym? + filetype == Headers::MH_DSYM + end + + # @return [Boolean] whether or not the file is of type `MH_KEXT_BUNDLE` + def kext? + filetype == Headers::MH_KEXT_BUNDLE + end + + # @return [Boolean] true if the Mach-O has 32-bit magic, false otherwise + def magic32? + Utils.magic32?(magic) + end + + # @return [Boolean] true if the Mach-O has 64-bit magic, false otherwise + def magic64? + Utils.magic64?(magic) + end + + # @return [Integer] the file's internal alignment + def alignment + magic32? ? 4 : 8 + end + + # @return [Hash] a hash representation of this {MachHeader} + def to_h + { + "magic" => magic, + "magic_sym" => MH_MAGICS[magic], + "cputype" => cputype, + "cputype_sym" => CPU_TYPES[cputype], + "cpusubtype" => cpusubtype, + "cpusubtype_sym" => CPU_SUBTYPES[cputype][cpusubtype], + "filetype" => filetype, + "filetype_sym" => MH_FILETYPES[filetype], + "ncmds" => ncmds, + "sizeofcmds" => sizeofcmds, + "flags" => flags, + "alignment" => alignment, + }.merge super + end + end + + # 64-bit Mach-O file header structure + class MachHeader64 < MachHeader + # @return [void] + attr_reader :reserved + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=8".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 32 + + # @api private + def initialize(magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, + flags, reserved) + super(magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags) + @reserved = reserved + end + + # @return [Hash] a hash representation of this {MachHeader64} + def to_h + { + "reserved" => reserved, + }.merge super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/load_commands.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/load_commands.rb new file mode 100644 index 0000000..394a0c6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/load_commands.rb @@ -0,0 +1,1790 @@ +module MachO + # Classes and constants for parsing load commands in Mach-O binaries. 
+ module LoadCommands + # load commands added after OS X 10.1 need to be bitwise ORed with + # LC_REQ_DYLD to be recognized by the dynamic linker (dyld) + # @api private + LC_REQ_DYLD = 0x80000000 + + # association of load commands to symbol representations + # @api private + LOAD_COMMANDS = { + 0x1 => :LC_SEGMENT, + 0x2 => :LC_SYMTAB, + 0x3 => :LC_SYMSEG, + 0x4 => :LC_THREAD, + 0x5 => :LC_UNIXTHREAD, + 0x6 => :LC_LOADFVMLIB, + 0x7 => :LC_IDFVMLIB, + 0x8 => :LC_IDENT, + 0x9 => :LC_FVMFILE, + 0xa => :LC_PREPAGE, + 0xb => :LC_DYSYMTAB, + 0xc => :LC_LOAD_DYLIB, + 0xd => :LC_ID_DYLIB, + 0xe => :LC_LOAD_DYLINKER, + 0xf => :LC_ID_DYLINKER, + 0x10 => :LC_PREBOUND_DYLIB, + 0x11 => :LC_ROUTINES, + 0x12 => :LC_SUB_FRAMEWORK, + 0x13 => :LC_SUB_UMBRELLA, + 0x14 => :LC_SUB_CLIENT, + 0x15 => :LC_SUB_LIBRARY, + 0x16 => :LC_TWOLEVEL_HINTS, + 0x17 => :LC_PREBIND_CKSUM, + (0x18 | LC_REQ_DYLD) => :LC_LOAD_WEAK_DYLIB, + 0x19 => :LC_SEGMENT_64, + 0x1a => :LC_ROUTINES_64, + 0x1b => :LC_UUID, + (0x1c | LC_REQ_DYLD) => :LC_RPATH, + 0x1d => :LC_CODE_SIGNATURE, + 0x1e => :LC_SEGMENT_SPLIT_INFO, + (0x1f | LC_REQ_DYLD) => :LC_REEXPORT_DYLIB, + 0x20 => :LC_LAZY_LOAD_DYLIB, + 0x21 => :LC_ENCRYPTION_INFO, + 0x22 => :LC_DYLD_INFO, + (0x22 | LC_REQ_DYLD) => :LC_DYLD_INFO_ONLY, + (0x23 | LC_REQ_DYLD) => :LC_LOAD_UPWARD_DYLIB, + 0x24 => :LC_VERSION_MIN_MACOSX, + 0x25 => :LC_VERSION_MIN_IPHONEOS, + 0x26 => :LC_FUNCTION_STARTS, + 0x27 => :LC_DYLD_ENVIRONMENT, + (0x28 | LC_REQ_DYLD) => :LC_MAIN, + 0x29 => :LC_DATA_IN_CODE, + 0x2a => :LC_SOURCE_VERSION, + 0x2b => :LC_DYLIB_CODE_SIGN_DRS, + 0x2c => :LC_ENCRYPTION_INFO_64, + 0x2d => :LC_LINKER_OPTION, + 0x2e => :LC_LINKER_OPTIMIZATION_HINT, + 0x2f => :LC_VERSION_MIN_TVOS, + 0x30 => :LC_VERSION_MIN_WATCHOS, + 0x31 => :LC_NOTE, + 0x32 => :LC_BUILD_VERSION, + }.freeze + + # association of symbol representations to load command constants + # @api private + LOAD_COMMAND_CONSTANTS = LOAD_COMMANDS.invert.freeze + + # load commands responsible for loading dylibs + # @api private + DYLIB_LOAD_COMMANDS = %i[ + LC_LOAD_DYLIB + LC_LOAD_WEAK_DYLIB + LC_REEXPORT_DYLIB + LC_LAZY_LOAD_DYLIB + LC_LOAD_UPWARD_DYLIB + ].freeze + + # load commands that can be created manually via {LoadCommand.create} + # @api private + CREATABLE_LOAD_COMMANDS = DYLIB_LOAD_COMMANDS + %i[ + LC_ID_DYLIB + LC_RPATH + LC_LOAD_DYLINKER + ].freeze + + # association of load command symbols to string representations of classes + # @api private + LC_STRUCTURES = { + :LC_SEGMENT => "SegmentCommand", + :LC_SYMTAB => "SymtabCommand", + # "obsolete" + :LC_SYMSEG => "SymsegCommand", + # seems obsolete, but not documented as such + :LC_THREAD => "ThreadCommand", + :LC_UNIXTHREAD => "ThreadCommand", + # "obsolete" + :LC_LOADFVMLIB => "FvmlibCommand", + # "obsolete" + :LC_IDFVMLIB => "FvmlibCommand", + # "obsolete" + :LC_IDENT => "IdentCommand", + # "reserved for internal use only" + :LC_FVMFILE => "FvmfileCommand", + # "reserved for internal use only", no public struct + :LC_PREPAGE => "LoadCommand", + :LC_DYSYMTAB => "DysymtabCommand", + :LC_LOAD_DYLIB => "DylibCommand", + :LC_ID_DYLIB => "DylibCommand", + :LC_LOAD_DYLINKER => "DylinkerCommand", + :LC_ID_DYLINKER => "DylinkerCommand", + :LC_PREBOUND_DYLIB => "PreboundDylibCommand", + :LC_ROUTINES => "RoutinesCommand", + :LC_SUB_FRAMEWORK => "SubFrameworkCommand", + :LC_SUB_UMBRELLA => "SubUmbrellaCommand", + :LC_SUB_CLIENT => "SubClientCommand", + :LC_SUB_LIBRARY => "SubLibraryCommand", + :LC_TWOLEVEL_HINTS => "TwolevelHintsCommand", + :LC_PREBIND_CKSUM => "PrebindCksumCommand", + 
:LC_LOAD_WEAK_DYLIB => "DylibCommand", + :LC_SEGMENT_64 => "SegmentCommand64", + :LC_ROUTINES_64 => "RoutinesCommand64", + :LC_UUID => "UUIDCommand", + :LC_RPATH => "RpathCommand", + :LC_CODE_SIGNATURE => "LinkeditDataCommand", + :LC_SEGMENT_SPLIT_INFO => "LinkeditDataCommand", + :LC_REEXPORT_DYLIB => "DylibCommand", + :LC_LAZY_LOAD_DYLIB => "DylibCommand", + :LC_ENCRYPTION_INFO => "EncryptionInfoCommand", + :LC_DYLD_INFO => "DyldInfoCommand", + :LC_DYLD_INFO_ONLY => "DyldInfoCommand", + :LC_LOAD_UPWARD_DYLIB => "DylibCommand", + :LC_VERSION_MIN_MACOSX => "VersionMinCommand", + :LC_VERSION_MIN_IPHONEOS => "VersionMinCommand", + :LC_FUNCTION_STARTS => "LinkeditDataCommand", + :LC_DYLD_ENVIRONMENT => "DylinkerCommand", + :LC_MAIN => "EntryPointCommand", + :LC_DATA_IN_CODE => "LinkeditDataCommand", + :LC_SOURCE_VERSION => "SourceVersionCommand", + :LC_DYLIB_CODE_SIGN_DRS => "LinkeditDataCommand", + :LC_ENCRYPTION_INFO_64 => "EncryptionInfoCommand64", + :LC_LINKER_OPTION => "LinkerOptionCommand", + :LC_LINKER_OPTIMIZATION_HINT => "LinkeditDataCommand", + :LC_VERSION_MIN_TVOS => "VersionMinCommand", + :LC_VERSION_MIN_WATCHOS => "VersionMinCommand", + :LC_NOTE => "NoteCommand", + :LC_BUILD_VERSION => "BuildVersionCommand", + }.freeze + + # association of segment name symbols to names + # @api private + SEGMENT_NAMES = { + :SEG_PAGEZERO => "__PAGEZERO", + :SEG_TEXT => "__TEXT", + :SEG_DATA => "__DATA", + :SEG_OBJC => "__OBJC", + :SEG_ICON => "__ICON", + :SEG_LINKEDIT => "__LINKEDIT", + :SEG_UNIXSTACK => "__UNIXSTACK", + :SEG_IMPORT => "__IMPORT", + }.freeze + + # association of segment flag symbols to values + # @api private + SEGMENT_FLAGS = { + :SG_HIGHVM => 0x1, + :SG_FVMLIB => 0x2, + :SG_NORELOC => 0x4, + :SG_PROTECTED_VERSION_1 => 0x8, + }.freeze + + # The top-level Mach-O load command structure. + # + # This is the most generic load command -- only the type ID and size are + # represented. Used when a more specific class isn't available or isn't implemented. + class LoadCommand < MachOStructure + # @return [MachO::MachOView, nil] the raw view associated with the load command, + # or nil if the load command was created via {create}. 
+ attr_reader :view + + # @return [Integer] the load command's type ID + attr_reader :cmd + + # @return [Integer] the size of the load command, in bytes + attr_reader :cmdsize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + + # Instantiates a new LoadCommand given a view into its origin Mach-O + # @param view [MachO::MachOView] the load command's raw view + # @return [LoadCommand] the new load command + # @api private + def self.new_from_bin(view) + bin = view.raw_data.slice(view.offset, bytesize) + format = Utils.specialize_format(self::FORMAT, view.endianness) + + new(view, *bin.unpack(format)) + end + + # Creates a new (viewless) command corresponding to the symbol provided + # @param cmd_sym [Symbol] the symbol of the load command being created + # @param args [Array] the arguments for the load command being created + def self.create(cmd_sym, *args) + raise LoadCommandNotCreatableError, cmd_sym unless CREATABLE_LOAD_COMMANDS.include?(cmd_sym) + + klass = LoadCommands.const_get LC_STRUCTURES[cmd_sym] + cmd = LOAD_COMMAND_CONSTANTS[cmd_sym] + + # cmd will be filled in, view and cmdsize will be left unpopulated + klass_arity = klass.instance_method(:initialize).arity - 3 + + raise LoadCommandCreationArityError.new(cmd_sym, klass_arity, args.size) if klass_arity != args.size + + klass.new(nil, cmd, nil, *args) + end + + # @param view [MachO::MachOView] the load command's raw view + # @param cmd [Integer] the load command's identifying number + # @param cmdsize [Integer] the size of the load command in bytes + # @api private + def initialize(view, cmd, cmdsize) + @view = view + @cmd = cmd + @cmdsize = cmdsize + end + + # @return [Boolean] whether the load command can be serialized + def serializable? + CREATABLE_LOAD_COMMANDS.include?(LOAD_COMMANDS[cmd]) + end + + # @param context [SerializationContext] the context + # to serialize into + # @return [String, nil] the serialized fields of the load command, or nil + # if the load command can't be serialized + # @api private + def serialize(context) + raise LoadCommandNotSerializableError, LOAD_COMMANDS[cmd] unless serializable? + + format = Utils.specialize_format(FORMAT, context.endianness) + [cmd, SIZEOF].pack(format) + end + + # @return [Integer] the load command's offset in the source file + # @deprecated use {#view} instead + def offset + view.offset + end + + # @return [Symbol, nil] a symbol representation of the load command's + # type ID, or nil if the ID doesn't correspond to a known load command class + def type + LOAD_COMMANDS[cmd] + end + + alias to_sym type + + # @return [String] a string representation of the load command's + # identifying number + def to_s + type.to_s + end + + # @return [Hash] a hash representation of this load command + # @note Children should override this to include additional information. + def to_h + { + "view" => view.to_h, + "cmd" => cmd, + "cmdsize" => cmdsize, + "type" => type, + }.merge super + end + + # Represents a Load Command string. A rough analogue to the lc_str + # struct used internally by OS X. This class allows ruby-macho to + # pretend that strings stored in LCs are immediately available without + # explicit operations on the raw Mach-O data. + class LCStr + # @param lc [LoadCommand] the load command + # @param lc_str [Integer, String] the offset to the beginning of the + # string, or the string itself if not being initialized with a view. 
+ # @raise [MachO::LCStrMalformedError] if the string is malformed + # @todo devise a solution such that the `lc_str` parameter is not + # interpreted differently depending on `lc.view`. The current behavior + # is a hack to allow viewless load command creation. + # @api private + def initialize(lc, lc_str) + view = lc.view + + if view + lc_str_abs = view.offset + lc_str + lc_end = view.offset + lc.cmdsize - 1 + raw_string = view.raw_data.slice(lc_str_abs..lc_end) + @string, null_byte, _padding = raw_string.partition("\x00") + + raise LCStrMalformedError, lc if null_byte.empty? + + @string_offset = lc_str + else + @string = lc_str + @string_offset = 0 + end + end + + # @return [String] a string representation of the LCStr + def to_s + @string + end + + # @return [Integer] the offset to the beginning of the string in the + # load command + def to_i + @string_offset + end + + # @return [Hash] a hash representation of this {LCStr}. + def to_h + { + "string" => to_s, + "offset" => to_i, + } + end + end + + # Represents the contextual information needed by a load command to + # serialize itself correctly into a binary string. + class SerializationContext + # @return [Symbol] the endianness of the serialized load command + attr_reader :endianness + + # @return [Integer] the constant alignment value used to pad the + # serialized load command + attr_reader :alignment + + # @param macho [MachO::MachOFile] the file to contextualize + # @return [SerializationContext] the + # resulting context + def self.context_for(macho) + new(macho.endianness, macho.alignment) + end + + # @param endianness [Symbol] the endianness of the context + # @param alignment [Integer] the alignment of the context + # @api private + def initialize(endianness, alignment) + @endianness = endianness + @alignment = alignment + end + end + end + + # A load command containing a single 128-bit unique random number + # identifying an object produced by static link editor. Corresponds to + # LC_UUID. + class UUIDCommand < LoadCommand + # @return [Array<Integer>] the UUID + attr_reader :uuid + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2a16".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, uuid) + super(view, cmd, cmdsize) + @uuid = uuid.unpack("C16") # re-unpack for the actual UUID array + end + + # @return [String] a string representation of the UUID + def uuid_string + hexes = uuid.map { |e| "%02x" % e } + segs = [ + hexes[0..3].join, hexes[4..5].join, hexes[6..7].join, + hexes[8..9].join, hexes[10..15].join + ] + + segs.join("-") + end + + # @return [Hash] returns a hash representation of this {UUIDCommand} + def to_h + { + "uuid" => uuid, + "uuid_string" => uuid_string, + }.merge super + end + end + + # A load command indicating that part of this file is to be mapped into + # the task's address space. Corresponds to LC_SEGMENT. 
+ class SegmentCommand < LoadCommand + # @return [String] the name of the segment + attr_reader :segname + + # @return [Integer] the memory address of the segment + attr_reader :vmaddr + + # @return [Integer] the memory size of the segment + attr_reader :vmsize + + # @return [Integer] the file offset of the segment + attr_reader :fileoff + + # @return [Integer] the amount to map from the file + attr_reader :filesize + + # @return [Integer] the maximum VM protection + attr_reader :maxprot + + # @return [Integer] the initial VM protection + attr_reader :initprot + + # @return [Integer] the number of sections in the segment + attr_reader :nsects + + # @return [Integer] any flags associated with the segment + attr_reader :flags + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Z16L=4l=2L=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 56 + + # @api private + def initialize(view, cmd, cmdsize, segname, vmaddr, vmsize, fileoff, + filesize, maxprot, initprot, nsects, flags) + super(view, cmd, cmdsize) + @segname = segname + @vmaddr = vmaddr + @vmsize = vmsize + @fileoff = fileoff + @filesize = filesize + @maxprot = maxprot + @initprot = initprot + @nsects = nsects + @flags = flags + end + + # All sections referenced within this segment. + # @return [Array<MachO::Sections::Section>] if the Mach-O is 32-bit + # @return [Array<MachO::Sections::Section64>] if the Mach-O is 64-bit + def sections + klass = case self + when SegmentCommand64 + MachO::Sections::Section64 + when SegmentCommand + MachO::Sections::Section + end + + offset = view.offset + self.class.bytesize + length = nsects * klass.bytesize + + bins = view.raw_data[offset, length] + bins.unpack("a#{klass.bytesize}" * nsects).map do |bin| + klass.new_from_bin(view.endianness, bin) + end + end + + # @example + # puts "this segment relocated in/to it" if sect.flag?(:SG_NORELOC) + # @param flag [Symbol] a segment flag symbol + # @return [Boolean] true if `flag` is present in the segment's flag field + def flag?(flag) + flag = SEGMENT_FLAGS[flag] + + return false if flag.nil? + + flags & flag == flag + end + + # Guesses the alignment of the segment. + # @return [Integer] the guessed alignment, as a power of 2 + # @note See `guess_align` in `cctools/misc/lipo.c` + def guess_align + return Sections::MAX_SECT_ALIGN if vmaddr.zero? + + align = 0 + segalign = 1 + + while (segalign & vmaddr).zero? + segalign <<= 1 + align += 1 + end + + return 2 if align < 2 + return Sections::MAX_SECT_ALIGN if align > Sections::MAX_SECT_ALIGN + + align + end + + # @return [Hash] a hash representation of this {SegmentCommand} + def to_h + { + "segname" => segname, + "vmaddr" => vmaddr, + "vmsize" => vmsize, + "fileoff" => fileoff, + "filesize" => filesize, + "maxprot" => maxprot, + "initprot" => initprot, + "nsects" => nsects, + "flags" => flags, + "sections" => sections.map(&:to_h), + }.merge super + end + end + + # A load command indicating that part of this file is to be mapped into + # the task's address space. Corresponds to LC_SEGMENT_64. + class SegmentCommand64 < SegmentCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Z16Q=4l=2L=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 72 + end + + # A load command representing some aspect of shared libraries, depending + # on filetype. Corresponds to LC_ID_DYLIB, LC_LOAD_DYLIB, + # LC_LOAD_WEAK_DYLIB, and LC_REEXPORT_DYLIB. 
+ class DylibCommand < LoadCommand + # @return [LCStr] the library's path + # name as an LCStr + attr_reader :name + + # @return [Integer] the library's build time stamp + attr_reader :timestamp + + # @return [Integer] the library's current version number + attr_reader :current_version + + # @return [Integer] the library's compatibility version number + attr_reader :compatibility_version + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, name, timestamp, current_version, + compatibility_version) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @timestamp = timestamp + @current_version = current_version + @compatibility_version = compatibility_version + end + + # @param context [SerializationContext] + # the context + # @return [String] the serialized fields of the load command + # @api private + def serialize(context) + format = Utils.specialize_format(FORMAT, context.endianness) + string_payload, string_offsets = Utils.pack_strings(SIZEOF, + context.alignment, + :name => name.to_s) + cmdsize = SIZEOF + string_payload.bytesize + [cmd, cmdsize, string_offsets[:name], timestamp, current_version, + compatibility_version].pack(format) + string_payload + end + + # @return [Hash] a hash representation of this {DylibCommand} + def to_h + { + "name" => name.to_h, + "timestamp" => timestamp, + "current_version" => current_version, + "compatibility_version" => compatibility_version, + }.merge super + end + end + + # A load command representing some aspect of the dynamic linker, depending + # on filetype. Corresponds to LC_ID_DYLINKER, LC_LOAD_DYLINKER, and + # LC_DYLD_ENVIRONMENT. + class DylinkerCommand < LoadCommand + # @return [LCStr] the dynamic linker's + # path name as an LCStr + attr_reader :name + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, name) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + end + + # @param context [SerializationContext] + # the context + # @return [String] the serialized fields of the load command + # @api private + def serialize(context) + format = Utils.specialize_format(FORMAT, context.endianness) + string_payload, string_offsets = Utils.pack_strings(SIZEOF, + context.alignment, + :name => name.to_s) + cmdsize = SIZEOF + string_payload.bytesize + [cmd, cmdsize, string_offsets[:name]].pack(format) + string_payload + end + + # @return [Hash] a hash representation of this {DylinkerCommand} + def to_h + { + "name" => name.to_h, + }.merge super + end + end + + # A load command used to indicate dynamic libraries used in prebinding. + # Corresponds to LC_PREBOUND_DYLIB. 
+ class PreboundDylibCommand < LoadCommand + # @return [LCStr] the library's path + # name as an LCStr + attr_reader :name + + # @return [Integer] the number of modules in the library + attr_reader :nmodules + + # @return [Integer] a bit vector of linked modules + attr_reader :linked_modules + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=5".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + # @api private + def initialize(view, cmd, cmdsize, name, nmodules, linked_modules) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @nmodules = nmodules + @linked_modules = linked_modules + end + + # @return [Hash] a hash representation of this {PreboundDylibCommand} + def to_h + { + "name" => name.to_h, + "nmodules" => nmodules, + "linked_modules" => linked_modules, + }.merge super + end + end + + # A load command used to represent threads. + # @note cctools-870 and onwards have all fields of thread_command commented + # out except the common ones (cmd, cmdsize) + class ThreadCommand < LoadCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + end + + # A load command containing the address of the dynamic shared library + # initialization routine and an index into the module table for the module + # that defines the routine. Corresponds to LC_ROUTINES. + class RoutinesCommand < LoadCommand + # @return [Integer] the address of the initialization routine + attr_reader :init_address + + # @return [Integer] the index into the module table that the init routine + # is defined in + attr_reader :init_module + + # @return [void] + attr_reader :reserved1 + + # @return [void] + attr_reader :reserved2 + + # @return [void] + attr_reader :reserved3 + + # @return [void] + attr_reader :reserved4 + + # @return [void] + attr_reader :reserved5 + + # @return [void] + attr_reader :reserved6 + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=10".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 40 + + # @api private + def initialize(view, cmd, cmdsize, init_address, init_module, reserved1, + reserved2, reserved3, reserved4, reserved5, reserved6) + super(view, cmd, cmdsize) + @init_address = init_address + @init_module = init_module + @reserved1 = reserved1 + @reserved2 = reserved2 + @reserved3 = reserved3 + @reserved4 = reserved4 + @reserved5 = reserved5 + @reserved6 = reserved6 + end + + # @return [Hash] a hash representation of this {RoutinesCommand} + def to_h + { + "init_address" => init_address, + "init_module" => init_module, + "reserved1" => reserved1, + "reserved2" => reserved2, + "reserved3" => reserved3, + "reserved4" => reserved4, + "reserved5" => reserved5, + "reserved6" => reserved6, + }.merge super + end + end + + # A load command containing the address of the dynamic shared library + # initialization routine and an index into the module table for the module + # that defines the routine. Corresponds to LC_ROUTINES_64. + class RoutinesCommand64 < RoutinesCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Q=8".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 72 + end + + # A load command signifying membership of a subframework containing the name + # of an umbrella framework. Corresponds to LC_SUB_FRAMEWORK. 
+ class SubFrameworkCommand < LoadCommand + # @return [LCStr] the umbrella framework name as an LCStr + attr_reader :umbrella + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, umbrella) + super(view, cmd, cmdsize) + @umbrella = LCStr.new(self, umbrella) + end + + # @return [Hash] a hash representation of this {SubFrameworkCommand} + def to_h + { + "umbrella" => umbrella.to_h, + }.merge super + end + end + + # A load command signifying membership of a subumbrella containing the name + # of an umbrella framework. Corresponds to LC_SUB_UMBRELLA. + class SubUmbrellaCommand < LoadCommand + # @return [LCStr] the subumbrella framework name as an LCStr + attr_reader :sub_umbrella + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, sub_umbrella) + super(view, cmd, cmdsize) + @sub_umbrella = LCStr.new(self, sub_umbrella) + end + + # @return [Hash] a hash representation of this {SubUmbrellaCommand} + def to_h + { + "sub_umbrella" => sub_umbrella.to_h, + }.merge super + end + end + + # A load command signifying a sublibrary of a shared library. Corresponds + # to LC_SUB_LIBRARY. + class SubLibraryCommand < LoadCommand + # @return [LCStr] the sublibrary name as an LCStr + attr_reader :sub_library + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, sub_library) + super(view, cmd, cmdsize) + @sub_library = LCStr.new(self, sub_library) + end + + # @return [Hash] a hash representation of this {SubLibraryCommand} + def to_h + { + "sub_library" => sub_library.to_h, + }.merge super + end + end + + # A load command signifying a shared library that is a subframework of + # an umbrella framework. Corresponds to LC_SUB_CLIENT. + class SubClientCommand < LoadCommand + # @return [LCStr] the subclient name as an LCStr + attr_reader :sub_client + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, sub_client) + super(view, cmd, cmdsize) + @sub_client = LCStr.new(self, sub_client) + end + + # @return [Hash] a hash representation of this {SubClientCommand} + def to_h + { + "sub_client" => sub_client.to_h, + }.merge super + end + end + + # A load command containing the offsets and sizes of the link-edit 4.3BSD + # "stab" style symbol table information. Corresponds to LC_SYMTAB. 
+ class SymtabCommand < LoadCommand + # @return [Integer] the symbol table's offset + attr_reader :symoff + + # @return [Integer] the number of symbol table entries + attr_reader :nsyms + + # @return [Integer] the string table's offset + attr_reader :stroff + + # @return [Integer] the string table size in bytes + attr_reader :strsize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, symoff, nsyms, stroff, strsize) + super(view, cmd, cmdsize) + @symoff = symoff + @nsyms = nsyms + @stroff = stroff + @strsize = strsize + end + + # @return [Hash] a hash representation of this {SymtabCommand} + def to_h + { + "symoff" => symoff, + "nsyms" => nsyms, + "stroff" => stroff, + "strsize" => strsize, + }.merge super + end + end + + # A load command containing symbolic information needed to support data + # structures used by the dynamic link editor. Corresponds to LC_DYSYMTAB. + class DysymtabCommand < LoadCommand + # @return [Integer] the index to local symbols + attr_reader :ilocalsym + + # @return [Integer] the number of local symbols + attr_reader :nlocalsym + + # @return [Integer] the index to externally defined symbols + attr_reader :iextdefsym + + # @return [Integer] the number of externally defined symbols + attr_reader :nextdefsym + + # @return [Integer] the index to undefined symbols + attr_reader :iundefsym + + # @return [Integer] the number of undefined symbols + attr_reader :nundefsym + + # @return [Integer] the file offset to the table of contents + attr_reader :tocoff + + # @return [Integer] the number of entries in the table of contents + attr_reader :ntoc + + # @return [Integer] the file offset to the module table + attr_reader :modtaboff + + # @return [Integer] the number of entries in the module table + attr_reader :nmodtab + + # @return [Integer] the file offset to the referenced symbol table + attr_reader :extrefsymoff + + # @return [Integer] the number of entries in the referenced symbol table + attr_reader :nextrefsyms + + # @return [Integer] the file offset to the indirect symbol table + attr_reader :indirectsymoff + + # @return [Integer] the number of entries in the indirect symbol table + attr_reader :nindirectsyms + + # @return [Integer] the file offset to the external relocation entries + attr_reader :extreloff + + # @return [Integer] the number of external relocation entries + attr_reader :nextrel + + # @return [Integer] the file offset to the local relocation entries + attr_reader :locreloff + + # @return [Integer] the number of local relocation entries + attr_reader :nlocrel + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=20".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 80 + + # ugh + # @api private + def initialize(view, cmd, cmdsize, ilocalsym, nlocalsym, iextdefsym, + nextdefsym, iundefsym, nundefsym, tocoff, ntoc, modtaboff, + nmodtab, extrefsymoff, nextrefsyms, indirectsymoff, + nindirectsyms, extreloff, nextrel, locreloff, nlocrel) + super(view, cmd, cmdsize) + @ilocalsym = ilocalsym + @nlocalsym = nlocalsym + @iextdefsym = iextdefsym + @nextdefsym = nextdefsym + @iundefsym = iundefsym + @nundefsym = nundefsym + @tocoff = tocoff + @ntoc = ntoc + @modtaboff = modtaboff + @nmodtab = nmodtab + @extrefsymoff = extrefsymoff + @nextrefsyms = nextrefsyms + @indirectsymoff = indirectsymoff + @nindirectsyms = nindirectsyms + @extreloff = extreloff + @nextrel = nextrel + @locreloff = 
locreloff + @nlocrel = nlocrel + end + + # @return [Hash] a hash representation of this {DysymtabCommand} + def to_h + { + "ilocalsym" => ilocalsym, + "nlocalsym" => nlocalsym, + "iextdefsym" => iextdefsym, + "nextdefsym" => nextdefsym, + "iundefsym" => iundefsym, + "nundefsym" => nundefsym, + "tocoff" => tocoff, + "ntoc" => ntoc, + "modtaboff" => modtaboff, + "nmodtab" => nmodtab, + "extrefsymoff" => extrefsymoff, + "nextrefsyms" => nextrefsyms, + "indirectsymoff" => indirectsymoff, + "nindirectsyms" => nindirectsyms, + "extreloff" => extreloff, + "nextrel" => nextrel, + "locreloff" => locreloff, + "nlocrel" => nlocrel, + }.merge super + end + end + + # A load command containing the offset and number of hints in the two-level + # namespace lookup hints table. Corresponds to LC_TWOLEVEL_HINTS. + class TwolevelHintsCommand < LoadCommand + # @return [Integer] the offset to the hint table + attr_reader :htoffset + + # @return [Integer] the number of hints in the hint table + attr_reader :nhints + + # @return [TwolevelHintsTable] + # the hint table + attr_reader :table + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, htoffset, nhints) + super(view, cmd, cmdsize) + @htoffset = htoffset + @nhints = nhints + @table = TwolevelHintsTable.new(view, htoffset, nhints) + end + + # @return [Hash] a hash representation of this {TwolevelHintsCommand} + def to_h + { + "htoffset" => htoffset, + "nhints" => nhints, + "table" => table.hints.map(&:to_h), + }.merge super + end + + # A representation of the two-level namespace lookup hints table exposed + # by a {TwolevelHintsCommand} (`LC_TWOLEVEL_HINTS`). + class TwolevelHintsTable + # @return [Array<TwolevelHint>] all hints in the table + attr_reader :hints + + # @param view [MachO::MachOView] the view into the current Mach-O + # @param htoffset [Integer] the offset of the hints table + # @param nhints [Integer] the number of two-level hints in the table + # @api private + def initialize(view, htoffset, nhints) + format = Utils.specialize_format("L=#{nhints}", view.endianness) + raw_table = view.raw_data[htoffset, nhints * 4] + blobs = raw_table.unpack(format) + + @hints = blobs.map { |b| TwolevelHint.new(b) } + end + + # An individual two-level namespace lookup hint. + class TwolevelHint + # @return [Integer] the index into the sub-images + attr_reader :isub_image + + # @return [Integer] the index into the table of contents + attr_reader :itoc + + # @param blob [Integer] the 32-bit number containing the lookup hint + # @api private + def initialize(blob) + @isub_image = blob >> 24 + @itoc = blob & 0x00FFFFFF + end + + # @return [Hash] a hash representation of this {TwolevelHint} + def to_h + { + "isub_image" => isub_image, + "itoc" => itoc, + } + end + end + end + end + + # A load command containing the value of the original checksum for prebound + # files, or zero. Corresponds to LC_PREBIND_CKSUM. 
+ class PrebindCksumCommand < LoadCommand + # @return [Integer] the checksum or 0 + attr_reader :cksum + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, cksum) + super(view, cmd, cmdsize) + @cksum = cksum + end + + # @return [Hash] a hash representation of this {PrebindCksumCommand} + def to_h + { + "cksum" => cksum, + }.merge super + end + end + + # A load command representing an rpath, which specifies a path that should + # be added to the current run path used to find @rpath prefixed dylibs. + # Corresponds to LC_RPATH. + class RpathCommand < LoadCommand + # @return [LCStr] the path to add to the run path as an LCStr + attr_reader :path + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, path) + super(view, cmd, cmdsize) + @path = LCStr.new(self, path) + end + + # @param context [SerializationContext] the context + # @return [String] the serialized fields of the load command + # @api private + def serialize(context) + format = Utils.specialize_format(FORMAT, context.endianness) + string_payload, string_offsets = Utils.pack_strings(SIZEOF, + context.alignment, + :path => path.to_s) + cmdsize = SIZEOF + string_payload.bytesize + [cmd, cmdsize, string_offsets[:path]].pack(format) + string_payload + end + + # @return [Hash] a hash representation of this {RpathCommand} + def to_h + { + "path" => path.to_h, + }.merge super + end + end + + # A load command representing the offsets and sizes of a blob of data in + # the __LINKEDIT segment. Corresponds to LC_CODE_SIGNATURE, + # LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, + # LC_DYLIB_CODE_SIGN_DRS, and LC_LINKER_OPTIMIZATION_HINT. + class LinkeditDataCommand < LoadCommand + # @return [Integer] offset to the data in the __LINKEDIT segment + attr_reader :dataoff + + # @return [Integer] size of the data in the __LINKEDIT segment + attr_reader :datasize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, dataoff, datasize) + super(view, cmd, cmdsize) + @dataoff = dataoff + @datasize = datasize + end + + # @return [Hash] a hash representation of this {LinkeditDataCommand} + def to_h + { + "dataoff" => dataoff, + "datasize" => datasize, + }.merge super + end + end + + # A load command representing the offset to and size of an encrypted + # segment. Corresponds to LC_ENCRYPTION_INFO. 
+ class EncryptionInfoCommand < LoadCommand + # @return [Integer] the offset to the encrypted segment + attr_reader :cryptoff + + # @return [Integer] the size of the encrypted segment + attr_reader :cryptsize + + # @return [Integer] the encryption system, or 0 if not encrypted yet + attr_reader :cryptid + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=5".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + # @api private + def initialize(view, cmd, cmdsize, cryptoff, cryptsize, cryptid) + super(view, cmd, cmdsize) + @cryptoff = cryptoff + @cryptsize = cryptsize + @cryptid = cryptid + end + + # @return [Hash] a hash representation of this {EncryptionInfoCommand} + def to_h + { + "cryptoff" => cryptoff, + "cryptsize" => cryptsize, + "cryptid" => cryptid, + }.merge super + end + end + + # A load command representing the offset to and size of an encrypted + # segment. Corresponds to LC_ENCRYPTION_INFO_64. + class EncryptionInfoCommand64 < EncryptionInfoCommand + # @return [Integer] 64-bit padding value + attr_reader :pad + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, cryptoff, cryptsize, cryptid, pad) + super(view, cmd, cmdsize, cryptoff, cryptsize, cryptid) + @pad = pad + end + + # @return [Hash] a hash representation of this {EncryptionInfoCommand64} + def to_h + { + "pad" => pad, + }.merge super + end + end + + # A load command containing the minimum OS version on which the binary + # was built to run. Corresponds to LC_VERSION_MIN_MACOSX and + # LC_VERSION_MIN_IPHONEOS. + class VersionMinCommand < LoadCommand + # @return [Integer] the version X.Y.Z packed as x16.y8.z8 + attr_reader :version + + # @return [Integer] the SDK version X.Y.Z packed as x16.y8.z8 + attr_reader :sdk + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, version, sdk) + super(view, cmd, cmdsize) + @version = version + @sdk = sdk + end + + # A string representation of the binary's minimum OS version. + # @return [String] a string representing the minimum OS version. + def version_string + binary = "%032b" % version + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # A string representation of the binary's SDK version. + # @return [String] a string representing the SDK version. + def sdk_string + binary = "%032b" % sdk + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # @return [Hash] a hash representation of this {VersionMinCommand} + def to_h + { + "version" => version, + "version_string" => version_string, + "sdk" => sdk, + "sdk_string" => sdk_string, + }.merge super + end + end + + # A load command containing the minimum OS version on which + # the binary was built for its platform. + # Corresponds to LC_BUILD_VERSION. 
+ class BuildVersionCommand < LoadCommand + # @return [Integer] + attr_reader :platform + + # @return [Integer] the minimum OS version X.Y.Z packed as x16.y8.z8 + attr_reader :minos + + # @return [Integer] the SDK version X.Y.Z packed as x16.y8.z8 + attr_reader :sdk + + # @return [ToolEntries] tool entries + attr_reader :tool_entries + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, platform, minos, sdk, ntools) + super(view, cmd, cmdsize) + @platform = platform + @minos = minos + @sdk = sdk + @tool_entries = ToolEntries.new(view, ntools) + end + + # A string representation of the binary's minimum OS version. + # @return [String] a string representing the minimum OS version. + def minos_string + binary = "%032b" % minos + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # A string representation of the binary's SDK version. + # @return [String] a string representing the SDK version. + def sdk_string + binary = "%032b" % sdk + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # @return [Hash] a hash representation of this {BuildVersionCommand} + def to_h + { + "platform" => platform, + "minos" => minos, + "minos_string" => minos_string, + "sdk" => sdk, + "sdk_string" => sdk_string, + "tool_entries" => tool_entries.tools.map(&:to_h), + }.merge super + end + + # A representation of the tool versions exposed + # by a {BuildVersionCommand} (`LC_BUILD_VERSION`). + class ToolEntries + # @return [Array<Tool>] all tools + attr_reader :tools + + # @param view [MachO::MachOView] the view into the current Mach-O + # @param ntools [Integer] the number of tools + # @api private + def initialize(view, ntools) + format = Utils.specialize_format("L=#{ntools * 2}", view.endianness) + raw_table = view.raw_data[view.offset + 24, ntools * 8] + blobs = raw_table.unpack(format).each_slice(2).to_a + + @tools = blobs.map { |b| Tool.new(*b) } + end + + # An individual tool. + class Tool + # @return [Integer] the enum for the tool + attr_reader :tool + + # @return [Integer] the tool's version number + attr_reader :version + + # @param tool [Integer] 32-bit integer + # @param version [Integer] 32-bit integer + # @api private + def initialize(tool, version) + @tool = tool + @version = version + end + + # @return [Hash] a hash representation of this {Tool} + def to_h + { + "tool" => tool, + "version" => version, + } + end + end + end + end + + # A load command containing the file offsets and sizes of the new + # compressed form of the information dyld needs to load the image. + # Corresponds to LC_DYLD_INFO and LC_DYLD_INFO_ONLY. 
+ class DyldInfoCommand < LoadCommand + # @return [Integer] the file offset to the rebase information + attr_reader :rebase_off + + # @return [Integer] the size of the rebase information + attr_reader :rebase_size + + # @return [Integer] the file offset to the binding information + attr_reader :bind_off + + # @return [Integer] the size of the binding information + attr_reader :bind_size + + # @return [Integer] the file offset to the weak binding information + attr_reader :weak_bind_off + + # @return [Integer] the size of the weak binding information + attr_reader :weak_bind_size + + # @return [Integer] the file offset to the lazy binding information + attr_reader :lazy_bind_off + + # @return [Integer] the size of the lazy binding information + attr_reader :lazy_bind_size + + # @return [Integer] the file offset to the export information + attr_reader :export_off + + # @return [Integer] the size of the export information + attr_reader :export_size + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=12".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 48 + + # @api private + def initialize(view, cmd, cmdsize, rebase_off, rebase_size, bind_off, + bind_size, weak_bind_off, weak_bind_size, lazy_bind_off, + lazy_bind_size, export_off, export_size) + super(view, cmd, cmdsize) + @rebase_off = rebase_off + @rebase_size = rebase_size + @bind_off = bind_off + @bind_size = bind_size + @weak_bind_off = weak_bind_off + @weak_bind_size = weak_bind_size + @lazy_bind_off = lazy_bind_off + @lazy_bind_size = lazy_bind_size + @export_off = export_off + @export_size = export_size + end + + # @return [Hash] a hash representation of this {DyldInfoCommand} + def to_h + { + "rebase_off" => rebase_off, + "rebase_size" => rebase_size, + "bind_off" => bind_off, + "bind_size" => bind_size, + "weak_bind_off" => weak_bind_off, + "weak_bind_size" => weak_bind_size, + "lazy_bind_off" => lazy_bind_off, + "lazy_bind_size" => lazy_bind_size, + "export_off" => export_off, + "export_size" => export_size, + }.merge super + end + end + + # A load command containing linker options embedded in object files. + # Corresponds to LC_LINKER_OPTION. + class LinkerOptionCommand < LoadCommand + # @return [Integer] the number of strings + attr_reader :count + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, count) + super(view, cmd, cmdsize) + @count = count + end + + # @return [Hash] a hash representation of this {LinkerOptionCommand} + def to_h + { + "count" => count, + }.merge super + end + end + + # A load command specifying the offset of main(). Corresponds to LC_MAIN. + class EntryPointCommand < LoadCommand + # @return [Integer] the file (__TEXT) offset of main() + attr_reader :entryoff + + # @return [Integer] if not 0, the initial stack size. + attr_reader :stacksize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Q=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, entryoff, stacksize) + super(view, cmd, cmdsize) + @entryoff = entryoff + @stacksize = stacksize + end + + # @return [Hash] a hash representation of this {EntryPointCommand} + def to_h + { + "entryoff" => entryoff, + "stacksize" => stacksize, + }.merge super + end + end + + # A load command specifying the version of the sources used to build the + # binary. Corresponds to LC_SOURCE_VERSION. 
+ class SourceVersionCommand < LoadCommand + # @return [Integer] the version packed as a24.b10.c10.d10.e10 + attr_reader :version + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Q=1".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, version) + super(view, cmd, cmdsize) + @version = version + end + + # A string representation of the sources used to build the binary. + # @return [String] a string representation of the version + def version_string + binary = "%064b" % version + segs = [ + binary[0..23], binary[24..33], binary[34..43], binary[44..53], + binary[54..63] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # @return [Hash] a hash representation of this {SourceVersionCommand} + def to_h + { + "version" => version, + "version_string" => version_string, + }.merge super + end + end + + # An obsolete load command containing the offset and size of the (GNU style) + # symbol table information. Corresponds to LC_SYMSEG. + class SymsegCommand < LoadCommand + # @return [Integer] the offset to the symbol segment + attr_reader :offset + + # @return [Integer] the size of the symbol segment in bytes + attr_reader :size + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, offset, size) + super(view, cmd, cmdsize) + @offset = offset + @size = size + end + + # @return [Hash] a hash representation of this {SymsegCommand} + def to_h + { + "offset" => offset, + "size" => size, + }.merge super + end + end + + # An obsolete load command containing a free format string table. Each + # string is null-terminated and the command is zero-padded to a multiple of + # 4. Corresponds to LC_IDENT. + class IdentCommand < LoadCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + end + + # An obsolete load command containing the path to a file to be loaded into + # memory. Corresponds to LC_FVMFILE. + class FvmfileCommand < LoadCommand + # @return [LCStr] the pathname of the file being loaded + attr_reader :name + + # @return [Integer] the virtual address being loaded at + attr_reader :header_addr + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + def initialize(view, cmd, cmdsize, name, header_addr) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @header_addr = header_addr + end + + # @return [Hash] a hash representation of this {FvmfileCommand} + def to_h + { + "name" => name.to_h, + "header_addr" => header_addr, + }.merge super + end + end + + # An obsolete load command containing the path to a library to be loaded + # into memory. Corresponds to LC_LOADFVMLIB and LC_IDFVMLIB. 
+ class FvmlibCommand < LoadCommand + # @return [LCStr] the library's target pathname + attr_reader :name + + # @return [Integer] the library's minor version number + attr_reader :minor_version + + # @return [Integer] the library's header address + attr_reader :header_addr + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=5".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + def initialize(view, cmd, cmdsize, name, minor_version, header_addr) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @minor_version = minor_version + @header_addr = header_addr + end + + # @return [Hash] a hash representation of this {FvmlibCommand} + def to_h + { + "name" => name.to_h, + "minor_version" => minor_version, + "header_addr" => header_addr, + }.merge super + end + end + + # A load command containing an owner name and offset/size for an arbitrary data region. + # Corresponds to LC_NOTE. + class NoteCommand < LoadCommand + # @return [String] the name of the owner for this note + attr_reader :data_owner + + # @return [Integer] the offset, within the file, of the note + attr_reader :offset + + # @return [Integer] the size, in bytes, of the note + attr_reader :size + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Z16Q=2".freeze + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 48 + + def initialize(view, cmd, cmdsize, data_owner, offset, size) + super(view, cmd, cmdsize) + @data_owner = data_owner + @offset = offset + @size = size + end + + # @return [Hash] a hash representation of this {NoteCommand} + def to_h + { + "data_owner" => data_owner, + "offset" => offset, + "size" => size, + }.merge super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/macho_file.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/macho_file.rb new file mode 100644 index 0000000..081f1f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/macho_file.rb @@ -0,0 +1,577 @@ +require "forwardable" + +module MachO + # Represents a Mach-O file, which contains a header and load commands + # as well as binary executable instructions. Mach-O binaries are + # architecture specific. + # @see https://en.wikipedia.org/wiki/Mach-O + # @see FatFile + class MachOFile + extend Forwardable + + # @return [String] the filename loaded from, or nil if loaded from a binary + # string + attr_accessor :filename + + # @return [Symbol] the endianness of the file, :big or :little + attr_reader :endianness + + # @return [Headers::MachHeader] if the Mach-O is 32-bit + # @return [Headers::MachHeader64] if the Mach-O is 64-bit + attr_reader :header + + # @return [Array<LoadCommands::LoadCommand>] an array of the file's load + # commands + # @note load commands are provided in order of ascending offset. + attr_reader :load_commands + + # Creates a new instance from a binary string. + # @param bin [String] a binary string containing raw Mach-O data + # @return [MachOFile] a new MachOFile + def self.new_from_bin(bin) + instance = allocate + instance.initialize_from_bin(bin) + + instance + end + + # Creates a new instance from data read from the given filename. 
+ # @param filename [String] the Mach-O file to load from + # @raise [ArgumentError] if the given file does not exist + def initialize(filename) + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + + @filename = filename + @raw_data = File.open(@filename, "rb", &:read) + populate_fields + end + + # Initializes a new MachOFile instance from a binary string. + # @see MachO::MachOFile.new_from_bin + # @api private + def initialize_from_bin(bin) + @filename = nil + @raw_data = bin + populate_fields + end + + # The file's raw Mach-O data. + # @return [String] the raw Mach-O data + def serialize + @raw_data + end + + # @!method magic + # @return (see MachO::Headers::MachHeader#magic) + # @!method ncmds + # @return (see MachO::Headers::MachHeader#ncmds) + # @!method sizeofcmds + # @return (see MachO::Headers::MachHeader#sizeofcmds) + # @!method flags + # @return (see MachO::Headers::MachHeader#flags) + # @!method object? + # @return (see MachO::Headers::MachHeader#object?) + # @!method executable? + # @return (see MachO::Headers::MachHeader#executable?) + # @!method fvmlib? + # @return (see MachO::Headers::MachHeader#fvmlib?) + # @!method core? + # @return (see MachO::Headers::MachHeader#core?) + # @!method preload? + # @return (see MachO::Headers::MachHeader#preload?) + # @!method dylib? + # @return (see MachO::Headers::MachHeader#dylib?) + # @!method dylinker? + # @return (see MachO::Headers::MachHeader#dylinker?) + # @!method bundle? + # @return (see MachO::Headers::MachHeader#bundle?) + # @!method dsym? + # @return (see MachO::Headers::MachHeader#dsym?) + # @!method kext? + # @return (see MachO::Headers::MachHeader#kext?) + # @!method magic32? + # @return (see MachO::Headers::MachHeader#magic32?) + # @!method magic64? + # @return (see MachO::Headers::MachHeader#magic64?) + # @!method alignment + # @return (see MachO::Headers::MachHeader#alignment) + def_delegators :header, :magic, :ncmds, :sizeofcmds, :flags, :object?, + :executable?, :fvmlib?, :core?, :preload?, :dylib?, + :dylinker?, :bundle?, :dsym?, :kext?, :magic32?, :magic64?, + :alignment + + # @return [String] a string representation of the file's magic number + def magic_string + Headers::MH_MAGICS[magic] + end + + # @return [Symbol] a string representation of the Mach-O's filetype + def filetype + Headers::MH_FILETYPES[header.filetype] + end + + # @return [Symbol] a symbol representation of the Mach-O's CPU type + def cputype + Headers::CPU_TYPES[header.cputype] + end + + # @return [Symbol] a symbol representation of the Mach-O's CPU subtype + def cpusubtype + Headers::CPU_SUBTYPES[header.cputype][header.cpusubtype] + end + + # All load commands of a given name. + # @example + # file.command("LC_LOAD_DYLIB") + # file[:LC_LOAD_DYLIB] + # @param [String, Symbol] name the load command ID + # @return [Array<LoadCommands::LoadCommand>] an array of load commands + # corresponding to `name` + def command(name) + load_commands.select { |lc| lc.type == name.to_sym } + end + + alias [] command + + # Inserts a load command at the given offset. 
+ # @param offset [Integer] the offset to insert at + # @param lc [LoadCommands::LoadCommand] the load command to insert + # @param options [Hash] + # @option options [Boolean] :repopulate (true) whether or not to repopulate + # the instance fields + # @raise [OffsetInsertionError] if the offset is not in the load command region + # @raise [HeaderPadError] if the new command exceeds the header pad buffer + # @note Calling this method with an arbitrary offset in the load command + # region **will leave the object in an inconsistent state**. + def insert_command(offset, lc, options = {}) + context = LoadCommands::LoadCommand::SerializationContext.context_for(self) + cmd_raw = lc.serialize(context) + + if offset < header.class.bytesize || offset + cmd_raw.bytesize > low_fileoff + raise OffsetInsertionError, offset + end + + new_sizeofcmds = sizeofcmds + cmd_raw.bytesize + + if header.class.bytesize + new_sizeofcmds > low_fileoff + raise HeaderPadError, @filename + end + + # update Mach-O header fields to account for inserted load command + update_ncmds(ncmds + 1) + update_sizeofcmds(new_sizeofcmds) + + @raw_data.insert(offset, cmd_raw) + @raw_data.slice!(header.class.bytesize + new_sizeofcmds, cmd_raw.bytesize) + + populate_fields if options.fetch(:repopulate, true) + end + + # Replace a load command with another command in the Mach-O, preserving location. + # @param old_lc [LoadCommands::LoadCommand] the load command being replaced + # @param new_lc [LoadCommands::LoadCommand] the load command being added + # @return [void] + # @raise [HeaderPadError] if the new command exceeds the header pad buffer + # @see #insert_command + # @note This is public, but methods like {#dylib_id=} should be preferred. + def replace_command(old_lc, new_lc) + context = LoadCommands::LoadCommand::SerializationContext.context_for(self) + cmd_raw = new_lc.serialize(context) + new_sizeofcmds = sizeofcmds + cmd_raw.bytesize - old_lc.cmdsize + if header.class.bytesize + new_sizeofcmds > low_fileoff + raise HeaderPadError, @filename + end + + delete_command(old_lc) + insert_command(old_lc.view.offset, new_lc) + end + + # Appends a new load command to the Mach-O. + # @param lc [LoadCommands::LoadCommand] the load command being added + # @param options [Hash] + # @option options [Boolean] :repopulate (true) whether or not to repopulate + # the instance fields + # @return [void] + # @see #insert_command + # @note This is public, but methods like {#add_rpath} should be preferred. + # Setting `repopulate` to false **will leave the instance in an + # inconsistent state** unless {#populate_fields} is called **immediately** + # afterwards. + def add_command(lc, options = {}) + insert_command(header.class.bytesize + sizeofcmds, lc, options) + end + + # Delete a load command from the Mach-O. + # @param lc [LoadCommands::LoadCommand] the load command being deleted + # @param options [Hash] + # @option options [Boolean] :repopulate (true) whether or not to repopulate + # the instance fields + # @return [void] + # @note This is public, but methods like {#delete_rpath} should be preferred. + # Setting `repopulate` to false **will leave the instance in an + # inconsistent state** unless {#populate_fields} is called **immediately** + # afterwards. 
+ def delete_command(lc, options = {}) + @raw_data.slice!(lc.view.offset, lc.cmdsize) + + # update Mach-O header fields to account for deleted load command + update_ncmds(ncmds - 1) + update_sizeofcmds(sizeofcmds - lc.cmdsize) + + # pad the space after the load commands to preserve offsets + @raw_data.insert(header.class.bytesize + sizeofcmds - lc.cmdsize, Utils.nullpad(lc.cmdsize)) + + populate_fields if options.fetch(:repopulate, true) + end + + # Populate the instance's fields with the raw Mach-O data. + # @return [void] + # @note This method is public, but should (almost) never need to be called. + # The exception to this rule is when methods like {#add_command} and + # {#delete_command} have been called with `repopulate = false`. + def populate_fields + @header = populate_mach_header + @load_commands = populate_load_commands + end + + # All load commands responsible for loading dylibs. + # @return [Array<LoadCommands::DylibCommand>] an array of DylibCommands + def dylib_load_commands + load_commands.select { |lc| LoadCommands::DYLIB_LOAD_COMMANDS.include?(lc.type) } + end + + # All segment load commands in the Mach-O. + # @return [Array<LoadCommands::SegmentCommand>] if the Mach-O is 32-bit + # @return [Array<LoadCommands::SegmentCommand64>] if the Mach-O is 64-bit + def segments + if magic32? + command(:LC_SEGMENT) + else + command(:LC_SEGMENT_64) + end + end + + # The segment alignment for the Mach-O. Guesses conservatively. + # @return [Integer] the alignment, as a power of 2 + # @note This is **not** the same as {#alignment}! + # @note See `get_align` and `get_align_64` in `cctools/misc/lipo.c` + def segment_alignment + # special cases: 12 for x86/64/PPC/PP64, 14 for ARM/ARM64 + return 12 if %i[i386 x86_64 ppc ppc64].include?(cputype) + return 14 if %i[arm arm64].include?(cputype) + + cur_align = Sections::MAX_SECT_ALIGN + + segments.each do |segment| + if filetype == :object + # start with the smallest alignment, and work our way up + align = magic32? ? 2 : 3 + segment.sections.each do |section| + align = section.align unless section.align <= align + end + else + align = segment.guess_align + end + cur_align = align if align < cur_align + end + + cur_align + end + + # The Mach-O's dylib ID, or `nil` if not a dylib. + # @example + # file.dylib_id # => 'libBar.dylib' + # @return [String, nil] the Mach-O's dylib ID + def dylib_id + return unless dylib? + + dylib_id_cmd = command(:LC_ID_DYLIB).first + + dylib_id_cmd.name.to_s + end + + # Changes the Mach-O's dylib ID to `new_id`. Does nothing if not a dylib. + # @example + # file.change_dylib_id("libFoo.dylib") + # @param new_id [String] the dylib's new ID + # @param _options [Hash] + # @return [void] + # @raise [ArgumentError] if `new_id` is not a String + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#change_dylib_id} + def change_dylib_id(new_id, _options = {}) + raise ArgumentError, "new ID must be a String" unless new_id.is_a?(String) + return unless dylib? + + old_lc = command(:LC_ID_DYLIB).first + raise DylibIdMissingError unless old_lc + + new_lc = LoadCommands::LoadCommand.create(:LC_ID_DYLIB, new_id, + old_lc.timestamp, + old_lc.current_version, + old_lc.compatibility_version) + + replace_command(old_lc, new_lc) + end + + alias dylib_id= change_dylib_id + + # All shared libraries linked to the Mach-O. 
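Combining `dylib_id`/`change_dylib_id` with the write helpers defined further down, an ID rewrite looks roughly like this (the paths and the new ID are illustrative):

```ruby
file = MachO::MachOFile.new("build/libfoo.dylib")

file.dylib_id                                # => "libfoo.dylib", or nil for non-dylibs
file.change_dylib_id("@rpath/libfoo.dylib")  # replaces LC_ID_DYLIB via replace_command
file.write!                                  # overwrites build/libfoo.dylib in place
```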
+ # @return [Array<String>] an array of all shared libraries + def linked_dylibs + # Some linkers produce multiple `LC_LOAD_DYLIB` load commands for the same + # library, but at this point we're really only interested in a list of + # unique libraries this Mach-O file links to, thus: `uniq`. (This is also + # for consistency with `FatFile` that merges this list across all archs.) + dylib_load_commands.map(&:name).map(&:to_s).uniq + end + + # Changes the shared library `old_name` to `new_name` + # @example + # file.change_install_name("abc.dylib", "def.dylib") + # @param old_name [String] the shared library's old name + # @param new_name [String] the shared library's new name + # @param _options [Hash] + # @return [void] + # @raise [DylibUnknownError] if no shared library has the old name + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#change_install_name} + def change_install_name(old_name, new_name, _options = {}) + old_lc = dylib_load_commands.find { |d| d.name.to_s == old_name } + raise DylibUnknownError, old_name if old_lc.nil? + + new_lc = LoadCommands::LoadCommand.create(old_lc.type, new_name, + old_lc.timestamp, + old_lc.current_version, + old_lc.compatibility_version) + + replace_command(old_lc, new_lc) + end + + alias change_dylib change_install_name + + # All runtime paths searched by the dynamic linker for the Mach-O. + # @return [Array<String>] an array of all runtime paths + def rpaths + command(:LC_RPATH).map(&:path).map(&:to_s) + end + + # Changes the runtime path `old_path` to `new_path` + # @example + # file.change_rpath("/usr/lib", "/usr/local/lib") + # @param old_path [String] the old runtime path + # @param new_path [String] the new runtime path + # @param _options [Hash] + # @return [void] + # @raise [RpathUnknownError] if no such old runtime path exists + # @raise [RpathExistsError] if the new runtime path already exists + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#change_rpath} + def change_rpath(old_path, new_path, _options = {}) + old_lc = command(:LC_RPATH).find { |r| r.path.to_s == old_path } + raise RpathUnknownError, old_path if old_lc.nil? + raise RpathExistsError, new_path if rpaths.include?(new_path) + + new_lc = LoadCommands::LoadCommand.create(:LC_RPATH, new_path) + + delete_rpath(old_path) + insert_command(old_lc.view.offset, new_lc) + end + + # Add the given runtime path to the Mach-O. + # @example + # file.rpaths # => ["/lib"] + # file.add_rpath("/usr/lib") + # file.rpaths # => ["/lib", "/usr/lib"] + # @param path [String] the new runtime path + # @param _options [Hash] + # @return [void] + # @raise [RpathExistsError] if the runtime path already exists + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#add_rpath} + def add_rpath(path, _options = {}) + raise RpathExistsError, path if rpaths.include?(path) + + rpath_cmd = LoadCommands::LoadCommand.create(:LC_RPATH, path) + add_command(rpath_cmd) + end + + # Delete the given runtime path from the Mach-O. 
+ # @example + # file.rpaths # => ["/lib"] + # file.delete_rpath("/lib") + # file.rpaths # => [] + # @param path [String] the runtime path to delete + # @param _options [Hash] + # @return void + # @raise [RpathUnknownError] if no such runtime path exists + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#delete_rpath} + def delete_rpath(path, _options = {}) + rpath_cmds = command(:LC_RPATH).select { |r| r.path.to_s == path } + raise RpathUnknownError, path if rpath_cmds.empty? + + # delete the commands in reverse order, offset descending. this + # allows us to defer (expensive) field population until the very end + rpath_cmds.reverse_each { |cmd| delete_command(cmd, :repopulate => false) } + + populate_fields + end + + # Write all Mach-O data to the given filename. + # @param filename [String] the file to write to + # @return [void] + def write(filename) + File.open(filename, "wb") { |f| f.write(@raw_data) } + end + + # Write all Mach-O data to the file used to initialize the instance. + # @return [void] + # @raise [MachOError] if the instance was initialized without a file + # @note Overwrites all data in the file! + def write! + raise MachOError, "no initial file to write to" if @filename.nil? + + File.open(@filename, "wb") { |f| f.write(@raw_data) } + end + + # @return [Hash] a hash representation of this {MachOFile} + def to_h + { + "header" => header.to_h, + "load_commands" => load_commands.map(&:to_h), + } + end + + private + + # The file's Mach-O header structure. + # @return [Headers::MachHeader] if the Mach-O is 32-bit + # @return [Headers::MachHeader64] if the Mach-O is 64-bit + # @raise [TruncatedFileError] if the file is too small to have a valid header + # @api private + def populate_mach_header + # the smallest Mach-O header is 28 bytes + raise TruncatedFileError if @raw_data.size < 28 + + magic = populate_and_check_magic + mh_klass = Utils.magic32?(magic) ? Headers::MachHeader : Headers::MachHeader64 + mh = mh_klass.new_from_bin(endianness, @raw_data[0, mh_klass.bytesize]) + + check_cputype(mh.cputype) + check_cpusubtype(mh.cputype, mh.cpusubtype) + check_filetype(mh.filetype) + + mh + end + + # Read just the file's magic number and check its validity. + # @return [Integer] the magic + # @raise [MagicError] if the magic is not valid Mach-O magic + # @raise [FatBinaryError] if the magic is for a Fat file + # @api private + def populate_and_check_magic + magic = @raw_data[0..3].unpack("N").first + + raise MagicError, magic unless Utils.magic?(magic) + raise FatBinaryError if Utils.fat_magic?(magic) + + @endianness = Utils.little_magic?(magic) ? :little : :big + + magic + end + + # Check the file's CPU type. + # @param cputype [Integer] the CPU type + # @raise [CPUTypeError] if the CPU type is unknown + # @api private + def check_cputype(cputype) + raise CPUTypeError, cputype unless Headers::CPU_TYPES.key?(cputype) + end + + # Check the file's CPU type/subtype pair. + # @param cpusubtype [Integer] the CPU subtype + # @raise [CPUSubtypeError] if the CPU sub-type is unknown + # @api private + def check_cpusubtype(cputype, cpusubtype) + # Only check sub-type w/o capability bits (see `populate_mach_header`). + raise CPUSubtypeError.new(cputype, cpusubtype) unless Headers::CPU_SUBTYPES[cputype].key?(cpusubtype) + end + + # Check the file's type. 
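Taken together, the install-name and rpath helpers above cover the usual `install_name_tool`-style edits. A rough sketch, with all names below being illustrative:

```ruby
file = MachO::MachOFile.new("MyApp")

file.linked_dylibs   # => ["/usr/local/lib/libbar.dylib", ...]
file.change_install_name("/usr/local/lib/libbar.dylib", "@rpath/libbar.dylib")

file.rpaths          # => ["/usr/local/lib"]
file.change_rpath("/usr/local/lib", "@loader_path/../Frameworks")
file.add_rpath("@executable_path")

file.write("MyApp.patched")  # or file.write! to overwrite MyApp
```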
+ # @param filetype [Integer] the file type + # @raise [FiletypeError] if the file type is unknown + # @api private + def check_filetype(filetype) + raise FiletypeError, filetype unless Headers::MH_FILETYPES.key?(filetype) + end + + # All load commands in the file. + # @return [Array<LoadCommands::LoadCommand>] an array of load commands + # @raise [LoadCommandError] if an unknown load command is encountered + # @api private + def populate_load_commands + offset = header.class.bytesize + load_commands = [] + + header.ncmds.times do + fmt = Utils.specialize_format("L=", endianness) + cmd = @raw_data.slice(offset, 4).unpack(fmt).first + cmd_sym = LoadCommands::LOAD_COMMANDS[cmd] + + raise LoadCommandError, cmd if cmd_sym.nil? + + # why do I do this? i don't like declaring constants below + # classes, and i need them to resolve... + klass = LoadCommands.const_get LoadCommands::LC_STRUCTURES[cmd_sym] + view = MachOView.new(@raw_data, endianness, offset) + command = klass.new_from_bin(view) + + load_commands << command + offset += command.cmdsize + end + + load_commands + end + + # The low file offset (offset to first section data). + # @return [Integer] the offset + # @api private + def low_fileoff + offset = @raw_data.size + + segments.each do |seg| + seg.sections.each do |sect| + next if sect.empty? + next if sect.flag?(:S_ZEROFILL) + next if sect.flag?(:S_THREAD_LOCAL_ZEROFILL) + next unless sect.offset < offset + + offset = sect.offset + end + end + + offset + end + + # Updates the number of load commands in the raw data. + # @param ncmds [Integer] the new number of commands + # @return [void] + # @api private + def update_ncmds(ncmds) + fmt = Utils.specialize_format("L=", endianness) + ncmds_raw = [ncmds].pack(fmt) + @raw_data[16..19] = ncmds_raw + end + + # Updates the size of all load commands in the raw data. + # @param size [Integer] the new size, in bytes + # @return [void] + # @api private + def update_sizeofcmds(size) + fmt = Utils.specialize_format("L=", endianness) + size_raw = [size].pack(fmt) + @raw_data[20..23] = size_raw + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/sections.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/sections.rb new file mode 100644 index 0000000..093fbb2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/sections.rb @@ -0,0 +1,204 @@ +module MachO + # Classes and constants for parsing sections in Mach-O binaries. 
+  module Sections +    # type mask +    SECTION_TYPE = 0x000000ff + +    # attributes mask +    SECTION_ATTRIBUTES = 0xffffff00 + +    # user settable attributes mask +    SECTION_ATTRIBUTES_USR = 0xff000000 + +    # system settable attributes mask +    SECTION_ATTRIBUTES_SYS = 0x00ffff00 + +    # maximum specifiable section alignment, as a power of 2 +    # @note see `MAXSECTALIGN` macro in `cctools/misc/lipo.c` +    MAX_SECT_ALIGN = 15 + +    # association of section flag symbols to values +    # @api private +    SECTION_FLAGS = { +      :S_REGULAR => 0x0, +      :S_ZEROFILL => 0x1, +      :S_CSTRING_LITERALS => 0x2, +      :S_4BYTE_LITERALS => 0x3, +      :S_8BYTE_LITERALS => 0x4, +      :S_LITERAL_POINTERS => 0x5, +      :S_NON_LAZY_SYMBOL_POINTERS => 0x6, +      :S_LAZY_SYMBOL_POINTERS => 0x7, +      :S_SYMBOL_STUBS => 0x8, +      :S_MOD_INIT_FUNC_POINTERS => 0x9, +      :S_MOD_TERM_FUNC_POINTERS => 0xa, +      :S_COALESCED => 0xb, +      :S_GB_ZEROFILL => 0xc, +      :S_INTERPOSING => 0xd, +      :S_16BYTE_LITERALS => 0xe, +      :S_DTRACE_DOF => 0xf, +      :S_LAZY_DYLIB_SYMBOL_POINTERS => 0x10, +      :S_THREAD_LOCAL_REGULAR => 0x11, +      :S_THREAD_LOCAL_ZEROFILL => 0x12, +      :S_THREAD_LOCAL_VARIABLES => 0x13, +      :S_THREAD_LOCAL_VARIABLE_POINTERS => 0x14, +      :S_THREAD_LOCAL_INIT_FUNCTION_POINTERS => 0x15, +      :S_ATTR_PURE_INSTRUCTIONS => 0x80000000, +      :S_ATTR_NO_TOC => 0x40000000, +      :S_ATTR_STRIP_STATIC_SYMS => 0x20000000, +      :S_ATTR_NO_DEAD_STRIP => 0x10000000, +      :S_ATTR_LIVE_SUPPORT => 0x08000000, +      :S_ATTR_SELF_MODIFYING_CODE => 0x04000000, +      :S_ATTR_DEBUG => 0x02000000, +      :S_ATTR_SOME_INSTRUCTIONS => 0x00000400, +      :S_ATTR_EXT_RELOC => 0x00000200, +      :S_ATTR_LOC_RELOC => 0x00000100, +    }.freeze + +    # association of section name symbols to names +    # @api private +    SECTION_NAMES = { +      :SECT_TEXT => "__text", +      :SECT_FVMLIB_INIT0 => "__fvmlib_init0", +      :SECT_FVMLIB_INIT1 => "__fvmlib_init1", +      :SECT_DATA => "__data", +      :SECT_BSS => "__bss", +      :SECT_COMMON => "__common", +      :SECT_OBJC_SYMBOLS => "__symbol_table", +      :SECT_OBJC_MODULES => "__module_info", +      :SECT_OBJC_STRINGS => "__selector_strs", +      :SECT_OBJC_REFS => "__selector_refs", +      :SECT_ICON_HEADER => "__header", +      :SECT_ICON_TIFF => "__tiff", +    }.freeze + +    # Represents a section of a segment for 32-bit architectures.
+ class Section < MachOStructure + # @return [String] the name of the section, including null pad bytes + attr_reader :sectname + + # @return [String] the name of the segment's section, including null + # pad bytes + attr_reader :segname + + # @return [Integer] the memory address of the section + attr_reader :addr + + # @return [Integer] the size, in bytes, of the section + attr_reader :size + + # @return [Integer] the file offset of the section + attr_reader :offset + + # @return [Integer] the section alignment (power of 2) of the section + attr_reader :align + + # @return [Integer] the file offset of the section's relocation entries + attr_reader :reloff + + # @return [Integer] the number of relocation entries + attr_reader :nreloc + + # @return [Integer] flags for type and attributes of the section + attr_reader :flags + + # @return [void] reserved (for offset or index) + attr_reader :reserved1 + + # @return [void] reserved (for count or sizeof) + attr_reader :reserved2 + + # @see MachOStructure::FORMAT + FORMAT = "Z16Z16L=9".freeze + + # @see MachOStructure::SIZEOF + SIZEOF = 68 + + # @api private + def initialize(sectname, segname, addr, size, offset, align, reloff, + nreloc, flags, reserved1, reserved2) + @sectname = sectname + @segname = segname + @addr = addr + @size = size + @offset = offset + @align = align + @reloff = reloff + @nreloc = nreloc + @flags = flags + @reserved1 = reserved1 + @reserved2 = reserved2 + end + + # @return [String] the section's name + def section_name + sectname + end + + # @return [String] the parent segment's name + def segment_name + segname + end + + # @return [Boolean] whether the section is empty (i.e, {size} is 0) + def empty? + size.zero? + end + + # @example + # puts "this section is regular" if sect.flag?(:S_REGULAR) + # @param flag [Symbol] a section flag symbol + # @return [Boolean] whether the flag is present in the section's {flags} + def flag?(flag) + flag = SECTION_FLAGS[flag] + + return false if flag.nil? + + flags & flag == flag + end + + # @return [Hash] a hash representation of this {Section} + def to_h + { + "sectname" => sectname, + "segname" => segname, + "addr" => addr, + "size" => size, + "offset" => offset, + "align" => align, + "reloff" => reloff, + "nreloc" => nreloc, + "flags" => flags, + "reserved1" => reserved1, + "reserved2" => reserved2, + }.merge super + end + end + + # Represents a section of a segment for 64-bit architectures. + class Section64 < Section + # @return [void] reserved + attr_reader :reserved3 + + # @see MachOStructure::FORMAT + FORMAT = "Z16Z16Q=2L=8".freeze + + # @see MachOStructure::SIZEOF + SIZEOF = 80 + + # @api private + def initialize(sectname, segname, addr, size, offset, align, reloff, + nreloc, flags, reserved1, reserved2, reserved3) + super(sectname, segname, addr, size, offset, align, reloff, + nreloc, flags, reserved1, reserved2) + @reserved3 = reserved3 + end + + # @return [Hash] a hash representation of this {Section64} + def to_h + { + "reserved3" => reserved3, + }.merge super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/structure.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/structure.rb new file mode 100644 index 0000000..7bece4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/structure.rb @@ -0,0 +1,40 @@ +module MachO + # A general purpose pseudo-structure. + # @abstract + class MachOStructure + # The String#unpack format of the data structure. 
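The section helpers above (`section_name`, `segment_name`, `empty?`, `flag?`) make it easy to walk a binary's layout. A sketch with an illustrative input path:

```ruby
file = MachO::MachOFile.new("MyApp")

file.segments.each do |seg|
  seg.sections.each do |sect|
    attrs = []
    attrs << "zerofill" if sect.flag?(:S_ZEROFILL)
    attrs << "pure-instructions" if sect.flag?(:S_ATTR_PURE_INSTRUCTIONS)
    puts "#{sect.segment_name}/#{sect.section_name}: #{sect.size} bytes #{attrs.join(' ')}"
  end
end
```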
+ # @return [String] the unpacking format + # @api private + FORMAT = "".freeze + + # The size of the data structure, in bytes. + # @return [Integer] the size, in bytes + # @api private + SIZEOF = 0 + + # @return [Integer] the size, in bytes, of the represented structure. + def self.bytesize + self::SIZEOF + end + + # @param endianness [Symbol] either `:big` or `:little` + # @param bin [String] the string to be unpacked into the new structure + # @return [MachO::MachOStructure] the resulting structure + # @api private + def self.new_from_bin(endianness, bin) + format = Utils.specialize_format(self::FORMAT, endianness) + + new(*bin.unpack(format)) + end + + # @return [Hash] a hash representation of this {MachOStructure}. + def to_h + { + "structure" => { + "format" => self.class::FORMAT, + "bytesize" => self.class.bytesize, + }, + } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/tools.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/tools.rb new file mode 100644 index 0000000..c9bcbd7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/tools.rb @@ -0,0 +1,109 @@ +module MachO + # A collection of convenient methods for common operations on Mach-O and Fat + # binaries. + module Tools + # @param filename [String] the Mach-O or Fat binary being read + # @return [Array<String>] an array of all dylibs linked to the binary + def self.dylibs(filename) + file = MachO.open(filename) + + file.linked_dylibs + end + + # Changes the dylib ID of a Mach-O or Fat binary, overwriting the source + # file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param new_id [String] the new dylib ID for the binary + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.change_dylib_id(filename, new_id, options = {}) + file = MachO.open(filename) + + file.change_dylib_id(new_id, options) + file.write! + end + + # Changes a shared library install name in a Mach-O or Fat binary, + # overwriting the source file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param old_name [String] the old shared library name + # @param new_name [String] the new shared library name + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.change_install_name(filename, old_name, new_name, options = {}) + file = MachO.open(filename) + + file.change_install_name(old_name, new_name, options) + file.write! + end + + # Changes a runtime path in a Mach-O or Fat binary, overwriting the source + # file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param old_path [String] the old runtime path + # @param new_path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.change_rpath(filename, old_path, new_path, options = {}) + file = MachO.open(filename) + + file.change_rpath(old_path, new_path, options) + file.write! + end + + # Add a runtime path to a Mach-O or Fat binary, overwriting the source file. 
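For one-shot edits, the `Tools` wrappers above open the file, apply the change, and write it back in a single call. A sketch; the file and library names are illustrative:

```ruby
require "macho"

MachO::Tools.dylibs("MyApp")  # => ["/usr/local/lib/libbar.dylib", ...]
MachO::Tools.change_install_name("MyApp", "/usr/local/lib/libbar.dylib", "@rpath/libbar.dylib")
MachO::Tools.add_rpath("MyApp", "@loader_path/../Frameworks")

# merge_machos (defined below) stitches thin slices into one fat binary.
MachO::Tools.merge_machos("MyApp.universal", "build/x86_64/MyApp", "build/arm64/MyApp")
```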
+ # @param filename [String] the Mach-O or Fat binary being modified + # @param new_path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.add_rpath(filename, new_path, options = {}) + file = MachO.open(filename) + + file.add_rpath(new_path, options) + file.write! + end + + # Delete a runtime path from a Mach-O or Fat binary, overwriting the source + # file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param old_path [String] the old runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.delete_rpath(filename, old_path, options = {}) + file = MachO.open(filename) + + file.delete_rpath(old_path, options) + file.write! + end + + # Merge multiple Mach-Os into one universal (Fat) binary. + # @param filename [String] the fat binary to create + # @param files [Array<String>] the files to merge + # @param fat64 [Boolean] whether to use {Headers::FatArch64}s to represent each slice + # @return [void] + def self.merge_machos(filename, *files, fat64: false) + machos = files.map do |file| + macho = MachO.open(file) + case macho + when MachO::MachOFile + macho + else + macho.machos + end + end.flatten + + fat_macho = MachO::FatFile.new_from_machos(*machos, :fat64 => fat64) + fat_macho.write(filename) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/utils.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/utils.rb new file mode 100644 index 0000000..2b5a307 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/utils.rb @@ -0,0 +1,123 @@ +module MachO + # A collection of utility functions used throughout ruby-macho. + module Utils + # Rounds a value to the next multiple of the given round. + # @param value [Integer] the number being rounded + # @param round [Integer] the number being rounded with + # @return [Integer] the rounded value + # @see http://www.opensource.apple.com/source/cctools/cctools-870/libstuff/rnd.c + def self.round(value, round) + round -= 1 + value += round + value &= ~round + value + end + + # Returns the number of bytes needed to pad the given size to the given + # alignment. + # @param size [Integer] the unpadded size + # @param alignment [Integer] the number to alignment the size with + # @return [Integer] the number of pad bytes required + def self.padding_for(size, alignment) + round(size, alignment) - size + end + + # Returns a string of null bytes of the requested (non-negative) size + # @param size [Integer] the size of the nullpad + # @return [String] the null string (or empty string, for `size = 0`) + # @raise [ArgumentError] if a non-positive nullpad is requested + def self.nullpad(size) + raise ArgumentError, "size < 0: #{size}" if size < 0 + + "\x00" * size + end + + # Converts an abstract (native-endian) String#unpack format to big or + # little. + # @param format [String] the format string being converted + # @param endianness [Symbol] either `:big` or `:little` + # @return [String] the converted string + def self.specialize_format(format, endianness) + modifier = endianness == :big ? ">" : "<" + format.tr("=", modifier) + end + + # Packs tagged strings into an aligned payload. 
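The rounding helpers above are simple bit arithmetic; a few concrete values, derived directly from the definitions:

```ruby
MachO::Utils.round(13, 8)        # => 16 (next multiple of 8)
MachO::Utils.round(32, 8)        # => 32 (already aligned)
MachO::Utils.padding_for(13, 8)  # => 3
MachO::Utils.nullpad(3)          # => "\x00\x00\x00"

MachO::Utils.specialize_format("L=2", :big)     # => "L>2"
MachO::Utils.specialize_format("L=2", :little)  # => "L<2"
```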
+ # @param fixed_offset [Integer] the baseline offset for the first packed + # string + # @param alignment [Integer] the alignment value to use for packing + # @param strings [Hash] the labeled strings to pack + # @return [Array<String, Hash>] the packed string and labeled offsets + def self.pack_strings(fixed_offset, alignment, strings = {}) + offsets = {} + next_offset = fixed_offset + payload = "" + + strings.each do |key, string| + offsets[key] = next_offset + payload << string + payload << Utils.nullpad(1) + next_offset += string.bytesize + 1 + end + + payload << Utils.nullpad(padding_for(fixed_offset + payload.bytesize, alignment)) + [payload, offsets] + end + + # Compares the given number to valid Mach-O magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid Mach-O magic number + def self.magic?(num) + Headers::MH_MAGICS.key?(num) + end + + # Compares the given number to valid Fat magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid Fat magic number + def self.fat_magic?(num) + [Headers::FAT_MAGIC, Headers::FAT_MAGIC_64].include? num + end + + # Compares the given number to valid 32-bit Fat magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 32-bit fat magic number + def self.fat_magic32?(num) + num == Headers::FAT_MAGIC + end + + # Compares the given number to valid 64-bit Fat magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 64-bit fat magic number + def self.fat_magic64?(num) + num == Headers::FAT_MAGIC_64 + end + + # Compares the given number to valid 32-bit Mach-O magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 32-bit magic number + def self.magic32?(num) + [Headers::MH_MAGIC, Headers::MH_CIGAM].include? num + end + + # Compares the given number to valid 64-bit Mach-O magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 64-bit magic number + def self.magic64?(num) + [Headers::MH_MAGIC_64, Headers::MH_CIGAM_64].include? num + end + + # Compares the given number to valid little-endian magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid little-endian magic number + def self.little_magic?(num) + [Headers::MH_CIGAM, Headers::MH_CIGAM_64].include? num + end + + # Compares the given number to valid big-endian magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid big-endian magic number + def self.big_magic?(num) + [Headers::MH_MAGIC, Headers::MH_MAGIC_64].include? num + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/view.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/view.rb new file mode 100644 index 0000000..5bd40e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-1.4.0/lib/macho/view.rb @@ -0,0 +1,31 @@ +module MachO + # A representation of some unspecified Mach-O data. + class MachOView + # @return [String] the raw Mach-O data + attr_reader :raw_data + + # @return [Symbol] the endianness of the data (`:big` or `:little`) + attr_reader :endianness + + # @return [Integer] the offset of the relevant data (in {#raw_data}) + attr_reader :offset + + # Creates a new MachOView. 
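A worked example of `pack_strings` and the magic predicates above; the label and string are illustrative:

```ruby
payload, offsets = MachO::Utils.pack_strings(24, 8, :install_name => "foo.dylib")
offsets           # => { :install_name => 24 }
payload.bytesize  # => 16 ("foo.dylib", a null terminator, then 6 bytes of alignment padding)

MachO::Utils.magic32?(0xfeedface)       # => true (MH_MAGIC)
MachO::Utils.fat_magic?(0xcafebabe)     # => true (FAT_MAGIC)
MachO::Utils.little_magic?(0xcffaedfe)  # => true (MH_CIGAM_64)
```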
+ # @param raw_data [String] the raw Mach-O data + # @param endianness [Symbol] the endianness of the data + # @param offset [Integer] the offset of the relevant data + def initialize(raw_data, endianness, offset) + @raw_data = raw_data + @endianness = endianness + @offset = offset + end + + # @return [Hash] a hash representation of this {MachOView}. + def to_h + { + "endianness" => endianness, + "offset" => offset, + } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/.yardopts b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/.yardopts new file mode 100644 index 0000000..035b890 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/.yardopts @@ -0,0 +1 @@ +--no-private --markup-provider=redcarpet --markup=markdown - README.md LICENSE diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/LICENSE b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/LICENSE new file mode 100644 index 0000000..89dd7d6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015, 2016, 2017, 2018 William Woodruff <william @ yossarian.net> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/README.md b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/README.md new file mode 100644 index 0000000..28813f1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/README.md @@ -0,0 +1,69 @@ +ruby-macho +================ + +[![Gem Version](https://badge.fury.io/rb/ruby-macho.svg)](http://badge.fury.io/rb/ruby-macho) +[![Build Status](https://travis-ci.org/Homebrew/ruby-macho.svg?branch=master)](https://travis-ci.org/Homebrew/ruby-macho) +[![Coverage Status](https://codecov.io/gh/Homebrew/ruby-macho/branch/master/graph/badge.svg)](https://codecov.io/gh/Homebrew/ruby-macho) + +A Ruby library for examining and modifying Mach-O files. + +### What is a Mach-O file? + +The [Mach-O file format](https://en.wikipedia.org/wiki/Mach-O) is used by macOS +and iOS (among others) as a general purpose binary format for object files, +executables, dynamic libraries, and so forth. + +### Installation + +ruby-macho can be installed via RubyGems: + +```bash +$ gem install ruby-macho +``` + +### Documentation + +Full documentation is available on [RubyDoc](http://www.rubydoc.info/gems/ruby-macho/). 
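If you don't know ahead of time whether a file is a thin Mach-O or a fat (universal) binary, `MachO.open` picks the right class for you; a small sketch with an illustrative path:

```ruby
require 'macho'

file = MachO.open("/path/to/my/binary")

case file
when MachO::FatFile
  puts "fat: #{file.machos.map(&:cputype).join(', ')}"
when MachO::MachOFile
  puts "thin: #{file.cputype}"
end

file.linked_dylibs # works on both thin and fat files
```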
+ +A quick example of what ruby-macho can do: + +```ruby +require 'macho' + +file = MachO::MachOFile.new("/path/to/my/binary") + +# get the file's type (object, dynamic lib, executable, etc) +file.filetype # => :execute + +# get all load commands in the file and print their offsets: +file.load_commands.each do |lc| + puts "#{lc.type}: offset #{lc.offset}, size: #{lc.cmdsize}" +end + +# access a specific load command +lc_vers = file[:LC_VERSION_MIN_MACOSX].first +puts lc_vers.version_string # => "10.10.0" +``` + +### What works? + +* Reading data from x86/x86_64/PPC Mach-O files +* Changing the IDs of Mach-O and Fat dylibs +* Changing install names in Mach-O and Fat files +* Adding, deleting, and modifying rpaths. + +### What needs to be done? + +* Unit and performance testing. + +Attribution: + +* Constants were taken from Apple, Inc's +[`loader.h` in `cctools/include/mach-o`](https://www.opensource.apple.com/source/cctools/cctools-870/include/mach-o/loader.h). +(Apple Public Source License 2.0). + +### License + +`ruby-macho` is licensed under the MIT License. + +For the exact terms, see the [license](LICENSE) file. diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho.rb new file mode 100644 index 0000000..db4dc22 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +require "open3" + +require_relative "macho/structure" +require_relative "macho/view" +require_relative "macho/headers" +require_relative "macho/load_commands" +require_relative "macho/sections" +require_relative "macho/macho_file" +require_relative "macho/fat_file" +require_relative "macho/exceptions" +require_relative "macho/utils" +require_relative "macho/tools" + +# The primary namespace for ruby-macho. +module MachO + # release version + VERSION = "2.5.1" + + # Opens the given filename as a MachOFile or FatFile, depending on its magic. + # @param filename [String] the file being opened + # @return [MachOFile] if the file is a Mach-O + # @return [FatFile] if the file is a Fat file + # @raise [ArgumentError] if the given file does not exist + # @raise [TruncatedFileError] if the file is too small to have a valid header + # @raise [MagicError] if the file's magic is not valid Mach-O magic + def self.open(filename) + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + raise TruncatedFileError unless File.stat(filename).size >= 4 + + magic = File.open(filename, "rb") { |f| f.read(4) }.unpack1("N") + + if Utils.fat_magic?(magic) + file = FatFile.new(filename) + elsif Utils.magic?(magic) + file = MachOFile.new(filename) + else + raise MagicError, magic + end + + file + end + + # Signs the dylib using an ad-hoc identity. + # Necessary after making any changes to a dylib, since otherwise + # changing a signed file invalidates its signature. + # @param filename [String] the file being opened + # @return [void] + # @raise [ModificationError] if the operation fails + def self.codesign!(filename) + raise ArgumentError, "codesign binary is not available on Linux" if RUBY_PLATFORM !~ /darwin/ + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + + _, _, status = Open3.capture3("codesign", "--sign", "-", "--force", + "--preserve-metadata=entitlements,requirements,flags,runtime", + filename) + + raise CodeSigningError, "#{filename}: signing failed!" unless status.success? 
+ end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/exceptions.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/exceptions.rb new file mode 100644 index 0000000..b3b53e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/exceptions.rb @@ -0,0 +1,210 @@ +# frozen_string_literal: true + +module MachO + # A generic Mach-O error in execution. + class MachOError < RuntimeError + end + + # Raised when a Mach-O file modification fails. + class ModificationError < MachOError + end + + # Raised when codesigning fails. Certain environments + # may want to rescue this to treat it as non-fatal. + class CodeSigningError < MachOError + end + + # Raised when a Mach-O file modification fails but can be recovered when + # operating on multiple Mach-O slices of a fat binary in non-strict mode. + class RecoverableModificationError < ModificationError + # @return [Integer, nil] The index of the Mach-O slice of a fat binary for + # which modification failed or `nil` if not a fat binary. This is used to + # make the error message more useful. + attr_accessor :macho_slice + + # @return [String] The exception message. + def to_s + s = super.to_s + s = "While modifying Mach-O slice #{@macho_slice}: #{s}" if @macho_slice + s + end + end + + # Raised when a file is not a Mach-O. + class NotAMachOError < MachOError + end + + # Raised when a file is too short to be a valid Mach-O file. + class TruncatedFileError < NotAMachOError + def initialize + super "File is too short to be a valid Mach-O" + end + end + + # Raised when a file's magic bytes are not valid Mach-O magic. + class MagicError < NotAMachOError + # @param num [Integer] the unknown number + def initialize(magic) + super "Unrecognized Mach-O magic: 0x%02<magic>x" % { :magic => magic } + end + end + + # Raised when a file is a Java classfile instead of a fat Mach-O. + class JavaClassFileError < NotAMachOError + def initialize + super "File is a Java class file" + end + end + + # Raised when a fat binary is loaded with MachOFile. + class FatBinaryError < MachOError + def initialize + super "Fat binaries must be loaded with MachO::FatFile" + end + end + + # Raised when a Mach-O is loaded with FatFile. + class MachOBinaryError < MachOError + def initialize + super "Normal binaries must be loaded with MachO::MachOFile" + end + end + + # Raised when the CPU type is unknown. + class CPUTypeError < MachOError + # @param cputype [Integer] the unknown CPU type + def initialize(cputype) + super "Unrecognized CPU type: 0x%08<cputype>x" % { :cputype => cputype } + end + end + + # Raised when the CPU type/sub-type pair is unknown. + class CPUSubtypeError < MachOError + # @param cputype [Integer] the CPU type of the unknown pair + # @param cpusubtype [Integer] the CPU sub-type of the unknown pair + def initialize(cputype, cpusubtype) + super "Unrecognized CPU sub-type: 0x%08<cpusubtype>x" \ + " (for CPU type: 0x%08<cputype>x" % { :cputype => cputype, :cpusubtype => cpusubtype } + end + end + + # Raised when a mach-o file's filetype field is unknown. + class FiletypeError < MachOError + # @param num [Integer] the unknown number + def initialize(num) + super "Unrecognized Mach-O filetype code: 0x%02<num>x" % { :num => num } + end + end + + # Raised when an unknown load command is encountered. 
+ class LoadCommandError < MachOError + # @param num [Integer] the unknown number + def initialize(num) + super "Unrecognized Mach-O load command: 0x%02<num>x" % { :num => num } + end + end + + # Raised when a load command can't be created manually. + class LoadCommandNotCreatableError < MachOError + # @param cmd_sym [Symbol] the uncreatable load command's symbol + def initialize(cmd_sym) + super "Load commands of type #{cmd_sym} cannot be created manually" + end + end + + # Raised when the number of arguments used to create a load command manually + # is wrong. + class LoadCommandCreationArityError < MachOError + # @param cmd_sym [Symbol] the load command's symbol + # @param expected_arity [Integer] the number of arguments expected + # @param actual_arity [Integer] the number of arguments received + def initialize(cmd_sym, expected_arity, actual_arity) + super "Expected #{expected_arity} arguments for #{cmd_sym} creation," \ + " got #{actual_arity}" + end + end + + # Raised when a load command can't be serialized. + class LoadCommandNotSerializableError < MachOError + # @param cmd_sym [Symbol] the load command's symbol + def initialize(cmd_sym) + super "Load commands of type #{cmd_sym} cannot be serialized" + end + end + + # Raised when a load command string is malformed in some way. + class LCStrMalformedError < MachOError + # @param lc [MachO::LoadCommand] the load command containing the string + def initialize(lc) + super "Load command #{lc.type} at offset #{lc.view.offset} contains a" \ + " malformed string" + end + end + + # Raised when a change at an offset is not valid. + class OffsetInsertionError < ModificationError + # @param offset [Integer] the invalid offset + def initialize(offset) + super "Insertion at offset #{offset} is not valid" + end + end + + # Raised when load commands are too large to fit in the current file. + class HeaderPadError < ModificationError + # @param filename [String] the filename + def initialize(filename) + super "Updated load commands do not fit in the header of " \ + "#{filename}. #{filename} needs to be relinked, possibly with " \ + "-headerpad or -headerpad_max_install_names" + end + end + + # Raised when attempting to change a dylib name that doesn't exist. + class DylibUnknownError < RecoverableModificationError + # @param dylib [String] the unknown shared library name + def initialize(dylib) + super "No such dylib name: #{dylib}" + end + end + + # Raised when a dylib is missing an ID + class DylibIdMissingError < RecoverableModificationError + def initialize + super "Dylib is missing a dylib ID" + end + end + + # Raised when attempting to change an rpath that doesn't exist. + class RpathUnknownError < RecoverableModificationError + # @param path [String] the unknown runtime path + def initialize(path) + super "No such runtime path: #{path}" + end + end + + # Raised when attempting to add an rpath that already exists. + class RpathExistsError < RecoverableModificationError + # @param path [String] the extant path + def initialize(path) + super "#{path} already exists" + end + end + + # Raised whenever unfinished code is called. + class UnimplementedError < MachOError + # @param thing [String] the thing that is unimplemented + def initialize(thing) + super "Unimplemented: #{thing}" + end + end + + # Raised when attempting to create a {FatFile} from one or more {MachOFile}s + # whose offsets will not fit within the resulting 32-bit {Headers::FatArch#offset} fields. 
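Most failure modes above map onto a small hierarchy: `NotAMachOError` for inputs that are not Mach-Os at all, and `RecoverableModificationError` subclasses (unknown dylib, unknown rpath, and so on) for edits that cannot be applied. A sketch of typical handling; the paths and names are illustrative:

```ruby
begin
  file = MachO.open("some/file")
  file.change_install_name("libold.dylib", "@rpath/libnew.dylib")
  file.write!
rescue MachO::NotAMachOError => e
  warn "not a Mach-O: #{e.message}"
rescue MachO::RecoverableModificationError => e
  # e.macho_slice names the offending slice when the input was a fat binary
  warn "could not modify: #{e}"
end
```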
+ class FatArchOffsetOverflowError < MachOError + # @param offset [Integer] the offending offset + def initialize(offset) + super "Offset #{offset} exceeds the 32-bit width of a fat_arch offset." \ + " Consider merging with `fat64: true`" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/fat_file.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/fat_file.rb new file mode 100644 index 0000000..0eecdcf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/fat_file.rb @@ -0,0 +1,423 @@ +# frozen_string_literal: true + +require "forwardable" + +module MachO + # Represents a "Fat" file, which contains a header, a listing of available + # architectures, and one or more Mach-O binaries. + # @see https://en.wikipedia.org/wiki/Mach-O#Multi-architecture_binaries + # @see MachOFile + class FatFile + extend Forwardable + + # @return [String] the filename loaded from, or nil if loaded from a binary string + attr_accessor :filename + + # @return [Hash] any parser options that the instance was created with + # @note Options specified in a {FatFile} trickle down into the internal {MachOFile}s. + attr_reader :options + + # @return [Headers::FatHeader] the file's header + attr_reader :header + + # @return [Array<Headers::FatArch>, Array<Headers::FatArch64] an array of fat architectures + attr_reader :fat_archs + + # @return [Array<MachOFile>] an array of Mach-O binaries + attr_reader :machos + + # Creates a new FatFile from the given (single-arch) Mach-Os + # @param machos [Array<MachOFile>] the machos to combine + # @param fat64 [Boolean] whether to use {Headers::FatArch64}s to represent each slice + # @return [FatFile] a new FatFile containing the give machos + # @raise [ArgumentError] if less than one Mach-O is given + # @raise [FatArchOffsetOverflowError] if the Mach-Os are too big to be represented + # in a 32-bit {Headers::FatArch} and `fat64` is `false`. + def self.new_from_machos(*machos, fat64: false) + raise ArgumentError, "expected at least one Mach-O" if machos.empty? + + fa_klass, magic = if fat64 + [Headers::FatArch64, Headers::FAT_MAGIC_64] + else + [Headers::FatArch, Headers::FAT_MAGIC] + end + + # put the smaller alignments further forwards in fat macho, so that we do less padding + machos = machos.sort_by(&:segment_alignment) + + bin = +"" + + bin << Headers::FatHeader.new(magic, machos.size).serialize + offset = Headers::FatHeader.bytesize + (machos.size * fa_klass.bytesize) + + macho_pads = {} + + machos.each do |macho| + macho_offset = Utils.round(offset, 2**macho.segment_alignment) + + raise FatArchOffsetOverflowError, macho_offset if !fat64 && macho_offset > (2**32 - 1) + + macho_pads[macho] = Utils.padding_for(offset, 2**macho.segment_alignment) + + bin << fa_klass.new(macho.header.cputype, macho.header.cpusubtype, + macho_offset, macho.serialize.bytesize, + macho.segment_alignment).serialize + + offset += (macho.serialize.bytesize + macho_pads[macho]) + end + + machos.each do |macho| # rubocop:disable Style/CombinableLoops + bin << Utils.nullpad(macho_pads[macho]) + bin << macho.serialize + end + + new_from_bin(bin) + end + + # Creates a new FatFile instance from a binary string. 
+ # @param bin [String] a binary string containing raw Mach-O data + # @param opts [Hash] options to control the parser with + # @note see {MachOFile#initialize} for currently valid options + # @return [FatFile] a new FatFile + def self.new_from_bin(bin, **opts) + instance = allocate + instance.initialize_from_bin(bin, opts) + + instance + end + + # Creates a new FatFile from the given filename. + # @param filename [String] the fat file to load from + # @param opts [Hash] options to control the parser with + # @note see {MachOFile#initialize} for currently valid options + # @raise [ArgumentError] if the given file does not exist + def initialize(filename, **opts) + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + + @filename = filename + @options = opts + @raw_data = File.open(@filename, "rb", &:read) + populate_fields + end + + # Initializes a new FatFile instance from a binary string with the given options. + # @see new_from_bin + # @api private + def initialize_from_bin(bin, opts) + @filename = nil + @options = opts + @raw_data = bin + populate_fields + end + + # The file's raw fat data. + # @return [String] the raw fat data + def serialize + @raw_data + end + + # @!method object? + # @return (see MachO::MachOFile#object?) + # @!method executable? + # @return (see MachO::MachOFile#executable?) + # @!method fvmlib? + # @return (see MachO::MachOFile#fvmlib?) + # @!method core? + # @return (see MachO::MachOFile#core?) + # @!method preload? + # @return (see MachO::MachOFile#preload?) + # @!method dylib? + # @return (see MachO::MachOFile#dylib?) + # @!method dylinker? + # @return (see MachO::MachOFile#dylinker?) + # @!method bundle? + # @return (see MachO::MachOFile#bundle?) + # @!method dsym? + # @return (see MachO::MachOFile#dsym?) + # @!method kext? + # @return (see MachO::MachOFile#kext?) + # @!method filetype + # @return (see MachO::MachOFile#filetype) + # @!method dylib_id + # @return (see MachO::MachOFile#dylib_id) + def_delegators :canonical_macho, :object?, :executable?, :fvmlib?, + :core?, :preload?, :dylib?, :dylinker?, :bundle?, + :dsym?, :kext?, :filetype, :dylib_id + + # @!method magic + # @return (see MachO::Headers::FatHeader#magic) + def_delegators :header, :magic + + # @return [String] a string representation of the file's magic number + def magic_string + Headers::MH_MAGICS[magic] + end + + # Populate the instance's fields with the raw Fat Mach-O data. + # @return [void] + # @note This method is public, but should (almost) never need to be called. + def populate_fields + @header = populate_fat_header + @fat_archs = populate_fat_archs + @machos = populate_machos + end + + # All load commands responsible for loading dylibs in the file's Mach-O's. + # @return [Array<LoadCommands::DylibCommand>] an array of DylibCommands + def dylib_load_commands + machos.map(&:dylib_load_commands).flatten + end + + # Changes the file's dylib ID to `new_id`. If the file is not a dylib, + # does nothing. + # @example + # file.change_dylib_id('libFoo.dylib') + # @param new_id [String] the new dylib ID + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @raise [ArgumentError] if `new_id` is not a String + # @see MachOFile#linked_dylibs + def change_dylib_id(new_id, options = {}) + raise ArgumentError, "argument must be a String" unless new_id.is_a?(String) + return unless machos.all?(&:dylib?) 
+ + each_macho(options) do |macho| + macho.change_dylib_id(new_id, options) + end + + repopulate_raw_machos + end + + alias dylib_id= change_dylib_id + + # All shared libraries linked to the file's Mach-Os. + # @return [Array<String>] an array of all shared libraries + # @see MachOFile#linked_dylibs + def linked_dylibs + # Individual architectures in a fat binary can link to different subsets + # of libraries, but at this point we want to have the full picture, i.e. + # the union of all libraries used by all architectures. + machos.map(&:linked_dylibs).flatten.uniq + end + + # Changes all dependent shared library install names from `old_name` to + # `new_name`. In a fat file, this changes install names in all internal + # Mach-Os. + # @example + # file.change_install_name('/usr/lib/libFoo.dylib', '/usr/lib/libBar.dylib') + # @param old_name [String] the shared library name being changed + # @param new_name [String] the new name + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @see MachOFile#change_install_name + def change_install_name(old_name, new_name, options = {}) + each_macho(options) do |macho| + macho.change_install_name(old_name, new_name, options) + end + + repopulate_raw_machos + end + + alias change_dylib change_install_name + + # All runtime paths associated with the file's Mach-Os. + # @return [Array<String>] an array of all runtime paths + # @see MachOFile#rpaths + def rpaths + # Can individual architectures have different runtime paths? + machos.map(&:rpaths).flatten.uniq + end + + # Change the runtime path `old_path` to `new_path` in the file's Mach-Os. + # @param old_path [String] the old runtime path + # @param new_path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @see MachOFile#change_rpath + def change_rpath(old_path, new_path, options = {}) + each_macho(options) do |macho| + macho.change_rpath(old_path, new_path, options) + end + + repopulate_raw_machos + end + + # Add the given runtime path to the file's Mach-Os. + # @param path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return [void] + # @see MachOFile#add_rpath + def add_rpath(path, options = {}) + each_macho(options) do |macho| + macho.add_rpath(path, options) + end + + repopulate_raw_machos + end + + # Delete the given runtime path from the file's Mach-Os. + # @param path [String] the runtime path to delete + # @param options [Hash] + # @option options [Boolean] :strict (true) if true, fail if one slice fails. + # if false, fail only if all slices fail. + # @return void + # @see MachOFile#delete_rpath + def delete_rpath(path, options = {}) + each_macho(options) do |macho| + macho.delete_rpath(path, options) + end + + repopulate_raw_machos + end + + # Extract a Mach-O with the given CPU type from the file. + # @example + # file.extract(:i386) # => MachO::MachOFile + # @param cputype [Symbol] the CPU type of the Mach-O being extracted + # @return [MachOFile, nil] the extracted Mach-O or nil if no Mach-O has the given CPU type + def extract(cputype) + machos.select { |macho| macho.cputype == cputype }.first + end + + # Write all (fat) data to the given filename. 
+ # @param filename [String] the file to write to + # @return [void] + def write(filename) + File.open(filename, "wb") { |f| f.write(@raw_data) } + end + + # Write all (fat) data to the file used to initialize the instance. + # @return [void] + # @raise [MachOError] if the instance was initialized without a file + # @note Overwrites all data in the file! + def write! + raise MachOError, "no initial file to write to" if filename.nil? + + File.open(@filename, "wb") { |f| f.write(@raw_data) } + end + + # @return [Hash] a hash representation of this {FatFile} + def to_h + { + "header" => header.to_h, + "fat_archs" => fat_archs.map(&:to_h), + "machos" => machos.map(&:to_h), + } + end + + private + + # Obtain the fat header from raw file data. + # @return [Headers::FatHeader] the fat header + # @raise [TruncatedFileError] if the file is too small to have a + # valid header + # @raise [MagicError] if the magic is not valid Mach-O magic + # @raise [MachOBinaryError] if the magic is for a non-fat Mach-O file + # @raise [JavaClassFileError] if the file is a Java classfile + # @api private + def populate_fat_header + # the smallest fat Mach-O header is 8 bytes + raise TruncatedFileError if @raw_data.size < 8 + + fh = Headers::FatHeader.new_from_bin(:big, @raw_data[0, Headers::FatHeader.bytesize]) + + raise MagicError, fh.magic unless Utils.magic?(fh.magic) + raise MachOBinaryError unless Utils.fat_magic?(fh.magic) + + # Rationale: Java classfiles have the same magic as big-endian fat + # Mach-Os. Classfiles encode their version at the same offset as + # `nfat_arch` and the lowest version number is 43, so we error out + # if a file claims to have over 30 internal architectures. It's + # technically possible for a fat Mach-O to have over 30 architectures, + # but this is extremely unlikely and in practice distinguishes the two + # formats. + raise JavaClassFileError if fh.nfat_arch > 30 + + fh + end + + # Obtain an array of fat architectures from raw file data. + # @return [Array<Headers::FatArch>] an array of fat architectures + # @api private + def populate_fat_archs + archs = [] + + fa_klass = Utils.fat_magic32?(header.magic) ? Headers::FatArch : Headers::FatArch64 + fa_off = Headers::FatHeader.bytesize + fa_len = fa_klass.bytesize + + header.nfat_arch.times do |i| + archs << fa_klass.new_from_bin(:big, @raw_data[fa_off + (fa_len * i), fa_len]) + end + + archs + end + + # Obtain an array of Mach-O blobs from raw file data. + # @return [Array<MachOFile>] an array of Mach-Os + # @api private + def populate_machos + machos = [] + + fat_archs.each do |arch| + machos << MachOFile.new_from_bin(@raw_data[arch.offset, arch.size], **options) + end + + machos + end + + # Repopulate the raw Mach-O data with each internal Mach-O object. + # @return [void] + # @api private + def repopulate_raw_machos + machos.each_with_index do |macho, i| + arch = fat_archs[i] + + @raw_data[arch.offset, arch.size] = macho.serialize + end + end + + # Yield each Mach-O object in the file, rescuing and accumulating errors. + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if at least one Mach-O raises an exception. If false, + # only raises an exception if *all* Mach-Os raise exceptions. + # @raise [RecoverableModificationError] under the conditions of + # the `:strict` option above. 
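On a fat file the same edits run once per slice; with `:strict => false` a slice that lacks the item is skipped and the call only raises if every slice fails (see `each_macho` below). A sketch with illustrative paths:

```ruby
fat = MachO::FatFile.new("MyApp.universal")

fat.machos.map(&:cputype)  # => e.g. [:x86_64, :arm64]

fat.delete_rpath("/usr/local/lib", :strict => false)
fat.write!

thin = fat.extract(:arm64)         # => MachO::MachOFile, or nil if absent
thin.write("MyApp.arm64") if thin
```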
+ # @api private + def each_macho(options = {}) + strict = options.fetch(:strict, true) + errors = [] + + machos.each_with_index do |macho, index| + yield macho + rescue RecoverableModificationError => e + e.macho_slice = index + + # Strict mode: Immediately re-raise. Otherwise: Retain, check later. + raise e if strict + + errors << e + end + + # Non-strict mode: Raise first error if *all* Mach-O slices failed. + raise errors.first if errors.size == machos.size + end + + # Return a single-arch Mach-O that represents this fat Mach-O for purposes + # of delegation. + # @return [MachOFile] the Mach-O file + # @api private + def canonical_macho + machos.first + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/headers.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/headers.rb new file mode 100644 index 0000000..a7ed2ca --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/headers.rb @@ -0,0 +1,789 @@ +# frozen_string_literal: true + +module MachO + # Classes and constants for parsing the headers of Mach-O binaries. + module Headers + # big-endian fat magic + # @api private + FAT_MAGIC = 0xcafebabe + + # little-endian fat magic + # @note This is defined for completeness, but should never appear in ruby-macho code, + # since fat headers are always big-endian. + # @api private + FAT_CIGAM = 0xbebafeca + + # 64-bit big-endian fat magic + FAT_MAGIC_64 = 0xcafebabf + + # 64-bit little-endian fat magic + # @note This is defined for completeness, but should never appear in ruby-macho code, + # since fat headers are always big-endian. + FAT_CIGAM_64 = 0xbfbafeca + + # 32-bit big-endian magic + # @api private + MH_MAGIC = 0xfeedface + + # 32-bit little-endian magic + # @api private + MH_CIGAM = 0xcefaedfe + + # 64-bit big-endian magic + # @api private + MH_MAGIC_64 = 0xfeedfacf + + # 64-bit little-endian magic + # @api private + MH_CIGAM_64 = 0xcffaedfe + + # association of magic numbers to string representations + # @api private + MH_MAGICS = { + FAT_MAGIC => "FAT_MAGIC", + FAT_MAGIC_64 => "FAT_MAGIC_64", + MH_MAGIC => "MH_MAGIC", + MH_CIGAM => "MH_CIGAM", + MH_MAGIC_64 => "MH_MAGIC_64", + MH_CIGAM_64 => "MH_CIGAM_64", + }.freeze + + # mask for CPUs with 64-bit architectures (when running a 64-bit ABI?) + # @api private + CPU_ARCH_ABI64 = 0x01000000 + + # mask for CPUs with 64-bit architectures (when running a 32-bit ABI?) + # @see https://github.com/Homebrew/ruby-macho/issues/113 + # @api private + CPU_ARCH_ABI32 = 0x02000000 + + # any CPU (unused?) + # @api private + CPU_TYPE_ANY = -1 + + # m68k compatible CPUs + # @api private + CPU_TYPE_MC680X0 = 0x06 + + # i386 and later compatible CPUs + # @api private + CPU_TYPE_I386 = 0x07 + + # x86_64 (AMD64) compatible CPUs + # @api private + CPU_TYPE_X86_64 = (CPU_TYPE_I386 | CPU_ARCH_ABI64) + + # 32-bit ARM compatible CPUs + # @api private + CPU_TYPE_ARM = 0x0c + + # m88k compatible CPUs + # @api private + CPU_TYPE_MC88000 = 0xd + + # 64-bit ARM compatible CPUs + # @api private + CPU_TYPE_ARM64 = (CPU_TYPE_ARM | CPU_ARCH_ABI64) + + # 64-bit ARM compatible CPUs (running in 32-bit mode?) 
+ # @see https://github.com/Homebrew/ruby-macho/issues/113 + CPU_TYPE_ARM64_32 = (CPU_TYPE_ARM | CPU_ARCH_ABI32) + + # PowerPC compatible CPUs + # @api private + CPU_TYPE_POWERPC = 0x12 + + # PowerPC64 compatible CPUs + # @api private + CPU_TYPE_POWERPC64 = (CPU_TYPE_POWERPC | CPU_ARCH_ABI64) + + # association of cpu types to symbol representations + # @api private + CPU_TYPES = { + CPU_TYPE_ANY => :any, + CPU_TYPE_I386 => :i386, + CPU_TYPE_X86_64 => :x86_64, + CPU_TYPE_ARM => :arm, + CPU_TYPE_ARM64 => :arm64, + CPU_TYPE_ARM64_32 => :arm64_32, + CPU_TYPE_POWERPC => :ppc, + CPU_TYPE_POWERPC64 => :ppc64, + }.freeze + + # mask for CPU subtype capabilities + # @api private + CPU_SUBTYPE_MASK = 0xff000000 + + # 64-bit libraries (undocumented!) + # @see http://llvm.org/docs/doxygen/html/Support_2MachO_8h_source.html + # @api private + CPU_SUBTYPE_LIB64 = 0x80000000 + + # the lowest common sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_I386 = 3 + + # the i486 sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_486 = 4 + + # the i486SX sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_486SX = 132 + + # the i586 (P5, Pentium) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_586 = 5 + + # @see CPU_SUBTYPE_586 + # @api private + CPU_SUBTYPE_PENT = CPU_SUBTYPE_586 + + # the Pentium Pro (P6) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTPRO = 22 + + # the Pentium II (P6, M3?) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTII_M3 = 54 + + # the Pentium II (P6, M5?) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTII_M5 = 86 + + # the Pentium 4 (Netburst) sub-type for `CPU_TYPE_I386` + # @api private + CPU_SUBTYPE_PENTIUM_4 = 10 + + # the lowest common sub-type for `CPU_TYPE_MC680X0` + # @api private + CPU_SUBTYPE_MC680X0_ALL = 1 + + # @see CPU_SUBTYPE_MC680X0_ALL + # @api private + CPU_SUBTYPE_MC68030 = CPU_SUBTYPE_MC680X0_ALL + + # the 040 subtype for `CPU_TYPE_MC680X0` + # @api private + CPU_SUBTYPE_MC68040 = 2 + + # the 030 subtype for `CPU_TYPE_MC680X0` + # @api private + CPU_SUBTYPE_MC68030_ONLY = 3 + + # the lowest common sub-type for `CPU_TYPE_X86_64` + # @api private + CPU_SUBTYPE_X86_64_ALL = CPU_SUBTYPE_I386 + + # the Haskell sub-type for `CPU_TYPE_X86_64` + # @api private + CPU_SUBTYPE_X86_64_H = 8 + + # the lowest common sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_ALL = 0 + + # the v4t sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V4T = 5 + + # the v6 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V6 = 6 + + # the v5 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V5TEJ = 7 + + # the xscale (v5 family) sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_XSCALE = 8 + + # the v7 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7 = 9 + + # the v7f (Cortex A9) sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7F = 10 + + # the v7s ("Swift") sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7S = 11 + + # the v7k ("Kirkwood40") sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7K = 12 + + # the v6m sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V6M = 14 + + # the v7m sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7M = 15 + + # the v7em sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V7EM = 16 + + # the v8 sub-type for `CPU_TYPE_ARM` + # @api private + CPU_SUBTYPE_ARM_V8 = 13 + + # the lowest common sub-type for `CPU_TYPE_ARM64` 
+ # @api private + CPU_SUBTYPE_ARM64_ALL = 0 + + # the v8 sub-type for `CPU_TYPE_ARM64` + # @api private + CPU_SUBTYPE_ARM64_V8 = 1 + + # the v8 sub-type for `CPU_TYPE_ARM64_32` + # @api private + CPU_SUBTYPE_ARM64_32_V8 = 1 + + # the e (A12) sub-type for `CPU_TYPE_ARM64` + # @api private + CPU_SUBTYPE_ARM64E = 2 + + # the lowest common sub-type for `CPU_TYPE_MC88000` + # @api private + CPU_SUBTYPE_MC88000_ALL = 0 + + # @see CPU_SUBTYPE_MC88000_ALL + # @api private + CPU_SUBTYPE_MMAX_JPC = CPU_SUBTYPE_MC88000_ALL + + # the 100 sub-type for `CPU_TYPE_MC88000` + # @api private + CPU_SUBTYPE_MC88100 = 1 + + # the 110 sub-type for `CPU_TYPE_MC88000` + # @api private + CPU_SUBTYPE_MC88110 = 2 + + # the lowest common sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_ALL = 0 + + # the 601 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_601 = 1 + + # the 602 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_602 = 2 + + # the 603 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_603 = 3 + + # the 603e (G2) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_603E = 4 + + # the 603ev sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_603EV = 5 + + # the 604 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_604 = 6 + + # the 604e sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_604E = 7 + + # the 620 sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_620 = 8 + + # the 750 (G3) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_750 = 9 + + # the 7400 (G4) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_7400 = 10 + + # the 7450 (G4 "Voyager") sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_7450 = 11 + + # the 970 (G5) sub-type for `CPU_TYPE_POWERPC` + # @api private + CPU_SUBTYPE_POWERPC_970 = 100 + + # any CPU sub-type for CPU type `CPU_TYPE_POWERPC64` + # @api private + CPU_SUBTYPE_POWERPC64_ALL = CPU_SUBTYPE_POWERPC_ALL + + # association of CPU types/subtype pairs to symbol representations in + # (very) roughly descending order of commonness + # @see https://opensource.apple.com/source/cctools/cctools-877.8/libstuff/arch.c + # @api private + CPU_SUBTYPES = { + CPU_TYPE_I386 => { + CPU_SUBTYPE_I386 => :i386, + CPU_SUBTYPE_486 => :i486, + CPU_SUBTYPE_486SX => :i486SX, + CPU_SUBTYPE_586 => :i586, # also "pentium" in arch(3) + CPU_SUBTYPE_PENTPRO => :i686, # also "pentpro" in arch(3) + CPU_SUBTYPE_PENTII_M3 => :pentIIm3, + CPU_SUBTYPE_PENTII_M5 => :pentIIm5, + CPU_SUBTYPE_PENTIUM_4 => :pentium4, + }.freeze, + CPU_TYPE_X86_64 => { + CPU_SUBTYPE_X86_64_ALL => :x86_64, + CPU_SUBTYPE_X86_64_H => :x86_64h, + }.freeze, + CPU_TYPE_ARM => { + CPU_SUBTYPE_ARM_ALL => :arm, + CPU_SUBTYPE_ARM_V4T => :armv4t, + CPU_SUBTYPE_ARM_V6 => :armv6, + CPU_SUBTYPE_ARM_V5TEJ => :armv5, + CPU_SUBTYPE_ARM_XSCALE => :xscale, + CPU_SUBTYPE_ARM_V7 => :armv7, + CPU_SUBTYPE_ARM_V7F => :armv7f, + CPU_SUBTYPE_ARM_V7S => :armv7s, + CPU_SUBTYPE_ARM_V7K => :armv7k, + CPU_SUBTYPE_ARM_V6M => :armv6m, + CPU_SUBTYPE_ARM_V7M => :armv7m, + CPU_SUBTYPE_ARM_V7EM => :armv7em, + CPU_SUBTYPE_ARM_V8 => :armv8, + }.freeze, + CPU_TYPE_ARM64 => { + CPU_SUBTYPE_ARM64_ALL => :arm64, + CPU_SUBTYPE_ARM64_V8 => :arm64v8, + CPU_SUBTYPE_ARM64E => :arm64e, + }.freeze, + CPU_TYPE_ARM64_32 => { + CPU_SUBTYPE_ARM64_32_V8 => :arm64_32v8, + }.freeze, + CPU_TYPE_POWERPC => { + CPU_SUBTYPE_POWERPC_ALL => :ppc, + 
CPU_SUBTYPE_POWERPC_601 => :ppc601, + CPU_SUBTYPE_POWERPC_603 => :ppc603, + CPU_SUBTYPE_POWERPC_603E => :ppc603e, + CPU_SUBTYPE_POWERPC_603EV => :ppc603ev, + CPU_SUBTYPE_POWERPC_604 => :ppc604, + CPU_SUBTYPE_POWERPC_604E => :ppc604e, + CPU_SUBTYPE_POWERPC_750 => :ppc750, + CPU_SUBTYPE_POWERPC_7400 => :ppc7400, + CPU_SUBTYPE_POWERPC_7450 => :ppc7450, + CPU_SUBTYPE_POWERPC_970 => :ppc970, + }.freeze, + CPU_TYPE_POWERPC64 => { + CPU_SUBTYPE_POWERPC64_ALL => :ppc64, + # apparently the only exception to the naming scheme + CPU_SUBTYPE_POWERPC_970 => :ppc970_64, + }.freeze, + CPU_TYPE_MC680X0 => { + CPU_SUBTYPE_MC680X0_ALL => :m68k, + CPU_SUBTYPE_MC68030 => :mc68030, + CPU_SUBTYPE_MC68040 => :mc68040, + }, + CPU_TYPE_MC88000 => { + CPU_SUBTYPE_MC88000_ALL => :m88k, + }, + }.freeze + + # relocatable object file + # @api private + MH_OBJECT = 0x1 + + # demand paged executable file + # @api private + MH_EXECUTE = 0x2 + + # fixed VM shared library file + # @api private + MH_FVMLIB = 0x3 + + # core dump file + # @api private + MH_CORE = 0x4 + + # preloaded executable file + # @api private + MH_PRELOAD = 0x5 + + # dynamically bound shared library + # @api private + MH_DYLIB = 0x6 + + # dynamic link editor + # @api private + MH_DYLINKER = 0x7 + + # dynamically bound bundle file + # @api private + MH_BUNDLE = 0x8 + + # shared library stub for static linking only, no section contents + # @api private + MH_DYLIB_STUB = 0x9 + + # companion file with only debug sections + # @api private + MH_DSYM = 0xa + + # x86_64 kexts + # @api private + MH_KEXT_BUNDLE = 0xb + + # association of filetypes to Symbol representations + # @api private + MH_FILETYPES = { + MH_OBJECT => :object, + MH_EXECUTE => :execute, + MH_FVMLIB => :fvmlib, + MH_CORE => :core, + MH_PRELOAD => :preload, + MH_DYLIB => :dylib, + MH_DYLINKER => :dylinker, + MH_BUNDLE => :bundle, + MH_DYLIB_STUB => :dylib_stub, + MH_DSYM => :dsym, + MH_KEXT_BUNDLE => :kext_bundle, + }.freeze + + # association of mach header flag symbols to values + # @api private + MH_FLAGS = { + :MH_NOUNDEFS => 0x1, + :MH_INCRLINK => 0x2, + :MH_DYLDLINK => 0x4, + :MH_BINDATLOAD => 0x8, + :MH_PREBOUND => 0x10, + :MH_SPLIT_SEGS => 0x20, + :MH_LAZY_INIT => 0x40, + :MH_TWOLEVEL => 0x80, + :MH_FORCE_FLAT => 0x100, + :MH_NOMULTIDEFS => 0x200, + :MH_NOPREFIXBINDING => 0x400, + :MH_PREBINDABLE => 0x800, + :MH_ALLMODSBOUND => 0x1000, + :MH_SUBSECTIONS_VIA_SYMBOLS => 0x2000, + :MH_CANONICAL => 0x4000, + :MH_WEAK_DEFINES => 0x8000, + :MH_BINDS_TO_WEAK => 0x10000, + :MH_ALLOW_STACK_EXECUTION => 0x20000, + :MH_ROOT_SAFE => 0x40000, + :MH_SETUID_SAFE => 0x80000, + :MH_NO_REEXPORTED_DYLIBS => 0x100000, + :MH_PIE => 0x200000, + :MH_DEAD_STRIPPABLE_DYLIB => 0x400000, + :MH_HAS_TLV_DESCRIPTORS => 0x800000, + :MH_NO_HEAP_EXECUTION => 0x1000000, + :MH_APP_EXTENSION_SAFE => 0x02000000, + }.freeze + + # Fat binary header structure + # @see MachO::FatArch + class FatHeader < MachOStructure + # @return [Integer] the magic number of the header (and file) + attr_reader :magic + + # @return [Integer] the number of fat architecture structures following the header + attr_reader :nfat_arch + + # always big-endian + # @see MachOStructure::FORMAT + # @api private + FORMAT = "N2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + + # @api private + def initialize(magic, nfat_arch) + super() + @magic = magic + @nfat_arch = nfat_arch + end + + # @return [String] the serialized fields of the fat header + def serialize + [magic, nfat_arch].pack(FORMAT) + end + + # @return [Hash] a hash 
representation of this {FatHeader} + def to_h + { + "magic" => magic, + "magic_sym" => MH_MAGICS[magic], + "nfat_arch" => nfat_arch, + }.merge super + end + end + + # 32-bit fat binary header architecture structure. A 32-bit fat Mach-O has one or more of + # these, indicating one or more internal Mach-O blobs. + # @note "32-bit" indicates the fact that this structure stores 32-bit offsets, not that the + # Mach-Os that it points to necessarily *are* 32-bit. + # @see MachO::Headers::FatHeader + class FatArch < MachOStructure + # @return [Integer] the CPU type of the Mach-O + attr_reader :cputype + + # @return [Integer] the CPU subtype of the Mach-O + attr_reader :cpusubtype + + # @return [Integer] the file offset to the beginning of the Mach-O data + attr_reader :offset + + # @return [Integer] the size, in bytes, of the Mach-O data + attr_reader :size + + # @return [Integer] the alignment, as a power of 2 + attr_reader :align + + # @note Always big endian. + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L>5" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + # @api private + def initialize(cputype, cpusubtype, offset, size, align) + super() + @cputype = cputype + @cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK + @offset = offset + @size = size + @align = align + end + + # @return [String] the serialized fields of the fat arch + def serialize + [cputype, cpusubtype, offset, size, align].pack(FORMAT) + end + + # @return [Hash] a hash representation of this {FatArch} + def to_h + { + "cputype" => cputype, + "cputype_sym" => CPU_TYPES[cputype], + "cpusubtype" => cpusubtype, + "cpusubtype_sym" => CPU_SUBTYPES[cputype][cpusubtype], + "offset" => offset, + "size" => size, + "align" => align, + }.merge super + end + end + + # 64-bit fat binary header architecture structure. A 64-bit fat Mach-O has one or more of + # these, indicating one or more internal Mach-O blobs. + # @note "64-bit" indicates the fact that this structure stores 64-bit offsets, not that the + # Mach-Os that it points to necessarily *are* 64-bit. + # @see MachO::Headers::FatHeader + class FatArch64 < FatArch + # @return [void] + attr_reader :reserved + + # @note Always big endian. 
+ # @see MachOStructure::FORMAT + # @api private + FORMAT = "L>2Q>2L>2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 32 + + # @api private + def initialize(cputype, cpusubtype, offset, size, align, reserved = 0) + super(cputype, cpusubtype, offset, size, align) + @reserved = reserved + end + + # @return [String] the serialized fields of the fat arch + def serialize + [cputype, cpusubtype, offset, size, align, reserved].pack(FORMAT) + end + + # @return [Hash] a hash representation of this {FatArch64} + def to_h + { + "reserved" => reserved, + }.merge super + end + end + + # 32-bit Mach-O file header structure + class MachHeader < MachOStructure + # @return [Integer] the magic number + attr_reader :magic + + # @return [Integer] the CPU type of the Mach-O + attr_reader :cputype + + # @return [Integer] the CPU subtype of the Mach-O + attr_reader :cpusubtype + + # @return [Integer] the file type of the Mach-O + attr_reader :filetype + + # @return [Integer] the number of load commands in the Mach-O + attr_reader :ncmds + + # @return [Integer] the size of all load commands, in bytes, in the Mach-O + attr_reader :sizeofcmds + + # @return [Integer] the header flags associated with the Mach-O + attr_reader :flags + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=7" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 28 + + # @api private + def initialize(magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, + flags) + super() + @magic = magic + @cputype = cputype + # For now we're not interested in additional capability bits also to be + # found in the `cpusubtype` field. We only care about the CPU sub-type. + @cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK + @filetype = filetype + @ncmds = ncmds + @sizeofcmds = sizeofcmds + @flags = flags + end + + # @example + # puts "this mach-o has position-independent execution" if header.flag?(:MH_PIE) + # @param flag [Symbol] a mach header flag symbol + # @return [Boolean] true if `flag` is present in the header's flag section + def flag?(flag) + flag = MH_FLAGS[flag] + + return false if flag.nil? + + flags & flag == flag + end + + # @return [Boolean] whether or not the file is of type `MH_OBJECT` + def object? + filetype == Headers::MH_OBJECT + end + + # @return [Boolean] whether or not the file is of type `MH_EXECUTE` + def executable? + filetype == Headers::MH_EXECUTE + end + + # @return [Boolean] whether or not the file is of type `MH_FVMLIB` + def fvmlib? + filetype == Headers::MH_FVMLIB + end + + # @return [Boolean] whether or not the file is of type `MH_CORE` + def core? + filetype == Headers::MH_CORE + end + + # @return [Boolean] whether or not the file is of type `MH_PRELOAD` + def preload? + filetype == Headers::MH_PRELOAD + end + + # @return [Boolean] whether or not the file is of type `MH_DYLIB` + def dylib? + filetype == Headers::MH_DYLIB + end + + # @return [Boolean] whether or not the file is of type `MH_DYLINKER` + def dylinker? + filetype == Headers::MH_DYLINKER + end + + # @return [Boolean] whether or not the file is of type `MH_BUNDLE` + def bundle? + filetype == Headers::MH_BUNDLE + end + + # @return [Boolean] whether or not the file is of type `MH_DSYM` + def dsym? + filetype == Headers::MH_DSYM + end + + # @return [Boolean] whether or not the file is of type `MH_KEXT_BUNDLE` + def kext? + filetype == Headers::MH_KEXT_BUNDLE + end + + # @return [Boolean] true if the Mach-O has 32-bit magic, false otherwise + def magic32? 
+ Utils.magic32?(magic) + end + + # @return [Boolean] true if the Mach-O has 64-bit magic, false otherwise + def magic64? + Utils.magic64?(magic) + end + + # @return [Integer] the file's internal alignment + def alignment + magic32? ? 4 : 8 + end + + # @return [Hash] a hash representation of this {MachHeader} + def to_h + { + "magic" => magic, + "magic_sym" => MH_MAGICS[magic], + "cputype" => cputype, + "cputype_sym" => CPU_TYPES[cputype], + "cpusubtype" => cpusubtype, + "cpusubtype_sym" => CPU_SUBTYPES[cputype][cpusubtype], + "filetype" => filetype, + "filetype_sym" => MH_FILETYPES[filetype], + "ncmds" => ncmds, + "sizeofcmds" => sizeofcmds, + "flags" => flags, + "alignment" => alignment, + }.merge super + end + end + + # 64-bit Mach-O file header structure + class MachHeader64 < MachHeader + # @return [void] + attr_reader :reserved + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=8" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 32 + + # @api private + def initialize(magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, + flags, reserved) + super(magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags) + @reserved = reserved + end + + # @return [Hash] a hash representation of this {MachHeader64} + def to_h + { + "reserved" => reserved, + }.merge super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/load_commands.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/load_commands.rb new file mode 100644 index 0000000..aa90c0b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/load_commands.rb @@ -0,0 +1,1798 @@ +# frozen_string_literal: true + +module MachO + # Classes and constants for parsing load commands in Mach-O binaries. + module LoadCommands + # load commands added after OS X 10.1 need to be bitwise ORed with + # LC_REQ_DYLD to be recognized by the dynamic linker (dyld) + # @api private + LC_REQ_DYLD = 0x80000000 + + # association of load commands to symbol representations + # @api private + LOAD_COMMANDS = { + 0x1 => :LC_SEGMENT, + 0x2 => :LC_SYMTAB, + 0x3 => :LC_SYMSEG, + 0x4 => :LC_THREAD, + 0x5 => :LC_UNIXTHREAD, + 0x6 => :LC_LOADFVMLIB, + 0x7 => :LC_IDFVMLIB, + 0x8 => :LC_IDENT, + 0x9 => :LC_FVMFILE, + 0xa => :LC_PREPAGE, + 0xb => :LC_DYSYMTAB, + 0xc => :LC_LOAD_DYLIB, + 0xd => :LC_ID_DYLIB, + 0xe => :LC_LOAD_DYLINKER, + 0xf => :LC_ID_DYLINKER, + 0x10 => :LC_PREBOUND_DYLIB, + 0x11 => :LC_ROUTINES, + 0x12 => :LC_SUB_FRAMEWORK, + 0x13 => :LC_SUB_UMBRELLA, + 0x14 => :LC_SUB_CLIENT, + 0x15 => :LC_SUB_LIBRARY, + 0x16 => :LC_TWOLEVEL_HINTS, + 0x17 => :LC_PREBIND_CKSUM, + (0x18 | LC_REQ_DYLD) => :LC_LOAD_WEAK_DYLIB, + 0x19 => :LC_SEGMENT_64, + 0x1a => :LC_ROUTINES_64, + 0x1b => :LC_UUID, + (0x1c | LC_REQ_DYLD) => :LC_RPATH, + 0x1d => :LC_CODE_SIGNATURE, + 0x1e => :LC_SEGMENT_SPLIT_INFO, + (0x1f | LC_REQ_DYLD) => :LC_REEXPORT_DYLIB, + 0x20 => :LC_LAZY_LOAD_DYLIB, + 0x21 => :LC_ENCRYPTION_INFO, + 0x22 => :LC_DYLD_INFO, + (0x22 | LC_REQ_DYLD) => :LC_DYLD_INFO_ONLY, + (0x23 | LC_REQ_DYLD) => :LC_LOAD_UPWARD_DYLIB, + 0x24 => :LC_VERSION_MIN_MACOSX, + 0x25 => :LC_VERSION_MIN_IPHONEOS, + 0x26 => :LC_FUNCTION_STARTS, + 0x27 => :LC_DYLD_ENVIRONMENT, + (0x28 | LC_REQ_DYLD) => :LC_MAIN, + 0x29 => :LC_DATA_IN_CODE, + 0x2a => :LC_SOURCE_VERSION, + 0x2b => :LC_DYLIB_CODE_SIGN_DRS, + 0x2c => :LC_ENCRYPTION_INFO_64, + 0x2d => :LC_LINKER_OPTION, + 0x2e => :LC_LINKER_OPTIMIZATION_HINT, + 0x2f => :LC_VERSION_MIN_TVOS, + 0x30 => :LC_VERSION_MIN_WATCHOS, + 0x31 => :LC_NOTE, + 0x32 => 
:LC_BUILD_VERSION, + (0x33 | LC_REQ_DYLD) => :LC_DYLD_EXPORTS_TRIE, + (0x34 | LC_REQ_DYLD) => :LD_DYLD_CHAINED_FIXUPS, + }.freeze + + # association of symbol representations to load command constants + # @api private + LOAD_COMMAND_CONSTANTS = LOAD_COMMANDS.invert.freeze + + # load commands responsible for loading dylibs + # @api private + DYLIB_LOAD_COMMANDS = %i[ + LC_LOAD_DYLIB + LC_LOAD_WEAK_DYLIB + LC_REEXPORT_DYLIB + LC_LAZY_LOAD_DYLIB + LC_LOAD_UPWARD_DYLIB + ].freeze + + # load commands that can be created manually via {LoadCommand.create} + # @api private + CREATABLE_LOAD_COMMANDS = DYLIB_LOAD_COMMANDS + %i[ + LC_ID_DYLIB + LC_RPATH + LC_LOAD_DYLINKER + ].freeze + + # association of load command symbols to string representations of classes + # @api private + LC_STRUCTURES = { + :LC_SEGMENT => "SegmentCommand", + :LC_SYMTAB => "SymtabCommand", + # "obsolete" + :LC_SYMSEG => "SymsegCommand", + # seems obsolete, but not documented as such + :LC_THREAD => "ThreadCommand", + :LC_UNIXTHREAD => "ThreadCommand", + # "obsolete" + :LC_LOADFVMLIB => "FvmlibCommand", + # "obsolete" + :LC_IDFVMLIB => "FvmlibCommand", + # "obsolete" + :LC_IDENT => "IdentCommand", + # "reserved for internal use only" + :LC_FVMFILE => "FvmfileCommand", + # "reserved for internal use only", no public struct + :LC_PREPAGE => "LoadCommand", + :LC_DYSYMTAB => "DysymtabCommand", + :LC_LOAD_DYLIB => "DylibCommand", + :LC_ID_DYLIB => "DylibCommand", + :LC_LOAD_DYLINKER => "DylinkerCommand", + :LC_ID_DYLINKER => "DylinkerCommand", + :LC_PREBOUND_DYLIB => "PreboundDylibCommand", + :LC_ROUTINES => "RoutinesCommand", + :LC_SUB_FRAMEWORK => "SubFrameworkCommand", + :LC_SUB_UMBRELLA => "SubUmbrellaCommand", + :LC_SUB_CLIENT => "SubClientCommand", + :LC_SUB_LIBRARY => "SubLibraryCommand", + :LC_TWOLEVEL_HINTS => "TwolevelHintsCommand", + :LC_PREBIND_CKSUM => "PrebindCksumCommand", + :LC_LOAD_WEAK_DYLIB => "DylibCommand", + :LC_SEGMENT_64 => "SegmentCommand64", + :LC_ROUTINES_64 => "RoutinesCommand64", + :LC_UUID => "UUIDCommand", + :LC_RPATH => "RpathCommand", + :LC_CODE_SIGNATURE => "LinkeditDataCommand", + :LC_SEGMENT_SPLIT_INFO => "LinkeditDataCommand", + :LC_REEXPORT_DYLIB => "DylibCommand", + :LC_LAZY_LOAD_DYLIB => "DylibCommand", + :LC_ENCRYPTION_INFO => "EncryptionInfoCommand", + :LC_DYLD_INFO => "DyldInfoCommand", + :LC_DYLD_INFO_ONLY => "DyldInfoCommand", + :LC_LOAD_UPWARD_DYLIB => "DylibCommand", + :LC_VERSION_MIN_MACOSX => "VersionMinCommand", + :LC_VERSION_MIN_IPHONEOS => "VersionMinCommand", + :LC_FUNCTION_STARTS => "LinkeditDataCommand", + :LC_DYLD_ENVIRONMENT => "DylinkerCommand", + :LC_MAIN => "EntryPointCommand", + :LC_DATA_IN_CODE => "LinkeditDataCommand", + :LC_SOURCE_VERSION => "SourceVersionCommand", + :LC_DYLIB_CODE_SIGN_DRS => "LinkeditDataCommand", + :LC_ENCRYPTION_INFO_64 => "EncryptionInfoCommand64", + :LC_LINKER_OPTION => "LinkerOptionCommand", + :LC_LINKER_OPTIMIZATION_HINT => "LinkeditDataCommand", + :LC_VERSION_MIN_TVOS => "VersionMinCommand", + :LC_VERSION_MIN_WATCHOS => "VersionMinCommand", + :LC_NOTE => "NoteCommand", + :LC_BUILD_VERSION => "BuildVersionCommand", + :LC_DYLD_EXPORTS_TRIE => "LinkeditDataCommand", + :LD_DYLD_CHAINED_FIXUPS => "LinkeditDataCommand", + }.freeze + + # association of segment name symbols to names + # @api private + SEGMENT_NAMES = { + :SEG_PAGEZERO => "__PAGEZERO", + :SEG_TEXT => "__TEXT", + :SEG_DATA => "__DATA", + :SEG_OBJC => "__OBJC", + :SEG_ICON => "__ICON", + :SEG_LINKEDIT => "__LINKEDIT", + :SEG_UNIXSTACK => "__UNIXSTACK", + :SEG_IMPORT => "__IMPORT", + 
}.freeze + + # association of segment flag symbols to values + # @api private + SEGMENT_FLAGS = { + :SG_HIGHVM => 0x1, + :SG_FVMLIB => 0x2, + :SG_NORELOC => 0x4, + :SG_PROTECTED_VERSION_1 => 0x8, + }.freeze + + # The top-level Mach-O load command structure. + # + # This is the most generic load command -- only the type ID and size are + # represented. Used when a more specific class isn't available or isn't implemented. + class LoadCommand < MachOStructure + # @return [MachO::MachOView, nil] the raw view associated with the load command, + # or nil if the load command was created via {create}. + attr_reader :view + + # @return [Integer] the load command's type ID + attr_reader :cmd + + # @return [Integer] the size of the load command, in bytes + attr_reader :cmdsize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + + # Instantiates a new LoadCommand given a view into its origin Mach-O + # @param view [MachO::MachOView] the load command's raw view + # @return [LoadCommand] the new load command + # @api private + def self.new_from_bin(view) + bin = view.raw_data.slice(view.offset, bytesize) + format = Utils.specialize_format(self::FORMAT, view.endianness) + + new(view, *bin.unpack(format)) + end + + # Creates a new (viewless) command corresponding to the symbol provided + # @param cmd_sym [Symbol] the symbol of the load command being created + # @param args [Array] the arguments for the load command being created + def self.create(cmd_sym, *args) + raise LoadCommandNotCreatableError, cmd_sym unless CREATABLE_LOAD_COMMANDS.include?(cmd_sym) + + klass = LoadCommands.const_get LC_STRUCTURES[cmd_sym] + cmd = LOAD_COMMAND_CONSTANTS[cmd_sym] + + # cmd will be filled in, view and cmdsize will be left unpopulated + klass_arity = klass.instance_method(:initialize).arity - 3 + + raise LoadCommandCreationArityError.new(cmd_sym, klass_arity, args.size) if klass_arity != args.size + + klass.new(nil, cmd, nil, *args) + end + + # @param view [MachO::MachOView] the load command's raw view + # @param cmd [Integer] the load command's identifying number + # @param cmdsize [Integer] the size of the load command in bytes + # @api private + def initialize(view, cmd, cmdsize) + super() + @view = view + @cmd = cmd + @cmdsize = cmdsize + end + + # @return [Boolean] whether the load command can be serialized + def serializable? + CREATABLE_LOAD_COMMANDS.include?(LOAD_COMMANDS[cmd]) + end + + # @param context [SerializationContext] the context + # to serialize into + # @return [String, nil] the serialized fields of the load command, or nil + # if the load command can't be serialized + # @api private + def serialize(context) + raise LoadCommandNotSerializableError, LOAD_COMMANDS[cmd] unless serializable? + + format = Utils.specialize_format(FORMAT, context.endianness) + [cmd, SIZEOF].pack(format) + end + + # @return [Integer] the load command's offset in the source file + # @deprecated use {#view} instead + def offset + view.offset + end + + # @return [Symbol, nil] a symbol representation of the load command's + # type ID, or nil if the ID doesn't correspond to a known load command class + def type + LOAD_COMMANDS[cmd] + end + + alias to_sym type + + # @return [String] a string representation of the load command's + # identifying number + def to_s + type.to_s + end + + # @return [Hash] a hash representation of this load command + # @note Children should override this to include additional information. 
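+      # @example dumping a parsed command's metadata (an illustrative sketch)
+      #   # `lc` is assumed to be any load command from an already-parsed Mach-O
+      #   lc.to_h["type"]    # => :LC_SEGMENT_64, for example
+      #   lc.to_h["cmdsize"] # => 72, for example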
+ def to_h + { + "view" => view.to_h, + "cmd" => cmd, + "cmdsize" => cmdsize, + "type" => type, + }.merge super + end + + # Represents a Load Command string. A rough analogue to the lc_str + # struct used internally by OS X. This class allows ruby-macho to + # pretend that strings stored in LCs are immediately available without + # explicit operations on the raw Mach-O data. + class LCStr + # @param lc [LoadCommand] the load command + # @param lc_str [Integer, String] the offset to the beginning of the + # string, or the string itself if not being initialized with a view. + # @raise [MachO::LCStrMalformedError] if the string is malformed + # @todo devise a solution such that the `lc_str` parameter is not + # interpreted differently depending on `lc.view`. The current behavior + # is a hack to allow viewless load command creation. + # @api private + def initialize(lc, lc_str) + view = lc.view + + if view + lc_str_abs = view.offset + lc_str + lc_end = view.offset + lc.cmdsize - 1 + raw_string = view.raw_data.slice(lc_str_abs..lc_end) + @string, null_byte, _padding = raw_string.partition("\x00") + + raise LCStrMalformedError, lc if null_byte.empty? + + @string_offset = lc_str + else + @string = lc_str + @string_offset = 0 + end + end + + # @return [String] a string representation of the LCStr + def to_s + @string + end + + # @return [Integer] the offset to the beginning of the string in the + # load command + def to_i + @string_offset + end + + # @return [Hash] a hash representation of this {LCStr}. + def to_h + { + "string" => to_s, + "offset" => to_i, + } + end + end + + # Represents the contextual information needed by a load command to + # serialize itself correctly into a binary string. + class SerializationContext + # @return [Symbol] the endianness of the serialized load command + attr_reader :endianness + + # @return [Integer] the constant alignment value used to pad the + # serialized load command + attr_reader :alignment + + # @param macho [MachO::MachOFile] the file to contextualize + # @return [SerializationContext] the + # resulting context + def self.context_for(macho) + new(macho.endianness, macho.alignment) + end + + # @param endianness [Symbol] the endianness of the context + # @param alignment [Integer] the alignment of the context + # @api private + def initialize(endianness, alignment) + @endianness = endianness + @alignment = alignment + end + end + end + + # A load command containing a single 128-bit unique random number + # identifying an object produced by static link editor. Corresponds to + # LC_UUID. + class UUIDCommand < LoadCommand + # @return [Array<Integer>] the UUID + attr_reader :uuid + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2a16" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, uuid) + super(view, cmd, cmdsize) + @uuid = uuid.unpack("C16") # re-unpack for the actual UUID array + end + + # @return [String] a string representation of the UUID + def uuid_string + hexes = uuid.map { |elem| "%02<elem>x" % { :elem => elem } } + segs = [ + hexes[0..3].join, hexes[4..5].join, hexes[6..7].join, + hexes[8..9].join, hexes[10..15].join + ] + + segs.join("-") + end + + # @return [Hash] returns a hash representation of this {UUIDCommand} + def to_h + { + "uuid" => uuid, + "uuid_string" => uuid_string, + }.merge super + end + end + + # A load command indicating that part of this file is to be mapped into + # the task's address space. Corresponds to LC_SEGMENT. 
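+    # @example locating the __TEXT segment of a parsed Mach-O (an illustrative sketch)
+    #   # `file` is assumed to be an already-parsed MachO::MachOFile
+    #   text = file.segments.find { |seg| seg.segname == "__TEXT" }
+    #   text.sections.map(&:sectname) if text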
+ class SegmentCommand < LoadCommand + # @return [String] the name of the segment + attr_reader :segname + + # @return [Integer] the memory address of the segment + attr_reader :vmaddr + + # @return [Integer] the memory size of the segment + attr_reader :vmsize + + # @return [Integer] the file offset of the segment + attr_reader :fileoff + + # @return [Integer] the amount to map from the file + attr_reader :filesize + + # @return [Integer] the maximum VM protection + attr_reader :maxprot + + # @return [Integer] the initial VM protection + attr_reader :initprot + + # @return [Integer] the number of sections in the segment + attr_reader :nsects + + # @return [Integer] any flags associated with the segment + attr_reader :flags + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Z16L=4l=2L=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 56 + + # @api private + def initialize(view, cmd, cmdsize, segname, vmaddr, vmsize, fileoff, + filesize, maxprot, initprot, nsects, flags) + super(view, cmd, cmdsize) + @segname = segname + @vmaddr = vmaddr + @vmsize = vmsize + @fileoff = fileoff + @filesize = filesize + @maxprot = maxprot + @initprot = initprot + @nsects = nsects + @flags = flags + end + + # All sections referenced within this segment. + # @return [Array<MachO::Sections::Section>] if the Mach-O is 32-bit + # @return [Array<MachO::Sections::Section64>] if the Mach-O is 64-bit + def sections + klass = case self + when SegmentCommand64 + MachO::Sections::Section64 + when SegmentCommand + MachO::Sections::Section + end + + offset = view.offset + self.class.bytesize + length = nsects * klass.bytesize + + bins = view.raw_data[offset, length] + bins.unpack("a#{klass.bytesize}" * nsects).map do |bin| + klass.new_from_bin(view.endianness, bin) + end + end + + # @example + # puts "this segment relocated in/to it" if sect.flag?(:SG_NORELOC) + # @param flag [Symbol] a segment flag symbol + # @return [Boolean] true if `flag` is present in the segment's flag field + def flag?(flag) + flag = SEGMENT_FLAGS[flag] + + return false if flag.nil? + + flags & flag == flag + end + + # Guesses the alignment of the segment. + # @return [Integer] the guessed alignment, as a power of 2 + # @note See `guess_align` in `cctools/misc/lipo.c` + def guess_align + return Sections::MAX_SECT_ALIGN if vmaddr.zero? + + align = 0 + segalign = 1 + + while (segalign & vmaddr).zero? + segalign <<= 1 + align += 1 + end + + return 2 if align < 2 + return Sections::MAX_SECT_ALIGN if align > Sections::MAX_SECT_ALIGN + + align + end + + # @return [Hash] a hash representation of this {SegmentCommand} + def to_h + { + "segname" => segname, + "vmaddr" => vmaddr, + "vmsize" => vmsize, + "fileoff" => fileoff, + "filesize" => filesize, + "maxprot" => maxprot, + "initprot" => initprot, + "nsects" => nsects, + "flags" => flags, + "sections" => sections.map(&:to_h), + }.merge super + end + end + + # A load command indicating that part of this file is to be mapped into + # the task's address space. Corresponds to LC_SEGMENT_64. + class SegmentCommand64 < SegmentCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Z16Q=4l=2L=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 72 + end + + # A load command representing some aspect of shared libraries, depending + # on filetype. Corresponds to LC_ID_DYLIB, LC_LOAD_DYLIB, + # LC_LOAD_WEAK_DYLIB, and LC_REEXPORT_DYLIB. 
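+    # @example listing the libraries a binary links against (an illustrative sketch)
+    #   # `file` is assumed to be an already-parsed MachO::MachOFile
+    #   file.load_commands
+    #       .select { |lc| MachO::LoadCommands::DYLIB_LOAD_COMMANDS.include?(lc.type) }
+    #       .each { |lc| puts "#{lc.name} (compat #{lc.compatibility_version})" }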
+ class DylibCommand < LoadCommand + # @return [LCStr] the library's path + # name as an LCStr + attr_reader :name + + # @return [Integer] the library's build time stamp + attr_reader :timestamp + + # @return [Integer] the library's current version number + attr_reader :current_version + + # @return [Integer] the library's compatibility version number + attr_reader :compatibility_version + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, name, timestamp, current_version, + compatibility_version) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @timestamp = timestamp + @current_version = current_version + @compatibility_version = compatibility_version + end + + # @param context [SerializationContext] + # the context + # @return [String] the serialized fields of the load command + # @api private + def serialize(context) + format = Utils.specialize_format(FORMAT, context.endianness) + string_payload, string_offsets = Utils.pack_strings(SIZEOF, + context.alignment, + :name => name.to_s) + cmdsize = SIZEOF + string_payload.bytesize + [cmd, cmdsize, string_offsets[:name], timestamp, current_version, + compatibility_version].pack(format) + string_payload + end + + # @return [Hash] a hash representation of this {DylibCommand} + def to_h + { + "name" => name.to_h, + "timestamp" => timestamp, + "current_version" => current_version, + "compatibility_version" => compatibility_version, + }.merge super + end + end + + # A load command representing some aspect of the dynamic linker, depending + # on filetype. Corresponds to LC_ID_DYLINKER, LC_LOAD_DYLINKER, and + # LC_DYLD_ENVIRONMENT. + class DylinkerCommand < LoadCommand + # @return [LCStr] the dynamic linker's + # path name as an LCStr + attr_reader :name + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, name) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + end + + # @param context [SerializationContext] + # the context + # @return [String] the serialized fields of the load command + # @api private + def serialize(context) + format = Utils.specialize_format(FORMAT, context.endianness) + string_payload, string_offsets = Utils.pack_strings(SIZEOF, + context.alignment, + :name => name.to_s) + cmdsize = SIZEOF + string_payload.bytesize + [cmd, cmdsize, string_offsets[:name]].pack(format) + string_payload + end + + # @return [Hash] a hash representation of this {DylinkerCommand} + def to_h + { + "name" => name.to_h, + }.merge super + end + end + + # A load command used to indicate dynamic libraries used in prebinding. + # Corresponds to LC_PREBOUND_DYLIB. 
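+    # @example inspecting a prebound library entry (an illustrative sketch; prebinding is rare in modern binaries)
+    #   # `lc` is assumed to be an LC_PREBOUND_DYLIB command from a parsed Mach-O
+    #   lc.name.to_s # => "/usr/lib/libSystem.B.dylib", for example
+    #   lc.nmodules  # => 1, for example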
+ class PreboundDylibCommand < LoadCommand + # @return [LCStr] the library's path + # name as an LCStr + attr_reader :name + + # @return [Integer] the number of modules in the library + attr_reader :nmodules + + # @return [Integer] a bit vector of linked modules + attr_reader :linked_modules + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=5" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + # @api private + def initialize(view, cmd, cmdsize, name, nmodules, linked_modules) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @nmodules = nmodules + @linked_modules = linked_modules + end + + # @return [Hash] a hash representation of this {PreboundDylibCommand} + def to_h + { + "name" => name.to_h, + "nmodules" => nmodules, + "linked_modules" => linked_modules, + }.merge super + end + end + + # A load command used to represent threads. + # @note cctools-870 and onwards have all fields of thread_command commented + # out except the common ones (cmd, cmdsize) + class ThreadCommand < LoadCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + end + + # A load command containing the address of the dynamic shared library + # initialization routine and an index into the module table for the module + # that defines the routine. Corresponds to LC_ROUTINES. + class RoutinesCommand < LoadCommand + # @return [Integer] the address of the initialization routine + attr_reader :init_address + + # @return [Integer] the index into the module table that the init routine + # is defined in + attr_reader :init_module + + # @return [void] + attr_reader :reserved1 + + # @return [void] + attr_reader :reserved2 + + # @return [void] + attr_reader :reserved3 + + # @return [void] + attr_reader :reserved4 + + # @return [void] + attr_reader :reserved5 + + # @return [void] + attr_reader :reserved6 + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=10" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 40 + + # @api private + def initialize(view, cmd, cmdsize, init_address, init_module, reserved1, + reserved2, reserved3, reserved4, reserved5, reserved6) + super(view, cmd, cmdsize) + @init_address = init_address + @init_module = init_module + @reserved1 = reserved1 + @reserved2 = reserved2 + @reserved3 = reserved3 + @reserved4 = reserved4 + @reserved5 = reserved5 + @reserved6 = reserved6 + end + + # @return [Hash] a hash representation of this {RoutinesCommand} + def to_h + { + "init_address" => init_address, + "init_module" => init_module, + "reserved1" => reserved1, + "reserved2" => reserved2, + "reserved3" => reserved3, + "reserved4" => reserved4, + "reserved5" => reserved5, + "reserved6" => reserved6, + }.merge super + end + end + + # A load command containing the address of the dynamic shared library + # initialization routine and an index into the module table for the module + # that defines the routine. Corresponds to LC_ROUTINES_64. + class RoutinesCommand64 < RoutinesCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Q=8" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 72 + end + + # A load command signifying membership of a subframework containing the name + # of an umbrella framework. Corresponds to LC_SUB_FRAMEWORK. 
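+    # @example reading the umbrella framework name (an illustrative sketch)
+    #   # `lc` is assumed to be an LC_SUB_FRAMEWORK command from a parsed Mach-O
+    #   lc.umbrella.to_s # => "Carbon", for example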
+ class SubFrameworkCommand < LoadCommand + # @return [LCStr] the umbrella framework name as an LCStr + attr_reader :umbrella + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, umbrella) + super(view, cmd, cmdsize) + @umbrella = LCStr.new(self, umbrella) + end + + # @return [Hash] a hash representation of this {SubFrameworkCommand} + def to_h + { + "umbrella" => umbrella.to_h, + }.merge super + end + end + + # A load command signifying membership of a subumbrella containing the name + # of an umbrella framework. Corresponds to LC_SUB_UMBRELLA. + class SubUmbrellaCommand < LoadCommand + # @return [LCStr] the subumbrella framework name as an LCStr + attr_reader :sub_umbrella + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, sub_umbrella) + super(view, cmd, cmdsize) + @sub_umbrella = LCStr.new(self, sub_umbrella) + end + + # @return [Hash] a hash representation of this {SubUmbrellaCommand} + def to_h + { + "sub_umbrella" => sub_umbrella.to_h, + }.merge super + end + end + + # A load command signifying a sublibrary of a shared library. Corresponds + # to LC_SUB_LIBRARY. + class SubLibraryCommand < LoadCommand + # @return [LCStr] the sublibrary name as an LCStr + attr_reader :sub_library + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, sub_library) + super(view, cmd, cmdsize) + @sub_library = LCStr.new(self, sub_library) + end + + # @return [Hash] a hash representation of this {SubLibraryCommand} + def to_h + { + "sub_library" => sub_library.to_h, + }.merge super + end + end + + # A load command signifying a shared library that is a subframework of + # an umbrella framework. Corresponds to LC_SUB_CLIENT. + class SubClientCommand < LoadCommand + # @return [LCStr] the subclient name as an LCStr + attr_reader :sub_client + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, sub_client) + super(view, cmd, cmdsize) + @sub_client = LCStr.new(self, sub_client) + end + + # @return [Hash] a hash representation of this {SubClientCommand} + def to_h + { + "sub_client" => sub_client.to_h, + }.merge super + end + end + + # A load command containing the offsets and sizes of the link-edit 4.3BSD + # "stab" style symbol table information. Corresponds to LC_SYMTAB. 
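+    # @example locating the symbol table information (an illustrative sketch)
+    #   # `file` is assumed to be an already-parsed MachO::MachOFile
+    #   symtab = file.load_commands.find { |lc| lc.type == :LC_SYMTAB }
+    #   [symtab.nsyms, symtab.symoff] if symtab # e.g. [42, 8192]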
+ class SymtabCommand < LoadCommand + # @return [Integer] the symbol table's offset + attr_reader :symoff + + # @return [Integer] the number of symbol table entries + attr_reader :nsyms + + # @return [Integer] the string table's offset + attr_reader :stroff + + # @return [Integer] the string table size in bytes + attr_reader :strsize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, symoff, nsyms, stroff, strsize) + super(view, cmd, cmdsize) + @symoff = symoff + @nsyms = nsyms + @stroff = stroff + @strsize = strsize + end + + # @return [Hash] a hash representation of this {SymtabCommand} + def to_h + { + "symoff" => symoff, + "nsyms" => nsyms, + "stroff" => stroff, + "strsize" => strsize, + }.merge super + end + end + + # A load command containing symbolic information needed to support data + # structures used by the dynamic link editor. Corresponds to LC_DYSYMTAB. + class DysymtabCommand < LoadCommand + # @return [Integer] the index to local symbols + attr_reader :ilocalsym + + # @return [Integer] the number of local symbols + attr_reader :nlocalsym + + # @return [Integer] the index to externally defined symbols + attr_reader :iextdefsym + + # @return [Integer] the number of externally defined symbols + attr_reader :nextdefsym + + # @return [Integer] the index to undefined symbols + attr_reader :iundefsym + + # @return [Integer] the number of undefined symbols + attr_reader :nundefsym + + # @return [Integer] the file offset to the table of contents + attr_reader :tocoff + + # @return [Integer] the number of entries in the table of contents + attr_reader :ntoc + + # @return [Integer] the file offset to the module table + attr_reader :modtaboff + + # @return [Integer] the number of entries in the module table + attr_reader :nmodtab + + # @return [Integer] the file offset to the referenced symbol table + attr_reader :extrefsymoff + + # @return [Integer] the number of entries in the referenced symbol table + attr_reader :nextrefsyms + + # @return [Integer] the file offset to the indirect symbol table + attr_reader :indirectsymoff + + # @return [Integer] the number of entries in the indirect symbol table + attr_reader :nindirectsyms + + # @return [Integer] the file offset to the external relocation entries + attr_reader :extreloff + + # @return [Integer] the number of external relocation entries + attr_reader :nextrel + + # @return [Integer] the file offset to the local relocation entries + attr_reader :locreloff + + # @return [Integer] the number of local relocation entries + attr_reader :nlocrel + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=20" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 80 + + # ugh + # @api private + def initialize(view, cmd, cmdsize, ilocalsym, nlocalsym, iextdefsym, + nextdefsym, iundefsym, nundefsym, tocoff, ntoc, modtaboff, + nmodtab, extrefsymoff, nextrefsyms, indirectsymoff, + nindirectsyms, extreloff, nextrel, locreloff, nlocrel) + super(view, cmd, cmdsize) + @ilocalsym = ilocalsym + @nlocalsym = nlocalsym + @iextdefsym = iextdefsym + @nextdefsym = nextdefsym + @iundefsym = iundefsym + @nundefsym = nundefsym + @tocoff = tocoff + @ntoc = ntoc + @modtaboff = modtaboff + @nmodtab = nmodtab + @extrefsymoff = extrefsymoff + @nextrefsyms = nextrefsyms + @indirectsymoff = indirectsymoff + @nindirectsyms = nindirectsyms + @extreloff = extreloff + @nextrel = nextrel + @locreloff = locreloff + @nlocrel 
= nlocrel + end + + # @return [Hash] a hash representation of this {DysymtabCommand} + def to_h + { + "ilocalsym" => ilocalsym, + "nlocalsym" => nlocalsym, + "iextdefsym" => iextdefsym, + "nextdefsym" => nextdefsym, + "iundefsym" => iundefsym, + "nundefsym" => nundefsym, + "tocoff" => tocoff, + "ntoc" => ntoc, + "modtaboff" => modtaboff, + "nmodtab" => nmodtab, + "extrefsymoff" => extrefsymoff, + "nextrefsyms" => nextrefsyms, + "indirectsymoff" => indirectsymoff, + "nindirectsyms" => nindirectsyms, + "extreloff" => extreloff, + "nextrel" => nextrel, + "locreloff" => locreloff, + "nlocrel" => nlocrel, + }.merge super + end + end + + # A load command containing the offset and number of hints in the two-level + # namespace lookup hints table. Corresponds to LC_TWOLEVEL_HINTS. + class TwolevelHintsCommand < LoadCommand + # @return [Integer] the offset to the hint table + attr_reader :htoffset + + # @return [Integer] the number of hints in the hint table + attr_reader :nhints + + # @return [TwolevelHintsTable] + # the hint table + attr_reader :table + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, htoffset, nhints) + super(view, cmd, cmdsize) + @htoffset = htoffset + @nhints = nhints + @table = TwolevelHintsTable.new(view, htoffset, nhints) + end + + # @return [Hash] a hash representation of this {TwolevelHintsCommand} + def to_h + { + "htoffset" => htoffset, + "nhints" => nhints, + "table" => table.hints.map(&:to_h), + }.merge super + end + + # A representation of the two-level namespace lookup hints table exposed + # by a {TwolevelHintsCommand} (`LC_TWOLEVEL_HINTS`). + class TwolevelHintsTable + # @return [Array<TwolevelHint>] all hints in the table + attr_reader :hints + + # @param view [MachO::MachOView] the view into the current Mach-O + # @param htoffset [Integer] the offset of the hints table + # @param nhints [Integer] the number of two-level hints in the table + # @api private + def initialize(view, htoffset, nhints) + format = Utils.specialize_format("L=#{nhints}", view.endianness) + raw_table = view.raw_data[htoffset, nhints * 4] + blobs = raw_table.unpack(format) + + @hints = blobs.map { |b| TwolevelHint.new(b) } + end + + # An individual two-level namespace lookup hint. + class TwolevelHint + # @return [Integer] the index into the sub-images + attr_reader :isub_image + + # @return [Integer] the index into the table of contents + attr_reader :itoc + + # @param blob [Integer] the 32-bit number containing the lookup hint + # @api private + def initialize(blob) + @isub_image = blob >> 24 + @itoc = blob & 0x00FFFFFF + end + + # @return [Hash] a hash representation of this {TwolevelHint} + def to_h + { + "isub_image" => isub_image, + "itoc" => itoc, + } + end + end + end + end + + # A load command containing the value of the original checksum for prebound + # files, or zero. Corresponds to LC_PREBIND_CKSUM. 
+ class PrebindCksumCommand < LoadCommand + # @return [Integer] the checksum or 0 + attr_reader :cksum + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, cksum) + super(view, cmd, cmdsize) + @cksum = cksum + end + + # @return [Hash] a hash representation of this {PrebindCksumCommand} + def to_h + { + "cksum" => cksum, + }.merge super + end + end + + # A load command representing an rpath, which specifies a path that should + # be added to the current run path used to find @rpath prefixed dylibs. + # Corresponds to LC_RPATH. + class RpathCommand < LoadCommand + # @return [LCStr] the path to add to the run path as an LCStr + attr_reader :path + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, path) + super(view, cmd, cmdsize) + @path = LCStr.new(self, path) + end + + # @param context [SerializationContext] the context + # @return [String] the serialized fields of the load command + # @api private + def serialize(context) + format = Utils.specialize_format(FORMAT, context.endianness) + string_payload, string_offsets = Utils.pack_strings(SIZEOF, + context.alignment, + :path => path.to_s) + cmdsize = SIZEOF + string_payload.bytesize + [cmd, cmdsize, string_offsets[:path]].pack(format) + string_payload + end + + # @return [Hash] a hash representation of this {RpathCommand} + def to_h + { + "path" => path.to_h, + }.merge super + end + end + + # A load command representing the offsets and sizes of a blob of data in + # the __LINKEDIT segment. Corresponds to LC_CODE_SIGNATURE, + # LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, + # LC_DYLIB_CODE_SIGN_DRS, LC_LINKER_OPTIMIZATION_HINT, LC_DYLD_EXPORTS_TRIE, + # or LC_DYLD_CHAINED_FIXUPS. + class LinkeditDataCommand < LoadCommand + # @return [Integer] offset to the data in the __LINKEDIT segment + attr_reader :dataoff + + # @return [Integer] size of the data in the __LINKEDIT segment + attr_reader :datasize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, dataoff, datasize) + super(view, cmd, cmdsize) + @dataoff = dataoff + @datasize = datasize + end + + # @return [Hash] a hash representation of this {LinkeditDataCommand} + def to_h + { + "dataoff" => dataoff, + "datasize" => datasize, + }.merge super + end + end + + # A load command representing the offset to and size of an encrypted + # segment. Corresponds to LC_ENCRYPTION_INFO. 
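+    # @example checking whether a binary's payload is encrypted (an illustrative sketch)
+    #   # `file` is assumed to be an already-parsed MachO::MachOFile
+    #   info = file.load_commands.find do |lc|
+    #     %i[LC_ENCRYPTION_INFO LC_ENCRYPTION_INFO_64].include?(lc.type)
+    #   end
+    #   info && info.cryptid != 0 # truthy when the segment has been encrypted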
+ class EncryptionInfoCommand < LoadCommand + # @return [Integer] the offset to the encrypted segment + attr_reader :cryptoff + + # @return [Integer] the size of the encrypted segment + attr_reader :cryptsize + + # @return [Integer] the encryption system, or 0 if not encrypted yet + attr_reader :cryptid + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=5" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + # @api private + def initialize(view, cmd, cmdsize, cryptoff, cryptsize, cryptid) + super(view, cmd, cmdsize) + @cryptoff = cryptoff + @cryptsize = cryptsize + @cryptid = cryptid + end + + # @return [Hash] a hash representation of this {EncryptionInfoCommand} + def to_h + { + "cryptoff" => cryptoff, + "cryptsize" => cryptsize, + "cryptid" => cryptid, + }.merge super + end + end + + # A load command representing the offset to and size of an encrypted + # segment. Corresponds to LC_ENCRYPTION_INFO_64. + class EncryptionInfoCommand64 < EncryptionInfoCommand + # @return [Integer] 64-bit padding value + attr_reader :pad + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, cryptoff, cryptsize, cryptid, pad) + super(view, cmd, cmdsize, cryptoff, cryptsize, cryptid) + @pad = pad + end + + # @return [Hash] a hash representation of this {EncryptionInfoCommand64} + def to_h + { + "pad" => pad, + }.merge super + end + end + + # A load command containing the minimum OS version on which the binary + # was built to run. Corresponds to LC_VERSION_MIN_MACOSX and + # LC_VERSION_MIN_IPHONEOS. + class VersionMinCommand < LoadCommand + # @return [Integer] the version X.Y.Z packed as x16.y8.z8 + attr_reader :version + + # @return [Integer] the SDK version X.Y.Z packed as x16.y8.z8 + attr_reader :sdk + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, version, sdk) + super(view, cmd, cmdsize) + @version = version + @sdk = sdk + end + + # A string representation of the binary's minimum OS version. + # @return [String] a string representing the minimum OS version. + def version_string + binary = "%032<version>b" % { :version => version } + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # A string representation of the binary's SDK version. + # @return [String] a string representing the SDK version. + def sdk_string + binary = "%032<sdk>b" % { :sdk => sdk } + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # @return [Hash] a hash representation of this {VersionMinCommand} + def to_h + { + "version" => version, + "version_string" => version_string, + "sdk" => sdk, + "sdk_string" => sdk_string, + }.merge super + end + end + + # A load command containing the minimum OS version on which + # the binary was built for its platform. + # Corresponds to LC_BUILD_VERSION. 
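+    # @example reading a binary's deployment target and SDK (an illustrative sketch)
+    #   # `file` is assumed to be an already-parsed MachO::MachOFile
+    #   bv = file.load_commands.find { |lc| lc.type == :LC_BUILD_VERSION }
+    #   [bv.minos_string, bv.sdk_string] if bv # => ["11.0.0", "12.1.0"], for example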
+ class BuildVersionCommand < LoadCommand + # @return [Integer] + attr_reader :platform + + # @return [Integer] the minimum OS version X.Y.Z packed as x16.y8.z8 + attr_reader :minos + + # @return [Integer] the SDK version X.Y.Z packed as x16.y8.z8 + attr_reader :sdk + + # @return [ToolEntries] tool entries + attr_reader :tool_entries + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=6" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, platform, minos, sdk, ntools) + super(view, cmd, cmdsize) + @platform = platform + @minos = minos + @sdk = sdk + @tool_entries = ToolEntries.new(view, ntools) + end + + # A string representation of the binary's minimum OS version. + # @return [String] a string representing the minimum OS version. + def minos_string + binary = "%032<minos>b" % { :minos => minos } + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # A string representation of the binary's SDK version. + # @return [String] a string representing the SDK version. + def sdk_string + binary = "%032<sdk>b" % { :sdk => sdk } + segs = [ + binary[0..15], binary[16..23], binary[24..31] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # @return [Hash] a hash representation of this {BuildVersionCommand} + def to_h + { + "platform" => platform, + "minos" => minos, + "minos_string" => minos_string, + "sdk" => sdk, + "sdk_string" => sdk_string, + "tool_entries" => tool_entries.tools.map(&:to_h), + }.merge super + end + + # A representation of the tool versions exposed + # by a {BuildVersionCommand} (`LC_BUILD_VERSION`). + class ToolEntries + # @return [Array<Tool>] all tools + attr_reader :tools + + # @param view [MachO::MachOView] the view into the current Mach-O + # @param ntools [Integer] the number of tools + # @api private + def initialize(view, ntools) + format = Utils.specialize_format("L=#{ntools * 2}", view.endianness) + raw_table = view.raw_data[view.offset + 24, ntools * 8] + blobs = raw_table.unpack(format).each_slice(2).to_a + + @tools = blobs.map { |b| Tool.new(*b) } + end + + # An individual tool. + class Tool + # @return [Integer] the enum for the tool + attr_reader :tool + + # @return [Integer] the tool's version number + attr_reader :version + + # @param tool [Integer] 32-bit integer + # @param version [Integer] 32-bit integer + # @api private + def initialize(tool, version) + @tool = tool + @version = version + end + + # @return [Hash] a hash representation of this {Tool} + def to_h + { + "tool" => tool, + "version" => version, + } + end + end + end + end + + # A load command containing the file offsets and sizes of the new + # compressed form of the information dyld needs to load the image. + # Corresponds to LC_DYLD_INFO and LC_DYLD_INFO_ONLY. 
+ class DyldInfoCommand < LoadCommand + # @return [Integer] the file offset to the rebase information + attr_reader :rebase_off + + # @return [Integer] the size of the rebase information + attr_reader :rebase_size + + # @return [Integer] the file offset to the binding information + attr_reader :bind_off + + # @return [Integer] the size of the binding information + attr_reader :bind_size + + # @return [Integer] the file offset to the weak binding information + attr_reader :weak_bind_off + + # @return [Integer] the size of the weak binding information + attr_reader :weak_bind_size + + # @return [Integer] the file offset to the lazy binding information + attr_reader :lazy_bind_off + + # @return [Integer] the size of the lazy binding information + attr_reader :lazy_bind_size + + # @return [Integer] the file offset to the export information + attr_reader :export_off + + # @return [Integer] the size of the export information + attr_reader :export_size + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=12" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 48 + + # @api private + def initialize(view, cmd, cmdsize, rebase_off, rebase_size, bind_off, + bind_size, weak_bind_off, weak_bind_size, lazy_bind_off, + lazy_bind_size, export_off, export_size) + super(view, cmd, cmdsize) + @rebase_off = rebase_off + @rebase_size = rebase_size + @bind_off = bind_off + @bind_size = bind_size + @weak_bind_off = weak_bind_off + @weak_bind_size = weak_bind_size + @lazy_bind_off = lazy_bind_off + @lazy_bind_size = lazy_bind_size + @export_off = export_off + @export_size = export_size + end + + # @return [Hash] a hash representation of this {DyldInfoCommand} + def to_h + { + "rebase_off" => rebase_off, + "rebase_size" => rebase_size, + "bind_off" => bind_off, + "bind_size" => bind_size, + "weak_bind_off" => weak_bind_off, + "weak_bind_size" => weak_bind_size, + "lazy_bind_off" => lazy_bind_off, + "lazy_bind_size" => lazy_bind_size, + "export_off" => export_off, + "export_size" => export_size, + }.merge super + end + end + + # A load command containing linker options embedded in object files. + # Corresponds to LC_LINKER_OPTION. + class LinkerOptionCommand < LoadCommand + # @return [Integer] the number of strings + attr_reader :count + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=3" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 12 + + # @api private + def initialize(view, cmd, cmdsize, count) + super(view, cmd, cmdsize) + @count = count + end + + # @return [Hash] a hash representation of this {LinkerOptionCommand} + def to_h + { + "count" => count, + }.merge super + end + end + + # A load command specifying the offset of main(). Corresponds to LC_MAIN. + class EntryPointCommand < LoadCommand + # @return [Integer] the file (__TEXT) offset of main() + attr_reader :entryoff + + # @return [Integer] if not 0, the initial stack size. + attr_reader :stacksize + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Q=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 24 + + # @api private + def initialize(view, cmd, cmdsize, entryoff, stacksize) + super(view, cmd, cmdsize) + @entryoff = entryoff + @stacksize = stacksize + end + + # @return [Hash] a hash representation of this {EntryPointCommand} + def to_h + { + "entryoff" => entryoff, + "stacksize" => stacksize, + }.merge super + end + end + + # A load command specifying the version of the sources used to build the + # binary. Corresponds to LC_SOURCE_VERSION. 
+ class SourceVersionCommand < LoadCommand + # @return [Integer] the version packed as a24.b10.c10.d10.e10 + attr_reader :version + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Q=1" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, version) + super(view, cmd, cmdsize) + @version = version + end + + # A string representation of the sources used to build the binary. + # @return [String] a string representation of the version + def version_string + binary = "%064<version>b" % { :version => version } + segs = [ + binary[0..23], binary[24..33], binary[34..43], binary[44..53], + binary[54..63] + ].map { |s| s.to_i(2) } + + segs.join(".") + end + + # @return [Hash] a hash representation of this {SourceVersionCommand} + def to_h + { + "version" => version, + "version_string" => version_string, + }.merge super + end + end + + # An obsolete load command containing the offset and size of the (GNU style) + # symbol table information. Corresponds to LC_SYMSEG. + class SymsegCommand < LoadCommand + # @return [Integer] the offset to the symbol segment + attr_reader :offset + + # @return [Integer] the size of the symbol segment in bytes + attr_reader :size + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + # @api private + def initialize(view, cmd, cmdsize, offset, size) + super(view, cmd, cmdsize) + @offset = offset + @size = size + end + + # @return [Hash] a hash representation of this {SymsegCommand} + def to_h + { + "offset" => offset, + "size" => size, + }.merge super + end + end + + # An obsolete load command containing a free format string table. Each + # string is null-terminated and the command is zero-padded to a multiple of + # 4. Corresponds to LC_IDENT. + class IdentCommand < LoadCommand + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 8 + end + + # An obsolete load command containing the path to a file to be loaded into + # memory. Corresponds to LC_FVMFILE. + class FvmfileCommand < LoadCommand + # @return [LCStr] the pathname of the file being loaded + attr_reader :name + + # @return [Integer] the virtual address being loaded at + attr_reader :header_addr + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=4" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 16 + + def initialize(view, cmd, cmdsize, name, header_addr) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @header_addr = header_addr + end + + # @return [Hash] a hash representation of this {FvmfileCommand} + def to_h + { + "name" => name.to_h, + "header_addr" => header_addr, + }.merge super + end + end + + # An obsolete load command containing the path to a library to be loaded + # into memory. Corresponds to LC_LOADFVMLIB and LC_IDFVMLIB. 
+ class FvmlibCommand < LoadCommand + # @return [LCStr] the library's target pathname + attr_reader :name + + # @return [Integer] the library's minor version number + attr_reader :minor_version + + # @return [Integer] the library's header address + attr_reader :header_addr + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=5" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 20 + + def initialize(view, cmd, cmdsize, name, minor_version, header_addr) + super(view, cmd, cmdsize) + @name = LCStr.new(self, name) + @minor_version = minor_version + @header_addr = header_addr + end + + # @return [Hash] a hash representation of this {FvmlibCommand} + def to_h + { + "name" => name.to_h, + "minor_version" => minor_version, + "header_addr" => header_addr, + }.merge super + end + end + + # A load command containing an owner name and offset/size for an arbitrary data region. + # Corresponds to LC_NOTE. + class NoteCommand < LoadCommand + # @return [String] the name of the owner for this note + attr_reader :data_owner + + # @return [Integer] the offset, within the file, of the note + attr_reader :offset + + # @return [Integer] the size, in bytes, of the note + attr_reader :size + + # @see MachOStructure::FORMAT + # @api private + FORMAT = "L=2Z16Q=2" + + # @see MachOStructure::SIZEOF + # @api private + SIZEOF = 48 + + def initialize(view, cmd, cmdsize, data_owner, offset, size) + super(view, cmd, cmdsize) + @data_owner = data_owner + @offset = offset + @size = size + end + + # @return [Hash] a hash representation of this {NoteCommand} + def to_h + { + "data_owner" => data_owner, + "offset" => offset, + "size" => size, + }.merge super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/macho_file.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/macho_file.rb new file mode 100644 index 0000000..85be727 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/macho_file.rb @@ -0,0 +1,587 @@ +# frozen_string_literal: true + +require "forwardable" + +module MachO + # Represents a Mach-O file, which contains a header and load commands + # as well as binary executable instructions. Mach-O binaries are + # architecture specific. + # @see https://en.wikipedia.org/wiki/Mach-O + # @see FatFile + class MachOFile + extend Forwardable + + # @return [String, nil] the filename loaded from, or nil if loaded from a binary + # string + attr_accessor :filename + + # @return [Hash] any parser options that the instance was created with + attr_reader :options + + # @return [Symbol] the endianness of the file, :big or :little + attr_reader :endianness + + # @return [Headers::MachHeader] if the Mach-O is 32-bit + # @return [Headers::MachHeader64] if the Mach-O is 64-bit + attr_reader :header + + # @return [Array<LoadCommands::LoadCommand>] an array of the file's load + # commands + # @note load commands are provided in order of ascending offset. + attr_reader :load_commands + + # Creates a new instance from a binary string. + # @param bin [String] a binary string containing raw Mach-O data + # @param opts [Hash] options to control the parser with + # @option opts [Boolean] :permissive whether to ignore unknown load commands + # @return [MachOFile] a new MachOFile + def self.new_from_bin(bin, **opts) + instance = allocate + instance.initialize_from_bin(bin, opts) + + instance + end + + # Creates a new instance from data read from the given filename. 
+ # @param filename [String] the Mach-O file to load from + # @param opts [Hash] options to control the parser with + # @option opts [Boolean] :permissive whether to ignore unknown load commands + # @raise [ArgumentError] if the given file does not exist + def initialize(filename, **opts) + raise ArgumentError, "#{filename}: no such file" unless File.file?(filename) + + @filename = filename + @options = opts + @raw_data = File.open(@filename, "rb", &:read) + populate_fields + end + + # Initializes a new MachOFile instance from a binary string with the given options. + # @see MachO::MachOFile.new_from_bin + # @api private + def initialize_from_bin(bin, opts) + @filename = nil + @options = opts + @raw_data = bin + populate_fields + end + + # The file's raw Mach-O data. + # @return [String] the raw Mach-O data + def serialize + @raw_data + end + + # @!method magic + # @return (see MachO::Headers::MachHeader#magic) + # @!method ncmds + # @return (see MachO::Headers::MachHeader#ncmds) + # @!method sizeofcmds + # @return (see MachO::Headers::MachHeader#sizeofcmds) + # @!method flags + # @return (see MachO::Headers::MachHeader#flags) + # @!method object? + # @return (see MachO::Headers::MachHeader#object?) + # @!method executable? + # @return (see MachO::Headers::MachHeader#executable?) + # @!method fvmlib? + # @return (see MachO::Headers::MachHeader#fvmlib?) + # @!method core? + # @return (see MachO::Headers::MachHeader#core?) + # @!method preload? + # @return (see MachO::Headers::MachHeader#preload?) + # @!method dylib? + # @return (see MachO::Headers::MachHeader#dylib?) + # @!method dylinker? + # @return (see MachO::Headers::MachHeader#dylinker?) + # @!method bundle? + # @return (see MachO::Headers::MachHeader#bundle?) + # @!method dsym? + # @return (see MachO::Headers::MachHeader#dsym?) + # @!method kext? + # @return (see MachO::Headers::MachHeader#kext?) + # @!method magic32? + # @return (see MachO::Headers::MachHeader#magic32?) + # @!method magic64? + # @return (see MachO::Headers::MachHeader#magic64?) + # @!method alignment + # @return (see MachO::Headers::MachHeader#alignment) + def_delegators :header, :magic, :ncmds, :sizeofcmds, :flags, :object?, + :executable?, :fvmlib?, :core?, :preload?, :dylib?, + :dylinker?, :bundle?, :dsym?, :kext?, :magic32?, :magic64?, + :alignment + + # @return [String] a string representation of the file's magic number + def magic_string + Headers::MH_MAGICS[magic] + end + + # @return [Symbol] a string representation of the Mach-O's filetype + def filetype + Headers::MH_FILETYPES[header.filetype] + end + + # @return [Symbol] a symbol representation of the Mach-O's CPU type + def cputype + Headers::CPU_TYPES[header.cputype] + end + + # @return [Symbol] a symbol representation of the Mach-O's CPU subtype + def cpusubtype + Headers::CPU_SUBTYPES[header.cputype][header.cpusubtype] + end + + # All load commands of a given name. + # @example + # file.command("LC_LOAD_DYLIB") + # file[:LC_LOAD_DYLIB] + # @param [String, Symbol] name the load command ID + # @return [Array<LoadCommands::LoadCommand>] an array of load commands + # corresponding to `name` + def command(name) + load_commands.select { |lc| lc.type == name.to_sym } + end + + alias [] command + + # Inserts a load command at the given offset. 
+ # @param offset [Integer] the offset to insert at + # @param lc [LoadCommands::LoadCommand] the load command to insert + # @param options [Hash] + # @option options [Boolean] :repopulate (true) whether or not to repopulate + # the instance fields + # @raise [OffsetInsertionError] if the offset is not in the load command region + # @raise [HeaderPadError] if the new command exceeds the header pad buffer + # @note Calling this method with an arbitrary offset in the load command + # region **will leave the object in an inconsistent state**. + def insert_command(offset, lc, options = {}) + context = LoadCommands::LoadCommand::SerializationContext.context_for(self) + cmd_raw = lc.serialize(context) + fileoff = offset + cmd_raw.bytesize + + raise OffsetInsertionError, offset if offset < header.class.bytesize || fileoff > low_fileoff + + new_sizeofcmds = sizeofcmds + cmd_raw.bytesize + + raise HeaderPadError, @filename if header.class.bytesize + new_sizeofcmds > low_fileoff + + # update Mach-O header fields to account for inserted load command + update_ncmds(ncmds + 1) + update_sizeofcmds(new_sizeofcmds) + + @raw_data.insert(offset, cmd_raw) + @raw_data.slice!(header.class.bytesize + new_sizeofcmds, cmd_raw.bytesize) + + populate_fields if options.fetch(:repopulate, true) + end + + # Replace a load command with another command in the Mach-O, preserving location. + # @param old_lc [LoadCommands::LoadCommand] the load command being replaced + # @param new_lc [LoadCommands::LoadCommand] the load command being added + # @return [void] + # @raise [HeaderPadError] if the new command exceeds the header pad buffer + # @see #insert_command + # @note This is public, but methods like {#dylib_id=} should be preferred. + def replace_command(old_lc, new_lc) + context = LoadCommands::LoadCommand::SerializationContext.context_for(self) + cmd_raw = new_lc.serialize(context) + new_sizeofcmds = sizeofcmds + cmd_raw.bytesize - old_lc.cmdsize + + raise HeaderPadError, @filename if header.class.bytesize + new_sizeofcmds > low_fileoff + + delete_command(old_lc) + insert_command(old_lc.view.offset, new_lc) + end + + # Appends a new load command to the Mach-O. + # @param lc [LoadCommands::LoadCommand] the load command being added + # @param options [Hash] + # @option options [Boolean] :repopulate (true) whether or not to repopulate + # the instance fields + # @return [void] + # @see #insert_command + # @note This is public, but methods like {#add_rpath} should be preferred. + # Setting `repopulate` to false **will leave the instance in an + # inconsistent state** unless {#populate_fields} is called **immediately** + # afterwards. + def add_command(lc, options = {}) + insert_command(header.class.bytesize + sizeofcmds, lc, options) + end + + # Delete a load command from the Mach-O. + # @param lc [LoadCommands::LoadCommand] the load command being deleted + # @param options [Hash] + # @option options [Boolean] :repopulate (true) whether or not to repopulate + # the instance fields + # @return [void] + # @note This is public, but methods like {#delete_rpath} should be preferred. + # Setting `repopulate` to false **will leave the instance in an + # inconsistent state** unless {#populate_fields} is called **immediately** + # afterwards. 
+ def delete_command(lc, options = {}) + @raw_data.slice!(lc.view.offset, lc.cmdsize) + + # update Mach-O header fields to account for deleted load command + update_ncmds(ncmds - 1) + update_sizeofcmds(sizeofcmds - lc.cmdsize) + + # pad the space after the load commands to preserve offsets + @raw_data.insert(header.class.bytesize + sizeofcmds - lc.cmdsize, Utils.nullpad(lc.cmdsize)) + + populate_fields if options.fetch(:repopulate, true) + end + + # Populate the instance's fields with the raw Mach-O data. + # @return [void] + # @note This method is public, but should (almost) never need to be called. + # The exception to this rule is when methods like {#add_command} and + # {#delete_command} have been called with `repopulate = false`. + def populate_fields + @header = populate_mach_header + @load_commands = populate_load_commands + end + + # All load commands responsible for loading dylibs. + # @return [Array<LoadCommands::DylibCommand>] an array of DylibCommands + def dylib_load_commands + load_commands.select { |lc| LoadCommands::DYLIB_LOAD_COMMANDS.include?(lc.type) } + end + + # All segment load commands in the Mach-O. + # @return [Array<LoadCommands::SegmentCommand>] if the Mach-O is 32-bit + # @return [Array<LoadCommands::SegmentCommand64>] if the Mach-O is 64-bit + def segments + if magic32? + command(:LC_SEGMENT) + else + command(:LC_SEGMENT_64) + end + end + + # The segment alignment for the Mach-O. Guesses conservatively. + # @return [Integer] the alignment, as a power of 2 + # @note This is **not** the same as {#alignment}! + # @note See `get_align` and `get_align_64` in `cctools/misc/lipo.c` + def segment_alignment + # special cases: 12 for x86/64/PPC/PP64, 14 for ARM/ARM64 + return 12 if %i[i386 x86_64 ppc ppc64].include?(cputype) + return 14 if %i[arm arm64].include?(cputype) + + cur_align = Sections::MAX_SECT_ALIGN + + segments.each do |segment| + if filetype == :object + # start with the smallest alignment, and work our way up + align = magic32? ? 2 : 3 + segment.sections.each do |section| + align = section.align unless section.align <= align + end + else + align = segment.guess_align + end + cur_align = align if align < cur_align + end + + cur_align + end + + # The Mach-O's dylib ID, or `nil` if not a dylib. + # @example + # file.dylib_id # => 'libBar.dylib' + # @return [String, nil] the Mach-O's dylib ID + def dylib_id + return unless dylib? + + dylib_id_cmd = command(:LC_ID_DYLIB).first + + dylib_id_cmd.name.to_s + end + + # Changes the Mach-O's dylib ID to `new_id`. Does nothing if not a dylib. + # @example + # file.change_dylib_id("libFoo.dylib") + # @param new_id [String] the dylib's new ID + # @param _options [Hash] + # @return [void] + # @raise [ArgumentError] if `new_id` is not a String + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#change_dylib_id} + def change_dylib_id(new_id, _options = {}) + raise ArgumentError, "new ID must be a String" unless new_id.is_a?(String) + return unless dylib? + + old_lc = command(:LC_ID_DYLIB).first + raise DylibIdMissingError unless old_lc + + new_lc = LoadCommands::LoadCommand.create(:LC_ID_DYLIB, new_id, + old_lc.timestamp, + old_lc.current_version, + old_lc.compatibility_version) + + replace_command(old_lc, new_lc) + end + + alias dylib_id= change_dylib_id + + # All shared libraries linked to the Mach-O. 
+ # @return [Array<String>] an array of all shared libraries + def linked_dylibs + # Some linkers produce multiple `LC_LOAD_DYLIB` load commands for the same + # library, but at this point we're really only interested in a list of + # unique libraries this Mach-O file links to, thus: `uniq`. (This is also + # for consistency with `FatFile` that merges this list across all archs.) + dylib_load_commands.map(&:name).map(&:to_s).uniq + end + + # Changes the shared library `old_name` to `new_name` + # @example + # file.change_install_name("abc.dylib", "def.dylib") + # @param old_name [String] the shared library's old name + # @param new_name [String] the shared library's new name + # @param _options [Hash] + # @return [void] + # @raise [DylibUnknownError] if no shared library has the old name + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#change_install_name} + def change_install_name(old_name, new_name, _options = {}) + old_lc = dylib_load_commands.find { |d| d.name.to_s == old_name } + raise DylibUnknownError, old_name if old_lc.nil? + + new_lc = LoadCommands::LoadCommand.create(old_lc.type, new_name, + old_lc.timestamp, + old_lc.current_version, + old_lc.compatibility_version) + + replace_command(old_lc, new_lc) + end + + alias change_dylib change_install_name + + # All runtime paths searched by the dynamic linker for the Mach-O. + # @return [Array<String>] an array of all runtime paths + def rpaths + command(:LC_RPATH).map(&:path).map(&:to_s) + end + + # Changes the runtime path `old_path` to `new_path` + # @example + # file.change_rpath("/usr/lib", "/usr/local/lib") + # @param old_path [String] the old runtime path + # @param new_path [String] the new runtime path + # @param _options [Hash] + # @return [void] + # @raise [RpathUnknownError] if no such old runtime path exists + # @raise [RpathExistsError] if the new runtime path already exists + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#change_rpath} + def change_rpath(old_path, new_path, _options = {}) + old_lc = command(:LC_RPATH).find { |r| r.path.to_s == old_path } + raise RpathUnknownError, old_path if old_lc.nil? + raise RpathExistsError, new_path if rpaths.include?(new_path) + + new_lc = LoadCommands::LoadCommand.create(:LC_RPATH, new_path) + + delete_rpath(old_path) + insert_command(old_lc.view.offset, new_lc) + end + + # Add the given runtime path to the Mach-O. + # @example + # file.rpaths # => ["/lib"] + # file.add_rpath("/usr/lib") + # file.rpaths # => ["/lib", "/usr/lib"] + # @param path [String] the new runtime path + # @param _options [Hash] + # @return [void] + # @raise [RpathExistsError] if the runtime path already exists + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#add_rpath} + def add_rpath(path, _options = {}) + raise RpathExistsError, path if rpaths.include?(path) + + rpath_cmd = LoadCommands::LoadCommand.create(:LC_RPATH, path) + add_command(rpath_cmd) + end + + # Delete the given runtime path from the Mach-O. 
+ # @example + # file.rpaths # => ["/lib"] + # file.delete_rpath("/lib") + # file.rpaths # => [] + # @param path [String] the runtime path to delete + # @param _options [Hash] + # @return void + # @raise [RpathUnknownError] if no such runtime path exists + # @note `_options` is currently unused and is provided for signature + # compatibility with {MachO::FatFile#delete_rpath} + def delete_rpath(path, _options = {}) + rpath_cmds = command(:LC_RPATH).select { |r| r.path.to_s == path } + raise RpathUnknownError, path if rpath_cmds.empty? + + # delete the commands in reverse order, offset descending. + rpath_cmds.reverse_each { |cmd| delete_command(cmd) } + end + + # Write all Mach-O data to the given filename. + # @param filename [String] the file to write to + # @return [void] + def write(filename) + File.open(filename, "wb") { |f| f.write(@raw_data) } + end + + # Write all Mach-O data to the file used to initialize the instance. + # @return [void] + # @raise [MachOError] if the instance was initialized without a file + # @note Overwrites all data in the file! + def write! + raise MachOError, "no initial file to write to" if @filename.nil? + + File.open(@filename, "wb") { |f| f.write(@raw_data) } + end + + # @return [Hash] a hash representation of this {MachOFile} + def to_h + { + "header" => header.to_h, + "load_commands" => load_commands.map(&:to_h), + } + end + + private + + # The file's Mach-O header structure. + # @return [Headers::MachHeader] if the Mach-O is 32-bit + # @return [Headers::MachHeader64] if the Mach-O is 64-bit + # @raise [TruncatedFileError] if the file is too small to have a valid header + # @api private + def populate_mach_header + # the smallest Mach-O header is 28 bytes + raise TruncatedFileError if @raw_data.size < 28 + + magic = populate_and_check_magic + mh_klass = Utils.magic32?(magic) ? Headers::MachHeader : Headers::MachHeader64 + mh = mh_klass.new_from_bin(endianness, @raw_data[0, mh_klass.bytesize]) + + check_cputype(mh.cputype) + check_cpusubtype(mh.cputype, mh.cpusubtype) + check_filetype(mh.filetype) + + mh + end + + # Read just the file's magic number and check its validity. + # @return [Integer] the magic + # @raise [MagicError] if the magic is not valid Mach-O magic + # @raise [FatBinaryError] if the magic is for a Fat file + # @api private + def populate_and_check_magic + magic = @raw_data[0..3].unpack1("N") + + raise MagicError, magic unless Utils.magic?(magic) + raise FatBinaryError if Utils.fat_magic?(magic) + + @endianness = Utils.little_magic?(magic) ? :little : :big + + magic + end + + # Check the file's CPU type. + # @param cputype [Integer] the CPU type + # @raise [CPUTypeError] if the CPU type is unknown + # @api private + def check_cputype(cputype) + raise CPUTypeError, cputype unless Headers::CPU_TYPES.key?(cputype) + end + + # Check the file's CPU type/subtype pair. + # @param cpusubtype [Integer] the CPU subtype + # @raise [CPUSubtypeError] if the CPU sub-type is unknown + # @api private + def check_cpusubtype(cputype, cpusubtype) + # Only check sub-type w/o capability bits (see `populate_mach_header`). + raise CPUSubtypeError.new(cputype, cpusubtype) unless Headers::CPU_SUBTYPES[cputype].key?(cpusubtype) + end + + # Check the file's type. + # @param filetype [Integer] the file type + # @raise [FiletypeError] if the file type is unknown + # @api private + def check_filetype(filetype) + raise FiletypeError, filetype unless Headers::MH_FILETYPES.key?(filetype) + end + + # All load commands in the file. 
+ # @return [Array<LoadCommands::LoadCommand>] an array of load commands + # @raise [LoadCommandError] if an unknown load command is encountered + # @api private + def populate_load_commands + permissive = options.fetch(:permissive, false) + offset = header.class.bytesize + load_commands = [] + + header.ncmds.times do + fmt = Utils.specialize_format("L=", endianness) + cmd = @raw_data.slice(offset, 4).unpack1(fmt) + cmd_sym = LoadCommands::LOAD_COMMANDS[cmd] + + raise LoadCommandError, cmd unless cmd_sym || permissive + + # If we're here, then either cmd_sym represents a valid load + # command *or* we're in permissive mode. + klass = if (klass_str = LoadCommands::LC_STRUCTURES[cmd_sym]) + LoadCommands.const_get klass_str + else + LoadCommands::LoadCommand + end + + view = MachOView.new(@raw_data, endianness, offset) + command = klass.new_from_bin(view) + + load_commands << command + offset += command.cmdsize + end + + load_commands + end + + # The low file offset (offset to first section data). + # @return [Integer] the offset + # @api private + def low_fileoff + offset = @raw_data.size + + segments.each do |seg| + seg.sections.each do |sect| + next if sect.empty? + next if sect.flag?(:S_ZEROFILL) + next if sect.flag?(:S_THREAD_LOCAL_ZEROFILL) + next unless sect.offset < offset + + offset = sect.offset + end + end + + offset + end + + # Updates the number of load commands in the raw data. + # @param ncmds [Integer] the new number of commands + # @return [void] + # @api private + def update_ncmds(ncmds) + fmt = Utils.specialize_format("L=", endianness) + ncmds_raw = [ncmds].pack(fmt) + @raw_data[16..19] = ncmds_raw + end + + # Updates the size of all load commands in the raw data. + # @param size [Integer] the new size, in bytes + # @return [void] + # @api private + def update_sizeofcmds(size) + fmt = Utils.specialize_format("L=", endianness) + size_raw = [size].pack(fmt) + @raw_data[20..23] = size_raw + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/sections.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/sections.rb new file mode 100644 index 0000000..d53811a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/sections.rb @@ -0,0 +1,207 @@ +# frozen_string_literal: true + +module MachO + # Classes and constants for parsing sections in Mach-O binaries. 
+ module Sections
+ # type mask
+ SECTION_TYPE = 0x000000ff
+
+ # attributes mask
+ SECTION_ATTRIBUTES = 0xffffff00
+
+ # user settable attributes mask
+ SECTION_ATTRIBUTES_USR = 0xff000000
+
+ # system settable attributes mask
+ SECTION_ATTRIBUTES_SYS = 0x00ffff00
+
+ # maximum specifiable section alignment, as a power of 2
+ # @note see `MAXSECTALIGN` macro in `cctools/misc/lipo.c`
+ MAX_SECT_ALIGN = 15
+
+ # association of section flag symbols to values
+ # @api private
+ SECTION_FLAGS = {
+ :S_REGULAR => 0x0,
+ :S_ZEROFILL => 0x1,
+ :S_CSTRING_LITERALS => 0x2,
+ :S_4BYTE_LITERALS => 0x3,
+ :S_8BYTE_LITERALS => 0x4,
+ :S_LITERAL_POINTERS => 0x5,
+ :S_NON_LAZY_SYMBOL_POINTERS => 0x6,
+ :S_LAZY_SYMBOL_POINTERS => 0x7,
+ :S_SYMBOL_STUBS => 0x8,
+ :S_MOD_INIT_FUNC_POINTERS => 0x9,
+ :S_MOD_TERM_FUNC_POINTERS => 0xa,
+ :S_COALESCED => 0xb,
+ :S_GB_ZEROFILL => 0xc,
+ :S_INTERPOSING => 0xd,
+ :S_16BYTE_LITERALS => 0xe,
+ :S_DTRACE_DOF => 0xf,
+ :S_LAZY_DYLIB_SYMBOL_POINTERS => 0x10,
+ :S_THREAD_LOCAL_REGULAR => 0x11,
+ :S_THREAD_LOCAL_ZEROFILL => 0x12,
+ :S_THREAD_LOCAL_VARIABLES => 0x13,
+ :S_THREAD_LOCAL_VARIABLE_POINTERS => 0x14,
+ :S_THREAD_LOCAL_INIT_FUNCTION_POINTERS => 0x15,
+ :S_ATTR_PURE_INSTRUCTIONS => 0x80000000,
+ :S_ATTR_NO_TOC => 0x40000000,
+ :S_ATTR_STRIP_STATIC_SYMS => 0x20000000,
+ :S_ATTR_NO_DEAD_STRIP => 0x10000000,
+ :S_ATTR_LIVE_SUPPORT => 0x08000000,
+ :S_ATTR_SELF_MODIFYING_CODE => 0x04000000,
+ :S_ATTR_DEBUG => 0x02000000,
+ :S_ATTR_SOME_INSTRUCTIONS => 0x00000400,
+ :S_ATTR_EXT_RELOC => 0x00000200,
+ :S_ATTR_LOC_RELOC => 0x00000100,
+ }.freeze
+
+ # association of section name symbols to names
+ # @api private
+ SECTION_NAMES = {
+ :SECT_TEXT => "__text",
+ :SECT_FVMLIB_INIT0 => "__fvmlib_init0",
+ :SECT_FVMLIB_INIT1 => "__fvmlib_init1",
+ :SECT_DATA => "__data",
+ :SECT_BSS => "__bss",
+ :SECT_COMMON => "__common",
+ :SECT_OBJC_SYMBOLS => "__symbol_table",
+ :SECT_OBJC_MODULES => "__module_info",
+ :SECT_OBJC_STRINGS => "__selector_strs",
+ :SECT_OBJC_REFS => "__selector_refs",
+ :SECT_ICON_HEADER => "__header",
+ :SECT_ICON_TIFF => "__tiff",
+ }.freeze
+
+ # Represents a section of a segment for 32-bit architectures.
+ class Section < MachOStructure + # @return [String] the name of the section, including null pad bytes + attr_reader :sectname + + # @return [String] the name of the segment's section, including null + # pad bytes + attr_reader :segname + + # @return [Integer] the memory address of the section + attr_reader :addr + + # @return [Integer] the size, in bytes, of the section + attr_reader :size + + # @return [Integer] the file offset of the section + attr_reader :offset + + # @return [Integer] the section alignment (power of 2) of the section + attr_reader :align + + # @return [Integer] the file offset of the section's relocation entries + attr_reader :reloff + + # @return [Integer] the number of relocation entries + attr_reader :nreloc + + # @return [Integer] flags for type and attributes of the section + attr_reader :flags + + # @return [void] reserved (for offset or index) + attr_reader :reserved1 + + # @return [void] reserved (for count or sizeof) + attr_reader :reserved2 + + # @see MachOStructure::FORMAT + FORMAT = "Z16Z16L=9" + + # @see MachOStructure::SIZEOF + SIZEOF = 68 + + # @api private + def initialize(sectname, segname, addr, size, offset, align, reloff, + nreloc, flags, reserved1, reserved2) + super() + @sectname = sectname + @segname = segname + @addr = addr + @size = size + @offset = offset + @align = align + @reloff = reloff + @nreloc = nreloc + @flags = flags + @reserved1 = reserved1 + @reserved2 = reserved2 + end + + # @return [String] the section's name + def section_name + sectname + end + + # @return [String] the parent segment's name + def segment_name + segname + end + + # @return [Boolean] whether the section is empty (i.e, {size} is 0) + def empty? + size.zero? + end + + # @example + # puts "this section is regular" if sect.flag?(:S_REGULAR) + # @param flag [Symbol] a section flag symbol + # @return [Boolean] whether the flag is present in the section's {flags} + def flag?(flag) + flag = SECTION_FLAGS[flag] + + return false if flag.nil? + + flags & flag == flag + end + + # @return [Hash] a hash representation of this {Section} + def to_h + { + "sectname" => sectname, + "segname" => segname, + "addr" => addr, + "size" => size, + "offset" => offset, + "align" => align, + "reloff" => reloff, + "nreloc" => nreloc, + "flags" => flags, + "reserved1" => reserved1, + "reserved2" => reserved2, + }.merge super + end + end + + # Represents a section of a segment for 64-bit architectures. + class Section64 < Section + # @return [void] reserved + attr_reader :reserved3 + + # @see MachOStructure::FORMAT + FORMAT = "Z16Z16Q=2L=8" + + # @see MachOStructure::SIZEOF + SIZEOF = 80 + + # @api private + def initialize(sectname, segname, addr, size, offset, align, reloff, + nreloc, flags, reserved1, reserved2, reserved3) + super(sectname, segname, addr, size, offset, align, reloff, + nreloc, flags, reserved1, reserved2) + @reserved3 = reserved3 + end + + # @return [Hash] a hash representation of this {Section64} + def to_h + { + "reserved3" => reserved3, + }.merge super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/structure.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/structure.rb new file mode 100644 index 0000000..0356d60 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/structure.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module MachO + # A general purpose pseudo-structure. + # @abstract + class MachOStructure + # The String#unpack format of the data structure. 
+ # @return [String] the unpacking format + # @api private + FORMAT = "" + + # The size of the data structure, in bytes. + # @return [Integer] the size, in bytes + # @api private + SIZEOF = 0 + + # @return [Integer] the size, in bytes, of the represented structure. + def self.bytesize + self::SIZEOF + end + + # @param endianness [Symbol] either `:big` or `:little` + # @param bin [String] the string to be unpacked into the new structure + # @return [MachO::MachOStructure] the resulting structure + # @api private + def self.new_from_bin(endianness, bin) + format = Utils.specialize_format(self::FORMAT, endianness) + + new(*bin.unpack(format)) + end + + # @return [Hash] a hash representation of this {MachOStructure}. + def to_h + { + "structure" => { + "format" => self.class::FORMAT, + "bytesize" => self.class.bytesize, + }, + } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/tools.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/tools.rb new file mode 100644 index 0000000..2bb05fd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/tools.rb @@ -0,0 +1,111 @@ +# frozen_string_literal: true + +module MachO + # A collection of convenient methods for common operations on Mach-O and Fat + # binaries. + module Tools + # @param filename [String] the Mach-O or Fat binary being read + # @return [Array<String>] an array of all dylibs linked to the binary + def self.dylibs(filename) + file = MachO.open(filename) + + file.linked_dylibs + end + + # Changes the dylib ID of a Mach-O or Fat binary, overwriting the source + # file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param new_id [String] the new dylib ID for the binary + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.change_dylib_id(filename, new_id, options = {}) + file = MachO.open(filename) + + file.change_dylib_id(new_id, options) + file.write! + end + + # Changes a shared library install name in a Mach-O or Fat binary, + # overwriting the source file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param old_name [String] the old shared library name + # @param new_name [String] the new shared library name + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.change_install_name(filename, old_name, new_name, options = {}) + file = MachO.open(filename) + + file.change_install_name(old_name, new_name, options) + file.write! + end + + # Changes a runtime path in a Mach-O or Fat binary, overwriting the source + # file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param old_path [String] the old runtime path + # @param new_path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.change_rpath(filename, old_path, new_path, options = {}) + file = MachO.open(filename) + + file.change_rpath(old_path, new_path, options) + file.write! + end + + # Add a runtime path to a Mach-O or Fat binary, overwriting the source file. 
+ # @param filename [String] the Mach-O or Fat binary being modified + # @param new_path [String] the new runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.add_rpath(filename, new_path, options = {}) + file = MachO.open(filename) + + file.add_rpath(new_path, options) + file.write! + end + + # Delete a runtime path from a Mach-O or Fat binary, overwriting the source + # file. + # @param filename [String] the Mach-O or Fat binary being modified + # @param old_path [String] the old runtime path + # @param options [Hash] + # @option options [Boolean] :strict (true) whether or not to fail loudly + # with an exception if the change cannot be performed + # @return [void] + def self.delete_rpath(filename, old_path, options = {}) + file = MachO.open(filename) + + file.delete_rpath(old_path, options) + file.write! + end + + # Merge multiple Mach-Os into one universal (Fat) binary. + # @param filename [String] the fat binary to create + # @param files [Array<String>] the files to merge + # @param fat64 [Boolean] whether to use {Headers::FatArch64}s to represent each slice + # @return [void] + def self.merge_machos(filename, *files, fat64: false) + machos = files.map do |file| + macho = MachO.open(file) + case macho + when MachO::MachOFile + macho + else + macho.machos + end + end.flatten + + fat_macho = MachO::FatFile.new_from_machos(*machos, :fat64 => fat64) + fat_macho.write(filename) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/utils.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/utils.rb new file mode 100644 index 0000000..362e4db --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/utils.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +module MachO + # A collection of utility functions used throughout ruby-macho. + module Utils + # Rounds a value to the next multiple of the given round. + # @param value [Integer] the number being rounded + # @param round [Integer] the number being rounded with + # @return [Integer] the rounded value + # @see http://www.opensource.apple.com/source/cctools/cctools-870/libstuff/rnd.c + def self.round(value, round) + round -= 1 + value += round + value &= ~round + value + end + + # Returns the number of bytes needed to pad the given size to the given + # alignment. + # @param size [Integer] the unpadded size + # @param alignment [Integer] the number to alignment the size with + # @return [Integer] the number of pad bytes required + def self.padding_for(size, alignment) + round(size, alignment) - size + end + + # Returns a string of null bytes of the requested (non-negative) size + # @param size [Integer] the size of the nullpad + # @return [String] the null string (or empty string, for `size = 0`) + # @raise [ArgumentError] if a non-positive nullpad is requested + def self.nullpad(size) + raise ArgumentError, "size < 0: #{size}" if size.negative? + + "\x00" * size + end + + # Converts an abstract (native-endian) String#unpack format to big or + # little. + # @param format [String] the format string being converted + # @param endianness [Symbol] either `:big` or `:little` + # @return [String] the converted string + def self.specialize_format(format, endianness) + modifier = endianness == :big ? ">" : "<" + format.tr("=", modifier) + end + + # Packs tagged strings into an aligned payload. 
+ # @param fixed_offset [Integer] the baseline offset for the first packed + # string + # @param alignment [Integer] the alignment value to use for packing + # @param strings [Hash] the labeled strings to pack + # @return [Array<String, Hash>] the packed string and labeled offsets + def self.pack_strings(fixed_offset, alignment, strings = {}) + offsets = {} + next_offset = fixed_offset + payload = +"" + + strings.each do |key, string| + offsets[key] = next_offset + payload << string + payload << Utils.nullpad(1) + next_offset += string.bytesize + 1 + end + + payload << Utils.nullpad(padding_for(fixed_offset + payload.bytesize, alignment)) + [payload.freeze, offsets] + end + + # Compares the given number to valid Mach-O magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid Mach-O magic number + def self.magic?(num) + Headers::MH_MAGICS.key?(num) + end + + # Compares the given number to valid Fat magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid Fat magic number + def self.fat_magic?(num) + [Headers::FAT_MAGIC, Headers::FAT_MAGIC_64].include? num + end + + # Compares the given number to valid 32-bit Fat magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 32-bit fat magic number + def self.fat_magic32?(num) + num == Headers::FAT_MAGIC + end + + # Compares the given number to valid 64-bit Fat magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 64-bit fat magic number + def self.fat_magic64?(num) + num == Headers::FAT_MAGIC_64 + end + + # Compares the given number to valid 32-bit Mach-O magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 32-bit magic number + def self.magic32?(num) + [Headers::MH_MAGIC, Headers::MH_CIGAM].include? num + end + + # Compares the given number to valid 64-bit Mach-O magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid 64-bit magic number + def self.magic64?(num) + [Headers::MH_MAGIC_64, Headers::MH_CIGAM_64].include? num + end + + # Compares the given number to valid little-endian magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid little-endian magic number + def self.little_magic?(num) + [Headers::MH_CIGAM, Headers::MH_CIGAM_64].include? num + end + + # Compares the given number to valid big-endian magic numbers. + # @param num [Integer] the number being checked + # @return [Boolean] whether `num` is a valid big-endian magic number + def self.big_magic?(num) + [Headers::MH_MAGIC, Headers::MH_MAGIC_64].include? num + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/view.rb b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/view.rb new file mode 100644 index 0000000..ad68ee3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/ruby-macho-2.5.1/lib/macho/view.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +module MachO + # A representation of some unspecified Mach-O data. + class MachOView + # @return [String] the raw Mach-O data + attr_reader :raw_data + + # @return [Symbol] the endianness of the data (`:big` or `:little`) + attr_reader :endianness + + # @return [Integer] the offset of the relevant data (in {#raw_data}) + attr_reader :offset + + # Creates a new MachOView. 
+ # @param raw_data [String] the raw Mach-O data + # @param endianness [Symbol] the endianness of the data + # @param offset [Integer] the offset of the relevant data + def initialize(raw_data, endianness, offset) + @raw_data = raw_data + @endianness = endianness + @offset = offset + end + + # @return [Hash] a hash representation of this {MachOView}. + def to_h + { + "endianness" => endianness, + "offset" => offset, + } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.rspec b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.rspec new file mode 100644 index 0000000..20f9c85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.rspec @@ -0,0 +1,2 @@ +--require spec_helper +--format progress diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.travis.yml new file mode 100644 index 0000000..5137aef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.travis.yml @@ -0,0 +1,46 @@ +dist: trusty +sudo: required +language: ruby +rvm: + - 2.4.0 + - 2.3.3 + - 2.2.6 + - 2.1.9 + - 2.0.0 + - 1.9.3 + - ruby-head + - jruby-9.1.5.0 + - jruby-1.7.26 + - jruby-head + - rubinius-3 + - rubinius-2 +jdk: + - openjdk7 + - oraclejdk8 +matrix: + exclude: + - rvm: 2.4.0 + jdk: oraclejdk8 + - rvm: 2.3.3 + jdk: oraclejdk8 + - rvm: 2.2.6 + jdk: oraclejdk8 + - rvm: 2.1.9 + jdk: oraclejdk8 + - rvm: 2.0.0 + jdk: oraclejdk8 + - rvm: 1.9.3 + jdk: oraclejdk8 + - rvm: ruby-head + jdk: oraclejdk8 + - rvm: rubinius-3 + jdk: oraclejdk8 + - rvm: rubinius-2 + jdk: oraclejdk8 + allow_failures: + - rvm: 1.9.3 + - rvm: ruby-head + - rvm: jruby-1.7.26 + - rvm: jruby-head + - rvm: rubinius-2 +script: "bundle exec rake test" diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.yardopts b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.yardopts new file mode 100644 index 0000000..ee7982a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/.yardopts @@ -0,0 +1,13 @@ +--protected +--no-private +--embed-mixins +--output-dir ./yardoc +--markup markdown +--title=Concurrent Ruby +--template default +--template-path ./yard-template + +./lib/**/*.rb +- +README.md +LICENSE diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/Gemfile b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/Gemfile new file mode 100644 index 0000000..d73c1f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/Gemfile @@ -0,0 +1,20 @@ +source 'https://rubygems.org' + +gemspec + +group :development, :test do + gem 'rspec', '~> 3.2.0' + gem 'simplecov', '~> 0.9.2', :require => false + if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('2.0.0') + gem 'term-ansicolor', '~> 1.3.2', :require => false + gem 'tins', '~> 1.6.0', :require => false + end + gem 'coveralls', '~> 0.7.11', :require => false +end + +group :documentation do + gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false + gem 'yard', '~> 0.8.7.6', :require => false + gem 'inch', '~> 0.5.10', :platforms => :mri, :require => false + gem 'redcarpet', '~> 3.2.2', platforms: :mri # understands github markdown +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/LICENSE b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/LICENSE new file mode 100644 index 0000000..5336718 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/LICENSE @@ -0,0 +1,144 @@ +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. 
Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as +defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that +is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that +control, are controlled by, or are under common control with that entity. For the purposes +of this definition, "control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or otherwise, or (ii) ownership +of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of +such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by +this License. + +"Source" form shall mean the preferred form for making modifications, including but not +limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of +a Source form, including but not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available +under the License, as indicated by a copyright notice that is included in or attached to the +work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on +(or derived from) the Work and for which the editorial revisions, annotations, elaborations, +or other modifications represent, as a whole, an original work of authorship. For the +purposes of this License, Derivative Works shall not include works that remain separable +from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works +thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work +and any modifications or additions to that Work or Derivative Works thereof, that is +intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by +an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the +purposes of this definition, "submitted" means any form of electronic, verbal, or written +communication sent to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and issue tracking +systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and +improving the Work, but excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a +Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each +Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, +royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such Derivative +Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each +Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, +royalty-free, irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license +applies only to those patent claims licensable by such Contributor that are necessarily +infringed by their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent litigation against +any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or +a Contribution incorporated within the Work constitutes direct or contributory patent +infringement, then any patent licenses granted to You under this License for that Work shall +terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works +thereof in any medium, with or without modifications, and in Source or Object form, provided +that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; +and + +You must cause any modified files to carry prominent notices stating that You changed the +files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all +copyright, patent, trademark, and attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative +Works that You distribute must include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not pertain to any part of the +Derivative Works, in at least one of the following places: within a NOTICE text file +distributed as part of the Derivative Works; within the Source form or documentation, if +provided along with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of the NOTICE +file are for informational purposes only and do not modify the License. You may add Your own +attribution notices within Derivative Works that You distribute, alongside or as an addendum +to the NOTICE text from the Work, provided that such additional attribution notices cannot +be construed as modifying the License. You may add Your own copyright statement to Your +modifications and may provide additional or different license terms and conditions for use, +reproduction, or distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution +intentionally submitted for inclusion in the Work by You to the Licensor shall be under the +terms and conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of any +separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for reasonable and +customary use in describing the origin of the Work and reproducing the content of the NOTICE +file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, +Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, +without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, +MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for +determining the appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort +(including negligence), contract, or otherwise, unless required by applicable law (such as +deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, or +consequential damages of any character arising as a result of this License or out of the use +or inability to use the Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all other commercial damages or +losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative +Works thereof, You may choose to offer, and charge a fee for, acceptance of support, +warranty, indemnity, or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only on Your own behalf and on +Your sole responsibility, not on behalf of any other Contributor, and only if You agree to +indemnify, defend, and hold each Contributor harmless for any liability incurred by, or +claims asserted against, such Contributor by reason of your accepting any such warranty or +additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/README.md b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/README.md new file mode 100644 index 0000000..535a791 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/README.md @@ -0,0 +1,60 @@ +# Threadsafe (Inactive, code moved to concurrent-ruby gem and repo.) 
+ +[![Gem Version](https://badge.fury.io/rb/thread_safe.svg)](http://badge.fury.io/rb/thread_safe) [![Build Status](https://travis-ci.org/ruby-concurrency/thread_safe.svg?branch=master)](https://travis-ci.org/ruby-concurrency/thread_safe) [![Coverage Status](https://img.shields.io/coveralls/ruby-concurrency/thread_safe/master.svg)](https://coveralls.io/r/ruby-concurrency/thread_safe) [![Code Climate](https://codeclimate.com/github/ruby-concurrency/thread_safe.svg)](https://codeclimate.com/github/ruby-concurrency/thread_safe) [![Dependency Status](https://gemnasium.com/ruby-concurrency/thread_safe.svg)](https://gemnasium.com/ruby-concurrency/thread_safe) [![License](https://img.shields.io/badge/license-apache-green.svg)](http://opensource.org/licenses/MIT) [![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) + +A collection of thread-safe versions of common core Ruby classes. + +__This code base is now part of the concurrent-ruby gem +at https://github.com/ruby-concurrency/concurrent-ruby. +The code in this repository is no longer maintained.__ + +## Installation + +Add this line to your application's Gemfile: + + gem 'thread_safe' + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install thread_safe + +## Usage + +```ruby +require 'thread_safe' + +sa = ThreadSafe::Array.new # supports standard Array.new forms +sh = ThreadSafe::Hash.new # supports standard Hash.new forms +``` + +`ThreadSafe::Cache` also exists, as a hash-like object, and should have +much better performance characteristics esp. under high concurrency than +`ThreadSafe::Hash`. However, `ThreadSafe::Cache` is not strictly semantically +equivalent to a ruby `Hash` -- for instance, it does not necessarily retain +ordering by insertion time as `Hash` does. For most uses it should do fine +though, and we recommend you consider `ThreadSafe::Cache` instead of +`ThreadSafe::Hash` for your concurrency-safe hash needs. It understands some +options when created (depending on your ruby platform) that control some of the +internals - when unsure just leave them out: + + +```ruby +require 'thread_safe' + +cache = ThreadSafe::Cache.new +``` + +## Contributing + +1. Fork it +2. Clone it (`git clone git@github.com:you/thread_safe.git`) +3. Create your feature branch (`git checkout -b my-new-feature`) +4. Build the jar (`rake jar`) NOTE: Requires JRuby +5. Install dependencies (`bundle install`) +6. Commit your changes (`git commit -am 'Added some feature'`) +7. Push to the branch (`git push origin my-new-feature`) +8. Create new Pull Request diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/Rakefile b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/Rakefile new file mode 100644 index 0000000..3559a98 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/Rakefile @@ -0,0 +1,62 @@ +require 'bundler/gem_tasks' +require 'rspec' +require 'rspec/core/rake_task' + +## safely load all the rake tasks in the `tasks` directory +def safe_load(file) + begin + load file + rescue LoadError => ex + puts "Error loading rake tasks from '#{file}' but will continue..." 
+ puts ex.message + end +end + +Dir.glob('tasks/**/*.rake').each do |rakefile| + safe_load rakefile +end + +task :default => :test + +if defined?(JRUBY_VERSION) + require 'ant' + + directory 'pkg/classes' + directory 'pkg/tests' + + desc "Clean up build artifacts" + task :clean do + rm_rf "pkg/classes" + rm_rf "pkg/tests" + rm_rf "lib/thread_safe/jruby_cache_backend.jar" + end + + desc "Compile the extension" + task :compile => "pkg/classes" do |t| + ant.javac :srcdir => "ext", :destdir => t.prerequisites.first, + :source => "1.5", :target => "1.5", :debug => true, + :classpath => "${java.class.path}:${sun.boot.class.path}" + end + + desc "Build the jar" + task :jar => :compile do + ant.jar :basedir => "pkg/classes", :destfile => "lib/thread_safe/jruby_cache_backend.jar", :includes => "**/*.class" + end + + desc "Build test jar" + task 'test-jar' => 'pkg/tests' do |t| + ant.javac :srcdir => 'spec/src', :destdir => t.prerequisites.first, + :source => "1.5", :target => "1.5", :debug => true + + ant.jar :basedir => 'pkg/tests', :destfile => 'spec/package.jar', :includes => '**/*.class' + end + + task :package => [ :clean, :compile, :jar, 'test-jar' ] +else + # No need to package anything for non-jruby rubies + task :package +end + +RSpec::Core::RakeTask.new :test => :package do |t| + t.rspec_opts = '--color --backtrace --tag ~unfinished --seed 1 --format documentation ./spec' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/examples/bench_cache.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/examples/bench_cache.rb new file mode 100755 index 0000000..14171c9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/examples/bench_cache.rb @@ -0,0 +1,35 @@ +#!/usr/bin/env ruby -wKU + +require "benchmark" +require "thread_safe" + +hash = {} +cache = ThreadSafe::Cache.new + +ENTRIES = 10_000 + +ENTRIES.times do |i| + hash[i] = i + cache[i] = i +end + +TESTS = 40_000_000 +Benchmark.bmbm do |results| + key = rand(10_000) + + results.report('Hash#[]') do + TESTS.times { hash[key] } + end + + results.report('Cache#[]') do + TESTS.times { cache[key] } + end + + results.report('Hash#each_pair') do + (TESTS / ENTRIES).times { hash.each_pair {|k,v| v} } + end + + results.report('Cache#each_pair') do + (TESTS / ENTRIES).times { cache.each_pair {|k,v| v} } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java new file mode 100644 index 0000000..97f9d3f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java @@ -0,0 +1,245 @@ +package org.jruby.ext.thread_safe; + +import org.jruby.*; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap; +import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8; +import org.jruby.ext.thread_safe.jsr166e.nounsafe.*; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.Map; + +import static org.jruby.runtime.Visibility.PRIVATE; + +/** + * Native Java implementation to avoid the JI overhead. 
+ * + * @author thedarkone + */ +public class JRubyCacheBackendLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyClass jrubyRefClass = runtime.defineClassUnder("JRubyCacheBackend", runtime.getObject(), BACKEND_ALLOCATOR, runtime.getModule("ThreadSafe")); + jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); + jrubyRefClass.defineAnnotatedMethods(JRubyCacheBackend.class); + } + + private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyCacheBackend(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyCacheBackend", parent="Object") + public static class JRubyCacheBackend extends RubyObject { + // Defaults used by the CHM + static final int DEFAULT_INITIAL_CAPACITY = 16; + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); + + private ConcurrentHashMap<IRubyObject, IRubyObject> map; + + private static ConcurrentHashMap<IRubyObject, IRubyObject> newCHM(int initialCapacity, float loadFactor) { + if (CAN_USE_UNSAFE_CHM) { + return new ConcurrentHashMapV8<IRubyObject, IRubyObject>(initialCapacity, loadFactor); + } else { + return new org.jruby.ext.thread_safe.jsr166e.nounsafe.ConcurrentHashMapV8<IRubyObject, IRubyObject>(initialCapacity, loadFactor); + } + } + + private static ConcurrentHashMap<IRubyObject, IRubyObject> newCHM() { + return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + private static boolean canUseUnsafeCHM() { + try { + new org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8(); // force class load and initialization + return true; + } catch (Throwable t) { // ensuring we really do catch everything + // Doug's Unsafe setup errors always have this "Could not ini.." message + if (isCausedBySecurityException(t)) { + return false; + } + throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t)); + } + } + + private static boolean isCausedBySecurityException(Throwable t) { + while (t != null) { + if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { + return true; + } + t = t.getCause(); + } + return false; + } + + public JRubyCacheBackend(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + map = newCHM(); + return context.getRuntime().getNil(); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject options) { + map = toCHM(context, options); + return context.getRuntime().getNil(); + } + + private ConcurrentHashMap<IRubyObject, IRubyObject> toCHM(ThreadContext context, IRubyObject options) { + Ruby runtime = context.getRuntime(); + if (!options.isNil() && options.respondsTo("[]")) { + IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); + IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); + int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; + float loadFactor = !rLoadFactor.isNil() ? 
(float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; + return newCHM(initialCapacity, loadFactor); + } else { + return newCHM(); + } + } + + @JRubyMethod(name = "[]", required = 1) + public IRubyObject op_aref(ThreadContext context, IRubyObject key) { + IRubyObject value; + return ((value = map.get(key)) == null) ? context.getRuntime().getNil() : value; + } + + @JRubyMethod(name = {"[]="}, required = 2) + public IRubyObject op_aset(IRubyObject key, IRubyObject value) { + map.put(key, value); + return value; + } + + @JRubyMethod + public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { + IRubyObject result = map.putIfAbsent(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { + return map.computeIfAbsent(key, new ConcurrentHashMap.Fun<IRubyObject, IRubyObject>() { + @Override + public IRubyObject apply(IRubyObject key) { + return block.yieldSpecific(context); + } + }); + } + + @JRubyMethod + public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun<IRubyObject, IRubyObject, IRubyObject>() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun<IRubyObject, IRubyObject, IRubyObject>() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { + IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun<IRubyObject, IRubyObject, IRubyObject>() { + @Override + public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { + return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); + } + + @JRubyMethod(name = "key?", required = 1) + public RubyBoolean has_key_p(IRubyObject key) { + return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); + } + + @JRubyMethod + public IRubyObject key(IRubyObject value) { + final IRubyObject key = map.findKey(value); + return key == null ? getRuntime().getNil() : key; + } + + @JRubyMethod + public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { + IRubyObject result = map.replace(key, value); + return result == null ? 
getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { + IRubyObject result = map.put(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject delete(IRubyObject key) { + IRubyObject result = map.remove(key); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { + return getRuntime().newBoolean(map.remove(key, value)); + } + + @JRubyMethod + public IRubyObject clear() { + map.clear(); + return this; + } + + @JRubyMethod + public IRubyObject each_pair(ThreadContext context, Block block) { + for (Map.Entry<IRubyObject,IRubyObject> entry : map.entrySet()) { + block.yieldSpecific(context, entry.getKey(), entry.getValue()); + } + return this; + } + + @JRubyMethod + public RubyFixnum size(ThreadContext context) { + return context.getRuntime().newFixnum(map.size()); + } + + @JRubyMethod + public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { + return map.getValueOrDefault(key, defaultValue); + } + + @JRubyMethod(visibility = PRIVATE) + public JRubyCacheBackend initialize_copy(ThreadContext context, IRubyObject other) { + map = newCHM(); + return this; + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java new file mode 100644 index 0000000..90ae1a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java @@ -0,0 +1,31 @@ +package org.jruby.ext.thread_safe.jsr166e; + +import java.util.Map; +import java.util.Set; + +public interface ConcurrentHashMap<K, V> { + /** Interface describing a function of one argument */ + public interface Fun<A,T> { T apply(A a); } + /** Interface describing a function of two arguments */ + public interface BiFun<A,B,T> { T apply(A a, B b); } + + public V get(K key); + public V put(K key, V value); + public V putIfAbsent(K key, V value); + public V computeIfAbsent(K key, Fun<? super K, ? extends V> mf); + public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> mf); + public V compute(K key, BiFun<? super K, ? super V, ? extends V> mf); + public V merge(K key, V value, BiFun<? super V, ? super V, ? 
extends V> mf); + public boolean replace(K key, V oldVal, V newVal); + public V replace(K key, V value); + public boolean containsKey(K key); + public boolean remove(Object key, Object value); + public V remove(K key); + public void clear(); + public Set<Map.Entry<K,V>> entrySet(); + public int size(); + public V getValueOrDefault(Object key, V defaultValue); + + public boolean containsValue(V value); + public K findKey(V value); +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java new file mode 100644 index 0000000..c85b989 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java @@ -0,0 +1,3863 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package org.jruby.ext.thread_safe.jsr166e; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do <em>not</em> entail locking, + * and there is <em>not</em> any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + * <p>Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently <em>completed</em> update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * <em>happens-before</em> relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do <em>not</em> throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. 
Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + * <p>The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + * <p>A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + * <p>This class and its views and iterators implement all of the + * <em>optional</em> methods of the {@link Map} and {@link Iterator} + * interfaces. + * + * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class + * does <em>not</em> allow {@code null} to be used as a key or value. + * + * <p>ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). 
Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + * <ul> + * <li> forEach: Perform a given action on each element. + * A variant form applies a given transformation on each element + * before performing the action.</li> + * + * <li> search: Return the first available non-null result of + * applying a given function on each element; skipping further + * search when a result is found.</li> + * + * <li> reduce: Accumulate each element. The supplied reduction + * function cannot rely on ordering (more formally, it should be + * both associative and commutative). There are five variants: + * + * <ul> + * + * <li> Plain reductions. (There is not a form of this method for + * (key, value) function arguments since there is no corresponding + * return type.)</li> + * + * <li> Mapped reductions that accumulate the results of a given + * function applied to each element.</li> + * + * <li> Reductions to scalar doubles, longs, and ints, using a + * given basis value.</li> + * + * </li> + * </ul> + * </ul> + * + * <p>The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + * <p>Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + * <p>Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + * <p>Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. 
Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + * <p>Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + * <p>All arguments to all task methods must be non-null. + * + * <p><em>jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8.</em> + * + * <p>This class is a member of the + * <a href="{@docRoot}/../technotes/guides/collections/index.html"> + * Java Collections Framework</a>. + * + * @since 1.5 + * @author Doug Lea + * @param <K> the type of keys maintained by this map + * @param <V> the type of mapped values + */ +public class ConcurrentHashMapV8<K, V> + implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + * <p>This interface exports a subset of expected JDK8 + * functionality. + * + * <p>Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + * <pre> + * {@code ConcurrentHashMapV8<String, Long> m = ... + * // split as if have 8 * parallelism, for load balance + * int n = m.size(); + * int p = aForkJoinPool.getParallelism() * 8; + * int split = (n < p)? n : p; + * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null)); + * // ... + * static class SumValues extends RecursiveTask<Long> { + * final Spliterator<Long> s; + * final int split; // split while > 1 + * final SumValues nextJoin; // records forked subtasks to join + * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) { + * this.s = s; this.depth = depth; this.nextJoin = nextJoin; + * } + * public Long compute() { + * long sum = 0; + * SumValues subtasks = null; // fork subtasks + * for (int s = split >>> 1; s > 0; s >>>= 1) + * (subtasks = new SumValues(s.split(), s, subtasks)).fork(); + * while (s.hasNext()) // directly process remaining elements + * sum += s.next(); + * for (SumValues t = subtasks; t != null; t = t.nextJoin) + * sum += t.join(); // collect subtask results + * return sum; + * } + * } + * }</pre> + */ + public static interface Spliterator<T> extends Iterator<T> { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. 
After invoking this method, + * the current Spliterator will <em>not</em> produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator<T> split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. 
Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). + * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). 
Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). 
But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. + */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile Node[] table; + + /** + * The counter maintaining number of elements. + */ + private transient final LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. 
Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView<K,V> keySet; + private transient ValuesView<K,V> values; + private transient EntrySetView<K,V> entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment<K,V>[] segments; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(Node[] tab, int i) { // used by Iter + return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE); + } + + private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) { + return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v); + } + + private static final void setTabAt(Node[] tab, int i, Node v) { + UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. + */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. 
+ */ + final void tryAwaitLock(Node[] tab, int i) { + if (tab != null && i >= 0 && i < tab.length) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + + // Unsafe mechanics for casHash + private static final sun.misc.Unsafe UNSAFE; + private static final long hashOffset; + + static { + try { + UNSAFE = getUnsafe(); + Class<?> k = Node.class; + hashOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("hash")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable<T> + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) 
The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (Node[] tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
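+     *
+     * <p>For orientation, the four public methods defined later in this
+     * class reduce to the following calls (a rough guide only, no extra
+     * behavior is implied):
+     * <pre> {@code
+     * remove(k)              == internalReplace(k, null, null)
+     * remove(k, v)           == (internalReplace(k, null, v) != null)
+     * replace(k, v)          == internalReplace(k, v, null)
+     * replace(k, oldV, newV) == (internalReplace(k, newV, oldV) != null)}</pre>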
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
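+     *
+     * In rough outline (TreeBin relays, bin locking and resize checks are
+     * elided here; internalPut below is the canonical form), each of these
+     * methods loops like this:
+     *
+     * <pre> {@code
+     * for (Node[] tab = table;;) {
+     *     int i; Node f;
+     *     if (tab == null)
+     *         tab = initTable();                              // 1. create
+     *     else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+     *         if (casTabAt(tab, i, null, new Node(h, k, v, null)))
+     *             break;                                      // 2. CAS into empty bin
+     *     }
+     *     else if (f.hash == MOVED)
+     *         tab = (Node[])f.key;                            // 3. stale bin (or 4. TreeBin)
+     *     else {
+     *         // 5. lock the bin, re-validate, then scan and add or update
+     *     }
+     * }}</pre>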
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + 
replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun<? super K, ?> mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? 
extends V> mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map<?, ?> m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry<?, ?> entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two 
table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + Node[] tab; int n, sc; + while ((tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
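+     *
+     * For a bin at index i of the old n-slot table, each node either stays
+     * at index i or moves to index i + n in the doubled table, decided by a
+     * single hash bit (roughly, as splitBin below does it):
+     *
+     * <pre> {@code
+     * int bit = nextTab.length >>> 1;                     // == old length n
+     * int newIndex = ((p.hash & bit) == 0) ? i : i + bit;}</pre>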
+ * + * @return the new table + */ + private static final Node[] rebuild(Node[] tab) { + int n = tab.length; + Node[] nextTab = new Node[n << 1]; + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. + */ + private static void splitBin(Node[] nextTab, int i, Node e) { + int bit = nextTab.length >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. 
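+     *
+     * A half that ends up with at most TREE_THRESHOLD/2 nodes is converted
+     * back into a plain list rather than kept as a tree, along these lines
+     * (lt/lc are the low-half TreeBin and its node count):
+     *
+     * <pre> {@code
+     * Node ln;
+     * if (lc <= (TREE_THRESHOLD >>> 1)) {                 // too small for a tree
+     *     ln = null;
+     *     for (Node p = lt.first; p != null; p = p.next)
+     *         ln = new Node(p.hash, p.key, p.val, ln);
+     * } else
+     *     ln = new Node(MOVED, lt, null, null);           // keep the TreeBin
+     * setTabAt(nextTab, i, ln);}</pre>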
+ */ + private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { + int bit = nextTab.length >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. 
Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser<K,V,R> { + final ConcurrentHashMapV8<K, V> map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + Node[] tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8<K, V> map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser<K,V,?> it) { + ConcurrentHashMapV8<K, V> m; Node[] t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8<K, V> m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? 
i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. 
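+     *
+     * For example (the numbers are only illustrative), the initial table
+     * size is derived as roughly
+     * {@code tableSizeFor((int)(1.0 + initialCapacity / loadFactor))}, so
+     *
+     * <pre> {@code
+     * new ConcurrentHashMapV8<String, Object>(100, 0.75f, 16);}</pre>
+     *
+     * starts with a 256-bin table (the next power of two at or above 134).
+     *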
+ * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static <K> KeySetView<K,Boolean> newKeySet() { + return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { + return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + * <p>More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? 
defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + * <p>The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. 
+ * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map<? extends K, ? extends V> m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + * <pre> {@code + * if (map.containsKey(key)) + * return map.get(key); + * value = mappingFunction.apply(key); + * if (value != null) + * map.put(key, value); + * return value;}</pre> + * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + * <pre> {@code + * map.computeIfAbsent(key, new Fun<K, V>() { + * public V map(K k) { return new Value(f(k)); }});}</pre> + * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun<? super K, ? extends V> mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + * <pre> {@code + * if (map.containsKey(key)) { + * value = remappingFunction.apply(key, map.get(key)); + * if (value != null) + * map.put(key, value); + * else + * map.remove(key); + * } + * }</pre> + * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. 
For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + * <pre> {@code + * value = remappingFunction.apply(key, map.get(key)); + * if (value != null) + * map.put(key, value); + * else + * map.remove(key); + * }</pre> + * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * <pre> {@code + * Map<Key, String> map = ...; + * final String msg = ...; + * map.compute(key, new BiFun<Key, String, String>() { + * public String apply(Key k, String v) { + * return (v == null) ? msg : v + msg;});}}</pre> + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + * <pre> {@code + * if (!map.containsKey(key)) + * map.put(value); + * else { + * newValue = remappingFunction.apply(map.get(key), value); + * if (value != null) + * map.put(key, value); + * else + * map.remove(key); + * } + * }</pre> + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. 
If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView<K,V> keySet() { + KeySetView<K,V> ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView<K,V> keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView<K,V>(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. 
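+     *
+     * <p>Assuming the view is used like any other {@link Collection}, a
+     * typical weakly consistent traversal is simply (where {@code process}
+     * stands for any consumer of the values):
+     *
+     * <pre> {@code
+     * for (V v : map.values())
+     *     process(v);}</pre>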
+ */ + public ValuesView<K,V> values() { + ValuesView<K,V> vs = values; + return (vs != null) ? vs : (values = new ValuesView<K,V>(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + * <p>The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set<Map.Entry<K,V>> entrySet() { + EntrySetView<K,V> es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration<K> keys() { + return new KeyIterator<K,V>(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration<V> elements() { + return new ValueIterator<K,V>(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator<K> keySpliterator() { + return new KeyIterator<K,V>(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator<V> valueSpliterator() { + return new ValueIterator<K,V>(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator<Map.Entry<K,V>> entrySpliterator() { + return new EntryIterator<K,V>(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? 
"(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. + * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map<?,?> m = (Map<?,?>) o; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry<?,?> e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object> + implements Spliterator<K>, Enumeration<K> { + KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); } + KeyIterator(Traverser<K,V,Object> it) { + super(it); + } + public KeyIterator<K,V> split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator<K,V>(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object> + implements Spliterator<V>, Enumeration<V> { + ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); } + ValueIterator(Traverser<K,V,Object> it) { + super(it); + } + public ValueIterator<K,V> split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator<K,V>(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object> + implements Spliterator<Map.Entry<K,V>> { + EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); } + EntryIterator(Traverser<K,V,Object> it) { + super(it); + } + public EntryIterator<K,V> split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator<K,V>(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry<K,V>((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry<K,V> implements Map.Entry<K, V> { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8<K, V> map; + MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { 
return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment<K,V> implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment<K,V>[]) + new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment<K,V>(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). 
+ * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == null) { + init = true; + Node[] tab = new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. + while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action<A> { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction<A,B> { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator<T> { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble<A> { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong<A> { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt<A> {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } 
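/*
 * These single-abstract-method interfaces stand in for JDK 8 functional
 * interfaces on older JVMs, so callers implement them with anonymous classes.
 * A small illustrative sketch; the variable names are arbitrary, and the
 * enclosing class is assumed to be importable from this gem's jsr166e package.
 */
class SamUsageSketch {
    // Maps its argument to a long, e.g. for a mapped reduction over keys.
    static final ConcurrentHashMapV8.ObjectToLong<String> LENGTH =
        new ConcurrentHashMapV8.ObjectToLong<String>() {
            public long apply(String s) { return s.length(); }
        };

    // A void action of two arguments, e.g. applied to each (key, value) pair.
    static final ConcurrentHashMapV8.BiAction<String, Long> PRINT =
        new ConcurrentHashMapV8.BiAction<String, Long>() {
            public void apply(String k, Long v) { System.out.println(k + "=" + v); }
        };
}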
+ /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView<K, V> { + final ConcurrentHashMapV8<K, V> map; + CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8<K,V> getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator<?> iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator<?> it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator<?> it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator<?> it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator<?> it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? 
"(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection<?> c) { + if (c != this) { + for (Iterator<?> it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection<?> c) { + boolean modified = false; + for (Iterator<?> it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection<?> c) { + boolean modified = false; + for (Iterator<?> it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8<K, V> map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the keys of this map + */ + public Iterator<K> iterator() { return new KeyIterator<K,V>(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection<? extends K> c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set<?> c; + return ((o instanceof Set) && + ((c = (Set<?>)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + * <p>The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. 
+ */ + public static final class ValuesView<K,V> extends CHMView<K,V> + implements Collection<V> { + ValuesView(ConcurrentHashMapV8<K, V> map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator<V> it = new ValueIterator<K,V>(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator<V> iterator() { + return new ValueIterator<K,V>(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection<? extends V> c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView<K,V> extends CHMView<K,V> + implements Set<Map.Entry<K,V>> { + EntrySetView(ConcurrentHashMapV8<K, V> map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the entries of this map + */ + public final Iterator<Map.Entry<K,V>> iterator() { + return new EntryIterator<K,V>(map); + } + + public final boolean add(Entry<K,V> e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection<? 
extends Entry<K,V>> c) { + boolean added = false; + for (Entry<K,V> e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set<?> c; + return ((o instanceof Set) && + ((c = (Set<?>)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long counterOffset; + private static final long sizeCtlOffset; + private static final long ABASE; + private static final int ASHIFT; + + static { + int ss; + try { + UNSAFE = getUnsafe(); + Class<?> k = ConcurrentHashMapV8.class; + counterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("counter")); + sizeCtlOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + Class<?> sc = Node[].class; + ABASE = UNSAFE.arrayBaseOffset(sc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction<sun.misc.Unsafe>() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java new file mode 100644 index 0000000..22d0cbc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java @@ -0,0 +1,203 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package org.jruby.ext.thread_safe.jsr166e; +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + * <p>This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. 
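/*
 * A minimal sketch of the usage pattern just described: several threads bump a
 * shared statistics counter through increment(), and a reader later combines
 * the stripes with sum(). Class name, thread count, and iteration count are
 * arbitrary.
 */
import org.jruby.ext.thread_safe.jsr166e.LongAdder;

class HitCounterSketch {
    static final LongAdder hits = new LongAdder();

    public static void main(String[] args) throws InterruptedException {
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(new Runnable() {
                public void run() {
                    for (int j = 0; j < 100000; j++)
                        hits.increment();              // contended updates spread across cells
                }
            });
            workers[i].start();
        }
        for (Thread t : workers) t.join();
        System.out.println(hits.sum());                // base plus all cells: 400000
    }
}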
+ * + * <p>This class extends {@link Number}, but does <em>not</em> define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + * <p><em>jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic.</em> + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is <em>NOT</em> an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * <em>not</em> guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. 
+ */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. + */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java new file mode 100644 index 0000000..8651cdc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java @@ -0,0 +1,342 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package org.jruby.ext.thread_safe.jsr166e; +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. 
If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class<?> ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal<HashCode> { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. 
+ */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
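/*
 * Sketch of how a concrete subclass specializes Striped64, modeled on the way
 * LongAdder does it for sums: it supplies fn() (the combining function that
 * retryUpdate applies on contention) and drives updates through casBase(), the
 * per-thread hash, and retryUpdate(). This hypothetical MaxTrackerSketch keeps
 * a running maximum; it must sit in the same package because Striped64 and its
 * helpers are package-private.
 */
package org.jruby.ext.thread_safe.jsr166e;

class MaxTrackerSketch extends Striped64 {
    MaxTrackerSketch() { base = Long.MIN_VALUE; }

    // Combining function used by retryUpdate when a CAS on base or a cell fails.
    final long fn(long currentValue, long newValue) {
        return newValue > currentValue ? newValue : currentValue;
    }

    public void update(long x) {
        Cell[] as; long b, v; HashCode hc; Cell a; int n;
        if ((as = cells) != null ||
            ((b = base) < x && !casBase(b, x))) {        // fast path: uncontended CAS on base
            boolean uncontended = true;
            int h = (hc = threadHashCode.get()).code;
            if (as == null || (n = as.length) < 1 ||
                (a = as[(n - 1) & h]) == null ||
                ((v = a.value) < x && !(uncontended = a.cas(v, x))))
                retryUpdate(x, hc, uncontended);         // handles init, resize, rehash
        }
    }

    public long max() {
        long max = base;
        Cell[] as = cells;
        if (as != null)
            for (Cell a : as)
                if (a != null && a.value > max) max = a.value;
        return max;
    }

    // Number plumbing, mirroring LongAdder's delegation to the combined value.
    public long longValue()     { return max(); }
    public int intValue()       { return (int) max(); }
    public float floatValue()   { return (float) max(); }
    public double doubleValue() { return (double) max(); }
}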
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class<?> sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction<sun.misc.Unsafe>() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java new file mode 100644 index 0000000..4e1e33a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java @@ -0,0 +1,3800 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package org.jruby.ext.thread_safe.jsr166e.nounsafe; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap; +import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. 
However, even though all operations are + * thread-safe, retrieval operations do <em>not</em> entail locking, + * and there is <em>not</em> any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + * <p>Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently <em>completed</em> update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * <em>happens-before</em> relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do <em>not</em> throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + * <p>The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + * <p>A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. 
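/*
 * A pre-lambda sketch of the frequency-map idiom described above, written only
 * in terms of plain ConcurrentMap calls plus LongAdder; computeIfAbsent, as the
 * javadoc notes, collapses the get/putIfAbsent dance into a single call. The
 * class and helper names are illustrative.
 */
import org.jruby.ext.thread_safe.jsr166e.LongAdder;
import org.jruby.ext.thread_safe.jsr166e.nounsafe.ConcurrentHashMapV8;

class FrequencyMapSketch {
    static final ConcurrentHashMapV8<String, LongAdder> freqs =
        new ConcurrentHashMapV8<String, LongAdder>();

    static void record(String word) {
        LongAdder counter = freqs.get(word);
        if (counter == null) {
            LongAdder created = new LongAdder();
            counter = freqs.putIfAbsent(word, created);   // loses the race at most once
            if (counter == null)
                counter = created;
        }
        counter.increment();                              // contention-friendly count
    }
}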
For example, to add + * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + * <p>This class and its views and iterators implement all of the + * <em>optional</em> methods of the {@link Map} and {@link Iterator} + * interfaces. + * + * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class + * does <em>not</em> allow {@code null} to be used as a key or value. + * + * <p>ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + * <ul> + * <li> forEach: Perform a given action on each element. + * A variant form applies a given transformation on each element + * before performing the action.</li> + * + * <li> search: Return the first available non-null result of + * applying a given function on each element; skipping further + * search when a result is found.</li> + * + * <li> reduce: Accumulate each element. The supplied reduction + * function cannot rely on ordering (more formally, it should be + * both associative and commutative). There are five variants: + * + * <ul> + * + * <li> Plain reductions. (There is not a form of this method for + * (key, value) function arguments since there is no corresponding + * return type.)</li> + * + * <li> Mapped reductions that accumulate the results of a given + * function applied to each element.</li> + * + * <li> Reductions to scalar doubles, longs, and ints, using a + * given basis value.</li> + * + * </li> + * </ul> + * </ul> + * + * <p>The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). 
Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + * <p>Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + * <p>Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + * <p>Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + * <p>Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + * <p>All arguments to all task methods must be non-null. + * + * <p><em>jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8.</em> + * + * <p>This class is a member of the + * <a href="{@docRoot}/../technotes/guides/collections/index.html"> + * Java Collections Framework</a>. + * + * @since 1.5 + * @author Doug Lea + * @param <K> the type of keys maintained by this map + * @param <V> the type of mapped values + */ +public class ConcurrentHashMapV8<K, V> + implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + * <p>This interface exports a subset of expected JDK8 + * functionality. + * + * <p>Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + * <pre> + * {@code ConcurrentHashMapV8<String, Long> m = ... + * // split as if have 8 * parallelism, for load balance + * int n = m.size(); + * int p = aForkJoinPool.getParallelism() * 8; + * int split = (n < p)? 
n : p; + * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null)); + * // ... + * static class SumValues extends RecursiveTask<Long> { + * final Spliterator<Long> s; + * final int split; // split while > 1 + * final SumValues nextJoin; // records forked subtasks to join + * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) { + * this.s = s; this.depth = depth; this.nextJoin = nextJoin; + * } + * public Long compute() { + * long sum = 0; + * SumValues subtasks = null; // fork subtasks + * for (int s = split >>> 1; s > 0; s >>>= 1) + * (subtasks = new SumValues(s.split(), s, subtasks)).fork(); + * while (s.hasNext()) // directly process remaining elements + * sum += s.next(); + * for (SumValues t = subtasks; t != null; t = t.nextJoin) + * sum += t.join(); // collect subtask results + * return sum; + * } + * } + * }</pre> + */ + public static interface Spliterator<T> extends Iterator<T> { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will <em>not</em> produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator<T> split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. 
The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. 
Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). + * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. 
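/*
 * The bin-occupancy figures quoted a few paragraphs above follow directly from
 * the stated Poisson model: with a 0.75 resizing threshold the expected load is
 * about 0.5 node per bin, and P(k) = exp(-0.5) * pow(0.5, k) / k!. A quick
 * numeric check (class name is arbitrary):
 */
class PoissonBinCheck {
    public static void main(String[] args) {
        double lambda = 0.5;                        // expected nodes per bin at threshold
        double kFactorial = 1.0;
        for (int k = 0; k <= 8; k++) {
            if (k > 0) kFactorial *= k;
            double p = Math.exp(-lambda) * Math.pow(lambda, k) / kFactorial;
            System.out.printf("%d: %.8f%n", k, p);  // 0: 0.60653066, 1: 0.30326533, ...
        }
    }
}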
+ * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments referring to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes.
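+ * (A bin whose lock cannot be acquired during a transfer is
+ * recorded here, up to this many indices, and revisited later;
+ * the transfer only blocks when no other bins remain to work on.
+ * See method rebuild.)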
+ */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. + */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile AtomicReferenceArray<Node> table; + + /** + * The counter maintaining number of elements. + */ + private transient LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView<K,V> keySet; + private transient ValuesView<K,V> values; + private transient EntrySetView<K,V> entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment<K,V>[] segments; + + static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(AtomicReferenceArray<Node> tab, int i) { // used by Iter + return tab.get(i); + } + + private static final boolean casTabAt(AtomicReferenceArray<Node> tab, int i, Node c, Node v) { + return tab.compareAndSet(i, c, v); + } + + private static final void setTabAt(AtomicReferenceArray<Node> tab, int i, Node v) { + tab.set(i, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. 
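+ *
+ * A typical read therefore follows the pattern used in
+ * internalGet below, checking hash and val before trusting key:
+ *
+ *   if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
+ *       ((ek = e.key) == k || k.equals(ek)))
+ *       return ev;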
+ */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return HASH_UPDATER.compareAndSet(this, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. + */ + final void tryAwaitLock(AtomicReferenceArray<Node> tab, int i) { + if (tab != null && i >= 0 && i < tab.length()) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable<T> + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) 
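+ * In this port the comparison step is Ruby's "<=>" (see
+ * rubyCompare below): keys that do not respond to "<=>", or
+ * whose comparison raises NoMethodError or returns nil, are
+ * treated as ties, which is when both subtrees may need to be
+ * searched.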
The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. 
+ */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? 
p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? 
null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. 
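+ * Callers reach this from the internalPut / compute paths once a
+ * bin has been observed to hold TREE_THRESHOLD or more nodes:
+ *
+ *   if (count >= TREE_THRESHOLD)
+ *       replaceWithTreeBin(tab, i, k);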
+ */ + private final void replaceWithTreeBin(AtomicReferenceArray<Node> tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (AtomicReferenceArray<Node> tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (AtomicReferenceArray<Node>)ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. + */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (AtomicReferenceArray<Node> tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. 
If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. + */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray<Node> tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray<Node> tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal 
= p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun<? super K, ?> mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (AtomicReferenceArray<Node> tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node 
last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray<Node> tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun<? super V, ? super V, ? 
extends V> mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray<Node> tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map<?, ?> m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry<?, ?> entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (AtomicReferenceArray<Node> tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = 
e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final AtomicReferenceArray<Node> initTable() { + AtomicReferenceArray<Node> tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new AtomicReferenceArray<Node>(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + AtomicReferenceArray<Node> tab; int n, sc; + while ((tab = table) != null && + (n = tab.length()) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + AtomicReferenceArray<Node> tab = table; int n; + if (tab == null || (n = tab.length()) == 0) { + n = (sc > c) ? sc : c; + if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = new AtomicReferenceArray<Node>(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
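+ * Because the table doubles, a node in bin i of a table of
+ * length n ends up in bin i or bin i + n of the next table,
+ * depending on the bit (hash & n); splitBin and splitTreeBin
+ * below partition each bin accordingly.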
+ * + * @return the new table + */ + private static final AtomicReferenceArray<Node> rebuild(AtomicReferenceArray<Node> tab) { + int n = tab.length(); + AtomicReferenceArray<Node> nextTab = new AtomicReferenceArray<Node>(n << 1); + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. 
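+ * The trailing run of nodes that all map to the same half (found
+ * via lastRun below) is reused rather than copied, which is what
+ * keeps cloning to roughly one sixth of the nodes per doubling
+ * (see the overview above).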
+ */ + private static void splitBin(AtomicReferenceArray<Node> nextTab, int i, Node e) { + int bit = nextTab.length() >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. + */ + private static void splitTreeBin(AtomicReferenceArray<Node> nextTab, int i, TreeBin t) { + int bit = nextTab.length() >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + AtomicReferenceArray<Node> tab = table; + while (tab != null && i < tab.length()) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (AtomicReferenceArray<Node>)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). 
Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser<K,V,R> { + final ConcurrentHashMapV8<K, V> map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + AtomicReferenceArray<Node> tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8<K, V> map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser<K,V,?> it) { + ConcurrentHashMapV8<K, V> m; AtomicReferenceArray<Node> t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length(); + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. 
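+ *
+ * Illustrative numbers: with baseSize == 16 and a current table
+ * of length 64, baseIndex 3 means bins 3, 19, 35 and 51 are
+ * visited before baseIndex advances to 4, via the step
+ *   index = (i += baseSize) < n ? i : (baseIndex = b + 1);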
+ */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8<K, V> m; + AtomicReferenceArray<Node> t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length(); + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length(); + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (AtomicReferenceArray<Node>)ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. 
+ * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static <K> KeySetView<K,Boolean> newKeySet() { + return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { + return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. 
+ * + * <p>More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + * <p>The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. 
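+     * <p>For example, a minimal sketch of the return-value behaviour
+     * (types chosen only for illustration):
+     * <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> map =
+     *   new ConcurrentHashMapV8<String, Integer>();
+     * Integer prev1 = map.put("k", 1);   // null: no previous mapping
+     * Integer prev2 = map.put("k", 2);   // 1: the value just replaced}</pre>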
+ * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map<? extends K, ? extends V> m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + * <pre> {@code + * if (map.containsKey(key)) + * return map.get(key); + * value = mappingFunction.apply(key); + * if (value != null) + * map.put(key, value); + * return value;}</pre> + * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + * <pre> {@code + * map.computeIfAbsent(key, new Fun<K, V>() { + * public V map(K k) { return new Value(f(k)); }});}</pre> + * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun<? super K, ? extends V> mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. 
This is equivalent to + * <pre> {@code + * if (map.containsKey(key)) { + * value = remappingFunction.apply(key, map.get(key)); + * if (value != null) + * map.put(key, value); + * else + * map.remove(key); + * } + * }</pre> + * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + * <pre> {@code + * value = remappingFunction.apply(key, map.get(key)); + * if (value != null) + * map.put(key, value); + * else + * map.remove(key); + * }</pre> + * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * <pre> {@code + * Map<Key, String> map = ...; + * final String msg = ...; + * map.compute(key, new BiFun<Key, String, String>() { + * public String apply(Key k, String v) { + * return (v == null) ? msg : v + msg;});}}</pre> + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun<? super K, ? super V, ? 
extends V> remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + * <pre> {@code + * if (!map.containsKey(key)) + * map.put(value); + * else { + * newValue = remappingFunction.apply(map.get(key), value); + * if (value != null) + * map.put(key, value); + * else + * map.remove(key); + * } + * }</pre> + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView<K,V> keySet() { + KeySetView<K,V> ks = keySet; + return (ks != null) ? 
ks : (keySet = new KeySetView<K,V>(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView<K,V> keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView<K,V>(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView<K,V> values() { + ValuesView<K,V> vs = values; + return (vs != null) ? vs : (values = new ValuesView<K,V>(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + * <p>The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set<Map.Entry<K,V>> entrySet() { + EntrySetView<K,V> es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration<K> keys() { + return new KeyIterator<K,V>(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration<V> elements() { + return new ValueIterator<K,V>(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator<K> keySpliterator() { + return new KeyIterator<K,V>(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator<V> valueSpliterator() { + return new ValueIterator<K,V>(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator<Map.Entry<K,V>> entrySpliterator() { + return new EntryIterator<K,V>(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. 
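+     * <p>For example, a map holding the same mappings as a
+     * {@code java.util.HashMap} reports the same hash code, as required by
+     * the {@link Map} contract (a small illustrative sketch):
+     * <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> a =
+     *   new ConcurrentHashMapV8<String, Integer>();
+     * java.util.HashMap<String, Integer> b =
+     *   new java.util.HashMap<String, Integer>();
+     * a.put("x", 1);
+     * b.put("x", 1);
+     * boolean sameHash = (a.hashCode() == b.hashCode());   // true}</pre>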
+ * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. + * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map<?,?> m = (Map<?,?>) o; + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry<?,?> e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object> + implements Spliterator<K>, Enumeration<K> { + KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); } + KeyIterator(Traverser<K,V,Object> it) { + super(it); + } + public KeyIterator<K,V> split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator<K,V>(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object> + implements Spliterator<V>, Enumeration<V> { + ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); } + ValueIterator(Traverser<K,V,Object> it) { + super(it); + } + public ValueIterator<K,V> split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator<K,V>(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V 
nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object> + implements Spliterator<Map.Entry<K,V>> { + EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); } + EntryIterator(Traverser<K,V,Object> it) { + super(it); + } + public EntryIterator<K,V> split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator<K,V>(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry<K,V>((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry<K,V> implements Map.Entry<K, V> { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8<K, V> map; + MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment<K,V> implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment<K,V>[]) + new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment<K,V>(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser<K,V,Object> it = new Traverser<K,V,Object>(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). 
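+     * <p>As an illustrative round trip (stream classes are from
+     * {@code java.io}; the key and value types are arbitrary but must be
+     * serializable):
+     * <pre> {@code
+     * ConcurrentHashMapV8<String, Integer> map =
+     *   new ConcurrentHashMapV8<String, Integer>();
+     * map.put("answer", 42);
+     * ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     * new ObjectOutputStream(bytes).writeObject(map);
+     * ObjectInputStream in = new ObjectInputStream(
+     *   new ByteArrayInputStream(bytes.toByteArray()));
+     * ConcurrentHashMapV8<String, Integer> copy =
+     *   (ConcurrentHashMapV8<String, Integer>) in.readObject();
+     * copy.get("answer");   // 42}</pre>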
+ * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + this.counter = new LongAdder(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == null) { + init = true; + AtomicReferenceArray<Node> tab = new AtomicReferenceArray<Node>(n); + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + AtomicReferenceArray<Node> tab = table; + for (int i = 0; i < tab.length(); ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. + while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action<A> { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction<A,B> { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator<T> { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble<A> { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong<A> { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt<A> {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double 
apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView<K, V> { + final ConcurrentHashMapV8<K, V> map; + CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8<K,V> getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator<?> iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator<?> it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator<?> it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator<?> it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator<?> it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? 
"(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection<?> c) { + if (c != this) { + for (Iterator<?> it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection<?> c) { + boolean modified = false; + for (Iterator<?> it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection<?> c) { + boolean modified = false; + for (Iterator<?> it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8<K, V> map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the keys of this map + */ + public Iterator<K> iterator() { return new KeyIterator<K,V>(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection<? extends K> c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set<?> c; + return ((o instanceof Set) && + ((c = (Set<?>)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + * <p>The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. 
+ */ + public static final class ValuesView<K,V> extends CHMView<K,V> + implements Collection<V> { + ValuesView(ConcurrentHashMapV8<K, V> map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator<V> it = new ValueIterator<K,V>(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator<V> iterator() { + return new ValueIterator<K,V>(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection<? extends V> c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView<K,V> extends CHMView<K,V> + implements Set<Map.Entry<K,V>> { + EntrySetView(ConcurrentHashMapV8<K, V> map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry<?,?> e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry<?,?>)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the entries of this map + */ + public final Iterator<Map.Entry<K,V>> iterator() { + return new EntryIterator<K,V>(map); + } + + public final boolean add(Entry<K,V> e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection<? 
extends Entry<K,V>> c) { + boolean added = false; + for (Entry<K,V> e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set<?> c; + return ((o instanceof Set) && + ((c = (Set<?>)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } +} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java new file mode 100644 index 0000000..e7a1c5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java @@ -0,0 +1,204 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package org.jruby.ext.thread_safe.jsr166e.nounsafe; + +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + * <p>This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + * <p>This class extends {@link Number}, but does <em>not</em> define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + * <p><em>jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic.</em> + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. 
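+     * <p>A minimal single-threaded sketch (in practice the adder is
+     * typically shared and updated from several threads):
+     * <pre> {@code
+     * LongAdder hits = new LongAdder();
+     * hits.increment();
+     * hits.add(4L);
+     * long total = hits.sum();   // 5}</pre>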
The returned value is <em>NOT</em> an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * <em>not</em> guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. + */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java new file mode 100644 index 0000000..ee69567 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java @@ -0,0 +1,291 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. 
+ +package org.jruby.ext.thread_safe.jsr166e.nounsafe; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. 
We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + + static AtomicLongFieldUpdater<Cell> VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); + + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return VALUE_UPDATER.compareAndSet(this, cmp, val); + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal<HashCode> { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + AtomicLongFieldUpdater<Striped64> BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); + AtomicIntegerFieldUpdater<Striped64> BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return BASE_UPDATER.compareAndSet(this, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return BUSY_UPDATER.compareAndSet(this, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. 
+ * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. + */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } +} \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java new file mode 100644 index 0000000..81ba441 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java @@ -0,0 +1,199 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.16 version + +package org.jruby.ext.thread_safe.jsr166y; + +import java.util.Random; + +/** + * A random number generator isolated to the current thread. Like the + * global {@link java.util.Random} generator used by the {@link + * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized + * with an internally generated seed that may not otherwise be + * modified. When applicable, use of {@code ThreadLocalRandom} rather + * than shared {@code Random} objects in concurrent programs will + * typically encounter much less overhead and contention. 
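+ * <p>A minimal sketch of the intended calling pattern (the bounds below are
+ * chosen only for illustration):
+ * <pre> {@code
+ * int die   = ThreadLocalRandom.current().nextInt(1, 7);     // 1..6
+ * long wait = ThreadLocalRandom.current().nextLong(1000L);   // 0..999
+ * double x  = ThreadLocalRandom.current().nextDouble(0.0, 1.0);}</pre>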
Use of + * {@code ThreadLocalRandom} is particularly appropriate when multiple + * tasks (for example, each a {@link ForkJoinTask}) use random numbers + * in parallel in thread pools. + * + * <p>Usages of this class should typically be of the form: + * {@code ThreadLocalRandom.current().nextX(...)} (where + * {@code X} is {@code Int}, {@code Long}, etc). + * When all usages are of this form, it is never possible to + * accidently share a {@code ThreadLocalRandom} across multiple threads. + * + * <p>This class also provides additional commonly used bounded random + * generation methods. + * + * @since 1.7 + * @author Doug Lea + */ +public class ThreadLocalRandom extends Random { + // same constants as Random, but must be redeclared because private + private static final long multiplier = 0x5DEECE66DL; + private static final long addend = 0xBL; + private static final long mask = (1L << 48) - 1; + + /** + * The random seed. We can't use super.seed. + */ + private long rnd; + + /** + * Initialization flag to permit calls to setSeed to succeed only + * while executing the Random constructor. We can't allow others + * since it would cause setting seed in one part of a program to + * unintentionally impact other usages by the thread. + */ + boolean initialized; + + // Padding to help avoid memory contention among seed updates in + // different TLRs in the common case that they are located near + // each other. + private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; + + /** + * The actual ThreadLocal + */ + private static final ThreadLocal<ThreadLocalRandom> localRandom = + new ThreadLocal<ThreadLocalRandom>() { + protected ThreadLocalRandom initialValue() { + return new ThreadLocalRandom(); + } + }; + + + /** + * Constructor called only by localRandom.initialValue. + */ + ThreadLocalRandom() { + super(); + initialized = true; + } + + /** + * Returns the current thread's {@code ThreadLocalRandom}. + * + * @return the current thread's {@code ThreadLocalRandom} + */ + public static ThreadLocalRandom current() { + return localRandom.get(); + } + + /** + * Throws {@code UnsupportedOperationException}. Setting seeds in + * this generator is not supported. + * + * @throws UnsupportedOperationException always + */ + public void setSeed(long seed) { + if (initialized) + throw new UnsupportedOperationException(); + rnd = (seed ^ multiplier) & mask; + } + + protected int next(int bits) { + rnd = (rnd * multiplier + addend) & mask; + return (int) (rnd >>> (48-bits)); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @throws IllegalArgumentException if least greater than or equal + * to bound + * @return the next value + */ + public int nextInt(int least, int bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextInt(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public long nextLong(long n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + // Divide n by two until small enough for nextInt. 
On each + // iteration (at most 31 of them but usually much less), + // randomly choose both whether to include high bit in result + // (offset) and whether to continue with the lower vs upper + // half (which makes a difference only if odd). + long offset = 0; + while (n >= Integer.MAX_VALUE) { + int bits = next(2); + long half = n >>> 1; + long nextn = ((bits & 2) == 0) ? half : n - half; + if ((bits & 1) == 0) + offset += n - nextn; + n = nextn; + } + return offset + nextInt((int) n); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public long nextLong(long least, long bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextLong(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed {@code double} value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public double nextDouble(double n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + return nextDouble() * n; + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public double nextDouble(double least, double bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextDouble() * (bound - least) + least; + } + + private static final long serialVersionUID = -5851777807851030925L; +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/thread_safe/JrubyCacheBackendService.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/thread_safe/JrubyCacheBackendService.java new file mode 100644 index 0000000..2c5bcea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/ext/thread_safe/JrubyCacheBackendService.java @@ -0,0 +1,15 @@ +package thread_safe; + +import java.io.IOException; + +import org.jruby.Ruby; +import org.jruby.ext.thread_safe.JRubyCacheBackendLibrary; +import org.jruby.runtime.load.BasicLibraryService; + +// can't name this JRubyCacheBackendService or else JRuby doesn't pick this up +public class JrubyCacheBackendService implements BasicLibraryService { + public boolean basicLoad(final Ruby runtime) throws IOException { + new JRubyCacheBackendLibrary().load(runtime, false); + return true; + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe.rb new file mode 100644 index 0000000..5b8fb27 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe.rb @@ -0,0 +1,65 @@ +require 'thread_safe/version' +require 'thread_safe/synchronized_delegator' + +module ThreadSafe + autoload :Cache, 'thread_safe/cache' + autoload :Util, 'thread_safe/util' + + # Various classes within allows for +nil+ values to be stored, so a special +NULL+ token is required to indicate the "nil-ness". 
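The +NULL+ sentinel introduced above is what lets the collections tell a key mapped to +nil+ apart from a key that is absent. A minimal standalone sketch of that pattern, using illustrative local names rather than the gem's internals:

null    = Object.new                   # unique sentinel, never equal to user data
backend = { 'present_nil' => nil }     # a mapping whose stored value really is nil

def mapped?(backend, key, null)
  backend.fetch(key, null) != null     # true even when the stored value is nil
end

mapped?(backend, 'present_nil', null)  # => true  (nil is stored, so the key exists)
mapped?(backend, 'missing', null)      # => false (fetch fell back to the sentinel)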
+ NULL = Object.new + + if defined?(JRUBY_VERSION) + require 'jruby/synchronized' + + # A thread-safe subclass of Array. This version locks + # against the object itself for every method call, + # ensuring only one thread can be reading or writing + # at a time. This includes iteration methods like + # #each. + class Array < ::Array + include JRuby::Synchronized + end + + # A thread-safe subclass of Hash. This version locks + # against the object itself for every method call, + # ensuring only one thread can be reading or writing + # at a time. This includes iteration methods like + # #each. + class Hash < ::Hash + include JRuby::Synchronized + end + elsif !defined?(RUBY_ENGINE) || RUBY_ENGINE == 'ruby' + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + Array = ::Array + Hash = ::Hash + elsif defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' + require 'monitor' + + class Hash < ::Hash; end + class Array < ::Array; end + + [Hash, Array].each do |klass| + klass.class_eval do + private + def _mon_initialize + @_monitor = Monitor.new unless @_monitor # avoid double initialisation + end + + def self.allocate + obj = super + obj.send(:_mon_initialize) + obj + end + end + + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{method}(*args) + @_monitor.synchronize { super } + end + RUBY_EVAL + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/atomic_reference_cache_backend.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/atomic_reference_cache_backend.rb new file mode 100644 index 0000000..7e710b5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/atomic_reference_cache_backend.rb @@ -0,0 +1,908 @@ +module ThreadSafe + # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 + # + # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose + # size exceeds a threshold). + # + # A hash table supporting full concurrency of retrievals and high expected + # concurrency for updates. However, even though all operations are + # thread-safe, retrieval operations do _not_ entail locking, and there is + # _not_ any support for locking the entire table in a way that prevents all + # access. + # + # Retrieval operations generally do not block, so may overlap with update + # operations. Retrievals reflect the results of the most recently _completed_ + # update operations holding upon their onset. (More formally, an update + # operation for a given key bears a _happens-before_ relation with any (non + # +nil+) retrieval for that key reporting the updated value.) For aggregate + # operations such as +clear()+, concurrent retrievals may reflect insertion or + # removal of only some entries. Similarly, the +each_pair+ iterator yields + # elements reflecting the state of the hash table at some point at or since + # the start of the +each_pair+. Bear in mind that the results of aggregate + # status methods including +size()+ and +empty?+} are typically useful only + # when a map is not undergoing concurrent updates in other threads. 
Otherwise + # the results of these methods reflect transient states that may be adequate + # for monitoring or estimation purposes, but not for program control. + # + # The table is dynamically expanded when there are too many collisions (i.e., + # keys that have distinct hash codes but fall into the same slot modulo the + # table size), with the expected average effect of maintaining roughly two + # bins per mapping (corresponding to a 0.75 load factor threshold for + # resizing). There may be much variance around this average as mappings are + # added and removed, but overall, this maintains a commonly accepted + # time/space tradeoff for hash tables. However, resizing this or any other + # kind of hash table may be a relatively slow operation. When possible, it is + # a good idea to provide a size estimate as an optional :initial_capacity + # initializer argument. An additional optional :load_factor constructor + # argument provides a further means of customizing initial table capacity by + # specifying the table density to be used in calculating the amount of space + # to allocate for the given number of elements. Note that using many keys with + # exactly the same +hash+ is a sure way to slow down performance of any hash + # table. + # + # ## Design overview + # + # The primary design goal of this hash table is to maintain concurrent + # readability (typically method +[]+, but also iteration and related methods) + # while minimizing update contention. Secondary goals are to keep space + # consumption about the same or better than plain +Hash+, and to support high + # initial insertion rates on an empty table by many threads. + # + # Each key-value mapping is held in a +Node+. The validation-based approach + # explained below leads to a lot of code sprawl because retry-control + # precludes factoring into smaller methods. + # + # The table is lazily initialized to a power-of-two size upon the first + # insertion. Each bin in the table normally contains a list of +Node+s (most + # often, the list has only zero or one +Node+). Table accesses require + # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are + # always accurately traversable under volatile reads, so long as lookups check + # hash code and non-nullness of value before checking key equality. + # + # We use the top two bits of +Node+ hash fields for control purposes -- they + # are available anyway because of addressing constraints. As explained further + # below, these top bits are used as follows: + # + # - 00 - Normal + # - 01 - Locked + # - 11 - Locked and may have a thread waiting for lock + # - 10 - +Node+ is a forwarding node + # + # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, + # except for forwarding nodes, for which the lower bits are zero (and so + # always have hash field == +MOVED+). + # + # Insertion (via +[]=+ or its variants) of the first node in an empty bin is + # performed by just CASing it to the bin. This is by far the most common case + # for put operations under most key/hash distributions. Other update + # operations (insert, delete, and replace) require locks. We do not want to + # waste the space required to associate a distinct lock object with each bin, + # so instead use the first node of a bin list itself as a lock. Blocking + # support for these locks relies +Util::CheapLockable. 
However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. + # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. + # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). 
The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. + # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. + # + # The element count is maintained using a +ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + class AtomicReferenceCacheBackend + class Table < Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. 
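Before the +Node+ class itself, a small standalone illustration of the hash-field encoding summarized above (00 normal, 01 locked, 11 locked with a waiter, 10 forwarding). The locals below assume a 64-bit MRI build, where +FIXNUM_BIT_SIZE+ is 62 and the shift is therefore 60; the real constants are derived inside +Node+ just below.

bit_shift = 60                          # FIXNUM_BIT_SIZE - 2 on a 64-bit build
moved     = 0b10 << bit_shift           # forwarding node, carries no key hash
locked    = 0b01 << bit_shift           # bin head is currently locked
waiting   = 0b11 << bit_shift           # locked, and a thread is parked waiting
hash_bits = (1 << bit_shift) - 1        # usable bits of a normal node's hash

pure_hash   = 0xBEEF & hash_bits
locked_hash = pure_hash | locked

(locked_hash & locked) != 0             # => true,  the bin reads as locked
(locked_hash & hash_bits) == pure_hash  # => true,  the key's hash bits survive locking
(waiting & locked) != 0                 # => true,  WAITING implies LOCKED
moved & hash_bits                       # => 0,     forwarders have no usable hash bits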
+ class Node + extend Util::Volatile + attr_volatile :hash, :value, :next + + include Util::CheapLockable + + bit_shift = Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Util::CPU_COUNT > 1 ? Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? + spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_aquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_aquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. + TRANSFER_BUFFER_SIZE = 32 + + extend Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. + + # Table initialization and resizing control. 
When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + + def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? 
+ value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
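The +internal_replace+ and +internal_compute+ helpers that follow back the public compute family. A short usage sketch of that public surface, assuming the gem is loadable as +thread_safe+; as described above, a block returning +nil+ removes the mapping, and +NULL+ never leaks to callers.

require 'thread_safe'

cache = ThreadSafe::Cache.new
cache.compute_if_absent(:a) { 1 }           # => 1, block runs once, 1 is stored
cache.compute_if_absent(:a) { 2 }           # => 1, mapping exists, block skipped
cache.compute_if_present(:a) { |v| v + 1 }  # => 2, updates the existing value
cache.compute_if_present(:b) { |v| v + 1 }  # => nil, :b is absent, block skipped
cache.merge_pair(:a, 10) { |old| old * 2 }  # => 4, merged with the current value
cache.merge_pair(:c, 10) { |old| old * 2 }  # => 10, :c was absent, 10 stored as-is
cache.compute(:a) { |v| nil }               # => nil, returning nil deletes :a
cache.key?(:a)                              # => false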
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + 
check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. + def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. + def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/cache.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/cache.rb new file mode 100644 index 0000000..265c0e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/cache.rb @@ -0,0 +1,161 @@ +require 'thread' + +module ThreadSafe + autoload :JRubyCacheBackend, 'thread_safe/jruby_cache_backend' + autoload :MriCacheBackend, 'thread_safe/mri_cache_backend' + autoload :NonConcurrentCacheBackend, 'thread_safe/non_concurrent_cache_backend' + autoload :AtomicReferenceCacheBackend, 'thread_safe/atomic_reference_cache_backend' + 
autoload :SynchronizedCacheBackend, 'thread_safe/synchronized_cache_backend' + + ConcurrentCacheBackend = if defined?(RUBY_ENGINE) + case RUBY_ENGINE + when 'jruby'; JRubyCacheBackend + when 'ruby'; MriCacheBackend + when 'rbx'; AtomicReferenceCacheBackend + else + warn 'ThreadSafe: unsupported Ruby engine, using a fully synchronized ThreadSafe::Cache implementation' if $VERBOSE + SynchronizedCacheBackend + end + else + MriCacheBackend + end + + class Cache < ConcurrentCacheBackend + def initialize(options = nil, &block) + if options.kind_of?(::Hash) + validate_options_hash!(options) + else + options = nil + end + + super(options) + @default_proc = block + end + + def [](key) + if value = super # non-falsy value is an existing mapping, return it right away + value + # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call + # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value + # would be returned) + # note: nil == value check is not technically necessary + elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) + @default_proc.call(self, key) + else + value + end + end + + alias_method :get, :[] + alias_method :put, :[]= + + def fetch(key, default_value = NULL) + if NULL != (value = get_or_default(key, NULL)) + value + elsif block_given? + yield key + elsif NULL != default_value + default_value + else + raise_fetch_no_key + end + end + + def fetch_or_store(key, default_value = NULL) + fetch(key) do + put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) + end + end + + def put_if_absent(key, value) + computed = false + result = compute_if_absent(key) do + computed = true + value + end + computed ? nil : result + end unless method_defined?(:put_if_absent) + + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end unless method_defined?(:value?) + + def keys + arr = [] + each_pair {|k, v| arr << k} + arr + end unless method_defined?(:keys) + + def values + arr = [] + each_pair {|k, v| arr << v} + arr + end unless method_defined?(:values) + + def each_key + each_pair {|k, v| yield k} + end unless method_defined?(:each_key) + + def each_value + each_pair {|k, v| yield v} + end unless method_defined?(:each_value) + + def key(value) + each_pair {|k, v| return k if v == value} + nil + end unless method_defined?(:key) + alias_method :index, :key if RUBY_VERSION < '1.9' + + def empty? + each_pair {|k, v| return false} + true + end unless method_defined?(:empty?) 
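Beyond the Hash-like helpers above, the atomic conveniences are the main reason to reach for +ThreadSafe::Cache+. A brief usage sketch, again assuming the gem is loadable as +thread_safe+:

require 'thread_safe'

cache = ThreadSafe::Cache.new

cache.put_if_absent(:config, 'a')     # => nil, no previous mapping, 'a' stored
cache.put_if_absent(:config, 'b')     # => 'a', the existing value wins, 'b' discarded

cache.fetch(:missing, :fallback)      # => :fallback, nothing raised, nothing stored
cache.fetch_or_store(:answer) { 42 }  # => 42, computed and stored
cache.fetch_or_store(:answer) { 7 }   # => 42, existing mapping returned, block skipped

cache.keys.sort_by(&:to_s)            # => [:answer, :config]
cache.size                            # => 2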
+ + def size + count = 0 + each_pair {|k, v| count += 1} + count + end unless method_defined?(:size) + + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair {|k, v| h[k] = v} + h + end + + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + private + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair {|k, v| self[k] = v} + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(0.class) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive #{0.class}" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/mri_cache_backend.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/mri_cache_backend.rb new file mode 100644 index 0000000..13e57b3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/mri_cache_backend.rb @@ -0,0 +1,60 @@ +module ThreadSafe + class MriCacheBackend < NonConcurrentCacheBackend + # We can get away with a single global write lock (instead of a per-instance + # one) because of the GVL/green threads. + # + # NOTE: a neat idea of writing a c-ext to manually perform atomic + # put_if_absent, while relying on Ruby not releasing a GVL while calling a + # c-ext will not work because of the potentially Ruby implemented `#hash` + # and `#eql?` key methods. + WRITE_LOCK = Mutex.new + + def []=(key, value) + WRITE_LOCK.synchronize { super } + end + + def compute_if_absent(key) + if stored_value = _get(key) # fast non-blocking path for the most likely case + stored_value + else + WRITE_LOCK.synchronize { super } + end + end + + def compute_if_present(key) + WRITE_LOCK.synchronize { super } + end + + def compute(key) + WRITE_LOCK.synchronize { super } + end + + def merge_pair(key, value) + WRITE_LOCK.synchronize { super } + end + + def replace_pair(key, old_value, new_value) + WRITE_LOCK.synchronize { super } + end + + def replace_if_exists(key, new_value) + WRITE_LOCK.synchronize { super } + end + + def get_and_set(key, value) + WRITE_LOCK.synchronize { super } + end + + def delete(key) + WRITE_LOCK.synchronize { super } + end + + def delete_pair(key, value) + WRITE_LOCK.synchronize { super } + end + + def clear + WRITE_LOCK.synchronize { super } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/non_concurrent_cache_backend.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/non_concurrent_cache_backend.rb new file mode 100644 index 0000000..111ae63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/non_concurrent_cache_backend.rb @@ -0,0 +1,135 @@ +module ThreadSafe + class NonConcurrentCacheBackend + # WARNING: all public methods of the class must operate on the @backend + # directly without calling each other. This is important because of the + # SynchronizedCacheBackend which uses a non-reentrant mutex for perfomance + # reasons. 
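The warning above is easy to trip over: +SynchronizedCacheBackend+ wraps a plain, non-reentrant mutex, so one synchronized method calling another locks against itself. A small sketch of the failure mode, using a hypothetical +Box+ class rather than the gem's backends:

require 'mutex_m'

class Box
  include Mutex_m

  def initialize
    super          # Mutex_m installs its mutex here
    @value = nil
  end

  def set(v); synchronize { @value = v }; end
  def get;    synchronize { @value };     end

  def set_and_get(v)
    synchronize { set(v); get }  # nested synchronize on the same mutex
  end
end

box = Box.new
box.set(1)
box.get                       # => 1
box.set_and_get(2) rescue $!  # => ThreadError on MRI ("deadlock; recursive locking")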
+ def initialize(options = nil) + @backend = {} + end + + def [](key) + @backend[key] + end + + def []=(key, value) + @backend[key] = value + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + stored_value + else + @backend[key] = yield + end + end + + def replace_pair(key, old_value, new_value) + if pair?(key, old_value) + @backend[key] = new_value + true + else + false + end + end + + def replace_if_exists(key, new_value) + if NULL != (stored_value = @backend.fetch(key, NULL)) + @backend[key] = new_value + stored_value + end + end + + def compute_if_present(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + store_computed_value(key, yield(stored_value)) + end + end + + def compute(key) + store_computed_value(key, yield(@backend[key])) + end + + def merge_pair(key, value) + if NULL == (stored_value = @backend.fetch(key, NULL)) + @backend[key] = value + else + store_computed_value(key, yield(stored_value)) + end + end + + def get_and_set(key, value) + stored_value = @backend[key] + @backend[key] = value + stored_value + end + + def key?(key) + @backend.key?(key) + end + + def value?(value) + @backend.value?(value) + end + + def delete(key) + @backend.delete(key) + end + + def delete_pair(key, value) + if pair?(key, value) + @backend.delete(key) + true + else + false + end + end + + def clear + @backend.clear + self + end + + def each_pair + dupped_backend.each_pair do |k, v| + yield k, v + end + self + end + + def size + @backend.size + end + + def get_or_default(key, default_value) + @backend.fetch(key, default_value) + end + + alias_method :_get, :[] + alias_method :_set, :[]= + private :_get, :_set + private + def initialize_copy(other) + super + @backend = {} + self + end + + def dupped_backend + @backend.dup + end + + def pair?(key, expected_value) + NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) + end + + def store_computed_value(key, new_value) + if new_value.nil? + @backend.delete(key) + nil + else + @backend[key] = new_value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_cache_backend.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_cache_backend.rb new file mode 100644 index 0000000..2af3b8c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_cache_backend.rb @@ -0,0 +1,77 @@ +module ThreadSafe + class SynchronizedCacheBackend < NonConcurrentCacheBackend + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. 
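In contrast to the fully synchronized backend defined here, the +MriCacheBackend+ shown a little earlier keeps reads lock-free and funnels every write through one global lock, re-checking under that lock before storing. A self-contained sketch of that double-checked pattern; +WRITE_LOCK+, +STORE+ and +put_if_absent_like+ are illustrative names only:

WRITE_LOCK = Mutex.new
STORE      = {}

def put_if_absent_like(key)
  existing = STORE[key]                # fast, lock-free read path
  return existing unless existing.nil?
  WRITE_LOCK.synchronize do            # slow path: re-check under the lock,
    STORE[key] ||= yield               # another writer may have won the race
  end
end

put_if_absent_like(:token) { 'first' } # => "first", block runs, value stored
put_if_absent_like(:token) { 'later' } # => "first", fast path, block never runs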
+ + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def value?(value) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_delegator.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000..2fc351d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/synchronized_delegator.rb @@ -0,0 +1,43 @@ +require 'delegate' +require 'monitor' + +# This class provides a trivial way to synchronize all calls to a given object +# by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls +# around the delegated `#send`. Example: +# +# array = [] # not thread-safe on many impls +# array = SynchronizedDelegator.new([]) # thread-safe +# +# A simple `Monitor` provides a very coarse-grained way to synchronize a given +# object, in that it will cause synchronization for methods that have no need +# for it, but this is a trivial way to get thread-safety where none may exist +# currently on some implementations. +# +# This class is currently being considered for inclusion into stdlib, via +# https://bugs.ruby-lang.org/issues/8556 +class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + +end unless defined?(SynchronizedDelegator) diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util.rb new file mode 100644 index 0000000..bad4238 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util.rb @@ -0,0 +1,16 @@ +module ThreadSafe + module Util + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + CPU_COUNT = 16 # is there a way to determine this? 
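The hard-coded +CPU_COUNT+ above answers its own question only approximately; on Rubies that ship +Etc.nprocessors+ (2.2 and later) the value can be detected at runtime. A hedged sketch with a conservative fallback; +detect_cpu_count+ is an illustrative name, not part of the gem:

require 'etc'

def detect_cpu_count(fallback = 16)
  return fallback unless Etc.respond_to?(:nprocessors)
  Etc.nprocessors
rescue StandardError
  fallback
end

detect_cpu_count  # e.g. => 8 on an eight-core machine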
+ + autoload :AtomicReference, 'thread_safe/util/atomic_reference' + autoload :Adder, 'thread_safe/util/adder' + autoload :CheapLockable, 'thread_safe/util/cheap_lockable' + autoload :PowerOfTwoTuple, 'thread_safe/util/power_of_two_tuple' + autoload :Striped64, 'thread_safe/util/striped64' + autoload :Volatile, 'thread_safe/util/volatile' + autoload :VolatileTuple, 'thread_safe/util/volatile_tuple' + autoload :XorShiftRandom, 'thread_safe/util/xor_shift_random' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/adder.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/adder.rb new file mode 100644 index 0000000..aefda5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/adder.rb @@ -0,0 +1,62 @@ +module ThreadSafe + module Util + # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + class Adder < Striped64 + # Adds the given value. + def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/atomic_reference.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/atomic_reference.rb new file mode 100644 index 0000000..04800f1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/atomic_reference.rb @@ -0,0 +1,44 @@ +module ThreadSafe + module Util + AtomicReference = + if defined?(Rubinius::AtomicReference) + # An overhead-less atomic reference. + Rubinius::AtomicReference + else + begin + require 'atomic' + defined?(Atomic::InternalReference) ? 
Atomic::InternalReference : Atomic + rescue LoadError, NameError + class FullLockingAtomicReference + def initialize(value = nil) + @___mutex = Mutex.new + @___value = value + end + + def get + @___mutex.synchronize { @___value } + end + alias_method :value, :get + + def set(new_value) + @___mutex.synchronize { @___value = new_value } + end + alias_method :value=, :set + + def compare_and_set(old_value, new_value) + return false unless @___mutex.try_lock + begin + return false unless @___value.equal? old_value + @___value = new_value + ensure + @___mutex.unlock + end + true + end + end + + FullLockingAtomicReference + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/cheap_lockable.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000..143276a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,106 @@ +module ThreadSafe + module Util + # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in. + # + # Usage: + # class A + # include CheapLockable + # + # def do_exlusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? + # do_something + # cheap_broadcast # wake up others + # end + # end + # end + module CheapLockable + private + engine = defined?(RUBY_ENGINE) && RUBY_ENGINE + if engine == 'rbx' + # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. + def cheap_synchronize + Rubinius.lock(self) + begin + yield + ensure + Rubinius.unlock(self) + end + end + + def cheap_wait + wchan = Rubinius::Channel.new + + begin + waiters = @waiters ||= [] + waiters.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout nil + ensure + Rubinius.lock(self) + + unless signaled or waiters.delete(wchan) + # we timed out, but got signaled afterwards (e.g. while waiting to + # acquire @lock), so pass that signal on to the next waiter + waiters.shift << true unless waiters.empty? + end + end + + self + end + + def cheap_broadcast + waiters = @waiters ||= [] + waiters.shift << true until waiters.empty? + self + end + elsif engine == 'jruby' + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#syncrhonize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_broadcast+'s block. + def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_broadcast+'s block. 
+ def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/power_of_two_tuple.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000..3e2d076 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,26 @@ +module ThreadSafe + module Util + class PowerOfTwoTuple < VolatileTuple + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/striped64.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/striped64.rb new file mode 100644 index 0000000..702fa14 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/striped64.rb @@ -0,0 +1,222 @@ +module ThreadSafe + module Util + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. + # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. + # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUS. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. 
Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUS forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + class Striped64 + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. + # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + class Cell < AtomicReference + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + attr_reader *(Array.new(12).map {|i| :"padding_#{i}"}) + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + end + + extend Volatile + attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. + # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+x+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? 
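+              # another thread holds the +busy+ spinlock (resizing or installing a Cell), so skip installation this round and fall through to rehash below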
+ collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. + def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? + end + + def try_initialize_cells(x, hash) + if free? && !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile.rb new file mode 100644 index 0000000..e050e21 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile.rb @@ -0,0 +1,64 @@ +module ThreadSafe + module Util + module Volatile + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of the +AtomicReference+s. 
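+      # Each +attr_volatile :name+ call generates +name+, +name=+ and +compare_and_set_name+ accessors (plus +cas_name+ and +lazy_set_name+ aliases), each attribute backed by its own +AtomicReference+ created in the generated +initialize+.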
+ # + # Usage: + # class Foo + # extend ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? + include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = ThreadSafe::Util::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile_tuple.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile_tuple.rb new file mode 100644 index 0000000..500beb7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/volatile_tuple.rb @@ -0,0 +1,46 @@ +module ThreadSafe + module Util + # A fixed size array with volatile volatile getters/setters. + # Usage: + # arr = VolatileTuple.new(16) + # arr.volatile_set(0, :foo) + # arr.volatile_get(0) # => :foo + # arr.cas(0, :foo, :bar) # => true + # arr.volatile_get(0) # => :bar + class VolatileTuple + include Enumerable + + Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : Array + + def initialize(size) + @tuple = tuple = Tuple.new(size) + i = 0 + while i < size + tuple[i] = AtomicReference.new + i += 1 + end + end + + def volatile_get(i) + @tuple[i].get + end + + def volatile_set(i, value) + @tuple[i].set(value) + end + + def compare_and_set(i, old_value, new_value) + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + def size + @tuple.size + end + + def each + @tuple.each {|ref| yield ref.get} + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/xor_shift_random.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000..89442ff --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,41 @@ +module ThreadSafe + module Util + # A xorshift random number (positive +Fixnum+s) generator, provides + # reasonably cheap way to generate thread local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift).odd? 
# thread-localy generate a next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. + def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "yˆ=y>>a; yˆ=y<<b; yˆ=y>>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "yˆ=y>>a; yˆ=y<<b; yˆ=y>>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/version.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/version.rb new file mode 100644 index 0000000..88821f6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/lib/thread_safe/version.rb @@ -0,0 +1,21 @@ +module ThreadSafe + VERSION = "0.3.6" +end + +# NOTE: <= 0.2.0 used Threadsafe::VERSION +# @private +module Threadsafe + + # @private + def self.const_missing(name) + name = name.to_sym + if ThreadSafe.const_defined?(name) + warn "[DEPRECATION] `Threadsafe::#{name}' is deprecated, use `ThreadSafe::#{name}' instead." + ThreadSafe.const_get(name) + else + warn "[DEPRECATION] the `Threadsafe' module is deprecated, please use `ThreadSafe` instead." + super + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/.gitignore b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/spec_helper.rb new file mode 100644 index 0000000..e22fe20 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/spec_helper.rb @@ -0,0 +1,31 @@ +require 'simplecov' +require 'coveralls' +require 'logger' + +SimpleCov.formatter = SimpleCov::Formatter::MultiFormatter[ + SimpleCov::Formatter::HTMLFormatter, + Coveralls::SimpleCov::Formatter +] + +SimpleCov.start do + project_name 'thread_safe' + add_filter '/coverage/' + add_filter '/pkg/' + add_filter '/spec/' + add_filter '/tasks/' + add_filter '/yard-template/' +end + +$VERBOSE = nil # suppress our deprecation warnings +require 'thread_safe' + +logger = Logger.new($stderr) +logger.level = Logger::WARN + +# import all the support files +Dir[File.join(File.dirname(__FILE__), 'support/**/*.rb')].each { |f| require File.expand_path(f) } + +RSpec.configure do |config| + #config.raise_errors_for_deprecations! 
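+  # run the examples in random order so order-dependent state leaking between examples shows up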
+ config.order = 'random' +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/src/thread_safe/SecurityManager.java b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/src/thread_safe/SecurityManager.java new file mode 100644 index 0000000..beb6c02 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/src/thread_safe/SecurityManager.java @@ -0,0 +1,21 @@ +package thread_safe; + +import java.security.Permission; +import java.util.ArrayList; +import java.util.List; + +public class SecurityManager extends java.lang.SecurityManager { + private final List<Permission> deniedPermissions = + new ArrayList<Permission>(); + + @Override + public void checkPermission(Permission p) { + if (deniedPermissions.contains(p)) { + throw new SecurityException("Denied!"); + } + } + + public void deny(Permission p) { + deniedPermissions.add(p); + } +} diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/.gitignore b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/threads.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/threads.rb new file mode 100644 index 0000000..1251fa4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/threads.rb @@ -0,0 +1 @@ +THREADS = (RUBY_ENGINE == 'ruby' ? 100 : 10) diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/threadsafe_test.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/threadsafe_test.rb new file mode 100644 index 0000000..ac3c66a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/support/threadsafe_test.rb @@ -0,0 +1,75 @@ +module ThreadSafe + module Test + class Latch + def initialize(count = 1) + @count = count + @mutex = Mutex.new + @cond = ConditionVariable.new + end + + def release + @mutex.synchronize do + @count -= 1 if @count > 0 + @cond.broadcast if @count.zero? + end + end + + def await + @mutex.synchronize do + @cond.wait @mutex if @count > 0 + end + end + end + + class Barrier < Latch + def await + @mutex.synchronize do + if @count.zero? # fall through + elsif @count > 0 + @count -= 1 + @count.zero? ? @cond.broadcast : @cond.wait(@mutex) + end + end + end + end + + class HashCollisionKey + attr_reader :hash, :key + def initialize(key, hash = key.hash % 3) + @key = key + @hash = hash + end + + def eql?(other) + other.kind_of?(self.class) && @key.eql?(other.key) + end + + def even? + @key.even? 
+ end + + def <=>(other) + @key <=> other.key + end + end + + # having 4 separate HCK classes helps for a more thorough CHMV8 testing + class HashCollisionKey2 < HashCollisionKey; end + class HashCollisionKeyNoCompare < HashCollisionKey + def <=>(other) + 0 + end + end + class HashCollisionKey4 < HashCollisionKeyNoCompare; end + + HASH_COLLISION_CLASSES = [HashCollisionKey, HashCollisionKey2, HashCollisionKeyNoCompare, HashCollisionKey4] + + def self.HashCollisionKey(key, hash = key.hash % 3) + HASH_COLLISION_CLASSES[rand(4)].new(key, hash) + end + + class HashCollisionKeyNonComparable < HashCollisionKey + undef <=> + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/.gitignore b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/array_spec.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/array_spec.rb new file mode 100644 index 0000000..f3752ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/array_spec.rb @@ -0,0 +1,18 @@ +module ThreadSafe + describe Array do + let!(:ary) { described_class.new } + + it 'concurrency' do + (1..THREADS).map do |i| + Thread.new do + 1000.times do + ary << i + ary.each { |x| x * 2 } + ary.shift + ary.last + end + end + end.map(&:join) + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_loops_spec.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_loops_spec.rb new file mode 100644 index 0000000..f29dc54 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_loops_spec.rb @@ -0,0 +1,507 @@ +Thread.abort_on_exception = true + +module ThreadSafe + describe 'CacheTorture' do + THREAD_COUNT = 40 + KEY_COUNT = (((2**13) - 2) * 0.75).to_i # get close to the doubling cliff + LOW_KEY_COUNT = (((2**8 ) - 2) * 0.75).to_i # get close to the doubling cliff + + INITIAL_VALUE_CACHE_SETUP = lambda do |options, keys| + cache = ThreadSafe::Cache.new + initial_value = options[:initial_value] || 0 + keys.each { |key| cache[key] = initial_value } + cache + end + ZERO_VALUE_CACHE_SETUP = lambda do |options, keys| + INITIAL_VALUE_CACHE_SETUP.call(options.merge(:initial_value => 0), keys) + end + + DEFAULTS = { + key_count: KEY_COUNT, + thread_count: THREAD_COUNT, + loop_count: 1, + prelude: '', + cache_setup: lambda { |options, keys| ThreadSafe::Cache.new } + } + + LOW_KEY_COUNT_OPTIONS = {loop_count: 150, key_count: LOW_KEY_COUNT} + SINGLE_KEY_COUNT_OPTIONS = {loop_count: 100_000, key_count: 1} + + it 'concurrency' do + code = <<-RUBY_EVAL + cache[key] + cache[key] = key + cache[key] + cache.delete(key) + RUBY_EVAL + do_thread_loop(:concurrency, code) + end + + it '#put_if_absent' do + do_thread_loop( + :put_if_absent, + 'acc += 1 unless cache.put_if_absent(key, key)', + key_count: 100_000 + ) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + it '#compute_put_if_absent' do + code = <<-RUBY_EVAL + if key.even? 
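+          # even keys go through compute_if_absent, odd keys through put_if_absent; acc is bumped only by the thread whose insert wins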
+ cache.compute_if_absent(key) { acc += 1; key } + else + acc += 1 unless cache.put_if_absent(key, key) + end + RUBY_EVAL + do_thread_loop(:compute_if_absent, code) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + it '#compute_if_absent_and_present' do + compute_if_absent_and_present + compute_if_absent_and_present(LOW_KEY_COUNT_OPTIONS) + compute_if_absent_and_present(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_to_zero' do + add_remove_to_zero + add_remove_to_zero(LOW_KEY_COUNT_OPTIONS) + add_remove_to_zero(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_to_zero_via_merge_pair' do + add_remove_to_zero_via_merge_pair + add_remove_to_zero_via_merge_pair(LOW_KEY_COUNT_OPTIONS) + add_remove_to_zero_via_merge_pair(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove' do + add_remove + add_remove(LOW_KEY_COUNT_OPTIONS) + add_remove(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_via_compute' do + add_remove_via_compute + add_remove_via_compute(LOW_KEY_COUNT_OPTIONS) + add_remove_via_compute(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'emove_via_compute_if_absent_present' do + add_remove_via_compute_if_absent_present + add_remove_via_compute_if_absent_present(LOW_KEY_COUNT_OPTIONS) + add_remove_via_compute_if_absent_present(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'add_remove_indiscriminate' do + add_remove_indiscriminate + add_remove_indiscriminate(LOW_KEY_COUNT_OPTIONS) + add_remove_indiscriminate(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_up' do + count_up + count_up(LOW_KEY_COUNT_OPTIONS) + count_up(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_up_via_compute' do + count_up_via_compute + count_up_via_compute(LOW_KEY_COUNT_OPTIONS) + count_up_via_compute(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_up_via_merge_pair' do + count_up_via_merge_pair + count_up_via_merge_pair(LOW_KEY_COUNT_OPTIONS) + count_up_via_merge_pair(SINGLE_KEY_COUNT_OPTIONS) + end + + it 'count_race' do + prelude = 'change = (rand(2) == 1) ? 
1 : -1' + code = <<-RUBY_EVAL + v = cache[key] + acc += change if cache.replace_pair(key, v, v + change) + RUBY_EVAL + do_thread_loop( + :count_race, + code, + loop_count: 5, + prelude: prelude, + cache_setup: ZERO_VALUE_CACHE_SETUP + ) do |result, cache, options, keys| + result_sum = sum(result) + expect(sum(keys.map { |key| cache[key] })).to eq result_sum + expect(sum(cache.values)).to eq result_sum + expect(options[:key_count]).to eq cache.size + end + end + + it 'get_and_set_new' do + code = 'acc += 1 unless cache.get_and_set(key, key)' + do_thread_loop(:get_and_set_new, code) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + it 'get_and_set_existing' do + code = 'acc += 1 if cache.get_and_set(key, key) == -1' + do_thread_loop( + :get_and_set_existing, + code, + cache_setup: INITIAL_VALUE_CACHE_SETUP, + initial_value: -1 + ) do |result, cache, options, keys| + expect_standard_accumulator_test_result(result, cache, options, keys) + end + end + + private + + def compute_if_absent_and_present(opts = {}) + prelude = 'on_present = rand(2) == 1' + code = <<-RUBY_EVAL + if on_present + cache.compute_if_present(key) { |old_value| acc += 1; old_value + 1 } + else + cache.compute_if_absent(key) { acc += 1; 1 } + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + stored_sum = 0 + stored_key_count = 0 + keys.each do |k| + if value = cache[k] + stored_sum += value + stored_key_count += 1 + end + end + expect(stored_sum).to eq sum(result) + expect(stored_key_count).to eq cache.size + end + end + + def add_remove(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + if do_add + acc += 1 unless cache.put_if_absent(key, key) + else + acc -= 1 if cache.delete_pair(key, key) + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_via_compute(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + cache.compute(key) do |old_value| + if do_add + acc += 1 unless old_value + key + else + acc -= 1 if old_value + nil + end + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_via_compute_if_absent_present(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + if do_add + cache.compute_if_absent(key) { acc += 1; key } + else + cache.compute_if_present(key) { acc -= 1; nil } + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_indiscriminate(opts = {}) + prelude = 'do_add = rand(2) == 1' + code = <<-RUBY_EVAL + if do_add + acc += 1 unless cache.put_if_absent(key, key) + else + acc -= 1 if cache.delete(key) + end + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, prelude: prelude}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def count_up(opts = {}) + 
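+    # every thread CAS-increments each key via replace_pair; expect_count_up then checks the stored totals against the summed per-thread accumulators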
code = <<-RUBY_EVAL + v = cache[key] + acc += 1 if cache.replace_pair(key, v, v + 1) + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5, cache_setup: ZERO_VALUE_CACHE_SETUP}.merge(opts) + ) do |result, cache, options, keys| + expect_count_up(result, cache, options, keys) + end + end + + def count_up_via_compute(opts = {}) + code = <<-RUBY_EVAL + cache.compute(key) do |old_value| + acc += 1 + old_value ? old_value + 1 : 1 + end + RUBY_EVAL + do_thread_loop( + __method__, + code, {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + expect_count_up(result, cache, options, keys) + result.inject(nil) do |previous_value, next_value| # since compute guarantees atomicity all count ups should be equal + expect(previous_value).to eq next_value if previous_value + next_value + end + end + end + + def count_up_via_merge_pair(opts = {}) + code = <<-RUBY_EVAL + cache.merge_pair(key, 1) { |old_value| old_value + 1 } + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + all_match = true + expected_value = options[:loop_count] * options[:thread_count] + keys.each do |key| + value = cache[key] + if expected_value != value + all_match = false + break + end + end + expect(all_match).to be_truthy + end + end + + def add_remove_to_zero(opts = {}) + code = <<-RUBY_EVAL + acc += 1 unless cache.put_if_absent(key, key) + acc -= 1 if cache.delete_pair(key, key) + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def add_remove_to_zero_via_merge_pair(opts = {}) + code = <<-RUBY_EVAL + acc += (cache.merge_pair(key, key) {}) ? 1 : -1 + RUBY_EVAL + do_thread_loop( + __method__, + code, + {loop_count: 5}.merge(opts) + ) do |result, cache, options, keys| + expect_all_key_mappings_exist(cache, keys, false) + expect(cache.size).to eq sum(result) + end + end + + def do_thread_loop(name, code, options = {}, &block) + options = DEFAULTS.merge(options) + meth = define_loop(name, code, options[:prelude]) + keys = to_keys_array(options[:key_count]) + run_thread_loop(meth, keys, options, &block) + + if options[:key_count] > 1 + options[:key_count] = (options[:key_count] / 40).to_i + keys = to_hash_collision_keys_array(options[:key_count]) + run_thread_loop( + meth, + keys, + options.merge(loop_count: options[:loop_count] * 5), + &block + ) + end + end + + def run_thread_loop(meth, keys, options, &block) + cache = options[:cache_setup].call(options, keys) + barrier = ThreadSafe::Test::Barrier.new(options[:thread_count]) + result = (1..options[:thread_count]).map do + Thread.new do + setup_sync_and_start_loop( + meth, + cache, + keys, + barrier, + options[:loop_count] + ) + end + end.map(&:value) + block.call(result, cache, options, keys) if block_given? 
+ end + + def setup_sync_and_start_loop(meth, cache, keys, barrier, loop_count) + my_keys = keys.shuffle + barrier.await + if my_keys.size == 1 + key = my_keys.first + send("#{meth}_single_key", cache, key, loop_count) + else + send("#{meth}_multiple_keys", cache, my_keys, loop_count) + end + end + + def define_loop(name, body, prelude) + inner_meth_name = :"_#{name}_loop_inner" + outer_meth_name = :"_#{name}_loop_outer" + # looping is splitted into the "loop methods" to trigger the JIT + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{inner_meth_name}_multiple_keys(cache, keys, i, length, acc) + #{prelude} + target = i + length + while i < target + key = keys[i] + #{body} + i += 1 + end + acc + end unless method_defined?(:#{inner_meth_name}_multiple_keys) + RUBY_EVAL + + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{inner_meth_name}_single_key(cache, key, i, length, acc) + #{prelude} + target = i + length + while i < target + #{body} + i += 1 + end + acc + end unless method_defined?(:#{inner_meth_name}_single_key) + RUBY_EVAL + + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{outer_meth_name}_multiple_keys(cache, keys, loop_count) + total_length = keys.size + acc = 0 + inc = 100 + loop_count.times do + i = 0 + pre_loop_inc = total_length % inc + acc = #{inner_meth_name}_multiple_keys(cache, keys, i, pre_loop_inc, acc) + i += pre_loop_inc + while i < total_length + acc = #{inner_meth_name}_multiple_keys(cache, keys, i, inc, acc) + i += inc + end + end + acc + end unless method_defined?(:#{outer_meth_name}_multiple_keys) + RUBY_EVAL + + self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{outer_meth_name}_single_key(cache, key, loop_count) + acc = 0 + i = 0 + inc = 100 + + pre_loop_inc = loop_count % inc + acc = #{inner_meth_name}_single_key(cache, key, i, pre_loop_inc, acc) + i += pre_loop_inc + + while i < loop_count + acc = #{inner_meth_name}_single_key(cache, key, i, inc, acc) + i += inc + end + acc + end unless method_defined?(:#{outer_meth_name}_single_key) + RUBY_EVAL + outer_meth_name + end + + def to_keys_array(key_count) + arr = [] + key_count.times {|i| arr << i} + arr + end + + def to_hash_collision_keys_array(key_count) + to_keys_array(key_count).map { |key| ThreadSafe::Test::HashCollisionKey(key) } + end + + def sum(result) + result.inject(0) { |acc, i| acc + i } + end + + def expect_standard_accumulator_test_result(result, cache, options, keys) + expect_all_key_mappings_exist(cache, keys) + expect(options[:key_count]).to eq sum(result) + expect(options[:key_count]).to eq cache.size + end + + def expect_all_key_mappings_exist(cache, keys, all_must_exist = true) + keys.each do |key| + value = cache[key] + if value || all_must_exist + expect(key).to eq value unless key == value # don't do a bazzilion assertions unless necessary + end + end + end + + def expect_count_up(result, cache, options, keys) + keys.each do |key| + value = cache[key] + expect(value).to be_truthy unless value + end + expect(sum(cache.values)).to eq sum(result) + expect(options[:key_count]).to eq cache.size + end + end unless RUBY_ENGINE == 'rbx' || ENV['TRAVIS'] +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_spec.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_spec.rb new file mode 100644 index 0000000..ff02857 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/cache_spec.rb @@ -0,0 +1,943 @@ 
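# Illustrative usage sketch (not part of the vendored gem source): the spec that
# follows exercises ThreadSafe::Cache, whose core API mirrors Hash plus the atomic
# helpers shown below. It assumes only that the thread_safe gem is on the load path
# and uses methods demonstrated in these specs.
require 'thread_safe'

cache = ThreadSafe::Cache.new
cache[:a] = 1                       # plain hash-style write
cache[:a]                           # => 1
cache.put_if_absent(:a, 2)          # => 1, the existing value wins
cache.compute_if_absent(:b) { 42 }  # => 42, block runs only when :b is absent
cache.fetch_or_store(:c, 7)         # => 7, stores the default on a miss
cache.delete(:a)                    # => 1, returns the removed value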
+Thread.abort_on_exception = true + +module ThreadSafe + describe Cache do + before(:each) do + @cache = described_class.new + end + + it 'concurrency' do + (1..THREADS).map do |i| + Thread.new do + 1000.times do |j| + key = i * 1000 + j + @cache[key] = i + @cache[key] + @cache.delete(key) + end + end + end.map(&:join) + end + + it 'retrieval' do + expect_size_change(1) do + expect(nil).to eq @cache[:a] + expect(nil).to eq @cache.get(:a) + @cache[:a] = 1 + expect(1).to eq @cache[:a] + expect(1).to eq @cache.get(:a) + end + end + + it '#put_if_absent' do + with_or_without_default_proc do + expect_size_change(1) do + expect(nil).to eq @cache.put_if_absent(:a, 1) + expect(1).to eq @cache.put_if_absent(:a, 1) + expect(1).to eq @cache.put_if_absent(:a, 2) + expect(1).to eq @cache[:a] + end + end + end + + describe '#compute_if_absent' do + it 'common' do + with_or_without_default_proc do + expect_size_change(3) do + expect(1).to eq @cache.compute_if_absent(:a) { 1 } + expect(1).to eq @cache.compute_if_absent(:a) { 2 } + expect(1).to eq @cache[:a] + + @cache[:b] = nil + expect(nil).to eq @cache.compute_if_absent(:b) { 1 } + expect(nil).to eq @cache.compute_if_absent(:c) {} + expect(nil).to eq @cache[:c] + expect(true).to eq @cache.key?(:c) + end + end + end + + it 'with return' do + with_or_without_default_proc do + expect_handles_return_lambda(:compute_if_absent, :a) + end + end + + it 'exception' do + with_or_without_default_proc do + expect_handles_exception(:compute_if_absent, :a) + end + end + + it 'atomicity' do + late_compute_threads_count = 10 + late_put_if_absent_threads_count = 10 + getter_threads_count = 5 + compute_started = ThreadSafe::Test::Latch.new(1) + compute_proceed = ThreadSafe::Test::Latch.new( + late_compute_threads_count + + late_put_if_absent_threads_count + + getter_threads_count + ) + block_until_compute_started = lambda do |name| + # what does it mean? + if (v = @cache[:a]) != nil + expect(nil).to v + end + compute_proceed.release + compute_started.await + end + + expect_size_change 1 do + late_compute_threads = Array.new(late_compute_threads_count) do + Thread.new do + block_until_compute_started.call('compute_if_absent') + expect(1).to eq @cache.compute_if_absent(:a) { fail } + end + end + + late_put_if_absent_threads = Array.new(late_put_if_absent_threads_count) do + Thread.new do + block_until_compute_started.call('put_if_absent') + expect(1).to eq @cache.put_if_absent(:a, 2) + end + end + + getter_threads = Array.new(getter_threads_count) do + Thread.new do + block_until_compute_started.call('getter') + Thread.pass while @cache[:a].nil? 
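+              # once any value becomes visible it must be the computed 1; the concurrent put_if_absent(:a, 2) calls must never win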
+ expect(1).to eq @cache[:a] + end + end + + Thread.new do + @cache.compute_if_absent(:a) do + compute_started.release + compute_proceed.await + sleep(0.2) + 1 + end + end.join + (late_compute_threads + + late_put_if_absent_threads + + getter_threads).each(&:join) + end + end + end + + describe '#compute_if_present' do + it 'common' do + with_or_without_default_proc do + expect_no_size_change do + expect(nil).to eq @cache.compute_if_present(:a) {} + expect(nil).to eq @cache.compute_if_present(:a) { 1 } + expect(nil).to eq @cache.compute_if_present(:a) { fail } + expect(false).to eq @cache.key?(:a) + end + + @cache[:a] = 1 + expect_no_size_change do + expect(1).to eq @cache.compute_if_present(:a) { 1 } + expect(1).to eq @cache[:a] + expect(2).to eq @cache.compute_if_present(:a) { 2 } + expect(2).to eq @cache[:a] + expect(false).to eq @cache.compute_if_present(:a) { false } + expect(false).to eq @cache[:a] + + @cache[:a] = 1 + yielded = false + @cache.compute_if_present(:a) do |old_value| + yielded = true + expect(1).to eq old_value + 2 + end + expect(true).to eq yielded + end + + expect_size_change(-1) do + expect(nil).to eq @cache.compute_if_present(:a) {} + expect(false).to eq @cache.key?(:a) + expect(nil).to eq @cache.compute_if_present(:a) { 1 } + expect(false).to eq @cache.key?(:a) + end + end + end + + it 'with return' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_return_lambda(:compute_if_present, :a) + end + end + + it 'exception' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_exception(:compute_if_present, :a) + end + end + end + + describe '#compute' do + it 'common' do + with_or_without_default_proc do + expect_no_size_change do + expect_compute(:a, nil, nil) {} + end + + expect_size_change(1) do + expect_compute(:a, nil, 1) { 1 } + expect_compute(:a, 1, 2) { 2 } + expect_compute(:a, 2, false) { false } + expect(false).to eq @cache[:a] + end + + expect_size_change(-1) do + expect_compute(:a, false, nil) {} + end + end + end + + it 'with return' do + with_or_without_default_proc do + expect_handles_return_lambda(:compute, :a) + @cache[:a] = 1 + expect_handles_return_lambda(:compute, :a) + end + end + + it 'exception' do + with_or_without_default_proc do + expect_handles_exception(:compute, :a) + @cache[:a] = 2 + expect_handles_exception(:compute, :a) + end + end + end + + describe '#merge_pair' do + it 'common' do + with_or_without_default_proc do + expect_size_change(1) do + expect(nil).to eq @cache.merge_pair(:a, nil) { fail } + expect(true).to eq @cache.key?(:a) + expect(nil).to eq @cache[:a] + end + + expect_no_size_change do + expect_merge_pair(:a, nil, nil, false) { false } + expect_merge_pair(:a, nil, false, 1) { 1 } + expect_merge_pair(:a, nil, 1, 2) { 2 } + end + + expect_size_change(-1) do + expect_merge_pair(:a, nil, 2, nil) {} + expect(false).to eq @cache.key?(:a) + end + end + end + + it 'with return' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_return_lambda(:merge_pair, :a, 2) + end + end + + it 'exception' do + with_or_without_default_proc do + @cache[:a] = 1 + expect_handles_exception(:merge_pair, :a, 2) + end + end + end + + it 'updates dont block reads' do + getters_count = 20 + key_klass = ThreadSafe::Test::HashCollisionKey + keys = [key_klass.new(1, 100), + key_klass.new(2, 100), + key_klass.new(3, 100)] # hash colliding keys + inserted_keys = [] + + keys.each do |key, i| + compute_started = ThreadSafe::Test::Latch.new(1) + compute_finished = ThreadSafe::Test::Latch.new(1) + 
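+        # these latches interleave the single computing thread with the reader threads, so lookups of already-inserted keys are verified while compute_if_absent is still running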
getters_started = ThreadSafe::Test::Latch.new(getters_count) + getters_finished = ThreadSafe::Test::Latch.new(getters_count) + + computer_thread = Thread.new do + getters_started.await + @cache.compute_if_absent(key) do + compute_started.release + getters_finished.await + 1 + end + compute_finished.release + end + + getter_threads = (1..getters_count).map do + Thread.new do + getters_started.release + inserted_keys.each do |inserted_key| + expect(true).to eq @cache.key?(inserted_key) + expect(1).to eq @cache[inserted_key] + end + expect(false).to eq @cache.key?(key) + compute_started.await + inserted_keys.each do |inserted_key| + expect(true).to eq @cache.key?(inserted_key) + expect(1).to eq @cache[inserted_key] + end + expect(false).to eq @cache.key?(key) + expect(nil).to eq @cache[key] + getters_finished.release + compute_finished.await + expect(true).to eq @cache.key?(key) + expect(1).to eq @cache[key] + end + end + + (getter_threads << computer_thread).map do |t| + expect(t.join(2)).to be_truthy + end # asserting no deadlocks + inserted_keys << key + end + end + + specify 'collision resistance' do + expect_collision_resistance( + (0..1000).map { |i| ThreadSafe::Test::HashCollisionKey(i, 1) } + ) + end + + specify 'collision resistance with arrays' do + special_array_class = Class.new(Array) do + def key # assert_collision_resistance expects to be able to call .key to get the "real" key + first.key + end + end + # Test collision resistance with a keys that say they responds_to <=>, but then raise exceptions + # when actually called (ie: an Array filled with non-comparable keys). + # See https://github.com/headius/thread_safe/issues/19 for more info. + expect_collision_resistance( + (0..100).map do |i| + special_array_class.new( + [ThreadSafe::Test::HashCollisionKeyNonComparable.new(i, 1)] + ) + end + ) + end + + it '#replace_pair' do + with_or_without_default_proc do + expect_no_size_change do + expect(false).to eq @cache.replace_pair(:a, 1, 2) + expect(false).to eq @cache.replace_pair(:a, nil, nil) + expect(false).to eq @cache.key?(:a) + end + end + + @cache[:a] = 1 + expect_no_size_change do + expect(true).to eq @cache.replace_pair(:a, 1, 2) + expect(false).to eq @cache.replace_pair(:a, 1, 2) + expect(2).to eq @cache[:a] + expect(true).to eq @cache.replace_pair(:a, 2, 2) + expect(2).to eq @cache[:a] + expect(true).to eq @cache.replace_pair(:a, 2, nil) + expect(false).to eq @cache.replace_pair(:a, 2, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + expect(true).to eq @cache.replace_pair(:a, nil, nil) + expect(true).to eq @cache.key?(:a) + expect(true).to eq @cache.replace_pair(:a, nil, 1) + expect(1).to eq @cache[:a] + end + end + + it '#replace_if_exists' do + with_or_without_default_proc do + expect_no_size_change do + expect(nil).to eq @cache.replace_if_exists(:a, 1) + expect(false).to eq @cache.key?(:a) + end + + @cache[:a] = 1 + expect_no_size_change do + expect(1).to eq @cache.replace_if_exists(:a, 2) + expect(2).to eq @cache[:a] + expect(2).to eq @cache.replace_if_exists(:a, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + expect(nil).to eq @cache.replace_if_exists(:a, 1) + expect(1).to eq @cache[:a] + end + end + end + + it '#get_and_set' do + with_or_without_default_proc do + expect(nil).to eq @cache.get_and_set(:a, 1) + expect(true).to eq @cache.key?(:a) + expect(1).to eq @cache[:a] + expect(1).to eq @cache.get_and_set(:a, 2) + expect(2).to eq @cache.get_and_set(:a, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq 
@cache.key?(:a) + expect(nil).to eq @cache.get_and_set(:a, 1) + expect(1).to eq @cache[:a] + end + end + + it '#key' do + with_or_without_default_proc do + expect(nil).to eq @cache.key(1) + @cache[:a] = 1 + expect(:a).to eq @cache.key(1) + expect(nil).to eq @cache.key(0) + end + end + + it '#key?' do + with_or_without_default_proc do + expect(false).to eq @cache.key?(:a) + @cache[:a] = 1 + expect(true).to eq @cache.key?(:a) + end + end + + it '#value?' do + with_or_without_default_proc do + expect(false).to eq @cache.value?(1) + @cache[:a] = 1 + expect(true).to eq @cache.value?(1) + end + end + + it '#delete' do + with_or_without_default_proc do |default_proc_set| + expect_no_size_change do + expect(nil).to eq @cache.delete(:a) + end + @cache[:a] = 1 + expect_size_change -1 do + expect(1).to eq @cache.delete(:a) + end + expect_no_size_change do + expect(nil).to eq @cache[:a] unless default_proc_set + + expect(false).to eq @cache.key?(:a) + expect(nil).to eq @cache.delete(:a) + end + end + end + + it '#delete_pair' do + with_or_without_default_proc do + expect_no_size_change do + expect(false).to eq @cache.delete_pair(:a, 2) + expect(false).to eq @cache.delete_pair(:a, nil) + end + @cache[:a] = 1 + expect_no_size_change do + expect(false).to eq @cache.delete_pair(:a, 2) + end + expect_size_change(-1) do + expect(1).to eq @cache[:a] + expect(true).to eq @cache.delete_pair(:a, 1) + expect(false).to eq @cache.delete_pair(:a, 1) + expect(false).to eq @cache.key?(:a) + end + end + end + + specify 'default proc' do + @cache = cache_with_default_proc(1) + expect_no_size_change do + expect(false).to eq @cache.key?(:a) + end + expect_size_change(1) do + expect(1).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + end + + specify 'falsy default proc' do + @cache = cache_with_default_proc(nil) + expect_no_size_change do + expect(false).to eq @cache.key?(:a) + end + expect_size_change(1) do + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + end + + describe '#fetch' do + it 'common' do + with_or_without_default_proc do |default_proc_set| + expect_no_size_change do + expect(1).to eq @cache.fetch(:a, 1) + expect(1).to eq @cache.fetch(:a) { 1 } + expect(false).to eq @cache.key?(:a) + + expect(nil).to eq @cache[:a] unless default_proc_set + end + + @cache[:a] = 1 + expect_no_size_change do + expect(1).to eq @cache.fetch(:a) { fail } + end + + expect { @cache.fetch(:b) }.to raise_error(KeyError) + + expect_no_size_change do + expect(1).to eq @cache.fetch(:b, :c) {1} # assert block supersedes default value argument + expect(false).to eq @cache.key?(:b) + end + end + end + + it 'falsy' do + with_or_without_default_proc do + expect(false).to eq @cache.key?(:a) + + expect_no_size_change do + expect(nil).to eq @cache.fetch(:a, nil) + expect(false).to eq @cache.fetch(:a, false) + expect(nil).to eq @cache.fetch(:a) {} + expect(false).to eq @cache.fetch(:a) { false } + end + + @cache[:a] = nil + expect_no_size_change do + expect(true).to eq @cache.key?(:a) + expect(nil).to eq @cache.fetch(:a) { fail } + end + end + end + + it 'with return' do + with_or_without_default_proc do + r = fetch_with_return + # r = lambda do + # @cache.fetch(:a) { return 10 } + # end.call + + expect_no_size_change do + expect(10).to eq r + expect(false).to eq @cache.key?(:a) + end + end + end + end + + describe '#fetch_or_store' do + it 'common' do + with_or_without_default_proc do |default_proc_set| + expect_size_change(1) do + expect(1).to eq @cache.fetch_or_store(:a, 1) + expect(1).to eq @cache[:a] + 
end + + @cache.delete(:a) + + expect_size_change 1 do + expect(1).to eq @cache.fetch_or_store(:a) { 1 } + expect(1).to eq @cache[:a] + end + + expect_no_size_change do + expect(1).to eq @cache.fetch_or_store(:a) { fail } + end + + expect { @cache.fetch_or_store(:b) }. + to raise_error(KeyError) + + expect_size_change(1) do + expect(1).to eq @cache.fetch_or_store(:b, :c) { 1 } # assert block supersedes default value argument + expect(1).to eq @cache[:b] + end + end + end + + it 'falsy' do + with_or_without_default_proc do + expect(false).to eq @cache.key?(:a) + + expect_size_change(1) do + expect(nil).to eq @cache.fetch_or_store(:a, nil) + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + @cache.delete(:a) + + expect_size_change(1) do + expect(false).to eq @cache.fetch_or_store(:a, false) + expect(false).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + @cache.delete(:a) + + expect_size_change(1) do + expect(nil).to eq @cache.fetch_or_store(:a) {} + expect(nil).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + @cache.delete(:a) + + expect_size_change(1) do + expect(false).to eq @cache.fetch_or_store(:a) { false } + expect(false).to eq @cache[:a] + expect(true).to eq @cache.key?(:a) + end + + @cache[:a] = nil + expect_no_size_change do + expect(nil).to eq @cache.fetch_or_store(:a) { fail } + end + end + end + + it 'with return' do + with_or_without_default_proc do + r = fetch_or_store_with_return + + expect_no_size_change do + expect(10).to eq r + expect(false).to eq @cache.key?(:a) + end + end + end + end + + it '#clear' do + @cache[:a] = 1 + expect_size_change(-1) do + expect(@cache).to eq @cache.clear + expect(false).to eq @cache.key?(:a) + expect(nil).to eq @cache[:a] + end + end + + describe '#each_pair' do + it 'common' do + @cache.each_pair { |k, v| fail } + expect(@cache).to eq @cache.each_pair {} + @cache[:a] = 1 + + h = {} + @cache.each_pair { |k, v| h[k] = v } + expect({:a => 1}).to eq h + + @cache[:b] = 2 + h = {} + @cache.each_pair { |k, v| h[k] = v } + expect({:a => 1, :b => 2}).to eq h + end + + it 'pair iterator' do + @cache[:a] = 1 + @cache[:b] = 2 + i = 0 + r = @cache.each_pair do |k, v| + if i == 0 + i += 1 + next + fail + elsif i == 1 + break :breaked + end + end + + expect(:breaked).to eq r + end + + it 'allows modification' do + @cache[:a] = 1 + @cache[:b] = 1 + @cache[:c] = 1 + + expect_size_change(1) do + @cache.each_pair do |k, v| + @cache[:z] = 1 + end + end + end + end + + it '#keys' do + expect([]).to eq @cache.keys + + @cache[1] = 1 + expect([1]).to eq @cache.keys + + @cache[2] = 2 + expect([1, 2]).to eq @cache.keys.sort + end + + it '#values' do + expect([]).to eq @cache.values + + @cache[1] = 1 + expect([1]).to eq @cache.values + + @cache[2] = 2 + expect([1, 2]).to eq @cache.values.sort + end + + it '#each_key' do + expect(@cache).to eq @cache.each_key { fail } + + @cache[1] = 1 + arr = [] + @cache.each_key { |k| arr << k } + expect([1]).to eq arr + + @cache[2] = 2 + arr = [] + @cache.each_key { |k| arr << k } + expect([1, 2]).to eq arr.sort + end + + it '#each_value' do + expect(@cache).to eq @cache.each_value { fail } + + @cache[1] = 1 + arr = [] + @cache.each_value { |k| arr << k } + expect([1]).to eq arr + + @cache[2] = 2 + arr = [] + @cache.each_value { |k| arr << k } + expect([1, 2]).to eq arr.sort + end + + it '#empty' do + expect(true).to eq @cache.empty? + @cache[:a] = 1 + expect(false).to eq @cache.empty? 
+ end + + it 'options validation' do + expect_valid_options(nil) + expect_valid_options({}) + expect_valid_options(foo: :bar) + end + + it 'initial capacity options validation' do + expect_valid_option(:initial_capacity, nil) + expect_valid_option(:initial_capacity, 1) + expect_invalid_option(:initial_capacity, '') + expect_invalid_option(:initial_capacity, 1.0) + expect_invalid_option(:initial_capacity, -1) + end + + it 'load factor options validation' do + expect_valid_option(:load_factor, nil) + expect_valid_option(:load_factor, 0.01) + expect_valid_option(:load_factor, 0.75) + expect_valid_option(:load_factor, 1) + expect_invalid_option(:load_factor, '') + expect_invalid_option(:load_factor, 0) + expect_invalid_option(:load_factor, 1.1) + expect_invalid_option(:load_factor, 2) + expect_invalid_option(:load_factor, -1) + end + + it '#size' do + expect(0).to eq @cache.size + @cache[:a] = 1 + expect(1).to eq @cache.size + @cache[:b] = 1 + expect(2).to eq @cache.size + @cache.delete(:a) + expect(1).to eq @cache.size + @cache.delete(:b) + expect(0).to eq @cache.size + end + + it '#get_or_default' do + with_or_without_default_proc do + expect(1).to eq @cache.get_or_default(:a, 1) + expect(nil).to eq @cache.get_or_default(:a, nil) + expect(false).to eq @cache.get_or_default(:a, false) + expect(false).to eq @cache.key?(:a) + + @cache[:a] = 1 + expect(1).to eq @cache.get_or_default(:a, 2) + end + end + + it '#dup,#clone' do + [:dup, :clone].each do |meth| + cache = cache_with_default_proc(:default_value) + cache[:a] = 1 + dupped = cache.send(meth) + expect(1).to eq dupped[:a] + expect(1).to eq dupped.size + expect_size_change(1, cache) do + expect_no_size_change(dupped) do + cache[:b] = 1 + end + end + expect(false).to eq dupped.key?(:b) + expect_no_size_change(cache) do + expect_size_change(-1, dupped) do + dupped.delete(:a) + end + end + expect(false).to eq dupped.key?(:a) + expect(true).to eq cache.key?(:a) + # test default proc + expect_size_change(1, cache) do + expect_no_size_change dupped do + expect(:default_value).to eq cache[:c] + expect(false).to eq dupped.key?(:c) + end + end + expect_no_size_change cache do + expect_size_change 1, dupped do + expect(:default_value).to eq dupped[:d] + expect(false).to eq cache.key?(:d) + end + end + end + end + + it 'is unfreezable' do + expect { @cache.freeze }.to raise_error(NoMethodError) + end + + it 'marshal dump load' do + new_cache = Marshal.load(Marshal.dump(@cache)) + expect(new_cache).to be_an_instance_of ThreadSafe::Cache + expect(0).to eq new_cache.size + @cache[:a] = 1 + new_cache = Marshal.load(Marshal.dump(@cache)) + expect(1).to eq @cache[:a] + expect(1).to eq new_cache.size + end + + it 'marshal dump doesnt work with default proc' do + expect { Marshal.dump(ThreadSafe::Cache.new {}) }.to raise_error(TypeError) + end + + private + + def with_or_without_default_proc(&block) + block.call(false) + @cache = ThreadSafe::Cache.new { |h, k| h[k] = :default_value } + block.call(true) + end + + def cache_with_default_proc(default_value = 1) + ThreadSafe::Cache.new { |cache, k| cache[k] = default_value } + end + + def expect_size_change(change, cache = @cache, &block) + start = cache.size + block.call + expect(change).to eq cache.size - start + end + + def expect_valid_option(option_name, value) + expect_valid_options(option_name => value) + end + + def expect_valid_options(options) + c = ThreadSafe::Cache.new(options) + expect(c).to be_an_instance_of ThreadSafe::Cache + end + + def expect_invalid_option(option_name, value) + 
expect_invalid_options(option_name => value) + end + + def expect_invalid_options(options) + expect { ThreadSafe::Cache.new(options) }.to raise_error(ArgumentError) + end + + def expect_no_size_change(cache = @cache, &block) + expect_size_change(0, cache, &block) + end + + def expect_handles_return_lambda(method, key, *args) + before_had_key = @cache.key?(key) + before_had_value = before_had_key ? @cache[key] : nil + + returning_lambda = lambda do + @cache.send(method, key, *args) { return :direct_return } + end + + expect_no_size_change do + expect(:direct_return).to eq returning_lambda.call + expect(before_had_key).to eq @cache.key?(key) + expect(before_had_value).to eq @cache[key] if before_had_value + end + end + + class TestException < Exception; end + def expect_handles_exception(method, key, *args) + before_had_key = @cache.key?(key) + before_had_value = before_had_key ? @cache[key] : nil + + expect_no_size_change do + expect { @cache.send(method, key, *args) { raise TestException, '' } }. + to raise_error(TestException) + + expect(before_had_key).to eq @cache.key?(key) + expect(before_had_value).to eq @cache[key] if before_had_value + end + end + + def expect_compute(key, expected_old_value, expected_result, &block) + result = @cache.compute(:a) do |old_value| + expect(expected_old_value).to eq old_value + block.call + end + expect(expected_result).to eq result + end + + def expect_merge_pair(key, value, expected_old_value, expected_result, &block) + result = @cache.merge_pair(key, value) do |old_value| + expect(expected_old_value).to eq old_value + block.call + end + expect(expected_result).to eq result + end + + def expect_collision_resistance(keys) + keys.each { |k| @cache[k] = k.key } + 10.times do |i| + size = keys.size + while i < size + k = keys[i] + expect(k.key == @cache.delete(k) && + !@cache.key?(k) && + (@cache[k] = k.key; @cache[k] == k.key)).to be_truthy + i += 10 + end + end + expect(keys.all? 
{ |k| @cache[k] == k.key }).to be_truthy + end + + # Took out for compatibility with Rubinius, see https://github.com/rubinius/rubinius/issues/1312 + def fetch_with_return + lambda do + @cache.fetch(:a) { return 10 } + end.call + end + + # Took out for compatibility with Rubinius, see https://github.com/rubinius/rubinius/issues/1312 + def fetch_or_store_with_return + lambda do + @cache.fetch_or_store(:a) { return 10 } + end.call + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/hash_spec.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/hash_spec.rb new file mode 100644 index 0000000..1403b63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/hash_spec.rb @@ -0,0 +1,17 @@ +module ThreadSafe + describe Hash do + let!(:hsh) { described_class.new } + + it 'concurrency' do + (1..THREADS).map do |i| + Thread.new do + 1000.times do |j| + hsh[i * 1000 + j] = i + hsh[i * 1000 + j] + hsh.delete(i * 1000 + j) + end + end + end.map(&:join) + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/no_unsafe_spec.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/no_unsafe_spec.rb new file mode 100644 index 0000000..3bd03e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/no_unsafe_spec.rb @@ -0,0 +1,27 @@ +if defined?(JRUBY_VERSION) && ENV['TEST_NO_UNSAFE'] + # to be used like this: rake test TEST_NO_UNSAFE=true + load 'test/package.jar' + java_import 'thread_safe.SecurityManager' + manager = SecurityManager.new + + # Prevent accessing internal classes + manager.deny(java.lang.RuntimePermission.new('accessClassInPackage.sun.misc')) + java.lang.System.setSecurityManager(manager) + + module ThreadSafe + describe 'no_unsafe' do + it 'security_manager_is_used' do + begin + java_import 'sun.misc.Unsafe' + fail + rescue SecurityError + end + end + + it 'no_unsafe_version_of_chmv8_is_used' do + require 'thread_safe/jruby_cache_backend' # make sure the jar has been loaded + expect(!Java::OrgJrubyExtThread_safe::JRubyCacheBackendLibrary::JRubyCacheBackend::CAN_USE_UNSAFE_CHM).to be_truthy + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/synchronized_delegator_spec.rb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/synchronized_delegator_spec.rb new file mode 100644 index 0000000..5302e2b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/spec/thread_safe/synchronized_delegator_spec.rb @@ -0,0 +1,85 @@ +require 'thread_safe/synchronized_delegator.rb' + +module ThreadSafe + describe SynchronizedDelegator do + it 'wraps array' do + array = ::Array.new + sync_array = described_class.new(array) + + array << 1 + expect(1).to eq sync_array[0] + + sync_array << 2 + expect(2).to eq array[1] + end + + it 'synchronizes access' do + t1_continue, t2_continue = false, false + + hash = ::Hash.new do |hash, key| + t2_continue = true + unless hash.find { |e| e[1] == key.to_s } # just to do something + hash[key] = key.to_s + Thread.pass until t1_continue + end + end + sync_hash = described_class.new(hash) + sync_hash[1] = 'egy' + + t1 = Thread.new do + sync_hash[2] = 'dva' + sync_hash[3] # triggers t2_continue + end + + t2 = Thread.new do + Thread.pass until t2_continue + sync_hash[4] = '42' + end + + sleep(0.05) # sleep some to allow threads to boot + + until t2.status == 'sleep' do + Thread.pass + end + + expect(3).to eq 
hash.keys.size + + t1_continue = true + t1.join; t2.join + + expect(4).to eq sync_hash.size + end + + it 'synchronizes access with block' do + t1_continue, t2_continue = false, false + + array = ::Array.new + sync_array = described_class.new(array) + + t1 = Thread.new do + sync_array << 1 + sync_array.each do + t2_continue = true + Thread.pass until t1_continue + end + end + + t2 = Thread.new do + # sleep(0.01) + Thread.pass until t2_continue + sync_array << 2 + end + + until t2.status == 'sleep' || t2.status == false + Thread.pass + end + + expect(1).to eq array.size + + t1_continue = true + t1.join; t2.join + + expect([1, 2]).to eq array + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/tasks/update_doc.rake b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/tasks/update_doc.rake new file mode 100644 index 0000000..f2cab2f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/tasks/update_doc.rake @@ -0,0 +1,45 @@ +require 'yard' +YARD::Rake::YardocTask.new + +root = File.expand_path File.join(File.dirname(__FILE__), '..') + +namespace :yard do + + cmd = lambda do |command| + puts ">> executing: #{command}" + system command or raise "#{command} failed" + end + + desc 'Pushes generated documentation to github pages: http://ruby-concurrency.github.io/thread_safe/' + task :push => [:setup, :yard] do + + message = Dir.chdir(root) do + `git log -n 1 --oneline`.strip + end + puts "Generating commit: #{message}" + + Dir.chdir "#{root}/yardoc" do + cmd.call "git add -A" + cmd.call "git commit -m '#{message}'" + cmd.call 'git push origin gh-pages' + end + + end + + desc 'Setups second clone in ./yardoc dir for pushing doc to github' + task :setup do + + unless File.exist? "#{root}/yardoc/.git" + cmd.call "rm -rf #{root}/yardoc" if File.exist?("#{root}/yardoc") + Dir.chdir "#{root}" do + cmd.call 'git clone --single-branch --branch gh-pages git@github.com:ruby-concurrency/thread_safe.git ./yardoc' + end + end + Dir.chdir "#{root}/yardoc" do + cmd.call 'git fetch origin' + cmd.call 'git reset --hard origin/gh-pages' + end + + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/thread_safe.gemspec b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/thread_safe.gemspec new file mode 100644 index 0000000..cf3faeb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/thread_safe.gemspec @@ -0,0 +1,26 @@ +# -*- encoding: utf-8 -*- +$:.push File.expand_path('../lib', __FILE__) unless $:.include?('lib') +require 'thread_safe/version' + +Gem::Specification.new do |gem| + gem.authors = ["Charles Oliver Nutter", "thedarkone"] + gem.email = ["headius@headius.com", "thedarkone2@gmail.com"] + gem.summary = %q{Thread-safe collections and utilities for Ruby} + gem.description = %q{A collection of data structures and utilities to make thread-safe programming in Ruby easier} + gem.homepage = "https://github.com/ruby-concurrency/thread_safe" + + gem.files = `git ls-files`.split($\) + gem.files += ['lib/thread_safe/jruby_cache_backend.jar'] if defined?(JRUBY_VERSION) + gem.files -= ['.gitignore'] # see https://github.com/headius/thread_safe/issues/40#issuecomment-42315441 + gem.platform = 'java' if defined?(JRUBY_VERSION) + gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) } + gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) + gem.name = "thread_safe" + gem.require_paths = ["lib"] + gem.version = ThreadSafe::VERSION + gem.license = "Apache-2.0" + + gem.add_development_dependency 
'atomic', '= 1.1.16' + gem.add_development_dependency 'rake', '< 12.0' + gem.add_development_dependency 'rspec', '~> 3.2' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/yard-template/default/fulldoc/html/css/common.css b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/yard-template/default/fulldoc/html/css/common.css new file mode 100644 index 0000000..dfd9d85 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/yard-template/default/fulldoc/html/css/common.css @@ -0,0 +1,125 @@ +/* Override this file with custom rules */ + +body { + line-height: 18px; +} + +.docstring code, .docstring .object_link a, #filecontents code { + padding: 0px 3px 1px 3px; + border: 1px solid #eef; + background: #f5f5ff; +} + +#filecontents pre code, .docstring pre code { + border: none; + background: none; + padding: 0; +} + +#filecontents pre.code, .docstring pre.code, .tags pre.example, .docstring code, .docstring .object_link a, +#filecontents code { + -moz-border-radius: 2px; + -webkit-border-radius: 2px; +} + +/* syntax highlighting */ +.source_code { + display: none; + padding: 3px 8px; + border-left: 8px solid #ddd; + margin-top: 5px; +} + +#filecontents pre.code, .docstring pre.code, .source_code pre { + font-family: monospace; +} + +#filecontents pre.code, .docstring pre.code { + display: block; +} + +.source_code .lines { + padding-right: 12px; + color: #555; + text-align: right; +} + +#filecontents pre.code, .docstring pre.code, +.tags pre.example { + padding: 5px 12px; + margin-top: 4px; + border: 1px solid #eef; + background: #f5f5ff; +} + +pre.code { + color: #000; +} + +pre.code .info.file { + color: #555; +} + +pre.code .val { + color: #036A07; +} + +pre.code .tstring_content, +pre.code .heredoc_beg, pre.code .heredoc_end, +pre.code .qwords_beg, pre.code .qwords_end, +pre.code .tstring, pre.code .dstring { + color: #036A07; +} + +pre.code .fid, +pre.code .rubyid_new, +pre.code .rubyid_to_s, +pre.code .rubyid_to_sym, +pre.code .rubyid_to_f, +pre.code .rubyid_to_i, +pre.code .rubyid_each { + color: inherit; +} + +pre.code .comment { + color: #777; + font-style: italic; +} + +pre.code .const, pre.code .constant { + color: inherit; + font-weight: bold; + font-style: italic; +} + +pre.code .label, +pre.code .symbol { + color: #C5060B; +} + +pre.code .kw, +pre.code .rubyid_require, +pre.code .rubyid_extend, +pre.code .rubyid_include, +pre.code .int { + color: #0000FF; +} + +pre.code .ivar { + color: #660E7A; +} + +pre.code .gvar, +pre.code .rubyid_backref, +pre.code .rubyid_nth_ref { + color: #6D79DE; +} + +pre.code .regexp, .dregexp { + color: #036A07; +} + +pre.code a { + border-bottom: 1px dotted #bbf; +} + diff --git a/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/yard-template/default/layout/html/footer.erb b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/yard-template/default/layout/html/footer.erb new file mode 100644 index 0000000..94c0273 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/thread_safe-0.3.6/yard-template/default/layout/html/footer.erb @@ -0,0 +1,16 @@ +<div id="footer"> + Generated on <%= Time.now.strftime("%c") %> by + <a href="http://yardoc.org" title="Yay! A Ruby Documentation Tool" target="_parent">yard</a> + <%= YARD::VERSION %> (ruby-<%= RUBY_VERSION %>). 
+</div> + +<script> + (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ + (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), + m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) + })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); + + ga('create', 'UA-57940973-1', 'auto'); + ga('send', 'pageview'); + +</script> diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.gitignore b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.gitignore new file mode 100644 index 0000000..a414378 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.gitignore @@ -0,0 +1,8 @@ +*.gem +Gemfile.lock +doc/ +.yardoc +.rvmrc +coverage +check.sh +tags diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.rspec b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.rspec new file mode 100644 index 0000000..838fd57 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.rspec @@ -0,0 +1,4 @@ +--tty +--color +--format documentation +--backtrace diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.travis.yml b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.travis.yml new file mode 100644 index 0000000..dc0c9df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/.travis.yml @@ -0,0 +1,26 @@ +language: ruby +script: "bundle exec rake" +rvm: + - 1.9.3 + - 2.0.0 + - 2.1.10 + - 2.2.10 + - 2.3.8 + - 2.4.7 + - 2.5.6 + - 2.6.4 + - ruby-head + - jruby-head + - jruby-18mode + - jruby-19mode +matrix: + fast_finish: true + allow_failures: + - rvm: ruby-head + - rvm: jruby-head + - rvm: ree + include: + - rvm: 1.8.7 + dist: precise + - rvm: 1.9.2 + dist: trusty diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/CHANGELOG.md b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/CHANGELOG.md new file mode 100644 index 0000000..223e4bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/CHANGELOG.md @@ -0,0 +1,406 @@ +# Changelog + +## Master + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.4.0...master) + +## 1.4.0 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.1.2...v1.4.0) + +#### 1 feature +- Faraday adapter exceptions namespace compatibility with Faraday v1 ([@iMacTia](https://github.com/iMacTia) in [#616](https://github.com/typhoeus/typhoeus/pull/616)) + +#### 3 Others +- Yard warning fixes ([@olleolleolle](https://github.com/olleolleolle) in [#622](https://github.com/typhoeus/typhoeus/pull/622)) +- Add more Ruby versions in CI matrix ([@olleolleolle](https://github.com/olleolleolle) in [#623](https://github.com/typhoeus/typhoeus/pull/623)) +- Use of argument passed in function instead of `attr_reader` ([@v-kolesnikov](https://github.com/v-kolesnikov) in [#625](https://github.com/typhoeus/typhoeus/pull/625)) + +## 1.1.2 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.1.1...v1.1.2) + +## 1.1.1 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.1.0...v1.1.1) + +## 1.1.0 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.0.2...v1.1.0) + +* Unless specified `Expect` header is set to be empty to avoid `100 continue` + to be set when using `PUT` +* Add global config option `Typhoeus::Config.proxy` + +## 1.0.2 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.0.1...v1.0.2) + +## 1.0.1 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v1.0.0...v1.0.1) + +## 1.0.0 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.8.0...v1.0.0) + +## 0.8.0 + 
+[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.7.3...v0.8.0) + +* `EasyFactory`: Reduced object allocations and method calls during deprecated + option handling and option sanitization. + ([Tasos Laskos](https://github.com/zapotek)) +* `Response` ([Tasos Laskos](https://github.com/zapotek)) + * `Header` + * `#process_pair`: Halved `#set_value` calls. + * `#set_value`: Minimized `Hash` accesses. + * `#parse`: Use `String#start_with?` instead of `Regexp` match. + * `#process_line`: Optimized key/value sanitization. + * `Status` + * `#timed_out?`: Only return `true` when `#return_code` is `operation_timedout`. + +## 0.7.3 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.7.2...v0.7.3) + +* Add on_body callbacks individually to allow Ethon to recognize the return code + +## 0.7.2 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.7.1...v0.7.2) + +* Allow arrays to be passed to Expectation#and_return + ([JP Moral](https://github.com/jpmoral)) + +* Added getter for `redirect_time` value. + ([Adrien Jarthon](https://github.com/jarthod)) + +## 0.7.1 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.7.0...v0.7.1) + +Bugfixes: + +* Forking may cause libcurl sockets to be shared with child processes, causing HTTP requests to be interleaved + ([Rolf Timmermans](https://github.com/rolftimmermans), [\#436](https://github.com/typhoeus/typhoeus/pull/426)) + +## 0.7.0 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.7.0.pre1...v0.7.0) + +Bugfixes: + +* Call on_headers and on_body when using stubbed responses. + +## 0.7.0.pre1 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.9...v0.7.0.pre1) + +Enhancements: + +* Improving timeout behavior and documentation. `no_signal` is now set per default! + ([Jonas Wagner](https://github.com/jwagner), [\#398](https://github.com/typhoeus/typhoeus/pull/398)) + +## 0.6.8 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.7...v0.6.8) + +Bugfixes: + +* Fix Faraday 0.9 compatibility. + ([Gleb Mazovetskiy](https://github.com/glebm), [\#357](https://github.com/typhoeus/typhoeus/pull/357)) +* Fix Request#hash for different key orders. + ([Matthew Schulkind](https://github.com/mschulkind), [\#344](https://github.com/typhoeus/typhoeus/pull/344)) + +Enhancements: + +* Use an updated Ethon version. Note that from now on the `mime-types` is no longer a Ethon dependency. The gem will be still used if available to determine the mime type of a file which is uploaded. That means you have to have take care of the gem installation yourself. +* Use SVG for status badges in README. + ([Sean Linsley](https://github.com/seanlinsley), [\#353](https://github.com/typhoeus/typhoeus/pull/353)) +* Missing quotes in README example code. + ([Jason R. Clark](https://github.com/jasonrclark), [\#351](https://github.com/typhoeus/typhoeus/pull/351)) +* Specs for Faraday adapter. + ([michaelavila](https://github.com/michaelavila), [\#348](https://github.com/typhoeus/typhoeus/pull/348)) +* Clarify wording in README. + ([Sean Linsley](https://github.com/seanlinsley), [\#347](https://github.com/typhoeus/typhoeus/pull/347)) +* Make caching easier for non-memory caches. + ([Matthew Schulkind](https://github.com/mschulkind), [\#345](https://github.com/typhoeus/typhoeus/pull/345)) +* Spec refactoring. 
+ ([Matthew Schulkind](https://github.com/mschulkind), [\#343](https://github.com/typhoeus/typhoeus/pull/343)) + +## 0.6.7 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.6...v0.6.7) + +Enhancements: + +* Add response streaming. + ([\#339](https://github.com/typhoeus/typhoeus/pull/339)) + +## 0.6.6 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.5...v0.6.6) + +## 0.6.5 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.4...v0.6.5) + +## 0.6.4 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.3...v0.6.4) + +The changelog entries are coming soon! + +## 0.6.3 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.2...v0.6.3) + +Enhancements: + +* Cache hydra per thread. +* Various documentation improvements. + ([craiglittle](https://github.com/craiglittle)) +* Add support for lazy construction of responses from stubbed requests. + ([ryankindermann](https://github.com/ryankinderman), [\#275](https://github.com/typhoeus/typhoeus/pull/275)) + +## 0.6.2 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.1...v0.6.2) + +Enhancements: + +* Reintroduce a global cache. +* `Request#handled_response` falls back to the original response. + ([turnerking](https://github.com/turnerking), [\#272](https://github.com/typhoeus/typhoeus/pull/272)) +* When `Errors::NoStub` is raised the `url` is displayed. + ([dschneider](https://github.com/dschneider), [\#276](https://github.com/typhoeus/typhoeus/pull/276)) +* Make `Request#hash` consistent. +* Add `.rvmrc` and `ctags` to `.gitignore`. + ([ryankindermann](https://github.com/ryankinderman), [\#274](https://github.com/typhoeus/typhoeus/pull/274)) + +## 0.6.1 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.6.0...v0.6.1) + +Enhancements: + +* Updated ethon version which allows to set multiple protocols. + +## 0.6.0 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.5.4...v0.6.0) + +Enhancements: + +* `Request#url` now also contains the url parameters. +* Use updated ethon version which provides access to protocols and redir_protocols in response to [libcurl SASL buffer overflow vulnerability](http://curl.haxx.se/docs/adv_20130206.html) + +Bugfixes: + +* Corrected ssl options for the faraday adapter. +* The before hook now correctly returns the response. + ([Mattias Putman](https://github.com/challengee), [\#268](https://github.com/typhoeus/typhoeus/pull/268)) +* Benchmark is working again. + +## 0.5.4 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.5.3...v0.5.4) + +Enhancements: + +* Make sure response_code is an integer. +* When setting an header through vcr or webmock it becomes a `Typhoeus::Response::Header`. +* Provide a Rack middleware to decode nested Typhoeus arrays properly. + ([Dwayne Macgowan](https://github.com/dwaynemac), [\#224](https://github.com/typhoeus/typhoeus/issues/224)) +* Handled response is available again. +* Rename parameter `url` to `base_url`. See discussion here: [\#250](https://github.com/typhoeus/typhoeus/issues/250). + ([bkimble](https://github.com/bkimble), [\#256](https://github.com/typhoeus/typhoeus/pull/256)) +* Provide O(1) header access. + * Work around ruby 1.8.7 limitations. + ([Chris Johnson](https://github.com/findchris), [\#227](https://github.com/typhoeus/typhoeus/pull/227) ) + * Provide symbol access. + +## 0.5.3 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.5.2...v0.5.3) + +Enhancements: + +* When checking options in Expecation#matches? 
also consider Request#options. + +Bugfixes: + +* Do not break backwards compatibility with case insensitive headers access. +* Make sure hydra behaves correct in case of before hooks. + +## 0.5.2 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.5.1...v0.5.2) + +Enhancements: + +* Do not check the return_code in Response#success? when response is mocked. +* Check for memoization, stubbing, before hooks are delayed to Hydra#run. It + was on Hydra#queue before and led to strange behavior because if the request + was stubbed, it was wrapped up in queue already. There was no way to add + callbacks after queue thatswhy. This is now different, since everything happens + in run, just as you expect. + +## 0.5.1 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.5.0...v0.5.1) + +Enhancements: + +* Downcase header keys for easier access + ( [\#227](https://github.com/typhoeus/typhoeus/issues/227) ) +* Using an updated Ethon version. + +## 0.5.0 + +[Full Changelog](http://github.com/typhoeus/typhoeus/compare/v0.4.2...v0.5.0) + +Major Changes: + +* Ethon integration + * Params are url params and a body is always a body for every request type + * The options you can set might have a slightly other names, as Ethon sticks to + libcurl names. See + [Easy.new](http://rubydoc.info/github/typhoeus/ethon/Ethon/Easy#initialize-instance_method) + for a description. + * Request parameter and body are properly encoded (only POST multiform body is not) + * No more header sanitizing. Before: `:headers => { 'user_agent' => 'Custom' }` was modified to + `:headers => { 'User-Agent' => 'Custom' }` + * `Typhoeus::Easy` and `Typhoeus::Multi` are now `Ethon::Easy` and `Ethon::Multi` + +* Request shortcuts: `Typhoeus.get("www.google.de")` +* Global configuration: +```ruby +Typhoeus.configure do |config| + config.verbose = true + config.memoize = true +end +``` +* No more `Response#headers_hash`, instead `Response#headers` returning the last + header and response#redirections returning the responses with headers + generated through redirections +* Instead of defining the same callbacks on every request, you can define global callbacks: +```ruby +Typhoeus.on_complete { p "yay" } +``` +* The stubbing interface changed slightly. You now have the same syntax as for requests: +```ruby +Typhoeus.stub(url, options).and_return(response) +``` +* The following things were removed because they do not seemed to be used at all. Ping me if you disagree! + * `Typhoeus::Filter` + * `Typhoeus::Remote` + * `Typhoeus::RemoteMethod` + * `Typhoeus::RemoteProxyObject` + * build in cache interface + +Enhancements: + +* Documentation + ( [Alex P](https://github.com/ifesdjeen), [\#188](https://github.com/typhoeus/typhoeus/issues/188) ) +* Request#on\_complete can hold multiple blocks. +* Request#eql? 
recognizes when header/params/body has a different order, but still same keys and values + ( [Alex P](https://github.com/ifesdjeen), [\#194](https://github.com/typhoeus/typhoeus/issues/194) ) + +Bug Fixes: + +* Zero bytes in strings are escaped for libcurl +* Add support for socks5 hostname proxy type + ( [eweathers](https://github.com/eweathers), [\#183](https://github.com/typhoeus/typhoeus/issues/183) ) +* Post body is encoded + ( [Rohan Deshpande](https://github.com/rdeshpande), [\#143](https://github.com/typhoeus/typhoeus/issues/143) ) +* Set default user agent + ( [Steven Shingler](https://github.com/sshingler), [\#176](https://github.com/typhoeus/typhoeus/issues/176) ) + +## 0.4.2 +* A header hotfix + +## 0.4.1 +* Fix verifypeer and verifyhost options +* Fix header sending + +## 0.4.0 +* Make a GET even when a body is given +* Deprecated User Agent setter removed +* Allow cache key basis overwrite (John Crepezzi, #147) +* FFI integration (Daniel Cavanagh, #151) +* Refactor upload code (Marnen Laibow-Koser, #152) +* Fix travis-ci build (Ezekiel Templin, #160) + +## 0.3.3 +* Make sure to call the Easy::failure callback on all non-success http response codes, even invalid ones. [balexis] +* Use bytesize instead of length to determine Content-Length [dlamacchia] +* Added SSL version option to Easy/Request [michelbarbosa/dbalatero] + +## 0.3.2 +* Fix array params to be consistent with HTTP spec [gridaphobe] +* traversal\_to\_params\_hash should use the escape option [itsmeduncan] +* Fix > 1024 open file descriptors [mschulkind] +* Fixed a bug with internally queued requests being dropped [mschulkind] +* Use gemspec in bundler to avoid duplication [mschulkind] +* Run internally queued requests in FIFO order [mschulkind] +* Moved Typhoeus::VERSION to a separate file, to fix rake build\_native [mschulkind] +* Fixed problems related to put requests with empty bodies [skaes, GH-84] +* Added CURLOPT\_INTERFACE option via Request#interface=. [spiegela] +* Added Tempfile support to Form#process! [richievos] +* Hydra won't forget to accept gzip/deflate encoding [codesnik] +* Accept and convert strings to integers in Typhoeus::Request#initialize for timeout/cache\_timeout/connect\_timeout values when using ruby 1.9.x. [djnawara] +* Added interface for registering stub finders [myronmarston] +* Fixed header stubbing [myronmarston] +* Added PKCS12 support [jodell] +* Make a request with handlers marshallable [bernerdschaefer] +* Upgraded to RSpec 2 [bernerdschaefer] +* Fix HTTP status edge-case [balexis] +* Expose primary\_ip to easy object [balexis] + +## 0.2.4 +* Fix form POSTs to only use multipart for file uploads, otherwise use application/x-www-form-urlencoded [dbalatero] + +## 0.2.3 +* Code duplication in Typhoeus::Form led to nested URL param errors on POST only. Fixed [dbalatero] + +## 0.2.2 +* Fixed a problem with nested URL params encoding incorrectly [dbalatero] + +## 0.2.1 +* Added extended proxy support [Zapotek, GH-46] +* eliminated compile time warnings by using proper type declarations [skaes, GH-54] +* fixed broken calls to rb\_raise [skaes, GH-54] +* prevent leaking of curl easy handles when exceptions are raised (either from typhoeus itself or user callbacks) [skaes, GH-54] +* fixed Easy#timed\_out? using curl return codes [skaes, GH-54] +* provide curl return codes and corresponding curl error messages on classes Easy and Request [skaes, GH-54] +* allow VCR to whitelist hosts in Typhoeus stubbing/mocking [myronmarston, GH-57] +* added timed\_out? 
documentation, method to Response [dbalatero, GH-34] +* added abort to Hydra to prematurely stop a hydra.run [Zapotek] +* added file upload support for POST requests [jtarchie, GH-59] + +## 0.2.0 +* Fix warning in Request#headers from attr\_accessor +* Params with array values were not parsing into the format that rack expects [GH-39, smartocci] +* Removed Rack as a dependency [GH-45] +* Added integration hooks for VCR! + +## 0.1.31 +* Fixed bug in setting compression encoding [morhekil] +* Exposed authentication control methods through Request interface [morhekil] + +## 0.1.30 +* Exposed CURLOPT\_CONNECTTIMEOUT\_MS to Requests [balexis] + +## 0.1.29 +* Fixed a memory corruption with using CURLOPT\_POSTFIELDS [gravis, +32531d0821aecc4] + +## 0.1.28 +* Added SSL cert options for Typhoeus::Easy [GH-25, gravis] +* Ported SSL cert options to Typhoeus::Request interface [gravis] +* Added support for any HTTP method (purge for Varnish) [ryana] + +## 0.1.27 +* Added rack as dependency, added dev dependencies to Rakefile [GH-21] diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/CONTRIBUTING.md b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/CONTRIBUTING.md new file mode 100644 index 0000000..667e8a3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/CONTRIBUTING.md @@ -0,0 +1,20 @@ +We love pull requests. Here's a quick guide: + +1. Fork the repo. + +2. Run the tests. We only take pull requests with passing tests, and it's great +to know that you have a clean slate: `bundle && bundle exec rake` + +3. Add a test for your change. Only refactoring and documentation changes +require no new tests. If you are adding functionality or fixing a bug, we need +a test! + +4. Make the test pass. + +5. Push to your fork and submit a pull request. + +And in case we didn't emphasize it enough: we love tests! + +## Issue triage [![Open Source Helpers](https://www.codetriage.com/typhoeus/typhoeus/badges/users.svg)](https://www.codetriage.com/typhoeus/typhoeus) + +You can contribute by triaging issues which may include reproducing bug reports or asking for vital information, such as version numbers or reproduction instructions. If you would like to start triaging issues, one easy way to get started is to [subscribe to typhoeus on CodeTriage](https://www.codetriage.com/typhoeus/typhoeus). 
diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Gemfile b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Gemfile new file mode 100644 index 0000000..dd662d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Gemfile @@ -0,0 +1,32 @@ +source "https://rubygems.org" +gemspec + +if Gem.ruby_version < Gem::Version.new("2.0.0") + gem "rake", "< 11" + gem "json", "< 2" +else + gem "json" + gem "rake" +end + +group :development, :test do + gem "rspec", "~> 3.0" + + gem "sinatra", "~> 1.3" + + if Gem.ruby_version >= Gem::Version.new("1.9.0") + gem "faraday", ">= 0.9" + gem "dalli", "~> 2.0" + end + + gem "redis", "~> 3.0" + + if RUBY_PLATFORM == "java" + gem "spoon" + end + + unless ENV["CI"] + gem "guard-rspec", "~> 0.7" + gem 'rb-fsevent', '~> 0.9.1' + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Guardfile b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Guardfile new file mode 100644 index 0000000..f0db299 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Guardfile @@ -0,0 +1,9 @@ +# vim:set filetype=ruby: +guard( + "rspec", + all_after_pass: false, + cli: "--fail-fast --tty --format documentation --colour") do + + watch(%r{^spec/.+_spec\.rb$}) + watch(%r{^lib/(.+)\.rb$}) { |match| "spec/#{match[1]}_spec.rb" } +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/LICENSE new file mode 100644 index 0000000..387fac5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2009-2010 Paul Dix +Copyright (c) 2011 David Balatero +Copyright (c) 2012-2016 Hans Hasselberg + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/README.md b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/README.md new file mode 100644 index 0000000..1bbab54 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/README.md @@ -0,0 +1,584 @@ +# Typhoeus [![Build Status](https://img.shields.io/travis/typhoeus/typhoeus/master.svg)](https://travis-ci.org/typhoeus/typhoeus) [![Code Climate](https://img.shields.io/codeclimate/maintainability/typhoeus/typhoeus.svg)](https://codeclimate.com/github/typhoeus/typhoeus) [![Gem Version](https://img.shields.io/gem/v/typhoeus.svg)](https://rubygems.org/gems/typhoeus) + +Like a modern code version of the mythical beast with 100 serpent heads, Typhoeus runs HTTP requests in parallel while cleanly encapsulating handling logic. 
+ +## Example + +A single request: + +```ruby +Typhoeus.get("www.example.com", followlocation: true) +``` + +Parallel requests: + +```ruby +hydra = Typhoeus::Hydra.new +10.times.map{ hydra.queue(Typhoeus::Request.new("www.example.com", followlocation: true)) } +hydra.run +``` + +## Installation +Add the following line to your Gemfile: +``` +gem "typhoeus" +``` +Then run `bundle install` + +Or install it yourself as: + +``` +gem install typhoeus +``` + +## Project Tracking + +* [Documentation](http://rubydoc.info/github/typhoeus/typhoeus/frames/Typhoeus) (GitHub master) +* [Mailing list](http://groups.google.com/group/typhoeus) + +## Usage + +### Introduction + +The primary interface for Typhoeus consists of three classes: Request, Response, and Hydra. Request represents an HTTP request object, Response represents an HTTP response, and Hydra manages making parallel HTTP connections. + +```ruby +request = Typhoeus::Request.new( + "www.example.com", + method: :post, + body: "this is a request body", + params: { field1: "a field" }, + headers: { Accept: "text/html" } +) +``` + +We can see from this that the first argument is the url. The second is a set of options, +all of which are optional. The default for `:method` is `:get`. + +When you want to send URL parameters, you can use the `:params` hash to do so. Please note that if you need to send a request with `x-www-form-urlencoded` parameters, you should use the `:body` hash instead. `:params` is for URL parameters and `:body` is for the request body. + +#### Sending requests through the proxy + +Add a proxy url to the list of options: + +```ruby +options = {proxy: 'http://myproxy.org'} +req = Typhoeus::Request.new(url, options) +``` + +If your proxy requires authentication, add it with the `proxyuserpwd` option key: + +```ruby +options = {proxy: 'http://proxyurl.com', proxyuserpwd: 'user:password'} +req = Typhoeus::Request.new(url, options) +``` + +Note that `proxyuserpwd` is a colon-separated username and password, in the vein of the basic auth `userpwd` option. + + +You can run the query either on its own or through the hydra: + +``` ruby +request.run +#=> <Typhoeus::Response ... > +``` + +```ruby +hydra = Typhoeus::Hydra.hydra +hydra.queue(request) +hydra.run +``` + +The response object will be set after the request is run. + +```ruby +response = request.response +response.code +response.total_time +response.headers +response.body +``` + +### Making Quick Requests + +Typhoeus has some convenience methods for performing single HTTP requests. The arguments are the same as those you pass into the request constructor. + +```ruby +Typhoeus.get("www.example.com") +Typhoeus.head("www.example.com") +Typhoeus.put("www.example.com/posts/1", body: "whoo, a body") +Typhoeus.patch("www.example.com/posts/1", body: "a new body") +Typhoeus.post("www.example.com/posts", body: { title: "test post", content: "this is my test"}) +Typhoeus.delete("www.example.com/posts/1") +Typhoeus.options("www.example.com") +``` +#### Sending params in the body with PUT +When using POST the content-type is set automatically to 'application/x-www-form-urlencoded'. That's not the case for any other method like PUT, PATCH, HEAD and so on, irrespective of whether you are using body or not. To get the same result as POST, i.e.
a hash in the body coming through as params in the receiver, you need to set the content-type as shown below: +```ruby +Typhoeus.put("www.example.com/posts/1", + headers: {'Content-Type'=> "application/x-www-form-urlencoded"}, + body: {title:"test post updated title", content: "this is my updated content"} + ) +``` + +### Handling HTTP errors + +You can query the response object to figure out if you had a successful +request or not. Here’s some example code that you might use to handle errors. +The callbacks are executed right after the request is finished, make sure to define +them before running the request. + +```ruby +request = Typhoeus::Request.new("www.example.com", followlocation: true) + +request.on_complete do |response| + if response.success? + # hell yeah + elsif response.timed_out? + # aw hell no + log("got a time out") + elsif response.code == 0 + # Could not get an http response, something's wrong. + log(response.return_message) + else + # Received a non-successful http response. + log("HTTP request failed: " + response.code.to_s) + end +end + +request.run +``` + +This also works with serial (blocking) requests in the same fashion. Both +serial and parallel requests return a Response object. + +### Handling file uploads + +A File object can be passed as a param for a POST request to handle uploading +files to the server. Typhoeus will upload the file as the original file name +and use Mime::Types to set the content type. + +```ruby +Typhoeus.post( + "http://localhost:3000/posts", + body: { + title: "test post", + content: "this is my test", + file: File.open("thesis.txt","r") + } +) +``` + +### Streaming the response body + +Typhoeus can stream responses. When you're expecting a large response, +set the `on_body` callback on a request. Typhoeus will yield to the callback +with chunks of the response, as they're read. When you set an `on_body` callback, +Typhoeus will not store the complete response. + +```ruby +downloaded_file = File.open 'huge.iso', 'wb' +request = Typhoeus::Request.new("www.example.com/huge.iso") +request.on_headers do |response| + if response.code != 200 + raise "Request failed" + end +end +request.on_body do |chunk| + downloaded_file.write(chunk) +end +request.on_complete do |response| + downloaded_file.close + # Note that response.body is "" +end +request.run +``` + +If you need to interrupt the stream halfway, +you can return the `:abort` symbol from the `on_body` block, example: + +```ruby +request.on_body do |chunk| + buffer << chunk + :abort if buffer.size > 1024 * 1024 +end +``` + +This will properly stop the stream internally and avoid any memory leak which +may happen if you interrupt with something like a `return`, `throw` or `raise`. + +### Making Parallel Requests + +Generally, you should be running requests through hydra. Here is how that looks: + +```ruby +hydra = Typhoeus::Hydra.hydra + +first_request = Typhoeus::Request.new("http://example.com/posts/1") +first_request.on_complete do |response| + third_url = response.body + third_request = Typhoeus::Request.new(third_url) + hydra.queue third_request +end +second_request = Typhoeus::Request.new("http://example.com/posts/2") + +hydra.queue first_request +hydra.queue second_request +hydra.run # this is a blocking call that returns once all requests are complete +``` + +The execution of that code goes something like this. The first and second requests are built and queued. When hydra is run the first and second requests run in parallel. 
When the first request completes, the third request is then built and queued, in this example based on the result of the first request. The moment it is queued Hydra starts executing it. Meanwhile the second request would continue to run (or it could have completed before the first). Once the third request is done, `hydra.run` returns. + +How to get an array of response bodies back after executing a queue: + +```ruby +hydra = Typhoeus::Hydra.new +requests = 10.times.map { + request = Typhoeus::Request.new("www.example.com", followlocation: true) + hydra.queue(request) + request +} +hydra.run + +responses = requests.map { |request| + request.response.body +} +``` +`hydra.run` is a blocking request. You can also use the `on_complete` callback to handle each request as it completes: + +```ruby +hydra = Typhoeus::Hydra.new +10.times do + request = Typhoeus::Request.new("www.example.com", followlocation: true) + request.on_complete do |response| + #do_something_with response + end + hydra.queue(request) +end +hydra.run +``` + +### Making Parallel Requests with Faraday + Typhoeus + +```ruby +require 'faraday' + +conn = Faraday.new(:url => 'http://httppage.com') do |builder| + builder.request :url_encoded + builder.response :logger + builder.adapter :typhoeus +end + +conn.in_parallel do + response1 = conn.get('/first') + response2 = conn.get('/second') + + # these will return nil here since the + # requests have not been completed + response1.body + response2.body +end + +# after it has been completed the response information is fully available +# response1.status, etc +response1.body +response2.body +``` + +### Specifying Max Concurrency + +Hydra will also handle how many requests you can make in parallel. Things will get flakey if you try to make too many requests at the same time. The built in limit is 200. When more requests than that are queued up, hydra will save them for later and start the requests as others are finished. You can raise or lower the concurrency limit through the Hydra constructor. + +```ruby +Typhoeus::Hydra.new(max_concurrency: 20) +``` + +### Memoization + +Hydra memoizes requests within a single run call. You have to enable memoization. +This will result in a single request being issued. However, the on_complete handlers of both will be called. + +```ruby +Typhoeus::Config.memoize = true + +hydra = Typhoeus::Hydra.new(max_concurrency: 1) +2.times do + hydra.queue Typhoeus::Request.new("www.example.com") +end +hydra.run +``` + +This will result in two requests. + +```ruby +Typhoeus::Config.memoize = false + +hydra = Typhoeus::Hydra.new(max_concurrency: 1) +2.times do + hydra.queue Typhoeus::Request.new("www.example.com") +end +hydra.run +``` + +### Caching + +Typhoeus includes built in support for caching. In the following example, if there is a cache hit, the cached object is passed to the on_complete handler of the request object. + +```ruby +class Cache + def initialize + @memory = {} + end + + def get(request) + @memory[request] + end + + def set(request, response) + @memory[request] = response + end +end + +Typhoeus::Config.cache = Cache.new + +Typhoeus.get("www.example.com").cached? +#=> false +Typhoeus.get("www.example.com").cached? +#=> true +``` + +For use with [Dalli](https://github.com/mperham/dalli): + +```ruby +dalli = Dalli::Client.new(...) 
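+# `dalli` is your Dalli::Client instance; the `...` above stands for your own memcached server address and client options.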
+Typhoeus::Config.cache = Typhoeus::Cache::Dalli.new(dalli) +``` + +For use with Rails: + +```ruby +Typhoeus::Config.cache = Typhoeus::Cache::Rails.new +``` + +For use with [Redis](https://github.com/redis/redis-rb): + +```ruby +redis = Redis.new(...) +Typhoeus::Config.cache = Typhoeus::Cache::Redis.new(redis) +``` + +All three of these adapters take an optional keyword argument `default_ttl`, which sets a default +TTL on cached responses (in seconds), for requests which do not have a cache TTL set. + +You may also selectively choose not to cache by setting `cache` to `false` on a request or to use +a different adapter. + +```ruby +cache = Cache.new +Typhoeus.get("www.example.com", cache: cache) +``` + +### Direct Stubbing + +Hydra allows you to stub out specific urls and patterns to avoid hitting +remote servers while testing. + +```ruby +response = Typhoeus::Response.new(code: 200, body: "{'name' : 'paul'}") +Typhoeus.stub('www.example.com').and_return(response) + +Typhoeus.get("www.example.com") == response +#=> true +``` + +The queued request will hit the stub. You can also specify a regex to match urls. + +```ruby +response = Typhoeus::Response.new(code: 200, body: "{'name' : 'paul'}") +Typhoeus.stub(/example/).and_return(response) + +Typhoeus.get("www.example.com") == response +#=> true +``` + +You may also specify an array for the stub to return sequentially. + +```ruby +Typhoeus.stub('www.example.com').and_return([response1, response2]) + +Typhoeus.get('www.example.com') == response1 #=> true +Typhoeus.get('www.example.com') == response2 #=> true +``` + +When testing make sure to clear your expectations or the stubs will persist between tests. The following can be included in your spec_helper.rb file to do this automatically. + +```ruby +RSpec.configure do |config| + config.before :each do + Typhoeus::Expectation.clear + end +end +``` + +### Timeouts + +No exceptions are raised on HTTP timeouts. You can check whether a request timed out with the following method: + +```ruby +Typhoeus.get("www.example.com", timeout: 1).timed_out? +``` + +Timed out responses also have their success? method return false. + +There are two different timeouts available: [`timeout`](http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTTIMEOUT) +and [`connecttimeout`](http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUT). +`timeout` is the time limit for the entire request in seconds. +`connecttimeout` is the time limit for just the connection phase, again in seconds. + +There are two additional more fine grained options `timeout_ms` and +`connecttimeout_ms`. These options offer millisecond precision but are not always available (for instance on linux if `nosignal` is not set to true). + +When you pass a floating point `timeout` (or `connecttimeout`) Typhoeus will set `timeout_ms` for you if it has not been defined. The actual timeout values passed to curl will always be rounded up. + +DNS timeouts of less than one second are not supported unless curl is compiled with an asynchronous resolver. + +The default `timeout` is 0 (zero) which means curl never times out during transfer. The default `connecttimeout` is 300 seconds. A `connecttimeout` of 0 will also result in the default `connecttimeout` of 300 seconds. 
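+
+For instance, here is a minimal sketch using the millisecond options described above (the URL and values are illustrative only):
+
+```ruby
+# 500 ms limit for the connection phase, 2000 ms (2 seconds) for the entire request.
+Typhoeus.get("www.example.com", connecttimeout_ms: 500, timeout_ms: 2000)
+```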
+ +### Following Redirections + +Use `followlocation: true`, e.g.: + +```ruby +Typhoeus.get("www.example.com", followlocation: true) +``` + +### Basic Authentication + +```ruby +Typhoeus::Request.get("www.example.com", userpwd: "user:password") +``` + +### Compression + +```ruby +Typhoeus.get("www.example.com", accept_encoding: "gzip") +``` + +The above behaves differently from setting the header directly in the header hash, e.g.: +```ruby +Typhoeus.get("www.example.com", headers: {"Accept-Encoding" => "gzip"}) +``` + +Setting the header hash directly will not include the `--compressed` flag in the libcurl command and therefore libcurl will not decompress the response. If you want the `--compressed` flag to be added automatically, set the `:accept_encoding` Typhoeus option. + + +### Cookies + +```ruby +Typhoeus::Request.get("www.example.com", cookiefile: "/path/to/file", cookiejar: "/path/to/file") +``` + +Here, `cookiefile` is a file to read cookies from, and `cookiejar` is a file to write received cookies to. +If you just want cookies enabled, you need to pass the same filename for both options. + +### Other CURL options + +These are available and documented [here](http://rubydoc.info/github/typhoeus/ethon/Ethon/Easy/Options). + +### SSL + +SSL comes built into libcurl, so it’s in Typhoeus as well. If you pass in a +url with "https" it should just work, assuming that you have your [cert +bundle](http://curl.haxx.se/docs/caextract.html) in order and the server is +verifiable. You must also have libcurl built with SSL support enabled. You can +check that by doing this: + +``` +curl --version +``` + +Now, even if you have libcurl built with OpenSSL, you may still have a messed +up cert bundle, or you may be hitting a non-verifiable SSL server; either way you’ll +have to disable peer verification to make SSL work. Like this: + +```ruby +Typhoeus.get("https://www.example.com", ssl_verifypeer: false) +``` + +If you are getting "SSL: certificate subject name does not match target host +name" from curl (e.g. you are trying to access b.c.host.com when the +certificate subject is \*.host.com), you can disable host verification. Like +this: + +```ruby +# host checking enabled +Typhoeus.get("https://www.example.com", ssl_verifyhost: 2) +# host checking disabled +Typhoeus.get("https://www.example.com", ssl_verifyhost: 0) +``` + +### Verbose debug output + +It’s sometimes useful to see verbose output from curl. You can enable it on a per-request basis: + +```ruby +Typhoeus.get("http://example.com", verbose: true) +``` + +or globally: + +```ruby +Typhoeus::Config.verbose = true +``` + +Just remember that libcurl prints its debug output to the console (to +STDERR), so you’ll need to run your scripts from the console to see it. + +### Default User Agent Header + +In many cases, all HTTP requests made by an application require the same User-Agent header to be set. Instead of setting it on a per-request basis with a custom header, it is possible to override it for all requests using: + + +```ruby +Typhoeus::Config.user_agent = "custom user agent" +``` + +### Running the specs + +Running the specs should be as easy as: + +``` +bundle install +bundle exec rake +``` +## Semantic Versioning + +This project conforms to [semver](http://semver.org/).
+ +## LICENSE + +(The MIT License) + +Copyright © 2009-2010 [Paul Dix](http://www.pauldix.net/) + +Copyright © 2011-2012 [David Balatero](https://github.com/dbalatero/) + +Copyright © 2012-2016 [Hans Hasselberg](http://github.com/i0rek/) + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without +limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons +to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Rakefile b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Rakefile new file mode 100644 index 0000000..d5693a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/Rakefile @@ -0,0 +1,38 @@ +require "bundler" +Bundler.setup + +require "rake" +require "rspec/core/rake_task" +$LOAD_PATH.unshift File.expand_path("../lib", __FILE__) +require "typhoeus/version" + +task :gem => :build +task :build do + system "gem build typhoeus.gemspec" +end + +task :install => :build do + system "gem install typhoeus-#{Typhoeus::VERSION}.gem" +end + +task :release => :build do + system "git tag -a v#{Typhoeus::VERSION} -m 'Tagging #{Typhoeus::VERSION}'" + system "git push --tags" + system "gem push typhoeus-#{Typhoeus::VERSION}.gem" +end + +RSpec::Core::RakeTask.new(:spec) do |t| + t.verbose = false + t.ruby_opts = "-W -I./spec -rspec_helper" +end + +desc "Start up the test servers" +task :start do + require_relative 'spec/support/boot' + begin + Boot.start_servers(:rake) + rescue Exception + end +end + +task :default => :spec diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/UPGRADE.md b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/UPGRADE.md new file mode 100644 index 0000000..28c7f1d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/UPGRADE.md @@ -0,0 +1,55 @@ +# Upgrade guide + +## 0.5 + +### Options + +Fix the option names, because some were renamed. The errors should point you in the right direction: + +```ruby +Typhoeus.get("www.example.com", follow_location: true) +# Ethon::Errors::InvalidOption: The option: follow_location is invalid. +# Please try followlocation instead of follow_location. +# ... 
[Backtrace] + +Typhoeus.get("www.example.com", followlocation: true).code +#=> 200 +``` + +### Headers + +`Response#headers` returns a hash now and replaces `Response#headers_hash`, use `Response#response_headers` for the raw string: + +```ruby +Typhoeus.get("www.example.com", followlocation: true).headers +#=> { +# "date"=>"Tue, 06 Nov 2012 09:07:27 GMT", +# "server"=>"Apache/2.2.3 (CentOS)", +# "last-modified"=>"Wed, 09 Feb 2011 17:13:15 GMT", +# "vary"=>"Accept-Encoding", +# "connection"=>"close", +# "content-type"=>"text/html; charset=UTF-8" +# } + +Typhoeus.get("www.example.com", followlocation: true).response_headers +#=> "HTTP/1.0 302 Found\r\nLocation: http://www.iana.org/domains/example/ [...]" +``` + +### Params vs body + +Make sure every request sends proper params and body (especially POST/PUT). `:params` becomes url parameter and `:body` request body. Before params for POST was smashed into the body. + +### Configuration + +Create a global configuration in case you want to turn on verbose, memoize or block_connection: + +```ruby +Typhoeus.configure do |config| + config.verbose = true + config.memoize = true +end +``` + +### Docs + +When in doubt, read the [docs](http://rubydoc.info/github/typhoeus/typhoeus/frames/Typhoeus) or the [code](https://www.github.com/typhoeus). diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus.rb new file mode 100644 index 0000000..dd93611 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus.rb @@ -0,0 +1 @@ +require "rack/typhoeus/middleware/params_decoder" diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus/middleware/params_decoder.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus/middleware/params_decoder.rb new file mode 100644 index 0000000..cbd10f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus/middleware/params_decoder.rb @@ -0,0 +1,57 @@ +require 'rack/typhoeus/middleware/params_decoder/helper' + +module Rack + module Typhoeus + module Middleware + + # This Rack middleware takes care of the proper deserialization of + # the nested params encoded by Typhoeus. + # + # @example Require the railtie when using Rails. + # require 'typhoeus/railtie' + # + # @example Include the middleware for Rack based applications. + # use Rack::Typhoeus::Middleware::ParamsDecoder + # + # @example Use the helper directly. Not recommended as b/c the interface might change. + # require 'rack/typhoeus/middleware/params_decoder/helper' + # include Rack::Typhoeus::Middleware::ParamsDecoder::Helper + # decode!(params) + # + # @author Dwayne Macgowan + # @since 0.5.4 + class ParamsDecoder + include ParamsDecoder::Helper + + def initialize(app) + @app = app + end + + def call(env) + req = Rack::Request.new(env) + decode(req.params).each_pair { |k, v| update_params req, k, v } + @app.call(env) + end + + private + + # Persist params change in environment. 
Extracted from: + # https://github.com/rack/rack/blob/master/lib/rack/request.rb#L243 + def update_params(req, k, v) + found = false + if req.GET.has_key?(k) + found = true + req.GET[k] = v + end + if req.POST.has_key?(k) + found = true + req.POST[k] = v + end + unless found + req.GET[k] = v + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus/middleware/params_decoder/helper.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus/middleware/params_decoder/helper.rb new file mode 100644 index 0000000..0542328 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/rack/typhoeus/middleware/params_decoder/helper.rb @@ -0,0 +1,76 @@ +module Rack + module Typhoeus + module Middleware + class ParamsDecoder + module Helper + + # Recursively decodes Typhoeus encoded arrays in given Hash. + # + # @example Use directly in a Rails controller. + # class ApplicationController + # before_filter :decode_typhoeus_arrays + # end + # + # @author Dwayne Macgowan + # + def decode_typhoeus_arrays + decode!(params) + end + + # Recursively decodes Typhoeus encoded arrays in given Hash. + # + # @param hash [Hash]. This Hash will be modified! + # + # @return [Hash] Hash with properly decoded nested arrays. + def decode!(hash) + return hash unless hash.is_a?(Hash) + hash.each_pair do |key,value| + if value.is_a?(Hash) + decode!(value) + hash[key] = convert(value) + end + end + hash + end + + def decode(hash) + decode!(hash.dup) + end + + private + + # Checks if Hash is an Array encoded as a Hash. + # Specifically will check for the Hash to have this + # form: {'0' => v0, '1' => v1, .., 'n' => vN } + # + # @param hash [Hash] + # + # @return [Boolean] True if its a encoded Array, else false. + def encoded?(hash) + return false if hash.empty? + if hash.keys.size > 1 + keys = hash.keys.map{|i| i.to_i if i.respond_to?(:to_i)}.sort + keys == hash.keys.size.times.to_a + else + hash.keys.first =~ /0/ + end + end + + # If the Hash is an array encoded by typhoeus an array is returned + # else the self is returned + # + # @param hash [Hash] The Hash to convert into an Array. + # + # @return [Arraya/Hash] + def convert(hash) + if encoded?(hash) + hash.sort{ |a, b| a[0].to_i <=> b[0].to_i }.map{ |key, value| value } + else + hash + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus.rb new file mode 100644 index 0000000..1e2226a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus.rb @@ -0,0 +1,143 @@ +require 'digest/sha2' +require 'ethon' + +require 'typhoeus/config' +require 'typhoeus/easy_factory' +require 'typhoeus/errors' +require 'typhoeus/expectation' +require 'typhoeus/hydra' +require 'typhoeus/pool' +require 'typhoeus/request' +require 'typhoeus/response' +require 'typhoeus/version' + +# If we are using any Rack-based application, then we need the Typhoeus rack +# middleware to ensure our app is running properly. +if defined?(Rack) + require "rack/typhoeus" +end + +# If the Redis gem is available, load the redis cache adapter +if defined?(Redis) + require "typhoeus/cache/redis" +end + +# If the Dalli gem is available, load the Dalli cache adapter +if defined?(Dalli) + require "typhoeus/cache/dalli" +end + +# If we are using Rails, load the Rails cache adapter +if defined?(Rails) + require "typhoeus/cache/rails" +end + +# If we are using Rails, then we will include the Typhoeus railtie. 
+# if defined?(Rails) +# require "typhoeus/railtie" +# end + +# Typhoeus is a HTTP client library based on Ethon which +# wraps libcurl. Sitting on top of libcurl makes Typhoeus +# very reliable and fast. +# +# There are some gems using Typhoeus like +# {https://github.com/myronmarston/vcr VCR}, +# {https://github.com/bblimke/webmock WebMock} or +# {https://github.com/technoweenie/faraday Faraday}. VCR +# and WebMock provide their own adapter whereas +# Faraday relies on {Faraday::Adapter::Typhoeus} +# since Typhoeus version 0.5. +# +# @example (see Typhoeus::Request) +# @example (see Typhoeus::Hydra) +# +# @see Typhoeus::Request +# @see Typhoeus::Hydra +# @see Faraday::Adapter::Typhoeus +# +# @since 0.5.0 +module Typhoeus + extend Request::Actions + extend Request::Callbacks::Types + + # The default Typhoeus user agent. + USER_AGENT = "Typhoeus - https://github.com/typhoeus/typhoeus" + + # Set the Typhoeus configuration options by passing a block. + # + # @example (see Typhoeus::Config) + # + # @yield [ Typhoeus::Config ] + # + # @return [ Typhoeus::Config ] The configuration. + # + # @see Typhoeus::Config + def self.configure + yield Config + end + + # Stub out a specific request. + # + # @example (see Typhoeus::Expectation) + # + # @param [ String ] base_url The url to stub out. + # @param [ Hash ] options The options to stub out. + # + # @return [ Typhoeus::Expectation ] The expecatation. + # + # @see Typhoeus::Expectation + def self.stub(base_url, options = {}, &block) + expectation = Expectation.all.find{ |e| e.base_url == base_url && e.options == options } + if expectation.nil? + expectation = Expectation.new(base_url, options) + Expectation.all << expectation + end + + expectation.and_return(&block) unless block.nil? + expectation + end + + # Add before callbacks. + # + # @example Add before callback. + # Typhoeus.before { |request| p request.base_url } + # + # @param [ Block ] block The callback. + # + # @yield [ Typhoeus::Request ] + # + # @return [ Array<Block> ] All before blocks. + def self.before(&block) + @before ||= [] + @before << block if block_given? + @before + end + + # Execute given block as if block connection is turned off. + # The old block connection state is restored afterwards. + # + # @example Make a real request, no matter if it's blocked. + # Typhoeus::Config.block_connection = true + # Typhoeus.get("www.example.com").code + # #=> raise Typhoeus::Errors::NoStub + # + # Typhoeus.with_connection do + # Typhoeus.get("www.example.com").code + # #=> :ok + # end + # + # @yield Yields control to the block after disabling block_connection. + # Afterwards, the block_connection is set to its original + # value. + # @return [ Object ] Returns the return value of the block. + # + # @see Typhoeus::Config.block_connection + def self.with_connection + old = Config.block_connection + Config.block_connection = false + result = yield if block_given? + Config.block_connection = old + result + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/adapters/faraday.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/adapters/faraday.rb new file mode 100644 index 0000000..8452010 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/adapters/faraday.rb @@ -0,0 +1,180 @@ +require 'faraday' + +module Faraday # :nodoc: + class Adapter # :nodoc: + + # Adapter to use Faraday with Typhoeus. + # + # @example Use Typhoeus. 
+ # require 'faraday' + # require 'typhoeus' + # require 'typhoeus/adapters/faraday' + # + # conn = Faraday.new(url: "www.example.com") do |faraday| + # faraday.adapter :typhoeus + # + # # You can include Typhoeus options to be used for every request + # # faraday.adapter :typhoeus, forbid_reuse: true, maxredirs: 1 + # end + # + # response = conn.get("/") + class Typhoeus < Faraday::Adapter + self.supports_parallel = true + + (class << self; self; end).instance_eval do + remove_method :setup_parallel_manager if method_defined? :setup_parallel_manager + end + + remove_method :call if method_defined? :call + remove_method :perform_request if method_defined? :perform_request + remove_method :request if method_defined? :request + remove_method :read_body if method_defined? :read_body + remove_method :configure_ssl if method_defined? :configure_ssl + remove_method :configure_proxy if method_defined? :configure_proxy + remove_method :configure_timeout if method_defined? :configure_timeout + remove_method :configure_socket if method_defined? :configure_socket + remove_method :parallel? if method_defined? :parallel? + + # Initialize the Typhoeus adapter + # + # @param [ App ] app Farday app + # @option [ Hash ] adapter_options Typhoeus options + # + # @return [ void ] + def initialize(app, adapter_options = {}) + super(app) + @adapter_options = adapter_options + end + + # Setup Hydra with provided options. + # + # @example Setup Hydra. + # Faraday::Adapter::Typhoeus.setup_parallel_manager + # #=> #<Typhoeus::Hydra ... > + # + # @param (see Typhoeus::Hydra#initialize) + # @option (see Typhoeus::Hydra#initialize) + # + # @return [ Typhoeus::Hydra ] The hydra. + def self.setup_parallel_manager(options = {}) + ::Typhoeus::Hydra.new(options) + end + + dependency 'typhoeus' + + # Hook into Faraday and perform the request with Typhoeus. + # + # @param [ Hash ] env The environment. + # + # @return [ void ] + def call(env) + super + perform_request env + @app.call env + end + + private + + def perform_request(env) + if parallel?(env) + env[:parallel_manager].queue request(env) + else + request(env).run + end + end + + def request(env) + read_body env + + req = typhoeus_request(env) + + configure_ssl req, env + configure_proxy req, env + configure_timeout req, env + configure_socket req, env + + req.on_complete do |resp| + if resp.timed_out? + env[:typhoeus_timed_out] = true + unless parallel?(env) + raise Faraday::TimeoutError, "request timed out" + end + elsif (resp.response_code == 0) || ((resp.return_code != :ok) && !resp.mock?) + env[:typhoeus_connection_failed] = true + env[:typhoeus_return_message] = resp.return_message + unless parallel?(env) + raise Faraday::ConnectionFailed, resp.return_message + end + end + + save_response(env, resp.code, resp.body) do |response_headers| + response_headers.parse resp.response_headers + end + # in async mode, :response is initialized at this point + env[:response].finish(env) if parallel?(env) + end + + req + end + + def typhoeus_request(env) + opts = { + :method => env[:method], + :body => env[:body], + :headers => env[:request_headers] + }.merge(@adapter_options) + + ::Typhoeus::Request.new(env[:url].to_s, opts) + end + + def read_body(env) + env[:body] = env[:body].read if env[:body].respond_to? :read + end + + def configure_ssl(req, env) + ssl = env[:ssl] + + verify_p = (ssl && ssl.fetch(:verify, true)) + + ssl_verifyhost = verify_p ? 
2 : 0 + req.options[:ssl_verifyhost] = ssl_verifyhost + req.options[:ssl_verifypeer] = verify_p + req.options[:sslversion] = ssl[:version] if ssl[:version] + req.options[:sslcert] = ssl[:client_cert] if ssl[:client_cert] + req.options[:sslkey] = ssl[:client_key] if ssl[:client_key] + req.options[:cainfo] = ssl[:ca_file] if ssl[:ca_file] + req.options[:capath] = ssl[:ca_path] if ssl[:ca_path] + client_cert_passwd_key = [:client_cert_passwd, :client_certificate_password].detect { |name| ssl.key?(name) } + req.options[:keypasswd] = ssl[client_cert_passwd_key] if client_cert_passwd_key + end + + def configure_proxy(req, env) + proxy = env[:request][:proxy] + return unless proxy + + req.options[:proxy] = "#{proxy[:uri].scheme}://#{proxy[:uri].host}:#{proxy[:uri].port}" + + if proxy[:user] && proxy[:password] + req.options[:proxyauth] = :any + req.options[:proxyuserpwd] = "#{proxy[:user]}:#{proxy[:password]}" + end + end + + def configure_timeout(req, env) + env_req = env[:request] + req.options[:timeout_ms] = (env_req[:timeout] * 1000).to_i if env_req[:timeout] + req.options[:connecttimeout_ms] = (env_req[:open_timeout] * 1000).to_i if env_req[:open_timeout] + end + + def configure_socket(req, env) + if bind = env[:request][:bind] + req.options[:interface] = bind[:host] + end + end + + def parallel?(env) + !!env[:parallel_manager] + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/dalli.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/dalli.rb new file mode 100644 index 0000000..5a09b10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/dalli.rb @@ -0,0 +1,28 @@ +module Typhoeus + module Cache + # This module provides a simple way to cache HTTP responses using Dalli. + class Dalli + # @example Set Dalli as the Typhoeus cache backend + # Typhoeus::Config.cache = Typhoeus::Cache::Dalli.new + # + # @param [ Dalli::Client ] client + # A connection to the cache server. Defaults to `Dalli::Client.new` + # @param [ Hash ] options + # Options + # @option options [ Integer ] :default_ttl + # The default TTL of cached responses in seconds, for requests which do not set a cache_ttl. + def initialize(client = ::Dalli::Client.new, options = {}) + @client = client + @default_ttl = options[:default_ttl] + end + + def get(request) + @client.get(request.cache_key) + end + + def set(request, response) + @client.set(request.cache_key, response, request.cache_ttl || @default_ttl) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/rails.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/rails.rb new file mode 100644 index 0000000..10835e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/rails.rb @@ -0,0 +1,28 @@ +module Typhoeus + module Cache + # This module provides a simple way to cache HTTP responses in using the Rails cache. + class Rails + # @example Use the Rails cache setup to cache Typhoeus responses. + # Typhoeus::Config.cache = Typhoeus::Cache::Rails.new + # + # @param [ ActiveSupport::Cache::Store ] cache + # A Rails cache backend. Defaults to Rails.cache. + # @param [ Hash ] options + # Options + # @option options [ Integer ] :default_ttl + # The default TTL of cached responses in seconds, for requests which do not set a cache_ttl. 
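+ # Editor's note: the following example is an illustrative sketch by the
+ # editor, not part of the upstream gem; the URL and TTL values are made up.
+ # It shows the backend wired in with a default TTL, and a single request
+ # overriding it through the :cache_ttl option read by Request::Cacheable.
+ #
+ # @example Cache through Rails with a default and a per-request TTL.
+ #   Typhoeus::Config.cache = Typhoeus::Cache::Rails.new(::Rails.cache, :default_ttl => 600)
+ #   Typhoeus::Request.new("www.example.com/slow", :cache_ttl => 60).run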
+ def initialize(cache = ::Rails.cache, options = {}) + @cache = cache + @default_ttl = options[:default_ttl] + end + + def get(request) + @cache.read(request) + end + + def set(request, response) + @cache.write(request.cache_key, response, :expires_in => request.cache_ttl || @default_ttl) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/redis.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/redis.rb new file mode 100644 index 0000000..e54ca4e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/cache/redis.rb @@ -0,0 +1,35 @@ +module Typhoeus + module Cache + # This module provides a simple way to cache HTTP responses in Redis. + class Redis + # @example Set Redis as the Typhoeus cache backend + # Typhoeus::Config.cache = Typhoeus::Cache::Redis.new + # + # @param [ Redis ] redis + # A connection to Redis. Defaults to `Redis.new`, which uses the + # `REDIS_URL` environment variable to connect + # @param [ Hash ] options + # Options + # @option options [ Integer ] :default_ttl + # The default TTL of cached responses in seconds, for requests which do not set a cache_ttl. + def initialize(redis = ::Redis.new, options = {}) + @redis = redis + @default_ttl = options[:default_ttl] + end + + def get(request) + serialized_response = @redis.get(request.cache_key) + return unless serialized_response + Marshal.load(serialized_response) + end + + def set(request, response) + ttl = request.cache_ttl || @default_ttl + key = request.cache_key + serialized_response = Marshal.dump(response) + @redis.set(key, serialized_response) + @redis.expire(key, ttl) if ttl + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/config.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/config.rb new file mode 100644 index 0000000..f05491c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/config.rb @@ -0,0 +1,69 @@ +module Typhoeus + + # The Typhoeus configuration used to set global + # options. + # @example Set the configuration options within a block. + # Typhoeus.configure do |config| + # config.verbose = true + # end + # + # @example Set the configuration directly. + # Typhoeus::Config.verbose = true + module Config + extend self + + # Defines whether the connection is blocked. + # Defaults to false. When set to true, only + # stubbed requests are allowed. A + # {Typhoeus::Errors::NoStub} error is raised, + # when trying to do a real request. It's possible + # to work around inside + # {Typhoeus.with_connection}. + # + # @return [ Boolean ] + # + # @see Typhoeus::Request::BlockConnection + # @see Typhoeus::Hydra::BlockConnection + # @see Typhoeus#with_connection + # @see Typhoeus::Errors::NoStub + attr_accessor :block_connection + + # Defines whether GET requests are memoized when using the {Typhoeus::Hydra}. + # + # @return [ Boolean ] + # + # @see Typhoeus::Hydra + # @see Typhoeus::Hydra::Memoizable + attr_accessor :memoize + + # Defines whether curls debug output is shown. + # Unfortunately it prints to stderr. + # + # @return [ Boolean ] + # + # @see http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTVERBOSE + attr_accessor :verbose + + # Defines whether requests are cached. + # + # @return [ Object ] + # + # @see Typhoeus::Hydra::Cacheable + # @see Typhoeus::Request::Cacheable + attr_accessor :cache + + # Defines whether to use a default user agent. 
+ # + # @return [ String ] + # + # @see Typhoeus::Request#set_defaults + attr_accessor :user_agent + + # Defines wether to use a proxy server for every request. + # + # @return [ String ] + # + # @see Typhoeus::Request#set_defaults + attr_accessor :proxy + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/easy_factory.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/easy_factory.rb new file mode 100644 index 0000000..84c7131 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/easy_factory.rb @@ -0,0 +1,180 @@ +require 'set' + +module Typhoeus + + # This is a Factory for easies to be used in the hydra. + # Before an easy is ready to be added to a multi the + # on_complete callback to be set. + # This is done by this class. + # + # @api private + class EasyFactory + + RENAMED_OPTIONS = { + :auth_method => :httpauth, + :connect_timeout => :connecttimeout, + :encoding => :accept_encoding, + :follow_location => :followlocation, + :max_redirects => :maxredirs, + :proxy_type => :proxytype, + :ssl_cacert => :cainfo, + :ssl_capath => :capath, + :ssl_cert => :sslcert, + :ssl_cert_type => :sslcerttype, + :ssl_key => :sslkey, + :ssl_key_password => :keypasswd, + :ssl_key_type => :sslkeytype, + :ssl_version => :sslversion, + } + + CHANGED_OPTIONS = { + :disable_ssl_host_verification => :ssl_verifyhost, + :disable_ssl_peer_verification => :ssl_verifypeer, + :proxy_auth_method => :proxyauth, + } + + REMOVED_OPTIONS = Set.new([:cache_key_basis, :cache_timeout, :user_agent]) + + SANITIZE_IGNORE = Set.new([:method, :cache_ttl, :cache]) + SANITIZE_TIMEOUT = Set.new([:timeout_ms, :connecttimeout_ms]) + + # Returns the request provided. + # + # @return [ Typhoeus::Request ] + attr_reader :request + + # Returns the hydra provided. + # + # @return [ Typhoeus::Hydra ] + attr_reader :hydra + + # Create an easy factory. + # + # @example Create easy factory. + # Typhoeus::Hydra::EasyFactory.new(request, hydra) + # + # @param [ Request ] request The request to build an easy for. + # @param [ Hydra ] hydra The hydra to build an easy for. + def initialize(request, hydra = nil) + @request = request + @hydra = hydra + end + + # Return the easy in question. + # + # @example Return easy. + # easy_factory.easy + # + # @return [ Ethon::Easy ] The easy. + def easy + @easy ||= Typhoeus::Pool.get + end + + # Fabricated easy. + # + # @example Prepared easy. + # easy_factory.get + # + # @return [ Ethon::Easy ] The easy. + def get + begin + easy.http_request( + request.base_url.to_s, + request.options.fetch(:method, :get), + sanitize(request.options) + ) + rescue Ethon::Errors::InvalidOption => e + help = provide_help(e.message.match(/:\s(\w+)/)[1]) + raise $!, "#{$!}#{help}", $!.backtrace + end + set_callback + easy + end + + private + + def sanitize(options) + # set nosignal to true by default + # this improves thread safety and timeout behavior + sanitized = {:nosignal => true} + options.each do |k,v| + s = k.to_sym + next if SANITIZE_IGNORE.include?(s) + if new_option = RENAMED_OPTIONS[k.to_sym] + warn("Deprecated option #{k}. Please use #{new_option} instead.") + sanitized[new_option] = v + # sanitize timeouts + elsif SANITIZE_TIMEOUT.include?(s) + if !v.integer? 
+ warn("Value '#{v}' for option '#{k}' must be integer.") + end + sanitized[k] = v.ceil + else + sanitized[k] = v + end + end + + sanitize_timeout!(sanitized, :timeout) + sanitize_timeout!(sanitized, :connecttimeout) + + sanitized + end + + def sanitize_timeout!(options, timeout) + timeout_ms = :"#{timeout}_ms" + if options[timeout] && options[timeout].round != options[timeout] + if !options[timeout_ms] + options[timeout_ms] = (options[timeout]*1000).ceil + end + options[timeout] = options[timeout].ceil + end + options + end + + # Sets on_complete callback on easy in order to be able to + # track progress. + # + # @example Set callback. + # easy_factory.set_callback + # + # @return [ Ethon::Easy ] The easy. + def set_callback + if request.streaming? + response = nil + easy.on_headers do |easy| + response = Response.new(Ethon::Easy::Mirror.from_easy(easy).options) + request.execute_headers_callbacks(response) + end + request.on_body.each do |callback| + easy.on_body do |chunk, easy| + callback.call(chunk, response) + end + end + else + easy.on_headers do |easy| + request.execute_headers_callbacks(Response.new(Ethon::Easy::Mirror.from_easy(easy).options)) + end + end + request.on_progress.each do |callback| + easy.on_progress do |dltotal, dlnow, ultotal, ulnow, easy| + callback.call(dltotal, dlnow, ultotal, ulnow, response) + end + end + easy.on_complete do |easy| + request.finish(Response.new(easy.mirror.options)) + Typhoeus::Pool.release(easy) + if hydra && !hydra.queued_requests.empty? + hydra.dequeue_many + end + end + end + + def provide_help(option) + if new_option = CHANGED_OPTIONS[option.to_sym] + "\nPlease try #{new_option} instead of #{option}." if new_option + elsif REMOVED_OPTIONS.include?(option.to_sym) + "\nThe option #{option} was removed." + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors.rb new file mode 100644 index 0000000..5ad2c5d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors.rb @@ -0,0 +1,9 @@ +require 'typhoeus/errors/typhoeus_error' +require 'typhoeus/errors/no_stub' + +module Typhoeus + + # This namespace contains all errors raised by Typhoeus. + module Errors + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors/no_stub.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors/no_stub.rb new file mode 100644 index 0000000..d04534c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors/no_stub.rb @@ -0,0 +1,12 @@ +module Typhoeus + module Errors + + # Raises when block connection is turned on + # and making a real request. + class NoStub < TyphoeusError + def initialize(request) + super("The connection is blocked and no stub defined: #{request.url}") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors/typhoeus_error.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors/typhoeus_error.rb new file mode 100644 index 0000000..1550b40 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/errors/typhoeus_error.rb @@ -0,0 +1,8 @@ +module Typhoeus + module Errors + + # Default typhoeus error class for all custom errors. 
+ class TyphoeusError < StandardError + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/expectation.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/expectation.rb new file mode 100644 index 0000000..a8ae368 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/expectation.rb @@ -0,0 +1,217 @@ +module Typhoeus + + # This class represents an expectation. It is part + # of the stubbing mechanism. An expectation contains + # a url and options, like a request. They are compared + # to the request url and options in order to evaluate + # whether they match. If that's the case, the attached + # responses are returned one by one. + # + # @example Stub a request and get specified response. + # expected = Typhoeus::Response.new + # Typhoeus.stub("www.example.com").and_return(expected) + # + # actual = Typhoeus.get("www.example.com") + # expected == actual + # #=> true + # + # @example Stub a request and get a lazily-constructed response containing data from actual widgets that exist in the system when the stubbed request is made. + # Typhoeus.stub("www.example.com/widgets") do + # actual_widgets = Widget.all + # Typhoeus::Response.new( + # :body => actual_widgets.inject([]) do |ids, widget| + # ids << widget.id + # end.join(",") + # ) + # end + # + # @example Stub a request and get a lazily-constructed response in the format requested. + # Typhoeus.stub("www.example.com") do |request| + # accept = (request.options[:headers]||{})['Accept'] || "application/json" + # format = accept.split(",").first + # body_obj = { 'things' => [ { 'id' => 'foo' } ] } + # + # Typhoeus::Response.new( + # :headers => { + # 'Content-Type' => format + # }, + # :body => SERIALIZERS[format].serialize(body_obj) + # ) + # end + class Expectation + + # @api private + attr_reader :base_url + + # @api private + attr_reader :options + + # @api private + attr_reader :from + + class << self + + # Returns all expectations. + # + # @example Return expectations. + # Typhoeus::Expectation.all + # + # @return [ Array<Typhoeus::Expectation> ] The expectations. + def all + @expectations ||= [] + end + + # Clears expectations. This is handy while + # testing, and you want to make sure that + # you don't get canned responses. + # + # @example Clear expectations. + # Typhoeus::Expectation.clear + def clear + all.clear + end + + # Returns stubbed response matching the + # provided request. + # + # @example Find response + # Typhoeus::Expectation.response_for(request) + # + # @return [ Typhoeus::Response ] The stubbed response from a + # matching expectation, or nil if no matching expectation + # is found. + # + # @api private + def response_for(request) + expectation = find_by(request) + return nil if expectation.nil? + + expectation.response(request) + end + + # @api private + def find_by(request) + all.find do |expectation| + expectation.matches?(request) + end + end + end + + # Creates an expectation. + # + # @example Create expectation. + # Typhoeus::Expectation.new(base_url) + # + # @return [ Expectation ] The created expectation. + # + # @api private + def initialize(base_url, options = {}) + @base_url = base_url + @options = options + @response_counter = 0 + @from = nil + end + + # Set from value to mark an expectaion. Useful for + # other libraries, e.g. WebMock. + # + # @example Mark expectation. + # expectation.from(:webmock) + # + # @param [ String ] value Value to set. + # + # @return [ Expectation ] Returns self. 
+ # + # @api private + def stubbed_from(value) + @from = value + self + end + + # Specify what should be returned, + # when this expectation is hit. + # + # @example Add response. + # expectation.and_return(response) + # + # @return [ void ] + def and_return(response=nil, &block) + new_response = (response.nil? ? block : response) + responses.push(*new_response) + end + + # Checks whether this expectation matches + # the provided request. + # + # @example Check if request matches. + # expectation.matches? request + # + # @param [ Request ] request The request to check. + # + # @return [ Boolean ] True when matches, else false. + # + # @api private + def matches?(request) + url_match?(request.base_url) && options_match?(request) + end + + # Return canned responses. + # + # @example Return responses. + # expectation.responses + # + # @return [ Array<Typhoeus::Response> ] The responses. + # + # @api private + def responses + @responses ||= [] + end + + # Return the response. When there are + # multiple responses, they are returned one + # by one. + # + # @example Return response. + # expectation.response + # + # @return [ Response ] The response. + # + # @api private + def response(request) + response = responses.fetch(@response_counter, responses.last) + if response.respond_to?(:call) + response = response.call(request) + end + @response_counter += 1 + response.mock = @from || true + response + end + + private + + # Check whether the options matches the request options. + # I checks options and original options. + def options_match?(request) + (options ? options.all?{ |k,v| request.original_options[k] == v || request.options[k] == v } : true) + end + + # Check whether the base_url matches the request url. + # The base_url can be a string, regex or nil. String and + # regexp are checked, nil is always true, else false. + # + # Nil serves as a placeholder in case you want to match + # all urls. + def url_match?(request_url) + case base_url + when String + base_url == request_url + when Regexp + base_url === request_url + when nil + true + else + false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra.rb new file mode 100644 index 0000000..0796981 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra.rb @@ -0,0 +1,95 @@ +require 'typhoeus/hydra/addable' +require 'typhoeus/hydra/before' +require 'typhoeus/hydra/cacheable' +require 'typhoeus/hydra/block_connection' +require 'typhoeus/hydra/memoizable' +require 'typhoeus/hydra/queueable' +require 'typhoeus/hydra/runnable' +require 'typhoeus/hydra/stubbable' + +module Typhoeus + + # Hydra manages making parallel HTTP requests. This + # is achieved by using libcurls multi interface: + # http://curl.haxx.se/libcurl/c/libcurl-multi.html + # The benefits are that you don't have to worry running + # the requests by yourself. + # + # Hydra will also handle how many requests you can + # make in parallel. Things will get flakey if you + # try to make too many requests at the same time. + # The built in limit is 200. When more requests than + # that are queued up, hydra will save them for later + # and start the requests as others are finished. You + # can raise or lower the concurrency limit through + # the Hydra constructor. 
+ # + # Regarding the asynchronous behavior of the hydra, + # it is important to know that this is completely hidden + # from the developer and you are free to apply + # whatever technique you want to your code. That should not + # conflict with libcurls internal concurrency mechanism. + # + # @example Use the hydra to do multiple requests. + # hydra = Typhoeus::Hydra.new + # requests = (0..9).map{ Typhoeus::Request.new("www.example.com") } + # requests.each{ |request| hydra.queue(request) } + # hydra.run + # + # @note Callbacks are going to delay the request + # execution. + class Hydra + include Hydra::Addable + include Hydra::Runnable + include Hydra::Memoizable + include Hydra::Cacheable + include Hydra::BlockConnection + include Hydra::Stubbable + include Hydra::Before + include Hydra::Queueable + + # @example Set max_concurrency. + # Typhoeus::Hydra.new(max_concurrency: 20) + attr_accessor :max_concurrency + + # @api private + attr_reader :multi + + class << self + + # Returns a memoized hydra instance. + # + # @example Get a hydra. + # Typhoeus::Hydra.hydra + # + # @return [Typhoeus::Hydra] A new hydra. + def hydra + Thread.current[:typhoeus_hydra] ||= new + end + end + + # Create a new hydra. All + # {http://rubydoc.info/github/typhoeus/ethon/Ethon/Multi#initialize-instance_method Ethon::Multi#initialize} + # options are also available. + # + # @example Create a hydra. + # Typhoeus::Hydra.new + # + # @example Create a hydra with max_concurrency. + # Typhoeus::Hydra.new(max_concurrency: 20) + # + # @param [ Hash ] options The options hash. + # + # @option options :max_concurrency [ Integer ] Number + # of max concurrent connections to create. Default is + # 200. + # + # @see http://rubydoc.info/github/typhoeus/ethon/Ethon/Multi#initialize-instance_method + # Ethon::Multi#initialize + def initialize(options = {}) + @options = options + @max_concurrency = Integer(@options.fetch(:max_concurrency, 200)) + @multi = Ethon::Multi.new(options.reject{|k,_| k==:max_concurrency}) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/addable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/addable.rb new file mode 100644 index 0000000..1a86960 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/addable.rb @@ -0,0 +1,23 @@ +module Typhoeus + class Hydra + + # This module handles the request adding on + # hydra. + # + # @api private + module Addable + + # Adds request to multi. + # + # @example Add request. + # hydra.add(request) + # + # @param [ Typhoeus::Request ] request to add. + # + # @return [ void ] + def add(request) + multi.add(EasyFactory.new(request, self).get) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/before.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/before.rb new file mode 100644 index 0000000..75e9213 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/before.rb @@ -0,0 +1,31 @@ +module Typhoeus + class Hydra + + # This module provides a way to hook into before + # a request gets queued in hydra. This is very powerful + # and you should be careful because when you accidently + # return a falsy value the request won't be executed. + # + # @api private + module Before + + # Overrride add in order to execute callbacks in + # Typhoeus.before. Will break and return when a + # callback returns nil, false or a response. Calls super + # otherwise. + # + # @example Add the request. 
+ # hydra.add(request) + def add(request) + Typhoeus.before.each do |callback| + value = callback.call(request) + if value.nil? || value == false || value.is_a?(Response) + dequeue + return value + end + end + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/block_connection.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/block_connection.rb new file mode 100644 index 0000000..9f3bd2f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/block_connection.rb @@ -0,0 +1,35 @@ +module Typhoeus + class Hydra + + # This module handles the blocked connection request mode on + # the hydra side, where only stubbed requests + # are allowed. + # Connection blocking needs to be turned on: + # Typhoeus.configure do |config| + # config.block_connection = true + # end + # + # When trying to do real requests a NoStub error + # is raised. + # + # @api private + module BlockConnection + + # Overrides add in order to check before if block connection + # is turned on. If thats the case a NoStub error is + # raised. + # + # @example Add the request. + # hydra.add(request) + # + # @param [ Request ] request The request to enqueue. + def add(request) + if request.blocked? + raise Typhoeus::Errors::NoStub.new(request) + else + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/cacheable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/cacheable.rb new file mode 100644 index 0000000..74c7f30 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/cacheable.rb @@ -0,0 +1,15 @@ +module Typhoeus + class Hydra + module Cacheable + def add(request) + if request.cacheable? && response = request.cached_response + response.cached = true + request.finish(response) + dequeue + else + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/memoizable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/memoizable.rb new file mode 100644 index 0000000..0402a86 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/memoizable.rb @@ -0,0 +1,56 @@ +module Typhoeus + class Hydra + + # This module handles the GET request memoization + # on the hydra side. Memoization needs to be turned + # on: + # Typhoeus.configure do |config| + # config.memoize = true + # end + # + # @api private + module Memoizable + + # Return the memory. + # + # @example Return the memory. + # hydra.memory + # + # @return [ Hash ] The memory. + def memory + @memory ||= {} + end + + # Overrides add in order to check before if request + # is memoizable and already in memory. If thats the case, + # super is not called, instead the response is set and + # the on_complete callback called. + # + # @example Add the request. + # hydra.add(request) + # + # @param [ Request ] request The request to add. + # + # @return [ Request ] The added request. + def add(request) + if request.memoizable? && memory.has_key?(request) + response = memory[request] + request.finish(response, true) + dequeue + else + super + end + end + + # Overrides run to make sure the memory is cleared after + # each run. + # + # @example Run hydra. 
+ # hydra.run + def run + super + memory.clear + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/queueable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/queueable.rb new file mode 100644 index 0000000..b53dc67 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/queueable.rb @@ -0,0 +1,83 @@ +module Typhoeus + class Hydra + + # This module handles the request queueing on + # hydra. + # + # @api private + module Queueable + + # Return the queued requests. + # + # @example Return queued requests. + # hydra.queued_requests + # + # @return [ Array<Typhoeus::Request> ] The queued requests. + def queued_requests + @queued_requests ||= [] + end + + # Abort the current hydra run as good as + # possible. This means that it only + # clears the queued requests and can't do + # anything about already running requests. + # + # @example Abort hydra. + # hydra.abort + def abort + queued_requests.clear + end + + # Enqueues a request in order to be performed + # by the hydra. This can even be done while + # the hydra is running. Also sets hydra on + # request. + # + # @example Queue request. + # hydra.queue(request) + def queue(request) + request.hydra = self + queued_requests << request + end + + # Pushes a request to the front of the queue, + # to be performed by the hydra. Also sets hydra + # on request + # + # @example Queue reques. + # hydra.queue_front(request) + def queue_front(request) + request.hydra = self + queued_requests.unshift request + end + + # Removes a request from queued_requests and + # adds it to the hydra in order to be + # performed next. + # + # @example Dequeue request. + # hydra.dequeue + # + # @since 0.6.4 + def dequeue + add(queued_requests.shift) unless queued_requests.empty? + end + + # Removes requests from queued_requests and + # adds them to the hydra until max_concurrency + # is reached. + # + # @example Dequeue requests. + # hydra.dequeue_many + # + # @since 0.6.8 + def dequeue_many + number = multi.easy_handles.count + until number == max_concurrency || queued_requests.empty? + add(queued_requests.shift) + number += 1 + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/runnable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/runnable.rb new file mode 100644 index 0000000..9c2ad51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/runnable.rb @@ -0,0 +1,19 @@ +module Typhoeus + class Hydra + + # This module contains logic to run a hydra. + module Runnable + + # Start the hydra run. + # + # @example Start hydra run. + # hydra.run + # + # @return [ Symbol ] Return value from multi.perform. + def run + dequeue_many + multi.perform + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/stubbable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/stubbable.rb new file mode 100644 index 0000000..930997c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/hydra/stubbable.rb @@ -0,0 +1,28 @@ +module Typhoeus + class Hydra + + # This module handles stubbing on the hydra side. + # It plays well with the block_connection configuration, + # which raises when you make a request which is not stubbed. + # + # @api private + module Stubbable + + # Override add in order to check for matching expecations. + # When an expecation is found, super is not called. 
Instead a + # canned response is assigned to the request. + # + # @example Add the request. + # hydra.add(request) + def add(request) + if response = Expectation.response_for(request) + request.execute_headers_callbacks(response) + request.on_body.each{ |callback| callback.call(response.body, response) } + request.finish(response) + else + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/pool.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/pool.rb new file mode 100644 index 0000000..552a73f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/pool.rb @@ -0,0 +1,70 @@ +require 'thread' + +module Typhoeus + + # The easy pool stores already initialized + # easy handles for future use. This is useful + # because creating them is expensive. + # + # @api private + module Pool + @mutex = Mutex.new + @pid = Process.pid + + # Releases easy into the pool. The easy handle is + # reset before it gets back in. + # + # @example Release easy. + # Typhoeus::Pool.release(easy) + def self.release(easy) + easy.cookielist = "flush" # dump all known cookies to 'cookiejar' + easy.cookielist = "all" # remove all cookies from memory for this handle + easy.reset + @mutex.synchronize { easies << easy } + end + + # Return an easy from the pool. + # + # @example Return easy. + # Typhoeus::Pool.get + # + # @return [ Ethon::Easy ] The easy. + def self.get + @mutex.synchronize do + if @pid == Process.pid + easies.pop + else + # Process has forked. Clear all easies to avoid sockets being + # shared between processes. + @pid = Process.pid + easies.clear + nil + end + end || Ethon::Easy.new + end + + # Clear the pool + def self.clear + @mutex.synchronize { easies.clear } + end + + # Use yielded easy, will be released automatically afterwards. + # + # @example Use easy. + # Typhoeus::Pool.with_easy do |easy| + # # use easy + # end + def self.with_easy(&block) + easy = get + yield easy + ensure + release(easy) if easy + end + + private + + def self.easies + @easies ||= [] + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/railtie.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/railtie.rb new file mode 100644 index 0000000..ee95f2a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/railtie.rb @@ -0,0 +1,12 @@ +require "typhoeus" + +module Rails + module Typhoeus + class Railtie < Rails::Railtie + # Need to include the Typhoeus middleware. + initializer "include the identity map" do |app| + app.config.middleware.use "Rack::Typhoeus::Middleware::ParamsDecoder" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request.rb new file mode 100644 index 0000000..c41ee7b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request.rb @@ -0,0 +1,221 @@ +require 'zlib' +require 'digest/sha1' +require 'typhoeus/request/actions' +require 'typhoeus/request/before' +require 'typhoeus/request/block_connection' +require 'typhoeus/request/cacheable' +require 'typhoeus/request/callbacks' +require 'typhoeus/request/marshal' +require 'typhoeus/request/memoizable' +require 'typhoeus/request/operations' +require 'typhoeus/request/responseable' +require 'typhoeus/request/streamable' +require 'typhoeus/request/stubbable' + +module Typhoeus + + # This class represents a request. 
+ # + # @example (see #initialize) + # + # @example Make a request with the shortcut. + # response = Typhoeus.get("www.example.com") + # + # @see (see #initialize) + class Request + extend Request::Actions + include Request::Callbacks::Types + include Request::Callbacks + include Request::Streamable + include Request::Marshal + include Request::Operations + include Request::Responseable + include Request::Memoizable + include Request::Cacheable + include Request::BlockConnection + include Request::Stubbable + include Request::Before + + # Returns the provided base url. + # + # @return [ String ] + attr_accessor :base_url + + # Returns options, which includes default parameters. + # + # @return [ Hash ] + attr_accessor :options + + # Returns the hydra in which the request ran, if any. + # + # @return [ Typhoeus::Hydra ] + # + # @api private + attr_accessor :hydra + + # Returns the original options provided. + # + # @return [ Hash ] + # + # @api private + attr_accessor :original_options + + # @return [ Boolean ] + # + # @api private + attr_accessor :block_connection + + # Creates a new request. + # + # @example Simplest request. + # response = Typhoeus::Request.new("www.example.com").run + # + # @example Request with url parameters. + # response = Typhoeus::Request.new( + # "www.example.com", + # params: {a: 1} + # ).run + # + # @example Request with a body. + # response = Typhoeus::Request.new( + # "www.example.com", + # body: {b: 2} + # ).run + # + # @example Request with parameters and body. + # response = Typhoeus::Request.new( + # "www.example.com", + # params: {a: 1}, + # body: {b: 2} + # ).run + # + # @example Create a request and allow follow redirections. + # response = Typhoeus::Request.new( + # "www.example.com", + # followlocation: true + # ).run + # + # @param [ String ] base_url The url to request. + # @param [ options ] options The options. + # + # @option options [ Hash ] :params Translated + # into url parameters. + # @option options [ Hash ] :body Translated + # into HTTP POST request body. + # + # @return [ Typhoeus::Request ] The request. + # + # @note See {http://rubydoc.info/github/typhoeus/ethon/Ethon/Easy/Options Ethon::Easy::Options} for more options. + # + # @see Typhoeus::Hydra + # @see Typhoeus::Response + # @see Typhoeus::Request::Actions + def initialize(base_url, options = {}) + @base_url = base_url + @original_options = options + @options = options.dup + + set_defaults + end + + # Return the url. + # In contrast to base_url which returns the value you specified, url returns + # the full url including the parameters. + # + # @example Get the url. + # request.url + # + # @since 0.5.5 + def url + easy = EasyFactory.new(self).get + url = easy.url + Typhoeus::Pool.release(easy) + url + end + + # Returns whether other is equal to self. + # + # @example Are request equal? + # request.eql?(other_request) + # + # @param [ Object ] other The object to check. + # + # @return [ Boolean ] Returns true if equal, else false. + # + # @api private + def eql?(other) + self.class == other.class && + self.base_url == other.base_url && + fuzzy_hash_eql?(self.options, other.options) + end + + # Overrides Object#hash. + # + # @return [ Integer ] The integer representing the request. + # + # @api private + def hash + Zlib.crc32 cache_key + end + + # Returns a cache key for use with caching methods that required a string + # for a key. Will get used by ActiveSupport::Cache stores automatically. + # + # @return [ String ] The cache key. 
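+ # Editor's note: illustrative sketch (URL and params invented by the
+ # editor). Two requests built from the same base_url and options produce
+ # the same key, which is how cache backends recognise an equivalent
+ # request that has already been stored.
+ #
+ # @example Identical requests share a cache key.
+ #   a = Typhoeus::Request.new("www.example.com", params: { q: 1 })
+ #   b = Typhoeus::Request.new("www.example.com", params: { q: 1 })
+ #   a.cache_key == b.cache_key
+ #   #=> true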
+ def cache_key + Digest::SHA1.hexdigest "#{self.class.name}#{base_url}#{hashable_string_for(options)}" + end + + # Mimics libcurls POST body generation. This is not accurate, but good + # enough for VCR. + # + # @return [ String ] The encoded body. + # otherwise. + # + # @api private + def encoded_body + Ethon::Easy::Form.new(nil, options[:body]).to_s + end + + private + + # Checks if two hashes are equal or not, discarding + # first-level hash order. + # + # @param [ Hash ] left + # @param [ Hash ] right hash to check for equality + # + # @return [ Boolean ] Returns true if hashes have + # same values for same keys and same length, + # even if the keys are given in a different order. + def fuzzy_hash_eql?(left, right) + return true if (left == right) + + (left.count == right.count) && left.inject(true) do |res, kvp| + res && (kvp[1] == right[kvp[0]]) + end + end + + def hashable_string_for(obj) + case obj + when Hash + hashable_string_for(obj.sort_by {|sub_obj| sub_obj.first.to_s}) + when Array + obj.map {|sub_obj| hashable_string_for(sub_obj)}.to_s + else + obj.to_s + end + end + + # Sets default header and verbose when turned on. + def set_defaults + default_user_agent = Config.user_agent || Typhoeus::USER_AGENT + + options[:headers] = {'User-Agent' => default_user_agent}.merge(options[:headers] || {}) + options[:headers]['Expect'] ||= '' + options[:verbose] = Typhoeus::Config.verbose if options[:verbose].nil? && !Typhoeus::Config.verbose.nil? + options[:maxredirs] ||= 50 + options[:proxy] = Typhoeus::Config.proxy unless options.has_key?(:proxy) || Typhoeus::Config.proxy.nil? + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/actions.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/actions.rb new file mode 100644 index 0000000..b7974d8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/actions.rb @@ -0,0 +1,125 @@ +module Typhoeus + class Request + + # Module containing logic about shortcuts to + # http methods. Like + # Typhoeus.get("www.example.com") + module Actions + + # Make a get request. + # + # @example Make get request. + # Typhoeus.get("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option (see Typhoeus::Request#initialize) + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def get(base_url, options = {}) + Request.new(base_url, options.merge(:method => :get)).run + end + + # Make a post request. + # + # @example Make post request. + # Typhoeus.post("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option (see Typhoeus::Request#initialize) + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def post(base_url, options = {}) + Request.new(base_url, options.merge(:method => :post)).run + end + + # Make a put request. + # + # @example Make put request. + # Typhoeus.put("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option options :params [ Hash ] Params hash which + # is attached to the base_url. + # @option options :body [ Hash ] Body hash which + # becomes a PUT request body. + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def put(base_url, options = {}) + Request.new(base_url, options.merge(:method => :put)).run + end + + # Make a delete request. + # + # @example Make delete request. 
+ # Typhoeus.delete("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option (see Typhoeus::Request#initialize) + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def delete(base_url, options = {}) + Request.new(base_url, options.merge(:method => :delete)).run + end + + # Make a head request. + # + # @example Make head request. + # Typhoeus.head("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option (see Typhoeus::Request#initialize) + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def head(base_url, options = {}) + Request.new(base_url, options.merge(:method => :head)).run + end + + # Make a patch request. + # + # @example Make patch request. + # Typhoeus.patch("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option (see Typhoeus::Request#initialize) + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def patch(base_url, options = {}) + Request.new(base_url, options.merge(:method => :patch)).run + end + + # Make a options request. + # + # @example Make options request. + # Typhoeus.options("www.example.com") + # + # @param (see Typhoeus::Request#initialize) + # + # @option (see Typhoeus::Request#initialize) + # + # @return (see Typhoeus::Response#initialize) + # + # @note (see Typhoeus::Request#initialize) + def options(base_url, options = {}) + Request.new(base_url, options.merge(:method => :options)).run + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/before.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/before.rb new file mode 100644 index 0000000..d6ad765 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/before.rb @@ -0,0 +1,30 @@ +module Typhoeus + class Request + + # This module provides a way to hook into before + # a request runs. This is very powerful + # and you should be careful because when you accidently + # return a falsy value the request won't be executed. + # + # @api private + module Before + + # Overrride run in order to execute callbacks in + # Typhoeus.before. Will break and return when a + # callback returns nil or false. Calls super + # otherwise. + # + # @example Run the request. + # request.run + def run + Typhoeus.before.each do |callback| + value = callback.call(self) + if value.nil? || value == false || value.is_a?(Response) + return response + end + end + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/block_connection.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/block_connection.rb new file mode 100644 index 0000000..7fc966b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/block_connection.rb @@ -0,0 +1,52 @@ +module Typhoeus + class Request + + # This module handles the blocked connection request mode on + # the request side, where only stubbed requests + # are allowed. + # Connection blocking needs to be turned on: + # Typhoeus.configure do |config| + # config.block_connection = true + # end + # + # When trying to do real requests a NoStub error + # is raised. + # + # @api private + module BlockConnection + + # Overrides run in order to check before if block connection + # is turned on. If thats the case a NoStub error is + # raised. + # + # @example Run request. 
+ # request.run + # + # @raise [Typhoeus::Errors::NoStub] If connection is blocked + # and no stub defined. + def run + if blocked? + raise Typhoeus::Errors::NoStub.new(self) + else + super + end + end + + # Returns wether a request is blocked or not. Takes + # request.block_connection and Typhoeus::Config.block_connection + # into consideration. + # + # @example Blocked? + # request.blocked? + # + # @return [ Boolean ] True if blocked, false else. + def blocked? + if block_connection.nil? + Typhoeus::Config.block_connection + else + block_connection + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/cacheable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/cacheable.rb new file mode 100644 index 0000000..74a41a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/cacheable.rb @@ -0,0 +1,38 @@ +module Typhoeus + class Request + module Cacheable + def response=(response) + cache.set(self, response) if cacheable? && !response.cached? + super + end + + def cacheable? + cache + end + + def run + if response = cached_response + response.cached = true + finish(response) + else + super + end + end + + def cached_response + cacheable? && cache.get(self) + end + + def cache_ttl + options[:cache_ttl] + end + + private + + def cache + return nil if options[:cache] === false + options[:cache] || Typhoeus::Config.cache + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/callbacks.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/callbacks.rb new file mode 100644 index 0000000..cc2cc1e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/callbacks.rb @@ -0,0 +1,151 @@ +module Typhoeus + class Request + + # This module contains the logic for the response callbacks. + # + # You can set multiple callbacks, which are then executed + # in the same order. + # + # request.on_complete { |response| p 1 } + # request.on_complete { |response| p 2 } + # request.execute_callbacks + # #=> 1 + # #=> 2 + # + # You can clear the callbacks: + # + # request.on_complete { |response| p 1 } + # request.on_complete { |response| p 2 } + # request.on_complete.clear + # request.execute_callbacks + # #=> nil + # + # @note If you're using the Hydra to execute multiple + # requests, then callbacks are delaying the + # request execution. + module Callbacks + + module Types # :nodoc: + # Set on_complete callback. + # + # @example Set on_complete. + # request.on_complete { |response| p "yay" } + # + # @param [ Block ] block The block to execute. + # + # @yield [ Typhoeus::Response ] + # + # @return [ Array<Block> ] All on_complete blocks. + def on_complete(&block) + @on_complete ||= [] + @on_complete << block if block_given? + @on_complete + end + + # Set on_success callback. + # + # @example Set on_success. + # request.on_success { |response| p "yay" } + # + # @param [ Block ] block The block to execute. + # + # @yield [ Typhoeus::Response ] + # + # @return [ Array<Block> ] All on_success blocks. + def on_success(&block) + @on_success ||= [] + @on_success << block if block_given? + @on_success + end + + # Set on_failure callback. + # + # @example Set on_failure. + # request.on_failure { |response| p "yay" } + # + # @param [ Block ] block The block to execute. + # + # @yield [ Typhoeus::Response ] + # + # @return [ Array<Block> ] All on_failure blocks. 
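+ # Editor's note: illustrative sketch (URL made up, not from the upstream
+ # gem). on_success fires only when the finished response reports success?,
+ # on_failure for every other finished response, and both can be combined
+ # on one request:
+ #
+ # @example React differently to success and failure.
+ #   request = Typhoeus::Request.new("www.example.com")
+ #   request.on_success { |response| puts "HTTP #{response.code}" }
+ #   request.on_failure { |response| warn response.return_message.to_s }
+ #   request.run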
+ def on_failure(&block) + @on_failure ||= [] + @on_failure << block if block_given? + @on_failure + end + + # Set on_headers callback. + # + # @example Set on_headers. + # request.on_headers { |response| p "yay" } + # + # @param [ Block ] block The block to execute. + # + # @yield [ Typhoeus::Response ] + # + # @return [ Array<Block> ] All on_headers blocks. + def on_headers(&block) + @on_headers ||= [] + @on_headers << block if block_given? + @on_headers + end + + # Set on_progress callback. + # + # @example Set on_progress. + # request.on_progress do |dltotal, dlnow, ultotal, ulnow| + # puts "dltotal (#{dltotal}), dlnow (#{dlnow}), ultotal (#{ultotal}), ulnow (#{ulnow})" + # end + # + # @param [ Block ] block The block to execute. + # + # @yield [ Typhoeus::Response ] + # + # @return [ Array<Block> ] All on_progress blocks. + def on_progress(&block) + @on_progress ||= [] + @on_progress << block if block_given? + @on_progress + end + end + + # Execute the headers callbacks and yields response. + # + # @example Execute callbacks. + # request.execute_headers_callbacks + # + # @return [ Array<Object> ] The results of the on_headers callbacks. + # + # @api private + def execute_headers_callbacks(response) + (Typhoeus.on_headers + on_headers).map do |callback| + callback.call(response) + end + end + + # Execute necessary callback and yields response. This + # include in every case on_complete and on_progress, on_success + # if successful and on_failure if not. + # + # @example Execute callbacks. + # request.execute_callbacks + # + # @return [ void ] + # + # @api private + def execute_callbacks + callbacks = Typhoeus.on_complete + Typhoeus.on_progress + on_complete + on_progress + + if response && response.success? + callbacks += Typhoeus.on_success + on_success + elsif response + callbacks += Typhoeus.on_failure + on_failure + end + + callbacks.each do |callback| + self.response.handled_response = callback.call(self.response) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/marshal.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/marshal.rb new file mode 100644 index 0000000..3c8387c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/marshal.rb @@ -0,0 +1,22 @@ +module Typhoeus + class Request + + # This module contains custom serializer. + module Marshal + + # Return the important data needed to serialize this Request, except the + # request callbacks and `hydra`, since they cannot be marshalled. + def marshal_dump + unmarshallable = %w(@on_complete @on_success @on_failure @on_progress @on_headers @on_body @hydra) + (instance_variables - unmarshallable - unmarshallable.map(&:to_sym)).map do |name| + [name, instance_variable_get(name)] + end + end + + # Load. + def marshal_load(attributes) + attributes.each { |name, value| instance_variable_set(name, value) } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/memoizable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/memoizable.rb new file mode 100644 index 0000000..c09ab10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/memoizable.rb @@ -0,0 +1,38 @@ +module Typhoeus + class Request + + # This module handles the GET request memoization + # on the request side. 
Memoization needs to be turned + # on: + # Typhoeus.configure do |config| + # config.memoize = true + # end + # + # @api private + module Memoizable + + # Override response setter and memoizes response + # if the request is memoizable. + # + # @param [ Response ] response The response to set. + # + # @example Set response. + # request.response = response + def response=(response) + hydra.memory[self] = response if memoizable? + super + end + + # Return whether a request is memoizable. + # + # @example Is request memoizable? + # request.memoizable? + # + # @return [ Boolean ] Return true if memoizable, false else. + def memoizable? + Typhoeus::Config.memoize && + (options[:method].nil? || options[:method] == :get) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/operations.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/operations.rb new file mode 100644 index 0000000..a461aa4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/operations.rb @@ -0,0 +1,40 @@ +module Typhoeus + class Request + + # This module contains everything what is necessary + # to make a single request. + module Operations + + # Run a request. + # + # @example Run a request. + # Typhoeus::Request.new("www.example.com").run + # + # @return [ Response ] The response. + def run + easy = EasyFactory.new(self).get + easy.perform + response + end + + # Sets a response, the request on the response + # and executes the callbacks. + # + # @param [Typhoeus::Response] response The response. + # @param [Boolean] bypass_memoization Wether to bypass + # memoization or not. Decides how the response is set. + # + # @return [Typhoeus::Response] The response. + def finish(response, bypass_memoization = nil) + if bypass_memoization + @response = response + else + self.response = response + end + self.response.request = self + execute_callbacks + response + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/responseable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/responseable.rb new file mode 100644 index 0000000..2b91a6d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/responseable.rb @@ -0,0 +1,29 @@ +module Typhoeus + class Request + + # This module contains logic for having a reponse + # getter and setter. + module Responseable + + # Set the response. + # + # @example Set response. + # request.response = response + # + # @param [ Response ] value The response to set. + def response=(value) + @response = value + end + + # Return the response. + # + # @example Return response. + # request.response + # + # @return [ Response ] The response. + def response + @response ||= nil + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/streamable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/streamable.rb new file mode 100644 index 0000000..0cfaa48 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/streamable.rb @@ -0,0 +1,34 @@ +module Typhoeus + class Request + + # This module contians the logic for response streaming. + module Streamable + + # Set on_body callback. + # + # This callback will be called each time a portion of the body is read from the socket. + # Setting an on_body callback will cause the response body to be empty. + # + # @example Set on_body. 
+ # request.on_body { |body_chunk, response| puts "Got #{body_chunk.bytesize} bytes" } + # + # @param [ Block ] block The block to execute. + # + # @yield [ Typhoeus::Response, String ] + # + # @return [ Array<Block> ] All on_body blocks. + def on_body(&block) + @on_body ||= [] + @on_body << block if block_given? + @on_body + end + + # Is this request using streaming? + # + # @return [ Boolean ] True if any on_body blocks have been set. + def streaming? + defined?(@on_body) && @on_body.any? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/stubbable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/stubbable.rb new file mode 100644 index 0000000..ee42557 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/request/stubbable.rb @@ -0,0 +1,30 @@ +module Typhoeus + class Request + + # This module handles stubbing on the request side. + # It plays well with the block_connection configuration, + # which raises when you make a request which is not stubbed. + # + # @api private + module Stubbable + + # Override run in order to check for matching expectations. + # When an expectation is found, super is not called. Instead a + # canned response is assigned to the request. + # + # @example Run the request. + # request.run + # + # @return [ Response ] The response. + def run + if response = Expectation.response_for(self) + execute_headers_callbacks(response) + self.on_body.each{ |callback| callback.call(response.body, response) } + finish(response) + else + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response.rb new file mode 100644 index 0000000..efa7dbd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response.rb @@ -0,0 +1,68 @@ +require 'typhoeus/response/header' +require 'typhoeus/response/informations' +require 'typhoeus/response/status' +require 'typhoeus/response/cacheable' + +module Typhoeus + + # This class represents the response. + class Response + include Response::Informations + include Response::Status + include Response::Cacheable + + # Remembers the corresponding request. + # + # @example Get request. + # request = Typhoeus::Request.new("www.example.com") + # response = request.run + # request == response.request + # #=> true + # + # @return [ Typhoeus::Request ] + attr_accessor :request + + # The provided options, which contain all the + # informations about the request. + # + # @return [ Hash ] + attr_accessor :options + + # Set the handled response. + attr_writer :handled_response + + # @api private + attr_writer :mock + + # Create a new response. + # + # @example Create a response. + # Response.new + # + # @param [ Hash ] options The options hash. + # + # @return [ Response ] The new response. + def initialize(options = {}) + @options = options + @headers = Header.new(options[:headers]) if options[:headers] + end + + # Returns whether this request is mocked + # or not. + # + # @api private + def mock + defined?(@mock) ? @mock : options[:mock] + end + alias :mock? :mock + + # Returns the handled_response if it has + # been defined; otherwise, returns the response + # + # @return [ Object ] The result of callbacks + # done on the response or the original response. 
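+ # Editor's note: illustrative sketch (URL and JSON parsing chosen by the
+ # editor, not part of the gem). The value returned by the last executed
+ # callback is what ends up here, so a callback can hand parsed data on:
+ #
+ # @example Let a callback replace the handled response.
+ #   require "json"
+ #   request = Typhoeus::Request.new("www.example.com/data.json")
+ #   request.on_complete { |response| JSON.parse(response.body) }
+ #   response = request.run
+ #   response.handled_response
+ #   #=> the Hash returned by the callback rather than the raw Response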
+ def handled_response + @handled_response || self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/cacheable.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/cacheable.rb new file mode 100644 index 0000000..0170171 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/cacheable.rb @@ -0,0 +1,14 @@ +module Typhoeus + class Response + module Cacheable + + # Set the cache status, if we got response from cache + # it will have cached? == true + attr_writer :cached + + def cached? + defined?(@cached) ? !!@cached : false + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/header.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/header.rb new file mode 100644 index 0000000..11580ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/header.rb @@ -0,0 +1,105 @@ +require 'delegate' + +module Typhoeus + class Response + + # This class represents the response header. + # It can be accessed like a hash. + # Values can be strings (normal case) or arrays of strings (for duplicates headers) + # + # @api private + class Header < DelegateClass(Hash) + + # Create a new header. + # + # @example Create new header. + # Header.new(raw) + # + # @param [ String ] raw The raw header. + def initialize(raw) + super({}) + @raw = raw + @sanitized = {} + parse + end + + def [](key) + fetch(key) { @sanitized[key.to_s.downcase] } + end + + # Parses the raw header. + # + # @example Parse header. + # header.parse + def parse + case @raw + when Hash + raw.each do |k, v| + process_pair(k, v) + end + when String + raw.split(/\r?\n(?!\s)/).each do |header| + header.strip! + next if header.empty? || header.start_with?( 'HTTP/' ) + process_line(header) + end + end + end + + private + + # Processes line and saves the result. + # + # @return [ void ] + def process_line(header) + key, value = header.split(':', 2) + process_pair(key.strip, (value ? value.strip.gsub(/\r?\n\s*/, ' ') : '')) + end + + # Sets key value pair for self and @sanitized. + # + # @return [ void ] + def process_pair(key, value) + set_value(key, value, self) + @sanitized[key.downcase] = self[key] + end + + # Sets value for key in specified hash + # + # @return [ void ] + def set_value(key, value, hash) + current_value = hash[key] + if current_value + if current_value.is_a? Array + current_value << value + else + hash[key] = [current_value, value] + end + else + hash[key] = value + end + end + + # Returns the raw header or empty string. + # + # @example Return raw header. + # header.raw + # + # @return [ String ] The raw header. + def raw + @raw || '' + end + + # Sets the default proc for the specified hash independent of the Ruby version. + # + # @return [ void ] + def set_default_proc_on(hash, default_proc) + if hash.respond_to?(:default_proc=) + hash.default_proc = default_proc + else + hash.replace(Hash.new(&default_proc).merge(hash)) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/informations.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/informations.rb new file mode 100644 index 0000000..73544dd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/informations.rb @@ -0,0 +1,248 @@ +module Typhoeus + class Response + + # This module contains logic about informations + # on a response. 
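A hedged illustration of the Header class defined above: lookups are case-insensitive and repeated headers are collected into an array. The raw header string below is made up, and in normal use the object comes back from response.headers rather than being built by hand:

    require "typhoeus"

    raw = "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nSet-Cookie: a=1\r\nSet-Cookie: b=2"
    header = Typhoeus::Response::Header.new(raw)

    header["Content-Type"]  #=> "text/html"
    header["content-type"]  #=> "text/html" (case-insensitive lookup)
    header["Set-Cookie"]    #=> ["a=1", "b=2"] (duplicate headers become an array)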
+ module Informations + + # Return libcurls return value. + # + # @example Get return_code. + # response.return_code + # + # @return [ Symbol ] The return_code. + def return_code + options[:return_code] + end + + # Returns a string describing the return. + # + # @example Get return_message. + # response.return_message + # + # @return [ String ] The return_message. + # + # @since 0.6.2 + def return_message + Ethon::Curl.easy_strerror(return_code) if return_code + end + + # Return the http response body. + # + # @example Get response_body. + # response.response_body + # + # @return [ String ] The response_body. + def response_body + options[:response_body] || options[:body] + end + alias :body :response_body + + # Return the http response headers. + # + # @example Get response_headers. + # response.response_headers + # + # @return [ String ] The response_headers. + def response_headers + return options[:response_headers] if options[:response_headers] + if mock? && h = options[:headers] + status_code = return_code || "200" + reason_phrase = status_code == "200" ? "OK" : "Mock Reason Phrase" + status_line = "HTTP/1.1 #{status_code} #{reason_phrase}" + actual_headers = h.map{ |k,v| [k, v.respond_to?(:join) ? v.join(',') : v] }. + map{ |e| "#{e.first}: #{e.last}" } + + [status_line, *actual_headers].join("\r\n") + end + end + + # Return the last received HTTP, FTP or SMTP response code. + # The value will be zero if no server response code has + # been received. Note that a proxy's CONNECT response should + # be read with http_connect_code and not this. + # + # @example Get response_code. + # response.response_code + # + # @return [ Integer ] The response_code. + def response_code + (options[:response_code] || options[:code]).to_i + end + alias :code :response_code + + # Return the available http auth methods. + # Bitmask indicating the authentication method(s) + # available. + # + # @example Get httpauth_avail. + # response.httpauth_avail + # + # @return [ Integer ] The bitmask. + def httpauth_avail + options[:httpauth_avail] + end + + + # Return the total time in seconds for the previous + # transfer, including name resolving, TCP connect etc. + # + # @example Get total_time. + # response.total_time + # + # @return [ Float ] The total_time. + def total_time + options[:total_time] || options[:time] + end + alias :time :total_time + + # Return the time, in seconds, it took from the start + # until the first byte is received by libcurl. This + # includes pretransfer time and also the time the + # server needs to calculate the result. + # + # @example Get starttransfer_time. + # response.starttransfer_time + # + # @return [ Float ] The starttransfer_time. + def starttransfer_time + options[:starttransfer_time] || options[:start_transfer_time] + end + alias :start_transfer_time :starttransfer_time + + # Return the time, in seconds, it took from the start + # until the SSL/SSH connect/handshake to the remote + # host was completed. This time is most often very near + # to the pre transfer time, except for cases such as HTTP + # pipelining where the pretransfer time can be delayed + # due to waits in line for the pipeline and more. + # + # @example Get appconnect_time. + # response.appconnect_time + # + # @return [ Float ] The appconnect_time. + def appconnect_time + options[:appconnect_time] || options[:app_connect_time] + end + alias :app_connect_time :appconnect_time + + # Return the time, in seconds, it took from the start + # until the file transfer is just about to begin. 
This + # includes all pre-transfer commands and negotiations + # that are specific to the particular protocol(s) involved. + # It does not involve the sending of the protocol- + # specific request that triggers a transfer. + # + # @example Get pretransfer_time. + # response.pretransfer_time + # + # @return [ Float ] The pretransfer_time. + def pretransfer_time + options[:pretransfer_time] + end + + # Return the time, in seconds, it took from the start + # until the connect to the remote host (or proxy) was completed. + # + # @example Get connect_time. + # response.connect_time + # + # @return [ Float ] The connect_time. + def connect_time + options[:connect_time] + end + + # Return the time, in seconds, it took from the + # start until the name resolving was completed. + # + # @example Get namelookup_time. + # response.namelookup_time + # + # @return [ Float ] The namelookup_time. + def namelookup_time + options[:namelookup_time] || options[:name_lookup_time] + end + alias :name_lookup_time :namelookup_time + + # Return the time, in seconds, it took for all redirection steps + # include name lookup, connect, pretransfer and transfer before the + # final transaction was started. time_redirect shows the complete + # execution time for multiple redirections. + # + # @example Get redirect_time. + # response.redirect_time + # + # @return [ Float ] The redirect_time. + def redirect_time + options[:redirect_time] + end + + # Return the last used effective url. + # + # @example Get effective_url. + # response.effective_url + # + # @return [ String ] The effective_url. + def effective_url + options[:effective_url] + end + + # Return the string holding the IP address of the most recent + # connection done with this curl handle. This string + # may be IPv6 if that's enabled. + # + # @example Get primary_ip. + # response.primary_ip + # + # @return [ String ] The primary_ip. + def primary_ip + options[:primary_ip] + end + + # Return the total number of redirections that were + # actually followed + # + # @example Get redirect_count. + # response.redirect_count + # + # @return [ Integer ] The redirect_count. + def redirect_count + options[:redirect_count] + end + + def request_size + options[:request_size] + end + + def debug_info + options[:debug_info] + end + + # Returns the response header. + # + # @example Return headers. + # response.headers + # + # @return [ Typhoeus::Header ] The response header. + def headers + return Header.new(options[:headers]) if mock? && options[:headers] + return nil if response_headers.nil? && !defined?(@headers) + @headers ||= Header.new(response_headers.split("\r\n\r\n").last) + end + alias :headers_hash :headers + + # Return all redirections in between as multiple + # responses with header. + # + # @example Return redirections. + # response.redirections + # + # @return [ Array<Typhoeus::Response> ] The redirections + def redirections + return [] unless response_headers + response_headers.split("\r\n\r\n")[0..-2].map{ |h| Response.new(:response_headers => h) } + end + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/status.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/status.rb new file mode 100644 index 0000000..e1840f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/response/status.rb @@ -0,0 +1,106 @@ +module Typhoeus + class Response + + # This module contains logic about the http + # status. + module Status + + # Return the status message if present. 
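A hedged sketch tying the Informations and Status helpers together (the URL is illustrative and the timings obviously depend on the server):

    require "typhoeus"

    response = Typhoeus.get("www.example.com", :followlocation => true)

    if response.success?
      # a 2xx code and libcurl reporting :ok
      puts "#{response.code} #{response.status_message} from #{response.effective_url}"
      puts "total #{response.total_time}s, connect #{response.connect_time}s"
    elsif response.timed_out?
      puts "timed out"
    else
      puts "failed: #{response.return_message}"
    end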
+ # + # @example Return status message. + # reesponse.status_message + # + # @return [ String ] The message. + def status_message + return @status_message if defined?(@status_message) && @status_message + return options[:status_message] unless options[:status_message].nil? + + # HTTP servers can choose not to include the explanation to HTTP codes. The RFC + # states this (http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4): + # Except when responding to a HEAD request, the server SHOULD include an entity containing + # an explanation of the error situation [...] + # This means 'HTTP/1.1 404' is as valid as 'HTTP/1.1 404 Not Found' and we have to handle it. + # + # Regexp doc: http://rubular.com/r/eAr1oVYsVa + if first_header_line != nil and first_header_line[/\d{3} (.*)$/, 1] != nil + @status_message = first_header_line[/\d{3} (.*)$/, 1].chomp + else + @status_message = nil + end + end + + # Return the http version. + # + # @example Return http version. + # response.http_version + # + # @return [ String ] The http version. + def http_version + @http_version ||= first_header_line ? first_header_line[/HTTP\/(\S+)/, 1] : nil + end + + # Return whether the response is a success. + # + # @example Return if the response was successful. + # response.success? + # + # @return [ Boolean ] Return true if successful, false else. + def success? + (mock || return_code == :ok) && response_code && has_good_response_code? + end + + # Return whether the response is a failure. + # + # @example Return if the response was failed. + # response.failure? + # + # @return [ Boolean ] Return true if failure, false else. + def failure? + (mock || return_code == :internal_server_error) && response_code && has_bad_response_code? + end + + # Return wether the response is modified. + # + # @example Return if the response was modified. + # response.modified? + # + # @return [ Boolean ] Return true if modified, false else. + def modified? + (mock || return_code == :ok) && response_code && response_code != 304 + end + + # Return whether the response is timed out. + # + # @example Return if the response timed out. + # response.timed_out? + # + # @return [ Boolean ] Return true if timed out, false else. + def timed_out? + return_code == :operation_timedout + end + + private + + # :nodoc: + def first_header_line + @first_header_line ||= begin + if response_headers.to_s.include?("\r\n\r\n") + response_headers.to_s.split("\r\n\r\n").last.split("\r\n").first + else + response_headers.to_s.split("\r\n").first + end + end + end + + # :nodoc: + def has_good_response_code? + response_code >= 200 && response_code < 300 + end + + # :nodoc: + def has_bad_response_code? + !has_good_response_code? + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/version.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/version.rb new file mode 100644 index 0000000..048f091 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/lib/typhoeus/version.rb @@ -0,0 +1,5 @@ +module Typhoeus + + # The current Typhoeus version. 
+ VERSION = '1.4.0' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/perf/profile.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/perf/profile.rb new file mode 100644 index 0000000..29ec9c2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/perf/profile.rb @@ -0,0 +1,14 @@ +require 'typhoeus' +require 'ruby-prof' + +calls = 50 +base_url = "http://127.0.0.1:3000/" + +RubyProf.start +calls.times do |i| + Typhoeus::Request.get(base_url+i.to_s) +end +result = RubyProf.stop + +printer = RubyProf::FlatPrinter.new(result) +printer.print(STDOUT) diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/perf/vs_nethttp.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/perf/vs_nethttp.rb new file mode 100644 index 0000000..305f9fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/perf/vs_nethttp.rb @@ -0,0 +1,64 @@ +require 'typhoeus' +require 'net/http' +require 'open-uri' +require 'benchmark' + +URL = "http://localhost:300" +hydra = Typhoeus::Hydra.new(max_concurrency: 3) + +if defined? require_relative + require_relative '../spec/support/localhost_server.rb' + require_relative '../spec/support/server.rb' +else + require '../spec/support/localhost_server.rb' + require '../spec/support/server.rb' +end +LocalhostServer.new(TESTSERVER.new, 3000) +LocalhostServer.new(TESTSERVER.new, 3001) +LocalhostServer.new(TESTSERVER.new, 3002) + +def url_for(i) + "#{URL}#{i%3}/" +end + +Benchmark.bm do |bm| + + [1000].each do |calls| + puts "[ #{calls} requests ]" + + bm.report("net/http ") do + calls.times do |i| + uri = URI.parse(url_for(i)) + Net::HTTP.get_response(uri) + end + end + + bm.report("open ") do + calls.times do |i| + open(url_for(i)) + end + end + + bm.report("request ") do + calls.times do |i| + Typhoeus::Request.get(url_for(i)) + end + end + + bm.report("hydra ") do + calls.times do |i| + hydra.queue(Typhoeus::Request.new(url_for(i))) + end + hydra.run + end + + bm.report("hydra memoize ") do + Typhoeus::Config.memoize = true + calls.times do |i| + hydra.queue(Typhoeus::Request.new(url_for(i))) + end + hydra.run + Typhoeus::Config.memoize = false + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/rack/typhoeus/middleware/params_decoder/helper_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/rack/typhoeus/middleware/params_decoder/helper_spec.rb new file mode 100644 index 0000000..3aee759 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/rack/typhoeus/middleware/params_decoder/helper_spec.rb @@ -0,0 +1,156 @@ +require 'spec_helper' +require "rack/typhoeus" + +describe "Rack::Typhoeus::Middleware::ParamsDecoder::Helper" do + + let(:klass) do + Class.new do + include Rack::Typhoeus::Middleware::ParamsDecoder::Helper + end.new + end + + describe "#decode" do + let(:decoded) { klass.decode(params) } + let(:params) { { :array => {'0' => :a, '1' => :b } } } + + it "decodes" do + expect(decoded[:array]).to match_array([:a, :b]) + end + + it "doesn't modify" do + expect(decoded).to_not be(params) + end + end + + describe "#decode!" 
do + let(:decoded) { klass.decode!(params) } + + context "when hash" do + context "when encoded" do + context "when simple" do + let(:params) { { :array => {'0' => :a, '1' => :b } } } + + it "decodes" do + expect(decoded[:array]).to match_array([:a, :b]) + end + + it "modifies" do + expect(decoded).to eq(params) + end + end + + context "when longer and more complex" do + let(:params) do + { + :ids => { + "0" => "407304", + "1" => "407305", + "2" => "407306", + "3" => "407307", + "4" => "407308", + "5" => "407309", + "6" => "407310", + "7" => "407311", + "8" => "407312", + "9" => "407313", + "10" => "327012" + } + } + end + + it "decodes ensuring arrays maintain their original order" do + expect(decoded[:ids]).to eq(["407304", "407305", "407306", "407307", "407308", "407309", "407310", "407311", "407312", "407313", "327012"]) + end + end + + context "when nested" do + let(:params) do + { :array => { '0' => 0, '1' => { '0' => 'sub0', '1' => 'sub1' } } } + end + + it "decodes" do + expect(decoded[:array]).to include(0) + expect(decoded[:array].find{|e| e.is_a?(Array)}).to( + match_array(['sub0', 'sub1']) + ) + end + + it "modifies" do + expect(decoded).to eq(params) + end + end + end + + context "when not encoded" do + let(:params) { {:a => :a} } + + it "doesn't modify" do + expect(decoded).to be(params) + end + end + end + + context "when no hash" do + let(:params) { "a" } + + it "returns self" do + expect(decoded).to be(params) + end + end + end + + describe "#encoded?" do + let(:encoded) { klass.send(:encoded?, params) } + + context "when there is only one key" do + context "and its 0" do + let(:params){ {'0' => 1} } + it 'returns true' do + expect(encoded).to be_truthy + end + end + context "and its not 0" do + let(:params){ {'some-key' => 1}} + it 'returns false' do + expect(encoded).to be_falsey + end + end + end + + context "when keys are ascending numbers starting with zero" do + let(:params) { Hash[12.times.map {|i| [i, (i+65).chr]}] } + + it "returns true" do + expect(encoded).to be_truthy + end + end + + context "when keys are not ascending numbers starting with zero" do + let(:params) { {:a => 1} } + + it "returns false" do + expect(encoded).to be_falsey + end + end + end + + describe "#convert" do + let(:converted) { klass.send(:convert, params) } + + context "when encoded" do + let(:params) { {'0' => :a, '1' => :b} } + + it "returns values" do + expect(converted).to match_array([:a, :b]) + end + end + + context "when not encoded" do + let(:params) { {:a => :a} } + + it "returns unmodified" do + expect(converted).to be(params) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/rack/typhoeus/middleware/params_decoder_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/rack/typhoeus/middleware/params_decoder_spec.rb new file mode 100644 index 0000000..187da9f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/rack/typhoeus/middleware/params_decoder_spec.rb @@ -0,0 +1,31 @@ +require 'spec_helper' + +describe "Rack::Typhoeus::Middleware::ParamsDecoder" do + + before(:all) do + require "rack/typhoeus" + end + + let(:app) do + double + end + + let(:env) do + double + end + + let(:klass) do + Rack::Typhoeus::Middleware::ParamsDecoder + end + + describe "#call" do + end + + context "when requesting" do + let(:response) { Typhoeus.get("localhost:3001", :params => {:x => [:a]}) } + + it "transforms parameters" do + expect(response.body).to include("query_hash\":{\"x\":[\"a\"]}") + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/spec_helper.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/spec_helper.rb new file mode 100644 index 0000000..73302cc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/spec_helper.rb @@ -0,0 +1,29 @@ +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), "..", "lib")) + +require "bundler" +Bundler.setup +require "typhoeus" +require "rspec" + +Dir[File.join(File.dirname(__FILE__), "support/**/*.rb")].each { |f| require f } + +RSpec.configure do |config| + config.order = :rand + + config.before(:suite) do + LocalhostServer.new(TESTSERVER.new, 3001) + end + + config.after do + Typhoeus::Pool.clear + Typhoeus::Expectation.clear + Typhoeus.before.clear + Typhoeus.on_complete.clear + Typhoeus.on_success.clear + Typhoeus.on_failure.clear + Typhoeus::Config.verbose = false + Typhoeus::Config.block_connection = false + Typhoeus::Config.memoize = false + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/localhost_server.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/localhost_server.rb new file mode 100644 index 0000000..c5a7508 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/localhost_server.rb @@ -0,0 +1,94 @@ +require 'rack' +require 'rack/handler/webrick' +require 'net/http' + +# The code for this is inspired by Capybara's server: +# http://github.com/jnicklas/capybara/blob/0.3.9/lib/capybara/server.rb +class LocalhostServer + READY_MESSAGE = "Server ready" + + class Identify + def initialize(app) + @app = app + end + + def call(env) + if env["PATH_INFO"] == "/__identify__" + [200, {}, [LocalhostServer::READY_MESSAGE]] + else + @app.call(env) + end + end + end + + attr_reader :port + + def initialize(rack_app, port = nil) + @port = port || find_available_port + @rack_app = rack_app + concurrently { boot } + wait_until(10, "Boot failed.") { booted? } + end + + private + + def find_available_port + server = TCPServer.new('127.0.0.1', 0) + server.addr[1] + ensure + server.close if server + end + + def boot + # Use WEBrick since it's part of the ruby standard library and is available on all ruby interpreters. + options = { :Port => port } + options.merge!(:AccessLog => [], :Logger => WEBrick::BasicLog.new(StringIO.new)) unless ENV['VERBOSE_SERVER'] + Rack::Handler::WEBrick.run(Identify.new(@rack_app), options) + end + + def booted? + res = ::Net::HTTP.get_response("localhost", '/__identify__', port) + if res.is_a?(::Net::HTTPSuccess) or res.is_a?(::Net::HTTPRedirection) + return res.body == READY_MESSAGE + end + rescue Errno::ECONNREFUSED, Errno::EBADF + return false + end + + def concurrently + if should_use_subprocess? + pid = Process.fork do + trap(:INT) { ::Rack::Handler::WEBrick.shutdown } + yield + exit # manually exit; otherwise this sub-process will re-run the specs that haven't run yet. + end + + at_exit do + Process.kill('INT', pid) + begin + Process.wait(pid) + rescue Errno::ECHILD + # ignore this error...I think it means the child process has already exited. + end + end + else + Thread.new { yield } + end + end + + def should_use_subprocess? 
+ # !ENV['THREADED'] + false + end + + def wait_until(timeout, error_message, &block) + start_time = Time.now + + while true + return if yield + raise TimeoutError.new(error_message) if (Time.now - start_time) > timeout + sleep(0.05) + end + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/memory_cache.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/memory_cache.rb new file mode 100644 index 0000000..15cc56b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/memory_cache.rb @@ -0,0 +1,15 @@ +class MemoryCache + attr_reader :memory + + def initialize + @memory = {} + end + + def get(request) + memory[request] + end + + def set(request, response) + memory[request] = response + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/server.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/server.rb new file mode 100644 index 0000000..b7f6f06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/support/server.rb @@ -0,0 +1,116 @@ +#!/usr/bin/env ruby +require 'json' +require 'zlib' +require 'sinatra/base' +require 'rack/typhoeus' + +TESTSERVER = Sinatra.new do + set :logging, false + use Rack::Typhoeus::Middleware::ParamsDecoder + + fail_count = 0 + + post '/file' do + { + 'content-type' => params[:file][:type], + 'filename' => params[:file][:filename], + 'content' => params[:file][:tempfile].read, + 'request-content-type' => request.env['CONTENT_TYPE'] + }.to_json + end + + get '/multiple-headers' do + [200, { 'Set-Cookie' => %w[ foo bar ], 'Content-Type' => 'text/plain' }, ['']] + end + + get '/cookies-test' do + [200, { 'Set-Cookie' => %w(foo=bar bar=foo), 'Content-Type' => 'text/plain' }, ['']] + end + + get '/cookies-test2' do + [200, { 'Set-Cookie' => %w(foo2=bar bar2=foo), 'Content-Type' => 'text/plain' }, ['']] + end + + get '/fail/:number' do + if fail_count >= params[:number].to_i + "ok" + else + fail_count += 1 + error 500, "oh noes!" + end + end + + get '/fail_forever' do + error 500, "oh noes!" + end + + get '/redirect' do + redirect '/' + end + + get '/bad_redirect' do + redirect '/bad_redirect' + end + + get '/auth_basic/:username/:password' do + @auth ||= Rack::Auth::Basic::Request.new(request.env) + # Check that we've got a basic auth, and that it's credentials match the ones + # provided in the request + if @auth.provided? && @auth.basic? && @auth.credentials == [ params[:username], params[:password] ] + # auth is valid - confirm it + true + else + # invalid auth - request the authentication + response['WWW-Authenticate'] = %(Basic realm="Testing HTTP Auth") + throw(:halt, [401, "Not authorized\n"]) + end + end + + get '/auth_ntlm' do + # we're just checking for the existence if NTLM auth header here. 
It's validation + # is too troublesome and really doesn't bother is much, it's up to libcurl to make + # it valid + response['WWW-Authenticate'] = 'NTLM' + is_ntlm_auth = /^NTLM/ =~ request.env['HTTP_AUTHORIZATION'] + true if is_ntlm_auth + throw(:halt, [401, "Not authorized\n"]) if !is_ntlm_auth + end + + get '/gzipped' do + req_env = request.env.to_json + z = Zlib::Deflate.new + gzipped_env = z.deflate(req_env, Zlib::FINISH) + z.close + response['Content-Encoding'] = 'gzip' + gzipped_env + end + + get '/**' do + sleep params["delay"].to_i if params.has_key?("delay") + request.env.merge!(:body => request.body.read).to_json + end + + head '/**' do + sleep params["delay"].to_i if params.has_key?("delay") + end + + put '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + post '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + delete '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + patch '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + options '/**' do + request.env.merge!(:body => request.body.read).to_json + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/adapters/faraday_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/adapters/faraday_spec.rb new file mode 100644 index 0000000..716fb76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/adapters/faraday_spec.rb @@ -0,0 +1,339 @@ +if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("1.9.0") + require 'spec_helper' + require 'typhoeus/adapters/faraday' + + describe Faraday::Adapter::Typhoeus do + let(:base_url) { "http://localhost:3001" } + let(:adapter) { described_class.new(nil) } + let(:request) { Typhoeus::Request.new(base_url) } + let(:conn) do + Faraday.new(:url => base_url) do |faraday| + faraday.adapter :typhoeus + end + end + let(:response) { conn.get("/") } + + context "when parallel" do + it "returns a faraday response" do + response = nil + conn.in_parallel { response = conn.get("/") } + expect(response).to be_a(Faraday::Response) + end + + it "succeeds" do + response = nil + conn.in_parallel { response = conn.get("/") } + expect(response.status).to be(200) + end + end + + context "when not parallel" do + it "returns a faraday response" do + expect(response).to be_a(Faraday::Response) + end + + it "succeeds" do + expect(response.status).to be(200) + end + end + + context "when a response is stubbed" do + before do + stub = Typhoeus::Response.new \ + :code => 200, + :headers => { "Foo" => "2", "Bar" => "3" }, + :body => "Hello", + :mock => true + + Typhoeus.stub(base_url + '/').and_return(stub) + end + + it 'stubs the status code' do + expect(response.status).to eq(200) + end + + it 'stubs the response body' do + expect(response.body).to eq("Hello") + end + + it 'stubs the headers' do + expect(response.headers).to eq("Foo" => "2", "Bar" => "3") + end + end + + describe "#initialize" do + let(:request) { adapter.method(:typhoeus_request).call({}) } + + context "when typhoeus request options specified" do + let(:adapter) { described_class.new(nil, { :forbid_reuse => true, :maxredirs => 1 }) } + + it "should set option for request" do + expect(request.options[:forbid_reuse]).to be_truthy + expect(request.options[:maxredirs]).to eq(1) + end + end + end + + describe "#perform_request" do + let(:env) { {} } + + context "when body" do + let(:env) { { :body => double(:read => "body") } } + + it "reads body" do + 
expect(adapter.method(:read_body).call(env)).to eq("body") + end + end + + context "parallel_manager" do + context "when given" do + let(:env) { { :parallel_manager => double(:queue => true), :ssl => {}, :request => {} } } + + it "uses" do + adapter.method(:perform_request).call(env) + end + end + + context "when not given" do + let(:env) { { :method => :get, :ssl => {}, :request => {} } } + + it "falls back to single" do + expect(Typhoeus::Request).to receive(:new).and_return(double(:options => {}, :on_complete => [], :run => true)) + adapter.method(:perform_request).call(env) + end + end + end + end + + describe "#request" do + let(:env) do + { :url => "url", :method => :get, :body => "body", :request_headers => {}, :ssl => {}, :request => {} } + end + + let(:request) { adapter.method(:request).call(env) } + + it "returns request" do + expect(request).to be_a(Typhoeus::Request) + end + + it "sets url" do + expect(request.base_url).to eq("url") + end + + it "sets http method" do + expect(request.original_options[:method]).to eq(:get) + end + + it "sets body" do + expect(request.original_options[:body]).to eq("body") + end + + it "sets headers" do + expect(request.original_options[:headers]).to eq({}) + end + + it "sets on_complete callback" do + expect(request.on_complete.size).to eq(1) + end + end + + context "when the connection failed" do + before do + stub = Typhoeus::Response.new \ + :response_code => 0, + :return_code => 0, + :mock => true + + Typhoeus.stub(base_url + '/').and_return(stub) + end + + context "when parallel" do + it "isn't successful" do + response = nil + conn.in_parallel { response = conn.get("/") } + expect(response.success?).to be_falsey + end + + it "translates the response code into an error message" do + response = nil + conn.in_parallel { response = conn.get("/") } + expect(response.env[:typhoeus_return_message]).to eq("No error") + end + end + + context "when not parallel" do + it "raises an error" do + expect { conn.get("/") }.to raise_error(Faraday::ConnectionFailed, "No error") + end + end + end + + describe "#configure_socket" do + let(:env) { { :request => { :bind => { :host => "interface" } } } } + + before { adapter.method(:configure_socket).call(request, env) } + + context "when host" do + it "sets interface" do + expect(request.options[:interface]).to eq("interface") + end + end + end + + describe "#configure_timeout" do + before { adapter.method(:configure_timeout).call(request, env) } + + context "when timeout" do + let(:env) { { :request => { :timeout => 1 } } } + + it "sets timeout_ms" do + expect(request.options[:timeout_ms]).to eq(1000) + end + end + + context "when open_timeout" do + let(:env) { { :request => { :open_timeout => 1 } } } + + it "sets connecttimeout_ms" do + expect(request.options[:connecttimeout_ms]).to eq(1000) + end + end + end + + describe "#configure_proxy" do + before { adapter.method(:configure_proxy).call(request, env) } + + context "when proxy" do + let(:env) { { :request => { :proxy => { :uri => double(:scheme => 'http', :host => "localhost", :port => "3001") } } } } + + it "sets proxy" do + expect(request.options[:proxy]).to eq("http://localhost:3001") + end + + context "when username and password" do + let(:env) do + { :request => { :proxy => { + :uri => double(:scheme => 'http', :host => :a, :port => :b), + :user => "a", + :password => "b" + } } } + end + + it "sets proxyuserpwd" do + expect(request.options[:proxyuserpwd]).to eq("a:b") + end + end + end + end + + describe "#configure_ssl" do + before { 
adapter.method(:configure_ssl).call(request, env) } + + context "when version" do + let(:env) { { :ssl => { :version => "a" } } } + + it "sets sslversion" do + expect(request.options[:sslversion]).to eq("a") + end + end + + context "when client_cert" do + let(:env) { { :ssl => { :client_cert => "a" } } } + + it "sets sslcert" do + expect(request.options[:sslcert]).to eq("a") + end + end + + context "when client_key" do + let(:env) { { :ssl => { :client_key => "a" } } } + + it "sets sslkey" do + expect(request.options[:sslkey]).to eq("a") + end + end + + context "when ca_file" do + let(:env) { { :ssl => { :ca_file => "a" } } } + + it "sets cainfo" do + expect(request.options[:cainfo]).to eq("a") + end + end + + context "when ca_path" do + let(:env) { { :ssl => { :ca_path => "a" } } } + + it "sets capath" do + expect(request.options[:capath]).to eq("a") + end + end + + context "when client_cert_passwd" do + let(:env) { { :ssl => { :client_cert_passwd => "a" } } } + + it "sets keypasswd to the value of client_cert_passwd" do + expect(request.options[:keypasswd]).to eq("a") + end + end + + context "when client_certificate_password" do + let(:env) { { :ssl => { :client_certificate_password => "a" } } } + + it "sets keypasswd to the value of client_cert_passwd" do + expect(request.options[:keypasswd]).to eq("a") + end + end + + context "when no client_cert_passwd" do + let(:env) { { :ssl => { } } } + + it "does not set keypasswd on options" do + expect(request.options).not_to have_key :keypasswd + end + end + + context "when verify is false" do + let(:env) { { :ssl => { :verify => false } } } + + it "sets ssl_verifyhost to 0" do + expect(request.options[:ssl_verifyhost]).to eq(0) + end + + it "sets ssl_verifypeer to false" do + expect(request.options[:ssl_verifypeer]).to be_falsey + end + end + + context "when verify is true" do + let(:env) { { :ssl => { :verify => true } } } + + it "sets ssl_verifyhost to 2" do + expect(request.options[:ssl_verifyhost]).to eq(2) + end + + it "sets ssl_verifypeer to true" do + expect(request.options[:ssl_verifypeer]).to be_truthy + end + end + end + + describe "#parallel?" 
do + context "when parallel_manager" do + let(:env) { { :parallel_manager => true } } + + it "returns true" do + expect(adapter.method(:parallel?).call(env)).to be_truthy + end + end + + context "when no parallel_manager" do + let(:env) { { :parallel_manager => nil } } + + it "returns false" do + expect(adapter.method(:parallel?).call(env)).to be_falsey + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/cache/dalli_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/cache/dalli_spec.rb new file mode 100644 index 0000000..1ea2ce0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/cache/dalli_spec.rb @@ -0,0 +1,41 @@ +if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("1.9.0") + require 'dalli' + require 'typhoeus/cache/dalli' + require 'spec_helper' + + describe Typhoeus::Cache::Dalli do + let(:dalli) { instance_double(Dalli::Client) } + let(:cache) { Typhoeus::Cache::Dalli.new(dalli) } + + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url, {:method => :get}) } + let(:response) { Typhoeus::Response.new(:response_code => 0, :return_code => 0, :mock => true) } + + describe "#set" do + it "sends the request to Dalli" do + expect(dalli).to receive(:set).with(request.cache_key, response, nil) + + cache.set(request, response) + end + end + + describe "#get" do + it "returns nil when the key is not in the cache" do + expect(dalli).to receive(:get).with(request.cache_key).and_return(nil) + + expect(cache.get(request)).to be_nil + end + + it "returns the cached response when the key is in cache" do + expect(dalli).to receive(:get).with(request.cache_key).and_return(response) + + result = cache.get(request) + expect(result).to_not be_nil + expect(result.response_code).to eq(response.response_code) + expect(result.return_code).to eq(response.return_code) + expect(result.headers).to eq(response.headers) + expect(result.body).to eq(response.body) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/cache/redis_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/cache/redis_spec.rb new file mode 100644 index 0000000..9ee5941 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/cache/redis_spec.rb @@ -0,0 +1,41 @@ +require 'redis' +require 'typhoeus/cache/redis' +require 'spec_helper' + +describe Typhoeus::Cache::Redis do + let(:redis) { instance_double(Redis) } + let(:cache) { Typhoeus::Cache::Redis.new(redis) } + + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url, {:method => :get}) } + let(:response) { Typhoeus::Response.new(:response_code => 0, :return_code => 0, :mock => true) } + let(:serialized_response) { Marshal.dump(response) } + + describe "#set" do + it "sends the serialized request to Redis" do + expect(redis).to receive(:set).with(request.cache_key, serialized_response) + expect(redis).to_not receive(:expire).with(request.cache_key, request.cache_ttl) + + cache.set(request, response) + end + end + + describe "#get" do + it "returns nil when the key is not in Redis" do + expect(redis).to receive(:get).with(request.cache_key).and_return(nil) + + expect(cache.get(request)).to be_nil + end + + it "returns the cached response when the key is in Redis" do + expect(redis).to receive(:get).with(request.cache_key).and_return(serialized_response) + + result = cache.get(request) + expect(result).to_not be_nil + expect(result.response_code).to 
eq(response.response_code) + expect(result.return_code).to eq(response.return_code) + expect(result.headers).to eq(response.headers) + expect(result.body).to eq(response.body) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/config_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/config_spec.rb new file mode 100644 index 0000000..ac721c5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/config_spec.rb @@ -0,0 +1,15 @@ +require 'spec_helper' + +describe Typhoeus::Config do + let(:config) { Typhoeus::Config } + + [:block_connection, :memoize, :verbose, :cache, :user_agent, :proxy].each do |name| + it "responds to #{name}" do + expect(config).to respond_to(name) + end + + it "responds to #{name}=" do + expect(config).to respond_to("#{name}=") + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/easy_factory_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/easy_factory_spec.rb new file mode 100644 index 0000000..cc1e9bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/easy_factory_spec.rb @@ -0,0 +1,143 @@ +require 'spec_helper' + +describe Typhoeus::EasyFactory do + let(:base_url) { "http://localhost:3001" } + let(:hydra) { Typhoeus::Hydra.new(:max_concurrency => 1) } + let(:options) { {} } + let(:request) { Typhoeus::Request.new(base_url, options) } + let(:easy_factory) { described_class.new(request, hydra) } + + describe "#get" do + context "when option[:cache_ttl]" do + let(:options) { {:cache_ttl => 1} } + + it "creates Ethon::Easy" do + expect(easy_factory.get).to be_a(Ethon::Easy) + end + end + + context "timeouts" do + it "sets nosignal to true by default" do + expect(easy_factory.easy).to receive(:http_request).with(anything(), anything(), hash_including(:nosignal => true)) + easy_factory.get + end + + context "when timeout is not a whole number and timeout_ms is not set" do + let(:options) { {:timeout => 0.1} } + it "ceils timeout and sets timeout_ms" do + expect(easy_factory.easy).to receive(:http_request).with(anything(), anything(), hash_including(:timeout_ms => 100, :timeout => 1)) + easy_factory.get + end + end + + context "when timeout is not a whole number and timeout_ms is set" do + let(:options) { {:timeout => 0.1, :timeout_ms => 123} } + it "ceils timeout and does not change timeout_ms" do + expect(easy_factory.easy).to receive(:http_request).with(anything(), anything(), hash_including(:timeout_ms => 123, :timeout => 1)) + easy_factory.get + end + end + + context "when connecttimeout is not a whole number and connecttimeout_ms is not set" do + let(:options) { {:connecttimeout => 0.1} } + it "ceils connecttimeout and sets connecttimeout_ms" do + expect(easy_factory.easy).to receive(:http_request).with(anything(), anything(), hash_including(:connecttimeout_ms => 100, :connecttimeout => 1)) + easy_factory.get + end + end + + context "when connecttimeout is not a whole number and connecttimeout_ms is set" do + let(:options) { {:connecttimeout => 0.1, :connecttimeout_ms => 123} } + it "ceils connecttimeout and does not change connecttimeout_ms" do + expect(easy_factory.easy).to receive(:http_request).with(anything(), anything(), hash_including(:connecttimeout_ms => 123, :connecttimeout => 1)) + easy_factory.get + end + end + + + end + + context "when invalid option" do + let(:options) { {:invalid => 1} } + + it "reraises" do + expect{ easy_factory.get }.to raise_error(Ethon::Errors::InvalidOption) + end + 
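The Dalli and Redis cache specs above drive the same tiny interface: an object responding to get(request) and set(request, response). A hedged sketch of wiring one in globally (server locations are assumptions; the Dalli variant is symmetrical):

    require "typhoeus"
    require "redis"
    require "typhoeus/cache/redis"

    # Every request now consults Redis before touching the network.
    Typhoeus::Config.cache = Typhoeus::Cache::Redis.new(Redis.new(:host => "localhost"))

    # require "dalli" and "typhoeus/cache/dalli" for the memcached-backed
    # equivalent: Typhoeus::Cache::Dalli.new(Dalli::Client.new)

    Typhoeus.get("www.example.com", :cache_ttl => 600)  # expiry where the backend supports it

The MemoryCache double earlier in this diff shows how small the duck type is: any object with get and set will do.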
end + + context "when removed option" do + let(:options) { {:cache_timeout => 1} } + + it "reraises with help" do + expect{ easy_factory.get }.to raise_error( + Ethon::Errors::InvalidOption, /The option cache_timeout was removed/ + ) + end + end + + context "when changed option" do + let(:options) { {:proxy_auth_method => 1} } + + it "reraises with help" do + expect{ easy_factory.get }.to raise_error( + Ethon::Errors::InvalidOption, /Please try proxyauth instead of proxy_auth_method/ + ) + end + end + + context "when renamed option" do + let(:options) { {:connect_timeout => 1} } + + it "warns" do + expect(easy_factory).to receive(:warn).with( + "Deprecated option connect_timeout. Please use connecttimeout instead." + ) + easy_factory.get + end + + it "passes correct option" do + expect(easy_factory).to receive(:warn) + expect(easy_factory.easy).to receive(:connecttimeout=).with(1) + easy_factory.get + end + end + end + + describe "#set_callback" do + it "sets easy.on_progress callback when an on_progress callback is provided" do + request.on_progress { 1 } + expect(easy_factory.easy).to receive(:on_progress) + easy_factory.send(:set_callback) + end + + it "sets easy.on_complete callback" do + expect(easy_factory.easy).to receive(:on_complete) + easy_factory.send(:set_callback) + end + + it "finishes request" do + easy_factory.send(:set_callback) + expect(request).to receive(:finish) + easy_factory.easy.complete + end + + it "resets easy" do + easy_factory.send(:set_callback) + expect(easy_factory.easy).to receive(:reset) + easy_factory.easy.complete + end + + it "pushes easy back into the pool" do + easy_factory.send(:set_callback) + easy_factory.easy.complete + expect(Typhoeus::Pool.send(:easies)).to include(easy_factory.easy) + end + + it "adds next request" do + easy_factory.hydra.instance_variable_set(:@queued_requests, [request]) + expect(easy_factory.hydra).to receive(:add).with(request) + easy_factory.send(:set_callback) + easy_factory.easy.complete + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/errors/no_stub_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/errors/no_stub_spec.rb new file mode 100644 index 0000000..5815cea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/errors/no_stub_spec.rb @@ -0,0 +1,13 @@ +require 'spec_helper' + +describe Typhoeus::Errors::NoStub do + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url) } + let(:message) { "The connection is blocked and no stub defined: " } + + subject { Typhoeus::Errors::NoStub } + + it "displays the request url" do + expect { raise subject.new(request) }.to raise_error(subject, message + base_url) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/expectation_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/expectation_spec.rb new file mode 100644 index 0000000..9dfb2b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/expectation_spec.rb @@ -0,0 +1,280 @@ +require 'spec_helper' + +describe Typhoeus::Expectation do + let(:options) { {} } + let(:base_url) { "www.example.com" } + let(:expectation) { described_class.new(base_url, options) } + + describe ".new" do + it "sets base_url" do + expect(expectation.instance_variable_get(:@base_url)).to eq(base_url) + end + + it "sets options" do + expect(expectation.instance_variable_get(:@options)).to eq(options) + end + + it "initializes response_counter" do + 
expect(expectation.instance_variable_get(:@response_counter)).to eq(0) + end + end + + describe ".all" do + context "when @expectations nil" do + it "returns empty array" do + expect(Typhoeus::Expectation.all).to eq([]) + end + end + + context "when @expectations not nil" do + let(:expectations) { [1] } + + it "returns @expectations" do + Typhoeus::Expectation.instance_variable_set(:@expectations, expectations) + expect(Typhoeus::Expectation.all).to be(expectations) + end + end + end + + describe ".clear" do + let(:expectations) { double(:clear) } + + it "clears all" do + expect(expectations).to receive(:clear) + Typhoeus::Expectation.instance_variable_set(:@expectations, expectations) + Typhoeus::Expectation.clear + Typhoeus::Expectation.instance_variable_set(:@expectations, nil) + end + end + + describe ".response_for" do + let(:request) { Typhoeus::Request.new("") } + let(:stubbed_response) { Typhoeus::Response.new } + + it "finds a matching expectation and returns its next response" do + Typhoeus::Expectation.all << expectation + expect(expectation).to receive(:matches?).with(request).and_return(true) + expect(expectation).to receive(:response).with(request).and_return(stubbed_response) + + response = Typhoeus::Expectation.response_for(request) + + expect(response).to be(stubbed_response) + end + + it "returns nil if no matching expectation is found" do + response = Typhoeus::Expectation.response_for(request) + expect(response).to be(nil) + end + end + + describe "#stubbed_from" do + it "sets value" do + expectation.stubbed_from(:webmock) + expect(expectation.from).to eq(:webmock) + end + + it "returns self" do + expect(expectation.stubbed_from(:webmock)).to be(expectation) + end + end + + describe "#and_return" do + context "when value" do + it "adds to responses" do + expectation.and_return(1) + expect(expectation.responses).to eq([1]) + end + end + + context "when array" do + it "adds to responses" do + expectation.and_return([1, 2]) + expect(expectation.responses).to eq([1, 2]) + end + end + + context "when block" do + it "adds to responses" do + block = Proc.new {} + expectation.and_return(&block) + expect(expectation.responses).to eq([block]) + end + end + end + + describe "#responses" do + it "returns responses" do + expect(expectation.responses).to be_a(Array) + end + end + + describe "#response" do + let(:request) { Typhoeus::Request.new("") } + + before { expectation.instance_variable_set(:@responses, responses) } + + context "when one response" do + context "is pre-constructed" do + let(:responses) { [Typhoeus::Response.new] } + + it "returns response" do + expect(expectation.response(request)).to be(responses[0]) + end + end + + context "is lazily-constructed" do + def construct_response(request) + @request_from_response_construction = request + lazily_constructed_response + end + + let(:lazily_constructed_response) { Typhoeus::Response.new } + let(:responses) { [ Proc.new { |request| construct_response(request) } ] } + + it "returns response" do + expect(expectation.response(request)).to be(lazily_constructed_response) + expect(@request_from_response_construction).to be(request) + end + end + end + + context "when multiple responses" do + let(:responses) { [Typhoeus::Response.new, Typhoeus::Response.new, Typhoeus::Response.new] } + + it "returns one by one" do + 3.times do |i| + expect(expectation.response(request)).to be(responses[i]) + end + end + end + end + + describe "#matches?" do + let(:request) { double(:base_url => nil) } + + it "calls url_match?" 
do + expect(expectation).to receive(:url_match?) + expectation.matches?(request) + end + + it "calls options_match?" do + expect(expectation).to receive(:url_match?).and_return(true) + expect(expectation).to receive(:options_match?) + expectation.matches?(request) + end + end + + describe "#url_match?" do + let(:request_url) { "www.example.com" } + let(:request) { Typhoeus::Request.new(request_url) } + let(:url_match) { expectation.method(:url_match?).call(request.base_url) } + + context "when string" do + context "when match" do + it "returns true" do + expect(url_match).to be_truthy + end + end + + context "when no match" do + let(:base_url) { "no_match" } + + it "returns false" do + expect(url_match).to be_falsey + end + end + end + + context "when regexp" do + context "when match" do + let(:base_url) { /example/ } + + it "returns true" do + expect(url_match).to be_truthy + end + end + + context "when no match" do + let(:base_url) { /nomatch/ } + + it "returns false" do + expect(url_match).to be_falsey + end + + context "with nil request_url" do + let(:request_url) { nil } + + it "returns false" do + expect(url_match).to be_falsey + end + end + end + end + + context "when nil" do + let(:base_url) { nil } + + it "returns true" do + expect(url_match).to be_truthy + end + end + + context "when not string, regexp, nil" do + let(:base_url) { 1 } + + it "returns false" do + expect(url_match).to be_falsey + end + end + end + + describe "options_match?" do + let(:request_options) { {} } + let(:request) { Typhoeus::Request.new(nil, request_options) } + let(:options_match) { expectation.method(:options_match?).call(request) } + + context "when match" do + let(:options) { { :a => 1 } } + let(:request_options) { options } + + it "returns true" do + expect(options_match).to be_truthy + end + end + + context "when options are a subset from request_options" do + let(:options) { { :a => 1 } } + let(:request_options) { { :a => 1, :b => 2 } } + + it "returns true" do + expect(options_match).to be_truthy + end + end + + context "when options are nested" do + let(:options) { { :a => { :b => 1 } } } + let(:request_options) { options } + + it "returns true" do + expect(options_match).to be_truthy + end + end + + context "when options contains an array" do + let(:options) { { :a => [1, 2] } } + let(:request_options) { options } + + it "returns true" do + expect(options_match).to be_truthy + end + end + + context "when no match" do + let(:options) { { :a => 1 } } + + it "returns false" do + expect(options_match).to be_falsey + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/addable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/addable_spec.rb new file mode 100644 index 0000000..4cb8e1a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/addable_spec.rb @@ -0,0 +1,22 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Addable do + let(:hydra) { Typhoeus::Hydra.new() } + let(:request) { Typhoeus::Request.new("localhost:3001", {:method => :get}) } + + it "asks easy factory for an easy" do + multi = double + expect(Typhoeus::EasyFactory).to receive(:new).with(request, hydra).and_return(double(:get => 1)) + expect(hydra).to receive(:multi).and_return(multi) + expect(multi).to receive(:add).with(1) + hydra.add(request) + end + + it "adds easy to multi" do + multi = double + expect(Typhoeus::EasyFactory).to receive(:new).with(request, hydra).and_return(double(:get => 1)) + expect(hydra).to 
receive(:multi).and_return(multi) + expect(multi).to receive(:add).with(1) + hydra.add(request) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/before_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/before_spec.rb new file mode 100644 index 0000000..e17e043 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/before_spec.rb @@ -0,0 +1,98 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Before do + let(:request) { Typhoeus::Request.new("") } + let(:hydra) { Typhoeus::Hydra.new } + let(:receive_counter) { double :mark => :twain } + + describe "#add" do + context "when before" do + context "when one" do + it "executes" do + Typhoeus.before { |r| receive_counter.mark } + expect(receive_counter).to receive(:mark) + hydra.add(request) + end + + context "when true" do + it "calls super" do + Typhoeus.before { true } + expect(Typhoeus::Expectation).to receive(:response_for) + hydra.add(request) + end + end + + context "when falsy" do + context "when queue requests" do + let(:queued_request) { Typhoeus::Request.new("") } + + before { hydra.queue(queued_request) } + + it "dequeues" do + Typhoeus.before { false } + hydra.add(request) + expect(hydra.queued_requests).to be_empty + end + end + + context "when false" do + it "doesn't call super" do + Typhoeus.before { false } + expect(Typhoeus::Expectation).to receive(:response_for).never + hydra.add(request) + end + end + + context "when response" do + it "doesn't call super" do + Typhoeus.before { Typhoeus::Response.new } + expect(Typhoeus::Expectation).to receive(:response_for).never + hydra.add(request) + end + end + end + end + + context "when multi" do + context "when all true" do + before { 3.times { Typhoeus.before { |r| receive_counter.mark } } } + + it "calls super" do + expect(Typhoeus::Expectation).to receive(:response_for) + hydra.add(request) + end + + it "executes all" do + expect(receive_counter).to receive(:mark).exactly(3).times + hydra.add(request) + end + end + + context "when middle false" do + before do + Typhoeus.before { |r| receive_counter.mark } + Typhoeus.before { |r| receive_counter.mark; nil } + Typhoeus.before { |r| receive_counter.mark } + end + + it "doesn't call super" do + expect(Typhoeus::Expectation).to receive(:response_for).never + hydra.add(request) + end + + it "executes only two" do + expect(receive_counter).to receive(:mark).exactly(2).times + hydra.add(request) + end + end + end + end + + context "when no before" do + it "calls super" do + expect(Typhoeus::Expectation).to receive(:response_for) + hydra.add(request) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/block_connection_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/block_connection_spec.rb new file mode 100644 index 0000000..df3e945 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/block_connection_spec.rb @@ -0,0 +1,18 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::BlockConnection do + let(:base_url) { "localhost:3001" } + let(:hydra) { Typhoeus::Hydra.new() } + let(:request) { Typhoeus::Request.new(base_url, {:method => :get}) } + + describe "add" do + context "when block_connection activated" do + before { Typhoeus::Config.block_connection = true } + after { Typhoeus::Config.block_connection = false } + + it "raises" do + expect{ hydra.add(request) }.to raise_error(Typhoeus::Errors::NoStub) + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/cacheable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/cacheable_spec.rb new file mode 100644 index 0000000..6e2f0ab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/cacheable_spec.rb @@ -0,0 +1,88 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Cacheable do + let(:base_url) { "localhost:3001" } + let(:hydra) { Typhoeus::Hydra.new() } + let(:request) { Typhoeus::Request.new(base_url, {:method => :get}) } + let(:response) { Typhoeus::Response.new } + let(:cache) { MemoryCache.new } + + describe "add" do + context "when cache activated" do + before { Typhoeus::Config.cache = cache } + after { Typhoeus::Config.cache = false } + + context "when request new" do + it "sets no response" do + hydra.add(request) + expect(request.response).to be_nil + end + + it "doesn't call complete" do + expect(request).to receive(:complete).never + hydra.add(request) + end + end + + context "when request in memory" do + before { cache.memory[request] = response } + + it "returns response with cached status" do + hydra.add(request) + expect(response.cached?).to be_truthy + end + + context "when no queued requests" do + it "finishes request" do + expect(request).to receive(:finish).with(response) + hydra.add(request) + expect(response.cached?).to be_truthy + end + end + + context "when queued requests" do + let(:queued_request) { Typhoeus::Request.new(base_url, {:method => :get}) } + + before { cache.memory[queued_request] = response } + + it "finishes both requests" do + hydra.queue(queued_request) + expect(request).to receive(:finish).with(response) + expect(queued_request).to receive(:finish).with(response) + hydra.add(request) + end + end + end + + context "when cache is specified on a request" do + before { Typhoeus::Config.cache = false } + + context "when cache is false" do + let(:non_cached_request) { Typhoeus::Request.new(base_url, {:method => :get, :cache => false}) } + + it "initiates an HTTP call" do + expect(Typhoeus::EasyFactory).to receive(:new).with(non_cached_request, hydra).and_call_original + + hydra.add(non_cached_request) + end + end + + context "when cache is defined" do + let(:cached_request) { Typhoeus::Request.new(base_url, {:method => :get, :cache => cache}) } + + before { cache.memory[cached_request] = response } + + it "uses the cache instead of making a new request" do + expect(Typhoeus::EasyFactory).not_to receive(:new) + + hydra.add(cached_request) + + expect(cached_request.response).to be_cached + expect(cached_request.response).to eq(response) + end + end + end + + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/memoizable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/memoizable_spec.rb new file mode 100644 index 0000000..c04f299 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/memoizable_spec.rb @@ -0,0 +1,53 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Memoizable do + let(:base_url) { "localhost:3001" } + let(:hydra) { Typhoeus::Hydra.new() } + let(:request) { Typhoeus::Request.new(base_url) } + + describe "add" do + context "when memoization activated" do + before { Typhoeus::Config.memoize = true } + + context "when request new" do + it "sets no response" do + hydra.add(request) + expect(request.response).to be_nil + end + + it "doesn't call complete" do + expect(request).to receive(:complete).never + hydra.add(request) + 
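A hedged sketch of the memoization behaviour these Memoizable specs exercise, using a concurrency of one so the effect is easy to see: with Typhoeus::Config.memoize enabled, an identical GET added after the first has finished is served from hydra.memory, and the memory is cleared at the start of every run (URL illustrative):

    require "typhoeus"

    Typhoeus::Config.memoize = true

    hydra = Typhoeus::Hydra.new(:max_concurrency => 1)
    2.times { hydra.queue(Typhoeus::Request.new("www.example.com")) }
    hydra.run  # the second, identical request is finished from memory

    Typhoeus::Config.memoize = false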
end + end + + context "when request in memory" do + let(:response) { Typhoeus::Response.new } + before { hydra.memory[request] = response } + + it "finishes request" do + expect(request).to receive(:finish).with(response, true) + hydra.add(request) + end + + context "when queued request" do + let(:queued_request) { Typhoeus::Request.new(base_url) } + + it "dequeues" do + hydra.queue(queued_request) + expect(request).to receive(:finish).with(response, true) + expect(queued_request).to receive(:finish).with(response, true) + hydra.add(request) + end + end + end + end + end + + describe "#run" do + it "clears memory before starting" do + expect(hydra.memory).to receive(:clear) + hydra.run + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/queueable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/queueable_spec.rb new file mode 100644 index 0000000..d9fc87a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/queueable_spec.rb @@ -0,0 +1,98 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Queueable do + let(:base_url) { "localhost:3001" } + let(:options) { {} } + let(:hydra) { Typhoeus::Hydra.new(options) } + + describe "#queue" do + let(:request) { Typhoeus::Request.new("") } + + it "accepts requests" do + hydra.queue(request) + end + + it "sets hydra on request" do + hydra.queue(request) + expect(request.hydra).to eq(hydra) + end + + it "adds to queued requests" do + hydra.queue(request) + expect(hydra.queued_requests).to include(request) + end + + it "adds to front of queued requests" do + hydra.queue_front(request) + expect(hydra.queued_requests.first).to be(request) + end + end + + describe "#abort" do + before { hydra.queued_requests << 1 } + + it "clears queue" do + hydra.abort + expect(hydra.queued_requests).to be_empty + end + end + + describe "#dequeue_many" do + before do + requests.each { |r| hydra.queue r } + end + + context "when no request queued" do + let(:requests) { [] } + + it "does nothing" do + expect(hydra).to_not receive(:add) + hydra.dequeue_many + end + end + + context "when request queued" do + let(:first) { Typhoeus::Request.new("localhost:3001/first") } + let(:requests) { [first] } + + it "adds request from queue to multi" do + expect(hydra).to receive(:add).with(first) + hydra.dequeue_many + end + end + + context "when three request queued" do + let(:first) { Typhoeus::Request.new("localhost:3001/first") } + let(:second) { Typhoeus::Request.new("localhost:3001/second") } + let(:third) { Typhoeus::Request.new("localhost:3001/third") } + let(:requests) { [first, second, third] } + + it "adds requests from queue to multi" do + expect(hydra).to receive(:add).with(first) + expect(hydra).to receive(:add).with(second) + expect(hydra).to receive(:add).with(third) + hydra.dequeue_many + end + + context "when max_concurrency is two" do + let(:options) { {:max_concurrency => 2} } + it "adds requests from queue to multi" do + expect(hydra).to receive(:add).with(first) + expect(hydra).to receive(:add).with(second) + expect(hydra).to_not receive(:add).with(third) + hydra.dequeue_many + end + end + + context "when max_concurrency is a string" do + let(:options) { {:max_concurrency => "2"} } + it "adds requests from queue to multi" do + expect(hydra).to receive(:add).with(first) + expect(hydra).to receive(:add).with(second) + expect(hydra).to_not receive(:add).with(third) + hydra.dequeue_many + end + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/runnable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/runnable_spec.rb new file mode 100644 index 0000000..1e0237e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/runnable_spec.rb @@ -0,0 +1,137 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Runnable do + let(:base_url) { "localhost:3001" } + let(:options) { {} } + let(:hydra) { Typhoeus::Hydra.new(options) } + let(:receive_counter) { double :mark => :twain } + + describe "#run" do + let(:requests) { [] } + + before do + requests.each { |r| hydra.queue r } + end + + it "runs multi#dequeue_many" do + expect(hydra).to receive(:dequeue_many) + hydra.run + end + + it "runs multi#perform" do + expect(hydra.multi).to receive(:perform) + hydra.run + end + + context "when request queued" do + let(:first) { Typhoeus::Request.new("localhost:3001/first") } + let(:requests) { [first] } + + it "sends" do + hydra.run + expect(first.response).to be + end + end + + context "when three request queued" do + let(:first) { Typhoeus::Request.new("localhost:3001/first") } + let(:second) { Typhoeus::Request.new("localhost:3001/second") } + let(:third) { Typhoeus::Request.new("localhost:3001/third") } + let(:requests) { [first, second, third] } + + it "sends first" do + hydra.run + expect(first.response).to be + end + + it "sends second" do + hydra.run + expect(second.response).to be + end + + it "sends third" do + hydra.run + expect(third.response).to be + end + + it "sends first first" do + first.on_complete do + expect(second.response).to be_nil + expect(third.response).to be_nil + end + end + + it "sends second second" do + first.on_complete do + expect(first.response).to be + expect(third.response).to be_nil + end + end + + it "sends thirds last" do + first.on_complete do + expect(second.response).to be + expect(third.response).to be + end + end + end + + context "when really queued request" do + let(:options) { {:max_concurrency => 1} } + let(:first) { Typhoeus::Request.new("localhost:3001/first") } + let(:second) { Typhoeus::Request.new("localhost:3001/second") } + let(:third) { Typhoeus::Request.new("localhost:3001/third") } + let(:requests) { [first, second, third] } + + it "sends first" do + hydra.run + expect(first.response).to be + end + + it "sends second" do + hydra.run + expect(second.response).to be + end + + it "sends third" do + hydra.run + expect(third.response).to be + end + end + + context "when request queued in callback" do + let(:first) do + Typhoeus::Request.new("localhost:3001/first").tap do |r| + r.on_complete{ hydra.queue(second) } + end + end + let(:second) { Typhoeus::Request.new("localhost:3001/second") } + let(:requests) { [first] } + + before { Typhoeus.on_complete { |r| receive_counter.mark } } + after { Typhoeus.on_complete.clear; Typhoeus.before.clear } + + context "when real request" do + context "when max_concurrency default" do + let(:options) { {} } + + it "calls on_complete callback once for every response" do + expect(receive_counter).to receive(:mark).exactly(2).times + hydra.run + end + end + end + + context "when no real request" do + context "when before hook returns and finishes response" do + before { Typhoeus.before{ |request| request.finish(Typhoeus::Response.new) } } + + it "simulates real multi run and adds and finishes both requests" do + expect(receive_counter).to receive(:mark).exactly(2).times + hydra.run + end + end + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/stubbable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/stubbable_spec.rb new file mode 100644 index 0000000..6b64cc0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra/stubbable_spec.rb @@ -0,0 +1,48 @@ +require 'spec_helper' + +describe Typhoeus::Hydra::Stubbable do + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url) } + let(:response) { Typhoeus::Response.new } + let(:hydra) { Typhoeus::Hydra.new } + + before { Typhoeus.stub(base_url).and_return(response) } + + describe "#add" do + it "checks expectations" do + hydra.add(request) + end + + context "when expectation found" do + it "calls on_headers callbacks" do + canary = :not_called + request.on_headers do + canary = :called + end + hydra.add(request) + hydra.run + expect(canary).to eq(:called) + end + + it "calls on_body callbacks" do + canary = :not_called + request.on_body do + canary = :called + end + hydra.add(request) + hydra.run + expect(canary).to eq(:called) + end + + it "finishes response" do + expect(request).to receive(:finish) + hydra.add(request) + end + + it "is a mock" do + hydra.add(request) + expect(request.response.mock).to be(true) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra_spec.rb new file mode 100644 index 0000000..a8a35aa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/hydra_spec.rb @@ -0,0 +1,22 @@ +require File.expand_path(File.dirname(__FILE__) + '/../spec_helper') + +describe Typhoeus::Hydra do + let(:base_url) { "localhost:3001" } + let(:options) { {} } + let(:hydra) { Typhoeus::Hydra.new(options) } + + describe "#new" do + let(:options) { {:pipeling => true} } + + it "passes options to multi" do + expect(Ethon::Multi).to receive(:new).with(options) + hydra + end + end + + describe "#hydra" do + it "returns a hydra" do + expect(Typhoeus::Hydra.hydra).to be_a(Typhoeus::Hydra) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/pool_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/pool_spec.rb new file mode 100644 index 0000000..b04f433 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/pool_spec.rb @@ -0,0 +1,137 @@ +require 'spec_helper' + +describe Typhoeus::Pool do + let(:easy) { Ethon::Easy.new } + after { Typhoeus::Pool.clear } + + describe "#easies" do + it "returns array" do + expect(Typhoeus::Pool.send(:easies)).to be_a(Array) + end + end + + describe "#release" do + it "resets easy" do + expect(easy).to receive(:reset) + Typhoeus::Pool.release(easy) + end + + it "flush cookies to disk" do + expect(easy).to receive(:cookielist=).with('flush') + expect(easy).to receive(:reset) + expect(easy).to receive(:cookielist=).with('all') + Typhoeus::Pool.release(easy) + end + + it "writes cookies to disk" do + tempfile1 = Tempfile.new('cookies') + tempfile2 = Tempfile.new('cookies') + + easy.cookiejar = tempfile1.path + easy.url = "localhost:3001/cookies-test" + easy.perform + + Typhoeus::Pool.release(easy) + + expect(File.zero?(tempfile1.path)).to be(false) + expect(File.read(tempfile1.path)).to match(/\s+foo\s+bar$/) + expect(File.read(tempfile1.path)).to match(/\s+bar\s+foo$/) + + # do it again - and check if tempfile1 wasn't change + easy.cookiejar = tempfile2.path + easy.url = 
"localhost:3001/cookies-test2" + easy.perform + + Typhoeus::Pool.release(easy) + + # tempfile 1 + expect(File.zero?(tempfile1.path)).to be(false) + expect(File.read(tempfile1.path)).to match(/\s+foo\s+bar$/) + expect(File.read(tempfile1.path)).to match(/\s+bar\s+foo$/) + + # tempfile2 + expect(File.zero?(tempfile2.path)).to be(false) + expect(File.read(tempfile2.path)).to match(/\s+foo2\s+bar$/) + expect(File.read(tempfile2.path)).to match(/\s+bar2\s+foo$/) + end + + it "puts easy back into pool" do + Typhoeus::Pool.release(easy) + expect(Typhoeus::Pool.send(:easies)).to include(easy) + end + + context "when threaded access" do + it "releases correct number of easies" do + (0..9).map do |n| + Thread.new do + Typhoeus::Pool.release(Ethon::Easy.new) + end + end.map(&:join) + expect(Typhoeus::Pool.send(:easies).size).to eq(10) + end + end + end + + describe "#get" do + context "when easy in pool" do + before { Typhoeus::Pool.send(:easies) << easy } + + it "takes" do + expect(Typhoeus::Pool.get).to eq(easy) + end + end + + context "when no easy in pool" do + it "creates" do + expect(Typhoeus::Pool.get).to be_a(Ethon::Easy) + end + + context "when threaded access" do + it "creates correct number of easies" do + queue = Queue.new + (0..9).map do |n| + Thread.new do + queue.enq(Typhoeus::Pool.get) + end + end.map(&:join) + + array = Array.new(queue.size) { queue.pop } + expect(array.uniq.size).to eq(10) + end + end + end + + context "when forked" do + before do + allow(Process).to receive(:pid).and_return(1) + Typhoeus::Pool.send(:easies) << easy + allow(Process).to receive(:pid).and_return(2) + end + + after do + allow(Process).to receive(:pid).and_call_original + Typhoeus::Pool.instance_variable_set(:@pid, Process.pid) + end + + it "creates" do + expect(Typhoeus::Pool.get).to_not eq(easy) + end + end + end + + describe "#with" do + it "is re-entrant" do + array = [] + Typhoeus::Pool.with_easy do |e1| + array << e1 + Typhoeus::Pool.with_easy do |e2| + array << e2 + Typhoeus::Pool.with_easy do |e3| + array << e3 + end + end + end + expect(array.uniq.size).to eq(3) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/actions_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/actions_spec.rb new file mode 100644 index 0000000..03493e1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/actions_spec.rb @@ -0,0 +1,19 @@ +require 'spec_helper' + +describe Typhoeus::Request::Actions do + [:get, :post, :put, :delete, :head, :patch, :options].each do |name| + describe ".#{name}" do + let(:response) { Typhoeus::Request.method(name).call("http://localhost:3001") } + + it "returns ok" do + expect(response.return_code).to eq(:ok) + end + + unless name == :head + it "makes #{name.to_s.upcase} Request" do + expect(response.response_body).to include("\"REQUEST_METHOD\":\"#{name.to_s.upcase}\"") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/before_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/before_spec.rb new file mode 100644 index 0000000..ed39b0c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/before_spec.rb @@ -0,0 +1,93 @@ +require 'spec_helper' + +describe Typhoeus::Request::Before do + let(:request) { Typhoeus::Request.new("") } + let(:receive_counter) { double :mark => :twain } + + describe "#queue" do + context "when before" do + context "when one" do + it "executes" 
do + Typhoeus.before { |r| receive_counter.mark } + expect(receive_counter).to receive(:mark) + request.run + end + + context "when true" do + it "calls super" do + Typhoeus.before { true } + expect(Typhoeus::Expectation).to receive(:response_for) + request.run + end + end + + context "when false" do + it "doesn't call super" do + Typhoeus.before { false } + expect(Typhoeus::Expectation).to receive(:response_for).never + request.run + end + + it "returns response" do + Typhoeus.before { |r| r.response = 1; false } + expect(request.run).to be(1) + end + end + + context "when a response" do + it "doesn't call super" do + Typhoeus.before { Typhoeus::Response.new } + expect(Typhoeus::Expectation).to receive(:response_for).never + request.run + end + + it "returns response" do + Typhoeus.before { |r| r.response = Typhoeus::Response.new } + expect(request.run).to be_a(Typhoeus::Response) + end + end + end + + context "when multi" do + context "when all true" do + before { 3.times { Typhoeus.before { |r| receive_counter.mark } } } + + it "calls super" do + expect(Typhoeus::Expectation).to receive(:response_for) + request.run + end + + it "executes all" do + expect(receive_counter).to receive(:mark).exactly(3) + request.run + end + end + + context "when middle false" do + before do + Typhoeus.before { |r| receive_counter.mark } + Typhoeus.before { |r| receive_counter.mark; nil } + Typhoeus.before { |r| receive_counter.mark } + end + + it "doesn't call super" do + expect(Typhoeus::Expectation).to receive(:response_for).never + request.run + end + + it "executes only two" do + expect(receive_counter).to receive(:mark).exactly(2).times + request.run + end + end + end + end + + context "when no before" do + it "calls super" do + expect(Typhoeus::Expectation).to receive(:response_for) + request.run + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/block_connection_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/block_connection_spec.rb new file mode 100644 index 0000000..2ddacdc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/block_connection_spec.rb @@ -0,0 +1,75 @@ +require 'spec_helper' + +describe Typhoeus::Request::BlockConnection do + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url, {:method => :get}) } + + describe "run" do + context "when blocked" do + before { request.block_connection = true } + + it "raises" do + expect{ request.run }.to raise_error(Typhoeus::Errors::NoStub) + end + end + + context "when not blocked" do + before { request.block_connection = false } + + it "doesn't raise" do + expect{ request.run }.to_not raise_error + end + end + end + + describe "#blocked?" 
do + context "when local block_connection" do + context "when true" do + before { request.block_connection = true } + + it "returns true" do + expect(request.blocked?).to be_truthy + end + end + + context "when false" do + before { request.block_connection = false } + + it "returns false" do + expect(request.blocked?).to be_falsey + end + end + end + + context "when global block_connection" do + context "when true" do + before { Typhoeus::Config.block_connection = true } + after { Typhoeus::Config.block_connection = false } + + it "returns true" do + expect(request.blocked?).to be_truthy + end + end + + context "when false" do + before { Typhoeus::Config.block_connection = false } + + it "returns false" do + expect(request.blocked?).to be_falsey + end + end + end + + context "when global and local block_connection" do + before do + Typhoeus::Config.block_connection = true + request.block_connection = false + end + after { Typhoeus::Config.block_connection = false } + + it "takes local" do + expect(request.blocked?).to be_falsey + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/cacheable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/cacheable_spec.rb new file mode 100644 index 0000000..bffa3e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/cacheable_spec.rb @@ -0,0 +1,94 @@ +require 'spec_helper' + +describe Typhoeus::Request::Cacheable do + let(:cache) { MemoryCache.new } + let(:options) { {} } + let(:request) { Typhoeus::Request.new("http://localhost:3001", options) } + let(:response) { Typhoeus::Response.new } + + before { Typhoeus::Config.cache = cache } + after { Typhoeus::Config.cache = false } + + describe "#response=" do + context "when cache activated" do + context "when request new" do + it "caches response" do + request.response = response + expect(cache.memory[request]).to be + end + + it "doesn't set cached on response" do + request.response = response + expect(request.response.cached?).to be_falsey + end + end + + context "when request in memory" do + before { cache.memory[request] = response } + + it "finishes request" do + expect(request).to receive(:finish).with(response) + request.run + end + + it "sets cached to true for response" do + request.run + expect(request.response.cached?).to be_truthy + end + end + end + end + + describe "#run" do + context "when cache activated" do + context "when request new" do + it "fetches response" do + expect(request.response).to_not be(response) + end + end + + context "when request in memory" do + before { cache.memory[request] = response } + + it "finishes request" do + expect(request).to receive(:finish).with(response) + request.run + end + end + + context "when cache is specified on a request" do + before { Typhoeus::Config.cache = false } + + context "when cache is false" do + let(:options) { { :cache => false } } + + it "finishes request" do + expect(request.response).to_not be(response) + request.run + end + end + + context "when cache is defined" do + let(:options) { { :cache => cache } } + + before { cache.memory[request] = response } + + it "finishes request" do + expect(request).to receive(:finish).with(response) + request.run + end + end + end + end + end + + describe "#cache_ttl" do + context "when option[:cache_ttl]" do + let(:options) { {:cache_ttl => 1} } + + it "returns" do + expect(request.cache_ttl).to be(1) + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/callbacks_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/callbacks_spec.rb new file mode 100644 index 0000000..9b30e74 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/callbacks_spec.rb @@ -0,0 +1,91 @@ +require 'spec_helper' + +describe Typhoeus::Request::Callbacks do + let(:request) { Typhoeus::Request.new("fubar") } + + [:on_complete, :on_success, :on_failure, :on_progress].each do |callback| + describe "##{callback}" do + it "responds" do + expect(request).to respond_to(callback) + end + + context "when no block given" do + it "returns @#{callback}" do + expect(request.method(callback).call).to eq([]) + end + end + + context "when block given" do + it "stores" do + request.method(callback).call { p 1 } + expect(request.instance_variable_get("@#{callback}").size).to eq(1) + end + end + + context "when multiple blocks given" do + it "stores" do + request.method(callback).call { p 1 } + request.method(callback).call { p 2 } + expect(request.instance_variable_get("@#{callback}").size).to eq(2) + end + end + end + end + + describe "#execute_callbacks" do + [:on_complete, :on_success, :on_failure, :on_progress].each do |callback| + context "when #{callback}" do + context "when local callback" do + before do + code = if callback == :on_failure + 500 + else + 200 + end + request.response = Typhoeus::Response.new(:mock => true, :response_code => code) + request.method(callback).call {|r| expect(r).to be_a(Typhoeus::Response) } + end + + it "executes blocks and passes response" do + request.execute_callbacks + end + + it "sets handled_response" do + request.method(callback).call { 1 } + request.execute_callbacks + expect(request.response.handled_response).to be(1) + end + end + + context "when global callback" do + before do + request.response = Typhoeus::Response.new + Typhoeus.method(callback).call {|r| expect(r).to be_a(Typhoeus::Response) } + end + + it "executes blocks and passes response" do + request.execute_callbacks + end + end + + context "when global and local callbacks" do + before do + request.response = Typhoeus::Response.new + Typhoeus.method(callback).call {|r| r.instance_variable_set(:@fu, 1) } + request.method(callback).call {|r| expect(r.instance_variable_get(:@fu)).to eq(1) } + end + + it "runs global first" do + request.execute_callbacks + end + end + end + end + + context "when local on_complete and gobal on_success" do + it "runs all global callbacks first" do + skip + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/marshal_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/marshal_spec.rb new file mode 100644 index 0000000..6ab922a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/marshal_spec.rb @@ -0,0 +1,60 @@ +require 'spec_helper' + +describe Typhoeus::Request::Marshal do + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url) } + + describe "#marshal_dump" do + %w(on_complete on_success on_failure on_progress).each do |name| + context "when #{name} handler" do + before { request.instance_variable_set("@#{name}", Proc.new{}) } + + it "doesn't include @#{name}" do + expect(request.send(:marshal_dump).map(&:first)).to_not include("@#{name}") + end + + it "doesn't raise when dumped" do + expect { Marshal.dump(request) }.to_not raise_error + end + + context "when loading" do + let(:loaded) { 
Marshal.load(Marshal.dump(request)) } + + it "includes base_url" do + expect(loaded.base_url).to eq(request.base_url) + end + + it "doesn't include #{name}" do + expect(loaded.instance_variables).to_not include("@#{name}") + end + end + end + end + + context 'when run through hydra' do + let(:options) { {} } + let(:hydra) { Typhoeus::Hydra.new(options) } + + before(:each) do + hydra.queue(request) + hydra.run + end + + it "doesn't include @hydra" do + expect(request.send(:marshal_dump).map(&:first)).to_not include("@hydra") + end + + context 'when loading' do + let(:loaded) { Marshal.load(Marshal.dump(request)) } + + it "includes base_url" do + expect(loaded.base_url).to eq(request.base_url) + end + + it "doesn't include #{name}" do + expect(loaded.instance_variables).to_not include("@hydra") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/memoizable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/memoizable_spec.rb new file mode 100644 index 0000000..1bb7ebc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/memoizable_spec.rb @@ -0,0 +1,34 @@ +require 'spec_helper' + +describe Typhoeus::Request::Memoizable do + let(:options) { {} } + let(:request) { Typhoeus::Request.new("fu", options) } + let(:response) { Typhoeus::Response.new } + let(:hydra) { Typhoeus::Hydra.new } + + describe "#response=" do + context "when memoization activated" do + before { Typhoeus::Config.memoize = true } + after { Typhoeus::Config.memoize = false } + + context "when GET request" do + let(:options) { {:method => :get} } + before { request.hydra = hydra } + + it "stores response in memory" do + request.response = response + expect(hydra.memory[request]).to be + end + end + + context "when no GET request" do + let(:options) { {:method => :post} } + + it "doesn't store response in memory" do + request.response = response + expect(hydra.memory[request]).to be_nil + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/operations_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/operations_spec.rb new file mode 100644 index 0000000..79f6be0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/operations_spec.rb @@ -0,0 +1,101 @@ +require 'spec_helper' + +describe Typhoeus::Request::Operations do + let(:base_url) { "localhost:3001" } + let(:options) { {} } + let(:request) { Typhoeus::Request.new(base_url, options) } + + describe "#run" do + let(:easy) { Ethon::Easy.new } + before { expect(Typhoeus::Pool).to receive(:get).and_return(easy) } + + it "grabs an easy" do + request.run + end + + it "generates settings" do + expect(easy).to receive(:http_request) + request.run + end + + it "performs" do + expect(easy).to receive(:perform) + request.run + end + + it "sets response" do + request.run + expect(request.response).to be + end + + it "releases easy" do + expect(Typhoeus::Pool).to receive(:release) + request.run + end + + it "calls on_body" do + on_body_called = false + request.on_body { |body, response| on_body_called = true } + request.run + expect(on_body_called).to be_truthy + expect(request.response.body).to satisfy { |v| v.nil? 
|| v == '' } + end + + it "makes response headers available to on_body" do + headers = nil + request.on_body { |body, response| headers = response.headers } + request.run + expect(headers).to be + expect(headers).to eq(request.response.headers) + end + + it "calls on_headers and on_body" do + headers = nil + request.on_headers { |response| headers = response.headers } + request.on_body { |body, response| expect(headers).not_to be_nil ; expect(response.headers).to eq(headers) } + request.on_complete { |response| expect(response).not_to be_nil ; expect(response.headers).to eq(headers) ; expect(response.body).to be_empty } + request.run + end + + it "calls on_headers and on_complete" do + headers = nil + request.on_headers { |response| headers = response.headers } + request.on_complete { |response| expect(response).not_to be_nil ; expect(response.headers).to eq(headers) ; expect(response.body).not_to be_empty } + request.run + end + + it "calls on_complete" do + callback = double(:call) + expect(callback).to receive(:call) + request.instance_variable_set(:@on_complete, [callback]) + request.run + end + + it "returns a response" do + expect(request.run).to be_a(Typhoeus::Response) + end + end + + describe "#finish" do + let(:response) { Typhoeus::Response.new } + + it "assigns response" do + request.finish(response) + expect(request.response).to be(response) + end + + it "assigns request to response" do + request.finish(response) + expect(request.response.request).to be(request) + end + + it "executes callbacks" do + expect(request).to receive(:execute_callbacks) + request.finish(response) + end + + it "returns response" do + expect(request.finish(response)).to be(response) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/responseable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/responseable_spec.rb new file mode 100644 index 0000000..01087bc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/responseable_spec.rb @@ -0,0 +1,13 @@ +require 'spec_helper' + +describe Typhoeus::Request::Responseable do + let(:request) { Typhoeus::Request.new("base_url", {}) } + let(:response) { Typhoeus::Response.new } + + describe "#response=" do + it "stores response" do + request.response = response + expect(request.response).to eq(response) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/stubbable_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/stubbable_spec.rb new file mode 100644 index 0000000..4651d5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request/stubbable_spec.rb @@ -0,0 +1,45 @@ +require 'spec_helper' + +describe Typhoeus::Request::Stubbable do + let(:base_url) { "localhost:3001" } + let(:request) { Typhoeus::Request.new(base_url) } + let(:response) { Typhoeus::Response.new } + + before { Typhoeus.stub(base_url).and_return(response) } + + describe "#run" do + it "checks expectations" do + request.run + end + + context "when expectation found" do + it "calls on_headers callbacks" do + canary = :not_called + request.on_headers do + canary = :called + end + request.run + expect(canary).to eq(:called) + end + + it "calls on_body callbacks" do + canary = :not_called + request.on_body do + canary = :called + end + request.run + expect(canary).to eq(:called) + end + + it "finishes request" do + expect(request).to receive(:finish) + request.run + end + + it "sets mock on response" 
do + request.run + expect(request.response.mock).to be(true) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request_spec.rb new file mode 100644 index 0000000..cabcb4c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/request_spec.rb @@ -0,0 +1,232 @@ +require 'spec_helper' + +describe Typhoeus::Request do + let(:base_url) { "localhost:3001" } + let(:options) { {:verbose => true, :headers => { 'User-Agent' => "Fubar", 'Expect' => "" }, :maxredirs => 50} } + let(:request) { Typhoeus::Request.new(base_url, options) } + + describe ".url" do + context "when no parameters" do + it "returns base_url" do + expect(request.url).to eq(request.base_url) + end + end + + context "when parameters" do + let(:options) { {:params => {:a => 1}} } + + it "returns full url" do + expect(request.url).to eq("#{request.base_url}?a=1") + end + end + + it "pushes an easy back into the pool" do + easy = double.as_null_object + allow(Typhoeus::Pool).to receive(:get).and_return(easy) + expect(Typhoeus::Pool).to receive(:release).with(easy) + request.url + end + end + + describe ".new" do + it "stores base_url" do + expect(request.base_url).to eq(base_url) + end + + it "stores options" do + expect(request.options).to eq(options) + end + + it "stores original options" do + expect(request.original_options).to eq(options) + expect(request.original_options).to_not be(request.options) + end + + it "sets defaults" do + expect(request.options[:headers]['User-Agent']).to be + end + end + + describe "set_defaults" do + context "when header with user agent" do + let(:options) { {:headers => {'User-Agent' => "Custom"} } } + + it "doesn't modify user agent" do + expect(request.options[:headers]['User-Agent']).to eq("Custom") + end + end + + context "when header without user agent" do + let(:options) { {:headers => {} } } + + it "add user agent" do + agent = request.options[:headers]['User-Agent'] + expect(agent).to eq(Typhoeus::USER_AGENT) + end + end + + context "when Config.user_agent set" do + before { Typhoeus.configure { |config| config.user_agent = "Default" } } + after { Typhoeus.configure { |config| config.user_agent = nil } } + + context "with headers" do + let(:options) { {:headers => { "User-Agent" => "Fubar" } } } + + it "uses the request options' user agent" do + expect(request.options[:headers]["User-Agent"]).to eq("Fubar") + end + end + + context "without headers" do + let(:options) { {:headers => {} } } + + it "uses the global options' user agent" do + expect(request.options[:headers]["User-Agent"]).to eq("Default") + end + end + end + + context "when Config.verbose set" do + before { Typhoeus.configure { |config| config.verbose = true} } + after { Typhoeus.configure { |config| config.verbose = false} } + + it "respects" do + expect(request.options[:verbose]).to be_truthy + end + end + + context "when maxredirs" do + context "when not set" do + it "defaults to 50" do + expect(request.options[:maxredirs]).to be(50) + end + end + + context "when set" do + let(:options) { {:maxredirs => 1} } + + it "respects" do + expect(request.options[:maxredirs]).to be(1) + end + end + end + + context "when Config.proxy set" do + before { Typhoeus.configure { |config| config.proxy = "http://proxy.internal" } } + after { Typhoeus.configure { |config| config.proxy = nil } } + + it "respects" do + expect(request.options[:proxy]).to eq("http://proxy.internal") + end + + context "when 
option proxy set" do + let(:options) { {:proxy => nil} } + + it "does not override" do + expect(request.options[:proxy]).to be_nil + end + end + end + end + + describe "#eql?" do + context "when another class" do + let(:other) { "" } + + it "returns false" do + expect(request).to_not eql other + end + end + + context "when same class" do + let(:other) { Typhoeus::Request.new("base_url", options) } + + context "when other base_url" do + it "returns false" do + expect(request).to_not eql other + end + end + + context "when same base_url and other options" do + let(:other) { Typhoeus::Request.new(base_url, {}) } + + it "returns false" do + expect(request).to_not eql other + end + end + + + context "when same base_url and options" do + context "when same order" do + let(:other) { Typhoeus::Request.new(base_url, options) } + + it "returns true" do + expect(request).to eql other + end + end + + context "when different order" do + let(:other_options) { + {:headers => { 'User-Agent' => "Fubar", 'Expect' => ""}, :verbose => true } + } + let(:other) { Typhoeus::Request.new(base_url, other_options)} + + it "returns true" do + expect(request).to eql other + end + end + end + end + end + + describe "#hash" do + context "when request.eql?(other)" do + context "when different order" do + let(:other_options) { + {:headers => { 'User-Agent' => "Fubar", 'Expect' => "" }, :verbose => true } + } + let(:other) { Typhoeus::Request.new(base_url, other_options)} + + it "has same hashes" do + expect(request.hash).to eq(other.hash) + end + end + + context "when same order" do + let(:other) { Typhoeus::Request.new(base_url, options) } + + it "has same hashes" do + expect(request.hash).to eq(other.hash) + end + end + + context "when hashes with different orders are contained in arrays" do + let(:request) { Typhoeus::Request.new(base_url, :params => [{:b => 2, :a => 1}]) } + let(:other) { Typhoeus::Request.new(base_url, :params => [{:a => 1, :b => 2}]) } + it "has different hashes" do + expect(request.hash).to eq(other.hash) + end + end + end + + context "when not request.eql?(other)" do + let(:request) { Typhoeus::Request.new(base_url, :params => {:foo => 'bar'}) } + let(:other) { Typhoeus::Request.new(base_url, :params => {:foo => 'baz'}) } + + it "has different hashes" do + expect(request.hash).to_not eq(other.hash) + end + end + end + + describe "#encoded_body" do + let(:request) { + Typhoeus::Request.new("www.example.com",:body => {:a => 1}) + } + + it "returns encoded body" do + expect(request.encoded_body).to eq("a=1") + end + end + +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/header_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/header_spec.rb new file mode 100644 index 0000000..922d99e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/header_spec.rb @@ -0,0 +1,147 @@ +require 'spec_helper' + +describe Typhoeus::Response::Header do + let(:raw) { nil } + let(:header) { Typhoeus::Response::Header.new(raw) } + + describe ".new" do + context "when string" do + let(:raw) { 'Date: Fri, 29 Jun 2012 10:09:23 GMT' } + + it "sets Date" do + expect(header['Date']).to eq('Fri, 29 Jun 2012 10:09:23 GMT') + end + + it "provides case insensitive access" do + expect(header['DaTe']).to eq('Fri, 29 Jun 2012 10:09:23 GMT') + end + + it "provides symbol access" do + expect(header[:date]).to eq('Fri, 29 Jun 2012 10:09:23 GMT') + end + end + + context "when hash" do + let(:raw) { { 'Date' => 'Fri, 29 Jun 2012 10:09:23 GMT' 
} } + + it "sets Date" do + expect(header['Date']).to eq(raw['Date']) + end + + it "provides case insensitive access" do + expect(header['DaTe']).to eq(raw['Date']) + end + end + end + + describe "#parse" do + context "when no header" do + it "returns nil" do + expect(header).to be_empty + end + end + + context "when header" do + let(:raw) do + 'HTTP/1.1 200 OK + Set-Cookie: NID=61=LblqYgUOu; expires=Sat, 29-Dec-2012 10:09:23 GMT; path=/; domain=.google.de; HttpOnly + Date: Fri, 29 Jun 2012 10:09:23 GMT + Expires: -1 + Cache-Control: private, max-age=0 + Content-Type: text/html; charset=ISO-8859-1 + Set-Cookie: PREF=ID=77e93yv0hPtejLou; expires=Sun, 29-Jun-2014 10:09:23 GMT; path=/; domain=.google.de + Set-Cookie: NID=61=LblqYgh5Ou; expires=Sat, 29-Dec-2012 10:09:23 GMT; path=/; domain=.google.de; HttpOnly + P3P: CP="This is not a P3P policy! See http://www.google.com/support/accounts/bin/answer.py?hl=en&answer=151657 for more info." + Server: gws + X-XSS-Protection: 1; mode=block + X-Frame-Options: SAMEORIGIN + Transfer-Encoding: chunked'.gsub(/^\s{8}/, '') + end + + it "sets raw" do + expect(header.send(:raw)).to eq(raw) + end + + it "sets Set-Cookie" do + expect(header['set-cookie'].size).to eq(3) + end + + it "provides case insensitive access" do + expect(header['Set-CooKie'].size).to eq(3) + end + + [ + 'NID=61=LblqYgUOu; expires=Sat, 29-Dec-2012 10:09:23 GMT; path=/; domain=.google.de; HttpOnly', + 'PREF=ID=77e93yv0hPtejLou; expires=Sun, 29-Jun-2014 10:09:23 GMT; path=/; domain=.google.de', + 'NID=61=LblqYgh5Ou; expires=Sat, 29-Dec-2012 10:09:23 GMT; path=/; domain=.google.de; HttpOnly' + ].each_with_index do |cookie, i| + it "sets Cookie##{i}" do + expect(header['set-cookie']).to include(cookie) + end + end + + { + 'Date' => 'Fri, 29 Jun 2012 10:09:23 GMT', 'Expires' => '-1', + 'Cache-Control' => 'private, max-age=0', + 'Content-Type' => 'text/html; charset=ISO-8859-1', + 'P3P' => 'CP="This is not a P3P policy! 
See http://www.google.com/support/accounts/bin/answer.py?hl=en&answer=151657 for more info."', + 'Server' => 'gws', 'X-XSS-Protection' => '1; mode=block', + 'X-Frame-Options' => 'SAMEORIGIN', 'Transfer-Encoding' => 'chunked' + }.each do |name, value| + it "sets #{name}" do + expect(header[name.downcase]).to eq(value) + end + end + + context 'includes a multi-line header' do + let(:raw) do + 'HTTP/1.1 200 OK + Date: Fri, 29 Jun 2012 10:09:23 GMT + Content-Security-Policy: default-src "self"; + img-src * data: "self"; + upgrade-insecure-requests;'.gsub(/^\s{10}/, '') + end + + it "joins header parts" do + expect(header).to eq({ + 'Date' => 'Fri, 29 Jun 2012 10:09:23 GMT', + 'Content-Security-Policy' => 'default-src "self"; img-src * data: "self"; upgrade-insecure-requests;' + }) + end + end + + context 'includes line with only whitespace' do + let(:raw) do + 'HTTP/1.1 200 OK + Date: Fri, 29 Jun 2012 10:09:23 GMT + + '.gsub(/^\s{10}/, '') + end + + it 'ignores it' do + expect(header).to eq({ 'Date' => 'Fri, 29 Jun 2012 10:09:23 GMT' }) + end + end + + context 'with broken headers' do + let(:raw) do + 'HTTP/1.1 200 OK + Date: + Content-Type + '.gsub(/^\s{10}/, '') + end + + it 'returns empty string for invalid headers' do + expect(header.to_hash).to include({ 'Date' => '', 'Content-Type' => '' }) + end + end + end + end + + it "can be Marshal'd" do + header = Typhoeus::Response::Header.new("Foo: Bar") + expect { + Marshal.dump(header) + }.not_to raise_error + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/informations_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/informations_spec.rb new file mode 100644 index 0000000..896931a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/informations_spec.rb @@ -0,0 +1,283 @@ +require 'spec_helper' + +describe Typhoeus::Response::Informations do + let(:options) { {} } + let(:response) { Typhoeus::Response.new(options) } + + describe "#return_code" do + let(:options) { { :return_code => :ok } } + + it "returns return_code from options" do + expect(response.return_code).to be(:ok) + end + end + + describe "#debug_info" do + let(:options) { { :debug_info => Ethon::Easy::DebugInfo.new } } + + it "returns debug_info from options" do + expect(response.debug_info).to be_a(Ethon::Easy::DebugInfo) + end + end + + describe "#return_message" do + let(:options) { { :return_code => :couldnt_connect } } + + it "returns a message" do + expect(response.return_message).to eq("Couldn't connect to server") + end + + describe "with nil return_code" do + let(:options) { { :return_code => nil } } + + it "returns nil" do + expect(response.return_message).to be_nil + end + end + end + + describe "#response_body" do + context "when response_body" do + let(:options) { { :response_body => "body" } } + + it "returns response_body from options" do + expect(response.response_body).to eq("body") + end + end + + context "when body" do + let(:options) { { :body => "body" } } + + it "returns body from options" do + expect(response.body).to eq("body") + end + end + end + + describe "#response_headers" do + let(:options) { { :response_headers => "Length: 1" } } + + context "when no mock" do + it "returns response_headers from options" do + expect(response.response_headers).to eq("Length: 1") + end + end + + context "when mock" do + context "when no response_headers" do + context "when headers" do + let(:options) { { :mock => true, :headers => {"Length" => 1, "Content-Type" => 
"text/plain" } } } + + it "constructs response_headers" do + expect(response.response_headers).to include("Length: 1") + expect(response.response_headers).to include("Content-Type: text/plain") + expect(response.response_headers).to include("\r\n") + end + end + + context "when multiple values for a header" do + let(:options) { { :mock => true, :headers => {"Length" => 1, "Content-Type" => "text/plain", "set-cookie" => ["cookieone=one","cookietwo=two"] } } } + + it "constructs response_headers" do + expect(response.response_headers).to include("Length: 1") + expect(response.response_headers).to include("Content-Type: text/plain") + expect(response.response_headers).to include("set-cookie: cookieone=one,cookietwo=two") + expect(response.response_headers).to include("\r\n") + end + end + end + end + end + + describe "#response_code" do + context "when response_code" do + let(:options) { { :response_code => "200" } } + + it "returns response_code from options" do + expect(response.response_code).to eq(200) + end + end + + context "when code" do + let(:options) { { :code => "200" } } + + it "returns code from options" do + expect(response.code).to eq(200) + end + end + end + + describe "#httpauth_avail" do + let(:options) { { :httpauth_avail => "code" } } + + it "returns httpauth_avail from options" do + expect(response.httpauth_avail).to eq("code") + end + end + + describe "#total_time" do + let(:options) { { :total_time => 1 } } + + it "returns total_time from options" do + expect(response.total_time).to eq(1) + end + end + + describe "#starttransfer_time" do + let(:options) { { :starttransfer_time => 1 } } + + it "returns starttransfer_time from options" do + expect(response.starttransfer_time).to eq(1) + end + end + + describe "#appconnect_time" do + let(:options) { { :appconnect_time => 1 } } + + it "returns appconnect_time from options" do + expect(response.appconnect_time).to eq(1) + end + end + + describe "#pretransfer_time" do + let(:options) { { :pretransfer_time => 1 } } + + it "returns pretransfer_time from options" do + expect(response.pretransfer_time).to eq(1) + end + end + + describe "#connect_time" do + let(:options) { { :connect_time => 1 } } + + it "returns connect_time from options" do + expect(response.connect_time).to eq(1) + end + end + + describe "#namelookup_time" do + let(:options) { { :namelookup_time => 1 } } + + it "returns namelookup_time from options" do + expect(response.namelookup_time).to eq(1) + end + end + + describe "#redirect_time" do + let(:options) { { :redirect_time => 1 } } + + it "returns redirect_time from options" do + expect(response.redirect_time).to eq(1) + end + end + + describe "#effective_url" do + let(:options) { { :effective_url => "http://www.example.com" } } + + it "returns effective_url from options" do + expect(response.effective_url).to eq("http://www.example.com") + end + end + + describe "#primary_ip" do + let(:options) { { :primary_ip => "127.0.0.1" } } + + it "returns primary_ip from options" do + expect(response.primary_ip).to eq("127.0.0.1") + end + end + + describe "#redirect_count" do + let(:options) { { :redirect_count => 2 } } + + it "returns redirect_count from options" do + expect(response.redirect_count).to eq(2) + end + end + + describe "#request_size" do + let(:options) { { :request_size => 2 } } + + it "returns request_size from options" do + expect(response.request_size).to eq(2) + end + end + + describe "#headers" do + context "when no response_headers" do + it "returns nil" do + expect(response.headers).to be_nil + 
end + end + + context "when response_headers" do + let(:options) { {:response_headers => "Expire: -1\nServer: gws"} } + + it "returns nonempty headers" do + expect(response.headers).to_not be_empty + end + + it "has Expire" do + expect(response.headers['expire']).to eq('-1') + end + + it "has Server" do + expect(response.headers['server']).to eq('gws') + end + end + + context "when multiple headers" do + let(:options) { {:response_headers => "Server: A\r\n\r\nServer: B"} } + + it "returns the last" do + expect(response.headers['server']).to eq("B") + end + end + + context "when mock" do + context "when headers" do + let(:options) { {:mock => true, :headers => {"Length" => "1"}} } + + it "returns Typhoeus::Response::Header" do + expect(response.headers).to be_a(Typhoeus::Response::Header) + end + + it "returns headers" do + expect(response.headers.to_hash).to include("Length" => "1") + end + end + end + + context "when requesting" do + let(:response) { Typhoeus.get("localhost:3001") } + + it "returns headers" do + expect(response.headers).to_not be_empty + end + end + end + + describe "#redirections" do + context "when no response_headers" do + it "returns empty array" do + expect(response.redirections).to be_empty + end + end + + context "when headers" do + let(:options) { {:response_headers => "Expire: -1\nServer: gws"} } + + it "returns empty array" do + expect(response.redirections).to be_empty + end + end + + context "when multiple headers" do + let(:options) { {:response_headers => "Server: A\r\n\r\nServer: B"} } + + it "returns response from all but last headers" do + expect(response.redirections.size).to eq(1) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/status_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/status_spec.rb new file mode 100644 index 0000000..64ba1e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response/status_spec.rb @@ -0,0 +1,256 @@ +require 'spec_helper' + +describe Typhoeus::Response::Status do + let(:response) { Typhoeus::Response.new(options) } + let(:options) { {} } + + describe "timed_out?" 
do + context "when return code is operation_timedout" do + let(:options) { {:return_code => :operation_timedout} } + + it "return true" do + expect(response).to be_timed_out + end + end + end + + describe "#status_message" do + context "when no header" do + it "returns nil" do + expect(response.status_message).to be_nil + end + end + + context "when header" do + context "when no message" do + let(:options) { {:response_headers => "HTTP/1.1 200\r\n"} } + + it "returns nil" do + expect(response.status_message).to be_nil + end + end + + context "when messsage" do + let(:options) { {:response_headers => "HTTP/1.1 200 message\r\n"} } + + it "returns message" do + expect(response.status_message).to eq("message") + end + end + end + end + + describe "#http_version" do + context "when no header" do + it "returns nil" do + expect(response.http_version).to be_nil + end + end + + context "when header" do + context "when no http version" do + let(:options) { {:response_headers => "HTTP OK"} } + + it "returns nil" do + expect(response.http_version).to be_nil + end + end + + context "when invalid http_version" do + let(:options) { {:response_headers => "HTTP foo/bar OK"} } + + it "returns nil" do + expect(response.http_version).to be_nil + end + end + + context "when valid http_version" do + let(:options) { {:response_headers => "HTTP/1.1 OK"} } + + it "returns http_version" do + expect(response.http_version).to eq("1.1") + end + end + end + end + + describe "#success?" do + context "when response code 200-299" do + let(:options) { {:return_code => return_code, :response_code => 201} } + + context "when mock" do + before { response.mock = true } + + context "when return_code :ok" do + let(:return_code) { :ok } + + it "returns true" do + expect(response.success?).to be_truthy + end + end + + context "when return_code nil" do + let(:return_code) { nil } + + it "returns true" do + expect(response.success?).to be_truthy + end + end + end + + context "when no mock" do + before { response.mock = nil } + + context "when return_code :ok" do + let(:return_code) { :ok } + + it "returns true" do + expect(response.success?).to be_truthy + end + end + + context "when return_code nil" do + let(:return_code) { nil } + + it "returns false" do + expect(response.success?).to be_falsey + end + end + end + end + + context "when response code is not 200-299" do + let(:options) { {:return_code => :ok, :response_code => 500} } + + it "returns false" do + expect(response.success?).to be_falsey + end + end + end + + describe "#failure?" 
do + context "when response code between 300-526 and 100-300" do + let(:options) { {:return_code => return_code, :response_code => 300} } + + context "when mock" do + before { response.mock = true } + + context "when return_code :internal_server_error" do + let(:return_code) { :internal_server_error } + + it "returns true" do + expect(response.failure?).to be_truthy + end + end + + context "when return_code nil" do + let(:return_code) { nil } + + it "returns true" do + expect(response.failure?).to be_truthy + end + end + end + + context "when no mock" do + before { response.mock = nil } + + context "when return_code :internal_server_error" do + let(:return_code) { :internal_server_error } + + it "returns true" do + expect(response.failure?).to be_truthy + end + end + + context "when return_code nil" do + let(:return_code) { nil } + + it "returns false" do + expect(response.failure?).to be_falsey + end + end + end + end + + context "when response code is not 300-526" do + let(:options) { {:return_code => :ok, :response_code => 200} } + + it "returns false" do + expect(response.failure?).to be_falsey + end + end + end + + describe "#modified?" do + context "when response code 304" do + let(:options) { {:return_code => :ok, :response_code => 304} } + + context "when mock" do + before { response.mock = true } + + context "when return_code :ok" do + let(:return_code) { :ok } + + it "returns false" do + expect(response.modified?).to be_falsey + end + end + + context "when return_code nil" do + let(:return_code) { nil } + + it "returns false" do + expect(response.modified?).to be_falsey + end + end + end + + context "when no mock" do + before { response.mock = nil } + + context "when return_code :ok" do + let(:return_code) { :ok } + + it "returns false" do + expect(response.modified?).to be_falsey + end + end + + context "when return_code nil" do + let(:return_code) { nil } + + it "returns true" do + expect(response.modified?).to be_falsey + end + end + end + end + + context "when response code is not 304" do + let(:options) { {:return_code => :ok, :response_code => 500} } + + it "returns true" do + expect(response.modified?).to be_truthy + end + end + end + + describe "#first_header_line" do + context "when multiple header" do + let(:options) { {:response_headers => "1\r\n\r\n2\r\nbla"} } + + it "returns first line of last block" do + expect(response.method(:first_header_line).call).to eq("2") + end + end + + context "when single header" do + let(:options) { {:response_headers => "1"} } + + it "returns first line" do + expect(response.method(:first_header_line).call).to eq("1") + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response_spec.rb new file mode 100644 index 0000000..02512f4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus/response_spec.rb @@ -0,0 +1,100 @@ +require 'spec_helper' + +describe Typhoeus::Response do + let(:response) { Typhoeus::Response.new(options) } + let(:options) { {} } + + describe ".new" do + context "when options" do + context "when return_code" do + let(:options) { {:return_code => 2} } + + it "stores" do + expect(response.options[:return_code]).to be(2) + end + end + + context "when headers" do + let(:options) { {:headers => {'A' => 'B'}} } + + it "stores unmodified" do + expect(response.options[:headers]).to be(options[:headers]) + end + + it "sets @headers to a Typhoeus::Response::Header" do + 
expect(response.instance_variable_get(:@headers)).to be_a(Typhoeus::Response::Header) + end + + it "has key" do + expect(response.headers['A']).to eq('B') + end + end + end + end + + describe "#mock" do + context "when @mock" do + before { response.mock = true } + + it "returns @mock" do + expect(response.mock).to be_truthy + end + end + + context "when options[:mock]" do + let(:options) { {:mock => true} } + + it "returns options[:mock]" do + expect(response.mock).to be_truthy + end + end + + context "when @mock and options[:mock]" do + let(:options) { {:mock => 1} } + before { response.mock = 2 } + + it "returns @mock" do + expect(response.mock).to be(2) + end + end + end + + describe "#handled_response" do + let(:handled_response) { Typhoeus::Response.new } + + context "when @handled_response" do + before { response.handled_response = handled_response } + + it "returns @handled_response" do + expect(response.handled_response).to be(handled_response) + end + end + + context "when @handled_response is nil" do + before { response.handled_response = nil } + + it "returns response" do + expect(response.handled_response).to be(response) + end + end + end + + describe "#cached" do + context "when @cached" do + before { response.cached = true } + + it "returns cached status" do + expect(response.cached?).to be_truthy + end + end + + context "when @cached is nil" do + before { response.cached = nil } + + it "returns false" do + expect(response.cached?).to be_falsey + end + end + + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus_spec.rb b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus_spec.rb new file mode 100644 index 0000000..6432c8d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/spec/typhoeus_spec.rb @@ -0,0 +1,105 @@ +require 'spec_helper' + +describe Typhoeus do + before(:each) do + Typhoeus.configure { |config| config.verbose = false; config.block_connection = false } + end + + describe ".configure" do + it "yields config" do + Typhoeus.configure do |config| + expect(config).to be_a(Typhoeus::Config) + end + end + + it "sets values config" do + Typhoeus::Config.verbose = true + expect(Typhoeus::Config.verbose).to be_truthy + end + end + + describe ".stub" do + let(:base_url) { "www.example.com" } + + shared_examples "lazy response construction" do + it "calls the block to construct a response when a request matches the stub" do + expected_response = Typhoeus::Response.new + Typhoeus.stub(base_url) do |request| + expected_response + end + + response = Typhoeus.get(base_url) + + expect(response).to be(expected_response) + end + end + + context "when no similar expectation exists" do + include_examples "lazy response construction" + + it "returns expectation" do + expect(Typhoeus.stub(base_url)).to be_a(Typhoeus::Expectation) + end + + it "adds expectation" do + Typhoeus.stub(:get, "") + expect(Typhoeus::Expectation.all.size).to eq(1) + end + end + + context "when similar expectation exists" do + include_examples "lazy response construction" + + let(:expectation) { Typhoeus::Expectation.new(base_url) } + before { Typhoeus::Expectation.all << expectation } + + it "returns expectation" do + expect(Typhoeus.stub(base_url)).to be_a(Typhoeus::Expectation) + end + + it "doesn't add expectation" do + Typhoeus.stub(base_url) + expect(Typhoeus::Expectation.all.size).to eq(1) + end + end + end + + describe ".before" do + it "adds callback" do + Typhoeus.before { true } + expect(Typhoeus.before.size).to eq(1) + end + end + + describe 
".with_connection" do + it "executes block with block connection is false" do + Typhoeus.with_connection { expect(Typhoeus::Config.block_connection).to be(false) } + end + + it "sets block connection back to previous value" do + Typhoeus::Config.block_connection = true + Typhoeus.with_connection {} + expect(Typhoeus::Config.block_connection).to be(true) + end + + it "returns result of block" do + expect(Typhoeus.with_connection { "123" }).to eq("123") + end + end + + [:get, :post, :put, :delete, :head, :patch, :options].each do |name| + describe ".#{name}" do + let(:response) { Typhoeus::Request.method(name).call("http://localhost:3001") } + + it "returns ok" do + expect(response.return_code).to eq(:ok) + end + + unless name == :head + it "makes #{name.to_s.upcase} requests" do + expect(response.response_body).to include("\"REQUEST_METHOD\":\"#{name.to_s.upcase}\"") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/typhoeus.gemspec b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/typhoeus.gemspec new file mode 100644 index 0000000..39bcc7a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/typhoeus-1.4.0/typhoeus.gemspec @@ -0,0 +1,25 @@ +# encoding: utf-8 +lib = File.expand_path('../lib/', __FILE__) +$:.unshift lib unless $:.include?(lib) + +require 'typhoeus/version' + +Gem::Specification.new do |s| + s.name = "typhoeus" + s.version = Typhoeus::VERSION + s.platform = Gem::Platform::RUBY + s.authors = ["David Balatero", "Paul Dix", "Hans Hasselberg"] + s.email = ["hans.hasselberg@gmail.com"] + s.homepage = "https://github.com/typhoeus/typhoeus" + s.summary = "Parallel HTTP library on top of libcurl multi." + s.description = %q{Like a modern code version of the mythical beast with 100 serpent heads, Typhoeus runs HTTP requests in parallel while cleanly encapsulating handling logic.} + + s.required_rubygems_version = ">= 1.3.6" + s.license = 'MIT' + + s.add_dependency('ethon', [">= 0.9.0"]) + + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- spec/*`.split("\n") + s.require_path = 'lib' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/.yardopts b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/.yardopts new file mode 100644 index 0000000..aa3b230 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/.yardopts @@ -0,0 +1,6 @@ +--no-private +lib/**/*.rb +- +CHANGES.md +LICENSE +README.md diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/CHANGES.md b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/CHANGES.md new file mode 100644 index 0000000..dfe39a5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/CHANGES.md @@ -0,0 +1,815 @@ +Version 1.2.8 - 8-Nov-2020 +-------------------------- + +* Added support for handling "slim" format zoneinfo files that are produced by + default by zic version 2020b and later. The POSIX-style TZ string is now used + calculate DST transition times after the final defined transition in the file. + The 64-bit section is now always used regardless of whether Time has support + for 64-bit times. +* Rubinius is no longer supported. + + +Version 1.2.7 - 2-Apr-2020 +-------------------------- + +* Fixed 'wrong number of arguments' errors when running on JRuby 9.0. #114. +* Fixed warnings when running on Ruby 2.8. #112. + + +Version 1.2.6 - 24-Dec-2019 +--------------------------- + +* Timezone#strftime('%s', time) will now return the correct number of seconds + since the epoch. #91. +* Removed the unused TZInfo::RubyDataSource::REQUIRE_PATH constant. 
+* Fixed "SecurityError: Insecure operation - require" exceptions when loading + data with recent Ruby releases in safe mode. +* Fixed warnings when running on Ruby 2.7. #106 and #111. + + +Version 1.2.5 - 4-Feb-2018 +-------------------------- + +* Support recursively (deep) freezing Country and Timezone instances. #80. +* Allow negative daylight savings time offsets to be derived when reading from + zoneinfo files. The utc_offset and std_offset are now derived correctly for + Europe/Dublin in the 2018a and 2018b releases of the Time Zone Database. + + +Version 1.2.4 - 26-Oct-2017 +--------------------------- + +* Ignore the leapseconds file that is included in zoneinfo directories installed + with version 2017c and later of the Time Zone Database. + + +Version 1.2.3 - 25-Mar-2017 +--------------------------- + +* Reduce the number of String objects allocated when loading zoneinfo files. + #54. +* Make Timezone#friendly_identifier compatible with frozen string literals. +* Improve the algorithm for deriving the utc_offset from zoneinfo files. This + now correctly handles Pacific/Apia switching from one side of the + International Date Line to the other whilst observing daylight savings time. + #66. +* Fix an UnknownTimezone exception when calling transitions_up_to or + offsets_up_to on a TimezoneProxy instance obtained from Timezone.get_proxy. +* Allow the Factory zone to be obtained from the Zoneinfo data source. +* Ignore the /usr/share/zoneinfo/timeconfig symlink included in Slackware + distributions. #64. +* Fix Timezone#strftime handling of %Z expansion when %Z is prefixed with more + than one percent. #31. +* Support expansion of %z, %:z, %::z and %:::z to the UTC offset of the time + zone in Timezone#strftime. #31 and #67. + + +Version 1.2.2 - 8-Aug-2014 +-------------------------- + +* Fix an error with duplicates being returned by Timezone#all_country_zones + and Timezone#all_country_zone_identifiers when used with tzinfo-data + v1.2014.6 or later. +* Use the zone1970.tab file for country timezone data if it is found in the + zoneinfo directory (and fallback to zone.tab if not). zone1970.tab was added + in tzdata 2014f. zone.tab is now deprecated. + + +Version 1.2.1 - 1-Jun-2014 +-------------------------- + +* Support zoneinfo files generated with zic version 2014c and later. +* On platforms that only support positive 32-bit timestamps, ensure that + conversions are accurate from the epoch instead of just from the first + transition after the epoch. +* Minor documentation improvements. + + +Version 1.2.0 - 26-May-2014 +--------------------------- + +* Raise the minimum supported Ruby version to 1.8.7. +* Support loading system zoneinfo data on FreeBSD, OpenBSD and Solaris. + Resolves #15. +* Add canonical_identifier and canonical_zone methods to Timezone. Resolves #16. +* Add a link to a DataSourceNotFound help page in the TZInfo::DataSourceNotFound + exception message. +* Load iso3166.tab and zone.tab files as UTF-8. +* Fix Timezone#local_to_utc returning local Time instances on systems using UTC + as the local time zone. Resolves #13. +* Fix == methods raising an exception when passed an instance of a different + class by making <=> return nil if passed a non-comparable argument. +* Eliminate "require 'rational'" warnings. Resolves #10. +* Eliminate "assigned but unused variable - info" warnings. Resolves #11. +* Switch to minitest v5 for unit tests. Resolves #18. + + +Version 1.1.0 - 25-Sep-2013 +--------------------------- + +* TZInfo is now thread safe. 
ThreadSafe::Cache is now used instead of Hash + to cache Timezone and Country instances returned by Timezone.get and + Country.get. The tzinfo gem now depends on thread_safe ~> 0.1. +* Added a transitions_up_to method to Timezone that returns a list of the times + where the UTC offset of the timezone changes. +* Added an offsets_up_to method to Timezone that returns the set of offsets + that have been observed in a defined timezone. +* Fixed a "can't modify frozen String" error when loading a Timezone from a + zoneinfo file using an identifier String that is both tainted and frozen. + Resolves #3. +* Support TZif3 format zoneinfo files (now produced by zic from tzcode version + 2013e onwards). +* Support using YARD to generate documentation (added a .yardopts file). +* Ignore the +VERSION file included in the zoneinfo directory on Mac OS X. +* Added a note to the documentation concerning 32-bit zoneinfo files (as + included with Mac OS X). + + +Version 1.0.1 - 22-Jun-2013 +--------------------------- + +* Fix a test case failure when tests are run from a directory that contains a + dot in the path (issue #29751). + + +Version 1.0.0 - 2-Jun-2013 +-------------------------- + +* Allow TZInfo to be used with different data sources instead of just the + built-in Ruby module data files. +* Include a data source that allows TZInfo to load data from the binary + zoneinfo files produced by zic and included with many Linux and Unix-like + distributions. +* Remove the definition and index Ruby modules from TZInfo and move them into + a separate TZInfo::Data library (available as the tzinfo-data gem). +* Default to using the TZInfo::Data library as the data source if it is + installed, otherwise use zoneinfo files instead. +* Preserve the nanoseconds of local timezone Time objects when performing + conversions (issue #29705). +* Don't add the tzinfo lib directory to the search path when requiring 'tzinfo'. + The tzinfo lib directory must now be in the search path before 'tzinfo' is + required. +* Add utc_start_time, utc_end_time, local_start_time and local_end_time instance + methods to TimezonePeriod. These return an identical value as the existing + utc_start, utc_end, local_start and local_end methods, but return Time + instances instead of DateTime. +* Make the start_transition, end_transition and offset properties of + TimezonePeriod protected. To access properties of the period, callers should + use other TimezonePeriod instance methods instead (issue #7655). + + +Version 0.3.53 (tzdata v2017b) - 23-Mar-2017 +-------------------------------------------- + +* Updated to tzdata version 2017b + (https://mm.icann.org/pipermail/tz-announce/2017-March/000046.html). + + +Version 0.3.52 (tzdata v2016h) - 28-Oct-2016 +-------------------------------------------- + +* Updated to tzdata version 2016h + (https://mm.icann.org/pipermail/tz-announce/2016-October/000042.html). + + +Version 0.3.51 (tzdata v2016f) - 5-Jul-2016 +------------------------------------------- + +* Updated to tzdata version 2016f + (https://mm.icann.org/pipermail/tz-announce/2016-July/000040.html). + + +Version 0.3.50 (tzdata v2016e) - 14-Jun-2016 +-------------------------------------------- + +* Updated to tzdata version 2016e + (https://mm.icann.org/pipermail/tz-announce/2016-June/000039.html). + + +Version 0.3.49 (tzdata v2016d) - 18-Apr-2016 +-------------------------------------------- + +* Updated to tzdata version 2016d + (https://mm.icann.org/pipermail/tz-announce/2016-April/000038.html). 
+ + +Version 0.3.48 (tzdata v2016c) - 23-Mar-2016 +-------------------------------------------- + +* Updated to tzdata version 2016c + (https://mm.icann.org/pipermail/tz-announce/2016-March/000037.html). + + +Version 0.3.47 (tzdata v2016b) - 15-Mar-2016 +-------------------------------------------- + +* Updated to tzdata version 2016b + (https://mm.icann.org/pipermail/tz-announce/2016-March/000036.html). + + +Version 0.3.46 (tzdata v2015g) - 2-Dec-2015 +------------------------------------------- + +* From version 2015e, the IANA time zone database uses non-ASCII characters in + country names. Backport the encoding handling from TZInfo::Data to allow + TZInfo 0.3.x to support Ruby 1.9 (which would otherwise fail with an invalid + byte sequence error when loading the countries index). Resolves #41. + + +Version 0.3.45 (tzdata v2015g) - 3-Oct-2015 +------------------------------------------- + +* Updated to tzdata version 2015g + (https://mm.icann.org/pipermail/tz-announce/2015-October/000034.html). + + +Version 0.3.44 (tzdata v2015d) - 24-Apr-2015 +-------------------------------------------- + +* Updated to tzdata version 2015d + (https://mm.icann.org/pipermail/tz-announce/2015-April/000031.html). + + +Version 0.3.43 (tzdata v2015a) - 31-Jan-2015 +-------------------------------------------- + +* Updated to tzdata version 2015a + (https://mm.icann.org/pipermail/tz-announce/2015-January/000028.html). + + +Version 0.3.42 (tzdata v2014i) - 23-Oct-2014 +-------------------------------------------- + +* Updated to tzdata version 2014i + (https://mm.icann.org/pipermail/tz-announce/2014-October/000026.html). + + +Version 0.3.41 (tzdata v2014f) - 8-Aug-2014 +------------------------------------------- + +* Updated to tzdata version 2014f + (https://mm.icann.org/pipermail/tz-announce/2014-August/000023.html). + + +Version 0.3.40 (tzdata v2014e) - 10-Jul-2014 +-------------------------------------------- + +* Updated to tzdata version 2014e + (https://mm.icann.org/pipermail/tz-announce/2014-June/000022.html). + + +Version 0.3.39 (tzdata v2014a) - 9-Mar-2014 +------------------------------------------- + +* Updated to tzdata version 2014a + (https://mm.icann.org/pipermail/tz-announce/2014-March/000018.html). + + +Version 0.3.38 (tzdata v2013g) - 8-Oct-2013 +------------------------------------------- + +* Updated to tzdata version 2013g + (https://mm.icann.org/pipermail/tz-announce/2013-October/000015.html). + + +Version 0.3.37 (tzdata v2013b) - 11-Mar-2013 +-------------------------------------------- + +* Updated to tzdata version 2013b + (https://mm.icann.org/pipermail/tz-announce/2013-March/000010.html). + + +Version 0.3.36 (tzdata v2013a) - 3-Mar-2013 +------------------------------------------- + +* Updated to tzdata version 2013a + (https://mm.icann.org/pipermail/tz-announce/2013-March/000009.html). +* Fix TimezoneTransitionInfo#eql? incorrectly returning false when running on + Ruby 2.0. +* Change eql? and == implementations to test the class of the passed in object + instead of checking individual properties with 'respond_to?'. + + +Version 0.3.35 (tzdata v2012i) - 4-Nov-2012 +------------------------------------------- + +* Updated to tzdata version 2012i + (https://mm.icann.org/pipermail/tz-announce/2012-November/000007.html). + + +Version 0.3.34 (tzdata v2012h) - 27-Oct-2012 +-------------------------------------------- + +* Updated to tzdata version 2012h + (https://mm.icann.org/pipermail/tz-announce/2012-October/000006.html). 
+ + +Version 0.3.33 (tzdata v2012c) - 8-Apr-2012 +------------------------------------------- + +* Updated to tzdata version 2012c + (https://mm.icann.org/pipermail/tz/2012-April/017627.html). + + +Version 0.3.32 (tzdata v2012b) - 4-Mar-2012 +------------------------------------------- + +* Updated to tzdata version 2012b + (https://mm.icann.org/pipermail/tz/2012-March/017524.html). + + +Version 0.3.31 (tzdata v2011n) - 6-Nov-2011 +------------------------------------------- + +* Updated to tzdata version 2011n + (https://mm.icann.org/pipermail/tz/2011-October/017201.html). + + +Version 0.3.30 (tzdata v2011k) - 29-Sep-2011 +-------------------------------------------- + +* Updated to tzdata version 2011k + (https://mm.icann.org/pipermail/tz/2011-September/008889.html). + + +Version 0.3.29 (tzdata v2011h) - 27-Jun-2011 +-------------------------------------------- + +* Updated to tzdata version 2011h + (https://mm.icann.org/pipermail/tz/2011-June/008576.html). +* Allow the default value of the local_to_utc and period_for_local dst + parameter to be specified globally with a Timezone.default_dst attribute. + Thanks to Kurt Werle for the suggestion and patch. + + + Version 0.3.28 (tzdata v2011g) - 13-Jun-2011 +--------------------------------------------- + +* Add support for Ruby 1.9.3 (trunk revision 31668 and later). Thanks to + Aaron Patterson for reporting the problems running on the new version. + Closes #29233. + + +Version 0.3.27 (tzdata v2011g) - 26-Apr-2011 +-------------------------------------------- + +* Updated to tzdata version 2011g + (https://mm.icann.org/pipermail/tz/2011-April/016875.html). + + +Version 0.3.26 (tzdata v2011e) - 2-Apr-2011 +------------------------------------------- + +* Updated to tzdata version 2011e + (https://mm.icann.org/pipermail/tz/2011-April/016809.html). + + +Version 0.3.25 (tzdata v2011d) - 14-Mar-2011 +-------------------------------------------- + +* Updated to tzdata version 2011d + (https://mm.icann.org/pipermail/tz/2011-March/016746.html). + + +Version 0.3.24 (tzdata v2010o) - 15-Jan-2011 +-------------------------------------------- + +* Updated to tzdata version 2010o + (https://mm.icann.org/pipermail/tz/2010-November/016517.html). + + +Version 0.3.23 (tzdata v2010l) - 19-Aug-2010 +-------------------------------------------- + +* Updated to tzdata version 2010l + (https://mm.icann.org/pipermail/tz/2010-August/016360.html). + + +Version 0.3.22 (tzdata v2010j) - 29-May-2010 +-------------------------------------------- + +* Corrected file permissions issue with 0.3.21 release. + + +Version 0.3.21 (tzdata v2010j) - 28-May-2010 +-------------------------------------------- + +* Updated to tzdata version 2010j + (https://mm.icann.org/pipermail/tz/2010-May/016211.html). +* Change invalid timezone check to exclude characters not used in timezone + identifiers and avoid 'character class has duplicated range' warnings with + Ruby 1.9.2. +* Ruby 1.9.2 has deprecated "require 'rational'", but older versions of + Ruby need rational to be required. Require rational only when the Rational + module has not already been loaded. +* Remove circular requires (now a warning in Ruby 1.9.2). Instead of using + requires in each file for dependencies, tzinfo.rb now requires all tzinfo + files. If you were previously requiring files within the tzinfo directory + (e.g. require 'tzinfo/timezone'), then you will now have to + require 'tzinfo' instead. 
+ + +Version 0.3.20 (tzdata v2010i) - 19-Apr-2010 +-------------------------------------------- + +* Updated to tzdata version 2010i + (https://mm.icann.org/pipermail/tz/2010-April/016184.html). + + +Version 0.3.19 (tzdata v2010h) - 5-Apr-2010 +------------------------------------------- + +* Updated to tzdata version 2010h + (https://mm.icann.org/pipermail/tz/2010-April/016161.html). + + +Version 0.3.18 (tzdata v2010g) - 29-Mar-2010 +-------------------------------------------- + +* Updated to tzdata version 2010g + (https://mm.icann.org/pipermail/tz/2010-March/016140.html). + + +Version 0.3.17 (tzdata v2010e) - 8-Mar-2010 +------------------------------------------- + +* Updated to tzdata version 2010e + (https://mm.icann.org/pipermail/tz/2010-March/016088.html). + + +Version 0.3.16 (tzdata v2009u) - 5-Jan-2010 +------------------------------------------- + +* Support the use of '-' to denote '0' as an offset in the tz data files. + Used for the first time in the SAVE field in tzdata v2009u. +* Updated to tzdata version 2009u + (https://mm.icann.org/pipermail/tz/2009-December/016001.html). + + +Version 0.3.15 (tzdata v2009p) - 26-Oct-2009 +-------------------------------------------- + +* Updated to tzdata version 2009p + (https://mm.icann.org/pipermail/tz/2009-October/015889.html). +* Added a description to the gem spec. +* List test files in test_files instead of files in the gem spec. + + +Version 0.3.14 (tzdata v2009l) - 19-Aug-2009 +-------------------------------------------- + +* Updated to tzdata version 2009l + (https://mm.icann.org/pipermail/tz/2009-August/015729.html). +* Include current directory in load path to allow running tests on + Ruby 1.9.2, which doesn't include it by default any more. + + +Version 0.3.13 (tzdata v2009f) - 15-Apr-2009 +-------------------------------------------- + +* Updated to tzdata version 2009f + (https://mm.icann.org/pipermail/tz/2009-April/015544.html). +* Untaint the timezone module filename after validation to allow use + with $SAFE == 1 (e.g. under mod_ruby). Thanks to Dmitry Borodaenko for + the suggestion. Closes #25349. + + +Version 0.3.12 (tzdata v2008i) - 12-Nov-2008 +-------------------------------------------- + +* Updated to tzdata version 2008i + (https://mm.icann.org/pipermail/tz/2008-October/015260.html). + + +Version 0.3.11 (tzdata v2008g) - 7-Oct-2008 +------------------------------------------- + +* Updated to tzdata version 2008g + (https://mm.icann.org/pipermail/tz/2008-October/015139.html). +* Support Ruby 1.9.0-5. Rational.new! has now been removed in Ruby 1.9. + Only use Rational.new! if it is available (it is preferable in Ruby 1.8 + for performance reasons). Thanks to Jeremy Kemper and Pratik Naik for + reporting this. Closes #22312. +* Apply a patch from Pratik Naik to replace assert calls that have been + deprecated in the Ruby svn trunk. Closes #22308. + + +Version 0.3.10 (tzdata v2008f) - 16-Sep-2008 +-------------------------------------------- + +* Updated to tzdata version 2008f + (https://mm.icann.org/pipermail/tz/2008-September/015090.html). + + +Version 0.3.9 (tzdata v2008c) - 27-May-2008 +------------------------------------------- + +* Updated to tzdata version 2008c + (https://mm.icann.org/pipermail/tz/2008-May/014956.html). +* Support loading timezone data in the latest trunk versions of Ruby 1.9. + Rational.new! is now private, so call it using Rational.send :new! instead. + Thanks to Jeremy Kemper and Pratik Naik for spotting this. Closes #19184. 
+* Prevent warnings from being output when running Ruby with the -v or -w + command line options. Thanks to Paul McMahon for the patch. Closes #19719. + + +Version 0.3.8 (tzdata v2008b) - 24-Mar-2008 +------------------------------------------- + +* Updated to tzdata version 2008b + (https://mm.icann.org/pipermail/tz/2008-March/014910.html). +* Support loading timezone data in Ruby 1.9.0. Use DateTime.new! if it is + available instead of DateTime.new0 when constructing transition times. + DateTime.new! was added in Ruby 1.8.6. DateTime.new0 was removed in + Ruby 1.9.0. Thanks to Joshua Peek for reporting this. Closes #17606. +* Modify some of the equality test cases to cope with the differences + between Ruby 1.8.6 and Ruby 1.9.0. + + +Version 0.3.7 (tzdata v2008a) - 10-Mar-2008 +------------------------------------------- + +* Updated to tzdata version 2008a + (https://mm.icann.org/pipermail/tz/2008-March/014851.html). + + +Version 0.3.6 (tzdata v2007k) - 1-Jan-2008 +------------------------------------------ + +* Updated to tzdata version 2007k + (https://mm.icann.org/pipermail/tz/2007-December/014765.html). +* Removed deprecated RubyGems autorequire option. + + +Version 0.3.5 (tzdata v2007h) - 1-Oct-2007 +------------------------------------------ + +* Updated to tzdata version 2007h + (https://mm.icann.org/pipermail/tz/2007-October/014585.html). + + +Version 0.3.4 (tzdata v2007g) - 21-Aug-2007 +------------------------------------------- + +* Updated to tzdata version 2007g + (https://mm.icann.org/pipermail/tz/2007-August/014499.html). + + +Version 0.3.3 (tzdata v2006p) - 27-Nov-2006 +------------------------------------------- + +* Updated to tzdata version 2006p + (https://mm.icann.org/pipermail/tz/2006-November/013999.html). + + +Version 0.3.2 (tzdata v2006n) - 11-Oct-2006 +------------------------------------------- + +* Updated to tzdata version 2006n + (https://mm.icann.org/pipermail/tz/2006-October/013911.html). Note that this release of + tzdata removes the country Serbia and Montenegro (CS) and replaces it with + separate Serbia (RS) and Montenegro (ME) entries. + + +Version 0.3.1 (tzdata v2006j) - 21-Aug-2006 +------------------------------------------- + +* Remove colon from case statements to avoid warning in Ruby 1.8.5. #5198. +* Use temporary variable to avoid dynamic string warning from rdoc. +* Updated to tzdata version 2006j + (https://mm.icann.org/pipermail/tz/2006-August/013767.html). + + +Version 0.3.0 (tzdata v2006g) - 17-Jul-2006 +------------------------------------------- + +* New timezone data format. Timezone data now occupies less space on disk and + takes less memory once loaded. #4142, #4144. +* Timezone data is defined in modules rather than classes. Timezone instances + returned by Timezone.get are no longer instances of data classes, but are + instead instances of new DataTimezone and LinkedTimezone classes. +* Timezone instances can now be used with Marshal.dump and Marshal.load. #4240. +* Added a Timezone.get_proxy method that returns a TimezoneProxy object for a + given identifier. +* Country index data is now defined in a single module that is independent + of the Country class implementation. +* Country instances can now be used with Marshal.dump and Marshal.load. #4240. +* Country has a new zone_info method that returns CountryTimezone objects + containing additional information (latitude, longitude and a description) + relating to each Timezone. #4140. +* Timezones within a Country are now returned in an order that makes + geographic sense. 
+* The zdumptest utility now checks local to utc conversions in addition to + utc to local conversions. +* eql? method defined on Country and Timezone that is equivalent to ==. +* The == method of Timezone no longer raises an exception when passed an object + with no identifier method. +* The == method of Country no longer raises an exception when passed an object + with no code method. +* hash method defined on Country that returns the hash of the code. +* hash method defined on Timezone that returns the hash of the identifier. +* Miscellaneous API documentation corrections and improvements. +* Timezone definition and indexes are now excluded from rdoc (the contents were + previously ignored with #:nodoc: anyway). +* Removed no longer needed #:nodoc: directives from timezone data files (which + are now excluded from the rdoc build). +* Installation of the gem now causes rdoc API documentation to be generated. + #4905. +* When optimizing transitions to generate zone definitions, check the + UTC and standard offsets separately rather than just the total offset to UTC. + Fixes an incorrect abbreviation issue with Europe/London, Europe/Dublin and + Pacific/Auckland. +* Eliminated unnecessary .nil? calls to give a minor performance gain. +* Timezone.all and Timezone.all_identifiers now return all the + Timezones/identifiers rather than just those associated with countries. #4146. +* Added all_data_zones, all_data_zone_identifiers, all_linked_zones and + all_linked_zone_identifiers class methods to Timezone. +* Added a strftime method to Timezone that converts a time in UTC to local + time and then returns it formatted. %Z is replaced with the Timezone + abbreviation for the given time (for example, EST or EDT). #4143. +* Fix escaping of quotes in TZDataParser. This affected country names and + descriptions of timezones within countries. + + +Version 0.2.2 (tzdata v2006g) - 17-May-2006 +------------------------------------------- + +* Use class-scoped instance variables to store the Timezone identifier and + singleton instance. Loading a linked zone no longer causes the parent + zone's identifier to be changed. The instance method of a linked zone class + also now returns an instance of the linked zone class rather than the parent + class. #4502. +* The zdumptest utility now compares the TZInfo zone identifier with the zdump + zone identifier. +* The zdumptestall utility now exits if not supplied with enough parameters. +* Updated to tzdata version 2006g + (https://mm.icann.org/pipermail/tz/2006-May/013590.html). + + +Version 0.2.1 (tzdata v2006d) - 17-Apr-2006 +------------------------------------------- + +* Fix a performance issue caused in 0.2.0 with Timezone.local_to_utc. + Conversions performed on TimeOrDateTime instances passed to <=> are now + cached as originally intended. Thanks to Michael Smedberg for spotting this. +* Fix a performance issue with the local_to_utc period search algorithm + originally implemented in 0.1.0. The condition that was supposed to cause + the search to terminate when enough periods had been found was only being + evaluated in a small subset of cases. Thanks to Michael Smedberg and + Jamis Buck for reporting this. +* Added abbreviation as an alias for TimezonePeriod.zone_identifier. +* Updated to tzdata version 2006d + (https://mm.icann.org/pipermail/tz/2006-April/013517.html). +* Ignore any offset in DateTimes passed in (as is already done for Times). + All of the following now refer to the same UTC time (15:40 on 17 April 2006). 
+ Previously, the DateTime in the second line would have been interpreted + as 20:40. + + tz.utc_to_local(DateTime.new(2006, 4, 17, 15, 40, 0)) + tz.utc_to_local(DateTime.new(2006, 4, 17, 15, 40, 0).new_offset(Rational(5, 24))) + tz.utc_to_local(Time.utc(2006, 4, 17, 15, 40, 0)) + tz.utc_to_local(Time.local(2006, 4, 17, 15, 40, 0)) + + +Version 0.2.0 (tzdata v2006c) - 3-Apr-2006 +------------------------------------------ + +* Use timestamps rather than DateTime objects in zone files for times between + 1970 and 2037 (the range of Time). +* Don't convert passed in Time objects to DateTime in most cases (provides + a substantial performance improvement). +* Allow integer timestamps (time in seconds since 1970-1-1) to be used as well + as Time and DateTime objects in all public methods that take times as + parameters. +* Tool to compare TZInfo conversions with output from zdump. +* TZDataParser zone generation algorithm rewritten. Now based on the zic code. + TZInfo is now 100% compatible with zic/zdump output. +* Riyadh Solar Time zones now included again (generation time has been reduced + with TZDataParser changes). +* Use binary mode when writing zone and country files to get Unix (\n) new + lines. +* Omit unnecessary quotes in zone identifier symbols. +* Omit the final transition to DST if there is a prior transition in the last + year processed to standard time. +* Updated to tzdata version 2006c + (https://mm.icann.org/pipermail/tz/2006-April/013500.html). + + +Version 0.1.2 (tzdata v2006a) - 5-Feb-2006 +------------------------------------------ + +* Add lib directory to the load path when tzinfo is required. Makes it easier + to use tzinfo gem when unpacked to vendor directory in rails. +* Updated to tzdata version 2006a + (https://mm.icann.org/pipermail/tz/2006-January/013311.html). +* build_tz_classes rake task now handles running svn add and svn delete as new + timezones and countries are added and old ones are removed. +* Return a better error when attempting to use a Timezone instance that was + constructed with Timezone.new(nil). This will occur when using Rails' + composed_of. When the timezone identifier in the database is null, attempting + to use the Timezone will now result in an UnknownTimezone exception rather + than a NameError. + + +Version 0.1.1 (tzdata v2005q) - 18-Dec-2005 +------------------------------------------- + +* Timezones that are defined by a single unbounded period (e.g. UTC) now + work again. +* Updated to tzdata version 2005q. + + +Version 0.1.0 (tzdata v2005n) - 27-Nov-2005 +------------------------------------------- + +* period_for_local and local_to_utc now allow resolution of ambiguous + times (e.g. when switching from daylight savings to standard time). + The behaviour of these methods when faced with an ambiguous local time + has now changed. If you are using these methods you should check + the documentation. Thanks to Cliff Matthews for suggesting this change. +* Added require 'date' to timezone.rb (date isn't loaded by default in all + environments). +* Use rake to build packages and documentation. +* License file is now included in gem distribution. +* Dates in definitions stored as Astronomical Julian Day numbers rather than + as civil dates (improves performance creating DateTime instances). +* Added options to TZDataParser to allow generation of specific zones and + countries. +* Moved TimezonePeriod class to timezone_period.rb. +* New TimezonePeriodList class to store TimezonePeriods for a timezone and + perform searches for periods. 
+* Timezones now defined using blocks. TimezonePeriods are only instantiated + when they are needed. Thanks to Jamis Buck for the suggestion. +* Add options to TZDataParser to allow exclusion of specific zones and + countries. +* Exclude the Riyadh Solar Time zones. The rules are only for 1987 to 1989 and + take a long time to generate and process. Riyadh Solar Time is no longer + observed. +* The last TimezonePeriod for each Timezone is now written out with an + unbounded rather than arbitrary end time. +* Construct the Rational offset in TimezonePeriod once when the TimezonePeriod + is constructed rather than each time it is needed. +* Timezone and Country now keep a cache of loaded instances to avoid running + require which can be slow on some platforms. +* Updated to tzdata version 2005n. + + +Version 0.0.4 (tzdata v2005m) - 18-Sep-2005 +------------------------------------------- + +* Removed debug output accidentally included in the previous release. +* Fixed a bug in the generation of friendly zone identifiers (was inserting + apostrophes into UTC, GMT, etc). +* Fixed Country <=> operator (was comparing non-existent attribute) +* Fixed Timezone.period_for_local error when period not found. +* Added testcases for Timezone, TimezoneProxy, TimezonePeriod, Country and + some selected timezones. + + +Version 0.0.3 (tzdata v2005m) - 17-Sep-2005 +------------------------------------------- + +* Reduced visibility of some methods added in Timezone#setup and Country#setup. +* Added name method to Timezone (returns the identifier). +* Added friendly_identifier method to Timezone. Returns a more friendly version + of the identifier. +* Added to_s method to Timezone. Returns the friendly identifier. +* Added == and <=> operators to Timezone (compares identifiers). +* Timezone now includes Comparable. +* Added to_s method to Country. +* Added == and <=> operators to Country (compares ISO 3166 country codes). +* Country now includes Comparable. +* New TimezoneProxy class that behaves the same as a Timezone but doesn't + actually load in its definition until it is actually required. +* Modified Timezone and Country methods that return Timezones to return + TimezoneProxy instances instead. This makes these methods much quicker. + + In Ruby on Rails, you can now show a drop-down list of all timezones using the + Rails time_zone_select helper method: + + <%= time_zone_select 'user', 'time_zone', TZInfo::Timezone.all.sort, :model => TZInfo::Timezone %> + + +Version 0.0.2 (tzdata v2005m) - 13-Sep-2005 +------------------------------------------- + +* Country and Timezone data is now loaded into class rather than instance + variables. This makes Timezone links more efficient and saves memory if + creating specific Timezone and Country classes directly. +* TimezonePeriod zone_identifier is now defined as a symbol to save memory + (was previously a string). +* TimezonePeriod zone_identifiers that were previously '' are now :Unknown. +* Timezones and Countries can now be returned using Timezone.new(identifier) + and Country.new(identifier). When passed an identifier, the new method + calls get to return an instance of the specified timezone or country. +* Added new class methods to Timezone to return sets of zones and identifiers. 
+ +Thanks to Scott Barron of Lunchbox Software for the suggestions in his +article about using TZInfo with Rails +(https://web.archive.org/web/20060425190845/http://lunchroom.lunchboxsoftware.com/pages/tzinfo_rails) + + +Version 0.0.1 (tzdata v2005m) - 29-Aug-2005 +------------------------------------------- + +* First release. diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/LICENSE b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/LICENSE new file mode 100644 index 0000000..6e74e29 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2005-2020 Philip Ross + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/README.md b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/README.md new file mode 100644 index 0000000..50766f8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/README.md @@ -0,0 +1,152 @@ +TZInfo - Ruby Timezone Library +============================== + +[![RubyGems](https://img.shields.io/gem/v/tzinfo)](https://rubygems.org/gems/tzinfo) [![Travis CI Build](https://img.shields.io/travis/com/tzinfo/tzinfo/1.2?logo=travis)](https://travis-ci.com/tzinfo/tzinfo) [![AppVeyor Build](https://img.shields.io/appveyor/build/philr/tzinfo/1.2?logo=appveyor)](https://ci.appveyor.com/project/philr/tzinfo/branch/1.2) + +[TZInfo](https://tzinfo.github.io) provides daylight savings aware +transformations between times in different timezones. + + +Data Sources +------------ + +TZInfo requires a source of timezone data. There are two built-in options: + +1. The TZInfo::Data library (the tzinfo-data gem). TZInfo::Data contains a set + of Ruby modules that are generated from the [IANA Time Zone Database](https://www.iana.org/time-zones). +2. A zoneinfo directory. Most Unix-like systems include a zoneinfo directory + containing timezone definitions. These are also generated from the + [IANA Time Zone Database](https://www.iana.org/time-zones). + +By default, TZInfo::Data will be used. If TZInfo::Data is not available (i.e. +if `require 'tzinfo/data'` fails), then TZInfo will search for a zoneinfo +directory instead (using the search path specified by +`TZInfo::ZoneinfoDataSource::DEFAULT_SEARCH_PATH`). + +If no data source can be found, a `TZInfo::DataSourceNotFound` exception will be +raised when TZInfo is used. Further information is available +[in the wiki](https://tzinfo.github.io/datasourcenotfound) to help with +resolving `TZInfo::DataSourceNotFound` errors. 
+ +The default data source selection can be overridden using +`TZInfo::DataSource.set`. + +Custom data sources can also be used. See `TZInfo::DataSource.set` for +further details. + + +Installation +------------ + +The TZInfo gem can be installed by running: + + gem install tzinfo + +To use the Ruby modules as the data source, TZInfo::Data will also need to be +installed: + + gem install tzinfo-data + + +Example Usage +------------- + +The following code will obtain the America/New_York timezone (as an instance +of `TZInfo::Timezone`) and convert a time in UTC to local New York time: + + require 'tzinfo' + + tz = TZInfo::Timezone.get('America/New_York') + local = tz.utc_to_local(Time.utc(2005,8,29,15,35,0)) + +Note that the local Time returned will have a UTC timezone (`local.zone` will +return `"UTC"`). This is because the Time class in older (but still supported by +TZInfo) versions of Ruby can only handle two timezones: UTC and the system local +timezone. + +To convert from a local time to UTC, the `local_to_utc` method can be used as +follows: + + utc = tz.local_to_utc(local) + +Note that the timezone information of the local Time object is ignored (TZInfo +will just read the date and time and treat them as if there were in the `tz` +timezone). The following two lines will return the same result regardless of +the system's local timezone: + + tz.local_to_utc(Time.local(2006,6,26,1,0,0)) + tz.local_to_utc(Time.utc(2006,6,26,1,0,0)) + +To obtain information about the rules in force at a particular UTC or local +time, the `TZInfo::Timezone.period_for_utc` and +`TZInfo::Timezone.period_for_local` methods can be used. Both of these methods +return `TZInfo::TimezonePeriod` objects. The following gets the identifier for +the period (in this case EDT). + + period = tz.period_for_utc(Time.utc(2005,8,29,15,35,0)) + id = period.zone_identifier + +The current local time in a `Timezone` can be obtained with the +`TZInfo::Timezone#now` method: + + now = tz.now + +All methods in TZInfo that operate on a time can be used with either `Time` or +`DateTime` instances or with Integer timestamps (i.e. as returned by +`Time#to_i`). The type of the values returned will match the type passed in. + +A list of all the available timezone identifiers can be obtained using the +`TZInfo::Timezone.all_identifiers` method. `TZInfo::Timezone.all` can be called +to get an `Array` of all the `TZInfo::Timezone` instances. + +Timezones can also be accessed by country (using an ISO 3166-1 alpha-2 country +code). The following code retrieves the `TZInfo::Country` instance representing +the USA (country code 'US') and then gets all the timezone identifiers used in +the USA. + + us = TZInfo::Country.get('US') + timezones = us.zone_identifiers + +The `TZInfo::Country#zone_info` method provides an additional description and +geographic location for each timezone in a country. + +A list of all the available country codes can be obtained using the +`TZInfo::Country.all_codes` method. `TZInfo::Country.all` can be called to get +an `Array` of all the `Country` instances. + +For further detail, please refer to the API documentation for the +`TZInfo::Timezone` and `TZInfo::Country` classes. + + +Thread-Safety +------------- + +The `TZInfo::Country` and `TZInfo::Timezone` classes are thread-safe. It is safe +to use class and instance methods of `TZInfo::Country` and `TZInfo::Timezone` in +concurrently executing threads. Instances of both classes can be shared across +thread boundaries. 
+ + +Documentation +------------- + +API documentation for TZInfo is available on [RubyDoc.info](https://rubydoc.info/gems/tzinfo/frames). + + +License +------- + +TZInfo is released under the MIT license, see LICENSE for details. + + +Source Code +----------- + +Source code for TZInfo is available on [GitHub](https://github.com/tzinfo/tzinfo). + + +Issue Tracker +------------- + +Please post any bugs, issues, feature requests or questions to the +[GitHub issue tracker](https://github.com/tzinfo/tzinfo/issues). diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/Rakefile b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/Rakefile new file mode 100644 index 0000000..3cabb07 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/Rakefile @@ -0,0 +1,107 @@ +require 'rubygems' +require 'rubygems/package_task' +require 'fileutils' +require 'rake/testtask' + +# Ignore errors loading rdoc/task (the rdoc tasks will be excluded if +# rdoc is unavailable). +begin + require 'rdoc/task' +rescue LoadError, RuntimeError +end + +BASE_DIR = File.expand_path(File.dirname(__FILE__)) + +task :default => [:test] + +spec = eval(File.read('tzinfo.gemspec')) + +class TZInfoPackageTask < Gem::PackageTask + alias_method :orig_sh, :sh + private :orig_sh + + def sh(*cmd, &block) + if cmd[0] == '__tar_with_owner__' && cmd[1] =~ /\A-?[zjcvf]+\z/ + opts = cmd[1] + cmd = ['tar', 'c', '--owner', '0', '--group', '0', "#{opts.start_with?('-') ? '' : '-'}#{opts.gsub('c', '')}"] + cmd.drop(2) + elsif cmd.first =~ /\A__tar_with_owner__ -?([zjcvf]+)(.*)\z/ + opts = $1 + args = $2 + cmd[0] = "tar c --owner 0 --group 0 -#{opts.gsub('c', '')}#{args}" + end + + orig_sh(*cmd, &block) + end +end + +def add_signing_key(spec) + # Attempt to find the private key and add options to sign the gem if found. + private_key_path = File.expand_path(File.join(BASE_DIR, '..', 'key', 'gem-private_key.pem')) + + if File.exist?(private_key_path) + spec = spec.clone + spec.signing_key = private_key_path + spec.cert_chain = [File.join(BASE_DIR, 'gem-public_cert.pem')] + else + puts 'WARNING: Private key not found. Not signing gem file.' + end + + spec +end + +package_task = TZInfoPackageTask.new(add_signing_key(spec)) do |pkg| + pkg.need_zip = true + pkg.need_tar_gz = true + pkg.tar_command = '__tar_with_owner__' +end + +# Skip the rdoc task if RDoc::Task is unavailable +if defined?(RDoc) && defined?(RDoc::Task) + RDoc::Task.new do |rdoc| + rdoc.rdoc_dir = 'doc' + rdoc.options.concat spec.rdoc_options + rdoc.rdoc_files.include(spec.extra_rdoc_files) + rdoc.rdoc_files.include('lib') + end +end + +Rake::Task[package_task.package_dir_path].enhance do + recurse_chmod(package_task.package_dir_path) +end + +Rake::Task[:package].enhance do + FileUtils.rm_rf(package_task.package_dir_path) +end + +def recurse_chmod(dir) + File.chmod(0755, dir) + + Dir.entries(dir).each do |entry| + if entry != '.' && entry != '..' 
+ path = File.join(dir, entry) + if File.directory?(path) + recurse_chmod(path) + else + File.chmod(0644, path) + end + end + end +end + +desc 'Run tests using RubyDataSource, then ZoneinfoDataSource' +task :test => [:test_ruby, :test_zoneinfo] do +end + +def setup_tests(test_task, type) + test_task.libs = [File.join(BASE_DIR, 'lib')] + test_task.pattern = File.join(BASE_DIR, 'test', "ts_all_#{type}.rb") + test_task.warning = true +end + +Rake::TestTask.new(:test_ruby) do |t| + setup_tests(t, :ruby) +end + +Rake::TestTask.new(:test_zoneinfo) do |t| + setup_tests(t, :zoneinfo) +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo.rb new file mode 100644 index 0000000..7a11ef7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo.rb @@ -0,0 +1,43 @@ +# Top level module for TZInfo. +module TZInfo +end + +require 'tzinfo/ruby_core_support' +require 'tzinfo/offset_rationals' +require 'tzinfo/time_or_datetime' + +require 'tzinfo/timezone_definition' + +require 'tzinfo/timezone_offset' +require 'tzinfo/timezone_transition' +require 'tzinfo/transition_rule' +require 'tzinfo/annual_rules' +require 'tzinfo/timezone_transition_definition' + +require 'tzinfo/timezone_index_definition' + +require 'tzinfo/timezone_info' +require 'tzinfo/data_timezone_info' +require 'tzinfo/linked_timezone_info' +require 'tzinfo/transition_data_timezone_info' +require 'tzinfo/zoneinfo_timezone_info' + +require 'tzinfo/data_source' +require 'tzinfo/ruby_data_source' +require 'tzinfo/posix_time_zone_parser' +require 'tzinfo/zoneinfo_data_source' + +require 'tzinfo/timezone_period' +require 'tzinfo/timezone' +require 'tzinfo/info_timezone' +require 'tzinfo/data_timezone' +require 'tzinfo/linked_timezone' +require 'tzinfo/timezone_proxy' + +require 'tzinfo/country_index_definition' +require 'tzinfo/country_info' +require 'tzinfo/ruby_country_info' +require 'tzinfo/zoneinfo_country_info' + +require 'tzinfo/country' +require 'tzinfo/country_timezone' diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb new file mode 100644 index 0000000..fec436b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb @@ -0,0 +1,51 @@ +module TZInfo + # A set of rules that define when transitions occur in time zones with + # annually occurring daylight savings time. + # + # @private + class AnnualRules #:nodoc: + # Returned by #transitions. #offset is the TimezoneOffset that applies + # from the UTC TimeOrDateTime #at. #previous_offset is the prior + # TimezoneOffset. + Transition = Struct.new(:offset, :previous_offset, :at) + + # The standard offset that applies when daylight savings time is not in + # force. + attr_reader :std_offset + + # The offset that applies when daylight savings time is in force. + attr_reader :dst_offset + + # The rule that determines when daylight savings time starts. + attr_reader :dst_start_rule + + # The rule that determines when daylight savings time ends. + attr_reader :dst_end_rule + + # Initializes a new {AnnualRules} instance. + def initialize(std_offset, dst_offset, dst_start_rule, dst_end_rule) + @std_offset = std_offset + @dst_offset = dst_offset + @dst_start_rule = dst_start_rule + @dst_end_rule = dst_end_rule + end + + # Returns the transitions between standard and daylight savings time for a + # given year. 
The results are ordered by time of occurrence (earliest to + # latest). + def transitions(year) + start_dst = apply_rule(@dst_start_rule, @std_offset, @dst_offset, year) + end_dst = apply_rule(@dst_end_rule, @dst_offset, @std_offset, year) + + end_dst.at < start_dst.at ? [end_dst, start_dst] : [start_dst, end_dst] + end + + private + + # Applies a given rule between offsets on a year. + def apply_rule(rule, from_offset, to_offset, year) + at = rule.at(from_offset, year) + Transition.new(to_offset, from_offset, at) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country.rb new file mode 100644 index 0000000..0dccebd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country.rb @@ -0,0 +1,196 @@ +require 'thread_safe' + +module TZInfo + # Raised by Country#get if the code given is not valid. + class InvalidCountryCode < StandardError + end + + # The Country class represents an ISO 3166-1 country. It can be used to + # obtain a list of Timezones for a country. For example: + # + # us = Country.get('US') + # us.zone_identifiers + # us.zones + # us.zone_info + # + # The Country class is thread-safe. It is safe to use class and instance + # methods of Country in concurrently executing threads. Instances of Country + # can be shared across thread boundaries. + # + # Country information available through TZInfo is intended as an aid for + # users, to help them select time zone data appropriate for their practical + # needs. It is not intended to take or endorse any position on legal or + # territorial claims. + class Country + include Comparable + + # Defined countries. + # + # @!visibility private + @@countries = nil + + # Whether the countries index has been loaded yet. + # + # @!visibility private + @@index_loaded = false + + # Gets a Country by its ISO 3166-1 alpha-2 code. Raises an + # InvalidCountryCode exception if it couldn't be found. + def self.get(identifier) + instance = @@countries[identifier] + + unless instance + # Thread-safety: It is possible that multiple equivalent Country + # instances could be created here in concurrently executing threads. + # The consequences of this are that the data may be loaded more than + # once (depending on the data source) and memoized calculations could + # be discarded. The performance benefit of ensuring that only a single + # instance is created is unlikely to be worth the overhead of only + # allowing one Country to be loaded at a time. + info = data_source.load_country_info(identifier) + instance = Country.new(info) + @@countries[identifier] = instance + end + + instance + end + + # If identifier is a CountryInfo object, initializes the Country instance, + # otherwise calls get(identifier). + def self.new(identifier) + if identifier.kind_of?(CountryInfo) + instance = super() + instance.send :setup, identifier + instance + else + get(identifier) + end + end + + # Returns an Array of all the valid country codes. + def self.all_codes + data_source.country_codes + end + + # Returns an Array of all the defined Countries. + def self.all + data_source.country_codes.collect {|code| get(code)} + end + + # The ISO 3166-1 alpha-2 country code. + def code + @info.code + end + + # The name of the country. + def name + @info.name + end + + # Alias for name. + def to_s + name + end + + # Returns internal object state as a programmer-readable string. 
+ def inspect + "#<#{self.class}: #{@info.code}>" + end + + # Returns a frozen array of all the zone identifiers for the country. These + # are in an order that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # Returned zone identifiers may refer to cities and regions outside of the + # country. This will occur if the zone covers multiple countries. Any zones + # referring to a city or region in a different country will be listed after + # those relating to this country. + def zone_identifiers + @info.zone_identifiers + end + alias zone_names zone_identifiers + + # An array of all the Timezones for this country. Returns TimezoneProxy + # objects to avoid the overhead of loading Timezone definitions until + # a conversion is actually required. The Timezones are returned in an order + # that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # Identifiers of the zones returned may refer to cities and regions outside + # of the country. This will occur if the zone covers multiple countries. Any + # zones referring to a city or region in a different country will be listed + # after those relating to this country. + def zones + zone_identifiers.collect {|id| + Timezone.get_proxy(id) + } + end + + # Returns a frozen array of all the timezones for the for the country as + # CountryTimezone instances (containing extra information about each zone). + # These are in an order that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # Identifiers and descriptions of the zones returned may refer to cities and + # regions outside of the country. This will occur if the zone covers + # multiple countries. Any zones referring to a city or region in a different + # country will be listed after those relating to this country. + def zone_info + @info.zones + end + + # Compare two Countries based on their code. Returns -1 if c is less + # than self, 0 if c is equal to self and +1 if c is greater than self. + # + # Returns nil if c is not comparable with Country instances. + def <=>(c) + return nil unless c.is_a?(Country) + code <=> c.code + end + + # Returns true if and only if the code of c is equal to the code of this + # Country. + def eql?(c) + self == c + end + + # Returns a hash value for this Country. + def hash + code.hash + end + + # Dumps this Country for marshalling. + def _dump(limit) + code + end + + # Loads a marshalled Country. + def self._load(data) + Country.get(data) + end + + private + # Called by Country.new to initialize a new Country instance. The info + # parameter is a CountryInfo that defines the country. + def setup(info) + @info = info + end + + # Initializes @@countries. + def self.init_countries + @@countries = ThreadSafe::Cache.new + end + init_countries + + # Returns the current DataSource + def self.data_source + DataSource.get + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_index_definition.rb new file mode 100644 index 0000000..431790b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_index_definition.rb @@ -0,0 +1,31 @@ +module TZInfo + # The country index file includes CountryIndexDefinition which provides + # a country method used to define each country in the index. 
+ # + # @private + module CountryIndexDefinition #:nodoc: + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval { @countries = {} } + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # Defines a country with an ISO 3166 country code, name and block. The + # block will be evaluated to obtain all the timezones for the country. + # Calls Country.country_defined with the definition of each country. + def country(code, name, &block) + @countries[code] = RubyCountryInfo.new(code, name, &block) + end + + # Returns a frozen hash of all the countries that have been defined in + # the index. + def countries + @countries.freeze + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_info.rb new file mode 100644 index 0000000..e9d71c7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_info.rb @@ -0,0 +1,42 @@ +module TZInfo + # Represents a country and references to its timezones as returned by a + # DataSource. + class CountryInfo + # The ISO 3166 country code. + attr_reader :code + + # The name of the country. + attr_reader :name + + # Constructs a new CountryInfo with an ISO 3166 country code and name + def initialize(code, name) + @code = code + @name = name + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@code>" + end + + # Returns a frozen array of all the zone identifiers for the country. + # The identifiers are ordered by importance according to the DataSource. + def zone_identifiers + raise_not_implemented('zone_identifiers') + end + + # Returns a frozen array of all the timezones for the for the country as + # CountryTimezone instances. + # + # The timezones are ordered by importance according to the DataSource. + def zones + raise_not_implemented('zones') + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_timezone.rb new file mode 100644 index 0000000..d46bec0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/country_timezone.rb @@ -0,0 +1,135 @@ +module TZInfo + # A Timezone within a Country. This contains extra information about the + # Timezone that is specific to the Country (a Timezone could be used by + # multiple countries). + class CountryTimezone + # The zone identifier. + attr_reader :identifier + + # A description of this timezone in relation to the country, e.g. + # "Eastern Time". This is usually nil for countries having only a single + # Timezone. + attr_reader :description + + class << self + # Creates a new CountryTimezone with a timezone identifier, latitude, + # longitude and description. The latitude and longitude are specified as + # rationals - a numerator and denominator. For performance reasons, the + # numerators and denominators must be specified in their lowest form. + # + # For use internally within TZInfo. + # + # @!visibility private + alias :new! :new + + # Creates a new CountryTimezone with a timezone identifier, latitude, + # longitude and description. The latitude and longitude must be specified + # as instances of Rational. 
+ # + # CountryTimezone instances should normally only be constructed when + # creating new DataSource implementations. + def new(identifier, latitude, longitude, description = nil) + super(identifier, latitude, nil, longitude, nil, description) + end + end + + # Creates a new CountryTimezone with a timezone identifier, latitude, + # longitude and description. The latitude and longitude are specified as + # rationals - a numerator and denominator. For performance reasons, the + # numerators and denominators must be specified in their lowest form. + # + # @!visibility private + def initialize(identifier, latitude_numerator, latitude_denominator, + longitude_numerator, longitude_denominator, description = nil) #:nodoc: + @identifier = identifier + + if latitude_numerator.kind_of?(Rational) + @latitude = latitude_numerator + else + @latitude = nil + @latitude_numerator = latitude_numerator + @latitude_denominator = latitude_denominator + end + + if longitude_numerator.kind_of?(Rational) + @longitude = longitude_numerator + else + @longitude = nil + @longitude_numerator = longitude_numerator + @longitude_denominator = longitude_denominator + end + + @description = description + end + + # The Timezone (actually a TimezoneProxy for performance reasons). + def timezone + Timezone.get_proxy(@identifier) + end + + # if description is not nil, this method returns description; otherwise it + # returns timezone.friendly_identifier(true). + def description_or_friendly_identifier + description || timezone.friendly_identifier(true) + end + + # The latitude of this timezone in degrees as a Rational. + def latitude + # Thread-safety: It is possible that the value of @latitude may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @latitude is only + # calculated once. + unless @latitude + result = RubyCoreSupport.rational_new!(@latitude_numerator, @latitude_denominator) + return result if frozen? + @latitude = result + end + + @latitude + end + + # The longitude of this timezone in degrees as a Rational. + def longitude + # Thread-safety: It is possible that the value of @longitude may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @longitude is only + # calculated once. + unless @longitude + result = RubyCoreSupport.rational_new!(@longitude_numerator, @longitude_denominator) + return result if frozen? + @longitude = result + end + + @longitude + end + + # Returns true if and only if the given CountryTimezone is equal to the + # current CountryTimezone (has the same identifer, latitude, longitude + # and description). + def ==(ct) + ct.kind_of?(CountryTimezone) && + identifier == ct.identifier && latitude == ct.latitude && + longitude == ct.longitude && description == ct.description + end + + # Returns true if and only if the given CountryTimezone is equal to the + # current CountryTimezone (has the same identifer, latitude, longitude + # and description). + def eql?(ct) + self == ct + end + + # Returns a hash of this CountryTimezone. + def hash + @identifier.hash ^ + (@latitude ? @latitude.numerator.hash ^ @latitude.denominator.hash : @latitude_numerator.hash ^ @latitude_denominator.hash) ^ + (@longitude ? @longitude.numerator.hash ^ @longitude.denominator.hash : @longitude_numerator.hash ^ @longitude_denominator.hash) ^ + @description.hash + end + + # Returns internal object state as a programmer-readable string. 
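A short sketch of reading CountryTimezone data through Country#zone_info, again assuming the tzinfo and tzinfo-data gems; the coordinate shown is approximate and data-release dependent:

  require 'tzinfo'

  ct = TZInfo::Country.get('FR').zone_info.first
  ct.identifier                          # => "Europe/Paris"
  ct.description_or_friendly_identifier  # => "Paris" (description is nil for single-zone countries)
  ct.latitude.to_f                       # => roughly 48.87; latitude itself is a Rational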
+ def inspect + "#<#{self.class}: #@identifier>" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_source.rb new file mode 100644 index 0000000..c5deeac --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_source.rb @@ -0,0 +1,190 @@ +require 'thread' + +module TZInfo + # InvalidDataSource is raised if the DataSource is used doesn't implement one + # of the required methods. + class InvalidDataSource < StandardError + end + + # DataSourceNotFound is raised if no data source could be found (i.e. + # if 'tzinfo/data' cannot be found on the load path and no valid zoneinfo + # directory can be found on the system). + class DataSourceNotFound < StandardError + end + + # The base class for data sources of timezone and country data. + # + # Use DataSource.set to change the data source being used. + class DataSource + # The currently selected data source. + @@instance = nil + + # Mutex used to ensure the default data source is only created once. + @@default_mutex = Mutex.new + + # Returns the currently selected DataSource instance. + def self.get + # If a DataSource hasn't been manually set when the first request is + # made to obtain a DataSource, then a Default data source is created. + + # This is done at the first request rather than when TZInfo is loaded to + # avoid unnecessary (or in some cases potentially harmful) attempts to + # find a suitable DataSource. + + # A Mutex is used to ensure that only a single default instance is + # created (having two different DataSources in use simultaneously could + # cause unexpected results). + + unless @@instance + @@default_mutex.synchronize do + set(create_default_data_source) unless @@instance + end + end + + @@instance + end + + # Sets the currently selected data source for Timezone and Country data. + # + # This should usually be set to one of the two standard data source types: + # + # * +:ruby+ - read data from the Ruby modules included in the TZInfo::Data + # library (tzinfo-data gem). + # * +:zoneinfo+ - read data from the zoneinfo files included with most + # Unix-like operating sytems (e.g. in /usr/share/zoneinfo). + # + # To set TZInfo to use one of the standard data source types, call + # \TZInfo::DataSource.set in one of the following ways: + # + # TZInfo::DataSource.set(:ruby) + # TZInfo::DataSource.set(:zoneinfo) + # TZInfo::DataSource.set(:zoneinfo, zoneinfo_dir) + # TZInfo::DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_tab_file) + # + # \DataSource.set(:zoneinfo) will automatically search for the zoneinfo + # directory by checking the paths specified in + # ZoneinfoDataSource.search_paths. ZoneinfoDirectoryNotFound will be raised + # if no valid zoneinfo directory could be found. + # + # \DataSource.set(:zoneinfo, zoneinfo_dir) uses the specified zoneinfo + # directory as the data source. If the directory is not a valid zoneinfo + # directory, an InvalidZoneinfoDirectory exception will be raised. + # + # \DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_tab_file) uses the + # specified zoneinfo directory as the data source, but loads the iso3166.tab + # file from an alternate path. If the directory is not a valid zoneinfo + # directory, an InvalidZoneinfoDirectory exception will be raised. 
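A minimal sketch of selecting and inspecting the data source, assuming the tzinfo-data gem is available so that the :ruby option can be used:

  require 'tzinfo'

  TZInfo::DataSource.set(:ruby)       # use the Ruby modules from tzinfo-data
  ds = TZInfo::DataSource.get
  ds.to_s                             # => "Ruby DataSource"
  ds.country_codes.include?('GB')     # => true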
+ # + # Custom data sources can be created by subclassing TZInfo::DataSource and + # implementing the following methods: + # + # * \load_timezone_info + # * \timezone_identifiers + # * \data_timezone_identifiers + # * \linked_timezone_identifiers + # * \load_country_info + # * \country_codes + # + # To have TZInfo use the custom data source, call \DataSource.set + # as follows: + # + # TZInfo::DataSource.set(CustomDataSource.new) + # + # To avoid inconsistent data, \DataSource.set should be called before + # accessing any Timezone or Country data. + # + # If \DataSource.set is not called, TZInfo will by default use TZInfo::Data + # as the data source. If TZInfo::Data is not available (i.e. if require + # 'tzinfo/data' fails), then TZInfo will search for a zoneinfo directory + # instead (using the search path specified by + # TZInfo::ZoneinfoDataSource::DEFAULT_SEARCH_PATH). + def self.set(data_source_or_type, *args) + if data_source_or_type.kind_of?(DataSource) + @@instance = data_source_or_type + elsif data_source_or_type == :ruby + @@instance = RubyDataSource.new + elsif data_source_or_type == :zoneinfo + @@instance = ZoneinfoDataSource.new(*args) + else + raise ArgumentError, 'data_source_or_type must be a DataSource instance or a data source type (:ruby)' + end + end + + # Returns a TimezoneInfo instance for a given identifier. The TimezoneInfo + # instance should derive from either DataTimzoneInfo for timezones that + # define their own data or LinkedTimezoneInfo for links or aliases to + # other timezones. + # + # Raises InvalidTimezoneIdentifier if the timezone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + raise_invalid_data_source('load_timezone_info') + end + + # Returns an array of all the available timezone identifiers. + def timezone_identifiers + raise_invalid_data_source('timezone_identifiers') + end + + # Returns an array of all the available timezone identifiers for + # data timezones (i.e. those that actually contain definitions). + def data_timezone_identifiers + raise_invalid_data_source('data_timezone_identifiers') + end + + # Returns an array of all the available timezone identifiers that + # are links to other timezones. + def linked_timezone_identifiers + raise_invalid_data_source('linked_timezone_identifiers') + end + + # Returns a CountryInfo instance for the given ISO 3166-1 alpha-2 + # country code. Raises InvalidCountryCode if the country could not be found + # or the code is invalid. + def load_country_info(code) + raise_invalid_data_source('load_country_info') + end + + # Returns an array of all the available ISO 3166-1 alpha-2 + # country codes. + def country_codes + raise_invalid_data_source('country_codes') + end + + # Returns the name of this DataSource. + def to_s + "Default DataSource" + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}>" + end + + private + + # Creates a DataSource instance for use as the default. Used if + # no preference has been specified manually. + def self.create_default_data_source + has_tzinfo_data = false + + begin + require 'tzinfo/data' + has_tzinfo_data = true + rescue LoadError + end + + return RubyDataSource.new if has_tzinfo_data + + begin + return ZoneinfoDataSource.new + rescue ZoneinfoDirectoryNotFound + raise DataSourceNotFound, "No source of timezone data could be found.\nPlease refer to https://tzinfo.github.io/datasourcenotfound for help resolving this error." 
+ end + end + + def raise_invalid_data_source(method_name) + raise InvalidDataSource, "#{method_name} not defined" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone.rb new file mode 100644 index 0000000..94958c9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone.rb @@ -0,0 +1,58 @@ +module TZInfo + + # A Timezone based on a DataTimezoneInfo. + # + # @private + class DataTimezone < InfoTimezone #:nodoc: + + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in utc is ignored (it is treated as a UTC time). + # + # If no TimezonePeriod could be found, PeriodNotFound is raised. + def period_for_utc(utc) + info.period_for_utc(utc) + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how abiguities should be resolved. + # Raises PeriodNotFound if no periods are found for the given time. + def periods_for_local(local) + info.periods_for_local(local) + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + info.transitions_up_to(utc_to, utc_from) + end + + # Returns the canonical zone for this Timezone. + # + # For a DataTimezone, this is always self. + def canonical_zone + self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone_info.rb new file mode 100644 index 0000000..7d0fa5f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/data_timezone_info.rb @@ -0,0 +1,55 @@ +module TZInfo + # Represents a defined timezone containing transition data. + class DataTimezoneInfo < TimezoneInfo + + # Returns the TimezonePeriod for the given UTC time. + def period_for_utc(utc) + raise_not_implemented('period_for_utc') + end + + # Returns the set of TimezonePeriods for the given local time as an array. + # Results returned are ordered by increasing UTC start date. + # Returns an empty array if no periods are found for the given time. + def periods_for_local(local) + raise_not_implemented('periods_for_local') + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. 
+ # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + raise_not_implemented('transitions_up_to') + end + + # Constructs a Timezone instance for the timezone represented by this + # DataTimezoneInfo. + def create_timezone + DataTimezone.new(self) + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/info_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/info_timezone.rb new file mode 100644 index 0000000..4eb014b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/info_timezone.rb @@ -0,0 +1,30 @@ +module TZInfo + + # A Timezone based on a TimezoneInfo. + # + # @private + class InfoTimezone < Timezone #:nodoc: + + # Constructs a new InfoTimezone with a TimezoneInfo instance. + def self.new(info) + tz = super() + tz.send(:setup, info) + tz + end + + # The identifier of the timezone, e.g. "Europe/Paris". + def identifier + @info.identifier + end + + protected + # The TimezoneInfo for this Timezone. + def info + @info + end + + def setup(info) + @info = info + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone.rb new file mode 100644 index 0000000..c4f4a0d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone.rb @@ -0,0 +1,63 @@ +module TZInfo + + # A Timezone based on a LinkedTimezoneInfo. + # + # @private + class LinkedTimezone < InfoTimezone #:nodoc: + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in utc is ignored (it is treated as a UTC time). + # + # If no TimezonePeriod could be found, PeriodNotFound is raised. + def period_for_utc(utc) + @linked_timezone.period_for_utc(utc) + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how abiguities should be resolved. + # Raises PeriodNotFound if no periods are found for the given time. + def periods_for_local(local) + @linked_timezone.periods_for_local(local) + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). 
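To make the transitions_up_to contract above concrete (utc_to is exclusive, utc_from is inclusive), a sketch using a full Timezone, assuming the tzinfo-data gem:

  require 'tzinfo'

  tz = TZInfo::Timezone.get('America/New_York')
  transitions = tz.transitions_up_to(Time.utc(2005, 1, 1), Time.utc(2004, 1, 1))
  transitions.size                                             # => 2 (spring forward and fall back in 2004)
  transitions.map {|t| [t.at.to_time, t.offset.abbreviation] } # transition instants and the abbreviations they switch to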
+ # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + @linked_timezone.transitions_up_to(utc_to, utc_from) + end + + # Returns the canonical zone for this Timezone. + # + # For a LinkedTimezone, this is the canonical zone of the link target. + def canonical_zone + @linked_timezone.canonical_zone + end + + protected + def setup(info) + super(info) + @linked_timezone = Timezone.get(info.link_to_identifier) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone_info.rb new file mode 100644 index 0000000..f7961d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/linked_timezone_info.rb @@ -0,0 +1,26 @@ +module TZInfo + # Represents a timezone that is defined as a link or alias to another zone. + class LinkedTimezoneInfo < TimezoneInfo + + # The zone that provides the data (that this zone is an alias for). + attr_reader :link_to_identifier + + # Constructs a new LinkedTimezoneInfo with an identifier and the identifier + # of the zone linked to. + def initialize(identifier, link_to_identifier) + super(identifier) + @link_to_identifier = link_to_identifier + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@identifier,#@link_to_identifier>" + end + + # Constructs a Timezone instance for the timezone represented by this + # DataTimezoneInfo. + def create_timezone + LinkedTimezone.new(self) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/offset_rationals.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/offset_rationals.rb new file mode 100644 index 0000000..2a50a08 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/offset_rationals.rb @@ -0,0 +1,77 @@ +require 'rational' unless defined?(Rational) + +module TZInfo + + # Provides a method for getting Rationals for a timezone offset in seconds. + # Pre-reduced rationals are returned for all the half-hour intervals between + # -14 and +14 hours to avoid having to call gcd at runtime. 
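Tying LinkedTimezone and LinkedTimezoneInfo together, a sketch of canonical zone resolution; this assumes the Ruby (tzinfo-data) data source, which models Links explicitly:

  require 'tzinfo'

  TZInfo::Timezone.get('US/Eastern').canonical_zone.identifier        # => "America/New_York" (Link resolved to its target)
  TZInfo::Timezone.get('America/New_York').canonical_zone.identifier  # => "America/New_York" (full zones return self)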
+ # + # @private + module OffsetRationals #:nodoc: + @@rational_cache = { + -50400 => RubyCoreSupport.rational_new!(-7,12), + -48600 => RubyCoreSupport.rational_new!(-9,16), + -46800 => RubyCoreSupport.rational_new!(-13,24), + -45000 => RubyCoreSupport.rational_new!(-25,48), + -43200 => RubyCoreSupport.rational_new!(-1,2), + -41400 => RubyCoreSupport.rational_new!(-23,48), + -39600 => RubyCoreSupport.rational_new!(-11,24), + -37800 => RubyCoreSupport.rational_new!(-7,16), + -36000 => RubyCoreSupport.rational_new!(-5,12), + -34200 => RubyCoreSupport.rational_new!(-19,48), + -32400 => RubyCoreSupport.rational_new!(-3,8), + -30600 => RubyCoreSupport.rational_new!(-17,48), + -28800 => RubyCoreSupport.rational_new!(-1,3), + -27000 => RubyCoreSupport.rational_new!(-5,16), + -25200 => RubyCoreSupport.rational_new!(-7,24), + -23400 => RubyCoreSupport.rational_new!(-13,48), + -21600 => RubyCoreSupport.rational_new!(-1,4), + -19800 => RubyCoreSupport.rational_new!(-11,48), + -18000 => RubyCoreSupport.rational_new!(-5,24), + -16200 => RubyCoreSupport.rational_new!(-3,16), + -14400 => RubyCoreSupport.rational_new!(-1,6), + -12600 => RubyCoreSupport.rational_new!(-7,48), + -10800 => RubyCoreSupport.rational_new!(-1,8), + -9000 => RubyCoreSupport.rational_new!(-5,48), + -7200 => RubyCoreSupport.rational_new!(-1,12), + -5400 => RubyCoreSupport.rational_new!(-1,16), + -3600 => RubyCoreSupport.rational_new!(-1,24), + -1800 => RubyCoreSupport.rational_new!(-1,48), + 0 => RubyCoreSupport.rational_new!(0,1), + 1800 => RubyCoreSupport.rational_new!(1,48), + 3600 => RubyCoreSupport.rational_new!(1,24), + 5400 => RubyCoreSupport.rational_new!(1,16), + 7200 => RubyCoreSupport.rational_new!(1,12), + 9000 => RubyCoreSupport.rational_new!(5,48), + 10800 => RubyCoreSupport.rational_new!(1,8), + 12600 => RubyCoreSupport.rational_new!(7,48), + 14400 => RubyCoreSupport.rational_new!(1,6), + 16200 => RubyCoreSupport.rational_new!(3,16), + 18000 => RubyCoreSupport.rational_new!(5,24), + 19800 => RubyCoreSupport.rational_new!(11,48), + 21600 => RubyCoreSupport.rational_new!(1,4), + 23400 => RubyCoreSupport.rational_new!(13,48), + 25200 => RubyCoreSupport.rational_new!(7,24), + 27000 => RubyCoreSupport.rational_new!(5,16), + 28800 => RubyCoreSupport.rational_new!(1,3), + 30600 => RubyCoreSupport.rational_new!(17,48), + 32400 => RubyCoreSupport.rational_new!(3,8), + 34200 => RubyCoreSupport.rational_new!(19,48), + 36000 => RubyCoreSupport.rational_new!(5,12), + 37800 => RubyCoreSupport.rational_new!(7,16), + 39600 => RubyCoreSupport.rational_new!(11,24), + 41400 => RubyCoreSupport.rational_new!(23,48), + 43200 => RubyCoreSupport.rational_new!(1,2), + 45000 => RubyCoreSupport.rational_new!(25,48), + 46800 => RubyCoreSupport.rational_new!(13,24), + 48600 => RubyCoreSupport.rational_new!(9,16), + 50400 => RubyCoreSupport.rational_new!(7,12)}.freeze + + # Returns a Rational expressing the fraction of a day that offset in + # seconds represents (i.e. equivalent to Rational(offset, 86400)). 
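A quick check of the pre-reduced table above; offsets outside the cached half-hour steps fall back to Rational(offset, 86400):

  require 'tzinfo'

  TZInfo::OffsetRationals.rational_for_offset(3600)   # => (1/24)
  TZInfo::OffsetRationals.rational_for_offset(5400)   # => (1/16)
  TZInfo::OffsetRationals.rational_for_offset(4500)   # not cached: falls back to Rational(4500, 86400) => (5/96)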
+ def rational_for_offset(offset) + @@rational_cache[offset] || Rational(offset, 86400) + end + module_function :rational_for_offset + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb new file mode 100644 index 0000000..af2d7e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb @@ -0,0 +1,136 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'strscan' + +module TZInfo + # An {InvalidPosixTimeZone} exception is raised if an invalid POSIX-style + # time zone string is encountered. + # + # @private + class InvalidPosixTimeZone < StandardError #:nodoc: + end + + # A parser for POSIX-style TZ strings used in zoneinfo files and specified + # by tzfile.5 and tzset.3. + # + # @private + class PosixTimeZoneParser #:nodoc: + # Parses a POSIX-style TZ string, returning either a TimezoneOffset or + # an AnnualRules instance. + def parse(tz_string) + raise InvalidPosixTimeZone unless tz_string.kind_of?(String) + return nil if tz_string.empty? + + s = StringScanner.new(tz_string) + check_scan(s, /([^-+,\d<][^-+,\d]*) | <([^>]+)>/x) + std_abbrev = s[1] || s[2] + check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + std_offset = get_offset_from_hms(s[1], s[2], s[3]) + + if s.scan(/([^-+,\d<][^-+,\d]*) | <([^>]+)>/x) + dst_abbrev = s[1] || s[2] + + if s.scan(/([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + dst_offset = get_offset_from_hms(s[1], s[2], s[3]) + else + # POSIX is negative for ahead of UTC. + dst_offset = std_offset - 3600 + end + + dst_difference = std_offset - dst_offset + + start_rule = parse_rule(s, 'start') + end_rule = parse_rule(s, 'end') + + raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." if s.rest? + + if start_rule.is_always_first_day_of_year? && start_rule.transition_at == 0 && + end_rule.is_always_last_day_of_year? && end_rule.transition_at == 86400 + dst_difference + # Constant daylight savings time. + # POSIX is negative for ahead of UTC. + TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev.to_sym) + else + AnnualRules.new( + TimezoneOffset.new(-std_offset, 0, std_abbrev.to_sym), + TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev.to_sym), + start_rule, + end_rule) + end + elsif !s.rest? + # Constant standard time. + # POSIX is negative for ahead of UTC. + TimezoneOffset.new(-std_offset, 0, std_abbrev.to_sym) + else + raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." + end + end + + private + + # Parses the rule from the TZ string, returning a TransitionRule. 
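For illustration only (PosixTimeZoneParser is an internal class), a sketch of what parse returns for the two shapes of TZ string handled above:

  require 'tzinfo'

  parser = TZInfo::PosixTimeZoneParser.new
  parser.parse('UTC0').class                      # => TZInfo::TimezoneOffset (constant offset)
  parser.parse('EST5EDT,M3.2.0,M11.1.0').class    # => TZInfo::AnnualRules (annually recurring DST rules)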
+ def parse_rule(s, type) + check_scan(s, /,(?: (?: J(\d+) ) | (\d+) | (?: M(\d+)\.(\d)\.(\d) ) )/x) + julian_day_of_year = s[1] + absolute_day_of_year = s[2] + month = s[3] + week = s[4] + day_of_week = s[5] + + if s.scan(/\//) + check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + transition_at = get_seconds_after_midnight_from_hms(s[1], s[2], s[3]) + else + transition_at = 7200 + end + + begin + if julian_day_of_year + JulianDayOfYearTransitionRule.new(julian_day_of_year.to_i, transition_at) + elsif absolute_day_of_year + AbsoluteDayOfYearTransitionRule.new(absolute_day_of_year.to_i, transition_at) + elsif week == '5' + LastDayOfMonthTransitionRule.new(month.to_i, day_of_week.to_i, transition_at) + else + DayOfMonthTransitionRule.new(month.to_i, week.to_i, day_of_week.to_i, transition_at) + end + rescue ArgumentError => e + raise InvalidPosixTimeZone, "Invalid #{type} rule in POSIX-style time zone string: #{e}" + end + end + + # Returns an offset in seconds from hh:mm:ss values. The value can be + # negative. -02:33:12 would represent 2 hours, 33 minutes and 12 seconds + # ahead of UTC. + def get_offset_from_hms(h, m, s) + h = h.to_i + m = m.to_i + s = s.to_i + raise InvalidPosixTimeZone, "Invalid minute #{m} in offset for POSIX-style time zone string." if m > 59 + raise InvalidPosixTimeZone, "Invalid second #{s} in offset for POSIX-style time zone string." if s > 59 + magnitude = (h.abs * 60 + m) * 60 + s + h < 0 ? -magnitude : magnitude + end + + # Returns the seconds from midnight from hh:mm:ss values. Hours can exceed + # 24 for a time on the following day. Hours can be negative to subtract + # hours from midnight on the given day. -02:33:12 represents 22:33:12 on + # the prior day. + def get_seconds_after_midnight_from_hms(h, m, s) + h = h.to_i + m = m.to_i + s = s.to_i + raise InvalidPosixTimeZone, "Invalid minute #{m} in time for POSIX-style time zone string." if m > 59 + raise InvalidPosixTimeZone, "Invalid second #{s} in time for POSIX-style time zone string." if s > 59 + (h * 3600) + m * 60 + s + end + + # Scans for a pattern and raises an exception if the pattern does not + # match the input. + def check_scan(s, pattern) + result = s.scan(pattern) + raise InvalidPosixTimeZone, "Expected '#{s.rest}' to match #{pattern} in POSIX-style time zone string." unless result + result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_core_support.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_core_support.rb new file mode 100644 index 0000000..7ba776b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_core_support.rb @@ -0,0 +1,169 @@ +require 'date' +require 'rational' unless defined?(Rational) + +module TZInfo + + # Methods to support different versions of Ruby. + # + # @private + module RubyCoreSupport #:nodoc: + + # Use Rational.new! for performance reasons in Ruby 1.8. + # This has been removed from 1.9, but Rational performs better. + if Rational.respond_to? :new! + def self.rational_new!(numerator, denominator = 1) + Rational.new!(numerator, denominator) + end + else + def self.rational_new!(numerator, denominator = 1) + Rational(numerator, denominator) + end + end + + # Ruby 1.8.6 introduced new! and deprecated new0. + # Ruby 1.9.0 removed new0. + # Ruby trunk revision 31668 removed the new! method. + # Still support new0 for better performance on older versions of Ruby (new0 indicates + # that the rational has already been reduced to its lowest terms). 
+ # Fallback to jd with conversion from ajd if new! and new0 are unavailable. + if DateTime.respond_to? :new! + def self.datetime_new!(ajd = 0, of = 0, sg = Date::ITALY) + DateTime.new!(ajd, of, sg) + end + elsif DateTime.respond_to? :new0 + def self.datetime_new!(ajd = 0, of = 0, sg = Date::ITALY) + DateTime.new0(ajd, of, sg) + end + else + HALF_DAYS_IN_DAY = rational_new!(1, 2) + + def self.datetime_new!(ajd = 0, of = 0, sg = Date::ITALY) + # Convert from an Astronomical Julian Day number to a civil Julian Day number. + jd = ajd + of + HALF_DAYS_IN_DAY + + # Ruby trunk revision 31862 changed the behaviour of DateTime.jd so that it will no + # longer accept a fractional civil Julian Day number if further arguments are specified. + # Calculate the hours, minutes and seconds to pass to jd. + + jd_i = jd.to_i + jd_i -= 1 if jd < 0 + hours = (jd - jd_i) * 24 + hours_i = hours.to_i + minutes = (hours - hours_i) * 60 + minutes_i = minutes.to_i + seconds = (minutes - minutes_i) * 60 + + DateTime.jd(jd_i, hours_i, minutes_i, seconds, of, sg) + end + end + + # DateTime in Ruby 1.8.6 doesn't consider times within the 60th second to be + # valid. When attempting to specify such a DateTime, subtract the fractional + # part and then add it back later + if Date.respond_to?(:valid_time?) && !Date.valid_time?(0, 0, rational_new!(59001, 1000)) # 0:0:59.001 + def self.datetime_new(y=-4712, m=1, d=1, h=0, min=0, s=0, of=0, sg=Date::ITALY) + if !s.kind_of?(Integer) && s > 59 + dt = DateTime.new(y, m, d, h, min, 59, of, sg) + dt + (s - 59) / 86400 + else + DateTime.new(y, m, d, h, min, s, of, sg) + end + end + else + def self.datetime_new(y=-4712, m=1, d=1, h=0, min=0, s=0, of=0, sg=Date::ITALY) + DateTime.new(y, m, d, h, min, s, of, sg) + end + end + + # Returns true if Time on the runtime platform supports Times defined + # by negative 32-bit timestamps, otherwise false. + begin + Time.at(-1) + Time.at(-2147483648) + + def self.time_supports_negative + true + end + rescue ArgumentError + def self.time_supports_negative + false + end + end + + # Returns true if Time on the runtime platform supports Times defined by + # 64-bit timestamps, otherwise false. + begin + Time.at(-2147483649) + Time.at(2147483648) + + def self.time_supports_64bit + true + end + rescue RangeError + def self.time_supports_64bit + false + end + end + + # Return the result of Time#nsec if it exists, otherwise return the + # result of Time#usec * 1000. + if Time.method_defined?(:nsec) + def self.time_nsec(time) + time.nsec + end + else + def self.time_nsec(time) + time.usec * 1000 + end + end + + # Call String#force_encoding if this version of Ruby has encoding support + # otherwise treat as a no-op. + if String.method_defined?(:force_encoding) + def self.force_encoding(str, encoding) + str.force_encoding(encoding) + end + else + def self.force_encoding(str, encoding) + str + end + end + + + # Wrapper for File.open that supports passing hash options for specifying + # encodings on Ruby 1.9+. The options are ignored on earlier versions of + # Ruby. + if RUBY_VERSION =~ /\A1\.[0-8]\./ + def self.open_file(file_name, mode, opts, &block) + File.open(file_name, mode, &block) + end + elsif RUBY_VERSION =~ /\A1\.9\./ + def self.open_file(file_name, mode, opts, &block) + File.open(file_name, mode, opts, &block) + end + else + # Evaluate method as a string because **opts isn't valid syntax prior to + # Ruby 2.0. 
+ eval(<<-EOF + def self.open_file(file_name, mode, opts, &block) + File.open(file_name, mode, **opts, &block) + end + EOF + ) + end + + + # Object#untaint is a deprecated no-op in Ruby >= 2.7 and will be removed in + # 3.2. Add a refinement to either silence the warning, or supply the method + # if needed. + if !Object.new.respond_to?(:untaint) || RUBY_VERSION =~ /\A(\d+)\.(\d+)(?:\.|\z)/ && ($1 == '2' && $2.to_i >= 7 || $1.to_i >= 3) + module UntaintExt + refine Object do + def untaint + self + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_country_info.rb new file mode 100644 index 0000000..e51915d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_country_info.rb @@ -0,0 +1,74 @@ +module TZInfo + # Represents information about a country returned by RubyDataSource. + # + # @private + class RubyCountryInfo < CountryInfo #:nodoc: + # Constructs a new CountryInfo with an ISO 3166 country code, name and + # block. The block will be evaluated to obtain the timezones for the + # country when the zones are first needed. + def initialize(code, name, &block) + super(code, name) + @block = block + @zones = nil + @zone_identifiers = nil + end + + # Returns a frozen array of all the zone identifiers for the country. These + # are in the order they were added using the timezone method. + def zone_identifiers + # Thread-safety: It is possible that the value of @zone_identifiers may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @zone_identifiers is only + # calculated once. + + unless @zone_identifiers + result = zones.collect {|zone| zone.identifier}.freeze + return result if frozen? + @zone_identifiers = result + end + + @zone_identifiers + end + + # Returns a frozen array of all the timezones for the for the country as + # CountryTimezone instances. These are in the order they were added using + # the timezone method. + def zones + # Thread-safety: It is possible that the value of @zones may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @zones is only + # calculated once. + + unless @zones + zones = Zones.new + @block.call(zones) if @block + result = zones.list.freeze + return result if frozen? + @block = nil + @zones = result + end + + @zones + end + + # An instance of the Zones class is passed to the block used to define + # timezones. + # + # @private + class Zones #:nodoc: + attr_reader :list + + def initialize + @list = [] + end + + # Called by the index data to define a timezone for the country. + def timezone(identifier, latitude_numerator, latitude_denominator, + longitude_numerator, longitude_denominator, description = nil) + @list << CountryTimezone.new!(identifier, latitude_numerator, + latitude_denominator, longitude_numerator, longitude_denominator, + description) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_data_source.rb new file mode 100644 index 0000000..b5a6752 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/ruby_data_source.rb @@ -0,0 +1,140 @@ +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. 
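A sketch of how RubyCountryInfo defers work to its block: the block is only evaluated when zones or zone_identifiers is first called, and it receives a Zones collector. The code, name and coordinates below are illustrative, not real index data:

  require 'tzinfo'

  info = TZInfo::RubyCountryInfo.new('XX', 'Example Land') do |c|
    c.timezone 'Europe/Paris', 733, 15, 7, 3   # identifier, latitude and longitude as pre-reduced rationals
  end
  info.zone_identifiers        # => ["Europe/Paris"] (block evaluated on first use)
  info.zones.first.latitude    # => (733/15)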
See #114. + send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt) + + # A DataSource that loads data from the set of Ruby modules included in the + # TZInfo::Data library (tzinfo-data gem). + # + # To have TZInfo use this DataSource, call TZInfo::DataSource.set as follows: + # + # TZInfo::DataSource.set(:ruby) + class RubyDataSource < DataSource + # Whether the timezone index has been loaded yet. + @@timezone_index_loaded = false + + # Whether the country index has been loaded yet. + @@country_index_loaded = false + + # Initializes a new RubyDataSource instance. + def initialize + tzinfo_data = File.join('tzinfo', 'data') + begin + require(tzinfo_data) + + data_file = File.join('', 'tzinfo', 'data.rb') + path = $".reverse_each.detect {|p| p.end_with?(data_file) } + if path + @base_path = File.join(File.dirname(path), 'data').untaint + else + @base_path = tzinfo_data + end + rescue LoadError + @base_path = tzinfo_data + end + end + + # Returns a TimezoneInfo instance for a given identifier. + # Raises InvalidTimezoneIdentifier if the timezone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + raise InvalidTimezoneIdentifier, 'Invalid identifier' if identifier !~ /^[A-Za-z0-9\+\-_]+(\/[A-Za-z0-9\+\-_]+)*$/ + + identifier = identifier.gsub(/-/, '__m__').gsub(/\+/, '__p__') + + # Untaint identifier after it has been reassigned to a new string. We + # don't want to modify the original identifier. identifier may also be + # frozen and therefore cannot be untainted. + identifier.untaint + + identifier = identifier.split('/') + begin + require_definition(identifier) + + m = Data::Definitions + identifier.each {|part| + m = m.const_get(part) + } + + m.get + rescue LoadError, NameError => e + raise InvalidTimezoneIdentifier, e.message + end + end + + # Returns an array of all the available timezone identifiers. + def timezone_identifiers + load_timezone_index + Data::Indexes::Timezones.timezones + end + + # Returns an array of all the available timezone identifiers for + # data timezones (i.e. those that actually contain definitions). + def data_timezone_identifiers + load_timezone_index + Data::Indexes::Timezones.data_timezones + end + + # Returns an array of all the available timezone identifiers that + # are links to other timezones. + def linked_timezone_identifiers + load_timezone_index + Data::Indexes::Timezones.linked_timezones + end + + # Returns a CountryInfo instance for the given ISO 3166-1 alpha-2 + # country code. Raises InvalidCountryCode if the country could not be found + # or the code is invalid. + def load_country_info(code) + load_country_index + info = Data::Indexes::Countries.countries[code] + raise InvalidCountryCode, 'Invalid country code' unless info + info + end + + # Returns an array of all the available ISO 3166-1 alpha-2 + # country codes. + def country_codes + load_country_index + Data::Indexes::Countries.countries.keys.freeze + end + + # Returns the name of this DataSource. + def to_s + "Ruby DataSource" + end + + private + + # Requires a zone definition by its identifier (split on /). + def require_definition(identifier) + require_data(*(['definitions'] + identifier)) + end + + # Requires an index by its name. + def require_index(name) + require_data(*['indexes', name]) + end + + # Requires a file from tzinfo/data. + def require_data(*file) + require(File.join(@base_path, *file)) + end + + # Loads in the index of timezones if it hasn't already been loaded. 
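A small sketch of the identifier handling described above: '+' and '-' in an identifier are mapped to '__p__' and '__m__' before the definition module is required, and unknown identifiers surface as InvalidTimezoneIdentifier (assumes the tzinfo-data gem):

  require 'tzinfo'

  ds = TZInfo::DataSource.get
  ds.load_timezone_info('Etc/GMT+1').identifier   # => "Etc/GMT+1" (loaded from definitions/Etc/GMT__p__1)
  begin
    ds.load_timezone_info('Not/AZone')
  rescue TZInfo::InvalidTimezoneIdentifier => e
    e.message                                     # the LoadError message from the failed require
  end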
+ def load_timezone_index + unless @@timezone_index_loaded + require_index('timezones') + @@timezone_index_loaded = true + end + end + + # Loads in the index of countries if it hasn't already been loaded. + def load_country_index + unless @@country_index_loaded + require_index('countries') + @@country_index_loaded = true + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/time_or_datetime.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/time_or_datetime.rb new file mode 100644 index 0000000..56c3314 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/time_or_datetime.rb @@ -0,0 +1,351 @@ +require 'date' +require 'rational' unless defined?(Rational) +require 'time' + +module TZInfo + # Used by TZInfo internally to represent either a Time, DateTime or + # an Integer timestamp (seconds since 1970-01-01 00:00:00). + class TimeOrDateTime + include Comparable + + # Constructs a new TimeOrDateTime. timeOrDateTime can be a Time, DateTime + # or Integer. If using a Time or DateTime, any time zone information + # is ignored. + # + # Integer timestamps must be within the range supported by Time on the + # platform being used. + def initialize(timeOrDateTime) + @time = nil + @datetime = nil + @timestamp = nil + + if timeOrDateTime.is_a?(Time) + @time = timeOrDateTime + + # Avoid using the slower Rational class unless necessary. + nsec = RubyCoreSupport.time_nsec(@time) + usec = nsec % 1000 == 0 ? nsec / 1000 : Rational(nsec, 1000) + + @time = Time.utc(@time.year, @time.mon, @time.mday, @time.hour, @time.min, @time.sec, usec) unless @time.utc? + @orig = @time + elsif timeOrDateTime.is_a?(DateTime) + @datetime = timeOrDateTime + @datetime = @datetime.new_offset(0) unless @datetime.offset == 0 + @orig = @datetime + else + @timestamp = timeOrDateTime.to_i + + if !RubyCoreSupport.time_supports_64bit && (@timestamp > 2147483647 || @timestamp < -2147483648 || (@timestamp < 0 && !RubyCoreSupport.time_supports_negative)) + raise RangeError, 'Timestamp is outside the supported range of Time on this platform' + end + + @orig = @timestamp + end + end + + # Returns the time as a Time. + # + # When converting from a DateTime, the result is truncated to microsecond + # precision. + def to_time + # Thread-safety: It is possible that the value of @time may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @time is only + # calculated once. + + unless @time + result = if @timestamp + Time.at(@timestamp).utc + else + Time.utc(year, mon, mday, hour, min, sec, usec) + end + + return result if frozen? + @time = result + end + + @time + end + + # Returns the time as a DateTime. + # + # When converting from a Time, the result is truncated to microsecond + # precision. + def to_datetime + # Thread-safety: It is possible that the value of @datetime may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @datetime is only + # calculated once. + + unless @datetime + # Avoid using Rational unless necessary. + u = usec + s = u == 0 ? sec : Rational(sec * 1000000 + u, 1000000) + result = RubyCoreSupport.datetime_new(year, mon, mday, hour, min, s) + return result if frozen? + @datetime = result + end + + @datetime + end + + # Returns the time as an integer timestamp. + def to_i + # Thread-safety: It is possible that the value of @timestamp may be + # calculated multiple times in concurrently executing threads. 
It is not + # worth the overhead of locking to ensure that @timestamp is only + # calculated once. + + unless @timestamp + result = to_time.to_i + return result if frozen? + @timestamp = result + end + + @timestamp + end + + # Returns the time as the original time passed to new. + def to_orig + @orig + end + + # Returns a string representation of the TimeOrDateTime. + def to_s + if @orig.is_a?(Time) + "Time: #{@orig.to_s}" + elsif @orig.is_a?(DateTime) + "DateTime: #{@orig.to_s}" + else + "Timestamp: #{@orig.to_s}" + end + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{@orig.inspect}>" + end + + # Returns the year. + def year + if @time + @time.year + elsif @datetime + @datetime.year + else + to_time.year + end + end + + # Returns the month of the year (1..12). + def mon + if @time + @time.mon + elsif @datetime + @datetime.mon + else + to_time.mon + end + end + alias :month :mon + + # Returns the day of the month (1..n). + def mday + if @time + @time.mday + elsif @datetime + @datetime.mday + else + to_time.mday + end + end + alias :day :mday + + # Returns the day of the week (0..6 for Sunday to Saturday). + def wday + if @time + @time.wday + elsif @datetime + @datetime.wday + else + to_time.wday + end + end + + # Returns the hour of the day (0..23). + def hour + if @time + @time.hour + elsif @datetime + @datetime.hour + else + to_time.hour + end + end + + # Returns the minute of the hour (0..59). + def min + if @time + @time.min + elsif @datetime + @datetime.min + else + to_time.min + end + end + + # Returns the second of the minute (0..60). (60 for a leap second). + def sec + if @time + @time.sec + elsif @datetime + @datetime.sec + else + to_time.sec + end + end + + # Returns the number of microseconds for the time. + def usec + if @time + @time.usec + elsif @datetime + # Ruby 1.8 has sec_fraction (of which the documentation says + # 'I do NOT recommend you to use this method'). sec_fraction no longer + # exists in Ruby 1.9. + + # Calculate the sec_fraction from the day_fraction. + ((@datetime.day_fraction - OffsetRationals.rational_for_offset(@datetime.hour * 3600 + @datetime.min * 60 + @datetime.sec)) * 86400000000).to_i + else + 0 + end + end + + # Compares this TimeOrDateTime with another Time, DateTime, timestamp + # (Integer) or TimeOrDateTime. Returns -1, 0 or +1 depending + # whether the receiver is less than, equal to, or greater than + # timeOrDateTime. + # + # Returns nil if the passed in timeOrDateTime is not comparable with + # TimeOrDateTime instances. + # + # Comparisons involving a DateTime will be performed using DateTime#<=>. + # Comparisons that don't involve a DateTime, but include a Time will be + # performed with Time#<=>. Otherwise comparisons will be performed with + # Integer#<=>. + def <=>(timeOrDateTime) + return nil unless timeOrDateTime.is_a?(TimeOrDateTime) || + timeOrDateTime.is_a?(Time) || + timeOrDateTime.is_a?(DateTime) || + timeOrDateTime.respond_to?(:to_i) + + unless timeOrDateTime.is_a?(TimeOrDateTime) + timeOrDateTime = TimeOrDateTime.wrap(timeOrDateTime) + end + + orig = timeOrDateTime.to_orig + + if @orig.is_a?(DateTime) || orig.is_a?(DateTime) + # If either is a DateTime, assume it is there for a reason + # (i.e. for its larger range of acceptable values on 32-bit systems). 
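A sketch of the TimeOrDateTime wrapper in isolation; whatever type goes in (Time, DateTime or Integer timestamp) is what comes back out of wrap:

  require 'tzinfo'

  todt = TZInfo::TimeOrDateTime.new(Time.utc(2005, 8, 29, 15, 35, 0))
  todt.to_i               # => 1125329700
  todt.to_datetime.class  # => DateTime

  # wrap converts the block's result back to the originally supplied type:
  TZInfo::TimeOrDateTime.wrap(1125329700) {|w| w + 3600 }   # => 1125333300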
+ to_datetime <=> timeOrDateTime.to_datetime + elsif @orig.is_a?(Time) || orig.is_a?(Time) + to_time <=> timeOrDateTime.to_time + else + to_i <=> timeOrDateTime.to_i + end + end + + # Adds a number of seconds to the TimeOrDateTime. Returns a new + # TimeOrDateTime, preserving what the original constructed type was. + # If the original type is a Time and the resulting calculation goes out of + # range for Times, then an exception will be raised by the Time class. + def +(seconds) + if seconds == 0 + self + else + if @orig.is_a?(DateTime) + TimeOrDateTime.new(@orig + OffsetRationals.rational_for_offset(seconds)) + else + # + defined for Time and Integer + TimeOrDateTime.new(@orig + seconds) + end + end + end + + # Subtracts a number of seconds from the TimeOrDateTime. Returns a new + # TimeOrDateTime, preserving what the original constructed type was. + # If the original type is a Time and the resulting calculation goes out of + # range for Times, then an exception will be raised by the Time class. + def -(seconds) + self + (-seconds) + end + + # Similar to the + operator, but converts to a DateTime based TimeOrDateTime + # where the Time or Integer timestamp to go out of the allowed range for a + # Time, converts to a DateTime based TimeOrDateTime. + # + # Note that the range of Time varies based on the platform. + def add_with_convert(seconds) + if seconds == 0 + self + else + if @orig.is_a?(DateTime) + TimeOrDateTime.new(@orig + OffsetRationals.rational_for_offset(seconds)) + else + # A Time or timestamp. + result = to_i + seconds + + if ((result > 2147483647 || result < -2147483648) && !RubyCoreSupport.time_supports_64bit) || (result < 0 && !RubyCoreSupport.time_supports_negative) + result = TimeOrDateTime.new(to_datetime + OffsetRationals.rational_for_offset(seconds)) + else + result = TimeOrDateTime.new(@orig + seconds) + end + end + end + end + + # Returns true if todt represents the same time and was originally + # constructed with the same type (DateTime, Time or timestamp) as this + # TimeOrDateTime. + def eql?(todt) + todt.kind_of?(TimeOrDateTime) && to_orig.eql?(todt.to_orig) + end + + # Returns a hash of this TimeOrDateTime. + def hash + @orig.hash + end + + # If no block is given, returns a TimeOrDateTime wrapping the given + # timeOrDateTime. If a block is specified, a TimeOrDateTime is constructed + # and passed to the block. The result of the block must be a TimeOrDateTime. + # + # The result of the block will be converted to the type of the originally + # passed in timeOrDateTime and then returned as the result of wrap. + # + # timeOrDateTime can be a Time, DateTime, timestamp (Integer) or + # TimeOrDateTime. If a TimeOrDateTime is passed in, no new TimeOrDateTime + # will be constructed and the value passed to wrap will be used when + # calling the block. + def self.wrap(timeOrDateTime) + t = timeOrDateTime.is_a?(TimeOrDateTime) ? timeOrDateTime : TimeOrDateTime.new(timeOrDateTime) + + if block_given? 
+ t = yield t + + if timeOrDateTime.is_a?(TimeOrDateTime) + t + elsif timeOrDateTime.is_a?(Time) + t.to_time + elsif timeOrDateTime.is_a?(DateTime) + t.to_datetime + else + t.to_i + end + else + t + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone.rb new file mode 100644 index 0000000..ee838b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone.rb @@ -0,0 +1,673 @@ +require 'date' +require 'set' +require 'thread_safe' + +module TZInfo + # AmbiguousTime is raised to indicates that a specified time in a local + # timezone has more than one possible equivalent UTC time. This happens when + # transitioning from daylight savings time to standard time where the clocks + # are rolled back. + # + # AmbiguousTime is raised by period_for_local and local_to_utc when using an + # ambiguous time and not specifying any means to resolve the ambiguity. + class AmbiguousTime < StandardError + end + + # PeriodNotFound is raised to indicate that no TimezonePeriod matching a given + # time could be found. + class PeriodNotFound < StandardError + end + + # Raised by Timezone#get if the identifier given is not valid. + class InvalidTimezoneIdentifier < StandardError + end + + # Raised if an attempt is made to use a timezone created with + # Timezone.new(nil). + class UnknownTimezone < StandardError + end + + # Timezone is the base class of all timezones. It provides a factory method, + # 'get', to access timezones by identifier. Once a specific Timezone has been + # retrieved, DateTimes, Times and timestamps can be converted between the UTC + # and the local time for the zone. For example: + # + # tz = TZInfo::Timezone.get('America/New_York') + # puts tz.utc_to_local(DateTime.new(2005,8,29,15,35,0)).to_s + # puts tz.local_to_utc(Time.utc(2005,8,29,11,35,0)).to_s + # puts tz.utc_to_local(1125315300).to_s + # + # Each time conversion method returns an object of the same type it was + # passed. + # + # The Timezone class is thread-safe. It is safe to use class and instance + # methods of Timezone in concurrently executing threads. Instances of Timezone + # can be shared across thread boundaries. + class Timezone + include Comparable + + # Cache of loaded zones by identifier to avoid using require if a zone + # has already been loaded. + # + # @!visibility private + @@loaded_zones = nil + + # Default value of the dst parameter of the local_to_utc and + # period_for_local methods. + # + # @!visibility private + @@default_dst = nil + + # Sets the default value of the optional dst parameter of the + # local_to_utc and period_for_local methods. Can be set to nil, true or + # false. + # + # The value of default_dst defaults to nil if unset. + def self.default_dst=(value) + @@default_dst = value.nil? ? nil : !!value + end + + # Gets the default value of the optional dst parameter of the + # local_to_utc and period_for_local methods. Can be set to nil, true or + # false. + def self.default_dst + @@default_dst + end + + # Returns a timezone by its identifier (e.g. "Europe/London", + # "America/Chicago" or "UTC"). + # + # Raises InvalidTimezoneIdentifier if the timezone couldn't be found. + def self.get(identifier) + instance = @@loaded_zones[identifier] + + unless instance + # Thread-safety: It is possible that multiple equivalent Timezone + # instances could be created here in concurrently executing threads. 
+ # The consequences of this are that the data may be loaded more than + # once (depending on the data source) and memoized calculations could + # be discarded. The performance benefit of ensuring that only a single + # instance is created is unlikely to be worth the overhead of only + # allowing one Timezone to be loaded at a time. + info = data_source.load_timezone_info(identifier) + instance = info.create_timezone + @@loaded_zones[instance.identifier] = instance + end + + instance + end + + # Returns a proxy for the Timezone with the given identifier. The proxy + # will cause the real timezone to be loaded when an attempt is made to + # find a period or convert a time. get_proxy will not validate the + # identifier. If an invalid identifier is specified, no exception will be + # raised until the proxy is used. + def self.get_proxy(identifier) + TimezoneProxy.new(identifier) + end + + # If identifier is nil calls super(), otherwise calls get. An identfier + # should always be passed in when called externally. + def self.new(identifier = nil) + if identifier + get(identifier) + else + super() + end + end + + # Returns an array containing all the available Timezones. + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all + get_proxies(all_identifiers) + end + + # Returns an array containing the identifiers of all the available + # Timezones. + def self.all_identifiers + data_source.timezone_identifiers + end + + # Returns an array containing all the available Timezones that are based + # on data (are not links to other Timezones). + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all_data_zones + get_proxies(all_data_zone_identifiers) + end + + # Returns an array containing the identifiers of all the available + # Timezones that are based on data (are not links to other Timezones).. + def self.all_data_zone_identifiers + data_source.data_timezone_identifiers + end + + # Returns an array containing all the available Timezones that are links + # to other Timezones. + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all_linked_zones + get_proxies(all_linked_zone_identifiers) + end + + # Returns an array containing the identifiers of all the available + # Timezones that are links to other Timezones. + def self.all_linked_zone_identifiers + data_source.linked_timezone_identifiers + end + + # Returns all the Timezones defined for all Countries. This is not the + # complete set of Timezones as some are not country specific (e.g. + # 'Etc/GMT'). + # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.all_country_zones + Country.all_codes.inject([]) do |zones,country| + zones += Country.get(country).zones + end.uniq + end + + # Returns all the zone identifiers defined for all Countries. This is not the + # complete set of zone identifiers as some are not country specific (e.g. + # 'Etc/GMT'). You can obtain a Timezone instance for a given identifier + # with the get method. + def self.all_country_zone_identifiers + Country.all_codes.inject([]) do |zones,country| + zones += Country.get(country).zone_identifiers + end.uniq + end + + # Returns all US Timezone instances. A shortcut for + # TZInfo::Country.get('US').zones. 
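A sketch of the proxy behaviour described above: get_proxy performs no validation, so a bad identifier only raises once the proxy is actually used (assumes the tzinfo-data gem):

  require 'tzinfo'

  proxy = TZInfo::Timezone.get_proxy('Not/AZone')   # no exception yet
  begin
    proxy.utc_to_local(Time.utc(2020, 1, 1))        # loads the real zone on first use
  rescue TZInfo::InvalidTimezoneIdentifier => e
    e.message
  end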
+ # + # Returns TimezoneProxy objects to avoid the overhead of loading Timezone + # definitions until a conversion is actually required. + def self.us_zones + Country.get('US').zones + end + + # Returns all US zone identifiers. A shortcut for + # TZInfo::Country.get('US').zone_identifiers. + def self.us_zone_identifiers + Country.get('US').zone_identifiers + end + + # The identifier of the timezone, e.g. "Europe/Paris". + def identifier + raise_unknown_timezone + end + + # An alias for identifier. + def name + # Don't use alias, as identifier gets overridden. + identifier + end + + # Returns a friendlier version of the identifier. + def to_s + friendly_identifier + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{identifier}>" + end + + # Returns a friendlier version of the identifier. Set skip_first_part to + # omit the first part of the identifier (typically a region name) where + # there is more than one part. + # + # For example: + # + # Timezone.get('Europe/Paris').friendly_identifier(false) #=> "Europe - Paris" + # Timezone.get('Europe/Paris').friendly_identifier(true) #=> "Paris" + # Timezone.get('America/Indiana/Knox').friendly_identifier(false) #=> "America - Knox, Indiana" + # Timezone.get('America/Indiana/Knox').friendly_identifier(true) #=> "Knox, Indiana" + def friendly_identifier(skip_first_part = false) + parts = identifier.split('/') + if parts.empty? + # shouldn't happen + identifier + elsif parts.length == 1 + parts[0] + else + prefix = skip_first_part ? nil : "#{parts[0]} - " + + parts = parts.drop(1).map do |part| + part.gsub!(/_/, ' ') + + if part.index(/[a-z]/) + # Missing a space if a lower case followed by an upper case and the + # name isn't McXxxx. + part.gsub!(/([^M][a-z])([A-Z])/, '\1 \2') + part.gsub!(/([M][a-bd-z])([A-Z])/, '\1 \2') + + # Missing an apostrophe if two consecutive upper case characters. + part.gsub!(/([A-Z])([A-Z])/, '\1\'\2') + end + + part + end + + "#{prefix}#{parts.reverse.join(', ')}" + end + end + + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in utc is ignored (it is treated as a UTC time). + def period_for_utc(utc) + raise_unknown_timezone + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how ambiguities should be resolved. + # Returns an empty array if no periods are found for the given time. + def periods_for_local(local) + raise_unknown_timezone + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). 
+ # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + raise_unknown_timezone + end + + # Returns the canonical Timezone instance for this Timezone. + # + # The IANA Time Zone database contains two types of definition: Zones and + # Links. Zones are defined by rules that set out when transitions occur. + # Links are just references to fully defined Zone, creating an alias for + # that Zone. + # + # Links are commonly used where a time zone has been renamed in a + # release of the Time Zone database. For example, the Zone US/Eastern was + # renamed as America/New_York. A US/Eastern Link was added in its place, + # linking to (and creating an alias for) for America/New_York. + # + # Links are also used for time zones that are currently identical to a full + # Zone, but that are administered seperately. For example, Europe/Vatican is + # a Link to (and alias for) Europe/Rome. + # + # For a full Zone, canonical_zone returns self. + # + # For a Link, canonical_zone returns a Timezone instance representing the + # full Zone that the link targets. + # + # TZInfo can be used with different data sources (see the documentation for + # TZInfo::DataSource). Please note that some DataSource implementations may + # not support distinguishing between full Zones and Links and will treat all + # time zones as full Zones. In this case, the canonical_zone will always + # return self. + # + # There are two built-in DataSource implementations. RubyDataSource (which + # will be used if the tzinfo-data gem is available) supports Link zones. + # ZoneinfoDataSource returns Link zones as if they were full Zones. If the + # canonical_zone or canonical_identifier methods are required, the + # tzinfo-data gem should be installed. + # + # The TZInfo::DataSource.get method can be used to check which DataSource + # implementation is being used. + def canonical_zone + raise_unknown_timezone + end + + # Returns the TimezonePeriod for the given local time. local can either be + # a DateTime, Time or integer timestamp (Time.to_i). Any timezone + # information in local is ignored (it is treated as a time in the current + # timezone). + # + # Warning: There are local times that have no equivalent UTC times (e.g. + # in the transition from standard time to daylight savings time). There are + # also local times that have more than one UTC equivalent (e.g. in the + # transition from daylight savings time to standard time). + # + # In the first case (no equivalent UTC time), a PeriodNotFound exception + # will be raised. + # + # In the second case (more than one equivalent UTC time), an AmbiguousTime + # exception will be raised unless the optional dst parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the dst parameter can be used to select whether the + # daylight savings time or local time is used. For example, + # + # Timezone.get('America/New_York').period_for_local(DateTime.new(2004,10,31,1,30,0)) + # + # would raise an AmbiguousTime exception. + # + # Specifying dst=true would the daylight savings period from April to + # October 2004. Specifying dst=false would return the standard period + # from October 2004 to April 2005. + # + # If the dst parameter does not resolve the ambiguity, and a block is + # specified, it is called. 
The block must take a single parameter - an + # array of the periods that need to be resolved. The block can select and + # return a single period or return nil or an empty array + # to cause an AmbiguousTime exception to be raised. + # + # The default value of the dst parameter can be specified by setting + # Timezone.default_dst. If default_dst is not set, or is set to nil, then + # an AmbiguousTime exception will be raised in ambiguous situations unless + # a block is given to resolve the ambiguity. + def period_for_local(local, dst = Timezone.default_dst) + results = periods_for_local(local) + + if results.empty? + raise PeriodNotFound + elsif results.size < 2 + results.first + else + # ambiguous result try to resolve + + if !dst.nil? + matches = results.find_all {|period| period.dst? == dst} + results = matches if !matches.empty? + end + + if results.size < 2 + results.first + else + # still ambiguous, try the block + + if block_given? + results = yield results + end + + if results.is_a?(TimezonePeriod) + results + elsif results && results.size == 1 + results.first + else + raise AmbiguousTime, "#{local} is an ambiguous local time." + end + end + end + end + + # Converts a time in UTC to the local timezone. utc can either be + # a DateTime, Time or timestamp (Time.to_i). The returned time has the same + # type as utc. Any timezone information in utc is ignored (it is treated as + # a UTC time). + def utc_to_local(utc) + TimeOrDateTime.wrap(utc) {|wrapped| + period_for_utc(wrapped).to_local(wrapped) + } + end + + # Converts a time in the local timezone to UTC. local can either be + # a DateTime, Time or timestamp (Time.to_i). The returned time has the same + # type as local. Any timezone information in local is ignored (it is treated + # as a local time). + # + # Warning: There are local times that have no equivalent UTC times (e.g. + # in the transition from standard time to daylight savings time). There are + # also local times that have more than one UTC equivalent (e.g. in the + # transition from daylight savings time to standard time). + # + # In the first case (no equivalent UTC time), a PeriodNotFound exception + # will be raised. + # + # In the second case (more than one equivalent UTC time), an AmbiguousTime + # exception will be raised unless the optional dst parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the dst parameter can be used to select whether the + # daylight savings time or local time is used. For example, + # + # Timezone.get('America/New_York').local_to_utc(DateTime.new(2004,10,31,1,30,0)) + # + # would raise an AmbiguousTime exception. + # + # Specifying dst=true would return 2004-10-31 5:30:00. Specifying dst=false + # would return 2004-10-31 6:30:00. + # + # If the dst parameter does not resolve the ambiguity, and a block is + # specified, it is called. The block must take a single parameter - an + # array of the periods that need to be resolved. The block can return a + # single period to use to convert the time or return nil or an empty array + # to cause an AmbiguousTime exception to be raised. + # + # The default value of the dst parameter can be specified by setting + # Timezone.default_dst. If default_dst is not set, or is set to nil, then + # an AmbiguousTime exception will be raised in ambiguous situations unless + # a block is given to resolve the ambiguity. 
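A short sketch of both ways of resolving the ambiguity described above, using the America/New_York example from these comments (local_to_utc, defined next, forwards the dst parameter and block to period_for_local):

  require 'tzinfo'
  require 'date'

  tz = TZInfo::Timezone.get('America/New_York')
  ambiguous = DateTime.new(2004, 10, 31, 1, 30, 0)

  tz.local_to_utc(ambiguous, true)   #=> 2004-10-31T05:30:00+00:00 (daylight savings reading)
  tz.local_to_utc(ambiguous, false)  #=> 2004-10-31T06:30:00+00:00 (standard time reading)

  # Resolve with a block instead: choose the earlier (daylight savings) period.
  tz.local_to_utc(ambiguous) {|periods| periods.first }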
+ def local_to_utc(local, dst = Timezone.default_dst) + TimeOrDateTime.wrap(local) {|wrapped| + if block_given? + period = period_for_local(wrapped, dst) {|periods| yield periods } + else + period = period_for_local(wrapped, dst) + end + + period.to_utc(wrapped) + } + end + + # Returns information about offsets used by the Timezone up to a given + # date and time, specified using UTC (utc_to). The information is returned + # as an Array of TimezoneOffset instances. + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only offsets used from + # that date and time forward will be returned. + # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. + # + # Offsets may be returned in any order. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # offsets_up_to raises an ArgumentError exception. + def offsets_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + transitions = transitions_up_to(utc_to, utc_from) + + if transitions.empty? + # No transitions in the range, find the period that covers it. + + if utc_from + # Use the from date as it is inclusive. + period = period_for_utc(utc_from) + else + # utc_to is exclusive, so this can't be used with period_for_utc. + # However, any time earlier than utc_to can be used. + + # Subtract 1 hour (since this is one of the cached OffsetRationals). + # Use add_with_convert so that conversion to DateTime is performed if + # required. + period = period_for_utc(utc_to.add_with_convert(-3600)) + end + + [period.offset] + else + result = Set.new + + first = transitions.first + result << first.previous_offset unless utc_from && first.at == utc_from + + transitions.each do |t| + result << t.offset + end + + result.to_a + end + end + + # Returns the canonical identifier for this Timezone. + # + # This is a shortcut for calling canonical_zone.identifier. Please refer + # to the canonical_zone documentation for further information. + def canonical_identifier + canonical_zone.identifier + end + + # Returns the current time in the timezone as a Time. + def now + utc_to_local(Time.now.utc) + end + + # Returns the TimezonePeriod for the current time. + def current_period + period_for_utc(Time.now.utc) + end + + # Returns the current Time and TimezonePeriod as an array. The first element + # is the time, the second element is the period. + def current_period_and_time + utc = Time.now.utc + period = period_for_utc(utc) + [period.to_local(utc), period] + end + + alias :current_time_and_period :current_period_and_time + + # Converts a time in UTC to local time and returns it as a string according + # to the given format. + # + # The formatting is identical to Time.strftime and DateTime.strftime, except + # %Z and %z are replaced with the timezone abbreviation (for example, EST or + # EDT) and offset for the specified Timezone and time. + # + # The offset can be formatted as follows: + # + # - %z - hour and minute (e.g. +0500) + # - %:z - hour and minute separated with a colon (e.g. +05:00) + # - %::z - hour minute and second separated with colons (e.g. +05:00:00) + # - %:::z - hour only (e.g. +05) + # + # Timezone#strftime currently handles the replacement of %z. From TZInfo + # version 2.0.0, %z will be passed to Time#strftime and DateTime#strftime + # instead. 
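For example (a sketch assuming the America/New_York data; 12:00 UTC on 1 July 2017 is 08:00 EDT, UTC-04:00):

  require 'tzinfo'

  tz  = TZInfo::Timezone.get('America/New_York')
  utc = Time.utc(2017, 7, 1, 12, 0, 0)

  tz.strftime('%Y-%m-%d %H:%M %Z', utc)   #=> "2017-07-01 08:00 EDT"
  tz.strftime('%H:%M %z',    utc)         #=> "08:00 -0400"
  tz.strftime('%H:%M %:z',   utc)         #=> "08:00 -04:00"
  tz.strftime('%H:%M %:::z', utc)         #=> "08:00 -04"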
Some of the formatting options may cease to be available + # depending on the version of Ruby in use (for example, %:::z is only + # supported by Time#strftime from MRI version 2.0.0 onwards). + def strftime(format, utc = Time.now.utc) + utc = TimeOrDateTime.wrap(utc) + period = period_for_utc(utc) + local_wrapped = period.to_local(utc) + local = local_wrapped.to_orig + local = local_wrapped.to_time unless local.kind_of?(Time) || local.kind_of?(DateTime) + abbreviation = period.abbreviation.to_s.gsub(/%/, '%%') + + format = format.gsub(/%(%*)([sZ]|:*z)/) do + if $1.length.odd? + # Escaped literal percent or series of percents. Pass on to strftime. + "#$1%#$2" + elsif $2 == "s" + "#$1#{utc.to_i}" + elsif $2 == "Z" + "#$1#{abbreviation}" + else + m, s = period.utc_total_offset.divmod(60) + h, m = m.divmod(60) + case $2.length + when 1 + "#$1#{'%+03d%02d' % [h,m]}" + when 2 + "#$1#{'%+03d:%02d' % [h,m]}" + when 3 + "#$1#{'%+03d:%02d:%02d' % [h,m,s]}" + when 4 + "#$1#{'%+03d' % [h]}" + else # more than 3 colons - not a valid option + # Passing the invalid format string through to Time#strftime or + # DateTime#strtime would normally result in it being returned in the + # result. However, with Ruby 1.8.7 on Windows (as tested with Ruby + # 1.8.7-p374 from https://rubyinstaller.org/downloads/archives), + # this causes Time#strftime to always return an empty string (e.g. + # Time.now.strftime('a %::::z b') returns ''). + # + # Escape the percent to force it to be evaluated as a literal. + "#$1%%#$2" + end + end + end + + local.strftime(format) + end + + # Compares two Timezones based on their identifier. Returns -1 if tz is less + # than self, 0 if tz is equal to self and +1 if tz is greater than self. + # + # Returns nil if tz is not comparable with Timezone instances. + def <=>(tz) + return nil unless tz.is_a?(Timezone) + identifier <=> tz.identifier + end + + # Returns true if and only if the identifier of tz is equal to the + # identifier of this Timezone. + def eql?(tz) + self == tz + end + + # Returns a hash of this Timezone. + def hash + identifier.hash + end + + # Dumps this Timezone for marshalling. + def _dump(limit) + identifier + end + + # Loads a marshalled Timezone. + def self._load(data) + Timezone.get(data) + end + + private + # Initializes @@loaded_zones. + def self.init_loaded_zones + @@loaded_zones = ThreadSafe::Cache.new + end + init_loaded_zones + + # Returns an array of proxies corresponding to the given array of + # identifiers. + def self.get_proxies(identifiers) + identifiers.collect {|identifier| get_proxy(identifier)} + end + + # Returns the current DataSource. + def self.data_source + DataSource.get + end + + # Raises an UnknownTimezone exception. + def raise_unknown_timezone + raise UnknownTimezone, 'TZInfo::Timezone constructed directly' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_definition.rb new file mode 100644 index 0000000..5ceb686 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_definition.rb @@ -0,0 +1,36 @@ +module TZInfo + + # TimezoneDefinition is included into Timezone definition modules. + # TimezoneDefinition provides the methods for defining timezones. + # + # @private + module TimezoneDefinition #:nodoc: + # Add class methods to the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + end + + # Class methods for inclusion. 
+ # + # @private + module ClassMethods #:nodoc: + # Returns and yields a TransitionDataTimezoneInfo object to define a + # timezone. + def timezone(identifier) + yield @timezone = TransitionDataTimezoneInfo.new(identifier) + end + + # Defines a linked timezone. + def linked_timezone(identifier, link_to_identifier) + @timezone = LinkedTimezoneInfo.new(identifier, link_to_identifier) + end + + # Returns the last TimezoneInfo to be defined with timezone or + # linked_timezone. + def get + @timezone + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_index_definition.rb new file mode 100644 index 0000000..59dc953 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_index_definition.rb @@ -0,0 +1,54 @@ +module TZInfo + # The timezone index file includes TimezoneIndexDefinition which provides + # methods used to define timezones in the index. + # + # @private + module TimezoneIndexDefinition #:nodoc: + # Add class methods to the includee and initialize class instance variables. + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval do + @timezones = [] + @data_timezones = [] + @linked_timezones = [] + end + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # Defines a timezone based on data. + def timezone(identifier) + @timezones << identifier + @data_timezones << identifier + end + + # Defines a timezone which is a link to another timezone. + def linked_timezone(identifier) + @timezones << identifier + @linked_timezones << identifier + end + + # Returns a frozen array containing the identifiers of all the timezones. + # Identifiers appear in the order they were defined in the index. + def timezones + @timezones.freeze + end + + # Returns a frozen array containing the identifiers of all data timezones. + # Identifiers appear in the order they were defined in the index. + def data_timezones + @data_timezones.freeze + end + + # Returns a frozen array containing the identifiers of all linked + # timezones. Identifiers appear in the order they were defined in + # the index. + def linked_timezones + @linked_timezones.freeze + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_info.rb new file mode 100644 index 0000000..13f66ba --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_info.rb @@ -0,0 +1,30 @@ +module TZInfo + # Represents a timezone defined by a data source. + class TimezoneInfo + + # The timezone identifier. + attr_reader :identifier + + # Constructs a new TimezoneInfo with an identifier. + def initialize(identifier) + @identifier = identifier + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@identifier>" + end + + # Constructs a Timezone instance for the timezone represented by this + # TimezoneInfo. 
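The TimezoneDefinition and TimezoneIndexDefinition modules above are mixed into generated index and definition files. A heavily abbreviated, hypothetical sketch of such a definition module (real definitions are generated by the tzinfo-data gem and carry the full transition history):

  require 'tzinfo'

  module TZInfo
    module Definitions
      module Example
        module Zone
          include TimezoneDefinition

          timezone 'Example/Zone' do |tz|
            tz.offset :o0, -18000, 0,    :EST  # standard time, UTC-5
            tz.offset :o1, -18000, 3600, :EDT  # daylight savings, UTC-4

            tz.transition 2021, 3,  :o1, 1615705200  # 2021-03-14 07:00:00 UTC
            tz.transition 2021, 11, :o0, 1636264800  # 2021-11-07 06:00:00 UTC
          end
        end
      end
    end
  end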
+ def create_timezone + raise_not_implemented('create_timezone') + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_offset.rb new file mode 100644 index 0000000..dbce0e9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_offset.rb @@ -0,0 +1,101 @@ +module TZInfo + # Represents an offset defined in a Timezone data file. + class TimezoneOffset + # The base offset of the timezone from UTC in seconds. This does not include + # any adjustment made for daylight savings time and will typically remain + # constant throughout the year. + # + # To obtain the currently observed offset from UTC, including the effect of + # daylight savings time, use utc_total_offset instead. + # + # Note that zoneinfo files only include the value of utc_total_offset and a + # DST flag. When using ZoneinfoDataSource, the utc_offset will be derived + # from changes to the UTC total offset and the DST flag. As a consequence, + # utc_total_offset will always be correct, but utc_offset may be inaccurate. + # + # If you require utc_offset to be accurate, install the tzinfo-data gem and + # set RubyDataSource as the DataSource. + attr_reader :utc_offset + + # The offset from the time zone's standard time in seconds. Zero + # when daylight savings time is not in effect. Non-zero (usually 3600 = 1 + # hour) if daylight savings is being observed. + # + # Note that zoneinfo files only include the value of utc_total_offset and + # a DST flag. When using DataSources::ZoneinfoDataSource, the std_offset + # will be derived from changes to the UTC total offset and the DST flag. As + # a consequence, utc_total_offset will always be correct, but std_offset + # may be inaccurate. + # + # If you require std_offset to be accurate, install the tzinfo-data gem + # and set RubyDataSource as the DataSource. + attr_reader :std_offset + + # The total offset of this observance from UTC in seconds + # (utc_offset + std_offset). + attr_reader :utc_total_offset + + # The abbreviation that identifies this observance, e.g. "GMT" + # (Greenwich Mean Time) or "BST" (British Summer Time) for "Europe/London". The returned identifier is a + # symbol. + attr_reader :abbreviation + + # Constructs a new TimezoneOffset. utc_offset and std_offset are specified + # in seconds. + def initialize(utc_offset, std_offset, abbreviation) + @utc_offset = utc_offset + @std_offset = std_offset + @abbreviation = abbreviation + + @utc_total_offset = @utc_offset + @std_offset + end + + # True if std_offset is non-zero. + def dst? + @std_offset != 0 + end + + # Converts a UTC Time, DateTime or integer timestamp to local time, based on + # the offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_local(utc) + TimeOrDateTime.wrap(utc) {|wrapped| + wrapped + @utc_total_offset + } + end + + # Converts a local Time, DateTime or integer timestamp to UTC, based on the + # offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_utc(local) + TimeOrDateTime.wrap(local) {|wrapped| + wrapped - @utc_total_offset + } + end + + # Returns true if and only if toi has the same utc_offset, std_offset + # and abbreviation as this TimezoneOffset. 
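A small sketch of TimezoneOffset in isolation (constructed directly here purely for illustration; within TZInfo these objects normally come from a data source):

  require 'tzinfo'

  offset = TZInfo::TimezoneOffset.new(-18000, 3600, :EDT)

  offset.utc_offset        #=> -18000
  offset.std_offset        #=> 3600
  offset.utc_total_offset  #=> -14400
  offset.dst?              #=> true

  # to_local shifts a UTC time by utc_total_offset to give the local wall-clock time.
  offset.to_local(Time.utc(2017, 7, 1, 12, 0, 0))  #=> 2017-07-01 08:00:00 (wall clock)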
+ def ==(toi) + toi.kind_of?(TimezoneOffset) && + utc_offset == toi.utc_offset && std_offset == toi.std_offset && abbreviation == toi.abbreviation + end + + # Returns true if and only if toi has the same utc_offset, std_offset + # and abbreviation as this TimezoneOffset. + def eql?(toi) + self == toi + end + + # Returns a hash of this TimezoneOffset. + def hash + utc_offset.hash ^ std_offset.hash ^ abbreviation.hash + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #@utc_offset,#@std_offset,#@abbreviation>" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_period.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_period.rb new file mode 100644 index 0000000..2805841 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_period.rb @@ -0,0 +1,245 @@ +module TZInfo + # A period of time in a timezone where the same offset from UTC applies. + # + # All the methods that take times accept instances of Time or DateTime as well + # as Integer timestamps. + class TimezonePeriod + # The TimezoneTransition that defines the start of this TimezonePeriod + # (may be nil if unbounded). + attr_reader :start_transition + + # The TimezoneTransition that defines the end of this TimezonePeriod + # (may be nil if unbounded). + attr_reader :end_transition + + # The TimezoneOffset for this period. + attr_reader :offset + + # Initializes a new TimezonePeriod. + # + # TimezonePeriod instances should not normally be constructed manually. + def initialize(start_transition, end_transition, offset = nil) + @start_transition = start_transition + @end_transition = end_transition + + if offset + raise ArgumentError, 'Offset specified with transitions' if @start_transition || @end_transition + @offset = offset + else + if @start_transition + @offset = @start_transition.offset + elsif @end_transition + @offset = @end_transition.previous_offset + else + raise ArgumentError, 'No offset specified and no transitions to determine it from' + end + end + + @utc_total_offset_rational = nil + end + + # The base offset of the timezone from UTC in seconds. This does not include + # any adjustment made for daylight savings time and will typically remain + # constant throughout the year. + # + # To obtain the currently observed offset from UTC, including the effect of + # daylight savings time, use utc_total_offset instead. + # + # Note that zoneinfo files only include the value of utc_total_offset and a + # DST flag. When using ZoneinfoDataSource, the utc_offset will be derived + # from changes to the UTC total offset and the DST flag. As a consequence, + # utc_total_offset will always be correct, but utc_offset may be inaccurate. + # + # If you require utc_offset to be accurate, install the tzinfo-data gem and + # set RubyDataSource as the DataSource. + def utc_offset + @offset.utc_offset + end + + # The offset from the time zone's standard time in seconds. Zero + # when daylight savings time is not in effect. Non-zero (usually 3600 = 1 + # hour) if daylight savings is being observed. + # + # Note that zoneinfo files only include the value of utc_total_offset and + # a DST flag. When using DataSources::ZoneinfoDataSource, the std_offset + # will be derived from changes to the UTC total offset and the DST flag. As + # a consequence, utc_total_offset will always be correct, but std_offset + # may be inaccurate. 
+ # + # If you require std_offset to be accurate, install the tzinfo-data gem + # and set RubyDataSource as the DataSource. + def std_offset + @offset.std_offset + end + + # The identifier of this period, e.g. "GMT" (Greenwich Mean Time) or "BST" + # (British Summer Time) for "Europe/London". The returned identifier is a + # symbol. + def abbreviation + @offset.abbreviation + end + alias :zone_identifier :abbreviation + + # Total offset from UTC (seconds). Equal to utc_offset + std_offset. + def utc_total_offset + @offset.utc_total_offset + end + + # Total offset from UTC (days). Result is a Rational. + def utc_total_offset_rational + # Thread-safety: It is possible that the value of + # @utc_total_offset_rational may be calculated multiple times in + # concurrently executing threads. It is not worth the overhead of locking + # to ensure that @zone_identifiers is only calculated once. + + unless @utc_total_offset_rational + result = OffsetRationals.rational_for_offset(utc_total_offset) + return result if frozen? + @utc_total_offset_rational = result + end + @utc_total_offset_rational + end + + # The start time of the period in UTC as a DateTime. May be nil if unbounded. + def utc_start + @start_transition ? @start_transition.at.to_datetime : nil + end + + # The start time of the period in UTC as a Time. May be nil if unbounded. + def utc_start_time + @start_transition ? @start_transition.at.to_time : nil + end + + # The end time of the period in UTC as a DateTime. May be nil if unbounded. + def utc_end + @end_transition ? @end_transition.at.to_datetime : nil + end + + # The end time of the period in UTC as a Time. May be nil if unbounded. + def utc_end_time + @end_transition ? @end_transition.at.to_time : nil + end + + # The start time of the period in local time as a DateTime. May be nil if + # unbounded. + def local_start + @start_transition ? @start_transition.local_start_at.to_datetime : nil + end + + # The start time of the period in local time as a Time. May be nil if + # unbounded. + def local_start_time + @start_transition ? @start_transition.local_start_at.to_time : nil + end + + # The end time of the period in local time as a DateTime. May be nil if + # unbounded. + def local_end + @end_transition ? @end_transition.local_end_at.to_datetime : nil + end + + # The end time of the period in local time as a Time. May be nil if + # unbounded. + def local_end_time + @end_transition ? @end_transition.local_end_at.to_time : nil + end + + # true if daylight savings is in effect for this period; otherwise false. + def dst? + @offset.dst? + end + + # true if this period is valid for the given UTC DateTime; otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def valid_for_utc?(utc) + utc_after_start?(utc) && utc_before_end?(utc) + end + + # true if the given UTC DateTime is after the start of the period + # (inclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def utc_after_start?(utc) + !@start_transition || @start_transition.at <= utc + end + + # true if the given UTC DateTime is before the end of the period + # (exclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def utc_before_end?(utc) + !@end_transition || @end_transition.at > utc + end + + # true if this period is valid for the given local DateTime; otherwise + # false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. 
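Tying these attributes together (a sketch, again assuming the America/New_York data; 1 July 2017 falls in the EDT period running from 12 March to 5 November 2017):

  require 'tzinfo'

  tz = TZInfo::Timezone.get('America/New_York')
  period = tz.period_for_utc(Time.utc(2017, 7, 1, 12, 0, 0))

  period.abbreviation      #=> :EDT
  period.utc_offset        #=> -18000
  period.std_offset        #=> 3600
  period.utc_total_offset  #=> -14400
  period.dst?              #=> true
  period.utc_start_time    #=> 2017-03-12 07:00:00 UTC
  period.utc_end_time      #=> 2017-11-05 06:00:00 UTC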
+ def valid_for_local?(local) + local_after_start?(local) && local_before_end?(local) + end + + # true if the given local DateTime is after the start of the period + # (inclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def local_after_start?(local) + !@start_transition || @start_transition.local_start_at <= local + end + + # true if the given local DateTime is before the end of the period + # (exclusive); otherwise false. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def local_before_end?(local) + !@end_transition || @end_transition.local_end_at > local + end + + # Converts a UTC DateTime to local time based on the offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_local(utc) + @offset.to_local(utc) + end + + # Converts a local DateTime to UTC based on the offset of this period. + # + # Deprecation warning: this method will be removed in TZInfo version 2.0.0. + def to_utc(local) + @offset.to_utc(local) + end + + # Returns true if this TimezonePeriod is equal to p. This compares the + # start_transition, end_transition and offset using ==. + def ==(p) + p.kind_of?(TimezonePeriod) && + start_transition == p.start_transition && + end_transition == p.end_transition && + offset == p.offset + end + + # Returns true if this TimezonePeriods is equal to p. This compares the + # start_transition, end_transition and offset using eql? + def eql?(p) + p.kind_of?(TimezonePeriod) && + start_transition.eql?(p.start_transition) && + end_transition.eql?(p.end_transition) && + offset.eql?(p.offset) + end + + # Returns a hash of this TimezonePeriod. + def hash + result = @start_transition.hash ^ @end_transition.hash + result ^= @offset.hash unless @start_transition || @end_transition + result + end + + # Returns internal object state as a programmer-readable string. + def inspect + result = "#<#{self.class}: #{@start_transition.inspect},#{@end_transition.inspect}" + result << ",#{@offset.inspect}>" unless @start_transition || @end_transition + result + '>' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_proxy.rb new file mode 100644 index 0000000..c913011 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_proxy.rb @@ -0,0 +1,105 @@ +module TZInfo + + # A proxy class representing a timezone with a given identifier. TimezoneProxy + # inherits from Timezone and can be treated like any Timezone loaded with + # Timezone.get. + # + # The first time an attempt is made to access the data for the timezone, the + # real Timezone is loaded. If the proxy's identifier was not valid, then an + # exception will be raised at this point. + class TimezoneProxy < Timezone + # Construct a new TimezoneProxy for the given identifier. The identifier + # is not checked when constructing the proxy. It will be validated on the + # when the real Timezone is loaded. + def self.new(identifier) + # Need to override new to undo the behaviour introduced in Timezone#new. + tzp = super() + tzp.send(:setup, identifier) + tzp + end + + # The identifier of the timezone, e.g. "Europe/Paris". + def identifier + @real_timezone ? @real_timezone.identifier : @identifier + end + + # Returns the TimezonePeriod for the given UTC time. utc can either be + # a DateTime, Time or integer timestamp (Time.to_i). 
Any timezone + # information in utc is ignored (it is treated as a UTC time). + def period_for_utc(utc) + real_timezone.period_for_utc(utc) + end + + # Returns the set of TimezonePeriod instances that are valid for the given + # local time as an array. If you just want a single period, use + # period_for_local instead and specify how abiguities should be resolved. + # Returns an empty array if no periods are found for the given time. + def periods_for_local(local) + real_timezone.periods_for_local(local) + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time (to). + # + # A from date and time may also be supplied using the from parameter. If + # from is not nil, only transitions from that date and time onwards will be + # returned. + # + # Comparisons with to are exclusive. Comparisons with from are inclusive. + # If a transition falls precisely on to, it will be excluded. If a + # transition falls on from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # to and from can be specified using either a Time, DateTime, Time or + # Timestamp. + # + # If from is specified and to is not greater than from, then an + # ArgumentError exception is raised. + # + # ArgumentError is raised if to is nil or of either to or from are + # Timestamps with unspecified offsets. + def transitions_up_to(to, from = nil) + real_timezone.transitions_up_to(to, from) + end + + # Returns the canonical zone for this Timezone. + def canonical_zone + real_timezone.canonical_zone + end + + # Dumps this TimezoneProxy for marshalling. + def _dump(limit) + identifier + end + + # Loads a marshalled TimezoneProxy. + def self._load(data) + TimezoneProxy.new(data) + end + + private + def setup(identifier) + @identifier = identifier + @real_timezone = nil + end + + def real_timezone + # Thread-safety: It is possible that the value of @real_timezone may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @real_timezone is only + # calculated once. + unless @real_timezone + result = Timezone.get(@identifier) + return result if frozen? + @real_timezone = result + end + + @real_timezone + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition.rb new file mode 100644 index 0000000..b905c62 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition.rb @@ -0,0 +1,130 @@ +module TZInfo + # Represents a transition from one timezone offset to another at a particular + # date and time. + class TimezoneTransition + # The offset this transition changes to (a TimezoneOffset instance). + attr_reader :offset + + # The offset this transition changes from (a TimezoneOffset instance). + attr_reader :previous_offset + + # Initializes a new TimezoneTransition. + # + # TimezoneTransition instances should not normally be constructed manually. + def initialize(offset, previous_offset) + @offset = offset + @previous_offset = previous_offset + @local_end_at = nil + @local_start_at = nil + end + + # A TimeOrDateTime instance representing the UTC time when this transition + # occurs. 
+ def at + raise_not_implemented('at') + end + + # The UTC time when this transition occurs, returned as a DateTime instance. + def datetime + at.to_datetime + end + + # The UTC time when this transition occurs, returned as a Time instance. + def time + at.to_time + end + + # A TimeOrDateTime instance representing the local time when this transition + # causes the previous observance to end (calculated from at using + # previous_offset). + def local_end_at + # Thread-safety: It is possible that the value of @local_end_at may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @local_end_at is only + # calculated once. + + unless @local_end_at + result = at.add_with_convert(@previous_offset.utc_total_offset) + return result if frozen? + @local_end_at = result + end + + @local_end_at + end + + # The local time when this transition causes the previous observance to end, + # returned as a DateTime instance. + def local_end + local_end_at.to_datetime + end + + # The local time when this transition causes the previous observance to end, + # returned as a Time instance. + def local_end_time + local_end_at.to_time + end + + # A TimeOrDateTime instance representing the local time when this transition + # causes the next observance to start (calculated from at using offset). + def local_start_at + # Thread-safety: It is possible that the value of @local_start_at may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @local_start_at is only + # calculated once. + + unless @local_start_at + result = at.add_with_convert(@offset.utc_total_offset) + return result if frozen? + @local_start_at = result + end + + @local_start_at + end + + # The local time when this transition causes the next observance to start, + # returned as a DateTime instance. + def local_start + local_start_at.to_datetime + end + + # The local time when this transition causes the next observance to start, + # returned as a Time instance. + def local_start_time + local_start_at.to_time + end + + # Returns true if this TimezoneTransition is equal to the given + # TimezoneTransition. Two TimezoneTransition instances are + # considered to be equal by == if offset, previous_offset and at are all + # equal. + def ==(tti) + tti.kind_of?(TimezoneTransition) && + offset == tti.offset && previous_offset == tti.previous_offset && at == tti.at + end + + # Returns true if this TimezoneTransition is equal to the given + # TimezoneTransition. Two TimezoneTransition instances are + # considered to be equal by eql? if offset, previous_offset and at are all + # equal and the type used to define at in both instances is the same. + def eql?(tti) + tti.kind_of?(TimezoneTransition) && + offset == tti.offset && previous_offset == tti.previous_offset && at.eql?(tti.at) + end + + # Returns a hash of this TimezoneTransition instance. + def hash + @offset.hash ^ @previous_offset.hash ^ at.hash + end + + # Returns internal object state as a programmer-readable string. 
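For a concrete transition (a sketch using the America/New_York data; this is the spring 2004 change referred to elsewhere in this library's comments):

  require 'tzinfo'

  tz = TZInfo::Timezone.get('America/New_York')
  transition = tz.transitions_up_to(Time.utc(2005, 1, 1), Time.utc(2004, 1, 1)).first

  transition.previous_offset.abbreviation  #=> :EST
  transition.offset.abbreviation           #=> :EDT
  transition.time                          #=> 2004-04-04 07:00:00 UTC
  transition.local_end_time                #=> 2004-04-04 02:00:00 (EST wall clock ends)
  transition.local_start_time              #=> 2004-04-04 03:00:00 (EDT wall clock starts)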
+ def inspect + "#<#{self.class}: #{at.inspect},#{@offset.inspect}>" + end + + private + + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition_definition.rb new file mode 100644 index 0000000..016816b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/timezone_transition_definition.rb @@ -0,0 +1,104 @@ +module TZInfo + # A TimezoneTransition defined by as integer timestamp, as a rational to + # create a DateTime or as both. + # + # @private + class TimezoneTransitionDefinition < TimezoneTransition #:nodoc: + # The numerator of the DateTime if the transition time is defined as a + # DateTime, otherwise the transition time as a timestamp. + attr_reader :numerator_or_time + protected :numerator_or_time + + # Either the denominator of the DateTime if the transition time is defined + # as a DateTime, otherwise nil. + attr_reader :denominator + protected :denominator + + # Creates a new TimezoneTransitionDefinition with the given offset, + # previous_offset (both TimezoneOffset instances) and UTC time. + # + # The time can be specified as a timestamp, as a rational to create a + # DateTime, or as both. + # + # If both a timestamp and rational are given, then the rational will only + # be used if the timestamp falls outside of the range of Time on the + # platform being used at runtime. + # + # DateTimes are created from the rational as follows: + # + # RubyCoreSupport.datetime_new!(RubyCoreSupport.rational_new!(numerator, denominator), 0, Date::ITALY) + # + # For performance reasons, the numerator and denominator must be specified + # in their lowest form. + def initialize(offset, previous_offset, numerator_or_timestamp, denominator_or_numerator = nil, denominator = nil) + super(offset, previous_offset) + + if denominator + numerator = denominator_or_numerator + timestamp = numerator_or_timestamp + elsif denominator_or_numerator + numerator = numerator_or_timestamp + denominator = denominator_or_numerator + timestamp = nil + else + numerator = nil + denominator = nil + timestamp = numerator_or_timestamp + end + + # Determine whether to use the timestamp or the numerator and denominator. + if numerator && ( + !timestamp || + (timestamp < 0 && !RubyCoreSupport.time_supports_negative) || + ((timestamp < -2147483648 || timestamp > 2147483647) && !RubyCoreSupport.time_supports_64bit) + ) + + @numerator_or_time = numerator + @denominator = denominator + else + @numerator_or_time = timestamp + @denominator = nil + end + + @at = nil + end + + # A TimeOrDateTime instance representing the UTC time when this transition + # occurs. + def at + # Thread-safety: It is possible that the value of @at may be calculated + # multiple times in concurrently executing threads. It is not worth the + # overhead of locking to ensure that @at is only calculated once. + + unless @at + result = unless @denominator + TimeOrDateTime.new(@numerator_or_time) + else + r = RubyCoreSupport.rational_new!(@numerator_or_time, @denominator) + dt = RubyCoreSupport.datetime_new!(r, 0, Date::ITALY) + TimeOrDateTime.new(dt) + end + + return result if frozen? + @at = result + end + + @at + end + + # Returns true if this TimezoneTransitionDefinition is equal to the given + # TimezoneTransitionDefinition. 
Two TimezoneTransitionDefinition instances + # are considered to be equal by eql? if offset, previous_offset, + # numerator_or_time and denominator are all equal. + def eql?(tti) + tti.kind_of?(TimezoneTransitionDefinition) && + offset == tti.offset && previous_offset == tti.previous_offset && + numerator_or_time == tti.numerator_or_time && denominator == tti.denominator + end + + # Returns a hash of this TimezoneTransitionDefinition instance. + def hash + @offset.hash ^ @previous_offset.hash ^ @numerator_or_time.hash ^ @denominator.hash + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_data_timezone_info.rb new file mode 100644 index 0000000..026bf22 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_data_timezone_info.rb @@ -0,0 +1,274 @@ +module TZInfo + # Raised if no offsets have been defined when calling period_for_utc or + # periods_for_local. Indicates an error in the timezone data. + class NoOffsetsDefined < StandardError + end + + # Represents a data timezone defined by a set of offsets and a set + # of transitions. + # + # @private + class TransitionDataTimezoneInfo < DataTimezoneInfo #:nodoc: + + # Constructs a new TransitionDataTimezoneInfo with its identifier. + def initialize(identifier) + super(identifier) + @offsets = {} + @transitions = [] + @previous_offset = nil + @transitions_index = nil + end + + # Defines a offset. The id uniquely identifies this offset within the + # timezone. utc_offset and std_offset define the offset in seconds of + # standard time from UTC and daylight savings from standard time + # respectively. abbreviation describes the timezone offset (e.g. GMT, BST, + # EST or EDT). + # + # The first offset to be defined is treated as the offset that applies + # until the first transition. This will usually be in Local Mean Time (LMT). + # + # ArgumentError will be raised if the id is already defined. + def offset(id, utc_offset, std_offset, abbreviation) + raise ArgumentError, 'Offset already defined' if @offsets.has_key?(id) + + offset = TimezoneOffset.new(utc_offset, std_offset, abbreviation) + @offsets[id] = offset + @previous_offset = offset unless @previous_offset + end + + # Defines a transition. Transitions must be defined in chronological order. + # ArgumentError will be raised if a transition is added out of order. + # offset_id refers to an id defined with offset. ArgumentError will be + # raised if the offset_id cannot be found. numerator_or_time and + # denomiator specify the time the transition occurs as. See + # TimezoneTransition for more detail about specifying times. + def transition(year, month, offset_id, numerator_or_timestamp, denominator_or_numerator = nil, denominator = nil) + offset = @offsets[offset_id] + raise ArgumentError, 'Offset not found' unless offset + + if @transitions_index + if year < @last_year || (year == @last_year && month < @last_month) + raise ArgumentError, 'Transitions must be increasing date order' + end + + # Record the position of the first transition with this index. 
+ index = transition_index(year, month) + @transitions_index[index] ||= @transitions.length + + # Fill in any gaps + (index - 1).downto(0) do |i| + break if @transitions_index[i] + @transitions_index[i] = @transitions.length + end + else + @transitions_index = [@transitions.length] + @start_year = year + @start_month = month + end + + @transitions << TimezoneTransitionDefinition.new(offset, @previous_offset, + numerator_or_timestamp, denominator_or_numerator, denominator) + @last_year = year + @last_month = month + @previous_offset = offset + end + + # Returns the TimezonePeriod for the given UTC time. + # Raises NoOffsetsDefined if no offsets have been defined. + def period_for_utc(utc) + unless @transitions.empty? + utc = TimeOrDateTime.wrap(utc) + index = transition_index(utc.year, utc.mon) + + start_transition = nil + start = transition_before_end(index) + if start + start.downto(0) do |i| + if @transitions[i].at <= utc + start_transition = @transitions[i] + break + end + end + end + + end_transition = nil + start = transition_after_start(index) + if start + start.upto(@transitions.length - 1) do |i| + if @transitions[i].at > utc + end_transition = @transitions[i] + break + end + end + end + + if start_transition || end_transition + TimezonePeriod.new(start_transition, end_transition) + else + # Won't happen since there are transitions. Must always find one + # transition that is either >= or < the specified time. + raise 'No transitions found in search' + end + else + raise NoOffsetsDefined, 'No offsets have been defined' unless @previous_offset + TimezonePeriod.new(nil, nil, @previous_offset) + end + end + + # Returns the set of TimezonePeriods for the given local time as an array. + # Results returned are ordered by increasing UTC start date. + # Returns an empty array if no periods are found for the given time. + # Raises NoOffsetsDefined if no offsets have been defined. + def periods_for_local(local) + unless @transitions.empty? + local = TimeOrDateTime.wrap(local) + index = transition_index(local.year, local.mon) + + result = [] + + start_index = transition_after_start(index - 1) + if start_index && @transitions[start_index].local_end_at > local + if start_index > 0 + if @transitions[start_index - 1].local_start_at <= local + result << TimezonePeriod.new(@transitions[start_index - 1], @transitions[start_index]) + end + else + result << TimezonePeriod.new(nil, @transitions[start_index]) + end + end + + end_index = transition_before_end(index + 1) + + if end_index + start_index = end_index unless start_index + + start_index.upto(transition_before_end(index + 1)) do |i| + if @transitions[i].local_start_at <= local + if i + 1 < @transitions.length + if @transitions[i + 1].local_end_at > local + result << TimezonePeriod.new(@transitions[i], @transitions[i + 1]) + end + else + result << TimezonePeriod.new(@transitions[i], nil) + end + end + end + end + + result + else + raise NoOffsetsDefined, 'No offsets have been defined' unless @previous_offset + [TimezonePeriod.new(nil, nil, @previous_offset)] + end + end + + # Returns an Array of TimezoneTransition instances representing the times + # where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given date and time up to a given date + # and time, specified in UTC (utc_to). + # + # A from date and time may also be supplied using the utc_from parameter + # (also specified in UTC). If utc_from is not nil, only transitions from + # that date and time onwards will be returned. 
+ # + # Comparisons with utc_to are exclusive. Comparisons with utc_from are + # inclusive. If a transition falls precisely on utc_to, it will be excluded. + # If a transition falls on utc_from, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # utc_to and utc_from can be specified using either DateTime, Time or + # integer timestamps (Time.to_i). + # + # If utc_from is specified and utc_to is not greater than utc_from, then + # transitions_up_to raises an ArgumentError exception. + def transitions_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + utc_from = utc_from ? TimeOrDateTime.wrap(utc_from) : nil + + if utc_from && utc_to <= utc_from + raise ArgumentError, 'utc_to must be greater than utc_from' + end + + unless @transitions.empty? + if utc_from + from = transition_after_start(transition_index(utc_from.year, utc_from.mon)) + + if from + while from < @transitions.length && @transitions[from].at < utc_from + from += 1 + end + + if from >= @transitions.length + return [] + end + else + # utc_from is later than last transition. + return [] + end + else + from = 0 + end + + to = transition_before_end(transition_index(utc_to.year, utc_to.mon)) + + if to + while to >= 0 && @transitions[to].at >= utc_to + to -= 1 + end + + if to < 0 + return [] + end + else + # utc_to is earlier than first transition. + return [] + end + + @transitions[from..to] + else + [] + end + end + + private + # Returns the index into the @transitions_index array for a given year + # and month. + def transition_index(year, month) + index = (year - @start_year) * 2 + index += 1 if month > 6 + index -= 1 if @start_month > 6 + index + end + + # Returns the index into @transitions of the first transition that occurs + # on or after the start of the given index into @transitions_index. + # Returns nil if there are no such transitions. + def transition_after_start(index) + if index >= @transitions_index.length + nil + else + index = 0 if index < 0 + @transitions_index[index] + end + end + + # Returns the index into @transitions of the first transition that occurs + # before the end of the given index into @transitions_index. + # Returns nil if there are no such transitions. + def transition_before_end(index) + index = index + 1 + + if index <= 0 + nil + elsif index >= @transitions_index.length + @transitions.length - 1 + else + @transitions_index[index] - 1 + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb new file mode 100644 index 0000000..b8d1605 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb @@ -0,0 +1,325 @@ +require 'date' + +module TZInfo + # Base class for rules definining the transition between standard and daylight + # savings time. + class TransitionRule #:nodoc: + # Returns the number of seconds after midnight local time on the day + # identified by the rule at which the transition occurs. Can be negative to + # denote a time on the prior day. Can be greater than or equal to 86,400 to + # denote a time of the following day. + attr_reader :transition_at + + # Initializes a new TransitionRule. + def initialize(transition_at) + raise ArgumentError, 'Invalid transition_at' unless transition_at.kind_of?(Integer) + @transition_at = transition_at + end + + # Calculates the UTC time of the transition from a given offset on a given + # year. 
+ def at(offset, year) + day = get_day(year) + day.add_with_convert(@transition_at - offset.utc_total_offset) + end + + # Determines if this TransitionRule is equal to another instance. + def ==(r) + r.kind_of?(TransitionRule) && @transition_at == r.transition_at + end + alias eql? == + + # Returns a hash based on hash_args (defaulting to transition_at). + def hash + hash_args.hash + end + + protected + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@transition_at] + end + + def new_time_or_datetime(year, month = 1, day = 1) + result = if ((year >= 2039 || (year == 2038 && (month >= 2 || (month == 1 && day >= 20)))) && !RubyCoreSupport.time_supports_64bit) || + (year < 1970 && !RubyCoreSupport.time_supports_negative) + + # Time handles 29 February on a non-leap year as 1 March. + # DateTime rejects. Advance manually. + if month == 2 && day == 29 && !Date.gregorian_leap?(year) + month = 3 + day = 1 + end + + RubyCoreSupport.datetime_new(year, month, day) + else + Time.utc(year, month, day) + end + + TimeOrDateTime.wrap(result) + end + end + + # A base class for transition rules that activate based on an integer day of + # the year. + # + # @private + class DayOfYearTransitionRule < TransitionRule #:nodoc: + # Initializes a new DayOfYearTransitionRule. + def initialize(day, transition_at) + super(transition_at) + raise ArgumentError, 'Invalid day' unless day.kind_of?(Integer) + @seconds = day * 86400 + end + + # Determines if this DayOfYearTransitionRule is equal to another instance. + def ==(r) + super(r) && r.kind_of?(DayOfYearTransitionRule) && @seconds == r.seconds + end + alias eql? == + + protected + + # @return [Integer] the day multipled by the number of seconds in a day. + attr_reader :seconds + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@seconds] + super + end + end + + # Defines transitions that occur on the zero-based nth day of the year. + # + # Day 0 is 1 January. + # + # Leap days are counted. Day 59 will be 29 February on a leap year and 1 March + # on a non-leap year. Day 365 will be 31 December on a leap year and 1 January + # the following year on a non-leap year. + # + # @private + class AbsoluteDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc: + # Initializes a new AbsoluteDayOfYearTransitionRule. + def initialize(day, transition_at = 0) + super(day, transition_at) + raise ArgumentError, 'Invalid day' unless day >= 0 && day <= 365 + end + + # Returns true if the day specified by this transition is the first in the + # year (a day number of 0), otherwise false. + def is_always_first_day_of_year? + seconds == 0 + end + + # @returns false. + def is_always_last_day_of_year? + false + end + + # Determines if this AbsoluteDayOfYearTransitionRule is equal to another + # instance. + def ==(r) + super(r) && r.kind_of?(AbsoluteDayOfYearTransitionRule) + end + alias eql? == + + protected + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + new_time_or_datetime(year).add_with_convert(seconds) + end + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [AbsoluteDayOfYearTransitionRule] + super + end + end + + # Defines transitions that occur on the one-based nth Julian day of the year. + # + # Leap days are not counted. Day 1 is 1 January. Day 60 is always 1 March. + # Day 365 is always 31 December. 
+ # + # @private + class JulianDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc: + # The 60 days in seconds. + LEAP = 60 * 86400 + + # The length of a non-leap year in seconds. + YEAR = 365 * 86400 + + # Initializes a new JulianDayOfYearTransitionRule. + def initialize(day, transition_at = 0) + super(day, transition_at) + raise ArgumentError, 'Invalid day' unless day >= 1 && day <= 365 + end + + # Returns true if the day specified by this transition is the first in the + # year (a day number of 1), otherwise false. + def is_always_first_day_of_year? + seconds == 86400 + end + + # Returns true if the day specified by this transition is the last in the + # year (a day number of 365), otherwise false. + def is_always_last_day_of_year? + seconds == YEAR + end + + # Determines if this JulianDayOfYearTransitionRule is equal to another + # instance. + def ==(r) + super(r) && r.kind_of?(JulianDayOfYearTransitionRule) + end + alias eql? == + + protected + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + # Returns 1 March on non-leap years. + leap = new_time_or_datetime(year, 2, 29) + diff = seconds - LEAP + diff += 86400 if diff >= 0 && leap.mday == 29 + leap.add_with_convert(diff) + end + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [JulianDayOfYearTransitionRule] + super + end + end + + # A base class for rules that transition on a particular day of week of a + # given week (subclasses specify which week of the month). + # + # @private + class DayOfWeekTransitionRule < TransitionRule #:nodoc: + # Initializes a new DayOfWeekTransitionRule. + def initialize(month, day_of_week, transition_at) + super(transition_at) + raise ArgumentError, 'Invalid month' unless month.kind_of?(Integer) && month >= 1 && month <= 12 + raise ArgumentError, 'Invalid day_of_week' unless day_of_week.kind_of?(Integer) && day_of_week >= 0 && day_of_week <= 6 + @month = month + @day_of_week = day_of_week + end + + # Returns false. + def is_always_first_day_of_year? + false + end + + # Returns false. + def is_always_last_day_of_year? + false + end + + # Determines if this DayOfWeekTransitionRule is equal to another instance. + def ==(r) + super(r) && r.kind_of?(DayOfWeekTransitionRule) && @month == r.month && @day_of_week == r.day_of_week + end + alias eql? == + + protected + + # Returns the month of the year (1 to 12). + attr_reader :month + + # Returns the day of the week (0 to 6 for Sunday to Monday). + attr_reader :day_of_week + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@month, @day_of_week] + super + end + end + + # A rule that transitions on the nth occurrence of a particular day of week + # of a calendar month. + # + # @private + class DayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc: + # Initializes a new DayOfMonthTransitionRule. + def initialize(month, week, day_of_week, transition_at = 0) + super(month, day_of_week, transition_at) + raise ArgumentError, 'Invalid week' unless week.kind_of?(Integer) && week >= 1 && week <= 4 + @offset_start = (week - 1) * 7 + 1 + end + + # Determines if this DayOfMonthTransitionRule is equal to another instance. + def ==(r) + super(r) && r.kind_of?(DayOfMonthTransitionRule) && @offset_start == r.offset_start + end + alias eql? == + + protected + + # Returns the day the week starts on for a month starting on a Sunday. 
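As an illustration of these internal rule classes, the common "second Sunday in March at 02:00 local time" rule can be written as follows (a sketch only; these classes are private API and are normally constructed by the library when parsing POSIX-style TZ strings):

  require 'tzinfo'

  # month = 3 (March), week = 2 (second), day_of_week = 0 (Sunday),
  # transition_at = 7200 seconds after local midnight (02:00).
  rule = TZInfo::DayOfMonthTransitionRule.new(3, 2, 0, 7200)

  # UTC time of the 2021 transition, given a standard-time offset of UTC-5.
  est = TZInfo::TimezoneOffset.new(-18000, 0, :EST)
  rule.at(est, 2021).to_time  #=> 2021-03-14 07:00:00 UTC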
+ attr_reader :offset_start + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + candidate = new_time_or_datetime(year, month, @offset_start) + diff = day_of_week - candidate.wday + + if diff < 0 + candidate.add_with_convert((7 + diff) * 86400) + elsif diff > 0 + candidate.add_with_convert(diff * 86400) + else + candidate + end + end + + # Returns an Array of parameters that will influence the output of hash. + def hash_args + [@offset_start] + super + end + end + + # A rule that transitions on the last occurrence of a particular day of week + # of a calendar month. + # + # @private + class LastDayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc: + # Initializes a new LastDayOfMonthTransitionRule. + def initialize(month, day_of_week, transition_at = 0) + super(month, day_of_week, transition_at) + end + + # Determines if this LastDayOfMonthTransitionRule is equal to another + # instance. + def ==(r) + super(r) && r.kind_of?(LastDayOfMonthTransitionRule) + end + alias eql? == + + protected + + # Returns a TimeOrDateTime representing midnight local time on the day + # specified by the rule for the given offset and year. + def get_day(year) + next_month = month + 1 + if next_month == 13 + year += 1 + next_month = 1 + end + + candidate = new_time_or_datetime(year, next_month).add_with_convert(-86400) + diff = candidate.wday - day_of_week + + if diff < 0 + candidate - (diff + 7) * 86400 + elsif diff > 0 + candidate - diff * 86400 + else + candidate + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_country_info.rb new file mode 100644 index 0000000..c99acaa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_country_info.rb @@ -0,0 +1,37 @@ +module TZInfo + # Represents information about a country returned by ZoneinfoDataSource. + # + # @private + class ZoneinfoCountryInfo < CountryInfo #:nodoc: + # Constructs a new CountryInfo with an ISO 3166 country code, name and + # an array of CountryTimezones. + def initialize(code, name, zones) + super(code, name) + @zones = zones.dup.freeze + @zone_identifiers = nil + end + + # Returns a frozen array of all the zone identifiers for the country ordered + # geographically, most populous first. + def zone_identifiers + # Thread-safety: It is possible that the value of @zone_identifiers may be + # calculated multiple times in concurrently executing threads. It is not + # worth the overhead of locking to ensure that @zone_identifiers is only + # calculated once. + + unless @zone_identifiers + result = zones.collect {|zone| zone.identifier}.freeze + return result if frozen? + @zone_identifiers = result + end + + @zone_identifiers + end + + # Returns a frozen array of all the timezones for the for the country + # ordered geographically, most populous first. 
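+    # (Added illustrative note, not part of the original documentation: by
+    # construction the two views exposed by this class agree, e.g. for any
+    # loaded instance `info`:
+    #
+    #   info.zones.map {|z| z.identifier } == info.zone_identifiers  # => true
+    #
+    # `info` here is just a placeholder name for a ZoneinfoCountryInfo built
+    # by ZoneinfoDataSource.)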
+ def zones + @zones + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_data_source.rb new file mode 100644 index 0000000..3959090 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_data_source.rb @@ -0,0 +1,497 @@ +module TZInfo + # Use send as a workaround for an issue on JRuby 9.2.9.0 where using the + # refinement causes calls to RubyCoreSupport.file_open to fail to pass the + # block parameter. + # + # https://travis-ci.org/tzinfo/tzinfo/jobs/628812051#L1931 + # https://github.com/jruby/jruby/issues/6009 + send(:using, TZInfo::RubyCoreSupport::UntaintExt) if TZInfo::RubyCoreSupport.const_defined?(:UntaintExt) + + # An InvalidZoneinfoDirectory exception is raised if the DataSource is + # set to a specific zoneinfo path, which is not a valid zoneinfo directory + # (i.e. a directory containing index files named iso3166.tab and zone.tab + # as well as other timezone files). + class InvalidZoneinfoDirectory < StandardError + end + + # A ZoneinfoDirectoryNotFound exception is raised if no valid zoneinfo + # directory could be found when checking the paths listed in + # ZoneinfoDataSource.search_path. A valid zoneinfo directory is one that + # contains timezone files, a country code index file named iso3166.tab and a + # timezone index file named zone1970.tab or zone.tab. + class ZoneinfoDirectoryNotFound < StandardError + end + + # A DataSource that loads data from a 'zoneinfo' directory containing + # compiled "TZif" version 3 (or earlier) files in addition to iso3166.tab and + # zone1970.tab or zone.tab index files. + # + # To have TZInfo load the system zoneinfo files, call TZInfo::DataSource.set + # as follows: + # + # TZInfo::DataSource.set(:zoneinfo) + # + # To load zoneinfo files from a particular directory, pass the directory to + # TZInfo::DataSource.set: + # + # TZInfo::DataSource.set(:zoneinfo, directory) + # + # Note that the platform used at runtime may limit the range of available + # transition data that can be loaded from zoneinfo files. There are two + # factors to consider: + # + # First of all, the zoneinfo support in TZInfo makes use of Ruby's Time class. + # On 32-bit builds of Ruby 1.8, the Time class only supports 32-bit + # timestamps. This means that only Times between 1901-12-13 20:45:52 and + # 2038-01-19 03:14:07 can be represented. Furthermore, certain platforms only + # allow for positive 32-bit timestamps (notably Windows), making the earliest + # representable time 1970-01-01 00:00:00. + # + # 64-bit builds of Ruby 1.8 and all builds of Ruby 1.9 support 64-bit + # timestamps. This means that there is no practical restriction on the range + # of the Time class on these platforms. + # + # TZInfo will only load transitions that fall within the supported range of + # the Time class. Any queries performed on times outside of this range may + # give inaccurate results. + # + # The second factor concerns the zoneinfo files. Versions of the 'zic' tool + # (used to build zoneinfo files) that were released prior to February 2006 + # created zoneinfo files that used 32-bit integers for transition timestamps. + # Later versions of zic produce zoneinfo files that use 64-bit integers. If + # you have 32-bit zoneinfo files on your system, then any queries falling + # outside of the range 1901-12-13 20:45:52 to 2038-01-19 03:14:07 may be + # inaccurate. + # + # Most modern platforms include 64-bit zoneinfo files. 
However, Mac OS X (up + # to at least 10.8.4) still uses 32-bit zoneinfo files. + # + # To check whether your zoneinfo files contain 32-bit or 64-bit transition + # data, you can run the following code (substituting the identifier of the + # zone you want to test for zone_identifier): + # + # TZInfo::DataSource.set(:zoneinfo) + # dir = TZInfo::DataSource.get.zoneinfo_dir + # File.open(File.join(dir, zone_identifier), 'r') {|f| f.read(5) } + # + # If the last line returns "TZif\\x00", then you have a 32-bit zoneinfo file. + # If it returns "TZif2" or "TZif3" then you have a 64-bit zoneinfo file. + # + # If you require support for 64-bit transitions, but are restricted to 32-bit + # zoneinfo support, then you may want to consider using TZInfo::RubyDataSource + # instead. + class ZoneinfoDataSource < DataSource + # The default value of ZoneinfoDataSource.search_path. + DEFAULT_SEARCH_PATH = ['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'].freeze + + # The default value of ZoneinfoDataSource.alternate_iso3166_tab_search_path. + DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH = ['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'].freeze + + # Paths to be checked to find the system zoneinfo directory. + @@search_path = DEFAULT_SEARCH_PATH.dup + + # Paths to possible alternate iso3166.tab files (used to locate the + # system-wide iso3166.tab files on FreeBSD and OpenBSD). + @@alternate_iso3166_tab_search_path = DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH.dup + + # An Array of directories that will be checked to find the system zoneinfo + # directory. + # + # Directories are checked in the order they appear in the Array. + # + # The default value is ['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo']. + def self.search_path + @@search_path + end + + # Sets the directories to be checked when locating the system zoneinfo + # directory. + # + # Can be set to an Array of directories or a String containing directories + # separated with File::PATH_SEPARATOR. + # + # Directories are checked in the order they appear in the Array or String. + # + # Set to nil to revert to the default paths. + def self.search_path=(search_path) + @@search_path = process_search_path(search_path, DEFAULT_SEARCH_PATH) + end + + # An Array of paths that will be checked to find an alternate iso3166.tab + # file if one was not included in the zoneinfo directory (for example, on + # FreeBSD and OpenBSD systems). + # + # Paths are checked in the order they appear in the array. + # + # The default value is ['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166']. + def self.alternate_iso3166_tab_search_path + @@alternate_iso3166_tab_search_path + end + + # Sets the paths to check to locate an alternate iso3166.tab file if one was + # not included in the zoneinfo directory. + # + # Can be set to an Array of directories or a String containing directories + # separated with File::PATH_SEPARATOR. + # + # Paths are checked in the order they appear in the array. + # + # Set to nil to revert to the default paths. + def self.alternate_iso3166_tab_search_path=(alternate_iso3166_tab_search_path) + @@alternate_iso3166_tab_search_path = process_search_path(alternate_iso3166_tab_search_path, DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH) + end + + # The zoneinfo directory being used. + attr_reader :zoneinfo_dir + + # Creates a new ZoneinfoDataSource. + # + # If zoneinfo_dir is specified, it will be checked and used as the source + # of zoneinfo files. 
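+    # (Added illustrative note, not part of the original documentation: a data
+    # source constructed this way can also be installed directly, e.g.
+    #
+    #   ds = TZInfo::ZoneinfoDataSource.new('/usr/share/zoneinfo')
+    #   TZInfo::DataSource.set(ds)
+    #
+    # where '/usr/share/zoneinfo' is only an example path; the directory must
+    # satisfy the requirements described below.)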
+ # + # The directory must contain a file named iso3166.tab and a file named + # either zone1970.tab or zone.tab. These may either be included in the root + # of the directory or in a 'tab' sub-directory and named 'country.tab' and + # 'zone_sun.tab' respectively (as is the case on Solaris. + # + # Additionally, the path to iso3166.tab can be overridden using the + # alternate_iso3166_tab_path parameter. + # + # InvalidZoneinfoDirectory will be raised if the iso3166.tab and + # zone1970.tab or zone.tab files cannot be found using the zoneinfo_dir and + # alternate_iso3166_tab_path parameters. + # + # If zoneinfo_dir is not specified or nil, the paths referenced in + # search_path are searched in order to find a valid zoneinfo directory + # (one that contains zone1970.tab or zone.tab and iso3166.tab files as + # above). + # + # The paths referenced in alternate_iso3166_tab_search_path are also + # searched to find an iso3166.tab file if one of the searched zoneinfo + # directories doesn't contain an iso3166.tab file. + # + # If no valid directory can be found by searching, ZoneinfoDirectoryNotFound + # will be raised. + def initialize(zoneinfo_dir = nil, alternate_iso3166_tab_path = nil) + if zoneinfo_dir + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(zoneinfo_dir, alternate_iso3166_tab_path) + + unless iso3166_tab_path && zone_tab_path + raise InvalidZoneinfoDirectory, "#{zoneinfo_dir} is not a directory or doesn't contain a iso3166.tab file and a zone1970.tab or zone.tab file." + end + + @zoneinfo_dir = zoneinfo_dir + else + @zoneinfo_dir, iso3166_tab_path, zone_tab_path = find_zoneinfo_dir + + unless @zoneinfo_dir && iso3166_tab_path && zone_tab_path + raise ZoneinfoDirectoryNotFound, "None of the paths included in TZInfo::ZoneinfoDataSource.search_path are valid zoneinfo directories." + end + end + + @zoneinfo_dir = File.expand_path(@zoneinfo_dir).freeze + @timezone_index = load_timezone_index.freeze + @country_index = load_country_index(iso3166_tab_path, zone_tab_path).freeze + @posix_tz_parser = PosixTimeZoneParser.new + end + + # Returns a TimezoneInfo instance for a given identifier. + # Raises InvalidTimezoneIdentifier if the timezone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + begin + if @timezone_index.include?(identifier) + path = File.join(@zoneinfo_dir, identifier) + + # Untaint path rather than identifier. We don't want to modify + # identifier. identifier may also be frozen and therefore cannot be + # untainted. + path.untaint + + begin + ZoneinfoTimezoneInfo.new(identifier, path, @posix_tz_parser) + rescue InvalidZoneinfoFile => e + raise InvalidTimezoneIdentifier, e.message + end + else + raise InvalidTimezoneIdentifier, 'Invalid identifier' + end + rescue Errno::ENOENT, Errno::ENAMETOOLONG, Errno::ENOTDIR + raise InvalidTimezoneIdentifier, 'Invalid identifier' + rescue Errno::EACCES => e + raise InvalidTimezoneIdentifier, e.message + end + end + + # Returns an array of all the available timezone identifiers. + def timezone_identifiers + @timezone_index + end + + # Returns an array of all the available timezone identifiers for + # data timezones (i.e. those that actually contain definitions). + # + # For ZoneinfoDataSource, this will always be identical to + # timezone_identifers. + def data_timezone_identifiers + @timezone_index + end + + # Returns an array of all the available timezone identifiers that + # are links to other timezones. + # + # For ZoneinfoDataSource, this will always be an empty array. 
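+    # (Added illustrative note, not part of the original documentation: once a
+    # ZoneinfoDataSource is active, the public TZInfo API is served by these
+    # methods, e.g.
+    #
+    #   TZInfo::DataSource.set(:zoneinfo)
+    #   TZInfo::Timezone.get('Europe/London')   # backed by load_timezone_info
+    #   TZInfo::Timezone.all_identifiers        # backed by timezone_identifiers
+    #
+    # the exact delegation is an assumption based on the surrounding
+    # documentation rather than something stated in this file.)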
+ def linked_timezone_identifiers + [].freeze + end + + # Returns a CountryInfo instance for the given ISO 3166-1 alpha-2 + # country code. Raises InvalidCountryCode if the country could not be found + # or the code is invalid. + def load_country_info(code) + info = @country_index[code] + raise InvalidCountryCode, 'Invalid country code' unless info + info + end + + # Returns an array of all the available ISO 3166-1 alpha-2 + # country codes. + def country_codes + @country_index.keys.freeze + end + + # Returns the name and information about this DataSource. + def to_s + "Zoneinfo DataSource: #{@zoneinfo_dir}" + end + + # Returns internal object state as a programmer-readable string. + def inspect + "#<#{self.class}: #{@zoneinfo_dir}>" + end + + private + + # Processes a path for use as the search_path or + # alternate_iso3166_tab_search_path. + def self.process_search_path(path, default) + if path + if path.kind_of?(String) + path.split(File::PATH_SEPARATOR) + else + path.collect {|p| p.to_s} + end + else + default.dup + end + end + + # Validates a zoneinfo directory and returns the paths to the iso3166.tab + # and zone1970.tab or zone.tab files if valid. If the directory is not + # valid, returns nil. + # + # The path to the iso3166.tab file may be overriden by passing in a path. + # This is treated as either absolute or relative to the current working + # directory. + def validate_zoneinfo_dir(path, iso3166_tab_path = nil) + if File.directory?(path) + if iso3166_tab_path + return nil unless File.file?(iso3166_tab_path) + else + iso3166_tab_path = resolve_tab_path(path, ['iso3166.tab'], 'country.tab') + return nil unless iso3166_tab_path + end + + zone_tab_path = resolve_tab_path(path, ['zone1970.tab', 'zone.tab'], 'zone_sun.tab') + return nil unless zone_tab_path + + [iso3166_tab_path, zone_tab_path] + else + nil + end + end + + # Attempts to resolve the path to a tab file given its standard names and + # tab sub-directory name (as used on Solaris). + def resolve_tab_path(zoneinfo_path, standard_names, tab_name) + standard_names.each do |standard_name| + path = File.join(zoneinfo_path, standard_name) + return path if File.file?(path) + end + + path = File.join(zoneinfo_path, 'tab', tab_name) + return path if File.file?(path) + + nil + end + + # Finds a zoneinfo directory using search_path and + # alternate_iso3166_tab_search_path. Returns the paths to the directory, + # the iso3166.tab file and the zone.tab file or nil if not found. + def find_zoneinfo_dir + alternate_iso3166_tab_path = self.class.alternate_iso3166_tab_search_path.detect do |path| + File.file?(path) + end + + self.class.search_path.each do |path| + # Try without the alternate_iso3166_tab_path first. + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(path) + return path, iso3166_tab_path, zone_tab_path if iso3166_tab_path && zone_tab_path + + if alternate_iso3166_tab_path + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(path, alternate_iso3166_tab_path) + return path, iso3166_tab_path, zone_tab_path if iso3166_tab_path && zone_tab_path + end + end + + # Not found. + nil + end + + # Scans @zoneinfo_dir and returns an Array of available timezone + # identifiers. + def load_timezone_index + index = [] + + # Ignoring particular files: + # +VERSION is included on Mac OS X. + # leapseconds is a list of leap seconds. + # localtime is the current local timezone (may be a link). + # posix, posixrules and right are directories containing other versions of the zoneinfo files. 
+ # src is a directory containing the tzdata source included on Solaris. + # timeconfig is a symlink included on Slackware. + + enum_timezones(nil, ['+VERSION', 'leapseconds', 'localtime', 'posix', 'posixrules', 'right', 'src', 'timeconfig']) do |identifier| + index << identifier + end + + index.sort + end + + # Recursively scans a directory of timezones, calling the passed in block + # for each identifier found. + def enum_timezones(dir, exclude = [], &block) + Dir.foreach(dir ? File.join(@zoneinfo_dir, dir) : @zoneinfo_dir) do |entry| + unless entry =~ /\./ || exclude.include?(entry) + entry.untaint + path = dir ? File.join(dir, entry) : entry + full_path = File.join(@zoneinfo_dir, path) + + if File.directory?(full_path) + enum_timezones(path, [], &block) + elsif File.file?(full_path) + yield path + end + end + end + end + + # Uses the iso3166.tab and zone1970.tab or zone.tab files to build an index + # of the available countries and their timezones. + def load_country_index(iso3166_tab_path, zone_tab_path) + + # Handle standard 3 to 4 column zone.tab files as well as the 4 to 5 + # column format used by Solaris. + # + # On Solaris, an extra column before the comment gives an optional + # linked/alternate timezone identifier (or '-' if not set). + # + # Additionally, there is a section at the end of the file for timezones + # covering regions. These are given lower-case "country" codes. The timezone + # identifier column refers to a continent instead of an identifier. These + # lines will be ignored by TZInfo. + # + # Since the last column is optional in both formats, testing for the + # Solaris format is done in two passes. The first pass identifies if there + # are any lines using 5 columns. + + + # The first column is allowed to be a comma separated list of country + # codes, as used in zone1970.tab (introduced in tzdata 2014f). + # + # The first country code in the comma-separated list is the country that + # contains the city the zone identifer is based on. The first country + # code on each line is considered to be primary with the others + # secondary. + # + # The zones for each country are ordered primary first, then secondary. + # Within the primary and secondary groups, the zones are ordered by their + # order in the file. + + file_is_5_column = false + zone_tab = [] + + RubyCoreSupport.open_file(zone_tab_path, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + file.each_line do |line| + line.chomp! + + if line =~ /\A([A-Z]{2}(?:,[A-Z]{2})*)\t(?:([+\-])(\d{2})(\d{2})([+\-])(\d{3})(\d{2})|([+\-])(\d{2})(\d{2})(\d{2})([+\-])(\d{3})(\d{2})(\d{2}))\t([^\t]+)(?:\t([^\t]+))?(?:\t([^\t]+))?\z/ + codes = $1 + + if $2 + latitude = dms_to_rational($2, $3, $4) + longitude = dms_to_rational($5, $6, $7) + else + latitude = dms_to_rational($8, $9, $10, $11) + longitude = dms_to_rational($12, $13, $14, $15) + end + + zone_identifier = $16 + column4 = $17 + column5 = $18 + + file_is_5_column = true if column5 + + zone_tab << [codes.split(','.freeze), zone_identifier, latitude, longitude, column4, column5] + end + end + end + + primary_zones = {} + secondary_zones = {} + + zone_tab.each do |codes, zone_identifier, latitude, longitude, column4, column5| + description = file_is_5_column ? 
column5 : column4 + country_timezone = CountryTimezone.new(zone_identifier, latitude, longitude, description) + + # codes will always have at least one element + + (primary_zones[codes.first] ||= []) << country_timezone + + codes[1..-1].each do |code| + (secondary_zones[code] ||= []) << country_timezone + end + end + + countries = {} + + RubyCoreSupport.open_file(iso3166_tab_path, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + file.each_line do |line| + line.chomp! + + # Handle both the two column alpha-2 and name format used in the tz + # database as well as the 4 column alpha-2, alpha-3, numeric-3 and + # name format used by FreeBSD and OpenBSD. + + if line =~ /\A([A-Z]{2})(?:\t[A-Z]{3}\t[0-9]{3})?\t(.+)\z/ + code = $1 + name = $2 + zones = (primary_zones[code] || []) + (secondary_zones[code] || []) + + countries[code] = ZoneinfoCountryInfo.new(code, name, zones) + end + end + end + + countries + end + + # Converts degrees, minutes and seconds to a Rational. + def dms_to_rational(sign, degrees, minutes, seconds = nil) + result = degrees.to_i + Rational(minutes.to_i, 60) + result += Rational(seconds.to_i, 3600) if seconds + result = -result if sign == '-'.freeze + result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb new file mode 100644 index 0000000..4f28a19 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb @@ -0,0 +1,515 @@ +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. See #114. + send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt) + + # An InvalidZoneinfoFile exception is raised if an attempt is made to load an + # invalid zoneinfo file. + class InvalidZoneinfoFile < StandardError + end + + # Represents a timezone defined by a compiled zoneinfo TZif (\0, 2 or 3) file. + # + # @private + class ZoneinfoTimezoneInfo < TransitionDataTimezoneInfo #:nodoc: + # The year to generate transitions up to. + # + # @private + GENERATE_UP_TO = RubyCoreSupport.time_supports_64bit ? Time.now.utc.year + 100 : 2037 + + # Minimum supported timestamp (inclusive). + # + # Time.utc(1700, 1, 1).to_i + MIN_TIMESTAMP = -8520336000 + + # Maximum supported timestamp (exclusive). + # + # Time.utc(2500, 1, 1).to_i + MAX_TIMESTAMP = 16725225600 + + # Constructs the new ZoneinfoTimezoneInfo with an identifier, path + # to the file and parser to use to parse the POSIX-like TZ string. + def initialize(identifier, file_path, posix_tz_parser) + super(identifier) + + File.open(file_path, 'rb') do |file| + parse(file, posix_tz_parser) + end + end + + private + # Unpack will return unsigned 32-bit integers. Translate to + # signed 32-bit. + def make_signed_int32(long) + long >= 0x80000000 ? long - 0x100000000 : long + end + + # Unpack will return a 64-bit integer as two unsigned 32-bit integers + # (most significant first). Translate to signed 64-bit + def make_signed_int64(high, low) + unsigned = (high << 32) | low + unsigned >= 0x8000000000000000 ? unsigned - 0x10000000000000000 : unsigned + end + + # Read bytes from file and check that the correct number of bytes could + # be read. Raises InvalidZoneinfoFile if the number of bytes didn't match + # the number requested. 
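+      # Added commentary (not in the original source): check_read below and
+      # the make_signed_int32 / make_signed_int64 helpers above are the
+      # low-level primitives used while parsing TZif data. For example, the
+      # unsigned values produced by String#unpack('N') convert back to signed
+      # integers as follows:
+      #
+      #   make_signed_int32(0xFFFFFFFF)               # => -1
+      #   make_signed_int64(0xFFFFFFFF, 0xFFFFFFFF)   # => -1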
+ def check_read(file, bytes) + result = file.read(bytes) + + unless result && result.length == bytes + raise InvalidZoneinfoFile, "Expected #{bytes} bytes reading '#{file.path}', but got #{result ? result.length : 0} bytes" + end + + result + end + + # Zoneinfo files don't include the offset from standard time (std_offset) + # for DST periods. Derive the base offset (utc_offset) where DST is + # observed from either the previous or next non-DST period. + # + # Returns the index of the offset to be used prior to the first + # transition. + def derive_offsets(transitions, offsets) + # The first non-DST offset (if there is one) is the offset observed + # before the first transition. Fallback to the first DST offset if there + # are no non-DST offsets. + first_non_dst_offset_index = offsets.index {|o| !o[:is_dst] } + first_offset_index = first_non_dst_offset_index || 0 + return first_offset_index if transitions.empty? + + # Determine the utc_offset of the next non-dst offset at each transition. + utc_offset_from_next = nil + + transitions.reverse_each do |transition| + offset = offsets[transition[:offset]] + if offset[:is_dst] + transition[:utc_offset_from_next] = utc_offset_from_next if utc_offset_from_next + else + utc_offset_from_next = offset[:utc_total_offset] + end + end + + utc_offset_from_previous = first_non_dst_offset_index ? offsets[first_non_dst_offset_index][:utc_total_offset] : nil + defined_offsets = {} + + transitions.each do |transition| + offset_index = transition[:offset] + offset = offsets[offset_index] + utc_total_offset = offset[:utc_total_offset] + + if offset[:is_dst] + utc_offset_from_next = transition[:utc_offset_from_next] + + difference_to_previous = (utc_total_offset - (utc_offset_from_previous || utc_total_offset)).abs + difference_to_next = (utc_total_offset - (utc_offset_from_next || utc_total_offset)).abs + + utc_offset = if difference_to_previous == 3600 + utc_offset_from_previous + elsif difference_to_next == 3600 + utc_offset_from_next + elsif difference_to_previous > 0 && difference_to_next > 0 + difference_to_previous < difference_to_next ? utc_offset_from_previous : utc_offset_from_next + elsif difference_to_previous > 0 + utc_offset_from_previous + elsif difference_to_next > 0 + utc_offset_from_next + else + # No difference, assume a 1 hour offset from standard time. + utc_total_offset - 3600 + end + + if !offset[:utc_offset] + offset[:utc_offset] = utc_offset + defined_offsets[offset] = offset_index + elsif offset[:utc_offset] != utc_offset + # An earlier transition has already derived a different + # utc_offset. Define a new offset or reuse an existing identically + # defined offset. + new_offset = offset.dup + new_offset[:utc_offset] = utc_offset + + offset_index = defined_offsets[new_offset] + + unless offset_index + offsets << new_offset + offset_index = offsets.length - 1 + defined_offsets[new_offset] = offset_index + end + + transition[:offset] = offset_index + end + else + utc_offset_from_previous = utc_total_offset + end + end + + first_offset_index + end + + # Remove transitions before a minimum supported value. If there is not a + # transition exactly on the minimum supported value move the latest from + # before up to the minimum supported value. + def remove_unsupported_negative_transitions(transitions, min_supported) + result = transitions.drop_while {|t| t[:at] < min_supported } + if result.empty? 
|| (result[0][:at] > min_supported && result.length < transitions.length) + last_before = transitions[-1 - result.length] + last_before[:at] = min_supported + [last_before] + result + else + result + end + end + + # Determines if the offset from a transition matches the offset from a + # rule. This is a looser match than TimezoneOffset#==, not requiring that + # the utc_offset and std_offset both match (which have to be derived for + # transitions, but are known for rules. + def offset_matches_rule?(offset, rule_offset) + offset[:utc_total_offset] == rule_offset.utc_total_offset && + offset[:is_dst] == rule_offset.dst? && + offset[:abbr] == rule_offset.abbreviation.to_s + end + + # Determins if the offset from a transition exactly matches the offset + # from a rule. + def offset_equals_rule?(offset, rule_offset) + offset_matches_rule?(offset, rule_offset) && + (offset[:utc_offset] || (offset[:is_dst] ? offset[:utc_total_offset] - 3600 : offset[:utc_total_offset])) == rule_offset.utc_offset + end + + # Finds an offset hash that is an exact match to the rule offset specified. + def find_existing_offset_index(offsets, rule_offset) + offsets.find_index {|o| offset_equals_rule?(o, rule_offset) } + end + + # Gets an existing matching offset index or adds a new offset hash for a + # rule offset. + def get_rule_offset_index(offsets, offset) + index = find_existing_offset_index(offsets, offset) + unless index + index = offsets.length + offsets << {:utc_total_offset => offset.utc_total_offset, :utc_offset => offset.utc_offset, :is_dst => offset.dst?, :abbr => offset.abbreviation} + end + index + end + + # Gets a hash mapping rule offsets to indexes in offsets, creating new + # offset hashes if required. + def get_rule_offset_indexes(offsets, annual_rules) + { + annual_rules.std_offset => get_rule_offset_index(offsets, annual_rules.std_offset), + annual_rules.dst_offset => get_rule_offset_index(offsets, annual_rules.dst_offset) + } + end + + # Converts an array of rule transitions to hashes. + def convert_transitions_to_hashes(offset_indexes, transitions) + transitions.map {|t| {:at => t.at.to_i, :offset => offset_indexes[t.offset]} } + end + + # Apply the rules from the TZ string when there were no defined + # transitions. Checks for a matching offset. Returns the rules-based + # constant offset or generates transitions from 1970 until 100 years into + # the future (at the time of loading zoneinfo_timezone_info.rb) or 2037 if + # limited to 32-bit Times. + def apply_rules_without_transitions(file, offsets, first_offset_index, rules) + first_offset = offsets[first_offset_index] + + if rules.kind_of?(TimezoneOffset) + unless offset_matches_rule?(first_offset, rules) + raise InvalidZoneinfoFile, "Constant offset POSIX-style TZ string does not match constant offset in file '#{file.path}'." + end + + first_offset[:utc_offset] = rules.utc_offset + [] + else + transitions = 1970.upto(GENERATE_UP_TO).map {|y| rules.transitions(y) }.flatten + first_transition = transitions[0] + + if offset_matches_rule?(first_offset, first_transition.previous_offset) + # Correct the first offset if it isn't an exact match. + first_offset[:utc_offset] = first_transition.previous_offset.utc_offset + else + # Not transitioning from the designated first offset. + if offset_matches_rule?(first_offset, first_transition.offset) + # Correct the first offset if it isn't an exact match. + first_offset[:utc_offset] = first_transition.offset.utc_offset + + # Skip an unnecessary transition to the first offset. 
+ transitions.shift + end + + # If the first offset doesn't match either the offset or previous + # offset, then it will be retained. + end + + offset_indexes = get_rule_offset_indexes(offsets, rules) + convert_transitions_to_hashes(offset_indexes, transitions) + end + end + + # Validates the rules offset against the offset of the last defined + # transition. Replaces the transition with an equivalent using the rules + # offset if the rules give a different definition for the base offset. + def replace_last_transition_offset_if_valid_and_needed(file, transitions, offsets) + last_transition = transitions.last + last_offset = offsets[last_transition[:offset]] + rule_offset = yield last_offset + + unless offset_matches_rule?(last_offset, rule_offset) + raise InvalidZoneinfoFile, "Offset from POSIX-style TZ string does not match final transition in file '#{file.path}'." + end + + # The total_utc_offset and abbreviation must always be the same. The + # base utc_offset and std_offset might differ. In which case the rule + # should be used as it will be more precise. + last_offset[:utc_offset] = rule_offset.utc_offset + last_transition + end + + # todo: port over validate_and_fix_last_defined_transition_offset + # when fixing the previous offset will need to define a new one + + # Validates the offset indicated to be observed by the rules before the + # first generated transition against the offset of the last defined + # transition. + # + # Fix the last defined transition if it differ on just base/std offsets + # (which are derived). Raise an error if the observed UTC offset or + # abbreviations differ. + def validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, first_rule_offset) + offset_of_last_defined = offsets[last_defined[:offset]] + + if offset_equals_rule?(offset_of_last_defined, first_rule_offset) + last_defined + else + if offset_matches_rule?(offset_of_last_defined, first_rule_offset) + # The same overall offset, but differing in the base or std + # offset (which are derived). Correct by using the rule. + + offset_index = get_rule_offset_index(offsets, first_rule_offset) + {:at => last_defined[:at], :offset => offset_index} + else + raise InvalidZoneinfoFile, "The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{file.path}'." + end + end + end + + # Apply the rules from the TZ string when there were defined transitions. + # Checks for a matching offset with the last transition. Redefines the + # last transition if required and if the rules don't specific a constant + # offset, generates transitions until 100 years into the future (at the + # time of loading zoneinfo_timezone_info.rb) or 2037 if limited to 32-bit + # Times. + def apply_rules_with_transitions(file, transitions, offsets, first_offset_index, rules) + last_defined = transitions[-1] + + if rules.kind_of?(TimezoneOffset) + transitions[-1] = validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, rules) + else + previous_offset_index = transitions.length > 1 ? transitions[-2][:offset] : first_offset_index + previous_offset = offsets[previous_offset_index] + last_year = (Time.at(last_defined[:at]).utc + previous_offset[:utc_total_offset]).year + + if last_year <= GENERATE_UP_TO + generated = rules.transitions(last_year).find_all {|t| t.at > last_defined[:at] } + + (last_year + 1).upto(GENERATE_UP_TO).map {|y| rules.transitions(y) }.flatten + + unless generated.empty? 
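+              # Added commentary (not in the original source): at this point
+              # `generated` holds the future transitions produced from the
+              # POSIX-style TZ string; the last file-defined transition is
+              # re-validated against the first generated transition's previous
+              # offset before the generated transitions are appended.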
+ transitions[-1] = validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, generated[0].previous_offset) + rule_offset_indexes = get_rule_offset_indexes(offsets, rules) + transitions.concat(convert_transitions_to_hashes(rule_offset_indexes, generated)) + end + end + end + end + + # Defines an offset for the timezone based on the given index and offset + # Hash. + def define_offset(index, offset) + utc_total_offset = offset[:utc_total_offset] + utc_offset = offset[:utc_offset] + + if utc_offset + # DST offset with base utc_offset derived by derive_offsets. + std_offset = utc_total_offset - utc_offset + elsif offset[:is_dst] + # DST offset unreferenced by a transition (offset in use before the + # first transition). No derived base UTC offset, so assume 1 hour + # DST. + utc_offset = utc_total_offset - 3600 + std_offset = 3600 + else + # Non-DST offset. + utc_offset = utc_total_offset + std_offset = 0 + end + + offset index, utc_offset, std_offset, offset[:abbr].untaint.to_sym + end + + # Parses a zoneinfo file and intializes the DataTimezoneInfo structures. + def parse(file, posix_tz_parser) + magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt = + check_read(file, 44).unpack('a4 a x15 NNNNNN') + + if magic != 'TZif' + raise InvalidZoneinfoFile, "The file '#{file.path}' does not start with the expected header." + end + + if version == '2' || version == '3' + # Skip the first 32-bit section and read the header of the second + # 64-bit section. The 64-bit section is always used even if the + # runtime platform doesn't support 64-bit timestamps. In "slim" format + # zoneinfo files the 32-bit section will be empty. + file.seek(timecnt * 5 + typecnt * 6 + charcnt + leapcnt * 8 + ttisstdcnt + ttisutccnt, IO::SEEK_CUR) + + prev_version = version + + magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt = + check_read(file, 44).unpack('a4 a x15 NNNNNN') + + unless magic == 'TZif' && (version == prev_version) + raise InvalidZoneinfoFile, "The file '#{file.path}' contains an invalid 64-bit section header." + end + + using_64bit = true + elsif version != '3' && version != '2' && version != "\0" + raise InvalidZoneinfoFile, "The file '#{file.path}' contains a version of the zoneinfo format that is not currently supported." + else + using_64bit = false + end + + unless leapcnt == 0 + raise InvalidZoneinfoFile, "The zoneinfo file '#{file.path}' contains leap second data. TZInfo requires zoneinfo files that omit leap seconds." + end + + transitions = [] + + if using_64bit + timecnt.times do |i| + high, low = check_read(file, 8).unpack('NN'.freeze) + transition_time = make_signed_int64(high, low) + transitions << {:at => transition_time} + end + else + timecnt.times do |i| + transition_time = make_signed_int32(check_read(file, 4).unpack('N'.freeze)[0]) + transitions << {:at => transition_time} + end + end + + timecnt.times do |i| + localtime_type = check_read(file, 1).unpack('C'.freeze)[0] + transitions[i][:offset] = localtime_type + end + + offsets = [] + + typecnt.times do |i| + gmtoff, isdst, abbrind = check_read(file, 6).unpack('NCC'.freeze) + gmtoff = make_signed_int32(gmtoff) + isdst = isdst == 1 + offset = {:utc_total_offset => gmtoff, :is_dst => isdst, :abbr_index => abbrind} + + unless isdst + offset[:utc_offset] = gmtoff + end + + offsets << offset + end + + abbrev = check_read(file, charcnt) + + if using_64bit + # Skip to the POSIX-style TZ string. 
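+          # Added commentary (not in the original source): TZif version 2 and
+          # 3 files end with a footer consisting of a newline, a POSIX-style
+          # TZ string and a terminating newline; the seek below skips the
+          # std/wall and UT/local indicator bytes so that footer can be read.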
+ file.seek(ttisstdcnt + ttisutccnt, IO::SEEK_CUR) # + leapcnt * 8, but leapcnt is checked above and guaranteed to be 0. + tz_string_start = check_read(file, 1) + raise InvalidZoneinfoFile, "Expected newline starting POSIX-style TZ string in file '#{file.path}'." unless tz_string_start == "\n" + tz_string = RubyCoreSupport.force_encoding(file.readline("\n"), 'UTF-8') + raise InvalidZoneinfoFile, "Expected newline ending POSIX-style TZ string in file '#{file.path}'." unless tz_string.chomp!("\n") + + begin + rules = posix_tz_parser.parse(tz_string) + rescue InvalidPosixTimeZone => e + raise InvalidZoneinfoFile, "Failed to parse POSIX-style TZ string in file '#{file.path}': #{e}" + end + else + rules = nil + end + + offsets.each do |o| + abbrev_start = o[:abbr_index] + raise InvalidZoneinfoFile, "Abbreviation index is out of range in file '#{file.path}'" unless abbrev_start < abbrev.length + + abbrev_end = abbrev.index("\0", abbrev_start) + raise InvalidZoneinfoFile, "Missing abbreviation null terminator in file '#{file.path}'" unless abbrev_end + + o[:abbr] = RubyCoreSupport.force_encoding(abbrev[abbrev_start...abbrev_end], 'UTF-8') + end + + transitions.each do |t| + if t[:offset] < 0 || t[:offset] >= offsets.length + raise InvalidZoneinfoFile, "Invalid offset referenced by transition in file '#{file.path}'." + end + end + + # Derive the offsets from standard time (std_offset). + first_offset_index = derive_offsets(transitions, offsets) + + # Filter out transitions that are not supported by Time on this + # platform. + unless transitions.empty? + if !RubyCoreSupport.time_supports_negative + transitions = remove_unsupported_negative_transitions(transitions, 0) + elsif !RubyCoreSupport.time_supports_64bit + transitions = remove_unsupported_negative_transitions(transitions, -2**31) + else + # Ignore transitions that occur outside of a defined window. The + # transition index cannot handle a large range of transition times. + # + # This is primarily intended to ignore the far in the past + # transition added in zic 2014c (at timestamp -2**63 in zic 2014c + # and at the approximate time of the big bang from zic 2014d). + # + # Assumes MIN_TIMESTAMP is less than -2**31. + transitions = remove_unsupported_negative_transitions(transitions, MIN_TIMESTAMP) + end + + if !RubyCoreSupport.time_supports_64bit + i = transitions.find_index {|t| t[:at] >= 2**31 } + had_later_transition = !!i + transitions = transitions.first(i) if i + else + had_later_transition = false + end + end + + if rules && !had_later_transition + if transitions.empty? + transitions = apply_rules_without_transitions(file, offsets, first_offset_index, rules) + else + apply_rules_with_transitions(file, transitions, offsets, first_offset_index, rules) + end + end + + define_offset(first_offset_index, offsets[first_offset_index]) + + used_offset_indexes = transitions.map {|t| t[:offset] }.to_set + + offsets.each_with_index do |o, i| + define_offset(i, o) if i != first_offset_index && used_offset_indexes.include?(i) + end + + # Ignore transitions that occur outside of a defined window. The + # transition index cannot handle a large range of transition times. 
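+        # Added commentary (not in the original source): as a consequence, the
+        # loop below stops emitting transitions once a timestamp reaches
+        # MAX_TIMESTAMP (Time.utc(2500, 1, 1) as defined above).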
+ transitions.each do |t| + at = t[:at] + break if at >= MAX_TIMESTAMP + time = Time.at(at).utc + transition time.year, time.mon, t[:offset], at + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_annual_rules.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_annual_rules.rb new file mode 100644 index 0000000..610d878 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_annual_rules.rb @@ -0,0 +1,95 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCAnnualRules < Minitest::Test + + def test_initialize + std_offset = TimezoneOffset.new(0, 0, 'GMT') + dst_offset = TimezoneOffset.new(0, 3600, 'BST') + dst_start_rule = FakeAlwaysDateAdjustmentRule.new(3, 1) + dst_end_rule = FakeAlwaysDateAdjustmentRule.new(10, 1) + + rules = AnnualRules.new(std_offset, dst_offset, dst_start_rule, dst_end_rule) + assert_same(std_offset, rules.std_offset) + assert_same(dst_offset, rules.dst_offset) + assert_same(dst_start_rule, rules.dst_start_rule) + assert_same(dst_end_rule, rules.dst_end_rule) + end + + [2020, 2021].each do |year| + define_method "test_transitions_for_dst_mid_year_#{year}" do + std_offset = TimezoneOffset.new(3600, 0, 'TEST') + dst_offset = TimezoneOffset.new(3600, 3600, 'TESTS') + + rules = AnnualRules.new( + std_offset, + dst_offset, + FakeAlwaysDateAdjustmentRule.new(3, 21), + FakeAlwaysDateAdjustmentRule.new(10, 22) + ) + + result = rules.transitions(year) + + expected = [ + AnnualRules::Transition.new(dst_offset, std_offset, TimeOrDateTime.wrap(Time.utc(year, 3, 21, 0, 0, 0) - 3600)), + AnnualRules::Transition.new(std_offset, dst_offset, TimeOrDateTime.wrap(Time.utc(year, 10, 22, 0, 0, 0) - 7200)) + ] + + assert_equal(expected, result) + end + + define_method "test_transitions_for_dst_start_and_end_of_year_#{year}" do + std_offset = TimezoneOffset.new(3600, 0, 'TEST') + dst_offset = TimezoneOffset.new(3600, 3600, 'TESTS') + + rules = AnnualRules.new( + std_offset, + dst_offset, + FakeAlwaysDateAdjustmentRule.new(10, 22), + FakeAlwaysDateAdjustmentRule.new(3, 21) + ) + + result = rules.transitions(year) + + expected = [ + AnnualRules::Transition.new(std_offset, dst_offset, TimeOrDateTime.wrap(Time.utc(year, 3, 21, 0, 0, 0) - 7200)), + AnnualRules::Transition.new(dst_offset, std_offset, TimeOrDateTime.wrap(Time.utc(year, 10, 22, 0, 0, 0) - 3600)) + ] + + assert_equal(expected, result) + end + + define_method "test_transitions_for_negative_dst_start_and_end_of_year_#{year}" do + std_offset = TimezoneOffset.new(7200, 0, 'TEST') + dst_offset = TimezoneOffset.new(7200, -3600, 'TESTW') + + rules = AnnualRules.new( + std_offset, + dst_offset, + FakeAlwaysDateAdjustmentRule.new(10, 22), + FakeAlwaysDateAdjustmentRule.new(3, 21) + ) + + result = rules.transitions(year) + + expected = [ + AnnualRules::Transition.new(std_offset, dst_offset, TimeOrDateTime.wrap(Time.utc(year, 3, 21, 0, 0, 0) - 3600)), + AnnualRules::Transition.new(dst_offset, std_offset, TimeOrDateTime.wrap(Time.utc(year, 10, 22, 0, 0, 0) - 7200)) + ] + + assert_equal(expected, result) + end + end + + class FakeAlwaysDateAdjustmentRule + def initialize(month, day) + @month = month + @day = day + end + + def at(offset, year) + TimeOrDateTime.wrap(Time.utc(year, @month, @day, 0, 0, 0) - offset.utc_total_offset) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country.rb new file mode 100644 index 0000000..faa3bb5 --- /dev/null 
+++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country.rb @@ -0,0 +1,238 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. +send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCCountry < Minitest::Test + def setup + @orig_data_source = DataSource.get + Country.send :init_countries + end + + def teardown + DataSource.set(@orig_data_source) + end + + def test_get_valid + c = Country.get('GB') + + assert c + assert_equal('GB', c.code) + end + + def test_get_not_exist + assert_raises(InvalidCountryCode) { + Country.get('ZZ') + } + end + + def test_get_invalid + assert_raises(InvalidCountryCode) { + Country.get('../Countries/GB') + } + end + + def test_get_nil + assert_raises(InvalidCountryCode) { + Country.get(nil) + } + end + + def test_get_case + assert_raises(InvalidCountryCode) { + Country.get('gb') + } + end + + def test_get_tainted_loaded + Country.get('GB') + + safe_test(:unavailable => :skip) do + code = 'GB'.dup.taint + assert(code.tainted?) + country = Country.get(code) + assert_equal('GB', country.code) + assert(code.tainted?) + end + end + + def test_get_tainted_and_frozen_loaded + Country.get('GB') + + safe_test do + country = Country.get('GB'.dup.taint.freeze) + assert_equal('GB', country.code) + end + end + + def test_get_tainted_not_previously_loaded + safe_test(:unavailable => :skip) do + code = 'GB'.dup.taint + assert(code.tainted?) + country = Country.get(code) + assert_equal('GB', country.code) + assert(code.tainted?) + end + end + + def test_get_tainted_and_frozen_not_previously_loaded + safe_test do + country = Country.get('GB'.dup.taint.freeze) + assert_equal('GB', country.code) + end + end + + def test_new_nil + assert_raises(InvalidCountryCode) { + Country.new(nil) + } + end + + def test_new_arg + c = Country.new('GB') + assert_same(Country.get('GB'), c) + end + + def test_new_arg_not_exist + assert_raises(InvalidCountryCode) { + Country.new('ZZ') + } + end + + def test_all_codes + all_codes = Country.all_codes + assert_kind_of(Array, all_codes) + end + + def test_all + all = Country.all + assert_equal(Country.all_codes, all.collect {|c| c.code}) + end + + def test_code + assert_equal('US', Country.get('US').code) + end + + def test_name + assert_kind_of(String, Country.get('US').name) + end + + def test_to_s + assert_equal(Country.get('US').name, Country.get('US').to_s) + assert_equal(Country.get('GB').name, Country.get('GB').to_s) + end + + def test_zone_identifiers + zone_names = Country.get('US').zone_names + assert_kind_of(Array, zone_names) + assert_equal(true, zone_names.frozen?) + end + + def test_zone_names + assert_equal(Country.get('US').zone_identifiers, Country.get('US').zone_names) + end + + def test_zones + zones = Country.get('US').zones + assert_kind_of(Array, zones) + assert_equal(Country.get('US').zone_identifiers, zones.collect {|z| z.identifier}) + + zones.each {|z| assert_kind_of(TimezoneProxy, z)} + end + + def test_zone_info + zones = Country.get('US').zone_info + assert_kind_of(Array, zones) + assert_equal(true, zones.frozen?) 
+ + assert_equal(Country.get('US').zone_identifiers, zones.collect {|z| z.identifier}) + assert_equal(Country.get('US').zone_identifiers, zones.collect {|z| z.timezone.identifier}) + + zones.each {|z| assert_kind_of(CountryTimezone, z)} + end + + def test_compare + assert_equal(0, Country.get('GB') <=> Country.get('GB')) + assert_equal(-1, Country.get('GB') <=> Country.get('US')) + assert_equal(1, Country.get('US') <=> Country.get('GB')) + assert_equal(-1, Country.get('FR') <=> Country.get('US')) + assert_equal(1, Country.get('US') <=> Country.get('FR')) + end + + def test_compare_non_comparable + assert_nil(Country.get('GB') <=> Object.new) + end + + def test_equality + assert_equal(true, Country.get('GB') == Country.get('GB')) + assert_equal(false, Country.get('GB') == Country.get('US')) + assert(!(Country.get('GB') == Object.new)) + end + + def test_eql + assert_equal(true, Country.get('GB').eql?(Country.get('GB'))) + assert_equal(false, Country.get('GB').eql?(Country.get('US'))) + assert(!Country.get('GB').eql?(Object.new)) + end + + def test_hash + assert_equal('GB'.hash, Country.get('GB').hash) + assert_equal('US'.hash, Country.get('US').hash) + end + + def test_marshal + c = Country.get('US') + + # Should get back the same instance because load calls Country.get. + assert_same(c, Marshal.load(Marshal.dump(c))) + end + + def test_reload + # If country gets reloaded for some reason, it needs to force a reload of + # the country index. + + assert_equal('US', Country.get('US').code) + + # Suppress redefined method warnings. + without_warnings do + load 'tzinfo/country.rb' + end + + assert_equal('US', Country.get('US').code) + end + + def test_get_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.get('GB') + end + end + + def test_new_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.new('GB') + end + end + + def test_all_codes_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.all_codes + end + end + + def test_all_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Country.all + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_index_definition.rb new file mode 100644 index 0000000..16f76e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_index_definition.rb @@ -0,0 +1,69 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCCountryIndexDefinition < Minitest::Test + + module CountriesTest1 + include CountryIndexDefinition + + country 'ZZ', 'Country One' do |c| + c.timezone 'Test/Zone/1', 3, 2, 41,20 + end + + country 'AA', 'Aland' do |c| + c.timezone 'Test/Zone/3', 71,30, 358, 15 + c.timezone 'Test/Zone/2', 41, 20, 211, 30 + end + + country 'TE', 'Three' + end + + module CountriesTest2 + include CountryIndexDefinition + + country 'CO', 'First Country' do |c| + end + end + + def test_module_1 + hash = CountriesTest1.countries + assert_equal(3, hash.length) + assert_equal(true, hash.frozen?) 
+ + zz = hash['ZZ'] + aa = hash['AA'] + te = hash['TE'] + + assert_kind_of(RubyCountryInfo, zz) + assert_equal('ZZ', zz.code) + assert_equal('Country One', zz.name) + assert_equal(1, zz.zones.length) + assert_equal('Test/Zone/1', zz.zones[0].identifier) + + assert_kind_of(RubyCountryInfo, aa) + assert_equal('AA', aa.code) + assert_equal('Aland', aa.name) + assert_equal(2, aa.zones.length) + assert_equal('Test/Zone/3', aa.zones[0].identifier) + assert_equal('Test/Zone/2', aa.zones[1].identifier) + + assert_kind_of(RubyCountryInfo, te) + assert_equal('TE', te.code) + assert_equal('Three', te.name) + assert_equal(0, te.zones.length) + end + + def test_module_2 + hash = CountriesTest2.countries + assert_equal(1, hash.length) + assert_equal(true, hash.frozen?) + + co = hash['CO'] + + assert_kind_of(RubyCountryInfo, co) + assert_equal('CO', co.code) + assert_equal('First Country', co.name) + assert_equal(0, co.zones.length) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_info.rb new file mode 100644 index 0000000..12002d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_info.rb @@ -0,0 +1,16 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCCountryInfo < Minitest::Test + + def test_code + ci = CountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('ZZ', ci.code) + end + + def test_name + ci = CountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('Zzz', ci.name) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_timezone.rb new file mode 100644 index 0000000..77fd7d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_country_timezone.rb @@ -0,0 +1,173 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCCountryTimezone < Minitest::Test + def test_identifier_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_equal('Europe/London', ct.identifier) + end + + def test_identifier_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal('Europe/London', ct.identifier) + end + + def test_latitude_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_equal(Rational(2059, 40), ct.latitude) + end + + def test_latitude_after_freeze_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct.freeze + assert_equal(Rational(2059, 40), ct.latitude) + end + + def test_latitude_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal(Rational(2059, 40), ct.latitude) + end + + def test_longitude_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_equal(Rational(-5, 16), ct.longitude) + end + + def test_longitude_after_freeze_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct.freeze + assert_equal(Rational(-5, 16), ct.longitude) + end + + def test_longitude_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal(Rational(-5, 16), ct.longitude) + end + + def test_description_omit_new! 
+ ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + assert_nil(ct.description) + end + + def test_description_omit_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_nil(ct.description) + end + + def test_description_nil_new! + ct = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16, nil) + assert_nil(ct.description) + end + + def test_description_nil_new + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16), nil) + assert_nil(ct.description) + end + + def test_description_new! + ct = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + assert_equal('Eastern Time', ct.description) + end + + def test_description_new + ct = CountryTimezone.new('America/New_York', Rational(48857, 1200), Rational(-266423, 3600), 'Eastern Time') + assert_equal('Eastern Time', ct.description) + end + + def test_timezone + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_kind_of(TimezoneProxy, ct.timezone) + assert_equal('Europe/London', ct.timezone.identifier) + end + + def test_description_or_friendly_idenfier_no_description + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + assert_equal('London', ct.description_or_friendly_identifier) + end + + def test_description_or_friendly_idenfier_description + ct = CountryTimezone.new('America/New_York', Rational(48857, 1200), Rational(-266423, 3600), 'Eastern Time') + assert_equal('Eastern Time', ct.description_or_friendly_identifier) + end + + def test_equality_1 + ct1 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct2 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct3 = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + ct4 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16, 'Description') + ct5 = CountryTimezone.new!('Europe/LondonB', 2059, 40, -5, 16) + ct6 = CountryTimezone.new!('Europe/London', 2060, 40, -5, 16) + ct7 = CountryTimezone.new!('Europe/London', 2059, 40, -6, 16) + + assert_equal(true, ct1 == ct1) + assert_equal(true, ct1 == ct2) + assert_equal(true, ct1 == ct3) + assert_equal(false, ct1 == ct4) + assert_equal(false, ct1 == ct5) + assert_equal(false, ct1 == ct6) + assert_equal(false, ct1 == ct7) + end + + def test_equality_2 + ct1 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + ct2 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time2') + + assert_equal(true, ct1 == ct1) + assert_equal(false, ct1 == ct2) + end + + def test_equality_non_country_timezone + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + + assert_equal(false, ct == Object.new) + end + + def test_eql_1 + ct1 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct2 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct3 = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + ct4 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16, 'Description') + ct5 = CountryTimezone.new!('Europe/LondonB', 2059, 40, -5, 16) + ct6 = CountryTimezone.new!('Europe/London', 2060, 40, -5, 16) + ct7 = CountryTimezone.new!('Europe/London', 2059, 40, -6, 16) + + assert_equal(true, ct1.eql?(ct1)) + assert_equal(true, ct1.eql?(ct2)) + assert_equal(true, ct1.eql?(ct3)) + assert_equal(false, ct1.eql?(ct4)) + assert_equal(false, ct1.eql?(ct5)) + assert_equal(false, ct1.eql?(ct6)) + assert_equal(false, ct1.eql?(ct7)) + end + 
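+  # Added illustrative note (not part of the original test suite): the
+  # equality assertions above rely on CountryTimezone.new taking Rational
+  # coordinates while CountryTimezone.new! takes numerator/denominator pairs,
+  # so the following two constructions describe the same zone:
+  #
+  #   CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16))
+  #   CountryTimezone.new!('Europe/London', 2059, 40, -5, 16)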
+ def test_eql_2 + ct1 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + ct2 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time2') + + assert_equal(true, ct1.eql?(ct1)) + assert_equal(false, ct1.eql?(ct2)) + end + + def test_eql_non_country_timezone + ct = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + + assert_equal(false, ct.eql?(Object.new)) + end + + def test_hash_new! + ct1 = CountryTimezone.new!('Europe/London', 2059, 40, -5, 16) + ct2 = CountryTimezone.new!('America/New_York', 48857, 1200, -266423, 3600, 'Eastern Time') + + assert_equal('Europe/London'.hash ^ 2059.hash ^ 40.hash ^ -5.hash ^ 16.hash ^ nil.hash, ct1.hash) + assert_equal('America/New_York'.hash ^ 48857.hash ^ 1200.hash ^ -266423.hash ^ 3600.hash ^ 'Eastern Time'.hash, ct2.hash) + end + + def test_hash_new + ct1 = CountryTimezone.new('Europe/London', Rational(2059, 40), Rational(-5, 16)) + ct2 = CountryTimezone.new('America/New_York', Rational(48857, 1200), Rational(-266423, 3600), 'Eastern Time') + + assert_equal('Europe/London'.hash ^ 2059.hash ^ 40.hash ^ -5.hash ^ 16.hash ^ nil.hash, ct1.hash) + assert_equal('America/New_York'.hash ^ 48857.hash ^ 1200.hash ^ -266423.hash ^ 3600.hash ^ 'Eastern Time'.hash, ct2.hash) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_source.rb new file mode 100644 index 0000000..3d11ffc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_source.rb @@ -0,0 +1,218 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'tmpdir' + +include TZInfo + +class TCDataSource < Minitest::Test + class InitDataSource < DataSource + end + + class DummyDataSource < DataSource + end + + def setup + @orig_data_source = DataSource.get + DataSource.set(InitDataSource.new) + @orig_search_path = ZoneinfoDataSource.search_path.clone + end + + def teardown + DataSource.set(@orig_data_source) + ZoneinfoDataSource.search_path = @orig_search_path + end + + def test_get + data_source = DataSource.get + assert_kind_of(InitDataSource, data_source) + end + + def test_get_default_ruby_only + code = <<-EOF + require 'tmpdir' + + begin + Dir.mktmpdir('tzinfo_test_dir') do |dir| + TZInfo::ZoneinfoDataSource.search_path = [dir] + + puts TZInfo::DataSource.get.class + end + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns(['TZInfo::RubyDataSource'], code, [TZINFO_TEST_DATA_DIR]) + end + + def test_get_default_zoneinfo_only + code = <<-EOF + require 'tmpdir' + + begin + Dir.mktmpdir('tzinfo_test_dir') do |dir| + TZInfo::ZoneinfoDataSource.search_path = [dir, '#{TZINFO_TEST_ZONEINFO_DIR}'] + + puts TZInfo::DataSource.get.class + puts TZInfo::DataSource.get.zoneinfo_dir + end + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns( + ['TZInfo::ZoneinfoDataSource', TZINFO_TEST_ZONEINFO_DIR], + code) + end + + def test_get_default_ruby_and_zoneinfo + code = <<-EOF + begin + TZInfo::ZoneinfoDataSource.search_path = ['#{TZINFO_TEST_ZONEINFO_DIR}'] + + puts TZInfo::DataSource.get.class + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns(['TZInfo::RubyDataSource'], code, [TZINFO_TEST_DATA_DIR]) + end + + def test_get_default_no_data + code = <<-EOF + require 'tmpdir' + + begin + Dir.mktmpdir('tzinfo_test_dir') do |dir| + 
TZInfo::ZoneinfoDataSource.search_path = [dir] + + begin + data_source = TZInfo::DataSource.get + puts "No exception raised, returned \#{data_source} instead" + rescue Exception => e + puts e.class + end + end + rescue Exception => e + puts "Unexpected exception: \#{e}" + end + EOF + + assert_sub_process_returns(['TZInfo::DataSourceNotFound'], code) + end + + def test_set_instance + DataSource.set(DummyDataSource.new) + data_source = DataSource.get + assert_kind_of(DummyDataSource, data_source) + end + + def test_set_standard_ruby + DataSource.set(:ruby) + data_source = DataSource.get + assert_kind_of(RubyDataSource, data_source) + end + + def test_set_standard_zoneinfo_search + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone.tab')) + + ZoneinfoDataSource.search_path = [dir] + + DataSource.set(:zoneinfo) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_search_zone1970 + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone1970.tab')) + + ZoneinfoDataSource.search_path = [dir] + + DataSource.set(:zoneinfo) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_explicit + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone.tab')) + + DataSource.set(:zoneinfo, dir) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_explicit_zone1970 + Dir.mktmpdir('tzinfo_test_dir') do |dir| + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.touch(File.join(dir, 'zone.tab')) + + DataSource.set(:zoneinfo, dir) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_explicit_alternate_iso3166 + Dir.mktmpdir('tzinfo_test_dir') do |dir| + zoneinfo_dir = File.join(dir, 'zoneinfo') + tab_dir = File.join(dir, 'tab') + + FileUtils.mkdir(zoneinfo_dir) + FileUtils.mkdir(tab_dir) + + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + iso3166_file = File.join(tab_dir, 'iso3166.tab') + FileUtils.touch(iso3166_file) + + DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_file) + data_source = DataSource.get + assert_kind_of(ZoneinfoDataSource, data_source) + assert_equal(zoneinfo_dir, data_source.zoneinfo_dir) + end + end + + def test_set_standard_zoneinfo_search_not_found + Dir.mktmpdir('tzinfo_test_dir') do |dir| + ZoneinfoDataSource.search_path = [dir] + + assert_raises(ZoneinfoDirectoryNotFound) do + DataSource.set(:zoneinfo) + end + + assert_kind_of(InitDataSource, DataSource.get) + end + end + + def test_set_standard_zoneinfo_explicit_invalid + Dir.mktmpdir('tzinfo_test_dir') do |dir| + assert_raises(InvalidZoneinfoDirectory) do + DataSource.set(:zoneinfo, dir) + end + + assert_kind_of(InitDataSource, DataSource.get) + end + end + + def test_set_standard_zoneinfo_wrong_arg_count + assert_raises(ArgumentError) do + DataSource.set(:zoneinfo, 1, 2, 3) + end + + assert_kind_of(InitDataSource, DataSource.get) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_timezone.rb 
b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_timezone.rb new file mode 100644 index 0000000..15ee27b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_timezone.rb @@ -0,0 +1,99 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCDataTimezone < Minitest::Test + + class TestTimezoneInfo < TimezoneInfo + attr_reader :utc + attr_reader :local + attr_reader :utc_to + attr_reader :utc_from + + def initialize(identifier, utc_period, local_periods, transitions_up_to) + super(identifier) + @utc_period = utc_period + @local_periods = local_periods || [] + @transitions_up_to = transitions_up_to + end + + def period_for_utc(utc) + @utc = utc + @utc_period + end + + def periods_for_local(local) + @local = local + @local_periods + end + + def transitions_up_to(utc_to, utc_from = nil) + @utc_to = utc_to + @utc_from = utc_from + @transitions_up_to + end + end + + def test_identifier + tz = DataTimezone.new(TestTimezoneInfo.new('Test/Zone', nil, [], [])) + assert_equal('Test/Zone', tz.identifier) + end + + def test_period_for_utc + # Don't need actual TimezonePeriods. DataTimezone isn't supposed to do + # anything with them apart from return them. + period = Object.new + tti = TestTimezoneInfo.new('Test/Zone', period, [], []) + tz = DataTimezone.new(tti) + + t = Time.utc(2006, 6, 27, 22, 50, 12) + assert_same(period, tz.period_for_utc(t)) + assert_same(t, tti.utc) + end + + def test_periods_for_local + # Don't need actual TimezonePeriods. DataTimezone isn't supposed to do + # anything with them apart from return them. + periods = [Object.new, Object.new] + tti = TestTimezoneInfo.new('Test/Zone', nil, periods, []) + tz = DataTimezone.new(tti) + + t = Time.utc(2006, 6, 27, 22, 50, 12) + assert_same(periods, tz.periods_for_local(t)) + assert_same(t, tti.local) + end + + def test_periods_for_local_not_found + periods = [] + tti = TestTimezoneInfo.new('Test/Zone', nil, periods, []) + tz = DataTimezone.new(tti) + + t = Time.utc(2006, 6, 27, 22, 50, 12) + assert_same(periods, tz.periods_for_local(t)) + assert_same(t, tti.local) + end + + def test_transitions_up_to + # Don't need actual TimezoneTransition instances. DataTimezone isn't + # supposed to do anything with them apart from return them. 
+ transitions = [Object.new, Object.new] + tti = TestTimezoneInfo.new('Test/Zone', nil, nil, transitions) + tz = DataTimezone.new(tti) + + utc_to = Time.utc(2013, 1, 1, 0, 0, 0) + utc_from = Time.utc(2012, 1, 1, 0, 0, 0) + assert_same(transitions, tz.transitions_up_to(utc_to, utc_from)) + assert_same(utc_to, tti.utc_to) + assert_same(utc_from, tti.utc_from) + end + + def test_canonical_identifier + tz = DataTimezone.new(TestTimezoneInfo.new('Test/Zone', nil, [], [])) + assert_equal('Test/Zone', tz.canonical_identifier) + end + + def test_canonical_zone + tz = DataTimezone.new(TestTimezoneInfo.new('Test/Zone', nil, [], [])) + assert_same(tz, tz.canonical_zone) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_timezone_info.rb new file mode 100644 index 0000000..1b9d144 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_data_timezone_info.rb @@ -0,0 +1,18 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCDataTimezoneInfo < Minitest::Test + + def test_identifier + ti = DataTimezoneInfo.new('Test/Zone') + assert_equal('Test/Zone', ti.identifier) + end + + def test_construct_timezone + ti = DataTimezoneInfo.new('Test/Zone') + tz = ti.create_timezone + assert_kind_of(DataTimezone, tz) + assert_equal('Test/Zone', tz.identifier) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_info_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_info_timezone.rb new file mode 100644 index 0000000..c70a098 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_info_timezone.rb @@ -0,0 +1,34 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCInfoTimezone < Minitest::Test + + class TestInfoTimezone < InfoTimezone + attr_reader :setup_info + + protected + def setup(info) + super(info) + @setup_info = info + end + end + + def test_identifier + tz = InfoTimezone.new(TimezoneInfo.new('Test/Identifier')) + assert_equal('Test/Identifier', tz.identifier) + end + + def test_info + i = TimezoneInfo.new('Test/Identifier') + tz = InfoTimezone.new(i) + assert_same(i, tz.send(:info)) + end + + def test_setup + i = TimezoneInfo.new('Test/Identifier') + tz = TestInfoTimezone.new(i) + assert_same(i, tz.setup_info) + end +end + diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_linked_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_linked_timezone.rb new file mode 100644 index 0000000..5da9303 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_linked_timezone.rb @@ -0,0 +1,155 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCLinkedTimezone < Minitest::Test + + class TestTimezone < Timezone + attr_reader :utc_period + attr_reader :local_periods + attr_reader :up_to_transitions + attr_reader :utc + attr_reader :local + attr_reader :utc_to + attr_reader :utc_from + + def self.new(identifier, no_local_periods = false) + tz = super() + tz.send(:setup, identifier, no_local_periods) + tz + end + + def identifier + @identifier + end + + def period_for_utc(utc) + @utc = utc + @utc_period + end + + def periods_for_local(local) + @local = local + raise PeriodNotFound if @no_local_periods + @local_periods + end + + def transitions_up_to(utc_to, utc_from = nil) + @utc_to = utc_to + @utc_from = utc_from + @up_to_transitions + end + + def 
canonical_zone + self + end + + private + def setup(identifier, no_local_periods) + @identifier = identifier + @no_local_periods = no_local_periods + + # Don't have to be real TimezonePeriod or TimezoneTransition objects + # (nothing will use them). + @utc_period = Object.new + @local_periods = [Object.new, Object.new] + @up_to_transitions = [Object.new, Object.new] + end + end + + + def setup + # Redefine Timezone.get to return a fake timezone. + # Use without_warnings to suppress redefined get method warning. + without_warnings do + def Timezone.get(identifier) + raise InvalidTimezoneIdentifier, 'Invalid identifier' if identifier == 'Invalid/Identifier' + + @timezones ||= {} + @timezones[identifier] ||= + identifier == 'Test/Recursive/Linked' ? + LinkedTimezone.new(LinkedTimezoneInfo.new(identifier, 'Test/Recursive/Data')) : + TestTimezone.new(identifier, identifier == 'Test/No/Local') + end + end + end + + def teardown + # Re-require timezone to reset. + # Suppress redefined method warnings. + without_warnings do + load 'tzinfo/timezone.rb' + end + end + + def test_identifier + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + assert_equal('Test/Zone', tz.identifier) + end + + def test_invalid_linked_identifier + assert_raises(InvalidTimezoneIdentifier) { LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Invalid/Identifier')) } + end + + def test_period_for_utc + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + t = Time.utc(2006, 6, 27, 23, 12, 28) + assert_same(linked_tz.utc_period, tz.period_for_utc(t)) + assert_same(t, linked_tz.utc) + end + + def test_periods_for_local + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + t = Time.utc(2006, 6, 27, 23, 12, 28) + assert_same(linked_tz.local_periods, tz.periods_for_local(t)) + assert_same(t, linked_tz.local) + end + + def test_periods_for_local_not_found + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/No/Local')) + linked_tz = Timezone.get('Test/No/Local') + t = Time.utc(2006, 6, 27, 23, 12, 28) + assert_raises(PeriodNotFound) { tz.periods_for_local(t) } + assert_same(t, linked_tz.local) + end + + def test_transitions_up_to + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + utc_to = Time.utc(2013, 1, 1, 0, 0, 0) + utc_from = Time.utc(2012, 1, 1, 0, 0, 0) + assert_same(linked_tz.up_to_transitions, tz.transitions_up_to(utc_to, utc_from)) + assert_same(utc_to, linked_tz.utc_to) + assert_same(utc_from, linked_tz.utc_from) + end + + def test_canonical_identifier + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + assert_equal('Test/Linked', tz.canonical_identifier) + end + + def test_canonical_identifier_recursive + # Recursive links are not currently used in the Time Zone database, but + # will be supported by TZInfo. + + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Recursive/Linked')) + assert_equal('Test/Recursive/Data', tz.canonical_identifier) + end + + def test_canonical_zone + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked')) + linked_tz = Timezone.get('Test/Linked') + assert_same(linked_tz, tz.canonical_zone) + end + + def test_canonical_zone_recursive + # Recursive links are not currently used in the Time Zone database, but + # will be supported by TZInfo. 
+ + tz = LinkedTimezone.new(LinkedTimezoneInfo.new('Test/Zone', 'Test/Recursive/Linked')) + linked_tz = Timezone.get('Test/Recursive/Data') + assert_same(linked_tz, tz.canonical_zone) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_linked_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_linked_timezone_info.rb new file mode 100644 index 0000000..4e55f4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_linked_timezone_info.rb @@ -0,0 +1,23 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCLinkedTimezoneInfo < Minitest::Test + + def test_identifier + lti = LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked') + assert_equal('Test/Zone', lti.identifier) + end + + def test_link_to_identifier + lti = LinkedTimezoneInfo.new('Test/Zone', 'Test/Linked') + assert_equal('Test/Linked', lti.link_to_identifier) + end + + def test_construct_timezone + lti = LinkedTimezoneInfo.new('Test/Zone', 'Europe/London') + tz = lti.create_timezone + assert_kind_of(LinkedTimezone, tz) + assert_equal('Test/Zone', tz.identifier) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_offset_rationals.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_offset_rationals.rb new file mode 100644 index 0000000..2881fc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_offset_rationals.rb @@ -0,0 +1,23 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCOffsetRationals < Minitest::Test + def test_rational_for_offset + [0,1,2,3,4,-1,-2,-3,-4,30*60,-30*60,61*60,-61*60,14*60*60,-14*60*60,20*60*60,-20*60*60].each {|seconds| + assert_equal(Rational(seconds, 86400), OffsetRationals.rational_for_offset(seconds)) + } + end + + def test_rational_for_offset_constant + -28.upto(28) {|i| + seconds = i * 30 * 60 + + r1 = OffsetRationals.rational_for_offset(seconds) + r2 = OffsetRationals.rational_for_offset(seconds) + + assert_equal(Rational(seconds, 86400), r1) + assert_same(r1, r2) + } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_posix_time_zone_parser.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_posix_time_zone_parser.rb new file mode 100644 index 0000000..1e8f153 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_posix_time_zone_parser.rb @@ -0,0 +1,261 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCPosixTimeZoneParser < Minitest::Test + HOUR = 3600 + MINUTE = 60 + + class << self + private + + def append_time_to_rule(day_rule, time) + time ? 
"#{day_rule}/#{time}" : day_rule + end + + def define_invalid_dst_rule_tests(type, rule) + define_method "test_#{type}_dst_start_rule_for_invalid_#{rule}" do + tz_string = "STD-1DST,#{rule},300" + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + + define_method "test_#{type}_dst_end_rule_for_invalid_#{rule}" do + tz_string = "STD-1DST,60,#{rule}" + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + end + end + + def setup + @parser = PosixTimeZoneParser.new + end + + def test_empty_rule_returns_nil + result = @parser.parse('') + assert_nil(result) + end + + ABBREVIATIONS_WITH_OFFSETS = [ + ['UTC0', :UTC, 0], + ['U0', :U, 0], + ['West1', :West, -HOUR], + ['East-1', :East, HOUR], + ['<-05>5', :'-05', -5 * HOUR], + ['<+12>-12', :'+12', 12 * HOUR], + ['HMM2:30', :HMM, -(2 * HOUR + 30 * MINUTE)], + ['HHMM02:30', :HHMM, -(2 * HOUR + 30 * MINUTE)], + ['HHMM+02:30', :HHMM, -(2 * HOUR + 30 * MINUTE)], + ['HHMSS-12:5:50', :HHMSS, 12 * HOUR + 5 * MINUTE + 50], + ['HHMMSS-12:05:50', :HHMMSS, 12 * HOUR + 5 * MINUTE + 50], + ['HHMMS-12:05:7', :HHMMS, 12 * HOUR + 5 * MINUTE + 7] + ] + + ABBREVIATIONS_WITH_OFFSETS.each do |(tz_string, expected_abbrev, expected_base_offset)| + define_method "test_std_only_returns_std_offset_#{tz_string}" do + result = @parser.parse(tz_string) + expected = TimezoneOffset.new(expected_base_offset, 0, expected_abbrev) + assert_equal(expected, result) + end + end + + ABBREVIATIONS_WITH_OFFSETS.each do |(abbrev_and_offset, expected_abbrev, expected_base_offset)| + define_method "test_std_offset_#{abbrev_and_offset}" do + result = @parser.parse(abbrev_and_offset + 'DST,60,300') + expected_std_offset = TimezoneOffset.new(expected_base_offset, 0, expected_abbrev) + assert_equal(expected_std_offset, result.std_offset) + end + end + + [ + ['Zero0One-1', :One, 0, HOUR], + ['Zero0One', :One, 0, HOUR], + ['Z0O', :O, 0, HOUR], + ['West1WestS0', :WestS, -HOUR, HOUR], + ['West1WestS', :WestS, -HOUR, HOUR], + ['East-1EastS-2', :EastS, HOUR, HOUR], + ['East-1EastS', :EastS, HOUR, HOUR], + ['Neg2NegS3', :NegS, -2 * HOUR, -HOUR], + ['<-05>5<-04>4', :'-04', -5 * HOUR, HOUR], + ['STD5<-04>4', :'-04', -5 * HOUR, HOUR], + ['<+12>-12<+13>-13', :'+13', 12 * HOUR, HOUR], + ['STD-12<+13>-13', :'+13', 12 * HOUR, HOUR], + ['HMM2:30SHMM1:15', :SHMM, -(2 * HOUR + 30 * MINUTE), HOUR + 15 * MINUTE], + ['HHMM02:30SHHMM01:15', :SHHMM, -(2 * HOUR + 30 * MINUTE), HOUR + 15 * MINUTE], + ['HHMM+02:30SHHMM+01:15', :SHHMM, -(2 * HOUR + 30 * MINUTE), HOUR + 15 * MINUTE], + ['HHMSS-12:5:50SHHMSS-13:4:30', :SHHMSS, 12 * HOUR + 5 * MINUTE + 50, 58 * MINUTE + 40], + ['HHMMSS-12:05:50SHHMMSS-13:04:30', :SHHMMSS, 12 * HOUR + 5 * MINUTE + 50, 58 * MINUTE + 40], + ['HHMMS-12:05:7SHHMMSS-13:06:8', :SHHMMSS, 12 * HOUR + 5 * MINUTE + 7, HOUR + MINUTE + 1], + ].each do |(abbrevs_and_offsets, expected_abbrev, expected_base_offset, expected_std_offset)| + define_method "test_dst_offset_#{abbrevs_and_offsets}" do + result = @parser.parse(abbrevs_and_offsets + ',60,300') + expected_dst_offset = TimezoneOffset.new(expected_base_offset, expected_std_offset, expected_abbrev) + assert_equal(expected_dst_offset, result.dst_offset) + end + end + + ['<M-1>01:-1', '<M60>01:60', '<S-1>01:00:-1', '<S60>01:00:60'].each do |abbrev_and_offset| + ['', 'DST,60,300'].each do |dst_suffix| + tz_string = abbrev_and_offset + dst_suffix + define_method "test_std_offset_invalid_#{tz_string}" do + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + end + + tz_string = 
"STD1#{abbrev_and_offset},60,300" + define_method "test_dst_offset_invalid_#{tz_string}" do + assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + end + end + + [ + [nil, 2 * HOUR], + ['2', 2 * HOUR], + ['+2', 2 * HOUR], + ['-2', -2 * HOUR], + ['2:3:4', 2 * HOUR + 3 * MINUTE + 4], + ['02:03:04', 2 * HOUR + 3 * MINUTE + 4], + ['-2:3:4', -2 * HOUR + 3 * MINUTE + 4], # 22:03:04 on the day prior to the one specified + ['-02:03:04', -2 * HOUR + 3 * MINUTE + 4], + ['167', 167 * HOUR], + ['-167', -167 * HOUR] + ].each do |(time, expected_offset_from_midnight)| + [ + ['J1', 1], + ['J365', 365] + ].each do |(julian_day_rule, expected_julian_day)| + rule = append_time_to_rule(julian_day_rule, time) + + define_method "test_julian_day_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},300") + expected_dst_start_rule = JulianDayOfYearTransitionRule.new(expected_julian_day, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_julian_day_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,60,#{rule}") + expected_dst_end_rule = JulianDayOfYearTransitionRule.new(expected_julian_day, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + + [ + ['0', 0], + ['365', 365] + ].each do |(absolute_day_rule, expected_day)| + rule = append_time_to_rule(absolute_day_rule, time) + + define_method "test_absolute_day_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},J300") + expected_dst_start_rule = AbsoluteDayOfYearTransitionRule.new(expected_day, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_absolute_day_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,J60,#{rule}") + expected_dst_end_rule = AbsoluteDayOfYearTransitionRule.new(expected_day, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + + [ + ['M1.1.0', 1, 1, 0], + ['M12.4.6', 12, 4, 6] + ].each do |(day_of_month_rule, expected_month, expected_week, expected_day_of_week)| + rule = append_time_to_rule(day_of_month_rule, time) + + define_method "test_day_of_month_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},300") + expected_dst_start_rule = DayOfMonthTransitionRule.new(expected_month, expected_week, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_day_of_month_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,60,#{rule}") + expected_dst_end_rule = DayOfMonthTransitionRule.new(expected_month, expected_week, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + + [ + ['M1.5.0', 1, 0], + ['M12.5.6', 12, 6] + ].each do |(last_day_of_month_rule, expected_month, expected_day_of_week)| + rule = append_time_to_rule(last_day_of_month_rule, time) + + define_method "test_last_day_of_month_dst_start_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,#{rule},300") + expected_dst_start_rule = LastDayOfMonthTransitionRule.new(expected_month, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_start_rule, result.dst_start_rule) + end + + define_method "test_last_day_of_month_dst_end_rule_for_#{rule}" do + result = @parser.parse("STD-1DST,60,#{rule}") + expected_dst_end_rule = 
LastDayOfMonthTransitionRule.new(expected_month, expected_day_of_week, expected_offset_from_midnight) + assert_equal(expected_dst_end_rule, result.dst_end_rule) + end + end + end + + ['J0', 'J366'].each do |julian_day_rule| + define_invalid_dst_rule_tests('julian_day', julian_day_rule) + end + + ['-1', '366'].each do |absolute_day_rule| + define_invalid_dst_rule_tests('absolute_day', absolute_day_rule) + end + + ['M0,1,0', 'M13,1,0', 'M6,0,0', 'M6,6,0', 'M6,1,-1', 'M6,1,7'].each do |day_of_month_rule| + define_invalid_dst_rule_tests('day_of_month', day_of_month_rule) + end + + ['M0,5,0', 'M13,5,0', 'M6,5,-1', 'M6,5,7'].each do |last_day_of_month_rule| + define_invalid_dst_rule_tests('last_day_of_month', last_day_of_month_rule) + end + + def test_invalid_dst_start_rule + assert_raises(InvalidPosixTimeZone) { @parser.parse('STD1DST,X60,300') } + end + + def test_invalid_dst_end_rule + assert_raises(InvalidPosixTimeZone) { @parser.parse('STD1DST,60,X300') } + end + + [ + ['STD5DST,0/0,J365/25', :DST, -5 * HOUR, HOUR], + ['STD-5DST,0/0,J365/25', :DST, 5 * HOUR, HOUR], + ['STD5DST3,0/0,J365/26', :DST, -5 * HOUR, 2 * HOUR], + ['STD5DST6,0/0,J365/23', :DST, -5 * HOUR, -HOUR], + ['STD5DST,J1/0,J365/25', :DST, -5 * HOUR, HOUR], + ['Winter5Summer,0/0,J365/25', :Summer, -5 * HOUR, HOUR], + ['<-05>5<-06>,0/0,J365/25', :'-06', -5 * HOUR, HOUR] + ].each do |(tz_string, expected_abbrev, expected_base_offset, expected_std_offset)| + define_method "test_dst_only_returns_continuous_offset_for_#{tz_string}" do + result = @parser.parse(tz_string) + expected = TimezoneOffset.new(expected_base_offset, expected_std_offset, expected_abbrev) + assert_equal(expected, result) + end + end + + def test_parses_tainted_string_in_safe_mode_and_returns_untainted_abbreviations + safe_test(:unavailable => :skip) do + result = @parser.parse('STD1DST,60,300'.dup.taint) + + assert_equal(:STD, result.std_offset.abbreviation) + assert_equal(:DST, result.dst_offset.abbreviation) + end + end + + ['STD1', 'STD1DST,60,300'].each do |tz_string| + tz_string += "-" + define_method "test_content_after_end_for_#{tz_string}" do + error = assert_raises(InvalidPosixTimeZone) { @parser.parse(tz_string) } + assert_equal("Expected the end of a POSIX-style time zone string but found '-'.", error.message) + end + end + + ['X', 0].each do |invalid_tz_string| + define_method "test_invalid_tz_string_#{invalid_tz_string}" do + assert_raises(InvalidPosixTimeZone) { @parser.parse(invalid_tz_string) } + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_core_support.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_core_support.rb new file mode 100644 index 0000000..56c5a8a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_core_support.rb @@ -0,0 +1,168 @@ +# encoding: UTF-8 + +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCRubyCoreSupport < Minitest::Test + def test_rational_new! + assert_equal(Rational(3,4), RubyCoreSupport.rational_new!(3,4)) + end + + def test_datetime_new! 
+ assert_equal(DateTime.new(2008,10,5,12,0,0, 0, Date::ITALY), RubyCoreSupport.datetime_new!(2454745,0,2299161)) + assert_equal(DateTime.new(2008,10,5,13,0,0, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(2454745,Rational(1, 24),2299161)) + + assert_equal(DateTime.new(2008,10,5,20,30,0, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(117827777, 48), 0, 2299161)) + assert_equal(DateTime.new(2008,10,5,21,30,0, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(117827777, 48), Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(2008,10,6,6,26,21, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(70696678127,28800), 0, 2299161)) + assert_equal(DateTime.new(2008,10,6,7,26,21, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(70696678127, 28800), Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(-4712,1,1,12,0,0, 0, Date::ITALY), RubyCoreSupport.datetime_new!(0, 0, 2299161)) + assert_equal(DateTime.new(-4712,1,1,13,0,0, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(0, Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(-4713,12,31,23,58,59, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-43261, 86400), 0, 2299161)) + assert_equal(DateTime.new(-4712,1,1,0,58,59, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-43261, 86400), Rational(1, 24), 2299161)) + + assert_equal(DateTime.new(-4713,12,30,23,58,59, 0, Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-129661, 86400), 0, 2299161)) + assert_equal(DateTime.new(-4713,12,31,0,58,59, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new!(Rational(-129661, 86400), Rational(1, 24), 2299161)) + end + + def test_datetime_new + assert_equal(DateTime.new(2012, 12, 31, 23, 59, 59, 0, Date::ITALY), RubyCoreSupport.datetime_new(2012, 12, 31, 23, 59, 59, 0, Date::ITALY)) + assert_equal(DateTime.new(2013, 2, 6, 23, 2, 36, Rational(1, 24), Date::ITALY), RubyCoreSupport.datetime_new(2013, 2, 6, 23, 2, 36, Rational(1,24), Date::ITALY)) + + assert_equal(DateTime.new(2012, 12, 31, 23, 59, 59, 0, Date::ITALY) + Rational(1, 86400000), RubyCoreSupport.datetime_new(2012, 12, 31, 23, 59, 59 + Rational(1, 1000), 0, Date::ITALY)) + assert_equal(DateTime.new(2001, 10, 12, 12, 22, 59, Rational(1, 24), Date::ITALY) + Rational(501, 86400000), RubyCoreSupport.datetime_new(2001, 10, 12, 12, 22, 59 + Rational(501, 1000), Rational(1, 24), Date::ITALY)) + end + + def test_time_supports_negative + if RubyCoreSupport.time_supports_negative + assert_equal(Time.utc(1969, 12, 31, 23, 59, 59), Time.at(-1).utc) + else + assert_raises(ArgumentError) do + Time.at(-1) + end + end + end + + def test_time_supports_64_bit + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(1901, 12, 13, 20, 45, 51), Time.at(-2147483649).utc) + assert_equal(Time.utc(2038, 1, 19, 3, 14, 8), Time.at(2147483648).utc) + else + assert_raises(RangeError) do + Time.at(-2147483649) + end + + assert_raises(RangeError) do + Time.at(2147483648) + end + end + end + + def test_time_nsec + t = Time.utc(2013, 2, 6, 21, 56, 23, 567890 + Rational(123,1000)) + + if t.respond_to?(:nsec) + assert_equal(567890123, RubyCoreSupport.time_nsec(t)) + else + assert_equal(567890000, RubyCoreSupport.time_nsec(t)) + end + end + + def test_force_encoding + s = [0xC2, 0xA9].pack('c2') + + if s.respond_to?(:force_encoding) + # Ruby 1.9+ - should call String#force_encoding + assert_equal('ASCII-8BIT', s.encoding.name) + assert_equal(2, s.bytesize) + result = RubyCoreSupport.force_encoding(s, 
'UTF-8') + assert_same(s, result) + assert_equal('UTF-8', s.encoding.name) + assert_equal(2, s.bytesize) + assert_equal(1, s.length) + assert_equal('©', s) + else + # Ruby 1.8 - no-op + result = RubyCoreSupport.force_encoding(s, 'UTF-8') + assert_same(s, result) + assert_equal('©', s) + end + end + + begin + SUPPORTS_ENCODING = !!Encoding + rescue NameError + SUPPORTS_ENCODING = false + end + + def check_open_file_test_file_bytes(test_file) + if SUPPORTS_ENCODING + File.open(test_file, 'r') do |file| + file.binmode + data = file.read(2) + refute_nil(data) + assert_equal(2, data.length) + bytes = data.unpack('C2') + assert_equal(0xC2, bytes[0]) + assert_equal(0xA9, bytes[1]) + end + end + end + + def check_open_file_test_file_content(file) + content = file.gets + refute_nil(content) + content.chomp! + + if SUPPORTS_ENCODING + assert_equal('UTF-8', content.encoding.name) + assert_equal(1, content.length) + assert_equal(2, content.bytesize) + assert_equal('©', content) + else + assert_equal('x', content) + end + end + + def test_open_file + Dir.mktmpdir('tzinfo_test') do |dir| + test_file = File.join(dir, 'test.txt') + + file = RubyCoreSupport.open_file(test_file, 'w', :external_encoding => 'UTF-8') + begin + file.puts(SUPPORTS_ENCODING ? '©' : 'x') + ensure + file.close + end + + check_open_file_test_file_bytes(test_file) + + file = RubyCoreSupport.open_file(test_file, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') + begin + check_open_file_test_file_content(file) + ensure + file.close + end + end + end + + def test_open_file_block + Dir.mktmpdir('tzinfo_test') do |dir| + test_file = File.join(dir, 'test.txt') + + RubyCoreSupport.open_file(test_file, 'w', :external_encoding => 'UTF-8') do |file| + file.puts(SUPPORTS_ENCODING ? '©' : 'x') + end + + check_open_file_test_file_bytes(test_file) + + RubyCoreSupport.open_file(test_file, 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + check_open_file_test_file_content(file) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_country_info.rb new file mode 100644 index 0000000..c99f09f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_country_info.rb @@ -0,0 +1,110 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCRubyCountryInfo < Minitest::Test + + def test_code + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('ZZ', ci.code) + end + + def test_name + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert_equal('Zzz', ci.name) + end + + def test_zone_identifiers_empty + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert(ci.zone_identifiers.empty?) + assert(ci.zone_identifiers.frozen?) + end + + def test_zone_identifiers_no_block + ci = RubyCountryInfo.new('ZZ', 'Zzz') + assert(ci.zone_identifiers.empty?) + assert(ci.zone_identifiers.frozen?) + end + + def test_zone_identifiers + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + assert(ci.zone_identifiers.frozen?) 
+ end + + def test_zone_identifiers_after_freeze + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + ci.freeze + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + end + + def test_zones_empty + ci = RubyCountryInfo.new('ZZ', 'Zzz') {|c| } + assert(ci.zones.empty?) + assert(ci.zones.frozen?) + end + + def test_zones_no_block + ci = RubyCountryInfo.new('ZZ', 'Zzz') + assert(ci.zones.empty?) + assert(ci.zones.frozen?) + end + + def test_zones + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + assert_equal([CountryTimezone.new!('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B'), + CountryTimezone.new!('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A'), + CountryTimezone.new!('ZZ/TimezoneC', -10, 3, -20, 7, 'C'), + CountryTimezone.new!('ZZ/TimezoneD', -10, 3, -20, 7)], + ci.zones) + assert(ci.zones.frozen?) + end + + def test_zones_after_freeze + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + c.timezone('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B') + c.timezone('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A') + c.timezone('ZZ/TimezoneC', -10, 3, -20, 7, 'C') + c.timezone('ZZ/TimezoneD', -10, 3, -20, 7) + end + + ci.freeze + + assert_equal([CountryTimezone.new!('ZZ/TimezoneB', 1, 2, 1, 2, 'Timezone B'), + CountryTimezone.new!('ZZ/TimezoneA', 1, 4, 1, 4, 'Timezone A'), + CountryTimezone.new!('ZZ/TimezoneC', -10, 3, -20, 7, 'C'), + CountryTimezone.new!('ZZ/TimezoneD', -10, 3, -20, 7)], + ci.zones) + end + + def test_deferred_evaluate + block_called = false + + ci = RubyCountryInfo.new('ZZ', 'Zzz') do |c| + block_called = true + end + + assert_equal(false, block_called) + ci.zones + assert_equal(true, block_called) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_data_source.rb new file mode 100644 index 0000000..790dd8e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_ruby_data_source.rb @@ -0,0 +1,167 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. +send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCRubyDataSource < Minitest::Test + def setup + @data_source = RubyDataSource.new + end + + def test_initialize_not_found + # A failure to load tzinfo/data in initialize will not cause an exception. + # Attempts to load data files will subsequently fail. 
+ code = <<-EOF + begin + ds = TZInfo::RubyDataSource.new + puts 'Initialized' + ds.load_timezone_info('Europe/London') + rescue Exception => e + puts e.class + end + EOF + + assert_sub_process_returns(['Initialized', 'TZInfo::InvalidTimezoneIdentifier'], code) + end + + def test_load_timezone_info_data + info = @data_source.load_timezone_info('Europe/London') + assert_kind_of(DataTimezoneInfo, info) + assert_equal('Europe/London', info.identifier) + end + + def test_load_timezone_info_linked + info = @data_source.load_timezone_info('UTC') + assert_kind_of(LinkedTimezoneInfo, info) + assert_equal('UTC', info.identifier) + assert_equal('Etc/UTC', info.link_to_identifier) + end + + def test_load_timezone_info_does_not_exist + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('Nowhere/Special') + end + end + + def test_load_timezone_info_invalid + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('../Definitions/UTC') + end + end + + def test_load_timezone_info_nil + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info(nil) + end + end + + def test_load_timezone_info_case + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('europe/london') + end + end + + def test_load_timezone_info_plus + info = @data_source.load_timezone_info('Etc/GMT+1') + assert_equal('Etc/GMT+1', info.identifier) + end + + def test_load_timezone_info_minus + info = @data_source.load_timezone_info('Etc/GMT-1') + assert_equal('Etc/GMT-1', info.identifier) + end + + def test_load_timezone_info_tainted + skip_if_has_bug_14060 + + safe_test(:unavailable => :skip) do + identifier = 'Europe/Amsterdam'.dup.taint + assert(identifier.tainted?) + info = @data_source.load_timezone_info(identifier) + assert_equal('Europe/Amsterdam', info.identifier) + assert(identifier.tainted?) + end + end + + def test_load_timezone_info_tainted_and_frozen + skip_if_has_bug_14060 + + safe_test do + info = @data_source.load_timezone_info('Europe/Amsterdam'.dup.taint.freeze) + assert_equal('Europe/Amsterdam', info.identifier) + end + end + + def test_timezone_identifiers + all = @data_source.timezone_identifiers + assert_equal(TZInfo::Data::Indexes::Timezones.timezones, all) + assert_equal(true, all.frozen?) + end + + def test_data_timezone_identifiers + all_data = @data_source.data_timezone_identifiers + assert_equal(TZInfo::Data::Indexes::Timezones.data_timezones, all_data) + assert_equal(true, all_data.frozen?) + end + + def test_linked_timezone_identifiers + all_linked = @data_source.linked_timezone_identifiers + assert_equal(TZInfo::Data::Indexes::Timezones.linked_timezones, all_linked) + assert_equal(true, all_linked.frozen?) + end + + def test_load_country_info + info = @data_source.load_country_info('GB') + assert_equal('GB', info.code) + end + + def test_load_country_info_not_exist + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('ZZ') + end + end + + def test_load_country_info_invalid + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('../Countries/GB') + end + end + + def test_load_country_info_nil + assert_raises(InvalidCountryCode) do + @data_source.load_country_info(nil) + end + end + + def test_load_country_info_case + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('gb') + end + end + + def test_load_country_info_tainted + safe_test(:unavailable => :skip) do + code = 'NL'.dup.taint + assert(code.tainted?) 
+ info = @data_source.load_country_info(code) + assert_equal('NL', info.code) + assert(code.tainted?) + end + end + + def test_load_country_info_tainted_and_frozen + safe_test do + info = @data_source.load_country_info('NL'.dup.taint.freeze) + assert_equal('NL', info.code) + end + end + + def test_country_codes + codes = @data_source.country_codes + assert_equal(TZInfo::Data::Indexes::Countries.countries.keys, codes) + assert_equal(true, codes.frozen?) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_time_or_datetime.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_time_or_datetime.rb new file mode 100644 index 0000000..d5ca73e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_time_or_datetime.rb @@ -0,0 +1,674 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'rational' unless defined?(Rational) + +include TZInfo + +class TCTimeOrDateTime < Minitest::Test + # Ruby 1.8.7 (built with GCC 8.3.0 on Ubuntu 19.04) fails to perform DateTime + # arithmetic operations correctly when sub-seconds are used. Detect this and + # disable the affected tests. + DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN = (DateTime.new(2019, 12, 22, 15, 0, Rational(1, 1000)) + Rational(1, 86400)).strftime('%Q') != '1577026801001' + puts "DateTime sub-second arithmetic is broken on this platform" if DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + + def test_initialize_time + assert_nothing_raised do + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + end + end + + def test_initialize_time_local + tdt = TimeOrDateTime.new(Time.local(2006, 3, 24, 15, 32, 3)) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), tdt.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) + end + + def test_intialize_time_local_usec + tdt = TimeOrDateTime.new(Time.local(2006, 3, 24, 15, 32, 3, 721123)) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), tdt.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) + end + + if Time.utc(2013, 1, 1).respond_to?(:nsec) + def test_initialize_time_local_nsec + tdt = TimeOrDateTime.new(Time.local(2006, 3, 24, 15, 32, 3, 721123 + Rational(456,1000))) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123 + Rational(456,1000)), tdt.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123 + Rational(456,1000)), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) + end + end + + def test_initialize_time_utc_local + # Check that local Time instances on systems using UTC as the system + # time zone are still converted to UTC Time instances. + + # Note that this will only test will only work correctly on platforms where + # setting the TZ environment variable has an effect. If setting TZ has no + # effect, then this test will still pass. + + old_tz = ENV['TZ'] + begin + ENV['TZ'] = 'UTC' + tdt = TimeOrDateTime.new(Time.local(2014, 1, 11, 17, 18, 41)) + assert_equal(Time.utc(2014, 1, 11, 17, 18, 41), tdt.to_time) + assert_equal(Time.utc(2014, 1, 11, 17, 18, 41), tdt.to_orig) + assert(tdt.to_time.utc?) + assert(tdt.to_orig.utc?) 
+ ensure + ENV['TZ'] = old_tz + end + end + + def test_initialize_datetime_offset + tdt = TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3).new_offset(Rational(5, 24))) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), tdt.to_datetime) + assert_equal(0, tdt.to_datetime.offset) + end + + def test_initialize_datetime + assert_nothing_raised do + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + end + end + + def test_initialize_timestamp + assert_nothing_raised do + TimeOrDateTime.new(1143214323) + end + end + + def test_initialize_timestamp_string + assert_nothing_raised do + TimeOrDateTime.new('1143214323') + end + end + + unless RubyCoreSupport.time_supports_64bit + # Only define this test for non-64bit platforms. Some 64-bit Rubies support + # greater than 64-bit, others support less than the full range. In either + # case, times at the far ends of the range are so far in the future or past + # that they are not going to turn up in timezone data. + def test_initialize_timestamp_supported_range + assert_equal((2 ** 31) - 1, TimeOrDateTime.new((2 ** 31) - 1).to_orig) + + assert_raises(RangeError) do + TimeOrDateTime.new(2 ** 31) + end + + if RubyCoreSupport.time_supports_negative + assert_equal(-(2 ** 31), TimeOrDateTime.new(-(2 ** 31)).to_orig) + + assert_raises(RangeError) do + TimeOrDateTime.new(-(2 ** 31) - 1) + end + else + assert_equal(0, TimeOrDateTime.new(0).to_orig) + + assert_raises(RangeError) do + TimeOrDateTime.new(-1) + end + end + end + end + + def test_to_time + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123)).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000))).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(1143214323).to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new('1143214323').to_time) + end + + def test_to_time_trunc_to_usec + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721123), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(7211239, 10000000))).to_time) + end + + def test_to_time_after_freeze + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).freeze.to_time) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(1143214323).freeze.to_time) + end + + def test_to_datetime + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000)), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123)).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000)), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000))).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(1143214323).to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new('1143214323').to_datetime) + end + + def test_to_datetime_ruby186_bug + # DateTime.new in Ruby 
1.8.6 won't allow a time to be specified using + # fractions of a second that is within the 60th second of a minute. + + # TimeOrDateTime has a workaround for this issue. + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 59) + Rational(721123, 86400000000), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 59, 721123)).to_datetime) + end + + def test_to_datetime_trunc_to_usec + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123, 1000000)), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123 + Rational(9, 10))).to_datetime) + end + + def test_to_datetime_after_freeze + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).freeze.to_datetime) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(1143214323).freeze.to_datetime) + end + + def test_to_i + assert_equal(1143214323, + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).to_i) + assert_equal(1143214323, + TimeOrDateTime.new(1143214323).to_i) + assert_equal(1143214323, + TimeOrDateTime.new('1143214323').to_i) + end + + def test_to_i_after_freeze + assert_equal(1143214323, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).freeze.to_i) + assert_equal(1143214323, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).freeze.to_i) + end + + def test_to_orig + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).to_orig) + assert_equal(1143214323, + TimeOrDateTime.new(1143214323).to_orig) + assert_equal(1143214323, + TimeOrDateTime.new('1143214323').to_orig) + end + + def test_to_s + assert_equal("Time: #{Time.utc(2006, 3, 24, 15, 32, 3).to_s}", + TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).to_s) + assert_equal("DateTime: #{DateTime.new(2006, 3, 24, 15, 32, 3)}", + TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).to_s) + assert_equal('Timestamp: 1143214323', + TimeOrDateTime.new(1143214323).to_s) + assert_equal('Timestamp: 1143214323', + TimeOrDateTime.new('1143214323').to_s) + end + + def test_year + assert_equal(2006, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).year) + assert_equal(2006, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).year) + assert_equal(2006, TimeOrDateTime.new(1143214323).year) + end + + def test_mon + assert_equal(3, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).mon) + assert_equal(3, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).mon) + assert_equal(3, TimeOrDateTime.new(1143214323).mon) + end + + def test_month + assert_equal(3, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).month) + assert_equal(3, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).month) + assert_equal(3, TimeOrDateTime.new(1143214323).month) + end + + def test_mday + assert_equal(24, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 
3)).mday) + assert_equal(24, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).mday) + assert_equal(24, TimeOrDateTime.new(1143214323).mday) + end + + def test_day + assert_equal(24, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).day) + assert_equal(24, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).day) + assert_equal(24, TimeOrDateTime.new(1143214323).day) + end + + 19.upto(25).each_with_index do |mday,i| + define_method "test_wday_time_#{i}" do + assert_equal(i, TimeOrDateTime.new(Time.utc(2006, 3, mday)).wday) + end + + define_method "test_wday_datetime_#{i}" do + assert_equal(i, TimeOrDateTime.new(DateTime.new(2006, 3, mday)).wday) + end + + define_method "test_wday_timestamp_#{i}" do + assert_equal(i, TimeOrDateTime.new(Time.utc(2006, 3, mday).to_i).wday) + end + end + + def test_hour + assert_equal(15, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).hour) + assert_equal(15, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).hour) + assert_equal(15, TimeOrDateTime.new(1143214323).hour) + end + + def test_min + assert_equal(32, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).min) + assert_equal(32, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).min) + assert_equal(32, TimeOrDateTime.new(1143214323).min) + end + + def test_sec + assert_equal(3, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).sec) + assert_equal(3, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).sec) + assert_equal(3, TimeOrDateTime.new(1143214323).sec) + end + + def test_usec + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).usec) + assert_equal(721123, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721123)).usec) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).usec) + assert_equal(721123, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721123,1000000))).usec) + assert_equal(0, TimeOrDateTime.new(1143214323).usec) + end + + def test_usec_after_to_i + val = TimeOrDateTime.new(Time.utc(2013, 2, 4, 22, 10, 33, 598000)) + assert_equal(Time.utc(2013, 2, 4, 22, 10, 33).to_i, val.to_i) + assert_equal(598000, val.usec) + end + + def test_compare_timeordatetime_time + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2007, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2005, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2007, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2005, 3, 24, 15, 32, 3))) + assert_equal(-1, 
TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2007, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(Time.utc(2005, 3, 24, 15, 32, 3))) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500001))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 499999))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000 + DATETIME_RESOLUTION))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000 - DATETIME_RESOLUTION))) + end + + def test_compare_timeordatetime_datetime + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 
15, 32, 4))) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3))) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 2))) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3))) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000)))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000)))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000)))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000)))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000)))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000)))) + end + + def test_compare_timeordatetime_timestamp + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214324)) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1174750323)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214322)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1111678323)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214324)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1174750323)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214322)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1111678323)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> TimeOrDateTime.new(1143214323)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1143214324)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1174750323)) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1143214323)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1143214322)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> TimeOrDateTime.new(1111678323)) + end + + def test_compare_time + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 
3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2007, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2005, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2007, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Time.utc(2005, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> Time.utc(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> Time.utc(2007, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> Time.utc(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> Time.utc(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> Time.utc(2005, 3, 24, 15, 32, 3)) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500001)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> Time.utc(2006, 3, 24, 15, 32, 3, 499999)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000 + DATETIME_RESOLUTION)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> Time.utc(2006, 3, 24, 15, 32, 3, 500000 - DATETIME_RESOLUTION)) + end + + def test_compare_datetime + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2040, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(1960, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2040, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, 
TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> DateTime.new(1960, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> DateTime.new(2006, 3, 24, 15, 32, 4)) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> DateTime.new(2040, 3, 24, 15, 32, 3)) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> DateTime.new(2006, 3, 24, 15, 32, 2)) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> DateTime.new(1960, 3, 24, 15, 32, 3)) + + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000))) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 500000)) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000))) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 + DATETIME_RESOLUTION, 1000000))) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000, 1000000))) <=> DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(500000 - DATETIME_RESOLUTION, 1000000))) + end + + def test_compare_timestamp + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1143214324) + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1174750323) + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1143214322) + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> 1111678323) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1143214324) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1174750323) + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1143214322) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> 1111678323) + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> 1143214323) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> 1143214324) + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> 1174750323) + assert_equal(0, TimeOrDateTime.new(1143214323) <=> 1143214323) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> 1143214322) + assert_equal(1, TimeOrDateTime.new(1143214323) <=> 1111678323) + end + + def test_compare_timestamp_str + assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1143214324') + 
assert_equal(-1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1174750323') + assert_equal(0, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1143214322') + assert_equal(1, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> '1111678323') + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1143214324') + assert_equal(-1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1174750323') + assert_equal(0, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1143214322') + assert_equal(1, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> '1111678323') + assert_equal(-1, TimeOrDateTime.new(DateTime.new(1960, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(DateTime.new(2040, 3, 24, 15, 32, 3)) <=> '1143214323') + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> '1143214324') + assert_equal(-1, TimeOrDateTime.new(1143214323) <=> '1174750323') + assert_equal(0, TimeOrDateTime.new(1143214323) <=> '1143214323') + assert_equal(1, TimeOrDateTime.new(1143214323) <=> '1143214322') + assert_equal(1, TimeOrDateTime.new(1143214323) <=> '1111678323') + end + + def test_compare_non_comparable + assert_nil(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) <=> Object.new) + assert_nil(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) <=> Object.new) + assert_nil(TimeOrDateTime.new(1143214323) <=> Object.new) + end + + def test_eql + assert_equal(true, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(1143214323))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new('1143214323'))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 4)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).eql?(Object.new)) + + assert_equal(true, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 722000)))) + assert_equal(false, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).eql?(Object.new)) + + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)))) + assert_equal(true, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new(1143214323))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(TimeOrDateTime.new('1143214323'))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 
3)).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 4)))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).eql?(Object.new)) + + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)))) + assert_equal(true, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(722, 1000))))) + assert_equal(false, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).eql?(Object.new)) + + + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)))) + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)))) + assert_equal(true, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(1143214323))) + assert_equal(true, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new('1143214323'))) + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(TimeOrDateTime.new(1143214324))) + assert_equal(false, TimeOrDateTime.new(1143214323).eql?(Object.new)) + end + + def test_hash + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3).hash, TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).hash) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3).hash, TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).hash) + assert_equal(1143214323.hash, TimeOrDateTime.new(1143214323).hash) + assert_equal(1143214323.hash, TimeOrDateTime.new('1143214323').hash) + end + + def test_add + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) + 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) + 0).to_orig) + assert_equal(1143214323, (TimeOrDateTime.new(1143214323) + 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) + 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) + 1).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214324, (TimeOrDateTime.new(1143214323) + 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) + (-1)).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) + (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) + (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2 + 
Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) + (-1)).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214322, (TimeOrDateTime.new(1143214323) + (-1)).to_orig) + end + + def test_subtract + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) - 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) - 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) - 0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) - 0).to_orig) + assert_equal(1143214323, (TimeOrDateTime.new(1143214323) - 0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) - 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) - 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) - 1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) - 1).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214322, (TimeOrDateTime.new(1143214323) - 1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)) - (-1)).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4, 721000), (TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)) - (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)) - (-1)).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4 + Rational(721, 1000)), (TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))) - (-1)).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214324, (TimeOrDateTime.new(1143214323) - (-1)).to_orig) + end + + def test_add_with_convert + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).add_with_convert(0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3, 721000), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).add_with_convert(0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).add_with_convert(0).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000)), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).add_with_convert(0).to_orig) + assert_equal(1143214323, TimeOrDateTime.new(1143214323).add_with_convert(0).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).add_with_convert(1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4, 721000), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4 + Rational(721, 1000)), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).add_with_convert(1).to_orig) 
unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214324, TimeOrDateTime.new(1143214323).add_with_convert(1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3)).add_with_convert(-1).to_orig) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 2, 721000), TimeOrDateTime.new(Time.utc(2006, 3, 24, 15, 32, 3, 721000)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 2 + Rational(721, 1000)), TimeOrDateTime.new(DateTime.new(2006, 3, 24, 15, 32, 3 + Rational(721, 1000))).add_with_convert(-1).to_orig) unless DATETIME_SUBSECOND_ARITHMETIC_IS_BROKEN + assert_equal(1143214322, TimeOrDateTime.new(1143214323).add_with_convert(-1).to_orig) + + if RubyCoreSupport.time_supports_negative + assert_equal(Time.utc(1969, 12, 31, 23, 59, 59), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0)).add_with_convert(-1).to_orig) + assert_equal(-1, TimeOrDateTime.new(0).add_with_convert(-1).to_orig) + assert_equal(Time.utc(1969, 12, 31, 23, 59, 59, 892000), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0, 892000)).add_with_convert(-1).to_orig) + + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(1901, 12, 13, 20, 45, 51), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52)).add_with_convert(-1).to_orig) + assert_equal(-2147483649, TimeOrDateTime.new(-2147483648).add_with_convert(-1).to_orig) + assert_equal(Time.utc(1901, 12, 13, 20, 45, 51, 892000), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52, 892000)).add_with_convert(-1).to_orig) + else + assert_equal(DateTime.new(1901, 12, 13, 20, 45, 51), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(1901, 12, 13, 20, 45, 51), TimeOrDateTime.new(-2147483648).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(1901, 12, 13, 20, 45, 51 + Rational(892,1000)), TimeOrDateTime.new(Time.utc(1901, 12, 13, 20, 45, 52, 892000)).add_with_convert(-1).to_orig) + end + else + assert_equal(DateTime.new(1969, 12, 31, 23, 59, 59), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0)).add_with_convert(-1).to_orig) + assert_equal(DateTime.new(1969, 12, 31, 23, 59, 59), TimeOrDateTime.new(0).add_with_convert(-1).to_orig) + assert_equal(RubyCoreSupport.datetime_new(1969, 12, 31, 23, 59, 59 + Rational(892,1000)), TimeOrDateTime.new(Time.utc(1970, 1, 1, 0, 0, 0, 892000)).add_with_convert(-1).to_orig) + end + + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(2038, 1, 19, 3, 14, 8), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7)).add_with_convert(1).to_orig) + assert_equal(2147483648, TimeOrDateTime.new(2147483647).add_with_convert(1).to_orig) + assert_equal(Time.utc(2038, 1, 19, 3, 14, 8, 892000), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7, 892000)).add_with_convert(1).to_orig) + else + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7)).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8), TimeOrDateTime.new(2147483647).add_with_convert(1).to_orig) + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8 + Rational(892,1000)), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 7, 892000)).add_with_convert(1).to_orig) + + assert_equal(Time.utc(2038, 1, 19, 3, 14, 7, 892000), TimeOrDateTime.new(Time.utc(2038, 1, 19, 3, 14, 6, 892000)).add_with_convert(1).to_orig) + end + end + + def 
test_wrap_time + t = TimeOrDateTime.wrap(Time.utc(2006, 3, 24, 15, 32, 3)) + assert_instance_of(TimeOrDateTime, t) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), t.to_orig) + end + + def test_wrap_datetime + t = TimeOrDateTime.wrap(DateTime.new(2006, 3, 24, 15, 32, 3)) + assert_instance_of(TimeOrDateTime, t) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), t.to_orig) + end + + def test_wrap_timestamp + t = TimeOrDateTime.wrap(1143214323) + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + end + + def test_wrap_timestamp_str + t = TimeOrDateTime.wrap('1143214323') + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + end + + def test_wrap_timeordatetime + t = TimeOrDateTime.new(1143214323) + t2 = TimeOrDateTime.wrap(t) + assert_same(t, t2) + end + + def test_wrap_block_time + assert_equal(Time.utc(2006, 3, 24, 15, 32, 4), TimeOrDateTime.wrap(Time.utc(2006, 3, 24, 15, 32, 3)) {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(Time.utc(2006, 3, 24, 15, 32, 3), t.to_orig) + t + 1 + }) + end + + def test_wrap_block_datetime + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 4), TimeOrDateTime.wrap(DateTime.new(2006, 3, 24, 15, 32, 3)) {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(DateTime.new(2006, 3, 24, 15, 32, 3), t.to_orig) + t + 1 + }) + end + + def test_wrap_block_timestamp + assert_equal(1143214324, TimeOrDateTime.wrap(1143214323) {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + t + 1 + }) + end + + def test_wrap_block_timestamp_str + assert_equal(1143214324, TimeOrDateTime.wrap('1143214323') {|t| + assert_instance_of(TimeOrDateTime, t) + assert_equal(1143214323, t.to_orig) + t + 1 + }) + end + + def test_wrap_block_timeordatetime + t1 = TimeOrDateTime.new(1143214323) + + t2 = TimeOrDateTime.wrap(t1) {|t| + assert_same(t1, t) + t + 1 + } + + assert t2 + assert_instance_of(TimeOrDateTime, t2) + assert_equal(1143214324, t2.to_orig) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone.rb new file mode 100644 index 0000000..0dc0611 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone.rb @@ -0,0 +1,1361 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. 
+send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCTimezone < Minitest::Test + + class BlockCalled < StandardError + end + + class TestTimezone < Timezone + def self.new(identifier, period_for_utc = nil, periods_for_local = nil, expected = nil) + t = super() + t.send(:setup, identifier, period_for_utc, periods_for_local, expected) + t + end + + def identifier + @identifier + end + + def period_for_utc(utc) + utc = TimeOrDateTime.wrap(utc) + raise "Unexpected utc #{utc} in period_for_utc" unless @expected.eql?(utc) + @period_for_utc + end + + def periods_for_local(local) + local = TimeOrDateTime.wrap(local) + raise "Unexpected local #{local} in periods_for_local" unless @expected.eql?(local) + @periods_for_local.clone + end + + def transitions_up_to(utc_to, utc_from = nil) + raise 'transitions_up_to called' + end + + private + def setup(identifier, period_for_utc, periods_for_local, expected) + @identifier = identifier + @period_for_utc = period_for_utc + @periods_for_local = periods_for_local || [] + @expected = TimeOrDateTime.wrap(expected) + end + end + + class OffsetsUpToTestTimezone < Timezone + def self.new(identifier, expected_utc_to, expected_utc_from, transitions_up_to) + t = super() + t.send(:setup, identifier, expected_utc_to, expected_utc_from, transitions_up_to) + t + end + + def identifier + @identifier + end + + def period_for_utc(utc) + raise 'period_for_utc called' + end + + def periods_for_local(local) + raise 'periods_for_local called' + end + + def transitions_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + raise "Unexpected utc_to #{utc_to || 'nil'} in transitions_up_to" unless @expected_utc_to.eql?(utc_to) + + utc_from = utc_from ? TimeOrDateTime.wrap(utc_from) : nil + raise "Unexpected utc_from #{utc_from || 'nil'} in transitions_up_to" unless @expected_utc_from.eql?(utc_from) + + if utc_from && utc_to <= utc_from + raise ArgumentError, 'utc_to must be greater than utc_from' + end + + @transitions_up_to + end + + private + + def setup(identifier, expected_utc_to, expected_utc_from, transitions_up_to) + @identifier = identifier + @expected_utc_to = TimeOrDateTime.wrap(expected_utc_to) + @expected_utc_from = expected_utc_from ? TimeOrDateTime.wrap(expected_utc_from) : nil + @transitions_up_to = transitions_up_to + end + end + + class OffsetsUpToNoTransitionsTestTimezone < Timezone + def self.new(identifier, expected_utc_to, expected_utc_from, period_for_utc) + t = super() + t.send(:setup, identifier, expected_utc_to, expected_utc_from, period_for_utc) + t + end + + def identifier + @identifier + end + + def period_for_utc(utc) + utc = TimeOrDateTime.wrap(utc) + + raise "Unexpected utc #{utc} in period_for_utc (should be utc_from)" if @expected_utc_from && !@expected_utc_from.eql?(utc) + raise "Unexpected utc #{utc} in period_for_utc (should be < utc_to)" if !@expected_utc_from && @expected_utc_to <= utc + + @period_for_utc + end + + def periods_for_local(local) + raise 'periods_for_local called' + end + + def transitions_up_to(utc_to, utc_from = nil) + utc_to = TimeOrDateTime.wrap(utc_to) + raise "Unexpected utc_to #{utc_to || 'nil'} in transitions_up_to" unless @expected_utc_to.eql?(utc_to) + + utc_from = utc_from ? 
TimeOrDateTime.wrap(utc_from) : nil + raise "Unexpected utc_from #{utc_from || 'nil'} in transitions_up_to" unless @expected_utc_from.eql?(utc_from) + + if utc_from && utc_to <= utc_from + raise ArgumentError, 'utc_to must be greater than utc_from' + end + + [] + end + + private + + def setup(identifier, expected_utc_to, expected_utc_from, period_for_utc) + @identifier = identifier + @expected_utc_to = TimeOrDateTime.wrap(expected_utc_to) + @expected_utc_from = expected_utc_from ? TimeOrDateTime.wrap(expected_utc_from) : nil + @period_for_utc = period_for_utc + end + end + + class TestTimezoneTransition < TimezoneTransition + def initialize(offset, previous_offset, at) + super(offset, previous_offset) + @at = TimeOrDateTime.wrap(at) + end + + def at + @at + end + end + + def setup + @orig_default_dst = Timezone.default_dst + @orig_data_source = DataSource.get + Timezone.send :init_loaded_zones + end + + def teardown + Timezone.default_dst = @orig_default_dst + DataSource.set(@orig_data_source) + end + + def test_default_dst_initial_value + assert_nil(Timezone.default_dst) + end + + def test_set_default_dst + Timezone.default_dst = true + assert_equal(true, Timezone.default_dst) + Timezone.default_dst = false + assert_equal(false, Timezone.default_dst) + Timezone.default_dst = nil + assert_nil(Timezone.default_dst) + Timezone.default_dst = 0 + assert_equal(true, Timezone.default_dst) + end + + def test_get_valid_1 + tz = Timezone.get('Europe/London') + + assert_kind_of(DataTimezone, tz) + assert_equal('Europe/London', tz.identifier) + end + + def test_get_valid_2 + tz = Timezone.get('UTC') + + # ZoneinfoDataSource doesn't return LinkedTimezoneInfo for any timezone. + if DataSource.get.load_timezone_info('UTC').kind_of?(LinkedTimezoneInfo) + assert_kind_of(LinkedTimezone, tz) + else + assert_kind_of(DataTimezone, tz) + end + + assert_equal('UTC', tz.identifier) + end + + def test_get_valid_3 + tz = Timezone.get('America/Argentina/Buenos_Aires') + + assert_kind_of(DataTimezone, tz) + assert_equal('America/Argentina/Buenos_Aires', tz.identifier) + end + + def test_get_same_instance + tz1 = Timezone.get('Europe/London') + tz2 = Timezone.get('Europe/London') + assert_same(tz1, tz2) + end + + def test_get_not_exist + assert_raises(InvalidTimezoneIdentifier) { Timezone.get('Nowhere/Special') } + end + + def test_get_invalid + assert_raises(InvalidTimezoneIdentifier) { Timezone.get('../Definitions/UTC') } + end + + def test_get_nil + assert_raises(InvalidTimezoneIdentifier) { Timezone.get(nil) } + end + + def test_get_case + Timezone.get('Europe/Prague') + assert_raises(InvalidTimezoneIdentifier) { Timezone.get('Europe/prague') } + end + + def test_get_proxy_valid + proxy = Timezone.get_proxy('Europe/London') + assert_kind_of(TimezoneProxy, proxy) + assert_equal('Europe/London', proxy.identifier) + end + + def test_get_proxy_not_exist + proxy = Timezone.get_proxy('Not/There') + assert_kind_of(TimezoneProxy, proxy) + assert_equal('Not/There', proxy.identifier) + end + + def test_get_proxy_invalid + proxy = Timezone.get_proxy('../Invalid/Identifier') + assert_kind_of(TimezoneProxy, proxy) + assert_equal('../Invalid/Identifier', proxy.identifier) + end + + def test_get_tainted_loaded + Timezone.get('Europe/Andorra') + + safe_test(:unavailable => :skip) do + identifier = 'Europe/Andorra'.dup.taint + assert(identifier.tainted?) + tz = Timezone.get(identifier) + assert_equal('Europe/Andorra', tz.identifier) + assert(identifier.tainted?) 
+ end + end + + def test_get_tainted_and_frozen_loaded + Timezone.get('Europe/Andorra') + + safe_test do + tz = Timezone.get('Europe/Andorra'.dup.taint.freeze) + assert_equal('Europe/Andorra', tz.identifier) + end + end + + def test_get_tainted_not_previously_loaded + skip_if_has_bug_14060 + + safe_test(:unavailable => :skip) do + identifier = 'Europe/Andorra'.dup.taint + assert(identifier.tainted?) + tz = Timezone.get(identifier) + assert_equal('Europe/Andorra', tz.identifier) + assert(identifier.tainted?) + end + end + + def test_get_tainted_and_frozen_not_previously_loaded + skip_if_has_bug_14060 + + safe_test do + tz = Timezone.get('Europe/Amsterdam'.dup.taint.freeze) + assert_equal('Europe/Amsterdam', tz.identifier) + end + end + + def test_new_no_args + tz = Timezone.new + + assert_raises(UnknownTimezone) { tz.identifier } + assert_raises(UnknownTimezone) { tz.friendly_identifier } + assert_raises(UnknownTimezone) { tz.utc_to_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.local_to_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.periods_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.now } + assert_raises(UnknownTimezone) { tz.current_period_and_time } + assert_raises(UnknownTimezone) { tz.transitions_up_to(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.canonical_identifier } + assert_raises(UnknownTimezone) { tz.canonical_zone } + end + + def test_new_nil + tz = Timezone.new(nil) + + assert_raises(UnknownTimezone) { tz.identifier } + assert_raises(UnknownTimezone) { tz.friendly_identifier } + assert_raises(UnknownTimezone) { tz.utc_to_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.local_to_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_utc(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.periods_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.period_for_local(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.now } + assert_raises(UnknownTimezone) { tz.current_period_and_time } + assert_raises(UnknownTimezone) { tz.transitions_up_to(DateTime.new(2006,1,1,1,0,0)) } + assert_raises(UnknownTimezone) { tz.canonical_identifier } + assert_raises(UnknownTimezone) { tz.canonical_zone } + end + + def test_new_arg + tz = Timezone.new('Europe/London') + assert_same(Timezone.get('Europe/London'), tz) + end + + def test_new_arg_not_exist + assert_raises(InvalidTimezoneIdentifier) { Timezone.new('Nowhere/Special') } + end + + def test_all + all = Timezone.all + expected = DataSource.get.timezone_identifiers.collect {|identifier| Timezone.get_proxy(identifier)} + assert_equal(expected, all) + end + + def test_all_identifiers + all = Timezone.all_identifiers + assert_equal(DataSource.get.timezone_identifiers, all) + end + + def test_all_data_zones + all_data = Timezone.all_data_zones + expected = DataSource.get.data_timezone_identifiers.collect {|identifier| Timezone.get_proxy(identifier)} + assert_equal(expected, all_data) + end + + def test_all_data_zone_identifiers + all_data = Timezone.all_data_zone_identifiers + assert_equal(DataSource.get.data_timezone_identifiers, all_data) + end + + def test_all_linked_zones + all_linked = Timezone.all_linked_zones + expected = 
DataSource.get.linked_timezone_identifiers.collect {|identifier| Timezone.get_proxy(identifier)} + assert_equal(expected, all_linked) + end + + def test_all_linked_zone_identifiers + all_linked = Timezone.all_linked_zone_identifiers + assert_equal(DataSource.get.linked_timezone_identifiers, all_linked) + end + + def test_all_country_zones + # Probably should relax this test - just need all the zones, don't care + # about order. + expected = Country.all.inject([]) {|result,country| + result += country.zones + } + expected.uniq! + + all_country_zones = Timezone.all_country_zones + assert_equal(expected, all_country_zones) + + all_country_zone_identifiers = Timezone.all_country_zone_identifiers + assert_equal(all_country_zone_identifiers.length, all_country_zones.length) + + all_country_zones.each {|zone| + assert_kind_of(TimezoneProxy, zone) + assert(all_country_zone_identifiers.include?(zone.identifier)) + } + end + + def test_all_country_zone_identifiers + # Probably should relax this test - just need all the zones, don't care + # about order. + expected = Country.all.inject([]) {|result,country| + result += country.zone_identifiers + } + expected.uniq! + + assert_equal(expected, Timezone.all_country_zone_identifiers) + end + + def test_us_zones + # Probably should relax this test - just need all the zones, don't care + # about order. + us_zones = Timezone.us_zones + assert_equal(Country.get('US').zones.uniq, us_zones) + + us_zone_identifiers = Timezone.us_zone_identifiers + assert_equal(us_zone_identifiers.length, us_zones.length) + + us_zones.each {|zone| + assert_kind_of(TimezoneProxy, zone) + assert(us_zone_identifiers.include?(zone.identifier)) + } + end + + def test_us_zone_identifiers + # Probably should relax this test - just need all the zones, don't care + # about order. 
+ assert_equal(Country.get('US').zone_identifiers.uniq, Timezone.us_zone_identifiers) + end + + def test_identifier + assert_raises(UnknownTimezone) { Timezone.new.identifier } + assert_equal('Europe/Paris', TestTimezone.new('Europe/Paris').identifier) + end + + def test_name + assert_raises(UnknownTimezone) { Timezone.new.name } + assert_equal('Europe/Paris', TestTimezone.new('Europe/Paris').name) + end + + def test_friendly_identifier + assert_equal('Paris', TestTimezone.new('Europe/Paris').friendly_identifier(true)) + assert_equal('Europe - Paris', TestTimezone.new('Europe/Paris').friendly_identifier(false)) + assert_equal('Europe - Paris', TestTimezone.new('Europe/Paris').friendly_identifier) + assert_equal('Knox, Indiana', TestTimezone.new('America/Indiana/Knox').friendly_identifier(true)) + assert_equal('America - Knox, Indiana', TestTimezone.new('America/Indiana/Knox').friendly_identifier(false)) + assert_equal('America - Knox, Indiana', TestTimezone.new('America/Indiana/Knox').friendly_identifier) + assert_equal('Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').friendly_identifier(true)) + assert_equal('Antarctica - Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').friendly_identifier(false)) + assert_equal('Antarctica - Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').friendly_identifier) + assert_equal('McMurdo', TestTimezone.new('Antarctica/McMurdo').friendly_identifier(true)) + assert_equal('Antarctica - McMurdo', TestTimezone.new('Antarctica/McMurdo').friendly_identifier(false)) + assert_equal('Antarctica - McMurdo', TestTimezone.new('Antarctica/McMurdo').friendly_identifier) + assert_equal('GMT+1', TestTimezone.new('Etc/GMT+1').friendly_identifier(true)) + assert_equal('Etc - GMT+1', TestTimezone.new('Etc/GMT+1').friendly_identifier(false)) + assert_equal('Etc - GMT+1', TestTimezone.new('Etc/GMT+1').friendly_identifier) + assert_equal('UTC', TestTimezone.new('UTC').friendly_identifier(true)) + assert_equal('UTC', TestTimezone.new('UTC').friendly_identifier(false)) + assert_equal('UTC', TestTimezone.new('UTC').friendly_identifier) + end + + if defined?(Encoding) + def test_friendly_identifier_non_binary_encoding + refute_equal(Encoding::ASCII_8BIT, TestTimezone.new('Europe/Paris').friendly_identifier(true).encoding) + refute_equal(Encoding::ASCII_8BIT, TestTimezone.new('Europe/Paris').friendly_identifier(false).encoding) + end + end + + def test_to_s + assert_equal('Europe - Paris', TestTimezone.new('Europe/Paris').to_s) + assert_equal('America - Knox, Indiana', TestTimezone.new('America/Indiana/Knox').to_s) + assert_equal('Antarctica - Dumont D\'Urville', TestTimezone.new('Antarctica/DumontDUrville').to_s) + assert_equal('Antarctica - McMurdo', TestTimezone.new('Antarctica/McMurdo').to_s) + assert_equal('Etc - GMT+1', TestTimezone.new('Etc/GMT+1').to_s) + assert_equal('UTC', TestTimezone.new('UTC').to_s) + end + + def test_period_for_local + dt = DateTime.new(2005,2,18,16,24,23) + dt2 = DateTime.new(2005,2,18,16,24,23).new_offset(Rational(5,24)) + dt3 = DateTime.new(2005,2,18,16,24,23 + Rational(789, 1000)) + t = Time.utc(2005,2,18,16,24,23) + t2 = Time.local(2005,2,18,16,24,23) + t3 = Time.utc(2005,2,18,16,24,23,789000) + ts = t.to_i + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o1, o2, 1099184400), + TestTimezoneTransition.new(o2, o1, 1111885200)) + + dt_period = TestTimezone.new('Europe/London', nil, [period], 
dt).period_for_local(dt) + dt2_period = TestTimezone.new('Europe/London', nil, [period], dt2).period_for_local(dt2) + dt3_period = TestTimezone.new('Europe/London', nil, [period], dt3).period_for_local(dt3) + t_period = TestTimezone.new('Europe/London', nil, [period], t).period_for_local(t) + t2_period = TestTimezone.new('Europe/London', nil, [period], t2).period_for_local(t2) + t3_period = TestTimezone.new('Europe/London', nil, [period], t3).period_for_local(t3) + ts_period = TestTimezone.new('Europe/London', nil, [period], ts).period_for_local(ts) + + assert_equal(period, dt_period) + assert_equal(period, dt2_period) + assert_equal(period, dt3_period) + assert_equal(period, t_period) + assert_equal(period, t2_period) + assert_equal(period, t3_period) + assert_equal(period, ts_period) + end + + def test_period_for_local_invalid + dt = DateTime.new(2004,4,4,2,30,0) + tz = TestTimezone.new('America/New_York', nil, [], dt) + + assert_raises(PeriodNotFound) do + tz.period_for_local(dt) + end + end + + def test_period_for_local_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,0,0) + dt2 = DateTime.new(2004,10,31,1,0,Rational(555,1000)) + t = Time.utc(2004,10,31,1,30,0) + t2 = Time.utc(2004,10,31,1,30,0,555000) + i = Time.utc(2004,10,31,1,59,59).to_i + + dt_tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + dt2_tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt2) + t_tz = TestTimezone.new('America/New_York', nil, [p1, p2], t) + t2_tz = TestTimezone.new('America/New_York', nil, [p1, p2], t2) + i_tz = TestTimezone.new('America/New_York', nil, [p1, p2], i) + + assert_raises(AmbiguousTime) { dt_tz.period_for_local(dt) } + assert_raises(AmbiguousTime) { dt2_tz.period_for_local(dt2) } + assert_raises(AmbiguousTime) { t_tz.period_for_local(t) } + assert_raises(AmbiguousTime) { t2_tz.period_for_local(t2) } + assert_raises(AmbiguousTime) { i_tz.period_for_local(i) } + end + + def test_period_for_local_not_found + dt = DateTime.new(2004,4,4,2,0,0) + dt2 = DateTime.new(2004,4,4,2,0,Rational(987,1000)) + t = Time.utc(2004,4,4,2,30,0) + t2 = Time.utc(2004,4,4,2,30,0,987000) + i = Time.utc(2004,4,4,2,59,59).to_i + + dt_tz = TestTimezone.new('America/New_York', nil, [], dt) + dt2_tz = TestTimezone.new('America/New_York', nil, [], dt2) + t_tz = TestTimezone.new('America/New_York', nil, [], t) + t2_tz = TestTimezone.new('America/New_York', nil, [], t2) + i_tz = TestTimezone.new('America/New_York', nil, [], i) + + assert_raises(PeriodNotFound) { dt_tz.period_for_local(dt) } + assert_raises(PeriodNotFound) { dt2_tz.period_for_local(dt2) } + assert_raises(PeriodNotFound) { t_tz.period_for_local(t) } + assert_raises(PeriodNotFound) { t2_tz.period_for_local(t2) } + assert_raises(PeriodNotFound) { i_tz.period_for_local(i) } + end + + def test_period_for_local_default_dst_set_true + Timezone.default_dst = true + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = 
TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(p1, tz.period_for_local(dt)) + assert_equal(p1, tz.period_for_local(dt, true)) + assert_equal(p2, tz.period_for_local(dt, false)) + assert_raises(AmbiguousTime) { tz.period_for_local(dt, nil) } + end + + def test_period_for_local_default_dst_set_false + Timezone.default_dst = false + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(p2, tz.period_for_local(dt)) + assert_equal(p1, tz.period_for_local(dt, true)) + assert_equal(p2, tz.period_for_local(dt, false)) + assert_raises(AmbiguousTime) { tz.period_for_local(dt, nil) } + end + + def test_period_for_local_dst_flag_resolved + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(p1, tz.period_for_local(dt, true)) + assert_equal(p2, tz.period_for_local(dt, false)) + assert_equal(p1, tz.period_for_local(dt, true) {|periods| raise BlockCalled, 'should not be called' }) + assert_equal(p2, tz.period_for_local(dt, false) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_period_for_local_dst_block_called + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(BlockCalled) { + tz.period_for_local(dt) {|periods| + assert_equal([p1, p2], periods) + + # raise exception to test that the block was called + raise BlockCalled, 'should be raised' + } + } + + assert_equal(p1, tz.period_for_local(dt) {|periods| periods.first}) + assert_equal(p2, tz.period_for_local(dt) {|periods| periods.last}) + assert_equal(p1, tz.period_for_local(dt) {|periods| [periods.first]}) + assert_equal(p2, tz.period_for_local(dt) {|periods| [periods.last]}) + end + + def test_period_for_local_dst_cannot_resolve + # At midnight local time on Aug 5 1915 in Warsaw, the clocks were put back + # 24 minutes and both periods were non-DST. Hence the block should be + # called regardless of the value of the Boolean dst parameter. 
+ + o0 = TimezoneOffset.new(5040, 0, :LMT) + o1 = TimezoneOffset.new(5040, 0, :WMT) + o2 = TimezoneOffset.new(3600, 0, :CET) + o3 = TimezoneOffset.new(3600, 3600, :CEST) + + t1 = TestTimezoneTransition.new(o1, o0, DateTime.new(1879, 12, 31, 22, 36, 0)) + t2 = TestTimezoneTransition.new(o2, o1, DateTime.new(1915, 8, 4, 22, 36, 0)) + t3 = TestTimezoneTransition.new(o3, o2, DateTime.new(1916, 4, 30, 22, 0, 0)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(1915,8,4,23,40,0) + + tz = TestTimezone.new('Europe/Warsaw', nil, [p1, p2], dt) + + assert_raises(BlockCalled) { + tz.period_for_local(dt, true) {|periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + } + } + + assert_raises(BlockCalled) { + tz.period_for_local(dt, false) {|periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + } + } + end + + def test_period_for_local_block_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| nil} + end + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| periods} + end + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| []} + end + + assert_raises(AmbiguousTime) do + tz.period_for_local(dt) {|periods| raise AmbiguousTime, 'Ambiguous time'} + end + end + + def test_utc_to_local + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5,24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5,24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + ts = t.to_i + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(DateTime.new(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], dt).utc_to_local(dt)) + assert_equal(DateTime.new(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], dt2).utc_to_local(dt2)) + assert_equal(DateTime.new(2005,6,18,17,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', period, [], dtu).utc_to_local(dtu)) + assert_equal(DateTime.new(2005,6,18,17,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', period, [], dtu2).utc_to_local(dtu2)) + assert_equal(Time.utc(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], t).utc_to_local(t)) + assert_equal(Time.utc(2005,6,18,17,24,23), TestTimezone.new('Europe/London', period, [], t2).utc_to_local(t2)) + assert_equal(Time.utc(2005,6,18,17,24,23,567000), TestTimezone.new('Europe/London', period, [], tu).utc_to_local(tu)) + assert_equal(Time.utc(2005,6,18,17,24,23,567000), TestTimezone.new('Europe/London', period, [], tu2).utc_to_local(tu2)) + assert_equal(Time.utc(2005,6,18,17,24,23).to_i, TestTimezone.new('Europe/London', period, [], ts).utc_to_local(ts)) + end + + def 
test_utc_to_local_offset + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5,24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5,24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(0, TestTimezone.new('Europe/London', period, [], dt).utc_to_local(dt).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], dt2).utc_to_local(dt2).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], dtu).utc_to_local(dtu).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], dtu2).utc_to_local(dtu2).offset) + assert_equal(0, TestTimezone.new('Europe/London', period, [], t).utc_to_local(t).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], t).utc_to_local(t).utc?) + assert_equal(0, TestTimezone.new('Europe/London', period, [], t2).utc_to_local(t2).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], t2).utc_to_local(t2).utc?) + assert_equal(0, TestTimezone.new('Europe/London', period, [], tu).utc_to_local(tu).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], tu).utc_to_local(tu).utc?) + assert_equal(0, TestTimezone.new('Europe/London', period, [], tu2).utc_to_local(tu2).utc_offset) + assert(TestTimezone.new('Europe/London', period, [], tu2).utc_to_local(tu2).utc?) + end + + def test_local_to_utc + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5, 24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5, 24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + ts = t.to_i + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(DateTime.new(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], dt).local_to_utc(dt)) + assert_equal(DateTime.new(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], dt2).local_to_utc(dt2)) + assert_equal(DateTime.new(2005,6,18,15,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', nil, [period], dtu).local_to_utc(dtu)) + assert_equal(DateTime.new(2005,6,18,15,24,23 + Rational(567,1000)), TestTimezone.new('Europe/London', nil, [period], dtu2).local_to_utc(dtu2)) + assert_equal(Time.utc(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], t).local_to_utc(t)) + assert_equal(Time.utc(2005,6,18,15,24,23), TestTimezone.new('Europe/London', nil, [period], t2).local_to_utc(t2)) + assert_equal(Time.utc(2005,6,18,15,24,23,567000), TestTimezone.new('Europe/London', nil, [period], tu).local_to_utc(tu)) + assert_equal(Time.utc(2005,6,18,15,24,23,567000), TestTimezone.new('Europe/London', nil, [period], tu2).local_to_utc(tu2)) + assert_equal(Time.utc(2005,6,18,15,24,23).to_i, TestTimezone.new('Europe/London', nil, [period], 
ts).local_to_utc(ts)) + end + + def test_local_to_utc_offset + dt = DateTime.new(2005,6,18,16,24,23) + dt2 = DateTime.new(2005,6,18,16,24,23).new_offset(Rational(5, 24)) + dtu = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)) + dtu2 = DateTime.new(2005,6,18,16,24,23 + Rational(567,1000)).new_offset(Rational(5, 24)) + t = Time.utc(2005,6,18,16,24,23) + t2 = Time.local(2005,6,18,16,24,23) + tu = Time.utc(2005,6,18,16,24,23,567000) + tu2 = Time.local(2005,6,18,16,24,23,567000) + + o1 = TimezoneOffset.new(0, 0, :GMT) + o2 = TimezoneOffset.new(0, 3600, :BST) + + period = TimezonePeriod.new( + TestTimezoneTransition.new(o2, o1, 1111885200), + TestTimezoneTransition.new(o1, o2, 1130634000)) + + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dt).local_to_utc(dt).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dt2).local_to_utc(dt2).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dtu).local_to_utc(dtu).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], dtu2).local_to_utc(dtu2).offset) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], t).local_to_utc(t).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], t).local_to_utc(t).utc?) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], t2).local_to_utc(t2).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], t2).local_to_utc(t2).utc?) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], tu).local_to_utc(tu).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], tu).local_to_utc(tu).utc?) + assert_equal(0, TestTimezone.new('Europe/London', nil, [period], tu2).local_to_utc(tu2).utc_offset) + assert(TestTimezone.new('Europe/London', nil, [period], tu2).local_to_utc(tu2).utc?) + end + + def test_local_to_utc_utc_local_returns_utc + # Check that UTC time instances are always returned even if the system + # is using UTC as the time zone. + + # Note that this will only test will only work correctly on platforms where + # setting the TZ environment variable has an effect. If setting TZ has no + # effect, then this test will still pass. + + old_tz = ENV['TZ'] + begin + ENV['TZ'] = 'UTC' + + tz = Timezone.get('America/New_York') + + t = tz.local_to_utc(Time.local(2014, 1, 11, 17, 18, 41)) + assert_equal(Time.utc(2014, 1, 11, 22, 18, 41), t) + assert(t.utc?) 
+ ensure + ENV['TZ'] = old_tz + end + end + + def test_local_to_utc_invalid + dt = DateTime.new(2004,4,4,2,30,0) + tz = TestTimezone.new('America/New_York', nil, [], dt) + assert_raises(PeriodNotFound) { tz.local_to_utc(dt) } + + t = Time.utc(2004,4,4,2,30,0) + tz = TestTimezone.new('America/New_York', nil, [], t) + assert_raises(PeriodNotFound) { tz.local_to_utc(t) } + + i = Time.utc(2004,4,4,2,30,0).to_i + tz = TestTimezone.new('America/New_York', nil, [], i) + assert_raises(PeriodNotFound) { tz.local_to_utc(i) } + end + + def test_local_to_utc_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) } + + t = Time.utc(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], t) + assert_raises(AmbiguousTime) { tz.local_to_utc(t) } + + i = Time.utc(2004,10,31,1,30,0).to_i + tz = TestTimezone.new('America/New_York', nil, [p1, p2], i) + assert_raises(AmbiguousTime) { tz.local_to_utc(i) } + + f = Time.utc(2004,10,31,1,30,0,501).to_i + tz = TestTimezone.new('America/New_York', nil, [p1, p2], f) + assert_raises(AmbiguousTime) { tz.local_to_utc(f) } + end + + def test_local_to_utc_not_found + dt = DateTime.new(2004,4,4,2,0,0) + t = Time.utc(2004,4,4,2,30,0) + i = Time.utc(2004,4,4,2,59,59).to_i + + dt_tz = TestTimezone.new('America/New_York', nil, [], dt) + t_tz = TestTimezone.new('America/New_York', nil, [], t) + i_tz = TestTimezone.new('America/New_York', nil, [], i) + + assert_raises(PeriodNotFound) { dt_tz.local_to_utc(dt) } + assert_raises(PeriodNotFound) { t_tz.local_to_utc(t) } + assert_raises(PeriodNotFound) { i_tz.local_to_utc(i) } + end + + def test_local_to_utc_default_dst_set_true + Timezone.default_dst = true + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt)) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true)) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false)) + assert_raises(AmbiguousTime) { tz.local_to_utc(dt, nil) } + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_local_to_utc_default_dst_set_false + Timezone.default_dst = false + + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt)) + 
assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false)) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true)) + assert_raises(AmbiguousTime) { tz.local_to_utc(dt, nil) } + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_local_to_utc_dst_flag_resolved + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true)) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false)) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt, true) {|periods| raise BlockCalled, 'should not be called' }) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt, false) {|periods| raise BlockCalled, 'should not be called' }) + end + + def test_local_to_utc_dst_block_called + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(BlockCalled) { + tz.local_to_utc(dt) {|periods| + assert_equal([p1, p2], periods) + + # raise exception to test that the block was called + raise BlockCalled, 'should be raised' + } + } + + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt) {|periods| periods.first}) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt) {|periods| periods.last}) + assert_equal(DateTime.new(2004,10,31,5,30,0), tz.local_to_utc(dt) {|periods| [periods.first]}) + assert_equal(DateTime.new(2004,10,31,6,30,0), tz.local_to_utc(dt) {|periods| [periods.last]}) + end + + def test_local_to_utc_dst_cannot_resolve + # At midnight local time on Aug 5 1915 in Warsaw, the clocks were put back + # 24 minutes and both periods were non-DST. Hence the block should be + # called regardless of the value of the Boolean dst parameter. 
+ + o0 = TimezoneOffset.new(5040, 0, :LMT) + o1 = TimezoneOffset.new(5040, 0, :WMT) + o2 = TimezoneOffset.new(3600, 0, :CET) + o3 = TimezoneOffset.new(3600, 3600, :CEST) + + t1 = TestTimezoneTransition.new(o1, o0, DateTime.new(1879, 12, 31, 22, 36, 0)) + t2 = TestTimezoneTransition.new(o2, o1, DateTime.new(1915, 8, 4, 22, 36, 0)) + t3 = TestTimezoneTransition.new(o3, o2, DateTime.new(1916, 4, 30, 22, 0, 0)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(1915,8,4,23,40,0) + + tz = TestTimezone.new('Europe/Warsaw', nil, [p1, p2], dt) + + assert_raises(BlockCalled) do + tz.local_to_utc(dt, true) do |periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + end + end + + assert_raises(BlockCalled) do + tz.local_to_utc(dt, false) do |periods| + assert_equal([p1, p2], periods) + raise BlockCalled, 'should be raised' + end + end + + assert_equal(DateTime.new(1915,8,4,22,16,0), tz.local_to_utc(dt) {|periods| periods.first}) + assert_equal(DateTime.new(1915,8,4,22,40,0), tz.local_to_utc(dt) {|periods| periods.last}) + assert_equal(DateTime.new(1915,8,4,22,16,0), tz.local_to_utc(dt) {|periods| [periods.first]}) + assert_equal(DateTime.new(1915,8,4,22,40,0), tz.local_to_utc(dt) {|periods| [periods.last]}) + end + + def test_local_to_utc_block_ambiguous + o1 = TimezoneOffset.new(-18000, 0, :EST) + o2 = TimezoneOffset.new(-18000, 3600, :EDT) + + t1 = TestTimezoneTransition.new(o2, o1, 1081062000) + t2 = TestTimezoneTransition.new(o1, o2, 1099202400) + t3 = TestTimezoneTransition.new(o2, o1, 1112511600) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t2, t3) + + dt = DateTime.new(2004,10,31,1,30,0) + tz = TestTimezone.new('America/New_York', nil, [p1, p2], dt) + + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| nil} } + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| periods} } + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| []} } + assert_raises(AmbiguousTime) { tz.local_to_utc(dt) {|periods| raise AmbiguousTime, 'Ambiguous time'} } + end + + def test_offsets_up_to + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + o5 = TimezoneOffset.new(-21600, 0, :TESTS) + + t1 = TestTimezoneTransition.new(o2, o1, Time.utc(2010, 4,1,1,0,0).to_i) + t2 = TestTimezoneTransition.new(o3, o2, Time.utc(2010,10,1,1,0,0).to_i) + t3 = TestTimezoneTransition.new(o2, o3, Time.utc(2011, 3,1,1,0,0).to_i) + t4 = TestTimezoneTransition.new(o4, o2, Time.utc(2011, 4,1,1,0,0).to_i) + t5 = TestTimezoneTransition.new(o3, o4, Time.utc(2011,10,1,1,0,0).to_i) + t6 = TestTimezoneTransition.new(o5, o3, Time.utc(2012, 3,1,1,0,0).to_i) + + assert_array_same_items([o1, o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1), nil, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,0), [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,0))) + assert_array_same_items([o1, o2, o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0), nil, [t1, t2, t3, t4, t5]). 
+ offsets_up_to(Time.utc(2012,3,1,1,0,0))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,1), [t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1), Time.utc(2010,4,1,1,0,1))) + assert_array_same_items([o2, o3], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2011,3,1,2,0,0), Time.utc(2011,3,1,0,0,0), [t3]). + offsets_up_to(Time.utc(2011,3,1,2,0,0), Time.utc(2011,3,1,0,0,0))) + assert_array_same_items([o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0), Time.utc(2011,4,1,1,0,0), [t4, t5]). + offsets_up_to(Time.utc(2012,3,1,1,0,0), Time.utc(2011,4,1,1,0,0))) + + assert_array_same_items([o1, o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1).to_i, nil, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1).to_i)) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,0).to_i, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,0).to_i)) + assert_array_same_items([o1, o2, o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0).to_i, nil, [t1, t2, t3, t4, t5]). + offsets_up_to(Time.utc(2012,3,1,1,0,0).to_i)) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,1).to_i, [t2, t3, t4, t5, t6]). + offsets_up_to(Time.utc(2012,3,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,1).to_i)) + assert_array_same_items([o2, o3], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2011,3,1,2,0,0).to_i, Time.utc(2011,3,1,0,0,0).to_i, [t3]). + offsets_up_to(Time.utc(2011,3,1,2,0,0).to_i, Time.utc(2011,3,1,0,0,0).to_i)) + assert_array_same_items([o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,3,1,1,0,0).to_i, Time.utc(2011,4,1,1,0,0).to_i, [t4, t5]). + offsets_up_to(Time.utc(2012,3,1,1,0,0).to_i, Time.utc(2011,4,1,1,0,0).to_i)) + + assert_array_same_items([o1, o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,1), nil, [t1, t2, t3, t4, t5, t6]). + offsets_up_to(DateTime.new(2012,3,1,1,0,1))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,0), [t1, t2, t3, t4, t5, t6]). + offsets_up_to(DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,0))) + assert_array_same_items([o1, o2, o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,0), nil, [t1, t2, t3, t4, t5]). + offsets_up_to(DateTime.new(2012,3,1,1,0,0))) + assert_array_same_items([o2, o3, o4, o5], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,1), [t2, t3, t4, t5, t6]). + offsets_up_to(DateTime.new(2012,3,1,1,0,1), DateTime.new(2010,4,1,1,0,1))) + assert_array_same_items([o2, o3], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2011,3,1,2,0,0), DateTime.new(2011,3,1,0,0,0), [t3]). + offsets_up_to(DateTime.new(2011,3,1,2,0,0), DateTime.new(2011,3,1,0,0,0))) + assert_array_same_items([o3, o4], + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,3,1,1,0,0), DateTime.new(2011,4,1,1,0,0), [t4, t5]). 
+ offsets_up_to(DateTime.new(2012,3,1,1,0,0), DateTime.new(2011,4,1,1,0,0))) + end + + def test_offsets_up_to_no_transitions + o = TimezoneOffset.new(600, 0, :LMT) + p = TimezonePeriod.new(nil, nil, o) + + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0), nil, p). + offsets_up_to(Time.utc(2000,1,1,1,0,0))) + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0), Time.utc(1990,1,1,1,0,0), p). + offsets_up_to(Time.utc(2000,1,1,1,0,0), Time.utc(1990,1,1,1,0,0))) + + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0).to_i, nil, p). + offsets_up_to(Time.utc(2000,1,1,1,0,0).to_i)) + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', Time.utc(2000,1,1,1,0,0).to_i, Time.utc(1990,1,1,1,0,0).to_i, p). + offsets_up_to(Time.utc(2000,1,1,1,0,0).to_i, Time.utc(1990,1,1,1,0,0).to_i)) + + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', DateTime.new(2000,1,1,1,0,0), nil, p). + offsets_up_to(DateTime.new(2000,1,1,1,0,0))) + assert_array_same_items([o], + OffsetsUpToNoTransitionsTestTimezone.new('Test/Zone', DateTime.new(2000,1,1,1,0,0), DateTime.new(1990,1,1,1,0,0), p). + offsets_up_to(DateTime.new(2000,1,1,1,0,0), DateTime.new(1990,1,1,1,0,0))) + end + + def test_offsets_up_to_utc_to_not_greater_than_utc_from + assert_raises(ArgumentError) do + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,8,1,0,0,0), Time.utc(2012,8,1,0,0,0), []). + offsets_up_to(Time.utc(2012,8,1,0,0,0), Time.utc(2012,8,1,0,0,0)) + end + + assert_raises(ArgumentError) do + OffsetsUpToTestTimezone.new('Test/Zone', Time.utc(2012,8,1,0,0,0).to_i, Time.utc(2012,8,1,0,0,0).to_i, []). + offsets_up_to(Time.utc(2012,8,1,0,0,0).to_i, Time.utc(2012,8,1,0,0,0).to_i) + end + + assert_raises(ArgumentError) do + OffsetsUpToTestTimezone.new('Test/Zone', DateTime.new(2012,8,1,0,0,0), DateTime.new(2012,8,1,0,0,0), []). 
+ offsets_up_to(DateTime.new(2012,8,1,0,0,0), DateTime.new(2012,8,1,0,0,0)) + end + end + + def test_now + assert_kind_of(Time, Timezone.get('Europe/London').now) + end + + def test_current_period + assert_kind_of(TimezonePeriod, Timezone.get('Europe/London').current_period) + end + + def test_current_period_and_time + current = Timezone.get('Europe/London').current_period_and_time + assert_equal(2, current.length) + assert_kind_of(Time, current[0]) + assert_kind_of(TimezonePeriod, current[1]) + end + + def test_current_time_and_period + current = Timezone.get('Europe/London').current_time_and_period + assert_equal(2, current.length) + assert_kind_of(Time, current[0]) + assert_kind_of(TimezonePeriod, current[1]) + end + + def test_compare + assert_equal(0, TestTimezone.new('Europe/London') <=> TestTimezone.new('Europe/London')) + assert_equal(-1, TestTimezone.new('Europe/London') <=> TestTimezone.new('Europe/london')) + assert_equal(-1, TestTimezone.new('Europe/London') <=> TestTimezone.new('Europe/Paris')) + assert_equal(1, TestTimezone.new('Europe/Paris') <=> TestTimezone.new('Europe/London')) + assert_equal(-1, TestTimezone.new('America/New_York') <=> TestTimezone.new('Europe/Paris')) + assert_equal(1, TestTimezone.new('Europe/Paris') <=> TestTimezone.new('America/New_York')) + end + + def test_compare_non_comparable + assert_nil(TestTimezone.new('Europe/London') <=> Object.new) + end + + def test_equality + assert_equal(true, TestTimezone.new('Europe/London') == TestTimezone.new('Europe/London')) + assert_equal(false, TestTimezone.new('Europe/London') == TestTimezone.new('Europe/london')) + assert_equal(false, TestTimezone.new('Europe/London') == TestTimezone.new('Europe/Paris')) + assert(!(TestTimezone.new('Europe/London') == Object.new)) + end + + def test_eql + assert_equal(true, TestTimezone.new('Europe/London').eql?(TestTimezone.new('Europe/London'))) + assert_equal(false, TestTimezone.new('Europe/London').eql?(TestTimezone.new('Europe/london'))) + assert_equal(false, TestTimezone.new('Europe/London').eql?(TestTimezone.new('Europe/Paris'))) + assert(!TestTimezone.new('Europe/London').eql?(Object.new)) + end + + def test_hash + assert_equal('Europe/London'.hash, TestTimezone.new('Europe/London').hash) + assert_equal('America/New_York'.hash, TestTimezone.new('America/New_York').hash) + end + + def test_marshal_data + tz = Timezone.get('Europe/London') + assert_kind_of(DataTimezone, tz) + assert_same(tz, Marshal.load(Marshal.dump(tz))) + end + + def test_marshal_linked + tz = Timezone.get('UTC') + + # ZoneinfoDataSource doesn't return LinkedTimezoneInfo for any timezone. 
+ if DataSource.get.load_timezone_info('UTC').kind_of?(LinkedTimezoneInfo) + assert_kind_of(LinkedTimezone, tz) + else + assert_kind_of(DataTimezone, tz) + end + + assert_same(tz, Marshal.load(Marshal.dump(tz))) + end + + def test_strftime_datetime + tz = Timezone.get('Europe/London') + dt = DateTime.new(2006, 7, 15, 22, 12, 2) + assert_equal('23:12:02 BST', tz.strftime('%H:%M:%S %Z', dt)) + assert_equal('BST', tz.strftime('%Z', dt)) + assert_equal('%ZBST', tz.strftime('%%Z%Z', dt)) + assert_equal('BST BST', tz.strftime('%Z %Z', dt)) + assert_equal('BST %Z %BST %%Z %%BST', tz.strftime('%Z %%Z %%%Z %%%%Z %%%%%Z', dt)) + assert_equal('+0100 +01:00 +01:00:00 +01 %::::z', tz.strftime('%z %:z %::z %:::z %::::z', dt)) + assert_equal('1153001522 %s %1153001522', tz.strftime('%s %%s %%%s', dt)) + end + + def test_strftime_time + tz = Timezone.get('Europe/London') + t = Time.utc(2006, 7, 15, 22, 12, 2) + assert_equal('23:12:02 BST', tz.strftime('%H:%M:%S %Z', t)) + assert_equal('BST', tz.strftime('%Z', t)) + assert_equal('%ZBST', tz.strftime('%%Z%Z', t)) + assert_equal('BST BST', tz.strftime('%Z %Z', t)) + assert_equal('BST %Z %BST %%Z %%BST', tz.strftime('%Z %%Z %%%Z %%%%Z %%%%%Z', t)) + assert_equal('+0100 +01:00 +01:00:00 +01 %::::z', tz.strftime('%z %:z %::z %:::z %::::z', t)) + assert_equal('1153001522 %s %1153001522', tz.strftime('%s %%s %%%s', t)) + end + + def test_strftime_int + tz = Timezone.get('Europe/London') + i = Time.utc(2006, 7, 15, 22, 12, 2).to_i + assert_equal('23:12:02 BST', tz.strftime('%H:%M:%S %Z', i)) + assert_equal('BST', tz.strftime('%Z', i)) + assert_equal('%ZBST', tz.strftime('%%Z%Z', i)) + assert_equal('BST BST', tz.strftime('%Z %Z', i)) + assert_equal('BST %Z %BST %%Z %%BST', tz.strftime('%Z %%Z %%%Z %%%%Z %%%%%Z', i)) + assert_equal('+0100 +01:00 +01:00:00 +01 %::::z', tz.strftime('%z %:z %::z %:::z %::::z', i)) + assert_equal('1153001522 %s %1153001522', tz.strftime('%s %%s %%%s', i)) + end + + def test_get_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.get('Europe/London') + end + end + + def test_new_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.new('Europe/London') + end + end + + def test_all_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all + end + end + + def test_all_identifiers_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_identifiers + end + end + + def test_all_data_zones_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_data_zones + end + end + + def test_all_data_zone_identifiers_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_data_zone_identifiers + end + end + + def test_all_linked_zones_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_linked_zones + end + end + + def test_all_linked_zone_identifiers_missing_data_source + DataSource.set(DataSource.new) + + assert_raises(InvalidDataSource) do + Timezone.all_linked_zone_identifiers + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_definition.rb new file mode 100644 index 0000000..f7f2388 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_definition.rb @@ 
-0,0 +1,113 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneDefinition < Minitest::Test + + module DataTest + include TimezoneDefinition + + timezone 'Test/Data/Zone' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + end + + module LinkedTest + include TimezoneDefinition + + linked_timezone 'Test/Linked/Zone', 'Test/Linked_To/Zone' + end + + module DoubleDataTest + include TimezoneDefinition + + timezone 'Test/Data/Zone1' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + + timezone 'Test/Data/Zone2' do |tz| + tz.offset :o0, 75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + end + + module DoubleLinkedTest + include TimezoneDefinition + + linked_timezone 'Test/Linked/Zone1', 'Test/Linked_To/Zone1' + linked_timezone 'Test/Linked/Zone2', 'Test/Linked_To/Zone2' + end + + module DataLinkedTest + include TimezoneDefinition + + timezone 'Test/Data/Zone1' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + + linked_timezone 'Test/Linked/Zone2', 'Test/Linked_To/Zone2' + end + + module LinkedDataTest + include TimezoneDefinition + + linked_timezone 'Test/Linked/Zone1', 'Test/Linked_To/Zone1' + + timezone 'Test/Data/Zone2' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + + tz.transition 1847, 12, :o1, 2760187969, 1152 + end + end + + def test_data + assert_kind_of(TransitionDataTimezoneInfo, DataTest.get) + assert_equal('Test/Data/Zone', DataTest.get.identifier) + assert_equal(:LMT, DataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,14)).abbreviation) + assert_equal(:GMT, DataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,15)).abbreviation) + end + + def test_linked + assert_kind_of(LinkedTimezoneInfo, LinkedTest.get) + assert_equal('Test/Linked/Zone', LinkedTest.get.identifier) + assert_equal('Test/Linked_To/Zone', LinkedTest.get.link_to_identifier) + end + + def test_double_data + assert_kind_of(TransitionDataTimezoneInfo, DoubleDataTest.get) + assert_equal('Test/Data/Zone2', DoubleDataTest.get.identifier) + assert_equal(:LMT, DoubleDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,14)).abbreviation) + assert_equal(:GMT, DoubleDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,15)).abbreviation) + end + + def test_double_linked + assert_kind_of(LinkedTimezoneInfo, DoubleLinkedTest.get) + assert_equal('Test/Linked/Zone2', DoubleLinkedTest.get.identifier) + assert_equal('Test/Linked_To/Zone2', DoubleLinkedTest.get.link_to_identifier) + end + + def test_data_linked + assert_kind_of(LinkedTimezoneInfo, DataLinkedTest.get) + assert_equal('Test/Linked/Zone2', DataLinkedTest.get.identifier) + assert_equal('Test/Linked_To/Zone2', DataLinkedTest.get.link_to_identifier) + end + + def test_linked_data + assert_kind_of(TransitionDataTimezoneInfo, LinkedDataTest.get) + assert_equal('Test/Data/Zone2', LinkedDataTest.get.identifier) + assert_equal(:LMT, LinkedDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,14)).abbreviation) + assert_equal(:GMT, LinkedDataTest.get.period_for_utc(DateTime.new(1847,12,1,0,1,15)).abbreviation) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_index_definition.rb new file mode 100644 index 
0000000..cd55dd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_index_definition.rb @@ -0,0 +1,73 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneIndexDefinition < Minitest::Test + + module TimezonesTest1 + include TimezoneIndexDefinition + + timezone 'Test/One' + timezone 'Test/Two' + linked_timezone 'Test/Three' + timezone 'Another/Zone' + linked_timezone 'And/Yet/Another' + end + + module TimezonesTest2 + include TimezoneIndexDefinition + + timezone 'Test/A/One' + timezone 'Test/A/Two' + timezone 'Test/A/Three' + end + + module TimezonesTest3 + include TimezoneIndexDefinition + + linked_timezone 'Test/B/One' + linked_timezone 'Test/B/Two' + linked_timezone 'Test/B/Three' + end + + module TimezonesTest4 + include TimezoneIndexDefinition + + end + + def test_timezones + assert_equal(['Test/One', 'Test/Two', 'Test/Three', 'Another/Zone', 'And/Yet/Another'], TimezonesTest1.timezones) + assert_equal(['Test/A/One', 'Test/A/Two', 'Test/A/Three'], TimezonesTest2.timezones) + assert_equal(['Test/B/One', 'Test/B/Two', 'Test/B/Three'], TimezonesTest3.timezones) + assert_equal([], TimezonesTest4.timezones) + + assert_equal(true, TimezonesTest1.timezones.frozen?) + assert_equal(true, TimezonesTest2.timezones.frozen?) + assert_equal(true, TimezonesTest3.timezones.frozen?) + assert_equal(true, TimezonesTest4.timezones.frozen?) + end + + def test_data_timezones + assert_equal(['Test/One', 'Test/Two', 'Another/Zone'], TimezonesTest1.data_timezones) + assert_equal(['Test/A/One', 'Test/A/Two', 'Test/A/Three'], TimezonesTest2.data_timezones) + assert_equal([], TimezonesTest3.data_timezones) + assert_equal([], TimezonesTest4.data_timezones) + + assert_equal(true, TimezonesTest1.data_timezones.frozen?) + assert_equal(true, TimezonesTest2.data_timezones.frozen?) + assert_equal(true, TimezonesTest3.data_timezones.frozen?) + assert_equal(true, TimezonesTest4.data_timezones.frozen?) + end + + def test_linked_timezones + assert_equal(['Test/Three', 'And/Yet/Another'], TimezonesTest1.linked_timezones) + assert_equal([], TimezonesTest2.linked_timezones) + assert_equal(['Test/B/One', 'Test/B/Two', 'Test/B/Three'], TimezonesTest3.linked_timezones) + assert_equal([], TimezonesTest4.linked_timezones) + + assert_equal(true, TimezonesTest1.linked_timezones.frozen?) + assert_equal(true, TimezonesTest2.linked_timezones.frozen?) + assert_equal(true, TimezonesTest3.linked_timezones.frozen?) + assert_equal(true, TimezonesTest4.linked_timezones.frozen?) 
+ end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_info.rb new file mode 100644 index 0000000..fd70929 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_info.rb @@ -0,0 +1,11 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneInfo < Minitest::Test + + def test_identifier + ti = TimezoneInfo.new('Test/Zone') + assert_equal('Test/Zone', ti.identifier) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_london.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_london.rb new file mode 100644 index 0000000..a0599b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_london.rb @@ -0,0 +1,143 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneLondon < Minitest::Test + def test_2004 + #Europe/London Sun Mar 28 00:59:59 2004 UTC = Sun Mar 28 00:59:59 2004 GMT isdst=0 gmtoff=0 + #Europe/London Sun Mar 28 01:00:00 2004 UTC = Sun Mar 28 02:00:00 2004 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 31 00:59:59 2004 UTC = Sun Oct 31 01:59:59 2004 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 31 01:00:00 2004 UTC = Sun Oct 31 01:00:00 2004 GMT isdst=0 gmtoff=0 + + tz = Timezone.get('Europe/London') + assert_equal(DateTime.new(2004,3,28,0,59,59), tz.utc_to_local(DateTime.new(2004,3,28,0,59,59))) + assert_equal(DateTime.new(2004,3,28,2,0,0), tz.utc_to_local(DateTime.new(2004,3,28,1,0,0))) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.utc_to_local(DateTime.new(2004,10,31,0,59,59))) + assert_equal(DateTime.new(2004,10,31,1,0,0), tz.utc_to_local(DateTime.new(2004,10,31,1,0,0))) + + assert_equal(DateTime.new(2004,3,28,0,59,59), tz.local_to_utc(DateTime.new(2004,3,28,0,59,59))) + assert_equal(DateTime.new(2004,3,28,1,0,0), tz.local_to_utc(DateTime.new(2004,3,28,2,0,0))) + assert_equal(DateTime.new(2004,10,31,0,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), true)) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), false)) + assert_equal(DateTime.new(2004,10,31,0,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), true)) + assert_equal(DateTime.new(2004,10,31,1,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(2004,3,28,1,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(2004,10,31,1,0,0)) } + + assert_equal(:GMT, tz.period_for_utc(DateTime.new(2004,3,28,0,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(2004,3,28,1,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(2004,10,31,0,59,59)).zone_identifier) + assert_equal(:GMT, tz.period_for_utc(DateTime.new(2004,10,31,1,0,0)).zone_identifier) + + assert_equal(:GMT, tz.period_for_local(DateTime.new(2004,3,28,0,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(2004,3,28,2,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).zone_identifier) + + 
assert_equal(0, tz.period_for_utc(DateTime.new(2004,3,28,0,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(2004,3,28,1,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(2004,10,31,0,59,59)).utc_total_offset) + assert_equal(0, tz.period_for_utc(DateTime.new(2004,10,31,1,0,0)).utc_total_offset) + + assert_equal(0, tz.period_for_local(DateTime.new(2004,3,28,0,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(2004,3,28,2,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,3,28,1,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,10,31,1,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(0, 0, :GMT), TimezoneOffset.new(0, 3600, :BST)], offsets) + end + + def test_1961 + # This test cannot be run when using ZoneinfoDataSource on platforms + # that don't support Times before the epoch (i.e. Ruby < 1.9 on Windows) + # because it relates to the year 1961. 
+ + if !DataSource.get.kind_of?(ZoneinfoDataSource) || RubyCoreSupport.time_supports_negative + #Europe/London Sun Mar 26 01:59:59 1961 UTC = Sun Mar 26 01:59:59 1961 GMT isdst=0 gmtoff=0 + #Europe/London Sun Mar 26 02:00:00 1961 UTC = Sun Mar 26 03:00:00 1961 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 29 01:59:59 1961 UTC = Sun Oct 29 02:59:59 1961 BST isdst=1 gmtoff=3600 + #Europe/London Sun Oct 29 02:00:00 1961 UTC = Sun Oct 29 02:00:00 1961 GMT isdst=0 gmtoff=0 + + tz = Timezone.get('Europe/London') + assert_equal(DateTime.new(1961,3,26,1,59,59), tz.utc_to_local(DateTime.new(1961,3,26,1,59,59))) + assert_equal(DateTime.new(1961,3,26,3,0,0), tz.utc_to_local(DateTime.new(1961,3,26,2,0,0))) + assert_equal(DateTime.new(1961,10,29,2,59,59), tz.utc_to_local(DateTime.new(1961,10,29,1,59,59))) + assert_equal(DateTime.new(1961,10,29,2,0,0), tz.utc_to_local(DateTime.new(1961,10,29,2,0,0))) + + assert_equal(DateTime.new(1961,3,26,1,59,59), tz.local_to_utc(DateTime.new(1961,3,26,1,59,59))) + assert_equal(DateTime.new(1961,3,26,2,0,0), tz.local_to_utc(DateTime.new(1961,3,26,3,0,0))) + assert_equal(DateTime.new(1961,10,29,1,59,59), tz.local_to_utc(DateTime.new(1961,10,29,2,59,59), true)) + assert_equal(DateTime.new(1961,10,29,2,59,59), tz.local_to_utc(DateTime.new(1961,10,29,2,59,59), false)) + assert_equal(DateTime.new(1961,10,29,1,0,0), tz.local_to_utc(DateTime.new(1961,10,29,2,0,0), true)) + assert_equal(DateTime.new(1961,10,29,2,0,0), tz.local_to_utc(DateTime.new(1961,10,29,2,0,0), false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(1961,3,26,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(1961,10,29,2,0,0)) } + + assert_equal(:GMT, tz.period_for_utc(DateTime.new(1961,3,26,1,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(1961,3,26,2,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_utc(DateTime.new(1961,10,29,1,59,59)).zone_identifier) + assert_equal(:GMT, tz.period_for_utc(DateTime.new(1961,10,29,2,0,0)).zone_identifier) + + assert_equal(:GMT, tz.period_for_local(DateTime.new(1961,3,26,1,59,59)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(1961,3,26,3,0,0)).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), false).zone_identifier) + assert_equal(:BST, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), true).zone_identifier) + assert_equal(:GMT, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), false).zone_identifier) + + assert_equal(0, tz.period_for_utc(DateTime.new(1961,3,26,1,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(1961,3,26,2,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_utc(DateTime.new(1961,10,29,1,59,59)).utc_total_offset) + assert_equal(0, tz.period_for_utc(DateTime.new(1961,10,29,2,0,0)).utc_total_offset) + + assert_equal(0, tz.period_for_local(DateTime.new(1961,3,26,1,59,59)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(1961,3,26,3,0,0)).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(1961,10,29,2,59,59), false).utc_total_offset) + assert_equal(3600, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), true).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(1961,10,29,2,0,0), false).utc_total_offset) + + + transitions = 
tz.transitions_up_to(DateTime.new(1962,1,1,0,0,0), DateTime.new(1961,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(1961,3,26,2,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(1961,10,29,2,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(0, 3600, :BST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(0, 0, :GMT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(1962,1,1,0,0,0), DateTime.new(1961,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(0, 0, :GMT), TimezoneOffset.new(0, 3600, :BST)], offsets) + end + end + + def test_time_boundary + #Europe/London Sat Oct 26 23:00:00 1968 UTC = Sun Oct 27 00:00:00 1968 GMT isdst=0 gmtoff=3600 + #Europe/London Sun Oct 31 01:59:59 1971 UTC = Sun Oct 31 02:59:59 1971 GMT isdst=0 gmtoff=3600 + + tz = Timezone.get('Europe/London') + assert_equal(DateTime.new(1970,1,1,1,0,0), tz.utc_to_local(DateTime.new(1970,1,1,0,0,0))) + assert_equal(DateTime.new(1970,1,1,0,0,0), tz.local_to_utc(DateTime.new(1970,1,1,1,0,0))) + assert_equal(Time.utc(1970,1,1,1,0,0), tz.utc_to_local(Time.utc(1970,1,1,0,0,0))) + assert_equal(Time.utc(1970,1,1,0,0,0), tz.local_to_utc(Time.utc(1970,1,1,1,0,0))) + assert_equal(3600, tz.utc_to_local(0)) + assert_equal(0, tz.local_to_utc(3600)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_melbourne.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_melbourne.rb new file mode 100644 index 0000000..bbfa883 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_melbourne.rb @@ -0,0 +1,142 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneMelbourne < Minitest::Test + def test_2004 + #Australia/Melbourne Sat Mar 27 15:59:59 2004 UTC = Sun Mar 28 02:59:59 2004 AEDT isdst=1 gmtoff=39600 + #Australia/Melbourne Sat Mar 27 16:00:00 2004 UTC = Sun Mar 28 02:00:00 2004 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Oct 30 15:59:59 2004 UTC = Sun Oct 31 01:59:59 2004 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Oct 30 16:00:00 2004 UTC = Sun Oct 31 03:00:00 2004 AEDT isdst=1 gmtoff=39600 + + tz = Timezone.get('Australia/Melbourne') + assert_equal(DateTime.new(2004,3,28,2,59,59), tz.utc_to_local(DateTime.new(2004,3,27,15,59,59))) + assert_equal(DateTime.new(2004,3,28,2,0,0), tz.utc_to_local(DateTime.new(2004,3,27,16,0,0))) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.utc_to_local(DateTime.new(2004,10,30,15,59,59))) + assert_equal(DateTime.new(2004,10,31,3,0,0), tz.utc_to_local(DateTime.new(2004,10,30,16,0,0))) + + assert_equal(DateTime.new(2004,3,27,15,59,59), tz.local_to_utc(DateTime.new(2004,3,28,2,59,59), true)) + assert_equal(DateTime.new(2004,3,27,16,59,59), tz.local_to_utc(DateTime.new(2004,3,28,2,59,59), false)) + assert_equal(DateTime.new(2004,3,27,15,0,0), tz.local_to_utc(DateTime.new(2004,3,28,2,0,0), true)) + assert_equal(DateTime.new(2004,3,27,16,0,0), tz.local_to_utc(DateTime.new(2004,3,28,2,0,0), false)) + assert_equal(DateTime.new(2004,10,30,15,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59))) + assert_equal(DateTime.new(2004,10,30,16,0,0), tz.local_to_utc(DateTime.new(2004,10,31,3,0,0))) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(2004,10,31,2,0,0)) } + 
assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(2004,3,28,2,0,0)) } + + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(2004,3,27,15,59,59)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(2004,3,27,16,0,0)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(2004,10,30,15,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(2004,10,30,16,0,0)).zone_identifier) + + assert_equal(:AEDT, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), false).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), false).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(2004,10,31,1,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(2004,10,31,3,0,0)).zone_identifier) + + assert_equal(39600, tz.period_for_utc(DateTime.new(2004,3,27,15,59,59)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(2004,3,27,16,0,0)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(2004,10,30,15,59,59)).utc_total_offset) + assert_equal(39600, tz.period_for_utc(DateTime.new(2004,10,30,16,0,0)).utc_total_offset) + + assert_equal(39600, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(2004,3,28,2,59,59), false).utc_total_offset) + assert_equal(39600, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(2004,3,28,2,0,0), false).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(2004,10,31,1,59,59)).utc_total_offset) + assert_equal(39600, tz.period_for_local(DateTime.new(2004,10,31,3,0,0)).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,3,27,16,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,10,30,16,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(36000, 0, :AEST), TimezoneOffset.new(36000, 3600, :AEDT)], offsets) + end + + def test_1942 + # This test cannot be run when using ZoneinfoDataSource on platforms + # that don't support Times before the epoch (i.e. Ruby < 1.9 on Windows) + # because it relates to the year 1942. 
+ + if !DataSource.get.kind_of?(ZoneinfoDataSource) || RubyCoreSupport.time_supports_negative + #Australia/Melbourne Sat Mar 28 14:59:59 1942 UTC = Sun Mar 29 01:59:59 1942 AEDT isdst=1 gmtoff=39600 + #Australia/Melbourne Sat Mar 28 15:00:00 1942 UTC = Sun Mar 29 01:00:00 1942 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Sep 26 15:59:59 1942 UTC = Sun Sep 27 01:59:59 1942 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Sep 26 16:00:00 1942 UTC = Sun Sep 27 03:00:00 1942 AEDT isdst=1 gmtoff=39600 + + tz = Timezone.get('Australia/Melbourne') + assert_equal(DateTime.new(1942,3,29,1,59,59), tz.utc_to_local(DateTime.new(1942,3,28,14,59,59))) + assert_equal(DateTime.new(1942,3,29,1,0,0), tz.utc_to_local(DateTime.new(1942,3,28,15,0,0))) + assert_equal(DateTime.new(1942,9,27,1,59,59), tz.utc_to_local(DateTime.new(1942,9,26,15,59,59))) + assert_equal(DateTime.new(1942,9,27,3,0,0), tz.utc_to_local(DateTime.new(1942,9,26,16,0,0))) + + assert_equal(DateTime.new(1942,3,28,14,59,59), tz.local_to_utc(DateTime.new(1942,3,29,1,59,59), true)) + assert_equal(DateTime.new(1942,3,28,15,59,59), tz.local_to_utc(DateTime.new(1942,3,29,1,59,59), false)) + assert_equal(DateTime.new(1942,3,28,14,0,0), tz.local_to_utc(DateTime.new(1942,3,29,1,0,0), true)) + assert_equal(DateTime.new(1942,3,28,15,0,0), tz.local_to_utc(DateTime.new(1942,3,29,1,0,0), false)) + assert_equal(DateTime.new(1942,9,26,15,59,59), tz.local_to_utc(DateTime.new(1942,9,27,1,59,59))) + assert_equal(DateTime.new(1942,9,26,16,0,0), tz.local_to_utc(DateTime.new(1942,9,27,3,0,0))) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(1942,9,27,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(1942,3,29,1,0,0)) } + + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(1942,3,28,14,59,59)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(1942,3,28,15,0,0)).zone_identifier) + assert_equal(:AEST, tz.period_for_utc(DateTime.new(1942,9,26,15,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_utc(DateTime.new(1942,9,26,16,0,0)).zone_identifier) + + assert_equal(:AEDT, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), false).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), true).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), false).zone_identifier) + assert_equal(:AEST, tz.period_for_local(DateTime.new(1942,9,27,1,59,59)).zone_identifier) + assert_equal(:AEDT, tz.period_for_local(DateTime.new(1942,9,27,3,0,0)).zone_identifier) + + assert_equal(39600, tz.period_for_utc(DateTime.new(1942,3,28,14,59,59)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(1942,3,28,15,0,0)).utc_total_offset) + assert_equal(36000, tz.period_for_utc(DateTime.new(1942,9,26,15,59,59)).utc_total_offset) + assert_equal(39600, tz.period_for_utc(DateTime.new(1942,9,26,16,0,0)).utc_total_offset) + + assert_equal(39600, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(1942,3,29,1,59,59), false).utc_total_offset) + assert_equal(39600, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), true).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(1942,3,29,1,0,0), false).utc_total_offset) + assert_equal(36000, tz.period_for_local(DateTime.new(1942,9,27,1,59,59)).utc_total_offset) + assert_equal(39600, 
tz.period_for_local(DateTime.new(1942,9,27,3,0,0)).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(1943,1,1,0,0,0), DateTime.new(1942,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(1942,3,28,15,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(1942,9,26,16,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(36000, 0, :AEST), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(36000, 3600, :AEDT), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(1943,1,1,0,0,0), DateTime.new(1942,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(36000, 0, :AEST), TimezoneOffset.new(36000, 3600, :AEDT)], offsets) + end + end + + def test_time_boundary + #Australia/Melbourne Sat Mar 25 15:00:00 1944 UTC = Sun Mar 26 01:00:00 1944 AEST isdst=0 gmtoff=36000 + #Australia/Melbourne Sat Oct 30 15:59:59 1971 UTC = Sun Oct 31 01:59:59 1971 AEST isdst=0 gmtoff=36000 + + tz = Timezone.get('Australia/Melbourne') + assert_equal(DateTime.new(1970,1,1,10,0,0), tz.utc_to_local(DateTime.new(1970,1,1,0,0,0))) + assert_equal(DateTime.new(1970,1,1,0,0,0), tz.local_to_utc(DateTime.new(1970,1,1,10,0,0))) + assert_equal(Time.utc(1970,1,1,10,0,0), tz.utc_to_local(Time.utc(1970,1,1,0,0,0))) + assert_equal(Time.utc(1970,1,1,0,0,0), tz.local_to_utc(Time.utc(1970,1,1,10,0,0))) + assert_equal(36000, tz.utc_to_local(0)) + assert_equal(0, tz.local_to_utc(36000)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_new_york.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_new_york.rb new file mode 100644 index 0000000..3e270a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_new_york.rb @@ -0,0 +1,142 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneNewYork < Minitest::Test + def test_2004 + #America/New_York Sun Apr 4 06:59:59 2004 UTC = Sun Apr 4 01:59:59 2004 EST isdst=0 gmtoff=-18000 + #America/New_York Sun Apr 4 07:00:00 2004 UTC = Sun Apr 4 03:00:00 2004 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 31 05:59:59 2004 UTC = Sun Oct 31 01:59:59 2004 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 31 06:00:00 2004 UTC = Sun Oct 31 01:00:00 2004 EST isdst=0 gmtoff=-18000 + + tz = Timezone.get('America/New_York') + assert_equal(DateTime.new(2004,4,4,1,59,59), tz.utc_to_local(DateTime.new(2004,4,4,6,59,59))) + assert_equal(DateTime.new(2004,4,4,3,0,0), tz.utc_to_local(DateTime.new(2004,4,4,7,0,0))) + assert_equal(DateTime.new(2004,10,31,1,59,59), tz.utc_to_local(DateTime.new(2004,10,31,5,59,59))) + assert_equal(DateTime.new(2004,10,31,1,0,0), tz.utc_to_local(DateTime.new(2004,10,31,6,0,0))) + + assert_equal(DateTime.new(2004,4,4,6,59,59), tz.local_to_utc(DateTime.new(2004,4,4,1,59,59))) + assert_equal(DateTime.new(2004,4,4,7,0,0), tz.local_to_utc(DateTime.new(2004,4,4,3,0,0))) + assert_equal(DateTime.new(2004,10,31,5,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), true)) + assert_equal(DateTime.new(2004,10,31,6,59,59), tz.local_to_utc(DateTime.new(2004,10,31,1,59,59), false)) + assert_equal(DateTime.new(2004,10,31,5,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), true)) + assert_equal(DateTime.new(2004,10,31,6,0,0), tz.local_to_utc(DateTime.new(2004,10,31,1,0,0), 
false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(2004,4,4,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(2004,10,31,1,0,0)) } + + assert_equal(:EST, tz.period_for_utc(DateTime.new(2004,4,4,6,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(2004,4,4,7,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(2004,10,31,5,59,59)).zone_identifier) + assert_equal(:EST, tz.period_for_utc(DateTime.new(2004,10,31,6,0,0)).zone_identifier) + + assert_equal(:EST, tz.period_for_local(DateTime.new(2004,4,4,1,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(2004,4,4,3,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).zone_identifier) + + assert_equal(-18000, tz.period_for_utc(DateTime.new(2004,4,4,6,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(2004,4,4,7,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(2004,10,31,5,59,59)).utc_total_offset) + assert_equal(-18000, tz.period_for_utc(DateTime.new(2004,10,31,6,0,0)).utc_total_offset) + + assert_equal(-18000, tz.period_for_local(DateTime.new(2004,4,4,1,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(2004,4,4,3,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), true).utc_total_offset) + assert_equal(-18000, tz.period_for_local(DateTime.new(2004,10,31,1,59,59), false).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), true).utc_total_offset) + assert_equal(-18000, tz.period_for_local(DateTime.new(2004,10,31,1,0,0), false).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,4,4,7,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(2004,10,31,6,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(2005,1,1,0,0,0), DateTime.new(2004,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(-18000, 0, :EST), TimezoneOffset.new(-18000, 3600, :EDT)], offsets) + end + + def test_1957 + # This test cannot be run when using ZoneinfoDataSource on platforms + # that don't support Times before the epoch (i.e. Ruby < 1.9 on Windows) + # because it relates to the year 1957. 
+ + if !DataSource.get.kind_of?(ZoneinfoDataSource) || RubyCoreSupport.time_supports_negative + #America/New_York Sun Apr 28 06:59:59 1957 UTC = Sun Apr 28 01:59:59 1957 EST isdst=0 gmtoff=-18000 + #America/New_York Sun Apr 28 07:00:00 1957 UTC = Sun Apr 28 03:00:00 1957 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 27 05:59:59 1957 UTC = Sun Oct 27 01:59:59 1957 EDT isdst=1 gmtoff=-14400 + #America/New_York Sun Oct 27 06:00:00 1957 UTC = Sun Oct 27 01:00:00 1957 EST isdst=0 gmtoff=-18000 + + tz = Timezone.get('America/New_York') + assert_equal(DateTime.new(1957,4,28,1,59,59), tz.utc_to_local(DateTime.new(1957,4,28,6,59,59))) + assert_equal(DateTime.new(1957,4,28,3,0,0), tz.utc_to_local(DateTime.new(1957,4,28,7,0,0))) + assert_equal(DateTime.new(1957,10,27,1,59,59), tz.utc_to_local(DateTime.new(1957,10,27,5,59,59))) + assert_equal(DateTime.new(1957,10,27,1,0,0), tz.utc_to_local(DateTime.new(1957,10,27,6,0,0))) + + assert_equal(DateTime.new(1957,4,28,6,59,59), tz.local_to_utc(DateTime.new(1957,4,28,1,59,59))) + assert_equal(DateTime.new(1957,4,28,7,0,0), tz.local_to_utc(DateTime.new(1957,4,28,3,0,0))) + assert_equal(DateTime.new(1957,10,27,5,59,59), tz.local_to_utc(DateTime.new(1957,10,27,1,59,59), true)) + assert_equal(DateTime.new(1957,10,27,6,59,59), tz.local_to_utc(DateTime.new(1957,10,27,1,59,59), false)) + assert_equal(DateTime.new(1957,10,27,5,0,0), tz.local_to_utc(DateTime.new(1957,10,27,1,0,0), true)) + assert_equal(DateTime.new(1957,10,27,6,0,0), tz.local_to_utc(DateTime.new(1957,10,27,1,0,0), false)) + + assert_raises(PeriodNotFound) { tz.local_to_utc(DateTime.new(1957,4,28,2,0,0)) } + assert_raises(AmbiguousTime) { tz.local_to_utc(DateTime.new(1957,10,27,1,0,0)) } + + assert_equal(:EST, tz.period_for_utc(DateTime.new(1957,4,28,6,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(1957,4,28,7,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_utc(DateTime.new(1957,10,27,5,59,59)).zone_identifier) + assert_equal(:EST, tz.period_for_utc(DateTime.new(1957,10,27,6,0,0)).zone_identifier) + + assert_equal(:EST, tz.period_for_local(DateTime.new(1957,4,28,1,59,59)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(1957,4,28,3,0,0)).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), false).zone_identifier) + assert_equal(:EDT, tz.period_for_local(DateTime.new(1957,10,27,1,0,0), true).zone_identifier) + assert_equal(:EST, tz.period_for_local(DateTime.new(1957,10,27,1,0,0), false).zone_identifier) + + assert_equal(-18000, tz.period_for_utc(DateTime.new(1957,4,28,6,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(1957,4,28,7,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_utc(DateTime.new(1957,10,27,5,59,59)).utc_total_offset) + assert_equal(-18000, tz.period_for_utc(DateTime.new(1957,10,27,6,0,0)).utc_total_offset) + + assert_equal(-18000, tz.period_for_local(DateTime.new(1957,4,28,1,59,59)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(1957,4,28,3,0,0)).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), true).utc_total_offset) + assert_equal(-18000, tz.period_for_local(DateTime.new(1957,10,27,1,59,59), false).utc_total_offset) + assert_equal(-14400, tz.period_for_local(DateTime.new(1957,10,27,1,0,0), true).utc_total_offset) + assert_equal(-18000, 
tz.period_for_local(DateTime.new(1957,10,27,1,0,0), false).utc_total_offset) + + transitions = tz.transitions_up_to(DateTime.new(1958,1,1,0,0,0), DateTime.new(1957,1,1,0,0,0)) + assert_equal(2, transitions.length) + assert_equal(TimeOrDateTime.new(DateTime.new(1957,4,28,7,0,0)), transitions[0].at) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[0].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[0].offset) + assert_equal(TimeOrDateTime.new(DateTime.new(1957,10,27,6,0,0)), transitions[1].at) + assert_equal(TimezoneOffset.new(-18000, 3600, :EDT), transitions[1].previous_offset) + assert_equal(TimezoneOffset.new(-18000, 0, :EST), transitions[1].offset) + + offsets = tz.offsets_up_to(DateTime.new(1958,1,1,0,0,0), DateTime.new(1957,1,1,0,0,0)) + assert_array_same_items([TimezoneOffset.new(-18000, 0, :EST), TimezoneOffset.new(-18000, 3600, :EDT)], offsets) + end + end + + def test_time_boundary + #America/New_York Sun Oct 26 06:00:00 1969 UTC = Sun Oct 26 01:00:00 1969 EST isdst=0 gmtoff=-18000 + #America/New_York Sun Apr 26 06:59:59 1970 UTC = Sun Apr 26 01:59:59 1970 EST isdst=0 gmtoff=-18000 + + tz = Timezone.get('America/New_York') + assert_equal(DateTime.new(1970,1,1,0,0,0), tz.utc_to_local(DateTime.new(1970,1,1,5,0,0))) + assert_equal(DateTime.new(1970,1,1,5,0,0), tz.local_to_utc(DateTime.new(1970,1,1,0,0,0))) + assert_equal(Time.utc(1970,1,1,0,0,0), tz.utc_to_local(Time.utc(1970,1,1,5,0,0))) + assert_equal(Time.utc(1970,1,1,5,0,0), tz.local_to_utc(Time.utc(1970,1,1,0,0,0))) + assert_equal(0, tz.utc_to_local(18000)) + assert_equal(18000, tz.local_to_utc(0)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_offset.rb new file mode 100644 index 0000000..6e29e4d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_offset.rb @@ -0,0 +1,126 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneOffset < Minitest::Test + + def test_utc_offset + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(18000, o1.utc_offset) + assert_equal(-3600, o2.utc_offset) + end + + def test_std_offset + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(0, o1.std_offset) + assert_equal(3600, o2.std_offset) + end + + def test_utc_total_offset + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(18000, o1.utc_total_offset) + assert_equal(0, o2.utc_total_offset) + end + + def test_abbreviation + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(:TEST, o1.abbreviation) + assert_equal(:TEST2, o2.abbreviation) + end + + def test_dst + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(false, o1.dst?) + assert_equal(true, o2.dst?) 
+ end + + def test_to_local + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(1148949080, o1.to_local(1148931080)) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), o1.to_local(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20, 782000), o1.to_local(Time.utc(2006, 5, 29, 19, 31, 20, 782000))) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), o1.to_local(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20 + Rational(782, 1000)), o1.to_local(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(782, 1000)))) + assert_equal(1148949080, o1.to_local(1148931080)) + assert(TimeOrDateTime.new(1148949080).eql?(o1.to_local(TimeOrDateTime.new(1148931080)))) + + assert_equal(1148931080, o2.to_local(1148931080)) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20, 123000), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20, 123000))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(123, 1000)), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(123, 1000)))) + assert_equal(1148931080, o2.to_local(1148931080)) + assert(TimeOrDateTime.new(1148931080).eql?(o2.to_local(TimeOrDateTime.new(1148931080)))) + end + + def test_to_utc + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(1148913080, o1.to_utc(1148931080)) + assert_equal(Time.utc(2006, 5, 29, 14, 31, 20), o1.to_utc(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 29, 14, 31, 20, 913000), o1.to_utc(Time.utc(2006, 5, 29, 19, 31, 20, 913000))) + assert_equal(DateTime.new(2006, 5, 29, 14, 31, 20), o1.to_utc(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 29, 14, 31, 20 + Rational(913,1000)), o1.to_utc(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(913,1000)))) + assert_equal(1148913080, o1.to_utc(1148931080)) + assert(TimeOrDateTime.new(1148913080).eql?(o1.to_utc(TimeOrDateTime.new(1148931080)))) + + assert_equal(1148931080, o2.to_local(1148931080)) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20))) + assert_equal(Time.utc(2006, 5, 29, 19, 31, 20, 323000), o2.to_local(Time.utc(2006, 5, 29, 19, 31, 20, 323000))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20))) + assert_equal(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(323, 1000)), o2.to_local(DateTime.new(2006, 5, 29, 19, 31, 20 + Rational(323, 1000)))) + assert_equal(1148931080, o2.to_utc(1148931080)) + assert(TimeOrDateTime.new(1148931080).eql?(o2.to_local(TimeOrDateTime.new(1148931080)))) + end + + def test_equality + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(18000, 0, :TEST) + o3 = TimezoneOffset.new(18001, 0, :TEST) + o4 = TimezoneOffset.new(18000, 1, :TEST) + o5 = TimezoneOffset.new(18000, 0, :TEST2) + + assert_equal(true, o1 == o1) + assert_equal(true, o1 == o2) + assert_equal(false, o1 == o3) + assert_equal(false, o1 == o4) + assert_equal(false, o1 == o5) + assert_equal(false, o1 == Object.new) + end + + def test_eql + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(18000, 0, :TEST) + o3 = TimezoneOffset.new(18001, 0, :TEST) + o4 = TimezoneOffset.new(18000, 1, :TEST) + o5 = TimezoneOffset.new(18000, 0, :TEST2) 
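+ # o2 matches o1 exactly; o3, o4 and o5 each differ in one attribute (utc_offset, std_offset and abbreviation respectively), so only o1 and o2 are eql? below.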
+ + assert_equal(true, o1.eql?(o1)) + assert_equal(true, o1.eql?(o2)) + assert_equal(false, o1.eql?(o3)) + assert_equal(false, o1.eql?(o4)) + assert_equal(false, o1.eql?(o5)) + assert_equal(false, o1.eql?(Object.new)) + end + + def test_hash + o1 = TimezoneOffset.new(18000, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST2) + + assert_equal(18000.hash ^ 0.hash ^ :TEST.hash, o1.hash) + assert_equal(-3600.hash ^ 3600.hash ^ :TEST2.hash, o2.hash) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_period.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_period.rb new file mode 100644 index 0000000..bcfb8d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_period.rb @@ -0,0 +1,555 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezonePeriod < Minitest::Test + + class TestTimezoneTransition < TimezoneTransition + def initialize(offset, previous_offset, at) + super(offset, previous_offset) + @at = TimeOrDateTime.wrap(at) + end + + def at + @at + end + end + + def test_initialize_start_end + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + p = TimezonePeriod.new(start_t, end_t) + + assert_same(start_t, p.start_transition) + assert_same(end_t, p.end_transition) + assert_same(dst, p.offset) + assert_equal(DateTime.new(2006,1,1,0,0,0), p.utc_start) + assert_equal(Time.utc(2006,1,1,0,0,0), p.utc_start_time) + assert_equal(DateTime.new(2006,1,2,0,0,0), p.utc_end) + assert_equal(Time.utc(2006,1,2,0,0,0), p.utc_end_time) + assert_equal(-7200, p.utc_offset) + assert_equal(3600, p.std_offset) + assert_equal(-3600, p.utc_total_offset) + assert_equal(Rational(-3600, 86400), p.utc_total_offset_rational) + assert_equal(:TEST, p.zone_identifier) + assert_equal(:TEST, p.abbreviation) + assert_equal(DateTime.new(2005,12,31,23,0,0), p.local_start) + assert_equal(Time.utc(2005,12,31,23,0,0), p.local_start_time) + assert_equal(DateTime.new(2006,1,1,23,0,0), p.local_end) + assert_equal(Time.utc(2006,1,1,23,0,0), p.local_end_time) + end + + def test_initialize_start_end_offset + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + special = TimezoneOffset.new(0, 0, :SPECIAL) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + assert_raises(ArgumentError) { TimezonePeriod.new(start_t, end_t, special) } + end + + def test_initialize_start + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + + p = TimezonePeriod.new(start_t, nil) + + assert_same(start_t, p.start_transition) + assert_nil(p.end_transition) + assert_same(dst, p.offset) + assert_equal(DateTime.new(2006,1,1,0,0,0), p.utc_start) + assert_equal(Time.utc(2006,1,1,0,0,0), p.utc_start_time) + assert_nil(p.utc_end) + assert_nil(p.utc_end_time) + assert_equal(-7200, p.utc_offset) + assert_equal(3600, p.std_offset) + assert_equal(-3600, p.utc_total_offset) + assert_equal(Rational(-3600, 86400), p.utc_total_offset_rational) + assert_equal(:TEST, p.zone_identifier) + assert_equal(:TEST, p.abbreviation) + assert_equal(DateTime.new(2005,12,31,23,0,0), p.local_start) + assert_equal(Time.utc(2005,12,31,23,0,0), p.local_start_time) + assert_nil(p.local_end) + 
assert_nil(p.local_end_time) + end + + def test_initialize_start_offset + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + special = TimezoneOffset.new(0, 0, :SPECIAL) + start_t = TestTimezoneTransition.new(dst, std, 1136073600) + + assert_raises(ArgumentError) { TimezonePeriod.new(start_t, nil, special) } + end + + def test_initialize_end + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + p = TimezonePeriod.new(nil, end_t) + + assert_nil(p.start_transition) + assert_same(end_t, p.end_transition) + assert_same(dst, p.offset) + assert_nil(p.utc_start) + assert_nil(p.utc_start_time) + assert_equal(DateTime.new(2006,1,2,0,0,0), p.utc_end) + assert_equal(Time.utc(2006,1,2,0,0,0), p.utc_end_time) + assert_equal(-7200, p.utc_offset) + assert_equal(3600, p.std_offset) + assert_equal(-3600, p.utc_total_offset) + assert_equal(Rational(-3600, 86400), p.utc_total_offset_rational) + assert_equal(:TEST, p.zone_identifier) + assert_equal(:TEST, p.abbreviation) + assert_nil(p.local_start) + assert_nil(p.local_start_time) + assert_equal(DateTime.new(2006,1,1,23,0,0), p.local_end) + assert_equal(Time.utc(2006,1,1,23,0,0), p.local_end_time) + end + + def test_initialize_end_offset + std = TimezoneOffset.new(-7200, 0, :TEST) + dst = TimezoneOffset.new(-7200, 3600, :TEST) + special = TimezoneOffset.new(0, 0, :SPECIAL) + end_t = TestTimezoneTransition.new(std, dst, 1136160000) + + assert_raises(ArgumentError) { TimezonePeriod.new(nil, end_t, special) } + end + + def test_initialize + assert_raises(ArgumentError) { TimezonePeriod.new(nil, nil) } + end + + def test_initialize_offset + special = TimezoneOffset.new(0, 0, :SPECIAL) + + p = TimezonePeriod.new(nil, nil, special) + + assert_nil(p.start_transition) + assert_nil(p.end_transition) + assert_same(special, p.offset) + assert_nil(p.utc_start) + assert_nil(p.utc_start_time) + assert_nil(p.utc_end) + assert_nil(p.utc_end_time) + assert_equal(0, p.utc_offset) + assert_equal(0, p.std_offset) + assert_equal(0, p.utc_total_offset) + assert_equal(Rational(0, 86400), p.utc_total_offset_rational) + assert_equal(:SPECIAL, p.zone_identifier) + assert_equal(:SPECIAL, p.abbreviation) + assert_nil(p.local_start) + assert_nil(p.local_start_time) + assert_nil(p.local_end) + assert_nil(p.local_end_time) + end + + def test_utc_total_offset_rational_after_freeze + o = TimezoneOffset.new(3600, 0, :TEST) + p = TimezonePeriod.new(nil, nil, o) + p.freeze + assert_equal(Rational(1, 24), p.utc_total_offset_rational) + end + + def test_dst + p1 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 3600, :TEST)) + p2 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 0, :TEST)) + p3 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, -3600, :TEST)) + p4 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 7200, :TEST)) + p5 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, -7200, :TEST)) + + assert_equal(true, p1.dst?) + assert_equal(false, p2.dst?) + assert_equal(true, p3.dst?) + assert_equal(true, p4.dst?) + assert_equal(true, p5.dst?) 
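+ # Any non-zero std_offset, positive or negative, marks the period as DST; only p2 (std_offset 0) is standard time.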
+ end + + def test_valid_for_utc + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104541261) + t2 = TestTimezoneTransition.new(offset, offset, 1107309722) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t1, nil) + p4 = TimezonePeriod.new(nil, nil, offset) + p5 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p1.valid_for_utc?(1104541262)) + assert_equal(true, p1.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p1.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p2.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p2.valid_for_utc?(1104541262)) + assert_equal(true, p2.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p2.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p2.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p2.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p2.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p3.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p3.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p3.valid_for_utc?(1104541262)) + assert_equal(true, p3.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p3.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p3.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p3.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p3.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p4.valid_for_utc?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p4.valid_for_utc?(1104541262)) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p4.valid_for_utc?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(false, p5.valid_for_utc?(Time.utc(2005,1,1,1,1,1))) + assert_equal(false, p5.valid_for_utc?(1104541262)) + end + + def test_utc_after_start + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104541261) + t2 = TestTimezoneTransition.new(offset, offset, 1107309722) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.utc_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.utc_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(false, p1.utc_after_start?(1104541260)) + assert_equal(true, 
p1.utc_after_start?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(false, p1.utc_after_start?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.utc_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.utc_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(true, p2.utc_after_start?(1104541260)) + + assert_equal(true, p3.utc_after_start?(Time.utc(2005,1,2,1,1,1))) + assert_equal(true, p3.utc_after_start?(1104627661)) + end + + def test_utc_before_end + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104541261) + t2 = TestTimezoneTransition.new(offset, offset, 1107309722) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t1, nil) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.utc_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p1.utc_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(false, p1.utc_before_end?(1107309723)) + assert_equal(false, p1.utc_before_end?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(true, p1.utc_before_end?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.utc_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p2.utc_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(true, p2.utc_before_end?(1107309723)) + + assert_equal(false, p3.utc_before_end?(Time.utc(2005,1,2,1,1,1))) + assert_equal(false, p3.utc_before_end?(1104627661)) + end + + def test_valid_for_local + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104544861) + t2 = TestTimezoneTransition.new(offset, offset, 1107313322) + t3 = TestTimezoneTransition.new(offset, offset, 1104544861) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 1, 1, 1, 1, 1)) + t5 = TestTimezoneTransition.new(offset, offset, DateTime.new(1960, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t3, nil) + p4 = TimezonePeriod.new(nil, nil, offset) + p5 = TimezonePeriod.new(t4, t5) + + assert_equal(true, p1.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.valid_for_local?(Time.utc(2005,2,2,2,2,1))) + assert_equal(true, p1.valid_for_local?(1104541262)) + assert_equal(true, p1.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p1.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p1.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p1.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p1.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p2.valid_for_local?(1104541262)) + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p2.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(false, p2.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p2.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(false, p2.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p3.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p3.valid_for_local?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p3.valid_for_local?(1104541262)) + assert_equal(true, 
p3.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(false, p3.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p3.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(false, p3.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p3.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p4.valid_for_local?(1104541262)) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,2,2,2,2,0))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,1,1,1,1,0))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2005,2,2,2,2,3))) + assert_equal(true, p4.valid_for_local?(DateTime.new(1960,1,1,1,1,0))) + assert_equal(true, p4.valid_for_local?(DateTime.new(2040,1,1,1,1,0))) + + assert_equal(false, p5.valid_for_utc?(Time.utc(2005,1,1,1,1,1))) + assert_equal(false, p5.valid_for_utc?(1104541262)) + end + + def test_local_after_start + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104544861) + t2 = TestTimezoneTransition.new(offset, offset, 1107313322) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(nil, t2) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.local_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p1.local_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(false, p1.local_after_start?(1104541260)) + assert_equal(true, p1.local_after_start?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(false, p1.local_after_start?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.local_after_start?(DateTime.new(2005,1,1,1,1,1))) + assert_equal(true, p2.local_after_start?(Time.utc(2005,1,1,1,1,2))) + assert_equal(true, p2.local_after_start?(1104541260)) + + assert_equal(true, p3.local_after_start?(Time.utc(2005,1,2,1,1,1))) + assert_equal(true, p3.local_after_start?(1104627661)) + end + + def test_local_before_end + offset = TimezoneOffset.new(-7200, 3600, :TEST) + t1 = TestTimezoneTransition.new(offset, offset, 1104544861) + t2 = TestTimezoneTransition.new(offset, offset, 1107313322) + t3 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 1, 1, 1, 1, 1)) + t4 = TestTimezoneTransition.new(offset, offset, DateTime.new(1945, 2, 2, 2, 2, 2)) + + p1 = TimezonePeriod.new(t1, t2) + p2 = TimezonePeriod.new(t1, nil) + p3 = TimezonePeriod.new(t3, t4) + + assert_equal(true, p1.local_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p1.local_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(false, p1.local_before_end?(1107309723)) + assert_equal(false, p1.local_before_end?(DateTime.new(2045,1,1,1,1,0))) + assert_equal(true, p1.local_before_end?(DateTime.new(1955,1,1,1,1,0))) + + assert_equal(true, p2.local_before_end?(DateTime.new(2005,2,2,2,2,1))) + assert_equal(true, p2.local_before_end?(Time.utc(2005,2,2,2,2,0))) + assert_equal(true, p2.local_before_end?(1107309723)) + + assert_equal(false, p3.local_before_end?(Time.utc(2005,1,2,1,1,1))) + assert_equal(false, p3.local_before_end?(1104627661)) + end + + def test_to_local + p1 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 3600, :TEST)) + p2 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 0, :TEST)) + p3 = 
TimezonePeriod.new(nil, nil, TimezoneOffset.new(7200, 3600, :TEST)) + + assert_equal(DateTime.new(2005,1,19,22,0,0), p1.to_local(DateTime.new(2005,1,20,1,0,0))) + assert_equal(DateTime.new(2005,1,19,22,0,0 + Rational(512,1000)), p1.to_local(DateTime.new(2005,1,20,1,0,0 + Rational(512,1000)))) + assert_equal(Time.utc(2005,1,19,21,0,0), p2.to_local(Time.utc(2005,1,20,1,0,0))) + assert_equal(Time.utc(2005,1,19,21,0,0,512000), p2.to_local(Time.utc(2005,1,20,1,0,0,512000))) + assert_equal(1106193600, p3.to_local(1106182800)) + end + + def test_to_utc + p1 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 3600, :TEST)) + p2 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(-14400, 0, :TEST)) + p3 = TimezonePeriod.new(nil, nil, TimezoneOffset.new(7200, 3600, :TEST)) + + assert_equal(DateTime.new(2005,1,20,4,0,0), p1.to_utc(DateTime.new(2005,1,20,1,0,0))) + assert_equal(DateTime.new(2005,1,20,4,0,0 + Rational(571,1000)), p1.to_utc(DateTime.new(2005,1,20,1,0,0 + Rational(571,1000)))) + assert_equal(Time.utc(2005,1,20,5,0,0), p2.to_utc(Time.utc(2005,1,20,1,0,0))) + assert_equal(Time.utc(2005,1,20,5,0,0,571000), p2.to_utc(Time.utc(2005,1,20,1,0,0,571000))) + assert_equal(1106172000, p3.to_utc(1106182800)) + end + + def test_time_boundary_start + o1 = TimezoneOffset.new(-3600, 0, :TEST) + o2 = TimezoneOffset.new(-3600, 3600, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 0) + + p1 = TimezonePeriod.new(t1, nil) + + assert_equal(DateTime.new(1969,12,31,23,0,0), p1.local_start) + + # Conversion to Time will fail on systems that don't support negative times. + if RubyCoreSupport.time_supports_negative + assert_equal(Time.utc(1969,12,31,23,0,0), p1.local_start_time) + end + end + + def test_time_boundary_end + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o2, o1, 2147482800) + + p1 = TimezonePeriod.new(nil, t1) + + assert_equal(DateTime.new(2038,1,19,4,0,0), p1.local_end) + + # Conversion to Time will fail on systems that don't support 64-bit times + if RubyCoreSupport.time_supports_64bit + assert_equal(Time.utc(2038,1,19,4,0,0), p1.local_end_time) + end + end + + def test_equality + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 1149368400) + t2 = TestTimezoneTransition.new(o1, o2, DateTime.new(2006, 6, 3, 21, 0, 0)) + t3 = TestTimezoneTransition.new(o1, o2, 1149454800) + t4 = TestTimezoneTransition.new(o1, o2, 1149541200) + + p1 = TimezonePeriod.new(t1, t3) + p2 = TimezonePeriod.new(t1, t3) + p3 = TimezonePeriod.new(t2, t3) + p4 = TimezonePeriod.new(t3, nil) + p5 = TimezonePeriod.new(t3, nil) + p6 = TimezonePeriod.new(t4, nil) + p7 = TimezonePeriod.new(nil, t3) + p8 = TimezonePeriod.new(nil, t3) + p9 = TimezonePeriod.new(nil, t4) + p10 = TimezonePeriod.new(nil, nil, o1) + p11 = TimezonePeriod.new(nil, nil, o1) + p12 = TimezonePeriod.new(nil, nil, o2) + + assert_equal(true, p1 == p1) + assert_equal(true, p1 == p2) + assert_equal(true, p1 == p3) + assert_equal(false, p1 == p4) + assert_equal(false, p1 == p5) + assert_equal(false, p1 == p6) + assert_equal(false, p1 == p7) + assert_equal(false, p1 == p8) + assert_equal(false, p1 == p9) + assert_equal(false, p1 == p10) + assert_equal(false, p1 == p11) + assert_equal(false, p1 == p12) + assert_equal(false, p1 == Object.new) + + assert_equal(true, p4 == p4) + assert_equal(true, p4 == p5) + assert_equal(false, p4 == p6) + assert_equal(false, p4 == Object.new) + + assert_equal(true, p7 == p7) + assert_equal(true, 
p7 == p8) + assert_equal(false, p7 == p9) + assert_equal(false, p7 == Object.new) + + assert_equal(true, p10 == p10) + assert_equal(true, p10 == p11) + assert_equal(false, p10 == p12) + assert_equal(false, p10 == Object.new) + end + + def test_eql + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 1149368400) + t2 = TestTimezoneTransition.new(o1, o2, DateTime.new(2006, 6, 3, 21, 0, 0)) + t3 = TestTimezoneTransition.new(o1, o2, 1149454800) + t4 = TestTimezoneTransition.new(o1, o2, 1149541200) + + p1 = TimezonePeriod.new(t1, t3) + p2 = TimezonePeriod.new(t1, t3) + p3 = TimezonePeriod.new(t2, t3) + p4 = TimezonePeriod.new(t3, nil) + p5 = TimezonePeriod.new(t3, nil) + p6 = TimezonePeriod.new(t4, nil) + p7 = TimezonePeriod.new(nil, t3) + p8 = TimezonePeriod.new(nil, t3) + p9 = TimezonePeriod.new(nil, t4) + p10 = TimezonePeriod.new(nil, nil, o1) + p11 = TimezonePeriod.new(nil, nil, o1) + p12 = TimezonePeriod.new(nil, nil, o2) + + assert_equal(true, p1.eql?(p1)) + assert_equal(true, p1.eql?(p2)) + assert_equal(false, p1.eql?(p3)) + assert_equal(false, p1.eql?(p4)) + assert_equal(false, p1.eql?(p5)) + assert_equal(false, p1.eql?(p6)) + assert_equal(false, p1.eql?(p7)) + assert_equal(false, p1.eql?(p8)) + assert_equal(false, p1.eql?(p9)) + assert_equal(false, p1.eql?(p10)) + assert_equal(false, p1.eql?(p11)) + assert_equal(false, p1.eql?(p12)) + assert_equal(false, p1.eql?(Object.new)) + + assert_equal(true, p4.eql?(p4)) + assert_equal(true, p4.eql?(p5)) + assert_equal(false, p4.eql?(p6)) + assert_equal(false, p4.eql?(Object.new)) + + assert_equal(true, p7.eql?(p7)) + assert_equal(true, p7.eql?(p8)) + assert_equal(false, p7.eql?(p9)) + assert_equal(false, p7.eql?(Object.new)) + + assert_equal(true, p10.eql?(p10)) + assert_equal(true, p10.eql?(p11)) + assert_equal(false, p10.eql?(p12)) + assert_equal(false, p10.eql?(Object.new)) + end + + def test_hash + o1 = TimezoneOffset.new(0, 3600, :TEST) + o2 = TimezoneOffset.new(0, 0, :TEST) + t1 = TestTimezoneTransition.new(o1, o2, 1149368400) + t2 = TestTimezoneTransition.new(o1, o2, DateTime.new(2006, 6, 3, 21, 0, 0)) + t3 = TestTimezoneTransition.new(o1, o2, 1149454800) + t4 = TestTimezoneTransition.new(o1, o2, 1149541200) + + p1 = TimezonePeriod.new(t1, t3) + p2 = TimezonePeriod.new(t2, nil) + p3 = TimezonePeriod.new(nil, t4) + p4 = TimezonePeriod.new(nil, nil, o1) + + assert_equal(t1.hash ^ t3.hash, p1.hash) + assert_equal(t2.hash ^ nil.hash, p2.hash) + assert_equal(nil.hash ^ t4.hash, p3.hash) + assert_equal(nil.hash ^ nil.hash ^ o1.hash, p4.hash) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_proxy.rb new file mode 100644 index 0000000..22ba3d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_proxy.rb @@ -0,0 +1,136 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneProxy < Minitest::Test + def test_not_exist + proxy = TimezoneProxy.new('Nothing/Special') + t = Time.utc(2006,1,1,0,0,0) + assert_equal('Nothing/Special', proxy.identifier) + assert_equal('Nothing/Special', proxy.name) + assert_equal('Nothing - Special', proxy.friendly_identifier) + assert_equal('Nothing - Special', proxy.to_s) + + assert_raises(InvalidTimezoneIdentifier) { proxy.canonical_identifier } + assert_raises(InvalidTimezoneIdentifier) { proxy.canonical_zone } + assert_raises(InvalidTimezoneIdentifier) { 
proxy.current_period } + assert_raises(InvalidTimezoneIdentifier) { proxy.current_period_and_time } + assert_raises(InvalidTimezoneIdentifier) { proxy.current_time_and_period } + assert_raises(InvalidTimezoneIdentifier) { proxy.local_to_utc(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.now } + assert_raises(InvalidTimezoneIdentifier) { proxy.offsets_up_to(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.period_for_local(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.period_for_utc(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.periods_for_local(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.strftime('%Z', t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.transitions_up_to(t) } + assert_raises(InvalidTimezoneIdentifier) { proxy.utc_to_local(t) } + end + + def test_valid + proxy = TimezoneProxy.new('Europe/London') + real = Timezone.get('Europe/London') + + t1 = Time.utc(2005,8,1,0,0,0) + t2 = Time.utc(2004,8,1,0,0,0) + + assert_equal(real.canonical_identifier, proxy.canonical_identifier) + assert_same(real.canonical_zone, proxy.canonical_zone) + assert_nothing_raised { proxy.current_period } + assert_nothing_raised { proxy.current_period_and_time } + assert_nothing_raised { proxy.current_time_and_period } + assert_equal(real.friendly_identifier(true), proxy.friendly_identifier(true)) + assert_equal(real.friendly_identifier(false), proxy.friendly_identifier(false)) + assert_equal(real.friendly_identifier, proxy.friendly_identifier) + assert_equal(real.identifier, proxy.identifier) + assert_equal(real.local_to_utc(t1), proxy.local_to_utc(t1)) + assert_equal(real.name, proxy.name) + assert_nothing_raised { proxy.now } + assert_equal(real.offsets_up_to(t1), proxy.offsets_up_to(t1)) + assert_equal(real.offsets_up_to(t1, t2), proxy.offsets_up_to(t1, t2)) + assert_equal(real.period_for_local(t1), proxy.period_for_local(t1)) + assert_equal(real.period_for_utc(t1), proxy.period_for_utc(t1)) + assert_equal(real.periods_for_local(t1), proxy.periods_for_local(t1)) + assert_equal(real.strftime('%Z', t1), proxy.strftime('%Z', t1)) + assert_equal(real.to_s, proxy.to_s) + assert_equal(real.transitions_up_to(t1), proxy.transitions_up_to(t1)) + assert_equal(real.transitions_up_to(t1, t2), proxy.transitions_up_to(t1, t2)) + assert_equal(real.utc_to_local(t1), proxy.utc_to_local(t1)) + + + assert(real == proxy) + assert(proxy == real) + assert_equal(0, real <=> proxy) + assert_equal(0, proxy <=> real) + end + + def test_canonical_linked + # Test that the implementation of canonical_zone and canonical_identifier + # are actually calling the real timezone and not just returning it and + # its identifier. + + real = Timezone.get('UTC') + proxy = TimezoneProxy.new('UTC') + + # ZoneinfoDataSource doesn't return LinkedTimezoneInfo instances for any + # timezone. + if real.kind_of?(LinkedTimezone) + assert_equal('Etc/UTC', proxy.canonical_identifier) + assert_same(Timezone.get('Etc/UTC'), proxy.canonical_zone) + else + if DataSource.get.kind_of?(RubyDataSource) + # Not got a LinkedTimezone despite using a DataSource that supports it. + # Raise an exception as this shouldn't happen. 
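+ # (The Ruby data source is expected to define UTC as a link, which is why reaching this branch is treated as an error.)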
+ raise 'Non-LinkedTimezone instance returned for UTC using RubyDataSource' + end + + assert_equal('UTC', proxy.canonical_identifier) + assert_same(Timezone.get('UTC'), proxy.canonical_zone) + end + end + + def test_after_freeze + proxy = TimezoneProxy.new('Europe/London') + real = Timezone.get('Europe/London') + t = Time.utc(2017, 6, 1) + proxy.freeze + assert_equal('Europe/London', proxy.identifier) + assert_equal(real.utc_to_local(t), proxy.utc_to_local(t)) + end + + def test_equals + assert_equal(true, TimezoneProxy.new('Europe/London') == TimezoneProxy.new('Europe/London')) + assert_equal(false, TimezoneProxy.new('Europe/London') == TimezoneProxy.new('Europe/Paris')) + assert(!(TimezoneProxy.new('Europe/London') == Object.new)) + end + + def test_compare + assert_equal(0, TimezoneProxy.new('Europe/London') <=> TimezoneProxy.new('Europe/London')) + assert_equal(0, Timezone.get('Europe/London') <=> TimezoneProxy.new('Europe/London')) + assert_equal(0, TimezoneProxy.new('Europe/London') <=> Timezone.get('Europe/London')) + assert_equal(-1, TimezoneProxy.new('Europe/London') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, Timezone.get('Europe/London') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, TimezoneProxy.new('Europe/London') <=> Timezone.get('Europe/Paris')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> TimezoneProxy.new('Europe/London')) + assert_equal(1, Timezone.get('Europe/Paris') <=> TimezoneProxy.new('Europe/London')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> Timezone.get('Europe/London')) + assert_equal(-1, TimezoneProxy.new('America/New_York') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, Timezone.get('America/New_York') <=> TimezoneProxy.new('Europe/Paris')) + assert_equal(-1, TimezoneProxy.new('America/New_York') <=> Timezone.get('Europe/Paris')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> TimezoneProxy.new('America/New_York')) + assert_equal(1, Timezone.get('Europe/Paris') <=> TimezoneProxy.new('America/New_York')) + assert_equal(1, TimezoneProxy.new('Europe/Paris') <=> Timezone.get('America/New_York')) + end + + def test_kind + assert_kind_of(Timezone, TimezoneProxy.new('America/New_York')) + end + + def test_marshal + tp = TimezoneProxy.new('Europe/London') + tp2 = Marshal.load(Marshal.dump(tp)) + + assert_kind_of(TimezoneProxy, tp2) + assert_equal('Europe/London', tp2.identifier) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_transition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_transition.rb new file mode 100644 index 0000000..3dba518 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_transition.rb @@ -0,0 +1,366 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'date' + +include TZInfo + +class TCTimezoneTransition < Minitest::Test + + class TestTimezoneTransition < TimezoneTransition + def initialize(offset, previous_offset, at) + super(offset, previous_offset) + @at = TimeOrDateTime.wrap(at) + end + + def at + @at + end + end + + def test_offset + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + + assert_equal(TimezoneOffset.new(3600, 3600, :TDT), t.offset) + end + + def test_previous_offset + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + + assert_equal(TimezoneOffset.new(3600, 0, :TST), t.previous_offset) + end + + def 
test_datetime + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), t1.datetime) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), t2.datetime) + assert_equal(DateTime.new(2006, 5, 30, 0, 31, 20), t3.datetime) + end + + def test_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), t1.time) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), t2.time) + assert_equal(Time.utc(2006, 5, 30, 0, 31, 20), t3.time) + end + + def test_local_end_at + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert(TimeOrDateTime.new(1148952680).eql?(t1.local_end_at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 1, 31, 20)).eql?(t2.local_end_at)) + assert(TimeOrDateTime.new(Time.utc(2006, 5, 30, 1, 31, 20)).eql?(t3.local_end_at)) + end + + def test_local_end_at_after_freeze + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t.freeze + assert(TimeOrDateTime.new(1148952680).eql?(t.local_end_at)) + end + + def test_local_end + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(DateTime.new(2006, 5, 30, 1, 31, 20), t1.local_end) + assert_equal(DateTime.new(2006, 5, 30, 1, 31, 20), t2.local_end) + assert_equal(DateTime.new(2006, 5, 30, 1, 31, 20), t3.local_end) + end + + def test_local_end_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(Time.utc(2006, 5, 30, 1, 31, 20), t1.local_end_time) + assert_equal(Time.utc(2006, 5, 30, 1, 31, 20), t2.local_end_time) + assert_equal(Time.utc(2006, 5, 30, 1, 31, 20), t3.local_end_time) + end + + def test_local_start_at + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, 
:TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert(TimeOrDateTime.new(1148956280).eql?(t1.local_start_at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 2, 31, 20)).eql?(t2.local_start_at)) + assert(TimeOrDateTime.new(Time.utc(2006, 5, 30, 2, 31, 20)).eql?(t3.local_start_at)) + end + + def test_local_start_at_after_freeze + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t.freeze + assert(TimeOrDateTime.new(1148956280).eql?(t.local_start_at)) + end + + def test_local_start + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(DateTime.new(2006, 5, 30, 2, 31, 20), t1.local_start) + assert_equal(DateTime.new(2006, 5, 30, 2, 31, 20), t2.local_start) + assert_equal(DateTime.new(2006, 5, 30, 2, 31, 20), t3.local_start) + end + + def test_local_start_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + + assert_equal(Time.utc(2006, 5, 30, 2, 31, 20), t1.local_start_time) + assert_equal(Time.utc(2006, 5, 30, 2, 31, 20), t2.local_start_time) + assert_equal(Time.utc(2006, 5, 30, 2, 31, 20), t3.local_start_time) + end + + if RubyCoreSupport.time_supports_negative + def test_local_end_at_before_negative_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), -2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(-2147490000).eql?(t.local_end_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1901, 12, 13, 19, 0, 0)).eql?(t.local_end_at)) + end + end + + def test_local_start_at_before_negative_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), -2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(-2147486400).eql?(t.local_start_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1901, 12, 13, 20, 0, 0)).eql?(t.local_start_at)) + end + end + end + + def test_local_end_at_before_epoch + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), 1800) + + if RubyCoreSupport.time_supports_negative + assert(TimeOrDateTime.new(-5400).eql?(t.local_end_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1969, 12, 31, 22, 30, 0)).eql?(t.local_end_at)) + end + end + + def test_local_start_at_before_epoch + t = TestTimezoneTransition.new(TimezoneOffset.new(-7200, 3600, :TDT), + TimezoneOffset.new(-7200, 0, :TST), 1800) + + if RubyCoreSupport.time_supports_negative + 
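# local_start_at applies the new offset's total offset (-7200+3600 = -3600) to the transition time 1800, giving -1800, hence the need for negative Time support. +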
assert(TimeOrDateTime.new(-1800).eql?(t.local_start_at)) + else + assert(TimeOrDateTime.new(DateTime.new(1969, 12, 31, 23, 30, 0)).eql?(t.local_start_at)) + end + end + + def test_local_end_at_after_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(2147486400).eql?(t.local_end_at)) + else + assert(TimeOrDateTime.new(DateTime.new(2038, 1, 19, 4, 0, 0)).eql?(t.local_end_at)) + end + end + + def test_local_start_at_after_32bit + t = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147482800) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(2147490000).eql?(t.local_start_at)) + else + assert(TimeOrDateTime.new(DateTime.new(2038, 1, 19, 5, 0, 0)).eql?(t.local_start_at)) + end + end + + def test_equality_timestamp + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t4 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t5 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t6 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 1, 31, 21)) + t7 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 1, 31, 21)) + t8 = TestTimezoneTransition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t9 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080) + + assert_equal(true, t1 == t1) + assert_equal(true, t1 == t2) + assert_equal(true, t1 == t3) + assert_equal(true, t1 == t4) + assert_equal(false, t1 == t5) + assert_equal(false, t1 == t6) + assert_equal(false, t1 == t7) + assert_equal(false, t1 == t8) + assert_equal(false, t1 == t9) + assert_equal(false, t1 == Object.new) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(t8)) + assert_equal(false, t1.eql?(t9)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_equality_datetime + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t4 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t5 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 1, 31, 21)) + 
t6 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t7 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 1, 31, 21)) + t8 = TestTimezoneTransition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t9 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + + assert_equal(true, t1 == t1) + assert_equal(true, t1 == t2) + assert_equal(true, t1 == t3) + assert_equal(true, t1 == t4) + assert_equal(false, t1 == t5) + assert_equal(false, t1 == t6) + assert_equal(false, t1 == t7) + assert_equal(false, t1 == t8) + assert_equal(false, t1 == t9) + assert_equal(false, t1 == Object.new) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(t8)) + assert_equal(false, t1.eql?(t9)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_equality_time + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 0, 31, 20)) + t4 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t5 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), Time.utc(2006, 5, 30, 0, 31, 21)) + t6 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), DateTime.new(2006, 5, 30, 1, 31, 21)) + t7 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t8 = TestTimezoneTransition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t9 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080) + + assert_equal(true, t1 == t1) + assert_equal(true, t1 == t2) + assert_equal(true, t1 == t3) + assert_equal(true, t1 == t4) + assert_equal(false, t1 == t5) + assert_equal(false, t1 == t6) + assert_equal(false, t1 == t7) + assert_equal(false, t1 == t8) + assert_equal(false, t1 == t9) + assert_equal(false, t1 == Object.new) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(t8)) + assert_equal(false, t1.eql?(t9)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_hash + t1 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDTA), + TimezoneOffset.new(3600, 0, :TSTA), 1148949080) + t2 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDTB), + TimezoneOffset.new(3600, 0, :TSTB), DateTime.new(2006, 5, 30, 1, 31, 20)) + t3 = TestTimezoneTransition.new(TimezoneOffset.new(3600, 3600, :TDTC), + 
TimezoneOffset.new(3600, 0, :TSTC), Time.utc(2006, 5, 30, 1, 31, 20)) + + assert_equal(TimezoneOffset.new(3600, 3600, :TDTA).hash ^ + TimezoneOffset.new(3600, 0, :TSTA).hash ^ TimeOrDateTime.new(1148949080).hash, + t1.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDTB).hash ^ + TimezoneOffset.new(3600, 0, :TSTB).hash ^ TimeOrDateTime.new(DateTime.new(2006, 5, 30, 1, 31, 20)).hash, + t2.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDTC).hash ^ + TimezoneOffset.new(3600, 0, :TSTC).hash ^ TimeOrDateTime.new(Time.utc(2006, 5, 30, 1, 31, 20)).hash, + t3.hash) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_transition_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_transition_definition.rb new file mode 100644 index 0000000..b66233b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_transition_definition.rb @@ -0,0 +1,295 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'date' + +include TZInfo + +class TCTimezoneTransitionDefinition < Minitest::Test + def test_initialize_timestamp_only + assert_nothing_raised do + TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + end + end + + def test_initialize_timestamp_and_datetime + assert_nothing_raised do + TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + end + end + + def test_initialize_datetime_only + assert_nothing_raised do + TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + end + end + + def test_at + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + + assert(TimeOrDateTime.new(1148949080).eql?(t1.at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 0, 31, 20)).eql?(t2.at)) + assert(TimeOrDateTime.new(1148949080).eql?(t3.at)) + end + + def test_at_before_negative_32_bit + t = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(-2147483649).eql?(t.at)) + else + assert(TimeOrDateTime.new(DateTime.new(1901, 12, 13, 20, 45, 51)).eql?(t.at)) + end + end + + def test_at_before_epoch + t = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + + if RubyCoreSupport.time_supports_negative + assert(TimeOrDateTime.new(-1).eql?(t.at)) + else + assert(TimeOrDateTime.new(DateTime.new(1969, 12, 31, 23, 59, 59)).eql?(t.at)) + end + end + + def test_at_after_32bit + t = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + + if RubyCoreSupport.time_supports_64bit + assert(TimeOrDateTime.new(2147483648).eql?(t.at)) + else + assert(TimeOrDateTime.new(DateTime.new(2038, 1, 19, 3, 14, 8)).eql?(t.at)) + end + end + + def test_at_after_freeze + t1 = 
TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t1.freeze + t2.freeze + assert(TimeOrDateTime.new(1148949080).eql?(t1.at)) + assert(TimeOrDateTime.new(DateTime.new(2006, 5, 30, 0, 31, 20)).eql?(t2.at)) + end + + def test_eql_timestamp + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t5 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949081) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t7 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_eql_datetime + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t5 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 7852433803, 3200) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t7 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 5300392727, 2160) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(false, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_eql_timestamp_and_datetime + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t5 = 
TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148952681, 7852433803, 3200) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3601, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t7 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3601, 0, :TST), 1148949080, 5300392727, 2160) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + assert_equal(true, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(false, t1.eql?(t5)) + assert_equal(false, t1.eql?(t6)) + assert_equal(false, t1.eql?(t7)) + assert_equal(false, t1.eql?(Object.new)) + end + + def test_eql_timestamp_and_datetime_before_negative_32bit + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 69573092117, 28800) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert_equal(true, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(true, t3.eql?(t1)) + assert_equal(false, t4.eql?(t1)) + else + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t3.eql?(t1)) + assert_equal(true, t4.eql?(t1)) + end + end + + def test_eql_timestamp_and_datetime_before_epoch + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 210866759999, 86400) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + + if RubyCoreSupport.time_supports_negative + assert_equal(true, t1.eql?(t3)) + assert_equal(false, t1.eql?(t4)) + assert_equal(true, t3.eql?(t1)) + assert_equal(false, t4.eql?(t1)) + else + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t3.eql?(t1)) + assert_equal(true, t4.eql?(t1)) + end + end + + def test_eql_timestamp_and_datetime_after_32bit + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 3328347557, 1350) + + assert_equal(true, t1.eql?(t1)) + assert_equal(true, t1.eql?(t2)) + + if RubyCoreSupport.time_supports_64bit + assert_equal(true, t1.eql?(t3)) + 
assert_equal(false, t1.eql?(t4)) + assert_equal(true, t3.eql?(t1)) + assert_equal(false, t4.eql?(t1)) + else + assert_equal(false, t1.eql?(t3)) + assert_equal(true, t1.eql?(t4)) + assert_equal(false, t3.eql?(t1)) + assert_equal(true, t4.eql?(t1)) + end + end + + def test_hash + t1 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080) + t2 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 5300392727, 2160) + t3 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 1148949080, 5300392727, 2160) + t4 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -2147483649, 69573092117, 28800) + t5 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), -1, 210866759999, 86400) + t6 = TimezoneTransitionDefinition.new(TimezoneOffset.new(3600, 3600, :TDT), + TimezoneOffset.new(3600, 0, :TST), 2147483648, 3328347557, 1350) + + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 1148949080.hash ^ nil.hash, + t1.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 5300392727.hash ^ 2160.hash, + t2.hash) + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 1148949080.hash ^ nil.hash, + t3.hash) + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ -2147483649.hash ^ nil.hash, + t4.hash) + else + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 69573092117.hash ^ 28800.hash, + t4.hash) + end + + if RubyCoreSupport.time_supports_negative + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ -1.hash ^ nil.hash, + t5.hash) + else + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 210866759999.hash ^ 86400.hash, + t5.hash) + end + + if RubyCoreSupport.time_supports_64bit + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 2147483648.hash ^ nil.hash, + t6.hash) + else + assert_equal(TimezoneOffset.new(3600, 3600, :TDT).hash ^ + TimezoneOffset.new(3600, 0, :TST).hash ^ 3328347557.hash ^ 1350.hash, + t6.hash) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_utc.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_utc.rb new file mode 100644 index 0000000..212bca0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_timezone_utc.rb @@ -0,0 +1,27 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTimezoneUTC < Minitest::Test + def test_2004 + tz = Timezone.get('UTC') + + assert_equal(DateTime.new(2004,1,1,0,0,0), tz.utc_to_local(DateTime.new(2004,1,1,0,0,0))) + assert_equal(DateTime.new(2004,12,31,23,59,59), tz.utc_to_local(DateTime.new(2004,12,31,23,59,59))) + + assert_equal(DateTime.new(2004,1,1,0,0,0), tz.local_to_utc(DateTime.new(2004,1,1,0,0,0))) + assert_equal(DateTime.new(2004,12,31,23,59,59), tz.local_to_utc(DateTime.new(2004,12,31,23,59,59))) + + assert_equal(:UTC, 
tz.period_for_utc(DateTime.new(2004,1,1,0,0,0)).zone_identifier) + assert_equal(:UTC, tz.period_for_utc(DateTime.new(2004,12,31,23,59,59)).zone_identifier) + + assert_equal(:UTC, tz.period_for_local(DateTime.new(2004,1,1,0,0,0)).zone_identifier) + assert_equal(:UTC, tz.period_for_local(DateTime.new(2004,12,31,23,59,59)).zone_identifier) + + assert_equal(0, tz.period_for_utc(DateTime.new(2004,1,1,0,0,0)).utc_total_offset) + assert_equal(0, tz.period_for_utc(DateTime.new(2004,12,31,23,59,59)).utc_total_offset) + + assert_equal(0, tz.period_for_local(DateTime.new(2004,1,1,0,0,0)).utc_total_offset) + assert_equal(0, tz.period_for_local(DateTime.new(2004,12,31,23,59,59)).utc_total_offset) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_transition_data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_transition_data_timezone_info.rb new file mode 100644 index 0000000..cc4af72 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_transition_data_timezone_info.rb @@ -0,0 +1,433 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTransitionDataTimezoneInfo < Minitest::Test + + def test_identifier + dti = TransitionDataTimezoneInfo.new('Test/Zone') + assert_equal('Test/Zone', dti.identifier) + end + + def test_offset + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_nothing_raised do + dti.offset :o1, -18000, 3600, :TEST + end + end + + def test_offset_already_defined + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, 3600, 0, :TEST + dti.offset :o2, 1800, 0, :TEST2 + + assert_raises(ArgumentError) { dti.offset :o1, 3600, 3600, :TESTD } + end + + def test_transition_timestamp + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + assert_nothing_raised do + dti.transition 2006, 6, :o1, 1149368400 + end + end + + def test_transition_datetime + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + assert_nothing_raised do + dti.transition 2006, 6, :o1, 19631123, 8 + end + end + + def test_transition_timestamp_and_datetime + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + assert_nothing_raised do + dti.transition 2006, 6, :o1, 1149368400, 19631123, 8 + end + end + + def test_transition_invalid_offset + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + dti.transition 2006, 6, :o1, 1149368400 + + assert_raises(ArgumentError) { dti.transition 2006, 6, :o2, 1149454800 } + end + + def test_transition_no_offsets + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_raises(ArgumentError) { dti.transition 2006, 6, :o1, 1149368400 } + end + + def test_transition_invalid_order_month + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + dti.transition 2006, 6, :o1, 1149368400 + + assert_raises(ArgumentError) { dti.transition 2006, 5, :o2, 1146690000 } + end + + def test_transition_invalid_order_year + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -18000, 3600, :TEST + + dti.transition 2006, 6, :o1, 1149368400 + + assert_raises(ArgumentError) { dti.transition 2005, 7, :o2, 1120424400 } + end + + def test_period_for_utc + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 3600, :TESTD + dti.offset :o3, -18000, 0, :TESTS + dti.offset :o4, -21600, 3600, :TESTD + + 
dti.transition 2000, 4, :o2, Time.utc(2000, 4,1,1,0,0).to_i + dti.transition 2000, 10, :o3, Time.utc(2000,10,1,1,0,0).to_i + dti.transition 2001, 3, :o2, 58847269, 24 # (2001, 3,1,1,0,0) + dti.transition 2001, 4, :o4, Time.utc(2001, 4,1,1,0,0).to_i, 58848013, 24 + dti.transition 2001, 10, :o3, Time.utc(2001,10,1,1,0,0).to_i + dti.transition 2002, 10, :o3, Time.utc(2002,10,1,1,0,0).to_i + dti.transition 2003, 2, :o2, Time.utc(2003, 2,1,1,0,0).to_i + dti.transition 2003, 3, :o3, Time.utc(2003, 3,1,1,0,0).to_i + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2000, 4,1,1,0,0).to_i) + t2 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2000,10,1,1,0,0).to_i) + t3 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2001, 3,1,1,0,0).to_i) + t4 = TimezoneTransitionDefinition.new(o4, o2, Time.utc(2001, 4,1,1,0,0).to_i) + t5 = TimezoneTransitionDefinition.new(o3, o4, Time.utc(2001,10,1,1,0,0).to_i) + t6 = TimezoneTransitionDefinition.new(o3, o3, Time.utc(2002,10,1,1,0,0).to_i) + t7 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2003, 2,1,1,0,0).to_i) + t8 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2003, 3,1,1,0,0).to_i) + + assert_equal(TimezonePeriod.new(nil, t1), dti.period_for_utc(DateTime.new(1960, 1,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(nil, t1), dti.period_for_utc(DateTime.new(1999,12,1,0, 0, 0))) + assert_equal(TimezonePeriod.new(nil, t1), dti.period_for_utc(Time.utc( 2000, 4,1,0,59,59))) + assert_equal(TimezonePeriod.new(t1, t2), dti.period_for_utc(DateTime.new(2000, 4,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t1, t2), dti.period_for_utc(Time.utc( 2000,10,1,0,59,59).to_i)) + assert_equal(TimezonePeriod.new(t2, t3), dti.period_for_utc(Time.utc( 2000,10,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t2, t3), dti.period_for_utc(Time.utc( 2001, 3,1,0,59,59))) + assert_equal(TimezonePeriod.new(t3, t4), dti.period_for_utc(Time.utc( 2001, 3,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t3, t4), dti.period_for_utc(Time.utc( 2001, 4,1,0,59,59))) + assert_equal(TimezonePeriod.new(t4, t5), dti.period_for_utc(Time.utc( 2001, 4,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t4, t5), dti.period_for_utc(Time.utc( 2001,10,1,0,59,59))) + assert_equal(TimezonePeriod.new(t5, t6), dti.period_for_utc(Time.utc( 2001,10,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t5, t6), dti.period_for_utc(Time.utc( 2002, 2,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t5, t6), dti.period_for_utc(Time.utc( 2002,10,1,0,59,59))) + assert_equal(TimezonePeriod.new(t6, t7), dti.period_for_utc(Time.utc( 2002,10,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t6, t7), dti.period_for_utc(Time.utc( 2003, 2,1,0,59,59))) + assert_equal(TimezonePeriod.new(t7, t8), dti.period_for_utc(Time.utc( 2003, 2,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t7, t8), dti.period_for_utc(Time.utc( 2003, 3,1,0,59,59))) + assert_equal(TimezonePeriod.new(t8, nil), dti.period_for_utc(Time.utc( 2003, 3,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t8, nil), dti.period_for_utc(Time.utc( 2004, 1,1,1, 0, 0))) + assert_equal(TimezonePeriod.new(t8, nil), dti.period_for_utc(DateTime.new(2050, 1,1,1, 0, 0))) + end + + def test_period_for_utc_no_transitions + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 0, :TEST + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + + 
assert_equal(TimezonePeriod.new(nil, nil, o1), dti.period_for_utc(DateTime.new(2005,1,1,0,0,0))) + assert_equal(TimezonePeriod.new(nil, nil, o1), dti.period_for_utc(Time.utc(2005,1,1,0,0,0))) + assert_equal(TimezonePeriod.new(nil, nil, o1), dti.period_for_utc(Time.utc(2005,1,1,0,0,0).to_i)) + end + + def test_period_for_utc_no_offsets + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_raises(NoOffsetsDefined) { dti.period_for_utc(DateTime.new(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.period_for_utc(Time.utc(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.period_for_utc(Time.utc(2005,1,1,0,0,0).to_i) } + end + + def test_periods_for_local + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 3600, :TESTD + dti.offset :o3, -18000, 0, :TESTS + dti.offset :o4, -21600, 3600, :TESTD + + dti.transition 2000, 4, :o2, 58839277, 24 # 2000,4,2,1,0,0 + dti.transition 2000, 10, :o3, Time.utc(2000,10,2,1,0,0).to_i, 58843669, 24 + dti.transition 2001, 3, :o2, Time.utc(2001, 3,2,1,0,0).to_i + dti.transition 2001, 4, :o4, Time.utc(2001, 4,2,1,0,0).to_i + dti.transition 2001, 10, :o3, Time.utc(2001,10,2,1,0,0).to_i + dti.transition 2002, 10, :o3, 58861189, 24 # 2002,10,2,1,0,0 + dti.transition 2003, 2, :o2, Time.utc(2003, 2,2,1,0,0).to_i + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2000, 4,2,1,0,0).to_i) + t2 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2000,10,2,1,0,0).to_i) + t3 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2001, 3,2,1,0,0).to_i) + t4 = TimezoneTransitionDefinition.new(o4, o2, Time.utc(2001, 4,2,1,0,0).to_i) + t5 = TimezoneTransitionDefinition.new(o3, o4, Time.utc(2001,10,2,1,0,0).to_i) + t6 = TimezoneTransitionDefinition.new(o3, o3, Time.utc(2002,10,2,1,0,0).to_i) + t7 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2003, 2,2,1,0,0).to_i) + + + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(DateTime.new(1960, 1, 1, 1, 0, 0))) + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(DateTime.new(1999,12, 1, 0, 0, 0))) + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(Time.utc( 2000, 1, 1,10, 0, 0))) + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(Time.utc( 2000, 4, 1,20, 1,39))) + assert_equal([], dti.periods_for_local(Time.utc( 2000, 4, 1,20, 1,40))) + assert_equal([], dti.periods_for_local(Time.utc( 2000, 4, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t1, t2)], dti.periods_for_local(Time.utc( 2000, 4, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t1, t2)], dti.periods_for_local(DateTime.new(2000,10, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t1, t2), + TimezonePeriod.new(t2, t3)], dti.periods_for_local(Time.utc( 2000,10, 1,20, 0, 0).to_i)) + assert_equal([TimezonePeriod.new(t1, t2), + TimezonePeriod.new(t2, t3)], dti.periods_for_local(DateTime.new(2000,10, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t2, t3)], dti.periods_for_local(Time.utc( 2000,10, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t2, t3)], dti.periods_for_local(Time.utc( 2001, 3, 1,19,59,59))) + assert_equal([], dti.periods_for_local(Time.utc( 2001, 3, 1,20, 0, 0))) + assert_equal([], dti.periods_for_local(DateTime.new(2001, 3, 1,20, 30, 0))) + assert_equal([], dti.periods_for_local(Time.utc( 2001, 3, 
1,20,59,59).to_i)) + assert_equal([TimezonePeriod.new(t3, t4)], dti.periods_for_local(Time.utc( 2001, 3, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t3, t4)], dti.periods_for_local(Time.utc( 2001, 4, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t3, t4), + TimezonePeriod.new(t4, t5)], dti.periods_for_local(DateTime.new(2001, 4, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t3, t4), + TimezonePeriod.new(t4, t5)], dti.periods_for_local(Time.utc( 2001, 4, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t4, t5)], dti.periods_for_local(Time.utc( 2001, 4, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t4, t5)], dti.periods_for_local(Time.utc( 2001,10, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t5, t6)], dti.periods_for_local(Time.utc( 2001,10, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t5, t6)], dti.periods_for_local(Time.utc( 2002, 2, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t5, t6)], dti.periods_for_local(Time.utc( 2002,10, 1,19,59,59))) + assert_equal([TimezonePeriod.new(t6, t7)], dti.periods_for_local(Time.utc( 2002,10, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t6, t7)], dti.periods_for_local(Time.utc( 2003, 2, 1,19,59,59))) + assert_equal([], dti.periods_for_local(Time.utc( 2003, 2, 1,20, 0, 0))) + assert_equal([], dti.periods_for_local(Time.utc( 2003, 2, 1,20,59,59))) + assert_equal([TimezonePeriod.new(t7, nil)], dti.periods_for_local(Time.utc( 2003, 2, 1,21, 0, 0))) + assert_equal([TimezonePeriod.new(t7, nil)], dti.periods_for_local(Time.utc( 2004, 2, 1,20, 0, 0))) + assert_equal([TimezonePeriod.new(t7, nil)], dti.periods_for_local(DateTime.new(2040, 2, 1,20, 0, 0))) + end + + def test_periods_for_local_warsaw + dti = TransitionDataTimezoneInfo.new('Test/Europe/Warsaw') + dti.offset :o1, 5040, 0, :LMT + dti.offset :o2, 5040, 0, :WMT + dti.offset :o3, 3600, 0, :CET + dti.offset :o4, 3600, 3600, :CEST + + dti.transition 1879, 12, :o2, 288925853, 120 # 1879,12,31,22,36,0 + dti.transition 1915, 8, :o3, 290485733, 120 # 1915, 8, 4,22,36,0 + dti.transition 1916, 4, :o4, 29051813, 12 # 1916, 4,30,22, 0,0 + + o1 = TimezoneOffset.new(5040, 0, :LMT) + o2 = TimezoneOffset.new(5040, 0, :WMT) + o3 = TimezoneOffset.new(3600, 0, :CET) + o4 = TimezoneOffset.new(3600, 3600, :CEST) + + t1 = TimezoneTransitionDefinition.new(o2, o1, 288925853, 120) + t2 = TimezoneTransitionDefinition.new(o3, o2, 290485733, 120) + t3 = TimezoneTransitionDefinition.new(o4, o3, 29051813, 12) + + assert_equal([TimezonePeriod.new(t1, t2), + TimezonePeriod.new(t2, t3)], dti.periods_for_local(DateTime.new(1915,8,4,23,40,0))) + end + + def test_periods_for_local_boundary + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -3600, 0, :TESTD + dti.offset :o2, -3600, 0, :TESTS + + dti.transition 2000, 7, :o2, Time.utc(2000,7,1,0,0,0).to_i + + o1 = TimezoneOffset.new(-3600, 0, :TESTD) + o2 = TimezoneOffset.new(-3600, 0, :TESTS) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2000,7,1,0,0,0).to_i) + + # 2000-07-01 00:00:00 UTC is 2000-06-30 23:00:00 UTC-1 + # hence to find periods for local times between 2000-06-30 23:00:00 + # and 2000-07-01 00:00:00 a search has to be carried out in the next half + # year to the one containing the date we are looking for + + assert_equal([TimezonePeriod.new(nil, t1)], dti.periods_for_local(Time.utc(2000,6,30,22,59,59))) + assert_equal([TimezonePeriod.new(t1, nil)], dti.periods_for_local(Time.utc(2000,6,30,23, 0, 0))) + assert_equal([TimezonePeriod.new(t1, nil)], dti.periods_for_local(Time.utc(2000,7, 1, 0, 0, 0))) + end + + def 
test_periods_for_local_no_transitions + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 0, :TEST + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + + assert_equal([TimezonePeriod.new(nil, nil, o1)], dti.periods_for_local(DateTime.new(2005,1,1,0,0,0))) + assert_equal([TimezonePeriod.new(nil, nil, o1)], dti.periods_for_local(Time.utc(2005,1,1,0,0,0))) + assert_equal([TimezonePeriod.new(nil, nil, o1)], dti.periods_for_local(Time.utc(2005,1,1,0,0,0).to_i)) + end + + def test_periods_for_local_no_offsets + dti = TransitionDataTimezoneInfo.new('Test/Zone') + + assert_raises(NoOffsetsDefined) { dti.periods_for_local(DateTime.new(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.periods_for_local(Time.utc(2005,1,1,0,0,0)) } + assert_raises(NoOffsetsDefined) { dti.periods_for_local(Time.utc(2005,1,1,0,0,0).to_i) } + end + + def test_transitions_up_to + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + dti.offset :o2, -18000, 3600, :TESTD + dti.offset :o3, -18000, 0, :TESTS + dti.offset :o4, -21600, 3600, :TESTD + + dti.transition 2010, 4, :o2, Time.utc(2010, 4,1,1,0,0).to_i + dti.transition 2010, 10, :o3, Time.utc(2010,10,1,1,0,0).to_i + dti.transition 2011, 3, :o2, 58934917, 24 # (2011, 3,1,1,0,0) + dti.transition 2011, 4, :o4, Time.utc(2011, 4,1,1,0,0).to_i, 58935661, 24 + dti.transition 2011, 10, :o3, Time.utc(2011,10,1,1,0,0).to_i + + o1 = TimezoneOffset.new(-17900, 0, :TESTLMT) + o2 = TimezoneOffset.new(-18000, 3600, :TESTD) + o3 = TimezoneOffset.new(-18000, 0, :TESTS) + o4 = TimezoneOffset.new(-21600, 3600, :TESTD) + + t1 = TimezoneTransitionDefinition.new(o2, o1, Time.utc(2010, 4,1,1,0,0).to_i) + t2 = TimezoneTransitionDefinition.new(o3, o2, Time.utc(2010,10,1,1,0,0).to_i) + t3 = TimezoneTransitionDefinition.new(o2, o3, Time.utc(2011, 3,1,1,0,0).to_i) + t4 = TimezoneTransitionDefinition.new(o4, o2, Time.utc(2011, 4,1,1,0,0).to_i) + t5 = TimezoneTransitionDefinition.new(o3, o4, Time.utc(2011,10,1,1,0,0).to_i) + + assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0))) + assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0), Time.utc(2000,1,1,0,0,0))) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1))) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1), Time.utc(2000,1,1,0,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,4,1,1,0,1), Time.utc(2010,10,1,1,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0), Time.utc(2010,4,1,1,0,1))) + assert_equal([t3], dti.transitions_up_to(Time.utc(2011,4,1,1,0,0), Time.utc(2010,10,1,1,0,1))) + assert_equal([], dti.transitions_up_to(Time.utc(2011,3,1,1,0,0), Time.utc(2010,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0,1))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1), Time.utc(2010,4,1,1,0,0))) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1), Time.utc(2010,4,1,1,0,1))) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1), Time.utc(2010,4,1,1,0,0,1))) + assert_equal([t5], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0), Time.utc(2011,10,1,1,0,0))) + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0), Time.utc(2011,10,1,1,0,1))) + + 
assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0).to_i)) + assert_equal([], dti.transitions_up_to(Time.utc(2010,4,1,1,0,0).to_i, Time.utc(2000,1,1,0,0,0).to_i)) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1).to_i)) + assert_equal([t1], dti.transitions_up_to(Time.utc(2010,4,1,1,0,1).to_i, Time.utc(2000,1,1,0,0,0).to_i)) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,4,1,1,0,1).to_i, Time.utc(2010,10,1,1,0,0).to_i)) + assert_equal([t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0).to_i, Time.utc(2010,4,1,1,0,1).to_i)) + assert_equal([t3], dti.transitions_up_to(Time.utc(2011,4,1,1,0,0).to_i, Time.utc(2010,10,1,1,0,1).to_i)) + assert_equal([], dti.transitions_up_to(Time.utc(2011,3,1,1,0,0).to_i, Time.utc(2010,10,1,1,0,1).to_i)) + assert_equal([t1,t2,t3,t4], dti.transitions_up_to(Time.utc(2011,10,1,1,0,0).to_i)) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1).to_i)) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,0).to_i)) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(Time.utc(2011,10,1,1,0,1).to_i, Time.utc(2010,4,1,1,0,1).to_i)) + assert_equal([t5], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0).to_i, Time.utc(2011,10,1,1,0,0).to_i)) + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0).to_i, Time.utc(2011,10,1,1,0,1).to_i)) + + assert_equal([], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,0))) + assert_equal([], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,0), DateTime.new(2000,1,1,0,0,0))) + assert_equal([t1], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,1))) + assert_equal([t1], dti.transitions_up_to(DateTime.new(2010,4,1,1,0,1), DateTime.new(2000,1,1,0,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(DateTime.new(2011,4,1,1,0,1), DateTime.new(2010,10,1,1,0,0))) + assert_equal([t2,t3,t4], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,0), DateTime.new(2010,4,1,1,0,1))) + assert_equal([t3], dti.transitions_up_to(DateTime.new(2011,4,1,1,0,0), DateTime.new(2010,10,1,1,0,1))) + assert_equal([], dti.transitions_up_to(DateTime.new(2011,3,1,1,0,0), DateTime.new(2010,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,0))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,1))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,Rational(DATETIME_RESOLUTION,1000000)))) + assert_equal([t1,t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,1), DateTime.new(2010,4,1,1,0,0))) + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(DateTime.new(2011,10,1,1,0,1), DateTime.new(2010,4,1,1,0,1))) + + # Ruby 1.8.7 (built with GCC 8.3.0 on Ubuntu 19.04) fails to perform + # DateTime comparisons correctly when sub-seconds are used. 
+ to = DateTime.new(2011,10,1,1,0,1) + from = DateTime.new(2010,4,1,1,0,Rational(DATETIME_RESOLUTION,1000000)) + if to > from + assert_equal([t2,t3,t4,t5], dti.transitions_up_to(to, from)) + else + puts "This platform does not consider the DateTime #{to.strftime('%Y-%m-%d %H:%m:%S.%N')} to be later than the DateTime #{from.strftime('%Y-%m-%d %H:%m:%S.%N')}" + end + + assert_equal([t5], dti.transitions_up_to(DateTime.new(2015,1,1,0,0,0), DateTime.new(2011,10,1,1,0,0))) + assert_equal([], dti.transitions_up_to(DateTime.new(2015,1,1,0,0,0), DateTime.new(2011,10,1,1,0,1))) + end + + def test_transitions_up_to_no_transitions + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0))) + assert_equal([], dti.transitions_up_to(Time.utc(2015,1,1,0,0,0).to_i)) + assert_equal([], dti.transitions_up_to(DateTime.new(2015,1,1,0,0,0))) + end + + def test_transitions_up_to_utc_to_not_greater_than_utc_from + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, -17900, 0, :TESTLMT + + assert_raises(ArgumentError) do + dti.transitions_up_to(Time.utc(2012,8,1,0,0,0), Time.utc(2013,8,1,0,0,0)) + end + + assert_raises(ArgumentError) do + dti.transitions_up_to(Time.utc(2012,8,1,0,0,0).to_i, Time.utc(2012,8,1,0,0,0).to_i) + end + + assert_raises(ArgumentError) do + dti.transitions_up_to(DateTime.new(2012,8,1,0,0,0), DateTime.new(2012,8,1,0,0,0)) + end + end + + def test_datetime_and_timestamp_use + dti = TransitionDataTimezoneInfo.new('Test/Zone') + dti.offset :o1, 0, 0, :TESTS + dti.offset :o2, 0, 3600, :TESTD + + dti.transition 1901, 12, :o2, -2147483649, 69573092117, 28800 + dti.transition 1969, 12, :o1, -1, 210866759999, 86400 + dti.transition 2001, 9, :o2, 1000000000, 529666909, 216 + dti.transition 2038, 1, :o1, 2147483648, 3328347557, 1350 + + if RubyCoreSupport.time_supports_negative && RubyCoreSupport.time_supports_64bit + assert(dti.period_for_utc(DateTime.new(1901,12,13,20,45,51)).start_transition.at.eql?(TimeOrDateTime.new(-2147483649))) + else + assert(dti.period_for_utc(DateTime.new(1901,12,13,20,45,51)).start_transition.at.eql?(TimeOrDateTime.new(DateTime.new(1901,12,13,20,45,51)))) + end + + if RubyCoreSupport.time_supports_negative + assert(dti.period_for_utc(DateTime.new(1969,12,31,23,59,59)).start_transition.at.eql?(TimeOrDateTime.new(-1))) + else + assert(dti.period_for_utc(DateTime.new(1969,12,31,23,59,59)).start_transition.at.eql?(TimeOrDateTime.new(DateTime.new(1969,12,31,23,59,59)))) + end + + assert(dti.period_for_utc(DateTime.new(2001,9,9,2,46,40)).start_transition.at.eql?(TimeOrDateTime.new(1000000000))) + + if RubyCoreSupport.time_supports_64bit + assert(dti.period_for_utc(DateTime.new(2038,1,19,3,14,8)).start_transition.at.eql?(TimeOrDateTime.new(2147483648))) + else + assert(dti.period_for_utc(DateTime.new(2038,1,19,3,14,8)).start_transition.at.eql?(TimeOrDateTime.new(DateTime.new(2038,1,19,3,14,8)))) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_transition_rule.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_transition_rule.rb new file mode 100644 index 0000000..c180f0c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_transition_rule.rb @@ -0,0 +1,663 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCTransitionRule < Minitest::Test + [-1, 0, 1].each do |transition_at| + define_method "test_transition_at_#{transition_at}" do + rule = 
TestTransitionRule.new(transition_at) + assert_equal(transition_at, rule.transition_at) + end + end + + [ + [-1, 19, 23, 59, 59], + [0, 20, 0, 0, 0], + [1, 20, 0, 0, 1], + [60, 20, 0, 1, 0], + [86399, 20, 23, 59, 59], + [86400, 21, 0, 0, 0] + ].each do |(transition_at, expected_day, expected_hour, expected_minute, expected_second)| + define_method "test_at_with_transition_at_#{transition_at}" do + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(transition_at) do |y| + assert_equal(2020, y) + TimeOrDateTime.wrap(Time.utc(2020, 3, 20)) + end + + result = rule.at(offset, 2020) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(2020, 3, expected_day, expected_hour, expected_minute, expected_second), result.to_time) + end + end + + [-7200, 0, 7200].each do |offset_seconds| + define_method "test_at_with_offset_#{offset_seconds}" do + offset = TimezoneOffset.new(offset_seconds, 0, 'TEST') + rule = TestTransitionRule.new(3600) do |y| + assert_equal(2020, y) + TimeOrDateTime.wrap(Time.utc(2020, 3, 20)) + end + + result = rule.at(offset, 2020) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(2020, 3, 20, 1, 0, 0) - offset_seconds, result.to_time) + end + end + + [2020, 2021].each do |year| + define_method "test_at_with_year_#{year}" do + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(3600) do |y| + assert_equal(year, y) + TimeOrDateTime.wrap(Time.utc(year, 3, 20)) + end + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(year, 3, 20, 1, 0, 0), result.to_time) + end + end + + def test_at_crosses_64_bit_boundary + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(11648) do |y| + assert_equal(2038, y) + TimeOrDateTime.wrap(Time.utc(2038, 1, 19)) + end + + result = rule.at(offset, 2038) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_64bit + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(2038, 1, 19, 3, 14, 8), result.to_datetime) + end + + def test_at_crosses_negative_boundary + offset = TimezoneOffset.new(0, 0, 'TEST') + rule = TestTransitionRule.new(-1) do |y| + assert_equal(1970, y) + TimeOrDateTime.wrap(Time.utc(1970, 1, 1)) + end + + result = rule.at(offset, 1970) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_negative + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(1969, 12, 31, 23, 59, 59), result.to_datetime) + end + + NEGATIVE_DATES = [[1969, 12, 31]] + B32_DATES = [[1970, 1, 1], [2038, 1, 19]] + B64_DATES = [[2038, 1, 20], [2038, 2, 1], [2039, 1, 1]] + + B32_DATES.concat(RubyCoreSupport.time_supports_negative ? NEGATIVE_DATES : []).concat(RubyCoreSupport.time_supports_64bit ? B64_DATES : []).each do |(year, month, day)| + define_method "test_new_time_or_datetime_returns_time_based_instance_for_#{year}_#{month}_#{day}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, month, day) + assert_kind_of(TimeOrDateTime, result) + assert_kind_of(Time, result.to_orig) + assert_equal(Time.utc(year, month, day), result.to_orig) + end + end + + (RubyCoreSupport.time_supports_negative ? [] : NEGATIVE_DATES).concat(RubyCoreSupport.time_supports_64bit ? 
[] : B64_DATES).each do |(year, month, day)| + define_method "test_new_time_or_datetime_returns_datetime_based_instance_for_#{year}_#{month}_#{day}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, month, day) + assert_kind_of(TimeOrDateTime, result) + assert_kind_of(DateTime, result.to_orig) + assert_equal(DateTime.new(year, month, day), result.to_orig) + end + end + + [1900, 2021, 2039].each do |year| + define_method "test_new_time_or_datetime_returns_march_1_for_feb_29_on_non_leap_year_#{year}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, 2, 29) + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, 3, 1), result.to_datetime) + end + end + + [1968, 2000, 2040].each do |year| + define_method "test_new_time_or_datetime_returns_feb_29_on_leap_year_#{year}" do + result = TestTransitionRule.new(0).new_time_or_datetime!(year, 2, 29) + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, 2, 29), result.to_datetime) + end + end + + class TestTransitionRule < TransitionRule + def initialize(transition_at, &block) + super(transition_at) + @get_day = block + end + + alias new_time_or_datetime! new_time_or_datetime + public :new_time_or_datetime! + + protected + + def get_day(year) + @get_day.call(year) + end + end +end + +module BaseTransitionRuleTestHelper + def test_invalid_transition_at + error = assert_raises(ArgumentError) { create_with_transition_at('0') } + assert_match(/\btransition_at(\b.*)?/, error.message) + end + + [:==, :eql?].each do |method| + define_method "test_not_equal_by_transition_at_with_#{method}" do + rule1 = create_with_transition_at(0) + rule2 = create_with_transition_at(1) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_other_with_#{method}" do + rule = create_with_transition_at(0) + assert_equal(false, rule.send(method, Object.new)) + end + end +end + +class TCAbsoluteDayOfYearTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + + [-1, 366, '0'].each do |value| + define_method "test_invalid_day_#{value}" do + error = assert_raises(ArgumentError) { AbsoluteDayOfYearTransitionRule.new(value) } + assert_match(/\bday\b/, error.message) + end + end + + [ + [2020, 0, 1, 1], + [2020, 58, 2, 28], + [2020, 59, 2, 29], + [2020, 365, 12, 31], + [2021, 59, 3, 1], + [2021, 365, 13, 1], + [2100, 59, 3, 1], + [2100, 365, 13, 1], + [2000, 59, 2, 29], + [2000, 365, 12, 31] + ].each do |(year, day, expected_month, expected_day)| + define_method "test_day_#{day}_of_year_#{year}" do + rule = AbsoluteDayOfYearTransitionRule.new(day, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + expected_year = year + if expected_month == 13 + expected_year += 1 + expected_month = 1 + end + + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(expected_year, expected_month, expected_day, 1, 0, 0), result.to_datetime) + end + end + + def test_crosses_64_bit_boundary + # Internally the calculation starts with Jan 1 and adds days. Jan 20 2038 + # will require a DateTime on 32-bit only systems. 
+ + rule = AbsoluteDayOfYearTransitionRule.new(19, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, 2038) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_64bit + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(2038, 1, 20, 1, 0, 0), result.to_datetime) + end + + def test_day_0_is_always_first_day_of_year + rule = AbsoluteDayOfYearTransitionRule.new(0) + assert_equal(true, rule.is_always_first_day_of_year?) + end + + [1, 365].each do |day| + define_method "test_day_#{day}_is_not_always_first_day_of_year" do + rule = AbsoluteDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_first_day_of_year?) + end + + define_method "test_day_#{day}_is_not_always_last_day_of_year" do + rule = AbsoluteDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_last_day_of_year?) + end + end + + [:==, :eql?].each do |method| + [ + [0, 0], + [0, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_equal_for_day_#{day}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = AbsoluteDayOfYearTransitionRule.new(day, transition_at) + rule2 = AbsoluteDayOfYearTransitionRule.new(day, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_by_day_with_#{method}" do + rule1 = AbsoluteDayOfYearTransitionRule.new(0, 3600) + rule2 = AbsoluteDayOfYearTransitionRule.new(1, 3600) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_julian_with_#{method}" do + rule1 = AbsoluteDayOfYearTransitionRule.new(1, 0) + rule2 = JulianDayOfYearTransitionRule.new(1, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [0, 0], + [0, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_hash_for_day_#{day}_and_transition_at_#{transition_at}" do + rule = AbsoluteDayOfYearTransitionRule.new(day, transition_at) + expected = [AbsoluteDayOfYearTransitionRule, day * 86400, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + AbsoluteDayOfYearTransitionRule.new(1, transition_at) + end +end + +class TCJulianDayOfYearTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + + [0, 366, '1'].each do |value| + define_method "test_invalid_day_#{value}" do + error = assert_raises(ArgumentError) { JulianDayOfYearTransitionRule.new(value) } + assert_match(/\bday\b/, error.message) + end + end + + [2020, 2021, 2100, 2000].each do |year| + [ + [1, 1, 1], + [59, 2, 28], + [60, 3, 1], + [365, 12, 31] + ].each do |(day, expected_month, expected_day)| + define_method "test_day_#{day}_of_year_#{year}" do + rule = JulianDayOfYearTransitionRule.new(day, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, expected_month, expected_day, 1, 0, 0), result.to_datetime) + end + end + end + + def test_day_1_is_always_first_day_of_year + rule = JulianDayOfYearTransitionRule.new(1) + assert_equal(true, rule.is_always_first_day_of_year?) 
+ end + + [2, 365].each do |day| + define_method "test_day_#{day}_is_not_always_first_day_of_year" do + rule = JulianDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_first_day_of_year?) + end + end + + def test_day_365_is_always_last_day_of_year + rule = JulianDayOfYearTransitionRule.new(365) + assert_equal(true, rule.is_always_last_day_of_year?) + end + + [1, 364].each do |day| + define_method "test_day_#{day}_is_not_always_last_day_of_year" do + rule = JulianDayOfYearTransitionRule.new(day) + assert_equal(false, rule.is_always_last_day_of_year?) + end + end + + [:==, :eql?].each do |method| + [ + [1, 0], + [1, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_equal_for_day_#{day}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = JulianDayOfYearTransitionRule.new(day, transition_at) + rule2 = JulianDayOfYearTransitionRule.new(day, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_by_day_with_#{method}" do + rule1 = JulianDayOfYearTransitionRule.new(1, 0) + rule2 = JulianDayOfYearTransitionRule.new(2, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_absolute_with_#{method}" do + rule1 = JulianDayOfYearTransitionRule.new(1, 0) + rule2 = AbsoluteDayOfYearTransitionRule.new(1, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [1, 0], + [1, 3600], + [365, 0] + ].each do |(day, transition_at)| + define_method "test_hash_for_day_#{day}_and_transition_at_#{transition_at}" do + rule = JulianDayOfYearTransitionRule.new(day, transition_at) + expected = [JulianDayOfYearTransitionRule, day * 86400, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + JulianDayOfYearTransitionRule.new(1, transition_at) + end +end + +module DayOfWeekTransitionRuleTestHelper + [-1, 0, 13, '1'].each do |month| + define_method "test_invalid_month_#{month}" do + error = assert_raises(ArgumentError) { create_with_month_and_day_of_week(month, 0) } + assert_match(/\bmonth\b/, error.message) + end + end + + [-1, 7, '0'].each do |day_of_week| + define_method "test_invalid_day_of_week_#{day_of_week}" do + error = assert_raises(ArgumentError) { create_with_month_and_day_of_week(1, day_of_week) } + assert_match(/\bday_of_week\b/, error.message) + end + end + + def test_is_not_always_first_day_of_year + rule = create_with_month_and_day_of_week(1, 0) + assert_equal(false, rule.is_always_first_day_of_year?) + end + + def test_is_not_always_last_day_of_year + rule = create_with_month_and_day_of_week(12, 6) + assert_equal(false, rule.is_always_last_day_of_year?) 
+ end + + [:==, :eql?].each do |method| + define_method "test_not_equal_by_month_with_#{method}" do + rule1 = create_with_month_and_day_of_week(1, 0) + rule2 = create_with_month_and_day_of_week(2, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_by_day_of_week_with_#{method}" do + rule1 = create_with_month_and_day_of_week(1, 0) + rule2 = create_with_month_and_day_of_week(1, 1) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end +end + +class TCDayOfMonthTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + include DayOfWeekTransitionRuleTestHelper + + [-1, 0, 5, '1'].each do |week| + define_method "test_invalid_week_#{week}" do + error = assert_raises(ArgumentError) { DayOfMonthTransitionRule.new(1, week, 0) } + assert_match(/\bweek\b/, error.message) + end + end + + [ + # All possible first week start days. + [2020, 3, [1, 2, 3, 4, 5, 6, 7]], + [2021, 3, [7, 1, 2, 3, 4, 5, 6]], + [2022, 3, [6, 7, 1, 2, 3, 4, 5]], + [2023, 3, [5, 6, 7, 1, 2, 3, 4]], + [2018, 3, [4, 5, 6, 7, 1, 2, 3]], + [2024, 3, [3, 4, 5, 6, 7, 1, 2]], + [2025, 3, [2, 3, 4, 5, 6, 7, 1]], + + # All possible months. + [2019, 1, [6]], + [2019, 2, [3]], + [2019, 3, [3]], + [2019, 4, [7]], + [2019, 5, [5]], + [2019, 6, [2]], + [2019, 7, [7]], + [2019, 8, [4]], + [2019, 9, [1]], + [2019, 10, [6]], + [2019, 11, [3]], + [2019, 12, [1]] + ].each do |(year, month, days)| + days.each_with_index do |expected_day, day_of_week| + (1..4).each do |week| + define_method "test_month_#{month}_week_#{week}_and_day_of_week_#{day_of_week}_year_#{year}" do + rule = DayOfMonthTransitionRule.new(month, week, day_of_week, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(year, month, expected_day + (week - 1) * 7, 1, 0, 0), result.to_time) + end + end + end + end + + [[3, 3, 20], [4, 6, 23]].each do |(week, day_of_week, expected_day)| + define_method "test_crosses_64_bit_boundary_for_day_of_week_#{day_of_week}" do + rule = DayOfMonthTransitionRule.new(1, week, day_of_week, 3600) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, 2038) + + assert_kind_of(TimeOrDateTime, result) + if RubyCoreSupport.time_supports_64bit + assert_kind_of(Time, result.to_orig) + else + assert_kind_of(DateTime, result.to_orig) + end + assert_equal(DateTime.new(2038, 1, expected_day, 1, 0, 0), result.to_datetime) + end + end + + [:==, :eql?].each do |method| + [ + [1, 1, 0, 0], + [1, 1, 0, 1], + [1, 1, 1, 0], + [1, 2, 0, 0], + [2, 1, 0, 0] + ].each do |(month, week, day_of_week, transition_at)| + define_method "test_equal_for_month_#{month}_week_#{week}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = DayOfMonthTransitionRule.new(month, week, day_of_week, transition_at) + rule2 = DayOfMonthTransitionRule.new(month, week, day_of_week, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_by_week_with_#{method}" do + rule1 = DayOfMonthTransitionRule.new(1, 1, 0, 0) + rule2 = DayOfMonthTransitionRule.new(1, 2, 0, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + + define_method "test_not_equal_to_last_day_of_month_with_#{method}" do + rule1 = DayOfMonthTransitionRule.new(1, 1, 0, 0) + 
rule2 = LastDayOfMonthTransitionRule.new(1, 0, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [1, 1, 0, 0], + [1, 1, 0, 1], + [1, 1, 1, 0], + [1, 2, 0, 0], + [2, 1, 0, 0] + ].each do |(month, week, day_of_week, transition_at)| + define_method "test_hash_for_month_#{month}_week_#{week}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}" do + rule = DayOfMonthTransitionRule.new(month, week, day_of_week, transition_at) + expected = [(week - 1) * 7 + 1, month, day_of_week, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + DayOfMonthTransitionRule.new(1, 1, 0, transition_at) + end + + def create_with_month_and_day_of_week(month, day_of_week) + DayOfMonthTransitionRule.new(month, 1, day_of_week) + end +end + +class TCLastDayOfMonthTransitionRule < Minitest::Test + include BaseTransitionRuleTestHelper + include DayOfWeekTransitionRuleTestHelper + + [ + # All possible last days. + [2021, 10, [31, 25, 26, 27, 28, 29, 30]], + [2022, 10, [30, 31, 25, 26, 27, 28, 29]], + [2023, 10, [29, 30, 31, 25, 26, 27, 28]], + [2018, 10, [28, 29, 30, 31, 25, 26, 27]], + [2024, 10, [27, 28, 29, 30, 31, 25, 26]], + [2025, 10, [26, 27, 28, 29, 30, 31, 25]], + [2026, 10, [25, 26, 27, 28, 29, 30, 31]], + + # All possible months. + [2020, 1, [26]], + [2020, 2, [23]], + [2020, 3, [29]], + [2020, 4, [26]], + [2020, 5, [31]], + [2020, 6, [28]], + [2020, 7, [26]], + [2020, 8, [30]], + [2020, 9, [27]], + [2020, 10, [25]], + [2020, 11, [29]], + [2020, 12, [27]] + ].each do |(year, month, days)| + days.each_with_index do |expected_day, day_of_week| + define_method "test_month_#{month}_day_of_week_#{day_of_week}_year_#{year}" do + rule = LastDayOfMonthTransitionRule.new(month, day_of_week, 7200) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(Time.utc(year, month, expected_day, 2, 0, 0), result.to_time) + end + end + end + + [[2020, 6, 29], [2021, 0, 28], [2000, 2, 29], [2100, 0, 28]].each do |(year, day_of_week, expected_day)| + define_method "test_#{expected_day == 29 ? 
'' : 'non_'}leap_year_#{year}" do + rule = LastDayOfMonthTransitionRule.new(2, day_of_week, 7200) + offset = TimezoneOffset.new(0, 0, 'TEST') + + result = rule.at(offset, year) + + assert_kind_of(TimeOrDateTime, result) + assert_equal(DateTime.new(year, 2, expected_day, 2, 0, 0), result.to_datetime) + end + end + + [:==, :eql?].each do |method| + [ + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [2, 0, 0] + ].each do |(month, day_of_week, transition_at)| + define_method "test_equal_for_month_#{month}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}_with_#{method}" do + rule1 = LastDayOfMonthTransitionRule.new(month, day_of_week, transition_at) + rule2 = LastDayOfMonthTransitionRule.new(month, day_of_week, transition_at) + assert_equal(true, rule1.send(method, rule2)) + assert_equal(true, rule2.send(method, rule1)) + end + end + + define_method "test_not_equal_to_day_of_month_with_#{method}" do + rule1 = LastDayOfMonthTransitionRule.new(1, 0, 0) + rule2 = DayOfMonthTransitionRule.new(1, 1, 0, 0) + assert_equal(false, rule1.send(method, rule2)) + assert_equal(false, rule2.send(method, rule1)) + end + end + + [ + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [2, 0, 0] + ].each do |(month, day_of_week, transition_at)| + define_method "test_hash_for_month_#{month}_day_of_week_#{day_of_week}_and_transition_at_#{transition_at}" do + rule = LastDayOfMonthTransitionRule.new(month, day_of_week, transition_at) + expected = [month, day_of_week, transition_at].hash + assert_equal(expected, rule.hash) + end + end + + protected + + def create_with_transition_at(transition_at) + LastDayOfMonthTransitionRule.new(1, 0, transition_at) + end + + def create_with_month_and_day_of_week(month, day_of_week) + LastDayOfMonthTransitionRule.new(month, day_of_week) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_country_info.rb new file mode 100644 index 0000000..f54e9b1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_country_info.rb @@ -0,0 +1,78 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') + +include TZInfo + +class TCZoneinfoCountryInfo < Minitest::Test + + def test_code + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) {|c| } + assert_equal('ZZ', ci.code) + end + + def test_name + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) {|c| } + assert_equal('Zzz', ci.name) + end + + def test_zone_identifiers_empty + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) {|c| } + assert(ci.zone_identifiers.empty?) + assert(ci.zone_identifiers.frozen?) + end + + def test_zone_identifiers + zones = [ + CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7)) + ] + + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', zones) + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + assert(ci.zone_identifiers.frozen?) + assert(!ci.zones.equal?(zones)) + assert(!zones.frozen?) 
+ end + + def test_zone_identifiers_after_freeze + zones = [ + CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7)) + ] + + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', zones) + ci.freeze + + assert_equal(['ZZ/TimezoneB', 'ZZ/TimezoneA', 'ZZ/TimezoneC', 'ZZ/TimezoneD'], ci.zone_identifiers) + end + + def test_zones_empty + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', []) + assert(ci.zones.empty?) + assert(ci.zones.frozen?) + end + + def test_zones + zones = [ + CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7)) + ] + + ci = ZoneinfoCountryInfo.new('ZZ', 'Zzz', zones) + + assert_equal([CountryTimezone.new('ZZ/TimezoneB', Rational(1, 2), Rational(1, 2), 'Timezone B'), + CountryTimezone.new('ZZ/TimezoneA', Rational(1, 4), Rational(1, 4), 'Timezone A'), + CountryTimezone.new('ZZ/TimezoneC', Rational(-10, 3), Rational(-20, 7), 'C'), + CountryTimezone.new('ZZ/TimezoneD', Rational(-10, 3), Rational(-20, 7))], + ci.zones) + assert(ci.zones.frozen?) + assert(!ci.zones.equal?(zones)) + assert(!zones.frozen?) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_data_source.rb new file mode 100644 index 0000000..fe517cb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_data_source.rb @@ -0,0 +1,1204 @@ +# encoding: UTF-8 + +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'fileutils' +require 'pathname' +require 'tmpdir' + +include TZInfo + +# Use send as a workaround for an issue on JRuby 9.2.9.0 where using the +# refinement causes calls to RubyCoreSupport.file_open to fail to pass the block +# parameter. +# +# https://travis-ci.org/tzinfo/tzinfo/jobs/628812051#L1931 +# https://github.com/jruby/jruby/issues/6009 +send(:using, TZInfo::RubyCoreSupport::UntaintExt) if TZInfo::RubyCoreSupport.const_defined?(:UntaintExt) +send(:using, TaintExt) if Module.const_defined?(:TaintExt) + +class TCZoneinfoDataSource < Minitest::Test + ZONEINFO_DIR = File.join(File.expand_path(File.dirname(__FILE__)), 'zoneinfo').untaint + + def setup + @orig_search_path = ZoneinfoDataSource.search_path.clone + @orig_alternate_iso3166_tab_search_path = ZoneinfoDataSource.alternate_iso3166_tab_search_path.clone + @orig_pwd = FileUtils.pwd + + # A zoneinfo directory containing files needed by the tests. + # The symlinks in this directory are set up in test_utils.rb. + @data_source = ZoneinfoDataSource.new(ZONEINFO_DIR) + end + + def teardown + ZoneinfoDataSource.search_path = @orig_search_path + ZoneinfoDataSource.alternate_iso3166_tab_search_path = @orig_alternate_iso3166_tab_search_path + FileUtils.chdir(@orig_pwd) + end + + def test_default_search_path + assert_equal(['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'], ZoneinfoDataSource.search_path) + assert_equal(false, ZoneinfoDataSource.search_path.frozen?) 
+ end + + def test_set_search_path_default + ZoneinfoDataSource.search_path = ['/tmp/zoneinfo1', '/tmp/zoneinfo2'] + assert_equal(['/tmp/zoneinfo1', '/tmp/zoneinfo2'], ZoneinfoDataSource.search_path) + + ZoneinfoDataSource.search_path = nil + assert_equal(['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'], ZoneinfoDataSource.search_path) + assert_equal(false, ZoneinfoDataSource.search_path.frozen?) + end + + def test_set_search_path_array + path = ['/tmp/zoneinfo1', '/tmp/zoneinfo2'] + ZoneinfoDataSource.search_path = path + assert_equal(['/tmp/zoneinfo1', '/tmp/zoneinfo2'], ZoneinfoDataSource.search_path) + refute_same(path, ZoneinfoDataSource.search_path) + end + + def test_set_search_path_array_to_s + ZoneinfoDataSource.search_path = [Pathname.new('/tmp/zoneinfo3')] + assert_equal(['/tmp/zoneinfo3'], ZoneinfoDataSource.search_path) + end + + def test_set_search_path_string + ZoneinfoDataSource.search_path = ['/tmp/zoneinfo4', '/tmp/zoneinfo5'].join(File::PATH_SEPARATOR) + assert_equal(['/tmp/zoneinfo4', '/tmp/zoneinfo5'], ZoneinfoDataSource.search_path) + end + + def test_default_alternate_iso3166_tab_search_path + assert_equal(['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + assert_equal(false, ZoneinfoDataSource.alternate_iso3166_tab_search_path.frozen?) + end + + def test_set_alternate_iso3166_tab_search_path_default + ZoneinfoDataSource.alternate_iso3166_tab_search_path = ['/tmp/iso3166.tab', '/tmp/iso3166'] + assert_equal(['/tmp/iso3166.tab', '/tmp/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + + ZoneinfoDataSource.alternate_iso3166_tab_search_path = nil + assert_equal(['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + assert_equal(false, ZoneinfoDataSource.alternate_iso3166_tab_search_path.frozen?) 
+ end + + def test_set_alternate_iso3166_tab_search_path_array + path = ['/tmp/iso3166.tab', '/tmp/iso3166'] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = path + assert_equal(['/tmp/iso3166.tab', '/tmp/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + refute_same(path, ZoneinfoDataSource.alternate_iso3166_tab_search_path) + end + + def test_set_alternate_iso3166_tab_search_path_array_to_s + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [Pathname.new('/tmp/iso3166.tab')] + assert_equal(['/tmp/iso3166.tab'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + end + + def test_set_alternate_iso3166_tab_search_path_string + ZoneinfoDataSource.alternate_iso3166_tab_search_path = ['/tmp/iso3166.tab', '/tmp/iso3166'].join(File::PATH_SEPARATOR) + assert_equal(['/tmp/iso3166.tab', '/tmp/iso3166'], ZoneinfoDataSource.alternate_iso3166_tab_search_path) + end + + def test_new_search + Dir.mktmpdir('tzinfo_test_dir1') do |dir1| + Dir.mktmpdir('tzinfo_test_dir2') do |dir2| + Dir.mktmpdir('tzinfo_test_dir3') do |dir3| + Dir.mktmpdir('tzinfo_test_dir4') do |dir4| + file = File.join(dir1, 'file') + FileUtils.touch(File.join(dir2, 'zone.tab')) + FileUtils.touch(File.join(dir3, 'iso3166.tab')) + FileUtils.touch(File.join(dir4, 'zone.tab')) + FileUtils.touch(File.join(dir4, 'iso3166.tab')) + + ZoneinfoDataSource.search_path = [file, dir2, dir3, dir4] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + data_source = ZoneinfoDataSource.new + assert_equal(dir4, data_source.zoneinfo_dir) + end + end + end + end + end + + def test_new_search_zone1970 + Dir.mktmpdir('tzinfo_test_dir1') do |dir1| + Dir.mktmpdir('tzinfo_test_dir2') do |dir2| + Dir.mktmpdir('tzinfo_test_dir3') do |dir3| + Dir.mktmpdir('tzinfo_test_dir4') do |dir4| + file = File.join(dir1, 'file') + FileUtils.touch(File.join(dir2, 'zone1970.tab')) + FileUtils.touch(File.join(dir3, 'iso3166.tab')) + FileUtils.touch(File.join(dir4, 'zone1970.tab')) + FileUtils.touch(File.join(dir4, 'iso3166.tab')) + + ZoneinfoDataSource.search_path = [file, dir2, dir3, dir4] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + data_source = ZoneinfoDataSource.new + assert_equal(dir4, data_source.zoneinfo_dir) + end + end + end + end + end + + def test_new_search_solaris_tab_files + # Solaris names the tab files 'tab/country.tab' (iso3166.tab) and + # 'tab/zone_sun.tab' (zone.tab). 
+ + Dir.mktmpdir('tzinfo_test_dir') do |dir| + tab = File.join(dir, 'tab') + FileUtils.mkdir(tab) + FileUtils.touch(File.join(tab, 'country.tab')) + FileUtils.touch(File.join(tab, 'zone_sun.tab')) + + ZoneinfoDataSource.search_path = [dir] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + data_source = ZoneinfoDataSource.new + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_search_alternate_iso3166_path + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + tab_file = File.join(tab_dir, 'iso3166') + + ZoneinfoDataSource.search_path = [zoneinfo_dir] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [tab_file] + + assert_raises(ZoneinfoDirectoryNotFound) do + ZoneinfoDataSource.new + end + + FileUtils.touch(tab_file) + + data_source = ZoneinfoDataSource.new + assert_equal(zoneinfo_dir, data_source.zoneinfo_dir) + end + end + end + + def test_new_search_not_found + Dir.mktmpdir('tzinfo_test_dir1') do |dir1| + Dir.mktmpdir('tzinfo_test_dir2') do |dir2| + Dir.mktmpdir('tzinfo_test_dir3') do |dir3| + Dir.mktmpdir('tzinfo_test_dir4') do |dir4| + Dir.mktmpdir('tzinfo_test_dir5') do |dir5| + file = File.join(dir1, 'file') + FileUtils.touch(file) + FileUtils.touch(File.join(dir2, 'zone.tab')) + FileUtils.touch(File.join(dir3, 'zone1970.tab')) + FileUtils.touch(File.join(dir4, 'iso3166.tab')) + + ZoneinfoDataSource.search_path = [file, dir2, dir3, dir4, dir5] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + + assert_raises(ZoneinfoDirectoryNotFound) do + ZoneinfoDataSource.new + end + end + end + end + end + end + end + + def test_new_search_relative + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.chdir(dir) + + ZoneinfoDataSource.search_path = ['.'] + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [] + data_source = ZoneinfoDataSource.new + assert_equal(Pathname.new(dir).realpath.to_s, data_source.zoneinfo_dir) + + # Change out of the directory to allow it to be deleted on Windows. + FileUtils.chdir(@orig_pwd) + end + end + + def test_new_dir + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_dir_zone1970 + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone1970.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_dir_solaris_tab_files + # Solaris names the tab files 'tab/country.tab' (iso3166.tab) and + # 'tab/zone_sun.tab' (zone.tab). 
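+    # The same layout is accepted when the directory is passed directly to
+    # ZoneinfoDataSource.new rather than located via the search path.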
+ + Dir.mktmpdir('tzinfo_test') do |dir| + tab = File.join(dir, 'tab') + FileUtils.mkdir(tab) + FileUtils.touch(File.join(tab, 'country.tab')) + FileUtils.touch(File.join(tab, 'zone_sun.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + end + end + + def test_new_dir_alternate_iso3166_path + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + tab_file = File.join(tab_dir, 'iso3166') + FileUtils.touch(tab_file) + + ZoneinfoDataSource.alternate_iso3166_tab_search_path = [tab_file] + + assert_raises(InvalidZoneinfoDirectory) do + # The alternate_iso3166_tab_search_path should not be used. This should raise + # an exception. + ZoneinfoDataSource.new(zoneinfo_dir) + end + + data_source = ZoneinfoDataSource.new(zoneinfo_dir, tab_file) + assert_equal(zoneinfo_dir, data_source.zoneinfo_dir) + end + end + end + + def test_new_dir_invalid + Dir.mktmpdir('tzinfo_test') do |dir| + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(dir) + end + end + end + + def test_new_dir_invalid_alternate_iso3166_path + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(zoneinfo_dir, File.join(tab_dir, 'iso3166')) + end + end + end + end + + def test_new_dir_invalid_alternate_iso3166_path_overrides_valid + Dir.mktmpdir('tzinfo_test_dir_zoneinfo') do |zoneinfo_dir| + Dir.mktmpdir('tzinfo_test_dir_tab') do |tab_dir| + FileUtils.touch(File.join(zoneinfo_dir, 'iso3166.tab')) + FileUtils.touch(File.join(zoneinfo_dir, 'zone.tab')) + + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(zoneinfo_dir, File.join(tab_dir, 'iso3166')) + end + end + end + end + + def test_new_file + Dir.mktmpdir('tzinfo_test') do |dir| + file = File.join(dir, 'file') + FileUtils.touch(file) + + assert_raises(InvalidZoneinfoDirectory) do + ZoneinfoDataSource.new(file) + end + end + end + + def test_new_dir_relative + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.chdir(dir) + + data_source = ZoneinfoDataSource.new('.') + assert_equal(Pathname.new(dir).realpath.to_s, data_source.zoneinfo_dir) + + # Change out of the directory to allow it to be deleted on Windows. + FileUtils.chdir(@orig_pwd) + end + end + + def test_zoneinfo_dir + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + data_source = ZoneinfoDataSource.new(dir) + assert_equal(dir, data_source.zoneinfo_dir) + assert_equal(true, data_source.zoneinfo_dir.frozen?) + end + end + + def test_load_timezone_info_data + info = @data_source.load_timezone_info('Europe/London') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Europe/London', info.identifier) + end + + def test_load_timezone_info_linked + info = @data_source.load_timezone_info('UTC') + + # On platforms that don't support symlinks, 'UTC' will be created as a copy. + # Either way, a ZoneinfoTimezoneInfo should be returned. 
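+    # Compiled zoneinfo files don't reliably record whether an identifier is a
+    # link, so linked identifiers are reported as ordinary data timezones (and
+    # linked_timezone_identifiers is expected to be empty later in this file).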
+ + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('UTC', info.identifier) + end + + def test_load_timezone_info_does_not_exist + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('Nowhere/Special') + end + end + + def test_load_timezone_info_invalid + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('../Definitions/Europe/London') + end + end + + %w(leapseconds localtime).each do |file_name| + define_method("test_load_timezone_info_ignored_#{file_name}_file") do + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info(file_name) + end + end + end + + def test_load_timezone_info_ignored_plus_version_file + # Mac OS X includes a file named +VERSION containing the tzdata version. + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + File.open(File.join(dir, '+VERSION'), 'w') do |f| + f.binmode + f.write("2013a\n") + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('+VERSION') + end + end + end + + def test_load_timezone_info_ignored_timeconfig_symlink + # Slackware includes a symlink named timeconfig that points at /usr/sbin/timeconfig. + + Dir.mktmpdir('tzinfo_test_target') do |target_dir| + target_path = File.join(target_dir, 'timeconfig') + + File.open(target_path, 'w') do |f| + f.write("#!/bin/sh\n") + f.write("#\n") + f.write('# timeconfig Slackware Linux timezone configuration utility.\n') + end + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + symlink_path = File.join(dir, 'timeconfig') + begin + FileUtils.ln_s(target_path, symlink_path) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Copy instead. 
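+          # Whether linked or copied, the resulting file holds a shell script
+          # rather than TZif data, so loading it below must still fail with
+          # InvalidTimezoneIdentifier.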
+ FileUtils.cp(target_path, symlink_path) + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('timeconfig') + end + end + end + end + + def test_load_timezone_info_nil + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info(nil) + end + end + + def test_load_timezone_info_case + assert_raises(InvalidTimezoneIdentifier) do + @data_source.load_timezone_info('europe/london') + end + end + + def test_load_timezone_info_permission_denied + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + file = File.join(dir, 'UTC') + FileUtils.touch(file) + FileUtils.chmod(0200, file) + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('UTC') + end + end + end + + def test_load_timezone_info_directory + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + subdir = File.join(dir, 'Subdir') + FileUtils.mkdir(subdir) + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('Subdir') + end + end + end + + def test_load_timezone_info_linked_absolute_outside + Dir.mktmpdir('tzinfo_test') do |dir| + Dir.mktmpdir('tzinfo_test') do |outside| + outside_file = File.join(outside, 'EST') + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), outside_file) + + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + file = File.join(dir, 'EST') + + begin + FileUtils.ln_s(outside_file, file) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. + return + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('EST') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('EST', info.identifier) + end + end + end + + def test_load_timezone_info_linked_absolute_inside + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + link = File.join(dir, 'Link') + + begin + FileUtils.ln_s(File.join(File.expand_path(dir), 'EST'), link) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. + return + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Link', info.identifier) + end + end + + def test_load_timezone_info_linked_relative_outside + Dir.mktmpdir('tzinfo_test') do |root| + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(root, 'outside')) + + dir = File.join(root, 'zoneinfo') + FileUtils.mkdir(dir) + + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + link = File.join(dir, 'Link') + + begin + FileUtils.ln_s('../outside', link) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. 
+ return + end + + subdir = File.join(dir, 'Subdir') + subdir_link = File.join(subdir, 'Link') + FileUtils.mkdir(subdir) + FileUtils.ln_s('../../outside', subdir_link) + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Link', info.identifier) + + info = data_source.load_timezone_info('Subdir/Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir/Link', info.identifier) + end + end + + def test_load_timezone_info_linked_relative_parent_inside + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + subdir = File.join(dir, 'Subdir') + FileUtils.mkdir(subdir) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(subdir, 'EST')) + + subdir_link = File.join(subdir, 'Link') + begin + FileUtils.ln_s('../Subdir/EST', subdir_link) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Skip test. + return + end + + subdir_link2 = File.join(subdir, 'Link2') + FileUtils.ln_s('../EST', subdir_link2) + + subdir2 = File.join(dir, 'Subdir2') + FileUtils.mkdir(subdir2) + subdir2_link = File.join(subdir2, 'Link') + FileUtils.ln_s('../Subdir/EST', subdir2_link) + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_timezone_info('Subdir/Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir/Link', info.identifier) + + info = data_source.load_timezone_info('Subdir/Link2') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir/Link2', info.identifier) + + info = data_source.load_timezone_info('Subdir2/Link') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Subdir2/Link', info.identifier) + end + end + + def test_load_timezone_info_invalid_file + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + File.open(File.join(dir, 'Zone'), 'wb') do |file| + file.write('NotAValidTZifFile') + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('Zone') + end + end + end + + def test_load_timezone_info_invalid_file_2 + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + + zone = File.join(dir, 'Zone') + + File.open(File.join(@data_source.zoneinfo_dir, 'EST')) do |src| + # Change header to TZif1 (which is not a valid header). + File.open(zone, 'wb') do |dest| + dest.write('TZif1') + src.pos = 5 + FileUtils.copy_stream(src, dest) + end + end + + data_source = ZoneinfoDataSource.new(dir) + + assert_raises(InvalidTimezoneIdentifier) do + data_source.load_timezone_info('Zone') + end + end + end + + def test_load_timezone_info_tainted + safe_test(:unavailable => :skip) do + identifier = 'Europe/Amsterdam'.dup.taint + assert(identifier.tainted?) + info = @data_source.load_timezone_info(identifier) + assert_equal('Europe/Amsterdam', info.identifier) + assert(identifier.tainted?) 
+ end + end + + def test_load_timezone_info_tainted_and_frozen + safe_test do + info = @data_source.load_timezone_info('Europe/Amsterdam'.dup.taint.freeze) + assert_equal('Europe/Amsterdam', info.identifier) + end + end + + def test_load_timezone_info_tainted_zoneinfo_dir_safe_mode + safe_test(:unavailable => :skip) do + assert_raises(SecurityError) do + ZoneinfoDataSource.new(@data_source.zoneinfo_dir.dup.taint) + end + end + end + + def test_load_timezone_info_tainted_zoneinfo_dir + data_source = ZoneinfoDataSource.new(@data_source.zoneinfo_dir.dup.taint) + info = data_source.load_timezone_info('Europe/London') + assert_kind_of(ZoneinfoTimezoneInfo, info) + assert_equal('Europe/London', info.identifier) + end + + def get_timezone_filenames(directory) + entries = Dir.glob(File.join(directory, '**', '*')) + + entries = entries.select do |file| + file.untaint + File.file?(file) + end + + entries = entries.collect {|file| file[directory.length + File::SEPARATOR.length, file.length - directory.length - File::SEPARATOR.length]} + + # Exclude right (with leapseconds) and posix (copy) directories; .tab files; leapseconds, localtime and posixrules files. + entries = entries.select do |file| + file !~ /\A(posix|right)\// && + file !~ /\.tab\z/ && + !%w(leapseconds localtime posixrules).include?(file) + end + + entries.sort + end + + def test_timezone_identifiers + expected = get_timezone_filenames(@data_source.zoneinfo_dir) + all = @data_source.timezone_identifiers + assert_kind_of(Array, all) + assert_array_same_items(expected, all) + assert_equal(true, all.frozen?) + end + + def test_data_timezone_identifiers + expected = get_timezone_filenames(@data_source.zoneinfo_dir) + all_data = @data_source.data_timezone_identifiers + assert_kind_of(Array, all_data) + assert_array_same_items(expected, all_data) + assert_equal(true, all_data.frozen?) + end + + def test_linked_timezone_identifiers + all_linked = @data_source.linked_timezone_identifiers + assert_kind_of(Array, all_linked) + assert_equal(true, all_linked.empty?) + assert_equal(true, all_linked.frozen?) + end + + def test_timezone_identifiers_safe_mode + safe_test do + expected = get_timezone_filenames(@data_source.zoneinfo_dir) + all = @data_source.timezone_identifiers + assert_kind_of(Array, all) + assert_array_same_items(expected, all) + assert_equal(true, all.frozen?) + end + end + + def test_timezone_identifiers_ignored_plus_version_file + # Mac OS X includes a file named +VERSION containing the tzdata version. + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + File.open(File.join(dir, '+VERSION'), 'w') do |f| + f.binmode + f.write("2013a\n") + end + + data_source = ZoneinfoDataSource.new(dir) + assert_array_same_items(['EST'], data_source.timezone_identifiers) + end + end + + def test_timezone_identifiers_ignored_timeconfig_symlink + # Slackware includes a symlink named timeconfig that points at /usr/sbin/timeconfig. 
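+    # Since it is not a valid TZif file, it should be excluded from
+    # timezone_identifiers; only the copied 'EST' file should be listed.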
+ + Dir.mktmpdir('tzinfo_test_target') do |target_dir| + target_path = File.join(target_dir, 'timeconfig') + + File.open(target_path, 'w') do |f| + f.write("#!/bin/sh\n") + f.write("#\n") + f.write('# timeconfig Slackware Linux timezone configuration utility.\n') + end + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + symlink_path = File.join(dir, 'timeconfig') + begin + FileUtils.ln_s(target_path, symlink_path) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Copy instead. + FileUtils.cp(target_path, symlink_path) + end + + data_source = ZoneinfoDataSource.new(dir) + assert_array_same_items(['EST'], data_source.timezone_identifiers) + end + end + end + + def test_timezone_identifiers_ignored_src_directory + # Solaris includes a src directory containing the source timezone data files + # from the tzdata distribution. These should be ignored. + + Dir.mktmpdir('tzinfo_test') do |dir| + FileUtils.touch(File.join(dir, 'zone.tab')) + FileUtils.touch(File.join(dir, 'iso3166.tab')) + FileUtils.cp(File.join(@data_source.zoneinfo_dir, 'EST'), File.join(dir, 'EST')) + + src_dir = File.join(dir, 'src') + FileUtils.mkdir(src_dir) + + File.open(File.join(src_dir, 'europe'), 'w') do |f| + f.binmode + f.write("Zone\tEurope/London\t0:00\tEU\tGMT/BST\n") + end + + data_source = ZoneinfoDataSource.new(dir) + assert_array_same_items(['EST'], data_source.timezone_identifiers) + end + end + + def test_load_country_info + info = @data_source.load_country_info('GB') + assert_equal('GB', info.code) + assert_equal('Britain (UK)', info.name) + end + + def test_load_country_info_not_exist + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('ZZ') + end + end + + def test_load_country_info_invalid + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('../Countries/GB') + end + end + + def test_load_country_info_nil + assert_raises(InvalidCountryCode) do + @data_source.load_country_info(nil) + end + end + + def test_load_country_info_case + assert_raises(InvalidCountryCode) do + @data_source.load_country_info('gb') + end + end + + def test_load_country_info_tainted + safe_test(:unavailable => :skip) do + code = 'NL'.dup.taint + assert(code.tainted?) + info = @data_source.load_country_info(code) + assert_equal('NL', info.code) + assert(code.tainted?) 
+ end + end + + def test_load_country_info_tainted_and_frozen + safe_test do + info = @data_source.load_country_info('NL'.dup.taint.freeze) + assert_equal('NL', info.code) + end + end + + def test_load_country_info_check_zones + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts('# iso3166.tab') + iso3166.puts('') + iso3166.puts("FC\tFake Country") + iso3166.puts("OC\tOther Country") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts('# zone.tab') + zone.puts('') + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([CountryTimezone.new('Other/One', Rational(601, 12), Rational(433, 30))], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_check_zones_zone1970 + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts('# iso3166.tab') + iso3166.puts('') + iso3166.puts("AC\tAnother Country") + iso3166.puts("FC\tFake Country") + iso3166.puts("OC\tOther Country") + end + + # zone.tab will be ignored. + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts('# zone.tab') + zone.puts('') + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + end + + # zone1970.tab will be used. + RubyCoreSupport.open_file(File.join(dir, 'zone1970.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts('# zone1970.tab') + zone.puts('') + zone.puts("AC,OC\t+0000+00000\tMiddle/Another/One\tAnother's One") + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC,OC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC,OC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + zone.puts("OC\t+5015+11426\tOther/Two") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('AC') + assert_equal('AC', info.code) + assert_equal('Another Country', info.name) + assert_equal(['Middle/Another/One'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) 
+ assert_equal([CountryTimezone.new('Middle/Another/One', Rational(0, 1), Rational(0, 1), "Another's One")], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + # Testing the ordering of zones. A zone can either be primary (country + # code is the first in the first column), or secondary (country code is + # not the first). Should return all the primaries first in the order they + # appeared in the file, followed by all the secondaries in the order they + # appeared in file. + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One', 'Other/Two', 'Middle/Another/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Other/One', Rational(601, 12), Rational( 433, 30)), + CountryTimezone.new('Other/Two', Rational(201, 4), Rational(3433, 30)), + CountryTimezone.new('Middle/Another/One', Rational(0, 1), Rational(0, 1), "Another's One"), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_check_zones_solaris_tab_files + # Solaris uses 5 columns instead of the usual 4 in zone_sun.tab. + # An extra column before the comment gives an optional linked/alternate + # timezone identifier (or '-' if not set). + # + # Additionally, there is a section at the end of the file for timezones + # covering regions. These are given lower-case "country" codes. The timezone + # identifier column refers to a continent instead of an identifier. These + # lines will be ignored by TZInfo. 
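+    #
+    # A country line in zone_sun.tab therefore has the form:
+    #
+    #   code <tab> coordinates <tab> identifier <tab> linked identifier or '-' <tab> comment
+    #
+    # as written out below.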
+ + Dir.mktmpdir('tzinfo_test') do |dir| + tab_dir = File.join(dir, 'tab') + FileUtils.mkdir(tab_dir) + + RubyCoreSupport.open_file(File.join(tab_dir, 'country.tab'), 'w', :external_encoding => 'UTF-8') do |country| + country.puts('# country.tab') + country.puts('# Solaris') + country.puts("FC\tFake Country") + country.puts("OC\tOther Country") + end + + RubyCoreSupport.open_file(File.join(tab_dir, 'zone_sun.tab'), 'w', :external_encoding => 'UTF-8') do |zone_sun| + zone_sun.puts('# zone_sun.tab') + zone_sun.puts('# Solaris') + zone_sun.puts('# Countries') + zone_sun.puts("FC\t+513030-0000731\tFake/One\t-\tDescription of one") + zone_sun.puts("FC\t+353916+1394441\tFake/Two\tFake/Alias/Two\tAnother description") + zone_sun.puts("FC\t-2332-04637\tFake/Three\tFake/Alias/Three\tThis is Three") + zone_sun.puts("OC\t+5005+01426\tOther/One\tOther/Linked/One") + zone_sun.puts("OC\t+5015+01436\tOther/Two\t-") + zone_sun.puts('# Regions') + zone_sun.puts("ee\t+0000+00000\tEurope/\tEET") + zone_sun.puts("me\t+0000+00000\tEurope/\tMET") + zone_sun.puts("we\t+0000+00000\tEurope/\tWET") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One', 'Other/Two'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([ + CountryTimezone.new('Other/One', Rational(601, 12), Rational(433, 30)), + CountryTimezone.new('Other/Two', Rational(201, 4), Rational(73, 5))], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_check_zones_alternate_iso3166_file + Dir.mktmpdir('tzinfo_test') do |dir| + zoneinfo_dir = File.join(dir, 'zoneinfo') + tab_dir = File.join(dir, 'tab') + FileUtils.mkdir(zoneinfo_dir) + FileUtils.mkdir(tab_dir) + + tab_file = File.join(tab_dir, 'iso3166') + RubyCoreSupport.open_file(tab_file, 'w', :external_encoding => 'UTF-8') do |iso3166| + # Use the BSD 4 column format (alternate iso3166 is used on BSD). + iso3166.puts("FC\tFCC\t001\tFake Country") + iso3166.puts("OC\tOCC\t002\tOther Country") + end + + RubyCoreSupport.open_file(File.join(zoneinfo_dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("FC\t+353916+1394441\tFake/Two\tAnother description") + zone.puts("FC\t-2332-04637\tFake/Three\tThis is Three") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(zoneinfo_dir, tab_file) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + assert_equal(['Fake/One', 'Fake/Two', 'Fake/Three'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) 
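+      # zone.tab coordinates are signed degrees-minutes-seconds (ISO 6709)
+      # values converted to Rational degrees: '+513030-0000731' is 51d 30m 30s
+      # north, 0d 7m 31s west, i.e. 51 + 30/60 + 30/3600 = Rational(6181, 120)
+      # and -(7/60 + 31/3600) = Rational(-451, 3600).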
+ assert_equal([ + CountryTimezone.new('Fake/One', Rational(6181, 120), Rational(-451, 3600), 'Description of one'), + CountryTimezone.new('Fake/Two', Rational(32089, 900), Rational(503081, 3600), 'Another description'), + CountryTimezone.new('Fake/Three', Rational(-353, 15), Rational(-2797, 60), 'This is Three')], info.zones) + assert_equal(true, info.zones.frozen?) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + assert_equal(['Other/One'], info.zone_identifiers) + assert_equal(true, info.zone_identifiers.frozen?) + assert_equal([CountryTimezone.new('Other/One', Rational(601, 12), Rational(433, 30))], info.zones) + assert_equal(true, info.zones.frozen?) + end + end + + def test_load_country_info_four_column_iso31611 + # OpenBSD and FreeBSD use a 4 column iso3166.tab file that includes + # alpha-3 and numeric-3 codes in addition to the alpha-2 and name in the + # tz database version. + + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("FC\tFCC\t001\tFake Country") + iso3166.puts("OC\tOCC\t002\tOther Country") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('FC') + assert_equal('FC', info.code) + assert_equal('Fake Country', info.name) + + info = data_source.load_country_info('OC') + assert_equal('OC', info.code) + assert_equal('Other Country', info.name) + end + end + + def test_load_country_info_utf8 + # iso3166.tab is currently in ASCII (as of tzdata 2014f), but will be + # changed to UTF-8 in the future. + + # zone.tab is in ASCII, with no plans to change. Since ASCII is a subset of + # UTF-8, test that this is loaded in UTF-8 anyway. + + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("UT\tUnicode Test ✓") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("UT\t+513030-0000731\tUnicode✓/One\tUnicode Description ✓") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('UT') + assert_equal('UT', info.code) + assert_equal('Unicode Test ✓', info.name) + assert_equal(['Unicode✓/One'], info.zone_identifiers) + assert_equal([CountryTimezone.new('Unicode✓/One', Rational(6181, 120), Rational(-451, 3600), 'Unicode Description ✓')], info.zones) + end + end + + def test_load_country_info_utf8_zone1970 + # iso3166.tab is currently in ASCII (as of tzdata 2014f), but will be + # changed to UTF-8 in the future. + + # zone1970.tab is in UTF-8. 
+ + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("UT\tUnicode Test ✓") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone1970.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("UT\t+513030-0000731\tUnicode✓/One\tUnicode Description ✓") + end + + data_source = ZoneinfoDataSource.new(dir) + + info = data_source.load_country_info('UT') + assert_equal('UT', info.code) + assert_equal('Unicode Test ✓', info.name) + assert_equal(['Unicode✓/One'], info.zone_identifiers) + assert_equal([CountryTimezone.new('Unicode✓/One', Rational(6181, 120), Rational(-451, 3600), 'Unicode Description ✓')], info.zones) + end + end + + def test_country_codes + file_codes = [] + + RubyCoreSupport.open_file(File.join(@data_source.zoneinfo_dir, 'iso3166.tab'), 'r', :external_encoding => 'UTF-8', :internal_encoding => 'UTF-8') do |file| + file.each_line do |line| + line.chomp! + file_codes << $1 if line =~ /\A([A-Z]{2})\t/ + end + end + + codes = @data_source.country_codes + assert_array_same_items(file_codes, codes) + assert_equal(true, codes.frozen?) + end + + def test_country_codes_four_column_iso3166 + # OpenBSD and FreeBSD use a 4 column iso3166.tab file that includes + # alpha-3 and numeric-3 codes in addition to the alpha-2 and name in the + # tz database version. + + Dir.mktmpdir('tzinfo_test') do |dir| + RubyCoreSupport.open_file(File.join(dir, 'iso3166.tab'), 'w', :external_encoding => 'UTF-8') do |iso3166| + iso3166.puts("FC\tFCC\t001\tFake Country") + iso3166.puts("OC\tOCC\t002\tOther Country") + end + + RubyCoreSupport.open_file(File.join(dir, 'zone.tab'), 'w', :external_encoding => 'UTF-8') do |zone| + zone.puts("FC\t+513030-0000731\tFake/One\tDescription of one") + zone.puts("OC\t+5005+01426\tOther/One") + end + + data_source = ZoneinfoDataSource.new(dir) + + codes = data_source.country_codes + assert_array_same_items(%w(FC OC), codes) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_timezone_info.rb new file mode 100644 index 0000000..c8c47a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tc_zoneinfo_timezone_info.rb @@ -0,0 +1,2075 @@ +# encoding: UTF-8 + +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils') +require 'tempfile' + +include TZInfo + +# Use send as a workaround for erroneous 'wrong number of arguments' errors with +# JRuby 9.0.5.0 when calling methods with Java implementations. See #114. +send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt) + +class TCZoneinfoTimezoneInfo < Minitest::Test + class FakePosixTimeZoneParser + def initialize(&block) + @on_parse = block + end + + def parse(tz_string) + @on_parse.call(tz_string) + end + end + + begin + Time.at(-2147483649) + Time.at(2147483648) + SUPPORTS_64BIT = true + rescue RangeError + SUPPORTS_64BIT = false + end + + begin + Time.at(-1) + Time.at(-2147483648) + SUPPORTS_NEGATIVE = true + rescue ArgumentError + SUPPORTS_NEGATIVE = false + end + + def setup + @expect_tz_string = nil + @tz_parse_result = nil + @posix_tz_parser = FakePosixTimeZoneParser.new do |tz_string| + raise "Unexpected tz_string passed to PosixTimeZoneParser: #{tz_string}" unless tz_string == @expect_tz_string + raise InvalidPosixTimeZone, 'FakePosixTimeZoneParser Failure.' 
if @tz_parse_result == :fail + @tz_parse_result + end + end + + def assert_period(abbreviation, utc_offset, std_offset, dst, start_at, end_at, info) + if start_at + period = info.period_for_utc(start_at) + elsif end_at + period = info.period_for_utc(TimeOrDateTime.wrap(end_at).add_with_convert(-1).to_orig) + else + # no transitions, pick the epoch + period = info.period_for_utc(Time.utc(1970, 1, 1)) + end + + assert_equal(abbreviation, period.abbreviation, 'abbreviation') + assert_equal(utc_offset, period.utc_offset, 'utc_offset') + assert_equal(std_offset, period.std_offset, 'std_offset') + assert_equal(dst, period.dst?, 'dst') + + if start_at + refute_nil(period.utc_start_time, 'utc_start_time') + assert_equal(start_at, period.utc_start_time, 'utc_start_time') + else + assert_nil(period.utc_start_time, 'utc_start_time') + end + + if end_at + refute_nil(period.utc_end_time, 'utc_end_time') + assert_equal(end_at, period.utc_end_time, 'utc_end_time') + else + assert_nil(period.utc_end_time, 'utc_end_time') + end + end + + def convert_times_to_i(items, key = :at) + items.each do |item| + if item[key].kind_of?(Time) + item[key] = item[key].utc.to_i + end + end + end + + def select_with_32bit_values(items, key = :at) + items.select do |item| + i = item[key] + i >= -2147483648 && i <= 2147483647 + end + end + + def pack_int64_network_order(values) + values.collect {|value| [value >> 32, value & 0xFFFFFFFF]}.flatten.pack('NN' * values.length) + end + + def pack_int64_signed_network_order(values) + # Convert to the equivalent 64-bit unsigned integer with the same bit representation + pack_int64_network_order(values.collect {|value| value < 0 ? value + 0x10000000000000000 : value}) + end + + def write_tzif(format, offsets, transitions, tz_string, leaps, options = {}) + + # Options for testing malformed zoneinfo files. + magic = options[:magic] + section2_magic = options[:section2_magic] + abbrev_separator = options[:abbrev_separator] || "\0" + abbrev_offset_base = options[:abbrev_offset_base] || 0 + omit_tz_string_start_new_line = options[:omit_tz_string_start_new_line] + omit_tz_string_end_new_line = options[:omit_tz_string_end_new_line] + + unless magic + if format == 1 + magic = "TZif\0" + elsif format >= 2 + magic = "TZif#{format}" + else + raise ArgumentError, 'Invalid format specified' + end + end + + if section2_magic.kind_of?(Proc) + section2_magic = section2_magic.call(format) + else + section2_magic = magic unless section2_magic + end + + convert_times_to_i(transitions) + convert_times_to_i(leaps) + + abbrevs = offsets.collect {|o| o[:abbrev]}.uniq + + if abbrevs.length > 0 + abbrevs = abbrevs.collect {|a| a.encode('UTF-8')} if abbrevs.first.respond_to?(:encode) + + if abbrevs.first.respond_to?(:bytesize) + abbrevs_length = abbrevs.inject(0) {|sum, a| sum + a.bytesize + abbrev_separator.bytesize} + else + abbrevs_length = abbrevs.inject(0) {|sum, a| sum + a.length + abbrev_separator.length} + end + else + abbrevs_length = 0 + end + + b32_transitions = select_with_32bit_values(transitions) + b32_leaps = select_with_32bit_values(leaps) + + Tempfile.open('tzinfo-test-zone') do |file| + file.binmode + + file.write( + [magic, offsets.length, offsets.length, leaps.length, + b32_transitions.length, offsets.length, abbrevs_length].pack('a5 x15 NNNNNN')) + + unless b32_transitions.empty? 
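+        # The v1 (32-bit) section stores each transition time in a 4-byte
+        # big-endian field ('N'), which is why only the transitions filtered
+        # into b32_transitions above are written to it.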
+ file.write(b32_transitions.collect {|t| t[:at]}.pack('N' * b32_transitions.length)) + file.write(b32_transitions.collect {|t| t[:offset_index]}.pack('C' * b32_transitions.length)) + end + + offsets.each do |offset| + index = abbrevs.index(offset[:abbrev]) + abbrev_offset = abbrev_offset_base + 0.upto(index - 1) {|i| abbrev_offset += abbrevs[i].length + 1} + + file.write([offset[:gmtoff], offset[:isdst] ? 1 : 0, abbrev_offset].pack('NCC')) + end + + abbrevs.each do |a| + file.write(a) + file.write(abbrev_separator) + end + + b32_leaps.each do |leap| + file.write([leap[:at], leap[:seconds]].pack('NN')) + end + + unless offsets.empty? + file.write("\0" * offsets.length * 2) + end + + if format >= 2 + file.write( + [section2_magic, offsets.length, offsets.length, leaps.length, + transitions.length, offsets.length, abbrevs_length].pack('a5 x15 NNNNNN')) + + unless transitions.empty? + file.write(pack_int64_signed_network_order(transitions.collect {|t| t[:at]})) + file.write(transitions.collect {|t| t[:offset_index]}.pack('C' * transitions.length)) + end + + offsets.each do |offset| + index = abbrevs.index(offset[:abbrev]) + abbrev_offset = abbrev_offset_base + 0.upto(index - 1) {|i| abbrev_offset += abbrevs[i].length + 1} + + file.write([offset[:gmtoff], offset[:isdst] ? 1 : 0, abbrev_offset].pack('NCC')) + end + + abbrevs.each do |a| + file.write(a) + file.write(abbrev_separator) + end + + leaps.each do |leap| + file.write(pack_int64_signed_network_order([leap[:at]])) + file.write([leap[:seconds]].pack('N')) + end + + unless offsets.empty? + file.write("\0" * offsets.length * 2) + end + + # Empty POSIX timezone string + file.write("\n") unless omit_tz_string_start_new_line + tz_string = tz_string.encode(Encoding::UTF_8) if tz_string.respond_to?(:encode) + file.write(tz_string) + file.write("\n") unless omit_tz_string_end_new_line + end + + file.flush + + yield file.path + end + end + + def tzif_test(offsets, transitions, options = {}, &block) + rules = options[:rules] + tz_string = options[:tz_string] || (rules ? "TEST_TZ_STRING_#{rand(1000000)}" : '') + leaps = options[:leaps] || [] + min_format = options[:min_format] || (tz_string.empty? ? 
1 : 2) + + min_format.upto(3) do |format| + write_tzif(format, offsets, transitions, tz_string, leaps, options) do |path| + if format >= 2 + @tz_parse_result = rules + @expect_tz_string = tz_string + end + begin + yield path, format + ensure + @tz_parse_result = nil + @expect_tz_string = nil + end + end + end + end + + def test_load + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => Time.utc(1971, 1, 2), :offset_index => 1}, + {:at => Time.utc(1980, 4, 22), :offset_index => 2}, + {:at => Time.utc(1980, 10, 21), :offset_index => 1}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1971, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1971, 1, 2), Time.utc(1980, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1980, 4, 22), Time.utc(1980, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(1980, 10, 21), Time.utc(2000, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2000, 12, 31), nil, info) + end + end + + def test_load_negative_utc_offset + offsets = [ + {:gmtoff => -12492, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => -12000, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => -8400, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => -8400, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => Time.utc(1971, 7, 9, 3, 0, 0), :offset_index => 1}, + {:at => Time.utc(1972, 10, 12, 3, 0, 0), :offset_index => 2}, + {:at => Time.utc(1973, 4, 29, 3, 0, 0), :offset_index => 1}, + {:at => Time.utc(1992, 4, 1, 4, 30, 0), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, -12492, 0, false, nil, Time.utc(1971, 7, 9, 3, 0, 0), info) + assert_period(:XST, -12000, 0, false, Time.utc(1971, 7, 9, 3, 0, 0), Time.utc(1972, 10, 12, 3, 0, 0), info) + assert_period(:XDT, -12000, 3600, true, Time.utc(1972, 10, 12, 3, 0, 0), Time.utc(1973, 4, 29, 3, 0, 0), info) + assert_period(:XST, -12000, 0, false, Time.utc(1973, 4, 29, 3, 0, 0), Time.utc(1992, 4, 1, 4, 30, 0), info) + assert_period(:XNST, -8400, 0, false, Time.utc(1992, 4, 1, 4, 30, 0), nil, info) + end + end + + def test_load_dst_first + offsets = [ + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => Time.utc(1979, 1, 2), :offset_index => 2}, + {:at => Time.utc(1980, 4, 22), :offset_index => 0}, + {:at => Time.utc(1980, 10, 21), :offset_index => 2}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/Two', path, @posix_tz_parser) + assert_equal('Zone/Two', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1979, 1, 2), info) + end + end + + def test_load_no_transitions + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, []) do |path, format| + info = 
ZoneinfoTimezoneInfo.new('Zone/three', path, @posix_tz_parser) + assert_equal('Zone/three', info.identifier) + + assert_period(:LT, -12094, 0, false, nil, nil, info) + end + end + + def test_load_no_offsets + offsets = [] + transitions = [{:at => Time.utc(2000, 12, 31), :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_invalid_offset_index + offsets = [{:gmtoff => -0, :isdst => false, :abbrev => 'LMT'}] + transitions = [{:at => Time.utc(2000, 12, 31), :offset_index => 2}] + + tzif_test(offsets, transitions) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_with_leap_seconds + offsets = [{:gmtoff => -0, :isdst => false, :abbrev => 'LMT'}] + leaps = [{:at => Time.utc(1972,6,30,23,59,60), :seconds => 1}] + + tzif_test(offsets, [], :leaps => leaps) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_invalid_magic + ['TZif4', 'tzif2', '12345'].each do |magic| + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, [], :magic => magic) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone2', path, @posix_tz_parser) + end + end + end + end + + def test_load_invalid_section2_magic + ['TZif4', 'tzif2', '12345'].each do |section2_magic| + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, [], :min_format => 2, :section2_magic => section2_magic) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone4', path, @posix_tz_parser) + end + end + end + end + + def test_load_mismatched_section2_magic + minus_one = Proc.new {|f| f == 2 ? 
"TZif\0" : "TZif#{f - 1}" } + plus_one = Proc.new {|f| "TZif#{f + 1}" } + + [minus_one, plus_one].each do |section2_magic| + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, [], :min_format => 2, :section2_magic => section2_magic) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone5', path, @posix_tz_parser) + end + end + end + end + + def test_load_invalid_format + Tempfile.open('tzinfo-test-zone') do |file| + file.write('Invalid') + file.flush + + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone3', file.path, @posix_tz_parser) + end + end + end + + def test_load_missing_abbrev_null_termination + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}] + + tzif_test(offsets, transitions, :abbrev_separator => '^') do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_out_of_range_abbrev_offsets + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}] + + tzif_test(offsets, transitions, :abbrev_offset_base => 8) do |path, format| + assert_raises(InvalidZoneinfoFile) do + ZoneinfoTimezoneInfo.new('Zone', path, @posix_tz_parser) + end + end + end + + def test_load_before_epoch + # Some platforms don't support negative timestamps for times before the + # epoch. Check that they are returned when supported and skipped when not. + + # Note the last transition before the epoch (and within the 32-bit range) is + # moved to the epoch on platforms that do not support negative timestamps. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -694224000, :offset_index => 1}, # Time.utc(1948, 1, 2) + {:at => -21945600, :offset_index => 2}, # Time.utc(1969, 4, 22) + {:at => Time.utc(1970, 10, 21), :offset_index => 1}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/Negative', path, @posix_tz_parser) + assert_equal('Zone/Negative', info.identifier) + + if SUPPORTS_NEGATIVE + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1948, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1948, 1, 2), Time.utc(1969, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1969, 4, 22), Time.utc(1970, 10, 21), info) + else + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1970, 1, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1970, 1, 1), Time.utc(1970, 10, 21), info) + end + + assert_period(:XST, 3600, 0, false, Time.utc(1970, 10, 21), Time.utc(2000, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2000, 12, 31), nil, info) + end + end + + def test_load_on_epoch + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -694224000, :offset_index => 1}, # Time.utc(1948, 1, 2) + {:at => -21945600, :offset_index => 2}, # Time.utc(1969, 4, 22) + {:at => Time.utc(1970, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 12, 31), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/Negative', path, @posix_tz_parser) + assert_equal('Zone/Negative', info.identifier) + + if SUPPORTS_NEGATIVE + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1948, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1948, 1, 2), Time.utc(1969, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(1969, 4, 22), Time.utc(1970, 1, 1), info) + else + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1970, 1, 1), info) + end + + assert_period(:XST, 3600, 0, false, Time.utc(1970, 1, 1), Time.utc(2000, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2000, 12, 31), nil, info) + end + end + + def test_load_64bit + # The TZif version2 and later format contains both 32-bit and 64-bit + # sections. The 64-bit section is always used. + # + # Negative transitions before the supported range are moved to the start of + # the supported range. + # + # Transitions after 2**31 - 1 are discarded if 64-bit times aren't + # supported. 
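+    #
+    # In the files written by write_tzif, the 64-bit section stores transition
+    # times as signed 64-bit big-endian values (pack_int64_signed_network_order),
+    # so the 1850 and 2040 transitions below only appear in the format 2+ data.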
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -3786739200, :offset_index => 1}, # Time.utc(1850, 1, 2) + {:at => Time.utc(2003, 4, 22), :offset_index => 2}, + {:at => Time.utc(2003, 10, 21), :offset_index => 1}, + {:at => 2240524800, :offset_index => 3}] # Time.utc(2040, 12, 31) + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SixtyFour', path, @posix_tz_parser) + assert_equal('Zone/SixtyFour', info.identifier) + + if SUPPORTS_64BIT && SUPPORTS_NEGATIVE && format >= 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1850, 1, 2), info) + assert_period(:XST, 3600, 0, false, Time.utc(1850, 1, 2), Time.utc(2003, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2003, 4, 22), Time.utc(2003, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(2003, 10, 21), Time.utc(2040, 12, 31), info) + assert_period(:XNST, 0, 0, false, Time.utc(2040, 12, 31), nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2003, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2003, 4, 22), Time.utc(2003, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(2003, 10, 21), nil, info) + else + min_supported = SUPPORTS_NEGATIVE ? -2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2003, 4, 22), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2003, 4, 22), Time.utc(2003, 10, 21), info) + assert_period(:XST, 3600, 0, false, Time.utc(2003, 10, 21), nil, info) + end + end + end + + def test_load_64bit_range + # The full range of 64 bit timestamps is not currently supported because of + # the way transitions are indexed. The last transition before the earliest + # supported time will be moved to that time if there isn't already a + # transition at that time. Transitions after the latest supported time are + # ignored. 
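+    #
+    # These tests use Time.utc(1700, 1, 1) as the earliest supported time;
+    # test_load_supported_64bit_range below also uses Time.utc(2500, 1, 1) as
+    # the latest.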
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -2**63, :offset_index => 1}, + {:at => Time.utc(2014, 5, 27), :offset_index => 2}, + {:at => 2**63 - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SixtyFourRange', path, @posix_tz_parser) + assert_equal('Zone/SixtyFourRange', info.identifier) + + if SUPPORTS_64BIT && format >= 2 + # When the full range is supported, the following periods will be defined: + #assert_period(:LMT, 3542, 0, false, nil, Time.at(-2**63).utc, info) + #assert_period(:XST, 3600, 0, false, Time.at(-2**63).utc, Time.utc(2014, 5, 27), info) + #assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(2**63 - 1).utc, info) + #assert_period(:LMT, 3542, 0, false, Time.at(2**63 - 1).utc, nil, info) + + # Without full range support, the following periods will be defined: + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1700, 1, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(1700, 1, 1), Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + else + min_supported = SUPPORTS_NEGATIVE ? -2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + end + end + end + + def test_load_64bit_range_transition_at_earliest_supported + # The full range of 64 bit timestamps is not currently supported because of + # the way transitions are indexed. The last transition before the earliest + # supported time will be moved to that time if there isn't already a + # transition at that time. Transitions after the latest supported time are + # ignored. 
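+    #
+    # In this variant a transition is defined at exactly the earliest supported
+    # time (Time.utc(1700, 1, 1)), so the earlier transition at -2**63 is not
+    # moved; when 64-bit times are supported, XST2 is expected to apply from
+    # 1700 onwards.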
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -2**63, :offset_index => 1}, + {:at => -8520336000, :offset_index => 2}, # Time.utc(1700, 1, 1).to_i + {:at => Time.utc(2014, 5, 27), :offset_index => 3}, + {:at => 2**63 - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SixtyFourRange', path, @posix_tz_parser) + assert_equal('Zone/SixtyFourRange', info.identifier) + + if SUPPORTS_64BIT && format >= 2 + # When the full range is supported, the following periods will be defined: + #assert_period(:LMT, 3542, 0, false, nil, Time.at(-2**63).utc, info) + #assert_period(:XST, 3600, 0, false, Time.at(-2**63).utc, Time.utc(2014, 5, 27), info) + #assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(2**63 - 1).utc, info) + #assert_period(:LMT, 3542, 0, false, Time.at(2**63 - 1).utc, nil, info) + + # Without full range support, the following periods will be defined: + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1700, 1, 1), info) + assert_period(:XST2, 3600, 0, false, Time.utc(1700, 1, 1), Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + elsif SUPPORTS_NEGATIVE + assert_period(:LMT, 3542, 0, false, nil, Time.at(-2**31).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(-2**31).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + else + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1970, 1, 1), info) + assert_period(:XST2, 3600, 0, false, Time.utc(1970, 1, 1), Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + end + end + end + + def test_load_supported_64bit_range + # The full range of 64 bit timestamps is not currently supported because of + # the way transitions are indexed. + + min_timestamp = -8520336000 # Time.utc(1700, 1, 1).to_i + max_timestamp = 16725225600 # Time.utc(2500, 1, 1).to_i + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => min_timestamp, :offset_index => 1}, + {:at => Time.utc(2014, 5, 27), :offset_index => 2}, + {:at => max_timestamp - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SupportedSixtyFourRange', path, @posix_tz_parser) + assert_equal('Zone/SupportedSixtyFourRange', info.identifier) + + if SUPPORTS_64BIT && format >= 2 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_timestamp).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_timestamp).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(max_timestamp - 1).utc, info) + assert_period(:LMT, 3542, 0, false, Time.at(max_timestamp - 1).utc, nil, info) + elsif format < 2 + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + else + min_supported = SUPPORTS_NEGATIVE ? 
-2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), nil, info) + end + end + end + + def test_load_32bit_range + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XNST'}] + + transitions = [ + {:at => -2**31, :offset_index => 1}, + {:at => Time.utc(2014, 5, 27), :offset_index => 2}, + {:at => 2**31 - 1, :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/ThirtyTwoRange', path, @posix_tz_parser) + assert_equal('Zone/ThirtyTwoRange', info.identifier) + + min_supported = SUPPORTS_NEGATIVE ? -2**31 : 0 + assert_period(:LMT, 3542, 0, false, nil, Time.at(min_supported).utc, info) + assert_period(:XST, 3600, 0, false, Time.at(min_supported).utc, Time.utc(2014, 5, 27), info) + assert_period(:XNST, 7200, 0, false, Time.utc(2014, 5, 27), Time.at(2**31 - 1), info) + assert_period(:LMT, 3542, 0, false, Time.at(2**31 - 1).utc, nil, info) + end + end + + def test_load_std_offset_changes + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}, + {:at => Time.utc(2000, 4, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDDT, 3600, 7200, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 4, 1), nil, info) + end + end + + def test_load_std_offset_changes_jump_to_double_dst + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
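# A minimal sketch of the derivation described in the comment above, assuming a
# hypothetical helper name: TZif records only each offset's total UT offset and
# an is-DST flag, so the standard-time component of a DST offset has to be
# inferred from an adjacent standard (non-DST) offset.
def split_dst_offset(dst_total, adjacent_std_total)
  std_offset = dst_total - adjacent_std_total
  [dst_total - std_offset, std_offset]   # [base UTC offset, std offset]
end

split_dst_offset(7200, 3600)   # => [3600, 3600]  XDT in test_load_std_offset_changes
split_dst_offset(10800, 3600)  # => [3600, 7200]  XDDT (double daylight saving)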
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 4, 1), :offset_index => 1}, + {:at => Time.utc(2000, 5, 1), :offset_index => 2}, + {:at => Time.utc(2000, 6, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 4, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XDDT, 3600, 7200, true, Time.utc(2000, 5, 1), Time.utc(2000, 6, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 6, 1), nil, info) + end + end + + def test_load_std_offset_changes_negative + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => -10821, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => -10800, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => -7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => -3600, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}, + {:at => Time.utc(2000, 4, 1), :offset_index => 1}, + {:at => Time.utc(2000, 5, 1), :offset_index => 3}, + {:at => Time.utc(2000, 6, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, -10821, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, -10800, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, -10800, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDDT, -10800, 7200, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST, -10800, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XDDT, -10800, 7200, true, Time.utc(2000, 5, 1), Time.utc(2000, 6, 1), info) + assert_period(:XST, -10800, 0, false, Time.utc(2000, 6, 1), nil, info) + end + end + + def test_load_starts_two_hour_std_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 3}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XDDT, 3600, 7200, true, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_starts_only_dst_transition_with_lmt + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [{:at => Time.utc(2000, 1, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/OnlyDST', path, @posix_tz_parser) + assert_equal('Zone/OnlyDST', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XDT, 3542, 3658, true, Time.utc(2000, 1, 1), nil, info) + end + end + + def test_load_starts_only_dst_transition_without_lmt + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [{:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [{:at => Time.utc(2000, 1, 1), :offset_index => 0}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/OnlyDST', path, @posix_tz_parser) + assert_equal('Zone/OnlyDST', info.identifier) + + assert_period(:XDT, 3600, 3600, true, nil, Time.utc(2000, 1, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 1, 1), nil, info) + end + end + + def test_load_switch_to_dst_and_change_utc_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + # Switch from non-DST to DST at the same time as moving the UTC offset + # back an hour (i.e. wall clock time doesn't change). 
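# A small worked example for the case above, using the figures from the
# transitions defined below: the DST offset's total UT offset (3600) equals the
# preceding standard offset's total, so the change in total offset alone cannot
# supply the std_offset. The behaviour asserted in this test is a one-hour
# std_offset with the base UTC offset reduced to keep the same total, i.e. the
# local wall clock time is unchanged across the transition.
yst_total = 3600                                  # YST, isdst false
xdt_total = 3600                                  # XDT, isdst true, same total
assumed_std_offset = 3600                         # assumption: one hour of DST
base_utc_offset = xdt_total - assumed_std_offset  # => 0
# => matches assert_period(:XDT, 0, 3600, true, ...) in this test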
+ + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'YST'}, + {:gmtoff => 3600, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/DoubleDaylight', path, @posix_tz_parser) + assert_equal('Zone/DoubleDaylight', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:YST, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 0, 3600, true, Time.utc(2000, 2, 1), nil, info) + end + end + + def test_load_apia_international_dateline_change + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + # Pacific/Apia moved across the International Date Line whilst observing + # daylight savings time. + + offsets = [ + {:gmtoff => 45184, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => -39600, :isdst => false, :abbrev => '-11'}, + {:gmtoff => -36000, :isdst => true, :abbrev => '-10'}, + {:gmtoff => 50400, :isdst => true, :abbrev => '+14'}, + {:gmtoff => 46800, :isdst => false, :abbrev => '+13'}] + + transitions = [ + {:at => Time.utc(2011, 4, 2, 14, 0, 0), :offset_index => 1}, + {:at => Time.utc(2011, 9, 24, 14, 0, 0), :offset_index => 2}, + {:at => Time.utc(2011, 12, 30, 10, 0, 0), :offset_index => 3}, + {:at => Time.utc(2012, 3, 31, 14, 0, 0), :offset_index => 4}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Test/Pacific/Apia', path, @posix_tz_parser) + assert_equal('Test/Pacific/Apia', info.identifier) + + assert_period( :LMT, 45184, 0, false, nil, Time.utc(2011, 4, 2, 14, 0, 0), info) + assert_period(:'-11', -39600, 0, false, Time.utc(2011, 4, 2, 14, 0, 0), Time.utc(2011, 9, 24, 14, 0, 0), info) + assert_period(:'-10', -39600, 3600, true, Time.utc(2011, 9, 24, 14, 0, 0), Time.utc(2011, 12, 30, 10, 0, 0), info) + assert_period(:'+14', 46800, 3600, true, Time.utc(2011, 12, 30, 10, 0, 0), Time.utc(2012, 3, 31, 14, 0, 0), info) + assert_period(:'+13', 46800, 0, false, Time.utc(2012, 3, 31, 14, 0, 0), nil, info) + end + end + + def test_load_offset_split_for_different_utc_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}, + {:at => Time.utc(2000, 5, 1), :offset_index => 3}, + {:at => Time.utc(2000, 6, 1), :offset_index => 2}, + {:at => Time.utc(2000, 7, 1), :offset_index => 1}, + {:at => Time.utc(2000, 8, 1), :offset_index => 3}, + {:at => Time.utc(2000, 9, 1), :offset_index => 1}, + {:at => Time.utc(2000, 10, 1), :offset_index => 2}, + {:at => Time.utc(2000, 11, 1), :offset_index => 3}, + {:at => Time.utc(2000, 12, 1), :offset_index => 2}] + + # XDT will be split and defined according to its surrounding standard time + # offsets. 
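# A standalone sketch of the split described above, with an assumed helper
# name: the single raw DST entry (total offset 10800) resolves to different
# (base, std) pairs depending on which standard offset is in force around it.
def derive_dst_pair(dst_total, surrounding_std_total)
  [surrounding_std_total, dst_total - surrounding_std_total]
end

derive_dst_pair(10800, 3600)  # => [3600, 7200]  XDT adjacent to XST1
derive_dst_pair(10800, 7200)  # => [7200, 3600]  XDT adjacent to XST2
# Periods that resolve to the same pair are expected to share a single offset
# object, which is what the assert_same loop over months i and i + 6 checks.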
+ + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/SplitUtcOffset', path, @posix_tz_parser) + assert_equal('Zone/SplitUtcOffset', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 3600, 7200, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 5, 1), Time.utc(2000, 6, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 6, 1), Time.utc(2000, 7, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 7, 1), Time.utc(2000, 8, 1), info) + assert_period( :XDT, 3600, 7200, true, Time.utc(2000, 8, 1), Time.utc(2000, 9, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 9, 1), Time.utc(2000, 10, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 10, 1), Time.utc(2000, 11, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 11, 1), Time.utc(2000, 12, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 12, 1), nil, info) + + 1.upto(6) do |i| + assert_same(info.period_for_utc(Time.utc(2000, i, 1)).offset, info.period_for_utc(Time.utc(2000, i + 6, 1)).offset) + end + end + end + + def test_load_offset_utc_offset_taken_from_minimum_difference_minimum_after + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 2}] + + # XDT should use the closest utc_offset (7200) (and not an equivalent + # utc_offset of 3600 and std_offset of 7200). + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/MinimumUtcOffset', path, @posix_tz_parser) + assert_equal('Zone/MinimumUtcOffset', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_utc_offset_taken_from_minimum_difference_minimum_before + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 2}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + # XDT should use the closest utc_offset (7200) (and not an equivalent + # utc_offset of 3600 and std_offset of 7200). 
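# A sketch of the "closest utc_offset" selection described in the comments
# above, assuming a hypothetical helper: when different standard offsets appear
# on either side of the DST period, the base UTC offset is taken from the
# candidate nearest to the DST total, which also yields the smallest std_offset.
def closest_base_offset(dst_total, candidate_std_totals)
  candidate_std_totals.min_by { |std_total| (dst_total - std_total).abs }
end

base = closest_base_offset(10800, [3600, 7200])  # => 7200
std  = 10800 - base                              # => 3600, as asserted in these tests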
+ + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/MinimumUtcOffset', path, @posix_tz_parser) + assert_equal('Zone/MinimumUtcOffset', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_does_not_use_equal_utc_total_offset_equal_after + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 2}] + + # XDT will be based on the utc_offset of XST1 even though XST2 has an + # equivalent (or greater) utc_total_offset. + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetEqual', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetEqual', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_does_not_use_equal_utc_total_offset_equal_before + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 2}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + # XDT will be based on the utc_offset of XST1 even though XST2 has an + # equivalent (or greater) utc_total_offset. + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetEqual', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetEqual', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period( :XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_both_adjacent_non_dst_equal_utc_total_offset + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
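# A sketch covering the preceding UtcOffsetEqual tests and the AdjacentEqual
# case that follows, with an assumed helper name: a standard offset whose total
# equals the DST period's total is not used as the base (it would otherwise
# leave a zero std_offset for a DST period), so the remaining candidate is
# chosen; if every candidate is excluded this way, the tests show a one-hour
# std_offset being assumed instead.
def base_offset_excluding_equal(dst_total, candidate_std_totals)
  usable = candidate_std_totals.reject { |t| t == dst_total }
  usable.min_by { |t| (dst_total - t).abs }
end

base_offset_excluding_equal(7200, [3600, 7200])  # => 3600, std 3600 (UtcOffsetEqual)
base_offset_excluding_equal(7200, [7200, 7200])  # => nil; AdjacentEqual falls back to base 3600, std 3600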
+ + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}] + + # XDT will just assume an std_offset of +1 hour and calculate the utc_offset + # from utc_total_offset - std_offset. + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/AdjacentEqual', path, @posix_tz_parser) + assert_equal('Zone/AdjacentEqual', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 7200, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_offset_utc_offset_preserved_from_next + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT1'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT2'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 4}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}] + + # Both XDT1 and XDT2 should both use the closest utc_offset (7200) (and not + # an equivalent utc_offset of 3600 and std_offset of 7200). + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetPreserved', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetPreserved', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT1, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDT2, 7200, 3600, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 4, 1), nil, info) + end + end + + def test_load_offset_utc_offset_preserved_from_previous + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST1'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST2'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT1'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT2'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 2}, + {:at => Time.utc(2000, 2, 1), :offset_index => 3}, + {:at => Time.utc(2000, 3, 1), :offset_index => 4}, + {:at => Time.utc(2000, 4, 1), :offset_index => 1}] + + # Both XDT1 and XDT2 should both use the closest utc_offset (7200) (and not + # an equivalent utc_offset of 3600 and std_offset of 7200). 
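# A sketch of the behaviour checked by the UtcOffsetPreserved tests, with
# assumed names: when consecutive DST offsets share the same total offset, the
# base UTC offset determined for one of them (from its non-DST neighbour)
# carries over to the other rather than being re-derived in isolation.
def propagate_base(dst_totals, determined_base)
  dst_totals.map { |total| [determined_base, total - determined_base] }
end

propagate_base([10800, 10800], 7200)  # => [[7200, 3600], [7200, 3600]]  XDT1 and XDT2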
+ + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/UtcOffsetPreserved', path, @posix_tz_parser) + assert_equal('Zone/UtcOffsetPreserved', info.identifier) + + assert_period( :LMT, 3542, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST2, 7200, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT1, 7200, 3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XDT2, 7200, 3600, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST1, 3600, 0, false, Time.utc(2000, 4, 1), nil, info) + end + end + + def test_read_offset_negative_std_offset_dst + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. + + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 0, :isdst => true, :abbrev => 'XWT'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}, + {:at => Time.utc(2000, 5, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/NegativeStdOffsetDst', path, @posix_tz_parser) + assert_equal('Zone/NegativeStdOffsetDst', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 5, 1), nil, info) + end + end + + def test_read_offset_negative_std_offset_dst_initial_dst + # The zoneinfo files don't include the offset from standard time, so this + # has to be derived by looking at changes in the total UTC offset. 
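# A worked example for the negative ("winter time") case covered by these two
# tests: the DST offset's total UT offset is less than the adjacent standard
# offset's total, so the derived std_offset is negative while the base UTC
# offset stays at the standard value.
xst_total = 3600                          # XST, isdst false
xwt_total = 0                             # XWT, isdst true
std_offset = xwt_total - xst_total        # => -3600
base_utc_offset = xwt_total - std_offset  # => 3600
# => matches assert_period(:XWT, 3600, -3600, true, ...) in both tests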
+ + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 0, :isdst => true, :abbrev => 'XWT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 1}, + {:at => Time.utc(2000, 4, 1), :offset_index => 2}, + {:at => Time.utc(2000, 5, 1), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/NegativeStdOffsetDstInitialDst', path, @posix_tz_parser) + assert_equal('Zone/NegativeStdOffsetDstInitialDst', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 3, 1), Time.utc(2000, 4, 1), info) + assert_period(:XST, 3600, 0, false, Time.utc(2000, 4, 1), Time.utc(2000, 5, 1), info) + assert_period(:XWT, 3600, -3600, true, Time.utc(2000, 5, 1), nil, info) + end + end + + def test_read_offset_prefer_base_offset_moves_to_dst_not_hour + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 1800, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 1800, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/BaseOffsetMovesToDstNotHour', path, @posix_tz_parser) + assert_equal('Zone/BaseOffsetMovesToDstNotHour', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 0, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 0, 1800, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 1800, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_read_offset_prefer_base_offset_moves_from_dst_not_hour + offsets = [ + {:gmtoff => -100, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 1800, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 1800, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 0, :isdst => false, :abbrev => 'XST'}] + + transitions = [ + {:at => Time.utc(2000, 1, 1), :offset_index => 1}, + {:at => Time.utc(2000, 2, 1), :offset_index => 2}, + {:at => Time.utc(2000, 3, 1), :offset_index => 3}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/BaseOffsetMovesFromDstNotHour', path, @posix_tz_parser) + assert_equal('Zone/BaseOffsetMovesFromDstNotHour', info.identifier) + + assert_period(:LMT, -100, 0, false, nil, Time.utc(2000, 1, 1), info) + assert_period(:XST, 1800, 0, false, Time.utc(2000, 1, 1), Time.utc(2000, 2, 1), info) + assert_period(:XDT, 0, 1800, true, Time.utc(2000, 2, 1), Time.utc(2000, 3, 1), info) + assert_period(:XST, 0, 0, false, Time.utc(2000, 3, 1), nil, info) + end + end + + def test_load_in_safe_mode + offsets = [{:gmtoff => -12094, :isdst => false, :abbrev => 'LT'}] + + tzif_test(offsets, []) do |path, format| + # untaint only required for Ruby 1.9.2 + path.untaint + + safe_test do + info = ZoneinfoTimezoneInfo.new('Zone/three', path, @posix_tz_parser) + 
assert_equal('Zone/three', info.identifier) + + assert_period(:LT, -12094, 0, false, nil, nil, info) + end + end + end + + def test_load_encoding + # tzfile.5 doesn't specify an encoding, but the source data is in ASCII. + # ZoneinfoTimezoneInfo will load as UTF-8 (a superset of ASCII). + + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST©'}] + + transitions = [ + {:at => Time.utc(1971, 1, 2), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(1971, 1, 2), info) + assert_period(:"XST©", 3600, 0, false, Time.utc(1971, 1, 2), nil, info) + end + end + + def test_load_binmode + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}] + + # Transition time that includes CRLF (4EFF0D0A). + # Test that this doesn't get corrupted by translating CRLF to LF. + transitions = [ + {:at => Time.utc(2011, 12, 31, 13, 24, 26), :offset_index => 1}] + + tzif_test(offsets, transitions) do |path, format| + info = ZoneinfoTimezoneInfo.new('Zone/One', path, @posix_tz_parser) + assert_equal('Zone/One', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2011, 12, 31, 13, 24, 26), info) + assert_period(:XST, 3600, 0, false, Time.utc(2011, 12, 31, 13, 24, 26), nil, info) + end + end + + def test_load_invalid_tz_string + offsets = [{:gmtoff => 0, :isdst => false, :abbrev => 'UTC'}] + + tzif_test(offsets, [], :rules => :fail) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Invalid/String', path, @posix_tz_parser) } + assert_equal("Failed to parse POSIX-style TZ string in file '#{path}': FakePosixTimeZoneParser Failure.", error.message) + end + end + + def test_load_tz_string_missing_start_newline + offsets = [{:gmtoff => 0, :isdst => false, :abbrev => 'UTC'}] + rules = TimezoneOffset.new(0, 0, 'UTC') + + tzif_test(offsets, [], :rules => rules, :omit_tz_string_start_new_line => true) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Missing/Start', path, @posix_tz_parser) } + assert_equal("Expected newline starting POSIX-style TZ string in file '#{path}'.", error.message) + end + end + + def test_load_tz_string_missing_end_newline + offsets = [{:gmtoff => 0, :isdst => false, :abbrev => 'UTC'}] + rules = TimezoneOffset.new(0, 0, 'UTC') + + tzif_test(offsets, [], :rules => rules, :omit_tz_string_end_new_line => true) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Missing/End', path, @posix_tz_parser) } + assert_equal("Expected newline ending POSIX-style TZ string in file '#{path}'.", error.message) + end + end + + [ + [false, 1, 0, 'TEST'], + [false, 0, 1, 'TEST'], + [false, 0, 0, 'TEST2'], + [false, -1, 1, 'TEST'], + [true, 0, 0, 'TEST'] + ].each do |(isdst, base_utc_offset, std_offset, abbreviation)| + define_method "test_load_tz_string_does_not_match_#{isdst ? 
'dst' : 'std'}_constant_offset_#{base_utc_offset}_#{std_offset}_#{abbreviation}" do + offsets = [{:gmtoff => 0, :isdst => isdst, :abbrev => 'TEST'}] + rules = TimezoneOffset.new(base_utc_offset, std_offset, abbreviation) + + tzif_test(offsets, [], :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Offset/Mismatch', path, @posix_tz_parser) } + assert_equal("Constant offset POSIX-style TZ string does not match constant offset in file '#{path}'.", error.message) + end + end + end + + [ + [3601, 'XST'], + [3600, 'YST'] + ].each do |(base_utc_offset, abbreviation)| + define_method "test_load_tz_string_does_not_match_final_std_transition_offset_#{base_utc_offset}_#{abbreviation}" do + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 3542, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 2, 0, 0) - 3600, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 7200, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(base_utc_offset, 0, abbreviation), + TimezoneOffset.new(3600, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 7200), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Offset/Mismatch', path, @posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + end + + [ + [3601, 0, 'XST'], + [3600, 1, 'XST'], + [3600, 0, 'YST'] + ].each do |(base_utc_offset, std_offset, abbreviation)| + define_method "test_load_tz_string_does_not_match_final_dst_transition_offset_#{base_utc_offset}_#{std_offset}_#{abbreviation}" do + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 3600, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 3542, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 2, 0, 0) - 3600, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(3600, 0, 'XST'), + TimezoneOffset.new(base_utc_offset, std_offset, abbreviation), + JulianDayOfYearTransitionRule.new(100, 7200), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Offset/Mismatch', path, @posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + end + + [ + [3600, 0], + [3600, 3600], + [10800, -3600] + ].each do |(base_utc_offset, std_offset)| + rules = TimezoneOffset.new(base_utc_offset, std_offset, 'TEST') + + define_method "test_load_tz_string_uses_constant_offset_with_no_transitions_#{base_utc_offset}_#{std_offset}" do + offsets = [{:gmtoff => base_utc_offset + std_offset, :isdst => std_offset != 0, :abbrev => 'TEST'}] + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Constant/Offset', path, @posix_tz_parser) + assert_equal('Constant/Offset', info.identifier) + assert_period(:TEST, base_utc_offset, std_offset, 
std_offset != 0, nil, nil, info) + end + end + + define_method "test_load_tz_string_uses_constant_offset_after_last_transition_#{base_utc_offset}_#{std_offset}" do + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => base_utc_offset + std_offset, :isdst => std_offset != 0, :abbrev => 'TEST'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 3542, :offset_index => 1} + ] + + t0 = Time.utc(1971, 1, 2, 2, 0, 0) - 3542 + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Constant/After', path, @posix_tz_parser) + assert_equal('Constant/After', info.identifier) + assert_period(:LMT, 3542, 0, false, nil, t0, info) + assert_period(:TEST, base_utc_offset, std_offset, std_offset != 0, t0, nil, info) + end + end + end + + def test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined + offsets = [{:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:XST, 7200, 0, false, nil, Time.utc(1970, 4, 10, 1, 0, 0) - 7200, info) + + 1970.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined_omitting_first_if_matches_first_offset + offsets = [{:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'}] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:XDT, 7200, 3600, true, nil, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, Time.utc(1971, 4, 10, 1, 0, 0) - 7200, info) + + 1971.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def 
test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined_with_previous_offset_of_first_matching_first_offset + offsets = [{:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1970, 4, 10, 1, 0, 0) - 7200, info) + + 1970.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_uses_rules_to_generate_all_transitions_when_none_defined_correcting_initial_offset + offsets = [{:gmtoff => 10800, :isdst => true, :abbrev => 'XDDT'}] + + rules = AnnualRules.new( + TimezoneOffset.new(3600, 0, 'XST'), + TimezoneOffset.new(3600, 7200, 'XDDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, [], :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('All/Rules', path, @posix_tz_parser) + assert_equal('All/Rules', info.identifier) + + assert_period(:XDDT, 3600, 7200, true, nil, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, info) # would be :XDT, 7200, 3600 otherwise + assert_period(:XST, 3600, 0, false, Time.utc(1970, 10, 27, 2, 0, 0) - 10800, Time.utc(1971, 4, 10, 1, 0, 0) - 3600, info) + + 1971.upto(generate_up_to - 1).each do |year| + assert_period(:XDDT, 3600, 7200, true, Time.utc(year, 4, 10, 1, 0, 0) - 3600, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 3600, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 3600, info) + end + + assert_period(:XDDT, 3600, 7200, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 3600, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 3600, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_std_to_dst_following_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, 
transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, Time.utc(1981, 4, 10, 1, 0, 0) - 7200, info) + + 1981.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_dst_to_std_same_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 4, 10, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, Time.utc(1981, 4, 10, 1, 0, 0) - 7200, info) + + 1981.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_dst_to_std_following_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 1, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 10, 27, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1982, 4, 10, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 10, 27, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(300, 3600), + JulianDayOfYearTransitionRule.new(100, 7200) + ) + + generate_up_to 
= ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, Time.utc(1982, 4, 10, 2, 0, 0) - 10800, info) + + 1982.upto(generate_up_to - 1).each do |year| + assert_period(:XST, 7200, 0, false, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_starting_from_std_to_dst_same_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 1, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 10, 27, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1982, 4, 10, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 10, 27, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1983, 4, 10, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(300, 3600), + JulianDayOfYearTransitionRule.new(100, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 1, 0, 0) - 7142, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(1981, 10, 27, 1, 0, 0) - 7200, Time.utc(1982, 4, 10, 2, 0, 0) - 10800, info) + + 1982.upto(generate_up_to - 1).each do |year| + assert_period(:XST, 7200, 0, false, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_transitions_negative_dst + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}, + {:gmtoff => 10800, :isdst => false, :abbrev => 'XST'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => 
Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(10800, 0, 'XST'), + TimezoneOffset.new(10800, -3600, 'XDT'), + JulianDayOfYearTransitionRule.new(300, 7200), + JulianDayOfYearTransitionRule.new(100, 3600) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XDT, 10800, -3600, true, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, Time.utc(1981, 4, 10, 1, 0, 0) - 7200, info) + + 1981.upto(generate_up_to - 1).each do |year| + assert_period(:XST, 10800, 0, false, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 10800, -3600, true, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XST, 10800, 0, false, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XDT, 10800, -3600, true, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_extends_single_transition_in_final_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + transitions = [ + {:at => Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(generate_up_to - 1, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(generate_up_to - 1, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Final/Year', path, @posix_tz_parser) + assert_equal('Final/Year', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, Time.utc(generate_up_to - 1, 4, 10, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to - 1, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to - 1, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to - 1, 10, 27, 2, 0, 0) - 10800, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_adds_nothing_if_transitions_up_to_final_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + transitions = [ + {:at => Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + 
{:at => Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Final/Year', path, @posix_tz_parser) + assert_equal('Final/Year', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc( 1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + + def test_load_tz_string_corrects_offset_of_final_transition + offsets = [ + {:gmtoff => 3542, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => true, :abbrev => 'XDT'}] + + transitions = [{:at => Time.utc(2000, 4, 10, 1, 0, 0) - 3542, :offset_index => 1}] + + rules = AnnualRules.new( + TimezoneOffset.new(3600, 0, 'XST'), + TimezoneOffset.new(3600, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Correct/Final', path, @posix_tz_parser) + assert_equal('Correct/Final', info.identifier) + + assert_period(:LMT, 3542, 0, false, nil, Time.utc(2000, 4, 10, 1, 0, 0) - 3542, info) + assert_period(:XDT, 3600, 3600, true, Time.utc(2000, 4, 10, 1, 0, 0) - 3542, Time.utc(2000, 10, 27, 2, 0, 0) - 7200, info) # would be :XDT, 3542, 3658 without tz_string + assert_period(:XST, 3600, 0, false, Time.utc(2000, 10, 27, 2, 0, 0) - 7200, Time.utc(2001, 4, 10, 1, 0, 0) - 3600, info) + + 2001.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 3600, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 3600, Time.utc(year, 10, 27, 2, 0, 0) - 7200, info) + assert_period(:XST, 3600, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 7200, Time.utc(year + 1, 4, 10, 1, 0, 0) - 3600, info) + end + + assert_period(:XDT, 3600, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 3600, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 7200, info) + assert_period(:XST, 3600, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 7200, nil, info) + end + end + + def test_load_tz_string_specifies_transition_to_offset_of_final_transition_same_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 10800, :offset_index => 1}, + {:at => Time.utc(1982, 4, 10, 1, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(101, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Invalid/Offset', path, 
@posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + + def test_load_tz_string_specifies_transition_to_offset_of_final_transition_following_year + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1981, 10, 27, 2, 0, 0) - 7200, :offset_index => 2} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(299, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + error = assert_raises(InvalidZoneinfoFile) { ZoneinfoTimezoneInfo.new('Invalid/Offset', path, @posix_tz_parser) } + assert_equal("The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{path}'.", error.message) + end + end + + unless SUPPORTS_64BIT + def test_generate_up_to_limited_to_2037_with_no_64_bit_support + assert_equal(2037, ZoneinfoTimezoneInfo::GENERATE_UP_TO) + end + + def test_load_tz_string_does_not_generate_if_transition_after_32_bit_range_with_no_64_bit_support + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1971, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => 2154474000 - 7200, :offset_index => 2}, # Time.utc(2038, 4, 10, 1, 0, 0).to_i + {:at => 2171757600 - 10800, :offset_index => 1} # Time.utc(2038, 10, 27, 2, 0, 0).to_i + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('From/Rules', path, @posix_tz_parser) + assert_equal('From/Rules', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, info) + assert_period(:XST, 7200, 0, false, Time.utc(1971, 1, 2, 2, 0, 0) - 7142, nil, info) + end + end + end + + if SUPPORTS_NEGATIVE + def test_load_tz_string_generates_from_last_transition_if_before_1970_and_supports_negative + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => Time.utc(1961, 1, 2, 2, 0, 0) - 7142, :offset_index => 1}, + {:at => Time.utc(1962, 4, 10, 1, 0, 0) - 7200, :offset_index => 2}, + {:at => Time.utc(1962, 10, 27, 2, 0, 0) - 10800, :offset_index => 1} + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Pre/1970', path, @posix_tz_parser) + assert_equal('Pre/1970', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1961, 1, 2, 2, 0, 0) - 7142, info) 
+ assert_period(:XST, 7200, 0, false, Time.utc(1961, 1, 2, 2, 0, 0) - 7142, Time.utc(1962, 4, 10, 1, 0, 0) - 7200, info) + + 1962.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + else + def test_load_tz_string_generates_from_1970_if_last_transition_before_1970_and_does_not_support_negative + offsets = [ + {:gmtoff => 7142, :isdst => false, :abbrev => 'LMT'}, + {:gmtoff => 7200, :isdst => false, :abbrev => 'XST'}, + {:gmtoff => 10800, :isdst => true, :abbrev => 'XDT'} + ] + + transitions = [ + {:at => -283903200 - 7142, :offset_index => 1}, # Time.utc(1961, 1, 2, 2, 0, 0) + {:at => -243903600 - 7200, :offset_index => 2}, # Time.utc(1962, 4, 10, 1, 0, 0) + {:at => -226620000 - 10800, :offset_index => 1} # Time.utc(1962, 10, 27, 2, 0, 0) + ] + + rules = AnnualRules.new( + TimezoneOffset.new(7200, 0, 'XST'), + TimezoneOffset.new(7200, 3600, 'XDT'), + JulianDayOfYearTransitionRule.new(100, 3600), + JulianDayOfYearTransitionRule.new(300, 7200) + ) + + generate_up_to = ZoneinfoTimezoneInfo::GENERATE_UP_TO + + tzif_test(offsets, transitions, :rules => rules) do |path, format| + info = ZoneinfoTimezoneInfo.new('Pre/1970', path, @posix_tz_parser) + assert_equal('Pre/1970', info.identifier) + + assert_period(:LMT, 7142, 0, false, nil, Time.utc(1970, 1, 1), info) + assert_period(:XST, 7200, 0, false, Time.utc(1970, 1, 1), Time.utc(1970, 4, 10, 1, 0, 0) - 7200, info) + + 1970.upto(generate_up_to - 1).each do |year| + assert_period(:XDT, 7200, 3600, true, Time.utc(year, 4, 10, 1, 0, 0) - 7200, Time.utc(year, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(year, 10, 27, 2, 0, 0) - 10800, Time.utc(year + 1, 4, 10, 1, 0, 0) - 7200, info) + end + + assert_period(:XDT, 7200, 3600, true, Time.utc(generate_up_to, 4, 10, 1, 0, 0) - 7200, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, info) + assert_period(:XST, 7200, 0, false, Time.utc(generate_up_to, 10, 27, 2, 0, 0) - 10800, nil, info) + end + end + end + + def test_load_tz_string_as_utf8 + offsets = [{:gmtoff => 3600, :isdst => false, :abbrev => 'áccént'}] + rules = TimezoneOffset.new(3600, 0, 'áccént') + + tzif_test(offsets, [], :tz_string => '<áccént>1', :rules => rules) do |path, format| + # FakePosixTimeZoneParser will test that the tz_string matches. 
+ info = ZoneinfoTimezoneInfo.new('Test/Utf8', path, @posix_tz_parser) + assert_period(:'áccént', 3600, 0, false, nil, nil, info) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/test_utils.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/test_utils.rb new file mode 100644 index 0000000..c97facf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/test_utils.rb @@ -0,0 +1,192 @@ +tests_dir = File.expand_path(File.dirname(__FILE__)) +tests_dir.untaint if RUBY_VERSION < '2.7' +TESTS_DIR = tests_dir +TZINFO_LIB_DIR = File.expand_path(File.join(TESTS_DIR, '..', 'lib')) +TZINFO_TEST_DATA_DIR = File.join(TESTS_DIR, 'tzinfo-data') +TZINFO_TEST_ZONEINFO_DIR = File.join(TESTS_DIR, 'zoneinfo') + +$:.unshift(TZINFO_LIB_DIR) unless $:.include?(TZINFO_LIB_DIR) + +# tzinfo-data contains a cut down copy of tzinfo-data for use in the tests. +# Add it to the load path. +$:.unshift(TZINFO_TEST_DATA_DIR) unless $:.include?(TZINFO_TEST_DATA_DIR) + +require 'minitest/autorun' +require 'tzinfo' +require 'fileutils' +require 'rbconfig' + +module TestUtils + ZONEINFO_SYMLINKS = [ + ['localtime', 'America/New_York'], + ['UTC', 'Etc/UTC']] + + + def self.prepare_test_zoneinfo_dir + ZONEINFO_SYMLINKS.each do |file, target| + path = File.join(TZINFO_TEST_ZONEINFO_DIR, file) + + File.delete(path) if File.exist?(path) + + begin + FileUtils.ln_s(target, path) + rescue NotImplementedError, Errno::EACCES + # Symlinks not supported on this platform, or permission denied + # (administrative rights are required on Windows). Copy instead. + target_path = File.join(TZINFO_TEST_ZONEINFO_DIR, target) + FileUtils.cp(target_path, path) + end + end + end +end + +TestUtils.prepare_test_zoneinfo_dir + + + +module Kernel + # Suppresses any warnings raised in a specified block. + def without_warnings + old_verbose = $VERBOSE + begin + $VERBOSE = nil + yield + ensure + $-v = old_verbose + end + end + + def safe_test(options = {}) + # Ruby >= 2.7, JRuby and Rubinus don't support SAFE levels. + available = RUBY_VERSION < '2.7' && !(defined?(RUBY_ENGINE) && %w(jruby rbx).include?(RUBY_ENGINE)) + + if available || options[:unavailable] != :skip + thread = Thread.new do + orig_diff = Minitest::Assertions.diff + + if available + orig_safe = $SAFE + $SAFE = options[:level] || 1 + end + begin + # Disable the use of external diff tools during safe mode tests (since + # safe mode will prevent their use). The initial value is retrieved + # before activating safe mode because the first time + # Minitest::Assertions.diff is called, it will attempt to find a diff + # tool. Finding the diff tool will also fail in safe mode. + Minitest::Assertions.diff = nil + begin + yield + ensure + Minitest::Assertions.diff = orig_diff + end + ensure + if available + # On Ruby < 2.6, setting $SAFE affects only the current thread + # and the $SAFE level cannot be downgraded. Catch and ignore the + # SecurityError. + # On Ruby >= 2.6, setting $SAFE is global, and the $SAFE level + # can be downgraded. Restore $SAFE back to the original level. + begin + $SAFE = orig_safe + rescue SecurityError + end + end + end + end + + thread.join + end + end + + def skip_if_has_bug_14060 + # On Ruby 2.4.4 in safe mode, require will fail with a SecurityError for + # any file that has not previously been loaded, regardless of whether the + # file name is tainted. + # See https://bugs.ruby-lang.org/issues/14060#note-5. 
+ if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'ruby' && RUBY_VERSION == '2.4.4' + skip('Skipping test due to Ruby 2.4.4 being affected by Bug 14060 (see https://bugs.ruby-lang.org/issues/14060#note-5)') + end + end + + def assert_array_same_items(expected, actual, msg = nil) + full_message = message(msg, '') { diff(expected, actual) } + condition = (expected.size == actual.size) && (expected - actual == []) + assert(condition, full_message) + end + + def assert_sub_process_returns(expected_lines, code, extra_load_path = [], required = ['tzinfo']) + if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'jruby' && JRUBY_VERSION.start_with?('9.0.') && RbConfig::CONFIG['host_os'] =~ /mswin/ + skip('JRuby 9.0 on Windows cannot handle writing to the IO instance returned by popen') + end + + ruby = File.join(RbConfig::CONFIG['bindir'], + RbConfig::CONFIG['ruby_install_name'] + RbConfig::CONFIG['EXEEXT']) + + load_path = [TZINFO_LIB_DIR] + extra_load_path + + # If RubyGems is loaded in the current process, then require it in the + # sub-process, as it may be needed in order to require dependencies. + if defined?(Gem) && Gem.instance_of?(Module) + required = ['rubygems'] + required + end + + if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' + # Stop Rubinus from operating as irb. + args = ' -' + else + args = '' + end + + IO.popen("\"#{ruby}\"#{args}", 'r+') do |process| + load_path.each do |p| + process.puts("$:.unshift('#{p.gsub("'", "\\\\'")}')") + end + + required.each do |r| + process.puts("require '#{r.gsub("'", "\\\\'")}'") + end + + process.puts(code) + process.flush + process.close_write + + actual_lines = process.readlines + actual_lines = actual_lines.collect {|l| l.chomp} + assert_equal(expected_lines, actual_lines) + end + end + + def assert_nothing_raised(msg = nil) + begin + yield + rescue => e + full_message = message(msg) { exception_details(e, 'Exception raised: ') } + assert(false, full_message) + end + end +end + + +# Object#taint is a deprecated no-op in Ruby 2.7 and outputs a warning. It will +# be removed in 3.2. Silence the warning or supply a replacement. +if TZInfo::RubyCoreSupport.const_defined?(:UntaintExt) + module TaintExt + refine Object do + def taint + self + end + end + end +end + + +# JRuby 1.7.5 to 1.7.9 consider DateTime instances that differ by less than +# 1 millisecond to be equivalent (https://github.com/jruby/jruby/issues/1311). +# +# A few test cases compare at a resolution of 1 microsecond, so this causes +# failures on JRuby 1.7.5 to 1.7.9. +# +# Determine what the platform supports and adjust the tests accordingly. +DATETIME_RESOLUTION = (0..5).collect {|i| 10**i}.find {|i| (DateTime.new(2013,1,1,0,0,0) <=> DateTime.new(2013,1,1,0,0,Rational(i,1000000))) < 0} +raise 'Unable to compare DateTimes at a resolution less than one second on this platform' unless DATETIME_RESOLUTION diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all.rb new file mode 100644 index 0000000..f90836d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all.rb @@ -0,0 +1,7 @@ +# Force a particular timezone to be local (helps find issues when local +# timezone isn't GMT). This won't work on Windows. 
+ENV['TZ'] = 'America/Los_Angeles' + +Dir[File.join(File.expand_path(File.dirname(__FILE__)), 'tc_*.rb')].each {|t| require t} + +puts "Using #{DataSource.get}" diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all_ruby.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all_ruby.rb new file mode 100644 index 0000000..5b822bf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all_ruby.rb @@ -0,0 +1,5 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils.rb') + +TZInfo::DataSource.set(:ruby) + +require File.join(File.expand_path(File.dirname(__FILE__)), 'ts_all.rb') diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all_zoneinfo.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all_zoneinfo.rb new file mode 100644 index 0000000..ae1ba15 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/ts_all_zoneinfo.rb @@ -0,0 +1,9 @@ +require File.join(File.expand_path(File.dirname(__FILE__)), 'test_utils.rb') + +# Use a zoneinfo directory containing files needed by the tests. +# The symlinks in this directory are set up in test_utils.rb. +zoneinfo_path = File.join(File.expand_path(File.dirname(__FILE__)), 'zoneinfo') +zoneinfo_path.untaint if RUBY_VERSION < '2.7' +TZInfo::DataSource.set(:zoneinfo, zoneinfo_path) + +require File.join(File.expand_path(File.dirname(__FILE__)), 'ts_all.rb') diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data.rb new file mode 100644 index 0000000..6c3721f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data.rb @@ -0,0 +1,8 @@ +# Top level module for TZInfo. +module TZInfo + # Top level module for TZInfo::Data. + module Data + end +end + +require 'tzinfo/data/version' diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/Argentina/Buenos_Aires.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/Argentina/Buenos_Aires.rb new file mode 100644 index 0000000..19853ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/Argentina/Buenos_Aires.rb @@ -0,0 +1,89 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module America + module Argentina + module Buenos_Aires + include TimezoneDefinition + + timezone 'America/Argentina/Buenos_Aires' do |tz| + tz.offset :o0, -14028, 0, :LMT + tz.offset :o1, -15408, 0, :CMT + tz.offset :o2, -14400, 0, :'-04' + tz.offset :o3, -14400, 3600, :'-03' + tz.offset :o4, -10800, 0, :'-03' + tz.offset :o5, -10800, 3600, :'-02' + + tz.transition 1894, 10, :o1, -2372097972, 17374555169, 7200 + tz.transition 1920, 5, :o2, -1567453392, 1453467407, 600 + tz.transition 1930, 12, :o3, -1233432000, 7278935, 3 + tz.transition 1931, 4, :o2, -1222981200, 19411461, 8 + tz.transition 1931, 10, :o3, -1205956800, 7279889, 3 + tz.transition 1932, 3, :o2, -1194037200, 19414141, 8 + tz.transition 1932, 11, :o3, -1172865600, 7281038, 3 + tz.transition 1933, 3, :o2, -1162501200, 19417061, 8 + tz.transition 1933, 11, :o3, -1141329600, 7282133, 3 + tz.transition 1934, 3, :o2, -1130965200, 19419981, 8 + tz.transition 1934, 11, :o3, -1109793600, 7283228, 3 + tz.transition 1935, 3, :o2, -1099429200, 19422901, 8 + tz.transition 1935, 11, :o3, -1078257600, 7284323, 3 + tz.transition 1936, 3, :o2, -1067806800, 19425829, 8 + tz.transition 1936, 11, :o3, -1046635200, 7285421, 3 + tz.transition 1937, 3, :o2, -1036270800, 19428749, 8 + tz.transition 1937, 11, :o3, -1015099200, 7286516, 3 + tz.transition 1938, 3, :o2, -1004734800, 19431669, 8 + tz.transition 1938, 11, :o3, -983563200, 7287611, 3 + tz.transition 1939, 3, :o2, -973198800, 19434589, 8 + tz.transition 1939, 11, :o3, -952027200, 7288706, 3 + tz.transition 1940, 3, :o2, -941576400, 19437517, 8 + tz.transition 1940, 7, :o3, -931032000, 7289435, 3 + tz.transition 1941, 6, :o2, -900882000, 19441285, 8 + tz.transition 1941, 10, :o3, -890337600, 7290848, 3 + tz.transition 1943, 8, :o2, -833749200, 19447501, 8 + tz.transition 1943, 10, :o3, -827265600, 7293038, 3 + tz.transition 1946, 3, :o2, -752274000, 19455045, 8 + tz.transition 1946, 10, :o3, -733780800, 7296284, 3 + tz.transition 1963, 10, :o2, -197326800, 19506429, 8 + tz.transition 1963, 12, :o3, -190843200, 7315136, 3 + tz.transition 1964, 3, :o2, -184194000, 19507645, 8 + tz.transition 1964, 10, :o3, -164491200, 7316051, 3 + tz.transition 1965, 3, :o2, -152658000, 19510565, 8 + tz.transition 1965, 10, :o3, -132955200, 7317146, 3 + tz.transition 1966, 3, :o2, -121122000, 19513485, 8 + tz.transition 1966, 10, :o3, -101419200, 7318241, 3 + tz.transition 1967, 4, :o2, -86821200, 19516661, 8 + tz.transition 1967, 10, :o3, -71092800, 7319294, 3 + tz.transition 1968, 4, :o2, -54766800, 19519629, 8 + tz.transition 1968, 10, :o3, -39038400, 7320407, 3 + tz.transition 1969, 4, :o2, -23317200, 19522541, 8 + tz.transition 1969, 10, :o4, -7588800, 7321499, 3 + tz.transition 1974, 1, :o5, 128142000 + tz.transition 1974, 5, :o4, 136605600 + tz.transition 1988, 12, :o5, 596948400 + tz.transition 1989, 3, :o4, 605066400 + tz.transition 1989, 10, :o5, 624423600 + tz.transition 1990, 3, :o4, 636516000 + tz.transition 1990, 10, :o5, 656478000 + tz.transition 1991, 3, :o4, 667965600 + tz.transition 1991, 10, :o5, 687927600 + tz.transition 1992, 3, :o4, 699415200 + tz.transition 1992, 10, :o5, 719377200 + tz.transition 1993, 3, :o4, 731469600 + tz.transition 1999, 10, :o3, 938919600 + tz.transition 2000, 3, :o4, 952052400 + tz.transition 2007, 12, :o5, 1198983600 + tz.transition 2008, 3, :o4, 1205632800 + tz.transition 2008, 10, :o5, 1224385200 + tz.transition 2009, 3, :o4, 1237082400 + end + end + end + end + end + end +end diff --git 
a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/New_York.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/New_York.rb new file mode 100644 index 0000000..6802f53 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/America/New_York.rb @@ -0,0 +1,327 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module America + module New_York + include TimezoneDefinition + + timezone 'America/New_York' do |tz| + tz.offset :o0, -17762, 0, :LMT + tz.offset :o1, -18000, 0, :EST + tz.offset :o2, -18000, 3600, :EDT + tz.offset :o3, -18000, 3600, :EWT + tz.offset :o4, -18000, 3600, :EPT + + tz.transition 1883, 11, :o1, -2717650800, 57819197, 24 + tz.transition 1918, 3, :o2, -1633280400, 58120411, 24 + tz.transition 1918, 10, :o1, -1615140000, 9687575, 4 + tz.transition 1919, 3, :o2, -1601830800, 58129147, 24 + tz.transition 1919, 10, :o1, -1583690400, 9689031, 4 + tz.transition 1920, 3, :o2, -1570381200, 58137883, 24 + tz.transition 1920, 10, :o1, -1551636000, 9690515, 4 + tz.transition 1921, 4, :o2, -1536512400, 58147291, 24 + tz.transition 1921, 9, :o1, -1523210400, 9691831, 4 + tz.transition 1922, 4, :o2, -1504458000, 58156195, 24 + tz.transition 1922, 9, :o1, -1491760800, 9693287, 4 + tz.transition 1923, 4, :o2, -1473008400, 58164931, 24 + tz.transition 1923, 9, :o1, -1459706400, 9694771, 4 + tz.transition 1924, 4, :o2, -1441558800, 58173667, 24 + tz.transition 1924, 9, :o1, -1428256800, 9696227, 4 + tz.transition 1925, 4, :o2, -1410109200, 58182403, 24 + tz.transition 1925, 9, :o1, -1396807200, 9697683, 4 + tz.transition 1926, 4, :o2, -1378659600, 58191139, 24 + tz.transition 1926, 9, :o1, -1365357600, 9699139, 4 + tz.transition 1927, 4, :o2, -1347210000, 58199875, 24 + tz.transition 1927, 9, :o1, -1333908000, 9700595, 4 + tz.transition 1928, 4, :o2, -1315155600, 58208779, 24 + tz.transition 1928, 9, :o1, -1301853600, 9702079, 4 + tz.transition 1929, 4, :o2, -1283706000, 58217515, 24 + tz.transition 1929, 9, :o1, -1270404000, 9703535, 4 + tz.transition 1930, 4, :o2, -1252256400, 58226251, 24 + tz.transition 1930, 9, :o1, -1238954400, 9704991, 4 + tz.transition 1931, 4, :o2, -1220806800, 58234987, 24 + tz.transition 1931, 9, :o1, -1207504800, 9706447, 4 + tz.transition 1932, 4, :o2, -1189357200, 58243723, 24 + tz.transition 1932, 9, :o1, -1176055200, 9707903, 4 + tz.transition 1933, 4, :o2, -1157302800, 58252627, 24 + tz.transition 1933, 9, :o1, -1144605600, 9709359, 4 + tz.transition 1934, 4, :o2, -1125853200, 58261363, 24 + tz.transition 1934, 9, :o1, -1112551200, 9710843, 4 + tz.transition 1935, 4, :o2, -1094403600, 58270099, 24 + tz.transition 1935, 9, :o1, -1081101600, 9712299, 4 + tz.transition 1936, 4, :o2, -1062954000, 58278835, 24 + tz.transition 1936, 9, :o1, -1049652000, 9713755, 4 + tz.transition 1937, 4, :o2, -1031504400, 58287571, 24 + tz.transition 1937, 9, :o1, -1018202400, 9715211, 4 + tz.transition 1938, 4, :o2, -1000054800, 58296307, 24 + tz.transition 1938, 9, :o1, -986752800, 9716667, 4 + tz.transition 1939, 4, :o2, -968000400, 58305211, 24 + tz.transition 1939, 9, :o1, -955303200, 9718123, 4 + tz.transition 1940, 4, :o2, -936550800, 58313947, 24 + tz.transition 1940, 9, :o1, -923248800, 9719607, 4 + tz.transition 1941, 4, :o2, -905101200, 58322683, 24 + tz.transition 1941, 9, :o1, -891799200, 9721063, 4 + 
tz.transition 1942, 2, :o3, -880218000, 58329595, 24 + tz.transition 1945, 8, :o4, -769395600, 58360379, 24 + tz.transition 1945, 9, :o1, -765396000, 9726915, 4 + tz.transition 1946, 4, :o2, -747248400, 58366531, 24 + tz.transition 1946, 9, :o1, -733946400, 9728371, 4 + tz.transition 1947, 4, :o2, -715798800, 58375267, 24 + tz.transition 1947, 9, :o1, -702496800, 9729827, 4 + tz.transition 1948, 4, :o2, -684349200, 58384003, 24 + tz.transition 1948, 9, :o1, -671047200, 9731283, 4 + tz.transition 1949, 4, :o2, -652899600, 58392739, 24 + tz.transition 1949, 9, :o1, -639597600, 9732739, 4 + tz.transition 1950, 4, :o2, -620845200, 58401643, 24 + tz.transition 1950, 9, :o1, -608148000, 9734195, 4 + tz.transition 1951, 4, :o2, -589395600, 58410379, 24 + tz.transition 1951, 9, :o1, -576093600, 9735679, 4 + tz.transition 1952, 4, :o2, -557946000, 58419115, 24 + tz.transition 1952, 9, :o1, -544644000, 9737135, 4 + tz.transition 1953, 4, :o2, -526496400, 58427851, 24 + tz.transition 1953, 9, :o1, -513194400, 9738591, 4 + tz.transition 1954, 4, :o2, -495046800, 58436587, 24 + tz.transition 1954, 9, :o1, -481744800, 9740047, 4 + tz.transition 1955, 4, :o2, -463597200, 58445323, 24 + tz.transition 1955, 10, :o1, -447271200, 9741643, 4 + tz.transition 1956, 4, :o2, -431542800, 58454227, 24 + tz.transition 1956, 10, :o1, -415821600, 9743099, 4 + tz.transition 1957, 4, :o2, -400093200, 58462963, 24 + tz.transition 1957, 10, :o1, -384372000, 9744555, 4 + tz.transition 1958, 4, :o2, -368643600, 58471699, 24 + tz.transition 1958, 10, :o1, -352922400, 9746011, 4 + tz.transition 1959, 4, :o2, -337194000, 58480435, 24 + tz.transition 1959, 10, :o1, -321472800, 9747467, 4 + tz.transition 1960, 4, :o2, -305744400, 58489171, 24 + tz.transition 1960, 10, :o1, -289418400, 9748951, 4 + tz.transition 1961, 4, :o2, -273690000, 58498075, 24 + tz.transition 1961, 10, :o1, -257968800, 9750407, 4 + tz.transition 1962, 4, :o2, -242240400, 58506811, 24 + tz.transition 1962, 10, :o1, -226519200, 9751863, 4 + tz.transition 1963, 4, :o2, -210790800, 58515547, 24 + tz.transition 1963, 10, :o1, -195069600, 9753319, 4 + tz.transition 1964, 4, :o2, -179341200, 58524283, 24 + tz.transition 1964, 10, :o1, -163620000, 9754775, 4 + tz.transition 1965, 4, :o2, -147891600, 58533019, 24 + tz.transition 1965, 10, :o1, -131565600, 9756259, 4 + tz.transition 1966, 4, :o2, -116442000, 58541755, 24 + tz.transition 1966, 10, :o1, -100116000, 9757715, 4 + tz.transition 1967, 4, :o2, -84387600, 58550659, 24 + tz.transition 1967, 10, :o1, -68666400, 9759171, 4 + tz.transition 1968, 4, :o2, -52938000, 58559395, 24 + tz.transition 1968, 10, :o1, -37216800, 9760627, 4 + tz.transition 1969, 4, :o2, -21488400, 58568131, 24 + tz.transition 1969, 10, :o1, -5767200, 9762083, 4 + tz.transition 1970, 4, :o2, 9961200 + tz.transition 1970, 10, :o1, 25682400 + tz.transition 1971, 4, :o2, 41410800 + tz.transition 1971, 10, :o1, 57736800 + tz.transition 1972, 4, :o2, 73465200 + tz.transition 1972, 10, :o1, 89186400 + tz.transition 1973, 4, :o2, 104914800 + tz.transition 1973, 10, :o1, 120636000 + tz.transition 1974, 1, :o2, 126687600 + tz.transition 1974, 10, :o1, 152085600 + tz.transition 1975, 2, :o2, 162370800 + tz.transition 1975, 10, :o1, 183535200 + tz.transition 1976, 4, :o2, 199263600 + tz.transition 1976, 10, :o1, 215589600 + tz.transition 1977, 4, :o2, 230713200 + tz.transition 1977, 10, :o1, 247039200 + tz.transition 1978, 4, :o2, 262767600 + tz.transition 1978, 10, :o1, 278488800 + tz.transition 1979, 4, :o2, 294217200 + tz.transition 1979, 10, :o1, 
309938400 + tz.transition 1980, 4, :o2, 325666800 + tz.transition 1980, 10, :o1, 341388000 + tz.transition 1981, 4, :o2, 357116400 + tz.transition 1981, 10, :o1, 372837600 + tz.transition 1982, 4, :o2, 388566000 + tz.transition 1982, 10, :o1, 404892000 + tz.transition 1983, 4, :o2, 420015600 + tz.transition 1983, 10, :o1, 436341600 + tz.transition 1984, 4, :o2, 452070000 + tz.transition 1984, 10, :o1, 467791200 + tz.transition 1985, 4, :o2, 483519600 + tz.transition 1985, 10, :o1, 499240800 + tz.transition 1986, 4, :o2, 514969200 + tz.transition 1986, 10, :o1, 530690400 + tz.transition 1987, 4, :o2, 544604400 + tz.transition 1987, 10, :o1, 562140000 + tz.transition 1988, 4, :o2, 576054000 + tz.transition 1988, 10, :o1, 594194400 + tz.transition 1989, 4, :o2, 607503600 + tz.transition 1989, 10, :o1, 625644000 + tz.transition 1990, 4, :o2, 638953200 + tz.transition 1990, 10, :o1, 657093600 + tz.transition 1991, 4, :o2, 671007600 + tz.transition 1991, 10, :o1, 688543200 + tz.transition 1992, 4, :o2, 702457200 + tz.transition 1992, 10, :o1, 719992800 + tz.transition 1993, 4, :o2, 733906800 + tz.transition 1993, 10, :o1, 752047200 + tz.transition 1994, 4, :o2, 765356400 + tz.transition 1994, 10, :o1, 783496800 + tz.transition 1995, 4, :o2, 796806000 + tz.transition 1995, 10, :o1, 814946400 + tz.transition 1996, 4, :o2, 828860400 + tz.transition 1996, 10, :o1, 846396000 + tz.transition 1997, 4, :o2, 860310000 + tz.transition 1997, 10, :o1, 877845600 + tz.transition 1998, 4, :o2, 891759600 + tz.transition 1998, 10, :o1, 909295200 + tz.transition 1999, 4, :o2, 923209200 + tz.transition 1999, 10, :o1, 941349600 + tz.transition 2000, 4, :o2, 954658800 + tz.transition 2000, 10, :o1, 972799200 + tz.transition 2001, 4, :o2, 986108400 + tz.transition 2001, 10, :o1, 1004248800 + tz.transition 2002, 4, :o2, 1018162800 + tz.transition 2002, 10, :o1, 1035698400 + tz.transition 2003, 4, :o2, 1049612400 + tz.transition 2003, 10, :o1, 1067148000 + tz.transition 2004, 4, :o2, 1081062000 + tz.transition 2004, 10, :o1, 1099202400 + tz.transition 2005, 4, :o2, 1112511600 + tz.transition 2005, 10, :o1, 1130652000 + tz.transition 2006, 4, :o2, 1143961200 + tz.transition 2006, 10, :o1, 1162101600 + tz.transition 2007, 3, :o2, 1173596400 + tz.transition 2007, 11, :o1, 1194156000 + tz.transition 2008, 3, :o2, 1205046000 + tz.transition 2008, 11, :o1, 1225605600 + tz.transition 2009, 3, :o2, 1236495600 + tz.transition 2009, 11, :o1, 1257055200 + tz.transition 2010, 3, :o2, 1268550000 + tz.transition 2010, 11, :o1, 1289109600 + tz.transition 2011, 3, :o2, 1299999600 + tz.transition 2011, 11, :o1, 1320559200 + tz.transition 2012, 3, :o2, 1331449200 + tz.transition 2012, 11, :o1, 1352008800 + tz.transition 2013, 3, :o2, 1362898800 + tz.transition 2013, 11, :o1, 1383458400 + tz.transition 2014, 3, :o2, 1394348400 + tz.transition 2014, 11, :o1, 1414908000 + tz.transition 2015, 3, :o2, 1425798000 + tz.transition 2015, 11, :o1, 1446357600 + tz.transition 2016, 3, :o2, 1457852400 + tz.transition 2016, 11, :o1, 1478412000 + tz.transition 2017, 3, :o2, 1489302000 + tz.transition 2017, 11, :o1, 1509861600 + tz.transition 2018, 3, :o2, 1520751600 + tz.transition 2018, 11, :o1, 1541311200 + tz.transition 2019, 3, :o2, 1552201200 + tz.transition 2019, 11, :o1, 1572760800 + tz.transition 2020, 3, :o2, 1583650800 + tz.transition 2020, 11, :o1, 1604210400 + tz.transition 2021, 3, :o2, 1615705200 + tz.transition 2021, 11, :o1, 1636264800 + tz.transition 2022, 3, :o2, 1647154800 + tz.transition 2022, 11, :o1, 1667714400 + tz.transition 
2023, 3, :o2, 1678604400 + tz.transition 2023, 11, :o1, 1699164000 + tz.transition 2024, 3, :o2, 1710054000 + tz.transition 2024, 11, :o1, 1730613600 + tz.transition 2025, 3, :o2, 1741503600 + tz.transition 2025, 11, :o1, 1762063200 + tz.transition 2026, 3, :o2, 1772953200 + tz.transition 2026, 11, :o1, 1793512800 + tz.transition 2027, 3, :o2, 1805007600 + tz.transition 2027, 11, :o1, 1825567200 + tz.transition 2028, 3, :o2, 1836457200 + tz.transition 2028, 11, :o1, 1857016800 + tz.transition 2029, 3, :o2, 1867906800 + tz.transition 2029, 11, :o1, 1888466400 + tz.transition 2030, 3, :o2, 1899356400 + tz.transition 2030, 11, :o1, 1919916000 + tz.transition 2031, 3, :o2, 1930806000 + tz.transition 2031, 11, :o1, 1951365600 + tz.transition 2032, 3, :o2, 1962860400 + tz.transition 2032, 11, :o1, 1983420000 + tz.transition 2033, 3, :o2, 1994310000 + tz.transition 2033, 11, :o1, 2014869600 + tz.transition 2034, 3, :o2, 2025759600 + tz.transition 2034, 11, :o1, 2046319200 + tz.transition 2035, 3, :o2, 2057209200 + tz.transition 2035, 11, :o1, 2077768800 + tz.transition 2036, 3, :o2, 2088658800 + tz.transition 2036, 11, :o1, 2109218400 + tz.transition 2037, 3, :o2, 2120108400 + tz.transition 2037, 11, :o1, 2140668000 + tz.transition 2038, 3, :o2, 2152162800, 59171923, 24 + tz.transition 2038, 11, :o1, 2172722400, 9862939, 4 + tz.transition 2039, 3, :o2, 2183612400, 59180659, 24 + tz.transition 2039, 11, :o1, 2204172000, 9864395, 4 + tz.transition 2040, 3, :o2, 2215062000, 59189395, 24 + tz.transition 2040, 11, :o1, 2235621600, 9865851, 4 + tz.transition 2041, 3, :o2, 2246511600, 59198131, 24 + tz.transition 2041, 11, :o1, 2267071200, 9867307, 4 + tz.transition 2042, 3, :o2, 2277961200, 59206867, 24 + tz.transition 2042, 11, :o1, 2298520800, 9868763, 4 + tz.transition 2043, 3, :o2, 2309410800, 59215603, 24 + tz.transition 2043, 11, :o1, 2329970400, 9870219, 4 + tz.transition 2044, 3, :o2, 2341465200, 59224507, 24 + tz.transition 2044, 11, :o1, 2362024800, 9871703, 4 + tz.transition 2045, 3, :o2, 2372914800, 59233243, 24 + tz.transition 2045, 11, :o1, 2393474400, 9873159, 4 + tz.transition 2046, 3, :o2, 2404364400, 59241979, 24 + tz.transition 2046, 11, :o1, 2424924000, 9874615, 4 + tz.transition 2047, 3, :o2, 2435814000, 59250715, 24 + tz.transition 2047, 11, :o1, 2456373600, 9876071, 4 + tz.transition 2048, 3, :o2, 2467263600, 59259451, 24 + tz.transition 2048, 11, :o1, 2487823200, 9877527, 4 + tz.transition 2049, 3, :o2, 2499318000, 59268355, 24 + tz.transition 2049, 11, :o1, 2519877600, 9879011, 4 + tz.transition 2050, 3, :o2, 2530767600, 59277091, 24 + tz.transition 2050, 11, :o1, 2551327200, 9880467, 4 + tz.transition 2051, 3, :o2, 2562217200, 59285827, 24 + tz.transition 2051, 11, :o1, 2582776800, 9881923, 4 + tz.transition 2052, 3, :o2, 2593666800, 59294563, 24 + tz.transition 2052, 11, :o1, 2614226400, 9883379, 4 + tz.transition 2053, 3, :o2, 2625116400, 59303299, 24 + tz.transition 2053, 11, :o1, 2645676000, 9884835, 4 + tz.transition 2054, 3, :o2, 2656566000, 59312035, 24 + tz.transition 2054, 11, :o1, 2677125600, 9886291, 4 + tz.transition 2055, 3, :o2, 2688620400, 59320939, 24 + tz.transition 2055, 11, :o1, 2709180000, 9887775, 4 + tz.transition 2056, 3, :o2, 2720070000, 59329675, 24 + tz.transition 2056, 11, :o1, 2740629600, 9889231, 4 + tz.transition 2057, 3, :o2, 2751519600, 59338411, 24 + tz.transition 2057, 11, :o1, 2772079200, 9890687, 4 + tz.transition 2058, 3, :o2, 2782969200, 59347147, 24 + tz.transition 2058, 11, :o1, 2803528800, 9892143, 4 + tz.transition 2059, 3, :o2, 
2814418800, 59355883, 24 + tz.transition 2059, 11, :o1, 2834978400, 9893599, 4 + tz.transition 2060, 3, :o2, 2846473200, 59364787, 24 + tz.transition 2060, 11, :o1, 2867032800, 9895083, 4 + tz.transition 2061, 3, :o2, 2877922800, 59373523, 24 + tz.transition 2061, 11, :o1, 2898482400, 9896539, 4 + tz.transition 2062, 3, :o2, 2909372400, 59382259, 24 + tz.transition 2062, 11, :o1, 2929932000, 9897995, 4 + tz.transition 2063, 3, :o2, 2940822000, 59390995, 24 + tz.transition 2063, 11, :o1, 2961381600, 9899451, 4 + tz.transition 2064, 3, :o2, 2972271600, 59399731, 24 + tz.transition 2064, 11, :o1, 2992831200, 9900907, 4 + tz.transition 2065, 3, :o2, 3003721200, 59408467, 24 + tz.transition 2065, 11, :o1, 3024280800, 9902363, 4 + tz.transition 2066, 3, :o2, 3035775600, 59417371, 24 + tz.transition 2066, 11, :o1, 3056335200, 9903847, 4 + tz.transition 2067, 3, :o2, 3067225200, 59426107, 24 + tz.transition 2067, 11, :o1, 3087784800, 9905303, 4 + tz.transition 2068, 3, :o2, 3098674800, 59434843, 24 + tz.transition 2068, 11, :o1, 3119234400, 9906759, 4 + tz.transition 2069, 3, :o2, 3130124400, 59443579, 24 + tz.transition 2069, 11, :o1, 3150684000, 9908215, 4 + tz.transition 2070, 3, :o2, 3161574000, 59452315, 24 + tz.transition 2070, 11, :o1, 3182133600, 9909671, 4 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Australia/Melbourne.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Australia/Melbourne.rb new file mode 100644 index 0000000..6acd078 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Australia/Melbourne.rb @@ -0,0 +1,230 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Australia + module Melbourne + include TimezoneDefinition + + timezone 'Australia/Melbourne' do |tz| + tz.offset :o0, 34792, 0, :LMT + tz.offset :o1, 36000, 0, :AEST + tz.offset :o2, 36000, 3600, :AEDT + + tz.transition 1895, 1, :o1, -2364111592, 26062831051, 10800 + tz.transition 1916, 12, :o2, -1672567140, 3486569881, 1440 + tz.transition 1917, 3, :o1, -1665392400, 19370497, 8 + tz.transition 1941, 12, :o2, -883641600, 14582161, 6 + tz.transition 1942, 3, :o1, -876128400, 19443577, 8 + tz.transition 1942, 9, :o2, -860400000, 14583775, 6 + tz.transition 1943, 3, :o1, -844678800, 19446489, 8 + tz.transition 1943, 10, :o2, -828345600, 14586001, 6 + tz.transition 1944, 3, :o1, -813229200, 19449401, 8 + tz.transition 1971, 10, :o2, 57686400 + tz.transition 1972, 2, :o1, 67968000 + tz.transition 1972, 10, :o2, 89136000 + tz.transition 1973, 3, :o1, 100022400 + tz.transition 1973, 10, :o2, 120585600 + tz.transition 1974, 3, :o1, 131472000 + tz.transition 1974, 10, :o2, 152035200 + tz.transition 1975, 3, :o1, 162921600 + tz.transition 1975, 10, :o2, 183484800 + tz.transition 1976, 3, :o1, 194976000 + tz.transition 1976, 10, :o2, 215539200 + tz.transition 1977, 3, :o1, 226425600 + tz.transition 1977, 10, :o2, 246988800 + tz.transition 1978, 3, :o1, 257875200 + tz.transition 1978, 10, :o2, 278438400 + tz.transition 1979, 3, :o1, 289324800 + tz.transition 1979, 10, :o2, 309888000 + tz.transition 1980, 3, :o1, 320774400 + tz.transition 1980, 10, :o2, 341337600 + tz.transition 1981, 2, :o1, 352224000 + tz.transition 1981, 10, :o2, 372787200 + tz.transition 1982, 3, :o1, 384278400 + tz.transition 1982, 10, :o2, 404841600 + tz.transition 1983, 3, :o1, 415728000 + tz.transition 1983, 10, :o2, 436291200 + tz.transition 1984, 3, :o1, 447177600 + tz.transition 1984, 10, :o2, 467740800 + tz.transition 1985, 3, :o1, 478627200 + tz.transition 1985, 10, :o2, 499190400 + tz.transition 1986, 3, :o1, 511286400 + tz.transition 1986, 10, :o2, 530035200 + tz.transition 1987, 3, :o1, 542736000 + tz.transition 1987, 10, :o2, 561484800 + tz.transition 1988, 3, :o1, 574790400 + tz.transition 1988, 10, :o2, 594144000 + tz.transition 1989, 3, :o1, 606240000 + tz.transition 1989, 10, :o2, 625593600 + tz.transition 1990, 3, :o1, 637689600 + tz.transition 1990, 10, :o2, 657043200 + tz.transition 1991, 3, :o1, 667929600 + tz.transition 1991, 10, :o2, 688492800 + tz.transition 1992, 2, :o1, 699379200 + tz.transition 1992, 10, :o2, 719942400 + tz.transition 1993, 3, :o1, 731433600 + tz.transition 1993, 10, :o2, 751996800 + tz.transition 1994, 3, :o1, 762883200 + tz.transition 1994, 10, :o2, 783446400 + tz.transition 1995, 3, :o1, 796147200 + tz.transition 1995, 10, :o2, 814896000 + tz.transition 1996, 3, :o1, 828201600 + tz.transition 1996, 10, :o2, 846345600 + tz.transition 1997, 3, :o1, 859651200 + tz.transition 1997, 10, :o2, 877795200 + tz.transition 1998, 3, :o1, 891100800 + tz.transition 1998, 10, :o2, 909244800 + tz.transition 1999, 3, :o1, 922550400 + tz.transition 1999, 10, :o2, 941299200 + tz.transition 2000, 3, :o1, 954000000 + tz.transition 2000, 8, :o2, 967305600 + tz.transition 2001, 3, :o1, 985449600 + tz.transition 2001, 10, :o2, 1004198400 + tz.transition 2002, 3, :o1, 1017504000 + tz.transition 2002, 10, :o2, 1035648000 + tz.transition 2003, 3, :o1, 1048953600 + tz.transition 2003, 10, :o2, 1067097600 + tz.transition 2004, 3, :o1, 1080403200 + tz.transition 2004, 10, :o2, 1099152000 + tz.transition 2005, 3, :o1, 1111852800 + tz.transition 2005, 10, 
:o2, 1130601600 + tz.transition 2006, 4, :o1, 1143907200 + tz.transition 2006, 10, :o2, 1162051200 + tz.transition 2007, 3, :o1, 1174752000 + tz.transition 2007, 10, :o2, 1193500800 + tz.transition 2008, 4, :o1, 1207411200 + tz.transition 2008, 10, :o2, 1223136000 + tz.transition 2009, 4, :o1, 1238860800 + tz.transition 2009, 10, :o2, 1254585600 + tz.transition 2010, 4, :o1, 1270310400 + tz.transition 2010, 10, :o2, 1286035200 + tz.transition 2011, 4, :o1, 1301760000 + tz.transition 2011, 10, :o2, 1317484800 + tz.transition 2012, 3, :o1, 1333209600 + tz.transition 2012, 10, :o2, 1349539200 + tz.transition 2013, 4, :o1, 1365264000 + tz.transition 2013, 10, :o2, 1380988800 + tz.transition 2014, 4, :o1, 1396713600 + tz.transition 2014, 10, :o2, 1412438400 + tz.transition 2015, 4, :o1, 1428163200 + tz.transition 2015, 10, :o2, 1443888000 + tz.transition 2016, 4, :o1, 1459612800 + tz.transition 2016, 10, :o2, 1475337600 + tz.transition 2017, 4, :o1, 1491062400 + tz.transition 2017, 9, :o2, 1506787200 + tz.transition 2018, 3, :o1, 1522512000 + tz.transition 2018, 10, :o2, 1538841600 + tz.transition 2019, 4, :o1, 1554566400 + tz.transition 2019, 10, :o2, 1570291200 + tz.transition 2020, 4, :o1, 1586016000 + tz.transition 2020, 10, :o2, 1601740800 + tz.transition 2021, 4, :o1, 1617465600 + tz.transition 2021, 10, :o2, 1633190400 + tz.transition 2022, 4, :o1, 1648915200 + tz.transition 2022, 10, :o2, 1664640000 + tz.transition 2023, 4, :o1, 1680364800 + tz.transition 2023, 9, :o2, 1696089600 + tz.transition 2024, 4, :o1, 1712419200 + tz.transition 2024, 10, :o2, 1728144000 + tz.transition 2025, 4, :o1, 1743868800 + tz.transition 2025, 10, :o2, 1759593600 + tz.transition 2026, 4, :o1, 1775318400 + tz.transition 2026, 10, :o2, 1791043200 + tz.transition 2027, 4, :o1, 1806768000 + tz.transition 2027, 10, :o2, 1822492800 + tz.transition 2028, 4, :o1, 1838217600 + tz.transition 2028, 9, :o2, 1853942400 + tz.transition 2029, 3, :o1, 1869667200 + tz.transition 2029, 10, :o2, 1885996800 + tz.transition 2030, 4, :o1, 1901721600 + tz.transition 2030, 10, :o2, 1917446400 + tz.transition 2031, 4, :o1, 1933171200 + tz.transition 2031, 10, :o2, 1948896000 + tz.transition 2032, 4, :o1, 1964620800 + tz.transition 2032, 10, :o2, 1980345600 + tz.transition 2033, 4, :o1, 1996070400 + tz.transition 2033, 10, :o2, 2011795200 + tz.transition 2034, 4, :o1, 2027520000 + tz.transition 2034, 9, :o2, 2043244800 + tz.transition 2035, 3, :o1, 2058969600 + tz.transition 2035, 10, :o2, 2075299200 + tz.transition 2036, 4, :o1, 2091024000 + tz.transition 2036, 10, :o2, 2106748800 + tz.transition 2037, 4, :o1, 2122473600 + tz.transition 2037, 10, :o2, 2138198400 + tz.transition 2038, 4, :o1, 2153923200, 14793103, 6 + tz.transition 2038, 10, :o2, 2169648000, 14794195, 6 + tz.transition 2039, 4, :o1, 2185372800, 14795287, 6 + tz.transition 2039, 10, :o2, 2201097600, 14796379, 6 + tz.transition 2040, 3, :o1, 2216822400, 14797471, 6 + tz.transition 2040, 10, :o2, 2233152000, 14798605, 6 + tz.transition 2041, 4, :o1, 2248876800, 14799697, 6 + tz.transition 2041, 10, :o2, 2264601600, 14800789, 6 + tz.transition 2042, 4, :o1, 2280326400, 14801881, 6 + tz.transition 2042, 10, :o2, 2296051200, 14802973, 6 + tz.transition 2043, 4, :o1, 2311776000, 14804065, 6 + tz.transition 2043, 10, :o2, 2327500800, 14805157, 6 + tz.transition 2044, 4, :o1, 2343225600, 14806249, 6 + tz.transition 2044, 10, :o2, 2358950400, 14807341, 6 + tz.transition 2045, 4, :o1, 2374675200, 14808433, 6 + tz.transition 2045, 9, :o2, 2390400000, 14809525, 6 + tz.transition 
2046, 3, :o1, 2406124800, 14810617, 6 + tz.transition 2046, 10, :o2, 2422454400, 14811751, 6 + tz.transition 2047, 4, :o1, 2438179200, 14812843, 6 + tz.transition 2047, 10, :o2, 2453904000, 14813935, 6 + tz.transition 2048, 4, :o1, 2469628800, 14815027, 6 + tz.transition 2048, 10, :o2, 2485353600, 14816119, 6 + tz.transition 2049, 4, :o1, 2501078400, 14817211, 6 + tz.transition 2049, 10, :o2, 2516803200, 14818303, 6 + tz.transition 2050, 4, :o1, 2532528000, 14819395, 6 + tz.transition 2050, 10, :o2, 2548252800, 14820487, 6 + tz.transition 2051, 4, :o1, 2563977600, 14821579, 6 + tz.transition 2051, 9, :o2, 2579702400, 14822671, 6 + tz.transition 2052, 4, :o1, 2596032000, 14823805, 6 + tz.transition 2052, 10, :o2, 2611756800, 14824897, 6 + tz.transition 2053, 4, :o1, 2627481600, 14825989, 6 + tz.transition 2053, 10, :o2, 2643206400, 14827081, 6 + tz.transition 2054, 4, :o1, 2658931200, 14828173, 6 + tz.transition 2054, 10, :o2, 2674656000, 14829265, 6 + tz.transition 2055, 4, :o1, 2690380800, 14830357, 6 + tz.transition 2055, 10, :o2, 2706105600, 14831449, 6 + tz.transition 2056, 4, :o1, 2721830400, 14832541, 6 + tz.transition 2056, 9, :o2, 2737555200, 14833633, 6 + tz.transition 2057, 3, :o1, 2753280000, 14834725, 6 + tz.transition 2057, 10, :o2, 2769609600, 14835859, 6 + tz.transition 2058, 4, :o1, 2785334400, 14836951, 6 + tz.transition 2058, 10, :o2, 2801059200, 14838043, 6 + tz.transition 2059, 4, :o1, 2816784000, 14839135, 6 + tz.transition 2059, 10, :o2, 2832508800, 14840227, 6 + tz.transition 2060, 4, :o1, 2848233600, 14841319, 6 + tz.transition 2060, 10, :o2, 2863958400, 14842411, 6 + tz.transition 2061, 4, :o1, 2879683200, 14843503, 6 + tz.transition 2061, 10, :o2, 2895408000, 14844595, 6 + tz.transition 2062, 4, :o1, 2911132800, 14845687, 6 + tz.transition 2062, 9, :o2, 2926857600, 14846779, 6 + tz.transition 2063, 3, :o1, 2942582400, 14847871, 6 + tz.transition 2063, 10, :o2, 2958912000, 14849005, 6 + tz.transition 2064, 4, :o1, 2974636800, 14850097, 6 + tz.transition 2064, 10, :o2, 2990361600, 14851189, 6 + tz.transition 2065, 4, :o1, 3006086400, 14852281, 6 + tz.transition 2065, 10, :o2, 3021811200, 14853373, 6 + tz.transition 2066, 4, :o1, 3037536000, 14854465, 6 + tz.transition 2066, 10, :o2, 3053260800, 14855557, 6 + tz.transition 2067, 4, :o1, 3068985600, 14856649, 6 + tz.transition 2067, 10, :o2, 3084710400, 14857741, 6 + tz.transition 2068, 3, :o1, 3100435200, 14858833, 6 + tz.transition 2068, 10, :o2, 3116764800, 14859967, 6 + tz.transition 2069, 4, :o1, 3132489600, 14861059, 6 + tz.transition 2069, 10, :o2, 3148214400, 14862151, 6 + tz.transition 2070, 4, :o1, 3163939200, 14863243, 6 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/EST.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/EST.rb new file mode 100644 index 0000000..82808df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/EST.rb @@ -0,0 +1,19 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module EST + include TimezoneDefinition + + timezone 'EST' do |tz| + tz.offset :o0, -18000, 0, :EST + + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__m__1.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__m__1.rb new file mode 100644 index 0000000..57ab6ed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__m__1.rb @@ -0,0 +1,21 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module Etc + module GMT__m__1 + include TimezoneDefinition + + timezone 'Etc/GMT-1' do |tz| + tz.offset :o0, 3600, 0, :'+01' + + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__p__1.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__p__1.rb new file mode 100644 index 0000000..dec19e5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/GMT__p__1.rb @@ -0,0 +1,21 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module Etc + module GMT__p__1 + include TimezoneDefinition + + timezone 'Etc/GMT+1' do |tz| + tz.offset :o0, -3600, 0, :'-01' + + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/UTC.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/UTC.rb new file mode 100644 index 0000000..48fbcf2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Etc/UTC.rb @@ -0,0 +1,21 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module Etc + module UTC + include TimezoneDefinition + + timezone 'Etc/UTC' do |tz| + tz.offset :o0, 0, 0, :UTC + + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Amsterdam.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Amsterdam.rb new file mode 100644 index 0000000..bedf7e4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Amsterdam.rb @@ -0,0 +1,273 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Amsterdam + include TimezoneDefinition + + timezone 'Europe/Amsterdam' do |tz| + tz.offset :o0, 1172, 0, :LMT + tz.offset :o1, 1172, 0, :AMT + tz.offset :o2, 1172, 3600, :NST + tz.offset :o3, 1200, 3600, :'+0120' + tz.offset :o4, 1200, 0, :'+0020' + tz.offset :o5, 3600, 3600, :CEST + tz.offset :o6, 3600, 0, :CET + + tz.transition 1834, 12, :o1, -4260212372, 51651636907, 21600 + tz.transition 1916, 4, :o2, -1693700372, 52293264907, 21600 + tz.transition 1916, 9, :o1, -1680484772, 52296568807, 21600 + tz.transition 1917, 4, :o2, -1663453172, 52300826707, 21600 + tz.transition 1917, 9, :o1, -1650147572, 52304153107, 21600 + tz.transition 1918, 4, :o2, -1633213172, 52308386707, 21600 + tz.transition 1918, 9, :o1, -1617488372, 52312317907, 21600 + tz.transition 1919, 4, :o2, -1601158772, 52316400307, 21600 + tz.transition 1919, 9, :o1, -1586038772, 52320180307, 21600 + tz.transition 1920, 4, :o2, -1569709172, 52324262707, 21600 + tz.transition 1920, 9, :o1, -1554589172, 52328042707, 21600 + tz.transition 1921, 4, :o2, -1538259572, 52332125107, 21600 + tz.transition 1921, 9, :o1, -1523139572, 52335905107, 21600 + tz.transition 1922, 3, :o2, -1507501172, 52339814707, 21600 + tz.transition 1922, 10, :o1, -1490566772, 52344048307, 21600 + tz.transition 1923, 6, :o2, -1470176372, 52349145907, 21600 + tz.transition 1923, 10, :o1, -1459117172, 52351910707, 21600 + tz.transition 1924, 3, :o2, -1443997172, 52355690707, 21600 + tz.transition 1924, 10, :o1, -1427667572, 52359773107, 21600 + tz.transition 1925, 6, :o2, -1406672372, 52365021907, 21600 + tz.transition 1925, 10, :o1, -1396217972, 52367635507, 21600 + tz.transition 1926, 5, :o2, -1376950772, 52372452307, 21600 + tz.transition 1926, 10, :o1, -1364768372, 52375497907, 21600 + tz.transition 1927, 5, :o2, -1345414772, 52380336307, 21600 + tz.transition 1927, 10, :o1, -1333318772, 52383360307, 21600 + tz.transition 1928, 5, :o2, -1313792372, 52388241907, 21600 + tz.transition 1928, 10, :o1, -1301264372, 52391373907, 21600 + tz.transition 1929, 5, :o2, -1282256372, 52396125907, 21600 + tz.transition 1929, 10, :o1, -1269814772, 52399236307, 21600 + tz.transition 1930, 5, :o2, -1250720372, 52404009907, 21600 + tz.transition 1930, 10, :o1, -1238365172, 52407098707, 21600 + tz.transition 1931, 5, :o2, -1219184372, 52411893907, 21600 + tz.transition 1931, 10, :o1, -1206915572, 52414961107, 21600 + tz.transition 1932, 5, :o2, -1186957172, 52419950707, 21600 + tz.transition 1932, 10, :o1, -1175465972, 52422823507, 21600 + tz.transition 1933, 5, :o2, -1156025972, 52427683507, 21600 + tz.transition 1933, 10, :o1, -1143411572, 52430837107, 21600 + tz.transition 1934, 5, :o2, -1124489972, 52435567507, 21600 + tz.transition 1934, 10, :o1, -1111961972, 52438699507, 21600 + tz.transition 1935, 5, :o2, -1092953972, 52443451507, 21600 + tz.transition 1935, 10, :o1, -1080512372, 52446561907, 21600 + tz.transition 1936, 5, :o2, -1061331572, 52451357107, 21600 + tz.transition 1936, 10, :o1, -1049062772, 52454424307, 21600 + tz.transition 1937, 5, :o2, -1029190772, 52459392307, 21600 + tz.transition 1937, 6, :o3, -1025745572, 52460253607, 21600 + tz.transition 1937, 10, :o4, -1017613200, 174874289, 72 + tz.transition 1938, 5, :o3, -998259600, 174890417, 72 + tz.transition 1938, 10, :o4, -986163600, 174900497, 72 + tz.transition 1939, 5, :o3, -966723600, 174916697, 72 + tz.transition 1939, 10, :o4, -954109200, 174927209, 72 + tz.transition 1940, 5, :o5, -935022000, 174943115, 72 + tz.transition 
1942, 11, :o6, -857257200, 58335973, 24 + tz.transition 1943, 3, :o5, -844556400, 58339501, 24 + tz.transition 1943, 10, :o6, -828226800, 58344037, 24 + tz.transition 1944, 4, :o5, -812502000, 58348405, 24 + tz.transition 1944, 10, :o6, -796777200, 58352773, 24 + tz.transition 1945, 4, :o5, -781052400, 58357141, 24 + tz.transition 1945, 9, :o6, -766623600, 58361149, 24 + tz.transition 1977, 4, :o5, 228877200 + tz.transition 1977, 9, :o6, 243997200 + tz.transition 1978, 4, :o5, 260326800 + tz.transition 1978, 10, :o6, 276051600 + tz.transition 1979, 4, :o5, 291776400 + tz.transition 1979, 9, :o6, 307501200 + tz.transition 1980, 4, :o5, 323830800 + tz.transition 1980, 9, :o6, 338950800 + tz.transition 1981, 3, :o5, 354675600 + tz.transition 1981, 9, :o6, 370400400 + tz.transition 1982, 3, :o5, 386125200 + tz.transition 1982, 9, :o6, 401850000 + tz.transition 1983, 3, :o5, 417574800 + tz.transition 1983, 9, :o6, 433299600 + tz.transition 1984, 3, :o5, 449024400 + tz.transition 1984, 9, :o6, 465354000 + tz.transition 1985, 3, :o5, 481078800 + tz.transition 1985, 9, :o6, 496803600 + tz.transition 1986, 3, :o5, 512528400 + tz.transition 1986, 9, :o6, 528253200 + tz.transition 1987, 3, :o5, 543978000 + tz.transition 1987, 9, :o6, 559702800 + tz.transition 1988, 3, :o5, 575427600 + tz.transition 1988, 9, :o6, 591152400 + tz.transition 1989, 3, :o5, 606877200 + tz.transition 1989, 9, :o6, 622602000 + tz.transition 1990, 3, :o5, 638326800 + tz.transition 1990, 9, :o6, 654656400 + tz.transition 1991, 3, :o5, 670381200 + tz.transition 1991, 9, :o6, 686106000 + tz.transition 1992, 3, :o5, 701830800 + tz.transition 1992, 9, :o6, 717555600 + tz.transition 1993, 3, :o5, 733280400 + tz.transition 1993, 9, :o6, 749005200 + tz.transition 1994, 3, :o5, 764730000 + tz.transition 1994, 9, :o6, 780454800 + tz.transition 1995, 3, :o5, 796179600 + tz.transition 1995, 9, :o6, 811904400 + tz.transition 1996, 3, :o5, 828234000 + tz.transition 1996, 10, :o6, 846378000 + tz.transition 1997, 3, :o5, 859683600 + tz.transition 1997, 10, :o6, 877827600 + tz.transition 1998, 3, :o5, 891133200 + tz.transition 1998, 10, :o6, 909277200 + tz.transition 1999, 3, :o5, 922582800 + tz.transition 1999, 10, :o6, 941331600 + tz.transition 2000, 3, :o5, 954032400 + tz.transition 2000, 10, :o6, 972781200 + tz.transition 2001, 3, :o5, 985482000 + tz.transition 2001, 10, :o6, 1004230800 + tz.transition 2002, 3, :o5, 1017536400 + tz.transition 2002, 10, :o6, 1035680400 + tz.transition 2003, 3, :o5, 1048986000 + tz.transition 2003, 10, :o6, 1067130000 + tz.transition 2004, 3, :o5, 1080435600 + tz.transition 2004, 10, :o6, 1099184400 + tz.transition 2005, 3, :o5, 1111885200 + tz.transition 2005, 10, :o6, 1130634000 + tz.transition 2006, 3, :o5, 1143334800 + tz.transition 2006, 10, :o6, 1162083600 + tz.transition 2007, 3, :o5, 1174784400 + tz.transition 2007, 10, :o6, 1193533200 + tz.transition 2008, 3, :o5, 1206838800 + tz.transition 2008, 10, :o6, 1224982800 + tz.transition 2009, 3, :o5, 1238288400 + tz.transition 2009, 10, :o6, 1256432400 + tz.transition 2010, 3, :o5, 1269738000 + tz.transition 2010, 10, :o6, 1288486800 + tz.transition 2011, 3, :o5, 1301187600 + tz.transition 2011, 10, :o6, 1319936400 + tz.transition 2012, 3, :o5, 1332637200 + tz.transition 2012, 10, :o6, 1351386000 + tz.transition 2013, 3, :o5, 1364691600 + tz.transition 2013, 10, :o6, 1382835600 + tz.transition 2014, 3, :o5, 1396141200 + tz.transition 2014, 10, :o6, 1414285200 + tz.transition 2015, 3, :o5, 1427590800 + tz.transition 2015, 10, :o6, 1445734800 + 
tz.transition 2016, 3, :o5, 1459040400 + tz.transition 2016, 10, :o6, 1477789200 + tz.transition 2017, 3, :o5, 1490490000 + tz.transition 2017, 10, :o6, 1509238800 + tz.transition 2018, 3, :o5, 1521939600 + tz.transition 2018, 10, :o6, 1540688400 + tz.transition 2019, 3, :o5, 1553994000 + tz.transition 2019, 10, :o6, 1572138000 + tz.transition 2020, 3, :o5, 1585443600 + tz.transition 2020, 10, :o6, 1603587600 + tz.transition 2021, 3, :o5, 1616893200 + tz.transition 2021, 10, :o6, 1635642000 + tz.transition 2022, 3, :o5, 1648342800 + tz.transition 2022, 10, :o6, 1667091600 + tz.transition 2023, 3, :o5, 1679792400 + tz.transition 2023, 10, :o6, 1698541200 + tz.transition 2024, 3, :o5, 1711846800 + tz.transition 2024, 10, :o6, 1729990800 + tz.transition 2025, 3, :o5, 1743296400 + tz.transition 2025, 10, :o6, 1761440400 + tz.transition 2026, 3, :o5, 1774746000 + tz.transition 2026, 10, :o6, 1792890000 + tz.transition 2027, 3, :o5, 1806195600 + tz.transition 2027, 10, :o6, 1824944400 + tz.transition 2028, 3, :o5, 1837645200 + tz.transition 2028, 10, :o6, 1856394000 + tz.transition 2029, 3, :o5, 1869094800 + tz.transition 2029, 10, :o6, 1887843600 + tz.transition 2030, 3, :o5, 1901149200 + tz.transition 2030, 10, :o6, 1919293200 + tz.transition 2031, 3, :o5, 1932598800 + tz.transition 2031, 10, :o6, 1950742800 + tz.transition 2032, 3, :o5, 1964048400 + tz.transition 2032, 10, :o6, 1982797200 + tz.transition 2033, 3, :o5, 1995498000 + tz.transition 2033, 10, :o6, 2014246800 + tz.transition 2034, 3, :o5, 2026947600 + tz.transition 2034, 10, :o6, 2045696400 + tz.transition 2035, 3, :o5, 2058397200 + tz.transition 2035, 10, :o6, 2077146000 + tz.transition 2036, 3, :o5, 2090451600 + tz.transition 2036, 10, :o6, 2108595600 + tz.transition 2037, 3, :o5, 2121901200 + tz.transition 2037, 10, :o6, 2140045200 + tz.transition 2038, 3, :o5, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o6, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o5, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o6, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o5, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o6, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o5, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o6, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o5, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o6, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o5, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o6, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o5, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o6, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o5, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o6, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o5, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o6, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o5, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o6, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o5, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o6, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o5, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o6, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o5, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o6, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o5, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o6, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o5, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o6, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o5, 2626909200, 59303797, 24 + 
tz.transition 2053, 10, :o6, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o5, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o6, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o5, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o6, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o5, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o6, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o5, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o6, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o5, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o6, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o5, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o6, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o5, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o6, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o5, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o6, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o5, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o6, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o5, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o6, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o5, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o6, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o5, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o6, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o5, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o6, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o5, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o6, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o5, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o6, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o5, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o6, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o5, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o6, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Andorra.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Andorra.rb new file mode 100644 index 0000000..d51c5dc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Andorra.rb @@ -0,0 +1,198 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Andorra + include TimezoneDefinition + + timezone 'Europe/Andorra' do |tz| + tz.offset :o0, 364, 0, :LMT + tz.offset :o1, 0, 0, :WET + tz.offset :o2, 3600, 0, :CET + tz.offset :o3, 3600, 3600, :CEST + + tz.transition 1900, 12, :o1, -2177453164, 52172326709, 21600 + tz.transition 1946, 9, :o2, -733881600, 4864187, 2 + tz.transition 1985, 3, :o3, 481078800 + tz.transition 1985, 9, :o2, 496803600 + tz.transition 1986, 3, :o3, 512528400 + tz.transition 1986, 9, :o2, 528253200 + tz.transition 1987, 3, :o3, 543978000 + tz.transition 1987, 9, :o2, 559702800 + tz.transition 1988, 3, :o3, 575427600 + tz.transition 1988, 9, :o2, 591152400 + tz.transition 1989, 3, :o3, 606877200 + tz.transition 1989, 9, :o2, 622602000 + tz.transition 1990, 3, :o3, 638326800 + tz.transition 1990, 9, :o2, 654656400 + tz.transition 1991, 3, :o3, 670381200 + tz.transition 1991, 9, :o2, 686106000 + tz.transition 1992, 3, :o3, 701830800 + tz.transition 1992, 9, :o2, 717555600 + tz.transition 1993, 3, :o3, 733280400 + tz.transition 1993, 9, :o2, 749005200 + tz.transition 1994, 3, :o3, 764730000 + tz.transition 1994, 9, :o2, 780454800 + tz.transition 1995, 3, :o3, 796179600 + tz.transition 1995, 9, :o2, 811904400 + tz.transition 1996, 3, :o3, 828234000 + tz.transition 1996, 10, :o2, 846378000 + tz.transition 1997, 3, :o3, 859683600 + tz.transition 1997, 10, :o2, 877827600 + tz.transition 1998, 3, :o3, 891133200 + tz.transition 1998, 10, :o2, 909277200 + tz.transition 1999, 3, :o3, 922582800 + tz.transition 1999, 10, :o2, 941331600 + tz.transition 2000, 3, :o3, 954032400 + tz.transition 2000, 10, :o2, 972781200 + tz.transition 2001, 3, :o3, 985482000 + tz.transition 2001, 10, :o2, 1004230800 + tz.transition 2002, 3, :o3, 1017536400 + tz.transition 2002, 10, :o2, 1035680400 + tz.transition 2003, 3, :o3, 1048986000 + tz.transition 2003, 10, :o2, 1067130000 + tz.transition 2004, 3, :o3, 1080435600 + tz.transition 2004, 10, :o2, 1099184400 + tz.transition 2005, 3, :o3, 1111885200 + tz.transition 2005, 10, :o2, 1130634000 + tz.transition 2006, 3, :o3, 1143334800 + tz.transition 2006, 10, :o2, 1162083600 + tz.transition 2007, 3, :o3, 1174784400 + tz.transition 2007, 10, :o2, 1193533200 + tz.transition 2008, 3, :o3, 1206838800 + tz.transition 2008, 10, :o2, 1224982800 + tz.transition 2009, 3, :o3, 1238288400 + tz.transition 2009, 10, :o2, 1256432400 + tz.transition 2010, 3, :o3, 1269738000 + tz.transition 2010, 10, :o2, 1288486800 + tz.transition 2011, 3, :o3, 1301187600 + tz.transition 2011, 10, :o2, 1319936400 + tz.transition 2012, 3, :o3, 1332637200 + tz.transition 2012, 10, :o2, 1351386000 + tz.transition 2013, 3, :o3, 1364691600 + tz.transition 2013, 10, :o2, 1382835600 + tz.transition 2014, 3, :o3, 1396141200 + tz.transition 2014, 10, :o2, 1414285200 + tz.transition 2015, 3, :o3, 1427590800 + tz.transition 2015, 10, :o2, 1445734800 + tz.transition 2016, 3, :o3, 1459040400 + tz.transition 2016, 10, :o2, 1477789200 + tz.transition 2017, 3, :o3, 1490490000 + tz.transition 2017, 10, :o2, 1509238800 + tz.transition 2018, 3, :o3, 1521939600 + tz.transition 2018, 10, :o2, 1540688400 + tz.transition 2019, 3, :o3, 1553994000 + tz.transition 2019, 10, :o2, 1572138000 + tz.transition 2020, 3, :o3, 1585443600 + tz.transition 2020, 10, :o2, 1603587600 + tz.transition 2021, 3, :o3, 1616893200 + tz.transition 2021, 10, :o2, 1635642000 + tz.transition 2022, 3, :o3, 1648342800 + tz.transition 2022, 10, :o2, 1667091600 + tz.transition 2023, 3, :o3, 1679792400 + 
tz.transition 2023, 10, :o2, 1698541200 + tz.transition 2024, 3, :o3, 1711846800 + tz.transition 2024, 10, :o2, 1729990800 + tz.transition 2025, 3, :o3, 1743296400 + tz.transition 2025, 10, :o2, 1761440400 + tz.transition 2026, 3, :o3, 1774746000 + tz.transition 2026, 10, :o2, 1792890000 + tz.transition 2027, 3, :o3, 1806195600 + tz.transition 2027, 10, :o2, 1824944400 + tz.transition 2028, 3, :o3, 1837645200 + tz.transition 2028, 10, :o2, 1856394000 + tz.transition 2029, 3, :o3, 1869094800 + tz.transition 2029, 10, :o2, 1887843600 + tz.transition 2030, 3, :o3, 1901149200 + tz.transition 2030, 10, :o2, 1919293200 + tz.transition 2031, 3, :o3, 1932598800 + tz.transition 2031, 10, :o2, 1950742800 + tz.transition 2032, 3, :o3, 1964048400 + tz.transition 2032, 10, :o2, 1982797200 + tz.transition 2033, 3, :o3, 1995498000 + tz.transition 2033, 10, :o2, 2014246800 + tz.transition 2034, 3, :o3, 2026947600 + tz.transition 2034, 10, :o2, 2045696400 + tz.transition 2035, 3, :o3, 2058397200 + tz.transition 2035, 10, :o2, 2077146000 + tz.transition 2036, 3, :o3, 2090451600 + tz.transition 2036, 10, :o2, 2108595600 + tz.transition 2037, 3, :o3, 2121901200 + tz.transition 2037, 10, :o2, 2140045200 + tz.transition 2038, 3, :o3, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o2, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o3, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o2, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o3, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o2, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o3, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o2, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o3, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o2, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o3, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o2, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o3, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o2, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o3, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o2, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o3, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o2, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o3, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o2, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o3, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o2, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o3, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o2, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o3, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o2, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o3, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o2, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o3, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o2, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o3, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o2, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o3, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o2, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o3, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o2, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o3, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o2, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o3, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o2, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o3, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o2, 2802906000, 59352685, 24 + tz.transition 2059, 
3, :o3, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o2, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o3, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o2, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o3, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o2, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o3, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o2, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o3, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o2, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o3, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o2, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o3, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o2, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o3, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o2, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o3, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o2, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o3, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o2, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o3, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o2, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o3, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o2, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/London.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/London.rb new file mode 100644 index 0000000..942e008 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/London.rb @@ -0,0 +1,333 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module London + include TimezoneDefinition + + timezone 'Europe/London' do |tz| + tz.offset :o0, -75, 0, :LMT + tz.offset :o1, 0, 0, :GMT + tz.offset :o2, 0, 3600, :BST + tz.offset :o3, 0, 7200, :BDST + tz.offset :o4, 3600, 0, :BST + + tz.transition 1847, 12, :o1, -3852662325, 2760187969, 1152 + tz.transition 1916, 5, :o2, -1691964000, 29052055, 12 + tz.transition 1916, 10, :o1, -1680472800, 29053651, 12 + tz.transition 1917, 4, :o2, -1664143200, 29055919, 12 + tz.transition 1917, 9, :o1, -1650146400, 29057863, 12 + tz.transition 1918, 3, :o2, -1633903200, 29060119, 12 + tz.transition 1918, 9, :o1, -1617487200, 29062399, 12 + tz.transition 1919, 3, :o2, -1601848800, 29064571, 12 + tz.transition 1919, 9, :o1, -1586037600, 29066767, 12 + tz.transition 1920, 3, :o2, -1570399200, 29068939, 12 + tz.transition 1920, 10, :o1, -1552168800, 29071471, 12 + tz.transition 1921, 4, :o2, -1538344800, 29073391, 12 + tz.transition 1921, 10, :o1, -1522533600, 29075587, 12 + tz.transition 1922, 3, :o2, -1507500000, 29077675, 12 + tz.transition 1922, 10, :o1, -1490565600, 29080027, 12 + tz.transition 1923, 4, :o2, -1473631200, 29082379, 12 + tz.transition 1923, 9, :o1, -1460930400, 29084143, 12 + tz.transition 1924, 4, :o2, -1442786400, 29086663, 12 + tz.transition 1924, 9, :o1, -1428876000, 29088595, 12 + tz.transition 1925, 4, :o2, -1410732000, 29091115, 12 + tz.transition 1925, 10, :o1, -1396216800, 29093131, 12 + tz.transition 1926, 4, :o2, -1379282400, 29095483, 12 + tz.transition 1926, 10, :o1, -1364767200, 29097499, 12 + tz.transition 1927, 4, :o2, -1348437600, 29099767, 12 + tz.transition 1927, 10, :o1, -1333317600, 29101867, 12 + tz.transition 1928, 4, :o2, -1315778400, 29104303, 12 + tz.transition 1928, 10, :o1, -1301263200, 29106319, 12 + tz.transition 1929, 4, :o2, -1284328800, 29108671, 12 + tz.transition 1929, 10, :o1, -1269813600, 29110687, 12 + tz.transition 1930, 4, :o2, -1253484000, 29112955, 12 + tz.transition 1930, 10, :o1, -1238364000, 29115055, 12 + tz.transition 1931, 4, :o2, -1221429600, 29117407, 12 + tz.transition 1931, 10, :o1, -1206914400, 29119423, 12 + tz.transition 1932, 4, :o2, -1189980000, 29121775, 12 + tz.transition 1932, 10, :o1, -1175464800, 29123791, 12 + tz.transition 1933, 4, :o2, -1159135200, 29126059, 12 + tz.transition 1933, 10, :o1, -1143410400, 29128243, 12 + tz.transition 1934, 4, :o2, -1126476000, 29130595, 12 + tz.transition 1934, 10, :o1, -1111960800, 29132611, 12 + tz.transition 1935, 4, :o2, -1095631200, 29134879, 12 + tz.transition 1935, 10, :o1, -1080511200, 29136979, 12 + tz.transition 1936, 4, :o2, -1063576800, 29139331, 12 + tz.transition 1936, 10, :o1, -1049061600, 29141347, 12 + tz.transition 1937, 4, :o2, -1032127200, 29143699, 12 + tz.transition 1937, 10, :o1, -1017612000, 29145715, 12 + tz.transition 1938, 4, :o2, -1001282400, 29147983, 12 + tz.transition 1938, 10, :o1, -986162400, 29150083, 12 + tz.transition 1939, 4, :o2, -969228000, 29152435, 12 + tz.transition 1939, 11, :o1, -950479200, 29155039, 12 + tz.transition 1940, 2, :o2, -942012000, 29156215, 12 + tz.transition 1941, 5, :o3, -904518000, 58322845, 24 + tz.transition 1941, 8, :o2, -896050800, 58325197, 24 + tz.transition 1942, 4, :o3, -875487600, 58330909, 24 + tz.transition 1942, 8, :o2, -864601200, 58333933, 24 + tz.transition 1943, 4, :o3, -844038000, 58339645, 24 + tz.transition 1943, 8, :o2, -832546800, 58342837, 24 + tz.transition 1944, 4, :o3, -812588400, 58348381, 24 + tz.transition 1944, 9, :o2, -798073200, 
58352413, 24 + tz.transition 1945, 4, :o3, -781052400, 58357141, 24 + tz.transition 1945, 7, :o2, -772066800, 58359637, 24 + tz.transition 1945, 10, :o1, -764805600, 29180827, 12 + tz.transition 1946, 4, :o2, -748476000, 29183095, 12 + tz.transition 1946, 10, :o1, -733356000, 29185195, 12 + tz.transition 1947, 3, :o2, -719445600, 29187127, 12 + tz.transition 1947, 4, :o3, -717030000, 58374925, 24 + tz.transition 1947, 8, :o2, -706748400, 58377781, 24 + tz.transition 1947, 11, :o1, -699487200, 29189899, 12 + tz.transition 1948, 3, :o2, -687996000, 29191495, 12 + tz.transition 1948, 10, :o1, -668037600, 29194267, 12 + tz.transition 1949, 4, :o2, -654732000, 29196115, 12 + tz.transition 1949, 10, :o1, -636588000, 29198635, 12 + tz.transition 1950, 4, :o2, -622072800, 29200651, 12 + tz.transition 1950, 10, :o1, -605743200, 29202919, 12 + tz.transition 1951, 4, :o2, -590623200, 29205019, 12 + tz.transition 1951, 10, :o1, -574293600, 29207287, 12 + tz.transition 1952, 4, :o2, -558568800, 29209471, 12 + tz.transition 1952, 10, :o1, -542239200, 29211739, 12 + tz.transition 1953, 4, :o2, -527119200, 29213839, 12 + tz.transition 1953, 10, :o1, -512604000, 29215855, 12 + tz.transition 1954, 4, :o2, -496274400, 29218123, 12 + tz.transition 1954, 10, :o1, -481154400, 29220223, 12 + tz.transition 1955, 4, :o2, -464220000, 29222575, 12 + tz.transition 1955, 10, :o1, -449704800, 29224591, 12 + tz.transition 1956, 4, :o2, -432165600, 29227027, 12 + tz.transition 1956, 10, :o1, -417650400, 29229043, 12 + tz.transition 1957, 4, :o2, -401320800, 29231311, 12 + tz.transition 1957, 10, :o1, -386200800, 29233411, 12 + tz.transition 1958, 4, :o2, -369266400, 29235763, 12 + tz.transition 1958, 10, :o1, -354751200, 29237779, 12 + tz.transition 1959, 4, :o2, -337816800, 29240131, 12 + tz.transition 1959, 10, :o1, -323301600, 29242147, 12 + tz.transition 1960, 4, :o2, -306972000, 29244415, 12 + tz.transition 1960, 10, :o1, -291852000, 29246515, 12 + tz.transition 1961, 3, :o2, -276732000, 29248615, 12 + tz.transition 1961, 10, :o1, -257983200, 29251219, 12 + tz.transition 1962, 3, :o2, -245282400, 29252983, 12 + tz.transition 1962, 10, :o1, -226533600, 29255587, 12 + tz.transition 1963, 3, :o2, -213228000, 29257435, 12 + tz.transition 1963, 10, :o1, -195084000, 29259955, 12 + tz.transition 1964, 3, :o2, -182383200, 29261719, 12 + tz.transition 1964, 10, :o1, -163634400, 29264323, 12 + tz.transition 1965, 3, :o2, -150933600, 29266087, 12 + tz.transition 1965, 10, :o1, -132184800, 29268691, 12 + tz.transition 1966, 3, :o2, -119484000, 29270455, 12 + tz.transition 1966, 10, :o1, -100735200, 29273059, 12 + tz.transition 1967, 3, :o2, -88034400, 29274823, 12 + tz.transition 1967, 10, :o1, -68680800, 29277511, 12 + tz.transition 1968, 2, :o2, -59004000, 29278855, 12 + tz.transition 1968, 10, :o4, -37242000, 58563755, 24 + tz.transition 1971, 10, :o1, 57722400 + tz.transition 1972, 3, :o2, 69818400 + tz.transition 1972, 10, :o1, 89172000 + tz.transition 1973, 3, :o2, 101268000 + tz.transition 1973, 10, :o1, 120621600 + tz.transition 1974, 3, :o2, 132717600 + tz.transition 1974, 10, :o1, 152071200 + tz.transition 1975, 3, :o2, 164167200 + tz.transition 1975, 10, :o1, 183520800 + tz.transition 1976, 3, :o2, 196221600 + tz.transition 1976, 10, :o1, 214970400 + tz.transition 1977, 3, :o2, 227671200 + tz.transition 1977, 10, :o1, 246420000 + tz.transition 1978, 3, :o2, 259120800 + tz.transition 1978, 10, :o1, 278474400 + tz.transition 1979, 3, :o2, 290570400 + tz.transition 1979, 10, :o1, 309924000 + tz.transition 1980, 3, :o2, 
322020000 + tz.transition 1980, 10, :o1, 341373600 + tz.transition 1981, 3, :o2, 354675600 + tz.transition 1981, 10, :o1, 372819600 + tz.transition 1982, 3, :o2, 386125200 + tz.transition 1982, 10, :o1, 404269200 + tz.transition 1983, 3, :o2, 417574800 + tz.transition 1983, 10, :o1, 435718800 + tz.transition 1984, 3, :o2, 449024400 + tz.transition 1984, 10, :o1, 467773200 + tz.transition 1985, 3, :o2, 481078800 + tz.transition 1985, 10, :o1, 499222800 + tz.transition 1986, 3, :o2, 512528400 + tz.transition 1986, 10, :o1, 530672400 + tz.transition 1987, 3, :o2, 543978000 + tz.transition 1987, 10, :o1, 562122000 + tz.transition 1988, 3, :o2, 575427600 + tz.transition 1988, 10, :o1, 593571600 + tz.transition 1989, 3, :o2, 606877200 + tz.transition 1989, 10, :o1, 625626000 + tz.transition 1990, 3, :o2, 638326800 + tz.transition 1990, 10, :o1, 657075600 + tz.transition 1991, 3, :o2, 670381200 + tz.transition 1991, 10, :o1, 688525200 + tz.transition 1992, 3, :o2, 701830800 + tz.transition 1992, 10, :o1, 719974800 + tz.transition 1993, 3, :o2, 733280400 + tz.transition 1993, 10, :o1, 751424400 + tz.transition 1994, 3, :o2, 764730000 + tz.transition 1994, 10, :o1, 782874000 + tz.transition 1995, 3, :o2, 796179600 + tz.transition 1995, 10, :o1, 814323600 + tz.transition 1996, 3, :o2, 828234000 + tz.transition 1996, 10, :o1, 846378000 + tz.transition 1997, 3, :o2, 859683600 + tz.transition 1997, 10, :o1, 877827600 + tz.transition 1998, 3, :o2, 891133200 + tz.transition 1998, 10, :o1, 909277200 + tz.transition 1999, 3, :o2, 922582800 + tz.transition 1999, 10, :o1, 941331600 + tz.transition 2000, 3, :o2, 954032400 + tz.transition 2000, 10, :o1, 972781200 + tz.transition 2001, 3, :o2, 985482000 + tz.transition 2001, 10, :o1, 1004230800 + tz.transition 2002, 3, :o2, 1017536400 + tz.transition 2002, 10, :o1, 1035680400 + tz.transition 2003, 3, :o2, 1048986000 + tz.transition 2003, 10, :o1, 1067130000 + tz.transition 2004, 3, :o2, 1080435600 + tz.transition 2004, 10, :o1, 1099184400 + tz.transition 2005, 3, :o2, 1111885200 + tz.transition 2005, 10, :o1, 1130634000 + tz.transition 2006, 3, :o2, 1143334800 + tz.transition 2006, 10, :o1, 1162083600 + tz.transition 2007, 3, :o2, 1174784400 + tz.transition 2007, 10, :o1, 1193533200 + tz.transition 2008, 3, :o2, 1206838800 + tz.transition 2008, 10, :o1, 1224982800 + tz.transition 2009, 3, :o2, 1238288400 + tz.transition 2009, 10, :o1, 1256432400 + tz.transition 2010, 3, :o2, 1269738000 + tz.transition 2010, 10, :o1, 1288486800 + tz.transition 2011, 3, :o2, 1301187600 + tz.transition 2011, 10, :o1, 1319936400 + tz.transition 2012, 3, :o2, 1332637200 + tz.transition 2012, 10, :o1, 1351386000 + tz.transition 2013, 3, :o2, 1364691600 + tz.transition 2013, 10, :o1, 1382835600 + tz.transition 2014, 3, :o2, 1396141200 + tz.transition 2014, 10, :o1, 1414285200 + tz.transition 2015, 3, :o2, 1427590800 + tz.transition 2015, 10, :o1, 1445734800 + tz.transition 2016, 3, :o2, 1459040400 + tz.transition 2016, 10, :o1, 1477789200 + tz.transition 2017, 3, :o2, 1490490000 + tz.transition 2017, 10, :o1, 1509238800 + tz.transition 2018, 3, :o2, 1521939600 + tz.transition 2018, 10, :o1, 1540688400 + tz.transition 2019, 3, :o2, 1553994000 + tz.transition 2019, 10, :o1, 1572138000 + tz.transition 2020, 3, :o2, 1585443600 + tz.transition 2020, 10, :o1, 1603587600 + tz.transition 2021, 3, :o2, 1616893200 + tz.transition 2021, 10, :o1, 1635642000 + tz.transition 2022, 3, :o2, 1648342800 + tz.transition 2022, 10, :o1, 1667091600 + tz.transition 2023, 3, :o2, 1679792400 + tz.transition 
2023, 10, :o1, 1698541200 + tz.transition 2024, 3, :o2, 1711846800 + tz.transition 2024, 10, :o1, 1729990800 + tz.transition 2025, 3, :o2, 1743296400 + tz.transition 2025, 10, :o1, 1761440400 + tz.transition 2026, 3, :o2, 1774746000 + tz.transition 2026, 10, :o1, 1792890000 + tz.transition 2027, 3, :o2, 1806195600 + tz.transition 2027, 10, :o1, 1824944400 + tz.transition 2028, 3, :o2, 1837645200 + tz.transition 2028, 10, :o1, 1856394000 + tz.transition 2029, 3, :o2, 1869094800 + tz.transition 2029, 10, :o1, 1887843600 + tz.transition 2030, 3, :o2, 1901149200 + tz.transition 2030, 10, :o1, 1919293200 + tz.transition 2031, 3, :o2, 1932598800 + tz.transition 2031, 10, :o1, 1950742800 + tz.transition 2032, 3, :o2, 1964048400 + tz.transition 2032, 10, :o1, 1982797200 + tz.transition 2033, 3, :o2, 1995498000 + tz.transition 2033, 10, :o1, 2014246800 + tz.transition 2034, 3, :o2, 2026947600 + tz.transition 2034, 10, :o1, 2045696400 + tz.transition 2035, 3, :o2, 2058397200 + tz.transition 2035, 10, :o1, 2077146000 + tz.transition 2036, 3, :o2, 2090451600 + tz.transition 2036, 10, :o1, 2108595600 + tz.transition 2037, 3, :o2, 2121901200 + tz.transition 2037, 10, :o1, 2140045200 + tz.transition 2038, 3, :o2, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o1, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o2, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o1, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o2, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o1, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o2, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o1, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o2, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o1, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o2, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o1, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o2, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o1, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o2, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o1, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o2, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o1, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o2, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o1, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o2, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o1, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o2, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o1, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o2, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o1, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o2, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o1, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o2, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o1, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o2, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o1, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o2, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o1, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o2, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o1, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o2, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o1, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o2, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o1, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o2, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o1, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o2, 
2816211600, 59356381, 24 + tz.transition 2059, 10, :o1, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o2, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o1, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o2, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o1, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o2, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o1, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o2, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o1, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o2, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o1, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o2, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o1, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o2, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o1, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o2, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o1, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o2, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o1, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o2, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o1, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o2, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o1, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Paris.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Paris.rb new file mode 100644 index 0000000..e16ba37 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Paris.rb @@ -0,0 +1,277 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Paris + include TimezoneDefinition + + timezone 'Europe/Paris' do |tz| + tz.offset :o0, 561, 0, :LMT + tz.offset :o1, 561, 0, :PMT + tz.offset :o2, 0, 0, :WET + tz.offset :o3, 0, 3600, :WEST + tz.offset :o4, 3600, 3600, :CEST + tz.offset :o5, 3600, 0, :CET + tz.offset :o6, 0, 7200, :WEMT + + tz.transition 1891, 3, :o1, -2486592561, 69460055813, 28800 + tz.transition 1911, 3, :o2, -1855958961, 69670267013, 28800 + tz.transition 1916, 6, :o3, -1689814800, 58104707, 24 + tz.transition 1916, 10, :o2, -1680397200, 58107323, 24 + tz.transition 1917, 3, :o3, -1665363600, 58111499, 24 + tz.transition 1917, 10, :o2, -1648342800, 58116227, 24 + tz.transition 1918, 3, :o3, -1635123600, 58119899, 24 + tz.transition 1918, 10, :o2, -1616893200, 58124963, 24 + tz.transition 1919, 3, :o3, -1604278800, 58128467, 24 + tz.transition 1919, 10, :o2, -1585443600, 58133699, 24 + tz.transition 1920, 2, :o3, -1574038800, 58136867, 24 + tz.transition 1920, 10, :o2, -1552266000, 58142915, 24 + tz.transition 1921, 3, :o3, -1539997200, 58146323, 24 + tz.transition 1921, 10, :o2, -1520557200, 58151723, 24 + tz.transition 1922, 3, :o3, -1507510800, 58155347, 24 + tz.transition 1922, 10, :o2, -1490576400, 58160051, 24 + tz.transition 1923, 5, :o3, -1470618000, 58165595, 24 + tz.transition 1923, 10, :o2, -1459126800, 58168787, 24 + tz.transition 1924, 3, :o3, -1444006800, 58172987, 24 + tz.transition 1924, 10, :o2, -1427677200, 58177523, 24 + tz.transition 1925, 4, :o3, -1411952400, 58181891, 24 + tz.transition 1925, 10, :o2, -1396227600, 58186259, 24 + tz.transition 1926, 4, :o3, -1379293200, 58190963, 24 + tz.transition 1926, 10, :o2, -1364778000, 58194995, 24 + tz.transition 1927, 4, :o3, -1348448400, 58199531, 24 + tz.transition 1927, 10, :o2, -1333328400, 58203731, 24 + tz.transition 1928, 4, :o3, -1316394000, 58208435, 24 + tz.transition 1928, 10, :o2, -1301274000, 58212635, 24 + tz.transition 1929, 4, :o3, -1284339600, 58217339, 24 + tz.transition 1929, 10, :o2, -1269824400, 58221371, 24 + tz.transition 1930, 4, :o3, -1253494800, 58225907, 24 + tz.transition 1930, 10, :o2, -1238374800, 58230107, 24 + tz.transition 1931, 4, :o3, -1221440400, 58234811, 24 + tz.transition 1931, 10, :o2, -1206925200, 58238843, 24 + tz.transition 1932, 4, :o3, -1191200400, 58243211, 24 + tz.transition 1932, 10, :o2, -1175475600, 58247579, 24 + tz.transition 1933, 3, :o3, -1160355600, 58251779, 24 + tz.transition 1933, 10, :o2, -1143421200, 58256483, 24 + tz.transition 1934, 4, :o3, -1127696400, 58260851, 24 + tz.transition 1934, 10, :o2, -1111971600, 58265219, 24 + tz.transition 1935, 3, :o3, -1096851600, 58269419, 24 + tz.transition 1935, 10, :o2, -1080522000, 58273955, 24 + tz.transition 1936, 4, :o3, -1063587600, 58278659, 24 + tz.transition 1936, 10, :o2, -1049072400, 58282691, 24 + tz.transition 1937, 4, :o3, -1033347600, 58287059, 24 + tz.transition 1937, 10, :o2, -1017622800, 58291427, 24 + tz.transition 1938, 3, :o3, -1002502800, 58295627, 24 + tz.transition 1938, 10, :o2, -986173200, 58300163, 24 + tz.transition 1939, 4, :o3, -969238800, 58304867, 24 + tz.transition 1939, 11, :o2, -950490000, 58310075, 24 + tz.transition 1940, 2, :o3, -942012000, 29156215, 12 + tz.transition 1940, 6, :o4, -932436000, 29157545, 12 + tz.transition 1942, 11, :o5, -857257200, 58335973, 24 + tz.transition 1943, 3, :o4, -844556400, 58339501, 24 + tz.transition 1943, 10, :o5, -828226800, 58344037, 24 + tz.transition 1944, 4, :o4, -812502000, 58348405, 24 + tz.transition 1944, 
8, :o6, -800071200, 29175929, 12 + tz.transition 1944, 10, :o3, -796266000, 58352915, 24 + tz.transition 1945, 4, :o6, -781052400, 58357141, 24 + tz.transition 1945, 9, :o5, -766623600, 58361149, 24 + tz.transition 1976, 3, :o4, 196819200 + tz.transition 1976, 9, :o5, 212540400 + tz.transition 1977, 4, :o4, 228877200 + tz.transition 1977, 9, :o5, 243997200 + tz.transition 1978, 4, :o4, 260326800 + tz.transition 1978, 10, :o5, 276051600 + tz.transition 1979, 4, :o4, 291776400 + tz.transition 1979, 9, :o5, 307501200 + tz.transition 1980, 4, :o4, 323830800 + tz.transition 1980, 9, :o5, 338950800 + tz.transition 1981, 3, :o4, 354675600 + tz.transition 1981, 9, :o5, 370400400 + tz.transition 1982, 3, :o4, 386125200 + tz.transition 1982, 9, :o5, 401850000 + tz.transition 1983, 3, :o4, 417574800 + tz.transition 1983, 9, :o5, 433299600 + tz.transition 1984, 3, :o4, 449024400 + tz.transition 1984, 9, :o5, 465354000 + tz.transition 1985, 3, :o4, 481078800 + tz.transition 1985, 9, :o5, 496803600 + tz.transition 1986, 3, :o4, 512528400 + tz.transition 1986, 9, :o5, 528253200 + tz.transition 1987, 3, :o4, 543978000 + tz.transition 1987, 9, :o5, 559702800 + tz.transition 1988, 3, :o4, 575427600 + tz.transition 1988, 9, :o5, 591152400 + tz.transition 1989, 3, :o4, 606877200 + tz.transition 1989, 9, :o5, 622602000 + tz.transition 1990, 3, :o4, 638326800 + tz.transition 1990, 9, :o5, 654656400 + tz.transition 1991, 3, :o4, 670381200 + tz.transition 1991, 9, :o5, 686106000 + tz.transition 1992, 3, :o4, 701830800 + tz.transition 1992, 9, :o5, 717555600 + tz.transition 1993, 3, :o4, 733280400 + tz.transition 1993, 9, :o5, 749005200 + tz.transition 1994, 3, :o4, 764730000 + tz.transition 1994, 9, :o5, 780454800 + tz.transition 1995, 3, :o4, 796179600 + tz.transition 1995, 9, :o5, 811904400 + tz.transition 1996, 3, :o4, 828234000 + tz.transition 1996, 10, :o5, 846378000 + tz.transition 1997, 3, :o4, 859683600 + tz.transition 1997, 10, :o5, 877827600 + tz.transition 1998, 3, :o4, 891133200 + tz.transition 1998, 10, :o5, 909277200 + tz.transition 1999, 3, :o4, 922582800 + tz.transition 1999, 10, :o5, 941331600 + tz.transition 2000, 3, :o4, 954032400 + tz.transition 2000, 10, :o5, 972781200 + tz.transition 2001, 3, :o4, 985482000 + tz.transition 2001, 10, :o5, 1004230800 + tz.transition 2002, 3, :o4, 1017536400 + tz.transition 2002, 10, :o5, 1035680400 + tz.transition 2003, 3, :o4, 1048986000 + tz.transition 2003, 10, :o5, 1067130000 + tz.transition 2004, 3, :o4, 1080435600 + tz.transition 2004, 10, :o5, 1099184400 + tz.transition 2005, 3, :o4, 1111885200 + tz.transition 2005, 10, :o5, 1130634000 + tz.transition 2006, 3, :o4, 1143334800 + tz.transition 2006, 10, :o5, 1162083600 + tz.transition 2007, 3, :o4, 1174784400 + tz.transition 2007, 10, :o5, 1193533200 + tz.transition 2008, 3, :o4, 1206838800 + tz.transition 2008, 10, :o5, 1224982800 + tz.transition 2009, 3, :o4, 1238288400 + tz.transition 2009, 10, :o5, 1256432400 + tz.transition 2010, 3, :o4, 1269738000 + tz.transition 2010, 10, :o5, 1288486800 + tz.transition 2011, 3, :o4, 1301187600 + tz.transition 2011, 10, :o5, 1319936400 + tz.transition 2012, 3, :o4, 1332637200 + tz.transition 2012, 10, :o5, 1351386000 + tz.transition 2013, 3, :o4, 1364691600 + tz.transition 2013, 10, :o5, 1382835600 + tz.transition 2014, 3, :o4, 1396141200 + tz.transition 2014, 10, :o5, 1414285200 + tz.transition 2015, 3, :o4, 1427590800 + tz.transition 2015, 10, :o5, 1445734800 + tz.transition 2016, 3, :o4, 1459040400 + tz.transition 2016, 10, :o5, 1477789200 + tz.transition 2017, 
3, :o4, 1490490000 + tz.transition 2017, 10, :o5, 1509238800 + tz.transition 2018, 3, :o4, 1521939600 + tz.transition 2018, 10, :o5, 1540688400 + tz.transition 2019, 3, :o4, 1553994000 + tz.transition 2019, 10, :o5, 1572138000 + tz.transition 2020, 3, :o4, 1585443600 + tz.transition 2020, 10, :o5, 1603587600 + tz.transition 2021, 3, :o4, 1616893200 + tz.transition 2021, 10, :o5, 1635642000 + tz.transition 2022, 3, :o4, 1648342800 + tz.transition 2022, 10, :o5, 1667091600 + tz.transition 2023, 3, :o4, 1679792400 + tz.transition 2023, 10, :o5, 1698541200 + tz.transition 2024, 3, :o4, 1711846800 + tz.transition 2024, 10, :o5, 1729990800 + tz.transition 2025, 3, :o4, 1743296400 + tz.transition 2025, 10, :o5, 1761440400 + tz.transition 2026, 3, :o4, 1774746000 + tz.transition 2026, 10, :o5, 1792890000 + tz.transition 2027, 3, :o4, 1806195600 + tz.transition 2027, 10, :o5, 1824944400 + tz.transition 2028, 3, :o4, 1837645200 + tz.transition 2028, 10, :o5, 1856394000 + tz.transition 2029, 3, :o4, 1869094800 + tz.transition 2029, 10, :o5, 1887843600 + tz.transition 2030, 3, :o4, 1901149200 + tz.transition 2030, 10, :o5, 1919293200 + tz.transition 2031, 3, :o4, 1932598800 + tz.transition 2031, 10, :o5, 1950742800 + tz.transition 2032, 3, :o4, 1964048400 + tz.transition 2032, 10, :o5, 1982797200 + tz.transition 2033, 3, :o4, 1995498000 + tz.transition 2033, 10, :o5, 2014246800 + tz.transition 2034, 3, :o4, 2026947600 + tz.transition 2034, 10, :o5, 2045696400 + tz.transition 2035, 3, :o4, 2058397200 + tz.transition 2035, 10, :o5, 2077146000 + tz.transition 2036, 3, :o4, 2090451600 + tz.transition 2036, 10, :o5, 2108595600 + tz.transition 2037, 3, :o4, 2121901200 + tz.transition 2037, 10, :o5, 2140045200 + tz.transition 2038, 3, :o4, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o5, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o4, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o5, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o4, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o5, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o4, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o5, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o4, 2279754000, 59207365, 24 + tz.transition 2042, 10, :o5, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o4, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o5, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o4, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o5, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o4, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o5, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o4, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o5, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o4, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o5, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o4, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o5, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o4, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o5, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o4, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o5, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o4, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o5, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o4, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o5, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o4, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o5, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o4, 2658358800, 59312533, 24 + 
tz.transition 2054, 10, :o5, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o4, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o5, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o4, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o5, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o4, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o5, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o4, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o5, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o4, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o5, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o4, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o5, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o4, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o5, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o4, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o5, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o4, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o5, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o4, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o5, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o4, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o5, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o4, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o5, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o4, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o5, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o4, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o5, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o4, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o5, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o4, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o5, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Prague.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Prague.rb new file mode 100644 index 0000000..9db4208 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/Europe/Prague.rb @@ -0,0 +1,235 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). 
+ +module TZInfo + module Data + module Definitions + module Europe + module Prague + include TimezoneDefinition + + timezone 'Europe/Prague' do |tz| + tz.offset :o0, 3464, 0, :LMT + tz.offset :o1, 3464, 0, :PMT + tz.offset :o2, 3600, 0, :CET + tz.offset :o3, 3600, 3600, :CEST + tz.offset :o4, 3600, -3600, :GMT + + tz.transition 1849, 12, :o1, -3786829064, 25884991367, 10800 + tz.transition 1891, 9, :o2, -2469401864, 26049669767, 10800 + tz.transition 1916, 4, :o3, -1693706400, 29051813, 12 + tz.transition 1916, 9, :o2, -1680483600, 58107299, 24 + tz.transition 1917, 4, :o3, -1663455600, 58112029, 24 + tz.transition 1917, 9, :o2, -1650150000, 58115725, 24 + tz.transition 1918, 4, :o3, -1632006000, 58120765, 24 + tz.transition 1918, 9, :o2, -1618700400, 58124461, 24 + tz.transition 1940, 4, :o3, -938905200, 58313293, 24 + tz.transition 1942, 11, :o2, -857257200, 58335973, 24 + tz.transition 1943, 3, :o3, -844556400, 58339501, 24 + tz.transition 1943, 10, :o2, -828226800, 58344037, 24 + tz.transition 1944, 4, :o3, -812502000, 58348405, 24 + tz.transition 1944, 10, :o2, -796777200, 58352773, 24 + tz.transition 1945, 4, :o3, -781052400, 58357141, 24 + tz.transition 1945, 10, :o2, -765327600, 58361509, 24 + tz.transition 1946, 5, :o3, -746578800, 58366717, 24 + tz.transition 1946, 10, :o2, -733359600, 58370389, 24 + tz.transition 1946, 12, :o4, -728517600, 29185867, 12 + tz.transition 1947, 2, :o2, -721260000, 29186875, 12 + tz.transition 1947, 4, :o3, -716425200, 58375093, 24 + tz.transition 1947, 10, :o2, -701910000, 58379125, 24 + tz.transition 1948, 4, :o3, -684975600, 58383829, 24 + tz.transition 1948, 10, :o2, -670460400, 58387861, 24 + tz.transition 1949, 4, :o3, -654217200, 58392373, 24 + tz.transition 1949, 10, :o2, -639010800, 58396597, 24 + tz.transition 1979, 4, :o3, 291776400 + tz.transition 1979, 9, :o2, 307501200 + tz.transition 1980, 4, :o3, 323830800 + tz.transition 1980, 9, :o2, 338950800 + tz.transition 1981, 3, :o3, 354675600 + tz.transition 1981, 9, :o2, 370400400 + tz.transition 1982, 3, :o3, 386125200 + tz.transition 1982, 9, :o2, 401850000 + tz.transition 1983, 3, :o3, 417574800 + tz.transition 1983, 9, :o2, 433299600 + tz.transition 1984, 3, :o3, 449024400 + tz.transition 1984, 9, :o2, 465354000 + tz.transition 1985, 3, :o3, 481078800 + tz.transition 1985, 9, :o2, 496803600 + tz.transition 1986, 3, :o3, 512528400 + tz.transition 1986, 9, :o2, 528253200 + tz.transition 1987, 3, :o3, 543978000 + tz.transition 1987, 9, :o2, 559702800 + tz.transition 1988, 3, :o3, 575427600 + tz.transition 1988, 9, :o2, 591152400 + tz.transition 1989, 3, :o3, 606877200 + tz.transition 1989, 9, :o2, 622602000 + tz.transition 1990, 3, :o3, 638326800 + tz.transition 1990, 9, :o2, 654656400 + tz.transition 1991, 3, :o3, 670381200 + tz.transition 1991, 9, :o2, 686106000 + tz.transition 1992, 3, :o3, 701830800 + tz.transition 1992, 9, :o2, 717555600 + tz.transition 1993, 3, :o3, 733280400 + tz.transition 1993, 9, :o2, 749005200 + tz.transition 1994, 3, :o3, 764730000 + tz.transition 1994, 9, :o2, 780454800 + tz.transition 1995, 3, :o3, 796179600 + tz.transition 1995, 9, :o2, 811904400 + tz.transition 1996, 3, :o3, 828234000 + tz.transition 1996, 10, :o2, 846378000 + tz.transition 1997, 3, :o3, 859683600 + tz.transition 1997, 10, :o2, 877827600 + tz.transition 1998, 3, :o3, 891133200 + tz.transition 1998, 10, :o2, 909277200 + tz.transition 1999, 3, :o3, 922582800 + tz.transition 1999, 10, :o2, 941331600 + tz.transition 2000, 3, :o3, 954032400 + tz.transition 2000, 10, :o2, 972781200 + 
tz.transition 2001, 3, :o3, 985482000 + tz.transition 2001, 10, :o2, 1004230800 + tz.transition 2002, 3, :o3, 1017536400 + tz.transition 2002, 10, :o2, 1035680400 + tz.transition 2003, 3, :o3, 1048986000 + tz.transition 2003, 10, :o2, 1067130000 + tz.transition 2004, 3, :o3, 1080435600 + tz.transition 2004, 10, :o2, 1099184400 + tz.transition 2005, 3, :o3, 1111885200 + tz.transition 2005, 10, :o2, 1130634000 + tz.transition 2006, 3, :o3, 1143334800 + tz.transition 2006, 10, :o2, 1162083600 + tz.transition 2007, 3, :o3, 1174784400 + tz.transition 2007, 10, :o2, 1193533200 + tz.transition 2008, 3, :o3, 1206838800 + tz.transition 2008, 10, :o2, 1224982800 + tz.transition 2009, 3, :o3, 1238288400 + tz.transition 2009, 10, :o2, 1256432400 + tz.transition 2010, 3, :o3, 1269738000 + tz.transition 2010, 10, :o2, 1288486800 + tz.transition 2011, 3, :o3, 1301187600 + tz.transition 2011, 10, :o2, 1319936400 + tz.transition 2012, 3, :o3, 1332637200 + tz.transition 2012, 10, :o2, 1351386000 + tz.transition 2013, 3, :o3, 1364691600 + tz.transition 2013, 10, :o2, 1382835600 + tz.transition 2014, 3, :o3, 1396141200 + tz.transition 2014, 10, :o2, 1414285200 + tz.transition 2015, 3, :o3, 1427590800 + tz.transition 2015, 10, :o2, 1445734800 + tz.transition 2016, 3, :o3, 1459040400 + tz.transition 2016, 10, :o2, 1477789200 + tz.transition 2017, 3, :o3, 1490490000 + tz.transition 2017, 10, :o2, 1509238800 + tz.transition 2018, 3, :o3, 1521939600 + tz.transition 2018, 10, :o2, 1540688400 + tz.transition 2019, 3, :o3, 1553994000 + tz.transition 2019, 10, :o2, 1572138000 + tz.transition 2020, 3, :o3, 1585443600 + tz.transition 2020, 10, :o2, 1603587600 + tz.transition 2021, 3, :o3, 1616893200 + tz.transition 2021, 10, :o2, 1635642000 + tz.transition 2022, 3, :o3, 1648342800 + tz.transition 2022, 10, :o2, 1667091600 + tz.transition 2023, 3, :o3, 1679792400 + tz.transition 2023, 10, :o2, 1698541200 + tz.transition 2024, 3, :o3, 1711846800 + tz.transition 2024, 10, :o2, 1729990800 + tz.transition 2025, 3, :o3, 1743296400 + tz.transition 2025, 10, :o2, 1761440400 + tz.transition 2026, 3, :o3, 1774746000 + tz.transition 2026, 10, :o2, 1792890000 + tz.transition 2027, 3, :o3, 1806195600 + tz.transition 2027, 10, :o2, 1824944400 + tz.transition 2028, 3, :o3, 1837645200 + tz.transition 2028, 10, :o2, 1856394000 + tz.transition 2029, 3, :o3, 1869094800 + tz.transition 2029, 10, :o2, 1887843600 + tz.transition 2030, 3, :o3, 1901149200 + tz.transition 2030, 10, :o2, 1919293200 + tz.transition 2031, 3, :o3, 1932598800 + tz.transition 2031, 10, :o2, 1950742800 + tz.transition 2032, 3, :o3, 1964048400 + tz.transition 2032, 10, :o2, 1982797200 + tz.transition 2033, 3, :o3, 1995498000 + tz.transition 2033, 10, :o2, 2014246800 + tz.transition 2034, 3, :o3, 2026947600 + tz.transition 2034, 10, :o2, 2045696400 + tz.transition 2035, 3, :o3, 2058397200 + tz.transition 2035, 10, :o2, 2077146000 + tz.transition 2036, 3, :o3, 2090451600 + tz.transition 2036, 10, :o2, 2108595600 + tz.transition 2037, 3, :o3, 2121901200 + tz.transition 2037, 10, :o2, 2140045200 + tz.transition 2038, 3, :o3, 2153350800, 59172253, 24 + tz.transition 2038, 10, :o2, 2172099600, 59177461, 24 + tz.transition 2039, 3, :o3, 2184800400, 59180989, 24 + tz.transition 2039, 10, :o2, 2203549200, 59186197, 24 + tz.transition 2040, 3, :o3, 2216250000, 59189725, 24 + tz.transition 2040, 10, :o2, 2234998800, 59194933, 24 + tz.transition 2041, 3, :o3, 2248304400, 59198629, 24 + tz.transition 2041, 10, :o2, 2266448400, 59203669, 24 + tz.transition 2042, 3, :o3, 2279754000, 
59207365, 24 + tz.transition 2042, 10, :o2, 2297898000, 59212405, 24 + tz.transition 2043, 3, :o3, 2311203600, 59216101, 24 + tz.transition 2043, 10, :o2, 2329347600, 59221141, 24 + tz.transition 2044, 3, :o3, 2342653200, 59224837, 24 + tz.transition 2044, 10, :o2, 2361402000, 59230045, 24 + tz.transition 2045, 3, :o3, 2374102800, 59233573, 24 + tz.transition 2045, 10, :o2, 2392851600, 59238781, 24 + tz.transition 2046, 3, :o3, 2405552400, 59242309, 24 + tz.transition 2046, 10, :o2, 2424301200, 59247517, 24 + tz.transition 2047, 3, :o3, 2437606800, 59251213, 24 + tz.transition 2047, 10, :o2, 2455750800, 59256253, 24 + tz.transition 2048, 3, :o3, 2469056400, 59259949, 24 + tz.transition 2048, 10, :o2, 2487200400, 59264989, 24 + tz.transition 2049, 3, :o3, 2500506000, 59268685, 24 + tz.transition 2049, 10, :o2, 2519254800, 59273893, 24 + tz.transition 2050, 3, :o3, 2531955600, 59277421, 24 + tz.transition 2050, 10, :o2, 2550704400, 59282629, 24 + tz.transition 2051, 3, :o3, 2563405200, 59286157, 24 + tz.transition 2051, 10, :o2, 2582154000, 59291365, 24 + tz.transition 2052, 3, :o3, 2595459600, 59295061, 24 + tz.transition 2052, 10, :o2, 2613603600, 59300101, 24 + tz.transition 2053, 3, :o3, 2626909200, 59303797, 24 + tz.transition 2053, 10, :o2, 2645053200, 59308837, 24 + tz.transition 2054, 3, :o3, 2658358800, 59312533, 24 + tz.transition 2054, 10, :o2, 2676502800, 59317573, 24 + tz.transition 2055, 3, :o3, 2689808400, 59321269, 24 + tz.transition 2055, 10, :o2, 2708557200, 59326477, 24 + tz.transition 2056, 3, :o3, 2721258000, 59330005, 24 + tz.transition 2056, 10, :o2, 2740006800, 59335213, 24 + tz.transition 2057, 3, :o3, 2752707600, 59338741, 24 + tz.transition 2057, 10, :o2, 2771456400, 59343949, 24 + tz.transition 2058, 3, :o3, 2784762000, 59347645, 24 + tz.transition 2058, 10, :o2, 2802906000, 59352685, 24 + tz.transition 2059, 3, :o3, 2816211600, 59356381, 24 + tz.transition 2059, 10, :o2, 2834355600, 59361421, 24 + tz.transition 2060, 3, :o3, 2847661200, 59365117, 24 + tz.transition 2060, 10, :o2, 2866410000, 59370325, 24 + tz.transition 2061, 3, :o3, 2879110800, 59373853, 24 + tz.transition 2061, 10, :o2, 2897859600, 59379061, 24 + tz.transition 2062, 3, :o3, 2910560400, 59382589, 24 + tz.transition 2062, 10, :o2, 2929309200, 59387797, 24 + tz.transition 2063, 3, :o3, 2942010000, 59391325, 24 + tz.transition 2063, 10, :o2, 2960758800, 59396533, 24 + tz.transition 2064, 3, :o3, 2974064400, 59400229, 24 + tz.transition 2064, 10, :o2, 2992208400, 59405269, 24 + tz.transition 2065, 3, :o3, 3005514000, 59408965, 24 + tz.transition 2065, 10, :o2, 3023658000, 59414005, 24 + tz.transition 2066, 3, :o3, 3036963600, 59417701, 24 + tz.transition 2066, 10, :o2, 3055712400, 59422909, 24 + tz.transition 2067, 3, :o3, 3068413200, 59426437, 24 + tz.transition 2067, 10, :o2, 3087162000, 59431645, 24 + tz.transition 2068, 3, :o3, 3099862800, 59435173, 24 + tz.transition 2068, 10, :o2, 3118611600, 59440381, 24 + tz.transition 2069, 3, :o3, 3131917200, 59444077, 24 + tz.transition 2069, 10, :o2, 3150061200, 59449117, 24 + tz.transition 2070, 3, :o3, 3163366800, 59452813, 24 + tz.transition 2070, 10, :o2, 3181510800, 59457853, 24 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/UTC.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/UTC.rb new file mode 100644 index 0000000..2fec70c --- /dev/null +++ 
b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/definitions/UTC.rb @@ -0,0 +1,16 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Definitions + module UTC + include TimezoneDefinition + + linked_timezone 'UTC', 'Etc/UTC' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/countries.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/countries.rb new file mode 100644 index 0000000..689e68b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/countries.rb @@ -0,0 +1,940 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Indexes + module Countries + include CountryIndexDefinition + + country 'AD', 'Andorra' do |c| + c.timezone 'Europe/Andorra', 85, 2, 91, 60 + end + country 'AE', 'United Arab Emirates' do |c| + c.timezone 'Asia/Dubai', 253, 10, 553, 10 + end + country 'AF', 'Afghanistan' do |c| + c.timezone 'Asia/Kabul', 2071, 60, 346, 5 + end + country 'AG', 'Antigua & Barbuda' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'AI', 'Anguilla' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'AL', 'Albania' do |c| + c.timezone 'Europe/Tirane', 124, 3, 119, 6 + end + country 'AM', 'Armenia' do |c| + c.timezone 'Asia/Yerevan', 2411, 60, 89, 2 + end + country 'AO', 'Angola' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'AQ', 'Antarctica' do |c| + c.timezone 'Antarctica/Casey', -3977, 60, 6631, 60, 'Casey' + c.timezone 'Antarctica/Davis', -823, 12, 2339, 30, 'Davis' + c.timezone 'Antarctica/DumontDUrville', -200, 3, 8401, 60, 'Dumont-d\'Urville' + c.timezone 'Antarctica/Mawson', -338, 5, 3773, 60, 'Mawson' + c.timezone 'Antarctica/Palmer', -324, 5, -641, 10, 'Palmer' + c.timezone 'Antarctica/Rothera', -2027, 30, -1022, 15, 'Rothera' + c.timezone 'Antarctica/Syowa', -124211, 1800, 3959, 100, 'Syowa' + c.timezone 'Antarctica/Troll', -259241, 3600, 507, 200, 'Troll' + c.timezone 'Antarctica/Vostok', -392, 5, 1069, 10, 'Vostok' + c.timezone 'Pacific/Auckland', -553, 15, 5243, 30, 'New Zealand time' + end + country 'AR', 'Argentina' do |c| + c.timezone 'America/Argentina/Buenos_Aires', -173, 5, -1169, 20, 'Buenos Aires (BA, CF)' + c.timezone 'America/Argentina/Cordoba', -157, 5, -3851, 60, 'Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF)' + c.timezone 'America/Argentina/Salta', -1487, 60, -785, 12, 'Salta (SA, LP, NQ, RN)' + c.timezone 'America/Argentina/Jujuy', -1451, 60, -653, 10, 'Jujuy (JY)' + c.timezone 'America/Argentina/Tucuman', -1609, 60, -3913, 60, 'Tucumán (TM)' + c.timezone 'America/Argentina/Catamarca', -427, 15, -3947, 60, 'Catamarca (CT); Chubut (CH)' + c.timezone 'America/Argentina/La_Rioja', -883, 30, -1337, 20, 'La Rioja (LR)' + c.timezone 'America/Argentina/San_Juan', -473, 15, -4111, 60, 'San Juan (SJ)' + c.timezone 'America/Argentina/Mendoza', -1973, 60, -4129, 60, 'Mendoza (MZ)' + c.timezone 'America/Argentina/San_Luis', -1999, 60, -1327, 20, 'San Luis (SL)' + c.timezone 'America/Argentina/Rio_Gallegos', -1549, 30, -4153, 60, 'Santa Cruz (SC)' + c.timezone 'America/Argentina/Ushuaia', -274, 5, -683, 10, 'Tierra del Fuego (TF)' + end + country 'AS', 'Samoa (American)' do 
|c| + c.timezone 'Pacific/Pago_Pago', -214, 15, -1707, 10, 'Samoa, Midway' + end + country 'AT', 'Austria' do |c| + c.timezone 'Europe/Vienna', 2893, 60, 49, 3 + end + country 'AU', 'Australia' do |c| + c.timezone 'Australia/Lord_Howe', -631, 20, 1909, 12, 'Lord Howe Island' + c.timezone 'Antarctica/Macquarie', -109, 2, 3179, 20, 'Macquarie Island' + c.timezone 'Australia/Hobart', -2573, 60, 8839, 60, 'Tasmania (most areas)' + c.timezone 'Australia/Currie', -599, 15, 2158, 15, 'Tasmania (King Island)' + c.timezone 'Australia/Melbourne', -2269, 60, 4349, 30, 'Victoria' + c.timezone 'Australia/Sydney', -508, 15, 9073, 60, 'New South Wales (most areas)' + c.timezone 'Australia/Broken_Hill', -639, 20, 2829, 20, 'New South Wales (Yancowinna)' + c.timezone 'Australia/Brisbane', -412, 15, 4591, 30, 'Queensland (most areas)' + c.timezone 'Australia/Lindeman', -304, 15, 149, 1, 'Queensland (Whitsunday Islands)' + c.timezone 'Australia/Adelaide', -419, 12, 1663, 12, 'South Australia' + c.timezone 'Australia/Darwin', -187, 15, 785, 6, 'Northern Territory' + c.timezone 'Australia/Perth', -639, 20, 2317, 20, 'Western Australia (most areas)' + c.timezone 'Australia/Eucla', -1903, 60, 1933, 15, 'Western Australia (Eucla)' + end + country 'AW', 'Aruba' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'AX', 'Åland Islands' do |c| + c.timezone 'Europe/Helsinki', 361, 6, 749, 30 + end + country 'AZ', 'Azerbaijan' do |c| + c.timezone 'Asia/Baku', 2423, 60, 997, 20 + end + country 'BA', 'Bosnia & Herzegovina' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'BB', 'Barbados' do |c| + c.timezone 'America/Barbados', 131, 10, -3577, 60 + end + country 'BD', 'Bangladesh' do |c| + c.timezone 'Asia/Dhaka', 1423, 60, 1085, 12 + end + country 'BE', 'Belgium' do |c| + c.timezone 'Europe/Brussels', 305, 6, 13, 3 + end + country 'BF', 'Burkina Faso' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'BG', 'Bulgaria' do |c| + c.timezone 'Europe/Sofia', 2561, 60, 1399, 60 + end + country 'BH', 'Bahrain' do |c| + c.timezone 'Asia/Qatar', 1517, 60, 773, 15 + end + country 'BI', 'Burundi' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'BJ', 'Benin' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'BL', 'St Barthelemy' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'BM', 'Bermuda' do |c| + c.timezone 'Atlantic/Bermuda', 1937, 60, -1943, 30 + end + country 'BN', 'Brunei' do |c| + c.timezone 'Asia/Brunei', 74, 15, 1379, 12 + end + country 'BO', 'Bolivia' do |c| + c.timezone 'America/La_Paz', -33, 2, -1363, 20 + end + country 'BQ', 'Caribbean NL' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'BR', 'Brazil' do |c| + c.timezone 'America/Noronha', -77, 20, -389, 12, 'Atlantic islands' + c.timezone 'America/Belem', -29, 20, -2909, 60, 'Pará (east); Amapá' + c.timezone 'America/Fortaleza', -223, 60, -77, 2, 'Brazil (northeast: MA, PI, CE, RN, PB)' + c.timezone 'America/Recife', -161, 20, -349, 10, 'Pernambuco' + c.timezone 'America/Araguaina', -36, 5, -241, 5, 'Tocantins' + c.timezone 'America/Maceio', -29, 3, -2143, 60, 'Alagoas, Sergipe' + c.timezone 'America/Bahia', -779, 60, -2311, 60, 'Bahia' + c.timezone 'America/Sao_Paulo', -353, 15, -2797, 60, 'Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS)' + c.timezone 'America/Campo_Grande', -409, 20, -3277, 60, 'Mato Grosso do Sul' + c.timezone 'America/Cuiaba', -187, 12, -673, 
12, 'Mato Grosso' + c.timezone 'America/Santarem', -73, 30, -823, 15, 'Pará (west)' + c.timezone 'America/Porto_Velho', -263, 30, -639, 10, 'Rondônia' + c.timezone 'America/Boa_Vista', 169, 60, -182, 3, 'Roraima' + c.timezone 'America/Manaus', -47, 15, -3601, 60, 'Amazonas (east)' + c.timezone 'America/Eirunepe', -20, 3, -1048, 15, 'Amazonas (west)' + c.timezone 'America/Rio_Branco', -299, 30, -339, 5, 'Acre' + end + country 'BS', 'Bahamas' do |c| + c.timezone 'America/Nassau', 301, 12, -1547, 20 + end + country 'BT', 'Bhutan' do |c| + c.timezone 'Asia/Thimphu', 412, 15, 1793, 20 + end + country 'BV', 'Bouvet Island' + country 'BW', 'Botswana' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'BY', 'Belarus' do |c| + c.timezone 'Europe/Minsk', 539, 10, 827, 30 + end + country 'BZ', 'Belize' do |c| + c.timezone 'America/Belize', 35, 2, -441, 5 + end + country 'CA', 'Canada' do |c| + c.timezone 'America/St_Johns', 1427, 30, -3163, 60, 'Newfoundland; Labrador (southeast)' + c.timezone 'America/Halifax', 893, 20, -318, 5, 'Atlantic - NS (most areas); PE' + c.timezone 'America/Glace_Bay', 231, 5, -1199, 20, 'Atlantic - NS (Cape Breton)' + c.timezone 'America/Moncton', 461, 10, -3887, 60, 'Atlantic - New Brunswick' + c.timezone 'America/Goose_Bay', 160, 3, -725, 12, 'Atlantic - Labrador (most areas)' + c.timezone 'America/Blanc-Sablon', 617, 12, -3427, 60, 'AST - QC (Lower North Shore)' + c.timezone 'America/Toronto', 873, 20, -4763, 60, 'Eastern - ON, QC (most areas)' + c.timezone 'America/Nipigon', 2941, 60, -1324, 15, 'Eastern - ON, QC (no DST 1967-73)' + c.timezone 'America/Thunder_Bay', 2903, 60, -357, 4, 'Eastern - ON (Thunder Bay)' + c.timezone 'America/Iqaluit', 956, 15, -1027, 15, 'Eastern - NU (most east areas)' + c.timezone 'America/Pangnirtung', 992, 15, -986, 15, 'Eastern - NU (Pangnirtung)' + c.timezone 'America/Atikokan', 175531, 3600, -54973, 600, 'EST - ON (Atikokan); NU (Coral H)' + c.timezone 'America/Winnipeg', 2993, 60, -1943, 20, 'Central - ON (west); Manitoba' + c.timezone 'America/Rainy_River', 2923, 60, -2837, 30, 'Central - ON (Rainy R, Ft Frances)' + c.timezone 'America/Resolute', 33613, 450, -22759, 240, 'Central - NU (Resolute)' + c.timezone 'America/Rankin_Inlet', 3769, 60, -331499, 3600, 'Central - NU (central)' + c.timezone 'America/Regina', 252, 5, -2093, 20, 'CST - SK (most areas)' + c.timezone 'America/Swift_Current', 3017, 60, -647, 6, 'CST - SK (midwest)' + c.timezone 'America/Edmonton', 1071, 20, -1702, 15, 'Mountain - AB; BC (E); SK (W)' + c.timezone 'America/Cambridge_Bay', 24881, 360, -37819, 360, 'Mountain - NU (west)' + c.timezone 'America/Yellowknife', 1249, 20, -2287, 20, 'Mountain - NT (central)' + c.timezone 'America/Inuvik', 246059, 3600, -8023, 60, 'Mountain - NT (west)' + c.timezone 'America/Creston', 491, 10, -6991, 60, 'MST - BC (Creston)' + c.timezone 'America/Dawson_Creek', 1793, 30, -3607, 30, 'MST - BC (Dawson Cr, Ft St John)' + c.timezone 'America/Fort_Nelson', 294, 5, -1227, 10, 'MST - BC (Ft Nelson)' + c.timezone 'America/Vancouver', 739, 15, -7387, 60, 'Pacific - BC (most areas)' + c.timezone 'America/Whitehorse', 3643, 60, -2701, 20, 'Pacific - Yukon (east)' + c.timezone 'America/Dawson', 961, 15, -1673, 12, 'Pacific - Yukon (west)' + end + country 'CC', 'Cocos (Keeling) Islands' do |c| + c.timezone 'Indian/Cocos', -73, 6, 1163, 12 + end + country 'CD', 'Congo (Dem. 
Rep.)' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CF', 'Central African Rep.' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CG', 'Congo (Rep.)' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CH', 'Switzerland' do |c| + c.timezone 'Europe/Zurich', 2843, 60, 128, 15, 'Swiss time' + end + country 'CI', 'Côte d\'Ivoire' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'CK', 'Cook Islands' do |c| + c.timezone 'Pacific/Rarotonga', -637, 30, -4793, 30 + end + country 'CL', 'Chile' do |c| + c.timezone 'America/Santiago', -669, 20, -212, 3, 'Chile (most areas)' + c.timezone 'America/Punta_Arenas', -1063, 20, -851, 12, 'Region of Magallanes' + c.timezone 'Pacific/Easter', -543, 20, -3283, 30, 'Easter Island' + end + country 'CM', 'Cameroon' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'CN', 'China' do |c| + c.timezone 'Asia/Shanghai', 937, 30, 1822, 15, 'Beijing Time' + c.timezone 'Asia/Urumqi', 219, 5, 1051, 12, 'Xinjiang Time' + end + country 'CO', 'Colombia' do |c| + c.timezone 'America/Bogota', 23, 5, -889, 12 + end + country 'CR', 'Costa Rica' do |c| + c.timezone 'America/Costa_Rica', 149, 15, -1009, 12 + end + country 'CU', 'Cuba' do |c| + c.timezone 'America/Havana', 347, 15, -2471, 30 + end + country 'CV', 'Cape Verde' do |c| + c.timezone 'Atlantic/Cape_Verde', 179, 12, -1411, 60 + end + country 'CW', 'Curaçao' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'CX', 'Christmas Island' do |c| + c.timezone 'Indian/Christmas', -125, 12, 6343, 60 + end + country 'CY', 'Cyprus' do |c| + c.timezone 'Asia/Nicosia', 211, 6, 1001, 30, 'Cyprus (most areas)' + c.timezone 'Asia/Famagusta', 2107, 60, 679, 20, 'Northern Cyprus' + end + country 'CZ', 'Czech Republic' do |c| + c.timezone 'Europe/Prague', 601, 12, 433, 30 + end + country 'DE', 'Germany' do |c| + c.timezone 'Europe/Berlin', 105, 2, 401, 30, 'Germany (most areas)' + c.timezone 'Europe/Zurich', 2843, 60, 128, 15, 'Swiss time' + end + country 'DJ', 'Djibouti' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'DK', 'Denmark' do |c| + c.timezone 'Europe/Copenhagen', 167, 3, 151, 12 + end + country 'DM', 'Dominica' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'DO', 'Dominican Republic' do |c| + c.timezone 'America/Santo_Domingo', 277, 15, -699, 10 + end + country 'DZ', 'Algeria' do |c| + c.timezone 'Africa/Algiers', 2207, 60, 61, 20 + end + country 'EC', 'Ecuador' do |c| + c.timezone 'America/Guayaquil', -13, 6, -479, 6, 'Ecuador (mainland)' + c.timezone 'Pacific/Galapagos', -9, 10, -448, 5, 'Galápagos Islands' + end + country 'EE', 'Estonia' do |c| + c.timezone 'Europe/Tallinn', 713, 12, 99, 4 + end + country 'EG', 'Egypt' do |c| + c.timezone 'Africa/Cairo', 601, 20, 125, 4 + end + country 'EH', 'Western Sahara' do |c| + c.timezone 'Africa/El_Aaiun', 543, 20, -66, 5 + end + country 'ER', 'Eritrea' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'ES', 'Spain' do |c| + c.timezone 'Europe/Madrid', 202, 5, -221, 60, 'Spain (mainland)' + c.timezone 'Africa/Ceuta', 2153, 60, -319, 60, 'Ceuta, Melilla' + c.timezone 'Atlantic/Canary', 281, 10, -77, 5, 'Canary Islands' + end + country 'ET', 'Ethiopia' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'FI', 'Finland' do |c| + 
c.timezone 'Europe/Helsinki', 361, 6, 749, 30 + end + country 'FJ', 'Fiji' do |c| + c.timezone 'Pacific/Fiji', -272, 15, 2141, 12 + end + country 'FK', 'Falkland Islands' do |c| + c.timezone 'Atlantic/Stanley', -517, 10, -1157, 20 + end + country 'FM', 'Micronesia' do |c| + c.timezone 'Pacific/Chuuk', 89, 12, 9107, 60, 'Chuuk/Truk, Yap' + c.timezone 'Pacific/Pohnpei', 209, 30, 9493, 60, 'Pohnpei/Ponape' + c.timezone 'Pacific/Kosrae', 319, 60, 9779, 60, 'Kosrae' + end + country 'FO', 'Faroe Islands' do |c| + c.timezone 'Atlantic/Faroe', 3721, 60, -203, 30 + end + country 'FR', 'France' do |c| + c.timezone 'Europe/Paris', 733, 15, 7, 3 + end + country 'GA', 'Gabon' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'GB', 'Britain (UK)' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'GD', 'Grenada' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'GE', 'Georgia' do |c| + c.timezone 'Asia/Tbilisi', 2503, 60, 2689, 60 + end + country 'GF', 'French Guiana' do |c| + c.timezone 'America/Cayenne', 74, 15, -157, 3 + end + country 'GG', 'Guernsey' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'GH', 'Ghana' do |c| + c.timezone 'Africa/Accra', 111, 20, -13, 60 + end + country 'GI', 'Gibraltar' do |c| + c.timezone 'Europe/Gibraltar', 542, 15, -107, 20 + end + country 'GL', 'Greenland' do |c| + c.timezone 'America/Nuuk', 3851, 60, -776, 15, 'Greenland (most areas)' + c.timezone 'America/Danmarkshavn', 2303, 30, -56, 3, 'National Park (east coast)' + c.timezone 'America/Scoresbysund', 4229, 60, -659, 30, 'Scoresbysund/Ittoqqortoormiit' + c.timezone 'America/Thule', 2297, 30, -4127, 60, 'Thule/Pituffik' + end + country 'GM', 'Gambia' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'GN', 'Guinea' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'GP', 'Guadeloupe' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'GQ', 'Equatorial Guinea' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'GR', 'Greece' do |c| + c.timezone 'Europe/Athens', 1139, 30, 1423, 60 + end + country 'GS', 'South Georgia & the South Sandwich Islands' do |c| + c.timezone 'Atlantic/South_Georgia', -814, 15, -548, 15 + end + country 'GT', 'Guatemala' do |c| + c.timezone 'America/Guatemala', 439, 30, -5431, 60 + end + country 'GU', 'Guam' do |c| + c.timezone 'Pacific/Guam', 202, 15, 579, 4 + end + country 'GW', 'Guinea-Bissau' do |c| + c.timezone 'Africa/Bissau', 237, 20, -187, 12 + end + country 'GY', 'Guyana' do |c| + c.timezone 'America/Guyana', 34, 5, -349, 6 + end + country 'HK', 'Hong Kong' do |c| + c.timezone 'Asia/Hong_Kong', 1337, 60, 2283, 20 + end + country 'HM', 'Heard Island & McDonald Islands' + country 'HN', 'Honduras' do |c| + c.timezone 'America/Tegucigalpa', 141, 10, -5233, 60 + end + country 'HR', 'Croatia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'HT', 'Haiti' do |c| + c.timezone 'America/Port-au-Prince', 278, 15, -217, 3 + end + country 'HU', 'Hungary' do |c| + c.timezone 'Europe/Budapest', 95, 2, 229, 12 + end + country 'ID', 'Indonesia' do |c| + c.timezone 'Asia/Jakarta', -37, 6, 534, 5, 'Java, Sumatra' + c.timezone 'Asia/Pontianak', -1, 30, 328, 3, 'Borneo (west, central)' + c.timezone 'Asia/Makassar', -307, 60, 597, 5, 'Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west)' + c.timezone 'Asia/Jayapura', -38, 15, 1407, 10, 'New Guinea 
(West Papua / Irian Jaya); Malukus/Moluccas' + end + country 'IE', 'Ireland' do |c| + c.timezone 'Europe/Dublin', 160, 3, -25, 4 + end + country 'IL', 'Israel' do |c| + c.timezone 'Asia/Jerusalem', 11441, 360, 63403, 1800 + end + country 'IM', 'Isle of Man' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'IN', 'India' do |c| + c.timezone 'Asia/Kolkata', 338, 15, 2651, 30 + end + country 'IO', 'British Indian Ocean Territory' do |c| + c.timezone 'Indian/Chagos', -22, 3, 869, 12 + end + country 'IQ', 'Iraq' do |c| + c.timezone 'Asia/Baghdad', 667, 20, 533, 12 + end + country 'IR', 'Iran' do |c| + c.timezone 'Asia/Tehran', 107, 3, 1543, 30 + end + country 'IS', 'Iceland' do |c| + c.timezone 'Atlantic/Reykjavik', 1283, 20, -437, 20 + end + country 'IT', 'Italy' do |c| + c.timezone 'Europe/Rome', 419, 10, 749, 60 + end + country 'JE', 'Jersey' do |c| + c.timezone 'Europe/London', 6181, 120, -451, 3600 + end + country 'JM', 'Jamaica' do |c| + c.timezone 'America/Jamaica', 12937, 720, -11519, 150 + end + country 'JO', 'Jordan' do |c| + c.timezone 'Asia/Amman', 639, 20, 539, 15 + end + country 'JP', 'Japan' do |c| + c.timezone 'Asia/Tokyo', 32089, 900, 503081, 3600 + end + country 'KE', 'Kenya' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'KG', 'Kyrgyzstan' do |c| + c.timezone 'Asia/Bishkek', 429, 10, 373, 5 + end + country 'KH', 'Cambodia' do |c| + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'KI', 'Kiribati' do |c| + c.timezone 'Pacific/Tarawa', 17, 12, 173, 1, 'Gilbert Islands' + c.timezone 'Pacific/Enderbury', -47, 15, -2053, 12, 'Phoenix Islands' + c.timezone 'Pacific/Kiritimati', 28, 15, -472, 3, 'Line Islands' + end + country 'KM', 'Comoros' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'KN', 'St Kitts & Nevis' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'KP', 'Korea (North)' do |c| + c.timezone 'Asia/Pyongyang', 2341, 60, 503, 4 + end + country 'KR', 'Korea (South)' do |c| + c.timezone 'Asia/Seoul', 751, 20, 3809, 30 + end + country 'KW', 'Kuwait' do |c| + c.timezone 'Asia/Riyadh', 739, 30, 2803, 60 + end + country 'KY', 'Cayman Islands' do |c| + c.timezone 'America/Panama', 269, 30, -1193, 15 + end + country 'KZ', 'Kazakhstan' do |c| + c.timezone 'Asia/Almaty', 173, 4, 1539, 20, 'Kazakhstan (most areas)' + c.timezone 'Asia/Qyzylorda', 224, 5, 982, 15, 'Qyzylorda/Kyzylorda/Kzyl-Orda' + c.timezone 'Asia/Qostanay', 266, 5, 3817, 60, 'Qostanay/Kostanay/Kustanay' + c.timezone 'Asia/Aqtobe', 3017, 60, 343, 6, 'Aqtöbe/Aktobe' + c.timezone 'Asia/Aqtau', 2671, 60, 754, 15, 'Mangghystaū/Mankistau' + c.timezone 'Asia/Atyrau', 2827, 60, 779, 15, 'Atyraū/Atirau/Gur\'yev' + c.timezone 'Asia/Oral', 3073, 60, 1027, 20, 'West Kazakhstan' + end + country 'LA', 'Laos' do |c| + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'LB', 'Lebanon' do |c| + c.timezone 'Asia/Beirut', 2033, 60, 71, 2 + end + country 'LC', 'St Lucia' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'LI', 'Liechtenstein' do |c| + c.timezone 'Europe/Zurich', 2843, 60, 128, 15, 'Swiss time' + end + country 'LK', 'Sri Lanka' do |c| + c.timezone 'Asia/Colombo', 104, 15, 1597, 20 + end + country 'LR', 'Liberia' do |c| + c.timezone 'Africa/Monrovia', 63, 10, -647, 60 + end + country 'LS', 'Lesotho' do |c| + c.timezone 'Africa/Johannesburg', -105, 4, 28, 1 + end + country 'LT', 'Lithuania' do |c| + c.timezone 
'Europe/Vilnius', 3281, 60, 1519, 60 + end + country 'LU', 'Luxembourg' do |c| + c.timezone 'Europe/Luxembourg', 248, 5, 123, 20 + end + country 'LV', 'Latvia' do |c| + c.timezone 'Europe/Riga', 1139, 20, 241, 10 + end + country 'LY', 'Libya' do |c| + c.timezone 'Africa/Tripoli', 329, 10, 791, 60 + end + country 'MA', 'Morocco' do |c| + c.timezone 'Africa/Casablanca', 673, 20, -91, 12 + end + country 'MC', 'Monaco' do |c| + c.timezone 'Europe/Monaco', 437, 10, 443, 60 + end + country 'MD', 'Moldova' do |c| + c.timezone 'Europe/Chisinau', 47, 1, 173, 6 + end + country 'ME', 'Montenegro' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'MF', 'St Martin (French)' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'MG', 'Madagascar' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'MH', 'Marshall Islands' do |c| + c.timezone 'Pacific/Majuro', 143, 20, 856, 5, 'Marshall Islands (most areas)' + c.timezone 'Pacific/Kwajalein', 109, 12, 502, 3, 'Kwajalein' + end + country 'MK', 'North Macedonia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'ML', 'Mali' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'MM', 'Myanmar (Burma)' do |c| + c.timezone 'Asia/Yangon', 1007, 60, 577, 6 + end + country 'MN', 'Mongolia' do |c| + c.timezone 'Asia/Ulaanbaatar', 575, 12, 6413, 60, 'Mongolia (most areas)' + c.timezone 'Asia/Hovd', 2881, 60, 1833, 20, 'Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan' + c.timezone 'Asia/Choibalsan', 721, 15, 229, 2, 'Dornod, Sükhbaatar' + end + country 'MO', 'Macau' do |c| + c.timezone 'Asia/Macau', 7991, 360, 2725, 24 + end + country 'MP', 'Northern Mariana Islands' do |c| + c.timezone 'Pacific/Guam', 202, 15, 579, 4 + end + country 'MQ', 'Martinique' do |c| + c.timezone 'America/Martinique', 73, 5, -733, 12 + end + country 'MR', 'Mauritania' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'MS', 'Montserrat' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'MT', 'Malta' do |c| + c.timezone 'Europe/Malta', 359, 10, 871, 60 + end + country 'MU', 'Mauritius' do |c| + c.timezone 'Indian/Mauritius', -121, 6, 115, 2 + end + country 'MV', 'Maldives' do |c| + c.timezone 'Indian/Maldives', 25, 6, 147, 2 + end + country 'MW', 'Malawi' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'MX', 'Mexico' do |c| + c.timezone 'America/Mexico_City', 97, 5, -1983, 20, 'Central Time' + c.timezone 'America/Cancun', 253, 12, -2603, 30, 'Eastern Standard Time - Quintana Roo' + c.timezone 'America/Merida', 629, 30, -5377, 60, 'Central Time - Campeche, Yucatán' + c.timezone 'America/Monterrey', 77, 3, -6019, 60, 'Central Time - Durango; Coahuila, Nuevo León, Tamaulipas (most areas)' + c.timezone 'America/Matamoros', 155, 6, -195, 2, 'Central Time US - Coahuila, Nuevo León, Tamaulipas (US border)' + c.timezone 'America/Mazatlan', 1393, 60, -1277, 12, 'Mountain Time - Baja California Sur, Nayarit, Sinaloa' + c.timezone 'America/Chihuahua', 859, 30, -1273, 12, 'Mountain Time - Chihuahua (most areas)' + c.timezone 'America/Ojinaga', 887, 30, -1253, 12, 'Mountain Time US - Chihuahua (US border)' + c.timezone 'America/Hermosillo', 436, 15, -3329, 30, 'Mountain Standard Time - Sonora' + c.timezone 'America/Tijuana', 488, 15, -7021, 60, 'Pacific Time US - Baja California' + c.timezone 'America/Bahia_Banderas', 104, 5, -421, 4, 'Central Time - Bahía de Banderas' + end + country 'MY', 'Malaysia' do |c| + 
c.timezone 'Asia/Kuala_Lumpur', 19, 6, 1017, 10, 'Malaysia (peninsula)' + c.timezone 'Asia/Kuching', 31, 20, 331, 3, 'Sabah, Sarawak' + end + country 'MZ', 'Mozambique' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'NA', 'Namibia' do |c| + c.timezone 'Africa/Windhoek', -677, 30, 171, 10 + end + country 'NC', 'New Caledonia' do |c| + c.timezone 'Pacific/Noumea', -334, 15, 3329, 20 + end + country 'NE', 'Niger' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'NF', 'Norfolk Island' do |c| + c.timezone 'Pacific/Norfolk', -581, 20, 5039, 30 + end + country 'NG', 'Nigeria' do |c| + c.timezone 'Africa/Lagos', 129, 20, 17, 5, 'West Africa Time' + end + country 'NI', 'Nicaragua' do |c| + c.timezone 'America/Managua', 243, 20, -5177, 60 + end + country 'NL', 'Netherlands' do |c| + c.timezone 'Europe/Amsterdam', 1571, 30, 49, 10 + end + country 'NO', 'Norway' do |c| + c.timezone 'Europe/Oslo', 719, 12, 43, 4 + end + country 'NP', 'Nepal' do |c| + c.timezone 'Asia/Kathmandu', 1663, 60, 5119, 60 + end + country 'NR', 'Nauru' do |c| + c.timezone 'Pacific/Nauru', -31, 60, 2003, 12 + end + country 'NU', 'Niue' do |c| + c.timezone 'Pacific/Niue', -1141, 60, -2039, 12 + end + country 'NZ', 'New Zealand' do |c| + c.timezone 'Pacific/Auckland', -553, 15, 5243, 30, 'New Zealand time' + c.timezone 'Pacific/Chatham', -879, 20, -3531, 20, 'Chatham Islands' + end + country 'OM', 'Oman' do |c| + c.timezone 'Asia/Dubai', 253, 10, 553, 10 + end + country 'PA', 'Panama' do |c| + c.timezone 'America/Panama', 269, 30, -1193, 15 + end + country 'PE', 'Peru' do |c| + c.timezone 'America/Lima', -241, 20, -1541, 20 + end + country 'PF', 'French Polynesia' do |c| + c.timezone 'Pacific/Tahiti', -263, 15, -4487, 30, 'Society Islands' + c.timezone 'Pacific/Marquesas', -9, 1, -279, 2, 'Marquesas Islands' + c.timezone 'Pacific/Gambier', -347, 15, -2699, 20, 'Gambier Islands' + end + country 'PG', 'Papua New Guinea' do |c| + c.timezone 'Pacific/Port_Moresby', -19, 2, 883, 6, 'Papua New Guinea (most areas)' + c.timezone 'Pacific/Bougainville', -373, 60, 4667, 30, 'Bougainville' + end + country 'PH', 'Philippines' do |c| + c.timezone 'Asia/Manila', 175, 12, 121, 1 + end + country 'PK', 'Pakistan' do |c| + c.timezone 'Asia/Karachi', 373, 15, 1341, 20 + end + country 'PL', 'Poland' do |c| + c.timezone 'Europe/Warsaw', 209, 4, 21, 1 + end + country 'PM', 'St Pierre & Miquelon' do |c| + c.timezone 'America/Miquelon', 941, 20, -169, 3 + end + country 'PN', 'Pitcairn' do |c| + c.timezone 'Pacific/Pitcairn', -376, 15, -1561, 12 + end + country 'PR', 'Puerto Rico' do |c| + c.timezone 'America/Puerto_Rico', 11081, 600, -118991, 1800 + end + country 'PS', 'Palestine' do |c| + c.timezone 'Asia/Gaza', 63, 2, 517, 15, 'Gaza Strip' + c.timezone 'Asia/Hebron', 473, 15, 7019, 200, 'West Bank' + end + country 'PT', 'Portugal' do |c| + c.timezone 'Europe/Lisbon', 2323, 60, -137, 15, 'Portugal (mainland)' + c.timezone 'Atlantic/Madeira', 979, 30, -169, 10, 'Madeira Islands' + c.timezone 'Atlantic/Azores', 566, 15, -77, 3, 'Azores' + end + country 'PW', 'Palau' do |c| + c.timezone 'Pacific/Palau', 22, 3, 8069, 60 + end + country 'PY', 'Paraguay' do |c| + c.timezone 'America/Asuncion', -379, 15, -173, 3 + end + country 'QA', 'Qatar' do |c| + c.timezone 'Asia/Qatar', 1517, 60, 773, 15 + end + country 'RE', 'Réunion' do |c| + c.timezone 'Indian/Reunion', -313, 15, 832, 15, 'Réunion, Crozet, Scattered Islands' + end + country 'RO', 'Romania' do |c| + c.timezone 
'Europe/Bucharest', 1333, 30, 261, 10 + end + country 'RS', 'Serbia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'RU', 'Russia' do |c| + c.timezone 'Europe/Kaliningrad', 3283, 60, 41, 2, 'MSK-01 - Kaliningrad' + c.timezone 'Europe/Moscow', 66907, 1200, 8464, 225, 'MSK+00 - Moscow area' + c.timezone 'Europe/Simferopol', 899, 20, 341, 10, 'Crimea' + c.timezone 'Europe/Kirov', 293, 5, 993, 20, 'MSK+00 - Kirov' + c.timezone 'Europe/Astrakhan', 927, 20, 961, 20, 'MSK+01 - Astrakhan' + c.timezone 'Europe/Volgograd', 731, 15, 533, 12, 'MSK+01 - Volgograd' + c.timezone 'Europe/Saratov', 1547, 30, 1381, 30, 'MSK+01 - Saratov' + c.timezone 'Europe/Ulyanovsk', 163, 3, 242, 5, 'MSK+01 - Ulyanovsk' + c.timezone 'Europe/Samara', 266, 5, 1003, 20, 'MSK+01 - Samara, Udmurtia' + c.timezone 'Asia/Yekaterinburg', 1137, 20, 303, 5, 'MSK+02 - Urals' + c.timezone 'Asia/Omsk', 55, 1, 367, 5, 'MSK+03 - Omsk' + c.timezone 'Asia/Novosibirsk', 1651, 30, 995, 12, 'MSK+04 - Novosibirsk' + c.timezone 'Asia/Barnaul', 1601, 30, 335, 4, 'MSK+04 - Altai' + c.timezone 'Asia/Tomsk', 113, 2, 2549, 30, 'MSK+04 - Tomsk' + c.timezone 'Asia/Novokuznetsk', 215, 4, 5227, 60, 'MSK+04 - Kemerovo' + c.timezone 'Asia/Krasnoyarsk', 3361, 60, 557, 6, 'MSK+04 - Krasnoyarsk area' + c.timezone 'Asia/Irkutsk', 784, 15, 313, 3, 'MSK+05 - Irkutsk, Buryatia' + c.timezone 'Asia/Chita', 1041, 20, 1702, 15, 'MSK+06 - Zabaykalsky' + c.timezone 'Asia/Yakutsk', 62, 1, 389, 3, 'MSK+06 - Lena River' + c.timezone 'Asia/Khandyga', 225563, 3600, 243997, 1800, 'MSK+06 - Tomponsky, Ust-Maysky' + c.timezone 'Asia/Vladivostok', 259, 6, 1979, 15, 'MSK+07 - Amur River' + c.timezone 'Asia/Ust-Nera', 232417, 3600, 10742, 75, 'MSK+07 - Oymyakonsky' + c.timezone 'Asia/Magadan', 1787, 30, 754, 5, 'MSK+08 - Magadan' + c.timezone 'Asia/Sakhalin', 1409, 30, 1427, 10, 'MSK+08 - Sakhalin Island' + c.timezone 'Asia/Srednekolymsk', 1012, 15, 9223, 60, 'MSK+08 - Sakha (E); North Kuril Is' + c.timezone 'Asia/Kamchatka', 3181, 60, 3173, 20, 'MSK+09 - Kamchatka' + c.timezone 'Asia/Anadyr', 259, 4, 10649, 60, 'MSK+09 - Bering Sea' + end + country 'RW', 'Rwanda' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'SA', 'Saudi Arabia' do |c| + c.timezone 'Asia/Riyadh', 739, 30, 2803, 60 + end + country 'SB', 'Solomon Islands' do |c| + c.timezone 'Pacific/Guadalcanal', -143, 15, 801, 5 + end + country 'SC', 'Seychelles' do |c| + c.timezone 'Indian/Mahe', -14, 3, 832, 15 + end + country 'SD', 'Sudan' do |c| + c.timezone 'Africa/Khartoum', 78, 5, 488, 15 + end + country 'SE', 'Sweden' do |c| + c.timezone 'Europe/Stockholm', 178, 3, 361, 20 + end + country 'SG', 'Singapore' do |c| + c.timezone 'Asia/Singapore', 77, 60, 2077, 20 + end + country 'SH', 'St Helena' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'SI', 'Slovenia' do |c| + c.timezone 'Europe/Belgrade', 269, 6, 41, 2 + end + country 'SJ', 'Svalbard & Jan Mayen' do |c| + c.timezone 'Europe/Oslo', 719, 12, 43, 4 + end + country 'SK', 'Slovakia' do |c| + c.timezone 'Europe/Prague', 601, 12, 433, 30 + end + country 'SL', 'Sierra Leone' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'SM', 'San Marino' do |c| + c.timezone 'Europe/Rome', 419, 10, 749, 60 + end + country 'SN', 'Senegal' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'SO', 'Somalia' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'SR', 'Suriname' do |c| + c.timezone 'America/Paramaribo', 35, 6, -331, 6 + 
end + country 'SS', 'South Sudan' do |c| + c.timezone 'Africa/Juba', 97, 20, 1897, 60 + end + country 'ST', 'Sao Tome & Principe' do |c| + c.timezone 'Africa/Sao_Tome', 1, 3, 101, 15 + end + country 'SV', 'El Salvador' do |c| + c.timezone 'America/El_Salvador', 137, 10, -446, 5 + end + country 'SX', 'St Maarten (Dutch)' do |c| + c.timezone 'America/Curacao', 731, 60, -69, 1 + end + country 'SY', 'Syria' do |c| + c.timezone 'Asia/Damascus', 67, 2, 363, 10 + end + country 'SZ', 'Eswatini (Swaziland)' do |c| + c.timezone 'Africa/Johannesburg', -105, 4, 28, 1 + end + country 'TC', 'Turks & Caicos Is' do |c| + c.timezone 'America/Grand_Turk', 322, 15, -1067, 15 + end + country 'TD', 'Chad' do |c| + c.timezone 'Africa/Ndjamena', 727, 60, 301, 20 + end + country 'TF', 'French Southern & Antarctic Lands' do |c| + c.timezone 'Indian/Kerguelen', -17767, 360, 28087, 400, 'Kerguelen, St Paul Island, Amsterdam Island' + c.timezone 'Indian/Reunion', -313, 15, 832, 15, 'Réunion, Crozet, Scattered Islands' + end + country 'TG', 'Togo' do |c| + c.timezone 'Africa/Abidjan', 319, 60, -121, 30 + end + country 'TH', 'Thailand' do |c| + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'TJ', 'Tajikistan' do |c| + c.timezone 'Asia/Dushanbe', 463, 12, 344, 5 + end + country 'TK', 'Tokelau' do |c| + c.timezone 'Pacific/Fakaofo', -281, 30, -5137, 30 + end + country 'TL', 'East Timor' do |c| + c.timezone 'Asia/Dili', -171, 20, 1507, 12 + end + country 'TM', 'Turkmenistan' do |c| + c.timezone 'Asia/Ashgabat', 759, 20, 3503, 60 + end + country 'TN', 'Tunisia' do |c| + c.timezone 'Africa/Tunis', 184, 5, 611, 60 + end + country 'TO', 'Tonga' do |c| + c.timezone 'Pacific/Tongatapu', -127, 6, -1051, 6 + end + country 'TR', 'Turkey' do |c| + c.timezone 'Europe/Istanbul', 2461, 60, 869, 30 + end + country 'TT', 'Trinidad & Tobago' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'TV', 'Tuvalu' do |c| + c.timezone 'Pacific/Funafuti', -511, 60, 10753, 60 + end + country 'TW', 'Taiwan' do |c| + c.timezone 'Asia/Taipei', 501, 20, 243, 2 + end + country 'TZ', 'Tanzania' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'UA', 'Ukraine' do |c| + c.timezone 'Europe/Kiev', 1513, 30, 1831, 60, 'Ukraine (most areas)' + c.timezone 'Europe/Uzhgorod', 2917, 60, 223, 10, 'Transcarpathia' + c.timezone 'Europe/Zaporozhye', 287, 6, 211, 6, 'Zaporozhye and east Lugansk' + c.timezone 'Europe/Simferopol', 899, 20, 341, 10, 'Crimea' + end + country 'UG', 'Uganda' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'UM', 'US minor outlying islands' do |c| + c.timezone 'Pacific/Wake', 1157, 60, 9997, 60, 'Wake Island' + c.timezone 'Pacific/Pago_Pago', -214, 15, -1707, 10, 'Samoa, Midway' + c.timezone 'Pacific/Honolulu', 15341, 720, -18943, 120, 'Hawaii' + end + country 'US', 'United States' do |c| + c.timezone 'America/New_York', 48857, 1200, -266423, 3600, 'Eastern (most areas)' + c.timezone 'America/Detroit', 152393, 3600, -19931, 240, 'Eastern - MI (most areas)' + c.timezone 'America/Kentucky/Louisville', 9181, 240, -154367, 1800, 'Eastern - KY (Louisville area)' + c.timezone 'America/Kentucky/Monticello', 132587, 3600, -101819, 1200, 'Eastern - KY (Wayne)' + c.timezone 'America/Indiana/Indianapolis', 23861, 600, -310169, 3600, 'Eastern - IN (most areas)' + c.timezone 'America/Indiana/Vincennes', 69619, 1800, -315103, 3600, 'Eastern - IN (Da, Du, K, Mn)' + c.timezone 'America/Indiana/Winamac', 29557, 720, -311771, 3600, 'Eastern - IN 
(Pulaski)' + c.timezone 'America/Indiana/Marengo', 17269, 450, -310841, 3600, 'Eastern - IN (Crawford)' + c.timezone 'America/Indiana/Petersburg', 138571, 3600, -314203, 3600, 'Eastern - IN (Pike)' + c.timezone 'America/Indiana/Vevay', 34873, 900, -153121, 1800, 'Eastern - IN (Switzerland)' + c.timezone 'America/Chicago', 837, 20, -1753, 20, 'Central (most areas)' + c.timezone 'America/Indiana/Tell_City', 136631, 3600, -312341, 3600, 'Central - IN (Perry)' + c.timezone 'America/Indiana/Knox', 9911, 240, -693, 8, 'Central - IN (Starke)' + c.timezone 'America/Menominee', 40597, 900, -105137, 1200, 'Central - MI (Wisconsin border)' + c.timezone 'America/North_Dakota/Center', 169619, 3600, -121559, 1200, 'Central - ND (Oliver)' + c.timezone 'America/North_Dakota/New_Salem', 9369, 200, -121693, 1200, 'Central - ND (Morton rural)' + c.timezone 'America/North_Dakota/Beulah', 56717, 1200, -916, 9, 'Central - ND (Mercer)' + c.timezone 'America/Denver', 47687, 1200, -125981, 1200, 'Mountain (most areas)' + c.timezone 'America/Boise', 157009, 3600, -46481, 400, 'Mountain - ID (south); OR (east)' + c.timezone 'America/Phoenix', 20069, 600, -16811, 150, 'MST - Arizona (except Navajo)' + c.timezone 'America/Los_Angeles', 30647, 900, -212837, 1800, 'Pacific' + c.timezone 'America/Anchorage', 44077, 720, -539641, 3600, 'Alaska (most areas)' + c.timezone 'America/Juneau', 209887, 3600, -483911, 3600, 'Alaska - Juneau area' + c.timezone 'America/Sitka', 41167, 720, -487087, 3600, 'Alaska - Sitka area' + c.timezone 'America/Metlakatla', 198457, 3600, -18947, 144, 'Alaska - Annette Island' + c.timezone 'America/Yakutat', 214369, 3600, -251509, 1800, 'Alaska - Yakutat' + c.timezone 'America/Nome', 58051, 900, -595463, 3600, 'Alaska (west)' + c.timezone 'America/Adak', 1297, 25, -635969, 3600, 'Aleutian Islands' + c.timezone 'Pacific/Honolulu', 15341, 720, -18943, 120, 'Hawaii' + end + country 'UY', 'Uruguay' do |c| + c.timezone 'America/Montevideo', -41891, 1200, -4497, 80 + end + country 'UZ', 'Uzbekistan' do |c| + c.timezone 'Asia/Samarkand', 119, 3, 334, 5, 'Uzbekistan (west)' + c.timezone 'Asia/Tashkent', 124, 3, 693, 10, 'Uzbekistan (east)' + end + country 'VA', 'Vatican City' do |c| + c.timezone 'Europe/Rome', 419, 10, 749, 60 + end + country 'VC', 'St Vincent' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'VE', 'Venezuela' do |c| + c.timezone 'America/Caracas', 21, 2, -1004, 15 + end + country 'VG', 'Virgin Islands (UK)' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'VI', 'Virgin Islands (US)' do |c| + c.timezone 'America/Port_of_Spain', 213, 20, -3691, 60 + end + country 'VN', 'Vietnam' do |c| + c.timezone 'Asia/Ho_Chi_Minh', 43, 4, 320, 3, 'Vietnam (south)' + c.timezone 'Asia/Bangkok', 55, 4, 6031, 60, 'Indochina (most areas)' + end + country 'VU', 'Vanuatu' do |c| + c.timezone 'Pacific/Efate', -53, 3, 2021, 12 + end + country 'WF', 'Wallis & Futuna' do |c| + c.timezone 'Pacific/Wallis', -133, 10, -1057, 6 + end + country 'WS', 'Samoa (western)' do |c| + c.timezone 'Pacific/Apia', -83, 6, -2576, 15 + end + country 'YE', 'Yemen' do |c| + c.timezone 'Asia/Riyadh', 739, 30, 2803, 60 + end + country 'YT', 'Mayotte' do |c| + c.timezone 'Africa/Nairobi', -77, 60, 2209, 60 + end + country 'ZA', 'South Africa' do |c| + c.timezone 'Africa/Johannesburg', -105, 4, 28, 1 + end + country 'ZM', 'Zambia' do |c| + c.timezone 'Africa/Maputo', -779, 30, 391, 12, 'Central Africa Time' + end + country 'ZW', 'Zimbabwe' do |c| + c.timezone 'Africa/Maputo', 
-779, 30, 391, 12, 'Central Africa Time' + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/timezones.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/timezones.rb new file mode 100644 index 0000000..a42a257 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/indexes/timezones.rb @@ -0,0 +1,609 @@ +# encoding: UTF-8 + +# This file contains data derived from the IANA Time Zone Database +# (https://www.iana.org/time-zones). + +module TZInfo + module Data + module Indexes + module Timezones + include TimezoneIndexDefinition + + timezone 'Africa/Abidjan' + timezone 'Africa/Accra' + linked_timezone 'Africa/Addis_Ababa' + timezone 'Africa/Algiers' + linked_timezone 'Africa/Asmara' + linked_timezone 'Africa/Asmera' + linked_timezone 'Africa/Bamako' + linked_timezone 'Africa/Bangui' + linked_timezone 'Africa/Banjul' + timezone 'Africa/Bissau' + linked_timezone 'Africa/Blantyre' + linked_timezone 'Africa/Brazzaville' + linked_timezone 'Africa/Bujumbura' + timezone 'Africa/Cairo' + timezone 'Africa/Casablanca' + timezone 'Africa/Ceuta' + linked_timezone 'Africa/Conakry' + linked_timezone 'Africa/Dakar' + linked_timezone 'Africa/Dar_es_Salaam' + linked_timezone 'Africa/Djibouti' + linked_timezone 'Africa/Douala' + timezone 'Africa/El_Aaiun' + linked_timezone 'Africa/Freetown' + linked_timezone 'Africa/Gaborone' + linked_timezone 'Africa/Harare' + timezone 'Africa/Johannesburg' + timezone 'Africa/Juba' + linked_timezone 'Africa/Kampala' + timezone 'Africa/Khartoum' + linked_timezone 'Africa/Kigali' + linked_timezone 'Africa/Kinshasa' + timezone 'Africa/Lagos' + linked_timezone 'Africa/Libreville' + linked_timezone 'Africa/Lome' + linked_timezone 'Africa/Luanda' + linked_timezone 'Africa/Lubumbashi' + linked_timezone 'Africa/Lusaka' + linked_timezone 'Africa/Malabo' + timezone 'Africa/Maputo' + linked_timezone 'Africa/Maseru' + linked_timezone 'Africa/Mbabane' + linked_timezone 'Africa/Mogadishu' + timezone 'Africa/Monrovia' + timezone 'Africa/Nairobi' + timezone 'Africa/Ndjamena' + linked_timezone 'Africa/Niamey' + linked_timezone 'Africa/Nouakchott' + linked_timezone 'Africa/Ouagadougou' + linked_timezone 'Africa/Porto-Novo' + timezone 'Africa/Sao_Tome' + linked_timezone 'Africa/Timbuktu' + timezone 'Africa/Tripoli' + timezone 'Africa/Tunis' + timezone 'Africa/Windhoek' + timezone 'America/Adak' + timezone 'America/Anchorage' + linked_timezone 'America/Anguilla' + linked_timezone 'America/Antigua' + timezone 'America/Araguaina' + timezone 'America/Argentina/Buenos_Aires' + timezone 'America/Argentina/Catamarca' + linked_timezone 'America/Argentina/ComodRivadavia' + timezone 'America/Argentina/Cordoba' + timezone 'America/Argentina/Jujuy' + timezone 'America/Argentina/La_Rioja' + timezone 'America/Argentina/Mendoza' + timezone 'America/Argentina/Rio_Gallegos' + timezone 'America/Argentina/Salta' + timezone 'America/Argentina/San_Juan' + timezone 'America/Argentina/San_Luis' + timezone 'America/Argentina/Tucuman' + timezone 'America/Argentina/Ushuaia' + linked_timezone 'America/Aruba' + timezone 'America/Asuncion' + timezone 'America/Atikokan' + linked_timezone 'America/Atka' + timezone 'America/Bahia' + timezone 'America/Bahia_Banderas' + timezone 'America/Barbados' + timezone 'America/Belem' + timezone 'America/Belize' + timezone 'America/Blanc-Sablon' + timezone 'America/Boa_Vista' + timezone 'America/Bogota' + timezone 'America/Boise' + linked_timezone 
'America/Buenos_Aires' + timezone 'America/Cambridge_Bay' + timezone 'America/Campo_Grande' + timezone 'America/Cancun' + timezone 'America/Caracas' + linked_timezone 'America/Catamarca' + timezone 'America/Cayenne' + linked_timezone 'America/Cayman' + timezone 'America/Chicago' + timezone 'America/Chihuahua' + linked_timezone 'America/Coral_Harbour' + linked_timezone 'America/Cordoba' + timezone 'America/Costa_Rica' + timezone 'America/Creston' + timezone 'America/Cuiaba' + timezone 'America/Curacao' + timezone 'America/Danmarkshavn' + timezone 'America/Dawson' + timezone 'America/Dawson_Creek' + timezone 'America/Denver' + timezone 'America/Detroit' + linked_timezone 'America/Dominica' + timezone 'America/Edmonton' + timezone 'America/Eirunepe' + timezone 'America/El_Salvador' + linked_timezone 'America/Ensenada' + timezone 'America/Fort_Nelson' + linked_timezone 'America/Fort_Wayne' + timezone 'America/Fortaleza' + timezone 'America/Glace_Bay' + linked_timezone 'America/Godthab' + timezone 'America/Goose_Bay' + timezone 'America/Grand_Turk' + linked_timezone 'America/Grenada' + linked_timezone 'America/Guadeloupe' + timezone 'America/Guatemala' + timezone 'America/Guayaquil' + timezone 'America/Guyana' + timezone 'America/Halifax' + timezone 'America/Havana' + timezone 'America/Hermosillo' + timezone 'America/Indiana/Indianapolis' + timezone 'America/Indiana/Knox' + timezone 'America/Indiana/Marengo' + timezone 'America/Indiana/Petersburg' + timezone 'America/Indiana/Tell_City' + timezone 'America/Indiana/Vevay' + timezone 'America/Indiana/Vincennes' + timezone 'America/Indiana/Winamac' + linked_timezone 'America/Indianapolis' + timezone 'America/Inuvik' + timezone 'America/Iqaluit' + timezone 'America/Jamaica' + linked_timezone 'America/Jujuy' + timezone 'America/Juneau' + timezone 'America/Kentucky/Louisville' + timezone 'America/Kentucky/Monticello' + linked_timezone 'America/Knox_IN' + linked_timezone 'America/Kralendijk' + timezone 'America/La_Paz' + timezone 'America/Lima' + timezone 'America/Los_Angeles' + linked_timezone 'America/Louisville' + linked_timezone 'America/Lower_Princes' + timezone 'America/Maceio' + timezone 'America/Managua' + timezone 'America/Manaus' + linked_timezone 'America/Marigot' + timezone 'America/Martinique' + timezone 'America/Matamoros' + timezone 'America/Mazatlan' + linked_timezone 'America/Mendoza' + timezone 'America/Menominee' + timezone 'America/Merida' + timezone 'America/Metlakatla' + timezone 'America/Mexico_City' + timezone 'America/Miquelon' + timezone 'America/Moncton' + timezone 'America/Monterrey' + timezone 'America/Montevideo' + linked_timezone 'America/Montreal' + linked_timezone 'America/Montserrat' + timezone 'America/Nassau' + timezone 'America/New_York' + timezone 'America/Nipigon' + timezone 'America/Nome' + timezone 'America/Noronha' + timezone 'America/North_Dakota/Beulah' + timezone 'America/North_Dakota/Center' + timezone 'America/North_Dakota/New_Salem' + timezone 'America/Nuuk' + timezone 'America/Ojinaga' + timezone 'America/Panama' + timezone 'America/Pangnirtung' + timezone 'America/Paramaribo' + timezone 'America/Phoenix' + timezone 'America/Port-au-Prince' + timezone 'America/Port_of_Spain' + linked_timezone 'America/Porto_Acre' + timezone 'America/Porto_Velho' + timezone 'America/Puerto_Rico' + timezone 'America/Punta_Arenas' + timezone 'America/Rainy_River' + timezone 'America/Rankin_Inlet' + timezone 'America/Recife' + timezone 'America/Regina' + timezone 'America/Resolute' + timezone 'America/Rio_Branco' + 
linked_timezone 'America/Rosario' + linked_timezone 'America/Santa_Isabel' + timezone 'America/Santarem' + timezone 'America/Santiago' + timezone 'America/Santo_Domingo' + timezone 'America/Sao_Paulo' + timezone 'America/Scoresbysund' + linked_timezone 'America/Shiprock' + timezone 'America/Sitka' + linked_timezone 'America/St_Barthelemy' + timezone 'America/St_Johns' + linked_timezone 'America/St_Kitts' + linked_timezone 'America/St_Lucia' + linked_timezone 'America/St_Thomas' + linked_timezone 'America/St_Vincent' + timezone 'America/Swift_Current' + timezone 'America/Tegucigalpa' + timezone 'America/Thule' + timezone 'America/Thunder_Bay' + timezone 'America/Tijuana' + timezone 'America/Toronto' + linked_timezone 'America/Tortola' + timezone 'America/Vancouver' + linked_timezone 'America/Virgin' + timezone 'America/Whitehorse' + timezone 'America/Winnipeg' + timezone 'America/Yakutat' + timezone 'America/Yellowknife' + timezone 'Antarctica/Casey' + timezone 'Antarctica/Davis' + timezone 'Antarctica/DumontDUrville' + timezone 'Antarctica/Macquarie' + timezone 'Antarctica/Mawson' + linked_timezone 'Antarctica/McMurdo' + timezone 'Antarctica/Palmer' + timezone 'Antarctica/Rothera' + linked_timezone 'Antarctica/South_Pole' + timezone 'Antarctica/Syowa' + timezone 'Antarctica/Troll' + timezone 'Antarctica/Vostok' + linked_timezone 'Arctic/Longyearbyen' + linked_timezone 'Asia/Aden' + timezone 'Asia/Almaty' + timezone 'Asia/Amman' + timezone 'Asia/Anadyr' + timezone 'Asia/Aqtau' + timezone 'Asia/Aqtobe' + timezone 'Asia/Ashgabat' + linked_timezone 'Asia/Ashkhabad' + timezone 'Asia/Atyrau' + timezone 'Asia/Baghdad' + linked_timezone 'Asia/Bahrain' + timezone 'Asia/Baku' + timezone 'Asia/Bangkok' + timezone 'Asia/Barnaul' + timezone 'Asia/Beirut' + timezone 'Asia/Bishkek' + timezone 'Asia/Brunei' + linked_timezone 'Asia/Calcutta' + timezone 'Asia/Chita' + timezone 'Asia/Choibalsan' + linked_timezone 'Asia/Chongqing' + linked_timezone 'Asia/Chungking' + timezone 'Asia/Colombo' + linked_timezone 'Asia/Dacca' + timezone 'Asia/Damascus' + timezone 'Asia/Dhaka' + timezone 'Asia/Dili' + timezone 'Asia/Dubai' + timezone 'Asia/Dushanbe' + timezone 'Asia/Famagusta' + timezone 'Asia/Gaza' + linked_timezone 'Asia/Harbin' + timezone 'Asia/Hebron' + timezone 'Asia/Ho_Chi_Minh' + timezone 'Asia/Hong_Kong' + timezone 'Asia/Hovd' + timezone 'Asia/Irkutsk' + linked_timezone 'Asia/Istanbul' + timezone 'Asia/Jakarta' + timezone 'Asia/Jayapura' + timezone 'Asia/Jerusalem' + timezone 'Asia/Kabul' + timezone 'Asia/Kamchatka' + timezone 'Asia/Karachi' + linked_timezone 'Asia/Kashgar' + timezone 'Asia/Kathmandu' + linked_timezone 'Asia/Katmandu' + timezone 'Asia/Khandyga' + timezone 'Asia/Kolkata' + timezone 'Asia/Krasnoyarsk' + timezone 'Asia/Kuala_Lumpur' + timezone 'Asia/Kuching' + linked_timezone 'Asia/Kuwait' + linked_timezone 'Asia/Macao' + timezone 'Asia/Macau' + timezone 'Asia/Magadan' + timezone 'Asia/Makassar' + timezone 'Asia/Manila' + linked_timezone 'Asia/Muscat' + timezone 'Asia/Nicosia' + timezone 'Asia/Novokuznetsk' + timezone 'Asia/Novosibirsk' + timezone 'Asia/Omsk' + timezone 'Asia/Oral' + linked_timezone 'Asia/Phnom_Penh' + timezone 'Asia/Pontianak' + timezone 'Asia/Pyongyang' + timezone 'Asia/Qatar' + timezone 'Asia/Qostanay' + timezone 'Asia/Qyzylorda' + linked_timezone 'Asia/Rangoon' + timezone 'Asia/Riyadh' + linked_timezone 'Asia/Saigon' + timezone 'Asia/Sakhalin' + timezone 'Asia/Samarkand' + timezone 'Asia/Seoul' + timezone 'Asia/Shanghai' + timezone 'Asia/Singapore' + timezone 
'Asia/Srednekolymsk' + timezone 'Asia/Taipei' + timezone 'Asia/Tashkent' + timezone 'Asia/Tbilisi' + timezone 'Asia/Tehran' + linked_timezone 'Asia/Tel_Aviv' + linked_timezone 'Asia/Thimbu' + timezone 'Asia/Thimphu' + timezone 'Asia/Tokyo' + timezone 'Asia/Tomsk' + linked_timezone 'Asia/Ujung_Pandang' + timezone 'Asia/Ulaanbaatar' + linked_timezone 'Asia/Ulan_Bator' + timezone 'Asia/Urumqi' + timezone 'Asia/Ust-Nera' + linked_timezone 'Asia/Vientiane' + timezone 'Asia/Vladivostok' + timezone 'Asia/Yakutsk' + timezone 'Asia/Yangon' + timezone 'Asia/Yekaterinburg' + timezone 'Asia/Yerevan' + timezone 'Atlantic/Azores' + timezone 'Atlantic/Bermuda' + timezone 'Atlantic/Canary' + timezone 'Atlantic/Cape_Verde' + linked_timezone 'Atlantic/Faeroe' + timezone 'Atlantic/Faroe' + linked_timezone 'Atlantic/Jan_Mayen' + timezone 'Atlantic/Madeira' + timezone 'Atlantic/Reykjavik' + timezone 'Atlantic/South_Georgia' + linked_timezone 'Atlantic/St_Helena' + timezone 'Atlantic/Stanley' + linked_timezone 'Australia/ACT' + timezone 'Australia/Adelaide' + timezone 'Australia/Brisbane' + timezone 'Australia/Broken_Hill' + linked_timezone 'Australia/Canberra' + timezone 'Australia/Currie' + timezone 'Australia/Darwin' + timezone 'Australia/Eucla' + timezone 'Australia/Hobart' + linked_timezone 'Australia/LHI' + timezone 'Australia/Lindeman' + timezone 'Australia/Lord_Howe' + timezone 'Australia/Melbourne' + linked_timezone 'Australia/NSW' + linked_timezone 'Australia/North' + timezone 'Australia/Perth' + linked_timezone 'Australia/Queensland' + linked_timezone 'Australia/South' + timezone 'Australia/Sydney' + linked_timezone 'Australia/Tasmania' + linked_timezone 'Australia/Victoria' + linked_timezone 'Australia/West' + linked_timezone 'Australia/Yancowinna' + linked_timezone 'Brazil/Acre' + linked_timezone 'Brazil/DeNoronha' + linked_timezone 'Brazil/East' + linked_timezone 'Brazil/West' + timezone 'CET' + timezone 'CST6CDT' + linked_timezone 'Canada/Atlantic' + linked_timezone 'Canada/Central' + linked_timezone 'Canada/Eastern' + linked_timezone 'Canada/Mountain' + linked_timezone 'Canada/Newfoundland' + linked_timezone 'Canada/Pacific' + linked_timezone 'Canada/Saskatchewan' + linked_timezone 'Canada/Yukon' + linked_timezone 'Chile/Continental' + linked_timezone 'Chile/EasterIsland' + linked_timezone 'Cuba' + timezone 'EET' + timezone 'EST' + timezone 'EST5EDT' + linked_timezone 'Egypt' + linked_timezone 'Eire' + timezone 'Etc/GMT' + linked_timezone 'Etc/GMT+0' + timezone 'Etc/GMT+1' + timezone 'Etc/GMT+10' + timezone 'Etc/GMT+11' + timezone 'Etc/GMT+12' + timezone 'Etc/GMT+2' + timezone 'Etc/GMT+3' + timezone 'Etc/GMT+4' + timezone 'Etc/GMT+5' + timezone 'Etc/GMT+6' + timezone 'Etc/GMT+7' + timezone 'Etc/GMT+8' + timezone 'Etc/GMT+9' + linked_timezone 'Etc/GMT-0' + timezone 'Etc/GMT-1' + timezone 'Etc/GMT-10' + timezone 'Etc/GMT-11' + timezone 'Etc/GMT-12' + timezone 'Etc/GMT-13' + timezone 'Etc/GMT-14' + timezone 'Etc/GMT-2' + timezone 'Etc/GMT-3' + timezone 'Etc/GMT-4' + timezone 'Etc/GMT-5' + timezone 'Etc/GMT-6' + timezone 'Etc/GMT-7' + timezone 'Etc/GMT-8' + timezone 'Etc/GMT-9' + linked_timezone 'Etc/GMT0' + linked_timezone 'Etc/Greenwich' + linked_timezone 'Etc/UCT' + timezone 'Etc/UTC' + linked_timezone 'Etc/Universal' + linked_timezone 'Etc/Zulu' + timezone 'Europe/Amsterdam' + timezone 'Europe/Andorra' + timezone 'Europe/Astrakhan' + timezone 'Europe/Athens' + linked_timezone 'Europe/Belfast' + timezone 'Europe/Belgrade' + timezone 'Europe/Berlin' + linked_timezone 'Europe/Bratislava' + timezone 
'Europe/Brussels' + timezone 'Europe/Bucharest' + timezone 'Europe/Budapest' + linked_timezone 'Europe/Busingen' + timezone 'Europe/Chisinau' + timezone 'Europe/Copenhagen' + timezone 'Europe/Dublin' + timezone 'Europe/Gibraltar' + linked_timezone 'Europe/Guernsey' + timezone 'Europe/Helsinki' + linked_timezone 'Europe/Isle_of_Man' + timezone 'Europe/Istanbul' + linked_timezone 'Europe/Jersey' + timezone 'Europe/Kaliningrad' + timezone 'Europe/Kiev' + timezone 'Europe/Kirov' + timezone 'Europe/Lisbon' + linked_timezone 'Europe/Ljubljana' + timezone 'Europe/London' + timezone 'Europe/Luxembourg' + timezone 'Europe/Madrid' + timezone 'Europe/Malta' + linked_timezone 'Europe/Mariehamn' + timezone 'Europe/Minsk' + timezone 'Europe/Monaco' + timezone 'Europe/Moscow' + linked_timezone 'Europe/Nicosia' + timezone 'Europe/Oslo' + timezone 'Europe/Paris' + linked_timezone 'Europe/Podgorica' + timezone 'Europe/Prague' + timezone 'Europe/Riga' + timezone 'Europe/Rome' + timezone 'Europe/Samara' + linked_timezone 'Europe/San_Marino' + linked_timezone 'Europe/Sarajevo' + timezone 'Europe/Saratov' + timezone 'Europe/Simferopol' + linked_timezone 'Europe/Skopje' + timezone 'Europe/Sofia' + timezone 'Europe/Stockholm' + timezone 'Europe/Tallinn' + timezone 'Europe/Tirane' + linked_timezone 'Europe/Tiraspol' + timezone 'Europe/Ulyanovsk' + timezone 'Europe/Uzhgorod' + linked_timezone 'Europe/Vaduz' + linked_timezone 'Europe/Vatican' + timezone 'Europe/Vienna' + timezone 'Europe/Vilnius' + timezone 'Europe/Volgograd' + timezone 'Europe/Warsaw' + linked_timezone 'Europe/Zagreb' + timezone 'Europe/Zaporozhye' + timezone 'Europe/Zurich' + timezone 'Factory' + linked_timezone 'GB' + linked_timezone 'GB-Eire' + linked_timezone 'GMT' + linked_timezone 'GMT+0' + linked_timezone 'GMT-0' + linked_timezone 'GMT0' + linked_timezone 'Greenwich' + timezone 'HST' + linked_timezone 'Hongkong' + linked_timezone 'Iceland' + linked_timezone 'Indian/Antananarivo' + timezone 'Indian/Chagos' + timezone 'Indian/Christmas' + timezone 'Indian/Cocos' + linked_timezone 'Indian/Comoro' + timezone 'Indian/Kerguelen' + timezone 'Indian/Mahe' + timezone 'Indian/Maldives' + timezone 'Indian/Mauritius' + linked_timezone 'Indian/Mayotte' + timezone 'Indian/Reunion' + linked_timezone 'Iran' + linked_timezone 'Israel' + linked_timezone 'Jamaica' + linked_timezone 'Japan' + linked_timezone 'Kwajalein' + linked_timezone 'Libya' + timezone 'MET' + timezone 'MST' + timezone 'MST7MDT' + linked_timezone 'Mexico/BajaNorte' + linked_timezone 'Mexico/BajaSur' + linked_timezone 'Mexico/General' + linked_timezone 'NZ' + linked_timezone 'NZ-CHAT' + linked_timezone 'Navajo' + linked_timezone 'PRC' + timezone 'PST8PDT' + timezone 'Pacific/Apia' + timezone 'Pacific/Auckland' + timezone 'Pacific/Bougainville' + timezone 'Pacific/Chatham' + timezone 'Pacific/Chuuk' + timezone 'Pacific/Easter' + timezone 'Pacific/Efate' + timezone 'Pacific/Enderbury' + timezone 'Pacific/Fakaofo' + timezone 'Pacific/Fiji' + timezone 'Pacific/Funafuti' + timezone 'Pacific/Galapagos' + timezone 'Pacific/Gambier' + timezone 'Pacific/Guadalcanal' + timezone 'Pacific/Guam' + timezone 'Pacific/Honolulu' + linked_timezone 'Pacific/Johnston' + timezone 'Pacific/Kiritimati' + timezone 'Pacific/Kosrae' + timezone 'Pacific/Kwajalein' + timezone 'Pacific/Majuro' + timezone 'Pacific/Marquesas' + linked_timezone 'Pacific/Midway' + timezone 'Pacific/Nauru' + timezone 'Pacific/Niue' + timezone 'Pacific/Norfolk' + timezone 'Pacific/Noumea' + timezone 'Pacific/Pago_Pago' + timezone 
'Pacific/Palau' + timezone 'Pacific/Pitcairn' + timezone 'Pacific/Pohnpei' + linked_timezone 'Pacific/Ponape' + timezone 'Pacific/Port_Moresby' + timezone 'Pacific/Rarotonga' + linked_timezone 'Pacific/Saipan' + linked_timezone 'Pacific/Samoa' + timezone 'Pacific/Tahiti' + timezone 'Pacific/Tarawa' + timezone 'Pacific/Tongatapu' + linked_timezone 'Pacific/Truk' + timezone 'Pacific/Wake' + timezone 'Pacific/Wallis' + linked_timezone 'Pacific/Yap' + linked_timezone 'Poland' + linked_timezone 'Portugal' + linked_timezone 'ROC' + linked_timezone 'ROK' + linked_timezone 'Singapore' + linked_timezone 'Turkey' + linked_timezone 'UCT' + linked_timezone 'US/Alaska' + linked_timezone 'US/Aleutian' + linked_timezone 'US/Arizona' + linked_timezone 'US/Central' + linked_timezone 'US/East-Indiana' + linked_timezone 'US/Eastern' + linked_timezone 'US/Hawaii' + linked_timezone 'US/Indiana-Starke' + linked_timezone 'US/Michigan' + linked_timezone 'US/Mountain' + linked_timezone 'US/Pacific' + linked_timezone 'US/Samoa' + linked_timezone 'UTC' + linked_timezone 'Universal' + linked_timezone 'W-SU' + timezone 'WET' + linked_timezone 'Zulu' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/version.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/version.rb new file mode 100644 index 0000000..51e2d31 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/tzinfo-data/tzinfo/data/version.rb @@ -0,0 +1,20 @@ +module TZInfo + module Data + # TZInfo::Data version number. + VERSION = '1.2020.4' + + # TZInfo::Data version information. + module Version + # The format of the Ruby modules. The only format currently supported by + # TZInfo is version 1. + FORMAT = 1 + + # TZInfo::Data version number. + STRING = VERSION + + # The version of the {IANA Time Zone Database}[https://www.iana.org/time-zones] + # used to generate this version of TZInfo::Data. 
+ TZDATA = '2020d' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/America/Argentina/Buenos_Aires b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/America/Argentina/Buenos_Aires new file mode 100644 index 0000000..d6f999b Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/America/Argentina/Buenos_Aires differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/America/New_York b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/America/New_York new file mode 100644 index 0000000..2b6c2ee Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/America/New_York differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Australia/Melbourne b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Australia/Melbourne new file mode 100644 index 0000000..c7160da Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Australia/Melbourne differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/EST b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/EST new file mode 100644 index 0000000..3ae9691 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/EST differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Etc/UTC b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Etc/UTC new file mode 100644 index 0000000..00841a6 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Etc/UTC differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Amsterdam b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Amsterdam new file mode 100644 index 0000000..4a6fa1d Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Amsterdam differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Andorra b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Andorra new file mode 100644 index 0000000..38685d4 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Andorra differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/London b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/London new file mode 100644 index 0000000..323cd38 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/London differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Paris b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Paris new file mode 100644 index 0000000..00a2726 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Paris differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Prague b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Prague new file mode 100644 index 0000000..fb7c145 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Europe/Prague differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Factory b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Factory new file mode 100644 index 0000000..b4dd773 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/Factory differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/iso3166.tab 
b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/iso3166.tab new file mode 100644 index 0000000..a4ff61a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/iso3166.tab @@ -0,0 +1,274 @@ +# ISO 3166 alpha-2 country codes +# +# This file is in the public domain, so clarified as of +# 2009-05-17 by Arthur David Olson. +# +# From Paul Eggert (2015-05-02): +# This file contains a table of two-letter country codes. Columns are +# separated by a single tab. Lines beginning with '#' are comments. +# All text uses UTF-8 encoding. The columns of the table are as follows: +# +# 1. ISO 3166-1 alpha-2 country code, current as of +# ISO 3166-1 N976 (2018-11-06). See: Updates on ISO 3166-1 +# https://isotc.iso.org/livelink/livelink/Open/16944257 +# 2. The usual English name for the coded region, +# chosen so that alphabetic sorting of subsets produces helpful lists. +# This is not the same as the English name in the ISO 3166 tables. +# +# The table is sorted by country code. +# +# This table is intended as an aid for users, to help them select time +# zone data appropriate for their practical needs. It is not intended +# to take or endorse any position on legal or territorial claims. +# +#country- +#code name of country, territory, area, or subdivision +AD Andorra +AE United Arab Emirates +AF Afghanistan +AG Antigua & Barbuda +AI Anguilla +AL Albania +AM Armenia +AO Angola +AQ Antarctica +AR Argentina +AS Samoa (American) +AT Austria +AU Australia +AW Aruba +AX Åland Islands +AZ Azerbaijan +BA Bosnia & Herzegovina +BB Barbados +BD Bangladesh +BE Belgium +BF Burkina Faso +BG Bulgaria +BH Bahrain +BI Burundi +BJ Benin +BL St Barthelemy +BM Bermuda +BN Brunei +BO Bolivia +BQ Caribbean NL +BR Brazil +BS Bahamas +BT Bhutan +BV Bouvet Island +BW Botswana +BY Belarus +BZ Belize +CA Canada +CC Cocos (Keeling) Islands +CD Congo (Dem. Rep.) +CF Central African Rep. +CG Congo (Rep.) 
+CH Switzerland +CI Côte d'Ivoire +CK Cook Islands +CL Chile +CM Cameroon +CN China +CO Colombia +CR Costa Rica +CU Cuba +CV Cape Verde +CW Curaçao +CX Christmas Island +CY Cyprus +CZ Czech Republic +DE Germany +DJ Djibouti +DK Denmark +DM Dominica +DO Dominican Republic +DZ Algeria +EC Ecuador +EE Estonia +EG Egypt +EH Western Sahara +ER Eritrea +ES Spain +ET Ethiopia +FI Finland +FJ Fiji +FK Falkland Islands +FM Micronesia +FO Faroe Islands +FR France +GA Gabon +GB Britain (UK) +GD Grenada +GE Georgia +GF French Guiana +GG Guernsey +GH Ghana +GI Gibraltar +GL Greenland +GM Gambia +GN Guinea +GP Guadeloupe +GQ Equatorial Guinea +GR Greece +GS South Georgia & the South Sandwich Islands +GT Guatemala +GU Guam +GW Guinea-Bissau +GY Guyana +HK Hong Kong +HM Heard Island & McDonald Islands +HN Honduras +HR Croatia +HT Haiti +HU Hungary +ID Indonesia +IE Ireland +IL Israel +IM Isle of Man +IN India +IO British Indian Ocean Territory +IQ Iraq +IR Iran +IS Iceland +IT Italy +JE Jersey +JM Jamaica +JO Jordan +JP Japan +KE Kenya +KG Kyrgyzstan +KH Cambodia +KI Kiribati +KM Comoros +KN St Kitts & Nevis +KP Korea (North) +KR Korea (South) +KW Kuwait +KY Cayman Islands +KZ Kazakhstan +LA Laos +LB Lebanon +LC St Lucia +LI Liechtenstein +LK Sri Lanka +LR Liberia +LS Lesotho +LT Lithuania +LU Luxembourg +LV Latvia +LY Libya +MA Morocco +MC Monaco +MD Moldova +ME Montenegro +MF St Martin (French) +MG Madagascar +MH Marshall Islands +MK North Macedonia +ML Mali +MM Myanmar (Burma) +MN Mongolia +MO Macau +MP Northern Mariana Islands +MQ Martinique +MR Mauritania +MS Montserrat +MT Malta +MU Mauritius +MV Maldives +MW Malawi +MX Mexico +MY Malaysia +MZ Mozambique +NA Namibia +NC New Caledonia +NE Niger +NF Norfolk Island +NG Nigeria +NI Nicaragua +NL Netherlands +NO Norway +NP Nepal +NR Nauru +NU Niue +NZ New Zealand +OM Oman +PA Panama +PE Peru +PF French Polynesia +PG Papua New Guinea +PH Philippines +PK Pakistan +PL Poland +PM St Pierre & Miquelon +PN Pitcairn +PR Puerto Rico +PS Palestine +PT Portugal +PW Palau +PY Paraguay +QA Qatar +RE Réunion +RO Romania +RS Serbia +RU Russia +RW Rwanda +SA Saudi Arabia +SB Solomon Islands +SC Seychelles +SD Sudan +SE Sweden +SG Singapore +SH St Helena +SI Slovenia +SJ Svalbard & Jan Mayen +SK Slovakia +SL Sierra Leone +SM San Marino +SN Senegal +SO Somalia +SR Suriname +SS South Sudan +ST Sao Tome & Principe +SV El Salvador +SX St Maarten (Dutch) +SY Syria +SZ Eswatini (Swaziland) +TC Turks & Caicos Is +TD Chad +TF French Southern & Antarctic Lands +TG Togo +TH Thailand +TJ Tajikistan +TK Tokelau +TL East Timor +TM Turkmenistan +TN Tunisia +TO Tonga +TR Turkey +TT Trinidad & Tobago +TV Tuvalu +TW Taiwan +TZ Tanzania +UA Ukraine +UG Uganda +UM US minor outlying islands +US United States +UY Uruguay +UZ Uzbekistan +VA Vatican City +VC St Vincent +VE Venezuela +VG Virgin Islands (UK) +VI Virgin Islands (US) +VN Vietnam +VU Vanuatu +WF Wallis & Futuna +WS Samoa (western) +YE Yemen +YT Mayotte +ZA South Africa +ZM Zambia +ZW Zimbabwe diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/leapseconds b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/leapseconds new file mode 100644 index 0000000..3ce3ab2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/leapseconds @@ -0,0 +1,78 @@ +# Allowance for leap seconds added to each time zone file. + +# This file is in the public domain. 
+ +# This file is generated automatically from the data in the public-domain +# NIST format leap-seconds.list file, which can be copied from +# <ftp://ftp.nist.gov/pub/time/leap-seconds.list> +# or <ftp://ftp.boulder.nist.gov/pub/time/leap-seconds.list>. +# For more about leap-seconds.list, please see +# The NTP Timescale and Leap Seconds +# <https://www.eecis.udel.edu/~mills/leap.html>. + +# The rules for leap seconds are specified in Annex 1 (Time scales) of: +# Standard-frequency and time-signal emissions. +# International Telecommunication Union - Radiocommunication Sector +# (ITU-R) Recommendation TF.460-6 (02/2002) +# <https://www.itu.int/rec/R-REC-TF.460-6-200202-I/>. +# The International Earth Rotation and Reference Systems Service (IERS) +# periodically uses leap seconds to keep UTC to within 0.9 s of UT1 +# (a proxy for Earth's angle in space as measured by astronomers) +# and publishes leap second data in a copyrighted file +# <https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat>. +# See: Levine J. Coordinated Universal Time and the leap second. +# URSI Radio Sci Bull. 2016;89(4):30-6. doi:10.23919/URSIRSB.2016.7909995 +# <https://ieeexplore.ieee.org/document/7909995>. + +# There were no leap seconds before 1972, as no official mechanism +# accounted for the discrepancy between atomic time (TAI) and the earth's +# rotation. The first ("1 Jan 1972") data line in leap-seconds.list +# does not denote a leap second; it denotes the start of the current definition +# of UTC. + +# All leap-seconds are Stationary (S) at the given UTC time. +# The correction (+ or -) is made at the given time, so in the unlikely +# event of a negative leap second, a line would look like this: +# Leap YEAR MON DAY 23:59:59 - S +# Typical lines look like this: +# Leap YEAR MON DAY 23:59:60 + S +Leap 1972 Jun 30 23:59:60 + S +Leap 1972 Dec 31 23:59:60 + S +Leap 1973 Dec 31 23:59:60 + S +Leap 1974 Dec 31 23:59:60 + S +Leap 1975 Dec 31 23:59:60 + S +Leap 1976 Dec 31 23:59:60 + S +Leap 1977 Dec 31 23:59:60 + S +Leap 1978 Dec 31 23:59:60 + S +Leap 1979 Dec 31 23:59:60 + S +Leap 1981 Jun 30 23:59:60 + S +Leap 1982 Jun 30 23:59:60 + S +Leap 1983 Jun 30 23:59:60 + S +Leap 1985 Jun 30 23:59:60 + S +Leap 1987 Dec 31 23:59:60 + S +Leap 1989 Dec 31 23:59:60 + S +Leap 1990 Dec 31 23:59:60 + S +Leap 1992 Jun 30 23:59:60 + S +Leap 1993 Jun 30 23:59:60 + S +Leap 1994 Jun 30 23:59:60 + S +Leap 1995 Dec 31 23:59:60 + S +Leap 1997 Jun 30 23:59:60 + S +Leap 1998 Dec 31 23:59:60 + S +Leap 2005 Dec 31 23:59:60 + S +Leap 2008 Dec 31 23:59:60 + S +Leap 2012 Jun 30 23:59:60 + S +Leap 2015 Jun 30 23:59:60 + S +Leap 2016 Dec 31 23:59:60 + S + +# UTC timestamp when this leap second list expires. +# Any additional leap seconds will come after this. +# This Expires line is commented out for now, +# so that pre-2020a zic implementations do not reject this file. 
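
Editorial aside (not part of the vendored fixture): the leap second list above follows the layout spelled out in the file's own comments, i.e. whitespace-separated columns of the form `Leap YEAR MON DAY HH:MM:SS CORR R/S`. A minimal Ruby sketch of a reader for those lines might look like this; the struct name is made up for illustration, and the count in the trailing comment simply assumes the 27 `Leap` entries shown above.

LeapSecond = Struct.new(:at, :correction, :rolling_or_stationary)

def parse_leapseconds(path)
  File.readlines(path).filter_map do |line|
    next if line.start_with?('#') || line.strip.empty?
    _keyword, year, mon, day, _hms, corr, rs = line.split
    # 23:59:60 is not a representable Time, so keep the date plus the sign/type.
    LeapSecond.new(Time.utc(year.to_i, mon, day.to_i), corr, rs)
  end
end

# parse_leapseconds('test/zoneinfo/leapseconds').size  # => 27 for this fixture
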
+#Expires 2021 Jun 28 00:00:00 + +# POSIX timestamps for the data in this file: +#updated 1467936000 (2016-07-08 00:00:00 UTC) +#expires 1624838400 (2021-06-28 00:00:00 UTC) + +# Updated through IERS Bulletin C60 +# File expires on: 28 June 2021 diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/posix/Europe/London b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/posix/Europe/London new file mode 100644 index 0000000..323cd38 Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/posix/Europe/London differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/posixrules b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/posixrules new file mode 100644 index 0000000..2b6c2ee Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/posixrules differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/right/Europe/London b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/right/Europe/London new file mode 100644 index 0000000..9b8560d Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/right/Europe/London differ diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/zone.tab b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/zone.tab new file mode 100644 index 0000000..8d056e3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/zone.tab @@ -0,0 +1,452 @@ +# tzdb timezone descriptions (deprecated version) +# +# This file is in the public domain, so clarified as of +# 2009-05-17 by Arthur David Olson. +# +# From Paul Eggert (2018-06-27): +# This file is intended as a backward-compatibility aid for older programs. +# New programs should use zone1970.tab. This file is like zone1970.tab (see +# zone1970.tab's comments), but with the following additional restrictions: +# +# 1. This file contains only ASCII characters. +# 2. The first data column contains exactly one country code. +# +# Because of (2), each row stands for an area that is the intersection +# of a region identified by a country code and of a timezone where civil +# clocks have agreed since 1970; this is a narrower definition than +# that of zone1970.tab. +# +# This table is intended as an aid for users, to help them select timezones +# appropriate for their practical needs. It is not intended to take or +# endorse any position on legal or territorial claims. 
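
Editorial aside (not part of the vendored file): per the header comments above, each data row in zone.tab is a tab-separated record of an ISO 3166 country code, ISO 6709 coordinates, a TZ identifier, and an optional comment; the iso3166.tab and zone1970.tab fixtures elsewhere in this diff use the same tab-separated convention. A hedged Ruby sketch of a reader for the rows that follow (the struct and helper names are illustrative):

Zone = Struct.new(:country_code, :coordinates, :identifier, :comment)

def parse_zone_tab(path)
  File.readlines(path, encoding: 'UTF-8').filter_map do |line|
    next if line.start_with?('#') || line.strip.empty?
    code, coords, tz, comment = line.chomp.split("\t", 4)
    Zone.new(code, coords, tz, comment)
  end
end

# e.g. identifiers listed for one country code in this fixture:
# parse_zone_tab('test/zoneinfo/zone.tab').select { |z| z.country_code == 'AU' }.map(&:identifier)
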
+# +#country- +#code coordinates TZ comments +AD +4230+00131 Europe/Andorra +AE +2518+05518 Asia/Dubai +AF +3431+06912 Asia/Kabul +AG +1703-06148 America/Antigua +AI +1812-06304 America/Anguilla +AL +4120+01950 Europe/Tirane +AM +4011+04430 Asia/Yerevan +AO -0848+01314 Africa/Luanda +AQ -7750+16636 Antarctica/McMurdo New Zealand time - McMurdo, South Pole +AQ -6617+11031 Antarctica/Casey Casey +AQ -6835+07758 Antarctica/Davis Davis +AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville +AQ -6736+06253 Antarctica/Mawson Mawson +AQ -6448-06406 Antarctica/Palmer Palmer +AQ -6734-06808 Antarctica/Rothera Rothera +AQ -690022+0393524 Antarctica/Syowa Syowa +AQ -720041+0023206 Antarctica/Troll Troll +AQ -7824+10654 Antarctica/Vostok Vostok +AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF) +AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF) +AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN) +AR -2411-06518 America/Argentina/Jujuy Jujuy (JY) +AR -2649-06513 America/Argentina/Tucuman Tucuman (TM) +AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH) +AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR) +AR -3132-06831 America/Argentina/San_Juan San Juan (SJ) +AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ) +AR -3319-06621 America/Argentina/San_Luis San Luis (SL) +AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC) +AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF) +AS -1416-17042 Pacific/Pago_Pago +AT +4813+01620 Europe/Vienna +AU -3133+15905 Australia/Lord_Howe Lord Howe Island +AU -5430+15857 Antarctica/Macquarie Macquarie Island +AU -4253+14719 Australia/Hobart Tasmania (most areas) +AU -3956+14352 Australia/Currie Tasmania (King Island) +AU -3749+14458 Australia/Melbourne Victoria +AU -3352+15113 Australia/Sydney New South Wales (most areas) +AU -3157+14127 Australia/Broken_Hill New South Wales (Yancowinna) +AU -2728+15302 Australia/Brisbane Queensland (most areas) +AU -2016+14900 Australia/Lindeman Queensland (Whitsunday Islands) +AU -3455+13835 Australia/Adelaide South Australia +AU -1228+13050 Australia/Darwin Northern Territory +AU -3157+11551 Australia/Perth Western Australia (most areas) +AU -3143+12852 Australia/Eucla Western Australia (Eucla) +AW +1230-06958 America/Aruba +AX +6006+01957 Europe/Mariehamn +AZ +4023+04951 Asia/Baku +BA +4352+01825 Europe/Sarajevo +BB +1306-05937 America/Barbados +BD +2343+09025 Asia/Dhaka +BE +5050+00420 Europe/Brussels +BF +1222-00131 Africa/Ouagadougou +BG +4241+02319 Europe/Sofia +BH +2623+05035 Asia/Bahrain +BI -0323+02922 Africa/Bujumbura +BJ +0629+00237 Africa/Porto-Novo +BL +1753-06251 America/St_Barthelemy +BM +3217-06446 Atlantic/Bermuda +BN +0456+11455 Asia/Brunei +BO -1630-06809 America/La_Paz +BQ +120903-0681636 America/Kralendijk +BR -0351-03225 America/Noronha Atlantic islands +BR -0127-04829 America/Belem Para (east); Amapa +BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB) +BR -0803-03454 America/Recife Pernambuco +BR -0712-04812 America/Araguaina Tocantins +BR -0940-03543 America/Maceio Alagoas, Sergipe +BR -1259-03831 America/Bahia Bahia +BR -2332-04637 America/Sao_Paulo Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS) +BR -2027-05437 America/Campo_Grande Mato Grosso do Sul +BR -1535-05605 America/Cuiaba Mato Grosso +BR -0226-05452 America/Santarem Para (west) +BR -0846-06354 America/Porto_Velho Rondonia +BR +0249-06040 America/Boa_Vista Roraima +BR -0308-06001 America/Manaus 
Amazonas (east) +BR -0640-06952 America/Eirunepe Amazonas (west) +BR -0958-06748 America/Rio_Branco Acre +BS +2505-07721 America/Nassau +BT +2728+08939 Asia/Thimphu +BW -2439+02555 Africa/Gaborone +BY +5354+02734 Europe/Minsk +BZ +1730-08812 America/Belize +CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast) +CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE +CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton) +CA +4606-06447 America/Moncton Atlantic - New Brunswick +CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas) +CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore) +CA +4339-07923 America/Toronto Eastern - ON, QC (most areas) +CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73) +CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay) +CA +6344-06828 America/Iqaluit Eastern - NU (most east areas) +CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung) +CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H) +CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba +CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances) +CA +744144-0944945 America/Resolute Central - NU (Resolute) +CA +624900-0920459 America/Rankin_Inlet Central - NU (central) +CA +5024-10439 America/Regina CST - SK (most areas) +CA +5017-10750 America/Swift_Current CST - SK (midwest) +CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W) +CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west) +CA +6227-11421 America/Yellowknife Mountain - NT (central) +CA +682059-1334300 America/Inuvik Mountain - NT (west) +CA +4906-11631 America/Creston MST - BC (Creston) +CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John) +CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson) +CA +4916-12307 America/Vancouver Pacific - BC (most areas) +CA +6043-13503 America/Whitehorse Pacific - Yukon (east) +CA +6404-13925 America/Dawson Pacific - Yukon (west) +CC -1210+09655 Indian/Cocos +CD -0418+01518 Africa/Kinshasa Dem. Rep. of Congo (west) +CD -1140+02728 Africa/Lubumbashi Dem. Rep. 
of Congo (east) +CF +0422+01835 Africa/Bangui +CG -0416+01517 Africa/Brazzaville +CH +4723+00832 Europe/Zurich +CI +0519-00402 Africa/Abidjan +CK -2114-15946 Pacific/Rarotonga +CL -3327-07040 America/Santiago Chile (most areas) +CL -5309-07055 America/Punta_Arenas Region of Magallanes +CL -2709-10926 Pacific/Easter Easter Island +CM +0403+00942 Africa/Douala +CN +3114+12128 Asia/Shanghai Beijing Time +CN +4348+08735 Asia/Urumqi Xinjiang Time +CO +0436-07405 America/Bogota +CR +0956-08405 America/Costa_Rica +CU +2308-08222 America/Havana +CV +1455-02331 Atlantic/Cape_Verde +CW +1211-06900 America/Curacao +CX -1025+10543 Indian/Christmas +CY +3510+03322 Asia/Nicosia Cyprus (most areas) +CY +3507+03357 Asia/Famagusta Northern Cyprus +CZ +5005+01426 Europe/Prague +DE +5230+01322 Europe/Berlin Germany (most areas) +DE +4742+00841 Europe/Busingen Busingen +DJ +1136+04309 Africa/Djibouti +DK +5540+01235 Europe/Copenhagen +DM +1518-06124 America/Dominica +DO +1828-06954 America/Santo_Domingo +DZ +3647+00303 Africa/Algiers +EC -0210-07950 America/Guayaquil Ecuador (mainland) +EC -0054-08936 Pacific/Galapagos Galapagos Islands +EE +5925+02445 Europe/Tallinn +EG +3003+03115 Africa/Cairo +EH +2709-01312 Africa/El_Aaiun +ER +1520+03853 Africa/Asmara +ES +4024-00341 Europe/Madrid Spain (mainland) +ES +3553-00519 Africa/Ceuta Ceuta, Melilla +ES +2806-01524 Atlantic/Canary Canary Islands +ET +0902+03842 Africa/Addis_Ababa +FI +6010+02458 Europe/Helsinki +FJ -1808+17825 Pacific/Fiji +FK -5142-05751 Atlantic/Stanley +FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap +FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape +FM +0519+16259 Pacific/Kosrae Kosrae +FO +6201-00646 Atlantic/Faroe +FR +4852+00220 Europe/Paris +GA +0023+00927 Africa/Libreville +GB +513030-0000731 Europe/London +GD +1203-06145 America/Grenada +GE +4143+04449 Asia/Tbilisi +GF +0456-05220 America/Cayenne +GG +492717-0023210 Europe/Guernsey +GH +0533-00013 Africa/Accra +GI +3608-00521 Europe/Gibraltar +GL +6411-05144 America/Nuuk Greenland (most areas) +GL +7646-01840 America/Danmarkshavn National Park (east coast) +GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit +GL +7634-06847 America/Thule Thule/Pituffik +GM +1328-01639 Africa/Banjul +GN +0931-01343 Africa/Conakry +GP +1614-06132 America/Guadeloupe +GQ +0345+00847 Africa/Malabo +GR +3758+02343 Europe/Athens +GS -5416-03632 Atlantic/South_Georgia +GT +1438-09031 America/Guatemala +GU +1328+14445 Pacific/Guam +GW +1151-01535 Africa/Bissau +GY +0648-05810 America/Guyana +HK +2217+11409 Asia/Hong_Kong +HN +1406-08713 America/Tegucigalpa +HR +4548+01558 Europe/Zagreb +HT +1832-07220 America/Port-au-Prince +HU +4730+01905 Europe/Budapest +ID -0610+10648 Asia/Jakarta Java, Sumatra +ID -0002+10920 Asia/Pontianak Borneo (west, central) +ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west) +ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas +IE +5320-00615 Europe/Dublin +IL +314650+0351326 Asia/Jerusalem +IM +5409-00428 Europe/Isle_of_Man +IN +2232+08822 Asia/Kolkata +IO -0720+07225 Indian/Chagos +IQ +3321+04425 Asia/Baghdad +IR +3540+05126 Asia/Tehran +IS +6409-02151 Atlantic/Reykjavik +IT +4154+01229 Europe/Rome +JE +491101-0020624 Europe/Jersey +JM +175805-0764736 America/Jamaica +JO +3157+03556 Asia/Amman +JP +353916+1394441 Asia/Tokyo +KE -0117+03649 Africa/Nairobi +KG +4254+07436 Asia/Bishkek +KH +1133+10455 Asia/Phnom_Penh +KI +0125+17300 Pacific/Tarawa Gilbert Islands +KI -0308-17105 Pacific/Enderbury 
Phoenix Islands +KI +0152-15720 Pacific/Kiritimati Line Islands +KM -1141+04316 Indian/Comoro +KN +1718-06243 America/St_Kitts +KP +3901+12545 Asia/Pyongyang +KR +3733+12658 Asia/Seoul +KW +2920+04759 Asia/Kuwait +KY +1918-08123 America/Cayman +KZ +4315+07657 Asia/Almaty Kazakhstan (most areas) +KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda +KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay +KZ +5017+05710 Asia/Aqtobe Aqtobe/Aktobe +KZ +4431+05016 Asia/Aqtau Mangghystau/Mankistau +KZ +4707+05156 Asia/Atyrau Atyrau/Atirau/Gur'yev +KZ +5113+05121 Asia/Oral West Kazakhstan +LA +1758+10236 Asia/Vientiane +LB +3353+03530 Asia/Beirut +LC +1401-06100 America/St_Lucia +LI +4709+00931 Europe/Vaduz +LK +0656+07951 Asia/Colombo +LR +0618-01047 Africa/Monrovia +LS -2928+02730 Africa/Maseru +LT +5441+02519 Europe/Vilnius +LU +4936+00609 Europe/Luxembourg +LV +5657+02406 Europe/Riga +LY +3254+01311 Africa/Tripoli +MA +3339-00735 Africa/Casablanca +MC +4342+00723 Europe/Monaco +MD +4700+02850 Europe/Chisinau +ME +4226+01916 Europe/Podgorica +MF +1804-06305 America/Marigot +MG -1855+04731 Indian/Antananarivo +MH +0709+17112 Pacific/Majuro Marshall Islands (most areas) +MH +0905+16720 Pacific/Kwajalein Kwajalein +MK +4159+02126 Europe/Skopje +ML +1239-00800 Africa/Bamako +MM +1647+09610 Asia/Yangon +MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas) +MN +4801+09139 Asia/Hovd Bayan-Olgiy, Govi-Altai, Hovd, Uvs, Zavkhan +MN +4804+11430 Asia/Choibalsan Dornod, Sukhbaatar +MO +221150+1133230 Asia/Macau +MP +1512+14545 Pacific/Saipan +MQ +1436-06105 America/Martinique +MR +1806-01557 Africa/Nouakchott +MS +1643-06213 America/Montserrat +MT +3554+01431 Europe/Malta +MU -2010+05730 Indian/Mauritius +MV +0410+07330 Indian/Maldives +MW -1547+03500 Africa/Blantyre +MX +1924-09909 America/Mexico_City Central Time +MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo +MX +2058-08937 America/Merida Central Time - Campeche, Yucatan +MX +2540-10019 America/Monterrey Central Time - Durango; Coahuila, Nuevo Leon, Tamaulipas (most areas) +MX +2550-09730 America/Matamoros Central Time US - Coahuila, Nuevo Leon, Tamaulipas (US border) +MX +2313-10625 America/Mazatlan Mountain Time - Baja California Sur, Nayarit, Sinaloa +MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua (most areas) +MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border) +MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora +MX +3232-11701 America/Tijuana Pacific Time US - Baja California +MX +2048-10515 America/Bahia_Banderas Central Time - Bahia de Banderas +MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula) +MY +0133+11020 Asia/Kuching Sabah, Sarawak +MZ -2558+03235 Africa/Maputo +NA -2234+01706 Africa/Windhoek +NC -2216+16627 Pacific/Noumea +NE +1331+00207 Africa/Niamey +NF -2903+16758 Pacific/Norfolk +NG +0627+00324 Africa/Lagos +NI +1209-08617 America/Managua +NL +5222+00454 Europe/Amsterdam +NO +5955+01045 Europe/Oslo +NP +2743+08519 Asia/Kathmandu +NR -0031+16655 Pacific/Nauru +NU -1901-16955 Pacific/Niue +NZ -3652+17446 Pacific/Auckland New Zealand (most areas) +NZ -4357-17633 Pacific/Chatham Chatham Islands +OM +2336+05835 Asia/Muscat +PA +0858-07932 America/Panama +PE -1203-07703 America/Lima +PF -1732-14934 Pacific/Tahiti Society Islands +PF -0900-13930 Pacific/Marquesas Marquesas Islands +PF -2308-13457 Pacific/Gambier Gambier Islands +PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas) +PG -0613+15534 Pacific/Bougainville Bougainville +PH +1435+12100 
Asia/Manila +PK +2452+06703 Asia/Karachi +PL +5215+02100 Europe/Warsaw +PM +4703-05620 America/Miquelon +PN -2504-13005 Pacific/Pitcairn +PR +182806-0660622 America/Puerto_Rico +PS +3130+03428 Asia/Gaza Gaza Strip +PS +313200+0350542 Asia/Hebron West Bank +PT +3843-00908 Europe/Lisbon Portugal (mainland) +PT +3238-01654 Atlantic/Madeira Madeira Islands +PT +3744-02540 Atlantic/Azores Azores +PW +0720+13429 Pacific/Palau +PY -2516-05740 America/Asuncion +QA +2517+05132 Asia/Qatar +RE -2052+05528 Indian/Reunion +RO +4426+02606 Europe/Bucharest +RS +4450+02030 Europe/Belgrade +RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad +RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area +# The obsolescent zone.tab format cannot represent Europe/Simferopol well. +# Put it in RU section and list as UA. See "territorial claims" above. +# Programs should use zone1970.tab instead; see above. +UA +4457+03406 Europe/Simferopol Crimea +RU +5836+04939 Europe/Kirov MSK+00 - Kirov +RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan +RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd +RU +5134+04602 Europe/Saratov MSK+01 - Saratov +RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk +RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia +RU +5651+06036 Asia/Yekaterinburg MSK+02 - Urals +RU +5500+07324 Asia/Omsk MSK+03 - Omsk +RU +5502+08255 Asia/Novosibirsk MSK+04 - Novosibirsk +RU +5322+08345 Asia/Barnaul MSK+04 - Altai +RU +5630+08458 Asia/Tomsk MSK+04 - Tomsk +RU +5345+08707 Asia/Novokuznetsk MSK+04 - Kemerovo +RU +5601+09250 Asia/Krasnoyarsk MSK+04 - Krasnoyarsk area +RU +5216+10420 Asia/Irkutsk MSK+05 - Irkutsk, Buryatia +RU +5203+11328 Asia/Chita MSK+06 - Zabaykalsky +RU +6200+12940 Asia/Yakutsk MSK+06 - Lena River +RU +623923+1353314 Asia/Khandyga MSK+06 - Tomponsky, Ust-Maysky +RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River +RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky +RU +5934+15048 Asia/Magadan MSK+08 - Magadan +RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island +RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is +RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka +RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea +RW -0157+03004 Africa/Kigali +SA +2438+04643 Asia/Riyadh +SB -0932+16012 Pacific/Guadalcanal +SC -0440+05528 Indian/Mahe +SD +1536+03232 Africa/Khartoum +SE +5920+01803 Europe/Stockholm +SG +0117+10351 Asia/Singapore +SH -1555-00542 Atlantic/St_Helena +SI +4603+01431 Europe/Ljubljana +SJ +7800+01600 Arctic/Longyearbyen +SK +4809+01707 Europe/Bratislava +SL +0830-01315 Africa/Freetown +SM +4355+01228 Europe/San_Marino +SN +1440-01726 Africa/Dakar +SO +0204+04522 Africa/Mogadishu +SR +0550-05510 America/Paramaribo +SS +0451+03137 Africa/Juba +ST +0020+00644 Africa/Sao_Tome +SV +1342-08912 America/El_Salvador +SX +180305-0630250 America/Lower_Princes +SY +3330+03618 Asia/Damascus +SZ -2618+03106 Africa/Mbabane +TC +2128-07108 America/Grand_Turk +TD +1207+01503 Africa/Ndjamena +TF -492110+0701303 Indian/Kerguelen +TG +0608+00113 Africa/Lome +TH +1345+10031 Asia/Bangkok +TJ +3835+06848 Asia/Dushanbe +TK -0922-17114 Pacific/Fakaofo +TL -0833+12535 Asia/Dili +TM +3757+05823 Asia/Ashgabat +TN +3648+01011 Africa/Tunis +TO -2110-17510 Pacific/Tongatapu +TR +4101+02858 Europe/Istanbul +TT +1039-06131 America/Port_of_Spain +TV -0831+17913 Pacific/Funafuti +TW +2503+12130 Asia/Taipei +TZ -0648+03917 Africa/Dar_es_Salaam +UA +5026+03031 Europe/Kiev Ukraine (most areas) +UA +4837+02218 Europe/Uzhgorod Transcarpathia +UA +4750+03510 Europe/Zaporozhye Zaporozhye and east 
Lugansk +UG +0019+03225 Africa/Kampala +UM +2813-17722 Pacific/Midway Midway Islands +UM +1917+16637 Pacific/Wake Wake Island +US +404251-0740023 America/New_York Eastern (most areas) +US +421953-0830245 America/Detroit Eastern - MI (most areas) +US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area) +US +364947-0845057 America/Kentucky/Monticello Eastern - KY (Wayne) +US +394606-0860929 America/Indiana/Indianapolis Eastern - IN (most areas) +US +384038-0873143 America/Indiana/Vincennes Eastern - IN (Da, Du, K, Mn) +US +410305-0863611 America/Indiana/Winamac Eastern - IN (Pulaski) +US +382232-0862041 America/Indiana/Marengo Eastern - IN (Crawford) +US +382931-0871643 America/Indiana/Petersburg Eastern - IN (Pike) +US +384452-0850402 America/Indiana/Vevay Eastern - IN (Switzerland) +US +415100-0873900 America/Chicago Central (most areas) +US +375711-0864541 America/Indiana/Tell_City Central - IN (Perry) +US +411745-0863730 America/Indiana/Knox Central - IN (Starke) +US +450628-0873651 America/Menominee Central - MI (Wisconsin border) +US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver) +US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural) +US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer) +US +394421-1045903 America/Denver Mountain (most areas) +US +433649-1161209 America/Boise Mountain - ID (south); OR (east) +US +332654-1120424 America/Phoenix MST - Arizona (except Navajo) +US +340308-1181434 America/Los_Angeles Pacific +US +611305-1495401 America/Anchorage Alaska (most areas) +US +581807-1342511 America/Juneau Alaska - Juneau area +US +571035-1351807 America/Sitka Alaska - Sitka area +US +550737-1313435 America/Metlakatla Alaska - Annette Island +US +593249-1394338 America/Yakutat Alaska - Yakutat +US +643004-1652423 America/Nome Alaska (west) +US +515248-1763929 America/Adak Aleutian Islands +US +211825-1575130 Pacific/Honolulu Hawaii +UY -345433-0561245 America/Montevideo +UZ +3940+06648 Asia/Samarkand Uzbekistan (west) +UZ +4120+06918 Asia/Tashkent Uzbekistan (east) +VA +415408+0122711 Europe/Vatican +VC +1309-06114 America/St_Vincent +VE +1030-06656 America/Caracas +VG +1827-06437 America/Tortola +VI +1821-06456 America/St_Thomas +VN +1045+10640 Asia/Ho_Chi_Minh +VU -1740+16825 Pacific/Efate +WF -1318-17610 Pacific/Wallis +WS -1350-17144 Pacific/Apia +YE +1245+04512 Asia/Aden +YT -1247+04514 Indian/Mayotte +ZA -2615+02800 Africa/Johannesburg +ZM -1525+02817 Africa/Lusaka +ZW -1750+03103 Africa/Harare diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/zone1970.tab b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/zone1970.tab new file mode 100644 index 0000000..53ee77e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/test/zoneinfo/zone1970.tab @@ -0,0 +1,384 @@ +# tzdb timezone descriptions +# +# This file is in the public domain. +# +# From Paul Eggert (2018-06-27): +# This file contains a table where each row stands for a timezone where +# civil timestamps have agreed since 1970. Columns are separated by +# a single tab. Lines beginning with '#' are comments. All text uses +# UTF-8 encoding. The columns of the table are as follows: +# +# 1. The countries that overlap the timezone, as a comma-separated list +# of ISO 3166 2-character country codes. See the file 'iso3166.tab'. +# 2. 
Latitude and longitude of the timezone's principal location +# in ISO 6709 sign-degrees-minutes-seconds format, +# either ±DDMM±DDDMM or ±DDMMSS±DDDMMSS, +# first latitude (+ is north), then longitude (+ is east). +# 3. Timezone name used in value of TZ environment variable. +# Please see the theory.html file for how these names are chosen. +# If multiple timezones overlap a country, each has a row in the +# table, with each column 1 containing the country code. +# 4. Comments; present if and only if a country has multiple timezones. +# +# If a timezone covers multiple countries, the most-populous city is used, +# and that country is listed first in column 1; any other countries +# are listed alphabetically by country code. The table is sorted +# first by country code, then (if possible) by an order within the +# country that (1) makes some geographical sense, and (2) puts the +# most populous timezones first, where that does not contradict (1). +# +# This table is intended as an aid for users, to help them select timezones +# appropriate for their practical needs. It is not intended to take or +# endorse any position on legal or territorial claims. +# +#country- +#codes coordinates TZ comments +AD +4230+00131 Europe/Andorra +AE,OM +2518+05518 Asia/Dubai +AF +3431+06912 Asia/Kabul +AL +4120+01950 Europe/Tirane +AM +4011+04430 Asia/Yerevan +AQ -6617+11031 Antarctica/Casey Casey +AQ -6835+07758 Antarctica/Davis Davis +AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville +AQ -6736+06253 Antarctica/Mawson Mawson +AQ -6448-06406 Antarctica/Palmer Palmer +AQ -6734-06808 Antarctica/Rothera Rothera +AQ -690022+0393524 Antarctica/Syowa Syowa +AQ -720041+0023206 Antarctica/Troll Troll +AQ -7824+10654 Antarctica/Vostok Vostok +AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF) +AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF) +AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN) +AR -2411-06518 America/Argentina/Jujuy Jujuy (JY) +AR -2649-06513 America/Argentina/Tucuman Tucumán (TM) +AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH) +AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR) +AR -3132-06831 America/Argentina/San_Juan San Juan (SJ) +AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ) +AR -3319-06621 America/Argentina/San_Luis San Luis (SL) +AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC) +AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF) +AS,UM -1416-17042 Pacific/Pago_Pago Samoa, Midway +AT +4813+01620 Europe/Vienna +AU -3133+15905 Australia/Lord_Howe Lord Howe Island +AU -5430+15857 Antarctica/Macquarie Macquarie Island +AU -4253+14719 Australia/Hobart Tasmania (most areas) +AU -3956+14352 Australia/Currie Tasmania (King Island) +AU -3749+14458 Australia/Melbourne Victoria +AU -3352+15113 Australia/Sydney New South Wales (most areas) +AU -3157+14127 Australia/Broken_Hill New South Wales (Yancowinna) +AU -2728+15302 Australia/Brisbane Queensland (most areas) +AU -2016+14900 Australia/Lindeman Queensland (Whitsunday Islands) +AU -3455+13835 Australia/Adelaide South Australia +AU -1228+13050 Australia/Darwin Northern Territory +AU -3157+11551 Australia/Perth Western Australia (most areas) +AU -3143+12852 Australia/Eucla Western Australia (Eucla) +AZ +4023+04951 Asia/Baku +BB +1306-05937 America/Barbados +BD +2343+09025 Asia/Dhaka +BE +5050+00420 Europe/Brussels +BG +4241+02319 Europe/Sofia +BM +3217-06446 Atlantic/Bermuda +BN +0456+11455 Asia/Brunei +BO 
-1630-06809 America/La_Paz +BR -0351-03225 America/Noronha Atlantic islands +BR -0127-04829 America/Belem Pará (east); Amapá +BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB) +BR -0803-03454 America/Recife Pernambuco +BR -0712-04812 America/Araguaina Tocantins +BR -0940-03543 America/Maceio Alagoas, Sergipe +BR -1259-03831 America/Bahia Bahia +BR -2332-04637 America/Sao_Paulo Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS) +BR -2027-05437 America/Campo_Grande Mato Grosso do Sul +BR -1535-05605 America/Cuiaba Mato Grosso +BR -0226-05452 America/Santarem Pará (west) +BR -0846-06354 America/Porto_Velho Rondônia +BR +0249-06040 America/Boa_Vista Roraima +BR -0308-06001 America/Manaus Amazonas (east) +BR -0640-06952 America/Eirunepe Amazonas (west) +BR -0958-06748 America/Rio_Branco Acre +BS +2505-07721 America/Nassau +BT +2728+08939 Asia/Thimphu +BY +5354+02734 Europe/Minsk +BZ +1730-08812 America/Belize +CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast) +CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE +CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton) +CA +4606-06447 America/Moncton Atlantic - New Brunswick +CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas) +CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore) +CA +4339-07923 America/Toronto Eastern - ON, QC (most areas) +CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73) +CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay) +CA +6344-06828 America/Iqaluit Eastern - NU (most east areas) +CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung) +CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H) +CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba +CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances) +CA +744144-0944945 America/Resolute Central - NU (Resolute) +CA +624900-0920459 America/Rankin_Inlet Central - NU (central) +CA +5024-10439 America/Regina CST - SK (most areas) +CA +5017-10750 America/Swift_Current CST - SK (midwest) +CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W) +CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west) +CA +6227-11421 America/Yellowknife Mountain - NT (central) +CA +682059-1334300 America/Inuvik Mountain - NT (west) +CA +4906-11631 America/Creston MST - BC (Creston) +CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John) +CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson) +CA +4916-12307 America/Vancouver Pacific - BC (most areas) +CA +6043-13503 America/Whitehorse Pacific - Yukon (east) +CA +6404-13925 America/Dawson Pacific - Yukon (west) +CC -1210+09655 Indian/Cocos +CH,DE,LI +4723+00832 Europe/Zurich Swiss time +CI,BF,GM,GN,ML,MR,SH,SL,SN,TG +0519-00402 Africa/Abidjan +CK -2114-15946 Pacific/Rarotonga +CL -3327-07040 America/Santiago Chile (most areas) +CL -5309-07055 America/Punta_Arenas Region of Magallanes +CL -2709-10926 Pacific/Easter Easter Island +CN +3114+12128 Asia/Shanghai Beijing Time +CN +4348+08735 Asia/Urumqi Xinjiang Time +CO +0436-07405 America/Bogota +CR +0956-08405 America/Costa_Rica +CU +2308-08222 America/Havana +CV +1455-02331 Atlantic/Cape_Verde +CW,AW,BQ,SX +1211-06900 America/Curacao +CX -1025+10543 Indian/Christmas +CY +3510+03322 Asia/Nicosia Cyprus (most areas) +CY +3507+03357 Asia/Famagusta Northern Cyprus +CZ,SK +5005+01426 Europe/Prague +DE +5230+01322 Europe/Berlin Germany (most areas) +DK +5540+01235 Europe/Copenhagen +DO +1828-06954 America/Santo_Domingo +DZ 
+3647+00303 Africa/Algiers +EC -0210-07950 America/Guayaquil Ecuador (mainland) +EC -0054-08936 Pacific/Galapagos Galápagos Islands +EE +5925+02445 Europe/Tallinn +EG +3003+03115 Africa/Cairo +EH +2709-01312 Africa/El_Aaiun +ES +4024-00341 Europe/Madrid Spain (mainland) +ES +3553-00519 Africa/Ceuta Ceuta, Melilla +ES +2806-01524 Atlantic/Canary Canary Islands +FI,AX +6010+02458 Europe/Helsinki +FJ -1808+17825 Pacific/Fiji +FK -5142-05751 Atlantic/Stanley +FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap +FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape +FM +0519+16259 Pacific/Kosrae Kosrae +FO +6201-00646 Atlantic/Faroe +FR +4852+00220 Europe/Paris +GB,GG,IM,JE +513030-0000731 Europe/London +GE +4143+04449 Asia/Tbilisi +GF +0456-05220 America/Cayenne +GH +0533-00013 Africa/Accra +GI +3608-00521 Europe/Gibraltar +GL +6411-05144 America/Nuuk Greenland (most areas) +GL +7646-01840 America/Danmarkshavn National Park (east coast) +GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit +GL +7634-06847 America/Thule Thule/Pituffik +GR +3758+02343 Europe/Athens +GS -5416-03632 Atlantic/South_Georgia +GT +1438-09031 America/Guatemala +GU,MP +1328+14445 Pacific/Guam +GW +1151-01535 Africa/Bissau +GY +0648-05810 America/Guyana +HK +2217+11409 Asia/Hong_Kong +HN +1406-08713 America/Tegucigalpa +HT +1832-07220 America/Port-au-Prince +HU +4730+01905 Europe/Budapest +ID -0610+10648 Asia/Jakarta Java, Sumatra +ID -0002+10920 Asia/Pontianak Borneo (west, central) +ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west) +ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas +IE +5320-00615 Europe/Dublin +IL +314650+0351326 Asia/Jerusalem +IN +2232+08822 Asia/Kolkata +IO -0720+07225 Indian/Chagos +IQ +3321+04425 Asia/Baghdad +IR +3540+05126 Asia/Tehran +IS +6409-02151 Atlantic/Reykjavik +IT,SM,VA +4154+01229 Europe/Rome +JM +175805-0764736 America/Jamaica +JO +3157+03556 Asia/Amman +JP +353916+1394441 Asia/Tokyo +KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT -0117+03649 Africa/Nairobi +KG +4254+07436 Asia/Bishkek +KI +0125+17300 Pacific/Tarawa Gilbert Islands +KI -0308-17105 Pacific/Enderbury Phoenix Islands +KI +0152-15720 Pacific/Kiritimati Line Islands +KP +3901+12545 Asia/Pyongyang +KR +3733+12658 Asia/Seoul +KZ +4315+07657 Asia/Almaty Kazakhstan (most areas) +KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda +KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay +KZ +5017+05710 Asia/Aqtobe Aqtöbe/Aktobe +KZ +4431+05016 Asia/Aqtau Mangghystaū/Mankistau +KZ +4707+05156 Asia/Atyrau Atyraū/Atirau/Gur'yev +KZ +5113+05121 Asia/Oral West Kazakhstan +LB +3353+03530 Asia/Beirut +LK +0656+07951 Asia/Colombo +LR +0618-01047 Africa/Monrovia +LT +5441+02519 Europe/Vilnius +LU +4936+00609 Europe/Luxembourg +LV +5657+02406 Europe/Riga +LY +3254+01311 Africa/Tripoli +MA +3339-00735 Africa/Casablanca +MC +4342+00723 Europe/Monaco +MD +4700+02850 Europe/Chisinau +MH +0709+17112 Pacific/Majuro Marshall Islands (most areas) +MH +0905+16720 Pacific/Kwajalein Kwajalein +MM +1647+09610 Asia/Yangon +MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas) +MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan +MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar +MO +221150+1133230 Asia/Macau +MQ +1436-06105 America/Martinique +MT +3554+01431 Europe/Malta +MU -2010+05730 Indian/Mauritius +MV +0410+07330 Indian/Maldives +MX +1924-09909 America/Mexico_City Central Time +MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo +MX 
+2058-08937 America/Merida Central Time - Campeche, Yucatán +MX +2540-10019 America/Monterrey Central Time - Durango; Coahuila, Nuevo León, Tamaulipas (most areas) +MX +2550-09730 America/Matamoros Central Time US - Coahuila, Nuevo León, Tamaulipas (US border) +MX +2313-10625 America/Mazatlan Mountain Time - Baja California Sur, Nayarit, Sinaloa +MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua (most areas) +MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border) +MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora +MX +3232-11701 America/Tijuana Pacific Time US - Baja California +MX +2048-10515 America/Bahia_Banderas Central Time - Bahía de Banderas +MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula) +MY +0133+11020 Asia/Kuching Sabah, Sarawak +MZ,BI,BW,CD,MW,RW,ZM,ZW -2558+03235 Africa/Maputo Central Africa Time +NA -2234+01706 Africa/Windhoek +NC -2216+16627 Pacific/Noumea +NF -2903+16758 Pacific/Norfolk +NG,AO,BJ,CD,CF,CG,CM,GA,GQ,NE +0627+00324 Africa/Lagos West Africa Time +NI +1209-08617 America/Managua +NL +5222+00454 Europe/Amsterdam +NO,SJ +5955+01045 Europe/Oslo +NP +2743+08519 Asia/Kathmandu +NR -0031+16655 Pacific/Nauru +NU -1901-16955 Pacific/Niue +NZ,AQ -3652+17446 Pacific/Auckland New Zealand time +NZ -4357-17633 Pacific/Chatham Chatham Islands +PA,KY +0858-07932 America/Panama +PE -1203-07703 America/Lima +PF -1732-14934 Pacific/Tahiti Society Islands +PF -0900-13930 Pacific/Marquesas Marquesas Islands +PF -2308-13457 Pacific/Gambier Gambier Islands +PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas) +PG -0613+15534 Pacific/Bougainville Bougainville +PH +1435+12100 Asia/Manila +PK +2452+06703 Asia/Karachi +PL +5215+02100 Europe/Warsaw +PM +4703-05620 America/Miquelon +PN -2504-13005 Pacific/Pitcairn +PR +182806-0660622 America/Puerto_Rico +PS +3130+03428 Asia/Gaza Gaza Strip +PS +313200+0350542 Asia/Hebron West Bank +PT +3843-00908 Europe/Lisbon Portugal (mainland) +PT +3238-01654 Atlantic/Madeira Madeira Islands +PT +3744-02540 Atlantic/Azores Azores +PW +0720+13429 Pacific/Palau +PY -2516-05740 America/Asuncion +QA,BH +2517+05132 Asia/Qatar +RE,TF -2052+05528 Indian/Reunion Réunion, Crozet, Scattered Islands +RO +4426+02606 Europe/Bucharest +RS,BA,HR,ME,MK,SI +4450+02030 Europe/Belgrade +RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad +RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area +# Mention RU and UA alphabetically. See "territorial claims" above. 
+RU,UA +4457+03406 Europe/Simferopol Crimea +RU +5836+04939 Europe/Kirov MSK+00 - Kirov +RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan +RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd +RU +5134+04602 Europe/Saratov MSK+01 - Saratov +RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk +RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia +RU +5651+06036 Asia/Yekaterinburg MSK+02 - Urals +RU +5500+07324 Asia/Omsk MSK+03 - Omsk +RU +5502+08255 Asia/Novosibirsk MSK+04 - Novosibirsk +RU +5322+08345 Asia/Barnaul MSK+04 - Altai +RU +5630+08458 Asia/Tomsk MSK+04 - Tomsk +RU +5345+08707 Asia/Novokuznetsk MSK+04 - Kemerovo +RU +5601+09250 Asia/Krasnoyarsk MSK+04 - Krasnoyarsk area +RU +5216+10420 Asia/Irkutsk MSK+05 - Irkutsk, Buryatia +RU +5203+11328 Asia/Chita MSK+06 - Zabaykalsky +RU +6200+12940 Asia/Yakutsk MSK+06 - Lena River +RU +623923+1353314 Asia/Khandyga MSK+06 - Tomponsky, Ust-Maysky +RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River +RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky +RU +5934+15048 Asia/Magadan MSK+08 - Magadan +RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island +RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is +RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka +RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea +SA,KW,YE +2438+04643 Asia/Riyadh +SB -0932+16012 Pacific/Guadalcanal +SC -0440+05528 Indian/Mahe +SD +1536+03232 Africa/Khartoum +SE +5920+01803 Europe/Stockholm +SG +0117+10351 Asia/Singapore +SR +0550-05510 America/Paramaribo +SS +0451+03137 Africa/Juba +ST +0020+00644 Africa/Sao_Tome +SV +1342-08912 America/El_Salvador +SY +3330+03618 Asia/Damascus +TC +2128-07108 America/Grand_Turk +TD +1207+01503 Africa/Ndjamena +TF -492110+0701303 Indian/Kerguelen Kerguelen, St Paul Island, Amsterdam Island +TH,KH,LA,VN +1345+10031 Asia/Bangkok Indochina (most areas) +TJ +3835+06848 Asia/Dushanbe +TK -0922-17114 Pacific/Fakaofo +TL -0833+12535 Asia/Dili +TM +3757+05823 Asia/Ashgabat +TN +3648+01011 Africa/Tunis +TO -2110-17510 Pacific/Tongatapu +TR +4101+02858 Europe/Istanbul +TT,AG,AI,BL,DM,GD,GP,KN,LC,MF,MS,VC,VG,VI +1039-06131 America/Port_of_Spain +TV -0831+17913 Pacific/Funafuti +TW +2503+12130 Asia/Taipei +UA +5026+03031 Europe/Kiev Ukraine (most areas) +UA +4837+02218 Europe/Uzhgorod Transcarpathia +UA +4750+03510 Europe/Zaporozhye Zaporozhye and east Lugansk +UM +1917+16637 Pacific/Wake Wake Island +US +404251-0740023 America/New_York Eastern (most areas) +US +421953-0830245 America/Detroit Eastern - MI (most areas) +US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area) +US +364947-0845057 America/Kentucky/Monticello Eastern - KY (Wayne) +US +394606-0860929 America/Indiana/Indianapolis Eastern - IN (most areas) +US +384038-0873143 America/Indiana/Vincennes Eastern - IN (Da, Du, K, Mn) +US +410305-0863611 America/Indiana/Winamac Eastern - IN (Pulaski) +US +382232-0862041 America/Indiana/Marengo Eastern - IN (Crawford) +US +382931-0871643 America/Indiana/Petersburg Eastern - IN (Pike) +US +384452-0850402 America/Indiana/Vevay Eastern - IN (Switzerland) +US +415100-0873900 America/Chicago Central (most areas) +US +375711-0864541 America/Indiana/Tell_City Central - IN (Perry) +US +411745-0863730 America/Indiana/Knox Central - IN (Starke) +US +450628-0873651 America/Menominee Central - MI (Wisconsin border) +US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver) +US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural) +US +471551-1014640 America/North_Dakota/Beulah Central - ND 
(Mercer) +US +394421-1045903 America/Denver Mountain (most areas) +US +433649-1161209 America/Boise Mountain - ID (south); OR (east) +US +332654-1120424 America/Phoenix MST - Arizona (except Navajo) +US +340308-1181434 America/Los_Angeles Pacific +US +611305-1495401 America/Anchorage Alaska (most areas) +US +581807-1342511 America/Juneau Alaska - Juneau area +US +571035-1351807 America/Sitka Alaska - Sitka area +US +550737-1313435 America/Metlakatla Alaska - Annette Island +US +593249-1394338 America/Yakutat Alaska - Yakutat +US +643004-1652423 America/Nome Alaska (west) +US +515248-1763929 America/Adak Aleutian Islands +US,UM +211825-1575130 Pacific/Honolulu Hawaii +UY -345433-0561245 America/Montevideo +UZ +3940+06648 Asia/Samarkand Uzbekistan (west) +UZ +4120+06918 Asia/Tashkent Uzbekistan (east) +VE +1030-06656 America/Caracas +VN +1045+10640 Asia/Ho_Chi_Minh Vietnam (south) +VU -1740+16825 Pacific/Efate +WF -1318-17610 Pacific/Wallis +WS -1350-17144 Pacific/Apia +ZA,LS,SZ -2615+02800 Africa/Johannesburg diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/tzinfo.gemspec b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/tzinfo.gemspec new file mode 100644 index 0000000..c868d47 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-1.2.8/tzinfo.gemspec @@ -0,0 +1,21 @@ +Gem::Specification.new do |s| + s.name = 'tzinfo' + s.version = '1.2.8' + s.summary = 'Daylight savings aware timezone library' + s.description = 'TZInfo provides daylight savings aware transformations between times in different time zones.' + s.author = 'Philip Ross' + s.email = 'phil.ross@gmail.com' + s.homepage = 'https://tzinfo.github.io' + s.license = 'MIT' + s.files = %w(CHANGES.md LICENSE Rakefile README.md tzinfo.gemspec .yardopts) + + Dir['lib/**/*.rb'].delete_if {|f| f.include?('.svn')} + + Dir['test/**/*.rb'].delete_if {|f| f.include?('.svn')} + + Dir['test/zoneinfo/**/*'].delete_if {|f| f.include?('.svn') || File.symlink?(f)} + s.platform = Gem::Platform::RUBY + s.require_path = 'lib' + s.rdoc_options << '--title' << 'TZInfo' << + '--main' << 'README.md' + s.extra_rdoc_files = ['README.md', 'CHANGES.md', 'LICENSE'] + s.required_ruby_version = '>= 1.8.7' + s.add_dependency 'thread_safe', '~> 0.1' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/.yardopts b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/.yardopts new file mode 100644 index 0000000..52f74b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/.yardopts @@ -0,0 +1,9 @@ +--markup markdown +--no-private +--protected +--readme README.md +lib/**/*.rb +- +CHANGES.md +LICENSE +README.md diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/CHANGES.md b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/CHANGES.md new file mode 100644 index 0000000..fd9dc04 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/CHANGES.md @@ -0,0 +1,973 @@ +# Changes + +## Version 2.0.4 - 16-Dec-2020 + +* Fixed an incorrect InvalidTimezoneIdentifier exception raised when loading a + zoneinfo file that includes rules specifying an additional transition to the + final defined offset (for example, Africa/Casablanca in version 2018e of the + Time Zone Database). #123. + + +## Version 2.0.3 - 8-Nov-2020 + +* Added support for handling "slim" format zoneinfo files that are produced by + default by zic version 2020b and later. The POSIX-style TZ string is now used + calculate DST transition times after the final defined transition in the file. + #120. 
+* Fixed `TimeWithOffset#getlocal` returning a `TimeWithOffset` with the + `timezone_offset` still assigned when called with an offset argument on JRuby + 9.3. +* Rubinius is no longer supported. + + +## Version 2.0.2 - 2-Apr-2020 + +* Fixed 'wrong number of arguments' errors when running on JRuby 9.0. #114. +* Fixed warnings when running on Ruby 2.8. #113. + + +## Version 2.0.1 - 24-Dec-2019 + +* Fixed "SecurityError: Insecure operation - require" exceptions when loading + data with recent Ruby releases in safe mode. #100. +* Fixed warnings when running on Ruby 2.7. #109. +* Added a `TZInfo::Timezone#=~` method that performs a regex match on the time + zone identifier. #99. +* Added a `TZInfo::Country#=~` method that performs a regex match on the country + code. + + +## Version 2.0.0 - 26-Dec-2018 + +### Added + +* `to_local` and `period_for` instance methods have been added to + `TZInfo::Timezone`. These are similar to `utc_to_local` and `period_for_utc`, + but take the UTC offset of the given time into account. +* `abbreviation`, `dst?`, `base_utc_offset` and `observed_utc_offset` instance + methods have been added to `TZInfo::Timezone`, returning the abbreviation, + whether daylight savings time is in effect and the UTC offset of the time zone + at a specified time. +* A `TZInfo::Timestamp` class has been added. It can be used with + `TZInfo::Timezone` in place of a `Time` or `DateTime`. +* `local_time`, `local_datetime` and `local_timestamp` instance methods have + been added to `TZInfo::Timezone`. These methods construct local `Time`, + `DateTime` and `TZInfo::Timestamp` instances with the correct UTC offset and + abbreviation for the time zone. +* Support for a (yet to be released) version 2 of tzinfo-data has been added, in + addition to support for version 1. The new version will remove the (no longer + needed) `DateTime` parameters from transition times, reduce memory consumption + and improve the efficiency of loading timezone and country indexes. +* A `TZInfo::VERSION` constant has been added, indicating the TZInfo version + number. + +### Changed + +* The minimum supported Ruby versions are now Ruby MRI 1.9.3, JRuby 1.7 (in 1.9 + or later mode) and Rubinius 3. +* Local times are now returned using the correct UTC offset (instead of using + UTC). #49 and #52. +* Local times are returned as instances of `TimeWithOffset`, + `DateTimeWithOffset` or `TZInfo::TimestampWithOffset`. These classes subclass + `Time`, `DateTime` and `TZInfo::Timestamp` respectively. They override the + default behaviour of the base classes to return information about the observed + offset at the indicated time. For example, the zone abbreviation is returned + when using the `%Z` directive with `strftime`. +* The `transitions_up_to`, `offsets_up_to` and `strftime` instance methods of + `TZInfo::Timezone` now take the UTC offsets of given times into account + (instead of ignoring them as was previously the case). +* The `TZInfo::TimezonePeriod` class has been split into two subclasses: + `TZInfo::OffsetTimezonePeriod` and `TZInfo::TransitionsTimezonePeriod`. + `TZInfo::OffsetTimezonePeriod` is returned for time zones that only have a + single offset. `TZInfo::TransitionsTimezonePeriod` is returned for periods + that start or end with a transition. +* `TZInfo::TimezoneOffset#abbreviation`, `TZInfo::TimezonePeriod#abbreviation` + and `TZInfo::TimezonePeriod#zone_identifier` now return frozen `String` + instances instead of instances of `Symbol`. 
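
Editorial aside (not part of the vendored changelog): the instance methods introduced in the 2.0.0 "Added" list above can be exercised roughly as follows. This assumes the tzinfo gem plus a data source (tzinfo-data or a system zoneinfo directory) are installed; the identifier, times and commented return values are illustrative.

require 'tzinfo'

tz = TZInfo::Timezone.get('Europe/London')

local = tz.to_local(Time.utc(2018, 12, 26, 12, 0, 0))  # TimeWithOffset with the observed offset applied
tz.period_for(local)                                   # TimezonePeriod covering that instant

tz.abbreviation(local)                                 # e.g. "GMT" in December
tz.dst?(local)                                         # => false in December
tz.observed_utc_offset(local)                          # => 0 (seconds)

# Build a local time directly with the correct offset and abbreviation:
summer = tz.local_time(2019, 7, 1, 12, 0)
summer.utc_offset                                      # => 3600 during British Summer Time
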
+* The `utc_offset` and `utc_total_offset` attributes of `TZInfo::TimezonePeriod` + and `TZInfo::TimezoneOffset` have been renamed `base_utc_offset` and + `observed_utc_offset` respectively. The former names have been retained as + aliases. +* `TZInfo::Timezone.get`, `TZInfo::Timezone.get_proxy` and `TZInfo::Country.get` + can now be used with strings having any encoding. Previously, only encodings + that are directly comparable with UTF-8 were supported. +* The requested identifier is included in `TZInfo::InvalidTimezoneIdentifier` + exception messages. +* The requested country code is included in `TZInfo::InvalidCountryCode` + exception messages. +* The full range of transitions is now loaded from zoneinfo files. Zoneinfo + files produced with version 2014c of the `zic` tool contain an initial + transition `2**63` seconds before the epoch. Zoneinfo files produced with + version 2014d or later of `zic` contain an initial transition `2**59` seconds + before the epoch. These transitions would previously have been ignored, but + are now returned in methods such as `TZInfo::Timezone#transitions_up_to`. +* The `TZInfo::RubyDataSource` and `TZInfo::ZoneinfoDataSource` classes have + been moved into a new `TZInfo::DataSources` module. Code that is setting + `TZInfo::ZoneinfoDataSource.search_path` or + `TZInfo::ZoneinfoDataSource.alternate_iso3166_tab_search_path` will need to be + updated accordingly. +* The `TZInfo::InvalidZoneinfoDirectory` and `TZInfo::ZoneinfoDirectoryNotFound` + exception classes raised by `TZInfo::DataSources::ZoneinfoDataSource` have + been moved into the `TZInfo::DataSources` module. +* Setting the data source to `:ruby` or instantiating + `TZInfo::DataSources::RubyDataSource` will now immediately raise a + `TZInfo::DataSources::TZInfoDataNotFound` exception if `require 'tzinfo/data'` + fails. Previously, a failure would only occur later when accessing an index or + loading a timezone or country. +* The `DEFAULT_SEARCH_PATH` and `DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH` + constants of `TZInfo::DataSources::ZoneinfoDataSource` have been made private. +* The `TZInfo::Country.data_source`, + `TZInfo::DataSource.create_default_data_source`, + `TZInfo::DataSources::ZoneinfoDataSource.process_search_path`, + `TZInfo::Timezone.get_proxies` and `TZInfo::Timezone.data_source` methods have + been made private. +* The performance of loading zoneinfo files and the associated indexes has been + improved. +* Memory use has been decreased by deduplicating `String` instances when loading + country and time zone data. +* The dependency on the deprecated thread_safe gem as been removed and replaced + by concurrent-ruby. +* The Info classes used to return time zone and country information from + `TZInfo::DataSource` implementations have been moved into the + `TZInfo::DataSources` module. +* The `TZInfo::TransitionDataTimezoneInfo` class has been removed and replaced + with `TZInfo::DataSources::TransitionsDataTimezoneInfo` and + `TZInfo::DataSources::ConstantOffsetDataTimezoneInfo`. + `TZInfo::DataSources::TransitionsDataTimezoneInfo` is constructed with an + `Array` of `TZInfo::TimezoneTransition` instances representing times when the + offset changes. `TZInfo::DataSources::ConstantOffsetDataTimezoneInfo` is + constructed with a `TZInfo::TimezoneOffset` instance representing the offset + constantly observed in a time zone. +* The `TZInfo::DataSource#timezone_identifiers` method should no longer be + overridden in custom data source implementations. 
The implementation in the + base class now calculates a result from + `TZInfo::DataSource#data_timezone_identifiers` and + `TZInfo::DataSource#linked_timezone_identifiers`. +* The results of the `TZInfo::DataSources::RubyDataSource` `to_s` and `inspect` + methods now include the time zone database and tzinfo-data versions. + + +### Removed + +* Methods of `TZInfo::Timezone` that accept time arguments no longer allow + `Integer` timestamp values. `Time`, `DateTime` or `TZInfo::Timestamp` values + or objects that respond to `to_i`, `subsec` and optionally `utc_offset` must + be used instead. +* The `%:::z` format directive can now only be used with + `TZInfo::Timezone#strftime` if it is supported by `Time#strftime` on the + runtime platform. +* Using `TZInfo::Timezone.new(identifier)` and `TZInfo::Country.new(code)` to + obtain a specific `TZInfo::Timezone` or `TZInfo::Country` will no longer work. + `TZInfo::Timezone.get(identifier)` and `TZInfo::Country.get(code)` should be + used instead. +* The `TZInfo::TimeOrDateTime` class has been removed. +* The `valid_for_utc?`, `utc_after_start?`, `utc_before_end?`, + `valid_for_local?`, `local_after_start?` and `local_before_end?` instance + methods of `TZInfo::TimezonePeriod` have been removed. Comparisons can be + performed with the results of the `starts_at`, `ends_at`, `local_starts_at` + and `local_ends_at` methods instead. +* The `to_local` and `to_utc` instance methods of `TZInfo::TimezonePeriod` and + `TZInfo::TimezoneOffset` have been removed. Conversions should be performed + using the `TZInfo::Timezone` class instead. +* The `TZInfo::TimezonePeriod#utc_total_offset_rational` method has been + removed. Equivalent information can be obtained using the + `TZInfo::TimezonePeriod#observed_utc_offset` method. +* The `datetime`, `time`, `local_end`, `local_end_time`, `local_start` and + `local_start_time` instance methods of `TZInfo::TimezoneTransition` have been + removed. The `at`, `local_end_at` and `local_start_at` methods should be used + instead and the result (a `TZInfo::TimestampWithOffset`) converted to either a + `DateTime` or `Time` by calling `to_datetime` or `to_time` on the result. +* The `us_zones` and `us_zone_identifiers` class methods of `TZInfo::Timezone` + have been removed. `TZInfo::Country.get('US').zones` and + `TZInfo::Country.get('US').zone_identifiers` should be used instead. + + +## Version 1.2.9 - 16-Dec-2020 + +* Fixed an incorrect InvalidTimezoneIdentifier exception raised when loading a + zoneinfo file that includes rules specifying an additional transition to the + final defined offset (for example, Africa/Casablanca in version 2018e of the + Time Zone Database). #123. + + +## Version 1.2.8 - 8-Nov-2020 + +* Added support for handling "slim" format zoneinfo files that are produced by + default by zic version 2020b and later. The POSIX-style TZ string is now used + calculate DST transition times after the final defined transition in the file. + The 64-bit section is now always used regardless of whether Time has support + for 64-bit times. #120. +* Rubinius is no longer supported. + + +## Version 1.2.7 - 2-Apr-2020 + +* Fixed 'wrong number of arguments' errors when running on JRuby 9.0. #114. +* Fixed warnings when running on Ruby 2.8. #112. + + +## Version 1.2.6 - 24-Dec-2019 + +* `Timezone#strftime('%s', time)` will now return the correct number of seconds + since the epoch. #91. +* Removed the unused `TZInfo::RubyDataSource::REQUIRE_PATH` constant. 
+* Fixed "SecurityError: Insecure operation - require" exceptions when loading + data with recent Ruby releases in safe mode. +* Fixed warnings when running on Ruby 2.7. #106 and #111. + + +## Version 1.2.5 - 4-Feb-2018 + +* Support recursively (deep) freezing `Country` and `Timezone` instances. #80. +* Allow negative daylight savings time offsets to be derived when reading from + zoneinfo files. The utc_offset and std_offset are now derived correctly for + Europe/Dublin in the 2018a and 2018b releases of the Time Zone Database. + + +## Version 1.2.4 - 26-Oct-2017 + +* Ignore the leapseconds file that is included in zoneinfo directories installed + with version 2017c and later of the Time Zone Database. + + +## Version 1.2.3 - 25-Mar-2017 + +* Reduce the number of `String` objects allocated when loading zoneinfo files. + #54. +* Make `Timezone#friendly_identifier` compatible with frozen string literals. +* Improve the algorithm for deriving the `utc_offset` from zoneinfo files. This + now correctly handles Pacific/Apia switching from one side of the + International Date Line to the other whilst observing daylight savings time. + #66. +* Fix an `UnknownTimezone` exception when calling transitions_up_to or + offsets_up_to on a `TimezoneProxy` instance obtained from + `Timezone.get_proxy`. +* Allow the Factory zone to be obtained from the Zoneinfo data source. +* Ignore the /usr/share/zoneinfo/timeconfig symlink included in Slackware + distributions. #64. +* Fix `Timezone#strftime` handling of `%Z` expansion when `%Z` is prefixed with + more than one percent. #31. +* Support expansion of `%z`, `%:z`, `%::z` and `%:::z` to the UTC offset of the + time zone in `Timezone#strftime`. #31 and #67. + + +## Version 1.2.2 - 8-Aug-2014 + +* Fix an error with duplicates being returned by `Timezone#all_country_zones` + and `Timezone#all_country_zone_identifiers` when used with tzinfo-data + v1.2014.6 or later. +* Use the zone1970.tab file for country timezone data if it is found in the + zoneinfo directory (and fallback to zone.tab if not). zone1970.tab was added + in tzdata 2014f. zone.tab is now deprecated. + + +## Version 1.2.1 - 1-Jun-2014 + +* Support zoneinfo files generated with zic version 2014c and later. +* On platforms that only support positive 32-bit timestamps, ensure that + conversions are accurate from the epoch instead of just from the first + transition after the epoch. +* Minor documentation improvements. + + +## Version 1.2.0 - 26-May-2014 + +* Raise the minimum supported Ruby version to 1.8.7. +* Support loading system zoneinfo data on FreeBSD, OpenBSD and Solaris. + Resolves #15. +* Add `canonical_identifier` and `canonical_zone` methods to `Timezone`. + Resolves #16. +* Add a link to a `DataSourceNotFound` help page in the + `TZInfo::DataSourceNotFound` exception message. +* Load iso3166.tab and zone.tab files as UTF-8. +* Fix `Timezone#local_to_utc` returning local `Time` instances on systems using + UTC as the local time zone. Resolves #13. +* Fix `==` methods raising an exception when passed an instance of a different + class by making `<=>` return `nil` if passed a non-comparable argument. +* Eliminate `require 'rational'` warnings. Resolves #10. +* Eliminate "assigned but unused variable - info" warnings. Resolves #11. +* Switch to minitest v5 for unit tests. Resolves #18. + + +## Version 1.1.0 - 25-Sep-2013 + +* TZInfo is now thread safe. 
`ThreadSafe::Cache` is now used instead of `Hash` + to cache `Timezone` and `Country` instances returned by `Timezone.get` and + `Country.get`. The tzinfo gem now depends on thread_safe ~> 0.1. +* Added a `transitions_up_to` method to `Timezone` that returns a list of the + times where the UTC offset of the timezone changes. +* Added an `offsets_up_to` method to `Timezone` that returns the set of offsets + that have been observed in a defined timezone. +* Fixed a "can't modify frozen String" error when loading a `Timezone` from a + zoneinfo file using an identifier `String` that is both tainted and frozen. + Resolves #3. +* Support TZif3 format zoneinfo files (now produced by zic from tzcode version + 2013e onwards). +* Support using YARD to generate documentation (added a .yardopts file). +* Ignore the +VERSION file included in the zoneinfo directory on Mac OS X. +* Added a note to the documentation concerning 32-bit zoneinfo files (as + included with Mac OS X). + + +## Version 1.0.1 - 22-Jun-2013 + +* Fix a test case failure when tests are run from a directory that contains a + dot in the path (issue #29751). + + +## Version 1.0.0 - 2-Jun-2013 + +* Allow TZInfo to be used with different data sources instead of just the + built-in Ruby module data files. +* Include a data source that allows TZInfo to load data from the binary + zoneinfo files produced by zic and included with many Linux and Unix-like + distributions. +* Remove the definition and index Ruby modules from TZInfo and move them into + a separate TZInfo::Data library (available as the tzinfo-data gem). +* Default to using the TZInfo::Data library as the data source if it is + installed, otherwise use zoneinfo files instead. +* Preserve the nanoseconds of local timezone Time objects when performing + conversions (issue #29705). +* Don't add the tzinfo lib directory to the search path when requiring 'tzinfo'. + The tzinfo lib directory must now be in the search path before 'tzinfo' is + required. +* Add `utc_start_time`, `utc_end_time`, `local_start_time` and `local_end_time` + instance methods to `TimezonePeriod`. These return an identical value as the + existing `utc_start`, `utc_end`, `local_start` and `local_end` methods, but + return `Time` instances instead of `DateTime`. +* Make the `start_transition`, `end_transition` and `offset` properties of + `TimezonePeriod` protected. To access properties of the period, callers should + use other `TimezonePeriod` instance methods instead (issue #7655). + + +## Version 0.3.58 (tzdata v2020d) - 8-Nov-2020 + +* Updated to tzdata version 2020d + (https://mm.icann.org/pipermail/tz-announce/2020-October/000062.html). + + +## Version 0.3.57 (tzdata v2020a) - 17-May-2020 + +* Updated to tzdata version 2020a + (<https://mm.icann.org/pipermail/tz-announce/2020-April/000058.html>). + + +## Version 0.3.56 (tzdata v2019c) - 1-Nov-2019 + +* Updated to tzdata version 2019c + (<https://mm.icann.org/pipermail/tz-announce/2019-September/000057.html>). + + +## Version 0.3.55 (tzdata v2018g) - 27-Oct-2018 + +* Updated to tzdata version 2018g + (<https://mm.icann.org/pipermail/tz-announce/2018-October/000052.html>). + + +## Version 0.3.54 (tzdata v2018d) - 25-Mar-2018 + +* Updated to tzdata version 2018d + (<https://mm.icann.org/pipermail/tz-announce/2018-March/000049.html>). + + +## Version 0.3.53 (tzdata v2017b) - 23-Mar-2017 + +* Updated to tzdata version 2017b + (<https://mm.icann.org/pipermail/tz-announce/2017-March/000046.html>). 
+ + +## Version 0.3.52 (tzdata v2016h) - 28-Oct-2016 + +* Updated to tzdata version 2016h + (<https://mm.icann.org/pipermail/tz-announce/2016-October/000042.html>). + + +## Version 0.3.51 (tzdata v2016f) - 5-Jul-2016 + +* Updated to tzdata version 2016f + (<https://mm.icann.org/pipermail/tz-announce/2016-July/000040.html>). + + +## Version 0.3.50 (tzdata v2016e) - 14-Jun-2016 + +* Updated to tzdata version 2016e + (<https://mm.icann.org/pipermail/tz-announce/2016-June/000039.html>). + + +## Version 0.3.49 (tzdata v2016d) - 18-Apr-2016 + +* Updated to tzdata version 2016d + (<https://mm.icann.org/pipermail/tz-announce/2016-April/000038.html>). + + +## Version 0.3.48 (tzdata v2016c) - 23-Mar-2016 + +* Updated to tzdata version 2016c + (<https://mm.icann.org/pipermail/tz-announce/2016-March/000037.html>). + + +## Version 0.3.47 (tzdata v2016b) - 15-Mar-2016 + +* Updated to tzdata version 2016b + (<https://mm.icann.org/pipermail/tz-announce/2016-March/000036.html>). + + +## Version 0.3.46 (tzdata v2015g) - 2-Dec-2015 + +* From version 2015e, the IANA time zone database uses non-ASCII characters in + country names. Backport the encoding handling from TZInfo::Data to allow + TZInfo 0.3.x to support Ruby 1.9 (which would otherwise fail with an invalid + byte sequence error when loading the countries index). Resolves #41. + + +## Version 0.3.45 (tzdata v2015g) - 3-Oct-2015 + +* Updated to tzdata version 2015g + (<https://mm.icann.org/pipermail/tz-announce/2015-October/000034.html>). + + +## Version 0.3.44 (tzdata v2015d) - 24-Apr-2015 + +* Updated to tzdata version 2015d + (<https://mm.icann.org/pipermail/tz-announce/2015-April/000031.html>). + + +## Version 0.3.43 (tzdata v2015a) - 31-Jan-2015 + +* Updated to tzdata version 2015a + (<https://mm.icann.org/pipermail/tz-announce/2015-January/000028.html>). + + +## Version 0.3.42 (tzdata v2014i) - 23-Oct-2014 + +* Updated to tzdata version 2014i + (<https://mm.icann.org/pipermail/tz-announce/2014-October/000026.html>). + + +## Version 0.3.41 (tzdata v2014f) - 8-Aug-2014 + +* Updated to tzdata version 2014f + (<https://mm.icann.org/pipermail/tz-announce/2014-August/000023.html>). + + +## Version 0.3.40 (tzdata v2014e) - 10-Jul-2014 + +* Updated to tzdata version 2014e + (<https://mm.icann.org/pipermail/tz-announce/2014-June/000022.html>). + + +## Version 0.3.39 (tzdata v2014a) - 9-Mar-2014 + +* Updated to tzdata version 2014a + (<https://mm.icann.org/pipermail/tz-announce/2014-March/000018.html>). + + +## Version 0.3.38 (tzdata v2013g) - 8-Oct-2013 + +* Updated to tzdata version 2013g + (<https://mm.icann.org/pipermail/tz-announce/2013-October/000015.html>). + + +## Version 0.3.37 (tzdata v2013b) - 11-Mar-2013 + +* Updated to tzdata version 2013b + (<https://mm.icann.org/pipermail/tz-announce/2013-March/000010.html>). + + +## Version 0.3.36 (tzdata v2013a) - 3-Mar-2013 + +* Updated to tzdata version 2013a + (<https://mm.icann.org/pipermail/tz-announce/2013-March/000009.html>). +* Fix `TimezoneTransitionInfo#eql?` incorrectly returning false when running on + Ruby 2.0. +* Change `eql?` and `==` implementations to test the class of the passed in + object instead of checking individual properties with `respond_to?`. + + +## Version 0.3.35 (tzdata v2012i) - 4-Nov-2012 + +* Updated to tzdata version 2012i + (<https://mm.icann.org/pipermail/tz-announce/2012-November/000007.html>). + + +## Version 0.3.34 (tzdata v2012h) - 27-Oct-2012 + +* Updated to tzdata version 2012h + (<https://mm.icann.org/pipermail/tz-announce/2012-October/000006.html>). 
+ + +## Version 0.3.33 (tzdata v2012c) - 8-Apr-2012 + +* Updated to tzdata version 2012c + (<https://mm.icann.org/pipermail/tz/2012-April/017627.html>). + + +## Version 0.3.32 (tzdata v2012b) - 4-Mar-2012 + +* Updated to tzdata version 2012b + (<https://mm.icann.org/pipermail/tz/2012-March/017524.html>). + + +## Version 0.3.31 (tzdata v2011n) - 6-Nov-2011 + +* Updated to tzdata version 2011n + (<https://mm.icann.org/pipermail/tz/2011-October/017201.html>). + + +## Version 0.3.30 (tzdata v2011k) - 29-Sep-2011 + +* Updated to tzdata version 2011k + (<https://mm.icann.org/pipermail/tz/2011-September/008889.html>). + + +## Version 0.3.29 (tzdata v2011h) - 27-Jun-2011 + +* Updated to tzdata version 2011h + (<https://mm.icann.org/pipermail/tz/2011-June/008576.html>). +* Allow the default value of the `local_to_utc` and `period_for_local` `dst` + parameter to be specified globally with a `Timezone.default_dst` attribute. + Thanks to Kurt Werle for the suggestion and patch. + + +## Version 0.3.28 (tzdata v2011g) - 13-Jun-2011 + +* Add support for Ruby 1.9.3 (trunk revision 31668 and later). Thanks to + Aaron Patterson for reporting the problems running on the new version. + Closes #29233. + + +## Version 0.3.27 (tzdata v2011g) - 26-Apr-2011 + +* Updated to tzdata version 2011g + (<https://mm.icann.org/pipermail/tz/2011-April/016875.html>). + + +## Version 0.3.26 (tzdata v2011e) - 2-Apr-2011 + +* Updated to tzdata version 2011e + (<https://mm.icann.org/pipermail/tz/2011-April/016809.html>). + + +## Version 0.3.25 (tzdata v2011d) - 14-Mar-2011 + +* Updated to tzdata version 2011d + (<https://mm.icann.org/pipermail/tz/2011-March/016746.html>). + + +## Version 0.3.24 (tzdata v2010o) - 15-Jan-2011 + +* Updated to tzdata version 2010o + (<https://mm.icann.org/pipermail/tz/2010-November/016517.html>). + + +## Version 0.3.23 (tzdata v2010l) - 19-Aug-2010 + +* Updated to tzdata version 2010l + (<https://mm.icann.org/pipermail/tz/2010-August/016360.html>). + + +## Version 0.3.22 (tzdata v2010j) - 29-May-2010 + +* Corrected file permissions issue with 0.3.21 release. + + +## Version 0.3.21 (tzdata v2010j) - 28-May-2010 + +* Updated to tzdata version 2010j + (<https://mm.icann.org/pipermail/tz/2010-May/016211.html>). +* Change invalid timezone check to exclude characters not used in timezone + identifiers and avoid 'character class has duplicated range' warnings with + Ruby 1.9.2. +* Ruby 1.9.2 has deprecated `require 'rational'`, but older versions of + Ruby need rational to be required. Require rational only when the Rational + module has not already been loaded. +* Remove circular requires (now a warning in Ruby 1.9.2). Instead of using + requires in each file for dependencies, `tzinfo.rb` now requires all tzinfo + files. If you were previously requiring files within the tzinfo directory + (e.g. `require 'tzinfo/timezone'`), then you will now have to + `require 'tzinfo'` instead. + + +## Version 0.3.20 (tzdata v2010i) - 19-Apr-2010 + +* Updated to tzdata version 2010i + (<https://mm.icann.org/pipermail/tz/2010-April/016184.html>). + + +## Version 0.3.19 (tzdata v2010h) - 5-Apr-2010 + +* Updated to tzdata version 2010h + (<https://mm.icann.org/pipermail/tz/2010-April/016161.html>). + + +## Version 0.3.18 (tzdata v2010g) - 29-Mar-2010 + +* Updated to tzdata version 2010g + (<https://mm.icann.org/pipermail/tz/2010-March/016140.html>). + + +## Version 0.3.17 (tzdata v2010e) - 8-Mar-2010 + +* Updated to tzdata version 2010e + (<https://mm.icann.org/pipermail/tz/2010-March/016088.html>). 
+ + +## Version 0.3.16 (tzdata v2009u) - 5-Jan-2010 + +* Support the use of '-' to denote '0' as an offset in the tz data files. + Used for the first time in the SAVE field in tzdata v2009u. +* Updated to tzdata version 2009u + (<https://mm.icann.org/pipermail/tz/2009-December/016001.html>). + + +## Version 0.3.15 (tzdata v2009p) - 26-Oct-2009 + +* Updated to tzdata version 2009p + (<https://mm.icann.org/pipermail/tz/2009-October/015889.html>). +* Added a description to the gem spec. +* List test files in test_files instead of files in the gem spec. + + +## Version 0.3.14 (tzdata v2009l) - 19-Aug-2009 + +* Updated to tzdata version 2009l + (<https://mm.icann.org/pipermail/tz/2009-August/015729.html>). +* Include current directory in load path to allow running tests on + Ruby 1.9.2, which doesn't include it by default any more. + + +## Version 0.3.13 (tzdata v2009f) - 15-Apr-2009 + +* Updated to tzdata version 2009f + (<https://mm.icann.org/pipermail/tz/2009-April/015544.html>). +* Untaint the timezone module filename after validation to allow use + with `$SAFE == 1` (e.g. under mod_ruby). Thanks to Dmitry Borodaenko for + the suggestion. Closes #25349. + + +## Version 0.3.12 (tzdata v2008i) - 12-Nov-2008 + +* Updated to tzdata version 2008i + (<https://mm.icann.org/pipermail/tz/2008-October/015260.html>). + + +## Version 0.3.11 (tzdata v2008g) - 7-Oct-2008 + +* Updated to tzdata version 2008g + (<https://mm.icann.org/pipermail/tz/2008-October/015139.html>). +* Support Ruby 1.9.0-5. `Rational.new!` has now been removed in Ruby 1.9. + Only use `Rational.new!` if it is available (it is preferable in Ruby 1.8 + for performance reasons). Thanks to Jeremy Kemper and Pratik Naik for + reporting this. Closes #22312. +* Apply a patch from Pratik Naik to replace assert calls that have been + deprecated in the Ruby svn trunk. Closes #22308. + + +## Version 0.3.10 (tzdata v2008f) - 16-Sep-2008 + +* Updated to tzdata version 2008f + (<https://mm.icann.org/pipermail/tz/2008-September/015090.html>). + + +## Version 0.3.9 (tzdata v2008c) - 27-May-2008 + +* Updated to tzdata version 2008c + (<https://mm.icann.org/pipermail/tz/2008-May/014956.html>). +* Support loading timezone data in the latest trunk versions of Ruby 1.9. + `Rational.new!` is now private, so call it using `Rational.send :new!` + instead. Thanks to Jeremy Kemper and Pratik Naik for spotting this. Closes + #19184. +* Prevent warnings from being output when running Ruby with the -v or -w + command line options. Thanks to Paul McMahon for the patch. Closes #19719. + + +## Version 0.3.8 (tzdata v2008b) - 24-Mar-2008 + +* Updated to tzdata version 2008b + (<https://mm.icann.org/pipermail/tz/2008-March/014910.html>). +* Support loading timezone data in Ruby 1.9.0. Use `DateTime.new!` if it is + available instead of `DateTime.new0` when constructing transition times. + `DateTime.new!` was added in Ruby 1.8.6. `DateTime.new0` was removed in + Ruby 1.9.0. Thanks to Joshua Peek for reporting this. Closes #17606. +* Modify some of the equality test cases to cope with the differences + between Ruby 1.8.6 and Ruby 1.9.0. + + +## Version 0.3.7 (tzdata v2008a) - 10-Mar-2008 + +* Updated to tzdata version 2008a + (<https://mm.icann.org/pipermail/tz/2008-March/014851.html>). + + +## Version 0.3.6 (tzdata v2007k) - 1-Jan-2008 + +* Updated to tzdata version 2007k + (<https://mm.icann.org/pipermail/tz/2007-December/014765.html>). +* Removed deprecated RubyGems autorequire option. 
+ + +## Version 0.3.5 (tzdata v2007h) - 1-Oct-2007 + +* Updated to tzdata version 2007h + (<https://mm.icann.org/pipermail/tz/2007-October/014585.html>). + + +## Version 0.3.4 (tzdata v2007g) - 21-Aug-2007 + +* Updated to tzdata version 2007g + (<https://mm.icann.org/pipermail/tz/2007-August/014499.html>). + + +## Version 0.3.3 (tzdata v2006p) - 27-Nov-2006 + +* Updated to tzdata version 2006p + (<https://mm.icann.org/pipermail/tz/2006-November/013999.html>). + + +## Version 0.3.2 (tzdata v2006n) - 11-Oct-2006 + +* Updated to tzdata version 2006n + (<https://mm.icann.org/pipermail/tz/2006-October/013911.html>). Note that this + release of tzdata removes the country Serbia and Montenegro (CS) and replaces + it with separate Serbia (RS) and Montenegro (ME) entries. + + +## Version 0.3.1 (tzdata v2006j) - 21-Aug-2006 + +* Remove colon from case statements to avoid warning in Ruby 1.8.5. #5198. +* Use temporary variable to avoid dynamic string warning from rdoc. +* Updated to tzdata version 2006j + (<https://mm.icann.org/pipermail/tz/2006-August/013767.html>). + + +## Version 0.3.0 (tzdata v2006g) - 17-Jul-2006 + +* New timezone data format. Timezone data now occupies less space on disk and + takes less memory once loaded. #4142, #4144. +* Timezone data is defined in modules rather than classes. `Timezone` instances + returned by `Timezone.get` are no longer instances of data classes, but are + instead instances of new `DataTimezone` and `LinkedTimezone` classes. +* `Timezone` instances can now be used with `Marshal.dump` and `Marshal.load`. + #4240. +* Added a `Timezone.get_proxy` method that returns a `TimezoneProxy` object for + a given identifier. +* Country index data is now defined in a single module that is independent + of the `Country` class implementation. +* `Country` instances can now be used with `Marshal.dump` and `Marshal.load`. + #4240. +* `Country` has a new `zone_info` method that returns `CountryTimezone` objects + containing additional information (latitude, longitude and a description) + relating to each `Timezone`. #4140. +* Time zones within a `Country` are now returned in an order that makes + geographic sense. +* The zdumptest utility now checks local to utc conversions in addition to + utc to local conversions. +* `eql?` method defined on `Country` and `Timezone` that is equivalent to `==`. +* The `==` method of `Timezone` no longer raises an exception when passed an + object with no identifier method. +* The `==` method of `Country` no longer raises an exception when passed an + object with no code method. +* `hash` method defined on `Country` that returns the hash of the code. +* `hash` method defined on `Timezone` that returns the hash of the identifier. +* Miscellaneous API documentation corrections and improvements. +* Timezone definition and indexes are now excluded from rdoc (the contents were + previously ignored with `#:nodoc:` anyway). +* Removed no longer needed `#:nodoc:` directives from timezone data files (which + are now excluded from the rdoc build). +* Installation of the gem now causes rdoc API documentation to be generated. + #4905. +* When optimizing transitions to generate zone definitions, check the + UTC and standard offsets separately rather than just the total offset to UTC. + Fixes an incorrect abbreviation issue with Europe/London, Europe/Dublin and + Pacific/Auckland. +* Eliminated unnecessary `.nil?` calls to give a minor performance gain. 
+* `Timezone.all` and `Timezone.all_identifiers` now return all the + `Timezone` instances/identifiers rather than just those associated with + countries. #4146. +* Added `all_data_zones`, `all_data_zone_identifiers`, `all_linked_zones` and + `all_linked_zone_identifiers` class methods to `Timezone`. +* Added a `strftime` method to `Timezone` that converts a time in UTC to local + time and then returns it formatted. `%Z` is replaced with the timezone + abbreviation for the given time (for example, EST or EDT). #4143. +* Fix escaping of quotes in `TZDataParser`. This affected country names and + descriptions of time zones within countries. + + +## Version 0.2.2 (tzdata v2006g) - 17-May-2006 + +* Use class-scoped instance variables to store the Timezone identifier and + singleton instance. Loading a linked zone no longer causes the parent + zone's identifier to be changed. The instance method of a linked zone class + also now returns an instance of the linked zone class rather than the parent + class. #4502. +* The zdumptest utility now compares the TZInfo zone identifier with the zdump + zone identifier. +* The zdumptestall utility now exits if not supplied with enough parameters. +* Updated to tzdata version 2006g + (<https://mm.icann.org/pipermail/tz/2006-May/013590.html>). + + +## Version 0.2.1 (tzdata v2006d) - 17-Apr-2006 + +* Fix a performance issue caused in 0.2.0 with `Timezone.local_to_utc`. + Conversions performed on `TimeOrDateTime` instances passed to `<=>` are now + cached as originally intended. Thanks to Michael Smedberg for spotting this. +* Fix a performance issue with the `local_to_utc` period search algorithm + originally implemented in 0.1.0. The condition that was supposed to cause + the search to terminate when enough periods had been found was only being + evaluated in a small subset of cases. Thanks to Michael Smedberg and + Jamis Buck for reporting this. +* Added abbreviation as an alias for `TimezonePeriod.zone_identifier`. +* Updated to tzdata version 2006d + (<https://mm.icann.org/pipermail/tz/2006-April/013517.html>). +* Ignore any offset in `DateTime` instances passed in (as is already done for + `Time` instances). All of the following now refer to the same UTC time (15:40 on 17 April 2006). Previously, the `DateTime` in the second line would have been interpreted as 20:40. + + ```ruby + tz.utc_to_local(DateTime.new(2006, 4, 17, 15, 40, 0)) + tz.utc_to_local(DateTime.new(2006, 4, 17, 15, 40, 0).new_offset(Rational(5, 24))) + tz.utc_to_local(Time.utc(2006, 4, 17, 15, 40, 0)) + tz.utc_to_local(Time.local(2006, 4, 17, 15, 40, 0)) + ``` + + +## Version 0.2.0 (tzdata v2006c) - 3-Apr-2006 + +* Use timestamps rather than `DateTime` objects in zone files for times between + 1970 and 2037 (the range of `Time`). +* Don't convert passed in `Time` objects to `DateTime` in most cases (provides + a substantial performance improvement). +* Allow integer timestamps (time in seconds since 1970-1-1) to be used as well + as `Time` and `DateTime` objects in all public methods that take times as + parameters. +* Tool to compare TZInfo conversions with output from zdump. +* `TZDataParser` zone generation algorithm rewritten. Now based on the zic code. + TZInfo is now 100% compatible with zic/zdump output. +* Riyadh Solar Time zones now included again (generation time has been reduced + with `TZDataParser` changes). +* Use binary mode when writing zone and country files to get Unix (\n) new + lines. +* Omit unnecessary quotes in zone identifier symbols. 
+* Omit the final transition to DST if there is a prior transition in the last + year processed to standard time. +* Updated to tzdata version 2006c + (<https://mm.icann.org/pipermail/tz/2006-April/013500.html>). + + +## Version 0.1.2 (tzdata v2006a) - 5-Feb-2006 + +* Add lib directory to the load path when tzinfo is required. Makes it easier + to use tzinfo gem when unpacked to vendor directory in rails. +* Updated to tzdata version 2006a + (<https://mm.icann.org/pipermail/tz/2006-January/013311.html>). +* `build_tz_classes` rake task now handles running svn add and svn delete as new + time zones and countries are added and old ones are removed. +* Return a better error when attempting to use a `Timezone` instance that was + constructed with `Timezone.new(nil)`. This will occur when using Rails' + `composed_of`. When the timezone identifier in the database is null, + attempting to use the `Timezone` will now result in an `UnknownTimezone` + exception rather than a `NameError`. + + +## Version 0.1.1 (tzdata v2005q) - 18-Dec-2005 + +* Time zones that are defined by a single unbounded period (e.g. UTC) now + work again. +* Updated to tzdata version 2005q. + + +## Version 0.1.0 (tzdata v2005n) - 27-Nov-2005 + +* `period_for_local` and `local_to_utc` now allow resolution of ambiguous + times (e.g. when switching from daylight savings to standard time). + The behaviour of these methods when faced with an ambiguous local time + has now changed. If you are using these methods you should check + the documentation. Thanks to Cliff Matthews for suggesting this change. +* Added `require 'date'` to `timezone.rb` (date isn't loaded by default in all + environments). +* Use rake to build packages and documentation. +* License file is now included in gem distribution. +* Dates in definitions stored as Astronomical Julian Day numbers rather than + as civil dates (improves performance creating `DateTime` instances). +* Added options to `TZDataParser` to allow generation of specific zones and + countries. +* Moved `TimezonePeriod` class to `timezone_period.rb`. +* New `TimezonePeriodList` class to store `TimezonePeriod` instances for a + timezone and perform searches for periods. +* Time zones are now defined using blocks. `TimezonePeriod` instances are only + created when they are needed. Thanks to Jamis Buck for the suggestion. +* Add options to `TZDataParser` to allow exclusion of specific zones and + countries. +* Exclude the Riyadh Solar Time zones. The rules are only for 1987 to 1989 and + take a long time to generate and process. Riyadh Solar Time is no longer + observed. +* The last `TimezonePeriod` for each `Timezone` is now written out with an + unbounded rather than arbitrary end time. +* Construct the `Rational` offset in `TimezonePeriod` once when the + `TimezonePeriod` is constructed rather than each time it is needed. +* `Timezone` and `Country` now keep a cache of loaded instances to avoid running + `require` which can be slow on some platforms. +* Updated to tzdata version 2005n. + + +## Version 0.0.4 (tzdata v2005m) - 18-Sep-2005 + +* Removed debug output accidentally included in the previous release. +* Fixed a bug in the generation of friendly zone identifiers (was inserting + apostrophes into UTC, GMT, etc). +* Fixed `Country` `<=>` operator (was comparing non-existent attribute) +* Fixed `Timezone.period_for_local` error when period not found. +* Added test cases for `Timezone`, `TimezoneProxy`, `TimezonePeriod`, `Country` + and some selected time zones. 
+ + +## Version 0.0.3 (tzdata v2005m) - 17-Sep-2005 + +* Reduced visibility of some methods added in `Timezone#setup` and + `Country#setup`. +* Added `name` method to `Timezone` (returns the identifier). +* Added `friendly_identifier` method to `Timezone`. Returns a more friendly + version of the identifier. +* Added `to_s` method to `Timezone`. Returns the friendly identifier. +* Added `==` and `<=>` operators to `Timezone` (compares identifiers). +* `Timezone` now includes `Comparable`. +* Added `to_s` method to `Country`. +* Added `==` and `<=>` operators to `Country` (compares ISO 3166 country codes). +* `Country` now includes `Comparable`. +* New `TimezoneProxy` class that behaves the same as a `Timezone` but doesn't + actually load in its definition until it is actually required. +* Modified `Timezone` and `Country` methods that return `Timezone` instances to + return `TimezoneProxy` instances instead. This makes these methods much + quicker. + +In Ruby on Rails, you can now show a drop-down list of all time zones using the +Rails `time_zone_select` helper method: + +```ruby +<%= time_zone_select 'user', 'time_zone', TZInfo::Timezone.all.sort, :model => TZInfo::Timezone %> +``` + + +## Version 0.0.2 (tzdata v2005m) - 13-Sep-2005 + +* `Country` and `Timezone` data is now loaded into class rather than instance + variables. This makes `Timezone` links more efficient and saves memory if + creating specific `Timezone` and `Country` classes directly. +* `TimezonePeriod` `zone_identifier` is now defined as a symbol to save memory + (was previously a string). +* `TimezonePeriod` `zone_identifier`s that were previously `''` are now + `:Unknown`. +* `Timezone` and `Country` instances can now be returned using + `Timezone.new(identifier)` and `Country.new(identifier)`. When passed an + identifier, the `new` method calls `get` to return an instance of the + specified timezone or country. +* Added new class methods to `Timezone` to return sets of zones and identifiers. + +Thanks to Scott Barron of Lunchbox Software for the suggestions in his +article about using TZInfo with Rails +(<https://web.archive.org/web/20060425190845/http://lunchroom.lunchboxsoftware.com/pages/tzinfo_rails>) + + +## Version 0.0.1 (tzdata v2005m) - 29-Aug-2005 + +* First release. diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/LICENSE b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/LICENSE new file mode 100644 index 0000000..9eb35c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2005-2020 Philip Ross + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/README.md b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/README.md new file mode 100644 index 0000000..d26008f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/README.md @@ -0,0 +1,406 @@ +# TZInfo - Ruby Time Zone Library + +[![RubyGems](https://img.shields.io/gem/v/tzinfo)](https://rubygems.org/gems/tzinfo) [![Travis CI Build](https://img.shields.io/travis/com/tzinfo/tzinfo?logo=travis)](https://travis-ci.com/github/tzinfo/tzinfo) [![AppVeyor Build](https://img.shields.io/appveyor/build/philr/tzinfo?logo=appveyor)](https://ci.appveyor.com/project/philr/tzinfo) + +[TZInfo](https://tzinfo.github.io) is a Ruby library that provides access to +time zone data and allows times to be converted using time zone rules. + + +## Data Sources + +TZInfo requires a source of time zone data. There are two options: + +1. A zoneinfo directory containing timezone definition files. These files are + generated from the [IANA Time Zone Database](https://www.iana.org/time-zones) + using the `zic` utility. Most Unix-like systems include a zoneinfo directory. +2. The TZInfo::Data library (the tzinfo-data gem). TZInfo::Data contains a set + of Ruby modules that are also generated from the IANA Time Zone Database. + +By default, TZInfo will attempt to use TZInfo::Data. If TZInfo::Data is not +available (i.e. if `require 'tzinfo/data'` fails), then TZInfo will search for a +zoneinfo directory instead (using the search path specified by +`TZInfo::ZoneinfoDataSource::DEFAULT_SEARCH_PATH`). + +If no data source can be found, a `TZInfo::DataSourceNotFound` exception will be +raised when TZInfo is used. Further information is available +[in the wiki](https://tzinfo.github.io/datasourcenotfound) to help resolve +`TZInfo::DataSourceNotFound` errors. + +The default data source selection can be overridden by calling +`TZInfo::DataSource.set`. + +Custom data sources can also be used. See the `TZInfo::DataSource.set` +documentation for further details. + + +## Installation + +The TZInfo gem can be installed by running `gem install tzinfo` or by adding +to `gem 'tzinfo'` to your `Gemfile` and running `bundle install`. + +To use the Ruby modules as the data source, TZInfo::Data will also need to be +installed by running `gem install tzinfo-data` or by adding `gem 'tzinfo-data'` +to your `Gemfile`. + + +## IANA Time Zone Database + +The data returned and used by TZInfo is sourced from the +[IANA Time Zone Database](http://www.iana.org/time-zones). The +[Theory and pragmatics of the tz code and data](https://data.iana.org/time-zones/theory.html) +document gives details of how the data is organized and managed. + + +## Example Usage + +To use TZInfo, it must first be required with: + +```ruby +require 'tzinfo' +``` + +The `TZInfo::Timezone` class provides access to time zone data and methods for +converting times. 
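+
+The default data source selection described above can optionally be overridden
+before any time zones or countries are looked up. A minimal sketch (the
+`:ruby` option assumes the tzinfo-data gem is installed, and the `:zoneinfo`
+option assumes a zoneinfo directory such as /usr/share/zoneinfo exists; see
+the `TZInfo::DataSource.set` documentation for the full set of options):
+
+```ruby
+require 'tzinfo'
+
+# Read data from the Ruby modules in the tzinfo-data gem...
+TZInfo::DataSource.set(:ruby)
+
+# ...or read data from a system zoneinfo directory instead.
+TZInfo::DataSource.set(:zoneinfo, '/usr/share/zoneinfo')
+```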
+ +The `all_identifiers` method returns a list of valid time zone identifiers: + +```ruby +identifiers = TZInfo::Timezone.all_identifiers +# => ["Africa/Adibdjan", "Africa/Accra", ..., "Zulu"] +``` + +A `TZInfo::Timezone` instance representing an individual time zone can be +obtained with `TZInfo::Timezone.get`: + +```ruby +tz = TZInfo::Timezone.get('America/New_York') +# => #<TZInfo::DataTimezone: America/New_York> +``` + +A time can be converted to the local time of the time zone with `to_local`: + +```ruby +tz.to_local(Time.utc(2018, 2, 1, 12, 30, 0)) +# => 2018-02-01 07:30:00 -0500 +tz.to_local(Time.utc(2018, 7, 1, 12, 30, 0)) +# => 2018-07-01 08:30:00 -0400 +tz.to_local(Time.new(2018, 7, 1, 13, 30, 0, '+01:00')) +# => 2018-07-01 08:30:00 -0400 +``` + +Local times with the appropriate offset for the time zone can be constructed +with `local_time`: + +```ruby +tz.local_time(2018, 2, 1, 7, 30, 0) +# => 2018-02-01 07:30:00 -0500 +tz.local_time(2018, 7, 1, 8, 30, 0) +# => 2018-07-01 08:30:00 -0400 +``` + +Local times can be converted to UTC by using `local_time` and calling `utc` on +the result: + +```ruby +tz.local_time(2018, 2, 1, 7, 30, 0).utc +# => 2018-02-01 12:30:00 UTC +tz.local_time(2018, 7, 1, 8, 30, 0).utc +# => 2018-07-01 12:30:00 UTC +``` + +The `local_to_utc` method can also be used to convert a time object to UTC. The +offset of the time is ignored - it is treated as if it were a local time for the +time zone: + +```ruby +tz.local_to_utc(Time.utc(2018, 2, 1, 7, 30, 0)) +# => 2018-02-01 12:30:00 UTC +tz.local_to_utc(Time.new(2018, 2, 1, 7, 30, 0, '+01:00')) +# => 2018-02-01 12:30:00 UTC +``` + +Information about the time zone can be obtained from returned local times: + +```ruby +local_time = tz.to_local(Time.utc(2018, 2, 1, 12, 30, 0)) +local_time.utc_offset # => -18000 +local_time.dst? # => false +local_time.zone # => "EST" + +local_time = tz.to_local(Time.utc(2018, 7, 1, 12, 30, 0)) +local_time.utc_offset # => -14400 +local_time.dst? # => true +local_time.zone # => "EDT" +``` + +Time zone information can be included when formatting times with `strftime` +using the `%z` and `%Z` directives: + +```ruby +tz.to_local(Time.utc(2018, 2, 1, 12, 30, 0)).strftime('%Y-%m-%d %H:%M:%S %z %Z') +# => "2018-02-01 07:30:00 -0500 EST" +tz.to_local(Time.utc(2018, 7, 1, 12, 30, 0)).strftime('%Y-%m-%d %H:%M:%S %z %Z') +# => "2018-07-01 08:30:00 -0400 EDT" +``` + +The `period_for` method can be used to obtain information about the observed +time zone information at a particular time as a `TZInfo::TimezonePeriod` object: + +```ruby +period = tz.period_for(Time.utc(2018, 7, 1, 12, 30, 0)) +period.base_utc_offset # => -18000 +period.std_offset # => 3600 +period.observed_utc_offset # => -14400 +period.abbreviation # => "EDT" +period.dst? # => true +period.local_starts_at.to_time # => 2018-03-11 03:00:00 -0400 +period.local_ends_at.to_time # => 2018-11-04 02:00:00 -0400 +``` + +A list of transitions between periods where different rules are observed can be +obtained with the `transitions_up_to` method. 
The result is returned as an +`Array` of `TZInfo::TimezoneTransition` objects: + +```ruby +transitions = tz.transitions_up_to(Time.utc(2019, 1, 1), Time.utc(2017, 1, 1)) +transitions.map do |t| + [t.local_end_at.to_time, t.offset.observed_utc_offset, t.offset.abbreviation] +end +# => [[2017-03-12 02:00:00 -0500, -14400, "EDT"], +# [2017-11-05 02:00:00 -0400, -18000, "EST"], +# [2018-03-11 02:00:00 -0500, -14400, "EDT"], +# [2018-11-04 02:00:00 -0400, -18000, "EST"]] +``` + +A list of the unique offsets used by a time zone can be obtained with the +`offsets_up_to` method. The result is returned as an `Array` of +`TZInfo::TimezoneOffset` objects: + +```ruby +offsets = tz.offsets_up_to(Time.utc(2019, 1, 1)) +offsets.map {|o| [o.observed_utc_offset, o.abbreviation] } +# => [[-17762, "LMT"], +# [-18000, "EST"], +# [-14400, "EDT"], +# [-14400, "EWT"], +# [-14400, "EPT"]] +``` + +All `TZInfo::Timezone` methods that accept a time as a parameter can be used +with either instances of `Time`, `DateTime` or `TZInfo::Timestamp`. Arbitrary +`Time`-like objects that respond to both `to_i` and `subsec` and optionally +`utc_offset` will be treated as if they are instances of `Time`. + +`TZInfo::Timezone` methods that both accept and return times will return an +object with a type matching that of the parameter (actually a +`TZInfo::TimeWithOffset`, `TZInfo::DateTimeWithOffset` or +`TZInfo::TimestampWithOffset` subclass when returning a local time): + +```ruby +tz.to_local(Time.utc(2018, 7, 1, 12, 30, 0)) +# => 2018-07-01 08:30:00 -0400 +tz.to_local(DateTime.new(2018, 7, 1, 12, 30, 0)) +# => #<TZInfo::DateTimeWithOffset: 2018-07-01T08:30:00-04:00 ((2458301j,45000s,0n),-14400s,2299161j)> +tz.to_local(TZInfo::Timestamp.create(2018, 7, 1, 12, 30, 0, 0, :utc)) +# => #<TZInfo::TimestampWithOffset: @value=1530448200, @sub_second=0, @utc_offset=-14400, @utc=false> +``` + +In addition to `local_time`, which returns `Time` instances, the +`local_datetime` and `local_timestamp` methods can be used to construct local +`DateTime` and `TZInfo::Timestamp` instances with the appropriate offset: + +```ruby +tz.local_time(2018, 2, 1, 7, 30, 0) +# => 2018-02-01 07:30:00 -0500 +tz.local_datetime(2018, 2, 1, 7, 30, 0) +# => #<TZInfo::DateTimeWithOffset: 2018-02-01T07:30:00-05:00 ((2458151j,45000s,0n),-18000s,2299161j)> +tz.local_timestamp(2018, 2, 1, 7, 30, 0) +# => #<TZInfo::TimestampWithOffset: @value=1517488200, @sub_second=0, @utc_offset=-18000, @utc=false> +``` + +The `local_to_utc`, `local_time`, `local_datetime` and `local_timestamp` methods +may raise a `TZInfo::PeriodNotFound` or a `TZInfo::AmbiguousTime` exception. +`TZInfo::PeriodNotFound` signals that there is no equivalent UTC time (for +example, during the transition from standard time to daylight savings time when +the clocks are moved forward and an hour is skipped). `TZInfo::AmbiguousTime` +signals that there is more than one equivalent UTC time (for example, during the +transition from daylight savings time to standard time where the clocks are +moved back and an hour is repeated): + +```ruby +tz.local_time(2018, 3, 11, 2, 30, 0, 0) +# raises TZInfo::PeriodNotFound (2018-03-11 02:30:00 is an invalid local time.) +tz.local_time(2018, 11, 4, 1, 30, 0, 0) +# raises TZInfo::AmbiguousTime (2018-11-04 01:30:00 is an ambiguous local time.) 
+``` + +`TZInfo::PeriodNotFound` exceptions can only be resolved by adjusting the time, +for example, by advancing an hour: + +```ruby +tz.local_time(2018, 3, 11, 3, 30, 0, 0) +# => 2018-03-11 03:30:00 -0400 +``` + +`TZInfo::AmbiguousTime` exceptions can be resolved by setting the `dst` +parameter and/or specifying a block to choose one of the interpretations: + +```ruby +tz.local_time(2018, 11, 4, 1, 30, 0, 0, true) +# => 2018-11-04 01:30:00 -0400 +tz.local_time(2018, 11, 4, 1, 30, 0, 0, false) +# => 2018-11-04 01:30:00 -0500 + +tz.local_time(2018, 11, 4, 1, 30, 0, 0) {|p| p.first } +# => 2018-11-04 01:30:00 -0400 +tz.local_time(2018, 11, 4, 1, 30, 0, 0) {|p| p.last } +# => 2018-11-04 01:30:00 -0500 +``` + +The default value of the `dst` parameter can also be set globally: + +```ruby +TZInfo::Timezone.default_dst = true +tz.local_time(2018, 11, 4, 1, 30, 0, 0) +# => 2018-11-04 01:30:00 -0400 +TZInfo::Timezone.default_dst = false +tz.local_time(2018, 11, 4, 1, 30, 0, 0) +# => 2018-11-04 01:30:00 -0500 +``` + +TZInfo also provides information about +[ISO 3166-1](https://www.iso.org/iso-3166-country-codes.html) countries and +their associated time zones via the `TZInfo::Country` class. + +A list of valid ISO 3166-1 (alpha-2) country codes can be obtained by calling +`TZInfo::Country.all_codes`: + +```ruby +TZInfo::Country.all_codes +# => ["AD", "AE", ..., "ZW"] +``` + +A `TZInfo::Country` instance representing an individual time zone can be +obtained with `TZInfo::Country.get`: + +```ruby +c = TZInfo::Country.get('US') +# => #<TZInfo::Country: US> +c.name +# => "United States" +``` + +The `zone_identifiers` method returns a list of the time zone identifiers used +in a country: + +```ruby +c.zone_identifiers +# => ["America/New_York", "America/Detroit", ..., "Pacific/Honolulu"] +``` + +The `zone_info` method returns further information about the time zones used in +a country as an `Array` of `TZInfo::CountryTimezone` instances: + +```ruby +zi = c.zone_info.first +zi.identifier # => "America/New_York" +zi.latitude.to_f.round(5) # => 40.71417 +zi.longitude.to_f.round(5) # => -74.00639 +zi.description # => "Eastern (most areas)" +``` + +The `zones` method returns an `Array` of `TZInfo::Timezone` instances for a +country. A `TZInfo::Timezone` instance can be obtained from a +`TZInfo::CountryTimezone` using the `timezone` method: + +```ruby +zi.timezone.to_local(Time.utc(2018, 2, 1, 12, 30, 0)) +# => 2018-02-01 07:30:00 -0500 +``` + +For further detail, please refer to the API documentation for the +`TZInfo::Timezone` and `TZInfo::Country` classes. + + +## Time Zone Selection + +The Time Zone Database maintainers recommend that time zone identifiers are not +made visible to end-users (see [Names of +timezones](https://data.iana.org/time-zones/theory.html#naming)). + +Instead of displaying a list of time zone identifiers, time zones can be +selected by the user's country. Call `TZInfo::Country.all` to obtain a list of +`TZInfo::Country` objects, each with a unique `code` and a `name` that can be +used for display purposes. + +Most countries have a single time zone. When choosing such a country, the time +zone can be inferred and selected automatically. + +```ruby +croatia = TZInfo::Country.get('HR') +# => #<TZInfo::Country: HR> +croatia.zone_info.length +# => 1 +croatia.zone_info[0].identifier +# => "Europe/Belgrade" +``` + +Some countries have multiple time zones. 
The `zone_info` method can be used +to obtain a list of user-friendly descriptions of the available options: + +```ruby +australia = TZInfo::Country.get('AU') +# => #<TZInfo::Country: AU> +australia.zone_info.length +# => 13 +australia.zone_info.map {|i| [i.identifier, i.description] } +# => [["Australia/Lord_Howe", "Lord Howe Island"], +# ["Antarctica/Macquarie", "Macquarie Island"], +# ... +# ["Australia/Eucla", "Western Australia (Eucla)"]] +``` + +Please note that country information available through TZInfo is intended as an +aid to help users select a time zone data appropriate for their practical needs. +It is not intended to take or endorse any position on legal or territorial +claims. + + +## Compatibility + +TZInfo v2.0.0 requires a minimum of Ruby MRI 1.9.3 or JRuby 1.7 (in 1.9 mode or +later). + + +## Thread-Safety + +The `TZInfo::Country` and `TZInfo::Timezone` classes are thread-safe. It is safe +to use class and instance methods of `TZInfo::Country` and `TZInfo::Timezone` in +concurrently executing threads. Instances of both classes can be shared across +thread boundaries. + + +## Documentation + +API documentation for TZInfo is available on +[RubyDoc.info](https://www.rubydoc.info/gems/tzinfo/). + + +## License + +TZInfo is released under the MIT license, see LICENSE for details. + + +## Source Code + +Source code for TZInfo is available on +[GitHub](https://github.com/tzinfo/tzinfo). + + +## Issue Tracker + +Please post any bugs, issues, feature requests or questions about TZInfo to the +[GitHub issue tracker](https://github.com/tzinfo/tzinfo/issues). + +Issues with the underlying time zone data should be raised on the +[Time Zone Database Discussion mailing list](https://mm.icann.org/mailman/listinfo/tz). diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo.rb new file mode 100644 index 0000000..df447a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo.rb @@ -0,0 +1,73 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +# The top level module for TZInfo. +module TZInfo +end + +# Object#untaint is a deprecated no-op in Ruby >= 2.7 and will be removed in +# 3.2. Add a refinement to either silence the warning, or supply the method if +# needed. 
+if !Object.new.respond_to?(:untaint) || RUBY_VERSION =~ /\A(\d+)\.(\d+)(?:\.|\z)/ && ($1 == '2' && $2.to_i >= 7 || $1.to_i >= 3) + require_relative 'tzinfo/untaint_ext' +end + +require_relative 'tzinfo/version' + +require_relative 'tzinfo/string_deduper' + +require_relative 'tzinfo/timestamp' + +require_relative 'tzinfo/with_offset' +require_relative 'tzinfo/datetime_with_offset' +require_relative 'tzinfo/time_with_offset' +require_relative 'tzinfo/timestamp_with_offset' + +require_relative 'tzinfo/timezone_offset' +require_relative 'tzinfo/timezone_transition' +require_relative 'tzinfo/transition_rule' +require_relative 'tzinfo/annual_rules' + +require_relative 'tzinfo/data_sources' +require_relative 'tzinfo/data_sources/timezone_info' +require_relative 'tzinfo/data_sources/data_timezone_info' +require_relative 'tzinfo/data_sources/linked_timezone_info' +require_relative 'tzinfo/data_sources/constant_offset_data_timezone_info' +require_relative 'tzinfo/data_sources/transitions_data_timezone_info' + +require_relative 'tzinfo/data_sources/country_info' + +require_relative 'tzinfo/data_sources/posix_time_zone_parser' +require_relative 'tzinfo/data_sources/zoneinfo_reader' + +require_relative 'tzinfo/data_source' +require_relative 'tzinfo/data_sources/ruby_data_source' +require_relative 'tzinfo/data_sources/zoneinfo_data_source' + +require_relative 'tzinfo/timezone_period' +require_relative 'tzinfo/offset_timezone_period' +require_relative 'tzinfo/transitions_timezone_period' +require_relative 'tzinfo/timezone' +require_relative 'tzinfo/info_timezone' +require_relative 'tzinfo/data_timezone' +require_relative 'tzinfo/linked_timezone' +require_relative 'tzinfo/timezone_proxy' + +require_relative 'tzinfo/country' +require_relative 'tzinfo/country_timezone' + +require_relative 'tzinfo/format2' +require_relative 'tzinfo/format2/country_definer' +require_relative 'tzinfo/format2/country_index_definer' +require_relative 'tzinfo/format2/country_index_definition' +require_relative 'tzinfo/format2/timezone_definer' +require_relative 'tzinfo/format2/timezone_definition' +require_relative 'tzinfo/format2/timezone_index_definer' +require_relative 'tzinfo/format2/timezone_index_definition' + +require_relative 'tzinfo/format1' +require_relative 'tzinfo/format1/country_definer' +require_relative 'tzinfo/format1/country_index_definition' +require_relative 'tzinfo/format1/timezone_definer' +require_relative 'tzinfo/format1/timezone_definition' +require_relative 'tzinfo/format1/timezone_index_definition' diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/annual_rules.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/annual_rules.rb new file mode 100644 index 0000000..c73e38b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/annual_rules.rb @@ -0,0 +1,71 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # A set of rules that define when transitions occur in time zones with + # annually occurring daylight savings time. + # + # @private + class AnnualRules #:nodoc: + # @return [TimezoneOffset] the standard offset that applies when daylight + # savings time is not in force. + attr_reader :std_offset + + # @return [TimezoneOffset] the offset that applies when daylight savings + # time is in force. + attr_reader :dst_offset + + # @return [TransitionRule] the rule that determines when daylight savings + # time starts. + attr_reader :dst_start_rule + + # @return [TransitionRule] the rule that determines when daylight savings + # time ends. 
+ attr_reader :dst_end_rule + + # Initializes a new {AnnualRules} instance. + # + # @param std_offset [TimezoneOffset] the standard offset that applies when + # daylight savings time is not in force. + # @param dst_offset [TimezoneOffset] the offset that applies when daylight + # savings time is in force. + # @param dst_start_rule [TransitionRule] the rule that determines when + # daylight savings time starts. + # @param dst_end_rule [TransitionRule] the rule that determines when daylight + # savings time ends. + def initialize(std_offset, dst_offset, dst_start_rule, dst_end_rule) + @std_offset = std_offset + @dst_offset = dst_offset + @dst_start_rule = dst_start_rule + @dst_end_rule = dst_end_rule + end + + # Returns the transitions between standard and daylight savings time for a + # given year. The results are ordered by time of occurrence (earliest to + # latest). + # + # @param year [Integer] the year to calculate transitions for. + # @return [Array<TimezoneTransition>] the transitions for the year. + def transitions(year) + start_dst = apply_rule(@dst_start_rule, @std_offset, @dst_offset, year) + end_dst = apply_rule(@dst_end_rule, @dst_offset, @std_offset, year) + + end_dst.timestamp_value < start_dst.timestamp_value ? [end_dst, start_dst] : [start_dst, end_dst] + end + + private + + # Applies a given rule between offsets on a year. + # + # @param rule [TransitionRule] the rule to apply. + # @param from_offset [TimezoneOffset] the offset the rule transitions from. + # @param to_offset [TimezoneOffset] the offset the rule transitions to. + # @param year [Integer] the year when the transition occurs. + # @return [TimezoneTransition] the transition determined by the rule. + def apply_rule(rule, from_offset, to_offset, year) + at = rule.at(from_offset, year) + TimezoneTransition.new(to_offset, from_offset, at.value) + end + end + private_constant :AnnualRules +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/country.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/country.rb new file mode 100644 index 0000000..1bd216a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/country.rb @@ -0,0 +1,208 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # {InvalidCountryCode} is raised by {Country#get} if the code given is not a + # valid ISO 3166-1 alpha-2 code. + class InvalidCountryCode < StandardError + end + + # The {Country} class represents an ISO 3166-1 country. It can be used to + # obtain a list of time zones observed by a country. For example: + # + # united_states = Country.get('US') + # united_states.zone_identifiers + # united_states.zones + # united_states.zone_info + # + # The {Country} class is thread-safe. It is safe to use class and instance + # methods of {Country} in concurrently executing threads. Instances of + # {Country} can be shared across thread boundaries. + # + # Country information available through TZInfo is intended as an aid for + # users, to help them select time zone data appropriate for their practical + # needs. It is not intended to take or endorse any position on legal or + # territorial claims. + class Country + include Comparable + + class << self + # Gets a {Country} by its ISO 3166-1 alpha-2 code. + # + # The {Country.all_codes} method can be used to obtain a list of valid ISO + # 3166-1 alpha-2 codes. + # + # @param code [String] An ISO 3166-1 alpha-2 code. + # @return [Country] a {Country} instance representing the ISO-3166-1 + # country identified by the `code` parameter. 
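+      # @example Looking up a country by code
+      #   # A sketch mirroring the README usage; the zones returned depend on
+      #   # the data source in use.
+      #   us = TZInfo::Country.get('US')
+      #   us.name                   # => "United States"
+      #   us.zone_identifiers.first # => "America/New_York"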
+ # @raise [InvalidCountryCode] If {code} is not a valid ISO 3166-1 alpha-2 + # code it couldn't be found. + def get(code) + Country.new(data_source.get_country_info(code)) + end + + # @return [Array<String>] an `Array` containing all the valid ISO 3166-1 + # alpha-2 country codes. + def all_codes + data_source.country_codes + end + + # @return [Array<Country>] an `Array` containing one {Country} instance + # for each defined country. + def all + data_source.country_codes.collect {|code| get(code)} + end + + private + + # @return [DataSource] the current DataSource. + def data_source + DataSource.get + end + end + + # Initializes a new {Country} based upon a {DataSources::CountryInfo} + # instance. + # + # {Country} instances should not normally be constructed directly. Use + # the {Country.get} method to obtain instances instead. + # + # @param info [DataSources::CountryInfo] the data to base the new {Country} + # instance upon. + def initialize(info) + @info = info + end + + # @return [String] the ISO 3166-1 alpha-2 country code. + def code + @info.code + end + + # @return [String] the name of the country. + def name + @info.name + end + + # @return [String] a `String` representation of this {Country} (the name of + # the country). + def to_s + name + end + + # @return [String] the internal object state as a programmer-readable + # `String`. + def inspect + "#<#{self.class}: #{@info.code}>" + end + + # Returns an `Array` containing the identifier for each time zone observed + # by the country. These are in an order that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # Returned zone identifiers may refer to cities and regions outside of the + # country. This will occur if the zone covers multiple countries. Any zones + # referring to a city or region in a different country will be listed after + # those relating to this country. + # + # @return [Array<String>] an `Array` containing the identifier for each time + # zone observed by the country + def zone_identifiers + zone_info.map(&:identifier) + end + alias zone_names zone_identifiers + + # Returns An `Array` containing a {Timezone} instance for each time zone + # observed by the country. These are in an order that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # The identifiers of the time zones returned may refer to cities and regions + # outside of the country. This will occur if the time zone covers multiple + # countries. Any zones referring to a city or region in a different country + # will be listed after those relating to this country. + # + # The results are actually instances of {TimezoneProxy} in order to defer + # loading of the time zone transition data until it is first needed. + # + # @return [Array<Timezone>] an `Array` containing a {Timezone} instance for + # each time zone observed by the country. + def zones + zone_info.map(&:timezone) + end + + # Returns a frozen `Array` containing a {CountryTimezone} instance for each + # time zone observed by the country. These are in an order that + # + # 1. makes some geographical sense, and + # 2. puts the most populous zones first, where that does not contradict 1. + # + # The {CountryTimezone} instances can be used to obtain the location and + # descriptions of the observed time zones. + # + # Identifiers and descriptions of the time zones returned may refer to + # cities and regions outside of the country. 
This will occur if the time + # zone covers multiple countries. Any zones referring to a city or region in + # a different country will be listed after those relating to this country. + # + # @return [Array<CountryTimezone>] a frozen `Array` containing a + # {CountryTimezone} instance for each time zone observed by the country. + def zone_info + @info.zones + end + + # Compares this {Country} with another based on their {code}. + # + # @param c [Object] an `Object` to compare this {Country} with. + # @return [Integer] -1 if `c` is less than `self`, 0 if `c` is equal to + # `self` and +1 if `c` is greater than `self`, or `nil` if `c` is not an + # instance of {Country}. + def <=>(c) + return nil unless c.is_a?(Country) + code <=> c.code + end + + # @param c [Object] an `Object` to compare this {Country} with. + # @return [Boolean] `true` if `c` is an instance of {Country} and has the + # same code as `self`, otherwise `false`. + def eql?(c) + self == c + end + + # @return [Integer] a hash based on the {code}. + def hash + code.hash + end + + # Matches `regexp` against the {code} of this {Country}. + # + # @param regexp [Regexp] a `Regexp` to match against the {code} of + # this {Country}. + # @return [Integer] the position the match starts, or `nil` if there is no + # match. + def =~(regexp) + regexp =~ code + end + + # Returns a serialized representation of this {Country}. This method is + # called when using `Marshal.dump` with an instance of {Country}. + # + # @param limit [Integer] the maximum depth to dump - ignored. + # @return [String] a serialized representation of this {Country}. + def _dump(limit) + code + end + + # Loads a {Country} from the serialized representation returned by {_dump}. + # This is method is called when using `Marshal.load` or `Marshal.restore` + # to restore a serialized {Country}. + # + # @param data [String] a serialized representation of a {Country}. + # @return [Country] the result of converting `data` back into a {Country}. + def self._load(data) + Country.get(data) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/country_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/country_timezone.rb new file mode 100644 index 0000000..0def7fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/country_timezone.rb @@ -0,0 +1,93 @@ +# encoding: UTF-8 + +module TZInfo + # Information about a time zone used by a {Country}. + class CountryTimezone + # @return [String] the identifier of the {Timezone} being described. + attr_reader :identifier + + # The latitude of this time zone in degrees. Positive numbers are degrees + # north and negative numbers are degrees south. + # + # Note that depending on the data source, the position given by {#latitude} + # and {#longitude} may not be within the country. + # + # @return [Rational] the latitude in degrees. + attr_reader :latitude + + # The longitude of this time zone in degrees. Positive numbers are degrees + # east and negative numbers are degrees west. + # + # Note that depending on the data source, the position given by {#latitude} + # and {#longitude} may not be within the country. + # + # @return [Rational] the longitude in degrees. + attr_reader :longitude + + # A description of this time zone in relation to the country, e.g. "Eastern + # Time". This is usually `nil` for countries that have a single time zone. + # + # @return [String] an optional description of the time zone. + attr_reader :description + + # Creates a new {CountryTimezone}. 
+ # + # The passed in identifier and description instances will be frozen. + # + # {CountryTimezone} instances should normally only be constructed + # by implementations of {DataSource}. + # + # @param identifier [String] the {Timezone} identifier. + # @param latitude [Rational] the latitude of the time zone. + # @param longitude [Rational] the longitude of the time zone. + # @param description [String] an optional description of the time zone. + def initialize(identifier, latitude, longitude, description = nil) + @identifier = identifier.freeze + @latitude = latitude + @longitude = longitude + @description = description && description.freeze + end + + # Returns the associated {Timezone}. + # + # The result is actually an instance of {TimezoneProxy} in order to defer + # loading of the time zone transition data until it is first needed. + # + # @return [Timezone] the associated {Timezone}. + def timezone + Timezone.get_proxy(@identifier) + end + + # @return [String] the {description} if present, otherwise a human-readable + # representation of the identifier (using {Timezone#friendly_identifier}). + def description_or_friendly_identifier + description || timezone.friendly_identifier(true) + end + + # Tests if the given object is equal to the current instance (has the same + # identifier, latitude, longitude and description). + # + # @param ct [Object] the object to be compared. + # @return [TrueClass] `true` if `ct` is equal to the current instance. + def ==(ct) + ct.kind_of?(CountryTimezone) && + identifier == ct.identifier && latitude == ct.latitude && + longitude == ct.longitude && description == ct.description + end + + # Tests if the given object is equal to the current instance (has the same + # identifier, latitude, longitude and description). + # + # @param ct [Object] the object to be compared. + # @return [Boolean] `true` if `ct` is equal to the current instance. + def eql?(ct) + self == ct + end + + # @return [Integer] a hash based on the {identifier}, {latitude}, + # {longitude} and {description}. + def hash + [@identifier, @latitude, @longitude, @description].hash + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_source.rb new file mode 100644 index 0000000..6288aec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_source.rb @@ -0,0 +1,435 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'concurrent' +require 'thread' + +module TZInfo + # {InvalidDataSource} is raised if the selected {DataSource} doesn't implement + # one of the required methods. + class InvalidDataSource < StandardError + end + + # {DataSourceNotFound} is raised if no data source could be found (i.e. if + # `'tzinfo/data'` cannot be found on the load path and no valid zoneinfo + # directory can be found on the system). + class DataSourceNotFound < StandardError + end + + # TZInfo can be used with different data sources for time zone and country + # data. Each source of data is implemented as a subclass of {DataSource}. + # + # To choose a data source and override the default selection, use the + # {DataSource.set} method. + # + # @abstract To create a custom data source, create a subclass of {DataSource} + # and implement the {load_timezone_info}, {data_timezone_identifiers}, + # {linked_timezone_identifiers}, {load_country_info} and {country_codes} + # methods. + class DataSource + # The currently selected data source. 
+ # + # @private + @@instance = nil + + # A `Mutex` used to ensure the default data source is only created once. + # + # @private + @@default_mutex = Mutex.new + + class << self + # @return [DataSource] the currently selected source of data. + def get + # If a DataSource hasn't been manually set when the first request is + # made to obtain a DataSource, then a default data source is created. + # + # This is done at the first request rather than when TZInfo is loaded to + # avoid unnecessary attempts to find a suitable DataSource. + # + # A `Mutex` is used to ensure that only a single default instance is + # created (this avoiding the possibility of retaining two copies of the + # same data in memory). + + unless @@instance + @@default_mutex.synchronize do + set(create_default_data_source) unless @@instance + end + end + + @@instance + end + + # Sets the currently selected data source for time zone and country data. + # + # This should usually be set to one of the two standard data source types: + # + # * `:ruby` - read data from the Ruby modules included in the TZInfo::Data + # library (tzinfo-data gem). + # * `:zoneinfo` - read data from the zoneinfo files included with most + # Unix-like operating systems (e.g. in /usr/share/zoneinfo). + # + # To set TZInfo to use one of the standard data source types, call + # `TZInfo::DataSource.set`` in one of the following ways: + # + # TZInfo::DataSource.set(:ruby) + # TZInfo::DataSource.set(:zoneinfo) + # TZInfo::DataSource.set(:zoneinfo, zoneinfo_dir) + # TZInfo::DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_tab_file) + # + # `DataSource.set(:zoneinfo)` will automatically search for the zoneinfo + # directory by checking the paths specified in + # {DataSources::ZoneinfoDataSource.search_path}. + # {DataSources::ZoneinfoDirectoryNotFound} will be raised if no valid + # zoneinfo directory could be found. + # + # `DataSource.set(:zoneinfo, zoneinfo_dir)` uses the specified + # `zoneinfo_dir` directory as the data source. If the directory is not a + # valid zoneinfo directory, a {DataSources::InvalidZoneinfoDirectory} + # exception will be raised. + # + # `DataSource.set(:zoneinfo, zoneinfo_dir, iso3166_tab_file)` uses the + # specified `zoneinfo_dir` directory as the data source, but loads the + # `iso3166.tab` file from the path given by `iso3166_tab_file`. If the + # directory is not a valid zoneinfo directory, a + # {DataSources::InvalidZoneinfoDirectory} exception will be raised. + # + # Custom data sources can be created by subclassing TZInfo::DataSource and + # implementing the following methods: + # + # * {load_timezone_info} + # * {data_timezone_identifiers} + # * {linked_timezone_identifiers} + # * {load_country_info} + # * {country_codes} + # + # To have TZInfo use the custom data source, call {DataSource.set}, + # passing an instance of the custom data source implementation as follows: + # + # TZInfo::DataSource.set(CustomDataSource.new) + # + # Calling {DataSource.set} will only affect instances of {Timezone} and + # {Country} obtained with {Timezone.get} and {Country.get} subsequent to + # the {DataSource.set} call. Existing {Timezone} and {Country} instances + # will be unaffected. + # + # If {DataSource.set} is not called, TZInfo will by default attempt to use + # TZInfo::Data as the data source. If TZInfo::Data is not available (i.e. + # if `require 'tzinfo/data'` fails), then TZInfo will search for a + # zoneinfo directory instead (using the search path specified by + # {DataSources::ZoneinfoDataSource.search_path}). 
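+      #
+      # As a brief sketch combining the calls described above (the zoneinfo
+      # path is shown as an example rather than a guaranteed default):
+      #
+      #   require 'tzinfo'
+      #
+      #   TZInfo::DataSource.set(:zoneinfo, '/usr/share/zoneinfo')
+      #   TZInfo::DataSource.get                 # => the selected DataSource
+      #   TZInfo::Timezone.get('Europe/London')  # resolved via that source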
+ # + # @param data_source_or_type [Object] either `:ruby`, `:zoneinfo` or an + # instance of a {DataSource}. + # @param args [Array<Object>] when `data_source_or_type` is a symbol, + # optional arguments to use when initializing the data source. + # @raise [ArgumentError] if `data_source_or_type` is not `:ruby`, + # `:zoneinfo` or an instance of {DataSource}. + def set(data_source_or_type, *args) + if data_source_or_type.kind_of?(DataSource) + @@instance = data_source_or_type + elsif data_source_or_type == :ruby + @@instance = DataSources::RubyDataSource.new + elsif data_source_or_type == :zoneinfo + @@instance = DataSources::ZoneinfoDataSource.new(*args) + else + raise ArgumentError, 'data_source_or_type must be a DataSource instance or a data source type (:ruby or :zoneinfo)' + end + end + + private + + # Creates a {DataSource} instance for use as the default. Used if no + # preference has been specified manually. + # + # @return [DataSource] the newly created default {DataSource} instance. + def create_default_data_source + has_tzinfo_data = false + + begin + require 'tzinfo/data' + has_tzinfo_data = true + rescue LoadError + end + + return DataSources::RubyDataSource.new if has_tzinfo_data + + begin + return DataSources::ZoneinfoDataSource.new + rescue DataSources::ZoneinfoDirectoryNotFound + raise DataSourceNotFound, "No source of timezone data could be found.\nPlease refer to https://tzinfo.github.io/datasourcenotfound for help resolving this error." + end + end + end + + # Initializes a new {DataSource} instance. Typically only called via + # subclasses of {DataSource}. + def initialize + @timezones = Concurrent::Map.new + end + + # Returns a {DataSources::TimezoneInfo} instance for the given identifier. + # The result will derive from either {DataSources::DataTimezoneInfo} for + # time zones that define their own data or {DataSources::LinkedTimezoneInfo} + # for links or aliases to other time zones. + # + # {get_timezone_info} calls {load_timezone_info} to create the + # {DataSources::TimezoneInfo} instance. The returned instance is cached and + # returned in subsequent calls to {get_timezone_info} for the identifier. + # + # @param identifier [String] A time zone identifier. + # @return [DataSources::TimezoneInfo] a {DataSources::TimezoneInfo} instance + # for a given identifier. + # @raise [InvalidTimezoneIdentifier] if the time zone is not found or the + # identifier is invalid. + def get_timezone_info(identifier) + result = @timezones[identifier] + + unless result + # Thread-safety: It is possible that multiple equivalent TimezoneInfo + # instances could be created here in concurrently executing threads. The + # consequences of this are that the data may be loaded more than once + # (depending on the data source). The performance benefit of ensuring + # that only a single instance is created is unlikely to be worth the + # overhead of only allowing one TimezoneInfo to be loaded at a time. + + result = load_timezone_info(identifier) + @timezones[result.identifier] = result + end + + result + end + + # @return [Array<String>] a frozen `Array`` of all the available time zone + # identifiers. The identifiers are sorted according to `String#<=>`. + def timezone_identifiers + # Thread-safety: It is possible that the value of @timezone_identifiers + # may be calculated multiple times in concurrently executing threads. It + # is not worth the overhead of locking to ensure that + # @timezone_identifiers is only calculated once. 
+ @timezone_identifiers ||= build_timezone_identifiers + end + + # Returns a frozen `Array` of all the available time zone identifiers for + # data time zones (i.e. those that actually contain definitions). The + # identifiers are sorted according to `String#<=>`. + # + # @return [Array<String>] a frozen `Array` of all the available time zone + # identifiers for data time zones. + def data_timezone_identifiers + raise_invalid_data_source('data_timezone_identifiers') + end + + # Returns a frozen `Array` of all the available time zone identifiers that + # are links to other time zones. The identifiers are sorted according to + # `String#<=>`. + # + # @return [Array<String>] a frozen `Array` of all the available time zone + # identifiers that are links to other time zones. + def linked_timezone_identifiers + raise_invalid_data_source('linked_timezone_identifiers') + end + + # @param code [String] an ISO 3166-1 alpha-2 country code. + # @return [DataSources::CountryInfo] a {DataSources::CountryInfo} instance + # for the given ISO 3166-1 alpha-2 country code. + # @raise [InvalidCountryCode] if the country could not be found or the code + # is invalid. + def get_country_info(code) + load_country_info(code) + end + + # Returns a frozen `Array` of all the available ISO 3166-1 alpha-2 country + # codes. The identifiers are sorted according to `String#<=>`. + # + # @return [Array<String>] a frozen `Array` of all the available ISO 3166-1 + # alpha-2 country codes. + def country_codes + raise_invalid_data_source('country_codes') + end + + # @return [String] a description of the {DataSource}. + def to_s + "Default DataSource" + end + + # @return [String] the internal object state as a programmer-readable + # `String`. + def inspect + "#<#{self.class}>" + end + + protected + + # Returns a {DataSources::TimezoneInfo} instance for the given time zone + # identifier. The result should derive from either + # {DataSources::DataTimezoneInfo} for time zones that define their own data + # or {DataSources::LinkedTimezoneInfo} for links to or aliases for other + # time zones. + # + # @param identifier [String] A time zone identifier. + # @return [DataSources::TimezoneInfo] a {DataSources::TimezoneInfo} instance + # for the given time zone identifier. + # @raise [InvalidTimezoneIdentifier] if the time zone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + raise_invalid_data_source('load_timezone_info') + end + + # @param code [String] an ISO 3166-1 alpha-2 country code. + # @return [DataSources::CountryInfo] a {DataSources::CountryInfo} instance + # for the given ISO 3166-1 alpha-2 country code. + # @raise [InvalidCountryCode] if the country could not be found or the code + # is invalid. + def load_country_info(code) + raise_invalid_data_source('load_country_info') + end + + # @return [Encoding] the `Encoding` used by the `String` instances returned + # by {data_timezone_identifiers} and {linked_timezone_identifiers}. + def timezone_identifier_encoding + Encoding::UTF_8 + end + + # Checks that the given identifier is a valid time zone identifier (can be + # found in the {timezone_identifiers} `Array`). If the identifier is valid, + # the `String` instance representing that identifier from + # `timezone_identifiers` is returned. Otherwise an + # {InvalidTimezoneIdentifier} exception is raised. + # + # @param identifier [String] a time zone identifier to be validated. + # @return [String] the `String` instance equivalent to `identifier` from + # {timezone_identifiers}. 
+ # @raise [InvalidTimezoneIdentifier] if `identifier` was not found in + # {timezone_identifiers}. + def validate_timezone_identifier(identifier) + raise InvalidTimezoneIdentifier, "Invalid identifier: #{identifier.nil? ? 'nil' : identifier}" unless identifier.kind_of?(String) + + valid_identifier = try_with_encoding(identifier, timezone_identifier_encoding) {|id| find_timezone_identifier(id) } + return valid_identifier if valid_identifier + + raise InvalidTimezoneIdentifier, "Invalid identifier: #{identifier.encode(Encoding::UTF_8)}" + end + + # Looks up a given code in the given hash of code to + # {DataSources::CountryInfo} mappings. If the code is found the + # {DataSources::CountryInfo} is returned. Otherwise an {InvalidCountryCode} + # exception is raised. + # + # @param hash [String, DataSources::CountryInfo] a mapping from ISO 3166-1 + # alpha-2 country codes to {DataSources::CountryInfo} instances. + # @param code [String] a country code to lookup. + # @param encoding [Encoding] the encoding used for the country codes in + # `hash`. + # @return [DataSources::CountryInfo] the {DataSources::CountryInfo} instance + # corresponding to `code`. + # @raise [InvalidCountryCode] if `code` was not found in `hash`. + def lookup_country_info(hash, code, encoding = Encoding::UTF_8) + raise InvalidCountryCode, "Invalid country code: #{code.nil? ? 'nil' : code}" unless code.kind_of?(String) + + info = try_with_encoding(code, encoding) {|c| hash[c] } + return info if info + + raise InvalidCountryCode, "Invalid country code: #{code.encode(Encoding::UTF_8)}" + end + + private + + # Raises {InvalidDataSource} to indicate that a method has not been + # overridden by a particular data source implementation. + # + # @raise [InvalidDataSource] always. + def raise_invalid_data_source(method_name) + raise InvalidDataSource, "#{method_name} not defined" + end + + # Combines {data_timezone_identifiers} and {linked_timezone_identifiers} + # to create an `Array` containing all valid time zone identifiers. If + # {linked_timezone_identifiers} is empty, the {data_timezone_identifiers} + # instance is returned. + # + # The returned `Array` is frozen. The identifiers are sorted according to + # `String#<=>`. + # + # @return [Array<String>] an `Array` containing all valid time zone + # identifiers. + def build_timezone_identifiers + data = data_timezone_identifiers + linked = linked_timezone_identifiers + linked.empty? ? data : (data + linked).sort!.freeze + end + + if [].respond_to?(:bsearch) + # If the given `identifier` is contained within the {timezone_identifiers} + # `Array`, the `String` instance representing that identifier from + # {timezone_identifiers} is returned. Otherwise, `nil` is returned. + # + # @param identifier [String] A time zone identifier to search for. + # @return [String] the `String` instance representing `identifier` from + # {timezone_identifiers} if found, or `nil` if not found. + # + # :nocov_no_array_bsearch: + def find_timezone_identifier(identifier) + + result = timezone_identifiers.bsearch {|i| i >= identifier } + result == identifier ? result : nil + end + # :nocov_no_array_bsearch: + else + # If the given `identifier` is contained within the {timezone_identifiers} + # `Array`, the `String` instance representing that identifier from + # {timezone_identifiers} is returned. Otherwise, `nil` is returned. + # + # @param identifier [String] A time zone identifier to search for. 
+ # @return [String] the `String` instance representing `identifier` from + # {timezone_identifiers} if found, or `nil` if not found. + # + # :nocov_array_bsearch: + def find_timezone_identifier(identifier) + identifiers = timezone_identifiers + low = 0 + high = identifiers.length + + while low < high do + mid = (low + high).div(2) + mid_identifier = identifiers[mid] + cmp = mid_identifier <=> identifier + + return mid_identifier if cmp == 0 + + if cmp > 0 + high = mid + else + low = mid + 1 + end + end + + nil + end + # :nocov_array_bsearch: + end + + # Tries an operation using `string` directly. If the operation fails, the + # string is copied and encoded with `encoding` and the operation is tried + # again. + # + # @param string [String] The `String` to perform the operation on. + # @param encoding [Encoding] The `Encoding` to use if the initial attempt + # fails. + # @yield [s] the caller will be yielded to once or twice to attempt the + # operation. + # @yieldparam s [String] either `string` or an encoded copy of `string`. + # @yieldreturn [Object] The result of the operation. Must be truthy if + # successful. + # @return [Object] the result of the operation or `nil` if the first attempt + # fails and `string` is already encoded with `encoding`. + def try_with_encoding(string, encoding) + result = yield string + return result if result + + unless encoding == string.encoding + string = string.encode(encoding) + yield string + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources.rb new file mode 100644 index 0000000..712920d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources.rb @@ -0,0 +1,8 @@ +# encoding: UTF-8 + +module TZInfo + # {DataSource} implementations and classes used by {DataSource} + # implementations. + module DataSources + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/constant_offset_data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/constant_offset_data_timezone_info.rb new file mode 100644 index 0000000..076ee9b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/constant_offset_data_timezone_info.rb @@ -0,0 +1,56 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module DataSources + # Represents a data time zone defined by a constantly observed offset. + class ConstantOffsetDataTimezoneInfo < DataTimezoneInfo + # @return [TimezoneOffset] the offset that is constantly observed. + attr_reader :constant_offset + + # Initializes a new {ConstantOffsetDataTimezoneInfo}. + # + # The passed in `identifier` instance will be frozen. A reference to the + # passed in {TimezoneOffset} will be retained. + # + # @param identifier [String] the identifier of the time zone. + # @param constant_offset [TimezoneOffset] the constantly observed offset. + # @raise [ArgumentError] if `identifier` or `constant_offset` is `nil`. + def initialize(identifier, constant_offset) + super(identifier) + raise ArgumentError, 'constant_offset must be specified' unless constant_offset + @constant_offset = constant_offset + end + + # @param timestamp [Timestamp] ignored. + # @return [TimezonePeriod] an unbounded {TimezonePeriod} for the time + # zone's constantly observed offset. + def period_for(timestamp) + constant_period + end + + # @param local_timestamp [Timestamp] ignored. 
+ # @return [Array<TimezonePeriod>] an `Array` containing a single unbounded + # {TimezonePeriod} for the time zone's constantly observed offset. + def periods_for_local(local_timestamp) + [constant_period] + end + + # @param to_timestamp [Timestamp] ignored. + # @param from_timestamp [Timestamp] ignored. + # @return [Array] an empty `Array`, since there are no transitions in time + # zones that observe a constant offset. + def transitions_up_to(to_timestamp, from_timestamp = nil) + [] + end + + private + + # @return [TimezonePeriod] an unbounded {TimezonePeriod} with the constant + # offset of this timezone. + def constant_period + OffsetTimezonePeriod.new(@constant_offset) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/country_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/country_info.rb new file mode 100644 index 0000000..4745f3b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/country_info.rb @@ -0,0 +1,42 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module DataSources + # Represents a country and references to its time zones as returned by a + # {DataSource}. + class CountryInfo + # @return [String] the ISO 3166-1 alpha-2 country code. + attr_reader :code + + # @return [String] the name of the country. + attr_reader :name + + # @return [Array<CountryTimezone>] the time zones observed in the country. + attr_reader :zones + + # Initializes a new {CountryInfo}. The passed in `code`, `name` and + # `zones` instances will be frozen. + # + # @param code [String] an ISO 3166-1 alpha-2 country code. + # @param name [String] the name of the country. + # @param zones [Array<CountryTimezone>] the time zones observed in the + # country. + # @raise [ArgumentError] if `code`, `name` or `zones` is `nil`. + def initialize(code, name, zones) + raise ArgumentError, 'code must be specified' unless code + raise ArgumentError, 'name must be specified' unless name + raise ArgumentError, 'zones must be specified' unless zones + @code = code.freeze + @name = name.freeze + @zones = zones.freeze + end + + # @return [String] the internal object state as a programmer-readable + # `String`. + def inspect + "#<#{self.class}: #@code>" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/data_timezone_info.rb new file mode 100644 index 0000000..d43203a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/data_timezone_info.rb @@ -0,0 +1,91 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module DataSources + # The base class for time zones defined as either a series of transitions + # ({TransitionsDataTimezoneInfo}) or a constantly observed offset + # ({ConstantOffsetDataTimezoneInfo}). + # + # @abstract Data sources return instances of {DataTimezoneInfo} subclasses. + class DataTimezoneInfo < TimezoneInfo + # @param timestamp [Timestamp] a {Timestamp} with a specified + # {Timestamp#utc_offset utc_offset}. + # @return [TimezonePeriod] the {TimezonePeriod} observed at the time + # specified by `timestamp`. + # @raise [ArgumentError] may be raised if `timestamp` is `nil` or does not + # have a specified {Timestamp#utc_offset utc_offset}. 
+ def period_for(timestamp) + raise_not_implemented('period_for') + end + + # Returns an `Array` containing the {TimezonePeriod TimezonePeriods} that + # could be observed at the local time specified by `local_timestamp`. The + # results are are ordered by increasing UTC start date. An empty `Array` + # is returned if no periods are found for the given local time. + # + # @param local_timestamp [Timestamp] a {Timestamp} representing a local + # time - must have an unspecified {Timestamp#utc_offset utc_offset}. + # @return [Array<TimezonePeriod>] an `Array` containing the + # {TimezonePeriod TimezonePeriods} that could be observed at the local + # time specified by `local_timestamp`. + # @raise [ArgumentError] may be raised if `local_timestamp` is `nil`, or + # has a specified {Timestamp#utc_offset utc_offset}. + def periods_for_local(local_timestamp) + raise_not_implemented('periods_for_local') + end + + # Returns an `Array` of {TimezoneTransition} instances representing the + # times where the UTC offset of the time zone changes. + # + # Transitions are returned up to a given {Timestamp} (`to_timestamp`). + # + # A from {Timestamp} may also be supplied using the `from_timestamp` + # parameter. If `from_timestamp` is specified, only transitions from that + # time onwards will be returned. + # + # Comparisons with `to_timestamp` are exclusive. Comparisons with + # `from_timestamp` are inclusive. If a transition falls precisely on + # `to_timestamp`, it will be excluded. If a transition falls on + # `from_timestamp`, it will be included. + # + # Transitions returned are ordered by when they occur, from earliest to + # latest. + # + # @param to_timestamp [Timestamp] a {Timestamp} with a specified + # {Timestamp#utc_offset utc_offset}. Transitions are returned if they + # occur before this time. + # @param from_timestamp [Timestamp] an optional {Timestamp} with a + # specified {Timestamp#utc_offset utc_offset}. If specified, transitions + # are returned if they occur at or after this time. + # @return [Array<TimezoneTransition>] an `Array` of {TimezoneTransition} + # instances representing the times where the UTC offset of the time zone + # changes. + # @raise [ArgumentError] may be raised if `to_timestamp` is `nil` or does + # not have a specified {Timestamp#utc_offset utc_offset}. + # @raise [ArgumentError] may be raised if `from_timestamp` is specified + # but does not have a specified {Timestamp#utc_offset utc_offset}. + # @raise [ArgumentError] may be raised if `from_timestamp` is specified + # but is not earlier than or at the same time as `to_timestamp`. + def transitions_up_to(to_timestamp, from_timestamp = nil) + raise_not_implemented('transitions_up_to') + end + + # @return [DataTimezone] a new {DataTimezone} instance for the time zone + # represented by this {DataTimezoneInfo}. + def create_timezone + DataTimezone.new(self) + end + + private + + # Raises a {NotImplementedError} to indicate that the base class is + # incorrectly being used directly. + # + # raise [NotImplementedError] always. 
+ def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/linked_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/linked_timezone_info.rb new file mode 100644 index 0000000..f44e79d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/linked_timezone_info.rb @@ -0,0 +1,33 @@ +# encoding: UTF-8 + +module TZInfo + module DataSources + # Represents a time zone that is defined as a link to or alias of another + # zone. + class LinkedTimezoneInfo < TimezoneInfo + # @return [String] the identifier of the time zone that provides the data + # (that this zone links to or is an alias for). + attr_reader :link_to_identifier + + # Initializes a new {LinkedTimezoneInfo}. The passed in `identifier` and + # `link_to_identifier` instances will be frozen. + # + # @param identifier [String] the identifier of the time zone. + # @param link_to_identifier [String] the identifier of the time zone that + # this zone link to. + # @raise [ArgumentError] if `identifier` or `link_to_identifier` are + # `nil`. + def initialize(identifier, link_to_identifier) + super(identifier) + raise ArgumentError, 'link_to_identifier must be specified' unless link_to_identifier + @link_to_identifier = link_to_identifier.freeze + end + + # @return [LinkedTimezone] a new {LinkedTimezone} instance for the time + # zone represented by this {LinkedTimezoneInfo}. + def create_timezone + LinkedTimezone.new(self) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/posix_time_zone_parser.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/posix_time_zone_parser.rb new file mode 100644 index 0000000..b3d2b2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/posix_time_zone_parser.rb @@ -0,0 +1,181 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'strscan' + +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. See #114. + send(:using, UntaintExt) if TZInfo.const_defined?(:UntaintExt) + + module DataSources + # An {InvalidPosixTimeZone} exception is raised if an invalid POSIX-style + # time zone string is encountered. + # + # @private + class InvalidPosixTimeZone < StandardError #:nodoc: + end + private_constant :InvalidPosixTimeZone + + # A parser for POSIX-style TZ strings used in zoneinfo files and specified + # by tzfile.5 and tzset.3. + # + # @private + class PosixTimeZoneParser #:nodoc: + # Initializes a new {PosixTimeZoneParser}. + # + # @param string_deduper [StringDeduper] a {StringDeduper} instance to use + # to dedupe abbreviations. + def initialize(string_deduper) + @string_deduper = string_deduper + end + + # Parses a POSIX-style TZ string. + # + # @param tz_string [String] the string to parse. + # @return [Object] either a {TimezoneOffset} for a constantly applied + # offset or an {AnnualRules} instance representing the rules. + # @raise [InvalidPosixTimeZone] if `tz_string` is not a `String`. + # @raise [InvalidPosixTimeZone] if `tz_string` is is not valid. + def parse(tz_string) + raise InvalidPosixTimeZone unless tz_string.kind_of?(String) + return nil if tz_string.empty? 
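+
+        # As an illustration of the behaviour documented above (example
+        # inputs only, not test data from the upstream source):
+        #
+        #   parse('UTC0')                    # => a constant TimezoneOffset
+        #   parse('EST5EDT,M3.2.0,M11.1.0')  # => an AnnualRules instance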
+ + s = StringScanner.new(tz_string) + check_scan(s, /([^-+,\d<][^-+,\d]*) | <([^>]+)>/x) + std_abbrev = @string_deduper.dedupe((s[1] || s[2]).untaint) + check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + std_offset = get_offset_from_hms(s[1], s[2], s[3]) + + if s.scan(/([^-+,\d<][^-+,\d]*) | <([^>]+)>/x) + dst_abbrev = @string_deduper.dedupe((s[1] || s[2]).untaint) + + if s.scan(/([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + dst_offset = get_offset_from_hms(s[1], s[2], s[3]) + else + # POSIX is negative for ahead of UTC. + dst_offset = std_offset - 3600 + end + + dst_difference = std_offset - dst_offset + + start_rule = parse_rule(s, 'start') + end_rule = parse_rule(s, 'end') + + raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." if s.rest? + + if start_rule.is_always_first_day_of_year? && start_rule.transition_at == 0 && + end_rule.is_always_last_day_of_year? && end_rule.transition_at == 86400 + dst_difference + # Constant daylight savings time. + # POSIX is negative for ahead of UTC. + TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev) + else + AnnualRules.new( + TimezoneOffset.new(-std_offset, 0, std_abbrev), + TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev), + start_rule, + end_rule) + end + elsif !s.rest? + # Constant standard time. + # POSIX is negative for ahead of UTC. + TimezoneOffset.new(-std_offset, 0, std_abbrev) + else + raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." + end + end + + private + + # Parses a rule. + # + # @param s [StringScanner] the `StringScanner` to read the rule from. + # @param type [String] the type of rule (either `'start'` or `'end'`). + # @raise [InvalidPosixTimeZone] if the rule is not valid. + # @return [TransitionRule] the parsed rule. + def parse_rule(s, type) + check_scan(s, /,(?: (?: J(\d+) ) | (\d+) | (?: M(\d+)\.(\d)\.(\d) ) )/x) + julian_day_of_year = s[1] + absolute_day_of_year = s[2] + month = s[3] + week = s[4] + day_of_week = s[5] + + if s.scan(/\//) + check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/) + transition_at = get_seconds_after_midnight_from_hms(s[1], s[2], s[3]) + else + transition_at = 7200 + end + + begin + if julian_day_of_year + JulianDayOfYearTransitionRule.new(julian_day_of_year.to_i, transition_at) + elsif absolute_day_of_year + AbsoluteDayOfYearTransitionRule.new(absolute_day_of_year.to_i, transition_at) + elsif week == '5' + LastDayOfMonthTransitionRule.new(month.to_i, day_of_week.to_i, transition_at) + else + DayOfMonthTransitionRule.new(month.to_i, week.to_i, day_of_week.to_i, transition_at) + end + rescue ArgumentError => e + raise InvalidPosixTimeZone, "Invalid #{type} rule in POSIX-style time zone string: #{e}" + end + end + + # Returns an offset in seconds from hh:mm:ss values. The value can be + # negative. -02:33:12 would represent 2 hours, 33 minutes and 12 seconds + # ahead of UTC. + # + # @param h [String] the hours. + # @param m [String] the minutes. + # @param s [String] the seconds. + # @return [Integer] the offset. + # @raise [InvalidPosixTimeZone] if the mm and ss values are greater than + # 59. + def get_offset_from_hms(h, m, s) + h = h.to_i + m = m.to_i + s = s.to_i + raise InvalidPosixTimeZone, "Invalid minute #{m} in offset for POSIX-style time zone string." if m > 59 + raise InvalidPosixTimeZone, "Invalid second #{s} in offset for POSIX-style time zone string." if s > 59 + magnitude = (h.abs * 60 + m) * 60 + s + h < 0 ? 
-magnitude : magnitude + end + + # Returns the seconds from midnight from hh:mm:ss values. Hours can exceed + # 24 for a time on the following day. Hours can be negative to subtract + # hours from midnight on the given day. -02:33:12 represents 22:33:12 on + # the prior day. + # + # @param h [String] the hour. + # @param m [String] the minutes past the hour. + # @param s [String] the seconds past the minute. + # @return [Integer] the number of seconds after midnight. + # @raise [InvalidPosixTimeZone] if the mm and ss values are greater than + # 59. + def get_seconds_after_midnight_from_hms(h, m, s) + h = h.to_i + m = m.to_i + s = s.to_i + raise InvalidPosixTimeZone, "Invalid minute #{m} in time for POSIX-style time zone string." if m > 59 + raise InvalidPosixTimeZone, "Invalid second #{s} in time for POSIX-style time zone string." if s > 59 + (h * 3600) + m * 60 + s + end + + # Scans for a pattern and raises an exception if the pattern does not + # match the input. + # + # @param s [StringScanner] the `StringScanner` to scan. + # @param pattern [Regexp] the pattern to match. + # @return [String] the result of the scan. + # @raise [InvalidPosixTimeZone] if the pattern does not match the input. + def check_scan(s, pattern) + result = s.scan(pattern) + raise InvalidPosixTimeZone, "Expected '#{s.rest}' to match #{pattern} in POSIX-style time zone string." unless result + result + end + end + private_constant :PosixTimeZoneParser + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/ruby_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/ruby_data_source.rb new file mode 100644 index 0000000..d2dbb26 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/ruby_data_source.rb @@ -0,0 +1,145 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. See #114. + send(:using, UntaintExt) if TZInfo.const_defined?(:UntaintExt) + + module DataSources + # A {TZInfoDataNotFound} exception is raised if the tzinfo-data gem could + # not be found (i.e. `require 'tzinfo/data'` failed) when selecting the Ruby + # data source. + class TZInfoDataNotFound < StandardError + end + + # A DataSource implementation that loads data from the set of Ruby modules + # included in the tzinfo-data gem. + # + # TZInfo will use {RubyDataSource} by default if the tzinfo-data gem + # is available on the load path. It can also be selected by calling + # {DataSource.set} as follows: + # + # TZInfo::DataSource.set(:ruby) + class RubyDataSource < DataSource + # (see DataSource#data_timezone_identifiers) + attr_reader :data_timezone_identifiers + + # (see DataSource#linked_timezone_identifiers) + attr_reader :linked_timezone_identifiers + + # (see DataSource#country_codes) + attr_reader :country_codes + + # Initializes a new {RubyDataSource} instance. + # + # @raise [TZInfoDataNotFound] if the tzinfo-data gem could not be found + # (i.e. `require 'tzinfo/data'` failed). + def initialize + super + + begin + require('tzinfo/data') + rescue LoadError + raise TZInfoDataNotFound, "The tzinfo-data gem could not be found (require 'tzinfo/data' failed)." 
+ end + + if TZInfo::Data.const_defined?(:LOCATION) + # Format 2 + @base_path = File.join(TZInfo::Data::LOCATION, 'tzinfo', 'data') + else + # Format 1 + data_file = File.join('', 'tzinfo', 'data.rb') + path = $".reverse_each.detect {|p| p.end_with?(data_file) } + if path + @base_path = File.join(File.dirname(path), 'data').untaint + else + @base_path = 'tzinfo/data' + end + end + + require_index('timezones') + require_index('countries') + + @data_timezone_identifiers = Data::Indexes::Timezones.data_timezones + @linked_timezone_identifiers = Data::Indexes::Timezones.linked_timezones + @countries = Data::Indexes::Countries.countries + @country_codes = @countries.keys.sort!.freeze + end + + # (see DataSource#to_s) + def to_s + "Ruby DataSource: #{version_info}" + end + + # (see DataSource#inspect) + def inspect + "#<TZInfo::DataSources::RubyDataSource: #{version_info}>" + end + + protected + + # Returns a {TimezoneInfo} instance for the given time zone identifier. + # The result will either be a {ConstantOffsetDataTimezoneInfo}, a + # {TransitionsDataTimezoneInfo} or a {LinkedTimezoneInfo} depending on the + # type of time zone. + # + # @param identifier [String] A time zone identifier. + # @return [TimezoneInfo] a {TimezoneInfo} instance for the given time zone + # identifier. + # @raise [InvalidTimezoneIdentifier] if the time zone is not found or the + # identifier is invalid. + def load_timezone_info(identifier) + valid_identifier = validate_timezone_identifier(identifier) + split_identifier = valid_identifier.gsub(/-/, '__m__').gsub(/\+/, '__p__').split('/') + + begin + require_definition(split_identifier) + + m = Data::Definitions + split_identifier.each {|part| m = m.const_get(part) } + m.get + rescue LoadError, NameError => e + raise InvalidTimezoneIdentifier, "#{e.message.encode(Encoding::UTF_8)} (loading #{valid_identifier})" + end + end + + # (see DataSource#load_country_info) + def load_country_info(code) + lookup_country_info(@countries, code) + end + + private + + # Requires a zone definition by its identifier (split on /). + # + # @param identifier [Array<string>] the component parts of a time zone + # identifier (split on /). This must have already been validated. + def require_definition(identifier) + require_data(*(['definitions'] + identifier)) + end + + # Requires an index by its name. + # + # @param name [String] an index name. + def require_index(name) + require_data(*['indexes', name]) + end + + # Requires a file from tzinfo/data. + # + # @param file [Array<String>] a relative path to a file to be required. + def require_data(*file) + require(File.join(@base_path, *file)) + end + + # @return [String] a `String` containing TZInfo::Data version infomation + # for inclusion in the #to_s and #inspect output. + def version_info + # The TZInfo::Data::VERSION constant is only available from v1.2014.8 + # onwards. + "tzdb v#{TZInfo::Data::Version::TZDATA}#{TZInfo::Data.const_defined?(:VERSION) ? ", tzinfo-data v#{TZInfo::Data::VERSION}" : ''}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/timezone_info.rb new file mode 100644 index 0000000..51d8d51 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/timezone_info.rb @@ -0,0 +1,47 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module DataSources + # Represents a time zone defined by a data source. 
+ # + # @abstract Data sources return instances of {TimezoneInfo} subclasses. + class TimezoneInfo + # @return [String] the identifier of the time zone. + attr_reader :identifier + + # Initializes a new TimezoneInfo. The passed in `identifier` instance will + # be frozen. + # + # @param identifier [String] the identifier of the time zone. + # @raise [ArgumentError] if `identifier` is `nil`. + def initialize(identifier) + raise ArgumentError, 'identifier must be specified' unless identifier + @identifier = identifier.freeze + end + + # @return [String] the internal object state as a programmer-readable + # `String`. + def inspect + "#<#{self.class}: #@identifier>" + end + + # @return [Timezone] a new {Timezone} instance for the time zone + # represented by this {TimezoneInfo}. + def create_timezone + raise_not_implemented('create_timezone') + end + + private + + # Raises a {NotImplementedError}. + # + # @param method_name [String] the name of the method that must be + # overridden. + # @raise NotImplementedError always. + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/transitions_data_timezone_info.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/transitions_data_timezone_info.rb new file mode 100644 index 0000000..14fe51e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/transitions_data_timezone_info.rb @@ -0,0 +1,214 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module DataSources + # Represents a data time zone defined by a list of transitions that change + # the locally observed time. + class TransitionsDataTimezoneInfo < DataTimezoneInfo + # @return [Array<TimezoneTransition>] the transitions that define this + # time zone in order of ascending timestamp. + attr_reader :transitions + + # Initializes a new {TransitionsDataTimezoneInfo}. + # + # The passed in `identifier` instance will be frozen. A reference to the + # passed in `Array` will be retained. + # + # The `transitions` `Array` must be sorted in order of ascending + # timestamp. Each transition must have a + # {TimezoneTransition#timestamp_value timestamp_value} that is greater + # than the {TimezoneTransition#timestamp_value timestamp_value} of the + # prior transition. + # + # @param identifier [String] the identifier of the time zone. + # @param transitions [Array<TimezoneTransitions>] an `Array` of + # transitions that each indicate when a change occurs in the locally + # observed time. + # @raise [ArgumentError] if `identifier` is `nil`. + # @raise [ArgumentError] if `transitions` is `nil`. + # @raise [ArgumentError] if `transitions` is an empty `Array`. + def initialize(identifier, transitions) + super(identifier) + raise ArgumentError, 'transitions must be specified' unless transitions + raise ArgumentError, 'transitions must not be an empty Array' if transitions.empty? 
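+        # Illustrative caller sketch - `sorted_transitions` below is a
+        # hypothetical, non-empty Array of TimezoneTransition instances
+        # ordered by ascending timestamp_value, as required above:
+        #
+        #   info = TransitionsDataTimezoneInfo.new('Europe/London', sorted_transitions)
+        #   info.period_for(timestamp)  # => a TransitionsTimezonePeriod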
+ @transitions = transitions.freeze + end + + # (see DataTimezoneInfo#period_for) + def period_for(timestamp) + raise ArgumentError, 'timestamp must be specified' unless timestamp + raise ArgumentError, 'timestamp must have a specified utc_offset' unless timestamp.utc_offset + + timestamp_value = timestamp.value + + index = find_minimum_transition {|t| t.timestamp_value >= timestamp_value } + + if index + transition = @transitions[index] + + if transition.timestamp_value == timestamp_value + # timestamp occurs within the second of the found transition, so is + # the transition that starts the period. + start_transition = transition + end_transition = @transitions[index + 1] + else + # timestamp occurs before the second of the found transition, so is + # the transition that ends the period. + start_transition = index == 0 ? nil : @transitions[index - 1] + end_transition = transition + end + else + start_transition = @transitions.last + end_transition = nil + end + + TransitionsTimezonePeriod.new(start_transition, end_transition) + end + + # (see DataTimezoneInfo#periods_for_local) + def periods_for_local(local_timestamp) + raise ArgumentError, 'local_timestamp must be specified' unless local_timestamp + raise ArgumentError, 'local_timestamp must have an unspecified utc_offset' if local_timestamp.utc_offset + + local_timestamp_value = local_timestamp.value + latest_possible_utc_value = local_timestamp_value + 86400 + earliest_possible_utc_value = local_timestamp_value - 86400 + + # Find the index of the first transition that occurs after a latest + # possible UTC representation of the local timestamp and then search + # backwards until an earliest possible UTC representation. + + index = find_minimum_transition {|t| t.timestamp_value >= latest_possible_utc_value } + + # No transitions after latest_possible_utc_value, set to max index + 1 + # to search backwards including the period after the last transition + index = @transitions.length unless index + + result = [] + + index.downto(0) do |i| + start_transition = i > 0 ? @transitions[i - 1] : nil + end_transition = @transitions[i] + offset = start_transition ? start_transition.offset : end_transition.previous_offset + utc_timestamp_value = local_timestamp_value - offset.observed_utc_offset + + # It is not necessary to compare the sub-seconds because a timestamp + # is in the period if is >= the start transition (sub-seconds would + # make == become >) and if it is < the end transition (which + # sub-seconds cannot affect). + if (!start_transition || utc_timestamp_value >= start_transition.timestamp_value) && (!end_transition || utc_timestamp_value < end_transition.timestamp_value) + result << TransitionsTimezonePeriod.new(start_transition, end_transition) + elsif end_transition && end_transition.timestamp_value < earliest_possible_utc_value + break + end + end + + result.reverse! 
+ end + + # (see DataTimezoneInfo#transitions_up_to) + def transitions_up_to(to_timestamp, from_timestamp = nil) + raise ArgumentError, 'to_timestamp must be specified' unless to_timestamp + raise ArgumentError, 'to_timestamp must have a specified utc_offset' unless to_timestamp.utc_offset + + if from_timestamp + raise ArgumentError, 'from_timestamp must have a specified utc_offset' unless from_timestamp.utc_offset + raise ArgumentError, 'to_timestamp must be greater than from_timestamp' if to_timestamp <= from_timestamp + end + + if from_timestamp + from_index = find_minimum_transition {|t| transition_on_or_after_timestamp?(t, from_timestamp) } + return [] unless from_index + else + from_index = 0 + end + + to_index = find_minimum_transition {|t| transition_on_or_after_timestamp?(t, to_timestamp) } + + if to_index + return [] if to_index < 1 + to_index -= 1 + else + to_index = -1 + end + + @transitions[from_index..to_index] + end + + private + + # Array#bsearch_index was added in Ruby 2.3.0. Use bsearch_index to find + # transitions if it is available, otherwise use a Ruby implementation. + if [].respond_to?(:bsearch_index) + # Performs a binary search on {transitions} to find the index of the + # earliest transition satisfying a condition. + # + # @yield [transition] the caller will be yielded to to test the search + # condition. + # @yieldparam transition [TimezoneTransition] a {TimezoneTransition} + # instance from {transitions}. + # @yieldreturn [Boolean] `true` for the earliest transition that + # satisfies the condition and return `true` for all subsequent + # transitions. In all other cases, the result of the block must be + # `false`. + # @return [Integer] the index of the earliest transition safisfying + # the condition or `nil` if there are no such transitions. + # + # :nocov_no_array_bsearch_index: + def find_minimum_transition(&block) + @transitions.bsearch_index(&block) + end + # :nocov_no_array_bsearch_index: + else + # Performs a binary search on {transitions} to find the index of the + # earliest transition satisfying a condition. + # + # @yield [transition] the caller will be yielded to to test the search + # condition. + # @yieldparam transition [TimezoneTransition] a {TimezoneTransition} + # instance from {transitions}. + # @yieldreturn [Boolean] `true` for the earliest transition that + # satisfies the condition and return `true` for all subsequent + # transitions. In all other cases, the result of the block must be + # `false`. + # @return [Integer] the index of the earliest transition safisfying + # the condition or `nil` if there are no such transitions. + # + # :nocov_array_bsearch_index: + def find_minimum_transition + # A Ruby implementation of the find-minimum mode of Array#bsearch_index. + low = 0 + high = @transitions.length + satisfied = false + + while low < high do + mid = (low + high).div(2) + if yield @transitions[mid] + satisfied = true + high = mid + else + low = mid + 1 + end + end + + satisfied ? low : nil + end + # :nocov_array_bsearch_index: + end + + # Determines if a transition occurs at or after a given {Timestamp}, + # taking the {Timestamp#sub_second sub_second} into consideration. + # + # @param transition [TimezoneTransition] the transition to compare. + # @param timestamp [Timestamp] the timestamp to compare. + # @return [Boolean] `true` if `transition` occurs at or after `timestamp`, + # otherwise `false`. 
+ def transition_on_or_after_timestamp?(transition, timestamp) + transition_timestamp_value = transition.timestamp_value + timestamp_value = timestamp.value + transition_timestamp_value > timestamp_value || transition_timestamp_value == timestamp_value && timestamp.sub_second == 0 + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/zoneinfo_data_source.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/zoneinfo_data_source.rb new file mode 100644 index 0000000..cb76c3a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/zoneinfo_data_source.rb @@ -0,0 +1,580 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. See #114. + send(:using, UntaintExt) if TZInfo.const_defined?(:UntaintExt) + + module DataSources + # An {InvalidZoneinfoDirectory} exception is raised if {ZoneinfoDataSource} + # is initialized with a specific zoneinfo path that is not a valid zoneinfo + # directory. A valid zoneinfo directory is one that contains time zone + # files, a country code index file named iso3166.tab and a time zone index + # file named zone1970.tab or zone.tab. + class InvalidZoneinfoDirectory < StandardError + end + + # A {ZoneinfoDirectoryNotFound} exception is raised if no valid zoneinfo + # directory could be found when checking the paths listed in + # {ZoneinfoDataSource.search_path}. A valid zoneinfo directory is one that + # contains time zone files, a country code index file named iso3166.tab and + # a time zone index file named zone1970.tab or zone.tab. + class ZoneinfoDirectoryNotFound < StandardError + end + + # A DataSource implementation that loads data from a 'zoneinfo' directory + # containing compiled "TZif" version 3 (or earlier) files in addition to + # iso3166.tab and zone1970.tab or zone.tab index files. + # + # To have TZInfo load the system zoneinfo files, call + # {TZInfo::DataSource.set} as follows: + # + # TZInfo::DataSource.set(:zoneinfo) + # + # To load zoneinfo files from a particular directory, pass the directory to + # {TZInfo::DataSource.set}: + # + # TZInfo::DataSource.set(:zoneinfo, directory) + # + # To load zoneinfo files from a particular directory, but load the + # iso3166.tab index file from a separate location, pass the directory and + # path to the iso3166.tab file to {TZInfo::DataSource.set}: + # + # TZInfo::DataSource.set(:zoneinfo, directory, iso3166_path) + # + # Please note that versions of the 'zic' tool (used to build zoneinfo files) + # that were released prior to February 2006 created zoneinfo files that used + # 32-bit integers for transition timestamps. Later versions of zic produce + # zoneinfo files that use 64-bit integers. If you have 32-bit zoneinfo files + # on your system, then any queries falling outside of the range 1901-12-13 + # 20:45:52 to 2038-01-19 03:14:07 may be inaccurate. + # + # Most modern platforms include 64-bit zoneinfo files. However, Mac OS X (up + # to at least 10.8.4) still uses 32-bit zoneinfo files. 
+ # + # To check whether your zoneinfo files contain 32-bit or 64-bit transition + # data, you can run the following code (substituting the identifier of the + # zone you want to test for `zone_identifier`): + # + # TZInfo::DataSource.set(:zoneinfo) + # dir = TZInfo::DataSource.get.zoneinfo_dir + # File.open(File.join(dir, zone_identifier), 'r') {|f| f.read(5) } + # + # If the last line returns `"TZif\\x00"`, then you have a 32-bit zoneinfo + # file. If it returns `"TZif2"` or `"TZif3"` then you have a 64-bit zoneinfo + # file. + # + # It is also worth noting that as of the 2017c release of the IANA Time Zone + # Database, 64-bit zoneinfo files only include future transitions up to + # 2038-01-19 03:14:07. Any queries falling after this time may be + # inaccurate. + class ZoneinfoDataSource < DataSource + # The default value of {ZoneinfoDataSource.search_path}. + DEFAULT_SEARCH_PATH = ['/usr/share/zoneinfo', '/usr/share/lib/zoneinfo', '/etc/zoneinfo'].freeze + private_constant :DEFAULT_SEARCH_PATH + + # The default value of {ZoneinfoDataSource.alternate_iso3166_tab_search_path}. + DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH = ['/usr/share/misc/iso3166.tab', '/usr/share/misc/iso3166'].freeze + private_constant :DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH + + # Paths to be checked to find the system zoneinfo directory. + # + # @private + @@search_path = DEFAULT_SEARCH_PATH.dup + + # Paths to possible alternate iso3166.tab files (used to locate the + # system-wide iso3166.tab files on FreeBSD and OpenBSD). + # + # @private + @@alternate_iso3166_tab_search_path = DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH.dup + + class << self + # An `Array` of directories that will be checked to find the system + # zoneinfo directory. + # + # Directories are checked in the order they appear in the `Array`. + # + # The default value is `['/usr/share/zoneinfo', + # '/usr/share/lib/zoneinfo', '/etc/zoneinfo']`. + # + # @return [Array<String>] an `Array` of directories to check in order to + # find the system zoneinfo directory. + def search_path + @@search_path + end + + # Sets the directories to be checked when locating the system zoneinfo + # directory. + # + # Can be set to an `Array` of directories or a `String` containing + # directories separated with `File::PATH_SEPARATOR`. + # + # Directories are checked in the order they appear in the `Array` or + # `String`. + # + # Set to `nil` to revert to the default paths. + # + # @param search_path [Object] either `nil` or a list of directories to + # check as either an `Array` of `String` or a `File::PATH_SEPARATOR` + # separated `String`. + def search_path=(search_path) + @@search_path = process_search_path(search_path, DEFAULT_SEARCH_PATH) + end + + # An `Array` of paths that will be checked to find an alternate + # iso3166.tab file if one was not included in the zoneinfo directory + # (for example, on FreeBSD and OpenBSD systems). + # + # Paths are checked in the order they appear in the `Array`. + # + # The default value is `['/usr/share/misc/iso3166.tab', + # '/usr/share/misc/iso3166']`. + # + # @return [Array<String>] an `Array` of paths to check in order to + # locate an iso3166.tab file. + def alternate_iso3166_tab_search_path + @@alternate_iso3166_tab_search_path + end + + # Sets the paths to check to locate an alternate iso3166.tab file if one + # was not included in the zoneinfo directory. + # + # Can be set to an `Array` of paths or a `String` containing paths + # separated with `File::PATH_SEPARATOR`. 
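+        #
+        # For example (an illustrative path mirroring the defaults listed
+        # above; a `File::PATH_SEPARATOR` separated `String` works equally):
+        #
+        #   TZInfo::DataSources::ZoneinfoDataSource.alternate_iso3166_tab_search_path = ['/usr/share/misc/iso3166.tab']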
+ # + # Paths are checked in the order they appear in the array. + # + # Set to `nil` to revert to the default paths. + # + # @param alternate_iso3166_tab_search_path [Object] either `nil` or a + # list of paths to check as either an `Array` of `String` or a + # `File::PATH_SEPARATOR` separated `String`. + def alternate_iso3166_tab_search_path=(alternate_iso3166_tab_search_path) + @@alternate_iso3166_tab_search_path = process_search_path(alternate_iso3166_tab_search_path, DEFAULT_ALTERNATE_ISO3166_TAB_SEARCH_PATH) + end + + private + + # Processes a path for use as the {search_path} or + # {alternate_iso3166_tab_search_path}. + # + # @param path [Object] either `nil` or a list of paths to check as + # either an `Array` of `String` or a `File::PATH_SEPARATOR` separated + # `String`. + # @param default [Array<String>] the default value. + # @return [Array<String>] the processed path. + def process_search_path(path, default) + if path + if path.kind_of?(String) + path.split(File::PATH_SEPARATOR) + else + path.collect(&:to_s) + end + else + default.dup + end + end + end + + # @return [String] the zoneinfo directory being used. + attr_reader :zoneinfo_dir + + # (see DataSource#country_codes) + attr_reader :country_codes + + # Initializes a new {ZoneinfoDataSource}. + # + # If `zoneinfo_dir` is specified, it will be checked and used as the + # source of zoneinfo files. + # + # The directory must contain a file named iso3166.tab and a file named + # either zone1970.tab or zone.tab. These may either be included in the + # root of the directory or in a 'tab' sub-directory and named country.tab + # and zone_sun.tab respectively (as is the case on Solaris). + # + # Additionally, the path to iso3166.tab can be overridden using the + # `alternate_iso3166_tab_path` parameter. + # + # If `zoneinfo_dir` is not specified or `nil`, the paths referenced in + # {search_path} are searched in order to find a valid zoneinfo directory + # (one that contains zone1970.tab or zone.tab and iso3166.tab files as + # above). + # + # The paths referenced in {alternate_iso3166_tab_search_path} are also + # searched to find an iso3166.tab file if one of the searched zoneinfo + # directories doesn't contain an iso3166.tab file. + # + # @param zoneinfo_dir [String] an optional path to a directory to use as + # the source of zoneinfo files. + # @param alternate_iso3166_tab_path [String] an optional path to the + # iso3166.tab file. + # @raise [InvalidZoneinfoDirectory] if the iso3166.tab and zone1970.tab or + # zone.tab files cannot be found using the `zoneinfo_dir` and + # `alternate_iso3166_tab_path` parameters. + # @raise [ZoneinfoDirectoryNotFound] if no valid directory can be found + # by searching. + def initialize(zoneinfo_dir = nil, alternate_iso3166_tab_path = nil) + super() + + if zoneinfo_dir + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(zoneinfo_dir, alternate_iso3166_tab_path) + + unless iso3166_tab_path && zone_tab_path + raise InvalidZoneinfoDirectory, "#{zoneinfo_dir} is not a directory or doesn't contain a iso3166.tab file and a zone1970.tab or zone.tab file." + end + + @zoneinfo_dir = zoneinfo_dir + else + @zoneinfo_dir, iso3166_tab_path, zone_tab_path = find_zoneinfo_dir + + unless @zoneinfo_dir && iso3166_tab_path && zone_tab_path + raise ZoneinfoDirectoryNotFound, "None of the paths included in #{self.class.name}.search_path are valid zoneinfo directories." 
+ end + end + + @zoneinfo_dir = File.expand_path(@zoneinfo_dir).freeze + @timezone_identifiers = load_timezone_identifiers.freeze + @countries = load_countries(iso3166_tab_path, zone_tab_path).freeze + @country_codes = @countries.keys.sort!.freeze + + string_deduper = ConcurrentStringDeduper.new + posix_tz_parser = PosixTimeZoneParser.new(string_deduper) + @zoneinfo_reader = ZoneinfoReader.new(posix_tz_parser, string_deduper) + end + + # Returns a frozen `Array` of all the available time zone identifiers. The + # identifiers are sorted according to `String#<=>`. + # + # @return [Array<String>] a frozen `Array` of all the available time zone + # identifiers. + def data_timezone_identifiers + @timezone_identifiers + end + + # Returns an empty `Array`. There is no information about linked/aliased + # time zones in the zoneinfo files. When using {ZoneinfoDataSource}, every + # time zone will be returned as a {DataTimezone}. + # + # @return [Array<String>] an empty `Array`. + def linked_timezone_identifiers + [].freeze + end + + # (see DataSource#to_s) + def to_s + "Zoneinfo DataSource: #{@zoneinfo_dir}" + end + + # (see DataSource#inspect) + def inspect + "#<#{self.class}: #{@zoneinfo_dir}>" + end + + protected + + # Returns a {TimezoneInfo} instance for the given time zone identifier. + # The result will either be a {ConstantOffsetDataTimezoneInfo} or a + # {TransitionsDataTimezoneInfo}. + # + # @param identifier [String] A time zone identifier. + # @return [TimezoneInfo] a {TimezoneInfo} instance for the given time zone + # identifier. + # @raise [InvalidTimezoneIdentifier] if the time zone is not found, the + # identifier is invalid, the zoneinfo file cannot be opened or the + # zoneinfo file is not valid. + def load_timezone_info(identifier) + valid_identifier = validate_timezone_identifier(identifier) + path = File.join(@zoneinfo_dir, valid_identifier) + + zoneinfo = begin + @zoneinfo_reader.read(path) + rescue Errno::EACCES, InvalidZoneinfoFile => e + raise InvalidTimezoneIdentifier, "#{e.message.encode(Encoding::UTF_8)} (loading #{valid_identifier})" + rescue Errno::EISDIR, Errno::ENAMETOOLONG, Errno::ENOENT, Errno::ENOTDIR + raise InvalidTimezoneIdentifier, "Invalid identifier: #{valid_identifier}" + end + + if zoneinfo.kind_of?(TimezoneOffset) + ConstantOffsetDataTimezoneInfo.new(valid_identifier, zoneinfo) + else + TransitionsDataTimezoneInfo.new(valid_identifier, zoneinfo) + end + end + + # (see DataSource#load_country_info) + def load_country_info(code) + lookup_country_info(@countries, code) + end + + private + + # Validates a zoneinfo directory and returns the paths to the iso3166.tab + # and zone1970.tab or zone.tab files if valid. If the directory is not + # valid, returns `nil`. + # + # The path to the iso3166.tab file may be overridden by passing in a path. + # This is treated as either absolute or relative to the current working + # directory. + # + # @param path [String] the path to a possible zoneinfo directory. + # @param iso3166_tab_path [String] an optional path to an external + # iso3166.tab file. + # @return [Array<String>] an `Array` containing the iso3166.tab and + # zone.tab paths if the directory is valid, otherwise `nil`. 
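+      #
+      # As an illustration only (the exact files found depend on the system),
+      # a typical Linux zoneinfo directory resolves to a pair of tab paths:
+      #
+      #   validate_zoneinfo_dir('/usr/share/zoneinfo')
+      #   # => ['/usr/share/zoneinfo/iso3166.tab', '/usr/share/zoneinfo/zone1970.tab']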
+ def validate_zoneinfo_dir(path, iso3166_tab_path = nil) + if File.directory?(path) + if iso3166_tab_path + return nil unless File.file?(iso3166_tab_path) + else + iso3166_tab_path = resolve_tab_path(path, ['iso3166.tab'], 'country.tab') + return nil unless iso3166_tab_path + end + + zone_tab_path = resolve_tab_path(path, ['zone1970.tab', 'zone.tab'], 'zone_sun.tab') + return nil unless zone_tab_path + + [iso3166_tab_path, zone_tab_path] + else + nil + end + end + + # Attempts to resolve the path to a tab file given its standard names and + # tab sub-directory name (as used on Solaris). + # + # @param zoneinfo_path [String] the path to a zoneinfo directory. + # @param standard_names [Array<String>] the standard names for the tab + # file. + # @param tab_name [String] the alternate name for the tab file to check in + # the tab sub-directory. + # @return [String] the path to the tab file. + def resolve_tab_path(zoneinfo_path, standard_names, tab_name) + standard_names.each do |standard_name| + path = File.join(zoneinfo_path, standard_name) + return path if File.file?(path) + end + + path = File.join(zoneinfo_path, 'tab', tab_name) + return path if File.file?(path) + + nil + end + + # Finds a zoneinfo directory using {search_path} and + # {alternate_iso3166_tab_search_path}. + # + # @return [Array<String>] an `Array` containing the iso3166.tab and + # zone.tab paths if a zoneinfo directory was found, otherwise `nil`. + def find_zoneinfo_dir + alternate_iso3166_tab_path = self.class.alternate_iso3166_tab_search_path.detect do |path| + File.file?(path) + end + + self.class.search_path.each do |path| + # Try without the alternate_iso3166_tab_path first. + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(path) + return path, iso3166_tab_path, zone_tab_path if iso3166_tab_path && zone_tab_path + + if alternate_iso3166_tab_path + iso3166_tab_path, zone_tab_path = validate_zoneinfo_dir(path, alternate_iso3166_tab_path) + return path, iso3166_tab_path, zone_tab_path if iso3166_tab_path && zone_tab_path + end + end + + # Not found. + nil + end + + # Scans @zoneinfo_dir and returns an `Array` of available time zone + # identifiers. The result is sorted according to `String#<=>`. + # + # @return [Array<String>] an `Array` containing all the time zone + # identifiers found. + def load_timezone_identifiers + index = [] + + # Ignoring particular files: + # +VERSION is included on Mac OS X. + # leapseconds is a list of leap seconds. + # localtime is the current local timezone (may be a link). + # posix, posixrules and right are directories containing other versions of the zoneinfo files. + # src is a directory containing the tzdata source included on Solaris. + # timeconfig is a symlink included on Slackware. + + enum_timezones([], ['+VERSION', 'leapseconds', 'localtime', 'posix', 'posixrules', 'right', 'src', 'timeconfig']) do |identifier| + index << identifier.join('/').freeze + end + + index.sort! + end + + # Recursively enumerate a directory of time zones. + # + # @param dir [Array<String>] the directory to enumerate as an `Array` of + # path components. + # @param exclude [Array<String>] file names to exclude when scanning + # `dir`. + # @yield [path] the path of each time zone file found is passed to + # the block. + # @yieldparam path [Array<String>] the path of a time zone file as an + # `Array` of path components. 
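+      #
+      # For example, {load_timezone_identifiers} above builds its index with a
+      # call along the lines of (exclusion list abbreviated):
+      #
+      #   enum_timezones([], ['+VERSION', 'localtime']) {|path| index << path.join('/') }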
+ def enum_timezones(dir, exclude = [], &block) + Dir.foreach(File.join(@zoneinfo_dir, *dir)) do |entry| + begin + entry.encode!(Encoding::UTF_8) + rescue EncodingError + next + end + + unless entry =~ /\./ || exclude.include?(entry) + entry.untaint + path = dir + [entry] + full_path = File.join(@zoneinfo_dir, *path) + + if File.directory?(full_path) + enum_timezones(path, [], &block) + elsif File.file?(full_path) + yield path + end + end + end + end + + # Uses the iso3166.tab and zone1970.tab or zone.tab files to return a Hash + # mapping country codes to CountryInfo instances. + # + # @param iso3166_tab_path [String] the path to the iso3166.tab file. + # @param zone_tab_path [String] the path to the zone.tab file. + # @return [Hash<String, CountryInfo>] a mapping from ISO 3166-1 alpha-2 + # country codes to {CountryInfo} instances. + def load_countries(iso3166_tab_path, zone_tab_path) + + # Handle standard 3 to 4 column zone.tab files as well as the 4 to 5 + # column format used by Solaris. + # + # On Solaris, an extra column before the comment gives an optional + # linked/alternate timezone identifier (or '-' if not set). + # + # Additionally, there is a section at the end of the file for timezones + # covering regions. These are given lower-case "country" codes. The timezone + # identifier column refers to a continent instead of an identifier. These + # lines will be ignored by TZInfo. + # + # Since the last column is optional in both formats, testing for the + # Solaris format is done in two passes. The first pass identifies if there + # are any lines using 5 columns. + + + # The first column is allowed to be a comma separated list of country + # codes, as used in zone1970.tab (introduced in tzdata 2014f). + # + # The first country code in the comma-separated list is the country that + # contains the city the zone identifier is based on. The first country + # code on each line is considered to be primary with the others + # secondary. + # + # The zones for each country are ordered primary first, then secondary. + # Within the primary and secondary groups, the zones are ordered by their + # order in the file. + + file_is_5_column = false + zone_tab = [] + + file = File.read(zone_tab_path, external_encoding: Encoding::UTF_8, internal_encoding: Encoding::UTF_8) + file.each_line do |line| + line.chomp! + + if line =~ /\A([A-Z]{2}(?:,[A-Z]{2})*)\t(?:([+\-])(\d{2})(\d{2})([+\-])(\d{3})(\d{2})|([+\-])(\d{2})(\d{2})(\d{2})([+\-])(\d{3})(\d{2})(\d{2}))\t([^\t]+)(?:\t([^\t]+))?(?:\t([^\t]+))?\z/ + codes = $1 + + if $2 + latitude = dms_to_rational($2, $3, $4) + longitude = dms_to_rational($5, $6, $7) + else + latitude = dms_to_rational($8, $9, $10, $11) + longitude = dms_to_rational($12, $13, $14, $15) + end + + zone_identifier = $16 + column4 = $17 + column5 = $18 + + file_is_5_column = true if column5 + + zone_tab << [codes.split(','.freeze), zone_identifier, latitude, longitude, column4, column5] + end + end + + string_deduper = StringDeduper.new + primary_zones = {} + secondary_zones = {} + + zone_tab.each do |codes, zone_identifier, latitude, longitude, column4, column5| + description = file_is_5_column ? column5 : column4 + description = string_deduper.dedupe(description) if description + + # Lookup the identifier in the timezone index, so that the same + # String instance can be used (saving memory). + begin + zone_identifier = validate_timezone_identifier(zone_identifier) + rescue InvalidTimezoneIdentifier + # zone_identifier is not valid, dedupe and allow anyway. 
+ zone_identifier = string_deduper.dedupe(zone_identifier) + end + + country_timezone = CountryTimezone.new(zone_identifier, latitude, longitude, description) + + # codes will always have at least one element + + (primary_zones[codes.first.freeze] ||= []) << country_timezone + + codes[1..-1].each do |code| + (secondary_zones[code.freeze] ||= []) << country_timezone + end + end + + countries = {} + + file = File.read(iso3166_tab_path, external_encoding: Encoding::UTF_8, internal_encoding: Encoding::UTF_8) + file.each_line do |line| + line.chomp! + + # Handle both the two column alpha-2 and name format used in the tz + # database as well as the 4 column alpha-2, alpha-3, numeric-3 and + # name format used by FreeBSD and OpenBSD. + + if line =~ /\A([A-Z]{2})(?:\t[A-Z]{3}\t[0-9]{3})?\t(.+)\z/ + code = $1 + name = $2 + zones = (primary_zones[code] || []) + (secondary_zones[code] || []) + + countries[code] = CountryInfo.new(code, name, zones) + end + end + + countries + end + + # Converts degrees, minutes and seconds to a Rational. + # + # @param sign [String] `'-'` or `'+'`. + # @param degrees [String] the number of degrees. + # @param minutes [String] the number of minutes. + # @param seconds [String] the number of seconds (optional). + # @return [Rational] the result of converting from degrees, minutes and + # seconds to a `Rational`. + def dms_to_rational(sign, degrees, minutes, seconds = nil) + degrees = degrees.to_i + minutes = minutes.to_i + sign = sign == '-'.freeze ? -1 : 1 + + if seconds + Rational(sign * (degrees * 3600 + minutes * 60 + seconds.to_i), 3600) + else + Rational(sign * (degrees * 60 + minutes), 60) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/zoneinfo_reader.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/zoneinfo_reader.rb new file mode 100644 index 0000000..ac82585 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_sources/zoneinfo_reader.rb @@ -0,0 +1,486 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Use send as a workaround for erroneous 'wrong number of arguments' errors + # with JRuby 9.0.5.0 when calling methods with Java implementations. See #114. + send(:using, UntaintExt) if TZInfo.const_defined?(:UntaintExt) + + module DataSources + # An {InvalidZoneinfoFile} exception is raised if an attempt is made to load + # an invalid zoneinfo file. + class InvalidZoneinfoFile < StandardError + end + + # Reads compiled zoneinfo TZif (\0, 2 or 3) files. + class ZoneinfoReader #:nodoc: + # The year to generate transitions up to. + # + # @private + GENERATE_UP_TO = Time.now.utc.year + 100 + private_constant :GENERATE_UP_TO + + # Initializes a new {ZoneinfoReader}. + # + # @param posix_tz_parser [PosixTimeZoneParser] a {PosixTimeZoneParser} + # instance to use to parse POSIX-style TZ strings. + # @param string_deduper [StringDeduper] a {StringDeduper} instance to use + # to dedupe abbreviations. + def initialize(posix_tz_parser, string_deduper) + @posix_tz_parser = posix_tz_parser + @string_deduper = string_deduper + end + + # Reads a zoneinfo structure from the given path. Returns either a + # {TimezoneOffset} that is constantly observed or an `Array` + # {TimezoneTransition}s. + # + # @param file_path [String] the path of a zoneinfo file. + # @return [Object] either a {TimezoneOffset} or an `Array` of + # {TimezoneTransition}s. + # @raise [SecurityError] if safe mode is enabled and `file_path` is + # tainted. 
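+      #
+      # A minimal sketch of the expected results (paths illustrative; the
+      # parser and deduper are constructed as in {ZoneinfoDataSource}):
+      #
+      #   reader = ZoneinfoReader.new(posix_tz_parser, string_deduper)
+      #   reader.read('/usr/share/zoneinfo/Etc/UTC')      # => a TimezoneOffset
+      #   reader.read('/usr/share/zoneinfo/Europe/Paris') # => [TimezoneTransition, ...]
+      #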
+ # @raise [InvalidZoneinfoFile] if `file_path`` does not refer to a valid + # zoneinfo file. + def read(file_path) + File.open(file_path, 'rb') { |file| parse(file) } + end + + private + + # Translates an unsigned 32-bit integer (as returned by unpack) to signed + # 32-bit. + # + # @param long [Integer] an unsigned 32-bit integer. + # @return [Integer] {long} translated to signed 32-bit. + def make_signed_int32(long) + long >= 0x80000000 ? long - 0x100000000 : long + end + + # Translates a pair of unsigned 32-bit integers (as returned by unpack, + # most significant first) to a signed 64-bit integer. + # + # @param high [Integer] the most significant 32-bits. + # @param low [Integer] the least significant 32-bits. + # @return [Integer] {high} and {low} combined and translated to signed + # 64-bit. + def make_signed_int64(high, low) + unsigned = (high << 32) | low + unsigned >= 0x8000000000000000 ? unsigned - 0x10000000000000000 : unsigned + end + + # Reads the given number of bytes from the given file and checks that the + # correct number of bytes could be read. + # + # @param file [IO] the file to read from. + # @param bytes [Integer] the number of bytes to read. + # @return [String] the bytes that were read. + # @raise [InvalidZoneinfoFile] if the number of bytes available didn't + # match the number requested. + def check_read(file, bytes) + result = file.read(bytes) + + unless result && result.length == bytes + raise InvalidZoneinfoFile, "Expected #{bytes} bytes reading '#{file.path}', but got #{result ? result.length : 0} bytes" + end + + result + end + + # Zoneinfo files don't include the offset from standard time (std_offset) + # for DST periods. Derive the base offset (base_utc_offset) where DST is + # observed from either the previous or next non-DST period. + # + # @param transitions [Array<Hash>] an `Array` of transition hashes. + # @param offsets [Array<Hash>] an `Array` of offset hashes. + # @return [Integer] the index of the offset to be used prior to the first + # transition. + def derive_offsets(transitions, offsets) + # The first non-DST offset (if there is one) is the offset observed + # before the first transition. Fall back to the first DST offset if + # there are no non-DST offsets. + first_non_dst_offset_index = offsets.index {|o| !o[:is_dst] } + first_offset_index = first_non_dst_offset_index || 0 + return first_offset_index if transitions.empty? + + # Determine the base_utc_offset of the next non-dst offset at each transition. + base_utc_offset_from_next = nil + + transitions.reverse_each do |transition| + offset = offsets[transition[:offset]] + if offset[:is_dst] + transition[:base_utc_offset_from_next] = base_utc_offset_from_next if base_utc_offset_from_next + else + base_utc_offset_from_next = offset[:observed_utc_offset] + end + end + + base_utc_offset_from_previous = first_non_dst_offset_index ? 
offsets[first_non_dst_offset_index][:observed_utc_offset] : nil + defined_offsets = {} + + transitions.each do |transition| + offset_index = transition[:offset] + offset = offsets[offset_index] + observed_utc_offset = offset[:observed_utc_offset] + + if offset[:is_dst] + base_utc_offset_from_next = transition[:base_utc_offset_from_next] + + difference_to_previous = (observed_utc_offset - (base_utc_offset_from_previous || observed_utc_offset)).abs + difference_to_next = (observed_utc_offset - (base_utc_offset_from_next || observed_utc_offset)).abs + + base_utc_offset = if difference_to_previous == 3600 + base_utc_offset_from_previous + elsif difference_to_next == 3600 + base_utc_offset_from_next + elsif difference_to_previous > 0 && difference_to_next > 0 + difference_to_previous < difference_to_next ? base_utc_offset_from_previous : base_utc_offset_from_next + elsif difference_to_previous > 0 + base_utc_offset_from_previous + elsif difference_to_next > 0 + base_utc_offset_from_next + else + # No difference, assume a 1 hour offset from standard time. + observed_utc_offset - 3600 + end + + if !offset[:base_utc_offset] + offset[:base_utc_offset] = base_utc_offset + defined_offsets[offset] = offset_index + elsif offset[:base_utc_offset] != base_utc_offset + # An earlier transition has already derived a different + # base_utc_offset. Define a new offset or reuse an existing identically + # defined offset. + new_offset = offset.dup + new_offset[:base_utc_offset] = base_utc_offset + + offset_index = defined_offsets[new_offset] + + unless offset_index + offsets << new_offset + offset_index = offsets.length - 1 + defined_offsets[new_offset] = offset_index + end + + transition[:offset] = offset_index + end + else + base_utc_offset_from_previous = observed_utc_offset + end + end + + first_offset_index + end + + # Determines if the offset from a transition matches the offset from a + # rule. This is a looser match than equality, not requiring that the + # base_utc_offset and std_offset both match (which have to be derived for + # transitions, but are known for rules. + # + # @param offset [TimezoneOffset] an offset from a transition. + # @param rule_offset [TimezoneOffset] an offset from a rule. + # @return [Boolean] whether the offsets match. + def offset_matches_rule?(offset, rule_offset) + offset.observed_utc_offset == rule_offset.observed_utc_offset && + offset.dst? == rule_offset.dst? && + offset.abbreviation == rule_offset.abbreviation + end + + # Apply the rules from the TZ string when there were no defined + # transitions. Checks for a matching offset. Returns the rules-based + # constant offset or generates transitions from 1970 until 100 years into + # the future (at the time of loading zoneinfo_reader.rb). + # + # @param file [IO] the file being processed. + # @param first_offset [TimezoneOffset] the first offset included in the + # file that would normally apply without the rules. + # @param rules [Object] a {TimezoneOffset} specifying a constant offset or + # {AnnualRules} instance specfying transitions. + # @return [Object] either a {TimezoneOffset} or an `Array` of + # {TimezoneTransition}s. + # @raise [InvalidZoneinfoFile] if the first offset does not match the + # rules. + def apply_rules_without_transitions(file, first_offset, rules) + if rules.kind_of?(TimezoneOffset) + unless offset_matches_rule?(first_offset, rules) + raise InvalidZoneinfoFile, "Constant offset POSIX-style TZ string does not match constant offset in file '#{file.path}'." 
+ end + rules + else + transitions = 1970.upto(GENERATE_UP_TO).flat_map {|y| rules.transitions(y) } + first_transition = transitions[0] + + unless offset_matches_rule?(first_offset, first_transition.previous_offset) + # Not transitioning from the designated first offset. + + if offset_matches_rule?(first_offset, first_transition.offset) + # Skip an unnecessary transition to the first offset. + transitions.shift + else + # The initial offset doesn't match the ongoing rules. Replace the + # previous offset of the first transition. + transitions[0] = TimezoneTransition.new(first_transition.offset, first_offset, first_transition.timestamp_value) + end + end + + transitions + end + end + + # Finds an offset that is equivalent to the one specified in the given + # `Array`. Matching is performed with {TimezoneOffset#==}. + # + # @param offsets [Array<TimezoneOffset>] an `Array` to search. + # @param offset [TimezoneOffset] the offset to search for. + # @return [TimezoneOffset] the matching offset from `offsets` or `nil` + # if not found. + def find_existing_offset(offsets, offset) + offsets.find {|o| o == offset } + end + + # Returns a new AnnualRules instance with standard and daylight savings + # offsets replaced with equivalents from an array. This reduces the memory + # requirement for loaded time zones by reusing offsets for rule-generated + # transitions. + # + # @param offsets [Array<TimezoneOffset>] an `Array` to search for + # equivalent offsets. + # @param annual_rules [AnnualRules] the {AnnualRules} instance to check. + # @return [AnnualRules] either a new {AnnualRules} instance with either + # the {AnnualRules#std_offset std_offset} or {AnnualRules#dst_offset + # dst_offset} replaced, or the original instance if no equivalent for + # either {AnnualRules#std_offset std_offset} or {AnnualRules#dst_offset + # dst_offset} could be found. + def replace_with_existing_offsets(offsets, annual_rules) + existing_std_offset = find_existing_offset(offsets, annual_rules.std_offset) + existing_dst_offset = find_existing_offset(offsets, annual_rules.dst_offset) + if existing_std_offset || existing_dst_offset + AnnualRules.new(existing_std_offset || annual_rules.std_offset, existing_dst_offset || annual_rules.dst_offset, + annual_rules.dst_start_rule, annual_rules.dst_end_rule) + else + annual_rules + end + end + + # Validates the offset indicated to be observed by the rules before the + # first generated transition against the offset of the last defined + # transition. + # + # Fix the last defined transition if it differ on just base/std offsets + # (which are derived). Raise an error if the observed UTC offset or + # abbreviations differ. + # + # @param file [IO] the file being processed. + # @param last_defined [TimezoneTransition] the last defined transition in + # the file. + # @param first_rule_offset [TimezoneOffset] the offset the rules indicate + # is observed prior to the first rules generated transition. + # @return [TimezoneTransition] the last defined transition (either the + # original instance or a replacement). + # @raise [InvalidZoneinfoFile] if the offset of {last_defined} and + # {first_rule_offset} do not match. + def validate_and_fix_last_defined_transition_offset(file, last_defined, first_rule_offset) + offset_of_last_defined = last_defined.offset + + if offset_of_last_defined == first_rule_offset + last_defined + else + if offset_matches_rule?(offset_of_last_defined, first_rule_offset) + # The same overall offset, but differing in the base or std + # offset (which are derived). 
Correct by using the rule. + TimezoneTransition.new(first_rule_offset, last_defined.previous_offset, last_defined.timestamp_value) + else + raise InvalidZoneinfoFile, "The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{file.path}'." + end + end + end + + # Apply the rules from the TZ string when there were defined + # transitions. Checks for a matching offset with the last transition. + # Redefines the last transition if required and if the rules don't + # specific a constant offset, generates transitions until 100 years into + # the future (at the time of loading zoneinfo_reader.rb). + # + # @param file [IO] the file being processed. + # @param transitions [Array<TimezoneTransition>] the defined transitions. + # @param offsets [Array<TimezoneOffset>] the offsets used by the defined + # transitions. + # @param rules [Object] a {TimezoneOffset} specifying a constant offset or + # {AnnualRules} instance specfying transitions. + # @raise [InvalidZoneinfoFile] if the first offset does not match the + # rules. + # @raise [InvalidZoneinfoFile] if the previous offset of the first + # generated transition does not match the offset of the last defined + # transition. + def apply_rules_with_transitions(file, transitions, offsets, rules) + last_defined = transitions[-1] + + if rules.kind_of?(TimezoneOffset) + transitions[-1] = validate_and_fix_last_defined_transition_offset(file, last_defined, rules) + else + last_year = last_defined.local_end_at.to_time.year + + if last_year <= GENERATE_UP_TO + rules = replace_with_existing_offsets(offsets, rules) + + generated = rules.transitions(last_year).find_all do |t| + t.timestamp_value > last_defined.timestamp_value && !offset_matches_rule?(last_defined.offset, t.offset) + end + + generated += (last_year + 1).upto(GENERATE_UP_TO).flat_map {|y| rules.transitions(y) } + + unless generated.empty? + transitions[-1] = validate_and_fix_last_defined_transition_offset(file, last_defined, generated[0].previous_offset) + transitions.concat(generated) + end + end + end + end + + # Parses a zoneinfo file and returns either a {TimezoneOffset} that is + # constantly observed or an `Array` of {TimezoneTransition}s. + # + # @param file [IO] the file to be read. + # @return [Object] either a {TimezoneOffset} or an `Array` of + # {TimezoneTransition}s. + # @raise [InvalidZoneinfoFile] if the file is not a valid zoneinfo file. + def parse(file) + magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt = + check_read(file, 44).unpack('a4 a x15 NNNNNN') + + if magic != 'TZif' + raise InvalidZoneinfoFile, "The file '#{file.path}' does not start with the expected header." + end + + if version == '2' || version == '3' + # Skip the first 32-bit section and read the header of the second 64-bit section + file.seek(timecnt * 5 + typecnt * 6 + charcnt + leapcnt * 8 + ttisstdcnt + ttisutccnt, IO::SEEK_CUR) + + prev_version = version + + magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt = + check_read(file, 44).unpack('a4 a x15 NNNNNN') + + unless magic == 'TZif' && (version == prev_version) + raise InvalidZoneinfoFile, "The file '#{file.path}' contains an invalid 64-bit section header." + end + + using_64bit = true + elsif version != '3' && version != '2' && version != "\0" + raise InvalidZoneinfoFile, "The file '#{file.path}' contains a version of the zoneinfo format that is not currently supported." 
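+          # (Any remaining accepted value of version is "\0", i.e. an original
+          # format TZif file containing only 32-bit transition data, handled by
+          # the else branch below.)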
+ else + using_64bit = false + end + + unless leapcnt == 0 + raise InvalidZoneinfoFile, "The file '#{file.path}' contains leap second data. TZInfo requires zoneinfo files that omit leap seconds." + end + + transitions = if using_64bit + timecnt.times.map do |i| + high, low = check_read(file, 8).unpack('NN'.freeze) + transition_time = make_signed_int64(high, low) + {at: transition_time} + end + else + timecnt.times.map do |i| + transition_time = make_signed_int32(check_read(file, 4).unpack('N'.freeze)[0]) + {at: transition_time} + end + end + + check_read(file, timecnt).unpack('C*'.freeze).each_with_index do |localtime_type, i| + raise InvalidZoneinfoFile, "Invalid offset referenced by transition in file '#{file.path}'." if localtime_type >= typecnt + transitions[i][:offset] = localtime_type + end + + offsets = typecnt.times.map do |i| + gmtoff, isdst, abbrind = check_read(file, 6).unpack('NCC'.freeze) + gmtoff = make_signed_int32(gmtoff) + isdst = isdst == 1 + {observed_utc_offset: gmtoff, is_dst: isdst, abbr_index: abbrind} + end + + abbrev = check_read(file, charcnt) + + if using_64bit + # Skip to the POSIX-style TZ string. + file.seek(ttisstdcnt + ttisutccnt, IO::SEEK_CUR) # + leapcnt * 8, but leapcnt is checked above and guaranteed to be 0. + tz_string_start = check_read(file, 1) + raise InvalidZoneinfoFile, "Expected newline starting POSIX-style TZ string in file '#{file.path}'." unless tz_string_start == "\n" + tz_string = file.readline("\n").force_encoding(Encoding::UTF_8) + raise InvalidZoneinfoFile, "Expected newline ending POSIX-style TZ string in file '#{file.path}'." unless tz_string.chomp!("\n") + + begin + rules = @posix_tz_parser.parse(tz_string) + rescue InvalidPosixTimeZone => e + raise InvalidZoneinfoFile, "Failed to parse POSIX-style TZ string in file '#{file.path}': #{e}" + end + else + rules = nil + end + + # Derive the offsets from standard time (std_offset). + first_offset_index = derive_offsets(transitions, offsets) + + offsets = offsets.map do |o| + observed_utc_offset = o[:observed_utc_offset] + base_utc_offset = o[:base_utc_offset] + + if base_utc_offset + # DST offset with base_utc_offset derived by derive_offsets. + std_offset = observed_utc_offset - base_utc_offset + elsif o[:is_dst] + # DST offset unreferenced by a transition (offset in use before the + # first transition). No derived base UTC offset, so assume 1 hour + # DST. + base_utc_offset = observed_utc_offset - 3600 + std_offset = 3600 + else + # Non-DST offset. + base_utc_offset = observed_utc_offset + std_offset = 0 + end + + abbrev_start = o[:abbr_index] + raise InvalidZoneinfoFile, "Abbreviation index is out of range in file '#{file.path}'." unless abbrev_start < abbrev.length + + abbrev_end = abbrev.index("\0", abbrev_start) + raise InvalidZoneinfoFile, "Missing abbreviation null terminator in file '#{file.path}'." unless abbrev_end + + abbr = @string_deduper.dedupe(abbrev[abbrev_start...abbrev_end].force_encoding(Encoding::UTF_8).untaint) + + TimezoneOffset.new(base_utc_offset, std_offset, abbr) + end + + first_offset = offsets[first_offset_index] + + + if transitions.empty? + if rules + apply_rules_without_transitions(file, first_offset, rules) + else + first_offset + end + else + previous_offset = first_offset + previous_at = nil + + transitions = transitions.map do |t| + offset = offsets[t[:offset]] + at = t[:at] + raise InvalidZoneinfoFile, "Transition at #{at} is not later than the previous transition at #{previous_at} in file '#{file.path}'." 
if previous_at && previous_at >= at + tt = TimezoneTransition.new(offset, previous_offset, at) + previous_offset = offset + previous_at = at + tt + end + + apply_rules_with_transitions(file, transitions, offsets, rules) if rules + transitions + end + end + end + private_constant :ZoneinfoReader + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_timezone.rb new file mode 100644 index 0000000..a7c60d9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/data_timezone.rb @@ -0,0 +1,44 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Represents time zones that are defined by rules that set out when + # transitions occur. + class DataTimezone < InfoTimezone + # (see Timezone#period_for) + def period_for(time) + raise ArgumentError, 'time must be specified' unless time + timestamp = Timestamp.for(time) + raise ArgumentError, 'time must have a specified utc_offset' unless timestamp.utc_offset + info.period_for(timestamp) + end + + # (see Timezone#periods_for_local) + def periods_for_local(local_time) + raise ArgumentError, 'local_time must be specified' unless local_time + info.periods_for_local(Timestamp.for(local_time, :ignore)) + end + + # (see Timezone#transitions_up_to) + def transitions_up_to(to, from = nil) + raise ArgumentError, 'to must be specified' unless to + to_timestamp = Timestamp.for(to) + from_timestamp = from && Timestamp.for(from) + + begin + info.transitions_up_to(to_timestamp, from_timestamp) + rescue ArgumentError => e + raise ArgumentError, e.message.gsub('_timestamp', '') + end + end + + # Returns the canonical {Timezone} instance for this {DataTimezone}. + # + # For a {DataTimezone}, this is always `self`. + # + # @return [Timezone] `self`. + def canonical_zone + self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/datetime_with_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/datetime_with_offset.rb new file mode 100644 index 0000000..63c4bed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/datetime_with_offset.rb @@ -0,0 +1,153 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'date' + +module TZInfo + # A subclass of `DateTime` used to represent local times. {DateTimeWithOffset} + # holds a reference to the related {TimezoneOffset} and overrides various + # methods to return results appropriate for the {TimezoneOffset}. Certain + # operations will clear the associated {TimezoneOffset} (if the + # {TimezoneOffset} would not necessarily be valid for the result). Once the + # {TimezoneOffset} has been cleared, {DateTimeWithOffset} behaves identically + # to `DateTime`. + # + # Arithmetic performed on {DateTimeWithOffset} instances is _not_ time + # zone-aware. Regardless of whether transitions in the time zone are crossed, + # results of arithmetic operations will always maintain the same offset from + # UTC (`offset`). The associated {TimezoneOffset} will aways be cleared. + class DateTimeWithOffset < DateTime + include WithOffset + + # @return [TimezoneOffset] the {TimezoneOffset} associated with this + # instance. + attr_reader :timezone_offset + + # Sets the associated {TimezoneOffset}. + # + # @param timezone_offset [TimezoneOffset] a {TimezoneOffset} valid at the + # time and for the offset of this {DateTimeWithOffset}. + # @return [DateTimeWithOffset] `self`. + # @raise [ArgumentError] if `timezone_offset` is `nil`. 
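+    #
+    # A small sketch (values chosen for illustration; offsets are normally
+    # assigned by a {Timezone} rather than built by hand):
+    #
+    #   dt = DateTimeWithOffset.new(2021, 7, 1, 12, 0, 0, Rational(1, 24))
+    #   dt.set_timezone_offset(TimezoneOffset.new(0, 3600, 'BST'))
+    #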
+ # @raise [ArgumentError] if `timezone_offset.observed_utc_offset` does not + # equal `self.offset * 86400`. + def set_timezone_offset(timezone_offset) + raise ArgumentError, 'timezone_offset must be specified' unless timezone_offset + raise ArgumentError, 'timezone_offset.observed_utc_offset does not match self.utc_offset' if offset * 86400 != timezone_offset.observed_utc_offset + @timezone_offset = timezone_offset + self + end + + # An overridden version of `DateTime#to_time` that, if there is an + # associated {TimezoneOffset}, returns a {DateTimeWithOffset} with that + # offset. + # + # @return [Time] if there is an associated {TimezoneOffset}, a + # {TimeWithOffset} representation of this {DateTimeWithOffset}, otherwise + # a `Time` representation. + def to_time + if_timezone_offset(super) do |o,t| + # Ruby 2.4.0 changed the behaviour of to_time so that it preserves the + # offset instead of converting to the system local timezone. + # + # When self has an associated TimezonePeriod, this implementation will + # preserve the offset on all versions of Ruby. + TimeWithOffset.at(t.to_i, t.subsec * 1_000_000).set_timezone_offset(o) + end + end + + # An overridden version of `DateTime#downto` that clears the associated + # {TimezoneOffset} of the returned or yielded instances. + def downto(min) + if block_given? + super {|dt| yield dt.clear_timezone_offset } + else + enum = super + enum.each {|dt| dt.clear_timezone_offset } + enum + end + end + + # An overridden version of `DateTime#england` that preserves the associated + # {TimezoneOffset}. + # + # @return [DateTime] + def england + # super doesn't call #new_start on MRI, so each method has to be + # individually overridden. + if_timezone_offset(super) {|o,dt| dt.set_timezone_offset(o) } + end + + # An overridden version of `DateTime#gregorian` that preserves the + # associated {TimezoneOffset}. + # + # @return [DateTime] + def gregorian + # super doesn't call #new_start on MRI, so each method has to be + # individually overridden. + if_timezone_offset(super) {|o,dt| dt.set_timezone_offset(o) } + end + + # An overridden version of `DateTime#italy` that preserves the associated + # {TimezoneOffset}. + # + # @return [DateTime] + def italy + # super doesn't call #new_start on MRI, so each method has to be + # individually overridden. + if_timezone_offset(super) {|o,dt| dt.set_timezone_offset(o) } + end + + # An overridden version of `DateTime#julian` that preserves the associated + # {TimezoneOffset}. + # + # @return [DateTime] + def julian + # super doesn't call #new_start on MRI, so each method has to be + # individually overridden. + if_timezone_offset(super) {|o,dt| dt.set_timezone_offset(o) } + end + + # An overridden version of `DateTime#new_start` that preserves the + # associated {TimezoneOffset}. + # + # @return [DateTime] + def new_start(start = Date::ITALY) + if_timezone_offset(super) {|o,dt| dt.set_timezone_offset(o) } + end + + # An overridden version of `DateTime#step` that clears the associated + # {TimezoneOffset} of the returned or yielded instances. + def step(limit, step = 1) + if block_given? + super {|dt| yield dt.clear_timezone_offset } + else + enum = super + enum.each {|dt| dt.clear_timezone_offset } + enum + end + end + + # An overridden version of `DateTime#upto` that clears the associated + # {TimezoneOffset} of the returned or yielded instances. + def upto(max) + if block_given? 
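+        # Clear the associated offset on each yielded instance; it may no
+        # longer be valid for the shifted date.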
+ super {|dt| yield dt.clear_timezone_offset } + else + enum = super + enum.each {|dt| dt.clear_timezone_offset } + enum + end + end + + protected + + # Clears the associated {TimezoneOffset}. + # + # @return [DateTimeWithOffset] `self`. + def clear_timezone_offset + @timezone_offset = nil + self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1.rb new file mode 100644 index 0000000..ff3a2eb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1.rb @@ -0,0 +1,10 @@ +# encoding: UTF-8 + +module TZInfo + # Modules and classes used by the format 1 version of TZInfo::Data. + # + # @private + module Format1 #:nodoc: + end + private_constant :Format1 +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/country_definer.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/country_definer.rb new file mode 100644 index 0000000..67bf044 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/country_definer.rb @@ -0,0 +1,17 @@ +# encoding: UTF-8 + +module TZInfo + module Format1 + # Instances of {Format1::CountryDefiner} are yielded to the format 1 version + # of `TZInfo::Data::Indexes::Countries` by {CountryIndexDefinition} to allow + # the zones of a country to be specified. + # + # @private + class CountryDefiner < Format2::CountryDefiner #:nodoc: + # Initializes a new {CountryDefiner}. + def initialize(identifier_deduper, description_deduper) + super(nil, identifier_deduper, description_deduper) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/country_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/country_index_definition.rb new file mode 100644 index 0000000..040ed24 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/country_index_definition.rb @@ -0,0 +1,64 @@ +# encoding: UTF-8 + +module TZInfo + module Format1 + # The format 1 TZInfo::Data country index file includes + # {Format1::CountryIndexDefinition}, which provides a + # {CountryIndexDefinition::ClassMethods#country country} method used to + # define each country in the index. + # + # @private + module CountryIndexDefinition #:nodoc: + # Adds class methods to the includee and initializes class instance + # variables. + # + # @param base [Module] the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval { @countries = {} } + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # @return [Hash<String, DataSources::CountryInfo>] a frozen `Hash` + # of all the countries that have been defined in the index keyed by + # their codes. + def countries + @description_deduper = nil + @countries.freeze + end + + private + + # Defines a country with an ISO 3166-1 alpha-2 country code and name. + # + # @param code [String] the ISO 3166-1 alpha-2 country code. + # @param name [String] the name of the country. + # @yield [definer] (optional) to obtain the time zones for the country. + # @yieldparam definer [CountryDefiner] a {CountryDefiner} instance. + def country(code, name) + @description_deduper ||= StringDeduper.new + + zones = if block_given? 
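+            # The block receives a CountryDefiner listing the country's zones,
+            # e.g. `c.timezone 'Europe/London', 103, 2, -1, 8` in a format 1
+            # index (coordinates illustrative).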
+ definer = CountryDefiner.new(StringDeduper.global, @description_deduper) + yield definer + definer.timezones + else + [] + end + + @countries[code.freeze] = DataSources::CountryInfo.new(code, name, zones) + end + end + end + end + + # Alias used by TZInfo::Data format1 releases. + # + # @private + CountryIndexDefinition = Format1::CountryIndexDefinition #:nodoc: + private_constant :CountryIndexDefinition +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_definer.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_definer.rb new file mode 100644 index 0000000..578e4ea --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_definer.rb @@ -0,0 +1,64 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module Format1 + # Instances of {Format1::TimezoneDefiner} are yielded to TZInfo::Data + # format 1 modules by {TimezoneDefinition} to allow the offsets and + # transitions of the time zone to be specified. + # + # @private + class TimezoneDefiner < Format2::TimezoneDefiner #:nodoc: + undef_method :subsequent_rules + + # Defines an offset. + # + # @param id [Symbol] an arbitrary value used identify the offset in + # subsequent calls to transition. It must be unique. + # @param utc_offset [Integer] the base offset from UTC of the zone in + # seconds. This does not include daylight savings time. + # @param std_offset [Integer] the daylight savings offset from the base + # offset in seconds. Typically either 0 or 3600. + # @param abbreviation [Symbol] an abbreviation for the offset, for + # example, `:EST` or `:EDT`. + # @raise [ArgumentError] if another offset has already been defined with + # the given id. + def offset(id, utc_offset, std_offset, abbreviation) + super(id, utc_offset, std_offset, abbreviation.to_s) + end + + # Defines a transition to a given offset. + # + # Transitions must be defined in increasing time order. + # + # @param year [Integer] the UTC year in which the transition occurs. Used + # in earlier versions of TZInfo, but now ignored. + # @param month [Integer] the UTC month in which the transition occurs. + # Used in earlier versions of TZInfo, but now ignored. + # @param offset_id [Symbol] references the id of a previously defined + # offset (see #offset). + # @param timestamp_value [Integer] the time the transition occurs as an + # Integer number of seconds since 1970-01-01 00:00:00 UTC ignoring leap + # seconds (i.e. each day is treated as if it were 86,400 seconds long). + # @param datetime_numerator [Integer] the time of the transition as the + # numerator of the `Rational` returned by `DateTime#ajd`. Used in + # earlier versions of TZInfo, but now ignored. + # @param datetime_denominator [Integer] the time of the transition as the + # denominator of the `Rational` returned by `DateTime#ajd`. Used in + # earlier versions of TZInfo, but now ignored. + # @raise [ArgumentError] if `offset_id` does not reference a defined + # offset. + # @raise [ArgumentError] if `timestamp_value` is not greater than the + # `timestamp_value` of the previously defined transition. + # @raise [ArgumentError] if `datetime_numerator` is specified, but + # `datetime_denominator` is not. In older versions of TZInfo, it was + # possible to define a transition with the `DateTime` numerator as the + # 4th parameter and the denominator as the 5th parameter. This style of + # definition is not used in released versions of TZInfo::Data. 
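+      #
+      # As a rough sketch of how format 1 data modules call this (ids,
+      # abbreviations and the timestamp are illustrative):
+      #
+      #   tz.offset :o0, 0, 0, :GMT
+      #   tz.offset :o1, 0, 3600, :BST
+      #   tz.transition 1971, 10, :o0, 57722400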
+ def transition(year, month, offset_id, timestamp_value, datetime_numerator = nil, datetime_denominator = nil) + raise ArgumentError, 'DateTime-only transitions are not supported' if datetime_numerator && !datetime_denominator + super(offset_id, timestamp_value) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_definition.rb new file mode 100644 index 0000000..4acf9a0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_definition.rb @@ -0,0 +1,39 @@ +# encoding: UTF-8 + +module TZInfo + module Format1 + # {Format1::TimezoneDefinition} is included into format 1 time zone + # definition modules and provides the methods for defining time zones. + # + # @private + module TimezoneDefinition #:nodoc: + # Adds class methods to the includee. + # + # @param base [Module] the includee. + def self.append_features(base) + super + base.extend(Format2::TimezoneDefinition::ClassMethods) + base.extend(ClassMethods) + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + private + + # @return the class to be instantiated and yielded by + # {Format2::TimezoneDefinition::ClassMethods#timezone}. + def timezone_definer_class + TimezoneDefiner + end + end + end + end + + # Alias used by TZInfo::Data format1 releases. + # + # @private + TimezoneDefinition = Format1::TimezoneDefinition #:nodoc: + private_constant :TimezoneDefinition +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_index_definition.rb new file mode 100644 index 0000000..5d4518a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format1/timezone_index_definition.rb @@ -0,0 +1,77 @@ +# encoding: UTF-8 + +module TZInfo + module Format1 + # The format 1 TZInfo::Data time zone index file includes + # {Format1::TimezoneIndexDefinition}, which provides methods used to define + # time zones in the index. + # + # @private + module TimezoneIndexDefinition #:nodoc: + # Adds class methods to the includee and initializes class instance + # variables. + # + # @param base [Module] the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval do + @timezones = [] + @data_timezones = [] + @linked_timezones = [] + end + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # @return [Array<String>] a frozen `Array` containing the identifiers of + # all data time zones. Identifiers are sorted according to + # `String#<=>`. + def data_timezones + unless @data_timezones.frozen? + @data_timezones = @data_timezones.sort.freeze + end + @data_timezones + end + + # @return [Array<String>] a frozen `Array` containing the identifiers of + # all linked time zones. Identifiers are sorted according to + # `String#<=>`. + def linked_timezones + unless @linked_timezones.frozen? + @linked_timezones = @linked_timezones.sort.freeze + end + @linked_timezones + end + + private + + # Adds a data time zone to the index. + # + # @param identifier [String] the time zone identifier. + def timezone(identifier) + identifier = StringDeduper.global.dedupe(identifier) + @timezones << identifier + @data_timezones << identifier + end + + # Adds a linked time zone to the index. + # + # @param identifier [String] the time zone identifier. 
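+        #
+        # For example, an index may declare `linked_timezone 'GB'` for the tz
+        # database link pointing at Europe/London.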
+ def linked_timezone(identifier) + identifier = StringDeduper.global.dedupe(identifier) + @timezones << identifier + @linked_timezones << identifier + end + end + end + end + + # Alias used by TZInfo::Data format 1 releases. + # + # @private + TimezoneIndexDefinition = Format1::TimezoneIndexDefinition #:nodoc: + private_constant :TimezoneIndexDefinition +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2.rb new file mode 100644 index 0000000..f7c6316 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2.rb @@ -0,0 +1,10 @@ +# encoding: UTF-8 + +module TZInfo + # Modules and classes used by the format 2 version of TZInfo::Data. + # + # @private + module Format2 #:nodoc: + end + private_constant :Format2 +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_definer.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_definer.rb new file mode 100644 index 0000000..158e3ec --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_definer.rb @@ -0,0 +1,68 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module Format2 + # Instances of {Format2::CountryDefiner} are yielded to the format 2 version + # of `TZInfo::Data::Indexes::Countries` by {CountryIndexDefiner} to allow + # the zones of a country to be specified. + # + # @private + class CountryDefiner #:nodoc: + # @return [Array<CountryTimezone>] the time zones observed in the country. + attr_reader :timezones + + # Initializes a new {CountryDefiner}. + # + # @param shared_timezones [Hash<Symbol, CountryTimezone>] a `Hash` + # containing time zones shared by more than one country, keyed by a + # unique reference. + # @param identifier_deduper [StringDeduper] a {StringDeduper} instance to + # use when deduping time zone identifiers. + # @param description_deduper [StringDeduper] a {StringDeduper} instance to + # use when deduping time zone descriptions. + def initialize(shared_timezones, identifier_deduper, description_deduper) + @shared_timezones = shared_timezones + @identifier_deduper = identifier_deduper + @description_deduper = description_deduper + @timezones = [] + end + + # @overload timezone(reference) + # Defines a time zone of a country as a reference to a pre-defined + # shared time zone. + # @param reference [Symbol] a reference for a pre-defined shared time + # zone. + # @overload timezone(identifier, latitude_numerator, latitude_denominator, longitude_numerator, longitude_denominator, description) + # Defines a (non-shared) time zone of a country. The latitude and + # longitude are given as the numerator and denominator of a `Rational`. + # @param identifier [String] the time zone identifier. + # @param latitude_numerator [Integer] the numerator of the latitude. + # @param latitude_denominator [Integer] the denominator of the latitude. + # @param longitude_numerator [Integer] the numerator of the longitude. + # @param longitude_denominator [Integer] the denominator of the + # longitude. + # @param description [String] an optional description for the time zone. 
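+      #
+      # A short sketch of both forms (reference, identifier and coordinates
+      # are illustrative):
+      #
+      #   timezone :london                                   # shared zone reference
+      #   timezone 'Europe/London', 103, 2, -1, 8, 'London'  # full definition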
+ def timezone(identifier_or_reference, latitude_numerator = nil, + latitude_denominator = nil, longitude_numerator = nil, + longitude_denominator = nil, description = nil) + if latitude_numerator + unless latitude_denominator && longitude_numerator && longitude_denominator + raise ArgumentError, 'Either just a reference should be supplied, or the identifier, latitude and longitude must all be specified' + end + + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). + + @timezones << CountryTimezone.new(@identifier_deduper.dedupe(identifier_or_reference), + Rational(latitude_numerator, latitude_denominator), + Rational(longitude_numerator, longitude_denominator), description && @description_deduper.dedupe(description)) + else + shared_timezone = @shared_timezones[identifier_or_reference] + raise ArgumentError, "Unknown shared timezone: #{identifier_or_reference}" unless shared_timezone + @timezones << shared_timezone + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_index_definer.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_index_definer.rb new file mode 100644 index 0000000..be28095 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_index_definer.rb @@ -0,0 +1,68 @@ +# encoding: UTF-8 + +module TZInfo + module Format2 + # Instances of {Format2::CountryIndexDefiner} are yielded to the format 2 + # version of `TZInfo::Data::Indexes::Countries` by {CountryIndexDefinition} + # to allow countries and their time zones to be specified. + # + # @private + class CountryIndexDefiner #:nodoc: + # @return [Hash<String, CountryInfo>] a `Hash` of all the countries that + # have been defined in the index keyed by their codes. + attr_reader :countries + + # Initializes a new {CountryIndexDefiner}. + # + # @param identifier_deduper [StringDeduper] a {StringDeduper} instance to + # use when deduping time zone identifiers. + # @param description_deduper [StringDeduper] a {StringDeduper} instance to + # use when deduping time zone descriptions. + def initialize(identifier_deduper, description_deduper) + @identifier_deduper = identifier_deduper + @description_deduper = description_deduper + @shared_timezones = {} + @countries = {} + end + + # Defines a time zone shared by many countries with an reference for + # subsequent use in country definitions. The latitude and longitude are + # given as the numerator and denominator of a `Rational`. + # + # @param reference [Symbol] a unique reference for the time zone. + # @param identifier [String] the time zone identifier. + # @param latitude_numerator [Integer] the numerator of the latitude. + # @param latitude_denominator [Integer] the denominator of the latitude. + # @param longitude_numerator [Integer] the numerator of the longitude. + # @param longitude_denominator [Integer] the denominator of the longitude. + # @param description [String] an optional description for the time zone. + def timezone(reference, identifier, latitude_numerator, latitude_denominator, + longitude_numerator, longitude_denominator, description = nil) + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). 
+ @shared_timezones[reference] = CountryTimezone.new(@identifier_deduper.dedupe(identifier), + Rational(latitude_numerator, latitude_denominator), + Rational(longitude_numerator, longitude_denominator), description && @description_deduper.dedupe(description)) + end + + # Defines a country. + # + # @param code [String] The ISO 3166-1 alpha-2 code of the country. + # @param name [String] Then name of the country. + # @yield [definer] yields (optional) to obtain the time zones for the + # country. + # @yieldparam definer [CountryDefiner] a {CountryDefiner} + # instance that should be used to specify the time zones of the country. + def country(code, name) + timezones = if block_given? + definer = CountryDefiner.new(@shared_timezones, @identifier_deduper, @description_deduper) + yield definer + definer.timezones + else + [] + end + @countries[code.freeze] = DataSources::CountryInfo.new(code, name, timezones) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_index_definition.rb new file mode 100644 index 0000000..82b2127 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/country_index_definition.rb @@ -0,0 +1,46 @@ +# encoding: UTF-8 + +module TZInfo + module Format2 + # The format 2 country index file includes + # {Format2::CountryIndexDefinition}, which provides a + # {CountryIndexDefinition::ClassMethods#country_index country_index} method + # used to define the country index. + # + # @private + module CountryIndexDefinition #:nodoc: + # Adds class methods to the includee and initializes class instance + # variables. + # + # @param base [Module] the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval { @countries = {}.freeze } + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # @return [Hash<String, DataSources::CountryInfo>] a frozen `Hash` + # of all the countries that have been defined in the index keyed by + # their codes. + attr_reader :countries + + private + + # Defines the index. + # + # @yield [definer] yields to allow the index to be defined. + # @yieldparam definer [CountryIndexDefiner] a {CountryIndexDefiner} + # instance that should be used to define the index. + def country_index + definer = CountryIndexDefiner.new(StringDeduper.global, StringDeduper.new) + yield definer + @countries = definer.countries.freeze + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_definer.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_definer.rb new file mode 100644 index 0000000..3c43caa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_definer.rb @@ -0,0 +1,94 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + module Format2 + # Instances of {TimezoneDefiner} are yielded to TZInfo::Data modules by + # {TimezoneDefinition} to allow the offsets and transitions of the time zone + # to be specified. + # + # @private + class TimezoneDefiner #:nodoc: + # @return [Array<TimezoneTransition>] the defined transitions of the time + # zone. + attr_reader :transitions + + # Initializes a new TimezoneDefiner. + # + # @param string_deduper [StringDeduper] a {StringDeduper} instance to use + # when deduping abbreviations. 
+ def initialize(string_deduper) + @string_deduper = string_deduper + @offsets = {} + @transitions = [] + end + + # Returns the first offset to be defined or `nil` if no offsets have been + # defined. The first offset is observed before the time of the first + # transition. + # + # @return [TimezoneOffset] the first offset to be defined or `nil` if no + # offsets have been defined. + def first_offset + first = @offsets.first + first && first.last + end + + # Defines an offset. + # + # @param id [Symbol] an arbitrary value used identify the offset in + # subsequent calls to transition. It must be unique. + # @param base_utc_offset [Integer] the base offset from UTC of the zone in + # seconds. This does not include daylight savings time. + # @param std_offset [Integer] the daylight savings offset from the base + # offset in seconds. Typically either 0 or 3600. + # @param abbreviation [String] an abbreviation for the offset, for + # example, EST or EDT. + # @raise [ArgumentError] if another offset has already been defined with + # the given id. + def offset(id, base_utc_offset, std_offset, abbreviation) + raise ArgumentError, 'An offset has already been defined with the given id' if @offsets.has_key?(id) + + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). + abbreviation = @string_deduper.dedupe(abbreviation) + + offset = TimezoneOffset.new(base_utc_offset, std_offset, abbreviation) + @offsets[id] = offset + @previous_offset ||= offset + end + + # Defines a transition to a given offset. + # + # Transitions must be defined in increasing time order. + # + # @param offset_id [Symbol] references the id of a previously defined + # offset. + # @param timestamp_value [Integer] the time the transition occurs as a + # number of seconds since 1970-01-01 00:00:00 UTC ignoring leap seconds + # (i.e. each day is treated as if it were 86,400 seconds long). + # @raise [ArgumentError] if `offset_id` does not reference a defined + # offset. + # @raise [ArgumentError] if `timestamp_value` is not greater than the + # `timestamp_value` of the previously defined transition. + def transition(offset_id, timestamp_value) + offset = @offsets[offset_id] + raise ArgumentError, 'offset_id has not been defined' unless offset + raise ArgumentError, 'timestamp is not greater than the timestamp of the previously defined transition' if !@transitions.empty? && @transitions.last.timestamp_value >= timestamp_value + @transitions << TimezoneTransition.new(offset, @previous_offset, timestamp_value) + @previous_offset = offset + end + + # Defines the rules that will be used for handling instants after the last + # transition. + # + # This method is currently just a placeholder for forward compatibility + # that accepts and ignores any arguments passed. + # + # Support for subsequent rules will be added in a future version of TZInfo + # and the rules will be included in format 2 releases of TZInfo::Data. 
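+ # Hypothetical call sequence, added for illustration (the ids, abbreviations
+ # and timestamps are invented): a format 2 data module defines its offsets
+ # first and then its transitions in increasing time order.
+ #
+ # @example
+ #   definer.offset(:o0, -18000, 0, 'EST')     # UTC-05:00 standard time
+ #   definer.offset(:o1, -18000, 3600, 'EDT')  # UTC-05:00 plus one hour of DST
+ #   definer.transition(:o1, 1583650800)       # first offset :o0 applies before this
+ #   definer.transition(:o0, 1604210400)       # must be later than the previous transition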
+ def subsequent_rules(*args) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_definition.rb new file mode 100644 index 0000000..216df90 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_definition.rb @@ -0,0 +1,73 @@ +# encoding: UTF-8 + +module TZInfo + module Format2 + # {Format2::TimezoneDefinition} is included into format 2 time zone + # definition modules and provides methods for defining time zones. + # + # @private + module TimezoneDefinition #:nodoc: + # Adds class methods to the includee. + # + # @param base [Module] the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # @return [TimezoneInfo] the last time zone to be defined. + def get + @timezone + end + + private + + # @return [Class] the class to be instantiated and yielded by + # {#timezone}. + def timezone_definer_class + TimezoneDefiner + end + + # Defines a data time zone. + # + # @param identifier [String] the identifier of the time zone. + # @yield [definer] yields to the caller to define the time zone. + # @yieldparam definer [Object] an instance of the class returned by + # {#timezone_definer_class}, typically {TimezoneDefiner}. + def timezone(identifier) + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). + string_deduper = StringDeduper.global + identifier = string_deduper.dedupe(identifier) + + definer = timezone_definer_class.new(string_deduper) + yield definer + transitions = definer.transitions + + @timezone = if transitions.empty? + DataSources::ConstantOffsetDataTimezoneInfo.new(identifier, definer.first_offset) + else + DataSources::TransitionsDataTimezoneInfo.new(identifier, transitions) + end + end + + # Defines a linked time zone. + # + # @param identifier [String] the identifier of the time zone being + # defined. + # @param link_to_identifier [String] the identifier the new time zone + # links to (is an alias for). + def linked_timezone(identifier, link_to_identifier) + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). + string_deduper = StringDeduper.global + @timezone = DataSources::LinkedTimezoneInfo.new(string_deduper.dedupe(identifier), string_deduper.dedupe(link_to_identifier)) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_index_definer.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_index_definer.rb new file mode 100644 index 0000000..f90e776 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_index_definer.rb @@ -0,0 +1,45 @@ +# encoding: UTF-8 + +module TZInfo + module Format2 + # Instances of {TimezoneIndexDefiner} are yielded by + # {TimezoneIndexDefinition} to allow the time zone index to be defined. + # + # @private + class TimezoneIndexDefiner #:nodoc: + # @return [Array<String>] the identifiers of all data time zones. + attr_reader :data_timezones + + # @return [Array<String>] the identifiers of all linked time zones. + attr_reader :linked_timezones + + # Initializes a new TimezoneDefiner. 
+ # + # @param string_deduper [StringDeduper] a {StringDeduper} instance to use + # when deduping identifiers. + def initialize(string_deduper) + @string_deduper = string_deduper + @data_timezones = [] + @linked_timezones = [] + end + + # Adds a data time zone to the index. + # + # @param identifier [String] the time zone identifier. + def data_timezone(identifier) + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). + @data_timezones << @string_deduper.dedupe(identifier) + end + + # Adds a linked time zone to the index. + # + # @param identifier [String] the time zone identifier. + def linked_timezone(identifier) + # Dedupe non-frozen literals from format 1 on all Ruby versions and + # format 2 on Ruby < 2.3 (without frozen_string_literal support). + @linked_timezones << @string_deduper.dedupe(identifier) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_index_definition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_index_definition.rb new file mode 100644 index 0000000..ceafd0c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/format2/timezone_index_definition.rb @@ -0,0 +1,55 @@ +# encoding: UTF-8 + +module TZInfo + module Format2 + # The format 2 time zone index file includes {TimezoneIndexDefinition}, + # which provides the {TimezoneIndexDefinition::ClassMethods#timezone_index + # timezone_index} method used to define the index. + # + # @private + module TimezoneIndexDefinition #:nodoc: + # Adds class methods to the includee and initializes class instance + # variables. + # + # @param base [Module] the includee. + def self.append_features(base) + super + base.extend(ClassMethods) + base.instance_eval do + empty = [].freeze + @timezones = empty + @data_timezones = empty + @linked_timezones = empty + end + end + + # Class methods for inclusion. + # + # @private + module ClassMethods #:nodoc: + # @return [Array<String>] a frozen `Array` containing the identifiers of + # all data time zones. Identifiers are sorted according to + # `String#<=>`. + attr_reader :data_timezones + + # @return [Array<String>] a frozen `Array` containing the identifiers of + # all linked time zones. Identifiers are sorted according to + # `String#<=>`. + attr_reader :linked_timezones + + # Defines the index. + # + # @yield [definer] yields to the caller to allow the index to be + # defined. + # @yieldparam definer [TimezoneIndexDefiner] a {TimezoneIndexDefiner} + # instance that should be used to define the index. + def timezone_index + definer = TimezoneIndexDefiner.new(StringDeduper.global) + yield definer + @data_timezones = definer.data_timezones.sort!.freeze + @linked_timezones = definer.linked_timezones.sort!.freeze + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/info_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/info_timezone.rb new file mode 100644 index 0000000..ff9dcfa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/info_timezone.rb @@ -0,0 +1,35 @@ +# encoding: UTF-8 + +module TZInfo + + # A {Timezone} based on a {DataSources::TimezoneInfo}. + # + # @abstract + class InfoTimezone < Timezone + # Initializes a new {InfoTimezone}. + # + # {InfoTimezone} instances should not normally be created directly. Use + # the {Timezone.get} method to obtain {Timezone} instances. 
+ # + # @param info [DataSources::TimezoneInfo] a {DataSources::TimezoneInfo} + # instance supplied by a {DataSource} that will be used as the source of + # data for this {InfoTimezone}. + def initialize(info) + super() + @info = info + end + + # (see Timezone#identifier) + def identifier + @info.identifier + end + + protected + + # @return [DataSources::TimezoneInfo] the {DataSources::TimezoneInfo} this + # {InfoTimezone} is based on. + def info + @info + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/linked_timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/linked_timezone.rb new file mode 100644 index 0000000..af800dc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/linked_timezone.rb @@ -0,0 +1,44 @@ +# encoding: UTF-8 + +module TZInfo + # Represents time zones that are defined as a link to or alias for another + # time zone. + class LinkedTimezone < InfoTimezone + # Initializes a new {LinkedTimezone}. + # + # {LinkedTimezone} instances should not normally be created directly. Use + # the {Timezone.get} method to obtain {Timezone} instances. + # + # @param info [DataSources::LinkedTimezoneInfo] a + # {DataSources::LinkedTimezoneInfo} instance supplied by a {DataSource} + # that will be used as the source of data for this {LinkedTimezone}. + def initialize(info) + super + @linked_timezone = Timezone.get(info.link_to_identifier) + end + + # (see Timezone#period_for) + def period_for(time) + @linked_timezone.period_for(time) + end + + # (see Timezone#periods_for_local) + def periods_for_local(local_time) + @linked_timezone.periods_for_local(local_time) + end + + # (see Timezone#transitions_up_to) + def transitions_up_to(to, from = nil) + @linked_timezone.transitions_up_to(to, from) + end + + # Returns the canonical {Timezone} instance for this {LinkedTimezone}. + # + # For a {LinkedTimezone}, this is the canonical zone of the link target. + # + # @return [Timezone] the canonical {Timezone} instance for this {Timezone}. + def canonical_zone + @linked_timezone.canonical_zone + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/offset_timezone_period.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/offset_timezone_period.rb new file mode 100644 index 0000000..a5988e0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/offset_timezone_period.rb @@ -0,0 +1,42 @@ +# encoding: UTF-8 + +module TZInfo + # Represents the infinite period of time in a time zone that constantly + # observes the same offset from UTC (has an unbounded start and end). + class OffsetTimezonePeriod < TimezonePeriod + # Initializes an {OffsetTimezonePeriod}. + # + # @param offset [TimezoneOffset] the offset that is constantly observed. + # @raise [ArgumentError] if `offset` is `nil`. + def initialize(offset) + super + end + + # @return [TimezoneTransition] the transition that defines the start of this + # {TimezonePeriod}, always `nil` for {OffsetTimezonePeriod}. + def start_transition + nil + end + + # @return [TimezoneTransition] the transition that defines the end of this + # {TimezonePeriod}, always `nil` for {OffsetTimezonePeriod}. + def end_transition + nil + end + + # Determines if this {OffsetTimezonePeriod} is equal to another instance. + # + # @param p [Object] the instance to test for equality. + # @return [Boolean] `true` if `p` is a {OffsetTimezonePeriod} with the same + # {offset}, otherwise `false`. 
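+ # Illustrative sketch (the offset below is invented): equality considers
+ # only the wrapped {TimezoneOffset}.
+ #
+ # @example
+ #   utc = TimezoneOffset.new(0, 0, 'UTC')
+ #   OffsetTimezonePeriod.new(utc) == OffsetTimezonePeriod.new(utc)  #=> true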
+ def ==(p) + p.kind_of?(OffsetTimezonePeriod) && offset == p.offset + end + alias eql? == + + # @return [Integer] a hash based on {offset}. + def hash + offset.hash + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/string_deduper.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/string_deduper.rb new file mode 100644 index 0000000..e07fa10 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/string_deduper.rb @@ -0,0 +1,118 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'concurrent' + +module TZInfo + # Maintains a pool of `String` instances. The {#dedupe} method will return + # either a pooled copy of a given `String` or add the instance to the pool. + # + # @private + class StringDeduper #:nodoc: + class << self + # @return [StringDeduper] a globally available singleton instance of + # {StringDeduper}. This instance is safe for use in concurrently + # executing threads. + attr_reader :global + end + + # Initializes a new {StringDeduper}. + def initialize + @strings = create_hash do |h, k| + v = k.dup.freeze + h[v] = v + end + end + + # @param string [String] the string to deduplicate. + # @return [bool] `string` if it is frozen, otherwise a frozen, possibly + # pre-existing copy of `string`. + def dedupe(string) + return string if string.frozen? + @strings[string] + end + + protected + + # Creates a `Hash` to store pooled `String` instances. + # + # @param block [Proc] Default value block to be passed to `Hash.new`. + # @return [Hash] a `Hash` to store pooled `String` instances. + def create_hash(&block) + Hash.new(&block) + end + end + private_constant :StringDeduper + + # A thread-safe version of {StringDeduper}. + # + # @private + class ConcurrentStringDeduper < StringDeduper #:nodoc: + protected + + def create_hash(&block) + Concurrent::Map.new(&block) + end + end + private_constant :ConcurrentStringDeduper + + + string_unary_minus_does_dedupe = if '0'.respond_to?(:-@) + # :nocov_no_string_-@: + s1 = -('0'.dup) + s2 = -('0'.dup) + s1.object_id == s2.object_id + # :nocov_no_string_-@: + else + # :nocov_string_-@: + false + # :nocov_string_-@: + end + + if string_unary_minus_does_dedupe + # :nocov_no_deduping_string_unary_minus: + + # An implementation of {StringDeduper} using the `String#-@` method where + # that method performs deduplication (Ruby 2.5 and later). + # + # Note that this is slightly different to the plain {StringDeduper} + # implementation. In this implementation, frozen literal strings are already + # in the pool and are candidates for being returned, even when passed + # another equal frozen non-literal string. {StringDeduper} will always + # return frozen strings. + # + # There are also differences in encoding handling. This implementation will + # treat strings with different encodings as different strings. + # {StringDeduper} will treat strings with the compatible encodings as the + # same string. + # + # @private + class UnaryMinusGlobalStringDeduper #:nodoc: + # @param string [String] the string to deduplicate. + # @return [bool] `string` if it is frozen, otherwise a frozen, possibly + # pre-existing copy of `string`. + def dedupe(string) + # String#-@ on Ruby 2.6 will dedupe a frozen non-literal String. Ruby + # 2.5 will just return frozen strings. 
+ # + # The pooled implementation can't tell the difference between frozen + # literals and frozen non-literals, so must always return frozen String + # instances to avoid doing unncessary work when loading format 2 + # TZInfo::Data modules. + # + # For compatibility with the pooled implementation, just return frozen + # string instances (acting like Ruby 2.5). + return string if string.frozen? + -string + end + end + private_constant :UnaryMinusGlobalStringDeduper + + StringDeduper.instance_variable_set(:@global, UnaryMinusGlobalStringDeduper.new) + # :nocov_no_deduping_string_unary_minus: + else + # :nocov_deduping_string_unary_minus: + StringDeduper.instance_variable_set(:@global, ConcurrentStringDeduper.new) + # :nocov_deduping_string_unary_minus: + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/time_with_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/time_with_offset.rb new file mode 100644 index 0000000..6b9bebb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/time_with_offset.rb @@ -0,0 +1,154 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # A subclass of `Time` used to represent local times. {TimeWithOffset} holds a + # reference to the related {TimezoneOffset} and overrides various methods to + # return results appropriate for the {TimezoneOffset}. Certain operations will + # clear the associated {TimezoneOffset} (if the {TimezoneOffset} would not + # necessarily be valid for the result). Once the {TimezoneOffset} has been + # cleared, {TimeWithOffset} behaves identically to `Time`. + # + # Arithmetic performed on {TimeWithOffset} instances is _not_ time zone-aware. + # Regardless of whether transitions in the time zone are crossed, results of + # arithmetic operations will always maintain the same offset from UTC + # (`utc_offset`). The associated {TimezoneOffset} will aways be cleared. + class TimeWithOffset < Time + include WithOffset + + # @return [TimezoneOffset] the {TimezoneOffset} associated with this + # instance. + attr_reader :timezone_offset + + # Marks this {TimeWithOffset} as a local time with the UTC offset of a given + # {TimezoneOffset} and sets the associated {TimezoneOffset}. + # + # @param timezone_offset [TimezoneOffset] the {TimezoneOffset} to use to set + # the offset of this {TimeWithOffset}. + # @return [TimeWithOffset] `self`. + # @raise [ArgumentError] if `timezone_offset` is `nil`. + def set_timezone_offset(timezone_offset) + raise ArgumentError, 'timezone_offset must be specified' unless timezone_offset + localtime(timezone_offset.observed_utc_offset) + @timezone_offset = timezone_offset + self + end + + # An overridden version of `Time#dst?` that, if there is an associated + # {TimezoneOffset}, returns the result of calling {TimezoneOffset#dst? dst?} + # on that offset. + # + # @return [Boolean] `true` if daylight savings time is being observed, + # otherwise `false`. + def dst? + to = timezone_offset + to ? to.dst? : super + end + alias isdst dst? + + # An overridden version of `Time#getlocal` that clears the associated + # {TimezoneOffset} if the base implementation of `getlocal` returns a + # {TimeWithOffset}. + # + # @return [Time] a representation of the {TimeWithOffset} using either the + # local time zone or the given offset. + def getlocal(*args) + # JRuby < 9.3 returns a Time in all cases. + # JRuby >= 9.3 returns a Time when called with no arguments and a + # TimeWithOffset with a timezone_offset assigned when called with an + # offset argument. 
+ result = super + result.clear_timezone_offset if result.kind_of?(TimeWithOffset) + result + end + + # An overridden version of `Time#gmtime` that clears the associated + # {TimezoneOffset}. + # + # @return [TimeWithOffset] `self`. + def gmtime + super + @timezone_offset = nil + self + end + + # An overridden version of `Time#localtime` that clears the associated + # {TimezoneOffset}. + # + # @return [TimeWithOffset] `self`. + def localtime(*args) + super + @timezone_offset = nil + self + end + + # An overridden version of `Time#round` that, if there is an associated + # {TimezoneOffset}, returns a {TimeWithOffset} preserving that offset. + # + # @return [Time] the rounded time. + def round(ndigits = 0) + if_timezone_offset(super) {|o,t| self.class.at(t.to_i, t.subsec * 1_000_000).set_timezone_offset(o) } + end + + # An overridden version of `Time#to_a`. The `isdst` (index 8) and `zone` + # (index 9) elements of the array are set according to the associated + # {TimezoneOffset}. + # + # @return [Array] an `Array` representation of the {TimeWithOffset}. + def to_a + if_timezone_offset(super) do |o,a| + a[8] = o.dst? + a[9] = o.abbreviation + a + end + end + + # An overridden version of `Time#utc` that clears the associated + # {TimezoneOffset}. + # + # @return [TimeWithOffset] `self`. + def utc + super + @timezone_offset = nil + self + end + + # An overridden version of `Time#zone` that, if there is an associated + # {TimezoneOffset}, returns the {TimezoneOffset#abbreviation abbreviation} + # of that offset. + # + # @return [String] the {TimezoneOffset#abbreviation abbreviation} of the + # associated {TimezoneOffset}, or the result from `Time#zone` if there is + # no such offset. + def zone + to = timezone_offset + to ? to.abbreviation : super + end + + # An overridden version of `Time#to_datetime` that, if there is an + # associated {TimezoneOffset}, returns a {DateTimeWithOffset} with that + # offset. + # + # @return [DateTime] if there is an associated {TimezoneOffset}, a + # {DateTimeWithOffset} representation of this {TimeWithOffset}, otherwise + # a `Time` representation. + def to_datetime + if_timezone_offset(super) do |o,dt| + offset = dt.offset + result = DateTimeWithOffset.jd(dt.jd + dt.day_fraction - offset) + result = result.new_offset(offset) unless offset == 0 + result.set_timezone_offset(o) + end + end + + protected + + # Clears the associated {TimezoneOffset}. + # + # @return [TimeWithOffset] `self`. + def clear_timezone_offset + @timezone_offset = nil + self + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timestamp.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timestamp.rb new file mode 100644 index 0000000..5ab44f7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timestamp.rb @@ -0,0 +1,548 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # A time represented as an `Integer` number of seconds since 1970-01-01 + # 00:00:00 UTC (ignoring leap seconds), the fraction through the second + # (sub_second as a `Rational`) and an optional UTC offset. Like Ruby's `Time` + # class, {Timestamp} can distinguish between a local time with a zero offset + # and a time specified explicitly as UTC. + class Timestamp + include Comparable + + # The Unix epoch (1970-01-01 00:00:00 UTC) as a chronological Julian day + # number. 
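+ # (As a cross-check: Date.jd(2440588) is 1970-01-01, so subtracting
+ # JD_EPOCH from a chronological Julian day number yields days since the
+ # Unix epoch.)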
+ JD_EPOCH = 2440588 + private_constant :JD_EPOCH + + class << self + # Returns a new {Timestamp} representing the (Gregorian calendar) date and + # time specified by the supplied parameters. + # + # If `utc_offset` is `nil`, `:utc` or 0, the date and time parameters will + # be interpreted as representing a UTC date and time. Otherwise the date + # and time parameters will be interpreted as a local date and time with + # the given offset. + # + # @param year [Integer] the year. + # @param month [Integer] the month (1-12). + # @param day [Integer] the day of the month (1-31). + # @param hour [Integer] the hour (0-23). + # @param minute [Integer] the minute (0-59). + # @param second [Integer] the second (0-59). + # @param sub_second [Numeric] the fractional part of the second as either + # a `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @param utc_offset [Object] either `nil` for a {Timestamp} without a + # specified offset, an offset from UTC specified as an `Integer` number + # of seconds or the `Symbol` `:utc`). + # @return [Timestamp] a new {Timestamp} representing the specified + # (Gregorian calendar) date and time. + # @raise [ArgumentError] if either of `year`, `month`, `day`, `hour`, + # `minute`, or `second` is not an `Integer`. + # @raise [ArgumentError] if `sub_second` is not a `Rational`, or the + # `Integer` 0. + # @raise [ArgumentError] if `utc_offset` is not `nil`, not an `Integer` + # and not the `Symbol` `:utc`. + # @raise [RangeError] if `month` is not between 1 and 12. + # @raise [RangeError] if `day` is not between 1 and 31. + # @raise [RangeError] if `hour` is not between 0 and 23. + # @raise [RangeError] if `minute` is not between 0 and 59. + # @raise [RangeError] if `second` is not between 0 and 59. + # @raise [RangeError] if `sub_second` is a `Rational` but that is less + # than 0 or greater than or equal to 1. + def create(year, month = 1, day = 1, hour = 0, minute = 0, second = 0, sub_second = 0, utc_offset = nil) + raise ArgumentError, 'year must be an Integer' unless year.kind_of?(Integer) + raise ArgumentError, 'month must be an Integer' unless month.kind_of?(Integer) + raise ArgumentError, 'day must be an Integer' unless day.kind_of?(Integer) + raise ArgumentError, 'hour must be an Integer' unless hour.kind_of?(Integer) + raise ArgumentError, 'minute must be an Integer' unless minute.kind_of?(Integer) + raise ArgumentError, 'second must be an Integer' unless second.kind_of?(Integer) + raise RangeError, 'month must be between 1 and 12' if month < 1 || month > 12 + raise RangeError, 'day must be between 1 and 31' if day < 1 || day > 31 + raise RangeError, 'hour must be between 0 and 23' if hour < 0 || hour > 23 + raise RangeError, 'minute must be between 0 and 59' if minute < 0 || minute > 59 + raise RangeError, 'second must be between 0 and 59' if second < 0 || second > 59 + + # Based on days_from_civil from https://howardhinnant.github.io/date_algorithms.html#days_from_civil + after_february = month > 2 + year -= 1 unless after_february + era = year / 400 + year_of_era = year - era * 400 + day_of_year = (153 * (month + (after_february ? 
-3 : 9)) + 2) / 5 + day - 1 + day_of_era = year_of_era * 365 + year_of_era / 4 - year_of_era / 100 + day_of_year + days_since_epoch = era * 146097 + day_of_era - 719468 + value = ((days_since_epoch * 24 + hour) * 60 + minute) * 60 + second + value -= utc_offset if utc_offset.kind_of?(Integer) + + new(value, sub_second, utc_offset) + end + + # When used without a block, returns a {Timestamp} representation of a + # given `Time`, `DateTime` or {Timestamp}. + # + # When called with a block, the {Timestamp} representation of `value` is + # passed to the block. The block must then return a {Timestamp}, which + # will be converted back to the type of the initial value. If the initial + # value was a {Timestamp}, the block result will just be returned. + # + # The UTC offset of `value` can either be preserved (the {Timestamp} + # representation will have the same UTC offset as `value`), ignored (the + # {Timestamp} representation will have no defined UTC offset), or treated + # as though it were UTC (the {Timestamp} representation will have a + # {utc_offset} of 0 and {utc?} will return `true`). + # + # @param value [Object] a `Time`, `DateTime` or {Timestamp}. + # @param offset [Symbol] either `:preserve` to preserve the offset of + # `value`, `:ignore` to ignore the offset of `value` and create a + # {Timestamp} with an unspecified offset, or `:treat_as_utc` to treat + # the offset of `value` as though it were UTC and create a UTC + # {Timestamp}. + # @yield [timestamp] if a block is provided, the {Timestamp} + # representation is passed to the block. + # @yieldparam timestamp [Timestamp] the {Timestamp} representation of + # `value`. + # @yieldreturn [Timestamp] a {Timestamp} to be converted back to the type + # of `value`. + # @return [Object] if called without a block, the {Timestamp} + # representation of `value`, otherwise the result of the block, + # converted back to the type of `value`. + def for(value, offset = :preserve) + raise ArgumentError, 'value must be specified' unless value + + case offset + when :ignore + ignore_offset = true + target_utc_offset = nil + when :treat_as_utc + ignore_offset = true + target_utc_offset = :utc + when :preserve + ignore_offset = false + target_utc_offset = nil + else + raise ArgumentError, 'offset must be :preserve, :ignore or :treat_as_utc' + end + + time_like = false + timestamp = case value + when Time + for_time(value, ignore_offset, target_utc_offset) + when DateTime + for_datetime(value, ignore_offset, target_utc_offset) + when Timestamp + for_timestamp(value, ignore_offset, target_utc_offset) + else + raise ArgumentError, "#{value.class} values are not supported" unless is_time_like?(value) + time_like = true + for_time_like(value, ignore_offset, target_utc_offset) + end + + if block_given? + result = yield timestamp + raise ArgumentError, 'block must return a Timestamp' unless result.kind_of?(Timestamp) + + case value + when Time + result.to_time + when DateTime + result.to_datetime + else # A Time-like value or a Timestamp + time_like ? result.to_time : result + end + else + timestamp + end + end + + # Creates a new UTC {Timestamp}. + # + # @param value [Integer] the number of seconds since 1970-01-01 00:00:00 + # UTC ignoring leap seconds. + # @param sub_second [Numeric] the fractional part of the second as either + # a `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @raise [ArgumentError] if `value` is not an `Integer`. 
+ # @raise [ArgumentError] if `sub_second` is not a `Rational`, or the + # `Integer` 0. + # @raise [RangeError] if `sub_second` is a `Rational` but that is less + # than 0 or greater than or equal to 1. + def utc(value, sub_second = 0) + new(value, sub_second, :utc) + end + + private + + # Constructs a new instance of `self` (i.e. {Timestamp} or a subclass of + # {Timestamp}) without validating the parameters. This method is used + # internally within {Timestamp} to avoid the overhead of checking + # parameters. + # + # @param value [Integer] the number of seconds since 1970-01-01 00:00:00 + # UTC ignoring leap seconds. + # @param sub_second [Numeric] the fractional part of the second as either + # a `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @param utc_offset [Object] either `nil` for a {Timestamp} without a + # specified offset, an offset from UTC specified as an `Integer` number + # of seconds or the `Symbol` `:utc`). + # @return [Timestamp] a new instance of `self`. + def new!(value, sub_second = 0, utc_offset = nil) + result = allocate + result.send(:initialize!, value, sub_second, utc_offset) + result + end + + # Creates a {Timestamp} that represents a given `Time`, optionally + # ignoring the offset. + # + # @param time [Time] a `Time`. + # @param ignore_offset [Boolean] whether to ignore the offset of `time`. + # @param target_utc_offset [Object] if `ignore_offset` is `true`, the UTC + # offset of the result (`:utc`, `nil` or an `Integer`). + # @return [Timestamp] the {Timestamp} representation of `time`. + def for_time(time, ignore_offset, target_utc_offset) + value = time.to_i + sub_second = time.subsec + + if ignore_offset + utc_offset = target_utc_offset + value += time.utc_offset + elsif time.utc? + utc_offset = :utc + else + utc_offset = time.utc_offset + end + + new!(value, sub_second, utc_offset) + end + + # Creates a {Timestamp} that represents a given `DateTime`, optionally + # ignoring the offset. + # + # @param datetime [DateTime] a `DateTime`. + # @param ignore_offset [Boolean] whether to ignore the offset of + # `datetime`. + # @param target_utc_offset [Object] if `ignore_offset` is `true`, the UTC + # offset of the result (`:utc`, `nil` or an `Integer`). + # @return [Timestamp] the {Timestamp} representation of `datetime`. + def for_datetime(datetime, ignore_offset, target_utc_offset) + value = (datetime.jd - JD_EPOCH) * 86400 + datetime.sec + datetime.min * 60 + datetime.hour * 3600 + sub_second = datetime.sec_fraction + + if ignore_offset + utc_offset = target_utc_offset + else + utc_offset = (datetime.offset * 86400).to_i + value -= utc_offset + end + + new!(value, sub_second, utc_offset) + end + + # Returns a {Timestamp} that represents another {Timestamp}, optionally + # ignoring the offset. If the result would be identical to `value`, the + # same instance is returned. If the passed in value is an instance of a + # subclass of {Timestamp}, then a new {Timestamp} will always be returned. + # + # @param timestamp [Timestamp] a {Timestamp}. + # @param ignore_offset [Boolean] whether to ignore the offset of + # `timestamp`. + # @param target_utc_offset [Object] if `ignore_offset` is `true`, the UTC + # offset of the result (`:utc`, `nil` or an `Integer`). + # @return [Timestamp] a [Timestamp] representation of `timestamp`. + def for_timestamp(timestamp, ignore_offset, target_utc_offset) + if ignore_offset + if target_utc_offset + unless target_utc_offset == :utc && timestamp.utc? 
|| timestamp.utc_offset == target_utc_offset + return new!(timestamp.value + (timestamp.utc_offset || 0), timestamp.sub_second, target_utc_offset) + end + elsif timestamp.utc_offset + return new!(timestamp.value + timestamp.utc_offset, timestamp.sub_second) + end + end + + unless timestamp.instance_of?(Timestamp) + # timestamp is identical in value, sub_second and utc_offset but is a + # subclass (i.e. TimestampWithOffset). Return a new Timestamp + # instance. + return new!(timestamp.value, timestamp.sub_second, timestamp.utc? ? :utc : timestamp.utc_offset) + end + + timestamp + end + + # Determines if an object is like a `Time` (for the purposes of converting + # to a {Timestamp} with {for}), responding to `to_i` and `subsec`. + # + # @param value [Object] an object to test. + # @return [Boolean] `true` if the object is `Time`-like, otherwise + # `false`. + def is_time_like?(value) + value.respond_to?(:to_i) && value.respond_to?(:subsec) + end + + # Creates a {Timestamp} that represents a given `Time`-like object, + # optionally ignoring the offset (if the `time_like` responds to + # `utc_offset`). + # + # @param time_like [Object] a `Time`-like object. + # @param ignore_offset [Boolean] whether to ignore the offset of `time`. + # @param target_utc_offset [Object] if `ignore_offset` is `true`, the UTC + # offset of the result (`:utc`, `nil` or an `Integer`). + # @return [Timestamp] the {Timestamp} representation of `time_like`. + def for_time_like(time_like, ignore_offset, target_utc_offset) + value = time_like.to_i + sub_second = time_like.subsec.to_r + + if ignore_offset + utc_offset = target_utc_offset + value += time_like.utc_offset.to_i if time_like.respond_to?(:utc_offset) + elsif time_like.respond_to?(:utc_offset) + utc_offset = time_like.utc_offset.to_i + else + utc_offset = 0 + end + + new(value, sub_second, utc_offset) + end + end + + # @return [Integer] the number of seconds since 1970-01-01 00:00:00 UTC + # ignoring leap seconds (i.e. each day is treated as if it were 86,400 + # seconds long). + attr_reader :value + + # @return [Numeric] the fraction of a second elapsed since timestamp as + # either a `Rational` or the `Integer` 0. Always greater than or equal to + # 0 and less than 1. + attr_reader :sub_second + + # @return [Integer] the offset from UTC in seconds or `nil` if the + # {Timestamp} doesn't have a specified offset. + attr_reader :utc_offset + + # Initializes a new {Timestamp}. + # + # @param value [Integer] the number of seconds since 1970-01-01 00:00:00 UTC + # ignoring leap seconds. + # @param sub_second [Numeric] the fractional part of the second as either a + # `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @param utc_offset [Object] either `nil` for a {Timestamp} without a + # specified offset, an offset from UTC specified as an `Integer` number of + # seconds or the `Symbol` `:utc`). + # @raise [ArgumentError] if `value` is not an `Integer`. + # @raise [ArgumentError] if `sub_second` is not a `Rational`, or the + # `Integer` 0. + # @raise [RangeError] if `sub_second` is a `Rational` but that is less + # than 0 or greater than or equal to 1. + # @raise [ArgumentError] if `utc_offset` is not `nil`, not an `Integer` and + # not the `Symbol` `:utc`. 
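+ # Hypothetical examples, added for illustration (the values are invented),
+ # showing the three accepted forms of `utc_offset`:
+ #
+ # @example
+ #   Timestamp.new(1588316400)                         # no specified offset
+ #   Timestamp.new(1588316400, 0, :utc).utc?           #=> true
+ #   Timestamp.new(1588316400, Rational(1, 2), 3600)   # +01:00, half a second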
+ def initialize(value, sub_second = 0, utc_offset = nil) + raise ArgumentError, 'value must be an Integer' unless value.kind_of?(Integer) + raise ArgumentError, 'sub_second must be a Rational or the Integer 0' unless (sub_second.kind_of?(Integer) && sub_second == 0) || sub_second.kind_of?(Rational) + raise RangeError, 'sub_second must be >= 0 and < 1' if sub_second < 0 || sub_second >= 1 + raise ArgumentError, 'utc_offset must be an Integer, :utc or nil' if utc_offset && utc_offset != :utc && !utc_offset.kind_of?(Integer) + initialize!(value, sub_second, utc_offset) + end + + # @return [Boolean] `true` if this {Timestamp} represents UTC, `false` if + # the {Timestamp} wasn't specified as UTC or `nil` if the {Timestamp} has + # no specified offset. + def utc? + @utc + end + + # Adds a number of seconds to the {Timestamp} value, setting the UTC offset + # of the result. + # + # @param seconds [Integer] the number of seconds to be added. + # @param utc_offset [Object] either `nil` for a {Timestamp} without a + # specified offset, an offset from UTC specified as an `Integer` number of + # seconds or the `Symbol` `:utc`). + # @return [Timestamp] the result of adding `seconds` to the + # {Timestamp} value as a new {Timestamp} instance with the chosen + # `utc_offset`. + # @raise [ArgumentError] if `seconds` is not an `Integer`. + # @raise [ArgumentError] if `utc_offset` is not `nil`, not an `Integer` and + # not the `Symbol` `:utc`. + def add_and_set_utc_offset(seconds, utc_offset) + raise ArgumentError, 'seconds must be an Integer' unless seconds.kind_of?(Integer) + raise ArgumentError, 'utc_offset must be an Integer, :utc or nil' if utc_offset && utc_offset != :utc && !utc_offset.kind_of?(Integer) + return self if seconds == 0 && utc_offset == (@utc ? :utc : @utc_offset) + Timestamp.send(:new!, @value + seconds, @sub_second, utc_offset) + end + + # @return [Timestamp] a UTC {Timestamp} equivalent to this instance. Returns + # `self` if {#utc? self.utc?} is `true`. + def utc + return self if @utc + Timestamp.send(:new!, @value, @sub_second, :utc) + end + + # Converts this {Timestamp} to a `Time`. + # + # @return [Time] a `Time` representation of this {Timestamp}. If the UTC + # offset of this {Timestamp} is not specified, a UTC `Time` will be + # returned. + def to_time + time = new_time + + if @utc_offset && !@utc + time.localtime(@utc_offset) + else + time.utc + end + end + + # Converts this {Timestamp} to a `DateTime`. + # + # @return [DateTime] a DateTime representation of this {Timestamp}. If the + # UTC offset of this {Timestamp} is not specified, a UTC `DateTime` will + # be returned. + def to_datetime + new_datetime + end + + # Converts this {Timestamp} to an `Integer` number of seconds since + # 1970-01-01 00:00:00 UTC (ignoring leap seconds). + # + # @return [Integer] an Integer representation of this {Timestamp} (the + # number of seconds since 1970-01-01 00:00:00 UTC ignoring leap seconds). + def to_i + value + end + + # Formats this {Timestamp} according to the directives in the given format + # string. + # + # @param format [String] the format string. Please refer to `Time#strftime` + # for a list of supported format directives. + # @return [String] the formatted {Timestamp}. + # @raise [ArgumentError] if `format` is not specified. + def strftime(format) + raise ArgumentError, 'format must be specified' unless format + to_time.strftime(format) + end + + # @return [String] a `String` representation of this {Timestamp}. 
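+ # Illustrative outputs derived from the implementation below (the values
+ # are invented):
+ #
+ # @example
+ #   Timestamp.utc(1588316400).to_s           #=> "1588316400 UTC"
+ #   Timestamp.new(1588316400, 0, 3600).to_s  #=> "1588320000 +01:00 (1588316400 UTC)"
+ #   Timestamp.new(1588316400).to_s           #=> "1588316400"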
+ def to_s + return value_and_sub_second_to_s unless @utc_offset + return "#{value_and_sub_second_to_s} UTC" if @utc + + sign = @utc_offset >= 0 ? '+' : '-' + min, sec = @utc_offset.abs.divmod(60) + hour, min = min.divmod(60) + + "#{value_and_sub_second_to_s(@utc_offset)} #{sign}#{'%02d' % hour}:#{'%02d' % min}#{sec > 0 ? ':%02d' % sec : nil}#{@utc_offset != 0 ? " (#{value_and_sub_second_to_s} UTC)" : nil}" + end + + # Compares this {Timestamp} with another. + # + # {Timestamp} instances without a defined UTC offset are not comparable with + # {Timestamp} instances that have a defined UTC offset. + # + # @param t [Timestamp] the {Timestamp} to compare this instance with. + # @return [Integer] -1, 0 or 1 depending if this instance is earlier, equal + # or later than `t` respectively. Returns `nil` when comparing a + # {Timestamp} that does not have a defined UTC offset with a {Timestamp} + # that does have a defined UTC offset. Returns `nil` if `t` is not a + # {Timestamp}. + def <=>(t) + return nil unless t.kind_of?(Timestamp) + return nil if utc_offset && !t.utc_offset + return nil if !utc_offset && t.utc_offset + + result = value <=> t.value + result = sub_second <=> t.sub_second if result == 0 + result + end + + alias eql? == + + # @return [Integer] a hash based on the value, sub-second and whether there + # is a defined UTC offset. + def hash + [@value, @sub_second, !!@utc_offset].hash + end + + # @return [String] the internal object state as a programmer-readable + # `String`. + def inspect + "#<#{self.class}: @value=#{@value}, @sub_second=#{@sub_second}, @utc_offset=#{@utc_offset.inspect}, @utc=#{@utc.inspect}>" + end + + protected + + # Creates a new instance of a `Time` or `Time`-like class matching the + # {value} and {sub_second} of this {Timestamp}, but not setting the offset. + # + # @param klass [Class] the class to instantiate. + # + # @private + def new_time(klass = Time) + klass.at(@value, @sub_second * 1_000_000) + end + + # Constructs a new instance of a `DateTime` or `DateTime`-like class with + # the same {value}, {sub_second} and {utc_offset} as this {Timestamp}. + # + # @param klass [Class] the class to instantiate. + # + # @private + def new_datetime(klass = DateTime) + datetime = klass.jd(JD_EPOCH + ((@value.to_r + @sub_second) / 86400)) + @utc_offset && @utc_offset != 0 ? datetime.new_offset(Rational(@utc_offset, 86400)) : datetime + end + + private + + # Converts the value and sub-seconds to a `String`, adding on the given + # offset. + # + # @param offset [Integer] the offset to add to the value. + # @return [String] the value and sub-seconds. + def value_and_sub_second_to_s(offset = 0) + "#{@value + offset}#{sub_second_to_s}" + end + + # Converts the {sub_second} value to a `String` suitable for appending to + # the `String` representation of a {Timestamp}. + # + # @return [String] a `String` representation of {sub_second}. + def sub_second_to_s + if @sub_second == 0 + '' + else + " #{@sub_second.numerator}/#{@sub_second.denominator}" + end + end + + # Initializes a new {Timestamp} without validating the parameters. This + # method is used internally within {Timestamp} to avoid the overhead of + # checking parameters. + # + # @param value [Integer] the number of seconds since 1970-01-01 00:00:00 UTC + # ignoring leap seconds. + # @param sub_second [Numeric] the fractional part of the second as either a + # `Rational` that is greater than or equal to 0 and less than 1, or the + # `Integer` 0. 
+ # @param utc_offset [Object] either `nil` for a {Timestamp} without a + # specified offset, an offset from UTC specified as an `Integer` number of + # seconds or the `Symbol` `:utc`). + def initialize!(value, sub_second = 0, utc_offset = nil) + @value = value + + # Convert Rational(0,1) to 0. + @sub_second = sub_second == 0 ? 0 : sub_second + + if utc_offset + @utc = utc_offset == :utc + @utc_offset = @utc ? 0 : utc_offset + else + @utc = @utc_offset = nil + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timestamp_with_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timestamp_with_offset.rb new file mode 100644 index 0000000..5fd6212 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timestamp_with_offset.rb @@ -0,0 +1,85 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # A subclass of {Timestamp} used to represent local times. + # {TimestampWithOffset} holds a reference to the related {TimezoneOffset} and + # overrides various methods to return results appropriate for the + # {TimezoneOffset}. Certain operations will clear the associated + # {TimezoneOffset} (if the {TimezoneOffset} would not necessarily be valid for + # the result). Once the {TimezoneOffset} has been cleared, + # {TimestampWithOffset} behaves identically to {Timestamp}. + class TimestampWithOffset < Timestamp + include WithOffset + + # @return [TimezoneOffset] the {TimezoneOffset} associated with this + # instance. + attr_reader :timezone_offset + + # Creates a new {TimestampWithOffset} from a given {Timestamp} and + # {TimezoneOffset}. + # + # @param timestamp [Timestamp] a {Timestamp}. + # @param timezone_offset [TimezoneOffset] a {TimezoneOffset} valid at the + # time of `timestamp`. + # @return [TimestampWithOffset] a {TimestampWithOffset} that has the same + # {value value} and {sub_second sub_second} as the `timestamp` parameter, + # a {utc_offset utc_offset} equal to the + # {TimezoneOffset#observed_utc_offset observed_utc_offset} of the + # `timezone_offset` parameter and {timezone_offset timezone_offset} set to + # the `timezone_offset` parameter. + # @raise [ArgumentError] if `timestamp` or `timezone_offset` is `nil`. + def self.set_timezone_offset(timestamp, timezone_offset) + raise ArgumentError, 'timestamp must be specified' unless timestamp + raise ArgumentError, 'timezone_offset must be specified' unless timezone_offset + new!(timestamp.value, timestamp.sub_second, timezone_offset.observed_utc_offset).set_timezone_offset(timezone_offset) + end + + # Sets the associated {TimezoneOffset} of this {TimestampWithOffset}. + # + # @param timezone_offset [TimezoneOffset] a {TimezoneOffset} valid at the time + # and for the offset of this {TimestampWithOffset}. + # @return [TimestampWithOffset] `self`. + # @raise [ArgumentError] if `timezone_offset` is `nil`. + # @raise [ArgumentError] if {utc? self.utc?} is `true`. + # @raise [ArgumentError] if `timezone_offset.observed_utc_offset` does not equal + # `self.utc_offset`. + def set_timezone_offset(timezone_offset) + raise ArgumentError, 'timezone_offset must be specified' unless timezone_offset + raise ArgumentError, 'timezone_offset.observed_utc_offset does not match self.utc_offset' if utc? || utc_offset != timezone_offset.observed_utc_offset + @timezone_offset = timezone_offset + self + end + + # An overridden version of {Timestamp#to_time} that, if there is an + # associated {TimezoneOffset}, returns a {TimeWithOffset} with that offset. 
+ # + # @return [Time] if there is an associated {TimezoneOffset}, a + # {TimeWithOffset} representation of this {TimestampWithOffset}, otherwise + # a `Time` representation. + def to_time + to = timezone_offset + if to + new_time(TimeWithOffset).set_timezone_offset(to) + else + super + end + end + + # An overridden version of {Timestamp#to_datetime}, if there is an + # associated {TimezoneOffset}, returns a {DateTimeWithOffset} with that + # offset. + # + # @return [DateTime] if there is an associated {TimezoneOffset}, a + # {DateTimeWithOffset} representation of this {TimestampWithOffset}, + # otherwise a `DateTime` representation. + def to_datetime + to = timezone_offset + if to + new_datetime(DateTimeWithOffset).set_timezone_offset(to) + else + super + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone.rb new file mode 100644 index 0000000..1fb5681 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone.rb @@ -0,0 +1,1160 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +require 'set' + +module TZInfo + # {AmbiguousTime} is raised to indicate that a specified local time has more + # than one possible equivalent UTC time. Such ambiguities arise when the + # clocks are set back in a time zone, most commonly during the repeated hour + # when transitioning from daylight savings time to standard time. + # + # {AmbiguousTime} is raised by {Timezone#local_datetime}, + # {Timezone#local_time}, {Timezone#local_timestamp}, {Timezone#local_to_utc} + # and {Timezone#period_for_local} when using an ambiguous time and not + # specifying how to resolve the ambiguity. + class AmbiguousTime < StandardError + end + + # {PeriodNotFound} is raised to indicate that no {TimezonePeriod} matching a + # given time could be found. + class PeriodNotFound < StandardError + end + + # {InvalidTimezoneIdentifier} is raised by {Timezone.get} if the identifier + # given is not valid. + class InvalidTimezoneIdentifier < StandardError + end + + # {UnknownTimezone} is raised when calling methods on an instance of + # {Timezone} that was created directly. To obtain {Timezone} instances the + # {Timezone.get} method should be used instead. + class UnknownTimezone < StandardError + end + + # The {Timezone} class represents a time zone. It provides a factory method, + # {get}, to retrieve {Timezone} instances by their identifier. + # + # The {Timezone#to_local} method can be used to convert `Time` and `DateTime` + # instances to the local time for the zone. For example: + # + # tz = TZInfo::Timezone.get('America/New_York') + # local_time = tz.to_local(Time.utc(2005,8,29,15,35,0)) + # local_datetime = tz.to_local(DateTime.new(2005,8,29,15,35,0)) + # + # Local `Time` and `DateTime` instances returned by `Timezone` have the + # correct local offset. + # + # The {Timezone#local_to_utc} method can by used to convert local `Time` and + # `DateTime` instances to UTC. {Timezone#local_to_utc} ignores the UTC offset + # of the supplied value and treats if it is a local time for the zone. For + # example: + # + # tz = TZInfo::Timezone.get('America/New_York') + # utc_time = tz.local_to_utc(Time.new(2005,8,29,11,35,0)) + # utc_datetime = tz.local_to_utc(DateTime.new(2005,8,29,11,35,0)) + # + # Each time zone is treated as sequence of periods of time ({TimezonePeriod}) + # that observe the same offset ({TimezoneOffset}). 
Transitions + # ({TimezoneTransition}) denote the end of one period and the start of the + # next. The {Timezone} class has methods that allow the periods, offsets and + # transitions of a time zone to be interrogated. + # + # All methods that take `Time` objects as parameters can be used with + # arbitrary `Time`-like objects that respond to both `to_i` and `subsec` and + # optionally `utc_offset`. + # + # The {Timezone} class is thread-safe. It is safe to use class and instance + # methods of {Timezone} in concurrently executing threads. Instances of + # {Timezone} can be shared across thread boundaries. + # + # The IANA Time Zone Database maintainers recommend that time zone identifiers + # are not made visible to end-users (see [Names of + # timezones](https://data.iana.org/time-zones/theory.html#naming)). The + # {Country} class can be used to obtain lists of time zones by country, + # including user-friendly descriptions and approximate locations. + # + # @abstract The {get} method returns an instance of either {DataTimezone} or + # {LinkedTimezone}. The {get_proxy} method and other methods returning + # collections of time zones return instances of {TimezoneProxy}. + class Timezone + include Comparable + + # The default value of the dst parameter of the {local_datetime}, + # {local_time}, {local_timestamp}, {local_to_utc} and {period_for_local} + # methods. + # + # @!visibility private + @@default_dst = nil + + class << self + # Sets the default value of the optional `dst` parameter of the + # {local_datetime}, {local_time}, {local_timestamp}, {local_to_utc} and + # {period_for_local} methods. Can be set to `nil`, `true` or `false`. + # + # @param value [Boolean] `nil`, `true` or `false`. + def default_dst=(value) + @@default_dst = value.nil? ? nil : !!value + end + + # Returns the default value of the optional `dst` parameter of the + # {local_time}, {local_datetime} and {local_timestamp}, {local_to_utc} + # and {period_for_local} methods (`nil`, `true` or `false`). + # + # {default_dst} defaults to `nil` unless changed with {default_dst=}. + # + # @return [Boolean] the default value of the optional `dst` parameter of + # the {local_time}, {local_datetime} and {local_timestamp}, + # {local_to_utc} and {period_for_local} methods (`nil`, `true` or + # `false`). + def default_dst + @@default_dst + end + + # Returns a time zone by its IANA Time Zone Database identifier (e.g. + # `"Europe/London"` or `"America/Chicago"`). Call {all_identifiers} for a + # list of all the valid identifiers. + # + # The {get} method will return a subclass of {Timezone}, either a + # {DataTimezone} (for a time zone defined by rules that set out when + # transitions occur) or a {LinkedTimezone} (for a time zone that is just a + # link to or alias for a another time zone). + # + # @param identifier [String] an IANA Time Zone Database time zone + # identifier. + # @return [Timezone] the {Timezone} with the given `identifier`. + # @raise [InvalidTimezoneIdentifier] if the `identifier` is not valid. + def get(identifier) + data_source.get_timezone_info(identifier).create_timezone + end + + # Returns a proxy for the time zone with the given identifier. This allows + # loading of the time zone data to be deferred until it is first needed. + # + # The identifier will not be validated. If an invalid identifier is + # specified, no exception will be raised until the proxy is used. + # + # @param identifier [String] an IANA Time Zone Database time zone + # identifier. 
+ # @return [TimezoneProxy] a proxy for the time zone with the given + # `identifier`. + def get_proxy(identifier) + TimezoneProxy.new(identifier) + end + + # Returns an `Array` of all the available time zones. + # + # {TimezoneProxy} instances are returned to avoid the overhead of loading + # time zone data until it is first needed. + # + # @return [Array<Timezone>] all available time zones. + def all + get_proxies(all_identifiers) + end + + # @return [Array<String>] an `Array` containing the identifiers of all the + # available time zones. + def all_identifiers + data_source.timezone_identifiers + end + + # Returns an `Array` of all the available time zones that are + # defined by offsets and transitions. + # + # {TimezoneProxy} instances are returned to avoid the overhead of loading + # time zone data until it is first needed. + # + # @return [Array<Timezone>] an `Array` of all the available time zones + # that are defined by offsets and transitions. + def all_data_zones + get_proxies(all_data_zone_identifiers) + end + + # @return [Array<String>] an `Array` of the identifiers of all available + # time zones that are defined by offsets and transitions. + def all_data_zone_identifiers + data_source.data_timezone_identifiers + end + + # Returns an `Array` of all the available time zones that are + # defined as links to / aliases for other time zones. + # + # {TimezoneProxy} instances are returned to avoid the overhead of loading + # time zone data until it is first needed. + # + # @return [Array<Timezone>] an `Array` of all the available time zones + # that are defined as links to / aliases for other time zones. + def all_linked_zones + get_proxies(all_linked_zone_identifiers) + end + + # @return [Array<String>] an `Array` of the identifiers of all available + # time zones that are defined as links to / aliases for other time zones. + def all_linked_zone_identifiers + data_source.linked_timezone_identifiers + end + + # Returns an `Array` of all the time zones that are observed by at least + # one {Country}. This is not the complete set of time zones as some are + # not country specific (e.g. `'Etc/GMT'`). + # + # {TimezoneProxy} instances are returned to avoid the overhead of loading + # time zone data until it is first needed. + # + # @return [Array<Timezone>] an `Array` of all the time zones that are + # observed by at least one {Country}. + def all_country_zones + Country.all.map(&:zones).flatten.uniq + end + + # Returns an `Array` of the identifiers of all the time zones that are + # observed by at least one {Country}. This is not the complete set of time + # zone identifiers as some are not country specific (e.g. `'Etc/GMT'`). + # + # {TimezoneProxy} instances are returned to avoid the overhead of loading + # time zone data until it is first needed. + # + # @return [Array<String>] an `Array` of the identifiers of all the time + # zones that are observed by at least one {Country}. + def all_country_zone_identifiers + Country.all.map(&:zone_identifiers).flatten.uniq + end + + private + + # @param [Enumerable<String>] identifiers an `Enumerable` of time zone + # identifiers. + # @return [Array<TimezoneProxy>] an `Array` of {TimezoneProxy} + # instances corresponding to the given identifiers. + def get_proxies(identifiers) + identifiers.collect {|identifier| get_proxy(identifier)} + end + + # @return [DataSource] the current DataSource. + def data_source + DataSource.get + end + end + + # @return [String] the identifier of the time zone, for example, + # `"Europe/Paris"`. 
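+ # Illustrative usage (editor's sketch, not part of the upstream tzinfo
+ # source; assumes the tzinfo-data gem is installed so the identifier can
+ # be resolved):
+ #
+ # tz = TZInfo::Timezone.get('Europe/Paris')
+ # tz.identifier #=> "Europe/Paris"
+ # tz.name #=> "Europe/Paris"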
+ def identifier + raise_unknown_timezone + end + + # @return [String] the identifier of the time zone, for example, + # `"Europe/Paris"`. + def name + # Don't use alias, as identifier gets overridden. + identifier + end + + # @return [String] {identifier}, modified to make it more readable. + def to_s + friendly_identifier + end + + # @return [String] the internal object state as a programmer-readable + # `String`. + def inspect + "#<#{self.class}: #{identifier}>" + end + + # Returns {identifier}, modified to make it more readable. Set + # `skip_first_part` to omit the first part of the identifier (typically a + # region name) where there is more than one part. + # + # For example: + # + # TZInfo::Timezone.get('Europe/Paris').friendly_identifier(false) #=> "Europe - Paris" + # TZInfo::Timezone.get('Europe/Paris').friendly_identifier(true) #=> "Paris" + # TZInfo::Timezone.get('America/Indiana/Knox').friendly_identifier(false) #=> "America - Knox, Indiana" + # TZInfo::Timezone.get('America/Indiana/Knox').friendly_identifier(true) #=> "Knox, Indiana" + # + # @param skip_first_part [Boolean] whether the first part of the identifier + # (typically a region name) should be omitted. + # @return [String] the modified identifier. + def friendly_identifier(skip_first_part = false) + id = identifier + id = id.encode(Encoding::UTF_8) unless id.encoding.ascii_compatible? + parts = id.split('/') + if parts.empty? + # shouldn't happen + identifier + elsif parts.length == 1 + parts[0] + else + prefix = skip_first_part ? nil : "#{parts[0]} - " + + parts = parts.drop(1).map do |part| + part.gsub!(/_/, ' ') + + if part.index(/[a-z]/) + # Missing a space if a lower case followed by an upper case and the + # name isn't McXxxx. + part.gsub!(/([^M][a-z])([A-Z])/, '\1 \2') + part.gsub!(/([M][a-bd-z])([A-Z])/, '\1 \2') + + # Missing an apostrophe if two consecutive upper case characters. + part.gsub!(/([A-Z])([A-Z])/, '\1\'\2') + end + + part + end + + "#{prefix}#{parts.reverse.join(', ')}" + end + end + + # Returns the {TimezonePeriod} that is valid at a given time. + # + # Unlike {period_for_local} and {period_for_utc}, the UTC offset of the + # `time` parameter is taken into consideration. + # + # @param time [Object] a `Time`, `DateTime` or {Timestamp}. + # @return [TimezonePeriod] the {TimezonePeriod} that is valid at `time`. + # @raise [ArgumentError] if `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} with an unspecified + # offset. + def period_for(time) + raise_unknown_timezone + end + + # Returns the set of {TimezonePeriod}s that are valid for the given + # local time as an `Array`. + # + # The UTC offset of the `local_time` parameter is ignored (it is treated as + # a time in the time zone represented by `self`). + # + # This will typically return an `Array` containing a single + # {TimezonePeriod}. More than one {TimezonePeriod} will be returned when the + # local time is ambiguous (for example, when daylight savings time ends). An + # empty `Array` will be returned when the local time is not valid (for + # example, when daylight savings time begins). + # + # To obtain just a single {TimezonePeriod} in all cases, use + # {period_for_local} instead and specify how ambiguities should be resolved. + # + # @param local_time [Object] a `Time`, `DateTime` or {Timestamp}. + # @return [Array<TimezonePeriod>] the set of {TimezonePeriod}s that are + # valid at `local_time`. + # @raise [ArgumentError] if `local_time` is `nil`. 
+ def periods_for_local(local_time) + raise_unknown_timezone + end + + # Returns an `Array` of {TimezoneTransition} instances representing the + # times where the UTC offset of the timezone changes. + # + # Transitions are returned up to a given time (`to`). + # + # A from time may also be supplied using the `from` parameter. If from is + # not `nil`, only transitions from that time onwards will be returned. + # + # Comparisons with `to` are exclusive. Comparisons with `from` are + # inclusive. If a transition falls precisely on `to`, it will be excluded. + # If a transition falls on `from`, it will be included. + # + # @param to [Object] a `Time`, `DateTime` or {Timestamp} specifying the + # latest (exclusive) transition to return. + # @param from [Object] an optional `Time`, `DateTime` or {Timestamp} + # specifying the earliest (inclusive) transition to return. + # @return [Array<TimezoneTransition>] the transitions that are earlier than + # `to` and, if specified, at or later than `from`. Transitions are ordered + # by when they occur, from earliest to latest. + # @raise [ArgumentError] if `from` is specified and `to` is not greater than + # `from`. + # @raise [ArgumentError] is raised if `to` is `nil`. + # @raise [ArgumentError] if either `to` or `from` is a {Timestamp} with an + # unspecified offset. + def transitions_up_to(to, from = nil) + raise_unknown_timezone + end + + # Returns the canonical {Timezone} instance for this {Timezone}. + # + # The IANA Time Zone database contains two types of definition: Zones and + # Links. Zones are defined by rules that set out when transitions occur. + # Links are just references to fully defined Zone, creating an alias for + # that Zone. + # + # Links are commonly used where a time zone has been renamed in a release of + # the Time Zone database. For example, the US/Eastern Zone was renamed as + # America/New_York. A US/Eastern Link was added in its place, linking to + # (and creating an alias for) America/New_York. + # + # Links are also used for time zones that are currently identical to a full + # Zone, but that are administered separately. For example, Europe/Vatican is + # a Link to (and alias for) Europe/Rome. + # + # For a full Zone (implemented by {DataTimezone}), {canonical_zone} returns + # self. + # + # For a Link (implemented by {LinkedTimezone}), {canonical_zone} returns a + # {Timezone} instance representing the full Zone that the link targets. + # + # TZInfo can be used with different data sources (see the documentation for + # {TZInfo::DataSource}). Some DataSource implementations may not support + # distinguishing between full Zones and Links and will treat all time zones + # as full Zones. In this case, {canonical_zone} will always return `self`. + # + # There are two built-in DataSource implementations. + # {DataSources::RubyDataSource} (which will be used if the tzinfo-data gem + # is available) supports Link zones. {DataSources::ZoneinfoDataSource} + # returns Link zones as if they were full Zones. If the {canonical_zone} or + # {canonical_identifier} methods are needed, the tzinfo-data gem should be + # installed. + # + # The {TZInfo::DataSource.get} method can be used to check which DataSource + # implementation is being used. + # + # @return [Timezone] the canonical {Timezone} instance for this {Timezone}. + def canonical_zone + raise_unknown_timezone + end + + # Returns the {TimezonePeriod} that is valid at a given time. + # + # The UTC offset of the `utc_time` parameter is ignored (it is treated as a + # UTC time). 
Use the {period_for} method instead if the UTC offset of the
+ # time needs to be taken into consideration.
+ #
+ # @param utc_time [Object] a `Time`, `DateTime` or {Timestamp}.
+ # @return [TimezonePeriod] the {TimezonePeriod} that is valid at `utc_time`.
+ # @raise [ArgumentError] if `utc_time` is `nil`.
+ def period_for_utc(utc_time)
+ raise ArgumentError, 'utc_time must be specified' unless utc_time
+ period_for(Timestamp.for(utc_time, :treat_as_utc))
+ end
+
+ # Returns the {TimezonePeriod} that is valid at the given local time.
+ #
+ # The UTC offset of the `local_time` parameter is ignored (it is treated as
+ # a time in the time zone represented by `self`). Use the {period_for}
+ # method instead if the UTC offset of the time needs to be taken into
+ # consideration.
+ #
+ # _Warning:_ There are local times that have no equivalent UTC times (for
+ # example, during the transition from standard time to daylight savings
+ # time). There are also local times that have more than one UTC equivalent
+ # (for example, during the transition from daylight savings time to standard
+ # time).
+ #
+ # In the first case (no equivalent UTC time), a {PeriodNotFound} exception
+ # will be raised.
+ #
+ # In the second case (more than one equivalent UTC time), an {AmbiguousTime}
+ # exception will be raised unless the optional `dst` parameter or block
+ # handles the ambiguity.
+ #
+ # If the ambiguity is due to a transition from daylight savings time to
+ # standard time, the `dst` parameter can be used to select whether the
+ # daylight savings time or standard time is used. For example, the following
+ # code would raise an {AmbiguousTime} exception:
+ #
+ # tz = TZInfo::Timezone.get('America/New_York')
+ # tz.period_for_local(Time.new(2004,10,31,1,30,0))
+ #
+ # Specifying `dst = true` would select the daylight savings period from
+ # April to October 2004. Specifying `dst = false` would return the
+ # standard time period from October 2004 to April 2005.
+ #
+ # The `dst` parameter will not be able to resolve an ambiguity resulting
+ # from the clocks being set back without changing from daylight savings time
+ # to standard time. In this case, if a block is specified, it will be called
+ # to resolve the ambiguity. The block must take a single parameter - an
+ # `Array` of {TimezonePeriod}s that need to be resolved. The block can
+ # select and return a single {TimezonePeriod} or return `nil` or an empty
+ # `Array` to cause an {AmbiguousTime} exception to be raised.
+ #
+ # The default value of the `dst` parameter can be specified using
+ # {Timezone.default_dst=}.
+ #
+ # @param local_time [Object] a `Time`, `DateTime` or {Timestamp}.
+ # @param dst [Boolean] whether to resolve ambiguous local times by always
+ # selecting the period observing daylight savings time (`true`), always
+ # selecting the period observing standard time (`false`), or leaving the
+ # ambiguity unresolved (`nil`).
+ # @yield [periods] if the `dst` parameter did not resolve an ambiguity, an
+ # optional block is yielded to.
+ # @yieldparam periods [Array<TimezonePeriod>] an `Array` containing all
+ # the {TimezonePeriod}s that still match `local_time` after applying the
+ # `dst` parameter.
+ # @yieldreturn [Object] to resolve the ambiguity: a chosen {TimezonePeriod}
+ # or an `Array` containing a chosen {TimezonePeriod}; to leave the
+ # ambiguity unresolved: an empty `Array`, an `Array` containing more than
+ # one {TimezonePeriod}, or `nil`.
+ # @return [TimezonePeriod] the {TimezonePeriod} that is valid at + # `local_time`. + # @raise [ArgumentError] if `local_time` is `nil`. + # @raise [PeriodNotFound] if `local_time` is not valid for the time zone + # (there is no equivalent UTC time). + # @raise [AmbiguousTime] if `local_time` was ambiguous for the time zone and + # the `dst` parameter or block did not resolve the ambiguity. + def period_for_local(local_time, dst = Timezone.default_dst) + raise ArgumentError, 'local_time must be specified' unless local_time + local_time = Timestamp.for(local_time, :ignore) + results = periods_for_local(local_time) + + if results.empty? + raise PeriodNotFound, "#{local_time.strftime('%Y-%m-%d %H:%M:%S')} is an invalid local time." + elsif results.size < 2 + results.first + else + # ambiguous result try to resolve + + if !dst.nil? + matches = results.find_all {|period| period.dst? == dst} + results = matches if !matches.empty? + end + + if results.size < 2 + results.first + else + # still ambiguous, try the block + + if block_given? + results = yield results + end + + if results.is_a?(TimezonePeriod) + results + elsif results && results.size == 1 + results.first + else + raise AmbiguousTime, "#{local_time.strftime('%Y-%m-%d %H:%M:%S')} is an ambiguous local time." + end + end + end + end + + # Converts a time to the local time for the time zone. + # + # The result will be of type {TimeWithOffset} (if passed a `Time`), + # {DateTimeWithOffset} (if passed a `DateTime`) or {TimestampWithOffset} (if + # passed a {Timestamp}). {TimeWithOffset}, {DateTimeWithOffset} and + # {TimestampWithOffset} are subclasses of `Time`, `DateTime` and {Timestamp} + # that provide additional information about the local result. + # + # Unlike {utc_to_local}, {to_local} takes the UTC offset of the given time + # into consideration. + # + # @param time [Object] a `Time`, `DateTime` or {Timestamp}. + # @return [Object] the local equivalent of `time` as a {TimeWithOffset}, + # {DateTimeWithOffset} or {TimestampWithOffset}. + # @raise [ArgumentError] if `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} that does not have a + # specified UTC offset. + def to_local(time) + raise ArgumentError, 'time must be specified' unless time + + Timestamp.for(time) do |ts| + TimestampWithOffset.set_timezone_offset(ts, period_for(ts).offset) + end + end + + # Converts a time in UTC to the local time for the time zone. + # + # The result will be of type {TimeWithOffset} (if passed a `Time`), + # {DateTimeWithOffset} (if passed a `DateTime`) or {TimestampWithOffset} (if + # passed a {Timestamp}). {TimeWithOffset}, {DateTimeWithOffset} and + # {TimestampWithOffset} are subclasses of `Time`, `DateTime` and {Timestamp} + # that provide additional information about the local result. + # + # The UTC offset of the `utc_time` parameter is ignored (it is treated as a + # UTC time). Use the {to_local} method instead if the the UTC offset of the + # time needs to be taken into consideration. + # + # @param utc_time [Object] a `Time`, `DateTime` or {Timestamp}. + # @return [Object] the local equivalent of `utc_time` as a {TimeWithOffset}, + # {DateTimeWithOffset} or {TimestampWithOffset}. + # @raise [ArgumentError] if `utc_time` is `nil`. + def utc_to_local(utc_time) + raise ArgumentError, 'utc_time must be specified' unless utc_time + + Timestamp.for(utc_time, :treat_as_utc) do |ts| + to_local(ts) + end + end + + # Converts a local time for the time zone to UTC. 
+ # + # The result will either be a `Time`, `DateTime` or {Timestamp} according to + # the type of the `local_time` parameter. + # + # The UTC offset of the `local_time` parameter is ignored (it is treated as + # a time in the time zone represented by `self`). + # + # _Warning:_ There are local times that have no equivalent UTC times (for + # example, during the transition from standard time to daylight savings + # time). There are also local times that have more than one UTC equivalent + # (for example, during the transition from daylight savings time to standard + # time). + # + # In the first case (no equivalent UTC time), a {PeriodNotFound} exception + # will be raised. + # + # In the second case (more than one equivalent UTC time), an {AmbiguousTime} + # exception will be raised unless the optional `dst` parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the `dst` parameter can be used to select whether the + # daylight savings time or local time is used. For example, the following + # code would raise an {AmbiguousTime} exception: + # + # tz = TZInfo::Timezone.get('America/New_York') + # tz.period_for_local(Time.new(2004,10,31,1,30,0)) + # + # Specifying `dst = true` would select the daylight savings period from + # April to October 2004. Specifying `dst = false` would return the + # standard time period from October 2004 to April 2005. + # + # The `dst` parameter will not be able to resolve an ambiguity resulting + # from the clocks being set back without changing from daylight savings time + # to standard time. In this case, if a block is specified, it will be called + # to resolve the ambiguity. The block must take a single parameter - an + # `Array` of {TimezonePeriod}s that need to be resolved. The block can + # select and return a single {TimezonePeriod} or return `nil` or an empty + # `Array` to cause an {AmbiguousTime} exception to be raised. + # + # The default value of the `dst` parameter can be specified using + # {Timezone.default_dst=}. + # + # @param local_time [Object] a `Time`, `DateTime` or {Timestamp}. + # @param dst [Boolean] whether to resolve ambiguous local times by always + # selecting the period observing daylight savings time (`true`), always + # selecting the period observing standard time (`false`), or leaving the + # ambiguity unresolved (`nil`). + # @yield [periods] if the `dst` parameter did not resolve an ambiguity, an + # optional block is yielded to. + # @yieldparam periods [Array<TimezonePeriod>] an `Array` containing all + # the {TimezonePeriod}s that still match `local_time` after applying the + # `dst` parameter. + # @yieldreturn [Object] to resolve the ambiguity: a chosen {TimezonePeriod} + # or an `Array` containing a chosen {TimezonePeriod}; to leave the + # ambiguity unresolved: an empty `Array`, an `Array` containing more than + # one {TimezonePeriod}, or `nil`. + # @return [Object] the UTC equivalent of `local_time` as a `Time`, + # `DateTime` or {Timestamp}. + # @raise [ArgumentError] if `local_time` is `nil`. + # @raise [PeriodNotFound] if `local_time` is not valid for the time zone + # (there is no equivalent UTC time). + # @raise [AmbiguousTime] if `local_time` was ambiguous for the time zone and + # the `dst` parameter or block did not resolve the ambiguity. 
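+ # Illustrative usage (editor's sketch, not part of the upstream tzinfo
+ # source; assumes the tzinfo-data gem is installed):
+ #
+ # tz = TZInfo::Timezone.get('America/New_York')
+ # # Resolve the repeated hour by preferring daylight savings time.
+ # tz.local_to_utc(Time.new(2004,10,31,1,30,0), true) #=> 2004-10-31 05:30:00 UTC
+ # # Alternatively, resolve it with a block that picks one of the periods.
+ # tz.local_to_utc(Time.new(2004,10,31,1,30,0)) {|periods| periods.first}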
+ def local_to_utc(local_time, dst = Timezone.default_dst) + raise ArgumentError, 'local_time must be specified' unless local_time + + Timestamp.for(local_time, :ignore) do |ts| + period = if block_given? + period_for_local(ts, dst) {|periods| yield periods } + else + period_for_local(ts, dst) + end + + ts.add_and_set_utc_offset(-period.observed_utc_offset, :utc) + end + end + + # Creates a `Time` object based on the given (Gregorian calendar) date and + # time parameters. The parameters are interpreted as a local time in the + # time zone. The result has the appropriate `utc_offset`, `zone` and + # {TimeWithOffset#timezone_offset timezone_offset}. + # + # _Warning:_ There are time values that are not valid as local times in a + # time zone (for example, during the transition from standard time to + # daylight savings time). There are also time values that are ambiguous, + # occurring more than once with different offsets to UTC (for example, + # during the transition from daylight savings time to standard time). + # + # In the first case (an invalid local time), a {PeriodNotFound} exception + # will be raised. + # + # In the second case (more than one occurrence), an {AmbiguousTime} + # exception will be raised unless the optional `dst` parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the `dst` parameter can be used to select whether the + # daylight savings time or local time is used. For example, the following + # code would raise an {AmbiguousTime} exception: + # + # tz = TZInfo::Timezone.get('America/New_York') + # tz.local_time(2004,10,31,1,30,0,0) + # + # Specifying `dst = true` would return a `Time` with a UTC offset of -4 + # hours and abbreviation EDT (Eastern Daylight Time). Specifying `dst = + # false` would return a `Time` with a UTC offset of -5 hours and + # abbreviation EST (Eastern Standard Time). + # + # The `dst` parameter will not be able to resolve an ambiguity resulting + # from the clocks being set back without changing from daylight savings time + # to standard time. In this case, if a block is specified, it will be called + # to resolve the ambiguity. The block must take a single parameter - an + # `Array` of {TimezonePeriod}s that need to be resolved. The block can + # select and return a single {TimezonePeriod} or return `nil` or an empty + # `Array` to cause an {AmbiguousTime} exception to be raised. + # + # The default value of the `dst` parameter can be specified using + # {Timezone.default_dst=}. + # + # @param year [Integer] the year. + # @param month [Integer] the month (1-12). + # @param day [Integer] the day of the month (1-31). + # @param hour [Integer] the hour (0-23). + # @param minute [Integer] the minute (0-59). + # @param second [Integer] the second (0-59). + # @param sub_second [Numeric] the fractional part of the second as either + # a `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @param dst [Boolean] whether to resolve ambiguous local times by always + # selecting the period observing daylight savings time (`true`), always + # selecting the period observing standard time (`false`), or leaving the + # ambiguity unresolved (`nil`). + # @yield [periods] if the `dst` parameter did not resolve an ambiguity, an + # optional block is yielded to. + # @yieldparam periods [Array<TimezonePeriod>] an `Array` containing all + # the {TimezonePeriod}s that still match `local_time` after applying the + # `dst` parameter. 
+ # @yieldreturn [Object] to resolve the ambiguity: a chosen {TimezonePeriod} + # or an `Array` containing a chosen {TimezonePeriod}; to leave the + # ambiguity unresolved: an empty `Array`, an `Array` containing more than + # one {TimezonePeriod}, or `nil`. + # @return [TimeWithOffset] a new `Time` object based on the given values, + # interpreted as a local time in the time zone. + # @raise [ArgumentError] if either of `year`, `month`, `day`, `hour`, + # `minute`, or `second` is not an `Integer`. + # @raise [ArgumentError] if `sub_second` is not a `Rational`, or the + # `Integer` 0. + # @raise [ArgumentError] if `utc_offset` is not `nil`, not an `Integer` + # and not the `Symbol` `:utc`. + # @raise [RangeError] if `month` is not between 1 and 12. + # @raise [RangeError] if `day` is not between 1 and 31. + # @raise [RangeError] if `hour` is not between 0 and 23. + # @raise [RangeError] if `minute` is not between 0 and 59. + # @raise [RangeError] if `second` is not between 0 and 59. + # @raise [RangeError] if `sub_second` is a `Rational` but that is less + # than 0 or greater than or equal to 1. + # @raise [PeriodNotFound] if the date and time parameters do not specify a + # valid local time in the time zone. + # @raise [AmbiguousTime] if the date and time parameters are ambiguous for + # the time zone and the `dst` parameter or block did not resolve the + # ambiguity. + def local_time(year, month = 1, day = 1, hour = 0, minute = 0, second = 0, sub_second = 0, dst = Timezone.default_dst, &block) + local_timestamp(year, month, day, hour, minute, second, sub_second, dst, &block).to_time + end + + # Creates a `DateTime` object based on the given (Gregorian calendar) date + # and time parameters. The parameters are interpreted as a local time in the + # time zone. The result has the appropriate `offset` and + # {DateTimeWithOffset#timezone_offset timezone_offset}. + # + # _Warning:_ There are time values that are not valid as local times in a + # time zone (for example, during the transition from standard time to + # daylight savings time). There are also time values that are ambiguous, + # occurring more than once with different offsets to UTC (for example, + # during the transition from daylight savings time to standard time). + # + # In the first case (an invalid local time), a {PeriodNotFound} exception + # will be raised. + # + # In the second case (more than one occurrence), an {AmbiguousTime} + # exception will be raised unless the optional `dst` parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the `dst` parameter can be used to select whether the + # daylight savings time or local time is used. For example, the following + # code would raise an {AmbiguousTime} exception: + # + # tz = TZInfo::Timezone.get('America/New_York') + # tz.local_datetime(2004,10,31,1,30,0,0) + # + # Specifying `dst = true` would return a `Time` with a UTC offset of -4 + # hours and abbreviation EDT (Eastern Daylight Time). Specifying `dst = + # false` would return a `Time` with a UTC offset of -5 hours and + # abbreviation EST (Eastern Standard Time). + # + # The `dst` parameter will not be able to resolve an ambiguity resulting + # from the clocks being set back without changing from daylight savings time + # to standard time. In this case, if a block is specified, it will be called + # to resolve the ambiguity. The block must take a single parameter - an + # `Array` of {TimezonePeriod}s that need to be resolved. 
The block can + # select and return a single {TimezonePeriod} or return `nil` or an empty + # `Array` to cause an {AmbiguousTime} exception to be raised. + # + # The default value of the `dst` parameter can be specified using + # {Timezone.default_dst=}. + # + # @param year [Integer] the year. + # @param month [Integer] the month (1-12). + # @param day [Integer] the day of the month (1-31). + # @param hour [Integer] the hour (0-23). + # @param minute [Integer] the minute (0-59). + # @param second [Integer] the second (0-59). + # @param sub_second [Numeric] the fractional part of the second as either + # a `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @param dst [Boolean] whether to resolve ambiguous local times by always + # selecting the period observing daylight savings time (`true`), always + # selecting the period observing standard time (`false`), or leaving the + # ambiguity unresolved (`nil`). + # @yield [periods] if the `dst` parameter did not resolve an ambiguity, an + # optional block is yielded to. + # @yieldparam periods [Array<TimezonePeriod>] an `Array` containing all + # the {TimezonePeriod}s that still match `local_time` after applying the + # `dst` parameter. + # @yieldreturn [Object] to resolve the ambiguity: a chosen {TimezonePeriod} + # or an `Array` containing a chosen {TimezonePeriod}; to leave the + # ambiguity unresolved: an empty `Array`, an `Array` containing more than + # one {TimezonePeriod}, or `nil`. + # @return [DateTimeWithOffset] a new `DateTime` object based on the given + # values, interpreted as a local time in the time zone. + # @raise [ArgumentError] if either of `year`, `month`, `day`, `hour`, + # `minute`, or `second` is not an `Integer`. + # @raise [ArgumentError] if `sub_second` is not a `Rational`, or the + # `Integer` 0. + # @raise [ArgumentError] if `utc_offset` is not `nil`, not an `Integer` + # and not the `Symbol` `:utc`. + # @raise [RangeError] if `month` is not between 1 and 12. + # @raise [RangeError] if `day` is not between 1 and 31. + # @raise [RangeError] if `hour` is not between 0 and 23. + # @raise [RangeError] if `minute` is not between 0 and 59. + # @raise [RangeError] if `second` is not between 0 and 59. + # @raise [RangeError] if `sub_second` is a `Rational` but that is less + # than 0 or greater than or equal to 1. + # @raise [PeriodNotFound] if the date and time parameters do not specify a + # valid local time in the time zone. + # @raise [AmbiguousTime] if the date and time parameters are ambiguous for + # the time zone and the `dst` parameter or block did not resolve the + # ambiguity. + def local_datetime(year, month = 1, day = 1, hour = 0, minute = 0, second = 0, sub_second = 0, dst = Timezone.default_dst, &block) + local_timestamp(year, month, day, hour, minute, second, sub_second, dst, &block).to_datetime + end + + # Creates a {Timestamp} object based on the given (Gregorian calendar) date + # and time parameters. The parameters are interpreted as a local time in the + # time zone. The result has the appropriate {Timestamp#utc_offset + # utc_offset} and {TimestampWithOffset#timezone_offset timezone_offset}. + # + # _Warning:_ There are time values that are not valid as local times in a + # time zone (for example, during the transition from standard time to + # daylight savings time). There are also time values that are ambiguous, + # occurring more than once with different offsets to UTC (for example, + # during the transition from daylight savings time to standard time). 
+ # + # In the first case (an invalid local time), a {PeriodNotFound} exception + # will be raised. + # + # In the second case (more than one occurrence), an {AmbiguousTime} + # exception will be raised unless the optional `dst` parameter or block + # handles the ambiguity. + # + # If the ambiguity is due to a transition from daylight savings time to + # standard time, the `dst` parameter can be used to select whether the + # daylight savings time or local time is used. For example, the following + # code would raise an {AmbiguousTime} exception: + # + # tz = TZInfo::Timezone.get('America/New_York') + # tz.local_timestamp(2004,10,31,1,30,0,0) + # + # Specifying `dst = true` would return a `Time` with a UTC offset of -4 + # hours and abbreviation EDT (Eastern Daylight Time). Specifying `dst = + # false` would return a `Time` with a UTC offset of -5 hours and + # abbreviation EST (Eastern Standard Time). + # + # The `dst` parameter will not be able to resolve an ambiguity resulting + # from the clocks being set back without changing from daylight savings time + # to standard time. In this case, if a block is specified, it will be called + # to resolve the ambiguity. The block must take a single parameter - an + # `Array` of {TimezonePeriod}s that need to be resolved. The block can + # select and return a single {TimezonePeriod} or return `nil` or an empty + # `Array` to cause an {AmbiguousTime} exception to be raised. + # + # The default value of the `dst` parameter can be specified using + # {Timezone.default_dst=}. + # + # @param year [Integer] the year. + # @param month [Integer] the month (1-12). + # @param day [Integer] the day of the month (1-31). + # @param hour [Integer] the hour (0-23). + # @param minute [Integer] the minute (0-59). + # @param second [Integer] the second (0-59). + # @param sub_second [Numeric] the fractional part of the second as either + # a `Rational` that is greater than or equal to 0 and less than 1, or + # the `Integer` 0. + # @param dst [Boolean] whether to resolve ambiguous local times by always + # selecting the period observing daylight savings time (`true`), always + # selecting the period observing standard time (`false`), or leaving the + # ambiguity unresolved (`nil`). + # @yield [periods] if the `dst` parameter did not resolve an ambiguity, an + # optional block is yielded to. + # @yieldparam periods [Array<TimezonePeriod>] an `Array` containing all + # the {TimezonePeriod}s that still match `local_time` after applying the + # `dst` parameter. + # @yieldreturn [Object] to resolve the ambiguity: a chosen {TimezonePeriod} + # or an `Array` containing a chosen {TimezonePeriod}; to leave the + # ambiguity unresolved: an empty `Array`, an `Array` containing more than + # one {TimezonePeriod}, or `nil`. + # @return [TimestampWithOffset] a new {Timestamp} object based on the given + # values, interpreted as a local time in the time zone. + # @raise [ArgumentError] if either of `year`, `month`, `day`, `hour`, + # `minute`, or `second` is not an `Integer`. + # @raise [ArgumentError] if `sub_second` is not a `Rational`, or the + # `Integer` 0. + # @raise [ArgumentError] if `utc_offset` is not `nil`, not an `Integer` + # and not the `Symbol` `:utc`. + # @raise [RangeError] if `month` is not between 1 and 12. + # @raise [RangeError] if `day` is not between 1 and 31. + # @raise [RangeError] if `hour` is not between 0 and 23. + # @raise [RangeError] if `minute` is not between 0 and 59. + # @raise [RangeError] if `second` is not between 0 and 59. 
+ # @raise [RangeError] if `sub_second` is a `Rational` but that is less + # than 0 or greater than or equal to 1. + # @raise [PeriodNotFound] if the date and time parameters do not specify a + # valid local time in the time zone. + # @raise [AmbiguousTime] if the date and time parameters are ambiguous for + # the time zone and the `dst` parameter or block did not resolve the + # ambiguity. + def local_timestamp(year, month = 1, day = 1, hour = 0, minute = 0, second = 0, sub_second = 0, dst = Timezone.default_dst, &block) + ts = Timestamp.create(year, month, day, hour, minute, second, sub_second) + timezone_offset = period_for_local(ts, dst, &block).offset + utc_offset = timezone_offset.observed_utc_offset + TimestampWithOffset.new(ts.value - utc_offset, sub_second, utc_offset).set_timezone_offset(timezone_offset) + end + + # Returns the unique offsets used by the time zone up to a given time (`to`) + # as an `Array` of {TimezoneOffset} instances. + # + # A from time may also be supplied using the `from` parameter. If from is + # not `nil`, only offsets used from that time onwards will be returned. + # + # Comparisons with `to` are exclusive. Comparisons with `from` are + # inclusive. + # + # @param to [Object] a `Time`, `DateTime` or {Timestamp} specifying the + # latest (exclusive) offset to return. + # @param from [Object] an optional `Time`, `DateTime` or {Timestamp} + # specifying the earliest (inclusive) offset to return. + # @return [Array<TimezoneOffsets>] the offsets that are used earlier than + # `to` and, if specified, at or later than `from`. Offsets may be returned + # in any order. + # @raise [ArgumentError] if `from` is specified and `to` is not greater than + # `from`. + # @raise [ArgumentError] is raised if `to` is `nil`. + # @raise [ArgumentError] if either `to` or `from` is a {Timestamp} with an + # unspecified offset. + def offsets_up_to(to, from = nil) + raise ArgumentError, 'to must be specified' unless to + + to_timestamp = Timestamp.for(to) + from_timestamp = from && Timestamp.for(from) + transitions = transitions_up_to(to_timestamp, from_timestamp) + + if transitions.empty? + # No transitions in the range, find the period that covers it. + + if from_timestamp + # Use the from date as it is inclusive. + period = period_for(from_timestamp) + else + # to is exclusive, so this can't be used with period_for. However, any + # time earlier than to can be used. Subtract 1 hour. + period = period_for(to_timestamp.add_and_set_utc_offset(-3600, :utc)) + end + + [period.offset] + else + result = Set.new + + first = transitions.first + result << first.previous_offset unless from_timestamp && first.at == from_timestamp + + transitions.each do |t| + result << t.offset + end + + result.to_a + end + end + + # Returns the canonical identifier of this time zone. + # + # This is a shortcut for calling `canonical_zone.identifier`. Please refer + # to the {canonical_zone} documentation for further information. + # + # @return [String] the canonical identifier of this time zone. + def canonical_identifier + canonical_zone.identifier + end + + # @return [TimeWithOffset] the current local time in the time zone. + def now + to_local(Time.now) + end + + # @return [TimezonePeriod] the current {TimezonePeriod} for the time zone. + def current_period + period_for(Time.now) + end + + # Returns the current local time and {TimezonePeriod} for the time zone as + # an `Array`. The first element is the time as a {TimeWithOffset}. The + # second element is the period. 
+ # + # @return [Array] an `Array` containing the current {TimeWithOffset} for the + # time zone as the first element and the current {TimezonePeriod} for the + # time zone as the second element. + def current_time_and_period + period = nil + + local_time = Timestamp.for(Time.now) do |ts| + period = period_for(ts) + TimestampWithOffset.set_timezone_offset(ts, period.offset) + end + + [local_time, period] + end + alias current_period_and_time current_time_and_period + + # Converts a time to local time for the time zone and returns a `String` + # representation of the local time according to the given format. + # + # `Timezone#strftime` first expands any occurrences of `%Z` in the format + # string to the time zone abbreviation for the local time (for example, EST + # or EDT). Depending on the type of `time` parameter, the result of the + # expansion is then passed to either `Time#strftime`, `DateTime#strftime` or + # `Timestamp#strftime` to handle any other format directives. + # + # This method is equivalent to the following: + # + # time_zone.to_local(time).strftime(format) + # + # @param format [String] the format string. + # @param time [Object] a `Time`, `DateTime` or `Timestamp`. + # @return [String] the formatted local time. + # @raise [ArgumentError] if `format` or `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} with an unspecified UTC + # offset. + def strftime(format, time = Time.now) + to_local(time).strftime(format) + end + + # @param time [Object] a `Time`, `DateTime` or `Timestamp`. + # @return [String] the abbreviation of this {Timezone} at the given time. + # @raise [ArgumentError] if `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} with an unspecified UTC + # offset. + def abbreviation(time = Time.now) + period_for(time).abbreviation + end + alias abbr abbreviation + + # @param time [Object] a `Time`, `DateTime` or `Timestamp`. + # @return [Boolean] whether daylight savings time is in effect at the given + # time. + # @raise [ArgumentError] if `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} with an unspecified UTC + # offset. + def dst?(time = Time.now) + period_for(time).dst? + end + + # Returns the base offset from UTC in seconds at the given time. This does + # not include any adjustment made for daylight savings time and will + # typically remain constant throughout the year. + # + # To obtain the observed offset from UTC, including the effect of daylight + # savings time, use {observed_utc_offset} instead. + # + # If you require accurate {base_utc_offset} values, you should install the + # tzinfo-data gem and set {DataSources::RubyDataSource} as the {DataSource}. + # When using {DataSources::ZoneinfoDataSource}, the value of + # {base_utc_offset} has to be derived from changes to the observed UTC + # offset and DST status since it is not included in zoneinfo files. + # + # @param time [Object] a `Time`, `DateTime` or `Timestamp`. + # @return [Integer] the base offset from UTC in seconds at the given time. + # @raise [ArgumentError] if `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} with an unspecified UTC + # offset. + def base_utc_offset(time = Time.now) + period_for(time).base_utc_offset + end + + # Returns the observed offset from UTC in seconds at the given time. This + # includes adjustments made for daylight savings time. + # + # @param time [Object] a `Time`, `DateTime` or `Timestamp`. + # @return [Integer] the observed offset from UTC in seconds at the given + # time. 
+ # @raise [ArgumentError] if `time` is `nil`. + # @raise [ArgumentError] if `time` is a {Timestamp} with an unspecified UTC + # offset. + def observed_utc_offset(time = Time.now) + period_for(time).observed_utc_offset + end + alias utc_offset observed_utc_offset + + # Compares this {Timezone} with another based on the {identifier}. + # + # @param tz [Object] an `Object` to compare this {Timezone} with. + # @return [Integer] -1 if `tz` is less than `self`, 0 if `tz` is equal to + # `self` and +1 if `tz` is greater than `self`, or `nil` if `tz` is not an + # instance of {Timezone}. + def <=>(tz) + return nil unless tz.is_a?(Timezone) + identifier <=> tz.identifier + end + + # @param tz [Object] an `Object` to compare this {Timezone} with. + # @return [Boolean] `true` if `tz` is an instance of {Timezone} and has the + # same {identifier} as `self`, otherwise `false`. + def eql?(tz) + self == tz + end + + # @return [Integer] a hash based on the {identifier}. + def hash + identifier.hash + end + + # Matches `regexp` against the {identifier} of this {Timezone}. + # + # @param regexp [Regexp] a `Regexp` to match against the {identifier} of + # this {Timezone}. + # @return [Integer] the position the match starts, or `nil` if there is no + # match. + def =~(regexp) + regexp =~ identifier + end + + # Returns a serialized representation of this {Timezone}. This method is + # called when using `Marshal.dump` with an instance of {Timezone}. + # + # @param limit [Integer] the maximum depth to dump - ignored. + # @return [String] a serialized representation of this {Timezone}. + def _dump(limit) + identifier + end + + # Loads a {Timezone} from the serialized representation returned by {_dump}. + # This is method is called when using `Marshal.load` or `Marshal.restore` + # to restore a serialized {Timezone}. + # + # @param data [String] a serialized representation of a {Timezone}. + # @return [Timezone] the result of converting `data` back into a {Timezone}. + def self._load(data) + Timezone.get(data) + end + + private + + # Raises an {UnknownTimezone} exception. + # + # @raise [UnknownTimezone] always. + def raise_unknown_timezone + raise UnknownTimezone, 'TZInfo::Timezone should not be constructed directly (use TZInfo::Timezone.get instead)' + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_offset.rb new file mode 100644 index 0000000..25c98c3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_offset.rb @@ -0,0 +1,111 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Represents an offset from UTC observed by a time zone. + class TimezoneOffset + # Returns the base offset from UTC in seconds (`observed_utc_offset - + # std_offset`). This does not include any adjustment made for daylight + # savings time and will typically remain constant throughout the year. + # + # To obtain the currently observed offset from UTC, including the effect of + # daylight savings time, use {observed_utc_offset} instead. + # + # If you require accurate {base_utc_offset} values, you should install the + # tzinfo-data gem and set {DataSources::RubyDataSource} as the {DataSource}. + # When using {DataSources::ZoneinfoDataSource}, the value of + # {base_utc_offset} has to be derived from changes to the observed UTC + # offset and DST status since it is not included in zoneinfo files. + # + # @return [Integer] the base offset from UTC in seconds. 
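+ # As an editor's illustration (not part of the upstream source): for
+ # Europe/London while daylight savings is observed, {base_utc_offset} is 0,
+ # {std_offset} is 3600 and {observed_utc_offset} is 3600 (abbreviation BST);
+ # outside daylight savings, all three values are 0 and the abbreviation is
+ # GMT.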
+ attr_reader :base_utc_offset + alias utc_offset base_utc_offset + + # Returns the offset from the time zone's standard time in seconds + # (`observed_utc_offset - base_utc_offset`). Zero when daylight savings time + # is not in effect. Non-zero (usually 3600 = 1 hour) if daylight savings is + # being observed. + # + # If you require accurate {std_offset} values, you should install the + # tzinfo-data gem and set {DataSources::RubyDataSource} as the {DataSource}. + # When using {DataSources::ZoneinfoDataSource}, the value of {std_offset} + # has to be derived from changes to the observed UTC offset and DST status + # since it is not included in zoneinfo files. + # + # @return [Integer] the offset from the time zone's standard time in + # seconds. + attr_reader :std_offset + + # Returns the observed offset from UTC in seconds (`base_utc_offset + + # std_offset`). This includes adjustments made for daylight savings time. + # + # @return [Integer] the observed offset from UTC in seconds. + attr_reader :observed_utc_offset + alias utc_total_offset observed_utc_offset + + # The abbreviation that identifies this offset. For example GMT + # (Greenwich Mean Time) or BST (British Summer Time) for Europe/London. + # + # @return [String] the abbreviation that identifies this offset. + attr_reader :abbreviation + alias abbr abbreviation + + # Initializes a new {TimezoneOffset}. + # + # {TimezoneOffset} instances should not normally be constructed manually. + # + # The passed in `abbreviation` instance will be frozen. + # + # @param base_utc_offset [Integer] the base offset from UTC in seconds. + # @param std_offset [Integer] the offset from standard time in seconds. + # @param abbreviation [String] the abbreviation identifying the offset. + def initialize(base_utc_offset, std_offset, abbreviation) + @base_utc_offset = base_utc_offset + @std_offset = std_offset + @abbreviation = abbreviation.freeze + + @observed_utc_offset = @base_utc_offset + @std_offset + end + + # Determines if daylight savings is in effect (i.e. if {std_offset} is + # non-zero). + # + # @return [Boolean] `true` if {std_offset} is non-zero, otherwise `false`. + def dst? + @std_offset != 0 + end + + # Determines if this {TimezoneOffset} is equal to another instance. + # + # @param toi [Object] the instance to test for equality. + # @return [Boolean] `true` if `toi` is a {TimezoneOffset} with the same + # {utc_offset}, {std_offset} and {abbreviation} as this {TimezoneOffset}, + # otherwise `false`. + def ==(toi) + toi.kind_of?(TimezoneOffset) && + base_utc_offset == toi.base_utc_offset && std_offset == toi.std_offset && abbreviation == toi.abbreviation + end + + # Determines if this {TimezoneOffset} is equal to another instance. + # + # @param toi [Object] the instance to test for equality. + # @return [Boolean] `true` if `toi` is a {TimezoneOffset} with the same + # {utc_offset}, {std_offset} and {abbreviation} as this {TimezoneOffset}, + # otherwise `false`. + def eql?(toi) + self == toi + end + + # @return [Integer] a hash based on {utc_offset}, {std_offset} and + # {abbreviation}. + def hash + [@base_utc_offset, @std_offset, @abbreviation].hash + end + + # @return [String] the internal object state as a programmer-readable + # `String`. 
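+ # Illustrative sketch (editor's addition, not part of the upstream source).
+ # {TimezoneOffset} instances are normally obtained from a {TimezonePeriod},
+ # but constructing one directly shows how the values relate:
+ #
+ # offset = TZInfo::TimezoneOffset.new(-18000, 3600, 'EDT')
+ # offset.observed_utc_offset #=> -14400
+ # offset.dst? #=> true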
+ def inspect + "#<#{self.class}: @base_utc_offset=#{@base_utc_offset}, @std_offset=#{@std_offset}, @abbreviation=#{@abbreviation}>" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_period.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_period.rb new file mode 100644 index 0000000..a2cc985 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_period.rb @@ -0,0 +1,179 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # {TimezonePeriod} represents a period of time for a time zone where the same + # offset from UTC applies. It provides access to the observed offset, time + # zone abbreviation, start time and end time. + # + # The period of time can be unbounded at the start, end, or both the start + # and end. + # + # @abstract Time zone period data will returned as an instance of one of the + # subclasses of {TimezonePeriod}. + class TimezonePeriod + # @return [TimezoneOffset] the offset that applies in the period of time. + attr_reader :offset + + # Initializes a {TimezonePeriod}. + # + # @param offset [TimezoneOffset] the offset that is observed for the period + # of time. + # @raise [ArgumentError] if `offset` is `nil`. + def initialize(offset) + raise ArgumentError, 'offset must be specified' unless offset + @offset = offset + end + + # @return [TimezoneTransition] the transition that defines the start of this + # {TimezonePeriod} (`nil` if the start is unbounded). + def start_transition + raise_not_implemented(:start_transition) + end + + # @return [TimezoneTransition] the transition that defines the end of this + # {TimezonePeriod} (`nil` if the end is unbounded). + def end_transition + raise_not_implemented(:end_transition) + end + + # Returns the base offset from UTC in seconds (`observed_utc_offset - + # std_offset`). This does not include any adjustment made for daylight + # savings time and will typically remain constant throughout the year. + # + # To obtain the currently observed offset from UTC, including the effect of + # daylight savings time, use {observed_utc_offset} instead. + # + # If you require accurate {base_utc_offset} values, you should install the + # tzinfo-data gem and set {DataSources::RubyDataSource} as the {DataSource}. + # When using {DataSources::ZoneinfoDataSource}, the value of + # {base_utc_offset} has to be derived from changes to the observed UTC + # offset and DST status since it is not included in zoneinfo files. + # + # @return [Integer] the base offset from UTC in seconds. + def base_utc_offset + @offset.base_utc_offset + end + alias utc_offset base_utc_offset + + # Returns the offset from the time zone's standard time in seconds + # (`observed_utc_offset - base_utc_offset`). Zero when daylight savings time + # is not in effect. Non-zero (usually 3600 = 1 hour) if daylight savings is + # being observed. + # + # If you require accurate {std_offset} values, you should install the + # tzinfo-data gem and set {DataSources::RubyDataSource} as the {DataSource}. + # When using {DataSources::ZoneinfoDataSource}, the value of {std_offset} + # has to be derived from changes to the observed UTC offset and DST status + # since it is not included in zoneinfo files. + # + # @return [Integer] the offset from the time zone's standard time in + # seconds. + def std_offset + @offset.std_offset + end + + # The abbreviation that identifies this offset. For example GMT + # (Greenwich Mean Time) or BST (British Summer Time) for Europe/London. 
+ # + # @return [String] the abbreviation that identifies this offset. + def abbreviation + @offset.abbreviation + end + alias abbr abbreviation + alias zone_identifier abbreviation + + # Returns the observed offset from UTC in seconds (`base_utc_offset + + # std_offset`). This includes adjustments made for daylight savings time. + # + # @return [Integer] the observed offset from UTC in seconds. + def observed_utc_offset + @offset.observed_utc_offset + end + alias utc_total_offset observed_utc_offset + + # Determines if daylight savings is in effect (i.e. if {std_offset} is + # non-zero). + # + # @return [Boolean] `true` if {std_offset} is non-zero, otherwise `false`. + def dst? + @offset.dst? + end + + # Returns the UTC start time of the period or `nil` if the start of the + # period is unbounded. + # + # The result is returned as a {Timestamp}. To obtain the start time as a + # `Time` or `DateTime`, call either {Timestamp#to_time to_time} or + # {Timestamp#to_datetime to_datetime} on the result. + # + # @return [Timestamp] the UTC start time of the period or `nil` if the start + # of the period is unbounded. + def starts_at + timestamp(start_transition) + end + + # Returns the UTC end time of the period or `nil` if the end of the period + # is unbounded. + # + # The result is returned as a {Timestamp}. To obtain the end time as a + # `Time` or `DateTime`, call either {Timestamp#to_time to_time} or + # {Timestamp#to_datetime to_datetime} on the result. + # + # @return [Timestamp] the UTC end time of the period or `nil` if the end of + # the period is unbounded. + def ends_at + timestamp(end_transition) + end + + # Returns the local start time of the period or `nil` if the start of the + # period is unbounded. + # + # The result is returned as a {TimestampWithOffset}. To obtain the start + # time as a `Time` or `DateTime`, call either {TimestampWithOffset#to_time + # to_time} or {TimestampWithOffset#to_datetime to_datetime} on the result. + # + # @return [TimestampWithOffset] the local start time of the period or `nil` + # if the start of the period is unbounded. + def local_starts_at + timestamp_with_offset(start_transition) + end + + # Returns the local end time of the period or `nil` if the end of the period + # is unbounded. + # + # The result is returned as a {TimestampWithOffset}. To obtain the end time + # as a `Time` or `DateTime`, call either {TimestampWithOffset#to_time + # to_time} or {TimestampWithOffset#to_datetime to_datetime} on the result. + # + # @return [TimestampWithOffset] the local end time of the period or `nil` if + # the end of the period is unbounded. + def local_ends_at + timestamp_with_offset(end_transition) + end + + private + + # Raises a {NotImplementedError} to indicate that subclasses should override + # a method. + # + # @raise [NotImplementedError] always. + def raise_not_implemented(method_name) + raise NotImplementedError, "Subclasses must override #{method_name}" + end + + # @param transition [TimezoneTransition] a transition or `nil`. + # @return [Timestamp] the {Timestamp} representing when a transition occurs, + # or `nil` if `transition` is `nil`. + def timestamp(transition) + transition ? transition.at : nil + end + + # @param transition [TimezoneTransition] a transition or `nil`. + # @return [TimestampWithOffset] a {Timestamp} representing when a transition + # occurs with offset set to {#offset}, or `nil` if `transition` is `nil`. + def timestamp_with_offset(transition) + transition ? 
TimestampWithOffset.set_timezone_offset(transition.at, offset) : nil
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_proxy.rb
new file mode 100644
index 0000000..749f840
--- /dev/null
+++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_proxy.rb
@@ -0,0 +1,96 @@
+# encoding: UTF-8
+
+module TZInfo
+
+ # A proxy class standing in for a {Timezone} with a given identifier.
+ # {TimezoneProxy} inherits from {Timezone} and can be treated identically to
+ # {Timezone} instances loaded with {Timezone.get}.
+ #
+ # {TimezoneProxy} instances are used to avoid the performance overhead of
+ # loading time zone data into memory, for example, by {Timezone.all}.
+ #
+ # The first time an attempt is made to access the data for the time zone, the
+ # real {Timezone} will be loaded. If the proxy's identifier was not
+ # valid, then an exception will be raised at this point.
+ class TimezoneProxy < Timezone
+ # Initializes a new {TimezoneProxy}.
+ #
+ # The `identifier` parameter is not checked when initializing the proxy. It
+ # will be validated when the real {Timezone} instance is loaded.
+ #
+ # @param identifier [String] an IANA Time Zone Database time zone
+ # identifier.
+ def initialize(identifier)
+ super()
+ @identifier = identifier
+ @real_timezone = nil
+ end
+
+ # (see Timezone#identifier)
+ def identifier
+ @real_timezone ? @real_timezone.identifier : @identifier
+ end
+
+ # (see Timezone#period_for)
+ def period_for(time)
+ real_timezone.period_for(time)
+ end
+
+ # (see Timezone#periods_for_local)
+ def periods_for_local(local_time)
+ real_timezone.periods_for_local(local_time)
+ end
+
+ # (see Timezone#transitions_up_to)
+ def transitions_up_to(to, from = nil)
+ real_timezone.transitions_up_to(to, from)
+ end
+
+ # (see Timezone#canonical_zone)
+ def canonical_zone
+ real_timezone.canonical_zone
+ end
+
+ # Returns a serialized representation of this {TimezoneProxy}. This method
+ # is called when using `Marshal.dump` with an instance of {TimezoneProxy}.
+ #
+ # @param limit [Integer] the maximum depth to dump - ignored.
+ # @return [String] a serialized representation of this {TimezoneProxy}.
+ def _dump(limit)
+ identifier
+ end
+
+ # Loads a {TimezoneProxy} from the serialized representation returned by
+ # {_dump}. This method is called when using `Marshal.load` or
+ # `Marshal.restore` to restore a serialized {Timezone}.
+ #
+ # @param data [String] a serialized representation of a {TimezoneProxy}.
+ # @return [TimezoneProxy] the result of converting `data` back into a
+ # {TimezoneProxy}.
+ def self._load(data)
+ TimezoneProxy.new(data)
+ end
+
+ private
+
+ # Returns the real {Timezone} instance being proxied.
+ #
+ # The real {Timezone} is loaded using {Timezone.get} on the first access.
+ #
+ # @return [Timezone] the real {Timezone} instance being proxied.
+ def real_timezone
+ # Thread-safety: It is possible that the value of @real_timezone may be
+ # calculated multiple times in concurrently executing threads. It is not
+ # worth the overhead of locking to ensure that @real_timezone is only
+ # calculated once.
+ unless @real_timezone
+ result = Timezone.get(@identifier)
+ return result if frozen?
+ @real_timezone = result + end + + @real_timezone + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_transition.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_transition.rb new file mode 100644 index 0000000..db75b23 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/timezone_transition.rb @@ -0,0 +1,98 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Represents a transition from one observed UTC offset ({TimezoneOffset} to + # another for a time zone. + class TimezoneTransition + # @return [TimezoneOffset] the offset this transition changes to. + attr_reader :offset + + # @return [TimezoneOffset] the offset this transition changes from. + attr_reader :previous_offset + + # When this transition occurs as an `Integer` number of seconds since + # 1970-01-01 00:00:00 UTC ignoring leap seconds (i.e. each day is treated as + # if it were 86,400 seconds long). Equivalent to the result of calling the + # {Timestamp#value value} method on the {Timestamp} returned by {at}. + # + # @return [Integer] when this transition occurs as a number of seconds since + # 1970-01-01 00:00:00 UTC ignoring leap seconds. + attr_reader :timestamp_value + + # Initializes a new {TimezoneTransition}. + # + # {TimezoneTransition} instances should not normally be constructed + # manually. + # + # @param offset [TimezoneOffset] the offset the transition changes to. + # @param previous_offset [TimezoneOffset] the offset the transition changes + # from. + # @param timestamp_value [Integer] when the transition occurs as a + # number of seconds since 1970-01-01 00:00:00 UTC ignoring leap seconds + # (i.e. each day is treated as if it were 86,400 seconds long). + def initialize(offset, previous_offset, timestamp_value) + @offset = offset + @previous_offset = previous_offset + @timestamp_value = timestamp_value + end + + # Returns a {Timestamp} instance representing the UTC time when this + # transition occurs. + # + # To obtain the result as a `Time` or `DateTime`, call either + # {Timestamp#to_time to_time} or {Timestamp#to_datetime to_datetime} on the + # {Timestamp} instance that is returned. + # + # @return [Timestamp] the UTC time when this transition occurs. + def at + Timestamp.utc(@timestamp_value) + end + + # Returns a {TimestampWithOffset} instance representing the local time when + # this transition causes the previous observance to end (calculated from + # {at} using {previous_offset}). + # + # To obtain the result as a `Time` or `DateTime`, call either + # {TimestampWithOffset#to_time to_time} or {TimestampWithOffset#to_datetime + # to_datetime} on the {TimestampWithOffset} instance that is returned. + # + # @return [TimestampWithOffset] the local time when this transition causes + # the previous observance to end. + def local_end_at + TimestampWithOffset.new(@timestamp_value, 0, @previous_offset.observed_utc_offset).set_timezone_offset(@previous_offset) + end + + # Returns a {TimestampWithOffset} instance representing the local time when + # this transition causes the next observance to start (calculated from {at} + # using {offset}). + # + # To obtain the result as a `Time` or `DateTime`, call either + # {TimestampWithOffset#to_time to_time} or {TimestampWithOffset#to_datetime + # to_datetime} on the {TimestampWithOffset} instance that is returned. + # + # @return [TimestampWithOffset] the local time when this transition causes + # the next observance to start. 
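The `at`, `local_end_at` and `local_start_at` accessors documented here are easiest to see side by side. A small sketch using TZInfo's public API (again assuming a data source such as `tzinfo-data` is available):

```ruby
require 'tzinfo'

tz = TZInfo::Timezone.get('America/New_York')

# The two DST transitions that occur during 2021 (UTC bounds, `to` exclusive).
tz.transitions_up_to(Time.utc(2022, 1, 1), Time.utc(2021, 1, 1)).each do |t|
  # `at` is the UTC instant; the local views differ because they are
  # calculated with the offsets on either side of the transition.
  puts "UTC:         #{t.at.to_time}"
  puts "local end:   #{t.local_end_at.to_time} (#{t.previous_offset.abbreviation})"
  puts "local start: #{t.local_start_at.to_time} (#{t.offset.abbreviation})"
end
```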
+ def local_start_at + TimestampWithOffset.new(@timestamp_value, 0, @offset.observed_utc_offset).set_timezone_offset(@offset) + end + + # Determines if this {TimezoneTransition} is equal to another instance. + # + # @param tti [Object] the instance to test for equality. + # @return [Boolean] `true` if `tti` is a {TimezoneTransition} with the same + # {offset}, {previous_offset} and {timestamp_value} as this + # {TimezoneTransition}, otherwise `false`. + def ==(tti) + tti.kind_of?(TimezoneTransition) && + offset == tti.offset && previous_offset == tti.previous_offset && timestamp_value == tti.timestamp_value + end + alias eql? == + + # @return [Integer] a hash based on {offset}, {previous_offset} and + # {timestamp_value}. + def hash + [@offset, @previous_offset, @timestamp_value].hash + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/transition_rule.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/transition_rule.rb new file mode 100644 index 0000000..58ef568 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/transition_rule.rb @@ -0,0 +1,455 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Base class for rules definining the transition between standard and daylight + # savings time. + # + # @abstract + # @private + class TransitionRule #:nodoc: + # Returns the number of seconds after midnight local time on the day + # identified by the rule at which the transition occurs. Can be negative to + # denote a time on the prior day. Can be greater than or equal to 86,400 to + # denote a time of the following day. + # + # @return [Integer] the time in seconds after midnight local time at which + # the transition occurs. + attr_reader :transition_at + + # Initializes a new {TransitionRule}. + # + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. + # @raise [ArgumentError] if `transition_at` is not an `Integer`. + def initialize(transition_at) + raise ArgumentError, 'Invalid transition_at' unless transition_at.kind_of?(Integer) + @transition_at = transition_at + end + + # Calculates the time of the transition from a given offset on a given year. + # + # @param offset [TimezoneOffset] the current offset at the time the rule + # will transition. + # @param year [Integer] the year in which the transition occurs (local + # time). + # @return [TimestampWithOffset] the time at which the transition occurs. + def at(offset, year) + day = get_day(offset, year) + TimestampWithOffset.set_timezone_offset(Timestamp.for(day + @transition_at), offset) + end + + # Determines if this {TransitionRule} is equal to another instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {TransitionRule} with the same + # {transition_at} as this {TransitionRule}, otherwise `false`. + def ==(r) + r.kind_of?(TransitionRule) && @transition_at == r.transition_at + end + alias eql? == + + # @return [Integer] a hash based on {hash_args} (defaulting to + # {transition_at}). + def hash + hash_args.hash + end + + protected + + # @return [Array] an `Array` of parameters that will influence the output of + # {hash}. + def hash_args + [@transition_at] + end + end + private_constant :TransitionRule + + # A base class for transition rules that activate based on an integer day of + # the year. + # + # @abstract + # @private + class DayOfYearTransitionRule < TransitionRule #:nodoc: + # Initializes a new {DayOfYearTransitionRule}. 
+ # + # @param day [Integer] the day of the year on which the transition occurs. + # The precise meaning is defined by subclasses. + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. + # @raise [ArgumentError] if `transition_at` is not an `Integer`. + # @raise [ArgumentError] if `day` is not an `Integer`. + def initialize(day, transition_at) + super(transition_at) + raise ArgumentError, 'Invalid day' unless day.kind_of?(Integer) + @seconds = day * 86400 + end + + # Determines if this {DayOfYearTransitionRule} is equal to another instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {DayOfYearTransitionRule} with the + # same {transition_at} and day as this {DayOfYearTransitionRule}, + # otherwise `false`. + def ==(r) + super(r) && r.kind_of?(DayOfYearTransitionRule) && @seconds == r.seconds + end + alias eql? == + + protected + + # @return [Integer] the day multipled by the number of seconds in a day. + attr_reader :seconds + + # (see TransitionRule#hash_args) + def hash_args + [@seconds] + super + end + end + private_constant :DayOfYearTransitionRule + + # Defines transitions that occur on the zero-based nth day of the year. + # + # Day 0 is 1 January. + # + # Leap days are counted. Day 59 will be 29 February on a leap year and 1 March + # on a non-leap year. Day 365 will be 31 December on a leap year and 1 January + # the following year on a non-leap year. + # + # @private + class AbsoluteDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc: + # Initializes a new {AbsoluteDayOfYearTransitionRule}. + # + # @param day [Integer] the zero-based day of the year on which the + # transition occurs (0 to 365 inclusive). + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. + # @raise [ArgumentError] if `transition_at` is not an `Integer`. + # @raise [ArgumentError] if `day` is not an `Integer`. + # @raise [ArgumentError] if `day` is less than 0 or greater than 365. + def initialize(day, transition_at = 0) + super(day, transition_at) + raise ArgumentError, 'Invalid day' unless day >= 0 && day <= 365 + end + + # @return [Boolean] `true` if the day specified by this transition is the + # first in the year (a day number of 0), otherwise `false`. + def is_always_first_day_of_year? + seconds == 0 + end + + # @return [Boolean] `false`. + def is_always_last_day_of_year? + false + end + + # Determines if this {AbsoluteDayOfYearTransitionRule} is equal to another + # instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {AbsoluteDayOfYearTransitionRule} + # with the same {transition_at} and day as this + # {AbsoluteDayOfYearTransitionRule}, otherwise `false`. + def ==(r) + super(r) && r.kind_of?(AbsoluteDayOfYearTransitionRule) + end + alias eql? == + + protected + + # Returns a `Time` representing midnight local time on the day specified by + # the rule for the given offset and year. + # + # @param offset [TimezoneOffset] the current offset at the time of the + # transition. + # @param year [Integer] the year in which the transition occurs. + # @return [Time] midnight local time on the day specified by the rule for + # the given offset and year. 
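These rule classes are private to TZInfo, but the day arithmetic they encode (the leap-counting absolute rule above and the Julian variant defined just below) can be checked with plain `Date`. The helper names in this sketch are illustrative only:

```ruby
require 'date'

# Absolute rule: zero-based, leap days counted, so day 0 is 1 January and
# the date is simply 1 January plus the day number.
def absolute_day(year, day)
  Date.new(year, 1, 1) + day
end

puts absolute_day(2020, 59) # => 2020-02-29 (leap year)
puts absolute_day(2021, 59) # => 2021-03-01 (non-leap year)

# Julian rule: one-based, leap days not counted, so day 60 is always
# 1 March; skipping 29 February mirrors the LEAP adjustment made by the
# Julian rule's get_day below.
def julian_day(year, day)
  date = Date.new(year, 1, 1) + (day - 1)
  date += 1 if Date.leap?(year) && day >= 60
  date
end

puts julian_day(2020, 60)   # => 2020-03-01
puts julian_day(2021, 60)   # => 2021-03-01
```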
+ def get_day(offset, year) + Time.new(year, 1, 1, 0, 0, 0, offset.observed_utc_offset) + seconds + end + + # (see TransitionRule#hash_args) + def hash_args + [AbsoluteDayOfYearTransitionRule] + super + end + end + + # Defines transitions that occur on the one-based nth Julian day of the year. + # + # Leap days are not counted. Day 1 is 1 January. Day 60 is always 1 March. + # Day 365 is always 31 December. + # + # @private + class JulianDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc: + # The 60 days in seconds. + LEAP = 60 * 86400 + private_constant :LEAP + + # The length of a non-leap year in seconds. + YEAR = 365 * 86400 + private_constant :YEAR + + # Initializes a new {JulianDayOfYearTransitionRule}. + # + # @param day [Integer] the one-based Julian day of the year on which the + # transition occurs (1 to 365 inclusive). + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. + # @raise [ArgumentError] if `transition_at` is not an `Integer`. + # @raise [ArgumentError] if `day` is not an `Integer`. + # @raise [ArgumentError] if `day` is less than 1 or greater than 365. + def initialize(day, transition_at = 0) + super(day, transition_at) + raise ArgumentError, 'Invalid day' unless day >= 1 && day <= 365 + end + + # @return [Boolean] `true` if the day specified by this transition is the + # first in the year (a day number of 1), otherwise `false`. + def is_always_first_day_of_year? + seconds == 86400 + end + + # @return [Boolean] `true` if the day specified by this transition is the + # last in the year (a day number of 365), otherwise `false`. + def is_always_last_day_of_year? + seconds == YEAR + end + + # Determines if this {JulianDayOfYearTransitionRule} is equal to another + # instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {JulianDayOfYearTransitionRule} with + # the same {transition_at} and day as this + # {JulianDayOfYearTransitionRule}, otherwise `false`. + def ==(r) + super(r) && r.kind_of?(JulianDayOfYearTransitionRule) + end + alias eql? == + + protected + + # Returns a `Time` representing midnight local time on the day specified by + # the rule for the given offset and year. + # + # @param offset [TimezoneOffset] the current offset at the time of the + # transition. + # @param year [Integer] the year in which the transition occurs. + # @return [Time] midnight local time on the day specified by the rule for + # the given offset and year. + def get_day(offset, year) + # Returns 1 March on non-leap years. + leap = Time.new(year, 2, 29, 0, 0, 0, offset.observed_utc_offset) + diff = seconds - LEAP + diff += 86400 if diff >= 0 && leap.mday == 29 + leap + diff + end + + # (see TransitionRule#hash_args) + def hash_args + [JulianDayOfYearTransitionRule] + super + end + end + private_constant :JulianDayOfYearTransitionRule + + # A base class for rules that transition on a particular day of week of a + # given week (subclasses specify which week of the month). + # + # @abstract + # @private + class DayOfWeekTransitionRule < TransitionRule #:nodoc: + # Initializes a new {DayOfWeekTransitionRule}. + # + # @param month [Integer] the month of the year when the transition occurs. + # @param day_of_week [Integer] the day of the week when the transition + # occurs. 0 is Sunday, 6 is Saturday. + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. 
+ # @raise [ArgumentError] if `transition_at` is not an `Integer`. + # @raise [ArgumentError] if `month` is not an `Integer`. + # @raise [ArgumentError] if `month` is less than 1 or greater than 12. + # @raise [ArgumentError] if `day_of_week` is not an `Integer`. + # @raise [ArgumentError] if `day_of_week` is less than 0 or greater than 6. + def initialize(month, day_of_week, transition_at) + super(transition_at) + raise ArgumentError, 'Invalid month' unless month.kind_of?(Integer) && month >= 1 && month <= 12 + raise ArgumentError, 'Invalid day_of_week' unless day_of_week.kind_of?(Integer) && day_of_week >= 0 && day_of_week <= 6 + @month = month + @day_of_week = day_of_week + end + + # @return [Boolean] `false`. + def is_always_first_day_of_year? + false + end + + # @return [Boolean] `false`. + def is_always_last_day_of_year? + false + end + + # Determines if this {DayOfWeekTransitionRule} is equal to another + # instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {DayOfWeekTransitionRule} with the + # same {transition_at}, month and day of week as this + # {DayOfWeekTransitionRule}, otherwise `false`. + def ==(r) + super(r) && r.kind_of?(DayOfWeekTransitionRule) && @month == r.month && @day_of_week == r.day_of_week + end + alias eql? == + + protected + + # @return [Integer] the month of the year (1 to 12). + attr_reader :month + + # @return [Integer] the day of the week (0 to 6 for Sunday to Monday). + attr_reader :day_of_week + + # (see TransitionRule#hash_args) + def hash_args + [@month, @day_of_week] + super + end + end + private_constant :DayOfWeekTransitionRule + + # A rule that transitions on the nth occurrence of a particular day of week + # of a calendar month. + # + # @private + class DayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc: + # Initializes a new {DayOfMonthTransitionRule}. + # + # @param month [Integer] the month of the year when the transition occurs. + # @param week [Integer] the week of the month when the transition occurs (1 + # to 4). + # @param day_of_week [Integer] the day of the week when the transition + # occurs. 0 is Sunday, 6 is Saturday. + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. + # @raise [ArgumentError] if `transition_at` is not an `Integer`. + # @raise [ArgumentError] if `month` is not an `Integer`. + # @raise [ArgumentError] if `month` is less than 1 or greater than 12. + # @raise [ArgumentError] if `week` is not an `Integer`. + # @raise [ArgumentError] if `week` is less than 1 or greater than 4. + # @raise [ArgumentError] if `day_of_week` is not an `Integer`. + # @raise [ArgumentError] if `day_of_week` is less than 0 or greater than 6. + def initialize(month, week, day_of_week, transition_at = 0) + super(month, day_of_week, transition_at) + raise ArgumentError, 'Invalid week' unless week.kind_of?(Integer) && week >= 1 && week <= 4 + @offset_start = (week - 1) * 7 + 1 + end + + # Determines if this {DayOfMonthTransitionRule} is equal to another + # instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {DayOfMonthTransitionRule} with the + # same {transition_at}, month, week and day of week as this + # {DayOfMonthTransitionRule}, otherwise `false`. + def ==(r) + super(r) && r.kind_of?(DayOfMonthTransitionRule) && @offset_start == r.offset_start + end + alias eql? 
== + + protected + + # @return [Integer] the day the week starts on for a month starting on a + # Sunday. + attr_reader :offset_start + + # Returns a `Time` representing midnight local time on the day specified by + # the rule for the given offset and year. + # + # @param offset [TimezoneOffset] the current offset at the time of the + # transition. + # @param year [Integer] the year in which the transition occurs. + # @return [Time] midnight local time on the day specified by the rule for + # the given offset and year. + def get_day(offset, year) + candidate = Time.new(year, month, @offset_start, 0, 0, 0, offset.observed_utc_offset) + diff = day_of_week - candidate.wday + + if diff < 0 + candidate + (7 + diff) * 86400 + elsif diff > 0 + candidate + diff * 86400 + else + candidate + end + end + + # (see TransitionRule#hash_args) + def hash_args + [@offset_start] + super + end + end + private_constant :DayOfMonthTransitionRule + + # A rule that transitions on the last occurrence of a particular day of week + # of a calendar month. + # + # @private + class LastDayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc: + # Initializes a new {LastDayOfMonthTransitionRule}. + # + # @param month [Integer] the month of the year when the transition occurs. + # @param day_of_week [Integer] the day of the week when the transition + # occurs. 0 is Sunday, 6 is Saturday. + # @param transition_at [Integer] the time in seconds after midnight local + # time at which the transition occurs. + # @raise [ArgumentError] if `transition_at` is not an `Integer`. + # @raise [ArgumentError] if `month` is not an `Integer`. + # @raise [ArgumentError] if `month` is less than 1 or greater than 12. + # @raise [ArgumentError] if `day_of_week` is not an `Integer`. + # @raise [ArgumentError] if `day_of_week` is less than 0 or greater than 6. + def initialize(month, day_of_week, transition_at = 0) + super(month, day_of_week, transition_at) + end + + # Determines if this {LastDayOfMonthTransitionRule} is equal to another + # instance. + # + # @param r [Object] the instance to test for equality. + # @return [Boolean] `true` if `r` is a {LastDayOfMonthTransitionRule} with + # the same {transition_at}, month and day of week as this + # {LastDayOfMonthTransitionRule}, otherwise `false`. + def ==(r) + super(r) && r.kind_of?(LastDayOfMonthTransitionRule) + end + alias eql? == + + protected + + # Returns a `Time` representing midnight local time on the day specified by + # the rule for the given offset and year. + # + # @param offset [TimezoneOffset] the current offset at the time of the + # transition. + # @param year [Integer] the year in which the transition occurs. + # @return [Time] midnight local time on the day specified by the rule for + # the given offset and year. 
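The `get_day` implementations of these two rules are standard "nth weekday of the month" and "last weekday of the month" searches. A standalone sketch of the same arithmetic with `Date` (the helper names are illustrative, not part of TZInfo):

```ruby
require 'date'

# Nth occurrence of a weekday (0 = Sunday) in a month: start at day
# (week - 1) * 7 + 1 and move forward to the requested weekday.
def nth_weekday(year, month, week, wday)
  candidate = Date.new(year, month, (week - 1) * 7 + 1)
  candidate + ((wday - candidate.wday) % 7)
end

# Last occurrence of a weekday in a month: start at the last day of the
# month and move backwards to the requested weekday.
def last_weekday(year, month, wday)
  candidate = Date.new(year, month, -1)
  candidate - ((candidate.wday - wday) % 7)
end

puts nth_weekday(2021, 3, 2, 0)  # => 2021-03-14 (US DST start: 2nd Sunday in March)
puts nth_weekday(2021, 11, 1, 0) # => 2021-11-07 (US DST end: 1st Sunday in November)
puts last_weekday(2021, 10, 0)   # => 2021-10-31 (EU DST end: last Sunday in October)
```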
+ def get_day(offset, year) + next_month = month + 1 + if next_month == 13 + year += 1 + next_month = 1 + end + + candidate = Time.new(year, next_month, 1, 0, 0, 0, offset.observed_utc_offset) - 86400 + diff = candidate.wday - day_of_week + + if diff < 0 + candidate - (diff + 7) * 86400 + elsif diff > 0 + candidate - diff * 86400 + else + candidate + end + end + end + private_constant :LastDayOfMonthTransitionRule +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/transitions_timezone_period.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/transitions_timezone_period.rb new file mode 100644 index 0000000..aea87f3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/transitions_timezone_period.rb @@ -0,0 +1,63 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Represents a period of time in a time zone where the same offset from UTC + # applies. The period of time is bounded at at least one end, either having a + # start transition, end transition or both start and end transitions. + class TransitionsTimezonePeriod < TimezonePeriod + # @return [TimezoneTransition] the transition that defines the start of this + # {TimezonePeriod} (`nil` if the start is unbounded). + attr_reader :start_transition + + # @return [TimezoneTransition] the transition that defines the end of this + # {TimezonePeriod} (`nil` if the end is unbounded). + attr_reader :end_transition + + # Initializes a {TransitionsTimezonePeriod}. + # + # At least one of `start_transition` and `end_transition` must be specified. + # + # @param start_transition [TimezoneTransition] the transition that defines + # the start of the period, or `nil` if the start is unbounded. + # @param end_transition [TimezoneTransition] the transition that defines the + # end of the period, or `nil` if the end is unbounded. + # @raise [ArgumentError] if both `start_transition` and `end_transition` are + # `nil`. + def initialize(start_transition, end_transition) + if start_transition + super(start_transition.offset) + elsif end_transition + super(end_transition.previous_offset) + else + raise ArgumentError, 'At least one of start_transition and end_transition must be specified' + end + + @start_transition = start_transition + @end_transition = end_transition + end + + # Determines if this {TransitionsTimezonePeriod} is equal to another + # instance. + # + # @param p [Object] the instance to test for equality. + # @return [Boolean] `true` if `p` is a {TransitionsTimezonePeriod} with the + # same {offset}, {start_transition} and {end_transition}, otherwise + # `false`. + def ==(p) + p.kind_of?(TransitionsTimezonePeriod) && start_transition == p.start_transition && end_transition == p.end_transition + end + alias eql? == + + # @return [Integer] a hash based on {start_transition} and {end_transition}. + def hash + [@start_transition, @end_transition].hash + end + + # @return [String] the internal object state as a programmer-readable + # `String`. 
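A `TransitionsTimezonePeriod` is typically what the regular lookup methods return, so the accessors inherited from `TimezonePeriod` (offset, DST flag, start and end bounds) can be exercised directly. A short sketch, assuming a time zone data source is installed:

```ruby
require 'tzinfo'

tz = TZInfo::Timezone.get('Europe/London')

# The period covering a UTC instant in the summer of 2021.
period = tz.period_for_utc(Time.utc(2021, 7, 1))

puts period.abbreviation        # => BST
puts period.dst?                # => true
puts period.observed_utc_offset # => 3600
puts period.starts_at.to_time   # UTC instant the period began
puts period.ends_at.to_time     # UTC instant the period ends
```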
+ def inspect + "#<#{self.class}: @start_transition=#{@start_transition.inspect}, @end_transition=#{@end_transition.inspect}>" + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/untaint_ext.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/untaint_ext.rb new file mode 100644 index 0000000..4e8d0c0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/untaint_ext.rb @@ -0,0 +1,18 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # Object#untaint is deprecated in Ruby >= 2.7 and will be removed in 3.2. + # UntaintExt adds a refinement to make Object#untaint a no-op and avoid the + # warning. + # + # @private + module UntaintExt # :nodoc: + refine Object do + def untaint + self + end + end + end + private_constant :UntaintExt +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/version.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/version.rb new file mode 100644 index 0000000..6538121 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/version.rb @@ -0,0 +1,7 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # The TZInfo version number. + VERSION = '2.0.4' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/with_offset.rb b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/with_offset.rb new file mode 100644 index 0000000..3f2d9b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/tzinfo-2.0.4/lib/tzinfo/with_offset.rb @@ -0,0 +1,61 @@ +# encoding: UTF-8 +# frozen_string_literal: true + +module TZInfo + # The {WithOffset} module is included in {TimeWithOffset}, + # {DateTimeWithOffset} and {TimestampWithOffset}. It provides an override for + # the {strftime} method that handles expanding the `%Z` directive according to + # the {TimezoneOffset#abbreviation abbreviation} of the {TimezoneOffset} + # associated with a local time. + module WithOffset + # Overrides the `Time`, `DateTime` or {Timestamp} version of `strftime`, + # replacing `%Z` with the {TimezoneOffset#abbreviation abbreviation} of the + # associated {TimezoneOffset}. If there is no associated offset, `%Z` is + # expanded by the base class instead. + # + # All the format directives handled by the base class are supported. + # + # @param format [String] the format string. + # @return [String] the formatted time. + # @raise [ArgumentError] if `format` is `nil`. + def strftime(format) + raise ArgumentError, 'format must be specified' unless format + + if_timezone_offset do |o| + abbreviation = nil + + format = format.gsub(/%(%*)Z/) do + if $1.length.odd? + # Return %%Z so the real strftime treats it as a literal %Z too. + "#$1%Z" + else + "#$1#{abbreviation ||= o.abbreviation.gsub(/%/, '%%')}" + end + end + end + + super + end + + protected + + # Performs a calculation if there is an associated {TimezoneOffset}. + # + # @param result [Object] a result value that can be manipulated by the block + # if there is an associated {TimezoneOffset}. + # @yield [period, result] if there is an associated {TimezoneOffset}, the + # block is yielded to in order to calculate the method result. + # @yieldparam period [TimezoneOffset] the associated {TimezoneOffset}. + # @yieldparam result [Object] the `result` parameter. + # @yieldreturn [Object] the result of the calculation performed if there is + # an associated {TimezoneOffset}. + # @return [Object] the result of the block if there is an associated + # {TimezoneOffset}, otherwise the `result` parameter. 
+ # + # @private + def if_timezone_offset(result = nil) #:nodoc: + to = timezone_offset + to ? yield(to, result) : result + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/LICENSE new file mode 100644 index 0000000..f060fab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Eloy Durán <eloy.de.enige@gmail.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/README.md b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/README.md new file mode 100644 index 0000000..60412a9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/README.md @@ -0,0 +1,94 @@ +# Xcodeproj + +[![Build Status](https://img.shields.io/travis/CocoaPods/Xcodeproj/master.svg?style=flat)](https://travis-ci.org/CocoaPods/Xcodeproj) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/Xcodeproj.svg?style=flat)](https://codeclimate.com/github/CocoaPods/Xcodeproj) +[![Code Climate](https://img.shields.io/codeclimate/maintainability/CocoaPods/Xcodeproj.svg?style=flat&label=code%20climate)](https://codeclimate.com/github/CocoaPods/Xcodeproj) + +Xcodeproj lets you create and modify Xcode projects from [Ruby][ruby]. +Script boring management tasks or build Xcode-friendly libraries. Also includes +support for Xcode workspaces (`.xcworkspace`), configuration files (`.xcconfig`) and +Xcode Scheme files (`.xcscheme`). + +It is used in [CocoaPods](https://github.com/CocoaPods/CocoaPods) to create a +a collection of supplemental libraries or frameworks, for all platforms Xcode supports. + +The API reference can be found [here](http://www.rubydoc.info/gems/xcodeproj). + +## Installing Xcodeproj + +Xcodeproj itself installs through RubyGems, the Ruby package manager. 
Install it +by performing the following command: + + $ [sudo] gem install xcodeproj + +## Quickstart + +To begin editing an xcodeproj file start by opening it as an Xcodeproj with: + +```ruby +require 'xcodeproj' +project_path = '/your_path/your_project.xcodeproj' +project = Xcodeproj::Project.open(project_path) +``` + +#### Some Small Examples To Get You Started + +> Look through all targets + +```ruby +project.targets.each do |target| + puts target.name +end +``` + +> Get all source files for a target + +```ruby +target = project.targets.first +files = target.source_build_phase.files.to_a.map do |pbx_build_file| + pbx_build_file.file_ref.real_path.to_s + +end.select do |path| + path.end_with?(".m", ".mm", ".swift") + +end.select do |path| + File.exists?(path) +end +``` + +> Set a specific build configuration to all targets + +```ruby +project.targets.each do |target| + target.build_configurations.each do |config| + config.build_settings['MY_CUSTOM_FLAG'] ||= 'TRUE' + end +end +``` + +## Command Line Tool + +Installing the Xcodeproj gem will also install a command-line tool `xcodeproj` which you can +use to generate project diffs, target diffs, output all configurations and show a YAML representation. + +For more information consult `xcodeproj --help`. + +## Collaborate + +All Xcodeproj development happens on [GitHub][xcodeproj]. Contributing patches +is really easy and gratifying. + +Follow [@CocoaPods][twitter] to get up to date information about what's +going on in the CocoaPods world. + + +## LICENSE + +These works are available under the MIT license. See the [LICENSE][license] file +for more info. + +[twitter]: http://twitter.com/CocoaPods +[ruby]: http://www.ruby-lang.org/en/ +[xcodeproj]: https://github.com/cocoapods/xcodeproj +[tickets]: https://github.com/cocoapods/xcodeproj/issues +[license]: LICENSE diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/bin/xcodeproj b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/bin/xcodeproj new file mode 100755 index 0000000..5d163fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/bin/xcodeproj @@ -0,0 +1,10 @@ +#!/usr/bin/env ruby + +if $PROGRAM_NAME == __FILE__ + ENV['BUNDLE_GEMFILE'] = File.expand_path('../../Gemfile', __FILE__) + require 'bundler/setup' +end + +require 'xcodeproj' + +Xcodeproj::Command.run(ARGV) diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj.rb new file mode 100644 index 0000000..3a179d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj.rb @@ -0,0 +1,29 @@ +module Xcodeproj + require 'pathname' + require 'claide' + require 'colored2' + + class PlainInformative < StandardError + include CLAide::InformativeError + end + + class Informative < PlainInformative + def message + super !~ /\[!\]/ ? "[!] 
#{super}\n".red : super + end + end + + require 'xcodeproj/gem_version' + require 'xcodeproj/user_interface' + + autoload :Command, 'xcodeproj/command' + autoload :Config, 'xcodeproj/config' + autoload :Constants, 'xcodeproj/constants' + autoload :Differ, 'xcodeproj/differ' + autoload :Helper, 'xcodeproj/helper' + autoload :Plist, 'xcodeproj/plist' + autoload :Project, 'xcodeproj/project' + autoload :Workspace, 'xcodeproj/workspace' + autoload :XCScheme, 'xcodeproj/scheme' + autoload :XcodebuildHelper, 'xcodeproj/xcodebuild_helper' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command.rb new file mode 100644 index 0000000..9087500 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command.rb @@ -0,0 +1,63 @@ +module Xcodeproj + require 'colored2' + require 'claide' + + class Command < CLAide::Command + require 'xcodeproj/command/config_dump' + require 'xcodeproj/command/target_diff' + require 'xcodeproj/command/project_diff' + require 'xcodeproj/command/show' + require 'xcodeproj/command/sort' + + self.abstract_command = true + self.command = 'xcodeproj' + self.version = VERSION + self.description = 'Xcodeproj lets you create and modify Xcode projects from Ruby.' + self.plugin_prefixes = %w(claide xcodeproj) + + def initialize(argv) + super + unless self.ansi_output? + Colored2.disable! + String.send(:define_method, :colorize) { |string, _| string } + end + end + + private + + def xcodeproj_path + unless @xcodeproj_path + projects = Dir.glob('*.xcodeproj') + if projects.size == 1 + xcodeproj_path = projects.first + elsif projects.size > 1 + raise Informative, 'There are more than one Xcode project documents ' \ + 'in the current working directory. Please specify ' \ + 'which to use with the `--project` option.' + else + raise Informative, 'No Xcode project document found in the current ' \ + 'working directory. Please specify which to use ' \ + 'with the `--project` option.' + end + @xcodeproj_path = Pathname.new(xcodeproj_path).expand_path + end + @xcodeproj_path + end + + def open_project!(*paths) + if paths.empty? + [xcodeproj] + else + paths.map { |path| Project.open(path) } + end + end + + def xcodeproj_path=(path) + @xcodeproj_path = path && Pathname.new(path).expand_path + end + + def xcodeproj + @xcodeproj ||= Project.open(xcodeproj_path) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/config_dump.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/config_dump.rb new file mode 100644 index 0000000..dac7645 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/config_dump.rb @@ -0,0 +1,91 @@ +module Xcodeproj + class Command + class ConfigDump < Command + self.description = <<-eos + Dumps the build settings of all project targets for all configurations in + directories named by the target in given output directory. + + It extracts common build settings in a per target base.xcconfig file. + + If no `PROJECT` is specified then the current work directory is searched + for one. + + If no `OUTPUT` is specified then the project directory will be used. + eos + + self.summary = 'Dumps the build settings of all project targets for ' \ + 'all configurations in directories named by the target ' \ + 'in the given output directory.' 
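The "extract common build settings" step described above boils down to intersecting the settings hashes of all configurations and leaving only the differences in the per-configuration files. A minimal standalone sketch of that idea (the values are made up; this is not the command's exact code):

```ruby
# Build settings per configuration (hypothetical values).
configs = {
  'Debug'   => { 'SWIFT_VERSION' => '5.0', 'SDKROOT' => 'iphoneos', 'ONLY_ACTIVE_ARCH' => 'YES' },
  'Release' => { 'SWIFT_VERSION' => '5.0', 'SDKROOT' => 'iphoneos', 'VALIDATE_PRODUCT' => 'YES' },
}

# Settings shared by every configuration belong in the base xcconfig...
common = configs.values.reduce do |memo, settings|
  memo.select { |key, value| settings[key] == value }
end

# ...and each configuration keeps only what differs from the base.
per_config = configs.transform_values do |settings|
  settings.reject { |key, _| common.key?(key) }
end

p common     # => {"SWIFT_VERSION"=>"5.0", "SDKROOT"=>"iphoneos"}
p per_config # => {"Debug"=>{"ONLY_ACTIVE_ARCH"=>"YES"}, "Release"=>{"VALIDATE_PRODUCT"=>"YES"}}
```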
+ + self.arguments = [ + CLAide::Argument.new('PROJECT', false), + CLAide::Argument.new('OUTPUT', false), + ] + + def initialize(argv) + self.xcodeproj_path = argv.shift_argument + @output_path = Pathname(argv.shift_argument || '.') + + super + end + + def validate! + super + + raise Informative, 'The output path must be a directory.' unless @output_path.directory? + open_project! + end + + def run + dump_all_configs(xcodeproj, 'Project') + + xcodeproj.targets.each do |target| + dump_all_configs(target, target.name) + end + end + + def dump_all_configs(configurable, name) + path = Pathname(name) + + # Dump base configuration to file + base_settings = extract_common_settings!(configurable.build_configurations) + base_file_path = path + "#{name}_base.xcconfig" + dump_config_to_file(base_settings, base_file_path) + + # Dump each configuration to file + configurable.build_configurations.each do |config| + settings = config.build_settings + dump_config_to_file(settings, path + "#{name}_#{config.name.downcase}.xcconfig", [base_file_path]) + end + end + + def extract_common_settings!(build_configurations) + # Grasp all common build settings + all_build_settings = build_configurations.map(&:build_settings) + common_build_settings = all_build_settings.reduce do |settings, config_build_settings| + settings.select { |key, value| value == config_build_settings[key] } + end + + # Remove all common build settings from each configuration specific build settings + build_configurations.each do |config| + config.build_settings.reject! { |key| !common_build_settings[key].nil? } + end + + common_build_settings + end + + def dump_config_to_file(settings, file_path, includes = []) + dir = @output_path + file_path + '..' + dir.mkpath + + settings = Hash[settings.map do |k, v| + [k, Array(v).join(' ')] + end] + + config = Config.new(settings) + config.includes = includes + config.save_as(@output_path + file_path) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/project_diff.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/project_diff.rb new file mode 100644 index 0000000..2f5d553 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/project_diff.rb @@ -0,0 +1,56 @@ +module Xcodeproj + class Command + class ProjectDiff < Command + self.summary = 'Shows the difference between two projects' + + self.description = summary + <<-EOS.gsub(/ {8}/, '') + + It shows the difference in a UUID agnostic fashion. + + To reduce the noise (and to simplify implementation) differences in the + order of arrays are ignored. + EOS + + def self.options + [ + ['--ignore=KEY', 'A key to ignore in the comparison. Can be specified multiple times.'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('PROJECT1', true), + CLAide::Argument.new('PROJECT2', true), + ] + + def initialize(argv) + @path_project1 = argv.shift_argument + @path_project2 = argv.shift_argument + @keys_to_ignore = argv.all_options('ignore') + super + end + + def validate! 
+ super + @project1, @project2 = open_project!(@path_project1, @path_project2) + end + + def run + hash_1 = @project1.to_tree_hash.dup + hash_2 = @project2.to_tree_hash.dup + @keys_to_ignore.each do |key| + Differ.clean_hash!(hash_1, key) + Differ.clean_hash!(hash_2, key) + end + + diff = Differ.project_diff(hash_1, hash_2, @path_project1, @path_project2) + + require 'yaml' + yaml = diff.to_yaml + yaml.gsub!(@path_project1, @path_project1.cyan) + yaml.gsub!(@path_project2, @path_project2.magenta) + yaml.gsub!(':diff:', 'diff:'.yellow) + puts yaml + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/show.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/show.rb new file mode 100644 index 0000000..5c36c06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/show.rb @@ -0,0 +1,60 @@ +module Xcodeproj + class Command + class Show < Command + self.summary = 'Shows an overview of a project in a YAML representation.' + + def self.options + [ + ['--format=[hash|tree_hash|raw]', 'YAML output format'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('PROJECT', false), + ] + + def initialize(argv) + self.xcodeproj_path = argv.shift_argument + @output_format = argv.option('format') + @output_format &&= @output_format.to_sym + super + end + + def validate + super + unless [nil, :hash, :tree_hash, :raw].include?(@output_format) + help! "Unknown format `#{@output_format}`" + end + open_project! + end + + def run + require 'yaml' + + if @output_format + case @output_format + when :hash + puts xcodeproj.to_hash.to_yaml + when :tree_hash + puts xcodeproj.to_tree_hash.to_yaml + when :raw + puts xcodeproj.to_yaml + end + return + end + + pretty_print = xcodeproj.pretty_print + sections = [] + pretty_print.each do |key, value| + section = key.green + yaml = value.to_yaml + yaml.gsub!(/^---$/, '') + yaml.gsub!(/^-/, "\n-") + yaml.prepend(section) + sections << yaml + end + puts sections * "\n\n" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/sort.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/sort.rb new file mode 100644 index 0000000..77afc7c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/sort.rb @@ -0,0 +1,33 @@ +module Xcodeproj + class Command + class Sort < Command + self.description = <<-eos + Sorts the given project. + + If no `PROJECT' is specified then the current work directory is searched for one. + eos + + self.summary = 'Sorts the given project.' + + self.arguments = [ + CLAide::Argument.new('PROJECT', false), + ] + + def initialize(argv) + self.xcodeproj_path = argv.shift_argument + super + end + + def validate! + super + open_project! 
+ end + + def run + xcodeproj.sort + xcodeproj.save + puts "The `#{File.basename(xcodeproj_path)}` project was sorted" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/target_diff.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/target_diff.rb new file mode 100644 index 0000000..7390bee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/command/target_diff.rb @@ -0,0 +1,43 @@ +module Xcodeproj + class Command + class TargetDiff < Command + self.summary = 'Shows the difference between two targets' + + def self.options + [ + ['--project PATH', 'The Xcode project document to use.'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('TARGET1', true), + CLAide::Argument.new('TARGET2', true), + ] + + def initialize(argv) + @target1 = argv.shift_argument + @target2 = argv.shift_argument + self.xcodeproj_path = argv.option('--project') + super + end + + def validate! + super + open_project! + end + + def run + require 'yaml' + differ = Helper::TargetDiff.new(xcodeproj, @target1, @target2) + files = differ.new_source_build_files.map do |build_file| + { + 'Name' => build_file.file_ref.name, + 'Path' => build_file.file_ref.path, + 'Build settings' => build_file.settings, + } + end + puts files.to_yaml + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/config.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/config.rb new file mode 100644 index 0000000..671f881 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/config.rb @@ -0,0 +1,376 @@ +require 'shellwords' +require 'xcodeproj/config/other_linker_flags_parser' + +module Xcodeproj + # This class holds the data for a Xcode build settings file (xcconfig) and + # provides support for serialization. + # + class Config + require 'set' + + KEY_VALUE_PATTERN = / + ( + [^=\[]+ # Any char, but not an assignment operator + # or subscript (non-greedy) + (?: # One or multiple conditional subscripts + \[ + [^\]]* # The subscript key + (?: + = # The subscript comparison operator + [^\]]* # The subscript value + )? + \] + )* + ) + \s* # Whitespaces after the key (needed because subscripts + # always end with ']') + = # The assignment operator + (.*) # The value + /x + private_constant :KEY_VALUE_PATTERN + + INHERITED = %w($(inherited) ${inherited}).freeze + private_constant :INHERITED + + INHERITED_REGEXP = Regexp.union(INHERITED) + private_constant :INHERITED_REGEXP + + # @return [Hash{String => String}] The attributes of the settings file + # excluding frameworks, weak_framework and libraries. + # + attr_accessor :attributes + + # @return [Hash{Symbol => Set<String>}] The other linker flags by key. + # Xcodeproj handles them in a dedicated way to prevent duplication + # of the libraries and of the frameworks. + # + attr_accessor :other_linker_flags + + # @return [Array] The list of the configuration files included by this + # configuration file (`#include "SomeConfig"`). + # + attr_accessor :includes + + # @param [Hash, File, String] xcconfig_hash_or_file + # The initial data. 
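A short usage sketch of `Xcodeproj::Config`, based on the behaviour documented in this file (the settings and file names are hypothetical):

```ruby
require 'xcodeproj'

# Start from a hash; a path to an existing .xcconfig file also works here.
config = Xcodeproj::Config.new('PODS_ROOT' => '"$(SRCROOT)/Pods"')
config.merge!('HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers"')

puts config.to_s
# Settings are emitted sorted by name:
#   HEADER_SEARCH_PATHS = "$(PODS_ROOT)/Headers"
#   PODS_ROOT = "$(SRCROOT)/Pods"

# Serialization to disk takes a Pathname:
# config.save_as(Pathname.new('MyTarget.xcconfig'))
```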
+ # + def initialize(xcconfig_hash_or_file = {}) + @attributes = {} + @includes = [] + @other_linker_flags = {} + [:simple, :frameworks, :weak_frameworks, :libraries, :force_load].each do |key| + @other_linker_flags[key] = Set.new + end + merge!(extract_hash(xcconfig_hash_or_file)) + end + + def inspect + to_hash.inspect + end + + def ==(other) + other.attributes == attributes && other.other_linker_flags == other_linker_flags && other.includes == includes + end + + public + + # @!group Serialization + #-------------------------------------------------------------------------# + + # Sorts the internal data by setting name and serializes it in the xcconfig + # format. + # + # @example + # + # config = Config.new('PODS_ROOT' => '"$(SRCROOT)/Pods"', 'OTHER_LDFLAGS' => '-lxml2') + # config.to_s # => "OTHER_LDFLAGS = -lxml2\nPODS_ROOT = \"$(SRCROOT)/Pods\"" + # + # @return [String] The serialized internal data. + # + def to_s(prefix = nil) + include_lines = includes.map { |path| "#include \"#{normalized_xcconfig_path(path)}\"" } + settings = to_hash(prefix).sort_by(&:first).map { |k, v| "#{k} = #{v}".strip } + (include_lines + settings).join("\n") << "\n" + end + + # Writes the serialized representation of the internal data to the given + # path. + # + # @param [Pathname] pathname + # The file where the data should be written to. + # + # @return [void] + # + def save_as(pathname, prefix = nil) + if File.exist?(pathname) + return if Config.new(pathname) == self + end + + pathname.open('w') { |file| file << to_s(prefix) } + end + + # The hash representation of the xcconfig. The hash includes the + # frameworks, the weak frameworks, the libraries and the simple other + # linker flags in the `Other Linker Flags` (`OTHER_LDFLAGS`). + # + # @note All the values are sorted to have a consistent output in Ruby + # 1.8.7. + # + # @return [Hash] The hash representation + # + def to_hash(prefix = nil) + list = [] + list += other_linker_flags[:simple].to_a.sort + modifiers = { + :frameworks => '-framework ', + :weak_frameworks => '-weak_framework ', + :libraries => '-l', + :force_load => '-force_load', + } + [:libraries, :frameworks, :weak_frameworks, :force_load].each do |key| + modifier = modifiers[key] + sorted = other_linker_flags[key].to_a.sort + if key == :force_load + list += sorted.map { |l| %(#{modifier} #{l}) } + else + list += sorted.map { |l| %(#{modifier}"#{l}") } + end + end + + result = attributes.dup + result['OTHER_LDFLAGS'] = list.join(' ') unless list.empty? + result.reject! { |_, v| INHERITED.any? { |i| i == v.to_s.strip } } + + result = @includes.map do |incl| + path = File.expand_path(incl, @filepath.dirname) + if File.readable? path + Xcodeproj::Config.new(path).to_hash + else + {} + end + end.inject(&:merge).merge(result) unless @filepath.nil? || @includes.empty? + + if prefix + Hash[result.map { |k, v| [prefix + k, v] }] + else + result + end + end + + alias_method :to_h, :to_hash + + # @return [Set<String>] The list of the frameworks required by this + # settings file. + # + def frameworks + other_linker_flags[:frameworks] + end + + # @return [Set<String>] The list of the *weak* frameworks required by + # this settings file. + # + def weak_frameworks + other_linker_flags[:weak_frameworks] + end + + # @return [Set<String>] The list of the libraries required by this + # settings file. 
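The dedicated accessors documented here expose the parsed `OTHER_LDFLAGS` inputs as sets, which is how duplication of frameworks and libraries is avoided across merges. A small sketch (the flags are hypothetical):

```ruby
require 'xcodeproj'

config = Xcodeproj::Config.new(
  'OTHER_LDFLAGS' => '-ObjC -framework UIKit -weak_framework SwiftUI -lz -lsqlite3'
)

p config.frameworks                  # => #<Set: {"UIKit"}>
p config.weak_frameworks             # => #<Set: {"SwiftUI"}>
p config.libraries                   # => #<Set: {"z", "sqlite3"}>
p config.other_linker_flags[:simple] # => #<Set: {"-ObjC"}>
```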
+ # + def libraries + other_linker_flags[:libraries] + end + + public + + # @!group Merging + #-------------------------------------------------------------------------# + + # Merges the given xcconfig representation in the receiver. + # + # @example + # + # config = Config.new('PODS_ROOT' => '"$(SRCROOT)/Pods"', 'OTHER_LDFLAGS' => '-lxml2') + # config.merge!('OTHER_LDFLAGS' => '-lz', 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers"') + # config.to_hash # => { 'PODS_ROOT' => '"$(SRCROOT)/Pods"', 'OTHER_LDFLAGS' => '-lxml2 -lz', 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers"' } + # + # @note If a key in the given hash already exists in the internal data + # then its value is appended. + # + # @param [Hash, Config] config + # The xcconfig representation to merge. + # + # @todo The logic to normalize an hash should be extracted and the + # initializer should not call this method. + # + # @return [void] + # + def merge!(xcconfig) + if xcconfig.is_a? Config + merge_attributes!(xcconfig.attributes) + other_linker_flags.keys.each do |key| + other_linker_flags[key].merge(xcconfig.other_linker_flags[key]) + end + else + merge_attributes!(xcconfig.to_hash) + if flags = attributes.delete('OTHER_LDFLAGS') + flags_by_key = OtherLinkerFlagsParser.parse(flags) + other_linker_flags.keys.each do |key| + other_linker_flags[key].merge(flags_by_key[key]) + end + end + end + end + alias_method :<<, :merge! + + # Creates a new #{Config} with the data of the receiver merged with the + # given xcconfig representation. + # + # @param [Hash, Config] config + # The xcconfig representation to merge. + # + # @return [Config] the new xcconfig. + # + def merge(config) + dup.tap { |x| x.merge!(config) } + end + + # @return [Config] A copy of the receiver. + # + def dup + Xcodeproj::Config.new(to_hash.dup) + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private Helpers + + # Returns a hash from the given argument reading it from disk if necessary. + # + # @param [String, Pathname, Hash] argument + # The source from where the hash should be extracted. + # + # @return [Hash] + # + def extract_hash(argument) + if argument.respond_to? :read + @filepath = Pathname.new(argument.to_path) + hash_from_file_content(argument.read) + elsif File.readable?(argument.to_s) + @filepath = Pathname.new(argument.to_s) + hash_from_file_content(File.read(argument)) + else + argument + end + end + + # Returns a hash from the string representation of an Xcconfig file. + # + # @param [String] string + # The string representation of an xcconfig file. + # + # @return [Hash] the hash containing the xcconfig data. + # + def hash_from_file_content(string) + hash = {} + string.split("\n").each do |line| + uncommented_line = strip_comment(line) + if include = extract_include(uncommented_line) + @includes.push normalized_xcconfig_path(include) + else + key, value = extract_key_value(uncommented_line) + next unless key + value.gsub!(INHERITED_REGEXP) { |m| hash.fetch(key, m) } + hash[key] = value + end + end + hash + end + + # Merges the given attributes hash while ensuring values are not duplicated. + # + # @param [Hash] attributes + # The attributes hash to merge into @attributes. + # + # @return [void] + # + def merge_attributes!(attributes) + @attributes.merge!(attributes) do |_, v1, v2| + v1 = v1.strip + v2 = v2.strip + v1_split = v1.shellsplit + v2_split = v2.shellsplit + if (v2_split - v1_split).empty? 
|| v1_split.first(v2_split.size) == v2_split + v1 + elsif v2_split.first(v1_split.size) == v1_split + v2 + else + "#{v1} #{v2}" + end + end + end + + # Strips the comments from a line of an xcconfig string. + # + # @param [String] line + # the line to process. + # + # @return [String] the uncommented line. + # + def strip_comment(line) + line.partition('//').first + end + + # Returns the file included by a line of an xcconfig string if present. + # + # @param [String] line + # the line to process. + # + # @return [String] the included file. + # @return [Nil] if no include was found in the line. + # + def extract_include(line) + regexp = /#include\s*"(.+)"/ + match = line.match(regexp) + match[1] if match + end + + # Returns the key and the value described by the given line of an xcconfig. + # + # @param [String] line + # the line to process. + # + # @return [Array] A tuple where the first entry is the key and the second + # entry is the value. + # + def extract_key_value(line) + match = line.match(KEY_VALUE_PATTERN) + if match + key = match[1] + value = match[2] + [key.strip, value.strip] + else + [] + end + end + + # Normalizes the given path to an xcconfing file to be used in includes, + # appending the extension if necessary. + # + # @param [String] path + # The path of the file which will be included in the xcconfig. + # + # @return [String] The normalized path. + # + def normalized_xcconfig_path(path) + if File.extname(path) == '.xcconfig' + path + else + "#{path}.xcconfig" + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/config/other_linker_flags_parser.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/config/other_linker_flags_parser.rb new file mode 100644 index 0000000..9c94246 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/config/other_linker_flags_parser.rb @@ -0,0 +1,68 @@ +require 'shellwords' + +module Xcodeproj + class Config + # Parses other linker flags values. + # + module OtherLinkerFlagsParser + # @return [Hash{Symbol, Array[String]}] Splits the given + # other linker flags value by type. + # + # @param [String, Array] flags + # The other linker flags value. + # + def self.parse(flags) + result = { + :frameworks => [], + :weak_frameworks => [], + :libraries => [], + :simple => [], + :force_load => [], + } + + key = nil + if flags.is_a? String + flags = split(flags) + end + flags.each do |token| + case token + when '-framework' + key = :frameworks + when '-weak_framework' + key = :weak_frameworks + when '-l' + key = :libraries + when '-force_load' + key = :force_load + else + if key + result[key] << token + key = nil + else + result[:simple] << token + end + end + end + result + end + + # @return [Array<String>] Split the given other linker + # flags value, taking into account quoting and + # the fact that the `-l` flag might omit the + # space. + # + # @param [String] flags + # The other linker flags value. 
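The parser splits an `OTHER_LDFLAGS` string by flag type, including `-l` flags written without a space. A quick sketch of its output (the force-load path is made up):

```ruby
require 'xcodeproj'

flags = '-ObjC -framework "CoreData" -lxml2 -force_load $(PODS_ROOT)/libFoo.a'
p Xcodeproj::Config::OtherLinkerFlagsParser.parse(flags)
# => {:frameworks=>["CoreData"], :weak_frameworks=>[], :libraries=>["xml2"],
#     :simple=>["-ObjC"], :force_load=>["$(PODS_ROOT)/libFoo.a"]}
```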
+ # + def self.split(flags) + flags.strip.shellsplit.flat_map do |string| + if string =~ /\A-l.+/ + ['-l', string[2..-1]] + else + string + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/constants.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/constants.rb new file mode 100644 index 0000000..352b444 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/constants.rb @@ -0,0 +1,457 @@ +module Xcodeproj + # This modules groups all the constants known to Xcodeproj. + # + module Constants + # @return [String] The last known iOS SDK (stable). + # + LAST_KNOWN_IOS_SDK = '14.0' + + # @return [String] The last known OS X SDK (stable). + # + LAST_KNOWN_OSX_SDK = '10.15' + + # @return [String] The last known tvOS SDK (stable). + # + LAST_KNOWN_TVOS_SDK = '14.0' + + # @return [String] The last known watchOS SDK (stable). + # + LAST_KNOWN_WATCHOS_SDK = '7.0' + + # @return [String] The last known archive version to Xcodeproj. + # + LAST_KNOWN_ARCHIVE_VERSION = 1 + + # @return [String] The last known Swift version (stable). + # + LAST_KNOWN_SWIFT_VERSION = '5.0' + + # @return [String] The default object version for Xcodeproj. + # + DEFAULT_OBJECT_VERSION = 46 + + # @return [String] The last known object version to Xcodeproj. + # + LAST_KNOWN_OBJECT_VERSION = 54 + + # @return [String] The last known object version to Xcodeproj. + # + LAST_UPGRADE_CHECK = '1100' + + # @return [String] The last known object version to Xcodeproj. + # + LAST_SWIFT_UPGRADE_CHECK = '1100' + + # @return [String] The version of `.xcscheme` files supported by Xcodeproj + # + XCSCHEME_FORMAT_VERSION = '1.3' + + # @return [Hash] The all the known ISAs grouped by superclass. + # + KNOWN_ISAS = { + 'AbstractObject' => %w( + PBXBuildFile + AbstractBuildPhase + PBXBuildRule + XCBuildConfiguration + XCConfigurationList + PBXContainerItemProxy + PBXFileReference + PBXGroup + PBXProject + PBXTargetDependency + PBXReferenceProxy + AbstractTarget + ), + + 'AbstractBuildPhase' => %w( + PBXCopyFilesBuildPhase + PBXResourcesBuildPhase + PBXSourcesBuildPhase + PBXFrameworksBuildPhase + PBXHeadersBuildPhase + PBXShellScriptBuildPhase + ), + + 'AbstractTarget' => %w( + PBXNativeTarget + PBXAggregateTarget + PBXLegacyTarget + ), + + 'PBXGroup' => %w( + XCVersionGroup + PBXVariantGroup + ), + }.freeze + + # @return [Hash] The known file types corresponding to each extension. 
+ # + FILE_TYPES_BY_EXTENSION = { + 'a' => 'archive.ar', + 'apns' => 'text', + 'app' => 'wrapper.application', + 'appex' => 'wrapper.app-extension', + 'bundle' => 'wrapper.plug-in', + 'cpp' => 'sourcecode.cpp.cpp', + 'dylib' => 'compiled.mach-o.dylib', + 'entitlements' => 'text.plist.entitlements', + 'framework' => 'wrapper.framework', + 'gif' => 'image.gif', + 'gpx' => 'text.xml', + 'h' => 'sourcecode.c.h', + 'hpp' => 'sourcecode.cpp.h', + 'm' => 'sourcecode.c.objc', + 'markdown' => 'text', + 'mdimporter' => 'wrapper.cfbundle', + 'modulemap' => 'sourcecode.module', + 'mov' => 'video.quicktime', + 'mp3' => 'audio.mp3', + 'octest' => 'wrapper.cfbundle', + 'pch' => 'sourcecode.c.h', + 'plist' => 'text.plist.xml', + 'png' => 'image.png', + 'sh' => 'text.script.sh', + 'sks' => 'file.sks', + 'storyboard' => 'file.storyboard', + 'strings' => 'text.plist.strings', + 'swift' => 'sourcecode.swift', + 'xcassets' => 'folder.assetcatalog', + 'xcconfig' => 'text.xcconfig', + 'xcdatamodel' => 'wrapper.xcdatamodel', + 'xcodeproj' => 'wrapper.pb-project', + 'xctest' => 'wrapper.cfbundle', + 'xib' => 'file.xib', + 'zip' => 'archive.zip', + }.freeze + + # @return [Hash] The compatibility version string for different object versions. + # + COMPATIBILITY_VERSION_BY_OBJECT_VERSION = { + 54 => 'Xcode 12.0', + 53 => 'Xcode 11.4', + 52 => 'Xcode 11.0', + 51 => 'Xcode 10.0', + 50 => 'Xcode 9.3', + 48 => 'Xcode 8.0', + 47 => 'Xcode 6.3', + 46 => 'Xcode 3.2', + 45 => 'Xcode 3.1', + }.freeze + + # @return [Hash] The uniform type identifier of various product types. + # + PRODUCT_TYPE_UTI = { + :application => 'com.apple.product-type.application', + :application_on_demand_install_capable => 'com.apple.product-type.application.on-demand-install-capable', + :framework => 'com.apple.product-type.framework', + :dynamic_library => 'com.apple.product-type.library.dynamic', + :static_library => 'com.apple.product-type.library.static', + :bundle => 'com.apple.product-type.bundle', + :octest_bundle => 'com.apple.product-type.bundle', + :unit_test_bundle => 'com.apple.product-type.bundle.unit-test', + :ui_test_bundle => 'com.apple.product-type.bundle.ui-testing', + :app_extension => 'com.apple.product-type.app-extension', + :command_line_tool => 'com.apple.product-type.tool', + :watch_app => 'com.apple.product-type.application.watchapp', + :watch2_app => 'com.apple.product-type.application.watchapp2', + :watch2_app_container => 'com.apple.product-type.application.watchapp2-container', + :watch_extension => 'com.apple.product-type.watchkit-extension', + :watch2_extension => 'com.apple.product-type.watchkit2-extension', + :tv_extension => 'com.apple.product-type.tv-app-extension', + :messages_application => 'com.apple.product-type.application.messages', + :messages_extension => 'com.apple.product-type.app-extension.messages', + :sticker_pack => 'com.apple.product-type.app-extension.messages-sticker-pack', + :xpc_service => 'com.apple.product-type.xpc-service', + }.freeze + + # @return [Hash] The extensions or the various product UTIs. 
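As a brief, hedged illustration of how these lookup tables are typically consumed (the calls are editorial and not taken from the diff; the product-extension table itself follows just below):

  Xcodeproj::Constants::FILE_TYPES_BY_EXTENSION['swift']            # => 'sourcecode.swift'
  Xcodeproj::Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION[54] # => 'Xcode 12.0'
  Xcodeproj::Constants::PRODUCT_TYPE_UTI[:framework]                # => 'com.apple.product-type.framework'
  Xcodeproj::Constants::PRODUCT_UTI_EXTENSIONS[:framework]          # => 'framework'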
+ # + PRODUCT_UTI_EXTENSIONS = { + :application => 'app', + :application_on_demand_install_capable => 'app', + :framework => 'framework', + :dynamic_library => 'dylib', + :static_library => 'a', + :bundle => 'bundle', + :octest_bundle => 'octest', + :unit_test_bundle => 'xctest', + :ui_test_bundle => 'xctest', + :app_extension => 'appex', + :messages_application => 'app', + :messages_extension => 'appex', + :sticker_pack => 'appex', + :watch2_extension => 'appex', + :watch2_app => 'app', + :watch2_app_container => 'app', + }.freeze + + # @return [Hash] The common build settings grouped by platform, and build + # configuration name. + # + COMMON_BUILD_SETTINGS = { + :all => { + # empty? + # come from the project settings + }.freeze, + [:debug] => { + # empty? + # come from the project settings + }.freeze, + [:release] => { + # empty? + # come from the project settings + }.freeze, + [:ios] => { + 'SDKROOT' => 'iphoneos', + }.freeze, + [:osx] => { + 'SDKROOT' => 'macosx', + }.freeze, + [:tvos] => { + 'SDKROOT' => 'appletvos', + }.freeze, + [:watchos] => { + 'SDKROOT' => 'watchos', + }.freeze, + [:debug, :osx] => { + # Empty? + }.freeze, + [:release, :osx] => { + # Empty? + }.freeze, + [:debug, :ios] => { + # Empty? + }.freeze, + [:release, :ios] => { + 'VALIDATE_PRODUCT' => 'YES', + }.freeze, + [:debug, :tvos] => { + # Empty? + }.freeze, + [:release, :tvos] => { + 'VALIDATE_PRODUCT' => 'YES', + }.freeze, + [:debug, :watchos] => { + # Empty? + }.freeze, + [:release, :watchos] => { + 'VALIDATE_PRODUCT' => 'YES', + }.freeze, + [:swift] => { + # Empty? + # The swift version, like deployment target, is set in + # ProjectHelper#common_build_settings + }.freeze, + [:debug, :application, :swift] => { + # Empty? + }.freeze, + [:debug, :swift] => { + # Swift optimization settings are provided by the project settings + }.freeze, + [:release, :swift] => { + # Swift optimization settings are provided by the project settings + }.freeze, + [:debug, :static_library, :swift] => { + }.freeze, + [:framework] => { + 'CURRENT_PROJECT_VERSION' => '1', + 'DEFINES_MODULE' => 'YES', + 'DYLIB_COMPATIBILITY_VERSION' => '1', + 'DYLIB_CURRENT_VERSION' => '1', + 'DYLIB_INSTALL_NAME_BASE' => '@rpath', + 'INSTALL_PATH' => '$(LOCAL_LIBRARY_DIR)/Frameworks', + 'PRODUCT_NAME' => '$(TARGET_NAME:c99extidentifier)', + 'SKIP_INSTALL' => 'YES', + 'VERSION_INFO_PREFIX' => '', + 'VERSIONING_SYSTEM' => 'apple-generic', + }.freeze, + [:ios, :framework] => { + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks @loader_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '1,2', + }.freeze, + [:osx, :framework] => { + 'COMBINE_HIDPI_IMAGES' => 'YES', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/../Frameworks @loader_path/Frameworks', + }.freeze, + [:watchos, :framework] => { + 'APPLICATION_EXTENSION_API_ONLY' => 'YES', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks @loader_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '4', + }.freeze, + [:tvos, :framework] => { + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks @loader_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '3', + }.freeze, + [:framework, :swift] => { + 'DEFINES_MODULE' => 'YES', + }.freeze, + [:osx, :static_library] => { + 'EXECUTABLE_PREFIX' => 'lib', + 'SKIP_INSTALL' => 'YES', + }.freeze, + [:ios, :static_library] => { + 'OTHER_LDFLAGS' => '-ObjC', + 'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '1,2', + }.freeze, + [:watchos, :static_library] => { + 'OTHER_LDFLAGS' => '-ObjC', + 
'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '4', + }.freeze, + [:tvos, :static_library] => { + 'OTHER_LDFLAGS' => '-ObjC', + 'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '3', + }.freeze, + [:osx, :dynamic_library] => { + 'DYLIB_COMPATIBILITY_VERSION' => '1', + 'DYLIB_CURRENT_VERSION' => '1', + 'EXECUTABLE_PREFIX' => 'lib', + 'SKIP_INSTALL' => 'YES', + }.freeze, + [:application] => { + 'ASSETCATALOG_COMPILER_APPICON_NAME' => 'AppIcon', + 'ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME' => 'AccentColor', + }.freeze, + [:ios, :application] => { + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '1,2', + }.freeze, + [:osx, :application] => { + 'COMBINE_HIDPI_IMAGES' => 'YES', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/../Frameworks', + }.freeze, + [:watchos, :application] => { + 'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '4', + }.freeze, + [:tvos, :application] => { + 'ASSETCATALOG_COMPILER_APPICON_NAME' => 'App Icon & Top Shelf Image', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '3', + }.freeze, + [:tvos, :application, :swift] => { + 'ENABLE_PREVIEWS' => 'YES', + }.freeze, + [:watchos, :application, :swift] => { + 'ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES' => 'YES', + }.freeze, + [:bundle] => { + 'WRAPPER_EXTENSION' => 'bundle', + 'SKIP_INSTALL' => 'YES', + }.freeze, + [:ios, :bundle] => { + 'SDKROOT' => 'iphoneos', + }.freeze, + [:osx, :bundle] => { + 'COMBINE_HIDPI_IMAGES' => 'YES', + 'INSTALL_PATH' => '$(LOCAL_LIBRARY_DIR)/Bundles', + 'SDKROOT' => 'macosx', + }.freeze, + }.freeze + + # @return [Hash] The default build settings for a new project. + # + PROJECT_DEFAULT_BUILD_SETTINGS = { + :all => { + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + 'CLANG_ANALYZER_NONNULL' => 'YES', + 'CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION' => 'YES_AGGRESSIVE', + 'CLANG_CXX_LANGUAGE_STANDARD' => 'gnu++14', + 'CLANG_CXX_LIBRARY' => 'libc++', + 'CLANG_ENABLE_MODULES' => 'YES', + 'CLANG_ENABLE_OBJC_ARC' => 'YES', + 'CLANG_ENABLE_OBJC_WEAK' => 'YES', + 'CLANG_WARN__DUPLICATE_METHOD_MATCH' => 'YES', + 'CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING' => 'YES', + 'CLANG_WARN_BOOL_CONVERSION' => 'YES', + 'CLANG_WARN_COMMA' => 'YES', + 'CLANG_WARN_CONSTANT_CONVERSION' => 'YES', + 'CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS' => 'YES', + 'CLANG_WARN_DIRECT_OBJC_ISA_USAGE' => 'YES_ERROR', + 'CLANG_WARN_DOCUMENTATION_COMMENTS' => 'YES', + 'CLANG_WARN_EMPTY_BODY' => 'YES', + 'CLANG_WARN_ENUM_CONVERSION' => 'YES', + 'CLANG_WARN_INFINITE_RECURSION' => 'YES', + 'CLANG_WARN_INT_CONVERSION' => 'YES', + 'CLANG_WARN_NON_LITERAL_NULL_CONVERSION' => 'YES', + 'CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF' => 'YES', + 'CLANG_WARN_OBJC_LITERAL_CONVERSION' => 'YES', + 'CLANG_WARN_OBJC_ROOT_CLASS' => 'YES_ERROR', + 'CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER' => 'YES', + 'CLANG_WARN_RANGE_LOOP_ANALYSIS' => 'YES', + 'CLANG_WARN_STRICT_PROTOTYPES' => 'YES', + 'CLANG_WARN_SUSPICIOUS_MOVE' => 'YES', + 'CLANG_WARN_UNGUARDED_AVAILABILITY' => 'YES_AGGRESSIVE', + 'CLANG_WARN_UNREACHABLE_CODE' => 'YES', + 'COPY_PHASE_STRIP' => 'NO', + 'ENABLE_STRICT_OBJC_MSGSEND' => 'YES', + 'GCC_C_LANGUAGE_STANDARD' => 'gnu11', + 'GCC_NO_COMMON_BLOCKS' => 'YES', + 'GCC_WARN_64_TO_32_BIT_CONVERSION' => 'YES', + 'GCC_WARN_ABOUT_RETURN_TYPE' => 'YES_ERROR', + 'GCC_WARN_UNDECLARED_SELECTOR' => 'YES', + 'GCC_WARN_UNINITIALIZED_AUTOS' => 'YES_AGGRESSIVE', + 'GCC_WARN_UNUSED_FUNCTION' => 'YES', + 'GCC_WARN_UNUSED_VARIABLE' => 
'YES', + 'MTL_FAST_MATH' => 'YES', + 'PRODUCT_NAME' => '$(TARGET_NAME)', + 'SWIFT_VERSION' => '5.0', + }, + :release => { + 'DEBUG_INFORMATION_FORMAT' => 'dwarf-with-dsym', + 'ENABLE_NS_ASSERTIONS' => 'NO', + 'MTL_ENABLE_DEBUG_INFO' => 'NO', + 'SWIFT_COMPILATION_MODE' => 'wholemodule', + 'SWIFT_OPTIMIZATION_LEVEL' => '-O', + }.freeze, + :debug => { + 'DEBUG_INFORMATION_FORMAT' => 'dwarf', + 'ENABLE_TESTABILITY' => 'YES', + 'GCC_DYNAMIC_NO_PIC' => 'NO', + 'GCC_OPTIMIZATION_LEVEL' => '0', + 'GCC_PREPROCESSOR_DEFINITIONS' => ['DEBUG=1', '$(inherited)'], + 'MTL_ENABLE_DEBUG_INFO' => 'INCLUDE_SOURCE', + 'ONLY_ACTIVE_ARCH' => 'YES', + 'SWIFT_ACTIVE_COMPILATION_CONDITIONS' => 'DEBUG', + 'SWIFT_OPTIMIZATION_LEVEL' => '-Onone', + }.freeze, + }.freeze + + # @return [Hash] The corresponding numeric value of each copy build phase + # destination. + # + COPY_FILES_BUILD_PHASE_DESTINATIONS = { + :absolute_path => '0', + :products_directory => '16', + :wrapper => '1', + :resources => '7', # default + :executables => '6', + :java_resources => '15', + :frameworks => '10', + :shared_frameworks => '11', + :shared_support => '12', + :plug_ins => '13', + }.freeze + + # @return [Hash] The corresponding numeric value of each proxy type for + # PBXContainerItemProxy. + PROXY_TYPES = { + :native_target => '1', + :reference => '2', + }.freeze + + # @return [Array] The extensions which are associated with header files. + # + HEADER_FILES_EXTENSIONS = %w(.h .hh .hpp .ipp .tpp .hxx .def .inl .inc).freeze + + # @return [Array] The keywords Xcode use to identify a build setting can + # inherit values from a previous precedence level + INHERITED_KEYWORDS = %w( + $(inherited) + ${inherited} + ).freeze + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/differ.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/differ.rb new file mode 100644 index 0000000..33f634f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/differ.rb @@ -0,0 +1,239 @@ +module Xcodeproj + # Computes the recursive diff of Hashes, Array and other objects. + # + # Useful to compare two projects. Inspired from + # 'active_support/core_ext/hash/diff'. + # + # @example + # h1 = { :common => 'value', :changed => 'v1' } + # h2 = { :common => 'value', :changed => 'v2', :addition => 'new_value' } + # h1.recursive_diff(h2) == { + # :changed => { + # :self => 'v1', + # :other => 'v2' + # }, + # :addition => { + # :self => nil, + # :other => 'new_value' + # } + # } #=> true + # + # + # + # + module Differ + # Computes the recursive difference of two given values. + # + # @param [Object] value_1 + # The first value to compare. + # + # @param [Object] value_2 + # The second value to compare. + # + # @param [Object] key_1 + # The key for the diff of value_1. + # + # @param [Object] key_2 + # The key for the diff of value_2. + # + # @param [Object] id_key + # The key used to identify correspondent hashes in an array. + # + # @return [Hash] The diff + # @return [Nil] if the given values are equal. 
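To make the contract of `diff` concrete, here is a small sketch (editorial, with invented hashes) of the result when custom key names are supplied:

  before = { :name => 'App', :version => '1.0' }
  after  = { :name => 'App', :version => '2.0', :team => 'ABCD' }

  Xcodeproj::Differ.diff(before, after, :key_1 => 'before', :key_2 => 'after')
  # => { :version => { 'before' => '1.0', 'after' => '2.0' },
  #      :team    => { 'before' => nil,   'after' => 'ABCD' } }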
+ # + def self.diff(value_1, value_2, options = {}) + options[:key_1] ||= 'value_1' + options[:key_2] ||= 'value_2' + options[:id_key] ||= nil + + method = if value_1.class == value_2.class + case value_1 + when Hash then :hash_diff + when Array then :array_diff + else :generic_diff + end + else + :generic_diff + end + send(method, value_1, value_2, options) + end + + # Optimized for reducing the noise from the tree hash of projects + # + def self.project_diff(project_1, project_2, key_1 = 'project_1', key_2 = 'project_2') + project_1 = project_1.to_tree_hash unless project_1.is_a?(Hash) + project_2 = project_2.to_tree_hash unless project_2.is_a?(Hash) + options = { + :key_1 => key_1, + :key_2 => key_2, + :id_key => 'displayName', + } + diff(project_1, project_2, options) + end + + #-------------------------------------------------------------------------# + + public + + # @!group Type specific handlers + + # Computes the recursive difference of two hashes. + # + # @see diff + # + def self.hash_diff(value_1, value_2, options) + ensure_class(value_1, Hash) + ensure_class(value_2, Hash) + return nil if value_1 == value_2 + + result = {} + all_keys = (value_1.keys + value_2.keys).uniq + all_keys.each do |key| + key_value_1 = value_1[key] + key_value_2 = value_2[key] + diff = diff(key_value_1, key_value_2, options) + if diff + result[key] = diff if diff + end + end + if result.empty? + nil + else + result + end + end + + # Returns the recursive diff of two arrays. + # + # @see diff + # + def self.array_diff(value_1, value_2, options) + ensure_class(value_1, Array) + ensure_class(value_2, Array) + return nil if value_1 == value_2 + + new_objects_value_1 = (value_1 - value_2) + new_objects_value_2 = (value_2 - value_1) + return nil if value_1.empty? && value_2.empty? + + matched_diff = {} + if id_key = options[:id_key] + matched_value_1 = [] + matched_value_2 = [] + new_objects_value_1.each do |entry_value_1| + if entry_value_1.is_a?(Hash) + id_value = entry_value_1[id_key] + entry_value_2 = new_objects_value_2.find do |entry| + entry[id_key] == id_value + end + if entry_value_2 + matched_value_1 << entry_value_1 + matched_value_2 << entry_value_2 + diff = diff(entry_value_1, entry_value_2, options) + matched_diff[id_value] = diff if diff + end + end + end + + new_objects_value_1 -= matched_value_1 + new_objects_value_2 -= matched_value_2 + end + + if new_objects_value_1.empty? && new_objects_value_2.empty? + if matched_diff.empty? + nil + else + matched_diff + end + else + result = {} + result[options[:key_1]] = new_objects_value_1 unless new_objects_value_1.empty? + result[options[:key_2]] = new_objects_value_2 unless new_objects_value_2.empty? + result[:diff] = matched_diff unless matched_diff.empty? + result + end + end + + # Returns the diff of two generic objects. + # + # @see diff + # + def self.generic_diff(value_1, value_2, options) + return nil if value_1 == value_2 + + { + options[:key_1] => value_1, + options[:key_2] => value_2, + } + end + + #-------------------------------------------------------------------------# + + public + + # @!group Cleaning + + # Returns a copy of the hash where the given key is removed recursively. + # + # @param [Hash] hash + # The hash to clean + # + # @param [Object] key + # The key to remove. + # + # @return [Hash] A copy of the hash without the key. + # + def self.clean_hash(hash, key) + new_hash = hash.dup + self.clean_hash!(new_hash, key) + new_hash + end + + # Recursively cleans a key from the given hash. 
+ # + # @param [Hash] hash + # The hash to clean + # + # @param [Object] key + # The key to remove. + # + # @return [void] + # + def self.clean_hash!(hash, key) + hash.delete(key) + hash.each do |_, value| + case value + when Hash + clean_hash!(value, key) + when Array + value.each { |entry| clean_hash!(entry, key) if entry.is_a?(Hash) } + end + end + end + + #-------------------------------------------------------------------------# + + private + + # @! Helpers + + # Ensures that the given object belongs to the given class. + # + # @param [Object] object + # The object to check. + # + # @param [Class] klass + # the expected class of the object. + # + # @raise If the object doesn't belong to the given class. + # + # @return [void] + # + def self.ensure_class(object, klass) + raise "Wrong type `#{object.inspect}`" unless object.is_a?(klass) + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/gem_version.rb new file mode 100644 index 0000000..0fd42b7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/gem_version.rb @@ -0,0 +1,5 @@ +module Xcodeproj + # The version of the xcodeproj gem. + # + VERSION = '1.19.0'.freeze unless defined? Xcodeproj::VERSION +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/helper.rb new file mode 100644 index 0000000..baaa80d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/helper.rb @@ -0,0 +1,30 @@ +module Xcodeproj + module Helper + class TargetDiff + attr_reader :project, :target1, :target2 + + def initialize(project, target1_name, target2_name) + @project = project + unless @target1 = @project.targets.find { |target| target.name == target1_name } + raise ArgumentError, "Target 1 by name `#{target1_name}' not found in the project." + end + unless @target2 = @project.targets.find { |target| target.name == target2_name } + raise ArgumentError, "Target 1 by name `#{target2_name}' not found in the project." + end + end + + # @return [Array<PBXBuildFile>] A list of source files (that will be + # compiled) which are in ‘target 2’ but not in ‘target 1’. The + # list is sorted by file path. + # + def new_source_build_files + new = @target2.source_build_phase.files.reject do |target2_build_file| + @target1.source_build_phase.files.any? do |target1_build_file| + target1_build_file.file_ref.path == target2_build_file.file_ref.path + end + end + new.sort_by { |build_file| build_file.file_ref.path } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/plist.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/plist.rb new file mode 100644 index 0000000..50b0137 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/plist.rb @@ -0,0 +1,94 @@ +autoload :Nanaimo, 'nanaimo' +autoload :CFPropertyList, 'cfpropertylist' + +module Xcodeproj + # Provides support for loading and serializing property list files. + # + module Plist + # @return [Hash] Returns the native objects loaded from a property list + # file. + # + # @param [#to_s] path + # The path of the file. + # + def self.read_from_path(path) + path = path.to_s + unless File.exist?(path) + raise Informative, "The plist file at path `#{path}` doesn't exist." 
+ end + contents = File.read(path) + if file_in_conflict?(contents) + raise Informative, "The file `#{path}` is in a merge conflict." + end + case Nanaimo::Reader.plist_type(contents) + when :xml, :binary + CFPropertyList.native_types(CFPropertyList::List.new(:data => contents).value) + else + Nanaimo::Reader.new(contents).parse!.as_ruby + end + end + + # Serializes a hash as an XML property list file. + # + # @param [#to_hash] hash + # The hash to store. + # + # @param [#to_s] path + # The path of the file. + # + def self.write_to_path(hash, path) + if hash.respond_to?(:to_hash) + hash = hash.to_hash + else + raise TypeError, "The given `#{hash.inspect}` must respond " \ + "to #to_hash'." + end + + unless path.is_a?(String) || path.is_a?(Pathname) + raise TypeError, "The given `#{path}` must be a string or 'pathname'." + end + path = path.to_s + raise IOError, 'Empty path.' if path.empty? + + File.open(path, 'w') do |f| + plist = Nanaimo::Plist.new(hash, :xml) + Nanaimo::Writer::XMLWriter.new(plist, :pretty => true, :output => f, :strict => false).write + end + end + + # The known modules that can serialize plists. + # + KNOWN_IMPLEMENTATIONS = [] + + class << self + # @deprecated This method will be removed in 2.0 + # + # @return [Nil] + # + attr_accessor :implementation + end + + # @deprecated This method will be removed in 2.0 + # + # @return [Nil] + # + def self.autoload_implementation + end + + # @return [Bool] Checks whether there are merge conflicts in the file. + # + # @param [#to_s] contents + # The contents of the file. + # + def self.file_in_conflict?(contents) + conflict_regex = / + ^<{7}(?!<) # Exactly 7 left arrows at the beginning of the line + [\w\W]* # Anything + ^={7}(?!=) # Exactly 7 equality symbols at the beginning of the line + [\w\W]* # Anything + ^>{7}(?!>) # Exactly 7 right arrows at the beginning of the line + /xm + contents.match(conflict_regex) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project.rb new file mode 100644 index 0000000..39ca523 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project.rb @@ -0,0 +1,867 @@ +require 'atomos' +require 'fileutils' +require 'securerandom' + +require 'xcodeproj/project/object' +require 'xcodeproj/project/project_helper' +require 'xcodeproj/project/uuid_generator' +require 'xcodeproj/plist' + +module Xcodeproj + # This class represents a Xcode project document. + # + # It can be used to manipulate existing documents or even create new ones + # from scratch. + # + # An Xcode project document is a plist file where the root is a dictionary + # containing the following keys: + # + # - archiveVersion: the version of the document. + # - objectVersion: the version of the objects description. + # - classes: a key that apparently is always empty. + # - objects: a dictionary where the UUID of every object is associated to + # its attributes. + # - rootObject: the UUID identifier of the root object ({PBXProject}). + # + # Every object is in turn a dictionary that specifies an `isa` (the class of + # the object) and in accordance to it maintains a set attributes. Those + # attributes might reference one or more other objects by UUID. If the + # reference is a collection, it is ordered. + # + # The {Project} API returns instances of {AbstractObject} which wrap the + # objects described in the Xcode project document. 
All the attributes types + # are preserved from the plist, except for the relationships which are + # replaced with objects instead of UUIDs. + # + # An object might be referenced by multiple objects, an when no other object + # is references it, it becomes unreachable (the root object is referenced by + # the project itself). Xcodeproj takes care of adding and removing those + # objects from the `objects` dictionary so the project is always in a + # consistent state. + # + class Project + include Object + + # @return [Pathname] the path of the project. + # + attr_reader :path + + # @return [Pathname] the directory of the project + # + attr_reader :project_dir + + # @param [Pathname, String] path @see path + # The path provided will be expanded to an absolute path. + # @param [Bool] skip_initialization + # Wether the project should be initialized from scratch. + # @param [Int] object_version + # Object version to use for serialization, defaults to Xcode 3.2 compatible. + # + # @example Creating a project + # Project.new("path/to/Project.xcodeproj") + # + # @note When initializing the project, Xcodeproj mimics the Xcode behaviour + # including the setup of a debug and release configuration. If you want a + # clean project without any configurations, you should override the + # `initialize_from_scratch` method to not add these configurations and + # manually set the object version. + # + def initialize(path, skip_initialization = false, object_version = Constants::DEFAULT_OBJECT_VERSION) + @path = Pathname.new(path).expand_path + @project_dir = @path.dirname + @objects_by_uuid = {} + @generated_uuids = [] + @available_uuids = [] + @dirty = true + unless skip_initialization.is_a?(TrueClass) || skip_initialization.is_a?(FalseClass) + raise ArgumentError, '[Xcodeproj] Initialization parameter expected to ' \ + "be a boolean #{skip_initialization}" + end + unless skip_initialization + initialize_from_scratch + @object_version = object_version.to_s + unless Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION.key?(object_version) + raise ArgumentError, "[Xcodeproj] Unable to find compatibility version string for object version `#{object_version}`." + end + root_object.compatibility_version = Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION[object_version] + end + end + + # Opens the project at the given path. + # + # @param [Pathname, String] path + # The path to the Xcode project document (xcodeproj). + # + # @raise If the project versions are more recent than the ones know to + # Xcodeproj to prevent it from corrupting existing projects. + # Naturally, this would never happen with a project generated by + # xcodeproj itself. + # + # @raise If it can't find the root object. This means that the project is + # malformed. + # + # @example Opening a project + # Project.open("path/to/Project.xcodeproj") + # + def self.open(path) + path = Pathname.pwd + path + unless Pathname.new(path).exist? + raise "[Xcodeproj] Unable to open `#{path}` because it doesn't exist." + end + project = new(path, true) + project.send(:initialize_from_file) + project + end + + # @return [String] the archive version. + # + attr_reader :archive_version + + # @return [Hash] an dictionary whose purpose is unknown. + # + attr_reader :classes + + # @return [String] the objects version. + # + attr_reader :object_version + + # @return [Hash{String => AbstractObject}] A hash containing all the + # objects of the project by UUID. + # + attr_reader :objects_by_uuid + + # @return [PBXProject] the root object of the project. 
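A hedged end-to-end sketch of the accessors documented here (the path is invented, and the shown values are only indicative):

  project = Xcodeproj::Project.open('path/to/App.xcodeproj')
  project.object_version        # e.g. '54' for a project last touched by Xcode 12
  project.objects_by_uuid.size  # every object in the pbxproj, keyed by UUID
  project.root_object.isa       # => 'PBXProject'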
+ # + attr_reader :root_object + + # A fast way to see if two {Project} instances refer to the same projects on + # disk. Use this over {#eql?} when you do not need to compare the full data. + # + # This shallow comparison was chosen as the (common) `==` implementation, + # because it was too easy to introduce changes into the Xcodeproj code-base + # that were slower than O(1). + # + # @return [Boolean] whether or not the two `Project` instances refer to the + # same projects on disk, determined solely by {#path} and + # `root_object.uuid` equality. + # + # @todo If ever needed, we could also compare `uuids.sort` instead. + # + def ==(other) + other && path == other.path && root_object.uuid == other.root_object.uuid + end + + # Compares the project to another one, or to a plist representation. + # + # @note This operation can be extremely expensive, because it converts a + # `Project` instance to a hash, and should _only_ ever be used to + # determine wether or not the data contents of two `Project` instances + # are completely equal. + # + # To simply determine wether or not two {Project} instances refer to + # the same projects on disk, use the {#==} method instead. + # + # @param [#to_hash] other the object to compare. + # + # @return [Boolean] whether the project is equivalent to the given object. + # + def eql?(other) + other.respond_to?(:to_hash) && to_hash == other.to_hash + end + + def to_s + "#<#{self.class}> path:`#{path}` UUID:`#{root_object.uuid}`" + end + + alias_method :inspect, :to_s + + public + + # @!group Initialization + #-------------------------------------------------------------------------# + + # Initializes the instance from scratch. + # + def initialize_from_scratch + @archive_version = Constants::LAST_KNOWN_ARCHIVE_VERSION.to_s + @classes = {} + + root_object.remove_referrer(self) if root_object + @root_object = new(PBXProject) + root_object.add_referrer(self) + + root_object.main_group = new(PBXGroup) + root_object.product_ref_group = root_object.main_group.new_group('Products') + + config_list = new(XCConfigurationList) + root_object.build_configuration_list = config_list + config_list.default_configuration_name = 'Release' + config_list.default_configuration_is_visible = '0' + add_build_configuration('Debug', :debug) + add_build_configuration('Release', :release) + + new_group('Frameworks') + end + + # Initializes the instance with the project stored in the `path` attribute. + # + def initialize_from_file + pbxproj_path = path + 'project.pbxproj' + plist = Plist.read_from_path(pbxproj_path.to_s) + root_object.remove_referrer(self) if root_object + @root_object = new_from_plist(plist['rootObject'], plist['objects'], self) + @archive_version = plist['archiveVersion'] + @object_version = plist['objectVersion'] + @classes = plist['classes'] || {} + @dirty = false + + unless root_object + raise "[Xcodeproj] Unable to find a root object in #{pbxproj_path}." + end + + if archive_version.to_i > Constants::LAST_KNOWN_ARCHIVE_VERSION + raise '[Xcodeproj] Unknown archive version.' + end + + if object_version.to_i > Constants::LAST_KNOWN_OBJECT_VERSION + raise '[Xcodeproj] Unknown object version.' 
+ end + + # Projects can have product_ref_groups that are not listed in the main_groups["Products"] + root_object.product_ref_group ||= root_object.main_group['Products'] || root_object.main_group.new_group('Products') + end + + public + + # @!group Plist serialization + #-------------------------------------------------------------------------# + + # Creates a new object from the given UUID and `objects` hash (of a plist). + # + # The method sets up any relationship of the new object, generating the + # destination object(s) if not already present in the project. + # + # @note This method is used to generate the root object + # from a plist. Subsequent invocation are called by the + # {AbstractObject#configure_with_plist}. Clients of {Xcodeproj} are + # not expected to call this method. + # + # @param [String] uuid + # The UUID of the object that needs to be generated. + # + # @param [Hash {String => Hash}] objects_by_uuid_plist + # The `objects` hash of the plist representation of the project. + # + # @param [Boolean] root_object + # Whether the requested object is the root object and needs to be + # retained by the project before configuration to add it to the + # `objects` hash and avoid infinite loops. + # + # @return [AbstractObject] the new object. + # + # @visibility private. + # + def new_from_plist(uuid, objects_by_uuid_plist, root_object = false) + attributes = objects_by_uuid_plist[uuid] + if attributes + klass = Object.const_get(attributes['isa']) + object = klass.new(self, uuid) + objects_by_uuid[uuid] = object + object.add_referrer(self) if root_object + object.configure_with_plist(objects_by_uuid_plist) + object + end + end + + # @return [Hash] The hash representation of the project. + # + def to_hash + plist = {} + objects_dictionary = {} + objects.each { |obj| objects_dictionary[obj.uuid] = obj.to_hash } + plist['objects'] = objects_dictionary + plist['archiveVersion'] = archive_version.to_s + plist['objectVersion'] = object_version.to_s + plist['classes'] = classes + plist['rootObject'] = root_object.uuid + plist + end + + def to_ascii_plist + plist = {} + objects_dictionary = {} + objects + .sort_by { |o| [o.isa, o.uuid] } + .each do |obj| + key = Nanaimo::String.new(obj.uuid, obj.ascii_plist_annotation) + value = obj.to_ascii_plist.tap { |a| a.annotation = nil } + objects_dictionary[key] = value + end + plist['archiveVersion'] = archive_version.to_s + plist['classes'] = classes + plist['objectVersion'] = object_version.to_s + plist['objects'] = objects_dictionary + plist['rootObject'] = Nanaimo::String.new(root_object.uuid, root_object.ascii_plist_annotation) + Nanaimo::Plist.new.tap { |p| p.root_object = plist } + end + + # Converts the objects tree to a hash substituting the hash + # of the referenced to their UUID reference. As a consequence the hash of + # an object might appear multiple times and the information about their + # uniqueness is lost. + # + # This method is designed to work in conjunction with {Hash#recursive_diff} + # to provide a complete, yet readable, diff of two projects *not* affected + # by differences in UUIDs. + # + # @return [Hash] a hash representation of the project different from the + # plist one. 
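Tying this back to the Differ shown earlier, a rough sketch (editorial, paths invented) of comparing two projects through their tree hashes; per the documentation above, differences in UUIDs alone do not show up in the result:

  a = Xcodeproj::Project.open('path/to/A.xcodeproj')
  b = Xcodeproj::Project.open('path/to/B.xcodeproj')
  Xcodeproj::Differ.project_diff(a, b, 'a', 'b')
  # => nil when the trees are equivalent, otherwise a nested structure whose
  #    leaves pair the differing values under the 'a' and 'b' keys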
+ # + def to_tree_hash + hash = {} + objects_dictionary = {} + hash['objects'] = objects_dictionary + hash['archiveVersion'] = archive_version.to_s + hash['objectVersion'] = object_version.to_s + hash['classes'] = classes + hash['rootObject'] = root_object.to_tree_hash + hash + end + + # @return [Hash{String => Hash}] A hash suitable to display the project + # to the user. + # + def pretty_print + build_configurations = root_object.build_configuration_list.build_configurations + { + 'File References' => root_object.main_group.pretty_print.values.first, + 'Targets' => root_object.targets.map(&:pretty_print), + 'Build Configurations' => build_configurations.sort_by(&:name).map(&:pretty_print), + } + end + + # Serializes the project in the xcodeproj format using the path provided + # during initialization or the given path (`xcodeproj` file). If a path is + # provided file references depending on the root of the project are not + # updated automatically, thus clients are responsible to perform any needed + # modification before saving. + # + # @param [String, Pathname] path + # The optional path where the project should be saved. + # + # @example Saving a project + # project.save + # project.save + # + # @return [void] + # + def save(save_path = nil) + save_path ||= path + @dirty = false if save_path == path + FileUtils.mkdir_p(save_path) + file = File.join(save_path, 'project.pbxproj') + Atomos.atomic_write(file) do |f| + Nanaimo::Writer::PBXProjWriter.new(to_ascii_plist, :pretty => true, :output => f, :strict => false).write + end + end + + # Marks the project as dirty, that is, modified from what is on disk. + # + # @return [void] + # + def mark_dirty! + @dirty = true + end + + # @return [Boolean] Whether this project has been modified since read from + # disk or saved. + # + def dirty? + @dirty == true + end + + # Replaces all the UUIDs in the project with deterministic MD5 checksums. + # + # @note The current sorting of the project is taken into account when + # generating the new UUIDs. + # + # @note This method should only be used for entirely machine-generated + # projects, as true UUIDs are useful for tracking changes in the + # project. + # + # @return [void] + # + def predictabilize_uuids + UUIDGenerator.new([self]).generate! + end + + # Replaces all the UUIDs in the list of provided projects with deterministic MD5 checksums. + # + # @param [Array<Project>] projects + # + # @note The current sorting of the project is taken into account when + # generating the new UUIDs. + # + # @note This method should only be used for entirely machine-generated + # projects, as true UUIDs are useful for tracking changes in the + # project. + # + # @return [void] + # + def self.predictabilize_uuids(projects) + UUIDGenerator.new(projects).generate! + end + + public + + # @!group Creating objects + #-------------------------------------------------------------------------# + + # Creates a new object with a suitable UUID. + # + # The object is only configured with the default values of the `:simple` + # attributes, for this reason it is better to use the convenience methods + # offered by the {AbstractObject} subclasses or by this class. + # + # @param [Class, String] klass + # The concrete subclass of AbstractObject for new object or its + # ISA. + # + # @return [AbstractObject] the new object. 
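A minimal sketch contrasting this low-level constructor with the convenience API (the group name is invented, and `project` is assumed to be an open Xcodeproj::Project); note that the new object only becomes part of the `objects` hash once something references it:

  group = project.new('PBXGroup')        # bare object with a fresh UUID and default attributes
  group.name = 'Generated'
  project.main_group.children << group   # adding the reference wires it into the project

  # the equivalent convenience call does both steps at once:
  project.main_group.new_group('Generated')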
+ # + def new(klass) + if klass.is_a?(String) + klass = Object.const_get(klass) + end + object = klass.new(self, generate_uuid) + object.initialize_defaults + object + end + + # Generates a UUID unique for the project. + # + # @note UUIDs are not guaranteed to be generated unique because we need + # to trim the ones generated in the xcodeproj extension. + # + # @note Implementation detail: as objects usually are created serially + # this method creates a batch of UUID and stores the not colliding + # ones, so the search for collisions with known UUIDS (a + # performance bottleneck) is performed less often. + # + # @return [String] A UUID unique to the project. + # + def generate_uuid + generate_available_uuid_list while @available_uuids.empty? + @available_uuids.shift + end + + # @return [Array<String>] the list of all the generated UUIDs. + # + # @note Used for checking new UUIDs for duplicates with UUIDs already + # generated but used for objects which are not yet part of the + # `objects` hash but which might be added at a later time. + # + attr_reader :generated_uuids + + # Pre-generates the given number of UUIDs. Useful for optimizing + # performance when the rough number of objects that will be created is + # known in advance. + # + # @param [Integer] count + # the number of UUIDs that should be generated. + # + # @note This method might generated a minor number of uniques UUIDs than + # the given count, because some might be duplicated a thus will be + # discarded. + # + # @return [void] + # + def generate_available_uuid_list(count = 100) + new_uuids = (0..count).map { SecureRandom.hex(12).upcase } + uniques = (new_uuids - (@generated_uuids + uuids)) + @generated_uuids += uniques + @available_uuids += uniques + end + + public + + # @!group Convenience accessors + #-------------------------------------------------------------------------# + + # @return [Array<AbstractObject>] all the objects of the project. + # + def objects + objects_by_uuid.values + end + + # @return [Array<String>] all the UUIDs of the project. + # + def uuids + objects_by_uuid.keys + end + + # @return [Array<AbstractObject>] all the objects of the project with a + # given ISA. + # + def list_by_class(klass) + objects.select { |o| o.class == klass } + end + + # @return [PBXGroup] the main top-level group. + # + def main_group + root_object.main_group + end + + # @return [ObjectList<PBXGroup>] a list of all the groups in the + # project. + # + def groups + main_group.groups + end + + # Returns a group at the given subpath relative to the main group. + # + # @example + # frameworks = project['Frameworks'] + # frameworks.name #=> 'Frameworks' + # main_group.children.include? frameworks #=> True + # + # @param [String] group_path @see MobileCoreServices + # + # @return [PBXGroup] the group at the given subpath. + # + def [](group_path) + main_group[group_path] + end + + # @return [ObjectList<PBXFileReference>] a list of all the files in the + # project. + # + def files + objects.grep(PBXFileReference) + end + + # Returns the file reference for the given absolute path. + # + # @param [#to_s] absolute_path + # The absolute path of the file whose reference is needed. + # + # @return [PBXFileReference] The file reference. + # @return [Nil] If no file reference could be found. + # + def reference_for_path(absolute_path) + absolute_pathname = Pathname.new(absolute_path) + + unless absolute_pathname.absolute? 
+ raise ArgumentError, "Paths must be absolute #{absolute_path}" + end + + objects.find do |child| + child.isa == 'PBXFileReference' && child.real_path == absolute_pathname + end + end + + # @return [ObjectList<AbstractTarget>] A list of all the targets in the + # project. + # + def targets + root_object.targets + end + + # @return [ObjectList<PBXNativeTarget>] A list of all the targets in the + # project excluding aggregate targets. + # + def native_targets + root_object.targets.grep(PBXNativeTarget) + end + + # Checks the native target for any targets in the project + # that are dependent on the native target and would be + # embedded in it at build time + # + # @param [PBXNativeTarget] native target to check for + # embedded targets + # + # + # @return [Array<PBXNativeTarget>] A list of all targets that + # are embedded in the passed in target + # + def embedded_targets_in_native_target(native_target) + native_targets.select do |target| + host_targets_for_embedded_target(target).any? { |host| host.uuid == native_target.uuid } + end + end + + # Returns the native targets, in which the embedded target is + # embedded. This works by traversing the targets to find those + # where the target is a dependency. + # + # @param [PBXNativeTarget] native target that might be embedded + # in another target + # + # @return [Array<PBXNativeTarget>] the native targets that host the + # embedded target + # + def host_targets_for_embedded_target(embedded_target) + native_targets.select do |native_target| + ((embedded_target.uuid != native_target.uuid) && + (native_target.dependencies.map(&:native_target_uuid).include? embedded_target.uuid)) + end + end + + # @return [PBXGroup] The group which holds the product file references. + # + def products_group + root_object.product_ref_group + end + + # @return [ObjectList<PBXFileReference>] A list of the product file + # references. + # + def products + products_group.children + end + + # @return [PBXGroup] the `Frameworks` group creating it if necessary. + # + def frameworks_group + main_group['Frameworks'] || main_group.new_group('Frameworks') + end + + # @return [ObjectList<XCConfigurationList>] The build configuration list of + # the project. + # + def build_configuration_list + root_object.build_configuration_list + end + + # @return [ObjectList<XCBuildConfiguration>] A list of project wide + # build configurations. + # + def build_configurations + root_object.build_configuration_list.build_configurations + end + + # Returns the build settings of the project wide build configuration with + # the given name. + # + # @param [String] name + # The name of a project wide build configuration. + # + # @return [Hash] The build settings. + # + def build_settings(name) + root_object.build_configuration_list.build_settings(name) + end + + public + + # @!group Helpers + #-------------------------------------------------------------------------# + + # Creates a new file reference in the main group. + # + # @param @see PBXGroup#new_file + # + # @return [PBXFileReference] the new file. + # + def new_file(path, source_tree = :group) + main_group.new_file(path, source_tree) + end + + # Creates a new group at the given subpath of the main group. + # + # @param @see PBXGroup#new_group + # + # @return [PBXGroup] the new group. + # + def new_group(name, path = nil, source_tree = :group) + main_group.new_group(name, path, source_tree) + end + + # Creates a new target and adds it to the project. 
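Pulling the convenience helpers above together in one hedged sketch (continuing with the `project` handle from the earlier sketches; file, group and configuration names are invented, and the setting value is only indicative):

  project.new_file('Sources/AppDelegate.swift')  # file reference added to the main group
  project.new_group('Generated', 'Generated')    # group backed by a 'Generated' folder
  project.frameworks_group                       # finds or creates the 'Frameworks' group
  project.targets.map(&:name)                    # names of every target, aggregates included
  project.build_settings('Release')['SWIFT_OPTIMIZATION_LEVEL']  # e.g. '-O'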
+ # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings, and the + # appropriate Framework according to the platform is added to to its + # Frameworks phase. + # + # @param [Symbol] type + # the type of target. Can be `:application`, `:framework`, + # `:dynamic_library` or `:static_library`. + # + # @param [String] name + # the name of the target product. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [PBXGroup] product_group + # the product group, where to add to a file reference of the + # created target. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. + # + # @return [PBXNativeTarget] the target. + # + def new_target(type, name, platform, deployment_target = nil, product_group = nil, language = nil) + product_group ||= products_group + ProjectHelper.new_target(self, type, name, platform, deployment_target, product_group, language) + end + + # Creates a new resource bundles target and adds it to the project. + # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings + # + # @param [String] name + # the name of the resources bundle. + # + # @param [Symbol] platform + # the platform of the resources bundle. Can be `:ios` or `:osx`. + # + # @return [PBXNativeTarget] the target. + # + def new_resources_bundle(name, platform, product_group = nil) + product_group ||= products_group + ProjectHelper.new_resources_bundle(self, name, platform, product_group) + end + + # Creates a new target and adds it to the project. + # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings, and the + # appropriate Framework according to the platform is added to to its + # Frameworks phase. + # + # @param [String] name + # the name of the target. + # + # @param [Array<AbstractTarget>] target_dependencies + # targets, which should be added as dependencies. + # + # @param [Symbol] platform + # the platform of the aggregate target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @return [PBXNativeTarget] the target. + # + def new_aggregate_target(name, target_dependencies = [], platform = nil, deployment_target = nil) + ProjectHelper.new_aggregate_target(self, name, platform, deployment_target).tap do |aggregate_target| + target_dependencies.each do |dep| + aggregate_target.add_dependency(dep) + end + end + end + + # Adds a new build configuration to the project and populates its with + # default settings according to the provided type. + # + # @param [String] name + # The name of the build configuration. + # + # @param [Symbol] type + # The type of the build configuration used to populate the build + # settings, must be :debug or :release. + # + # @return [XCBuildConfiguration] The new build configuration. 
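A hedged sketch of the target and configuration helpers documented above (target names, platform and deployment target are invented; `project` is assumed to be an open Xcodeproj::Project):

  app      = project.new_target(:application, 'App', :ios, '14.0')
  core     = project.new_target(:framework, 'AppCore', :ios, '14.0', nil, :swift)
  umbrella = project.new_aggregate_target('Everything', [app, core], :ios, '14.0')
  project.add_build_configuration('Staging', :release)  # returns the existing configuration if one by that name already exists
  project.save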
+ # + def add_build_configuration(name, type) + build_configuration_list = root_object.build_configuration_list + if build_configuration = build_configuration_list[name] + build_configuration + else + build_configuration = new(XCBuildConfiguration) + build_configuration.name = name + common_settings = Constants::PROJECT_DEFAULT_BUILD_SETTINGS + settings = ProjectHelper.deep_dup(common_settings[:all]) + settings.merge!(ProjectHelper.deep_dup(common_settings[type])) + build_configuration.build_settings = settings + build_configuration_list.build_configurations << build_configuration + build_configuration + end + end + + # Sorts the project. + # + # @param [Hash] options + # the sorting options. + # @option options [Symbol] :groups_position + # the position of the groups can be either `:above` or `:below`. + # + # @return [void] + # + def sort(options = nil) + root_object.sort_recursively(options) + end + + public + + # @!group Schemes + #-------------------------------------------------------------------------# + + # Get list of shared schemes in project + # + # @param [String] path + # project path + # + # @return [Array] + # + def self.schemes(project_path) + schemes = Dir[File.join(project_path, 'xcshareddata', 'xcschemes', '*.xcscheme')].map do |scheme| + File.basename(scheme, '.xcscheme') + end + schemes << File.basename(project_path, '.xcodeproj') if schemes.empty? + schemes + end + + # Recreates the user schemes of the project from scratch (removes the + # folder) and optionally hides them. + # + # @param [Bool] visible + # Whether the schemes should be visible or hidden. + # + # @return [void] + # + def recreate_user_schemes(visible = true) + schemes_dir = XCScheme.user_data_dir(path) + FileUtils.rm_rf(schemes_dir) + FileUtils.mkdir_p(schemes_dir) + + xcschememanagement = {} + xcschememanagement['SchemeUserState'] = {} + xcschememanagement['SuppressBuildableAutocreation'] = {} + + targets.each do |target| + scheme = XCScheme.new + + test_target = target if target.respond_to?(:test_target_type?) && target.test_target_type? + launch_target = target.respond_to?(:launchable_target_type?) && target.launchable_target_type? + scheme.configure_with_targets(target, test_target, :launch_target => launch_target) + + yield scheme, target if block_given? + scheme.save_as(path, target.name, false) + xcschememanagement['SchemeUserState']["#{target.name}.xcscheme"] = {} + xcschememanagement['SchemeUserState']["#{target.name}.xcscheme"]['isShown'] = visible + end + + xcschememanagement_path = schemes_dir + 'xcschememanagement.plist' + Plist.write_to_path(xcschememanagement, xcschememanagement_path) + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/case_converter.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/case_converter.rb new file mode 100644 index 0000000..1f2fe1d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/case_converter.rb @@ -0,0 +1,90 @@ +module Xcodeproj + class Project + module Object + # Converts between camel case names used in the xcodeproj plist files + # and the ruby symbols used to represent them. + # + module CaseConverter + # @return [String] The plist equivalent of the given Ruby name. + # + # @param [Symbol, String] name + # The name to convert + # + # @param [Symbol, Nil] type + # The type of conversion. 
Pass `nil` for normal camel case and + # `:lower` for camel case starting with a lower case letter. + # + # @example + # CaseConverter.convert_to_plist(:project_ref) #=> ProjectRef + # + def self.convert_to_plist(name, type = nil) + case name + when :remote_global_id_string + 'remoteGlobalIDString' + else + if type == :lower + cache = plist_cache[:lower] ||= {} + cache[name] ||= camelize(name, :uppercase_first_letter => false) + else + cache = plist_cache[:normal] ||= {} + cache[name] ||= camelize(name) + end + end + end + + # The following two methods are taken from activesupport, + # https://github.com/rails/rails/blob/v5.0.2/activesupport/lib/active_support/inflector/methods.rb + # all credit for them goes to the original authors. + # + # The code is used under the MIT license. + + def self.camelize(term, uppercase_first_letter: true) + string = term.to_s + string = if uppercase_first_letter + string.sub(/^[a-z\d]*/, &:capitalize) + else + string.sub(/^(?:(?=a)b(?=\b|[A-Z_])|\w)/, &:downcase) + end + string.gsub!(%r{(?:_|(/))([a-z\d]*)}i) { "#{Regexp.last_match(1)}#{Regexp.last_match(2).capitalize}" } + string.gsub!('/'.freeze, '::'.freeze) + string + end + private_class_method :camelize + + def self.underscore(camel_cased_word) + return camel_cased_word unless camel_cased_word =~ /[A-Z-]|::/ + word = camel_cased_word.to_s.gsub('::'.freeze, '/'.freeze) + word.gsub!(/(?:(?<=([A-Za-z\d]))|\b)((?=a)b)(?=\b|[^a-z])/) { "#{Regexp.last_match(1) && '_'.freeze}#{Regexp.last_match(2).downcase}" } + word.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2'.freeze) + word.gsub!(/([a-z\d])([A-Z])/, '\1_\2'.freeze) + word.tr!('-'.freeze, '_'.freeze) + word.downcase! + word + end + private_class_method :underscore + + # @return [Symbol] The Ruby equivalent of the given plist name. + # + # @param [String] name + # The name to convert + # + # @example + # CaseConverter.convert_to_ruby('ProjectRef') #=> :project_ref + # + def self.convert_to_ruby(name) + underscore(name.to_s).to_sym + end + + # @return [Hash] A cache for the conversion to the Plist format. + # + # @note A cache is used because this operation is performed for each + # attribute of the project when it is saved and caching it has + # an important performance benefit. + # + def self.plist_cache + @plist_cache ||= {} + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object.rb new file mode 100644 index 0000000..9231ef8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object.rb @@ -0,0 +1,534 @@ +module Xcodeproj + class Project + # This is the namespace in which all the classes that wrap the objects in + # a Xcode project reside. + # + # The base class from which all classes inherit is AbstractObject. + # + # If you need to deal with these classes directly, it's possible to include + # this namespace into yours, making it unnecessary to prefix them with + # Xcodeproj::Project::Object. + # + # @example + # class SourceFileSorter + # include Xcodeproj::Project::Object + # end + # + module Object + # @abstract + # + # This is the base class of all object types that can exist in a Xcode + # project. As such it provides common behavior, but you can only use + # instances of subclasses of AbstractObject, because this class does + # not exist in actual Xcode projects. + # + # Almost all the methods implemented by this class are not expected to be + # used by {Xcodeproj} clients. 
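Stepping back to the CaseConverter shown a little earlier, a small hedged sketch of the round trip between plist names and Ruby symbols (the short alias constant is editorial):

  CC = Xcodeproj::Project::Object::CaseConverter
  CC.convert_to_plist(:project_ref)              # => 'ProjectRef'
  CC.convert_to_plist(:build_settings, :lower)   # => 'buildSettings'
  CC.convert_to_plist(:remote_global_id_string)  # => 'remoteGlobalIDString' (special-cased)
  CC.convert_to_ruby('buildConfigurationList')   # => :build_configuration_list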
+ # + # Subclasses should clearly identify which methods reflect the xcodeproj + # document model and which methods are offered as convenience. Object + # lists always represent a relationship to many of the model while simple + # arrays represent dynamically generated values offered a convenience for + # clients. + # + class AbstractObject + # @!group AbstractObject + + # @return [String] the ISA of the class. + # + def self.isa + @isa ||= name.split('::').last + end + + # @return [String] the object's class name. + # + attr_reader :isa + + # It is not recommended to instantiate objects through this + # constructor. To create objects manually is easier to use + # the {Project#new}. Otherwise, it is possible to use the convenience + # methods offered by {Xcodeproj} which take care of configuring the + # objects for common usage cases. + # + # @param [Project] project + # the project that will host the object. + # + # @param [String] uuid + # the UUID of the new object. + # + # @visibility private + # + def initialize(project, uuid) + @project = project + @uuid = uuid + @isa = self.class.isa + @referrers = [] + unless @isa =~ /^(PBX|XC)/ + raise "[Xcodeproj] Attempt to initialize an abstract class (#{self.class})." + end + end + + # Initializes the object with the default values of simple attributes. + # + # This method is called by the {Project#new} and is not performed on + # initialization to prevent adding defaults to objects generated by a + # plist. + # + # @return [void] + # + # @visibility private + # + def initialize_defaults + simple_attributes.each { |a| a.set_default(self) } + end + + # @return [String] the object universally unique identifier. + # + attr_reader :uuid + + # @return [Project] the project that owns the object. + # + attr_reader :project + + # Removes the object from the project by asking to its referrers to + # remove the reference to it. + # + # @note The root object is owned by the project and should not be + # manipulated with this method. + # + # @return [void] + # + def remove_from_project + mark_project_as_dirty! + project.objects_by_uuid.delete(uuid) + + referrers.dup.each do |referrer| + referrer.remove_reference(self) + end + + to_one_attributes.each do |attrb| + object = attrb.get_value(self) + object.remove_referrer(self) if object + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.clear + end + + unless referrers.count == 0 + raise "[Xcodeproj] BUG: #{self} should have no referrers instead" \ + "the following objects are still referencing it #{referrers}" + end + end + + # Returns the value of the name attribute or returns a generic name for + # the object. + # + # @note Not all concrete classes implement the name attribute and this + # method prevents from overriding it in plist. + # + # @return [String] a name for the object. + # + def display_name + declared_name = name if self.respond_to?(:name) + if declared_name && !declared_name.empty? + declared_name + else + isa.gsub(/^(PBX|XC)/, '') + end + end + alias_method :to_s, :display_name + + # Sorts the to many attributes of the object according to the display + # name. + # + def sort(_options = nil) + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.sort! do |x, y| + x.display_name.downcase <=> y.display_name.downcase + end + end + end + + # Sorts the object and the objects that it references. + # + # @param [Hash] options + # the sorting options. 
+ # @option options [Symbol] :groups_position + # the position of the groups can be either `:above` or + # `:below`. + # + # @note Some objects may in turn refer back to objects higher in the + # object tree, which will lead to stack level deep errors. + # These objects should **not** try to perform a recursive sort, + # also because these objects would get sorted through other + # paths in the tree anyways. + # + # At the time of writing the only known case is + # `PBXTargetDependency`. + # + def sort_recursively(options = nil) + to_one_attributes.each do |attrb| + value = attrb.get_value(self) + value.sort_recursively(options) if value + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.each { |entry| entry.sort_recursively(options) } + end + + sort(options) + end + + # @!group Reference counting + + # @return [Array<ObjectList>] The list of the objects that have a + # reference to this object. + # + # @visibility private + # + attr_reader :referrers + + # Informs the object that another object is referencing it. If the + # object had no previous references it is added to the project UUIDs + # hash. + # + # @return [void] + # + # @visibility private + # + def add_referrer(referrer) + @referrers << referrer + @project.objects_by_uuid[uuid] = self + end + + # Informs the object that another object stopped referencing it. If the + # object has no other references it is removed from the project UUIDs + # hash because it is unreachable. + # + # @return [void] + # + # @visibility private + # + def remove_referrer(referrer) + @referrers.delete(referrer) + if @referrers.count == 0 + mark_project_as_dirty! + @project.objects_by_uuid.delete(uuid) + end + end + + # Removes all the references to a given object. + # + # @return [void] + # + # @visibility private + # + def remove_reference(object) + to_one_attributes.each do |attrb| + value = attrb.get_value(self) + attrb.set_value(self, nil) if value.equal?(object) + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.delete(object) + end + + references_by_keys_attributes.each do |attrb| + list = attrb.get_value(self) + list.each { |dictionary| dictionary.remove_reference(object) } + end + end + + # Marks the project that this object belongs to as having been modified. + # + # @return [void] + # + # @visibility private + # + def mark_project_as_dirty! + project.mark_dirty! + end + + #---------------------------------------------------------------------# + + public + + # @!group Plist related methods + + # Configures the object with the objects hash from a plist. + # + # **Implementation detail**: it is important that the attributes for a + # given concrete class are unique because the value is removed from the + # array at each iteration and duplicate would result in nil values. 
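# Usage sketch for the reference counting described above: removing an object
# detaches it from every referrer and from the project's UUID registry. The
# project path and the group name are hypothetical.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
group = project.main_group['Deprecated']
if group
  group.referrers.size                     # object lists currently pointing at the group
  group.remove_from_project                # clears the references and the registry entry
  project.objects_by_uuid.key?(group.uuid) #=> false afterwards
end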
+ # + # @return [void] + # + # @visibility private + # + def configure_with_plist(objects_by_uuid_plist) + object_plist = objects_by_uuid_plist[uuid].dup + + unless object_plist['isa'] == isa + raise "[Xcodeproj] Attempt to initialize `#{isa}` from plist with " \ + "different isa `#{object_plist}`" + end + object_plist.delete('isa') + + simple_attributes.each do |attrb| + attrb.set_value(self, object_plist[attrb.plist_name]) + object_plist.delete(attrb.plist_name) + end + + to_one_attributes.each do |attrb| + ref_uuid = object_plist[attrb.plist_name] + if ref_uuid + ref = object_with_uuid(ref_uuid, objects_by_uuid_plist, attrb) + attrb.set_value(self, ref) if ref + end + object_plist.delete(attrb.plist_name) + end + + to_many_attributes.each do |attrb| + ref_uuids = object_plist[attrb.plist_name] || [] + list = attrb.get_value(self) + ref_uuids.each do |uuid| + ref = object_with_uuid(uuid, objects_by_uuid_plist, attrb) + list << ref if ref + end + object_plist.delete(attrb.plist_name) + end + + references_by_keys_attributes.each do |attrb| + hashes = object_plist[attrb.plist_name] || {} + list = attrb.get_value(self) + hashes.each do |hash| + dictionary = ObjectDictionary.new(attrb, self) + hash.each do |key, uuid| + ref = object_with_uuid(uuid, objects_by_uuid_plist, attrb) + dictionary[key] = ref if ref + end + list << dictionary + end + object_plist.delete(attrb.plist_name) + end + + unless object_plist.empty? + UI.warn "[!] Xcodeproj doesn't know about the following " \ + "attributes #{object_plist.inspect} for the '#{isa}' isa." \ + "\nIf this attribute was generated by Xcode please file " \ + 'an issue: https://github.com/CocoaPods/Xcodeproj/issues/new' + end + end + + # Initializes and returns the object with the given UUID. + # + # @param [String] uuid + # The UUID of the object that should be initialized. + # + # @param [Hash{String=>String}] objects_by_uuid_plist + # The hash contained by `objects` key of the plist containing + # the information about the object that should be initialized. + # + # @param [AbstractObjectAttribute] attribute + # The attribute that requested the object. It is used only for + # exceptions. + # + # @raise If the hash for the given UUID contains an unknown ISA. + # + # @return [AbstractObject] the initialized object. + # @return [Nil] if the UUID could not be found in the objects hash. In + # this case a warning is printed to STDERR. + # + # @visibility private + # + def object_with_uuid(uuid, objects_by_uuid_plist, attribute) + unless object = project.objects_by_uuid[uuid] || project.new_from_plist(uuid, objects_by_uuid_plist) + UI.warn "`#{inspect}` attempted to initialize an object with " \ + "an unknown UUID. `#{uuid}` for attribute: " \ + "`#{attribute.name}`. This can be the result of a merge and " \ + 'the unknown UUID is being discarded.' + end + object + rescue NameError + attributes = objects_by_uuid_plist[uuid] + raise "`#{isa}` attempted to initialize an object with unknown ISA "\ + "`#{attributes['isa']}` from attributes: `#{attributes}`\n" \ + 'If this ISA was generated by Xcode please file an issue: ' \ + 'https://github.com/CocoaPods/Xcodeproj/issues/new' + end + + # Returns a cascade representation of the object with UUIDs. + # + # @return [Hash] a hash representation of the project. + # + # @visibility public + # + # @note the key for simple and to_one attributes usually appears only + # if there is a value. To-many keys always appear with an empty + # array. 
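# Usage sketch: these plist hooks are normally driven by Project.open and
# Project#save, but the hash form can be inspected directly. The project path
# is hypothetical.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
root = project.root_object
root.isa                 #=> "PBXProject"
root.to_hash['isa']      #=> "PBXProject"
root.to_hash['mainGroup'] # to-one relationships serialize as the referenced object's UUID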
+ # + def to_hash + to_hash_as + end + + def to_hash_as(method = :to_hash) + plist = {} + plist['isa'] = isa + + simple_attributes.each do |attrb| + value = attrb.get_value(self) + plist[attrb.plist_name] = value if value + end + + to_one_attributes.each do |attrb| + obj = attrb.get_value(self) + plist[attrb.plist_name] = nested_object_for_hash(obj, method) if obj + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + plist[attrb.plist_name] = list.map { |o| nested_object_for_hash(o, method) } + end + + references_by_keys_attributes.each do |attrb| + list = attrb.get_value(self) + plist[attrb.plist_name] = list.map(&method) + end + + plist + end + private :to_hash_as + + def nested_object_for_hash(object, method) + case method + when :to_ascii_plist + Nanaimo::String.new(object.uuid, object.ascii_plist_annotation) + else + object.uuid + end + end + + def ascii_plist_annotation + " #{display_name} " + end + + def to_ascii_plist + Nanaimo::Dictionary.new(to_hash_as(:to_ascii_plist), ascii_plist_annotation) + end + + # Returns a cascade representation of the object without UUIDs. + # + # This method is designed to work in conjunction with + # {Hash#recursive_diff} to provide a complete, yet readable, diff of + # two projects *not* affected by ISA differences. + # + # @todo The current implementation might lead to infinite loops. + # + # @return [Hash] a hash representation of the project different from + # the plist one. + # + # @visibility private + # + def to_tree_hash + hash = {} + hash['displayName'] = display_name + hash['isa'] = isa + + simple_attributes.each do |attrb| + value = attrb.get_value(self) + hash[attrb.plist_name] = value if value + end + + to_one_attributes.each do |attrb| + obj = attrb.get_value(self) + hash[attrb.plist_name] = obj.to_tree_hash if obj + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + hash[attrb.plist_name] = list.map(&:to_tree_hash) + end + + references_by_keys_attributes.each do |attrb| + list = attrb.get_value(self) + hash[attrb.plist_name] = list.map(&:to_tree_hash) + end + + hash + end + + # @return [Hash{String => Hash}] A hash suitable to display the object + # to the user. + # + def pretty_print + if to_many_attributes.count == 1 + children = to_many_attributes.first.get_value(self) + { display_name => children.map(&:pretty_print) } + else + display_name + end + end + + #---------------------------------------------------------------------# + + public + + # @!group Object methods + + def ==(other) + other.is_a?(AbstractObject) && to_hash == other.to_hash + end + + def <=>(other) + uuid <=> other.uuid + end + + def inspect + optional = '' + optional << " name=`#{name}`" if respond_to?(:name) && name + optional << " path=`#{path}`" if respond_to?(:path) && path + "<#{isa}#{optional} UUID=`#{uuid}`>" + end + end + end + end +end + +require 'xcodeproj/project/case_converter' +require 'xcodeproj/project/object_attributes' +require 'xcodeproj/project/object_dictionary' +require 'xcodeproj/project/object_list' + +# Required because some classes have cyclical references to each other. +# +# @todo I'm sure that there is a method to achieve the same result which +# doesn't present the risk of some rubist laughing at me :-) +# +Xcodeproj::Constants::KNOWN_ISAS.each do |superclass_name, isas| + superklass = Xcodeproj::Project::Object.const_get(superclass_name) + isas.each do |isa| + c = Class.new(superklass) + Xcodeproj::Project::Object.const_set(isa, c) + end +end + +# Now load the concrete subclasses. 
+require 'xcodeproj/project/object/swift_package_remote_reference' +require 'xcodeproj/project/object/swift_package_product_dependency' +require 'xcodeproj/project/object/build_configuration' +require 'xcodeproj/project/object/build_file' +require 'xcodeproj/project/object/build_phase' +require 'xcodeproj/project/object/build_rule' +require 'xcodeproj/project/object/configuration_list' +require 'xcodeproj/project/object/container_item_proxy' +require 'xcodeproj/project/object/file_reference' +require 'xcodeproj/project/object/group' +require 'xcodeproj/project/object/native_target' +require 'xcodeproj/project/object/root_object' +require 'xcodeproj/project/object/target_dependency' +require 'xcodeproj/project/object/reference_proxy' diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_configuration.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_configuration.rb new file mode 100644 index 0000000..664144f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_configuration.rb @@ -0,0 +1,255 @@ +module Xcodeproj + class Project + module Object + # Encapsulates the information a specific build configuration referenced + # by a {XCConfigurationList} which in turn might be referenced by a + # {PBXProject} or a {PBXNativeTarget}. + # + class XCBuildConfiguration < AbstractObject + MUTUAL_RECURSION_SENTINEL = 'xcodeproj.mutual_recursion_sentinel'.freeze + + private_constant :MUTUAL_RECURSION_SENTINEL + + # @!group Attributes + + # @return [String] the name of the Target. + # + attribute :name, String + + # @return [Hash] the build settings to use for building the target. + # + attribute :build_settings, Hash, {} + + # @return [PBXFileReference] an optional file reference to a + # configuration file (`.xcconfig`). + # + has_one :base_configuration_reference, PBXFileReference + + public + + # @!group AbstractObject Hooks + #---------------------------------------------------------------------# + + # @return [Hash{String => Hash}] A hash suitable to display the object + # to the user. + # + def pretty_print + data = {} + data['Build Settings'] = sorted_build_settings + if base_configuration_reference + data['Base Configuration'] = base_configuration_reference.pretty_print + end + { name => data } + end + + def to_hash_as(method = :to_hash) + super.tap do |hash| + normalize_array_settings(hash['buildSettings']) + end + end + + # Sorts the build settings. Valid only in Ruby > 1.9.2 because in + # previous versions the hash are not sorted. + # + # @return [void] + # + def sort(_options = nil) + self.build_settings = sorted_build_settings + end + + # @return [Boolean] Whether this configuration is configured for + # debugging. + # + def debug? + gcc_preprocessor_definitions = resolve_build_setting('GCC_PREPROCESSOR_DEFINITIONS') + gcc_preprocessor_definitions && gcc_preprocessor_definitions.include?('DEBUG=1') + end + + # @return [Symbol] The symbolic type of this configuration, either + # `:debug` or `:release`. + # + def type + debug? ? :debug : :release + end + + # @!group Helpers + #---------------------------------------------------------------------# + + # Gets the value for the given build setting considering any configuration + # file present and resolving inheritance between them. It also takes in + # consideration environment variables. + # + # @param [String] key + # the key of the build setting. 
+ # + # @param [PBXNativeTarget] root_target + # use this to resolve complete recursion between project and targets. + # + # @param [String] previous_key + # use this to resolve complete recursion between different build settings. + # + # @return [String] The value of the build setting + # + def resolve_build_setting(key, root_target = nil, previous_key = nil) + setting = build_settings[key] + setting = resolve_variable_substitution(key, setting, root_target, previous_key) + + config_setting = config[key] + config_setting = resolve_variable_substitution(key, config_setting, root_target, previous_key) + + project_setting = project.build_configuration_list[name] + project_setting = nil if equal?(project_setting) + project_setting &&= project_setting.resolve_build_setting(key, root_target) + + defaults = { + 'CONFIGURATION' => name, + 'SRCROOT' => project.project_dir.to_s, + } + + # if previous_key is nil, it means that we're back at the first call, so we can replace our sentinel string + # used to prevent recursion with nil + if previous_key.nil? && setting == MUTUAL_RECURSION_SENTINEL + setting = nil + end + + [defaults[key], project_setting, config_setting, setting, ENV[key]].compact.reduce(nil) do |inherited, value| + expand_build_setting(value, inherited) + end + end + + #---------------------------------------------------------------------# + + private + + VARIABLE_NAME_PATTERN = + '( # capture block + [_a-zA-Z0-9]+? # non-greedy lookup for everything contained in this list + )'.freeze + private_constant :VARIABLE_NAME_PATTERN + + CAPTURE_VARIABLE_IN_BUILD_CONFIG = / + \$ # matches dollar sign literally + (?: # non-capturing group + [{] # matches a single character on this list + #{VARIABLE_NAME_PATTERN} + [}] # matches a single character on this list + | # or + [(] # matches a single character on this list + #{VARIABLE_NAME_PATTERN} + [)] # matches a single character on this list + ) + /x + private_constant :CAPTURE_VARIABLE_IN_BUILD_CONFIG + + def expand_build_setting(build_setting_value, config_value) + if build_setting_value.is_a?(Array) && config_value.is_a?(String) + config_value = split_build_setting_array_to_string(config_value) + elsif build_setting_value.is_a?(String) && config_value.is_a?(Array) + build_setting_value = split_build_setting_array_to_string(build_setting_value) + end + + default = build_setting_value.is_a?(String) ? '' : [] + inherited = config_value || default + + return build_setting_value.gsub(Regexp.union(Constants::INHERITED_KEYWORDS), inherited) if build_setting_value.is_a? String + build_setting_value.flat_map { |value| Constants::INHERITED_KEYWORDS.include?(value) ? inherited : value } + end + + def resolve_variable_substitution(key, value, root_target, previous_key = nil) + case value + when Array + return value.map { |v| resolve_variable_substitution(key, v, root_target) } + when nil + return + when String + # we know how to resolve strings! 
+ nil + else + raise ArgumentError, "Settings values can only be nil, string, or array, got #{value.inspect} for #{key}" + end + + unless variable_match_data = value.match(CAPTURE_VARIABLE_IN_BUILD_CONFIG) + # no variables left, return the value unchanged + return value + end + variable_reference, variable = *variable_match_data.values_at(0, 1, 2).compact + + case variable + when 'inherited' + # this is handled separately, after resolving all other variable references + value + when key + # to prevent infinite recursion + nil + when previous_key + # to prevent infinite recursion; we don't return nil as for the self recursion because it needs to be + # distinguished outside this method too + MUTUAL_RECURSION_SENTINEL + else + configuration_to_resolve_against = root_target ? root_target.build_configuration_list[name] : self + resolved_value_for_variable = configuration_to_resolve_against.resolve_build_setting(variable, root_target, key) || '' + + # we use this sentinel string instead of nil, because, otherwise, it would be swallowed by the default empty + # string from the preceding line, and we want to distinguish between mutual recursion and other cases + if resolved_value_for_variable == MUTUAL_RECURSION_SENTINEL + return MUTUAL_RECURSION_SENTINEL + end + + value = value.gsub(variable_reference, resolved_value_for_variable) + resolve_variable_substitution(key, value, root_target) + end + end + + def sorted_build_settings + sorted = {} + build_settings.keys.sort.each do |key| + sorted[key] = build_settings[key] + end + sorted + end + + def normalize_array_settings(settings) + return unless settings + + array_settings = BuildSettingsArraySettingsByObjectVersion[project.object_version] + + settings.keys.each do |key| + next unless value = settings[key] + stripped_key = key.sub(/\[[^\]]+\]$/, '') + case value + when String + next unless array_settings.include?(stripped_key) + array_value = split_build_setting_array_to_string(value) + next unless array_value.size > 1 + settings[key] = array_value + when Array + next if value.size > 1 && array_settings.include?(stripped_key) + settings[key] = value.join(' ') + end + end + end + + def split_build_setting_array_to_string(string) + regexp = / *((['"]?).*?[^\\]\2)(?=( |\z))/ + string.scan(regexp).map(&:first) + end + + def config + return {} unless base_configuration_reference + @config ||= + if base_configuration_reference.real_path.exist? + Xcodeproj::Config.new(base_configuration_reference.real_path).to_hash.tap do |hash| + normalize_array_settings(hash) + end + else + {} + end + end + + #---------------------------------------------------------------------# + end + end + end +end + +require 'xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version' diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_file.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_file.rb new file mode 100644 index 0000000..6b10097 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_file.rb @@ -0,0 +1,78 @@ +module Xcodeproj + class Project + module Object + # Contains the information about the build settings of a file used by an + # {AbstractBuildPhase}. + # + class PBXBuildFile < AbstractObject + # @!group Attributes + + # @return [Hash] the list of build settings for this file. + # + # @note The contents of this array depend on the phase of the build + # file. 
+ # + # For PBXHeadersBuildPhase is `{ "ATTRIBUTES" => [:value] }` + # where `:value` can be `Public`, `Private`, or nil + # (Protected). + # + attribute :settings, Hash + + # @return [PBXFileReference] the file to build. + # + # @todo I think that is possible to add any kind of group (for + # example folders linked to a path). + # + has_one :file_ref, [ + PBXFileReference, + PBXGroup, + PBXVariantGroup, + XCVersionGroup, + PBXReferenceProxy, + ] + + # @return [XCSwiftPackageProductDependency] the Swift Package file to build. + # + has_one :product_ref, XCSwiftPackageProductDependency + + # @return [String] the platform filter for this build file. + # + attribute :platform_filter, String + + #---------------------------------------------------------------------# + + public + + # @!group AbstractObject Hooks + + # @return [String] A name suitable for displaying the object to the + # user. + # + def display_name + if file_ref + file_ref.display_name + else + super + end + end + + # @return [Hash{String => Hash}, String] A hash suitable to display the + # object to the user. + # + def pretty_print + if settings.nil? || settings.empty? + display_name + else + { display_name => settings } + end + end + + def ascii_plist_annotation + " #{display_name} in #{GroupableHelper.parent(self).display_name} " + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_phase.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_phase.rb new file mode 100644 index 0000000..8a681b0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_phase.rb @@ -0,0 +1,348 @@ +module Xcodeproj + class Project + module Object + # @abstract + # + # This class is abstract and it doesn't appear in the project document. + # + class AbstractBuildPhase < AbstractObject + # @!group Attributes + + # @return [ObjectList<PBXBuildFile>] the files processed by this build + # configuration. + # + has_many :files, PBXBuildFile + + # @return [String] some kind of magic number which usually is + # '2147483647' (can be also `8` and `12` in + # PBXCopyFilesBuildPhase, one of the masks is + # run_only_for_deployment_postprocessing). + # + attribute :build_action_mask, String, '2147483647' + + # @return [String] whether or not this should only be processed before + # deployment. Can be either '0', or '1'. + # + # @note This option is exposed in Xcode in the UI of + # PBXCopyFilesBuildPhase as `Copy only when installing` or in + # PBXShellScriptBuildPhase as `Run script only when + # installing`. + # + attribute :run_only_for_deployment_postprocessing, String, '0' + + # @return [String] whether or not this run script will be forced to + # run even on incremental builds. Can be either '1', or + # missing. By default this option is disabled in Xcode. + # + # @note This setting is exposed in Xcode in the UI of + # PBXShellScriptBuildPhase as `Based on + # dependency analysis` (selected by default). + # + attribute :always_out_of_date, String + + # @return [String] Comments associated with this build phase. + # + # @note This is apparently no longer used by Xcode. + # + attribute :comments, String + + #--------------------------------------# + + public + + # @!group Helpers + + # @return [Array<PBXFileReference>] the list of all the files + # referenced by this build phase. 
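# Usage sketch for the XCBuildConfiguration setting resolution documented
# earlier in this diff (project path and setting name hypothetical): the raw
# hash holds only what the configuration itself defines, while
# resolve_build_setting also folds in the project-level value, any base
# .xcconfig, $(inherited) and ENV.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
target = project.targets.first
release = target.build_configurations.find { |c| c.name == 'Release' }
release.build_settings['SWIFT_VERSION']            # raw, per-configuration value
release.resolve_build_setting('SWIFT_VERSION', target)
release.type #=> :release unless GCC_PREPROCESSOR_DEFINITIONS contains DEBUG=1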
+ # + def files_references + files.map(&:file_ref) + end + + # @return [Array<String>] The display name of the build files. + # + def file_display_names + files.map(&:display_name) + end + + # @return [PBXBuildFile] the first build file associated with the given + # file reference if one exists. + # + def build_file(file_ref) + (file_ref.referrers & files).first + end + + # Returns whether a build file for the given file reference exists. + # + # @param [PBXFileReference] file_ref + # + # @return [Bool] whether the reference is already present. + # + def include?(file_ref) + !build_file(file_ref).nil? + end + + # Adds a new build file, initialized with the given file reference, to + # the phase. + # + # @param [PBXFileReference] file_ref + # The file reference that should be added to the build phase. + # + # @return [PBXBuildFile] the build file generated. + # + def add_file_reference(file_ref, avoid_duplicates = false) + if avoid_duplicates && existing = build_file(file_ref) + existing + else + build_file = project.new(PBXBuildFile) + build_file.file_ref = file_ref + files << build_file + build_file + end + end + + # Removes the build file associated with the given file reference from + # the phase. + # + # @param [PBXFileReference] file_ref + # The file to remove + # + # @return [void] + # + def remove_file_reference(file_ref) + build_file = files.find { |bf| bf.file_ref == file_ref } + remove_build_file(build_file) if build_file + end + + # Removes a build file from the phase and clears its relationship to + # the file reference. + # + # @param [PBXBuildFile] build_file the file to remove + # + # @return [void] + # + def remove_build_file(build_file) + build_file.file_ref = nil + build_file.remove_from_project + end + + # Removes all the build files from the phase and clears their + # relationship to the file reference. + # + # @return [void] + # + def clear + files.objects.each do |bf| + remove_build_file(bf) + end + end + alias_method :clear_build_files, :clear + + def display_name + super.gsub(/BuildPhase$/, '') + end + + def ascii_plist_annotation + " #{display_name} " + end + end + + #-----------------------------------------------------------------------# + + # The phase responsible of copying headers. Known as `Copy Headers` in + # the UI. + # + # @note This phase can appear only once in a target. + # + class PBXHeadersBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # The phase responsible of compiling the files. Known as `Compile + # Sources` in the UI. + # + # @note This phase can appear only once in a target. + # + class PBXSourcesBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # The phase responsible on linking with frameworks. Known as `Link Binary + # With Libraries` in the UI. + # + # @note This phase can appear only once in a target. + # + class PBXFrameworksBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # The resources build phase apparently is a specialized copy build phase + # for resources. Known as `Copy Bundle Resources` in the UI. It is + # unclear if this is the only one capable of optimizing PNG. + # + # @note This phase can appear only once in a target. 
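# Usage sketch for the AbstractBuildPhase helpers above; the project path and
# the source file are hypothetical, and source_build_phase assumes a native
# target.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
target = project.targets.first
ref = project.main_group.new_reference('Sources/Foo.swift')
phase = target.source_build_phase                 # PBXSourcesBuildPhase
build_file = phase.add_file_reference(ref, true)  # true avoids duplicate PBXBuildFiles
phase.include?(ref)                               #=> true
phase.files_references.include?(ref)              #=> true
phase.remove_file_reference(ref)                  # detaches and deletes the PBXBuildFile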
+ # + class PBXResourcesBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # Phase that copies the files to the bundle of the target (aka `Copy + # Files`). + # + # @note This phase can appear multiple times in a target. + # + class PBXCopyFilesBuildPhase < AbstractBuildPhase + # @!group Attributes + + # @return [String] the name of the build phase. + # + attribute :name, String + + # @return [String] the subpath of `dst_subfolder_spec` where this file + # should be copied to. + # + # @note Can accept environment variables like `$(PRODUCT_NAME)`. + # + attribute :dst_path, String, '' + + # @return [String] the path (destination) where the files should be + # copied to. + # + attribute :dst_subfolder_spec, String, Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS[:resources] + + # @return [Hash{String => Hash}] A hash suitable to display the build + # phase to the user. + # + def pretty_print + { + display_name => { + 'Destination Path' => dst_path, + 'Destination Subfolder' => Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS.key(dst_subfolder_spec).to_s, + 'Files' => files.map(&:pretty_print), + }, + } + end + + # Alias method for #dst_subfolder_spec=, which accepts symbol values + # instead of numeric string values. + # + # @param [Symbol] value + # one of `COPY_FILES_BUILD_PHASE_DESTINATIONS.keys` + # + # @raise [StandardError] if value is not a valid known key + # + def symbol_dst_subfolder_spec=(value) + numeric_value = Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS[value] + raise "[Xcodeproj] Value checking error: got `#{value.inspect}` for" \ + ' attribute: dst_subfolder_spec' if numeric_value.nil? + self.dst_subfolder_spec = numeric_value + end + + # Alias method for #dst_subfolder_spec, which returns symbol values + # instead of numeric string values. + # + # @return [Symbol] + # + def symbol_dst_subfolder_spec + key = Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS.find { |_, num| num == dst_subfolder_spec } + key ? key.first : nil + end + end + + #-----------------------------------------------------------------------# + + # A phase responsible of running a shell script (aka `Run Script`). + # + # @note This phase can appear multiple times in a target. + # + class PBXShellScriptBuildPhase < AbstractBuildPhase + # @!group Attributes + + # @return [String] the name of the build phase. + # + attribute :name, String + + # @return [Array<String>] an array of the paths to pass to the script. + # + # @example + # "$(SRCROOT)/myfile" + # + attribute :input_paths, Array, [] + + # @return [Array<String>] an array of input file list paths of the script. + # + # @example + # "$(SRCROOT)/newInputFile.xcfilelist" + # + attribute :input_file_list_paths, Array, [] + + # @return [Array<String>] an array of output paths of the script. + # + # @example + # "$(DERIVED_FILE_DIR)/myfile" + # + attribute :output_paths, Array, [] + + # @return [Array<String>] an array of output file list paths of the script. + # + # @example + # "$(SRCROOT)/newOutputFile.xcfilelist" + # + attribute :output_file_list_paths, Array, [] + + # @return [String] the path to the script interpreter. + # + # @note Defaults to `/bin/sh`. + # + attribute :shell_path, String, '/bin/sh' + + # @return [String] the actual script to perform. + # + # @note Defaults to a comment string. 
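# Usage sketch for the two configurable phase types above; the phase names,
# paths and script body are hypothetical, and the new_* helpers assume a
# native target.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
target = project.targets.first

embed = target.new_copy_files_build_phase('Embed Frameworks')
embed.symbol_dst_subfolder_spec = :frameworks     # writes the numeric dst_subfolder_spec

lint = target.new_shell_script_build_phase('Lint')
lint.shell_script = 'echo "Linting..."'
lint.input_paths << '$(SRCROOT)/.lint.yml'
lint.output_paths << '$(DERIVED_FILE_DIR)/lint.log'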
+ # + attribute :shell_script, String, "# Type a script or drag a script file from your workspace to insert its path.\n" + + # @return [String] whether or not the ENV variables should be shown in + # the build log. + # + attribute :show_env_vars_in_log, String + + # @return [String] the discovered dependency file to use. + # + attribute :dependency_file, String + + # @return [Hash{String => Hash}] A hash suitable to display the build + # phase to the user. + # + def pretty_print + { + display_name => { + 'Input File List Paths' => input_file_list_paths || [], + 'Input Paths' => input_paths || [], + 'Output File List Paths' => output_file_list_paths || [], + 'Output Paths' => output_paths || [], + 'Shell Path' => shell_path, + 'Shell Script' => shell_script, + }, + } + end + end + + #-----------------------------------------------------------------------# + + # Apparently a build phase named `Build Carbon Resources` (Observed for + # kernel extensions targets). + # + # @note This phase can appear multiple times in a target. + # + class PBXRezBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_rule.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_rule.rb new file mode 100644 index 0000000..929fbab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/build_rule.rb @@ -0,0 +1,102 @@ +module Xcodeproj + class Project + module Object + # This class represents a custom build rule of a Target. + # + class PBXBuildRule < AbstractObject + # @!group Attributes + + # @return [String] the name of the rule. + # + attribute :name, String + + # @return [String] a string representing what compiler to use. + # + # @example + # `com.apple.compilers.proxy.script`. + # + attribute :compiler_spec, String + + # @return [String] the type of the files that should be processed by + # this rule. + # + # @example + # `pattern.proxy`. + # + attribute :file_type, String + + # @return [String] the pattern of the files that should be processed by + # this rule. This attribute is an alternative to to + # `file_type`. + # + # @example + # `*.css`. + # + attribute :file_patterns, String + + # @return [String] whether the rule is editable. + # + # @example + # `1`. + # + attribute :is_editable, String, '1' + + # @return [ObjectList<PBXFileReference>] the file references for the + # input files files. + # + attribute :input_files, Array + + # @return [ObjectList<PBXFileReference>] the file references for the + # output files. + # + attribute :output_files, Array + + # @return [ObjectList<String>] the compiler flags used when creating the + # respective output files. + # + attribute :output_files_compiler_flags, Array + + # @return [String] whether the rule should be run once per architecture. + # + # @example + # `0`. + # + attribute :run_once_per_architecture, String + + # @return [String] the content of the script to use for the build rule. + # + # @note This attribute is present if the #{#compiler_spec} is + # `com.apple.compilers.proxy.script` + # + attribute :script, String + + # @!group Helpers + + # Adds an output file with the specified compiler flags. + # + # @param [PBXFileReference] file the file to add. + # + # @param [String] compiler_flags the compiler flags for the file. 
+ # + # @return [Void] + # + def add_output_file(file, compiler_flags = '') + (self.output_files ||= []) << file + (self.output_files_compiler_flags ||= []) << compiler_flags + end + + # @return [Array<[PBXFileReference, String]>] + # An array containing tuples of output files and their compiler + # flags. + # + def output_files_and_flags + (output_files || []).zip(output_files_compiler_flags || []) + end + + def ascii_plist_annotation + " #{isa} " + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/configuration_list.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/configuration_list.rb new file mode 100644 index 0000000..d39834f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/configuration_list.rb @@ -0,0 +1,117 @@ +module Xcodeproj + class Project + module Object + # The primary purpose of this class is to maintain a collection of + # related build configurations of a {PBXProject} or a {PBXNativeTarget}. + # + class XCConfigurationList < AbstractObject + # @!group Attributes + + # @return [String] whether the default configuration is visible. + # Usually `0`. The purpose of this flag and how Xcode displays + # it in the UI is unknown. + # + attribute :default_configuration_is_visible, String, '0' + + # @return [String] the name of the default configuration. + # Usually `Release`. Xcode exposes this attribute as the + # configuration for the command line tools and only allows to + # set it at the project level. + # + attribute :default_configuration_name, String, 'Release' + + # @return [ObjectList<XCBuildConfiguration>] the build + # configurations of the target. + # + has_many :build_configurations, XCBuildConfiguration + + public + + # @!group Helpers + # --------------------------------------------------------------------# + + # Returns the build configuration with the given name. + # + # @param [String] name + # The name of the build configuration. + # + # @return [XCBuildConfiguration] The build configuration. + # @return [Nil] If not build configuration with the given name is found. + # + def [](name) + build_configurations.find { |bc| bc.name == name } + end + + # Returns the build settings of the build configuration with + # the given name. + # + # @param [String] build_configuration_name + # The name of the build configuration. + # + # @return [Hash {String=>String}] the build settings + # + def build_settings(build_configuration_name) + if config = self[build_configuration_name] + config.build_settings + end + end + + # Gets the value for the given build setting in all the build + # configurations. + # + # @param [String] key + # the key of the build setting. + # + # @param [Bool] resolve_against_xcconfig + # wether the retrieved setting should take in consideration any + # configuration file present. + # + # @param [PBXNativeTarget] root_target + # use this to resolve complete recursion between project and targets + # + # @return [Hash{String => String}] The value of the build setting + # grouped by the name of the build configuration. + # + def get_setting(key, resolve_against_xcconfig = false, root_target = nil) + result = {} + build_configurations.each do |bc| + result[bc.name] = resolve_against_xcconfig ? bc.resolve_build_setting(key, root_target) : bc.build_settings[key] + end + result + end + + # Sets the given value for the build setting associated with the given + # key across all the build configurations. 
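# Usage sketch for the configuration-list helpers above (project path and
# setting values hypothetical): read a single configuration, or fan a value
# out to all of them.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
list = project.targets.first.build_configuration_list
list['Debug']                             # the XCBuildConfiguration named "Debug", or nil
list.build_settings('Debug')              # that configuration's raw settings hash
list.get_setting('SWIFT_VERSION')         #=> { 'Debug' => ..., 'Release' => ... }
list.set_setting('SWIFT_VERSION', '5.3')  # applied to every configuration in the list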
+ # + # @param [String] key + # the key of the build setting. + # + # @param [String] value + # the value for the build setting. + # + # @return [void] + # + def set_setting(key, value) + build_configurations.each do |bc| + bc.build_settings[key] = value + end + end + + def target + return project.root_object if project.build_configuration_list.uuid == uuid + project.targets.find { |t| t.build_configuration_list.uuid == uuid } + end + + #---------------------------------------------------------------------# + + def ascii_plist_annotation + if target.nil? + ' Build configuration list for <deleted target> ' + else + " Build configuration list for #{target.isa} \"#{target}\" " + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/container_item_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/container_item_proxy.rb new file mode 100644 index 0000000..30abdd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/container_item_proxy.rb @@ -0,0 +1,116 @@ +module Xcodeproj + class Project + module Object + # Apparently a proxy for another object which might belong another + # project contained in the same workspace of the project document. + # + # This class is referenced by {PBXTargetDependency} for information about + # it usage see the specs of that class. + # + # @note This class references the other objects by UUID instead of + # creating proper relationships because the other objects might be + # part of another project. This implies that the references to + # other objects should not increase the retain count of the + # targets. + # + # @todo: This class needs some work to support targets across workspaces, + # as the container portal might not be initialized leading + # xcodeproj to raise because ti can't find the UUID. + # + class PBXContainerItemProxy < AbstractObject + # @!group Attributes + + # @return [String] apparently the UUID of the root object + # {PBXProject} of the project containing the represented + # object. + # + # @todo this is an attribute because a it is usually a reference to + # the root object or to a file reference to another project. + # The reference to the root object causes a retain cycle that + # could cause issues (e.g. in to_tree_hash). Usually those + # objects are retained by at least another object (the + # {Project} for the root object and a {PBXGroup} for the + # reference to another project) and so the referenced object + # should be serialized. + # + # If this assumption is incorrect, there could be loss of + # information opening and saving an existing project. + # + # @todo This is the external reference that 'contains' other proxy + # items. + attribute :container_portal, String + + # @return [String] the type of the proxy. + # + # @note @see {Constants::PROXY_TYPE.values} for valid values. + # + attribute :proxy_type, String + + # @return [String] apparently the UUID of the represented + # object. + # + # @note If the object is in another project the UUID would not be + # present in the {Project#objects_by_uuid} hash. For this + # reason this is not an `has_one` attribute. It is assumes that + # if the object belongs to the project at least another object + # should be retaining it. This assumption is reasonable because + # this is a proxy class. + # + # If this assumption is incorrect, there could be loss of + # information opening and saving an existing project. 
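# Usage sketch for the proxy attributes above, as seen from a target
# dependency; the project path is hypothetical, and helpers such as remote?
# and proxied_object are defined further below in this class.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
dependency = project.targets.first.dependencies.first
if dependency && (proxy = dependency.target_proxy)
  proxy.remote?        # true when container_portal points at another project
  proxy.remote_info    # the name of the represented target
  proxy.proxied_object # resolves remote_global_id_string, opening the other project if needed
end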
+ # + attribute :remote_global_id_string, String + + # @return [String] apparently the name of the object represented by + # the proxy. + # + attribute :remote_info, String + + # Checks whether the reference points to a remote project. + # + # @return [Bool] + # + def remote? + project.root_object.uuid != container_portal + end + + # Get the proxied object + # + # @return [AbstractObject] + # + def proxied_object + container_portal_object.objects_by_uuid[remote_global_id_string] + end + + def container_portal_object + if remote? + container_portal_file_ref = project.objects_by_uuid[container_portal] + Project.open(container_portal_file_ref.real_path) + else + project + end + end + + def container_portal_annotation + if remote? + " #{File.basename(project.objects_by_uuid[container_portal].real_path)} " + else + project.root_object.ascii_plist_annotation + end + end + + def to_hash_as(method = :to_hash) + hash = super + if method == :to_ascii_plist + hash['containerPortal'] = Nanaimo::String.new(container_portal, container_portal_annotation) + end + hash + end + + def ascii_plist_annotation + " #{isa} " + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/file_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/file_reference.rb new file mode 100644 index 0000000..0cf0043 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/file_reference.rb @@ -0,0 +1,337 @@ +require 'xcodeproj/project/object/helpers/groupable_helper' + +module Xcodeproj + class Project + module Object + # This class represents a reference to a file in the file system. + # + class PBXFileReference < AbstractObject + # @!group Attributes + + # @return [String] the name of the reference, often not present. + # + attribute :name, String + + # @return [String] the path to the file relative to the source tree + # + attribute :path, String + + # @return [String] the directory to which the path is relative. + # + # @note The accepted values are: + # - `<absolute>` for absolute paths + # - `<group>` for paths relative to the group + # - `SOURCE_ROOT` for paths relative to the project + # - `DEVELOPER_DIR` for paths relative to the developer + # directory. + # - `BUILT_PRODUCTS_DIR` for paths relative to the build + # products directory. + # - `SDKROOT` for paths relative to the SDK directory. + # + attribute :source_tree, String, 'SOURCE_ROOT' + + # @return [String] the file type (apparently) used for products + # generated by Xcode (i.e. applications, libraries). + # + attribute :explicit_file_type, String + + # @return [String] the file type guessed by Xcode. + # + # @note This attribute is not present if there is an + # `explicit_file_type`. + # + attribute :last_known_file_type, String + + # @return [String] whether this file should be indexed. It can + # be either `0` or `1`. + # + # @note Apparently present only for products generated by Xcode with + # a value of `0`. + # + attribute :include_in_index, String, '1' + + # @return [String] a string containing a number which represents the + # encoding format of the file. + # + attribute :fileEncoding, String + + # @return [String] a string that specifies the UTI for the syntax + # highlighting. + # + # @example + # `xcode.lang.ruby` + # + attribute :xc_language_specification_identifier, String + + # @return [String] a string that specifies the UTI for the structure of + # a plist file. 
+ # + # @example + # `com.apple.xcode.plist.structure-definition.iphone.info-plist` + # + attribute :plist_structure_definition_identifier, String + + # @return [String] Whether Xcode should use tabs for text alignment. + # + # @example + # `1` + # + attribute :uses_tabs, String + + # @return [String] The width of the indent. + # + # @example + # `2` + # + attribute :indent_width, String + + # @return [String] The width of the tabs. + # + # @example + # `2` + # + attribute :tab_width, String + + # @return [String] Whether Xcode should wrap lines. + # + # @example + # `1` + # + attribute :wraps_lines, String + + # @return [String] Apparently whether Xcode should add, if needed, a + # new line feed before saving the file. + # + # @example + # `0` + # + attribute :line_ending, String + + # @return [String] Comments associated with this file. + # + # @note This is apparently no longer used by Xcode. + # + attribute :comments, String + + #---------------------------------------------------------------------# + + public + + # @!group Helpers + + # @return [String] the name of the file taking into account the path if + # needed. + # + def display_name + if name + name + elsif (class << GroupableHelper; self; end)::SOURCE_TREES_BY_KEY[:built_products] == source_tree + path + elsif path + File.basename(path) + end + end + + # @return [PBXGroup, PBXProject] the parent of the file. + # + def parent + GroupableHelper.parent(self) + end + + # @return [Array<PBXGroup, PBXProject>] The list of the parents of the + # reference. + # + def parents + GroupableHelper.parents(self) + end + + # @return [String] A representation of the reference hierarchy. + # + def hierarchy_path + GroupableHelper.hierarchy_path(self) + end + + # Moves the reference to a new parent. + # + # @param [PBXGroup] new_parent + # The new parent. + # + # @return [void] + # + def move(new_parent) + GroupableHelper.move(self, new_parent) + end + + # @return [Pathname] the absolute path of the file resolving the + # source tree. + # + def real_path + GroupableHelper.real_path(self) + end + + # @return [Pathname] the path of the file without resolving the + # source tree. + # + def full_path + GroupableHelper.full_path(self) + end + + # Sets the source tree of the reference. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a symbol. + # + # @return [void] + # + def set_source_tree(source_tree) + GroupableHelper.set_source_tree(self, source_tree) + end + + # Allows to set the path according to the source tree of the reference. + # + # @param [#to_s] the path for the reference. + # + # @return [void] + # + def set_path(path) + if path + GroupableHelper.set_path_with_source_tree(self, path, source_tree) + else + self.path = nil + end + end + + # @return [Array<PBXBuildFile>] the build files associated with the + # current file reference. + # + def build_files + referrers.grep(PBXBuildFile) + end + + # Sets the last known file type according to the extension of the path. + # + # @return [void] + # + def set_last_known_file_type(type = nil) + if type + self.last_known_file_type = type + elsif path + extension = Pathname(path).extname[1..-1] + self.last_known_file_type = Constants::FILE_TYPES_BY_EXTENSION[extension] + end + end + + # Sets the explicit file type according to the extension of the path, + # and clears the last known file type. 
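# Usage sketch for the file-reference helpers above; the project path and the
# file name are hypothetical.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
ref = project.files.find { |f| f.display_name == 'Info.plist' }
if ref
  ref.real_path                # absolute path resolved through the source tree
  ref.source_tree              # e.g. "<group>" or "SOURCE_ROOT"
  ref.build_files              # PBXBuildFile objects pointing at this reference
  ref.set_last_known_file_type # re-derives the type from the path extension
end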
+ # + # @return [void] + # + def set_explicit_file_type(type = nil) + self.last_known_file_type = nil + if type + self.explicit_file_type = type + elsif path + extension = Pathname(path).extname[1..-1] + self.explicit_file_type = Constants::FILE_TYPES_BY_EXTENSION[extension] + end + end + + #---------------------------------------------------------------------# + + # Checks whether the reference is a proxy. + # + # @return [Bool] always false for this ISA. + # + def proxy? + false + end + + # If this file reference represents an external Xcode project reference + # then this will return metadata about it which includes the reference + # to the 'Products' group that's created in this project (the project + # that includes the external project). + # + # @return [ObjectDictionary, nil] The external project metadata for + # this file reference or `nil` if it's not an external project. + # + def project_reference_metadata + project.root_object.project_references.find do |project_reference| + project_reference[:project_ref] == self + end + end + + # If this file reference represents an external Xcode project reference + # then this will return the objects that are 'containers' for items + # contained in the external Xcode project. + # + # @return [Array<PBXContainerItemProxy>] The containers for items in + # the external Xcode project. + # + def proxy_containers + project.objects.select do |object| + object.isa == 'PBXContainerItemProxy' && + object.container_portal == uuid + end + end + + # If this file reference represents an external Xcode project reference + # then this will return proxies for file references contained in the + # external Xcode project. + # + # @return [Array<PBXReferenceProxy>] The file reference proxies for + # items located in the external Xcode project. + # + def file_reference_proxies + containers = proxy_containers + if containers.empty? + [] + else + project.objects.select do |object| + object.isa == 'PBXReferenceProxy' && + containers.include?(object.remote_ref) + end + end + end + + # If this file reference represents an external Xcode project reference + # then this will return dependencies on targets contained in the + # external Xcode project. + # + # @return [Array<PBXTargetDependency>] The dependencies on targets + # located in the external Xcode project. + # + def target_dependency_proxies + containers = proxy_containers + if containers.empty? + [] + else + project.objects.select do |object| + object.isa == 'PBXTargetDependency' && + containers.include?(object.target_proxy) + end + end + end + + # In addition to removing the file reference, this will also remove any + # items related to this reference in case it represents an external + # Xcode project. 
+ # + # @see AbstractObject#remove_from_project + # + # @return [void] + # + def remove_from_project + if project_reference = project_reference_metadata + file_reference_proxies.each(&:remove_from_project) + target_dependency_proxies.each(&:remove_from_project) + project_reference[:product_group].remove_from_project + project.root_object.project_references.delete(project_reference) + end + super + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/group.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/group.rb new file mode 100644 index 0000000..b403080 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/group.rb @@ -0,0 +1,485 @@ +require 'xcodeproj/project/object/helpers/groupable_helper' +require 'xcodeproj/project/object/helpers/file_references_factory' + +module Xcodeproj + class Project + module Object + # This class represents a group. A group can contain other groups + # (PBXGroup) and file references (PBXFileReference). + # + class PBXGroup < AbstractObject + # @!group Attributes + + # @return [ObjectList<PBXGroup, PBXFileReference>] + # the objects contained by the group. + # + has_many :children, [PBXGroup, PBXFileReference, PBXReferenceProxy] + + # @return [String] the directory to which the path is relative. + # + # @note The accepted values are: + # - `<absolute>` for absolute paths + # - `<group>` for paths relative to the group + # - `SOURCE_ROOT` for paths relative to the project + # - `DEVELOPER_DIR` for paths relative to the developer + # directory. + # - `BUILT_PRODUCTS_DIR` for paths relative to the build + # products directory. + # - `SDKROOT` for paths relative to the SDK directory. + # + attribute :source_tree, String, '<group>' + + # @return [String] the path to a folder in the file system. + # + # @note This attribute is present for groups that are linked to a + # folder in the file system. + # + attribute :path, String + + # @return [String] the name of the group. + # + # @note If path is specified this attribute is not present. + # + attribute :name, String + + # @return [String] Whether Xcode should use tabs for text alignment. + # + # @example + # `1` + # + attribute :uses_tabs, String + + # @return [String] The width of the indent. + # + # @example + # `2` + # + attribute :indent_width, String + + # @return [String] The width of the tabs. + # + # @example + # `2` + # + attribute :tab_width, String + + # @return [String] Whether Xcode should wrap lines. + # + # @example + # `1` + # + attribute :wraps_lines, String + + # @return [String] Comments associated with this group. + # + # @note This is apparently no longer used by Xcode. + # + attribute :comments, String + + public + + # @!group Helpers + #---------------------------------------------------------------------# + + # @return [PBXGroup, PBXProject] The parent of the group. + # + def parent + GroupableHelper.parent(self) + end + + # @return [Array<PBXGroup, PBXProject>] The list of the parents of the + # group. + # + def parents + GroupableHelper.parents(self) + end + + # @return [String] A representation of the group hierarchy. + # + def hierarchy_path + GroupableHelper.hierarchy_path(self) + end + + # Moves the group to a new parent. + # + # @param [PBXGroup] new_parent + # The new parent. 
+ # + # @return [void] + # + def move(new_parent) + GroupableHelper.move(self, new_parent) + end + + # @return [Pathname] the absolute path of the group resolving the + # source tree. + # + def real_path + GroupableHelper.real_path(self) + end + + # Sets the source tree of the group. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a symbol. + # + # @return [void] + # + def set_source_tree(source_tree) + GroupableHelper.set_source_tree(self, source_tree) + end + + # Allows to set the path according to the source tree of the group. + # + # @param [#to_s] the path for the group. + # + # @return [void] + # + def set_path(path) + if path + source_tree + GroupableHelper.set_path_with_source_tree(self, path, source_tree) + else + self.path = nil + self.source_tree = '<group>' + end + end + + # @return [Array<PBXFileReference>] the file references in the group + # children. + # + def files + children.grep(PBXFileReference) + end + + # @return [PBXFileReference] The file references whose path (regardless + # of the source tree) matches the give path. + # + def find_file_by_path(path) + files.find { |ref| ref.path == path } + end + + # @return [Array<PBXGroup>] the groups in the group children. + # + def groups + # Don't grep / is_a? as this would include child classes. + children.select { |obj| obj.class == PBXGroup } + end + + # @return [Array<XCVersionGroup>] the version groups in the group + # children. + # + def version_groups + children.grep(XCVersionGroup) + end + + # @return [Array<PBXGroup,PBXFileReference,PBXReferenceProxy>] the + # recursive children of the group. + # + def recursive_children_groups + result = [] + groups.each do |child| + result << child + result.concat(child.recursive_children_groups) + end + result + end + + # @return [Array<PBXGroup,PBXFileReference,PBXReferenceProxy>] the + # recursive list of the children of the group. + # + def recursive_children + result = [] + children.each do |child| + result << child + if child.is_a?(PBXGroup) + result.concat(child.recursive_children) + end + end + result + end + + # @return [Bool] Whether the group is empty. + # + def empty? + children.count.zero? + end + + # Creates a new reference with the given path and adds it to the + # group. The reference is configured according to the extension + # of the path. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXFileReference, XCVersionGroup] The new reference. + # + def new_reference(path, source_tree = :group) + FileReferencesFactory.new_reference(self, path, source_tree) + end + alias_method :new_file, :new_reference + + # Creates a file reference to a static library and adds it to the + # group. + # + # @param [#to_s] product_name + # The name of the static library. + # + # @return [PBXFileReference] The new file reference. + # + def new_product_ref_for_target(target_name, product_type) + FileReferencesFactory.new_product_ref_for_target(self, target_name, product_type) + end + + # Creates a file reference to a new bundle. + # + # @param [#to_s] product_name + # The name of the bundle. + # + # @return [PBXFileReference] The new file reference. + # + def new_bundle(product_name) + FileReferencesFactory.new_bundle(self, product_name) + end + + # Creates a file reference to a new bundle and adds it to the group. 
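# Usage sketch for the children helpers above; the project path and the file
# name are hypothetical.
require 'xcodeproj'
project = Xcodeproj::Project.open('Example.xcodeproj')
main = project.main_group
main.files.map(&:display_name)        # direct PBXFileReference children
main.groups.map(&:display_name)       # direct PBXGroup children only
main.recursive_children.size          # everything nested underneath
main.find_file_by_path('Info.plist')  # matches on the raw path attribute
main.empty?                           #=> false for any non-trivial project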
+ # + # @note @see new_reference + # + # @param [#to_s] name + # the name of the new group. + # + # @return [PBXGroup] the new group. + # + def new_group(name, path = nil, source_tree = :group) + group = project.new(PBXGroup) + children << group + group.name = name + group.set_source_tree(source_tree) + group.set_path(path) + group + end + + # Creates a new variant group and adds it to the group + # + # @note @see new_group + # + # @param [#to_s] name + # the name of the new group. + # + # @param [#to_s] path + # The, preferably absolute, path of the variant group. + # Pass the path of the folder containing all the .lproj bundles, + # that contain files for the variant group. + # Do not pass the path of a specific bundle (such as en.lproj) + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXVariantGroup] the new variant group. + # + def new_variant_group(name, path = nil, source_tree = :group) + group = project.new(PBXVariantGroup) + children << group + group.name = name + group.set_source_tree(source_tree) + group.set_path(path) + group + end + + # Traverses the children groups and finds the group with the given + # path, if exists. + # + # @see find_subpath + # + def [](path) + find_subpath(path, false) + end + + # Removes children files and groups under this group. + # + def clear + children.objects.each(&:remove_from_project) + end + alias_method :remove_children_recursively, :clear + + # Traverses the children groups and finds the children with the given + # path, optionally, creating any needed group. If the given path is + # `nil` it returns itself. + # + # @param [String] path + # a string with the names of the groups separated by a '`/`'. + # + # @param [Boolean] should_create + # whether the path should be created. + # + # @note The path is matched against the {#display_name} of the groups. + # + # @example + # g = main_group['Frameworks'] + # g.name #=> 'Frameworks' + # + # @return [PBXGroup] the group if found. + # @return [Nil] if the path could not be found and should create is + # false. + # + def find_subpath(path, should_create = false) + return self unless path + path = path.split('/') unless path.is_a?(Array) + child_name = path.shift + child = children.find { |c| c.display_name == child_name } + if child.nil? + if should_create + child = new_group(child_name) + else + return nil + end + end + if path.empty? + child + else + child.find_subpath(path, should_create) + end + end + + # Adds an object to the group. + # + # @return [ObjectList<AbstractObject>] the children list. + # + def <<(child) + children << child + end + + # Sorts the children of the group by type and then by name. + # + # @note This is safe to call in an object list because it modifies it + # in C in Ruby MRI. In other Ruby implementation it can cause + # issues if there is one call to the notification enabled + # methods not compensated by the corespondent opposite (loss of + # UUIDs and objects from the project). + # + # @return [void] + # + def sort_by_type + children.sort! 
do |x, y| + if x.isa == 'PBXGroup' && !(y.isa == 'PBXGroup') + -1 + elsif !(x.isa == 'PBXGroup') && y.isa == 'PBXGroup' + 1 + elsif x.display_name && y.display_name + extname_x = File.extname(x.display_name) + extname_y = File.extname(y.display_name) + if extname_x != extname_y + extname_x <=> extname_y + else + File.basename(x.display_name, '.*') <=> File.basename(y.display_name, '.*') + end + else + 0 + end + end + end + + # Sorts the group by type recursively. + # + # @return [void] + # + def sort_recursively_by_type + groups.each(&:sort_recursively_by_type) + sort_by_type + end + + public + + # @!group AbstractObject Hooks + #---------------------------------------------------------------------# + + # @return [String] the name of the group taking into account the path + # or other factors if needed. + # + def display_name + if name + name + elsif path + File.basename(path) + elsif self.equal?(project.main_group) + 'Main Group' + end + end + + def ascii_plist_annotation + super unless self.equal?(project.main_group) + end + + # Sorts the to many attributes of the object according to the display + # name. + # + # @param [Hash] options + # the sorting options. + # @option options [Symbol] :groups_position + # the position of the groups can be either `:above` or + # `:below`. + # + # @return [void] + # + def sort(options = nil) + children.sort! do |x, y| + if options && groups_position = options[:groups_position] + raise ArgumentError unless [:above, :below].include?(groups_position) + if x.isa == 'PBXGroup' && !(y.isa == 'PBXGroup') + next groups_position == :above ? -1 : 1 + elsif !(x.isa == 'PBXGroup') && y.isa == 'PBXGroup' + next groups_position == :above ? 1 : -1 + end + end + + result = File.basename(x.display_name.downcase, '.*') <=> File.basename(y.display_name.downcase, '.*') + if result.zero? + File.extname(x.display_name.downcase) <=> File.extname(y.display_name.downcase) + else + result + end + end + end + end + + #-----------------------------------------------------------------------# + + # This class is used to gather localized files into one entry. + # + class PBXVariantGroup < PBXGroup + # @!group Attributes + + # @return [String] the file type guessed by Xcode. + # + attribute :last_known_file_type, String + end + + #-----------------------------------------------------------------------# + + # A group that contains multiple files references to the different + # versions of a resource. + # + # Used to contain the different versions of a `xcdatamodel`. + # + class XCVersionGroup < PBXGroup + # @!group Attributes + + # @return [PBXFileReference] the reference to the current version. + # + has_one :current_version, PBXFileReference + + # @return [String] the type of the versioned resource. + # + attribute :version_group_type, String, 'wrapper.xcdatamodel' + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version.rb new file mode 100644 index 0000000..431fdd1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version.rb @@ -0,0 +1,72 @@ +require 'set' + +module Xcodeproj + class Project + module Object + class XCBuildConfiguration + # yes, they are case-sensitive. 
+ # no, Xcode doesn't do this for other PathList settings nor other + # settings ending in SEARCH_PATHS. + module BuildSettingsArraySettingsByObjectVersion + ARRAY_SETTINGS = %w( + ALTERNATE_PERMISSIONS_FILES + ARCHS + BUILD_VARIANTS + EXCLUDED_SOURCE_FILE_NAMES + FRAMEWORK_SEARCH_PATHS + GCC_PREPROCESSOR_DEFINITIONS + GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS + HEADER_SEARCH_PATHS + INFOPLIST_PREPROCESSOR_DEFINITIONS + LIBRARY_SEARCH_PATHS + OTHER_CFLAGS + OTHER_CPLUSPLUSFLAGS + OTHER_LDFLAGS + REZ_SEARCH_PATHS + SECTORDER_FLAGS + WARNING_CFLAGS + WARNING_LDFLAGS + ).to_set.freeze + private_constant :ARRAY_SETTINGS + + ARRAY_SETTINGS_OBJECT_VERSION_50 = %w( + ALTERNATE_PERMISSIONS_FILES + ARCHS + BUILD_VARIANTS + EXCLUDED_SOURCE_FILE_NAMES + FRAMEWORK_SEARCH_PATHS + GCC_PREPROCESSOR_DEFINITIONS + GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS + HEADER_SEARCH_PATHS + INCLUDED_SOURCE_FILE_NAMES + INFOPLIST_PREPROCESSOR_DEFINITIONS + LD_RUNPATH_SEARCH_PATHS + LIBRARY_SEARCH_PATHS + LOCALIZED_STRING_MACRO_NAMES + OTHER_CFLAGS + OTHER_CPLUSPLUSFLAGS + OTHER_LDFLAGS + REZ_SEARCH_PATHS + SECTORDER_FLAGS + SYSTEM_FRAMEWORK_SEARCH_PATHS + SYSTEM_HEADER_SEARCH_PATHS + USER_HEADER_SEARCH_PATHS + WARNING_CFLAGS + WARNING_LDFLAGS + ).to_set.freeze + private_constant :ARRAY_SETTINGS_OBJECT_VERSION_50 + + def self.[](object_version) + object_version = object_version.to_i + + if object_version >= 50 + ARRAY_SETTINGS_OBJECT_VERSION_50 + else + ARRAY_SETTINGS + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/file_references_factory.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/file_references_factory.rb new file mode 100644 index 0000000..bfb0c22 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/file_references_factory.rb @@ -0,0 +1,245 @@ +# frozen_string_literal: true +require 'xcodeproj/project/object/helpers/groupable_helper' + +module Xcodeproj + class Project + module Object + class FileReferencesFactory + class << self + # Creates a new reference with the given path and adds it to the + # given group. The reference is configured according to the extension + # of the path. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXFileReference, XCVersionGroup] The new reference. + # + def new_reference(group, path, source_tree) + ref = case File.extname(path).downcase + when '.xcdatamodeld' + new_xcdatamodeld(group, path, source_tree) + when '.xcodeproj' + new_subproject(group, path, source_tree) + else + new_file_reference(group, path, source_tree) + end + + configure_defaults_for_file_reference(ref) + ref + end + + # Creates a file reference to a static library and adds it to the + # given group. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] product_name + # The name of the static library. + # + # @return [PBXFileReference] The new file reference. 
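+          #
+          # @example A rough sketch, the group and target name are
+          #          illustrative assumptions.
+          #
+          #   products = project.main_group.find_subpath('Products', true)
+          #   ref = FileReferencesFactory.new_product_ref_for_target(
+          #     products, 'Parser', :static_library)
+          #   ref.path #=> "libParser.a"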
+ # + def new_product_ref_for_target(group, target_name, product_type) + if product_type == :static_library + prefix = 'lib' + end + extension = Constants::PRODUCT_UTI_EXTENSIONS[product_type] + path = "#{prefix}#{target_name}" + path += ".#{extension}" if extension + ref = new_reference(group, path, :built_products) + ref.include_in_index = '0' + ref.set_explicit_file_type + ref + end + + # Creates a file reference to a new bundle and adds it to the given + # group. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] product_name + # The name of the bundle. + # + # @return [PBXFileReference] The new file reference. + # + def new_bundle(group, product_name) + ref = new_reference(group, "#{product_name}.bundle", :built_products) + ref.include_in_index = '0' + ref.set_explicit_file_type('wrapper.cfbundle') + ref + end + + private + + # @group Private Helpers + #-------------------------------------------------------------------# + + # Creates a new file reference with the given path and adds it to the + # given group. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXFileReference] The new file reference. + # + def new_file_reference(group, path, source_tree) + path = Pathname.new(path) + ref = group.project.new(PBXFileReference) + group.children << ref + GroupableHelper.set_path_with_source_tree(ref, path, source_tree) + ref.set_last_known_file_type + ref + end + + # Creates a new version group reference to an xcdatamodeled adding + # the xcdatamodel files included in the wrapper as children file + # references. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @note To match Xcode behaviour the current version is read from + # the .xccurrentversion file, if it doesn't exist the last + # xcdatamodel according to its path is set as the current + # version. + # + # @return [XCVersionGroup] The new reference. + # + def new_xcdatamodeld(group, path, source_tree) + path = Pathname.new(path) + ref = group.project.new(XCVersionGroup) + group.children << ref + GroupableHelper.set_path_with_source_tree(ref, path, source_tree) + ref.version_group_type = 'wrapper.xcdatamodel' + + real_path = group.real_path.join(path) + current_version_name = nil + if real_path.exist? + real_path.children.each do |child_path| + if File.extname(child_path) == '.xcdatamodel' + new_file_reference(ref, child_path, :group) + elsif File.basename(child_path) == '.xccurrentversion' + full_path = real_path + File.basename(child_path) + xccurrentversion = Plist.read_from_path(full_path) + current_version_name = xccurrentversion['_XCCurrentVersionName'] + end + end + + if current_version_name + ref.current_version = ref.children.find do |obj| + obj.path.split('/').last == current_version_name + end + end + end + + ref + end + + # Creates a file reference to another Xcode subproject and setups the + # proxies to the targets. + # + # @param [PBXGroup] group + # The group to which to add the reference. 
+ # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @note To analyze the targets the given project is read and thus + # it should already exist in the disk. + # + # @return [PBXFileReference] The new file reference. + # + def new_subproject(group, path, source_tree) + ref = new_file_reference(group, path, source_tree) + ref.include_in_index = nil + + product_group_ref = find_products_group_ref(group, true) + + subproj = Project.open(path) + subproj.products_group.files.each do |product_reference| + container_proxy = group.project.new(PBXContainerItemProxy) + container_proxy.container_portal = ref.uuid + container_proxy.proxy_type = Constants::PROXY_TYPES[:reference] + container_proxy.remote_global_id_string = product_reference.uuid + container_proxy.remote_info = 'Subproject' + + reference_proxy = group.project.new(PBXReferenceProxy) + extension = File.extname(product_reference.path)[1..-1] + reference_proxy.file_type = Constants::FILE_TYPES_BY_EXTENSION[extension] + reference_proxy.path = product_reference.path + reference_proxy.remote_ref = container_proxy + reference_proxy.source_tree = 'BUILT_PRODUCTS_DIR' + + product_group_ref << reference_proxy + end + + attribute = PBXProject.references_by_keys_attributes.find { |attrb| attrb.name == :project_references } + project_reference = ObjectDictionary.new(attribute, group.project.root_object) + project_reference[:project_ref] = ref + project_reference[:product_group] = product_group_ref + group.project.root_object.project_references << project_reference + + ref + end + + # Configures a file reference according to the extension to math + # Xcode behaviour. + # + # @param [PBXFileReference] ref + # The file reference to configure. + # + # @note To closely match the Xcode behaviour the name attribute of + # the file reference is set only if the path of the file is + # not equal to the path of the group. + # + # @return [void] + # + def configure_defaults_for_file_reference(ref) + if ref.path.include?('/') + ref.name = ref.path.split('/').last + end + + if File.extname(ref.path).downcase == '.framework' + ref.include_in_index = nil + end + end + + def find_products_group_ref(group, should_create = false) + product_group_ref = + (group.project.root_object.product_ref_group ||= group.project.main_group.find_subpath('Products', should_create)) + product_group_ref + end + + #-------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/groupable_helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/groupable_helper.rb new file mode 100644 index 0000000..661a9b2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/helpers/groupable_helper.rb @@ -0,0 +1,260 @@ +# frozen_string_literal: true +module Xcodeproj + class Project + module Object + class GroupableHelper + class << self + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [PBXGroup, PBXProject] The parent of the object. 
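+          #
+          # @example Illustrative only, assuming a 'main.swift' reference
+          #          nested under a top-level 'Sources' group.
+          #
+          #   GroupableHelper.parent(file_ref).display_name #=> "Sources"
+          #   GroupableHelper.hierarchy_path(file_ref) #=> "/Sources/main.swift"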
+ # + def parent(object) + referrers = object.referrers.uniq + if referrers.count > 1 + referrers = referrers.grep(PBXGroup) + end + + if referrers.count == 0 + raise '[Xcodeproj] Consistency issue: no parent ' \ + "for object `#{object.display_name}`: "\ + "`#{object.referrers.join('`, `')}`" + elsif referrers.count > 1 + raise '[Xcodeproj] Consistency issue: unexpected multiple parents ' \ + "for object `#{object.display_name}`: "\ + "#{object.referrers}" + end + referrers.first + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Array<PBXGroup, PBXProject>] The parents of the object. + # + def parents(object) + if main_group?(object) + [] + else + parent = parent(object) + parents(parent).push(parent) + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [String] A representation of the group hierarchy. + # + def hierarchy_path(object) + unless main_group?(object) + parent = parent(object) + parent = parent.hierarchy_path if parent.respond_to?(:hierarchy_path) + "#{parent}/#{object.display_name}" + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Bool] Wether the object is the main group of the project. + # + def main_group?(object) + object.equal?(object.project.main_group) + end + + # Moves the object to a new parent. + # + # @param [PBXGroup, PBXFileReference] object + # The object to move. + # + # @param [PBXGroup] new_parent + # The new parent. + # + # @return [void] + # + def move(object, new_parent) + unless object + raise "[Xcodeproj] Attempt to move nil object to `#{new_parent}`." + end + unless new_parent + raise "[Xcodeproj] Attempt to move object `#{object}` to nil parent." + end + if new_parent.equal?(object) + raise "[Xcodeproj] Attempt to move object `#{object}` to itself." + end + if parents(new_parent).include?(object) + raise "[Xcodeproj] Attempt to move object `#{object}` to a child object `#{new_parent}`." + end + + object.parent.children.delete(object) + new_parent << object + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Pathname] The absolute path of the object resolving the + # source tree. + # + def real_path(object) + source_tree = source_tree_real_path(object) + path = object.path || ''.freeze + if source_tree + source_tree + path + else + Pathname(path) + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Pathname] The path of the object without resolving the + # source tree. + # + def full_path(object) + folder = case object.source_tree + when '<group>' + object_parent = parent(object) + if object_parent.isa == 'PBXProject'.freeze + nil + else + full_path(object_parent) + end + when 'SOURCE_ROOT' + nil + when '<absolute>' + Pathname.new('/'.freeze) + else + Pathname.new("${#{object.source_tree}}") + end + folder ||= Pathname.new('') + if object.path + folder + object.path + else + folder + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Pathname] The absolute path of the source tree of the + # object. 
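+          #
+          # @example A sketch of how '<group>' entries resolve, assuming the
+          #          project lives at /tmp/Example and the reference sits in
+          #          a 'Sources' group backed by a folder of the same name.
+          #
+          #   GroupableHelper.source_tree_real_path(file_ref)
+          #   #=> #<Pathname:/tmp/Example/Sources>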
+ # + def source_tree_real_path(object) + case object.source_tree + when '<group>' + object_parent = parent(object) + if object_parent.isa == 'PBXProject'.freeze + object.project.project_dir + object.project.root_object.project_dir_path + else + real_path(object_parent) + end + when 'SOURCE_ROOT' + object.project.project_dir + when '<absolute>' + nil + else + Pathname.new("${#{object.source_tree}}") + end + end + + # @return [Hash{Symbol => String}] The source tree values by they + # symbol representation. + # + SOURCE_TREES_BY_KEY = { + :absolute => '<absolute>', + :group => '<group>', + :project => 'SOURCE_ROOT', + :built_products => 'BUILT_PRODUCTS_DIR', + :developer_dir => 'DEVELOPER_DIR', + :sdk_root => 'SDKROOT', + }.freeze + + # Sets the source tree of the given object. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a key for + # {SOURCE_TREES_BY_KEY}. + # + # @return [void] + # + def set_source_tree(object, source_tree) + source_tree = normalize_source_tree(source_tree) + object.source_tree = source_tree + end + + # Sets the path of the given object according to the provided source + # tree key. The path is converted to relative according to the real + # path of the source tree for group and project source trees, if both + # paths are relative or absolute. Otherwise the path is set as + # provided. + # + # @param [PBXGroup, PBXFileReference] object + # The object whose path needs to be set. + # + # @param [#to_s] path + # The path. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a key for + # {SOURCE_TREES_BY_KEY}. + # + # @return [void] + # + def set_path_with_source_tree(object, path, source_tree) + path = Pathname.new(path) + source_tree = normalize_source_tree(source_tree) + object.source_tree = source_tree + + if source_tree == SOURCE_TREES_BY_KEY[:absolute] + unless path.absolute? + raise '[Xcodeproj] Attempt to set a relative path with an ' \ + "absolute source tree: `#{path}`" + end + object.path = path.to_s + elsif source_tree == SOURCE_TREES_BY_KEY[:group] || source_tree == SOURCE_TREES_BY_KEY[:project] + source_tree_real_path = GroupableHelper.source_tree_real_path(object) + if source_tree_real_path && source_tree_real_path.absolute? == path.absolute? + relative_path = path.relative_path_from(source_tree_real_path) + object.path = relative_path.to_s + else + object.path = path.to_s + end + else + object.path = path.to_s + end + end + + private + + # @group Helpers + #-------------------------------------------------------------------# + + # Converts the given source tree to its string value. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a key for + # {SOURCE_TREES_BY_KEY}. + # + # @return [String] the string value of the source tree. 
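+          #
+          # @example The symbol to string mapping (illustrative).
+          #
+          #   normalize_source_tree(:group)        #=> '<group>'
+          #   normalize_source_tree('SOURCE_ROOT') #=> 'SOURCE_ROOT'
+          #   normalize_source_tree(:unknown)      # raises an informative error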
+ # + def normalize_source_tree(source_tree) + if source_tree.is_a?(Symbol) + source_tree = SOURCE_TREES_BY_KEY[source_tree] + end + + unless SOURCE_TREES_BY_KEY.values.include?(source_tree) + raise "[Xcodeproj] Unrecognized source tree option `#{source_tree}`" + end + source_tree + end + + #-------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/native_target.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/native_target.rb new file mode 100644 index 0000000..b9d3664 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/native_target.rb @@ -0,0 +1,708 @@ +module Xcodeproj + class Project + module Object + class AbstractTarget < AbstractObject + # @!group Attributes + + # @return [String] The name of the Target. + # + attribute :name, String + + # @return [String] the name of the build product. + # + attribute :product_name, String + + # @return [String] Comments associated with this target. + # + # This is apparently no longer used by Xcode. + # + attribute :comments, String + + # @return [XCConfigurationList] the list of the build configurations of + # the target. This list commonly include two configurations + # `Debug` and `Release`. + # + has_one :build_configuration_list, XCConfigurationList + + # @return [ObjectList<PBXTargetDependency>] the targets necessary to + # build this target. + # + has_many :dependencies, PBXTargetDependency + + public + + # @!group Helpers + #--------------------------------------# + + # Gets the value for the given build setting in all the build + # configurations or the value inheriting the value from the project + # ones if needed. + # + # @param [String] key + # the key of the build setting. + # + # @param [Bool] resolve_against_xcconfig + # whether the resolved setting should take in consideration any + # configuration file present. + # + # @return [Hash{String => String}] The value of the build setting + # grouped by the name of the build configuration. + # + # TODO: Full support for this would require to take into account + # the default values for the platform. + # + def resolved_build_setting(key, resolve_against_xcconfig = false) + target_settings = build_configuration_list.get_setting(key, resolve_against_xcconfig, self) + project_settings = project.build_configuration_list.get_setting(key, resolve_against_xcconfig) + target_settings.merge(project_settings) do |_key, target_val, proj_val| + target_includes_inherited = Constants::INHERITED_KEYWORDS.any? { |keyword| target_val.include?(keyword) } if target_val + if target_includes_inherited && proj_val + if target_val.is_a? String + target_val.gsub(Regexp.union(Constants::INHERITED_KEYWORDS), proj_val) + else + target_val.flat_map { |value| Constants::INHERITED_KEYWORDS.include?(value) ? proj_val : value } + end + else + target_val || proj_val + end + end + end + + # Gets the value for the given build setting, properly inherited if + # need, if shared across the build configurations. + # + # @param [String] key + # the key of the build setting. + # + # @param [Boolean] resolve_against_xcconfig + # whether the resolved setting should take in consideration any + # configuration file present. + # + # @raise If the build setting has multiple values. 
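+        #
+        # @example A sketch, assuming every build configuration of the target
+        #          sets the same value for these settings.
+        #
+        #   target.common_resolved_build_setting('IPHONEOS_DEPLOYMENT_TARGET')
+        #   #=> "12.0"
+        #   target.resolved_build_setting('SWIFT_VERSION')
+        #   #=> { "Debug" => "5.0", "Release" => "5.0" }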
+ # + # @note As it is common not to have a setting with no value for + # custom build configurations nil keys are not considered to + # determine if the setting is unique. This is an heuristic + # which might not closely match Xcode behaviour. + # + # @return [String] The value of the build setting. + # + def common_resolved_build_setting(key, resolve_against_xcconfig: false) + values = resolved_build_setting(key, resolve_against_xcconfig).values.compact.uniq + if values.count <= 1 + values.first + else + raise "[Xcodeproj] Consistency issue: build setting `#{key}` has multiple values: `#{resolved_build_setting(key)}`" + end + end + + # @return [String] the SDK that the target should use. + # + def sdk + common_resolved_build_setting('SDKROOT') + end + + # @return [Symbol] the name of the platform of the target. + # + def platform_name + return unless sdk + if sdk.include? 'iphoneos' + :ios + elsif sdk.include? 'macosx' + :osx + elsif sdk.include? 'appletvos' + :tvos + elsif sdk.include? 'watchos' + :watchos + end + end + + # @return [String] the version of the SDK. + # + def sdk_version + return unless sdk + sdk.scan(/[0-9.]+/).first + end + + # @visibility private + # + # @return [Hash<Symbol, String>] + # The name of the setting for the deployment target by platform + # name. + # + DEPLOYMENT_TARGET_SETTING_BY_PLATFORM_NAME = { + :ios => 'IPHONEOS_DEPLOYMENT_TARGET', + :osx => 'MACOSX_DEPLOYMENT_TARGET', + :tvos => 'TVOS_DEPLOYMENT_TARGET', + :watchos => 'WATCHOS_DEPLOYMENT_TARGET', + }.freeze + + # @return [String] the deployment target of the target according to its + # platform. + # + def deployment_target + return unless setting = DEPLOYMENT_TARGET_SETTING_BY_PLATFORM_NAME[platform_name] + common_resolved_build_setting(setting) + end + + # @param [String] deployment_target the deployment target to set for + # the target according to its platform. + # + def deployment_target=(deployment_target) + return unless setting = DEPLOYMENT_TARGET_SETTING_BY_PLATFORM_NAME[platform_name] + build_configurations.each do |config| + config.build_settings[setting] = deployment_target + end + end + + # @return [ObjectList<XCBuildConfiguration>] the build + # configurations of the target. + # + def build_configurations + build_configuration_list.build_configurations + end + + # Adds a new build configuration to the target and populates its with + # default settings according to the provided type if one doesn't + # exists. + # + # @note If a build configuration with the given name is already + # present no new build configuration is added. + # + # @param [String] name + # The name of the build configuration. + # + # @param [Symbol] type + # The type of the build configuration used to populate the build + # settings, must be :debug or :release. + # + # @return [XCBuildConfiguration] the created build configuration or the + # existing one with the same name. + # + def add_build_configuration(name, type) + if existing = build_configuration_list[name] + existing + else + build_configuration = project.new(XCBuildConfiguration) + build_configuration.name = name + product_type = self.product_type if respond_to?(:product_type) + build_configuration.build_settings = ProjectHelper.common_build_settings(type, platform_name, deployment_target, product_type) + build_configuration_list.build_configurations << build_configuration + build_configuration + end + end + + # @param [String] build_configuration_name + # the name of a build configuration. 
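+        #
+        # @example Illustrative values.
+        #
+        #   target.build_settings('Debug')['PRODUCT_NAME'] #=> "$(TARGET_NAME)"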
+ # + # @return [Hash] the build settings of the build configuration with the + # given name. + # + # + def build_settings(build_configuration_name) + build_configuration_list.build_settings(build_configuration_name) + end + + # @!group Build Phases Helpers + + # @return [PBXFrameworksBuildPhase] + # the frameworks build phases of the target. + # + def frameworks_build_phases + build_phases.find { |bp| bp.class == PBXFrameworksBuildPhase } + end + + # @return [Array<PBXCopyFilesBuildPhase>] + # the copy files build phases of the target. + # + def copy_files_build_phases + build_phases.grep(PBXCopyFilesBuildPhase) + end + + # @return [Array<PBXShellScriptBuildPhase>] + # the shell script build phases of the target. + # + def shell_script_build_phases + build_phases.grep(PBXShellScriptBuildPhase) + end + + # Adds a dependency on the given target. + # + # @param [AbstractTarget] target + # the target which should be added to the dependencies list of + # the receiver. The target may be a target of this target's + # project or of a subproject of this project. Note that the + # subproject must already be added to this target's project. + # + # @return [void] + # + def add_dependency(target) + unless dependency_for_target(target) + container_proxy = project.new(Xcodeproj::Project::PBXContainerItemProxy) + if target.project == project + container_proxy.container_portal = project.root_object.uuid + else + subproject_reference = project.reference_for_path(target.project.path) + raise ArgumentError, 'add_dependency received target that belongs to a project that is not this project and is not a subproject of this project' unless subproject_reference + container_proxy.container_portal = subproject_reference.uuid + end + container_proxy.proxy_type = Constants::PROXY_TYPES[:native_target] + container_proxy.remote_global_id_string = target.uuid + container_proxy.remote_info = target.name + + dependency = project.new(Xcodeproj::Project::PBXTargetDependency) + dependency.name = target.name + dependency.target = target if target.project == project + dependency.target_proxy = container_proxy + + dependencies << dependency + end + end + + # Checks whether this target has a dependency on the given target. + # + # @param [AbstractTarget] target + # the target to search for. + # + # @return [PBXTargetDependency] + # + def dependency_for_target(target) + dependencies.find do |dep| + if dep.target_proxy.remote? + subproject_reference = project.reference_for_path(target.project.path) + uuid = subproject_reference.uuid if subproject_reference + dep.target_proxy.remote_global_id_string == target.uuid && dep.target_proxy.container_portal == uuid + else + dep.target.uuid == target.uuid + end + end + end + + # Creates a new copy files build phase. + # + # @param [String] name + # an optional name for the phase. + # + # @return [PBXCopyFilesBuildPhase] the new phase. + # + def new_copy_files_build_phase(name = nil) + phase = project.new(PBXCopyFilesBuildPhase) + phase.name = name + build_phases << phase + phase + end + + # Creates a new shell script build phase. + # + # @param (see #new_copy_files_build_phase) + # + # @return [PBXShellScriptBuildPhase] the new phase. 
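+        #
+        # @example Adding a run-script phase: a sketch, the phase name and
+        #          script body are illustrative assumptions.
+        #
+        #   phase = target.new_shell_script_build_phase('Run SwiftLint')
+        #   phase.shell_script = 'swiftlint --quiet'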
+ # + def new_shell_script_build_phase(name = nil) + phase = project.new(PBXShellScriptBuildPhase) + phase.name = name + build_phases << phase + phase + end + + public + + # @!group System frameworks + #--------------------------------------# + + # Adds a file reference for one or more system framework to the project + # if needed and adds them to the Frameworks build phases. + # + # @param [Array<String>, String] names + # The name or the list of the names of the framework. + # + # @note Xcode behaviour is following: if the target has the same SDK + # of the project it adds the reference relative to the SDK root + # otherwise the reference is added relative to the Developer + # directory. This can create confusion or duplication of the + # references of frameworks linked by iOS and OS X targets. For + # this reason the new Xcodeproj behaviour is to add the + # frameworks in a subgroup according to the platform. + # + # @return [Array<PBXFileReference>] An array of the newly created file + # references. + # + def add_system_framework(names) + Array(names).map do |name| + case platform_name + when :ios + group = project.frameworks_group['iOS'] || project.frameworks_group.new_group('iOS') + path_sdk_name = 'iPhoneOS' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_IOS_SDK + when :osx + group = project.frameworks_group['OS X'] || project.frameworks_group.new_group('OS X') + path_sdk_name = 'MacOSX' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_OSX_SDK + when :tvos + group = project.frameworks_group['tvOS'] || project.frameworks_group.new_group('tvOS') + path_sdk_name = 'AppleTVOS' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_TVOS_SDK + when :watchos + group = project.frameworks_group['watchOS'] || project.frameworks_group.new_group('watchOS') + path_sdk_name = 'WatchOS' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_WATCHOS_SDK + else + raise 'Unknown platform for target' + end + + path = "Platforms/#{path_sdk_name}.platform/Developer/SDKs/#{path_sdk_name}#{path_sdk_version}.sdk/System/Library/Frameworks/#{name}.framework" + unless ref = group.find_file_by_path(path) + ref = group.new_file(path, :developer_dir) + end + frameworks_build_phase.add_file_reference(ref, true) + ref + end + end + alias_method :add_system_frameworks, :add_system_framework + + # Adds a file reference for one or more system dylib libraries to the project + # if needed and adds them to the Frameworks build phases. + # + # @param [Array<String>, String] names + # The name or the list of the names of the libraries. + # + # @return [void] + # + def add_system_library(names) + add_system_library_extension(names, 'dylib') + end + alias_method :add_system_libraries, :add_system_library + + def add_system_library_extension(names, extension) + Array(names).each do |name| + path = "usr/lib/lib#{name}.#{extension}" + files = project.frameworks_group.files + unless reference = files.find { |ref| ref.path == path } + reference = project.frameworks_group.new_file(path, :sdk_root) + end + frameworks_build_phase.add_file_reference(reference, true) + reference + end + end + private :add_system_library_extension + + # Adds a file reference for one or more system tbd libraries to the project + # if needed and adds them to the Frameworks build phases. + # + # @param [Array<String>, String] names + # The name or the list of the names of the libraries. 
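+        #
+        # @example Illustrative usage.
+        #
+        #   target.add_system_library_tbd(%w(sqlite3 z))
+        #   # adds usr/lib/libsqlite3.tbd and usr/lib/libz.tbd references and
+        #   # links them in the target's frameworks build phase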
+ # + # @return [void] + # + def add_system_library_tbd(names) + add_system_library_extension(names, 'tbd') + end + alias_method :add_system_libraries_tbd, :add_system_library_tbd + + public + + # @!group AbstractObject Hooks + #--------------------------------------# + + # @return [Hash{String => Hash}] A hash suitable to display the object + # to the user. + # + def pretty_print + { + display_name => { + 'Build Phases' => build_phases.map(&:pretty_print), + 'Build Configurations' => build_configurations.map(&:pretty_print), + }, + } + end + end + + #-----------------------------------------------------------------------# + + # Represents a target handled by Xcode. + # + class PBXNativeTarget < AbstractTarget + # @!group Attributes + + # @return [PBXBuildRule] the build rules of this target. + # + has_many :build_rules, PBXBuildRule + + # @return [String] the build product type identifier. + # + attribute :product_type, String + + # @return [PBXFileReference] the reference to the product file. + # + has_one :product_reference, PBXFileReference + + # @return [ObjectList<XCSwiftPackageProductDependency>] the Swift package products necessary to + # build this target. + # + has_many :package_product_dependencies, XCSwiftPackageProductDependency + + # @return [String] the install path of the product. + # + attribute :product_install_path, String + + # @return [ObjectList<AbstractBuildPhase>] the build phases of the + # target. + # + # @note Apparently only PBXCopyFilesBuildPhase and + # PBXShellScriptBuildPhase can appear multiple times in a + # target. + # + has_many :build_phases, AbstractBuildPhase + + public + + # @!group Helpers + #--------------------------------------# + + # @return [Symbol] The type of the target expressed as a symbol. + # + def symbol_type + Constants::PRODUCT_TYPE_UTI.key(product_type) + end + + # @return [Boolean] Whether the target is a test target. + # + def test_target_type? + case symbol_type + when :octest_bundle, :unit_test_bundle, :ui_test_bundle + true + else + false + end + end + + # @return [Boolean] Whether the target is an extension. + # + def extension_target_type? + case symbol_type + when :app_extension, :watch_extension, :watch2_extension, :tv_extension, :messages_extension + true + else + false + end + end + + # @return [Boolean] Whether the target is launchable. + # + def launchable_target_type? + case symbol_type + when :application, :command_line_tool + true + else + false + end + end + + # Adds source files to the target. + # + # @param [Array<PBXFileReference>] file_references + # the files references of the source files that should be added + # to the target. + # + # @param [String] compiler_flags + # the compiler flags for the source files. + # + # @yield_param [PBXBuildFile] each created build file. + # + # @return [Array<PBXBuildFile>] the created build files. + # + def add_file_references(file_references, compiler_flags = {}) + file_references.map do |file| + extension = File.extname(file.path).downcase + header_extensions = Constants::HEADER_FILES_EXTENSIONS + is_header_phase = header_extensions.include?(extension) + phase = is_header_phase ? headers_build_phase : source_build_phase + + unless build_file = phase.build_file(file) + build_file = project.new(PBXBuildFile) + build_file.file_ref = file + phase.files << build_file + end + + if compiler_flags && !compiler_flags.empty? 
&& !is_header_phase + (build_file.settings ||= {}).merge!('COMPILER_FLAGS' => compiler_flags) do |_, old, new| + [old, new].compact.join(' ') + end + end + + yield build_file if block_given? + + build_file + end + end + + # Adds resource files to the resources build phase of the target. + # + # @param [Array<PBXFileReference>] resource_file_references + # the files references of the resources to the target. + # + # @return [void] + # + def add_resources(resource_file_references) + resource_file_references.each do |file| + next if resources_build_phase.include?(file) + build_file = project.new(PBXBuildFile) + build_file.file_ref = file + resources_build_phase.files << build_file + end + end + + # Finds or creates the headers build phase of the target. + # + # @note A target should have only one headers build phase. + # + # @return [PBXHeadersBuildPhase] the headers build phase. + # + def headers_build_phase + find_or_create_build_phase_by_class(PBXHeadersBuildPhase) + end + + # Finds or creates the source build phase of the target. + # + # @note A target should have only one source build phase. + # + # @return [PBXSourcesBuildPhase] the source build phase. + # + def source_build_phase + find_or_create_build_phase_by_class(PBXSourcesBuildPhase) + end + + # Finds or creates the frameworks build phase of the target. + # + # @note A target should have only one frameworks build phase. + # + # @return [PBXFrameworksBuildPhase] the frameworks build phase. + # + def frameworks_build_phase + find_or_create_build_phase_by_class(PBXFrameworksBuildPhase) + end + + # Finds or creates the resources build phase of the target. + # + # @note A target should have only one resources build phase. + # + # @return [PBXResourcesBuildPhase] the resources build phase. + # + def resources_build_phase + find_or_create_build_phase_by_class(PBXResourcesBuildPhase) + end + + private + + # @!group Internal Helpers + #--------------------------------------# + + # Find or create a build phase by a given class + # + # @param [Class] phase_class the class of the build phase to find or create. + # + # @return [AbstractBuildPhase] the build phase whose class match the given phase_class. + # + def find_or_create_build_phase_by_class(phase_class) + @phases ||= {} + unless phase_class < AbstractBuildPhase + raise ArgumentError, "#{phase_class} must be a subclass of #{AbstractBuildPhase.class}" + end + @phases[phase_class] ||= build_phases.find { |bp| bp.class == phase_class } || + project.new(phase_class).tap { |bp| build_phases << bp } + end + + public + + # @!group AbstractObject Hooks + #--------------------------------------# + + # Sorts the to many attributes of the object according to the display + # name. + # + # Build phases are not sorted as they order is relevant. + # + def sort(_options = nil) + attributes_to_sort = to_many_attributes.reject { |attr| attr.name == :build_phases } + attributes_to_sort.each do |attrb| + list = attrb.get_value(self) + list.sort! do |x, y| + x.display_name <=> y.display_name + end + end + end + + def to_hash_as(method = :to_hash) + hash_as = super + if !hash_as['packageProductDependencies'].nil? && hash_as['packageProductDependencies'].empty? + hash_as.delete('packageProductDependencies') + end + hash_as + end + + def to_ascii_plist + plist = super + if !plist.value['packageProductDependencies'].nil? && plist.value['packageProductDependencies'].empty? 
+ plist.value.delete('packageProductDependencies') + end + plist + end + end + + #-----------------------------------------------------------------------# + + # Represents a target that only consists in a aggregate of targets. + # + # @todo Apparently it can't have build rules. + # + class PBXAggregateTarget < AbstractTarget + # @!group Attributes + + # @return [PBXBuildRule] the build phases of the target. + # + # @note Apparently only PBXCopyFilesBuildPhase and + # PBXShellScriptBuildPhase can appear multiple times in a + # target. + # + has_many :build_phases, [PBXCopyFilesBuildPhase, PBXShellScriptBuildPhase] + end + + #-----------------------------------------------------------------------# + + # Represents a legacy target which uses an external build tool. + # + # Apparently it can't have any build phase but the attribute can be + # present. + # + class PBXLegacyTarget < AbstractTarget + # @!group Attributes + + # @return [String] e.g "Dir" + # + attribute :build_working_directory, String + + # @return [String] e.g "$(ACTION)" + # + attribute :build_arguments_string, String + + # @return [String] e.g "1" + # + attribute :pass_build_settings_in_environment, String + + # @return [String] e.g "/usr/bin/make" + # + attribute :build_tool_path, String + + # @return [PBXBuildRule] the build phases of the target. + # + # @note Apparently only PBXCopyFilesBuildPhase and + # PBXShellScriptBuildPhase can appear multiple times in a + # target. + # + has_many :build_phases, AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/reference_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/reference_proxy.rb new file mode 100644 index 0000000..48aedbf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/reference_proxy.rb @@ -0,0 +1,67 @@ +module Xcodeproj + class Project + module Object + # Apparently a proxy for a reference object which might belong another + # project contained in the same workspace of the project document. + # + # This class is used for referencing the products of another project. + # + class PBXReferenceProxy < AbstractObject + # @!group Attributes + + # @return [String] the name of the reference. + # + attribute :name, String + + # @return [String] the path of the referenced filed. + # + attribute :path, String + + # @return [String] the file type of the referenced filed. + # + attribute :file_type, String + + # @return [PBXContainerItemProxy] the proxy to the project that + # contains the object. + # + has_one :remote_ref, PBXContainerItemProxy + + # @return [String] the source tree for the path of the reference. + # + # @example + # "BUILT_PRODUCTS_DIR" + # + attribute :source_tree, String + + #---------------------------------------------------------------------# + + public + + # @!group Helpers + + # Checks whether the reference is a proxy. + # + # @return [Bool] always true for this ISA. + # + def proxy? + true + end + + #---------------------------------------------------------------------# + + def ascii_plist_annotation + " #{name || path && File.basename(path)} " + end + + # @return [String] A name suitable for displaying the object to the + # user. 
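+        #
+        # @example Illustrative, for a proxy to a subproject's static library.
+        #
+        #   proxy.display_name #=> "libDependency.a"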
+ # + def display_name + return name if name + return path if path + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/root_object.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/root_object.rb new file mode 100644 index 0000000..a7cf6f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/root_object.rb @@ -0,0 +1,100 @@ +module Xcodeproj + class Project + module Object + # This class represents the root object of a project document. + # + class PBXProject < AbstractObject + # @!group Attributes + + # @return [ObjectList<AbstractTarget>] a list of all the targets in + # the project. + # + has_many :targets, AbstractTarget + + # @return [Hash{String => String}] attributes the attributes of the + # target. + # + # @note The hash might contain the following keys: + # + # - `CLASSPREFIX` + # - `LastUpgradeCheck` + # - `ORGANIZATIONNAME` + # + attribute :attributes, Hash, + 'LastSwiftUpdateCheck' => Constants::LAST_SWIFT_UPGRADE_CHECK, + 'LastUpgradeCheck' => Constants::LAST_UPGRADE_CHECK + + # @return [XCConfigurationList] the configuration list of the project. + # + has_one :build_configuration_list, XCConfigurationList + + # @return [String] the compatibility version of the project. + # + attribute :compatibility_version, String, 'Xcode 3.2' + + # @return [String] the development region of the project. + # + attribute :development_region, String, 'en' + + # @return [String] whether the project has scanned for encodings. + # + attribute :has_scanned_for_encodings, String, '0' + + # @return [Array<String>] the list of known regions. + # + attribute :known_regions, Array, %w(en Base) + + # @return [PBXGroup] the main group of the project. The one displayed + # by Xcode in the Project Navigator. + # + has_one :main_group, PBXGroup + + # @return [PBXGroup] the group containing the references to products of + # the project. + # + has_one :product_ref_group, PBXGroup + + # @return [String] the directory of the project. + # + attribute :project_dir_path, String, '' + + # @return [String] the root of the project. + # + attribute :project_root, String, '' + + # @return [Array<XCRemoteSwiftPackageReference>] the list of Swift package references. + # + has_many :package_references, XCRemoteSwiftPackageReference + + # @return [Array<ObjectDictionary>] any reference to other projects. + # + has_many_references_by_keys :project_references, + :project_ref => PBXFileReference, + :product_group => PBXGroup + + def name + project.path.basename('.xcodeproj').to_s + end + + def ascii_plist_annotation + ' Project object ' + end + + def to_hash_as(method = :to_hash) + hash_as = super + if !hash_as['packageReferences'].nil? && hash_as['packageReferences'].empty? + hash_as.delete('packageReferences') if !hash_as['packageReferences'].nil? && hash_as['packageReferences'].empty? + end + hash_as + end + + def to_ascii_plist + plist = super + plist.value.delete('projectReferences') if plist.value['projectReferences'].empty? + plist.value.delete('packageReferences') if !plist.value['packageReferences'].nil? && plist.value['packageReferences'].empty? 
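+          # Empty reference lists are dropped instead of being serialized,
+          # which appears to mirror Xcode's own output (it omits these keys
+          # when there is nothing to list).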
+ plist + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/swift_package_product_dependency.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/swift_package_product_dependency.rb new file mode 100644 index 0000000..cb83e2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/swift_package_product_dependency.rb @@ -0,0 +1,19 @@ +module Xcodeproj + class Project + module Object + # This class represents a Swift package product dependency. + # + class XCSwiftPackageProductDependency < AbstractObject + # @!group Attributes + + # @return [XCRemoteSwiftPackageReference] the Swift package reference. + # + has_one :package, XCRemoteSwiftPackageReference + + # @return [String] the product name of this Swift package. + # + attribute :product_name, String + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/swift_package_remote_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/swift_package_remote_reference.rb new file mode 100644 index 0000000..a70d684 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/swift_package_remote_reference.rb @@ -0,0 +1,19 @@ +module Xcodeproj + class Project + module Object + # This class represents a Swift package reference. + # + class XCRemoteSwiftPackageReference < AbstractObject + # @!group Attributes + + # @return [String] the repository url this Swift package was installed from. + # + attribute :repositoryURL, String + + # @return [Hash] the version requirements for this Swift package. + # + attribute :requirement, Hash + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/target_dependency.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/target_dependency.rb new file mode 100644 index 0000000..43f4092 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object/target_dependency.rb @@ -0,0 +1,90 @@ +module Xcodeproj + class Project + module Object + # Represents a dependency of a target on another one. + # + class PBXTargetDependency < AbstractObject + # @!group Attributes + + # @return [PBXNativeTarget] the target that needs to be built to + # satisfy the dependency. + # + has_one :target, AbstractTarget + + # @return [PBXContainerItemProxy] a proxy for the target that needs to + # be built. + # + # @note Apparently to support targets in other projects of the same + # workspace. + # + has_one :target_proxy, PBXContainerItemProxy + + # @return [String] the name of the target. + # + # @note This seems only to be used when the target dependency is a + # target from a nested Xcode project. + # + attribute :name, String + + # @return [String] the platform filter for this target dependency. + # + attribute :platform_filter, String + + # @return [String] the product reference for this target dependency. + # + attribute :product_ref, String + + public + + # @!group AbstractObject Hooks + #--------------------------------------# + + # @return [String] The name of the dependency. 
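+        #
+        # @example Illustrative, for a dependency on a local native target.
+        #
+        #   dependency.display_name #=> "MyFrameworkTarget"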
+ # + def display_name + return name if name + return target.name if target + return target_proxy.remote_info if target_proxy + end + + def ascii_plist_annotation + " #{isa} " + end + + # @return [String] uuid of the target, if the dependency + # is a native target, otherwise the uuid of the + # target in the sub-project if the dependency is + # a target proxy + # + def native_target_uuid + return target.uuid if target + return target_proxy.remote_global_id_string if target_proxy + raise "Expected target or target_proxy, from which to fetch a uuid for target '#{display_name}'." \ + "Find and clear the PBXTargetDependency entry with uuid '#{@uuid}' in your .xcodeproj." + end + + # @note This override is necessary because Xcode allows for circular + # target dependencies. + # + # @return [Hash<String => String>] Returns a cascade representation of + # the object without UUIDs. + # + def to_tree_hash + hash = {} + hash['displayName'] = display_name + hash['isa'] = isa + hash['targetProxy'] = target_proxy.to_tree_hash + hash + end + + # @note This is a no-op, because the targets could theoretically depend + # on each other, leading to a stack level too deep error. + # + # @see AbstractObject#sort_recursively + # + def sort_recursively(_options = nil) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_attributes.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_attributes.rb new file mode 100644 index 0000000..e532565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_attributes.rb @@ -0,0 +1,522 @@ +module Xcodeproj + class Project + module Object + # This class represents an attribute of {AbstractObject} subclasses. + # Attributes are created by the {AbstractObject} DSL methods and allow to + # mirror the underlying attributes of the xcodeproj document model. + # + # Attributes provide support for runtime type checking. They also allow + # {AbstractObject} initialization and serialization to plist. + # + # @todo Add support for a list of required values so objects can be + # validated before serialization ? + # + class AbstractObjectAttribute + # @return [Symbol] the type of the attribute. It can be `:simple`, + # `:to_one`, `:to_many`. + # + attr_reader :type + + # @return [Symbol] the name of the attribute. + # + attr_reader :name + + # @return [Class] the class that owns the attribute. + # + attr_accessor :owner + + # Creates a new attribute with the given type and name. + # + # Attributes are expected to be instantiated only by the + # {AbstractObject} DSL methods. + # + # @param [Symbol] type + # the type of the attribute. + # + # @param [Symbol] name + # the name of the attribute. + # + # @param [Class] owner + # the class that owns the attribute. + # + def initialize(type, name, owner) + @type = type + @name = name + @owner = owner + end + + # @return [String] The name of the attribute in camel case. + # + # @example + # attribute.new(:simple, :project_root) + # attribute.plist_name #=> projectRoot + # + def plist_name + @plist_name ||= CaseConverter.convert_to_plist(name, :lower) + end + + # @return [Array<Class>] the list of the classes accepted by the + # attribute. + # + attr_accessor :classes + + # @return [{Symbol, Array<Class>}] the list of the classes accepted by + # each key for attributes which store a dictionary. 
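+        #
+        # @example A sketch mirroring PBXProject's project_references
+        #          attribute.
+        #
+        #   attribute.classes_by_key
+        #   #=> { :project_ref => PBXFileReference, :product_group => PBXGroup }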
+ # + attr_accessor :classes_by_key + + # @return [String, Array, Hash] the default value, if any, for simple + # attributes. + # + attr_accessor :default_value + + # Convenience method that returns the value of this attribute for a + # given object. + # + # @param [AbstractObject] object + # the object for which the value of this attribute is requested. + # + # @return [String, Array, Hash, AbstractObject, ObjectList] + # the value. + # + def get_value(object) + object.send(name) + end + + # Convenience method that sets the value of this attribute for a + # given object. It makes sense only for `:simple` or `:to_one` + # attributes. + # + # @raise It the type of this attribute is `:to_many`. + # + # @param [AbstractObject] object + # the object for which to set the value. + # + # @param [String, Hash, Array, AbstractObject] new_value + # the value to set for the attribute. + # + # @return [void] + # + def set_value(object, new_value) + if type == :to_many + raise '[Xcodeproj] Set value called for a to-many attribute' + end + object.send("#{name}=", new_value) + end + + # Convenience method that sets the value of this attribute for a given + # object to the default (if any). It makes sense only for `:simple` + # attributes. + # + # @param [AbstractObject] object + # the object for which to set the default value. + # + # @note It is extremely important to duplicate the default values + # otherwise kittens cry! + # + # @return [void] + # + def set_default(object) + unless type == :simple + raise "[Xcodeproj] Set value called for a #{type} attribute" + end + set_value(object, default_value.dup) if default_value + end + + # Checks that a given value is compatible with the attribute. + # + # This method powers the runtime type checking of the {AbstractObject} + # and is used its by synthesised methods. + # + # @raise If the class of the value is not compatible with the attribute. + # + # @return [void] + # + def validate_value(object) + return unless object + acceptable = classes.find { |klass| object.class == klass || object.class < klass } + if type == :simple + raise "[Xcodeproj] Type checking error: got `#{object.class}` " \ + "for attribute: #{inspect}" unless acceptable + else + raise "[Xcodeproj] Type checking error: got `#{object.isa}` for " \ + "attribute: #{inspect}" unless acceptable + end + end + + # Checks that a given value is compatible with a key for attributes + # which store references by key. + # + # This method is used by the #{ObjectDictionary} class. + # + # @raise If the class of the value is not compatible with the given + # key. + # + def validate_value_for_key(object, key) + unless type == :references_by_keys + raise '[Xcodeproj] This method should be called only for ' \ + 'attributes of type `references_by_keys`' + end + + unless classes_by_key.keys.include?(key) + raise "[Xcodeproj] unsupported key `#{key}` " \ + "(accepted `#{classes_by_key.keys}`) for attribute `#{inspect}`" + end + + return unless object + classes = Array(classes_by_key[key]) + acceptable = classes.find { |klass| object.class == klass || object.class < klass } + unless acceptable + raise "[Xcodeproj] Type checking error: got `#{object.isa}` " \ + "for key `#{key}` (which accepts `#{classes}`) of " \ + "attribute: `#{inspect}`" + end + end + + # @return [String] A string suitable for debugging the object. 
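+        #
+        # @example Illustrative, for a simple attribute.
+        #
+        #   attribute.inspect
+        #   #=> "Attribute `projectRoot` (type: `simple`, classes: `[String]`, owner class: `PBXProject`)"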
+ # + def inspect + if type == :simple + "Attribute `#{plist_name}` (type: `#{type}`, classes: " \ + "`#{classes}`, owner class: `#{owner.isa}`)" + else + "Attribute `#{plist_name}` (type: `#{type}`, classes: " \ + "`#{classes.map(&:isa)}`, owner class: `#{owner.isa}`)" + end + end + end + + class AbstractObject + # The {AbstractObject} DSL methods allow to specify with fidelity the + # underlying model of the xcodeproj document format. {AbstractObject} + # subclasses should specify their attributes through the following + # methods: + # + # - `{AbstractObject.attribute}` + # - `{AbstractObject.has_one}` + # - `{AbstractObject.has_many}` + # + # @note The subclasses should not interfere with the methods + # synthesised by the DSL and should only implement helpers in top + # of them. + # + # @note Attributes are typed and are validated at runtime. + # + class << self + # @return [Array<AbstractObjectAttribute>] the attributes associated + # with the class. + # + # @note It includes the attributes defined in the superclass and the + # list is cleaned for duplicates. Subclasses should not duplicate + # an attribute of the superclass but for the method implementation + # they will duplicate them. + # + # @visibility private + # + def attributes + unless @full_attributes + attributes = @attributes || [] + if superclass.respond_to?(:attributes) + super_attributes = superclass.attributes + else + super_attributes = [] + end + # The uniqueness of the attributes is very important because the + # initialization from plist deletes the values from the + # dictionary. + @full_attributes = attributes.concat(super_attributes).uniq + end + @full_attributes + end + + # @return [Array<AbstractObjectAttribute>] the simple attributes + # associated with with the class. + # + # @visibility private + # + def simple_attributes + @simple_attributes ||= attributes.select { |a| a.type == :simple } + end + + # @return [Array<AbstractObjectAttribute>] the attributes + # representing a to one relationship associated with with the + # class. + # + # @visibility private + # + def to_one_attributes + @to_one_attributes ||= attributes.select { |a| a.type == :to_one } + end + + # @return [Array<AbstractObjectAttribute>] the attributes + # representing a to many relationship associated with with the + # class. + # + # @visibility private + # + def to_many_attributes + @to_many_attributes ||= attributes.select { |a| a.type == :to_many } + end + + # @visibility private + # + def references_by_keys_attributes + @references_by_keys_attributes ||= attributes.select { |a| a.type == :references_by_keys } + end + + private + + # Defines a new simple attribute and synthesises the corresponding + # methods. + # + # @note Simple attributes are directly stored in a hash. They can + # contain only a string, array of strings or a hash containing + # strings and thus they are not affected by reference counting. + # Clients can access the hash directly through the + # {AbstractObject#simple_attributes_hash} method. + # + # @param [Symbol] name + # the name of the attribute. + # + # @param [Class] klass + # the accepted {Class} for the values of the attribute. + # + # @param [String, Array<String>, Hash{String=>String}] default_value + # the default value for new objects. 
+ # + # @example + # attribute :project_root + # #=> leads to the creation of the following methods + # + # def project_root + # @simple_attributes_hash[projectRoot] + # end + # + # def project_root=(value) + # attribute.validate_value(value) + # @simple_attributes_hash[projectRoot] = value + # end + # + # @macro [attach] attribute + # @!attribute [rw] $1 + # + def attribute(name, klass, default_value = nil) + attrb = AbstractObjectAttribute.new(:simple, name, self) + attrb.classes = [klass] + attrb.default_value = default_value + add_attribute(attrb) + + define_method(attrb.name) do + @simple_attributes_hash ||= {} + @simple_attributes_hash[attrb.plist_name] + end + + define_method("#{attrb.name}=") do |value| + @simple_attributes_hash ||= {} + attrb.validate_value(value) + + existing = @simple_attributes_hash[attrb.plist_name] + if existing.is_a?(Hash) && value.is_a?(Hash) + return value if existing.keys == value.keys && existing == value + elsif existing == value + return value + end + mark_project_as_dirty! + @simple_attributes_hash[attrb.plist_name] = value + end + end + + # rubocop:disable Style/PredicateName + + # Defines a new relationship to a single and synthesises the + # corresponding methods. + # + # @note The synthesised setter takes care of handling reference + # counting directly. + # + # @param [String] singular_name + # the name of the relationship. + # + # @param [Class, Array<Class>] isas + # the list of the classes corresponding to the accepted isas for + # this relationship. + # + # @macro [attach] has_one + # @!attribute [rw] $1 + # + def has_one(singular_name, isas) + isas = [isas] unless isas.is_a?(Array) + attrb = AbstractObjectAttribute.new(:to_one, singular_name, self) + attrb.classes = isas + add_attribute(attrb) + + attr_reader(attrb.name) + # 1.9.2 fix, see https://github.com/CocoaPods/Xcodeproj/issues/40. + public(attrb.name) + + variable_name = :"@#{attrb.name}" + define_method("#{attrb.name}=") do |value| + attrb.validate_value(value) + + previous_value = send(attrb.name) + return value if previous_value == value + mark_project_as_dirty! + previous_value.remove_referrer(self) if previous_value + instance_variable_set(variable_name, value) + value.add_referrer(self) if value + end + end + + # Defines a new ordered relationship to many. + # + # @note This attribute only generates the reader method. Clients are + # not supposed to create {ObjectList} objects which are created + # by the methods synthesised by this attribute on demand. + # Clients, however can mutate the list according to its + # interface. The list is responsible to manage the reference + # counting for its values. + # + # @param [String] plural_name + # the name of the relationship. + # + # @param [Class, Array<Class>] isas + # the list of the classes corresponding to the accepted isas for + # this relationship. + # + # @macro [attach] has_many + # @!attribute [r] $1 + # + def has_many(plural_name, isas) + isas = [isas] unless isas.is_a?(Array) + + attrb = AbstractObjectAttribute.new(:to_many, plural_name, self) + attrb.classes = isas + add_attribute(attrb) + + variable_name = :"@#{attrb.name}" + define_method(attrb.name) do + # Here we are in the context of the instance + list = instance_variable_get(variable_name) + unless list + list = ObjectList.new(attrb, self) + instance_variable_set(variable_name, list) + end + list + end + end + + # Defines a new ordered relationship to many. + # + # @note This attribute only generates the reader method. 
Clients are + # not supposed to create {ObjectList} objects which are created + # by the methods synthesised by this attribute on demand. + # Clients, however can mutate the list according to its + # interface. The list is responsible to manage the reference + # counting for its values. + # + # @param [String] plural_name + # the name of the relationship. + # + # @param [{Symbol, Array<Class>}] classes_by_key + # the list of the classes corresponding to the accepted isas for + # this relationship. + # + # @macro [attach] has_many + # @!attribute [r] $1 + # + def has_many_references_by_keys(plural_name, classes_by_key) + attrb = AbstractObjectAttribute.new(:references_by_keys, plural_name, self) + attrb.classes = classes_by_key.values + attrb.classes_by_key = classes_by_key + add_attribute(attrb) + + variable_name = :"@#{attrb.name}" + define_method(attrb.name) do + # Here we are in the context of the instance + list = instance_variable_get(variable_name) + unless list + list = ObjectList.new(attrb, self) + instance_variable_set(variable_name, list) + end + list + end + end + + # rubocop:enable Style/PredicateName + + protected + + # Adds an attribute to the list of attributes of the class. + # + # @note This method is intended to be invoked only by the + # {AbstractObject} meta programming methods + # + # @return [void] + # + def add_attribute(attribute) + unless attribute.classes + raise "[Xcodeproj] BUG - missing classes for #{attribute.inspect}" + end + + unless attribute.classes.all? { |klass| klass.is_a?(Class) } + raise "[Xcodeproj] BUG - classes:#{attribute.classes} for #{attribute.inspect}" + end + + @attributes ||= [] + @attributes << attribute + end + end # AbstractObject << self + + private + + # @return [Hash] the simple attributes hash. + # + attr_reader :simple_attributes_hash + + public + + # @!group xcodeproj format attributes + + # @return (see AbstractObject.attributes) + # + # @visibility private + # + def attributes + self.class.attributes + end + + # @return (see AbstractObject.simple_attributes) + # + # @visibility private + # + def simple_attributes + self.class.simple_attributes + end + + # @return (see AbstractObject.to_one_attributes) + # + # @visibility private + # + def to_one_attributes + self.class.to_one_attributes + end + + # @return (see AbstractObject.to_many_attributes) + # + # @visibility private + # + def to_many_attributes + self.class.to_many_attributes + end + + # @return (see AbstractObject.to_many_attributes) + # + # @visibility private + # + def references_by_keys_attributes + self.class.references_by_keys_attributes + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_dictionary.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_dictionary.rb new file mode 100644 index 0000000..f664057 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_dictionary.rb @@ -0,0 +1,210 @@ +module Xcodeproj + class Project + # This class represents relationships to other objects stored in a + # Dictionary. + # + # It works in conjunction with the {AbstractObject} class to ensure that + # the project is not serialized with unreachable objects by updating the + # with reference count on modifications. 
+ # + # @note To provide full support as the other classes the dictionary should: + # + # Give the following attribute: + # + # has_many_references_by_keys :project_references, { + # :project_ref => PBXFileReference, + # :product_group => PBXGroup + # } + # + # This should be possible: + # + # #=> Note the API: + # root_object.project_references.project_ref = file + # + # #=> This should raise: + # root_object.project_references.product_group = file + # + # I.e. generate setters and getters from the specification hash. + # + # Also the interface is a dirty hybrid between the + # {AbstractObjectAttribute} and the {ObjectList}. + # + # @note Concerning the mutations methods it is safe to call only those + # which are overridden to inform objects reference count. Ideally all + # the hash methods should be covered, but this is not done yet. + # Moreover it is a moving target because the methods of array + # usually are implemented in C. + # + # @todo This class should use a {Hash} as a backing store instead of + # inheriting from it. This would prevent the usage of methods which + # don't notify the objects. + # + class ObjectDictionary < Hash + # @param [Object::AbstractObjectAttribute] attribute @see #attribute + # @param [Object] owner @see #owner + # + def initialize(attribute, owner) + @attribute = attribute + @owner = owner + end + + # @return [Object::AbstractObjectAttribute] The attribute that generated + # the list. + # + attr_reader :attribute + + # @return [Object] The object that owns the list. + # + attr_reader :owner + + # @return [Array<Symbol>] The list of the allowed keys. + # + def allowed_keys + attribute.classes_by_key.keys + end + + # @return [String] A string suitable for debugging. + # + def inspect + "<ObjectDictionary attribute:`#{@attribute.name}` " \ + "owner:`#{@owner.display_name}` values:#{super.inspect}>" + end + + # @!group Notification enabled methods + #------------------------------------------------------------------------# + + # Associates an object to the given key and updates its references count. + # + # @param [String] key + # The key. + # + # @param [AbstractObject] object + # The object to add to the dictionary. + # + # @return [AbstractObject] The given object. + # + def []=(key, object) + key = normalize_key(key) + if object + perform_additions_operations(object, key) + else + perform_deletion_operations(self[key]) + end + super(key, object) + end + + # Removes the given key from the dictionary and informs the object that + # is not longer referenced by the owner. + # + # @param [String] key + # The key. + # + def delete(key) + key = normalize_key(key) + object = self[key] + perform_deletion_operations(object) + super + end + + # @!group AbstractObject Methods + #-----------------------------------------------------------------------# + + # @return [Hash<String => String>] The plist representation of the + # dictionary where the objects are replaced by their UUIDs. + # + def to_hash + result = {} + each do |key, obj| + if obj + plist_key = Object::CaseConverter.convert_to_plist(key, nil) + result[plist_key] = Nanaimo::String.new(obj.uuid, obj.ascii_plist_annotation) + end + end + result + end + + def to_ascii_plist + to_hash + end + + # @return [Hash<String => String>] Returns a cascade representation of + # the object without UUIDs. 
+ # + def to_tree_hash + result = {} + each do |key, obj| + if obj + plist_key = Object::CaseConverter.convert_to_plist(key, nil) + result[plist_key] = obj.to_tree_hash + end + end + result + end + + # Removes all the references to a given object. + # + def remove_reference(object) + each { |key, obj| self[key] = nil if obj == object } + end + + # Informs the objects contained in the dictionary that another object is + # referencing them. + # + def add_referrer(referrer) + values.each { |obj| obj.add_referrer(referrer) } + end + + # Informs the objects contained in the dictionary that another object + # stopped referencing them. + # + def remove_referrer(referrer) + values.each { |obj| obj.remove_referrer(referrer) } + end + + private + + # @!group Private helpers + #------------------------------------------------------------------------# + + # @return [Symbol] Normalizes a key to a symbol converting the camel case + # format with underscores. + # + # @param [String, Symbol] key + # The key to normalize. + # + def normalize_key(key) + if key.is_a?(String) + key = Object::CaseConverter.convert_to_ruby(key) + end + + unless allowed_keys.include?(key) + raise "[Xcodeproj] Unsupported key `#{key}` (allowed " \ + "`#{allowed_keys}`) for `#{inspect}`" + end + key + end + + # Informs an object that it was added to the dictionary. In practice it + # adds the owner of the list as referrer to the objects. It also + # validates the value. + # + # @return [void] + # + def perform_additions_operations(object, key) + owner.mark_project_as_dirty! + object.add_referrer(owner) + attribute.validate_value_for_key(object, key) + end + + # Informs an object that it was removed from to the dictionary, so it can + # remove it from its referrers and take the appropriate actions. + # + # @return [void] + # + def perform_deletion_operations(objects) + owner.mark_project_as_dirty! + objects.remove_referrer(owner) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_list.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_list.rb new file mode 100644 index 0000000..e1df1b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/object_list.rb @@ -0,0 +1,223 @@ +module Xcodeproj + class Project + # This class represents an ordered relationship to many objects. + # + # It works in conjunction with the {AbstractObject} class to ensure that + # the project is not serialized with unreachable objects by updating the + # with reference count on modifications. + # + # @note Concerning the mutations methods it is safe to call only those + # which are overridden to inform objects reference count. Ideally all + # the array methods should be covered, but this is not done yet. + # Moreover it is a moving target because the methods of array + # usually are implemented in C + # + # @todo Cover all the mutations methods of the {Array} class. + # + class ObjectList < Array + # {Xcodeproj} clients are not expected to create instances of + # {ObjectList}, it is always initialized empty and automatically by the + # synthesized methods generated by {AbstractObject.has_many}. + # + def initialize(attribute, owner) + @attribute = attribute + @owner = owner + end + + # @return [Array<Class>] the attribute that generated the list. + # + attr_reader :attribute + + # @return [Array<Class>] the object that owns the list. 
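+    # @example Editor's illustrative sketch (not part of the upstream gem):
+    #          readers generated by `has_many` return ObjectList instances, so
+    #          mutations keep reference counts in sync; assumes an open
+    #          Xcodeproj::Project in `project` and an existing target in `target`.
+    #
+    #   project.targets        # ObjectList of AbstractTarget subclasses
+    #   project.targets.uuids  #=> ["607FACCF1AFB9204008FA782", ...]
+    #   project.targets << target  # marks the project dirty and registers the
+    #                               # owning root object as a referrer of `target`
+    #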
+ # + attr_reader :owner + + # @return [Array<String>] + # the UUIDs of all the objects referenced by this list. + # + def uuids + map(&:uuid) + end + + # @return [Array<AbstractObject>] + # a new array generated with the objects contained in the list. + # + def objects + to_a + end + + public + + # @!group Notification enabled methods + #------------------------------------------------------------------------# + + # TODO: the overridden methods are incomplete. + + # Adds an array of objects to list and updates their references count. + # + # @param [Array<AbstractObject, ObjectDictionary>] objects + # an array of objects to add to the list. + # + # @return [void] + # + def +(other) + perform_additions_operations(other) + super + end + + # Appends an object to list the and updates its references count. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to add to the list. + # + # @return [void] + # + def <<(object) + perform_additions_operations(object) + super + end + + # Adds an object to the given index of the list the and updates its + # references count. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to add to the list. + # + # @return [void] + # + def insert(index, object) + perform_additions_operations(object) + super + end + + # Prepends an object to the list and updates its references count. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to add to the list. + # + # @return [void] + # + def unshift(object) + perform_additions_operations(object) + super + end + + # Removes an object to list and updates its references count. + # + # @param [AbstractObject, ObjectDictionary] object + # the object to delete from the list. + # + # @return [AbstractObject, ObjectDictionary, Nil] the object if found. + # + def delete(object) + perform_deletion_operations(object) + super + end + + # Removes the object at the given index from the list and updates its + # references count. + # + # @param [Fixnum] from + # The index of the object. + # + # @return [AbstractObject, ObjectDictionary, Nil] the object if found. + # + def delete_at(index) + object = at(index) + perform_deletion_operations(object) + super + end + + # Removes all the objects contained in the list and updates their + # reference counts. + # + # @return [void] + # + def clear + objects.each do |object| + perform_deletion_operations(object) + end + super + end + + # Moves the given object to the given index. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to move. + # + # @param [Fixnum] to + # The new index for the object. + # + # @return [void] + # + def move(object, new_index) + return if index(object) == new_index + if obj = delete(object) + insert(new_index, obj) + else + raise "Attempt to move object `#{object}` not present in the list `#{inspect}`" + end + end + + # Moves the object at the given index to the given position. + # + # @param [Fixnum] from + # The current index of the object. + # + # @param [Fixnum] to + # The new index for the object. + # + # @return [void] + # + def move_from(current_index, new_index) + return if current_index == new_index + if obj = delete_at(current_index) + insert(new_index, obj) + else + raise "Attempt to move object from index `#{current_index}` which is beyond bounds of the list `#{inspect}`" + end + end + + def sort! + return super if owner.project.dirty? + previous = to_a + super + owner.mark_project_as_dirty! 
unless previous == to_a + self + end + + private + + # @!group Notification Methods + #------------------------------------------------------------------------# + + # Informs an object that it was added to the list. In practice it adds + # the owner of the list as referrer to the objects. It also validates the + # value. + # + # @return [void] + # + def perform_additions_operations(objects) + objects = [objects] unless objects.is_a?(Array) + objects.each do |obj| + owner.mark_project_as_dirty! + obj.add_referrer(owner) + attribute.validate_value(obj) unless obj.is_a?(ObjectDictionary) + end + end + + # Informs an object that it was removed from to the list, so it can + # remove its owner from its referrers and take the appropriate actions. + # + # @return [void] + # + def perform_deletion_operations(objects) + objects = [objects] unless objects.is_a?(Array) + objects.each do |obj| + owner.mark_project_as_dirty! + obj.remove_referrer(owner) unless obj.is_a?(ObjectDictionary) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/project_helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/project_helper.rb new file mode 100644 index 0000000..77eac8e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/project_helper.rb @@ -0,0 +1,341 @@ +module Xcodeproj + class Project + module ProjectHelper + include Object + + # @!group Targets + + #-----------------------------------------------------------------------# + + # Creates a new target and adds it to the project. + # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings, and the + # appropriate Framework according to the platform is added to to its + # Frameworks phase. + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [Symbol] type + # the type of target. Can be `:application`, `:dynamic_library`, + # `framework` or `:static_library`. + # + # @param [String] name + # the name of the target product. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [PBXGroup] product_group + # the product group, where to add to a file reference of the + # created target. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. + # + # @return [PBXNativeTarget] the target. + # + def self.new_target(project, type, name, platform, deployment_target, product_group, language) + # Target + target = project.new(PBXNativeTarget) + project.targets << target + target.name = name + target.product_name = name + target.product_type = Constants::PRODUCT_TYPE_UTI[type] + target.build_configuration_list = configuration_list(project, platform, deployment_target, type, language) + + # Product + product = product_group.new_product_ref_for_target(name, type) + target.product_reference = product + + # Build phases + build_phases_for_target_type(type).each { |phase| target.build_phases << project.new(phase) } + + # Frameworks + unless type == :static_library + framework_name = (platform == :osx) ? 'Cocoa' : 'Foundation' + target.add_system_framework(framework_name) + end + + target + end + + # Creates a new resource bundles target and adds it to the project. 
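+      # @example Editor's illustrative sketch (not part of the upstream gem)
+      #          showing `ProjectHelper.new_target` defined above; the project
+      #          path and the `project.products_group` helper are assumptions.
+      #
+      #   require 'xcodeproj'
+      #   project = Xcodeproj::Project.new('MyApp.xcodeproj')
+      #   target  = Xcodeproj::Project::ProjectHelper.new_target(
+      #     project, :application, 'MyApp', :ios, '13.0',
+      #     project.products_group, :swift)
+      #   project.save
+      #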
+ # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [String] name + # the name of the resources bundle. + # + # @param [Symbol] platform + # the platform of the resources bundle. Can be `:ios` or `:osx`. + # + # @param [PBXGroup] product_group + # the product group, where to add to a file reference of the + # created target. + # + # @return [PBXNativeTarget] the target. + # + def self.new_resources_bundle(project, name, platform, product_group) + # Target + target = project.new(PBXNativeTarget) + project.targets << target + target.name = name + target.product_name = name + target.product_type = Constants::PRODUCT_TYPE_UTI[:bundle] + + # Configuration List + cl = project.new(XCConfigurationList) + cl.default_configuration_is_visible = '0' + cl.default_configuration_name = 'Release' + release_conf = project.new(XCBuildConfiguration) + release_conf.name = 'Release' + release_conf.build_settings = common_build_settings(nil, platform, nil, target.product_type) + debug_conf = project.new(XCBuildConfiguration) + debug_conf.name = 'Debug' + debug_conf.build_settings = common_build_settings(nil, platform, nil, target.product_type) + cl.build_configurations << release_conf + cl.build_configurations << debug_conf + target.build_configuration_list = cl + + # Product + product = product_group.new_bundle(name) + target.product_reference = product + + # Build phases + build_phases_for_target_type(:bundle).each { |phase| target.build_phases << project.new(phase) } + + target + end + + # Creates a new aggregate target and adds it to the project. + # + # The target is configured for the given platform. + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [String] name + # the name of the aggregate target. + # + # @param [Symbol] platform + # the platform of the aggregate target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @return [PBXAggregateTarget] the target. + # + def self.new_aggregate_target(project, name, platform, deployment_target) + target = project.new(PBXAggregateTarget) + project.targets << target + target.name = name + target.build_configuration_list = configuration_list(project, platform, deployment_target) + target + end + + # Creates a new legacy target and adds it to the project. + # + # The target is configured for the given platform. + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [String] name + # the name of the aggregate target. + # + # @param [String] build_tool_path + # the build tool path to use for this target. + # + # @param [String] build_arguments_string + # the build arguments string to use for this target. + # + # @param [String] build_working_directory + # the build working directory to use for this target. + # + # @param [String] pass_build_settings_in_environment + # whether to pass build settings in the environment during execution of this target. + # + # @return [PBXLegacyTarget] the target. 
+ # + def self.new_legacy_target(project, name, build_tool_path = '/usr/bin/make', build_arguments_string = '$(ACTION)', + build_working_directory = nil, pass_build_settings_in_environment = '1') + target = project.new(PBXLegacyTarget) + project.targets << target + target.name = name + target.build_configuration_list = configuration_list(project) + target.build_tool_path = build_tool_path + target.build_arguments_string = build_arguments_string + target.build_working_directory = build_working_directory + target.pass_build_settings_in_environment = pass_build_settings_in_environment + target + end + + # @!group Private Helpers + + #-----------------------------------------------------------------------# + + # Returns a new configuration list, populated with release and debug + # configurations with common build settings for the given platform. + # + # @param [Project] project + # the project to which the configuration list should be added. + # + # @param [Symbol] platform + # the platform for the configuration list, can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [Symbol] target_product_type + # the product type of the target, can be any of `Constants::PRODUCT_TYPE_UTI.values` + # or `Constants::PRODUCT_TYPE_UTI.keys`. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. + # + # @return [XCConfigurationList] the generated configuration list. + # + def self.configuration_list(project, platform = nil, deployment_target = nil, target_product_type = nil, language = nil) + cl = project.new(XCConfigurationList) + cl.default_configuration_is_visible = '0' + cl.default_configuration_name = 'Release' + + release_conf = project.new(XCBuildConfiguration) + release_conf.name = 'Release' + release_conf.build_settings = common_build_settings(:release, platform, deployment_target, target_product_type, language) + + debug_conf = project.new(XCBuildConfiguration) + debug_conf.name = 'Debug' + debug_conf.build_settings = common_build_settings(:debug, platform, deployment_target, target_product_type, language) + + cl.build_configurations << release_conf + cl.build_configurations << debug_conf + + existing_configurations = cl.build_configurations.map(&:name) + project.build_configurations.each do |configuration| + next if existing_configurations.include?(configuration.name) + + new_config = project.new(XCBuildConfiguration) + new_config.name = configuration.name + new_config.build_settings = common_build_settings(configuration.type, platform, deployment_target, target_product_type, language) + cl.build_configurations << new_config + end + + cl + end + + # Returns the common build settings for a given platform and configuration + # name. + # + # @param [Symbol] type + # the type of the build configuration, can be `:release` or + # `:debug`. + # + # @param [Symbol] platform + # the platform for the build settings, can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [Symbol] target_product_type + # the product type of the target, can be any of + # `Constants::PRODUCT_TYPE_UTI.values` + # or `Constants::PRODUCT_TYPE_UTI.keys`. Default is :application. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. 
+ # + # @return [Hash] The common build settings + # + def self.common_build_settings(type, platform = nil, deployment_target = nil, target_product_type = nil, language = :objc) + target_product_type = (Constants::PRODUCT_TYPE_UTI.find { |_, v| v == target_product_type } || [target_product_type || :application])[0] + common_settings = Constants::COMMON_BUILD_SETTINGS + + # Use intersecting settings for all key sets as base + settings = deep_dup(common_settings[:all]) + + # Match further common settings by key sets + keys = [type, platform, target_product_type, language].compact + key_combinations = (1..keys.length).flat_map { |n| keys.combination(n).to_a } + key_combinations.each do |key_combination| + settings.merge!(deep_dup(common_settings[key_combination] || {})) + end + + if deployment_target + case platform + when :ios + settings['IPHONEOS_DEPLOYMENT_TARGET'] = deployment_target + settings['CLANG_ENABLE_OBJC_WEAK'] = 'NO' if deployment_target < '5' + when :osx + settings['MACOSX_DEPLOYMENT_TARGET'] = deployment_target + settings['CLANG_ENABLE_OBJC_WEAK'] = 'NO' if deployment_target < '10.7' + when :tvos + settings['TVOS_DEPLOYMENT_TARGET'] = deployment_target + when :watchos + settings['WATCHOS_DEPLOYMENT_TARGET'] = deployment_target + end + end + + settings + end + + # Creates a deep copy of the given object + # + # @param [Object] object + # the object to copy. + # + # @return [Object] The deep copy of the object. + # + def self.deep_dup(object) + case object + when Hash + new_hash = {} + object.each do |key, value| + new_hash[key] = deep_dup(value) + end + new_hash + when Array + object.map { |value| deep_dup(value) } + else + object.dup + end + end + + # Returns the build phases, in order, that appear by default + # on a target of the given type. + # + # @param [Symbol] type + # the name of the target type. + # + # @return [Array<String>] The list of build phase class names for the target type. + # + def self.build_phases_for_target_type(type) + case type + when :static_library, :dynamic_library + %w(Headers Sources Frameworks) + when :framework + %w(Headers Sources Frameworks Resources) + when :command_line_tool + %w(Sources Frameworks) + else + %w(Sources Frameworks Resources) + end.map { |phase| "PBX#{phase}BuildPhase" } + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/uuid_generator.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/uuid_generator.rb new file mode 100644 index 0000000..f47da43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/project/uuid_generator.rb @@ -0,0 +1,132 @@ +module Xcodeproj + class Project + class UUIDGenerator + require 'digest' + + def initialize(projects) + @projects = Array(projects) + @paths_by_object = {} + end + + def generate! 
+ generate_all_paths_by_objects(@projects) + + new_objects_by_project = Hash[@projects.map do |project| + [project, switch_uuids(project)] + end] + all_new_objects_by_project = new_objects_by_project.values.flat_map(&:values) + all_objects_by_uuid = @projects.map(&:objects_by_uuid).inject(:merge) + all_objects = @projects.flat_map(&:objects) + verify_no_duplicates!(all_objects, all_new_objects_by_project) + @projects.each { |project| fixup_uuid_references(project, all_objects_by_uuid) } + new_objects_by_project.each do |project, new_objects_by_uuid| + project.instance_variable_set(:@generated_uuids, project.instance_variable_get(:@available_uuids)) + project.instance_variable_set(:@objects_by_uuid, new_objects_by_uuid) + end + end + + private + + UUID_ATTRIBUTES = [:remote_global_id_string, :container_portal, :target_proxy].freeze + + def verify_no_duplicates!(all_objects, all_new_objects) + duplicates = all_objects - all_new_objects + UserInterface.warn "[Xcodeproj] Generated duplicate UUIDs:\n\n" << + duplicates.map { |d| "#{d.isa} -- #{@paths_by_object[d]}" }.join("\n") unless duplicates.empty? + end + + def fixup_uuid_references(target_project, all_objects_by_uuid) + fixup = ->(object, attr) do + if object.respond_to?(attr) && link = all_objects_by_uuid[object.send(attr)] + object.send(:"#{attr}=", link.uuid) + end + end + target_project.objects.each do |object| + UUID_ATTRIBUTES.each do |attr| + fixup[object, attr] + end + end + + if (project_attributes = target_project.root_object.attributes) && project_attributes['TargetAttributes'] + project_attributes['TargetAttributes'] = Hash[project_attributes['TargetAttributes'].map do |target_uuid, attributes| + if test_target_id = attributes['TestTargetID'] + attributes = attributes.merge('TestTargetID' => all_objects_by_uuid[test_target_id].uuid) + end + if target_object = all_objects_by_uuid[target_uuid] + target_uuid = target_object.uuid + end + [target_uuid, attributes] + end] + end + end + + def generate_all_paths_by_objects(projects) + projects.each { |project| generate_paths(project.root_object, project.path.basename.to_s) } + end + + def generate_paths(object, path = '') + existing = @paths_by_object[object] || path + return existing if @paths_by_object.key?(object) + @paths_by_object[object] = path.size > existing.size ? path : existing + + object.to_one_attributes.each do |attrb| + obj = attrb.get_value(object) + generate_paths(obj, path + '/' << attrb.plist_name) if obj + end + + object.to_many_attributes.each do |attrb| + attrb.get_value(object).each do |o| + generate_paths(o, path + '/' << attrb.plist_name << "/#{path_component_for_object(o)}") + end + end + + object.references_by_keys_attributes.each do |attrb| + attrb.get_value(object).each do |dictionary| + dictionary.each do |key, value| + generate_paths(value, path + '/' << attrb.plist_name << "/k:#{key}/#{path_component_for_object(value)}") + end + end + end + end + + def switch_uuids(project) + project.mark_dirty! 
+ project.objects.each_with_object({}) do |object, hash| + next unless path = @paths_by_object[object] + uuid = uuid_for_path(path) + object.instance_variable_set(:@uuid, uuid) + hash[uuid] = object + end + end + + def uuid_for_path(path) + Digest::MD5.hexdigest(path).upcase + end + + def path_component_for_object(object) + @path_component_for_object ||= Hash.new do |cache, key| + component = tree_hash_to_path(key.to_tree_hash) + component << key.hierarchy_path.to_s if key.respond_to?(:hierarchy_path) + cache[key] = component + end + @path_component_for_object[object] + end + + def tree_hash_to_path(object, depth = 4) + return '|' if depth.zero? + case object + when Hash + object.sort.each_with_object('') do |(key, value), string| + string << key << ':' << tree_hash_to_path(value, depth - 1) << ',' + end + when Array + object.map do |value| + tree_hash_to_path(value, depth - 1) + end.join(',') + else + object.to_s + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme.rb new file mode 100644 index 0000000..774ccf5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme.rb @@ -0,0 +1,371 @@ +require 'rexml/document' + +require 'xcodeproj/scheme/build_action' +require 'xcodeproj/scheme/test_action' +require 'xcodeproj/scheme/launch_action' +require 'xcodeproj/scheme/profile_action' +require 'xcodeproj/scheme/analyze_action' +require 'xcodeproj/scheme/archive_action' + +require 'xcodeproj/scheme/buildable_product_runnable' +require 'xcodeproj/scheme/buildable_reference' +require 'xcodeproj/scheme/macro_expansion' +require 'xcodeproj/scheme/remote_runnable' + +module Xcodeproj + # This class represents a Scheme document represented by a ".xcscheme" file + # usually stored in a xcuserdata or xcshareddata (for a shared scheme) + # folder. + # + class XCScheme + # @return [REXML::Document] the XML object that will be manipulated to save + # the scheme file after. + # + attr_reader :doc + + # Create a XCScheme either from scratch or using an existing file + # + # @param [String] file_path + # The path of the existing .xcscheme file. If nil will create an empty scheme + # + def initialize(file_path = nil) + if file_path + @file_path = file_path + @doc = File.open(file_path, 'r') do |f| + REXML::Document.new(f) + end + @doc.context[:attribute_quote] = :quote + + @scheme = @doc.elements['Scheme'] + else + @doc = REXML::Document.new + @doc.context[:attribute_quote] = :quote + @doc << REXML::XMLDecl.new(REXML::XMLDecl::DEFAULT_VERSION, 'UTF-8') + + @scheme = @doc.add_element 'Scheme' + @scheme.attributes['LastUpgradeVersion'] = Constants::LAST_UPGRADE_CHECK + @scheme.attributes['version'] = Xcodeproj::Constants::XCSCHEME_FORMAT_VERSION + + self.build_action = BuildAction.new + self.test_action = TestAction.new + self.launch_action = LaunchAction.new + self.profile_action = ProfileAction.new + self.analyze_action = AnalyzeAction.new + self.archive_action = ArchiveAction.new + end + end + + # Convenience method to quickly add app and test targets to a new scheme. 
+ # + # It will add the runnable_target to the Build, Launch and Profile actions + # and the test_target to the Build and Test actions + # + # @param [Xcodeproj::Project::Object::PBXAbstractTarget] runnable_target + # The target to use for the 'Run', 'Profile' and 'Analyze' actions + # + # @param [Xcodeproj::Project::Object::PBXAbstractTarget] test_target + # The target to use for the 'Test' action + # + # @param [Boolean] launch_target + # Determines if the runnable_target is launchable. + # + def configure_with_targets(runnable_target, test_target, launch_target: false) + if runnable_target + add_build_target(runnable_target) + set_launch_target(runnable_target) if launch_target + end + if test_target + add_build_target(test_target, false) if test_target != runnable_target + add_test_target(test_target) + end + end + + public + + # @!group Access Action nodes + + # @return [XCScheme::BuildAction] + # The Build Action associated with this scheme + # + def build_action + @build_action ||= BuildAction.new(@scheme.elements['BuildAction']) + end + + # @param [XCScheme::BuildAction] action + # The Build Action to associate to this scheme + # + def build_action=(action) + @scheme.delete_element('BuildAction') + @scheme.add_element(action.xml_element) + @build_action = action + end + + # @return [XCScheme::TestAction] + # The Test Action associated with this scheme + # + def test_action + @test_action ||= TestAction.new(@scheme.elements['TestAction']) + end + + # @param [XCScheme::TestAction] action + # The Test Action to associate to this scheme + # + def test_action=(action) + @scheme.delete_element('TestAction') + @scheme.add_element(action.xml_element) + @test_action = action + end + + # @return [XCScheme::LaunchAction] + # The Launch Action associated with this scheme + # + def launch_action + @launch_action ||= LaunchAction.new(@scheme.elements['LaunchAction']) + end + + # @param [XCScheme::LaunchAction] action + # The Launch Action to associate to this scheme + # + def launch_action=(action) + @scheme.delete_element('LaunchAction') + @scheme.add_element(action.xml_element) + @launch_action = action + end + + # @return [XCScheme::ProfileAction] + # The Profile Action associated with this scheme + # + def profile_action + @profile_action ||= ProfileAction.new(@scheme.elements['ProfileAction']) + end + + # @param [XCScheme::ProfileAction] action + # The Profile Action to associate to this scheme + # + def profile_action=(action) + @scheme.delete_element('ProfileAction') + @scheme.add_element(action.xml_element) + @profile_action = action + end + + # @return [XCScheme::AnalyzeAction] + # The Analyze Action associated with this scheme + # + def analyze_action + @analyze_action ||= AnalyzeAction.new(@scheme.elements['AnalyzeAction']) + end + + # @param [XCScheme::AnalyzeAction] action + # The Analyze Action to associate to this scheme + # + def analyze_action=(action) + @scheme.delete_element('AnalyzeAction') + @scheme.add_element(action.xml_element) + @analyze_action = action + end + + # @return [XCScheme::ArchiveAction] + # The Archive Action associated with this scheme + # + def archive_action + @archive_action ||= ArchiveAction.new(@scheme.elements['ArchiveAction']) + end + + # @param [XCScheme::ArchiveAction] action + # The Archive Action to associate to this scheme + # + def archive_action=(action) + @scheme.delete_element('ArchiveAction') + @scheme.add_element(action.xml_element) + @archive_action = action + end + + # @!group Target methods + + # Add a target to the list of targets to 
build in the build action. + # + # @param [Xcodeproj::Project::Object::AbstractTarget] build_target + # A target used by scheme in the build step. + # + # @param [Bool] build_for_running + # Whether to build this target in the launch action. Often false for test targets. + # + def add_build_target(build_target, build_for_running = true) + entry = BuildAction::Entry.new(build_target) + + entry.build_for_testing = true + entry.build_for_running = build_for_running + entry.build_for_profiling = build_for_running + entry.build_for_archiving = build_for_running + entry.build_for_analyzing = build_for_running + + build_action.add_entry(entry) + end + + # Add a target to the list of targets to build in the build action. + # + # @param [Xcodeproj::Project::Object::AbstractTarget] test_target + # A target used by scheme in the test step. + # + def add_test_target(test_target) + testable = TestAction::TestableReference.new(test_target) + test_action.add_testable(testable) + end + + # Sets a runnable target to be the target of the launch action of the scheme. + # + # @param [Xcodeproj::Project::Object::AbstractTarget] build_target + # A target used by scheme in the launch step. + # + def set_launch_target(build_target) + launch_runnable = BuildableProductRunnable.new(build_target, 0) + launch_action.buildable_product_runnable = launch_runnable + + profile_runnable = BuildableProductRunnable.new(build_target) + profile_action.buildable_product_runnable = profile_runnable + + macro_exp = MacroExpansion.new(build_target) + test_action.add_macro_expansion(macro_exp) + end + + # @!group Class methods + + #-------------------------------------------------------------------------# + + # Share a User Scheme. Basically this method move the xcscheme file from + # the xcuserdata folder to xcshareddata folder. + # + # @param [String] project_path + # Path of the .xcodeproj folder. + # + # @param [String] scheme_name + # The name of scheme that will be shared. + # + # @param [String] user + # The user name that have the scheme. + # + def self.share_scheme(project_path, scheme_name, user = nil) + to_folder = shared_data_dir(project_path) + to_folder.mkpath + to = to_folder + "#{scheme_name}.xcscheme" + from = user_data_dir(project_path, user) + "#{scheme_name}.xcscheme" + FileUtils.mv(from, to) + end + + # @return [Pathname] + # + def self.shared_data_dir(project_path) + project_path = Pathname.new(project_path) + project_path + 'xcshareddata/xcschemes' + end + + # @return [Pathname] + # + def self.user_data_dir(project_path, user = nil) + project_path = Pathname.new(project_path) + user ||= ENV['USER'] + project_path + "xcuserdata/#{user}.xcuserdatad/xcschemes" + end + + public + + # @!group Serialization + + #-------------------------------------------------------------------------# + + # Serializes the current state of the object to a String. + # + # @note The goal of the string representation is to match Xcode output as + # close as possible to aide comparison. + # + # @return [String] the XML string value of the current state of the object. + # + def to_s + formatter = XMLFormatter.new(2) + formatter.compact = false + out = '' + formatter.write(@doc, out) + out.gsub!("<?xml version='1.0' encoding='UTF-8'?>", '<?xml version="1.0" encoding="UTF-8"?>') + out << "\n" + out + end + + # Serializes the current state of the object to a ".xcscheme" file. + # + # @param [String, Pathname] project_path + # The path where the ".xcscheme" file should be stored. 
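+    # @example Editor's illustrative sketch (not part of the upstream gem) of
+    #          the scheme workflow above; `app_target` and `unit_test_target`
+    #          are assumed to come from an already opened project.
+    #
+    #   scheme = Xcodeproj::XCScheme.new
+    #   scheme.configure_with_targets(app_target, unit_test_target,
+    #                                 launch_target: true)
+    #   scheme.save_as('MyApp.xcodeproj', 'MyApp', true) # shared scheme
+    #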
+ # + # @param [String] name + # The name of the scheme, to have ".xcscheme" appended. + # + # @param [Boolean] shared + # true => if the scheme must be a shared scheme (default value) + # false => if the scheme must be a user scheme + # + # @return [void] + # + # @example Saving a scheme + # scheme.save_as('path/to/Project.xcodeproj') #=> true + # + def save_as(project_path, name, shared = true) + scheme_folder_path = if shared + self.class.shared_data_dir(project_path) + else + self.class.user_data_dir(project_path) + end + scheme_folder_path.mkpath + scheme_path = scheme_folder_path + "#{name}.xcscheme" + @file_path = scheme_path + File.open(scheme_path, 'w') do |f| + f.write(to_s) + end + end + + # Serializes the current state of the object to the original ".xcscheme" + # file this XCScheme was created from, overriding the original file. + # + # Requires that the XCScheme object was initialized using a file path. + # + def save! + raise Informative, 'This XCScheme object was not initialized ' \ + 'using a file path. Use save_as instead.' unless @file_path + File.open(@file_path, 'w') do |f| + f.write(to_s) + end + end + + #-------------------------------------------------------------------------# + + # XML formatter which closely mimics the output generated by Xcode. + # + class XMLFormatter < REXML::Formatters::Pretty + def write_element(node, output) + @indentation = 3 + output << ' ' * @level + output << "<#{node.expanded_name}" + + @level += @indentation + node.context = node.parent.context # HACK: to ensure strings are properly quoted + node.attributes.each_attribute do |attr| + output << "\n" + output << ' ' * @level + output << attr.to_string.sub(/=/, ' = ') + end unless node.attributes.empty? + + output << '>' + + output << "\n" + node.children.each do |child| + next if child.is_a?(REXML::Text) && child.to_s.strip.length == 0 + write(child, output) + output << "\n" + end + @level -= @indentation + output << ' ' * @level + output << "</#{node.expanded_name}>" + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/abstract_scheme_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/abstract_scheme_action.rb new file mode 100644 index 0000000..3a4cf52 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/abstract_scheme_action.rb @@ -0,0 +1,28 @@ +require 'xcodeproj/scheme/xml_element_wrapper' +require 'xcodeproj/scheme/environment_variables' +require 'xcodeproj/scheme/command_line_arguments' + +module Xcodeproj + class XCScheme + # This abstract class aims to be the base class for every XxxAction class + # that have a #build_configuration attribute + # + class AbstractSchemeAction < XMLElementWrapper + # @return [String] + # The build configuration associated with this action + # (usually either 'Debug' or 'Release') + # + def build_configuration + @xml_element.attributes['buildConfiguration'] + end + + # @param [String] config_name + # The build configuration to associate with this action + # (usually either 'Debug' or 'Release') + # + def build_configuration=(config_name) + @xml_element.attributes['buildConfiguration'] = config_name + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/analyze_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/analyze_action.rb new file mode 100644 index 0000000..e951980 --- 
/dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/analyze_action.rb @@ -0,0 +1,19 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the AnalyzeAction node of a .xcscheme XML file + # + class AnalyzeAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'AnalyzeAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'AnalyzeAction') do + self.build_configuration = 'Debug' + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/archive_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/archive_action.rb new file mode 100644 index 0000000..d00581f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/archive_action.rb @@ -0,0 +1,59 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the ArchiveAction node of a .xcscheme XML file + # + class ArchiveAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'ArchiveAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'ArchiveAction') do + self.build_configuration = 'Release' + self.reveal_archive_in_organizer = true + end + end + + # @return [Bool] + # Whether the Archive will be revealed in Xcode's Organizer + # after it's done building. + # + def reveal_archive_in_organizer? + string_to_bool(@xml_element.attributes['revealArchiveInOrganizer']) + end + + # @param [Bool] flag + # Set whether the Archive will be revealed in Xcode's Organizer + # after it's done building. + # + def reveal_archive_in_organizer=(flag) + @xml_element.attributes['revealArchiveInOrganizer'] = bool_to_string(flag) + end + + # @return [String] + # The custom name to give to the archive. + # If nil, the generated archive will have the same name as the one + # set in the associated target's Build Settings for the built product. + # + def custom_archive_name + @xml_element.attributes['customArchiveName'] + end + + # @param [String] name + # Set the custom name to use for the built archive + # If nil, the customization of the archive name will be removed and + # the generated archive will have the same name as the one set in the + # associated target's Build Settings for the build product. + # + def custom_archive_name=(name) + if name + @xml_element.attributes['customArchiveName'] = name + else + @xml_element.delete_attribute('customArchiveName') + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/build_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/build_action.rb new file mode 100644 index 0000000..11c6f2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/build_action.rb @@ -0,0 +1,212 @@ +require 'xcodeproj/scheme/xml_element_wrapper' + +module Xcodeproj + class XCScheme + # This class wraps the BuildAction node of a .xcscheme XML file + # + # Note: It's not a AbstractSchemeAction like the others because it is + # a special case of action (with no build_configuration, etc) + # + class BuildAction < XMLElementWrapper + # @param [REXML::Element] node + # The 'BuildAction' XML node that this object will wrap. 
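+      # @example Editor's illustrative sketch (not part of the upstream gem):
+      #          per-action configuration through the wrappers defined above;
+      #          assumes `scheme` is an Xcodeproj::XCScheme instance.
+      #
+      #   scheme.analyze_action.build_configuration          #=> "Debug"
+      #   scheme.archive_action.build_configuration = 'Release'
+      #   scheme.archive_action.custom_archive_name = 'Nightly'
+      #   scheme.archive_action.reveal_archive_in_organizer? #=> true
+      #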
+ # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'BuildAction') do + self.parallelize_buildables = true + self.build_implicit_dependencies = true + end + end + + # @return [Bool] + # Whether or not to build the various targets in parallel + # + def parallelize_buildables? + string_to_bool(@xml_element.attributes['parallelizeBuildables']) + end + + # @param [Bool] flag + # Set whether or not to build the various targets in parallel + # + def parallelize_buildables=(flag) + @xml_element.attributes['parallelizeBuildables'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to detect and build implicit dependencies for each target + # + def build_implicit_dependencies? + string_to_bool(@xml_element.attributes['buildImplicitDependencies']) + end + + # @param [Bool] flag + # Whether or not to detect and build implicit dependencies for each target + # + def build_implicit_dependencies=(flag) + @xml_element.attributes['buildImplicitDependencies'] = bool_to_string(flag) + end + + # @return [Array<BuildAction::Entry>] + # The list of BuildActionEntry nodes associated with this Build Action. + # Each entry represent a target to build and tells for which action it's needed to be built. + # + def entries + entries = @xml_element.elements['BuildActionEntries'] + return nil unless entries + entries.get_elements('BuildActionEntry').map do |entry_node| + BuildAction::Entry.new(entry_node) + end + end + + # @param [Array<BuildAction::Entry>] entries + # Sets the list of BuildActionEntry nodes associated with this Build Action. + # + def entries=(entries) + @xml_element.delete_element('BuildActionEntries') + unless entries.empty? + entries_element = @xml_element.add_element('BuildActionEntries') + entries.each do |entry_node| + entries_element.add_element(entry_node.xml_element) + end + end + entries + end + + # @param [BuildAction::Entry] entry + # The BuildActionEntry to add to the list of targets to build for the various actions + # + def add_entry(entry) + entries = @xml_element.elements['BuildActionEntries'] || @xml_element.add_element('BuildActionEntries') + entries.add_element(entry.xml_element) + end + + #-------------------------------------------------------------------------# + + class Entry < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'BuildActionEntry' node element to reference, + # or nil to create an new, empty Entry with default values + # + def initialize(target_or_node = nil) + create_xml_element_with_fallback(target_or_node, 'BuildActionEntry') do + # Check target type to configure the default entry attributes accordingly + is_test_target = false + is_app_target = false + if target_or_node && target_or_node.is_a?(::Xcodeproj::Project::Object::PBXNativeTarget) + test_types = [Constants::PRODUCT_TYPE_UTI[:octest_bundle], + Constants::PRODUCT_TYPE_UTI[:unit_test_bundle], + Constants::PRODUCT_TYPE_UTI[:ui_test_bundle]] + app_types = [Constants::PRODUCT_TYPE_UTI[:application]] + is_test_target = test_types.include?(target_or_node.product_type) + is_app_target = app_types.include?(target_or_node.product_type) + end + + self.build_for_analyzing = true + self.build_for_testing = is_test_target + self.build_for_running = is_app_target + self.build_for_profiling = is_app_target + self.build_for_archiving = is_app_target + + add_buildable_reference 
BuildableReference.new(target_or_node) if target_or_node + end + end + + # @return [Bool] + # Whether or not to build this target when building for Testing + # + def build_for_testing? + string_to_bool(@xml_element.attributes['buildForTesting']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Testing + # + def build_for_testing=(flag) + @xml_element.attributes['buildForTesting'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Running + # + def build_for_running? + string_to_bool(@xml_element.attributes['buildForRunning']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Running + # + def build_for_running=(flag) + @xml_element.attributes['buildForRunning'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Profiling + # + def build_for_profiling? + string_to_bool(@xml_element.attributes['buildForProfiling']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Profiling + # + def build_for_profiling=(flag) + @xml_element.attributes['buildForProfiling'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Archiving + # + def build_for_archiving? + string_to_bool(@xml_element.attributes['buildForArchiving']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Archiving + # + def build_for_archiving=(flag) + @xml_element.attributes['buildForArchiving'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Analyzing + # + def build_for_analyzing? + string_to_bool(@xml_element.attributes['buildForAnalyzing']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Analyzing + # + def build_for_analyzing=(flag) + @xml_element.attributes['buildForAnalyzing'] = bool_to_string(flag) + end + + # @return [Array<BuildableReference>] + # The list of BuildableReferences this entry will build. 
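A sketch of how the BuildAction and its Entry class above fit together, assuming an already opened project (the project path is illustrative; `Xcodeproj::Project.open` is defined elsewhere in the gem):

    require 'xcodeproj'

    project = Xcodeproj::Project.open('Zephyr.xcodeproj')        # illustrative path
    target  = project.targets.first

    entry = Xcodeproj::XCScheme::BuildAction::Entry.new(target)  # build_for_* defaults follow the product type
    entry.build_for_testing = true

    build_action = Xcodeproj::XCScheme::BuildAction.new
    build_action.add_entry(entry)
    build_action.entries.first.buildable_references.first.target_name  # => target.name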
+ # (The list usually contains only one element) + # + def buildable_references + @xml_element.get_elements('BuildableReference').map do |node| + BuildableReference.new(node) + end + end + + # @param [BuildableReference] ref + # The BuildableReference to add to the list of targets this entry will build + # + def add_buildable_reference(ref) + @xml_element.add_element(ref.xml_element) + end + + # @param [BuildableReference] ref + # The BuildableReference to remove from the list of targets this entry will build + # + def remove_buildable_reference(ref) + @xml_element.delete_element(ref.xml_element) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/buildable_product_runnable.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/buildable_product_runnable.rb new file mode 100644 index 0000000..f4d350f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/buildable_product_runnable.rb @@ -0,0 +1,55 @@ +module Xcodeproj + class XCScheme + # This class wraps the BuildableProductRunnable node of a .xcscheme XML file + # + # A BuildableProductRunnable is a product that is both buildable + # (it contains a BuildableReference) and runnable (it can be launched and debugged) + # + class BuildableProductRunnable < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'BuildableProductRunnable' node element to reference + # or nil to create an new, empty BuildableProductRunnable + # + # @param [#to_s] runnable_debugging_mode + # The debugging mode (usually '0') + # + def initialize(target_or_node = nil, runnable_debugging_mode = nil) + create_xml_element_with_fallback(target_or_node, 'BuildableProductRunnable') do + self.buildable_reference = BuildableReference.new(target_or_node) if target_or_node + @xml_element.attributes['runnableDebuggingMode'] = runnable_debugging_mode.to_s if runnable_debugging_mode + end + end + + # @return [String] + # The Runnable debugging mode (usually either empty or equal to '0') + # + def runnable_debugging_mode + @xml_element.attributes['runnableDebuggingMode'] + end + + # @param [String] value + # Set the runnable debugging mode of this buildable product runnable + # + def runnable_debugging_mode=(value) + @xml_element.attributes['runnableDebuggingMode'] = value.to_s + end + + # @return [BuildableReference] + # The Buildable Reference this Buildable Product Runnable is gonna build and run + # + def buildable_reference + @buildable_reference ||= BuildableReference.new @xml_element.elements['BuildableReference'] + end + + # @param [BuildableReference] ref + # Set the Buildable Reference this Buildable Product Runnable is gonna build and run + # + def buildable_reference=(ref) + @xml_element.delete_element('BuildableReference') + @xml_element.add_element(ref.xml_element) + @buildable_reference = ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/buildable_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/buildable_reference.rb new file mode 100644 index 0000000..b75e26e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/buildable_reference.rb @@ -0,0 +1,129 @@ +module Xcodeproj + class XCScheme + # This class wraps the BuildableReference node of a .xcscheme XML file + # + # A BuildableReference is a reference to a buildable 
product (which is + # typically is synonymous for an Xcode target) + # + class BuildableReference < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'BuildableReference' node element to reference + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + def initialize(target_or_node, root_project = nil) + create_xml_element_with_fallback(target_or_node, 'BuildableReference') do + @xml_element.attributes['BuildableIdentifier'] = 'primary' + set_reference_target(target_or_node, true, root_project) if target_or_node + end + end + + # @return [String] + # The name of the target this Buildable Reference points to + # + def target_name + @xml_element.attributes['BlueprintName'] + end + + # @return [String] + # The Unique Identifier of the target (target.uuid) this Buildable Reference points to. + # + # @note You can use this to `#find` the `Xcodeproj::Project::Object::AbstractTarget` + # instance in your Xcodeproj::Project object. + # e.g. `project.targets.find { |t| t.uuid == ref.target_uuid }` + # + def target_uuid + @xml_element.attributes['BlueprintIdentifier'] + end + + # @return [String] + # The string representing the container of that target. + # Typically in the form of 'container:xxxx.xcodeproj' + # + def target_referenced_container + @xml_element.attributes['ReferencedContainer'] + end + + # Set the BlueprintIdentifier (target.uuid), BlueprintName (target.name) + # and TerefencedContainer (URI pointing to target's projet) all at once + # + # @param [Xcodeproj::Project::Object::AbstractTarget] target + # The target this BuildableReference refers to. + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + # @param [Bool] override_buildable_name + # If true, buildable_name will also be updated by computing a name from the target + # + def set_reference_target(target, override_buildable_name = false, root_project = nil) + # note, the order of assignment here is important, it determines the order of serialization in the xml + # this matches the order that Xcode generates + @xml_element.attributes['BlueprintIdentifier'] = target.uuid + self.buildable_name = construct_buildable_name(target) if override_buildable_name + @xml_element.attributes['BlueprintName'] = target.name + @xml_element.attributes['ReferencedContainer'] = construct_referenced_container_uri(target, root_project) + end + + # @return [String] + # The name of the final product when building this Buildable Reference + # + def buildable_name + @xml_element.attributes['BuildableName'] + end + + # @param [String] value + # Set the name of the final product when building this Buildable Reference + # + def buildable_name=(value) + @xml_element.attributes['BuildableName'] = value + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @param [Xcodeproj::Project::Object::AbstractTarget] target + # + # @return [String] The buildable name of the scheme. 
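The `target_uuid` accessor above is what ties a scheme back to its project; the lookup the comment describes looks like this in practice (reusing `project` and `target` from the earlier sketch):

    ref = Xcodeproj::XCScheme::BuildableReference.new(target)
    ref.target_name                                          # => target.name
    ref.buildable_name                                       # e.g. 'Zephyr.framework' for a native target
    project.targets.find { |t| t.uuid == ref.target_uuid }   # resolves the reference back to its target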
+ # + def construct_buildable_name(build_target) + case build_target.isa + when 'PBXNativeTarget' + File.basename(build_target.product_reference.path) + when 'PBXAggregateTarget' + build_target.name + else + raise ArgumentError, "Unsupported build target type #{build_target.isa}" + end + end + + # @param [Xcodeproj::Project::Object::AbstractTarget] target + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + # @return [String] A string in the format "container:[path to the project + # file relative to the project_dir_path, always ending with + # the actual project directory name]" + # + def construct_referenced_container_uri(target, root_project = nil) + target_project = target.project + root_project ||= target_project + root_project_dir_path = root_project.root_object.project_dir_path + path = if !root_project_dir_path.to_s.empty? + root_project.path + root_project_dir_path + else + root_project.project_dir + end + relative_path = target_project.path.relative_path_from(path).to_s + relative_path = target_project.path.basename if relative_path == '.' + "container:#{relative_path}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/command_line_arguments.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/command_line_arguments.rb new file mode 100644 index 0000000..f5fd5d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/command_line_arguments.rb @@ -0,0 +1,162 @@ +require 'xcodeproj/scheme/xml_element_wrapper' + +module Xcodeproj + class XCScheme + COMMAND_LINE_ARGS_NODE = 'CommandLineArguments'.freeze + COMMAND_LINE_ARG_NODE = 'CommandLineArgument'.freeze + + # This class wraps the CommandLineArguments node of a .xcscheme XML file. This + # is just a container of CommandLineArgument objects. It can either appear on a + # LaunchAction or TestAction scheme group. + # + class CommandLineArguments < XMLElementWrapper + # @param [nil,REXML::Element,Array<CommandLineArgument>,Array<Hash{Symbol => String,Bool}>] node_or_arguments + # The 'CommandLineArguments' XML node, or list of command line arguments, that this object represents. + # - If nil, an empty 'CommandLineArguments' XML node will be created + # - If an REXML::Element, it must be named 'CommandLineArguments' + # - If an Array of objects or Hashes, they'll each be passed to {#assign_argument} + # + def initialize(node_or_arguments = nil) + create_xml_element_with_fallback(node_or_arguments, COMMAND_LINE_ARGS_NODE) do + @all_arguments = [] + node_or_arguments.each { |var| assign_argument(var) } unless node_or_arguments.nil? + end + end + + # @return [Array<CommandLineArgument>] + # The key value pairs currently set in @xml_element + # + def all_arguments + @all_arguments ||= @xml_element.get_elements(COMMAND_LINE_ARG_NODE).map { |argument| CommandLineArgument.new(argument) } + end + + # Adds a given argument to the set of command line arguments, or replaces it if that key already exists + # + # @param [CommandLineArgument,Hash{Symbol => String,Bool}] argument + # The argument to add or update, backed by an CommandLineArgument node. 
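The CommandLineArguments container introduced above accepts either wrapped nodes or plain hashes; a minimal sketch (the flag names are illustrative):

    args = Xcodeproj::XCScheme::CommandLineArguments.new(
      [{ :argument => '-FIRDebugEnabled', :enabled => true }]
    )
    args['--verbose'] = false     # []= funnels through assign_argument
    args.to_a
    # => [{ :argument => '-FIRDebugEnabled', :enabled => true },
    #     { :argument => '--verbose', :enabled => false }]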
+ # - If an CommandLineArgument, the previous reference will still be valid + # - If a Hash, must conform to {CommandLineArgument#initialize} requirements + # @return [Array<CommandLineArgument>] + # The new set of command line arguments after addition + # + def assign_argument(argument) + command_line_arg = if argument.is_a?(CommandLineArgument) + argument + else + CommandLineArgument.new(argument) + end + all_arguments.each { |existing_var| remove_argument(existing_var) if existing_var.argument == command_line_arg.argument } + @xml_element.add_element(command_line_arg.xml_element) + @all_arguments << command_line_arg + end + + # Removes a specified argument (by string or object) from the set of command line arguments + # + # @param [CommandLineArgument,String] argument + # The argument to remove + # @return [Array<CommandLineArgument>] + # The new set of command line arguments after removal + # + def remove_argument(argument) + command_line_arg = if argument.is_a?(CommandLineArgument) + argument + else + CommandLineArgument.new(argument) + end + raise "Unexpected parameter type: #{command_line_arg.class}" unless command_line_arg.is_a?(CommandLineArgument) + @xml_element.delete_element(command_line_arg.xml_element) + @all_arguments -= [command_line_arg] + end + + # @param [String] key + # The key to lookup + # @return [CommandLineArgument] argument + # Returns the matching command line argument for a specified key + # + def [](argument) + all_arguments.find { |var| var.argument == argument } + end + + # Assigns a value for a specified key + # + # @param [String] key + # The key to update in the command line arguments + # @param [String] value + # The value to lookup + # @return [CommandLineArgument] argument + # The newly updated command line argument + # + def []=(argument, enabled) + assign_argument(:argument => argument, :enabled => enabled) + self[argument] + end + + # @return [Array<Hash{Symbol => String,Bool}>] + # The current command line arguments represented as an array + # + def to_a + all_arguments.map(&:to_h) + end + end + + # This class wraps the CommandLineArgument node of a .xcscheme XML file. + # Environment arguments are accessible via the NSDictionary returned from + # [[NSProcessInfo processInfo] arguments] in your app code. + # + class CommandLineArgument < XMLElementWrapper + # @param [nil,REXML::Element,Hash{Symbol => String,Bool}] node_or_argument + # - If nil, it will create a default XML node to use + # - If a REXML::Element, should be a <CommandLineArgument> XML node to wrap + # - If a Hash, must contain keys :key and :value (Strings) and optionally :enabled (Boolean) + # + def initialize(node_or_argument) + create_xml_element_with_fallback(node_or_argument, COMMAND_LINE_ARG_NODE) do + raise "Must pass a Hash with 'argument' and 'enabled'!" 
unless node_or_argument.is_a?(Hash) && + node_or_argument.key?(:argument) && node_or_argument.key?(:enabled) + + @xml_element.attributes['argument'] = node_or_argument[:argument] + @xml_element.attributes['isEnabled'] = if node_or_argument.key?(:enabled) + bool_to_string(node_or_argument[:enabled]) + else + bool_to_string(false) + end + end + end + + # Returns the CommandLineArgument's key + # @return [String] + # + def argument + @xml_element.attributes['argument'] + end + + # Sets the CommandLineArgument's key + # @param [String] key + # + def argument=(argument) + @xml_element.attributes['argument'] = argument + end + + # Returns the CommandLineArgument's enabled state + # @return [Bool] + # + def enabled + string_to_bool(@xml_element.attributes['isEnabled']) + end + + # Sets the CommandLineArgument's enabled state + # @param [Bool] enabled + # + def enabled=(enabled) + @xml_element.attributes['isEnabled'] = bool_to_string(enabled) + end + + # @return [Hash{:key => String, :value => String, :enabled => Bool}] + # The command line argument XML node with attributes converted to a representative Hash + # + def to_h + { :argument => argument, :enabled => enabled } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/environment_variables.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/environment_variables.rb new file mode 100644 index 0000000..7335df6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/environment_variables.rb @@ -0,0 +1,170 @@ +require 'xcodeproj/scheme/xml_element_wrapper' + +module Xcodeproj + class XCScheme + VARIABLES_NODE = 'EnvironmentVariables' + VARIABLE_NODE = 'EnvironmentVariable' + + # This class wraps the EnvironmentVariables node of a .xcscheme XML file. This + # is just a container of EnvironmentVariable objects. It can either appear on a + # LaunchAction or TestAction scheme group. + # + class EnvironmentVariables < XMLElementWrapper + # @param [nil,REXML::Element,Array<EnvironmentVariable>,Array<Hash{Symbol => String,Bool}>] node_or_variables + # The 'EnvironmentVariables' XML node, or list of environment variables, that this object represents. + # - If nil, an empty 'EnvironmentVariables' XML node will be created + # - If an REXML::Element, it must be named 'EnvironmentVariables' + # - If an Array of objects or Hashes, they'll each be passed to {#assign_variable} + # + def initialize(node_or_variables = nil) + create_xml_element_with_fallback(node_or_variables, VARIABLES_NODE) do + @all_variables = [] + node_or_variables.each { |var| assign_variable(var) } unless node_or_variables.nil? + end + end + + # @return [Array<EnvironmentVariable>] + # The key value pairs currently set in @xml_element + # + def all_variables + @all_variables ||= @xml_element.get_elements(VARIABLE_NODE).map { |variable| EnvironmentVariable.new(variable) } + end + + # Adds a given variable to the set of environment variables, or replaces it if that key already exists + # + # @param [EnvironmentVariable,Hash{Symbol => String,Bool}] variable + # The variable to add or update, backed by an EnvironmentVariable node. + # - If an EnvironmentVariable, the previous reference will still be valid + # - If a Hash, must conform to {EnvironmentVariable#initialize} requirements + # @return [Array<EnvironmentVariable>] + # The new set of environment variables after addition + # + def assign_variable(variable) + env_var = variable.is_a?(EnvironmentVariable) ? 
variable : EnvironmentVariable.new(variable) + all_variables.each { |existing_var| remove_variable(existing_var) if existing_var.key == env_var.key } + @xml_element.add_element(env_var.xml_element) + @all_variables << env_var + end + + # Removes a specified variable (by string or object) from the set of environment variables + # + # @param [EnvironmentVariable,String] variable + # The variable to remove + # @return [Array<EnvironmentVariable>] + # The new set of environment variables after removal + # + def remove_variable(variable) + env_var = variable.is_a?(EnvironmentVariable) ? variable : all_variables.find { |var| var.key == variable } + raise "Unexpected parameter type: #{env_var.class}" unless env_var.is_a?(EnvironmentVariable) + @xml_element.delete_element(env_var.xml_element) + @all_variables -= [env_var] + end + + # @param [String] key + # The key to lookup + # @return [EnvironmentVariable] variable + # Returns the matching environment variable for a specified key + # + def [](key) + all_variables.find { |var| var.key == key } + end + + # Assigns a value for a specified key + # + # @param [String] key + # The key to update in the environment variables + # @param [String] value + # The value to lookup + # @return [EnvironmentVariable] variable + # The newly updated environment variable + # + def []=(key, value) + assign_variable(:key => key, :value => value) + self[key] + end + + # @return [Array<Hash{Symbol => String,Bool}>] + # The current environment variables represented as an array + # + def to_a + all_variables.map(&:to_h) + end + end + + # This class wraps the EnvironmentVariable node of a .xcscheme XML file. + # Environment variables are accessible via the NSDictionary returned from + # [[NSProcessInfo processInfo] environment] in your app code. + # + class EnvironmentVariable < XMLElementWrapper + # @param [nil,REXML::Element,Hash{Symbol => String,Bool}] node_or_variable + # - If nil, it will create a default XML node to use + # - If a REXML::Element, should be a <EnvironmentVariable> XML node to wrap + # - If a Hash, must contain keys :key and :value (Strings) and optionally :enabled (Boolean) + # + def initialize(node_or_variable) + create_xml_element_with_fallback(node_or_variable, VARIABLE_NODE) do + raise "Must pass a Hash with 'key' and 'value'!" 
unless node_or_variable.is_a?(Hash) && + node_or_variable.key?(:key) && node_or_variable.key?(:value) + + @xml_element.attributes['key'] = node_or_variable[:key] + @xml_element.attributes['value'] = node_or_variable[:value] + + @xml_element.attributes['isEnabled'] = if node_or_variable.key?(:enabled) + bool_to_string(node_or_variable[:enabled]) + else + bool_to_string(true) + end + end + end + + # Returns the EnvironmentValue's key + # @return [String] + # + def key + @xml_element.attributes['key'] + end + + # Sets the EnvironmentValue's key + # @param [String] key + # + def key=(key) + @xml_element.attributes['key'] = key + end + + # Returns the EnvironmentValue's value + # @return [String] + # + def value + @xml_element.attributes['value'] + end + + # Sets the EnvironmentValue's value + # @param [String] value + # + def value=(value) + @xml_element.attributes['value'] = value + end + + # Returns the EnvironmentValue's enabled state + # @return [Bool] + # + def enabled + string_to_bool(@xml_element.attributes['isEnabled']) + end + + # Sets the EnvironmentValue's enabled state + # @param [Bool] enabled + # + def enabled=(enabled) + @xml_element.attributes['isEnabled'] = bool_to_string(enabled) + end + + # @return [Hash{:key => String, :value => String, :enabled => Bool}] + # The environment variable XML node with attributes converted to a representative Hash + # + def to_h + { :key => key, :value => value, :enabled => enabled } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/launch_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/launch_action.rb new file mode 100644 index 0000000..452fb58 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/launch_action.rb @@ -0,0 +1,164 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the LaunchAction node of a .xcscheme XML file + # + class LaunchAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'LaunchAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'LaunchAction') do + # Add some attributes (that are not handled by this wrapper class yet but expected in the XML) + @xml_element.attributes['selectedDebuggerIdentifier'] = 'Xcode.DebuggerFoundation.Debugger.LLDB' + @xml_element.attributes['selectedLauncherIdentifier'] = 'Xcode.DebuggerFoundation.Launcher.LLDB' + @xml_element.attributes['launchStyle'] = '0' + @xml_element.attributes['useCustomWorkingDirectory'] = bool_to_string(false) + @xml_element.attributes['ignoresPersistentStateOnLaunch'] = bool_to_string(false) + @xml_element.attributes['debugDocumentVersioning'] = bool_to_string(true) + @xml_element.attributes['debugServiceExtension'] = 'internal' + @xml_element.add_element('AdditionalOptions') + + # Setup default values for other (handled) attributes + self.build_configuration = 'Debug' + self.allow_location_simulation = true + end + end + + # @todo handle 'launchStyle' attribute + # @todo handle 'useCustomWorkingDirectory attribute + # @todo handle 'ignoresPersistentStateOnLaunch' attribute + # @todo handle 'debugDocumentVersioning' attribute + # @todo handle 'debugServiceExtension' + + # @return [Bool] + # Whether or not to allow GPS location simulation when launching this target + # + def allow_location_simulation? 
+ string_to_bool(@xml_element.attributes['allowLocationSimulation']) + end + + # @param [Bool] flag + # Set whether or not to allow GPS location simulation when launching this target + # + def allow_location_simulation=(flag) + @xml_element.attributes['allowLocationSimulation'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether this Build Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker? + string_to_bool(@xml_element.attributes['disableMainThreadChecker']) + end + + # @param [Bool] flag + # Set whether this Build Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker=(flag) + @xml_element.attributes['disableMainThreadChecker'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether UI API misuse from background threads detection should pause execution. + # This flag is ignored when the thread checker disabled + # ([disable_main_thread_checker] flag). + # + def stop_on_every_main_thread_checker_issue? + string_to_bool(@xml_element.attributes['stopOnEveryMainThreadCheckerIssue']) + end + + # @param [Bool] flag + # Set whether UI API misuse from background threads detection should pause execution. + # This flag is ignored when the thread checker disabled + # ([disable_main_thread_checker] flag). + # + def stop_on_every_main_thread_checker_issue=(flag) + @xml_element.attributes['stopOnEveryMainThreadCheckerIssue'] = bool_to_string(flag) + end + + # @return [String] + # The launch automatically substyle + # + def launch_automatically_substyle + @xml_element.attributes['launchAutomaticallySubstyle'] + end + + # @param [String] flag + # Set the launch automatically substyle ('2' for extensions) + # + def launch_automatically_substyle=(value) + @xml_element.attributes['launchAutomaticallySubstyle'] = value.to_s + end + + # @return [BuildableProductRunnable] + # The BuildReference to launch when executing the Launch Action + # + def buildable_product_runnable + BuildableProductRunnable.new(@xml_element.elements['BuildableProductRunnable'], 0) + end + + # @param [BuildableProductRunnable] runnable + # Set the BuildableProductRunnable referencing the target to launch + # + def buildable_product_runnable=(runnable) + @xml_element.delete_element('BuildableProductRunnable') + @xml_element.add_element(runnable.xml_element) if runnable + end + + # @return [EnvironmentVariables] + # Returns the EnvironmentVariables that will be defined at app launch + # + def environment_variables + EnvironmentVariables.new(@xml_element.elements[XCScheme::VARIABLES_NODE]) + end + + # @param [EnvironmentVariables,nil] env_vars + # Sets the EnvironmentVariables that will be defined at app launch + # + def environment_variables=(env_vars) + @xml_element.delete_element(XCScheme::VARIABLES_NODE) + @xml_element.add_element(env_vars.xml_element) if env_vars + env_vars + end + + # @todo handle 'AdditionalOptions' tag + + # @return [CommandLineArguments] + # Returns the CommandLineArguments that will be passed at app launch + # + def command_line_arguments + CommandLineArguments.new(@xml_element.elements[XCScheme::COMMAND_LINE_ARGS_NODE]) + end + + # @return [CommandLineArguments] arguments + # Sets the CommandLineArguments that will be passed at app launch + # + def command_line_arguments=(arguments) + @xml_element.delete_element(XCScheme::COMMAND_LINE_ARGS_NODE) + @xml_element.add_element(arguments.xml_element) if arguments + arguments + end + + # @return [Array<MacroExpansion>] + # The 
list of MacroExpansion bound with this LaunchAction + # + def macro_expansions + @xml_element.get_elements('MacroExpansion').map do |node| + MacroExpansion.new(node) + end + end + + # @param [MacroExpansion] macro_expansion + # Add a MacroExpansion to this LaunchAction + # + def add_macro_expansion(macro_expansion) + @xml_element.add_element(macro_expansion.xml_element) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/macro_expansion.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/macro_expansion.rb new file mode 100644 index 0000000..66b099a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/macro_expansion.rb @@ -0,0 +1,34 @@ +module Xcodeproj + class XCScheme + # This class wraps the MacroExpansion node of a .xcscheme XML file + # + class MacroExpansion < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'MacroExpansion' node element + # or nil to create an empty MacroExpansion object + # + def initialize(target_or_node = nil) + create_xml_element_with_fallback(target_or_node, 'MacroExpansion') do + self.buildable_reference = BuildableReference.new(target_or_node) if target_or_node + end + end + + # @return [BuildableReference] + # The BuildableReference this MacroExpansion refers to + # + def buildable_reference + @buildable_reference ||= BuildableReference.new @xml_element.elements['BuildableReference'] + end + + # @param [BuildableReference] ref + # Set the BuildableReference this MacroExpansion refers to + # + def buildable_reference=(ref) + @xml_element.delete_element('BuildableReference') + @xml_element.add_element(ref.xml_element) + @buildable_reference = ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/profile_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/profile_action.rb new file mode 100644 index 0000000..c799ce8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/profile_action.rb @@ -0,0 +1,57 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the ProfileAction node of a .xcscheme XML file + # + class ProfileAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'ProfileAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'ProfileAction') do + # Add some attributes (that are not handled by this wrapper class yet but expected in the XML) + @xml_element.attributes['savedToolIdentifier'] = '' + @xml_element.attributes['useCustomWorkingDirectory'] = bool_to_string(false) + @xml_element.attributes['debugDocumentVersioning'] = bool_to_string(true) + + # Setup default values for other (handled) attributes + self.build_configuration = 'Release' + self.should_use_launch_scheme_args_env = true + end + end + + # @return [Bool] + # Whether this Profile Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env? + string_to_bool(@xml_element.attributes['shouldUseLaunchSchemeArgsEnv']) + end + + # @param [Bool] flag + # Set Whether this Profile Action should use the same arguments and environment variables + # as the Launch Action. 
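Putting the LaunchAction and EnvironmentVariables wrappers above together, a typical scheme tweak might look like this (the variable key and value are illustrative):

    launch = Xcodeproj::XCScheme::LaunchAction.new
    launch.allow_location_simulation = false
    launch.environment_variables = Xcodeproj::XCScheme::EnvironmentVariables.new(
      [{ :key => 'OS_ACTIVITY_MODE', :value => 'disable' }]
    )
    launch.environment_variables.to_a
    # => [{ :key => 'OS_ACTIVITY_MODE', :value => 'disable', :enabled => true }]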
+ # + def should_use_launch_scheme_args_env=(flag) + @xml_element.attributes['shouldUseLaunchSchemeArgsEnv'] = bool_to_string(flag) + end + + # @return [BuildableProductRunnable] + # The BuildableProductRunnable to launch when launching the Profile action + # + def buildable_product_runnable + BuildableProductRunnable.new @xml_element.elements['BuildableProductRunnable'] + end + + # @param [BuildableProductRunnable] runnable + # Set the BuildableProductRunnable referencing the target to launch when profiling + # + def buildable_product_runnable=(runnable) + @xml_element.delete_element('BuildableProductRunnable') + @xml_element.add_element(runnable.xml_element) if runnable + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/remote_runnable.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/remote_runnable.rb new file mode 100644 index 0000000..c51c547 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/remote_runnable.rb @@ -0,0 +1,92 @@ +module Xcodeproj + class XCScheme + # This class wraps the RemoteRunnable node of a .xcscheme XML file + # + # A RemoteRunnable is a product that is both buildable + # (it contains a BuildableReference) and + # runnable remotely (it can be launched and debugged on a remote device, i.e. an Apple Watch) + # + class RemoteRunnable < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'RemoteRunnable' node element to reference + # or nil to create an new, empty RemoteRunnable + # + # @param [#to_s] runnable_debugging_mode + # The debugging mode (usually '2') + # + # @param [#to_s] bundle_identifier + # The bundle identifier (usually 'com.apple.Carousel') + # + # @param [#to_s] remote_path + # The remote path (not required, unknown usage) + # + def initialize(target_or_node = nil, runnable_debugging_mode = nil, bundle_identifier = nil, remote_path = nil) + create_xml_element_with_fallback(target_or_node, 'RemoteRunnable') do + self.buildable_reference = BuildableReference.new(target_or_node) if target_or_node + @xml_element.attributes['runnableDebuggingMode'] = runnable_debugging_mode.to_s if runnable_debugging_mode + @xml_element.attributes['BundleIdentifier'] = bundle_identifier.to_s if bundle_identifier + @xml_element.attributes['RemotePath'] = remote_path.to_s if remote_path + end + end + + # @return [String] + # The runnable debugging mode (usually '2') + # + def runnable_debugging_mode + @xml_element.attributes['runnableDebuggingMode'] + end + + # @param [String] value + # Set the runnable debugging mode + # + def runnable_debugging_mode=(value) + @xml_element.attributes['runnableDebuggingMode'] = value.to_s + end + + # @return [String] + # The runnable bundle identifier (usually 'com.apple.Carousel') + # + def bundle_identifier + @xml_element.attributes['BundleIdentifier'] + end + + # @param [String] value + # Set the runnable bundle identifier + # + def bundle_identifier=(value) + @xml_element.attributes['BundleIdentifier'] = value.to_s + end + + # @return [String] + # The runnable remote path (not required, unknown usage) + # + def remote_path + @xml_element.attributes['RemotePath'] + end + + # @param [String] value + # Set the runnable remote path + # + def remote_path=(value) + @xml_element.attributes['RemotePath'] = value.to_s + end + + # @return [BuildableReference] + # The buildable reference this remote runnable is gonna 
build and run + # + def buildable_reference + @buildable_reference ||= BuildableReference.new @xml_element.elements['BuildableReference'] + end + + # @param [BuildableReference] ref + # Set the buildable reference this remote runnable is gonna build and run + # + def buildable_reference=(ref) + @xml_element.delete_element('BuildableReference') + @xml_element.add_element(ref.xml_element) + @buildable_reference = ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/test_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/test_action.rb new file mode 100644 index 0000000..0162e5b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/test_action.rb @@ -0,0 +1,352 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the TestAction node of a .xcscheme XML file + # + class TestAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'TestAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'TestAction') do + @xml_element.attributes['selectedDebuggerIdentifier'] = 'Xcode.DebuggerFoundation.Debugger.LLDB' + @xml_element.attributes['selectedLauncherIdentifier'] = 'Xcode.DebuggerFoundation.Launcher.LLDB' + @xml_element.add_element('AdditionalOptions') + self.should_use_launch_scheme_args_env = true + self.build_configuration = 'Debug' + end + end + + # @return [Bool] + # Whether this Test Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env? + string_to_bool(@xml_element.attributes['shouldUseLaunchSchemeArgsEnv']) + end + + # @param [Bool] flag + # Set whether this Test Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env=(flag) + @xml_element.attributes['shouldUseLaunchSchemeArgsEnv'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether this Test Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker? + string_to_bool(@xml_element.attributes['disableMainThreadChecker']) + end + + # @param [Bool] flag + # Set whether this Test Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker=(flag) + @xml_element.attributes['disableMainThreadChecker'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether Clang Code Coverage is enabled ('Gather coverage data' turned ON) + # + def code_coverage_enabled? 
+ string_to_bool(@xml_element.attributes['codeCoverageEnabled']) + end + + # @param [Bool] flag + # Set whether Clang Code Coverage is enabled ('Gather coverage data' turned ON) + # + def code_coverage_enabled=(flag) + @xml_element.attributes['codeCoverageEnabled'] = bool_to_string(flag) + end + + # @return [Array<TestableReference>] + # The list of TestableReference (test bundles) associated with this Test Action + # + def testables + return [] unless @xml_element.elements['Testables'] + + @xml_element.elements['Testables'].get_elements('TestableReference').map do |node| + TestableReference.new(node) + end + end + + # @param [Array<TestableReference>] testables + # Sets the list of TestableReference (test bundles) associated with this Test Action + # + def testables=(testables) + @xml_element.delete_element('Testables') + testables_element = @xml_element.add_element('Testables') + testables.each do |testable| + testables_element.add_element(testable.xml_element) + end + testables + end + + # @param [TestableReference] testable + # Add a TestableReference (test bundle) to this Test Action + # + def add_testable(testable) + testables = @xml_element.elements['Testables'] || @xml_element.add_element('Testables') + testables.add_element(testable.xml_element) + end + + # @return [Array<MacroExpansion>] + # The list of MacroExpansion bound with this TestAction + # + def macro_expansions + @xml_element.get_elements('MacroExpansion').map do |node| + MacroExpansion.new(node) + end + end + + # @param [MacroExpansion] macro_expansion + # Add a MacroExpansion to this TestAction + # + def add_macro_expansion(macro_expansion) + @xml_element.add_element(macro_expansion.xml_element) + end + + # @return [EnvironmentVariables] + # Returns the EnvironmentVariables that will be defined at test launch + # + def environment_variables + EnvironmentVariables.new(@xml_element.elements[XCScheme::VARIABLES_NODE]) + end + + # @param [EnvironmentVariables,nil] env_vars + # Sets the EnvironmentVariables that will be defined at test launch + # @return [EnvironmentVariables] + # + def environment_variables=(env_vars) + @xml_element.delete_element(XCScheme::VARIABLES_NODE) + @xml_element.add_element(env_vars.xml_element) if env_vars + env_vars + end + + # @todo handle 'AdditionalOptions' tag + + # @return [CommandLineArguments] + # Returns the CommandLineArguments that will be passed at app launch + # + def command_line_arguments + CommandLineArguments.new(@xml_element.elements[XCScheme::COMMAND_LINE_ARGS_NODE]) + end + + # @return [CommandLineArguments] arguments + # Sets the CommandLineArguments that will be passed at app launch + # + def command_line_arguments=(arguments) + @xml_element.delete_element(XCScheme::COMMAND_LINE_ARGS_NODE) + @xml_element.add_element(arguments.xml_element) if arguments + arguments + end + + #-------------------------------------------------------------------------# + + class TestableReference < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'TestableReference' node element to reference, + # or nil to create an new, empty TestableReference + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + def initialize(target_or_node = nil, root_project = nil) + create_xml_element_with_fallback(target_or_node, 'TestableReference') do + self.skipped = false + add_buildable_reference 
BuildableReference.new(target_or_node, root_project) unless target_or_node.nil? + end + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should be skipped or not + # + def skipped? + string_to_bool(@xml_element.attributes['skipped']) + end + + # @param [Bool] flag + # Set whether or not this TestableReference (test bundle) should be skipped or not + # + def skipped=(flag) + @xml_element.attributes['skipped'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should be run in parallel or not + # + def parallelizable? + string_to_bool(@xml_element.attributes['parallelizable']) + end + + # @param [Bool] flag + # Set whether or not this TestableReference (test bundle) should be run in parallel or not + # + def parallelizable=(flag) + @xml_element.attributes['parallelizable'] = bool_to_string(flag) + end + + # @return [String] + # The execution order for this TestableReference (test bundle) + # + def test_execution_ordering + @xml_element.attributes['testExecutionOrdering'] + end + + # @param [String] order + # Set the execution order for this TestableReference (test bundle) + # + def test_execution_ordering=(order) + @xml_element.attributes['testExecutionOrdering'] = order + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should be run in randomized order. + # + def randomized? + test_execution_ordering == 'random' + end + + # @return [Array<BuildableReference>] + # The list of BuildableReferences this action will build. + # (The list usually contains only one element) + # + def buildable_references + @xml_element.get_elements('BuildableReference').map do |node| + BuildableReference.new(node) + end + end + + # @param [BuildableReference] ref + # The BuildableReference to add to the list of targets this action will build + # + def add_buildable_reference(ref) + @xml_element.add_element(ref.xml_element) + end + + # @param [BuildableReference] ref + # The BuildableReference to remove from the list of targets this entry will build + # + def remove_buildable_reference(ref) + @xml_element.delete_element(ref.xml_element) + end + + # @return [Array<Test>] + # The list of SkippedTest this action will skip. + # + def skipped_tests + return [] if @xml_element.elements['SkippedTests'].nil? + @xml_element.elements['SkippedTests'].get_elements('Test').map do |node| + Test.new(node) + end + end + + # @param [Array<Test>] tests + # Set the list of SkippedTest this action will skip. + # + def skipped_tests=(tests) + @xml_element.delete_element('SkippedTests') + if tests.nil? + return + end + entries = @xml_element.add_element('SkippedTests') + tests.each do |skipped| + entries.add_element(skipped.xml_element) + end + end + + # @param [Test] skipped_test + # The SkippedTest to add to the list of tests this action will skip + # + def add_skipped_test(skipped_test) + entries = @xml_element.elements['SkippedTests'] || @xml_element.add_element('SkippedTests') + entries.add_element(skipped_test.xml_element) + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should use a whitelist or not + # + def use_test_selection_whitelist? 
+ string_to_bool(@xml_element.attributes['useTestSelectionWhitelist']) + end + + # @param [Bool] flag + # Set whether or not this TestableReference (test bundle) should use a whitelist or not + # + def use_test_selection_whitelist=(flag) + @xml_element.attributes['useTestSelectionWhitelist'] = bool_to_string(flag) + end + + # @return [Array<Test>] + # The list of SelectedTest this action will run. + # + def selected_tests + return [] if @xml_element.elements['SelectedTests'].nil? + @xml_element.elements['SelectedTests'].get_elements('Test').map do |node| + Test.new(node) + end + end + + # @param [Array<Test>] tests + # Set the list of SelectedTest this action will run. + # + def selected_tests=(tests) + @xml_element.delete_element('SelectedTests') + return if tests.nil? + entries = @xml_element.add_element('SelectedTests') + tests.each do |selected| + entries.add_element(selected.xml_element) + end + end + + # @param [Test] selected_test + # The SelectedTest to add to the list of tests this action will run. + # + def add_selected_test(selected_test) + entries = @xml_element.elements['SelectedTests'] || @xml_element.add_element('SelectedTests') + entries.add_element(selected_test.xml_element) + end + + class Test < XMLElementWrapper + # @param [REXML::Element] node + # The 'Test' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'Test') do + self.identifier = node.attributes['Identifier'] unless node.nil? + end + end + + # @return [String] + # Skipped test class name + # + def identifier + @xml_element.attributes['Identifier'] + end + + # @param [String] value + # Set the name of the skipped test class name + # + def identifier=(value) + @xml_element.attributes['Identifier'] = value + end + end + + # Aliased to`Test` for compatibility + # @todo Remove in Xcodeproj 2 + # + SkippedTest = Test + + # @todo handle 'AdditionalOptions' tag + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/xml_element_wrapper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/xml_element_wrapper.rb new file mode 100644 index 0000000..ceb7420 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/scheme/xml_element_wrapper.rb @@ -0,0 +1,82 @@ +module Xcodeproj + class XCScheme + # Abstract base class used for other XCScheme classes wrapping an XML element + # + class XMLElementWrapper + # @return [REXML::Element] + # The XML node wrapped and manipulated by this XMLElementWrapper object + # + attr_reader :xml_element + + # @return [String] + # The XML representation of the node this XMLElementWrapper wraps, + # formatted in the same way that Xcode would. + def to_s + formatter = XMLFormatter.new(2) + formatter.compact = false + out = '' + formatter.write(@xml_element, out) + out.gsub!("<?xml version='1.0' encoding='UTF-8'?>", '') + out << "\n" + out + end + + private + + # This is a method intended to be used to facilitate the implementation of the initializers. + # + # - Create the @xml_element attribute based on the node passed as parameter, only if + # that parameter is of type REXML::Element and its name matches the tag_name given. 
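A sketch of wiring a test bundle into the TestAction above, with one test skipped (`test_target` and the test identifier are illustrative):

    test_action = Xcodeproj::XCScheme::TestAction.new
    test_action.code_coverage_enabled = true

    testable = Xcodeproj::XCScheme::TestAction::TestableReference.new(test_target)
    skipped  = Xcodeproj::XCScheme::TestAction::TestableReference::Test.new
    skipped.identifier = 'ZephyrTests/testFlakyNetworkCall'
    testable.add_skipped_test(skipped)

    test_action.add_testable(testable)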
+ # - Otherwise, create a brand new REXML::Element with the proper tag name and + # execute the block given as a fallback to let the caller the chance to configure it + # + # @param [REXML::Element, *] node + # The node this XMLElementWrapper is expected to wrap + # or any other object (typically an AbstractTarget instance, or nil) the initializer might expect + # + # @param [String] tag_name + # The expected name for the node, which will also be the name used to create the new node + # if that `node` parameter is not a REXML::Element itself. + # + # @yield a parameter-less block if the `node` parameter is not actually a REXML::Element + # + # @raise Informative + # If the `node` parameter is a REXML::Element instance but the node's name + # doesn't match the one provided by the `tag_name` parameter. + # + def create_xml_element_with_fallback(node, tag_name) + if node && node.is_a?(REXML::Element) + raise Informative, 'Wrong XML tag name' unless node.name == tag_name + @xml_element = node + else + @xml_element = REXML::Element.new(tag_name) + yield if block_given? + end + end + + # @param [Bool] + # The boolean we want to represent as a string + # + # @return [String] + # The string representaiton of that boolean used in the XML ('YES' or 'NO') + # + def bool_to_string(flag) + flag ? 'YES' : 'NO' + end + + # @param [String] + # The string representaiton of a boolean used in the XML ('YES' or 'NO') + # + # @return [Bool] + # The boolean interpretation of that string + # + # @raise Informative + # If the string is not representing a boolean (i.e. is neither 'YES' nor 'NO') + # + def string_to_bool(str) + raise Informative, 'Invalid tag value. Expected YES or NO.' unless %w(YES NO).include?(str) + str == 'YES' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/user_interface.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/user_interface.rb new file mode 100644 index 0000000..9737c52 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/user_interface.rb @@ -0,0 +1,22 @@ +module Xcodeproj + # Manages the UI output so clients can customize it. + # + module UserInterface + # Prints a message to standard output. + # + # @return [void] + # + def self.puts(message) + STDOUT.puts message + end + + # Prints a message to standard error. + # + # @return [void] + # + def self.warn(message) + STDERR.puts message + end + end + UI = UserInterface +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace.rb new file mode 100644 index 0000000..40afbd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace.rb @@ -0,0 +1,277 @@ +require 'fileutils' +require 'rexml/document' +require 'xcodeproj/workspace/file_reference' +require 'xcodeproj/workspace/group_reference' + +module Xcodeproj + # Provides support for generating, reading and serializing Xcode Workspace + # documents. + # + class Workspace + # @return [REXML::Document] the parsed XML model for the workspace contents + attr_reader :document + + # @return [Hash<String => String>] a mapping from scheme name to project full path + # containing the scheme + attr_reader :schemes + + # @return [Array<FileReference>] the paths of the projects contained in the + # workspace. 
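The fallback contract of `create_xml_element_with_fallback` above can be observed through any concrete wrapper: a node with the expected tag is wrapped as-is, while a mismatched tag raises. A small sketch:

    require 'rexml/document'
    require 'xcodeproj'

    node = REXML::Element.new('AnalyzeAction')
    Xcodeproj::XCScheme::AnalyzeAction.new(node).xml_element.equal?(node)   # => true, wrapped in place

    Xcodeproj::XCScheme::AnalyzeAction.new(REXML::Element.new('TestAction'))
    # raises Informative, 'Wrong XML tag name'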
+ # + def file_references + return [] unless @document + @document.get_elements('/Workspace//FileRef').map do |node| + FileReference.from_node(node) + end + end + + # @return [Array<GroupReference>] the groups contained in the workspace + # + def group_references + return [] unless @document + @document.get_elements('/Workspace//Group').map do |node| + GroupReference.from_node(node) + end + end + + # @param [REXML::Document] document @see document + # @param [Array<FileReference>] file_references additional projects to add + # + # @note The document parameter is passed to the << operator if it is not a + # valid REXML::Document. It is optional, but may also be passed as nil + # + def initialize(document, *file_references) + @schemes = {} + if document.nil? + @document = REXML::Document.new(root_xml('')) + elsif document.is_a?(REXML::Document) + @document = document + else + @document = REXML::Document.new(root_xml('')) + self << document + end + file_references.each { |ref| self << ref } + end + + #-------------------------------------------------------------------------# + + # Returns a workspace generated by reading the contents of the given path. + # + # @param [String] path + # the path of the `xcworkspace` file. + # + # @return [Workspace] the generated workspace. + # + def self.new_from_xcworkspace(path) + from_s(File.read(File.join(path, 'contents.xcworkspacedata')), + File.expand_path(path)) + rescue Errno::ENOENT + new(nil) + end + + #-------------------------------------------------------------------------# + + # Returns a workspace generated by reading the contents of the given + # XML representation. + # + # @param [String] xml + # the XML representation of the workspace. + # + # @return [Workspace] the generated workspace. + # + def self.from_s(xml, workspace_path = '') + document = REXML::Document.new(xml) + instance = new(document) + instance.load_schemes(workspace_path) + instance + end + + # Adds a new path to the list of the of projects contained in the + # workspace. + # @param [String, Xcodeproj::Workspace::FileReference] path_or_reference + # A string or Xcode::Workspace::FileReference containing a path to an Xcode project + # + # @raise [ArgumentError] Raised if the input is neither a String nor a FileReference + # + # @return [void] + # + def <<(path_or_reference) + return unless @document && @document.respond_to?(:root) + + case path_or_reference + when String + project_file_reference = Xcodeproj::Workspace::FileReference.new(path_or_reference) + when Xcodeproj::Workspace::FileReference + project_file_reference = path_or_reference + projpath = nil + else + raise ArgumentError, "Input to the << operator must be a file path or FileReference, got #{path_or_reference.inspect}" + end + unless file_references.any? { |ref| project_file_reference.eql? ref } + @document.root.add_element(project_file_reference.to_node) + end + load_schemes_from_project File.expand_path(projpath || project_file_reference.path) + end + + #-------------------------------------------------------------------------# + + # Adds a new group container to the workspace + # workspace. 
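A sketch of the Workspace reading and appending API above (both paths are illustrative, and the appended project must exist on disk for its schemes to be picked up):

    workspace = Xcodeproj::Workspace.new_from_xcworkspace('Zephyr.xcworkspace')
    workspace << 'Example/ZephyrExample.xcodeproj'
    workspace.file_references.map(&:path)
    workspace.schemes     # => { scheme name => project path }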
+ # + # @param [String] name The name of the group + # + # @yield [Xcodeproj::Workspace::GroupReference, REXML::Element] + # Yields the GroupReference and underlying XML element for mutation + # + # @return [Xcodeproj::Workspace::GroupReference] The added group reference + # + def add_group(name) + return nil unless @document + group = Xcodeproj::Workspace::GroupReference.new(name) + elem = @document.root.add_element(group.to_node) + yield group, elem if block_given? + group + end + + # Checks if the workspace contains the project with the given file + # reference. + # + # @param [FileReference] file_reference + # The file_reference to the project. + # + # @return [Boolean] whether the project is contained in the workspace. + # + def include?(file_reference) + file_references.include?(file_reference) + end + + # @return [String] the XML representation of the workspace. + # + def to_s + contents = '' + stack = [] + @document.root.each_recursive do |elem| + until stack.empty? + last = stack.last + break if last == elem.parent + contents += xcworkspace_element_end_xml(stack.length, last) + stack.pop + end + + stack << elem + contents += xcworkspace_element_start_xml(stack.length, elem) + end + + until stack.empty? + contents += xcworkspace_element_end_xml(stack.length, stack.last) + stack.pop + end + + root_xml(contents) + end + + # Saves the workspace at the given `xcworkspace` path. + # + # @param [String] path + # the path where to save the project. + # + # @return [void] + # + def save_as(path) + FileUtils.mkdir_p(path) + File.open(File.join(path, 'contents.xcworkspacedata'), 'w') do |out| + out << to_s + end + end + + #-------------------------------------------------------------------------# + + # Load all schemes from all projects in workspace or in the workspace container itself + # + # @param [String] workspace_dir_path + # path of workspaces dir + # + # @return [void] + # + def load_schemes(workspace_dir_path) + # Normalizes path to directory of workspace needed for file_reference.absolute_path + workspaces_dir = workspace_dir_path + if File.extname(workspace_dir_path) == '.xcworkspace' + workspaces_dir = File.expand_path('..', workspaces_dir) + end + + file_references.each do |file_reference| + project_full_path = file_reference.absolute_path(workspaces_dir) + load_schemes_from_project(project_full_path) + end + + # Load schemes that are in the workspace container. + workspace_abs_path = File.absolute_path(workspace_dir_path) + Dir[File.join(workspace_dir_path, 'xcshareddata', 'xcschemes', '*.xcscheme')].each do |scheme| + scheme_name = File.basename(scheme, '.xcscheme') + @schemes[scheme_name] = workspace_abs_path + end + end + + private + + # Load all schemes from project + # + # @param [String] project_full_path + # project full path + # + # @return [void] + # + def load_schemes_from_project(project_full_path) + schemes = Xcodeproj::Project.schemes project_full_path + schemes.each do |scheme_name| + @schemes[scheme_name] = project_full_path + end + end + + # @return [String] The template of the workspace XML as formated by Xcode. + # + # @param [String] contents The XML contents of the workspace. + # + def root_xml(contents) + <<-DOC +<?xml version="1.0" encoding="UTF-8"?> +<Workspace + version = "1.0"> +#{contents.rstrip} +</Workspace> +DOC + end + + # + # @param [Integer] depth The depth of the element in the tree + # @param [REXML::Document::Element] elem The XML element to format. 
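The writing side of the Workspace API above can be sketched from an empty document (the group location and output path are illustrative):

    workspace = Xcodeproj::Workspace.new(nil)              # empty <Workspace> document
    workspace.add_group('Dependencies') do |group, elem|
      elem.add_attribute('location', 'container:Vendor')   # override the default empty location
    end
    workspace.save_as('Generated.xcworkspace')             # writes contents.xcworkspacedata inside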
+ # + # @return [String] The Xcode-specific XML formatting of an element start + # + def xcworkspace_element_start_xml(depth, elem) + attributes = case elem.name + when 'Group' + %w(location name) + when 'FileRef' + %w(location) + end + contents = "<#{elem.name}" + indent = ' ' * depth + attributes.each { |name| contents += "\n #{name} = \"#{elem.attribute(name)}\"" } + contents.split("\n").map { |line| "#{indent}#{line}" }.join("\n") + ">\n" + end + + # + # @param [Integer] depth The depth of the element in the tree + # @param [REXML::Document::Element] elem The XML element to format. + # + # @return [String] The Xcode-specific XML formatting of an element end + # + def xcworkspace_element_end_xml(depth, elem) + "#{' ' * depth}</#{elem.name}>\n" + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/file_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/file_reference.rb new file mode 100644 index 0000000..1691a63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/file_reference.rb @@ -0,0 +1,79 @@ +require 'xcodeproj/workspace/reference' + +module Xcodeproj + class Workspace + # Describes a file reference of a Workspace. + # + class FileReference < Reference + # @return [String] the path to the project + # + attr_reader :path + + # @param [#to_s] path @see path + # @param [#to_s] type @see type + # + def initialize(path, type = 'group') + @path = Pathname.new(path.to_s).cleanpath.to_s + @type = type.to_s + end + + # @return [Bool] Wether a file reference is equal to another. + # + def ==(other) + path == other.path && type == other.type + end + alias_method :eql?, :== + + # @return [Fixnum] A hash identical for equals objects. + # + def hash + [path, type].hash + end + + # Returns a file reference given XML representation. + # + # @param [REXML::Element] xml_node + # the XML representation. + # + # @return [FileReference] The new file reference instance. + # + def self.from_node(xml_node) + type, path = xml_node.attribute('location').value.split(':', 2) + if type == 'group' + path = prepend_parent_path(xml_node, path) + end + new(path, type) + end + + # @return [REXML::Element] the XML representation of the file reference. + # + def to_node + REXML::Element.new('FileRef').tap do |element| + element.add_attribute('location', "#{type}:#{path}") + end + end + + # Returns the absolute path of a file reference given the path of the + # directory containing workspace. + # + # @param [#to_s] workspace_dir_path + # The Path of the directory containing the workspace. + # + # @return [String] The absolute path to the project. 
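For orientation, here is a minimal usage sketch of the `Workspace` and `FileReference` API shown above. This is an editor's illustration, not part of the vendored gem; the project and workspace paths are hypothetical.

```ruby
require 'xcodeproj'

# Build a workspace in memory and add two projects to it.
workspace = Xcodeproj::Workspace.new(nil)
workspace << 'App.xcodeproj' # a plain path becomes a `group:` FileReference
workspace << Xcodeproj::Workspace::FileReference.new('Libraries/Lib.xcodeproj', 'group')

# Writes App.xcworkspace/contents.xcworkspacedata to disk.
workspace.save_as('App.xcworkspace')
```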
+ # + def absolute_path(workspace_dir_path) + workspace_dir_path = workspace_dir_path.to_s + case type + when 'group', 'container', 'self' + File.expand_path(File.join(workspace_dir_path, path)) + when 'absolute' + File.expand_path(path) + when 'developer' + raise "Developer workspace file reference type is not yet supported (#{path})" + else + raise "Unsupported workspace file reference type `#{type}`" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/group_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/group_reference.rb new file mode 100644 index 0000000..ec73dbf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/group_reference.rb @@ -0,0 +1,67 @@ +require 'xcodeproj/workspace/reference' + +module Xcodeproj + class Workspace + # Describes a group reference of a Workspace. + # + class GroupReference < Reference + # @return [String] the name of the group + # + attr_reader :name + + # @return [String] the location of the group on disk + # + attr_reader :location + + # @param [#to_s] name @see name + # @param [#to_s] type @see type + # @param [#to_s] location @see location + # + def initialize(name, type = 'container', location = '') + @name = name.to_s + @type = type.to_s + @location = location.to_s + end + + # @return [Bool] Whether a group reference is equal to another. + # + def ==(other) + name == other.name && type == other.type && location == other.location + end + alias_method :eql?, :== + + # @return [Fixnum] A hash identical for equals objects. + # + def hash + [name, type, location].hash + end + + # Returns a group reference given XML representation. + # + # @param [REXML::Element] xml_node + # the XML representation. + # + # @return [GroupReference] The new group reference instance. + # + def self.from_node(xml_node) + location_array = xml_node.attribute('location').value.split(':', 2) + type = location_array.first + location = location_array[1] || '' + if type == 'group' + location = prepend_parent_path(xml_node, location) + end + name = xml_node.attribute('name').value + new(name, type, location) + end + + # @return [REXML::Element] the XML representation of the group reference. + # + def to_node + REXML::Element.new('Group').tap do |element| + element.add_attribute('location', "#{type}:#{location}") + element.add_attribute('name', "#{name}") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/reference.rb new file mode 100644 index 0000000..599052b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/workspace/reference.rb @@ -0,0 +1,40 @@ +module Xcodeproj + class Workspace + # Describes a file/group reference of a Workspace. + # + class Reference + # @return [String] the type of reference to the project + # + # This can be of the following values: + # - absolute + # - group + # - container + # - developer (unsupported) + # + attr_reader :type + + # Returns the relative path to the parent group reference (if one exists) + # prepended to the passed in path. + # + # @param [REXML::Element] xml_node + # the XML representation. + # + # @param [String] path + # the path that will be prepended to. + # + # @return [String] the extended path including the parent node's path. + # + def self.prepend_parent_path(xml_node, path) + if !xml_node.parent.nil? 
&& (xml_node.parent.name == 'Group') + group = GroupReference.from_node(xml_node.parent) + if !group.location.nil? && !group.location.empty? + path = '' if path.nil? + path = File.join(group.location, path) + end + end + + path + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/xcodebuild_helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/xcodebuild_helper.rb new file mode 100644 index 0000000..6470b81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.19.0/lib/xcodeproj/xcodebuild_helper.rb @@ -0,0 +1,108 @@ +module Xcodeproj + # Helper class which returns information from xcodebuild. + # + class XcodebuildHelper + def initialize + @needs_to_parse_sdks = true + end + + # @return [String] The version of the last iOS sdk. + # + def last_ios_sdk + parse_sdks_if_needed + versions_by_sdk[:ios].sort.last + end + + # @return [String] The version of the last OS X sdk. + # + def last_osx_sdk + parse_sdks_if_needed + versions_by_sdk[:osx].sort.last + end + + # @return [String] The version of the last tvOS sdk. + # + def last_tvos_sdk + parse_sdks_if_needed + versions_by_sdk[:tvos].sort.last + end + + # @return [String] The version of the last watchOS sdk. + # + def last_watchos_sdk + parse_sdks_if_needed + versions_by_sdk[:watchos].sort.last + end + + private + + # @!group Private Helpers + + #-------------------------------------------------------------------------# + + # @return [Hash] The versions of the sdks grouped by name (`:ios`, `:osx`, `:tvos`, or `:watchos`). + # + attr_accessor :versions_by_sdk + + # @return [void] Parses the SDKs returned by xcodebuild and stores the + # information in the `versions_by_sdk` hash. + # + def parse_sdks_if_needed + if @needs_to_parse_sdks + @versions_by_sdk = {} + @versions_by_sdk[:osx] = [] + @versions_by_sdk[:ios] = [] + @versions_by_sdk[:tvos] = [] + @versions_by_sdk[:watchos] = [] + if xcodebuild_available? + sdks = parse_sdks_information(xcodebuild_sdks) + sdks.each do |(name, version)| + case + when name == 'macosx' then @versions_by_sdk[:osx] << version + when name == 'iphoneos' then @versions_by_sdk[:ios] << version + when name == 'appletvos' then @versions_by_sdk[:tvos] << version + when name == 'watchos' then @versions_by_sdk[:watchos] << version + end + end + end + end + end + + # @return [Bool] Whether xcodebuild is available. + # + def xcodebuild_available? + if @xcodebuild_available.nil? + `which xcodebuild 2>/dev/null` + @xcodebuild_available = $?.exitstatus.zero? + end + @xcodebuild_available + end + + # @return [Array<Array<String>>] An array of tuples where the first element + # is the name of the SDK and the second is the version. + # + def parse_sdks_information(output) + output.scan(/-sdk (macosx|iphoneos|watchos|appletvos)(.+\w)/) + end + + # @return [String] The sdk information reported by xcodebuild. + # + def xcodebuild_sdks + `xcodebuild -showsdks 2>/dev/null` + end + + #-------------------------------------------------------------------------# + + # @!group Singleton + + # @return [XcodebuildHelper] the current xcodebuild instance creating one + # if needed, which caches the information from the xcodebuild + # command line tool.
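A small sketch of how this singleton is typically queried (illustrative only; the returned versions depend on the local Xcode installation):

```ruby
require 'xcodeproj'

helper = Xcodeproj::XcodebuildHelper.instance
helper.last_ios_sdk # => e.g. "15.0", or nil when xcodebuild is not available
helper.last_osx_sdk # => e.g. "12.1"
```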
+ # + def self.instance + @instance ||= new + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/LICENSE b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/LICENSE new file mode 100644 index 0000000..f060fab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Eloy Durán <eloy.de.enige@gmail.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/README.md b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/README.md new file mode 100644 index 0000000..0ceb317 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/README.md @@ -0,0 +1,94 @@ +# Xcodeproj + +[![Build Status](https://img.shields.io/travis/CocoaPods/Xcodeproj/master.svg?style=flat)](https://travis-ci.org/CocoaPods/Xcodeproj) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/Xcodeproj.svg?style=flat)](https://codeclimate.com/github/CocoaPods/Xcodeproj) +[![Code Climate](https://img.shields.io/codeclimate/maintainability/CocoaPods/Xcodeproj.svg?style=flat&label=code%20climate)](https://codeclimate.com/github/CocoaPods/Xcodeproj) + +Xcodeproj lets you create and modify Xcode projects from [Ruby][ruby]. +Script boring management tasks or build Xcode-friendly libraries. Also includes +support for Xcode workspaces (`.xcworkspace`), configuration files (`.xcconfig`) and +Xcode Scheme files (`.xcscheme`). + +It is used in [CocoaPods](https://github.com/CocoaPods/CocoaPods) to create a +collection of supplemental libraries or frameworks, for all platforms Xcode supports. + +The API reference can be found [here](http://www.rubydoc.info/gems/xcodeproj). + +## Installing Xcodeproj + +Xcodeproj itself installs through RubyGems, the Ruby package manager. 
Install it +by performing the following command: + + $ [sudo] gem install xcodeproj + +## Quickstart + +To begin editing an xcodeproj file start by opening it as an Xcodeproj with: + +```ruby +require 'xcodeproj' +project_path = '/your_path/your_project.xcodeproj' +project = Xcodeproj::Project.open(project_path) +``` + +#### Some Small Examples To Get You Started + +> Look through all targets + +```ruby +project.targets.each do |target| + puts target.name +end +``` + +> Get all source files for a target + +```ruby +target = project.targets.first +files = target.source_build_phase.files.to_a.map do |pbx_build_file| + pbx_build_file.file_ref.real_path.to_s + +end.select do |path| + path.end_with?(".m", ".mm", ".swift") + +end.select do |path| + File.exists?(path) +end +``` + +> Set a specific build configuration to all targets + +```ruby +project.targets.each do |target| + target.build_configurations.each do |config| + config.build_settings['MY_CUSTOM_FLAG'] ||= 'TRUE' + end +end +``` + +## Command Line Tool + +Installing the Xcodeproj gem will also install a command-line tool `xcodeproj` which you can +use to generate project diffs, target diffs, output all configurations and show a YAML representation. + +For more information consult `xcodeproj --help`. + +## Collaborate + +All Xcodeproj development happens on [GitHub][xcodeproj]. Contributing patches +is really easy and gratifying. + +Follow [@CocoaPods][twitter] to get up to date information about what's +going on in the CocoaPods world. + + +## LICENSE + +These works are available under the MIT license. See the [LICENSE][license] file +for more info. + +[twitter]: http://twitter.com/CocoaPods +[ruby]: http://www.ruby-lang.org/en/ +[xcodeproj]: https://github.com/cocoapods/xcodeproj +[tickets]: https://github.com/cocoapods/xcodeproj/issues +[license]: LICENSE diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/bin/xcodeproj b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/bin/xcodeproj new file mode 100755 index 0000000..5d163fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/bin/xcodeproj @@ -0,0 +1,10 @@ +#!/usr/bin/env ruby + +if $PROGRAM_NAME == __FILE__ + ENV['BUNDLE_GEMFILE'] = File.expand_path('../../Gemfile', __FILE__) + require 'bundler/setup' +end + +require 'xcodeproj' + +Xcodeproj::Command.run(ARGV) diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj.rb new file mode 100644 index 0000000..3a179d0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj.rb @@ -0,0 +1,29 @@ +module Xcodeproj + require 'pathname' + require 'claide' + require 'colored2' + + class PlainInformative < StandardError + include CLAide::InformativeError + end + + class Informative < PlainInformative + def message + super !~ /\[!\]/ ? "[!] 
#{super}\n".red : super + end + end + + require 'xcodeproj/gem_version' + require 'xcodeproj/user_interface' + + autoload :Command, 'xcodeproj/command' + autoload :Config, 'xcodeproj/config' + autoload :Constants, 'xcodeproj/constants' + autoload :Differ, 'xcodeproj/differ' + autoload :Helper, 'xcodeproj/helper' + autoload :Plist, 'xcodeproj/plist' + autoload :Project, 'xcodeproj/project' + autoload :Workspace, 'xcodeproj/workspace' + autoload :XCScheme, 'xcodeproj/scheme' + autoload :XcodebuildHelper, 'xcodeproj/xcodebuild_helper' +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command.rb new file mode 100644 index 0000000..9087500 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command.rb @@ -0,0 +1,63 @@ +module Xcodeproj + require 'colored2' + require 'claide' + + class Command < CLAide::Command + require 'xcodeproj/command/config_dump' + require 'xcodeproj/command/target_diff' + require 'xcodeproj/command/project_diff' + require 'xcodeproj/command/show' + require 'xcodeproj/command/sort' + + self.abstract_command = true + self.command = 'xcodeproj' + self.version = VERSION + self.description = 'Xcodeproj lets you create and modify Xcode projects from Ruby.' + self.plugin_prefixes = %w(claide xcodeproj) + + def initialize(argv) + super + unless self.ansi_output? + Colored2.disable! + String.send(:define_method, :colorize) { |string, _| string } + end + end + + private + + def xcodeproj_path + unless @xcodeproj_path + projects = Dir.glob('*.xcodeproj') + if projects.size == 1 + xcodeproj_path = projects.first + elsif projects.size > 1 + raise Informative, 'There are more than one Xcode project documents ' \ + 'in the current working directory. Please specify ' \ + 'which to use with the `--project` option.' + else + raise Informative, 'No Xcode project document found in the current ' \ + 'working directory. Please specify which to use ' \ + 'with the `--project` option.' + end + @xcodeproj_path = Pathname.new(xcodeproj_path).expand_path + end + @xcodeproj_path + end + + def open_project!(*paths) + if paths.empty? + [xcodeproj] + else + paths.map { |path| Project.open(path) } + end + end + + def xcodeproj_path=(path) + @xcodeproj_path = path && Pathname.new(path).expand_path + end + + def xcodeproj + @xcodeproj ||= Project.open(xcodeproj_path) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/config_dump.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/config_dump.rb new file mode 100644 index 0000000..dac7645 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/config_dump.rb @@ -0,0 +1,91 @@ +module Xcodeproj + class Command + class ConfigDump < Command + self.description = <<-eos + Dumps the build settings of all project targets for all configurations in + directories named by the target in given output directory. + + It extracts common build settings in a per target base.xcconfig file. + + If no `PROJECT` is specified then the current work directory is searched + for one. + + If no `OUTPUT` is specified then the project directory will be used. + eos + + self.summary = 'Dumps the build settings of all project targets for ' \ + 'all configurations in directories named by the target ' \ + 'in the given output directory.' 
+ + self.arguments = [ + CLAide::Argument.new('PROJECT', false), + CLAide::Argument.new('OUTPUT', false), + ] + + def initialize(argv) + self.xcodeproj_path = argv.shift_argument + @output_path = Pathname(argv.shift_argument || '.') + + super + end + + def validate! + super + + raise Informative, 'The output path must be a directory.' unless @output_path.directory? + open_project! + end + + def run + dump_all_configs(xcodeproj, 'Project') + + xcodeproj.targets.each do |target| + dump_all_configs(target, target.name) + end + end + + def dump_all_configs(configurable, name) + path = Pathname(name) + + # Dump base configuration to file + base_settings = extract_common_settings!(configurable.build_configurations) + base_file_path = path + "#{name}_base.xcconfig" + dump_config_to_file(base_settings, base_file_path) + + # Dump each configuration to file + configurable.build_configurations.each do |config| + settings = config.build_settings + dump_config_to_file(settings, path + "#{name}_#{config.name.downcase}.xcconfig", [base_file_path]) + end + end + + def extract_common_settings!(build_configurations) + # Grasp all common build settings + all_build_settings = build_configurations.map(&:build_settings) + common_build_settings = all_build_settings.reduce do |settings, config_build_settings| + settings.select { |key, value| value == config_build_settings[key] } + end + + # Remove all common build settings from each configuration specific build settings + build_configurations.each do |config| + config.build_settings.reject! { |key| !common_build_settings[key].nil? } + end + + common_build_settings + end + + def dump_config_to_file(settings, file_path, includes = []) + dir = @output_path + file_path + '..' + dir.mkpath + + settings = Hash[settings.map do |k, v| + [k, Array(v).join(' ')] + end] + + config = Config.new(settings) + config.includes = includes + config.save_as(@output_path + file_path) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/project_diff.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/project_diff.rb new file mode 100644 index 0000000..2f5d553 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/project_diff.rb @@ -0,0 +1,56 @@ +module Xcodeproj + class Command + class ProjectDiff < Command + self.summary = 'Shows the difference between two projects' + + self.description = summary + <<-EOS.gsub(/ {8}/, '') + + It shows the difference in a UUID agnostic fashion. + + To reduce the noise (and to simplify implementation) differences in the + order of arrays are ignored. + EOS + + def self.options + [ + ['--ignore=KEY', 'A key to ignore in the comparison. Can be specified multiple times.'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('PROJECT1', true), + CLAide::Argument.new('PROJECT2', true), + ] + + def initialize(argv) + @path_project1 = argv.shift_argument + @path_project2 = argv.shift_argument + @keys_to_ignore = argv.all_options('ignore') + super + end + + def validate! 
+ super + @project1, @project2 = open_project!(@path_project1, @path_project2) + end + + def run + hash_1 = @project1.to_tree_hash.dup + hash_2 = @project2.to_tree_hash.dup + @keys_to_ignore.each do |key| + Differ.clean_hash!(hash_1, key) + Differ.clean_hash!(hash_2, key) + end + + diff = Differ.project_diff(hash_1, hash_2, @path_project1, @path_project2) + + require 'yaml' + yaml = diff.to_yaml + yaml.gsub!(@path_project1, @path_project1.cyan) + yaml.gsub!(@path_project2, @path_project2.magenta) + yaml.gsub!(':diff:', 'diff:'.yellow) + puts yaml + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/show.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/show.rb new file mode 100644 index 0000000..5c36c06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/show.rb @@ -0,0 +1,60 @@ +module Xcodeproj + class Command + class Show < Command + self.summary = 'Shows an overview of a project in a YAML representation.' + + def self.options + [ + ['--format=[hash|tree_hash|raw]', 'YAML output format'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('PROJECT', false), + ] + + def initialize(argv) + self.xcodeproj_path = argv.shift_argument + @output_format = argv.option('format') + @output_format &&= @output_format.to_sym + super + end + + def validate + super + unless [nil, :hash, :tree_hash, :raw].include?(@output_format) + help! "Unknown format `#{@output_format}`" + end + open_project! + end + + def run + require 'yaml' + + if @output_format + case @output_format + when :hash + puts xcodeproj.to_hash.to_yaml + when :tree_hash + puts xcodeproj.to_tree_hash.to_yaml + when :raw + puts xcodeproj.to_yaml + end + return + end + + pretty_print = xcodeproj.pretty_print + sections = [] + pretty_print.each do |key, value| + section = key.green + yaml = value.to_yaml + yaml.gsub!(/^---$/, '') + yaml.gsub!(/^-/, "\n-") + yaml.prepend(section) + sections << yaml + end + puts sections * "\n\n" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/sort.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/sort.rb new file mode 100644 index 0000000..5f4b70f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/sort.rb @@ -0,0 +1,44 @@ +module Xcodeproj + class Command + class Sort < Command + self.description = <<-eos + Sorts the given project. + + If no `PROJECT' is specified then the current work directory is searched for one. + eos + + self.summary = 'Sorts the given project.' + + def self.options + [ + ['--group-option=[above|below]', 'The position of the groups when sorting. If no option is specified, sorting will interleave groups and files.'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('PROJECT', false), + ] + + def initialize(argv) + self.xcodeproj_path = argv.shift_argument + @group_option = argv.option('group-option') + @group_option &&= @group_option.to_sym + super + end + + def validate! + super + unless [nil, :above, :below].include?(@group_option) + help! "Unknown format `#{@group_option}`" + end + open_project! 
+ end + + def run + xcodeproj.sort(:groups_position => @group_option) + xcodeproj.save + puts "The `#{File.basename(xcodeproj_path)}` project was sorted" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/target_diff.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/target_diff.rb new file mode 100644 index 0000000..7390bee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/command/target_diff.rb @@ -0,0 +1,43 @@ +module Xcodeproj + class Command + class TargetDiff < Command + self.summary = 'Shows the difference between two targets' + + def self.options + [ + ['--project PATH', 'The Xcode project document to use.'], + ].concat(super) + end + + self.arguments = [ + CLAide::Argument.new('TARGET1', true), + CLAide::Argument.new('TARGET2', true), + ] + + def initialize(argv) + @target1 = argv.shift_argument + @target2 = argv.shift_argument + self.xcodeproj_path = argv.option('--project') + super + end + + def validate! + super + open_project! + end + + def run + require 'yaml' + differ = Helper::TargetDiff.new(xcodeproj, @target1, @target2) + files = differ.new_source_build_files.map do |build_file| + { + 'Name' => build_file.file_ref.name, + 'Path' => build_file.file_ref.path, + 'Build settings' => build_file.settings, + } + end + puts files.to_yaml + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/config.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/config.rb new file mode 100644 index 0000000..35d48d3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/config.rb @@ -0,0 +1,386 @@ +# frozen_string_literal: true +require 'shellwords' +require 'xcodeproj/config/other_linker_flags_parser' + +module Xcodeproj + # This class holds the data for a Xcode build settings file (xcconfig) and + # provides support for serialization. + # + class Config + require 'set' + + KEY_VALUE_PATTERN = / + ( + [^=\[]+ # Any char, but not an assignment operator + # or subscript (non-greedy) + (?: # One or multiple conditional subscripts + \[ + [^\]]* # The subscript key + (?: + = # The subscript comparison operator + [^\]]* # The subscript value + )? + \] + )* + ) + \s* # Whitespaces after the key (needed because subscripts + # always end with ']') + = # The assignment operator + (.*) # The value + /x + private_constant :KEY_VALUE_PATTERN + + INHERITED = %w($(inherited) ${inherited}).freeze + private_constant :INHERITED + + INHERITED_REGEXP = Regexp.union(INHERITED) + private_constant :INHERITED_REGEXP + + # @return [Hash{String => String}] The attributes of the settings file + # excluding frameworks, weak_framework and libraries. + # + attr_accessor :attributes + + # @return [Hash{Symbol => Set<String>}] The other linker flags by key. + # Xcodeproj handles them in a dedicated way to prevent duplication + # of the libraries and of the frameworks. + # + attr_accessor :other_linker_flags + + # @return [Array] The list of the configuration files included by this + # configuration file (`#include "SomeConfig"`). + # + attr_accessor :includes + + # @param [Hash, File, String] xcconfig_hash_or_file + # The initial data. 
+ # + def initialize(xcconfig_hash_or_file = {}) + @attributes = {} + @includes = [] + @other_linker_flags = {} + [:simple, :frameworks, :weak_frameworks, :libraries, :arg_files, :force_load].each do |key| + @other_linker_flags[key] = Set.new + end + merge!(extract_hash(xcconfig_hash_or_file)) + end + + def inspect + to_hash.inspect + end + + def ==(other) + other.attributes == attributes && other.other_linker_flags == other_linker_flags && other.includes == includes + end + + public + + # @!group Serialization + #-------------------------------------------------------------------------# + + # Sorts the internal data by setting name and serializes it in the xcconfig + # format. + # + # @example + # + # config = Config.new('PODS_ROOT' => '"$(SRCROOT)/Pods"', 'OTHER_LDFLAGS' => '-lxml2') + # config.to_s # => "OTHER_LDFLAGS = -lxml2\nPODS_ROOT = \"$(SRCROOT)/Pods\"" + # + # @return [String] The serialized internal data. + # + def to_s(prefix = nil) + include_lines = includes.map { |path| "#include \"#{normalized_xcconfig_path(path)}\"" } + settings = to_hash(prefix).sort_by(&:first).map { |k, v| "#{k} = #{v}".strip } + (include_lines + settings).join("\n") << "\n" + end + + # Writes the serialized representation of the internal data to the given + # path. + # + # @param [Pathname] pathname + # The file where the data should be written to. + # + # @return [void] + # + def save_as(pathname, prefix = nil) + if File.exist?(pathname) + return if Config.new(pathname) == self + end + + pathname.open('w') { |file| file << to_s(prefix) } + end + + # The hash representation of the xcconfig. The hash includes the + # frameworks, the weak frameworks, the libraries and the simple other + # linker flags in the `Other Linker Flags` (`OTHER_LDFLAGS`). + # + # @note All the values are sorted to have a consistent output in Ruby + # 1.8.7. + # + # @return [Hash] The hash representation + # + def to_hash(prefix = nil) + list = [] + list += other_linker_flags[:simple].to_a.sort + modifiers = { + :frameworks => '-framework ', + :weak_frameworks => '-weak_framework ', + :libraries => '-l', + :arg_files => '@', + :force_load => '-force_load', + } + [:libraries, :frameworks, :weak_frameworks, :arg_files, :force_load].each do |key| + modifier = modifiers[key] + sorted = other_linker_flags[key].to_a.sort + if key == :force_load + list += sorted.map { |l| %(#{modifier} #{l}) } + else + list += sorted.map { |l| %(#{modifier}"#{l}") } + end + end + + result = attributes.dup + result['OTHER_LDFLAGS'] = list.join(' ') unless list.empty? + result.reject! { |_, v| INHERITED.any? { |i| i == v.to_s.strip } } + + result = @includes.map do |incl| + path = File.expand_path(incl, @filepath.dirname) + if File.readable? path + Xcodeproj::Config.new(path).to_hash + else + {} + end + end.inject(&:merge).merge(result) unless @filepath.nil? || @includes.empty? + + if prefix + Hash[result.map { |k, v| [prefix + k, v] }] + else + result + end + end + + alias_method :to_h, :to_hash + + # @return [Set<String>] The list of the frameworks required by this + # settings file. + # + def frameworks + other_linker_flags[:frameworks] + end + + # @return [Set<String>] The list of the *weak* frameworks required by + # this settings file. + # + def weak_frameworks + other_linker_flags[:weak_frameworks] + end + + # @return [Set<String>] The list of the libraries required by this + # settings file. + # + def libraries + other_linker_flags[:libraries] + end + + # @return [Set<String>] The list of the arg files required by this + # settings file. 
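A short sketch of the `OTHER_LDFLAGS` handling described above, with made-up flag values (editor's illustration, not part of the gem):

```ruby
require 'xcodeproj'

config = Xcodeproj::Config.new('OTHER_LDFLAGS' => '-framework UIKit -lxml2')
config.merge!('OTHER_LDFLAGS' => '-ObjC -lxml2') # the repeated -lxml2 is absorbed by the Set
config.frameworks               # => #<Set: {"UIKit"}>
config.to_hash['OTHER_LDFLAGS'] # => '-ObjC -l"xml2" -framework "UIKit"'
```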
+ # + def arg_files + other_linker_flags[:arg_files] + end + + public + + # @!group Merging + #-------------------------------------------------------------------------# + + # Merges the given xcconfig representation in the receiver. + # + # @example + # + # config = Config.new('PODS_ROOT' => '"$(SRCROOT)/Pods"', 'OTHER_LDFLAGS' => '-lxml2') + # config.merge!('OTHER_LDFLAGS' => '-lz', 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers"') + # config.to_hash # => { 'PODS_ROOT' => '"$(SRCROOT)/Pods"', 'OTHER_LDFLAGS' => '-lxml2 -lz', 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers"' } + # + # @note If a key in the given hash already exists in the internal data + # then its value is appended. + # + # @param [Hash, Config] config + # The xcconfig representation to merge. + # + # @todo The logic to normalize an hash should be extracted and the + # initializer should not call this method. + # + # @return [void] + # + def merge!(xcconfig) + if xcconfig.is_a? Config + merge_attributes!(xcconfig.attributes) + other_linker_flags.keys.each do |key| + other_linker_flags[key].merge(xcconfig.other_linker_flags[key]) + end + else + merge_attributes!(xcconfig.to_hash) + if flags = attributes.delete('OTHER_LDFLAGS') + flags_by_key = OtherLinkerFlagsParser.parse(flags) + other_linker_flags.keys.each do |key| + other_linker_flags[key].merge(flags_by_key[key]) + end + end + end + end + alias_method :<<, :merge! + + # Creates a new #{Config} with the data of the receiver merged with the + # given xcconfig representation. + # + # @param [Hash, Config] config + # The xcconfig representation to merge. + # + # @return [Config] the new xcconfig. + # + def merge(config) + dup.tap { |x| x.merge!(config) } + end + + # @return [Config] A copy of the receiver. + # + def dup + Xcodeproj::Config.new(to_hash.dup) + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private Helpers + + # Returns a hash from the given argument reading it from disk if necessary. + # + # @param [String, Pathname, Hash] argument + # The source from where the hash should be extracted. + # + # @return [Hash] + # + def extract_hash(argument) + return argument if argument.is_a?(Hash) + if argument.respond_to? :read + @filepath = Pathname.new(argument.to_path) + hash_from_file_content(argument.read) + elsif File.readable?(argument.to_s) + @filepath = Pathname.new(argument.to_s) + hash_from_file_content(File.read(argument)) + else + argument + end + end + + # Returns a hash from the string representation of an Xcconfig file. + # + # @param [String] string + # The string representation of an xcconfig file. + # + # @return [Hash] the hash containing the xcconfig data. + # + def hash_from_file_content(string) + hash = {} + string.split("\n").each do |line| + uncommented_line = strip_comment(line) + if include = extract_include(uncommented_line) + @includes.push normalized_xcconfig_path(include) + else + key, value = extract_key_value(uncommented_line) + next unless key + value.gsub!(INHERITED_REGEXP) { |m| hash.fetch(key, m) } + hash[key] = value + end + end + hash + end + + # Merges the given attributes hash while ensuring values are not duplicated. + # + # @param [Hash] attributes + # The attributes hash to merge into @attributes. + # + # @return [void] + # + def merge_attributes!(attributes) + @attributes.merge!(attributes) do |_, v1, v2| + v1 = v1.strip + v2 = v2.strip + v1_split = v1.shellsplit + v2_split = v2.shellsplit + if (v2_split - v1_split).empty? 
|| v1_split.first(v2_split.size) == v2_split + v1 + elsif v2_split.first(v1_split.size) == v1_split + v2 + else + "#{v1} #{v2}" + end + end + end + + # Strips the comments from a line of an xcconfig string. + # + # @param [String] line + # the line to process. + # + # @return [String] the uncommented line. + # + def strip_comment(line) + line.partition('//').first + end + + # Returns the file included by a line of an xcconfig string if present. + # + # @param [String] line + # the line to process. + # + # @return [String] the included file. + # @return [Nil] if no include was found in the line. + # + def extract_include(line) + regexp = /#include\s*"(.+)"/ + match = line.match(regexp) + match[1] if match + end + + # Returns the key and the value described by the given line of an xcconfig. + # + # @param [String] line + # the line to process. + # + # @return [Array] A tuple where the first entry is the key and the second + # entry is the value. + # + def extract_key_value(line) + match = line.match(KEY_VALUE_PATTERN) + if match + key = match[1] + value = match[2] + [key.strip, value.strip] + else + [] + end + end + + # Normalizes the given path to an xcconfing file to be used in includes, + # appending the extension if necessary. + # + # @param [String] path + # The path of the file which will be included in the xcconfig. + # + # @return [String] The normalized path. + # + def normalized_xcconfig_path(path) + if File.extname(path) == '.xcconfig' + path + else + "#{path}.xcconfig" + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/config/other_linker_flags_parser.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/config/other_linker_flags_parser.rb new file mode 100644 index 0000000..31eccef --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/config/other_linker_flags_parser.rb @@ -0,0 +1,73 @@ +require 'shellwords' + +module Xcodeproj + class Config + # Parses other linker flags values. + # + module OtherLinkerFlagsParser + # @return [Hash{Symbol, Array[String]}] Splits the given + # other linker flags value by type. + # + # @param [String, Array] flags + # The other linker flags value. + # + def self.parse(flags) + result = { + :frameworks => [], + :weak_frameworks => [], + :libraries => [], + :arg_files => [], + :simple => [], + :force_load => [], + } + + key = nil + if flags.is_a? String + flags = split(flags) + end + flags.each do |token| + case token + when '-framework' + key = :frameworks + when '-weak_framework' + key = :weak_frameworks + when '-l' + key = :libraries + when '@' + key = :arg_files + when '-force_load' + key = :force_load + else + if key + result[key] << token + key = nil + else + result[:simple] << token + end + end + end + result + end + + # @return [Array<String>] Split the given other linker + # flags value, taking into account quoting and + # the fact that the `-l` flag might omit the + # space. + # + # @param [String] flags + # The other linker flags value. 
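For reference, a sketch of the parser's output shape for a made-up flags string (illustrative only):

```ruby
require 'xcodeproj'

flags = '-ObjC -lxml2 -framework UIKit -force_load Vendor/libFoo.a'
Xcodeproj::Config::OtherLinkerFlagsParser.parse(flags)
# => { :frameworks      => ['UIKit'],
#      :weak_frameworks => [],
#      :libraries       => ['xml2'],
#      :arg_files       => [],
#      :simple          => ['-ObjC'],
#      :force_load      => ['Vendor/libFoo.a'] }
```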
+ # + def self.split(flags) + flags.strip.shellsplit.flat_map do |string| + if string =~ /\A-l.+/ + ['-l', string[2..-1]] + elsif string =~ /\A@.+/ + ['@', string[1..-1]] + else + string + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/constants.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/constants.rb new file mode 100644 index 0000000..fd23718 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/constants.rb @@ -0,0 +1,465 @@ +module Xcodeproj + # This modules groups all the constants known to Xcodeproj. + # + module Constants + # @return [String] The last known iOS SDK (stable). + # + LAST_KNOWN_IOS_SDK = '14.0' + + # @return [String] The last known OS X SDK (stable). + # + LAST_KNOWN_OSX_SDK = '10.15' + + # @return [String] The last known tvOS SDK (stable). + # + LAST_KNOWN_TVOS_SDK = '14.0' + + # @return [String] The last known watchOS SDK (stable). + # + LAST_KNOWN_WATCHOS_SDK = '7.0' + + # @return [String] The last known archive version to Xcodeproj. + # + LAST_KNOWN_ARCHIVE_VERSION = 1 + + # @return [String] The last known Swift version (stable). + # + LAST_KNOWN_SWIFT_VERSION = '5.0' + + # @return [String] The default object version for Xcodeproj. + # + DEFAULT_OBJECT_VERSION = 46 + + # @return [String] The last known object version to Xcodeproj. + # + LAST_KNOWN_OBJECT_VERSION = 55 + + # @return [String] The last known Xcode version to Xcodeproj. + # + LAST_UPGRADE_CHECK = '1240' + + # @return [String] The last known Xcode version to Xcodeproj. + # + LAST_SWIFT_UPGRADE_CHECK = '1240' + + # @return [String] The version of `.xcscheme` files supported by Xcodeproj + # + XCSCHEME_FORMAT_VERSION = '1.3' + + # @return [Hash] The all the known ISAs grouped by superclass. + # + KNOWN_ISAS = { + 'AbstractObject' => %w( + PBXBuildFile + AbstractBuildPhase + PBXBuildRule + XCBuildConfiguration + XCConfigurationList + PBXContainerItemProxy + PBXFileReference + PBXGroup + PBXProject + PBXTargetDependency + PBXReferenceProxy + AbstractTarget + ), + + 'AbstractBuildPhase' => %w( + PBXCopyFilesBuildPhase + PBXResourcesBuildPhase + PBXSourcesBuildPhase + PBXFrameworksBuildPhase + PBXHeadersBuildPhase + PBXShellScriptBuildPhase + ), + + 'AbstractTarget' => %w( + PBXNativeTarget + PBXAggregateTarget + PBXLegacyTarget + ), + + 'PBXGroup' => %w( + XCVersionGroup + PBXVariantGroup + ), + }.freeze + + # @return [Hash] The known file types corresponding to each extension. 
+ # + FILE_TYPES_BY_EXTENSION = { + 'a' => 'archive.ar', + 'apns' => 'text', + 'app' => 'wrapper.application', + 'appex' => 'wrapper.app-extension', + 'bundle' => 'wrapper.plug-in', + 'cpp' => 'sourcecode.cpp.cpp', + 'dylib' => 'compiled.mach-o.dylib', + 'entitlements' => 'text.plist.entitlements', + 'framework' => 'wrapper.framework', + 'gif' => 'image.gif', + 'gpx' => 'text.xml', + 'h' => 'sourcecode.c.h', + 'hpp' => 'sourcecode.cpp.h', + 'm' => 'sourcecode.c.objc', + 'markdown' => 'text', + 'mdimporter' => 'wrapper.cfbundle', + 'modulemap' => 'sourcecode.module', + 'mov' => 'video.quicktime', + 'mp3' => 'audio.mp3', + 'octest' => 'wrapper.cfbundle', + 'pch' => 'sourcecode.c.h', + 'plist' => 'text.plist.xml', + 'png' => 'image.png', + 'sh' => 'text.script.sh', + 'sks' => 'file.sks', + 'storyboard' => 'file.storyboard', + 'strings' => 'text.plist.strings', + 'swift' => 'sourcecode.swift', + 'xcassets' => 'folder.assetcatalog', + 'xcconfig' => 'text.xcconfig', + 'xcdatamodel' => 'wrapper.xcdatamodel', + 'xcodeproj' => 'wrapper.pb-project', + 'xctest' => 'wrapper.cfbundle', + 'xib' => 'file.xib', + 'zip' => 'archive.zip', + }.freeze + + # @return [Hash] The compatibility version string for different object versions. + # + COMPATIBILITY_VERSION_BY_OBJECT_VERSION = { + 55 => 'Xcode 13.0', + 54 => 'Xcode 12.0', + 53 => 'Xcode 11.4', + 52 => 'Xcode 11.0', + 51 => 'Xcode 10.0', + 50 => 'Xcode 9.3', + 48 => 'Xcode 8.0', + 47 => 'Xcode 6.3', + 46 => 'Xcode 3.2', + 45 => 'Xcode 3.1', + }.freeze + + # @return [Hash] The uniform type identifier of various product types. + # + PRODUCT_TYPE_UTI = { + :application => 'com.apple.product-type.application', + :application_on_demand_install_capable => 'com.apple.product-type.application.on-demand-install-capable', + :framework => 'com.apple.product-type.framework', + :dynamic_library => 'com.apple.product-type.library.dynamic', + :static_library => 'com.apple.product-type.library.static', + :bundle => 'com.apple.product-type.bundle', + :octest_bundle => 'com.apple.product-type.bundle', + :unit_test_bundle => 'com.apple.product-type.bundle.unit-test', + :ui_test_bundle => 'com.apple.product-type.bundle.ui-testing', + :app_extension => 'com.apple.product-type.app-extension', + :command_line_tool => 'com.apple.product-type.tool', + :watch_app => 'com.apple.product-type.application.watchapp', + :watch2_app => 'com.apple.product-type.application.watchapp2', + :watch2_app_container => 'com.apple.product-type.application.watchapp2-container', + :watch_extension => 'com.apple.product-type.watchkit-extension', + :watch2_extension => 'com.apple.product-type.watchkit2-extension', + :tv_extension => 'com.apple.product-type.tv-app-extension', + :messages_application => 'com.apple.product-type.application.messages', + :messages_extension => 'com.apple.product-type.app-extension.messages', + :sticker_pack => 'com.apple.product-type.app-extension.messages-sticker-pack', + :xpc_service => 'com.apple.product-type.xpc-service', + }.freeze + + # @return [Hash] The extensions or the various product UTIs. 
+ # + PRODUCT_UTI_EXTENSIONS = { + :application => 'app', + :application_on_demand_install_capable => 'app', + :framework => 'framework', + :dynamic_library => 'dylib', + :static_library => 'a', + :bundle => 'bundle', + :octest_bundle => 'octest', + :unit_test_bundle => 'xctest', + :ui_test_bundle => 'xctest', + :app_extension => 'appex', + :messages_application => 'app', + :messages_extension => 'appex', + :sticker_pack => 'appex', + :watch2_extension => 'appex', + :watch2_app => 'app', + :watch2_app_container => 'app', + }.freeze + + # @return [Hash] The common build settings grouped by platform, and build + # configuration name. + # + COMMON_BUILD_SETTINGS = { + :all => { + # empty? + # come from the project settings + }.freeze, + [:debug] => { + # empty? + # come from the project settings + }.freeze, + [:release] => { + # empty? + # come from the project settings + }.freeze, + [:ios] => { + 'SDKROOT' => 'iphoneos', + }.freeze, + [:osx] => { + 'SDKROOT' => 'macosx', + }.freeze, + [:tvos] => { + 'SDKROOT' => 'appletvos', + }.freeze, + [:watchos] => { + 'SDKROOT' => 'watchos', + }.freeze, + [:debug, :osx] => { + # Empty? + }.freeze, + [:release, :osx] => { + # Empty? + }.freeze, + [:debug, :ios] => { + # Empty? + }.freeze, + [:release, :ios] => { + 'VALIDATE_PRODUCT' => 'YES', + }.freeze, + [:debug, :tvos] => { + # Empty? + }.freeze, + [:release, :tvos] => { + 'VALIDATE_PRODUCT' => 'YES', + }.freeze, + [:debug, :watchos] => { + # Empty? + }.freeze, + [:release, :watchos] => { + 'VALIDATE_PRODUCT' => 'YES', + }.freeze, + [:swift] => { + # Empty? + # The swift version, like deployment target, is set in + # ProjectHelper#common_build_settings + }.freeze, + [:debug, :application, :swift] => { + # Empty? + }.freeze, + [:debug, :swift] => { + # Swift optimization settings are provided by the project settings + }.freeze, + [:release, :swift] => { + # Swift optimization settings are provided by the project settings + }.freeze, + [:debug, :static_library, :swift] => { + }.freeze, + [:framework] => { + 'CURRENT_PROJECT_VERSION' => '1', + 'DEFINES_MODULE' => 'YES', + 'DYLIB_COMPATIBILITY_VERSION' => '1', + 'DYLIB_CURRENT_VERSION' => '1', + 'DYLIB_INSTALL_NAME_BASE' => '@rpath', + 'INSTALL_PATH' => '$(LOCAL_LIBRARY_DIR)/Frameworks', + 'PRODUCT_NAME' => '$(TARGET_NAME:c99extidentifier)', + 'SKIP_INSTALL' => 'YES', + 'VERSION_INFO_PREFIX' => '', + 'VERSIONING_SYSTEM' => 'apple-generic', + }.freeze, + [:ios, :framework] => { + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks @loader_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '1,2', + }.freeze, + [:osx, :framework] => { + 'COMBINE_HIDPI_IMAGES' => 'YES', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/../Frameworks @loader_path/Frameworks', + }.freeze, + [:watchos, :framework] => { + 'APPLICATION_EXTENSION_API_ONLY' => 'YES', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks @loader_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '4', + }.freeze, + [:tvos, :framework] => { + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks @loader_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '3', + }.freeze, + [:framework, :swift] => { + 'DEFINES_MODULE' => 'YES', + }.freeze, + [:osx, :static_library] => { + 'EXECUTABLE_PREFIX' => 'lib', + 'SKIP_INSTALL' => 'YES', + }.freeze, + [:ios, :static_library] => { + 'OTHER_LDFLAGS' => '-ObjC', + 'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '1,2', + }.freeze, + [:watchos, :static_library] => { + 'OTHER_LDFLAGS' => '-ObjC', + 
'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '4', + }.freeze, + [:tvos, :static_library] => { + 'OTHER_LDFLAGS' => '-ObjC', + 'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '3', + }.freeze, + [:osx, :dynamic_library] => { + 'DYLIB_COMPATIBILITY_VERSION' => '1', + 'DYLIB_CURRENT_VERSION' => '1', + 'EXECUTABLE_PREFIX' => 'lib', + 'SKIP_INSTALL' => 'YES', + }.freeze, + [:application] => { + 'ASSETCATALOG_COMPILER_APPICON_NAME' => 'AppIcon', + 'ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME' => 'AccentColor', + }.freeze, + [:ios, :application] => { + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '1,2', + }.freeze, + [:osx, :application] => { + 'COMBINE_HIDPI_IMAGES' => 'YES', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/../Frameworks', + }.freeze, + [:watchos, :application] => { + 'SKIP_INSTALL' => 'YES', + 'TARGETED_DEVICE_FAMILY' => '4', + }.freeze, + [:tvos, :application] => { + 'ASSETCATALOG_COMPILER_APPICON_NAME' => 'App Icon & Top Shelf Image', + 'LD_RUNPATH_SEARCH_PATHS' => '$(inherited) @executable_path/Frameworks', + 'TARGETED_DEVICE_FAMILY' => '3', + }.freeze, + [:tvos, :application, :swift] => { + 'ENABLE_PREVIEWS' => 'YES', + }.freeze, + [:watchos, :application, :swift] => { + 'ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES' => 'YES', + }.freeze, + [:bundle] => { + 'WRAPPER_EXTENSION' => 'bundle', + 'SKIP_INSTALL' => 'YES', + }.freeze, + [:ios, :bundle] => { + 'SDKROOT' => 'iphoneos', + }.freeze, + [:osx, :bundle] => { + 'COMBINE_HIDPI_IMAGES' => 'YES', + 'INSTALL_PATH' => '$(LOCAL_LIBRARY_DIR)/Bundles', + 'SDKROOT' => 'macosx', + }.freeze, + }.freeze + + # @return [Hash] The default build settings for a new project. + # + PROJECT_DEFAULT_BUILD_SETTINGS = { + :all => { + 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + 'CLANG_ANALYZER_NONNULL' => 'YES', + 'CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION' => 'YES_AGGRESSIVE', + 'CLANG_CXX_LANGUAGE_STANDARD' => 'gnu++14', + 'CLANG_CXX_LIBRARY' => 'libc++', + 'CLANG_ENABLE_MODULES' => 'YES', + 'CLANG_ENABLE_OBJC_ARC' => 'YES', + 'CLANG_ENABLE_OBJC_WEAK' => 'YES', + 'CLANG_WARN__DUPLICATE_METHOD_MATCH' => 'YES', + 'CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING' => 'YES', + 'CLANG_WARN_BOOL_CONVERSION' => 'YES', + 'CLANG_WARN_COMMA' => 'YES', + 'CLANG_WARN_CONSTANT_CONVERSION' => 'YES', + 'CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS' => 'YES', + 'CLANG_WARN_DIRECT_OBJC_ISA_USAGE' => 'YES_ERROR', + 'CLANG_WARN_DOCUMENTATION_COMMENTS' => 'YES', + 'CLANG_WARN_EMPTY_BODY' => 'YES', + 'CLANG_WARN_ENUM_CONVERSION' => 'YES', + 'CLANG_WARN_INFINITE_RECURSION' => 'YES', + 'CLANG_WARN_INT_CONVERSION' => 'YES', + 'CLANG_WARN_NON_LITERAL_NULL_CONVERSION' => 'YES', + 'CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF' => 'YES', + 'CLANG_WARN_OBJC_LITERAL_CONVERSION' => 'YES', + 'CLANG_WARN_OBJC_ROOT_CLASS' => 'YES_ERROR', + 'CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER' => 'YES', + 'CLANG_WARN_RANGE_LOOP_ANALYSIS' => 'YES', + 'CLANG_WARN_STRICT_PROTOTYPES' => 'YES', + 'CLANG_WARN_SUSPICIOUS_MOVE' => 'YES', + 'CLANG_WARN_UNGUARDED_AVAILABILITY' => 'YES_AGGRESSIVE', + 'CLANG_WARN_UNREACHABLE_CODE' => 'YES', + 'COPY_PHASE_STRIP' => 'NO', + 'ENABLE_STRICT_OBJC_MSGSEND' => 'YES', + 'GCC_C_LANGUAGE_STANDARD' => 'gnu11', + 'GCC_NO_COMMON_BLOCKS' => 'YES', + 'GCC_WARN_64_TO_32_BIT_CONVERSION' => 'YES', + 'GCC_WARN_ABOUT_RETURN_TYPE' => 'YES_ERROR', + 'GCC_WARN_UNDECLARED_SELECTOR' => 'YES', + 'GCC_WARN_UNINITIALIZED_AUTOS' => 'YES_AGGRESSIVE', + 'GCC_WARN_UNUSED_FUNCTION' => 'YES', + 'GCC_WARN_UNUSED_VARIABLE' => 
'YES', + 'MTL_FAST_MATH' => 'YES', + 'PRODUCT_NAME' => '$(TARGET_NAME)', + 'SWIFT_VERSION' => '5.0', + }, + :release => { + 'DEBUG_INFORMATION_FORMAT' => 'dwarf-with-dsym', + 'ENABLE_NS_ASSERTIONS' => 'NO', + 'MTL_ENABLE_DEBUG_INFO' => 'NO', + 'SWIFT_COMPILATION_MODE' => 'wholemodule', + 'SWIFT_OPTIMIZATION_LEVEL' => '-O', + }.freeze, + :debug => { + 'DEBUG_INFORMATION_FORMAT' => 'dwarf', + 'ENABLE_TESTABILITY' => 'YES', + 'GCC_DYNAMIC_NO_PIC' => 'NO', + 'GCC_OPTIMIZATION_LEVEL' => '0', + 'GCC_PREPROCESSOR_DEFINITIONS' => ['DEBUG=1', '$(inherited)'], + 'MTL_ENABLE_DEBUG_INFO' => 'INCLUDE_SOURCE', + 'ONLY_ACTIVE_ARCH' => 'YES', + 'SWIFT_ACTIVE_COMPILATION_CONDITIONS' => 'DEBUG', + 'SWIFT_OPTIMIZATION_LEVEL' => '-Onone', + }.freeze, + }.freeze + + # @return [Hash] The corresponding numeric value of each copy build phase + # destination. + # + COPY_FILES_BUILD_PHASE_DESTINATIONS = { + :absolute_path => '0', + :products_directory => '16', + :wrapper => '1', + :resources => '7', # default + :executables => '6', + :java_resources => '15', + :frameworks => '10', + :shared_frameworks => '11', + :shared_support => '12', + :plug_ins => '13', + }.freeze + + # @return [Hash] The corresponding numeric value of each proxy type for + # PBXContainerItemProxy. + PROXY_TYPES = { + :native_target => '1', + :reference => '2', + }.freeze + + # @return [Array] The extensions which are associated with header files. + # + HEADER_FILES_EXTENSIONS = %w(.h .hh .hpp .ipp .tpp .hxx .def .inl .inc).freeze + + # @return [Array] The keywords Xcode use to identify a build setting can + # inherit values from a previous precedence level + INHERITED_KEYWORDS = %w( + $(inherited) + ${inherited} + ).freeze + + # @return [Hash] Possible types for a scheme's 'ExecutionAction' node + # + EXECUTION_ACTION_TYPE = { + :shell_script => 'Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.ShellScriptAction', + :send_email => 'Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.SendEmailAction', + }.freeze + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/differ.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/differ.rb new file mode 100644 index 0000000..33f634f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/differ.rb @@ -0,0 +1,239 @@ +module Xcodeproj + # Computes the recursive diff of Hashes, Array and other objects. + # + # Useful to compare two projects. Inspired from + # 'active_support/core_ext/hash/diff'. + # + # @example + # h1 = { :common => 'value', :changed => 'v1' } + # h2 = { :common => 'value', :changed => 'v2', :addition => 'new_value' } + # h1.recursive_diff(h2) == { + # :changed => { + # :self => 'v1', + # :other => 'v2' + # }, + # :addition => { + # :self => nil, + # :other => 'new_value' + # } + # } #=> true + # + # + # + # + module Differ + # Computes the recursive difference of two given values. + # + # @param [Object] value_1 + # The first value to compare. + # + # @param [Object] value_2 + # The second value to compare. + # + # @param [Object] key_1 + # The key for the diff of value_1. + # + # @param [Object] key_2 + # The key for the diff of value_2. + # + # @param [Object] id_key + # The key used to identify correspondent hashes in an array. + # + # @return [Hash] The diff + # @return [Nil] if the given values are equal. 
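A quick illustration of the diff shape for two small hashes (hypothetical keys; editor's sketch, not part of the vendored source):

```ruby
require 'xcodeproj'

h1 = { 'name' => 'App', 'FLAGS' => '-ObjC' }
h2 = { 'name' => 'App', 'FLAGS' => '-lz', 'EXTRA' => '1' }

Xcodeproj::Differ.diff(h1, h2, :key_1 => 'old', :key_2 => 'new')
# => { 'FLAGS' => { 'old' => '-ObjC', 'new' => '-lz' },
#      'EXTRA' => { 'old' => nil,     'new' => '1' } }
```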
+ # + def self.diff(value_1, value_2, options = {}) + options[:key_1] ||= 'value_1' + options[:key_2] ||= 'value_2' + options[:id_key] ||= nil + + method = if value_1.class == value_2.class + case value_1 + when Hash then :hash_diff + when Array then :array_diff + else :generic_diff + end + else + :generic_diff + end + send(method, value_1, value_2, options) + end + + # Optimized for reducing the noise from the tree hash of projects + # + def self.project_diff(project_1, project_2, key_1 = 'project_1', key_2 = 'project_2') + project_1 = project_1.to_tree_hash unless project_1.is_a?(Hash) + project_2 = project_2.to_tree_hash unless project_2.is_a?(Hash) + options = { + :key_1 => key_1, + :key_2 => key_2, + :id_key => 'displayName', + } + diff(project_1, project_2, options) + end + + #-------------------------------------------------------------------------# + + public + + # @!group Type specific handlers + + # Computes the recursive difference of two hashes. + # + # @see diff + # + def self.hash_diff(value_1, value_2, options) + ensure_class(value_1, Hash) + ensure_class(value_2, Hash) + return nil if value_1 == value_2 + + result = {} + all_keys = (value_1.keys + value_2.keys).uniq + all_keys.each do |key| + key_value_1 = value_1[key] + key_value_2 = value_2[key] + diff = diff(key_value_1, key_value_2, options) + if diff + result[key] = diff if diff + end + end + if result.empty? + nil + else + result + end + end + + # Returns the recursive diff of two arrays. + # + # @see diff + # + def self.array_diff(value_1, value_2, options) + ensure_class(value_1, Array) + ensure_class(value_2, Array) + return nil if value_1 == value_2 + + new_objects_value_1 = (value_1 - value_2) + new_objects_value_2 = (value_2 - value_1) + return nil if value_1.empty? && value_2.empty? + + matched_diff = {} + if id_key = options[:id_key] + matched_value_1 = [] + matched_value_2 = [] + new_objects_value_1.each do |entry_value_1| + if entry_value_1.is_a?(Hash) + id_value = entry_value_1[id_key] + entry_value_2 = new_objects_value_2.find do |entry| + entry[id_key] == id_value + end + if entry_value_2 + matched_value_1 << entry_value_1 + matched_value_2 << entry_value_2 + diff = diff(entry_value_1, entry_value_2, options) + matched_diff[id_value] = diff if diff + end + end + end + + new_objects_value_1 -= matched_value_1 + new_objects_value_2 -= matched_value_2 + end + + if new_objects_value_1.empty? && new_objects_value_2.empty? + if matched_diff.empty? + nil + else + matched_diff + end + else + result = {} + result[options[:key_1]] = new_objects_value_1 unless new_objects_value_1.empty? + result[options[:key_2]] = new_objects_value_2 unless new_objects_value_2.empty? + result[:diff] = matched_diff unless matched_diff.empty? + result + end + end + + # Returns the diff of two generic objects. + # + # @see diff + # + def self.generic_diff(value_1, value_2, options) + return nil if value_1 == value_2 + + { + options[:key_1] => value_1, + options[:key_2] => value_2, + } + end + + #-------------------------------------------------------------------------# + + public + + # @!group Cleaning + + # Returns a copy of the hash where the given key is removed recursively. + # + # @param [Hash] hash + # The hash to clean + # + # @param [Object] key + # The key to remove. + # + # @return [Hash] A copy of the hash without the key. + # + def self.clean_hash(hash, key) + new_hash = hash.dup + self.clean_hash!(new_hash, key) + new_hash + end + + # Recursively cleans a key from the given hash. 
+ # + # @param [Hash] hash + # The hash to clean + # + # @param [Object] key + # The key to remove. + # + # @return [void] + # + def self.clean_hash!(hash, key) + hash.delete(key) + hash.each do |_, value| + case value + when Hash + clean_hash!(value, key) + when Array + value.each { |entry| clean_hash!(entry, key) if entry.is_a?(Hash) } + end + end + end + + #-------------------------------------------------------------------------# + + private + + # @! Helpers + + # Ensures that the given object belongs to the given class. + # + # @param [Object] object + # The object to check. + # + # @param [Class] klass + # the expected class of the object. + # + # @raise If the object doesn't belong to the given class. + # + # @return [void] + # + def self.ensure_class(object, klass) + raise "Wrong type `#{object.inspect}`" unless object.is_a?(klass) + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/gem_version.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/gem_version.rb new file mode 100644 index 0000000..44093ce --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/gem_version.rb @@ -0,0 +1,5 @@ +module Xcodeproj + # The version of the xcodeproj gem. + # + VERSION = '1.21.0'.freeze unless defined? Xcodeproj::VERSION +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/helper.rb new file mode 100644 index 0000000..baaa80d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/helper.rb @@ -0,0 +1,30 @@ +module Xcodeproj + module Helper + class TargetDiff + attr_reader :project, :target1, :target2 + + def initialize(project, target1_name, target2_name) + @project = project + unless @target1 = @project.targets.find { |target| target.name == target1_name } + raise ArgumentError, "Target 1 by name `#{target1_name}' not found in the project." + end + unless @target2 = @project.targets.find { |target| target.name == target2_name } + raise ArgumentError, "Target 1 by name `#{target2_name}' not found in the project." + end + end + + # @return [Array<PBXBuildFile>] A list of source files (that will be + # compiled) which are in ‘target 2’ but not in ‘target 1’. The + # list is sorted by file path. + # + def new_source_build_files + new = @target2.source_build_phase.files.reject do |target2_build_file| + @target1.source_build_phase.files.any? do |target1_build_file| + target1_build_file.file_ref.path == target2_build_file.file_ref.path + end + end + new.sort_by { |build_file| build_file.file_ref.path } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/plist.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/plist.rb new file mode 100644 index 0000000..50b0137 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/plist.rb @@ -0,0 +1,94 @@ +autoload :Nanaimo, 'nanaimo' +autoload :CFPropertyList, 'cfpropertylist' + +module Xcodeproj + # Provides support for loading and serializing property list files. + # + module Plist + # @return [Hash] Returns the native objects loaded from a property list + # file. + # + # @param [#to_s] path + # The path of the file. + # + def self.read_from_path(path) + path = path.to_s + unless File.exist?(path) + raise Informative, "The plist file at path `#{path}` doesn't exist." 
+ end + contents = File.read(path) + if file_in_conflict?(contents) + raise Informative, "The file `#{path}` is in a merge conflict." + end + case Nanaimo::Reader.plist_type(contents) + when :xml, :binary + CFPropertyList.native_types(CFPropertyList::List.new(:data => contents).value) + else + Nanaimo::Reader.new(contents).parse!.as_ruby + end + end + + # Serializes a hash as an XML property list file. + # + # @param [#to_hash] hash + # The hash to store. + # + # @param [#to_s] path + # The path of the file. + # + def self.write_to_path(hash, path) + if hash.respond_to?(:to_hash) + hash = hash.to_hash + else + raise TypeError, "The given `#{hash.inspect}` must respond " \ + "to #to_hash'." + end + + unless path.is_a?(String) || path.is_a?(Pathname) + raise TypeError, "The given `#{path}` must be a string or 'pathname'." + end + path = path.to_s + raise IOError, 'Empty path.' if path.empty? + + File.open(path, 'w') do |f| + plist = Nanaimo::Plist.new(hash, :xml) + Nanaimo::Writer::XMLWriter.new(plist, :pretty => true, :output => f, :strict => false).write + end + end + + # The known modules that can serialize plists. + # + KNOWN_IMPLEMENTATIONS = [] + + class << self + # @deprecated This method will be removed in 2.0 + # + # @return [Nil] + # + attr_accessor :implementation + end + + # @deprecated This method will be removed in 2.0 + # + # @return [Nil] + # + def self.autoload_implementation + end + + # @return [Bool] Checks whether there are merge conflicts in the file. + # + # @param [#to_s] contents + # The contents of the file. + # + def self.file_in_conflict?(contents) + conflict_regex = / + ^<{7}(?!<) # Exactly 7 left arrows at the beginning of the line + [\w\W]* # Anything + ^={7}(?!=) # Exactly 7 equality symbols at the beginning of the line + [\w\W]* # Anything + ^>{7}(?!>) # Exactly 7 right arrows at the beginning of the line + /xm + contents.match(conflict_regex) + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project.rb new file mode 100644 index 0000000..c187fbb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project.rb @@ -0,0 +1,870 @@ +# frozen_string_literal: true +require 'atomos' +require 'fileutils' +require 'securerandom' + +require 'xcodeproj/project/object' +require 'xcodeproj/project/project_helper' +require 'xcodeproj/project/uuid_generator' +require 'xcodeproj/plist' + +module Xcodeproj + # This class represents a Xcode project document. + # + # It can be used to manipulate existing documents or even create new ones + # from scratch. + # + # An Xcode project document is a plist file where the root is a dictionary + # containing the following keys: + # + # - archiveVersion: the version of the document. + # - objectVersion: the version of the objects description. + # - classes: a key that apparently is always empty. + # - objects: a dictionary where the UUID of every object is associated to + # its attributes. + # - rootObject: the UUID identifier of the root object ({PBXProject}). + # + # Every object is in turn a dictionary that specifies an `isa` (the class of + # the object) and in accordance to it maintains a set attributes. Those + # attributes might reference one or more other objects by UUID. If the + # reference is a collection, it is ordered. + # + # The {Project} API returns instances of {AbstractObject} which wrap the + # objects described in the Xcode project document. 
All the attributes types + # are preserved from the plist, except for the relationships which are + # replaced with objects instead of UUIDs. + # + # An object might be referenced by multiple objects, an when no other object + # is references it, it becomes unreachable (the root object is referenced by + # the project itself). Xcodeproj takes care of adding and removing those + # objects from the `objects` dictionary so the project is always in a + # consistent state. + # + class Project + include Object + + # @return [Pathname] the path of the project. + # + attr_reader :path + + # @return [Pathname] the directory of the project + # + attr_reader :project_dir + + # @param [Pathname, String] path @see path + # The path provided will be expanded to an absolute path. + # @param [Bool] skip_initialization + # Wether the project should be initialized from scratch. + # @param [Int] object_version + # Object version to use for serialization, defaults to Xcode 3.2 compatible. + # + # @example Creating a project + # Project.new("path/to/Project.xcodeproj") + # + # @note When initializing the project, Xcodeproj mimics the Xcode behaviour + # including the setup of a debug and release configuration. If you want a + # clean project without any configurations, you should override the + # `initialize_from_scratch` method to not add these configurations and + # manually set the object version. + # + def initialize(path, skip_initialization = false, object_version = Constants::DEFAULT_OBJECT_VERSION) + @path = Pathname.new(path).expand_path + @project_dir = @path.dirname + @objects_by_uuid = {} + @generated_uuids = [] + @available_uuids = [] + @dirty = true + unless skip_initialization.is_a?(TrueClass) || skip_initialization.is_a?(FalseClass) + raise ArgumentError, '[Xcodeproj] Initialization parameter expected to ' \ + "be a boolean #{skip_initialization}" + end + unless skip_initialization + initialize_from_scratch + @object_version = object_version.to_s + unless Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION.key?(object_version) + raise ArgumentError, "[Xcodeproj] Unable to find compatibility version string for object version `#{object_version}`." + end + root_object.compatibility_version = Constants::COMPATIBILITY_VERSION_BY_OBJECT_VERSION[object_version] + end + end + + # Opens the project at the given path. + # + # @param [Pathname, String] path + # The path to the Xcode project document (xcodeproj). + # + # @raise If the project versions are more recent than the ones know to + # Xcodeproj to prevent it from corrupting existing projects. + # Naturally, this would never happen with a project generated by + # xcodeproj itself. + # + # @raise If it can't find the root object. This means that the project is + # malformed. + # + # @example Opening a project + # Project.open("path/to/Project.xcodeproj") + # + def self.open(path) + path = Pathname.pwd + path + unless Pathname.new(path).exist? + raise "[Xcodeproj] Unable to open `#{path}` because it doesn't exist." + end + project = new(path, true) + project.send(:initialize_from_file) + project + end + + # @return [String] the archive version. + # + attr_reader :archive_version + + # @return [Hash] an dictionary whose purpose is unknown. + # + attr_reader :classes + + # @return [String] the objects version. + # + attr_reader :object_version + + # @return [Hash{String => AbstractObject}] A hash containing all the + # objects of the project by UUID. + # + attr_reader :objects_by_uuid + + # @return [PBXProject] the root object of the project. 
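# A short, illustrative sketch of opening an existing project and reading the
# accessors documented around here; the .xcodeproj path is hypothetical.
require 'xcodeproj'

project = Xcodeproj::Project.open('path/to/Example.xcodeproj')
project.root_object.isa                  # => "PBXProject"
project.targets.map(&:name)              # target names, via root_object.targets
project.build_configurations.map(&:name) # typically ["Debug", "Release"]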
+ # + attr_reader :root_object + + # A fast way to see if two {Project} instances refer to the same projects on + # disk. Use this over {#eql?} when you do not need to compare the full data. + # + # This shallow comparison was chosen as the (common) `==` implementation, + # because it was too easy to introduce changes into the Xcodeproj code-base + # that were slower than O(1). + # + # @return [Boolean] whether or not the two `Project` instances refer to the + # same projects on disk, determined solely by {#path} and + # `root_object.uuid` equality. + # + # @todo If ever needed, we could also compare `uuids.sort` instead. + # + def ==(other) + other && path == other.path && root_object.uuid == other.root_object.uuid + end + + # Compares the project to another one, or to a plist representation. + # + # @note This operation can be extremely expensive, because it converts a + # `Project` instance to a hash, and should _only_ ever be used to + # determine wether or not the data contents of two `Project` instances + # are completely equal. + # + # To simply determine wether or not two {Project} instances refer to + # the same projects on disk, use the {#==} method instead. + # + # @param [#to_hash] other the object to compare. + # + # @return [Boolean] whether the project is equivalent to the given object. + # + def eql?(other) + other.respond_to?(:to_hash) && to_hash == other.to_hash + end + + def to_s + "#<#{self.class}> path:`#{path}` UUID:`#{root_object.uuid}`" + end + + alias_method :inspect, :to_s + + public + + # @!group Initialization + #-------------------------------------------------------------------------# + + # Initializes the instance from scratch. + # + def initialize_from_scratch + @archive_version = Constants::LAST_KNOWN_ARCHIVE_VERSION.to_s + @classes = {} + + root_object.remove_referrer(self) if root_object + @root_object = new(PBXProject) + root_object.add_referrer(self) + + root_object.main_group = new(PBXGroup) + root_object.product_ref_group = root_object.main_group.new_group('Products') + + config_list = new(XCConfigurationList) + root_object.build_configuration_list = config_list + config_list.default_configuration_name = 'Release' + config_list.default_configuration_is_visible = '0' + add_build_configuration('Debug', :debug) + add_build_configuration('Release', :release) + + new_group('Frameworks') + end + + # Initializes the instance with the project stored in the `path` attribute. + # + def initialize_from_file + pbxproj_path = path + 'project.pbxproj' + plist = Plist.read_from_path(pbxproj_path.to_s) + root_object.remove_referrer(self) if root_object + @root_object = new_from_plist(plist['rootObject'], plist['objects'], self) + @archive_version = plist['archiveVersion'] + @object_version = plist['objectVersion'] + @classes = plist['classes'] || {} + @dirty = false + + unless root_object + raise "[Xcodeproj] Unable to find a root object in #{pbxproj_path}." + end + + if archive_version.to_i > Constants::LAST_KNOWN_ARCHIVE_VERSION + raise "[Xcodeproj] Unknown archive version (#{archive_version.to_i})." + end + + if object_version.to_i > Constants::LAST_KNOWN_OBJECT_VERSION + raise "[Xcodeproj] Unknown object version (#{object_version.to_i})." 
+ end + + # Projects can have product_ref_groups that are not listed in the main_groups["Products"] + root_object.product_ref_group ||= root_object.main_group['Products'] || root_object.main_group.new_group('Products') + end + + public + + # @!group Plist serialization + #-------------------------------------------------------------------------# + + # Creates a new object from the given UUID and `objects` hash (of a plist). + # + # The method sets up any relationship of the new object, generating the + # destination object(s) if not already present in the project. + # + # @note This method is used to generate the root object + # from a plist. Subsequent invocation are called by the + # {AbstractObject#configure_with_plist}. Clients of {Xcodeproj} are + # not expected to call this method. + # + # @param [String] uuid + # The UUID of the object that needs to be generated. + # + # @param [Hash {String => Hash}] objects_by_uuid_plist + # The `objects` hash of the plist representation of the project. + # + # @param [Boolean] root_object + # Whether the requested object is the root object and needs to be + # retained by the project before configuration to add it to the + # `objects` hash and avoid infinite loops. + # + # @return [AbstractObject] the new object. + # + # @visibility private. + # + def new_from_plist(uuid, objects_by_uuid_plist, root_object = false) + attributes = objects_by_uuid_plist[uuid] + if attributes + klass = Object.const_get(attributes['isa']) + object = klass.new(self, uuid) + objects_by_uuid[uuid] = object + object.add_referrer(self) if root_object + object.configure_with_plist(objects_by_uuid_plist) + object + end + end + + # @return [Hash] The hash representation of the project. + # + def to_hash + plist = {} + objects_dictionary = {} + objects.each { |obj| objects_dictionary[obj.uuid] = obj.to_hash } + plist['objects'] = objects_dictionary + plist['archiveVersion'] = archive_version.to_s + plist['objectVersion'] = object_version.to_s + plist['classes'] = classes + plist['rootObject'] = root_object.uuid + plist + end + + def to_ascii_plist + plist = {} + objects_dictionary = {} + objects + .sort_by { |o| [o.isa, o.uuid] } + .each do |obj| + key = Nanaimo::String.new(obj.uuid, obj.ascii_plist_annotation) + value = obj.to_ascii_plist.tap { |a| a.annotation = nil } + objects_dictionary[key] = value + end + plist['archiveVersion'] = archive_version.to_s + plist['classes'] = classes + plist['objectVersion'] = object_version.to_s + plist['objects'] = objects_dictionary + plist['rootObject'] = Nanaimo::String.new(root_object.uuid, root_object.ascii_plist_annotation) + Nanaimo::Plist.new.tap { |p| p.root_object = plist } + end + + # Converts the objects tree to a hash substituting the hash + # of the referenced to their UUID reference. As a consequence the hash of + # an object might appear multiple times and the information about their + # uniqueness is lost. + # + # This method is designed to work in conjunction with {Hash#recursive_diff} + # to provide a complete, yet readable, diff of two projects *not* affected + # by differences in UUIDs. + # + # @return [Hash] a hash representation of the project different from the + # plist one. 
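# An illustrative sketch (hypothetical paths): to_tree_hash is what
# Differ.project_diff feeds on, giving a UUID-independent comparison of two
# projects.
require 'xcodeproj'

before = Xcodeproj::Project.open('path/to/Before.xcodeproj')
after  = Xcodeproj::Project.open('path/to/After.xcodeproj')
Xcodeproj::Differ.project_diff(before, after, 'before', 'after')
# => nil when the trees match, otherwise a nested Hash keyed by 'before'/'after'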
+ # + def to_tree_hash + hash = {} + objects_dictionary = {} + hash['objects'] = objects_dictionary + hash['archiveVersion'] = archive_version.to_s + hash['objectVersion'] = object_version.to_s + hash['classes'] = classes + hash['rootObject'] = root_object.to_tree_hash + hash + end + + # @return [Hash{String => Hash}] A hash suitable to display the project + # to the user. + # + def pretty_print + build_configurations = root_object.build_configuration_list.build_configurations + { + 'File References' => root_object.main_group.pretty_print.values.first, + 'Targets' => root_object.targets.map(&:pretty_print), + 'Build Configurations' => build_configurations.sort_by(&:name).map(&:pretty_print), + } + end + + # Serializes the project in the xcodeproj format using the path provided + # during initialization or the given path (`xcodeproj` file). If a path is + # provided file references depending on the root of the project are not + # updated automatically, thus clients are responsible to perform any needed + # modification before saving. + # + # @param [String, Pathname] path + # The optional path where the project should be saved. + # + # @example Saving a project + # project.save + # project.save + # + # @return [void] + # + def save(save_path = nil) + save_path ||= path + @dirty = false if save_path == path + FileUtils.mkdir_p(save_path) + file = File.join(save_path, 'project.pbxproj') + Atomos.atomic_write(file) do |f| + Nanaimo::Writer::PBXProjWriter.new(to_ascii_plist, :pretty => true, :output => f, :strict => false).write + end + end + + # Marks the project as dirty, that is, modified from what is on disk. + # + # @return [void] + # + def mark_dirty! + @dirty = true + end + + # @return [Boolean] Whether this project has been modified since read from + # disk or saved. + # + def dirty? + @dirty == true + end + + # Replaces all the UUIDs in the project with deterministic MD5 checksums. + # + # @note The current sorting of the project is taken into account when + # generating the new UUIDs. + # + # @note This method should only be used for entirely machine-generated + # projects, as true UUIDs are useful for tracking changes in the + # project. + # + # @return [void] + # + def predictabilize_uuids + UUIDGenerator.new([self]).generate! + end + + # Replaces all the UUIDs in the list of provided projects with deterministic MD5 checksums. + # + # @param [Array<Project>] projects + # + # @note The current sorting of the project is taken into account when + # generating the new UUIDs. + # + # @note This method should only be used for entirely machine-generated + # projects, as true UUIDs are useful for tracking changes in the + # project. + # + # @return [void] + # + def self.predictabilize_uuids(projects) + UUIDGenerator.new(projects).generate! + end + + public + + # @!group Creating objects + #-------------------------------------------------------------------------# + + # Creates a new object with a suitable UUID. + # + # The object is only configured with the default values of the `:simple` + # attributes, for this reason it is better to use the convenience methods + # offered by the {AbstractObject} subclasses or by this class. + # + # @param [Class, String] klass + # The concrete subclass of AbstractObject for new object or its + # ISA. + # + # @return [AbstractObject] the new object. 
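# An illustrative sketch of Project#new (hypothetical file path): the returned
# object gets default simple attributes and a fresh UUID, but it still has to
# be attached to the object graph to become reachable; the new_file/new_group
# convenience helpers further below wrap this same pattern.
require 'xcodeproj'

project  = Xcodeproj::Project.new('Example.xcodeproj')
file_ref = project.new('PBXFileReference')   # the ISA string also works
file_ref.path = 'Sources/Example.swift'
project.main_group.children << file_ref      # attach it so it is referenced
project.save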
+ # + def new(klass) + if klass.is_a?(String) + klass = Object.const_get(klass) + end + object = klass.new(self, generate_uuid) + object.initialize_defaults + object + end + + # Generates a UUID unique for the project. + # + # @note UUIDs are not guaranteed to be generated unique because we need + # to trim the ones generated in the xcodeproj extension. + # + # @note Implementation detail: as objects usually are created serially + # this method creates a batch of UUID and stores the not colliding + # ones, so the search for collisions with known UUIDS (a + # performance bottleneck) is performed less often. + # + # @return [String] A UUID unique to the project. + # + def generate_uuid + generate_available_uuid_list while @available_uuids.empty? + @available_uuids.shift + end + + # @return [Array<String>] the list of all the generated UUIDs. + # + # @note Used for checking new UUIDs for duplicates with UUIDs already + # generated but used for objects which are not yet part of the + # `objects` hash but which might be added at a later time. + # + attr_reader :generated_uuids + + # Pre-generates the given number of UUIDs. Useful for optimizing + # performance when the rough number of objects that will be created is + # known in advance. + # + # @param [Integer] count + # the number of UUIDs that should be generated. + # + # @note This method might generated a minor number of uniques UUIDs than + # the given count, because some might be duplicated a thus will be + # discarded. + # + # @return [void] + # + def generate_available_uuid_list(count = 100) + new_uuids = (0..count).map { SecureRandom.hex(12).upcase } + uniques = (new_uuids - (@generated_uuids + uuids)) + @generated_uuids += uniques + @available_uuids += uniques + end + + public + + # @!group Convenience accessors + #-------------------------------------------------------------------------# + + # @return [Array<AbstractObject>] all the objects of the project. + # + def objects + objects_by_uuid.values + end + + # @return [Array<String>] all the UUIDs of the project. + # + def uuids + objects_by_uuid.keys + end + + # @return [Array<AbstractObject>] all the objects of the project with a + # given ISA. + # + def list_by_class(klass) + objects.select { |o| o.class == klass } + end + + # @return [PBXGroup] the main top-level group. + # + def main_group + root_object.main_group + end + + # @return [ObjectList<PBXGroup>] a list of all the groups in the + # project. + # + def groups + main_group.groups + end + + # Returns a group at the given subpath relative to the main group. + # + # @example + # frameworks = project['Frameworks'] + # frameworks.name #=> 'Frameworks' + # main_group.children.include? frameworks #=> True + # + # @param [String] group_path @see MobileCoreServices + # + # @return [PBXGroup] the group at the given subpath. + # + def [](group_path) + main_group[group_path] + end + + # @return [ObjectList<PBXFileReference>] a list of all the files in the + # project. + # + def files + objects.grep(PBXFileReference) + end + + # Returns the file reference for the given absolute path. + # + # @param [#to_s] absolute_path + # The absolute path of the file whose reference is needed. + # + # @return [PBXFileReference] The file reference. + # @return [Nil] If no file reference could be found. + # + def reference_for_path(absolute_path) + absolute_pathname = Pathname.new(absolute_path) + + unless absolute_pathname.absolute? 
+ raise ArgumentError, "Paths must be absolute #{absolute_path}" + end + + objects.find do |child| + child.isa == 'PBXFileReference' && child.real_path == absolute_pathname + end + end + + # @return [ObjectList<AbstractTarget>] A list of all the targets in the + # project. + # + def targets + root_object.targets + end + + # @return [ObjectList<PBXNativeTarget>] A list of all the targets in the + # project excluding aggregate targets. + # + def native_targets + root_object.targets.grep(PBXNativeTarget) + end + + # Checks the native target for any targets in the project + # that are dependent on the native target and would be + # embedded in it at build time + # + # @param [PBXNativeTarget] native target to check for + # embedded targets + # + # + # @return [Array<PBXNativeTarget>] A list of all targets that + # are embedded in the passed in target + # + def embedded_targets_in_native_target(native_target) + native_targets.select do |target| + host_targets_for_embedded_target(target).any? { |host| host.uuid == native_target.uuid } + end + end + + # Returns the native targets, in which the embedded target is + # embedded. This works by traversing the targets to find those + # where the target is a dependency. + # + # @param [PBXNativeTarget] native target that might be embedded + # in another target + # + # @return [Array<PBXNativeTarget>] the native targets that host the + # embedded target + # + def host_targets_for_embedded_target(embedded_target) + native_targets.select do |native_target| + ((embedded_target.uuid != native_target.uuid) && + (native_target.dependencies.map(&:native_target_uuid).include? embedded_target.uuid)) + end + end + + # @return [PBXGroup] The group which holds the product file references. + # + def products_group + root_object.product_ref_group + end + + # @return [ObjectList<PBXFileReference>] A list of the product file + # references. + # + def products + products_group.children + end + + # @return [PBXGroup] the `Frameworks` group creating it if necessary. + # + def frameworks_group + main_group['Frameworks'] || main_group.new_group('Frameworks') + end + + # @return [ObjectList<XCConfigurationList>] The build configuration list of + # the project. + # + def build_configuration_list + root_object.build_configuration_list + end + + # @return [ObjectList<XCBuildConfiguration>] A list of project wide + # build configurations. + # + def build_configurations + root_object.build_configuration_list.build_configurations + end + + # Returns the build settings of the project wide build configuration with + # the given name. + # + # @param [String] name + # The name of a project wide build configuration. + # + # @return [Hash] The build settings. + # + def build_settings(name) + root_object.build_configuration_list.build_settings(name) + end + + public + + # @!group Helpers + #-------------------------------------------------------------------------# + + # Creates a new file reference in the main group. + # + # @param @see PBXGroup#new_file + # + # @return [PBXFileReference] the new file. + # + def new_file(path, source_tree = :group) + main_group.new_file(path, source_tree) + end + + # Creates a new group at the given subpath of the main group. + # + # @param @see PBXGroup#new_group + # + # @return [PBXGroup] the new group. + # + def new_group(name, path = nil, source_tree = :group) + main_group.new_group(name, path, source_tree) + end + + # Creates a new target and adds it to the project. 
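# An illustrative sketch of the target helpers documented below; the target
# names, platform and deployment target are hypothetical.
require 'xcodeproj'

project = Xcodeproj::Project.new('Example.xcodeproj')
app = project.new_target(:application, 'ExampleApp', :ios, '13.0')
kit = project.new_target(:framework,   'ExampleKit', :ios, '13.0')
app.add_dependency(kit)   # wire the framework in as a dependency
project.save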
+ # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings, and the + # appropriate Framework according to the platform is added to to its + # Frameworks phase. + # + # @param [Symbol] type + # the type of target. Can be `:application`, `:framework`, + # `:dynamic_library` or `:static_library`. + # + # @param [String] name + # the name of the target product. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [PBXGroup] product_group + # the product group, where to add to a file reference of the + # created target. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. + # + # @return [PBXNativeTarget] the target. + # + def new_target(type, name, platform, deployment_target = nil, product_group = nil, language = nil, product_basename = nil) + product_group ||= products_group + product_basename ||= name + ProjectHelper.new_target(self, type, name, platform, deployment_target, product_group, language, product_basename) + end + + # Creates a new resource bundles target and adds it to the project. + # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings + # + # @param [String] name + # the name of the resources bundle. + # + # @param [Symbol] platform + # the platform of the resources bundle. Can be `:ios` or `:osx`. + # + # @return [PBXNativeTarget] the target. + # + def new_resources_bundle(name, platform, product_group = nil, product_basename = nil) + product_group ||= products_group + product_basename ||= name + ProjectHelper.new_resources_bundle(self, name, platform, product_group, product_basename) + end + + # Creates a new target and adds it to the project. + # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings, and the + # appropriate Framework according to the platform is added to to its + # Frameworks phase. + # + # @param [String] name + # the name of the target. + # + # @param [Array<AbstractTarget>] target_dependencies + # targets, which should be added as dependencies. + # + # @param [Symbol] platform + # the platform of the aggregate target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @return [PBXNativeTarget] the target. + # + def new_aggregate_target(name, target_dependencies = [], platform = nil, deployment_target = nil) + ProjectHelper.new_aggregate_target(self, name, platform, deployment_target).tap do |aggregate_target| + target_dependencies.each do |dep| + aggregate_target.add_dependency(dep) + end + end + end + + # Adds a new build configuration to the project and populates its with + # default settings according to the provided type. + # + # @param [String] name + # The name of the build configuration. + # + # @param [Symbol] type + # The type of the build configuration used to populate the build + # settings, must be :debug or :release. + # + # @return [XCBuildConfiguration] The new build configuration. 
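# An illustrative sketch (hypothetical configuration name): the new
# configuration starts out with the default settings for its type and can then
# be customised through its build_settings hash.
require 'xcodeproj'

project = Xcodeproj::Project.new('Example.xcodeproj')
staging = project.add_build_configuration('Staging', :release)
staging.build_settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS'] = 'STAGING'
project.save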
+ # + def add_build_configuration(name, type) + build_configuration_list = root_object.build_configuration_list + if build_configuration = build_configuration_list[name] + build_configuration + else + build_configuration = new(XCBuildConfiguration) + build_configuration.name = name + common_settings = Constants::PROJECT_DEFAULT_BUILD_SETTINGS + settings = ProjectHelper.deep_dup(common_settings[:all]) + settings.merge!(ProjectHelper.deep_dup(common_settings[type])) + build_configuration.build_settings = settings + build_configuration_list.build_configurations << build_configuration + build_configuration + end + end + + # Sorts the project. + # + # @param [Hash] options + # the sorting options. + # @option options [Symbol] :groups_position + # the position of the groups can be either `:above` or `:below`. + # + # @return [void] + # + def sort(options = nil) + root_object.sort_recursively(options) + end + + public + + # @!group Schemes + #-------------------------------------------------------------------------# + + # Get list of shared schemes in project + # + # @param [String] path + # project path + # + # @return [Array] + # + def self.schemes(project_path) + schemes = Dir[File.join(project_path, 'xcshareddata', 'xcschemes', '*.xcscheme')].map do |scheme| + File.basename(scheme, '.xcscheme') + end + schemes << File.basename(project_path, '.xcodeproj') if schemes.empty? + schemes + end + + # Recreates the user schemes of the project from scratch (removes the + # folder) and optionally hides them. + # + # @param [Bool] visible + # Whether the schemes should be visible or hidden. + # + # @return [void] + # + def recreate_user_schemes(visible = true) + schemes_dir = XCScheme.user_data_dir(path) + FileUtils.rm_rf(schemes_dir) + FileUtils.mkdir_p(schemes_dir) + + xcschememanagement = {} + xcschememanagement['SchemeUserState'] = {} + xcschememanagement['SuppressBuildableAutocreation'] = {} + + targets.each do |target| + scheme = XCScheme.new + + test_target = target if target.respond_to?(:test_target_type?) && target.test_target_type? + launch_target = target.respond_to?(:launchable_target_type?) && target.launchable_target_type? + scheme.configure_with_targets(target, test_target, :launch_target => launch_target) + + yield scheme, target if block_given? + scheme.save_as(path, target.name, false) + xcschememanagement['SchemeUserState']["#{target.name}.xcscheme"] = {} + xcschememanagement['SchemeUserState']["#{target.name}.xcscheme"]['isShown'] = visible + end + + xcschememanagement_path = schemes_dir + 'xcschememanagement.plist' + Plist.write_to_path(xcschememanagement, xcschememanagement_path) + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/case_converter.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/case_converter.rb new file mode 100644 index 0000000..1f2fe1d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/case_converter.rb @@ -0,0 +1,90 @@ +module Xcodeproj + class Project + module Object + # Converts between camel case names used in the xcodeproj plist files + # and the ruby symbols used to represent them. + # + module CaseConverter + # @return [String] The plist equivalent of the given Ruby name. + # + # @param [Symbol, String] name + # The name to convert + # + # @param [Symbol, Nil] type + # The type of conversion. 
Pass `nil` for normal camel case and + # `:lower` for camel case starting with a lower case letter. + # + # @example + # CaseConverter.convert_to_plist(:project_ref) #=> ProjectRef + # + def self.convert_to_plist(name, type = nil) + case name + when :remote_global_id_string + 'remoteGlobalIDString' + else + if type == :lower + cache = plist_cache[:lower] ||= {} + cache[name] ||= camelize(name, :uppercase_first_letter => false) + else + cache = plist_cache[:normal] ||= {} + cache[name] ||= camelize(name) + end + end + end + + # The following two methods are taken from activesupport, + # https://github.com/rails/rails/blob/v5.0.2/activesupport/lib/active_support/inflector/methods.rb + # all credit for them goes to the original authors. + # + # The code is used under the MIT license. + + def self.camelize(term, uppercase_first_letter: true) + string = term.to_s + string = if uppercase_first_letter + string.sub(/^[a-z\d]*/, &:capitalize) + else + string.sub(/^(?:(?=a)b(?=\b|[A-Z_])|\w)/, &:downcase) + end + string.gsub!(%r{(?:_|(/))([a-z\d]*)}i) { "#{Regexp.last_match(1)}#{Regexp.last_match(2).capitalize}" } + string.gsub!('/'.freeze, '::'.freeze) + string + end + private_class_method :camelize + + def self.underscore(camel_cased_word) + return camel_cased_word unless camel_cased_word =~ /[A-Z-]|::/ + word = camel_cased_word.to_s.gsub('::'.freeze, '/'.freeze) + word.gsub!(/(?:(?<=([A-Za-z\d]))|\b)((?=a)b)(?=\b|[^a-z])/) { "#{Regexp.last_match(1) && '_'.freeze}#{Regexp.last_match(2).downcase}" } + word.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2'.freeze) + word.gsub!(/([a-z\d])([A-Z])/, '\1_\2'.freeze) + word.tr!('-'.freeze, '_'.freeze) + word.downcase! + word + end + private_class_method :underscore + + # @return [Symbol] The Ruby equivalent of the given plist name. + # + # @param [String] name + # The name to convert + # + # @example + # CaseConverter.convert_to_ruby('ProjectRef') #=> :project_ref + # + def self.convert_to_ruby(name) + underscore(name.to_s).to_sym + end + + # @return [Hash] A cache for the conversion to the Plist format. + # + # @note A cache is used because this operation is performed for each + # attribute of the project when it is saved and caching it has + # an important performance benefit. + # + def self.plist_cache + @plist_cache ||= {} + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object.rb new file mode 100644 index 0000000..9231ef8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object.rb @@ -0,0 +1,534 @@ +module Xcodeproj + class Project + # This is the namespace in which all the classes that wrap the objects in + # a Xcode project reside. + # + # The base class from which all classes inherit is AbstractObject. + # + # If you need to deal with these classes directly, it's possible to include + # this namespace into yours, making it unnecessary to prefix them with + # Xcodeproj::Project::Object. + # + # @example + # class SourceFileSorter + # include Xcodeproj::Project::Object + # end + # + module Object + # @abstract + # + # This is the base class of all object types that can exist in a Xcode + # project. As such it provides common behavior, but you can only use + # instances of subclasses of AbstractObject, because this class does + # not exist in actual Xcode projects. + # + # Almost all the methods implemented by this class are not expected to be + # used by {Xcodeproj} clients. 
+ # + # Subclasses should clearly identify which methods reflect the xcodeproj + # document model and which methods are offered as convenience. Object + # lists always represent a relationship to many of the model while simple + # arrays represent dynamically generated values offered a convenience for + # clients. + # + class AbstractObject + # @!group AbstractObject + + # @return [String] the ISA of the class. + # + def self.isa + @isa ||= name.split('::').last + end + + # @return [String] the object's class name. + # + attr_reader :isa + + # It is not recommended to instantiate objects through this + # constructor. To create objects manually is easier to use + # the {Project#new}. Otherwise, it is possible to use the convenience + # methods offered by {Xcodeproj} which take care of configuring the + # objects for common usage cases. + # + # @param [Project] project + # the project that will host the object. + # + # @param [String] uuid + # the UUID of the new object. + # + # @visibility private + # + def initialize(project, uuid) + @project = project + @uuid = uuid + @isa = self.class.isa + @referrers = [] + unless @isa =~ /^(PBX|XC)/ + raise "[Xcodeproj] Attempt to initialize an abstract class (#{self.class})." + end + end + + # Initializes the object with the default values of simple attributes. + # + # This method is called by the {Project#new} and is not performed on + # initialization to prevent adding defaults to objects generated by a + # plist. + # + # @return [void] + # + # @visibility private + # + def initialize_defaults + simple_attributes.each { |a| a.set_default(self) } + end + + # @return [String] the object universally unique identifier. + # + attr_reader :uuid + + # @return [Project] the project that owns the object. + # + attr_reader :project + + # Removes the object from the project by asking to its referrers to + # remove the reference to it. + # + # @note The root object is owned by the project and should not be + # manipulated with this method. + # + # @return [void] + # + def remove_from_project + mark_project_as_dirty! + project.objects_by_uuid.delete(uuid) + + referrers.dup.each do |referrer| + referrer.remove_reference(self) + end + + to_one_attributes.each do |attrb| + object = attrb.get_value(self) + object.remove_referrer(self) if object + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.clear + end + + unless referrers.count == 0 + raise "[Xcodeproj] BUG: #{self} should have no referrers instead" \ + "the following objects are still referencing it #{referrers}" + end + end + + # Returns the value of the name attribute or returns a generic name for + # the object. + # + # @note Not all concrete classes implement the name attribute and this + # method prevents from overriding it in plist. + # + # @return [String] a name for the object. + # + def display_name + declared_name = name if self.respond_to?(:name) + if declared_name && !declared_name.empty? + declared_name + else + isa.gsub(/^(PBX|XC)/, '') + end + end + alias_method :to_s, :display_name + + # Sorts the to many attributes of the object according to the display + # name. + # + def sort(_options = nil) + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.sort! do |x, y| + x.display_name.downcase <=> y.display_name.downcase + end + end + end + + # Sorts the object and the objects that it references. + # + # @param [Hash] options + # the sorting options. 
+ # @option options [Symbol] :groups_position + # the position of the groups can be either `:above` or + # `:below`. + # + # @note Some objects may in turn refer back to objects higher in the + # object tree, which will lead to stack level deep errors. + # These objects should **not** try to perform a recursive sort, + # also because these objects would get sorted through other + # paths in the tree anyways. + # + # At the time of writing the only known case is + # `PBXTargetDependency`. + # + def sort_recursively(options = nil) + to_one_attributes.each do |attrb| + value = attrb.get_value(self) + value.sort_recursively(options) if value + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.each { |entry| entry.sort_recursively(options) } + end + + sort(options) + end + + # @!group Reference counting + + # @return [Array<ObjectList>] The list of the objects that have a + # reference to this object. + # + # @visibility private + # + attr_reader :referrers + + # Informs the object that another object is referencing it. If the + # object had no previous references it is added to the project UUIDs + # hash. + # + # @return [void] + # + # @visibility private + # + def add_referrer(referrer) + @referrers << referrer + @project.objects_by_uuid[uuid] = self + end + + # Informs the object that another object stopped referencing it. If the + # object has no other references it is removed from the project UUIDs + # hash because it is unreachable. + # + # @return [void] + # + # @visibility private + # + def remove_referrer(referrer) + @referrers.delete(referrer) + if @referrers.count == 0 + mark_project_as_dirty! + @project.objects_by_uuid.delete(uuid) + end + end + + # Removes all the references to a given object. + # + # @return [void] + # + # @visibility private + # + def remove_reference(object) + to_one_attributes.each do |attrb| + value = attrb.get_value(self) + attrb.set_value(self, nil) if value.equal?(object) + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + list.delete(object) + end + + references_by_keys_attributes.each do |attrb| + list = attrb.get_value(self) + list.each { |dictionary| dictionary.remove_reference(object) } + end + end + + # Marks the project that this object belongs to as having been modified. + # + # @return [void] + # + # @visibility private + # + def mark_project_as_dirty! + project.mark_dirty! + end + + #---------------------------------------------------------------------# + + public + + # @!group Plist related methods + + # Configures the object with the objects hash from a plist. + # + # **Implementation detail**: it is important that the attributes for a + # given concrete class are unique because the value is removed from the + # array at each iteration and duplicate would result in nil values. 
+ # + # @return [void] + # + # @visibility private + # + def configure_with_plist(objects_by_uuid_plist) + object_plist = objects_by_uuid_plist[uuid].dup + + unless object_plist['isa'] == isa + raise "[Xcodeproj] Attempt to initialize `#{isa}` from plist with " \ + "different isa `#{object_plist}`" + end + object_plist.delete('isa') + + simple_attributes.each do |attrb| + attrb.set_value(self, object_plist[attrb.plist_name]) + object_plist.delete(attrb.plist_name) + end + + to_one_attributes.each do |attrb| + ref_uuid = object_plist[attrb.plist_name] + if ref_uuid + ref = object_with_uuid(ref_uuid, objects_by_uuid_plist, attrb) + attrb.set_value(self, ref) if ref + end + object_plist.delete(attrb.plist_name) + end + + to_many_attributes.each do |attrb| + ref_uuids = object_plist[attrb.plist_name] || [] + list = attrb.get_value(self) + ref_uuids.each do |uuid| + ref = object_with_uuid(uuid, objects_by_uuid_plist, attrb) + list << ref if ref + end + object_plist.delete(attrb.plist_name) + end + + references_by_keys_attributes.each do |attrb| + hashes = object_plist[attrb.plist_name] || {} + list = attrb.get_value(self) + hashes.each do |hash| + dictionary = ObjectDictionary.new(attrb, self) + hash.each do |key, uuid| + ref = object_with_uuid(uuid, objects_by_uuid_plist, attrb) + dictionary[key] = ref if ref + end + list << dictionary + end + object_plist.delete(attrb.plist_name) + end + + unless object_plist.empty? + UI.warn "[!] Xcodeproj doesn't know about the following " \ + "attributes #{object_plist.inspect} for the '#{isa}' isa." \ + "\nIf this attribute was generated by Xcode please file " \ + 'an issue: https://github.com/CocoaPods/Xcodeproj/issues/new' + end + end + + # Initializes and returns the object with the given UUID. + # + # @param [String] uuid + # The UUID of the object that should be initialized. + # + # @param [Hash{String=>String}] objects_by_uuid_plist + # The hash contained by `objects` key of the plist containing + # the information about the object that should be initialized. + # + # @param [AbstractObjectAttribute] attribute + # The attribute that requested the object. It is used only for + # exceptions. + # + # @raise If the hash for the given UUID contains an unknown ISA. + # + # @return [AbstractObject] the initialized object. + # @return [Nil] if the UUID could not be found in the objects hash. In + # this case a warning is printed to STDERR. + # + # @visibility private + # + def object_with_uuid(uuid, objects_by_uuid_plist, attribute) + unless object = project.objects_by_uuid[uuid] || project.new_from_plist(uuid, objects_by_uuid_plist) + UI.warn "`#{inspect}` attempted to initialize an object with " \ + "an unknown UUID. `#{uuid}` for attribute: " \ + "`#{attribute.name}`. This can be the result of a merge and " \ + 'the unknown UUID is being discarded.' + end + object + rescue NameError + attributes = objects_by_uuid_plist[uuid] + raise "`#{isa}` attempted to initialize an object with unknown ISA "\ + "`#{attributes['isa']}` from attributes: `#{attributes}`\n" \ + 'If this ISA was generated by Xcode please file an issue: ' \ + 'https://github.com/CocoaPods/Xcodeproj/issues/new' + end + + # Returns a cascade representation of the object with UUIDs. + # + # @return [Hash] a hash representation of the project. + # + # @visibility public + # + # @note the key for simple and to_one attributes usually appears only + # if there is a value. To-many keys always appear with an empty + # array. 
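# An illustrative sketch (hypothetical project path): every object serialises
# itself back into its plist dictionary, which is how Project#to_hash puts the
# pbxproj together.
require 'xcodeproj'

project = Xcodeproj::Project.open('path/to/Example.xcodeproj')
target  = project.targets.first
hash    = target.to_hash
hash['isa']   # => "PBXNativeTarget" (or another concrete target ISA)
hash['name']  # the simple `name` attribute, present because it has a value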
+ # + def to_hash + to_hash_as + end + + def to_hash_as(method = :to_hash) + plist = {} + plist['isa'] = isa + + simple_attributes.each do |attrb| + value = attrb.get_value(self) + plist[attrb.plist_name] = value if value + end + + to_one_attributes.each do |attrb| + obj = attrb.get_value(self) + plist[attrb.plist_name] = nested_object_for_hash(obj, method) if obj + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + plist[attrb.plist_name] = list.map { |o| nested_object_for_hash(o, method) } + end + + references_by_keys_attributes.each do |attrb| + list = attrb.get_value(self) + plist[attrb.plist_name] = list.map(&method) + end + + plist + end + private :to_hash_as + + def nested_object_for_hash(object, method) + case method + when :to_ascii_plist + Nanaimo::String.new(object.uuid, object.ascii_plist_annotation) + else + object.uuid + end + end + + def ascii_plist_annotation + " #{display_name} " + end + + def to_ascii_plist + Nanaimo::Dictionary.new(to_hash_as(:to_ascii_plist), ascii_plist_annotation) + end + + # Returns a cascade representation of the object without UUIDs. + # + # This method is designed to work in conjunction with + # {Hash#recursive_diff} to provide a complete, yet readable, diff of + # two projects *not* affected by ISA differences. + # + # @todo The current implementation might lead to infinite loops. + # + # @return [Hash] a hash representation of the project different from + # the plist one. + # + # @visibility private + # + def to_tree_hash + hash = {} + hash['displayName'] = display_name + hash['isa'] = isa + + simple_attributes.each do |attrb| + value = attrb.get_value(self) + hash[attrb.plist_name] = value if value + end + + to_one_attributes.each do |attrb| + obj = attrb.get_value(self) + hash[attrb.plist_name] = obj.to_tree_hash if obj + end + + to_many_attributes.each do |attrb| + list = attrb.get_value(self) + hash[attrb.plist_name] = list.map(&:to_tree_hash) + end + + references_by_keys_attributes.each do |attrb| + list = attrb.get_value(self) + hash[attrb.plist_name] = list.map(&:to_tree_hash) + end + + hash + end + + # @return [Hash{String => Hash}] A hash suitable to display the object + # to the user. + # + def pretty_print + if to_many_attributes.count == 1 + children = to_many_attributes.first.get_value(self) + { display_name => children.map(&:pretty_print) } + else + display_name + end + end + + #---------------------------------------------------------------------# + + public + + # @!group Object methods + + def ==(other) + other.is_a?(AbstractObject) && to_hash == other.to_hash + end + + def <=>(other) + uuid <=> other.uuid + end + + def inspect + optional = '' + optional << " name=`#{name}`" if respond_to?(:name) && name + optional << " path=`#{path}`" if respond_to?(:path) && path + "<#{isa}#{optional} UUID=`#{uuid}`>" + end + end + end + end +end + +require 'xcodeproj/project/case_converter' +require 'xcodeproj/project/object_attributes' +require 'xcodeproj/project/object_dictionary' +require 'xcodeproj/project/object_list' + +# Required because some classes have cyclical references to each other. +# +# @todo I'm sure that there is a method to achieve the same result which +# doesn't present the risk of some rubist laughing at me :-) +# +Xcodeproj::Constants::KNOWN_ISAS.each do |superclass_name, isas| + superklass = Xcodeproj::Project::Object.const_get(superclass_name) + isas.each do |isa| + c = Class.new(superklass) + Xcodeproj::Project::Object.const_set(isa, c) + end +end + +# Now load the concrete subclasses. 
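# An illustrative aside: because of the loop above, every ISA listed in
# Constants::KNOWN_ISAS is available as a constant under
# Xcodeproj::Project::Object, each reporting its class name as the isa.
require 'xcodeproj'

Xcodeproj::Project::Object::PBXNativeTarget.isa    # => "PBXNativeTarget"
Xcodeproj::Project::Object::PBXFileReference.isa   # => "PBXFileReference"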
+require 'xcodeproj/project/object/swift_package_remote_reference' +require 'xcodeproj/project/object/swift_package_product_dependency' +require 'xcodeproj/project/object/build_configuration' +require 'xcodeproj/project/object/build_file' +require 'xcodeproj/project/object/build_phase' +require 'xcodeproj/project/object/build_rule' +require 'xcodeproj/project/object/configuration_list' +require 'xcodeproj/project/object/container_item_proxy' +require 'xcodeproj/project/object/file_reference' +require 'xcodeproj/project/object/group' +require 'xcodeproj/project/object/native_target' +require 'xcodeproj/project/object/root_object' +require 'xcodeproj/project/object/target_dependency' +require 'xcodeproj/project/object/reference_proxy' diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_configuration.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_configuration.rb new file mode 100644 index 0000000..664144f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_configuration.rb @@ -0,0 +1,255 @@ +module Xcodeproj + class Project + module Object + # Encapsulates the information a specific build configuration referenced + # by a {XCConfigurationList} which in turn might be referenced by a + # {PBXProject} or a {PBXNativeTarget}. + # + class XCBuildConfiguration < AbstractObject + MUTUAL_RECURSION_SENTINEL = 'xcodeproj.mutual_recursion_sentinel'.freeze + + private_constant :MUTUAL_RECURSION_SENTINEL + + # @!group Attributes + + # @return [String] the name of the Target. + # + attribute :name, String + + # @return [Hash] the build settings to use for building the target. + # + attribute :build_settings, Hash, {} + + # @return [PBXFileReference] an optional file reference to a + # configuration file (`.xcconfig`). + # + has_one :base_configuration_reference, PBXFileReference + + public + + # @!group AbstractObject Hooks + #---------------------------------------------------------------------# + + # @return [Hash{String => Hash}] A hash suitable to display the object + # to the user. + # + def pretty_print + data = {} + data['Build Settings'] = sorted_build_settings + if base_configuration_reference + data['Base Configuration'] = base_configuration_reference.pretty_print + end + { name => data } + end + + def to_hash_as(method = :to_hash) + super.tap do |hash| + normalize_array_settings(hash['buildSettings']) + end + end + + # Sorts the build settings. Valid only in Ruby > 1.9.2 because in + # previous versions the hash are not sorted. + # + # @return [void] + # + def sort(_options = nil) + self.build_settings = sorted_build_settings + end + + # @return [Boolean] Whether this configuration is configured for + # debugging. + # + def debug? + gcc_preprocessor_definitions = resolve_build_setting('GCC_PREPROCESSOR_DEFINITIONS') + gcc_preprocessor_definitions && gcc_preprocessor_definitions.include?('DEBUG=1') + end + + # @return [Symbol] The symbolic type of this configuration, either + # `:debug` or `:release`. + # + def type + debug? ? :debug : :release + end + + # @!group Helpers + #---------------------------------------------------------------------# + + # Gets the value for the given build setting considering any configuration + # file present and resolving inheritance between them. It also takes in + # consideration environment variables. + # + # @param [String] key + # the key of the build setting. 
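# An illustrative sketch of resolve_build_setting (hypothetical project path):
# it layers the xcconfig, project-level and target-level values and expands
# $(...) and $(inherited) references.
require 'xcodeproj'

project = Xcodeproj::Project.open('path/to/Example.xcodeproj')
target  = project.targets.first
release = target.build_configuration_list['Release']
release.resolve_build_setting('SWIFT_OPTIMIZATION_LEVEL', target)
# => "-O" for a configuration that keeps the default release settings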
+ # + # @param [PBXNativeTarget] root_target + # use this to resolve complete recursion between project and targets. + # + # @param [String] previous_key + # use this to resolve complete recursion between different build settings. + # + # @return [String] The value of the build setting + # + def resolve_build_setting(key, root_target = nil, previous_key = nil) + setting = build_settings[key] + setting = resolve_variable_substitution(key, setting, root_target, previous_key) + + config_setting = config[key] + config_setting = resolve_variable_substitution(key, config_setting, root_target, previous_key) + + project_setting = project.build_configuration_list[name] + project_setting = nil if equal?(project_setting) + project_setting &&= project_setting.resolve_build_setting(key, root_target) + + defaults = { + 'CONFIGURATION' => name, + 'SRCROOT' => project.project_dir.to_s, + } + + # if previous_key is nil, it means that we're back at the first call, so we can replace our sentinel string + # used to prevent recursion with nil + if previous_key.nil? && setting == MUTUAL_RECURSION_SENTINEL + setting = nil + end + + [defaults[key], project_setting, config_setting, setting, ENV[key]].compact.reduce(nil) do |inherited, value| + expand_build_setting(value, inherited) + end + end + + #---------------------------------------------------------------------# + + private + + VARIABLE_NAME_PATTERN = + '( # capture block + [_a-zA-Z0-9]+? # non-greedy lookup for everything contained in this list + )'.freeze + private_constant :VARIABLE_NAME_PATTERN + + CAPTURE_VARIABLE_IN_BUILD_CONFIG = / + \$ # matches dollar sign literally + (?: # non-capturing group + [{] # matches a single character on this list + #{VARIABLE_NAME_PATTERN} + [}] # matches a single character on this list + | # or + [(] # matches a single character on this list + #{VARIABLE_NAME_PATTERN} + [)] # matches a single character on this list + ) + /x + private_constant :CAPTURE_VARIABLE_IN_BUILD_CONFIG + + def expand_build_setting(build_setting_value, config_value) + if build_setting_value.is_a?(Array) && config_value.is_a?(String) + config_value = split_build_setting_array_to_string(config_value) + elsif build_setting_value.is_a?(String) && config_value.is_a?(Array) + build_setting_value = split_build_setting_array_to_string(build_setting_value) + end + + default = build_setting_value.is_a?(String) ? '' : [] + inherited = config_value || default + + return build_setting_value.gsub(Regexp.union(Constants::INHERITED_KEYWORDS), inherited) if build_setting_value.is_a? String + build_setting_value.flat_map { |value| Constants::INHERITED_KEYWORDS.include?(value) ? inherited : value } + end + + def resolve_variable_substitution(key, value, root_target, previous_key = nil) + case value + when Array + return value.map { |v| resolve_variable_substitution(key, v, root_target) } + when nil + return + when String + # we know how to resolve strings! 
+ nil + else + raise ArgumentError, "Settings values can only be nil, string, or array, got #{value.inspect} for #{key}" + end + + unless variable_match_data = value.match(CAPTURE_VARIABLE_IN_BUILD_CONFIG) + # no variables left, return the value unchanged + return value + end + variable_reference, variable = *variable_match_data.values_at(0, 1, 2).compact + + case variable + when 'inherited' + # this is handled separately, after resolving all other variable references + value + when key + # to prevent infinite recursion + nil + when previous_key + # to prevent infinite recursion; we don't return nil as for the self recursion because it needs to be + # distinguished outside this method too + MUTUAL_RECURSION_SENTINEL + else + configuration_to_resolve_against = root_target ? root_target.build_configuration_list[name] : self + resolved_value_for_variable = configuration_to_resolve_against.resolve_build_setting(variable, root_target, key) || '' + + # we use this sentinel string instead of nil, because, otherwise, it would be swallowed by the default empty + # string from the preceding line, and we want to distinguish between mutual recursion and other cases + if resolved_value_for_variable == MUTUAL_RECURSION_SENTINEL + return MUTUAL_RECURSION_SENTINEL + end + + value = value.gsub(variable_reference, resolved_value_for_variable) + resolve_variable_substitution(key, value, root_target) + end + end + + def sorted_build_settings + sorted = {} + build_settings.keys.sort.each do |key| + sorted[key] = build_settings[key] + end + sorted + end + + def normalize_array_settings(settings) + return unless settings + + array_settings = BuildSettingsArraySettingsByObjectVersion[project.object_version] + + settings.keys.each do |key| + next unless value = settings[key] + stripped_key = key.sub(/\[[^\]]+\]$/, '') + case value + when String + next unless array_settings.include?(stripped_key) + array_value = split_build_setting_array_to_string(value) + next unless array_value.size > 1 + settings[key] = array_value + when Array + next if value.size > 1 && array_settings.include?(stripped_key) + settings[key] = value.join(' ') + end + end + end + + def split_build_setting_array_to_string(string) + regexp = / *((['"]?).*?[^\\]\2)(?=( |\z))/ + string.scan(regexp).map(&:first) + end + + def config + return {} unless base_configuration_reference + @config ||= + if base_configuration_reference.real_path.exist? + Xcodeproj::Config.new(base_configuration_reference.real_path).to_hash.tap do |hash| + normalize_array_settings(hash) + end + else + {} + end + end + + #---------------------------------------------------------------------# + end + end + end +end + +require 'xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version' diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_file.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_file.rb new file mode 100644 index 0000000..2e6a9fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_file.rb @@ -0,0 +1,84 @@ +module Xcodeproj + class Project + module Object + # Contains the information about the build settings of a file used by an + # {AbstractBuildPhase}. + # + class PBXBuildFile < AbstractObject + # @!group Attributes + + # @return [Hash] the list of build settings for this file. + # + # @note The contents of this array depend on the phase of the build + # file. 
+ # + # For PBXHeadersBuildPhase is `{ "ATTRIBUTES" => [:value] }` + # where `:value` can be `Public`, `Private`, or nil + # (Protected). + # + attribute :settings, Hash + + # @return [PBXFileReference] the file to build. + # + # @todo I think that is possible to add any kind of group (for + # example folders linked to a path). + # + has_one :file_ref, [ + PBXFileReference, + PBXGroup, + PBXVariantGroup, + XCVersionGroup, + PBXReferenceProxy, + ] + + # @return [XCSwiftPackageProductDependency] the Swift Package file to build. + # + has_one :product_ref, XCSwiftPackageProductDependency + + # @return [String] the platform filter for this build file. + # + attribute :platform_filter, String + + # @return [Array<String>] the platform filters for this build file. + # + attribute :platform_filters, Array + + #---------------------------------------------------------------------# + + public + + # @!group AbstractObject Hooks + + # @return [String] A name suitable for displaying the object to the + # user. + # + def display_name + if product_ref + product_ref.display_name + elsif file_ref + file_ref.display_name + else + super + end + end + + # @return [Hash{String => Hash}, String] A hash suitable to display the + # object to the user. + # + def pretty_print + if settings.nil? || settings.empty? + display_name + else + { display_name => settings } + end + end + + def ascii_plist_annotation + " #{display_name} in #{GroupableHelper.parent(self).display_name} " + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_phase.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_phase.rb new file mode 100644 index 0000000..340dee2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_phase.rb @@ -0,0 +1,369 @@ +module Xcodeproj + class Project + module Object + # @abstract + # + # This class is abstract and it doesn't appear in the project document. + # + class AbstractBuildPhase < AbstractObject + # @!group Attributes + + # @return [ObjectList<PBXBuildFile>] the files processed by this build + # configuration. + # + has_many :files, PBXBuildFile + + # @return [String] some kind of magic number which usually is + # '2147483647' (can be also `8` and `12` in + # PBXCopyFilesBuildPhase, one of the masks is + # run_only_for_deployment_postprocessing). + # + attribute :build_action_mask, String, '2147483647' + + # @return [String] whether or not this should only be processed before + # deployment. Can be either '0', or '1'. + # + # @note This option is exposed in Xcode in the UI of + # PBXCopyFilesBuildPhase as `Copy only when installing` or in + # PBXShellScriptBuildPhase as `Run script only when + # installing`. + # + attribute :run_only_for_deployment_postprocessing, String, '0' + + # @return [String] whether or not this run script will be forced to + # run even on incremental builds. Can be either '1', or + # missing. By default this option is disabled in Xcode. + # + # @note This setting is exposed in Xcode in the UI of + # PBXShellScriptBuildPhase as `Based on + # dependency analysis` (selected by default). + # + attribute :always_out_of_date, String + + # @return [String] Comments associated with this build phase. + # + # @note This is apparently no longer used by Xcode. 
+ # + attribute :comments, String + + #--------------------------------------# + + public + + # @!group Helpers + + # @return [Array<PBXFileReference>] the list of all the files + # referenced by this build phase. + # + def files_references + files.map(&:file_ref) + end + + # @return [Array<String>] The display name of the build files. + # + def file_display_names + files.map(&:display_name) + end + + # @return [PBXBuildFile] the first build file associated with the given + # file reference if one exists. + # + def build_file(file_ref) + (file_ref.referrers & files).first + end + + # Returns whether a build file for the given file reference exists. + # + # @param [PBXFileReference] file_ref + # + # @return [Bool] whether the reference is already present. + # + def include?(file_ref) + !build_file(file_ref).nil? + end + + # Adds a new build file, initialized with the given file reference, to + # the phase. + # + # @param [PBXFileReference] file_ref + # The file reference that should be added to the build phase. + # + # @return [PBXBuildFile] the build file generated. + # + def add_file_reference(file_ref, avoid_duplicates = false) + if avoid_duplicates && existing = build_file(file_ref) + existing + else + build_file = project.new(PBXBuildFile) + build_file.file_ref = file_ref + files << build_file + build_file + end + end + + # Removes the build file associated with the given file reference from + # the phase. + # + # @param [PBXFileReference] file_ref + # The file to remove + # + # @return [void] + # + def remove_file_reference(file_ref) + build_file = files.find { |bf| bf.file_ref == file_ref } + remove_build_file(build_file) if build_file + end + + # Removes a build file from the phase and clears its relationship to + # the file reference. + # + # @param [PBXBuildFile] build_file the file to remove + # + # @return [void] + # + def remove_build_file(build_file) + build_file.file_ref = nil + build_file.remove_from_project + end + + # Removes all the build files from the phase and clears their + # relationship to the file reference. + # + # @return [void] + # + def clear + files.objects.each do |bf| + remove_build_file(bf) + end + end + alias_method :clear_build_files, :clear + + def display_name + super.gsub(/BuildPhase$/, '') + end + + def ascii_plist_annotation + " #{display_name} " + end + + # Sorts the build files of the phase according to the display + # name or the path. + # + # @param [Hash] _options + # Not used. + # + # @return [void] + # + def sort(_options = nil) + files.sort! do |x, y| + result = File.basename(x.display_name.downcase, '.*') <=> File.basename(y.display_name.downcase, '.*') + if result.zero? + result = File.extname(x.display_name.downcase) <=> File.extname(y.display_name.downcase) + if result.zero? && x.file_ref.respond_to?(:full_path) && y.file_ref.respond_to?(:full_path) + result = x.file_ref.full_path.to_s.downcase <=> y.file_ref.full_path.to_s.downcase + end + end + result + end + end + end + + #-----------------------------------------------------------------------# + + # The phase responsible of copying headers. Known as `Copy Headers` in + # the UI. + # + # @note This phase can appear only once in a target. + # + class PBXHeadersBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # The phase responsible of compiling the files. Known as `Compile + # Sources` in the UI. + # + # @note This phase can appear only once in a target. 
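+ # @example Illustrative sketch; assumes a PBXNativeTarget `target` (its
+ #          #source_build_phase helper lives in native_target.rb) and an
+ #          existing PBXFileReference `file_ref`, neither defined here.
+ #   target.source_build_phase.add_file_reference(file_ref, true)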
+ # + class PBXSourcesBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # The phase responsible on linking with frameworks. Known as `Link Binary + # With Libraries` in the UI. + # + # @note This phase can appear only once in a target. + # + class PBXFrameworksBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # The resources build phase apparently is a specialized copy build phase + # for resources. Known as `Copy Bundle Resources` in the UI. It is + # unclear if this is the only one capable of optimizing PNG. + # + # @note This phase can appear only once in a target. + # + class PBXResourcesBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + + # Phase that copies the files to the bundle of the target (aka `Copy + # Files`). + # + # @note This phase can appear multiple times in a target. + # + class PBXCopyFilesBuildPhase < AbstractBuildPhase + # @!group Attributes + + # @return [String] the name of the build phase. + # + attribute :name, String + + # @return [String] the subpath of `dst_subfolder_spec` where this file + # should be copied to. + # + # @note Can accept environment variables like `$(PRODUCT_NAME)`. + # + attribute :dst_path, String, '' + + # @return [String] the path (destination) where the files should be + # copied to. + # + attribute :dst_subfolder_spec, String, Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS[:resources] + + # @return [Hash{String => Hash}] A hash suitable to display the build + # phase to the user. + # + def pretty_print + { + display_name => { + 'Destination Path' => dst_path, + 'Destination Subfolder' => Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS.key(dst_subfolder_spec).to_s, + 'Files' => files.map(&:pretty_print), + }, + } + end + + # Alias method for #dst_subfolder_spec=, which accepts symbol values + # instead of numeric string values. + # + # @param [Symbol] value + # one of `COPY_FILES_BUILD_PHASE_DESTINATIONS.keys` + # + # @raise [StandardError] if value is not a valid known key + # + def symbol_dst_subfolder_spec=(value) + numeric_value = Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS[value] + raise "[Xcodeproj] Value checking error: got `#{value.inspect}` for" \ + ' attribute: dst_subfolder_spec' if numeric_value.nil? + self.dst_subfolder_spec = numeric_value + end + + # Alias method for #dst_subfolder_spec, which returns symbol values + # instead of numeric string values. + # + # @return [Symbol] + # + def symbol_dst_subfolder_spec + key = Constants::COPY_FILES_BUILD_PHASE_DESTINATIONS.find { |_, num| num == dst_subfolder_spec } + key ? key.first : nil + end + end + + #-----------------------------------------------------------------------# + + # A phase responsible of running a shell script (aka `Run Script`). + # + # @note This phase can appear multiple times in a target. + # + class PBXShellScriptBuildPhase < AbstractBuildPhase + # @!group Attributes + + # @return [String] the name of the build phase. + # + attribute :name, String + + # @return [Array<String>] an array of the paths to pass to the script. + # + # @example + # "$(SRCROOT)/myfile" + # + attribute :input_paths, Array, [] + + # @return [Array<String>] an array of input file list paths of the script. + # + # @example + # "$(SRCROOT)/newInputFile.xcfilelist" + # + attribute :input_file_list_paths, Array, [] + + # @return [Array<String>] an array of output paths of the script. 
+ # + # @example + # "$(DERIVED_FILE_DIR)/myfile" + # + attribute :output_paths, Array, [] + + # @return [Array<String>] an array of output file list paths of the script. + # + # @example + # "$(SRCROOT)/newOutputFile.xcfilelist" + # + attribute :output_file_list_paths, Array, [] + + # @return [String] the path to the script interpreter. + # + # @note Defaults to `/bin/sh`. + # + attribute :shell_path, String, '/bin/sh' + + # @return [String] the actual script to perform. + # + # @note Defaults to a comment string. + # + attribute :shell_script, String, "# Type a script or drag a script file from your workspace to insert its path.\n" + + # @return [String] whether or not the ENV variables should be shown in + # the build log. + # + attribute :show_env_vars_in_log, String + + # @return [String] the discovered dependency file to use. + # + attribute :dependency_file, String + + # @return [Hash{String => Hash}] A hash suitable to display the build + # phase to the user. + # + def pretty_print + { + display_name => { + 'Input File List Paths' => input_file_list_paths || [], + 'Input Paths' => input_paths || [], + 'Output File List Paths' => output_file_list_paths || [], + 'Output Paths' => output_paths || [], + 'Shell Path' => shell_path, + 'Shell Script' => shell_script, + }, + } + end + end + + #-----------------------------------------------------------------------# + + # Apparently a build phase named `Build Carbon Resources` (Observed for + # kernel extensions targets). + # + # @note This phase can appear multiple times in a target. + # + class PBXRezBuildPhase < AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_rule.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_rule.rb new file mode 100644 index 0000000..929fbab --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/build_rule.rb @@ -0,0 +1,102 @@ +module Xcodeproj + class Project + module Object + # This class represents a custom build rule of a Target. + # + class PBXBuildRule < AbstractObject + # @!group Attributes + + # @return [String] the name of the rule. + # + attribute :name, String + + # @return [String] a string representing what compiler to use. + # + # @example + # `com.apple.compilers.proxy.script`. + # + attribute :compiler_spec, String + + # @return [String] the type of the files that should be processed by + # this rule. + # + # @example + # `pattern.proxy`. + # + attribute :file_type, String + + # @return [String] the pattern of the files that should be processed by + # this rule. This attribute is an alternative to to + # `file_type`. + # + # @example + # `*.css`. + # + attribute :file_patterns, String + + # @return [String] whether the rule is editable. + # + # @example + # `1`. + # + attribute :is_editable, String, '1' + + # @return [ObjectList<PBXFileReference>] the file references for the + # input files files. + # + attribute :input_files, Array + + # @return [ObjectList<PBXFileReference>] the file references for the + # output files. + # + attribute :output_files, Array + + # @return [ObjectList<String>] the compiler flags used when creating the + # respective output files. + # + attribute :output_files_compiler_flags, Array + + # @return [String] whether the rule should be run once per architecture. + # + # @example + # `0`. 
+ # + attribute :run_once_per_architecture, String + + # @return [String] the content of the script to use for the build rule. + # + # @note This attribute is present if the #{#compiler_spec} is + # `com.apple.compilers.proxy.script` + # + attribute :script, String + + # @!group Helpers + + # Adds an output file with the specified compiler flags. + # + # @param [PBXFileReference] file the file to add. + # + # @param [String] compiler_flags the compiler flags for the file. + # + # @return [Void] + # + def add_output_file(file, compiler_flags = '') + (self.output_files ||= []) << file + (self.output_files_compiler_flags ||= []) << compiler_flags + end + + # @return [Array<[PBXFileReference, String]>] + # An array containing tuples of output files and their compiler + # flags. + # + def output_files_and_flags + (output_files || []).zip(output_files_compiler_flags || []) + end + + def ascii_plist_annotation + " #{isa} " + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/configuration_list.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/configuration_list.rb new file mode 100644 index 0000000..d39834f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/configuration_list.rb @@ -0,0 +1,117 @@ +module Xcodeproj + class Project + module Object + # The primary purpose of this class is to maintain a collection of + # related build configurations of a {PBXProject} or a {PBXNativeTarget}. + # + class XCConfigurationList < AbstractObject + # @!group Attributes + + # @return [String] whether the default configuration is visible. + # Usually `0`. The purpose of this flag and how Xcode displays + # it in the UI is unknown. + # + attribute :default_configuration_is_visible, String, '0' + + # @return [String] the name of the default configuration. + # Usually `Release`. Xcode exposes this attribute as the + # configuration for the command line tools and only allows to + # set it at the project level. + # + attribute :default_configuration_name, String, 'Release' + + # @return [ObjectList<XCBuildConfiguration>] the build + # configurations of the target. + # + has_many :build_configurations, XCBuildConfiguration + + public + + # @!group Helpers + # --------------------------------------------------------------------# + + # Returns the build configuration with the given name. + # + # @param [String] name + # The name of the build configuration. + # + # @return [XCBuildConfiguration] The build configuration. + # @return [Nil] If not build configuration with the given name is found. + # + def [](name) + build_configurations.find { |bc| bc.name == name } + end + + # Returns the build settings of the build configuration with + # the given name. + # + # @param [String] build_configuration_name + # The name of the build configuration. + # + # @return [Hash {String=>String}] the build settings + # + def build_settings(build_configuration_name) + if config = self[build_configuration_name] + config.build_settings + end + end + + # Gets the value for the given build setting in all the build + # configurations. + # + # @param [String] key + # the key of the build setting. + # + # @param [Bool] resolve_against_xcconfig + # wether the retrieved setting should take in consideration any + # configuration file present. 
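+ # @example Illustrative sketch; the target and the returned values are
+ #          assumptions, not taken from this file.
+ #   target.build_configuration_list.get_setting('SWIFT_VERSION')
+ #   #=> { 'Debug' => '5.0', 'Release' => '5.0' }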
+ # + # @param [PBXNativeTarget] root_target + # use this to resolve complete recursion between project and targets + # + # @return [Hash{String => String}] The value of the build setting + # grouped by the name of the build configuration. + # + def get_setting(key, resolve_against_xcconfig = false, root_target = nil) + result = {} + build_configurations.each do |bc| + result[bc.name] = resolve_against_xcconfig ? bc.resolve_build_setting(key, root_target) : bc.build_settings[key] + end + result + end + + # Sets the given value for the build setting associated with the given + # key across all the build configurations. + # + # @param [String] key + # the key of the build setting. + # + # @param [String] value + # the value for the build setting. + # + # @return [void] + # + def set_setting(key, value) + build_configurations.each do |bc| + bc.build_settings[key] = value + end + end + + def target + return project.root_object if project.build_configuration_list.uuid == uuid + project.targets.find { |t| t.build_configuration_list.uuid == uuid } + end + + #---------------------------------------------------------------------# + + def ascii_plist_annotation + if target.nil? + ' Build configuration list for <deleted target> ' + else + " Build configuration list for #{target.isa} \"#{target}\" " + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/container_item_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/container_item_proxy.rb new file mode 100644 index 0000000..30abdd9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/container_item_proxy.rb @@ -0,0 +1,116 @@ +module Xcodeproj + class Project + module Object + # Apparently a proxy for another object which might belong another + # project contained in the same workspace of the project document. + # + # This class is referenced by {PBXTargetDependency} for information about + # it usage see the specs of that class. + # + # @note This class references the other objects by UUID instead of + # creating proper relationships because the other objects might be + # part of another project. This implies that the references to + # other objects should not increase the retain count of the + # targets. + # + # @todo: This class needs some work to support targets across workspaces, + # as the container portal might not be initialized leading + # xcodeproj to raise because ti can't find the UUID. + # + class PBXContainerItemProxy < AbstractObject + # @!group Attributes + + # @return [String] apparently the UUID of the root object + # {PBXProject} of the project containing the represented + # object. + # + # @todo this is an attribute because a it is usually a reference to + # the root object or to a file reference to another project. + # The reference to the root object causes a retain cycle that + # could cause issues (e.g. in to_tree_hash). Usually those + # objects are retained by at least another object (the + # {Project} for the root object and a {PBXGroup} for the + # reference to another project) and so the referenced object + # should be serialized. + # + # If this assumption is incorrect, there could be loss of + # information opening and saving an existing project. + # + # @todo This is the external reference that 'contains' other proxy + # items. + attribute :container_portal, String + + # @return [String] the type of the proxy. 
+ # + # @note @see {Constants::PROXY_TYPE.values} for valid values. + # + attribute :proxy_type, String + + # @return [String] apparently the UUID of the represented + # object. + # + # @note If the object is in another project the UUID would not be + # present in the {Project#objects_by_uuid} hash. For this + # reason this is not an `has_one` attribute. It is assumes that + # if the object belongs to the project at least another object + # should be retaining it. This assumption is reasonable because + # this is a proxy class. + # + # If this assumption is incorrect, there could be loss of + # information opening and saving an existing project. + # + attribute :remote_global_id_string, String + + # @return [String] apparently the name of the object represented by + # the proxy. + # + attribute :remote_info, String + + # Checks whether the reference points to a remote project. + # + # @return [Bool] + # + def remote? + project.root_object.uuid != container_portal + end + + # Get the proxied object + # + # @return [AbstractObject] + # + def proxied_object + container_portal_object.objects_by_uuid[remote_global_id_string] + end + + def container_portal_object + if remote? + container_portal_file_ref = project.objects_by_uuid[container_portal] + Project.open(container_portal_file_ref.real_path) + else + project + end + end + + def container_portal_annotation + if remote? + " #{File.basename(project.objects_by_uuid[container_portal].real_path)} " + else + project.root_object.ascii_plist_annotation + end + end + + def to_hash_as(method = :to_hash) + hash = super + if method == :to_ascii_plist + hash['containerPortal'] = Nanaimo::String.new(container_portal, container_portal_annotation) + end + hash + end + + def ascii_plist_annotation + " #{isa} " + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/file_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/file_reference.rb new file mode 100644 index 0000000..0cf0043 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/file_reference.rb @@ -0,0 +1,337 @@ +require 'xcodeproj/project/object/helpers/groupable_helper' + +module Xcodeproj + class Project + module Object + # This class represents a reference to a file in the file system. + # + class PBXFileReference < AbstractObject + # @!group Attributes + + # @return [String] the name of the reference, often not present. + # + attribute :name, String + + # @return [String] the path to the file relative to the source tree + # + attribute :path, String + + # @return [String] the directory to which the path is relative. + # + # @note The accepted values are: + # - `<absolute>` for absolute paths + # - `<group>` for paths relative to the group + # - `SOURCE_ROOT` for paths relative to the project + # - `DEVELOPER_DIR` for paths relative to the developer + # directory. + # - `BUILT_PRODUCTS_DIR` for paths relative to the build + # products directory. + # - `SDKROOT` for paths relative to the SDK directory. + # + attribute :source_tree, String, 'SOURCE_ROOT' + + # @return [String] the file type (apparently) used for products + # generated by Xcode (i.e. applications, libraries). + # + attribute :explicit_file_type, String + + # @return [String] the file type guessed by Xcode. + # + # @note This attribute is not present if there is an + # `explicit_file_type`. 
+ # + attribute :last_known_file_type, String + + # @return [String] whether this file should be indexed. It can + # be either `0` or `1`. + # + # @note Apparently present only for products generated by Xcode with + # a value of `0`. + # + attribute :include_in_index, String, '1' + + # @return [String] a string containing a number which represents the + # encoding format of the file. + # + attribute :fileEncoding, String + + # @return [String] a string that specifies the UTI for the syntax + # highlighting. + # + # @example + # `xcode.lang.ruby` + # + attribute :xc_language_specification_identifier, String + + # @return [String] a string that specifies the UTI for the structure of + # a plist file. + # + # @example + # `com.apple.xcode.plist.structure-definition.iphone.info-plist` + # + attribute :plist_structure_definition_identifier, String + + # @return [String] Whether Xcode should use tabs for text alignment. + # + # @example + # `1` + # + attribute :uses_tabs, String + + # @return [String] The width of the indent. + # + # @example + # `2` + # + attribute :indent_width, String + + # @return [String] The width of the tabs. + # + # @example + # `2` + # + attribute :tab_width, String + + # @return [String] Whether Xcode should wrap lines. + # + # @example + # `1` + # + attribute :wraps_lines, String + + # @return [String] Apparently whether Xcode should add, if needed, a + # new line feed before saving the file. + # + # @example + # `0` + # + attribute :line_ending, String + + # @return [String] Comments associated with this file. + # + # @note This is apparently no longer used by Xcode. + # + attribute :comments, String + + #---------------------------------------------------------------------# + + public + + # @!group Helpers + + # @return [String] the name of the file taking into account the path if + # needed. + # + def display_name + if name + name + elsif (class << GroupableHelper; self; end)::SOURCE_TREES_BY_KEY[:built_products] == source_tree + path + elsif path + File.basename(path) + end + end + + # @return [PBXGroup, PBXProject] the parent of the file. + # + def parent + GroupableHelper.parent(self) + end + + # @return [Array<PBXGroup, PBXProject>] The list of the parents of the + # reference. + # + def parents + GroupableHelper.parents(self) + end + + # @return [String] A representation of the reference hierarchy. + # + def hierarchy_path + GroupableHelper.hierarchy_path(self) + end + + # Moves the reference to a new parent. + # + # @param [PBXGroup] new_parent + # The new parent. + # + # @return [void] + # + def move(new_parent) + GroupableHelper.move(self, new_parent) + end + + # @return [Pathname] the absolute path of the file resolving the + # source tree. + # + def real_path + GroupableHelper.real_path(self) + end + + # @return [Pathname] the path of the file without resolving the + # source tree. + # + def full_path + GroupableHelper.full_path(self) + end + + # Sets the source tree of the reference. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a symbol. + # + # @return [void] + # + def set_source_tree(source_tree) + GroupableHelper.set_source_tree(self, source_tree) + end + + # Allows to set the path according to the source tree of the reference. + # + # @param [#to_s] the path for the reference. 
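+ # @example Illustrative sketch; the path is hypothetical.
+ #   file_ref.set_path('Sources/AppDelegate.swift')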
+ # + # @return [void] + # + def set_path(path) + if path + GroupableHelper.set_path_with_source_tree(self, path, source_tree) + else + self.path = nil + end + end + + # @return [Array<PBXBuildFile>] the build files associated with the + # current file reference. + # + def build_files + referrers.grep(PBXBuildFile) + end + + # Sets the last known file type according to the extension of the path. + # + # @return [void] + # + def set_last_known_file_type(type = nil) + if type + self.last_known_file_type = type + elsif path + extension = Pathname(path).extname[1..-1] + self.last_known_file_type = Constants::FILE_TYPES_BY_EXTENSION[extension] + end + end + + # Sets the explicit file type according to the extension of the path, + # and clears the last known file type. + # + # @return [void] + # + def set_explicit_file_type(type = nil) + self.last_known_file_type = nil + if type + self.explicit_file_type = type + elsif path + extension = Pathname(path).extname[1..-1] + self.explicit_file_type = Constants::FILE_TYPES_BY_EXTENSION[extension] + end + end + + #---------------------------------------------------------------------# + + # Checks whether the reference is a proxy. + # + # @return [Bool] always false for this ISA. + # + def proxy? + false + end + + # If this file reference represents an external Xcode project reference + # then this will return metadata about it which includes the reference + # to the 'Products' group that's created in this project (the project + # that includes the external project). + # + # @return [ObjectDictionary, nil] The external project metadata for + # this file reference or `nil` if it's not an external project. + # + def project_reference_metadata + project.root_object.project_references.find do |project_reference| + project_reference[:project_ref] == self + end + end + + # If this file reference represents an external Xcode project reference + # then this will return the objects that are 'containers' for items + # contained in the external Xcode project. + # + # @return [Array<PBXContainerItemProxy>] The containers for items in + # the external Xcode project. + # + def proxy_containers + project.objects.select do |object| + object.isa == 'PBXContainerItemProxy' && + object.container_portal == uuid + end + end + + # If this file reference represents an external Xcode project reference + # then this will return proxies for file references contained in the + # external Xcode project. + # + # @return [Array<PBXReferenceProxy>] The file reference proxies for + # items located in the external Xcode project. + # + def file_reference_proxies + containers = proxy_containers + if containers.empty? + [] + else + project.objects.select do |object| + object.isa == 'PBXReferenceProxy' && + containers.include?(object.remote_ref) + end + end + end + + # If this file reference represents an external Xcode project reference + # then this will return dependencies on targets contained in the + # external Xcode project. + # + # @return [Array<PBXTargetDependency>] The dependencies on targets + # located in the external Xcode project. + # + def target_dependency_proxies + containers = proxy_containers + if containers.empty? + [] + else + project.objects.select do |object| + object.isa == 'PBXTargetDependency' && + containers.include?(object.target_proxy) + end + end + end + + # In addition to removing the file reference, this will also remove any + # items related to this reference in case it represents an external + # Xcode project. 
+ # + # @see AbstractObject#remove_from_project + # + # @return [void] + # + def remove_from_project + if project_reference = project_reference_metadata + file_reference_proxies.each(&:remove_from_project) + target_dependency_proxies.each(&:remove_from_project) + project_reference[:product_group].remove_from_project + project.root_object.project_references.delete(project_reference) + end + super + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/group.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/group.rb new file mode 100644 index 0000000..18d16d4 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/group.rb @@ -0,0 +1,487 @@ +require 'xcodeproj/project/object/helpers/groupable_helper' +require 'xcodeproj/project/object/helpers/file_references_factory' + +module Xcodeproj + class Project + module Object + # This class represents a group. A group can contain other groups + # (PBXGroup) and file references (PBXFileReference). + # + class PBXGroup < AbstractObject + # @!group Attributes + + # @return [ObjectList<PBXGroup, PBXFileReference>] + # the objects contained by the group. + # + has_many :children, [PBXGroup, PBXFileReference, PBXReferenceProxy] + + # @return [String] the directory to which the path is relative. + # + # @note The accepted values are: + # - `<absolute>` for absolute paths + # - `<group>` for paths relative to the group + # - `SOURCE_ROOT` for paths relative to the project + # - `DEVELOPER_DIR` for paths relative to the developer + # directory. + # - `BUILT_PRODUCTS_DIR` for paths relative to the build + # products directory. + # - `SDKROOT` for paths relative to the SDK directory. + # + attribute :source_tree, String, '<group>' + + # @return [String] the path to a folder in the file system. + # + # @note This attribute is present for groups that are linked to a + # folder in the file system. + # + attribute :path, String + + # @return [String] the name of the group. + # + # @note If path is specified this attribute is not present. + # + attribute :name, String + + # @return [String] Whether Xcode should use tabs for text alignment. + # + # @example + # `1` + # + attribute :uses_tabs, String + + # @return [String] The width of the indent. + # + # @example + # `2` + # + attribute :indent_width, String + + # @return [String] The width of the tabs. + # + # @example + # `2` + # + attribute :tab_width, String + + # @return [String] Whether Xcode should wrap lines. + # + # @example + # `1` + # + attribute :wraps_lines, String + + # @return [String] Comments associated with this group. + # + # @note This is apparently no longer used by Xcode. + # + attribute :comments, String + + public + + # @!group Helpers + #---------------------------------------------------------------------# + + # @return [PBXGroup, PBXProject] The parent of the group. + # + def parent + GroupableHelper.parent(self) + end + + # @return [Array<PBXGroup, PBXProject>] The list of the parents of the + # group. + # + def parents + GroupableHelper.parents(self) + end + + # @return [String] A representation of the group hierarchy. + # + def hierarchy_path + GroupableHelper.hierarchy_path(self) + end + + # Moves the group to a new parent. + # + # @param [PBXGroup] new_parent + # The new parent. 
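+ # @example Illustrative sketch; the group names are hypothetical.
+ #   group = project.main_group['Resources']
+ #   group.move(project.main_group['App'])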
+ # + # @return [void] + # + def move(new_parent) + GroupableHelper.move(self, new_parent) + end + + # @return [Pathname] the absolute path of the group resolving the + # source tree. + # + def real_path + GroupableHelper.real_path(self) + end + + # Sets the source tree of the group. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a symbol. + # + # @return [void] + # + def set_source_tree(source_tree) + GroupableHelper.set_source_tree(self, source_tree) + end + + # Allows to set the path according to the source tree of the group. + # + # @param [#to_s] the path for the group. + # + # @return [void] + # + def set_path(path) + if path + source_tree + GroupableHelper.set_path_with_source_tree(self, path, source_tree) + else + self.path = nil + self.source_tree = '<group>' + end + end + + # @return [Array<PBXFileReference>] the file references in the group + # children. + # + def files + children.grep(PBXFileReference) + end + + # @return [PBXFileReference] The file references whose path (regardless + # of the source tree) matches the give path. + # + def find_file_by_path(path) + files.find { |ref| ref.path == path } + end + + # @return [Array<PBXGroup>] the groups in the group children. + # + def groups + # Don't grep / is_a? as this would include child classes. + children.select { |obj| obj.class == PBXGroup } + end + + # @return [Array<XCVersionGroup>] the version groups in the group + # children. + # + def version_groups + children.grep(XCVersionGroup) + end + + # @return [Array<PBXGroup,PBXFileReference,PBXReferenceProxy>] the + # recursive children of the group. + # + def recursive_children_groups + result = [] + groups.each do |child| + result << child + result.concat(child.recursive_children_groups) + end + result + end + + # @return [Array<PBXGroup,PBXFileReference,PBXReferenceProxy>] the + # recursive list of the children of the group. + # + def recursive_children + result = [] + children.each do |child| + result << child + if child.is_a?(PBXGroup) + result.concat(child.recursive_children) + end + end + result + end + + # @return [Bool] Whether the group is empty. + # + def empty? + children.count.zero? + end + + # Creates a new reference with the given path and adds it to the + # group. The reference is configured according to the extension + # of the path. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXFileReference, XCVersionGroup] The new reference. + # + def new_reference(path, source_tree = :group) + FileReferencesFactory.new_reference(self, path, source_tree) + end + alias_method :new_file, :new_reference + + # Creates a file reference to a static library and adds it to the + # group. + # + # @param [#to_s] product_basename + # The name of the static library. + # + # @return [PBXFileReference] The new file reference. + # + def new_product_ref_for_target(product_basename, product_type) + FileReferencesFactory.new_product_ref_for_target(self, product_basename, product_type) + end + + # Creates a file reference to a new bundle. + # + # @param [#to_s] product_basename + # The name of the bundle. + # + # @return [PBXFileReference] The new file reference. + # + def new_bundle(product_basename) + FileReferencesFactory.new_bundle(self, product_basename) + end + + # Creates a file reference to a new bundle and adds it to the group. 
+ # + # @note @see new_reference + # + # @param [#to_s] name + # the name of the new group. + # + # @return [PBXGroup] the new group. + # + def new_group(name, path = nil, source_tree = :group) + group = project.new(PBXGroup) + children << group + group.name = name + group.set_source_tree(source_tree) + group.set_path(path) + group + end + + # Creates a new variant group and adds it to the group + # + # @note @see new_group + # + # @param [#to_s] name + # the name of the new group. + # + # @param [#to_s] path + # The, preferably absolute, path of the variant group. + # Pass the path of the folder containing all the .lproj bundles, + # that contain files for the variant group. + # Do not pass the path of a specific bundle (such as en.lproj) + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXVariantGroup] the new variant group. + # + def new_variant_group(name, path = nil, source_tree = :group) + group = project.new(PBXVariantGroup) + children << group + group.name = name + group.set_source_tree(source_tree) + group.set_path(path) + group + end + + # Traverses the children groups and finds the group with the given + # path, if exists. + # + # @see find_subpath + # + def [](path) + find_subpath(path, false) + end + + # Removes children files and groups under this group. + # + def clear + children.objects.each(&:remove_from_project) + end + alias_method :remove_children_recursively, :clear + + # Traverses the children groups and finds the children with the given + # path, optionally, creating any needed group. If the given path is + # `nil` it returns itself. + # + # @param [String] path + # a string with the names of the groups separated by a '`/`'. + # + # @param [Boolean] should_create + # whether the path should be created. + # + # @note The path is matched against the {#display_name} of the groups. + # + # @example + # g = main_group['Frameworks'] + # g.name #=> 'Frameworks' + # + # @return [PBXGroup] the group if found. + # @return [Nil] if the path could not be found and should create is + # false. + # + def find_subpath(path, should_create = false) + return self unless path + path = path.split('/') unless path.is_a?(Array) + child_name = path.shift + child = children.find { |c| c.display_name == child_name } + if child.nil? + if should_create + child = new_group(child_name) + else + return nil + end + end + if path.empty? + child + else + child.find_subpath(path, should_create) + end + end + + # Adds an object to the group. + # + # @return [ObjectList<AbstractObject>] the children list. + # + def <<(child) + children << child + end + + # Sorts the children of the group by type and then by name. + # + # @note This is safe to call in an object list because it modifies it + # in C in Ruby MRI. In other Ruby implementation it can cause + # issues if there is one call to the notification enabled + # methods not compensated by the corespondent opposite (loss of + # UUIDs and objects from the project). + # + # @return [void] + # + def sort_by_type + children.sort! 
do |x, y| + if x.isa == 'PBXGroup' && !(y.isa == 'PBXGroup') + -1 + elsif !(x.isa == 'PBXGroup') && y.isa == 'PBXGroup' + 1 + elsif x.display_name && y.display_name + extname_x = File.extname(x.display_name) + extname_y = File.extname(y.display_name) + if extname_x != extname_y + extname_x <=> extname_y + else + File.basename(x.display_name, '.*') <=> File.basename(y.display_name, '.*') + end + else + 0 + end + end + end + + # Sorts the group by type recursively. + # + # @return [void] + # + def sort_recursively_by_type + groups.each(&:sort_recursively_by_type) + sort_by_type + end + + public + + # @!group AbstractObject Hooks + #---------------------------------------------------------------------# + + # @return [String] the name of the group taking into account the path + # or other factors if needed. + # + def display_name + if name + name + elsif path + File.basename(path) + elsif self.equal?(project.main_group) + 'Main Group' + end + end + + def ascii_plist_annotation + super unless self.equal?(project.main_group) + end + + # Sorts the to many attributes of the object according to the display + # name. + # + # @param [Hash] options + # the sorting options. + # @option options [Symbol] :groups_position + # the position of the groups can be either `:above` or + # `:below`. + # + # @return [void] + # + def sort(options = nil) + children.sort! do |x, y| + if options && groups_position = options[:groups_position] + raise ArgumentError unless [:above, :below].include?(groups_position) + if x.isa == 'PBXGroup' && !(y.isa == 'PBXGroup') + next groups_position == :above ? -1 : 1 + elsif !(x.isa == 'PBXGroup') && y.isa == 'PBXGroup' + next groups_position == :above ? 1 : -1 + end + end + + result = File.basename(x.display_name.downcase, '.*') <=> File.basename(y.display_name.downcase, '.*') + if result.zero? + result = File.extname(x.display_name.downcase) <=> File.extname(y.display_name.downcase) + if result.zero? + result = x.path.downcase <=> y.path.downcase + end + end + result + end + end + end + + #-----------------------------------------------------------------------# + + # This class is used to gather localized files into one entry. + # + class PBXVariantGroup < PBXGroup + # @!group Attributes + + # @return [String] the file type guessed by Xcode. + # + attribute :last_known_file_type, String + end + + #-----------------------------------------------------------------------# + + # A group that contains multiple files references to the different + # versions of a resource. + # + # Used to contain the different versions of a `xcdatamodel`. + # + class XCVersionGroup < PBXGroup + # @!group Attributes + + # @return [PBXFileReference] the reference to the current version. + # + has_one :current_version, PBXFileReference + + # @return [String] the type of the versioned resource. 
+ # + attribute :version_group_type, String, 'wrapper.xcdatamodel' + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version.rb new file mode 100644 index 0000000..431fdd1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/build_settings_array_settings_by_object_version.rb @@ -0,0 +1,72 @@ +require 'set' + +module Xcodeproj + class Project + module Object + class XCBuildConfiguration + # yes, they are case-sensitive. + # no, Xcode doesn't do this for other PathList settings nor other + # settings ending in SEARCH_PATHS. + module BuildSettingsArraySettingsByObjectVersion + ARRAY_SETTINGS = %w( + ALTERNATE_PERMISSIONS_FILES + ARCHS + BUILD_VARIANTS + EXCLUDED_SOURCE_FILE_NAMES + FRAMEWORK_SEARCH_PATHS + GCC_PREPROCESSOR_DEFINITIONS + GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS + HEADER_SEARCH_PATHS + INFOPLIST_PREPROCESSOR_DEFINITIONS + LIBRARY_SEARCH_PATHS + OTHER_CFLAGS + OTHER_CPLUSPLUSFLAGS + OTHER_LDFLAGS + REZ_SEARCH_PATHS + SECTORDER_FLAGS + WARNING_CFLAGS + WARNING_LDFLAGS + ).to_set.freeze + private_constant :ARRAY_SETTINGS + + ARRAY_SETTINGS_OBJECT_VERSION_50 = %w( + ALTERNATE_PERMISSIONS_FILES + ARCHS + BUILD_VARIANTS + EXCLUDED_SOURCE_FILE_NAMES + FRAMEWORK_SEARCH_PATHS + GCC_PREPROCESSOR_DEFINITIONS + GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS + HEADER_SEARCH_PATHS + INCLUDED_SOURCE_FILE_NAMES + INFOPLIST_PREPROCESSOR_DEFINITIONS + LD_RUNPATH_SEARCH_PATHS + LIBRARY_SEARCH_PATHS + LOCALIZED_STRING_MACRO_NAMES + OTHER_CFLAGS + OTHER_CPLUSPLUSFLAGS + OTHER_LDFLAGS + REZ_SEARCH_PATHS + SECTORDER_FLAGS + SYSTEM_FRAMEWORK_SEARCH_PATHS + SYSTEM_HEADER_SEARCH_PATHS + USER_HEADER_SEARCH_PATHS + WARNING_CFLAGS + WARNING_LDFLAGS + ).to_set.freeze + private_constant :ARRAY_SETTINGS_OBJECT_VERSION_50 + + def self.[](object_version) + object_version = object_version.to_i + + if object_version >= 50 + ARRAY_SETTINGS_OBJECT_VERSION_50 + else + ARRAY_SETTINGS + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/file_references_factory.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/file_references_factory.rb new file mode 100644 index 0000000..d59f85c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/file_references_factory.rb @@ -0,0 +1,245 @@ +# frozen_string_literal: true +require 'xcodeproj/project/object/helpers/groupable_helper' + +module Xcodeproj + class Project + module Object + class FileReferencesFactory + class << self + # Creates a new reference with the given path and adds it to the + # given group. The reference is configured according to the extension + # of the path. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXFileReference, XCVersionGroup] The new reference. 
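+ # @example Illustrative sketch; `group` and the path are assumptions, and
+ #          this is normally reached via PBXGroup#new_reference.
+ #   FileReferencesFactory.new_reference(group, 'Sources/Foo.swift', :group)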
+ # + def new_reference(group, path, source_tree) + ref = case File.extname(path).downcase + when '.xcdatamodeld' + new_xcdatamodeld(group, path, source_tree) + when '.xcodeproj' + new_subproject(group, path, source_tree) + else + new_file_reference(group, path, source_tree) + end + + configure_defaults_for_file_reference(ref) + ref + end + + # Creates a file reference to a static library and adds it to the + # given group. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] product_basename + # The name of the static library. + # + # @return [PBXFileReference] The new file reference. + # + def new_product_ref_for_target(group, product_basename, product_type) + if product_type == :static_library + prefix = 'lib' + end + extension = Constants::PRODUCT_UTI_EXTENSIONS[product_type] + path = "#{prefix}#{product_basename}" + path += ".#{extension}" if extension + ref = new_reference(group, path, :built_products) + ref.include_in_index = '0' + ref.set_explicit_file_type + ref + end + + # Creates a file reference to a new bundle and adds it to the given + # group. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] product_basename + # The name of the bundle. + # + # @return [PBXFileReference] The new file reference. + # + def new_bundle(group, product_basename) + ref = new_reference(group, "#{product_basename}.bundle", :built_products) + ref.include_in_index = '0' + ref.set_explicit_file_type('wrapper.cfbundle') + ref + end + + private + + # @group Private Helpers + #-------------------------------------------------------------------# + + # Creates a new file reference with the given path and adds it to the + # given group. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @return [PBXFileReference] The new file reference. + # + def new_file_reference(group, path, source_tree) + path = Pathname.new(path) + ref = group.project.new(PBXFileReference) + group.children << ref + GroupableHelper.set_path_with_source_tree(ref, path, source_tree) + ref.set_last_known_file_type + ref + end + + # Creates a new version group reference to an xcdatamodeled adding + # the xcdatamodel files included in the wrapper as children file + # references. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @note To match Xcode behaviour the current version is read from + # the .xccurrentversion file, if it doesn't exist the last + # xcdatamodel according to its path is set as the current + # version. + # + # @return [XCVersionGroup] The new reference. + # + def new_xcdatamodeld(group, path, source_tree) + path = Pathname.new(path) + ref = group.project.new(XCVersionGroup) + group.children << ref + GroupableHelper.set_path_with_source_tree(ref, path, source_tree) + ref.version_group_type = 'wrapper.xcdatamodel' + + real_path = group.real_path.join(path) + current_version_name = nil + if real_path.exist? 
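+ # add a file reference for every .xcdatamodel inside the wrapper and read
+ # .xccurrentversion to learn which version is the current one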
+ real_path.children.each do |child_path| + if File.extname(child_path) == '.xcdatamodel' + new_file_reference(ref, child_path, :group) + elsif File.basename(child_path) == '.xccurrentversion' + full_path = real_path + File.basename(child_path) + xccurrentversion = Plist.read_from_path(full_path) + current_version_name = xccurrentversion['_XCCurrentVersionName'] + end + end + + if current_version_name + ref.current_version = ref.children.find do |obj| + obj.path.split('/').last == current_version_name + end + end + end + + ref + end + + # Creates a file reference to another Xcode subproject and setups the + # proxies to the targets. + # + # @param [PBXGroup] group + # The group to which to add the reference. + # + # @param [#to_s] path + # The, preferably absolute, path of the reference. + # + # @param [Symbol] source_tree + # The source tree key to use to configure the path (@see + # GroupableHelper::SOURCE_TREES_BY_KEY). + # + # @note To analyze the targets the given project is read and thus + # it should already exist in the disk. + # + # @return [PBXFileReference] The new file reference. + # + def new_subproject(group, path, source_tree) + ref = new_file_reference(group, path, source_tree) + ref.include_in_index = nil + + product_group_ref = find_products_group_ref(group, true) + + subproj = Project.open(path) + subproj.products_group.files.each do |product_reference| + container_proxy = group.project.new(PBXContainerItemProxy) + container_proxy.container_portal = ref.uuid + container_proxy.proxy_type = Constants::PROXY_TYPES[:reference] + container_proxy.remote_global_id_string = product_reference.uuid + container_proxy.remote_info = 'Subproject' + + reference_proxy = group.project.new(PBXReferenceProxy) + extension = File.extname(product_reference.path)[1..-1] + reference_proxy.file_type = Constants::FILE_TYPES_BY_EXTENSION[extension] + reference_proxy.path = product_reference.path + reference_proxy.remote_ref = container_proxy + reference_proxy.source_tree = 'BUILT_PRODUCTS_DIR' + + product_group_ref << reference_proxy + end + + attribute = PBXProject.references_by_keys_attributes.find { |attrb| attrb.name == :project_references } + project_reference = ObjectDictionary.new(attribute, group.project.root_object) + project_reference[:project_ref] = ref + project_reference[:product_group] = product_group_ref + group.project.root_object.project_references << project_reference + + ref + end + + # Configures a file reference according to the extension to math + # Xcode behaviour. + # + # @param [PBXFileReference] ref + # The file reference to configure. + # + # @note To closely match the Xcode behaviour the name attribute of + # the file reference is set only if the path of the file is + # not equal to the path of the group. 
+ # + # @return [void] + # + def configure_defaults_for_file_reference(ref) + if ref.path.include?('/') + ref.name = ref.path.split('/').last + end + + if File.extname(ref.path).downcase == '.framework' + ref.include_in_index = nil + end + end + + def find_products_group_ref(group, should_create = false) + product_group_ref = + (group.project.root_object.product_ref_group ||= group.project.main_group.find_subpath('Products', should_create)) + product_group_ref + end + + #-------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/groupable_helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/groupable_helper.rb new file mode 100644 index 0000000..1ae19fb --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/helpers/groupable_helper.rb @@ -0,0 +1,260 @@ +# frozen_string_literal: true +module Xcodeproj + class Project + module Object + class GroupableHelper + class << self + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [PBXGroup, PBXProject] The parent of the object. + # + def parent(object) + referrers = object.referrers.uniq + if referrers.count > 1 + referrers = referrers.grep(PBXGroup) + end + + if referrers.count == 0 + raise '[Xcodeproj] Consistency issue: no parent ' \ + "for object `#{object.display_name}`: "\ + "`#{object.referrers.join('`, `')}`" + elsif referrers.count > 1 + raise '[Xcodeproj] Consistency issue: unexpected multiple parents ' \ + "for object `#{object.display_name}`: "\ + "#{object.referrers}" + end + referrers.first + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Array<PBXGroup, PBXProject>] The parents of the object. + # + def parents(object) + if main_group?(object) + [] + else + parent = parent(object) + parents(parent).push(parent) + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [String] A representation of the group hierarchy. + # + def hierarchy_path(object) + unless main_group?(object) + parent = parent(object) + parent = parent.hierarchy_path if parent.respond_to?(:hierarchy_path) + "#{parent}/#{object.display_name}" + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Bool] Wether the object is the main group of the project. + # + def main_group?(object) + object.equal?(object.project.main_group) + end + + # Moves the object to a new parent. + # + # @param [PBXGroup, PBXFileReference] object + # The object to move. + # + # @param [PBXGroup] new_parent + # The new parent. + # + # @return [void] + # + def move(object, new_parent) + unless object + raise "[Xcodeproj] Attempt to move nil object to `#{new_parent}`." + end + unless new_parent + raise "[Xcodeproj] Attempt to move object `#{object}` to nil parent." + end + if new_parent.equal?(object) + raise "[Xcodeproj] Attempt to move object `#{object}` to itself." + end + if parents(new_parent).include?(object) + raise "[Xcodeproj] Attempt to move object `#{object}` to a child object `#{new_parent}`." + end + + object.parent.children.delete(object) + new_parent << object + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Pathname] The absolute path of the object resolving the + # source tree. 
+ # + def real_path(object) + source_tree = source_tree_real_path(object) + path = object.path || ''.freeze + if source_tree + source_tree + path + else + Pathname(path) + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Pathname] The path of the object without resolving the + # source tree. + # + def full_path(object) + folder = case object.source_tree + when '<group>' + object_parent = parent(object) + if object_parent.isa == 'PBXProject'.freeze + nil + else + full_path(object_parent) + end + when 'SOURCE_ROOT' + nil + when '<absolute>' + Pathname.new('/'.freeze) + else + Pathname.new("${#{object.source_tree}}") + end + folder ||= Pathname.new('') + if object.path + folder + object.path + else + folder + end + end + + # @param [PBXGroup, PBXFileReference] object + # The object to analyze. + # + # @return [Pathname] The absolute path of the source tree of the + # object. + # + def source_tree_real_path(object) + case object.source_tree + when '<group>' + object_parent = parent(object) + if object_parent.isa == 'PBXProject'.freeze + object.project.project_dir + object.project.root_object.project_dir_path + else + real_path(object_parent) + end + when 'SOURCE_ROOT' + object.project.project_dir + when '<absolute>' + nil + else + Pathname.new("${#{object.source_tree}}") + end + end + + # @return [Hash{Symbol => String}] The source tree values by they + # symbol representation. + # + SOURCE_TREES_BY_KEY = { + :absolute => '<absolute>', + :group => '<group>', + :project => 'SOURCE_ROOT', + :built_products => 'BUILT_PRODUCTS_DIR', + :developer_dir => 'DEVELOPER_DIR', + :sdk_root => 'SDKROOT', + }.freeze + + # Sets the source tree of the given object. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a key for + # {SOURCE_TREES_BY_KEY}. + # + # @return [void] + # + def set_source_tree(object, source_tree) + source_tree = normalize_source_tree(source_tree) + object.source_tree = source_tree + end + + # Sets the path of the given object according to the provided source + # tree key. The path is converted to relative according to the real + # path of the source tree for group and project source trees, if both + # paths are relative or absolute. Otherwise the path is set as + # provided. + # + # @param [PBXGroup, PBXFileReference] object + # The object whose path needs to be set. + # + # @param [#to_s] path + # The path. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a key for + # {SOURCE_TREES_BY_KEY}. + # + # @return [void] + # + def set_path_with_source_tree(object, path, source_tree) + path = Pathname(path) + source_tree = normalize_source_tree(source_tree) + object.source_tree = source_tree + + if source_tree == SOURCE_TREES_BY_KEY[:absolute] + unless path.absolute? + raise '[Xcodeproj] Attempt to set a relative path with an ' \ + "absolute source tree: `#{path}`" + end + object.path = path.to_s + elsif source_tree == SOURCE_TREES_BY_KEY[:group] || source_tree == SOURCE_TREES_BY_KEY[:project] + source_tree_real_path = GroupableHelper.source_tree_real_path(object) + if source_tree_real_path && source_tree_real_path.absolute? == path.absolute? 
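# A minimal sketch of the path helpers GroupableHelper backs; PBXGroup and
# PBXFileReference expose them as #parent, #hierarchy_path, #real_path,
# #full_path and #move. 'App.xcodeproj' and the group/file names below are
# hypothetical, and the return values shown are only illustrative.
require 'xcodeproj'

project = Xcodeproj::Project.open('App.xcodeproj')
vendor  = project.main_group.new_group('Vendor')
ref     = vendor.new_reference('Vendor/README.md')

ref.hierarchy_path #=> e.g. "/Vendor/README.md"
ref.full_path      #=> Pathname, source tree left unresolved
ref.real_path      #=> absolute Pathname, resolved against the project dir
ref.move(project.main_group) # re-parents the reference and updates referrers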
+ relative_path = path.relative_path_from(source_tree_real_path) + object.path = relative_path.to_s + else + object.path = path.to_s + end + else + object.path = path.to_s + end + end + + private + + # @group Helpers + #-------------------------------------------------------------------# + + # Converts the given source tree to its string value. + # + # @param [Symbol, String] source_tree + # The source tree, either a string or a key for + # {SOURCE_TREES_BY_KEY}. + # + # @return [String] the string value of the source tree. + # + def normalize_source_tree(source_tree) + if source_tree.is_a?(Symbol) + source_tree = SOURCE_TREES_BY_KEY[source_tree] + end + + unless SOURCE_TREES_BY_KEY.values.include?(source_tree) + raise "[Xcodeproj] Unrecognized source tree option `#{source_tree}`" + end + source_tree + end + + #-------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/native_target.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/native_target.rb new file mode 100644 index 0000000..0ad7032 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/native_target.rb @@ -0,0 +1,751 @@ +module Xcodeproj + class Project + module Object + class AbstractTarget < AbstractObject + # @!group Attributes + + # @return [String] The name of the Target. + # + attribute :name, String + + # @return [String] the name of the build product. + # + attribute :product_name, String + + # @return [String] Comments associated with this target. + # + # This is apparently no longer used by Xcode. + # + attribute :comments, String + + # @return [XCConfigurationList] the list of the build configurations of + # the target. This list commonly include two configurations + # `Debug` and `Release`. + # + has_one :build_configuration_list, XCConfigurationList + + # @return [ObjectList<PBXTargetDependency>] the targets necessary to + # build this target. + # + has_many :dependencies, PBXTargetDependency + + public + + # @!group Helpers + #--------------------------------------# + + # Gets the value for the given build setting in all the build + # configurations or the value inheriting the value from the project + # ones if needed. + # + # @param [String] key + # the key of the build setting. + # + # @param [Bool] resolve_against_xcconfig + # whether the resolved setting should take in consideration any + # configuration file present. + # + # @return [Hash{String => String}] The value of the build setting + # grouped by the name of the build configuration. + # + # TODO: Full support for this would require to take into account + # the default values for the platform. + # + def resolved_build_setting(key, resolve_against_xcconfig = false) + target_settings = build_configuration_list.get_setting(key, resolve_against_xcconfig, self) + project_settings = project.build_configuration_list.get_setting(key, resolve_against_xcconfig) + target_settings.merge(project_settings) do |_key, target_val, proj_val| + target_includes_inherited = Constants::INHERITED_KEYWORDS.any? { |keyword| target_val.include?(keyword) } if target_val + if target_includes_inherited && proj_val + if target_val.is_a? String + target_val.gsub(Regexp.union(Constants::INHERITED_KEYWORDS), proj_val) + else + target_val.flat_map { |value| Constants::INHERITED_KEYWORDS.include?(value) ? 
proj_val : value } + end + else + target_val || proj_val + end + end + end + + # Gets the value for the given build setting, properly inherited if + # need, if shared across the build configurations. + # + # @param [String] key + # the key of the build setting. + # + # @param [Boolean] resolve_against_xcconfig + # whether the resolved setting should take in consideration any + # configuration file present. + # + # @raise If the build setting has multiple values. + # + # @note As it is common not to have a setting with no value for + # custom build configurations nil keys are not considered to + # determine if the setting is unique. This is an heuristic + # which might not closely match Xcode behaviour. + # + # @return [String] The value of the build setting. + # + def common_resolved_build_setting(key, resolve_against_xcconfig: false) + values = resolved_build_setting(key, resolve_against_xcconfig).values.compact.uniq + if values.count <= 1 + values.first + else + raise "[Xcodeproj] Consistency issue: build setting `#{key}` has multiple values: `#{resolved_build_setting(key)}`" + end + end + + # @return [String] the SDK that the target should use. + # + def sdk + common_resolved_build_setting('SDKROOT') + end + + # @return [Symbol] the name of the platform of the target. + # + def platform_name + return unless sdk + if sdk.include? 'iphoneos' + :ios + elsif sdk.include? 'macosx' + :osx + elsif sdk.include? 'appletvos' + :tvos + elsif sdk.include? 'watchos' + :watchos + end + end + + # @return [String] the version of the SDK. + # + def sdk_version + return unless sdk + sdk.scan(/[0-9.]+/).first + end + + # @visibility private + # + # @return [Hash<Symbol, String>] + # The name of the setting for the deployment target by platform + # name. + # + DEPLOYMENT_TARGET_SETTING_BY_PLATFORM_NAME = { + :ios => 'IPHONEOS_DEPLOYMENT_TARGET', + :osx => 'MACOSX_DEPLOYMENT_TARGET', + :tvos => 'TVOS_DEPLOYMENT_TARGET', + :watchos => 'WATCHOS_DEPLOYMENT_TARGET', + }.freeze + + # @return [String] the deployment target of the target according to its + # platform. + # + def deployment_target + return unless setting = DEPLOYMENT_TARGET_SETTING_BY_PLATFORM_NAME[platform_name] + common_resolved_build_setting(setting) + end + + # @param [String] deployment_target the deployment target to set for + # the target according to its platform. + # + def deployment_target=(deployment_target) + return unless setting = DEPLOYMENT_TARGET_SETTING_BY_PLATFORM_NAME[platform_name] + build_configurations.each do |config| + config.build_settings[setting] = deployment_target + end + end + + # @return [ObjectList<XCBuildConfiguration>] the build + # configurations of the target. + # + def build_configurations + build_configuration_list.build_configurations + end + + # Adds a new build configuration to the target and populates its with + # default settings according to the provided type if one doesn't + # exists. + # + # @note If a build configuration with the given name is already + # present no new build configuration is added. + # + # @param [String] name + # The name of the build configuration. + # + # @param [Symbol] type + # The type of the build configuration used to populate the build + # settings, must be :debug or :release. + # + # @return [XCBuildConfiguration] the created build configuration or the + # existing one with the same name. 
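# A sketch of the build-setting resolution helpers above. The project path and
# the values shown in the comments are illustrative and depend on the project.
require 'xcodeproj'

project = Xcodeproj::Project.open('App.xcodeproj')
target  = project.targets.first

target.resolved_build_setting('PRODUCT_NAME')
#=> e.g. { "Debug" => "$(TARGET_NAME)", "Release" => "$(TARGET_NAME)" }

target.common_resolved_build_setting('SDKROOT') #=> e.g. "iphoneos"
target.platform_name                            #=> e.g. :ios
target.sdk_version                              # nil when SDKROOT carries no version
target.deployment_target = '13.0'               # writes IPHONEOS_DEPLOYMENT_TARGET
project.save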
+ # + def add_build_configuration(name, type) + if existing = build_configuration_list[name] + existing + else + build_configuration = project.new(XCBuildConfiguration) + build_configuration.name = name + product_type = self.product_type if respond_to?(:product_type) + build_configuration.build_settings = ProjectHelper.common_build_settings(type, platform_name, deployment_target, product_type) + build_configuration_list.build_configurations << build_configuration + build_configuration + end + end + + # @param [String] build_configuration_name + # the name of a build configuration. + # + # @return [Hash] the build settings of the build configuration with the + # given name. + # + # + def build_settings(build_configuration_name) + build_configuration_list.build_settings(build_configuration_name) + end + + # @!group Build Phases Helpers + + # @return [PBXFrameworksBuildPhase] + # the frameworks build phases of the target. + # + def frameworks_build_phases + build_phases.find { |bp| bp.class == PBXFrameworksBuildPhase } + end + + # @return [Array<PBXCopyFilesBuildPhase>] + # the copy files build phases of the target. + # + def copy_files_build_phases + build_phases.grep(PBXCopyFilesBuildPhase) + end + + # @return [Array<PBXShellScriptBuildPhase>] + # the shell script build phases of the target. + # + def shell_script_build_phases + build_phases.grep(PBXShellScriptBuildPhase) + end + + # Adds a dependency on the given target. + # + # @param [AbstractTarget] target + # the target which should be added to the dependencies list of + # the receiver. The target may be a target of this target's + # project or of a subproject of this project. Note that the + # subproject must already be added to this target's project. + # + # @return [void] + # + def add_dependency(target) + unless dependency_for_target(target) + container_proxy = project.new(Xcodeproj::Project::PBXContainerItemProxy) + if target.project == project + container_proxy.container_portal = project.root_object.uuid + else + subproject_reference = project.reference_for_path(target.project.path) + raise ArgumentError, 'add_dependency received target that belongs to a project that is not this project and is not a subproject of this project' unless subproject_reference + container_proxy.container_portal = subproject_reference.uuid + end + container_proxy.proxy_type = Constants::PROXY_TYPES[:native_target] + container_proxy.remote_global_id_string = target.uuid + container_proxy.remote_info = target.name + + dependency = project.new(Xcodeproj::Project::PBXTargetDependency) + dependency.name = target.name + dependency.target = target if target.project == project + dependency.target_proxy = container_proxy + + dependencies << dependency + end + end + + # Checks whether this target has a dependency on the given target. + # + # @param [AbstractTarget] target + # the target to search for. + # + # @return [PBXTargetDependency] + # + def dependency_for_target(target) + dependencies.find do |dep| + if dep.target_proxy.remote? + subproject_reference = project.reference_for_path(target.project.path) + uuid = subproject_reference.uuid if subproject_reference + dep.target_proxy.remote_global_id_string == target.uuid && dep.target_proxy.container_portal == uuid + else + dep.target.uuid == target.uuid + end + end + end + + # Creates a new copy files build phase. + # + # @param [String] name + # an optional name for the phase. + # + # @return [PBXCopyFilesBuildPhase] the new phase. 
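# A sketch of the configuration and dependency helpers above; the target names
# ('App', 'Core') and the 'Staging' configuration are hypothetical.
require 'xcodeproj'

project   = Xcodeproj::Project.open('App.xcodeproj')
app       = project.targets.find { |t| t.name == 'App' }
framework = project.targets.find { |t| t.name == 'Core' }

staging = app.add_build_configuration('Staging', :release)
staging.build_settings['OTHER_SWIFT_FLAGS'] = '-DSTAGING'

app.add_dependency(framework)        # no-op if the dependency already exists
app.dependency_for_target(framework) #=> PBXTargetDependency
project.save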
+ # + def new_copy_files_build_phase(name = nil) + phase = project.new(PBXCopyFilesBuildPhase) + phase.name = name + build_phases << phase + phase + end + + # Creates a new shell script build phase. + # + # @param (see #new_copy_files_build_phase) + # + # @return [PBXShellScriptBuildPhase] the new phase. + # + def new_shell_script_build_phase(name = nil) + phase = project.new(PBXShellScriptBuildPhase) + phase.name = name + build_phases << phase + phase + end + + public + + # @!group System frameworks + #--------------------------------------# + + # Adds a file reference for one or more system framework to the project + # if needed and adds them to the Frameworks build phases. + # + # @param [Array<String>, String] names + # The name or the list of the names of the framework. + # + # @note Xcode behaviour is following: if the target has the same SDK + # of the project it adds the reference relative to the SDK root + # otherwise the reference is added relative to the Developer + # directory. This can create confusion or duplication of the + # references of frameworks linked by iOS and OS X targets. For + # this reason the new Xcodeproj behaviour is to add the + # frameworks in a subgroup according to the platform. + # + # @return [Array<PBXFileReference>] An array of the newly created file + # references. + # + def add_system_framework(names) + Array(names).map do |name| + case platform_name + when :ios + group = project.frameworks_group['iOS'] || project.frameworks_group.new_group('iOS') + path_sdk_name = 'iPhoneOS' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_IOS_SDK + when :osx + group = project.frameworks_group['OS X'] || project.frameworks_group.new_group('OS X') + path_sdk_name = 'MacOSX' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_OSX_SDK + when :tvos + group = project.frameworks_group['tvOS'] || project.frameworks_group.new_group('tvOS') + path_sdk_name = 'AppleTVOS' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_TVOS_SDK + when :watchos + group = project.frameworks_group['watchOS'] || project.frameworks_group.new_group('watchOS') + path_sdk_name = 'WatchOS' + path_sdk_version = sdk_version || Constants::LAST_KNOWN_WATCHOS_SDK + else + raise 'Unknown platform for target' + end + + path = "Platforms/#{path_sdk_name}.platform/Developer/SDKs/#{path_sdk_name}#{path_sdk_version}.sdk/System/Library/Frameworks/#{name}.framework" + unless ref = group.find_file_by_path(path) + ref = group.new_file(path, :developer_dir) + end + frameworks_build_phase.add_file_reference(ref, true) + ref + end + end + alias_method :add_system_frameworks, :add_system_framework + + # Adds a file reference for one or more system dylib libraries to the project + # if needed and adds them to the Frameworks build phases. + # + # @param [Array<String>, String] names + # The name or the list of the names of the libraries. 
+ # + # @return [void] + # + def add_system_library(names) + add_system_library_extension(names, 'dylib') + end + alias_method :add_system_libraries, :add_system_library + + def add_system_library_extension(names, extension) + Array(names).each do |name| + path = "usr/lib/lib#{name}.#{extension}" + files = project.frameworks_group.files + unless reference = files.find { |ref| ref.path == path } + reference = project.frameworks_group.new_file(path, :sdk_root) + end + frameworks_build_phase.add_file_reference(reference, true) + reference + end + end + private :add_system_library_extension + + # Adds a file reference for one or more system tbd libraries to the project + # if needed and adds them to the Frameworks build phases. + # + # @param [Array<String>, String] names + # The name or the list of the names of the libraries. + # + # @return [void] + # + def add_system_library_tbd(names) + add_system_library_extension(names, 'tbd') + end + alias_method :add_system_libraries_tbd, :add_system_library_tbd + + public + + # @!group AbstractObject Hooks + #--------------------------------------# + + # @return [Hash{String => Hash}] A hash suitable to display the object + # to the user. + # + def pretty_print + { + display_name => { + 'Build Phases' => build_phases.map(&:pretty_print), + 'Build Configurations' => build_configurations.map(&:pretty_print), + }, + } + end + end + + #-----------------------------------------------------------------------# + + # Represents a target handled by Xcode. + # + class PBXNativeTarget < AbstractTarget + # @!group Attributes + + # @return [PBXBuildRule] the build rules of this target. + # + has_many :build_rules, PBXBuildRule + + # @return [String] the build product type identifier. + # + attribute :product_type, String + + # @return [PBXFileReference] the reference to the product file. + # + has_one :product_reference, PBXFileReference + + # @return [ObjectList<XCSwiftPackageProductDependency>] the Swift package products necessary to + # build this target. + # + has_many :package_product_dependencies, XCSwiftPackageProductDependency + + # @return [String] the install path of the product. + # + attribute :product_install_path, String + + # @return [ObjectList<AbstractBuildPhase>] the build phases of the + # target. + # + # @note Apparently only PBXCopyFilesBuildPhase and + # PBXShellScriptBuildPhase can appear multiple times in a + # target. + # + has_many :build_phases, AbstractBuildPhase + + public + + # @!group Helpers + #--------------------------------------# + + # @return [Symbol] The type of the target expressed as a symbol. + # + def symbol_type + Constants::PRODUCT_TYPE_UTI.key(product_type) + end + + # @return [Boolean] Whether the target is a test target. + # + def test_target_type? + case symbol_type + when :octest_bundle, :unit_test_bundle, :ui_test_bundle + true + else + false + end + end + + # @return [Boolean] Whether the target is an extension. + # + def extension_target_type? + case symbol_type + when :app_extension, :watch_extension, :watch2_extension, :tv_extension, :messages_extension + true + else + false + end + end + + # @return [Boolean] Whether the target is launchable. + # + def launchable_target_type? + case symbol_type + when :application, :command_line_tool + true + else + false + end + end + + # Adds source files to the target. + # + # @param [Array<PBXFileReference>] file_references + # the files references of the source files that should be added + # to the target. 
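# A sketch of the build-phase and system-framework helpers defined above; the
# framework and library names are only examples.
require 'xcodeproj'

project = Xcodeproj::Project.open('App.xcodeproj')
target  = project.targets.first

phase = target.new_shell_script_build_phase('Lint')
phase.shell_script = 'echo linting'

target.add_system_frameworks(%w(UIKit CoreData)) # grouped per platform under Frameworks
target.add_system_library('z')                   # usr/lib/libz.dylib
target.add_system_library_tbd('sqlite3')         # usr/lib/libsqlite3.tbd
project.save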
+ # + # @param [String] compiler_flags + # the compiler flags for the source files. + # + # @yield_param [PBXBuildFile] each created build file. + # + # @return [Array<PBXBuildFile>] the created build files. + # + def add_file_references(file_references, compiler_flags = {}) + file_references.map do |file| + extension = File.extname(file.path).downcase + header_extensions = Constants::HEADER_FILES_EXTENSIONS + is_header_phase = header_extensions.include?(extension) + phase = is_header_phase ? headers_build_phase : source_build_phase + + unless build_file = phase.build_file(file) + build_file = project.new(PBXBuildFile) + build_file.file_ref = file + phase.files << build_file + end + + if compiler_flags && !compiler_flags.empty? && !is_header_phase + (build_file.settings ||= {}).merge!('COMPILER_FLAGS' => compiler_flags) do |_, old, new| + [old, new].compact.join(' ') + end + end + + yield build_file if block_given? + + build_file + end + end + + # Adds resource files to the resources build phase of the target. + # + # @param [Array<PBXFileReference>] resource_file_references + # the files references of the resources to the target. + # + # @return [void] + # + def add_resources(resource_file_references) + resource_file_references.each do |file| + next if resources_build_phase.include?(file) + build_file = project.new(PBXBuildFile) + build_file.file_ref = file + resources_build_phase.files << build_file + end + end + + # Adds on demand resources to the resources build phase of the target. + # + # @param {String => [Array<PBXFileReference>]} on_demand_resource_tag_files + # the files references of the on demand resources to add to the target keyed by the tag. + # + # @return [void] + # + def add_on_demand_resources(on_demand_resource_tag_files) + on_demand_resource_tag_files.each do |tag, file_refs| + file_refs.each do |file_ref| + if resources_build_phase.include?(file_ref) + existing_build_file = resources_build_phase.build_file(file_ref) + existing_build_file.settings ||= {} + existing_build_file.settings['ASSET_TAGS'] ||= [] + existing_build_file.settings['ASSET_TAGS'] << tag + existing_build_file.settings['ASSET_TAGS'].uniq! + next + end + build_file = resources_build_phase.add_file_reference(file_ref, true) + build_file.settings = (build_file.settings ||= {}).merge('ASSET_TAGS' => [tag]) + end + end + end + + # Remove on demand resources from the resources build phase of the target. + # + # @param {String => [Array<PBXFileReference>]} on_demand_resource_tag_files + # the files references of the on demand resources to add to the target keyed by the tag. + # + # @return [void] + # + def remove_on_demand_resources(on_demand_resource_tag_files) + on_demand_resource_tag_files.each do |tag, file_refs| + file_refs.each do |file_ref| + build_file = resources_build_phase.build_file(file_ref) + next if build_file.nil? + asset_tags = build_file.settings['ASSET_TAGS'] + asset_tags.delete(tag) + resources_build_phase.remove_file_reference(file_ref) if asset_tags.empty? + end + end + end + + # Finds or creates the headers build phase of the target. + # + # @note A target should have only one headers build phase. + # + # @return [PBXHeadersBuildPhase] the headers build phase. + # + def headers_build_phase + find_or_create_build_phase_by_class(PBXHeadersBuildPhase) + end + + # Finds or creates the source build phase of the target. + # + # @note A target should have only one source build phase. + # + # @return [PBXSourcesBuildPhase] the source build phase. 
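# A sketch of the source/resource helpers above. Header files are routed to
# the headers phase, so the per-file COMPILER_FLAGS only land on the source
# file. All file paths and the asset tag are hypothetical.
require 'xcodeproj'

project = Xcodeproj::Project.open('App.xcodeproj')
target  = project.targets.first
group   = project.main_group

source_ref = group.new_reference('Legacy/Parser.m')
header_ref = group.new_reference('Legacy/Parser.h')
asset_ref  = group.new_reference('Resources/Media.xcassets')

target.add_file_references([source_ref, header_ref], '-fno-objc-arc')
target.add_resources([asset_ref])
target.add_on_demand_resources('level1' => [asset_ref])
project.save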
+ # + def source_build_phase + find_or_create_build_phase_by_class(PBXSourcesBuildPhase) + end + + # Finds or creates the frameworks build phase of the target. + # + # @note A target should have only one frameworks build phase. + # + # @return [PBXFrameworksBuildPhase] the frameworks build phase. + # + def frameworks_build_phase + find_or_create_build_phase_by_class(PBXFrameworksBuildPhase) + end + + # Finds or creates the resources build phase of the target. + # + # @note A target should have only one resources build phase. + # + # @return [PBXResourcesBuildPhase] the resources build phase. + # + def resources_build_phase + find_or_create_build_phase_by_class(PBXResourcesBuildPhase) + end + + private + + # @!group Internal Helpers + #--------------------------------------# + + # Find or create a build phase by a given class + # + # @param [Class] phase_class the class of the build phase to find or create. + # + # @return [AbstractBuildPhase] the build phase whose class match the given phase_class. + # + def find_or_create_build_phase_by_class(phase_class) + @phases ||= {} + unless phase_class < AbstractBuildPhase + raise ArgumentError, "#{phase_class} must be a subclass of #{AbstractBuildPhase.class}" + end + @phases[phase_class] ||= build_phases.find { |bp| bp.class == phase_class } || + project.new(phase_class).tap { |bp| build_phases << bp } + end + + public + + # @!group AbstractObject Hooks + #--------------------------------------# + + # Sorts the to many attributes of the object according to the display + # name. + # + # Build phases are not sorted as they order is relevant. + # + def sort(_options = nil) + attributes_to_sort = to_many_attributes.reject { |attr| attr.name == :build_phases } + attributes_to_sort.each do |attrb| + list = attrb.get_value(self) + list.sort! do |x, y| + x.display_name <=> y.display_name + end + end + end + + def to_hash_as(method = :to_hash) + hash_as = super + if !hash_as['packageProductDependencies'].nil? && hash_as['packageProductDependencies'].empty? + hash_as.delete('packageProductDependencies') + end + hash_as + end + + def to_ascii_plist + plist = super + if !plist.value['packageProductDependencies'].nil? && plist.value['packageProductDependencies'].empty? + plist.value.delete('packageProductDependencies') + end + plist + end + end + + #-----------------------------------------------------------------------# + + # Represents a target that only consists in a aggregate of targets. + # + # @todo Apparently it can't have build rules. + # + class PBXAggregateTarget < AbstractTarget + # @!group Attributes + + # @return [PBXBuildRule] the build phases of the target. + # + # @note Apparently only PBXCopyFilesBuildPhase and + # PBXShellScriptBuildPhase can appear multiple times in a + # target. + # + has_many :build_phases, [PBXCopyFilesBuildPhase, PBXShellScriptBuildPhase] + end + + #-----------------------------------------------------------------------# + + # Represents a legacy target which uses an external build tool. + # + # Apparently it can't have any build phase but the attribute can be + # present. 
+ # + class PBXLegacyTarget < AbstractTarget + # @!group Attributes + + # @return [String] e.g "Dir" + # + attribute :build_working_directory, String + + # @return [String] e.g "$(ACTION)" + # + attribute :build_arguments_string, String + + # @return [String] e.g "1" + # + attribute :pass_build_settings_in_environment, String + + # @return [String] e.g "/usr/bin/make" + # + attribute :build_tool_path, String + + # @return [PBXBuildRule] the build phases of the target. + # + # @note Apparently only PBXCopyFilesBuildPhase and + # PBXShellScriptBuildPhase can appear multiple times in a + # target. + # + has_many :build_phases, AbstractBuildPhase + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/reference_proxy.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/reference_proxy.rb new file mode 100644 index 0000000..48aedbf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/reference_proxy.rb @@ -0,0 +1,67 @@ +module Xcodeproj + class Project + module Object + # Apparently a proxy for a reference object which might belong another + # project contained in the same workspace of the project document. + # + # This class is used for referencing the products of another project. + # + class PBXReferenceProxy < AbstractObject + # @!group Attributes + + # @return [String] the name of the reference. + # + attribute :name, String + + # @return [String] the path of the referenced filed. + # + attribute :path, String + + # @return [String] the file type of the referenced filed. + # + attribute :file_type, String + + # @return [PBXContainerItemProxy] the proxy to the project that + # contains the object. + # + has_one :remote_ref, PBXContainerItemProxy + + # @return [String] the source tree for the path of the reference. + # + # @example + # "BUILT_PRODUCTS_DIR" + # + attribute :source_tree, String + + #---------------------------------------------------------------------# + + public + + # @!group Helpers + + # Checks whether the reference is a proxy. + # + # @return [Bool] always true for this ISA. + # + def proxy? + true + end + + #---------------------------------------------------------------------# + + def ascii_plist_annotation + " #{name || path && File.basename(path)} " + end + + # @return [String] A name suitable for displaying the object to the + # user. + # + def display_name + return name if name + return path if path + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/root_object.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/root_object.rb new file mode 100644 index 0000000..a7cf6f2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/root_object.rb @@ -0,0 +1,100 @@ +module Xcodeproj + class Project + module Object + # This class represents the root object of a project document. + # + class PBXProject < AbstractObject + # @!group Attributes + + # @return [ObjectList<AbstractTarget>] a list of all the targets in + # the project. + # + has_many :targets, AbstractTarget + + # @return [Hash{String => String}] attributes the attributes of the + # target. 
+ # + # @note The hash might contain the following keys: + # + # - `CLASSPREFIX` + # - `LastUpgradeCheck` + # - `ORGANIZATIONNAME` + # + attribute :attributes, Hash, + 'LastSwiftUpdateCheck' => Constants::LAST_SWIFT_UPGRADE_CHECK, + 'LastUpgradeCheck' => Constants::LAST_UPGRADE_CHECK + + # @return [XCConfigurationList] the configuration list of the project. + # + has_one :build_configuration_list, XCConfigurationList + + # @return [String] the compatibility version of the project. + # + attribute :compatibility_version, String, 'Xcode 3.2' + + # @return [String] the development region of the project. + # + attribute :development_region, String, 'en' + + # @return [String] whether the project has scanned for encodings. + # + attribute :has_scanned_for_encodings, String, '0' + + # @return [Array<String>] the list of known regions. + # + attribute :known_regions, Array, %w(en Base) + + # @return [PBXGroup] the main group of the project. The one displayed + # by Xcode in the Project Navigator. + # + has_one :main_group, PBXGroup + + # @return [PBXGroup] the group containing the references to products of + # the project. + # + has_one :product_ref_group, PBXGroup + + # @return [String] the directory of the project. + # + attribute :project_dir_path, String, '' + + # @return [String] the root of the project. + # + attribute :project_root, String, '' + + # @return [Array<XCRemoteSwiftPackageReference>] the list of Swift package references. + # + has_many :package_references, XCRemoteSwiftPackageReference + + # @return [Array<ObjectDictionary>] any reference to other projects. + # + has_many_references_by_keys :project_references, + :project_ref => PBXFileReference, + :product_group => PBXGroup + + def name + project.path.basename('.xcodeproj').to_s + end + + def ascii_plist_annotation + ' Project object ' + end + + def to_hash_as(method = :to_hash) + hash_as = super + if !hash_as['packageReferences'].nil? && hash_as['packageReferences'].empty? + hash_as.delete('packageReferences') if !hash_as['packageReferences'].nil? && hash_as['packageReferences'].empty? + end + hash_as + end + + def to_ascii_plist + plist = super + plist.value.delete('projectReferences') if plist.value['projectReferences'].empty? + plist.value.delete('packageReferences') if !plist.value['packageReferences'].nil? && plist.value['packageReferences'].empty? + plist + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/swift_package_product_dependency.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/swift_package_product_dependency.rb new file mode 100644 index 0000000..4e5b918 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/swift_package_product_dependency.rb @@ -0,0 +1,29 @@ +module Xcodeproj + class Project + module Object + # This class represents a Swift package product dependency. + # + class XCSwiftPackageProductDependency < AbstractObject + # @!group Attributes + + # @return [XCRemoteSwiftPackageReference] the Swift package reference. + # + has_one :package, XCRemoteSwiftPackageReference + + # @return [String] the product name of this Swift package. + # + attribute :product_name, String + + # @!group AbstractObject Hooks + #--------------------------------------# + + # @return [String] the name of the Swift package. 
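# A sketch of the root-object (PBXProject) attributes defined above; the
# attribute values written here are purely illustrative.
require 'xcodeproj'

project = Xcodeproj::Project.open('App.xcodeproj')
root    = project.root_object             # the PBXProject instance

root.name                                 #=> "App" (derived from the project path)
root.attributes['LastUpgradeCheck'] = '1320'
root.known_regions |= ['de']
root.development_region                   #=> "en" by default
project.save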
+ # + def display_name + return product_name if product_name + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/swift_package_remote_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/swift_package_remote_reference.rb new file mode 100644 index 0000000..cf10331 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/swift_package_remote_reference.rb @@ -0,0 +1,33 @@ +module Xcodeproj + class Project + module Object + # This class represents a Swift package reference. + # + class XCRemoteSwiftPackageReference < AbstractObject + # @!group Attributes + + # @return [String] the repository url this Swift package was installed from. + # + attribute :repositoryURL, String + + # @return [Hash] the version requirements for this Swift package. + # + attribute :requirement, Hash + + # @!group AbstractObject Hooks + #--------------------------------------# + + def ascii_plist_annotation + " #{isa} \"#{File.basename(display_name)}\" " + end + + # @return [String] the name of the Swift package repository. + # + def display_name + return repositoryURL if repositoryURL + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/target_dependency.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/target_dependency.rb new file mode 100644 index 0000000..43f4092 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object/target_dependency.rb @@ -0,0 +1,90 @@ +module Xcodeproj + class Project + module Object + # Represents a dependency of a target on another one. + # + class PBXTargetDependency < AbstractObject + # @!group Attributes + + # @return [PBXNativeTarget] the target that needs to be built to + # satisfy the dependency. + # + has_one :target, AbstractTarget + + # @return [PBXContainerItemProxy] a proxy for the target that needs to + # be built. + # + # @note Apparently to support targets in other projects of the same + # workspace. + # + has_one :target_proxy, PBXContainerItemProxy + + # @return [String] the name of the target. + # + # @note This seems only to be used when the target dependency is a + # target from a nested Xcode project. + # + attribute :name, String + + # @return [String] the platform filter for this target dependency. + # + attribute :platform_filter, String + + # @return [String] the product reference for this target dependency. + # + attribute :product_ref, String + + public + + # @!group AbstractObject Hooks + #--------------------------------------# + + # @return [String] The name of the dependency. + # + def display_name + return name if name + return target.name if target + return target_proxy.remote_info if target_proxy + end + + def ascii_plist_annotation + " #{isa} " + end + + # @return [String] uuid of the target, if the dependency + # is a native target, otherwise the uuid of the + # target in the sub-project if the dependency is + # a target proxy + # + def native_target_uuid + return target.uuid if target + return target_proxy.remote_global_id_string if target_proxy + raise "Expected target or target_proxy, from which to fetch a uuid for target '#{display_name}'." \ + "Find and clear the PBXTargetDependency entry with uuid '#{@uuid}' in your .xcodeproj." + end + + # @note This override is necessary because Xcode allows for circular + # target dependencies. 
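# A sketch wiring a Swift package into a target using the two classes above
# together with PBXProject#package_references and
# PBXNativeTarget#package_product_dependencies. The objects are constructed
# directly via Project#new; the repository URL, requirement hash and product
# name are examples of what Xcode itself writes, and a build-file entry in the
# frameworks phase may additionally be needed for linking.
require 'xcodeproj'

project = Xcodeproj::Project.open('App.xcodeproj')
target  = project.targets.first

pkg = project.new(Xcodeproj::Project::Object::XCRemoteSwiftPackageReference)
pkg.repositoryURL = 'https://github.com/apple/swift-algorithms'
pkg.requirement   = { 'kind' => 'upToNextMajorVersion', 'minimumVersion' => '1.0.0' }
project.root_object.package_references << pkg

dep = project.new(Xcodeproj::Project::Object::XCSwiftPackageProductDependency)
dep.package      = pkg
dep.product_name = 'Algorithms'
target.package_product_dependencies << dep
project.save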
+ # + # @return [Hash<String => String>] Returns a cascade representation of + # the object without UUIDs. + # + def to_tree_hash + hash = {} + hash['displayName'] = display_name + hash['isa'] = isa + hash['targetProxy'] = target_proxy.to_tree_hash + hash + end + + # @note This is a no-op, because the targets could theoretically depend + # on each other, leading to a stack level too deep error. + # + # @see AbstractObject#sort_recursively + # + def sort_recursively(_options = nil) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_attributes.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_attributes.rb new file mode 100644 index 0000000..e532565 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_attributes.rb @@ -0,0 +1,522 @@ +module Xcodeproj + class Project + module Object + # This class represents an attribute of {AbstractObject} subclasses. + # Attributes are created by the {AbstractObject} DSL methods and allow to + # mirror the underlying attributes of the xcodeproj document model. + # + # Attributes provide support for runtime type checking. They also allow + # {AbstractObject} initialization and serialization to plist. + # + # @todo Add support for a list of required values so objects can be + # validated before serialization ? + # + class AbstractObjectAttribute + # @return [Symbol] the type of the attribute. It can be `:simple`, + # `:to_one`, `:to_many`. + # + attr_reader :type + + # @return [Symbol] the name of the attribute. + # + attr_reader :name + + # @return [Class] the class that owns the attribute. + # + attr_accessor :owner + + # Creates a new attribute with the given type and name. + # + # Attributes are expected to be instantiated only by the + # {AbstractObject} DSL methods. + # + # @param [Symbol] type + # the type of the attribute. + # + # @param [Symbol] name + # the name of the attribute. + # + # @param [Class] owner + # the class that owns the attribute. + # + def initialize(type, name, owner) + @type = type + @name = name + @owner = owner + end + + # @return [String] The name of the attribute in camel case. + # + # @example + # attribute.new(:simple, :project_root) + # attribute.plist_name #=> projectRoot + # + def plist_name + @plist_name ||= CaseConverter.convert_to_plist(name, :lower) + end + + # @return [Array<Class>] the list of the classes accepted by the + # attribute. + # + attr_accessor :classes + + # @return [{Symbol, Array<Class>}] the list of the classes accepted by + # each key for attributes which store a dictionary. + # + attr_accessor :classes_by_key + + # @return [String, Array, Hash] the default value, if any, for simple + # attributes. + # + attr_accessor :default_value + + # Convenience method that returns the value of this attribute for a + # given object. + # + # @param [AbstractObject] object + # the object for which the value of this attribute is requested. + # + # @return [String, Array, Hash, AbstractObject, ObjectList] + # the value. + # + def get_value(object) + object.send(name) + end + + # Convenience method that sets the value of this attribute for a + # given object. It makes sense only for `:simple` or `:to_one` + # attributes. + # + # @raise It the type of this attribute is `:to_many`. + # + # @param [AbstractObject] object + # the object for which to set the value. + # + # @param [String, Hash, Array, AbstractObject] new_value + # the value to set for the attribute. 
+ # + # @return [void] + # + def set_value(object, new_value) + if type == :to_many + raise '[Xcodeproj] Set value called for a to-many attribute' + end + object.send("#{name}=", new_value) + end + + # Convenience method that sets the value of this attribute for a given + # object to the default (if any). It makes sense only for `:simple` + # attributes. + # + # @param [AbstractObject] object + # the object for which to set the default value. + # + # @note It is extremely important to duplicate the default values + # otherwise kittens cry! + # + # @return [void] + # + def set_default(object) + unless type == :simple + raise "[Xcodeproj] Set value called for a #{type} attribute" + end + set_value(object, default_value.dup) if default_value + end + + # Checks that a given value is compatible with the attribute. + # + # This method powers the runtime type checking of the {AbstractObject} + # and is used its by synthesised methods. + # + # @raise If the class of the value is not compatible with the attribute. + # + # @return [void] + # + def validate_value(object) + return unless object + acceptable = classes.find { |klass| object.class == klass || object.class < klass } + if type == :simple + raise "[Xcodeproj] Type checking error: got `#{object.class}` " \ + "for attribute: #{inspect}" unless acceptable + else + raise "[Xcodeproj] Type checking error: got `#{object.isa}` for " \ + "attribute: #{inspect}" unless acceptable + end + end + + # Checks that a given value is compatible with a key for attributes + # which store references by key. + # + # This method is used by the #{ObjectDictionary} class. + # + # @raise If the class of the value is not compatible with the given + # key. + # + def validate_value_for_key(object, key) + unless type == :references_by_keys + raise '[Xcodeproj] This method should be called only for ' \ + 'attributes of type `references_by_keys`' + end + + unless classes_by_key.keys.include?(key) + raise "[Xcodeproj] unsupported key `#{key}` " \ + "(accepted `#{classes_by_key.keys}`) for attribute `#{inspect}`" + end + + return unless object + classes = Array(classes_by_key[key]) + acceptable = classes.find { |klass| object.class == klass || object.class < klass } + unless acceptable + raise "[Xcodeproj] Type checking error: got `#{object.isa}` " \ + "for key `#{key}` (which accepts `#{classes}`) of " \ + "attribute: `#{inspect}`" + end + end + + # @return [String] A string suitable for debugging the object. + # + def inspect + if type == :simple + "Attribute `#{plist_name}` (type: `#{type}`, classes: " \ + "`#{classes}`, owner class: `#{owner.isa}`)" + else + "Attribute `#{plist_name}` (type: `#{type}`, classes: " \ + "`#{classes.map(&:isa)}`, owner class: `#{owner.isa}`)" + end + end + end + + class AbstractObject + # The {AbstractObject} DSL methods allow to specify with fidelity the + # underlying model of the xcodeproj document format. {AbstractObject} + # subclasses should specify their attributes through the following + # methods: + # + # - `{AbstractObject.attribute}` + # - `{AbstractObject.has_one}` + # - `{AbstractObject.has_many}` + # + # @note The subclasses should not interfere with the methods + # synthesised by the DSL and should only implement helpers in top + # of them. + # + # @note Attributes are typed and are validated at runtime. + # + class << self + # @return [Array<AbstractObjectAttribute>] the attributes associated + # with the class. + # + # @note It includes the attributes defined in the superclass and the + # list is cleaned for duplicates. 
Subclasses should not duplicate + # an attribute of the superclass but for the method implementation + # they will duplicate them. + # + # @visibility private + # + def attributes + unless @full_attributes + attributes = @attributes || [] + if superclass.respond_to?(:attributes) + super_attributes = superclass.attributes + else + super_attributes = [] + end + # The uniqueness of the attributes is very important because the + # initialization from plist deletes the values from the + # dictionary. + @full_attributes = attributes.concat(super_attributes).uniq + end + @full_attributes + end + + # @return [Array<AbstractObjectAttribute>] the simple attributes + # associated with with the class. + # + # @visibility private + # + def simple_attributes + @simple_attributes ||= attributes.select { |a| a.type == :simple } + end + + # @return [Array<AbstractObjectAttribute>] the attributes + # representing a to one relationship associated with with the + # class. + # + # @visibility private + # + def to_one_attributes + @to_one_attributes ||= attributes.select { |a| a.type == :to_one } + end + + # @return [Array<AbstractObjectAttribute>] the attributes + # representing a to many relationship associated with with the + # class. + # + # @visibility private + # + def to_many_attributes + @to_many_attributes ||= attributes.select { |a| a.type == :to_many } + end + + # @visibility private + # + def references_by_keys_attributes + @references_by_keys_attributes ||= attributes.select { |a| a.type == :references_by_keys } + end + + private + + # Defines a new simple attribute and synthesises the corresponding + # methods. + # + # @note Simple attributes are directly stored in a hash. They can + # contain only a string, array of strings or a hash containing + # strings and thus they are not affected by reference counting. + # Clients can access the hash directly through the + # {AbstractObject#simple_attributes_hash} method. + # + # @param [Symbol] name + # the name of the attribute. + # + # @param [Class] klass + # the accepted {Class} for the values of the attribute. + # + # @param [String, Array<String>, Hash{String=>String}] default_value + # the default value for new objects. + # + # @example + # attribute :project_root + # #=> leads to the creation of the following methods + # + # def project_root + # @simple_attributes_hash[projectRoot] + # end + # + # def project_root=(value) + # attribute.validate_value(value) + # @simple_attributes_hash[projectRoot] = value + # end + # + # @macro [attach] attribute + # @!attribute [rw] $1 + # + def attribute(name, klass, default_value = nil) + attrb = AbstractObjectAttribute.new(:simple, name, self) + attrb.classes = [klass] + attrb.default_value = default_value + add_attribute(attrb) + + define_method(attrb.name) do + @simple_attributes_hash ||= {} + @simple_attributes_hash[attrb.plist_name] + end + + define_method("#{attrb.name}=") do |value| + @simple_attributes_hash ||= {} + attrb.validate_value(value) + + existing = @simple_attributes_hash[attrb.plist_name] + if existing.is_a?(Hash) && value.is_a?(Hash) + return value if existing.keys == value.keys && existing == value + elsif existing == value + return value + end + mark_project_as_dirty! + @simple_attributes_hash[attrb.plist_name] = value + end + end + + # rubocop:disable Style/PredicateName + + # Defines a new relationship to a single and synthesises the + # corresponding methods. + # + # @note The synthesised setter takes care of handling reference + # counting directly. 
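# A sketch of what the DSL above synthesizes, using PBXFileReference as an
# example consumer of `attribute`/`has_one`/`has_many`. The metadata readers
# are marked @visibility private in the docs but are callable; the project
# path is hypothetical.
require 'xcodeproj'

klass = Xcodeproj::Project::Object::PBXFileReference
klass.simple_attributes.map(&:name)  # includes :name, :path and :source_tree
klass.to_many_attributes.map(&:name)

project = Xcodeproj::Project.open('App.xcodeproj')
ref = project.main_group.new_reference('Sources/Foo.swift')
ref.include_in_index = '0'   # simple attribute, type-checked at runtime
# ref.include_in_index = 0   # would raise: Integer is not an accepted class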
+ # + # @param [String] singular_name + # the name of the relationship. + # + # @param [Class, Array<Class>] isas + # the list of the classes corresponding to the accepted isas for + # this relationship. + # + # @macro [attach] has_one + # @!attribute [rw] $1 + # + def has_one(singular_name, isas) + isas = [isas] unless isas.is_a?(Array) + attrb = AbstractObjectAttribute.new(:to_one, singular_name, self) + attrb.classes = isas + add_attribute(attrb) + + attr_reader(attrb.name) + # 1.9.2 fix, see https://github.com/CocoaPods/Xcodeproj/issues/40. + public(attrb.name) + + variable_name = :"@#{attrb.name}" + define_method("#{attrb.name}=") do |value| + attrb.validate_value(value) + + previous_value = send(attrb.name) + return value if previous_value == value + mark_project_as_dirty! + previous_value.remove_referrer(self) if previous_value + instance_variable_set(variable_name, value) + value.add_referrer(self) if value + end + end + + # Defines a new ordered relationship to many. + # + # @note This attribute only generates the reader method. Clients are + # not supposed to create {ObjectList} objects which are created + # by the methods synthesised by this attribute on demand. + # Clients, however can mutate the list according to its + # interface. The list is responsible to manage the reference + # counting for its values. + # + # @param [String] plural_name + # the name of the relationship. + # + # @param [Class, Array<Class>] isas + # the list of the classes corresponding to the accepted isas for + # this relationship. + # + # @macro [attach] has_many + # @!attribute [r] $1 + # + def has_many(plural_name, isas) + isas = [isas] unless isas.is_a?(Array) + + attrb = AbstractObjectAttribute.new(:to_many, plural_name, self) + attrb.classes = isas + add_attribute(attrb) + + variable_name = :"@#{attrb.name}" + define_method(attrb.name) do + # Here we are in the context of the instance + list = instance_variable_get(variable_name) + unless list + list = ObjectList.new(attrb, self) + instance_variable_set(variable_name, list) + end + list + end + end + + # Defines a new ordered relationship to many. + # + # @note This attribute only generates the reader method. Clients are + # not supposed to create {ObjectList} objects which are created + # by the methods synthesised by this attribute on demand. + # Clients, however can mutate the list according to its + # interface. The list is responsible to manage the reference + # counting for its values. + # + # @param [String] plural_name + # the name of the relationship. + # + # @param [{Symbol, Array<Class>}] classes_by_key + # the list of the classes corresponding to the accepted isas for + # this relationship. + # + # @macro [attach] has_many + # @!attribute [r] $1 + # + def has_many_references_by_keys(plural_name, classes_by_key) + attrb = AbstractObjectAttribute.new(:references_by_keys, plural_name, self) + attrb.classes = classes_by_key.values + attrb.classes_by_key = classes_by_key + add_attribute(attrb) + + variable_name = :"@#{attrb.name}" + define_method(attrb.name) do + # Here we are in the context of the instance + list = instance_variable_get(variable_name) + unless list + list = ObjectList.new(attrb, self) + instance_variable_set(variable_name, list) + end + list + end + end + + # rubocop:enable Style/PredicateName + + protected + + # Adds an attribute to the list of attributes of the class. 
+ # + # @note This method is intended to be invoked only by the + # {AbstractObject} meta programming methods + # + # @return [void] + # + def add_attribute(attribute) + unless attribute.classes + raise "[Xcodeproj] BUG - missing classes for #{attribute.inspect}" + end + + unless attribute.classes.all? { |klass| klass.is_a?(Class) } + raise "[Xcodeproj] BUG - classes:#{attribute.classes} for #{attribute.inspect}" + end + + @attributes ||= [] + @attributes << attribute + end + end # AbstractObject << self + + private + + # @return [Hash] the simple attributes hash. + # + attr_reader :simple_attributes_hash + + public + + # @!group xcodeproj format attributes + + # @return (see AbstractObject.attributes) + # + # @visibility private + # + def attributes + self.class.attributes + end + + # @return (see AbstractObject.simple_attributes) + # + # @visibility private + # + def simple_attributes + self.class.simple_attributes + end + + # @return (see AbstractObject.to_one_attributes) + # + # @visibility private + # + def to_one_attributes + self.class.to_one_attributes + end + + # @return (see AbstractObject.to_many_attributes) + # + # @visibility private + # + def to_many_attributes + self.class.to_many_attributes + end + + # @return (see AbstractObject.to_many_attributes) + # + # @visibility private + # + def references_by_keys_attributes + self.class.references_by_keys_attributes + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_dictionary.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_dictionary.rb new file mode 100644 index 0000000..f664057 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_dictionary.rb @@ -0,0 +1,210 @@ +module Xcodeproj + class Project + # This class represents relationships to other objects stored in a + # Dictionary. + # + # It works in conjunction with the {AbstractObject} class to ensure that + # the project is not serialized with unreachable objects by updating the + # with reference count on modifications. + # + # @note To provide full support as the other classes the dictionary should: + # + # Give the following attribute: + # + # has_many_references_by_keys :project_references, { + # :project_ref => PBXFileReference, + # :product_group => PBXGroup + # } + # + # This should be possible: + # + # #=> Note the API: + # root_object.project_references.project_ref = file + # + # #=> This should raise: + # root_object.project_references.product_group = file + # + # I.e. generate setters and getters from the specification hash. + # + # Also the interface is a dirty hybrid between the + # {AbstractObjectAttribute} and the {ObjectList}. + # + # @note Concerning the mutations methods it is safe to call only those + # which are overridden to inform objects reference count. Ideally all + # the hash methods should be covered, but this is not done yet. + # Moreover it is a moving target because the methods of array + # usually are implemented in C. + # + # @todo This class should use a {Hash} as a backing store instead of + # inheriting from it. This would prevent the usage of methods which + # don't notify the objects. + # + class ObjectDictionary < Hash + # @param [Object::AbstractObjectAttribute] attribute @see #attribute + # @param [Object] owner @see #owner + # + def initialize(attribute, owner) + @attribute = attribute + @owner = owner + end + + # @return [Object::AbstractObjectAttribute] The attribute that generated + # the list. 
+ # + attr_reader :attribute + + # @return [Object] The object that owns the list. + # + attr_reader :owner + + # @return [Array<Symbol>] The list of the allowed keys. + # + def allowed_keys + attribute.classes_by_key.keys + end + + # @return [String] A string suitable for debugging. + # + def inspect + "<ObjectDictionary attribute:`#{@attribute.name}` " \ + "owner:`#{@owner.display_name}` values:#{super.inspect}>" + end + + # @!group Notification enabled methods + #------------------------------------------------------------------------# + + # Associates an object to the given key and updates its references count. + # + # @param [String] key + # The key. + # + # @param [AbstractObject] object + # The object to add to the dictionary. + # + # @return [AbstractObject] The given object. + # + def []=(key, object) + key = normalize_key(key) + if object + perform_additions_operations(object, key) + else + perform_deletion_operations(self[key]) + end + super(key, object) + end + + # Removes the given key from the dictionary and informs the object that + # is not longer referenced by the owner. + # + # @param [String] key + # The key. + # + def delete(key) + key = normalize_key(key) + object = self[key] + perform_deletion_operations(object) + super + end + + # @!group AbstractObject Methods + #-----------------------------------------------------------------------# + + # @return [Hash<String => String>] The plist representation of the + # dictionary where the objects are replaced by their UUIDs. + # + def to_hash + result = {} + each do |key, obj| + if obj + plist_key = Object::CaseConverter.convert_to_plist(key, nil) + result[plist_key] = Nanaimo::String.new(obj.uuid, obj.ascii_plist_annotation) + end + end + result + end + + def to_ascii_plist + to_hash + end + + # @return [Hash<String => String>] Returns a cascade representation of + # the object without UUIDs. + # + def to_tree_hash + result = {} + each do |key, obj| + if obj + plist_key = Object::CaseConverter.convert_to_plist(key, nil) + result[plist_key] = obj.to_tree_hash + end + end + result + end + + # Removes all the references to a given object. + # + def remove_reference(object) + each { |key, obj| self[key] = nil if obj == object } + end + + # Informs the objects contained in the dictionary that another object is + # referencing them. + # + def add_referrer(referrer) + values.each { |obj| obj.add_referrer(referrer) } + end + + # Informs the objects contained in the dictionary that another object + # stopped referencing them. + # + def remove_referrer(referrer) + values.each { |obj| obj.remove_referrer(referrer) } + end + + private + + # @!group Private helpers + #------------------------------------------------------------------------# + + # @return [Symbol] Normalizes a key to a symbol converting the camel case + # format with underscores. + # + # @param [String, Symbol] key + # The key to normalize. + # + def normalize_key(key) + if key.is_a?(String) + key = Object::CaseConverter.convert_to_ruby(key) + end + + unless allowed_keys.include?(key) + raise "[Xcodeproj] Unsupported key `#{key}` (allowed " \ + "`#{allowed_keys}`) for `#{inspect}`" + end + key + end + + # Informs an object that it was added to the dictionary. In practice it + # adds the owner of the list as referrer to the objects. It also + # validates the value. + # + # @return [void] + # + def perform_additions_operations(object, key) + owner.mark_project_as_dirty! 
+ object.add_referrer(owner) + attribute.validate_value_for_key(object, key) + end + + # Informs an object that it was removed from to the dictionary, so it can + # remove it from its referrers and take the appropriate actions. + # + # @return [void] + # + def perform_deletion_operations(objects) + owner.mark_project_as_dirty! + objects.remove_referrer(owner) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_list.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_list.rb new file mode 100644 index 0000000..e1df1b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/object_list.rb @@ -0,0 +1,223 @@ +module Xcodeproj + class Project + # This class represents an ordered relationship to many objects. + # + # It works in conjunction with the {AbstractObject} class to ensure that + # the project is not serialized with unreachable objects by updating the + # with reference count on modifications. + # + # @note Concerning the mutations methods it is safe to call only those + # which are overridden to inform objects reference count. Ideally all + # the array methods should be covered, but this is not done yet. + # Moreover it is a moving target because the methods of array + # usually are implemented in C + # + # @todo Cover all the mutations methods of the {Array} class. + # + class ObjectList < Array + # {Xcodeproj} clients are not expected to create instances of + # {ObjectList}, it is always initialized empty and automatically by the + # synthesized methods generated by {AbstractObject.has_many}. + # + def initialize(attribute, owner) + @attribute = attribute + @owner = owner + end + + # @return [Array<Class>] the attribute that generated the list. + # + attr_reader :attribute + + # @return [Array<Class>] the object that owns the list. + # + attr_reader :owner + + # @return [Array<String>] + # the UUIDs of all the objects referenced by this list. + # + def uuids + map(&:uuid) + end + + # @return [Array<AbstractObject>] + # a new array generated with the objects contained in the list. + # + def objects + to_a + end + + public + + # @!group Notification enabled methods + #------------------------------------------------------------------------# + + # TODO: the overridden methods are incomplete. + + # Adds an array of objects to list and updates their references count. + # + # @param [Array<AbstractObject, ObjectDictionary>] objects + # an array of objects to add to the list. + # + # @return [void] + # + def +(other) + perform_additions_operations(other) + super + end + + # Appends an object to list the and updates its references count. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to add to the list. + # + # @return [void] + # + def <<(object) + perform_additions_operations(object) + super + end + + # Adds an object to the given index of the list the and updates its + # references count. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to add to the list. + # + # @return [void] + # + def insert(index, object) + perform_additions_operations(object) + super + end + + # Prepends an object to the list and updates its references count. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to add to the list. + # + # @return [void] + # + def unshift(object) + perform_additions_operations(object) + super + end + + # Removes an object to list and updates its references count. 
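+ #
+ # @example A minimal sketch (assuming `project.targets` currently contains `target`)
+ #
+ #   project.targets.delete(target)
+ #   # `target` no longer counts the list owner among its referrers, so it will
+ #   # not be serialized if nothing else in the project references it.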
+ # + # @param [AbstractObject, ObjectDictionary] object + # the object to delete from the list. + # + # @return [AbstractObject, ObjectDictionary, Nil] the object if found. + # + def delete(object) + perform_deletion_operations(object) + super + end + + # Removes the object at the given index from the list and updates its + # references count. + # + # @param [Fixnum] from + # The index of the object. + # + # @return [AbstractObject, ObjectDictionary, Nil] the object if found. + # + def delete_at(index) + object = at(index) + perform_deletion_operations(object) + super + end + + # Removes all the objects contained in the list and updates their + # reference counts. + # + # @return [void] + # + def clear + objects.each do |object| + perform_deletion_operations(object) + end + super + end + + # Moves the given object to the given index. + # + # @param [AbstractObject, ObjectDictionary] object + # The object to move. + # + # @param [Fixnum] to + # The new index for the object. + # + # @return [void] + # + def move(object, new_index) + return if index(object) == new_index + if obj = delete(object) + insert(new_index, obj) + else + raise "Attempt to move object `#{object}` not present in the list `#{inspect}`" + end + end + + # Moves the object at the given index to the given position. + # + # @param [Fixnum] from + # The current index of the object. + # + # @param [Fixnum] to + # The new index for the object. + # + # @return [void] + # + def move_from(current_index, new_index) + return if current_index == new_index + if obj = delete_at(current_index) + insert(new_index, obj) + else + raise "Attempt to move object from index `#{current_index}` which is beyond bounds of the list `#{inspect}`" + end + end + + def sort! + return super if owner.project.dirty? + previous = to_a + super + owner.mark_project_as_dirty! unless previous == to_a + self + end + + private + + # @!group Notification Methods + #------------------------------------------------------------------------# + + # Informs an object that it was added to the list. In practice it adds + # the owner of the list as referrer to the objects. It also validates the + # value. + # + # @return [void] + # + def perform_additions_operations(objects) + objects = [objects] unless objects.is_a?(Array) + objects.each do |obj| + owner.mark_project_as_dirty! + obj.add_referrer(owner) + attribute.validate_value(obj) unless obj.is_a?(ObjectDictionary) + end + end + + # Informs an object that it was removed from to the list, so it can + # remove its owner from its referrers and take the appropriate actions. + # + # @return [void] + # + def perform_deletion_operations(objects) + objects = [objects] unless objects.is_a?(Array) + objects.each do |obj| + owner.mark_project_as_dirty! + obj.remove_referrer(owner) unless obj.is_a?(ObjectDictionary) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/project_helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/project_helper.rb new file mode 100644 index 0000000..7b4eef9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/project_helper.rb @@ -0,0 +1,341 @@ +module Xcodeproj + class Project + module ProjectHelper + include Object + + # @!group Targets + + #-----------------------------------------------------------------------# + + # Creates a new target and adds it to the project. 
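+ #
+ # @example A minimal sketch (the `Project#new_target` convenience wrapper is
+ #          assumed here; it delegates to this helper)
+ #
+ #   target = project.new_target(:application, 'App', :ios, '14.0')
+ #   target.isa # => 'PBXNativeTarget'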
+ # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings, and the + # appropriate Framework according to the platform is added to to its + # Frameworks phase. + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [Symbol] type + # the type of target. Can be `:application`, `:dynamic_library`, + # `framework` or `:static_library`. + # + # @param [String] name + # the name of the target product. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [PBXGroup] product_group + # the product group, where to add to a file reference of the + # created target. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. + # + # @return [PBXNativeTarget] the target. + # + def self.new_target(project, type, name, platform, deployment_target, product_group, language, product_basename) + # Target + target = project.new(PBXNativeTarget) + project.targets << target + target.name = name + target.product_name = product_basename + target.product_type = Constants::PRODUCT_TYPE_UTI[type] + target.build_configuration_list = configuration_list(project, platform, deployment_target, type, language) + + # Product + product = product_group.new_product_ref_for_target(target.product_name, type) + target.product_reference = product + + # Build phases + build_phases_for_target_type(type).each { |phase| target.build_phases << project.new(phase) } + + # Frameworks + unless type == :static_library + framework_name = (platform == :osx) ? 'Cocoa' : 'Foundation' + target.add_system_framework(framework_name) + end + + target + end + + # Creates a new resource bundles target and adds it to the project. + # + # The target is configured for the given platform and its file reference it + # is added to the {products_group}. + # + # The target is pre-populated with common build settings + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [String] name + # the name of the resources bundle. + # + # @param [Symbol] platform + # the platform of the resources bundle. Can be `:ios` or `:osx`. + # + # @param [PBXGroup] product_group + # the product group, where to add to a file reference of the + # created target. + # + # @return [PBXNativeTarget] the target. 
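+ #
+ # @example A minimal sketch (the `Project#new_resources_bundle` convenience
+ #          wrapper is assumed here; it delegates to this helper)
+ #
+ #   bundle = project.new_resources_bundle('Assets', :ios)
+ #   bundle.product_type # => Constants::PRODUCT_TYPE_UTI[:bundle]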
+ # + def self.new_resources_bundle(project, name, platform, product_group, product_basename) + # Target + target = project.new(PBXNativeTarget) + project.targets << target + target.name = name + target.product_name = product_basename + target.product_type = Constants::PRODUCT_TYPE_UTI[:bundle] + + # Configuration List + cl = project.new(XCConfigurationList) + cl.default_configuration_is_visible = '0' + cl.default_configuration_name = 'Release' + release_conf = project.new(XCBuildConfiguration) + release_conf.name = 'Release' + release_conf.build_settings = common_build_settings(nil, platform, nil, target.product_type) + debug_conf = project.new(XCBuildConfiguration) + debug_conf.name = 'Debug' + debug_conf.build_settings = common_build_settings(nil, platform, nil, target.product_type) + cl.build_configurations << release_conf + cl.build_configurations << debug_conf + target.build_configuration_list = cl + + # Product + product = product_group.new_bundle(target.product_name) + target.product_reference = product + + # Build phases + build_phases_for_target_type(:bundle).each { |phase| target.build_phases << project.new(phase) } + + target + end + + # Creates a new aggregate target and adds it to the project. + # + # The target is configured for the given platform. + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [String] name + # the name of the aggregate target. + # + # @param [Symbol] platform + # the platform of the aggregate target. Can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @return [PBXAggregateTarget] the target. + # + def self.new_aggregate_target(project, name, platform, deployment_target) + target = project.new(PBXAggregateTarget) + project.targets << target + target.name = name + target.build_configuration_list = configuration_list(project, platform, deployment_target) + target + end + + # Creates a new legacy target and adds it to the project. + # + # The target is configured for the given platform. + # + # @param [Project] project + # the project to which the target should be added. + # + # @param [String] name + # the name of the aggregate target. + # + # @param [String] build_tool_path + # the build tool path to use for this target. + # + # @param [String] build_arguments_string + # the build arguments string to use for this target. + # + # @param [String] build_working_directory + # the build working directory to use for this target. + # + # @param [String] pass_build_settings_in_environment + # whether to pass build settings in the environment during execution of this target. + # + # @return [PBXLegacyTarget] the target. 
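+ #
+ # @example A minimal sketch calling the helper directly
+ #
+ #   target = Xcodeproj::Project::ProjectHelper.new_legacy_target(
+ #     project, 'Docs', '/usr/bin/make', '$(ACTION) docs')
+ #   target.isa # => 'PBXLegacyTarget'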
+ # + def self.new_legacy_target(project, name, build_tool_path = '/usr/bin/make', build_arguments_string = '$(ACTION)', + build_working_directory = nil, pass_build_settings_in_environment = '1') + target = project.new(PBXLegacyTarget) + project.targets << target + target.name = name + target.build_configuration_list = configuration_list(project) + target.build_tool_path = build_tool_path + target.build_arguments_string = build_arguments_string + target.build_working_directory = build_working_directory + target.pass_build_settings_in_environment = pass_build_settings_in_environment + target + end + + # @!group Private Helpers + + #-----------------------------------------------------------------------# + + # Returns a new configuration list, populated with release and debug + # configurations with common build settings for the given platform. + # + # @param [Project] project + # the project to which the configuration list should be added. + # + # @param [Symbol] platform + # the platform for the configuration list, can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [Symbol] target_product_type + # the product type of the target, can be any of `Constants::PRODUCT_TYPE_UTI.values` + # or `Constants::PRODUCT_TYPE_UTI.keys`. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. + # + # @return [XCConfigurationList] the generated configuration list. + # + def self.configuration_list(project, platform = nil, deployment_target = nil, target_product_type = nil, language = nil) + cl = project.new(XCConfigurationList) + cl.default_configuration_is_visible = '0' + cl.default_configuration_name = 'Release' + + release_conf = project.new(XCBuildConfiguration) + release_conf.name = 'Release' + release_conf.build_settings = common_build_settings(:release, platform, deployment_target, target_product_type, language) + + debug_conf = project.new(XCBuildConfiguration) + debug_conf.name = 'Debug' + debug_conf.build_settings = common_build_settings(:debug, platform, deployment_target, target_product_type, language) + + cl.build_configurations << release_conf + cl.build_configurations << debug_conf + + existing_configurations = cl.build_configurations.map(&:name) + project.build_configurations.each do |configuration| + next if existing_configurations.include?(configuration.name) + + new_config = project.new(XCBuildConfiguration) + new_config.name = configuration.name + new_config.build_settings = common_build_settings(configuration.type, platform, deployment_target, target_product_type, language) + cl.build_configurations << new_config + end + + cl + end + + # Returns the common build settings for a given platform and configuration + # name. + # + # @param [Symbol] type + # the type of the build configuration, can be `:release` or + # `:debug`. + # + # @param [Symbol] platform + # the platform for the build settings, can be `:ios` or `:osx`. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [Symbol] target_product_type + # the product type of the target, can be any of + # `Constants::PRODUCT_TYPE_UTI.values` + # or `Constants::PRODUCT_TYPE_UTI.keys`. Default is :application. + # + # @param [Symbol] language + # the primary language of the target, can be `:objc` or `:swift`. 
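+ #
+ # @example A minimal sketch (the full result depends on
+ #          Constants::COMMON_BUILD_SETTINGS)
+ #
+ #   settings = Xcodeproj::Project::ProjectHelper.common_build_settings(
+ #     :debug, :ios, '14.0', :application, :swift)
+ #   settings['IPHONEOS_DEPLOYMENT_TARGET'] # => '14.0'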
+ # + # @return [Hash] The common build settings + # + def self.common_build_settings(type, platform = nil, deployment_target = nil, target_product_type = nil, language = :objc) + target_product_type = (Constants::PRODUCT_TYPE_UTI.find { |_, v| v == target_product_type } || [target_product_type || :application])[0] + common_settings = Constants::COMMON_BUILD_SETTINGS + + # Use intersecting settings for all key sets as base + settings = deep_dup(common_settings[:all]) + + # Match further common settings by key sets + keys = [type, platform, target_product_type, language].compact + key_combinations = (1..keys.length).flat_map { |n| keys.combination(n).to_a } + key_combinations.each do |key_combination| + settings.merge!(deep_dup(common_settings[key_combination] || {})) + end + + if deployment_target + case platform + when :ios + settings['IPHONEOS_DEPLOYMENT_TARGET'] = deployment_target + settings['CLANG_ENABLE_OBJC_WEAK'] = 'NO' if deployment_target < '5' + when :osx + settings['MACOSX_DEPLOYMENT_TARGET'] = deployment_target + settings['CLANG_ENABLE_OBJC_WEAK'] = 'NO' if deployment_target < '10.7' + when :tvos + settings['TVOS_DEPLOYMENT_TARGET'] = deployment_target + when :watchos + settings['WATCHOS_DEPLOYMENT_TARGET'] = deployment_target + end + end + + settings + end + + # Creates a deep copy of the given object + # + # @param [Object] object + # the object to copy. + # + # @return [Object] The deep copy of the object. + # + def self.deep_dup(object) + case object + when Hash + new_hash = {} + object.each do |key, value| + new_hash[key] = deep_dup(value) + end + new_hash + when Array + object.map { |value| deep_dup(value) } + else + object.dup + end + end + + # Returns the build phases, in order, that appear by default + # on a target of the given type. + # + # @param [Symbol] type + # the name of the target type. + # + # @return [Array<String>] The list of build phase class names for the target type. + # + def self.build_phases_for_target_type(type) + case type + when :static_library, :dynamic_library + %w(Headers Sources Frameworks) + when :framework + %w(Headers Sources Frameworks Resources) + when :command_line_tool + %w(Sources Frameworks) + else + %w(Sources Frameworks Resources) + end.map { |phase| "PBX#{phase}BuildPhase" } + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/uuid_generator.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/uuid_generator.rb new file mode 100644 index 0000000..f47da43 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/project/uuid_generator.rb @@ -0,0 +1,132 @@ +module Xcodeproj + class Project + class UUIDGenerator + require 'digest' + + def initialize(projects) + @projects = Array(projects) + @paths_by_object = {} + end + + def generate! 
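+ # Outline of the steps below (descriptive comment):
+ # 1. Build a stable, path-like identifier for every object of every project.
+ # 2. Derive new UUIDs from those paths and swap them in (see #switch_uuids).
+ # 3. Warn if any two objects ended up with the same UUID.
+ # 4. Rewrite attributes that store raw UUID strings (see UUID_ATTRIBUTES).
+ # 5. Install the regenerated `objects_by_uuid` table on each project.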
+ generate_all_paths_by_objects(@projects) + + new_objects_by_project = Hash[@projects.map do |project| + [project, switch_uuids(project)] + end] + all_new_objects_by_project = new_objects_by_project.values.flat_map(&:values) + all_objects_by_uuid = @projects.map(&:objects_by_uuid).inject(:merge) + all_objects = @projects.flat_map(&:objects) + verify_no_duplicates!(all_objects, all_new_objects_by_project) + @projects.each { |project| fixup_uuid_references(project, all_objects_by_uuid) } + new_objects_by_project.each do |project, new_objects_by_uuid| + project.instance_variable_set(:@generated_uuids, project.instance_variable_get(:@available_uuids)) + project.instance_variable_set(:@objects_by_uuid, new_objects_by_uuid) + end + end + + private + + UUID_ATTRIBUTES = [:remote_global_id_string, :container_portal, :target_proxy].freeze + + def verify_no_duplicates!(all_objects, all_new_objects) + duplicates = all_objects - all_new_objects + UserInterface.warn "[Xcodeproj] Generated duplicate UUIDs:\n\n" << + duplicates.map { |d| "#{d.isa} -- #{@paths_by_object[d]}" }.join("\n") unless duplicates.empty? + end + + def fixup_uuid_references(target_project, all_objects_by_uuid) + fixup = ->(object, attr) do + if object.respond_to?(attr) && link = all_objects_by_uuid[object.send(attr)] + object.send(:"#{attr}=", link.uuid) + end + end + target_project.objects.each do |object| + UUID_ATTRIBUTES.each do |attr| + fixup[object, attr] + end + end + + if (project_attributes = target_project.root_object.attributes) && project_attributes['TargetAttributes'] + project_attributes['TargetAttributes'] = Hash[project_attributes['TargetAttributes'].map do |target_uuid, attributes| + if test_target_id = attributes['TestTargetID'] + attributes = attributes.merge('TestTargetID' => all_objects_by_uuid[test_target_id].uuid) + end + if target_object = all_objects_by_uuid[target_uuid] + target_uuid = target_object.uuid + end + [target_uuid, attributes] + end] + end + end + + def generate_all_paths_by_objects(projects) + projects.each { |project| generate_paths(project.root_object, project.path.basename.to_s) } + end + + def generate_paths(object, path = '') + existing = @paths_by_object[object] || path + return existing if @paths_by_object.key?(object) + @paths_by_object[object] = path.size > existing.size ? path : existing + + object.to_one_attributes.each do |attrb| + obj = attrb.get_value(object) + generate_paths(obj, path + '/' << attrb.plist_name) if obj + end + + object.to_many_attributes.each do |attrb| + attrb.get_value(object).each do |o| + generate_paths(o, path + '/' << attrb.plist_name << "/#{path_component_for_object(o)}") + end + end + + object.references_by_keys_attributes.each do |attrb| + attrb.get_value(object).each do |dictionary| + dictionary.each do |key, value| + generate_paths(value, path + '/' << attrb.plist_name << "/k:#{key}/#{path_component_for_object(value)}") + end + end + end + end + + def switch_uuids(project) + project.mark_dirty! 
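+ # Descriptive comment: each object receives a deterministic UUID derived from
+ # its path (see #uuid_for_path); the returned `uuid => object` hash becomes
+ # the project's new lookup table in #generate!.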
+ project.objects.each_with_object({}) do |object, hash| + next unless path = @paths_by_object[object] + uuid = uuid_for_path(path) + object.instance_variable_set(:@uuid, uuid) + hash[uuid] = object + end + end + + def uuid_for_path(path) + Digest::MD5.hexdigest(path).upcase + end + + def path_component_for_object(object) + @path_component_for_object ||= Hash.new do |cache, key| + component = tree_hash_to_path(key.to_tree_hash) + component << key.hierarchy_path.to_s if key.respond_to?(:hierarchy_path) + cache[key] = component + end + @path_component_for_object[object] + end + + def tree_hash_to_path(object, depth = 4) + return '|' if depth.zero? + case object + when Hash + object.sort.each_with_object('') do |(key, value), string| + string << key << ':' << tree_hash_to_path(value, depth - 1) << ',' + end + when Array + object.map do |value| + tree_hash_to_path(value, depth - 1) + end.join(',') + else + object.to_s + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme.rb new file mode 100644 index 0000000..e855ad1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme.rb @@ -0,0 +1,375 @@ +require 'rexml/document' + +require 'xcodeproj/scheme/build_action' +require 'xcodeproj/scheme/test_action' +require 'xcodeproj/scheme/launch_action' +require 'xcodeproj/scheme/profile_action' +require 'xcodeproj/scheme/analyze_action' +require 'xcodeproj/scheme/archive_action' + +require 'xcodeproj/scheme/buildable_product_runnable' +require 'xcodeproj/scheme/buildable_reference' +require 'xcodeproj/scheme/location_scenario_reference' +require 'xcodeproj/scheme/execution_action' +require 'xcodeproj/scheme/macro_expansion' +require 'xcodeproj/scheme/remote_runnable' +require 'xcodeproj/scheme/send_email_action_content' +require 'xcodeproj/scheme/shell_script_action_content' + +module Xcodeproj + # This class represents a Scheme document represented by a ".xcscheme" file + # usually stored in a xcuserdata or xcshareddata (for a shared scheme) + # folder. + # + class XCScheme + # @return [REXML::Document] the XML object that will be manipulated to save + # the scheme file after. + # + attr_reader :doc + + # Create a XCScheme either from scratch or using an existing file + # + # @param [String] file_path + # The path of the existing .xcscheme file. If nil will create an empty scheme + # + def initialize(file_path = nil) + if file_path + @file_path = file_path + @doc = File.open(file_path, 'r') do |f| + REXML::Document.new(f) + end + @doc.context[:attribute_quote] = :quote + + @scheme = @doc.elements['Scheme'] + else + @doc = REXML::Document.new + @doc.context[:attribute_quote] = :quote + @doc << REXML::XMLDecl.new(REXML::XMLDecl::DEFAULT_VERSION, 'UTF-8') + + @scheme = @doc.add_element 'Scheme' + @scheme.attributes['LastUpgradeVersion'] = Constants::LAST_UPGRADE_CHECK + @scheme.attributes['version'] = Xcodeproj::Constants::XCSCHEME_FORMAT_VERSION + + self.build_action = BuildAction.new + self.test_action = TestAction.new + self.launch_action = LaunchAction.new + self.profile_action = ProfileAction.new + self.analyze_action = AnalyzeAction.new + self.archive_action = ArchiveAction.new + end + end + + # Convenience method to quickly add app and test targets to a new scheme. 
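+ #
+ # For example (a minimal sketch, assuming `app_target` and `test_target` are
+ # targets of `project`):
+ #
+ #   scheme = Xcodeproj::XCScheme.new
+ #   scheme.configure_with_targets(app_target, test_target, launch_target: true)
+ #   scheme.save_as(project.path, 'App')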
+ # + # It will add the runnable_target to the Build, Launch and Profile actions + # and the test_target to the Build and Test actions + # + # @param [Xcodeproj::Project::Object::PBXAbstractTarget] runnable_target + # The target to use for the 'Run', 'Profile' and 'Analyze' actions + # + # @param [Xcodeproj::Project::Object::PBXAbstractTarget] test_target + # The target to use for the 'Test' action + # + # @param [Boolean] launch_target + # Determines if the runnable_target is launchable. + # + def configure_with_targets(runnable_target, test_target, launch_target: false) + if runnable_target + add_build_target(runnable_target) + set_launch_target(runnable_target) if launch_target + end + if test_target + add_build_target(test_target, false) if test_target != runnable_target + add_test_target(test_target) + end + end + + public + + # @!group Access Action nodes + + # @return [XCScheme::BuildAction] + # The Build Action associated with this scheme + # + def build_action + @build_action ||= BuildAction.new(@scheme.elements['BuildAction']) + end + + # @param [XCScheme::BuildAction] action + # The Build Action to associate to this scheme + # + def build_action=(action) + @scheme.delete_element('BuildAction') + @scheme.add_element(action.xml_element) + @build_action = action + end + + # @return [XCScheme::TestAction] + # The Test Action associated with this scheme + # + def test_action + @test_action ||= TestAction.new(@scheme.elements['TestAction']) + end + + # @param [XCScheme::TestAction] action + # The Test Action to associate to this scheme + # + def test_action=(action) + @scheme.delete_element('TestAction') + @scheme.add_element(action.xml_element) + @test_action = action + end + + # @return [XCScheme::LaunchAction] + # The Launch Action associated with this scheme + # + def launch_action + @launch_action ||= LaunchAction.new(@scheme.elements['LaunchAction']) + end + + # @param [XCScheme::LaunchAction] action + # The Launch Action to associate to this scheme + # + def launch_action=(action) + @scheme.delete_element('LaunchAction') + @scheme.add_element(action.xml_element) + @launch_action = action + end + + # @return [XCScheme::ProfileAction] + # The Profile Action associated with this scheme + # + def profile_action + @profile_action ||= ProfileAction.new(@scheme.elements['ProfileAction']) + end + + # @param [XCScheme::ProfileAction] action + # The Profile Action to associate to this scheme + # + def profile_action=(action) + @scheme.delete_element('ProfileAction') + @scheme.add_element(action.xml_element) + @profile_action = action + end + + # @return [XCScheme::AnalyzeAction] + # The Analyze Action associated with this scheme + # + def analyze_action + @analyze_action ||= AnalyzeAction.new(@scheme.elements['AnalyzeAction']) + end + + # @param [XCScheme::AnalyzeAction] action + # The Analyze Action to associate to this scheme + # + def analyze_action=(action) + @scheme.delete_element('AnalyzeAction') + @scheme.add_element(action.xml_element) + @analyze_action = action + end + + # @return [XCScheme::ArchiveAction] + # The Archive Action associated with this scheme + # + def archive_action + @archive_action ||= ArchiveAction.new(@scheme.elements['ArchiveAction']) + end + + # @param [XCScheme::ArchiveAction] action + # The Archive Action to associate to this scheme + # + def archive_action=(action) + @scheme.delete_element('ArchiveAction') + @scheme.add_element(action.xml_element) + @archive_action = action + end + + # @!group Target methods + + # Add a target to the list of targets to 
build in the build action. + # + # @param [Xcodeproj::Project::Object::AbstractTarget] build_target + # A target used by scheme in the build step. + # + # @param [Bool] build_for_running + # Whether to build this target in the launch action. Often false for test targets. + # + def add_build_target(build_target, build_for_running = true) + entry = BuildAction::Entry.new(build_target) + + entry.build_for_testing = true + entry.build_for_running = build_for_running + entry.build_for_profiling = build_for_running + entry.build_for_archiving = build_for_running + entry.build_for_analyzing = build_for_running + + build_action.add_entry(entry) + end + + # Add a target to the list of targets to build in the build action. + # + # @param [Xcodeproj::Project::Object::AbstractTarget] test_target + # A target used by scheme in the test step. + # + def add_test_target(test_target) + testable = TestAction::TestableReference.new(test_target) + test_action.add_testable(testable) + end + + # Sets a runnable target to be the target of the launch action of the scheme. + # + # @param [Xcodeproj::Project::Object::AbstractTarget] build_target + # A target used by scheme in the launch step. + # + def set_launch_target(build_target) + launch_runnable = BuildableProductRunnable.new(build_target, 0) + launch_action.buildable_product_runnable = launch_runnable + + profile_runnable = BuildableProductRunnable.new(build_target, 0) + profile_action.buildable_product_runnable = profile_runnable + + macro_exp = MacroExpansion.new(build_target) + test_action.add_macro_expansion(macro_exp) + end + + # @!group Class methods + + #-------------------------------------------------------------------------# + + # Share a User Scheme. Basically this method move the xcscheme file from + # the xcuserdata folder to xcshareddata folder. + # + # @param [String] project_path + # Path of the .xcodeproj folder. + # + # @param [String] scheme_name + # The name of scheme that will be shared. + # + # @param [String] user + # The user name that have the scheme. + # + def self.share_scheme(project_path, scheme_name, user = nil) + to_folder = shared_data_dir(project_path) + to_folder.mkpath + to = to_folder + "#{scheme_name}.xcscheme" + from = user_data_dir(project_path, user) + "#{scheme_name}.xcscheme" + FileUtils.mv(from, to) + end + + # @return [Pathname] + # + def self.shared_data_dir(project_path) + project_path = Pathname.new(project_path) + project_path + 'xcshareddata/xcschemes' + end + + # @return [Pathname] + # + def self.user_data_dir(project_path, user = nil) + project_path = Pathname.new(project_path) + user ||= ENV['USER'] + project_path + "xcuserdata/#{user}.xcuserdatad/xcschemes" + end + + public + + # @!group Serialization + + #-------------------------------------------------------------------------# + + # Serializes the current state of the object to a String. + # + # @note The goal of the string representation is to match Xcode output as + # close as possible to aide comparison. + # + # @return [String] the XML string value of the current state of the object. + # + def to_s + formatter = XMLFormatter.new(2) + formatter.compact = false + out = '' + formatter.write(@doc, out) + out.gsub!("<?xml version='1.0' encoding='UTF-8'?>", '<?xml version="1.0" encoding="UTF-8"?>') + out << "\n" + out + end + + # Serializes the current state of the object to a ".xcscheme" file. + # + # @param [String, Pathname] project_path + # The path where the ".xcscheme" file should be stored. 
+ # + # @param [String] name + # The name of the scheme, to have ".xcscheme" appended. + # + # @param [Boolean] shared + # true => if the scheme must be a shared scheme (default value) + # false => if the scheme must be a user scheme + # + # @return [void] + # + # @example Saving a scheme + # scheme.save_as('path/to/Project.xcodeproj') #=> true + # + def save_as(project_path, name, shared = true) + scheme_folder_path = if shared + self.class.shared_data_dir(project_path) + else + self.class.user_data_dir(project_path) + end + scheme_folder_path.mkpath + scheme_path = scheme_folder_path + "#{name}.xcscheme" + @file_path = scheme_path + File.open(scheme_path, 'w') do |f| + f.write(to_s) + end + end + + # Serializes the current state of the object to the original ".xcscheme" + # file this XCScheme was created from, overriding the original file. + # + # Requires that the XCScheme object was initialized using a file path. + # + def save! + raise Informative, 'This XCScheme object was not initialized ' \ + 'using a file path. Use save_as instead.' unless @file_path + File.open(@file_path, 'w') do |f| + f.write(to_s) + end + end + + #-------------------------------------------------------------------------# + + # XML formatter which closely mimics the output generated by Xcode. + # + class XMLFormatter < REXML::Formatters::Pretty + def write_element(node, output) + @indentation = 3 + output << ' ' * @level + output << "<#{node.expanded_name}" + + @level += @indentation + node.context = node.parent.context # HACK: to ensure strings are properly quoted + node.attributes.each_attribute do |attr| + output << "\n" + output << ' ' * @level + output << attr.to_string.sub(/=/, ' = ') + end unless node.attributes.empty? + + output << '>' + + output << "\n" + node.children.each do |child| + next if child.is_a?(REXML::Text) && child.to_s.strip.length == 0 + write(child, output) + output << "\n" + end + @level -= @indentation + output << ' ' * @level + output << "</#{node.expanded_name}>" + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/abstract_scheme_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/abstract_scheme_action.rb new file mode 100644 index 0000000..0b59d63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/abstract_scheme_action.rb @@ -0,0 +1,100 @@ +require 'xcodeproj/scheme/xml_element_wrapper' +require 'xcodeproj/scheme/environment_variables' +require 'xcodeproj/scheme/command_line_arguments' + +module Xcodeproj + class XCScheme + # This abstract class aims to be the base class for every XxxAction class + # that have a #build_configuration attribute + # + class AbstractSchemeAction < XMLElementWrapper + # @return [String] + # The build configuration associated with this action + # (usually either 'Debug' or 'Release') + # + def build_configuration + @xml_element.attributes['buildConfiguration'] + end + + # @param [String] config_name + # The build configuration to associate with this action + # (usually either 'Debug' or 'Release') + # + def build_configuration=(config_name) + @xml_element.attributes['buildConfiguration'] = config_name + end + + # @return [Array<ExecutionAction>] + # The list of actions to run before this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. 
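+ #
+ # @example A minimal sketch (assuming `script_action` is an ExecutionAction
+ #          wrapping a 'Run Script' node)
+ #
+ #   scheme.test_action.add_pre_action(script_action)
+ #   scheme.test_action.pre_actions.count # => 1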
+ # + def pre_actions + pre_actions = @xml_element.elements['PreActions'] + return nil unless pre_actions + pre_actions.get_elements('ExecutionAction').map do |entry_node| + ExecutionAction.new(entry_node) + end + end + + # @param [Array<ExecutionAction>] pre_actions + # Set the list of actions to run before this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def pre_actions=(pre_actions) + @xml_element.delete_element('PreActions') + unless pre_actions.empty? + pre_actions_element = @xml_element.add_element('PreActions') + pre_actions.each do |entry_node| + pre_actions_element.add_element(entry_node.xml_element) + end + end + pre_actions + end + + # @param [ExecutionAction] pre_action + # Add an action to the list of actions to run before this scheme action. + # It can be either a 'Run Script' or a 'Send Email' action. + # + def add_pre_action(pre_action) + pre_actions = @xml_element.elements['PreActions'] || @xml_element.add_element('PreActions') + pre_actions.add_element(pre_action.xml_element) + end + + # @return [Array<ExecutionAction>] + # The list of actions to run after this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def post_actions + post_actions = @xml_element.elements['PostActions'] + return nil unless post_actions + post_actions.get_elements('ExecutionAction').map do |entry_node| + ExecutionAction.new(entry_node) + end + end + + # @param [Array<ExecutionAction>] post_actions + # Set the list of actions to run after this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def post_actions=(post_actions) + @xml_element.delete_element('PostActions') + unless post_actions.empty? + post_actions_element = @xml_element.add_element('PostActions') + post_actions.each do |entry_node| + post_actions_element.add_element(entry_node.xml_element) + end + end + post_actions + end + + # @param [ExecutionAction] post_action + # Add an action to the list of actions to run after this scheme action. + # It can be either a 'Run Script' or a 'Send Email' action. + # + def add_post_action(post_action) + post_actions = @xml_element.elements['PostActions'] || @xml_element.add_element('PostActions') + post_actions.add_element(post_action.xml_element) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/analyze_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/analyze_action.rb new file mode 100644 index 0000000..e951980 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/analyze_action.rb @@ -0,0 +1,19 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the AnalyzeAction node of a .xcscheme XML file + # + class AnalyzeAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'AnalyzeAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. 
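+ #
+ # @example A minimal sketch
+ #
+ #   scheme.analyze_action.build_configuration   # => 'Debug' for a newly created scheme
+ #   scheme.analyze_action.build_configuration = 'Release'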
+ # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'AnalyzeAction') do + self.build_configuration = 'Debug' + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/archive_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/archive_action.rb new file mode 100644 index 0000000..d00581f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/archive_action.rb @@ -0,0 +1,59 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the ArchiveAction node of a .xcscheme XML file + # + class ArchiveAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'ArchiveAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'ArchiveAction') do + self.build_configuration = 'Release' + self.reveal_archive_in_organizer = true + end + end + + # @return [Bool] + # Whether the Archive will be revealed in Xcode's Organizer + # after it's done building. + # + def reveal_archive_in_organizer? + string_to_bool(@xml_element.attributes['revealArchiveInOrganizer']) + end + + # @param [Bool] flag + # Set whether the Archive will be revealed in Xcode's Organizer + # after it's done building. + # + def reveal_archive_in_organizer=(flag) + @xml_element.attributes['revealArchiveInOrganizer'] = bool_to_string(flag) + end + + # @return [String] + # The custom name to give to the archive. + # If nil, the generated archive will have the same name as the one + # set in the associated target's Build Settings for the built product. + # + def custom_archive_name + @xml_element.attributes['customArchiveName'] + end + + # @param [String] name + # Set the custom name to use for the built archive + # If nil, the customization of the archive name will be removed and + # the generated archive will have the same name as the one set in the + # associated target's Build Settings for the build product. + # + def custom_archive_name=(name) + if name + @xml_element.attributes['customArchiveName'] = name + else + @xml_element.delete_attribute('customArchiveName') + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/build_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/build_action.rb new file mode 100644 index 0000000..5b67213 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/build_action.rb @@ -0,0 +1,298 @@ +require 'xcodeproj/scheme/xml_element_wrapper' + +module Xcodeproj + class XCScheme + # This class wraps the BuildAction node of a .xcscheme XML file + # + # Note: It's not a AbstractSchemeAction like the others because it is + # a special case of action (with no build_configuration, etc) + # + class BuildAction < XMLElementWrapper + # @param [REXML::Element] node + # The 'BuildAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'BuildAction') do + self.parallelize_buildables = true + self.build_implicit_dependencies = true + end + end + + # @return [Bool] + # Whether or not to run post actions on build failure + # + def run_post_actions_on_failure? 
+ string_to_bool(@xml_element.attributes['runPostActionsOnFailure']) + end + + # @param [Bool] flag + # Set whether or not to run post actions on build failure + # + def run_post_actions_on_failure=(flag) + @xml_element.attributes['runPostActionsOnFailure'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build the various targets in parallel + # + def parallelize_buildables? + string_to_bool(@xml_element.attributes['parallelizeBuildables']) + end + + # @param [Bool] flag + # Set whether or not to build the various targets in parallel + # + def parallelize_buildables=(flag) + @xml_element.attributes['parallelizeBuildables'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to detect and build implicit dependencies for each target + # + def build_implicit_dependencies? + string_to_bool(@xml_element.attributes['buildImplicitDependencies']) + end + + # @param [Bool] flag + # Whether or not to detect and build implicit dependencies for each target + # + def build_implicit_dependencies=(flag) + @xml_element.attributes['buildImplicitDependencies'] = bool_to_string(flag) + end + + # @return [Array<ExecutionAction>] + # The list of actions to run before this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def pre_actions + pre_actions = @xml_element.elements['PreActions'] + return nil unless pre_actions + pre_actions.get_elements('ExecutionAction').map do |entry_node| + ExecutionAction.new(entry_node) + end + end + + # @param [Array<ExecutionAction>] pre_actions + # Set the list of actions to run before this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def pre_actions=(pre_actions) + @xml_element.delete_element('PreActions') + unless pre_actions.empty? + pre_actions_element = @xml_element.add_element('PreActions') + pre_actions.each do |entry_node| + pre_actions_element.add_element(entry_node.xml_element) + end + end + pre_actions + end + + # @param [ExecutionAction] pre_action + # Add an action to the list of actions to run before this scheme action. + # It can be either a 'Run Script' or a 'Send Email' action. + # + def add_pre_action(pre_action) + pre_actions = @xml_element.elements['PreActions'] || @xml_element.add_element('PreActions') + pre_actions.add_element(pre_action.xml_element) + end + + # @return [Array<ExecutionAction>] + # The list of actions to run after this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def post_actions + post_actions = @xml_element.elements['PostActions'] + return nil unless post_actions + post_actions.get_elements('ExecutionAction').map do |entry_node| + ExecutionAction.new(entry_node) + end + end + + # @param [Array<ExecutionAction>] post_actions + # Set the list of actions to run after this scheme action. + # Each entry can be either a 'Run Script' or a 'Send Email' action. + # + def post_actions=(post_actions) + @xml_element.delete_element('PostActions') + unless post_actions.empty? + post_actions_element = @xml_element.add_element('PostActions') + post_actions.each do |entry_node| + post_actions_element.add_element(entry_node.xml_element) + end + end + post_actions + end + + # @param [ExecutionAction] post_action + # Add an action to the list of actions to run after this scheme action. + # It can be either a 'Run Script' or a 'Send Email' action. 
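+ #
+ # @example A minimal sketch (assuming `notify_action` is an ExecutionAction)
+ #
+ #   scheme.build_action.add_post_action(notify_action)
+ #   scheme.build_action.run_post_actions_on_failure = true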
+ # + def add_post_action(post_action) + post_actions = @xml_element.elements['PostActions'] || @xml_element.add_element('PostActions') + post_actions.add_element(post_action.xml_element) + end + + # @return [Array<BuildAction::Entry>] + # The list of BuildActionEntry nodes associated with this Build Action. + # Each entry represent a target to build and tells for which action it's needed to be built. + # + def entries + entries = @xml_element.elements['BuildActionEntries'] + return nil unless entries + entries.get_elements('BuildActionEntry').map do |entry_node| + BuildAction::Entry.new(entry_node) + end + end + + # @param [Array<BuildAction::Entry>] entries + # Sets the list of BuildActionEntry nodes associated with this Build Action. + # + def entries=(entries) + @xml_element.delete_element('BuildActionEntries') + unless entries.empty? + entries_element = @xml_element.add_element('BuildActionEntries') + entries.each do |entry_node| + entries_element.add_element(entry_node.xml_element) + end + end + entries + end + + # @param [BuildAction::Entry] entry + # The BuildActionEntry to add to the list of targets to build for the various actions + # + def add_entry(entry) + entries = @xml_element.elements['BuildActionEntries'] || @xml_element.add_element('BuildActionEntries') + entries.add_element(entry.xml_element) + end + + #-------------------------------------------------------------------------# + + class Entry < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'BuildActionEntry' node element to reference, + # or nil to create an new, empty Entry with default values + # + def initialize(target_or_node = nil) + create_xml_element_with_fallback(target_or_node, 'BuildActionEntry') do + # Check target type to configure the default entry attributes accordingly + is_test_target = false + is_app_target = false + if target_or_node && target_or_node.is_a?(::Xcodeproj::Project::Object::PBXNativeTarget) + test_types = [Constants::PRODUCT_TYPE_UTI[:octest_bundle], + Constants::PRODUCT_TYPE_UTI[:unit_test_bundle], + Constants::PRODUCT_TYPE_UTI[:ui_test_bundle]] + app_types = [Constants::PRODUCT_TYPE_UTI[:application]] + is_test_target = test_types.include?(target_or_node.product_type) + is_app_target = app_types.include?(target_or_node.product_type) + end + + self.build_for_testing = is_test_target + self.build_for_running = is_app_target + self.build_for_profiling = is_app_target + self.build_for_archiving = is_app_target + self.build_for_analyzing = true + + add_buildable_reference BuildableReference.new(target_or_node) if target_or_node + end + end + + # @return [Bool] + # Whether or not to build this target when building for Testing + # + def build_for_testing? + string_to_bool(@xml_element.attributes['buildForTesting']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Testing + # + def build_for_testing=(flag) + @xml_element.attributes['buildForTesting'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Running + # + def build_for_running? 
+ string_to_bool(@xml_element.attributes['buildForRunning']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Running + # + def build_for_running=(flag) + @xml_element.attributes['buildForRunning'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Profiling + # + def build_for_profiling? + string_to_bool(@xml_element.attributes['buildForProfiling']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Profiling + # + def build_for_profiling=(flag) + @xml_element.attributes['buildForProfiling'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Archiving + # + def build_for_archiving? + string_to_bool(@xml_element.attributes['buildForArchiving']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Archiving + # + def build_for_archiving=(flag) + @xml_element.attributes['buildForArchiving'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not to build this target when building for Analyzing + # + def build_for_analyzing? + string_to_bool(@xml_element.attributes['buildForAnalyzing']) + end + + # @param [Bool] + # Set whether or not to build this target when building for Analyzing + # + def build_for_analyzing=(flag) + @xml_element.attributes['buildForAnalyzing'] = bool_to_string(flag) + end + + # @return [Array<BuildableReference>] + # The list of BuildableReferences this entry will build. + # (The list usually contains only one element) + # + def buildable_references + @xml_element.get_elements('BuildableReference').map do |node| + BuildableReference.new(node) + end + end + + # @param [BuildableReference] ref + # The BuildableReference to add to the list of targets this entry will build + # + def add_buildable_reference(ref) + @xml_element.add_element(ref.xml_element) + end + + # @param [BuildableReference] ref + # The BuildableReference to remove from the list of targets this entry will build + # + def remove_buildable_reference(ref) + @xml_element.delete_element(ref.xml_element) + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/buildable_product_runnable.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/buildable_product_runnable.rb new file mode 100644 index 0000000..f4d350f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/buildable_product_runnable.rb @@ -0,0 +1,55 @@ +module Xcodeproj + class XCScheme + # This class wraps the BuildableProductRunnable node of a .xcscheme XML file + # + # A BuildableProductRunnable is a product that is both buildable + # (it contains a BuildableReference) and runnable (it can be launched and debugged) + # + class BuildableProductRunnable < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'BuildableProductRunnable' node element to reference + # or nil to create an new, empty BuildableProductRunnable + # + # @param [#to_s] runnable_debugging_mode + # The debugging mode (usually '0') + # + def initialize(target_or_node = nil, runnable_debugging_mode = nil) + create_xml_element_with_fallback(target_or_node, 'BuildableProductRunnable') do + self.buildable_reference = BuildableReference.new(target_or_node) if target_or_node + @xml_element.attributes['runnableDebuggingMode'] = 
runnable_debugging_mode.to_s if runnable_debugging_mode + end + end + + # @return [String] + # The Runnable debugging mode (usually either empty or equal to '0') + # + def runnable_debugging_mode + @xml_element.attributes['runnableDebuggingMode'] + end + + # @param [String] value + # Set the runnable debugging mode of this buildable product runnable + # + def runnable_debugging_mode=(value) + @xml_element.attributes['runnableDebuggingMode'] = value.to_s + end + + # @return [BuildableReference] + # The Buildable Reference this Buildable Product Runnable is gonna build and run + # + def buildable_reference + @buildable_reference ||= BuildableReference.new @xml_element.elements['BuildableReference'] + end + + # @param [BuildableReference] ref + # Set the Buildable Reference this Buildable Product Runnable is gonna build and run + # + def buildable_reference=(ref) + @xml_element.delete_element('BuildableReference') + @xml_element.add_element(ref.xml_element) + @buildable_reference = ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/buildable_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/buildable_reference.rb new file mode 100644 index 0000000..b75e26e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/buildable_reference.rb @@ -0,0 +1,129 @@ +module Xcodeproj + class XCScheme + # This class wraps the BuildableReference node of a .xcscheme XML file + # + # A BuildableReference is a reference to a buildable product (which is + # typically is synonymous for an Xcode target) + # + class BuildableReference < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'BuildableReference' node element to reference + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + def initialize(target_or_node, root_project = nil) + create_xml_element_with_fallback(target_or_node, 'BuildableReference') do + @xml_element.attributes['BuildableIdentifier'] = 'primary' + set_reference_target(target_or_node, true, root_project) if target_or_node + end + end + + # @return [String] + # The name of the target this Buildable Reference points to + # + def target_name + @xml_element.attributes['BlueprintName'] + end + + # @return [String] + # The Unique Identifier of the target (target.uuid) this Buildable Reference points to. + # + # @note You can use this to `#find` the `Xcodeproj::Project::Object::AbstractTarget` + # instance in your Xcodeproj::Project object. + # e.g. `project.targets.find { |t| t.uuid == ref.target_uuid }` + # + def target_uuid + @xml_element.attributes['BlueprintIdentifier'] + end + + # @return [String] + # The string representing the container of that target. + # Typically in the form of 'container:xxxx.xcodeproj' + # + def target_referenced_container + @xml_element.attributes['ReferencedContainer'] + end + + # Set the BlueprintIdentifier (target.uuid), BlueprintName (target.name) + # and TerefencedContainer (URI pointing to target's projet) all at once + # + # @param [Xcodeproj::Project::Object::AbstractTarget] target + # The target this BuildableReference refers to. 
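+ #
+ # @example A minimal sketch (assuming `app_target` is a PBXNativeTarget of an
+ #          open project)
+ #
+ #   ref = Xcodeproj::XCScheme::BuildableReference.new(app_target)
+ #   ref.target_name                  # => app_target.name
+ #   ref.target_referenced_container  # => e.g. 'container:App.xcodeproj'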
+ # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + # @param [Bool] override_buildable_name + # If true, buildable_name will also be updated by computing a name from the target + # + def set_reference_target(target, override_buildable_name = false, root_project = nil) + # note, the order of assignment here is important, it determines the order of serialization in the xml + # this matches the order that Xcode generates + @xml_element.attributes['BlueprintIdentifier'] = target.uuid + self.buildable_name = construct_buildable_name(target) if override_buildable_name + @xml_element.attributes['BlueprintName'] = target.name + @xml_element.attributes['ReferencedContainer'] = construct_referenced_container_uri(target, root_project) + end + + # @return [String] + # The name of the final product when building this Buildable Reference + # + def buildable_name + @xml_element.attributes['BuildableName'] + end + + # @param [String] value + # Set the name of the final product when building this Buildable Reference + # + def buildable_name=(value) + @xml_element.attributes['BuildableName'] = value + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @param [Xcodeproj::Project::Object::AbstractTarget] target + # + # @return [String] The buildable name of the scheme. + # + def construct_buildable_name(build_target) + case build_target.isa + when 'PBXNativeTarget' + File.basename(build_target.product_reference.path) + when 'PBXAggregateTarget' + build_target.name + else + raise ArgumentError, "Unsupported build target type #{build_target.isa}" + end + end + + # @param [Xcodeproj::Project::Object::AbstractTarget] target + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + # @return [String] A string in the format "container:[path to the project + # file relative to the project_dir_path, always ending with + # the actual project directory name]" + # + def construct_referenced_container_uri(target, root_project = nil) + target_project = target.project + root_project ||= target_project + root_project_dir_path = root_project.root_object.project_dir_path + path = if !root_project_dir_path.to_s.empty? + root_project.path + root_project_dir_path + else + root_project.project_dir + end + relative_path = target_project.path.relative_path_from(path).to_s + relative_path = target_project.path.basename if relative_path == '.' + "container:#{relative_path}" + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/command_line_arguments.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/command_line_arguments.rb new file mode 100644 index 0000000..f5fd5d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/command_line_arguments.rb @@ -0,0 +1,162 @@ +require 'xcodeproj/scheme/xml_element_wrapper' + +module Xcodeproj + class XCScheme + COMMAND_LINE_ARGS_NODE = 'CommandLineArguments'.freeze + COMMAND_LINE_ARG_NODE = 'CommandLineArgument'.freeze + + # This class wraps the CommandLineArguments node of a .xcscheme XML file. This + # is just a container of CommandLineArgument objects. It can either appear on a + # LaunchAction or TestAction scheme group. 
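A minimal sketch of driving this wrapper from client code (assumes an existing `scheme`, an Xcodeproj::XCScheme; the argument value is arbitrary):

    args = Xcodeproj::XCScheme::CommandLineArguments.new(
      [{ :argument => '-FIRDebugEnabled', :enabled => true }]
    )
    scheme.launch_action.command_line_arguments = args
    scheme.launch_action.command_line_arguments.to_a
    # => [{ :argument => '-FIRDebugEnabled', :enabled => true }]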
+ # + class CommandLineArguments < XMLElementWrapper + # @param [nil,REXML::Element,Array<CommandLineArgument>,Array<Hash{Symbol => String,Bool}>] node_or_arguments + # The 'CommandLineArguments' XML node, or list of command line arguments, that this object represents. + # - If nil, an empty 'CommandLineArguments' XML node will be created + # - If an REXML::Element, it must be named 'CommandLineArguments' + # - If an Array of objects or Hashes, they'll each be passed to {#assign_argument} + # + def initialize(node_or_arguments = nil) + create_xml_element_with_fallback(node_or_arguments, COMMAND_LINE_ARGS_NODE) do + @all_arguments = [] + node_or_arguments.each { |var| assign_argument(var) } unless node_or_arguments.nil? + end + end + + # @return [Array<CommandLineArgument>] + # The key value pairs currently set in @xml_element + # + def all_arguments + @all_arguments ||= @xml_element.get_elements(COMMAND_LINE_ARG_NODE).map { |argument| CommandLineArgument.new(argument) } + end + + # Adds a given argument to the set of command line arguments, or replaces it if that key already exists + # + # @param [CommandLineArgument,Hash{Symbol => String,Bool}] argument + # The argument to add or update, backed by an CommandLineArgument node. + # - If an CommandLineArgument, the previous reference will still be valid + # - If a Hash, must conform to {CommandLineArgument#initialize} requirements + # @return [Array<CommandLineArgument>] + # The new set of command line arguments after addition + # + def assign_argument(argument) + command_line_arg = if argument.is_a?(CommandLineArgument) + argument + else + CommandLineArgument.new(argument) + end + all_arguments.each { |existing_var| remove_argument(existing_var) if existing_var.argument == command_line_arg.argument } + @xml_element.add_element(command_line_arg.xml_element) + @all_arguments << command_line_arg + end + + # Removes a specified argument (by string or object) from the set of command line arguments + # + # @param [CommandLineArgument,String] argument + # The argument to remove + # @return [Array<CommandLineArgument>] + # The new set of command line arguments after removal + # + def remove_argument(argument) + command_line_arg = if argument.is_a?(CommandLineArgument) + argument + else + CommandLineArgument.new(argument) + end + raise "Unexpected parameter type: #{command_line_arg.class}" unless command_line_arg.is_a?(CommandLineArgument) + @xml_element.delete_element(command_line_arg.xml_element) + @all_arguments -= [command_line_arg] + end + + # @param [String] key + # The key to lookup + # @return [CommandLineArgument] argument + # Returns the matching command line argument for a specified key + # + def [](argument) + all_arguments.find { |var| var.argument == argument } + end + + # Assigns a value for a specified key + # + # @param [String] key + # The key to update in the command line arguments + # @param [String] value + # The value to lookup + # @return [CommandLineArgument] argument + # The newly updated command line argument + # + def []=(argument, enabled) + assign_argument(:argument => argument, :enabled => enabled) + self[argument] + end + + # @return [Array<Hash{Symbol => String,Bool}>] + # The current command line arguments represented as an array + # + def to_a + all_arguments.map(&:to_h) + end + end + + # This class wraps the CommandLineArgument node of a .xcscheme XML file. + # Environment arguments are accessible via the NSDictionary returned from + # [[NSProcessInfo processInfo] arguments] in your app code. 
+ # + class CommandLineArgument < XMLElementWrapper + # @param [nil,REXML::Element,Hash{Symbol => String,Bool}] node_or_argument + # - If nil, it will create a default XML node to use + # - If a REXML::Element, should be a <CommandLineArgument> XML node to wrap + # - If a Hash, must contain keys :key and :value (Strings) and optionally :enabled (Boolean) + # + def initialize(node_or_argument) + create_xml_element_with_fallback(node_or_argument, COMMAND_LINE_ARG_NODE) do + raise "Must pass a Hash with 'argument' and 'enabled'!" unless node_or_argument.is_a?(Hash) && + node_or_argument.key?(:argument) && node_or_argument.key?(:enabled) + + @xml_element.attributes['argument'] = node_or_argument[:argument] + @xml_element.attributes['isEnabled'] = if node_or_argument.key?(:enabled) + bool_to_string(node_or_argument[:enabled]) + else + bool_to_string(false) + end + end + end + + # Returns the CommandLineArgument's key + # @return [String] + # + def argument + @xml_element.attributes['argument'] + end + + # Sets the CommandLineArgument's key + # @param [String] key + # + def argument=(argument) + @xml_element.attributes['argument'] = argument + end + + # Returns the CommandLineArgument's enabled state + # @return [Bool] + # + def enabled + string_to_bool(@xml_element.attributes['isEnabled']) + end + + # Sets the CommandLineArgument's enabled state + # @param [Bool] enabled + # + def enabled=(enabled) + @xml_element.attributes['isEnabled'] = bool_to_string(enabled) + end + + # @return [Hash{:key => String, :value => String, :enabled => Bool}] + # The command line argument XML node with attributes converted to a representative Hash + # + def to_h + { :argument => argument, :enabled => enabled } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/environment_variables.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/environment_variables.rb new file mode 100644 index 0000000..7335df6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/environment_variables.rb @@ -0,0 +1,170 @@ +require 'xcodeproj/scheme/xml_element_wrapper' + +module Xcodeproj + class XCScheme + VARIABLES_NODE = 'EnvironmentVariables' + VARIABLE_NODE = 'EnvironmentVariable' + + # This class wraps the EnvironmentVariables node of a .xcscheme XML file. This + # is just a container of EnvironmentVariable objects. It can either appear on a + # LaunchAction or TestAction scheme group. + # + class EnvironmentVariables < XMLElementWrapper + # @param [nil,REXML::Element,Array<EnvironmentVariable>,Array<Hash{Symbol => String,Bool}>] node_or_variables + # The 'EnvironmentVariables' XML node, or list of environment variables, that this object represents. + # - If nil, an empty 'EnvironmentVariables' XML node will be created + # - If an REXML::Element, it must be named 'EnvironmentVariables' + # - If an Array of objects or Hashes, they'll each be passed to {#assign_variable} + # + def initialize(node_or_variables = nil) + create_xml_element_with_fallback(node_or_variables, VARIABLES_NODE) do + @all_variables = [] + node_or_variables.each { |var| assign_variable(var) } unless node_or_variables.nil? 
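A short sketch of the EnvironmentVariables API defined here (assumes an existing `scheme`; the keys and values are placeholders):

    env = Xcodeproj::XCScheme::EnvironmentVariables.new(
      [{ :key => 'OS_ACTIVITY_MODE', :value => 'disable', :enabled => true }]
    )
    scheme.launch_action.environment_variables = env
    env['OS_ACTIVITY_MODE'].value                    # => "disable"
    env['API_BASE'] = 'https://staging.example.com'  # upserts via assign_variable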
+ end + end + + # @return [Array<EnvironmentVariable>] + # The key value pairs currently set in @xml_element + # + def all_variables + @all_variables ||= @xml_element.get_elements(VARIABLE_NODE).map { |variable| EnvironmentVariable.new(variable) } + end + + # Adds a given variable to the set of environment variables, or replaces it if that key already exists + # + # @param [EnvironmentVariable,Hash{Symbol => String,Bool}] variable + # The variable to add or update, backed by an EnvironmentVariable node. + # - If an EnvironmentVariable, the previous reference will still be valid + # - If a Hash, must conform to {EnvironmentVariable#initialize} requirements + # @return [Array<EnvironmentVariable>] + # The new set of environment variables after addition + # + def assign_variable(variable) + env_var = variable.is_a?(EnvironmentVariable) ? variable : EnvironmentVariable.new(variable) + all_variables.each { |existing_var| remove_variable(existing_var) if existing_var.key == env_var.key } + @xml_element.add_element(env_var.xml_element) + @all_variables << env_var + end + + # Removes a specified variable (by string or object) from the set of environment variables + # + # @param [EnvironmentVariable,String] variable + # The variable to remove + # @return [Array<EnvironmentVariable>] + # The new set of environment variables after removal + # + def remove_variable(variable) + env_var = variable.is_a?(EnvironmentVariable) ? variable : all_variables.find { |var| var.key == variable } + raise "Unexpected parameter type: #{env_var.class}" unless env_var.is_a?(EnvironmentVariable) + @xml_element.delete_element(env_var.xml_element) + @all_variables -= [env_var] + end + + # @param [String] key + # The key to lookup + # @return [EnvironmentVariable] variable + # Returns the matching environment variable for a specified key + # + def [](key) + all_variables.find { |var| var.key == key } + end + + # Assigns a value for a specified key + # + # @param [String] key + # The key to update in the environment variables + # @param [String] value + # The value to lookup + # @return [EnvironmentVariable] variable + # The newly updated environment variable + # + def []=(key, value) + assign_variable(:key => key, :value => value) + self[key] + end + + # @return [Array<Hash{Symbol => String,Bool}>] + # The current environment variables represented as an array + # + def to_a + all_variables.map(&:to_h) + end + end + + # This class wraps the EnvironmentVariable node of a .xcscheme XML file. + # Environment variables are accessible via the NSDictionary returned from + # [[NSProcessInfo processInfo] environment] in your app code. + # + class EnvironmentVariable < XMLElementWrapper + # @param [nil,REXML::Element,Hash{Symbol => String,Bool}] node_or_variable + # - If nil, it will create a default XML node to use + # - If a REXML::Element, should be a <EnvironmentVariable> XML node to wrap + # - If a Hash, must contain keys :key and :value (Strings) and optionally :enabled (Boolean) + # + def initialize(node_or_variable) + create_xml_element_with_fallback(node_or_variable, VARIABLE_NODE) do + raise "Must pass a Hash with 'key' and 'value'!" 
unless node_or_variable.is_a?(Hash) && + node_or_variable.key?(:key) && node_or_variable.key?(:value) + + @xml_element.attributes['key'] = node_or_variable[:key] + @xml_element.attributes['value'] = node_or_variable[:value] + + @xml_element.attributes['isEnabled'] = if node_or_variable.key?(:enabled) + bool_to_string(node_or_variable[:enabled]) + else + bool_to_string(true) + end + end + end + + # Returns the EnvironmentValue's key + # @return [String] + # + def key + @xml_element.attributes['key'] + end + + # Sets the EnvironmentValue's key + # @param [String] key + # + def key=(key) + @xml_element.attributes['key'] = key + end + + # Returns the EnvironmentValue's value + # @return [String] + # + def value + @xml_element.attributes['value'] + end + + # Sets the EnvironmentValue's value + # @param [String] value + # + def value=(value) + @xml_element.attributes['value'] = value + end + + # Returns the EnvironmentValue's enabled state + # @return [Bool] + # + def enabled + string_to_bool(@xml_element.attributes['isEnabled']) + end + + # Sets the EnvironmentValue's enabled state + # @param [Bool] enabled + # + def enabled=(enabled) + @xml_element.attributes['isEnabled'] = bool_to_string(enabled) + end + + # @return [Hash{:key => String, :value => String, :enabled => Bool}] + # The environment variable XML node with attributes converted to a representative Hash + # + def to_h + { :key => key, :value => value, :enabled => enabled } + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/execution_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/execution_action.rb new file mode 100644 index 0000000..6a6e66c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/execution_action.rb @@ -0,0 +1,86 @@ +module Xcodeproj + class XCScheme + # This class wraps the ExecutionAction node of a .xcscheme XML file + # + class ExecutionAction < XMLElementWrapper + # @param [REXML::Element] node + # The 'ExecutionAction' XML node that this object will wrap. + # If nil, will create an empty one + # + # @param [Symbol] action_type + # One of `EXECUTION_ACTION_TYPE.keys` + # + def initialize(node = nil, action_type = nil) + create_xml_element_with_fallback(node, 'ExecutionAction') do + type = action_type || node.action_type + raise "[Xcodeproj] Invalid ActionType `#{type}`" unless Constants::EXECUTION_ACTION_TYPE.keys.include?(type) + @xml_element.attributes['ActionType'] = Constants::EXECUTION_ACTION_TYPE[type] + end + end + + # @return [String] + # The ActionType of this ExecutionAction. One of two values: + # + # Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.ShellScriptAction, + # Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.SendEmailAction + # + def action_type + @xml_element.attributes['ActionType'] + end + + # @return [ShellScriptActionContent] + # If action_type is 'Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.ShellScriptAction' + # returns the contents of the shell script to run pre/post action. + # + # @return [SendEmailActionContent] + # If action_type is 'Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.SendEmailAction' + # returns the contents of the email to send pre/post action. 
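A sketch of wiring an ExecutionAction as a scheme pre-action (assumes an existing `scheme`; attaching it relies on AbstractSchemeAction#add_pre_action, which is defined elsewhere in this gem):

    script = Xcodeproj::XCScheme::ShellScriptActionContent.new
    script.title       = 'Generate build number'
    script.script_text = 'echo "pre-action running"'

    pre_action = Xcodeproj::XCScheme::ExecutionAction.new(nil, :shell_script)
    pre_action.action_content = script
    scheme.build_action.add_pre_action(pre_action)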
+ # + def action_content + case action_type + when Constants::EXECUTION_ACTION_TYPE[:shell_script] + ShellScriptActionContent.new(@xml_element.elements['ActionContent']) + when Constants::EXECUTION_ACTION_TYPE[:send_email] + SendEmailActionContent.new(@xml_element.elements['ActionContent']) + else + raise "[Xcodeproj] Invalid ActionType `#{action_type}`" + end + end + + # @param [ShellScriptActionContent, SendEmailActionContent] value + # Set either the contents of the shell script to run pre/post action + # or the contents of the email to send pre/post action. + # + def action_content=(value) + raise "[Xcodeproj] Invalid ActionContent `#{value.class}` for " \ + "ActionType `#{action_type}`" unless valid_action_content?(value) + + @xml_element.delete_element('ActionContent') + @xml_element.add_element(value.xml_element) + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Bool] + # True if value (ActionContent) is valid for current action_type + # + # @param [ShellScriptActionContent, SendEmailActionContent] value + # Checks if value matches the expected action_type if present. + # + def valid_action_content?(value) + case action_type + when Constants::EXECUTION_ACTION_TYPE[:shell_script] + value.is_a?(ShellScriptActionContent) + when Constants::EXECUTION_ACTION_TYPE[:send_email] + value.is_a?(SendEmailActionContent) + else + false + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/launch_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/launch_action.rb new file mode 100644 index 0000000..27eecc0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/launch_action.rb @@ -0,0 +1,179 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the LaunchAction node of a .xcscheme XML file + # + class LaunchAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'LaunchAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'LaunchAction') do + self.build_configuration = 'Debug' + + # Add some attributes (that are not handled by this wrapper class yet but expected in the XML) + @xml_element.attributes['selectedDebuggerIdentifier'] = 'Xcode.DebuggerFoundation.Debugger.LLDB' + @xml_element.attributes['selectedLauncherIdentifier'] = 'Xcode.DebuggerFoundation.Launcher.LLDB' + @xml_element.attributes['launchStyle'] = '0' + @xml_element.attributes['useCustomWorkingDirectory'] = bool_to_string(false) + @xml_element.attributes['ignoresPersistentStateOnLaunch'] = bool_to_string(false) + @xml_element.attributes['debugDocumentVersioning'] = bool_to_string(true) + @xml_element.attributes['debugServiceExtension'] = 'internal' + + # Setup default values for other (handled) attributes + self.allow_location_simulation = true + end + end + + # @todo handle 'launchStyle' attribute + # @todo handle 'useCustomWorkingDirectory attribute + # @todo handle 'ignoresPersistentStateOnLaunch' attribute + # @todo handle 'debugDocumentVersioning' attribute + # @todo handle 'debugServiceExtension' + + # @return [Bool] + # Whether or not to allow GPS location simulation when launching this target + # + def allow_location_simulation? 
+ string_to_bool(@xml_element.attributes['allowLocationSimulation']) + end + + # @param [Bool] flag + # Set whether or not to allow GPS location simulation when launching this target + # + def allow_location_simulation=(flag) + @xml_element.attributes['allowLocationSimulation'] = bool_to_string(flag) + end + + # @return [LocationScenarioReference] + # The LocationScenarioReference to simulate a GPS location when executing the Launch Action + # + def location_scenario_reference? + LocationScenarioReference.new(@xml_element.elements['LocationScenarioReference']) + end + + # @return [LocationScenarioReference] + # Set the LocationScenarioReference which simulates a GPS location when executing the Launch Action + # + def location_scenario_reference=(reference) + @xml_element.delete_element('LocationScenarioReference') + @xml_element.add_element(reference.xml_element) if reference + end + + # @return [Bool] + # Whether this Build Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker? + string_to_bool(@xml_element.attributes['disableMainThreadChecker']) + end + + # @param [Bool] flag + # Set whether this Build Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker=(flag) + @xml_element.attributes['disableMainThreadChecker'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether UI API misuse from background threads detection should pause execution. + # This flag is ignored when the thread checker disabled + # ([disable_main_thread_checker] flag). + # + def stop_on_every_main_thread_checker_issue? + string_to_bool(@xml_element.attributes['stopOnEveryMainThreadCheckerIssue']) + end + + # @param [Bool] flag + # Set whether UI API misuse from background threads detection should pause execution. + # This flag is ignored when the thread checker disabled + # ([disable_main_thread_checker] flag). 
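A sketch of toggling these LaunchAction attributes (assumes an existing `scheme`):

    launch = scheme.launch_action
    launch.allow_location_simulation = true
    launch.disable_main_thread_checker = false
    launch.stop_on_every_main_thread_checker_issue = true
    launch.launch_automatically_substyle = '2'   # '2' is the value used for app extensions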
+ # + def stop_on_every_main_thread_checker_issue=(flag) + @xml_element.attributes['stopOnEveryMainThreadCheckerIssue'] = bool_to_string(flag) + end + + # @return [String] + # The launch automatically substyle + # + def launch_automatically_substyle + @xml_element.attributes['launchAutomaticallySubstyle'] + end + + # @param [String] flag + # Set the launch automatically substyle ('2' for extensions) + # + def launch_automatically_substyle=(value) + @xml_element.attributes['launchAutomaticallySubstyle'] = value.to_s + end + + # @return [BuildableProductRunnable] + # The BuildReference to launch when executing the Launch Action + # + def buildable_product_runnable + BuildableProductRunnable.new(@xml_element.elements['BuildableProductRunnable'], 0) + end + + # @param [BuildableProductRunnable] runnable + # Set the BuildableProductRunnable referencing the target to launch + # + def buildable_product_runnable=(runnable) + @xml_element.delete_element('BuildableProductRunnable') + @xml_element.add_element(runnable.xml_element) if runnable + end + + # @return [EnvironmentVariables] + # Returns the EnvironmentVariables that will be defined at app launch + # + def environment_variables + EnvironmentVariables.new(@xml_element.elements[XCScheme::VARIABLES_NODE]) + end + + # @param [EnvironmentVariables,nil] env_vars + # Sets the EnvironmentVariables that will be defined at app launch + # + def environment_variables=(env_vars) + @xml_element.delete_element(XCScheme::VARIABLES_NODE) + @xml_element.add_element(env_vars.xml_element) if env_vars + env_vars + end + + # @todo handle 'AdditionalOptions' tag + + # @return [CommandLineArguments] + # Returns the CommandLineArguments that will be passed at app launch + # + def command_line_arguments + CommandLineArguments.new(@xml_element.elements[XCScheme::COMMAND_LINE_ARGS_NODE]) + end + + # @return [CommandLineArguments] arguments + # Sets the CommandLineArguments that will be passed at app launch + # + def command_line_arguments=(arguments) + @xml_element.delete_element(XCScheme::COMMAND_LINE_ARGS_NODE) + @xml_element.add_element(arguments.xml_element) if arguments + arguments + end + + # @return [Array<MacroExpansion>] + # The list of MacroExpansion bound with this LaunchAction + # + def macro_expansions + @xml_element.get_elements('MacroExpansion').map do |node| + MacroExpansion.new(node) + end + end + + # @param [MacroExpansion] macro_expansion + # Add a MacroExpansion to this LaunchAction + # + def add_macro_expansion(macro_expansion) + @xml_element.add_element(macro_expansion.xml_element) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/location_scenario_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/location_scenario_reference.rb new file mode 100644 index 0000000..e58f241 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/location_scenario_reference.rb @@ -0,0 +1,49 @@ +module Xcodeproj + class XCScheme + # This class wraps the LocationScenarioReference node of a .xcscheme XML file + # + # A LocationScenarioReference is a reference to a simulated GPS location associated + # with a scheme's launch action + # + class LocationScenarioReference < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'LocationScenarioReference' node element to reference + # + def initialize(target_or_node) + 
create_xml_element_with_fallback(target_or_node, 'LocationScenarioReference') do + self.identifier = '' + self.reference_type = '0' + end + end + + # @return [String] + # The identifier of a built-in location scenario reference, or a path to a GPX file + # + def identifier + @xml_element.attributes['identifier'] + end + + # @param [String] value + # Set the identifier for the location scenario reference + # + def identifier=(value) + @xml_element.attributes['identifier'] = value + end + + # @return [String] + # The reference type is 0 when using a custom GPX file, or 1 when using a built-in location reference + # + def reference_type + @xml_element.attributes['referenceType'] + end + + # @param [String] value + # Set the reference type for the location scenario reference + # + def reference_type=(value) + @xml_element.attributes['referenceType'] = value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/macro_expansion.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/macro_expansion.rb new file mode 100644 index 0000000..66b099a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/macro_expansion.rb @@ -0,0 +1,34 @@ +module Xcodeproj + class XCScheme + # This class wraps the MacroExpansion node of a .xcscheme XML file + # + class MacroExpansion < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'MacroExpansion' node element + # or nil to create an empty MacroExpansion object + # + def initialize(target_or_node = nil) + create_xml_element_with_fallback(target_or_node, 'MacroExpansion') do + self.buildable_reference = BuildableReference.new(target_or_node) if target_or_node + end + end + + # @return [BuildableReference] + # The BuildableReference this MacroExpansion refers to + # + def buildable_reference + @buildable_reference ||= BuildableReference.new @xml_element.elements['BuildableReference'] + end + + # @param [BuildableReference] ref + # Set the BuildableReference this MacroExpansion refers to + # + def buildable_reference=(ref) + @xml_element.delete_element('BuildableReference') + @xml_element.add_element(ref.xml_element) + @buildable_reference = ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/profile_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/profile_action.rb new file mode 100644 index 0000000..041a810 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/profile_action.rb @@ -0,0 +1,57 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the ProfileAction node of a .xcscheme XML file + # + class ProfileAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'ProfileAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. 
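A sketch of configuring the ProfileAction defined below (assumes existing `scheme` and `app_target` objects):

    profile = scheme.profile_action
    profile.build_configuration = 'Release'
    profile.should_use_launch_scheme_args_env = true
    profile.buildable_product_runnable =
      Xcodeproj::XCScheme::BuildableProductRunnable.new(app_target)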
+ # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'ProfileAction') do + # Setup default values for other (handled) attributes + self.build_configuration = 'Release' + self.should_use_launch_scheme_args_env = true + + # Add some attributes (that are not handled by this wrapper class yet but expected in the XML) + @xml_element.attributes['savedToolIdentifier'] = '' + @xml_element.attributes['useCustomWorkingDirectory'] = bool_to_string(false) + @xml_element.attributes['debugDocumentVersioning'] = bool_to_string(true) + end + end + + # @return [Bool] + # Whether this Profile Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env? + string_to_bool(@xml_element.attributes['shouldUseLaunchSchemeArgsEnv']) + end + + # @param [Bool] flag + # Set Whether this Profile Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env=(flag) + @xml_element.attributes['shouldUseLaunchSchemeArgsEnv'] = bool_to_string(flag) + end + + # @return [BuildableProductRunnable] + # The BuildableProductRunnable to launch when launching the Profile action + # + def buildable_product_runnable + BuildableProductRunnable.new @xml_element.elements['BuildableProductRunnable'], 0 + end + + # @param [BuildableProductRunnable] runnable + # Set the BuildableProductRunnable referencing the target to launch when profiling + # + def buildable_product_runnable=(runnable) + @xml_element.delete_element('BuildableProductRunnable') + @xml_element.add_element(runnable.xml_element) if runnable + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/remote_runnable.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/remote_runnable.rb new file mode 100644 index 0000000..c51c547 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/remote_runnable.rb @@ -0,0 +1,92 @@ +module Xcodeproj + class XCScheme + # This class wraps the RemoteRunnable node of a .xcscheme XML file + # + # A RemoteRunnable is a product that is both buildable + # (it contains a BuildableReference) and + # runnable remotely (it can be launched and debugged on a remote device, i.e. 
an Apple Watch) + # + class RemoteRunnable < XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'RemoteRunnable' node element to reference + # or nil to create an new, empty RemoteRunnable + # + # @param [#to_s] runnable_debugging_mode + # The debugging mode (usually '2') + # + # @param [#to_s] bundle_identifier + # The bundle identifier (usually 'com.apple.Carousel') + # + # @param [#to_s] remote_path + # The remote path (not required, unknown usage) + # + def initialize(target_or_node = nil, runnable_debugging_mode = nil, bundle_identifier = nil, remote_path = nil) + create_xml_element_with_fallback(target_or_node, 'RemoteRunnable') do + self.buildable_reference = BuildableReference.new(target_or_node) if target_or_node + @xml_element.attributes['runnableDebuggingMode'] = runnable_debugging_mode.to_s if runnable_debugging_mode + @xml_element.attributes['BundleIdentifier'] = bundle_identifier.to_s if bundle_identifier + @xml_element.attributes['RemotePath'] = remote_path.to_s if remote_path + end + end + + # @return [String] + # The runnable debugging mode (usually '2') + # + def runnable_debugging_mode + @xml_element.attributes['runnableDebuggingMode'] + end + + # @param [String] value + # Set the runnable debugging mode + # + def runnable_debugging_mode=(value) + @xml_element.attributes['runnableDebuggingMode'] = value.to_s + end + + # @return [String] + # The runnable bundle identifier (usually 'com.apple.Carousel') + # + def bundle_identifier + @xml_element.attributes['BundleIdentifier'] + end + + # @param [String] value + # Set the runnable bundle identifier + # + def bundle_identifier=(value) + @xml_element.attributes['BundleIdentifier'] = value.to_s + end + + # @return [String] + # The runnable remote path (not required, unknown usage) + # + def remote_path + @xml_element.attributes['RemotePath'] + end + + # @param [String] value + # Set the runnable remote path + # + def remote_path=(value) + @xml_element.attributes['RemotePath'] = value.to_s + end + + # @return [BuildableReference] + # The buildable reference this remote runnable is gonna build and run + # + def buildable_reference + @buildable_reference ||= BuildableReference.new @xml_element.elements['BuildableReference'] + end + + # @param [BuildableReference] ref + # Set the buildable reference this remote runnable is gonna build and run + # + def buildable_reference=(ref) + @xml_element.delete_element('BuildableReference') + @xml_element.add_element(ref.xml_element) + @buildable_reference = ref + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/send_email_action_content.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/send_email_action_content.rb new file mode 100644 index 0000000..24ee2fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/send_email_action_content.rb @@ -0,0 +1,84 @@ +module Xcodeproj + class XCScheme + # This class wraps a 'ActionContent' node of type + # 'Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.SendEmailAction' of a .xcscheme XML file + # + class SendEmailActionContent < XMLElementWrapper + # @param [REXML::Element] node + # The 'ActionContent' XML node that this object will wrap. + # If nil, will create a default XML node to use. 
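A sketch of a send-email post-action built from this wrapper (assumes an existing `scheme`; the recipient, subject, and body are placeholders, and AbstractSchemeAction#add_post_action is assumed to be available as elsewhere in this gem):

    mail = Xcodeproj::XCScheme::SendEmailActionContent.new
    mail.email_recipient = 'ci@example.com'
    mail.email_subject   = 'Archive finished'
    mail.email_body      = 'The nightly archive completed.'

    post_action = Xcodeproj::XCScheme::ExecutionAction.new(nil, :send_email)
    post_action.action_content = mail
    scheme.archive_action.add_post_action(post_action)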
+ # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'ActionContent') do + self.title = 'Send Email' + # For some reason this is not visible in Xcode's UI and it's always set to 'NO' + # couldn't find much documentation on it so it might be safer to keep it read only + @xml_element.attributes['attachLogToEmail'] = 'NO' + end + end + + # @return [Bool] + # Whether or not this action should attach log to email + # + def attach_log_to_email? + string_to_bool(@xml_element.attributes['attachLogToEmail']) + end + + # @return [String] + # The title of this ActionContent + # + def title + @xml_element.attributes['title'] + end + + # @param [String] value + # Set the title of this ActionContent + # + def title=(value) + @xml_element.attributes['title'] = value + end + + # @return [String] + # The email recipient of this ActionContent + # + def email_recipient + @xml_element.attributes['emailRecipient'] + end + + # @param [String] value + # Set the email recipient of this ActionContent + # + def email_recipient=(value) + @xml_element.attributes['emailRecipient'] = value + end + + # @return [String] + # The email subject of this ActionContent + # + def email_subject + @xml_element.attributes['emailSubject'] + end + + # @param [String] value + # Set the email subject of this ActionContent + # + def email_subject=(value) + @xml_element.attributes['emailSubject'] = value + end + + # @return [String] + # The email body of this ActionContent + # + def email_body + @xml_element.attributes['emailBody'] + end + + # @param [String] value + # Set the email body of this ActionContent + # + def email_body=(value) + @xml_element.attributes['emailBody'] = value + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/shell_script_action_content.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/shell_script_action_content.rb new file mode 100644 index 0000000..75b3bb6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/shell_script_action_content.rb @@ -0,0 +1,77 @@ +module Xcodeproj + class XCScheme + # This class wraps a 'ActionContent' node of type + # 'Xcode.IDEStandardExecutionActionsCore.ExecutionActionType.ShellScriptAction' of a .xcscheme XML file + # + class ShellScriptActionContent < XMLElementWrapper + # @param [REXML::Element] node + # The 'ActionContent' XML node that this object will wrap. + # If nil, will create a default XML node to use. 
+ # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'ActionContent') do + self.title = 'Run Script' + end + end + + # @return [String] + # The title of this ActionContent + # + def title + @xml_element.attributes['title'] + end + + # @param [String] value + # Set the title of this ActionContent + # + def title=(value) + @xml_element.attributes['title'] = value + end + + # @return [String] + # The contents of the shell script represented by this ActionContent + # + def script_text + @xml_element.attributes['scriptText'] + end + + # @param [String] value + # Set the contents of the shell script represented by this ActionContent + # + def script_text=(value) + @xml_element.attributes['scriptText'] = value + end + + # @return [String] + # The preferred shell to invoke with this ActionContent + # + def shell_to_invoke + @xml_element.attributes['shellToInvoke'] + end + + # @param [String] value + # Set the preferred shell to invoke with this ActionContent + # + def shell_to_invoke=(value) + @xml_element.attributes['shellToInvoke'] = value + end + + # @return [BuildableReference] + # The BuildableReference (Xcode target) associated with this ActionContent + # + def buildable_reference + BuildableReference.new(@xml_element.elements['EnvironmentBuildable'].elements['BuildableReference']) + end + + # @param [BuildableReference] ref + # Set the BuildableReference (Xcode target) associated with this ActionContent + # + def buildable_reference=(ref) + @xml_element.delete_element('EnvironmentBuildable') + + env_buildable = @xml_element.add_element('EnvironmentBuildable') + env_buildable.add_element(ref.xml_element) + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/test_action.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/test_action.rb new file mode 100644 index 0000000..8ad9f06 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/test_action.rb @@ -0,0 +1,356 @@ +require 'xcodeproj/scheme/abstract_scheme_action' + +module Xcodeproj + class XCScheme + # This class wraps the TestAction node of a .xcscheme XML file + # + class TestAction < AbstractSchemeAction + # @param [REXML::Element] node + # The 'TestAction' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'TestAction') do + self.build_configuration = 'Debug' + @xml_element.attributes['selectedDebuggerIdentifier'] = 'Xcode.DebuggerFoundation.Debugger.LLDB' + @xml_element.attributes['selectedLauncherIdentifier'] = 'Xcode.DebuggerFoundation.Launcher.LLDB' + self.should_use_launch_scheme_args_env = true + @xml_element.add_element('Testables') + end + end + + # @return [Bool] + # Whether this Test Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env? + string_to_bool(@xml_element.attributes['shouldUseLaunchSchemeArgsEnv']) + end + + # @param [Bool] flag + # Set whether this Test Action should use the same arguments and environment variables + # as the Launch Action. + # + def should_use_launch_scheme_args_env=(flag) + @xml_element.attributes['shouldUseLaunchSchemeArgsEnv'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether this Test Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker? 
+ string_to_bool(@xml_element.attributes['disableMainThreadChecker']) + end + + # @param [Bool] flag + # Set whether this Test Action should disable detection of UI API misuse + # from background threads + # + def disable_main_thread_checker=(flag) + @xml_element.attributes['disableMainThreadChecker'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether Clang Code Coverage is enabled ('Gather coverage data' turned ON) + # + def code_coverage_enabled? + string_to_bool(@xml_element.attributes['codeCoverageEnabled']) + end + + # @param [Bool] flag + # Set whether Clang Code Coverage is enabled ('Gather coverage data' turned ON) + # + def code_coverage_enabled=(flag) + @xml_element.attributes['codeCoverageEnabled'] = bool_to_string(flag) + end + + # @return [Array<TestableReference>] + # The list of TestableReference (test bundles) associated with this Test Action + # + def testables + return [] unless @xml_element.elements['Testables'] + + @xml_element.elements['Testables'].get_elements('TestableReference').map do |node| + TestableReference.new(node) + end + end + + # @param [Array<TestableReference>] testables + # Sets the list of TestableReference (test bundles) associated with this Test Action + # + def testables=(testables) + @xml_element.delete_element('Testables') + testables_element = @xml_element.add_element('Testables') + testables.each do |testable| + testables_element.add_element(testable.xml_element) + end + testables + end + + # @param [TestableReference] testable + # Add a TestableReference (test bundle) to this Test Action + # + def add_testable(testable) + testables = @xml_element.elements['Testables'] || @xml_element.add_element('Testables') + testables.add_element(testable.xml_element) + end + + # @return [Array<MacroExpansion>] + # The list of MacroExpansion bound with this TestAction + # + def macro_expansions + @xml_element.get_elements('MacroExpansion').map do |node| + MacroExpansion.new(node) + end + end + + # @param [MacroExpansion] macro_expansion + # Add a MacroExpansion to this TestAction + # + def add_macro_expansion(macro_expansion) + if testables = @xml_element.elements['Testables'] + @xml_element.insert_before(testables, macro_expansion.xml_element) + else + @xml_element.add_element(macro_expansion.xml_element) + end + end + + # @return [EnvironmentVariables] + # Returns the EnvironmentVariables that will be defined at test launch + # + def environment_variables + EnvironmentVariables.new(@xml_element.elements[XCScheme::VARIABLES_NODE]) + end + + # @param [EnvironmentVariables,nil] env_vars + # Sets the EnvironmentVariables that will be defined at test launch + # @return [EnvironmentVariables] + # + def environment_variables=(env_vars) + @xml_element.delete_element(XCScheme::VARIABLES_NODE) + @xml_element.add_element(env_vars.xml_element) if env_vars + env_vars + end + + # @todo handle 'AdditionalOptions' tag + + # @return [CommandLineArguments] + # Returns the CommandLineArguments that will be passed at app launch + # + def command_line_arguments + CommandLineArguments.new(@xml_element.elements[XCScheme::COMMAND_LINE_ARGS_NODE]) + end + + # @return [CommandLineArguments] arguments + # Sets the CommandLineArguments that will be passed at app launch + # + def command_line_arguments=(arguments) + @xml_element.delete_element(XCScheme::COMMAND_LINE_ARGS_NODE) + @xml_element.add_element(arguments.xml_element) if arguments + arguments + end + + #-------------------------------------------------------------------------# + + class TestableReference < 
XMLElementWrapper + # @param [Xcodeproj::Project::Object::AbstractTarget, REXML::Element] target_or_node + # Either the Xcode target to reference, + # or an existing XML 'TestableReference' node element to reference, + # or nil to create an new, empty TestableReference + # + # @param [Xcodeproj::Project] the root project to reference from + # (when nil the project of the target is used) + # + def initialize(target_or_node = nil, root_project = nil) + create_xml_element_with_fallback(target_or_node, 'TestableReference') do + self.skipped = false + add_buildable_reference BuildableReference.new(target_or_node, root_project) unless target_or_node.nil? + end + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should be skipped or not + # + def skipped? + string_to_bool(@xml_element.attributes['skipped']) + end + + # @param [Bool] flag + # Set whether or not this TestableReference (test bundle) should be skipped or not + # + def skipped=(flag) + @xml_element.attributes['skipped'] = bool_to_string(flag) + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should be run in parallel or not + # + def parallelizable? + string_to_bool(@xml_element.attributes['parallelizable']) + end + + # @param [Bool] flag + # Set whether or not this TestableReference (test bundle) should be run in parallel or not + # + def parallelizable=(flag) + @xml_element.attributes['parallelizable'] = bool_to_string(flag) + end + + # @return [String] + # The execution order for this TestableReference (test bundle) + # + def test_execution_ordering + @xml_element.attributes['testExecutionOrdering'] + end + + # @param [String] order + # Set the execution order for this TestableReference (test bundle) + # + def test_execution_ordering=(order) + @xml_element.attributes['testExecutionOrdering'] = order + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should be run in randomized order. + # + def randomized? + test_execution_ordering == 'random' + end + + # @return [Array<BuildableReference>] + # The list of BuildableReferences this action will build. + # (The list usually contains only one element) + # + def buildable_references + @xml_element.get_elements('BuildableReference').map do |node| + BuildableReference.new(node) + end + end + + # @param [BuildableReference] ref + # The BuildableReference to add to the list of targets this action will build + # + def add_buildable_reference(ref) + @xml_element.add_element(ref.xml_element) + end + + # @param [BuildableReference] ref + # The BuildableReference to remove from the list of targets this entry will build + # + def remove_buildable_reference(ref) + @xml_element.delete_element(ref.xml_element) + end + + # @return [Array<Test>] + # The list of SkippedTest this action will skip. + # + def skipped_tests + return [] if @xml_element.elements['SkippedTests'].nil? + @xml_element.elements['SkippedTests'].get_elements('Test').map do |node| + Test.new(node) + end + end + + # @param [Array<Test>] tests + # Set the list of SkippedTest this action will skip. + # + def skipped_tests=(tests) + @xml_element.delete_element('SkippedTests') + if tests.nil? 
+ return + end + entries = @xml_element.add_element('SkippedTests') + tests.each do |skipped| + entries.add_element(skipped.xml_element) + end + end + + # @param [Test] skipped_test + # The SkippedTest to add to the list of tests this action will skip + # + def add_skipped_test(skipped_test) + entries = @xml_element.elements['SkippedTests'] || @xml_element.add_element('SkippedTests') + entries.add_element(skipped_test.xml_element) + end + + # @return [Bool] + # Whether or not this TestableReference (test bundle) should use a whitelist or not + # + def use_test_selection_whitelist? + string_to_bool(@xml_element.attributes['useTestSelectionWhitelist']) + end + + # @param [Bool] flag + # Set whether or not this TestableReference (test bundle) should use a whitelist or not + # + def use_test_selection_whitelist=(flag) + @xml_element.attributes['useTestSelectionWhitelist'] = bool_to_string(flag) + end + + # @return [Array<Test>] + # The list of SelectedTest this action will run. + # + def selected_tests + return [] if @xml_element.elements['SelectedTests'].nil? + @xml_element.elements['SelectedTests'].get_elements('Test').map do |node| + Test.new(node) + end + end + + # @param [Array<Test>] tests + # Set the list of SelectedTest this action will run. + # + def selected_tests=(tests) + @xml_element.delete_element('SelectedTests') + return if tests.nil? + entries = @xml_element.add_element('SelectedTests') + tests.each do |selected| + entries.add_element(selected.xml_element) + end + end + + # @param [Test] selected_test + # The SelectedTest to add to the list of tests this action will run. + # + def add_selected_test(selected_test) + entries = @xml_element.elements['SelectedTests'] || @xml_element.add_element('SelectedTests') + entries.add_element(selected_test.xml_element) + end + + class Test < XMLElementWrapper + # @param [REXML::Element] node + # The 'Test' XML node that this object will wrap. + # If nil, will create a default XML node to use. + # + def initialize(node = nil) + create_xml_element_with_fallback(node, 'Test') do + self.identifier = node.attributes['Identifier'] unless node.nil? + end + end + + # @return [String] + # Skipped test class name + # + def identifier + @xml_element.attributes['Identifier'] + end + + # @param [String] value + # Set the name of the skipped test class name + # + def identifier=(value) + @xml_element.attributes['Identifier'] = value + end + end + + # Aliased to`Test` for compatibility + # @todo Remove in Xcodeproj 2 + # + SkippedTest = Test + + # @todo handle 'AdditionalOptions' tag + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/xml_element_wrapper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/xml_element_wrapper.rb new file mode 100644 index 0000000..ceb7420 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/scheme/xml_element_wrapper.rb @@ -0,0 +1,82 @@ +module Xcodeproj + class XCScheme + # Abstract base class used for other XCScheme classes wrapping an XML element + # + class XMLElementWrapper + # @return [REXML::Element] + # The XML node wrapped and manipulated by this XMLElementWrapper object + # + attr_reader :xml_element + + # @return [String] + # The XML representation of the node this XMLElementWrapper wraps, + # formatted in the same way that Xcode would. 
+ def to_s + formatter = XMLFormatter.new(2) + formatter.compact = false + out = '' + formatter.write(@xml_element, out) + out.gsub!("<?xml version='1.0' encoding='UTF-8'?>", '') + out << "\n" + out + end + + private + + # This is a method intended to be used to facilitate the implementation of the initializers. + # + # - Create the @xml_element attribute based on the node passed as parameter, only if + # that parameter is of type REXML::Element and its name matches the tag_name given. + # - Otherwise, create a brand new REXML::Element with the proper tag name and + # execute the block given as a fallback to let the caller the chance to configure it + # + # @param [REXML::Element, *] node + # The node this XMLElementWrapper is expected to wrap + # or any other object (typically an AbstractTarget instance, or nil) the initializer might expect + # + # @param [String] tag_name + # The expected name for the node, which will also be the name used to create the new node + # if that `node` parameter is not a REXML::Element itself. + # + # @yield a parameter-less block if the `node` parameter is not actually a REXML::Element + # + # @raise Informative + # If the `node` parameter is a REXML::Element instance but the node's name + # doesn't match the one provided by the `tag_name` parameter. + # + def create_xml_element_with_fallback(node, tag_name) + if node && node.is_a?(REXML::Element) + raise Informative, 'Wrong XML tag name' unless node.name == tag_name + @xml_element = node + else + @xml_element = REXML::Element.new(tag_name) + yield if block_given? + end + end + + # @param [Bool] + # The boolean we want to represent as a string + # + # @return [String] + # The string representaiton of that boolean used in the XML ('YES' or 'NO') + # + def bool_to_string(flag) + flag ? 'YES' : 'NO' + end + + # @param [String] + # The string representaiton of a boolean used in the XML ('YES' or 'NO') + # + # @return [Bool] + # The boolean interpretation of that string + # + # @raise Informative + # If the string is not representing a boolean (i.e. is neither 'YES' nor 'NO') + # + def string_to_bool(str) + raise Informative, 'Invalid tag value. Expected YES or NO.' unless %w(YES NO).include?(str) + str == 'YES' + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/user_interface.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/user_interface.rb new file mode 100644 index 0000000..9737c52 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/user_interface.rb @@ -0,0 +1,22 @@ +module Xcodeproj + # Manages the UI output so clients can customize it. + # + module UserInterface + # Prints a message to standard output. + # + # @return [void] + # + def self.puts(message) + STDOUT.puts message + end + + # Prints a message to standard error. + # + # @return [void] + # + def self.warn(message) + STDERR.puts message + end + end + UI = UserInterface +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace.rb new file mode 100644 index 0000000..40afbd0 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace.rb @@ -0,0 +1,277 @@ +require 'fileutils' +require 'rexml/document' +require 'xcodeproj/workspace/file_reference' +require 'xcodeproj/workspace/group_reference' + +module Xcodeproj + # Provides support for generating, reading and serializing Xcode Workspace + # documents. 
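A sketch of typical Workspace usage with the API defined below (the .xcworkspace and .xcodeproj paths are placeholders):

    require 'xcodeproj'

    workspace = Xcodeproj::Workspace.new_from_xcworkspace('App.xcworkspace')
    workspace << 'Pods/Pods.xcodeproj'       # adds a FileRef unless an equal reference exists
    workspace.file_references.map(&:path)    # paths of the referenced projects
    workspace.schemes                        # Hash of scheme name => containing project path
    workspace.save_as('App.xcworkspace')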
+ # + class Workspace + # @return [REXML::Document] the parsed XML model for the workspace contents + attr_reader :document + + # @return [Hash<String => String>] a mapping from scheme name to project full path + # containing the scheme + attr_reader :schemes + + # @return [Array<FileReference>] the paths of the projects contained in the + # workspace. + # + def file_references + return [] unless @document + @document.get_elements('/Workspace//FileRef').map do |node| + FileReference.from_node(node) + end + end + + # @return [Array<GroupReference>] the groups contained in the workspace + # + def group_references + return [] unless @document + @document.get_elements('/Workspace//Group').map do |node| + GroupReference.from_node(node) + end + end + + # @param [REXML::Document] document @see document + # @param [Array<FileReference>] file_references additional projects to add + # + # @note The document parameter is passed to the << operator if it is not a + # valid REXML::Document. It is optional, but may also be passed as nil + # + def initialize(document, *file_references) + @schemes = {} + if document.nil? + @document = REXML::Document.new(root_xml('')) + elsif document.is_a?(REXML::Document) + @document = document + else + @document = REXML::Document.new(root_xml('')) + self << document + end + file_references.each { |ref| self << ref } + end + + #-------------------------------------------------------------------------# + + # Returns a workspace generated by reading the contents of the given path. + # + # @param [String] path + # the path of the `xcworkspace` file. + # + # @return [Workspace] the generated workspace. + # + def self.new_from_xcworkspace(path) + from_s(File.read(File.join(path, 'contents.xcworkspacedata')), + File.expand_path(path)) + rescue Errno::ENOENT + new(nil) + end + + #-------------------------------------------------------------------------# + + # Returns a workspace generated by reading the contents of the given + # XML representation. + # + # @param [String] xml + # the XML representation of the workspace. + # + # @return [Workspace] the generated workspace. + # + def self.from_s(xml, workspace_path = '') + document = REXML::Document.new(xml) + instance = new(document) + instance.load_schemes(workspace_path) + instance + end + + # Adds a new path to the list of the of projects contained in the + # workspace. + # @param [String, Xcodeproj::Workspace::FileReference] path_or_reference + # A string or Xcode::Workspace::FileReference containing a path to an Xcode project + # + # @raise [ArgumentError] Raised if the input is neither a String nor a FileReference + # + # @return [void] + # + def <<(path_or_reference) + return unless @document && @document.respond_to?(:root) + + case path_or_reference + when String + project_file_reference = Xcodeproj::Workspace::FileReference.new(path_or_reference) + when Xcodeproj::Workspace::FileReference + project_file_reference = path_or_reference + projpath = nil + else + raise ArgumentError, "Input to the << operator must be a file path or FileReference, got #{path_or_reference.inspect}" + end + unless file_references.any? { |ref| project_file_reference.eql? ref } + @document.root.add_element(project_file_reference.to_node) + end + load_schemes_from_project File.expand_path(projpath || project_file_reference.path) + end + + #-------------------------------------------------------------------------# + + # Adds a new group container to the workspace + # workspace. 
+ # + # @param [String] name The name of the group + # + # @yield [Xcodeproj::Workspace::GroupReference, REXML::Element] + # Yields the GroupReference and underlying XML element for mutation + # + # @return [Xcodeproj::Workspace::GroupReference] The added group reference + # + def add_group(name) + return nil unless @document + group = Xcodeproj::Workspace::GroupReference.new(name) + elem = @document.root.add_element(group.to_node) + yield group, elem if block_given? + group + end + + # Checks if the workspace contains the project with the given file + # reference. + # + # @param [FileReference] file_reference + # The file_reference to the project. + # + # @return [Boolean] whether the project is contained in the workspace. + # + def include?(file_reference) + file_references.include?(file_reference) + end + + # @return [String] the XML representation of the workspace. + # + def to_s + contents = '' + stack = [] + @document.root.each_recursive do |elem| + until stack.empty? + last = stack.last + break if last == elem.parent + contents += xcworkspace_element_end_xml(stack.length, last) + stack.pop + end + + stack << elem + contents += xcworkspace_element_start_xml(stack.length, elem) + end + + until stack.empty? + contents += xcworkspace_element_end_xml(stack.length, stack.last) + stack.pop + end + + root_xml(contents) + end + + # Saves the workspace at the given `xcworkspace` path. + # + # @param [String] path + # the path where to save the project. + # + # @return [void] + # + def save_as(path) + FileUtils.mkdir_p(path) + File.open(File.join(path, 'contents.xcworkspacedata'), 'w') do |out| + out << to_s + end + end + + #-------------------------------------------------------------------------# + + # Load all schemes from all projects in workspace or in the workspace container itself + # + # @param [String] workspace_dir_path + # path of workspaces dir + # + # @return [void] + # + def load_schemes(workspace_dir_path) + # Normalizes path to directory of workspace needed for file_reference.absolute_path + workspaces_dir = workspace_dir_path + if File.extname(workspace_dir_path) == '.xcworkspace' + workspaces_dir = File.expand_path('..', workspaces_dir) + end + + file_references.each do |file_reference| + project_full_path = file_reference.absolute_path(workspaces_dir) + load_schemes_from_project(project_full_path) + end + + # Load schemes that are in the workspace container. + workspace_abs_path = File.absolute_path(workspace_dir_path) + Dir[File.join(workspace_dir_path, 'xcshareddata', 'xcschemes', '*.xcscheme')].each do |scheme| + scheme_name = File.basename(scheme, '.xcscheme') + @schemes[scheme_name] = workspace_abs_path + end + end + + private + + # Load all schemes from project + # + # @param [String] project_full_path + # project full path + # + # @return [void] + # + def load_schemes_from_project(project_full_path) + schemes = Xcodeproj::Project.schemes project_full_path + schemes.each do |scheme_name| + @schemes[scheme_name] = project_full_path + end + end + + # @return [String] The template of the workspace XML as formated by Xcode. + # + # @param [String] contents The XML contents of the workspace. + # + def root_xml(contents) + <<-DOC +<?xml version="1.0" encoding="UTF-8"?> +<Workspace + version = "1.0"> +#{contents.rstrip} +</Workspace> +DOC + end + + # + # @param [Integer] depth The depth of the element in the tree + # @param [REXML::Document::Element] elem The XML element to format. 
+ # + # @return [String] The Xcode-specific XML formatting of an element start + # + def xcworkspace_element_start_xml(depth, elem) + attributes = case elem.name + when 'Group' + %w(location name) + when 'FileRef' + %w(location) + end + contents = "<#{elem.name}" + indent = ' ' * depth + attributes.each { |name| contents += "\n #{name} = \"#{elem.attribute(name)}\"" } + contents.split("\n").map { |line| "#{indent}#{line}" }.join("\n") + ">\n" + end + + # + # @param [Integer] depth The depth of the element in the tree + # @param [REXML::Document::Element] elem The XML element to format. + # + # @return [String] The Xcode-specific XML formatting of an element end + # + def xcworkspace_element_end_xml(depth, elem) + "#{' ' * depth}</#{elem.name}>\n" + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/file_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/file_reference.rb new file mode 100644 index 0000000..1691a63 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/file_reference.rb @@ -0,0 +1,79 @@ +require 'xcodeproj/workspace/reference' + +module Xcodeproj + class Workspace + # Describes a file reference of a Workspace. + # + class FileReference < Reference + # @return [String] the path to the project + # + attr_reader :path + + # @param [#to_s] path @see path + # @param [#to_s] type @see type + # + def initialize(path, type = 'group') + @path = Pathname.new(path.to_s).cleanpath.to_s + @type = type.to_s + end + + # @return [Bool] Wether a file reference is equal to another. + # + def ==(other) + path == other.path && type == other.type + end + alias_method :eql?, :== + + # @return [Fixnum] A hash identical for equals objects. + # + def hash + [path, type].hash + end + + # Returns a file reference given XML representation. + # + # @param [REXML::Element] xml_node + # the XML representation. + # + # @return [FileReference] The new file reference instance. + # + def self.from_node(xml_node) + type, path = xml_node.attribute('location').value.split(':', 2) + if type == 'group' + path = prepend_parent_path(xml_node, path) + end + new(path, type) + end + + # @return [REXML::Element] the XML representation of the file reference. + # + def to_node + REXML::Element.new('FileRef').tap do |element| + element.add_attribute('location', "#{type}:#{path}") + end + end + + # Returns the absolute path of a file reference given the path of the + # directory containing workspace. + # + # @param [#to_s] workspace_dir_path + # The Path of the directory containing the workspace. + # + # @return [String] The absolute path to the project. 
+ # + def absolute_path(workspace_dir_path) + workspace_dir_path = workspace_dir_path.to_s + case type + when 'group', 'container', 'self' + File.expand_path(File.join(workspace_dir_path, path)) + when 'absolute' + File.expand_path(path) + when 'developer' + raise "Developer workspace file reference type is not yet supported (#{path})" + else + raise "Unsupported workspace file reference type `#{type}`" + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/group_reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/group_reference.rb new file mode 100644 index 0000000..ec73dbf --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/group_reference.rb @@ -0,0 +1,67 @@ +require 'xcodeproj/workspace/reference' + +module Xcodeproj + class Workspace + # Describes a group reference of a Workspace. + # + class GroupReference < Reference + # @return [String] the name of the group + # + attr_reader :name + + # @return [String] the location of the group on disk + # + attr_reader :location + + # @param [#to_s] name @see name + # @param [#to_s] type @see type + # @param [#to_s] location @see location + # + def initialize(name, type = 'container', location = '') + @name = name.to_s + @type = type.to_s + @location = location.to_s + end + + # @return [Bool] Whether a group reference is equal to another. + # + def ==(other) + name == other.name && type == other.type && location == other.location + end + alias_method :eql?, :== + + # @return [Fixnum] A hash identical for equals objects. + # + def hash + [name, type, location].hash + end + + # Returns a group reference given XML representation. + # + # @param [REXML::Element] xml_node + # the XML representation. + # + # @return [GroupReference] The new group reference instance. + # + def self.from_node(xml_node) + location_array = xml_node.attribute('location').value.split(':', 2) + type = location_array.first + location = location_array[1] || '' + if type == 'group' + location = prepend_parent_path(xml_node, location) + end + name = xml_node.attribute('name').value + new(name, type, location) + end + + # @return [REXML::Element] the XML representation of the group reference. + # + def to_node + REXML::Element.new('Group').tap do |element| + element.add_attribute('location', "#{type}:#{location}") + element.add_attribute('name', "#{name}") + end + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/reference.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/reference.rb new file mode 100644 index 0000000..599052b --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/workspace/reference.rb @@ -0,0 +1,40 @@ +module Xcodeproj + class Workspace + # Describes a file/group reference of a Workspace. + # + class Reference + # @return [String] the type of reference to the project + # + # This can be of the following values: + # - absolute + # - group + # - container + # - developer (unsupported) + # + attr_reader :type + + # Returns the relative path to the parent group reference (if one exists) + # prepended to the passed in path. + # + # @param [REXML::Element] xml_node + # the XML representation. + # + # @param [String] path + # the path that will be prepended to. + # + # @return [String] the extended path including the parent node's path. + # + def self.prepend_parent_path(xml_node, path) + if !xml_node.parent.nil? 
&& (xml_node.parent.name == 'Group') + group = GroupReference.from_node(xml_node.parent) + if !group.location.nil? && !group.location.empty? + path = '' if path.nil? + path = File.join(group.location, path) + end + end + + path + end + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/xcodebuild_helper.rb b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/xcodebuild_helper.rb new file mode 100644 index 0000000..6470b81 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/xcodeproj-1.21.0/lib/xcodeproj/xcodebuild_helper.rb @@ -0,0 +1,108 @@ +module Xcodeproj + # Helper class which returns information from xcodebuild. + # + class XcodebuildHelper + def initialize + @needs_to_parse_sdks = true + end + + # @return [String] The version of the last iOS sdk. + # + def last_ios_sdk + parse_sdks_if_needed + versions_by_sdk[:ios].sort.last + end + + # @return [String] The version of the last OS X sdk. + # + def last_osx_sdk + parse_sdks_if_needed + versions_by_sdk[:osx].sort.last + end + + # @return [String] The version of the last tvOS sdk. + # + def last_tvos_sdk + parse_sdks_if_needed + versions_by_sdk[:tvos].sort.last + end + + # @return [String] The version of the last watchOS sdk. + # + def last_watchos_sdk + parse_sdks_if_needed + versions_by_sdk[:watchos].sort.last + end + + private + + # !@group Private Helpers + + #-------------------------------------------------------------------------# + + # @return [Hash] The versions of the sdks grouped by name (`:ios`, or `:osx`). + # + attr_accessor :versions_by_sdk + + # @return [void] Parses the SDKs returned by xcodebuild and stores the + # information in the `needs_to_parse_sdks` hash. + # + def parse_sdks_if_needed + if @needs_to_parse_sdks + @versions_by_sdk = {} + @versions_by_sdk[:osx] = [] + @versions_by_sdk[:ios] = [] + @versions_by_sdk[:tvos] = [] + @versions_by_sdk[:watchos] = [] + if xcodebuild_available? + sdks = parse_sdks_information(xcodebuild_sdks) + sdks.each do |(name, version)| + case + when name == 'macosx' then @versions_by_sdk[:osx] << version + when name == 'iphoneos' then @versions_by_sdk[:ios] << version + when name == 'appletvos' then @versions_by_sdk[:tvos] << version + when name == 'watchos' then @versions_by_sdk[:watchos] << version + end + end + end + end + end + + # @return [Bool] Whether xcodebuild is available. + # + def xcodebuild_available? + if @xcodebuild_available.nil? + `which xcodebuild 2>/dev/null` + @xcodebuild_available = $?.exitstatus.zero? + end + @xcodebuild_available + end + + # @return [Array<Array<String>>] An array of tuples where the first element + # is the name of the SDK and the second is the version. + # + def parse_sdks_information(output) + output.scan(/-sdk (macosx|iphoneos|watchos|appletvos)(.+\w)/) + end + + # @return [String] The sdk information reported by xcodebuild. + # + def xcodebuild_sdks + `xcodebuild -showsdks 2>/dev/null` + end + + #-------------------------------------------------------------------------# + + # @!group Singleton + + # @return [XcodebuildHelper] the current xcodebuild instance creating one + # if needed, which caches the information from the xcodebuild + # command line tool. 
+ # + def self.instance + @instance ||= new + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/MIT-LICENSE b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/MIT-LICENSE new file mode 100644 index 0000000..d6a2b6e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2019–ω Xavier Noria + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/README.md b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/README.md new file mode 100644 index 0000000..b92b13d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/README.md @@ -0,0 +1,1071 @@ +# Zeitwerk + + + +[![Gem Version](https://img.shields.io/gem/v/zeitwerk.svg?style=for-the-badge)](https://rubygems.org/gems/zeitwerk) +[![Build Status](https://img.shields.io/github/workflow/status/fxn/zeitwerk/CI?event=push&style=for-the-badge)](https://github.com/fxn/zeitwerk/actions?query=event%3Apush) + +<!-- TOC --> + +- [Introduction](#introduction) +- [Synopsis](#synopsis) +- [File structure](#file-structure) + - [The idea: File paths match constant paths](#the-idea-file-paths-match-constant-paths) + - [Inner simple constants](#inner-simple-constants) + - [Root directories and root namespaces](#root-directories-and-root-namespaces) + - [The default root namespace is `Object`](#the-default-root-namespace-is-object) + - [Custom root namespaces](#custom-root-namespaces) + - [Nested root directories](#nested-root-directories) + - [Implicit namespaces](#implicit-namespaces) + - [Explicit namespaces](#explicit-namespaces) + - [Collapsing directories](#collapsing-directories) + - [Testing compliance](#testing-compliance) +- [Usage](#usage) + - [Setup](#setup) + - [Generic](#generic) + - [for_gem](#for_gem) + - [Autoloading](#autoloading) + - [Eager loading](#eager-loading) + - [Eager load exclusions](#eager-load-exclusions) + - [Global eager load](#global-eager-load) + - [Reloading](#reloading) + - [Inflection](#inflection) + - [Zeitwerk::Inflector](#zeitwerkinflector) + - [Zeitwerk::GemInflector](#zeitwerkgeminflector) + - [Custom inflector](#custom-inflector) + - [Callbacks](#callbacks) + - [The on_setup callback](#the-on_setup-callback) + - [The on_load callback](#the-on_load-callback) + - [The on_unload callback](#the-on_unload-callback) + - [Technical details](#technical-details) + - [Logging](#logging) + - [Loader tag](#loader-tag) + - [Ignoring parts of the 
project](#ignoring-parts-of-the-project) + - [Use case: Files that do not follow the conventions](#use-case-files-that-do-not-follow-the-conventions) + - [Use case: The adapter pattern](#use-case-the-adapter-pattern) + - [Use case: Test files mixed with implementation files](#use-case-test-files-mixed-with-implementation-files) + - [Edge cases](#edge-cases) + - [Beware of circular dependencies](#beware-of-circular-dependencies) + - [Reopening third-party namespaces](#reopening-third-party-namespaces) + - [Rules of thumb](#rules-of-thumb) + - [Debuggers](#debuggers) + - [debug.rb](#debugrb) + - [Byebug](#byebug) + - [Break](#break) +- [Pronunciation](#pronunciation) +- [Supported Ruby versions](#supported-ruby-versions) +- [Testing](#testing) +- [Motivation](#motivation) + - [Kernel#require is brittle](#kernelrequire-is-brittle) + - [Rails autoloading was brittle](#rails-autoloading-was-brittle) +- [Thanks](#thanks) +- [License](#license) + +<!-- /TOC --> + +<a id="markdown-introduction" name="introduction"></a> +## Introduction + +Zeitwerk is an efficient and thread-safe code loader for Ruby. + +Given a [conventional file structure](#file-structure), Zeitwerk is able to load your project's classes and modules on demand (autoloading), or upfront (eager loading). You don't need to write `require` calls for your own files, rather, you can streamline your programming knowing that your classes and modules are available everywhere. This feature is efficient, thread-safe, and matches Ruby's semantics for constants. + +Zeitwerk is also able to reload code, which may be handy while developing web applications. Coordination is needed to reload in a thread-safe manner. The documentation below explains how to do this. + +The gem is designed so that any project, gem dependency, application, etc. can have their own independent loader, coexisting in the same process, managing their own project trees, and independent of each other. Each loader has its own configuration, inflector, and optional logger. + +Internally, Zeitwerk issues `require` calls exclusively using absolute file names, so there are no costly file system lookups in `$LOAD_PATH`. Technically, the directories managed by Zeitwerk do not even need to be in `$LOAD_PATH`. + +Furthermore, Zeitwerk does at most one single scan of the project tree, and it descends into subdirectories lazily, only if their namespaces are used. + +<a id="markdown-synopsis" name="synopsis"></a> +## Synopsis + +Main interface for gems: + +```ruby +# lib/my_gem.rb (main file) + +require "zeitwerk" +loader = Zeitwerk::Loader.for_gem +loader.setup # ready! + +module MyGem + # ... +end + +loader.eager_load # optionally +``` + +Main generic interface: + +```ruby +loader = Zeitwerk::Loader.new +loader.push_dir(...) +loader.setup # ready! +``` + +The `loader` variable can go out of scope. Zeitwerk keeps a registry with all of them, and so the object won't be garbage collected. + +You can reload if you want to: + +```ruby +loader = Zeitwerk::Loader.new +loader.push_dir(...) +loader.enable_reloading # you need to opt-in before setup +loader.setup +... 
+loader.reload +``` + +and you can eager load all the code: + +```ruby +loader.eager_load +``` + +It is also possible to broadcast `eager_load` to all instances: + +```ruby +Zeitwerk::Loader.eager_load_all +``` + +<a id="markdown-file-structure" name="file-structure"></a> +## File structure + +<a id="markdown-the-idea-file-paths-match-constant-paths" name="the-idea-file-paths-match-constant-paths"></a> +### The idea: File paths match constant paths + +To have a file structure Zeitwerk can work with, just name files and directories after the name of the classes and modules they define: + +``` +lib/my_gem.rb -> MyGem +lib/my_gem/foo.rb -> MyGem::Foo +lib/my_gem/bar_baz.rb -> MyGem::BarBaz +lib/my_gem/woo/zoo.rb -> MyGem::Woo::Zoo +``` + +You can tune that a bit by [collapsing directories](#collapsing-directories), or by [ignoring parts of the project](#ignoring-parts-of-the-project), but that is the main idea. + +<a id="markdown-inner-simple-constants" name="inner-simple-constants"></a> +### Inner simple constants + +While a simple constant like `HttpCrawler::MAX_RETRIES` can be defined in its own file: + +```ruby +# http_crawler/max_retries.rb +HttpCrawler::MAX_RETRIES = 10 +``` + +that is not required, you can also define it the regular way: + +```ruby +# http_crawler.rb +class HttpCrawler + MAX_RETRIES = 10 +end +``` + +The first example needs a custom [inflection](https://github.com/fxn/zeitwerk#inflection) rule: + +```ruby +loader.inflector.inflect("max_retries" => "MAX_RETRIES") +``` + +Otherwise, Zeitwerk would expect the file to define `MaxRetries`. + +In the second example, no custom rule is needed. + +<a id="markdown-root-directories-and-root-namespaces" name="root-directories-and-root-namespaces"></a> +### Root directories and root namespaces + +Every directory configured with `push_dir` is called a _root directory_, and they represent _root namespaces_. + +<a id="markdown-the-default-root-namespace-is-object" name="the-default-root-namespace-is-object"></a> +#### The default root namespace is `Object` + +By default, the namespace associated to a root directory is the top-level one: `Object`. + +For example, given + +```ruby +loader.push_dir("#{__dir__}/models") +loader.push_dir("#{__dir__}/serializers")) +``` + +these are the expected classes and modules being defined by these files: + +``` +models/user.rb -> User +serializers/user_serializer.rb -> UserSerializer +``` + +<a id="markdown-custom-root-namespaces" name="custom-root-namespaces"></a> +#### Custom root namespaces + +While `Object` is by far the most common root namespace, you can associate a different one to a particular root directory. The method `push_dir` accepts a class or module object in the optional `namespace` keyword argument. + +For example, given: + +```ruby +require "active_job" +require "active_job/queue_adapters" +loader.push_dir("#{__dir__}/adapters", namespace: ActiveJob::QueueAdapters) +``` + +a file defining `ActiveJob::QueueAdapters::MyQueueAdapter` does not need the conventional parent directories, you can (and have to) store the file directly below `adapters`: + +``` +adapters/my_queue_adapter.rb -> ActiveJob::QueueAdapters::MyQueueAdapter +``` + +Please, note that the given root namespace must be non-reloadable, though autoloaded constants in that namespace can be. That is, if you associate `app/api` with an existing `Api` module, that module should not be reloadable. However, if the project defines and autoloads the class `Api::Deliveries`, that one can be reloaded. 
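+
+For illustration, here is a minimal sketch of the `app/api` scenario just described. The boot file, the directory layout, and the `Deliveries` class are assumptions made only for this example; the only API used is `push_dir` with the `namespace:` keyword documented above.
+
+```ruby
+# Hypothetical boot file, for illustration only.
+require "zeitwerk"
+
+# Api is defined here, outside the autoload paths, so it is not reloadable.
+module Api; end
+
+loader = Zeitwerk::Loader.new
+# Files directly below app/api map to constants in the Api namespace.
+loader.push_dir("#{__dir__}/app/api", namespace: Api)
+loader.setup
+
+# app/api/deliveries.rb is expected to define Api::Deliveries. That constant
+# is managed by the loader (and reloadable, if reloading is enabled);
+# the Api module itself is never touched on reload.
+Api::Deliveries
+```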
+ +<a id="markdown-nested-root-directories" name="nested-root-directories"></a> +#### Nested root directories + +Root directories should not be ideally nested, but Zeitwerk supports them because in Rails, for example, both `app/models` and `app/models/concerns` belong to the autoload paths. + +Zeitwerk detects nested root directories, and treats them as roots only. In the example above, `concerns` is not considered to be a namespace below `app/models`. For example, the file: + +``` +app/models/concerns/geolocatable.rb +``` + +should define `Geolocatable`, not `Concerns::Geolocatable`. + +<a id="markdown-implicit-namespaces" name="implicit-namespaces"></a> +### Implicit namespaces + +Directories without a matching Ruby file get modules autovivified automatically by Zeitwerk. For example, in + +``` +app/controllers/admin/users_controller.rb -> Admin::UsersController +``` + +`Admin` is autovivified as a module on demand, you do not need to define an `Admin` class or module in an `admin.rb` file explicitly. + +<a id="markdown-explicit-namespaces" name="explicit-namespaces"></a> +### Explicit namespaces + +Classes and modules that act as namespaces can also be explicitly defined, though. For instance, consider + +``` +app/models/hotel.rb -> Hotel +app/models/hotel/pricing.rb -> Hotel::Pricing +``` + +There, `app/models/hotel.rb` defines `Hotel`, and thus Zeitwerk does not autovivify a module. + +The classes and modules from the namespace are already available in the body of the class or module defining it: + +```ruby +class Hotel < ApplicationRecord + include Pricing # works + ... +end +``` + +An explicit namespace must be managed by one single loader. Loaders that reopen namespaces owned by other projects are responsible for loading their constants before setup. + +<a id="markdown-collapsing-directories" name="collapsing-directories"></a> +### Collapsing directories + +Say some directories in a project exist for organizational purposes only, and you prefer not to have them as namespaces. For example, the `actions` subdirectory in the next example is not meant to represent a namespace, it is there only to group all actions related to bookings: + +``` +booking.rb -> Booking +booking/actions/create.rb -> Booking::Create +``` + +To make it work that way, configure Zeitwerk to collapse said directory: + +```ruby +loader.collapse("#{__dir__}/booking/actions") +``` + +This method accepts an arbitrary number of strings or `Pathname` objects, and also an array of them. + +You can pass directories and glob patterns. Glob patterns are expanded when they are added, and again on each reload. + +To illustrate usage of glob patterns, if `actions` in the example above is part of a standardized structure, you could use a wildcard: + +```ruby +loader.collapse("#{__dir__}/*/actions") +``` + +<a id="markdown-testing-compliance" name="testing-compliance"></a> +### Testing compliance + +When a managed file is loaded, Zeitwerk verifies the expected constant is defined. If it is not, `Zeitwerk::NameError` is raised. 
+ +So, an easy way to ensure compliance in the test suite is to eager load the project: + +```ruby +begin + loader.eager_load(force: true) +rescue Zeitwerk::NameError => e + flunk e.message +else + assert true +end +``` + +<a id="markdown-usage" name="usage"></a> +## Usage + +<a id="markdown-setup" name="setup"></a> +### Setup + +<a id="markdown-generic" name="generic"></a> +#### Generic + +Loaders are ready to load code right after calling `setup` on them: + +```ruby +loader.setup +``` + +This method is synchronized and idempotent. + +Customization should generally be done before that call. In particular, in the generic interface you may set the root directories from which you want to load files: + +```ruby +loader.push_dir(...) +loader.push_dir(...) +loader.setup +``` + +<a id="markdown-for_gem" name="for_gem"></a> +#### for_gem + +`Zeitwerk::Loader.for_gem` is a convenience shortcut for the common case in which a gem has its entry point directly under the `lib` directory: + +``` +lib/my_gem.rb # MyGem +lib/my_gem/version.rb # MyGem::VERSION +lib/my_gem/foo.rb # MyGem::Foo +``` + +Neither a gemspec nor a version file are technically required, this helper works as long as the code is organized using that standard structure. + +If the entry point of your gem lives in a subdirectory of `lib` because it is reopening a namespace defined somewhere else, please use the generic API to setup the loader, and make sure you check the section [_Reopening third-party namespaces_](https://github.com/fxn/zeitwerk#reopening-third-party-namespaces) down below. + +Conceptually, `for_gem` translates to: + +```ruby +# lib/my_gem.rb + +require "zeitwerk" +loader = Zeitwerk::Loader.new +loader.tag = File.basename(__FILE__, ".rb") +loader.inflector = Zeitwerk::GemInflector.new(__FILE__) +loader.push_dir(__dir__) +``` + +except that this method returns the same object in subsequent calls from the same file, in the unlikely case the gem wants to be able to reload. + +If the main module references project constants at the top-level, Zeitwerk has to be ready to load them. Their definitions, in turn, may reference other project constants. And this is recursive. Therefore, it is important that the `setup` call happens above the main module definition: + +```ruby +# lib/my_gem.rb (main file) + +require "zeitwerk" +loader = Zeitwerk::Loader.for_gem +loader.setup + +module MyGem + # Since the setup has been performed, at this point we are already able + # to reference project constants, in this case MyGem::MyLogger. + include MyLogger +end +``` + +<a id="markdown-autoloading" name="autoloading"></a> +### Autoloading + +After `setup`, you are able to reference classes and modules from the project without issuing `require` calls for them. They are all available everywhere, autoloading loads them on demand. This works even if the reference to the class or module is first hit in client code, outside your project. + +Let's revisit the example above: + +```ruby +# lib/my_gem.rb (main file) + +require "zeitwerk" +loader = Zeitwerk::Loader.for_gem +loader.setup + +module MyGem + include MyLogger # (*) +end +``` + +That works, and there is no `require "my_gem/my_logger"`. When `(*)` is reached, Zeitwerk seamlessly autoloads `MyGem::MyLogger`. + +If autoloading a file does not define the expected class or module, Zeitwerk raises `Zeitwerk::NameError`, which is a subclass of `NameError`. 
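+
+As a hedged aside, because `Zeitwerk::NameError` subclasses `NameError`, existing rescue clauses keep working. A minimal sketch; the constant and file below are hypothetical:
+
+```ruby
+begin
+  # Assume my_gem/my_logger.rb exists but fails to define MyGem::MyLogger.
+  MyGem::MyLogger
+rescue Zeitwerk::NameError => e
+  # A plain `rescue NameError` would catch this too.
+  warn e.message
+end
+```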
+ +<a id="markdown-eager-loading" name="eager-loading"></a> +### Eager loading + +Zeitwerk instances are able to eager load their managed files: + +```ruby +loader.eager_load +``` + +That skips [ignored files and directories](#ignoring-parts-of-the-project). + +In gems, the method needs to be invoked after the main namespace has been defined, as shown in [Synopsis](https://github.com/fxn/zeitwerk#synopsis). + +Eager loading is synchronized and idempotent. + +<a id="markdown-eager-load-exclusions" name="eager-load-exclusions"></a> +#### Eager load exclusions + + You can tell Zeitwerk that certain files or directories are autoloadable, but should not be eager loaded: + +```ruby +db_adapters = "#{__dir__}/my_gem/db_adapters" +loader.do_not_eager_load(db_adapters) +loader.setup +loader.eager_load # won't eager load the database adapters +``` + +However, that can be overridden with `force`: + +```ruby +loader.eager_load(force: true) # database adapters are eager loaded +``` + +Which may be handy if the project eager loads in the test suite to [ensure project layout compliance](#testing-compliance). + +The `force` flag does not affect ignored files and directories, those are still ignored. + +<a id="markdown-global-eager-load" name="global-eager-load"></a> +#### Global eager load + +If you want to eager load yourself and all dependencies that use Zeitwerk, you can broadcast the `eager_load` call to all instances: + +```ruby +Zeitwerk::Loader.eager_load_all +``` + +This may be handy in top-level services, like web applications. + +Note that thanks to idempotence `Zeitwerk::Loader.eager_load_all` won't eager load twice if any of the instances already eager loaded. + +This method does not accept the `force` flag, since in general it wouldn't be a good idea to force eager loading in 3rd party code. + +<a id="markdown-reloading" name="reloading"></a> +### Reloading + +Zeitwerk is able to reload code, but you need to enable this feature: + +```ruby +loader = Zeitwerk::Loader.new +loader.push_dir(...) +loader.enable_reloading # you need to opt-in before setup +loader.setup +... +loader.reload +``` + +There is no way to undo this, either you want to reload or you don't. + +Enabling reloading after setup raises `Zeitwerk::Error`. Attempting to reload without having it enabled raises `Zeitwerk::ReloadingDisabledError`. + +Generally speaking, reloading is useful while developing running services like web applications. Gems that implement regular libraries, so to speak, or services running in testing or production environments, won't normally have a use case for reloading. If reloading is not enabled, Zeitwerk is able to use less memory. + +Reloading removes the currently loaded classes and modules and resets the loader so that it will pick whatever is in the file system now. + +It is important to highlight that this is an instance method. Don't worry about project dependencies managed by Zeitwerk, their loaders are independent. + +In order for reloading to be thread-safe, you need to implement some coordination. For example, a web framework that serves each request with its own thread may have a globally accessible RW lock. When a request comes in, the framework acquires the lock for reading at the beginning, and the code in the framework that calls `loader.reload` needs to acquire the lock for writing. + +On reloading, client code has to update anything that would otherwise be storing a stale object. 
For example, if the routing layer of a web framework stores controller class objects or instances in internal structures, on reload it has to refresh them somehow, possibly reevaluating routes. + +<a id="markdown-inflection" name="inflection"></a> +### Inflection + +Each individual loader needs an inflector to figure out which constant path would a given file or directory map to. Zeitwerk ships with two basic inflectors, and you can define your own. + +<a id="markdown-zeitwerkinflector" name="zeitwerkinflector"></a> +#### Zeitwerk::Inflector + +Each loader instantiated with `Zeitwerk::Loader.new` has an inflector of this type by default. + +This is a very basic inflector that converts snake case to camel case: + +``` +user -> User +users_controller -> UsersController +html_parser -> HtmlParser +``` + +The camelize logic can be overridden easily for individual basenames: + +```ruby +loader.inflector.inflect( + "html_parser" => "HTMLParser", + "mysql_adapter" => "MySQLAdapter" +) +``` + +The `inflect` method can be invoked several times if you prefer this other style: + +```ruby +loader.inflector.inflect "html_parser" => "HTMLParser" +loader.inflector.inflect "mysql_adapter" => "MySQLAdapter" +``` + +Overrides need to be configured before calling `setup`. + + The inflectors of different loaders are independent of each other. There are no global inflection rules or global configuration that can affect this inflector. It is deterministic. + +<a id="markdown-zeitwerkgeminflector" name="zeitwerkgeminflector"></a> +#### Zeitwerk::GemInflector + +Each loader instantiated with `Zeitwerk::Loader.for_gem` has an inflector of this type by default. + +This inflector is like the basic one, except it expects `lib/my_gem/version.rb` to define `MyGem::VERSION`. + +The inflectors of different loaders are independent of each other. There are no global inflection rules or global configuration that can affect this inflector. It is deterministic. + +<a id="markdown-custom-inflector" name="custom-inflector"></a> +#### Custom inflector + +The inflectors that ship with Zeitwerk are deterministic and simple. But you can configure your own: + +```ruby +# frozen_string_literal: true + +class MyInflector < Zeitwerk::Inflector + def camelize(basename, abspath) + if basename =~ /\Ahtml_(.*)/ + "HTML" + super($1, abspath) + else + super + end + end +end +``` + +The first argument, `basename`, is a string with the basename of the file or directory to be inflected. In the case of a file, without extension. In the case of a directory, without trailing slash. The inflector needs to return this basename inflected. Therefore, a simple constant name without colons. + +The second argument, `abspath`, is a string with the absolute path to the file or directory in case you need it to decide how to inflect the basename. Paths to directories don't have trailing slashes. + +Then, assign the inflector: + +```ruby +loader.inflector = MyInflector.new +``` + +This needs to be done before calling `setup`. + +If a custom inflector definition in a gem takes too much space in the main file, you can extract it. For example, this is a simple pattern: + +```ruby +# lib/my_gem/inflector.rb +module MyGem + class Inflector < Zeitwerk::GemInflector + ... + end +end + +# lib/my_gem.rb +require "zeitwerk" +require_relative "my_gem/inflector" + +loader = Zeitwerk::Loader.for_gem +loader.inflector = MyGem::Inflector.new(__FILE__) +loader.setup + +module MyGem + # ... 
+end +``` + +Since `MyGem` is referenced before the namespace is defined in the main file, it is important to use this style: + +```ruby +# Correct, effectively defines MyGem. +module MyGem + class Inflector < Zeitwerk::GemInflector + # ... + end +end +``` + +instead of: + +```ruby +# Raises uninitialized constant MyGem (NameError). +class MyGem::Inflector < Zeitwerk::GemInflector + # ... +end +``` + +<a id="markdown-callbacks" name="callbacks"></a> +### Callbacks + +<a id="markdown-the-on_setup-callback" name="the-on_setup-callback"></a> +#### The on_setup callback + +The `on_setup` callback is fired on setup and on each reload: + +```ruby +loader.on_setup do + # Ready to autoload here. +end +``` + +Multiple `on_setup` callbacks are supported, and they run in order of definition. + +If `setup` was already executed, the callback is fired immediately. + +<a id="markdown-the-on_load-callback" name="the-on_load-callback"></a> +#### The on_load callback + +The usual place to run something when a file is loaded is the file itself. However, sometimes you'd like to be called, and this is possible with the `on_load` callback. + +For example, let's imagine this class belongs to a Rails application: + +```ruby +class SomeApiClient + class << self + attr_accessor :endpoint + end +end +``` + +With `on_load`, it is easy to schedule code at boot time that initializes `endpoint` according to the configuration: + +```ruby +# config/environments/development.rb +loader.on_load("SomeApiClient") do |klass, _abspath| + klass.endpoint = "https://api.dev" +end + +# config/environments/production.rb +loader.on_load("SomeApiClient") do |klass, _abspath| + klass.endpoint = "https://api.prod" +end +``` + +Some uses cases: + +* Doing something with a reloadable class or module in a Rails application during initialization, in a way that plays well with reloading. As in the previous example. +* Delaying the execution of the block until the class is loaded for performance. +* Delaying the execution of the block until the class is loaded because it follows the adapter pattern and better not to load the class if the user does not need it. + +`on_load` gets a target constant path as a string (e.g., "User", or "Service::NotificationsGateway"). When fired, its block receives the stored value, and the absolute path to the corresponding file or directory as a string. The callback is executed every time the target is loaded. That includes reloads. + +Multiple callbacks on the same target are supported, and they run in order of definition. + +The block is executed once the loader has loaded the target. In particular, if the target was already loaded when the callback is defined, the block won't run. But if you reload and load the target again, then it will. Normally, you'll want to define `on_load` callbacks before `setup`. + +Defining a callback for a target not managed by the receiver is not an error, the block simply won't ever be executed. + +It is also possible to be called when any constant managed by the loader is loaded: + +```ruby +loader.on_load do |cpath, value, abspath| + # ... +end +``` + +The block gets the constant path as a string (e.g., "User", or "Foo::VERSION"), the value it stores (e.g., the class object stored in `User`, or "2.5.0"), and the absolute path to the corresponding file or directory as a string. + +Multiple callbacks like these are supported, and they run in order of definition. + +There are use cases for this last catch-all callback, but they are rare. 
If you just need to understand how things are being loaded for debugging purposes, please remember that `Zeitwerk::Loader#log!` logs plenty of information. + +If both types of callbacks are defined, the specific ones run first. + +Since `on_load` callbacks are executed right after files are loaded, even if the loading context seems to be far away, in practice **the block is subject to [circular dependencies](#beware-of-circular-dependencies)**. As a rule of thumb, as far as loading order and its interdependencies is concerned, you have to program as if the block was executed at the bottom of the file just loaded. + +<a id="markdown-the-on_unload-callback" name="the-on_unload-callback"></a> +#### The on_unload callback + +When reloading is enabled, you may occasionally need to execute something before a certain autoloaded class or module is unloaded. The `on_unload` callback allows you to do that. + +For example, let's imagine that a `Country` class fetches a list of countries and caches them when it is loaded. You might want to clear that cache if unloaded: + +```ruby +loader.on_unload("Country") do |klass, _abspath| + klass.clear_cache +end +``` + +`on_unload` gets a target constant path as a string (e.g., "User", or "Service::NotificationsGateway"). When fired, its block receives the stored value, and the absolute path to the corresponding file or directory as a string. The callback is executed every time the target is unloaded. + +`on_unload` blocks are executed before the class is unloaded, but in the middle of unloading, which happens in an unspecified order. Therefore, **that callback should not refer to any reloadable constant because there is no guarantee the constant works there**. Those blocks should rely on objects only, as in the example above, or regular constants not managed by the loader. This remark is transitive, applies to any methods invoked within the block. + +Multiple callbacks on the same target are supported, and they run in order of definition. + +Defining a callback for a target not managed by the receiver is not an error, the block simply won't ever be executed. + +It is also possible to be called when any constant managed by the loader is unloaded: + +```ruby +loader.on_unload do |cpath, value, abspath| + # ... +end +``` + +The block gets the constant path as a string (e.g., "User", or "Foo::VERSION"), the value it stores (e.g., the class object stored in `User`, or "2.5.0"), and the absolute path to the corresponding file or directory as a string. + +Multiple callbacks like these are supported, and they run in order of definition. + +If both types of callbacks are defined, the specific ones run first. + +<a id="markdown-technical-details" name="technical-details"></a> +#### Technical details + +Zeitwerk uses the word "unload" to ease communication and for symmetry with `on_load`. However, in Ruby you cannot unload things for real. So, when does `on_unload` technically happen? + +When unloading, Zeitwerk issues `Module#remove_const` calls. Classes and modules are no longer reachable through their constants, and `on_unload` callbacks are executed right before those calls. + +Technically, though, the objects themselves are still alive, but if everything is used as expected and they are not stored in any non-reloadable place (don't do that), they are ready for garbage collection, which is when the real unloading happens. + +<a id="markdown-logging" name="logging"></a> +### Logging + +Zeitwerk is silent by default, but you can ask loaders to trace their activity. 
Logging is meant just for troubleshooting, shouldn't normally be enabled. + +The `log!` method is a quick shortcut to let the loader log to `$stdout`: + +``` +loader.log! +``` + +If you want more control, a logger can be configured as a callable + +```ruby +loader.logger = method(:puts) +loader.logger = ->(msg) { ... } +``` + +as well as anything that responds to `debug`: + +```ruby +loader.logger = Logger.new($stderr) +loader.logger = Rails.logger +``` + +In both cases, the corresponding methods are going to be passed exactly one argument with the message to be logged. + +It is also possible to set a global default this way: + +```ruby +Zeitwerk::Loader.default_logger = method(:puts) +``` + +If there is a logger configured, you'll see traces when autoloads are set, files loaded, and modules autovivified. While reloading, removed autoloads and unloaded objects are also traced. + +As a curiosity, if your project has namespaces you'll notice in the traces Zeitwerk sets autoloads for _directories_. That's a technique used to be able to descend into subdirectories on demand, avoiding that way unnecessary tree walks. + +<a id="markdown-loader-tag" name="loader-tag"></a> +#### Loader tag + +Loaders have a tag that is printed in traces in order to be able to distinguish them in globally logged activity: + +``` +Zeitwerk@9fa54b: autoload set for User, to be loaded from ... +``` + +By default, a random tag like the one above is assigned, but you can change it: + +``` +loader.tag = "grep_me" +``` + +The tag of a loader returned by `for_gem` is the basename of the root file without extension: + +``` +Zeitwerk@my_gem: constant MyGem::Foo loaded from ... +``` + +<a id="markdown-ignoring-parts-of-the-project" name="ignoring-parts-of-the-project"></a> +### Ignoring parts of the project + +Zeitwerk ignores automatically any file or directory whose name starts with a dot, and any files that do not have extension ".rb". + +However, sometimes it might still be convenient to tell Zeitwerk to completely ignore some particular Ruby file or directory. That is possible with `ignore`, which accepts an arbitrary number of strings or `Pathname` objects, and also an array of them. + +You can ignore file names, directory names, and glob patterns. Glob patterns are expanded when they are added and again on each reload. + +Let's see some use cases. + +<a id="markdown-use-case-files-that-do-not-follow-the-conventions" name="use-case-files-that-do-not-follow-the-conventions"></a> +#### Use case: Files that do not follow the conventions + +Let's suppose that your gem decorates something in `Kernel`: + +```ruby +# lib/my_gem/core_ext/kernel.rb + +Kernel.module_eval do + # ... +end +``` + +`Kernel` is already defined by Ruby so the module cannot be autoloaded. Also, that file does not define a constant path after the path name. Therefore, Zeitwerk should not process it at all. + +The extension can still coexist with the rest of the project, you only need to tell Zeitwerk to ignore it: + +```ruby +kernel_ext = "#{__dir__}/my_gem/core_ext/kernel.rb" +loader.ignore(kernel_ext) +loader.setup +``` + +You can also ignore the whole directory: + +```ruby +core_ext = "#{__dir__}/my_gem/core_ext" +loader.ignore(core_ext) +loader.setup +``` + +Now, that file has to be loaded manually with `require` or `require_relative`: + +```ruby +require_relative "my_gem/core_ext/kernel" +``` + +and you can do that anytime, before configuring the loader, or after configuring the loader, does not matter. 
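+
+As a small aside, `ignore` takes several arguments in one call, and `Pathname` objects work as well as strings. A sketch reusing the `core_ext` directory from above; the glob on the second line is purely hypothetical, just to show the pattern form:
+
+```ruby
+require "pathname"
+
+loader.ignore(
+  Pathname.new("#{__dir__}/my_gem/core_ext"), # the directory ignored above
+  "#{__dir__}/my_gem/**/*_generated.rb"       # hypothetical glob, for illustration
+)
+loader.setup
+```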
+ +<a id="markdown-use-case-the-adapter-pattern" name="use-case-the-adapter-pattern"></a> +#### Use case: The adapter pattern + +Another use case for ignoring files is the adapter pattern. + +Let's imagine your project talks to databases, supports several, and has adapters for each one of them. Those adapters may have top-level `require` calls that load their respective drivers: + +```ruby +# my_gem/db_adapters/postgresql.rb +require "pg" +``` + +but you don't want your users to install them all, only the one they are going to use. + +On the other hand, if your code is eager loaded by you or a parent project (with `Zeitwerk::Loader.eager_load_all`), those `require` calls are going to be executed. Ignoring the adapters prevents that: + +```ruby +db_adapters = "#{__dir__}/my_gem/db_adapters" +loader.ignore(db_adapters) +loader.setup +``` + +The chosen adapter, then, has to be loaded by hand somehow: + +```ruby +require "my_gem/db_adapters/#{config[:db_adapter]}" +``` + +Note that since the directory is ignored, the required adapter can instantiate another loader to manage its subtree, if desired. Such loader would coexist with the main one just fine. + +<a id="markdown-use-case-test-files-mixed-with-implementation-files" name="use-case-test-files-mixed-with-implementation-files"></a> +#### Use case: Test files mixed with implementation files + +There are project layouts that put implementation files and test files together. To ignore the test files, you can use a glob pattern like this: + +```ruby +tests = "#{__dir__}/**/*_test.rb" +loader.ignore(tests) +loader.setup +``` + +<a id="markdown-edge-cases" name="edge-cases"></a> +### Edge cases + +A class or module that acts as a namespace: + +```ruby +# trip.rb +class Trip + include Geolocation +end + +# trip/geolocation.rb +module Trip::Geolocation + ... +end +``` + +has to be defined with the `class` or `module` keywords, as in the example above. + +For technical reasons, raw constant assignment is not supported: + +```ruby +# trip.rb +Trip = Class.new { ... } # NOT SUPPORTED +Trip = Struct.new { ... } # NOT SUPPORTED +``` + +This only affects explicit namespaces, those idioms work well for any other ordinary class or module. + +<a id="markdown-beware-of-circular-dependencies" name="beware-of-circular-dependencies"></a> +### Beware of circular dependencies + +In Ruby, you can't have certain top-level circular dependencies. Take for example: + +```ruby +# c.rb +class C < D +end + +# d.rb +class D + C +end +``` + +In order to define `C`, you need to load `D`. However, the body of `D` refers to `C`. + +Circular dependencies like those do not work in plain Ruby, and therefore do not work in projects managed by Zeitwerk either. + +<a id="markdown-reopening-third-party-namespaces" name="reopening-third-party-namespaces"></a> +### Reopening third-party namespaces + +Projects managed by Zeitwerk can work with namespaces defined by third-party libraries. However, they have to be loaded in memory before calling `setup`. + +For example, let's imagine you're writing a gem that implements an adapter for [Active Job](https://guides.rubyonrails.org/active_job_basics.html) that uses AwesomeQueue as backend. By convention, your gem has to define a class called `ActiveJob::QueueAdapters::AwesomeQueue`, and it has to do so in a file with a matching path: + +```ruby +# lib/active_job/queue_adapters/awesome_queue.rb +module ActiveJob + module QueueAdapters + class AwesomeQueue + # ... 
+ end + end +end +``` + +It is very important that your gem _reopens_ the modules `ActiveJob` and `ActiveJob::QueueAdapters` instead of _defining_ them. Because their proper definition lives in Active Job. Furthermore, if the project reloads, you do not want any of `ActiveJob` or `ActiveJob::QueueAdapters` to be reloaded. + +Bottom line, Zeitwerk should not be managing those namespaces. Active Job owns them and defines them. Your gem needs to _reopen_ them. + +In order to do so, you need to make sure those modules are loaded before calling `setup`. For instance, in the entry file for the gem: + +```ruby +# Ensure these namespaces are reopened, not defined. +require "active_job" +require "active_job/queue_adapters" + +require "zeitwerk" +loader = Zeitwerk::Loader.for_gem +loader.setup +``` + +With that, when Zeitwerk scans the file system and reaches the gem directories `lib/active_job` and `lib/active_job/queue_adapters`, it detects the corresponding modules already exist and therefore understands it does not have to manage them. The loader just descends into those directories. Eventually will reach `lib/active_job/queue_adapters/awesome_queue.rb`, and since `ActiveJob::QueueAdapters::AwesomeQueue` is unknown, Zeitwerk will manage it. Which is what happens regularly with the files in your gem. On reload, the namespaces are safe, won't be reloaded. The loader only reloads what it manages, which in this case is the adapter itself. + +<a id="markdown-rules-of-thumb" name="rules-of-thumb"></a> +### Rules of thumb + +1. Different loaders should manage different directory trees. It is an error condition to configure overlapping root directories in different loaders. + +2. Think the mere existence of a file is effectively like writing a `require` call for them, which is executed on demand (autoload) or upfront (eager load). + +3. In that line, if two loaders manage files that translate to the same constant in the same namespace, the first one wins, the rest are ignored. Similar to what happens with `require` and `$LOAD_PATH`, only the first occurrence matters. + +4. Projects that reopen a namespace defined by some dependency have to ensure said namespace is loaded before setup. That is, the project has to make sure it reopens, rather than define. This is often accomplished just loading the dependency. + +5. Objects stored in reloadable constants should not be cached in places that are not reloaded. For example, non-reloadable classes should not subclass a reloadable class, or mixin a reloadable module. Otherwise, after reloading, those classes or module objects would become stale. Referring to constants in dynamic places like method calls or lambdas is fine. + +6. In a given process, ideally, there should be at most one loader with reloading enabled. Technically, you can have more, but it may get tricky if one refers to constants managed by the other one. Do that only if you know what you are doing. + +<a id="markdown-debuggers" name="debuggers"></a> +### Debuggers + +<a id="markdown-debugrb" name="debugrb"></a> +#### debug.rb + +The new [debug.rb](https://github.com/ruby/debug) gem and Zeitwerk are mostly compatible. This is the new debugger that is going to ship with Ruby 3.1. + +There's one exception, though: Due to a technical limitation of tracepoints, explicit namespaces are not autoloaded while expressions are evaluated in the REPL. See [ruby/debug#408](https://github.com/ruby/debug/issues/408). 
+ +<a id="markdown-byebug" name="byebug"></a> +#### Byebug + +Zeitwerk and [Byebug](https://github.com/deivid-rodriguez/byebug) have a similar edge incompatibility. + +<a id="markdown-break" name="break"></a> +#### Break + +Zeitwerk works fine with [@gsamokovarov](https://github.com/gsamokovarov)'s [Break](https://github.com/gsamokovarov/break) debugger. + +<a id="markdown-pronunciation" name="pronunciation"></a> +## Pronunciation + +"Zeitwerk" is pronounced [this way](http://share.hashref.com/zeitwerk/zeitwerk_pronunciation.mp3). + +<a id="markdown-supported-ruby-versions" name="supported-ruby-versions"></a> +## Supported Ruby versions + +Zeitwerk works with CRuby 2.5 and above. + +On TruffleRuby all is good except for thread-safety. Right now, in TruffleRuby `Module#autoload` does not block threads accessing a constant that is being autoloaded. CRuby prevents such access to avoid concurrent threads from seeing partial evaluations of the corresponding file. Zeitwerk inherits autoloading thread-safety from this property. This is not an issue if your project gets eager loaded, or if you lazy load in single-threaded environments. (See https://github.com/oracle/truffleruby/issues/2431.) + +JRuby 9.3.0.0 is almost there. As of this writing, the test suite of Zeitwerk passes on JRuby except for three tests. (See https://github.com/jruby/jruby/issues/6781.) + +<a id="markdown-testing" name="testing"></a> +## Testing + +In order to run the test suite of Zeitwerk, `cd` into the project root and execute + +``` +bin/test +``` + +To run one particular suite, pass its file name as an argument: + +``` +bin/test test/lib/zeitwerk/test_eager_load.rb +``` + +Furthermore, the project has a development dependency on [`minitest-focus`](https://github.com/seattlerb/minitest-focus). To run an individual test mark it with `focus`: + +```ruby +focus +test "capitalizes the first letter" do + assert_equal "User", camelize("user") +end +``` + +and run `bin/test`. + +<a id="markdown-motivation" name="motivation"></a> +## Motivation + +<a id="markdown-kernelrequire-is-brittle" name="kernelrequire-is-brittle"></a> +### Kernel#require is brittle + +Since `require` has global side-effects, and there is no static way to verify that you have issued the `require` calls for code that your file depends on, in practice it is very easy to forget some. That introduces bugs that depend on the load order. + +Also, if the project has namespaces, setting things up and getting client code to load things in a consistent way needs discipline. For example, `require "foo/bar"` may define `Foo`, instead of reopen it. That may be a broken window, giving place to superclass mismatches or partially-defined namespaces. + +With Zeitwerk, you just name things following conventions and done. Things are available everywhere, and descend is always orderly. Without effort and without broken windows. + +<a id="markdown-rails-autoloading-was-brittle" name="rails-autoloading-was-brittle"></a> +### Rails autoloading was brittle + +Autoloading in Rails was based on `const_missing` up to Rails 5. That callback lacks fundamental information like the nesting or the resolution algorithm being used. Because of that, Rails autoloading was not able to match Ruby's semantics, and that introduced a [series of issues](https://guides.rubyonrails.org/v5.2/autoloading_and_reloading_constants.html#common-gotchas). Zeitwerk is based on a different technique and fixed Rails autoloading starting with Rails 6. 
+ +<a id="markdown-thanks" name="thanks"></a> +## Thanks + +I'd like to thank [@matthewd](https://github.com/matthewd) for the discussions we've had about this topic in the past years, I learned a couple of tricks used in Zeitwerk from him. + +Also, would like to thank [@Shopify](https://github.com/Shopify), [@rafaelfranca](https://github.com/rafaelfranca), and [@dylanahsmith](https://github.com/dylanahsmith), for sharing [this PoC](https://github.com/Shopify/autoload_reloader). The technique Zeitwerk uses to support explicit namespaces was copied from that project. + +Jean Boussier ([@casperisfine](https://github.com/casperisfine), [@byroot](https://github.com/byroot)) deserves special mention. Jean migrated autoloading in Shopify when Zeitwerk integration in Rails was yet unreleased. His work and positive attitude have been outstanding, and thanks to his feedback the interface and performance of Zeitwerk are way, way better. Kudos man ❤️. + +Finally, many thanks to [@schurig](https://github.com/schurig) for recording an [audio file](http://share.hashref.com/zeitwerk/zeitwerk_pronunciation.mp3) with the pronunciation of "Zeitwerk" in perfect German. 💯 + +<a id="markdown-license" name="license"></a> +## License + +Released under the MIT License, Copyright (c) 2019–<i>ω</i> Xavier Noria. diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk.rb new file mode 100644 index 0000000..a1335d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module Zeitwerk + require_relative "zeitwerk/real_mod_name" + require_relative "zeitwerk/loader" + require_relative "zeitwerk/registry" + require_relative "zeitwerk/explicit_namespace" + require_relative "zeitwerk/inflector" + require_relative "zeitwerk/gem_inflector" + require_relative "zeitwerk/kernel" + require_relative "zeitwerk/error" + require_relative "zeitwerk/version" + + # This is a dangerous method. + # + # @experimental + # @sig () -> void + def self.with_loader + loader = Zeitwerk::Loader.new + yield loader + ensure + loader.unregister + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/error.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/error.rb new file mode 100644 index 0000000..5f8d1e7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/error.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +module Zeitwerk + class Error < StandardError + end + + class ReloadingDisabledError < Error + end + + class NameError < ::NameError + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/explicit_namespace.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/explicit_namespace.rb new file mode 100644 index 0000000..b363961 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/explicit_namespace.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +module Zeitwerk + # Centralizes the logic for the trace point used to detect the creation of + # explicit namespaces, needed to descend into matching subdirectories right + # after the constant has been defined. + # + # The implementation assumes an explicit namespace is managed by one loader. + # Loaders that reopen namespaces owned by other projects are responsible for + # loading their constant before setup. This is documented. 
+ module ExplicitNamespace # :nodoc: all + class << self + include RealModName + + # Maps constant paths that correspond to explicit namespaces according to + # the file system, to the loader responsible for them. + # + # @private + # @sig Hash[String, Zeitwerk::Loader] + attr_reader :cpaths + + # @private + # @sig Mutex + attr_reader :mutex + + # @private + # @sig TracePoint + attr_reader :tracer + + # Asserts `cpath` corresponds to an explicit namespace for which `loader` + # is responsible. + # + # @private + # @sig (String, Zeitwerk::Loader) -> void + def register(cpath, loader) + mutex.synchronize do + cpaths[cpath] = loader + # We check enabled? because, looking at the C source code, enabling an + # enabled tracer does not seem to be a simple no-op. + tracer.enable unless tracer.enabled? + end + end + + # @private + # @sig (Zeitwerk::Loader) -> void + def unregister_loader(loader) + cpaths.delete_if { |_cpath, l| l == loader } + disable_tracer_if_unneeded + end + + private + + # @sig () -> void + def disable_tracer_if_unneeded + mutex.synchronize do + tracer.disable if cpaths.empty? + end + end + + # @sig (TracePoint) -> void + def tracepoint_class_callback(event) + # If the class is a singleton class, we won't do anything with it so we + # can bail out immediately. This is several orders of magnitude faster + # than accessing its name. + return if event.self.singleton_class? + + # It might be tempting to return if name.nil?, to avoid the computation + # of a hash code and delete call. But Ruby does not trigger the :class + # event on Class.new or Module.new, so that would incur in an extra call + # for nothing. + # + # On the other hand, if we were called, cpaths is not empty. Otherwise + # the tracer is disabled. So we do need to go ahead with the hash code + # computation and delete call. + if loader = cpaths.delete(real_mod_name(event.self)) + loader.on_namespace_loaded(event.self) + disable_tracer_if_unneeded + end + end + end + + @cpaths = {} + @mutex = Mutex.new + + # We go through a method instead of defining a block mainly to have a better + # label when profiling. + @tracer = TracePoint.new(:class, &method(:tracepoint_class_callback)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/gem_inflector.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/gem_inflector.rb new file mode 100644 index 0000000..8e59de5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/gem_inflector.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module Zeitwerk + class GemInflector < Inflector + # @sig (String) -> void + def initialize(root_file) + namespace = File.basename(root_file, ".rb") + lib_dir = File.dirname(root_file) + @version_file = File.join(lib_dir, namespace, "version.rb") + end + + # @sig (String, String) -> String + def camelize(basename, abspath) + abspath == @version_file ? "VERSION" : super + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/inflector.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/inflector.rb new file mode 100644 index 0000000..8cac2df --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/inflector.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +module Zeitwerk + class Inflector + # Very basic snake case -> camel case conversion. + # + # inflector = Zeitwerk::Inflector.new + # inflector.camelize("post", ...) # => "Post" + # inflector.camelize("users_controller", ...) 
# => "UsersController" + # inflector.camelize("api", ...) # => "Api" + # + # Takes into account hard-coded mappings configured with `inflect`. + # + # @sig (String, String) -> String + def camelize(basename, _abspath) + overrides[basename] || basename.split('_').each(&:capitalize!).join + end + + # Configures hard-coded inflections: + # + # inflector = Zeitwerk::Inflector.new + # inflector.inflect( + # "html_parser" => "HTMLParser", + # "mysql_adapter" => "MySQLAdapter" + # ) + # + # inflector.camelize("html_parser", abspath) # => "HTMLParser" + # inflector.camelize("mysql_adapter", abspath) # => "MySQLAdapter" + # inflector.camelize("users_controller", abspath) # => "UsersController" + # + # @sig (Hash[String, String]) -> void + def inflect(inflections) + overrides.merge!(inflections) + end + + private + + # Hard-coded basename to constant name user maps that override the default + # inflection logic. + # + # @sig () -> Hash[String, String] + def overrides + @overrides ||= {} + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/kernel.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/kernel.rb new file mode 100644 index 0000000..3e2463d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/kernel.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +module Kernel + module_function + + # Zeitwerk's main idea is to define autoloads for project constants, and then + # intercept them when triggered in this thin `Kernel#require` wrapper. + # + # That allows us to complete the circle, invoke callbacks, autovivify modules, + # define autoloads for just autoloaded namespaces, update internal state, etc. + # + # On the other hand, if you publish a new version of a gem that is now managed + # by Zeitwerk, client code can reference directly your classes and modules and + # should not require anything. But if someone has legacy require calls around, + # they will work as expected, and in a compatible way. This feature is by now + # EXPERIMENTAL and UNDOCUMENTED. + # + # We cannot decorate with prepend + super because Kernel has already been + # included in Object, and changes in ancestors don't get propagated into + # already existing ancestor chains on Ruby < 3.0. + alias_method :zeitwerk_original_require, :require + + # @sig (String) -> true | false + def require(path) + if loader = Zeitwerk::Registry.loader_for(path) + if path.end_with?(".rb") + required = zeitwerk_original_require(path) + loader.on_file_autoloaded(path) if required + required + else + loader.on_dir_autoloaded(path) + true + end + else + required = zeitwerk_original_require(path) + if required + abspath = $LOADED_FEATURES.last + if loader = Zeitwerk::Registry.loader_for(abspath) + loader.on_file_autoloaded(abspath) + end + end + required + end + end + + # By now, I have seen no way so far to decorate require_relative. + # + # For starters, at least in CRuby, require_relative does not delegate to + # require. Both require and require_relative delegate the bulk of their work + # to an internal C function called rb_require_safe. So, our require wrapper is + # not executed. + # + # On the other hand, we cannot use the aliasing technique above because + # require_relative receives a path relative to the directory of the file in + # which the call is performed. If a wrapper here invoked the original method, + # Ruby would resolve the relative path taking lib/zeitwerk as base directory. 
+ # + # A workaround could be to extract the base directory from caller_locations, + # but what if someone else decorated require_relative before us? You can't + # really know with certainty where's the original call site in the stack. + # + # However, the main use case for require_relative is to load files from your + # own project. Projects managed by Zeitwerk don't do this for files managed by + # Zeitwerk, precisely. +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader.rb new file mode 100644 index 0000000..3c7ce65 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader.rb @@ -0,0 +1,516 @@ +# frozen_string_literal: true + +require "set" + +module Zeitwerk + class Loader + require_relative "loader/helpers" + require_relative "loader/callbacks" + require_relative "loader/config" + + include RealModName + include Callbacks + include Helpers + include Config + + # Maps absolute paths for which an autoload has been set ---and not + # executed--- to their corresponding parent class or module and constant + # name. + # + # "/Users/fxn/blog/app/models/user.rb" => [Object, :User], + # "/Users/fxn/blog/app/models/hotel/pricing.rb" => [Hotel, :Pricing] + # ... + # + # @private + # @sig Hash[String, [Module, Symbol]] + attr_reader :autoloads + + # We keep track of autoloaded directories to remove them from the registry + # at the end of eager loading. + # + # Files are removed as they are autoloaded, but directories need to wait due + # to concurrency (see why in Zeitwerk::Loader::Callbacks#on_dir_autoloaded). + # + # @private + # @sig Array[String] + attr_reader :autoloaded_dirs + + # Stores metadata needed for unloading. Its entries look like this: + # + # "Admin::Role" => [".../admin/role.rb", [Admin, :Role]] + # + # The cpath as key helps implementing unloadable_cpath? The file name is + # stored in order to be able to delete it from $LOADED_FEATURES, and the + # pair [Module, Symbol] is used to remove_const the constant from the class + # or module object. + # + # If reloading is enabled, this hash is filled as constants are autoloaded + # or eager loaded. Otherwise, the collection remains empty. + # + # @private + # @sig Hash[String, [String, [Module, Symbol]]] + attr_reader :to_unload + + # Maps constant paths of namespaces to arrays of corresponding directories. + # + # For example, given this mapping: + # + # "Admin" => [ + # "/Users/fxn/blog/app/controllers/admin", + # "/Users/fxn/blog/app/models/admin", + # ... + # ] + # + # when `Admin` gets defined we know that it plays the role of a namespace and + # that its children are spread over those directories. We'll visit them to set + # up the corresponding autoloads. + # + # @private + # @sig Hash[String, Array[String]] + attr_reader :lazy_subdirs + + # @private + # @sig Mutex + attr_reader :mutex + + # @private + # @sig Mutex + attr_reader :mutex2 + + def initialize + super + + @autoloads = {} + @autoloaded_dirs = [] + @to_unload = {} + @lazy_subdirs = Hash.new { |h, cpath| h[cpath] = [] } + @mutex = Mutex.new + @mutex2 = Mutex.new + @setup = false + @eager_loaded = false + + Registry.register_loader(self) + end + + # Sets autoloads in the root namespace. 
+ # + # @sig () -> void + def setup + mutex.synchronize do + break if @setup + + actual_root_dirs.each do |root_dir, namespace| + set_autoloads_in_dir(root_dir, namespace) + end + + on_setup_callbacks.each(&:call) + + @setup = true + end + end + + # Removes loaded constants and configured autoloads. + # + # The objects the constants stored are no longer reachable through them. In + # addition, since said objects are normally not referenced from anywhere + # else, they are eligible for garbage collection, which would effectively + # unload them. + # + # This method is public but undocumented. Main interface is `reload`, which + # means `unload` + `setup`. This one is avaiable to be used together with + # `unregister`, which is undocumented too. + # + # @sig () -> void + def unload + mutex.synchronize do + # We are going to keep track of the files that were required by our + # autoloads to later remove them from $LOADED_FEATURES, thus making them + # loadable by Kernel#require again. + # + # Directories are not stored in $LOADED_FEATURES, keeping track of files + # is enough. + unloaded_files = Set.new + + autoloads.each do |abspath, (parent, cname)| + if parent.autoload?(cname) + unload_autoload(parent, cname) + else + # Could happen if loaded with require_relative. That is unsupported, + # and the constant path would escape unloadable_cpath? This is just + # defensive code to clean things up as much as we are able to. + unload_cref(parent, cname) + unloaded_files.add(abspath) if ruby?(abspath) + end + end + + to_unload.each do |cpath, (abspath, (parent, cname))| + # We have to check cdef? in this condition. Reason is, constants whose + # file does not define them have to be kept in to_unload as explained + # in the implementation of on_file_autoloaded. + # + # If the constant is not defined, on_unload should not be triggered + # for it. + if !on_unload_callbacks.empty? && cdef?(parent, cname) + value = parent.const_get(cname) + run_on_unload_callbacks(cpath, value, abspath) + end + + unload_cref(parent, cname) + unloaded_files.add(abspath) if ruby?(abspath) + end + + unless unloaded_files.empty? + # Bootsnap decorates Kernel#require to speed it up using a cache and + # this optimization does not check if $LOADED_FEATURES has the file. + # + # To make it aware of changes, the gem defines singleton methods in + # $LOADED_FEATURES: + # + # https://github.com/Shopify/bootsnap/blob/master/lib/bootsnap/load_path_cache/core_ext/loaded_features.rb + # + # Rails applications may depend on bootsnap, so for unloading to work + # in that setting it is preferable that we restrict our API choice to + # one of those methods. + $LOADED_FEATURES.reject! { |file| unloaded_files.member?(file) } + end + + autoloads.clear + autoloaded_dirs.clear + to_unload.clear + lazy_subdirs.clear + + Registry.on_unload(self) + ExplicitNamespace.unregister_loader(self) + + @setup = false + @eager_loaded = false + end + end + + # Unloads all loaded code, and calls setup again so that the loader is able + # to pick any changes in the file system. + # + # This method is not thread-safe, please see how this can be achieved by + # client code in the README of the project. + # + # @raise [Zeitwerk::Error] + # @sig () -> void + def reload + if reloading_enabled? + unload + recompute_ignored_paths + recompute_collapse_dirs + setup + else + raise ReloadingDisabledError, "can't reload, please call loader.enable_reloading before setup" + end + end + + # Eager loads all files in the root directories, recursively. 
Files do not + # need to be in `$LOAD_PATH`, absolute file names are used. Ignored files + # are not eager loaded. You can opt-out specifically in specific files and + # directories with `do_not_eager_load`, and that can be overridden passing + # `force: true`. + # + # @sig (true | false) -> void + def eager_load(force: false) + mutex.synchronize do + break if @eager_loaded + + log("eager load start") if logger + + honour_exclusions = !force + + queue = [] + actual_root_dirs.each do |root_dir, namespace| + queue << [namespace, root_dir] unless honour_exclusions && excluded_from_eager_load?(root_dir) + end + + while to_eager_load = queue.shift + namespace, dir = to_eager_load + + ls(dir) do |basename, abspath| + next if honour_exclusions && excluded_from_eager_load?(abspath) + + if ruby?(abspath) + if cref = autoloads[abspath] + cget(*cref) + end + elsif dir?(abspath) && !root_dirs.key?(abspath) + if collapse?(abspath) + queue << [namespace, abspath] + else + cname = inflector.camelize(basename, abspath) + queue << [cget(namespace, cname), abspath] + end + end + end + end + + autoloaded_dirs.each do |autoloaded_dir| + Registry.unregister_autoload(autoloaded_dir) + end + autoloaded_dirs.clear + + @eager_loaded = true + + log("eager load end") if logger + end + end + + # Says if the given constant path would be unloaded on reload. This + # predicate returns `false` if reloading is disabled. + # + # @sig (String) -> bool + def unloadable_cpath?(cpath) + to_unload.key?(cpath) + end + + # Returns an array with the constant paths that would be unloaded on reload. + # This predicate returns an empty array if reloading is disabled. + # + # @sig () -> Array[String] + def unloadable_cpaths + to_unload.keys.freeze + end + + # This is a dangerous method. + # + # @experimental + # @sig () -> void + def unregister + Registry.unregister_loader(self) + ExplicitNamespace.unregister_loader(self) + end + + # --- Class methods --------------------------------------------------------------------------- + + class << self + # @sig #call | #debug | nil + attr_accessor :default_logger + + # @private + # @sig Mutex + attr_accessor :mutex + + # This is a shortcut for + # + # require "zeitwerk" + # loader = Zeitwerk::Loader.new + # loader.tag = File.basename(__FILE__, ".rb") + # loader.inflector = Zeitwerk::GemInflector.new(__FILE__) + # loader.push_dir(__dir__) + # + # except that this method returns the same object in subsequent calls from + # the same file, in the unlikely case the gem wants to be able to reload. + # + # @sig () -> Zeitwerk::Loader + def for_gem + called_from = caller_locations(1, 1).first.path + Registry.loader_for_gem(called_from) + end + + # Broadcasts `eager_load` to all loaders. + # + # @sig () -> void + def eager_load_all + Registry.loaders.each(&:eager_load) + end + + # Returns an array with the absolute paths of the root directories of all + # registered loaders. This is a read-only collection. 
+ # + # @sig () -> Array[String] + def all_dirs + Registry.loaders.flat_map(&:dirs).freeze + end + end + + self.mutex = Mutex.new + + private # ------------------------------------------------------------------------------------- + + # @sig (String, Module) -> void + def set_autoloads_in_dir(dir, parent) + ls(dir) do |basename, abspath| + begin + if ruby?(basename) + basename.delete_suffix!(".rb") + cname = inflector.camelize(basename, abspath).to_sym + autoload_file(parent, cname, abspath) + elsif dir?(abspath) + # In a Rails application, `app/models/concerns` is a subdirectory of + # `app/models`, but both of them are root directories. + # + # To resolve the ambiguity file name -> constant path this introduces, + # the `app/models/concerns` directory is totally ignored as a namespace, + # it counts only as root. The guard checks that. + unless root_dir?(abspath) + cname = inflector.camelize(basename, abspath).to_sym + if collapse?(abspath) + set_autoloads_in_dir(abspath, parent) + else + autoload_subdir(parent, cname, abspath) + end + end + end + rescue ::NameError => error + path_type = ruby?(abspath) ? "file" : "directory" + + raise NameError.new(<<~MESSAGE, error.name) + #{error.message} inferred by #{inflector.class} from #{path_type} + + #{abspath} + + Possible ways to address this: + + * Tell Zeitwerk to ignore this particular #{path_type}. + * Tell Zeitwerk to ignore one of its parent directories. + * Rename the #{path_type} to comply with the naming conventions. + * Modify the inflector to handle this case. + MESSAGE + end + end + end + + # @sig (Module, Symbol, String) -> void + def autoload_subdir(parent, cname, subdir) + if autoload_path = autoload_path_set_by_me_for?(parent, cname) + cpath = cpath(parent, cname) + register_explicit_namespace(cpath) if ruby?(autoload_path) + # We do not need to issue another autoload, the existing one is enough + # no matter if it is for a file or a directory. Just remember the + # subdirectory has to be visited if the namespace is used. + lazy_subdirs[cpath] << subdir + elsif !cdef?(parent, cname) + # First time we find this namespace, set an autoload for it. + lazy_subdirs[cpath(parent, cname)] << subdir + set_autoload(parent, cname, subdir) + else + # For whatever reason the constant that corresponds to this namespace has + # already been defined, we have to recurse. + log("the namespace #{cpath(parent, cname)} already exists, descending into #{subdir}") if logger + set_autoloads_in_dir(subdir, cget(parent, cname)) + end + end + + # @sig (Module, Symbol, String) -> void + def autoload_file(parent, cname, file) + if autoload_path = strict_autoload_path(parent, cname) || Registry.inception?(cpath(parent, cname)) + # First autoload for a Ruby file wins, just ignore subsequent ones. + if ruby?(autoload_path) + log("file #{file} is ignored because #{autoload_path} has precedence") if logger + else + promote_namespace_from_implicit_to_explicit( + dir: autoload_path, + file: file, + parent: parent, + cname: cname + ) + end + elsif cdef?(parent, cname) + log("file #{file} is ignored because #{cpath(parent, cname)} is already defined") if logger + else + set_autoload(parent, cname, file) + end + end + + # `dir` is the directory that would have autovivified a namespace. `file` is + # the file where we've found the namespace is explicitly defined. 
+ # + # @sig (dir: String, file: String, parent: Module, cname: Symbol) -> void + def promote_namespace_from_implicit_to_explicit(dir:, file:, parent:, cname:) + autoloads.delete(dir) + Registry.unregister_autoload(dir) + + log("earlier autoload for #{cpath(parent, cname)} discarded, it is actually an explicit namespace defined in #{file}") if logger + + set_autoload(parent, cname, file) + register_explicit_namespace(cpath(parent, cname)) + end + + # @sig (Module, Symbol, String) -> void + def set_autoload(parent, cname, abspath) + parent.autoload(cname, abspath) + + if logger + if ruby?(abspath) + log("autoload set for #{cpath(parent, cname)}, to be loaded from #{abspath}") + else + log("autoload set for #{cpath(parent, cname)}, to be autovivified from #{abspath}") + end + end + + autoloads[abspath] = [parent, cname] + Registry.register_autoload(self, abspath) + + # See why in the documentation of Zeitwerk::Registry.inceptions. + unless parent.autoload?(cname) + Registry.register_inception(cpath(parent, cname), abspath, self) + end + end + + # @sig (Module, Symbol) -> String? + def autoload_path_set_by_me_for?(parent, cname) + if autoload_path = strict_autoload_path(parent, cname) + autoload_path if autoloads.key?(autoload_path) + else + Registry.inception?(cpath(parent, cname)) + end + end + + # @sig (String) -> void + def register_explicit_namespace(cpath) + ExplicitNamespace.register(cpath, self) + end + + # @sig (String) -> void + def raise_if_conflicting_directory(dir) + self.class.mutex.synchronize do + Registry.loaders.each do |loader| + next if loader == self + next if loader.ignores?(dir) + + dir = dir + "/" + loader.root_dirs.each do |root_dir, _namespace| + next if ignores?(root_dir) + + root_dir = root_dir + "/" + if dir.start_with?(root_dir) || root_dir.start_with?(dir) + require "pp" # Needed for pretty_inspect, even in Ruby 2.5. + raise Error, + "loader\n\n#{pretty_inspect}\n\nwants to manage directory #{dir.chop}," \ + " which is already managed by\n\n#{loader.pretty_inspect}\n" + EOS + end + end + end + end + end + + # @sig (String, Object, String) -> void + def run_on_unload_callbacks(cpath, value, abspath) + # Order matters. If present, run the most specific one. + on_unload_callbacks[cpath]&.each { |c| c.call(value, abspath) } + on_unload_callbacks[:ANY]&.each { |c| c.call(cpath, value, abspath) } + end + + # @sig (Module, Symbol) -> void + def unload_autoload(parent, cname) + parent.__send__(:remove_const, cname) + log("autoload for #{cpath(parent, cname)} removed") if logger + end + + # @sig (Module, Symbol) -> void + def unload_cref(parent, cname) + # Let's optimistically remove_const. The way we use it, this is going to + # succeed always if all is good. + parent.__send__(:remove_const, cname) + rescue ::NameError + # There are a few edge scenarios in which this may happen. If the constant + # is gone, that is OK, anyway. + else + log("#{cpath(parent, cname)} unloaded") if logger + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/callbacks.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/callbacks.rb new file mode 100644 index 0000000..5ec2758 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/callbacks.rb @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +module Zeitwerk::Loader::Callbacks + include Zeitwerk::RealModName + + # Invoked from our decorated Kernel#require when a managed file is autoloaded. 
+ # + # @private + # @sig (String) -> void + def on_file_autoloaded(file) + cref = autoloads.delete(file) + cpath = cpath(*cref) + + # If reloading is enabled, we need to put this constant for unloading + # regardless of what cdef? says. In Ruby < 3.1 the internal state is not + # fully cleared. Module#constants still includes it, and you need to + # remove_const. See https://github.com/ruby/ruby/pull/4715. + to_unload[cpath] = [file, cref] if reloading_enabled? + Zeitwerk::Registry.unregister_autoload(file) + + if cdef?(*cref) + log("constant #{cpath} loaded from file #{file}") if logger + run_on_load_callbacks(cpath, cget(*cref), file) unless on_load_callbacks.empty? + else + raise Zeitwerk::NameError.new("expected file #{file} to define constant #{cpath}, but didn't", cref.last) + end + end + + # Invoked from our decorated Kernel#require when a managed directory is + # autoloaded. + # + # @private + # @sig (String) -> void + def on_dir_autoloaded(dir) + # Module#autoload does not serialize concurrent requires, and we handle + # directories ourselves, so the callback needs to account for concurrency. + # + # Multi-threading would introduce a race condition here in which thread t1 + # autovivifies the module, and while autoloads for its children are being + # set, thread t2 autoloads the same namespace. + # + # Without the mutex and subsequent delete call, t2 would reset the module. + # That not only would reassign the constant (undesirable per se) but, worse, + # the module object created by t2 wouldn't have any of the autoloads for its + # children, since t1 would have correctly deleted its lazy_subdirs entry. + mutex2.synchronize do + if cref = autoloads.delete(dir) + autovivified_module = cref[0].const_set(cref[1], Module.new) + cpath = autovivified_module.name + log("module #{cpath} autovivified from directory #{dir}") if logger + + to_unload[cpath] = [dir, cref] if reloading_enabled? + + # We don't unregister `dir` in the registry because concurrent threads + # wouldn't find a loader associated to it in Kernel#require and would + # try to require the directory. Instead, we are going to keep track of + # these to be able to unregister later if eager loading. + autoloaded_dirs << dir + + on_namespace_loaded(autovivified_module) + + run_on_load_callbacks(cpath, autovivified_module, dir) unless on_load_callbacks.empty? + end + end + end + + # Invoked when a class or module is created or reopened, either from the + # tracer or from module autovivification. If the namespace has matching + # subdirectories, we descend into them now. + # + # @private + # @sig (Module) -> void + def on_namespace_loaded(namespace) + if subdirs = lazy_subdirs.delete(real_mod_name(namespace)) + subdirs.each do |subdir| + set_autoloads_in_dir(subdir, namespace) + end + end + end + + private + + # @sig (String, Object) -> void + def run_on_load_callbacks(cpath, value, abspath) + # Order matters. If present, run the most specific one. + callbacks = reloading_enabled? ? 
on_load_callbacks[cpath] : on_load_callbacks.delete(cpath) + callbacks&.each { |c| c.call(value, abspath) } + + callbacks = on_load_callbacks[:ANY] + callbacks&.each { |c| c.call(cpath, value, abspath) } + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/config.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/config.rb new file mode 100644 index 0000000..0bf45b5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/config.rb @@ -0,0 +1,321 @@ +# frozen_string_literal: true + +require "set" +require "securerandom" + +module Zeitwerk::Loader::Config + # Absolute paths of the root directories. Stored in a hash to preserve + # order, easily handle duplicates, and also be able to have a fast lookup, + # needed for detecting nested paths. + # + # "/Users/fxn/blog/app/assets" => true, + # "/Users/fxn/blog/app/channels" => true, + # ... + # + # This is a private collection maintained by the loader. The public + # interface for it is `push_dir` and `dirs`. + # + # @private + # @sig Hash[String, true] + attr_reader :root_dirs + + # @sig #camelize + attr_accessor :inflector + + # Absolute paths of files, directories, or glob patterns to be totally + # ignored. + # + # @private + # @sig Set[String] + attr_reader :ignored_glob_patterns + + # The actual collection of absolute file and directory names at the time the + # ignored glob patterns were expanded. Computed on setup, and recomputed on + # reload. + # + # @private + # @sig Set[String] + attr_reader :ignored_paths + + # Absolute paths of directories or glob patterns to be collapsed. + # + # @private + # @sig Set[String] + attr_reader :collapse_glob_patterns + + # The actual collection of absolute directory names at the time the collapse + # glob patterns were expanded. Computed on setup, and recomputed on reload. + # + # @private + # @sig Set[String] + attr_reader :collapse_dirs + + # Absolute paths of files or directories not to be eager loaded. + # + # @private + # @sig Set[String] + attr_reader :eager_load_exclusions + + # User-oriented callbacks to be fired on setup and on reload. + # + # @private + # @sig Array[{ () -> void }] + attr_reader :on_setup_callbacks + + # User-oriented callbacks to be fired when a constant is loaded. + # + # @private + # @sig Hash[String, Array[{ (Object, String) -> void }]] + # Hash[Symbol, Array[{ (String, Object, String) -> void }]] + attr_reader :on_load_callbacks + + # User-oriented callbacks to be fired before constants are removed. + # + # @private + # @sig Hash[String, Array[{ (Object, String) -> void }]] + # Hash[Symbol, Array[{ (String, Object, String) -> void }]] + attr_reader :on_unload_callbacks + + # @sig #call | #debug | nil + attr_accessor :logger + + def initialize + @initialized_at = Time.now + @root_dirs = {} + @inflector = Zeitwerk::Inflector.new + @ignored_glob_patterns = Set.new + @ignored_paths = Set.new + @collapse_glob_patterns = Set.new + @collapse_dirs = Set.new + @eager_load_exclusions = Set.new + @reloading_enabled = false + @on_setup_callbacks = [] + @on_load_callbacks = {} + @on_unload_callbacks = {} + @logger = self.class.default_logger + @tag = SecureRandom.hex(3) + end + + # Pushes `path` to the list of root directories. + # + # Raises `Zeitwerk::Error` if `path` does not exist, or if another loader in + # the same process already manages that directory or one of its ascendants or + # descendants. 
+ # + # @raise [Zeitwerk::Error] + # @sig (String | Pathname, Module) -> void + def push_dir(path, namespace: Object) + # Note that Class < Module. + unless namespace.is_a?(Module) + raise Zeitwerk::Error, "#{namespace.inspect} is not a class or module object, should be" + end + + abspath = File.expand_path(path) + if dir?(abspath) + raise_if_conflicting_directory(abspath) + root_dirs[abspath] = namespace + else + raise Zeitwerk::Error, "the root directory #{abspath} does not exist" + end + end + + # Returns the loader's tag. + # + # Implemented as a method instead of via attr_reader for symmetry with the + # writer below. + # + # @sig () -> String + def tag + @tag + end + + # Sets a tag for the loader, useful for logging. + # + # @param tag [#to_s] + # @sig (#to_s) -> void + def tag=(tag) + @tag = tag.to_s + end + + # Absolute paths of the root directories. This is a read-only collection, + # please push here via `push_dir`. + # + # @sig () -> Array[String] + def dirs + root_dirs.keys.freeze + end + + # You need to call this method before setup in order to be able to reload. + # There is no way to undo this, either you want to reload or you don't. + # + # @raise [Zeitwerk::Error] + # @sig () -> void + def enable_reloading + mutex.synchronize do + break if @reloading_enabled + + if @setup + raise Zeitwerk::Error, "cannot enable reloading after setup" + else + @reloading_enabled = true + end + end + end + + # @sig () -> bool + def reloading_enabled? + @reloading_enabled + end + + # Let eager load ignore the given files or directories. The constants defined + # in those files are still autoloadable. + # + # @sig (*(String | Pathname | Array[String | Pathname])) -> void + def do_not_eager_load(*paths) + mutex.synchronize { eager_load_exclusions.merge(expand_paths(paths)) } + end + + # Configure files, directories, or glob patterns to be totally ignored. + # + # @sig (*(String | Pathname | Array[String | Pathname])) -> void + def ignore(*glob_patterns) + glob_patterns = expand_paths(glob_patterns) + mutex.synchronize do + ignored_glob_patterns.merge(glob_patterns) + ignored_paths.merge(expand_glob_patterns(glob_patterns)) + end + end + + # Configure directories or glob patterns to be collapsed. + # + # @sig (*(String | Pathname | Array[String | Pathname])) -> void + def collapse(*glob_patterns) + glob_patterns = expand_paths(glob_patterns) + mutex.synchronize do + collapse_glob_patterns.merge(glob_patterns) + collapse_dirs.merge(expand_glob_patterns(glob_patterns)) + end + end + + # Configure a block to be called after setup and on each reload. + # If setup was already done, the block runs immediately. + # + # @sig () { () -> void } -> void + def on_setup(&block) + mutex.synchronize do + on_setup_callbacks << block + block.call if @setup + end + end + + # Configure a block to be invoked once a certain constant path is loaded. + # Supports multiple callbacks, and if there are many, they are executed in + # the order in which they were defined. + # + # loader.on_load("SomeApiClient") do |klass, _abspath| + # klass.endpoint = "https://api.dev" + # end + # + # Can also be configured for any constant loaded: + # + # loader.on_load do |cpath, value, abspath| + # # ... 
+ # end + # + # @raise [TypeError] + # @sig (String) { (Object, String) -> void } -> void + # (:ANY) { (String, Object, String) -> void } -> void + def on_load(cpath = :ANY, &block) + raise TypeError, "on_load only accepts strings" unless cpath.is_a?(String) || cpath == :ANY + + mutex.synchronize do + (on_load_callbacks[cpath] ||= []) << block + end + end + + # Configure a block to be invoked right before a certain constant is removed. + # Supports multiple callbacks, and if there are many, they are executed in the + # order in which they were defined. + # + # loader.on_unload("Country") do |klass, _abspath| + # klass.clear_cache + # end + # + # Can also be configured for any removed constant: + # + # loader.on_unload do |cpath, value, abspath| + # # ... + # end + # + # @raise [TypeError] + # @sig (String) { (Object) -> void } -> void + # (:ANY) { (String, Object) -> void } -> void + def on_unload(cpath = :ANY, &block) + raise TypeError, "on_unload only accepts strings" unless cpath.is_a?(String) || cpath == :ANY + + mutex.synchronize do + (on_unload_callbacks[cpath] ||= []) << block + end + end + + # Logs to `$stdout`, handy shortcut for debugging. + # + # @sig () -> void + def log! + @logger = ->(msg) { puts msg } + end + + # @private + # @sig (String) -> bool + def ignores?(abspath) + ignored_paths.any? do |ignored_path| + ignored_path == abspath || (dir?(ignored_path) && abspath.start_with?(ignored_path + "/")) + end + end + + private + + # @sig () -> Array[String] + def actual_root_dirs + root_dirs.reject do |root_dir, _namespace| + !dir?(root_dir) || ignored_paths.member?(root_dir) + end + end + + # @sig (String) -> bool + def root_dir?(dir) + root_dirs.key?(dir) + end + + # @sig (String) -> bool + def excluded_from_eager_load?(abspath) + eager_load_exclusions.member?(abspath) + end + + # @sig (String) -> bool + def collapse?(dir) + collapse_dirs.member?(dir) + end + + # @sig (String | Pathname | Array[String | Pathname]) -> Array[String] + def expand_paths(paths) + paths.flatten.map! { |path| File.expand_path(path) } + end + + # @sig (Array[String]) -> Array[String] + def expand_glob_patterns(glob_patterns) + # Note that Dir.glob works with regular file names just fine. That is, + # glob patterns technically need no wildcards. + glob_patterns.flat_map { |glob_pattern| Dir.glob(glob_pattern) } + end + + # @sig () -> void + def recompute_ignored_paths + ignored_paths.replace(expand_glob_patterns(ignored_glob_patterns)) + end + + # @sig () -> void + def recompute_collapse_dirs + collapse_dirs.replace(expand_glob_patterns(collapse_glob_patterns)) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/helpers.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/helpers.rb new file mode 100644 index 0000000..d8cf1f5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/loader/helpers.rb @@ -0,0 +1,97 @@ +# frozen_string_literal: true + +module Zeitwerk::Loader::Helpers + private + + # --- Logging ----------------------------------------------------------------------------------- + + # @sig (String) -> void + def log(message) + method_name = logger.respond_to?(:debug) ? 
:debug : :call + logger.send(method_name, "Zeitwerk@#{tag}: #{message}") + end + + # --- Files and directories --------------------------------------------------------------------- + + # @sig (String) { (String, String) -> void } -> void + def ls(dir) + Dir.each_child(dir) do |basename| + next if hidden?(basename) + + abspath = File.join(dir, basename) + next if ignored_paths.member?(abspath) + + # We freeze abspath because that saves allocations when passed later to + # File methods. See #125. + yield basename, abspath.freeze + end + end + + # @sig (String) -> bool + def ruby?(path) + path.end_with?(".rb") + end + + # @sig (String) -> bool + def dir?(path) + File.directory?(path) + end + + # @sig String -> bool + def hidden?(basename) + basename.start_with?(".") + end + + # --- Constants --------------------------------------------------------------------------------- + + # The autoload? predicate takes into account the ancestor chain of the + # receiver, like const_defined? and other methods in the constants API do. + # + # For example, given + # + # class A + # autoload :X, "x.rb" + # end + # + # class B < A + # end + # + # B.autoload?(:X) returns "x.rb". + # + # We need a way to strictly check in parent ignoring ancestors. + # + # @sig (Module, Symbol) -> String? + if method(:autoload?).arity == 1 + def strict_autoload_path(parent, cname) + parent.autoload?(cname) if cdef?(parent, cname) + end + else + def strict_autoload_path(parent, cname) + parent.autoload?(cname, false) + end + end + + # @sig (Module, Symbol) -> String + if Symbol.method_defined?(:name) + # Symbol#name was introduced in Ruby 3.0. It returns always the same + # frozen object, so we may save a few string allocations. + def cpath(parent, cname) + Object == parent ? cname.name : "#{real_mod_name(parent)}::#{cname.name}" + end + else + def cpath(parent, cname) + Object == parent ? cname.to_s : "#{real_mod_name(parent)}::#{cname}" + end + end + + # @sig (Module, Symbol) -> bool + def cdef?(parent, cname) + parent.const_defined?(cname, false) + end + + # @raise [NameError] + # @sig (Module, Symbol) -> Object + def cget(parent, cname) + parent.const_get(cname, false) + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/real_mod_name.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/real_mod_name.rb new file mode 100644 index 0000000..326a619 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/real_mod_name.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module Zeitwerk::RealModName + UNBOUND_METHOD_MODULE_NAME = Module.instance_method(:name) + private_constant :UNBOUND_METHOD_MODULE_NAME + + # Returns the real name of the class or module, as set after the first + # constant to which it was assigned (or nil). + # + # The name method can be overridden, hence the indirection in this method. + # + # @sig (Module) -> String? 
+ if UnboundMethod.method_defined?(:bind_call) + def real_mod_name(mod) + UNBOUND_METHOD_MODULE_NAME.bind_call(mod) + end + else + def real_mod_name(mod) + UNBOUND_METHOD_MODULE_NAME.bind(mod).call + end + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/registry.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/registry.rb new file mode 100644 index 0000000..d5f1c0c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/registry.rb @@ -0,0 +1,145 @@ +# frozen_string_literal: true + +module Zeitwerk + module Registry # :nodoc: all + class << self + # Keeps track of all loaders. Useful to broadcast messages and to prevent + # them from being garbage collected. + # + # @private + # @sig Array[Zeitwerk::Loader] + attr_reader :loaders + + # Registers loaders created with `for_gem` to make the method idempotent + # in case of reload. + # + # @private + # @sig Hash[String, Zeitwerk::Loader] + attr_reader :loaders_managing_gems + + # Maps absolute paths to the loaders responsible for them. + # + # This information is used by our decorated `Kernel#require` to be able to + # invoke callbacks and autovivify modules. + # + # @private + # @sig Hash[String, Zeitwerk::Loader] + attr_reader :autoloads + + # This hash table addresses an edge case in which an autoload is ignored. + # + # For example, let's suppose we want to autoload in a gem like this: + # + # # lib/my_gem.rb + # loader = Zeitwerk::Loader.new + # loader.push_dir(__dir__) + # loader.setup + # + # module MyGem + # end + # + # if you require "my_gem", as Bundler would do, this happens while setting + # up autoloads: + # + # 1. Object.autoload?(:MyGem) returns `nil` because the autoload for + # the constant is issued by Zeitwerk while the same file is being + # required. + # 2. The constant `MyGem` is undefined while setup runs. + # + # Therefore, a directory `lib/my_gem` would autovivify a module according to + # the existing information. But that would be wrong. + # + # To overcome this fundamental limitation, we keep track of the constant + # paths that are in this situation ---in the example above, "MyGem"--- and + # take this collection into account for the autovivification logic. + # + # Note that you cannot generally address this by moving the setup code + # below the constant definition, because we want libraries to be able to + # use managed constants in the module body: + # + # module MyGem + # include MyConcern + # end + # + # @private + # @sig Hash[String, [String, Zeitwerk::Loader]] + attr_reader :inceptions + + # Registers a loader. + # + # @private + # @sig (Zeitwerk::Loader) -> void + def register_loader(loader) + loaders << loader + end + + # @private + # @sig (Zeitwerk::Loader) -> void + def unregister_loader(loader) + loaders.delete(loader) + loaders_managing_gems.delete_if { |_, l| l == loader } + autoloads.delete_if { |_, l| l == loader } + inceptions.delete_if { |_, (_, l)| l == loader } + end + + # This method returns always a loader, the same instance for the same root + # file. That is how Zeitwerk::Loader.for_gem is idempotent. 
+ # + # @private + # @sig (String) -> Zeitwerk::Loader + def loader_for_gem(root_file) + loaders_managing_gems[root_file] ||= begin + Loader.new.tap do |loader| + loader.tag = File.basename(root_file, ".rb") + loader.inflector = GemInflector.new(root_file) + loader.push_dir(File.dirname(root_file)) + end + end + end + + # @private + # @sig (Zeitwerk::Loader, String) -> String + def register_autoload(loader, abspath) + autoloads[abspath] = loader + end + + # @private + # @sig (String) -> void + def unregister_autoload(abspath) + autoloads.delete(abspath) + end + + # @private + # @sig (String, String, Zeitwerk::Loader) -> void + def register_inception(cpath, abspath, loader) + inceptions[cpath] = [abspath, loader] + end + + # @private + # @sig (String) -> String? + def inception?(cpath) + if pair = inceptions[cpath] + pair.first + end + end + + # @private + # @sig (String) -> Zeitwerk::Loader? + def loader_for(path) + autoloads[path] + end + + # @private + # @sig (Zeitwerk::Loader) -> void + def on_unload(loader) + autoloads.delete_if { |_path, object| object == loader } + inceptions.delete_if { |_cpath, (_path, object)| object == loader } + end + end + + @loaders = [] + @loaders_managing_gems = {} + @autoloads = {} + @inceptions = {} + end +end diff --git a/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/version.rb b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/version.rb new file mode 100644 index 0000000..5cb3403 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/gems/zeitwerk-2.5.4/lib/zeitwerk/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Zeitwerk + VERSION = "2.5.4" +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/CFPropertyList-3.0.2.gemspec b/vendor/bundle/ruby/3.0.0/specifications/CFPropertyList-3.0.2.gemspec new file mode 100644 index 0000000..2f7937f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/CFPropertyList-3.0.2.gemspec @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# stub: CFPropertyList 3.0.2 ruby lib + +Gem::Specification.new do |s| + s.name = "CFPropertyList".freeze + s.version = "3.0.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Christian Kruse".freeze] + s.date = "2019-12-06" + s.description = "This is a module to read, write and manipulate both binary and XML property lists as defined by apple.".freeze + s.email = "cjk@defunct.ch".freeze + s.extra_rdoc_files = ["README.rdoc".freeze] + s.files = ["README.rdoc".freeze] + s.homepage = "http://github.com/ckruse/CFPropertyList".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Read, write and manipulate both binary and XML property lists as defined by apple".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<rake>.freeze, [">= 0.7.0"]) + else + s.add_dependency(%q<rake>.freeze, [">= 0.7.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/CFPropertyList-3.0.5.gemspec b/vendor/bundle/ruby/3.0.0/specifications/CFPropertyList-3.0.5.gemspec new file mode 100644 index 0000000..14918ee --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/CFPropertyList-3.0.5.gemspec @@ -0,0 +1,40 @@ +# -*- encoding: utf-8 -*- +# stub: CFPropertyList 3.0.5 ruby lib + +Gem::Specification.new do |s| + s.name = "CFPropertyList".freeze + s.version = "3.0.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Christian Kruse".freeze] + s.date = "2021-11-15" + s.description = "This is a module to read, write and manipulate both binary and XML property lists as defined by apple.".freeze + s.email = "cjk@defunct.ch".freeze + s.extra_rdoc_files = ["README.rdoc".freeze] + s.files = ["README.rdoc".freeze] + s.homepage = "https://github.com/ckruse/CFPropertyList".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Read, write and manipulate both binary and XML property lists as defined by apple".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<rexml>.freeze, [">= 0"]) + s.add_development_dependency(%q<libxml-ruby>.freeze, [">= 0"]) + s.add_development_dependency(%q<minitest>.freeze, [">= 0"]) + s.add_development_dependency(%q<nokogiri>.freeze, [">= 0"]) + s.add_development_dependency(%q<rake>.freeze, [">= 0.7.0"]) + else + s.add_dependency(%q<rexml>.freeze, [">= 0"]) + s.add_dependency(%q<libxml-ruby>.freeze, [">= 0"]) + s.add_dependency(%q<minitest>.freeze, [">= 0"]) + s.add_dependency(%q<nokogiri>.freeze, [">= 0"]) + s.add_dependency(%q<rake>.freeze, [">= 0.7.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/activesupport-5.2.4.4.gemspec b/vendor/bundle/ruby/3.0.0/specifications/activesupport-5.2.4.4.gemspec new file mode 100644 index 0000000..9609bbd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/activesupport-5.2.4.4.gemspec @@ -0,0 +1,39 @@ +# -*- encoding: utf-8 -*- +# stub: activesupport 5.2.4.4 ruby lib + +Gem::Specification.new do |s| + s.name = "activesupport".freeze + s.version = "5.2.4.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "changelog_uri" => "https://github.com/rails/rails/blob/v5.2.4.4/activesupport/CHANGELOG.md", "source_code_uri" => "https://github.com/rails/rails/tree/v5.2.4.4/activesupport" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["David Heinemeier Hansson".freeze] + s.date = "2020-09-09" + s.description = "A toolkit of support libraries and Ruby core extensions extracted from the Rails framework. 
Rich support for multibyte strings, internationalization, time zones, and testing.".freeze + s.email = "david@loudthinking.com".freeze + s.homepage = "http://rubyonrails.org".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--encoding".freeze, "UTF-8".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.2.2".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A toolkit of support libraries and Ruby core extensions extracted from the Rails framework.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<i18n>.freeze, [">= 0.7", "< 2"]) + s.add_runtime_dependency(%q<tzinfo>.freeze, ["~> 1.1"]) + s.add_runtime_dependency(%q<minitest>.freeze, ["~> 5.1"]) + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + else + s.add_dependency(%q<i18n>.freeze, [">= 0.7", "< 2"]) + s.add_dependency(%q<tzinfo>.freeze, ["~> 1.1"]) + s.add_dependency(%q<minitest>.freeze, ["~> 5.1"]) + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/activesupport-6.1.4.6.gemspec b/vendor/bundle/ruby/3.0.0/specifications/activesupport-6.1.4.6.gemspec new file mode 100644 index 0000000..decdd18 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/activesupport-6.1.4.6.gemspec @@ -0,0 +1,41 @@ +# -*- encoding: utf-8 -*- +# stub: activesupport 6.1.4.6 ruby lib + +Gem::Specification.new do |s| + s.name = "activesupport".freeze + s.version = "6.1.4.6" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/rails/rails/issues", "changelog_uri" => "https://github.com/rails/rails/blob/v6.1.4.6/activesupport/CHANGELOG.md", "documentation_uri" => "https://api.rubyonrails.org/v6.1.4.6/", "mailing_list_uri" => "https://discuss.rubyonrails.org/c/rubyonrails-talk", "source_code_uri" => "https://github.com/rails/rails/tree/v6.1.4.6/activesupport" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["David Heinemeier Hansson".freeze] + s.date = "2022-02-11" + s.description = "A toolkit of support libraries and Ruby core extensions extracted from the Rails framework. Rich support for multibyte strings, internationalization, time zones, and testing.".freeze + s.email = "david@loudthinking.com".freeze + s.homepage = "https://rubyonrails.org".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--encoding".freeze, "UTF-8".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.5.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A toolkit of support libraries and Ruby core extensions extracted from the Rails framework.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_runtime_dependency(%q<i18n>.freeze, [">= 1.6", "< 2"]) + s.add_runtime_dependency(%q<tzinfo>.freeze, ["~> 2.0"]) + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + s.add_runtime_dependency(%q<zeitwerk>.freeze, ["~> 2.3"]) + s.add_runtime_dependency(%q<minitest>.freeze, [">= 5.1"]) + else + s.add_dependency(%q<i18n>.freeze, [">= 1.6", "< 2"]) + s.add_dependency(%q<tzinfo>.freeze, ["~> 2.0"]) + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0", ">= 1.0.2"]) + s.add_dependency(%q<zeitwerk>.freeze, ["~> 2.3"]) + s.add_dependency(%q<minitest>.freeze, [">= 5.1"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/addressable-2.8.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/addressable-2.8.0.gemspec new file mode 100644 index 0000000..cf8d36c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/addressable-2.8.0.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# stub: addressable 2.8.0 ruby lib + +Gem::Specification.new do |s| + s.name = "addressable".freeze + s.version = "2.8.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Bob Aman".freeze] + s.date = "2021-07-03" + s.description = "Addressable is an alternative implementation to the URI implementation that is\npart of Ruby's standard library. It is flexible, offers heuristic parsing, and\nadditionally provides extensive support for IRIs and URI templates.\n".freeze + s.email = "bob@sporkmonger.com".freeze + s.extra_rdoc_files = ["README.md".freeze] + s.files = ["README.md".freeze] + s.homepage = "https://github.com/sporkmonger/addressable".freeze + s.licenses = ["Apache-2.0".freeze] + s.rdoc_options = ["--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "URI Implementation".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<public_suffix>.freeze, [">= 2.0.2", "< 5.0"]) + s.add_development_dependency(%q<bundler>.freeze, [">= 1.0", "< 3.0"]) + else + s.add_dependency(%q<public_suffix>.freeze, [">= 2.0.2", "< 5.0"]) + s.add_dependency(%q<bundler>.freeze, [">= 1.0", "< 3.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/algoliasearch-1.27.5.gemspec b/vendor/bundle/ruby/3.0.0/specifications/algoliasearch-1.27.5.gemspec new file mode 100644 index 0000000..e7b24d5 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/algoliasearch-1.27.5.gemspec @@ -0,0 +1,42 @@ +# -*- encoding: utf-8 -*- +# stub: algoliasearch 1.27.5 ruby lib + +Gem::Specification.new do |s| + s.name = "algoliasearch".freeze + s.version = "1.27.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/algolia/algoliasearch-client-ruby/issues", "changelog_uri" => "https://github.com/algolia/algoliasearch-client-ruby/blob/master/CHANGELOG.md", "documentation_uri" => "http://www.rubydoc.info/gems/algoliasearch", "homepage_uri" => "https://www.algolia.com/doc/api-client/ruby/getting-started/", "source_code_uri" => "https://github.com/algolia/algoliasearch-client-ruby" } if s.respond_to? 
:metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Algolia".freeze] + s.date = "2020-10-28" + s.description = "A simple Ruby client for the algolia.com REST API".freeze + s.email = "contact@algolia.com".freeze + s.extra_rdoc_files = ["CHANGELOG.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.files = ["CHANGELOG.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.homepage = "https://github.com/algolia/algoliasearch-client-ruby".freeze + s.licenses = ["MIT".freeze] + s.post_install_message = "A new major version is available for Algolia! Please now use the https://rubygems.org/gems/algolia gem to get the latest features.".freeze + s.rubygems_version = "3.2.3".freeze + s.summary = "A simple Ruby client for the algolia.com REST API".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<json>.freeze, [">= 1.5.1"]) + s.add_runtime_dependency(%q<httpclient>.freeze, ["~> 2.8", ">= 2.8.3"]) + s.add_development_dependency(%q<travis>.freeze, ["~> 0"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 0"]) + s.add_development_dependency(%q<rdoc>.freeze, ["~> 0"]) + else + s.add_dependency(%q<json>.freeze, [">= 1.5.1"]) + s.add_dependency(%q<httpclient>.freeze, ["~> 2.8", ">= 2.8.3"]) + s.add_dependency(%q<travis>.freeze, ["~> 0"]) + s.add_dependency(%q<rake>.freeze, ["~> 0"]) + s.add_dependency(%q<rdoc>.freeze, ["~> 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/atomos-0.1.3.gemspec b/vendor/bundle/ruby/3.0.0/specifications/atomos-0.1.3.gemspec new file mode 100644 index 0000000..cfbddc7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/atomos-0.1.3.gemspec @@ -0,0 +1,37 @@ +# -*- encoding: utf-8 -*- +# stub: atomos 0.1.3 ruby lib + +Gem::Specification.new do |s| + s.name = "atomos".freeze + s.version = "0.1.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Samuel Giddins".freeze] + s.bindir = "exe".freeze + s.date = "2018-08-07" + s.email = ["segiddins@segiddins.me".freeze] + s.homepage = "https://github.com/segiddins/atomos".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A simple gem to atomically write files".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.16"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 3.0"]) + s.add_development_dependency(%q<rubocop>.freeze, [">= 0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.16"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_dependency(%q<rspec>.freeze, ["~> 3.0"]) + s.add_dependency(%q<rubocop>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/claide-1.0.3.gemspec b/vendor/bundle/ruby/3.0.0/specifications/claide-1.0.3.gemspec new file mode 100644 index 0000000..99c609c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/claide-1.0.3.gemspec @@ -0,0 +1,19 @@ +# -*- encoding: utf-8 -*- +# stub: claide 1.0.3 ruby lib + +Gem::Specification.new do |s| + s.name = "claide".freeze + s.version = "1.0.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze] + s.date = "2019-08-02" + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/CLAide".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "A small command-line interface framework.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/claide-1.1.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/claide-1.1.0.gemspec new file mode 100644 index 0000000..d6776b6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/claide-1.1.0.gemspec @@ -0,0 +1,20 @@ +# -*- encoding: utf-8 -*- +# stub: claide 1.1.0 ruby lib + +Gem::Specification.new do |s| + s.name = "claide".freeze + s.version = "1.1.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze] + s.date = "2022-01-12" + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/CLAide".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A small command-line interface framework.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-1.11.2.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-1.11.2.gemspec new file mode 100644 index 0000000..752e835 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-1.11.2.gemspec @@ -0,0 +1,71 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods 1.11.2 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods".freeze + s.version = "1.11.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze, "Kyle Fuller".freeze, "Samuel Giddins".freeze] + s.date = "2021-09-13" + s.description = "CocoaPods manages library dependencies for your Xcode project.\n\nYou specify the dependencies for your project in one easy text file. 
CocoaPods resolves dependencies between libraries, fetches source code for the dependencies, and creates and maintains an Xcode workspace to build your project.\n\nUltimately, the goal is to improve discoverability of, and engagement in, third party open-source libraries, by creating a more centralized ecosystem.".freeze + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze, "kyle@fuller.li".freeze, "segiddins@segiddins.me".freeze] + s.executables = ["pod".freeze, "sandbox-pod".freeze] + s.files = ["bin/pod".freeze, "bin/sandbox-pod".freeze] + s.homepage = "https://github.com/CocoaPods/CocoaPods".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.6".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "The Cocoa library package manager.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<cocoapods-core>.freeze, ["= 1.11.2"]) + s.add_runtime_dependency(%q<claide>.freeze, [">= 1.0.2", "< 2.0"]) + s.add_runtime_dependency(%q<cocoapods-deintegrate>.freeze, [">= 1.0.3", "< 2.0"]) + s.add_runtime_dependency(%q<cocoapods-downloader>.freeze, [">= 1.4.0", "< 2.0"]) + s.add_runtime_dependency(%q<cocoapods-plugins>.freeze, [">= 1.0.0", "< 2.0"]) + s.add_runtime_dependency(%q<cocoapods-search>.freeze, [">= 1.0.0", "< 2.0"]) + s.add_runtime_dependency(%q<cocoapods-trunk>.freeze, [">= 1.4.0", "< 2.0"]) + s.add_runtime_dependency(%q<cocoapods-try>.freeze, [">= 1.1.0", "< 2.0"]) + s.add_runtime_dependency(%q<molinillo>.freeze, ["~> 0.8.0"]) + s.add_runtime_dependency(%q<xcodeproj>.freeze, [">= 1.21.0", "< 2.0"]) + s.add_runtime_dependency(%q<colored2>.freeze, ["~> 3.1"]) + s.add_runtime_dependency(%q<escape>.freeze, ["~> 0.0.4"]) + s.add_runtime_dependency(%q<fourflusher>.freeze, [">= 2.3.0", "< 3.0"]) + s.add_runtime_dependency(%q<gh_inspector>.freeze, ["~> 1.0"]) + s.add_runtime_dependency(%q<nap>.freeze, ["~> 1.0"]) + s.add_runtime_dependency(%q<ruby-macho>.freeze, [">= 1.0", "< 3.0"]) + s.add_runtime_dependency(%q<addressable>.freeze, ["~> 2.8"]) + s.add_development_dependency(%q<bacon>.freeze, ["~> 1.1"]) + s.add_development_dependency(%q<bundler>.freeze, ["~> 2.0"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + else + s.add_dependency(%q<cocoapods-core>.freeze, ["= 1.11.2"]) + s.add_dependency(%q<claide>.freeze, [">= 1.0.2", "< 2.0"]) + s.add_dependency(%q<cocoapods-deintegrate>.freeze, [">= 1.0.3", "< 2.0"]) + s.add_dependency(%q<cocoapods-downloader>.freeze, [">= 1.4.0", "< 2.0"]) + s.add_dependency(%q<cocoapods-plugins>.freeze, [">= 1.0.0", "< 2.0"]) + s.add_dependency(%q<cocoapods-search>.freeze, [">= 1.0.0", "< 2.0"]) + s.add_dependency(%q<cocoapods-trunk>.freeze, [">= 1.4.0", "< 2.0"]) + s.add_dependency(%q<cocoapods-try>.freeze, [">= 1.1.0", "< 2.0"]) + s.add_dependency(%q<molinillo>.freeze, ["~> 0.8.0"]) + s.add_dependency(%q<xcodeproj>.freeze, [">= 1.21.0", "< 2.0"]) + s.add_dependency(%q<colored2>.freeze, ["~> 3.1"]) + s.add_dependency(%q<escape>.freeze, ["~> 0.0.4"]) + s.add_dependency(%q<fourflusher>.freeze, [">= 2.3.0", "< 3.0"]) + s.add_dependency(%q<gh_inspector>.freeze, ["~> 1.0"]) + s.add_dependency(%q<nap>.freeze, ["~> 1.0"]) + s.add_dependency(%q<ruby-macho>.freeze, [">= 1.0", "< 3.0"]) + s.add_dependency(%q<addressable>.freeze, ["~> 2.8"]) + s.add_dependency(%q<bacon>.freeze, ["~> 1.1"]) + 
s.add_dependency(%q<bundler>.freeze, ["~> 2.0"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-core-1.11.2.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-core-1.11.2.gemspec new file mode 100644 index 0000000..569212a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-core-1.11.2.gemspec @@ -0,0 +1,49 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-core 1.11.2 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-core".freeze + s.version = "1.11.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze] + s.date = "2021-09-13" + s.description = "The CocoaPods-Core gem provides support to work with the models of CocoaPods.\n\n It is intended to be used in place of the CocoaPods when the the installation of the dependencies is not needed.".freeze + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/CocoaPods".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.6".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "The models of CocoaPods".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<activesupport>.freeze, [">= 5.0", "< 7"]) + s.add_runtime_dependency(%q<nap>.freeze, ["~> 1.0"]) + s.add_runtime_dependency(%q<fuzzy_match>.freeze, ["~> 2.0.4"]) + s.add_runtime_dependency(%q<algoliasearch>.freeze, ["~> 1.0"]) + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.1"]) + s.add_runtime_dependency(%q<typhoeus>.freeze, ["~> 1.0"]) + s.add_runtime_dependency(%q<netrc>.freeze, ["~> 0.11"]) + s.add_runtime_dependency(%q<addressable>.freeze, ["~> 2.8"]) + s.add_runtime_dependency(%q<public_suffix>.freeze, ["~> 4.0"]) + s.add_development_dependency(%q<bacon>.freeze, ["~> 1.1"]) + else + s.add_dependency(%q<activesupport>.freeze, [">= 5.0", "< 7"]) + s.add_dependency(%q<nap>.freeze, ["~> 1.0"]) + s.add_dependency(%q<fuzzy_match>.freeze, ["~> 2.0.4"]) + s.add_dependency(%q<algoliasearch>.freeze, ["~> 1.0"]) + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.1"]) + s.add_dependency(%q<typhoeus>.freeze, ["~> 1.0"]) + s.add_dependency(%q<netrc>.freeze, ["~> 0.11"]) + s.add_dependency(%q<addressable>.freeze, ["~> 2.8"]) + s.add_dependency(%q<public_suffix>.freeze, ["~> 4.0"]) + s.add_dependency(%q<bacon>.freeze, ["~> 1.1"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-deintegrate-1.0.4.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-deintegrate-1.0.4.gemspec new file mode 100644 index 0000000..575f10f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-deintegrate-1.0.4.gemspec @@ -0,0 +1,20 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-deintegrate 1.0.4 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-deintegrate".freeze + s.version = "1.0.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Kyle Fuller".freeze] + s.date = "2019-03-27" + s.email = ["kyle@fuller.li".freeze] + s.homepage = "https://github.com/kylef/cocoapods-deintegrate".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A CocoaPods plugin to remove and de-integrate CocoaPods from your project.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-deintegrate-1.0.5.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-deintegrate-1.0.5.gemspec new file mode 100644 index 0000000..01ce3d1 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-deintegrate-1.0.5.gemspec @@ -0,0 +1,20 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-deintegrate 1.0.5 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-deintegrate".freeze + s.version = "1.0.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Kyle Fuller".freeze] + s.date = "2021-08-16" + s.email = ["kyle@fuller.li".freeze] + s.homepage = "https://github.com/kylef/cocoapods-deintegrate".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A CocoaPods plugin to remove and de-integrate CocoaPods from your project.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-downloader-1.4.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-downloader-1.4.0.gemspec new file mode 100644 index 0000000..bf982b8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-downloader-1.4.0.gemspec @@ -0,0 +1,20 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-downloader 1.4.0 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-downloader".freeze + s.version = "1.4.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze] + s.date = "2020-07-17" + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/cocoapods-downloader".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A small library for downloading files from remotes in a folder.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-downloader-1.5.1.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-downloader-1.5.1.gemspec new file mode 100644 index 0000000..d5adaed --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-downloader-1.5.1.gemspec @@ -0,0 +1,20 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-downloader 1.5.1 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-downloader".freeze + s.version = "1.5.1" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze, "Fabio Pelosin".freeze] + s.date = "2021-09-07" + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/cocoapods-downloader".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "A small library for downloading files from remotes in a folder.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-plugins-1.0.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-plugins-1.0.0.gemspec new file mode 100644 index 0000000..8920b00 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-plugins-1.0.0.gemspec @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-plugins 1.0.0 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-plugins".freeze + s.version = "1.0.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["David Grandinetti".freeze, "Olivier Halligon".freeze] + s.date = "2016-05-10" + s.description = " This CocoaPods plugin shows information about all available CocoaPods plugins\n (yes, this is very meta!).\n This CP plugin adds the \"pod plugins\" command to CocoaPods so that you can list\n all plugins (registered in the reference JSON hosted at CocoaPods/cocoapods-plugins)\n".freeze + s.homepage = "https://github.com/cocoapods/cocoapods-plugins".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "CocoaPods plugin which shows info about available CocoaPods plugins.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<nap>.freeze, [">= 0"]) + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_development_dependency(%q<rake>.freeze, [">= 0"]) + else + s.add_dependency(%q<nap>.freeze, [">= 0"]) + s.add_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_dependency(%q<rake>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-search-1.0.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-search-1.0.0.gemspec new file mode 100644 index 0000000..19d7690 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-search-1.0.0.gemspec @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-search 1.0.0 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-search".freeze + s.version = "1.0.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Dur\u00E1n".freeze, "Fabio Pelosin".freeze, "Emma Koszinowski".freeze] + s.date = "2016-05-10" + s.description = "Search for pods.".freeze + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze, "emkosz@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/cocoapods-search".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Searches for pods, ignoring case, whose name matches `QUERY`. If the `--full` option is specified, this will also search in the summary and description of the pods.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_development_dependency(%q<rake>.freeze, [">= 0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_dependency(%q<rake>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-search-1.0.1.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-search-1.0.1.gemspec new file mode 100644 index 0000000..e3cc982 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-search-1.0.1.gemspec @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-search 1.0.1 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-search".freeze + s.version = "1.0.1" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Dur\u00E1n".freeze, "Fabio Pelosin".freeze, "Emma Koszinowski".freeze] + s.date = "2021-08-13" + s.description = "Search for pods.".freeze + s.email = ["eloy.de.enige@gmail.com".freeze, "fabiopelosin@gmail.com".freeze, "emkosz@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/cocoapods-search".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Searches for pods, ignoring case, whose name matches `QUERY`. If the `--full` option is specified, this will also search in the summary and description of the pods.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_development_dependency(%q<rake>.freeze, [">= 0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_dependency(%q<rake>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-trunk-1.5.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-trunk-1.5.0.gemspec new file mode 100644 index 0000000..9d05431 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-trunk-1.5.0.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-trunk 1.5.0 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-trunk".freeze + s.version = "1.5.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Dur\u00E1n".freeze] + s.date = "2020-05-01" + s.email = ["eloy.de.enige@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/cocoapods-trunk".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Interact with trunk.cocoapods.org".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<nap>.freeze, [">= 0.8", "< 2.0"]) + s.add_runtime_dependency(%q<netrc>.freeze, ["~> 0.11"]) + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + else + s.add_dependency(%q<nap>.freeze, [">= 0.8", "< 2.0"]) + s.add_dependency(%q<netrc>.freeze, ["~> 0.11"]) + s.add_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-trunk-1.6.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-trunk-1.6.0.gemspec new file mode 100644 index 0000000..d218e2e --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-trunk-1.6.0.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-trunk 1.6.0 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-trunk".freeze + s.version = "1.6.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Dur\u00E1n".freeze] + s.date = "2021-09-01" + s.email = ["eloy.de.enige@gmail.com".freeze] + s.homepage = "https://github.com/CocoaPods/cocoapods-trunk".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Interact with trunk.cocoapods.org".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<nap>.freeze, [">= 0.8", "< 2.0"]) + s.add_runtime_dependency(%q<netrc>.freeze, ["~> 0.11"]) + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + else + s.add_dependency(%q<nap>.freeze, [">= 0.8", "< 2.0"]) + s.add_dependency(%q<netrc>.freeze, ["~> 0.11"]) + s.add_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/cocoapods-try-1.2.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-try-1.2.0.gemspec new file mode 100644 index 0000000..ba9e848 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/cocoapods-try-1.2.0.gemspec @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# stub: cocoapods-try 1.2.0 ruby lib + +Gem::Specification.new do |s| + s.name = "cocoapods-try".freeze + s.version = "1.2.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Fabio Pelosin".freeze] + s.date = "2020-04-20" + s.homepage = "https://github.com/cocoapods/cocoapods-try".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "CocoaPods plugin which allows to quickly try the demo project of a Pod.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.3"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/colored2-3.1.2.gemspec b/vendor/bundle/ruby/3.0.0/specifications/colored2-3.1.2.gemspec new file mode 100644 index 0000000..f8d31a2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/colored2-3.1.2.gemspec @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# stub: colored2 3.1.2 ruby lib + +Gem::Specification.new do |s| + s.name = "colored2".freeze + s.version = "3.1.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 1.3.6".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Chris Wanstrath".freeze, "Konstantin Gredeskoul".freeze] + s.date = "2017-02-14" + s.description = "This is a heavily modified fork of http://github.com/defunkt/colored gem, with many\nsensible pull requests combined. Since the authors of the original gem no longer support it,\nthis might, perhaps, be considered a good alternative.\n\nSimple gem that adds various color methods to String class, and can be used as follows:\n\n require 'colored2'\n\n puts 'this is red'.red\n puts 'this is red with a yellow background'.red.on.yellow\n puts 'this is red with and italic'.red.italic\n puts 'this is green bold'.green.bold << ' and regular'.green\n puts 'this is really bold blue on white but reversed'.bold.blue.on.white.reversed\n puts 'this is regular, but '.red! << 'this is red '.yellow! << ' and yellow.'.no_color!\n puts ('this is regular, but '.red! do\n 'this is red '.yellow! do\n ' and yellow.'.no_color!\n end\n end)\n\n".freeze + s.email = "kigster@gmail.com".freeze + s.homepage = "http://github.com/kigster/colored2".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Add even more color to your life.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 3.4"]) + s.add_development_dependency(%q<codeclimate-test-reporter>.freeze, [">= 0"]) + else + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_dependency(%q<rspec>.freeze, ["~> 3.4"]) + s.add_dependency(%q<codeclimate-test-reporter>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/concurrent-ruby-1.1.7.gemspec b/vendor/bundle/ruby/3.0.0/specifications/concurrent-ruby-1.1.7.gemspec new file mode 100644 index 0000000..bb2299a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/concurrent-ruby-1.1.7.gemspec @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# stub: concurrent-ruby 1.1.7 ruby lib/concurrent-ruby + +Gem::Specification.new do |s| + s.name = "concurrent-ruby".freeze + s.version = "1.1.7" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "changelog_uri" => "https://github.com/ruby-concurrency/concurrent-ruby/blob/master/CHANGELOG.md", "source_code_uri" => "https://github.com/ruby-concurrency/concurrent-ruby" } if s.respond_to? :metadata= + s.require_paths = ["lib/concurrent-ruby".freeze] + s.authors = ["Jerry D'Antonio".freeze, "Petr Chalupa".freeze, "The Ruby Concurrency Team".freeze] + s.date = "2020-08-08" + s.description = "Modern concurrency tools including agents, futures, promises, thread pools, actors, supervisors, and more.\nInspired by Erlang, Clojure, Go, JavaScript, actors, and classic concurrency patterns.\n".freeze + s.email = "concurrent-ruby@googlegroups.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "LICENSE.txt".freeze, "CHANGELOG.md".freeze] + s.files = ["CHANGELOG.md".freeze, "LICENSE.txt".freeze, "README.md".freeze] + s.homepage = "http://www.concurrent-ruby.com".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 1.9.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Modern concurrency tools for Ruby. Inspired by Erlang, Clojure, Scala, Haskell, F#, C#, Java, and classic concurrency patterns.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/concurrent-ruby-1.1.9.gemspec b/vendor/bundle/ruby/3.0.0/specifications/concurrent-ruby-1.1.9.gemspec new file mode 100644 index 0000000..ff5d586 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/concurrent-ruby-1.1.9.gemspec @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# stub: concurrent-ruby 1.1.9 ruby lib/concurrent-ruby + +Gem::Specification.new do |s| + s.name = "concurrent-ruby".freeze + s.version = "1.1.9" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "changelog_uri" => "https://github.com/ruby-concurrency/concurrent-ruby/blob/master/CHANGELOG.md", "source_code_uri" => "https://github.com/ruby-concurrency/concurrent-ruby" } if s.respond_to? 
:metadata= + s.require_paths = ["lib/concurrent-ruby".freeze] + s.authors = ["Jerry D'Antonio".freeze, "Petr Chalupa".freeze, "The Ruby Concurrency Team".freeze] + s.date = "2021-06-05" + s.description = "Modern concurrency tools including agents, futures, promises, thread pools, actors, supervisors, and more.\nInspired by Erlang, Clojure, Go, JavaScript, actors, and classic concurrency patterns.\n".freeze + s.email = "concurrent-ruby@googlegroups.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "LICENSE.txt".freeze, "CHANGELOG.md".freeze] + s.files = ["CHANGELOG.md".freeze, "LICENSE.txt".freeze, "README.md".freeze] + s.homepage = "http://www.concurrent-ruby.com".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 1.9.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Modern concurrency tools for Ruby. Inspired by Erlang, Clojure, Scala, Haskell, F#, C#, Java, and classic concurrency patterns.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/escape-0.0.4.gemspec b/vendor/bundle/ruby/3.0.0/specifications/escape-0.0.4.gemspec new file mode 100644 index 0000000..32dbed3 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/escape-0.0.4.gemspec @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# stub: escape 0.0.4 ruby lib + +Gem::Specification.new do |s| + s.name = "escape".freeze + s.version = "0.0.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Tanaka Akira".freeze] + s.date = "2007-06-29" + s.description = "...".freeze + s.extra_rdoc_files = ["Readme".freeze] + s.files = ["Readme".freeze] + s.homepage = "http://www.a-k-r.org/escape/".freeze + s.rdoc_options = ["--title".freeze, "escape".freeze, "--main".freeze, "Readme".freeze, "--line-numbers".freeze] + s.required_ruby_version = Gem::Requirement.new("> 0.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "...".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/ethon-0.15.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/ethon-0.15.0.gemspec new file mode 100644 index 0000000..bdd20d7 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/ethon-0.15.0.gemspec @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# stub: ethon 0.15.0 ruby lib + +Gem::Specification.new do |s| + s.name = "ethon".freeze + s.version = "0.15.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 1.3.6".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Hans Hasselberg".freeze] + s.date = "2021-10-12" + s.description = "Very lightweight libcurl wrapper.".freeze + s.email = ["me@hans.io".freeze] + s.homepage = "https://github.com/typhoeus/ethon".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Libcurl wrapper.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_runtime_dependency(%q<ffi>.freeze, [">= 1.15.0"]) + else + s.add_dependency(%q<ffi>.freeze, [">= 1.15.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/ffi-1.15.5.gemspec b/vendor/bundle/ruby/3.0.0/specifications/ffi-1.15.5.gemspec new file mode 100644 index 0000000..dbe5536 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/ffi-1.15.5.gemspec @@ -0,0 +1,42 @@ +# -*- encoding: utf-8 -*- +# stub: ffi 1.15.5 ruby lib +# stub: ext/ffi_c/extconf.rb + +Gem::Specification.new do |s| + s.name = "ffi".freeze + s.version = "1.15.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/ffi/ffi/issues", "changelog_uri" => "https://github.com/ffi/ffi/blob/master/CHANGELOG.md", "documentation_uri" => "https://github.com/ffi/ffi/wiki", "mailing_list_uri" => "http://groups.google.com/group/ruby-ffi", "source_code_uri" => "https://github.com/ffi/ffi/", "wiki_uri" => "https://github.com/ffi/ffi/wiki" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Wayne Meissner".freeze] + s.date = "2022-01-10" + s.description = "Ruby FFI library".freeze + s.email = "wmeissner@gmail.com".freeze + s.extensions = ["ext/ffi_c/extconf.rb".freeze] + s.files = ["ext/ffi_c/extconf.rb".freeze] + s.homepage = "https://github.com/ffi/ffi/wiki".freeze + s.licenses = ["BSD-3-Clause".freeze] + s.rdoc_options = ["--exclude=ext/ffi_c/.*\\.o$".freeze, "--exclude=ffi_c\\.(bundle|so)$".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Ruby FFI".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<rake>.freeze, ["~> 13.0"]) + s.add_development_dependency(%q<rake-compiler>.freeze, ["~> 1.0"]) + s.add_development_dependency(%q<rake-compiler-dock>.freeze, ["~> 1.0"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 2.14.1"]) + else + s.add_dependency(%q<rake>.freeze, ["~> 13.0"]) + s.add_dependency(%q<rake-compiler>.freeze, ["~> 1.0"]) + s.add_dependency(%q<rake-compiler-dock>.freeze, ["~> 1.0"]) + s.add_dependency(%q<rspec>.freeze, ["~> 2.14.1"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/fourflusher-2.3.1.gemspec b/vendor/bundle/ruby/3.0.0/specifications/fourflusher-2.3.1.gemspec new file mode 100644 index 0000000..44ad1fa --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/fourflusher-2.3.1.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# stub: fourflusher 2.3.1 ruby lib + +Gem::Specification.new do |s| + s.name = "fourflusher".freeze + s.version = "2.3.1" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Boris Bu\u0308gling".freeze] + s.bindir = "exe".freeze + s.date = "2019-06-18" + s.email = ["boris@icculus.org".freeze] + s.homepage = "https://github.com/neonichu/fourflusher".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "A library for interacting with Xcode simulators.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.11"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 2"]) + s.add_development_dependency(%q<rubocop>.freeze, ["~> 0.35.0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.11"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_dependency(%q<rspec>.freeze, ["~> 2"]) + s.add_dependency(%q<rubocop>.freeze, ["~> 0.35.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/fuzzy_match-2.0.4.gemspec b/vendor/bundle/ruby/3.0.0/specifications/fuzzy_match-2.0.4.gemspec new file mode 100644 index 0000000..a58d6b9 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/fuzzy_match-2.0.4.gemspec @@ -0,0 +1,51 @@ +# -*- encoding: utf-8 -*- +# stub: fuzzy_match 2.0.4 ruby lib + +Gem::Specification.new do |s| + s.name = "fuzzy_match".freeze + s.version = "2.0.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Seamus Abshere".freeze] + s.date = "2013-09-19" + s.description = "Find a needle in a haystack using string similarity and (optionally) regexp rules. Replaces loose_tight_dictionary.".freeze + s.email = ["seamus@abshere.net".freeze] + s.executables = ["fuzzy_match".freeze] + s.files = ["bin/fuzzy_match".freeze] + s.homepage = "https://github.com/seamusabshere/fuzzy_match".freeze + s.rubygems_version = "3.2.3".freeze + s.summary = "Find a needle in a haystack using string similarity and (optionally) regexp rules. Replaces loose_tight_dictionary.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 3 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<active_record_inline_schema>.freeze, [">= 0.4.0"]) + s.add_development_dependency(%q<pry>.freeze, [">= 0"]) + s.add_development_dependency(%q<rspec-core>.freeze, [">= 0"]) + s.add_development_dependency(%q<rspec-expectations>.freeze, [">= 0"]) + s.add_development_dependency(%q<rspec-mocks>.freeze, [">= 0"]) + s.add_development_dependency(%q<activerecord>.freeze, [">= 3"]) + s.add_development_dependency(%q<mysql2>.freeze, [">= 0"]) + s.add_development_dependency(%q<cohort_analysis>.freeze, [">= 0"]) + s.add_development_dependency(%q<weighted_average>.freeze, [">= 0"]) + s.add_development_dependency(%q<yard>.freeze, [">= 0"]) + s.add_development_dependency(%q<amatch>.freeze, [">= 0"]) + else + s.add_dependency(%q<active_record_inline_schema>.freeze, [">= 0.4.0"]) + s.add_dependency(%q<pry>.freeze, [">= 0"]) + s.add_dependency(%q<rspec-core>.freeze, [">= 0"]) + s.add_dependency(%q<rspec-expectations>.freeze, [">= 0"]) + s.add_dependency(%q<rspec-mocks>.freeze, [">= 0"]) + s.add_dependency(%q<activerecord>.freeze, [">= 3"]) + s.add_dependency(%q<mysql2>.freeze, [">= 0"]) + s.add_dependency(%q<cohort_analysis>.freeze, [">= 0"]) + s.add_dependency(%q<weighted_average>.freeze, [">= 0"]) + s.add_dependency(%q<yard>.freeze, [">= 0"]) + s.add_dependency(%q<amatch>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/gh_inspector-1.1.3.gemspec b/vendor/bundle/ruby/3.0.0/specifications/gh_inspector-1.1.3.gemspec new file mode 100644 index 0000000..427f5c8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/gh_inspector-1.1.3.gemspec @@ -0,0 +1,40 @@ +# -*- encoding: utf-8 -*- +# stub: gh_inspector 1.1.3 ruby lib + +Gem::Specification.new do |s| + s.name = "gh_inspector".freeze + s.version = "1.1.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Orta Therox".freeze, "Felix Krause".freeze] + s.date = "2018-03-06" + s.description = "Search through GitHub issues for your project for existing issues about a Ruby Error.".freeze + s.email = ["orta.therox@gmail.com".freeze, "gh_inspector@krausefx.com".freeze] + s.homepage = "https://github.com/orta/gh_inspector".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Search through GitHub issues for your project for existing issues about a Ruby Error.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.11"]) + s.add_development_dependency(%q<pry>.freeze, ["~> 0.6"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 3.0"]) + s.add_development_dependency(%q<rubocop>.freeze, ["~> 0", "> 0"]) + s.add_development_dependency(%q<yard>.freeze, ["~> 0", "> 0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.11"]) + s.add_dependency(%q<pry>.freeze, ["~> 0.6"]) + s.add_dependency(%q<rake>.freeze, ["~> 10.0"]) + s.add_dependency(%q<rspec>.freeze, ["~> 3.0"]) + s.add_dependency(%q<rubocop>.freeze, ["~> 0", "> 0"]) + s.add_dependency(%q<yard>.freeze, ["~> 0", "> 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/httpclient-2.8.3.gemspec b/vendor/bundle/ruby/3.0.0/specifications/httpclient-2.8.3.gemspec new file mode 100644 index 0000000..5a1c833 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/httpclient-2.8.3.gemspec @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# stub: httpclient 2.8.3 ruby lib + +Gem::Specification.new do |s| + s.name = "httpclient".freeze + s.version = "2.8.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Hiroshi Nakamura".freeze] + s.date = "2016-12-09" + s.email = "nahi@ruby-lang.org".freeze + s.executables = ["httpclient".freeze] + s.files = ["bin/httpclient".freeze] + s.homepage = "https://github.com/nahi/httpclient".freeze + s.licenses = ["ruby".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "gives something like the functionality of libwww-perl (LWP) in Ruby".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/i18n-1.10.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/i18n-1.10.0.gemspec new file mode 100644 index 0000000..5856f6a --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/i18n-1.10.0.gemspec @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# stub: i18n 1.10.0 ruby lib + +Gem::Specification.new do |s| + s.name = "i18n".freeze + s.version = "1.10.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 1.3.5".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/ruby-i18n/i18n/issues", "changelog_uri" => "https://github.com/ruby-i18n/i18n/releases", "documentation_uri" => "https://guides.rubyonrails.org/i18n.html", "source_code_uri" => "https://github.com/ruby-i18n/i18n" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Sven Fuchs".freeze, "Joshua Harvey".freeze, "Matt Aimonetti".freeze, "Stephan Soller".freeze, "Saimon Moore".freeze, "Ryan Bigg".freeze] + s.date = "2022-02-14" + s.description = "New wave Internationalization support for Ruby.".freeze + s.email = "rails-i18n@googlegroups.com".freeze + s.homepage = "https://github.com/ruby-i18n/i18n".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "New wave Internationalization support for Ruby".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + else + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/i18n-1.8.5.gemspec b/vendor/bundle/ruby/3.0.0/specifications/i18n-1.8.5.gemspec new file mode 100644 index 0000000..28bdda6 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/i18n-1.8.5.gemspec @@ -0,0 +1,33 @@ +# -*- encoding: utf-8 -*- +# stub: i18n 1.8.5 ruby lib + +Gem::Specification.new do |s| + s.name = "i18n".freeze + s.version = "1.8.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 1.3.5".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/svenfuchs/i18n/issues", "changelog_uri" => "https://github.com/svenfuchs/i18n/releases", "documentation_uri" => "https://guides.rubyonrails.org/i18n.html", "source_code_uri" => "https://github.com/svenfuchs/i18n" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Sven Fuchs".freeze, "Joshua Harvey".freeze, "Matt Aimonetti".freeze, "Stephan Soller".freeze, "Saimon Moore".freeze, "Ryan Bigg".freeze] + s.date = "2020-07-23" + s.description = "New wave Internationalization support for Ruby.".freeze + s.email = "rails-i18n@googlegroups.com".freeze + s.homepage = "https://github.com/ruby-i18n/i18n".freeze + s.licenses = ["MIT".freeze] + s.post_install_message = "\nHEADS UP! i18n 1.1 changed fallbacks to exclude default locale.\nBut that may break your application.\n\nIf you are upgrading your Rails application from an older version of Rails:\n\nPlease check your Rails app for 'config.i18n.fallbacks = true'.\nIf you're using I18n (>= 1.1.0) and Rails (< 5.2.2), this should be\n'config.i18n.fallbacks = [I18n.default_locale]'.\nIf not, fallbacks will be broken in your app by I18n 1.1.x.\n\nIf you are starting a NEW Rails application, you can ignore this notice.\n\nFor more info see:\nhttps://github.com/svenfuchs/i18n/releases/tag/v1.1.0\n\n".freeze + s.required_ruby_version = Gem::Requirement.new(">= 2.3.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "New wave Internationalization support for Ruby".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + else + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/json-2.3.1.gemspec b/vendor/bundle/ruby/3.0.0/specifications/json-2.3.1.gemspec new file mode 100644 index 0000000..d96da2d Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/specifications/json-2.3.1.gemspec differ diff --git a/vendor/bundle/ruby/3.0.0/specifications/json-2.6.1.gemspec b/vendor/bundle/ruby/3.0.0/specifications/json-2.6.1.gemspec new file mode 100644 index 0000000..484fa7e Binary files /dev/null and b/vendor/bundle/ruby/3.0.0/specifications/json-2.6.1.gemspec differ diff --git a/vendor/bundle/ruby/3.0.0/specifications/minitest-5.14.2.gemspec b/vendor/bundle/ruby/3.0.0/specifications/minitest-5.14.2.gemspec new file mode 100644 index 0000000..cabde76 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/minitest-5.14.2.gemspec @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# stub: minitest 5.14.2 ruby lib + +Gem::Specification.new do |s| + s.name = "minitest".freeze + s.version = "5.14.2" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/seattlerb/minitest/issues", "homepage_uri" => "https://github.com/seattlerb/minitest" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Ryan Davis".freeze] + s.cert_chain = ["-----BEGIN CERTIFICATE-----\nMIIDPjCCAiagAwIBAgIBBDANBgkqhkiG9w0BAQsFADBFMRMwEQYDVQQDDApyeWFu\nZC1ydWJ5MRkwFwYKCZImiZPyLGQBGRYJemVuc3BpZGVyMRMwEQYKCZImiZPyLGQB\nGRYDY29tMB4XDTE5MTIxMzAwMDIwNFoXDTIwMTIxMjAwMDIwNFowRTETMBEGA1UE\nAwwKcnlhbmQtcnVieTEZMBcGCgmSJomT8ixkARkWCXplbnNwaWRlcjETMBEGCgmS\nJomT8ixkARkWA2NvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALda\nb9DCgK+627gPJkB6XfjZ1itoOQvpqH1EXScSaba9/S2VF22VYQbXU1xQXL/WzCkx\ntaCPaLmfYIaFcHHCSY4hYDJijRQkLxPeB3xbOfzfLoBDbjvx5JxgJxUjmGa7xhcT\noOvjtt5P8+GSK9zLzxQP0gVLS/D0FmoE44XuDr3iQkVS2ujU5zZL84mMNqNB1znh\nGiadM9GHRaDiaxuX0cIUBj19T01mVE2iymf9I6bEsiayK/n6QujtyCbTWsAS9Rqt\nqhtV7HJxNKuPj/JFH0D2cswvzznE/a5FOYO68g+YCuFi5L8wZuuM8zzdwjrWHqSV\ngBEfoTEGr7Zii72cx+sCAwEAAaM5MDcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAw\nHQYDVR0OBBYEFEfFe9md/r/tj/Wmwpy+MI8d9k/hMA0GCSqGSIb3DQEBCwUAA4IB\nAQCkkcHqAa6IKLYGl93rn78J3L+LnqyxaA059n4IGMHWN5bv9KBQnIjOrpLadtYZ\nvhWkunWDKdfVapBEq5+T4HzqnsEXC3aCv6JEKJY6Zw7iSzl0M8hozuzRr+w46wvT\nfV2yTN6QTVxqbMsJJyjosks4ZdQYov2zdvQpt1HsLi+Qmckmg8SPZsd+T8uiiBCf\nb+1ORSM5eEfBQenPXy83LZcoQz8i6zVB4aAfTGGdhxjoMGUEmSZ6xpkOzmnGa9QK\nm5x9IDiApM+vCELNwDXXGNFEnQBBK+wAe4Pek8o1V1TTOxL1kGPewVOitX1p3xoN\nh7iEjga8iM1LbZUfiISZ+WrB\n-----END CERTIFICATE-----\n".freeze] + s.date = "2020-09-01" + s.description = "minitest provides a complete suite of testing facilities supporting\nTDD, BDD, mocking, and benchmarking.\n\n \"I had a class with Jim Weirich on testing last week and we were\n allowed to choose our testing frameworks. Kirk Haines and I were\n paired up and we cracked open the code for a few test\n frameworks...\n\n I MUST say that minitest is *very* readable / understandable\n compared to the 'other two' options we looked at. Nicely done and\n thank you for helping us keep our mental sanity.\"\n\n -- Wayne E. Seguin\n\nminitest/test is a small and incredibly fast unit testing framework.\nIt provides a rich set of assertions to make your tests clean and\nreadable.\n\nminitest/spec is a functionally complete spec engine. 
It hooks onto\nminitest/test and seamlessly bridges test assertions over to spec\nexpectations.\n\nminitest/benchmark is an awesome way to assert the performance of your\nalgorithms in a repeatable manner. Now you can assert that your newb\nco-worker doesn't replace your linear algorithm with an exponential\none!\n\nminitest/mock by Steven Baker, is a beautifully tiny mock (and stub)\nobject framework.\n\nminitest/pride shows pride in testing and adds coloring to your test\noutput. I guess it is an example of how to write IO pipes too. :P\n\nminitest/test is meant to have a clean implementation for language\nimplementors that need a minimal set of methods to bootstrap a working\ntest suite. For example, there is no magic involved for test-case\ndiscovery.\n\n \"Again, I can't praise enough the idea of a testing/specing\n framework that I can actually read in full in one sitting!\"\n\n -- Piotr Szotkowski\n\nComparing to rspec:\n\n rspec is a testing DSL. minitest is ruby.\n\n -- Adam Hawkins, \"Bow Before MiniTest\"\n\nminitest doesn't reinvent anything that ruby already provides, like:\nclasses, modules, inheritance, methods. This means you only have to\nlearn ruby to use minitest and all of your regular OO practices like\nextract-method refactorings still apply.".freeze + s.email = ["ryand-ruby@zenspider.com".freeze] + s.extra_rdoc_files = ["History.rdoc".freeze, "Manifest.txt".freeze, "README.rdoc".freeze] + s.files = ["History.rdoc".freeze, "Manifest.txt".freeze, "README.rdoc".freeze] + s.homepage = "https://github.com/seattlerb/minitest".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--main".freeze, "README.rdoc".freeze] + s.required_ruby_version = Gem::Requirement.new([">= 2.2".freeze, "< 3.1".freeze]) + s.rubygems_version = "3.2.3".freeze + s.summary = "minitest provides a complete suite of testing facilities supporting TDD, BDD, mocking, and benchmarking".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_development_dependency(%q<hoe>.freeze, ["~> 3.22"]) + else + s.add_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_dependency(%q<hoe>.freeze, ["~> 3.22"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/minitest-5.15.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/minitest-5.15.0.gemspec new file mode 100644 index 0000000..b22e091 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/minitest-5.15.0.gemspec @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# stub: minitest 5.15.0 ruby lib + +Gem::Specification.new do |s| + s.name = "minitest".freeze + s.version = "5.15.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/seattlerb/minitest/issues", "homepage_uri" => "https://github.com/seattlerb/minitest" } if s.respond_to? 
:metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Ryan Davis".freeze] + s.cert_chain = ["-----BEGIN CERTIFICATE-----\nMIIDPjCCAiagAwIBAgIBBTANBgkqhkiG9w0BAQsFADBFMRMwEQYDVQQDDApyeWFu\nZC1ydWJ5MRkwFwYKCZImiZPyLGQBGRYJemVuc3BpZGVyMRMwEQYKCZImiZPyLGQB\nGRYDY29tMB4XDTIwMTIyMjIwMzgzMFoXDTIxMTIyMjIwMzgzMFowRTETMBEGA1UE\nAwwKcnlhbmQtcnVieTEZMBcGCgmSJomT8ixkARkWCXplbnNwaWRlcjETMBEGCgmS\nJomT8ixkARkWA2NvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALda\nb9DCgK+627gPJkB6XfjZ1itoOQvpqH1EXScSaba9/S2VF22VYQbXU1xQXL/WzCkx\ntaCPaLmfYIaFcHHCSY4hYDJijRQkLxPeB3xbOfzfLoBDbjvx5JxgJxUjmGa7xhcT\noOvjtt5P8+GSK9zLzxQP0gVLS/D0FmoE44XuDr3iQkVS2ujU5zZL84mMNqNB1znh\nGiadM9GHRaDiaxuX0cIUBj19T01mVE2iymf9I6bEsiayK/n6QujtyCbTWsAS9Rqt\nqhtV7HJxNKuPj/JFH0D2cswvzznE/a5FOYO68g+YCuFi5L8wZuuM8zzdwjrWHqSV\ngBEfoTEGr7Zii72cx+sCAwEAAaM5MDcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAw\nHQYDVR0OBBYEFEfFe9md/r/tj/Wmwpy+MI8d9k/hMA0GCSqGSIb3DQEBCwUAA4IB\nAQAE3XRm1YZcCVjAJy5yMZvTOFrS7B2SYErc+0QwmKYbHztTTDY2m5Bii+jhpuxh\nH+ETcU1z8TUKLpsBUP4kUpIRowkVN1p/jKapV8T3Rbwq+VuYFe+GMKsf8wGZSecG\noMQ8DzzauZfbvhe2kDg7G9BBPU0wLQlY25rDcCy9bLnD7R0UK3ONqpwvsI5I7x5X\nZIMXR0a9/DG+55mawwdGzCQobDKiSNLK89KK7OcNTALKU0DfgdTkktdgKchzKHqZ\nd/AHw/kcnU6iuMUoJEcGiJd4gVCTn1l3cDcIvxakGslCA88Jubw0Sqatan0TnC9g\nKToW560QIey7SPfHWduzFJnV\n-----END CERTIFICATE-----\n".freeze] + s.date = "2021-12-15" + s.description = "minitest provides a complete suite of testing facilities supporting\nTDD, BDD, mocking, and benchmarking.\n\n \"I had a class with Jim Weirich on testing last week and we were\n allowed to choose our testing frameworks. Kirk Haines and I were\n paired up and we cracked open the code for a few test\n frameworks...\n\n I MUST say that minitest is *very* readable / understandable\n compared to the 'other two' options we looked at. Nicely done and\n thank you for helping us keep our mental sanity.\"\n\n -- Wayne E. Seguin\n\nminitest/test is a small and incredibly fast unit testing framework.\nIt provides a rich set of assertions to make your tests clean and\nreadable.\n\nminitest/spec is a functionally complete spec engine. It hooks onto\nminitest/test and seamlessly bridges test assertions over to spec\nexpectations.\n\nminitest/benchmark is an awesome way to assert the performance of your\nalgorithms in a repeatable manner. Now you can assert that your newb\nco-worker doesn't replace your linear algorithm with an exponential\none!\n\nminitest/mock by Steven Baker, is a beautifully tiny mock (and stub)\nobject framework.\n\nminitest/pride shows pride in testing and adds coloring to your test\noutput. I guess it is an example of how to write IO pipes too. :P\n\nminitest/test is meant to have a clean implementation for language\nimplementors that need a minimal set of methods to bootstrap a working\ntest suite. For example, there is no magic involved for test-case\ndiscovery.\n\n \"Again, I can't praise enough the idea of a testing/specing\n framework that I can actually read in full in one sitting!\"\n\n -- Piotr Szotkowski\n\nComparing to rspec:\n\n rspec is a testing DSL. minitest is ruby.\n\n -- Adam Hawkins, \"Bow Before MiniTest\"\n\nminitest doesn't reinvent anything that ruby already provides, like:\nclasses, modules, inheritance, methods. 
This means you only have to\nlearn ruby to use minitest and all of your regular OO practices like\nextract-method refactorings still apply.".freeze + s.email = ["ryand-ruby@zenspider.com".freeze] + s.extra_rdoc_files = ["History.rdoc".freeze, "Manifest.txt".freeze, "README.rdoc".freeze] + s.files = ["History.rdoc".freeze, "Manifest.txt".freeze, "README.rdoc".freeze] + s.homepage = "https://github.com/seattlerb/minitest".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--main".freeze, "README.rdoc".freeze] + s.required_ruby_version = Gem::Requirement.new([">= 2.2".freeze, "< 4.0".freeze]) + s.rubygems_version = "3.2.3".freeze + s.summary = "minitest provides a complete suite of testing facilities supporting TDD, BDD, mocking, and benchmarking".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_development_dependency(%q<hoe>.freeze, ["~> 3.22"]) + else + s.add_dependency(%q<rdoc>.freeze, [">= 4.0", "< 7"]) + s.add_dependency(%q<hoe>.freeze, ["~> 3.22"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/molinillo-0.6.6.gemspec b/vendor/bundle/ruby/3.0.0/specifications/molinillo-0.6.6.gemspec new file mode 100644 index 0000000..30f4e70 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/molinillo-0.6.6.gemspec @@ -0,0 +1,31 @@ +# -*- encoding: utf-8 -*- +# stub: molinillo 0.6.6 ruby lib + +Gem::Specification.new do |s| + s.name = "molinillo".freeze + s.version = "0.6.6" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Samuel E. Giddins".freeze] + s.date = "2018-08-07" + s.email = ["segiddins@segiddins.me".freeze] + s.homepage = "https://github.com/CocoaPods/Molinillo".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Provides support for dependency resolution".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.5"]) + s.add_development_dependency(%q<rake>.freeze, [">= 0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.5"]) + s.add_dependency(%q<rake>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/molinillo-0.8.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/molinillo-0.8.0.gemspec new file mode 100644 index 0000000..e04333d --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/molinillo-0.8.0.gemspec @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# stub: molinillo 0.8.0 ruby lib + +Gem::Specification.new do |s| + s.name = "molinillo".freeze + s.version = "0.8.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Samuel E. 
Giddins".freeze] + s.date = "2021-08-09" + s.email = ["segiddins@segiddins.me".freeze] + s.homepage = "https://github.com/CocoaPods/Molinillo".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Provides support for dependency resolution".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<rake>.freeze, [">= 0"]) + else + s.add_dependency(%q<rake>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/nanaimo-0.3.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/nanaimo-0.3.0.gemspec new file mode 100644 index 0000000..e24d76c --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/nanaimo-0.3.0.gemspec @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +# stub: nanaimo 0.3.0 ruby lib + +Gem::Specification.new do |s| + s.name = "nanaimo".freeze + s.version = "0.3.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Danielle Tomlinson".freeze, "Samuel Giddins".freeze] + s.bindir = "exe".freeze + s.date = "2020-07-17" + s.email = ["dan@tomlinson.io".freeze, "segiddins@segiddins.me".freeze] + s.homepage = "https://github.com/CocoaPods/Nanaimo".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "A library for (de)serialization of ASCII Plists.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, ["~> 1.12"]) + s.add_development_dependency(%q<rake>.freeze, ["~> 12.3"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 3.0"]) + else + s.add_dependency(%q<bundler>.freeze, ["~> 1.12"]) + s.add_dependency(%q<rake>.freeze, ["~> 12.3"]) + s.add_dependency(%q<rspec>.freeze, ["~> 3.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/nap-1.1.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/nap-1.1.0.gemspec new file mode 100644 index 0000000..71fbdfd --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/nap-1.1.0.gemspec @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# stub: nap 1.1.0 ruby lib + +Gem::Specification.new do |s| + s.name = "nap".freeze + s.version = "1.1.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Manfred Stienstra".freeze] + s.date = "2016-01-29" + s.description = " Nap is a really simple REST library. It allows you to perform HTTP requests\n with minimal amounts of code.\n".freeze + s.email = "manfred@fngtps.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "LICENSE".freeze] + s.files = ["LICENSE".freeze, "README.md".freeze] + s.homepage = "https://github.com/Fingertips/nap".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--charset=utf-8".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Nap is a really simple REST library.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<rake>.freeze, ["~> 10"]) + s.add_development_dependency(%q<peck>.freeze, ["~> 0.5"]) + else + s.add_dependency(%q<rake>.freeze, ["~> 10"]) + s.add_dependency(%q<peck>.freeze, ["~> 0.5"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/netrc-0.11.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/netrc-0.11.0.gemspec new file mode 100644 index 0000000..ad851fe --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/netrc-0.11.0.gemspec @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# stub: netrc 0.11.0 ruby lib + +Gem::Specification.new do |s| + s.name = "netrc".freeze + s.version = "0.11.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Keith Rarick".freeze, "geemus (Wesley Beary)".freeze] + s.date = "2015-10-29" + s.description = "This library can read and update netrc files, preserving formatting including comments and whitespace.".freeze + s.email = "geemus@gmail.com".freeze + s.homepage = "https://github.com/geemus/netrc".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Library to read and write netrc files.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<minitest>.freeze, [">= 0"]) + else + s.add_dependency(%q<minitest>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/public_suffix-4.0.6.gemspec b/vendor/bundle/ruby/3.0.0/specifications/public_suffix-4.0.6.gemspec new file mode 100644 index 0000000..2de5d38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/public_suffix-4.0.6.gemspec @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# stub: public_suffix 4.0.6 ruby lib + +Gem::Specification.new do |s| + s.name = "public_suffix".freeze + s.version = "4.0.6" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/weppos/publicsuffix-ruby/issues", "changelog_uri" => "https://github.com/weppos/publicsuffix-ruby/blob/master/CHANGELOG.md", "documentation_uri" => "https://rubydoc.info/gems/public_suffix/4.0.6", "homepage_uri" => "https://simonecarletti.com/code/publicsuffix-ruby", "source_code_uri" => "https://github.com/weppos/publicsuffix-ruby/tree/v4.0.6" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Simone Carletti".freeze] + s.date = "2020-09-02" + s.description = "PublicSuffix can parse and decompose a domain name into top level domain, domain and subdomains.".freeze + s.email = ["weppos@weppos.net".freeze] + s.extra_rdoc_files = ["LICENSE.txt".freeze] + s.files = ["LICENSE.txt".freeze] + s.homepage = "https://simonecarletti.com/code/publicsuffix-ruby".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Domain name parser based on the Public Suffix List.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? 
:installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/rexml-3.2.5.gemspec b/vendor/bundle/ruby/3.0.0/specifications/rexml-3.2.5.gemspec new file mode 100644 index 0000000..fd69e07 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/rexml-3.2.5.gemspec @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# stub: rexml 3.2.5 ruby lib + +Gem::Specification.new do |s| + s.name = "rexml".freeze + s.version = "3.2.5" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Kouhei Sutou".freeze] + s.bindir = "exe".freeze + s.date = "2021-04-05" + s.description = "An XML toolkit for Ruby".freeze + s.email = ["kou@cozmixng.org".freeze] + s.extra_rdoc_files = ["LICENSE.txt".freeze, "NEWS.md".freeze, "README.md".freeze, "doc/rexml/context.rdoc".freeze, "doc/rexml/tasks/rdoc/child.rdoc".freeze, "doc/rexml/tasks/rdoc/document.rdoc".freeze, "doc/rexml/tasks/rdoc/element.rdoc".freeze, "doc/rexml/tasks/rdoc/node.rdoc".freeze, "doc/rexml/tasks/rdoc/parent.rdoc".freeze, "doc/rexml/tasks/tocs/child_toc.rdoc".freeze, "doc/rexml/tasks/tocs/document_toc.rdoc".freeze, "doc/rexml/tasks/tocs/element_toc.rdoc".freeze, "doc/rexml/tasks/tocs/master_toc.rdoc".freeze, "doc/rexml/tasks/tocs/node_toc.rdoc".freeze, "doc/rexml/tasks/tocs/parent_toc.rdoc".freeze] + s.files = ["LICENSE.txt".freeze, "NEWS.md".freeze, "README.md".freeze, "doc/rexml/context.rdoc".freeze, "doc/rexml/tasks/rdoc/child.rdoc".freeze, "doc/rexml/tasks/rdoc/document.rdoc".freeze, "doc/rexml/tasks/rdoc/element.rdoc".freeze, "doc/rexml/tasks/rdoc/node.rdoc".freeze, "doc/rexml/tasks/rdoc/parent.rdoc".freeze, "doc/rexml/tasks/tocs/child_toc.rdoc".freeze, "doc/rexml/tasks/tocs/document_toc.rdoc".freeze, "doc/rexml/tasks/tocs/element_toc.rdoc".freeze, "doc/rexml/tasks/tocs/master_toc.rdoc".freeze, "doc/rexml/tasks/tocs/node_toc.rdoc".freeze, "doc/rexml/tasks/tocs/parent_toc.rdoc".freeze] + s.homepage = "https://github.com/ruby/rexml".freeze + s.licenses = ["BSD-2-Clause".freeze] + s.rdoc_options = ["--main".freeze, "README.md".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "An XML toolkit for Ruby".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_development_dependency(%q<bundler>.freeze, [">= 0"]) + s.add_development_dependency(%q<rake>.freeze, [">= 0"]) + s.add_development_dependency(%q<test-unit>.freeze, [">= 0"]) + else + s.add_dependency(%q<bundler>.freeze, [">= 0"]) + s.add_dependency(%q<rake>.freeze, [">= 0"]) + s.add_dependency(%q<test-unit>.freeze, [">= 0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/ruby-macho-1.4.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/ruby-macho-1.4.0.gemspec new file mode 100644 index 0000000..1789fc8 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/ruby-macho-1.4.0.gemspec @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# stub: ruby-macho 1.4.0 ruby lib + +Gem::Specification.new do |s| + s.name = "ruby-macho".freeze + s.version = "1.4.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["William Woodruff".freeze] + s.date = "2019-02-21" + s.description = "A library for viewing and manipulating Mach-O files in Ruby.".freeze + s.email = "william@yossarian.net".freeze + s.homepage = "https://github.com/Homebrew/ruby-macho".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "ruby-macho - Mach-O file analyzer.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/ruby-macho-2.5.1.gemspec b/vendor/bundle/ruby/3.0.0/specifications/ruby-macho-2.5.1.gemspec new file mode 100644 index 0000000..37cdf38 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/ruby-macho-2.5.1.gemspec @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# stub: ruby-macho 2.5.1 ruby lib + +Gem::Specification.new do |s| + s.name = "ruby-macho".freeze + s.version = "2.5.1" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["William Woodruff".freeze] + s.date = "2021-05-15" + s.description = "A library for viewing and manipulating Mach-O files in Ruby.".freeze + s.email = "william@yossarian.net".freeze + s.homepage = "https://github.com/Homebrew/ruby-macho".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.5".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "ruby-macho - Mach-O file analyzer.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/thread_safe-0.3.6.gemspec b/vendor/bundle/ruby/3.0.0/specifications/thread_safe-0.3.6.gemspec new file mode 100644 index 0000000..b156af2 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/thread_safe-0.3.6.gemspec @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +# stub: thread_safe 0.3.6 ruby lib + +Gem::Specification.new do |s| + s.name = "thread_safe".freeze + s.version = "0.3.6" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Charles Oliver Nutter".freeze, "thedarkone".freeze] + s.date = "2017-02-22" + s.description = "A collection of data structures and utilities to make thread-safe programming in Ruby easier".freeze + s.email = ["headius@headius.com".freeze, "thedarkone2@gmail.com".freeze] + s.homepage = "https://github.com/ruby-concurrency/thread_safe".freeze + s.licenses = ["Apache-2.0".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Thread-safe collections and utilities for Ruby".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_development_dependency(%q<atomic>.freeze, ["= 1.1.16"]) + s.add_development_dependency(%q<rake>.freeze, ["< 12.0"]) + s.add_development_dependency(%q<rspec>.freeze, ["~> 3.2"]) + else + s.add_dependency(%q<atomic>.freeze, ["= 1.1.16"]) + s.add_dependency(%q<rake>.freeze, ["< 12.0"]) + s.add_dependency(%q<rspec>.freeze, ["~> 3.2"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/typhoeus-1.4.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/typhoeus-1.4.0.gemspec new file mode 100644 index 0000000..79c1b7f --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/typhoeus-1.4.0.gemspec @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# stub: typhoeus 1.4.0 ruby lib + +Gem::Specification.new do |s| + s.name = "typhoeus".freeze + s.version = "1.4.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 1.3.6".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["David Balatero".freeze, "Paul Dix".freeze, "Hans Hasselberg".freeze] + s.date = "2020-05-08" + s.description = "Like a modern code version of the mythical beast with 100 serpent heads, Typhoeus runs HTTP requests in parallel while cleanly encapsulating handling logic.".freeze + s.email = ["hans.hasselberg@gmail.com".freeze] + s.homepage = "https://github.com/typhoeus/typhoeus".freeze + s.licenses = ["MIT".freeze] + s.rubygems_version = "3.2.3".freeze + s.summary = "Parallel HTTP library on top of libcurl multi.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<ethon>.freeze, [">= 0.9.0"]) + else + s.add_dependency(%q<ethon>.freeze, [">= 0.9.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/tzinfo-1.2.8.gemspec b/vendor/bundle/ruby/3.0.0/specifications/tzinfo-1.2.8.gemspec new file mode 100644 index 0000000..b1c6198 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/tzinfo-1.2.8.gemspec @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# stub: tzinfo 1.2.8 ruby lib + +Gem::Specification.new do |s| + s.name = "tzinfo".freeze + s.version = "1.2.8" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Philip Ross".freeze] + s.cert_chain = ["-----BEGIN CERTIFICATE-----\nMIIDPDCCAiSgAwIBAgIBATANBgkqhkiG9w0BAQsFADAkMSIwIAYDVQQDDBlwaGls\nLnJvc3MvREM9Z21haWwvREM9Y29tMB4XDTE5MTIyNDE0NTU0N1oXDTM5MTIyNDE0\nNTU0N1owJDEiMCAGA1UEAwwZcGhpbC5yb3NzL0RDPWdtYWlsL0RDPWNvbTCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJGcwfqn4ZsmPl0b1Lt9dCzExrE5\nEeP/CRQjBdGHkF+mSpi69XysxdwLdfg5SPr9LfxthUug4nNFd5fDCiXM8hYe9jQD\nTmkIQKNBh4fFpGngn9gyy+SumCXi6b5L6d/aMc59NAOM6LJ88TOdH1648dh5rq3C\nULq82n3gg4+u0HHGjRPuR/pnCFQCZbANYdX+UBWd0qkOJn/EreNKROmEeHr/xKuh\n2/GlKFKt9KLcW3hwBB4fHHVYUzRau7D1m9KbEERdg//qNDC4B7fD2BFJuPbM5S7J\n41VwDAh1O8B/Qpg0f+S83K4Kodw4MiPGsug55UkNtd3mGR/zZJ9WM03DSwkCAwEA\nAaN5MHcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAwHQYDVR0OBBYEFA+Z8zvfzBuA\nesoHIfz7+jxfUOcfMB4GA1UdEQQXMBWBE3BoaWwucm9zc0BnbWFpbC5jb20wHgYD\nVR0SBBcwFYETcGhpbC5yb3NzQGdtYWlsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA\nJ80xgZ3gGdQVA8N+8NJANU5HLuZIU9jOaAlziU9ImoTgPiOHKGZC4as1TwT4kBt1\nQcnu7YSANYRrxP5tpOHsWPF/MQYgerAFCZS5+PzOTudwZ+7OsMW4/EMHy6aCVHEd\nc7HzQRC4mSrDRpWxzyBnZ5nX5OAmIkKA8NgeKybT/4Ku6iFPPUQwlyxQaO+Wlxdo\nFqHwpjRyoiVSpe4RUTNK3d3qesWPYi7Lxn6k6ZZeEdvG6ya33AXktE3jmmF+jPR1\nJ3Zn/kSTjTekiaspyGbczC3PUaeJNxr+yCvR4sk71Xmk/GaKKGOHedJ1uj/LAXrA\nMR0mpl7b8zCg0PFC1J73uw==\n-----END CERTIFICATE-----\n".freeze] + s.date = "2020-11-08" + s.description = "TZInfo provides daylight savings aware transformations between times in different time zones.".freeze + s.email = "phil.ross@gmail.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "CHANGES.md".freeze, "LICENSE".freeze] + s.files = ["CHANGES.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.homepage = "https://tzinfo.github.io".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--title".freeze, "TZInfo".freeze, "--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 1.8.7".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Daylight savings aware timezone library".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<thread_safe>.freeze, ["~> 0.1"]) + else + s.add_dependency(%q<thread_safe>.freeze, ["~> 0.1"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/tzinfo-2.0.4.gemspec b/vendor/bundle/ruby/3.0.0/specifications/tzinfo-2.0.4.gemspec new file mode 100644 index 0000000..216a9fc --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/tzinfo-2.0.4.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# stub: tzinfo 2.0.4 ruby lib + +Gem::Specification.new do |s| + s.name = "tzinfo".freeze + s.version = "2.0.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/tzinfo/tzinfo/issues", "changelog_uri" => "https://github.com/tzinfo/tzinfo/blob/master/CHANGES.md", "documentation_uri" => "https://rubydoc.info/gems/tzinfo/2.0.4", "homepage_uri" => "https://tzinfo.github.io", "source_code_uri" => "https://github.com/tzinfo/tzinfo/tree/v2.0.4" } if s.respond_to? 
:metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Philip Ross".freeze] + s.cert_chain = ["-----BEGIN CERTIFICATE-----\nMIIDPDCCAiSgAwIBAgIBATANBgkqhkiG9w0BAQsFADAkMSIwIAYDVQQDDBlwaGls\nLnJvc3MvREM9Z21haWwvREM9Y29tMB4XDTE5MTIyNDE0NTU0N1oXDTM5MTIyNDE0\nNTU0N1owJDEiMCAGA1UEAwwZcGhpbC5yb3NzL0RDPWdtYWlsL0RDPWNvbTCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJGcwfqn4ZsmPl0b1Lt9dCzExrE5\nEeP/CRQjBdGHkF+mSpi69XysxdwLdfg5SPr9LfxthUug4nNFd5fDCiXM8hYe9jQD\nTmkIQKNBh4fFpGngn9gyy+SumCXi6b5L6d/aMc59NAOM6LJ88TOdH1648dh5rq3C\nULq82n3gg4+u0HHGjRPuR/pnCFQCZbANYdX+UBWd0qkOJn/EreNKROmEeHr/xKuh\n2/GlKFKt9KLcW3hwBB4fHHVYUzRau7D1m9KbEERdg//qNDC4B7fD2BFJuPbM5S7J\n41VwDAh1O8B/Qpg0f+S83K4Kodw4MiPGsug55UkNtd3mGR/zZJ9WM03DSwkCAwEA\nAaN5MHcwCQYDVR0TBAIwADALBgNVHQ8EBAMCBLAwHQYDVR0OBBYEFA+Z8zvfzBuA\nesoHIfz7+jxfUOcfMB4GA1UdEQQXMBWBE3BoaWwucm9zc0BnbWFpbC5jb20wHgYD\nVR0SBBcwFYETcGhpbC5yb3NzQGdtYWlsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA\nJ80xgZ3gGdQVA8N+8NJANU5HLuZIU9jOaAlziU9ImoTgPiOHKGZC4as1TwT4kBt1\nQcnu7YSANYRrxP5tpOHsWPF/MQYgerAFCZS5+PzOTudwZ+7OsMW4/EMHy6aCVHEd\nc7HzQRC4mSrDRpWxzyBnZ5nX5OAmIkKA8NgeKybT/4Ku6iFPPUQwlyxQaO+Wlxdo\nFqHwpjRyoiVSpe4RUTNK3d3qesWPYi7Lxn6k6ZZeEdvG6ya33AXktE3jmmF+jPR1\nJ3Zn/kSTjTekiaspyGbczC3PUaeJNxr+yCvR4sk71Xmk/GaKKGOHedJ1uj/LAXrA\nMR0mpl7b8zCg0PFC1J73uw==\n-----END CERTIFICATE-----\n".freeze] + s.date = "2020-12-16" + s.description = "TZInfo provides access to time zone data and allows times to be converted using time zone rules.".freeze + s.email = "phil.ross@gmail.com".freeze + s.extra_rdoc_files = ["README.md".freeze, "CHANGES.md".freeze, "LICENSE".freeze] + s.files = ["CHANGES.md".freeze, "LICENSE".freeze, "README.md".freeze] + s.homepage = "https://tzinfo.github.io".freeze + s.licenses = ["MIT".freeze] + s.rdoc_options = ["--title".freeze, "TZInfo".freeze, "--main".freeze, "README.md".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 1.9.3".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Time Zone Library".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + else + s.add_dependency(%q<concurrent-ruby>.freeze, ["~> 1.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/xcodeproj-1.19.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/xcodeproj-1.19.0.gemspec new file mode 100644 index 0000000..8e6c422 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/xcodeproj-1.19.0.gemspec @@ -0,0 +1,41 @@ +# -*- encoding: utf-8 -*- +# stub: xcodeproj 1.19.0 ruby lib + +Gem::Specification.new do |s| + s.name = "xcodeproj".freeze + s.version = "1.19.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze] + s.date = "2020-10-09" + s.description = "Xcodeproj lets you create and modify Xcode projects from Ruby. Script boring management tasks or build Xcode-friendly libraries. 
Also includes support for Xcode workspaces (.xcworkspace) and configuration files (.xcconfig).".freeze + s.email = "eloy.de.enige@gmail.com".freeze + s.executables = ["xcodeproj".freeze] + s.files = ["bin/xcodeproj".freeze] + s.homepage = "https://github.com/cocoapods/xcodeproj".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Create and modify Xcode projects from Ruby.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 3 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q<atomos>.freeze, ["~> 0.1.3"]) + s.add_runtime_dependency(%q<CFPropertyList>.freeze, [">= 2.3.3", "< 4.0"]) + s.add_runtime_dependency(%q<claide>.freeze, [">= 1.0.2", "< 2.0"]) + s.add_runtime_dependency(%q<colored2>.freeze, ["~> 3.1"]) + s.add_runtime_dependency(%q<nanaimo>.freeze, ["~> 0.3.0"]) + else + s.add_dependency(%q<atomos>.freeze, ["~> 0.1.3"]) + s.add_dependency(%q<CFPropertyList>.freeze, [">= 2.3.3", "< 4.0"]) + s.add_dependency(%q<claide>.freeze, [">= 1.0.2", "< 2.0"]) + s.add_dependency(%q<colored2>.freeze, ["~> 3.1"]) + s.add_dependency(%q<nanaimo>.freeze, ["~> 0.3.0"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/xcodeproj-1.21.0.gemspec b/vendor/bundle/ruby/3.0.0/specifications/xcodeproj-1.21.0.gemspec new file mode 100644 index 0000000..0add674 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/xcodeproj-1.21.0.gemspec @@ -0,0 +1,43 @@ +# -*- encoding: utf-8 -*- +# stub: xcodeproj 1.21.0 ruby lib + +Gem::Specification.new do |s| + s.name = "xcodeproj".freeze + s.version = "1.21.0" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.require_paths = ["lib".freeze] + s.authors = ["Eloy Duran".freeze] + s.date = "2021-08-09" + s.description = "Xcodeproj lets you create and modify Xcode projects from Ruby. Script boring management tasks or build Xcode-friendly libraries. Also includes support for Xcode workspaces (.xcworkspace) and configuration files (.xcconfig).".freeze + s.email = "eloy.de.enige@gmail.com".freeze + s.executables = ["xcodeproj".freeze] + s.files = ["bin/xcodeproj".freeze] + s.homepage = "https://github.com/cocoapods/xcodeproj".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.0.0".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Create and modify Xcode projects from Ruby.".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 3 + end + + if s.respond_to? 
:add_runtime_dependency then + s.add_runtime_dependency(%q<atomos>.freeze, ["~> 0.1.3"]) + s.add_runtime_dependency(%q<CFPropertyList>.freeze, [">= 2.3.3", "< 4.0"]) + s.add_runtime_dependency(%q<claide>.freeze, [">= 1.0.2", "< 2.0"]) + s.add_runtime_dependency(%q<colored2>.freeze, ["~> 3.1"]) + s.add_runtime_dependency(%q<nanaimo>.freeze, ["~> 0.3.0"]) + s.add_runtime_dependency(%q<rexml>.freeze, ["~> 3.2.4"]) + else + s.add_dependency(%q<atomos>.freeze, ["~> 0.1.3"]) + s.add_dependency(%q<CFPropertyList>.freeze, [">= 2.3.3", "< 4.0"]) + s.add_dependency(%q<claide>.freeze, [">= 1.0.2", "< 2.0"]) + s.add_dependency(%q<colored2>.freeze, ["~> 3.1"]) + s.add_dependency(%q<nanaimo>.freeze, ["~> 0.3.0"]) + s.add_dependency(%q<rexml>.freeze, ["~> 3.2.4"]) + end +end diff --git a/vendor/bundle/ruby/3.0.0/specifications/zeitwerk-2.5.4.gemspec b/vendor/bundle/ruby/3.0.0/specifications/zeitwerk-2.5.4.gemspec new file mode 100644 index 0000000..cf67771 --- /dev/null +++ b/vendor/bundle/ruby/3.0.0/specifications/zeitwerk-2.5.4.gemspec @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# stub: zeitwerk 2.5.4 ruby lib + +Gem::Specification.new do |s| + s.name = "zeitwerk".freeze + s.version = "2.5.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/fxn/zeitwerk/issues", "changelog_uri" => "https://github.com/fxn/zeitwerk/blob/master/CHANGELOG.md", "homepage_uri" => "https://github.com/fxn/zeitwerk", "source_code_uri" => "https://github.com/fxn/zeitwerk" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Xavier Noria".freeze] + s.date = "2022-01-27" + s.description = " Zeitwerk implements constant autoloading with Ruby semantics. Each gem\n and application may have their own independent autoloader, with its own\n configuration, inflector, and logger. Supports autoloading,\n reloading, and eager loading.\n".freeze + s.email = "fxn@hashref.com".freeze + s.homepage = "https://github.com/fxn/zeitwerk".freeze + s.licenses = ["MIT".freeze] + s.required_ruby_version = Gem::Requirement.new(">= 2.5".freeze) + s.rubygems_version = "3.2.3".freeze + s.summary = "Efficient and thread-safe constant autoloader".freeze + + s.installed_by_version = "3.2.3" if s.respond_to? :installed_by_version +end